{"text":"package resource\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/roles\"\n\t\"github.com\/qor\/validations\"\n)\n\n\/\/ Metaor interface\ntype Metaor interface {\n\tGetName() string\n\tGetFieldName() string\n\tGetSetter() func(resource interface{}, metaValue *MetaValue, context *qor.Context)\n\tGetFormattedValuer() func(interface{}, *qor.Context) interface{}\n\tGetValuer() func(interface{}, *qor.Context) interface{}\n\tGetResource() Resourcer\n\tGetMetas() []Metaor\n\tSetPermission(*roles.Permission)\n\tHasPermission(roles.PermissionMode, *qor.Context) bool\n}\n\n\/\/ ConfigureMetaBeforeInitializeInterface if a struct's field's type implemented this interface, it will be called when initializing a meta\ntype ConfigureMetaBeforeInitializeInterface interface {\n\tConfigureQorMetaBeforeInitialize(Metaor)\n}\n\n\/\/ ConfigureMetaInterface if a struct's field's type implemented this interface, it will be called after configed\ntype ConfigureMetaInterface interface {\n\tConfigureQorMeta(Metaor)\n}\n\n\/\/ MetaConfigInterface meta configuration interface\ntype MetaConfigInterface interface {\n\tConfigureMetaInterface\n}\n\n\/\/ MetaConfig base meta config struct\ntype MetaConfig struct {\n}\n\n\/\/ ConfigureQorMeta implement the MetaConfigInterface\nfunc (MetaConfig) ConfigureQorMeta(Metaor) {\n}\n\n\/\/ Meta meta struct definition\ntype Meta struct {\n\tName string\n\tFieldName string\n\tFieldStruct *gorm.StructField\n\tSetter func(resource interface{}, metaValue *MetaValue, context *qor.Context)\n\tValuer func(interface{}, *qor.Context) interface{}\n\tFormattedValuer func(interface{}, *qor.Context) interface{}\n\tConfig MetaConfigInterface\n\tBaseResource Resourcer\n\tResource Resourcer\n\tPermission *roles.Permission\n}\n\n\/\/ GetBaseResource get base resource from meta\nfunc (meta Meta) GetBaseResource() Resourcer {\n\treturn meta.BaseResource\n}\n\n\/\/ GetName get meta's name\nfunc (meta Meta) GetName() string {\n\treturn meta.Name\n}\n\n\/\/ GetFieldName get meta's field name\nfunc (meta Meta) GetFieldName() string {\n\treturn meta.FieldName\n}\n\n\/\/ SetFieldName set meta's field name\nfunc (meta *Meta) SetFieldName(name string) {\n\tmeta.FieldName = name\n}\n\n\/\/ GetSetter get setter from meta\nfunc (meta Meta) GetSetter() func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\treturn meta.Setter\n}\n\n\/\/ SetSetter set setter to meta\nfunc (meta *Meta) SetSetter(fc func(resource interface{}, metaValue *MetaValue, context *qor.Context)) {\n\tmeta.Setter = fc\n}\n\n\/\/ GetValuer get valuer from meta\nfunc (meta Meta) GetValuer() func(interface{}, *qor.Context) interface{} {\n\treturn meta.Valuer\n}\n\n\/\/ SetValuer set valuer for meta\nfunc (meta *Meta) SetValuer(fc func(interface{}, *qor.Context) interface{}) {\n\tmeta.Valuer = fc\n}\n\n\/\/ GetFormattedValuer get formatted valuer from meta\nfunc (meta *Meta) GetFormattedValuer() func(interface{}, *qor.Context) interface{} {\n\tif meta.FormattedValuer != nil {\n\t\treturn meta.FormattedValuer\n\t}\n\treturn meta.Valuer\n}\n\n\/\/ SetFormattedValuer set formatted valuer for meta\nfunc (meta *Meta) SetFormattedValuer(fc func(interface{}, *qor.Context) interface{}) {\n\tmeta.FormattedValuer = fc\n}\n\n\/\/ HasPermission check has permission or not\nfunc (meta Meta) HasPermission(mode roles.PermissionMode, context *qor.Context) bool {\n\tif meta.Permission == nil {\n\t\treturn true\n\t}\n\tvar roles = []interface{}{}\n\tfor _, role := range context.Roles {\n\t\troles = append(roles, role)\n\t}\n\treturn meta.Permission.HasPermission(mode, roles...)\n}\n\n\/\/ SetPermission set permission for meta\nfunc (meta *Meta) SetPermission(permission *roles.Permission) {\n\tmeta.Permission = permission\n}\n\n\/\/ PreInitialize when will be run before initialize, used to fill some basic necessary information\nfunc (meta *Meta) PreInitialize() error {\n\tif meta.Name == \"\" {\n\t\tutils.ExitWithMsg(\"Meta should have name: %v\", reflect.TypeOf(meta))\n\t} else if meta.FieldName == \"\" {\n\t\tmeta.FieldName = meta.Name\n\t}\n\n\t\/\/ parseNestedField used to handle case like Profile.Name\n\tvar parseNestedField = func(value reflect.Value, name string) (reflect.Value, string) {\n\t\tfields := strings.Split(name, \".\")\n\t\tvalue = reflect.Indirect(value)\n\t\tfor _, field := range fields[:len(fields)-1] {\n\t\t\tvalue = value.FieldByName(field)\n\t\t}\n\n\t\treturn value, fields[len(fields)-1]\n\t}\n\n\tvar getField = func(fields []*gorm.StructField, name string) *gorm.StructField {\n\t\tfor _, field := range fields {\n\t\t\tif field.Name == name || field.DBName == name {\n\t\t\t\treturn field\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar nestedField = strings.Contains(meta.FieldName, \".\")\n\tvar scope = &gorm.Scope{Value: meta.BaseResource.GetResource().Value}\n\tif nestedField {\n\t\tsubModel, name := parseNestedField(reflect.ValueOf(meta.BaseResource.GetResource().Value), meta.FieldName)\n\t\tmeta.FieldStruct = getField(scope.New(subModel.Interface()).GetStructFields(), name)\n\t} else {\n\t\tmeta.FieldStruct = getField(scope.GetStructFields(), meta.FieldName)\n\t}\n\treturn nil\n}\n\n\/\/ Initialize initialize meta, will set valuer, setter if haven't configure it\nfunc (meta *Meta) Initialize() error {\n\tvar (\n\t\tnestedField = strings.Contains(meta.FieldName, \".\")\n\t\tfield = meta.FieldStruct\n\t\thasColumn = meta.FieldStruct != nil\n\t)\n\n\tvar fieldType reflect.Type\n\tif hasColumn {\n\t\tfieldType = field.Struct.Type\n\t\tfor fieldType.Kind() == reflect.Ptr {\n\t\t\tfieldType = fieldType.Elem()\n\t\t}\n\t}\n\n\t\/\/ Set Meta Valuer\n\tif meta.Valuer == nil {\n\t\tif hasColumn {\n\t\t\tmeta.Valuer = func(value interface{}, context *qor.Context) interface{} {\n\t\t\t\tscope := context.GetDB().NewScope(value)\n\t\t\t\tfieldName := meta.FieldName\n\t\t\t\tif nestedField {\n\t\t\t\t\tfields := strings.Split(fieldName, \".\")\n\t\t\t\t\tfieldName = fields[len(fields)-1]\n\t\t\t\t}\n\n\t\t\t\tif f, ok := scope.FieldByName(fieldName); ok {\n\t\t\t\t\tif relationship := f.Relationship; relationship != nil && f.Field.CanAddr() && !scope.PrimaryKeyZero() {\n\t\t\t\t\t\tif (relationship.Kind == \"has_many\" || relationship.Kind == \"many_to_many\") && f.Field.Len() == 0 {\n\t\t\t\t\t\t\tcontext.GetDB().Model(value).Related(f.Field.Addr().Interface(), meta.FieldName)\n\t\t\t\t\t\t} else if (relationship.Kind == \"has_one\" || relationship.Kind == \"belongs_to\") && context.GetDB().NewScope(f.Field.Interface()).PrimaryKeyZero() {\n\t\t\t\t\t\t\tif f.Field.Kind() == reflect.Ptr && f.Field.IsNil() {\n\t\t\t\t\t\t\t\tf.Field.Set(reflect.New(f.Field.Type().Elem()))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontext.GetDB().Model(value).Related(f.Field.Addr().Interface(), meta.FieldName)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn f.Field.Interface()\n\t\t\t\t}\n\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t} else {\n\t\t\tutils.ExitWithMsg(\"Meta %v is not supported for resource %v, no `Valuer` configured for it\", meta.FieldName, reflect.TypeOf(meta.BaseResource.GetResource().Value))\n\t\t}\n\t}\n\n\tif meta.Setter == nil && hasColumn {\n\t\tif relationship := field.Relationship; relationship != nil {\n\t\t\tif relationship.Kind == \"belongs_to\" || relationship.Kind == \"many_to_many\" {\n\t\t\t\tmeta.Setter = func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\t\t\tscope := &gorm.Scope{Value: resource}\n\t\t\t\t\treflectValue := reflect.Indirect(reflect.ValueOf(resource))\n\t\t\t\t\tfield := reflectValue.FieldByName(meta.FieldName)\n\n\t\t\t\t\tif field.Kind() == reflect.Ptr {\n\t\t\t\t\t\tif field.IsNil() {\n\t\t\t\t\t\t\tfield.Set(utils.NewValue(field.Type()).Elem())\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor field.Kind() == reflect.Ptr {\n\t\t\t\t\t\t\tfield = field.Elem()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tprimaryKeys := utils.ToArray(metaValue.Value)\n\t\t\t\t\t\/\/ associations not changed for belongs to\n\t\t\t\t\tif relationship.Kind == \"belongs_to\" && len(relationship.ForeignFieldNames) == 1 {\n\t\t\t\t\t\toldPrimaryKeys := utils.ToArray(reflectValue.FieldByName(relationship.ForeignFieldNames[0]).Interface())\n\t\t\t\t\t\t\/\/ if not changed\n\t\t\t\t\t\tif fmt.Sprint(primaryKeys) == fmt.Sprint(oldPrimaryKeys) {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ if removed\n\t\t\t\t\t\tif len(primaryKeys) == 0 {\n\t\t\t\t\t\t\tfield := reflectValue.FieldByName(relationship.ForeignFieldNames[0])\n\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(primaryKeys) > 0 {\n\t\t\t\t\t\t\/\/ set current field value to blank and replace it with new value\n\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\tcontext.GetDB().Where(primaryKeys).Find(field.Addr().Interface())\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Replace many 2 many relations\n\t\t\t\t\tif relationship.Kind == \"many_to_many\" {\n\t\t\t\t\t\tif !scope.PrimaryKeyZero() {\n\t\t\t\t\t\t\tcontext.GetDB().Model(resource).Association(meta.FieldName).Replace(field.Interface())\n\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tmeta.Setter = func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\t\tif metaValue == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tvar (\n\t\t\t\t\tvalue = metaValue.Value\n\t\t\t\t\tfieldName = meta.FieldName\n\t\t\t\t)\n\n\t\t\t\tif value == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdefer func() {\n\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\tcontext.AddError(validations.NewError(resource, meta.Name, fmt.Sprintf(\"Can't set value %v\", value)))\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tif nestedField {\n\t\t\t\t\tfields := strings.Split(fieldName, \".\")\n\t\t\t\t\tfieldName = fields[len(fields)-1]\n\t\t\t\t}\n\n\t\t\t\tfield := reflect.Indirect(reflect.ValueOf(resource)).FieldByName(fieldName)\n\t\t\t\tif field.Kind() == reflect.Ptr {\n\t\t\t\t\tif field.IsNil() && utils.ToString(value) != \"\" {\n\t\t\t\t\t\tfield.Set(utils.NewValue(field.Type()).Elem())\n\t\t\t\t\t}\n\n\t\t\t\t\tif utils.ToString(value) == \"\" {\n\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tfor field.Kind() == reflect.Ptr {\n\t\t\t\t\t\tfield = field.Elem()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif field.IsValid() && field.CanAddr() {\n\t\t\t\t\tswitch field.Kind() {\n\t\t\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\t\t\tfield.SetInt(utils.ToInt(value))\n\t\t\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\t\t\tfield.SetUint(utils.ToUint(value))\n\t\t\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\t\t\tfield.SetFloat(utils.ToFloat(value))\n\t\t\t\t\tcase reflect.Bool:\n\t\t\t\t\t\t\/\/ TODO: add test\n\t\t\t\t\t\tif utils.ToString(value) == \"true\" {\n\t\t\t\t\t\t\tfield.SetBool(true)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfield.SetBool(false)\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif scanner, ok := field.Addr().Interface().(sql.Scanner); ok {\n\t\t\t\t\t\t\tif value == nil && len(metaValue.MetaValues.Values) > 0 {\n\t\t\t\t\t\t\t\tdecodeMetaValuesToField(meta.Resource, field, metaValue, context)\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif scanner.Scan(value) != nil {\n\t\t\t\t\t\t\t\tif err := scanner.Scan(utils.ToString(value)); err != nil {\n\t\t\t\t\t\t\t\t\tcontext.AddError(err)\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if reflect.TypeOf(\"\").ConvertibleTo(field.Type()) {\n\t\t\t\t\t\t\tfield.Set(reflect.ValueOf(utils.ToString(value)).Convert(field.Type()))\n\t\t\t\t\t\t} else if reflect.TypeOf([]string{}).ConvertibleTo(field.Type()) {\n\t\t\t\t\t\t\tfield.Set(reflect.ValueOf(utils.ToArray(value)).Convert(field.Type()))\n\t\t\t\t\t\t} else if rvalue := reflect.ValueOf(value); reflect.TypeOf(rvalue.Type()).ConvertibleTo(field.Type()) {\n\t\t\t\t\t\t\tfield.Set(rvalue.Convert(field.Type()))\n\t\t\t\t\t\t} else if _, ok := field.Addr().Interface().(*time.Time); ok {\n\t\t\t\t\t\t\tif str := utils.ToString(value); str != \"\" {\n\t\t\t\t\t\t\t\tif newTime, err := utils.ParseTime(str, context); err == nil {\n\t\t\t\t\t\t\t\t\tfield.Set(reflect.ValueOf(newTime))\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tvar buf = bytes.NewBufferString(\"\")\n\t\t\t\t\t\t\tjson.NewEncoder(buf).Encode(value)\n\t\t\t\t\t\t\tif err := json.NewDecoder(strings.NewReader(buf.String())).Decode(field.Addr().Interface()); err != nil {\n\t\t\t\t\t\t\t\tutils.ExitWithMsg(\"Can't set value %v to %v [meta %v]\", reflect.TypeOf(value), field.Type(), meta)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif nestedField {\n\t\toldvalue := meta.Valuer\n\t\tmeta.Valuer = func(value interface{}, context *qor.Context) interface{} {\n\t\t\treturn oldvalue(getNestedModel(value, meta.FieldName, context), context)\n\t\t}\n\t\toldSetter := meta.Setter\n\t\tmeta.Setter = func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\toldSetter(getNestedModel(resource, meta.FieldName, context), metaValue, context)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getNestedModel(value interface{}, fieldName string, context *qor.Context) interface{} {\n\tmodel := reflect.Indirect(reflect.ValueOf(value))\n\tfields := strings.Split(fieldName, \".\")\n\tfor _, field := range fields[:len(fields)-1] {\n\t\tif model.CanAddr() {\n\t\t\tsubmodel := model.FieldByName(field)\n\t\t\tif context.GetDB().NewRecord(submodel.Interface()) && !context.GetDB().NewRecord(model.Addr().Interface()) {\n\t\t\t\tif submodel.CanAddr() {\n\t\t\t\t\tcontext.GetDB().Model(model.Addr().Interface()).Association(field).Find(submodel.Addr().Interface())\n\t\t\t\t\tmodel = submodel\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmodel = submodel\n\t\t\t}\n\t\t}\n\t}\n\n\tif model.CanAddr() {\n\t\treturn model.Addr().Interface()\n\t}\n\treturn nil\n}\nSetterGeneratorpackage resource\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/roles\"\n\t\"github.com\/qor\/validations\"\n)\n\n\/\/ Metaor interface\ntype Metaor interface {\n\tGetName() string\n\tGetFieldName() string\n\tGetSetter() func(resource interface{}, metaValue *MetaValue, context *qor.Context)\n\tGetFormattedValuer() func(interface{}, *qor.Context) interface{}\n\tGetValuer() func(interface{}, *qor.Context) interface{}\n\tGetResource() Resourcer\n\tGetMetas() []Metaor\n\tSetPermission(*roles.Permission)\n\tHasPermission(roles.PermissionMode, *qor.Context) bool\n}\n\n\/\/ ConfigureMetaBeforeInitializeInterface if a struct's field's type implemented this interface, it will be called when initializing a meta\ntype ConfigureMetaBeforeInitializeInterface interface {\n\tConfigureQorMetaBeforeInitialize(Metaor)\n}\n\n\/\/ ConfigureMetaInterface if a struct's field's type implemented this interface, it will be called after configed\ntype ConfigureMetaInterface interface {\n\tConfigureQorMeta(Metaor)\n}\n\n\/\/ MetaConfigInterface meta configuration interface\ntype MetaConfigInterface interface {\n\tConfigureMetaInterface\n}\n\n\/\/ MetaConfig base meta config struct\ntype MetaConfig struct {\n}\n\n\/\/ ConfigureQorMeta implement the MetaConfigInterface\nfunc (MetaConfig) ConfigureQorMeta(Metaor) {\n}\n\n\/\/ Meta meta struct definition\ntype Meta struct {\n\tName string\n\tFieldName string\n\tFieldStruct *gorm.StructField\n\tSetter func(resource interface{}, metaValue *MetaValue, context *qor.Context)\n\tValuer func(interface{}, *qor.Context) interface{}\n\tFormattedValuer func(interface{}, *qor.Context) interface{}\n\tConfig MetaConfigInterface\n\tBaseResource Resourcer\n\tResource Resourcer\n\tPermission *roles.Permission\n}\n\n\/\/ GetBaseResource get base resource from meta\nfunc (meta Meta) GetBaseResource() Resourcer {\n\treturn meta.BaseResource\n}\n\n\/\/ GetName get meta's name\nfunc (meta Meta) GetName() string {\n\treturn meta.Name\n}\n\n\/\/ GetFieldName get meta's field name\nfunc (meta Meta) GetFieldName() string {\n\treturn meta.FieldName\n}\n\n\/\/ SetFieldName set meta's field name\nfunc (meta *Meta) SetFieldName(name string) {\n\tmeta.FieldName = name\n}\n\n\/\/ GetSetter get setter from meta\nfunc (meta Meta) GetSetter() func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\treturn meta.Setter\n}\n\n\/\/ SetSetter set setter to meta\nfunc (meta *Meta) SetSetter(fc func(resource interface{}, metaValue *MetaValue, context *qor.Context)) {\n\tmeta.Setter = fc\n}\n\n\/\/ GetValuer get valuer from meta\nfunc (meta Meta) GetValuer() func(interface{}, *qor.Context) interface{} {\n\treturn meta.Valuer\n}\n\n\/\/ SetValuer set valuer for meta\nfunc (meta *Meta) SetValuer(fc func(interface{}, *qor.Context) interface{}) {\n\tmeta.Valuer = fc\n}\n\n\/\/ GetFormattedValuer get formatted valuer from meta\nfunc (meta *Meta) GetFormattedValuer() func(interface{}, *qor.Context) interface{} {\n\tif meta.FormattedValuer != nil {\n\t\treturn meta.FormattedValuer\n\t}\n\treturn meta.Valuer\n}\n\n\/\/ SetFormattedValuer set formatted valuer for meta\nfunc (meta *Meta) SetFormattedValuer(fc func(interface{}, *qor.Context) interface{}) {\n\tmeta.FormattedValuer = fc\n}\n\n\/\/ HasPermission check has permission or not\nfunc (meta Meta) HasPermission(mode roles.PermissionMode, context *qor.Context) bool {\n\tif meta.Permission == nil {\n\t\treturn true\n\t}\n\tvar roles = []interface{}{}\n\tfor _, role := range context.Roles {\n\t\troles = append(roles, role)\n\t}\n\treturn meta.Permission.HasPermission(mode, roles...)\n}\n\n\/\/ SetPermission set permission for meta\nfunc (meta *Meta) SetPermission(permission *roles.Permission) {\n\tmeta.Permission = permission\n}\n\n\/\/ PreInitialize when will be run before initialize, used to fill some basic necessary information\nfunc (meta *Meta) PreInitialize() error {\n\tif meta.Name == \"\" {\n\t\tutils.ExitWithMsg(\"Meta should have name: %v\", reflect.TypeOf(meta))\n\t} else if meta.FieldName == \"\" {\n\t\tmeta.FieldName = meta.Name\n\t}\n\n\t\/\/ parseNestedField used to handle case like Profile.Name\n\tvar parseNestedField = func(value reflect.Value, name string) (reflect.Value, string) {\n\t\tfields := strings.Split(name, \".\")\n\t\tvalue = reflect.Indirect(value)\n\t\tfor _, field := range fields[:len(fields)-1] {\n\t\t\tvalue = value.FieldByName(field)\n\t\t}\n\n\t\treturn value, fields[len(fields)-1]\n\t}\n\n\tvar getField = func(fields []*gorm.StructField, name string) *gorm.StructField {\n\t\tfor _, field := range fields {\n\t\t\tif field.Name == name || field.DBName == name {\n\t\t\t\treturn field\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar nestedField = strings.Contains(meta.FieldName, \".\")\n\tvar scope = &gorm.Scope{Value: meta.BaseResource.GetResource().Value}\n\tif nestedField {\n\t\tsubModel, name := parseNestedField(reflect.ValueOf(meta.BaseResource.GetResource().Value), meta.FieldName)\n\t\tmeta.FieldStruct = getField(scope.New(subModel.Interface()).GetStructFields(), name)\n\t} else {\n\t\tmeta.FieldStruct = getField(scope.GetStructFields(), meta.FieldName)\n\t}\n\treturn nil\n}\n\n\/\/ Initialize initialize meta, will set valuer, setter if haven't configure it\nfunc (meta *Meta) Initialize() error {\n\tvar (\n\t\tnestedField = strings.Contains(meta.FieldName, \".\")\n\t\tfield = meta.FieldStruct\n\t\thasColumn = meta.FieldStruct != nil\n\t)\n\n\tvar fieldType reflect.Type\n\tif hasColumn {\n\t\tfieldType = field.Struct.Type\n\t\tfor fieldType.Kind() == reflect.Ptr {\n\t\t\tfieldType = fieldType.Elem()\n\t\t}\n\t}\n\n\t\/\/ Set Meta Valuer\n\tif meta.Valuer == nil {\n\t\tif hasColumn {\n\t\t\tmeta.Valuer = func(value interface{}, context *qor.Context) interface{} {\n\t\t\t\tscope := context.GetDB().NewScope(value)\n\t\t\t\tfieldName := meta.FieldName\n\t\t\t\tif nestedField {\n\t\t\t\t\tfields := strings.Split(fieldName, \".\")\n\t\t\t\t\tfieldName = fields[len(fields)-1]\n\t\t\t\t}\n\n\t\t\t\tif f, ok := scope.FieldByName(fieldName); ok {\n\t\t\t\t\tif relationship := f.Relationship; relationship != nil && f.Field.CanAddr() && !scope.PrimaryKeyZero() {\n\t\t\t\t\t\tif (relationship.Kind == \"has_many\" || relationship.Kind == \"many_to_many\") && f.Field.Len() == 0 {\n\t\t\t\t\t\t\tcontext.GetDB().Model(value).Related(f.Field.Addr().Interface(), meta.FieldName)\n\t\t\t\t\t\t} else if (relationship.Kind == \"has_one\" || relationship.Kind == \"belongs_to\") && context.GetDB().NewScope(f.Field.Interface()).PrimaryKeyZero() {\n\t\t\t\t\t\t\tif f.Field.Kind() == reflect.Ptr && f.Field.IsNil() {\n\t\t\t\t\t\t\t\tf.Field.Set(reflect.New(f.Field.Type().Elem()))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontext.GetDB().Model(value).Related(f.Field.Addr().Interface(), meta.FieldName)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn f.Field.Interface()\n\t\t\t\t}\n\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t} else {\n\t\t\tutils.ExitWithMsg(\"Meta %v is not supported for resource %v, no `Valuer` configured for it\", meta.FieldName, reflect.TypeOf(meta.BaseResource.GetResource().Value))\n\t\t}\n\t}\n\n\tif meta.Setter == nil && hasColumn {\n\t\tif relationship := field.Relationship; relationship != nil {\n\t\t\tif relationship.Kind == \"belongs_to\" || relationship.Kind == \"many_to_many\" {\n\t\t\t\tmeta.Setter = func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\t\t\tscope := &gorm.Scope{Value: resource}\n\t\t\t\t\treflectValue := reflect.Indirect(reflect.ValueOf(resource))\n\t\t\t\t\tfield := reflectValue.FieldByName(meta.FieldName)\n\n\t\t\t\t\tif field.Kind() == reflect.Ptr {\n\t\t\t\t\t\tif field.IsNil() {\n\t\t\t\t\t\t\tfield.Set(utils.NewValue(field.Type()).Elem())\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor field.Kind() == reflect.Ptr {\n\t\t\t\t\t\t\tfield = field.Elem()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tprimaryKeys := utils.ToArray(metaValue.Value)\n\t\t\t\t\t\/\/ associations not changed for belongs to\n\t\t\t\t\tif relationship.Kind == \"belongs_to\" && len(relationship.ForeignFieldNames) == 1 {\n\t\t\t\t\t\toldPrimaryKeys := utils.ToArray(reflectValue.FieldByName(relationship.ForeignFieldNames[0]).Interface())\n\t\t\t\t\t\t\/\/ if not changed\n\t\t\t\t\t\tif fmt.Sprint(primaryKeys) == fmt.Sprint(oldPrimaryKeys) {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ if removed\n\t\t\t\t\t\tif len(primaryKeys) == 0 {\n\t\t\t\t\t\t\tfield := reflectValue.FieldByName(relationship.ForeignFieldNames[0])\n\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(primaryKeys) > 0 {\n\t\t\t\t\t\t\/\/ set current field value to blank and replace it with new value\n\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\tcontext.GetDB().Where(primaryKeys).Find(field.Addr().Interface())\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Replace many 2 many relations\n\t\t\t\t\tif relationship.Kind == \"many_to_many\" {\n\t\t\t\t\t\tif !scope.PrimaryKeyZero() {\n\t\t\t\t\t\t\tcontext.GetDB().Model(resource).Association(meta.FieldName).Replace(field.Interface())\n\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tgenerateSetter := func(setter func(field reflect.Value, metaValue *MetaValue)) func(record interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\t\treturn func(record interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\t\t\tif metaValue == nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tfieldName := meta.FieldName\n\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\tcontext.AddError(validations.NewError(record, meta.Name, fmt.Sprintf(\"Can't set value %v\", metaValue.Value)))\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tif nestedField {\n\t\t\t\t\t\tfields := strings.Split(fieldName, \".\")\n\t\t\t\t\t\tfieldName = fields[len(fields)-1]\n\t\t\t\t\t}\n\n\t\t\t\t\tfield := reflect.Indirect(reflect.ValueOf(record)).FieldByName(fieldName)\n\t\t\t\t\tif field.Kind() == reflect.Ptr {\n\t\t\t\t\t\tif field.IsNil() && utils.ToString(metaValue.Value) != \"\" {\n\t\t\t\t\t\t\tfield.Set(utils.NewValue(field.Type()).Elem())\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif utils.ToString(metaValue.Value) == \"\" {\n\t\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor field.Kind() == reflect.Ptr {\n\t\t\t\t\t\t\tfield = field.Elem()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif field.IsValid() && field.CanAddr() {\n\t\t\t\t\t\tsetter(field, metaValue)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfield := reflect.Indirect(reflect.ValueOf(meta.Resource.NewStruct())).FieldByName(fieldName)\n\t\t\tfor field.Kind() == reflect.Ptr {\n\t\t\t\tfield = field.Elem()\n\t\t\t}\n\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tmeta.Setter = generateSetter(func(field reflect.Value, metaValue *MetaValue) {\n\t\t\t\t\tfield.SetInt(utils.ToInt(metaValue.Value))\n\t\t\t\t})\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tmeta.Setter = generateSetter(func(field reflect.Value, metaValue *MetaValue) {\n\t\t\t\t\tfield.SetUint(utils.ToUint(metaValue.Value))\n\t\t\t\t})\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tmeta.Setter = generateSetter(func(field reflect.Value, metaValue *MetaValue) {\n\t\t\t\t\tfield.SetFloat(utils.ToFloat(metaValue.Value))\n\t\t\t\t})\n\t\t\tcase reflect.Bool:\n\t\t\t\tmeta.Setter = generateSetter(func(field reflect.Value, metaValue *MetaValue) {\n\t\t\t\t\tif utils.ToString(metaValue.Value) == \"true\" {\n\t\t\t\t\t\tfield.SetBool(true)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfield.SetBool(false)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\tdefault:\n\t\t\t\tif _, ok := field.Addr().Interface().(sql.Scanner); ok {\n\t\t\t\t\tmeta.Setter = generateSetter(func(field reflect.Value, metaValue *MetaValue) {\n\t\t\t\t\t\tif scanner, ok := field.Addr().Interface().(sql.Scanner); ok {\n\t\t\t\t\t\t\tif value == nil && len(metaValue.MetaValues.Values) > 0 {\n\t\t\t\t\t\t\t\tdecodeMetaValuesToField(meta.Resource, field, metaValue, context)\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif scanner.Scan(value) != nil {\n\t\t\t\t\t\t\t\tif err := scanner.Scan(utils.ToString(value)); err != nil {\n\t\t\t\t\t\t\t\t\tcontext.AddError(err)\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t} else if reflect.TypeOf(\"\").ConvertibleTo(field.Type()) {\n\t\t\t\t\tfield.Set(reflect.ValueOf(utils.ToString(value)).Convert(field.Type()))\n\t\t\t\t} else if reflect.TypeOf([]string{}).ConvertibleTo(field.Type()) {\n\t\t\t\t\tfield.Set(reflect.ValueOf(utils.ToArray(value)).Convert(field.Type()))\n\t\t\t\t} else if rvalue := reflect.ValueOf(value); reflect.TypeOf(rvalue.Type()).ConvertibleTo(field.Type()) {\n\t\t\t\t\tfield.Set(rvalue.Convert(field.Type()))\n\t\t\t\t} else if _, ok := field.Addr().Interface().(*time.Time); ok {\n\t\t\t\t\tif str := utils.ToString(value); str != \"\" {\n\t\t\t\t\t\tif newTime, err := utils.ParseTime(str, context); err == nil {\n\t\t\t\t\t\t\tfield.Set(reflect.ValueOf(newTime))\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfield.Set(reflect.Zero(field.Type()))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar buf = bytes.NewBufferString(\"\")\n\t\t\t\t\tjson.NewEncoder(buf).Encode(value)\n\t\t\t\t\tif err := json.NewDecoder(strings.NewReader(buf.String())).Decode(field.Addr().Interface()); err != nil {\n\t\t\t\t\t\tutils.ExitWithMsg(\"Can't set value %v to %v [meta %v]\", reflect.TypeOf(value), field.Type(), meta)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif nestedField {\n\t\toldvalue := meta.Valuer\n\t\tmeta.Valuer = func(value interface{}, context *qor.Context) interface{} {\n\t\t\treturn oldvalue(getNestedModel(value, meta.FieldName, context), context)\n\t\t}\n\t\toldSetter := meta.Setter\n\t\tmeta.Setter = func(resource interface{}, metaValue *MetaValue, context *qor.Context) {\n\t\t\toldSetter(getNestedModel(resource, meta.FieldName, context), metaValue, context)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getNestedModel(value interface{}, fieldName string, context *qor.Context) interface{} {\n\tmodel := reflect.Indirect(reflect.ValueOf(value))\n\tfields := strings.Split(fieldName, \".\")\n\tfor _, field := range fields[:len(fields)-1] {\n\t\tif model.CanAddr() {\n\t\t\tsubmodel := model.FieldByName(field)\n\t\t\tif context.GetDB().NewRecord(submodel.Interface()) && !context.GetDB().NewRecord(model.Addr().Interface()) {\n\t\t\t\tif submodel.CanAddr() {\n\t\t\t\t\tcontext.GetDB().Model(model.Addr().Interface()).Association(field).Find(submodel.Addr().Interface())\n\t\t\t\t\tmodel = submodel\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmodel = submodel\n\t\t\t}\n\t\t}\n\t}\n\n\tif model.CanAddr() {\n\t\treturn model.Addr().Interface()\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/bryanl\/doit\"\n\t\"github.com\/bryanl\/doit\/do\"\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Droplet creates the droplet command.\nfunc Droplet() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"droplet\",\n\t\tAliases: []string{\"d\"},\n\t\tShort: \"droplet commands\",\n\t\tLong: \"droplet is used to access droplet commands\",\n\t}\n\n\tcmdBuilder(cmd, RunDropletActions, \"actions \", \"droplet actions\", writer,\n\t\taliasOpt(\"a\"), displayerType(&action{}))\n\n\tcmdBuilder(cmd, RunDropletBackups, \"backups \", \"droplet backups\", writer,\n\t\taliasOpt(\"b\"), displayerType(&image{}))\n\n\tcmdDropletCreate := cmdBuilder(cmd, RunDropletCreate, \"create NAME [NAME ...]\", \"create droplet\", writer,\n\t\taliasOpt(\"c\"), displayerType(&droplet{}))\n\taddStringSliceFlag(cmdDropletCreate, doit.ArgSSHKeys, []string{}, \"SSH Keys or fingerprints\")\n\taddStringFlag(cmdDropletCreate, doit.ArgUserData, \"\", \"User data\")\n\taddStringFlag(cmdDropletCreate, doit.ArgUserDataFile, \"\", \"User data file\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgDropletWait, false, \"Wait for droplet to be created\")\n\taddStringFlag(cmdDropletCreate, doit.ArgRegionSlug, \"\", \"Droplet region\", requiredOpt())\n\taddStringFlag(cmdDropletCreate, doit.ArgSizeSlug, \"\", \"Droplet size\", requiredOpt())\n\taddBoolFlag(cmdDropletCreate, doit.ArgBackups, false, \"Backup droplet\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgIPv6, false, \"IPv6 support\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgPrivateNetworking, false, \"Private networking\")\n\taddStringFlag(cmdDropletCreate, doit.ArgImage, \"\", \"Droplet image\", requiredOpt())\n\n\tcmdBuilder(cmd, RunDropletDelete, \"delete ID [ID ...]\", \"delete droplet\", writer, aliasOpt(\"d\", \"del\"))\n\n\tcmdBuilder(cmd, RunDropletGet, \"get\", \"get droplet\", writer,\n\t\taliasOpt(\"g\"), displayerType(&droplet{}))\n\n\tcmdBuilder(cmd, RunDropletKernels, \"kernels \", \"droplet kernels\", writer,\n\t\taliasOpt(\"k\"), displayerType(&kernel{}))\n\n\tcmdRunDropletList := cmdBuilder(cmd, RunDropletList, \"list [GLOB]\", \"list droplets\", writer,\n\t\taliasOpt(\"ls\"), displayerType(&droplet{}))\n\taddStringFlag(cmdRunDropletList, doit.ArgRegionSlug, \"\", \"Droplet region\")\n\n\tcmdBuilder(cmd, RunDropletNeighbors, \"neighbors \", \"droplet neighbors\", writer,\n\t\taliasOpt(\"n\"), displayerType(&droplet{}))\n\n\tcmdBuilder(cmd, RunDropletSnapshots, \"snapshots \", \"snapshots\", writer,\n\t\taliasOpt(\"s\"), displayerType(&image{}))\n\n\treturn cmd\n}\n\n\/\/ NewCmdDropletActions creates a droplet action get command.\nfunc NewCmdDropletActions(out io.Writer) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"actions\",\n\t\tShort: \"get droplet actions\",\n\t\tLong: \"get droplet actions\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcheckErr(RunDropletActions(cmdNS(cmd), doit.DoitConfig, out, args), cmd)\n\t\t},\n\t}\n}\n\n\/\/ RunDropletActions returns a list of actions for a droplet.\nfunc RunDropletActions(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Actions(id)\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &action{actions: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletBackups returns a list of backup images for a droplet.\nfunc RunDropletBackups(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Backups(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &image{images: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletCreate creates a droplet.\nfunc RunDropletCreate(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\n\tif len(args) < 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\n\tregion, err := config.GetString(ns, doit.ArgRegionSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsize, err := config.GetString(ns, doit.ArgSizeSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackups, err := config.GetBool(ns, doit.ArgBackups)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tipv6, err := config.GetBool(ns, doit.ArgIPv6)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprivateNetworking, err := config.GetBool(ns, doit.ArgPrivateNetworking)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := config.GetStringSlice(ns, doit.ArgSSHKeys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsshKeys := extractSSHKeys(keys)\n\n\tuserData, err := config.GetString(ns, doit.ArgUserData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilename, err := config.GetString(ns, doit.ArgUserDataFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuserData, err = extractUserData(userData, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar createImage godo.DropletCreateImage\n\n\timageStr, err := config.GetString(ns, doit.ArgImage)\n\tif i, err := strconv.Atoi(imageStr); err == nil {\n\t\tcreateImage = godo.DropletCreateImage{ID: i}\n\t} else {\n\t\tcreateImage = godo.DropletCreateImage{Slug: imageStr}\n\t}\n\n\twait, err := config.GetBool(ns, doit.ArgDropletWait)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds := do.NewDropletsService(client)\n\n\tvar wg sync.WaitGroup\n\terrs := make(chan error)\n\tfor _, name := range args {\n\t\tdcr := &godo.DropletCreateRequest{\n\t\t\tName: name,\n\t\t\tRegion: region,\n\t\t\tSize: size,\n\t\t\tImage: createImage,\n\t\t\tBackups: backups,\n\t\t\tIPv6: ipv6,\n\t\t\tPrivateNetworking: privateNetworking,\n\t\t\tSSHKeys: sshKeys,\n\t\t\tUserData: userData,\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\td, err := ds.Create(dcr, wait)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdc := &displayer{\n\t\t\t\tns: ns,\n\t\t\t\tconfig: config,\n\t\t\t\titem: &droplet{droplets: do.Droplets{*d}},\n\t\t\t\tout: out,\n\t\t\t}\n\n\t\t\tdc.Display()\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errs)\n\n\tfor err := range errs {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc extractSSHKeys(keys []string) []godo.DropletCreateSSHKey {\n\tsshKeys := []godo.DropletCreateSSHKey{}\n\n\tfor _, rawKey := range keys {\n\t\trawKey = strings.TrimPrefix(rawKey, \"[\")\n\t\trawKey = strings.TrimSuffix(rawKey, \"]\")\n\t\tif i, err := strconv.Atoi(rawKey); err == nil {\n\t\t\tsshKeys = append(sshKeys, godo.DropletCreateSSHKey{ID: i})\n\t\t\tcontinue\n\t\t}\n\n\t\tsshKeys = append(sshKeys, godo.DropletCreateSSHKey{Fingerprint: rawKey})\n\t}\n\n\treturn sshKeys\n}\n\nfunc extractUserData(userData, filename string) (string, error) {\n\tif userData == \"\" && filename != \"\" {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tuserData = string(data)\n\t}\n\n\treturn userData, nil\n}\n\n\/\/ RunDropletDelete destroy a droplet by id.\nfunc RunDropletDelete(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tif len(args) < 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\n\tfor _, idStr := range args {\n\t\tid, err := strconv.Atoi(idStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = ds.Delete(id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"deleted droplet %d\\n\", id)\n\t}\n\n\treturn nil\n}\n\n\/\/ RunDropletGet returns a droplet.\nfunc RunDropletGet(ns string, config doit.Config, out io.Writer, args []string) error {\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\td, err := ds.Get(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: do.Droplets{*d}},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletKernels returns a list of available kernels for a droplet.\nfunc RunDropletKernels(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Kernels(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &kernel{kernels: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletList returns a list of droplets.\nfunc RunDropletList(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tregion, err := config.GetString(ns, doit.ArgRegionSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatches := []glob.Glob{}\n\tfor _, globStr := range args {\n\t\tg, err := glob.Compile(globStr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unknown glob %q\", globStr)\n\t\t}\n\n\t\tmatches = append(matches, g)\n\t}\n\n\tvar matchedList do.Droplets\n\n\tlist, err := ds.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, droplet := range list {\n\t\tvar skip = true\n\t\tif len(matches) == 0 {\n\t\t\tskip = false\n\t\t} else {\n\t\t\tfor _, m := range matches {\n\t\t\t\tif m.Match(droplet.Name) {\n\t\t\t\t\tskip = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !skip && region != \"\" {\n\t\t\tif region != droplet.Region.Slug {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\n\t\tif !skip {\n\t\t\tmatchedList = append(matchedList, droplet)\n\t\t}\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: matchedList},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletNeighbors returns a list of droplet neighbors.\nfunc RunDropletNeighbors(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Neighbors(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletSnapshots returns a list of available kernels for a droplet.\nfunc RunDropletSnapshots(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Snapshots(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &image{images: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\nfunc getDropletIDArg(ns string, args []string) (int, error) {\n\tif len(args) != 1 {\n\t\treturn 0, doit.NewMissingArgsErr(ns)\n\t}\n\n\treturn strconv.Atoi(args[0])\n}\nadd another alias for droplet deletepackage commands\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/bryanl\/doit\"\n\t\"github.com\/bryanl\/doit\/do\"\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Droplet creates the droplet command.\nfunc Droplet() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"droplet\",\n\t\tAliases: []string{\"d\"},\n\t\tShort: \"droplet commands\",\n\t\tLong: \"droplet is used to access droplet commands\",\n\t}\n\n\tcmdBuilder(cmd, RunDropletActions, \"actions \", \"droplet actions\", writer,\n\t\taliasOpt(\"a\"), displayerType(&action{}))\n\n\tcmdBuilder(cmd, RunDropletBackups, \"backups \", \"droplet backups\", writer,\n\t\taliasOpt(\"b\"), displayerType(&image{}))\n\n\tcmdDropletCreate := cmdBuilder(cmd, RunDropletCreate, \"create NAME [NAME ...]\", \"create droplet\", writer,\n\t\taliasOpt(\"c\"), displayerType(&droplet{}))\n\taddStringSliceFlag(cmdDropletCreate, doit.ArgSSHKeys, []string{}, \"SSH Keys or fingerprints\")\n\taddStringFlag(cmdDropletCreate, doit.ArgUserData, \"\", \"User data\")\n\taddStringFlag(cmdDropletCreate, doit.ArgUserDataFile, \"\", \"User data file\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgDropletWait, false, \"Wait for droplet to be created\")\n\taddStringFlag(cmdDropletCreate, doit.ArgRegionSlug, \"\", \"Droplet region\", requiredOpt())\n\taddStringFlag(cmdDropletCreate, doit.ArgSizeSlug, \"\", \"Droplet size\", requiredOpt())\n\taddBoolFlag(cmdDropletCreate, doit.ArgBackups, false, \"Backup droplet\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgIPv6, false, \"IPv6 support\")\n\taddBoolFlag(cmdDropletCreate, doit.ArgPrivateNetworking, false, \"Private networking\")\n\taddStringFlag(cmdDropletCreate, doit.ArgImage, \"\", \"Droplet image\", requiredOpt())\n\n\tcmdBuilder(cmd, RunDropletDelete, \"delete ID [ID ...]\", \"delete droplet\", writer,\n\t\taliasOpt(\"d\", \"del\", \"rm\"))\n\n\tcmdBuilder(cmd, RunDropletGet, \"get\", \"get droplet\", writer,\n\t\taliasOpt(\"g\"), displayerType(&droplet{}))\n\n\tcmdBuilder(cmd, RunDropletKernels, \"kernels \", \"droplet kernels\", writer,\n\t\taliasOpt(\"k\"), displayerType(&kernel{}))\n\n\tcmdRunDropletList := cmdBuilder(cmd, RunDropletList, \"list [GLOB]\", \"list droplets\", writer,\n\t\taliasOpt(\"ls\"), displayerType(&droplet{}))\n\taddStringFlag(cmdRunDropletList, doit.ArgRegionSlug, \"\", \"Droplet region\")\n\n\tcmdBuilder(cmd, RunDropletNeighbors, \"neighbors \", \"droplet neighbors\", writer,\n\t\taliasOpt(\"n\"), displayerType(&droplet{}))\n\n\tcmdBuilder(cmd, RunDropletSnapshots, \"snapshots \", \"snapshots\", writer,\n\t\taliasOpt(\"s\"), displayerType(&image{}))\n\n\treturn cmd\n}\n\n\/\/ NewCmdDropletActions creates a droplet action get command.\nfunc NewCmdDropletActions(out io.Writer) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"actions\",\n\t\tShort: \"get droplet actions\",\n\t\tLong: \"get droplet actions\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcheckErr(RunDropletActions(cmdNS(cmd), doit.DoitConfig, out, args), cmd)\n\t\t},\n\t}\n}\n\n\/\/ RunDropletActions returns a list of actions for a droplet.\nfunc RunDropletActions(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Actions(id)\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &action{actions: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletBackups returns a list of backup images for a droplet.\nfunc RunDropletBackups(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Backups(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &image{images: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletCreate creates a droplet.\nfunc RunDropletCreate(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\n\tif len(args) < 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\n\tregion, err := config.GetString(ns, doit.ArgRegionSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsize, err := config.GetString(ns, doit.ArgSizeSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackups, err := config.GetBool(ns, doit.ArgBackups)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tipv6, err := config.GetBool(ns, doit.ArgIPv6)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprivateNetworking, err := config.GetBool(ns, doit.ArgPrivateNetworking)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := config.GetStringSlice(ns, doit.ArgSSHKeys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsshKeys := extractSSHKeys(keys)\n\n\tuserData, err := config.GetString(ns, doit.ArgUserData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilename, err := config.GetString(ns, doit.ArgUserDataFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuserData, err = extractUserData(userData, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar createImage godo.DropletCreateImage\n\n\timageStr, err := config.GetString(ns, doit.ArgImage)\n\tif i, err := strconv.Atoi(imageStr); err == nil {\n\t\tcreateImage = godo.DropletCreateImage{ID: i}\n\t} else {\n\t\tcreateImage = godo.DropletCreateImage{Slug: imageStr}\n\t}\n\n\twait, err := config.GetBool(ns, doit.ArgDropletWait)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds := do.NewDropletsService(client)\n\n\tvar wg sync.WaitGroup\n\terrs := make(chan error)\n\tfor _, name := range args {\n\t\tdcr := &godo.DropletCreateRequest{\n\t\t\tName: name,\n\t\t\tRegion: region,\n\t\t\tSize: size,\n\t\t\tImage: createImage,\n\t\t\tBackups: backups,\n\t\t\tIPv6: ipv6,\n\t\t\tPrivateNetworking: privateNetworking,\n\t\t\tSSHKeys: sshKeys,\n\t\t\tUserData: userData,\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\td, err := ds.Create(dcr, wait)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdc := &displayer{\n\t\t\t\tns: ns,\n\t\t\t\tconfig: config,\n\t\t\t\titem: &droplet{droplets: do.Droplets{*d}},\n\t\t\t\tout: out,\n\t\t\t}\n\n\t\t\tdc.Display()\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errs)\n\n\tfor err := range errs {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc extractSSHKeys(keys []string) []godo.DropletCreateSSHKey {\n\tsshKeys := []godo.DropletCreateSSHKey{}\n\n\tfor _, rawKey := range keys {\n\t\trawKey = strings.TrimPrefix(rawKey, \"[\")\n\t\trawKey = strings.TrimSuffix(rawKey, \"]\")\n\t\tif i, err := strconv.Atoi(rawKey); err == nil {\n\t\t\tsshKeys = append(sshKeys, godo.DropletCreateSSHKey{ID: i})\n\t\t\tcontinue\n\t\t}\n\n\t\tsshKeys = append(sshKeys, godo.DropletCreateSSHKey{Fingerprint: rawKey})\n\t}\n\n\treturn sshKeys\n}\n\nfunc extractUserData(userData, filename string) (string, error) {\n\tif userData == \"\" && filename != \"\" {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tuserData = string(data)\n\t}\n\n\treturn userData, nil\n}\n\n\/\/ RunDropletDelete destroy a droplet by id.\nfunc RunDropletDelete(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tif len(args) < 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\n\tfor _, idStr := range args {\n\t\tid, err := strconv.Atoi(idStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = ds.Delete(id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"deleted droplet %d\\n\", id)\n\t}\n\n\treturn nil\n}\n\n\/\/ RunDropletGet returns a droplet.\nfunc RunDropletGet(ns string, config doit.Config, out io.Writer, args []string) error {\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\td, err := ds.Get(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: do.Droplets{*d}},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletKernels returns a list of available kernels for a droplet.\nfunc RunDropletKernels(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Kernels(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &kernel{kernels: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletList returns a list of droplets.\nfunc RunDropletList(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tregion, err := config.GetString(ns, doit.ArgRegionSlug)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatches := []glob.Glob{}\n\tfor _, globStr := range args {\n\t\tg, err := glob.Compile(globStr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unknown glob %q\", globStr)\n\t\t}\n\n\t\tmatches = append(matches, g)\n\t}\n\n\tvar matchedList do.Droplets\n\n\tlist, err := ds.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, droplet := range list {\n\t\tvar skip = true\n\t\tif len(matches) == 0 {\n\t\t\tskip = false\n\t\t} else {\n\t\t\tfor _, m := range matches {\n\t\t\t\tif m.Match(droplet.Name) {\n\t\t\t\t\tskip = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !skip && region != \"\" {\n\t\t\tif region != droplet.Region.Slug {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\n\t\tif !skip {\n\t\t\tmatchedList = append(matchedList, droplet)\n\t\t}\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: matchedList},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletNeighbors returns a list of droplet neighbors.\nfunc RunDropletNeighbors(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Neighbors(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &droplet{droplets: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\n\/\/ RunDropletSnapshots returns a list of available kernels for a droplet.\nfunc RunDropletSnapshots(ns string, config doit.Config, out io.Writer, args []string) error {\n\tclient := config.GetGodoClient()\n\tds := do.NewDropletsService(client)\n\tid, err := getDropletIDArg(ns, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := ds.Snapshots(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc := &displayer{\n\t\tns: ns,\n\t\tconfig: config,\n\t\titem: &image{images: list},\n\t\tout: out,\n\t}\n\n\treturn dc.Display()\n}\n\nfunc getDropletIDArg(ns string, args []string) (int, error) {\n\tif len(args) != 1 {\n\t\treturn 0, doit.NewMissingArgsErr(ns)\n\t}\n\n\treturn strconv.Atoi(args[0])\n}\n<|endoftext|>"} {"text":"package receiver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n\n\t\"github.com\/lomik\/carbon-clickhouse\/helper\/RowBinary\"\n\t\"github.com\/lomik\/carbon-clickhouse\/helper\/days1970\"\n)\n\n\/\/ https:\/\/github.com\/golang\/go\/issues\/2632#issuecomment-66061057\nfunc unsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}\n\nfunc HasDoubleDot(p []byte) bool {\n\tfor i := 1; i < len(p); i += 2 {\n\t\tif p[i] == '.' {\n\t\t\tif p[i-1] == '.' {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif i+1 < len(p) && p[i+1] == '.' {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc RemoveDoubleDot(p []byte) []byte {\n\tif !HasDoubleDot(p) {\n\t\treturn p\n\t}\n\n\tshift := 0\n\tfor i := 1; i < len(p); i++ {\n\t\tif p[i] == '.' && p[i-1-shift] == '.' {\n\t\t\tshift++\n\t\t} else if shift > 0 {\n\t\t\tp[i-shift] = p[i]\n\t\t}\n\t}\n\n\treturn p[:len(p)-shift]\n}\n\nfunc PlainParseLine(p []byte) ([]byte, float64, uint32, error) {\n\ti1 := bytes.IndexByte(p, ' ')\n\tif i1 < 1 {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\ti2 := bytes.IndexByte(p[i1+1:], ' ')\n\tif i2 < 1 {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\ti2 += i1 + 1\n\n\ti3 := len(p)\n\tif p[i3-1] == '\\n' {\n\t\ti3--\n\t}\n\n\tvalue, err := strconv.ParseFloat(unsafeString(p[i1+1:i2]), 64)\n\tif err != nil || math.IsNaN(value) {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\ttsf, err := strconv.ParseFloat(unsafeString(p[i2+1:i3]), 64)\n\tif err != nil || math.IsNaN(tsf) {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\treturn RemoveDoubleDot(p[:i1]), value, uint32(tsf), nil\n}\n\nfunc PlainParseBuffer(exit chan struct{}, b *Buffer, out chan *RowBinary.WriteBuffer, days *days1970.Days, metricsReceived *uint32, errors *uint32) {\n\toffset := 0\n\tmetricCount := uint32(0)\n\terrorCount := uint32(0)\n\n\tversion := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(version, b.Time)\n\n\twb := RowBinary.GetWriteBuffer()\n\nMainLoop:\n\tfor offset < b.Used {\n\t\tlineEnd := bytes.IndexByte(b.Body[offset:b.Used], '\\n')\n\t\tif lineEnd < 0 {\n\t\t\terrorCount++\n\t\t\t\/\/ @TODO: log unfinished line\n\t\t\tbreak MainLoop\n\t\t} else if lineEnd == 0 {\n\t\t\t\/\/ skip empty line\n\t\t\toffset++\n\t\t\tcontinue MainLoop\n\t\t}\n\n\t\tname, value, timestamp, err := PlainParseLine(b.Body[offset : offset+lineEnd+1])\n\t\toffset += lineEnd + 1\n\n\t\t\/\/ @TODO: check required buffer size, get new\n\n\t\tif err != nil {\n\t\t\terrorCount++\n\t\t\t\/\/ @TODO: log error\n\t\t\tcontinue MainLoop\n\t\t}\n\n\t\t\/\/ write result to buffer for clickhouse\n\t\twb.WriteBytes(name)\n\t\twb.WriteFloat64(value)\n\t\twb.WriteUint32(timestamp)\n\t\twb.WriteUint16(days.TimestampWithNow(timestamp, b.Time))\n\t\twb.Write(version)\n\t\tmetricCount++\n\t}\n\n\tif metricCount > 0 {\n\t\tatomic.AddUint32(metricsReceived, metricCount)\n\t}\n\tif errorCount > 0 {\n\t\tatomic.AddUint32(errors, errorCount)\n\t}\n\n\tif wb.Empty() {\n\t\twb.Release()\n\t\treturn\n\t}\n\n\tselect {\n\tcase out <- wb:\n\t\t\/\/ pass\n\tcase <-exit:\n\t\treturn\n\t}\n}\n\nfunc PlainParser(exit chan struct{}, in chan *Buffer, out chan *RowBinary.WriteBuffer, metricsReceived *uint32, errors *uint32) {\n\tdays := &days1970.Days{}\n\n\tfor {\n\t\tselect {\n\t\tcase <-exit:\n\t\t\treturn\n\t\tcase b := <-in:\n\t\t\tPlainParseBuffer(exit, b, out, days, metricsReceived, errors)\n\t\t\tb.Release()\n\t\t}\n\t}\n}\nLet PlainParseLine ignore \\rpackage receiver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n\n\t\"github.com\/lomik\/carbon-clickhouse\/helper\/RowBinary\"\n\t\"github.com\/lomik\/carbon-clickhouse\/helper\/days1970\"\n)\n\n\/\/ https:\/\/github.com\/golang\/go\/issues\/2632#issuecomment-66061057\nfunc unsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}\n\nfunc HasDoubleDot(p []byte) bool {\n\tfor i := 1; i < len(p); i += 2 {\n\t\tif p[i] == '.' {\n\t\t\tif p[i-1] == '.' {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif i+1 < len(p) && p[i+1] == '.' {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc RemoveDoubleDot(p []byte) []byte {\n\tif !HasDoubleDot(p) {\n\t\treturn p\n\t}\n\n\tshift := 0\n\tfor i := 1; i < len(p); i++ {\n\t\tif p[i] == '.' && p[i-1-shift] == '.' {\n\t\t\tshift++\n\t\t} else if shift > 0 {\n\t\t\tp[i-shift] = p[i]\n\t\t}\n\t}\n\n\treturn p[:len(p)-shift]\n}\n\nfunc PlainParseLine(p []byte) ([]byte, float64, uint32, error) {\n\ti1 := bytes.IndexByte(p, ' ')\n\tif i1 < 1 {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\ti2 := bytes.IndexByte(p[i1+1:], ' ')\n\tif i2 < 1 {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\ti2 += i1 + 1\n\n\ti3 := len(p)\n\tif p[i3-1] == '\\n' {\n\t\ti3--\n\t}\n\tif p[i3-1] == '\\r' {\n\t\ti3--\n\t}\n\n\tvalue, err := strconv.ParseFloat(unsafeString(p[i1+1:i2]), 64)\n\tif err != nil || math.IsNaN(value) {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\ttsf, err := strconv.ParseFloat(unsafeString(p[i2+1:i3]), 64)\n\tif err != nil || math.IsNaN(tsf) {\n\t\treturn nil, 0, 0, fmt.Errorf(\"bad message: %#v\", string(p))\n\t}\n\n\treturn RemoveDoubleDot(p[:i1]), value, uint32(tsf), nil\n}\n\nfunc PlainParseBuffer(exit chan struct{}, b *Buffer, out chan *RowBinary.WriteBuffer, days *days1970.Days, metricsReceived *uint32, errors *uint32) {\n\toffset := 0\n\tmetricCount := uint32(0)\n\terrorCount := uint32(0)\n\n\tversion := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(version, b.Time)\n\n\twb := RowBinary.GetWriteBuffer()\n\nMainLoop:\n\tfor offset < b.Used {\n\t\tlineEnd := bytes.IndexByte(b.Body[offset:b.Used], '\\n')\n\t\tif lineEnd < 0 {\n\t\t\terrorCount++\n\t\t\t\/\/ @TODO: log unfinished line\n\t\t\tbreak MainLoop\n\t\t} else if lineEnd == 0 {\n\t\t\t\/\/ skip empty line\n\t\t\toffset++\n\t\t\tcontinue MainLoop\n\t\t}\n\n\t\tname, value, timestamp, err := PlainParseLine(b.Body[offset : offset+lineEnd+1])\n\t\toffset += lineEnd + 1\n\n\t\t\/\/ @TODO: check required buffer size, get new\n\n\t\tif err != nil {\n\t\t\terrorCount++\n\t\t\t\/\/ @TODO: log error\n\t\t\tcontinue MainLoop\n\t\t}\n\n\t\t\/\/ write result to buffer for clickhouse\n\t\twb.WriteBytes(name)\n\t\twb.WriteFloat64(value)\n\t\twb.WriteUint32(timestamp)\n\t\twb.WriteUint16(days.TimestampWithNow(timestamp, b.Time))\n\t\twb.Write(version)\n\t\tmetricCount++\n\t}\n\n\tif metricCount > 0 {\n\t\tatomic.AddUint32(metricsReceived, metricCount)\n\t}\n\tif errorCount > 0 {\n\t\tatomic.AddUint32(errors, errorCount)\n\t}\n\n\tif wb.Empty() {\n\t\twb.Release()\n\t\treturn\n\t}\n\n\tselect {\n\tcase out <- wb:\n\t\t\/\/ pass\n\tcase <-exit:\n\t\treturn\n\t}\n}\n\nfunc PlainParser(exit chan struct{}, in chan *Buffer, out chan *RowBinary.WriteBuffer, metricsReceived *uint32, errors *uint32) {\n\tdays := &days1970.Days{}\n\n\tfor {\n\t\tselect {\n\t\tcase <-exit:\n\t\t\treturn\n\t\tcase b := <-in:\n\t\t\tPlainParseBuffer(exit, b, out, days, metricsReceived, errors)\n\t\t\tb.Release()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\n\/\/ The resource package provides the functionality of the \"resources\"\n\/\/ feature in Juju.\npackage resource\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\/resource\"\n)\n\n\/\/ TODO(ericsnow) Move the this file or something similar to the charm repo?\n\n\/\/ NoRevision indicates that the spec does not have a revision specified.\nconst NoRevision = \"\"\n\n\/\/ Spec describes one resource that a service uses.\ntype Spec interface {\n\t\/\/ Definition is the basic info about the resource.\n\tDefinition() resource.Info\n\n\t\/\/ Origin identifies where the resource should come from.\n\tOrigin() Origin\n\n\t\/\/ Revision is the desired revision of the resource. It returns \"\"\n\t\/\/ for origins that do not support revisions.\n\tRevision() string\n}\n\n\/\/ NewSpec returns a new Spec for the given info.\nfunc NewSpec(info resource.Info, origin Origin, revision string) (Spec, error) {\n\tswitch origin {\n\tcase OriginUpload:\n\t\t\/\/ TODO(ericsnow) Fail if revision not NoRevision?\n\t\treturn &UploadSpec{info}, nil\n\tdefault:\n\t\treturn nil, errors.NotSupportedf(\"resource origin %q\", origin)\n\t}\n}\n\n\/\/ UploadSpec defines an *uploaded* resource that a service expects.\ntype UploadSpec struct {\n\tresource.Info\n}\n\n\/\/ Definition implements Spec.\nfunc (res UploadSpec) Definition() resource.Info {\n\treturn res.Info\n}\n\n\/\/ Origin implements Spec.\nfunc (res UploadSpec) Origin() Origin {\n\treturn OriginUpload\n}\n\n\/\/ Revision implements Spec.\nfunc (res UploadSpec) Revision() string {\n\treturn NoRevision\n}\nDo not export UploadSpec.\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\n\/\/ The resource package provides the functionality of the \"resources\"\n\/\/ feature in Juju.\npackage resource\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\/resource\"\n)\n\n\/\/ TODO(ericsnow) Move the this file or something similar to the charm repo?\n\n\/\/ NoRevision indicates that the spec does not have a revision specified.\nconst NoRevision = \"\"\n\n\/\/ Spec describes one resource that a service uses.\ntype Spec interface {\n\t\/\/ Definition is the basic info about the resource.\n\tDefinition() resource.Info\n\n\t\/\/ Origin identifies where the resource should come from.\n\tOrigin() Origin\n\n\t\/\/ Revision is the desired revision of the resource. It returns \"\"\n\t\/\/ for origins that do not support revisions.\n\tRevision() string\n}\n\n\/\/ NewSpec returns a new Spec for the given info.\nfunc NewSpec(info resource.Info, origin Origin, revision string) (Spec, error) {\n\tswitch origin {\n\tcase OriginUpload:\n\t\t\/\/ TODO(ericsnow) Fail if revision not NoRevision?\n\t\treturn &uploadSpec{info}, nil\n\tdefault:\n\t\treturn nil, errors.NotSupportedf(\"resource origin %q\", origin)\n\t}\n}\n\n\/\/ uploadSpec defines an *uploaded* resource that a service expects.\ntype uploadSpec struct {\n\tresource.Info\n}\n\n\/\/ Definition implements Spec.\nfunc (res uploadSpec) Definition() resource.Info {\n\treturn res.Info\n}\n\n\/\/ Origin implements Spec.\nfunc (res uploadSpec) Origin() Origin {\n\treturn OriginUpload\n}\n\n\/\/ Revision implements Spec.\nfunc (res uploadSpec) Revision() string {\n\treturn NoRevision\n}\n<|endoftext|>"} {"text":"package main_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"launchpad.net\/gnuflag\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju\/go\/cmd\"\n\tmain \"launchpad.net\/juju\/go\/cmd\/juju\"\n\t\"launchpad.net\/juju\/go\/environs\/dummy\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n)\n\ntype cmdSuite struct {\n\thome string\n}\n\nvar _ = Suite(&cmdSuite{})\n\n\/\/ N.B. Barking is broken.\nvar config = `\ndefault:\n peckham\nenvironments:\n peckham:\n type: dummy\n zookeeper: false\n walthamstow:\n type: dummy\n zookeeper: false\n barking:\n type: dummy\n broken: true\n zookeeper: false\n`\n\nfunc (s *cmdSuite) SetUpTest(c *C) {\n\t\/\/ Arrange so that the \"home\" directory points\n\t\/\/ to a temporary directory containing the config file.\n\ts.home = os.Getenv(\"HOME\")\n\tdir := c.MkDir()\n\tos.Setenv(\"HOME\", dir)\n\terr := os.Mkdir(filepath.Join(dir, \".juju\"), 0777)\n\tc.Assert(err, IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \".juju\", \"environments.yaml\"), []byte(config), 0666)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *cmdSuite) TearDownTest(c *C) {\n\tos.Setenv(\"HOME\", s.home)\n\n\tdummy.Reset(nil)\n}\n\nfunc newFlagSet() *gnuflag.FlagSet {\n\treturn gnuflag.NewFlagSet(\"\", gnuflag.ContinueOnError)\n}\n\n\/\/ newCommand makes a new Command of the same\n\/\/ type as the old one.\nfunc newCommand(old cmd.Command) cmd.Command {\n\tv := reflect.New(reflect.TypeOf(old).Elem())\n\treturn v.Interface().(cmd.Command)\n}\n\n\/\/ testInit checks that a command initialises correctly\n\/\/ with the given set of arguments. It copies the\n\/\/ command so that we can run several tests on\n\/\/ the same command type, and returns the newly\n\/\/ copied and parsed command.\nfunc testInit(c *C, com cmd.Command, args []string, errPat string) cmd.Command {\n\tcom = newCommand(com)\n\terr := com.Init(newFlagSet(), args)\n\tif errPat != \"\" {\n\t\tc.Assert(err, ErrorMatches, errPat)\n\t} else {\n\t\tc.Assert(err, IsNil)\n\t}\n\treturn com\n}\n\n\/\/ assertConnName asserts that the Command is using\n\/\/ the given environment name.\n\/\/ Since every command has a different type,\n\/\/ we use reflection to look at the value of the\n\/\/ Conn field in the value.\nfunc assertConnName(c *C, com cmd.Command, name string) {\n\tv := reflect.ValueOf(com).Elem().FieldByName(\"EnvName\")\n\tc.Assert(v.IsValid(), Equals, true)\n\tc.Assert(v.Interface(), Equals, name)\n}\n\n\/\/ All members of EnvironmentInitTests are tested for the -environment and -e\n\/\/ flags, and that extra arguments will cause parsing to fail.\nvar EnvironmentInitTests = []cmd.Command{\n\t&main.BootstrapCommand{},\n\t&main.DestroyCommand{},\n}\n\n\/\/ TestEnvironmentInit tests that all commands which accept\n\/\/ the --environment variable initialise their\n\/\/ environment name correctly.\nfunc (*cmdSuite) TestEnvironmentInit(c *C) {\n\tfor i, command := range EnvironmentInitTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tcom := testInit(c, command, nil, \"\")\n\t\tassertConnName(c, com, \"\")\n\n\t\tcom = testInit(c, command, []string{\"-e\", \"walthamstow\"}, \"\")\n\t\tassertConnName(c, com, \"walthamstow\")\n\n\t\tcom = testInit(c, command, []string{\"--environment\", \"walthamstow\"}, \"\")\n\t\tassertConnName(c, com, \"walthamstow\")\n\n\t\ttestInit(c, command, []string{\"hotdog\"}, \"unrecognised args.*\")\n\t}\n}\n\nvar CommandsTests = []struct {\n\tcmd cmd.Command\n\targs []string \/\/ Arguments to give to command.\n\tops []dummy.Operation \/\/ Expected operations performed by command.\n\tinitErr string \/\/ Expected error from Init.\n\trunErr string \/\/ Expected error from Run.\n}{\n\t{\n\t\tcmd: &main.BootstrapCommand{},\n\t\targs: []string{\"hotdog\"},\n\t\tinitErr: `unrecognised args: \\[hotdog\\]`,\n\t}, {\n\t\tcmd: &main.BootstrapCommand{},\n\t\tops: envOps(\"peckham\", dummy.OpBootstrap),\n\t}, {\n\t\tcmd: &main.BootstrapCommand{},\n\t\targs: []string{\"-e\", \"barking\"},\n\t\tops: envOps(\"barking\", dummy.OpBootstrap),\n\t\trunErr: \"broken environment\",\n\t}, {\n\t\tcmd: &main.DestroyCommand{},\n\t\tops: envOps(\"peckham\", dummy.OpDestroy),\n\t}, {\n\t\tcmd: &main.DestroyCommand{},\n\t\targs: []string{\"-e\", \"barking\"},\n\t\tops: envOps(\"barking\", dummy.OpDestroy),\n\t\trunErr: \"broken environment\",\n\t},\n}\n\nfunc (*cmdSuite) TestCommands(c *C) {\n\tfor i, t := range CommandsTests {\n\t\tc.Logf(\"test %d: %T\", i, t.cmd)\n\n\t\t\/\/ Gather operations as they happen.\n\t\topc := make(chan dummy.Operation)\n\t\tdone := make(chan bool)\n\t\tvar ops []dummy.Operation\n\t\tgo func() {\n\t\t\tfor op := range opc {\n\t\t\t\tops = append(ops, op)\n\t\t\t}\n\t\t\tdone <- true\n\t\t}()\n\t\tdummy.Reset(opc)\n\n\t\tcom := testInit(c, t.cmd, t.args, t.initErr)\n\t\tif t.initErr != \"\" {\n\t\t\t\/\/ It's already passed the test in testParse.\n\t\t\tcontinue\n\t\t}\n\n\t\terr := com.Run(cmd.DefaultContext())\n\t\t\/\/ signal that we're done with this listener channel.\n\t\tdummy.Reset(nil)\n\t\t<-done\n\t\tif t.runErr != \"\" {\n\t\t\tc.Assert(err, ErrorMatches, t.runErr)\n\t\t\tcontinue\n\t\t}\n\t\tc.Assert(err, IsNil)\n\t\tc.Check(ops, DeepEquals, t.ops)\n\t}\n}\n\n\/\/ envOps returns a slice of expected operations on a given\n\/\/ environment name.\nfunc envOps(name string, events ...dummy.OperationKind) []dummy.Operation {\n\tops := make([]dummy.Operation, len(events))\n\tfor i, e := range events {\n\t\tops[i] = dummy.Operation{\n\t\t\tEnvironName: name,\n\t\t\tKind: e,\n\t\t}\n\t}\n\treturn ops\n}\ncmd\/juju: use command factory in testpackage main_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"launchpad.net\/gnuflag\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju\/go\/cmd\"\n\tmain \"launchpad.net\/juju\/go\/cmd\/juju\"\n\t\"launchpad.net\/juju\/go\/environs\/dummy\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n)\n\ntype cmdSuite struct {\n\thome string\n}\n\nvar _ = Suite(&cmdSuite{})\n\n\/\/ N.B. Barking is broken.\nvar config = `\ndefault:\n peckham\nenvironments:\n peckham:\n type: dummy\n zookeeper: false\n walthamstow:\n type: dummy\n zookeeper: false\n barking:\n type: dummy\n broken: true\n zookeeper: false\n`\n\nfunc (s *cmdSuite) SetUpTest(c *C) {\n\t\/\/ Arrange so that the \"home\" directory points\n\t\/\/ to a temporary directory containing the config file.\n\ts.home = os.Getenv(\"HOME\")\n\tdir := c.MkDir()\n\tos.Setenv(\"HOME\", dir)\n\terr := os.Mkdir(filepath.Join(dir, \".juju\"), 0777)\n\tc.Assert(err, IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \".juju\", \"environments.yaml\"), []byte(config), 0666)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *cmdSuite) TearDownTest(c *C) {\n\tos.Setenv(\"HOME\", s.home)\n\n\tdummy.Reset(nil)\n}\n\nfunc newFlagSet() *gnuflag.FlagSet {\n\treturn gnuflag.NewFlagSet(\"\", gnuflag.ContinueOnError)\n}\n\n\/\/ testInit checks that a command initialises correctly\n\/\/ with the given set of arguments.\nfunc testInit(c *C, com cmd.Command, args []string, errPat string){\n\terr := com.Init(newFlagSet(), args)\n\tif errPat != \"\" {\n\t\tc.Assert(err, ErrorMatches, errPat)\n\t} else {\n\t\tc.Assert(err, IsNil)\n\t}\n}\n\n\/\/ assertConnName asserts that the Command is using\n\/\/ the given environment name.\n\/\/ Since every command has a different type,\n\/\/ we use reflection to look at the value of the\n\/\/ Conn field in the value.\nfunc assertConnName(c *C, com cmd.Command, name string) {\n\tv := reflect.ValueOf(com).Elem().FieldByName(\"EnvName\")\n\tc.Assert(v.IsValid(), Equals, true)\n\tc.Assert(v.Interface(), Equals, name)\n}\n\n\/\/ All members of EnvironmentInitTests are tested for the -environment and -e\n\/\/ flags, and that extra arguments will cause parsing to fail.\nvar EnvironmentInitTests = []func()cmd.Command{\n\tfunc() cmd.Command{return &main.BootstrapCommand{}},\n\tfunc() cmd.Command{return &main.DestroyCommand{}},\n}\n\n\/\/ TestEnvironmentInit tests that all commands which accept\n\/\/ the --environment variable initialise their\n\/\/ environment name correctly.\nfunc (*cmdSuite) TestEnvironmentInit(c *C) {\n\tfor i, cmdFunc := range EnvironmentInitTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tcom := cmdFunc()\n\t\ttestInit(c, com, nil, \"\")\n\t\tassertConnName(c, com, \"\")\n\n\t\tcom = cmdFunc()\n\t\ttestInit(c, com, []string{\"-e\", \"walthamstow\"}, \"\")\n\t\tassertConnName(c, com, \"walthamstow\")\n\n\t\tcom = cmdFunc()\n\t\ttestInit(c, com, []string{\"--environment\", \"walthamstow\"}, \"\")\n\t\tassertConnName(c, com, \"walthamstow\")\n\n\t\tcom = cmdFunc()\n\t\ttestInit(c, com, []string{\"hotdog\"}, \"unrecognised args.*\")\n\t}\n}\n\nvar CommandsTests = []struct {\n\tcmd cmd.Command\n\targs []string \/\/ Arguments to give to command.\n\tops []dummy.Operation \/\/ Expected operations performed by command.\n\tinitErr string \/\/ Expected error from Init.\n\trunErr string \/\/ Expected error from Run.\n}{\n\t{\n\t\tcmd: &main.BootstrapCommand{},\n\t\targs: []string{\"hotdog\"},\n\t\tinitErr: `unrecognised args: \\[hotdog\\]`,\n\t}, {\n\t\tcmd: &main.BootstrapCommand{},\n\t\tops: envOps(\"peckham\", dummy.OpBootstrap),\n\t}, {\n\t\tcmd: &main.BootstrapCommand{},\n\t\targs: []string{\"-e\", \"barking\"},\n\t\tops: envOps(\"barking\", dummy.OpBootstrap),\n\t\trunErr: \"broken environment\",\n\t}, {\n\t\tcmd: &main.DestroyCommand{},\n\t\tops: envOps(\"peckham\", dummy.OpDestroy),\n\t}, {\n\t\tcmd: &main.DestroyCommand{},\n\t\targs: []string{\"-e\", \"barking\"},\n\t\tops: envOps(\"barking\", dummy.OpDestroy),\n\t\trunErr: \"broken environment\",\n\t},\n}\n\nfunc (*cmdSuite) TestCommands(c *C) {\n\tfor i, t := range CommandsTests {\n\t\tc.Logf(\"test %d: %T\", i, t.cmd)\n\n\t\t\/\/ Gather operations as they happen.\n\t\topc := make(chan dummy.Operation)\n\t\tdone := make(chan bool)\n\t\tvar ops []dummy.Operation\n\t\tgo func() {\n\t\t\tfor op := range opc {\n\t\t\t\tops = append(ops, op)\n\t\t\t}\n\t\t\tdone <- true\n\t\t}()\n\t\tdummy.Reset(opc)\n\n\t\ttestInit(c, t.cmd, t.args, t.initErr)\n\t\tif t.initErr != \"\" {\n\t\t\t\/\/ It's already passed the test in testParse.\n\t\t\tcontinue\n\t\t}\n\n\t\terr := t.cmd.Run(cmd.DefaultContext())\n\t\t\/\/ signal that we're done with this listener channel.\n\t\tdummy.Reset(nil)\n\t\t<-done\n\t\tif t.runErr != \"\" {\n\t\t\tc.Assert(err, ErrorMatches, t.runErr)\n\t\t\tcontinue\n\t\t}\n\t\tc.Assert(err, IsNil)\n\t\tc.Check(ops, DeepEquals, t.ops)\n\t}\n}\n\n\/\/ envOps returns a slice of expected operations on a given\n\/\/ environment name.\nfunc envOps(name string, events ...dummy.OperationKind) []dummy.Operation {\n\tops := make([]dummy.Operation, len(events))\n\tfor i, e := range events {\n\t\tops[i] = dummy.Operation{\n\t\t\tEnvironName: name,\n\t\t\tKind: e,\n\t\t}\n\t}\n\treturn ops\n}\n<|endoftext|>"} {"text":"package cmd\n\n\/**\n*\n* Author: Amir Mofasser \n*\thttps:\/\/github.com\/amimof\n*\n*\/\n\nimport (\n\t\"github.com\/amimof\/lazycopy\/fileutils\"\n\t\"github.com\/amimof\/loglevel-go\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"flag\"\n\t\"path\"\n\t\"fmt\"\n\t\"strings\"\n\t\"bufio\"\n)\n\nvar (\n\tprog string\n\tsource string\n\tmroot string\n\tsroot string\n\tunit string\n\toverwrite bool\n\tconfirm bool\n\tlevel int\n\tverify bool\n)\n\ntype Movie struct {\n\ttitle string\n\tyear string\n\tregexp string\n\tfile os.FileInfo\n}\n\ntype Serie struct {\n\ttitle string\n\tseason string\n\tepisode string\n\tregexp string\n\tfile os.FileInfo\n}\n\n\/\/ List of file extensions. Otherwise we might get wierd matches when files contain numbers, such as log files.\n\/\/ var extensions []string = []string{\n\/\/ \t\"mkv\",\"MKV\",\"mp4\",\"MP4\",\"m4p\",\"M4P\",\"m4v\",\"M4V\",\"mpg\",\"MPG\",\"mpeg\",\"MPEG\",\"mp2\",\"MP2\",\"mpe\",\"MPE\",\"mpv\",\"MPV\",\"3gp\",\"3GP\",\"nsv\",\"NSV\",\"f4v\",\"F4V\",\"f4p\",\"F4P\",\"f4a\",\"F4A\",\"f4b\",\"F4P\",\"vob\",\"VOB\",\"avi\",\"AVI\",\"mov\",\"MOV\",\"wmv\",\"WMV\",\"asd\",\"ASD\",\"flv\",\"FLV\",\"ogv\",\"OGV\",\"ogg\",\"OGG\",\"qt\",\"QT\",\"yuv\",\"YUV\",\"rm\",\"RM\",\"rmvb\",\"RMVB\",\n\/\/ }\nvar extensions string = \"\\\\.(mkv|MKV|mp4|MP4|m4p|M4P|m4v|M4V|mpg|MPG|mpeg|MPEG|mp2|MP2|mpe|MPE|mpv|MPV|3gp|3GP|nsv|NSV|f4v|F4V|f4p|F4P|f4a|F4A|f4b|F4P|vob|VOB|avi|AVI|mov|MOV|wmv|WMV|asd|ASD|flv|FLV|ogv|OGV|ogg|OGG|qt|QT|yuv|YUV|rm|RM|rmvb|RMVB)\"\n\/\/ Expressions to use when evaluating series\nvar spattern []string = []string{\"(.*?)S(\\\\d{1,2})E(\\\\d{2})(.*)\",\n\t\"(.*?)s(\\\\d{1,2})e(\\\\d{2})(.*)\",\n\t\"(.*?)\\\\[?(\\\\d{1,2})x(\\\\d{2})\\\\]?(.*)\",\n\t\"(.*?)Season.?(\\\\d{1,2}).*?Episode.?(\\\\d{1,2})(.*)\",\n}\n\/\/ Expressions to use when evaluating movies\nvar mpattern []string = []string{\"(.*?)\\\\((17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\)(.*)\",\n\t\"(.*?)\\\\[(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\](.*)\",\n\t\"(.*?)\\\\{(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\}(.*)\",\n\t\"(.*?)(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])(.*)\",\n\t\"(.*?)(\\\\d{3,4}p)(.*)\",\n}\nvar log *loglevel.Logger = loglevel.New()\n\n\/\/ Returns true if path exists\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Return true if path is a file\nfunc isFile(path string) bool {\n\tfile, err := os.Stat(path)\n\tif err == nil && file.IsDir() != true {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\n\/\/ Checks wether a given filename is considered to be a movie or series\n\/\/ based on the specified regexp patterns\nfunc isMovie(file os.FileInfo, pattern []string) (*Movie, error) {\n\tfor _, element := range pattern {\n\t\tif !file.IsDir() {\n\t\t\telement = element+extensions\n\t\t}\n\t\tr, err := regexp.Compile(element)\n\t\tif err == nil {\n\t\t\tmatch := r.MatchString(file.Name())\n\t\t\tif match {\n\t\t\t\tresult := r.FindStringSubmatch(file.Name())\n\t\t\t\tmovie := &Movie{strings.Replace(strings.Trim(strings.Trim(result[1], \".\"), \" \"), \".\", \" \", -1), strings.Trim(result[2], \".\"), element, file}\n\t\t\t\treturn movie, nil\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc isSerie(file os.FileInfo, pattern []string) (*Serie, error) {\n\tfor _, element := range pattern {\n\t\tif !file.IsDir() {\n\t\t\telement = element+extensions\n\t\t}\n\t\tr, err := regexp.Compile(element)\n\t\tif err == nil {\n\t\t\tmatch := r.MatchString(file.Name())\n\t\t\tif match {\n\t\t\t\tresult := r.FindStringSubmatch(file.Name())\n\t\t\t\tserie := &Serie{strings.Replace(strings.Trim(strings.Trim(result[1], \".\"), \" \"), \".\", \" \", -1), strings.Trim(result[2], \".\"), strings.Trim(result[3], \".\"), element, file}\n\t\t\t\treturn serie, nil\t\t\t\t\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ Converts file size from bytes to kb, mb, gb or tb\nfunc convertFileSize(size int64, unit string) float64 {\n\tvar result float64 = 0\n\tswitch unit {\n\t\tcase \"k\":\n\t\t\tresult = float64(size) \/ 1024\n\t\tcase \"m\":\n\t\t\tresult = (float64(size) \/ 1024) \/ 1024\n\t\tcase \"g\":\n\t\t\tresult = ((float64(size) \/ 1024) \/ 1024) \/ 1024\n\t\tcase \"t\":\n\t\t\tresult = (((float64(size) \/ 1024) \/ 1024) \/ 1024) \/ 1024\n\t\tdefault:\n\t\t\tresult = ((float64(size) \/ 1024) \/ 1024) \/ 1024\n\t}\n\treturn result\n}\n\n\/\/ Prompts the user for comfirmation by askig a yes\/no question. The question can be \n\/\/ provided as msg. (yes\/no) [default] will be appended to the msg. \nfunc confirmCopy(msg string, def bool) bool {\n\n\tvar response string = \"\"\n\tvar defaultChoice = \"no\"\n\n\tif def {\n\t\tdefaultChoice = \"yes\"\n\t}\n\tfmt.Printf(\"%s (yes\/no) [%s] \", msg, defaultChoice)\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\tok := scanner .Scan()\n\n\tif ok {\n\t\t\n\t\tresponse = strings.ToLower(strings.Trim(scanner.Text(), \" \"))\n\n\t\tif response == \"y\" || response == \"yes\" {\n\t\t\treturn true\n\t\t} else if response == \"n\" || response == \"no\" {\n\t\t\treturn false\n\t\t} else if response == \"\" && def {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfmt.Println(\"Please type (y)es or (n)o.\")\n\t\t\treturn confirmCopy(msg, def)\n\t\t}\n\n\t}\n\n\treturn false\n\n}\n\n\/\/ Main loop\nfunc Execute() {\n\n\t\/\/ Arguments and parameters to the program feels un-natural. Perhaps find a \n\t\/\/ library that can handle the arguments for us. Also, thinking of renaming 'series' to 'tv-shows'\n\n\t\/\/ Read arguments\n\tflag.StringVar(&source, \"S\", \".\", \"Directories in which to look for media delimited by comma\")\n\tflag.StringVar(&mroot, \"m\", \".\", \"Directory to your movies.\")\n\tflag.StringVar(&sroot, \"s\", \".\", \"Directory to your series.\")\n\tflag.BoolVar(&overwrite, \"o\", false, \"Overwrite existing files\/folders when copying\")\n\tflag.BoolVar(&confirm, \"c\", false, \"Prompt for confirm when overwriting existing files\/folders\")\n\tflag.IntVar(&level, \"l\", 0, \"Log level. 3=DEBUG, 2=WARN, 1=INFO, 0=ERROR. (default \\\"0\\\")\")\n\tflag.StringVar(&unit, \"u\", \"g\", \"String representation of unit to use when calculating file sizes. Choices are k, m, g and t\")\n\tflag.BoolVar(&verify, \"v\", false, \"Verify, do not actually copy.\")\n\tflag.Parse()\n\n\t\/\/ Sets the loglevel.\n\t\/\/ First we need to read from args and convert it to an int\n\tlog.Level.SetLevel(level)\n\tlog.Debug(\"Log level is\", level)\n\n\t\/\/ Check if movies root exists\n\tif !exists(mroot) {\n\t\tlog.Errorf(\"Does not exist '%s'\\n\", mroot)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Check if series root exists\n\tif !exists(sroot) {\n\t\tlog.Errorf(\"Does not exist '%s'\\n\", sroot)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Check if movies root is a directory\n\tif isFile(mroot) {\n\t\tlog.Errorf(\"Is not a directory '%s'\\n\", mroot)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Check if series root is a directory\n\tif isFile(sroot) {\n\t\tlog.Errorf(\"Is not a directory '%s'\\n\", sroot)\n\t\tos.Exit(1)\n\t}\n\n\tsources := strings.Split(source, \",\")\n\n\tfor i, s := range sources {\n\t\tlog.Debugf(\"[%b] - %s \\n\", i, s)\n\t\n\t\t\/\/ Check if source exists\n\t\tif !exists(s) {\n\t\t\tlog.Error(\"Does not exist\", s)\n\t\t\tos.Exit(1)\n\t\t}\n\t\t\/\/ Check if movies root is a directory\n\t\tif isFile(s) {\n\t\t\tlog.Error(\"Is not a directory\", s)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ Main\n\tvar mmatches []string\n\tvar smatches []string\n\tvar totalWritten int64\n\n\tlog.Debug(\"Overwrite is set to\", overwrite)\n\n\tfor i, src := range sources {\n\n\t\tf, err := ioutil.ReadDir(src)\n\t\tlog.Debugf(\"[%d] Source is '%s'\\n\", i, src)\n\t\tvar index int64 = 0\n\n\t\tfor j, file := range f {\n\t\t\tlog.Debugf(\"Checking '%s' \\n\", file.Name())\n\n\t\t\t\/\/ Check for movies\n\t\t\tmovie, errM := isMovie(file, mpattern)\n\t\t\tif movie != nil {\n\t\t\t\tif errM == nil {\n\t\t\t\t\tlog.Debugf(\"[%d] ==== MOVIE START ==== \\n\", j)\n\t\t\t\t\tlog.Debugf(\"[%d] Movie. Title: '%s', Year: '%s', Filename: '%s'\\n\", j, movie.title, movie.year, movie.file.Name())\n\t\t\t\t\tlog.Debugf(\"[%d] Movie matched regexp: '%s'\\n\", j, movie.regexp)\n\t\t\t\t\tmmatches = append(mmatches, file.Name())\n\t\t\t\t\tsrcf := path.Join(src, file.Name())\n\t\t\t\t\tif confirm == true {\n\t\t\t\t\t\toverwrite = confirmCopy(\"Copy '\"+file.Name()+\"'?\", true)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Don't do anything if verify flag is true\n\t\t\t\t\tif !verify {\n\t\t\t\t\t\tvar written int64\n\t\t\t\t\t\tdstf := path.Join(mroot, file.Name())\n\t\t\t\t\t\tlog.Debugf(\"[%d] Dest is '%s' \\n\", j, dstf)\n\n\t\t\t\t\t\t\/\/ Start the copy\n\t\t\t\t\t\twritten, err = fileutils.Copy(srcf, path.Join(mroot, file.Name()), overwrite)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%b] Can't copy '%s'. %s \\n\", j, file.Name(), err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttotalWritten = totalWritten + written\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"[%d] ==== MOVIE END ==== \\n\", j)\n\t\t\t}\n\n\t\t\t\/\/ Check for series\n\t\t\tserie, errS := isSerie(file, spattern)\n\t\t\tif serie != nil {\n\t\t\t\tif errS == nil {\n\t\t\t\t\tlog.Debugf(\"[%d] ==== SERIE START ====\\n\", j)\n\t\t\t\t\tlog.Debugf(\"[%d] Serie. Title: '%s', Season: '%s', Episode: '%s', Filename: '%s'\\n\", j, serie.title, serie.season, serie.episode, serie.file.Name())\n\t\t\t\t\tlog.Debugf(\"[%d] Serie matched regexp: '%s'\\n\", j, serie.regexp)\n\t\t\t\t\tvar written int64\n\t\t\t\t\tsmatches = append(smatches, file.Name())\n\t\t\t\t\tif confirm == true {\n\t\t\t\t\t\toverwrite = confirmCopy(\"Copy '\"+file.Name()+\"'?\", true)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Don't do anything if verify flag is true\n\t\t\t\t\tif !verify {\n\t\t\t\t\t\tsrcf := path.Join(src, file.Name())\n\n\t\t\t\t\t\t\/\/ Stat source so that we can perserve permissions when creating the directories if necessary\n\t\t\t\t\t\ts, err := os.Stat(path.Dir(srcf))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%d] Couldn't stat. '%s' \\n\", j, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ Create serie folder and season folders resursively\n\t\t\t\t\t\tdstFolder := path.Join(sroot, serie.title, \"Season \"+serie.season)\n\t\t\t\t\t\tif !exists(dstFolder) {\n\t\t\t\t\t\t\tlog.Debugf(\"[%s] Dest does not exist, creating '%s' \\n\", j, dstFolder)\n\t\t\t\t\t\t\terr = os.MkdirAll(dstFolder, s.Mode())\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Errorf(\"[%d] Couldn't create '%s'. %s \\n\", j, dstFolder, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ Start copying\n\t\t\t\t\t\tdstf := path.Join(dstFolder, file.Name())\n\t\t\t\t\t\tlog.Debugf(\"[%d] Dest is '%s' \\n\", j, dstf)\n\t\t\t\t\t\twritten, err = fileutils.Copy(srcf, dstf, overwrite)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%d] Can't copy '%s'. %s\", j, file.Name(), err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttotalWritten = totalWritten + written\n\n\t\t\t\t\t}\t\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"[%d] ==== SERIE END ==== \\n\", j)\n\t\t\t}\n\t\t\tindex++\n\t\t}\n\t}\n\n\tfmt.Println(\"Movies matched:\", len(mmatches))\n\tfmt.Println(\"Series matched:\", len(smatches))\n\tfmt.Printf(\"Copied %.2f%s\\n\", convertFileSize(totalWritten, unit), unit)\n\n}\nUpdated loggingpackage cmd\n\n\/**\n*\n* Author: Amir Mofasser \n*\thttps:\/\/github.com\/amimof\n*\n*\/\n\nimport (\n\t\"github.com\/amimof\/lazycopy\/fileutils\"\n\t\"github.com\/amimof\/loglevel-go\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"flag\"\n\t\"path\"\n\t\"fmt\"\n\t\"strings\"\n\t\"bufio\"\n)\n\nvar (\n\tprog string\n\tsource string\n\tmroot string\n\tsroot string\n\tunit string\n\toverwrite bool\n\tconfirm bool\n\tlevel int\n\tverify bool\n)\n\ntype Movie struct {\n\ttitle string\n\tyear string\n\tregexp string\n\tfile os.FileInfo\n}\n\ntype Serie struct {\n\ttitle string\n\tseason string\n\tepisode string\n\tregexp string\n\tfile os.FileInfo\n}\n\n\/\/ List of file extensions. Otherwise we might get wierd matches when files contain numbers, such as log files.\n\/\/ var extensions []string = []string{\n\/\/ \t\"mkv\",\"MKV\",\"mp4\",\"MP4\",\"m4p\",\"M4P\",\"m4v\",\"M4V\",\"mpg\",\"MPG\",\"mpeg\",\"MPEG\",\"mp2\",\"MP2\",\"mpe\",\"MPE\",\"mpv\",\"MPV\",\"3gp\",\"3GP\",\"nsv\",\"NSV\",\"f4v\",\"F4V\",\"f4p\",\"F4P\",\"f4a\",\"F4A\",\"f4b\",\"F4P\",\"vob\",\"VOB\",\"avi\",\"AVI\",\"mov\",\"MOV\",\"wmv\",\"WMV\",\"asd\",\"ASD\",\"flv\",\"FLV\",\"ogv\",\"OGV\",\"ogg\",\"OGG\",\"qt\",\"QT\",\"yuv\",\"YUV\",\"rm\",\"RM\",\"rmvb\",\"RMVB\",\n\/\/ }\nvar extensions string = \"\\\\.(mkv|MKV|mp4|MP4|m4p|M4P|m4v|M4V|mpg|MPG|mpeg|MPEG|mp2|MP2|mpe|MPE|mpv|MPV|3gp|3GP|nsv|NSV|f4v|F4V|f4p|F4P|f4a|F4A|f4b|F4P|vob|VOB|avi|AVI|mov|MOV|wmv|WMV|asd|ASD|flv|FLV|ogv|OGV|ogg|OGG|qt|QT|yuv|YUV|rm|RM|rmvb|RMVB)\"\n\/\/ Expressions to use when evaluating series\nvar spattern []string = []string{\"(.*?)S(\\\\d{1,2})E(\\\\d{2})(.*)\",\n\t\"(.*?)s(\\\\d{1,2})e(\\\\d{2})(.*)\",\n\t\"(.*?)\\\\[?(\\\\d{1,2})x(\\\\d{2})\\\\]?(.*)\",\n\t\"(.*?)Season.?(\\\\d{1,2}).*?Episode.?(\\\\d{1,2})(.*)\",\n}\n\/\/ Expressions to use when evaluating movies\nvar mpattern []string = []string{\"(.*?)\\\\((17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\)(.*)\",\n\t\"(.*?)\\\\[(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\](.*)\",\n\t\"(.*?)\\\\{(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])\\\\}(.*)\",\n\t\"(.*?)(17[0-9][0-9]|180[0-9]|181[0-9]|18[2-9]\\\\d|19\\\\d\\\\d|2\\\\d{3}|30[0-3]\\\\d|304[0-8])(.*)\",\n\t\"(.*?)(\\\\d{3,4}p)(.*)\",\n}\nvar log *loglevel.Logger = loglevel.New()\n\n\/\/ Returns true if path exists\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Return true if path is a file\nfunc isFile(path string) bool {\n\tfile, err := os.Stat(path)\n\tif err == nil && file.IsDir() != true {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\n\/\/ Checks wether a given filename is considered to be a movie or series\n\/\/ based on the specified regexp patterns\nfunc isMovie(file os.FileInfo, pattern []string) (*Movie, error) {\n\tfor _, element := range pattern {\n\t\tif !file.IsDir() {\n\t\t\telement = element+extensions\n\t\t}\n\t\tr, err := regexp.Compile(element)\n\t\tif err == nil {\n\t\t\tmatch := r.MatchString(file.Name())\n\t\t\tif match {\n\t\t\t\tresult := r.FindStringSubmatch(file.Name())\n\t\t\t\tmovie := &Movie{strings.Replace(strings.Trim(strings.Trim(result[1], \".\"), \" \"), \".\", \" \", -1), strings.Trim(result[2], \".\"), element, file}\n\t\t\t\treturn movie, nil\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc isSerie(file os.FileInfo, pattern []string) (*Serie, error) {\n\tfor _, element := range pattern {\n\t\tif !file.IsDir() {\n\t\t\telement = element+extensions\n\t\t}\n\t\tr, err := regexp.Compile(element)\n\t\tif err == nil {\n\t\t\tmatch := r.MatchString(file.Name())\n\t\t\tif match {\n\t\t\t\tresult := r.FindStringSubmatch(file.Name())\n\t\t\t\tserie := &Serie{strings.Replace(strings.Trim(strings.Trim(result[1], \".\"), \" \"), \".\", \" \", -1), strings.Trim(result[2], \".\"), strings.Trim(result[3], \".\"), element, file}\n\t\t\t\treturn serie, nil\t\t\t\t\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ Converts file size from bytes to kb, mb, gb or tb\nfunc convertFileSize(size int64, unit string) float64 {\n\tvar result float64 = 0\n\tswitch unit {\n\t\tcase \"k\":\n\t\t\tresult = float64(size) \/ 1024\n\t\tcase \"m\":\n\t\t\tresult = (float64(size) \/ 1024) \/ 1024\n\t\tcase \"g\":\n\t\t\tresult = ((float64(size) \/ 1024) \/ 1024) \/ 1024\n\t\tcase \"t\":\n\t\t\tresult = (((float64(size) \/ 1024) \/ 1024) \/ 1024) \/ 1024\n\t\tdefault:\n\t\t\tresult = ((float64(size) \/ 1024) \/ 1024) \/ 1024\n\t}\n\treturn result\n}\n\n\/\/ Prompts the user for comfirmation by askig a yes\/no question. The question can be \n\/\/ provided as msg. (yes\/no) [default] will be appended to the msg. \nfunc confirmCopy(msg string, def bool) bool {\n\n\tvar response string = \"\"\n\tvar defaultChoice = \"no\"\n\n\tif def {\n\t\tdefaultChoice = \"yes\"\n\t}\n\tfmt.Printf(\"%s (yes\/no) [%s] \", msg, defaultChoice)\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\tok := scanner .Scan()\n\n\tif ok {\n\t\t\n\t\tresponse = strings.ToLower(strings.Trim(scanner.Text(), \" \"))\n\n\t\tif response == \"y\" || response == \"yes\" {\n\t\t\treturn true\n\t\t} else if response == \"n\" || response == \"no\" {\n\t\t\treturn false\n\t\t} else if response == \"\" && def {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfmt.Println(\"Please type (y)es or (n)o.\")\n\t\t\treturn confirmCopy(msg, def)\n\t\t}\n\n\t}\n\n\treturn false\n\n}\n\n\/\/ Main loop\nfunc Execute() {\n\n\t\/\/ Arguments and parameters to the program feels un-natural. Perhaps find a \n\t\/\/ library that can handle the arguments for us. Also, thinking of renaming 'series' to 'tv-shows'\n\n\t\/\/ Read arguments\n\tflag.StringVar(&source, \"S\", \".\", \"Directories in which to look for media delimited by comma\")\n\tflag.StringVar(&mroot, \"m\", \".\", \"Directory to your movies.\")\n\tflag.StringVar(&sroot, \"s\", \".\", \"Directory to your series.\")\n\tflag.BoolVar(&overwrite, \"o\", false, \"Overwrite existing files\/folders when copying\")\n\tflag.BoolVar(&confirm, \"c\", false, \"Prompt for confirm when overwriting existing files\/folders\")\n\tflag.IntVar(&level, \"l\", 1, \"Log level. 3=DEBUG, 2=INFO, 1=WARN, 0=ERROR. (default \\\"0\\\")\")\n\tflag.StringVar(&unit, \"u\", \"g\", \"String representation of unit to use when calculating file sizes. Choices are k, m, g and t\")\n\tflag.BoolVar(&verify, \"v\", false, \"Verify, do not actually copy.\")\n\tflag.Parse()\n\n\t\/\/ Sets the loglevel.\n\t\/\/ First we need to read from args and convert it to an int\n\tlog.Level.SetLevel(level)\n\tlog.PrintTime = false\n\tlog.Debug(\"Log level is\", level)\n\n\t\/\/ Check if movies root exists\n\tif !exists(mroot) {\n\t\tlog.Errorf(\"Does not exist '%s'\\n\", mroot)\n\t}\n\t\/\/ Check if series root exists\n\tif !exists(sroot) {\n\t\tlog.Errorf(\"Does not exist '%s'\\n\", sroot)\n\t}\n\t\/\/ Check if movies root is a directory\n\tif isFile(mroot) {\n\t\tlog.Errorf(\"Is not a directory '%s'\\n\", mroot)\n\t}\n\t\/\/ Check if series root is a directory\n\tif isFile(sroot) {\n\t\tlog.Errorf(\"Is not a directory '%s'\\n\", sroot)\n\t}\n\n\tsources := strings.Split(source, \",\")\n\n\tfor i, s := range sources {\n\t\tlog.Debugf(\"[%b] - %s \\n\", i, s)\n\t\n\t\t\/\/ Check if source exists\n\t\tif !exists(s) {\n\t\t\tlog.Error(\"Does not exist\", s)\n\t\t}\n\t\t\/\/ Check if movies root is a directory\n\t\tif isFile(s) {\n\t\t\tlog.Error(\"Is not a directory\", s)\n\t\t}\n\t}\n\n\t\/\/ Main\n\tvar mmatches []string\n\tvar smatches []string\n\tvar totalWritten int64\n\n\tlog.Debug(\"Overwrite is set to\", overwrite)\n\n\tfor i, src := range sources {\n\n\t\tf, err := ioutil.ReadDir(src)\n\t\tlog.Debugf(\"[%d] Source is '%s'\\n\", i, src)\n\t\tvar index int64 = 0\n\n\t\tfor j, file := range f {\n\t\t\tlog.Debugf(\"Checking '%s' \\n\", file.Name())\n\n\t\t\t\/\/ Check for movies\n\t\t\tmovie, errM := isMovie(file, mpattern)\n\t\t\tif movie != nil {\n\t\t\t\tif errM == nil {\n\t\t\t\t\tlog.Debugf(\"[%d] ==== MOVIE START ==== \\n\", j)\n\t\t\t\t\tlog.Debugf(\"[%d] Movie. Title: '%s', Year: '%s', Filename: '%s'\\n\", j, movie.title, movie.year, movie.file.Name())\n\t\t\t\t\tlog.Debugf(\"[%d] Movie matched regexp: '%s'\\n\", j, movie.regexp)\n\t\t\t\t\tmmatches = append(mmatches, file.Name())\n\t\t\t\t\tsrcf := path.Join(src, file.Name())\n\t\t\t\t\tif confirm == true {\n\t\t\t\t\t\toverwrite = confirmCopy(\"Copy '\"+file.Name()+\"'?\", true)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Don't do anything if verify flag is true\n\t\t\t\t\tif !verify {\n\t\t\t\t\t\tvar written int64\n\t\t\t\t\t\tdstf := path.Join(mroot, file.Name())\n\t\t\t\t\t\tlog.Debugf(\"[%d] Dest is '%s' \\n\", j, dstf)\n\n\t\t\t\t\t\t\/\/ Start the copy\n\t\t\t\t\t\twritten, err = fileutils.Copy(srcf, path.Join(mroot, file.Name()), overwrite)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%b] Can't copy '%s'. %s \\n\", j, file.Name(), err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttotalWritten = totalWritten + written\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"[%d] ==== MOVIE END ==== \\n\", j)\n\t\t\t}\n\n\t\t\t\/\/ Check for series\n\t\t\tserie, errS := isSerie(file, spattern)\n\t\t\tif serie != nil {\n\t\t\t\tif errS == nil {\n\t\t\t\t\tlog.Debugf(\"[%d] ==== SERIE START ====\\n\", j)\n\t\t\t\t\tlog.Debugf(\"[%d] Serie. Title: '%s', Season: '%s', Episode: '%s', Filename: '%s'\\n\", j, serie.title, serie.season, serie.episode, serie.file.Name())\n\t\t\t\t\tlog.Debugf(\"[%d] Serie matched regexp: '%s'\\n\", j, serie.regexp)\n\t\t\t\t\tvar written int64\n\t\t\t\t\tsmatches = append(smatches, file.Name())\n\t\t\t\t\tif confirm == true {\n\t\t\t\t\t\toverwrite = confirmCopy(\"Copy '\"+file.Name()+\"'?\", true)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Don't do anything if verify flag is true\n\t\t\t\t\tif !verify {\n\t\t\t\t\t\tsrcf := path.Join(src, file.Name())\n\n\t\t\t\t\t\t\/\/ Stat source so that we can perserve permissions when creating the directories if necessary\n\t\t\t\t\t\ts, err := os.Stat(path.Dir(srcf))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%d] Couldn't stat. '%s' \\n\", j, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ Create serie folder and season folders resursively\n\t\t\t\t\t\tdstFolder := path.Join(sroot, serie.title, \"Season \"+serie.season)\n\t\t\t\t\t\tif !exists(dstFolder) {\n\t\t\t\t\t\t\tlog.Debugf(\"[%s] Dest does not exist, creating '%s' \\n\", j, dstFolder)\n\t\t\t\t\t\t\terr = os.MkdirAll(dstFolder, s.Mode())\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Errorf(\"[%d] Couldn't create '%s'. %s \\n\", j, dstFolder, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ Start copying\n\t\t\t\t\t\tdstf := path.Join(dstFolder, file.Name())\n\t\t\t\t\t\tlog.Debugf(\"[%d] Dest is '%s' \\n\", j, dstf)\n\t\t\t\t\t\twritten, err = fileutils.Copy(srcf, dstf, overwrite)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"[%d] Can't copy '%s'. %s\", j, file.Name(), err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttotalWritten = totalWritten + written\n\n\t\t\t\t\t}\t\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"[%d] ==== SERIE END ==== \\n\", j)\n\t\t\t}\n\t\t\tindex++\n\t\t}\n\t}\n\n\tfmt.Println(\"Movies matched:\", len(mmatches))\n\tfmt.Println(\"Series matched:\", len(smatches))\n\tfmt.Printf(\"Copied %.2f%s\\n\", convertFileSize(totalWritten, unit), unit)\n\n}\n<|endoftext|>"} {"text":"[isolated] Use github.com\/klauspost\/compress\/zlib for compression<|endoftext|>"} {"text":"\/\/ Copyright 2017 Ole Krüger.\n\/\/ Licensed under the MIT license which can be found in the LICENSE file.\n\npackage knxnet\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\n\t\"github.com\/vapourismo\/knx-go\/knx\/util\"\n)\n\n\/\/ Protocol specifies a host protocol to use.\ntype Protocol uint8\n\nconst (\n\t\/\/ UDP4 indicates a communication using UDP over IPv4.\n\tUDP4 Protocol = 1\n\n\t\/\/ TCP4 indicates a communication using TCP over IPv4.\n\tTCP4 Protocol = 2\n)\n\n\/\/ Address is an IPv4 address.\ntype Address [4]byte\n\n\/\/ String formats the address.\nfunc (addr Address) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d.%d\", addr[0], addr[1], addr[2], addr[3])\n}\n\n\/\/ Port is a port number.\ntype Port uint16\n\n\/\/ HostInfo contains information about a host.\ntype HostInfo struct {\n\tProtocol Protocol\n\tAddress Address\n\tPort Port\n}\n\n\/\/ HostInfoFromAddress returns HostInfo from an address.\nfunc HostInfoFromAddress(address net.Addr) (HostInfo, error) {\n\thostinfo := HostInfo{}\n\n\tipS, portS, err := net.SplitHostPort(address.String())\n\tif err != nil {\n\t\treturn hostinfo, err\n\t}\n\n\tip := net.ParseIP(ipS)\n\tif ip == nil {\n\t\treturn hostinfo, fmt.Errorf(\"unable to determine IP\")\n\t}\n\n\tipv4 := ip.To4()\n\tif ipv4 == nil {\n\t\treturn hostinfo, fmt.Errorf(\"only IPv4 is currently supported\")\n\t}\n\n\tport, _ := strconv.ParseUint(portS, 10, 16)\n\tif port == 0 {\n\t\treturn hostinfo, fmt.Errorf(\"unable to determine port\")\n\t}\n\n\tcopy(hostinfo.Address[:], ipv4)\n\thostinfo.Port = Port(port)\n\n\tswitch address.Network() {\n\tcase \"udp\":\n\t\thostinfo.Protocol = UDP4\n\tcase \"tcp\":\n\t\thostinfo.Protocol = TCP4\n\tdefault:\n\t\treturn hostinfo, fmt.Errorf(\"unsupported network\")\n\t}\n\treturn hostinfo, nil\n}\n\n\/\/ Equals checks whether both structures are equal.\nfunc (info HostInfo) Equals(other HostInfo) bool {\n\treturn info.Protocol == other.Protocol &&\n\t\tinfo.Address == other.Address &&\n\t\tinfo.Port == other.Port\n}\n\n\/\/ Size returns the packed size.\nfunc (HostInfo) Size() uint {\n\treturn 8\n}\n\n\/\/ Pack assembles the Host Info structure in the given buffer.\nfunc (info *HostInfo) Pack(buffer []byte) {\n\tutil.PackSome(\n\t\tbuffer,\n\t\tbyte(8),\n\t\tuint8(info.Protocol),\n\t\tinfo.Address[:],\n\t\tuint16(info.Port),\n\t)\n}\n\n\/\/ Unpack parses the given data in order to initialize the structure.\nfunc (info *HostInfo) Unpack(data []byte) (n uint, err error) {\n\tvar length uint8\n\n\tif n, err = util.UnpackSome(\n\t\tdata, &length, (*uint8)(&info.Protocol), info.Address[:4], (*uint16)(&info.Port),\n\t); err != nil {\n\t\treturn\n\t}\n\n\tif length != 8 {\n\t\treturn n, errors.New(\"host info structure length is invalid\")\n\t}\n\n\treturn\n}\nRemoved unsupported tcp\/\/ Copyright 2017 Ole Krüger.\n\/\/ Licensed under the MIT license which can be found in the LICENSE file.\n\npackage knxnet\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\n\t\"github.com\/vapourismo\/knx-go\/knx\/util\"\n)\n\n\/\/ Protocol specifies a host protocol to use.\ntype Protocol uint8\n\nconst (\n\t\/\/ UDP4 indicates a communication using UDP over IPv4.\n\tUDP4 Protocol = 1\n\n\t\/\/ TCP4 indicates a communication using TCP over IPv4.\n\tTCP4 Protocol = 2\n)\n\n\/\/ Address is an IPv4 address.\ntype Address [4]byte\n\n\/\/ String formats the address.\nfunc (addr Address) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d.%d\", addr[0], addr[1], addr[2], addr[3])\n}\n\n\/\/ Port is a port number.\ntype Port uint16\n\n\/\/ HostInfo contains information about a host.\ntype HostInfo struct {\n\tProtocol Protocol\n\tAddress Address\n\tPort Port\n}\n\n\/\/ HostInfoFromAddress returns HostInfo from an address.\nfunc HostInfoFromAddress(address net.Addr) (HostInfo, error) {\n\thostinfo := HostInfo{}\n\n\tipS, portS, err := net.SplitHostPort(address.String())\n\tif err != nil {\n\t\treturn hostinfo, err\n\t}\n\n\tip := net.ParseIP(ipS)\n\tif ip == nil {\n\t\treturn hostinfo, fmt.Errorf(\"unable to determine IP\")\n\t}\n\n\tipv4 := ip.To4()\n\tif ipv4 == nil {\n\t\treturn hostinfo, fmt.Errorf(\"only IPv4 is currently supported\")\n\t}\n\n\tport, _ := strconv.ParseUint(portS, 10, 16)\n\tif port == 0 {\n\t\treturn hostinfo, fmt.Errorf(\"unable to determine port\")\n\t}\n\n\tcopy(hostinfo.Address[:], ipv4)\n\thostinfo.Port = Port(port)\n\n\tswitch address.Network() {\n\tcase \"udp\":\n\t\thostinfo.Protocol = UDP4\n\tdefault:\n\t\treturn hostinfo, fmt.Errorf(\"unsupported network\")\n\t}\n\treturn hostinfo, nil\n}\n\n\/\/ Equals checks whether both structures are equal.\nfunc (info HostInfo) Equals(other HostInfo) bool {\n\treturn info.Protocol == other.Protocol &&\n\t\tinfo.Address == other.Address &&\n\t\tinfo.Port == other.Port\n}\n\n\/\/ Size returns the packed size.\nfunc (HostInfo) Size() uint {\n\treturn 8\n}\n\n\/\/ Pack assembles the Host Info structure in the given buffer.\nfunc (info *HostInfo) Pack(buffer []byte) {\n\tutil.PackSome(\n\t\tbuffer,\n\t\tbyte(8),\n\t\tuint8(info.Protocol),\n\t\tinfo.Address[:],\n\t\tuint16(info.Port),\n\t)\n}\n\n\/\/ Unpack parses the given data in order to initialize the structure.\nfunc (info *HostInfo) Unpack(data []byte) (n uint, err error) {\n\tvar length uint8\n\n\tif n, err = util.UnpackSome(\n\t\tdata, &length, (*uint8)(&info.Protocol), info.Address[:4], (*uint16)(&info.Port),\n\t); err != nil {\n\t\treturn\n\t}\n\n\tif length != 8 {\n\t\treturn n, errors.New(\"host info structure length is invalid\")\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/itsabot\/abot\/core\"\n\t\"github.com\/itsabot\/abot\/core\/log\"\n\t\"github.com\/labstack\/echo\"\n)\n\nvar e *echo.Echo\nvar phone *string\n\nfunc TestMain(m *testing.M) {\n\tphone = flag.String(\"phone\", \"+13105555555\", \"phone number of test user\")\n\tflag.Parse()\n\tif err := os.Setenv(\"ABOT_ENV\", \"test\"); err != nil {\n\t\tos.Exit(1)\n\t}\n\tvar err error\n\tlog.Info(\"starting server\")\n\te, _, err = core.NewServer()\n\tif err != nil {\n\t\tlog.Info(\"failed to start server\", err)\n\t\tos.Exit(1)\n\t}\n\tlog.SetDebug(true)\n\tos.Exit(m.Run())\n}\n\nfunc TestIndex(t *testing.T) {\n\tu := \"http:\/\/localhost:\" + os.Getenv(\"PORT\")\n\tc, b := request(\"GET\", u, nil, e)\n\tif c != http.StatusOK {\n\t\tlog.Debug(string(b))\n\t\tt.Fatal(\"expected\", http.StatusOK, \"got\", c)\n\t}\n}\n\nfunc TestSignupSubmit(t *testing.T) {\n\treset(t)\n\tu := \"http:\/\/localhost:\" + os.Getenv(\"PORT\")\n\tbase := u + \"\/api\/signup.json\"\n\tdata := []byte(`{\n\t\t\"Name\": \"Tester\",\n\t\t\"Email\": \"test@example.com\",\n\t\t\"Password\": \"password\",\n\t\t\"FID\": \"+13105555555\"\n\t}`)\n\tc, b := request(\"POST\", base, data, e)\n\tif c != http.StatusOK {\n\t\tlog.Debug(string(b))\n\t\tt.Fatal(\"expected\", http.StatusOK, \"got\", c)\n\t}\n}\n\nfunc request(method, path string, data []byte, e *echo.Echo) (int, string) {\n\tr, err := http.NewRequest(method, path, bytes.NewBuffer(data))\n\tr.Header.Add(\"Content-Type\", \"application\/json\")\n\tif err != nil {\n\t\treturn 0, \"err completing request: \" + err.Error()\n\t}\n\tw := httptest.NewRecorder()\n\te.ServeHTTP(w, r)\n\treturn w.Code, w.Body.String()\n}\n\nfunc reset(t *testing.T) {\n\t_, err := core.DB().Exec(`DELETE FROM users`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\nFix test using new APIpackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/itsabot\/abot\/core\"\n\t\"github.com\/itsabot\/abot\/core\/log\"\n\t\"github.com\/labstack\/echo\"\n)\n\nvar e *echo.Echo\nvar phone *string\n\nfunc TestMain(m *testing.M) {\n\tphone = flag.String(\"phone\", \"+13105555555\", \"phone number of test user\")\n\tflag.Parse()\n\tif err := os.Setenv(\"ABOT_ENV\", \"test\"); err != nil {\n\t\tos.Exit(1)\n\t}\n\tvar err error\n\tlog.Info(\"starting server\")\n\te, err = core.NewServer()\n\tif err != nil {\n\t\tlog.Info(\"failed to start server\", err)\n\t\tos.Exit(1)\n\t}\n\tlog.SetDebug(true)\n\tos.Exit(m.Run())\n}\n\nfunc TestIndex(t *testing.T) {\n\tu := \"http:\/\/localhost:\" + os.Getenv(\"PORT\")\n\tc, b := request(\"GET\", u, nil, e)\n\tif c != http.StatusOK {\n\t\tlog.Debug(string(b))\n\t\tt.Fatal(\"expected\", http.StatusOK, \"got\", c)\n\t}\n}\n\nfunc TestSignupSubmit(t *testing.T) {\n\treset(t)\n\tu := \"http:\/\/localhost:\" + os.Getenv(\"PORT\")\n\tbase := u + \"\/api\/signup.json\"\n\tdata := []byte(`{\n\t\t\"Name\": \"Tester\",\n\t\t\"Email\": \"test@example.com\",\n\t\t\"Password\": \"password\",\n\t\t\"FID\": \"+13105555555\"\n\t}`)\n\tc, b := request(\"POST\", base, data, e)\n\tif c != http.StatusOK {\n\t\tlog.Debug(string(b))\n\t\tt.Fatal(\"expected\", http.StatusOK, \"got\", c)\n\t}\n}\n\nfunc request(method, path string, data []byte, e *echo.Echo) (int, string) {\n\tr, err := http.NewRequest(method, path, bytes.NewBuffer(data))\n\tr.Header.Add(\"Content-Type\", \"application\/json\")\n\tif err != nil {\n\t\treturn 0, \"err completing request: \" + err.Error()\n\t}\n\tw := httptest.NewRecorder()\n\te.ServeHTTP(w, r)\n\treturn w.Code, w.Body.String()\n}\n\nfunc reset(t *testing.T) {\n\t_, err := core.DB().Exec(`DELETE FROM users`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"package python\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"sourcegraph.com\/sourcegraph\/srclib\/graph\"\n\t\"sourcegraph.com\/sourcegraph\/srclib\/grapher\"\n\t\"sourcegraph.com\/sourcegraph\/srclib\/unit\"\n)\n\ntype GraphContext struct {\n\tUnit *unit.SourceUnit\n\tReqs []*requirement\n}\n\nfunc NewGraphContext(unit *unit.SourceUnit) *GraphContext {\n\tvar g GraphContext\n\tg.Unit = unit\n\tfor _, dep := range unit.Dependencies {\n\t\tif req, err := asRequirement(dep); err == nil {\n\t\t\tg.Reqs = append(g.Reqs, req)\n\t\t}\n\t}\n\treturn &g\n}\n\n\/\/ Graphs the Python source unit. If run outside of a Docker container, this assumes that the source unit has already\n\/\/ been installed (via pip or `python setup.py install`).\nfunc (c *GraphContext) Graph() (*grapher.Output, error) {\n\tif os.Getenv(\"IN_DOCKER_CONTAINER\") != \"\" {\n\t\t\/\/ NOTE: this may cause an error when graphing any source unit that depends\n\t\t\/\/ on jedi (or any other dependency of the graph code)\n\t\trequirementFiles, err := filepath.Glob(filepath.Join(c.Unit.Dir, \"*requirements.txt\"))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, requirementFile := range requirementFiles {\n\t\t\texec.Command(\"pip\", \"install\", \"-r\", requirementFile)\n\t\t}\n\t\texec.Command(\"pip\", \"install\", \"-I\", c.Unit.Dir)\n\t}\n\n\tcmd := exec.Command(\"python\", \"-m\", \"grapher.graph\", c.Unit.Dir, \"--verbose\")\n\tcmd.Stderr = os.Stderr\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar raw RawOutput\n\tif err := json.Unmarshal(b, &raw); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := c.transform(&raw, c.Unit)\n\treturn out, nil\n}\n\nfunc (c *GraphContext) transform(raw *RawOutput, unit *unit.SourceUnit) *grapher.Output {\n\tvar out grapher.Output\n\n\tfor _, def := range raw.Defs {\n\t\tout.Symbols = append(out.Symbols, c.transformDef(def))\n\t\tif doc := c.transformDefDoc(def); doc != nil {\n\t\t\tout.Docs = append(out.Docs, doc)\n\t\t}\n\t}\n\tfor _, ref := range raw.Refs {\n\t\tif outRef, err := c.transformRef(ref); err == nil {\n\t\t\tout.Refs = append(out.Refs, outRef)\n\t\t} else {\n\t\t\tlog.Printf(\"Could not transform ref %v: %s\", ref, err)\n\t\t}\n\t}\n\n\treturn &out\n}\n\nvar jediKindToSymbolKind = map[string]graph.SymbolKind{\n\t\"statement\": graph.Var,\n\t\"statementelement\": graph.Var,\n\t\"param\": graph.Var,\n\t\"module\": graph.Module,\n\t\"submodule\": graph.Module,\n\t\"class\": graph.Type,\n\t\"function\": graph.Func,\n\t\"lambda\": graph.Func,\n\t\"import\": graph.Var,\n}\n\nfunc (c *GraphContext) transformDef(rawDef *RawDef) *graph.Symbol {\n\treturn &graph.Symbol{\n\t\tSymbolKey: graph.SymbolKey{\n\t\t\tRepo: c.Unit.Repo,\n\t\t\tUnit: c.Unit.Name,\n\t\t\tUnitType: c.Unit.Type,\n\t\t\tPath: graph.SymbolPath(rawDef.Path),\n\t\t},\n\t\tTreePath: graph.TreePath(rawDef.Path), \/\/ TODO: make this consistent w\/ old way\n\t\tKind: jediKindToSymbolKind[rawDef.Kind],\n\t\tName: rawDef.Name,\n\t\tFile: rawDef.File,\n\t\tDefStart: rawDef.DefStart,\n\t\tDefEnd: rawDef.DefEnd,\n\t\tExported: rawDef.Exported,\n\t\tData: nil, \/\/ TODO\n\t}\n}\n\nfunc (c *GraphContext) transformRef(rawRef *RawRef) (*graph.Ref, error) {\n\tdefUnit, err := c.inferSourceUnit(rawRef, c.Reqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &graph.Ref{\n\t\tSymbolRepo: defUnit.Repo,\n\t\tSymbolUnitType: defUnit.Type,\n\t\tSymbolUnit: defUnit.Name,\n\t\tSymbolPath: graph.SymbolPath(rawRef.DefPath),\n\n\t\tRepo: c.Unit.Repo,\n\t\tUnit: c.Unit.Name,\n\t\tUnitType: c.Unit.Type,\n\n\t\tFile: rawRef.File,\n\t\tStart: rawRef.Start,\n\t\tEnd: rawRef.End,\n\t}, nil\n}\n\nfunc (c *GraphContext) transformDefDoc(rawDef *RawDef) *graph.Doc {\n\treturn nil\n}\n\nfunc (c *GraphContext) inferSourceUnit(rawRef *RawRef, reqs []*requirement) (*unit.SourceUnit, error) {\n\tif rawRef.ToBuiltin {\n\t\treturn stdLibPkg.SourceUnit(), nil\n\t}\n\treturn c.inferSourceUnitFromFile(rawRef.DefFile, reqs)\n}\n\n\/\/ Note: file is expected to be an absolute path\nfunc (c *GraphContext) inferSourceUnitFromFile(file string, reqs []*requirement) (*unit.SourceUnit, error) {\n\t\/\/ Case: in current source unit (u)\n\tpwd, _ := os.Getwd()\n\tif isSubPath(pwd, file) {\n\t\treturn c.Unit, nil\n\t}\n\n\t\/\/ Case: in dependent source unit(depUnits)\n\tfileCmps := strings.Split(file, string(filepath.Separator))\n\tpkgsDirIdx := -1\n\tfor i, cmp := range fileCmps {\n\t\tif cmp == \"site-packages\" || cmp == \"dist-packages\" {\n\t\t\tpkgsDirIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkgsDirIdx != -1 {\n\t\tfileSubCmps := fileCmps[pkgsDirIdx+1:]\n\t\tfileSubPath := filepath.Join(fileSubCmps...)\n\n\t\tvar foundReq *requirement = nil\n\tFindReq:\n\t\tfor _, req := range reqs {\n\t\t\tfor _, pkg := range req.Packages {\n\t\t\t\tif isSubPath(moduleToFilepath(pkg, true), fileSubPath) {\n\t\t\t\t\tfoundReq = req\n\t\t\t\t\tbreak FindReq\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, mod := range req.Modules {\n\t\t\t\tif moduleToFilepath(mod, false) == fileSubPath {\n\t\t\t\t\tfoundReq = req\n\t\t\t\t\tbreak FindReq\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif foundReq == nil {\n\t\t\tvar candidatesStr string\n\t\t\tif len(reqs) <= 7 {\n\t\t\t\tcandidatesStr = fmt.Sprintf(\"%v\", reqs)\n\t\t\t} else {\n\t\t\t\tcandidatesStr = fmt.Sprintf(\"%v...\", reqs[:7])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Could not find requirement that contains file %s. Candidates were: %s\",\n\t\t\t\tfile, candidatesStr)\n\t\t}\n\n\t\treturn foundReq.SourceUnit(), nil\n\t}\n\n\t\/\/ Case 3: in std lib\n\tpythonDirIdx := -1\n\tfor i, cmp := range fileCmps {\n\t\tlog.Printf(\"# cmp: %s\", cmp)\n\t\tif strings.HasPrefix(cmp, \"python\") {\n\t\t\tpythonDirIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pythonDirIdx != -1 {\n\t\treturn stdLibPkg.SourceUnit(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Cannot infer source unit for file %s\", file)\n}\n\nfunc isSubPath(parent, child string) bool {\n\trelpath, err := filepath.Rel(parent, child)\n\treturn err == nil && !strings.HasPrefix(relpath, \"..\")\n}\n\nfunc moduleToFilepath(moduleName string, isPackage bool) string {\n\tmoduleName = strings.Replace(moduleName, \".\", \"\/\", -1)\n\tif !isPackage {\n\t\tmoduleName += \".py\"\n\t}\n\treturn moduleName\n}\n\ntype RawOutput struct {\n\tDefs []*RawDef\n\tRefs []*RawRef\n}\n\ntype RawDef struct {\n\tPath string\n\tKind string\n\tName string\n\tFile string \/\/ relative path (to source unit directory)\n\tDefStart int\n\tDefEnd int\n\tExported bool\n\tDocstring string\n\tData interface{}\n}\n\ntype RawRef struct {\n\tDefPath string\n\tDef bool\n\tDefFile string \/\/ absolute path\n\tFile string \/\/ relative path (to source unit directory)\n\tStart int\n\tEnd int\n\tToBuiltin bool\n}\nremove printpackage python\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"sourcegraph.com\/sourcegraph\/srclib\/graph\"\n\t\"sourcegraph.com\/sourcegraph\/srclib\/grapher\"\n\t\"sourcegraph.com\/sourcegraph\/srclib\/unit\"\n)\n\ntype GraphContext struct {\n\tUnit *unit.SourceUnit\n\tReqs []*requirement\n}\n\nfunc NewGraphContext(unit *unit.SourceUnit) *GraphContext {\n\tvar g GraphContext\n\tg.Unit = unit\n\tfor _, dep := range unit.Dependencies {\n\t\tif req, err := asRequirement(dep); err == nil {\n\t\t\tg.Reqs = append(g.Reqs, req)\n\t\t}\n\t}\n\treturn &g\n}\n\n\/\/ Graphs the Python source unit. If run outside of a Docker container, this assumes that the source unit has already\n\/\/ been installed (via pip or `python setup.py install`).\nfunc (c *GraphContext) Graph() (*grapher.Output, error) {\n\tif os.Getenv(\"IN_DOCKER_CONTAINER\") != \"\" {\n\t\t\/\/ NOTE: this may cause an error when graphing any source unit that depends\n\t\t\/\/ on jedi (or any other dependency of the graph code)\n\t\trequirementFiles, err := filepath.Glob(filepath.Join(c.Unit.Dir, \"*requirements.txt\"))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, requirementFile := range requirementFiles {\n\t\t\texec.Command(\"pip\", \"install\", \"-r\", requirementFile)\n\t\t}\n\t\texec.Command(\"pip\", \"install\", \"-I\", c.Unit.Dir)\n\t}\n\n\tcmd := exec.Command(\"python\", \"-m\", \"grapher.graph\", c.Unit.Dir, \"--verbose\")\n\tcmd.Stderr = os.Stderr\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar raw RawOutput\n\tif err := json.Unmarshal(b, &raw); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := c.transform(&raw, c.Unit)\n\treturn out, nil\n}\n\nfunc (c *GraphContext) transform(raw *RawOutput, unit *unit.SourceUnit) *grapher.Output {\n\tvar out grapher.Output\n\n\tfor _, def := range raw.Defs {\n\t\tout.Symbols = append(out.Symbols, c.transformDef(def))\n\t\tif doc := c.transformDefDoc(def); doc != nil {\n\t\t\tout.Docs = append(out.Docs, doc)\n\t\t}\n\t}\n\tfor _, ref := range raw.Refs {\n\t\tif outRef, err := c.transformRef(ref); err == nil {\n\t\t\tout.Refs = append(out.Refs, outRef)\n\t\t} else {\n\t\t\tlog.Printf(\"Could not transform ref %v: %s\", ref, err)\n\t\t}\n\t}\n\n\treturn &out\n}\n\nvar jediKindToSymbolKind = map[string]graph.SymbolKind{\n\t\"statement\": graph.Var,\n\t\"statementelement\": graph.Var,\n\t\"param\": graph.Var,\n\t\"module\": graph.Module,\n\t\"submodule\": graph.Module,\n\t\"class\": graph.Type,\n\t\"function\": graph.Func,\n\t\"lambda\": graph.Func,\n\t\"import\": graph.Var,\n}\n\nfunc (c *GraphContext) transformDef(rawDef *RawDef) *graph.Symbol {\n\treturn &graph.Symbol{\n\t\tSymbolKey: graph.SymbolKey{\n\t\t\tRepo: c.Unit.Repo,\n\t\t\tUnit: c.Unit.Name,\n\t\t\tUnitType: c.Unit.Type,\n\t\t\tPath: graph.SymbolPath(rawDef.Path),\n\t\t},\n\t\tTreePath: graph.TreePath(rawDef.Path), \/\/ TODO: make this consistent w\/ old way\n\t\tKind: jediKindToSymbolKind[rawDef.Kind],\n\t\tName: rawDef.Name,\n\t\tFile: rawDef.File,\n\t\tDefStart: rawDef.DefStart,\n\t\tDefEnd: rawDef.DefEnd,\n\t\tExported: rawDef.Exported,\n\t\tData: nil, \/\/ TODO\n\t}\n}\n\nfunc (c *GraphContext) transformRef(rawRef *RawRef) (*graph.Ref, error) {\n\tdefUnit, err := c.inferSourceUnit(rawRef, c.Reqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &graph.Ref{\n\t\tSymbolRepo: defUnit.Repo,\n\t\tSymbolUnitType: defUnit.Type,\n\t\tSymbolUnit: defUnit.Name,\n\t\tSymbolPath: graph.SymbolPath(rawRef.DefPath),\n\n\t\tRepo: c.Unit.Repo,\n\t\tUnit: c.Unit.Name,\n\t\tUnitType: c.Unit.Type,\n\n\t\tFile: rawRef.File,\n\t\tStart: rawRef.Start,\n\t\tEnd: rawRef.End,\n\t}, nil\n}\n\nfunc (c *GraphContext) transformDefDoc(rawDef *RawDef) *graph.Doc {\n\treturn nil\n}\n\nfunc (c *GraphContext) inferSourceUnit(rawRef *RawRef, reqs []*requirement) (*unit.SourceUnit, error) {\n\tif rawRef.ToBuiltin {\n\t\treturn stdLibPkg.SourceUnit(), nil\n\t}\n\treturn c.inferSourceUnitFromFile(rawRef.DefFile, reqs)\n}\n\n\/\/ Note: file is expected to be an absolute path\nfunc (c *GraphContext) inferSourceUnitFromFile(file string, reqs []*requirement) (*unit.SourceUnit, error) {\n\t\/\/ Case: in current source unit (u)\n\tpwd, _ := os.Getwd()\n\tif isSubPath(pwd, file) {\n\t\treturn c.Unit, nil\n\t}\n\n\t\/\/ Case: in dependent source unit(depUnits)\n\tfileCmps := strings.Split(file, string(filepath.Separator))\n\tpkgsDirIdx := -1\n\tfor i, cmp := range fileCmps {\n\t\tif cmp == \"site-packages\" || cmp == \"dist-packages\" {\n\t\t\tpkgsDirIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkgsDirIdx != -1 {\n\t\tfileSubCmps := fileCmps[pkgsDirIdx+1:]\n\t\tfileSubPath := filepath.Join(fileSubCmps...)\n\n\t\tvar foundReq *requirement = nil\n\tFindReq:\n\t\tfor _, req := range reqs {\n\t\t\tfor _, pkg := range req.Packages {\n\t\t\t\tif isSubPath(moduleToFilepath(pkg, true), fileSubPath) {\n\t\t\t\t\tfoundReq = req\n\t\t\t\t\tbreak FindReq\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, mod := range req.Modules {\n\t\t\t\tif moduleToFilepath(mod, false) == fileSubPath {\n\t\t\t\t\tfoundReq = req\n\t\t\t\t\tbreak FindReq\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif foundReq == nil {\n\t\t\tvar candidatesStr string\n\t\t\tif len(reqs) <= 7 {\n\t\t\t\tcandidatesStr = fmt.Sprintf(\"%v\", reqs)\n\t\t\t} else {\n\t\t\t\tcandidatesStr = fmt.Sprintf(\"%v...\", reqs[:7])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Could not find requirement that contains file %s. Candidates were: %s\",\n\t\t\t\tfile, candidatesStr)\n\t\t}\n\n\t\treturn foundReq.SourceUnit(), nil\n\t}\n\n\t\/\/ Case 3: in std lib\n\tpythonDirIdx := -1\n\tfor i, cmp := range fileCmps {\n\t\tif strings.HasPrefix(cmp, \"python\") {\n\t\t\tpythonDirIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pythonDirIdx != -1 {\n\t\treturn stdLibPkg.SourceUnit(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Cannot infer source unit for file %s\", file)\n}\n\nfunc isSubPath(parent, child string) bool {\n\trelpath, err := filepath.Rel(parent, child)\n\treturn err == nil && !strings.HasPrefix(relpath, \"..\")\n}\n\nfunc moduleToFilepath(moduleName string, isPackage bool) string {\n\tmoduleName = strings.Replace(moduleName, \".\", \"\/\", -1)\n\tif !isPackage {\n\t\tmoduleName += \".py\"\n\t}\n\treturn moduleName\n}\n\ntype RawOutput struct {\n\tDefs []*RawDef\n\tRefs []*RawRef\n}\n\ntype RawDef struct {\n\tPath string\n\tKind string\n\tName string\n\tFile string \/\/ relative path (to source unit directory)\n\tDefStart int\n\tDefEnd int\n\tExported bool\n\tDocstring string\n\tData interface{}\n}\n\ntype RawRef struct {\n\tDefPath string\n\tDef bool\n\tDefFile string \/\/ absolute path\n\tFile string \/\/ relative path (to source unit directory)\n\tStart int\n\tEnd int\n\tToBuiltin bool\n}\n<|endoftext|>"} {"text":"package resp\n\n\/\/go:generate bash -c \"go run generate\/*.go | gofmt -s > auto.go\"\n\nimport (\n\t\"math\"\n\t\"math\/cmplx\"\n\t\"strings\"\n)\n\ntype Symmetry uint\n\nconst (\n\tSymmetryUnknown Symmetry = iota\n\tSymmetryNone\n\tSymmetryEven\n\tSymmetryOdd\n)\n\ntype PzTransferFunction uint\n\nconst (\n\tPZFunctionUnknown PzTransferFunction = iota\n\tPZFunctionLaplaceRadiansPerSecond\n\tPZFunctionLaplaceHertz\n\tPZFunctionLaplaceZTransform\n)\n\ntype ApproximationType uint\n\nconst (\n\tApproximationTypeUnknown ApproximationType = iota\n\tApproximationTypeMaclaurin\n)\n\ntype Datalogger struct {\n\tDataloggerList []string\n\tType string\n\tLabel string\n\tSampleRate float64\n\tFrequency float64\n\tStorageFormat string\n\tClockDrift float64\n\tFilterList []string\n\tStages []ResponseStage\n\tReversed bool\n}\n\ntype DataloggerModel struct {\n\tName string\n\tType string \/\/ FDSN StationXML Datalogger Type\n\tDescription string \/\/ FDSN StationXML Datalogger Description\n\tManufacturer string \/\/ FDSN StationXML Datalogger Manufacturer\n\tVendor string \/\/ FDSN StationXML Datalogger Vendor\n}\n\ntype Sensor struct {\n\tSensorList []string\n\tFilterList []string\n\tStages []ResponseStage\n\tChannels string\n\tReversed bool\n}\n\nfunc (s Sensor) Labels(axial bool) string {\n\tlabels := s.Channels\n\tif axial {\n\t\tlabels = strings.Replace(labels, \"N\", \"1\", -1)\n\t\tlabels = strings.Replace(labels, \"E\", \"2\", -1)\n\t} else {\n\t\tlabels = strings.Replace(labels, \"1\", \"N\", -1)\n\t\tlabels = strings.Replace(labels, \"2\", \"E\", -1)\n\t}\n\treturn labels\n}\n\ntype SensorComponent struct {\n\tAzimuth float64\n\tDip float64\n}\n\ntype SensorModel struct {\n\tName string\n\tType string \/\/ FDSN StationXML Sensor Type\n\tDescription string \/\/ FDSN StationXML Sensor Description\n\tManufacturer string \/\/ FDSN StationXML Vendor Description\n\tVendor string \/\/ FDSN StationXML Vendor Description\n\n\tComponents []SensorComponent\n}\n\ntype Response struct {\n\tName string\n\tSensors []Sensor\n\tDataloggers []Datalogger\n}\n\ntype Stream struct {\n\tDatalogger\n\tSensor\n\n\tComponents []SensorComponent\n}\n\nfunc (s Stream) Channels(axial bool) []string {\n\tvar channels []string\n\n\tlabels := s.Sensor.Labels(axial)\n\tif len(s.Components) < len(labels) && len(labels) > 0 {\n\t\tlabels = labels[0:len(s.Components)]\n\t}\n\n\tfor _, component := range labels {\n\t\tchannels = append(channels, s.Datalogger.Label+string(component))\n\t}\n\n\treturn channels\n}\n\nfunc (s Stream) Gain() float64 {\n\tvar gain float64 = 1.0\n\n\tfor _, stage := range append(s.Sensor.Stages, s.Datalogger.Stages...) {\n\t\tif stage.StageSet == nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch stage.StageSet.GetType() {\n\t\tcase \"fir\":\n\t\t\tgain *= stage.StageSet.(FIR).Gain\n\t\tdefault:\n\t\t\tgain *= stage.Gain\n\t\t}\n\t}\n\n\treturn gain\n}\n\ntype StageSet interface {\n\tGetType() string\n}\n\ntype ResponseStage struct {\n\tType string\n\tLookup string\n\tFilter string\n\tStageSet StageSet\n\tFrequency float64\n\tSampleRate float64\n\tDecimate int32\n\tGain float64\n\t\/\/\tScale float64\n\tCorrection float64\n\tDelay float64\n\tInputUnits string\n\tOutputUnits string\n}\n\ntype PAZ struct {\n\tName string\n\tCode PzTransferFunction\n\tType string\n\tNotes string\n\tPoles []complex128\n\tZeros []complex128\n}\n\nfunc (p PAZ) GetType() string {\n\treturn \"paz\"\n}\n\nfunc (p PAZ) Gain(freq float64) float64 {\n\tw := complex(0.0, func() float64 {\n\t\tswitch p.Code {\n\t\tcase PZFunctionLaplaceRadiansPerSecond:\n\t\t\treturn 2.0 * math.Pi * freq\n\t\tdefault:\n\t\t\treturn freq\n\t\t}\n\t}())\n\th := complex(float64(1.0), float64(0.0))\n\n\tfor _, zero := range p.Zeros {\n\t\th *= (w - zero)\n\t}\n\n\tfor _, pole := range p.Poles {\n\t\th \/= (w - pole)\n\t}\n\treturn cmplx.Abs(h)\n}\n\ntype FIR struct {\n\tName string\n\tCausal bool\n\tSymmetry Symmetry\n\tDecimation float64\n\tGain float64\n\tNotes *string\n\tFactors []float64\n\tReversed *bool\n}\n\nfunc (f FIR) GetType() string {\n\treturn \"fir\"\n}\n\ntype Coefficient struct {\n\tValue float64\n}\n\ntype Polynomial struct {\n\tName string\n\tGain float64\n\tApproximationType ApproximationType\n\tFrequencyLowerBound float64\n\tFrequencyUpperBound float64\n\tApproximationLowerBound float64\n\tApproximationUpperBound float64\n\tMaximumError float64\n\tNotes *string\n\n\tCoefficients []Coefficient\n}\n\nfunc (p Polynomial) GetType() string {\n\treturn \"poly\"\n}\n\ntype A2D struct {\n\tName string\n\tCode PzTransferFunction\n\tType string\n\tNotes string\n}\n\nfunc (a A2D) GetType() string {\n\treturn \"a2d\"\n}\nfix empty auto.go file issue (#219)package resp\n\n\/\/go:generate bash -c \"go run generate\/*.go | gofmt -s > auto.go; test -s auto.go || rm auto.go\"\n\nimport (\n\t\"math\"\n\t\"math\/cmplx\"\n\t\"strings\"\n)\n\ntype Symmetry uint\n\nconst (\n\tSymmetryUnknown Symmetry = iota\n\tSymmetryNone\n\tSymmetryEven\n\tSymmetryOdd\n)\n\ntype PzTransferFunction uint\n\nconst (\n\tPZFunctionUnknown PzTransferFunction = iota\n\tPZFunctionLaplaceRadiansPerSecond\n\tPZFunctionLaplaceHertz\n\tPZFunctionLaplaceZTransform\n)\n\ntype ApproximationType uint\n\nconst (\n\tApproximationTypeUnknown ApproximationType = iota\n\tApproximationTypeMaclaurin\n)\n\ntype Datalogger struct {\n\tDataloggerList []string\n\tType string\n\tLabel string\n\tSampleRate float64\n\tFrequency float64\n\tStorageFormat string\n\tClockDrift float64\n\tFilterList []string\n\tStages []ResponseStage\n\tReversed bool\n}\n\ntype DataloggerModel struct {\n\tName string\n\tType string \/\/ FDSN StationXML Datalogger Type\n\tDescription string \/\/ FDSN StationXML Datalogger Description\n\tManufacturer string \/\/ FDSN StationXML Datalogger Manufacturer\n\tVendor string \/\/ FDSN StationXML Datalogger Vendor\n}\n\ntype Sensor struct {\n\tSensorList []string\n\tFilterList []string\n\tStages []ResponseStage\n\tChannels string\n\tReversed bool\n}\n\nfunc (s Sensor) Labels(axial bool) string {\n\tlabels := s.Channels\n\tif axial {\n\t\tlabels = strings.Replace(labels, \"N\", \"1\", -1)\n\t\tlabels = strings.Replace(labels, \"E\", \"2\", -1)\n\t} else {\n\t\tlabels = strings.Replace(labels, \"1\", \"N\", -1)\n\t\tlabels = strings.Replace(labels, \"2\", \"E\", -1)\n\t}\n\treturn labels\n}\n\ntype SensorComponent struct {\n\tAzimuth float64\n\tDip float64\n}\n\ntype SensorModel struct {\n\tName string\n\tType string \/\/ FDSN StationXML Sensor Type\n\tDescription string \/\/ FDSN StationXML Sensor Description\n\tManufacturer string \/\/ FDSN StationXML Vendor Description\n\tVendor string \/\/ FDSN StationXML Vendor Description\n\n\tComponents []SensorComponent\n}\n\ntype Response struct {\n\tName string\n\tSensors []Sensor\n\tDataloggers []Datalogger\n}\n\ntype Stream struct {\n\tDatalogger\n\tSensor\n\n\tComponents []SensorComponent\n}\n\nfunc (s Stream) Channels(axial bool) []string {\n\tvar channels []string\n\n\tlabels := s.Sensor.Labels(axial)\n\tif len(s.Components) < len(labels) && len(labels) > 0 {\n\t\tlabels = labels[0:len(s.Components)]\n\t}\n\n\tfor _, component := range labels {\n\t\tchannels = append(channels, s.Datalogger.Label+string(component))\n\t}\n\n\treturn channels\n}\n\nfunc (s Stream) Gain() float64 {\n\tvar gain float64 = 1.0\n\n\tfor _, stage := range append(s.Sensor.Stages, s.Datalogger.Stages...) {\n\t\tif stage.StageSet == nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch stage.StageSet.GetType() {\n\t\tcase \"fir\":\n\t\t\tgain *= stage.StageSet.(FIR).Gain\n\t\tdefault:\n\t\t\tgain *= stage.Gain\n\t\t}\n\t}\n\n\treturn gain\n}\n\ntype StageSet interface {\n\tGetType() string\n}\n\ntype ResponseStage struct {\n\tType string\n\tLookup string\n\tFilter string\n\tStageSet StageSet\n\tFrequency float64\n\tSampleRate float64\n\tDecimate int32\n\tGain float64\n\t\/\/\tScale float64\n\tCorrection float64\n\tDelay float64\n\tInputUnits string\n\tOutputUnits string\n}\n\ntype PAZ struct {\n\tName string\n\tCode PzTransferFunction\n\tType string\n\tNotes string\n\tPoles []complex128\n\tZeros []complex128\n}\n\nfunc (p PAZ) GetType() string {\n\treturn \"paz\"\n}\n\nfunc (p PAZ) Gain(freq float64) float64 {\n\tw := complex(0.0, func() float64 {\n\t\tswitch p.Code {\n\t\tcase PZFunctionLaplaceRadiansPerSecond:\n\t\t\treturn 2.0 * math.Pi * freq\n\t\tdefault:\n\t\t\treturn freq\n\t\t}\n\t}())\n\th := complex(float64(1.0), float64(0.0))\n\n\tfor _, zero := range p.Zeros {\n\t\th *= (w - zero)\n\t}\n\n\tfor _, pole := range p.Poles {\n\t\th \/= (w - pole)\n\t}\n\treturn cmplx.Abs(h)\n}\n\ntype FIR struct {\n\tName string\n\tCausal bool\n\tSymmetry Symmetry\n\tDecimation float64\n\tGain float64\n\tNotes *string\n\tFactors []float64\n\tReversed *bool\n}\n\nfunc (f FIR) GetType() string {\n\treturn \"fir\"\n}\n\ntype Coefficient struct {\n\tValue float64\n}\n\ntype Polynomial struct {\n\tName string\n\tGain float64\n\tApproximationType ApproximationType\n\tFrequencyLowerBound float64\n\tFrequencyUpperBound float64\n\tApproximationLowerBound float64\n\tApproximationUpperBound float64\n\tMaximumError float64\n\tNotes *string\n\n\tCoefficients []Coefficient\n}\n\nfunc (p Polynomial) GetType() string {\n\treturn \"poly\"\n}\n\ntype A2D struct {\n\tName string\n\tCode PzTransferFunction\n\tType string\n\tNotes string\n}\n\nfunc (a A2D) GetType() string {\n\treturn \"a2d\"\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/worker\/machiner\"\n\t\"launchpad.net\/tomb\"\n\t\"time\"\n)\n\n\/\/ MachineAgent is a cmd.Command responsible for running a machine agent.\ntype MachineAgent struct {\n\ttomb tomb.Tomb\n\tConf AgentConf\n\tMachineId int\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *MachineAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\"machine\", \"\", \"run a juju machine agent\", \"\"}\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *MachineAgent) Init(f *gnuflag.FlagSet, args []string) error {\n\ta.Conf.addFlags(f)\n\tf.IntVar(&a.MachineId, \"machine-id\", -1, \"id of the machine to run\")\n\tif err := f.Parse(true, args); err != nil {\n\t\treturn err\n\t}\n\tif a.MachineId < 0 {\n\t\treturn fmt.Errorf(\"--machine-id option must be set, and expects a non-negative integer\")\n\t}\n\treturn a.Conf.checkArgs(f.Args())\n}\n\n\/\/ Stop stops the machine agent.\nfunc (a *MachineAgent) Stop() error {\n\ta.tomb.Kill(nil)\n\treturn a.tomb.Wait()\n}\n\n\/\/ Run runs a machine agent.\nfunc (a *MachineAgent) Run(_ *cmd.Context) error {\n\tdefer a.tomb.Done()\n\tfor {\n\t\tlog.Printf(\"machine agent starting\")\n\t\terr := a.runOnce()\n\t\tif err == nil {\n\t\t\t\/\/ We have been explicitly stopped.\n\t\t\treturn nil\n\t\t}\n\t\tif ug, ok := err.(*UpgradedError); ok {\n\t\t\ttools, err := environs.ChangeAgentTools(\"machine\", ug.Binary)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"cannot change agent tools: %v\", err)\n\t\t\t\ttime.Sleep(retryDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"upgrade to %v from %q\", tools.Binary, tools.URL)\n\t\t\t\/\/ Return and let upstart deal with the restart.\n\t\t\treturn nil\n\t\t}\n\t\tlog.Printf(\"error from provisioner or firewaller: %v\", err)\n\t\ttime.Sleep(retryDelay)\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (a *MachineAgent) runOnce() error {\n\tst, err := state.Open(&a.Conf.StateInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer st.Close()\n\tm, err := st.Machine(a.MachineId)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn runTasks(a.tomb.Dying(),\n\t\tmachiner.NewMachiner(m),\n\t\tNewUpgrader(\"machine\", m),\n\t)\n}\ncmd\/jujud: fix stopped checkpackage main\n\nimport (\n\t\"fmt\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/worker\/machiner\"\n\t\"launchpad.net\/tomb\"\n\t\"time\"\n)\n\n\/\/ MachineAgent is a cmd.Command responsible for running a machine agent.\ntype MachineAgent struct {\n\ttomb tomb.Tomb\n\tConf AgentConf\n\tMachineId int\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *MachineAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\"machine\", \"\", \"run a juju machine agent\", \"\"}\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *MachineAgent) Init(f *gnuflag.FlagSet, args []string) error {\n\ta.Conf.addFlags(f)\n\tf.IntVar(&a.MachineId, \"machine-id\", -1, \"id of the machine to run\")\n\tif err := f.Parse(true, args); err != nil {\n\t\treturn err\n\t}\n\tif a.MachineId < 0 {\n\t\treturn fmt.Errorf(\"--machine-id option must be set, and expects a non-negative integer\")\n\t}\n\treturn a.Conf.checkArgs(f.Args())\n}\n\n\/\/ Stop stops the machine agent.\nfunc (a *MachineAgent) Stop() error {\n\ta.tomb.Kill(nil)\n\treturn a.tomb.Wait()\n}\n\n\/\/ Run runs a machine agent.\nfunc (a *MachineAgent) Run(_ *cmd.Context) error {\n\tdefer a.tomb.Done()\n\tfor {\n\t\tlog.Printf(\"machine agent starting\")\n\t\terr := a.runOnce()\n\t\tif a.tomb.Err() != tomb.ErrStillAlive {\n\t\t\t\/\/ We have been explicitly stopped.\n\t\t\treturn err\n\t\t}\n\t\tif ug, ok := err.(*UpgradedError); ok {\n\t\t\ttools, err := environs.ChangeAgentTools(\"machine\", ug.Binary)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"cannot change agent tools: %v\", err)\n\t\t\t\ttime.Sleep(retryDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"exiting to upgrade to %v from %q\", tools.Binary, tools.URL)\n\t\t\t\/\/ Return and let upstart deal with the restart.\n\t\t\treturn nil\n\t\t}\n\t\tlog.Printf(\"error from provisioner or firewaller: %v\", err)\n\t\ttime.Sleep(retryDelay)\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (a *MachineAgent) runOnce() error {\n\tst, err := state.Open(&a.Conf.StateInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer st.Close()\n\tm, err := st.Machine(a.MachineId)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn runTasks(a.tomb.Dying(),\n\t\tmachiner.NewMachiner(m),\n\t\tNewUpgrader(\"machine\", m),\n\t)\n}\n<|endoftext|>"} {"text":"package storage\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/khlieng\/dispatch\/Godeps\/_workspace\/src\/github.com\/boltdb\/bolt\"\n)\n\nvar (\n\tappDir string\n\tdb *bolt.DB\n\n\tbucketUsers = []byte(\"Users\")\n\tbucketServers = []byte(\"Servers\")\n\tbucketChannels = []byte(\"Channels\")\n\tbucketMessages = []byte(\"Messages\")\n)\n\nfunc Initialize() {\n\tlog.Println(\"Storing data at\", Path.Root())\n\n\tvar err error\n\tdb, err = bolt.Open(Path.Database(), 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not open database:\", err)\n\t}\n\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\ttx.CreateBucketIfNotExists(bucketUsers)\n\t\ttx.CreateBucketIfNotExists(bucketServers)\n\t\ttx.CreateBucketIfNotExists(bucketChannels)\n\n\t\treturn nil\n\t})\n}\n\nfunc Close() {\n\tdb.Close()\n}\n\nfunc Clear() {\n\tos.RemoveAll(Path.Logs())\n\tos.Remove(Path.Database())\n}\nRemove unused variable appDirpackage storage\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/khlieng\/dispatch\/Godeps\/_workspace\/src\/github.com\/boltdb\/bolt\"\n)\n\nvar (\n\tdb *bolt.DB\n\n\tbucketUsers = []byte(\"Users\")\n\tbucketServers = []byte(\"Servers\")\n\tbucketChannels = []byte(\"Channels\")\n\tbucketMessages = []byte(\"Messages\")\n)\n\nfunc Initialize() {\n\tlog.Println(\"Storing data at\", Path.Root())\n\n\tvar err error\n\tdb, err = bolt.Open(Path.Database(), 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not open database:\", err)\n\t}\n\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\ttx.CreateBucketIfNotExists(bucketUsers)\n\t\ttx.CreateBucketIfNotExists(bucketServers)\n\t\ttx.CreateBucketIfNotExists(bucketChannels)\n\n\t\treturn nil\n\t})\n}\n\nfunc Close() {\n\tdb.Close()\n}\n\nfunc Clear() {\n\tos.RemoveAll(Path.Logs())\n\tos.Remove(Path.Database())\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"strconv\"\n\n\t_ \"code.google.com\/p\/go-sqlite\/go1\/sqlite3\"\n\t\"github.com\/rwcarlsen\/cyan\/query\"\n)\n\nvar (\n\thelp = flag.Bool(\"h\", false, \"Print this help message\")\n\tdbname = flag.String(\"db\", \"\", \"cyclus sqlite database to query\")\n\tsimid = flag.String(\"simid\", \"\", \"simulation id (empty string defaults to first sim id in database\")\n)\n\nvar command string\n\nvar db *sql.DB\n\nvar cmds = NewCmdSet()\n\nfunc init() {\n\tcmds.Register(\"agents\", \"list all agents in the simulation\", doAgents)\n\tcmds.Register(\"sims\", \"list all simulations in the database\", doSims)\n\tcmds.Register(\"inv\", \"show inventory of one or more agents at a specific timestep\", doInv)\n\tcmds.Register(\"created\", \"show material created by one or more agents between specific timesteps\", doCreated)\n\tcmds.Register(\"deployseries\", \"print a time-series of a prototype's total active deployments\", doDeploySeries)\n\tcmds.Register(\"flow\", \"Show total transacted material between two groups of agents between specific timesteps\", doFlow)\n\tcmds.Register(\"invseries\", \"print a time series of an agent's inventory for specified isotopes\", doInvSeries)\n\tcmds.Register(\"flowgraph\", \"print a graphviz dot graph of resource arcs between facilities\", doFlowGraph)\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\tflag.Parse()\n\n\tif *help || flag.NArg() < 1 {\n\t\tfmt.Println(\"Usage: metric -db [opts] [args...]\")\n\t\tfmt.Println(\"Calculates metrics for cyclus simulation data in a sqlite database.\")\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"\\nCommands:\")\n\t\tfor i := range cmds.Names {\n\t\t\tfmt.Printf(\" - %v: %v\\n\", cmds.Names[i], cmds.Helps[i])\n\t\t}\n\t\treturn\n\t} else if *dbname == \"\" {\n\t\tlog.Fatal(\"must specify database with db flag\")\n\t}\n\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", *dbname)\n\tfatalif(err)\n\tdefer db.Close()\n\n\tif *simid == \"\" {\n\t\tids, err := query.SimIds(db)\n\t\tfatalif(err)\n\t\t*simid = ids[0]\n\t}\n\n\tcmds.Execute(flag.Args())\n}\n\nfunc doSims(args []string) {\n\tids, err := query.SimIds(db)\n\tfatalif(err)\n\tfor _, id := range ids {\n\t\tinfo, err := query.SimStat(db, id)\n\t\tfatalif(err)\n\t\tfmt.Println(info)\n\t}\n}\n\nfunc doAgents(args []string) {\n\tags, err := query.AllAgents(db, *simid)\n\tfatalif(err)\n\tfor _, a := range ags {\n\t\tfmt.Println(a)\n\t}\n}\n\nfunc doInv(args []string) {\n\tfs := flag.NewFlagSet(\"inv\", flag.ExitOnError)\n\tt := fs.Int(\"t\", -1, \"timestep of inventory (-1 = end of simulation)\")\n\tfs.Usage = func() {\n\t\tlog.Print(\"Usage: inv [agent-id...]\\nZero agents uses all agent inventories\")\n\t\tfs.PrintDefaults()\n\t}\n\tfs.Parse(args)\n\n\tvar agents []int\n\tfor _, arg := range fs.Args() {\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tagents = append(agents, id)\n\t}\n\n\tm, err := query.InvAt(db, *simid, *t, agents...)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\ntype Row struct {\n\tX int\n\tYs []float64\n}\n\ntype MultiSeries [][]query.XY\n\nfunc (ms MultiSeries) Rows() []Row {\n\trowmap := map[int]Row{}\n\txs := []int{}\n\tfor i, s := range ms {\n\t\tfor _, xy := range s {\n\t\t\trow, ok := rowmap[xy.X]\n\t\t\tif !ok {\n\t\t\t\txs = append(xs, xy.X)\n\t\t\t\trow.Ys = make([]float64, len(ms))\n\t\t\t\trow.X = xy.X\n\t\t\t}\n\t\t\trow.Ys[i] = xy.Y\n\t\t\trowmap[xy.X] = row\n\t\t}\n\t}\n\n\tsort.Ints(xs)\n\trows := make([]Row, 0, len(rowmap))\n\tfor _, x := range xs {\n\t\trows = append(rows, rowmap[x])\n\t}\n\treturn rows\n}\n\nfunc doInvSeries(args []string) {\n\tfs := flag.NewFlagSet(\"invseries\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: invseries [isotope...]\"); fs.PrintDefaults() }\n\tfs.Parse(args)\n\tif fs.NArg() < 2 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tagent, err := strconv.Atoi(fs.Arg(0))\n\tfatalif(err)\n\n\tisos := []int{}\n\tfor _, arg := range fs.Args()[1:] {\n\t\tiso, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tisos = append(isos, iso)\n\t}\n\n\tms := MultiSeries{}\n\tfor _, iso := range isos {\n\t\txys, err := query.InvSeries(db, *simid, agent, iso)\n\t\tms = append(ms, xys)\n\t\tfatalif(err)\n\t}\n\n\tfmt.Printf(\"# Agent %v inventory in kg\\n\", agent)\n\tfmt.Printf(\"# [Timestep]\")\n\tfor _, iso := range isos {\n\t\tfmt.Printf(\" [%v]\", iso)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor _, row := range ms.Rows() {\n\t\tfmt.Printf(\"%v\", row.X)\n\t\tfor _, y := range row.Ys {\n\t\t\tfmt.Printf(\" %v \", y)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc doFlowGraph(args []string) {\n\tfs := flag.NewFlagSet(\"flowgraph\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: flowgraph\"); fs.PrintDefaults() }\n\tproto := fs.Bool(\"proto\", false, \"aggregate nodes by prototype\")\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Parse(args)\n\n\tarcs, err := query.FlowGraph(db, *simid, *t0, *t1, *proto)\n\tfatalif(err)\n\n\tfmt.Println(\"digraph ResourceFlows {\")\n\tfmt.Println(\" overlap = false;\")\n\tfmt.Println(\" nodesep=1.0;\")\n\tfmt.Println(\" edge [fontsize=9];\")\n\tfor _, arc := range arcs {\n\t\tfmt.Printf(\" \\\"%v\\\" -> \\\"%v\\\" [label=\\\"%v\\\\n(%.3g kg)\\\"];\\n\", arc.Src, arc.Dst, arc.Commod, arc.Quantity)\n\t}\n\tfmt.Println(\"}\")\n}\n\nfunc doDeploySeries(args []string) {\n\tfs := flag.NewFlagSet(\"deployseries\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: deployseries \"); fs.PrintDefaults() }\n\tfs.Parse(args)\n\tif fs.NArg() < 1 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tproto := fs.Arg(0)\n\txys, err := query.DeployCumulative(db, *simid, proto)\n\tfatalif(err)\n\n\tfmt.Printf(\"# Prototype %v total active deployments\\n\", proto)\n\tfmt.Println(\"# [Timestep] [Count]\")\n\tfor _, xy := range xys {\n\t\tfmt.Printf(\"%v %v\\n\", xy.X, xy.Y)\n\t}\n}\n\nfunc doCreated(args []string) {\n\tfs := flag.NewFlagSet(\"created\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: created [agent-id...]\\nZero agents uses all agents\"); fs.PrintDefaults() }\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Parse(args)\n\n\tvar agents []int\n\n\tfor _, arg := range fs.Args() {\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tagents = append(agents, id)\n\t}\n\n\tm, err := query.MatCreated(db, *simid, *t0, *t1, agents...)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\nfunc doFlow(args []string) {\n\tfs := flag.NewFlagSet(\"created\", flag.ExitOnError)\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Usage = func() {\n\t\tlog.Print(\"Usage: flow .. \\nZero agents uses all agents\")\n\t\tfs.PrintDefaults()\n\t}\n\tfs.Parse(args)\n\n\tvar from []int\n\tvar to []int\n\n\tif flag.NArg() < 3 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tbefore := true\n\tfor _, arg := range flag.Args()[3:] {\n\t\tif arg == \"..\" {\n\t\t\tbefore = false\n\t\t\tcontinue\n\t\t}\n\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tif before {\n\t\t\tfrom = append(from, id)\n\t\t} else {\n\t\t\tto = append(to, id)\n\t\t}\n\t}\n\tif len(to) < 1 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tm, err := query.Flow(db, *simid, *t0, *t1, from, to)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\nfunc fatalif(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\ntype CmdSet struct {\n\tfuncs map[string]func([]string)\n\tNames []string\n\tHelps []string\n}\n\nfunc NewCmdSet() *CmdSet {\n\treturn &CmdSet{funcs: map[string]func([]string){}}\n}\n\nfunc (cs *CmdSet) Register(name, brief string, f func([]string)) {\n\tcs.Names = append(cs.Names, name)\n\tcs.Helps = append(cs.Helps, brief)\n\tcs.funcs[name] = f\n}\n\nfunc (cs *CmdSet) Execute(args []string) {\n\tcmd := args[0]\n\tf, ok := cs.funcs[cmd]\n\tif !ok {\n\t\tlog.Fatalf(\"Invalid command '%v'\", cmd)\n\t}\n\tf(args[1:])\n}\nadded way to specify custom sql queries in a config file identified by -custom cli flagpackage main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"text\/tabwriter\"\n\n\t_ \"code.google.com\/p\/go-sqlite\/go1\/sqlite3\"\n\t\"github.com\/rwcarlsen\/cyan\/query\"\n)\n\nvar (\n\thelp = flag.Bool(\"h\", false, \"print this help message\")\n\tcustom = flag.String(\"custom\", \"\", \"path to custom sql query spec file\")\n\tdbname = flag.String(\"db\", \"\", \"cyclus sqlite database to query\")\n\tsimid = flag.String(\"simid\", \"\", \"simulation id (empty string defaults to first sim id in database\")\n)\n\nvar command string\n\nvar db *sql.DB\n\nvar cmds = NewCmdSet()\n\n\/\/ map[cmdname]sqltext\nvar customSql = map[string]string{}\n\nfunc init() {\n\tcmds.Register(\"agents\", \"list all agents in the simulation\", doAgents)\n\tcmds.Register(\"sims\", \"list all simulations in the database\", doSims)\n\tcmds.Register(\"inv\", \"show inventory of one or more agents at a specific timestep\", doInv)\n\tcmds.Register(\"created\", \"show material created by one or more agents between specific timesteps\", doCreated)\n\tcmds.Register(\"deployseries\", \"print a time-series of a prototype's total active deployments\", doDeploySeries)\n\tcmds.Register(\"flow\", \"Show total transacted material between two groups of agents between specific timesteps\", doFlow)\n\tcmds.Register(\"invseries\", \"print a time series of an agent's inventory for specified isotopes\", doInvSeries)\n\tcmds.Register(\"flowgraph\", \"print a graphviz dot graph of resource arcs between facilities\", doFlowGraph)\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\tflag.Parse()\n\n\tif *help || flag.NArg() < 1 {\n\t\tfmt.Println(\"Usage: metric -db [opts] [args...]\")\n\t\tfmt.Println(\"Calculates metrics for cyclus simulation data in a sqlite database.\")\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"\\nCommands:\")\n\t\tfor i := range cmds.Names {\n\t\t\tfmt.Printf(\" - %v: %v\\n\", cmds.Names[i], cmds.Helps[i])\n\t\t}\n\t\treturn\n\t} else if *dbname == \"\" {\n\t\tlog.Fatal(\"must specify database with db flag\")\n\t}\n\n\tif *custom != \"\" {\n\t\tdata, err := ioutil.ReadFile(*custom)\n\t\tfatalif(err)\n\t\tfatalif(json.Unmarshal(data, &customSql))\n\t}\n\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", *dbname)\n\tfatalif(err)\n\tdefer db.Close()\n\n\tif *simid == \"\" {\n\t\tids, err := query.SimIds(db)\n\t\tfatalif(err)\n\t\t*simid = ids[0]\n\t}\n\n\tcmds.Execute(flag.Args())\n}\n\nfunc doCustom(cmd string, oargs []string) {\n\ts, ok := customSql[cmd]\n\tif !ok {\n\t\tlog.Fatalf(\"Invalid command %v\", cmd)\n\t}\n\targs := make([]interface{}, len(oargs))\n\tfor i := range args {\n\t\targs[i] = oargs[i]\n\t}\n\trows, err := db.Query(s, args...)\n\tfatalif(err)\n\n\ttw := tabwriter.NewWriter(os.Stdout, 4, 4, 1, ' ', 0)\n\tcols, err := rows.Columns()\n\tfatalif(err)\n\tfor _, c := range cols {\n\t\t_, err := tw.Write([]byte(c + \"\\t\"))\n\t\tfatalif(err)\n\t}\n\t_, err = tw.Write([]byte(\"\\n\"))\n\tfatalif(err)\n\n\tfor rows.Next() {\n\t\tvs := make([]interface{}, len(cols))\n\t\tvals := make([]string, len(cols))\n\t\tfor i := range vals {\n\t\t\tvs[i] = &vals[i]\n\t\t}\n\t\terr := rows.Scan(vs...)\n\t\tfatalif(err)\n\n\t\tfor _, v := range vals {\n\t\t\t_, err := tw.Write([]byte(v + \"\\t\"))\n\t\t\tfatalif(err)\n\t\t}\n\t\t_, err = tw.Write([]byte(\"\\n\"))\n\t\tfatalif(err)\n\t}\n\tfatalif(rows.Err())\n\tfatalif(tw.Flush())\n\treturn\n}\n\nfunc doSims(cmd string, args []string) {\n\tids, err := query.SimIds(db)\n\tfatalif(err)\n\tfor _, id := range ids {\n\t\tinfo, err := query.SimStat(db, id)\n\t\tfatalif(err)\n\t\tfmt.Println(info)\n\t}\n}\n\nfunc doAgents(cmd string, args []string) {\n\tags, err := query.AllAgents(db, *simid)\n\tfatalif(err)\n\tfor _, a := range ags {\n\t\tfmt.Println(a)\n\t}\n}\n\nfunc doInv(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"inv\", flag.ExitOnError)\n\tt := fs.Int(\"t\", -1, \"timestep of inventory (-1 = end of simulation)\")\n\tfs.Usage = func() {\n\t\tlog.Print(\"Usage: inv [agent-id...]\\nZero agents uses all agent inventories\")\n\t\tfs.PrintDefaults()\n\t}\n\tfs.Parse(args)\n\n\tvar agents []int\n\tfor _, arg := range fs.Args() {\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tagents = append(agents, id)\n\t}\n\n\tm, err := query.InvAt(db, *simid, *t, agents...)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\ntype Row struct {\n\tX int\n\tYs []float64\n}\n\ntype MultiSeries [][]query.XY\n\nfunc (ms MultiSeries) Rows() []Row {\n\trowmap := map[int]Row{}\n\txs := []int{}\n\tfor i, s := range ms {\n\t\tfor _, xy := range s {\n\t\t\trow, ok := rowmap[xy.X]\n\t\t\tif !ok {\n\t\t\t\txs = append(xs, xy.X)\n\t\t\t\trow.Ys = make([]float64, len(ms))\n\t\t\t\trow.X = xy.X\n\t\t\t}\n\t\t\trow.Ys[i] = xy.Y\n\t\t\trowmap[xy.X] = row\n\t\t}\n\t}\n\n\tsort.Ints(xs)\n\trows := make([]Row, 0, len(rowmap))\n\tfor _, x := range xs {\n\t\trows = append(rows, rowmap[x])\n\t}\n\treturn rows\n}\n\nfunc doInvSeries(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"invseries\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: invseries [isotope...]\"); fs.PrintDefaults() }\n\tfs.Parse(args)\n\tif fs.NArg() < 2 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tagent, err := strconv.Atoi(fs.Arg(0))\n\tfatalif(err)\n\n\tisos := []int{}\n\tfor _, arg := range fs.Args()[1:] {\n\t\tiso, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tisos = append(isos, iso)\n\t}\n\n\tms := MultiSeries{}\n\tfor _, iso := range isos {\n\t\txys, err := query.InvSeries(db, *simid, agent, iso)\n\t\tms = append(ms, xys)\n\t\tfatalif(err)\n\t}\n\n\tfmt.Printf(\"# Agent %v inventory in kg\\n\", agent)\n\tfmt.Printf(\"# [Timestep]\")\n\tfor _, iso := range isos {\n\t\tfmt.Printf(\" [%v]\", iso)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor _, row := range ms.Rows() {\n\t\tfmt.Printf(\"%v\", row.X)\n\t\tfor _, y := range row.Ys {\n\t\t\tfmt.Printf(\" %v \", y)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc doFlowGraph(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"flowgraph\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: flowgraph\"); fs.PrintDefaults() }\n\tproto := fs.Bool(\"proto\", false, \"aggregate nodes by prototype\")\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Parse(args)\n\n\tarcs, err := query.FlowGraph(db, *simid, *t0, *t1, *proto)\n\tfatalif(err)\n\n\tfmt.Println(\"digraph ResourceFlows {\")\n\tfmt.Println(\" overlap = false;\")\n\tfmt.Println(\" nodesep=1.0;\")\n\tfmt.Println(\" edge [fontsize=9];\")\n\tfor _, arc := range arcs {\n\t\tfmt.Printf(\" \\\"%v\\\" -> \\\"%v\\\" [label=\\\"%v\\\\n(%.3g kg)\\\"];\\n\", arc.Src, arc.Dst, arc.Commod, arc.Quantity)\n\t}\n\tfmt.Println(\"}\")\n}\n\nfunc doDeploySeries(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"deployseries\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: deployseries \"); fs.PrintDefaults() }\n\tfs.Parse(args)\n\tif fs.NArg() < 1 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tproto := fs.Arg(0)\n\txys, err := query.DeployCumulative(db, *simid, proto)\n\tfatalif(err)\n\n\tfmt.Printf(\"# Prototype %v total active deployments\\n\", proto)\n\tfmt.Println(\"# [Timestep] [Count]\")\n\tfor _, xy := range xys {\n\t\tfmt.Printf(\"%v %v\\n\", xy.X, xy.Y)\n\t}\n}\n\nfunc doCreated(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"created\", flag.ExitOnError)\n\tfs.Usage = func() { log.Print(\"Usage: created [agent-id...]\\nZero agents uses all agents\"); fs.PrintDefaults() }\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Parse(args)\n\n\tvar agents []int\n\n\tfor _, arg := range fs.Args() {\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tagents = append(agents, id)\n\t}\n\n\tm, err := query.MatCreated(db, *simid, *t0, *t1, agents...)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\nfunc doFlow(cmd string, args []string) {\n\tfs := flag.NewFlagSet(\"created\", flag.ExitOnError)\n\tt0 := fs.Int(\"t1\", -1, \"beginning of time interval (default is beginning of simulation)\")\n\tt1 := fs.Int(\"t2\", -1, \"end of time interval (default if end of simulation)\")\n\tfs.Usage = func() {\n\t\tlog.Print(\"Usage: flow .. \\nZero agents uses all agents\")\n\t\tfs.PrintDefaults()\n\t}\n\tfs.Parse(args)\n\n\tvar from []int\n\tvar to []int\n\n\tif flag.NArg() < 3 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tbefore := true\n\tfor _, arg := range flag.Args()[3:] {\n\t\tif arg == \"..\" {\n\t\t\tbefore = false\n\t\t\tcontinue\n\t\t}\n\n\t\tid, err := strconv.Atoi(arg)\n\t\tfatalif(err)\n\t\tif before {\n\t\t\tfrom = append(from, id)\n\t\t} else {\n\t\t\tto = append(to, id)\n\t\t}\n\t}\n\tif len(to) < 1 {\n\t\tfs.Usage()\n\t\treturn\n\t}\n\n\tm, err := query.Flow(db, *simid, *t0, *t1, from, to)\n\tfatalif(err)\n\tfmt.Printf(\"%+v\\n\", m)\n}\n\nfunc fatalif(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\ntype CmdSet struct {\n\tfuncs map[string]func(string, []string) \/\/ map[cmdname]func(cmdname, args)\n\tNames []string\n\tHelps []string\n}\n\nfunc NewCmdSet() *CmdSet {\n\treturn &CmdSet{funcs: map[string]func(string, []string){}}\n}\n\nfunc (cs *CmdSet) Register(name, brief string, f func(string, []string)) {\n\tcs.Names = append(cs.Names, name)\n\tcs.Helps = append(cs.Helps, brief)\n\tcs.funcs[name] = f\n}\n\nfunc (cs *CmdSet) Execute(args []string) {\n\tcmd := args[0]\n\tf, ok := cs.funcs[cmd]\n\tif !ok {\n\t\tdoCustom(cmd, args[1:])\n\t\treturn\n\t}\n\tf(cmd, args[1:])\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage draw\n\nimport (\n\t\"image\"\n\t\"testing\"\n)\n\nfunc eq(c0, c1 image.Color) bool {\n\tr0, g0, b0, a0 := c0.RGBA()\n\tr1, g1, b1, a1 := c1.RGBA()\n\treturn r0 == r1 && g0 == g1 && b0 == b1 && a0 == a1\n}\n\nfunc fillBlue(alpha int) image.Image {\n\treturn image.ColorImage{image.RGBAColor{0, 0, uint8(alpha), uint8(alpha)}}\n}\n\nfunc fillAlpha(alpha int) image.Image {\n\treturn image.ColorImage{image.AlphaColor{uint8(alpha)}}\n}\n\nfunc vgradGreen(alpha int) image.Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{0, uint8(y * alpha \/ 15), 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc vgradAlpha(alpha int) image.Image {\n\tm := image.NewAlpha(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.AlphaColor{uint8(y * alpha \/ 15)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc hgradRed(alpha int) Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{uint8(x * alpha \/ 15), 0, 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc gradYellow(alpha int) Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{uint8(x * alpha \/ 15), uint8(y * alpha \/ 15), 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\ntype drawTest struct {\n\tdesc string\n\tsrc image.Image\n\tmask image.Image\n\top Op\n\texpected image.Color\n}\n\nvar drawTests = []drawTest{\n\t\/\/ Uniform mask (0% opaque).\n\tdrawTest{\"nop\", vgradGreen(255), fillAlpha(0), Over, image.RGBAColor{136, 0, 0, 255}},\n\tdrawTest{\"clear\", vgradGreen(255), fillAlpha(0), Src, image.RGBAColor{0, 0, 0, 0}},\n\t\/\/ Uniform mask (100%, 75%, nil) and uniform source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 0, 90, 90}.\n\tdrawTest{\"fill\", fillBlue(90), fillAlpha(255), Over, image.RGBAColor{88, 0, 90, 255}},\n\tdrawTest{\"fillSrc\", fillBlue(90), fillAlpha(255), Src, image.RGBAColor{0, 0, 90, 90}},\n\tdrawTest{\"fillAlpha\", fillBlue(90), fillAlpha(192), Over, image.RGBAColor{100, 0, 68, 255}},\n\tdrawTest{\"fillAlphaSrc\", fillBlue(90), fillAlpha(192), Src, image.RGBAColor{0, 0, 68, 68}},\n\tdrawTest{\"fillNil\", fillBlue(90), nil, Over, image.RGBAColor{88, 0, 90, 255}},\n\tdrawTest{\"fillNilSrc\", fillBlue(90), nil, Src, image.RGBAColor{0, 0, 90, 90}},\n\t\/\/ Uniform mask (100%, 75%, nil) and variable source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 48, 0, 90}.\n\tdrawTest{\"copy\", vgradGreen(90), fillAlpha(255), Over, image.RGBAColor{88, 48, 0, 255}},\n\tdrawTest{\"copySrc\", vgradGreen(90), fillAlpha(255), Src, image.RGBAColor{0, 48, 0, 90}},\n\tdrawTest{\"copyAlpha\", vgradGreen(90), fillAlpha(192), Over, image.RGBAColor{100, 36, 0, 255}},\n\tdrawTest{\"copyAlphaSrc\", vgradGreen(90), fillAlpha(192), Src, image.RGBAColor{0, 36, 0, 68}},\n\tdrawTest{\"copyNil\", vgradGreen(90), nil, Over, image.RGBAColor{88, 48, 0, 255}},\n\tdrawTest{\"copyNilSrc\", vgradGreen(90), nil, Src, image.RGBAColor{0, 48, 0, 90}},\n\t\/\/ Variable mask and variable source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 0, 255, 255}.\n\t\/\/ The mask pixel's alpha is 102, or 40%.\n\tdrawTest{\"generic\", fillBlue(255), vgradAlpha(192), Over, image.RGBAColor{81, 0, 102, 255}},\n\tdrawTest{\"genericSrc\", fillBlue(255), vgradAlpha(192), Src, image.RGBAColor{0, 0, 102, 102}},\n}\n\nfunc makeGolden(dst, src, mask image.Image, op Op) image.Image {\n\t\/\/ Since golden is a newly allocated image, we don't have to check if the\n\t\/\/ input source and mask images and the output golden image overlap.\n\tb := dst.Bounds()\n\tsx0 := src.Bounds().Min.X - b.Min.X\n\tsy0 := src.Bounds().Min.Y - b.Min.Y\n\tvar mx0, my0 int\n\tif mask != nil {\n\t\tmx0 = mask.Bounds().Min.X - b.Min.X\n\t\tmy0 = mask.Bounds().Min.Y - b.Min.Y\n\t}\n\tgolden := image.NewRGBA(b.Max.X, b.Max.Y)\n\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\tmy, sy := my0+y, sy0+y\n\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\tmx, sx := mx0+x, sx0+x\n\t\t\tconst M = 1<<16 - 1\n\t\t\tvar dr, dg, db, da uint32\n\t\t\tif op == Over {\n\t\t\t\tdr, dg, db, da = dst.At(x, y).RGBA()\n\t\t\t}\n\t\t\tsr, sg, sb, sa := src.At(sx, sy).RGBA()\n\t\t\tma := uint32(M)\n\t\t\tif mask != nil {\n\t\t\t\t_, _, _, ma = mask.At(mx, my).RGBA()\n\t\t\t}\n\t\t\ta := M - (sa * ma \/ M)\n\t\t\tgolden.Set(x, y, image.RGBA64Color{\n\t\t\t\tuint16((dr*a + sr*ma) \/ M),\n\t\t\t\tuint16((dg*a + sg*ma) \/ M),\n\t\t\t\tuint16((db*a + sb*ma) \/ M),\n\t\t\t\tuint16((da*a + sa*ma) \/ M),\n\t\t\t})\n\t\t}\n\t}\n\tgolden.Rect = b\n\treturn golden\n}\n\nfunc TestDraw(t *testing.T) {\nloop:\n\tfor _, test := range drawTests {\n\t\tdst := hgradRed(255)\n\t\t\/\/ Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.\n\t\tgolden := makeGolden(dst, test.src, test.mask, test.op)\n\t\tb := dst.Bounds()\n\t\tif !b.Eq(golden.Bounds()) {\n\t\t\tt.Errorf(\"draw %s: bounds %v versus %v\", test.desc, dst.Bounds(), golden.Bounds())\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Draw the same combination onto the actual dst using the optimized DrawMask implementation.\n\t\tDrawMask(dst, b, test.src, image.ZP, test.mask, image.ZP, test.op)\n\t\t\/\/ Check that the resultant pixel at (8, 8) matches what we expect\n\t\t\/\/ (the expected value can be verified by hand).\n\t\tif !eq(dst.At(8, 8), test.expected) {\n\t\t\tt.Errorf(\"draw %s: at (8, 8) %v versus %v\", test.desc, dst.At(8, 8), test.expected)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the resultant dst image matches the golden output.\n\t\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\t\tif !eq(dst.At(x, y), golden.At(x, y)) {\n\t\t\t\t\tt.Errorf(\"draw %s: at (%d, %d), %v versus golden %v\", test.desc, x, y, dst.At(x, y), golden.At(x, y))\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestDrawOverlap(t *testing.T) {\n\tfor _, op := range []Op{Over, Src} {\n\t\tfor yoff := -2; yoff <= 2; yoff++ {\n\t\tloop:\n\t\t\tfor xoff := -2; xoff <= 2; xoff++ {\n\t\t\t\tm := gradYellow(127).(*image.RGBA)\n\t\t\t\tdst := &image.RGBA{\n\t\t\t\t\tPix: m.Pix,\n\t\t\t\t\tStride: m.Stride,\n\t\t\t\t\tRect: image.Rect(5, 5, 10, 10),\n\t\t\t\t}\n\t\t\t\tsrc := &image.RGBA{\n\t\t\t\t\tPix: m.Pix,\n\t\t\t\t\tStride: m.Stride,\n\t\t\t\t\tRect: image.Rect(5+xoff, 5+yoff, 10+xoff, 10+yoff),\n\t\t\t\t}\n\t\t\t\t\/\/ Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.\n\t\t\t\tgolden := makeGolden(dst, src, nil, op)\n\t\t\t\tb := dst.Bounds()\n\t\t\t\tif !b.Eq(golden.Bounds()) {\n\t\t\t\t\tt.Errorf(\"drawOverlap xoff=%d,yoff=%d: bounds %v versus %v\", xoff, yoff, dst.Bounds(), golden.Bounds())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/ Draw the same combination onto the actual dst using the optimized DrawMask implementation.\n\t\t\t\tDrawMask(dst, b, src, src.Bounds().Min, nil, image.ZP, op)\n\t\t\t\t\/\/ Check that the resultant dst image matches the golden output.\n\t\t\t\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\t\t\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\t\t\t\tif !eq(dst.At(x, y), golden.At(x, y)) {\n\t\t\t\t\t\t\tt.Errorf(\"drawOverlap xoff=%d,yoff=%d: at (%d, %d), %v versus golden %v\", xoff, yoff, x, y, dst.At(x, y), golden.At(x, y))\n\t\t\t\t\t\t\tcontinue loop\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ TestIssue836 verifies http:\/\/code.google.com\/p\/go\/issues\/detail?id=836.\nfunc TestIssue836(t *testing.T) {\n\ta := image.NewRGBA(1, 1)\n\tb := image.NewRGBA(2, 2)\n\tb.Set(0, 0, image.RGBAColor{0, 0, 0, 5})\n\tb.Set(1, 0, image.RGBAColor{0, 0, 5, 5})\n\tb.Set(0, 1, image.RGBAColor{0, 5, 0, 5})\n\tb.Set(1, 1, image.RGBAColor{5, 0, 0, 5})\n\tDraw(a, image.Rect(0, 0, 1, 1), b, image.Pt(1, 1))\n\tif !eq(image.RGBAColor{5, 0, 0, 5}, a.At(0, 0)) {\n\t\tt.Errorf(\"Issue 836: want %v got %v\", image.RGBAColor{5, 0, 0, 5}, a.At(0, 0))\n\t}\n}\nexp\/draw: unbreak build.\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage draw\n\nimport (\n\t\"image\"\n\t\"testing\"\n)\n\nfunc eq(c0, c1 image.Color) bool {\n\tr0, g0, b0, a0 := c0.RGBA()\n\tr1, g1, b1, a1 := c1.RGBA()\n\treturn r0 == r1 && g0 == g1 && b0 == b1 && a0 == a1\n}\n\nfunc fillBlue(alpha int) image.Image {\n\treturn image.NewColorImage(image.RGBAColor{0, 0, uint8(alpha), uint8(alpha)})\n}\n\nfunc fillAlpha(alpha int) image.Image {\n\treturn image.NewColorImage(image.AlphaColor{uint8(alpha)})\n}\n\nfunc vgradGreen(alpha int) image.Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{0, uint8(y * alpha \/ 15), 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc vgradAlpha(alpha int) image.Image {\n\tm := image.NewAlpha(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.AlphaColor{uint8(y * alpha \/ 15)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc hgradRed(alpha int) Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{uint8(x * alpha \/ 15), 0, 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\nfunc gradYellow(alpha int) Image {\n\tm := image.NewRGBA(16, 16)\n\tfor y := 0; y < 16; y++ {\n\t\tfor x := 0; x < 16; x++ {\n\t\t\tm.Set(x, y, image.RGBAColor{uint8(x * alpha \/ 15), uint8(y * alpha \/ 15), 0, uint8(alpha)})\n\t\t}\n\t}\n\treturn m\n}\n\ntype drawTest struct {\n\tdesc string\n\tsrc image.Image\n\tmask image.Image\n\top Op\n\texpected image.Color\n}\n\nvar drawTests = []drawTest{\n\t\/\/ Uniform mask (0% opaque).\n\tdrawTest{\"nop\", vgradGreen(255), fillAlpha(0), Over, image.RGBAColor{136, 0, 0, 255}},\n\tdrawTest{\"clear\", vgradGreen(255), fillAlpha(0), Src, image.RGBAColor{0, 0, 0, 0}},\n\t\/\/ Uniform mask (100%, 75%, nil) and uniform source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 0, 90, 90}.\n\tdrawTest{\"fill\", fillBlue(90), fillAlpha(255), Over, image.RGBAColor{88, 0, 90, 255}},\n\tdrawTest{\"fillSrc\", fillBlue(90), fillAlpha(255), Src, image.RGBAColor{0, 0, 90, 90}},\n\tdrawTest{\"fillAlpha\", fillBlue(90), fillAlpha(192), Over, image.RGBAColor{100, 0, 68, 255}},\n\tdrawTest{\"fillAlphaSrc\", fillBlue(90), fillAlpha(192), Src, image.RGBAColor{0, 0, 68, 68}},\n\tdrawTest{\"fillNil\", fillBlue(90), nil, Over, image.RGBAColor{88, 0, 90, 255}},\n\tdrawTest{\"fillNilSrc\", fillBlue(90), nil, Src, image.RGBAColor{0, 0, 90, 90}},\n\t\/\/ Uniform mask (100%, 75%, nil) and variable source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 48, 0, 90}.\n\tdrawTest{\"copy\", vgradGreen(90), fillAlpha(255), Over, image.RGBAColor{88, 48, 0, 255}},\n\tdrawTest{\"copySrc\", vgradGreen(90), fillAlpha(255), Src, image.RGBAColor{0, 48, 0, 90}},\n\tdrawTest{\"copyAlpha\", vgradGreen(90), fillAlpha(192), Over, image.RGBAColor{100, 36, 0, 255}},\n\tdrawTest{\"copyAlphaSrc\", vgradGreen(90), fillAlpha(192), Src, image.RGBAColor{0, 36, 0, 68}},\n\tdrawTest{\"copyNil\", vgradGreen(90), nil, Over, image.RGBAColor{88, 48, 0, 255}},\n\tdrawTest{\"copyNilSrc\", vgradGreen(90), nil, Src, image.RGBAColor{0, 48, 0, 90}},\n\t\/\/ Variable mask and variable source.\n\t\/\/ At (x, y) == (8, 8):\n\t\/\/ The destination pixel is {136, 0, 0, 255}.\n\t\/\/ The source pixel is {0, 0, 255, 255}.\n\t\/\/ The mask pixel's alpha is 102, or 40%.\n\tdrawTest{\"generic\", fillBlue(255), vgradAlpha(192), Over, image.RGBAColor{81, 0, 102, 255}},\n\tdrawTest{\"genericSrc\", fillBlue(255), vgradAlpha(192), Src, image.RGBAColor{0, 0, 102, 102}},\n}\n\nfunc makeGolden(dst, src, mask image.Image, op Op) image.Image {\n\t\/\/ Since golden is a newly allocated image, we don't have to check if the\n\t\/\/ input source and mask images and the output golden image overlap.\n\tb := dst.Bounds()\n\tsx0 := src.Bounds().Min.X - b.Min.X\n\tsy0 := src.Bounds().Min.Y - b.Min.Y\n\tvar mx0, my0 int\n\tif mask != nil {\n\t\tmx0 = mask.Bounds().Min.X - b.Min.X\n\t\tmy0 = mask.Bounds().Min.Y - b.Min.Y\n\t}\n\tgolden := image.NewRGBA(b.Max.X, b.Max.Y)\n\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\tmy, sy := my0+y, sy0+y\n\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\tmx, sx := mx0+x, sx0+x\n\t\t\tconst M = 1<<16 - 1\n\t\t\tvar dr, dg, db, da uint32\n\t\t\tif op == Over {\n\t\t\t\tdr, dg, db, da = dst.At(x, y).RGBA()\n\t\t\t}\n\t\t\tsr, sg, sb, sa := src.At(sx, sy).RGBA()\n\t\t\tma := uint32(M)\n\t\t\tif mask != nil {\n\t\t\t\t_, _, _, ma = mask.At(mx, my).RGBA()\n\t\t\t}\n\t\t\ta := M - (sa * ma \/ M)\n\t\t\tgolden.Set(x, y, image.RGBA64Color{\n\t\t\t\tuint16((dr*a + sr*ma) \/ M),\n\t\t\t\tuint16((dg*a + sg*ma) \/ M),\n\t\t\t\tuint16((db*a + sb*ma) \/ M),\n\t\t\t\tuint16((da*a + sa*ma) \/ M),\n\t\t\t})\n\t\t}\n\t}\n\tgolden.Rect = b\n\treturn golden\n}\n\nfunc TestDraw(t *testing.T) {\nloop:\n\tfor _, test := range drawTests {\n\t\tdst := hgradRed(255)\n\t\t\/\/ Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.\n\t\tgolden := makeGolden(dst, test.src, test.mask, test.op)\n\t\tb := dst.Bounds()\n\t\tif !b.Eq(golden.Bounds()) {\n\t\t\tt.Errorf(\"draw %s: bounds %v versus %v\", test.desc, dst.Bounds(), golden.Bounds())\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Draw the same combination onto the actual dst using the optimized DrawMask implementation.\n\t\tDrawMask(dst, b, test.src, image.ZP, test.mask, image.ZP, test.op)\n\t\t\/\/ Check that the resultant pixel at (8, 8) matches what we expect\n\t\t\/\/ (the expected value can be verified by hand).\n\t\tif !eq(dst.At(8, 8), test.expected) {\n\t\t\tt.Errorf(\"draw %s: at (8, 8) %v versus %v\", test.desc, dst.At(8, 8), test.expected)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the resultant dst image matches the golden output.\n\t\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\t\tif !eq(dst.At(x, y), golden.At(x, y)) {\n\t\t\t\t\tt.Errorf(\"draw %s: at (%d, %d), %v versus golden %v\", test.desc, x, y, dst.At(x, y), golden.At(x, y))\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestDrawOverlap(t *testing.T) {\n\tfor _, op := range []Op{Over, Src} {\n\t\tfor yoff := -2; yoff <= 2; yoff++ {\n\t\tloop:\n\t\t\tfor xoff := -2; xoff <= 2; xoff++ {\n\t\t\t\tm := gradYellow(127).(*image.RGBA)\n\t\t\t\tdst := &image.RGBA{\n\t\t\t\t\tPix: m.Pix,\n\t\t\t\t\tStride: m.Stride,\n\t\t\t\t\tRect: image.Rect(5, 5, 10, 10),\n\t\t\t\t}\n\t\t\t\tsrc := &image.RGBA{\n\t\t\t\t\tPix: m.Pix,\n\t\t\t\t\tStride: m.Stride,\n\t\t\t\t\tRect: image.Rect(5+xoff, 5+yoff, 10+xoff, 10+yoff),\n\t\t\t\t}\n\t\t\t\t\/\/ Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation.\n\t\t\t\tgolden := makeGolden(dst, src, nil, op)\n\t\t\t\tb := dst.Bounds()\n\t\t\t\tif !b.Eq(golden.Bounds()) {\n\t\t\t\t\tt.Errorf(\"drawOverlap xoff=%d,yoff=%d: bounds %v versus %v\", xoff, yoff, dst.Bounds(), golden.Bounds())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/ Draw the same combination onto the actual dst using the optimized DrawMask implementation.\n\t\t\t\tDrawMask(dst, b, src, src.Bounds().Min, nil, image.ZP, op)\n\t\t\t\t\/\/ Check that the resultant dst image matches the golden output.\n\t\t\t\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\t\t\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\t\t\t\tif !eq(dst.At(x, y), golden.At(x, y)) {\n\t\t\t\t\t\t\tt.Errorf(\"drawOverlap xoff=%d,yoff=%d: at (%d, %d), %v versus golden %v\", xoff, yoff, x, y, dst.At(x, y), golden.At(x, y))\n\t\t\t\t\t\t\tcontinue loop\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ TestIssue836 verifies http:\/\/code.google.com\/p\/go\/issues\/detail?id=836.\nfunc TestIssue836(t *testing.T) {\n\ta := image.NewRGBA(1, 1)\n\tb := image.NewRGBA(2, 2)\n\tb.Set(0, 0, image.RGBAColor{0, 0, 0, 5})\n\tb.Set(1, 0, image.RGBAColor{0, 0, 5, 5})\n\tb.Set(0, 1, image.RGBAColor{0, 5, 0, 5})\n\tb.Set(1, 1, image.RGBAColor{5, 0, 0, 5})\n\tDraw(a, image.Rect(0, 0, 1, 1), b, image.Pt(1, 1))\n\tif !eq(image.RGBAColor{5, 0, 0, 5}, a.At(0, 0)) {\n\t\tt.Errorf(\"Issue 836: want %v got %v\", image.RGBAColor{5, 0, 0, 5}, a.At(0, 0))\n\t}\n}\n<|endoftext|>"} {"text":"package raft\n\nimport (\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/third_party\/code.google.com\/p\/go.net\/context\"\n)\n\n\/\/ TestNodeStep ensures that node.Step sends msgProp to propc chan\n\/\/ and other kinds of messages to recvc chan.\nfunc TestNodeStep(t *testing.T) {\n\tfor i := range mtmap {\n\t\tn := &Node{\n\t\t\tpropc: make(chan raftpb.Message, 1),\n\t\t\trecvc: make(chan raftpb.Message, 1),\n\t\t}\n\t\tn.Step(context.TODO(), raftpb.Message{Type: int64(i)})\n\t\t\/\/ Proposal goes to proc chan. Others go to recvc chan.\n\t\tif int64(i) == msgProp {\n\t\t\tselect {\n\t\t\tcase <-n.propc:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"%d: cannot receive %s on propc chan\", i, mtmap[i])\n\t\t\t}\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase <-n.recvc:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"%d: cannot receive %s on recvc chan\", i, mtmap[i])\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Cancel and Stop should unblock Step()\nfunc TestNodeStepUnblock(t *testing.T) {\n\t\/\/ a node without buffer to block step\n\tn := &Node{\n\t\tpropc: make(chan raftpb.Message),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tstopFunc := func() { close(n.done) }\n\n\ttests := []struct {\n\t\tunblock func()\n\t\twerr error\n\t}{\n\t\t{stopFunc, ErrStopped},\n\t\t{cancel, context.Canceled},\n\t}\n\n\tfor i, tt := range tests {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terr := n.Step(ctx, raftpb.Message{Type: msgProp})\n\t\t\terrc <- err\n\t\t}()\n\t\ttt.unblock()\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != tt.werr {\n\t\t\t\tt.Errorf(\"#%d: err = %v, want %v\", err, tt.werr)\n\t\t\t}\n\t\t\t\/\/clean up side-effect\n\t\t\tif ctx.Err() != nil {\n\t\t\t\tctx = context.TODO()\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-n.done:\n\t\t\t\tn.done = make(chan struct{})\n\t\t\tdefault:\n\t\t\t}\n\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\tt.Errorf(\"#%d: failed to unblock step\", i)\n\t\t}\n\t}\n}\n\n\/\/ TestBlockProposal ensures that node will block proposal when it does not\n\/\/ know who is the current leader; node will accept proposal when it knows\n\/\/ who is the current leader.\nfunc TestBlockProposal(t *testing.T) {\n\tn := newNode()\n\tdefer n.Stop()\n\tr := newRaft(1, []int64{1}, 10, 1)\n\tgo n.run(r)\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\terrc <- n.Propose(context.TODO(), []byte(\"somedata\"))\n\t}()\n\n\tforceGosched()\n\tselect {\n\tcase err := <-errc:\n\t\tt.Errorf(\"err = %v, want blocking\", err)\n\tdefault:\n\t}\n\n\tn.Campaign(context.TODO())\n\tforceGosched()\n\tselect {\n\tcase err := <-errc:\n\t\tif err != nil {\n\t\t\tt.Errorf(\"err = %v, want %v\", err, nil)\n\t\t}\n\tdefault:\n\t\tt.Errorf(\"blocking proposal, want unblocking\")\n\t}\n}\n\nfunc TestReadyContainUpdates(t *testing.T) {\n\ttests := []struct {\n\t\trd Ready\n\t\twcontain bool\n\t}{\n\t\t{Ready{}, false},\n\t\t{Ready{State: raftpb.State{Vote: 1}}, true},\n\t\t{Ready{Entries: make([]raftpb.Entry, 1, 1)}, true},\n\t\t{Ready{CommittedEntries: make([]raftpb.Entry, 1, 1)}, true},\n\t\t{Ready{Messages: make([]raftpb.Message, 1, 1)}, true},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif tt.rd.containsUpdates() != tt.wcontain {\n\t\t\tt.Errorf(\"#%d: containUpdates = %v, want %v\", i, tt.rd.containsUpdates(), tt.wcontain)\n\t\t}\n\t}\n}\n\nfunc TestNode(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\twants := []Ready{\n\t\t{\n\t\t\tState: raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 1},\n\t\t\tEntries: []raftpb.Entry{{Term: 1, Index: 1}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 1, Index: 1}},\n\t\t},\n\t\t{\n\t\t\tState: raftpb.State{Term: 1, Vote: -1, Commit: 2, LastIndex: 2},\n\t\t\tEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte(\"foo\")}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte(\"foo\")}},\n\t\t},\n\t}\n\n\tn := Start(1, []int64{1}, 0, 0)\n\tn.Campaign(ctx)\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, wants[0]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 1, g, wants[0])\n\t}\n\n\tn.Propose(ctx, []byte(\"foo\"))\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, wants[1]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 2, g, wants[1])\n\t}\n\n\tselect {\n\tcase rd := <-n.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tdefault:\n\t}\n}\n\nfunc TestNodeRestart(t *testing.T) {\n\tentries := []raftpb.Entry{\n\t\t{Term: 1, Index: 1},\n\t\t{Term: 1, Index: 2, Data: []byte(\"foo\")},\n\t}\n\tst := raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 2}\n\n\twant := Ready{\n\t\tState: emptyState,\n\t\t\/\/ commit upto index commit index in st\n\t\tCommittedEntries: entries[:st.Commit],\n\t}\n\n\tn := Restart(1, []int64{1}, 0, 0, st, entries)\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, want) {\n\t\tt.Errorf(\"g = %+v,\\n w %+v\", g, want)\n\t}\n\n\tselect {\n\tcase rd := <-n.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tdefault:\n\t}\n}\n\nfunc TestIsStateEqual(t *testing.T) {\n\ttests := []struct {\n\t\tst raftpb.State\n\t\twe bool\n\t}{\n\t\t{emptyState, true},\n\t\t{raftpb.State{Vote: 1}, false},\n\t\t{raftpb.State{Commit: 1}, false},\n\t\t{raftpb.State{Term: 1}, false},\n\t\t{raftpb.State{LastIndex: 1}, false},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif isStateEqual(tt.st, emptyState) != tt.we {\n\t\t\tt.Errorf(\"#%d, equal = %v, want %v\", i, isStateEqual(tt.st, emptyState), tt.we)\n\t\t}\n\t}\n}\n\n\/\/ WARNING: This is a hack.\n\/\/ Remove this when we are able to block\/check the status of the go-routines.\nfunc forceGosched() {\n\t\/\/ possibility enough to sched upto 10 go routines.\n\tfor i := 0; i < 10000; i++ {\n\t\truntime.Gosched()\n\t}\n}\nraft: move defer after runpackage raft\n\nimport (\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/third_party\/code.google.com\/p\/go.net\/context\"\n)\n\n\/\/ TestNodeStep ensures that node.Step sends msgProp to propc chan\n\/\/ and other kinds of messages to recvc chan.\nfunc TestNodeStep(t *testing.T) {\n\tfor i := range mtmap {\n\t\tn := &Node{\n\t\t\tpropc: make(chan raftpb.Message, 1),\n\t\t\trecvc: make(chan raftpb.Message, 1),\n\t\t}\n\t\tn.Step(context.TODO(), raftpb.Message{Type: int64(i)})\n\t\t\/\/ Proposal goes to proc chan. Others go to recvc chan.\n\t\tif int64(i) == msgProp {\n\t\t\tselect {\n\t\t\tcase <-n.propc:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"%d: cannot receive %s on propc chan\", i, mtmap[i])\n\t\t\t}\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase <-n.recvc:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"%d: cannot receive %s on recvc chan\", i, mtmap[i])\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Cancel and Stop should unblock Step()\nfunc TestNodeStepUnblock(t *testing.T) {\n\t\/\/ a node without buffer to block step\n\tn := &Node{\n\t\tpropc: make(chan raftpb.Message),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tstopFunc := func() { close(n.done) }\n\n\ttests := []struct {\n\t\tunblock func()\n\t\twerr error\n\t}{\n\t\t{stopFunc, ErrStopped},\n\t\t{cancel, context.Canceled},\n\t}\n\n\tfor i, tt := range tests {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terr := n.Step(ctx, raftpb.Message{Type: msgProp})\n\t\t\terrc <- err\n\t\t}()\n\t\ttt.unblock()\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != tt.werr {\n\t\t\t\tt.Errorf(\"#%d: err = %v, want %v\", err, tt.werr)\n\t\t\t}\n\t\t\t\/\/clean up side-effect\n\t\t\tif ctx.Err() != nil {\n\t\t\t\tctx = context.TODO()\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-n.done:\n\t\t\t\tn.done = make(chan struct{})\n\t\t\tdefault:\n\t\t\t}\n\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\tt.Errorf(\"#%d: failed to unblock step\", i)\n\t\t}\n\t}\n}\n\n\/\/ TestBlockProposal ensures that node will block proposal when it does not\n\/\/ know who is the current leader; node will accept proposal when it knows\n\/\/ who is the current leader.\nfunc TestBlockProposal(t *testing.T) {\n\tn := newNode()\n\tr := newRaft(1, []int64{1}, 10, 1)\n\tgo n.run(r)\n\tdefer n.Stop()\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\terrc <- n.Propose(context.TODO(), []byte(\"somedata\"))\n\t}()\n\n\tforceGosched()\n\tselect {\n\tcase err := <-errc:\n\t\tt.Errorf(\"err = %v, want blocking\", err)\n\tdefault:\n\t}\n\n\tn.Campaign(context.TODO())\n\tforceGosched()\n\tselect {\n\tcase err := <-errc:\n\t\tif err != nil {\n\t\t\tt.Errorf(\"err = %v, want %v\", err, nil)\n\t\t}\n\tdefault:\n\t\tt.Errorf(\"blocking proposal, want unblocking\")\n\t}\n}\n\nfunc TestReadyContainUpdates(t *testing.T) {\n\ttests := []struct {\n\t\trd Ready\n\t\twcontain bool\n\t}{\n\t\t{Ready{}, false},\n\t\t{Ready{State: raftpb.State{Vote: 1}}, true},\n\t\t{Ready{Entries: make([]raftpb.Entry, 1, 1)}, true},\n\t\t{Ready{CommittedEntries: make([]raftpb.Entry, 1, 1)}, true},\n\t\t{Ready{Messages: make([]raftpb.Message, 1, 1)}, true},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif tt.rd.containsUpdates() != tt.wcontain {\n\t\t\tt.Errorf(\"#%d: containUpdates = %v, want %v\", i, tt.rd.containsUpdates(), tt.wcontain)\n\t\t}\n\t}\n}\n\nfunc TestNode(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\twants := []Ready{\n\t\t{\n\t\t\tState: raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 1},\n\t\t\tEntries: []raftpb.Entry{{Term: 1, Index: 1}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 1, Index: 1}},\n\t\t},\n\t\t{\n\t\t\tState: raftpb.State{Term: 1, Vote: -1, Commit: 2, LastIndex: 2},\n\t\t\tEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte(\"foo\")}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte(\"foo\")}},\n\t\t},\n\t}\n\n\tn := Start(1, []int64{1}, 0, 0)\n\tn.Campaign(ctx)\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, wants[0]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 1, g, wants[0])\n\t}\n\n\tn.Propose(ctx, []byte(\"foo\"))\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, wants[1]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 2, g, wants[1])\n\t}\n\n\tselect {\n\tcase rd := <-n.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tdefault:\n\t}\n}\n\nfunc TestNodeRestart(t *testing.T) {\n\tentries := []raftpb.Entry{\n\t\t{Term: 1, Index: 1},\n\t\t{Term: 1, Index: 2, Data: []byte(\"foo\")},\n\t}\n\tst := raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 2}\n\n\twant := Ready{\n\t\tState: emptyState,\n\t\t\/\/ commit upto index commit index in st\n\t\tCommittedEntries: entries[:st.Commit],\n\t}\n\n\tn := Restart(1, []int64{1}, 0, 0, st, entries)\n\tif g := <-n.Ready(); !reflect.DeepEqual(g, want) {\n\t\tt.Errorf(\"g = %+v,\\n w %+v\", g, want)\n\t}\n\n\tselect {\n\tcase rd := <-n.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tdefault:\n\t}\n}\n\nfunc TestIsStateEqual(t *testing.T) {\n\ttests := []struct {\n\t\tst raftpb.State\n\t\twe bool\n\t}{\n\t\t{emptyState, true},\n\t\t{raftpb.State{Vote: 1}, false},\n\t\t{raftpb.State{Commit: 1}, false},\n\t\t{raftpb.State{Term: 1}, false},\n\t\t{raftpb.State{LastIndex: 1}, false},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif isStateEqual(tt.st, emptyState) != tt.we {\n\t\t\tt.Errorf(\"#%d, equal = %v, want %v\", i, isStateEqual(tt.st, emptyState), tt.we)\n\t\t}\n\t}\n}\n\n\/\/ WARNING: This is a hack.\n\/\/ Remove this when we are able to block\/check the status of the go-routines.\nfunc forceGosched() {\n\t\/\/ possibility enough to sched upto 10 go routines.\n\tfor i := 0; i < 10000; i++ {\n\t\truntime.Gosched()\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017 The go-hep Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lcio\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"go-hep.org\/x\/hep\/sio\"\n)\n\ntype RawCalorimeterHits struct {\n\tFlags Flags\n\tParams Params\n\tHits []RawCalorimeterHit\n}\n\nfunc (hits RawCalorimeterHits) String() string {\n\to := new(bytes.Buffer)\n\tfmt.Fprintf(o, \"%[1]s print out of RawCalorimeterHit collection %[1]s\\n\\n\", strings.Repeat(\"-\", 15))\n\tfmt.Fprintf(o, \" flag: 0x%x\\n%v\", hits.Flags, hits.Params)\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_ID1 : %v\\n\", hits.Flags.Test(BitsRChID1))\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_TIME : %v\\n\", hits.Flags.Test(BitsRChTime))\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_NO_PTR : %v\\n\", hits.Flags.Test(BitsRChNoPtr))\n\n\t\/\/ FIXME(sbinet): CellIDDecoder\n\n\tfmt.Fprintf(o, \"\\n\")\n\n\thead := \" [ id ] | cellId0 ( M, S, I, J, K) |cellId1 | amplitude | time \\n\"\n\ttail := \"------------|---------------------------|--------|-----------|---------\\n\"\n\tfmt.Fprintf(o, head)\n\tfmt.Fprintf(o, tail)\n\tfor _, hit := range hits.Hits {\n\t\tfmt.Fprintf(o, \" [%08d] |%08d%19s|%08d|%10d |%8d\", 0, hit.CellID0, \"\", hit.CellID1, hit.Amplitude, hit.TimeStamp)\n\t\t\/\/ FIXME(sbinet): CellIDDecoder\n\t\tfmt.Fprintf(o, \"\\n id-fields: --- unknown\/default ---- \")\n\t\tfmt.Fprintf(o, \"\\n\")\n\t}\n\tfmt.Fprintf(o, tail)\n\treturn string(o.Bytes())\n}\n\nfunc (*RawCalorimeterHits) VersionSio() uint32 {\n\treturn Version\n}\n\nfunc (hits *RawCalorimeterHits) MarshalSio(w sio.Writer) error {\n\tpanic(\"not implemented\")\n}\n\nfunc (hits *RawCalorimeterHits) UnmarshalSio(r sio.Reader) error {\n\tdec := sio.NewDecoder(r)\n\tdec.Decode(&hits.Flags)\n\tdec.Decode(&hits.Params)\n\tvar n int32\n\tdec.Decode(&n)\n\thits.Hits = make([]RawCalorimeterHit, int(n))\n\tfor i := range hits.Hits {\n\t\thit := &hits.Hits[i]\n\t\tdec.Decode(&hit.CellID0)\n\t\tif r.VersionSio() == 8 || hits.Flags.Test(BitsRChID1) {\n\t\t\tdec.Decode(&hit.CellID1)\n\t\t}\n\t\tdec.Decode(&hit.Amplitude)\n\t\tif hits.Flags.Test(BitsRChTime) {\n\t\t\tdec.Decode(&hit.TimeStamp)\n\t\t}\n\t\tif !hits.Flags.Test(BitsRChNoPtr) {\n\t\t\tdec.Tag(hit)\n\t\t}\n\t}\n\treturn dec.Err()\n}\n\ntype RawCalorimeterHit struct {\n\tCellID0 int32\n\tCellID1 int32\n\tAmplitude int32\n\tTimeStamp int32\n}\n\nvar _ sio.Codec = (*RawCalorimeterHits)(nil)\nlcio: add sio-marshaling to RawCalorimeterHits\/\/ Copyright 2017 The go-hep Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lcio\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"go-hep.org\/x\/hep\/sio\"\n)\n\ntype RawCalorimeterHits struct {\n\tFlags Flags\n\tParams Params\n\tHits []RawCalorimeterHit\n}\n\nfunc (hits RawCalorimeterHits) String() string {\n\to := new(bytes.Buffer)\n\tfmt.Fprintf(o, \"%[1]s print out of RawCalorimeterHit collection %[1]s\\n\\n\", strings.Repeat(\"-\", 15))\n\tfmt.Fprintf(o, \" flag: 0x%x\\n%v\", hits.Flags, hits.Params)\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_ID1 : %v\\n\", hits.Flags.Test(BitsRChID1))\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_TIME : %v\\n\", hits.Flags.Test(BitsRChTime))\n\tfmt.Fprintf(o, \" LCIO::RCHBIT_NO_PTR : %v\\n\", hits.Flags.Test(BitsRChNoPtr))\n\n\t\/\/ FIXME(sbinet): CellIDDecoder\n\n\tfmt.Fprintf(o, \"\\n\")\n\n\thead := \" [ id ] | cellId0 ( M, S, I, J, K) |cellId1 | amplitude | time \\n\"\n\ttail := \"------------|---------------------------|--------|-----------|---------\\n\"\n\tfmt.Fprintf(o, head)\n\tfmt.Fprintf(o, tail)\n\tfor _, hit := range hits.Hits {\n\t\tfmt.Fprintf(o, \" [%08d] |%08d%19s|%08d|%10d |%8d\", 0, hit.CellID0, \"\", hit.CellID1, hit.Amplitude, hit.TimeStamp)\n\t\t\/\/ FIXME(sbinet): CellIDDecoder\n\t\tfmt.Fprintf(o, \"\\n id-fields: --- unknown\/default ---- \")\n\t\tfmt.Fprintf(o, \"\\n\")\n\t}\n\tfmt.Fprintf(o, tail)\n\treturn string(o.Bytes())\n}\n\nfunc (*RawCalorimeterHits) VersionSio() uint32 {\n\treturn Version\n}\n\nfunc (hits *RawCalorimeterHits) MarshalSio(w sio.Writer) error {\n\tenc := sio.NewEncoder(w)\n\tenc.Encode(&hits.Flags)\n\tenc.Encode(&hits.Params)\n\tenc.Encode(int32(len(hits.Hits)))\n\tfor i := range hits.Hits {\n\t\thit := &hits.Hits[i]\n\t\tenc.Encode(&hit.CellID0)\n\t\tif hits.Flags.Test(BitsRChID1) {\n\t\t\tenc.Encode(&hit.CellID1)\n\t\t}\n\t\tenc.Encode(&hit.Amplitude)\n\t\tif hits.Flags.Test(BitsRChTime) {\n\t\t\tenc.Encode(&hit.TimeStamp)\n\t\t}\n\t\tif !hits.Flags.Test(BitsRChNoPtr) {\n\t\t\tenc.Tag(hit)\n\t\t}\n\t}\n\treturn enc.Err()\n}\n\nfunc (hits *RawCalorimeterHits) UnmarshalSio(r sio.Reader) error {\n\tdec := sio.NewDecoder(r)\n\tdec.Decode(&hits.Flags)\n\tdec.Decode(&hits.Params)\n\tvar n int32\n\tdec.Decode(&n)\n\thits.Hits = make([]RawCalorimeterHit, int(n))\n\tfor i := range hits.Hits {\n\t\thit := &hits.Hits[i]\n\t\tdec.Decode(&hit.CellID0)\n\t\tif r.VersionSio() == 8 || hits.Flags.Test(BitsRChID1) {\n\t\t\tdec.Decode(&hit.CellID1)\n\t\t}\n\t\tdec.Decode(&hit.Amplitude)\n\t\tif hits.Flags.Test(BitsRChTime) {\n\t\t\tdec.Decode(&hit.TimeStamp)\n\t\t}\n\t\tif !hits.Flags.Test(BitsRChNoPtr) {\n\t\t\tdec.Tag(hit)\n\t\t}\n\t}\n\treturn dec.Err()\n}\n\ntype RawCalorimeterHit struct {\n\tCellID0 int32\n\tCellID1 int32\n\tAmplitude int32\n\tTimeStamp int32\n}\n\nvar (\n\t_ sio.Versioner = (*RawCalorimeterHits)(nil)\n\t_ sio.Codec = (*RawCalorimeterHits)(nil)\n)\n<|endoftext|>"} {"text":"added dmidecode product<|endoftext|>"} {"text":"\/\/ Copyright 2014 Arne Roomann-Kurrik\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage twodee\n\nimport (\n\t\"fmt\"\n\t\"github.com\/go-gl\/gl\"\n)\n\ntype BatchRenderer struct {\n\t*Renderer\n\tProgram gl.Program\n\tPositionLoc gl.AttribLocation\n\tTextureLoc gl.AttribLocation\n\tTextureUnitLoc gl.UniformLocation\n\tModelViewLoc gl.UniformLocation\n\tProjectionLoc gl.UniformLocation\n}\n\nconst BATCH_FRAGMENT = `#version 150\nprecision mediump float;\n\nuniform sampler2D u_TextureUnit;\nin vec2 v_TextureCoordinates;\nout vec4 v_FragData;\n\nvoid main()\n{\n vec2 texcoords = v_TextureCoordinates;\n v_FragData = texture(u_TextureUnit, texcoords);\n \/\/v_FragData = vec4(1.0,0.0,0.0,1.0);\n}`\n\nconst BATCH_VERTEX = `#version 150\n\nin vec4 a_Position;\nin vec2 a_TextureCoordinates;\n\nuniform mat4 m_ModelViewMatrix;\nuniform mat4 m_ProjectionMatrix;\n\nout vec2 v_TextureCoordinates;\n\nvoid main()\n{\n v_TextureCoordinates = a_TextureCoordinates;\n gl_Position = m_ProjectionMatrix * m_ModelViewMatrix * a_Position;\n}`\n\nfunc NewBatchRenderer(bounds, screen Rectangle) (tr *BatchRenderer, err error) {\n\tvar (\n\t\tprogram gl.Program\n\t\tr *Renderer\n\t)\n\tif program, err = BuildProgram(BATCH_VERTEX, BATCH_FRAGMENT); err != nil {\n\t\treturn\n\t}\n\tif r, err = NewRenderer(bounds, screen); err != nil {\n\t\treturn\n\t}\n\ttr = &BatchRenderer{\n\t\tRenderer: r,\n\t\tProgram: program,\n\t\tPositionLoc: program.GetAttribLocation(\"a_Position\"),\n\t\tTextureLoc: program.GetAttribLocation(\"a_TextureCoordinates\"),\n\t\tTextureUnitLoc: program.GetUniformLocation(\"u_TextureUnit\"),\n\t\tModelViewLoc: program.GetUniformLocation(\"m_ModelViewMatrix\"),\n\t\tProjectionLoc: program.GetUniformLocation(\"m_ProjectionMatrix\"),\n\t}\n\tif e := gl.GetError(); e != 0 {\n\t\terr = fmt.Errorf(\"ERROR: OpenGL error %X\", e)\n\t}\n\treturn\n}\n\nfunc (r *BatchRenderer) Bind() error {\n\tr.Program.Use()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tgl.ActiveTexture(gl.TEXTURE0)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureUnitLoc.Uniform1i(0)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.ProjectionLoc.UniformMatrix4f(false, (*[16]float32)(r.Renderer.projection))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\treturn nil\n}\n\nfunc (r *BatchRenderer) Draw(batch *Batch, x, y, rot float32) error {\n\tbatch.Texture.Bind()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tbatch.Buffer.Bind(gl.ARRAY_BUFFER)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.PositionLoc.AttribPointer(3, gl.FLOAT, false, 5*4, uintptr(0))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureLoc.AttribPointer(2, gl.FLOAT, false, 5*4, uintptr(3*4))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.PositionLoc.EnableArray()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureLoc.EnableArray()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tm := GetRotTransMatrix(x, y, 0, rot)\n\tr.ModelViewLoc.UniformMatrix4f(false, (*[16]float32)(m))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tgl.DrawArrays(gl.TRIANGLES, 0, batch.Count)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tbatch.Buffer.Unbind(gl.ARRAY_BUFFER)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\treturn nil\n}\n\nfunc (tr *BatchRenderer) Unbind() error {\n\treturn nil\n}\n\nfunc (tr *BatchRenderer) Delete() error {\n\treturn nil\n}\n\ntype Batch struct {\n\tBuffer gl.Buffer\n\tTexture *Texture\n\tCount int\n}\n\ntype TexturedTile interface {\n\tScaledBounds(ratio float32) (x, y, w, h float32)\n\tScaledTextureBounds(rx float32, ry float32) (x, y, w, h float32)\n}\n\nfunc triangles(t TexturedTile, ratio, texw, texh float32) [30]float32 {\n\tvar (\n\t\tx, y, w, h = t.ScaledBounds(ratio)\n\t\ttx, ty, tw, th = t.ScaledTextureBounds(texw, texh)\n\t)\n\treturn [30]float32{\n\t\tx, y, 0.0,\n\t\ttx, ty,\n\n\t\tx + w, y + h, 0.0,\n\t\ttx + tw, ty + th,\n\n\t\tx, y + h, 0.0,\n\t\ttx, ty + th,\n\n\t\tx, y, 0.0,\n\t\ttx, ty,\n\n\t\tx + w, y, 0.0,\n\t\ttx + tw, ty,\n\n\t\tx + w, y + h, 0.0,\n\t\ttx + tw, ty + th,\n\t}\n}\n\nfunc LoadBatch(tiles []TexturedTile, metadata TileMetadata) (b *Batch, err error) {\n\tvar (\n\t\tstep = 30\n\t\tsize = len(tiles) * step\n\t\tvertices = make([]float32, size)\n\t\tvbo gl.Buffer\n\t\ttexture *Texture\n\t)\n\tif texture, err = LoadTexture(metadata.Path, gl.NEAREST); err != nil {\n\t\treturn\n\t}\n\tfor i := 0; i < len(tiles); i++ {\n\t\tif tiles[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tv := triangles(tiles[i], float32(metadata.PxPerUnit), float32(texture.Width), float32(texture.Height))\n\t\tcopy(vertices[step*i:], v[:])\n\t}\n\tfmt.Printf(\"VERTICES %v\\n\", vertices[:30])\n\tif vbo, err = CreateVBO(len(vertices)*4, vertices, gl.STATIC_DRAW); err != nil {\n\t\treturn\n\t}\n\tb = &Batch{\n\t\tBuffer: vbo,\n\t\tTexture: texture,\n\t\tCount: len(vertices) \/ 5,\n\t}\n\treturn\n}\n\nfunc (b *Batch) Delete() {\n\tb.Texture.Delete()\n\tb.Buffer.Delete()\n}\nRemove debug output.\/\/ Copyright 2014 Arne Roomann-Kurrik\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage twodee\n\nimport (\n\t\"fmt\"\n\t\"github.com\/go-gl\/gl\"\n)\n\ntype BatchRenderer struct {\n\t*Renderer\n\tProgram gl.Program\n\tPositionLoc gl.AttribLocation\n\tTextureLoc gl.AttribLocation\n\tTextureUnitLoc gl.UniformLocation\n\tModelViewLoc gl.UniformLocation\n\tProjectionLoc gl.UniformLocation\n}\n\nconst BATCH_FRAGMENT = `#version 150\nprecision mediump float;\n\nuniform sampler2D u_TextureUnit;\nin vec2 v_TextureCoordinates;\nout vec4 v_FragData;\n\nvoid main()\n{\n vec2 texcoords = v_TextureCoordinates;\n v_FragData = texture(u_TextureUnit, texcoords);\n \/\/v_FragData = vec4(1.0,0.0,0.0,1.0);\n}`\n\nconst BATCH_VERTEX = `#version 150\n\nin vec4 a_Position;\nin vec2 a_TextureCoordinates;\n\nuniform mat4 m_ModelViewMatrix;\nuniform mat4 m_ProjectionMatrix;\n\nout vec2 v_TextureCoordinates;\n\nvoid main()\n{\n v_TextureCoordinates = a_TextureCoordinates;\n gl_Position = m_ProjectionMatrix * m_ModelViewMatrix * a_Position;\n}`\n\nfunc NewBatchRenderer(bounds, screen Rectangle) (tr *BatchRenderer, err error) {\n\tvar (\n\t\tprogram gl.Program\n\t\tr *Renderer\n\t)\n\tif program, err = BuildProgram(BATCH_VERTEX, BATCH_FRAGMENT); err != nil {\n\t\treturn\n\t}\n\tif r, err = NewRenderer(bounds, screen); err != nil {\n\t\treturn\n\t}\n\ttr = &BatchRenderer{\n\t\tRenderer: r,\n\t\tProgram: program,\n\t\tPositionLoc: program.GetAttribLocation(\"a_Position\"),\n\t\tTextureLoc: program.GetAttribLocation(\"a_TextureCoordinates\"),\n\t\tTextureUnitLoc: program.GetUniformLocation(\"u_TextureUnit\"),\n\t\tModelViewLoc: program.GetUniformLocation(\"m_ModelViewMatrix\"),\n\t\tProjectionLoc: program.GetUniformLocation(\"m_ProjectionMatrix\"),\n\t}\n\tif e := gl.GetError(); e != 0 {\n\t\terr = fmt.Errorf(\"ERROR: OpenGL error %X\", e)\n\t}\n\treturn\n}\n\nfunc (r *BatchRenderer) Bind() error {\n\tr.Program.Use()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tgl.ActiveTexture(gl.TEXTURE0)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureUnitLoc.Uniform1i(0)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.ProjectionLoc.UniformMatrix4f(false, (*[16]float32)(r.Renderer.projection))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\treturn nil\n}\n\nfunc (r *BatchRenderer) Draw(batch *Batch, x, y, rot float32) error {\n\tbatch.Texture.Bind()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tbatch.Buffer.Bind(gl.ARRAY_BUFFER)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.PositionLoc.AttribPointer(3, gl.FLOAT, false, 5*4, uintptr(0))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureLoc.AttribPointer(2, gl.FLOAT, false, 5*4, uintptr(3*4))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.PositionLoc.EnableArray()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tr.TextureLoc.EnableArray()\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tm := GetRotTransMatrix(x, y, 0, rot)\n\tr.ModelViewLoc.UniformMatrix4f(false, (*[16]float32)(m))\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tgl.DrawArrays(gl.TRIANGLES, 0, batch.Count)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\tbatch.Buffer.Unbind(gl.ARRAY_BUFFER)\n\tif e := gl.GetError(); e != 0 {\n\t\treturn fmt.Errorf(\"ERROR: %X\", e)\n\t}\n\treturn nil\n}\n\nfunc (tr *BatchRenderer) Unbind() error {\n\treturn nil\n}\n\nfunc (tr *BatchRenderer) Delete() error {\n\treturn nil\n}\n\ntype Batch struct {\n\tBuffer gl.Buffer\n\tTexture *Texture\n\tCount int\n}\n\ntype TexturedTile interface {\n\tScaledBounds(ratio float32) (x, y, w, h float32)\n\tScaledTextureBounds(rx float32, ry float32) (x, y, w, h float32)\n}\n\nfunc triangles(t TexturedTile, ratio, texw, texh float32) [30]float32 {\n\tvar (\n\t\tx, y, w, h = t.ScaledBounds(ratio)\n\t\ttx, ty, tw, th = t.ScaledTextureBounds(texw, texh)\n\t)\n\treturn [30]float32{\n\t\tx, y, 0.0,\n\t\ttx, ty,\n\n\t\tx + w, y + h, 0.0,\n\t\ttx + tw, ty + th,\n\n\t\tx, y + h, 0.0,\n\t\ttx, ty + th,\n\n\t\tx, y, 0.0,\n\t\ttx, ty,\n\n\t\tx + w, y, 0.0,\n\t\ttx + tw, ty,\n\n\t\tx + w, y + h, 0.0,\n\t\ttx + tw, ty + th,\n\t}\n}\n\nfunc LoadBatch(tiles []TexturedTile, metadata TileMetadata) (b *Batch, err error) {\n\tvar (\n\t\tstep = 30\n\t\tsize = len(tiles) * step\n\t\tvertices = make([]float32, size)\n\t\tvbo gl.Buffer\n\t\ttexture *Texture\n\t)\n\tif texture, err = LoadTexture(metadata.Path, gl.NEAREST); err != nil {\n\t\treturn\n\t}\n\tfor i := 0; i < len(tiles); i++ {\n\t\tif tiles[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tv := triangles(tiles[i], float32(metadata.PxPerUnit), float32(texture.Width), float32(texture.Height))\n\t\tcopy(vertices[step*i:], v[:])\n\t}\n\tif vbo, err = CreateVBO(len(vertices)*4, vertices, gl.STATIC_DRAW); err != nil {\n\t\treturn\n\t}\n\tb = &Batch{\n\t\tBuffer: vbo,\n\t\tTexture: texture,\n\t\tCount: len(vertices) \/ 5,\n\t}\n\treturn\n}\n\nfunc (b *Batch) Delete() {\n\tb.Texture.Delete()\n\tb.Buffer.Delete()\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/whitepages\/go-stingray\"\n)\n\nfunc resourcePool() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourcePoolCreate,\n\t\tRead: resourcePoolRead,\n\t\tUpdate: resourcePoolUpdate,\n\t\tDelete: resourcePoolDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"bandwidth_class\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"connection_max_connect_time\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 4,\n\t\t\t},\n\n\t\t\t\"connection_max_connections_per_node\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 0,\n\t\t\t},\n\n\t\t\t\"connection_max_queue_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 0,\n\t\t\t},\n\n\t\t\t\"connection_max_reply_time\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 30,\n\t\t\t},\n\n\t\t\t\"connection_queue_timeout\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 10,\n\t\t\t},\n\n\t\t\t\"failure_pool\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"load_balancing_algorithm\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"round_robin\",\n\t\t\t},\n\n\t\t\t\"load_balancing_priority_enabled\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t},\n\n\t\t\t\"load_balancing_priority_nodes\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_idle_connections_pernode\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_timed_out_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"monitors\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\n\t\t\t\"node_close_with_rst\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"node_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"nodes\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\n\t\t\t\"note\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"\",\n\t\t\t},\n\n\t\t\t\"passive_monitoring\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t},\n\n\t\t\t\"persistence_class\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tcp_nagle\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t},\n\n\t\t\t\"transparent\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourcePoolCreate(d *schema.ResourceData, meta interface{}) error {\n\terr := resourcePoolSet(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn resourcePoolRead(d, meta)\n}\n\nfunc resourcePoolRead(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\n\tr, resp, err := c.GetPool(d.Get(\"name\").(string))\n\tif err != nil {\n\t\tif resp != nil && resp.StatusCode == 404 {\n\t\t\t\/\/ The resource doesn't exist anymore\n\t\t\td.SetId(\"\")\n\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error reading resource: %s\", err)\n\t}\n\n\td.Set(\"bandwidth_class\", string(*r.Basic.BandwidthClass))\n\td.Set(\"connection_max_connect_time\", int(*r.Connection.MaxConnectTime))\n\td.Set(\"connection_max_connections_per_node\", int(*r.Connection.MaxConnectionsPerNode))\n\td.Set(\"connection_max_queue_size\", int(*r.Connection.MaxQueueSize))\n\td.Set(\"connection_max_reply_time\", int(*r.Connection.MaxReplyTime))\n\td.Set(\"connection_queue_timeout\", int(*r.Connection.QueueTimeout))\n\td.Set(\"failure_pool\", string(*r.Basic.FailurePool))\n\td.Set(\"load_balancing_algorithm\", string(*r.LoadBalancing.Algorithm))\n\td.Set(\"load_balancing_priority_enabled\", bool(*r.LoadBalancing.PriorityEnabled))\n\td.Set(\"load_balancing_priority_nodes\", int(*r.LoadBalancing.PriorityNodes))\n\td.Set(\"max_connection_attempts\", int(*r.Basic.MaxConnectionAttempts))\n\td.Set(\"max_idle_connections_pernode\", int(*r.Basic.MaxIdleConnectionsPerNode))\n\td.Set(\"max_timed_out_connection_attempts\", int(*r.Basic.MaxTimedOutConnectionAttempts))\n\td.Set(\"monitors\", []string(*r.Basic.Monitors))\n\td.Set(\"node_close_with_rst\", bool(*r.Basic.NodeCloseWithRST))\n\td.Set(\"node_connection_attempts\", int(*r.Basic.NodeConnectionAttempts))\n\td.Set(\"nodes\", nodesTableToNodes(*r.Basic.NodesTable))\n\td.Set(\"note\", string(*r.Basic.Note))\n\td.Set(\"passive_monitoring\", bool(*r.Basic.PassiveMonitoring))\n\td.Set(\"persistence_class\", string(*r.Basic.PersistenceClass))\n\td.Set(\"tcp_nagle\", bool(*r.TCP.Nagle))\n\td.Set(\"transparent\", bool(*r.Basic.Transparent))\n\n\treturn nil\n}\n\nfunc resourcePoolUpdate(d *schema.ResourceData, meta interface{}) error {\n\terr := resourcePoolSet(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn resourcePoolRead(d, meta)\n}\n\nfunc resourcePoolDelete(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\tr := stingray.NewPool(d.Id())\n\n\t_, err := c.Delete(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourcePoolSet(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\tr := stingray.NewPool(d.Get(\"name\").(string))\n\n\tsetString(&r.Basic.BandwidthClass, d, \"bandwidth_class\")\n\tsetInt(&r.Connection.MaxConnectTime, d, \"connection_max_connect_time\")\n\tsetInt(&r.Connection.MaxConnectionsPerNode, d, \"connection_max_connections_per_node\")\n\tsetInt(&r.Connection.MaxQueueSize, d, \"connection_max_queue_size\")\n\tsetInt(&r.Connection.MaxReplyTime, d, \"connection_max_reply_time\")\n\tsetInt(&r.Connection.QueueTimeout, d, \"connection_queue_timeout\")\n\tsetString(&r.Basic.FailurePool, d, \"failure_pool\")\n\tsetString(&r.LoadBalancing.Algorithm, d, \"load_balancing_algorithm\")\n\tsetBool(&r.LoadBalancing.PriorityEnabled, d, \"load_balancing_priority_enabled\")\n\tsetInt(&r.LoadBalancing.PriorityNodes, d, \"load_balancing_priority_nodes\")\n\tsetInt(&r.Basic.MaxConnectionAttempts, d, \"max_connection_attempts\")\n\tsetInt(&r.Basic.MaxIdleConnectionsPerNode, d, \"max_idle_connections_pernode\")\n\tsetInt(&r.Basic.MaxTimedOutConnectionAttempts, d, \"max_timed_out_connection_attempts\")\n\tsetStringSet(&r.Basic.Monitors, d, \"monitors\")\n\tsetBool(&r.Basic.NodeCloseWithRST, d, \"node_close_with_rst\")\n\tsetInt(&r.Basic.NodeConnectionAttempts, d, \"node_connection_attempts\")\n\tsetNodesTable(&r.Basic.NodesTable, d, \"nodes\")\n\tsetString(&r.Basic.Note, d, \"note\")\n\tsetBool(&r.Basic.PassiveMonitoring, d, \"passive_monitoring\")\n\tsetString(&r.Basic.PersistenceClass, d, \"persistence_class\")\n\tsetBool(&r.TCP.Nagle, d, \"tcp_nagle\")\n\tsetBool(&r.Basic.Transparent, d, \"transparent\")\n\n\t_, err := c.Set(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(d.Get(\"name\").(string))\n\n\treturn nil\n}\n\nfunc setNodesTable(target **stingray.NodesTable, d *schema.ResourceData, key string) {\n\tif _, ok := d.GetOk(key); ok {\n\t\tvar nodes []string\n\t\tif v := d.Get(key).(*schema.Set); v.Len() > 0 {\n\t\t\tnodes = make([]string, v.Len())\n\t\t\tfor i, v := range v.List() {\n\t\t\t\tnodes[i] = v.(string)\n\t\t\t}\n\t\t}\n\t\tnodesTable := nodesToNodesTable(nodes)\n\t\t*target = &nodesTable\n\t}\n}\n\nfunc nodesToNodesTable(nodes []string) stingray.NodesTable {\n\tt := []stingray.Node{}\n\n\tfor _, v := range nodes {\n\t\tt = append(t, stingray.Node{Node: stingray.String(v)})\n\t}\n\n\treturn t\n}\n\nfunc nodesTableToNodes(t []stingray.Node) []string {\n\tnodes := []string{}\n\n\tfor _, v := range t {\n\t\t\/\/ A node deleted from the web UI will still exist in\n\t\t\/\/ the nodes_table, but state and weight will not\n\t\t\/\/ exist\n\t\tif v.State != nil {\n\t\t\tnodes = append(nodes, *v.Node)\n\t\t}\n\t}\n\n\treturn nodes\n}\npool: Add missing parameters in udp sectionpackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/whitepages\/go-stingray\"\n)\n\nfunc resourcePool() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourcePoolCreate,\n\t\tRead: resourcePoolRead,\n\t\tUpdate: resourcePoolUpdate,\n\t\tDelete: resourcePoolDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"bandwidth_class\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"connection_max_connect_time\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 4,\n\t\t\t},\n\n\t\t\t\"connection_max_connections_per_node\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 0,\n\t\t\t},\n\n\t\t\t\"connection_max_queue_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 0,\n\t\t\t},\n\n\t\t\t\"connection_max_reply_time\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 30,\n\t\t\t},\n\n\t\t\t\"connection_queue_timeout\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 10,\n\t\t\t},\n\n\t\t\t\"failure_pool\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"load_balancing_algorithm\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"round_robin\",\n\t\t\t},\n\n\t\t\t\"load_balancing_priority_enabled\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t},\n\n\t\t\t\"load_balancing_priority_nodes\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_idle_connections_pernode\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"max_timed_out_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"monitors\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\n\t\t\t\"node_close_with_rst\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"node_connection_attempts\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"nodes\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\n\t\t\t\"note\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"\",\n\t\t\t},\n\n\t\t\t\"passive_monitoring\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t},\n\n\t\t\t\"persistence_class\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tcp_nagle\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t},\n\n\t\t\t\"transparent\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"udp_accept_from\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"udp_accept_from_mask\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourcePoolCreate(d *schema.ResourceData, meta interface{}) error {\n\terr := resourcePoolSet(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn resourcePoolRead(d, meta)\n}\n\nfunc resourcePoolRead(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\n\tr, resp, err := c.GetPool(d.Get(\"name\").(string))\n\tif err != nil {\n\t\tif resp != nil && resp.StatusCode == 404 {\n\t\t\t\/\/ The resource doesn't exist anymore\n\t\t\td.SetId(\"\")\n\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error reading resource: %s\", err)\n\t}\n\n\td.Set(\"bandwidth_class\", string(*r.Basic.BandwidthClass))\n\td.Set(\"connection_max_connect_time\", int(*r.Connection.MaxConnectTime))\n\td.Set(\"connection_max_connections_per_node\", int(*r.Connection.MaxConnectionsPerNode))\n\td.Set(\"connection_max_queue_size\", int(*r.Connection.MaxQueueSize))\n\td.Set(\"connection_max_reply_time\", int(*r.Connection.MaxReplyTime))\n\td.Set(\"connection_queue_timeout\", int(*r.Connection.QueueTimeout))\n\td.Set(\"failure_pool\", string(*r.Basic.FailurePool))\n\td.Set(\"load_balancing_algorithm\", string(*r.LoadBalancing.Algorithm))\n\td.Set(\"load_balancing_priority_enabled\", bool(*r.LoadBalancing.PriorityEnabled))\n\td.Set(\"load_balancing_priority_nodes\", int(*r.LoadBalancing.PriorityNodes))\n\td.Set(\"max_connection_attempts\", int(*r.Basic.MaxConnectionAttempts))\n\td.Set(\"max_idle_connections_pernode\", int(*r.Basic.MaxIdleConnectionsPerNode))\n\td.Set(\"max_timed_out_connection_attempts\", int(*r.Basic.MaxTimedOutConnectionAttempts))\n\td.Set(\"monitors\", []string(*r.Basic.Monitors))\n\td.Set(\"node_close_with_rst\", bool(*r.Basic.NodeCloseWithRST))\n\td.Set(\"node_connection_attempts\", int(*r.Basic.NodeConnectionAttempts))\n\td.Set(\"nodes\", nodesTableToNodes(*r.Basic.NodesTable))\n\td.Set(\"note\", string(*r.Basic.Note))\n\td.Set(\"passive_monitoring\", bool(*r.Basic.PassiveMonitoring))\n\td.Set(\"persistence_class\", string(*r.Basic.PersistenceClass))\n\td.Set(\"tcp_nagle\", bool(*r.TCP.Nagle))\n\td.Set(\"udp_accept_from\", string(*r.UDP.AcceptFrom))\n\td.Set(\"udp_accept_from_mask_class\", string(*r.UDP.AcceptFromMask))\n\td.Set(\"transparent\", bool(*r.Basic.Transparent))\n\n\treturn nil\n}\n\nfunc resourcePoolUpdate(d *schema.ResourceData, meta interface{}) error {\n\terr := resourcePoolSet(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn resourcePoolRead(d, meta)\n}\n\nfunc resourcePoolDelete(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\tr := stingray.NewPool(d.Id())\n\n\t_, err := c.Delete(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourcePoolSet(d *schema.ResourceData, meta interface{}) error {\n\tc := meta.(*providerConfig).client\n\tr := stingray.NewPool(d.Get(\"name\").(string))\n\n\tsetString(&r.Basic.BandwidthClass, d, \"bandwidth_class\")\n\tsetInt(&r.Connection.MaxConnectTime, d, \"connection_max_connect_time\")\n\tsetInt(&r.Connection.MaxConnectionsPerNode, d, \"connection_max_connections_per_node\")\n\tsetInt(&r.Connection.MaxQueueSize, d, \"connection_max_queue_size\")\n\tsetInt(&r.Connection.MaxReplyTime, d, \"connection_max_reply_time\")\n\tsetInt(&r.Connection.QueueTimeout, d, \"connection_queue_timeout\")\n\tsetString(&r.Basic.FailurePool, d, \"failure_pool\")\n\tsetString(&r.LoadBalancing.Algorithm, d, \"load_balancing_algorithm\")\n\tsetBool(&r.LoadBalancing.PriorityEnabled, d, \"load_balancing_priority_enabled\")\n\tsetInt(&r.LoadBalancing.PriorityNodes, d, \"load_balancing_priority_nodes\")\n\tsetInt(&r.Basic.MaxConnectionAttempts, d, \"max_connection_attempts\")\n\tsetInt(&r.Basic.MaxIdleConnectionsPerNode, d, \"max_idle_connections_pernode\")\n\tsetInt(&r.Basic.MaxTimedOutConnectionAttempts, d, \"max_timed_out_connection_attempts\")\n\tsetStringSet(&r.Basic.Monitors, d, \"monitors\")\n\tsetBool(&r.Basic.NodeCloseWithRST, d, \"node_close_with_rst\")\n\tsetInt(&r.Basic.NodeConnectionAttempts, d, \"node_connection_attempts\")\n\tsetNodesTable(&r.Basic.NodesTable, d, \"nodes\")\n\tsetString(&r.Basic.Note, d, \"note\")\n\tsetBool(&r.Basic.PassiveMonitoring, d, \"passive_monitoring\")\n\tsetString(&r.Basic.PersistenceClass, d, \"persistence_class\")\n\tsetBool(&r.TCP.Nagle, d, \"tcp_nagle\")\n\tsetBool(&r.Basic.Transparent, d, \"transparent\")\n\tsetString(&r.UDP.AcceptFrom, d, \"udp_accept_from\")\n\tsetString(&r.UDP.AcceptFromMask, d, \"udp_accept_from_mask\")\n\n\t_, err := c.Set(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(d.Get(\"name\").(string))\n\n\treturn nil\n}\n\nfunc setNodesTable(target **stingray.NodesTable, d *schema.ResourceData, key string) {\n\tif _, ok := d.GetOk(key); ok {\n\t\tvar nodes []string\n\t\tif v := d.Get(key).(*schema.Set); v.Len() > 0 {\n\t\t\tnodes = make([]string, v.Len())\n\t\t\tfor i, v := range v.List() {\n\t\t\t\tnodes[i] = v.(string)\n\t\t\t}\n\t\t}\n\t\tnodesTable := nodesToNodesTable(nodes)\n\t\t*target = &nodesTable\n\t}\n}\n\nfunc nodesToNodesTable(nodes []string) stingray.NodesTable {\n\tt := []stingray.Node{}\n\n\tfor _, v := range nodes {\n\t\tt = append(t, stingray.Node{Node: stingray.String(v)})\n\t}\n\n\treturn t\n}\n\nfunc nodesTableToNodes(t []stingray.Node) []string {\n\tnodes := []string{}\n\n\tfor _, v := range t {\n\t\t\/\/ A node deleted from the web UI will still exist in\n\t\t\/\/ the nodes_table, but state and weight will not\n\t\t\/\/ exist\n\t\tif v.State != nil {\n\t\t\tnodes = append(nodes, *v.Node)\n\t\t}\n\t}\n\n\treturn nodes\n}\n<|endoftext|>"} {"text":"\/\/ The purpose of this tool is to provide a realistic load on the\n\/\/ system. It creates 98 albums, each contains 20 images. In total, it\n\/\/ sends 10094 requests.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/zitryss\/aye-and-nay\/pkg\/debug\"\n)\n\nvar (\n\taddress string\n\tconnections int\n\ttestdata string\n)\n\nfunc main() {\n\tflag.StringVar(&address, \"address\", \"https:\/\/localhost:8001\", \"\")\n\tflag.IntVar(&connections, \"connections\", 25, \"\")\n\tflag.StringVar(&testdata, \"testdata\", \".\/testdata\", \"\")\n\tflag.Parse()\n\n\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\n\tsem := make(chan struct{}, connections)\n\tfor i := 0; i < 98; i++ {\n\t\tsem <- struct{}{}\n\t\tgo func() {\n\t\t\tdefer func() { <-sem }()\n\t\t\tsession := albumHtml()\n\t\t\tid := albumApi(session)\n\t\t\treadyApi(session, id)\n\t\t\tfor j := 0; j < 4; j++ {\n\t\t\t\tpairHtml(session, id)\n\t\t\t\tfor k := 0; k < 11; k++ {\n\t\t\t\t\ttoken1, token2 := pairApi(session, id)\n\t\t\t\t\tvoteApi(session, id, token1, token2)\n\t\t\t\t}\n\t\t\t\ttopHtml(session, id)\n\t\t\t\ttopApi(session, id)\n\t\t\t}\n\t\t}()\n\t}\n\tfor i := 0; i < connections; i++ {\n\t\tsem <- struct{}{}\n\t}\n}\n\nfunc albumHtml() string {\n\tresp, err := http.DefaultClient.Get(address + \"\/\")\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n\thead := resp.Header.Get(\"Set-Cookie\")\n\tstr := strings.TrimPrefix(head, \"session=\")\n\tsubstr := strings.Split(str, \";\")\n\tsession := substr[0]\n\treturn session\n}\n\nfunc albumApi(session string) string {\n\tbody := bytes.Buffer{}\n\tmulti := multipart.NewWriter(&body)\n\tfor i := 0; i < 4; i++ {\n\t\tfor _, filename := range []string{\"alan.jpg\", \"john.bmp\", \"dennis.png\", \"tim.gif\", \"linus.jpg\"} {\n\t\t\tpart, err := multi.CreateFormFile(\"images\", filename)\n\t\t\tdebug.Check(err)\n\t\t\tb, err := ioutil.ReadFile(testdata + \"\/\" + filename)\n\t\t\tdebug.Check(err)\n\t\t\t_, err = part.Write(b)\n\t\t\tdebug.Check(err)\n\t\t}\n\t}\n\terr := multi.Close()\n\tdebug.Check(err)\n\n\treq, err := http.NewRequest(\"POST\", address+\"\/api\/albums\/\", &body)\n\tdebug.Check(err)\n\treq.Header.Set(\"Content-Type\", multi.FormDataContentType())\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 201)\n\n\ttype result struct {\n\t\tAlbum struct {\n\t\t\tId string\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n\n\treturn res.Album.Id\n}\n\nfunc readyApi(session string, id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/ready\/\", nil)\n\tdebug.Check(err)\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\n\ttype result struct {\n\t\tAlbum struct {\n\t\t\tProgress float64\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc pairHtml(session string, id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/albums\/\"+id+\"\/\", nil)\n\tdebug.Check(err)\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc pairApi(session string, id string) (string, string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/\", nil)\n\tdebug.Check(err)\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\n\ttype result struct {\n\t\tImg1 struct {\n\t\t\tToken string\n\t\t\tSrc string\n\t\t}\n\t\tImg2 struct {\n\t\t\tToken string\n\t\t\tSrc string\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n\n\treturn res.Img1.Token, res.Img2.Token\n}\n\nfunc voteApi(session string, id string, token1 string, token2 string) {\n\tbody := strings.NewReader(\"{\\\"album\\\":{\\\"imgFrom\\\":{\\\"token\\\":\\\"\" + token1 + \"\\\"},\\\"imgTo\\\":{\\\"token\\\":\\\"\" + token2 + \"\\\"}}}\")\n\treq, err := http.NewRequest(\"PATCH\", address+\"\/api\/albums\/\"+id+\"\/\", body)\n\tdebug.Check(err)\n\treq.Header.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc topHtml(session string, id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/albums\/\"+id+\"\/top\/\", nil)\n\tdebug.Check(err)\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc topApi(session string, id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/top\/\", nil)\n\tdebug.Check(err)\n\tc := http.Cookie{}\n\tc.Name = \"session\"\n\tc.Value = session\n\treq.AddCookie(&c)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\nDelete HTML and session requests\/\/ The purpose of this tool is to provide a realistic load on the\n\/\/ system. It creates 98 albums, each contains 20 images. In total, it\n\/\/ sends 9212 requests.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/zitryss\/aye-and-nay\/pkg\/debug\"\n)\n\nvar (\n\taddress string\n\tconnections int\n\ttestdata string\n)\n\nfunc main() {\n\tflag.StringVar(&address, \"address\", \"https:\/\/localhost:8001\", \"\")\n\tflag.IntVar(&connections, \"connections\", 25, \"\")\n\tflag.StringVar(&testdata, \"testdata\", \".\/testdata\", \"\")\n\tflag.Parse()\n\n\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\n\tsem := make(chan struct{}, connections)\n\tfor i := 0; i < 98; i++ {\n\t\tsem <- struct{}{}\n\t\tgo func() {\n\t\t\tdefer func() { <-sem }()\n\t\t\tid := albumApi()\n\t\t\treadyApi(id)\n\t\t\tfor j := 0; j < 4; j++ {\n\t\t\t\tfor k := 0; k < 11; k++ {\n\t\t\t\t\ttoken1, token2 := pairApi(id)\n\t\t\t\t\tvoteApi(id, token1, token2)\n\t\t\t\t}\n\t\t\t\ttopApi(id)\n\t\t\t}\n\t\t}()\n\t}\n\tfor i := 0; i < connections; i++ {\n\t\tsem <- struct{}{}\n\t}\n}\n\nfunc albumApi() string {\n\tbody := bytes.Buffer{}\n\tmulti := multipart.NewWriter(&body)\n\tfor i := 0; i < 4; i++ {\n\t\tfor _, filename := range []string{\"alan.jpg\", \"john.bmp\", \"dennis.png\", \"tim.gif\", \"linus.jpg\"} {\n\t\t\tpart, err := multi.CreateFormFile(\"images\", filename)\n\t\t\tdebug.Check(err)\n\t\t\tb, err := ioutil.ReadFile(testdata + \"\/\" + filename)\n\t\t\tdebug.Check(err)\n\t\t\t_, err = part.Write(b)\n\t\t\tdebug.Check(err)\n\t\t}\n\t}\n\terr := multi.Close()\n\tdebug.Check(err)\n\n\treq, err := http.NewRequest(\"POST\", address+\"\/api\/albums\/\", &body)\n\tdebug.Check(err)\n\treq.Header.Set(\"Content-Type\", multi.FormDataContentType())\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 201)\n\n\ttype result struct {\n\t\tAlbum struct {\n\t\t\tId string\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n\n\treturn res.Album.Id\n}\n\nfunc readyApi(id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/ready\/\", nil)\n\tdebug.Check(err)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\n\ttype result struct {\n\t\tAlbum struct {\n\t\t\tProgress float64\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc pairApi(id string) (string, string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/\", nil)\n\tdebug.Check(err)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\n\ttype result struct {\n\t\tImg1 struct {\n\t\t\tToken string\n\t\t\tSrc string\n\t\t}\n\t\tImg2 struct {\n\t\t\tToken string\n\t\t\tSrc string\n\t\t}\n\t}\n\n\tres := result{}\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n\n\treturn res.Img1.Token, res.Img2.Token\n}\n\nfunc voteApi(id string, token1 string, token2 string) {\n\tbody := strings.NewReader(\"{\\\"album\\\":{\\\"imgFrom\\\":{\\\"token\\\":\\\"\" + token1 + \"\\\"},\\\"imgTo\\\":{\\\"token\\\":\\\"\" + token2 + \"\\\"}}}\")\n\treq, err := http.NewRequest(\"PATCH\", address+\"\/api\/albums\/\"+id+\"\/\", body)\n\tdebug.Check(err)\n\treq.Header.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n\nfunc topApi(id string) {\n\treq, err := http.NewRequest(\"GET\", address+\"\/api\/albums\/\"+id+\"\/top\/\", nil)\n\tdebug.Check(err)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tdebug.Check(err)\n\tdebug.Assert(resp.StatusCode == 200)\n\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\tdebug.Check(err)\n\terr = resp.Body.Close()\n\tdebug.Check(err)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc newProjectInDir(path string) error {\n\t\/\/ set path to be nested inside $GOPATH\/src\n\tgopath := os.Getenv(\"GOPATH\")\n\tpath = filepath.Join(gopath, \"src\", path)\n\n\t\/\/ check if anything exists at the path, ask if it should be overwritten\n\tif _, err := os.Stat(path); !os.IsNotExist(err) {\n\t\tfmt.Println(\"Path exists, overwrite contents? (y\/N):\")\n\n\t\tvar answer string\n\t\t_, err := fmt.Scanf(\"%s\\n\", &answer)\n\t\tif err != nil {\n\t\t\tif err.Error() == \"unexpected newline\" {\n\t\t\t\tanswer = \"\"\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tanswer = strings.ToLower(answer)\n\n\t\tswitch answer {\n\t\tcase \"n\", \"no\", \"\\r\\n\", \"\\n\", \"\":\n\t\t\tfmt.Println(\"\")\n\n\t\tcase \"y\", \"yes\":\n\t\t\terr := os.RemoveAll(path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to overwrite %s. \\n%s\", path, err)\n\t\t\t}\n\n\t\t\treturn createProjInDir(path)\n\n\t\tdefault:\n\t\t\tfmt.Println(\"Input not recognized. No files overwritten. Answer as 'y' or 'n' only.\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn createProjInDir(path)\n}\n\nvar ponzuRepo = []string{\"github.com\", \"bosssauce\", \"ponzu\"}\n\nfunc createProjInDir(path string) error {\n\tgopath := os.Getenv(\"GOPATH\")\n\trepo := ponzuRepo\n\tlocal := filepath.Join(gopath, \"src\", filepath.Join(repo...))\n\tnetwork := \"https:\/\/\" + strings.Join(repo, \"\/\") + \".git\"\n\n\t\/\/ create the directory or overwrite it\n\terr := os.MkdirAll(path, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dev {\n\t\tif fork != \"\" {\n\t\t\tlocal = filepath.Join(gopath, \"src\", fork)\n\t\t}\n\n\t\tdevClone := exec.Command(\"git\", \"clone\", local, \"--branch\", \"ponzu-dev\", \"--single-branch\", path)\n\t\tdevClone.Stdout = os.Stdout\n\t\tdevClone.Stderr = os.Stderr\n\n\t\terr = devClone.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = devClone.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = vendorCorePackages(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Dev build cloned from \" + local + \":ponzu-dev\")\n\t\treturn nil\n\t}\n\n\t\/\/ try to git clone the repository from the local machine's $GOPATH\n\tlocalClone := exec.Command(\"git\", \"clone\", local, path)\n\tlocalClone.Stdout = os.Stdout\n\tlocalClone.Stderr = os.Stderr\n\n\terr = localClone.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = localClone.Wait()\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't clone from\", local, \". Trying network...\")\n\n\t\t\/\/ try to git clone the repository over the network\n\t\tnetworkClone := exec.Command(\"git\", \"clone\", network, path)\n\t\tnetworkClone.Stdout = os.Stdout\n\t\tnetworkClone.Stderr = os.Stderr\n\n\t\terr = networkClone.Start()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Network clone failed to start. Try again and make sure you have a network connection.\")\n\t\t\treturn err\n\t\t}\n\t\terr = networkClone.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Network clone failure.\")\n\t\t\t\/\/ failed\n\t\t\treturn fmt.Errorf(\"Failed to clone files from local machine [%s] and over the network [%s].\\n%s\", local, network, err)\n\t\t}\n\t}\n\n\t\/\/ create an internal vendor directory in .\/cmd\/ponzu and move content,\n\t\/\/ management and system packages into it\n\terr = vendorCorePackages(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitDir := filepath.Join(path, \".git\")\n\terr = os.RemoveAll(gitDir)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to remove .git directory from your project path. Consider removing it manually.\")\n\t}\n\n\tfmt.Println(\"New ponzu project created at\", path)\n\treturn nil\n}\n\nfunc vendorCorePackages(path string) error {\n\tvendorPath := filepath.Join(path, \"cmd\", \"ponzu\", \"vendor\", \"github.com\", \"bosssauce\", \"ponzu\")\n\terr := os.MkdirAll(vendorPath, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdirs := []string{\"content\", \"management\", \"system\"}\n\tfor _, dir := range dirs {\n\t\terr = os.Rename(filepath.Join(path, dir), filepath.Join(vendorPath, dir))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ create a user content directory\n\tcontentPath := filepath.Join(path, \"content\")\n\terr = os.Mkdir(contentPath, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc copyFile(info os.FileInfo, src string, dst string) error {\n\tdstFile, err := os.Create(filepath.Join(dst, info.Name()))\n\tdefer dstFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcFile, err := os.Open(filepath.Join(src, info.Name()))\n\tdefer srcFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(dstFile, srcFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc copyFilesWarnConflicts(srcDir, dstDir string, conflicts []string) error {\n\terr := filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {\n\t\tfmt.Println(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfmt.Println(path, srcDir)\n\t\t\tif len(path) > len(srcDir) {\n\t\t\t\tpath = path[len(srcDir)+1:]\n\t\t\t}\n\t\t\tdir := filepath.Join(dstDir, path)\n\t\t\terr := os.MkdirAll(dir, os.ModeDir|os.ModePerm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, conflict := range conflicts {\n\t\t\tif info.Name() == conflict {\n\t\t\t\tfmt.Println(\"Ponzu couldn't fully build your project:\")\n\t\t\t\tfmt.Println(\"You must rename the following file, as it conflicts with Ponzu core:\")\n\t\t\t\tfmt.Println(path)\n\n\t\t\t\tfmt.Println(\"Once the files above have been renamed, run '$ ponzu build' to retry.\")\n\t\t\t\treturn errors.New(\"Ponzu has very few internal conflicts, sorry for the inconvenience.\")\n\t\t\t}\n\t\t}\n\n\t\terr = copyFile(info, srcDir, dstDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc buildPonzuServer(args []string) error {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy all .\/content files to internal vendor directory\n\tsrc := \"content\"\n\tdst := filepath.Join(\"cmd\", \"ponzu\", \"vendor\", \"github.com\", \"bosssauce\", \"ponzu\", \"content\")\n\terr = copyFilesWarnConflicts(src, dst, []string{\"item.go\", \"types.go\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy all .\/addons files & dirs to internal vendor directory\n\tsrc = \"addons\"\n\tdst = filepath.Join(\"cmd\", \"ponzu\", \"vendor\")\n\terr = copyFilesWarnConflicts(src, dst, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ execute go build -o ponzu-cms cmd\/ponzu\/*.go\n\tmainPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"main.go\")\n\toptsPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"options.go\")\n\tgenPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"generate.go\")\n\tbuild := exec.Command(\"go\", \"build\", \"-o\", \"ponzu-server\", mainPath, optsPath, genPath)\n\tbuild.Stderr = os.Stderr\n\tbuild.Stdout = os.Stdout\n\n\terr = build.Start()\n\tif err != nil {\n\t\treturn errors.New(\"Ponzu build step failed. Please try again. \" + \"\\n\" + err.Error())\n\n\t}\n\terr = build.Wait()\n\tif err != nil {\n\t\treturn errors.New(\"Ponzu build step failed. Please try again. \" + \"\\n\" + err.Error())\n\n\t}\n\n\treturn nil\n}\ndebugpackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc newProjectInDir(path string) error {\n\t\/\/ set path to be nested inside $GOPATH\/src\n\tgopath := os.Getenv(\"GOPATH\")\n\tpath = filepath.Join(gopath, \"src\", path)\n\n\t\/\/ check if anything exists at the path, ask if it should be overwritten\n\tif _, err := os.Stat(path); !os.IsNotExist(err) {\n\t\tfmt.Println(\"Path exists, overwrite contents? (y\/N):\")\n\n\t\tvar answer string\n\t\t_, err := fmt.Scanf(\"%s\\n\", &answer)\n\t\tif err != nil {\n\t\t\tif err.Error() == \"unexpected newline\" {\n\t\t\t\tanswer = \"\"\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tanswer = strings.ToLower(answer)\n\n\t\tswitch answer {\n\t\tcase \"n\", \"no\", \"\\r\\n\", \"\\n\", \"\":\n\t\t\tfmt.Println(\"\")\n\n\t\tcase \"y\", \"yes\":\n\t\t\terr := os.RemoveAll(path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to overwrite %s. \\n%s\", path, err)\n\t\t\t}\n\n\t\t\treturn createProjInDir(path)\n\n\t\tdefault:\n\t\t\tfmt.Println(\"Input not recognized. No files overwritten. Answer as 'y' or 'n' only.\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn createProjInDir(path)\n}\n\nvar ponzuRepo = []string{\"github.com\", \"bosssauce\", \"ponzu\"}\n\nfunc createProjInDir(path string) error {\n\tgopath := os.Getenv(\"GOPATH\")\n\trepo := ponzuRepo\n\tlocal := filepath.Join(gopath, \"src\", filepath.Join(repo...))\n\tnetwork := \"https:\/\/\" + strings.Join(repo, \"\/\") + \".git\"\n\n\t\/\/ create the directory or overwrite it\n\terr := os.MkdirAll(path, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dev {\n\t\tif fork != \"\" {\n\t\t\tlocal = filepath.Join(gopath, \"src\", fork)\n\t\t}\n\n\t\tdevClone := exec.Command(\"git\", \"clone\", local, \"--branch\", \"ponzu-dev\", \"--single-branch\", path)\n\t\tdevClone.Stdout = os.Stdout\n\t\tdevClone.Stderr = os.Stderr\n\n\t\terr = devClone.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = devClone.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = vendorCorePackages(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Dev build cloned from \" + local + \":ponzu-dev\")\n\t\treturn nil\n\t}\n\n\t\/\/ try to git clone the repository from the local machine's $GOPATH\n\tlocalClone := exec.Command(\"git\", \"clone\", local, path)\n\tlocalClone.Stdout = os.Stdout\n\tlocalClone.Stderr = os.Stderr\n\n\terr = localClone.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = localClone.Wait()\n\tif err != nil {\n\t\tfmt.Println(\"Couldn't clone from\", local, \". Trying network...\")\n\n\t\t\/\/ try to git clone the repository over the network\n\t\tnetworkClone := exec.Command(\"git\", \"clone\", network, path)\n\t\tnetworkClone.Stdout = os.Stdout\n\t\tnetworkClone.Stderr = os.Stderr\n\n\t\terr = networkClone.Start()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Network clone failed to start. Try again and make sure you have a network connection.\")\n\t\t\treturn err\n\t\t}\n\t\terr = networkClone.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Network clone failure.\")\n\t\t\t\/\/ failed\n\t\t\treturn fmt.Errorf(\"Failed to clone files from local machine [%s] and over the network [%s].\\n%s\", local, network, err)\n\t\t}\n\t}\n\n\t\/\/ create an internal vendor directory in .\/cmd\/ponzu and move content,\n\t\/\/ management and system packages into it\n\terr = vendorCorePackages(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitDir := filepath.Join(path, \".git\")\n\terr = os.RemoveAll(gitDir)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to remove .git directory from your project path. Consider removing it manually.\")\n\t}\n\n\tfmt.Println(\"New ponzu project created at\", path)\n\treturn nil\n}\n\nfunc vendorCorePackages(path string) error {\n\tvendorPath := filepath.Join(path, \"cmd\", \"ponzu\", \"vendor\", \"github.com\", \"bosssauce\", \"ponzu\")\n\terr := os.MkdirAll(vendorPath, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdirs := []string{\"content\", \"management\", \"system\"}\n\tfor _, dir := range dirs {\n\t\terr = os.Rename(filepath.Join(path, dir), filepath.Join(vendorPath, dir))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ create a user content directory\n\tcontentPath := filepath.Join(path, \"content\")\n\terr = os.Mkdir(contentPath, os.ModeDir|os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc copyFile(src, dst string) error {\n\tnoRoot := strings.Split(src, string(filepath.Separator))[1:]\n\tpath := filepath.Join(noRoot...)\n\tdstFile, err := os.Create(filepath.Join(dst, path))\n\tdefer dstFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcFile, err := os.Open(src)\n\tdefer srcFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(dstFile, srcFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc copyFilesWarnConflicts(srcDir, dstDir string, conflicts []string) error {\n\terr := filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {\n\t\tfmt.Println(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfmt.Println(path, srcDir)\n\t\t\tif len(path) > len(srcDir) {\n\t\t\t\tpath = path[len(srcDir)+1:]\n\t\t\t}\n\t\t\tdir := filepath.Join(dstDir, path)\n\t\t\terr := os.MkdirAll(dir, os.ModeDir|os.ModePerm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, conflict := range conflicts {\n\t\t\tif info.Name() == conflict {\n\t\t\t\tfmt.Println(\"Ponzu couldn't fully build your project:\")\n\t\t\t\tfmt.Println(\"You must rename the following file, as it conflicts with Ponzu core:\")\n\t\t\t\tfmt.Println(path)\n\n\t\t\t\tfmt.Println(\"Once the files above have been renamed, run '$ ponzu build' to retry.\")\n\t\t\t\treturn errors.New(\"Ponzu has very few internal conflicts, sorry for the inconvenience.\")\n\t\t\t}\n\t\t}\n\n\t\terr = copyFile(path, dstDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc buildPonzuServer(args []string) error {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy all .\/content files to internal vendor directory\n\tsrc := \"content\"\n\tdst := filepath.Join(\"cmd\", \"ponzu\", \"vendor\", \"github.com\", \"bosssauce\", \"ponzu\", \"content\")\n\terr = copyFilesWarnConflicts(src, dst, []string{\"item.go\", \"types.go\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy all .\/addons files & dirs to internal vendor directory\n\tsrc = \"addons\"\n\tdst = filepath.Join(\"cmd\", \"ponzu\", \"vendor\")\n\terr = copyFilesWarnConflicts(src, dst, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ execute go build -o ponzu-cms cmd\/ponzu\/*.go\n\tmainPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"main.go\")\n\toptsPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"options.go\")\n\tgenPath := filepath.Join(pwd, \"cmd\", \"ponzu\", \"generate.go\")\n\tbuild := exec.Command(\"go\", \"build\", \"-o\", \"ponzu-server\", mainPath, optsPath, genPath)\n\tbuild.Stderr = os.Stderr\n\tbuild.Stdout = os.Stdout\n\n\terr = build.Start()\n\tif err != nil {\n\t\treturn errors.New(\"Ponzu build step failed. Please try again. \" + \"\\n\" + err.Error())\n\n\t}\n\terr = build.Wait()\n\tif err != nil {\n\t\treturn errors.New(\"Ponzu build step failed. Please try again. \" + \"\\n\" + err.Error())\n\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/*\n * MinIO Cloud Storage, (C) 2019 MinIO, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage logger\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/minio\/minio\/cmd\/config\"\n\t\"github.com\/minio\/minio\/pkg\/env\"\n)\n\n\/\/ Console logger target\ntype Console struct {\n\tEnabled bool `json:\"enabled\"`\n}\n\n\/\/ HTTP logger target\ntype HTTP struct {\n\tEnabled bool `json:\"enabled\"`\n\tEndpoint string `json:\"endpoint\"`\n\tAuthToken string `json:\"authToken\"`\n}\n\n\/\/ Config console and http logger targets\ntype Config struct {\n\tConsole Console `json:\"console\"`\n\tHTTP map[string]HTTP `json:\"http\"`\n\tAudit map[string]HTTP `json:\"audit\"`\n}\n\n\/\/ HTTP endpoint logger\nconst (\n\tEndpoint = \"endpoint\"\n\tAuthToken = \"auth_token\"\n\n\tEnvLoggerWebhookEndpoint = \"MINIO_LOGGER_WEBHOOK_ENDPOINT\"\n\tEnvLoggerWebhookAuthToken = \"MINIO_LOGGER_WEBHOOK_AUTH_TOKEN\"\n\n\tEnvAuditWebhookEndpoint = \"MINIO_AUDIT_WEBHOOK_ENDPOINT\"\n\tEnvAuditWebhookAuthToken = \"MINIO_AUDIT_WEBHOOK_AUTH_TOKEN\"\n)\n\n\/\/ Default KVS for loggerHTTP and loggerAuditHTTP\nvar (\n\tDefaultKVS = config.KVS{\n\t\tconfig.KV{\n\t\t\tKey: config.Enable,\n\t\t\tValue: config.EnableOff,\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: Endpoint,\n\t\t\tValue: \"\",\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: AuthToken,\n\t\t\tValue: \"\",\n\t\t},\n\t}\n\tDefaultAuditKVS = config.KVS{\n\t\tconfig.KV{\n\t\t\tKey: config.Enable,\n\t\t\tValue: config.EnableOff,\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: Endpoint,\n\t\t\tValue: \"\",\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: AuthToken,\n\t\t\tValue: \"\",\n\t\t},\n\t}\n)\n\n\/\/ NewConfig - initialize new logger config.\nfunc NewConfig() Config {\n\tcfg := Config{\n\t\t\/\/ Console logging is on by default\n\t\tConsole: Console{\n\t\t\tEnabled: true,\n\t\t},\n\t\tHTTP: make(map[string]HTTP),\n\t\tAudit: make(map[string]HTTP),\n\t}\n\n\t\/\/ Create an example HTTP logger\n\tcfg.HTTP[config.Default] = HTTP{\n\t\tEndpoint: \"https:\/\/username:password@example.com\/api\",\n\t}\n\n\t\/\/ Create an example Audit logger\n\tcfg.Audit[config.Default] = HTTP{\n\t\tEndpoint: \"https:\/\/username:password@example.com\/api\/audit\",\n\t}\n\n\treturn cfg\n}\n\n\/\/ LookupConfig - lookup logger config, override with ENVs if set.\nfunc LookupConfig(scfg config.Config) (Config, error) {\n\tcfg := NewConfig()\n\n\tenvs := env.List(EnvLoggerWebhookEndpoint)\n\tvar loggerTargets []string\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvLoggerWebhookEndpoint+config.Default)\n\t\tif target == EnvLoggerWebhookEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerTargets = append(loggerTargets, target)\n\t}\n\n\tvar loggerAuditTargets []string\n\tenvs = env.List(EnvAuditWebhookEndpoint)\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvAuditWebhookEndpoint+config.Default)\n\t\tif target == EnvAuditWebhookEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerAuditTargets = append(loggerAuditTargets, target)\n\t}\n\n\t\/\/ List legacy ENVs if any.\n\tenvs = env.List(EnvAuditLoggerHTTPEndpoint)\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvAuditLoggerHTTPEndpoint+config.Default)\n\t\tif target == EnvAuditLoggerHTTPEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerAuditTargets = append(loggerAuditTargets, target)\n\t}\n\n\tfor starget, kv := range scfg[config.LoggerWebhookSubSys] {\n\t\tsubSysTarget := config.LoggerWebhookSubSys\n\t\tif starget != config.Default {\n\t\t\tsubSysTarget = config.LoggerWebhookSubSys + config.SubSystemSeparator + starget\n\t\t}\n\t\tif err := config.CheckValidKeys(subSysTarget, kv, DefaultKVS); err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\n\t\tenabled, err := config.ParseBool(kv.Get(config.Enable))\n\t\tif err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\t\tif !enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tendpointEnv := EnvLoggerWebhookEndpoint\n\t\tif starget != config.Default {\n\t\t\tendpointEnv = EnvLoggerWebhookEndpoint + config.Default + starget\n\t\t}\n\t\tauthTokenEnv := EnvLoggerWebhookAuthToken\n\t\tif starget != config.Default {\n\t\t\tauthTokenEnv = EnvLoggerWebhookAuthToken + config.Default + starget\n\t\t}\n\t\tcfg.HTTP[starget] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: env.Get(endpointEnv, kv.Get(Endpoint)),\n\t\t\tAuthToken: env.Get(authTokenEnv, kv.Get(AuthToken)),\n\t\t}\n\t}\n\n\tfor starget, kv := range scfg[config.AuditWebhookSubSys] {\n\t\tsubSysTarget := config.AuditWebhookSubSys\n\t\tif starget != config.Default {\n\t\t\tsubSysTarget = config.AuditWebhookSubSys + config.SubSystemSeparator + starget\n\t\t}\n\t\tif err := config.CheckValidKeys(subSysTarget, kv, DefaultAuditKVS); err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\n\t\tenabled, err := config.ParseBool(kv.Get(config.Enable))\n\t\tif err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\t\tif !enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tendpointEnv := EnvAuditWebhookEndpoint\n\t\tif starget != config.Default {\n\t\t\tendpointEnv = EnvAuditWebhookEndpoint + config.Default + starget\n\t\t}\n\t\tlegacyEndpointEnv := EnvAuditLoggerHTTPEndpoint\n\t\tif starget != config.Default {\n\t\t\tlegacyEndpointEnv = EnvAuditLoggerHTTPEndpoint + config.Default + starget\n\t\t}\n\t\tendpoint := env.Get(legacyEndpointEnv, \"\")\n\t\tif endpoint == \"\" {\n\t\t\tendpoint = env.Get(endpointEnv, kv.Get(Endpoint))\n\t\t}\n\t\tauthTokenEnv := EnvAuditWebhookAuthToken\n\t\tif starget != config.Default {\n\t\t\tauthTokenEnv = EnvAuditWebhookAuthToken + config.Default + starget\n\t\t}\n\t\tcfg.Audit[starget] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: endpoint,\n\t\t\tAuthToken: env.Get(authTokenEnv, kv.Get(AuthToken)),\n\t\t}\n\t}\n\n\tfor _, target := range loggerTargets {\n\t\tendpointEnv := EnvLoggerWebhookEndpoint\n\t\tif target != config.Default {\n\t\t\tendpointEnv = EnvLoggerWebhookEndpoint + config.Default + target\n\t\t}\n\t\tauthTokenEnv := EnvLoggerWebhookAuthToken\n\t\tif target != config.Default {\n\t\t\tauthTokenEnv = EnvLoggerWebhookAuthToken + config.Default + target\n\t\t}\n\t\tcfg.HTTP[target] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: env.Get(endpointEnv, \"\"),\n\t\t\tAuthToken: env.Get(authTokenEnv, \"\"),\n\t\t}\n\t}\n\n\tfor _, target := range loggerAuditTargets {\n\t\tendpointEnv := EnvLoggerWebhookEndpoint\n\t\tif target != config.Default {\n\t\t\tendpointEnv = EnvLoggerWebhookEndpoint + config.Default + target\n\t\t}\n\t\tlegacyEndpointEnv := EnvAuditLoggerHTTPEndpoint\n\t\tif target != config.Default {\n\t\t\tlegacyEndpointEnv = EnvAuditLoggerHTTPEndpoint + config.Default + target\n\t\t}\n\t\tendpoint := env.Get(legacyEndpointEnv, \"\")\n\t\tif endpoint == \"\" {\n\t\t\tendpoint = env.Get(endpointEnv, \"\")\n\t\t}\n\t\tauthTokenEnv := EnvLoggerWebhookAuthToken\n\t\tif target != config.Default {\n\t\t\tauthTokenEnv = EnvLoggerWebhookAuthToken + config.Default + target\n\t\t}\n\t\tcfg.Audit[target] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: endpoint,\n\t\t\tAuthToken: env.Get(authTokenEnv, \"\"),\n\t\t}\n\t}\n\n\treturn cfg, nil\n}\nFix audit loading from the env and consider enable env variable (#9467)\/*\n * MinIO Cloud Storage, (C) 2019 MinIO, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage logger\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/minio\/minio\/cmd\/config\"\n\t\"github.com\/minio\/minio\/pkg\/env\"\n)\n\n\/\/ Console logger target\ntype Console struct {\n\tEnabled bool `json:\"enabled\"`\n}\n\n\/\/ HTTP logger target\ntype HTTP struct {\n\tEnabled bool `json:\"enabled\"`\n\tEndpoint string `json:\"endpoint\"`\n\tAuthToken string `json:\"authToken\"`\n}\n\n\/\/ Config console and http logger targets\ntype Config struct {\n\tConsole Console `json:\"console\"`\n\tHTTP map[string]HTTP `json:\"http\"`\n\tAudit map[string]HTTP `json:\"audit\"`\n}\n\n\/\/ HTTP endpoint logger\nconst (\n\tEndpoint = \"endpoint\"\n\tAuthToken = \"auth_token\"\n\n\tEnvLoggerWebhookEnable = \"MINIO_LOGGER_WEBHOOK_ENABLE\"\n\tEnvLoggerWebhookEndpoint = \"MINIO_LOGGER_WEBHOOK_ENDPOINT\"\n\tEnvLoggerWebhookAuthToken = \"MINIO_LOGGER_WEBHOOK_AUTH_TOKEN\"\n\n\tEnvAuditWebhookEnable = \"MINIO_AUDIT_WEBHOOK_ENABLE\"\n\tEnvAuditWebhookEndpoint = \"MINIO_AUDIT_WEBHOOK_ENDPOINT\"\n\tEnvAuditWebhookAuthToken = \"MINIO_AUDIT_WEBHOOK_AUTH_TOKEN\"\n)\n\n\/\/ Default KVS for loggerHTTP and loggerAuditHTTP\nvar (\n\tDefaultKVS = config.KVS{\n\t\tconfig.KV{\n\t\t\tKey: config.Enable,\n\t\t\tValue: config.EnableOff,\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: Endpoint,\n\t\t\tValue: \"\",\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: AuthToken,\n\t\t\tValue: \"\",\n\t\t},\n\t}\n\tDefaultAuditKVS = config.KVS{\n\t\tconfig.KV{\n\t\t\tKey: config.Enable,\n\t\t\tValue: config.EnableOff,\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: Endpoint,\n\t\t\tValue: \"\",\n\t\t},\n\t\tconfig.KV{\n\t\t\tKey: AuthToken,\n\t\t\tValue: \"\",\n\t\t},\n\t}\n)\n\n\/\/ NewConfig - initialize new logger config.\nfunc NewConfig() Config {\n\tcfg := Config{\n\t\t\/\/ Console logging is on by default\n\t\tConsole: Console{\n\t\t\tEnabled: true,\n\t\t},\n\t\tHTTP: make(map[string]HTTP),\n\t\tAudit: make(map[string]HTTP),\n\t}\n\n\t\/\/ Create an example HTTP logger\n\tcfg.HTTP[config.Default] = HTTP{\n\t\tEndpoint: \"https:\/\/username:password@example.com\/api\",\n\t}\n\n\t\/\/ Create an example Audit logger\n\tcfg.Audit[config.Default] = HTTP{\n\t\tEndpoint: \"https:\/\/username:password@example.com\/api\/audit\",\n\t}\n\n\treturn cfg\n}\n\n\/\/ LookupConfig - lookup logger config, override with ENVs if set.\nfunc LookupConfig(scfg config.Config) (Config, error) {\n\tcfg := NewConfig()\n\n\tenvs := env.List(EnvLoggerWebhookEndpoint)\n\tvar loggerTargets []string\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvLoggerWebhookEndpoint+config.Default)\n\t\tif target == EnvLoggerWebhookEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerTargets = append(loggerTargets, target)\n\t}\n\n\tvar loggerAuditTargets []string\n\tenvs = env.List(EnvAuditWebhookEndpoint)\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvAuditWebhookEndpoint+config.Default)\n\t\tif target == EnvAuditWebhookEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerAuditTargets = append(loggerAuditTargets, target)\n\t}\n\n\t\/\/ List legacy ENVs if any.\n\tenvs = env.List(EnvAuditLoggerHTTPEndpoint)\n\tfor _, k := range envs {\n\t\ttarget := strings.TrimPrefix(k, EnvAuditLoggerHTTPEndpoint+config.Default)\n\t\tif target == EnvAuditLoggerHTTPEndpoint {\n\t\t\ttarget = config.Default\n\t\t}\n\t\tloggerAuditTargets = append(loggerAuditTargets, target)\n\t}\n\n\t\/\/ Load HTTP logger from the environment if found\n\tfor _, target := range loggerTargets {\n\t\tenableEnv := EnvLoggerWebhookEnable\n\t\tif target != config.Default {\n\t\t\tenableEnv = EnvLoggerWebhookEnable + config.Default + target\n\t\t}\n\t\tenable, err := config.ParseBool(env.Get(enableEnv, config.EnableOn))\n\t\tif err != nil || !enable {\n\t\t\tcontinue\n\t\t}\n\t\tendpointEnv := EnvLoggerWebhookEndpoint\n\t\tif target != config.Default {\n\t\t\tendpointEnv = EnvLoggerWebhookEndpoint + config.Default + target\n\t\t}\n\t\tauthTokenEnv := EnvLoggerWebhookAuthToken\n\t\tif target != config.Default {\n\t\t\tauthTokenEnv = EnvLoggerWebhookAuthToken + config.Default + target\n\t\t}\n\t\tcfg.HTTP[target] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: env.Get(endpointEnv, \"\"),\n\t\t\tAuthToken: env.Get(authTokenEnv, \"\"),\n\t\t}\n\t}\n\n\tfor _, target := range loggerAuditTargets {\n\t\tenableEnv := EnvAuditWebhookEnable\n\t\tif target != config.Default {\n\t\t\tenableEnv = EnvAuditWebhookEnable + config.Default + target\n\t\t}\n\t\tenable, err := config.ParseBool(env.Get(enableEnv, config.EnableOn))\n\t\tif err != nil || !enable {\n\t\t\tcontinue\n\t\t}\n\t\tendpointEnv := EnvAuditWebhookEndpoint\n\t\tif target != config.Default {\n\t\t\tendpointEnv = EnvAuditWebhookEndpoint + config.Default + target\n\t\t}\n\t\tlegacyEndpointEnv := EnvAuditLoggerHTTPEndpoint\n\t\tif target != config.Default {\n\t\t\tlegacyEndpointEnv = EnvAuditLoggerHTTPEndpoint + config.Default + target\n\t\t}\n\t\tendpoint := env.Get(legacyEndpointEnv, \"\")\n\t\tif endpoint == \"\" {\n\t\t\tendpoint = env.Get(endpointEnv, \"\")\n\t\t}\n\t\tauthTokenEnv := EnvAuditWebhookAuthToken\n\t\tif target != config.Default {\n\t\t\tauthTokenEnv = EnvAuditWebhookAuthToken + config.Default + target\n\t\t}\n\t\tcfg.Audit[target] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: endpoint,\n\t\t\tAuthToken: env.Get(authTokenEnv, \"\"),\n\t\t}\n\t}\n\n\tfor starget, kv := range scfg[config.LoggerWebhookSubSys] {\n\t\tif l, ok := cfg.HTTP[starget]; ok && l.Enabled {\n\t\t\t\/\/ Ignore this HTTP logger config since there is\n\t\t\t\/\/ a target with the same name loaded and enabled\n\t\t\t\/\/ from the environment.\n\t\t\tcontinue\n\t\t}\n\t\tsubSysTarget := config.LoggerWebhookSubSys\n\t\tif starget != config.Default {\n\t\t\tsubSysTarget = config.LoggerWebhookSubSys + config.SubSystemSeparator + starget\n\t\t}\n\t\tif err := config.CheckValidKeys(subSysTarget, kv, DefaultKVS); err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\n\t\tenabled, err := config.ParseBool(kv.Get(config.Enable))\n\t\tif err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\t\tif !enabled {\n\t\t\tcontinue\n\t\t}\n\t\tcfg.HTTP[starget] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: kv.Get(Endpoint),\n\t\t\tAuthToken: kv.Get(AuthToken),\n\t\t}\n\t}\n\n\tfor starget, kv := range scfg[config.AuditWebhookSubSys] {\n\t\tif l, ok := cfg.Audit[starget]; ok && l.Enabled {\n\t\t\t\/\/ Ignore this audit config since another target\n\t\t\t\/\/ with the same name is already loaded and enabled\n\t\t\t\/\/ in the shell environment.\n\t\t\tcontinue\n\t\t}\n\t\tsubSysTarget := config.AuditWebhookSubSys\n\t\tif starget != config.Default {\n\t\t\tsubSysTarget = config.AuditWebhookSubSys + config.SubSystemSeparator + starget\n\t\t}\n\t\tif err := config.CheckValidKeys(subSysTarget, kv, DefaultAuditKVS); err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\t\tenabled, err := config.ParseBool(kv.Get(config.Enable))\n\t\tif err != nil {\n\t\t\treturn cfg, err\n\t\t}\n\t\tif !enabled {\n\t\t\tcontinue\n\t\t}\n\t\tcfg.Audit[starget] = HTTP{\n\t\t\tEnabled: true,\n\t\t\tEndpoint: kv.Get(Endpoint),\n\t\t\tAuthToken: kv.Get(AuthToken),\n\t\t}\n\t}\n\n\treturn cfg, nil\n}\n<|endoftext|>"} {"text":"\/*\nCommand mailfull is a CLI application using the mailfull package.\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/directorz\/mailfull-go\"\n\t\"github.com\/directorz\/mailfull-go\/cmd\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nvar (\n\tversion = mailfull.Version\n\tgittag = \"\"\n)\n\nfunc init() {\n\tif gittag != \"\" {\n\t\tversion = version + \"-\" + gittag\n\t}\n}\n\nfunc main() {\n\tc := &cli.CLI{\n\t\tName: filepath.Base(os.Args[0]),\n\t\tVersion: version,\n\t\tArgs: os.Args[1:],\n\t}\n\n\tmeta := cmd.Meta{\n\t\tUI: &cli.BasicUi{\n\t\t\tReader: os.Stdin,\n\t\t\tWriter: os.Stdout,\n\t\t\tErrorWriter: os.Stderr,\n\t\t},\n\t\tCmdName: c.Name,\n\t\tVersion: c.Version,\n\t}\n\n\tc.Commands = map[string]cli.CommandFactory{\n\t\t\"init\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdInit{Meta: meta}, nil\n\t\t},\n\t\t\"genconfig\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdGenConfig{Meta: meta}, nil\n\t\t},\n\t\t\"domains\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomains{Meta: meta}, nil\n\t\t},\n\t\t\"domainadd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainAdd{Meta: meta}, nil\n\t\t},\n\t\t\"domaindel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainDel{Meta: meta}, nil\n\t\t},\n\t\t\"domaindisable\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainDisable{Meta: meta}, nil\n\t\t},\n\t\t\"domainenable\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainEnable{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomains\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomains{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomainadd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomainAdd{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomaindel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomainDel{Meta: meta}, nil\n\t\t},\n\t\t\"users\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUsers{Meta: meta}, nil\n\t\t},\n\t\t\"useradd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserAdd{Meta: meta}, nil\n\t\t},\n\t\t\"userdel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserDel{Meta: meta}, nil\n\t\t},\n\t\t\"userpasswd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserPasswd{Meta: meta}, nil\n\t\t},\n\t\t\"usercheckpw\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserCheckPw{Meta: meta}, nil\n\t\t},\n\t\t\"aliasusers\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUsers{Meta: meta}, nil\n\t\t},\n\t\t\"aliasuseradd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserAdd{Meta: meta}, nil\n\t\t},\n\t\t\"aliasusermod\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserMod{Meta: meta}, nil\n\t\t},\n\t\t\"aliasuserdel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserDel{Meta: meta}, nil\n\t\t},\n\t\t\"catchall\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAll{Meta: meta}, nil\n\t\t},\n\t\t\"catchallset\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAllSet{Meta: meta}, nil\n\t\t},\n\t\t\"catchallunset\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAllUnset{Meta: meta}, nil\n\t\t},\n\t\t\"commit\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCommit{Meta: meta}, nil\n\t\t},\n\t}\n\n\texitCode, err := c.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(meta.UI.ErrorWriter, \"%v\\n\", err)\n\t}\n\n\tos.Exit(exitCode)\n}\n\n\/\/ noCommitFlag returns true if `pargs` has \"-n\" flag.\n\/\/ `pargs` is overwrites with non-flag arguments.\nfunc noCommitFlag(pargs *[]string) (bool, error) {\n\tnFlag := false\n\n\tflagSet := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflagSet.SetOutput(&bytes.Buffer{})\n\tflagSet.BoolVar(&nFlag, \"n\", nFlag, \"\")\n\terr := flagSet.Parse(*pargs)\n\t*pargs = flagSet.Args()\n\n\treturn nFlag, err\n}\nNow show runtime version in `mailfull --version`\/*\nCommand mailfull is a CLI application using the mailfull package.\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/directorz\/mailfull-go\"\n\t\"github.com\/directorz\/mailfull-go\/cmd\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nvar (\n\tversion = mailfull.Version\n\tgittag = \"\"\n)\n\nfunc init() {\n\tif gittag != \"\" {\n\t\tversion += \"-\" + gittag\n\t}\n\tversion += fmt.Sprintf(\" (built with %s)\", runtime.Version())\n}\n\nfunc main() {\n\tc := &cli.CLI{\n\t\tName: filepath.Base(os.Args[0]),\n\t\tVersion: version,\n\t\tArgs: os.Args[1:],\n\t}\n\n\tmeta := cmd.Meta{\n\t\tUI: &cli.BasicUi{\n\t\t\tReader: os.Stdin,\n\t\t\tWriter: os.Stdout,\n\t\t\tErrorWriter: os.Stderr,\n\t\t},\n\t\tCmdName: c.Name,\n\t\tVersion: c.Version,\n\t}\n\n\tc.Commands = map[string]cli.CommandFactory{\n\t\t\"init\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdInit{Meta: meta}, nil\n\t\t},\n\t\t\"genconfig\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdGenConfig{Meta: meta}, nil\n\t\t},\n\t\t\"domains\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomains{Meta: meta}, nil\n\t\t},\n\t\t\"domainadd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainAdd{Meta: meta}, nil\n\t\t},\n\t\t\"domaindel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainDel{Meta: meta}, nil\n\t\t},\n\t\t\"domaindisable\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainDisable{Meta: meta}, nil\n\t\t},\n\t\t\"domainenable\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdDomainEnable{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomains\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomains{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomainadd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomainAdd{Meta: meta}, nil\n\t\t},\n\t\t\"aliasdomaindel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasDomainDel{Meta: meta}, nil\n\t\t},\n\t\t\"users\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUsers{Meta: meta}, nil\n\t\t},\n\t\t\"useradd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserAdd{Meta: meta}, nil\n\t\t},\n\t\t\"userdel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserDel{Meta: meta}, nil\n\t\t},\n\t\t\"userpasswd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserPasswd{Meta: meta}, nil\n\t\t},\n\t\t\"usercheckpw\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdUserCheckPw{Meta: meta}, nil\n\t\t},\n\t\t\"aliasusers\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUsers{Meta: meta}, nil\n\t\t},\n\t\t\"aliasuseradd\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserAdd{Meta: meta}, nil\n\t\t},\n\t\t\"aliasusermod\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserMod{Meta: meta}, nil\n\t\t},\n\t\t\"aliasuserdel\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdAliasUserDel{Meta: meta}, nil\n\t\t},\n\t\t\"catchall\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAll{Meta: meta}, nil\n\t\t},\n\t\t\"catchallset\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAllSet{Meta: meta}, nil\n\t\t},\n\t\t\"catchallunset\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCatchAllUnset{Meta: meta}, nil\n\t\t},\n\t\t\"commit\": func() (cli.Command, error) {\n\t\t\tmeta.SubCmdName = c.Subcommand()\n\t\t\treturn &CmdCommit{Meta: meta}, nil\n\t\t},\n\t}\n\n\texitCode, err := c.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(meta.UI.ErrorWriter, \"%v\\n\", err)\n\t}\n\n\tos.Exit(exitCode)\n}\n\n\/\/ noCommitFlag returns true if `pargs` has \"-n\" flag.\n\/\/ `pargs` is overwrites with non-flag arguments.\nfunc noCommitFlag(pargs *[]string) (bool, error) {\n\tnFlag := false\n\n\tflagSet := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflagSet.SetOutput(&bytes.Buffer{})\n\tflagSet.BoolVar(&nFlag, \"n\", nFlag, \"\")\n\terr := flagSet.Parse(*pargs)\n\t*pargs = flagSet.Args()\n\n\treturn nFlag, err\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (C) 2016 Arista Networks, Inc.\n\/\/ Use of this source code is governed by the Apache License 2.0\n\/\/ that can be found in the COPYING file.\n\n\/\/ The occlient tool is a client for the gRPC service for getting and setting the\n\/\/ OpenConfig configuration and state of a network device.\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/aristanetworks\/glog\"\n\t\"github.com\/aristanetworks\/goarista\/openconfig\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\nvar addr = flag.String(\"addr\", \"localhost:6042\",\n\t\"Address of the server\")\n\nvar certFile = flag.String(\"certfile\", \"\",\n\t\"Path to client TLS certificate file\")\n\nvar keyFile = flag.String(\"keyfile\", \"\",\n\t\"Path to client TLS private key file\")\n\nvar caFile = flag.String(\"cafile\", \"\",\n\t\"Path to server TLS certificate file\")\n\nvar jsonOutput = flag.Bool(\"json\", false,\n\t\"Print the output in JSON instead of protobuf\")\n\nvar subscribe = flag.String(\"subscribe\", \"\",\n\t\"Comma-separated list of paths to subscribe to upon connecting to the server\")\n\nvar username = flag.String(\"username\", \"\",\n\t\"Username to authenticate with\")\n\nvar password = flag.String(\"password\", \"\",\n\t\"Password to authenticate with\")\n\nfunc main() {\n\tflag.Parse()\n\tvar opts []grpc.DialOption\n\tif *caFile != \"\" || *certFile != \"\" {\n\t\tconfig := &tls.Config{}\n\t\tif *caFile != \"\" {\n\t\t\tb, err := ioutil.ReadFile(*caFile)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(err)\n\t\t\t}\n\t\t\tcp := x509.NewCertPool()\n\t\t\tif !cp.AppendCertsFromPEM(b) {\n\t\t\t\tglog.Fatalf(\"credentials: failed to append certificates\")\n\t\t\t}\n\t\t\tconfig.RootCAs = cp\n\t\t} else {\n\t\t\tconfig.InsecureSkipVerify = true\n\t\t}\n\t\tif *certFile != \"\" {\n\t\t\tif *keyFile == \"\" {\n\t\t\t\tglog.Fatalf(\"Please provide both -certfile and -keyfile\")\n\t\t\t}\n\t\t\tcert, err := tls.LoadX509KeyPair(*certFile, *keyFile)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(err)\n\t\t\t}\n\t\t\tconfig.Certificates = []tls.Certificate{cert}\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(config)))\n\t} else {\n\t\topts = append(opts, grpc.WithInsecure())\n\t}\n\tconn, err := grpc.Dial(*addr, opts...)\n\tif err != nil {\n\t\tglog.Fatalf(\"fail to dial: %s\", err)\n\t}\n\tdefer conn.Close()\n\tclient := openconfig.NewOpenConfigClient(conn)\n\n\tctx := context.Background()\n\tif *username != \"\" {\n\t\tctx = metadata.NewContext(ctx, metadata.Pairs(\n\t\t\t\"username\", *username,\n\t\t\t\"password\", *password))\n\t}\n\n\tstream, err := client.Subscribe(ctx)\n\tif err != nil {\n\t\tglog.Fatalf(\"Subscribe failed: %s\", err)\n\t}\n\tdefer stream.CloseSend()\n\n\tfor _, path := range strings.Split(*subscribe, \",\") {\n\t\tsub := &openconfig.SubscribeRequest{\n\t\t\tRequest: &openconfig.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &openconfig.SubscriptionList{\n\t\t\t\t\tSubscription: []*openconfig.Subscription{\n\t\t\t\t\t\t&openconfig.Subscription{\n\t\t\t\t\t\t\tPath: &openconfig.Path{Element: strings.Split(path, \"\/\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr = stream.Send(sub)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Failed to subscribe: %s\", err)\n\t\t}\n\t}\n\n\tfor {\n\t\tresp, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tglog.Fatalf(\"Error received from the server: %s\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tvar respTxt string\n\t\tif *jsonOutput {\n\t\t\trespTxt = jsonify(resp)\n\t\t} else {\n\t\t\trespTxt = proto.MarshalTextString(resp)\n\t\t}\n\t\tglog.Info(respTxt)\n\t}\n}\n\nfunc joinPath(path *openconfig.Path) string {\n\treturn strings.Join(path.Element, \"\/\")\n}\n\nfunc jsonify(resp *openconfig.SubscribeResponse) string {\n\tm := make(map[string]interface{}, 1)\n\tswitch resp := resp.Response.(type) {\n\tcase *openconfig.SubscribeResponse_Update:\n\t\tnotif := resp.Update\n\t\tm[\"timestamp\"] = notif.Timestamp\n\t\tm[\"path\"] = \"\/\" + joinPath(notif.Prefix)\n\t\tif len(notif.Update) != 0 {\n\t\t\tupdates := make(map[string]interface{}, len(notif.Update))\n\t\t\tfor _, update := range notif.Update {\n\t\t\t\tvar value interface{}\n\t\t\t\tswitch update.Value.Type {\n\t\t\t\tcase openconfig.Type_JSON:\n\t\t\t\t\terr := json.Unmarshal(update.Value.Value, &value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tglog.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\tcase openconfig.Type_BYTES:\n\t\t\t\t\tvalue = strconv.Quote(string(update.Value.Value))\n\t\t\t\tdefault:\n\t\t\t\t\tglog.Fatalf(\"Unhandled type of value %v\", update.Value.Type)\n\t\t\t\t}\n\t\t\t\tupdates[joinPath(update.Path)] = value\n\t\t\t}\n\t\t\tm[\"updates\"] = updates\n\t\t}\n\t\tif len(notif.Delete) != 0 {\n\t\t\tdeletes := make([]string, len(notif.Delete))\n\t\t\tfor i, del := range notif.Delete {\n\t\t\t\tdeletes[i] = joinPath(del)\n\t\t\t}\n\t\t\tm[\"deletes\"] = deletes\n\t\t}\n\t\tm = map[string]interface{}{\"notification\": m}\n\tcase *openconfig.SubscribeResponse_Heartbeat:\n\t\tm[\"heartbeat\"] = resp.Heartbeat.Interval\n\tcase *openconfig.SubscribeResponse_SyncResponse:\n\t\tm[\"syncResponse\"] = resp.SyncResponse\n\tdefault:\n\t\tglog.Fatalf(\"Unknown type of response: %T: %s\", resp, resp)\n\t}\n\tjs, err := json.MarshalIndent(m, \"\", \" \")\n\tif err != nil {\n\t\tglog.Fatal(\"json: \", err)\n\t}\n\treturn string(js)\n}\ncmd\/occlient: connect to kafka\/\/ Copyright (C) 2016 Arista Networks, Inc.\n\/\/ Use of this source code is governed by the Apache License 2.0\n\/\/ that can be found in the COPYING file.\n\n\/\/ The occlient tool is a client for the gRPC service for getting and setting the\n\/\/ OpenConfig configuration and state of a network device.\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/aristanetworks\/goarista\/kafka\"\n\t\"github.com\/aristanetworks\/goarista\/kafka\/openconfig\"\n\t\"github.com\/aristanetworks\/goarista\/kafka\/producer\"\n\n\t\"github.com\/aristanetworks\/glog\"\n\tpb \"github.com\/aristanetworks\/goarista\/openconfig\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\nvar addr = flag.String(\"addr\", \"localhost:6042\",\n\t\"Address of the OpenConfig server\")\n\nvar certFile = flag.String(\"certfile\", \"\",\n\t\"Path to client TLS certificate file\")\n\nvar keyFile = flag.String(\"keyfile\", \"\",\n\t\"Path to client TLS private key file\")\n\nvar caFile = flag.String(\"cafile\", \"\",\n\t\"Path to server TLS certificate file\")\n\nvar jsonOutput = flag.Bool(\"json\", false,\n\t\"Print the output in JSON instead of protobuf\")\n\nvar subscribe = flag.String(\"subscribe\", \"\",\n\t\"Comma-separated list of paths to subscribe to upon connecting to the server\")\n\nvar username = flag.String(\"username\", \"\",\n\t\"Username to authenticate with\")\n\nvar password = flag.String(\"password\", \"\",\n\t\"Password to authenticate with\")\n\nfunc main() {\n\tflag.Parse()\n\tvar opts []grpc.DialOption\n\tif *caFile != \"\" || *certFile != \"\" {\n\t\tconfig := &tls.Config{}\n\t\tif *caFile != \"\" {\n\t\t\tb, err := ioutil.ReadFile(*caFile)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(err)\n\t\t\t}\n\t\t\tcp := x509.NewCertPool()\n\t\t\tif !cp.AppendCertsFromPEM(b) {\n\t\t\t\tglog.Fatalf(\"credentials: failed to append certificates\")\n\t\t\t}\n\t\t\tconfig.RootCAs = cp\n\t\t} else {\n\t\t\tconfig.InsecureSkipVerify = true\n\t\t}\n\t\tif *certFile != \"\" {\n\t\t\tif *keyFile == \"\" {\n\t\t\t\tglog.Fatalf(\"Please provide both -certfile and -keyfile\")\n\t\t\t}\n\t\t\tcert, err := tls.LoadX509KeyPair(*certFile, *keyFile)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(err)\n\t\t\t}\n\t\t\tconfig.Certificates = []tls.Certificate{cert}\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(config)))\n\t} else {\n\t\topts = append(opts, grpc.WithInsecure())\n\t}\n\tconn, err := grpc.Dial(*addr, opts...)\n\tif err != nil {\n\t\tglog.Fatalf(\"fail to dial: %s\", err)\n\t}\n\tdefer conn.Close()\n\tclient := pb.NewOpenConfigClient(conn)\n\n\tctx := context.Background()\n\tif *username != \"\" {\n\t\tctx = metadata.NewContext(ctx, metadata.Pairs(\n\t\t\t\"username\", *username,\n\t\t\t\"password\", *password))\n\t}\n\n\tstream, err := client.Subscribe(ctx)\n\tif err != nil {\n\t\tglog.Fatalf(\"Subscribe failed: %s\", err)\n\t}\n\tdefer stream.CloseSend()\n\n\tfor _, path := range strings.Split(*subscribe, \",\") {\n\t\tsub := &pb.SubscribeRequest{\n\t\t\tRequest: &pb.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &pb.SubscriptionList{\n\t\t\t\t\tSubscription: []*pb.Subscription{\n\t\t\t\t\t\t&pb.Subscription{\n\t\t\t\t\t\t\tPath: &pb.Path{Element: strings.Split(path, \"\/\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr = stream.Send(sub)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Failed to subscribe: %s\", err)\n\t\t}\n\t}\n\n\tvar kafkaChan chan proto.Message\n\tif *kafka.Addresses != \"\" {\n\t\tkafkaAddresses := strings.Split(*kafka.Addresses, \",\")\n\t\tkafkaConfig := sarama.NewConfig()\n\t\thostname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\thostname = \"\"\n\t\t}\n\t\tkafkaConfig.ClientID = hostname\n\t\tkafkaConfig.Producer.Compression = sarama.CompressionSnappy\n\t\tkafkaConfig.Producer.Return.Successes = true\n\n\t\tkafkaClient, err := sarama.NewClient(kafkaAddresses, kafkaConfig)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Failed to create Kafka client: %s\", err)\n\t\t}\n\t\tkafkaChan = make(chan proto.Message)\n\t\tkey := sarama.StringEncoder(*addr)\n\t\tp, err := producer.New(os.Args[0], kafkaChan, kafkaClient, key, openconfig.MessageEncoder)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Failed to create Kafka producer: %s\", err)\n\t\t}\n\t\tgo p.Run()\n\t}\n\tfor {\n\t\tresp, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tglog.Fatalf(\"Error received from the server: %s\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tvar respTxt string\n\t\tif *jsonOutput {\n\t\t\trespTxt = jsonify(resp)\n\t\t} else {\n\t\t\trespTxt = proto.MarshalTextString(resp)\n\t\t}\n\t\tglog.Info(respTxt)\n\t\tif kafkaChan != nil {\n\t\t\tkafkaChan <- resp\n\t\t}\n\t}\n}\n\nfunc joinPath(path *pb.Path) string {\n\treturn strings.Join(path.Element, \"\/\")\n}\n\nfunc jsonify(resp *pb.SubscribeResponse) string {\n\tm := make(map[string]interface{}, 1)\n\tswitch resp := resp.Response.(type) {\n\tcase *pb.SubscribeResponse_Update:\n\t\tnotif := resp.Update\n\t\tm[\"timestamp\"] = notif.Timestamp\n\t\tm[\"path\"] = \"\/\" + joinPath(notif.Prefix)\n\t\tif len(notif.Update) != 0 {\n\t\t\tupdates := make(map[string]interface{}, len(notif.Update))\n\t\t\tfor _, update := range notif.Update {\n\t\t\t\tvar value interface{}\n\t\t\t\tswitch update.Value.Type {\n\t\t\t\tcase pb.Type_JSON:\n\t\t\t\t\terr := json.Unmarshal(update.Value.Value, &value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tglog.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\tcase pb.Type_BYTES:\n\t\t\t\t\tvalue = strconv.Quote(string(update.Value.Value))\n\t\t\t\tdefault:\n\t\t\t\t\tglog.Fatalf(\"Unhandled type of value %v\", update.Value.Type)\n\t\t\t\t}\n\t\t\t\tupdates[joinPath(update.Path)] = value\n\t\t\t}\n\t\t\tm[\"updates\"] = updates\n\t\t}\n\t\tif len(notif.Delete) != 0 {\n\t\t\tdeletes := make([]string, len(notif.Delete))\n\t\t\tfor i, del := range notif.Delete {\n\t\t\t\tdeletes[i] = joinPath(del)\n\t\t\t}\n\t\t\tm[\"deletes\"] = deletes\n\t\t}\n\t\tm = map[string]interface{}{\"notification\": m}\n\tcase *pb.SubscribeResponse_Heartbeat:\n\t\tm[\"heartbeat\"] = resp.Heartbeat.Interval\n\tcase *pb.SubscribeResponse_SyncResponse:\n\t\tm[\"syncResponse\"] = resp.SyncResponse\n\tdefault:\n\t\tglog.Fatalf(\"Unknown type of response: %T: %s\", resp, resp)\n\t}\n\tjs, err := json.MarshalIndent(m, \"\", \" \")\n\tif err != nil {\n\t\tglog.Fatal(\"json: \", err)\n\t}\n\treturn string(js)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by the Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\ntype golangorgBuilder struct{}\n\nfunc prefix8(s string) string {\n\tif len(s) < 8 {\n\t\treturn s\n\t}\n\treturn s[:8]\n}\n\nfunc (b golangorgBuilder) Signature(heads map[string]string) string {\n\treturn fmt.Sprintf(\"go=%v\/website=%v\", prefix8(heads[\"go\"]), prefix8(heads[\"website\"]))\n}\n\nfunc (b golangorgBuilder) Init(logger *log.Logger, dir, hostport string, heads map[string]string) (*exec.Cmd, error) {\n\tif _, ok := heads[\"go\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"missing HEAD revision for 'go' repo\")\n\t}\n\tif _, ok := heads[\"website\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"missing HEAD revision for 'website' repo\")\n\t}\n\tgoDir := filepath.Join(dir, \"go\")\n\twebsiteDir := filepath.Join(dir, \"website\")\n\tlogger.Printf(\"checking out go repo ...\")\n\tif err := checkout(repoURL+\"go\", heads[\"go\"], goDir); err != nil {\n\t\treturn nil, fmt.Errorf(\"checkout of go: %v\", err)\n\t}\n\tlogger.Printf(\"checking out website repo ...\")\n\tif err := checkout(repoURL+\"website\", heads[\"website\"], websiteDir); err != nil {\n\t\treturn nil, fmt.Errorf(\"checkout of website: %v\", err)\n\t}\n\n\tvar logWriter io.Writer = toLoggerWriter{logger}\n\n\tmake := exec.Command(filepath.Join(goDir, \"src\/make.bash\"))\n\tmake.Dir = filepath.Join(goDir, \"src\")\n\tmake.Stdout = logWriter\n\tmake.Stderr = logWriter\n\tlogger.Printf(\"running make.bash in %s ...\", make.Dir)\n\tif err := make.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"running make.bash: %v\", err)\n\t}\n\n\tlogger.Printf(\"installing golangorg ...\")\n\tgoBin := filepath.Join(goDir, \"bin\/go\")\n\tbinDir := filepath.Join(dir, \"bin\")\n\tinstall := exec.Command(goBin, \"install\", \"golang.org\/x\/website\/cmd\/golangorg\")\n\tinstall.Stdout = logWriter\n\tinstall.Stderr = logWriter\n\tinstall.Env = append(os.Environ(),\n\t\t\"GOROOT=\"+goDir,\n\t\t\"GO111MODULE=on\",\n\t\t\"GOPROXY=https:\/\/proxy.golang.org\",\n\t\t\"GOBIN=\"+binDir,\n\t)\n\tif err := install.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"go install golang.org\/x\/website\/cmd\/golangorg: %v\", err)\n\t}\n\n\tlogger.Printf(\"starting golangorg ...\")\n\tgolangorgBin := filepath.Join(binDir, \"golangorg\")\n\tgolangorg := exec.Command(golangorgBin, \"-http=\"+hostport, \"-index\", \"-index_interval=-1s\", \"-play\")\n\tgolangorg.Env = append(os.Environ(), \"GOROOT=\"+goDir)\n\tgolangorg.Stdout = logWriter\n\tgolangorg.Stderr = logWriter\n\tif err := golangorg.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"starting golangorg: %v\", err)\n\t}\n\treturn golangorg, nil\n}\n\nvar indexingMsg = []byte(\"Indexing in progress: result may be inaccurate\")\n\nfunc (b golangorgBuilder) HealthCheck(hostport string) error {\n\tbody, err := getOK(fmt.Sprintf(\"http:\/\/%v\/search?q=FALLTHROUGH\", hostport))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Contains(body, indexingMsg) {\n\t\treturn errors.New(\"still indexing\")\n\t}\n\treturn nil\n}\ncmd\/tip: use website repo on disk for installing cmd\/golangorg\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by the Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\ntype golangorgBuilder struct{}\n\nfunc prefix8(s string) string {\n\tif len(s) < 8 {\n\t\treturn s\n\t}\n\treturn s[:8]\n}\n\nfunc (b golangorgBuilder) Signature(heads map[string]string) string {\n\treturn fmt.Sprintf(\"go=%v\/website=%v\", prefix8(heads[\"go\"]), prefix8(heads[\"website\"]))\n}\n\nfunc (b golangorgBuilder) Init(logger *log.Logger, dir, hostport string, heads map[string]string) (*exec.Cmd, error) {\n\tif _, ok := heads[\"go\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"missing HEAD revision for 'go' repo\")\n\t}\n\tif _, ok := heads[\"website\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"missing HEAD revision for 'website' repo\")\n\t}\n\tgoDir := filepath.Join(dir, \"go\")\n\twebsiteDir := filepath.Join(dir, \"website\")\n\tlogger.Printf(\"checking out go repo ...\")\n\tif err := checkout(repoURL+\"go\", heads[\"go\"], goDir); err != nil {\n\t\treturn nil, fmt.Errorf(\"checkout of go: %v\", err)\n\t}\n\tlogger.Printf(\"checking out website repo ...\")\n\tif err := checkout(repoURL+\"website\", heads[\"website\"], websiteDir); err != nil {\n\t\treturn nil, fmt.Errorf(\"checkout of website: %v\", err)\n\t}\n\n\tvar logWriter io.Writer = toLoggerWriter{logger}\n\n\tmake := exec.Command(filepath.Join(goDir, \"src\/make.bash\"))\n\tmake.Dir = filepath.Join(goDir, \"src\")\n\tmake.Stdout = logWriter\n\tmake.Stderr = logWriter\n\tlogger.Printf(\"running make.bash in %s ...\", make.Dir)\n\tif err := make.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"running make.bash: %v\", err)\n\t}\n\n\tlogger.Printf(\"installing golangorg ...\")\n\tgoBin := filepath.Join(goDir, \"bin\/go\")\n\tbinDir := filepath.Join(dir, \"bin\")\n\tinstall := exec.Command(goBin, \"install\", \"golang.org\/x\/website\/cmd\/golangorg\")\n\tinstall.Stdout = logWriter\n\tinstall.Stderr = logWriter\n\tinstall.Dir = websiteDir\n\tinstall.Env = append(os.Environ(),\n\t\t\"GOROOT=\"+goDir,\n\t\t\"GO111MODULE=on\",\n\t\t\"GOPROXY=https:\/\/proxy.golang.org\",\n\t\t\"GOBIN=\"+binDir,\n\t)\n\tif err := install.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"go install golang.org\/x\/website\/cmd\/golangorg: %v\", err)\n\t}\n\n\tlogger.Printf(\"starting golangorg ...\")\n\tgolangorgBin := filepath.Join(binDir, \"golangorg\")\n\tgolangorg := exec.Command(golangorgBin, \"-http=\"+hostport, \"-index\", \"-index_interval=-1s\", \"-play\")\n\tgolangorg.Env = append(os.Environ(), \"GOROOT=\"+goDir)\n\tgolangorg.Stdout = logWriter\n\tgolangorg.Stderr = logWriter\n\tif err := golangorg.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"starting golangorg: %v\", err)\n\t}\n\treturn golangorg, nil\n}\n\nvar indexingMsg = []byte(\"Indexing in progress: result may be inaccurate\")\n\nfunc (b golangorgBuilder) HealthCheck(hostport string) error {\n\tbody, err := getOK(fmt.Sprintf(\"http:\/\/%v\/search?q=FALLTHROUGH\", hostport))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Contains(body, indexingMsg) {\n\t\treturn errors.New(\"still indexing\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package analytics\n\n\/\/\n\/\/ dependencies\n\/\/\n\nimport \"github.com\/jehiah\/go-strftime\"\nimport \"github.com\/twinj\/uuid\"\nimport . \"encoding\/json\"\nimport \"io\/ioutil\"\nimport \"net\/http\"\nimport \"errors\"\nimport \"bytes\"\nimport \"time\"\nimport \"log\"\n\n\/\/\n\/\/ Library version\n\/\/\n\nconst Version = \"0.0.1\"\n\n\/\/\n\/\/ Default API end-point\n\/\/\n\nconst api = \"https:\/\/api.segment.io\"\n\n\/\/\n\/\/ Message type.\n\/\/\n\ntype Message map[string]interface{}\n\n\/\/\n\/\/ Segment.io client\n\/\/\n\ntype Client struct {\n\tDebug bool\n\tBufferSize int\n\tFlushInterval time.Duration\n\tEndpoint string\n\tKey string\n\tbuffer []Message\n}\n\n\/\/\n\/\/ Batch message\n\/\/\n\ntype batch struct {\n\tMessages []Message `json:\"batch\"`\n\tMessageId string `json:\"messageId\"`\n}\n\n\/\/\n\/\/ UUID formatting.\n\/\/\n\nfunc init() {\n\t\/\/ TODO: wtf, this is lame\n\tuuid.SwitchFormat(uuid.CleanHyphen, false)\n}\n\n\/\/\n\/\/ Return a new Segment.io client\n\/\/ with the given write key.\n\/\/\n\nfunc New(key string) (c *Client) {\n\tdefer func() {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(c.FlushInterval)\n\t\t\t\tc.log(\"interval %v reached\", c.FlushInterval)\n\t\t\t\tgo c.flush()\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn &Client{\n\t\tDebug: false,\n\t\tBufferSize: 500,\n\t\tFlushInterval: 30 * time.Second,\n\t\tKey: key,\n\t\tEndpoint: api,\n\t\tbuffer: make([]Message, 0),\n\t}\n}\n\n\/\/\n\/\/ Buffer an alias message.\n\/\/\n\nfunc (c *Client) Alias(msg Message) error {\n\tif msg[\"userId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'userId'.\")\n\t}\n\n\tif msg[\"previousId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'previousId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a page message.\n\/\/\n\nfunc (c *Client) Page(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a screen message.\n\/\/\n\nfunc (c *Client) Screen(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a group message.\n\/\/\n\nfunc (c *Client) Group(msg Message) error {\n\tif msg[\"groupId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'groupId'.\")\n\t}\n\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer an identify message.\n\/\/\n\nfunc (c *Client) Identify(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a track message.\n\/\/\n\nfunc (c *Client) Track(msg Message) error {\n\tif msg[\"event\"] == nil {\n\t\treturn errors.New(\"You must pass 'event'.\")\n\t}\n\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Return a new initialized message map\n\/\/ with `msg` values and context merged.\n\/\/\n\nfunc message(msg Message) Message {\n\tm := newMessage()\n\n\tif msg[\"context\"] != nil {\n\t\tmerge(m[\"context\"].(map[string]interface{}), msg[\"context\"].(map[string]interface{}))\n\t\tdelete(msg, \"context\")\n\t}\n\n\tmerge(m, msg)\n\n\treturn m\n}\n\n\/\/\n\/\/ Return new initialzed message map.\n\/\/\n\nfunc newMessage() Message {\n\treturn Message{\n\t\t\"timestamp\": timestamp(),\n\t\t\"messageId\": uid(),\n\t\t\"context\": map[string]interface{}{\n\t\t\t\"version\": Version,\n\t\t\t\"library\": \"analytics-go\",\n\t\t},\n\t}\n}\n\n\/\/\n\/\/ Merge two maps.\n\/\/\n\nfunc merge(dst Message, src Message) {\n\tfor k, v := range src {\n\t\tdst[k] = v\n\t}\n}\n\n\/\/\n\/\/ Return uuid.\n\/\/\n\nfunc uid() string {\n\treturn uuid.NewV4().String()\n}\n\n\/\/\n\/\/ Return formatted timestamp.\n\/\/\n\nfunc timestamp() string {\n\treturn strftime.Format(\"%Y-%m-%dT%H:%M:%S%z\", time.Now())\n}\n\n\/\/\n\/\/ Log in debug mode.\n\/\/\n\nfunc (c *Client) log(format string, v ...interface{}) {\n\tif c.Debug {\n\t\tlog.Printf(format, v...)\n\t}\n}\n\n\/\/\n\/\/ Buffer the given message and flush\n\/\/ when the buffer exceeds .BufferSize.\n\/\/\n\nfunc (c *Client) queue(msg Message) {\n\tc.buffer = append(c.buffer, msg)\n\n\tc.log(\"buffer (%d\/%d) %v\", len(c.buffer), c.BufferSize, msg)\n\n\tif len(c.buffer) >= c.BufferSize {\n\t\tgo c.flush()\n\t}\n}\n\n\/\/\n\/\/ Return a batch message primed\n\/\/ with context properties\n\/\/\n\nfunc batchMessage(msgs []Message) *batch {\n\treturn &batch{\n\t\tMessageId: uid(),\n\t\tMessages: msgs,\n\t}\n}\n\n\/\/\n\/\/ Flush the buffered messages.\n\/\/\n\/\/ TODO: better error-handling,\n\/\/ this is really meh, it would\n\/\/ be better if we used a chan\n\/\/ to deliver them.\n\/\/\n\nfunc (c *Client) flush() error {\n\tif len(c.buffer) == 0 {\n\t\tc.log(\"no messages to flush\")\n\t\treturn nil\n\t}\n\n\tc.log(\"flushing %d messages\", len(c.buffer))\n\tjson, err := Marshal(batchMessage(c.buffer))\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\tc.buffer = nil\n\n\tclient := &http.Client{}\n\turl := c.Endpoint + \"\/v1\/import\"\n\tc.log(\"POST %s with %d bytes\", url, len(json))\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(json))\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"User-Agent\", \"analytics-go (version: \"+Version+\")\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"Content-Length\", string(len(json)))\n\treq.SetBasicAuth(c.Key, \"\")\n\n\tres, err := client.Do(req)\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\tc.log(\"%d response\", res.StatusCode)\n\n\tif res.StatusCode >= 400 {\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\tc.log(\"error: %s\", string(body))\n\t\tc.log(\"error: %s\", string(json))\n\t}\n\n\treturn err\n}\nadd msg.type backpackage analytics\n\n\/\/\n\/\/ dependencies\n\/\/\n\nimport \"github.com\/jehiah\/go-strftime\"\nimport \"github.com\/twinj\/uuid\"\nimport . \"encoding\/json\"\nimport \"io\/ioutil\"\nimport \"net\/http\"\nimport \"errors\"\nimport \"bytes\"\nimport \"time\"\nimport \"log\"\n\n\/\/\n\/\/ Library version\n\/\/\n\nconst Version = \"0.0.1\"\n\n\/\/\n\/\/ Default API end-point\n\/\/\n\nconst api = \"https:\/\/api.segment.io\"\n\n\/\/\n\/\/ Message type.\n\/\/\n\ntype Message map[string]interface{}\n\n\/\/\n\/\/ Segment.io client\n\/\/\n\ntype Client struct {\n\tDebug bool\n\tBufferSize int\n\tFlushInterval time.Duration\n\tEndpoint string\n\tKey string\n\tbuffer []Message\n}\n\n\/\/\n\/\/ Batch message\n\/\/\n\ntype batch struct {\n\tMessages []Message `json:\"batch\"`\n\tMessageId string `json:\"messageId\"`\n}\n\n\/\/\n\/\/ UUID formatting.\n\/\/\n\nfunc init() {\n\t\/\/ TODO: wtf, this is lame\n\tuuid.SwitchFormat(uuid.CleanHyphen, false)\n}\n\n\/\/\n\/\/ Return a new Segment.io client\n\/\/ with the given write key.\n\/\/\n\nfunc New(key string) (c *Client) {\n\tdefer func() {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(c.FlushInterval)\n\t\t\t\tc.log(\"interval %v reached\", c.FlushInterval)\n\t\t\t\tgo c.flush()\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn &Client{\n\t\tDebug: false,\n\t\tBufferSize: 500,\n\t\tFlushInterval: 30 * time.Second,\n\t\tKey: key,\n\t\tEndpoint: api,\n\t\tbuffer: make([]Message, 0),\n\t}\n}\n\n\/\/\n\/\/ Buffer an alias message.\n\/\/\n\nfunc (c *Client) Alias(msg Message) error {\n\tif msg[\"userId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'userId'.\")\n\t}\n\n\tif msg[\"previousId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'previousId'.\")\n\t}\n\n\tc.queue(message(msg, \"alias\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a page message.\n\/\/\n\nfunc (c *Client) Page(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg, \"page\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a screen message.\n\/\/\n\nfunc (c *Client) Screen(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg, \"screen\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a group message.\n\/\/\n\nfunc (c *Client) Group(msg Message) error {\n\tif msg[\"groupId\"] == nil {\n\t\treturn errors.New(\"You must pass a 'groupId'.\")\n\t}\n\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg, \"group\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer an identify message.\n\/\/\n\nfunc (c *Client) Identify(msg Message) error {\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg, \"identify\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Buffer a track message.\n\/\/\n\nfunc (c *Client) Track(msg Message) error {\n\tif msg[\"event\"] == nil {\n\t\treturn errors.New(\"You must pass 'event'.\")\n\t}\n\n\tif msg[\"userId\"] == nil && msg[\"anonymousId\"] == nil {\n\t\treturn errors.New(\"You must pass either an 'anonymousId' or 'userId'.\")\n\t}\n\n\tc.queue(message(msg, \"track\"))\n\n\treturn nil\n}\n\n\/\/\n\/\/ Return a new initialized message map\n\/\/ with `msg` values and context merged.\n\/\/\n\nfunc message(msg Message, call string) Message {\n\tm := newMessage(call)\n\n\tif msg[\"context\"] != nil {\n\t\tmerge(m[\"context\"].(map[string]interface{}), msg[\"context\"].(map[string]interface{}))\n\t\tdelete(msg, \"context\")\n\t}\n\n\tmerge(m, msg)\n\n\treturn m\n}\n\n\/\/\n\/\/ Return new initialzed message map.\n\/\/\n\nfunc newMessage(call string) Message {\n\treturn Message{\n\t\t\"type\": call,\n\t\t\"timestamp\": timestamp(),\n\t\t\"messageId\": uid(),\n\t\t\"context\": map[string]interface{}{\n\t\t\t\"version\": Version,\n\t\t\t\"library\": \"analytics-go\",\n\t\t},\n\t}\n}\n\n\/\/\n\/\/ Merge two maps.\n\/\/\n\nfunc merge(dst Message, src Message) {\n\tfor k, v := range src {\n\t\tdst[k] = v\n\t}\n}\n\n\/\/\n\/\/ Return uuid.\n\/\/\n\nfunc uid() string {\n\treturn uuid.NewV4().String()\n}\n\n\/\/\n\/\/ Return formatted timestamp.\n\/\/\n\nfunc timestamp() string {\n\treturn strftime.Format(\"%Y-%m-%dT%H:%M:%S%z\", time.Now())\n}\n\n\/\/\n\/\/ Log in debug mode.\n\/\/\n\nfunc (c *Client) log(format string, v ...interface{}) {\n\tif c.Debug {\n\t\tlog.Printf(format, v...)\n\t}\n}\n\n\/\/\n\/\/ Buffer the given message and flush\n\/\/ when the buffer exceeds .BufferSize.\n\/\/\n\nfunc (c *Client) queue(msg Message) {\n\tc.buffer = append(c.buffer, msg)\n\n\tc.log(\"buffer (%d\/%d) %v\", len(c.buffer), c.BufferSize, msg)\n\n\tif len(c.buffer) >= c.BufferSize {\n\t\tgo c.flush()\n\t}\n}\n\n\/\/\n\/\/ Return a batch message primed\n\/\/ with context properties\n\/\/\n\nfunc batchMessage(msgs []Message) *batch {\n\treturn &batch{\n\t\tMessageId: uid(),\n\t\tMessages: msgs,\n\t}\n}\n\n\/\/\n\/\/ Flush the buffered messages.\n\/\/\n\/\/ TODO: better error-handling,\n\/\/ this is really meh, it would\n\/\/ be better if we used a chan\n\/\/ to deliver them.\n\/\/\n\nfunc (c *Client) flush() error {\n\tif len(c.buffer) == 0 {\n\t\tc.log(\"no messages to flush\")\n\t\treturn nil\n\t}\n\n\tc.log(\"flushing %d messages\", len(c.buffer))\n\tjson, err := Marshal(batchMessage(c.buffer))\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\tc.buffer = nil\n\n\tclient := &http.Client{}\n\turl := c.Endpoint + \"\/v1\/import\"\n\tc.log(\"POST %s with %d bytes\", url, len(json))\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(json))\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"User-Agent\", \"analytics-go (version: \"+Version+\")\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"Content-Length\", string(len(json)))\n\treq.SetBasicAuth(c.Key, \"\")\n\n\tres, err := client.Do(req)\n\n\tif err != nil {\n\t\tc.log(\"error: %v\", err)\n\t\treturn err\n\t}\n\n\tc.log(\"%d response\", res.StatusCode)\n\n\tif res.StatusCode >= 400 {\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\tc.log(\"error: %s\", string(body))\n\t\tc.log(\"error: %s\", string(json))\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"github.com\/relab\/rkv\/rkvpb\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype client struct {\n\tl *uint64\n\tcurrentLeader uint64\n\tservers []rkvpb.RKVClient\n\n\tid uint64\n\tseq uint64\n\n\tzipf *rand.Zipf\n\n\ts *stats\n}\n\nfunc newClient(leader *uint64, servers []string, zipf *rand.Zipf, s *stats) (*client, error) {\n\tconns := make([]rkvpb.RKVClient, len(servers))\n\n\tfor i, server := range servers {\n\t\tcc, err := grpc.Dial(server, grpc.WithBlock(), grpc.WithInsecure())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconns[i] = rkvpb.NewRKVClient(cc)\n\t}\n\n\tc := &client{\n\t\tl: leader,\n\t\tservers: conns,\n\t\tzipf: zipf,\n\t\ts: s,\n\t}\n\n\tres, err := c.register()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.id = res.ClientID\n\tc.seq = 1\n\n\treturn c, nil\n}\n\nconst (\n\tretryPerServer = 10\n\tsleepPerRound = 250 * time.Millisecond\n)\n\nfunc sleep(round int) {\n\tdur := sleepPerRound * time.Duration(round)\n\ttime.Sleep(dur)\n}\n\nfunc (c *client) register() (*rkvpb.RegisterResponse, error) {\n\tc.s.writeReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.writeLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tvar res *rkvpb.RegisterResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Register(ctx, &rkvpb.RegisterRequest{})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.writes.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) lookup() (*rkvpb.LookupResponse, error) {\n\tc.s.readReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.readLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tvar res *rkvpb.LookupResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Lookup(ctx, &rkvpb.LookupRequest{\n\t\t\tKey: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.reads.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) insert() (*rkvpb.InsertResponse, error) {\n\tc.s.writeReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.writeLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tvar res *rkvpb.InsertResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Insert(ctx, &rkvpb.InsertRequest{\n\t\t\tClientID: c.id,\n\t\t\tClientSeq: atomic.AddUint64(&c.seq, 1),\n\t\t\tKey: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t\tValue: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.writes.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) leader() rkvpb.RKVClient {\n\treturn c.servers[c.getLeader()]\n}\n\nfunc (c *client) nextLeader() {\n\tnewLeader := (c.currentLeader + 1) % uint64(len(c.servers))\n\tatomic.CompareAndSwapUint64(c.l, c.currentLeader, newLeader)\n}\n\nfunc (c *client) getLeader() uint64 {\n\tc.currentLeader = atomic.LoadUint64(c.l)\n\treturn c.currentLeader\n}\nrkvctl: Set client request timeout to 10minpackage main\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"github.com\/relab\/rkv\/rkvpb\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype client struct {\n\tl *uint64\n\tcurrentLeader uint64\n\tservers []rkvpb.RKVClient\n\n\tid uint64\n\tseq uint64\n\n\tzipf *rand.Zipf\n\n\ts *stats\n}\n\nfunc newClient(leader *uint64, servers []string, zipf *rand.Zipf, s *stats) (*client, error) {\n\tconns := make([]rkvpb.RKVClient, len(servers))\n\n\tfor i, server := range servers {\n\t\tcc, err := grpc.Dial(server, grpc.WithBlock(), grpc.WithInsecure())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconns[i] = rkvpb.NewRKVClient(cc)\n\t}\n\n\tc := &client{\n\t\tl: leader,\n\t\tservers: conns,\n\t\tzipf: zipf,\n\t\ts: s,\n\t}\n\n\tres, err := c.register()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.id = res.ClientID\n\tc.seq = 1\n\n\treturn c, nil\n}\n\nconst (\n\tretryPerServer = 10\n\tsleepPerRound = 250 * time.Millisecond\n\trequestTimeout = 10 * time.Minute\n)\n\nfunc sleep(round int) {\n\tdur := sleepPerRound * time.Duration(round)\n\ttime.Sleep(dur)\n}\n\nfunc (c *client) register() (*rkvpb.RegisterResponse, error) {\n\tc.s.writeReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.writeLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), requestTimeout)\n\tdefer cancel()\n\n\tvar res *rkvpb.RegisterResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Register(ctx, &rkvpb.RegisterRequest{})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.writes.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) lookup() (*rkvpb.LookupResponse, error) {\n\tc.s.readReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.readLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), requestTimeout)\n\tdefer cancel()\n\n\tvar res *rkvpb.LookupResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Lookup(ctx, &rkvpb.LookupRequest{\n\t\t\tKey: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.reads.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) insert() (*rkvpb.InsertResponse, error) {\n\tc.s.writeReqs.Add(1)\n\ttimer := metrics.NewTimer(c.s.writeLatency)\n\n\tctx, cancel := context.WithTimeout(context.Background(), requestTimeout)\n\tdefer cancel()\n\n\tvar res *rkvpb.InsertResponse\n\tvar err error\n\n\tfor i := 0; i < retryPerServer*len(c.servers); i++ {\n\t\tif i%len(c.servers) == 0 {\n\t\t\tsleep(i \/ len(c.servers))\n\t\t}\n\n\t\tres, err = c.leader().Insert(ctx, &rkvpb.InsertRequest{\n\t\t\tClientID: c.id,\n\t\t\tClientSeq: atomic.AddUint64(&c.seq, 1),\n\t\t\tKey: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t\tValue: strconv.FormatUint(c.zipf.Uint64(), 10),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tc.nextLeader()\n\t\t\tcontinue\n\t\t}\n\n\t\tc.s.writes.Add(1)\n\t\ttimer.ObserveDuration()\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\nfunc (c *client) leader() rkvpb.RKVClient {\n\treturn c.servers[c.getLeader()]\n}\n\nfunc (c *client) nextLeader() {\n\tnewLeader := (c.currentLeader + 1) % uint64(len(c.servers))\n\tatomic.CompareAndSwapUint64(c.l, c.currentLeader, newLeader)\n}\n\nfunc (c *client) getLeader() uint64 {\n\tc.currentLeader = atomic.LoadUint64(c.l)\n\treturn c.currentLeader\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/chlunde\/syncr\"\n)\n\nconst (\n\tClearAndMoveHome = \"\\033[2J\\033[2H\"\n\tCyanBold = \"\\033[36;1m\"\n\tRedBold = \"\\033[31;1m\"\n\tGreenBold = \"\\033[32;1m\"\n\tResetColor = \"\\033[0m\"\n)\n\nfunc Display(buf io.Writer, syncrs []*syncr.Syncr) bool {\n\tfmt.Fprint(buf, ClearAndMoveHome)\n\n\tvar anyAlive bool\n\tfor _, s := range syncrs {\n\t\ts.Lock.Lock()\n\t\tfmt.Fprintln(buf, CyanBold, s.Description)\n\n\t\tvar statusColor string\n\t\tswitch {\n\t\tcase s.Error != nil:\n\t\t\tstatusColor = RedBold\n\t\tcase s.Dead:\n\t\t\tstatusColor = GreenBold\n\t\tdefault:\n\t\t\tstatusColor = ResetColor\n\t\t}\n\n\t\tfmt.Fprintf(buf, statusColor)\n\n\t\tif s.Error != nil {\n\t\t\tfmt.Fprintf(buf, \"Failed: %v\\n\", s.Error)\n\t\t}\n\n\t\tfmt.Fprintln(buf, s.Status[0])\n\n\t\t\/\/ Skip last line if it's blank we're not getting any more lines\n\t\t\/\/ prevents the status from jumping up and down\n\t\tif !s.Dead || len(s.Status[1]) > 0 {\n\t\t\tfmt.Fprintln(buf, s.Status[1])\n\t\t}\n\n\t\tfmt.Fprint(buf, ResetColor)\n\n\t\tif !s.Dead {\n\t\t\tanyAlive = true\n\t\t}\n\t\ts.Lock.Unlock()\n\t}\n\n\treturn anyAlive\n}\nCompact outputpackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/chlunde\/syncr\"\n)\n\nconst (\n\tClearAndMoveHome = \"\\033[2J\\033[2H\"\n\tCyanBold = \"\\033[36;1m\"\n\tRedBold = \"\\033[31;1m\"\n\tGreenBold = \"\\033[32;1m\"\n\tResetColor = \"\\033[0m\"\n)\n\nfunc Display(buf io.Writer, syncrs []*syncr.Syncr) bool {\n\tfmt.Fprint(buf, ClearAndMoveHome)\n\n\tvar anyAlive bool\n\tfor _, s := range syncrs {\n\t\ts.Lock.Lock()\n\t\tfmt.Fprint(buf, CyanBold, s.Description)\n\n\t\tvar statusColor string\n\t\tvar sep string\n\t\tswitch {\n\t\tcase s.Error != nil:\n\t\t\tstatusColor = RedBold\n\t\t\tsep = \"\\n\"\n\t\tcase s.Dead:\n\t\t\tstatusColor = GreenBold\n\t\t\tsep = \" \"\n\t\tdefault:\n\t\t\tstatusColor = ResetColor\n\t\t\tsep = \"\\n\"\n\t\t}\n\n\t\tfmt.Fprint(buf, statusColor, sep)\n\n\t\tif s.Error != nil {\n\t\t\tfmt.Fprintf(buf, \"Failed: %v\\n\", s.Error)\n\t\t}\n\n\t\tfmt.Fprintln(buf, s.Status[0])\n\n\t\t\/\/ Skip last line if it's blank we're not getting any more lines\n\t\t\/\/ prevents the status from jumping up and down\n\t\tif !s.Dead || len(s.Status[1]) > 0 {\n\t\t\tfmt.Fprintln(buf, s.Status[1])\n\t\t}\n\n\t\tfmt.Fprint(buf, ResetColor)\n\n\t\tif !s.Dead {\n\t\t\tanyAlive = true\n\t\t}\n\t\ts.Lock.Unlock()\n\t}\n\n\treturn anyAlive\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/antihax\/evedata\/internal\/redigohelper\"\n\t\"github.com\/antihax\/evedata\/internal\/sqlhelper\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\/models\"\n\t_ \"github.com\/antihax\/evedata\/services\/vanguard\/views\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gorilla\/context\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tlog.SetPrefix(\"evedata vanguard: \")\n\n\tr := redigohelper.ConnectRedisProdPool()\n\tdb := sqlhelper.NewDatabase()\n\n\t\/\/ Make a new service and send it into the background.\n\tvanguard := vanguard.NewVanguard(r, db,\n\t\tos.Getenv(\"ESI_REFRESHKEY\"),\n\t\tos.Getenv(\"ESI_CLIENTID_TOKENSTORE\"),\n\t\tos.Getenv(\"ESI_SECRET_TOKENSTORE\"),\n\t\tos.Getenv(\"ESI_CLIENTID_SSO\"),\n\t\tos.Getenv(\"ESI_SECRET_SSO\"),\n\t\tos.Getenv(\"COOKIE_SECRET\"),\n\t\tos.Getenv(\"DOMAIN\"),\n\t)\n\tlog.Printf(\"Setup Router\\n\")\n\trtr := vanguard.NewRouter()\n\tdefer vanguard.Close()\n\n\t\/\/ Handle command line arguments\n\tif len(os.Args) > 1 {\n\t\tif os.Args[1] == \"dumpdb\" {\n\t\t\t\/\/ Dump the database to sql file.\n\t\t\tlog.Printf(\"Dumping Database to evedata.sql\\n\")\n\t\t\terr := models.DumpDatabase(\".\/sql\/evedata.sql\", \"evedata\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushcache\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*rediscache*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else if os.Args[1] == \"flushredis\" {\n\t\t\t\/\/ Erase everything in redis for modified deployments\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tconn.Do(\"FLUSHALL\")\n\t\t}\n\t}\n\n\tlog.Printf(\"Start Listening\\n\")\n\tgo log.Fatalln(http.ListenAndServe(\":3000\", context.ClearHandler(rtr)))\n\n\tlog.Printf(\"In production\\n\")\n\t\/\/ Handle SIGINT and SIGTERM.\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)\n\tlog.Println(<-ch)\n}\nAdd ability to clear structure failurespackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/antihax\/evedata\/internal\/redigohelper\"\n\t\"github.com\/antihax\/evedata\/internal\/sqlhelper\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\/models\"\n\t_ \"github.com\/antihax\/evedata\/services\/vanguard\/views\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gorilla\/context\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tlog.SetPrefix(\"evedata vanguard: \")\n\n\tr := redigohelper.ConnectRedisProdPool()\n\tdb := sqlhelper.NewDatabase()\n\n\t\/\/ Make a new service and send it into the background.\n\tvanguard := vanguard.NewVanguard(r, db,\n\t\tos.Getenv(\"ESI_REFRESHKEY\"),\n\t\tos.Getenv(\"ESI_CLIENTID_TOKENSTORE\"),\n\t\tos.Getenv(\"ESI_SECRET_TOKENSTORE\"),\n\t\tos.Getenv(\"ESI_CLIENTID_SSO\"),\n\t\tos.Getenv(\"ESI_SECRET_SSO\"),\n\t\tos.Getenv(\"COOKIE_SECRET\"),\n\t\tos.Getenv(\"DOMAIN\"),\n\t)\n\tlog.Printf(\"Setup Router\\n\")\n\trtr := vanguard.NewRouter()\n\tdefer vanguard.Close()\n\n\t\/\/ Handle command line arguments\n\tif len(os.Args) > 1 {\n\t\tif os.Args[1] == \"dumpdb\" {\n\t\t\t\/\/ Dump the database to sql file.\n\t\t\tlog.Printf(\"Dumping Database to evedata.sql\\n\")\n\t\t\terr := models.DumpDatabase(\".\/sql\/evedata.sql\", \"evedata\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushcache\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*rediscache*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushstructures\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing structures\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*structure_failure*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else if os.Args[1] == \"flushredis\" {\n\t\t\t\/\/ Erase everything in redis for modified deployments\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tconn.Do(\"FLUSHALL\")\n\t\t}\n\t}\n\n\tlog.Printf(\"Start Listening\\n\")\n\tgo log.Fatalln(http.ListenAndServe(\":3000\", context.ClearHandler(rtr)))\n\n\tlog.Printf(\"In production\\n\")\n\t\/\/ Handle SIGINT and SIGTERM.\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)\n\tlog.Println(<-ch)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017 The go-hep Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ yoda2rio converts YODA files containing hbook-like values (H1D, H2D, P1D, ...)\n\/\/ into rio files.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ $> yoda2rio rivet.yoda >| rivet.rio\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"go-hep.org\/x\/hep\/hbook\"\n\t\"go-hep.org\/x\/hep\/rio\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"yoda2rio: \")\n\tlog.SetOutput(os.Stderr)\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t`Usage: yoda2rio [options] [ [...]]\n\nex:\n $ yoda2rio rivet.yoda >| rivet.rio\n`)\n\t}\n\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tlog.Printf(\"missing input file name\")\n\t\tflag.Usage()\n\t\tflag.PrintDefaults()\n\t}\n\n\to, err := rio.NewWriter(os.Stdout)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer o.Close()\n\n\tfor _, fname := range flag.Args() {\n\t\tconvert(o, fname)\n\t}\n}\n\nfunc convert(w *rio.Writer, fname string) {\n\tr, err := os.Open(fname)\n\tif err != nil {\n\t\tlog.Fatalf(\"error opening file [%s]: %v\\n\", fname, err)\n\t}\n\tdefer r.Close()\n\n\tvs, err := yodaSlice(r)\n\tif err != nil {\n\t\tlog.Fatalf(\"error decoding YODA file [%s]: %v\\n\", fname, err)\n\t}\n\n\tfor _, v := range vs {\n\t\terr = w.WriteValue(v.Name(), v.Value())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error writing %q from YODA file [%s]: %v\\n\", v.Name(), fname, err)\n\t\t}\n\t}\n}\n\nvar (\n\tyodaHeader = []byte(\"BEGIN YODA_\")\n\tyodaFooter = []byte(\"END YODA_\")\n)\n\nfunc yodaSlice(r io.Reader) ([]Yoda, error) {\n\tvar (\n\t\terr error\n\t\to []Yoda\n\t\tblock = make([]byte, 0, 1024)\n\t\trt reflect.Type\n\t\tname string\n\t)\n\tscan := bufio.NewScanner(r)\n\tfor scan.Scan() {\n\t\traw := scan.Bytes()\n\t\tswitch {\n\t\tcase bytes.HasPrefix(raw, yodaHeader):\n\t\t\trt, name, err = splitHeader(raw)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error parsing YODA header: %v\", err)\n\t\t\t}\n\t\t\tblock = block[:0]\n\t\t\tblock = append(block, raw...)\n\t\t\tblock = append(block, '\\n')\n\n\t\tdefault:\n\t\t\tblock = append(block, raw...)\n\t\t\tblock = append(block, '\\n')\n\n\t\tcase bytes.HasPrefix(raw, yodaFooter):\n\t\t\tblock = append(block, raw...)\n\t\t\tblock = append(block, '\\n')\n\n\t\t\tv := reflect.New(rt).Elem()\n\t\t\terr = v.Addr().Interface().(unmarshalYoda).UnmarshalYODA(block)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error unmarshaling YODA %q (type=%v): %v\\n%v\\n===\\n\", name, rt.Name(), err, string(block))\n\t\t\t}\n\t\t\to = append(o, &yoda{name: name, ptr: v.Addr().Interface()})\n\t\t}\n\t}\n\terr = scan.Err()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn o, nil\n}\n\nfunc splitHeader(raw []byte) (reflect.Type, string, error) {\n\traw = raw[len(yodaHeader):]\n\ti := bytes.Index(raw, []byte(\" \"))\n\tif i == -1 || i >= len(raw) {\n\t\treturn nil, \"\", fmt.Errorf(\"invalid YODA header (missing space)\")\n\t}\n\n\tvar rt reflect.Type\n\n\tswitch string(raw[:i]) {\n\tcase \"HISTO1D\":\n\t\trt = reflect.TypeOf((*hbook.H1D)(nil)).Elem()\n\tcase \"HISTO2D\":\n\t\trt = reflect.TypeOf((*hbook.H2D)(nil)).Elem()\n\tcase \"PROFILE1D\":\n\t\trt = reflect.TypeOf((*hbook.P1D)(nil)).Elem()\n\tcase \"SCATTER2D\":\n\t\trt = reflect.TypeOf((*hbook.S2D)(nil)).Elem()\n\tdefault:\n\t\tlog.Fatalf(\"unhandled YODA object type %q\", string(raw[:i]))\n\t}\n\n\tname := raw[i+2:] \/\/ +2 to also remove the leading '\/'\n\treturn rt, strings.TrimSpace(string(name)), nil\n}\n\ntype Yoda interface {\n\tName() string\n\tValue() interface{}\n}\n\ntype unmarshalYoda interface {\n\tUnmarshalYODA([]byte) error\n}\n\ntype yoda struct {\n\tname string\n\tptr interface{}\n}\n\nfunc (y *yoda) Name() string {\n\treturn y.name\n}\n\nfunc (y *yoda) Value() interface{} {\n\treturn y.ptr\n}\ncmd\/yoda2rio: refactor to use hbook\/yodacnv\/\/ Copyright 2017 The go-hep Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ yoda2rio converts YODA files containing hbook-like values (H1D, H2D, P1D, ...)\n\/\/ into rio files.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ $> yoda2rio rivet.yoda >| rivet.rio\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"go-hep.org\/x\/hep\/hbook\/yodacnv\"\n\t\"go-hep.org\/x\/hep\/rio\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"yoda2rio: \")\n\tlog.SetOutput(os.Stderr)\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t`Usage: yoda2rio [options] [ [...]]\n\nex:\n $ yoda2rio rivet.yoda >| rivet.rio\n`)\n\t}\n\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tlog.Printf(\"missing input file name\")\n\t\tflag.Usage()\n\t\tflag.PrintDefaults()\n\t}\n\n\to, err := rio.NewWriter(os.Stdout)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer o.Close()\n\n\tfor _, fname := range flag.Args() {\n\t\tconvert(o, fname)\n\t}\n}\n\nfunc convert(w *rio.Writer, fname string) {\n\tr, err := os.Open(fname)\n\tif err != nil {\n\t\tlog.Fatalf(\"error opening file [%s]: %v\\n\", fname, err)\n\t}\n\tdefer r.Close()\n\n\tvs, err := yodacnv.Read(r)\n\tif err != nil {\n\t\tlog.Fatalf(\"error decoding YODA file [%s]: %v\\n\", fname, err)\n\t}\n\n\tfor _, v := range vs {\n\t\terr = w.WriteValue(v.Name(), v)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error writing %q from YODA file [%s]: %v\\n\", v.Name(), fname, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2020 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/openconfig\/models-ci\/commonci\"\n)\n\n\/\/ Fake LabelPoster for testing.\ntype postLabelRecorder struct {\n\tlabels []string\n}\n\nfunc (p *postLabelRecorder) PostLabel(labelName, labelColor, owner, repo string, prNumber int) error {\n\tp.labels = append(p.labels, labelName)\n\treturn nil\n}\n\nfunc TestGenOpenConfigValidatorScript(t *testing.T) {\n\tprNumber = 1\n\tbasicModelMap, err := commonci.ParseOCModels(\"testdata\")\n\tif err != nil {\n\t\tt.Fatalf(\"TestGenOpenConfigLinterScript: Failed to parse models for testing: %v\", err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tinValidatorName string\n\t\tinModelMap commonci.OpenConfigModelMap\n\t\tinDisabledModelPaths map[string]bool\n\t\twantCmd string\n\t\twantSkipLabels []string\n\t\twantErr bool\n\t}{{\n\t\tname: \"basic pyang\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyang\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic pyang with model to be skipped\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyang\",\n\t\tinDisabledModelPaths: map[string]bool{\"acl\": true, \"dne\": true},\n\t\twantSkipLabels: []string{\"skipped: acl\"},\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic oc-pyang\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"oc-pyang\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/oc-pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n --openconfig\n --ignore-error=OC_RELATIVE_PATH\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic pyangbind\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyangbind\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyangbind\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n -f pybind\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n local options=( -o \"$1\".\"$2\".binding.py \"${options[@]}\" )\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic goyang-ygot\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"goyang-ygot\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/goyang-ygot\nmkdir -p \"$workdir\"\ncmd=\"\/go\/bin\/generator\"\noptions=(\n -path=testdata,\/workspace\/third_party\/ietf\n -package_name=exampleoc -generate_fakeroot -fakeroot_name=device -compress_paths=true\n -shorten_enum_leaf_names -trim_enum_openconfig_prefix -typedef_enum_with_defmod -enum_suffix_for_simple_union_enums\n -exclude_modules=ietf-interfaces -generate_rename -generate_append -generate_getters\n -generate_leaf_getters -generate_delete -annotations\n -list_builder_key_threshold=3\n)\nscript_options=(\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n outdir=\/go\/src\/\"$1\".\"$2\"\/\n mkdir \"$outdir\"\n local options=( -output_file=\"$outdir\"\/oc.go \"${options[@]}\" )\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n status=0\n $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass) || status=1\n $(cd \"$outdir\" && go get && go build > ${prefix}pass) || status=1\n if [[ $status -eq \"1\" ]]; then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic yanglint\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"yanglint\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/yanglint\nmkdir -p \"$workdir\"\ncmd=\"yanglint\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic confd\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"confd\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/confd\nmkdir -p \"$workdir\"\nstatus=0\n$1 -c --yangpath $2 testdata\/acl\/openconfig-acl.yang &>> \/workspace\/results\/confd\/acl==openconfig-acl==pass || status=1\n$1 -c --yangpath $2 testdata\/acl\/openconfig-acl-evil-twin.yang &>> \/workspace\/results\/confd\/acl==openconfig-acl==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/acl==openconfig-acl==pass \/workspace\/results\/confd\/acl==openconfig-acl==fail\nfi\nstatus=0\n$1 -c --yangpath $2 testdata\/optical-transport\/openconfig-optical-amplifier.yang &>> \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==pass \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==fail\nfi\nstatus=0\n$1 -c --yangpath $2 testdata\/optical-transport\/openconfig-transport-line-protection.yang &>> \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==pass \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==fail\nfi\nwait\n`,\n\t}, {\n\t\tname: \"basic misc-checks\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"misc-checks\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/misc-checks\nmkdir -p \"$workdir\"\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang > \/workspace\/results\/misc-checks\/acl.openconfig-acl.pr-file-parse-log; then\n >&2 echo \"parse of acl.openconfig-acl reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-optical-amplifier.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-optical-amplifier.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-optical-amplifier reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-transport-line-connectivity.yang testdata\/optical-transport\/openconfig-wavelength-router.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-wavelength-router.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-wavelength-router reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-transport-line-protection.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-transport-line-protection.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-transport-line-protection reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-optical-attenuator.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-optical-attenuator.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-optical-attenuator reported non-zero status.\"\nfi\nwait\n`,\n\t}, {\n\t\tname: \"unrecognized validatorID\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"foo\",\n\t\twantErr: true,\n\t}}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlabelRecorder := &postLabelRecorder{}\n\t\t\tdisabledModelPaths = tt.inDisabledModelPaths\n\n\t\t\tgot, err := genOpenConfigValidatorScript(labelRecorder, tt.inValidatorName, \"\", tt.inModelMap)\n\t\t\tif got := err != nil; got != tt.wantErr {\n\t\t\t\tt.Fatalf(\"got error %v,\twantErr: %v\", err, tt.wantErr)\n\t\t\t}\n\t\t\tif diff := cmp.Diff(strings.Split(tt.wantCmd, \"\\n\"), strings.Split(got, \"\\n\")); diff != \"\" {\n\t\t\t\tt.Errorf(\"(-want, +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.wantSkipLabels, labelRecorder.labels); diff != \"\" {\n\t\t\t\tt.Errorf(\"skipped models (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\ndebug\/\/ Copyright 2020 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/openconfig\/models-ci\/commonci\"\n)\n\n\/\/ Fake LabelPoster for testing.\ntype postLabelRecorder struct {\n\tlabels []string\n}\n\nfunc (p *postLabelRecorder) PostLabel(labelName, labelColor, owner, repo string, prNumber int) error {\n\tp.labels = append(p.labels, labelName)\n\treturn nil\n}\n\nfunc TestGenOpenConfigValidatorScript(t *testing.T) {\n\tprNumber = 1\n\tbasicModelMap, err := commonci.ParseOCModels(\"testdata\")\n\tif err != nil {\n\t\tt.Fatalf(\"TestGenOpenConfigLinterScript: Failed to parse models for testing: %v\", err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tinValidatorName string\n\t\tinModelMap commonci.OpenConfigModelMap\n\t\tinDisabledModelPaths map[string]bool\n\t\twantCmd string\n\t\twantSkipLabels []string\n\t\twantErr bool\n\t}{{\n\t\tname: \"basic pyang\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyang\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic pyang with model to be skipped\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyang\",\n\t\tinDisabledModelPaths: map[string]bool{\"acl\": true, \"dne\": true},\n\t\twantSkipLabels: []string{\"skipped: acl\"},\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic oc-pyang\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"oc-pyang\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/oc-pyang\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n --openconfig\n --ignore-error=OC_RELATIVE_PATH\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic pyangbind\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"pyangbind\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/pyangbind\nmkdir -p \"$workdir\"\nPYANG_MSG_TEMPLATE='messages:{{path:\"{file}\" line:{line} code:\"{code}\" type:\"{type}\" level:{level} message:'\"'{msg}'}}\"\ncmd=\"$@\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n -f pybind\n)\nscript_options=(\n --msg-template \"$PYANG_MSG_TEMPLATE\"\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n local options=( -o \"$1\".\"$2\".binding.py \"${options[@]}\" )\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic goyang-ygot\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"goyang-ygot\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/goyang-ygot\nmkdir -p \"$workdir\"\ncmd=\"\/go\/bin\/generator\"\noptions=(\n -path=testdata,\/workspace\/third_party\/ietf\n -package_name=exampleoc -generate_fakeroot -fakeroot_name=device -compress_paths=true\n -shorten_enum_leaf_names -trim_enum_openconfig_prefix -typedef_enum_with_defmod -enum_suffix_for_simple_union_enums\n -exclude_modules=ietf-interfaces -generate_rename -generate_append -generate_getters\n -generate_leaf_getters -generate_delete -annotations\n -list_builder_key_threshold=3\n)\nscript_options=(\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n outdir=\/go\/src\/\"$1\".\"$2\"\/\n mkdir \"$outdir\"\n local options=( -output_file=\"$outdir\"\/oc.go \"${options[@]}\" )\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n status=0\n $cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass || status=1\n cd \"$outdir\" && go get && go build || status=1\n if [[ $status -eq \"1\" ]]; then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic yanglint\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"yanglint\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/yanglint\nmkdir -p \"$workdir\"\ncmd=\"yanglint\"\noptions=(\n -p testdata\n -p \/workspace\/third_party\/ietf\n)\nscript_options=(\n)\nfunction run-dir() {\n declare prefix=\"$workdir\"\/\"$1\"==\"$2\"==\n shift 2\n echo $cmd \"${options[@]}\" \"$@\" > ${prefix}cmd\n if ! $($cmd \"${options[@]}\" \"${script_options[@]}\" \"$@\" &> ${prefix}pass); then\n mv ${prefix}pass ${prefix}fail\n fi\n}\nrun-dir \"acl\" \"openconfig-acl\" testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang &\nrun-dir \"optical-transport\" \"openconfig-optical-amplifier\" testdata\/optical-transport\/openconfig-optical-amplifier.yang &\nrun-dir \"optical-transport\" \"openconfig-transport-line-protection\" testdata\/optical-transport\/openconfig-transport-line-protection.yang &\nwait\n`,\n\t}, {\n\t\tname: \"basic confd\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"confd\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/confd\nmkdir -p \"$workdir\"\nstatus=0\n$1 -c --yangpath $2 testdata\/acl\/openconfig-acl.yang &>> \/workspace\/results\/confd\/acl==openconfig-acl==pass || status=1\n$1 -c --yangpath $2 testdata\/acl\/openconfig-acl-evil-twin.yang &>> \/workspace\/results\/confd\/acl==openconfig-acl==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/acl==openconfig-acl==pass \/workspace\/results\/confd\/acl==openconfig-acl==fail\nfi\nstatus=0\n$1 -c --yangpath $2 testdata\/optical-transport\/openconfig-optical-amplifier.yang &>> \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==pass \/workspace\/results\/confd\/optical-transport==openconfig-optical-amplifier==fail\nfi\nstatus=0\n$1 -c --yangpath $2 testdata\/optical-transport\/openconfig-transport-line-protection.yang &>> \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==pass || status=1\nif [[ $status -eq \"1\" ]]; then\n mv \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==pass \/workspace\/results\/confd\/optical-transport==openconfig-transport-line-protection==fail\nfi\nwait\n`,\n\t}, {\n\t\tname: \"basic misc-checks\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"misc-checks\",\n\t\twantCmd: `#!\/bin\/bash\nworkdir=\/workspace\/results\/misc-checks\nmkdir -p \"$workdir\"\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/acl\/openconfig-acl.yang testdata\/acl\/openconfig-acl-evil-twin.yang > \/workspace\/results\/misc-checks\/acl.openconfig-acl.pr-file-parse-log; then\n >&2 echo \"parse of acl.openconfig-acl reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-optical-amplifier.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-optical-amplifier.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-optical-amplifier reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-transport-line-connectivity.yang testdata\/optical-transport\/openconfig-wavelength-router.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-wavelength-router.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-wavelength-router reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-transport-line-protection.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-transport-line-protection.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-transport-line-protection reported non-zero status.\"\nfi\nif ! \/go\/bin\/ocversion -p testdata,\/workspace\/third_party\/ietf testdata\/optical-transport\/openconfig-optical-attenuator.yang > \/workspace\/results\/misc-checks\/optical-transport.openconfig-optical-attenuator.pr-file-parse-log; then\n >&2 echo \"parse of optical-transport.openconfig-optical-attenuator reported non-zero status.\"\nfi\nwait\n`,\n\t}, {\n\t\tname: \"unrecognized validatorID\",\n\t\tinModelMap: basicModelMap,\n\t\tinValidatorName: \"foo\",\n\t\twantErr: true,\n\t}}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlabelRecorder := &postLabelRecorder{}\n\t\t\tdisabledModelPaths = tt.inDisabledModelPaths\n\n\t\t\tgot, err := genOpenConfigValidatorScript(labelRecorder, tt.inValidatorName, \"\", tt.inModelMap)\n\t\t\tif got := err != nil; got != tt.wantErr {\n\t\t\t\tt.Fatalf(\"got error %v,\twantErr: %v\", err, tt.wantErr)\n\t\t\t}\n\t\t\tif diff := cmp.Diff(strings.Split(tt.wantCmd, \"\\n\"), strings.Split(got, \"\\n\")); diff != \"\" {\n\t\t\t\tt.Errorf(\"(-want, +got):\\n%s\", diff)\n\t\t\t}\n\n\t\t\tif diff := cmp.Diff(tt.wantSkipLabels, labelRecorder.labels); diff != \"\" {\n\t\t\t\tt.Errorf(\"skipped models (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2015 Steve Francia .\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar cmdDirs = [...]string{\"cmd\", \"cmds\", \"command\", \"commands\"}\nvar srcPaths []string\n\nfunc init() {\n\t\/\/ Initialize srcPaths.\n\tenvGoPath := os.Getenv(\"GOPATH\")\n\tgoPaths := filepath.SplitList(envGoPath)\n\tif len(goPaths) == 0 {\n\t\ter(\"$GOPATH is not set\")\n\t}\n\tsrcPaths = make([]string, 0, len(goPaths))\n\tfor _, goPath := range goPaths {\n\t\tsrcPaths = append(srcPaths, filepath.Join(goPath, \"src\"))\n\t}\n}\n\nfunc er(msg interface{}) {\n\tfmt.Println(\"Error:\", msg)\n\tos.Exit(1)\n}\n\n\/\/ isEmpty checks if a given path is empty.\n\/\/ Hidden files in path are ignored.\nfunc isEmpty(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\ter(err)\n\t}\n\n\tif !fi.IsDir() {\n\t\treturn fi.Size() == 0\n\t}\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\ter(err)\n\t}\n\tdefer f.Close()\n\n\tnames, err := f.Readdirnames(-1)\n\tif err != nil && err != io.EOF {\n\t\ter(err)\n\t}\n\n\tfor _, name := range names {\n\t\tif len(name) > 0 && name[0] != '.' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ exists checks if a file or directory exists.\nfunc exists(path string) bool {\n\tif path == \"\" {\n\t\treturn false\n\t}\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif !os.IsNotExist(err) {\n\t\ter(err)\n\t}\n\treturn false\n}\n\nfunc executeTemplate(tmplStr string, data interface{}) (string, error) {\n\ttmpl, err := template.New(\"\").Funcs(template.FuncMap{\"comment\": commentifyString}).Parse(tmplStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\terr = tmpl.Execute(buf, data)\n\treturn buf.String(), err\n}\n\nfunc writeStringToFile(path string, s string) error {\n\treturn writeToFile(path, strings.NewReader(s))\n}\n\n\/\/ writeToFile writes r to file with path only\n\/\/ if file\/directory on given path doesn't exist.\n\/\/ If file\/directory exists on given path, then\n\/\/ it terminates app and prints an appropriate error.\nfunc writeToFile(path string, r io.Reader) error {\n\tif exists(path) {\n\t\treturn fmt.Errorf(\"%v already exists\", path)\n\t}\n\n\tdir := filepath.Dir(path)\n\tif dir != \"\" {\n\t\tif err := os.MkdirAll(dir, 0777); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, r)\n\treturn err\n}\n\n\/\/ commentfyString comments every line of in.\nfunc commentifyString(in string) string {\n\tvar newlines []string\n\tlines := strings.Split(in, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.HasPrefix(line, \"\/\/\") {\n\t\t\tnewlines = append(newlines, line)\n\t\t} else {\n\t\t\tif line == \"\" {\n\t\t\t\tnewlines = append(newlines, \"\/\/\")\n\t\t\t} else {\n\t\t\t\tnewlines = append(newlines, \"\/\/ \"+line)\n\t\t\t}\n\t\t}\n\t}\n\treturn strings.Join(newlines, \"\\n\")\n}\nSupport default value of $GOPATH (#532)\/\/ Copyright © 2015 Steve Francia .\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar cmdDirs = [...]string{\"cmd\", \"cmds\", \"command\", \"commands\"}\nvar srcPaths []string\n\nfunc init() {\n\t\/\/ Initialize srcPaths.\n\tenvGoPath := os.Getenv(\"GOPATH\")\n\tgoPaths := filepath.SplitList(envGoPath)\n\tif len(goPaths) == 0 {\n\t\t\/\/ Adapted from https:\/\/github.com\/Masterminds\/glide\/pull\/798\/files.\n\t\t\/\/ As of Go 1.8 the GOPATH is no longer required to be set. Instead there\n\t\t\/\/ is a default value. If there is no GOPATH check for the default value.\n\t\t\/\/ Note, checking the GOPATH first to avoid invoking the go toolchain if\n\t\t\/\/ possible.\n\n\t\tgoExecutable := os.Getenv(\"COBRA_GO_EXECUTABLE\")\n\t\tif len(goExecutable) <= 0 {\n\t\t\tgoExecutable = \"go\"\n\t\t}\n\n\t\tout, err := exec.Command(goExecutable, \"env\", \"GOPATH\").Output()\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\ttoolchainGoPath := strings.TrimSpace(string(out))\n\t\tgoPaths = filepath.SplitList(toolchainGoPath)\n\t\tif len(goPaths) == 0 {\n\t\t\ter(\"$GOPATH is not set\")\n\t\t}\n\t}\n\tsrcPaths = make([]string, 0, len(goPaths))\n\tfor _, goPath := range goPaths {\n\t\tsrcPaths = append(srcPaths, filepath.Join(goPath, \"src\"))\n\t}\n}\n\nfunc er(msg interface{}) {\n\tfmt.Println(\"Error:\", msg)\n\tos.Exit(1)\n}\n\n\/\/ isEmpty checks if a given path is empty.\n\/\/ Hidden files in path are ignored.\nfunc isEmpty(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\ter(err)\n\t}\n\n\tif !fi.IsDir() {\n\t\treturn fi.Size() == 0\n\t}\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\ter(err)\n\t}\n\tdefer f.Close()\n\n\tnames, err := f.Readdirnames(-1)\n\tif err != nil && err != io.EOF {\n\t\ter(err)\n\t}\n\n\tfor _, name := range names {\n\t\tif len(name) > 0 && name[0] != '.' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ exists checks if a file or directory exists.\nfunc exists(path string) bool {\n\tif path == \"\" {\n\t\treturn false\n\t}\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif !os.IsNotExist(err) {\n\t\ter(err)\n\t}\n\treturn false\n}\n\nfunc executeTemplate(tmplStr string, data interface{}) (string, error) {\n\ttmpl, err := template.New(\"\").Funcs(template.FuncMap{\"comment\": commentifyString}).Parse(tmplStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\terr = tmpl.Execute(buf, data)\n\treturn buf.String(), err\n}\n\nfunc writeStringToFile(path string, s string) error {\n\treturn writeToFile(path, strings.NewReader(s))\n}\n\n\/\/ writeToFile writes r to file with path only\n\/\/ if file\/directory on given path doesn't exist.\n\/\/ If file\/directory exists on given path, then\n\/\/ it terminates app and prints an appropriate error.\nfunc writeToFile(path string, r io.Reader) error {\n\tif exists(path) {\n\t\treturn fmt.Errorf(\"%v already exists\", path)\n\t}\n\n\tdir := filepath.Dir(path)\n\tif dir != \"\" {\n\t\tif err := os.MkdirAll(dir, 0777); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, r)\n\treturn err\n}\n\n\/\/ commentfyString comments every line of in.\nfunc commentifyString(in string) string {\n\tvar newlines []string\n\tlines := strings.Split(in, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.HasPrefix(line, \"\/\/\") {\n\t\t\tnewlines = append(newlines, line)\n\t\t} else {\n\t\t\tif line == \"\" {\n\t\t\t\tnewlines = append(newlines, \"\/\/\")\n\t\t\t} else {\n\t\t\t\tnewlines = append(newlines, \"\/\/ \"+line)\n\t\t\t}\n\t\t}\n\t}\n\treturn strings.Join(newlines, \"\\n\")\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"net\/http\"\n\t\"flag\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\tph \"github.com\/advptr\/reverse-proxy\/proxyhandler\"\n\t\"crypto\/x509\"\n)\n\n\/\/ Main config\ntype Config struct {\n\tTrustFiles []string `json:\"trust-files\"`\n\tRoutes []ph.Route `json:\"routes\"\"`\n}\n\n\n\/\/ Program Options\ntype Options struct {\n\tServerAddress string\n\tConfigFile string\n}\n\n\n\/\/ Main Bootstrap\nfunc main() {\n\targs := args()\n\tconfig := config(args.ConfigFile)\n\ttrustedCertPool := config.trustedCertPool()\n\tfor _, r := range config.Routes {\n\t\thandler := ph.NewWSHandler(r, trustedCertPool)\n\t\thttp.HandleFunc(handler.Path(), handler.Handle)\n\t}\n\tlog.Printf(\"reverse-proxy on %v\\n\", args.ServerAddress)\n\n\terr := http.ListenAndServe(args.ServerAddress, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\n\/\/ Parse command args\nfunc args() Options {\n\tconst (\n\t\tdefaultServerAddress = \":80\"\n\t\tserverAddressUsage = \"server address: ':80', '0.0.0.0:8080'...\"\n\t\tdefaultRouteConfig = \"config.json\"\n\t\trouteConfigUsage = \"configuration file: 'config.json'\"\n\t)\n\taddress := flag.String(\"address\", defaultServerAddress, serverAddressUsage)\n\tconfig := flag.String(\"config\", defaultRouteConfig, routeConfigUsage)\n\tflag.Parse()\n\n\treturn Options{*address, *config}\n}\n\n\/\/\nfunc (c *Config) trustedCertPool() (*x509.CertPool) {\n\ttrustedCertPool := x509.NewCertPool()\n\tfor _, file := range c.TrustFiles {\n\t\ttrustedCert, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttrustedCertPool.AppendCertsFromPEM(trustedCert);\n\t}\n\n\treturn trustedCertPool\n}\n\n\n\/\/ Unmarshal routes from JSON configuration file\nfunc config(configFile string) Config {\n\tfile, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar config Config\n\terr = json.Unmarshal(file, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn config\n}\n\nformattedpackage main\n\nimport (\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"flag\"\n\tph \"github.com\/advptr\/reverse-proxy\/proxyhandler\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n)\n\n\/\/ Main config\ntype Config struct {\n\tTrustFiles []string `json:\"trust-files\"`\n\tRoutes []ph.Route `json:\"routes\"\"`\n}\n\n\/\/ Program Options\ntype Options struct {\n\tServerAddress string\n\tConfigFile string\n}\n\n\/\/ Main Bootstrap\nfunc main() {\n\targs := args()\n\tconfig := config(args.ConfigFile)\n\ttrustedCertPool := config.trustedCertPool()\n\tfor _, r := range config.Routes {\n\t\thandler := ph.NewWSHandler(r, trustedCertPool)\n\t\thttp.HandleFunc(handler.Path(), handler.Handle)\n\t}\n\tlog.Printf(\"reverse-proxy on %v\\n\", args.ServerAddress)\n\n\terr := http.ListenAndServe(args.ServerAddress, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Parse command args\nfunc args() Options {\n\tconst (\n\t\tdefaultServerAddress = \":80\"\n\t\tserverAddressUsage = \"server address: ':80', '0.0.0.0:8080'...\"\n\t\tdefaultRouteConfig = \"config.json\"\n\t\trouteConfigUsage = \"configuration file: 'config.json'\"\n\t)\n\taddress := flag.String(\"address\", defaultServerAddress, serverAddressUsage)\n\tconfig := flag.String(\"config\", defaultRouteConfig, routeConfigUsage)\n\tflag.Parse()\n\n\treturn Options{*address, *config}\n}\n\n\/\/\nfunc (c *Config) trustedCertPool() *x509.CertPool {\n\ttrustedCertPool := x509.NewCertPool()\n\tfor _, file := range c.TrustFiles {\n\t\ttrustedCert, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttrustedCertPool.AppendCertsFromPEM(trustedCert)\n\t}\n\n\treturn trustedCertPool\n}\n\n\/\/ Unmarshal routes from JSON configuration file\nfunc config(configFile string) Config {\n\tfile, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar config Config\n\terr = json.Unmarshal(file, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn config\n}\n<|endoftext|>"} {"text":"\/\/ This file is an example of an S3-pulling plugin. This is a real-world\n\/\/ plugin that can actually be used in a production environment (compared to\n\/\/ the more general but dangerous \"external-images\" plugin). This requires you\n\/\/ to put your AWS access key information into the environment per AWS's\n\/\/ standard credential management: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.\n\/\/ You may also put access keys in $HOME\/.aws\/credentials (or\n\/\/ docker\/s3credentials if you're using the docker-compose example override\n\/\/ setup). See docker\/s3credentials.example for an example credentials file.\n\/\/\n\/\/ When a resource is requested, if its IIIF id begins with \"s3:\", we treat the\n\/\/ rest of the id as an s3 id to be pulled from the configured zone and bucket.\n\/\/ As zone and bucket are configured on the server end, attack vectors seen in\n\/\/ the external images plugin are effectively nullified.\n\/\/\n\/\/ We assume the asset is already a format RAIS can serve (preferably JP2), and\n\/\/ we cache it locally with the same extension it has in S3. The IDToPath\n\/\/ return is the cached path so that RAIS can use the cached file immediately\n\/\/ after download. The JP2 cache is configurable via `S3Cache` in the RAIS\n\/\/ toml file or by setting `RAIS_S3CACHE` in the environment, and defaults to\n\/\/ `\/var\/cache\/rais-s3`.\n\/\/\n\/\/ Expiration of cached files must be managed externally (to avoid\n\/\/ over-complicating this plugin). A simple approach could be a cron job that\n\/\/ wipes out all cached data if it hasn't been accessed in the past 24 hours:\n\/\/\n\/\/ find \/var\/cache\/rais-s3 -type f -atime +1 -exec rm {} \\;\n\/\/\n\/\/ Depending how fast the cache grows, how much disk space you have available,\n\/\/ and how much variety you have in S3, you may want to monitor the cache\n\/\/ closely and tweak this cron job example as needed, or come up with something\n\/\/ more sophisticated.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"hash\/fnv\"\n\t\"path\/filepath\"\n\t\"rais\/src\/iiif\"\n\t\"rais\/src\/plugins\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/uoregon-libraries\/gopkg\/fileutil\"\n\t\"github.com\/uoregon-libraries\/gopkg\/logger\"\n)\n\nvar downloading = make(map[string]bool)\nvar m sync.RWMutex\n\nvar l *logger.Logger\n\nvar s3cache, s3zone, s3bucket string\n\n\/\/ Disabled lets the plugin manager know not to add this plugin's functions to\n\/\/ the global list unless sanity checks in Initialize() pass\nvar Disabled = true\n\n\/\/ Initialize sets up package variables for the s3 pulls and verifies sanity of\n\/\/ some of the configuration\nfunc Initialize() {\n\tviper.SetDefault(\"S3Cache\", \"\/var\/local\/rais-s3\")\n\ts3cache = viper.GetString(\"S3Cache\")\n\ts3zone = viper.GetString(\"S3Zone\")\n\ts3bucket = viper.GetString(\"S3Bucket\")\n\n\tif s3zone == \"\" {\n\t\tl.Infof(\"S3 plugin will not be enabled: S3Zone must be set in rais.toml or RAIS_S3ZONE must be set in the environment\")\n\t\treturn\n\t}\n\n\tif s3bucket == \"\" {\n\t\tl.Infof(\"S3 plugin will not be enabled: S3Bucket must be set in rais.toml or RAIS_S3BUCKET must be set in the environment\")\n\t\treturn\n\t}\n\n\tl.Debugf(\"Setting S3 cache location to %q\", s3cache)\n\tl.Debugf(\"Setting S3 zone to %q\", s3zone)\n\tl.Debugf(\"Setting S3 bucket to %q\", s3bucket)\n\tDisabled = false\n\n\tif fileutil.IsDir(s3cache) {\n\t\treturn\n\t}\n\tif !fileutil.MustNotExist(s3cache) {\n\t\tl.Fatalf(\"S3 plugin failure: %q must not exist or else must be a directory\", s3cache)\n\t}\n}\n\n\/\/ SetLogger is called by the RAIS server's plugin manager to let plugins use\n\/\/ the central logger\nfunc SetLogger(raisLogger *logger.Logger) {\n\tl = raisLogger\n}\n\nfunc buckets(s3ID string) (string, string) {\n\tvar h = fnv.New32()\n\th.Write([]byte(s3ID))\n\tvar val = int(h.Sum32() \/ 10000)\n\treturn strconv.Itoa(val % 100), strconv.Itoa((val \/ 100) % 100)\n}\n\n\/\/ IDToPath implements the auto-download logic when a IIIF ID\n\/\/ starts with \"s3:\"\nfunc IDToPath(id iiif.ID) (path string, err error) {\n\tvar ids = string(id)\n\tif len(ids) < 4 {\n\t\treturn \"\", plugins.ErrSkipped\n\t}\n\n\tif ids[:3] != \"s3:\" {\n\t\treturn \"\", plugins.ErrSkipped\n\t}\n\n\t\/\/ Check cache - don't re-download\n\tvar s3ID = ids[3:]\n\tvar bucket1, bucket2 = buckets(s3ID)\n\tpath = filepath.Join(s3cache, bucket1, bucket2, s3ID)\n\n\t\/\/ See if this file is currently being downloaded; if so we need to wait\n\tvar timeout = time.Now().Add(time.Second * 10)\n\tfor isDownloading(s3ID) {\n\t\ttime.Sleep(time.Millisecond * 250)\n\t\tif time.Now().After(timeout) {\n\t\t\treturn \"\", errors.New(\"timed out waiting for s3 download\")\n\t\t}\n\t}\n\n\tl.Debugf(\"s3-images plugin: Checking for cached file at %q\", path)\n\tif fileutil.MustNotExist(path) {\n\t\terr = pullImage(s3ID, path)\n\t} else {\n\t\tl.Debugf(\"s3-images plugin: cached file found\")\n\t}\n\n\treturn path, err\n}\n\nfunc pullImage(s3ID, path string) error {\n\tsetIsDownloading(s3ID)\n\tdefer clearIsDownloading(s3ID)\n\treturn s3download(s3ID, path)\n}\n\nfunc isDownloading(s3ID string) bool {\n\tm.RLock()\n\tdefer m.RUnlock()\n\treturn downloading[s3ID]\n}\n\nfunc setIsDownloading(s3ID string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tdownloading[s3ID] = true\n}\n\nfunc clearIsDownloading(s3ID string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tdelete(downloading, s3ID)\n}\nplugins\/s3-images: log S3 downloads\/\/ This file is an example of an S3-pulling plugin. This is a real-world\n\/\/ plugin that can actually be used in a production environment (compared to\n\/\/ the more general but dangerous \"external-images\" plugin). This requires you\n\/\/ to put your AWS access key information into the environment per AWS's\n\/\/ standard credential management: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.\n\/\/ You may also put access keys in $HOME\/.aws\/credentials (or\n\/\/ docker\/s3credentials if you're using the docker-compose example override\n\/\/ setup). See docker\/s3credentials.example for an example credentials file.\n\/\/\n\/\/ When a resource is requested, if its IIIF id begins with \"s3:\", we treat the\n\/\/ rest of the id as an s3 id to be pulled from the configured zone and bucket.\n\/\/ As zone and bucket are configured on the server end, attack vectors seen in\n\/\/ the external images plugin are effectively nullified.\n\/\/\n\/\/ We assume the asset is already a format RAIS can serve (preferably JP2), and\n\/\/ we cache it locally with the same extension it has in S3. The IDToPath\n\/\/ return is the cached path so that RAIS can use the cached file immediately\n\/\/ after download. The JP2 cache is configurable via `S3Cache` in the RAIS\n\/\/ toml file or by setting `RAIS_S3CACHE` in the environment, and defaults to\n\/\/ `\/var\/cache\/rais-s3`.\n\/\/\n\/\/ Expiration of cached files must be managed externally (to avoid\n\/\/ over-complicating this plugin). A simple approach could be a cron job that\n\/\/ wipes out all cached data if it hasn't been accessed in the past 24 hours:\n\/\/\n\/\/ find \/var\/cache\/rais-s3 -type f -atime +1 -exec rm {} \\;\n\/\/\n\/\/ Depending how fast the cache grows, how much disk space you have available,\n\/\/ and how much variety you have in S3, you may want to monitor the cache\n\/\/ closely and tweak this cron job example as needed, or come up with something\n\/\/ more sophisticated.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"hash\/fnv\"\n\t\"path\/filepath\"\n\t\"rais\/src\/iiif\"\n\t\"rais\/src\/plugins\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/uoregon-libraries\/gopkg\/fileutil\"\n\t\"github.com\/uoregon-libraries\/gopkg\/logger\"\n)\n\nvar downloading = make(map[string]bool)\nvar m sync.RWMutex\n\nvar l *logger.Logger\n\nvar s3cache, s3zone, s3bucket string\n\n\/\/ Disabled lets the plugin manager know not to add this plugin's functions to\n\/\/ the global list unless sanity checks in Initialize() pass\nvar Disabled = true\n\n\/\/ Initialize sets up package variables for the s3 pulls and verifies sanity of\n\/\/ some of the configuration\nfunc Initialize() {\n\tviper.SetDefault(\"S3Cache\", \"\/var\/local\/rais-s3\")\n\ts3cache = viper.GetString(\"S3Cache\")\n\ts3zone = viper.GetString(\"S3Zone\")\n\ts3bucket = viper.GetString(\"S3Bucket\")\n\n\tif s3zone == \"\" {\n\t\tl.Infof(\"S3 plugin will not be enabled: S3Zone must be set in rais.toml or RAIS_S3ZONE must be set in the environment\")\n\t\treturn\n\t}\n\n\tif s3bucket == \"\" {\n\t\tl.Infof(\"S3 plugin will not be enabled: S3Bucket must be set in rais.toml or RAIS_S3BUCKET must be set in the environment\")\n\t\treturn\n\t}\n\n\tl.Debugf(\"Setting S3 cache location to %q\", s3cache)\n\tl.Debugf(\"Setting S3 zone to %q\", s3zone)\n\tl.Debugf(\"Setting S3 bucket to %q\", s3bucket)\n\tDisabled = false\n\n\tif fileutil.IsDir(s3cache) {\n\t\treturn\n\t}\n\tif !fileutil.MustNotExist(s3cache) {\n\t\tl.Fatalf(\"S3 plugin failure: %q must not exist or else must be a directory\", s3cache)\n\t}\n}\n\n\/\/ SetLogger is called by the RAIS server's plugin manager to let plugins use\n\/\/ the central logger\nfunc SetLogger(raisLogger *logger.Logger) {\n\tl = raisLogger\n}\n\nfunc buckets(s3ID string) (string, string) {\n\tvar h = fnv.New32()\n\th.Write([]byte(s3ID))\n\tvar val = int(h.Sum32() \/ 10000)\n\treturn strconv.Itoa(val % 100), strconv.Itoa((val \/ 100) % 100)\n}\n\n\/\/ IDToPath implements the auto-download logic when a IIIF ID\n\/\/ starts with \"s3:\"\nfunc IDToPath(id iiif.ID) (path string, err error) {\n\tvar ids = string(id)\n\tif len(ids) < 4 {\n\t\treturn \"\", plugins.ErrSkipped\n\t}\n\n\tif ids[:3] != \"s3:\" {\n\t\treturn \"\", plugins.ErrSkipped\n\t}\n\n\t\/\/ Check cache - don't re-download\n\tvar s3ID = ids[3:]\n\tvar bucket1, bucket2 = buckets(s3ID)\n\tpath = filepath.Join(s3cache, bucket1, bucket2, s3ID)\n\n\t\/\/ See if this file is currently being downloaded; if so we need to wait\n\tvar timeout = time.Now().Add(time.Second * 10)\n\tfor isDownloading(s3ID) {\n\t\ttime.Sleep(time.Millisecond * 250)\n\t\tif time.Now().After(timeout) {\n\t\t\treturn \"\", errors.New(\"timed out waiting for s3 download\")\n\t\t}\n\t}\n\n\tif fileutil.MustNotExist(path) {\n\t\tl.Debugf(\"s3-images plugin: no cached file at %q; downloading from S3\", path)\n\t\terr = pullImage(s3ID, path)\n\t}\n\n\treturn path, err\n}\n\nfunc pullImage(s3ID, path string) error {\n\tsetIsDownloading(s3ID)\n\tdefer clearIsDownloading(s3ID)\n\treturn s3download(s3ID, path)\n}\n\nfunc isDownloading(s3ID string) bool {\n\tm.RLock()\n\tdefer m.RUnlock()\n\treturn downloading[s3ID]\n}\n\nfunc setIsDownloading(s3ID string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tdownloading[s3ID] = true\n}\n\nfunc clearIsDownloading(s3ID string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tdelete(downloading, s3ID)\n}\n<|endoftext|>"} {"text":"package flappy\n\nimport (\n\tneural \"github.com\/poseidon4o\/go-neural\/src\/neural\"\n\tutil \"github.com\/poseidon4o\/go-neural\/src\/util\"\n\t\"math\"\n\t\"sort\"\n)\n\ntype NeuronName int\n\nconst (\n\tdiffY NeuronName = iota\n\tdiffX NeuronName = iota\n\tvelY NeuronName = iota\n\tH1 NeuronName = iota\n\tH2 NeuronName = iota\n\tH3 NeuronName = iota\n\tH4 NeuronName = iota\n\tjump NeuronName = iota\n\tNRN_COUNT int = iota\n)\n\nfunc nrn(name NeuronName) int {\n\treturn int(name)\n}\n\ntype Flappy struct {\n\tbirds Flock\n\tlvl Level\n\tdrawCb func(pos, size *util.Vector, color uint32)\n\tdrawSize int\n}\n\nfunc (f *Flappy) Completed() float64 {\n\treturn f.birds[0].bestX \/ f.lvl.size.X\n}\n\nfunc (f *Flappy) Done() bool {\n\treturn f.birds[0].bestX > f.lvl.pylons[len(f.lvl.pylons)-1].X\n}\n\nfunc (f *Flappy) SetDrawRectCb(cb func(pos, size *util.Vector, color uint32)) {\n\tf.drawCb = cb\n}\n\nfunc (f *Flappy) LogicTick(dt float64) {\n\tf.lvl.Step(dt)\n\tf.checkFlock()\n\tf.mutateFlock()\n\tf.thnikFlock()\n}\n\nfunc (f *Flappy) DrawTick() {\n\tvar (\n\t\tred = uint32(0xffff0000)\n\t\tgreen = uint32(0xff00ff00)\n\t\tblue = uint32(0xff0000ff)\n\t)\n\n\tvar tl, size util.Vector\n\tsize.X = float64(f.drawSize)\n\tsize.Y = float64(f.drawSize)\n\n\tfor c := range f.birds {\n\t\tf.drawCb(&f.birds[c].bird.Pos, &size, red)\n\t}\n\n\thSize := float64(PylonHole) \/ 2.0\n\tfor _, pylon := range f.lvl.pylons {\n\t\t\/\/ top part\n\t\ttl.X = pylon.X\n\t\ttl.Y = 0\n\t\tsize.Y = pylon.Y - hSize\n\t\tf.drawCb(&tl, &size, green)\n\n\t\t\/\/ bottom part\n\t\ttl.Y = pylon.Y + hSize\n\t\tsize.Y = f.lvl.size.Y - (pylon.Y + hSize)\n\t\tf.drawCb(&tl, &size, green)\n\n\t\t\/\/ middle point\n\t\ttl.Y = pylon.Y\n\t\tsize.Y = float64(f.drawSize)\n\t\tf.drawCb(&tl, &size, blue)\n\t}\n}\n\nfunc NewFlappy(birdCount int, size *util.Vector) *Flappy {\n\tlevel := NewLevel(int(size.X), int(size.Y))\n\n\tnets := make([]*neural.Net, birdCount, birdCount)\n\tfor c := range nets {\n\t\tnets[c] = neural.NewNet(NRN_COUNT)\n\n\t\t\/\/ diffY- to hidden\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H4)) = 0.0\n\n\t\t\/\/ diffX- to hidden\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H4)) = 0.0\n\n\t\t\/\/ velY - to hidden\n\t\t*nets[c].Synapse(nrn(velY), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H4)) = 0.0\n\n\t\t\/\/ hidden to output\n\t\t*nets[c].Synapse(nrn(H1), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H2), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H3), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H4), nrn(jump)) = 0.0\n\n\t\tnets[c].Randomize()\n\t}\n\n\tlevel.AddBirds(birdCount)\n\tflock := make(Flock, birdCount)\n\tfor c := 0; c < birdCount; c++ {\n\t\tflock[c].bird = level.birds[c]\n\t\tflock[c].brain = nets[c]\n\t\tflock[c].bestX = 0\n\t}\n\n\treturn &Flappy{\n\t\tbirds: flock,\n\t\tlvl: *level,\n\t\tdrawCb: func(pos, size *util.Vector, color uint32) {},\n\t\tdrawSize: 5,\n\t}\n}\n\ntype FBird struct {\n\tbird *Bird\n\tbrain *neural.Net\n\tbestX float64\n\tdead bool\n}\n\ntype Flock []FBird\n\nfunc (birds Flock) Len() int {\n\treturn len(birds)\n}\n\nfunc (birds Flock) Less(c, r int) bool {\n\treturn birds[c].bestX > birds[r].bestX\n}\n\nfunc (birds Flock) Swap(c, r int) {\n\tbirds[c], birds[r] = birds[r], birds[c]\n}\n\n\/\/ will check if going from pos to next will collide\nfunc (f *Flappy) checkFlock() {\n\n\t\/\/ just for readability\n\tcollide := func(aX, bX, cX float64) bool {\n\t\t\/\/ c.X == d.X\n\t\treturn aX-1 <= cX && bX+1 >= cX\n\t}\n\n\thSize := float64(PylonHole \/ 2)\n\n\tfor c := range f.birds {\n\t\tif f.birds[c].bird.Pos.Y >= f.lvl.size.Y || f.birds[c].bird.Pos.Y < 1 {\n\t\t\t\/\/ hit ceeling or floor\n\t\t\tf.birds[c].dead = true\n\t\t\tcontinue\n\t\t}\n\n\t\tpylon := f.lvl.ClosestPylon(&f.birds[c].bird.Pos)\n\t\tif f.birds[c].bird.Pos.Y >= pylon.Y-hSize && f.birds[c].bird.Pos.Y <= pylon.Y+hSize {\n\t\t\t\/\/ in the pylon hole\n\t\t\tcontinue\n\t\t}\n\n\t\tf.birds[c].dead = collide(f.birds[c].bird.Pos.X, f.birds[c].bird.NextPos.X, pylon.X)\n\t}\n\n}\n\nfunc (f *Flappy) thnikFlock() {\n\twg := make(chan struct{}, len(f.birds))\n\n\tthinkBird := func(c int) {\n\t\tnext := f.lvl.FirstPylonAfter(&f.birds[c].bird.Pos)\n\t\tdiffYval := next.Y - f.birds[c].bird.Pos.Y\n\t\tdiffXval := next.X - f.birds[c].bird.Pos.X\n\n\t\tf.birds[c].brain.Stimulate(nrn(diffY), diffYval)\n\t\tf.birds[c].brain.Stimulate(nrn(diffX), diffXval)\n\t\tf.birds[c].brain.Stimulate(nrn(velY), f.birds[c].bird.Vel.Y)\n\n\t\tf.birds[c].brain.Step()\n\t\tif f.birds[c].brain.ValueOf(nrn(jump)) > 0.75 {\n\t\t\tf.birds[c].bird.Vel.Y = -500\n\t\t}\n\n\t\tf.birds[c].brain.Clear()\n\t\twg <- struct{}{}\n\t}\n\n\tfor c := 0; c < len(f.birds); c++ {\n\t\tgo thinkBird(c)\n\t}\n\n\tfor c := 0; c < len(f.birds); c++ {\n\t\t<-wg\n\t}\n}\n\nfunc (f *Flappy) mutateFlock() {\n\tsort.Sort(f.birds)\n\n\trandNet := func() *neural.Net {\n\t\treturn f.birds[int(neural.RandMax(float64(len(f.birds))))].brain\n\t}\n\n\tbest := f.birds[0].brain\n\n\tfor c := range f.birds {\n\t\tif f.birds[c].dead {\n\t\t\tf.birds[c].dead = false\n\t\t\tf.birds[c].bird.Pos = *f.lvl.NewBirdPos()\n\t\t\tf.birds[c].bird.Vel = *util.NewVector(SCROLL_SPEED, 0)\n\n\t\t\tf.birds[c].brain = neural.Cross(best, randNet())\n\n\t\t\tif neural.Chance(0.1) {\n\t\t\t\t\/\/ penalize best achievement due to mutation\n\t\t\t\tf.birds[c].bestX *= 0.99\n\t\t\t\tf.birds[c].brain.Mutate(0.33)\n\t\t\t}\n\t\t} else {\n\t\t\tf.birds[c].bird.Pos = f.birds[c].bird.NextPos\n\t\t\tf.birds[c].bestX = math.Max(f.birds[c].bird.Pos.X, f.birds[c].bestX)\n\t\t}\n\t}\n\n}\nMove declarations in the beginingpackage flappy\n\nimport (\n\tneural \"github.com\/poseidon4o\/go-neural\/src\/neural\"\n\tutil \"github.com\/poseidon4o\/go-neural\/src\/util\"\n\t\"math\"\n\t\"sort\"\n)\n\ntype NeuronName int\n\nconst (\n\tdiffY NeuronName = iota\n\tdiffX NeuronName = iota\n\tvelY NeuronName = iota\n\tH1 NeuronName = iota\n\tH2 NeuronName = iota\n\tH3 NeuronName = iota\n\tH4 NeuronName = iota\n\tjump NeuronName = iota\n\tNRN_COUNT int = iota\n)\n\nfunc nrn(name NeuronName) int {\n\treturn int(name)\n}\n\ntype FBird struct {\n\tbird *Bird\n\tbrain *neural.Net\n\tbestX float64\n\tdead bool\n}\n\ntype Flock []FBird\n\nfunc (birds Flock) Len() int {\n\treturn len(birds)\n}\n\nfunc (birds Flock) Less(c, r int) bool {\n\treturn birds[c].bestX > birds[r].bestX\n}\n\nfunc (birds Flock) Swap(c, r int) {\n\tbirds[c], birds[r] = birds[r], birds[c]\n}\n\ntype Flappy struct {\n\tbirds Flock\n\tlvl Level\n\tdrawCb func(pos, size *util.Vector, color uint32)\n\tdrawSize int\n}\n\nfunc (f *Flappy) Completed() float64 {\n\treturn f.birds[0].bestX \/ f.lvl.size.X\n}\n\nfunc (f *Flappy) Done() bool {\n\treturn f.birds[0].bestX > f.lvl.pylons[len(f.lvl.pylons)-1].X\n}\n\nfunc (f *Flappy) SetDrawRectCb(cb func(pos, size *util.Vector, color uint32)) {\n\tf.drawCb = cb\n}\n\nfunc (f *Flappy) LogicTick(dt float64) {\n\tf.lvl.Step(dt)\n\tf.checkFlock()\n\tf.mutateFlock()\n\tf.thnikFlock()\n}\n\nfunc (f *Flappy) DrawTick() {\n\tvar (\n\t\tred = uint32(0xffff0000)\n\t\tgreen = uint32(0xff00ff00)\n\t\tblue = uint32(0xff0000ff)\n\t)\n\n\tvar tl, size util.Vector\n\tsize.X = float64(f.drawSize)\n\tsize.Y = float64(f.drawSize)\n\n\tfor c := range f.birds {\n\t\tf.drawCb(&f.birds[c].bird.Pos, &size, red)\n\t}\n\n\thSize := float64(PylonHole) \/ 2.0\n\tfor _, pylon := range f.lvl.pylons {\n\t\t\/\/ top part\n\t\ttl.X = pylon.X\n\t\ttl.Y = 0\n\t\tsize.Y = pylon.Y - hSize\n\t\tf.drawCb(&tl, &size, green)\n\n\t\t\/\/ bottom part\n\t\ttl.Y = pylon.Y + hSize\n\t\tsize.Y = f.lvl.size.Y - (pylon.Y + hSize)\n\t\tf.drawCb(&tl, &size, green)\n\n\t\t\/\/ middle point\n\t\ttl.Y = pylon.Y\n\t\tsize.Y = float64(f.drawSize)\n\t\tf.drawCb(&tl, &size, blue)\n\t}\n}\n\nfunc NewFlappy(birdCount int, size *util.Vector) *Flappy {\n\tlevel := NewLevel(int(size.X), int(size.Y))\n\n\tnets := make([]*neural.Net, birdCount, birdCount)\n\tfor c := range nets {\n\t\tnets[c] = neural.NewNet(NRN_COUNT)\n\n\t\t\/\/ diffY- to hidden\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffY), nrn(H4)) = 0.0\n\n\t\t\/\/ diffX- to hidden\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(diffX), nrn(H4)) = 0.0\n\n\t\t\/\/ velY - to hidden\n\t\t*nets[c].Synapse(nrn(velY), nrn(H1)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H2)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H3)) = 0.0\n\t\t*nets[c].Synapse(nrn(velY), nrn(H4)) = 0.0\n\n\t\t\/\/ hidden to output\n\t\t*nets[c].Synapse(nrn(H1), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H2), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H3), nrn(jump)) = 0.0\n\t\t*nets[c].Synapse(nrn(H4), nrn(jump)) = 0.0\n\n\t\tnets[c].Randomize()\n\t}\n\n\tlevel.AddBirds(birdCount)\n\tflock := make(Flock, birdCount)\n\tfor c := 0; c < birdCount; c++ {\n\t\tflock[c].bird = level.birds[c]\n\t\tflock[c].brain = nets[c]\n\t\tflock[c].bestX = 0\n\t}\n\n\treturn &Flappy{\n\t\tbirds: flock,\n\t\tlvl: *level,\n\t\tdrawCb: func(pos, size *util.Vector, color uint32) {},\n\t\tdrawSize: 5,\n\t}\n}\n\n\/\/ will check if going from pos to next will collide\nfunc (f *Flappy) checkFlock() {\n\n\t\/\/ just for readability\n\tcollide := func(aX, bX, cX float64) bool {\n\t\t\/\/ c.X == d.X\n\t\treturn aX-1 <= cX && bX+1 >= cX\n\t}\n\n\thSize := float64(PylonHole \/ 2)\n\n\tfor c := range f.birds {\n\t\tif f.birds[c].bird.Pos.Y >= f.lvl.size.Y || f.birds[c].bird.Pos.Y < 1 {\n\t\t\t\/\/ hit ceeling or floor\n\t\t\tf.birds[c].dead = true\n\t\t\tcontinue\n\t\t}\n\n\t\tpylon := f.lvl.ClosestPylon(&f.birds[c].bird.Pos)\n\t\tif f.birds[c].bird.Pos.Y >= pylon.Y-hSize && f.birds[c].bird.Pos.Y <= pylon.Y+hSize {\n\t\t\t\/\/ in the pylon hole\n\t\t\tcontinue\n\t\t}\n\n\t\tf.birds[c].dead = collide(f.birds[c].bird.Pos.X, f.birds[c].bird.NextPos.X, pylon.X)\n\t}\n\n}\n\nfunc (f *Flappy) thnikFlock() {\n\twg := make(chan struct{}, len(f.birds))\n\n\tthinkBird := func(c int) {\n\t\tnext := f.lvl.FirstPylonAfter(&f.birds[c].bird.Pos)\n\t\tdiffYval := next.Y - f.birds[c].bird.Pos.Y\n\t\tdiffXval := next.X - f.birds[c].bird.Pos.X\n\n\t\tf.birds[c].brain.Stimulate(nrn(diffY), diffYval)\n\t\tf.birds[c].brain.Stimulate(nrn(diffX), diffXval)\n\t\tf.birds[c].brain.Stimulate(nrn(velY), f.birds[c].bird.Vel.Y)\n\n\t\tf.birds[c].brain.Step()\n\t\tif f.birds[c].brain.ValueOf(nrn(jump)) > 0.75 {\n\t\t\tf.birds[c].bird.Vel.Y = -500\n\t\t}\n\n\t\tf.birds[c].brain.Clear()\n\t\twg <- struct{}{}\n\t}\n\n\tfor c := 0; c < len(f.birds); c++ {\n\t\tgo thinkBird(c)\n\t}\n\n\tfor c := 0; c < len(f.birds); c++ {\n\t\t<-wg\n\t}\n}\n\nfunc (f *Flappy) mutateFlock() {\n\tsort.Sort(f.birds)\n\n\trandNet := func() *neural.Net {\n\t\treturn f.birds[int(neural.RandMax(float64(len(f.birds))))].brain\n\t}\n\n\tbest := f.birds[0].brain\n\n\tfor c := range f.birds {\n\t\tif f.birds[c].dead {\n\t\t\tf.birds[c].dead = false\n\t\t\tf.birds[c].bird.Pos = *f.lvl.NewBirdPos()\n\t\t\tf.birds[c].bird.Vel = *util.NewVector(SCROLL_SPEED, 0)\n\n\t\t\tf.birds[c].brain = neural.Cross(best, randNet())\n\n\t\t\tif neural.Chance(0.1) {\n\t\t\t\t\/\/ penalize best achievement due to mutation\n\t\t\t\tf.birds[c].bestX *= 0.99\n\t\t\t\tf.birds[c].brain.Mutate(0.33)\n\t\t\t}\n\t\t} else {\n\t\t\tf.birds[c].bird.Pos = f.birds[c].bird.NextPos\n\t\t\tf.birds[c].bestX = math.Max(f.birds[c].bird.Pos.X, f.birds[c].bestX)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"package stamp\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/dedis\/prifi\/coco\"\n\t\"github.com\/dedis\/prifi\/coco\/coconet\"\n\t\"github.com\/dedis\/prifi\/coco\/hashid\"\n\t\"github.com\/dedis\/prifi\/coco\/proof\"\n\t\"github.com\/dedis\/prifi\/coco\/sign\"\n\t\"github.com\/dedis\/prifi\/coco\/test\/logutils\"\n)\n\ntype Server struct {\n\tcoco.Signer\n\tname string\n\tClients map[string]coconet.Conn\n\n\t\/\/ for aggregating messages from clients\n\tmux sync.Mutex\n\tQueue [][]MustReplyMessage\n\tREADING int\n\tPROCESSING int\n\n\t\/\/ Leaves, Root and Proof for a round\n\tLeaves []hashid.HashId \/\/ can be removed after we verify protocol\n\tRoot hashid.HashId\n\tProofs []proof.Proof\n\n\trLock sync.Mutex\n\tmaxRounds int\n\tcloseChan chan bool\n\n\tLogger string\n\tHostname string\n\tApp string\n}\n\nfunc NewServer(signer coco.Signer) *Server {\n\ts := &Server{}\n\n\ts.Clients = make(map[string]coconet.Conn)\n\ts.Queue = make([][]MustReplyMessage, 2)\n\ts.READING = 0\n\ts.PROCESSING = 1\n\n\ts.Signer = signer\n\ts.Signer.RegisterAnnounceFunc(s.OnAnnounce())\n\ts.Signer.RegisterDoneFunc(s.OnDone())\n\ts.rLock = sync.Mutex{}\n\n\t\/\/ listen for client requests at one port higher\n\t\/\/ than the signing node\n\th, p, err := net.SplitHostPort(s.Signer.Name())\n\tif err == nil {\n\t\ti, err := strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ts.name = net.JoinHostPort(h, strconv.Itoa(i+1))\n\t}\n\ts.Queue[s.READING] = make([]MustReplyMessage, 0)\n\ts.Queue[s.PROCESSING] = make([]MustReplyMessage, 0)\n\ts.closeChan = make(chan bool, 5)\n\treturn s\n}\n\nvar clientNumber int = 0\n\nfunc (s *Server) Close() {\n\tlog.Printf(\"closing stampserver: %p\", s)\n\ts.closeChan <- true\n\ts.Signer.Close()\n}\n\n\/\/ listen for clients connections\n\/\/ this server needs to be running on a different port\n\/\/ than the Signer that is beneath it\nfunc (s *Server) Listen() error {\n\t\/\/ log.Println(\"Listening @ \", s.name)\n\tln, err := net.Listen(\"tcp4\", s.name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tlog.Printf(\"LISTENING TO CLIENTS: %p\", s)\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ handle error\n\t\t\t\tlog.Errorln(\"failed to accept connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc := coconet.NewTCPConnFromNet(conn)\n\t\t\t\/\/ log.Println(\"CLIENT TCP CONNECTION SUCCESSFULLY ESTABLISHED:\", c)\n\n\t\t\tif _, ok := s.Clients[c.Name()]; !ok {\n\t\t\t\ts.Clients[c.Name()] = c\n\n\t\t\t\tgo func(c coconet.Conn) {\n\t\t\t\t\tfor {\n\t\t\t\t\t\ttsm := TimeStampMessage{}\n\t\t\t\t\t\terr := c.Get(&tsm)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"%p Failed to get from child:\", s, err)\n\t\t\t\t\t\t\ts.Close()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch tsm.Type {\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tlog.Errorf(\"Message of unknown type: %v\\n\", tsm.Type)\n\t\t\t\t\t\tcase StampRequestType:\n\t\t\t\t\t\t\t\/\/ log.Println(\"RECEIVED STAMP REQUEST\")\n\t\t\t\t\t\t\ts.mux.Lock()\n\t\t\t\t\t\t\tREADING := s.READING\n\t\t\t\t\t\t\ts.Queue[READING] = append(s.Queue[READING],\n\t\t\t\t\t\t\t\tMustReplyMessage{Tsm: tsm, To: c.Name()})\n\t\t\t\t\t\t\ts.mux.Unlock()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\t\t\t}\n\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Used for goconns\n\/\/ should only be used if clients are created in batch\nfunc (s *Server) ListenToClients() {\n\tlog.Printf(\"LISTENING TO CLIENTS: %p\", s, s.Clients)\n\tfor _, c := range s.Clients {\n\t\tgo func(c coconet.Conn) {\n\t\t\tfor {\n\t\t\t\ttsm := TimeStampMessage{}\n\t\t\t\terr := c.Get(&tsm)\n\t\t\t\tif err == coconet.ErrClosed {\n\t\t\t\t\tlog.Errorf(\"%p Failed to get from client:\", s, err)\n\t\t\t\t\ts.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\t}).Errorf(\"%p failed To get message:\", s, err)\n\t\t\t\t}\n\t\t\t\tswitch tsm.Type {\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Errorln(\"Message of unknown type\")\n\t\t\t\tcase StampRequestType:\n\t\t\t\t\t\/\/ log.Println(\"STAMP REQUEST\")\n\t\t\t\t\ts.mux.Lock()\n\t\t\t\t\tREADING := s.READING\n\t\t\t\t\ts.Queue[READING] = append(s.Queue[READING],\n\t\t\t\t\t\tMustReplyMessage{Tsm: tsm, To: c.Name()})\n\t\t\t\t\ts.mux.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}(c)\n\t}\n}\n\nfunc (s *Server) ConnectToLogger() {\n\treturn\n\tif s.Logger == \"\" || s.Hostname == \"\" || s.App == \"\" {\n\t\tlog.Println(\"skipping connect to logger\")\n\t\treturn\n\t}\n\tlog.Println(\"Connecting to Logger\")\n\tlh, _ := logutils.NewLoggerHook(s.Logger, s.Hostname, s.App)\n\tlog.Println(\"Connected to Logger\")\n\tlog.AddHook(lh)\n}\n\nfunc (s *Server) LogReRun(nextRole string, curRole string) {\n\tif nextRole == \"root\" {\n\t\tvar messg = s.Name() + \" became root\"\n\t\tif curRole == \"root\" {\n\t\t\tmessg = s.Name() + \" remained root\"\n\t\t}\n\n\t\tgo s.ConnectToLogger()\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"role_change\",\n\t\t}).Infoln(messg)\n\t\tlog.Printf(\"role change: %p\", s)\n\n\t} else {\n\t\tvar messg = s.Name() + \" remained regular\"\n\t\tif curRole == \"root\" {\n\t\t\tmessg = s.Name() + \" became regular\"\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"role_change\",\n\t\t}).Infoln(messg)\n\t\tlog.Printf(\"role change: %p\", s)\n\n\t}\n\n}\n\nfunc (s *Server) runAsRoot(nRounds int) string {\n\t\/\/ every 5 seconds start a new round\n\tticker := time.Tick(ROUND_TIME)\n\tlog.Infoln(s.Name(), \"running as root\", s.LastRound(), int64(nRounds))\n\n\tfor {\n\t\tselect {\n\t\tcase nextRole := <-s.ViewChangeCh():\n\t\t\treturn nextRole\n\t\t\t\/\/ s.reRunWith(nextRole, nRounds, true)\n\t\tcase <-ticker:\n\n\t\t\tstart := time.Now()\n\t\t\tlog.Println(s.Name(), \"is STAMP SERVER STARTING SIGNING ROUND FOR:\", s.LastRound()+1, \"of\", nRounds)\n\n\t\t\terr := s.StartSigningRound()\n\t\t\tif err == sign.ChangingViewError {\n\t\t\t\t\/\/ report change in view, and continue with the select\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\t\"type\": \"view_change\",\n\t\t\t\t}).Info(\"Tried to stary signing round on \" + s.Name() + \" but it reports view change in progress\")\n\t\t\t\t\/\/ skip # of failed round\n\t\t\t\t\/\/ s.SetLastSeenRound(s.LastRound() + 1)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\telapsed := time.Since(start)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\"type\": \"root_round\",\n\t\t\t\t\"round\": s.LastRound(),\n\t\t\t\t\"time\": elapsed,\n\t\t\t}).Info(\"root round\")\n\n\t\t\tif s.LastRound() >= int64(nRounds) {\n\t\t\t\tlog.Errorln(s.Name(), \"reports exceeded the max round: terminating\", s.LastRound(), \">=\", nRounds)\n\t\t\t\treturn \"close\"\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *Server) runAsRegular() string {\n\tselect {\n\tcase <-s.closeChan:\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"close\",\n\t\t}).Infoln(\"server\" + s.Name() + \"has closed\")\n\t\treturn \"\"\n\n\tcase nextRole := <-s.ViewChangeCh():\n\t\treturn nextRole\n\t}\n}\n\n\/\/ Listen on client connections. If role is root also send annoucement\n\/\/ for all of the nRounds\nfunc (s *Server) Run(role string, nRounds int) {\n\t\/\/ defer func() {\n\t\/\/ \tlog.Infoln(s.Name(), \"CLOSE AFTER RUN\")\n\t\/\/ \ts.Close()\n\t\/\/ }()\n\tgo func() { err := s.Signer.Listen(); s.Close(); log.Error(err) }()\n\n\ts.rLock.Lock()\n\ts.maxRounds = nRounds\n\ts.rLock.Unlock()\n\n\tvar nextRole string \/\/ next role when view changes\n\tfor {\n\t\tswitch role {\n\n\t\tcase \"root\":\n\t\t\tnextRole = s.runAsRoot(nRounds)\n\t\tcase \"regular\":\n\t\t\tnextRole = s.runAsRegular()\n\t\tcase \"test\":\n\t\t\tticker := time.Tick(2000 * time.Millisecond)\n\t\t\tfor _ = range ticker {\n\t\t\t\ts.AggregateCommits(0)\n\t\t\t}\n\t\t}\n\n\t\tlog.Println(s.Name(), \"nextRole: \", nextRole)\n\t\tif nextRole == \"close\" {\n\t\t\ts.Close()\n\t\t\treturn\n\t\t}\n\t\tif nextRole == \"\" {\n\t\t\treturn\n\t\t}\n\t\ts.LogReRun(nextRole, role)\n\t\trole = nextRole\n\t}\n\n}\n\nfunc (s *Server) OnAnnounce() coco.CommitFunc {\n\treturn func(view int) []byte {\n\t\t\/\/log.Println(\"Aggregating Commits\")\n\t\treturn s.AggregateCommits(view)\n\t}\n}\n\nfunc (s *Server) OnDone() coco.DoneFunc {\n\treturn func(view int, SNRoot hashid.HashId, LogHash hashid.HashId, p proof.Proof) {\n\t\ts.mux.Lock()\n\t\tfor i, msg := range s.Queue[s.PROCESSING] {\n\t\t\t\/\/ proof to get from s.Root to big root\n\t\t\tcombProof := make(proof.Proof, len(p))\n\t\t\tcopy(combProof, p)\n\n\t\t\t\/\/ add my proof to get from a leaf message to my root s.Root\n\t\t\tcombProof = append(combProof, s.Proofs[i]...)\n\n\t\t\t\/\/ proof that i can get from a leaf message to the big root\n\t\t\tif coco.DEBUG == true {\n\t\t\t\tproof.CheckProof(s.Signer.(*sign.Node).Suite().Hash, SNRoot, s.Leaves[i], combProof)\n\t\t\t}\n\n\t\t\trespMessg := TimeStampMessage{\n\t\t\t\tType: StampReplyType,\n\t\t\t\tReqNo: msg.Tsm.ReqNo,\n\t\t\t\tSrep: &StampReply{Sig: SNRoot, Prf: combProof}}\n\n\t\t\ts.PutToClient(msg.To, respMessg)\n\t\t}\n\t\ts.mux.Unlock()\n\t}\n\n}\n\nfunc (s *Server) AggregateCommits(view int) []byte {\n\t\/\/log.Println(s.Name(), \"calling AggregateCommits\")\n\ts.mux.Lock()\n\t\/\/ get data from s once to avoid refetching from structure\n\tQueue := s.Queue\n\tREADING := s.READING\n\tPROCESSING := s.PROCESSING\n\t\/\/ messages read will now be processed\n\tREADING, PROCESSING = PROCESSING, READING\n\ts.READING, s.PROCESSING = s.PROCESSING, s.READING\n\ts.Queue[READING] = s.Queue[READING][:0]\n\n\t\/\/ give up if nothing to process\n\tif len(Queue[PROCESSING]) == 0 {\n\t\ts.mux.Unlock()\n\t\ts.Root = make([]byte, hashid.Size)\n\t\ts.Proofs = make([]proof.Proof, 1)\n\t\treturn s.Root\n\t}\n\n\t\/\/ pull out to be Merkle Tree leaves\n\ts.Leaves = make([]hashid.HashId, 0)\n\tfor _, msg := range Queue[PROCESSING] {\n\t\ts.Leaves = append(s.Leaves, hashid.HashId(msg.Tsm.Sreq.Val))\n\t}\n\ts.mux.Unlock()\n\n\t\/\/ non root servers keep track of rounds here\n\tif !s.IsRoot(view) {\n\t\ts.rLock.Lock()\n\t\tlsr := s.LastRound()\n\t\tmr := s.maxRounds\n\t\ts.rLock.Unlock()\n\t\t\/\/ if this is our last round then close the connections\n\t\tif lsr >= int64(mr) && mr >= 0 {\n\t\t\ts.closeChan <- true\n\t\t}\n\t}\n\n\t\/\/ create Merkle tree for this round's messages and check corectness\n\ts.Root, s.Proofs = proof.ProofTree(s.Suite().Hash, s.Leaves)\n\tif coco.DEBUG == true {\n\t\tif proof.CheckLocalProofs(s.Suite().Hash, s.Root, s.Leaves, s.Proofs) == true {\n\t\t\tlog.Println(\"Local Proofs of\", s.Name(), \"successful for round \"+strconv.Itoa(int(s.LastRound())))\n\t\t} else {\n\t\t\tpanic(\"Local Proofs\" + s.Name() + \" unsuccessful for round \" + strconv.Itoa(int(s.LastRound())))\n\t\t}\n\t}\n\n\treturn s.Root\n}\n\n\/\/ Send message to client given by name\nfunc (s *Server) PutToClient(name string, data coconet.BinaryMarshaler) {\n\terr := s.Clients[name].Put(data)\n\tif err == coconet.ErrClosed {\n\t\ts.Close()\n\t\treturn\n\t}\n\tif err != nil && err != coconet.ErrNotEstablished {\n\t\tlog.Warnf(\"%p error putting to client: %v\", s, err)\n\t}\n}\ndon't exit if stamp timed outpackage stamp\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/dedis\/prifi\/coco\"\n\t\"github.com\/dedis\/prifi\/coco\/coconet\"\n\t\"github.com\/dedis\/prifi\/coco\/hashid\"\n\t\"github.com\/dedis\/prifi\/coco\/proof\"\n\t\"github.com\/dedis\/prifi\/coco\/sign\"\n\t\"github.com\/dedis\/prifi\/coco\/test\/logutils\"\n)\n\ntype Server struct {\n\tcoco.Signer\n\tname string\n\tClients map[string]coconet.Conn\n\n\t\/\/ for aggregating messages from clients\n\tmux sync.Mutex\n\tQueue [][]MustReplyMessage\n\tREADING int\n\tPROCESSING int\n\n\t\/\/ Leaves, Root and Proof for a round\n\tLeaves []hashid.HashId \/\/ can be removed after we verify protocol\n\tRoot hashid.HashId\n\tProofs []proof.Proof\n\n\trLock sync.Mutex\n\tmaxRounds int\n\tcloseChan chan bool\n\n\tLogger string\n\tHostname string\n\tApp string\n}\n\nfunc NewServer(signer coco.Signer) *Server {\n\ts := &Server{}\n\n\ts.Clients = make(map[string]coconet.Conn)\n\ts.Queue = make([][]MustReplyMessage, 2)\n\ts.READING = 0\n\ts.PROCESSING = 1\n\n\ts.Signer = signer\n\ts.Signer.RegisterAnnounceFunc(s.OnAnnounce())\n\ts.Signer.RegisterDoneFunc(s.OnDone())\n\ts.rLock = sync.Mutex{}\n\n\t\/\/ listen for client requests at one port higher\n\t\/\/ than the signing node\n\th, p, err := net.SplitHostPort(s.Signer.Name())\n\tif err == nil {\n\t\ti, err := strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ts.name = net.JoinHostPort(h, strconv.Itoa(i+1))\n\t}\n\ts.Queue[s.READING] = make([]MustReplyMessage, 0)\n\ts.Queue[s.PROCESSING] = make([]MustReplyMessage, 0)\n\ts.closeChan = make(chan bool, 5)\n\treturn s\n}\n\nvar clientNumber int = 0\n\nfunc (s *Server) Close() {\n\tlog.Printf(\"closing stampserver: %p\", s)\n\ts.closeChan <- true\n\ts.Signer.Close()\n}\n\n\/\/ listen for clients connections\n\/\/ this server needs to be running on a different port\n\/\/ than the Signer that is beneath it\nfunc (s *Server) Listen() error {\n\t\/\/ log.Println(\"Listening @ \", s.name)\n\tln, err := net.Listen(\"tcp4\", s.name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tlog.Printf(\"LISTENING TO CLIENTS: %p\", s)\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ handle error\n\t\t\t\tlog.Errorln(\"failed to accept connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc := coconet.NewTCPConnFromNet(conn)\n\t\t\t\/\/ log.Println(\"CLIENT TCP CONNECTION SUCCESSFULLY ESTABLISHED:\", c)\n\n\t\t\tif _, ok := s.Clients[c.Name()]; !ok {\n\t\t\t\ts.Clients[c.Name()] = c\n\n\t\t\t\tgo func(c coconet.Conn) {\n\t\t\t\t\tfor {\n\t\t\t\t\t\ttsm := TimeStampMessage{}\n\t\t\t\t\t\terr := c.Get(&tsm)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"%p Failed to get from child:\", s, err)\n\t\t\t\t\t\t\ts.Close()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch tsm.Type {\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tlog.Errorf(\"Message of unknown type: %v\\n\", tsm.Type)\n\t\t\t\t\t\tcase StampRequestType:\n\t\t\t\t\t\t\t\/\/ log.Println(\"RECEIVED STAMP REQUEST\")\n\t\t\t\t\t\t\ts.mux.Lock()\n\t\t\t\t\t\t\tREADING := s.READING\n\t\t\t\t\t\t\ts.Queue[READING] = append(s.Queue[READING],\n\t\t\t\t\t\t\t\tMustReplyMessage{Tsm: tsm, To: c.Name()})\n\t\t\t\t\t\t\ts.mux.Unlock()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\t\t\t}\n\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Used for goconns\n\/\/ should only be used if clients are created in batch\nfunc (s *Server) ListenToClients() {\n\tlog.Printf(\"LISTENING TO CLIENTS: %p\", s, s.Clients)\n\tfor _, c := range s.Clients {\n\t\tgo func(c coconet.Conn) {\n\t\t\tfor {\n\t\t\t\ttsm := TimeStampMessage{}\n\t\t\t\terr := c.Get(&tsm)\n\t\t\t\tif err == coconet.ErrClosed {\n\t\t\t\t\tlog.Errorf(\"%p Failed to get from client:\", s, err)\n\t\t\t\t\ts.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\t}).Errorf(\"%p failed To get message:\", s, err)\n\t\t\t\t}\n\t\t\t\tswitch tsm.Type {\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Errorln(\"Message of unknown type\")\n\t\t\t\tcase StampRequestType:\n\t\t\t\t\t\/\/ log.Println(\"STAMP REQUEST\")\n\t\t\t\t\ts.mux.Lock()\n\t\t\t\t\tREADING := s.READING\n\t\t\t\t\ts.Queue[READING] = append(s.Queue[READING],\n\t\t\t\t\t\tMustReplyMessage{Tsm: tsm, To: c.Name()})\n\t\t\t\t\ts.mux.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}(c)\n\t}\n}\n\nfunc (s *Server) ConnectToLogger() {\n\treturn\n\tif s.Logger == \"\" || s.Hostname == \"\" || s.App == \"\" {\n\t\tlog.Println(\"skipping connect to logger\")\n\t\treturn\n\t}\n\tlog.Println(\"Connecting to Logger\")\n\tlh, _ := logutils.NewLoggerHook(s.Logger, s.Hostname, s.App)\n\tlog.Println(\"Connected to Logger\")\n\tlog.AddHook(lh)\n}\n\nfunc (s *Server) LogReRun(nextRole string, curRole string) {\n\tif nextRole == \"root\" {\n\t\tvar messg = s.Name() + \" became root\"\n\t\tif curRole == \"root\" {\n\t\t\tmessg = s.Name() + \" remained root\"\n\t\t}\n\n\t\tgo s.ConnectToLogger()\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"role_change\",\n\t\t}).Infoln(messg)\n\t\tlog.Printf(\"role change: %p\", s)\n\n\t} else {\n\t\tvar messg = s.Name() + \" remained regular\"\n\t\tif curRole == \"root\" {\n\t\t\tmessg = s.Name() + \" became regular\"\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"role_change\",\n\t\t}).Infoln(messg)\n\t\tlog.Printf(\"role change: %p\", s)\n\n\t}\n\n}\n\nfunc (s *Server) runAsRoot(nRounds int) string {\n\t\/\/ every 5 seconds start a new round\n\tticker := time.Tick(ROUND_TIME)\n\tlog.Infoln(s.Name(), \"running as root\", s.LastRound(), int64(nRounds))\n\n\tfor {\n\t\tselect {\n\t\tcase nextRole := <-s.ViewChangeCh():\n\t\t\treturn nextRole\n\t\t\t\/\/ s.reRunWith(nextRole, nRounds, true)\n\t\tcase <-ticker:\n\n\t\t\tstart := time.Now()\n\t\t\tlog.Println(s.Name(), \"is STAMP SERVER STARTING SIGNING ROUND FOR:\", s.LastRound()+1, \"of\", nRounds)\n\n\t\t\terr := s.StartSigningRound()\n\t\t\tif err == sign.ChangingViewError {\n\t\t\t\t\/\/ report change in view, and continue with the select\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\t\"type\": \"view_change\",\n\t\t\t\t}).Info(\"Tried to stary signing round on \" + s.Name() + \" but it reports view change in progress\")\n\t\t\t\t\/\/ skip # of failed round\n\t\t\t\t\/\/ s.SetLastSeenRound(s.LastRound() + 1)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\telapsed := time.Since(start)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"file\": logutils.File(),\n\t\t\t\t\"type\": \"root_round\",\n\t\t\t\t\"round\": s.LastRound(),\n\t\t\t\t\"time\": elapsed,\n\t\t\t}).Info(\"root round\")\n\n\t\t\tif s.LastRound() >= int64(nRounds) {\n\t\t\t\tlog.Errorln(s.Name(), \"reports exceeded the max round: terminating\", s.LastRound(), \">=\", nRounds)\n\t\t\t\treturn \"close\"\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *Server) runAsRegular() string {\n\tselect {\n\tcase <-s.closeChan:\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": logutils.File(),\n\t\t\t\"type\": \"close\",\n\t\t}).Infoln(\"server\" + s.Name() + \"has closed\")\n\t\treturn \"\"\n\n\tcase nextRole := <-s.ViewChangeCh():\n\t\treturn nextRole\n\t}\n}\n\n\/\/ Listen on client connections. If role is root also send annoucement\n\/\/ for all of the nRounds\nfunc (s *Server) Run(role string, nRounds int) {\n\t\/\/ defer func() {\n\t\/\/ \tlog.Infoln(s.Name(), \"CLOSE AFTER RUN\")\n\t\/\/ \ts.Close()\n\t\/\/ }()\n\tgo func() { err := s.Signer.Listen(); s.Close(); log.Error(err) }()\n\n\ts.rLock.Lock()\n\ts.maxRounds = nRounds\n\ts.rLock.Unlock()\n\n\tvar nextRole string \/\/ next role when view changes\n\tfor {\n\t\tswitch role {\n\n\t\tcase \"root\":\n\t\t\tnextRole = s.runAsRoot(nRounds)\n\t\tcase \"regular\":\n\t\t\tnextRole = s.runAsRegular()\n\t\tcase \"test\":\n\t\t\tticker := time.Tick(2000 * time.Millisecond)\n\t\t\tfor _ = range ticker {\n\t\t\t\ts.AggregateCommits(0)\n\t\t\t}\n\t\t}\n\n\t\tlog.Println(s.Name(), \"nextRole: \", nextRole)\n\t\tif nextRole == \"close\" {\n\t\t\ts.Close()\n\t\t\treturn\n\t\t}\n\t\tif nextRole == \"\" {\n\t\t\treturn\n\t\t}\n\t\ts.LogReRun(nextRole, role)\n\t\trole = nextRole\n\t}\n\n}\n\nfunc (s *Server) OnAnnounce() coco.CommitFunc {\n\treturn func(view int) []byte {\n\t\t\/\/log.Println(\"Aggregating Commits\")\n\t\treturn s.AggregateCommits(view)\n\t}\n}\n\nfunc (s *Server) OnDone() coco.DoneFunc {\n\treturn func(view int, SNRoot hashid.HashId, LogHash hashid.HashId, p proof.Proof) {\n\t\ts.mux.Lock()\n\t\tfor i, msg := range s.Queue[s.PROCESSING] {\n\t\t\t\/\/ proof to get from s.Root to big root\n\t\t\tcombProof := make(proof.Proof, len(p))\n\t\t\tcopy(combProof, p)\n\n\t\t\t\/\/ add my proof to get from a leaf message to my root s.Root\n\t\t\tcombProof = append(combProof, s.Proofs[i]...)\n\n\t\t\t\/\/ proof that i can get from a leaf message to the big root\n\t\t\tif coco.DEBUG == true {\n\t\t\t\tproof.CheckProof(s.Signer.(*sign.Node).Suite().Hash, SNRoot, s.Leaves[i], combProof)\n\t\t\t}\n\n\t\t\trespMessg := TimeStampMessage{\n\t\t\t\tType: StampReplyType,\n\t\t\t\tReqNo: msg.Tsm.ReqNo,\n\t\t\t\tSrep: &StampReply{Sig: SNRoot, Prf: combProof}}\n\n\t\t\ts.PutToClient(msg.To, respMessg)\n\t\t}\n\t\ts.mux.Unlock()\n\t}\n\n}\n\nfunc (s *Server) AggregateCommits(view int) []byte {\n\t\/\/log.Println(s.Name(), \"calling AggregateCommits\")\n\ts.mux.Lock()\n\t\/\/ get data from s once to avoid refetching from structure\n\tQueue := s.Queue\n\tREADING := s.READING\n\tPROCESSING := s.PROCESSING\n\t\/\/ messages read will now be processed\n\tREADING, PROCESSING = PROCESSING, READING\n\ts.READING, s.PROCESSING = s.PROCESSING, s.READING\n\ts.Queue[READING] = s.Queue[READING][:0]\n\n\t\/\/ give up if nothing to process\n\tif len(Queue[PROCESSING]) == 0 {\n\t\ts.mux.Unlock()\n\t\ts.Root = make([]byte, hashid.Size)\n\t\ts.Proofs = make([]proof.Proof, 1)\n\t\treturn s.Root\n\t}\n\n\t\/\/ pull out to be Merkle Tree leaves\n\ts.Leaves = make([]hashid.HashId, 0)\n\tfor _, msg := range Queue[PROCESSING] {\n\t\ts.Leaves = append(s.Leaves, hashid.HashId(msg.Tsm.Sreq.Val))\n\t}\n\ts.mux.Unlock()\n\n\t\/\/ non root servers keep track of rounds here\n\tif !s.IsRoot(view) {\n\t\ts.rLock.Lock()\n\t\tlsr := s.LastRound()\n\t\tmr := s.maxRounds\n\t\ts.rLock.Unlock()\n\t\t\/\/ if this is our last round then close the connections\n\t\tif lsr >= int64(mr) && mr >= 0 {\n\t\t\ts.closeChan <- true\n\t\t}\n\t}\n\n\t\/\/ create Merkle tree for this round's messages and check corectness\n\ts.Root, s.Proofs = proof.ProofTree(s.Suite().Hash, s.Leaves)\n\tif coco.DEBUG == true {\n\t\tif proof.CheckLocalProofs(s.Suite().Hash, s.Root, s.Leaves, s.Proofs) == true {\n\t\t\tlog.Println(\"Local Proofs of\", s.Name(), \"successful for round \"+strconv.Itoa(int(s.LastRound())))\n\t\t} else {\n\t\t\tpanic(\"Local Proofs\" + s.Name() + \" unsuccessful for round \" + strconv.Itoa(int(s.LastRound())))\n\t\t}\n\t}\n\n\treturn s.Root\n}\n\n\/\/ Send message to client given by name\nfunc (s *Server) PutToClient(name string, data coconet.BinaryMarshaler) {\n\terr := s.Clients[name].Put(data)\n\tif err == coconet.ErrClosed {\n\t\ts.Close()\n\t\treturn\n\t}\n\tif err != nil && err != coconet.ErrNotEstablished {\n\t\tlog.Warnf(\"%p error putting to client: %v\", s, err)\n\t}\n}\n<|endoftext|>"} {"text":"package config\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"sync\"\n\n\t\"github.com\/hashicorp\/hil\"\n\t\"github.com\/hashicorp\/hil\/ast\"\n\t\"github.com\/mitchellh\/copystructure\"\n\t\"github.com\/mitchellh\/reflectwalk\"\n)\n\n\/\/ UnknownVariableValue is a sentinel value that can be used\n\/\/ to denote that the value of a variable is unknown at this time.\n\/\/ RawConfig uses this information to build up data about\n\/\/ unknown keys.\nconst UnknownVariableValue = \"74D93920-ED26-11E3-AC10-0800200C9A66\"\n\n\/\/ RawConfig is a structure that holds a piece of configuration\n\/\/ where te overall structure is unknown since it will be used\n\/\/ to configure a plugin or some other similar external component.\n\/\/\n\/\/ RawConfigs can be interpolated with variables that come from\n\/\/ other resources, user variables, etc.\n\/\/\n\/\/ RawConfig supports a query-like interface to request\n\/\/ information from deep within the structure.\ntype RawConfig struct {\n\tKey string\n\tRaw map[string]interface{}\n\tInterpolations []ast.Node\n\tVariables map[string]InterpolatedVariable\n\n\tlock sync.Mutex\n\tconfig map[string]interface{}\n\tunknownKeys []string\n}\n\n\/\/ NewRawConfig creates a new RawConfig structure and populates the\n\/\/ publicly readable struct fields.\nfunc NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {\n\tresult := &RawConfig{Raw: raw}\n\tif err := result.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}\n\n\/\/ RawMap returns a copy of the RawConfig.Raw map.\nfunc (r *RawConfig) RawMap() map[string]interface{} {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tm := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\tm[k] = v\n\t}\n\treturn m\n}\n\n\/\/ Copy returns a copy of this RawConfig, uninterpolated.\nfunc (r *RawConfig) Copy() *RawConfig {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tnewRaw := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\tnewRaw[k] = v\n\t}\n\n\tresult, err := NewRawConfig(newRaw)\n\tif err != nil {\n\t\tpanic(\"copy failed: \" + err.Error())\n\t}\n\n\tresult.Key = r.Key\n\treturn result\n}\n\n\/\/ Value returns the value of the configuration if this configuration\n\/\/ has a Key set. If this does not have a Key set, nil will be returned.\nfunc (r *RawConfig) Value() interface{} {\n\tif c := r.Config(); c != nil {\n\t\tif v, ok := c[r.Key]; ok {\n\t\t\treturn v\n\t\t}\n\t}\n\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.Raw[r.Key]\n}\n\n\/\/ Config returns the entire configuration with the variables\n\/\/ interpolated from any call to Interpolate.\n\/\/\n\/\/ If any interpolated variables are unknown (value set to\n\/\/ UnknownVariableValue), the first non-container (map, slice, etc.) element\n\/\/ will be removed from the config. The keys of unknown variables\n\/\/ can be found using the UnknownKeys function.\n\/\/\n\/\/ By pruning out unknown keys from the configuration, the raw\n\/\/ structure will always successfully decode into its ultimate\n\/\/ structure using something like mapstructure.\nfunc (r *RawConfig) Config() map[string]interface{} {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.config\n}\n\n\/\/ Interpolate uses the given mapping of variable values and uses\n\/\/ those as the values to replace any variables in this raw\n\/\/ configuration.\n\/\/\n\/\/ Any prior calls to Interpolate are replaced with this one.\n\/\/\n\/\/ If a variable key is missing, this will panic.\nfunc (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tconfig := langEvalConfig(vs)\n\treturn r.interpolate(func(root ast.Node) (interface{}, error) {\n\t\t\/\/ We detect the variables again and check if the value of any\n\t\t\/\/ of the variables is the computed value. If it is, then we\n\t\t\/\/ treat this entire value as computed.\n\t\t\/\/\n\t\t\/\/ We have to do this here before the `lang.Eval` because\n\t\t\/\/ if any of the variables it depends on are computed, then\n\t\t\/\/ the interpolation can fail at runtime for other reasons. Example:\n\t\t\/\/ `${count.index+1}`: in a world where `count.index` is computed,\n\t\t\/\/ this would fail a type check since the computed placeholder is\n\t\t\/\/ a string, but realistically the whole value is just computed.\n\t\tvars, err := DetectVariables(root)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, v := range vars {\n\t\t\tvarVal, ok := vs[v.FullKey()]\n\t\t\tif ok && varVal.Value == UnknownVariableValue {\n\t\t\t\treturn UnknownVariableValue, nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ None of the variables we need are computed, meaning we should\n\t\t\/\/ be able to properly evaluate.\n\t\tresult, err := hil.Eval(root, config)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn result.Value, nil\n\t})\n}\n\n\/\/ Merge merges another RawConfig into this one (overriding any conflicting\n\/\/ values in this config) and returns a new config. The original config\n\/\/ is not modified.\nfunc (r *RawConfig) Merge(other *RawConfig) *RawConfig {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\t\/\/ Merge the raw configurations\n\traw := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\traw[k] = v\n\t}\n\tfor k, v := range other.Raw {\n\t\traw[k] = v\n\t}\n\n\t\/\/ Create the result\n\tresult, err := NewRawConfig(raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Merge the interpolated results\n\tresult.config = make(map[string]interface{})\n\tfor k, v := range r.config {\n\t\tresult.config[k] = v\n\t}\n\tfor k, v := range other.config {\n\t\tresult.config[k] = v\n\t}\n\n\t\/\/ Build the unknown keys\n\tunknownKeys := make(map[string]struct{})\n\tfor _, k := range r.unknownKeys {\n\t\tunknownKeys[k] = struct{}{}\n\t}\n\tfor _, k := range other.unknownKeys {\n\t\tunknownKeys[k] = struct{}{}\n\t}\n\n\tresult.unknownKeys = make([]string, 0, len(unknownKeys))\n\tfor k, _ := range unknownKeys {\n\t\tresult.unknownKeys = append(result.unknownKeys, k)\n\t}\n\n\treturn result\n}\n\nfunc (r *RawConfig) init() error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tr.config = r.Raw\n\tr.Interpolations = nil\n\tr.Variables = nil\n\n\tfn := func(node ast.Node) (interface{}, error) {\n\t\tr.Interpolations = append(r.Interpolations, node)\n\t\tvars, err := DetectVariables(node)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfor _, v := range vars {\n\t\t\tif r.Variables == nil {\n\t\t\t\tr.Variables = make(map[string]InterpolatedVariable)\n\t\t\t}\n\n\t\t\tr.Variables[v.FullKey()] = v\n\t\t}\n\n\t\treturn \"\", nil\n\t}\n\n\twalker := &interpolationWalker{F: fn}\n\tif err := reflectwalk.Walk(r.Raw, walker); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {\n\tconfig, err := copystructure.Copy(r.Raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.config = config.(map[string]interface{})\n\n\tw := &interpolationWalker{F: fn, Replace: true}\n\terr = reflectwalk.Walk(r.config, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.unknownKeys = w.unknownKeys\n\treturn nil\n}\n\nfunc (r *RawConfig) merge(r2 *RawConfig) *RawConfig {\n\trawRaw, err := copystructure.Copy(r.Raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\traw := rawRaw.(map[string]interface{})\n\tfor k, v := range r2.Raw {\n\t\traw[k] = v\n\t}\n\n\tresult, err := NewRawConfig(raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn result\n}\n\n\/\/ UnknownKeys returns the keys of the configuration that are unknown\n\/\/ because they had interpolated variables that must be computed.\nfunc (r *RawConfig) UnknownKeys() []string {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.unknownKeys\n}\n\n\/\/ See GobEncode\nfunc (r *RawConfig) GobDecode(b []byte) error {\n\tvar data gobRawConfig\n\terr := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Key = data.Key\n\tr.Raw = data.Raw\n\n\treturn r.init()\n}\n\n\/\/ GobEncode is a custom Gob encoder to use so that we only include the\n\/\/ raw configuration. Interpolated variables and such are lost and the\n\/\/ tree of interpolated variables is recomputed on decode, since it is\n\/\/ referentially transparent.\nfunc (r *RawConfig) GobEncode() ([]byte, error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tdata := gobRawConfig{\n\t\tKey: r.Key,\n\t\tRaw: r.Raw,\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := gob.NewEncoder(&buf).Encode(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\ntype gobRawConfig struct {\n\tKey string\n\tRaw map[string]interface{}\n}\n\n\/\/ langEvalConfig returns the evaluation configuration we use to execute.\nfunc langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {\n\tfuncMap := make(map[string]ast.Function)\n\tfor k, v := range Funcs() {\n\t\tfuncMap[k] = v\n\t}\n\tfuncMap[\"lookup\"] = interpolationFuncLookup(vs)\n\tfuncMap[\"keys\"] = interpolationFuncKeys(vs)\n\tfuncMap[\"values\"] = interpolationFuncValues(vs)\n\n\treturn &hil.EvalConfig{\n\t\tGlobalScope: &ast.BasicScope{\n\t\t\tVarMap: vs,\n\t\t\tFuncMap: funcMap,\n\t\t},\n\t}\n}\nconfig: RawConfig merge should only set unknown keys non-nil if non-emptypackage config\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"sync\"\n\n\t\"github.com\/hashicorp\/hil\"\n\t\"github.com\/hashicorp\/hil\/ast\"\n\t\"github.com\/mitchellh\/copystructure\"\n\t\"github.com\/mitchellh\/reflectwalk\"\n)\n\n\/\/ UnknownVariableValue is a sentinel value that can be used\n\/\/ to denote that the value of a variable is unknown at this time.\n\/\/ RawConfig uses this information to build up data about\n\/\/ unknown keys.\nconst UnknownVariableValue = \"74D93920-ED26-11E3-AC10-0800200C9A66\"\n\n\/\/ RawConfig is a structure that holds a piece of configuration\n\/\/ where te overall structure is unknown since it will be used\n\/\/ to configure a plugin or some other similar external component.\n\/\/\n\/\/ RawConfigs can be interpolated with variables that come from\n\/\/ other resources, user variables, etc.\n\/\/\n\/\/ RawConfig supports a query-like interface to request\n\/\/ information from deep within the structure.\ntype RawConfig struct {\n\tKey string\n\tRaw map[string]interface{}\n\tInterpolations []ast.Node\n\tVariables map[string]InterpolatedVariable\n\n\tlock sync.Mutex\n\tconfig map[string]interface{}\n\tunknownKeys []string\n}\n\n\/\/ NewRawConfig creates a new RawConfig structure and populates the\n\/\/ publicly readable struct fields.\nfunc NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {\n\tresult := &RawConfig{Raw: raw}\n\tif err := result.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}\n\n\/\/ RawMap returns a copy of the RawConfig.Raw map.\nfunc (r *RawConfig) RawMap() map[string]interface{} {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tm := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\tm[k] = v\n\t}\n\treturn m\n}\n\n\/\/ Copy returns a copy of this RawConfig, uninterpolated.\nfunc (r *RawConfig) Copy() *RawConfig {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tnewRaw := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\tnewRaw[k] = v\n\t}\n\n\tresult, err := NewRawConfig(newRaw)\n\tif err != nil {\n\t\tpanic(\"copy failed: \" + err.Error())\n\t}\n\n\tresult.Key = r.Key\n\treturn result\n}\n\n\/\/ Value returns the value of the configuration if this configuration\n\/\/ has a Key set. If this does not have a Key set, nil will be returned.\nfunc (r *RawConfig) Value() interface{} {\n\tif c := r.Config(); c != nil {\n\t\tif v, ok := c[r.Key]; ok {\n\t\t\treturn v\n\t\t}\n\t}\n\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.Raw[r.Key]\n}\n\n\/\/ Config returns the entire configuration with the variables\n\/\/ interpolated from any call to Interpolate.\n\/\/\n\/\/ If any interpolated variables are unknown (value set to\n\/\/ UnknownVariableValue), the first non-container (map, slice, etc.) element\n\/\/ will be removed from the config. The keys of unknown variables\n\/\/ can be found using the UnknownKeys function.\n\/\/\n\/\/ By pruning out unknown keys from the configuration, the raw\n\/\/ structure will always successfully decode into its ultimate\n\/\/ structure using something like mapstructure.\nfunc (r *RawConfig) Config() map[string]interface{} {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.config\n}\n\n\/\/ Interpolate uses the given mapping of variable values and uses\n\/\/ those as the values to replace any variables in this raw\n\/\/ configuration.\n\/\/\n\/\/ Any prior calls to Interpolate are replaced with this one.\n\/\/\n\/\/ If a variable key is missing, this will panic.\nfunc (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tconfig := langEvalConfig(vs)\n\treturn r.interpolate(func(root ast.Node) (interface{}, error) {\n\t\t\/\/ We detect the variables again and check if the value of any\n\t\t\/\/ of the variables is the computed value. If it is, then we\n\t\t\/\/ treat this entire value as computed.\n\t\t\/\/\n\t\t\/\/ We have to do this here before the `lang.Eval` because\n\t\t\/\/ if any of the variables it depends on are computed, then\n\t\t\/\/ the interpolation can fail at runtime for other reasons. Example:\n\t\t\/\/ `${count.index+1}`: in a world where `count.index` is computed,\n\t\t\/\/ this would fail a type check since the computed placeholder is\n\t\t\/\/ a string, but realistically the whole value is just computed.\n\t\tvars, err := DetectVariables(root)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, v := range vars {\n\t\t\tvarVal, ok := vs[v.FullKey()]\n\t\t\tif ok && varVal.Value == UnknownVariableValue {\n\t\t\t\treturn UnknownVariableValue, nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ None of the variables we need are computed, meaning we should\n\t\t\/\/ be able to properly evaluate.\n\t\tresult, err := hil.Eval(root, config)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn result.Value, nil\n\t})\n}\n\n\/\/ Merge merges another RawConfig into this one (overriding any conflicting\n\/\/ values in this config) and returns a new config. The original config\n\/\/ is not modified.\nfunc (r *RawConfig) Merge(other *RawConfig) *RawConfig {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\t\/\/ Merge the raw configurations\n\traw := make(map[string]interface{})\n\tfor k, v := range r.Raw {\n\t\traw[k] = v\n\t}\n\tfor k, v := range other.Raw {\n\t\traw[k] = v\n\t}\n\n\t\/\/ Create the result\n\tresult, err := NewRawConfig(raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Merge the interpolated results\n\tresult.config = make(map[string]interface{})\n\tfor k, v := range r.config {\n\t\tresult.config[k] = v\n\t}\n\tfor k, v := range other.config {\n\t\tresult.config[k] = v\n\t}\n\n\t\/\/ Build the unknown keys\n\tif len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {\n\t\tunknownKeys := make(map[string]struct{})\n\t\tfor _, k := range r.unknownKeys {\n\t\t\tunknownKeys[k] = struct{}{}\n\t\t}\n\t\tfor _, k := range other.unknownKeys {\n\t\t\tunknownKeys[k] = struct{}{}\n\t\t}\n\n\t\tresult.unknownKeys = make([]string, 0, len(unknownKeys))\n\t\tfor k, _ := range unknownKeys {\n\t\t\tresult.unknownKeys = append(result.unknownKeys, k)\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc (r *RawConfig) init() error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tr.config = r.Raw\n\tr.Interpolations = nil\n\tr.Variables = nil\n\n\tfn := func(node ast.Node) (interface{}, error) {\n\t\tr.Interpolations = append(r.Interpolations, node)\n\t\tvars, err := DetectVariables(node)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfor _, v := range vars {\n\t\t\tif r.Variables == nil {\n\t\t\t\tr.Variables = make(map[string]InterpolatedVariable)\n\t\t\t}\n\n\t\t\tr.Variables[v.FullKey()] = v\n\t\t}\n\n\t\treturn \"\", nil\n\t}\n\n\twalker := &interpolationWalker{F: fn}\n\tif err := reflectwalk.Walk(r.Raw, walker); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {\n\tconfig, err := copystructure.Copy(r.Raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.config = config.(map[string]interface{})\n\n\tw := &interpolationWalker{F: fn, Replace: true}\n\terr = reflectwalk.Walk(r.config, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.unknownKeys = w.unknownKeys\n\treturn nil\n}\n\nfunc (r *RawConfig) merge(r2 *RawConfig) *RawConfig {\n\trawRaw, err := copystructure.Copy(r.Raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\traw := rawRaw.(map[string]interface{})\n\tfor k, v := range r2.Raw {\n\t\traw[k] = v\n\t}\n\n\tresult, err := NewRawConfig(raw)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn result\n}\n\n\/\/ UnknownKeys returns the keys of the configuration that are unknown\n\/\/ because they had interpolated variables that must be computed.\nfunc (r *RawConfig) UnknownKeys() []string {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\treturn r.unknownKeys\n}\n\n\/\/ See GobEncode\nfunc (r *RawConfig) GobDecode(b []byte) error {\n\tvar data gobRawConfig\n\terr := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Key = data.Key\n\tr.Raw = data.Raw\n\n\treturn r.init()\n}\n\n\/\/ GobEncode is a custom Gob encoder to use so that we only include the\n\/\/ raw configuration. Interpolated variables and such are lost and the\n\/\/ tree of interpolated variables is recomputed on decode, since it is\n\/\/ referentially transparent.\nfunc (r *RawConfig) GobEncode() ([]byte, error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tdata := gobRawConfig{\n\t\tKey: r.Key,\n\t\tRaw: r.Raw,\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := gob.NewEncoder(&buf).Encode(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\ntype gobRawConfig struct {\n\tKey string\n\tRaw map[string]interface{}\n}\n\n\/\/ langEvalConfig returns the evaluation configuration we use to execute.\nfunc langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {\n\tfuncMap := make(map[string]ast.Function)\n\tfor k, v := range Funcs() {\n\t\tfuncMap[k] = v\n\t}\n\tfuncMap[\"lookup\"] = interpolationFuncLookup(vs)\n\tfuncMap[\"keys\"] = interpolationFuncKeys(vs)\n\tfuncMap[\"values\"] = interpolationFuncValues(vs)\n\n\treturn &hil.EvalConfig{\n\t\tGlobalScope: &ast.BasicScope{\n\t\t\tVarMap: vs,\n\t\t\tFuncMap: funcMap,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"package connectivity\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ download test data from internet\nconst (\n\ttiny string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/tinyUF.txt\"\n\tmedium string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/mediumUF.txt\"\n\t\/\/ huge string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/largeUF.txt\"\n)\n\nfunc loadTestDataFromWeb() []TestData {\n\tcl := &http.Client{}\n\ttr, err := cl.Get(tiny)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tmr, err := cl.Get(medium)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\t\/\/ hr, err := cl.Get(huge)\n\t\/\/ if err != nil {\n\t\/\/ \tlog.Fatalln(err)\n\t\/\/ }\n\n\tc := make(chan TestData)\n\t\/\/ go func() { c <- parse(hr) }()\n\tgo func() { c <- parse(mr) }()\n\tgo func() { c <- parse(tr) }()\n\n\tvar results []TestData\n\tfor i := 0; i < 2; \/*3*\/ i++ {\n\t\tresult := <-c\n\t\tresults = append(results, result)\n\t}\n\treturn results\n}\n\nfunc parse(r *http.Response) (result TestData) {\n\tdefer r.Body.Close()\n\n\ts := bufio.NewScanner(r.Body)\n\ts.Split(bufio.ScanWords)\n\tvar pairs []Pair\n\ts.Scan()\n\tg, err := strconv.Atoi(s.Text())\n\tif err != nil {\n\t\tlog.Panicf(\"first generator parse error %v\", g, err)\n\t}\n\tfor s.Scan() {\n\t\tvar pair Pair\n\t\tpair.Left, err = strconv.Atoi(s.Text())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"parse left %v failed\", pair.Left, err)\n\t\t}\n\t\ts.Scan()\n\t\tpair.Right, err = strconv.Atoi(s.Text())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"parse right %v failed\", pair.Right, err)\n\t\t}\n\t\tpairs = append(pairs, pair)\n\t}\n\n\tresult.Generator = g\n\tresult.Pairs = pairs\n\treturn result\n}\n\ntype Pair struct {\n\tLeft int\n\tRight int\n}\n\ntype TestData struct {\n\tGenerator int\n\tPairs []Pair\n}\n\nfunc TestJustTest(t *testing.T) {\n\tloadStart := time.Now()\n\tn := loadTestDataFromWeb()\n\tlog.Printf(\"loading used %v\", time.Since(loadStart))\n\tfor _, content := range n {\n\t\tlog.Printf(\"Generator %v has %v pairs\", content.Generator, len(content.Pairs))\n\t\talgoStart := time.Now()\n\t\ttest := NewWeightedCompression(content.Generator)\n\t\tfor idx, p := range content.Pairs {\n\t\t\t\/\/ log.Printf(\"%vth pair: %v and %v\", idx, p.Left, p.Right)\n\t\t\ttest.Union(p.Left, p.Right)\n\t\t\tif !test.Find(p.Left, p.Right) {\n\t\t\t\tt.Errorf(\"%v and %v are expected to be in the same component, this is the %vth union operation\", p.Left, p.Right, idx)\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Weighted Compressed UnionFind used %v\", time.Since(algoStart))\n\t}\n}\n\n\/\/ test set is a struct of set\/bag of items, plus\n\/\/ the test subject -> 2 items\n\/\/ and expected output -> true or false\ntype TestSet struct {\n\tExp bool\n\tSubjectA int\n\tSubjectB int\n\tSet Component\n}\n\n\/\/ prepare test sets\nfunc getTestSets() *[]TestSet {\n\treturn &[]TestSet{\n\t\t{\n\t\t\tExp: true,\n\t\t\tSubjectA: 2,\n\t\t\tSubjectB: 3,\n\t\t\tSet: []int{2, 3, 6, 7},\n\t\t},\n\t\t{\n\t\t\tExp: true,\n\t\t\tSubjectA: 1,\n\t\t\tSubjectB: 4,\n\t\t\tSet: []int{1, 4, 5},\n\t\t},\n\t\t{\n\t\t\tExp: false,\n\t\t\tSubjectA: 2,\n\t\t\tSubjectB: 9,\n\t\t\tSet: []int{1, 2, 3, 4, 5, 7},\n\t\t},\n\t}\n}\n\nfunc TestFindQueryInSet(t *testing.T) {\n\ttestSets := getTestSets()\n\tfor _, test := range *testSets {\n\t\texp := test.Exp\n\t\tinputA := test.SubjectA\n\t\tinputB := test.SubjectB\n\t\tresult := test.Set.FindQueryInSingleSetNaive(inputA, inputB)\n\t\tif result != exp {\n\t\t\tt.Errorf(\"expected %q, got %q\", exp, result)\n\t\t}\n\t}\n}\n\n\/\/ we have multiple components (sets)\n\/\/ FindQuery is expected to Find if the 2 input items are\n\/\/ in the same component.\n\/\/ We need a struct for Components just like our previous TestSet\ntype TestConnectivity struct {\n\tExpected bool\n\tA int\n\tB int\n\tComponents\n}\n\nfunc getTestComponents() []Component {\n\treturn []Component{\n\t\tComponent{2, 3, 6, 7}, Component{1, 4}, Component{5, 8, 9},\n\t}\n}\n\nfunc getTestSuits() *[]TestConnectivity {\n\treturn &[]TestConnectivity{\n\t\t{\n\t\t\tExpected: true,\n\t\t\tA: 2,\n\t\t\tB: 3,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t\t{\n\t\t\tExpected: true,\n\t\t\tA: 1,\n\t\t\tB: 4,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t\t{\n\t\t\tExpected: false,\n\t\t\tA: 2,\n\t\t\tB: 9,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t}\n}\n\n\/\/ test findquery in labyrinth\nfunc TestFindQuery(t *testing.T) {\n\ttestSuits := getTestSuits()\n\tfor _, test := range *testSuits {\n\t\texp := test.Expected\n\t\ta := test.A\n\t\tb := test.B\n\t\tresult := test.Components.FindQuery(a, b)\n\t\tif result != exp {\n\t\t\tt.Errorf(\"expected %q, got %q\", exp, result)\n\t\t}\n\t}\n}\n\n\/\/ helper func for checking 2 components\n\/\/ components sorted to deliver the false negative result\n\/\/ as soon as possible\nfunc checkComponent(a, b Component) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif len(a) == len(b) {\n\t\tsort.Ints(a)\n\t\tsort.Ints(b)\n\t\tindex := 0\n\t\tfor index != len(a) {\n\t\t\tif a[index] != b[index] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tindex++\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ test helper func queryComponent\nfunc TestQueryComponent(t *testing.T) {\n\tc := Components(getTestComponents())\n\n\tif _, got := c.queryComponent(1); !checkComponent(got, c[1]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[1], got)\n\t}\n\tif _, got := c.queryComponent(2); !checkComponent(got, c[0]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[0], got)\n\t}\n\tif _, got := c.queryComponent(5); !checkComponent(got, c[2]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[2], got)\n\t}\n\t\/\/ not found digit should return nil instead of empty set, because it would\n\t\/\/ mess with FindQuery\n\tif _, got := c.queryComponent(0); !checkComponent(got, nil) {\n\t\tt.Errorf(\"expected component %v, got %v\", nil, got)\n\t}\n}\n\n\/\/ test constructor\nfunc TestConstructorNewConnectivity(t *testing.T) {\n\ttest := NewConnectivity(10)\n\texpected := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\tindex := 0\n\tfor index != len(test.elements) {\n\t\tif expected[index] != test.elements[index] {\n\t\t\tt.Errorf(\"expected %v, got %v\", expected[index], test.elements[index])\n\t\t}\n\t\tindex++\n\t}\n}\n\n\/\/ test union\n\/\/ todo: check if the Union skips cases where two numbers are already connected\nfunc TestUnion(t *testing.T) {\n\tcomponents := Components(getTestComponents())\n\tcomponents.Union(1, 5)\n\tif len(components) != 2 {\n\t\tt.Errorf(\"expected reduced component of length %v, got %v\", 2, len(components))\n\t}\n\n\tfor _, c := range components {\n\t\tif len(c) != 4 {\n\t\t\tif b := checkComponent(c, []int{1, 4, 5, 8, 9}); !b {\n\t\t\t\tt.Errorf(\"expected matched component %q, got %q\", []int{1, 4, 5, 8, 9}, c)\n\t\t\t}\n\t\t}\n\t}\n}\nchange test to get all 3 url content with a timeout of 1 secondpackage connectivity\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ download test data from internet\nconst (\n\ttiny string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/tinyUF.txt\"\n\tmedium string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/mediumUF.txt\"\n\thuge string = \"http:\/\/algs4.cs.princeton.edu\/15uf\/largeUF.txt\"\n)\n\nfunc loadTestDataFromWeb() []TestData {\n\n\tc := make(chan TestData)\n\tgo func() { c <- parse(tiny) }()\n\tgo func() { c <- parse(medium) }()\n\tgo func() { c <- parse(huge) }()\n\n\tvar results []TestData\n\ttimeout := time.After(1 * time.Second)\n\tfor i := 0; i < 3; i++ {\n\t\tselect {\n\t\tcase result := <-c:\n\t\t\tresults = append(results, result)\n\t\tcase <-timeout:\n\t\t\tlog.Println(\"timed out fetching data\")\n\t\t\treturn results\n\t\t}\n\n\t}\n\treturn results\n}\n\nfunc parse(url string) (result TestData) {\n\n\tcl := &http.Client{}\n\tr, err := cl.Get(url)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer r.Body.Close()\n\n\ts := bufio.NewScanner(r.Body)\n\ts.Split(bufio.ScanWords)\n\tvar pairs []Pair\n\ts.Scan()\n\tg, err := strconv.Atoi(s.Text())\n\tif err != nil {\n\t\tlog.Panicf(\"first generator parse error %v\", g, err)\n\t}\n\tfor s.Scan() {\n\t\tvar pair Pair\n\t\tpair.Left, err = strconv.Atoi(s.Text())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"parse left %v failed\", pair.Left, err)\n\t\t}\n\t\ts.Scan()\n\t\tpair.Right, err = strconv.Atoi(s.Text())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"parse right %v failed\", pair.Right, err)\n\t\t}\n\t\tpairs = append(pairs, pair)\n\t}\n\n\tresult.Generator = g\n\tresult.Pairs = pairs\n\treturn result\n}\n\ntype Pair struct {\n\tLeft int\n\tRight int\n}\n\ntype TestData struct {\n\tGenerator int\n\tPairs []Pair\n}\n\nfunc TestJustTest(t *testing.T) {\n\tloadStart := time.Now()\n\tn := loadTestDataFromWeb()\n\tlog.Printf(\"loading used %v\", time.Since(loadStart))\n\tfor _, content := range n {\n\t\tlog.Printf(\"Generator %v has %v pairs\", content.Generator, len(content.Pairs))\n\t\talgoStart := time.Now()\n\t\ttest := NewWeightedCompression(content.Generator)\n\t\tfor idx, p := range content.Pairs {\n\t\t\t\/\/ log.Printf(\"%vth pair: %v and %v\", idx, p.Left, p.Right)\n\t\t\ttest.Union(p.Left, p.Right)\n\t\t\tif !test.Find(p.Left, p.Right) {\n\t\t\t\tt.Errorf(\"%v and %v are expected to be in the same component, this is the %vth union operation\", p.Left, p.Right, idx)\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Weighted Compressed UnionFind used %v\", time.Since(algoStart))\n\t}\n}\n\n\/\/ test set is a struct of set\/bag of items, plus\n\/\/ the test subject -> 2 items\n\/\/ and expected output -> true or false\ntype TestSet struct {\n\tExp bool\n\tSubjectA int\n\tSubjectB int\n\tSet Component\n}\n\n\/\/ prepare test sets\nfunc getTestSets() *[]TestSet {\n\treturn &[]TestSet{\n\t\t{\n\t\t\tExp: true,\n\t\t\tSubjectA: 2,\n\t\t\tSubjectB: 3,\n\t\t\tSet: []int{2, 3, 6, 7},\n\t\t},\n\t\t{\n\t\t\tExp: true,\n\t\t\tSubjectA: 1,\n\t\t\tSubjectB: 4,\n\t\t\tSet: []int{1, 4, 5},\n\t\t},\n\t\t{\n\t\t\tExp: false,\n\t\t\tSubjectA: 2,\n\t\t\tSubjectB: 9,\n\t\t\tSet: []int{1, 2, 3, 4, 5, 7},\n\t\t},\n\t}\n}\n\nfunc TestFindQueryInSet(t *testing.T) {\n\ttestSets := getTestSets()\n\tfor _, test := range *testSets {\n\t\texp := test.Exp\n\t\tinputA := test.SubjectA\n\t\tinputB := test.SubjectB\n\t\tresult := test.Set.FindQueryInSingleSetNaive(inputA, inputB)\n\t\tif result != exp {\n\t\t\tt.Errorf(\"expected %q, got %q\", exp, result)\n\t\t}\n\t}\n}\n\n\/\/ we have multiple components (sets)\n\/\/ FindQuery is expected to Find if the 2 input items are\n\/\/ in the same component.\n\/\/ We need a struct for Components just like our previous TestSet\ntype TestConnectivity struct {\n\tExpected bool\n\tA int\n\tB int\n\tComponents\n}\n\nfunc getTestComponents() []Component {\n\treturn []Component{\n\t\tComponent{2, 3, 6, 7}, Component{1, 4}, Component{5, 8, 9},\n\t}\n}\n\nfunc getTestSuits() *[]TestConnectivity {\n\treturn &[]TestConnectivity{\n\t\t{\n\t\t\tExpected: true,\n\t\t\tA: 2,\n\t\t\tB: 3,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t\t{\n\t\t\tExpected: true,\n\t\t\tA: 1,\n\t\t\tB: 4,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t\t{\n\t\t\tExpected: false,\n\t\t\tA: 2,\n\t\t\tB: 9,\n\t\t\tComponents: getTestComponents(),\n\t\t},\n\t}\n}\n\n\/\/ test findquery in labyrinth\nfunc TestFindQuery(t *testing.T) {\n\ttestSuits := getTestSuits()\n\tfor _, test := range *testSuits {\n\t\texp := test.Expected\n\t\ta := test.A\n\t\tb := test.B\n\t\tresult := test.Components.FindQuery(a, b)\n\t\tif result != exp {\n\t\t\tt.Errorf(\"expected %q, got %q\", exp, result)\n\t\t}\n\t}\n}\n\n\/\/ helper func for checking 2 components\n\/\/ components sorted to deliver the false negative result\n\/\/ as soon as possible\nfunc checkComponent(a, b Component) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif len(a) == len(b) {\n\t\tsort.Ints(a)\n\t\tsort.Ints(b)\n\t\tindex := 0\n\t\tfor index != len(a) {\n\t\t\tif a[index] != b[index] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tindex++\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ test helper func queryComponent\nfunc TestQueryComponent(t *testing.T) {\n\tc := Components(getTestComponents())\n\n\tif _, got := c.queryComponent(1); !checkComponent(got, c[1]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[1], got)\n\t}\n\tif _, got := c.queryComponent(2); !checkComponent(got, c[0]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[0], got)\n\t}\n\tif _, got := c.queryComponent(5); !checkComponent(got, c[2]) {\n\t\tt.Errorf(\"expected component %v, got %v\", c[2], got)\n\t}\n\t\/\/ not found digit should return nil instead of empty set, because it would\n\t\/\/ mess with FindQuery\n\tif _, got := c.queryComponent(0); !checkComponent(got, nil) {\n\t\tt.Errorf(\"expected component %v, got %v\", nil, got)\n\t}\n}\n\n\/\/ test constructor\nfunc TestConstructorNewConnectivity(t *testing.T) {\n\ttest := NewConnectivity(10)\n\texpected := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\tindex := 0\n\tfor index != len(test.elements) {\n\t\tif expected[index] != test.elements[index] {\n\t\t\tt.Errorf(\"expected %v, got %v\", expected[index], test.elements[index])\n\t\t}\n\t\tindex++\n\t}\n}\n\n\/\/ test union\n\/\/ todo: check if the Union skips cases where two numbers are already connected\nfunc TestUnion(t *testing.T) {\n\tcomponents := Components(getTestComponents())\n\tcomponents.Union(1, 5)\n\tif len(components) != 2 {\n\t\tt.Errorf(\"expected reduced component of length %v, got %v\", 2, len(components))\n\t}\n\n\tfor _, c := range components {\n\t\tif len(c) != 4 {\n\t\t\tif b := checkComponent(c, []int{1, 4, 5, 8, 9}); !b {\n\t\t\t\tt.Errorf(\"expected matched component %q, got %q\", []int{1, 4, 5, 8, 9}, c)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017 aaharu All rights reserved.\n\/\/ This source code is licensed under the BSD-style license found in\n\/\/ the LICENSE file in the root directory of this source tree.\n\npackage codegen\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"strings\"\n\n\t\"github.com\/aaharu\/schemarshal\/utils\"\n\t\"github.com\/aaharu\/schemarshal\/version\"\n)\n\nvar enumList = map[string][]interface{}{} \/\/ FIXME\n\n\/\/ Generator of Go source code from JSON Schema\ntype Generator struct {\n\tname string \/\/ package nage\n\tcommand string\n\timports []*importSpec\n\tdecls []*typeSpec\n}\n\n\/\/ NewGenerator create Generator struct\nfunc NewGenerator(packageName string, command string) *Generator {\n\treturn &Generator{\n\t\tname: packageName,\n\t\tcommand: command,\n\t}\n}\n\n\/\/ AddImport add an import statement\nfunc (g *Generator) AddImport(path string, name string) {\n\tif g.imports == nil {\n\t\tg.imports = []*importSpec{}\n\t}\n\tg.imports = append(g.imports, &importSpec{\n\t\tname: name,\n\t\tpath: path,\n\t})\n}\n\n\/\/ AddType add a type statement\nfunc (g *Generator) AddType(name string, jsonType *JSONType) {\n\tif g.decls == nil {\n\t\tg.decls = []*typeSpec{}\n\t}\n\tg.decls = append(g.decls, &typeSpec{\n\t\tname: name,\n\t\tjsontype: jsonType,\n\t})\n}\n\n\/\/ Generate gofmt-ed Go source code\nfunc (g *Generator) Generate() ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(fmt.Sprintf(\"\/\/ generated by %s `%s`\\n\", version.String(), g.command))\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"package %s\\n\\n\", g.name))\n\n\tif len(g.imports) > 1 {\n\t\tbuf.WriteString(fmt.Sprintf(\"import (\\n\"))\n\t\tfor i := range g.imports {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s %s\\n\", g.imports[i].name, g.imports[i].path))\n\t\t}\n\t\tbuf.WriteString(fmt.Sprintf(\")\\n\\n\"))\n\t} else if len(g.imports) == 1 {\n\t\tbuf.WriteString(fmt.Sprintf(\"import %s %s\\n\\n\", g.imports[0].name, g.imports[0].path))\n\t}\n\n\tif g.decls != nil {\n\t\tfor i := range g.decls {\n\t\t\tbuf.WriteString(\"type \" + g.decls[i].name + \" \")\n\t\t\tbuf.Write(g.decls[i].jsontype.generate())\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\t}\n\n\tif len(enumList) > 0 {\n\t\tbuf.WriteString(\"\\n\")\n\t\tfor typeName, enum := range enumList {\n\t\t\tenumName := typeName + \"Enum\"\n\t\t\tbuf.WriteString(\"type \" + enumName + \" int\\n\")\n\t\t\tbuf.WriteString(\"const (\\n\")\n\t\t\tfor i := range enum {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tbuf.WriteString(enumName + utils.UpperCamelCase(fmt.Sprintf(\"%v\", enum[0])) + \" \" + enumName + \" = iota\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tbuf.WriteString(enumName + utils.UpperCamelCase(fmt.Sprintf(\"%v\", enum[i])) + \"\\n\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteString(\")\\n\")\n\t\t\tbuf.WriteString(\"func (enum \" + enumName + \") MarshalJSON() ([]byte, error) {\\n\")\n\t\t\tbuf.WriteString(\"var var\" + typeName + \" = []interface{}{\")\n\t\t\tfor i := range enum {\n\t\t\t\tswitch v := enum[i].(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tbuf.WriteString(`\"`)\n\t\t\t\t\tbuf.WriteString(strings.Replace(v, `\"`, `\\\"`, -1))\n\t\t\t\t\tbuf.WriteString(`\"`)\n\t\t\t\tdefault:\n\t\t\t\t\tbuf.WriteString(fmt.Sprintf(\"%v\", v))\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(\",\")\n\t\t\t}\n\t\t\tbuf.WriteString(\"}\\n\")\n\t\t\tbuf.WriteString(\"switch v:= var\" + typeName + \"[enum].(type) {\\n\")\n\t\t\tbuf.WriteString(\"case string:\\n\")\n\t\t\tbuf.WriteString(\"return []byte(fmt.Sprintf(\\\"\\\\\\\"%v\\\\\\\"\\\", strings.Replace(v, `\\\"`, `\\\\\\\"`, -1))), nil\\n\")\n\t\t\tbuf.WriteString(\"default:\\n\")\n\t\t\tbuf.WriteString(\"return []byte(fmt.Sprintf(\\\"%v\\\", v)), nil\\n\")\n\t\t\tbuf.WriteString(\"}\\n\")\n\t\t\tbuf.WriteString(\"}\\n\\n\")\n\t\t}\n\t}\n\n\treturn format.Source(buf.Bytes())\n}\n\ntype importSpec struct {\n\tname string \/\/ ident\n\tpath string\n}\n\ntype typeSpec struct {\n\tname string \/\/ type name\n\tjsontype *JSONType\n}\n\ntype jsonFormat int\n\nconst (\n\tformatObject jsonFormat = iota\n\tformatArray\n\tformatString\n\tformatBoolean\n\tformatNumber\n\tformatInteger\n\tformatDatetime\n)\n\n\/\/ JSONType ...\ntype JSONType struct {\n\tformat jsonFormat\n\tnullable bool\n\tfields []*field \/\/ only object\n\titemType *JSONType \/\/ only array\n\tenumType string\n}\n\n\/\/ AddField ...\nfunc (t *JSONType) AddField(f *field) {\n\tif t.fields == nil {\n\t\tt.fields = []*field{}\n\t}\n\tt.fields = append(t.fields, f)\n}\n\nfunc (t *JSONType) generate() []byte {\n\tvar buf bytes.Buffer\n\tif t.nullable {\n\t\tbuf.WriteString(\"*\")\n\t}\n\tif t.format == formatObject {\n\t\tif t.fields == nil {\n\t\t\tbuf.WriteString(\"map[string]interface{}\")\n\t\t} else {\n\t\t\tbuf.WriteString(\"struct {\\n\")\n\t\t\tfor i := range t.fields {\n\t\t\t\tbuf.WriteString(t.fields[i].name)\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t\tbuf.Write(t.fields[i].jsontype.generate())\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t\tbuf.Write(t.fields[i].jsontag.generate())\n\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t}\n\t\t\tbuf.WriteString(\"}\")\n\t\t}\n\t} else if t.format == formatArray {\n\t\tbuf.WriteString(\"[]\")\n\t\tbuf.Write(t.itemType.generate())\n\t} else if t.format == formatString {\n\t\tbuf.WriteString(\"string\")\n\t} else if t.format == formatBoolean {\n\t\tbuf.WriteString(\"bool\")\n\t} else if t.format == formatNumber {\n\t\tbuf.WriteString(\"float64\")\n\t} else if t.format == formatInteger {\n\t\tbuf.WriteString(\"int\")\n\t} else if t.format == formatDatetime {\n\t\tbuf.WriteString(\"time.Time\")\n\t}\n\treturn buf.Bytes()\n}\n\ntype field struct {\n\tname string\n\tjsontype *JSONType\n\tjsontag *jsonTag\n}\n\ntype jsonTag struct {\n\tname string\n\tomitEmpty bool\n}\n\n\/\/ Generate JSON tag code\nfunc (t *jsonTag) generate() []byte {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"`json:\\\"\")\n\tbuf.WriteString(t.name)\n\tif t.omitEmpty {\n\t\tbuf.WriteString(\",omitempty\")\n\t}\n\tbuf.WriteString(\"\\\"`\")\n\treturn buf.Bytes()\n}\nadd generated comment\/\/ Copyright 2017 aaharu All rights reserved.\n\/\/ This source code is licensed under the BSD-style license found in\n\/\/ the LICENSE file in the root directory of this source tree.\n\npackage codegen\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"strings\"\n\n\t\"github.com\/aaharu\/schemarshal\/utils\"\n\t\"github.com\/aaharu\/schemarshal\/version\"\n)\n\nvar enumList = map[string][]interface{}{} \/\/ FIXME\n\n\/\/ Generator of Go source code from JSON Schema\ntype Generator struct {\n\tname string \/\/ package nage\n\tcommand string\n\timports []*importSpec\n\tdecls []*typeSpec\n}\n\n\/\/ NewGenerator create Generator struct\nfunc NewGenerator(packageName string, command string) *Generator {\n\treturn &Generator{\n\t\tname: packageName,\n\t\tcommand: command,\n\t}\n}\n\n\/\/ AddImport add an import statement\nfunc (g *Generator) AddImport(path string, name string) {\n\tif g.imports == nil {\n\t\tg.imports = []*importSpec{}\n\t}\n\tg.imports = append(g.imports, &importSpec{\n\t\tname: name,\n\t\tpath: path,\n\t})\n}\n\n\/\/ AddType add a type statement\nfunc (g *Generator) AddType(name string, jsonType *JSONType) {\n\tif g.decls == nil {\n\t\tg.decls = []*typeSpec{}\n\t}\n\tg.decls = append(g.decls, &typeSpec{\n\t\tname: name,\n\t\tjsontype: jsonType,\n\t})\n}\n\n\/\/ Generate gofmt-ed Go source code\nfunc (g *Generator) Generate() ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(fmt.Sprintf(\"\/\/ generated by %s `%s`\\n\", version.String(), g.command))\n\tbuf.WriteString(\"\/\/ DO NOT RECOMMEND EDITING THIS FILE.\\n\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"package %s\\n\\n\", g.name))\n\n\tif len(g.imports) > 1 {\n\t\tbuf.WriteString(\"import (\\n\")\n\t\tfor i := range g.imports {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s %s\\n\", g.imports[i].name, g.imports[i].path))\n\t\t}\n\t\tbuf.WriteString(\")\\n\\n\")\n\t} else if len(g.imports) == 1 {\n\t\tbuf.WriteString(fmt.Sprintf(\"import %s %s\\n\\n\", g.imports[0].name, g.imports[0].path))\n\t}\n\n\tif g.decls != nil {\n\t\tfor i := range g.decls {\n\t\t\tbuf.WriteString(\"type \" + g.decls[i].name + \" \")\n\t\t\tbuf.Write(g.decls[i].jsontype.generate())\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\t}\n\n\tif len(enumList) > 0 {\n\t\tbuf.WriteString(\"\\n\")\n\t\tfor typeName, enum := range enumList {\n\t\t\tenumName := typeName + \"Enum\"\n\t\t\tbuf.WriteString(\"type \" + enumName + \" int\\n\")\n\t\t\tbuf.WriteString(\"const (\\n\")\n\t\t\tfor i := range enum {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tbuf.WriteString(enumName + utils.UpperCamelCase(fmt.Sprintf(\"%v\", enum[0])) + \" \" + enumName + \" = iota\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tbuf.WriteString(enumName + utils.UpperCamelCase(fmt.Sprintf(\"%v\", enum[i])) + \"\\n\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteString(\")\\n\")\n\t\t\tbuf.WriteString(\"func (enum \" + enumName + \") MarshalJSON() ([]byte, error) {\\n\")\n\t\t\tbuf.WriteString(\"var var\" + typeName + \" = []interface{}{\")\n\t\t\tfor i := range enum {\n\t\t\t\tswitch v := enum[i].(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tbuf.WriteString(`\"`)\n\t\t\t\t\tbuf.WriteString(strings.Replace(v, `\"`, `\\\"`, -1))\n\t\t\t\t\tbuf.WriteString(`\"`)\n\t\t\t\tdefault:\n\t\t\t\t\tbuf.WriteString(fmt.Sprintf(\"%v\", v))\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(\",\")\n\t\t\t}\n\t\t\tbuf.WriteString(\"}\\n\")\n\t\t\tbuf.WriteString(\"switch v:= var\" + typeName + \"[enum].(type) {\\n\")\n\t\t\tbuf.WriteString(\"case string:\\n\")\n\t\t\tbuf.WriteString(\"return []byte(fmt.Sprintf(\\\"\\\\\\\"%v\\\\\\\"\\\", strings.Replace(v, `\\\"`, `\\\\\\\"`, -1))), nil\\n\")\n\t\t\tbuf.WriteString(\"default:\\n\")\n\t\t\tbuf.WriteString(\"return []byte(fmt.Sprintf(\\\"%v\\\", v)), nil\\n\")\n\t\t\tbuf.WriteString(\"}\\n\")\n\t\t\tbuf.WriteString(\"}\\n\\n\")\n\t\t}\n\t}\n\n\treturn format.Source(buf.Bytes())\n}\n\ntype importSpec struct {\n\tname string \/\/ ident\n\tpath string\n}\n\ntype typeSpec struct {\n\tname string \/\/ type name\n\tjsontype *JSONType\n}\n\ntype jsonFormat int\n\nconst (\n\tformatObject jsonFormat = iota\n\tformatArray\n\tformatString\n\tformatBoolean\n\tformatNumber\n\tformatInteger\n\tformatDatetime\n)\n\n\/\/ JSONType ...\ntype JSONType struct {\n\tformat jsonFormat\n\tnullable bool\n\tfields []*field \/\/ only object\n\titemType *JSONType \/\/ only array\n\tenumType string\n}\n\n\/\/ AddField ...\nfunc (t *JSONType) AddField(f *field) {\n\tif t.fields == nil {\n\t\tt.fields = []*field{}\n\t}\n\tt.fields = append(t.fields, f)\n}\n\nfunc (t *JSONType) generate() []byte {\n\tvar buf bytes.Buffer\n\tif t.nullable {\n\t\tbuf.WriteString(\"*\")\n\t}\n\tif t.format == formatObject {\n\t\tif t.fields == nil {\n\t\t\tbuf.WriteString(\"map[string]interface{}\")\n\t\t} else {\n\t\t\tbuf.WriteString(\"struct {\\n\")\n\t\t\tfor i := range t.fields {\n\t\t\t\tbuf.WriteString(t.fields[i].name)\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t\tbuf.Write(t.fields[i].jsontype.generate())\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t\tbuf.Write(t.fields[i].jsontag.generate())\n\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t}\n\t\t\tbuf.WriteString(\"}\")\n\t\t}\n\t} else if t.format == formatArray {\n\t\tbuf.WriteString(\"[]\")\n\t\tbuf.Write(t.itemType.generate())\n\t} else if t.format == formatString {\n\t\tbuf.WriteString(\"string\")\n\t} else if t.format == formatBoolean {\n\t\tbuf.WriteString(\"bool\")\n\t} else if t.format == formatNumber {\n\t\tbuf.WriteString(\"float64\")\n\t} else if t.format == formatInteger {\n\t\tbuf.WriteString(\"int\")\n\t} else if t.format == formatDatetime {\n\t\tbuf.WriteString(\"time.Time\")\n\t}\n\treturn buf.Bytes()\n}\n\ntype field struct {\n\tname string\n\tjsontype *JSONType\n\tjsontag *jsonTag\n}\n\ntype jsonTag struct {\n\tname string\n\tomitEmpty bool\n}\n\n\/\/ Generate JSON tag code\nfunc (t *jsonTag) generate() []byte {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"`json:\\\"\")\n\tbuf.WriteString(t.name)\n\tif t.omitEmpty {\n\t\tbuf.WriteString(\",omitempty\")\n\t}\n\tbuf.WriteString(\"\\\"`\")\n\treturn buf.Bytes()\n}\n<|endoftext|>"} {"text":"package collector\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\n\/\/ DropletCollector collects metrics about all droplets.\ntype DropletCollector struct {\n\tclient *godo.Client\n\n\tUp *prometheus.Desc\n\tCPUs *prometheus.Desc\n\tMemory *prometheus.Desc\n\tDisk *prometheus.Desc\n\tPriceHourly *prometheus.Desc\n\tPriceMonthly *prometheus.Desc\n}\n\n\/\/ NewDropletCollector returns a new DropletCollector.\nfunc NewDropletCollector(client *godo.Client) *DropletCollector {\n\tlabels := []string{\"id\", \"name\", \"region\"}\n\n\treturn &DropletCollector{\n\t\tclient: client,\n\n\t\tUp: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_up\",\n\t\t\t\"If 1 the droplet is up and running, 0 otherwise\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tCPUs: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_cpus\",\n\t\t\t\"Droplet's number of CPUs\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tMemory: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_memory_bytes\",\n\t\t\t\"Droplet's memory in bytes\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tDisk: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_disk_bytes\",\n\t\t\t\"Droplet's disk in bytes\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tPriceHourly: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_price_hourly\",\n\t\t\t\"Price of the Droplet billed hourly\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tPriceMonthly: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_price_monthly\",\n\t\t\t\"Price of the Droplet billed monthly\",\n\t\t\tlabels, nil,\n\t\t),\n\t}\n}\n\n\/\/ Describe sends the super-set of all possible descriptors of metrics\n\/\/ collected by this Collector.\nfunc (c *DropletCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.CPUs\n\tch <- c.Memory\n\tch <- c.Disk\n\tch <- c.PriceHourly\n\tch <- c.PriceMonthly\n}\n\n\/\/ Collect is called by the Prometheus registry when collecting metrics.\nfunc (c *DropletCollector) Collect(ch chan<- prometheus.Metric) {\n\tdroplets, _, err := c.client.Droplets.List(context.TODO(), nil)\n\tif err != nil {\n\t\tlog.Printf(\"Can't list droplets: %v\", err)\n\t}\n\n\tfor _, droplet := range droplets {\n\t\tlabels := []string{\n\t\t\tfmt.Sprintf(\"%d\", droplet.ID),\n\t\t\tdroplet.Name,\n\t\t\tdroplet.Region.Slug,\n\t\t}\n\n\t\tvar active float64\n\t\tif droplet.Status == \"active\" {\n\t\t\tactive = 1.0\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Up,\n\t\t\tprometheus.GaugeValue,\n\t\t\tactive,\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CPUs,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Vcpus),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Memory,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Memory*1024*1024),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Disk,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Disk*1000*1000*1000),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PriceHourly,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Size.PriceHourly), \/\/ TODO: Find out the correct data type\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PriceMonthly,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Size.PriceMonthly), \/\/ TODO: Find out the correct data type\n\t\t\tlabels...,\n\t\t)\n\t}\n}\nAdd that prices are in dollarspackage collector\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\n\/\/ DropletCollector collects metrics about all droplets.\ntype DropletCollector struct {\n\tclient *godo.Client\n\n\tUp *prometheus.Desc\n\tCPUs *prometheus.Desc\n\tMemory *prometheus.Desc\n\tDisk *prometheus.Desc\n\tPriceHourly *prometheus.Desc\n\tPriceMonthly *prometheus.Desc\n}\n\n\/\/ NewDropletCollector returns a new DropletCollector.\nfunc NewDropletCollector(client *godo.Client) *DropletCollector {\n\tlabels := []string{\"id\", \"name\", \"region\"}\n\n\treturn &DropletCollector{\n\t\tclient: client,\n\n\t\tUp: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_up\",\n\t\t\t\"If 1 the droplet is up and running, 0 otherwise\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tCPUs: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_cpus\",\n\t\t\t\"Droplet's number of CPUs\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tMemory: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_memory_bytes\",\n\t\t\t\"Droplet's memory in bytes\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tDisk: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_disk_bytes\",\n\t\t\t\"Droplet's disk in bytes\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tPriceHourly: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_price_hourly\",\n\t\t\t\"Price of the Droplet billed hourly in dollars\",\n\t\t\tlabels, nil,\n\t\t),\n\t\tPriceMonthly: prometheus.NewDesc(\n\t\t\t\"digitalocean_droplet_price_monthly\",\n\t\t\t\"Price of the Droplet billed monthly in dollars\",\n\t\t\tlabels, nil,\n\t\t),\n\t}\n}\n\n\/\/ Describe sends the super-set of all possible descriptors of metrics\n\/\/ collected by this Collector.\nfunc (c *DropletCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.CPUs\n\tch <- c.Memory\n\tch <- c.Disk\n\tch <- c.PriceHourly\n\tch <- c.PriceMonthly\n}\n\n\/\/ Collect is called by the Prometheus registry when collecting metrics.\nfunc (c *DropletCollector) Collect(ch chan<- prometheus.Metric) {\n\tdroplets, _, err := c.client.Droplets.List(context.TODO(), nil)\n\tif err != nil {\n\t\tlog.Printf(\"Can't list droplets: %v\", err)\n\t}\n\n\tfor _, droplet := range droplets {\n\t\tlabels := []string{\n\t\t\tfmt.Sprintf(\"%d\", droplet.ID),\n\t\t\tdroplet.Name,\n\t\t\tdroplet.Region.Slug,\n\t\t}\n\n\t\tvar active float64\n\t\tif droplet.Status == \"active\" {\n\t\t\tactive = 1.0\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Up,\n\t\t\tprometheus.GaugeValue,\n\t\t\tactive,\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CPUs,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Vcpus),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Memory,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Memory*1024*1024),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Disk,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Disk*1000*1000*1000),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PriceHourly,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Size.PriceHourly),\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PriceMonthly,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(droplet.Size.PriceMonthly),\n\t\t\tlabels...,\n\t\t)\n\t}\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n)\n\ntype KVDeleteCommand struct {\n\tMeta\n}\n\nfunc (c *KVDeleteCommand) Help() string {\n\thelpText := `\nUsage: consul-cli kv-delete [options] path\n\n Delete a given path from the K\/V\n\nOptions:\n\n` + c.ConsulHelp() +\n` --modifyindex=\tPerform a Check-and-Set delete\n\t\t\t\t(default: not set)\n --recurse\t\t\tPerform a recursive delete\n\t\t\t\t(default: false)\n`\n\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *KVDeleteCommand) Run (args[]string) int {\n\tvar modifyIndex string\n\tvar doRecurse bool\n\tvar dc string\n\n\tflags := c.Meta.FlagSet()\n\tflags.StringVar(&modifyIndex, \"modifyindex\", \"\", \"\")\n\tflags.StringVar(&dc, \"datacenter\", \"\", \"\")\n\tflags.BoolVar(&doRecurse, \"recurse\", false, \"\")\n\tflags.Usage = func() { c.UI.Output(c.Help()) }\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\textra := flags.Args()\n\tif len(extra) < 1 {\n\t\tc.UI.Error(\"Key path must be specified\")\n\t\tc.UI.Error(\"\")\n\t\tc.UI.Error(c.Help())\n\t\treturn 1\n\t}\n\n\tpath := extra[0]\n\n\tconsul, err := c.Client()\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tclient := consul.KV()\n\n\twriteOpts := c.WriteOptions()\n\n\tswitch {\n\tcase doRecurse:\n\t\t_, err := client.DeleteTree(path, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\tcase modifyIndex != \"\":\n\t\tm, err := strconv.ParseUint(modifyIndex, 0, 64)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tkv := consulapi.KVPair{\n\t\t\tKey:\t\tpath,\n\t\t\tModifyIndex:\tm,\n\t\t}\n\n\t\tsuccess, _, err := client.DeleteCAS(&kv, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\n\t\tif !success {\n\t\t\treturn 1\n\t\t}\n\tdefault:\n\t\t_, err := client.Delete(path, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc (c *KVDeleteCommand) Synopsis() string {\n\treturn \"Delete a path\"\n}\nRemove duplicate flagpackage command\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n)\n\ntype KVDeleteCommand struct {\n\tMeta\n}\n\nfunc (c *KVDeleteCommand) Help() string {\n\thelpText := `\nUsage: consul-cli kv-delete [options] path\n\n Delete a given path from the K\/V\n\nOptions:\n\n` + c.ConsulHelp() +\n` --modifyindex=\tPerform a Check-and-Set delete\n\t\t\t\t(default: not set)\n --recurse\t\t\tPerform a recursive delete\n\t\t\t\t(default: false)\n`\n\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *KVDeleteCommand) Run (args[]string) int {\n\tvar modifyIndex string\n\tvar doRecurse bool\n\n\tflags := c.Meta.FlagSet()\n\tflags.StringVar(&modifyIndex, \"modifyindex\", \"\", \"\")\n\tflags.BoolVar(&doRecurse, \"recurse\", false, \"\")\n\tflags.Usage = func() { c.UI.Output(c.Help()) }\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\textra := flags.Args()\n\tif len(extra) < 1 {\n\t\tc.UI.Error(\"Key path must be specified\")\n\t\tc.UI.Error(\"\")\n\t\tc.UI.Error(c.Help())\n\t\treturn 1\n\t}\n\n\tpath := extra[0]\n\n\tconsul, err := c.Client()\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tclient := consul.KV()\n\n\twriteOpts := c.WriteOptions()\n\n\tswitch {\n\tcase doRecurse:\n\t\t_, err := client.DeleteTree(path, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\tcase modifyIndex != \"\":\n\t\tm, err := strconv.ParseUint(modifyIndex, 0, 64)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tkv := consulapi.KVPair{\n\t\t\tKey:\t\tpath,\n\t\t\tModifyIndex:\tm,\n\t\t}\n\n\t\tsuccess, _, err := client.DeleteCAS(&kv, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\n\t\tif !success {\n\t\t\treturn 1\n\t\t}\n\tdefault:\n\t\t_, err := client.Delete(path, writeOpts)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc (c *KVDeleteCommand) Synopsis() string {\n\treturn \"Delete a path\"\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/mitchellh\/cli\"\n)\n\nfunc testReadCommand(tb testing.TB) (*cli.MockUi, *ReadCommand) {\n\ttb.Helper()\n\n\tui := cli.NewMockUi()\n\treturn ui, &ReadCommand{\n\t\tBaseCommand: &BaseCommand{\n\t\t\tUI: ui,\n\t\t},\n\t}\n}\n\nfunc TestReadCommand_Run(t *testing.T) {\n\tt.Parallel()\n\n\tcases := []struct {\n\t\tname string\n\t\targs []string\n\t\tout string\n\t\tcode int\n\t}{\n\t\t{\n\t\t\t\"not_enough_args\",\n\t\t\t[]string{},\n\t\t\t\"Not enough arguments\",\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t\"too_many_args\",\n\t\t\t[]string{\"foo\", \"bar\"},\n\t\t\t\"Too many arguments\",\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t\"not_found\",\n\t\t\t[]string{\"nope\/not\/once\/never\"},\n\t\t\t\"\",\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t\"default\",\n\t\t\t[]string{\"secret\/read\/foo\"},\n\t\t\t\"foo\",\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"field\",\n\t\t\t[]string{\n\t\t\t\t\"-field\", \"foo\",\n\t\t\t\t\"secret\/read\/foo\",\n\t\t\t},\n\t\t\t\"bar\",\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"field_not_found\",\n\t\t\t[]string{\n\t\t\t\t\"-field\", \"not-a-real-field\",\n\t\t\t\t\"secret\/read\/foo\",\n\t\t\t},\n\t\t\t\"not present in secret\",\n\t\t\t1,\n\t\t},\n\t}\n\n\tt.Run(\"validations\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tfor _, tc := range cases {\n\t\t\ttc := tc\n\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tclient, closer := testVaultServer(t)\n\t\t\t\tdefer closer()\n\n\t\t\t\tif _, err := client.Logical().Write(\"secret\/read\/foo\", map[string]interface{}{\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t}); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tui, cmd := testReadCommand(t)\n\t\t\t\tcmd.client = client\n\n\t\t\t\tcode := cmd.Run(tc.args)\n\t\t\t\tif code != tc.code {\n\t\t\t\t\tt.Errorf(\"expected %d to be %d\", code, tc.code)\n\t\t\t\t}\n\n\t\t\t\tcombined := ui.OutputWriter.String() + ui.ErrorWriter.String()\n\t\t\t\tif !strings.Contains(combined, tc.out) {\n\t\t\t\t\tt.Errorf(\"expected %q to contain %q\", combined, tc.out)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"communication_failure\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tclient, closer := testVaultServerBad(t)\n\t\tdefer closer()\n\n\t\tui, cmd := testReadCommand(t)\n\t\tcmd.client = client\n\n\t\tcode := cmd.Run([]string{\n\t\t\t\"secret\/foo\",\n\t\t})\n\t\tif exp := 2; code != exp {\n\t\t\tt.Errorf(\"expected %d to be %d\", code, exp)\n\t\t}\n\n\t\texpected := \"Error reading secret\/foo: \"\n\t\tcombined := ui.OutputWriter.String() + ui.ErrorWriter.String()\n\t\tif !strings.Contains(combined, expected) {\n\t\t\tt.Errorf(\"expected %q to contain %q\", combined, expected)\n\t\t}\n\t})\n\n\tt.Run(\"no_tabs\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t_, cmd := testReadCommand(t)\n\t\tassertNoTabs(t, cmd)\n\t})\n}\nFix read testpackage command\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/mitchellh\/cli\"\n)\n\nfunc testReadCommand(tb testing.TB) (*cli.MockUi, *ReadCommand) {\n\ttb.Helper()\n\n\tui := cli.NewMockUi()\n\treturn ui, &ReadCommand{\n\t\tBaseCommand: &BaseCommand{\n\t\t\tUI: ui,\n\t\t},\n\t}\n}\n\nfunc TestReadCommand_Run(t *testing.T) {\n\tt.Parallel()\n\n\tcases := []struct {\n\t\tname string\n\t\targs []string\n\t\tout string\n\t\tcode int\n\t}{\n\t\t{\n\t\t\t\"not_enough_args\",\n\t\t\t[]string{},\n\t\t\t\"Not enough arguments\",\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t\"proper_args\",\n\t\t\t[]string{\"foo\", \"bar=baz\"},\n\t\t\t\"No value found at foo\\n\",\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t\"not_found\",\n\t\t\t[]string{\"nope\/not\/once\/never\"},\n\t\t\t\"\",\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t\"default\",\n\t\t\t[]string{\"secret\/read\/foo\"},\n\t\t\t\"foo\",\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"field\",\n\t\t\t[]string{\n\t\t\t\t\"-field\", \"foo\",\n\t\t\t\t\"secret\/read\/foo\",\n\t\t\t},\n\t\t\t\"bar\",\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\t\"field_not_found\",\n\t\t\t[]string{\n\t\t\t\t\"-field\", \"not-a-real-field\",\n\t\t\t\t\"secret\/read\/foo\",\n\t\t\t},\n\t\t\t\"not present in secret\",\n\t\t\t1,\n\t\t},\n\t}\n\n\tt.Run(\"validations\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tfor _, tc := range cases {\n\t\t\ttc := tc\n\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tclient, closer := testVaultServer(t)\n\t\t\t\tdefer closer()\n\n\t\t\t\tif _, err := client.Logical().Write(\"secret\/read\/foo\", map[string]interface{}{\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t}); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tui, cmd := testReadCommand(t)\n\t\t\t\tcmd.client = client\n\n\t\t\t\tcode := cmd.Run(tc.args)\n\t\t\t\tif code != tc.code {\n\t\t\t\t\tt.Errorf(\"expected %d to be %d\", code, tc.code)\n\t\t\t\t}\n\n\t\t\t\tcombined := ui.OutputWriter.String() + ui.ErrorWriter.String()\n\t\t\t\tif !strings.Contains(combined, tc.out) {\n\t\t\t\t\tt.Errorf(\"%s: expected %q to contain %q\", tc.name, combined, tc.out)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"communication_failure\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tclient, closer := testVaultServerBad(t)\n\t\tdefer closer()\n\n\t\tui, cmd := testReadCommand(t)\n\t\tcmd.client = client\n\n\t\tcode := cmd.Run([]string{\n\t\t\t\"secret\/foo\",\n\t\t})\n\t\tif exp := 2; code != exp {\n\t\t\tt.Errorf(\"expected %d to be %d\", code, exp)\n\t\t}\n\n\t\texpected := \"Error reading secret\/foo: \"\n\t\tcombined := ui.OutputWriter.String() + ui.ErrorWriter.String()\n\t\tif !strings.Contains(combined, expected) {\n\t\t\tt.Errorf(\"expected %q to contain %q\", combined, expected)\n\t\t}\n\t})\n\n\tt.Run(\"no_tabs\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t_, cmd := testReadCommand(t)\n\t\tassertNoTabs(t, cmd)\n\t})\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nfunc TestShow(t *testing.T) {\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tContextOpts: testCtxConfig(testProvider()),\n\t\tUi: ui,\n\t}\n\n\targs := []string{\n\t\t\"bad\",\n\t\t\"bad\",\n\t}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.OutputWriter.String())\n\t}\n}\n\nfunc TestShow_noArgs(t *testing.T) {\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tContextOpts: testCtxConfig(testProvider()),\n\t\tUi: ui,\n\t}\n\n\targs := []string{}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.OutputWriter.String())\n\t}\n}\n\nfunc TestShow_plan(t *testing.T) {\n\tplanPath := testPlanFile(t, &terraform.Plan{\n\t\tConfig: new(config.Config),\n\t})\n\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tContextOpts: testCtxConfig(testProvider()),\n\t\tUi: ui,\n\t}\n\n\targs := []string{\n\t\tplanPath,\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.ErrorWriter.String())\n\t}\n}\n\nfunc TestShow_state(t *testing.T) {\n\toriginalState := &terraform.State{\n\t\tResources: map[string]*terraform.ResourceState{\n\t\t\t\"test_instance.foo\": &terraform.ResourceState{\n\t\t\t\tID: \"bar\",\n\t\t\t\tType: \"test_instance\",\n\t\t\t},\n\t\t},\n\t}\n\n\tstatePath := testStateFile(t, originalState)\n\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tContextOpts: testCtxConfig(testProvider()),\n\t\tUi: ui,\n\t}\n\n\targs := []string{\n\t\tstatePath,\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.ErrorWriter.String())\n\t}\n}\ncommand\/show: fix testspackage command\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nfunc TestShow(t *testing.T) {\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tMeta: Meta{\n\t\t\tContextOpts: testCtxConfig(testProvider()),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\targs := []string{\n\t\t\"bad\",\n\t\t\"bad\",\n\t}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.OutputWriter.String())\n\t}\n}\n\nfunc TestShow_noArgs(t *testing.T) {\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tMeta: Meta{\n\t\t\tContextOpts: testCtxConfig(testProvider()),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\targs := []string{}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.OutputWriter.String())\n\t}\n}\n\nfunc TestShow_plan(t *testing.T) {\n\tplanPath := testPlanFile(t, &terraform.Plan{\n\t\tConfig: new(config.Config),\n\t})\n\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tMeta: Meta{\n\t\t\tContextOpts: testCtxConfig(testProvider()),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\targs := []string{\n\t\tplanPath,\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.ErrorWriter.String())\n\t}\n}\n\nfunc TestShow_state(t *testing.T) {\n\toriginalState := &terraform.State{\n\t\tResources: map[string]*terraform.ResourceState{\n\t\t\t\"test_instance.foo\": &terraform.ResourceState{\n\t\t\t\tID: \"bar\",\n\t\t\t\tType: \"test_instance\",\n\t\t\t},\n\t\t},\n\t}\n\n\tstatePath := testStateFile(t, originalState)\n\n\tui := new(cli.MockUi)\n\tc := &ShowCommand{\n\t\tMeta: Meta{\n\t\t\tContextOpts: testCtxConfig(testProvider()),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\targs := []string{\n\t\tstatePath,\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: \\n%s\", ui.ErrorWriter.String())\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gohugoio\/hugo\/hugolib\/paths\"\n\n\t\"github.com\/gohugoio\/hugo\/common\/hugo\"\n\t\"github.com\/gohugoio\/hugo\/common\/loggers\"\n\t\"github.com\/gohugoio\/hugo\/config\"\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/spf13\/cobra\"\n)\n\ntype commandsBuilder struct {\n\thugoBuilderCommon\n\n\tcommands []cmder\n}\n\nfunc newCommandsBuilder() *commandsBuilder {\n\treturn &commandsBuilder{}\n}\n\nfunc (b *commandsBuilder) addCommands(commands ...cmder) *commandsBuilder {\n\tb.commands = append(b.commands, commands...)\n\treturn b\n}\n\nfunc (b *commandsBuilder) addAll() *commandsBuilder {\n\tb.addCommands(\n\t\tb.newServerCmd(),\n\t\tnewVersionCmd(),\n\t\tnewEnvCmd(),\n\t\tnewConfigCmd(),\n\t\tnewCheckCmd(),\n\t\tnewDeployCmd(),\n\t\tnewConvertCmd(),\n\t\tb.newNewCmd(),\n\t\tnewListCmd(),\n\t\tnewImportCmd(),\n\t\tnewGenCmd(),\n\t\tcreateReleaser(),\n\t\tb.newModCmd(),\n\t)\n\n\treturn b\n}\n\nfunc (b *commandsBuilder) build() *hugoCmd {\n\th := b.newHugoCmd()\n\taddCommands(h.getCommand(), b.commands...)\n\treturn h\n}\n\nfunc addCommands(root *cobra.Command, commands ...cmder) {\n\tfor _, command := range commands {\n\t\tcmd := command.getCommand()\n\t\tif cmd == nil {\n\t\t\tcontinue\n\t\t}\n\t\troot.AddCommand(cmd)\n\t}\n}\n\ntype baseCmd struct {\n\tcmd *cobra.Command\n}\n\nvar _ commandsBuilderGetter = (*baseBuilderCmd)(nil)\n\n\/\/ Used in tests.\ntype commandsBuilderGetter interface {\n\tgetCommandsBuilder() *commandsBuilder\n}\ntype baseBuilderCmd struct {\n\t*baseCmd\n\t*commandsBuilder\n}\n\nfunc (b *baseBuilderCmd) getCommandsBuilder() *commandsBuilder {\n\treturn b.commandsBuilder\n}\n\nfunc (c *baseCmd) getCommand() *cobra.Command {\n\treturn c.cmd\n}\n\nfunc newBaseCmd(cmd *cobra.Command) *baseCmd {\n\treturn &baseCmd{cmd: cmd}\n}\n\nfunc (b *commandsBuilder) newBuilderCmd(cmd *cobra.Command) *baseBuilderCmd {\n\tbcmd := &baseBuilderCmd{commandsBuilder: b, baseCmd: &baseCmd{cmd: cmd}}\n\tbcmd.hugoBuilderCommon.handleFlags(cmd)\n\treturn bcmd\n}\n\nfunc (c *baseCmd) flagsToConfig(cfg config.Provider) {\n\tinitializeFlags(c.cmd, cfg)\n}\n\ntype hugoCmd struct {\n\t*baseBuilderCmd\n\n\t\/\/ Need to get the sites once built.\n\tc *commandeer\n}\n\nvar _ cmder = (*nilCommand)(nil)\n\ntype nilCommand struct {\n}\n\nfunc (c *nilCommand) getCommand() *cobra.Command {\n\treturn nil\n}\n\nfunc (c *nilCommand) flagsToConfig(cfg config.Provider) {\n\n}\n\nfunc (b *commandsBuilder) newHugoCmd() *hugoCmd {\n\tcc := &hugoCmd{}\n\n\tcc.baseBuilderCmd = b.newBuilderCmd(&cobra.Command{\n\t\tUse: \"hugo\",\n\t\tShort: \"hugo builds your site\",\n\t\tLong: `hugo is the main command, used to build your Hugo site.\n\nHugo is a Fast and Flexible Static Site Generator\nbuilt with love by spf13 and friends in Go.\n\nComplete documentation is available at http:\/\/gohugo.io\/.`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tdefer cc.timeTrack(time.Now(), \"Total\")\n\t\t\tcfgInit := func(c *commandeer) error {\n\t\t\t\tif cc.buildWatch {\n\t\t\t\t\tc.Set(\"disableLiveReload\", true)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tc, err := initializeConfig(true, cc.buildWatch, &cc.hugoBuilderCommon, cc, cfgInit)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcc.c = c\n\n\t\t\treturn c.build()\n\t\t},\n\t})\n\n\tcc.cmd.PersistentFlags().StringVar(&cc.cfgFile, \"config\", \"\", \"config file (default is path\/config.yaml|json|toml)\")\n\tcc.cmd.PersistentFlags().StringVar(&cc.cfgDir, \"configDir\", \"config\", \"config dir\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.quiet, \"quiet\", false, \"build in quiet mode\")\n\n\t\/\/ Set bash-completion\n\t_ = cc.cmd.PersistentFlags().SetAnnotation(\"config\", cobra.BashCompFilenameExt, config.ValidConfigFileExtensions)\n\n\tcc.cmd.PersistentFlags().BoolVarP(&cc.verbose, \"verbose\", \"v\", false, \"verbose output\")\n\tcc.cmd.PersistentFlags().BoolVarP(&cc.debug, \"debug\", \"\", false, \"debug output\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.logging, \"log\", false, \"enable Logging\")\n\tcc.cmd.PersistentFlags().StringVar(&cc.logFile, \"logFile\", \"\", \"log File path (if set, logging enabled automatically)\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.verboseLog, \"verboseLog\", false, \"verbose logging\")\n\n\tcc.cmd.Flags().BoolVarP(&cc.buildWatch, \"watch\", \"w\", false, \"watch filesystem for changes and recreate as needed\")\n\n\tcc.cmd.Flags().Bool(\"renderToMemory\", false, \"render to memory (only useful for benchmark testing)\")\n\n\t\/\/ Set bash-completion\n\t_ = cc.cmd.PersistentFlags().SetAnnotation(\"logFile\", cobra.BashCompFilenameExt, []string{})\n\n\tcc.cmd.SetGlobalNormalizationFunc(helpers.NormalizeHugoFlags)\n\tcc.cmd.SilenceUsage = true\n\n\treturn cc\n}\n\ntype hugoBuilderCommon struct {\n\tsource string\n\tbaseURL string\n\tenvironment string\n\n\tbuildWatch bool\n\n\tgc bool\n\n\t\/\/ Profile flags (for debugging of performance problems)\n\tcpuprofile string\n\tmemprofile string\n\tmutexprofile string\n\ttraceprofile string\n\n\t\/\/ TODO(bep) var vs string\n\tlogging bool\n\tverbose bool\n\tverboseLog bool\n\tdebug bool\n\tquiet bool\n\n\tcfgFile string\n\tcfgDir string\n\tlogFile string\n}\n\nfunc (cc *hugoBuilderCommon) timeTrack(start time.Time, name string) {\n\tif cc.quiet {\n\t\treturn\n\t}\n\telapsed := time.Since(start)\n\tfmt.Printf(\"%s in %v ms\\n\", name, int(1000*elapsed.Seconds()))\n}\n\nfunc (cc *hugoBuilderCommon) getConfigDir(baseDir string) string {\n\tif cc.cfgDir != \"\" {\n\t\treturn paths.AbsPathify(baseDir, cc.cfgDir)\n\t}\n\n\tif v, found := os.LookupEnv(\"HUGO_CONFIGDIR\"); found {\n\t\treturn paths.AbsPathify(baseDir, v)\n\t}\n\n\treturn paths.AbsPathify(baseDir, \"config\")\n}\n\nfunc (cc *hugoBuilderCommon) getEnvironment(isServer bool) string {\n\tif cc.environment != \"\" {\n\t\treturn cc.environment\n\t}\n\n\tif v, found := os.LookupEnv(\"HUGO_ENVIRONMENT\"); found {\n\t\treturn v\n\t}\n\n\tif isServer {\n\t\treturn hugo.EnvironmentDevelopment\n\t}\n\n\treturn hugo.EnvironmentProduction\n}\n\nfunc (cc *hugoBuilderCommon) handleCommonBuilderFlags(cmd *cobra.Command) {\n\tcmd.PersistentFlags().StringVarP(&cc.source, \"source\", \"s\", \"\", \"filesystem path to read files relative from\")\n\tcmd.PersistentFlags().SetAnnotation(\"source\", cobra.BashCompSubdirsInDir, []string{})\n\tcmd.PersistentFlags().StringVarP(&cc.environment, \"environment\", \"e\", \"\", \"build environment\")\n\tcmd.PersistentFlags().StringP(\"themesDir\", \"\", \"\", \"filesystem path to themes directory\")\n\tcmd.PersistentFlags().BoolP(\"ignoreVendor\", \"\", false, \"ignores any _vendor directory\")\n}\n\nfunc (cc *hugoBuilderCommon) handleFlags(cmd *cobra.Command) {\n\tcc.handleCommonBuilderFlags(cmd)\n\tcmd.Flags().Bool(\"cleanDestinationDir\", false, \"remove files from destination not found in static directories\")\n\tcmd.Flags().BoolP(\"buildDrafts\", \"D\", false, \"include content marked as draft\")\n\tcmd.Flags().BoolP(\"buildFuture\", \"F\", false, \"include content with publishdate in the future\")\n\tcmd.Flags().BoolP(\"buildExpired\", \"E\", false, \"include expired content\")\n\tcmd.Flags().StringP(\"contentDir\", \"c\", \"\", \"filesystem path to content directory\")\n\tcmd.Flags().StringP(\"layoutDir\", \"l\", \"\", \"filesystem path to layout directory\")\n\tcmd.Flags().StringP(\"cacheDir\", \"\", \"\", \"filesystem path to cache directory. Defaults: $TMPDIR\/hugo_cache\/\")\n\tcmd.Flags().BoolP(\"ignoreCache\", \"\", false, \"ignores the cache directory\")\n\tcmd.Flags().StringP(\"destination\", \"d\", \"\", \"filesystem path to write files to\")\n\tcmd.Flags().StringSliceP(\"theme\", \"t\", []string{}, \"themes to use (located in \/themes\/THEMENAME\/)\")\n\tcmd.Flags().StringVarP(&cc.baseURL, \"baseURL\", \"b\", \"\", \"hostname (and path) to the root, e.g. http:\/\/spf13.com\/\")\n\tcmd.Flags().Bool(\"enableGitInfo\", false, \"add Git revision, date and author info to the pages\")\n\tcmd.Flags().BoolVar(&cc.gc, \"gc\", false, \"enable to run some cleanup tasks (remove unused cache files) after the build\")\n\n\tcmd.Flags().Bool(\"templateMetrics\", false, \"display metrics about template executions\")\n\tcmd.Flags().Bool(\"templateMetricsHints\", false, \"calculate some improvement hints when combined with --templateMetrics\")\n\tcmd.Flags().BoolP(\"forceSyncStatic\", \"\", false, \"copy all files when static is changed.\")\n\tcmd.Flags().BoolP(\"noTimes\", \"\", false, \"don't sync modification time of files\")\n\tcmd.Flags().BoolP(\"noChmod\", \"\", false, \"don't sync permission mode of files\")\n\tcmd.Flags().BoolP(\"i18n-warnings\", \"\", false, \"print missing translations\")\n\tcmd.Flags().BoolP(\"path-warnings\", \"\", false, \"print warnings on duplicate target paths etc.\")\n\tcmd.Flags().StringVarP(&cc.cpuprofile, \"profile-cpu\", \"\", \"\", \"write cpu profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.memprofile, \"profile-mem\", \"\", \"\", \"write memory profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.mutexprofile, \"profile-mutex\", \"\", \"\", \"write Mutex profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.traceprofile, \"trace\", \"\", \"\", \"write trace to `file` (not useful in general)\")\n\n\t\/\/ Hide these for now.\n\tcmd.Flags().MarkHidden(\"profile-cpu\")\n\tcmd.Flags().MarkHidden(\"profile-mem\")\n\tcmd.Flags().MarkHidden(\"profile-mutex\")\n\n\tcmd.Flags().StringSlice(\"disableKinds\", []string{}, \"disable different kind of pages (home, RSS etc.)\")\n\n\tcmd.Flags().Bool(\"minify\", false, \"minify any supported output format (HTML, XML etc.)\")\n\n\t\/\/ Set bash-completion.\n\t\/\/ Each flag must first be defined before using the SetAnnotation() call.\n\t_ = cmd.Flags().SetAnnotation(\"source\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"cacheDir\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"destination\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"theme\", cobra.BashCompSubdirsInDir, []string{\"themes\"})\n}\n\nfunc checkErr(logger *loggers.Logger, err error, s ...string) {\n\tif err == nil {\n\t\treturn\n\t}\n\tif len(s) == 0 {\n\t\tlogger.CRITICAL.Println(err)\n\t\treturn\n\t}\n\tfor _, message := range s {\n\t\tlogger.ERROR.Println(message)\n\t}\n\tlogger.ERROR.Println(err)\n}\ncommands: Use HUGO_ENV if set\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gohugoio\/hugo\/hugolib\/paths\"\n\n\t\"github.com\/gohugoio\/hugo\/common\/hugo\"\n\t\"github.com\/gohugoio\/hugo\/common\/loggers\"\n\t\"github.com\/gohugoio\/hugo\/config\"\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/spf13\/cobra\"\n)\n\ntype commandsBuilder struct {\n\thugoBuilderCommon\n\n\tcommands []cmder\n}\n\nfunc newCommandsBuilder() *commandsBuilder {\n\treturn &commandsBuilder{}\n}\n\nfunc (b *commandsBuilder) addCommands(commands ...cmder) *commandsBuilder {\n\tb.commands = append(b.commands, commands...)\n\treturn b\n}\n\nfunc (b *commandsBuilder) addAll() *commandsBuilder {\n\tb.addCommands(\n\t\tb.newServerCmd(),\n\t\tnewVersionCmd(),\n\t\tnewEnvCmd(),\n\t\tnewConfigCmd(),\n\t\tnewCheckCmd(),\n\t\tnewDeployCmd(),\n\t\tnewConvertCmd(),\n\t\tb.newNewCmd(),\n\t\tnewListCmd(),\n\t\tnewImportCmd(),\n\t\tnewGenCmd(),\n\t\tcreateReleaser(),\n\t\tb.newModCmd(),\n\t)\n\n\treturn b\n}\n\nfunc (b *commandsBuilder) build() *hugoCmd {\n\th := b.newHugoCmd()\n\taddCommands(h.getCommand(), b.commands...)\n\treturn h\n}\n\nfunc addCommands(root *cobra.Command, commands ...cmder) {\n\tfor _, command := range commands {\n\t\tcmd := command.getCommand()\n\t\tif cmd == nil {\n\t\t\tcontinue\n\t\t}\n\t\troot.AddCommand(cmd)\n\t}\n}\n\ntype baseCmd struct {\n\tcmd *cobra.Command\n}\n\nvar _ commandsBuilderGetter = (*baseBuilderCmd)(nil)\n\n\/\/ Used in tests.\ntype commandsBuilderGetter interface {\n\tgetCommandsBuilder() *commandsBuilder\n}\ntype baseBuilderCmd struct {\n\t*baseCmd\n\t*commandsBuilder\n}\n\nfunc (b *baseBuilderCmd) getCommandsBuilder() *commandsBuilder {\n\treturn b.commandsBuilder\n}\n\nfunc (c *baseCmd) getCommand() *cobra.Command {\n\treturn c.cmd\n}\n\nfunc newBaseCmd(cmd *cobra.Command) *baseCmd {\n\treturn &baseCmd{cmd: cmd}\n}\n\nfunc (b *commandsBuilder) newBuilderCmd(cmd *cobra.Command) *baseBuilderCmd {\n\tbcmd := &baseBuilderCmd{commandsBuilder: b, baseCmd: &baseCmd{cmd: cmd}}\n\tbcmd.hugoBuilderCommon.handleFlags(cmd)\n\treturn bcmd\n}\n\nfunc (c *baseCmd) flagsToConfig(cfg config.Provider) {\n\tinitializeFlags(c.cmd, cfg)\n}\n\ntype hugoCmd struct {\n\t*baseBuilderCmd\n\n\t\/\/ Need to get the sites once built.\n\tc *commandeer\n}\n\nvar _ cmder = (*nilCommand)(nil)\n\ntype nilCommand struct {\n}\n\nfunc (c *nilCommand) getCommand() *cobra.Command {\n\treturn nil\n}\n\nfunc (c *nilCommand) flagsToConfig(cfg config.Provider) {\n\n}\n\nfunc (b *commandsBuilder) newHugoCmd() *hugoCmd {\n\tcc := &hugoCmd{}\n\n\tcc.baseBuilderCmd = b.newBuilderCmd(&cobra.Command{\n\t\tUse: \"hugo\",\n\t\tShort: \"hugo builds your site\",\n\t\tLong: `hugo is the main command, used to build your Hugo site.\n\nHugo is a Fast and Flexible Static Site Generator\nbuilt with love by spf13 and friends in Go.\n\nComplete documentation is available at http:\/\/gohugo.io\/.`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tdefer cc.timeTrack(time.Now(), \"Total\")\n\t\t\tcfgInit := func(c *commandeer) error {\n\t\t\t\tif cc.buildWatch {\n\t\t\t\t\tc.Set(\"disableLiveReload\", true)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tc, err := initializeConfig(true, cc.buildWatch, &cc.hugoBuilderCommon, cc, cfgInit)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcc.c = c\n\n\t\t\treturn c.build()\n\t\t},\n\t})\n\n\tcc.cmd.PersistentFlags().StringVar(&cc.cfgFile, \"config\", \"\", \"config file (default is path\/config.yaml|json|toml)\")\n\tcc.cmd.PersistentFlags().StringVar(&cc.cfgDir, \"configDir\", \"config\", \"config dir\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.quiet, \"quiet\", false, \"build in quiet mode\")\n\n\t\/\/ Set bash-completion\n\t_ = cc.cmd.PersistentFlags().SetAnnotation(\"config\", cobra.BashCompFilenameExt, config.ValidConfigFileExtensions)\n\n\tcc.cmd.PersistentFlags().BoolVarP(&cc.verbose, \"verbose\", \"v\", false, \"verbose output\")\n\tcc.cmd.PersistentFlags().BoolVarP(&cc.debug, \"debug\", \"\", false, \"debug output\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.logging, \"log\", false, \"enable Logging\")\n\tcc.cmd.PersistentFlags().StringVar(&cc.logFile, \"logFile\", \"\", \"log File path (if set, logging enabled automatically)\")\n\tcc.cmd.PersistentFlags().BoolVar(&cc.verboseLog, \"verboseLog\", false, \"verbose logging\")\n\n\tcc.cmd.Flags().BoolVarP(&cc.buildWatch, \"watch\", \"w\", false, \"watch filesystem for changes and recreate as needed\")\n\n\tcc.cmd.Flags().Bool(\"renderToMemory\", false, \"render to memory (only useful for benchmark testing)\")\n\n\t\/\/ Set bash-completion\n\t_ = cc.cmd.PersistentFlags().SetAnnotation(\"logFile\", cobra.BashCompFilenameExt, []string{})\n\n\tcc.cmd.SetGlobalNormalizationFunc(helpers.NormalizeHugoFlags)\n\tcc.cmd.SilenceUsage = true\n\n\treturn cc\n}\n\ntype hugoBuilderCommon struct {\n\tsource string\n\tbaseURL string\n\tenvironment string\n\n\tbuildWatch bool\n\n\tgc bool\n\n\t\/\/ Profile flags (for debugging of performance problems)\n\tcpuprofile string\n\tmemprofile string\n\tmutexprofile string\n\ttraceprofile string\n\n\t\/\/ TODO(bep) var vs string\n\tlogging bool\n\tverbose bool\n\tverboseLog bool\n\tdebug bool\n\tquiet bool\n\n\tcfgFile string\n\tcfgDir string\n\tlogFile string\n}\n\nfunc (cc *hugoBuilderCommon) timeTrack(start time.Time, name string) {\n\tif cc.quiet {\n\t\treturn\n\t}\n\telapsed := time.Since(start)\n\tfmt.Printf(\"%s in %v ms\\n\", name, int(1000*elapsed.Seconds()))\n}\n\nfunc (cc *hugoBuilderCommon) getConfigDir(baseDir string) string {\n\tif cc.cfgDir != \"\" {\n\t\treturn paths.AbsPathify(baseDir, cc.cfgDir)\n\t}\n\n\tif v, found := os.LookupEnv(\"HUGO_CONFIGDIR\"); found {\n\t\treturn paths.AbsPathify(baseDir, v)\n\t}\n\n\treturn paths.AbsPathify(baseDir, \"config\")\n}\n\nfunc (cc *hugoBuilderCommon) getEnvironment(isServer bool) string {\n\tif cc.environment != \"\" {\n\t\treturn cc.environment\n\t}\n\n\tif v, found := os.LookupEnv(\"HUGO_ENVIRONMENT\"); found {\n\t\treturn v\n\t}\n\n\t\/\/ Used by Netlify and Forestry\n\tif v, found := os.LookupEnv(\"HUGO_ENV\"); found {\n\t\treturn v\n\t}\n\n\tif isServer {\n\t\treturn hugo.EnvironmentDevelopment\n\t}\n\n\treturn hugo.EnvironmentProduction\n}\n\nfunc (cc *hugoBuilderCommon) handleCommonBuilderFlags(cmd *cobra.Command) {\n\tcmd.PersistentFlags().StringVarP(&cc.source, \"source\", \"s\", \"\", \"filesystem path to read files relative from\")\n\tcmd.PersistentFlags().SetAnnotation(\"source\", cobra.BashCompSubdirsInDir, []string{})\n\tcmd.PersistentFlags().StringVarP(&cc.environment, \"environment\", \"e\", \"\", \"build environment\")\n\tcmd.PersistentFlags().StringP(\"themesDir\", \"\", \"\", \"filesystem path to themes directory\")\n\tcmd.PersistentFlags().BoolP(\"ignoreVendor\", \"\", false, \"ignores any _vendor directory\")\n}\n\nfunc (cc *hugoBuilderCommon) handleFlags(cmd *cobra.Command) {\n\tcc.handleCommonBuilderFlags(cmd)\n\tcmd.Flags().Bool(\"cleanDestinationDir\", false, \"remove files from destination not found in static directories\")\n\tcmd.Flags().BoolP(\"buildDrafts\", \"D\", false, \"include content marked as draft\")\n\tcmd.Flags().BoolP(\"buildFuture\", \"F\", false, \"include content with publishdate in the future\")\n\tcmd.Flags().BoolP(\"buildExpired\", \"E\", false, \"include expired content\")\n\tcmd.Flags().StringP(\"contentDir\", \"c\", \"\", \"filesystem path to content directory\")\n\tcmd.Flags().StringP(\"layoutDir\", \"l\", \"\", \"filesystem path to layout directory\")\n\tcmd.Flags().StringP(\"cacheDir\", \"\", \"\", \"filesystem path to cache directory. Defaults: $TMPDIR\/hugo_cache\/\")\n\tcmd.Flags().BoolP(\"ignoreCache\", \"\", false, \"ignores the cache directory\")\n\tcmd.Flags().StringP(\"destination\", \"d\", \"\", \"filesystem path to write files to\")\n\tcmd.Flags().StringSliceP(\"theme\", \"t\", []string{}, \"themes to use (located in \/themes\/THEMENAME\/)\")\n\tcmd.Flags().StringVarP(&cc.baseURL, \"baseURL\", \"b\", \"\", \"hostname (and path) to the root, e.g. http:\/\/spf13.com\/\")\n\tcmd.Flags().Bool(\"enableGitInfo\", false, \"add Git revision, date and author info to the pages\")\n\tcmd.Flags().BoolVar(&cc.gc, \"gc\", false, \"enable to run some cleanup tasks (remove unused cache files) after the build\")\n\n\tcmd.Flags().Bool(\"templateMetrics\", false, \"display metrics about template executions\")\n\tcmd.Flags().Bool(\"templateMetricsHints\", false, \"calculate some improvement hints when combined with --templateMetrics\")\n\tcmd.Flags().BoolP(\"forceSyncStatic\", \"\", false, \"copy all files when static is changed.\")\n\tcmd.Flags().BoolP(\"noTimes\", \"\", false, \"don't sync modification time of files\")\n\tcmd.Flags().BoolP(\"noChmod\", \"\", false, \"don't sync permission mode of files\")\n\tcmd.Flags().BoolP(\"i18n-warnings\", \"\", false, \"print missing translations\")\n\tcmd.Flags().BoolP(\"path-warnings\", \"\", false, \"print warnings on duplicate target paths etc.\")\n\tcmd.Flags().StringVarP(&cc.cpuprofile, \"profile-cpu\", \"\", \"\", \"write cpu profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.memprofile, \"profile-mem\", \"\", \"\", \"write memory profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.mutexprofile, \"profile-mutex\", \"\", \"\", \"write Mutex profile to `file`\")\n\tcmd.Flags().StringVarP(&cc.traceprofile, \"trace\", \"\", \"\", \"write trace to `file` (not useful in general)\")\n\n\t\/\/ Hide these for now.\n\tcmd.Flags().MarkHidden(\"profile-cpu\")\n\tcmd.Flags().MarkHidden(\"profile-mem\")\n\tcmd.Flags().MarkHidden(\"profile-mutex\")\n\n\tcmd.Flags().StringSlice(\"disableKinds\", []string{}, \"disable different kind of pages (home, RSS etc.)\")\n\n\tcmd.Flags().Bool(\"minify\", false, \"minify any supported output format (HTML, XML etc.)\")\n\n\t\/\/ Set bash-completion.\n\t\/\/ Each flag must first be defined before using the SetAnnotation() call.\n\t_ = cmd.Flags().SetAnnotation(\"source\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"cacheDir\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"destination\", cobra.BashCompSubdirsInDir, []string{})\n\t_ = cmd.Flags().SetAnnotation(\"theme\", cobra.BashCompSubdirsInDir, []string{\"themes\"})\n}\n\nfunc checkErr(logger *loggers.Logger, err error, s ...string) {\n\tif err == nil {\n\t\treturn\n\t}\n\tif len(s) == 0 {\n\t\tlogger.CRITICAL.Println(err)\n\t\treturn\n\t}\n\tfor _, message := range s {\n\t\tlogger.ERROR.Println(message)\n\t}\n\tlogger.ERROR.Println(err)\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"flag\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/ostrost\/ostent\/flags\"\n\t\"github.com\/ostrost\/ostent\/ostent\"\n\t\"github.com\/ostrost\/ostent\/params\"\n\tmetrics \"github.com\/rcrowley\/go-metrics\"\n)\n\ntype graphite struct {\n\tlogger *Logger\n\tRefreshFlag flags.Period\n\tServerAddr flags.Bind\n}\n\nfunc graphiteCommandLine(cli *flag.FlagSet) CommandLineHandler {\n\tgr := &graphite{\n\t\tlogger: NewLogger(\"[ostent sendto-graphite] \"),\n\t\tRefreshFlag: flags.Period{Duration: 10 * time.Second}, \/\/ 10s default\n\t\tServerAddr: flags.NewBind(2003),\n\t}\n\tcli.Var(&gr.RefreshFlag, \"graphite-refresh\", \"Graphite refresh interval\")\n\tcli.Var(&gr.ServerAddr, \"sendto-graphite\", \"Graphite server address\")\n\treturn func() (AtexitHandler, bool, error) {\n\t\tif gr.ServerAddr.Host == \"\" {\n\t\t\treturn nil, false, nil\n\t\t}\n\t\tostent.AddBackground(func(defaultPeriod flags.Period) {\n\t\t\t\/* if gr.RefreshFlag.Duration == 0 { \/\/ if .RefreshFlag had no default\n\t\t\t\tgr.RefreshFlag = defaultPeriod\n\t\t\t} *\/\n\t\t\td := params.Duration(gr.RefreshFlag.Duration)\n\t\t\tgc := &carbond{\n\t\t\t\tlogger: gr.logger,\n\t\t\t\tserveraddr: gr.ServerAddr.String(),\n\t\t\t\tTicks: params.NewTicks(&d),\n\t\t\t}\n\t\t\tostent.Register <- gc\n\t\t})\n\t\treturn nil, false, nil\n\t}\n}\n\ntype carbond struct {\n\tlogger *Logger\n\tserveraddr string\n\tconn net.Conn\n\tparams.Ticks \/\/ Expired, Tick methods\n\tfailing bool\n}\n\nfunc (_ *carbond) CloseChans() {} \/\/ intentionally empty\nfunc (_ *carbond) Reload() {} \/\/ intentionally empty\n\nfunc (_ *carbond) Push(*ostent.IndexUpdate) {} \/\/ TODO?\n\nfunc (cd *carbond) Tack() {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", cd.serveraddr)\n\tif err != nil {\n\t\tcd.logger.Printf(\"Resolve Addr %s: %s\\n\", cd.serveraddr, err)\n\t\treturn\n\t}\n\t\/\/ go metrics.Graphite(ostent.Reg1s.Registry, 1*time.Second, \"ostent\", addr)\n\terr = metrics.GraphiteOnce(metrics.GraphiteConfig{\n\t\tDurationUnit: time.Nanosecond, \/\/ default, used(divided by thus must not be 0) with Timer metrics\n\t\tAddr: addr,\n\t\tRegistry: ostent.Reg1s.Registry,\n\t\tPrefix: \"ostent\",\n\t})\n\tif err != nil {\n\t\tif !cd.failing {\n\t\t\tcd.failing = true\n\t\t\tcd.logger.Printf(\"Sending: %s\\n\", err)\n\t\t}\n\t} else if cd.failing {\n\t\tcd.failing = false\n\t\tcd.logger.Printf(\"Recovered\\n\")\n\t}\n}\n\nfunc init() {\n\tAddCommandLine(graphiteCommandLine)\n}\nUse splitted go-metrics graphite adaptorpackage commands\n\nimport (\n\t\"flag\"\n\t\"net\"\n\t\"time\"\n\n\tgraphite \"github.com\/cyberdelia\/go-metrics-graphite\"\n\t\"github.com\/ostrost\/ostent\/flags\"\n\t\"github.com\/ostrost\/ostent\/ostent\"\n\t\"github.com\/ostrost\/ostent\/params\"\n)\n\ntype Graphite struct {\n\tLogger *Logger\n\tRefreshFlag flags.Period\n\tServerAddr flags.Bind\n}\n\nfunc graphiteCommandLine(cli *flag.FlagSet) CommandLineHandler {\n\tgr := &Graphite{\n\t\tLogger: NewLogger(\"[ostent sendto-graphite] \"),\n\t\tRefreshFlag: flags.Period{Duration: 10 * time.Second}, \/\/ 10s default\n\t\tServerAddr: flags.NewBind(2003),\n\t}\n\tcli.Var(&gr.RefreshFlag, \"graphite-refresh\", \"Graphite refresh interval\")\n\tcli.Var(&gr.ServerAddr, \"sendto-graphite\", \"Graphite server address\")\n\treturn func() (AtexitHandler, bool, error) {\n\t\tif gr.ServerAddr.Host == \"\" {\n\t\t\treturn nil, false, nil\n\t\t}\n\t\tostent.AddBackground(func(defaultPeriod flags.Period) {\n\t\t\t\/* if gr.RefreshFlag.Duration == 0 { \/\/ if .RefreshFlag had no default\n\t\t\t\tgr.RefreshFlag = defaultPeriod\n\t\t\t} *\/\n\t\t\td := params.Duration(gr.RefreshFlag.Duration)\n\t\t\tgc := &Carbond{\n\t\t\t\tLogger: gr.Logger,\n\t\t\t\tServerAddr: gr.ServerAddr.String(),\n\t\t\t\tTicks: params.NewTicks(&d),\n\t\t\t}\n\t\t\tostent.Register <- gc\n\t\t})\n\t\treturn nil, false, nil\n\t}\n}\n\ntype Carbond struct {\n\tLogger *Logger\n\tServerAddr string\n\tConn net.Conn\n\tparams.Ticks \/\/ Expired, Tick methods\n\tFailing bool\n}\n\nfunc (cd *Carbond) CloseChans() {} \/\/ intentionally empty\nfunc (cd *Carbond) Reload() {} \/\/ intentionally empty\nfunc (cd *Carbond) Push(*ostent.IndexUpdate) {} \/\/ TODO?\n\nfunc (cd *Carbond) Tack() {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", cd.ServerAddr)\n\tif err != nil {\n\t\tcd.Logger.Printf(\"Resolve Addr %s: %s\\n\", cd.ServerAddr, err)\n\t\treturn\n\t}\n\t\/\/ go graphite.Graphite(ostent.Reg1s.Registry, 1*time.Second, \"ostent\", addr)\n\terr = graphite.GraphiteOnce(graphite.GraphiteConfig{\n\t\tDurationUnit: time.Nanosecond, \/\/ default, used(divided by thus must not be 0) with Timer metrics\n\t\tAddr: addr,\n\t\tRegistry: ostent.Reg1s.Registry,\n\t\tPrefix: \"ostent\",\n\t})\n\tif err != nil {\n\t\tif !cd.Failing {\n\t\t\tcd.Failing = true\n\t\t\tcd.Logger.Printf(\"Sending: %s\\n\", err)\n\t\t}\n\t} else if cd.Failing {\n\t\tcd.Failing = false\n\t\tcd.Logger.Printf(\"Recovered\\n\")\n\t}\n}\n\nfunc init() {\n\tAddCommandLine(graphiteCommandLine)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2018 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gohugoio\/hugo\/parser\/metadecoders\"\n\n\t_errors \"github.com\/pkg\/errors\"\n\n\t\"github.com\/gohugoio\/hugo\/create\"\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/gohugoio\/hugo\/hugofs\"\n\t\"github.com\/gohugoio\/hugo\/parser\"\n\t\"github.com\/spf13\/cobra\"\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar _ cmder = (*newSiteCmd)(nil)\n\ntype newSiteCmd struct {\n\tconfigFormat string\n\n\t*baseCmd\n}\n\nfunc newNewSiteCmd() *newSiteCmd {\n\tccmd := &newSiteCmd{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"site [path]\",\n\t\tShort: \"Create a new site (skeleton)\",\n\t\tLong: `Create a new site in the provided directory.\nThe new site will have the correct structure, but no content or theme yet.\nUse ` + \"`hugo new [contentPath]`\" + ` to create new content.`,\n\t\tRunE: ccmd.newSite,\n\t}\n\n\tcmd.Flags().StringVarP(&ccmd.configFormat, \"format\", \"f\", \"toml\", \"config & frontmatter format\")\n\tcmd.Flags().Bool(\"force\", false, \"init inside non-empty directory\")\n\n\tccmd.baseCmd = newBaseCmd(cmd)\n\n\treturn ccmd\n\n}\n\nfunc (n *newSiteCmd) doNewSite(fs *hugofs.Fs, basepath string, force bool) error {\n\tarcheTypePath := filepath.Join(basepath, \"archetypes\")\n\tdirs := []string{\n\t\tfilepath.Join(basepath, \"layouts\"),\n\t\tfilepath.Join(basepath, \"content\"),\n\t\tarcheTypePath,\n\t\tfilepath.Join(basepath, \"static\"),\n\t\tfilepath.Join(basepath, \"data\"),\n\t\tfilepath.Join(basepath, \"themes\"),\n\t}\n\n\tif exists, _ := helpers.Exists(basepath, fs.Source); exists {\n\t\tif isDir, _ := helpers.IsDir(basepath, fs.Source); !isDir {\n\t\t\treturn errors.New(basepath + \" already exists but not a directory\")\n\t\t}\n\n\t\tisEmpty, _ := helpers.IsEmpty(basepath, fs.Source)\n\n\t\tswitch {\n\t\tcase !isEmpty && !force:\n\t\t\treturn errors.New(basepath + \" already exists and is not empty\")\n\n\t\tcase !isEmpty && force:\n\t\t\tall := append(dirs, filepath.Join(basepath, \"config.\"+n.configFormat))\n\t\t\tfor _, path := range all {\n\t\t\t\tif exists, _ := helpers.Exists(path, fs.Source); exists {\n\t\t\t\t\treturn errors.New(path + \" already exists\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, dir := range dirs {\n\t\tif err := fs.Source.MkdirAll(dir, 0777); err != nil {\n\t\t\treturn _errors.Wrap(err, \"Failed to create dir\")\n\t\t}\n\t}\n\n\tcreateConfig(fs, basepath, n.configFormat)\n\n\t\/\/ Create a default archetype file.\n\thelpers.SafeWriteToDisk(filepath.Join(archeTypePath, \"default.md\"),\n\t\tstrings.NewReader(create.ArchetypeTemplateTemplate), fs.Source)\n\n\tjww.FEEDBACK.Printf(\"Congratulations! Your new Hugo site is created in %s.\\n\\n\", basepath)\n\tjww.FEEDBACK.Println(nextStepsText())\n\n\treturn nil\n}\n\n\/\/ newSite creates a new Hugo site and initializes a structured Hugo directory.\nfunc (n *newSiteCmd) newSite(cmd *cobra.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn newUserError(\"path needs to be provided\")\n\t}\n\n\tcreatepath, err := filepath.Abs(filepath.Clean(args[0]))\n\tif err != nil {\n\t\treturn newUserError(err)\n\t}\n\n\tforceNew, _ := cmd.Flags().GetBool(\"force\")\n\n\treturn n.doNewSite(hugofs.NewDefault(viper.New()), createpath, forceNew)\n}\n\nfunc createConfig(fs *hugofs.Fs, inpath string, kind string) (err error) {\n\tin := map[string]string{\n\t\t\"baseURL\": \"http:\/\/example.org\/\",\n\t\t\"title\": \"My New Hugo Site\",\n\t\t\"languageCode\": \"en-us\",\n\t}\n\n\tvar buf bytes.Buffer\n\terr = parser.InterfaceToConfig(in, metadecoders.FormatFromString(kind), &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn helpers.WriteToDisk(filepath.Join(inpath, \"config.\"+kind), &buf, fs.Source)\n}\n\nfunc nextStepsText() string {\n\tvar nextStepsText bytes.Buffer\n\n\tnextStepsText.WriteString(`Just a few more steps and you're ready to go:\n\n1. Download a theme into the same-named folder.\n Choose a theme from https:\/\/themes.gohugo.io\/ or\n create your own with the \"hugo new theme \" command.\n2. Perhaps you want to add some content. You can add single files\n with \"hugo new `)\n\n\tnextStepsText.WriteString(filepath.Join(\"\", \".\"))\n\n\tnextStepsText.WriteString(`\".\n3. Start the built-in live server via \"hugo server\".\n\nVisit https:\/\/gohugo.io\/ for quickstart guide and full documentation.`)\n\n\treturn nextStepsText.String()\n}\ncommands: Add hint when dir not empty\/\/ Copyright 2018 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gohugoio\/hugo\/parser\/metadecoders\"\n\n\t_errors \"github.com\/pkg\/errors\"\n\n\t\"github.com\/gohugoio\/hugo\/create\"\n\t\"github.com\/gohugoio\/hugo\/helpers\"\n\t\"github.com\/gohugoio\/hugo\/hugofs\"\n\t\"github.com\/gohugoio\/hugo\/parser\"\n\t\"github.com\/spf13\/cobra\"\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar _ cmder = (*newSiteCmd)(nil)\n\ntype newSiteCmd struct {\n\tconfigFormat string\n\n\t*baseCmd\n}\n\nfunc newNewSiteCmd() *newSiteCmd {\n\tccmd := &newSiteCmd{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"site [path]\",\n\t\tShort: \"Create a new site (skeleton)\",\n\t\tLong: `Create a new site in the provided directory.\nThe new site will have the correct structure, but no content or theme yet.\nUse ` + \"`hugo new [contentPath]`\" + ` to create new content.`,\n\t\tRunE: ccmd.newSite,\n\t}\n\n\tcmd.Flags().StringVarP(&ccmd.configFormat, \"format\", \"f\", \"toml\", \"config & frontmatter format\")\n\tcmd.Flags().Bool(\"force\", false, \"init inside non-empty directory\")\n\n\tccmd.baseCmd = newBaseCmd(cmd)\n\n\treturn ccmd\n\n}\n\nfunc (n *newSiteCmd) doNewSite(fs *hugofs.Fs, basepath string, force bool) error {\n\tarcheTypePath := filepath.Join(basepath, \"archetypes\")\n\tdirs := []string{\n\t\tfilepath.Join(basepath, \"layouts\"),\n\t\tfilepath.Join(basepath, \"content\"),\n\t\tarcheTypePath,\n\t\tfilepath.Join(basepath, \"static\"),\n\t\tfilepath.Join(basepath, \"data\"),\n\t\tfilepath.Join(basepath, \"themes\"),\n\t}\n\n\tif exists, _ := helpers.Exists(basepath, fs.Source); exists {\n\t\tif isDir, _ := helpers.IsDir(basepath, fs.Source); !isDir {\n\t\t\treturn errors.New(basepath + \" already exists but not a directory\")\n\t\t}\n\n\t\tisEmpty, _ := helpers.IsEmpty(basepath, fs.Source)\n\n\t\tswitch {\n\t\tcase !isEmpty && !force:\n\t\t\treturn errors.New(basepath + \" already exists and is not empty. See --force.\")\n\n\t\tcase !isEmpty && force:\n\t\t\tall := append(dirs, filepath.Join(basepath, \"config.\"+n.configFormat))\n\t\t\tfor _, path := range all {\n\t\t\t\tif exists, _ := helpers.Exists(path, fs.Source); exists {\n\t\t\t\t\treturn errors.New(path + \" already exists\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, dir := range dirs {\n\t\tif err := fs.Source.MkdirAll(dir, 0777); err != nil {\n\t\t\treturn _errors.Wrap(err, \"Failed to create dir\")\n\t\t}\n\t}\n\n\tcreateConfig(fs, basepath, n.configFormat)\n\n\t\/\/ Create a default archetype file.\n\thelpers.SafeWriteToDisk(filepath.Join(archeTypePath, \"default.md\"),\n\t\tstrings.NewReader(create.ArchetypeTemplateTemplate), fs.Source)\n\n\tjww.FEEDBACK.Printf(\"Congratulations! Your new Hugo site is created in %s.\\n\\n\", basepath)\n\tjww.FEEDBACK.Println(nextStepsText())\n\n\treturn nil\n}\n\n\/\/ newSite creates a new Hugo site and initializes a structured Hugo directory.\nfunc (n *newSiteCmd) newSite(cmd *cobra.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn newUserError(\"path needs to be provided\")\n\t}\n\n\tcreatepath, err := filepath.Abs(filepath.Clean(args[0]))\n\tif err != nil {\n\t\treturn newUserError(err)\n\t}\n\n\tforceNew, _ := cmd.Flags().GetBool(\"force\")\n\n\treturn n.doNewSite(hugofs.NewDefault(viper.New()), createpath, forceNew)\n}\n\nfunc createConfig(fs *hugofs.Fs, inpath string, kind string) (err error) {\n\tin := map[string]string{\n\t\t\"baseURL\": \"http:\/\/example.org\/\",\n\t\t\"title\": \"My New Hugo Site\",\n\t\t\"languageCode\": \"en-us\",\n\t}\n\n\tvar buf bytes.Buffer\n\terr = parser.InterfaceToConfig(in, metadecoders.FormatFromString(kind), &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn helpers.WriteToDisk(filepath.Join(inpath, \"config.\"+kind), &buf, fs.Source)\n}\n\nfunc nextStepsText() string {\n\tvar nextStepsText bytes.Buffer\n\n\tnextStepsText.WriteString(`Just a few more steps and you're ready to go:\n\n1. Download a theme into the same-named folder.\n Choose a theme from https:\/\/themes.gohugo.io\/ or\n create your own with the \"hugo new theme \" command.\n2. Perhaps you want to add some content. You can add single files\n with \"hugo new `)\n\n\tnextStepsText.WriteString(filepath.Join(\"\", \".\"))\n\n\tnextStepsText.WriteString(`\".\n3. Start the built-in live server via \"hugo server\".\n\nVisit https:\/\/gohugo.io\/ for quickstart guide and full documentation.`)\n\n\treturn nextStepsText.String()\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/telegram-bot-api.v4\"\n\t\"time\"\n\t\/\/\"os\/user\"\n)\n\nfunc regpi(msg *tgbotapi.Message, update tgbotapi.Update) {\n\tvar reply tgbotapi.MessageConfig\n\tvar user User\n\n\tuserId := msg.From.ID\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"userId = ? AND groupId = ?\", userId, groupId).First(&user)\n\n\n\tif len(msg.From.UserName) == 0 {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"A girl has no name.\")\n\t} else if user.Id == 0 {\n\t\tuser.Username = \"@\" + msg.From.UserName\n\t\tuser.UserId = msg.From.ID\n\t\tuser.GroupID = int(groupId)\n\t\tuser.Score = 0\n\t\tuser.Usernick = fmt.Sprintf(\"%s %s\", msg.From.FirstName, msg.From.LastName)\n\t\tuser.Quota = 6\n\t\tgdb.Create(&user)\n\t\tgdb.Model(&user).Update(User{Quota: 6})\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Ты регнулся, [%s](tg:\/\/user?id=%d)\\n\", user.Usernick, user.UserId))\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprint(\"Эй, ты уже в игре!\"))\n\t}\n\n\treply.ReplyToMessageID = update.Message.MessageID\n\tbot.Send(reply)\n}\n\nfunc showpid(msg *tgbotapi.Message) {\n\tvar users []User\n\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"groupId = ?\", groupId).Find(&users)\n\n\tif len(users) != 0 {\n\t\toutput := \"Кандидаты в пидоры дня:\\n\"\n\t\tfor _, i := range users {\n\t\t\tif len(i.Usernick) > 0 {\n\t\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d)\\n\", i.Usernick, i.UserId)\n\t\t\t} else {\n\t\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d)\\n\", i.Username[1:], i.UserId)\n\t\t\t}\n\t\t}\n\t\toutput += \"Хочешь себя увидеть тут?\\nЖми \/regpi\"\n\t\treply := tgbotapi.NewMessage(msg.Chat.ID, output)\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\tbot.Send(reply)\n\t} else {\n\t\toutput := \"Пидоров нет! Будь первым! Жми \/regpi\"\n\t\tbot.Send(tgbotapi.NewMessage(msg.Chat.ID, output))\n\t}\n}\n\nfunc pidorStat(msg *tgbotapi.Message) {\n\ttitles := map[int]string{\n\t\t1 : \"Пидоратор\",\n\t\t2 : \"Пидороль\",\n\t\t3 : \"Герцопидор\",\n\t\t4 : \"Пиркиз\",\n\t\t5 : \"Пидорон\",\n\t}\n\n\tvar users []User\n\tvar reply tgbotapi.MessageConfig\n\tvar flag bool\n\tvar currentUserName string\n\n\tgroupId := msg.Chat.ID\n\tcounter := 0\n\tvar titlesCounter int\n\n\tgdb.Where(\"groupId = ?\", groupId).Order(\"score desc\").Find(&users)\n\n\toutput := \"Статистика (первые 5):\\n\"\n\ttitlesCounter = 1\n\tfor _, i := range users {\n\t\tif i.Score != 0 {\n\t\t\tif len(i.Usernick) > 0 {\n\t\t\t\tcurrentUserName = i.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = i.Username[1:]\n\t\t\t}\n\n\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d) - %d (%s)\\n\", currentUserName, i.UserId, i.Score, titles[titlesCounter])\n\t\t\ttitlesCounter++\n\t\t\tflag = true\n\n\t\t\tif counter == 5 {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcounter++\n\t\t\t}\n\t\t}\n\t}\n\n\tif flag {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, output)\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"Пидор дня еще ни разу не был выбран! Жми \/pidor\")\n\t}\n\n\tbot.Send(reply)\n}\n\nfunc startQuiz(msg *tgbotapi.Message) {\n\tfirstPhrases := []string {\n\t\t\"Инициализирую поиск пидора дня...\",\n\t\t\"Внимание, ищу пидора!\",\n\t\t\"Ну-ка дай-ка...\",\n\t\t\"Такс, кто тут у нас мало каши ел?\",\n\t\t\"Инициализация.Поиск.\",\n\t}\n\n\tsecondPhrases := []string {\n\t\t\"Кажется я что-то вижу!\",\n\t\t\"Не может быть!\",\n\t\t\"Пожалуй препроверю...\",\n\t\t\"Найден!\",\n\t\t\"Прям по Бабичу!\",\n\t\t\"Как предсказал Великий Мейстер...\",\n\t}\n\n\tvar reply tgbotapi.MessageConfig\n\tvar theUser User\n\tvar users []User\n\tvar randomUser int\n\tvar currentUserName string\n\tvar winner User\n\tvar winnerScore int\n\tvar available Available\n\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"groupId = ?\", groupId).Find(&users)\n\tgdb.Where(\"groupId = ?\", groupId).First(&available)\n\n\trowsCounted := len(users)\n\tif rowsCounted == 0 {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"Нет участников! Жми \/regpi\")\n\t\tbot.Send(reply)\n\t} else {\n\t\tif available.Flag {\n\t\t\tlenOfCurrentUsers := len(users)\n\t\t\tif lenOfCurrentUsers == 1 {\n\t\t\t\trandomUser = 0\n\t\t\t} else {\n\t\t\t\trandomUser = random(0, lenOfCurrentUsers - 1)\n\t\t\t}\n\n\t\t\tgdb.Where(\"id = ?\", users[randomUser].Id).First(&winner)\n\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, firstPhrases[random(0, len(firstPhrases) - 1)])\n\t\t\tbot.Send(reply)\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, secondPhrases[random(0, len(secondPhrases) - 1)])\n\t\t\tbot.Send(reply)\n\t\t\tgdb.Where(\"id = ? and groupId = ?\", theUser, groupId).First(&winner)\n\t\t\twinnerScore = winner.Score + 1\n\t\t\tgdb.Model(&users).Where(\"id = ?\", winner.Id).UpdateColumn(\"score\", winnerScore)\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\tif len(winner.Usernick) > 0 {\n\t\t\t\tcurrentUserName = winner.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = winner.Username\n\t\t\t}\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Ага! 🎉🎉🎉 Сегодня пидор - [%s](tg:\/\/user?id=%d)\", currentUserName, winner.UserId))\n\t\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\t\tbot.Send(reply)\n\t\t\tgdb.Model(&available).Where(\"groupId = ?\", groupId).Update(\"flag\", false)\n\t\t\tgdb.Model(&available).Where(\"groupId = ?\", groupId).Update(\"userId\", winner.Id)\n\t\t} else {\n\t\t\tvar currentUser User\n\t\t\tgdb.Where(\"id = ?\", available.UserId).First(¤tUser)\n\t\t\tif len(currentUser.Usernick) > 0 {\n\t\t\t\tcurrentUserName = currentUser.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = currentUser.Username[1:]\n\t\t\t}\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"🎉Сегодня у нас уже есть победитель - [%s](tg:\/\/user?id=%d)🎉\", currentUserName, currentUser.UserId))\n\t\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\t\tbot.Send(reply)\n\t\t}\n\t}\n}\n\nfunc kekogen(msg *tgbotapi.Message) {\n\tvar reply tgbotapi.MessageConfig\n\tvar user User\n\n\tuserId := msg.From.ID\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"userId = ? and groupId = ?\", userId, groupId).First(&user)\n\tcurrentQuota := user.Quota\n\n\tif user.Id > 0 {\n\t\tif currentQuota > 1 {\n\t\t\tvowels := []string {\n\t\t\t\t\"а\", \"о\", \"и\", \"е\", \"у\", \"я\",\n\t\t\t}\n\t\t\tconsonants := []string {\n\t\t\t\t\"в\", \"д\", \"к\", \"л\", \"м\", \"н\", \"п\", \"р\", \"с\", \"т\", \"ф\", \"х\", \"ш\",\n\t\t\t}\n\t\t\tresult := \"кек\"\n\n\t\t\tfor x:= 0; x < 5; x++ {\n\t\t\t\tif x % 2 == 0 {\n\t\t\t\t\tresult += vowels[random(0, len(vowels) - 1)]\n\t\t\t\t} else {\n\t\t\t\t\tresult += consonants[random(0, len(consonants) - 1)]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgdb.Model(&user).Update(User{Quota: currentQuota - 1})\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(result))\n\t\t} else {\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Твои кеки на сегодня кончились!\"))\n\t\t}\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Жми \/regpi чтобы зарегистрироваться и получить свои кеки!\"))\n\t}\n\n\tbot.Send(reply)\n}fix statpackage main\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/telegram-bot-api.v4\"\n\t\"time\"\n\t\/\/\"os\/user\"\n)\n\nfunc regpi(msg *tgbotapi.Message, update tgbotapi.Update) {\n\tvar reply tgbotapi.MessageConfig\n\tvar user User\n\n\tuserId := msg.From.ID\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"userId = ? AND groupId = ?\", userId, groupId).First(&user)\n\n\n\tif len(msg.From.UserName) == 0 {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"A girl has no name.\")\n\t} else if user.Id == 0 {\n\t\tuser.Username = \"@\" + msg.From.UserName\n\t\tuser.UserId = msg.From.ID\n\t\tuser.GroupID = int(groupId)\n\t\tuser.Score = 0\n\t\tuser.Usernick = fmt.Sprintf(\"%s %s\", msg.From.FirstName, msg.From.LastName)\n\t\tuser.Quota = 6\n\t\tgdb.Create(&user)\n\t\tgdb.Model(&user).Update(User{Quota: 6})\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Ты регнулся, [%s](tg:\/\/user?id=%d)\\n\", user.Usernick, user.UserId))\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprint(\"Эй, ты уже в игре!\"))\n\t}\n\n\treply.ReplyToMessageID = update.Message.MessageID\n\tbot.Send(reply)\n}\n\nfunc showpid(msg *tgbotapi.Message) {\n\tvar users []User\n\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"groupId = ?\", groupId).Find(&users)\n\n\tif len(users) != 0 {\n\t\toutput := \"Кандидаты в пидоры дня:\\n\"\n\t\tfor _, i := range users {\n\t\t\tif len(i.Usernick) > 0 {\n\t\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d)\\n\", i.Usernick, i.UserId)\n\t\t\t} else {\n\t\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d)\\n\", i.Username[1:], i.UserId)\n\t\t\t}\n\t\t}\n\t\toutput += \"Хочешь себя увидеть тут?\\nЖми \/regpi\"\n\t\treply := tgbotapi.NewMessage(msg.Chat.ID, output)\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\tbot.Send(reply)\n\t} else {\n\t\toutput := \"Пидоров нет! Будь первым! Жми \/regpi\"\n\t\tbot.Send(tgbotapi.NewMessage(msg.Chat.ID, output))\n\t}\n}\n\nfunc pidorStat(msg *tgbotapi.Message) {\n\ttitles := map[int]string{\n\t\t1 : \"Пидоратор\",\n\t\t2 : \"Пидороль\",\n\t\t3 : \"Герцопидор\",\n\t\t4 : \"Пиркиз\",\n\t\t5 : \"Пидорон\",\n\t}\n\n\tvar users []User\n\tvar reply tgbotapi.MessageConfig\n\tvar flag bool\n\tvar currentUserName string\n\n\tgroupId := msg.Chat.ID\n\tcounter := 0\n\tvar titlesCounter int\n\n\tgdb.Where(\"groupId = ?\", groupId).Order(\"score desc\").Find(&users)\n\n\toutput := \"Статистика (первые 5):\\n\"\n\ttitlesCounter = 1\n\tfor _, i := range users {\n\t\tif i.Score != 0 {\n\t\t\tif len(i.Usernick) > 0 {\n\t\t\t\tcurrentUserName = i.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = i.Username[1:]\n\t\t\t}\n\n\t\t\toutput += fmt.Sprintf(\"[%s](tg:\/\/user?id=%d) - %d (%s)\\n\", currentUserName, i.UserId, i.Score, titles[titlesCounter])\n\t\t\ttitlesCounter++\n\t\t\tflag = true\n\n\t\t\tif counter > 5 {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcounter++\n\t\t\t}\n\t\t}\n\t}\n\n\tif flag {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, output)\n\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"Пидор дня еще ни разу не был выбран! Жми \/pidor\")\n\t}\n\n\tbot.Send(reply)\n}\n\nfunc startQuiz(msg *tgbotapi.Message) {\n\tfirstPhrases := []string {\n\t\t\"Инициализирую поиск пидора дня...\",\n\t\t\"Внимание, ищу пидора!\",\n\t\t\"Ну-ка дай-ка...\",\n\t\t\"Такс, кто тут у нас мало каши ел?\",\n\t\t\"Инициализация.Поиск.\",\n\t}\n\n\tsecondPhrases := []string {\n\t\t\"Кажется я что-то вижу!\",\n\t\t\"Не может быть!\",\n\t\t\"Пожалуй препроверю...\",\n\t\t\"Найден!\",\n\t\t\"Прям по Бабичу!\",\n\t\t\"Как предсказал Великий Мейстер...\",\n\t}\n\n\tvar reply tgbotapi.MessageConfig\n\tvar theUser User\n\tvar users []User\n\tvar randomUser int\n\tvar currentUserName string\n\tvar winner User\n\tvar winnerScore int\n\tvar available Available\n\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"groupId = ?\", groupId).Find(&users)\n\tgdb.Where(\"groupId = ?\", groupId).First(&available)\n\n\trowsCounted := len(users)\n\tif rowsCounted == 0 {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, \"Нет участников! Жми \/regpi\")\n\t\tbot.Send(reply)\n\t} else {\n\t\tif available.Flag {\n\t\t\tlenOfCurrentUsers := len(users)\n\t\t\tif lenOfCurrentUsers == 1 {\n\t\t\t\trandomUser = 0\n\t\t\t} else {\n\t\t\t\trandomUser = random(0, lenOfCurrentUsers - 1)\n\t\t\t}\n\n\t\t\tgdb.Where(\"id = ?\", users[randomUser].Id).First(&winner)\n\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, firstPhrases[random(0, len(firstPhrases) - 1)])\n\t\t\tbot.Send(reply)\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, secondPhrases[random(0, len(secondPhrases) - 1)])\n\t\t\tbot.Send(reply)\n\t\t\tgdb.Where(\"id = ? and groupId = ?\", theUser, groupId).First(&winner)\n\t\t\twinnerScore = winner.Score + 1\n\t\t\tgdb.Model(&users).Where(\"id = ?\", winner.Id).UpdateColumn(\"score\", winnerScore)\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\tif len(winner.Usernick) > 0 {\n\t\t\t\tcurrentUserName = winner.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = winner.Username\n\t\t\t}\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Ага! 🎉🎉🎉 Сегодня пидор - [%s](tg:\/\/user?id=%d)\", currentUserName, winner.UserId))\n\t\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\t\tbot.Send(reply)\n\t\t\tgdb.Model(&available).Where(\"groupId = ?\", groupId).Update(\"flag\", false)\n\t\t\tgdb.Model(&available).Where(\"groupId = ?\", groupId).Update(\"userId\", winner.Id)\n\t\t} else {\n\t\t\tvar currentUser User\n\t\t\tgdb.Where(\"id = ?\", available.UserId).First(¤tUser)\n\t\t\tif len(currentUser.Usernick) > 0 {\n\t\t\t\tcurrentUserName = currentUser.Usernick\n\t\t\t} else {\n\t\t\t\tcurrentUserName = currentUser.Username[1:]\n\t\t\t}\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"🎉Сегодня у нас уже есть победитель - [%s](tg:\/\/user?id=%d)🎉\", currentUserName, currentUser.UserId))\n\t\t\treply.ParseMode = tgbotapi.ModeMarkdown\n\t\t\tbot.Send(reply)\n\t\t}\n\t}\n}\n\nfunc kekogen(msg *tgbotapi.Message) {\n\tvar reply tgbotapi.MessageConfig\n\tvar user User\n\n\tuserId := msg.From.ID\n\tgroupId := msg.Chat.ID\n\n\tgdb.Where(\"userId = ? and groupId = ?\", userId, groupId).First(&user)\n\tcurrentQuota := user.Quota\n\n\tif user.Id > 0 {\n\t\tif currentQuota > 1 {\n\t\t\tvowels := []string {\n\t\t\t\t\"а\", \"о\", \"и\", \"е\", \"у\", \"я\",\n\t\t\t}\n\t\t\tconsonants := []string {\n\t\t\t\t\"в\", \"д\", \"к\", \"л\", \"м\", \"н\", \"п\", \"р\", \"с\", \"т\", \"ф\", \"х\", \"ш\",\n\t\t\t}\n\t\t\tresult := \"кек\"\n\n\t\t\tfor x:= 0; x < 5; x++ {\n\t\t\t\tif x % 2 == 0 {\n\t\t\t\t\tresult += vowels[random(0, len(vowels) - 1)]\n\t\t\t\t} else {\n\t\t\t\t\tresult += consonants[random(0, len(consonants) - 1)]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgdb.Model(&user).Update(User{Quota: currentQuota - 1})\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(result))\n\t\t} else {\n\t\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Твои кеки на сегодня кончились!\"))\n\t\t}\n\t} else {\n\t\treply = tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(\"Жми \/regpi чтобы зарегистрироваться и получить свои кеки!\"))\n\t}\n\n\tbot.Send(reply)\n}<|endoftext|>"} {"text":"package repository\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stormcat24\/protodep\/dependency\"\n\t\"github.com\/stormcat24\/protodep\/helper\"\n\t\"github.com\/stormcat24\/protodep\/logger\"\n\t\"gopkg.in\/src-d\/go-git.v4\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n)\n\ntype GitRepository interface {\n\tOpen() (*OpenedRepository, error)\n\tProtoRootDir() string\n}\n\ntype GitHubRepository struct {\n\tprotodepDir string\n\tdep dependency.ProtoDepDependency\n\tauthProvider helper.AuthProvider\n}\n\nfunc NewGitRepository(protodepDir string, dep dependency.ProtoDepDependency, authProvider helper.AuthProvider) GitRepository {\n\treturn &GitHubRepository{\n\t\tprotodepDir: protodepDir,\n\t\tdep: dep,\n\t\tauthProvider: authProvider,\n\t}\n}\n\ntype OpenedRepository struct {\n\tRepository *git.Repository\n\tDep dependency.ProtoDepDependency\n\tHash string\n}\n\nfunc (r *GitHubRepository) Open() (*OpenedRepository, error) {\n\n\tbranch := \"master\"\n\tif r.dep.Branch != \"\" {\n\t\tbranch = r.dep.Branch\n\t}\n\n\trevision := r.dep.Revision\n\n\treponame := r.dep.Repository()\n\trepopath := filepath.Join(r.protodepDir, reponame)\n\n\tauth, err := r.authProvider.AuthMethod()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rep *git.Repository\n\n\tif stat, err := os.Stat(repopath); err == nil && stat.IsDir() {\n\t\tspinner := logger.InfoWithSpinner(\"Getting %s \", reponame)\n\n\t\trep, err = git.PlainOpen(repopath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"open repository is failed\")\n\t\t}\n\t\tspinner.Stop()\n\n\t\tfetchOpts := &git.FetchOptions{\n\t\t\tAuth: auth,\n\t\t}\n\n\t\t\/\/ TODO: Validate remote setting.\n\t\t\/\/ TODO: If .protodep cache remains with SSH, change remote target to HTTPS.\n\n\t\tif err := rep.Fetch(fetchOpts); err != nil {\n\t\t\tif err != git.NoErrAlreadyUpToDate {\n\t\t\t\treturn nil, errors.Wrap(err, \"fetch repository is failed\")\n\t\t\t}\n\t\t}\n\t\tspinner.Finish()\n\n\t} else {\n\t\tspinner := logger.InfoWithSpinner(\"Getting %s \", reponame)\n\t\t\/\/ IDEA: Is it better to register both ssh and HTTP?\n\t\trep, err = git.PlainClone(repopath, false, &git.CloneOptions{\n\t\t\tAuth: auth,\n\t\t\tURL: r.authProvider.GetRepositoryURL(reponame),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"clone repository is failed\")\n\t\t}\n\t\tspinner.Finish()\n\t}\n\n\twt, err := rep.Worktree()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get worktree is failed\")\n\t}\n\n\tif revision == \"\" {\n\t\ttarget, err := rep.Storer.Reference(plumbing.ReferenceName(fmt.Sprintf(\"refs\/remotes\/origin\/%s\", branch)))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"change branch to %s is failed\", branch)\n\t\t}\n\n\t\tif err := wt.Checkout(&git.CheckoutOptions{Hash: target.Hash()}); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"checkout to %s is failed\", revision)\n\t\t}\n\n\t\thead := plumbing.NewHashReference(plumbing.HEAD, target.Hash())\n\t\tif err := rep.Storer.SetReference(head); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"set head to %s is failed\", branch)\n\t\t}\n\t} else {\n\t\thash := plumbing.NewHash(revision)\n\t\tif err := wt.Checkout(&git.CheckoutOptions{Hash: hash}); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"checkout to %s is failed\", revision)\n\t\t}\n\n\t\ttag := plumbing.NewTagReferenceName(revision)\n\t\tref, err := rep.Reference(tag, false)\n\t\tif err != nil && err != plumbing.ErrReferenceNotFound {\n\t\t\treturn nil, errors.Wrapf(err, \"tag = %s\", tag)\n\t\t}\n\n\t\tif ref != nil {\n\t\t\thash = ref.Hash()\n\t\t}\n\n\t\thead := plumbing.NewHashReference(plumbing.HEAD, hash)\n\t\tif err := rep.Storer.SetReference(head); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"set head to %s is failed\", revision)\n\t\t}\n\t}\n\n\tcommiter, err := rep.Log(&git.LogOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get commit is failed\")\n\t}\n\n\tcurrent, err := commiter.Next()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get commit current is failed\")\n\t}\n\n\treturn &OpenedRepository{\n\t\tRepository: rep,\n\t\tDep: r.dep,\n\t\tHash: current.Hash.String(),\n\t}, nil\n}\n\nfunc (r *GitHubRepository) ProtoRootDir() string {\n\treturn filepath.Join(r.protodepDir, r.dep.Target)\n}\nFix checkout by tag in revision (#64)package repository\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stormcat24\/protodep\/dependency\"\n\t\"github.com\/stormcat24\/protodep\/helper\"\n\t\"github.com\/stormcat24\/protodep\/logger\"\n\t\"gopkg.in\/src-d\/go-git.v4\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n)\n\ntype GitRepository interface {\n\tOpen() (*OpenedRepository, error)\n\tProtoRootDir() string\n}\n\ntype GitHubRepository struct {\n\tprotodepDir string\n\tdep dependency.ProtoDepDependency\n\tauthProvider helper.AuthProvider\n}\n\nfunc NewGitRepository(protodepDir string, dep dependency.ProtoDepDependency, authProvider helper.AuthProvider) GitRepository {\n\treturn &GitHubRepository{\n\t\tprotodepDir: protodepDir,\n\t\tdep: dep,\n\t\tauthProvider: authProvider,\n\t}\n}\n\ntype OpenedRepository struct {\n\tRepository *git.Repository\n\tDep dependency.ProtoDepDependency\n\tHash string\n}\n\nfunc (r *GitHubRepository) Open() (*OpenedRepository, error) {\n\n\tbranch := \"master\"\n\tif r.dep.Branch != \"\" {\n\t\tbranch = r.dep.Branch\n\t}\n\n\trevision := r.dep.Revision\n\n\treponame := r.dep.Repository()\n\trepopath := filepath.Join(r.protodepDir, reponame)\n\n\tauth, err := r.authProvider.AuthMethod()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rep *git.Repository\n\n\tif stat, err := os.Stat(repopath); err == nil && stat.IsDir() {\n\t\tspinner := logger.InfoWithSpinner(\"Getting %s \", reponame)\n\n\t\trep, err = git.PlainOpen(repopath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"open repository is failed\")\n\t\t}\n\t\tspinner.Stop()\n\n\t\tfetchOpts := &git.FetchOptions{\n\t\t\tAuth: auth,\n\t\t}\n\n\t\t\/\/ TODO: Validate remote setting.\n\t\t\/\/ TODO: If .protodep cache remains with SSH, change remote target to HTTPS.\n\n\t\tif err := rep.Fetch(fetchOpts); err != nil {\n\t\t\tif err != git.NoErrAlreadyUpToDate {\n\t\t\t\treturn nil, errors.Wrap(err, \"fetch repository is failed\")\n\t\t\t}\n\t\t}\n\t\tspinner.Finish()\n\n\t} else {\n\t\tspinner := logger.InfoWithSpinner(\"Getting %s \", reponame)\n\t\t\/\/ IDEA: Is it better to register both ssh and HTTP?\n\t\trep, err = git.PlainClone(repopath, false, &git.CloneOptions{\n\t\t\tAuth: auth,\n\t\t\tURL: r.authProvider.GetRepositoryURL(reponame),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"clone repository is failed\")\n\t\t}\n\t\tspinner.Finish()\n\t}\n\n\twt, err := rep.Worktree()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get worktree is failed\")\n\t}\n\n\tif revision == \"\" {\n\t\ttarget, err := rep.Storer.Reference(plumbing.ReferenceName(fmt.Sprintf(\"refs\/remotes\/origin\/%s\", branch)))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"change branch to %s is failed\", branch)\n\t\t}\n\n\t\tif err := wt.Checkout(&git.CheckoutOptions{Hash: target.Hash()}); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"checkout to %s is failed\", revision)\n\t\t}\n\n\t\thead := plumbing.NewHashReference(plumbing.HEAD, target.Hash())\n\t\tif err := rep.Storer.SetReference(head); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"set head to %s is failed\", branch)\n\t\t}\n\t} else {\n\t\tvar opts git.CheckoutOptions\n\n\t\ttag := plumbing.NewTagReferenceName(revision)\n\t\t_, err := rep.Reference(tag, false)\n\t\tif err != nil && err != plumbing.ErrReferenceNotFound {\n\t\t\treturn nil, errors.Wrapf(err, \"tag = %s\", tag)\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\t\/\/ Tag not found, revision must be a hash\n\t\t\t\tlogger.Info(\"%s is not a tag, checking out by hash\", revision)\n\t\t\t\thash := plumbing.NewHash(revision)\n\t\t\t\topts = git.CheckoutOptions{Hash: hash}\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"%s is a tag, checking out by tag\", revision)\n\t\t\t\topts = git.CheckoutOptions{Branch: tag}\n\t\t\t}\n\t\t}\n\n\t\tif err := wt.Checkout(&opts); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"checkout to %s is failed\", revision)\n\t\t}\n\t}\n\n\tcommiter, err := rep.Log(&git.LogOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get commit is failed\")\n\t}\n\n\tcurrent, err := commiter.Next()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get commit current is failed\")\n\t}\n\n\treturn &OpenedRepository{\n\t\tRepository: rep,\n\t\tDep: r.dep,\n\t\tHash: current.Hash.String(),\n\t}, nil\n}\n\nfunc (r *GitHubRepository) ProtoRootDir() string {\n\treturn filepath.Join(r.protodepDir, r.dep.Target)\n}\n<|endoftext|>"} {"text":"Fixing up the in memory L1<|endoftext|>"} {"text":"package rest\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/leanovate\/microtools\/routing\"\n)\n\ntype Resource interface {\n\tBeforeFilter(resp http.ResponseWriter, req *http.Request) bool\n\tSelf() Link\n\tGet(request *http.Request) (interface{}, error)\n\tPatch(request *http.Request) (interface{}, error)\n\tUpdate(request *http.Request) (interface{}, error)\n\tDelete(request *http.Request) (interface{}, error)\n\n\tSubResources() routing.Matcher\n}\n\ntype ResourceBase struct{}\n\nfunc (ResourceBase) BeforeFilter(resp http.ResponseWriter, req *http.Request) bool {\n\treturn true\n}\n\nfunc (ResourceBase) Get(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Patch(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Update(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Delete(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) SubResources() routing.Matcher {\n\treturn HttpErrorMatcher(NotFound)\n}\n\ntype LimitedResource struct {\n\tResourceBase\n\tRequestSizeLimit int64\n}\n\nfunc (r LimitedResource) BeforeFilter(resp http.ResponseWriter, req *http.Request) bool {\n\tif req.Body != nil {\n\t\treq.Body = http.MaxBytesReader(resp, req.Body, r.RequestSizeLimit)\n\t}\n\treturn true\n}\n\nfunc ResourceMatcher(resource Resource) routing.Matcher {\n\treturn routing.Sequence(\n\t\trouting.EndSeq(\n\t\t\trouting.GET(restHandler{before: resource.BeforeFilter, handler: resource.Get}),\n\t\t\trouting.PUT(restHandler{before: resource.BeforeFilter, handler: resource.Update}),\n\t\t\trouting.PATCH(restHandler{before: resource.BeforeFilter, handler: resource.Patch}),\n\t\t\trouting.DELETE(restHandler{before: resource.BeforeFilter, handler: resource.Delete}),\n\t\t\tHttpErrorMatcher(MethodNotAllowed),\n\t\t),\n\t\tresource.SubResources(),\n\t)\n}\nAdd Post to resourcepackage rest\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/leanovate\/microtools\/routing\"\n)\n\ntype Resource interface {\n\tBeforeFilter(resp http.ResponseWriter, req *http.Request) bool\n\tSelf() Link\n\tGet(request *http.Request) (interface{}, error)\n\tPost(request *http.Request) (interface{}, error)\n\tPatch(request *http.Request) (interface{}, error)\n\tUpdate(request *http.Request) (interface{}, error)\n\tDelete(request *http.Request) (interface{}, error)\n\n\tSubResources() routing.Matcher\n}\n\ntype ResourceBase struct{}\n\nfunc (ResourceBase) BeforeFilter(resp http.ResponseWriter, req *http.Request) bool {\n\treturn true\n}\n\nfunc (ResourceBase) Get(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Post(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Patch(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Update(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) Delete(request *http.Request) (interface{}, error) {\n\treturn nil, MethodNotAllowed\n}\n\nfunc (ResourceBase) SubResources() routing.Matcher {\n\treturn HttpErrorMatcher(NotFound)\n}\n\ntype LimitedResource struct {\n\tResourceBase\n\tRequestSizeLimit int64\n}\n\nfunc (r LimitedResource) BeforeFilter(resp http.ResponseWriter, req *http.Request) bool {\n\tif req.Body != nil {\n\t\treq.Body = http.MaxBytesReader(resp, req.Body, r.RequestSizeLimit)\n\t}\n\treturn true\n}\n\nfunc ResourceMatcher(resource Resource) routing.Matcher {\n\treturn routing.Sequence(\n\t\trouting.EndSeq(\n\t\t\trouting.GET(restHandler{before: resource.BeforeFilter, handler: resource.Get}),\n\t\t\trouting.POST(restHandler{before: resource.BeforeFilter, handler: resource.Post}),\n\t\t\trouting.PUT(restHandler{before: resource.BeforeFilter, handler: resource.Update}),\n\t\t\trouting.PATCH(restHandler{before: resource.BeforeFilter, handler: resource.Patch}),\n\t\t\trouting.DELETE(restHandler{before: resource.BeforeFilter, handler: resource.Delete}),\n\t\t\tHttpErrorMatcher(MethodNotAllowed),\n\t\t),\n\t\tresource.SubResources(),\n\t)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc operationEmbedGo(pkg *build.Package) {\n\n\tboxMap := findBoxes(pkg)\n\n\t\/\/ notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?\n\tif len(boxMap) == 0 {\n\t\tfmt.Println(\"no calls to rice.FindBox() found\")\n\t\treturn\n\t}\n\n\tverbosef(\"\\n\")\n\n\tfor boxname := range boxMap {\n\t\t\/\/ find path and filename for this box\n\t\tboxPath := filepath.Join(pkg.Dir, boxname)\n\t\tboxFilename := strings.Replace(boxname, \"\/\", \"-\", -1)\n\t\tboxFilename = strings.Replace(boxFilename, \"..\", \"back\", -1)\n\t\tboxFilename = boxFilename + `.rice-box.go`\n\n\t\t\/\/ verbose info\n\t\tverbosef(\"embedding box '%s'\\n\", boxname)\n\t\tverbosef(\"\\tto file %s\\n\", boxFilename)\n\n\t\t\/\/ create box datastructure (used by template)\n\t\tbox := &boxDataType{\n\t\t\tPackage: pkg.Name,\n\t\t\tBoxName: boxname,\n\t\t\tUnixNow: time.Now().Unix(),\n\t\t\tFiles: make([]*fileDataType, 0),\n\t\t\tDirs: make(map[string]*dirDataType),\n\t\t}\n\n\t\t\/\/ fill box datastructure with file data\n\t\tfilepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error walking box: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfilename := strings.TrimPrefix(path, boxPath)\n\t\t\tfilename = strings.Replace(filename, \"\\\\\", \"\/\", -1)\n\t\t\tfilename = strings.TrimPrefix(filename, \"\/\")\n\t\t\tif info.IsDir() {\n\t\t\t\tdirData := &dirDataType{\n\t\t\t\t\tIdentifier: \"dir_\" + nextIdentifier(),\n\t\t\t\t\tFileName: filename,\n\t\t\t\t\tModTime: info.ModTime().Unix(),\n\t\t\t\t\tChildFiles: make([]*fileDataType, 0),\n\t\t\t\t\tChildDirs: make([]*dirDataType, 0),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes dir: '%s'\\n\", dirData.FileName)\n\t\t\t\tbox.Dirs[dirData.FileName] = dirData\n\n\t\t\t\t\/\/ add tree entry (skip for root, it'll create a recursion)\n\t\t\t\tif dirData.FileName != \"\" {\n\t\t\t\t\tpathParts := strings.Split(dirData.FileName, \"\/\")\n\t\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\t\tparentDir.ChildDirs = append(parentDir.ChildDirs, dirData)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfileData := &fileDataType{\n\t\t\t\t\tIdentifier: \"file_\" + nextIdentifier(),\n\t\t\t\t\tFileName: filename,\n\t\t\t\t\tModTime: info.ModTime().Unix(),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes file: '%s'\\n\", fileData.FileName)\n\t\t\t\tfileData.Content, err = ioutil.ReadFile(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"error reading file content while walking box: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tbox.Files = append(box.Files, fileData)\n\n\t\t\t\t\/\/ add tree entry\n\t\t\t\tpathParts := strings.Split(fileData.FileName, \"\/\")\n\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\tparentDir.ChildFiles = append(parentDir.ChildFiles, fileData)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tembedSourceUnformated := bytes.NewBuffer(make([]byte, 0))\n\n\t\t\/\/ execute template to buffer\n\t\terr := tmplEmbeddedBox.Execute(embedSourceUnformated, box)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error writing embedded box to file (template execute): %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ format the source code\n\t\tembedSource, err := format.Source(embedSourceUnformated.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error formatting embedSource: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ create go file for box\n\t\tboxFile, err := os.Create(filepath.Join(pkg.Dir, boxFilename))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error creating embedded box file: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer boxFile.Close()\n\n\t\t\/\/ write source to file\n\t\t_, err = io.Copy(boxFile, bytes.NewBuffer(embedSource))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error writing embedSource to file: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\nMake golint happy about generated code.package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc operationEmbedGo(pkg *build.Package) {\n\n\tboxMap := findBoxes(pkg)\n\n\t\/\/ notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?\n\tif len(boxMap) == 0 {\n\t\tfmt.Println(\"no calls to rice.FindBox() found\")\n\t\treturn\n\t}\n\n\tverbosef(\"\\n\")\n\n\tfor boxname := range boxMap {\n\t\t\/\/ find path and filename for this box\n\t\tboxPath := filepath.Join(pkg.Dir, boxname)\n\t\tboxFilename := strings.Replace(boxname, \"\/\", \"-\", -1)\n\t\tboxFilename = strings.Replace(boxFilename, \"..\", \"back\", -1)\n\t\tboxFilename = boxFilename + `.rice-box.go`\n\n\t\t\/\/ verbose info\n\t\tverbosef(\"embedding box '%s'\\n\", boxname)\n\t\tverbosef(\"\\tto file %s\\n\", boxFilename)\n\n\t\t\/\/ create box datastructure (used by template)\n\t\tbox := &boxDataType{\n\t\t\tPackage: pkg.Name,\n\t\t\tBoxName: boxname,\n\t\t\tUnixNow: time.Now().Unix(),\n\t\t\tFiles: make([]*fileDataType, 0),\n\t\t\tDirs: make(map[string]*dirDataType),\n\t\t}\n\n\t\t\/\/ fill box datastructure with file data\n\t\tfilepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error walking box: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfilename := strings.TrimPrefix(path, boxPath)\n\t\t\tfilename = strings.Replace(filename, \"\\\\\", \"\/\", -1)\n\t\t\tfilename = strings.TrimPrefix(filename, \"\/\")\n\t\t\tif info.IsDir() {\n\t\t\t\tdirData := &dirDataType{\n\t\t\t\t\tIdentifier: \"dir\" + nextIdentifier(),\n\t\t\t\t\tFileName: filename,\n\t\t\t\t\tModTime: info.ModTime().Unix(),\n\t\t\t\t\tChildFiles: make([]*fileDataType, 0),\n\t\t\t\t\tChildDirs: make([]*dirDataType, 0),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes dir: '%s'\\n\", dirData.FileName)\n\t\t\t\tbox.Dirs[dirData.FileName] = dirData\n\n\t\t\t\t\/\/ add tree entry (skip for root, it'll create a recursion)\n\t\t\t\tif dirData.FileName != \"\" {\n\t\t\t\t\tpathParts := strings.Split(dirData.FileName, \"\/\")\n\t\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\t\tparentDir.ChildDirs = append(parentDir.ChildDirs, dirData)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfileData := &fileDataType{\n\t\t\t\t\tIdentifier: \"file\" + nextIdentifier(),\n\t\t\t\t\tFileName: filename,\n\t\t\t\t\tModTime: info.ModTime().Unix(),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes file: '%s'\\n\", fileData.FileName)\n\t\t\t\tfileData.Content, err = ioutil.ReadFile(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"error reading file content while walking box: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tbox.Files = append(box.Files, fileData)\n\n\t\t\t\t\/\/ add tree entry\n\t\t\t\tpathParts := strings.Split(fileData.FileName, \"\/\")\n\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\tparentDir.ChildFiles = append(parentDir.ChildFiles, fileData)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tembedSourceUnformated := bytes.NewBuffer(make([]byte, 0))\n\n\t\t\/\/ execute template to buffer\n\t\terr := tmplEmbeddedBox.Execute(embedSourceUnformated, box)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error writing embedded box to file (template execute): %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ format the source code\n\t\tembedSource, err := format.Source(embedSourceUnformated.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error formatting embedSource: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ create go file for box\n\t\tboxFile, err := os.Create(filepath.Join(pkg.Dir, boxFilename))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error creating embedded box file: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer boxFile.Close()\n\n\t\t\/\/ write source to file\n\t\t_, err = io.Copy(boxFile, bytes.NewBuffer(embedSource))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error writing embedSource to file: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ chris 090115 Unix removable lock file.\n\n\/\/ TODO Note how Close calls errors are not handled.\n\npackage rmlock\n\nimport (\n\t\"os\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\ntype LockContext struct {\n\tglobalname string\n\n\tlocal *os.File\n}\n\nfunc globalCtx(globalname string, inner func() error) error {\n\tf, err := os.OpenFile(globalname, os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {\n\t\treturn err\n\t}\n\n\tif err := inner(); err != nil {\n\t\treturn err\n\t}\n\n\tif err = unix.Flock(int(f.Fd()), unix.LOCK_UN); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc Lock(globalname, localname string) (*LockContext, error) {\n\tvar lc *LockContext\n\n\terr := globalCtx(globalname, func() error {\n\t\tf, err := os.Openfile(localname, os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = unix.Flock(int(f.Fd()), unix.LOCK_EX | unix.LOCK_NB)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\n\t\tlc = &LockContext{\n\t\t\tglobalname: globalname,\n\t\t\tlocal: f,\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lc, nil\n\n}\n\nfunc (lc *LockContext) Unlock() error {\n\treturn globalCtx(lc.globalname, func() error {\n\t\tlc.local.Close()\n\t\treturn os.Remove(lc.local.Name())\n\t})\n}\nrmlock: Refactors mode to be a const.\/\/ chris 090115 Unix removable lock file.\n\n\/\/ TODO Note how Close calls errors are not handled.\n\npackage rmlock\n\nimport (\n\t\"os\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst mode = 0666\n\ntype LockContext struct {\n\tglobalname string\n\n\tlocal *os.File\n}\n\nfunc globalCtx(globalname string, inner func() error) error {\n\tf, err := os.OpenFile(globalname, os.O_CREATE, mode)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {\n\t\treturn err\n\t}\n\n\tif err := inner(); err != nil {\n\t\treturn err\n\t}\n\n\tif err = unix.Flock(int(f.Fd()), unix.LOCK_UN); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc Lock(globalname, localname string) (*LockContext, error) {\n\tvar lc *LockContext\n\n\terr := globalCtx(globalname, func() error {\n\t\tf, err := os.OpenFile(localname, os.O_CREATE, mode)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = unix.Flock(int(f.Fd()), unix.LOCK_EX | unix.LOCK_NB)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\n\t\tlc = &LockContext{\n\t\t\tglobalname: globalname,\n\t\t\tlocal: f,\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lc, nil\n\n}\n\nfunc (lc *LockContext) Unlock() error {\n\treturn globalCtx(lc.globalname, func() error {\n\t\tlc.local.Close()\n\t\treturn os.Remove(lc.local.Name())\n\t})\n}\n<|endoftext|>"} {"text":"package radius\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestRFCBuild(t *testing.T) {\n\tt.Parallel()\n\n\tvar packages []string\n\n\tf, err := os.Open(\".\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\tentries, err := f.Readdir(0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, entry := range entries {\n\t\tif entry.IsDir() && strings.HasPrefix(entry.Name(), \"rfc\") {\n\t\t\tpackages = append(packages, entry.Name())\n\t\t}\n\t}\n\n\tfor _, pkg := range packages {\n\t\tfunc(pkg string) {\n\t\t\tt.Run(pkg, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tcmd := exec.Command(\"go\", \"build\", \"layeh.com\/radius\/\"+pkg)\n\t\t\t\toutput, err := cmd.CombinedOutput()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"%s: %s\\n\", err, output)\n\t\t\t\t}\n\t\t\t})\n\t\t}(pkg)\n\t}\n}\nremove useless test<|endoftext|>"} {"text":"package sftp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"sync\"\n\t\"syscall\"\n)\n\nvar maxTxPacket uint32 = 1 << 15\n\ntype handleHandler func(string) string\n\ntype Handlers struct {\n\tFileGet FileReader\n\tFilePut FileWriter\n\tFileCmd FileCmder\n\tFileInfo FileInfoer\n}\n\n\/\/ Server that abstracts the sftp protocol for a http request-like protocol\ntype RequestServer struct {\n\tserverConn\n\tHandlers Handlers\n\tpktChan chan packet\n\topenRequests map[string]*Request\n\topenRequestLock sync.RWMutex\n}\n\n\/\/ simple factory function\n\/\/ one server per user-session\nfunc NewRequestServer(rwc io.ReadWriteCloser, h Handlers) (*RequestServer, error) {\n\ts := &RequestServer{\n\t\tserverConn: serverConn{\n\t\t\tconn: conn{\n\t\t\t\tReader: rwc,\n\t\t\t\tWriteCloser: rwc,\n\t\t\t},\n\t\t},\n\t\tHandlers: h,\n\t\tpktChan: make(chan packet, sftpServerWorkerCount),\n\t\topenRequests: make(map[string]*Request),\n\t}\n\n\treturn s, nil\n}\n\nfunc (rs *RequestServer) nextRequest(r *Request) string {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\trs.openRequests[r.Filepath] = r\n\treturn r.Filepath\n}\n\nfunc (rs *RequestServer) getRequest(handle string) (*Request, bool) {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\tr, ok := rs.openRequests[handle]\n\treturn r, ok\n}\n\nfunc (rs *RequestServer) closeRequest(handle string) {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\tif _, ok := rs.openRequests[handle]; ok {\n\t\tdelete(rs.openRequests, handle)\n\t}\n}\n\n\/\/ close the read\/write\/closer to trigger exiting the main server loop\nfunc (rs *RequestServer) Close() error { return rs.conn.Close() }\n\n\/\/ start serving requests from user session\nfunc (rs *RequestServer) Serve() error {\n\tvar wg sync.WaitGroup\n\twg.Add(sftpServerWorkerCount)\n\tfor i := 0; i < sftpServerWorkerCount; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tif err := rs.packetWorker(); err != nil {\n\t\t\t\trs.conn.Close() \/\/ shuts down recvPacket\n\t\t\t}\n\t\t}()\n\t}\n\n\tvar err error\n\tvar pktType uint8\n\tvar pktBytes []byte\n\tfor {\n\t\tpktType, pktBytes, err = rs.recvPacket()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tpkt, err := makePacket(rxPacket{fxp(pktType), pktBytes})\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\trs.pktChan <- pkt\n\t}\n\n\tclose(rs.pktChan) \/\/ shuts down sftpServerWorkers\n\twg.Wait() \/\/ wait for all workers to exit\n\treturn err\n}\n\nfunc (rs *RequestServer) packetWorker() error {\n\tfor pkt := range rs.pktChan {\n\t\tfmt.Println(\"Incoming Packet: \", pkt, reflect.TypeOf(pkt))\n\t\tvar handle string\n\t\tvar rpkt resp_packet\n\t\tvar err error\n\t\tswitch pkt := pkt.(type) {\n\t\tcase *sshFxInitPacket:\n\t\t\trpkt = sshFxVersionPacket{sftpProtocolVersion, nil}\n\t\tcase *sshFxpClosePacket:\n\t\t\thandle = pkt.getHandle()\n\t\t\trs.closeRequest(handle)\n\t\t\trpkt = statusFromError(pkt, nil)\n\t\tcase *sshFxpRealpathPacket:\n\t\t\trpkt = cleanPath(pkt)\n\t\tcase isOpener:\n\t\t\thandle = rs.nextRequest(newRequest(pkt.getPath()))\n\t\t\trpkt = sshFxpHandlePacket{pkt.id(), handle}\n\t\tcase hasPath:\n\t\t\thandle = rs.nextRequest(newRequest(pkt.getPath()))\n\t\t\trpkt = rs.request(handle, pkt)\n\t\tcase hasHandle:\n\t\t\thandle = pkt.getHandle()\n\t\t\trpkt = rs.request(handle, pkt)\n\t\t}\n\n\t\tfmt.Println(\"Reply Packet: \", rpkt, reflect.TypeOf(rpkt))\n\t\terr = rs.sendPacket(rpkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cleanPath(pkt *sshFxpRealpathPacket) resp_packet {\n\tpath := pkt.getPath()\n\tif !filepath.IsAbs(path) {\n\t\tpath = \"\/\" + path\n\t} \/\/ all paths are absolute\n\n\tcleaned_path := filepath.Clean(path)\n\treturn &sshFxpNamePacket{\n\t\tID: pkt.id(),\n\t\tNameAttrs: []sshFxpNameAttr{{\n\t\t\tName: cleaned_path,\n\t\t\tLongName: cleaned_path,\n\t\t\tAttrs: emptyFileStat,\n\t\t}},\n\t}\n}\n\nfunc (rs *RequestServer) request(handle string, pkt packet) resp_packet {\n\tvar rpkt resp_packet\n\tvar err error\n\tif request, ok := rs.getRequest(handle); ok {\n\t\t\/\/ called here to keep packet handling out of request for testing\n\t\trequest.populate(pkt)\n\t\tfmt.Println(\"Request Method: \", request.Method)\n\t\trpkt, err = request.handle(rs.Handlers)\n\t\tif err != nil {\n\t\t\trpkt = statusFromError(pkt, err)\n\t\t}\n\t} else {\n\t\trpkt = statusFromError(pkt, syscall.EBADF)\n\t}\n\treturn rpkt\n}\nos.ErrorNotExist should convert to ssh_FX_NO_SUCH_FILEpackage sftp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"sync\"\n\t\"syscall\"\n)\n\nvar maxTxPacket uint32 = 1 << 15\n\ntype handleHandler func(string) string\n\ntype Handlers struct {\n\tFileGet FileReader\n\tFilePut FileWriter\n\tFileCmd FileCmder\n\tFileInfo FileInfoer\n}\n\n\/\/ Server that abstracts the sftp protocol for a http request-like protocol\ntype RequestServer struct {\n\tserverConn\n\tHandlers Handlers\n\tpktChan chan packet\n\topenRequests map[string]*Request\n\topenRequestLock sync.RWMutex\n}\n\n\/\/ simple factory function\n\/\/ one server per user-session\nfunc NewRequestServer(rwc io.ReadWriteCloser, h Handlers) (*RequestServer, error) {\n\ts := &RequestServer{\n\t\tserverConn: serverConn{\n\t\t\tconn: conn{\n\t\t\t\tReader: rwc,\n\t\t\t\tWriteCloser: rwc,\n\t\t\t},\n\t\t},\n\t\tHandlers: h,\n\t\tpktChan: make(chan packet, sftpServerWorkerCount),\n\t\topenRequests: make(map[string]*Request),\n\t}\n\n\treturn s, nil\n}\n\nfunc (rs *RequestServer) nextRequest(r *Request) string {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\trs.openRequests[r.Filepath] = r\n\treturn r.Filepath\n}\n\nfunc (rs *RequestServer) getRequest(handle string) (*Request, bool) {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\tr, ok := rs.openRequests[handle]\n\treturn r, ok\n}\n\nfunc (rs *RequestServer) closeRequest(handle string) {\n\trs.openRequestLock.Lock()\n\tdefer rs.openRequestLock.Unlock()\n\tif _, ok := rs.openRequests[handle]; ok {\n\t\tdelete(rs.openRequests, handle)\n\t}\n}\n\n\/\/ close the read\/write\/closer to trigger exiting the main server loop\nfunc (rs *RequestServer) Close() error { return rs.conn.Close() }\n\n\/\/ start serving requests from user session\nfunc (rs *RequestServer) Serve() error {\n\tvar wg sync.WaitGroup\n\twg.Add(sftpServerWorkerCount)\n\tfor i := 0; i < sftpServerWorkerCount; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tif err := rs.packetWorker(); err != nil {\n\t\t\t\trs.conn.Close() \/\/ shuts down recvPacket\n\t\t\t}\n\t\t}()\n\t}\n\n\tvar err error\n\tvar pktType uint8\n\tvar pktBytes []byte\n\tfor {\n\t\tpktType, pktBytes, err = rs.recvPacket()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tpkt, err := makePacket(rxPacket{fxp(pktType), pktBytes})\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\trs.pktChan <- pkt\n\t}\n\n\tclose(rs.pktChan) \/\/ shuts down sftpServerWorkers\n\twg.Wait() \/\/ wait for all workers to exit\n\treturn err\n}\n\nfunc (rs *RequestServer) packetWorker() error {\n\tfor pkt := range rs.pktChan {\n\t\tfmt.Println(\"Incoming Packet: \", pkt, reflect.TypeOf(pkt))\n\t\tvar handle string\n\t\tvar rpkt resp_packet\n\t\tvar err error\n\t\tswitch pkt := pkt.(type) {\n\t\tcase *sshFxInitPacket:\n\t\t\trpkt = sshFxVersionPacket{sftpProtocolVersion, nil}\n\t\tcase *sshFxpClosePacket:\n\t\t\thandle = pkt.getHandle()\n\t\t\trs.closeRequest(handle)\n\t\t\trpkt = statusFromError(pkt, nil)\n\t\tcase *sshFxpRealpathPacket:\n\t\t\trpkt = cleanPath(pkt)\n\t\tcase isOpener:\n\t\t\thandle = rs.nextRequest(newRequest(pkt.getPath()))\n\t\t\trpkt = sshFxpHandlePacket{pkt.id(), handle}\n\t\tcase hasPath:\n\t\t\thandle = rs.nextRequest(newRequest(pkt.getPath()))\n\t\t\trpkt = rs.request(handle, pkt)\n\t\tcase hasHandle:\n\t\t\thandle = pkt.getHandle()\n\t\t\trpkt = rs.request(handle, pkt)\n\t\t}\n\n\t\tfmt.Println(\"Reply Packet: \", rpkt, reflect.TypeOf(rpkt))\n\t\terr = rs.sendPacket(rpkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cleanPath(pkt *sshFxpRealpathPacket) resp_packet {\n\tpath := pkt.getPath()\n\tif !filepath.IsAbs(path) {\n\t\tpath = \"\/\" + path\n\t} \/\/ all paths are absolute\n\n\tcleaned_path := filepath.Clean(path)\n\treturn &sshFxpNamePacket{\n\t\tID: pkt.id(),\n\t\tNameAttrs: []sshFxpNameAttr{{\n\t\t\tName: cleaned_path,\n\t\t\tLongName: cleaned_path,\n\t\t\tAttrs: emptyFileStat,\n\t\t}},\n\t}\n}\n\nfunc (rs *RequestServer) request(handle string, pkt packet) resp_packet {\n\tvar rpkt resp_packet\n\tvar err error\n\tif request, ok := rs.getRequest(handle); ok {\n\t\t\/\/ called here to keep packet handling out of request for testing\n\t\trequest.populate(pkt)\n\t\tfmt.Println(\"Request Method: \", request.Method)\n\t\trpkt, err = request.handle(rs.Handlers)\n\t\tif err != nil {\n\t\t\terr = errorAdapter(err)\n\t\t\trpkt = statusFromError(pkt, err)\n\t\t}\n\t} else {\n\t\trpkt = statusFromError(pkt, syscall.EBADF)\n\t}\n\treturn rpkt\n}\n\n\/\/ os.ErrNotExist should convert to ssh_FX_NO_SUCH_FILE, but is not recognized\n\/\/ by statusFromError. So we convert to syscall.ENOENT which it does.\nfunc errorAdapter(err error) error {\n\tif err == os.ErrNotExist {\n\t\treturn syscall.ENOENT\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"\/\/ Code generated by protoc-gen-grpc-gateway\n\/\/ source: zvelo\/msg\/api.proto\n\/\/ DO NOT EDIT!\n\n\/*\nPackage msg is a reverse proxy.\n\nIt translates gRPC into RESTful JSON APIs.\n*\/\npackage msg\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/runtime\"\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/utilities\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/status\"\n)\n\nvar _ codes.Code\nvar _ io.Reader\nvar _ status.Status\nvar _ = runtime.String\nvar _ = utilities.NewDoubleArray\n\nfunc request_API_QueryV1_0(ctx context.Context, marshaler runtime.Marshaler, client APIClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar protoReq QueryRequests\n\tvar metadata runtime.ServerMetadata\n\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\n\tmsg, err := client.QueryV1(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n\n}\n\nfunc request_API_QueryResultV1_0(ctx context.Context, marshaler runtime.Marshaler, client APIClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar protoReq QueryPollRequest\n\tvar metadata runtime.ServerMetadata\n\n\tvar (\n\t\tval string\n\t\tok bool\n\t\terr error\n\t\t_ = err\n\t)\n\n\tval, ok = pathParams[\"request_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"request_id\")\n\t}\n\n\tprotoReq.RequestId, err = runtime.String(val)\n\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"request_id\", err)\n\t}\n\n\tmsg, err := client.QueryResultV1(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n\n}\n\n\/\/ RegisterAPIHandlerFromEndpoint is same as RegisterAPIHandler but\n\/\/ automatically dials to \"endpoint\" and closes the connection when \"ctx\" gets done.\nfunc RegisterAPIHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn RegisterAPIHandler(ctx, mux, conn)\n}\n\n\/\/ RegisterAPIHandler registers the http handlers for service API to \"mux\".\n\/\/ The handlers forward requests to the grpc endpoint over \"conn\".\nfunc RegisterAPIHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\tclient := NewAPIClient(conn)\n\n\tmux.Handle(\"POST\", pattern_API_QueryV1_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tif cn, ok := w.(http.CloseNotifier); ok {\n\t\t\tgo func(done <-chan struct{}, closed <-chan bool) {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\tcase <-closed:\n\t\t\t\t\tcancel()\n\t\t\t\t}\n\t\t\t}(ctx.Done(), cn.CloseNotify())\n\t\t}\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_API_QueryV1_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_API_QueryV1_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_API_QueryResultV1_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tif cn, ok := w.(http.CloseNotifier); ok {\n\t\t\tgo func(done <-chan struct{}, closed <-chan bool) {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\tcase <-closed:\n\t\t\t\t\tcancel()\n\t\t\t\t}\n\t\t\t}(ctx.Done(), cn.CloseNotify())\n\t\t}\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_API_QueryResultV1_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_API_QueryResultV1_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}\n\nvar (\n\tpattern_API_QueryV1_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{\"v1\", \"query\"}, \"\"))\n\n\tpattern_API_QueryResultV1_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{\"v1\", \"query\", \"request_id\"}, \"\"))\n)\n\nvar (\n\tforward_API_QueryV1_0 = runtime.ForwardResponseMessage\n\n\tforward_API_QueryResultV1_0 = runtime.ForwardResponseMessage\n)\nrebuild\/\/ Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.\n\/\/ source: zvelo\/msg\/api.proto\n\n\/*\nPackage msg is a reverse proxy.\n\nIt translates gRPC into RESTful JSON APIs.\n*\/\npackage msg\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/runtime\"\n\t\"github.com\/grpc-ecosystem\/grpc-gateway\/utilities\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/status\"\n)\n\nvar _ codes.Code\nvar _ io.Reader\nvar _ status.Status\nvar _ = runtime.String\nvar _ = utilities.NewDoubleArray\n\nfunc request_API_QueryV1_0(ctx context.Context, marshaler runtime.Marshaler, client APIClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar protoReq QueryRequests\n\tvar metadata runtime.ServerMetadata\n\n\tif err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"%v\", err)\n\t}\n\n\tmsg, err := client.QueryV1(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n\n}\n\nfunc request_API_QueryResultV1_0(ctx context.Context, marshaler runtime.Marshaler, client APIClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {\n\tvar protoReq QueryPollRequest\n\tvar metadata runtime.ServerMetadata\n\n\tvar (\n\t\tval string\n\t\tok bool\n\t\terr error\n\t\t_ = err\n\t)\n\n\tval, ok = pathParams[\"request_id\"]\n\tif !ok {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"missing parameter %s\", \"request_id\")\n\t}\n\n\tprotoReq.RequestId, err = runtime.String(val)\n\n\tif err != nil {\n\t\treturn nil, metadata, status.Errorf(codes.InvalidArgument, \"type mismatch, parameter: %s, error: %v\", \"request_id\", err)\n\t}\n\n\tmsg, err := client.QueryResultV1(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))\n\treturn msg, metadata, err\n\n}\n\n\/\/ RegisterAPIHandlerFromEndpoint is same as RegisterAPIHandler but\n\/\/ automatically dials to \"endpoint\" and closes the connection when \"ctx\" gets done.\nfunc RegisterAPIHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tif cerr := conn.Close(); cerr != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to close conn to %s: %v\", endpoint, cerr)\n\t\t\t}\n\t\t}()\n\t}()\n\n\treturn RegisterAPIHandler(ctx, mux, conn)\n}\n\n\/\/ RegisterAPIHandler registers the http handlers for service API to \"mux\".\n\/\/ The handlers forward requests to the grpc endpoint over \"conn\".\nfunc RegisterAPIHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\treturn RegisterAPIHandlerClient(ctx, mux, NewAPIClient(conn))\n}\n\n\/\/ RegisterAPIHandler registers the http handlers for service API to \"mux\".\n\/\/ The handlers forward requests to the grpc endpoint over the given implementation of \"APIClient\".\n\/\/ Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in \"APIClient\"\n\/\/ doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in\n\/\/ \"APIClient\" to call the correct interceptors.\nfunc RegisterAPIHandlerClient(ctx context.Context, mux *runtime.ServeMux, client APIClient) error {\n\n\tmux.Handle(\"POST\", pattern_API_QueryV1_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tif cn, ok := w.(http.CloseNotifier); ok {\n\t\t\tgo func(done <-chan struct{}, closed <-chan bool) {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\tcase <-closed:\n\t\t\t\t\tcancel()\n\t\t\t\t}\n\t\t\t}(ctx.Done(), cn.CloseNotify())\n\t\t}\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_API_QueryV1_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_API_QueryV1_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_API_QueryResultV1_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tif cn, ok := w.(http.CloseNotifier); ok {\n\t\t\tgo func(done <-chan struct{}, closed <-chan bool) {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\tcase <-closed:\n\t\t\t\t\tcancel()\n\t\t\t\t}\n\t\t\t}(ctx.Done(), cn.CloseNotify())\n\t\t}\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_API_QueryResultV1_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_API_QueryResultV1_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}\n\nvar (\n\tpattern_API_QueryV1_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{\"v1\", \"query\"}, \"\"))\n\n\tpattern_API_QueryResultV1_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{\"v1\", \"query\", \"request_id\"}, \"\"))\n)\n\nvar (\n\tforward_API_QueryV1_0 = runtime.ForwardResponseMessage\n\n\tforward_API_QueryResultV1_0 = runtime.ForwardResponseMessage\n)\n<|endoftext|>"} {"text":"package resource\n\nimport (\n\t\"time\"\n\n\t\"github.com\/gbl08ma\/disturbancesmlx\/dataobjects\"\n\t\"github.com\/heetch\/sqalx\"\n\t\"github.com\/yarf-framework\/yarf\"\n)\n\n\/\/ Stats composites resource\ntype Stats struct {\n\tresource\n\tcalculator StatsCalculator\n}\n\ntype apiStats struct {\n\tLineStats map[string]apiLineStats `msgpack:\"lineStats\" json:\"lineStats\"`\n\tLastDisturbance time.Time `msgpack:\"lastDisturbance\" json:\"lastDisturbance\"`\n}\n\ntype apiLineStats struct {\n\tAvailability float64 `msgpack:\"availability\" json:\"availability\"`\n\tAverageDisturbanceDuration dataobjects.Duration `msgpack:\"avgDistDuration\" json:\"avgDistDuration\"`\n}\n\ntype StatsCalculator interface {\n\tAvailability(node sqalx.Node, line *dataobjects.Line, startTime time.Time, endTime time.Time) (float64, time.Duration, error)\n}\n\nfunc (r *Stats) WithNode(node sqalx.Node) *Stats {\n\tr.node = node\n\treturn r\n}\n\nfunc (r *Stats) WithCalculator(calculator StatsCalculator) *Stats {\n\tr.calculator = calculator\n\treturn r\n}\n\nfunc (r *Stats) Get(c *yarf.Context) error {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\tstart := c.Request.URL.Query().Get(\"start\")\n\tstartTime := time.Now().AddDate(0, 0, -7)\n\tif start != \"\" {\n\t\tstartTime, err = time.Parse(time.RFC3339, start)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tend := c.Request.URL.Query().Get(\"end\")\n\tendTime := time.Now()\n\tif end != \"\" {\n\t\tendTime, err = time.Parse(time.RFC3339, end)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.Param(\"id\") != \"\" {\n\t\tnetwork, err := dataobjects.GetNetwork(tx, c.Param(\"id\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstats, err := r.getStatsForNetwork(tx, network, startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tRenderData(c, stats)\n\t} else {\n\t\tstatsMap := make(map[string]apiStats)\n\t\tnetworks, err := dataobjects.GetNetworks(tx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, network := range networks {\n\t\t\tstats, err := r.getStatsForNetwork(tx, network, startTime, endTime)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstatsMap[network.ID] = stats\n\t\t}\n\t\tRenderData(c, statsMap)\n\t}\n\treturn nil\n}\n\nfunc (r *Stats) getStatsForNetwork(node sqalx.Node, network *dataobjects.Network, startTime time.Time, endTime time.Time) (apiStats, error) {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn apiStats{}, err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\tlastDist, err := r.getLastDisturbanceTimeForNetwork(tx, network)\n\n\tstats := apiStats{\n\t\tLastDisturbance: lastDist,\n\t\tLineStats: make(map[string]apiLineStats),\n\t}\n\n\tlines, err := network.Lines(tx)\n\tif err != nil {\n\t\treturn apiStats{}, err\n\t}\n\n\tfor _, line := range lines {\n\t\tavailability, avgDuration, err := r.calculator.Availability(tx, line, startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn apiStats{}, err\n\t\t}\n\t\tstats.LineStats[line.ID] = apiLineStats{\n\t\t\tAvailability: availability,\n\t\t\tAverageDisturbanceDuration: dataobjects.Duration(avgDuration),\n\t\t}\n\t}\n\treturn stats, nil\n}\n\nfunc (r *Stats) getLastDisturbanceTimeForNetwork(node sqalx.Node, network *dataobjects.Network) (time.Time, error) {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn time.Now().UTC(), err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\td, err := network.LastDisturbance(tx)\n\tif err != nil {\n\t\treturn time.Now().UTC(), err\n\t}\n\n\tif !d.Ended {\n\t\treturn time.Now().UTC(), nil\n\t}\n\treturn d.EndTime, nil\n}\nAdd client input validationpackage resource\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gbl08ma\/disturbancesmlx\/dataobjects\"\n\t\"github.com\/heetch\/sqalx\"\n\t\"github.com\/yarf-framework\/yarf\"\n)\n\n\/\/ Stats composites resource\ntype Stats struct {\n\tresource\n\tcalculator StatsCalculator\n}\n\ntype apiStats struct {\n\tLineStats map[string]apiLineStats `msgpack:\"lineStats\" json:\"lineStats\"`\n\tLastDisturbance time.Time `msgpack:\"lastDisturbance\" json:\"lastDisturbance\"`\n}\n\ntype apiLineStats struct {\n\tAvailability float64 `msgpack:\"availability\" json:\"availability\"`\n\tAverageDisturbanceDuration dataobjects.Duration `msgpack:\"avgDistDuration\" json:\"avgDistDuration\"`\n}\n\ntype StatsCalculator interface {\n\tAvailability(node sqalx.Node, line *dataobjects.Line, startTime time.Time, endTime time.Time) (float64, time.Duration, error)\n}\n\nfunc (r *Stats) WithNode(node sqalx.Node) *Stats {\n\tr.node = node\n\treturn r\n}\n\nfunc (r *Stats) WithCalculator(calculator StatsCalculator) *Stats {\n\tr.calculator = calculator\n\treturn r\n}\n\nfunc (r *Stats) Get(c *yarf.Context) error {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\tstart := c.Request.URL.Query().Get(\"start\")\n\tstartTime := time.Now().AddDate(0, 0, -7)\n\tif start != \"\" {\n\t\tstartTime, err = time.Parse(time.RFC3339, start)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tend := c.Request.URL.Query().Get(\"end\")\n\tendTime := time.Now()\n\tif end != \"\" {\n\t\tendTime, err = time.Parse(time.RFC3339, end)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !endTime.After(startTime) {\n\t\treturn &yarf.CustomError{\n\t\t\tHTTPCode: http.StatusBadRequest,\n\t\t\tErrorMsg: \"End time must be after start time\",\n\t\t\tErrorBody: \"End time must be after start time\",\n\t\t}\n\t}\n\n\tif c.Param(\"id\") != \"\" {\n\t\tnetwork, err := dataobjects.GetNetwork(tx, c.Param(\"id\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstats, err := r.getStatsForNetwork(tx, network, startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tRenderData(c, stats)\n\t} else {\n\t\tstatsMap := make(map[string]apiStats)\n\t\tnetworks, err := dataobjects.GetNetworks(tx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, network := range networks {\n\t\t\tstats, err := r.getStatsForNetwork(tx, network, startTime, endTime)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstatsMap[network.ID] = stats\n\t\t}\n\t\tRenderData(c, statsMap)\n\t}\n\treturn nil\n}\n\nfunc (r *Stats) getStatsForNetwork(node sqalx.Node, network *dataobjects.Network, startTime time.Time, endTime time.Time) (apiStats, error) {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn apiStats{}, err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\tlastDist, err := r.getLastDisturbanceTimeForNetwork(tx, network)\n\n\tstats := apiStats{\n\t\tLastDisturbance: lastDist,\n\t\tLineStats: make(map[string]apiLineStats),\n\t}\n\n\tlines, err := network.Lines(tx)\n\tif err != nil {\n\t\treturn apiStats{}, err\n\t}\n\n\tfor _, line := range lines {\n\t\tavailability, avgDuration, err := r.calculator.Availability(tx, line, startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn apiStats{}, err\n\t\t}\n\t\tstats.LineStats[line.ID] = apiLineStats{\n\t\t\tAvailability: availability,\n\t\t\tAverageDisturbanceDuration: dataobjects.Duration(avgDuration),\n\t\t}\n\t}\n\treturn stats, nil\n}\n\nfunc (r *Stats) getLastDisturbanceTimeForNetwork(node sqalx.Node, network *dataobjects.Network) (time.Time, error) {\n\ttx, err := r.Beginx()\n\tif err != nil {\n\t\treturn time.Now().UTC(), err\n\t}\n\tdefer tx.Commit() \/\/ read-only tx\n\n\td, err := network.LastDisturbance(tx)\n\tif err != nil {\n\t\treturn time.Now().UTC(), err\n\t}\n\n\tif !d.Ended {\n\t\treturn time.Now().UTC(), nil\n\t}\n\treturn d.EndTime, nil\n}\n<|endoftext|>"} {"text":"package wikidump\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n)\n\ntype loggingWriter struct {\n\tw io.WriteCloser\n\tdone, total int64\n\tthreshold float32\n}\n\nfunc (w *loggingWriter) Write(p []byte) (n int, err error) {\n\tn, err = w.w.Write(p)\n\tw.done += int64(n)\n\tif float32(w.done) > w.threshold*float32(w.total) {\n\t\tlog.Printf(\"%3d%% done (%d of %d)\",\n\t\t\tint(100*w.threshold), w.done, w.total)\n\t\tw.threshold += .05\n\t}\n\treturn\n}\n\nfunc (w loggingWriter) Close() error {\n\treturn w.w.Close()\n}\n\nfunc nullLogger(string, ...interface{}) {\n}\n\n\/\/ Download database dump for wikiname (e.g., \"en\", \"sco\", \"nds_nl\") from\n\/\/ WikiMedia.\n\/\/\n\/\/ Returns the local file path of the dump, derived from the URL.\n\/\/\n\/\/ Logs its progress on the standard log if logProgress is true.\nfunc Download(wikiname string, logProgress bool) (filepath string, err error) {\n\treturn download(wikiname, \".\", logProgress, http.DefaultClient)\n}\n\nfunc download(wikiname, directory string, logProgress bool,\n\tclient *http.Client) (filepath string, err error) {\n\n\tlogprint := nullLogger\n\tif logProgress {\n\t\tlogprint = log.Printf\n\t}\n\n\turlstr := fmt.Sprintf(\n\t\t\"https:\/\/dumps.wikimedia.org\/%s\/latest\/%s-latest-pages-articles.xml.bz2\",\n\t\twikiname, wikiname)\n\tresp, err := client.Get(urlstr)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"HTTP error %d for %s\", resp.StatusCode, urlstr)\n\t}\n\n\tu, err := url.Parse(urlstr)\n\tif err != nil {\n\t\treturn\n\t}\n\tfilepath = path.Base(u.Path)\n\n\tvar out io.WriteCloser\n\tout, err = os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer out.Close()\n\n\tlogprint(\"downloading from %s to %s\", urlstr, filepath)\n\tif logProgress && resp.ContentLength >= 0 {\n\t\tout = &loggingWriter{\n\t\t\tw: out,\n\t\t\ttotal: resp.ContentLength,\n\t\t}\n\t}\n\t_, err = io.Copy(out, resp.Body)\n\tlogprint(\"download of %s done\", urlstr)\n\treturn\n}\nprogress bar in downloaderpackage wikidump\n\nimport (\n\t\"fmt\"\n\t\"github.com\/cheggaaa\/pb\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc nullLogger(string, ...interface{}) {\n}\n\n\/\/ Writer with progressbar.\ntype pbWriter struct {\n\tw io.WriteCloser\n\tbar *pb.ProgressBar\n}\n\nfunc newPbWriter(w io.WriteCloser, total int64) *pbWriter {\n\tpbw := &pbWriter{w, pb.New64(total)}\n\tpbw.bar.Start()\n\treturn pbw\n}\n\nfunc (w *pbWriter) Close() error {\n\tw.bar.Finish()\n\treturn w.w.Close()\n}\n\nfunc (w *pbWriter) Write(p []byte) (n int, err error) {\n\tn, err = w.w.Write(p)\n\tw.bar.Add(n)\n\treturn\n}\n\n\/\/ Download database dump for wikiname (e.g., \"en\", \"sco\", \"nds_nl\") from\n\/\/ WikiMedia.\n\/\/\n\/\/ Returns the local file path of the dump, derived from the URL.\n\/\/\n\/\/ Logs its progress on the standard log if logProgress is true.\nfunc Download(wikiname string, logProgress bool) (filepath string, err error) {\n\treturn download(wikiname, \".\", logProgress, http.DefaultClient)\n}\n\nfunc download(wikiname, directory string, logProgress bool,\n\tclient *http.Client) (filepath string, err error) {\n\n\tlogprint := nullLogger\n\tif logProgress {\n\t\tlogprint = log.Printf\n\t}\n\n\turlstr := fmt.Sprintf(\n\t\t\"https:\/\/dumps.wikimedia.org\/%s\/latest\/%s-latest-pages-articles.xml.bz2\",\n\t\twikiname, wikiname)\n\tresp, err := client.Get(urlstr)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"HTTP error %d for %s\", resp.StatusCode, urlstr)\n\t}\n\n\tu, err := url.Parse(urlstr)\n\tif err != nil {\n\t\treturn\n\t}\n\tfilepath = path.Base(u.Path)\n\n\tvar out io.WriteCloser\n\tout, err = os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer out.Close()\n\n\tlogprint(\"downloading from %s to %s\", urlstr, filepath)\n\tif logProgress && resp.ContentLength >= 0 {\n\t\tout = newPbWriter(out, resp.ContentLength)\n\t}\n\t_, err = io.Copy(out, resp.Body)\n\tlogprint(\"download of %s done\", urlstr)\n\treturn\n}\n<|endoftext|>"} {"text":"\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen \n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see .\n*\/\n\npackage model\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/db\"\n\t\"github.com\/webx-top\/echo\"\n\n\t\"github.com\/admpub\/nging\/v3\/application\/dbschema\"\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/common\"\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/ip2region\"\n\t\"github.com\/admpub\/nging\/v3\/application\/model\/base\"\n)\n\nfunc NewLoginLog(ctx echo.Context) *LoginLog {\n\tm := &LoginLog{\n\t\tNgingLoginLog: &dbschema.NgingLoginLog{},\n\t\tbase: base.New(ctx),\n\t}\n\tm.SetContext(ctx)\n\treturn m\n}\n\ntype LoginLog struct {\n\t*dbschema.NgingLoginLog\n\tbase *base.Base\n}\n\nfunc (s *LoginLog) check() error {\n\tif !echo.Bool(`backend.Anonymous`) {\n\t\ts.IpAddress = s.base.RealIP()\n\t\tipInfo, err := ip2region.IPInfo(s.IpAddress)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.IpLocation = ip2region.Stringify(ipInfo)\n\t\ts.UserAgent = s.base.Request().UserAgent()\n\t}\n\ts.Success = common.GetBoolFlag(s.Success)\n\ts.Errpwd = com.MaskString(s.Errpwd)\n\tday, _ := strconv.Atoi(time.Now().Format(`20060102`))\n\ts.Day = uint(day)\n\treturn nil\n}\n\nfunc (s *LoginLog) Add() (pk interface{}, err error) {\n\tif err = s.check(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.NgingLoginLog.Add()\n}\n\nfunc (s *LoginLog) Edit(mw func(db.Result) db.Result, args ...interface{}) (err error) {\n\tif err = s.check(); err != nil {\n\t\treturn err\n\t}\n\treturn s.NgingLoginLog.Edit(mw, args...)\n}\n\nfunc (s *LoginLog) ListPage(cond *db.Compounds, sorts ...interface{}) ([]*dbschema.NgingLoginLog, error) {\n\t_, err := common.NewLister(s, nil, func(r db.Result) db.Result {\n\t\treturn r.OrderBy(sorts...)\n\t}, cond.And()).Paging(s.base.Context)\n\treturn s.Objects(), err\n}\nupdate\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen \n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see .\n*\/\n\npackage model\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/db\"\n\t\"github.com\/webx-top\/echo\"\n\n\t\"github.com\/admpub\/nging\/v3\/application\/dbschema\"\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/common\"\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/ip2region\"\n\t\"github.com\/admpub\/nging\/v3\/application\/model\/base\"\n)\n\nfunc NewLoginLog(ctx echo.Context) *LoginLog {\n\tm := &LoginLog{\n\t\tNgingLoginLog: &dbschema.NgingLoginLog{},\n\t\tbase: base.New(ctx),\n\t}\n\tm.SetContext(ctx)\n\treturn m\n}\n\ntype LoginLog struct {\n\t*dbschema.NgingLoginLog\n\tbase *base.Base\n}\n\nfunc (s *LoginLog) check() error {\n\tk := `backend.Anonymous`\n\tif s.OwnerType != `user` {\n\t\tk = `frontend.Anonymous`\n\t}\n\tif !echo.Bool(k) {\n\t\ts.IpAddress = s.base.RealIP()\n\t\tipInfo, err := ip2region.IPInfo(s.IpAddress)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.IpLocation = ip2region.Stringify(ipInfo)\n\t\ts.UserAgent = s.base.Request().UserAgent()\n\t}\n\ts.Success = common.GetBoolFlag(s.Success)\n\ts.Errpwd = com.MaskString(s.Errpwd)\n\tday, _ := strconv.Atoi(time.Now().Format(`20060102`))\n\ts.Day = uint(day)\n\treturn nil\n}\n\nfunc (s *LoginLog) Add() (pk interface{}, err error) {\n\tif err = s.check(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.NgingLoginLog.Add()\n}\n\nfunc (s *LoginLog) Edit(mw func(db.Result) db.Result, args ...interface{}) (err error) {\n\tif err = s.check(); err != nil {\n\t\treturn err\n\t}\n\treturn s.NgingLoginLog.Edit(mw, args...)\n}\n\nfunc (s *LoginLog) ListPage(cond *db.Compounds, sorts ...interface{}) ([]*dbschema.NgingLoginLog, error) {\n\t_, err := common.NewLister(s, nil, func(r db.Result) db.Result {\n\t\treturn r.OrderBy(sorts...)\n\t}, cond.And()).Paging(s.base.Context)\n\treturn s.Objects(), err\n}\n<|endoftext|>"} {"text":"\/\/ package roundrobin implements dynamic weighted round robin load balancer http handler\npackage roundrobin\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\n\t\"github.com\/vulcand\/oxy\/utils\"\n)\n\n\/\/ Weight is an optional functional argument that sets weight of the server\nfunc Weight(w int) ServerOption {\n\treturn func(s *server) error {\n\t\tif w < 0 {\n\t\t\treturn fmt.Errorf(\"Weight should be >= 0\")\n\t\t}\n\t\ts.weight = w\n\t\treturn nil\n\t}\n}\n\n\/\/ ErrorHandler is a functional argument that sets error handler of the server\nfunc ErrorHandler(h utils.ErrorHandler) LBOption {\n\treturn func(s *RoundRobin) error {\n\t\ts.errHandler = h\n\t\treturn nil\n\t}\n}\n\ntype RoundRobin struct {\n\tmutex *sync.Mutex\n\tnext http.Handler\n\terrHandler utils.ErrorHandler\n\t\/\/ Current index (starts from -1)\n\tindex int\n\tservers []*server\n\tcurrentWeight int\n}\n\nfunc New(next http.Handler, opts ...LBOption) (*RoundRobin, error) {\n\trr := &RoundRobin{\n\t\tnext: next,\n\t\tindex: -1,\n\t\tmutex: &sync.Mutex{},\n\t\tservers: []*server{},\n\t}\n\tfor _, o := range opts {\n\t\tif err := o(rr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif rr.errHandler == nil {\n\t\trr.errHandler = utils.DefaultHandler\n\t}\n\treturn rr, nil\n}\n\nfunc (r *RoundRobin) Next() http.Handler {\n\treturn r.next\n}\n\nfunc (r *RoundRobin) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\turl, err := r.NextServer()\n\tif err != nil {\n\t\tr.errHandler.ServeHTTP(w, req, err)\n\t\treturn\n\t}\n\t\/\/ make shallow copy of request before chaning anything to avoid side effects\n\tnewReq := *req\n\tnewReq.URL = url\n\tr.next.ServeHTTP(w, &newReq)\n}\n\nfunc (r *RoundRobin) NextServer() (*url.URL, error) {\n\tsrv, err := r.nextServer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn utils.CopyURL(srv.url), nil\n}\n\nfunc (r *RoundRobin) nextServer() (*server, error) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif len(r.servers) == 0 {\n\t\treturn nil, fmt.Errorf(\"no servers in the pool\")\n\t}\n\n\t\/\/ The algo below may look messy, but is actually very simple\n\t\/\/ it calculates the GCD and subtracts it on every iteration, what interleaves servers\n\t\/\/ and allows us not to build an iterator every time we readjust weights\n\n\t\/\/ GCD across all enabled servers\n\tgcd := r.weightGcd()\n\t\/\/ Maximum weight across all enabled servers\n\tmax := r.maxWeight()\n\n\tfor {\n\t\tr.index = (r.index + 1) % len(r.servers)\n\t\tif r.index == 0 {\n\t\t\tr.currentWeight = r.currentWeight - gcd\n\t\t\tif r.currentWeight <= 0 {\n\t\t\t\tr.currentWeight = max\n\t\t\t\tif r.currentWeight == 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"all servers have 0 weight\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsrv := r.servers[r.index]\n\t\tif srv.weight >= r.currentWeight {\n\t\t\treturn srv, nil\n\t\t}\n\t}\n\t\/\/ We did full circle and found no available servers\n\treturn nil, fmt.Errorf(\"no available servers\")\n}\n\nfunc (r *RoundRobin) RemoveServer(u *url.URL) error {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\te, index := r.findServerByURL(u)\n\tif e == nil {\n\t\treturn fmt.Errorf(\"server not found\")\n\t}\n\tr.servers = append(r.servers[:index], r.servers[index+1:]...)\n\tr.resetState()\n\treturn nil\n}\n\nfunc (rr *RoundRobin) Servers() []*url.URL {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tout := make([]*url.URL, len(rr.servers))\n\tfor i, srv := range rr.servers {\n\t\tout[i] = srv.url\n\t}\n\treturn out\n}\n\nfunc (rr *RoundRobin) ServerWeight(u *url.URL) (int, bool) {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tif s, _ := rr.findServerByURL(u); s != nil {\n\t\treturn s.weight, true\n\t}\n\treturn -1, false\n}\n\n\/\/ In case if server is already present in the load balancer, returns error\nfunc (rr *RoundRobin) UpsertServer(u *url.URL, options ...ServerOption) error {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tif u == nil {\n\t\treturn fmt.Errorf(\"server URL can't be nil\")\n\t}\n\n\tif s, _ := rr.findServerByURL(u); s != nil {\n\t\tfor _, o := range options {\n\t\t\tif err := o(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\trr.resetState()\n\t\treturn nil\n\t}\n\n\tsrv := &server{url: utils.CopyURL(u)}\n\tfor _, o := range options {\n\t\tif err := o(srv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif srv.weight == 0 {\n\t\tsrv.weight = defaultWeight\n\t}\n\n\trr.servers = append(rr.servers, srv)\n\trr.resetState()\n\treturn nil\n}\n\nfunc (r *RoundRobin) resetIterator() {\n\tr.index = -1\n\tr.currentWeight = 0\n}\n\nfunc (r *RoundRobin) resetState() {\n\tr.resetIterator()\n}\n\nfunc (r *RoundRobin) findServerByURL(u *url.URL) (*server, int) {\n\tif len(r.servers) == 0 {\n\t\treturn nil, -1\n\t}\n\tfor i, s := range r.servers {\n\t\tif sameURL(u, s.url) {\n\t\t\treturn s, i\n\t\t}\n\t}\n\treturn nil, -1\n}\n\nfunc (rr *RoundRobin) maxWeight() int {\n\tmax := -1\n\tfor _, s := range rr.servers {\n\t\tif s.weight > max {\n\t\t\tmax = s.weight\n\t\t}\n\t}\n\treturn max\n}\n\nfunc (rr *RoundRobin) weightGcd() int {\n\tdivisor := -1\n\tfor _, s := range rr.servers {\n\t\tif divisor == -1 {\n\t\t\tdivisor = s.weight\n\t\t} else {\n\t\t\tdivisor = gcd(divisor, s.weight)\n\t\t}\n\t}\n\treturn divisor\n}\n\nfunc gcd(a, b int) int {\n\tfor b != 0 {\n\t\ta, b = b, a%b\n\t}\n\treturn a\n}\n\n\/\/ ServerOption provides various options for server, e.g. weight\ntype ServerOption func(*server) error\n\n\/\/ LBOption provides options for load balancer\ntype LBOption func(*RoundRobin) error\n\n\/\/ Set additional parameters for the server can be supplied when adding server\ntype server struct {\n\turl *url.URL\n\t\/\/ Relative weight for the enpoint to other enpoints in the load balancer\n\tweight int\n}\n\nconst defaultWeight = 1\n\nfunc sameURL(a, b *url.URL) bool {\n\treturn a.Path == b.Path && a.Host == b.Host && a.Scheme == b.Scheme\n}\n\ntype balancerHandler interface {\n\tServers() []*url.URL\n\tServeHTTP(w http.ResponseWriter, req *http.Request)\n\tServerWeight(u *url.URL) (int, bool)\n\tRemoveServer(u *url.URL) error\n\tUpsertServer(u *url.URL, options ...ServerOption) error\n\tNextServer() (*url.URL, error)\n\tNext() http.Handler\n}\nqnd cookie hacking\/\/ package roundrobin implements dynamic weighted round robin load balancer http handler\npackage roundrobin\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\n\t\"github.com\/vulcand\/oxy\/utils\"\n)\n\n\/\/ Weight is an optional functional argument that sets weight of the server\nfunc Weight(w int) ServerOption {\n\treturn func(s *server) error {\n\t\tif w < 0 {\n\t\t\treturn fmt.Errorf(\"Weight should be >= 0\")\n\t\t}\n\t\ts.weight = w\n\t\treturn nil\n\t}\n}\n\n\/\/ ErrorHandler is a functional argument that sets error handler of the server\nfunc ErrorHandler(h utils.ErrorHandler) LBOption {\n\treturn func(s *RoundRobin) error {\n\t\ts.errHandler = h\n\t\treturn nil\n\t}\n}\n\ntype RoundRobin struct {\n\tmutex *sync.Mutex\n\tnext http.Handler\n\terrHandler utils.ErrorHandler\n\t\/\/ Current index (starts from -1)\n\tindex int\n\tservers []*server\n\tcurrentWeight int\n}\n\nfunc New(next http.Handler, opts ...LBOption) (*RoundRobin, error) {\n\trr := &RoundRobin{\n\t\tnext: next,\n\t\tindex: -1,\n\t\tmutex: &sync.Mutex{},\n\t\tservers: []*server{},\n\t}\n\tfor _, o := range opts {\n\t\tif err := o(rr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif rr.errHandler == nil {\n\t\trr.errHandler = utils.DefaultHandler\n\t}\n\treturn rr, nil\n}\n\nfunc (r *RoundRobin) Next() http.Handler {\n\treturn r.next\n}\n\nfunc (r *RoundRobin) getCookieVal(req *http.Request) (*url.URL, bool, error) {\n\t\/\/ Before we move on to a new server, peek at our req cookie to see if we are bound.\n\t\/\/ If so, serve to them.\n\tcookie, err := req.Cookie(\"__STICKY_SVR\")\n\tswitch err {\n\tcase nil:\n\tcase http.ErrNoCookie:\n\t\treturn nil, false, nil\n\tdefault:\n\t\treturn nil, false, err\n\n\t}\n\n\ts_url, err := url.Parse(cookie.Value)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\ts, i := r.findServerByURL(s_url)\n\tif i != -1 {\n\t\treturn s.url, true, nil\n\t} else {\n\t\treturn nil, false, nil\n\t}\n\n}\nfunc (r *RoundRobin) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\t\/\/ make shallow copy of request before chaning anything to avoid side effects\n\tnewReq := *req\n\tcookie_url, present, err := r.getCookieVal(&newReq)\n\n\tif err != nil {\n\t\tr.errHandler.ServeHTTP(w, req, err)\n\t\treturn\n\t}\n\tif present {\n\t\tnewReq.URL = cookie_url\n\t} else {\n\t\turl, err := r.NextServer()\n\t\tif err != nil {\n\t\t\tr.errHandler.ServeHTTP(w, req, err)\n\t\t\treturn\n\t\t}\n\t\tnewReq.URL = url\n\t}\n\tr.next.ServeHTTP(w, &newReq)\n}\n\nfunc (r *RoundRobin) NextServer() (*url.URL, error) {\n\tsrv, err := r.nextServer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn utils.CopyURL(srv.url), nil\n}\n\nfunc (r *RoundRobin) nextServer() (*server, error) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif len(r.servers) == 0 {\n\t\treturn nil, fmt.Errorf(\"no servers in the pool\")\n\t}\n\n\t\/\/ The algo below may look messy, but is actually very simple\n\t\/\/ it calculates the GCD and subtracts it on every iteration, what interleaves servers\n\t\/\/ and allows us not to build an iterator every time we readjust weights\n\n\t\/\/ GCD across all enabled servers\n\tgcd := r.weightGcd()\n\t\/\/ Maximum weight across all enabled servers\n\tmax := r.maxWeight()\n\n\tfor {\n\t\tr.index = (r.index + 1) % len(r.servers)\n\t\tif r.index == 0 {\n\t\t\tr.currentWeight = r.currentWeight - gcd\n\t\t\tif r.currentWeight <= 0 {\n\t\t\t\tr.currentWeight = max\n\t\t\t\tif r.currentWeight == 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"all servers have 0 weight\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsrv := r.servers[r.index]\n\t\tif srv.weight >= r.currentWeight {\n\t\t\treturn srv, nil\n\t\t}\n\t}\n\t\/\/ We did full circle and found no available servers\n\treturn nil, fmt.Errorf(\"no available servers\")\n}\n\nfunc (r *RoundRobin) RemoveServer(u *url.URL) error {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\te, index := r.findServerByURL(u)\n\tif e == nil {\n\t\treturn fmt.Errorf(\"server not found\")\n\t}\n\tr.servers = append(r.servers[:index], r.servers[index+1:]...)\n\tr.resetState()\n\treturn nil\n}\n\nfunc (rr *RoundRobin) Servers() []*url.URL {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tout := make([]*url.URL, len(rr.servers))\n\tfor i, srv := range rr.servers {\n\t\tout[i] = srv.url\n\t}\n\treturn out\n}\n\nfunc (rr *RoundRobin) ServerWeight(u *url.URL) (int, bool) {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tif s, _ := rr.findServerByURL(u); s != nil {\n\t\treturn s.weight, true\n\t}\n\treturn -1, false\n}\n\n\/\/ In case if server is already present in the load balancer, returns error\nfunc (rr *RoundRobin) UpsertServer(u *url.URL, options ...ServerOption) error {\n\trr.mutex.Lock()\n\tdefer rr.mutex.Unlock()\n\n\tif u == nil {\n\t\treturn fmt.Errorf(\"server URL can't be nil\")\n\t}\n\n\tif s, _ := rr.findServerByURL(u); s != nil {\n\t\tfor _, o := range options {\n\t\t\tif err := o(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\trr.resetState()\n\t\treturn nil\n\t}\n\n\tsrv := &server{url: utils.CopyURL(u)}\n\tfor _, o := range options {\n\t\tif err := o(srv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif srv.weight == 0 {\n\t\tsrv.weight = defaultWeight\n\t}\n\n\trr.servers = append(rr.servers, srv)\n\trr.resetState()\n\treturn nil\n}\n\nfunc (r *RoundRobin) resetIterator() {\n\tr.index = -1\n\tr.currentWeight = 0\n}\n\nfunc (r *RoundRobin) resetState() {\n\tr.resetIterator()\n}\n\nfunc (r *RoundRobin) findServerByURL(u *url.URL) (*server, int) {\n\tif len(r.servers) == 0 {\n\t\treturn nil, -1\n\t}\n\tfor i, s := range r.servers {\n\t\tif sameURL(u, s.url) {\n\t\t\treturn s, i\n\t\t}\n\t}\n\treturn nil, -1\n}\n\nfunc (rr *RoundRobin) maxWeight() int {\n\tmax := -1\n\tfor _, s := range rr.servers {\n\t\tif s.weight > max {\n\t\t\tmax = s.weight\n\t\t}\n\t}\n\treturn max\n}\n\nfunc (rr *RoundRobin) weightGcd() int {\n\tdivisor := -1\n\tfor _, s := range rr.servers {\n\t\tif divisor == -1 {\n\t\t\tdivisor = s.weight\n\t\t} else {\n\t\t\tdivisor = gcd(divisor, s.weight)\n\t\t}\n\t}\n\treturn divisor\n}\n\nfunc gcd(a, b int) int {\n\tfor b != 0 {\n\t\ta, b = b, a%b\n\t}\n\treturn a\n}\n\n\/\/ ServerOption provides various options for server, e.g. weight\ntype ServerOption func(*server) error\n\n\/\/ LBOption provides options for load balancer\ntype LBOption func(*RoundRobin) error\n\n\/\/ Set additional parameters for the server can be supplied when adding server\ntype server struct {\n\turl *url.URL\n\t\/\/ Relative weight for the enpoint to other enpoints in the load balancer\n\tweight int\n}\n\nconst defaultWeight = 1\n\nfunc sameURL(a, b *url.URL) bool {\n\treturn a.Path == b.Path && a.Host == b.Host && a.Scheme == b.Scheme\n}\n\ntype balancerHandler interface {\n\tServers() []*url.URL\n\tServeHTTP(w http.ResponseWriter, req *http.Request)\n\tServerWeight(u *url.URL) (int, bool)\n\tRemoveServer(u *url.URL) error\n\tUpsertServer(u *url.URL, options ...ServerOption) error\n\tNextServer() (*url.URL, error)\n\tNext() http.Handler\n}\n<|endoftext|>"} {"text":"package router\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst GossipInterval = 30 * time.Second\n\ntype GossipData interface {\n\tEncode() []byte\n\tMerge(GossipData)\n}\n\ntype Gossip interface {\n\t\/\/ specific message from one peer to another\n\t\/\/ intermediate peers relay it using unicast topology.\n\tGossipUnicast(dstPeerName PeerName, msg []byte) error\n\t\/\/ send gossip to every peer, relayed using broadcast topology.\n\tGossipBroadcast(update GossipData) error\n}\n\ntype Gossiper interface {\n\tOnGossipUnicast(sender PeerName, msg []byte) error\n\t\/\/ merge received data into state and return a representation of\n\t\/\/ the received data, for further propagation\n\tOnGossipBroadcast(update []byte) (GossipData, error)\n\t\/\/ return state of everything we know; gets called periodically\n\tGossip() GossipData\n\t\/\/ merge received data into state and return \"everything new I've\n\t\/\/ just learnt\", or nil if nothing in the received data was new\n\tOnGossip(update []byte) (GossipData, error)\n}\n\n\/\/ Accumulates GossipData that needs to be sent to one destination,\n\/\/ and sends it when possible.\ntype GossipSender struct {\n\tsend func(GossipData)\n\tcell chan GossipData\n\tflushch chan chan struct{}\n}\n\nfunc NewGossipSender(send func(GossipData)) *GossipSender {\n\treturn &GossipSender{send: send}\n}\n\nfunc (sender *GossipSender) Start() {\n\tsender.cell = make(chan GossipData, 1)\n\tsender.flushch = make(chan chan struct{})\n\tgo sender.run()\n}\n\nfunc (sender *GossipSender) run() {\n\tfor {\n\t\tselect {\n\t\tcase pending := <-sender.cell:\n\t\t\tif pending == nil { \/\/ receive zero value when chan is closed\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsender.send(pending)\n\t\tcase ch := <-sender.flushch:\n\t\t\t\/\/ ensure anything pending is sent, then close the channel we were given\n\t\t\tselect {\n\t\t\tcase pending := <-sender.cell:\n\t\t\t\tif pending != nil {\n\t\t\t\t\tsender.send(pending)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\tclose(ch)\n\t\t}\n\t}\n}\n\nfunc (sender *GossipSender) Send(data GossipData) {\n\t\/\/ NB: this must not be invoked concurrently\n\tselect {\n\tcase pending := <-sender.cell:\n\t\tpending.Merge(data)\n\t\tsender.cell <- pending\n\tdefault:\n\t\tsender.cell <- data\n\t}\n}\n\nfunc (sender *GossipSender) Stop() {\n\tclose(sender.cell)\n}\n\ntype connectionSenders map[Connection]*GossipSender\ntype peerSenders map[PeerName]*GossipSender\n\ntype GossipChannel struct {\n\tsync.Mutex\n\tourself *LocalPeer\n\troutes *Routes\n\tname string\n\tgossiper Gossiper\n\tsenders connectionSenders\n\tbroadcasters peerSenders\n}\n\ntype GossipChannels map[string]*GossipChannel\n\nfunc (router *Router) NewGossip(channelName string, g Gossiper) Gossip {\n\tchannel := &GossipChannel{\n\t\tourself: router.Ourself,\n\t\troutes: router.Routes,\n\t\tname: channelName,\n\t\tgossiper: g,\n\t\tsenders: make(connectionSenders),\n\t\tbroadcasters: make(peerSenders)}\n\trouter.GossipChannels[channelName] = channel\n\treturn channel\n}\n\nfunc (router *Router) SendAllGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.Send(router.Ourself.Name, gossip)\n\t\t}\n\t}\n}\n\nfunc (router *Router) SendAllGossipDown(conn Connection) {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.SendDown(conn, channel.gossiper.Gossip())\n\t\t}\n\t}\n}\n\nfunc (router *Router) handleGossip(tag ProtocolTag, payload []byte) error {\n\tdecoder := gob.NewDecoder(bytes.NewReader(payload))\n\tvar channelName string\n\tif err := decoder.Decode(&channelName); err != nil {\n\t\treturn err\n\t}\n\tchannel, found := router.GossipChannels[channelName]\n\tif !found {\n\t\treturn fmt.Errorf(\"[gossip] received unknown channel with name %s\", channelName)\n\t}\n\tvar srcName PeerName\n\tif err := decoder.Decode(&srcName); err != nil {\n\t\treturn err\n\t}\n\tswitch tag {\n\tcase ProtocolGossipUnicast:\n\t\treturn channel.deliverUnicast(srcName, payload, decoder)\n\tcase ProtocolGossipBroadcast:\n\t\treturn channel.deliverBroadcast(srcName, payload, decoder)\n\tcase ProtocolGossip:\n\t\treturn channel.deliver(srcName, payload, decoder)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) deliverUnicast(srcName PeerName, origPayload []byte, dec *gob.Decoder) error {\n\tvar destName PeerName\n\tif err := dec.Decode(&destName); err != nil {\n\t\treturn err\n\t}\n\tif c.ourself.Name != destName {\n\t\treturn c.relayUnicast(destName, origPayload)\n\t}\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\treturn c.gossiper.OnGossipUnicast(srcName, payload)\n}\n\nfunc (c *GossipChannel) deliverBroadcast(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tdata, err := c.gossiper.OnGossipBroadcast(payload)\n\tif err != nil || data == nil {\n\t\treturn err\n\t}\n\treturn c.relayBroadcast(srcName, data)\n}\n\nfunc (c *GossipChannel) deliver(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tif data, err := c.gossiper.OnGossip(payload); err != nil {\n\t\treturn err\n\t} else if data != nil {\n\t\tc.Send(srcName, data)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) Send(srcName PeerName, data GossipData) {\n\t\/\/ do this outside the lock below so we avoid lock nesting\n\tc.routes.EnsureRecalculated()\n\tselectedConnections := make(ConnectionSet)\n\tfor name := range c.routes.RandomNeighbours(srcName) {\n\t\tif conn, found := c.ourself.ConnectionTo(name); found {\n\t\t\tselectedConnections[conn] = void\n\t\t}\n\t}\n\tif len(selectedConnections) == 0 {\n\t\treturn\n\t}\n\tconnections := c.ourself.Connections()\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing entries and stop&remove them if the associated\n\t\/\/ connection is no longer active. We stop as soon as we\n\t\/\/ encounter a valid entry; the idea being that when there is\n\t\/\/ little or no garbage then this executes close to O(1)[1],\n\t\/\/ whereas when there is lots of garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peer.ConnectionTo(name)\n\t\/\/ below, we have that Peer.Connections() invocation above. That\n\t\/\/ is O(n_our_connections) at best.\n\tfor conn, sender := range c.senders {\n\t\tif _, found := connections[conn]; !found {\n\t\t\tdelete(c.senders, conn)\n\t\t\tsender.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor conn := range selectedConnections {\n\t\tc.sendDown(conn, data)\n\t}\n}\n\nfunc (c *GossipChannel) SendDown(conn Connection, data GossipData) {\n\tc.Lock()\n\tc.sendDown(conn, data)\n\tc.Unlock()\n}\n\nfunc (c *GossipChannel) sendDown(conn Connection, data GossipData) {\n\tsender, found := c.senders[conn]\n\tif !found {\n\t\tsender = NewGossipSender(func(pending GossipData) {\n\t\t\tprotocolMsg := ProtocolMsg{ProtocolGossip, GobEncode(c.name, c.ourself.Name, pending.Encode())}\n\t\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t\t})\n\t\tc.senders[conn] = sender\n\t\tsender.Start()\n\t}\n\tsender.Send(data)\n}\n\nfunc (c *GossipChannel) GossipUnicast(dstPeerName PeerName, msg []byte) error {\n\treturn c.relayUnicast(dstPeerName, GobEncode(c.name, c.ourself.Name, dstPeerName, msg))\n}\n\nfunc (c *GossipChannel) GossipBroadcast(update GossipData) error {\n\treturn c.relayBroadcast(c.ourself.Name, update)\n}\n\nfunc (c *GossipChannel) relayUnicast(dstPeerName PeerName, buf []byte) error {\n\tif relayPeerName, found := c.routes.UnicastAll(dstPeerName); !found {\n\t\tc.log(\"unknown relay destination:\", dstPeerName)\n\t} else if conn, found := c.ourself.ConnectionTo(relayPeerName); !found {\n\t\tc.log(\"unable to find connection to relay peer\", relayPeerName)\n\t} else {\n\t\tconn.(ProtocolSender).SendProtocolMsg(ProtocolMsg{ProtocolGossipUnicast, buf})\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) relayBroadcast(srcName PeerName, update GossipData) error {\n\tnames := c.routes.PeerNames() \/\/ do this outside the lock so they don't nest\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing broadcasters and stop&remove them if their source peer\n\t\/\/ is unknown. We stop as soon as we encounter a valid entry; the\n\t\/\/ idea being that when there is little or no garbage then this\n\t\/\/ executes close to O(1)[1], whereas when there is lots of\n\t\/\/ garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peers.Fetch(name) below, we\n\t\/\/ have that Peers.Names() invocation above. That is O(n_peers) at\n\t\/\/ best.\n\tfor name, broadcaster := range c.broadcasters {\n\t\tif _, found := names[name]; !found {\n\t\t\tdelete(c.broadcasters, name)\n\t\t\tbroadcaster.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tbroadcaster, found := c.broadcasters[srcName]\n\tif !found {\n\t\tbroadcaster = NewGossipSender(func(pending GossipData) { c.sendBroadcast(srcName, pending) })\n\t\tc.broadcasters[srcName] = broadcaster\n\t\tbroadcaster.Start()\n\t}\n\tbroadcaster.Send(update)\n\treturn nil\n}\n\nfunc (c *GossipChannel) sendBroadcast(srcName PeerName, update GossipData) {\n\tc.routes.EnsureRecalculated()\n\tnextHops := c.routes.BroadcastAll(srcName)\n\tif len(nextHops) == 0 {\n\t\treturn\n\t}\n\tprotocolMsg := ProtocolMsg{ProtocolGossipBroadcast, GobEncode(c.name, srcName, update.Encode())}\n\t\/\/ FIXME a single blocked connection can stall us\n\tfor _, conn := range c.ourself.ConnectionsTo(nextHops) {\n\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t}\n}\n\nfunc (c *GossipChannel) log(args ...interface{}) {\n\tlog.Println(append(append([]interface{}{}, \"[gossip \"+c.name+\"]:\"), args...)...)\n}\n\n\/\/ for testing\n\nfunc (router *Router) sendPendingGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tchannel.Lock()\n\t\tfor _, sender := range channel.senders {\n\t\t\tch := make(chan struct{})\n\t\t\tsender.flushch <- ch\n\t\t\t<-ch\n\t\t}\n\t\tfor _, sender := range channel.broadcasters {\n\t\t\tch := make(chan struct{})\n\t\t\tsender.flushch <- ch\n\t\t\t<-ch\n\t\t}\n\t\tchannel.Unlock()\n\t}\n}\nFactor out GossipSender.flush()package router\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst GossipInterval = 30 * time.Second\n\ntype GossipData interface {\n\tEncode() []byte\n\tMerge(GossipData)\n}\n\ntype Gossip interface {\n\t\/\/ specific message from one peer to another\n\t\/\/ intermediate peers relay it using unicast topology.\n\tGossipUnicast(dstPeerName PeerName, msg []byte) error\n\t\/\/ send gossip to every peer, relayed using broadcast topology.\n\tGossipBroadcast(update GossipData) error\n}\n\ntype Gossiper interface {\n\tOnGossipUnicast(sender PeerName, msg []byte) error\n\t\/\/ merge received data into state and return a representation of\n\t\/\/ the received data, for further propagation\n\tOnGossipBroadcast(update []byte) (GossipData, error)\n\t\/\/ return state of everything we know; gets called periodically\n\tGossip() GossipData\n\t\/\/ merge received data into state and return \"everything new I've\n\t\/\/ just learnt\", or nil if nothing in the received data was new\n\tOnGossip(update []byte) (GossipData, error)\n}\n\n\/\/ Accumulates GossipData that needs to be sent to one destination,\n\/\/ and sends it when possible.\ntype GossipSender struct {\n\tsend func(GossipData)\n\tcell chan GossipData\n\tflushch chan chan struct{}\n}\n\nfunc NewGossipSender(send func(GossipData)) *GossipSender {\n\treturn &GossipSender{send: send}\n}\n\nfunc (sender *GossipSender) Start() {\n\tsender.cell = make(chan GossipData, 1)\n\tsender.flushch = make(chan chan struct{})\n\tgo sender.run()\n}\n\nfunc (sender *GossipSender) run() {\n\tfor {\n\t\tselect {\n\t\tcase pending := <-sender.cell:\n\t\t\tif pending == nil { \/\/ receive zero value when chan is closed\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsender.send(pending)\n\t\tcase ch := <-sender.flushch:\n\t\t\t\/\/ ensure anything pending is sent, then close the channel we were given\n\t\t\tselect {\n\t\t\tcase pending := <-sender.cell:\n\t\t\t\tif pending != nil {\n\t\t\t\t\tsender.send(pending)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\tclose(ch)\n\t\t}\n\t}\n}\n\nfunc (sender *GossipSender) Send(data GossipData) {\n\t\/\/ NB: this must not be invoked concurrently\n\tselect {\n\tcase pending := <-sender.cell:\n\t\tpending.Merge(data)\n\t\tsender.cell <- pending\n\tdefault:\n\t\tsender.cell <- data\n\t}\n}\n\nfunc (sender *GossipSender) Stop() {\n\tclose(sender.cell)\n}\n\ntype connectionSenders map[Connection]*GossipSender\ntype peerSenders map[PeerName]*GossipSender\n\ntype GossipChannel struct {\n\tsync.Mutex\n\tourself *LocalPeer\n\troutes *Routes\n\tname string\n\tgossiper Gossiper\n\tsenders connectionSenders\n\tbroadcasters peerSenders\n}\n\ntype GossipChannels map[string]*GossipChannel\n\nfunc (router *Router) NewGossip(channelName string, g Gossiper) Gossip {\n\tchannel := &GossipChannel{\n\t\tourself: router.Ourself,\n\t\troutes: router.Routes,\n\t\tname: channelName,\n\t\tgossiper: g,\n\t\tsenders: make(connectionSenders),\n\t\tbroadcasters: make(peerSenders)}\n\trouter.GossipChannels[channelName] = channel\n\treturn channel\n}\n\nfunc (router *Router) SendAllGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.Send(router.Ourself.Name, gossip)\n\t\t}\n\t}\n}\n\nfunc (router *Router) SendAllGossipDown(conn Connection) {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.SendDown(conn, channel.gossiper.Gossip())\n\t\t}\n\t}\n}\n\nfunc (router *Router) handleGossip(tag ProtocolTag, payload []byte) error {\n\tdecoder := gob.NewDecoder(bytes.NewReader(payload))\n\tvar channelName string\n\tif err := decoder.Decode(&channelName); err != nil {\n\t\treturn err\n\t}\n\tchannel, found := router.GossipChannels[channelName]\n\tif !found {\n\t\treturn fmt.Errorf(\"[gossip] received unknown channel with name %s\", channelName)\n\t}\n\tvar srcName PeerName\n\tif err := decoder.Decode(&srcName); err != nil {\n\t\treturn err\n\t}\n\tswitch tag {\n\tcase ProtocolGossipUnicast:\n\t\treturn channel.deliverUnicast(srcName, payload, decoder)\n\tcase ProtocolGossipBroadcast:\n\t\treturn channel.deliverBroadcast(srcName, payload, decoder)\n\tcase ProtocolGossip:\n\t\treturn channel.deliver(srcName, payload, decoder)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) deliverUnicast(srcName PeerName, origPayload []byte, dec *gob.Decoder) error {\n\tvar destName PeerName\n\tif err := dec.Decode(&destName); err != nil {\n\t\treturn err\n\t}\n\tif c.ourself.Name != destName {\n\t\treturn c.relayUnicast(destName, origPayload)\n\t}\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\treturn c.gossiper.OnGossipUnicast(srcName, payload)\n}\n\nfunc (c *GossipChannel) deliverBroadcast(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tdata, err := c.gossiper.OnGossipBroadcast(payload)\n\tif err != nil || data == nil {\n\t\treturn err\n\t}\n\treturn c.relayBroadcast(srcName, data)\n}\n\nfunc (c *GossipChannel) deliver(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tif data, err := c.gossiper.OnGossip(payload); err != nil {\n\t\treturn err\n\t} else if data != nil {\n\t\tc.Send(srcName, data)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) Send(srcName PeerName, data GossipData) {\n\t\/\/ do this outside the lock below so we avoid lock nesting\n\tc.routes.EnsureRecalculated()\n\tselectedConnections := make(ConnectionSet)\n\tfor name := range c.routes.RandomNeighbours(srcName) {\n\t\tif conn, found := c.ourself.ConnectionTo(name); found {\n\t\t\tselectedConnections[conn] = void\n\t\t}\n\t}\n\tif len(selectedConnections) == 0 {\n\t\treturn\n\t}\n\tconnections := c.ourself.Connections()\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing entries and stop&remove them if the associated\n\t\/\/ connection is no longer active. We stop as soon as we\n\t\/\/ encounter a valid entry; the idea being that when there is\n\t\/\/ little or no garbage then this executes close to O(1)[1],\n\t\/\/ whereas when there is lots of garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peer.ConnectionTo(name)\n\t\/\/ below, we have that Peer.Connections() invocation above. That\n\t\/\/ is O(n_our_connections) at best.\n\tfor conn, sender := range c.senders {\n\t\tif _, found := connections[conn]; !found {\n\t\t\tdelete(c.senders, conn)\n\t\t\tsender.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor conn := range selectedConnections {\n\t\tc.sendDown(conn, data)\n\t}\n}\n\nfunc (c *GossipChannel) SendDown(conn Connection, data GossipData) {\n\tc.Lock()\n\tc.sendDown(conn, data)\n\tc.Unlock()\n}\n\nfunc (c *GossipChannel) sendDown(conn Connection, data GossipData) {\n\tsender, found := c.senders[conn]\n\tif !found {\n\t\tsender = NewGossipSender(func(pending GossipData) {\n\t\t\tprotocolMsg := ProtocolMsg{ProtocolGossip, GobEncode(c.name, c.ourself.Name, pending.Encode())}\n\t\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t\t})\n\t\tc.senders[conn] = sender\n\t\tsender.Start()\n\t}\n\tsender.Send(data)\n}\n\nfunc (c *GossipChannel) GossipUnicast(dstPeerName PeerName, msg []byte) error {\n\treturn c.relayUnicast(dstPeerName, GobEncode(c.name, c.ourself.Name, dstPeerName, msg))\n}\n\nfunc (c *GossipChannel) GossipBroadcast(update GossipData) error {\n\treturn c.relayBroadcast(c.ourself.Name, update)\n}\n\nfunc (c *GossipChannel) relayUnicast(dstPeerName PeerName, buf []byte) error {\n\tif relayPeerName, found := c.routes.UnicastAll(dstPeerName); !found {\n\t\tc.log(\"unknown relay destination:\", dstPeerName)\n\t} else if conn, found := c.ourself.ConnectionTo(relayPeerName); !found {\n\t\tc.log(\"unable to find connection to relay peer\", relayPeerName)\n\t} else {\n\t\tconn.(ProtocolSender).SendProtocolMsg(ProtocolMsg{ProtocolGossipUnicast, buf})\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) relayBroadcast(srcName PeerName, update GossipData) error {\n\tnames := c.routes.PeerNames() \/\/ do this outside the lock so they don't nest\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing broadcasters and stop&remove them if their source peer\n\t\/\/ is unknown. We stop as soon as we encounter a valid entry; the\n\t\/\/ idea being that when there is little or no garbage then this\n\t\/\/ executes close to O(1)[1], whereas when there is lots of\n\t\/\/ garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peers.Fetch(name) below, we\n\t\/\/ have that Peers.Names() invocation above. That is O(n_peers) at\n\t\/\/ best.\n\tfor name, broadcaster := range c.broadcasters {\n\t\tif _, found := names[name]; !found {\n\t\t\tdelete(c.broadcasters, name)\n\t\t\tbroadcaster.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tbroadcaster, found := c.broadcasters[srcName]\n\tif !found {\n\t\tbroadcaster = NewGossipSender(func(pending GossipData) { c.sendBroadcast(srcName, pending) })\n\t\tc.broadcasters[srcName] = broadcaster\n\t\tbroadcaster.Start()\n\t}\n\tbroadcaster.Send(update)\n\treturn nil\n}\n\nfunc (c *GossipChannel) sendBroadcast(srcName PeerName, update GossipData) {\n\tc.routes.EnsureRecalculated()\n\tnextHops := c.routes.BroadcastAll(srcName)\n\tif len(nextHops) == 0 {\n\t\treturn\n\t}\n\tprotocolMsg := ProtocolMsg{ProtocolGossipBroadcast, GobEncode(c.name, srcName, update.Encode())}\n\t\/\/ FIXME a single blocked connection can stall us\n\tfor _, conn := range c.ourself.ConnectionsTo(nextHops) {\n\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t}\n}\n\nfunc (c *GossipChannel) log(args ...interface{}) {\n\tlog.Println(append(append([]interface{}{}, \"[gossip \"+c.name+\"]:\"), args...)...)\n}\n\n\/\/ for testing\n\nfunc (router *Router) sendPendingGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tchannel.Lock()\n\t\tfor _, sender := range channel.senders {\n\t\t\tsender.flush()\n\t\t}\n\t\tfor _, sender := range channel.broadcasters {\n\t\t\tsender.flush()\n\t\t}\n\t\tchannel.Unlock()\n\t}\n}\n\nfunc (sender *GossipSender) flush() {\n\tch := make(chan struct{})\n\tsender.flushch <- ch\n\t<-ch\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2015 The ContainerOps Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage router\n\nimport (\n\t\"gopkg.in\/macaron.v1\"\n\n\t\"github.com\/containerops\/dockyard\/handler\"\n)\n\n\/\/ Dockyard Router Definition\nfunc SetRouters(m *macaron.Macaron) {\n\t\/\/ Web API\n\tm.Get(\"\/\", handler.IndexV1Handler)\n\n\t\/\/ Docker Registry V1\n\tm.Group(\"\/v1\", func() {\n\t\tm.Get(\"\/_ping\", handler.GetPingV1Handler)\n\n\t\tm.Get(\"\/users\", handler.GetUsersV1Handler)\n\t\tm.Post(\"\/users\", handler.PostUsersV1Handler)\n\n\t\tm.Group(\"\/repositories\", func() {\n\t\t\tm.Put(\"\/:namespace\/:repository\/tags\/:tag\", handler.PutTagV1Handler)\n\t\t\tm.Put(\"\/:namespace\/:repository\/images\", handler.PutRepositoryImagesV1Handler)\n\t\t\tm.Get(\"\/:namespace\/:repository\/images\", handler.GetRepositoryImagesV1Handler)\n\t\t\tm.Get(\"\/:namespace\/:repository\/tags\", handler.GetTagV1Handler)\n\t\t\tm.Put(\"\/:namespace\/:repository\", handler.PutRepositoryV1Handler)\n\t\t})\n\n\t\tm.Group(\"\/images\", func() {\n\t\t\tm.Get(\"\/:image\/ancestry\", handler.GetImageAncestryV1Handler)\n\t\t\tm.Get(\"\/:image\/json\", handler.GetImageJSONV1Handler)\n\t\t\tm.Get(\"\/:image\/layer\", handler.GetImageLayerV1Handler)\n\t\t\tm.Put(\"\/:image\/json\", handler.PutImageJSONV1Handler)\n\t\t\tm.Put(\"\/:image\/layer\", handler.PutImageLayerV1Handler)\n\t\t\tm.Put(\"\/:image\/checksum\", handler.PutImageChecksumV1Handler)\n\t\t})\n\t})\n\n\t\/\/ Docker Registry V2\n\tm.Group(\"\/v2\", func() {\n\t\tm.Get(\"\/\", handler.GetPingV2Handler)\n\t\tm.Get(\"\/_catalog\", handler.GetCatalogV2Handler)\n\n\t\t\/\/ user mode: \/namespace\/repository:tag\n\t\tm.Head(\"\/:namespace\/:repository\/blobs\/:digest\", handler.HeadBlobsV2Handler)\n\t\tm.Post(\"\/:namespace\/:repository\/blobs\/uploads\", handler.PostBlobsV2Handler)\n\t\tm.Patch(\"\/:namespace\/:repository\/blobs\/uploads\/:uuid\", handler.PatchBlobsV2Handler)\n\t\tm.Put(\"\/:namespace\/:repository\/blobs\/uploads\/:uuid\", handler.PutBlobsV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/blobs\/:digest\", handler.GetBlobsV2Handler)\n\t\tm.Put(\"\/:namespace\/:repository\/manifests\/:tag\", handler.PutManifestsV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/tags\/list\", handler.GetTagsListV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/manifests\/:tag\", handler.GetManifestsV2Handler)\n\t\tm.Delete(\"\/:namespace\/:repository\/blobs\/:digest\", handler.DeleteBlobsV2Handler)\n\t\tm.Delete(\"\/:namespace\/:repository\/manifests\/:reference\", handler.DeleteManifestsV2Handler)\n\n\t\t\/\/ library mode: \/repository:tag\n\t\tm.Head(\"\/:repository\/blobs\/:digest\", handler.HeadBlobsV2LibraryHandler)\n\t\tm.Post(\"\/:repository\/blobs\/uploads\", handler.PostBlobsV2LibraryHandler)\n\t\tm.Patch(\"\/:repository\/blobs\/uploads\/:uuid\", handler.PatchBlobsV2LibraryHandler)\n\t\tm.Put(\"\/:repository\/blobs\/uploads\/:uuid\", handler.PutBlobsV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/blobs\/:digest\", handler.GetBlobsV2LibraryHandler)\n\t\tm.Put(\"\/:repository\/manifests\/:tag\", handler.PutManifestsV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/tags\/list\", handler.GetTagsListV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/manifests\/:tag\", handler.GetManifestsV2LibraryHandler)\n\t\tm.Delete(\"\/:repository\/blobs\/:digest\", handler.DeleteBlobsV2LibraryHandler)\n\t\tm.Delete(\"\/:repository\/manifests\/:reference\", handler.DeleteManifestsV2LibraryHandler)\n\t})\n\n\t\/\/ App Discovery\n\tm.Group(\"\/app\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Global Search\n\t\t\tm.Get(\"\/search\", handler.AppGlobalSearchV1Handler)\n\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?app-discovery=1\", handler.AppDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Scoped Search\n\t\t\t\tm.Get(\"\/search\", handler.AppScopedSearchV1Handler)\n\t\t\t\tm.Get(\"\/list\", handler.AppGetListAppV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/:os\/:arch\/:app\", handler.AppGetFileV1Handler)\n\n\t\t\t\t\/\/ Push\n\t\t\t\tm.Post(\"\/\", handler.AppPostV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:app\/:tag\", handler.AppPutFileV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:app\/:tag\/manifests\", handler.AppPutManifestV1Handler)\n\t\t\t\tm.Patch(\"\/:os\/:arch\/:app\/:tag\/:status\", handler.AppPatchFileV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:app\/:tag\", handler.AppDeleteFileByTagV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:app\", handler.AppDeleteFileV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Appc Discovery\n\tm.Group(\"\/appc\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?ac-discovery=1\", handler.AppcDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/pubkeys\", handler.AppcGetPubkeysV1Handler)\n\t\t\t\tm.Get(\"\/:os\/:arch\/:aci\", handler.AppcGetACIV1Handler)\n\t\t\t})\n\n\t\t})\n\t})\n\n\t\/\/ VM Image Discovery\n\tm.Group(\"\/image\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Global Search\n\t\t\tm.Get(\"\/search\", handler.ImageGlobalSearchV1Handler)\n\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?image-discovery=1\", handler.ImageDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Scoped Search\n\t\t\t\tm.Get(\"\/search\", handler.ImageScopedSearchV1Handler)\n\t\t\t\tm.Get(\"\/list\", handler.ImageGetListV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/:os\/:arch\/:image\", handler.ImageGetFileV1Handler)\n\n\t\t\t\t\/\/ Push\n\t\t\t\tm.Post(\"\/\", handler.ImagePostV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:image\/:tag\", handler.ImagePutFileV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:image\/:tag\/manifests\", handler.ImagePutManifestV1Handler)\n\t\t\t\tm.Patch(\"\/:os\/:arch\/:image\/:tag\/:status\", handler.ImagePatchFileV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:image\/:tag\", handler.ImageDeleteFileByTagV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:image\", handler.ImageDeleteFileV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Sync APIS\n\tm.Group(\"\/sync\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Server Ping\n\t\t\tm.Get(\"\/ping\", handler.SyncGetPingV1Handler)\n\n\t\t\tm.Group(\"\/master\", func() {\n\t\t\t\t\/\/ Server Sync Of Master\n\t\t\t\tm.Post(\"\/registry\", handler.SyncMasterPostRegistryV1Handler)\n\t\t\t\tm.Delete(\"\/registry\", handler.SyncMasterDeleteRegistryV1Handler)\n\n\t\t\t\tm.Put(\"\/mode\", handler.SyncMasterPutModeRegistryV1Handler)\n\t\t\t})\n\n\t\t\tm.Group(\"\/slave\", func() {\n\t\t\t\t\/\/ Server Sync Of Slaver\n\t\t\t\tm.Post(\"\/registry\", handler.SyncSlavePostRegistryV1Handler)\n\t\t\t\tm.Put(\"\/registry\", handler.SyncSlavePutRegistryV1Handler)\n\t\t\t\tm.Delete(\"\/registry\", handler.SyncSlaveDeleteRegistryV1Handler)\n\n\t\t\t\tm.Put(\"\/mode\", handler.SyncSlavePutModeRegistryV1Handler)\n\n\t\t\t\t\/\/ Data Sync\n\t\t\t\tm.Get(\"\/list\", handler.SyncSlaveListDataV1Handler)\n\n\t\t\t\t\/\/ File Sync\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/manifests\", handler.SyncSlavePutManifestsV1Handler)\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/file\", handler.SyncSlavePutFileV1Handler)\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/:status\", handler.SyncSlavePutStatusV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Admin APIs\n\tm.Group(\"\/admin\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Server Status\n\t\t\tm.Get(\"\/stats\/:type\", handler.AdminGetStatusV1Handler)\n\n\t\t\t\/\/ Server Config\n\t\t\tm.Get(\"\/config\", handler.AdminGetConfigV1Handler)\n\t\t\tm.Put(\"\/config\", handler.AdminSetConfigV1Handler)\n\n\t\t\t\/\/ Maintenance\n\t\t\tm.Post(\"\/maintenance\", handler.AdminPostMaintenance)\n\t\t})\n\t})\n}\nChange tag param to optional match in router.\/*\nCopyright 2015 The ContainerOps Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage router\n\nimport (\n\t\"gopkg.in\/macaron.v1\"\n\n\t\"github.com\/containerops\/dockyard\/handler\"\n)\n\n\/\/ Dockyard Router Definition\nfunc SetRouters(m *macaron.Macaron) {\n\t\/\/ Web API\n\tm.Get(\"\/\", handler.IndexV1Handler)\n\n\t\/\/ Docker Registry V1\n\tm.Group(\"\/v1\", func() {\n\t\tm.Get(\"\/_ping\", handler.GetPingV1Handler)\n\n\t\tm.Get(\"\/users\", handler.GetUsersV1Handler)\n\t\tm.Post(\"\/users\", handler.PostUsersV1Handler)\n\n\t\tm.Group(\"\/repositories\", func() {\n\t\t\tm.Put(\"\/:namespace\/:repository\/tags\/:tag\", handler.PutTagV1Handler)\n\t\t\tm.Put(\"\/:namespace\/:repository\/images\", handler.PutRepositoryImagesV1Handler)\n\t\t\tm.Get(\"\/:namespace\/:repository\/images\", handler.GetRepositoryImagesV1Handler)\n\t\t\tm.Get(\"\/:namespace\/:repository\/tags\", handler.GetTagV1Handler)\n\t\t\tm.Put(\"\/:namespace\/:repository\", handler.PutRepositoryV1Handler)\n\t\t})\n\n\t\tm.Group(\"\/images\", func() {\n\t\t\tm.Get(\"\/:image\/ancestry\", handler.GetImageAncestryV1Handler)\n\t\t\tm.Get(\"\/:image\/json\", handler.GetImageJSONV1Handler)\n\t\t\tm.Get(\"\/:image\/layer\", handler.GetImageLayerV1Handler)\n\t\t\tm.Put(\"\/:image\/json\", handler.PutImageJSONV1Handler)\n\t\t\tm.Put(\"\/:image\/layer\", handler.PutImageLayerV1Handler)\n\t\t\tm.Put(\"\/:image\/checksum\", handler.PutImageChecksumV1Handler)\n\t\t})\n\t})\n\n\t\/\/ Docker Registry V2\n\tm.Group(\"\/v2\", func() {\n\t\tm.Get(\"\/\", handler.GetPingV2Handler)\n\t\tm.Get(\"\/_catalog\", handler.GetCatalogV2Handler)\n\n\t\t\/\/ user mode: \/namespace\/repository:tag\n\t\tm.Head(\"\/:namespace\/:repository\/blobs\/:digest\", handler.HeadBlobsV2Handler)\n\t\tm.Post(\"\/:namespace\/:repository\/blobs\/uploads\", handler.PostBlobsV2Handler)\n\t\tm.Patch(\"\/:namespace\/:repository\/blobs\/uploads\/:uuid\", handler.PatchBlobsV2Handler)\n\t\tm.Put(\"\/:namespace\/:repository\/blobs\/uploads\/:uuid\", handler.PutBlobsV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/blobs\/:digest\", handler.GetBlobsV2Handler)\n\t\tm.Put(\"\/:namespace\/:repository\/manifests\/:tag\", handler.PutManifestsV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/tags\/list\", handler.GetTagsListV2Handler)\n\t\tm.Get(\"\/:namespace\/:repository\/manifests\/:tag\", handler.GetManifestsV2Handler)\n\t\tm.Delete(\"\/:namespace\/:repository\/blobs\/:digest\", handler.DeleteBlobsV2Handler)\n\t\tm.Delete(\"\/:namespace\/:repository\/manifests\/:reference\", handler.DeleteManifestsV2Handler)\n\n\t\t\/\/ library mode: \/repository:tag\n\t\tm.Head(\"\/:repository\/blobs\/:digest\", handler.HeadBlobsV2LibraryHandler)\n\t\tm.Post(\"\/:repository\/blobs\/uploads\", handler.PostBlobsV2LibraryHandler)\n\t\tm.Patch(\"\/:repository\/blobs\/uploads\/:uuid\", handler.PatchBlobsV2LibraryHandler)\n\t\tm.Put(\"\/:repository\/blobs\/uploads\/:uuid\", handler.PutBlobsV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/blobs\/:digest\", handler.GetBlobsV2LibraryHandler)\n\t\tm.Put(\"\/:repository\/manifests\/:tag\", handler.PutManifestsV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/tags\/list\", handler.GetTagsListV2LibraryHandler)\n\t\tm.Get(\"\/:repository\/manifests\/:tag\", handler.GetManifestsV2LibraryHandler)\n\t\tm.Delete(\"\/:repository\/blobs\/:digest\", handler.DeleteBlobsV2LibraryHandler)\n\t\tm.Delete(\"\/:repository\/manifests\/:reference\", handler.DeleteManifestsV2LibraryHandler)\n\t})\n\n\t\/\/ App Discovery\n\tm.Group(\"\/app\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Global Search\n\t\t\tm.Get(\"\/search\", handler.AppGlobalSearchV1Handler)\n\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?app-discovery=1\", handler.AppDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Scoped Search\n\t\t\t\tm.Get(\"\/search\/:?tag\", handler.AppScopedSearchV1Handler)\n\t\t\t\tm.Get(\"\/list\", handler.AppGetListAppV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/:os\/:arch\/:app\/?:tag\", handler.AppGetFileV1Handler)\n\n\t\t\t\t\/\/ Push\n\t\t\t\tm.Post(\"\/\", handler.AppPostV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:app\/?:tag\", handler.AppPutFileV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:app\/manifests\/?:tag\", handler.AppPutManifestV1Handler)\n\t\t\t\tm.Patch(\"\/:os\/:arch\/:app\/:status\/?:tag\", handler.AppPatchFileV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:app\/?:tag\", handler.AppDeleteFileByTagV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:app\/?:tag\", handler.AppDeleteFileV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Appc Discovery\n\tm.Group(\"\/appc\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?ac-discovery=1\", handler.AppcDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/pubkeys\", handler.AppcGetPubkeysV1Handler)\n\t\t\t\tm.Get(\"\/:os\/:arch\/:aci\", handler.AppcGetACIV1Handler)\n\t\t\t})\n\n\t\t})\n\t})\n\n\t\/\/ VM Image Discovery\n\tm.Group(\"\/image\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Global Search\n\t\t\tm.Get(\"\/search\", handler.ImageGlobalSearchV1Handler)\n\n\t\t\tm.Group(\"\/:namespace\/:repository\", func() {\n\t\t\t\t\/\/ Discovery\n\t\t\t\tm.Get(\"\/?image-discovery=1\", handler.ImageDiscoveryV1Handler)\n\n\t\t\t\t\/\/ Scoped Search\n\t\t\t\tm.Get(\"\/search\/:?tag\", handler.ImageScopedSearchV1Handler)\n\t\t\t\tm.Get(\"\/list\", handler.ImageGetListV1Handler)\n\n\t\t\t\t\/\/ Pull\n\t\t\t\tm.Get(\"\/:os\/:arch\/:image\/:?tag\", handler.ImageGetFileV1Handler)\n\n\t\t\t\t\/\/ Push\n\t\t\t\tm.Post(\"\/\", handler.ImagePostV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:image\/?:tag\", handler.ImagePutFileV1Handler)\n\t\t\t\tm.Put(\"\/:os\/:arch\/:image\/manifests\/?:tag\", handler.ImagePutManifestV1Handler)\n\t\t\t\tm.Patch(\"\/:os\/:arch\/:image\/:status\/?:tag\", handler.ImagePatchFileV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:image\/?:tag\", handler.ImageDeleteFileByTagV1Handler)\n\t\t\t\tm.Delete(\"\/:os\/:arch\/:image\", handler.ImageDeleteFileV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Sync APIS\n\tm.Group(\"\/sync\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Server Ping\n\t\t\tm.Get(\"\/ping\", handler.SyncGetPingV1Handler)\n\n\t\t\tm.Group(\"\/master\", func() {\n\t\t\t\t\/\/ Server Sync Of Master\n\t\t\t\tm.Post(\"\/registry\", handler.SyncMasterPostRegistryV1Handler)\n\t\t\t\tm.Delete(\"\/registry\", handler.SyncMasterDeleteRegistryV1Handler)\n\n\t\t\t\tm.Put(\"\/mode\", handler.SyncMasterPutModeRegistryV1Handler)\n\t\t\t})\n\n\t\t\tm.Group(\"\/slave\", func() {\n\t\t\t\t\/\/ Server Sync Of Slaver\n\t\t\t\tm.Post(\"\/registry\", handler.SyncSlavePostRegistryV1Handler)\n\t\t\t\tm.Put(\"\/registry\", handler.SyncSlavePutRegistryV1Handler)\n\t\t\t\tm.Delete(\"\/registry\", handler.SyncSlaveDeleteRegistryV1Handler)\n\n\t\t\t\tm.Put(\"\/mode\", handler.SyncSlavePutModeRegistryV1Handler)\n\n\t\t\t\t\/\/ Data Sync\n\t\t\t\tm.Get(\"\/list\", handler.SyncSlaveListDataV1Handler)\n\n\t\t\t\t\/\/ File Sync\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/manifests\", handler.SyncSlavePutManifestsV1Handler)\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/file\", handler.SyncSlavePutFileV1Handler)\n\t\t\t\tm.Put(\"\/:namespace\/:repository\/:status\", handler.SyncSlavePutStatusV1Handler)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/\/ Admin APIs\n\tm.Group(\"\/admin\", func() {\n\t\tm.Group(\"\/v1\", func() {\n\t\t\t\/\/ Server Status\n\t\t\tm.Get(\"\/stats\/:type\", handler.AdminGetStatusV1Handler)\n\n\t\t\t\/\/ Server Config\n\t\t\tm.Get(\"\/config\", handler.AdminGetConfigV1Handler)\n\t\t\tm.Put(\"\/config\", handler.AdminSetConfigV1Handler)\n\n\t\t\t\/\/ Maintenance\n\t\t\tm.Post(\"\/maintenance\", handler.AdminPostMaintenance)\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"package router\n\nimport (\n\t\"encoding\/base32\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/NyaaPantsu\/nyaa\/cache\"\n\t\"github.com\/NyaaPantsu\/nyaa\/config\"\n\t\"github.com\/NyaaPantsu\/nyaa\/service\/upload\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\/categories\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\/metainfo\"\n\t\"github.com\/microcosm-cc\/bluemonday\"\n\t\"github.com\/zeebo\/bencode\"\n)\n\n\/\/ Use this, because we seem to avoid using models, and we would need\n\/\/ the torrent ID to create the File in the DB\ntype UploadedFile struct {\n\tPath []string\n\tFilesize int64\n}\n\n\/\/ UploadForm serializing HTTP form for torrent upload\ntype UploadForm struct {\n\tName string\n\tMagnet string\n\tCategory string\n\tRemake bool\n\tDescription string\n\tStatus int\n\tCaptchaID string\n\tWebsiteLink string\n\n\tInfohash string\n\tCategoryID int\n\tSubCategoryID int\n\tFilesize int64\n\tFilepath string\n\tFileList []UploadedFile\n}\n\n\/\/ TODO: these should be in another package (?)\n\n\/\/ form names\nconst UploadFormName = \"name\"\nconst UploadFormTorrent = \"torrent\"\nconst UploadFormMagnet = \"magnet\"\nconst UploadFormCategory = \"c\"\nconst UploadFormRemake = \"remake\"\nconst UploadFormDescription = \"desc\"\nconst UploadFormWebsiteLink = \"website_link\"\nconst UploadFormStatus = \"status\"\n\n\/\/ error indicating that you can't send both a magnet link and torrent\nvar ErrTorrentPlusMagnet = errors.New(\"Upload either a torrent file or magnet link, not both\")\n\n\/\/ error indicating a torrent is private\nvar ErrPrivateTorrent = errors.New(\"Torrent is private\")\n\n\/\/ error indicating a problem with its trackers\nvar ErrTrackerProblem = errors.New(\"Torrent does not have any (working) trackers: https:\/\/\" + config.WebAddress + \"\/faq#trackers\")\n\n\/\/ error indicating a torrent's name is invalid\nvar ErrInvalidTorrentName = errors.New(\"Torrent name is invalid\")\n\n\/\/ error indicating a torrent's description is invalid\nvar ErrInvalidTorrentDescription = errors.New(\"Torrent description is invalid\")\n\n\/\/ error indicating a torrent's description is invalid\nvar ErrInvalidWebsiteLink = errors.New(\"Website url or IRC link is invalid\")\n\n\/\/ error indicating a torrent's category is invalid\nvar ErrInvalidTorrentCategory = errors.New(\"Torrent category is invalid\")\n\nvar p = bluemonday.UGCPolicy()\n\n\/**\nUploadForm.ExtractInfo takes an http request and computes all fields for this form\n*\/\nfunc (f *UploadForm) ExtractInfo(r *http.Request) error {\n\n\tf.Name = r.FormValue(UploadFormName)\n\tf.Category = r.FormValue(UploadFormCategory)\n\tf.Description = r.FormValue(UploadFormDescription)\n\tf.WebsiteLink = r.FormValue(UploadFormWebsiteLink)\n\tf.Status, _ = strconv.Atoi(r.FormValue(UploadFormStatus))\n\tf.Magnet = r.FormValue(UploadFormMagnet)\n\tf.Remake = r.FormValue(UploadFormRemake) == \"on\"\n\n\t\/\/ trim whitespace\n\tf.Name = util.TrimWhitespaces(f.Name)\n\tf.Description = p.Sanitize(util.TrimWhitespaces(f.Description))\n\tf.WebsiteLink = util.TrimWhitespaces(f.WebsiteLink)\n\tf.Magnet = util.TrimWhitespaces(f.Magnet)\n\tcache.Impl.ClearAll()\n\n\tcatsSplit := strings.Split(f.Category, \"_\")\n\t\/\/ need this to prevent out of index panics\n\tif len(catsSplit) == 2 {\n\t\tCatID, err := strconv.Atoi(catsSplit[0])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\t\tSubCatID, err := strconv.Atoi(catsSplit[1])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tif !categories.CategoryExists(f.Category) {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tf.CategoryID = CatID\n\t\tf.SubCategoryID = SubCatID\n\t} else {\n\t\treturn ErrInvalidTorrentCategory\n\t}\n\tif f.WebsiteLink != \"\" {\n\t\t\/\/ WebsiteLink\n\t\turlRegexp, _ := regexp.Compile(`^(https?:\\\/\\\/|ircs?:\\\/\\\/)?([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([\\\/\\w \\.-]*)*\\\/?$`)\n\t\tif !urlRegexp.MatchString(f.WebsiteLink) {\n\t\t\treturn ErrInvalidWebsiteLink\n\t\t}\n\t}\n\n\t\/\/ first: parse torrent file (if any) to fill missing information\n\ttfile, _, err := r.FormFile(UploadFormTorrent)\n\tif err == nil {\n\t\tvar torrent metainfo.TorrentFile\n\n\t\t\/\/ decode torrent\n\t\t_, seekErr := tfile.Seek(0, io.SeekStart)\n\t\tif seekErr != nil {\n\t\t\treturn seekErr\n\t\t}\n\t\terr = bencode.NewDecoder(tfile).Decode(&torrent)\n\t\tif err != nil {\n\t\t\treturn metainfo.ErrInvalidTorrentFile\n\t\t}\n\n\t\t\/\/ check a few things\n\t\tif torrent.IsPrivate() {\n\t\t\treturn ErrPrivateTorrent\n\t\t}\n\t\ttrackers := torrent.GetAllAnnounceURLS()\n\t\tif !uploadService.CheckTrackers(trackers) {\n\t\t\treturn ErrTrackerProblem\n\t\t}\n\n\t\t\/\/ Name\n\t\tif len(f.Name) == 0 {\n\t\t\tf.Name = torrent.TorrentName()\n\t\t}\n\n\t\t\/\/ Magnet link: if a file is provided it should be empty\n\t\tif len(f.Magnet) != 0 {\n\t\t\treturn ErrTorrentPlusMagnet\n\t\t}\n\t\tbinInfohash, err := torrent.Infohash()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.Infohash = strings.ToUpper(hex.EncodeToString(binInfohash[:]))\n\t\tf.Magnet = util.InfoHashToMagnet(f.Infohash, f.Name, trackers...)\n\n\t\t\/\/ extract filesize\n\t\tf.Filesize = int64(torrent.TotalSize())\n\n\t\t\/\/ extract filelist\n\t\tfileInfos := torrent.Info.GetFiles()\n\t\tfor _, fileInfo := range fileInfos {\n\t\t\tf.FileList = append(f.FileList, UploadedFile{\n\t\t\t\tPath: fileInfo.Path,\n\t\t\t\tFilesize: int64(fileInfo.Length),\n\t\t\t})\n\t\t}\n\t} else {\n\t\t\/\/ No torrent file provided\n\t\tmagnetUrl, err := url.Parse(string(f.Magnet)) \/\/?\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\txt := magnetUrl.Query().Get(\"xt\")\n\t\tif !strings.HasPrefix(xt, \"urn:btih:\") {\n\t\t\treturn errors.New(\"Incorrect magnet\")\n\t\t}\n\t\txt = strings.SplitAfter(xt, \":\")[2]\n\t\tf.Infohash = strings.ToUpper(strings.Split(xt, \"&\")[0])\n\t\tisBase32, err := regexp.MatchString(\"^[2-7A-Z]{32}$\", f.Infohash)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !isBase32 {\n\t\t\tisBase16, err := regexp.MatchString(\"^[0-9A-F]{40}$\", f.Infohash)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !isBase16 {\n\t\t\t\treturn errors.New(\"Incorrect hash\")\n\t\t\t}\n\t\t}\n\t\tf.Filesize = 0\n\t\tf.Filepath = \"\"\n\t}\n\n\t\/\/ then actually check that we have everything we need\n\tif len(f.Name) == 0 {\n\t\treturn ErrInvalidTorrentName\n\t}\n\n\t\/\/ after data has been checked & extracted, write it to disk\n\tif len(config.TorrentFileStorage) > 0 {\n\t\terr := WriteTorrentToDisk(tfile, f.Infohash+\".torrent\", &f.Filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tf.Filepath = \"\"\n\t}\n\n\treturn nil\n}\n\nfunc (f *UploadForm) ExtractEditInfo(r *http.Request) error {\n\tf.Name = r.FormValue(UploadFormName)\n\tf.Category = r.FormValue(UploadFormCategory)\n\tf.WebsiteLink = r.FormValue(UploadFormWebsiteLink)\n\tf.Description = r.FormValue(UploadFormDescription)\n\tf.Status, _ = strconv.Atoi(r.FormValue(UploadFormStatus))\n\n\t\/\/ trim whitespace\n\tf.Name = util.TrimWhitespaces(f.Name)\n\tf.Description = p.Sanitize(util.TrimWhitespaces(f.Description))\n\n\tcatsSplit := strings.Split(f.Category, \"_\")\n\t\/\/ need this to prevent out of index panics\n\tif len(catsSplit) == 2 {\n\t\tCatID, err := strconv.Atoi(catsSplit[0])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\t\tSubCatID, err := strconv.Atoi(catsSplit[1])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tf.CategoryID = CatID\n\t\tf.SubCategoryID = SubCatID\n\t} else {\n\t\treturn ErrInvalidTorrentCategory\n\t}\n\treturn nil\n}\n\nfunc WriteTorrentToDisk(file multipart.File, name string, fullpath *string) error {\n\t_, seekErr := file.Seek(0, io.SeekStart)\n\tif seekErr != nil {\n\t\treturn seekErr\n\t}\n\tb, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*fullpath = fmt.Sprintf(\"%s%c%s\", config.TorrentFileStorage, os.PathSeparator, name)\n\treturn ioutil.WriteFile(*fullpath, b, 0644)\n}\n\n\/\/ NewUploadForm creates a new upload form given parameters as list\nfunc NewUploadForm(params ...string) (uploadForm UploadForm) {\n\tif len(params) > 1 {\n\t\tuploadForm.Category = params[0]\n\t} else {\n\t\tuploadForm.Category = \"3_12\"\n\t}\n\tif len(params) > 2 {\n\t\tuploadForm.Description = params[1]\n\t} else {\n\t\tuploadForm.Description = \"Description\"\n\t}\n\treturn\n}\nRemove unused packagepackage router\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/NyaaPantsu\/nyaa\/cache\"\n\t\"github.com\/NyaaPantsu\/nyaa\/config\"\n\t\"github.com\/NyaaPantsu\/nyaa\/service\/upload\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\/categories\"\n\t\"github.com\/NyaaPantsu\/nyaa\/util\/metainfo\"\n\t\"github.com\/microcosm-cc\/bluemonday\"\n\t\"github.com\/zeebo\/bencode\"\n)\n\n\/\/ Use this, because we seem to avoid using models, and we would need\n\/\/ the torrent ID to create the File in the DB\ntype UploadedFile struct {\n\tPath []string\n\tFilesize int64\n}\n\n\/\/ UploadForm serializing HTTP form for torrent upload\ntype UploadForm struct {\n\tName string\n\tMagnet string\n\tCategory string\n\tRemake bool\n\tDescription string\n\tStatus int\n\tCaptchaID string\n\tWebsiteLink string\n\n\tInfohash string\n\tCategoryID int\n\tSubCategoryID int\n\tFilesize int64\n\tFilepath string\n\tFileList []UploadedFile\n}\n\n\/\/ TODO: these should be in another package (?)\n\n\/\/ form names\nconst UploadFormName = \"name\"\nconst UploadFormTorrent = \"torrent\"\nconst UploadFormMagnet = \"magnet\"\nconst UploadFormCategory = \"c\"\nconst UploadFormRemake = \"remake\"\nconst UploadFormDescription = \"desc\"\nconst UploadFormWebsiteLink = \"website_link\"\nconst UploadFormStatus = \"status\"\n\n\/\/ error indicating that you can't send both a magnet link and torrent\nvar ErrTorrentPlusMagnet = errors.New(\"Upload either a torrent file or magnet link, not both\")\n\n\/\/ error indicating a torrent is private\nvar ErrPrivateTorrent = errors.New(\"Torrent is private\")\n\n\/\/ error indicating a problem with its trackers\nvar ErrTrackerProblem = errors.New(\"Torrent does not have any (working) trackers: https:\/\/\" + config.WebAddress + \"\/faq#trackers\")\n\n\/\/ error indicating a torrent's name is invalid\nvar ErrInvalidTorrentName = errors.New(\"Torrent name is invalid\")\n\n\/\/ error indicating a torrent's description is invalid\nvar ErrInvalidTorrentDescription = errors.New(\"Torrent description is invalid\")\n\n\/\/ error indicating a torrent's description is invalid\nvar ErrInvalidWebsiteLink = errors.New(\"Website url or IRC link is invalid\")\n\n\/\/ error indicating a torrent's category is invalid\nvar ErrInvalidTorrentCategory = errors.New(\"Torrent category is invalid\")\n\nvar p = bluemonday.UGCPolicy()\n\n\/**\nUploadForm.ExtractInfo takes an http request and computes all fields for this form\n*\/\nfunc (f *UploadForm) ExtractInfo(r *http.Request) error {\n\n\tf.Name = r.FormValue(UploadFormName)\n\tf.Category = r.FormValue(UploadFormCategory)\n\tf.Description = r.FormValue(UploadFormDescription)\n\tf.WebsiteLink = r.FormValue(UploadFormWebsiteLink)\n\tf.Status, _ = strconv.Atoi(r.FormValue(UploadFormStatus))\n\tf.Magnet = r.FormValue(UploadFormMagnet)\n\tf.Remake = r.FormValue(UploadFormRemake) == \"on\"\n\n\t\/\/ trim whitespace\n\tf.Name = util.TrimWhitespaces(f.Name)\n\tf.Description = p.Sanitize(util.TrimWhitespaces(f.Description))\n\tf.WebsiteLink = util.TrimWhitespaces(f.WebsiteLink)\n\tf.Magnet = util.TrimWhitespaces(f.Magnet)\n\tcache.Impl.ClearAll()\n\n\tcatsSplit := strings.Split(f.Category, \"_\")\n\t\/\/ need this to prevent out of index panics\n\tif len(catsSplit) == 2 {\n\t\tCatID, err := strconv.Atoi(catsSplit[0])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\t\tSubCatID, err := strconv.Atoi(catsSplit[1])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tif !categories.CategoryExists(f.Category) {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tf.CategoryID = CatID\n\t\tf.SubCategoryID = SubCatID\n\t} else {\n\t\treturn ErrInvalidTorrentCategory\n\t}\n\tif f.WebsiteLink != \"\" {\n\t\t\/\/ WebsiteLink\n\t\turlRegexp, _ := regexp.Compile(`^(https?:\\\/\\\/|ircs?:\\\/\\\/)?([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([\\\/\\w \\.-]*)*\\\/?$`)\n\t\tif !urlRegexp.MatchString(f.WebsiteLink) {\n\t\t\treturn ErrInvalidWebsiteLink\n\t\t}\n\t}\n\n\t\/\/ first: parse torrent file (if any) to fill missing information\n\ttfile, _, err := r.FormFile(UploadFormTorrent)\n\tif err == nil {\n\t\tvar torrent metainfo.TorrentFile\n\n\t\t\/\/ decode torrent\n\t\t_, seekErr := tfile.Seek(0, io.SeekStart)\n\t\tif seekErr != nil {\n\t\t\treturn seekErr\n\t\t}\n\t\terr = bencode.NewDecoder(tfile).Decode(&torrent)\n\t\tif err != nil {\n\t\t\treturn metainfo.ErrInvalidTorrentFile\n\t\t}\n\n\t\t\/\/ check a few things\n\t\tif torrent.IsPrivate() {\n\t\t\treturn ErrPrivateTorrent\n\t\t}\n\t\ttrackers := torrent.GetAllAnnounceURLS()\n\t\tif !uploadService.CheckTrackers(trackers) {\n\t\t\treturn ErrTrackerProblem\n\t\t}\n\n\t\t\/\/ Name\n\t\tif len(f.Name) == 0 {\n\t\t\tf.Name = torrent.TorrentName()\n\t\t}\n\n\t\t\/\/ Magnet link: if a file is provided it should be empty\n\t\tif len(f.Magnet) != 0 {\n\t\t\treturn ErrTorrentPlusMagnet\n\t\t}\n\t\tbinInfohash, err := torrent.Infohash()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.Infohash = strings.ToUpper(hex.EncodeToString(binInfohash[:]))\n\t\tf.Magnet = util.InfoHashToMagnet(f.Infohash, f.Name, trackers...)\n\n\t\t\/\/ extract filesize\n\t\tf.Filesize = int64(torrent.TotalSize())\n\n\t\t\/\/ extract filelist\n\t\tfileInfos := torrent.Info.GetFiles()\n\t\tfor _, fileInfo := range fileInfos {\n\t\t\tf.FileList = append(f.FileList, UploadedFile{\n\t\t\t\tPath: fileInfo.Path,\n\t\t\t\tFilesize: int64(fileInfo.Length),\n\t\t\t})\n\t\t}\n\t} else {\n\t\t\/\/ No torrent file provided\n\t\tmagnetUrl, err := url.Parse(string(f.Magnet)) \/\/?\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\txt := magnetUrl.Query().Get(\"xt\")\n\t\tif !strings.HasPrefix(xt, \"urn:btih:\") {\n\t\t\treturn errors.New(\"Incorrect magnet\")\n\t\t}\n\t\txt = strings.SplitAfter(xt, \":\")[2]\n\t\tf.Infohash = strings.ToUpper(strings.Split(xt, \"&\")[0])\n\t\tisBase32, err := regexp.MatchString(\"^[2-7A-Z]{32}$\", f.Infohash)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !isBase32 {\n\t\t\tisBase16, err := regexp.MatchString(\"^[0-9A-F]{40}$\", f.Infohash)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !isBase16 {\n\t\t\t\treturn errors.New(\"Incorrect hash\")\n\t\t\t}\n\t\t}\n\t\tf.Filesize = 0\n\t\tf.Filepath = \"\"\n\t}\n\n\t\/\/ then actually check that we have everything we need\n\tif len(f.Name) == 0 {\n\t\treturn ErrInvalidTorrentName\n\t}\n\n\t\/\/ after data has been checked & extracted, write it to disk\n\tif len(config.TorrentFileStorage) > 0 {\n\t\terr := WriteTorrentToDisk(tfile, f.Infohash+\".torrent\", &f.Filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tf.Filepath = \"\"\n\t}\n\n\treturn nil\n}\n\nfunc (f *UploadForm) ExtractEditInfo(r *http.Request) error {\n\tf.Name = r.FormValue(UploadFormName)\n\tf.Category = r.FormValue(UploadFormCategory)\n\tf.WebsiteLink = r.FormValue(UploadFormWebsiteLink)\n\tf.Description = r.FormValue(UploadFormDescription)\n\tf.Status, _ = strconv.Atoi(r.FormValue(UploadFormStatus))\n\n\t\/\/ trim whitespace\n\tf.Name = util.TrimWhitespaces(f.Name)\n\tf.Description = p.Sanitize(util.TrimWhitespaces(f.Description))\n\n\tcatsSplit := strings.Split(f.Category, \"_\")\n\t\/\/ need this to prevent out of index panics\n\tif len(catsSplit) == 2 {\n\t\tCatID, err := strconv.Atoi(catsSplit[0])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\t\tSubCatID, err := strconv.Atoi(catsSplit[1])\n\t\tif err != nil {\n\t\t\treturn ErrInvalidTorrentCategory\n\t\t}\n\n\t\tf.CategoryID = CatID\n\t\tf.SubCategoryID = SubCatID\n\t} else {\n\t\treturn ErrInvalidTorrentCategory\n\t}\n\treturn nil\n}\n\nfunc WriteTorrentToDisk(file multipart.File, name string, fullpath *string) error {\n\t_, seekErr := file.Seek(0, io.SeekStart)\n\tif seekErr != nil {\n\t\treturn seekErr\n\t}\n\tb, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*fullpath = fmt.Sprintf(\"%s%c%s\", config.TorrentFileStorage, os.PathSeparator, name)\n\treturn ioutil.WriteFile(*fullpath, b, 0644)\n}\n\n\/\/ NewUploadForm creates a new upload form given parameters as list\nfunc NewUploadForm(params ...string) (uploadForm UploadForm) {\n\tif len(params) > 1 {\n\t\tuploadForm.Category = params[0]\n\t} else {\n\t\tuploadForm.Category = \"3_12\"\n\t}\n\tif len(params) > 2 {\n\t\tuploadForm.Description = params[1]\n\t} else {\n\t\tuploadForm.Description = \"Description\"\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"package main\n\nfunc main() {\n}\nremove main.go<|endoftext|>"} {"text":"package collection\n\nimport (\n \"github.com\/kuzzleio\/sdk-go\/internal\"\n \"errors\"\n \"encoding\/json\"\n \"github.com\/kuzzleio\/sdk-go\/types\"\n)\n\n\/*\n Create a new empty data collection, with no associated mapping.\n*\/\nfunc (dc *Collection) Create(options *types.Options) (*types.AckResponse, error) {\n ch := make(chan types.KuzzleResponse)\n\n go dc.kuzzle.Query(internal.BuildQuery(dc.collection, dc.index, \"collection\", \"create\", nil), options, ch)\n\n res := <-ch\n\n if res.Error.Message != \"\" {\n return nil, errors.New(res.Error.Message)\n }\n\n ack := &types.AckResponse{}\n json.Unmarshal(res.Result, &ack)\n\n return ack, nil\n}Update create.gopackage collection\n\nimport (\n \"github.com\/kuzzleio\/sdk-go\/internal\"\n \"errors\"\n \"encoding\/json\"\n \"github.com\/kuzzleio\/sdk-go\/types\"\n)\n\n\/*\n Create a new empty data collection, with no associated mapping.\n*\/\nfunc (dc Collection) Create(options *types.Options) (*types.AckResponse, error) {\n ch := make(chan types.KuzzleResponse)\n\n go dc.kuzzle.Query(internal.BuildQuery(dc.collection, dc.index, \"collection\", \"create\", nil), options, ch)\n\n res := <-ch\n\n if res.Error.Message != \"\" {\n return nil, errors.New(res.Error.Message)\n }\n\n ack := &types.AckResponse{}\n json.Unmarshal(res.Result, &ack)\n\n return ack, nil\n}\n<|endoftext|>"} {"text":"package log\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ AccessStatus is the status of an access request from clients.\ntype AccessStatus string\n\nconst (\n\tAccessAccepted = AccessStatus(\"accepted\")\n\tAccessRejected = AccessStatus(\"rejected\")\n)\n\ntype accessLogger interface {\n\tLog(from, to string, status AccessStatus, reason string)\n}\n\ntype noOpAccessLogger struct {\n}\n\nfunc (logger *noOpAccessLogger) Log(from, to string, status AccessStatus, reason string) {\n\t\/\/ Swallow\n}\n\ntype accessLog struct {\n\tFrom string\n\tTo string\n\tStatus AccessStatus\n\tReason string\n}\n\ntype fileAccessLogger struct {\n\tqueue chan *accessLog\n\tlogger *log.Logger\n\tfile *os.File\n}\n\nfunc (logger *fileAccessLogger) close() {\n\tlogger.file.Close()\n}\n\nfunc (logger *fileAccessLogger) Log(from, to string, status AccessStatus, reason string) {\n\tlogger.queue <- &accessLog{\n\t\tFrom: from,\n\t\tTo: to,\n\t\tStatus: status,\n\t\tReason: reason,\n\t}\n}\n\nfunc (logger *fileAccessLogger) Run() {\n\tfor entry := range logger.queue {\n\t\tlogger.logger.Println(entry.From + \" \" + string(entry.Status) + \" \" + entry.To + \" \" + entry.Reason)\n\t}\n}\n\nfunc newFileAccessLogger(path string) accessLogger {\n\tfile, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to create or open file (%s): %v\\n\", path, err)\n\t\treturn nil\n\t}\n\treturn &fileAccessLogger{\n\t\tqueue: make(chan *accessLog, 16),\n\t\tlogger: log.New(file, \"\", log.Ldate|log.Ltime),\n\t\tfile: file,\n\t}\n}\n\nvar accessLoggerInstance accessLogger = &noOpAccessLogger{}\n\n\/\/ InitAccessLogger initializes the access logger to write into the give file.\nfunc InitAccessLogger(file string) {\n\tlogger := newFileAccessLogger(file)\n\tif logger != nil {\n\t\tgo logger.(*fileAccessLogger).Run()\n\t\taccessLoggerInstance = logger\n\t}\n}\n\n\/\/ Access writes an access log.\nfunc Access(from, to string, status AccessStatus, reason string) {\n\taccessLoggerInstance.Log(from, to, status, reason)\n}\nDon't block main thread when writing access log.package log\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ AccessStatus is the status of an access request from clients.\ntype AccessStatus string\n\nconst (\n\tAccessAccepted = AccessStatus(\"accepted\")\n\tAccessRejected = AccessStatus(\"rejected\")\n)\n\ntype accessLogger interface {\n\tLog(from, to string, status AccessStatus, reason string)\n}\n\ntype noOpAccessLogger struct {\n}\n\nfunc (logger *noOpAccessLogger) Log(from, to string, status AccessStatus, reason string) {\n\t\/\/ Swallow\n}\n\ntype accessLog struct {\n\tFrom string\n\tTo string\n\tStatus AccessStatus\n\tReason string\n}\n\ntype fileAccessLogger struct {\n\tqueue chan *accessLog\n\tlogger *log.Logger\n\tfile *os.File\n}\n\nfunc (logger *fileAccessLogger) close() {\n\tlogger.file.Close()\n}\n\nfunc (logger *fileAccessLogger) Log(from, to string, status AccessStatus, reason string) {\n\tselect {\n\tcase logger.queue <- &accessLog{\n\t\tFrom: from,\n\t\tTo: to,\n\t\tStatus: status,\n\t\tReason: reason,\n\t}:\n\tdefault:\n\t\t\/\/ We don't expect this to happen, but don't want to block main thread as well.\n\t}\n}\n\nfunc (logger *fileAccessLogger) Run() {\n\tfor entry := range logger.queue {\n\t\tlogger.logger.Println(entry.From + \" \" + string(entry.Status) + \" \" + entry.To + \" \" + entry.Reason)\n\t}\n}\n\nfunc newFileAccessLogger(path string) accessLogger {\n\tfile, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to create or open file (%s): %v\\n\", path, err)\n\t\treturn nil\n\t}\n\treturn &fileAccessLogger{\n\t\tqueue: make(chan *accessLog, 16),\n\t\tlogger: log.New(file, \"\", log.Ldate|log.Ltime),\n\t\tfile: file,\n\t}\n}\n\nvar accessLoggerInstance accessLogger = &noOpAccessLogger{}\n\n\/\/ InitAccessLogger initializes the access logger to write into the give file.\nfunc InitAccessLogger(file string) {\n\tlogger := newFileAccessLogger(file)\n\tif logger != nil {\n\t\tgo logger.(*fileAccessLogger).Run()\n\t\taccessLoggerInstance = logger\n\t}\n}\n\n\/\/ Access writes an access log.\nfunc Access(from, to string, status AccessStatus, reason string) {\n\taccessLoggerInstance.Log(from, to, status, reason)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar (\n\tedgeHost = flag.String(\"edgeHost\", \"www.gov.uk\", \"Hostname of edge\")\n\toriginPort = flag.Int(\"originPort\", 8080, \"Origin port to listen on for requests\")\n\tskipVerifyTLS = flag.Bool(\"skipVerifyTLS\", false, \"Skip TLS cert verification if set\")\n)\n\n\/\/ These consts and vars are available to all tests.\nconst requestTimeout = time.Second * 5\n\nvar (\n\tclient *http.Transport\n\toriginServer *CDNServeMux\n)\n\n\/\/ Setup clients and servers.\nfunc init() {\n\n\tflag.Parse()\n\n\ttlsOptions := &tls.Config{}\n\tif *skipVerifyTLS {\n\t\ttlsOptions.InsecureSkipVerify = true\n\t}\n\n\tclient = &http.Transport{\n\t\tResponseHeaderTimeout: requestTimeout,\n\t\tTLSClientConfig: tlsOptions,\n\t}\n\toriginServer = StartServer(*originPort)\n\n\tlog.Println(\"Confirming that CDN is healthy\")\n\terr := confirmEdgeIsHealthy(originServer, *edgeHost)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\nMake -edgeHost option mandatorypackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nvar (\n\tedgeHost = flag.String(\"edgeHost\", \"\", \"Hostname of edge\")\n\toriginPort = flag.Int(\"originPort\", 8080, \"Origin port to listen on for requests\")\n\tskipVerifyTLS = flag.Bool(\"skipVerifyTLS\", false, \"Skip TLS cert verification if set\")\n)\n\n\/\/ These consts and vars are available to all tests.\nconst requestTimeout = time.Second * 5\n\nvar (\n\tclient *http.Transport\n\toriginServer *CDNServeMux\n)\n\n\/\/ Setup clients and servers.\nfunc init() {\n\n\tflag.Parse()\n\n\tif *edgeHost == \"\" {\n\t\tfmt.Println(\"ERROR: -edgeHost must be set to the CDN edge hostname we wish to test against\\n\")\n\t\tfmt.Println(\"Usage:\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\n\ttlsOptions := &tls.Config{}\n\tif *skipVerifyTLS {\n\t\ttlsOptions.InsecureSkipVerify = true\n\t}\n\n\tclient = &http.Transport{\n\t\tResponseHeaderTimeout: requestTimeout,\n\t\tTLSClientConfig: tlsOptions,\n\t}\n\toriginServer = StartServer(*originPort)\n\n\tlog.Println(\"Confirming that CDN is healthy\")\n\terr := confirmEdgeIsHealthy(originServer, *edgeHost)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"package config\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/cloudcredo\/cloudfocker\/utils\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n)\n\ntype RunConfig struct {\n\tContainerName string\n\tDaemon bool\n\tMounts map[string]string\n\tPublishedPorts map[int]int\n\tEnvVars map[string]string\n\tImageTag string\n\tCommand []string\n}\n\nfunc NewStageRunConfig(cloudfoundryAppDir string, directories *Directories) (runConfig *RunConfig) {\n\trunConfig = &RunConfig{\n\t\tContainerName: \"cloudfocker-staging\",\n\t\tMounts: map[string]string{\n\t\t\tcloudfoundryAppDir: \"\/app\",\n\t\t\tdirectories.Droplet(): \"\/tmp\/droplet\",\n\t\t\tutils.CloudfockerHome() + \"\/result\": \"\/tmp\/result\",\n\t\t\tdirectories.Buildpacks(): \"\/tmp\/cloudfockerbuildpacks\",\n\t\t\tutils.CloudfockerHome() + \"\/cache\": \"\/tmp\/cache\",\n\t\t\tutils.CloudfockerHome() + \"\/focker\": \"\/focker\",\n\t\t},\n\t\tImageTag: \"cloudfocker-base:latest\",\n\t\tCommand: []string{\"\/focker\/fock\", \"stage\", \"internal\"},\n\t}\n\treturn\n}\n\nfunc NewRuntimeRunConfig(cloudfoundryDropletDir string) (runConfig *RunConfig) {\n\trunConfig = &RunConfig{\n\t\tContainerName: \"cloudfocker-runtime\",\n\t\tDaemon: true,\n\t\tMounts: map[string]string{\n\t\t\tcloudfoundryDropletDir + \"\/app\": \"\/app\",\n\t\t},\n\t\tPublishedPorts: map[int]int{8080: 8080},\n\t\tEnvVars: map[string]string{\n\t\t\t\"HOME\": \"\/app\",\n\t\t\t\"TMPDIR\": \"\/app\/tmp\",\n\t\t\t\"PORT\": \"8080\",\n\t\t\t\"VCAP_SERVICES\": vcapServices(cloudfoundryDropletDir),\n\t\t\t\"DATABASE_URL\": databaseURL(cloudfoundryDropletDir),\n\t\t},\n\t\tImageTag: \"cloudfocker-base:latest\",\n\t\tCommand: append([]string{\"\/bin\/bash\", \"\/app\/cloudfocker-start-1c4352a23e52040ddb1857d7675fe3cc.sh\", \"\/app\"},\n\t\t\tparseStartCommand(cloudfoundryDropletDir)...),\n\t}\n\treturn\n}\n\nfunc vcapServices(cloudfoundryDropletDir string) (services string) {\n\tservicesBytes, err := ioutil.ReadFile(cloudfoundryDropletDir + \"\/app\/vcap_services.json\")\n\tif err != nil {\n\t\treturn\n\t}\n\tservices = string(servicesBytes)\n\treturn\n}\n\ntype database struct {\n\tCredentials struct {\n\t\tURI string\n\t}\n}\n\nfunc databaseURL(cloudfoundryDropletDir string) (databaseURL string) {\n\tservicesBytes, err := ioutil.ReadFile(cloudfoundryDropletDir + \"\/app\/vcap_services.json\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar services map[string][]database\n\n\tjson.Unmarshal(servicesBytes, &services)\n\n\tfor _, serviceDatabase := range services {\n\t\tif len(serviceDatabase) > 0 && serviceDatabase[0].Credentials.URI != \"\" {\n\t\t\tdatabaseURL = serviceDatabase[0].Credentials.URI\n\t\t}\n\t}\n\n\treturn\n}\n\ntype StagingInfoYml struct {\n\tDetectedBuildpack string `yaml:\"detected_buildpack\"`\n\tStartCommand string `yaml:\"start_command\"`\n}\n\ntype ProcfileYml struct {\n\tWeb string `yaml:\"web\"`\n}\n\nfunc parseStartCommand(cloudfoundryDropletDir string) (startCommand []string) {\n\tstagingInfoFile, err := os.Open(cloudfoundryDropletDir + \"\/staging_info.yml\")\n\tif err == nil {\n\t\tstagingInfo := new(StagingInfoYml)\n\t\tdecoder := candiedyaml.NewDecoder(stagingInfoFile)\n\t\terr = decoder.Decode(stagingInfo)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to decode document: %s\", err)\n\t\t}\n\t\tstartCommand = strings.Split(stagingInfo.StartCommand, \" \")\n\t\tif startCommand[0] != \"\" {\n\t\t\treturn\n\t\t}\n\t\tprocfileFile, err := os.Open(cloudfoundryDropletDir + \"\/app\/Procfile\")\n\t\tif err == nil {\n\t\t\tprocfileInfo := new(ProcfileYml)\n\t\t\tdecoder := candiedyaml.NewDecoder(procfileFile)\n\t\t\terr = decoder.Decode(procfileInfo)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to decode document: %s\", err)\n\t\t\t}\n\t\t\tstartCommand = strings.Split(procfileInfo.Web, \" \")\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Fatal(\"Unable to find staging_info.yml\")\n\treturn\n}\nadded description of container volume mappingspackage config\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/cloudcredo\/cloudfocker\/utils\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n)\n\ntype RunConfig struct {\n\tContainerName string\n\tDaemon bool\n\tMounts map[string]string\n\tPublishedPorts map[int]int\n\tEnvVars map[string]string\n\tImageTag string\n\tCommand []string\n}\n\nfunc NewStageRunConfig(cloudfoundryAppDir string, directories *Directories) (runConfig *RunConfig) {\n\trunConfig = &RunConfig{\n\t\tContainerName: \"cloudfocker-staging\",\n\t\tMounts: map[string]string{ \/\/ host dir: container dir\n\t\t\tcloudfoundryAppDir: \"\/app\",\n\t\t\tdirectories.Droplet(): \"\/tmp\/droplet\",\n\t\t\tutils.CloudfockerHome() + \"\/result\": \"\/tmp\/result\",\n\t\t\tdirectories.Buildpacks(): \"\/tmp\/cloudfockerbuildpacks\",\n\t\t\tutils.CloudfockerHome() + \"\/cache\": \"\/tmp\/cache\",\n\t\t\tutils.CloudfockerHome() + \"\/focker\": \"\/focker\",\n\t\t},\n\t\tImageTag: \"cloudfocker-base:latest\",\n\t\tCommand: []string{\"\/focker\/fock\", \"stage\", \"internal\"},\n\t}\n\treturn\n}\n\nfunc NewRuntimeRunConfig(cloudfoundryDropletDir string) (runConfig *RunConfig) {\n\trunConfig = &RunConfig{\n\t\tContainerName: \"cloudfocker-runtime\",\n\t\tDaemon: true,\n\t\tMounts: map[string]string{\n\t\t\tcloudfoundryDropletDir + \"\/app\": \"\/app\",\n\t\t},\n\t\tPublishedPorts: map[int]int{8080: 8080},\n\t\tEnvVars: map[string]string{\n\t\t\t\"HOME\": \"\/app\",\n\t\t\t\"TMPDIR\": \"\/app\/tmp\",\n\t\t\t\"PORT\": \"8080\",\n\t\t\t\"VCAP_SERVICES\": vcapServices(cloudfoundryDropletDir),\n\t\t\t\"DATABASE_URL\": databaseURL(cloudfoundryDropletDir),\n\t\t},\n\t\tImageTag: \"cloudfocker-base:latest\",\n\t\tCommand: append([]string{\"\/bin\/bash\", \"\/app\/cloudfocker-start-1c4352a23e52040ddb1857d7675fe3cc.sh\", \"\/app\"},\n\t\t\tparseStartCommand(cloudfoundryDropletDir)...),\n\t}\n\treturn\n}\n\nfunc vcapServices(cloudfoundryDropletDir string) (services string) {\n\tservicesBytes, err := ioutil.ReadFile(cloudfoundryDropletDir + \"\/app\/vcap_services.json\")\n\tif err != nil {\n\t\treturn\n\t}\n\tservices = string(servicesBytes)\n\treturn\n}\n\ntype database struct {\n\tCredentials struct {\n\t\tURI string\n\t}\n}\n\nfunc databaseURL(cloudfoundryDropletDir string) (databaseURL string) {\n\tservicesBytes, err := ioutil.ReadFile(cloudfoundryDropletDir + \"\/app\/vcap_services.json\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar services map[string][]database\n\n\tjson.Unmarshal(servicesBytes, &services)\n\n\tfor _, serviceDatabase := range services {\n\t\tif len(serviceDatabase) > 0 && serviceDatabase[0].Credentials.URI != \"\" {\n\t\t\tdatabaseURL = serviceDatabase[0].Credentials.URI\n\t\t}\n\t}\n\n\treturn\n}\n\ntype StagingInfoYml struct {\n\tDetectedBuildpack string `yaml:\"detected_buildpack\"`\n\tStartCommand string `yaml:\"start_command\"`\n}\n\ntype ProcfileYml struct {\n\tWeb string `yaml:\"web\"`\n}\n\nfunc parseStartCommand(cloudfoundryDropletDir string) (startCommand []string) {\n\tstagingInfoFile, err := os.Open(cloudfoundryDropletDir + \"\/staging_info.yml\")\n\tif err == nil {\n\t\tstagingInfo := new(StagingInfoYml)\n\t\tdecoder := candiedyaml.NewDecoder(stagingInfoFile)\n\t\terr = decoder.Decode(stagingInfo)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to decode document: %s\", err)\n\t\t}\n\t\tstartCommand = strings.Split(stagingInfo.StartCommand, \" \")\n\t\tif startCommand[0] != \"\" {\n\t\t\treturn\n\t\t}\n\t\tprocfileFile, err := os.Open(cloudfoundryDropletDir + \"\/app\/Procfile\")\n\t\tif err == nil {\n\t\t\tprocfileInfo := new(ProcfileYml)\n\t\t\tdecoder := candiedyaml.NewDecoder(procfileFile)\n\t\t\terr = decoder.Decode(procfileInfo)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to decode document: %s\", err)\n\t\t\t}\n\t\t\tstartCommand = strings.Split(procfileInfo.Web, \" \")\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Fatal(\"Unable to find staging_info.yml\")\n\treturn\n}\n<|endoftext|>"} {"text":"package ehttp\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\/atomic\"\n)\n\n\/\/ ResponseWriter wraps http.ResponseWriter and holds\n\/\/ a flag to know if the headers have been sent as well has\n\/\/ the http code sent.\ntype ResponseWriter struct {\n\thttp.ResponseWriter\n\tcode *int32\n}\n\n\/\/ NewResponseWriter instantiates a new ehttp ResponseWriter.\nfunc NewResponseWriter(w http.ResponseWriter) *ResponseWriter {\n\treturn &ResponseWriter{\n\t\tResponseWriter: w,\n\t\tcode: new(int32),\n\t}\n}\n\n\/\/ Code return the http code stored in the response writer.\nfunc (w ResponseWriter) Code() int {\n\treturn int(atomic.LoadInt32(w.code))\n}\n\n\/\/ WriteHeader wraps underlying WriteHeader\n\/\/ - flag that the headers have been sent\n\/\/ - store the sent code\nfunc (w *ResponseWriter) WriteHeader(code int) {\n\tatomic.CompareAndSwapInt32(w.code, 0, int32(code))\n\tw.ResponseWriter.WriteHeader(code)\n}\n\n\/\/ Write wraps the underlying Write and flag that the headers have been sent.\nfunc (w *ResponseWriter) Write(buf []byte) (int, error) {\n\tatomic.CompareAndSwapInt32(w.code, 0, int32(http.StatusOK))\n\treturn w.ResponseWriter.Write(buf)\n}\n\n\/\/ Hijack wraps the underlying Hijack if available.\nfunc (w *ResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := w.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"not a hijacker\")\n\t}\n\treturn hijacker.Hijack()\n}\n\n\/\/ Flush wraps the underlying Flush. Panic if not a Flusher.\nfunc (w *ResponseWriter) Flush() {\n\tw.ResponseWriter.(http.Flusher).Flush()\n}\nMinor bugfix: don't send headers if already sentpackage ehttp\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\/atomic\"\n)\n\n\/\/ ResponseWriter wraps http.ResponseWriter and holds\n\/\/ a flag to know if the headers have been sent as well has\n\/\/ the http code sent.\ntype ResponseWriter struct {\n\thttp.ResponseWriter\n\tcode *int32\n}\n\n\/\/ NewResponseWriter instantiates a new ehttp ResponseWriter.\nfunc NewResponseWriter(w http.ResponseWriter) *ResponseWriter {\n\treturn &ResponseWriter{\n\t\tResponseWriter: w,\n\t\tcode: new(int32),\n\t}\n}\n\n\/\/ Code return the http code stored in the response writer.\nfunc (w ResponseWriter) Code() int {\n\treturn int(atomic.LoadInt32(w.code))\n}\n\n\/\/ WriteHeader wraps underlying WriteHeader.\n\/\/ - sends the header only if not already sent.\n\/\/ - flag that the headers have been sent\n\/\/ - store the sent code\nfunc (w *ResponseWriter) WriteHeader(code int) {\n\tif atomic.CompareAndSwapInt32(w.code, 0, int32(code)) {\n\t\tw.ResponseWriter.WriteHeader(code)\n\t}\n}\n\n\/\/ Write wraps the underlying Write and flag that the headers have been sent.\n\/\/ Following the net\/http behavior, if no http status code has been set, assume http.StatusOK.\nfunc (w *ResponseWriter) Write(buf []byte) (int, error) {\n\tatomic.CompareAndSwapInt32(w.code, 0, int32(http.StatusOK))\n\treturn w.ResponseWriter.Write(buf)\n}\n\n\/\/ Hijack wraps the underlying Hijack if available.\nfunc (w *ResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := w.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"not a hijacker\")\n\t}\n\treturn hijacker.Hijack()\n}\n\n\/\/ Flush wraps the underlying Flush. Panic if not a Flusher.\nfunc (w *ResponseWriter) Flush() {\n\tw.ResponseWriter.(http.Flusher).Flush()\n}\n<|endoftext|>"} {"text":"Fix package comment<|endoftext|>"} {"text":"package gorethink\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n)\n\n\/\/ Write 'data' to conn\nfunc (c *Connection) writeData(data []byte) error {\n\t_, err := c.conn.Write(data[:])\n\tif err != nil {\n\t\treturn RqlConnectionError{err.Error()}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Connection) writeHandshakeReq() error {\n\tpos := 0\n\tdataLen := 4 + 4 + len(c.opts.AuthKey) + 4\n\n\tdata := c.buf.takeSmallBuffer(dataLen)\n\tif data == nil {\n\t\treturn RqlDriverError{ErrBusyBuffer.Error()}\n\t}\n\n\t\/\/ Send the protocol version to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V0_3))\n\tpos += 4\n\n\t\/\/ Send the length of the auth key to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(c.opts.AuthKey)))\n\tpos += 4\n\n\t\/\/ Send the auth key as an ASCII string\n\tif len(c.opts.AuthKey) > 0 {\n\t\tpos += copy(data[pos:], c.opts.AuthKey)\n\t}\n\n\t\/\/ Send the protocol type as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_JSON))\n\tpos += 4\n\n\treturn c.writeData(data)\n}\n\nfunc (c *Connection) readHandshakeSuccess() error {\n\treader := bufio.NewReader(c.conn)\n\tline, err := reader.ReadBytes('\\x00')\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"Unexpected EOF: %s\", string(line))\n\t\t}\n\t\treturn RqlConnectionError{err.Error()}\n\t}\n\t\/\/ convert to string and remove trailing NUL byte\n\tresponse := string(line[:len(line)-1])\n\tif response != \"SUCCESS\" {\n\t\t\/\/ we failed authorization or something else terrible happened\n\t\treturn RqlDriverError{fmt.Sprintf(\"Server dropped connection with message: \\\"%s\\\"\", response)}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Connection) writeQuery(token int64, q []byte) error {\n\tpos := 0\n\tdataLen := 8 + 4 + len(q)\n\n\tdata := c.buf.takeSmallBuffer(dataLen)\n\tif data == nil {\n\t\treturn RqlDriverError{ErrBusyBuffer.Error()}\n\t}\n\n\t\/\/ Send the protocol version to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint64(data[pos:], uint64(token))\n\tpos += 8\n\n\t\/\/ Send the length of the auth key to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(q)))\n\tpos += 4\n\n\t\/\/ Send the auth key as an ASCII string\n\tpos += copy(data[pos:], q)\n\n\treturn c.writeData(data)\n}\nFixed writeQuery being too small when sending large queriespackage gorethink\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n)\n\n\/\/ Write 'data' to conn\nfunc (c *Connection) writeData(data []byte) error {\n\t_, err := c.conn.Write(data[:])\n\tif err != nil {\n\t\treturn RqlConnectionError{err.Error()}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Connection) writeHandshakeReq() error {\n\tpos := 0\n\tdataLen := 4 + 4 + len(c.opts.AuthKey) + 4\n\n\tdata := c.buf.takeSmallBuffer(dataLen)\n\tif data == nil {\n\t\treturn RqlDriverError{ErrBusyBuffer.Error()}\n\t}\n\n\t\/\/ Send the protocol version to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V0_3))\n\tpos += 4\n\n\t\/\/ Send the length of the auth key to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(c.opts.AuthKey)))\n\tpos += 4\n\n\t\/\/ Send the auth key as an ASCII string\n\tif len(c.opts.AuthKey) > 0 {\n\t\tpos += copy(data[pos:], c.opts.AuthKey)\n\t}\n\n\t\/\/ Send the protocol type as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_JSON))\n\tpos += 4\n\n\treturn c.writeData(data)\n}\n\nfunc (c *Connection) readHandshakeSuccess() error {\n\treader := bufio.NewReader(c.conn)\n\tline, err := reader.ReadBytes('\\x00')\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"Unexpected EOF: %s\", string(line))\n\t\t}\n\t\treturn RqlConnectionError{err.Error()}\n\t}\n\t\/\/ convert to string and remove trailing NUL byte\n\tresponse := string(line[:len(line)-1])\n\tif response != \"SUCCESS\" {\n\t\t\/\/ we failed authorization or something else terrible happened\n\t\treturn RqlDriverError{fmt.Sprintf(\"Server dropped connection with message: \\\"%s\\\"\", response)}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Connection)(token int64, q []byte) error {\n\tpos := 0\n\tdataLen := 8 + 4 + len(q)\n\n\tdata := c.buf.takeBuffer(dataLen)\n\tif data == nil {\n\t\treturn RqlDriverError{ErrBusyBuffer.Error()}\n\t}\n\n\t\/\/ Send the protocol version to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint64(data[pos:], uint64(token))\n\tpos += 8\n\n\t\/\/ Send the length of the auth key to the server as a 4-byte little-endian-encoded integer\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(q)))\n\tpos += 4\n\n\t\/\/ Send the auth key as an ASCII string\n\tpos += copy(data[pos:], q)\n\n\treturn c.writeData(data)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage testing\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n\n\tadmissionv1beta1 \"k8s.io\/api\/admission\/v1beta1\"\n\tadmissionregv1beta1 \"k8s.io\/api\/admissionregistration\/v1beta1\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tappsv1beta1 \"k8s.io\/api\/apps\/v1beta1\"\n\tappsv1beta2 \"k8s.io\/api\/apps\/v1beta2\"\n\tauthenticationv1 \"k8s.io\/api\/authentication\/v1\"\n\tauthenticationv1beta1 \"k8s.io\/api\/authentication\/v1beta1\"\n\tauthorizationv1 \"k8s.io\/api\/authorization\/v1\"\n\tauthorizationv1beta1 \"k8s.io\/api\/authorization\/v1beta1\"\n\tautoscalingv1 \"k8s.io\/api\/autoscaling\/v1\"\n\tautoscalingv2beta1 \"k8s.io\/api\/autoscaling\/v2beta1\"\n\tbatchv1 \"k8s.io\/api\/batch\/v1\"\n\tbatchv1beta1 \"k8s.io\/api\/batch\/v1beta1\"\n\tbatchv2alpha1 \"k8s.io\/api\/batch\/v2alpha1\"\n\tcertificatesv1beta1 \"k8s.io\/api\/certificates\/v1beta1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\teventsv1beta1 \"k8s.io\/api\/events\/v1beta1\"\n\textensionsv1beta1 \"k8s.io\/api\/extensions\/v1beta1\"\n\timagepolicyv1alpha1 \"k8s.io\/api\/imagepolicy\/v1alpha1\"\n\tnetworkingv1 \"k8s.io\/api\/networking\/v1\"\n\tpolicyv1beta1 \"k8s.io\/api\/policy\/v1beta1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\trbacv1alpha1 \"k8s.io\/api\/rbac\/v1alpha1\"\n\trbacv1beta1 \"k8s.io\/api\/rbac\/v1beta1\"\n\tschedulingv1 \"k8s.io\/api\/scheduling\/v1\"\n\tschedulingv1alpha1 \"k8s.io\/api\/scheduling\/v1alpha1\"\n\tschedulingv1beta1 \"k8s.io\/api\/scheduling\/v1beta1\"\n\tsettingsv1alpha1 \"k8s.io\/api\/settings\/v1alpha1\"\n\tstoragev1 \"k8s.io\/api\/storage\/v1\"\n\tstoragev1alpha1 \"k8s.io\/api\/storage\/v1alpha1\"\n\tstoragev1beta1 \"k8s.io\/api\/storage\/v1beta1\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/fuzzer\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/roundtrip\"\n\tgenericfuzzer \"k8s.io\/apimachinery\/pkg\/apis\/meta\/fuzzer\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\"\n)\n\nvar groups = []runtime.SchemeBuilder{\n\tadmissionv1beta1.SchemeBuilder,\n\tadmissionregv1beta1.SchemeBuilder,\n\tappsv1beta1.SchemeBuilder,\n\tappsv1beta2.SchemeBuilder,\n\tappsv1.SchemeBuilder,\n\tauthenticationv1beta1.SchemeBuilder,\n\tauthenticationv1.SchemeBuilder,\n\tauthorizationv1beta1.SchemeBuilder,\n\tauthorizationv1.SchemeBuilder,\n\tautoscalingv1.SchemeBuilder,\n\tautoscalingv2beta1.SchemeBuilder,\n\tbatchv2alpha1.SchemeBuilder,\n\tbatchv1beta1.SchemeBuilder,\n\tbatchv1.SchemeBuilder,\n\tcertificatesv1beta1.SchemeBuilder,\n\tcorev1.SchemeBuilder,\n\teventsv1beta1.SchemeBuilder,\n\textensionsv1beta1.SchemeBuilder,\n\timagepolicyv1alpha1.SchemeBuilder,\n\tnetworkingv1.SchemeBuilder,\n\tpolicyv1beta1.SchemeBuilder,\n\trbacv1alpha1.SchemeBuilder,\n\trbacv1beta1.SchemeBuilder,\n\trbacv1.SchemeBuilder,\n\tschedulingv1alpha1.SchemeBuilder,\n\tschedulingv1beta1.SchemeBuilder,\n\tschedulingv1.SchemeBuilder,\n\tsettingsv1alpha1.SchemeBuilder,\n\tstoragev1alpha1.SchemeBuilder,\n\tstoragev1beta1.SchemeBuilder,\n\tstoragev1.SchemeBuilder,\n}\n\nfunc TestRoundTripExternalTypes(t *testing.T) {\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tfor _, builder := range groups {\n\t\trequire.NoError(t, builder.AddToScheme(scheme))\n\t}\n\tseed := rand.Int63()\n\t\/\/ I'm only using the generic fuzzer funcs, but at some point in time we might need to\n\t\/\/ switch to specialized. For now we're happy with the current serialization test.\n\tfuzzer := fuzzer.FuzzerFor(genericfuzzer.Funcs, rand.NewSource(seed), codecs)\n\n\troundtrip.RoundTripExternalTypes(t, scheme, codecs, fuzzer, nil)\n}\n\nfunc TestCompatibility(t *testing.T) {\n\tscheme := runtime.NewScheme()\n\tfor _, builder := range groups {\n\t\trequire.NoError(t, builder.AddToScheme(scheme))\n\t}\n\troundtrip.NewCompatibilityTestOptions(scheme).Complete(t).Run(t)\n}\nAdd missing API groups to compatibility test\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage testing\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n\n\tadmissionv1beta1 \"k8s.io\/api\/admission\/v1beta1\"\n\tadmissionregv1beta1 \"k8s.io\/api\/admissionregistration\/v1beta1\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tappsv1beta1 \"k8s.io\/api\/apps\/v1beta1\"\n\tappsv1beta2 \"k8s.io\/api\/apps\/v1beta2\"\n\tauthenticationv1 \"k8s.io\/api\/authentication\/v1\"\n\tauthenticationv1beta1 \"k8s.io\/api\/authentication\/v1beta1\"\n\tauthorizationv1 \"k8s.io\/api\/authorization\/v1\"\n\tauthorizationv1beta1 \"k8s.io\/api\/authorization\/v1beta1\"\n\tautoscalingv1 \"k8s.io\/api\/autoscaling\/v1\"\n\tautoscalingv2beta1 \"k8s.io\/api\/autoscaling\/v2beta1\"\n\tautoscalingv2beta2 \"k8s.io\/api\/autoscaling\/v2beta2\"\n\tbatchv1 \"k8s.io\/api\/batch\/v1\"\n\tbatchv1beta1 \"k8s.io\/api\/batch\/v1beta1\"\n\tbatchv2alpha1 \"k8s.io\/api\/batch\/v2alpha1\"\n\tcertificatesv1beta1 \"k8s.io\/api\/certificates\/v1beta1\"\n\tcoordinationv1 \"k8s.io\/api\/coordination\/v1\"\n\tcoordinationv1beta1 \"k8s.io\/api\/coordination\/v1beta1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\teventsv1beta1 \"k8s.io\/api\/events\/v1beta1\"\n\textensionsv1beta1 \"k8s.io\/api\/extensions\/v1beta1\"\n\timagepolicyv1alpha1 \"k8s.io\/api\/imagepolicy\/v1alpha1\"\n\tnetworkingv1 \"k8s.io\/api\/networking\/v1\"\n\tnetworkingv1beta1 \"k8s.io\/api\/networking\/v1beta1\"\n\tnodev1alpha1 \"k8s.io\/api\/node\/v1alpha1\"\n\tnodev1beta1 \"k8s.io\/api\/node\/v1beta1\"\n\tpolicyv1beta1 \"k8s.io\/api\/policy\/v1beta1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\trbacv1alpha1 \"k8s.io\/api\/rbac\/v1alpha1\"\n\trbacv1beta1 \"k8s.io\/api\/rbac\/v1beta1\"\n\tschedulingv1 \"k8s.io\/api\/scheduling\/v1\"\n\tschedulingv1alpha1 \"k8s.io\/api\/scheduling\/v1alpha1\"\n\tschedulingv1beta1 \"k8s.io\/api\/scheduling\/v1beta1\"\n\tsettingsv1alpha1 \"k8s.io\/api\/settings\/v1alpha1\"\n\tstoragev1 \"k8s.io\/api\/storage\/v1\"\n\tstoragev1alpha1 \"k8s.io\/api\/storage\/v1alpha1\"\n\tstoragev1beta1 \"k8s.io\/api\/storage\/v1beta1\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/fuzzer\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/roundtrip\"\n\tgenericfuzzer \"k8s.io\/apimachinery\/pkg\/apis\/meta\/fuzzer\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\"\n)\n\nvar groups = []runtime.SchemeBuilder{\n\tadmissionv1beta1.SchemeBuilder,\n\tadmissionregv1beta1.SchemeBuilder,\n\tappsv1beta1.SchemeBuilder,\n\tappsv1beta2.SchemeBuilder,\n\tappsv1.SchemeBuilder,\n\tauthenticationv1beta1.SchemeBuilder,\n\tauthenticationv1.SchemeBuilder,\n\tauthorizationv1beta1.SchemeBuilder,\n\tauthorizationv1.SchemeBuilder,\n\tautoscalingv1.SchemeBuilder,\n\tautoscalingv2beta1.SchemeBuilder,\n\tautoscalingv2beta2.SchemeBuilder,\n\tbatchv2alpha1.SchemeBuilder,\n\tbatchv1beta1.SchemeBuilder,\n\tbatchv1.SchemeBuilder,\n\tcertificatesv1beta1.SchemeBuilder,\n\tcoordinationv1.SchemeBuilder,\n\tcoordinationv1beta1.SchemeBuilder,\n\tcorev1.SchemeBuilder,\n\teventsv1beta1.SchemeBuilder,\n\textensionsv1beta1.SchemeBuilder,\n\timagepolicyv1alpha1.SchemeBuilder,\n\tnetworkingv1.SchemeBuilder,\n\tnetworkingv1beta1.SchemeBuilder,\n\tnodev1alpha1.SchemeBuilder,\n\tnodev1beta1.SchemeBuilder,\n\tpolicyv1beta1.SchemeBuilder,\n\trbacv1alpha1.SchemeBuilder,\n\trbacv1beta1.SchemeBuilder,\n\trbacv1.SchemeBuilder,\n\tschedulingv1alpha1.SchemeBuilder,\n\tschedulingv1beta1.SchemeBuilder,\n\tschedulingv1.SchemeBuilder,\n\tsettingsv1alpha1.SchemeBuilder,\n\tstoragev1alpha1.SchemeBuilder,\n\tstoragev1beta1.SchemeBuilder,\n\tstoragev1.SchemeBuilder,\n}\n\nfunc TestRoundTripExternalTypes(t *testing.T) {\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tfor _, builder := range groups {\n\t\trequire.NoError(t, builder.AddToScheme(scheme))\n\t}\n\tseed := rand.Int63()\n\t\/\/ I'm only using the generic fuzzer funcs, but at some point in time we might need to\n\t\/\/ switch to specialized. For now we're happy with the current serialization test.\n\tfuzzer := fuzzer.FuzzerFor(genericfuzzer.Funcs, rand.NewSource(seed), codecs)\n\n\troundtrip.RoundTripExternalTypes(t, scheme, codecs, fuzzer, nil)\n}\n\nfunc TestCompatibility(t *testing.T) {\n\tscheme := runtime.NewScheme()\n\tfor _, builder := range groups {\n\t\trequire.NoError(t, builder.AddToScheme(scheme))\n\t}\n\troundtrip.NewCompatibilityTestOptions(scheme).Complete(t).Run(t)\n}\n<|endoftext|>"} {"text":"package content\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/system\/api\"\n)\n\n\/\/ Referenceable enures there is a way to reference the implenting type from\n\/\/ within another type's editor and from type-scoped API calls\ntype Referenceable interface {\n\tReferenced() []byte\n}\n\n\/\/ Select returns the []byte of a \"+\n \"<\/form><\/html>\", jsontype.Object.Counter)\n\tjsontype.Object.Counter++\n}\n\nfunc bruneHandler(w http.ResponseWriter, r *http.Request) {\n fmt.Fprintf(w, \"%v<\/em>
\"+\n \"\"+\n \"<\/form><\/html>\", jsontype.Object.Counter)\n\tjsontype.Object.Counter++\n\tdata, err := json.Marshal(jsontype)\n\terr = ioutil.WriteFile(\"config.json\", data, 755)\n\tif err != nil {\n\t\tfmt.Println(\"Can't write file.\")\n\t\treturn\n\t}\n}\n\ntype jsonobject struct {\n Object ObjectType\n}\n\ntype ObjectType struct {\n\tCounter int64\n}\n\nfunc main() {\n\tfile, e := ioutil.ReadFile(\"config.json\")\n if e != nil {\n fmt.Printf(\"File error: %v\\n\", e)\n\t\treturn\n }\n json.Unmarshal(file, &jsontype)\n fmt.Printf(\"Brunes counted: %v\", jsontype)\n fmt.Printf(\"%s\\n\", string(file))\n http.HandleFunc(\"\/\", handler)\n\thttp.HandleFunc(\"\/Blame Brune\", bruneHandler)\n http.ListenAndServe(\":80\", nil)\n\tfmt.Print(jsontype)\n}\nRemove printing.package main\n\nimport (\n \"fmt\"\n\t\"encoding\/json\"\n \"net\/http\"\n\t\"io\/ioutil\"\n)\n\nvar file []byte\nvar jsontype jsonobject\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n fmt.Fprintf(w, \"%v<\/em>\"+\n \"\"+\n \"<\/form><\/html>\", jsontype.Object.Counter)\n\tjsontype.Object.Counter++\n}\n\nfunc bruneHandler(w http.ResponseWriter, r *http.Request) {\n fmt.Fprintf(w, \"%v<\/em>\"+\n \"\"+\n \"<\/form><\/html>\", jsontype.Object.Counter)\n\tjsontype.Object.Counter++\n\tdata, err := json.Marshal(jsontype)\n\terr = ioutil.WriteFile(\"config.json\", data, 755)\n\tif err != nil {\n\t\tfmt.Println(\"Can't write file.\")\n\t\treturn\n\t}\n}\n\ntype jsonobject struct {\n Object ObjectType\n}\n\ntype ObjectType struct {\n\tCounter int64\n}\n\nfunc main() {\n\tfile, e := ioutil.ReadFile(\"config.json\")\n if e != nil {\n fmt.Printf(\"File error: %v\\n\", e)\n\t\treturn\n }\n json.Unmarshal(file, &jsontype)\n fmt.Printf(\"Brunes counted: %v\", jsontype)\n http.HandleFunc(\"\/\", handler)\n\thttp.HandleFunc(\"\/Blame Brune\", bruneHandler)\n http.ListenAndServe(\":80\", nil)\n\tfmt.Print(jsontype)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/xiegeo\/fensan\/bitset\"\n\t\"github.com\/xiegeo\/fensan\/hashtree\"\n\t\"github.com\/xiegeo\/fensan\/pb\"\n\t\"github.com\/xiegeo\/fensan\/pconn\"\n\t\"github.com\/xiegeo\/fensan\/store\"\n)\n\n\/\/make sure go get gets every sub package\nvar _ = bitset.CHECK_INTEX\nvar _ = hashtree.HashSize\nvar _ = &pb.StaticId{}\nvar _ = pconn.SendBytes\nvar _ = store.FileNone\n\nfunc main() {\n\tbuildProtoBuf()\n\ttestCode(\"bitset\")\n\ttestCode(\"hashtree\")\n\ttestCode(\"pb\")\n\ttestCode(\"pconn\")\n\ttestCode(\"store\")\n\tfmt.Println(\"\\n\\ndone all builds and tests\")\n}\n\nfunc buildProtoBuf() {\n\tdir = \"pb\"\n\tdefer func() { dir = \"\" }()\n\terr := doHiddenCmd(exec.Command(\"go\", \"test\", \"-v\"))\n\tif err != nil {\n\t\tfmt.Println(\"rebuilding .pb.go files\")\n\t\terr := doCmd(exec.Command(\"protoc\", \"--gogo_out=.\", \"*.proto\"))\n\t\tif err == nil {\n\t\t\tfmt.Println(\"rebuilded .pb.go files \")\n\t\t}\n\t}\n}\n\nfunc testCode(packageName string) {\n\tdir = packageName\n\tdefer func() { dir = \"\" }()\n\tnoErr(doCmd(exec.Command(\"go\", \"test\", \"-v\")))\n}\n\nfunc noErr(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar dir = \"\"\n\nfunc doCmd(cmd *exec.Cmd) error {\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tfmt.Println(cmd.Path)\n\t\tfmt.Println(cmd.Args)\n\t\tfmt.Printf(\"%s\\n\", out)\n\t\tfmt.Printf(\"error:%v\\n\", err)\n\t}\n\treturn err\n}\n\nfunc doHiddenCmd(cmd *exec.Cmd) error {\n\tcmd.Dir = dir\n\t_, err := cmd.CombinedOutput()\n\treturn err\n}\nbetter error message, and make it stop therepackage main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/xiegeo\/fensan\/bitset\"\n\t\"github.com\/xiegeo\/fensan\/hashtree\"\n\t\"github.com\/xiegeo\/fensan\/pb\"\n\t\"github.com\/xiegeo\/fensan\/pconn\"\n\t\"github.com\/xiegeo\/fensan\/store\"\n)\n\n\/\/make sure go get gets every sub package\nvar _ = bitset.CHECK_INTEX\nvar _ = hashtree.HashSize\nvar _ = &pb.StaticId{}\nvar _ = pconn.SendBytes\nvar _ = store.FileNone\n\nfunc main() {\n\tbuildProtoBuf()\n\ttestCode(\"bitset\")\n\ttestCode(\"hashtree\")\n\ttestCode(\"pb\")\n\ttestCode(\"pconn\")\n\ttestCode(\"store\")\n\tfmt.Println(\"\\n\\ndone all builds and tests\")\n}\n\nfunc buildProtoBuf() {\n\tdir = \"pb\"\n\tdefer func() { dir = \"\" }()\n\terr := doHiddenCmd(exec.Command(\"go\", \"test\", \"-v\"))\n\tif err != nil {\n\t\tfmt.Println(\"rebuilding .pb.go files\")\n\t\terr := doCmd(exec.Command(\"protoc\", \"--gogo_out=.\", \"*.proto\"))\n\t\tif err != nil {\n\t\t\tpanic(\"can't rebuild, see code.google.com\/p\/gogoprotobuf\/\")\n\t\t}\n\t\tif err == nil {\n\t\t\tfmt.Println(\"rebuilded .pb.go files \")\n\t\t}\n\t}\n}\n\nfunc testCode(packageName string) {\n\tdir = packageName\n\tdefer func() { dir = \"\" }()\n\tnoErr(doCmd(exec.Command(\"go\", \"test\", \"-v\")))\n}\n\nfunc noErr(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar dir = \"\"\n\nfunc doCmd(cmd *exec.Cmd) error {\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tfmt.Println(cmd.Path)\n\t\tfmt.Println(cmd.Args)\n\t\tfmt.Printf(\"%s\\n\", out)\n\t\tfmt.Printf(\"error:%v\\n\", err)\n\t}\n\treturn err\n}\n\nfunc doHiddenCmd(cmd *exec.Cmd) error {\n\tcmd.Dir = dir\n\t_, err := cmd.CombinedOutput()\n\treturn err\n}\n<|endoftext|>"} {"text":"package static\n\nimport (\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc Build(o Options, h http.Handler, paths []string, eh EventHandler) error {\n\tvar wg sync.WaitGroup\n\n\tpathsChan := make(chan string)\n\n\tfor i := 0; i < o.Concurrency; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tbuildPaths(o, h, pathsChan, eh)\n\t\t}()\n\t}\n\n\tfor _, path := range paths {\n\t\tpathsChan <- path\n\t}\n\n\tclose(pathsChan)\n\n\twg.Wait()\n\n\treturn nil\n}\n\nfunc buildPaths(o Options, h http.Handler, paths <-chan string, eh EventHandler) {\n\tfor path := range paths {\n\t\terr := BuildSingle(o, h, path)\n\t\tif eh != nil {\n\t\t\teh(Event{Action: \"build\", Path: path, Error: err})\n\t\t}\n\t}\n}\n\nfunc BuildSingle(o Options, h http.Handler, path string) error {\n\t_, err := os.Stat(o.OutputDir)\n\tif os.IsNotExist(err) {\n\t\terr = os.MkdirAll(o.OutputDir, 0755)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfp := o.OutputDir + path\n\tif strings.HasSuffix(fp, \"\/\") {\n\t\tfp += o.DirFilename\n\t}\n\n\tf, err := os.Create(fp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tr, err := http.NewRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\trw := newResponseWriter(f)\n\th.ServeHTTP(&rw, r)\n\n\treturn nil\n}\nRemoved the `error` return value from Build.package static\n\nimport (\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc Build(o Options, h http.Handler, paths []string, eh EventHandler) {\n\tvar wg sync.WaitGroup\n\n\tpathsChan := make(chan string)\n\n\tfor i := 0; i < o.Concurrency; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tbuildPaths(o, h, pathsChan, eh)\n\t\t}()\n\t}\n\n\tfor _, path := range paths {\n\t\tpathsChan <- path\n\t}\n\n\tclose(pathsChan)\n\n\twg.Wait()\n}\n\nfunc buildPaths(o Options, h http.Handler, paths <-chan string, eh EventHandler) {\n\tfor path := range paths {\n\t\terr := BuildSingle(o, h, path)\n\t\tif eh != nil {\n\t\t\teh(Event{Action: \"build\", Path: path, Error: err})\n\t\t}\n\t}\n}\n\nfunc BuildSingle(o Options, h http.Handler, path string) error {\n\t_, err := os.Stat(o.OutputDir)\n\tif os.IsNotExist(err) {\n\t\terr = os.MkdirAll(o.OutputDir, 0755)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfp := o.OutputDir + path\n\tif strings.HasSuffix(fp, \"\/\") {\n\t\tfp += o.DirFilename\n\t}\n\n\tf, err := os.Create(fp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tr, err := http.NewRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\trw := newResponseWriter(f)\n\th.ServeHTTP(&rw, r)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/+build ignore\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar packages = []string{\"cli\", \"altsrc\"}\n\nfunc main() {\n\tapp := cli.NewApp()\n\n\tapp.Name = \"builder\"\n\tapp.Usage = \"Generates a new urfave\/cli build!\"\n\n\tapp.Commands = cli.Commands{\n\t\tcli.Command{\n\t\t\tName: \"vet\",\n\t\t\tAction: VetActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"test\",\n\t\t\tAction: TestActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"gfmrun\",\n\t\t\tAction: GfmrunActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"toc\",\n\t\t\tAction: TocActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"generate\",\n\t\t\tAction: GenActionFunc,\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc runCmd(args ...string) error {\n\tvar cmd *exec.Cmd\n\tif len(args) > 1 {\n\t\tcmd = exec.Command(args[0], args[1:]...)\n\t} else {\n\t\tcmd = exec.Command(args[0])\n\t}\n\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}\n\nfunc VetActionFunc(_ *cli.Context) error {\n\treturn runCmd(\"go\", \"vet\")\n}\n\nfunc TestActionFunc(c *cli.Context) error {\n\tfor _, pkg := range packages {\n\t\tvar packageName string\n\n\t\tif pkg == \"cli\" {\n\t\t\tpackageName = \"github.com\/urfave\/cli\"\n\t\t} else {\n\t\t\tpackageName = fmt.Sprintf(\"github.com\/urfave\/cli\/%s\", pkg)\n\t\t}\n\n\t\tcoverProfile := fmt.Sprintf(\"--coverprofile=%s.coverprofile\", pkg)\n\n\t\terr := runCmd(\"go\", \"test\", \"-v\", coverProfile, packageName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn testCleanup()\n}\n\nfunc testCleanup() error {\n\tvar out bytes.Buffer\n\n\tfor _, pkg := range packages {\n\t\tfile, err := os.Open(fmt.Sprintf(\"%s.coverprofile\", pkg))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tout.Write(b)\n\t\terr = file.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = os.Remove(fmt.Sprintf(\"%s.coverprofile\", pkg))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\toutFile, err := os.Create(\"coverage.txt\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = out.WriteTo(outFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = outFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GfmrunActionFunc(_ *cli.Context) error {\n\tfile, err := os.Open(\"README.md\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar counter int\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), \"package main\") {\n\t\t\tcounter++\n\t\t}\n\t}\n\n\terr = scanner.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runCmd(\"gfmrun\", \"-c\", fmt.Sprint(counter), \"-s\", \"README.md\")\n}\n\nfunc TocActionFunc(_ *cli.Context) error {\n\terr := runCmd(\"node_modules\/.bin\/markdown-toc\", \"-i\", \"README.md\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"git\", \"diff\", \"--exit-code\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GenActionFunc(_ *cli.Context) error {\n\terr := runCmd(\"go\", \"generate\", \"flag-gen\/main.go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"go\", \"generate\", \"cli.go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"git\", \"diff\", \"--exit-code\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\nRevert \"check length\"\/\/+build ignore\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar packages = []string{\"cli\", \"altsrc\"}\n\nfunc main() {\n\tapp := cli.NewApp()\n\n\tapp.Name = \"builder\"\n\tapp.Usage = \"Generates a new urfave\/cli build!\"\n\n\tapp.Commands = cli.Commands{\n\t\tcli.Command{\n\t\t\tName: \"vet\",\n\t\t\tAction: VetActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"test\",\n\t\t\tAction: TestActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"gfmrun\",\n\t\t\tAction: GfmrunActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"toc\",\n\t\t\tAction: TocActionFunc,\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"generate\",\n\t\t\tAction: GenActionFunc,\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc runCmd(args ...string) error {\n\tcmd := exec.Command(args[0], args[1:]...)\n\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}\n\nfunc VetActionFunc(_ *cli.Context) error {\n\treturn runCmd(\"go\", \"vet\")\n}\n\nfunc TestActionFunc(c *cli.Context) error {\n\tfor _, pkg := range packages {\n\t\tvar packageName string\n\n\t\tif pkg == \"cli\" {\n\t\t\tpackageName = \"github.com\/urfave\/cli\"\n\t\t} else {\n\t\t\tpackageName = fmt.Sprintf(\"github.com\/urfave\/cli\/%s\", pkg)\n\t\t}\n\n\t\tcoverProfile := fmt.Sprintf(\"--coverprofile=%s.coverprofile\", pkg)\n\n\t\terr := runCmd(\"go\", \"test\", \"-v\", coverProfile, packageName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn testCleanup()\n}\n\nfunc testCleanup() error {\n\tvar out bytes.Buffer\n\n\tfor _, pkg := range packages {\n\t\tfile, err := os.Open(fmt.Sprintf(\"%s.coverprofile\", pkg))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tout.Write(b)\n\t\terr = file.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = os.Remove(fmt.Sprintf(\"%s.coverprofile\", pkg))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\toutFile, err := os.Create(\"coverage.txt\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = out.WriteTo(outFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = outFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GfmrunActionFunc(_ *cli.Context) error {\n\tfile, err := os.Open(\"README.md\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar counter int\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), \"package main\") {\n\t\t\tcounter++\n\t\t}\n\t}\n\n\terr = scanner.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runCmd(\"gfmrun\", \"-c\", fmt.Sprint(counter), \"-s\", \"README.md\")\n}\n\nfunc TocActionFunc(_ *cli.Context) error {\n\terr := runCmd(\"node_modules\/.bin\/markdown-toc\", \"-i\", \"README.md\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"git\", \"diff\", \"--exit-code\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GenActionFunc(_ *cli.Context) error {\n\terr := runCmd(\"go\", \"generate\", \"flag-gen\/main.go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"go\", \"generate\", \"cli.go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runCmd(\"git\", \"diff\", \"--exit-code\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package postgresql_access\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/lib\/pq\"\n)\n\ntype DatabaseInfo struct {\n\tHost string\n\tPort int\n\tUsername string\n\tPassword string\n\tDbname string\n}\n\ntype Configuration struct {\n\tDb DatabaseInfo\n}\n\nfunc AutoConnect() (*sql.DB, error) {\n\tpath := os.Getenv(\"GOPATH\")\n\treturn ConfigFilePathAutoConnect(path + \"\/configs\/config.json\")\n}\n\nfunc ConfigNameAutoConnect(config_name string) (*sql.DB, error) {\n\tpath := os.Getenv(\"GOPATH\")\n\treturn ConfigFilePathAutoConnect(path + \"\/configs\/\" + config_name)\n}\n\nfunc ConfigFilePathAutoConnect(config_path string) (*sql.DB, error) {\n\tvar err error\n\n\tconfig_file, err := os.Open(config_path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Getting the database sql.DB pointer using the config_file\n\tdb, err := GetDatabaseConnection(config_file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Testing the connections to verify we have connected to the database\n\t_, err = TestDatabaseConnection(db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc GetDatabaseConnection(config_file *os.File) (*sql.DB, error) {\n\tvar err error\n\tvar config_struct Configuration\n\n\t\/\/decoding json config_file and setting it to the config_struct\n\tdecoder := json.NewDecoder(config_file)\n\terr = decoder.Decode(&config_struct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Setup database connection\n\tdb_url := fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s\", config_struct.Db.Username, config_struct.Db.Password, config_struct.Db.Host, config_struct.Db.Dbname)\n\tdb, err := sql.Open(\"postgres\", db_url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc ConnectToDatabase(dbname string, host string, port int, username string, password string) *sql.DB {\n\tdb_url := fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s\", username, password, host, dbname)\n\tdb, err := sql.Open(\"postgres\", db_url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t\/\/return db\n}\n\nfunc TestDatabaseConnection(db *sql.DB) (*sql.Rows, error) {\n\t\/\/Testing for connectivity\n\tvar err error\n\tresp, err := db.Query(\"select version()\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\nfunc CreateDatabase(db *sql.DB, create_database_sql string) error {\n\tvar err error\n\n\t_, err = db.Query(create_database_sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc CreateDatabaseTable(db *sql.DB, create_table_sql string) error {\n\tvar err error\n\n\t\/\/Query to create table with the sql passed\n\t_, err = db.Query(create_table_sql)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\t\/\/Check if table already exists\n\t\t\t\/\/Error code 42P07 is for relation already exists\n\t\t\tif err.Code != \"42P07\" {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc InsertSingleDataValue(db *sql.DB, table_name string, table_columns []string, data []interface{}) error {\n\n\t\/\/ Transaction Begins and must end with a commit or rollback\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Preparing statement with the table name and columns passed\n\tstatement, err := transaction.Prepare(pq.CopyIn(table_name, table_columns...))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Inserting Single Data row into the statement\n\t_, err = statement.Exec(data...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/*\n\t\t_, err = statement.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\n\t\/\/ Closing the connection of the statement\n\terr = statement.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Commiting and closing the transaction saving changes we have made in the database\n\terr = transaction.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc InsertMultiDataValues(db *sql.DB, table_name string, table_columns []string, data [][]interface{}) error {\n\t\/\/ Transaction Begins and must end with a commit or rollback\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Preparing statement with the table name and columns passed\n\tstatement, err := transaction.Prepare(pq.CopyIn(table_name, table_columns...))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Looping though all the data rows passed\n\tfor _, data_row := range data {\n\t\t\/\/ Inserting Single Data row into the statement\n\t\t_, err = statement.Exec(data_row...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/*\n\t\t_, err = stmt.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\n\t\/\/ Closing the connection of the statement\n\terr = statement.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Commiting and closing the transaction saving changes we have made in the database\n\terr = transaction.Commit()\n\tif err != nil {\n\t\ttransaction.Rollback()\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc QueryDatabase(db *sql.DB, sql_statment string) ([][]interface{}, int, error) {\n\tvar rowValues [][]interface{}\n\tvar count int = 0\n\n\t\/\/Sends the sql statement to the database and retures a set of rows\n\trows, err := db.Query(sql_statment)\n\n\tdefer rows.Close()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/\/Gets the Columns for the row set\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/\/ While there is a next row\n\tfor rows.Next() {\n\n\t\t\/\/ making an interface array with the size of columns there are\n\t\tvals := make([]interface{}, len(cols))\n\n\t\t\/\/ Loops though the columns defines the variable types\n\t\tfor i, _ := range cols {\n\t\t\tvals[i] = new(sql.RawBytes)\n\t\t}\n\n\t\t\/\/ Scanes he row and fills it with the row values for each column\n\t\terr = rows.Scan(vals...)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\n\t\t\/\/ Loops though again to convert raw bytes to string vlaues\n\t\tfor i, val := range vals {\n\t\t\tif raw_bytes, ok := val.(*sql.RawBytes); ok {\n\t\t\t\tvals[i] = (string(*raw_bytes))\n\t\t\t\t*raw_bytes = nil \/\/ reset pointer to discard current value to avoid a bug\n\t\t\t}\n\t\t}\n\t\t\/\/ Added string array to list of already converted arrays and adds it to the count\n\t\trowValues = append(rowValues, vals)\n\t\tcount++\n\t}\n\treturn rowValues, count, nil\n}\n\nfunc ConvertToStringArray(arr [][]interface{}) string {\n\tvar stringArray string = \"ARRAY[\"\n\tfor x, OuterArr := range arr {\n\t\tif x != 0 {\n\t\t\tstringArray += \",\"\n\t\t}\n\t\tstringArray += \"[\"\n\t\tfor i, Value := range OuterArr {\n\t\t\tif i != 0 {\n\t\t\t\tstringArray += \",\"\n\t\t\t}\n\t\t\tstringArray += \"'\" + Value.(string) + \"'\"\n\t\t}\n\t\tstringArray += \"]\"\n\t}\n\n\tstringArray += \"]\"\n\treturn stringArray\n}\nFinally fix return errorpackage postgresql_access\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/lib\/pq\"\n)\n\ntype DatabaseInfo struct {\n\tHost string\n\tPort int\n\tUsername string\n\tPassword string\n\tDbname string\n}\n\ntype Configuration struct {\n\tDb DatabaseInfo\n}\n\nfunc AutoConnect() (*sql.DB, error) {\n\tpath := os.Getenv(\"GOPATH\")\n\treturn ConfigFilePathAutoConnect(path + \"\/configs\/config.json\")\n}\n\nfunc ConfigNameAutoConnect(config_name string) (*sql.DB, error) {\n\tpath := os.Getenv(\"GOPATH\")\n\treturn ConfigFilePathAutoConnect(path + \"\/configs\/\" + config_name)\n}\n\nfunc ConfigFilePathAutoConnect(config_path string) (*sql.DB, error) {\n\tvar err error\n\n\tconfig_file, err := os.Open(config_path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Getting the database sql.DB pointer using the config_file\n\tdb, err := GetDatabaseConnection(config_file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Testing the connections to verify we have connected to the database\n\t_, err = TestDatabaseConnection(db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc GetDatabaseConnection(config_file *os.File) (*sql.DB, error) {\n\tvar err error\n\tvar config_struct Configuration\n\n\t\/\/decoding json config_file and setting it to the config_struct\n\tdecoder := json.NewDecoder(config_file)\n\terr = decoder.Decode(&config_struct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Setup database connection\n\tdb_url := fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s\", config_struct.Db.Username, config_struct.Db.Password, config_struct.Db.Host, config_struct.Db.Dbname)\n\tdb, err := sql.Open(\"postgres\", db_url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc ConnectToDatabase(dbname string, host string, port int, username string, password string) *sql.DB {\n\tdb_url := fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s\", username, password, host, dbname)\n\tdb, err := sql.Open(\"postgres\", db_url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn db\n}\n\nfunc TestDatabaseConnection(db *sql.DB) (*sql.Rows, error) {\n\t\/\/Testing for connectivity\n\tvar err error\n\tresp, err := db.Query(\"select version()\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\nfunc CreateDatabase(db *sql.DB, create_database_sql string) error {\n\tvar err error\n\n\t_, err = db.Query(create_database_sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc CreateDatabaseTable(db *sql.DB, create_table_sql string) error {\n\tvar err error\n\n\t\/\/Query to create table with the sql passed\n\t_, err = db.Query(create_table_sql)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\t\/\/Check if table already exists\n\t\t\t\/\/Error code 42P07 is for relation already exists\n\t\t\tif err.Code != \"42P07\" {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc InsertSingleDataValue(db *sql.DB, table_name string, table_columns []string, data []interface{}) error {\n\n\t\/\/ Transaction Begins and must end with a commit or rollback\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Preparing statement with the table name and columns passed\n\tstatement, err := transaction.Prepare(pq.CopyIn(table_name, table_columns...))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Inserting Single Data row into the statement\n\t_, err = statement.Exec(data...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/*\n\t\t_, err = statement.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\n\t\/\/ Closing the connection of the statement\n\terr = statement.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Commiting and closing the transaction saving changes we have made in the database\n\terr = transaction.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc InsertMultiDataValues(db *sql.DB, table_name string, table_columns []string, data [][]interface{}) error {\n\t\/\/ Transaction Begins and must end with a commit or rollback\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Preparing statement with the table name and columns passed\n\tstatement, err := transaction.Prepare(pq.CopyIn(table_name, table_columns...))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Looping though all the data rows passed\n\tfor _, data_row := range data {\n\t\t\/\/ Inserting Single Data row into the statement\n\t\t_, err = statement.Exec(data_row...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/*\n\t\t_, err = stmt.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\n\t\/\/ Closing the connection of the statement\n\terr = statement.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Commiting and closing the transaction saving changes we have made in the database\n\terr = transaction.Commit()\n\tif err != nil {\n\t\ttransaction.Rollback()\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc QueryDatabase(db *sql.DB, sql_statment string) ([][]interface{}, int, error) {\n\tvar rowValues [][]interface{}\n\tvar count int = 0\n\n\t\/\/Sends the sql statement to the database and retures a set of rows\n\trows, err := db.Query(sql_statment)\n\n\tdefer rows.Close()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/\/Gets the Columns for the row set\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/\/ While there is a next row\n\tfor rows.Next() {\n\n\t\t\/\/ making an interface array with the size of columns there are\n\t\tvals := make([]interface{}, len(cols))\n\n\t\t\/\/ Loops though the columns defines the variable types\n\t\tfor i, _ := range cols {\n\t\t\tvals[i] = new(sql.RawBytes)\n\t\t}\n\n\t\t\/\/ Scanes he row and fills it with the row values for each column\n\t\terr = rows.Scan(vals...)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\n\t\t\/\/ Loops though again to convert raw bytes to string vlaues\n\t\tfor i, val := range vals {\n\t\t\tif raw_bytes, ok := val.(*sql.RawBytes); ok {\n\t\t\t\tvals[i] = (string(*raw_bytes))\n\t\t\t\t*raw_bytes = nil \/\/ reset pointer to discard current value to avoid a bug\n\t\t\t}\n\t\t}\n\t\t\/\/ Added string array to list of already converted arrays and adds it to the count\n\t\trowValues = append(rowValues, vals)\n\t\tcount++\n\t}\n\treturn rowValues, count, nil\n}\n\nfunc ConvertToStringArray(arr [][]interface{}) string {\n\tvar stringArray string = \"ARRAY[\"\n\tfor x, OuterArr := range arr {\n\t\tif x != 0 {\n\t\t\tstringArray += \",\"\n\t\t}\n\t\tstringArray += \"[\"\n\t\tfor i, Value := range OuterArr {\n\t\t\tif i != 0 {\n\t\t\t\tstringArray += \",\"\n\t\t\t}\n\t\t\tstringArray += \"'\" + Value.(string) + \"'\"\n\t\t}\n\t\tstringArray += \"]\"\n\t}\n\n\tstringArray += \"]\"\n\treturn stringArray\n}\n<|endoftext|>"} {"text":"package spec\n\nimport (\n\t\"encoding\/json\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\t\"fmt\"\n)\n\ntype KafkaCluster struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tMetadata metav1.ObjectMeta `json:\"metadata\"`\n\t\/\/APIVersion string `json:\"apiVersion\"`\n\t\/\/Kind string `json:\"kind\"`\n\tSpec KafkaClusterSpec `json:\"spec\"`\n}\n\ntype KafkaClusterList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tMetadata metav1.ListMeta `json:\"metadata\"`\n\n\tItems []KafkaCluster `json:\"items\"`\n}\n\ntype KafkaClusterSpec struct {\n\t\/\/Amount of Broker Nodes\n\tImage string `json:\"image\"`\n\tBrokerCount int32 `json:\"brokerCount\"`\n\tResources ResourceSpec `json:\"resources\"`\n\tKafkaOptions KafkaOption `json:\"kafkaOptions\"`\n\tjmxSidecar bool `json:\"jmxSidecar\"`\n\tTopics []KafkaTopicSpec `json:\"topics\"`\n\tZookeeperConnect string `json:\"zookeeperConnect\"`\n\tNodeSelector map[string]string `json:\"nodeSelector,omitempty\"`\n\tStorageClass string `json:\"storageClass\"` \/\/TODO use k8s type?\n\n\t\/\/ Toleration time if node is down\/unreachable\/not ready before moving to a new net\n\t\/\/ Set to 0 to disable moving to all together.\n\tMinimumGracePeriod int64 `json:\"minimumGracePeriod\"`\n\n\tLeaderImbalanceRatio float32 `json:\"leaderImbalanceRatio\"`\n\tLeaderImbalanceInterval int32 `json:\"leaderImbalanceInterval\"`\n}\n\n\/\/TODO refactor to just use native k8s types\ntype ResourceSpec struct {\n\tMemory string `json:\"memory\"`\n\tDiskSpace string `json:\"diskSpace\"`\n\tCPU string `json:\"cpu\"`\n}\n\ntype KafkaBrokerSpec struct {\n\tBrokerID int32 `json:\"brokerID\"`\n\n\tClientPort int32 `json:\"clientPort\"`\n\tTopics map[string]string `json:\"topics\"`\n}\n\ntype KafkaTopicSpec struct {\n\tName string `json:\"name\"`\n\tPartitions int32 `json:\"partitions\"`\n\tReplicationFactor int32 `json:\"replicationFactor\"`\n}\n\ntype KafkaClusterWatchEvent struct {\n\tType string `json:\"type\"`\n\tObject KafkaCluster `json:\"object\"`\n\tOldObject KafkaCluster `json:\"oldObject\"`\n}\n\ntype KafkaOption struct {\n\tLogRetentionHours int `json:\"logRetentionHours\"`\n\tAutoCreateTopics bool `json:\"autoCreateTopics\"`\n\tCompressionType string `json:compressionType`\n}\n\ntype KafkaReassignmentConfig struct {\n\tPartition []KafkaPartition `json:\"partition\"`\n\tVersion string `json:\"version\"`\n}\n\ntype KafkaPartition struct {\n\tPartition int32 `json:\"partition\"`\n\tReplicas []int32 `json:\"replicas\"`\n}\n\ntype KafkaTopic struct {\n\tTopic string `json:\"topic\"`\n\tPartitionFactor int32 `json:\"partition_factor\"`\n\tReplicationFactor int32 `json:\"replication_factor\"`\n\tPartitions []KafkaPartition `json:\"partitions\"`\n}\n\n\/\/No json needed since internal Event type.\ntype KafkaClusterEvent struct {\n\tType KafkaEventType\n\tCluster KafkaCluster\n\tOldCluster KafkaCluster\n}\n\ntype KafkaEventType int32\n\nconst (\n\tNEW_CLUSTER KafkaEventType = iota + 1\n\tDELTE_CLUSTER\n\tUPSIZE_CLUSTER\n\tDOWNSIZE_CLUSTER\n\tCHANGE_IMAGE\n\tCHANGE_BROKER_RESOURCES\n\tCHANGE_NAME\n\tCHANGE_ZOOKEEPER_CONNECT\n\tBROKER_CONFIG_CHANGE\n\tUNKNOWN_CHANGE\n\tDOWNSIZE_EVENT\n\t\/\/Cleanup event which get emmised after a Cluster Delete.\n\t\/\/Its ensure the deletion of the Statefulset after it has been scaled down.\n\tCLEANUP_EVENT\n\tKAKFA_EVENT\n)\n\ntype KafkaBrokerState string\n\nconst (\n\tEMPTY_BROKER KafkaBrokerState = \"EMPTYING\"\n\tREBALANCE_BROKER KafkaBrokerState = \"REBALANCING\"\n\tNORMAL_STATE KafkaBrokerState = \"NORMAL\"\n)\n\n\/\/ convenience functions\nfunc PrintCluster(cluster *KafkaCluster) string {\n\treturn fmt.Sprintf(\"%s\/%s, APIVersion: %s, Kind: %s, Value: %#v\", cluster.Metadata.Namespace, cluster.Metadata.Name, cluster.APIVersion, cluster.Kind, cluster)\n}\n\n\/\/ Required to satisfy Object interface\nfunc (e *KafkaCluster) GetObjectKind() schema.ObjectKind {\n\treturn &e.TypeMeta\n}\n\n\/\/ Required to satisfy ObjectMetaAccessor interface\nfunc (e *KafkaCluster) GetObjectMeta() metav1.Object {\n\treturn &e.Metadata\n}\n\n\/\/ Required to satisfy Object interface\nfunc (el *KafkaClusterList) GetObjectKind() schema.ObjectKind {\n\treturn &el.TypeMeta\n}\n\n\/\/ Required to satisfy ListMetaAccessor interface\nfunc (el *KafkaClusterList) GetListMeta() metav1.List {\n\treturn &el.Metadata\n}\n\n\/\/Shamefull copied over from: https:\/\/github.com\/kubernetes\/client-go\/blob\/master\/examples\/third-party-resources\/types.go\n\/\/ The code below is used only to work around a known problem with third-party\n\/\/ resources and ugorji. If\/when these issues are resolved, the code below\n\/\/ should no longer be required.\n\ntype KafkaClusterListCopy KafkaClusterList\ntype KafkaClusterCopy KafkaCluster\n\nfunc (e *KafkaCluster) UnmarshalJSON(data []byte) error {\n\ttmp := KafkaClusterCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp2 := KafkaCluster(tmp)\n\t*e = tmp2\n\treturn nil\n}\n\nfunc (el *KafkaClusterList) UnmarshalJSON(data []byte) error {\n\ttmp := KafkaClusterListCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp2 := KafkaClusterList(tmp)\n\t*el = tmp2\n\treturn nil\n}\nWorking on #15: KafkaBroker Options in Specpackage spec\n\nimport (\n\t\"encoding\/json\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\t\"fmt\"\n)\n\ntype KafkaCluster struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tMetadata metav1.ObjectMeta `json:\"metadata\"`\n\t\/\/APIVersion string `json:\"apiVersion\"`\n\t\/\/Kind string `json:\"kind\"`\n\tSpec KafkaClusterSpec `json:\"spec\"`\n}\n\ntype KafkaClusterList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tMetadata metav1.ListMeta `json:\"metadata\"`\n\n\tItems []KafkaCluster `json:\"items\"`\n}\n\ntype KafkaClusterSpec struct {\n\t\/\/Amount of Broker Nodes\n\tImage string `json:\"image\"`\n\tBrokerCount int32 `json:\"brokerCount\"`\n\tResources ResourceSpec `json:\"resources\"`\n\tKafkaOptions KafkaOptions `json:\"kafkaOptions\"`\n\tjmxSidecar bool `json:\"jmxSidecar\"`\n\tTopics []KafkaTopicSpec `json:\"topics\"`\n\tZookeeperConnect string `json:\"zookeeperConnect\"`\n\tNodeSelector map[string]string `json:\"nodeSelector,omitempty\"`\n\tStorageClass string `json:\"storageClass\"` \/\/TODO use k8s type?\n\n\t\/\/ Toleration time if node is down\/unreachable\/not ready before moving to a new net\n\t\/\/ Set to 0 to disable moving to all together.\n\tMinimumGracePeriod int64 `json:\"minimumGracePeriod\"`\n\n\tLeaderImbalanceRatio float32 `json:\"leaderImbalanceRatio\"`\n\tLeaderImbalanceInterval int32 `json:\"leaderImbalanceInterval\"`\n}\n\n\n\/\/TODO refactor to just use native k8s types\ntype ResourceSpec struct {\n\tMemory string `json:\"memory\"`\n\tDiskSpace string `json:\"diskSpace\"`\n\tCPU string `json:\"cpu\"`\n}\n\ntype KafkaBrokerSpec struct {\n\tBrokerID int32 `json:\"brokerID\"`\n\n\tClientPort int32 `json:\"clientPort\"`\n\tTopics map[string]string `json:\"topics\"`\n}\n\ntype KafkaTopicSpec struct {\n\tName string `json:\"name\"`\n\tPartitions int32 `json:\"partitions\"`\n\tReplicationFactor int32 `json:\"replicationFactor\"`\n}\n\ntype KafkaClusterWatchEvent struct {\n\tType string `json:\"type\"`\n\tObject KafkaCluster `json:\"object\"`\n\tOldObject KafkaCluster `json:\"oldObject\"`\n}\n\ntype KafkaOptions struct {\n\n\t\/\/ Enables auto create of topics on the broker\n\t\/\/ Default: true\n\tAutoCreateTopicsEnable bool `json:\"autoCreateTopicsEnable\"`\n\n\t\/\/Enables auto balancing of topic leaders. Done by a background thread.\n\t\/\/ Default: true\n\tAutoLeaderRebalanceEnable bool `json:\"autoLeaderRebalanceEnable\"`\n\n\t\/\/Amount of threads for various background tasks\n\t\/\/ Default: 10\n\tBackgroundThreads int32 `json:\"backgroudThreads\"`\n\n\t\/\/Default compression type for a topic. Can be \"gzip\", \"snappy\", \"lz4\"\n\t\/\/ Default: \"gzip\"\n\tCompressionType string `json:compressionType`\n\n\t\/\/Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off\n\t\/\/ Default: false\n\tDeleteTopicEnable bool `json:\"deleteTopicEnable\"`\n\n\t\/\/The frequency with which the partition rebalance check is triggered by the controller\n\t\/\/ Default:300\n\tLeaderImbalanceCheckIntervalSeconds int32 `json:\"leaderImbalanceCheckIntervalSeconds\"`\n\n\t\/\/ The ratio of leader imbalance allowed per broker.\n\t\/\/ The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage.\n\t\/\/ Default: 10\n\tLeaderImbalanceBrokerPercentage int32 `json:\"leaderImbalanceBrokerPercentage\"`\n\n\t\/\/The number of messages accumulated on a log partition before messages are flushed to disk\n\t\/\/ Default: 9223372036854775807\n\tLogFlushIntervalMessages int64 `json:\"logFlushIntervalMessages\"`\n\n\t\/\/ The maximum time in ms that a message in any topic is kept in memory before flushed to disk.\n\t\/\/ If not set, the value in log.flush.scheduler.interval.ms is used\n\t\/\/ Default: null\n\tLogFlushIntervalMs int64 `json:logFlushIntervalMs`\n\n\t\/\/The frequency with which we update the persistent record of the last flush which acts as the log recovery point\n\t\/\/ Default: 60000\n\tLogFlushOffsetCheckpointIntervalMs int32 `json:logFlushOffsetCheckpointIntervalMs`\n\n\t\/\/The frequency in ms that the log flusher checks whether any log needs to be flushed to disk\n\t\/\/ Default: 9223372036854775807\n\tLogFlushSchedulerIntervalMs int64 `json:\"LogFlushSchedulerIntervalMs\"`\n\n\t\/\/ The maximum size of the log before deleting it\n\t\/\/ Default: -1\n\tLogRetentionBytes string `json:\"logRetentionBytes\"`\n\n\t\/\/ The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property\n\t\/\/ Default: 168\n\tLogRetentionHours int32 `json:\"logRetentionHours\"`\n\n\t\/\/The maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property\n\t\/\/ Default: 168\n\tLogRollHours int32 `json:\"logRollHours\"`\n\n\t\/\/ The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property\n\t\/\/ Default: 0\n\tLogRollJitterHours int32 `json:logRollJitterHours`\n\n\t\/\/The maximum size of a single log file\n\t\/\/ Default: 1073741824\n\tLogSegmentBytes int32 `json:logSegmentBytes`\n\n\t\/\/ The amount of time to wait before deleting a file from the filesystem\n\t\/\/ Default: 60000\n\tLogSegmentDeleteDelayMS int64 `json:logSegmentDeleteDelayMS`\n\n\t\/\/ The maximum size of message that the server can receive\n\t\/\/ Default: 1000012\n\tMessagesMaxBytes int32 `json:messagesMaxBytes`\n\n\t\/\/ When a producer sets acks to \"all\" (or \"-1\"), min.insync.replicas specifies the minimum number of replicas that must acknowledge\n\t\/\/ a write for the write to be considered successful.\n\t\/\/ Can be overwritten at topic level\n\t\/\/ Default: 1\n\tMinInsyncReplicas int32 `json:minInsyncReplicas`\n\n\t\/\/ The number of io threads that the server uses for carrying out network requests\n\t\/\/ Default: 8\n\tNumIOThreads int32 `json:\"numIOThreads\"`\n\n\t\/\/ The number of network threads that the server uses for handling network requests\n\t\/\/ Default: 3\n\tNumNetworkThreads int32 `json:\"numNetworkThreads\"`\n\n\t\/\/The number of threads per data directory to be used for log recovery at startup and flushing at shutdown\n\t\/\/ Default: 1\n\tNumRecoveryThreadsPerDataDir int32 `json:\"numRecoveryThreadsPerDataDir\"`\n\n\t\/\/ Number of fetcher threads used to replicate messages from a source broker.\n\t\/\/ Increasing this value can increase the degree of I\/O parallelism in the follower broker.\n\t\/\/ Default: 1\n\tNumReplicaFetchers int32 `json:\"numReplicaFetchers\"`\n\n\t\/\/ The maximum size for a metadata entry associated with an offset commit.\n\t\/\/ Default: 4096\n\tOffsetMetadataMaxBytes int32 `json:\"offsetMetadataMaxBytes\"`\n\n\t\/\/ The required acks before the commit can be accepted. In general, the default (-1) should not be overridden\n\t\/\/ Default: -1\n\t\/\/ Commented out because of dangerous option\n\t\/\/OffsetCommitReadRequiredAcks int32 `json:\"offsetCommitReadRequiredAcks\"`\n\n\t\/\/Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached.\n\t\/\/ This is similar to the producer request timeout.\n\t\/\/ Default: 5000\n\tOffsetCommitTimeoutMs int32 `json:\"offsetCommitTimeoutMs\"`\n\n\t\/\/ Batch size for reading from the offsets segments when loading offsets into the cache.\n\t\/\/ Default: 5242880\n\tOffsetLoadBufferSize int32 `json:\"offsetLoadBufferSize\"`\n\n\t\/\/ Frequency at which to check for stale offsets\n\t\/\/ Default: 600000\n\tOffsetRetentionCheckIntervalMs int64 `json:\"offsetRetentionCheckIntervalMs\"`\n\n\t\/\/ Log retention window in minutes for offsets topic\n\t\/\/ Default: 1440\n\tOffsetRetentionMinutes int32 `json:\"offsetRetentionMinutes\"`\n\n\t\/\/ Compression codec for the offsets topic - compression may be used to achieve \"atomic\" commits\n\t\/\/ Default: 0\n\t\/\/Commented out, wrong doku? int fro compression???\n\t\/\/OffsetTopicCompressionCodec int32 `json:\"offset_topic_compression_coded\"`\n\n\t\/\/ The number of partitions for the offset commit topic (should not change after deployment)\n\t\/\/ Default: 50\n\tOffsetTopicNumPartitions int32 `json:\"offsetTopicNumPartitions\"`\n\n\t\/\/ The replication factor for the offsets topic (set higher to ensure availability).\n\t\/\/ To ensure that the effective replication factor of the offsets topic is the configured value, the number of alive brokers has to be at least the replication factor at the time of the first request for the offsets topic.\n\t\/\/ If not, either the offsets topic creation will fail or it will get a replication factor of min(alive brokers, configured replication factor)\n\t\/\/ Default: 3\n\tOffsetTopicReplicationFactor int32 `json:\"offsetTopicReplicationFactor\"`\n\n\t\/\/ The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads\n\t\/\/ Default: 104857600\n\tOffsetTopicSegmentsBytes int32 `json:\"offsetTopicSegmentsBytes\"`\n\n\t\/\/ The number of queued requests allowed before blocking the network threads\n\t\/\/ Default: 100\n\tQueuedMaxRequest int32 `json:\"queuedMaxRequest\"`\n\n\t\/\/ Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs\n\t\/\/ Default: 1\n\tReplicaFetchMinBytes int32 `json:\"replicaFetchMinBytes\"`\n\n\t\/\/max wait time for each fetcher request issued by follower replicas.\n\t\/\/ This value should always be less than the replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics\n\t\/\/Default: 500\n\tReplicaFetchWaitMaxMs int32 `json:\"replicaFetchWaitMaxMs\"`\n\t\n\t\/\/The frequency with which the high watermark is saved out to disk\n\t\/\/ Default: 5000\n\tReplicaHighWatermarkCheckpointIntervalMs int64 `json:\"replicaHighWatermarkCheckpointIntervalMs\"`\n\n\t\/\/ If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time,\n\t\/\/ the leader will remove the follower from isr\n\t\/\/ Defaut: 10000\n\tReplicaLagTimeMaxMs int64 `json:\"replicaLagTimeMaxMs\"`\n\n\t\/\/ The socket receive buffer for network requests\n\t\/\/ Default: 65536\n\tReplicaSocketReceiveBufferBytes int32 `json:\"replicaSocketReceiveBufferBytes\"`\n\n\t\/\/ The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms\n\t\/\/ Default: 30000\n\tReplicaSocketTimeoutMs int32 `json:\"replicaSocketTimeoutMs\"`\n\n\t\/\/ The configuration controls the maximum amount of time the client will wait for the response of a request.\n\t\/\/ If the response is not received before the timeout elapses the client will resend the request if necessary\n\t\/\/ or fail the request if retries are exhausted.\n\t\/\/ Default: 30000\n\tRequestTimeoutMs int32 `json:\"requestTimeoutMs\"`\n\n\t\/\/Socket Settings? TODO? needed\n\n\t\/\/ Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss\n\t\/\/ Default: true\n\tUncleanLeaderElectionEnable bool `json:\"uncleanLeaderElectionEnable\"`\n\n\t\/\/ The max time that the client waits to establish a connection to zookeeper.\n\t\/\/ If not set, the value in zookeeper.session.timeout.ms is used\n\t\/\/ Default: null\n\tZookeeperConnectionTimeoutMs int32 `json:\"zookeeperConnectionTimeoutMs\"`\n\n\t\/\/ Zookeeper session timeout\n\t\/\/ Default: 6000\n\tZookeeperSessionTimeoutMs int32 `json:\"zookeeperSessionTimeoutMs\"`\n}\n\ntype KafkaReassignmentConfig struct {\n\tPartition []KafkaPartition `json:\"partition\"`\n\tVersion string `json:\"version\"`\n}\n\ntype KafkaPartition struct {\n\tPartition int32 `json:\"partition\"`\n\tReplicas []int32 `json:\"replicas\"`\n}\n\ntype KafkaTopic struct {\n\tTopic string `json:\"topic\"`\n\tPartitionFactor int32 `json:\"partition_factor\"`\n\tReplicationFactor int32 `json:\"replication_factor\"`\n\tPartitions []KafkaPartition `json:\"partitions\"`\n}\n\n\/\/No json needed since internal Event type.\ntype KafkaClusterEvent struct {\n\tType KafkaEventType\n\tCluster KafkaCluster\n\tOldCluster KafkaCluster\n}\n\ntype KafkaEventType int32\n\nconst (\n\tNEW_CLUSTER KafkaEventType = iota + 1\n\tDELTE_CLUSTER\n\tUPSIZE_CLUSTER\n\tDOWNSIZE_CLUSTER\n\tCHANGE_IMAGE\n\tCHANGE_BROKER_RESOURCES\n\tCHANGE_NAME\n\tCHANGE_ZOOKEEPER_CONNECT\n\tBROKER_CONFIG_CHANGE\n\tUNKNOWN_CHANGE\n\tDOWNSIZE_EVENT\n\t\/\/Cleanup event which get emmised after a Cluster Delete.\n\t\/\/Its ensure the deletion of the Statefulset after it has been scaled down.\n\tCLEANUP_EVENT\n\tKAKFA_EVENT\n)\n\ntype KafkaBrokerState string\n\nconst (\n\tEMPTY_BROKER KafkaBrokerState = \"EMPTYING\"\n\tREBALANCE_BROKER KafkaBrokerState = \"REBALANCING\"\n\tNORMAL_STATE KafkaBrokerState = \"NORMAL\"\n)\n\n\/\/ convenience functions\nfunc PrintCluster(cluster *KafkaCluster) string {\n\treturn fmt.Sprintf(\"%s\/%s, APIVersion: %s, Kind: %s, Value: %#v\", cluster.Metadata.Namespace, cluster.Metadata.Name, cluster.APIVersion, cluster.Kind, cluster)\n}\n\n\/\/ Required to satisfy Object interface\nfunc (e *KafkaCluster) GetObjectKind() schema.ObjectKind {\n\treturn &e.TypeMeta\n}\n\n\/\/ Required to satisfy ObjectMetaAccessor interface\nfunc (e *KafkaCluster) GetObjectMeta() metav1.Object {\n\treturn &e.Metadata\n}\n\n\/\/ Required to satisfy Object interface\nfunc (el *KafkaClusterList) GetObjectKind() schema.ObjectKind {\n\treturn &el.TypeMeta\n}\n\n\/\/ Required to satisfy ListMetaAccessor interface\nfunc (el *KafkaClusterList) GetListMeta() metav1.List {\n\treturn &el.Metadata\n}\n\n\/\/Shamefull copied over from: https:\/\/github.com\/kubernetes\/client-go\/blob\/master\/examples\/third-party-resources\/types.go\n\/\/ The code below is used only to work around a known problem with third-party\n\/\/ resources and ugorji. If\/when these issues are resolved, the code below\n\/\/ should no longer be required.\n\ntype KafkaClusterListCopy KafkaClusterList\ntype KafkaClusterCopy KafkaCluster\n\nfunc (e *KafkaCluster) UnmarshalJSON(data []byte) error {\n\ttmp := KafkaClusterCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp2 := KafkaCluster(tmp)\n\t*e = tmp2\n\treturn nil\n}\n\nfunc (el *KafkaClusterList) UnmarshalJSON(data []byte) error {\n\ttmp := KafkaClusterListCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp2 := KafkaClusterList(tmp)\n\t*el = tmp2\n\treturn nil\n}\n<|endoftext|>"} {"text":"package libvirt\n\nimport (\n\t\"github.com\/influxdata\/telegraf\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n\tlv \"github.com\/libvirt\/libvirt-go\"\n)\n\nconst sampleConfig = `\n# specify a libvirt connection uri\nuri = \"qemu:\/\/\/system\"\n`\n\ntype Libvirt struct {\n\tUri string\n}\n\nfunc (l *Libvirt) SampleConfig() string {\n\treturn sampleConfig\n}\n\nfunc (l *Libvirt) Description() string {\n\treturn \"Read domain infos from a libvirt deamon\"\n}\n\nfunc (l *Libvirt) Gather(acc telegraf.Accumulator) error {\n\tconnection, err := lv.NewConnectReadOnly(l.Uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer connection.Close()\n\n\tdomains, err := connection.ListAllDomains(lv.CONNECT_LIST_DOMAINS_ACTIVE)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tacc.AddFields(\"vm.data\", map[string]interface{}{\"count\": len(domains)}, make(map[string]string))\n\n\tfor _, domain := range domains {\n\t\tdomainInfo, err := domain.GetInfo()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuuid, err := domain.GetUUIDString()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tname, err := domain.GetName()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttags := map[string]string{\n\t\t\t\"uuid\": uuid,\n\t\t\t\"name\": name,\n\t\t}\n\n\t\tfields := map[string]interface{}{\n\t\t\t\"cpu_time\": domainInfo.CpuTime,\n\t\t\t\"nr_virt_cpu\": domainInfo.NrVirtCpu,\n\t\t}\n\t\tstats, err := domain.MemoryStats(10, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm := map[int32]string{\n\t\t\tint32(lv.DOMAIN_MEMORY_STAT_AVAILABLE): \"mem_max\",\n\t\t\tint32(lv.DOMAIN_MEMORY_STAT_USABLE): \"mem_free\",\n\t\t}\n\t\tfor _, stat := range stats {\n\t\t\tif val, ok := m[stat.Tag]; ok {\n\t\t\t\tfields[val] = stat.Val\n\t\t\t}\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\n\t\tGatherInterfaces(*connection, domain, acc, tags)\n\n\t\tGatherDisks(*connection, domain, acc, tags)\n\n\t\tdomain.Free()\n\t}\n\n\treturn nil\n}\n\nfunc GatherInterfaces(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, tags map[string]string) error {\n\tdomStat, err := c.GetAllDomainStats(\n\t\t[]*lv.Domain{&d},\n\t\tlv.DOMAIN_STATS_INTERFACE,\n\t\tlv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer domStat[0].Domain.Free()\n\tfor _, iface := range domStat[0].Net {\n\t\ttags[\"interface\"] = iface.Name\n\t\tfields := map[string]interface{}{\n\t\t\t\"rx_bytes\": iface.RxBytes,\n\t\t\t\"rx_packets\": iface.RxPkts,\n\t\t\t\"rx_errs\": iface.RxErrs,\n\t\t\t\"rx_drop\": iface.RxDrop,\n\t\t\t\"tx_bytes\": iface.TxBytes,\n\t\t\t\"tx_packets\": iface.TxPkts,\n\t\t\t\"tx_errs\": iface.TxErrs,\n\t\t\t\"tx_drop\": iface.TxDrop,\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\t}\n\treturn nil\n}\n\nfunc GatherDisks(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, tags map[string]string) error {\n\tdomStats, err := c.GetAllDomainStats(\n\t\t[]*lv.Domain{&d},\n\t\tlv.DOMAIN_STATS_BLOCK,\n\t\tlv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer domStats[0].Domain.Free()\n\tfor _, disk := range domStats[0].Block {\n\t\ttags[\"disk\"] = disk.Name\n\t\tfields := map[string]interface{}{\n\t\t\t\"rd_req\": disk.RdReqs,\n\t\t\t\"rd_bytes\": disk.RdBytes,\n\t\t\t\"wr_req\": disk.WrReqs,\n\t\t\t\"wr_bytes\": disk.WrBytes,\n\t\t\t\"errs\": disk.Errors,\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tinputs.Add(\"libvirt\", func() telegraf.Input {\n\t\treturn &Libvirt{}\n\t})\n}\nremoved name and fixed mapspackage libvirt\n\nimport (\n\t\"github.com\/influxdata\/telegraf\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n\tlv \"github.com\/libvirt\/libvirt-go\"\n)\n\nconst sampleConfig = `\n# specify a libvirt connection uri\nuri = \"qemu:\/\/\/system\"\n`\n\ntype Libvirt struct {\n\tUri string\n}\n\nfunc (l *Libvirt) SampleConfig() string {\n\treturn sampleConfig\n}\n\nfunc (l *Libvirt) Description() string {\n\treturn \"Read domain infos from a libvirt deamon\"\n}\n\nfunc (l *Libvirt) Gather(acc telegraf.Accumulator) error {\n\tconnection, err := lv.NewConnectReadOnly(l.Uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer connection.Close()\n\n\tdomains, err := connection.ListAllDomains(lv.CONNECT_LIST_DOMAINS_ACTIVE)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tacc.AddFields(\"vm.data\", map[string]interface{}{\"count\": len(domains)}, make(map[string]string))\n\n\tfor _, domain := range domains {\n\t\tdomainInfo, err := domain.GetInfo()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuuid, err := domain.GetUUIDString()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttags := map[string]string{\n\t\t\t\"uuid\": uuid,\n\t\t}\n\n\t\tfields := map[string]interface{}{\n\t\t\t\"cpu_time\": domainInfo.CpuTime,\n\t\t\t\"nr_virt_cpu\": domainInfo.NrVirtCpu,\n\t\t}\n\t\tstats, err := domain.MemoryStats(10, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm := map[int32]string{\n\t\t\tint32(lv.DOMAIN_MEMORY_STAT_AVAILABLE): \"mem_max\",\n\t\t\tint32(lv.DOMAIN_MEMORY_STAT_USABLE): \"mem_free\",\n\t\t}\n\t\tfor _, stat := range stats {\n\t\t\tif val, ok := m[stat.Tag]; ok {\n\t\t\t\tfields[val] = stat.Val\n\t\t\t}\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\n\t\tGatherInterfaces(*connection, domain, acc, tags)\n\n\t\tGatherDisks(*connection, domain, acc, tags)\n\n\t\tdomain.Free()\n\t}\n\n\treturn nil\n}\n\nfunc GatherInterfaces(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, tags map[string]string) error {\n\tdomStat, err := c.GetAllDomainStats(\n\t\t[]*lv.Domain{&d},\n\t\tlv.DOMAIN_STATS_INTERFACE,\n\t\tlv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer domStat[0].Domain.Free()\n\tfor _, iface := range domStat[0].Net {\n\t\ttags[\"interface\"] = iface.Name\n\t\tfields := map[string]interface{}{\n\t\t\t\"rx_bytes\": iface.RxBytes,\n\t\t\t\"rx_packets\": iface.RxPkts,\n\t\t\t\"rx_errs\": iface.RxErrs,\n\t\t\t\"rx_drop\": iface.RxDrop,\n\t\t\t\"tx_bytes\": iface.TxBytes,\n\t\t\t\"tx_packets\": iface.TxPkts,\n\t\t\t\"tx_errs\": iface.TxErrs,\n\t\t\t\"tx_drop\": iface.TxDrop,\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\t}\n\tdelete(tags, \"interface\")\n\treturn nil\n}\n\nfunc GatherDisks(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, tags map[string]string) error {\n\tdomStats, err := c.GetAllDomainStats(\n\t\t[]*lv.Domain{&d},\n\t\tlv.DOMAIN_STATS_BLOCK,\n\t\tlv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer domStats[0].Domain.Free()\n\tfor _, disk := range domStats[0].Block {\n\t\ttags[\"disk\"] = disk.Name\n\t\tfields := map[string]interface{}{\n\t\t\t\"rd_req\": disk.RdReqs,\n\t\t\t\"rd_bytes\": disk.RdBytes,\n\t\t\t\"wr_req\": disk.WrReqs,\n\t\t\t\"wr_bytes\": disk.WrBytes,\n\t\t\t\"errs\": disk.Errors,\n\t\t}\n\t\tacc.AddFields(\"vm.data\", fields, tags)\n\t}\n\tdelete(tags, \"disk\")\n\treturn nil\n}\n\nfunc init() {\n\tinputs.Add(\"libvirt\", func() telegraf.Input {\n\t\treturn &Libvirt{}\n\t})\n}\n<|endoftext|>"} {"text":"package uchiwa\n\nimport \"github.com\/palourde\/logger\"\n\n\/\/ BuildEvents constructs events objects for frontend consumption\nfunc BuildEvents() {\n\tfor _, e := range tmpResults.Events {\n\t\tm := e.(map[string]interface{})\n\n\t\t\/\/ build backward compatible event object for Sensu < 0.13.0\n\t\tif m[\"id\"] == nil {\n\n\t\t\t\/\/ build client object\n\t\t\tc := m[\"client\"]\n\t\t\tdelete(m, \"client\")\n\t\t\tm[\"client\"] = map[string]interface{}{\"name\": c}\n\n\t\t\t\/\/ build check object\n\t\t\tc = m[\"check\"]\n\t\t\tdelete(m, \"check\")\n\t\t\tm[\"check\"] = map[string]interface{}{\"name\": c, \"issued\": m[\"issued\"], \"output\": m[\"output\"], \"status\": m[\"status\"]}\n\n\t\t\t\/\/ is flapping?\n\t\t\tif m[\"action\"] == false {\n\t\t\t\tm[\"action\"] = \"create\"\n\t\t\t} else {\n\t\t\t\tm[\"action\"] = \"flapping\"\n\t\t\t}\n\n\t\t\t\/\/ remove old entries\n\t\t\tdelete(m, \"issued\")\n\t\t\tdelete(m, \"output\")\n\t\t\tdelete(m, \"status\")\n\t\t}\n\n\t\t\/\/ we assume the event isn't acknowledged in case we can't assert the following values\n\t\tm[\"acknowledged\"] = false\n\n\t\t\/\/ get client name\n\t\tc, ok := m[\"client\"].(map[string]interface{})\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's client interface from %+v\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\tclientName, ok := c[\"name\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's client name from %+v\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get check name\n\t\tk := m[\"check\"].(map[string]interface{})\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's check interface from %+v\", k)\n\t\t\tcontinue\n\t\t}\n\n\t\tcheckName, ok := k[\"name\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's check name from %+v\", k)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get dc name\n\t\tdcName, ok := m[\"dc\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's datacenter name from %+v\", m)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ determine if the event is acknowledged\n\t\tm[\"acknowledged\"] = isAcknowledged(clientName, checkName, dcName)\n\t}\n}\n\n\/\/ ResolveEvent send a POST request to the \/resolve endpoint in order to resolve an event\nfunc ResolveEvent(data interface{}) error {\n\tapi, m, err := findDcFromInterface(data)\n\t_, err = api.ResolveEvent(m[\"payload\"])\n\tif err != nil {\n\t\tlogger.Warning(err)\n\t\treturn err\n\t}\n\treturn nil\n}\nCatch the exception for the check name assertionpackage uchiwa\n\nimport \"github.com\/palourde\/logger\"\n\n\/\/ BuildEvents constructs events objects for frontend consumption\nfunc BuildEvents() {\n\tfor _, e := range tmpResults.Events {\n\t\tm := e.(map[string]interface{})\n\n\t\t\/\/ build backward compatible event object for Sensu < 0.13.0\n\t\tif m[\"id\"] == nil {\n\n\t\t\t\/\/ build client object\n\t\t\tc := m[\"client\"]\n\t\t\tdelete(m, \"client\")\n\t\t\tm[\"client\"] = map[string]interface{}{\"name\": c}\n\n\t\t\t\/\/ build check object\n\t\t\tc = m[\"check\"]\n\t\t\tdelete(m, \"check\")\n\t\t\tm[\"check\"] = map[string]interface{}{\"name\": c, \"issued\": m[\"issued\"], \"output\": m[\"output\"], \"status\": m[\"status\"]}\n\n\t\t\t\/\/ is flapping?\n\t\t\tif m[\"action\"] == false {\n\t\t\t\tm[\"action\"] = \"create\"\n\t\t\t} else {\n\t\t\t\tm[\"action\"] = \"flapping\"\n\t\t\t}\n\n\t\t\t\/\/ remove old entries\n\t\t\tdelete(m, \"issued\")\n\t\t\tdelete(m, \"output\")\n\t\t\tdelete(m, \"status\")\n\t\t}\n\n\t\t\/\/ we assume the event isn't acknowledged in case we can't assert the following values\n\t\tm[\"acknowledged\"] = false\n\n\t\t\/\/ get client name\n\t\tc, ok := m[\"client\"].(map[string]interface{})\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's client interface from %+v\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\tclientName, ok := c[\"name\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's client name from %+v\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get check name\n\t\tk, ok := m[\"check\"].(map[string]interface{})\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's check interface from %+v\", k)\n\t\t\tcontinue\n\t\t}\n\n\t\tcheckName, ok := k[\"name\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's check name from %+v\", k)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get dc name\n\t\tdcName, ok := m[\"dc\"].(string)\n\t\tif !ok {\n\t\t\tlogger.Warningf(\"Could not assert event's datacenter name from %+v\", m)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ determine if the event is acknowledged\n\t\tm[\"acknowledged\"] = isAcknowledged(clientName, checkName, dcName)\n\t}\n}\n\n\/\/ ResolveEvent send a POST request to the \/resolve endpoint in order to resolve an event\nfunc ResolveEvent(data interface{}) error {\n\tapi, m, err := findDcFromInterface(data)\n\t_, err = api.ResolveEvent(m[\"payload\"])\n\tif err != nil {\n\t\tlogger.Warning(err)\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/go:generate bitfanDoc\npackage templateprocessor\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/vjeantet\/bitfan\/core\/location\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\t\/\/ If this filter is successful, add any arbitrary fields to this event.\n\tAdd_field map[string]interface{}\n\n\t\/\/ If this filter is successful, add arbitrary tags to the event. Tags can be dynamic\n\t\/\/ and include parts of the event using the %{field} syntax.\n\tTags []string\n\n\t\/\/ Add a type field to all events handled by this input\n\tType string\n\n\t\/\/ The codec used for input data. Input codecs are a convenient method for decoding\n\t\/\/ your data before it enters the input, without needing a separate filter in your bitfan pipeline\n\t\/\/ Codec string\n\n\t\/\/ Go Template content\n\t\/\/\n\t\/\/ set inline content, a path or an url to the template content\n\t\/\/\n\t\/\/ Go template : https:\/\/golang.org\/pkg\/html\/template\/\n\t\/\/ @ExampleLS location => \"test.tpl\"\n\t\/\/ @Type Location\n\tLocation string `mapstructure:\"location\" validate:\"required\"`\n\n\t\/\/ You can set variable to be used in template by using ${var}.\n\t\/\/ each reference will be replaced by the value of the variable found in Template's content\n\t\/\/ The replacement is case-sensitive.\n\t\/\/ @ExampleLS var => {\"hostname\"=>\"myhost\",\"varname\"=>\"varvalue\"}\n\tVar map[string]string `mapstructure:\"var\"`\n\n\t\/\/ Define the target field for placing the template execution result. If this setting is omitted,\n\t\/\/ the data will be stored in the \"output\" field\n\t\/\/ @ExampleLS target => \"mydata\"\n\t\/\/ @Default \"output\"\n\tTarget string `mapstructure:\"target\"`\n}\n\ntype processor struct {\n\tprocessors.Base\n\topt *options\n\tq chan bool\n\tTpl *template.Template\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tTarget: \"data\",\n\t}\n\n\tp.opt = &defaults\n\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloc, err := location.NewLocation(p.opt.Location, p.ConfigWorkingLocation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttpl, _, err := loc.TemplateWithOptions(p.opt.Var)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Tpl = tpl\n\treturn nil\n}\n\nfunc (p *processor) Tick(e processors.IPacket) error {\n\treturn p.Receive(e)\n}\n\nfunc (p *processor) Receive(e processors.IPacket) error {\n\n\tbuff := bytes.NewBufferString(\"\")\n\tp.Tpl.Execute(buff, e.Fields())\n\n\tif len(p.opt.Var) > 0 {\n\t\te.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t}\n\te.Fields().SetValueForPath(buff.String(), p.opt.Target)\n\n\tprocessors.ProcessCommonFields(e.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\tp.Send(e)\n\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\treturn nil\n}\nprocessor : template - fix default value for target\/\/go:generate bitfanDoc\npackage templateprocessor\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/vjeantet\/bitfan\/core\/location\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\t\/\/ If this filter is successful, add any arbitrary fields to this event.\n\tAdd_field map[string]interface{}\n\n\t\/\/ If this filter is successful, add arbitrary tags to the event. Tags can be dynamic\n\t\/\/ and include parts of the event using the %{field} syntax.\n\tTags []string\n\n\t\/\/ Add a type field to all events handled by this input\n\tType string\n\n\t\/\/ The codec used for input data. Input codecs are a convenient method for decoding\n\t\/\/ your data before it enters the input, without needing a separate filter in your bitfan pipeline\n\t\/\/ Codec string\n\n\t\/\/ Go Template content\n\t\/\/\n\t\/\/ set inline content, a path or an url to the template content\n\t\/\/\n\t\/\/ Go template : https:\/\/golang.org\/pkg\/html\/template\/\n\t\/\/ @ExampleLS location => \"test.tpl\"\n\t\/\/ @Type Location\n\tLocation string `mapstructure:\"location\" validate:\"required\"`\n\n\t\/\/ You can set variable to be used in template by using ${var}.\n\t\/\/ each reference will be replaced by the value of the variable found in Template's content\n\t\/\/ The replacement is case-sensitive.\n\t\/\/ @ExampleLS var => {\"hostname\"=>\"myhost\",\"varname\"=>\"varvalue\"}\n\tVar map[string]string `mapstructure:\"var\"`\n\n\t\/\/ Define the target field for placing the template execution result. If this setting is omitted,\n\t\/\/ the data will be stored in the \"output\" field\n\t\/\/ @ExampleLS target => \"mydata\"\n\t\/\/ @Default \"output\"\n\tTarget string `mapstructure:\"target\"`\n}\n\ntype processor struct {\n\tprocessors.Base\n\topt *options\n\tq chan bool\n\tTpl *template.Template\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tTarget: \"output\",\n\t}\n\n\tp.opt = &defaults\n\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloc, err := location.NewLocation(p.opt.Location, p.ConfigWorkingLocation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttpl, _, err := loc.TemplateWithOptions(p.opt.Var)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Tpl = tpl\n\treturn nil\n}\n\nfunc (p *processor) Tick(e processors.IPacket) error {\n\treturn p.Receive(e)\n}\n\nfunc (p *processor) Receive(e processors.IPacket) error {\n\n\tbuff := bytes.NewBufferString(\"\")\n\terr := p.Tpl.Execute(buff, e.Fields())\n\tif err != nil {\n\t\tp.Logger.Errorf(\"template error : %s\", err)\n\t\treturn err\n\t}\n\n\tif len(p.opt.Var) > 0 {\n\t\te.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t}\n\te.Fields().SetValueForPath(buff.String(), p.opt.Target)\n\n\tprocessors.ProcessCommonFields(e.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\tp.Send(e)\n\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage internal \/\/ import \"github.com\/open-telemetry\/opentelemetry-collector-contrib\/receiver\/prometheusreceiver\/internal\"\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"go.opentelemetry.io\/collector\/pdata\/pcommon\"\n\t\"go.opentelemetry.io\/collector\/pdata\/pmetric\"\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ Notes on garbage collection (gc):\n\/\/\n\/\/ Job-level gc:\n\/\/ The Prometheus receiver will likely execute in a long running service whose lifetime may exceed\n\/\/ the lifetimes of many of the jobs that it is collecting from. In order to keep the JobsMap from\n\/\/ leaking memory for entries of no-longer existing jobs, the JobsMap needs to remove entries that\n\/\/ haven't been accessed for a long period of time.\n\/\/\n\/\/ Timeseries-level gc:\n\/\/ Some jobs that the Prometheus receiver is collecting from may export timeseries based on metrics\n\/\/ from other jobs (e.g. cAdvisor). In order to keep the timeseriesMap from leaking memory for entries\n\/\/ of no-longer existing jobs, the timeseriesMap for each job needs to remove entries that haven't\n\/\/ been accessed for a long period of time.\n\/\/\n\/\/ The gc strategy uses a standard mark-and-sweep approach - each time a timeseriesMap is accessed,\n\/\/ it is marked. Similarly, each time a timeseriesinfo is accessed, it is also marked.\n\/\/\n\/\/ At the end of each JobsMap.get(), if the last time the JobsMap was gc'd exceeds the 'gcInterval',\n\/\/ the JobsMap is locked and any timeseriesMaps that are unmarked are removed from the JobsMap\n\/\/ otherwise the timeseriesMap is gc'd\n\/\/\n\/\/ The gc for the timeseriesMap is straightforward - the map is locked and, for each timeseriesinfo\n\/\/ in the map, if it has not been marked, it is removed otherwise it is unmarked.\n\/\/\n\/\/ Alternative Strategies\n\/\/ 1. If the job-level gc doesn't run often enough, or runs too often, a separate go routine can\n\/\/ be spawned at JobMap creation time that gc's at periodic intervals. This approach potentially\n\/\/ adds more contention and latency to each scrape so the current approach is used. Note that\n\/\/ the go routine will need to be cancelled upon Shutdown().\n\/\/ 2. If the gc of each timeseriesMap during the gc of the JobsMap causes too much contention,\n\/\/ the gc of timeseriesMaps can be moved to the end of MetricsAdjuster().AdjustMetricSlice(). This\n\/\/ approach requires adding 'lastGC' Time and (potentially) a gcInterval duration to\n\/\/ timeseriesMap so the current approach is used instead.\n\n\/\/ timeseriesinfo contains the information necessary to adjust from the initial point and to detect resets.\ntype timeseriesinfo struct {\n\tmark bool\n\n\tnumber numberInfo\n\thistogram histogramInfo\n\tsummary summaryInfo\n}\n\ntype numberInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousValue float64\n}\n\ntype histogramInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousCount uint64\n\tpreviousSum float64\n}\n\ntype summaryInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousCount uint64\n\tpreviousSum float64\n}\n\n\/\/ timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for\n\/\/ the instance.\ntype timeseriesMap struct {\n\tsync.RWMutex\n\t\/\/ The mutex is used to protect access to the member fields. It is acquired for the entirety of\n\t\/\/ AdjustMetricSlice() and also acquired by gc().\n\n\tmark bool\n\ttsiMap map[string]*timeseriesinfo\n}\n\n\/\/ Get the timeseriesinfo for the timeseries associated with the metric and label values.\nfunc (tsm *timeseriesMap) get(metric pmetric.Metric, kv pcommon.Map) (*timeseriesinfo, bool) {\n\t\/\/ This should only be invoked be functions called (directly or indirectly) by AdjustMetricSlice().\n\t\/\/ The lock protecting tsm.tsiMap is acquired there.\n\tname := metric.Name()\n\tsig := getTimeseriesSignature(name, kv)\n\tif metric.DataType() == pmetric.MetricDataTypeHistogram {\n\t\t\/\/ There are 2 types of Histograms whose aggregation temporality needs distinguishing:\n\t\t\/\/ * CumulativeHistogram\n\t\t\/\/ * GaugeHistogram\n\t\taggTemporality := metric.Histogram().AggregationTemporality()\n\t\tsig += \",\" + aggTemporality.String()\n\t}\n\ttsi, ok := tsm.tsiMap[sig]\n\tif !ok {\n\t\ttsi = ×eriesinfo{}\n\t\ttsm.tsiMap[sig] = tsi\n\t}\n\ttsm.mark = true\n\ttsi.mark = true\n\treturn tsi, ok\n}\n\n\/\/ Create a unique timeseries signature consisting of the metric name and label values.\nfunc getTimeseriesSignature(name string, kv pcommon.Map) string {\n\tlabelValues := make([]string, 0, kv.Len())\n\tkv.Sort().Range(func(_ string, attrValue pcommon.Value) bool {\n\t\tvalue := attrValue.StringVal()\n\t\tif value != \"\" {\n\t\t\tlabelValues = append(labelValues, value)\n\t\t}\n\t\treturn true\n\t})\n\treturn fmt.Sprintf(\"%s,%s\", name, strings.Join(labelValues, \",\"))\n}\n\n\/\/ Remove timeseries that have aged out.\nfunc (tsm *timeseriesMap) gc() {\n\ttsm.Lock()\n\tdefer tsm.Unlock()\n\t\/\/ this shouldn't happen under the current gc() strategy\n\tif !tsm.mark {\n\t\treturn\n\t}\n\tfor ts, tsi := range tsm.tsiMap {\n\t\tif !tsi.mark {\n\t\t\tdelete(tsm.tsiMap, ts)\n\t\t} else {\n\t\t\ttsi.mark = false\n\t\t}\n\t}\n\ttsm.mark = false\n}\n\nfunc newTimeseriesMap() *timeseriesMap {\n\treturn ×eriesMap{mark: true, tsiMap: map[string]*timeseriesinfo{}}\n}\n\n\/\/ JobsMap maps from a job instance to a map of timeseries instances for the job.\ntype JobsMap struct {\n\tsync.RWMutex\n\t\/\/ The mutex is used to protect access to the member fields. It is acquired for most of\n\t\/\/ get() and also acquired by gc().\n\n\tgcInterval time.Duration\n\tlastGC time.Time\n\tjobsMap map[string]*timeseriesMap\n}\n\n\/\/ NewJobsMap creates a new (empty) JobsMap.\nfunc NewJobsMap(gcInterval time.Duration) *JobsMap {\n\treturn &JobsMap{gcInterval: gcInterval, lastGC: time.Now(), jobsMap: make(map[string]*timeseriesMap)}\n}\n\n\/\/ Remove jobs and timeseries that have aged out.\nfunc (jm *JobsMap) gc() {\n\tjm.Lock()\n\tdefer jm.Unlock()\n\t\/\/ once the structure is locked, confirm that gc() is still necessary\n\tif time.Since(jm.lastGC) > jm.gcInterval {\n\t\tfor sig, tsm := range jm.jobsMap {\n\t\t\ttsm.RLock()\n\t\t\ttsmNotMarked := !tsm.mark\n\t\t\t\/\/ take a read lock here, no need to get a full lock as we have a lock on the JobsMap\n\t\t\ttsm.RUnlock()\n\t\t\tif tsmNotMarked {\n\t\t\t\tdelete(jm.jobsMap, sig)\n\t\t\t} else {\n\t\t\t\t\/\/ a full lock will be obtained in here, if required.\n\t\t\t\ttsm.gc()\n\t\t\t}\n\t\t}\n\t\tjm.lastGC = time.Now()\n\t}\n}\n\nfunc (jm *JobsMap) maybeGC() {\n\t\/\/ speculatively check if gc() is necessary, recheck once the structure is locked\n\tjm.RLock()\n\tdefer jm.RUnlock()\n\tif time.Since(jm.lastGC) > jm.gcInterval {\n\t\tgo jm.gc()\n\t}\n}\n\nfunc (jm *JobsMap) get(job, instance string) *timeseriesMap {\n\tsig := job + \":\" + instance\n\t\/\/ a read locke is taken here as we will not need to modify jobsMap if the target timeseriesMap is available.\n\tjm.RLock()\n\ttsm, ok := jm.jobsMap[sig]\n\tjm.RUnlock()\n\tdefer jm.maybeGC()\n\tif ok {\n\t\treturn tsm\n\t}\n\tjm.Lock()\n\tdefer jm.Unlock()\n\t\/\/ Now that we've got an exclusive lock, check once more to ensure an entry wasn't created in the interim\n\t\/\/ and then create a new timeseriesMap if required.\n\ttsm2, ok2 := jm.jobsMap[sig]\n\tif ok2 {\n\t\treturn tsm2\n\t}\n\ttsm2 = newTimeseriesMap()\n\tjm.jobsMap[sig] = tsm2\n\treturn tsm2\n}\n\n\/\/ MetricsAdjuster takes a map from a metric instance to the initial point in the metrics instance\n\/\/ and provides AdjustMetricSlice, which takes a sequence of metrics and adjust their start times based on\n\/\/ the initial points.\ntype MetricsAdjuster struct {\n\ttsm *timeseriesMap\n\tlogger *zap.Logger\n}\n\n\/\/ NewMetricsAdjuster is a constructor for MetricsAdjuster.\nfunc NewMetricsAdjuster(tsm *timeseriesMap, logger *zap.Logger) *MetricsAdjuster {\n\treturn &MetricsAdjuster{\n\t\ttsm: tsm,\n\t\tlogger: logger,\n\t}\n}\n\n\/\/ AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and\n\/\/ previous points in the timeseriesMap.\nfunc (ma *MetricsAdjuster) AdjustMetrics(metrics pmetric.Metrics) {\n\t\/\/ The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that\n\t\/\/ nothing else can modify the data used for adjustment.\n\tma.tsm.Lock()\n\tdefer ma.tsm.Unlock()\n\tfor i := 0; i < metrics.ResourceMetrics().Len(); i++ {\n\t\trm := metrics.ResourceMetrics().At(i)\n\t\tfor j := 0; j < rm.ScopeMetrics().Len(); j++ {\n\t\t\tilm := rm.ScopeMetrics().At(j)\n\t\t\tfor k := 0; k < ilm.Metrics().Len(); k++ {\n\t\t\t\tma.adjustMetric(ilm.Metrics().At(k))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetric(metric pmetric.Metric) {\n\tswitch dataType := metric.DataType(); dataType {\n\tcase pmetric.MetricDataTypeGauge:\n\t\t\/\/ gauges don't need to be adjusted so no additional processing is necessary\n\n\tcase pmetric.MetricDataTypeHistogram:\n\t\tma.adjustMetricHistogram(metric)\n\n\tcase pmetric.MetricDataTypeSummary:\n\t\tma.adjustMetricSummary(metric)\n\n\tcase pmetric.MetricDataTypeSum:\n\t\tma.adjustMetricSum(metric)\n\n\tdefault:\n\t\t\/\/ this shouldn't happen\n\t\tma.logger.Info(\"Adjust - skipping unexpected point\", zap.String(\"type\", dataType.String()))\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricHistogram(current pmetric.Metric) {\n\thistogram := current.Histogram()\n\tif histogram.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative {\n\t\t\/\/ Only dealing with CumulativeDistributions.\n\t\treturn\n\t}\n\n\tcurrentPoints := histogram.DataPoints()\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentDist := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentDist.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.histogram.startTime = currentDist.StartTimestamp()\n\t\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentDist.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentDist.SetStartTimestamp(tsi.histogram.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentDist.Count() < tsi.histogram.previousCount || currentDist.Sum() < tsi.histogram.previousSum {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.histogram.startTime = currentDist.StartTimestamp()\n\t\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\tcurrentDist.SetStartTimestamp(tsi.histogram.startTime)\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricSum(current pmetric.Metric) {\n\tcurrentPoints := current.Sum().DataPoints()\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentSum := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentSum.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.number.startTime = currentSum.StartTimestamp()\n\t\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSum.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentSum.SetStartTimestamp(tsi.number.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSum.DoubleVal() < tsi.number.previousValue {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.number.startTime = currentSum.StartTimestamp()\n\t\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\tcurrentSum.SetStartTimestamp(tsi.number.startTime)\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricSummary(current pmetric.Metric) {\n\tcurrentPoints := current.Summary().DataPoints()\n\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentSummary := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentSummary.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.summary.startTime = currentSummary.StartTimestamp()\n\t\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSummary.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentSummary.SetStartTimestamp(tsi.summary.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif (currentSummary.Count() != 0 &&\n\t\t\ttsi.summary.previousCount != 0 &&\n\t\t\tcurrentSummary.Count() < tsi.summary.previousCount) ||\n\t\t\t(currentSummary.Sum() != 0 &&\n\t\t\t\ttsi.summary.previousSum != 0 &&\n\t\t\t\tcurrentSummary.Sum() < tsi.summary.previousSum) {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.summary.startTime = currentSummary.StartTimestamp()\n\t\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\tcurrentSummary.SetStartTimestamp(tsi.summary.startTime)\n\t}\n}\n[chore] prometheusreceiver: remove unnecessary string concatenations and copies, use struct key (#13917)\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage internal \/\/ import \"github.com\/open-telemetry\/opentelemetry-collector-contrib\/receiver\/prometheusreceiver\/internal\"\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"go.opentelemetry.io\/collector\/pdata\/pcommon\"\n\t\"go.opentelemetry.io\/collector\/pdata\/pmetric\"\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ Notes on garbage collection (gc):\n\/\/\n\/\/ Job-level gc:\n\/\/ The Prometheus receiver will likely execute in a long running service whose lifetime may exceed\n\/\/ the lifetimes of many of the jobs that it is collecting from. In order to keep the JobsMap from\n\/\/ leaking memory for entries of no-longer existing jobs, the JobsMap needs to remove entries that\n\/\/ haven't been accessed for a long period of time.\n\/\/\n\/\/ Timeseries-level gc:\n\/\/ Some jobs that the Prometheus receiver is collecting from may export timeseries based on metrics\n\/\/ from other jobs (e.g. cAdvisor). In order to keep the timeseriesMap from leaking memory for entries\n\/\/ of no-longer existing jobs, the timeseriesMap for each job needs to remove entries that haven't\n\/\/ been accessed for a long period of time.\n\/\/\n\/\/ The gc strategy uses a standard mark-and-sweep approach - each time a timeseriesMap is accessed,\n\/\/ it is marked. Similarly, each time a timeseriesinfo is accessed, it is also marked.\n\/\/\n\/\/ At the end of each JobsMap.get(), if the last time the JobsMap was gc'd exceeds the 'gcInterval',\n\/\/ the JobsMap is locked and any timeseriesMaps that are unmarked are removed from the JobsMap\n\/\/ otherwise the timeseriesMap is gc'd\n\/\/\n\/\/ The gc for the timeseriesMap is straightforward - the map is locked and, for each timeseriesinfo\n\/\/ in the map, if it has not been marked, it is removed otherwise it is unmarked.\n\/\/\n\/\/ Alternative Strategies\n\/\/ 1. If the job-level gc doesn't run often enough, or runs too often, a separate go routine can\n\/\/ be spawned at JobMap creation time that gc's at periodic intervals. This approach potentially\n\/\/ adds more contention and latency to each scrape so the current approach is used. Note that\n\/\/ the go routine will need to be cancelled upon Shutdown().\n\/\/ 2. If the gc of each timeseriesMap during the gc of the JobsMap causes too much contention,\n\/\/ the gc of timeseriesMaps can be moved to the end of MetricsAdjuster().AdjustMetricSlice(). This\n\/\/ approach requires adding 'lastGC' Time and (potentially) a gcInterval duration to\n\/\/ timeseriesMap so the current approach is used instead.\n\n\/\/ timeseriesinfo contains the information necessary to adjust from the initial point and to detect resets.\ntype timeseriesinfo struct {\n\tmark bool\n\n\tnumber numberInfo\n\thistogram histogramInfo\n\tsummary summaryInfo\n}\n\ntype numberInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousValue float64\n}\n\ntype histogramInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousCount uint64\n\tpreviousSum float64\n}\n\ntype summaryInfo struct {\n\tstartTime pcommon.Timestamp\n\tpreviousCount uint64\n\tpreviousSum float64\n}\n\ntype timeserieskey struct {\n\tname string\n\tattributes string\n\taggTemporality pmetric.MetricAggregationTemporality\n}\n\n\/\/ timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for\n\/\/ the instance.\ntype timeseriesMap struct {\n\tsync.RWMutex\n\t\/\/ The mutex is used to protect access to the member fields. It is acquired for the entirety of\n\t\/\/ AdjustMetricSlice() and also acquired by gc().\n\n\tmark bool\n\ttsiMap map[timeserieskey]*timeseriesinfo\n}\n\n\/\/ Get the timeseriesinfo for the timeseries associated with the metric and label values.\nfunc (tsm *timeseriesMap) get(metric pmetric.Metric, kv pcommon.Map) (*timeseriesinfo, bool) {\n\t\/\/ This should only be invoked be functions called (directly or indirectly) by AdjustMetricSlice().\n\t\/\/ The lock protecting tsm.tsiMap is acquired there.\n\tname := metric.Name()\n\tkey := timeserieskey{\n\t\tname: name,\n\t\tattributes: getAttributesSignature(kv),\n\t}\n\tif metric.DataType() == pmetric.MetricDataTypeHistogram {\n\t\t\/\/ There are 2 types of Histograms whose aggregation temporality needs distinguishing:\n\t\t\/\/ * CumulativeHistogram\n\t\t\/\/ * GaugeHistogram\n\t\tkey.aggTemporality = metric.Histogram().AggregationTemporality()\n\t}\n\n\ttsm.mark = true\n\ttsi, ok := tsm.tsiMap[key]\n\tif !ok {\n\t\ttsi = ×eriesinfo{}\n\t\ttsm.tsiMap[key] = tsi\n\t}\n\ttsi.mark = true\n\treturn tsi, ok\n}\n\n\/\/ Create a unique timeseries signature consisting of the metric name and label values.\nfunc getAttributesSignature(kv pcommon.Map) string {\n\tlabelValues := make([]string, 0, kv.Len())\n\tkv.Sort().Range(func(_ string, attrValue pcommon.Value) bool {\n\t\tvalue := attrValue.StringVal()\n\t\tif value != \"\" {\n\t\t\tlabelValues = append(labelValues, value)\n\t\t}\n\t\treturn true\n\t})\n\treturn strings.Join(labelValues, \",\")\n}\n\n\/\/ Remove timeseries that have aged out.\nfunc (tsm *timeseriesMap) gc() {\n\ttsm.Lock()\n\tdefer tsm.Unlock()\n\t\/\/ this shouldn't happen under the current gc() strategy\n\tif !tsm.mark {\n\t\treturn\n\t}\n\tfor ts, tsi := range tsm.tsiMap {\n\t\tif !tsi.mark {\n\t\t\tdelete(tsm.tsiMap, ts)\n\t\t} else {\n\t\t\ttsi.mark = false\n\t\t}\n\t}\n\ttsm.mark = false\n}\n\nfunc newTimeseriesMap() *timeseriesMap {\n\treturn ×eriesMap{mark: true, tsiMap: map[timeserieskey]*timeseriesinfo{}}\n}\n\n\/\/ JobsMap maps from a job instance to a map of timeseries instances for the job.\ntype JobsMap struct {\n\tsync.RWMutex\n\t\/\/ The mutex is used to protect access to the member fields. It is acquired for most of\n\t\/\/ get() and also acquired by gc().\n\n\tgcInterval time.Duration\n\tlastGC time.Time\n\tjobsMap map[string]*timeseriesMap\n}\n\n\/\/ NewJobsMap creates a new (empty) JobsMap.\nfunc NewJobsMap(gcInterval time.Duration) *JobsMap {\n\treturn &JobsMap{gcInterval: gcInterval, lastGC: time.Now(), jobsMap: make(map[string]*timeseriesMap)}\n}\n\n\/\/ Remove jobs and timeseries that have aged out.\nfunc (jm *JobsMap) gc() {\n\tjm.Lock()\n\tdefer jm.Unlock()\n\t\/\/ once the structure is locked, confirm that gc() is still necessary\n\tif time.Since(jm.lastGC) > jm.gcInterval {\n\t\tfor sig, tsm := range jm.jobsMap {\n\t\t\ttsm.RLock()\n\t\t\ttsmNotMarked := !tsm.mark\n\t\t\t\/\/ take a read lock here, no need to get a full lock as we have a lock on the JobsMap\n\t\t\ttsm.RUnlock()\n\t\t\tif tsmNotMarked {\n\t\t\t\tdelete(jm.jobsMap, sig)\n\t\t\t} else {\n\t\t\t\t\/\/ a full lock will be obtained in here, if required.\n\t\t\t\ttsm.gc()\n\t\t\t}\n\t\t}\n\t\tjm.lastGC = time.Now()\n\t}\n}\n\nfunc (jm *JobsMap) maybeGC() {\n\t\/\/ speculatively check if gc() is necessary, recheck once the structure is locked\n\tjm.RLock()\n\tdefer jm.RUnlock()\n\tif time.Since(jm.lastGC) > jm.gcInterval {\n\t\tgo jm.gc()\n\t}\n}\n\nfunc (jm *JobsMap) get(job, instance string) *timeseriesMap {\n\tsig := job + \":\" + instance\n\t\/\/ a read locke is taken here as we will not need to modify jobsMap if the target timeseriesMap is available.\n\tjm.RLock()\n\ttsm, ok := jm.jobsMap[sig]\n\tjm.RUnlock()\n\tdefer jm.maybeGC()\n\tif ok {\n\t\treturn tsm\n\t}\n\tjm.Lock()\n\tdefer jm.Unlock()\n\t\/\/ Now that we've got an exclusive lock, check once more to ensure an entry wasn't created in the interim\n\t\/\/ and then create a new timeseriesMap if required.\n\ttsm2, ok2 := jm.jobsMap[sig]\n\tif ok2 {\n\t\treturn tsm2\n\t}\n\ttsm2 = newTimeseriesMap()\n\tjm.jobsMap[sig] = tsm2\n\treturn tsm2\n}\n\n\/\/ MetricsAdjuster takes a map from a metric instance to the initial point in the metrics instance\n\/\/ and provides AdjustMetricSlice, which takes a sequence of metrics and adjust their start times based on\n\/\/ the initial points.\ntype MetricsAdjuster struct {\n\ttsm *timeseriesMap\n\tlogger *zap.Logger\n}\n\n\/\/ NewMetricsAdjuster is a constructor for MetricsAdjuster.\nfunc NewMetricsAdjuster(tsm *timeseriesMap, logger *zap.Logger) *MetricsAdjuster {\n\treturn &MetricsAdjuster{\n\t\ttsm: tsm,\n\t\tlogger: logger,\n\t}\n}\n\n\/\/ AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and\n\/\/ previous points in the timeseriesMap.\nfunc (ma *MetricsAdjuster) AdjustMetrics(metrics pmetric.Metrics) {\n\t\/\/ The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that\n\t\/\/ nothing else can modify the data used for adjustment.\n\tma.tsm.Lock()\n\tdefer ma.tsm.Unlock()\n\tfor i := 0; i < metrics.ResourceMetrics().Len(); i++ {\n\t\trm := metrics.ResourceMetrics().At(i)\n\t\tfor j := 0; j < rm.ScopeMetrics().Len(); j++ {\n\t\t\tilm := rm.ScopeMetrics().At(j)\n\t\t\tfor k := 0; k < ilm.Metrics().Len(); k++ {\n\t\t\t\tma.adjustMetric(ilm.Metrics().At(k))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetric(metric pmetric.Metric) {\n\tswitch dataType := metric.DataType(); dataType {\n\tcase pmetric.MetricDataTypeGauge:\n\t\t\/\/ gauges don't need to be adjusted so no additional processing is necessary\n\n\tcase pmetric.MetricDataTypeHistogram:\n\t\tma.adjustMetricHistogram(metric)\n\n\tcase pmetric.MetricDataTypeSummary:\n\t\tma.adjustMetricSummary(metric)\n\n\tcase pmetric.MetricDataTypeSum:\n\t\tma.adjustMetricSum(metric)\n\n\tdefault:\n\t\t\/\/ this shouldn't happen\n\t\tma.logger.Info(\"Adjust - skipping unexpected point\", zap.String(\"type\", dataType.String()))\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricHistogram(current pmetric.Metric) {\n\thistogram := current.Histogram()\n\tif histogram.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative {\n\t\t\/\/ Only dealing with CumulativeDistributions.\n\t\treturn\n\t}\n\n\tcurrentPoints := histogram.DataPoints()\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentDist := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentDist.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.histogram.startTime = currentDist.StartTimestamp()\n\t\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentDist.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentDist.SetStartTimestamp(tsi.histogram.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentDist.Count() < tsi.histogram.previousCount || currentDist.Sum() < tsi.histogram.previousSum {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.histogram.startTime = currentDist.StartTimestamp()\n\t\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.histogram.previousCount = currentDist.Count()\n\t\ttsi.histogram.previousSum = currentDist.Sum()\n\t\tcurrentDist.SetStartTimestamp(tsi.histogram.startTime)\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricSum(current pmetric.Metric) {\n\tcurrentPoints := current.Sum().DataPoints()\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentSum := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentSum.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.number.startTime = currentSum.StartTimestamp()\n\t\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSum.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentSum.SetStartTimestamp(tsi.number.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSum.DoubleVal() < tsi.number.previousValue {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.number.startTime = currentSum.StartTimestamp()\n\t\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.number.previousValue = currentSum.DoubleVal()\n\t\tcurrentSum.SetStartTimestamp(tsi.number.startTime)\n\t}\n}\n\nfunc (ma *MetricsAdjuster) adjustMetricSummary(current pmetric.Metric) {\n\tcurrentPoints := current.Summary().DataPoints()\n\n\tfor i := 0; i < currentPoints.Len(); i++ {\n\t\tcurrentSummary := currentPoints.At(i)\n\t\ttsi, found := ma.tsm.get(current, currentSummary.Attributes())\n\t\tif !found {\n\t\t\t\/\/ initialize everything.\n\t\t\ttsi.summary.startTime = currentSummary.StartTimestamp()\n\t\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\tif currentSummary.FlagsImmutable().NoRecordedValue() {\n\t\t\t\/\/ TODO: Investigate why this does not reset.\n\t\t\tcurrentSummary.SetStartTimestamp(tsi.summary.startTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tif (currentSummary.Count() != 0 &&\n\t\t\ttsi.summary.previousCount != 0 &&\n\t\t\tcurrentSummary.Count() < tsi.summary.previousCount) ||\n\t\t\t(currentSummary.Sum() != 0 &&\n\t\t\t\ttsi.summary.previousSum != 0 &&\n\t\t\t\tcurrentSummary.Sum() < tsi.summary.previousSum) {\n\t\t\t\/\/ reset re-initialize everything.\n\t\t\ttsi.summary.startTime = currentSummary.StartTimestamp()\n\t\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Update only previous values.\n\t\ttsi.summary.previousCount = currentSummary.Count()\n\t\ttsi.summary.previousSum = currentSummary.Sum()\n\t\tcurrentSummary.SetStartTimestamp(tsi.summary.startTime)\n\t}\n}\n<|endoftext|>"} {"text":"test: match gccgo error messages for bug323.go.<|endoftext|>"} {"text":"package netscaler\n\nimport (\n\t\"github.com\/chiradeep\/go-nitro\/config\/ssl\"\n\n\t\"github.com\/chiradeep\/go-nitro\/netscaler\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"fmt\"\n\t\"log\"\n)\n\nfunc resourceNetScalerSslcertkey() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchemaVersion: 1,\n\t\tCreate: createSslcertkeyFunc,\n\t\tRead: readSslcertkeyFunc,\n\t\tUpdate: updateSslcertkeyFunc,\n\t\tDelete: deleteSslcertkeyFunc,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"bundle\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"certkey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"expirymonitor\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"fipskey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"hsmkey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"inform\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"linkcertkeyname\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"nodomaincheck\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"notificationperiod\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"ocspstaplingcache\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"passplain\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"password\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc createSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In createSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tvar sslcertkeyName string\n\tif v, ok := d.GetOk(\"certkey\"); ok {\n\t\tsslcertkeyName = v.(string)\n\t} else {\n\t\tsslcertkeyName = resource.PrefixedUniqueId(\"tf-sslcertkey-\")\n\t\td.Set(\"certkey\", sslcertkeyName)\n\t}\n\tsslcertkey := ssl.Sslcertkey{\n\t\tBundle: d.Get(\"bundle\").(string),\n\t\tCert: d.Get(\"cert\").(string),\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t\tExpirymonitor: d.Get(\"expirymonitor\").(string),\n\t\tFipskey: d.Get(\"fipskey\").(string),\n\t\tHsmkey: d.Get(\"hsmkey\").(string),\n\t\tInform: d.Get(\"inform\").(string),\n\t\tKey: d.Get(\"key\").(string),\n\t\tLinkcertkeyname: d.Get(\"linkcertkeyname\").(string),\n\t\tNodomaincheck: d.Get(\"nodomaincheck\").(bool),\n\t\tNotificationperiod: d.Get(\"notificationperiod\").(int),\n\t\tOcspstaplingcache: d.Get(\"ocspstaplingcache\").(bool),\n\t\tPassplain: d.Get(\"passplain\").(string),\n\t\tPassword: d.Get(\"password\").(bool),\n\t}\n\n\t_, err := client.AddResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(sslcertkeyName)\n\n\terr = readSslcertkeyFunc(d, meta)\n\tif err != nil {\n\t\tlog.Printf(\"[ERROR] netscaler-provider: ?? we just created this sslcertkey but we can't read it ?? %s\", sslcertkeyName)\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc readSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In readSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Id()\n\tlog.Printf(\"[DEBUG] netscaler-provider: Reading sslcertkey state %s\", sslcertkeyName)\n\tdata, err := client.FindResource(netscaler.Sslcertkey.Type(), sslcertkeyName)\n\tif err != nil {\n\t\tlog.Printf(\"[WARN] netscaler-provider: Clearing sslcertkey state %s\", sslcertkeyName)\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\td.Set(\"certkey\", data[\"certkey\"])\n\td.Set(\"bundle\", data[\"bundle\"])\n\td.Set(\"cert\", data[\"cert\"])\n\td.Set(\"certkey\", data[\"certkey\"])\n\td.Set(\"expirymonitor\", data[\"expirymonitor\"])\n\td.Set(\"fipskey\", data[\"fipskey\"])\n\td.Set(\"hsmkey\", data[\"hsmkey\"])\n\td.Set(\"inform\", data[\"inform\"])\n\td.Set(\"key\", data[\"key\"])\n\td.Set(\"linkcertkeyname\", data[\"linkcertkeyname\"])\n\td.Set(\"nodomaincheck\", data[\"nodomaincheck\"])\n\td.Set(\"notificationperiod\", data[\"notificationperiod\"])\n\td.Set(\"ocspstaplingcache\", data[\"ocspstaplingcache\"])\n\td.Set(\"passplain\", data[\"passplain\"])\n\td.Set(\"password\", data[\"password\"])\n\n\treturn nil\n\n}\n\nfunc updateSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In updateSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Get(\"certkey\").(string)\n\n\tsslcertkeyUpdate := ssl.Sslcertkey{\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t}\n\tsslcertkeyChange := ssl.Sslcertkey{\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t}\n\thasUpdate := false \/\/depending on which field changed, we have to use Update or Change API\n\thasChange := false\n\tif d.HasChange(\"expirymonitor\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Expirymonitor has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyUpdate.Expirymonitor = d.Get(\"expirymonitor\").(string)\n\t\thasUpdate = true\n\t}\n\tif d.HasChange(\"notificationperiod\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Notificationperiod has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyUpdate.Notificationperiod = d.Get(\"notificationperiod\").(int)\n\t\thasUpdate = true\n\t}\n\tif d.HasChange(\"cert\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: cert has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Cert = d.Get(\"cert\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"key\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: key has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Key = d.Get(\"key\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"password\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: password has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Password = d.Get(\"password\").(bool)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"fipskey\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: fipskey has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Fipskey = d.Get(\"fipskey\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"hsmkey\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Hsmkey has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Hsmkey = d.Get(\"hsmkey\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"inform\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: inform has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Inform = d.Get(\"inform\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"passplain\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: passplain has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Passplain = d.Get(\"passplain\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"nodomaincheck\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: nodomaincheck has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Nodomaincheck = d.Get(\"nodomaincheck\").(bool)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"ocspstaplingcache\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Ocspstaplingcache has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Ocspstaplingcache = d.Get(\"ocspstaplingcache\").(bool)\n\t\thasChange = true\n\t}\n\n\tif hasUpdate {\n\t\tsslcertkeyUpdate.Expirymonitor = d.Get(\"expirymonitor\").(string) \/\/always expected by NITRO API\n\t\t_, err := client.UpdateResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkeyUpdate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating sslcertkey %s\", sslcertkeyName)\n\t\t}\n\t}\n\tif hasChange {\n\t\t_, err := client.ChangeResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkeyChange)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error changing sslcertkey %s\", sslcertkeyName)\n\t\t}\n\t}\n\treturn readSslcertkeyFunc(d, meta)\n}\n\nfunc deleteSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In deleteSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Id()\n\terr := client.DeleteResource(netscaler.Sslcertkey.Type(), sslcertkeyName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(\"\")\n\n\treturn nil\n}\nFix handling of nodomaincheck in sslcertkeypackage netscaler\n\nimport (\n\t\"github.com\/chiradeep\/go-nitro\/config\/ssl\"\n\n\t\"github.com\/chiradeep\/go-nitro\/netscaler\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"fmt\"\n\t\"log\"\n)\n\nfunc resourceNetScalerSslcertkey() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchemaVersion: 1,\n\t\tCreate: createSslcertkeyFunc,\n\t\tRead: readSslcertkeyFunc,\n\t\tUpdate: updateSslcertkeyFunc,\n\t\tDelete: deleteSslcertkeyFunc,\n\t\tCustomizeDiff: customizeDiff,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"bundle\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"certkey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"expirymonitor\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"fipskey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"hsmkey\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"inform\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"key\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"linkcertkeyname\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"nodomaincheck\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"notificationperiod\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"ocspstaplingcache\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"passplain\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"password\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc createSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In createSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tvar sslcertkeyName string\n\tif v, ok := d.GetOk(\"certkey\"); ok {\n\t\tsslcertkeyName = v.(string)\n\t} else {\n\t\tsslcertkeyName = resource.PrefixedUniqueId(\"tf-sslcertkey-\")\n\t\td.Set(\"certkey\", sslcertkeyName)\n\t}\n\tsslcertkey := ssl.Sslcertkey{\n\t\tBundle: d.Get(\"bundle\").(string),\n\t\tCert: d.Get(\"cert\").(string),\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t\tExpirymonitor: d.Get(\"expirymonitor\").(string),\n\t\tFipskey: d.Get(\"fipskey\").(string),\n\t\tHsmkey: d.Get(\"hsmkey\").(string),\n\t\tInform: d.Get(\"inform\").(string),\n\t\tKey: d.Get(\"key\").(string),\n\t\tLinkcertkeyname: d.Get(\"linkcertkeyname\").(string),\n\t\t\/\/ This is always set to false on creation which effectively excludes it from the request JSON\n\t\t\/\/ Nodomaincheck is not an object attribute but a flag for the change operation\n\t\t\/\/ of the resource\n\t\tNodomaincheck: false,\n\t\tNotificationperiod: d.Get(\"notificationperiod\").(int),\n\t\tOcspstaplingcache: d.Get(\"ocspstaplingcache\").(bool),\n\t\tPassplain: d.Get(\"passplain\").(string),\n\t\tPassword: d.Get(\"password\").(bool),\n\t}\n\n\t_, err := client.AddResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(sslcertkeyName)\n\n\terr = readSslcertkeyFunc(d, meta)\n\tif err != nil {\n\t\tlog.Printf(\"[ERROR] netscaler-provider: ?? we just created this sslcertkey but we can't read it ?? %s\", sslcertkeyName)\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc readSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In readSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Id()\n\tlog.Printf(\"[DEBUG] netscaler-provider: Reading sslcertkey state %s\", sslcertkeyName)\n\tdata, err := client.FindResource(netscaler.Sslcertkey.Type(), sslcertkeyName)\n\tif err != nil {\n\t\tlog.Printf(\"[WARN] netscaler-provider: Clearing sslcertkey state %s\", sslcertkeyName)\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\td.Set(\"certkey\", data[\"certkey\"])\n\td.Set(\"bundle\", data[\"bundle\"])\n\td.Set(\"cert\", data[\"cert\"])\n\td.Set(\"certkey\", data[\"certkey\"])\n\td.Set(\"expirymonitor\", data[\"expirymonitor\"])\n\td.Set(\"fipskey\", data[\"fipskey\"])\n\td.Set(\"hsmkey\", data[\"hsmkey\"])\n\td.Set(\"inform\", data[\"inform\"])\n\td.Set(\"key\", data[\"key\"])\n\td.Set(\"linkcertkeyname\", data[\"linkcertkeyname\"])\n\td.Set(\"nodomaincheck\", data[\"nodomaincheck\"])\n\td.Set(\"notificationperiod\", data[\"notificationperiod\"])\n\td.Set(\"ocspstaplingcache\", data[\"ocspstaplingcache\"])\n\td.Set(\"passplain\", data[\"passplain\"])\n\td.Set(\"password\", data[\"password\"])\n\n\treturn nil\n\n}\n\nfunc updateSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In updateSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Get(\"certkey\").(string)\n\n\tsslcertkeyUpdate := ssl.Sslcertkey{\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t}\n\tsslcertkeyChange := ssl.Sslcertkey{\n\t\tCertkey: d.Get(\"certkey\").(string),\n\t}\n\thasUpdate := false \/\/depending on which field changed, we have to use Update or Change API\n\thasChange := false\n\tif d.HasChange(\"expirymonitor\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Expirymonitor has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyUpdate.Expirymonitor = d.Get(\"expirymonitor\").(string)\n\t\thasUpdate = true\n\t}\n\tif d.HasChange(\"notificationperiod\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Notificationperiod has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyUpdate.Notificationperiod = d.Get(\"notificationperiod\").(int)\n\t\thasUpdate = true\n\t}\n\tif d.HasChange(\"cert\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: cert has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Cert = d.Get(\"cert\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"key\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: key has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Key = d.Get(\"key\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"password\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: password has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Password = d.Get(\"password\").(bool)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"fipskey\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: fipskey has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Fipskey = d.Get(\"fipskey\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"hsmkey\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Hsmkey has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Hsmkey = d.Get(\"hsmkey\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"inform\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: inform has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Inform = d.Get(\"inform\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"passplain\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: passplain has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Passplain = d.Get(\"passplain\").(string)\n\t\thasChange = true\n\t}\n\tif d.HasChange(\"ocspstaplingcache\") {\n\t\tlog.Printf(\"[DEBUG] netscaler-provider: Ocspstaplingcache has changed for sslcertkey %s, starting update\", sslcertkeyName)\n\t\tsslcertkeyChange.Ocspstaplingcache = d.Get(\"ocspstaplingcache\").(bool)\n\t\thasChange = true\n\t}\n\n\tif hasUpdate {\n\t\tsslcertkeyUpdate.Expirymonitor = d.Get(\"expirymonitor\").(string) \/\/always expected by NITRO API\n\t\t_, err := client.UpdateResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkeyUpdate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating sslcertkey %s\", sslcertkeyName)\n\t\t}\n\t}\n\t\/\/ nodomaincheck is a flag for the change operation\n\t\/\/ therefore its value is always used for the operation\n\tsslcertkeyChange.Nodomaincheck = d.Get(\"nodomaincheck\").(bool)\n\tif hasChange {\n\n\t\t_, err := client.ChangeResource(netscaler.Sslcertkey.Type(), sslcertkeyName, &sslcertkeyChange)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error changing sslcertkey %s\", sslcertkeyName)\n\t\t}\n\t}\n\treturn readSslcertkeyFunc(d, meta)\n}\n\nfunc deleteSslcertkeyFunc(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In deleteSslcertkeyFunc\")\n\tclient := meta.(*NetScalerNitroClient).client\n\tsslcertkeyName := d.Id()\n\terr := client.DeleteResource(netscaler.Sslcertkey.Type(), sslcertkeyName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(\"\")\n\n\treturn nil\n}\n\nfunc customizeDiff(diff *schema.ResourceDiff, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] netscaler-provider: In customizeDiff\")\n\to := diff.GetChangedKeysPrefix(\"\")\n\n\tif len(o) == 1 && o[0] == \"nodomaincheck\" {\n\t\tlog.Printf(\"Only nodomaincheck in diff\")\n\t\tdiff.Clear(\"nodomaincheck\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package extra\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/name5566\/leaf\/log\"\n\t\"github.com\/name5566\/leaf\/util\"\n\t\"math\"\n\t\"reflect\"\n)\n\n\/\/ -------------------------\n\/\/ | id | protobuf message |\n\/\/ -------------------------\ntype ProtobufRouter struct {\n\tlittleEndian bool\n\tmsgInfo []*ProtobufMsgInfo\n\tmsgID map[reflect.Type]uint16\n}\n\ntype ProtobufMsgInfo struct {\n\tmsgType reflect.Type\n\tmsgRouter *util.CallRouter\n}\n\nfunc NewProtobufRouter() *ProtobufRouter {\n\tr := new(ProtobufRouter)\n\tr.littleEndian = false\n\tr.msgID = make(map[reflect.Type]uint16)\n\treturn r\n}\n\n\/\/ It's dangerous to call the method on routing or marshaling (unmarshaling)\nfunc (r *ProtobufRouter) SetByteOrder(littleEndian bool) {\n\tr.littleEndian = littleEndian\n}\n\n\/\/ It's dangerous to call the method on routing or marshaling (unmarshaling)\nfunc (r *ProtobufRouter) Register(msg proto.Message, msgRouter *util.CallRouter) {\n\tif len(r.msgInfo) >= math.MaxUint16 {\n\t\tlog.Fatal(\"too many protobuf messages (max = %v)\", math.MaxUint16)\n\t}\n\n\tmsgType := reflect.TypeOf(msg)\n\tif msgType == nil || msgType.Kind() != reflect.Ptr {\n\t\tlog.Fatal(\"protobuf message pointer required\")\n\t}\n\n\ti := new(ProtobufMsgInfo)\n\ti.msgType = msgType\n\ti.msgRouter = msgRouter\n\tr.msgInfo = append(r.msgInfo, i)\n\n\tr.msgID[msgType] = uint16(len(r.msgInfo) - 1)\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Route(msg proto.Message, userData interface{}) error {\n\tmsgType := reflect.TypeOf(msg)\n\n\tid, ok := r.msgID[msgType]\n\tif !ok {\n\t\treturn errors.New(fmt.Sprintf(\"message %s not registered\", msgType))\n\t}\n\n\tmsgRouter := r.msgInfo[id].msgRouter\n\tif msgRouter != nil {\n\t\tmsgRouter.AsynCall0(msgType, msg, userData)\n\t}\n\treturn nil\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Unmarshal(data []byte) (proto.Message, error) {\n\tif len(data) < 2 {\n\t\treturn nil, errors.New(\"protobuf data too short\")\n\t}\n\n\t\/\/ id\n\tvar id uint16\n\tif r.littleEndian {\n\t\tid = binary.LittleEndian.Uint16(data)\n\t} else {\n\t\tid = binary.BigEndian.Uint16(data)\n\t}\n\n\t\/\/ msg\n\tif id >= uint16(len(r.msgInfo)) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"message id %v not registered\", id))\n\t}\n\tmsg := reflect.New(r.msgInfo[id].msgType.Elem()).Interface().(proto.Message)\n\treturn msg, proto.UnmarshalMerge(data[2:], msg)\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Marshal(msg proto.Message) (id []byte, data []byte, err error) {\n\tmsgType := reflect.TypeOf(msg)\n\n\t\/\/ id\n\t_id, ok := r.msgID[msgType]\n\tif !ok {\n\t\terr = errors.New(fmt.Sprintf(\"message %s not registered\", msgType))\n\t\treturn\n\t}\n\n\tid = make([]byte, 2)\n\tif r.littleEndian {\n\t\tbinary.LittleEndian.PutUint16(id, _id)\n\t} else {\n\t\tbinary.BigEndian.PutUint16(id, _id)\n\t}\n\n\t\/\/ data\n\tdata, err = proto.Marshal(msg)\n\treturn\n}\nmsg handler support.package extra\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/name5566\/leaf\/log\"\n\t\"github.com\/name5566\/leaf\/util\"\n\t\"math\"\n\t\"reflect\"\n)\n\n\/\/ -------------------------\n\/\/ | id | protobuf message |\n\/\/ -------------------------\ntype ProtobufRouter struct {\n\tlittleEndian bool\n\tmsgInfo []*ProtobufMsgInfo\n\tmsgID map[reflect.Type]uint16\n}\n\ntype ProtobufMsgInfo struct {\n\tmsgType reflect.Type\n\tmsgRouter *util.CallRouter\n\tmsgHandler ProtobufMsgHandler\n}\n\ntype ProtobufMsgHandler func([]interface{})\n\nfunc NewProtobufRouter() *ProtobufRouter {\n\tr := new(ProtobufRouter)\n\tr.littleEndian = false\n\tr.msgID = make(map[reflect.Type]uint16)\n\treturn r\n}\n\n\/\/ It's dangerous to call the method on routing or marshaling (unmarshaling)\nfunc (r *ProtobufRouter) SetByteOrder(littleEndian bool) {\n\tr.littleEndian = littleEndian\n}\n\n\/\/ It's dangerous to call the method on routing or marshaling (unmarshaling)\nfunc (r *ProtobufRouter) RegisterRouter(msg proto.Message, msgRouter *util.CallRouter) {\n\tprotobufMsgInfo(msg).msgRouter = msgRouter\n}\n\n\/\/ It's dangerous to call the method on routing or marshaling (unmarshaling)\nfunc (r *ProtobufRouter) RegisterHandler(msg proto.Message, msgHandler ProtobufMsgHandler) {\n\tprotobufMsgInfo(msg).msgHandler = msgHandler\n}\n\nfunc (r *ProtobufRouter) protobufMsgInfo(msg proto.Message) *ProtobufMsgInfo {\n\tmsgType := reflect.TypeOf(msg)\n\tif msgType == nil || msgType.Kind() != reflect.Ptr {\n\t\tlog.Fatal(\"protobuf message pointer required\")\n\t}\n\n\tif id, ok := r.msgID[msgType]; ok {\n\t\treturn r.msgInfo[id]\n\t}\n\n\tif len(r.msgInfo) >= math.MaxUint16 {\n\t\tlog.Fatal(\"too many protobuf messages (max = %v)\", math.MaxUint16)\n\t}\n\n\ti := new(ProtobufMsgInfo)\n\ti.msgType = msgType\n\tr.msgInfo = append(r.msgInfo, i)\n\tr.msgID[msgType] = uint16(len(r.msgInfo) - 1)\n\treturn i\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Route(msg proto.Message, userData interface{}) error {\n\tmsgType := reflect.TypeOf(msg)\n\n\tid, ok := r.msgID[msgType]\n\tif !ok {\n\t\treturn errors.New(fmt.Sprintf(\"message %s not registered\", msgType))\n\t}\n\n\ti := r.msgInfo[id]\n\tif i.msgHandler != nil {\n\t\ti.msgHandler(msgType, msg, userData)\n\t}\n\tif i.msgRouter != nil {\n\t\ti.msgRouter.AsynCall0(msgType, msg, userData)\n\t}\n\treturn nil\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Unmarshal(data []byte) (proto.Message, error) {\n\tif len(data) < 2 {\n\t\treturn nil, errors.New(\"protobuf data too short\")\n\t}\n\n\t\/\/ id\n\tvar id uint16\n\tif r.littleEndian {\n\t\tid = binary.LittleEndian.Uint16(data)\n\t} else {\n\t\tid = binary.BigEndian.Uint16(data)\n\t}\n\n\t\/\/ msg\n\tif id >= uint16(len(r.msgInfo)) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"message id %v not registered\", id))\n\t}\n\tmsg := reflect.New(r.msgInfo[id].msgType.Elem()).Interface().(proto.Message)\n\treturn msg, proto.UnmarshalMerge(data[2:], msg)\n}\n\n\/\/ goroutine safe\nfunc (r *ProtobufRouter) Marshal(msg proto.Message) (id []byte, data []byte, err error) {\n\tmsgType := reflect.TypeOf(msg)\n\n\t\/\/ id\n\t_id, ok := r.msgID[msgType]\n\tif !ok {\n\t\terr = errors.New(fmt.Sprintf(\"message %s not registered\", msgType))\n\t\treturn\n\t}\n\n\tid = make([]byte, 2)\n\tif r.littleEndian {\n\t\tbinary.LittleEndian.PutUint16(id, _id)\n\t} else {\n\t\tbinary.BigEndian.PutUint16(id, _id)\n\t}\n\n\t\/\/ data\n\tdata, err = proto.Marshal(msg)\n\treturn\n}\n<|endoftext|>"} {"text":"package common\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tgogit \"github.com\/go-git\/go-git\/v5\"\n\t\"github.com\/go-git\/go-git\/v5\/plumbing\"\n\tgitssh \"github.com\/go-git\/go-git\/v5\/plumbing\/transport\/ssh\"\n\t\"github.com\/projecteru2\/core\/types\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ GitScm is gitlab or github source code manager\ntype GitScm struct {\n\thttp.Client\n\tConfig types.GitConfig\n\tAuthHeaders map[string]string\n\n\tkeyBytes []byte\n}\n\n\/\/ NewGitScm .\nfunc NewGitScm(config types.GitConfig, authHeaders map[string]string) (*GitScm, error) {\n\tb, err := ioutil.ReadFile(config.PrivateKey)\n\treturn &GitScm{\n\t\tConfig: config,\n\t\tAuthHeaders: authHeaders,\n\t\tkeyBytes: b,\n\t}, err\n}\n\n\/\/ SourceCode clone code from repository into path, by revision\nfunc (g *GitScm) SourceCode(ctx context.Context, repository, path, revision string, submodule bool) error {\n\tvar repo *gogit.Repository\n\tvar err error\n\tctx, cancel := context.WithTimeout(ctx, g.Config.CloneTimeout)\n\tdefer cancel()\n\tswitch {\n\tcase strings.Contains(repository, \"https:\/\/\"):\n\t\trepo, err = gogit.PlainCloneContext(ctx, path, false, &gogit.CloneOptions{\n\t\t\tURL: repository,\n\t\t})\n\tcase strings.Contains(repository, \"git@\") || strings.Contains(repository, \"gitlab@\"):\n\t\tsigner, signErr := ssh.ParsePrivateKey(g.keyBytes)\n\t\tif signErr != nil {\n\t\t\treturn signErr\n\t\t}\n\t\tsplitRepo := strings.Split(repository, \"@\")\n\t\tauth := &gitssh.PublicKeys{\n\t\t\tUser: splitRepo[0],\n\t\t\tSigner: signer,\n\t\t\tHostKeyCallbackHelper: gitssh.HostKeyCallbackHelper{\n\t\t\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(), \/\/ nolint\n\t\t\t},\n\t\t}\n\t\trepo, err = gogit.PlainCloneContext(ctx, path, false, &gogit.CloneOptions{\n\t\t\tURL: repository,\n\t\t\tProgress: os.Stdout,\n\t\t\tAuth: auth,\n\t\t})\n\tdefault:\n\t\treturn types.ErrNotSupport\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thash, err := repo.ResolveRevision(plumbing.Revision(revision))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = w.Checkout(&gogit.CheckoutOptions{Hash: *hash}); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"[SourceCode] Fetch repo %s\", repository)\n\tlog.Infof(\"[SourceCode] Checkout to commit %s\", hash)\n\n\t\/\/ Prepare submodules\n\tif submodule {\n\t\ts, err := w.Submodules()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.Update(&gogit.SubmoduleUpdateOptions{Init: true})\n\t}\n\treturn err\n}\n\n\/\/ Artifact download the artifact to the path, then unzip it\nfunc (g *GitScm) Artifact(artifact, path string) error {\n\treq, err := http.NewRequest(http.MethodGet, artifact, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range g.AuthHeaders {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tlog.Infof(\"[Artifact] Downloading artifacts from %q\", artifact)\n\tresp, err := g.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Download artifact error %q, code %d\", artifact, resp.StatusCode)\n\t}\n\n\t\/\/ extract files from zipfile\n\treturn unzipFile(resp.Body, path)\n}\n\n\/\/ Security remove the .git folder\nfunc (g *GitScm) Security(path string) error {\n\treturn os.RemoveAll(filepath.Join(path, \".git\"))\n}\nbugfix: fix repo username when image building (#229)package common\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tgogit \"github.com\/go-git\/go-git\/v5\"\n\t\"github.com\/go-git\/go-git\/v5\/plumbing\"\n\tgitssh \"github.com\/go-git\/go-git\/v5\/plumbing\/transport\/ssh\"\n\t\"github.com\/projecteru2\/core\/types\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ GitScm is gitlab or github source code manager\ntype GitScm struct {\n\thttp.Client\n\tConfig types.GitConfig\n\tAuthHeaders map[string]string\n\n\tkeyBytes []byte\n}\n\n\/\/ NewGitScm .\nfunc NewGitScm(config types.GitConfig, authHeaders map[string]string) (*GitScm, error) {\n\tb, err := ioutil.ReadFile(config.PrivateKey)\n\treturn &GitScm{\n\t\tConfig: config,\n\t\tAuthHeaders: authHeaders,\n\t\tkeyBytes: b,\n\t}, err\n}\n\n\/\/ SourceCode clone code from repository into path, by revision\nfunc (g *GitScm) SourceCode(ctx context.Context, repository, path, revision string, submodule bool) error {\n\tvar repo *gogit.Repository\n\tvar err error\n\tctx, cancel := context.WithTimeout(ctx, g.Config.CloneTimeout)\n\tdefer cancel()\n\tswitch {\n\tcase strings.Contains(repository, \"https:\/\/\"):\n\t\trepo, err = gogit.PlainCloneContext(ctx, path, false, &gogit.CloneOptions{\n\t\t\tURL: repository,\n\t\t})\n\tcase strings.Contains(repository, \"git@\") || strings.Contains(repository, \"gitlab@\"):\n\t\tsigner, signErr := ssh.ParsePrivateKey(g.keyBytes)\n\t\tif signErr != nil {\n\t\t\treturn signErr\n\t\t}\n\t\tsplitRepo := strings.Split(repository, \"@\")\n\t\tuser, parseErr := url.Parse(splitRepo[0])\n\t\tif parseErr != nil {\n\t\t\treturn parseErr\n\t\t}\n\t\tauth := &gitssh.PublicKeys{\n\t\t\tUser: user.Host + user.Path,\n\t\t\tSigner: signer,\n\t\t\tHostKeyCallbackHelper: gitssh.HostKeyCallbackHelper{\n\t\t\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(), \/\/ nolint\n\t\t\t},\n\t\t}\n\t\trepo, err = gogit.PlainCloneContext(ctx, path, false, &gogit.CloneOptions{\n\t\t\tURL: repository,\n\t\t\tProgress: ioutil.Discard,\n\t\t\tAuth: auth,\n\t\t})\n\tdefault:\n\t\treturn types.ErrNotSupport\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thash, err := repo.ResolveRevision(plumbing.Revision(revision))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = w.Checkout(&gogit.CheckoutOptions{Hash: *hash}); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"[SourceCode] Fetch repo %s\", repository)\n\tlog.Infof(\"[SourceCode] Checkout to commit %s\", hash)\n\n\t\/\/ Prepare submodules\n\tif submodule {\n\t\ts, err := w.Submodules()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.Update(&gogit.SubmoduleUpdateOptions{Init: true})\n\t}\n\treturn err\n}\n\n\/\/ Artifact download the artifact to the path, then unzip it\nfunc (g *GitScm) Artifact(artifact, path string) error {\n\treq, err := http.NewRequest(http.MethodGet, artifact, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range g.AuthHeaders {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tlog.Infof(\"[Artifact] Downloading artifacts from %q\", artifact)\n\tresp, err := g.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Download artifact error %q, code %d\", artifact, resp.StatusCode)\n\t}\n\n\t\/\/ extract files from zipfile\n\treturn unzipFile(resp.Body, path)\n}\n\n\/\/ Security remove the .git folder\nfunc (g *GitScm) Security(path string) error {\n\treturn os.RemoveAll(filepath.Join(path, \".git\"))\n}\n<|endoftext|>"} {"text":"package stun\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ ErrorCodeAttribute represents ERROR-CODE attribute.\n\/\/\n\/\/ https:\/\/tools.ietf.org\/html\/rfc5389#section-15.6\ntype ErrorCodeAttribute struct {\n\tCode ErrorCode\n\tReason []byte\n}\n\nfunc (c ErrorCodeAttribute) String() string {\n\treturn fmt.Sprintf(\"%d: %s\", c.Code, c.Reason)\n}\t\n\n\/\/ constants for ERROR-CODE encoding.\nconst (\n\terrorCodeReasonStart = 4\n\terrorCodeClassByte = 2\n\terrorCodeNumberByte = 3\n\terrorCodeReasonMaxB = 763\n\terrorCodeModulo = 100\n)\n\n\/\/ AddTo adds ERROR-CODE to m.\nfunc (c ErrorCodeAttribute) AddTo(m *Message) error {\n\tvalue := make([]byte, 0, errorCodeReasonMaxB)\n\tif len(c.Reason) > errorCodeReasonMaxB {\n\t\treturn &AttrOverflowErr{\n\t\t\tGot: len(c.Reason) + errorCodeReasonStart,\n\t\t\tMax: errorCodeReasonMaxB + errorCodeReasonStart,\n\t\t\tType: AttrErrorCode,\n\t\t}\n\t}\n\tvalue = value[:errorCodeReasonStart+len(c.Reason)]\n\tnumber := byte(c.Code % errorCodeModulo) \/\/ error code modulo 100\n\tclass := byte(c.Code \/ errorCodeModulo) \/\/ hundred digit\n\tvalue[errorCodeClassByte] = class\n\tvalue[errorCodeNumberByte] = number\n\tcopy(value[errorCodeReasonStart:], c.Reason)\n\tm.Add(AttrErrorCode, value)\n\treturn nil\n}\n\n\/\/ GetFrom decodes ERROR-CODE from m. Reason is valid until m.Raw is valid.\nfunc (c *ErrorCodeAttribute) GetFrom(m *Message) error {\n\tv, err := m.Get(AttrErrorCode)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(v) < errorCodeReasonStart {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\tvar (\n\t\tclass = uint16(v[errorCodeClassByte])\n\t\tnumber = uint16(v[errorCodeNumberByte])\n\t\tcode = int(class*errorCodeModulo + number)\n\t)\n\tc.Code = ErrorCode(code)\n\tc.Reason = v[errorCodeReasonStart:]\n\treturn nil\n}\n\n\/\/ ErrorCode is code for ERROR-CODE attribute.\ntype ErrorCode int\n\n\/\/ ErrNoDefaultReason means that default reason for provided error code\n\/\/ is not defined in RFC.\nvar ErrNoDefaultReason = errors.New(\"no default reason for ErrorCode\")\n\n\/\/ AddTo adds ERROR-CODE with default reason to m. If there\n\/\/ is no default reason, returns ErrNoDefaultReason.\nfunc (c ErrorCode) AddTo(m *Message) error {\n\treason := errorReasons[c]\n\tif reason == nil {\n\t\treturn ErrNoDefaultReason\n\t}\n\ta := &ErrorCodeAttribute{\n\t\tCode: c,\n\t\tReason: reason,\n\t}\n\treturn a.AddTo(m)\n}\n\n\/\/ Possible error codes.\nconst (\n\tCodeTryAlternate ErrorCode = 300\n\tCodeBadRequest ErrorCode = 400\n\tCodeUnauthorised ErrorCode = 401\n\tCodeUnknownAttribute ErrorCode = 420\n\tCodeStaleNonce ErrorCode = 428\n\tCodeRoleConflict ErrorCode = 478\n\tCodeServerError ErrorCode = 500\n)\n\n\/\/ Error codes from RFC 5766.\n\/\/\n\/\/ https:\/\/trac.tools.ietf.org\/html\/rfc5766#section-15\nconst (\n\tCodeForbidden ErrorCode = 403 \/\/ Forbidden\n\tCodeAllocMismatch ErrorCode = 437 \/\/ Allocation Mismatch\n\tCodeWrongCredentials ErrorCode = 441 \/\/ Wrong Credentials\n\tCodeUnsupportedTransProto ErrorCode = 442 \/\/ Unsupported Transport Protocol\n\tCodeAllocQuotaReached ErrorCode = 486 \/\/ Allocation Quota Reached\n\tCodeInsufficientCapacity ErrorCode = 508 \/\/ Insufficient Capacity\n)\n\nvar errorReasons = map[ErrorCode][]byte{\n\tCodeTryAlternate: []byte(\"Try Alternate\"),\n\tCodeBadRequest: []byte(\"Bad Request\"),\n\tCodeUnauthorised: []byte(\"Unauthorised\"),\n\tCodeUnknownAttribute: []byte(\"Unknown Attribute\"),\n\tCodeStaleNonce: []byte(\"Stale Nonce\"),\n\tCodeServerError: []byte(\"Server Error\"),\n\tCodeRoleConflict: []byte(\"Role Conflict\"),\n\n\t\/\/ RFC 5766.\n\tCodeForbidden: []byte(\"Forbidden\"),\n\tCodeAllocMismatch: []byte(\"Allocation Mismatch\"),\n\tCodeWrongCredentials: []byte(\"Wrong Credentials\"),\n\tCodeUnsupportedTransProto: []byte(\"Unsupported Transport Protocol\"),\n\tCodeAllocQuotaReached: []byte(\"Allocation Quota Reached\"),\n\tCodeInsufficientCapacity: []byte(\"Insufficient Capacity\"),\n}\na:errorcode: gofmtpackage stun\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ ErrorCodeAttribute represents ERROR-CODE attribute.\n\/\/\n\/\/ https:\/\/tools.ietf.org\/html\/rfc5389#section-15.6\ntype ErrorCodeAttribute struct {\n\tCode ErrorCode\n\tReason []byte\n}\n\nfunc (c ErrorCodeAttribute) String() string {\n\treturn fmt.Sprintf(\"%d: %s\", c.Code, c.Reason)\n}\n\n\/\/ constants for ERROR-CODE encoding.\nconst (\n\terrorCodeReasonStart = 4\n\terrorCodeClassByte = 2\n\terrorCodeNumberByte = 3\n\terrorCodeReasonMaxB = 763\n\terrorCodeModulo = 100\n)\n\n\/\/ AddTo adds ERROR-CODE to m.\nfunc (c ErrorCodeAttribute) AddTo(m *Message) error {\n\tvalue := make([]byte, 0, errorCodeReasonMaxB)\n\tif len(c.Reason) > errorCodeReasonMaxB {\n\t\treturn &AttrOverflowErr{\n\t\t\tGot: len(c.Reason) + errorCodeReasonStart,\n\t\t\tMax: errorCodeReasonMaxB + errorCodeReasonStart,\n\t\t\tType: AttrErrorCode,\n\t\t}\n\t}\n\tvalue = value[:errorCodeReasonStart+len(c.Reason)]\n\tnumber := byte(c.Code % errorCodeModulo) \/\/ error code modulo 100\n\tclass := byte(c.Code \/ errorCodeModulo) \/\/ hundred digit\n\tvalue[errorCodeClassByte] = class\n\tvalue[errorCodeNumberByte] = number\n\tcopy(value[errorCodeReasonStart:], c.Reason)\n\tm.Add(AttrErrorCode, value)\n\treturn nil\n}\n\n\/\/ GetFrom decodes ERROR-CODE from m. Reason is valid until m.Raw is valid.\nfunc (c *ErrorCodeAttribute) GetFrom(m *Message) error {\n\tv, err := m.Get(AttrErrorCode)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(v) < errorCodeReasonStart {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\tvar (\n\t\tclass = uint16(v[errorCodeClassByte])\n\t\tnumber = uint16(v[errorCodeNumberByte])\n\t\tcode = int(class*errorCodeModulo + number)\n\t)\n\tc.Code = ErrorCode(code)\n\tc.Reason = v[errorCodeReasonStart:]\n\treturn nil\n}\n\n\/\/ ErrorCode is code for ERROR-CODE attribute.\ntype ErrorCode int\n\n\/\/ ErrNoDefaultReason means that default reason for provided error code\n\/\/ is not defined in RFC.\nvar ErrNoDefaultReason = errors.New(\"no default reason for ErrorCode\")\n\n\/\/ AddTo adds ERROR-CODE with default reason to m. If there\n\/\/ is no default reason, returns ErrNoDefaultReason.\nfunc (c ErrorCode) AddTo(m *Message) error {\n\treason := errorReasons[c]\n\tif reason == nil {\n\t\treturn ErrNoDefaultReason\n\t}\n\ta := &ErrorCodeAttribute{\n\t\tCode: c,\n\t\tReason: reason,\n\t}\n\treturn a.AddTo(m)\n}\n\n\/\/ Possible error codes.\nconst (\n\tCodeTryAlternate ErrorCode = 300\n\tCodeBadRequest ErrorCode = 400\n\tCodeUnauthorised ErrorCode = 401\n\tCodeUnknownAttribute ErrorCode = 420\n\tCodeStaleNonce ErrorCode = 428\n\tCodeRoleConflict ErrorCode = 478\n\tCodeServerError ErrorCode = 500\n)\n\n\/\/ Error codes from RFC 5766.\n\/\/\n\/\/ https:\/\/trac.tools.ietf.org\/html\/rfc5766#section-15\nconst (\n\tCodeForbidden ErrorCode = 403 \/\/ Forbidden\n\tCodeAllocMismatch ErrorCode = 437 \/\/ Allocation Mismatch\n\tCodeWrongCredentials ErrorCode = 441 \/\/ Wrong Credentials\n\tCodeUnsupportedTransProto ErrorCode = 442 \/\/ Unsupported Transport Protocol\n\tCodeAllocQuotaReached ErrorCode = 486 \/\/ Allocation Quota Reached\n\tCodeInsufficientCapacity ErrorCode = 508 \/\/ Insufficient Capacity\n)\n\nvar errorReasons = map[ErrorCode][]byte{\n\tCodeTryAlternate: []byte(\"Try Alternate\"),\n\tCodeBadRequest: []byte(\"Bad Request\"),\n\tCodeUnauthorised: []byte(\"Unauthorised\"),\n\tCodeUnknownAttribute: []byte(\"Unknown Attribute\"),\n\tCodeStaleNonce: []byte(\"Stale Nonce\"),\n\tCodeServerError: []byte(\"Server Error\"),\n\tCodeRoleConflict: []byte(\"Role Conflict\"),\n\n\t\/\/ RFC 5766.\n\tCodeForbidden: []byte(\"Forbidden\"),\n\tCodeAllocMismatch: []byte(\"Allocation Mismatch\"),\n\tCodeWrongCredentials: []byte(\"Wrong Credentials\"),\n\tCodeUnsupportedTransProto: []byte(\"Unsupported Transport Protocol\"),\n\tCodeAllocQuotaReached: []byte(\"Allocation Quota Reached\"),\n\tCodeInsufficientCapacity: []byte(\"Insufficient Capacity\"),\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"path\"\n)\n\ntype UnpackAction struct {\n\tBaseAction `yaml:\",inline\"`\n\tCompression string\n\tFile string\n}\n\nfunc (pf *UnpackAction) Run(context *DebosContext) error {\n\tpf.LogStart()\n\tinfile := path.Join(context.artifactdir, pf.File)\n\n\tos.MkdirAll(context.rootdir, 0755)\n\n\tlog.Printf(\"Unpacking %s\\n\", infile)\n\treturn Command{}.Run(\"unpack\", \"tar\", \"xzf\", infile, \"-C\", context.rootdir)\n}\nAction 'unpack' refactoredpackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n)\n\ntype UnpackAction struct {\n\tBaseAction `yaml:\",inline\"`\n\tCompression string\n\tFile string\n}\n\nfunc tarOptions(compression string) string {\n\tunpackTarOpts := map[string]string{\n\t\t\"gz\": \"-z\",\n\t\t\"bzip2\": \"-j\",\n\t\t\"xz\": \"-J\",\n\t} \/\/ Trying to guess all other supported formats\n\n\treturn unpackTarOpts[compression]\n}\n\nfunc UnpackTarArchive(infile, destination, compression string, options ...string) error {\n\tif err := os.MkdirAll(destination, 0755); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Unpacking %s\\n\", infile)\n\n\tcommand := []string{\"tar\"}\n\tcommand = append(command, options...)\n\tcommand = append(command, \"-x\")\n\tif unpackTarOpt := tarOptions(compression); len(unpackTarOpt) > 0 {\n\t\tcommand = append(command, unpackTarOpt)\n\t}\n\tcommand = append(command, \"-f\", infile, \"-C\", destination)\n\n\treturn Command{}.Run(\"unpack\", command...)\n}\n\nfunc (pf *UnpackAction) Verify(context *DebosContext) error {\n\tunpackTarOpt := tarOptions(pf.Compression)\n\tif len(pf.Compression) > 0 && len(unpackTarOpt) == 0 {\n\t\treturn fmt.Errorf(\"Compression '%s' is not supported.\\n\", pf.Compression)\n\t}\n\n\treturn nil\n}\n\nfunc (pf *UnpackAction) Run(context *DebosContext) error {\n\tpf.LogStart()\n\tinfile := path.Join(context.artifactdir, pf.File)\n\n\treturn UnpackTarArchive(infile, context.rootdir, pf.Compression)\n}\n<|endoftext|>"} {"text":"package txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttests := []struct {\n\t\ttxtRecord string\n\t\texpected record\n\t\terr error\n\t}{\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 301,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"could not parse status code\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;https:\/\/google.com;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"multiple values without keys\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv1;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"unhandled version 'txtv1'\"),\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tr := record{}\n\t\terr := r.Parse(test.txtRecord)\n\n\t\tif err != nil {\n\t\t\tif test.err == nil || !strings.HasPrefix(err.Error(), test.err.Error()) {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %s\", i, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && test.err != nil {\n\t\t\tt.Errorf(\"Test %d: Expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\n\t\tif got, want := r.Version, test.expected.Version; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Version to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.To, test.expected.To; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected To to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.Code, test.expected.Code; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Code to be '%d', got '%d'\", i, want, got)\n\t\t}\n\t}\n}\n\nfunc TestHandleDefault(t *testing.T) {\n\ttestURL := \"https:\/\/%d._td.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._td.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestHandleSuccess(t *testing.T) {\n\ttestURL := \"https:\/\/%d._ths.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._ths.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestHandleFailure(t *testing.T) {\n\ttestURL := \"https:\/\/%d._thf.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._thf.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tlog.Print(err, i)\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil)\")\n\t\t}\n\t}\n}\nRename Handle to Redirect in testspackage txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttests := []struct {\n\t\ttxtRecord string\n\t\texpected record\n\t\terr error\n\t}{\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 301,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"could not parse status code\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;https:\/\/google.com;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"multiple values without keys\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv1;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"unhandled version 'txtv1'\"),\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tr := record{}\n\t\terr := r.Parse(test.txtRecord)\n\n\t\tif err != nil {\n\t\t\tif test.err == nil || !strings.HasPrefix(err.Error(), test.err.Error()) {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %s\", i, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && test.err != nil {\n\t\t\tt.Errorf(\"Test %d: Expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\n\t\tif got, want := r.Version, test.expected.Version; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Version to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.To, test.expected.To; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected To to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.Code, test.expected.Code; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Code to be '%d', got '%d'\", i, want, got)\n\t\t}\n\t}\n}\n\nfunc TestRedirectDefault(t *testing.T) {\n\ttestURL := \"https:\/\/%d._td.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._td.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestRedirectSuccess(t *testing.T) {\n\ttestURL := \"https:\/\/%d._ths.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._ths.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestRedirectFailure(t *testing.T) {\n\ttestURL := \"https:\/\/%d._thf.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._thf.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tlog.Print(err, i)\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Redirect(rec, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil)\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package types\n\n\/\/ currency.go defines the internal currency object. One design goal of the\n\/\/ currency type is immutability: the currency type should be safe to pass\n\/\/ directly to other objects and packages. The currency object should never\n\/\/ have a negative value. The currency should never overflow. There is a\n\/\/ maximum size value that can be encoded (around 10^10^20), however exceeding\n\/\/ this value will not result in overflow.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/big\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\ntype (\n\t\/\/ A Currency represents a number of siacoins or siafunds. Internally, a\n\t\/\/ Currency value is unbounded; however, Currency values sent over the wire\n\t\/\/ protocol are subject to a maximum size of 255 bytes (approximately 10^614).\n\t\/\/ Unlike the math\/big library, whose methods modify their receiver, all\n\t\/\/ arithmetic Currency methods return a new value. Currency cannot be negative.\n\tCurrency struct {\n\t\ti big.Int\n\t}\n)\n\nvar (\n\tZeroCurrency = NewCurrency64(0)\n\n\tErrNegativeCurrency = errors.New(\"negative currency not allowed\")\n)\n\n\/\/ NewCurrency creates a Currency value from a big.Int. Undefined behavior\n\/\/ occurs if a negative input is used.\nfunc NewCurrency(b *big.Int) (c Currency) {\n\tif b.Sign() < 0 {\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\tc.i = *b\n\t}\n\treturn\n}\n\n\/\/ NewCurrency64 creates a Currency value from a uint64.\nfunc NewCurrency64(x uint64) (c Currency) {\n\tc.i.SetUint64(x)\n\treturn\n}\n\n\/\/ Add returns a new Currency value y = c + x.\nfunc (c Currency) Add(x Currency) (y Currency) {\n\ty.i.Add(&c.i, &x.i)\n\treturn\n}\n\n\/\/ Big returns the value of c as a *big.Int. Importantly, it does not provide\n\/\/ access to the c's internal big.Int object, only a copy.\nfunc (c Currency) Big() *big.Int {\n\treturn new(big.Int).Set(&c.i)\n}\n\n\/\/ Cmp compares two Currency values. The return value follows the convention\n\/\/ of math\/big.\nfunc (c Currency) Cmp(y Currency) int {\n\treturn c.i.Cmp(&y.i)\n}\n\n\/\/ Div returns a new Currency value y = c \/ x.\nfunc (c Currency) Div(x Currency) (y Currency) {\n\ty.i.Div(&c.i, &x.i)\n\treturn\n}\n\n\/\/ Mul returns a new Currency value y = c * x.\nfunc (c Currency) Mul(x Currency) (y Currency) {\n\ty.i.Mul(&c.i, &x.i)\n\treturn\n}\n\n\/\/ MulFloat returns a new Currency value y = c * x, where x is a float64.\n\/\/ Behavior is undefined when x is negative.\nfunc (c Currency) MulFloat(x float64) (y Currency) {\n\tif x < 0 {\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\tyRat := new(big.Rat).Mul(\n\t\t\tnew(big.Rat).SetInt(&c.i),\n\t\t\tnew(big.Rat).SetFloat64(x),\n\t\t)\n\t\ty.i.Div(yRat.Num(), yRat.Denom())\n\t}\n\treturn\n}\n\n\/\/ RoundDown returns the largest multiple of n <= c.\nfunc (c Currency) RoundDown(n uint64) (y Currency) {\n\tdiff := new(big.Int).Mod(&c.i, new(big.Int).SetUint64(n))\n\ty.i.Sub(&c.i, diff)\n\treturn\n}\n\n\/\/ IsZero returns true if the value is 0, false otherwise.\nfunc (c Currency) IsZero() bool {\n\treturn c.i.Sign() <= 0\n}\n\n\/\/ Sqrt returns a new Currency value y = sqrt(c). Result is rounded down to the\n\/\/ nearest integer.\nfunc (c Currency) Sqrt() (y Currency) {\n\tf, _ := new(big.Rat).SetInt(&c.i).Float64()\n\tsqrt := new(big.Rat).SetFloat64(math.Sqrt(f))\n\ty.i.Div(sqrt.Num(), sqrt.Denom())\n\treturn\n}\n\n\/\/ Sub returns a new Currency value y = c - x. Behavior is undefined when\n\/\/ c < x.\nfunc (c Currency) Sub(x Currency) (y Currency) {\n\tif c.Cmp(x) < 0 {\n\t\ty = c\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\ty.i.Sub(&c.i, &x.i)\n\t}\n\treturn\n}\n\n\/\/ MarshalJSON implements the json.Marshaler interface.\nfunc (c Currency) MarshalJSON() ([]byte, error) {\n\treturn c.i.MarshalJSON()\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface. An error is\n\/\/ returned if a negative number is provided.\nfunc (c *Currency) UnmarshalJSON(b []byte) error {\n\terr := c.i.UnmarshalJSON(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.i.Sign() < 0 {\n\t\tc.i = *big.NewInt(0)\n\t\treturn ErrNegativeCurrency\n\t}\n\treturn nil\n}\n\n\/\/ MarshalSia implements the encoding.SiaMarshaler interface. It returns the\n\/\/ byte-slice representation of the Currency's internal big.Int. Note that as\n\/\/ the bytes of the big.Int correspond to the absolute value of the integer,\n\/\/ there is no way to marshal a negative Currency.\nfunc (c Currency) MarshalSia() []byte {\n\treturn c.i.Bytes()\n}\n\n\/\/ UnmarshalSia implements the encoding.SiaUnmarshaler interface.\nfunc (c *Currency) UnmarshalSia(b []byte) {\n\tc.i.SetBytes(b)\n}\n\n\/\/ String implements the fmt.Stringer interface.\nfunc (c Currency) String() string {\n\treturn c.i.String()\n}\n\n\/\/ Scan implements the fmt.Scanner interface, allowing Currency values to be\n\/\/ scanned from text.\nfunc (c *Currency) Scan(s fmt.ScanState, ch rune) error {\n\terr := c.i.Scan(s, ch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.i.Sign() < 0 {\n\t\treturn ErrNegativeCurrency\n\t}\n\treturn nil\n}\nless confusing variable names in currency.gopackage types\n\n\/\/ currency.go defines the internal currency object. One design goal of the\n\/\/ currency type is immutability: the currency type should be safe to pass\n\/\/ directly to other objects and packages. The currency object should never\n\/\/ have a negative value. The currency should never overflow. There is a\n\/\/ maximum size value that can be encoded (around 10^10^20), however exceeding\n\/\/ this value will not result in overflow.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/big\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\ntype (\n\t\/\/ A Currency represents a number of siacoins or siafunds. Internally, a\n\t\/\/ Currency value is unbounded; however, Currency values sent over the wire\n\t\/\/ protocol are subject to a maximum size of 255 bytes (approximately 10^614).\n\t\/\/ Unlike the math\/big library, whose methods modify their receiver, all\n\t\/\/ arithmetic Currency methods return a new value. Currency cannot be negative.\n\tCurrency struct {\n\t\ti big.Int\n\t}\n)\n\nvar (\n\tZeroCurrency = NewCurrency64(0)\n\n\tErrNegativeCurrency = errors.New(\"negative currency not allowed\")\n)\n\n\/\/ NewCurrency creates a Currency value from a big.Int. Undefined behavior\n\/\/ occurs if a negative input is used.\nfunc NewCurrency(b *big.Int) (c Currency) {\n\tif b.Sign() < 0 {\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\tc.i = *b\n\t}\n\treturn\n}\n\n\/\/ NewCurrency64 creates a Currency value from a uint64.\nfunc NewCurrency64(x uint64) (c Currency) {\n\tc.i.SetUint64(x)\n\treturn\n}\n\n\/\/ Add returns a new Currency value c = x + y\nfunc (x Currency) Add(y Currency) (c Currency) {\n\tc.i.Add(&x.i, &y.i)\n\treturn\n}\n\n\/\/ Big returns the value of c as a *big.Int. Importantly, it does not provide\n\/\/ access to the c's internal big.Int object, only a copy.\nfunc (c Currency) Big() *big.Int {\n\treturn new(big.Int).Set(&c.i)\n}\n\n\/\/ Cmp compares two Currency values. The return value follows the convention\n\/\/ of math\/big.\nfunc (x Currency) Cmp(y Currency) int {\n\treturn x.i.Cmp(&y.i)\n}\n\n\/\/ Div returns a new Currency value c = x \/ y.\nfunc (x Currency) Div(y Currency) (c Currency) {\n\tc.i.Div(&x.i, &y.i)\n\treturn\n}\n\n\/\/ Mul returns a new Currency value c = x * y.\nfunc (x Currency) Mul(y Currency) (c Currency) {\n\tc.i.Mul(&x.i, &y.i)\n\treturn\n}\n\n\/\/ MulFloat returns a new Currency value y = c * x, where x is a float64.\n\/\/ Behavior is undefined when x is negative.\nfunc (x Currency) MulFloat(y float64) (c Currency) {\n\tif y < 0 {\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\tcRat := new(big.Rat).Mul(\n\t\t\tnew(big.Rat).SetInt(&x.i),\n\t\t\tnew(big.Rat).SetFloat64(y),\n\t\t)\n\t\tc.i.Div(cRat.Num(), cRat.Denom())\n\t}\n\treturn\n}\n\n\/\/ RoundDown returns the largest multiple of y <= x.\nfunc (x Currency) RoundDown(y uint64) (c Currency) {\n\tdiff := new(big.Int).Mod(&x.i, new(big.Int).SetUint64(y))\n\tc.i.Sub(&x.i, diff)\n\treturn\n}\n\n\/\/ IsZero returns true if the value is 0, false otherwise.\nfunc (c Currency) IsZero() bool {\n\treturn c.i.Sign() <= 0\n}\n\n\/\/ Sqrt returns a new Currency value y = sqrt(c). Result is rounded down to the\n\/\/ nearest integer.\nfunc (x Currency) Sqrt() (c Currency) {\n\tf, _ := new(big.Rat).SetInt(&x.i).Float64()\n\tsqrt := new(big.Rat).SetFloat64(math.Sqrt(f))\n\tc.i.Div(sqrt.Num(), sqrt.Denom())\n\treturn\n}\n\n\/\/ Sub returns a new Currency value c = x - y. Behavior is undefined when\n\/\/ x < y.\nfunc (x Currency) Sub(y Currency) (c Currency) {\n\tif x.Cmp(y) < 0 {\n\t\tc = x\n\t\tif build.DEBUG {\n\t\t\tpanic(ErrNegativeCurrency)\n\t\t}\n\t} else {\n\t\tc.i.Sub(&x.i, &y.i)\n\t}\n\treturn\n}\n\n\/\/ MarshalJSON implements the json.Marshaler interface.\nfunc (c Currency) MarshalJSON() ([]byte, error) {\n\treturn c.i.MarshalJSON()\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface. An error is\n\/\/ returned if a negative number is provided.\nfunc (c *Currency) UnmarshalJSON(b []byte) error {\n\terr := c.i.UnmarshalJSON(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.i.Sign() < 0 {\n\t\tc.i = *big.NewInt(0)\n\t\treturn ErrNegativeCurrency\n\t}\n\treturn nil\n}\n\n\/\/ MarshalSia implements the encoding.SiaMarshaler interface. It returns the\n\/\/ byte-slice representation of the Currency's internal big.Int. Note that as\n\/\/ the bytes of the big.Int correspond to the absolute value of the integer,\n\/\/ there is no way to marshal a negative Currency.\nfunc (c Currency) MarshalSia() []byte {\n\treturn c.i.Bytes()\n}\n\n\/\/ UnmarshalSia implements the encoding.SiaUnmarshaler interface.\nfunc (c *Currency) UnmarshalSia(b []byte) {\n\tc.i.SetBytes(b)\n}\n\n\/\/ String implements the fmt.Stringer interface.\nfunc (c Currency) String() string {\n\treturn c.i.String()\n}\n\n\/\/ Scan implements the fmt.Scanner interface, allowing Currency values to be\n\/\/ scanned from text.\nfunc (c *Currency) Scan(s fmt.ScanState, ch rune) error {\n\terr := c.i.Scan(s, ch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.i.Sign() < 0 {\n\t\treturn ErrNegativeCurrency\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcscaching_test\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\/mock_gcscaching\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/mock_gcs\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFastStatBucket(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst ttl = time.Second\n\ntype fastStatBucketTest struct {\n\tcache mock_gcscaching.MockStatCache\n\tclock timeutil.SimulatedClock\n\twrapped mock_gcs.MockBucket\n\n\tbucket gcs.Bucket\n}\n\nfunc (t *fastStatBucketTest) SetUp(ti *TestInfo) {\n\t\/\/ Set up a fixed, non-zero time.\n\tt.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))\n\n\t\/\/ Set up dependencies.\n\tt.cache = mock_gcscaching.NewMockStatCache(ti.MockController, \"cache\")\n\tt.wrapped = mock_gcs.NewMockBucket(ti.MockController, \"wrapped\")\n\n\tt.bucket = gcscaching.NewFastStatBucket(\n\t\tttl,\n\t\tt.cache,\n\t\t&t.clock,\n\t\tt.wrapped)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ CreateObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype CreateObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&CreateObjectTest{}) }\n\nfunc (t *CreateObjectTest) CallsEraseAndWrapped() {\n\tconst name = \"taco\"\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(name)\n\n\t\/\/ Wrapped\n\tvar wrappedReq *gcs.CreateObjectRequest\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &wrappedReq), Return(nil, errors.New(\"\"))))\n\n\t\/\/ Call\n\treq := &gcs.CreateObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.CreateObject(nil, req)\n\n\tAssertNe(nil, wrappedReq)\n\tExpectEq(req, wrappedReq)\n}\n\nfunc (t *CreateObjectTest) WrappedFails() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err = t.bucket.CreateObject(nil, &gcs.CreateObjectRequest{})\n\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *CreateObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t\tGeneration: 1234,\n\t}\n\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\to, err := t.bucket.CreateObject(nil, &gcs.CreateObjectRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ StatObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype StatObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&StatObjectTest{}) }\n\nfunc (t *StatObjectTest) CallsCache() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(name, timeutil.TimeEq(t.clock.Now())).\n\t\tWillOnce(Return(&gcs.Object{}))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.StatObject(nil, req)\n}\n\nfunc (t *StatObjectTest) CacheHit() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tobj := &gcs.Object{\n\t\tName: name,\n\t}\n\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(obj))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\to, err := t.bucket.StatObject(nil, req)\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\nfunc (t *StatObjectTest) CallsWrapped() {\n\tconst name = \"\"\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), req).\n\t\tWillOnce(Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\t_, _ = t.bucket.StatObject(nil, req)\n}\n\nfunc (t *StatObjectTest) WrappedFails() {\n\tconst name = \"\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, err := t.bucket.StatObject(nil, req)\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *StatObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t}\n\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\to, err := t.bucket.StatObject(nil, req)\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ ListObjects\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ListObjectsTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&ListObjectsTest{}) }\n\nfunc (t *ListObjectsTest) WrappedFails() {\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ListObjectsTest) EmptyListing() {\n\t\/\/ Wrapped\n\texpected := &gcs.Listing{}\n\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(expected, nil))\n\n\t\/\/ Call\n\tlisting, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(expected, listing)\n}\n\nfunc (t *ListObjectsTest) NonEmptyListing() {\n\t\/\/ Wrapped\n\to0 := &gcs.Object{Name: \"taco\"}\n\to1 := &gcs.Object{Name: \"burrito\"}\n\n\texpected := &gcs.Listing{\n\t\tObjects: []*gcs.Object{o0, o1},\n\t}\n\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(expected, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(o0, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\tExpectCall(t.cache, \"Insert\")(o1, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\tlisting, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(expected, listing)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ UpdateObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype UpdateObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&UpdateObjectTest{}) }\n\nfunc (t *UpdateObjectTest) CallsEraseAndWrapped() {\n\tconst name = \"taco\"\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(name)\n\n\t\/\/ Wrapped\n\tvar wrappedReq *gcs.UpdateObjectRequest\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &wrappedReq), Return(nil, errors.New(\"\"))))\n\n\t\/\/ Call\n\treq := &gcs.UpdateObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.UpdateObject(nil, req)\n\n\tAssertNe(nil, wrappedReq)\n\tExpectEq(req, wrappedReq)\n}\n\nfunc (t *UpdateObjectTest) WrappedFails() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err = t.bucket.UpdateObject(nil, &gcs.UpdateObjectRequest{})\n\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *UpdateObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t\tGeneration: 1234,\n\t}\n\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\to, err := t.bucket.UpdateObject(nil, &gcs.UpdateObjectRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\nDeleteObjectTest\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcscaching_test\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\/mock_gcscaching\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/mock_gcs\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFastStatBucket(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst ttl = time.Second\n\ntype fastStatBucketTest struct {\n\tcache mock_gcscaching.MockStatCache\n\tclock timeutil.SimulatedClock\n\twrapped mock_gcs.MockBucket\n\n\tbucket gcs.Bucket\n}\n\nfunc (t *fastStatBucketTest) SetUp(ti *TestInfo) {\n\t\/\/ Set up a fixed, non-zero time.\n\tt.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))\n\n\t\/\/ Set up dependencies.\n\tt.cache = mock_gcscaching.NewMockStatCache(ti.MockController, \"cache\")\n\tt.wrapped = mock_gcs.NewMockBucket(ti.MockController, \"wrapped\")\n\n\tt.bucket = gcscaching.NewFastStatBucket(\n\t\tttl,\n\t\tt.cache,\n\t\t&t.clock,\n\t\tt.wrapped)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ CreateObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype CreateObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&CreateObjectTest{}) }\n\nfunc (t *CreateObjectTest) CallsEraseAndWrapped() {\n\tconst name = \"taco\"\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(name)\n\n\t\/\/ Wrapped\n\tvar wrappedReq *gcs.CreateObjectRequest\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &wrappedReq), Return(nil, errors.New(\"\"))))\n\n\t\/\/ Call\n\treq := &gcs.CreateObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.CreateObject(nil, req)\n\n\tAssertNe(nil, wrappedReq)\n\tExpectEq(req, wrappedReq)\n}\n\nfunc (t *CreateObjectTest) WrappedFails() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err = t.bucket.CreateObject(nil, &gcs.CreateObjectRequest{})\n\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *CreateObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t\tGeneration: 1234,\n\t}\n\n\tExpectCall(t.wrapped, \"CreateObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\to, err := t.bucket.CreateObject(nil, &gcs.CreateObjectRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ StatObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype StatObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&StatObjectTest{}) }\n\nfunc (t *StatObjectTest) CallsCache() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(name, timeutil.TimeEq(t.clock.Now())).\n\t\tWillOnce(Return(&gcs.Object{}))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.StatObject(nil, req)\n}\n\nfunc (t *StatObjectTest) CacheHit() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tobj := &gcs.Object{\n\t\tName: name,\n\t}\n\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(obj))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\to, err := t.bucket.StatObject(nil, req)\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\nfunc (t *StatObjectTest) CallsWrapped() {\n\tconst name = \"\"\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), req).\n\t\tWillOnce(Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\t_, _ = t.bucket.StatObject(nil, req)\n}\n\nfunc (t *StatObjectTest) WrappedFails() {\n\tconst name = \"\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, err := t.bucket.StatObject(nil, req)\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *StatObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\n\t\/\/ LookUp\n\tExpectCall(t.cache, \"LookUp\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t}\n\n\tExpectCall(t.wrapped, \"StatObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\treq := &gcs.StatObjectRequest{\n\t\tName: name,\n\t}\n\n\to, err := t.bucket.StatObject(nil, req)\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ ListObjects\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ListObjectsTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&ListObjectsTest{}) }\n\nfunc (t *ListObjectsTest) WrappedFails() {\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ListObjectsTest) EmptyListing() {\n\t\/\/ Wrapped\n\texpected := &gcs.Listing{}\n\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(expected, nil))\n\n\t\/\/ Call\n\tlisting, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(expected, listing)\n}\n\nfunc (t *ListObjectsTest) NonEmptyListing() {\n\t\/\/ Wrapped\n\to0 := &gcs.Object{Name: \"taco\"}\n\to1 := &gcs.Object{Name: \"burrito\"}\n\n\texpected := &gcs.Listing{\n\t\tObjects: []*gcs.Object{o0, o1},\n\t}\n\n\tExpectCall(t.wrapped, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(Return(expected, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(o0, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\tExpectCall(t.cache, \"Insert\")(o1, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\tlisting, err := t.bucket.ListObjects(nil, &gcs.ListObjectsRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(expected, listing)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ UpdateObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype UpdateObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&UpdateObjectTest{}) }\n\nfunc (t *UpdateObjectTest) CallsEraseAndWrapped() {\n\tconst name = \"taco\"\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(name)\n\n\t\/\/ Wrapped\n\tvar wrappedReq *gcs.UpdateObjectRequest\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &wrappedReq), Return(nil, errors.New(\"\"))))\n\n\t\/\/ Call\n\treq := &gcs.UpdateObjectRequest{\n\t\tName: name,\n\t}\n\n\t_, _ = t.bucket.UpdateObject(nil, req)\n\n\tAssertNe(nil, wrappedReq)\n\tExpectEq(req, wrappedReq)\n}\n\nfunc (t *UpdateObjectTest) WrappedFails() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\t_, err = t.bucket.UpdateObject(nil, &gcs.UpdateObjectRequest{})\n\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *UpdateObjectTest) WrappedSucceeds() {\n\tconst name = \"taco\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tobj := &gcs.Object{\n\t\tName: name,\n\t\tGeneration: 1234,\n\t}\n\n\tExpectCall(t.wrapped, \"UpdateObject\")(Any(), Any()).\n\t\tWillOnce(Return(obj, nil))\n\n\t\/\/ Insert\n\tExpectCall(t.cache, \"Insert\")(obj, timeutil.TimeEq(t.clock.Now().Add(ttl)))\n\n\t\/\/ Call\n\to, err := t.bucket.UpdateObject(nil, &gcs.UpdateObjectRequest{})\n\n\tAssertEq(nil, err)\n\tExpectEq(obj, o)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ DeleteObject\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype DeleteObjectTest struct {\n\tfastStatBucketTest\n}\n\nfunc init() { RegisterTestSuite(&DeleteObjectTest{}) }\n\nfunc (t *DeleteObjectTest) CallsEraseAndWrapped() {\n\tconst name = \"taco\"\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(name)\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"DeleteObject\")(Any(), name).\n\t\tWillOnce(Return(errors.New(\"\")))\n\n\t\/\/ Call\n\t_ = t.bucket.DeleteObject(nil, name)\n}\n\nfunc (t *DeleteObjectTest) WrappedFails() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"DeleteObject\")(Any(), Any()).\n\t\tWillOnce(Return(errors.New(\"taco\")))\n\n\t\/\/ Call\n\terr = t.bucket.DeleteObject(nil, name)\n\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *DeleteObjectTest) WrappedSucceeds() {\n\tconst name = \"\"\n\tvar err error\n\n\t\/\/ Erase\n\tExpectCall(t.cache, \"Erase\")(Any())\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"DeleteObject\")(Any(), Any()).\n\t\tWillOnce(Return(nil))\n\n\t\/\/ Call\n\terr = t.bucket.DeleteObject(nil, name)\n\tAssertEq(nil, err)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/router\/rebuild\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/informers\"\n\tv1informers \"k8s.io\/client-go\/informers\/core\/v1\"\n\t\"k8s.io\/client-go\/informers\/internalinterfaces\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nconst (\n\tinformerSyncTimeout = time.Minute\n)\n\ntype clusterController struct {\n\tmu sync.Mutex\n\tcluster *ClusterClient\n\tinformerFactory informers.SharedInformerFactory\n\tpodInformer v1informers.PodInformer\n\tserviceInformer v1informers.ServiceInformer\n\tnodeInformer v1informers.NodeInformer\n\tstopCh chan struct{}\n}\n\nfunc initAllControllers(p *kubernetesProvisioner) error {\n\treturn forEachCluster(func(client *ClusterClient) error {\n\t\t_, err := getClusterController(p, client)\n\t\treturn err\n\t})\n}\n\nfunc getClusterController(p *kubernetesProvisioner, cluster *ClusterClient) (*clusterController, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif c, ok := p.clusterControllers[cluster.Name]; ok {\n\t\treturn c, nil\n\t}\n\tc := &clusterController{\n\t\tcluster: cluster,\n\t\tstopCh: make(chan struct{}),\n\t}\n\terr := c.start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.clusterControllers[cluster.Name] = c\n\treturn c, nil\n}\n\nfunc stopClusterController(p *kubernetesProvisioner, cluster *ClusterClient) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif c, ok := p.clusterControllers[cluster.Name]; ok {\n\t\tc.stop()\n\t}\n\tdelete(p.clusterControllers, cluster.Name)\n}\n\nfunc (c *clusterController) stop() {\n\tclose(c.stopCh)\n}\n\nfunc (c *clusterController) start() error {\n\tinformer, err := c.getPodInformerWait(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\terr := c.onAdd(obj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on add pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\terr := c.onUpdate(oldObj, newObj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on update pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\terr := c.onDelete(obj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on delete pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t})\n\treturn nil\n}\n\nfunc (m *clusterController) onAdd(obj interface{}) error {\n\t\/\/ Pods are never ready on add, ignore and do nothing\n\treturn nil\n}\n\nfunc (c *clusterController) onUpdate(oldObj, newObj interface{}) error {\n\tnewPod := oldObj.(*apiv1.Pod)\n\toldPod := newObj.(*apiv1.Pod)\n\tif newPod.ResourceVersion == oldPod.ResourceVersion {\n\t\treturn nil\n\t}\n\tc.addPod(newPod)\n\treturn nil\n}\n\nfunc (c *clusterController) onDelete(obj interface{}) error {\n\tif pod, ok := obj.(*apiv1.Pod); ok {\n\t\tc.addPod(pod)\n\t\treturn nil\n\t}\n\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\tif !ok {\n\t\treturn errors.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t}\n\tpod, ok := tombstone.Obj.(*apiv1.Pod)\n\tif !ok {\n\t\treturn errors.Errorf(\"tombstone contained object that is not a Pod: %#v\", obj)\n\t}\n\tc.addPod(pod)\n\treturn nil\n}\n\nfunc (c *clusterController) addPod(pod *apiv1.Pod) {\n\tlabelSet := labelSetFromMeta(&pod.ObjectMeta)\n\tappName := labelSet.AppName()\n\tif appName == \"\" {\n\t\treturn\n\t}\n\tif labelSet.IsDeploy() || labelSet.IsIsolatedRun() {\n\t\treturn\n\t}\n\trouterLocal, _ := c.cluster.RouterAddressLocal(labelSet.AppPool())\n\tif routerLocal {\n\t\trebuild.EnqueueRoutesRebuild(appName)\n\t}\n}\n\nfunc (p *clusterController) getPodInformer() (v1informers.PodInformer, error) {\n\treturn p.getPodInformerWait(true)\n}\n\nfunc (p *clusterController) getServiceInformer() (v1informers.ServiceInformer, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.serviceInformer == nil {\n\t\terr := p.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tp.serviceInformer = factory.Core().V1().Services()\n\t\t\tp.serviceInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr := p.waitForSync(p.serviceInformer.Informer())\n\treturn p.serviceInformer, err\n}\n\nfunc (p *clusterController) getNodeInformer() (v1informers.NodeInformer, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.nodeInformer == nil {\n\t\terr := p.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tp.nodeInformer = factory.Core().V1().Nodes()\n\t\t\tp.nodeInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr := p.waitForSync(p.nodeInformer.Informer())\n\treturn p.nodeInformer, err\n}\n\nfunc (p *clusterController) getPodInformerWait(wait bool) (v1informers.PodInformer, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.podInformer == nil {\n\t\terr := p.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tp.podInformer = factory.Core().V1().Pods()\n\t\t\tp.podInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar err error\n\tif wait {\n\t\terr = p.waitForSync(p.podInformer.Informer())\n\t}\n\treturn p.podInformer, err\n}\n\nfunc (p *clusterController) withInformerFactory(fn func(factory informers.SharedInformerFactory)) error {\n\tfactory, err := p.getFactory()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfn(factory)\n\tfactory.Start(p.stopCh)\n\treturn nil\n}\n\nfunc (p *clusterController) getFactory() (informers.SharedInformerFactory, error) {\n\tif p.informerFactory != nil {\n\t\treturn p.informerFactory, nil\n\t}\n\tvar err error\n\tp.informerFactory, err = InformerFactory(p.cluster)\n\treturn p.informerFactory, err\n}\n\nfunc contextWithCancelByChannel(ctx context.Context, ch chan struct{}, timeout time.Duration) (context.Context, func()) {\n\tctx, cancel := context.WithTimeout(ctx, timeout)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tcancel()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}()\n\treturn ctx, cancel\n}\n\nfunc (p *clusterController) waitForSync(informer cache.SharedInformer) error {\n\tif informer.HasSynced() {\n\t\treturn nil\n\t}\n\tctx, cancel := contextWithCancelByChannel(context.Background(), p.stopCh, informerSyncTimeout)\n\tdefer cancel()\n\tcache.WaitForCacheSync(ctx.Done(), informer.HasSynced)\n\treturn ctx.Err()\n}\n\nvar InformerFactory = func(client *ClusterClient) (informers.SharedInformerFactory, error) {\n\ttimeout := client.restConfig.Timeout\n\trestConfig := *client.restConfig\n\trestConfig.Timeout = 0\n\tcli, err := ClientForConfig(&restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttweakFunc := internalinterfaces.TweakListOptionsFunc(func(opts *metav1.ListOptions) {\n\t\tif opts.TimeoutSeconds == nil {\n\t\t\ttimeoutSec := int64(timeout.Seconds())\n\t\t\topts.TimeoutSeconds = &timeoutSec\n\t\t}\n\t})\n\treturn informers.NewFilteredSharedInformerFactory(cli, time.Minute, metav1.NamespaceAll, tweakFunc), nil\n}\nprovision\/kubernetes: fix receiver name in cluster controller methods\/\/ Copyright 2019 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/router\/rebuild\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/informers\"\n\tv1informers \"k8s.io\/client-go\/informers\/core\/v1\"\n\t\"k8s.io\/client-go\/informers\/internalinterfaces\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nconst (\n\tinformerSyncTimeout = time.Minute\n)\n\ntype clusterController struct {\n\tmu sync.Mutex\n\tcluster *ClusterClient\n\tinformerFactory informers.SharedInformerFactory\n\tpodInformer v1informers.PodInformer\n\tserviceInformer v1informers.ServiceInformer\n\tnodeInformer v1informers.NodeInformer\n\tstopCh chan struct{}\n}\n\nfunc initAllControllers(p *kubernetesProvisioner) error {\n\treturn forEachCluster(func(client *ClusterClient) error {\n\t\t_, err := getClusterController(p, client)\n\t\treturn err\n\t})\n}\n\nfunc getClusterController(p *kubernetesProvisioner, cluster *ClusterClient) (*clusterController, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif c, ok := p.clusterControllers[cluster.Name]; ok {\n\t\treturn c, nil\n\t}\n\tc := &clusterController{\n\t\tcluster: cluster,\n\t\tstopCh: make(chan struct{}),\n\t}\n\terr := c.start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.clusterControllers[cluster.Name] = c\n\treturn c, nil\n}\n\nfunc stopClusterController(p *kubernetesProvisioner, cluster *ClusterClient) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif c, ok := p.clusterControllers[cluster.Name]; ok {\n\t\tc.stop()\n\t}\n\tdelete(p.clusterControllers, cluster.Name)\n}\n\nfunc (c *clusterController) stop() {\n\tclose(c.stopCh)\n}\n\nfunc (c *clusterController) start() error {\n\tinformer, err := c.getPodInformerWait(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\terr := c.onAdd(obj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on add pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\terr := c.onUpdate(oldObj, newObj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on update pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\terr := c.onDelete(obj)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"[router-update-controller] error on delete pod event: %v\", err)\n\t\t\t}\n\t\t},\n\t})\n\treturn nil\n}\n\nfunc (c *clusterController) onAdd(obj interface{}) error {\n\t\/\/ Pods are never ready on add, ignore and do nothing\n\treturn nil\n}\n\nfunc (c *clusterController) onUpdate(oldObj, newObj interface{}) error {\n\tnewPod := oldObj.(*apiv1.Pod)\n\toldPod := newObj.(*apiv1.Pod)\n\tif newPod.ResourceVersion == oldPod.ResourceVersion {\n\t\treturn nil\n\t}\n\tc.addPod(newPod)\n\treturn nil\n}\n\nfunc (c *clusterController) onDelete(obj interface{}) error {\n\tif pod, ok := obj.(*apiv1.Pod); ok {\n\t\tc.addPod(pod)\n\t\treturn nil\n\t}\n\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\tif !ok {\n\t\treturn errors.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t}\n\tpod, ok := tombstone.Obj.(*apiv1.Pod)\n\tif !ok {\n\t\treturn errors.Errorf(\"tombstone contained object that is not a Pod: %#v\", obj)\n\t}\n\tc.addPod(pod)\n\treturn nil\n}\n\nfunc (c *clusterController) addPod(pod *apiv1.Pod) {\n\tlabelSet := labelSetFromMeta(&pod.ObjectMeta)\n\tappName := labelSet.AppName()\n\tif appName == \"\" {\n\t\treturn\n\t}\n\tif labelSet.IsDeploy() || labelSet.IsIsolatedRun() {\n\t\treturn\n\t}\n\trouterLocal, _ := c.cluster.RouterAddressLocal(labelSet.AppPool())\n\tif routerLocal {\n\t\trebuild.EnqueueRoutesRebuild(appName)\n\t}\n}\n\nfunc (c *clusterController) getPodInformer() (v1informers.PodInformer, error) {\n\treturn c.getPodInformerWait(true)\n}\n\nfunc (c *clusterController) getServiceInformer() (v1informers.ServiceInformer, error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.serviceInformer == nil {\n\t\terr := c.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tc.serviceInformer = factory.Core().V1().Services()\n\t\t\tc.serviceInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr := c.waitForSync(c.serviceInformer.Informer())\n\treturn c.serviceInformer, err\n}\n\nfunc (c *clusterController) getNodeInformer() (v1informers.NodeInformer, error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.nodeInformer == nil {\n\t\terr := c.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tc.nodeInformer = factory.Core().V1().Nodes()\n\t\t\tc.nodeInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr := c.waitForSync(c.nodeInformer.Informer())\n\treturn c.nodeInformer, err\n}\n\nfunc (c *clusterController) getPodInformerWait(wait bool) (v1informers.PodInformer, error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.podInformer == nil {\n\t\terr := c.withInformerFactory(func(factory informers.SharedInformerFactory) {\n\t\t\tc.podInformer = factory.Core().V1().Pods()\n\t\t\tc.podInformer.Informer()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar err error\n\tif wait {\n\t\terr = c.waitForSync(c.podInformer.Informer())\n\t}\n\treturn c.podInformer, err\n}\n\nfunc (c *clusterController) withInformerFactory(fn func(factory informers.SharedInformerFactory)) error {\n\tfactory, err := c.getFactory()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfn(factory)\n\tfactory.Start(c.stopCh)\n\treturn nil\n}\n\nfunc (c *clusterController) getFactory() (informers.SharedInformerFactory, error) {\n\tif c.informerFactory != nil {\n\t\treturn c.informerFactory, nil\n\t}\n\tvar err error\n\tc.informerFactory, err = InformerFactory(c.cluster)\n\treturn c.informerFactory, err\n}\n\nfunc contextWithCancelByChannel(ctx context.Context, ch chan struct{}, timeout time.Duration) (context.Context, func()) {\n\tctx, cancel := context.WithTimeout(ctx, timeout)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tcancel()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}()\n\treturn ctx, cancel\n}\n\nfunc (c *clusterController) waitForSync(informer cache.SharedInformer) error {\n\tif informer.HasSynced() {\n\t\treturn nil\n\t}\n\tctx, cancel := contextWithCancelByChannel(context.Background(), c.stopCh, informerSyncTimeout)\n\tdefer cancel()\n\tcache.WaitForCacheSync(ctx.Done(), informer.HasSynced)\n\treturn ctx.Err()\n}\n\nvar InformerFactory = func(client *ClusterClient) (informers.SharedInformerFactory, error) {\n\ttimeout := client.restConfig.Timeout\n\trestConfig := *client.restConfig\n\trestConfig.Timeout = 0\n\tcli, err := ClientForConfig(&restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttweakFunc := internalinterfaces.TweakListOptionsFunc(func(opts *metav1.ListOptions) {\n\t\tif opts.TimeoutSeconds == nil {\n\t\t\ttimeoutSec := int64(timeout.Seconds())\n\t\t\topts.TimeoutSeconds = &timeoutSec\n\t\t}\n\t})\n\treturn informers.NewFilteredSharedInformerFactory(cli, time.Minute, metav1.NamespaceAll, tweakFunc), nil\n}\n<|endoftext|>"} {"text":"package main\n\n\/\/ #cgo LDFLAGS: -framework GLUT -framework OpenGL\n\/\/\n\/\/ #include \n\/\/ #include \n\/\/\n\/\/ void display(void);\n\/\/ void idle(void);\n\/\/\n\/\/ static void setGlutFuncs(void) {\n\/\/ glutDisplayFunc(display);\n\/\/ glutIdleFunc(idle);\n\/\/ }\n\/\/\nimport \"C\"\nimport (\n\t\"github.com\/hajimehoshi\/go.ebiten\"\n\t\"github.com\/hajimehoshi\/go.ebiten\/example\/game\"\n\t\"github.com\/hajimehoshi\/go.ebiten\/graphics\"\n\t\"os\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\ntype GlutUI struct {\n\tscreenWidth int\n\tscreenHeight int\n\tscreenScale int\n\tdevice graphics.Device\n}\n\nvar currentUI *GlutUI\n\n\/\/export display\nfunc display() {\n\tcurrentUI.device.Update()\n\tC.glutSwapBuffers()\n}\n\n\/\/export idle\nfunc idle() {\n\tC.glutPostRedisplay()\n}\n\nfunc (ui *GlutUI) Init() {\n\tcargs := []*C.char{}\n\tfor _, arg := range os.Args {\n\t\tcargs = append(cargs, C.CString(arg))\n\t}\n\tdefer func() {\n\t\tfor _, carg := range cargs {\n\t\t\tC.free(unsafe.Pointer(carg))\n\t\t}\n\t}()\n\tcargc := C.int(len(cargs))\n\n\tui.screenWidth = 256\n\tui.screenHeight = 240\n\tui.screenScale = 2\n\n\tC.glutInit(&cargc, &cargs[0])\n\tC.glutInitDisplayMode(C.GLUT_RGBA)\n\tC.glutInitWindowSize(\n\t\tC.int(ui.screenWidth*ui.screenScale),\n\t\tC.int(ui.screenHeight*ui.screenScale))\n\n\ttitle := C.CString(\"Ebiten Demo\")\n\tdefer C.free(unsafe.Pointer(title))\n\tC.glutCreateWindow(title)\n\n\tC.setGlutFuncs()\n}\n\nfunc (ui *GlutUI) ScreenWidth() int {\n\treturn ui.screenWidth\n}\n\nfunc (ui *GlutUI) ScreenHeight() int {\n\treturn ui.screenHeight\n}\n\nfunc (ui *GlutUI) ScreenScale() int {\n\treturn ui.screenScale\n}\n\nfunc (ui *GlutUI) Run(device graphics.Device) {\n\tui.device = device\n\tC.glutMainLoop()\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tgame := game.NewRotatingImage()\n\tcurrentUI = &GlutUI{}\n\tcurrentUI.Init()\n\n\tebiten.OpenGLRun(game, currentUI)\n}\nRun Spritespackage main\n\n\/\/ #cgo LDFLAGS: -framework GLUT -framework OpenGL\n\/\/\n\/\/ #include \n\/\/ #include \n\/\/\n\/\/ void display(void);\n\/\/ void idle(void);\n\/\/\n\/\/ static void setGlutFuncs(void) {\n\/\/ glutDisplayFunc(display);\n\/\/ glutIdleFunc(idle);\n\/\/ }\n\/\/\nimport \"C\"\nimport (\n\t\"github.com\/hajimehoshi\/go.ebiten\"\n\t\"github.com\/hajimehoshi\/go.ebiten\/example\/game\"\n\t\"github.com\/hajimehoshi\/go.ebiten\/graphics\"\n\t\"os\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\ntype GlutUI struct {\n\tscreenWidth int\n\tscreenHeight int\n\tscreenScale int\n\tdevice graphics.Device\n}\n\nvar currentUI *GlutUI\n\n\/\/export display\nfunc display() {\n\tcurrentUI.device.Update()\n\tC.glutSwapBuffers()\n}\n\n\/\/export idle\nfunc idle() {\n\tC.glutPostRedisplay()\n}\n\nfunc (ui *GlutUI) Init() {\n\tcargs := []*C.char{}\n\tfor _, arg := range os.Args {\n\t\tcargs = append(cargs, C.CString(arg))\n\t}\n\tdefer func() {\n\t\tfor _, carg := range cargs {\n\t\t\tC.free(unsafe.Pointer(carg))\n\t\t}\n\t}()\n\tcargc := C.int(len(cargs))\n\n\tui.screenWidth = 256\n\tui.screenHeight = 240\n\tui.screenScale = 2\n\n\tC.glutInit(&cargc, &cargs[0])\n\tC.glutInitDisplayMode(C.GLUT_RGBA)\n\tC.glutInitWindowSize(\n\t\tC.int(ui.screenWidth*ui.screenScale),\n\t\tC.int(ui.screenHeight*ui.screenScale))\n\n\ttitle := C.CString(\"Ebiten Demo\")\n\tdefer C.free(unsafe.Pointer(title))\n\tC.glutCreateWindow(title)\n\n\tC.setGlutFuncs()\n}\n\nfunc (ui *GlutUI) ScreenWidth() int {\n\treturn ui.screenWidth\n}\n\nfunc (ui *GlutUI) ScreenHeight() int {\n\treturn ui.screenHeight\n}\n\nfunc (ui *GlutUI) ScreenScale() int {\n\treturn ui.screenScale\n}\n\nfunc (ui *GlutUI) Run(device graphics.Device) {\n\tui.device = device\n\tC.glutMainLoop()\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tgameName := \"\"\n\tif 2 <= len(os.Args) {\n\t\tgameName = os.Args[1]\n\t}\n\n\tvar gm ebiten.Game\n\tswitch gameName {\n\tcase \"sprites\":\n\t\tgm = game.NewSprites()\n\tdefault:\n\t\tgm = game.NewRotatingImage()\n\t}\n\tcurrentUI = &GlutUI{}\n\tcurrentUI.Init()\n\n\tebiten.OpenGLRun(gm, currentUI)\n}\n<|endoftext|>"} {"text":"package eth\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tErrMsgTooLarge = iota\n\tErrDecode\n\tErrInvalidMsgCode\n\tErrProtocolVersionMismatch\n\tErrNetworkIdMismatch\n\tErrGenesisBlockMismatch\n\tErrNoStatusMsg\n\tErrExtraStatusMsg\n\tErrInvalidBlock\n\tErrInvalidPoW\n\tErrUnrequestedBlock\n)\n\nvar errorToString = map[int]string{\n\tErrMsgTooLarge: \"Message too long\",\n\tErrDecode: \"Invalid message\",\n\tErrInvalidMsgCode: \"Invalid message code\",\n\tErrProtocolVersionMismatch: \"Protocol version mismatch\",\n\tErrNetworkIdMismatch: \"NetworkId mismatch\",\n\tErrGenesisBlockMismatch: \"Genesis block mismatch\",\n\tErrNoStatusMsg: \"No status message\",\n\tErrExtraStatusMsg: \"Extra status message\",\n\tErrInvalidBlock: \"Invalid block\",\n\tErrInvalidPoW: \"Invalid PoW\",\n\tErrUnrequestedBlock: \"Unrequested block\",\n}\n\ntype protocolError struct {\n\tCode int\n\tfatal bool\n\tmessage string\n\tformat string\n\tparams []interface{}\n\t\/\/ size int\n}\n\nfunc newProtocolError(code int, format string, params ...interface{}) *protocolError {\n\treturn &protocolError{Code: code, format: format, params: params}\n}\n\nfunc ProtocolError(code int, format string, params ...interface{}) (err *protocolError) {\n\terr = newProtocolError(code, format, params...)\n\t\/\/ report(err)\n\treturn\n}\n\nfunc (self protocolError) Error() (message string) {\n\tif len(message) == 0 {\n\t\tvar ok bool\n\t\tself.message, ok = errorToString[self.Code]\n\t\tif !ok {\n\t\t\tpanic(\"invalid error code\")\n\t\t}\n\t\tif self.format != \"\" {\n\t\t\tself.message += \": \" + fmt.Sprintf(self.format, self.params...)\n\t\t}\n\t}\n\treturn self.message\n}\n\nfunc (self *protocolError) Fatal() bool {\n\treturn self.fatal\n}\nadd ErrInsufficientChainInfo errorpackage eth\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tErrMsgTooLarge = iota\n\tErrDecode\n\tErrInvalidMsgCode\n\tErrProtocolVersionMismatch\n\tErrNetworkIdMismatch\n\tErrGenesisBlockMismatch\n\tErrNoStatusMsg\n\tErrExtraStatusMsg\n\tErrInvalidBlock\n\tErrInvalidPoW\n\tErrUnrequestedBlock\n\tErrInsufficientChainInfo\n)\n\nvar errorToString = map[int]string{\n\tErrMsgTooLarge: \"Message too long\",\n\tErrDecode: \"Invalid message\",\n\tErrInvalidMsgCode: \"Invalid message code\",\n\tErrProtocolVersionMismatch: \"Protocol version mismatch\",\n\tErrNetworkIdMismatch: \"NetworkId mismatch\",\n\tErrGenesisBlockMismatch: \"Genesis block mismatch\",\n\tErrNoStatusMsg: \"No status message\",\n\tErrExtraStatusMsg: \"Extra status message\",\n\tErrInvalidBlock: \"Invalid block\",\n\tErrInvalidPoW: \"Invalid PoW\",\n\tErrUnrequestedBlock: \"Unrequested block\",\n\tErrInsufficientChainInfo: \"Insufficient chain info\",\n}\n\ntype protocolError struct {\n\tCode int\n\tfatal bool\n\tmessage string\n\tformat string\n\tparams []interface{}\n\t\/\/ size int\n}\n\nfunc newProtocolError(code int, format string, params ...interface{}) *protocolError {\n\treturn &protocolError{Code: code, format: format, params: params}\n}\n\nfunc ProtocolError(code int, format string, params ...interface{}) (err *protocolError) {\n\terr = newProtocolError(code, format, params...)\n\t\/\/ report(err)\n\treturn\n}\n\nfunc (self protocolError) Error() (message string) {\n\tif len(message) == 0 {\n\t\tvar ok bool\n\t\tself.message, ok = errorToString[self.Code]\n\t\tif !ok {\n\t\t\tpanic(\"invalid error code\")\n\t\t}\n\t\tif self.format != \"\" {\n\t\t\tself.message += \": \" + fmt.Sprintf(self.format, self.params...)\n\t\t}\n\t}\n\treturn self.message\n}\n\nfunc (self *protocolError) Fatal() bool {\n\treturn self.fatal\n}\n<|endoftext|>"} {"text":"package mint_test\n\nimport \"testing\"\nimport \"github.com\/otiai10\/mint\"\n\nfunc TestMint_ToBe(t *testing.T) {\n\tmint.Expect(t, 1).ToBe(1)\n}\nfunc TestMint_ToBe_Fail(t *testing.T) {\n\tr := mint.Expect(t, 2).Dry().ToBe(1)\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n}\n\ntype MyStruct struct{}\n\nfunc TestMint_TypeOf(t *testing.T) {\n\tmint.Expect(t, \"foo\").TypeOf(\"string\")\n\n\tbar := MyStruct{}\n\tmint.Expect(t, bar).TypeOf(\"mint_test.MyStruct\")\n}\nfunc TestMint_TypeOf_Fail(t *testing.T) {\n\tr := mint.Expect(t, \"foo\").Dry().TypeOf(\"int\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n\n\tbar := MyStruct{}\n\tr = mint.Expect(t, bar).Dry().TypeOf(\"foo.Bar\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n\tmint.Expect(t, r.Message()).ToBe(\"all_test.go at line 29\\nExpected type\\t`foo.Bar`\\nBut actual\\t`mint_test.MyStruct`\")\n}\n\nfunc TestMint_Not(t *testing.T) {\n\tmint.Expect(t, 100).Not().ToBe(200)\n\tmint.Expect(t, \"foo\").Not().TypeOf(\"int\")\n\tmint.Expect(t, true).Not().ToBe(nil)\n}\nfunc TestMint_Not_Fail(t *testing.T) {\n\tr := mint.Expect(t, \"foo\").Dry().Not().TypeOf(\"string\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).Not().ToBe(true)\n}\n\nfunc TestMint_Deeply(t *testing.T) {\n\tmap0 := &map[int]string{\n\t\t3: \"three\",\n\t\t5: \"five\",\n\t\t10: \"ten\",\n\t}\n\tmap1 := &map[int]string{\n\t\t3: \"three\",\n\t\t5: \"five\",\n\t\t10: \"ten\",\n\t}\n\t\/\/ It SHALLOWLY different.\n\tmint.Expect(t, map0).Not().ToBe(map1)\n\t\/\/ But it DEEPLY equal.\n\tmint.Expect(t, map0).Deeply().ToBe(map1)\n}\n\nfunc TestMint_Deeply_slice(t *testing.T) {\n\ts1 := []int{1, 2, 3}\n\ts2 := []int{4, 5, 6}\n\ts3 := []int{1, 2, 3}\n\tmint.Expect(t, s1).Not().ToBe(s2)\n\tmint.Expect(t, s1).ToBe(s3)\n}\nfunc TestMint_Deeply_map(t *testing.T) {\n\tm1 := map[int]int{1: 1, 2: 4, 3: 9}\n\tm2 := map[int]int{1: 1, 2: 4, 3: 6}\n\tm3 := map[int]int{1: 1, 2: 4, 3: 9}\n\tmint.Expect(t, m1).Not().ToBe(m2)\n\tmint.Expect(t, m1).ToBe(m3)\n}\n\n\/\/ Blend is a shorhand to get testee\nfunc TestMint_Blend(t *testing.T) {\n\tm := mint.Blend(t)\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, m).TypeOf(\"*mint.Mint\")\n\tmint.Expect(t, m.Expect(\"foo\")).TypeOf(\"*mint.Testee\")\n}\nFix for travispackage mint_test\n\nimport \"testing\"\nimport \"github.com\/otiai10\/mint\"\n\nfunc TestMint_ToBe(t *testing.T) {\n\tmint.Expect(t, 1).ToBe(1)\n}\nfunc TestMint_ToBe_Fail(t *testing.T) {\n\tr := mint.Expect(t, 2).Dry().ToBe(1)\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n}\n\ntype MyStruct struct{}\n\nfunc TestMint_TypeOf(t *testing.T) {\n\tmint.Expect(t, \"foo\").TypeOf(\"string\")\n\n\tbar := MyStruct{}\n\tmint.Expect(t, bar).TypeOf(\"mint_test.MyStruct\")\n}\nfunc TestMint_TypeOf_Fail(t *testing.T) {\n\tr := mint.Expect(t, \"foo\").Dry().TypeOf(\"int\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n\n\tbar := MyStruct{}\n\tr = mint.Expect(t, bar).Dry().TypeOf(\"foo.Bar\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).ToBe(false)\n\tmint.Expect(t, r.Message()).ToBe(\"all_test.go at line 29\\n\\tExpected type\\t`foo.Bar`\\n\\tBut actual\\t`mint_test.MyStruct`\")\n}\n\nfunc TestMint_Not(t *testing.T) {\n\tmint.Expect(t, 100).Not().ToBe(200)\n\tmint.Expect(t, \"foo\").Not().TypeOf(\"int\")\n\tmint.Expect(t, true).Not().ToBe(nil)\n}\nfunc TestMint_Not_Fail(t *testing.T) {\n\tr := mint.Expect(t, \"foo\").Dry().Not().TypeOf(\"string\")\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, r.OK()).Not().ToBe(true)\n}\n\nfunc TestMint_Deeply(t *testing.T) {\n\tmap0 := &map[int]string{\n\t\t3: \"three\",\n\t\t5: \"five\",\n\t\t10: \"ten\",\n\t}\n\tmap1 := &map[int]string{\n\t\t3: \"three\",\n\t\t5: \"five\",\n\t\t10: \"ten\",\n\t}\n\t\/\/ It SHALLOWLY different.\n\tmint.Expect(t, map0).Not().ToBe(map1)\n\t\/\/ But it DEEPLY equal.\n\tmint.Expect(t, map0).Deeply().ToBe(map1)\n}\n\nfunc TestMint_Deeply_slice(t *testing.T) {\n\ts1 := []int{1, 2, 3}\n\ts2 := []int{4, 5, 6}\n\ts3 := []int{1, 2, 3}\n\tmint.Expect(t, s1).Not().ToBe(s2)\n\tmint.Expect(t, s1).ToBe(s3)\n}\nfunc TestMint_Deeply_map(t *testing.T) {\n\tm1 := map[int]int{1: 1, 2: 4, 3: 9}\n\tm2 := map[int]int{1: 1, 2: 4, 3: 6}\n\tm3 := map[int]int{1: 1, 2: 4, 3: 9}\n\tmint.Expect(t, m1).Not().ToBe(m2)\n\tmint.Expect(t, m1).ToBe(m3)\n}\n\n\/\/ Blend is a shorhand to get testee\nfunc TestMint_Blend(t *testing.T) {\n\tm := mint.Blend(t)\n\t\/\/ assert mint by using mint\n\tmint.Expect(t, m).TypeOf(\"*mint.Mint\")\n\tmint.Expect(t, m.Expect(\"foo\")).TypeOf(\"*mint.Testee\")\n}\n<|endoftext|>"} {"text":"package main\n\nimport \"fmt\"\n\nconst rowLength = 8\nconst colLength = 8\n\ntype game struct {\n\tboard [rowLength][colLength]int\n\tscore map[int]int\n\tturn int\n}\n\nvar theGame = game{\n\t[8][8]int{},\n\tmap[int]int{},\n\t1,\n}\n\nfunc setupGame() {\n\ttheGame.board[3][3] = 1\n\ttheGame.board[4][4] = 1\n\ttheGame.board[3][4] = 2\n\ttheGame.board[4][3] = 2\n\ttheGame.score[1] = 2\n\ttheGame.score[2] = 2\n\tprintGame(&theGame.board)\n\tfindPotentialMoves(theGame.board, 1)\n}\n\nfunc findPotentialMoves(board [8][8]int, p int) {\n\tfor rowIndex, row := range board {\n\t\tfor colIndex := range row {\n\t\t\tif board[rowIndex][colIndex] == p {\n\t\t\t\tcheckDirection(0, 1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, 1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(0, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, 0, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, 0, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, 1, rowIndex, colIndex, p, &board)\n\t\t\t}\n\t\t}\n\t}\n\tprintGame(&board)\n}\n\nfunc checkDirection(offsetY int, offsetX int, originX int, originY int, p int, board *[8][8]int) {\n\tif moveInBounds(offsetX+originX, offsetY+originY) {\n\t\tpreviousTile := board[originX][originY]\n\t\ttile := board[originX+offsetX][originY+offsetY]\n\n\t\tif tile != p && tile != 0 && tile != 3 {\n\t\t\tcheckDirection(offsetY, offsetX, originX+offsetX, originY+offsetY, p, board)\n\t\t} else if previousTile != p && previousTile != 0 && previousTile != 3 && tile == 0 {\n\t\t\tboard[originX+offsetX][originY+offsetY] = 3\n\t\t}\n\t}\n}\n\nfunc movePiece(move moveData) bool {\n\tvar valid = false\n\n\tfor i := -1; i < 2; i++ {\n\t\tfor j := -1; j < 2; j++ {\n\t\t\tif i != 0 || j != 0 {\n\t\t\t\tmoveMaid := validateCheckDirection(i, j, move.Row, move.Col, move.Player)\n\t\t\t\tif moveMaid == true {\n\t\t\t\t\tvalid = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif valid {\n\t\tprintGame(&theGame.board)\n\t\tfindPotentialMoves(theGame.board, getOpposingPlayer(move.Player))\n\t\ttheGame.turn = getOpposingPlayer(theGame.turn)\n\t}\n\treturn valid\n}\n\nfunc validateCheckDirection(offsetY int, offsetX int, originX int, originY int, p int) bool {\n\tif moveInBounds(offsetX+originX, offsetY+originY) {\n\t\toP := getOpposingPlayer(p)\n\t\tpreviousTile := theGame.board[originX][originY]\n\t\ttile := theGame.board[originX+offsetX][originY+offsetY]\n\n\t\tif tile != p && tile != 0 {\n\t\t\tif validateCheckDirection(offsetY, offsetX, originX+offsetX, originY+offsetY, p) {\n\t\t\t\ttheGame.board[originX][originY] = p\n\t\t\t\ttheGame.score[p] = theGame.score[p] + 1\n\t\t\t\ttheGame.score[oP] = theGame.score[oP] - 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else if previousTile != p && previousTile != 0 && tile == p {\n\t\t\ttheGame.board[originX][originY] = p\n\t\t\ttheGame.score[p] = theGame.score[p] + 1\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t}\n\treturn false\n}\n\nfunc getValueAt(move moveData) {\n\tif moveInBounds(move.Row, move.Col) {\n\t\tfmt.Println(move)\n\t\tfmt.Println(theGame.board[move.Row][move.Col])\n\t} else {\n\t\tfmt.Println(\"Move Out Of Bounds\")\n\t}\n}\n\nfunc getOpposingPlayer(p int) int {\n\tif p == 1 {\n\t\treturn 2\n\t}\n\treturn 1\n}\n\nfunc moveInBounds(x int, y int) bool {\n\treturn x > 0 && y > 0 && x < rowLength-1 && y < colLength-1\n}\n\nfunc printGame(board *[8][8]int) {\n\tfor _, row := range board {\n\t\tfmt.Println(row)\n\t}\n\tfmt.Printf(\"Player1: %v - Player2: %v\\n\", theGame.score[1], theGame.score[2])\n}\nFixed bug with turn not resetting on new gamepackage main\n\nimport \"fmt\"\n\nconst rowLength = 8\nconst colLength = 8\n\ntype game struct {\n\tboard [rowLength][colLength]int\n\tscore map[int]int\n\tturn int\n}\n\nvar theGame = game{\n\t[8][8]int{},\n\tmap[int]int{},\n\t1,\n}\n\nfunc setupGame() {\n\ttheGame.board[3][3] = 1\n\ttheGame.board[4][4] = 1\n\ttheGame.board[3][4] = 2\n\ttheGame.board[4][3] = 2\n\ttheGame.score[1] = 2\n\ttheGame.score[2] = 2\n\ttheGame.turn = 1\n\tprintGame(&theGame.board)\n\tfindPotentialMoves(theGame.board, 1)\n}\n\nfunc findPotentialMoves(board [8][8]int, p int) {\n\tfor rowIndex, row := range board {\n\t\tfor colIndex := range row {\n\t\t\tif board[rowIndex][colIndex] == p {\n\t\t\t\tcheckDirection(0, 1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, 1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(0, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(1, 0, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, -1, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, 0, rowIndex, colIndex, p, &board)\n\t\t\t\tcheckDirection(-1, 1, rowIndex, colIndex, p, &board)\n\t\t\t}\n\t\t}\n\t}\n\tprintGame(&board)\n}\n\nfunc checkDirection(offsetY int, offsetX int, originX int, originY int, p int, board *[8][8]int) {\n\tif moveInBounds(offsetX+originX, offsetY+originY) {\n\t\tpreviousTile := board[originX][originY]\n\t\ttile := board[originX+offsetX][originY+offsetY]\n\n\t\tif tile != p && tile != 0 && tile != 3 {\n\t\t\tcheckDirection(offsetY, offsetX, originX+offsetX, originY+offsetY, p, board)\n\t\t} else if previousTile != p && previousTile != 0 && previousTile != 3 && tile == 0 {\n\t\t\tboard[originX+offsetX][originY+offsetY] = 3\n\t\t}\n\t}\n}\n\nfunc movePiece(move moveData) bool {\n\tvar valid = false\n\n\tfor i := -1; i < 2; i++ {\n\t\tfor j := -1; j < 2; j++ {\n\t\t\tif i != 0 || j != 0 {\n\t\t\t\tmoveMaid := validateCheckDirection(i, j, move.Row, move.Col, move.Player)\n\t\t\t\tif moveMaid == true {\n\t\t\t\t\tvalid = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif valid {\n\t\tprintGame(&theGame.board)\n\t\tfindPotentialMoves(theGame.board, getOpposingPlayer(move.Player))\n\t\ttheGame.turn = getOpposingPlayer(theGame.turn)\n\t}\n\treturn valid\n}\n\nfunc validateCheckDirection(offsetY int, offsetX int, originX int, originY int, p int) bool {\n\tif moveInBounds(offsetX+originX, offsetY+originY) {\n\t\toP := getOpposingPlayer(p)\n\t\tpreviousTile := theGame.board[originX][originY]\n\t\ttile := theGame.board[originX+offsetX][originY+offsetY]\n\n\t\tif tile != p && tile != 0 {\n\t\t\tif validateCheckDirection(offsetY, offsetX, originX+offsetX, originY+offsetY, p) {\n\t\t\t\ttheGame.board[originX][originY] = p\n\t\t\t\ttheGame.score[p] = theGame.score[p] + 1\n\t\t\t\ttheGame.score[oP] = theGame.score[oP] - 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else if previousTile != p && previousTile != 0 && tile == p {\n\t\t\ttheGame.board[originX][originY] = p\n\t\t\ttheGame.score[p] = theGame.score[p] + 1\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t}\n\treturn false\n}\n\nfunc getValueAt(move moveData) {\n\tif moveInBounds(move.Row, move.Col) {\n\t\tfmt.Println(move)\n\t\tfmt.Println(theGame.board[move.Row][move.Col])\n\t} else {\n\t\tfmt.Println(\"Move Out Of Bounds\")\n\t}\n}\n\nfunc getOpposingPlayer(p int) int {\n\tif p == 1 {\n\t\treturn 2\n\t}\n\treturn 1\n}\n\nfunc moveInBounds(x int, y int) bool {\n\treturn x > 0 && y > 0 && x < rowLength-1 && y < colLength-1\n}\n\nfunc printGame(board *[8][8]int) {\n\tfor _, row := range board {\n\t\tfmt.Println(row)\n\t}\n\tfmt.Printf(\"Player1: %v - Player2: %v\\n\", theGame.score[1], theGame.score[2])\n}\n<|endoftext|>"} {"text":"\/\/ unico - Send Google+ activities to other networks\n\/\/\n\/\/ Copyright 2011 The Unico Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage unico\n\nimport (\n\t\"http\"\n\t\"os\"\n\t\"facebooklib\"\n\tplus \"google-api-go-client.googlecode.com\/hg\/plus\/v1\"\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n\n\t\"gorilla.googlecode.com\/hg\/gorilla\/sessions\"\n)\n\nfunc fbHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tid := \"\" \/\/r.FormValue(\"id\")\n\n\tif session, err := sessions.Session(r, \"\", \"datastore\"); err == nil {\n\t\tid = session[\"userID\"].(string)\n\t}\n\n\tif id == \"\" {\n\t\tserveError(c, w, os.NewError(\"Missing ID Parameter\"))\n\t\treturn\n\t}\n\n\tfc := facebooklib.NewFacebookClient(appConfig.FacebookAppId, appConfig.FacebookAppSecret)\n\tfc.Transport = &urlfetch.Transport{Context: c}\n\n\tcode := r.FormValue(\"code\")\n\tif code == \"\" {\n\n\t\thttp.Redirect(w, r, fc.AuthURL(\"http:\/\/\"+appConfig.AppHost+\"\/fb?id=\"+id, \"offline_access,publish_stream\"), http.StatusFound)\n\t\treturn\n\t}\n\n\tfc.RequestAccessToken(code, \"http:\/\/\"+appConfig.AppHost+\"\/fb?id=\"+id)\n\tuser := loadUser(r, id)\n\tif user.Id == \"\" {\n\t\tserveError(c, w, os.NewError(\"Invalid user ID\"))\n\t\treturn\n\t}\n\n\tuser.FBAccessToken = fc.AccessToken\n\tfbuser, _ := fc.CurrentUser()\n\tuser.FBId = fbuser.Id\n\tuser.FBName = fbuser.Name\n\tsaveUser(r, &user)\n\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\n}\n\nfunc publishActivityToFacebook(w http.ResponseWriter, r *http.Request, act *plus.Activity, user *User) {\n\tc := appengine.NewContext(r)\n\tfc := facebooklib.NewFacebookClient(appConfig.FacebookAppId, appConfig.FacebookAppSecret)\n\tfc.Transport = &urlfetch.Transport{Context: c}\n\tfc.AccessToken = user.FBAccessToken\n\n\t_ = w\n\n\tvar attachment *plus.ActivityObjectAttachments\n\tobj := act.Object\n\tkind := \"\"\n\tcontent := \"\"\n\n\tif act.Verb == \"share\" {\n\t\tcontent = act.Annotation\n\t\tif content == \"\" {\n\t\t\tcontent = \"Resharing \" + obj.Actor.DisplayName\n\t\t}\n\t\tkind = \"status_share\"\n\t} else {\n\t\tkind = \"status\"\n\t\tif obj != nil {\n\t\t\tif len(obj.Attachments) > 0 {\n\t\t\t\tattachment = obj.Attachments[0]\n\t\t\t\tkind = attachment.ObjectType\n\t\t\t}\n\t\t\tcontent = obj.Content\n\t\t} else {\n\t\t\tcontent = act.Title\n\t\t}\n\n\t}\n\tcontent = removeTags(content)\n\n\n\tvar err os.Error\n\n\tswitch kind {\n\tcase \"status\":\n\t\t\/\/ post a status update\n\t\terr = fc.PostStatus(content)\n\t\treturn\n\tcase \"article\":\n\t\t\/\/ post a link\n\t\terr = fc.PostLink(content, attachment.Url)\n\tdefault:\n\t\tif obj != nil {\n\t\t\terr = fc.PostLink(content, obj.Url)\n\t\t}\n\t}\n\n\tif err == facebooklib.ErrOAuth {\n\t\tuser.DisableFacebook()\n\t\tsaveUser(r, user)\n\t}\n\tc.Debugf(\"publishActivityToFacebook(%s): err=%v\\n\", kind, err)\n\n}\nNo longer post \"Resharing Someone\" to FB\/\/ unico - Send Google+ activities to other networks\n\/\/\n\/\/ Copyright 2011 The Unico Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage unico\n\nimport (\n\t\"http\"\n\t\"os\"\n\t\"facebooklib\"\n\tplus \"google-api-go-client.googlecode.com\/hg\/plus\/v1\"\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n\n\t\"gorilla.googlecode.com\/hg\/gorilla\/sessions\"\n)\n\nfunc fbHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tid := \"\" \/\/r.FormValue(\"id\")\n\n\tif session, err := sessions.Session(r, \"\", \"datastore\"); err == nil {\n\t\tid = session[\"userID\"].(string)\n\t}\n\n\tif id == \"\" {\n\t\tserveError(c, w, os.NewError(\"Missing ID Parameter\"))\n\t\treturn\n\t}\n\n\tfc := facebooklib.NewFacebookClient(appConfig.FacebookAppId, appConfig.FacebookAppSecret)\n\tfc.Transport = &urlfetch.Transport{Context: c}\n\n\tcode := r.FormValue(\"code\")\n\tif code == \"\" {\n\n\t\thttp.Redirect(w, r, fc.AuthURL(\"http:\/\/\"+appConfig.AppHost+\"\/fb?id=\"+id, \"offline_access,publish_stream\"), http.StatusFound)\n\t\treturn\n\t}\n\n\tfc.RequestAccessToken(code, \"http:\/\/\"+appConfig.AppHost+\"\/fb?id=\"+id)\n\tuser := loadUser(r, id)\n\tif user.Id == \"\" {\n\t\tserveError(c, w, os.NewError(\"Invalid user ID\"))\n\t\treturn\n\t}\n\n\tuser.FBAccessToken = fc.AccessToken\n\tfbuser, _ := fc.CurrentUser()\n\tuser.FBId = fbuser.Id\n\tuser.FBName = fbuser.Name\n\tsaveUser(r, &user)\n\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\n}\n\nfunc publishActivityToFacebook(w http.ResponseWriter, r *http.Request, act *plus.Activity, user *User) {\n\tc := appengine.NewContext(r)\n\tfc := facebooklib.NewFacebookClient(appConfig.FacebookAppId, appConfig.FacebookAppSecret)\n\tfc.Transport = &urlfetch.Transport{Context: c}\n\tfc.AccessToken = user.FBAccessToken\n\n\t_ = w\n\n\tvar attachment *plus.ActivityObjectAttachments\n\tobj := act.Object\n\tkind := \"\"\n\tcontent := \"\"\n\n\tif act.Verb == \"share\" {\n\t\tcontent = act.Annotation\n\t\t\/\/if content == \"\" {\n\t\t\/\/\tcontent = \"Resharing \" + obj.Actor.DisplayName\n\t\t\/\/}\n\t\tkind = \"status_share\"\n\t} else {\n\t\tkind = \"status\"\n\t\tif obj != nil {\n\t\t\tif len(obj.Attachments) > 0 {\n\t\t\t\tattachment = obj.Attachments[0]\n\t\t\t\tkind = attachment.ObjectType\n\t\t\t}\n\t\t\tcontent = obj.Content\n\t\t} else {\n\t\t\tcontent = act.Title\n\t\t}\n\n\t}\n\tcontent = removeTags(content)\n\n\n\tvar err os.Error\n\n\tswitch kind {\n\tcase \"status\":\n\t\t\/\/ post a status update\n\t\terr = fc.PostStatus(content)\n\t\treturn\n\tcase \"article\":\n\t\t\/\/ post a link\n\t\terr = fc.PostLink(content, attachment.Url)\n\tdefault:\n\t\tif obj != nil {\n\t\t\terr = fc.PostLink(content, obj.Url)\n\t\t}\n\t}\n\n\tif err == facebooklib.ErrOAuth {\n\t\tuser.DisableFacebook()\n\t\tsaveUser(r, user)\n\t}\n\tc.Debugf(\"publishActivityToFacebook(%s): err=%v\\n\", kind, err)\n\n}\n<|endoftext|>"} {"text":"package api\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ JobTypeService indicates a long-running processes\n\tJobTypeService = \"service\"\n\n\t\/\/ JobTypeBatch indicates a short-lived process\n\tJobTypeBatch = \"batch\"\n)\n\nconst (\n\t\/\/ RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by\n\t\/\/ enforcing the job modify index during registers.\n\tRegisterEnforceIndexErrPrefix = \"Enforcing job modify index\"\n)\n\n\/\/ Jobs is used to access the job-specific endpoints.\ntype Jobs struct {\n\tclient *Client\n}\n\n\/\/ Jobs returns a handle on the jobs endpoints.\nfunc (c *Client) Jobs() *Jobs {\n\treturn &Jobs{client: c}\n}\n\n\/\/ Register is used to register a new job. It returns the ID\n\/\/ of the evaluation, along with any errors encountered.\nfunc (j *Jobs) Register(job *Job, q *WriteOptions) (string, *WriteMeta, error) {\n\n\tvar resp registerJobResponse\n\n\treq := &RegisterJobRequest{Job: job}\n\twm, err := j.client.write(\"\/v1\/jobs\", req, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ EnforceRegister is used to register a job enforcing its job modify index.\nfunc (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (string, *WriteMeta, error) {\n\n\tvar resp registerJobResponse\n\n\treq := &RegisterJobRequest{\n\t\tJob: job,\n\t\tEnforceIndex: true,\n\t\tJobModifyIndex: modifyIndex,\n\t}\n\twm, err := j.client.write(\"\/v1\/jobs\", req, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ List is used to list all of the existing jobs.\nfunc (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {\n\tvar resp []*JobListStub\n\tqm, err := j.client.query(\"\/v1\/jobs\", &resp, q)\n\tif err != nil {\n\t\treturn nil, qm, err\n\t}\n\tsort.Sort(JobIDSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ PrefixList is used to list all existing jobs that match the prefix.\nfunc (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) {\n\treturn j.List(&QueryOptions{Prefix: prefix})\n}\n\n\/\/ Info is used to retrieve information about a particular\n\/\/ job given its unique ID.\nfunc (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {\n\tvar resp Job\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID, &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &resp, qm, nil\n}\n\n\/\/ Allocations is used to return the allocs for a given job ID.\nfunc (j *Jobs) Allocations(jobID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {\n\tvar resp []*AllocationListStub\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/allocations\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsort.Sort(AllocIndexSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ Evaluations is used to query the evaluations associated with\n\/\/ the given job ID.\nfunc (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) {\n\tvar resp []*Evaluation\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/evaluations\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsort.Sort(EvalIndexSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ Deregister is used to remove an existing job.\nfunc (j *Jobs) Deregister(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp deregisterJobResponse\n\twm, err := j.client.delete(\"\/v1\/job\/\"+jobID, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ ForceEvaluate is used to force-evaluate an existing job.\nfunc (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp registerJobResponse\n\twm, err := j.client.write(\"\/v1\/job\/\"+jobID+\"\/evaluate\", nil, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ PeriodicForce spawns a new instance of the periodic job and returns the eval ID\nfunc (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp periodicForceResponse\n\twm, err := j.client.write(\"\/v1\/job\/\"+jobID+\"\/periodic\/force\", nil, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\nfunc (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {\n\tif job == nil {\n\t\treturn nil, nil, fmt.Errorf(\"must pass non-nil job\")\n\t}\n\n\tvar resp JobPlanResponse\n\treq := &JobPlanRequest{\n\t\tJob: job,\n\t\tDiff: diff,\n\t}\n\twm, err := j.client.write(\"\/v1\/job\/\"+job.ID+\"\/plan\", req, &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &resp, wm, nil\n}\n\nfunc (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) {\n\tvar resp JobSummary\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/summary\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &resp, qm, nil\n}\n\n\/\/ periodicForceResponse is used to deserialize a force response\ntype periodicForceResponse struct {\n\tEvalID string\n}\n\n\/\/ UpdateStrategy is for serializing update strategy for a job.\ntype UpdateStrategy struct {\n\tStagger time.Duration\n\tMaxParallel int\n}\n\n\/\/ PeriodicConfig is for serializing periodic config for a job.\ntype PeriodicConfig struct {\n\tEnabled bool\n\tSpec string\n\tSpecType string\n\tProhibitOverlap bool\n}\n\n\/\/ Job is used to serialize a job.\ntype Job struct {\n\tRegion string\n\tID string\n\tName string\n\tType string\n\tPriority int\n\tAllAtOnce bool\n\tDatacenters []string\n\tConstraints []*Constraint\n\tTaskGroups []*TaskGroup\n\tUpdate *UpdateStrategy\n\tPeriodic *PeriodicConfig\n\tMeta map[string]string\n\tVaultToken string\n\tStatus string\n\tStatusDescription string\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tJobModifyIndex uint64\n}\n\n\/\/ JobSummary summarizes the state of the allocations of a job\ntype JobSummary struct {\n\tJobID string\n\tSummary map[string]TaskGroupSummary\n\n\t\/\/ Raft Indexes\n\tCreateIndex uint64\n\tModifyIndex uint64\n}\n\n\/\/ TaskGroup summarizes the state of all the allocations of a particular\n\/\/ TaskGroup\ntype TaskGroupSummary struct {\n\tQueued int\n\tComplete int\n\tFailed int\n\tRunning int\n\tStarting int\n\tLost int\n}\n\n\/\/ JobListStub is used to return a subset of information about\n\/\/ jobs during list operations.\ntype JobListStub struct {\n\tID string\n\tParentID string\n\tName string\n\tType string\n\tPriority int\n\tStatus string\n\tStatusDescription string\n\tJobSummary *JobSummary\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tJobModifyIndex uint64\n}\n\n\/\/ JobIDSort is used to sort jobs by their job ID's.\ntype JobIDSort []*JobListStub\n\nfunc (j JobIDSort) Len() int {\n\treturn len(j)\n}\n\nfunc (j JobIDSort) Less(a, b int) bool {\n\treturn j[a].ID < j[b].ID\n}\n\nfunc (j JobIDSort) Swap(a, b int) {\n\tj[a], j[b] = j[b], j[a]\n}\n\n\/\/ NewServiceJob creates and returns a new service-style job\n\/\/ for long-lived processes using the provided name, ID, and\n\/\/ relative job priority.\nfunc NewServiceJob(id, name, region string, pri int) *Job {\n\treturn newJob(id, name, region, JobTypeService, pri)\n}\n\n\/\/ NewBatchJob creates and returns a new batch-style job for\n\/\/ short-lived processes using the provided name and ID along\n\/\/ with the relative job priority.\nfunc NewBatchJob(id, name, region string, pri int) *Job {\n\treturn newJob(id, name, region, JobTypeBatch, pri)\n}\n\n\/\/ newJob is used to create a new Job struct.\nfunc newJob(id, name, region, typ string, pri int) *Job {\n\treturn &Job{\n\t\tRegion: region,\n\t\tID: id,\n\t\tName: name,\n\t\tType: typ,\n\t\tPriority: pri,\n\t}\n}\n\n\/\/ SetMeta is used to set arbitrary k\/v pairs of metadata on a job.\nfunc (j *Job) SetMeta(key, val string) *Job {\n\tif j.Meta == nil {\n\t\tj.Meta = make(map[string]string)\n\t}\n\tj.Meta[key] = val\n\treturn j\n}\n\n\/\/ AddDatacenter is used to add a datacenter to a job.\nfunc (j *Job) AddDatacenter(dc string) *Job {\n\tj.Datacenters = append(j.Datacenters, dc)\n\treturn j\n}\n\n\/\/ Constrain is used to add a constraint to a job.\nfunc (j *Job) Constrain(c *Constraint) *Job {\n\tj.Constraints = append(j.Constraints, c)\n\treturn j\n}\n\n\/\/ AddTaskGroup adds a task group to an existing job.\nfunc (j *Job) AddTaskGroup(grp *TaskGroup) *Job {\n\tj.TaskGroups = append(j.TaskGroups, grp)\n\treturn j\n}\n\n\/\/ AddPeriodicConfig adds a periodic config to an existing job.\nfunc (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {\n\tj.Periodic = cfg\n\treturn j\n}\n\n\/\/ RegisterJobRequest is used to serialize a job registration\ntype RegisterJobRequest struct {\n\tJob *Job\n\tEnforceIndex bool `json:\",omitempty\"`\n\tJobModifyIndex uint64 `json:\",omitempty\"`\n}\n\n\/\/ registerJobResponse is used to deserialize a job response\ntype registerJobResponse struct {\n\tEvalID string\n}\n\n\/\/ deregisterJobResponse is used to decode a deregister response\ntype deregisterJobResponse struct {\n\tEvalID string\n}\n\ntype JobPlanRequest struct {\n\tJob *Job\n\tDiff bool\n}\n\ntype JobPlanResponse struct {\n\tJobModifyIndex uint64\n\tCreatedEvals []*Evaluation\n\tDiff *JobDiff\n\tAnnotations *PlanAnnotations\n\tFailedTGAllocs map[string]*AllocationMetric\n\tNextPeriodicLaunch time.Time\n}\n\ntype JobDiff struct {\n\tType string\n\tID string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tTaskGroups []*TaskGroupDiff\n}\n\ntype TaskGroupDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tTasks []*TaskDiff\n\tUpdates map[string]uint64\n}\n\ntype TaskDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tAnnotations []string\n}\n\ntype FieldDiff struct {\n\tType string\n\tName string\n\tOld, New string\n\tAnnotations []string\n}\n\ntype ObjectDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n}\n\ntype PlanAnnotations struct {\n\tDesiredTGUpdates map[string]*DesiredUpdates\n}\n\ntype DesiredUpdates struct {\n\tIgnore uint64\n\tPlace uint64\n\tMigrate uint64\n\tStop uint64\n\tInPlaceUpdate uint64\n\tDestructiveUpdate uint64\n}\nInclude parent ID in job info returned by Go APIpackage api\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ JobTypeService indicates a long-running processes\n\tJobTypeService = \"service\"\n\n\t\/\/ JobTypeBatch indicates a short-lived process\n\tJobTypeBatch = \"batch\"\n)\n\nconst (\n\t\/\/ RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by\n\t\/\/ enforcing the job modify index during registers.\n\tRegisterEnforceIndexErrPrefix = \"Enforcing job modify index\"\n)\n\n\/\/ Jobs is used to access the job-specific endpoints.\ntype Jobs struct {\n\tclient *Client\n}\n\n\/\/ Jobs returns a handle on the jobs endpoints.\nfunc (c *Client) Jobs() *Jobs {\n\treturn &Jobs{client: c}\n}\n\n\/\/ Register is used to register a new job. It returns the ID\n\/\/ of the evaluation, along with any errors encountered.\nfunc (j *Jobs) Register(job *Job, q *WriteOptions) (string, *WriteMeta, error) {\n\n\tvar resp registerJobResponse\n\n\treq := &RegisterJobRequest{Job: job}\n\twm, err := j.client.write(\"\/v1\/jobs\", req, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ EnforceRegister is used to register a job enforcing its job modify index.\nfunc (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (string, *WriteMeta, error) {\n\n\tvar resp registerJobResponse\n\n\treq := &RegisterJobRequest{\n\t\tJob: job,\n\t\tEnforceIndex: true,\n\t\tJobModifyIndex: modifyIndex,\n\t}\n\twm, err := j.client.write(\"\/v1\/jobs\", req, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ List is used to list all of the existing jobs.\nfunc (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) {\n\tvar resp []*JobListStub\n\tqm, err := j.client.query(\"\/v1\/jobs\", &resp, q)\n\tif err != nil {\n\t\treturn nil, qm, err\n\t}\n\tsort.Sort(JobIDSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ PrefixList is used to list all existing jobs that match the prefix.\nfunc (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) {\n\treturn j.List(&QueryOptions{Prefix: prefix})\n}\n\n\/\/ Info is used to retrieve information about a particular\n\/\/ job given its unique ID.\nfunc (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) {\n\tvar resp Job\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID, &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &resp, qm, nil\n}\n\n\/\/ Allocations is used to return the allocs for a given job ID.\nfunc (j *Jobs) Allocations(jobID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {\n\tvar resp []*AllocationListStub\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/allocations\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsort.Sort(AllocIndexSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ Evaluations is used to query the evaluations associated with\n\/\/ the given job ID.\nfunc (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) {\n\tvar resp []*Evaluation\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/evaluations\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsort.Sort(EvalIndexSort(resp))\n\treturn resp, qm, nil\n}\n\n\/\/ Deregister is used to remove an existing job.\nfunc (j *Jobs) Deregister(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp deregisterJobResponse\n\twm, err := j.client.delete(\"\/v1\/job\/\"+jobID, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ ForceEvaluate is used to force-evaluate an existing job.\nfunc (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp registerJobResponse\n\twm, err := j.client.write(\"\/v1\/job\/\"+jobID+\"\/evaluate\", nil, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\n\/\/ PeriodicForce spawns a new instance of the periodic job and returns the eval ID\nfunc (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) {\n\tvar resp periodicForceResponse\n\twm, err := j.client.write(\"\/v1\/job\/\"+jobID+\"\/periodic\/force\", nil, &resp, q)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn resp.EvalID, wm, nil\n}\n\nfunc (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) {\n\tif job == nil {\n\t\treturn nil, nil, fmt.Errorf(\"must pass non-nil job\")\n\t}\n\n\tvar resp JobPlanResponse\n\treq := &JobPlanRequest{\n\t\tJob: job,\n\t\tDiff: diff,\n\t}\n\twm, err := j.client.write(\"\/v1\/job\/\"+job.ID+\"\/plan\", req, &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &resp, wm, nil\n}\n\nfunc (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) {\n\tvar resp JobSummary\n\tqm, err := j.client.query(\"\/v1\/job\/\"+jobID+\"\/summary\", &resp, q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &resp, qm, nil\n}\n\n\/\/ periodicForceResponse is used to deserialize a force response\ntype periodicForceResponse struct {\n\tEvalID string\n}\n\n\/\/ UpdateStrategy is for serializing update strategy for a job.\ntype UpdateStrategy struct {\n\tStagger time.Duration\n\tMaxParallel int\n}\n\n\/\/ PeriodicConfig is for serializing periodic config for a job.\ntype PeriodicConfig struct {\n\tEnabled bool\n\tSpec string\n\tSpecType string\n\tProhibitOverlap bool\n}\n\n\/\/ Job is used to serialize a job.\ntype Job struct {\n\tRegion string\n\tID string\n\tParentID string\n\tName string\n\tType string\n\tPriority int\n\tAllAtOnce bool\n\tDatacenters []string\n\tConstraints []*Constraint\n\tTaskGroups []*TaskGroup\n\tUpdate *UpdateStrategy\n\tPeriodic *PeriodicConfig\n\tMeta map[string]string\n\tVaultToken string\n\tStatus string\n\tStatusDescription string\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tJobModifyIndex uint64\n}\n\n\/\/ JobSummary summarizes the state of the allocations of a job\ntype JobSummary struct {\n\tJobID string\n\tSummary map[string]TaskGroupSummary\n\n\t\/\/ Raft Indexes\n\tCreateIndex uint64\n\tModifyIndex uint64\n}\n\n\/\/ TaskGroup summarizes the state of all the allocations of a particular\n\/\/ TaskGroup\ntype TaskGroupSummary struct {\n\tQueued int\n\tComplete int\n\tFailed int\n\tRunning int\n\tStarting int\n\tLost int\n}\n\n\/\/ JobListStub is used to return a subset of information about\n\/\/ jobs during list operations.\ntype JobListStub struct {\n\tID string\n\tParentID string\n\tName string\n\tType string\n\tPriority int\n\tStatus string\n\tStatusDescription string\n\tJobSummary *JobSummary\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tJobModifyIndex uint64\n}\n\n\/\/ JobIDSort is used to sort jobs by their job ID's.\ntype JobIDSort []*JobListStub\n\nfunc (j JobIDSort) Len() int {\n\treturn len(j)\n}\n\nfunc (j JobIDSort) Less(a, b int) bool {\n\treturn j[a].ID < j[b].ID\n}\n\nfunc (j JobIDSort) Swap(a, b int) {\n\tj[a], j[b] = j[b], j[a]\n}\n\n\/\/ NewServiceJob creates and returns a new service-style job\n\/\/ for long-lived processes using the provided name, ID, and\n\/\/ relative job priority.\nfunc NewServiceJob(id, name, region string, pri int) *Job {\n\treturn newJob(id, name, region, JobTypeService, pri)\n}\n\n\/\/ NewBatchJob creates and returns a new batch-style job for\n\/\/ short-lived processes using the provided name and ID along\n\/\/ with the relative job priority.\nfunc NewBatchJob(id, name, region string, pri int) *Job {\n\treturn newJob(id, name, region, JobTypeBatch, pri)\n}\n\n\/\/ newJob is used to create a new Job struct.\nfunc newJob(id, name, region, typ string, pri int) *Job {\n\treturn &Job{\n\t\tRegion: region,\n\t\tID: id,\n\t\tName: name,\n\t\tType: typ,\n\t\tPriority: pri,\n\t}\n}\n\n\/\/ SetMeta is used to set arbitrary k\/v pairs of metadata on a job.\nfunc (j *Job) SetMeta(key, val string) *Job {\n\tif j.Meta == nil {\n\t\tj.Meta = make(map[string]string)\n\t}\n\tj.Meta[key] = val\n\treturn j\n}\n\n\/\/ AddDatacenter is used to add a datacenter to a job.\nfunc (j *Job) AddDatacenter(dc string) *Job {\n\tj.Datacenters = append(j.Datacenters, dc)\n\treturn j\n}\n\n\/\/ Constrain is used to add a constraint to a job.\nfunc (j *Job) Constrain(c *Constraint) *Job {\n\tj.Constraints = append(j.Constraints, c)\n\treturn j\n}\n\n\/\/ AddTaskGroup adds a task group to an existing job.\nfunc (j *Job) AddTaskGroup(grp *TaskGroup) *Job {\n\tj.TaskGroups = append(j.TaskGroups, grp)\n\treturn j\n}\n\n\/\/ AddPeriodicConfig adds a periodic config to an existing job.\nfunc (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {\n\tj.Periodic = cfg\n\treturn j\n}\n\n\/\/ RegisterJobRequest is used to serialize a job registration\ntype RegisterJobRequest struct {\n\tJob *Job\n\tEnforceIndex bool `json:\",omitempty\"`\n\tJobModifyIndex uint64 `json:\",omitempty\"`\n}\n\n\/\/ registerJobResponse is used to deserialize a job response\ntype registerJobResponse struct {\n\tEvalID string\n}\n\n\/\/ deregisterJobResponse is used to decode a deregister response\ntype deregisterJobResponse struct {\n\tEvalID string\n}\n\ntype JobPlanRequest struct {\n\tJob *Job\n\tDiff bool\n}\n\ntype JobPlanResponse struct {\n\tJobModifyIndex uint64\n\tCreatedEvals []*Evaluation\n\tDiff *JobDiff\n\tAnnotations *PlanAnnotations\n\tFailedTGAllocs map[string]*AllocationMetric\n\tNextPeriodicLaunch time.Time\n}\n\ntype JobDiff struct {\n\tType string\n\tID string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tTaskGroups []*TaskGroupDiff\n}\n\ntype TaskGroupDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tTasks []*TaskDiff\n\tUpdates map[string]uint64\n}\n\ntype TaskDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n\tAnnotations []string\n}\n\ntype FieldDiff struct {\n\tType string\n\tName string\n\tOld, New string\n\tAnnotations []string\n}\n\ntype ObjectDiff struct {\n\tType string\n\tName string\n\tFields []*FieldDiff\n\tObjects []*ObjectDiff\n}\n\ntype PlanAnnotations struct {\n\tDesiredTGUpdates map[string]*DesiredUpdates\n}\n\ntype DesiredUpdates struct {\n\tIgnore uint64\n\tPlace uint64\n\tMigrate uint64\n\tStop uint64\n\tInPlaceUpdate uint64\n\tDestructiveUpdate uint64\n}\n<|endoftext|>"} {"text":"package Mongo\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\n\/\/ InitCollectionFromDatabase - initialize collection from mgo database\nfunc InitCollectionFromDatabase(db *mgo.Database, collectionName string) *mgo.Collection {\n\treturn db.C(collectionName)\n}\n\n\/\/ InitCollectionFromSession - initialize collection from mgo session\nfunc InitCollectionFromSession(session *mgo.Session, databaseName string, collectionName string) *mgo.Collection {\n\tdb := session.DB(databaseName)\n\treturn db.C(collectionName)\n}\n\n\/\/ InitCollectionFromConnectionString - initialize collection from a connection string\nfunc InitCollectionFromConnectionString(connectionString string, databaseName string, collectionName string) *mgo.Collection {\n\tdb, err := InitDatabaseFromConnection(connectionString, databaseName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn db.C(collectionName)\n}\n\n\/\/ InitDatabaseFromConnection sets session informaiton from a connection string\nfunc InitDatabaseFromConnection(connectionString string, databaseName string) (*mgo.Database, error) {\n\tsession, err := InitSessionFromConnectionString(connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn session.DB(databaseName), nil\n}\n\n\/\/ InitDatabaseFromSession sets session informaiton from a connection string\nfunc InitDatabaseFromSession(session *mgo.Session, databaseName string) (*mgo.Database, error) {\n\treturn session.DB(databaseName), nil\n}\n\n\/\/ InitSessionFromConnectionString get the session information for a conneciton\nfunc InitSessionFromConnectionString(connectionString string) (*mgo.Session, error) {\n\tdialInformation, sessionMode, err := GetDialInformation(connectionString)\n\tif err != nil {\n\t\tfmt.Println(\"error getting dial information\", err)\n\t\tpanic(err)\n\t}\n\treturn InitSessionFromDialInfo(dialInformation, sessionMode)\n}\n\n\/\/ InitSessionFromDialInfo get the session information for a conneciton\nfunc InitSessionFromDialInfo(dialInfo *mgo.DialInfo, sessionMode mgo.Mode) (*mgo.Session, error) {\n\tsession, err1 := mgo.DialWithInfo(dialInfo)\n\tif err1 != nil {\n\t\tpanic(err1)\n\t}\n\n\t\/\/ Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(sessionMode, true)\n\treturn session, nil\n}\n\n\/\/ GetDialInformation get the dial information\nfunc GetDialInformation(connectionString string) (*mgo.DialInfo, mgo.Mode, error) {\n\tinfo := &mgo.DialInfo{\n\t\tAddrs: []string{},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: \"\",\n\t\tUsername: \"\",\n\t\tPassword: \"\",\n\t}\n\n\tresult := getConnectionStringItems(connectionString)\n\t\/\/fmt.Println(len(result))\n\tif len(result) < 2 {\n\t\treturn info, 2, errors.New(\"could not parse connecitonString\")\n\t}\n\n\thost := strings.Split(result[0], \",\")\n\tdatabaseName := result[1]\n\tinfo.Addrs = host\n\tinfo.Database = databaseName\n\tsessionMode := mgo.Primary\n\t\/\/ get options\n\tif len(result) == 3 {\n\t\tresultOptions := getOptionStringItems(result[2])\n\t\tfor i := range resultOptions {\n\t\t\tswitch {\n\t\t\tcase strings.Contains(resultOptions[i], \"replicaSet\"):\n\t\t\t\toptionValue := getValueFromPairString(resultOptions[i])\n\t\t\t\tif optionValue != \"\" {\n\t\t\t\t\tinfo.ReplicaSetName = optionValue\n\t\t\t\t}\n\t\t\tcase strings.Contains(resultOptions[i], \"maxPoolSize\"):\n\t\t\t\toptionValue := getValueFromPairInt(resultOptions[i])\n\t\t\t\tinfo.PoolLimit = optionValue\n\t\t\tcase strings.Contains(resultOptions[i], \"readPreference\"):\n\t\t\t\toptionValue := getValueFromPairString(resultOptions[i])\n\t\t\t\tsessionMode = getReadPreference(optionValue)\n\n\t\t\t} \/\/ end switch\n\n\t\t} \/\/ end for loop\n\t}\n\treturn info, sessionMode, nil\n}\n\nfunc getConnectionStringItems(connectionString string) []string {\n\tconnectionString = strings.Replace(connectionString, \"mongodb:\/\/\", \"\", 1)\n\treturn strings.Split(connectionString, \"\/\")\n}\n\nfunc getOptionStringItems(optionString string) []string {\n\toptionString = strings.Replace(optionString, \"?\", \"\", 1)\n\treturn strings.Split(optionString, \";\")\n}\n\nfunc getValueFromPairString(optionString string) string {\n\tkeyValue := strings.Split(optionString, \"=\")\n\tif len(keyValue) == 2 {\n\t\treturn keyValue[1]\n\t}\n\treturn \"\"\n}\n\nfunc getValueFromPairInt(optionString string) int {\n\tkeyValue := strings.Split(optionString, \"=\")\n\tif len(keyValue) == 2 {\n\t\tvalue, err := strconv.Atoi(keyValue[1])\n\t\tif err != nil {\n\t\t\treturn 100\n\t\t}\n\n\t\treturn value\n\t}\n\treturn 100\n}\n\nfunc getReadPreference(optionString string) mgo.Mode {\n\t\/\/ Primary Mode = 2 \/\/ Default mode. All operations read from the current replica set primary.\n\t\/\/ PrimaryPreferred Mode = 3 \/\/ Read from the primary if available. Read from the secondary otherwise.\n\t\/\/ Secondary Mode = 4 \/\/ Read from one of the nearest secondary members of the replica set.\n\t\/\/ SecondaryPreferred Mode = 5 \/\/ Read from one of the nearest secondaries if available. Read from primary otherwise.\n\t\/\/ Nearest Mode = 6 \/\/ Read from one of the nearest members, irrespective of it being primary or secondary.\n\t\/\/\n\t\/\/ \/\/ Read preference modes are specific to mgo:\n\t\/\/ Eventual Mode = 0 \/\/ Same as Nearest, but may change servers between reads.\n\t\/\/ Monotonic Mode = 1 \/\/ Same as SecondaryPreferred before first write. Same as Primary after first write.\n\t\/\/ Strong Mode = 2 \/\/ Same as Primary.\n\tswitch optionString {\n\tcase \"Primary\":\n\t\treturn mgo.Primary\n\n\t}\n\treturn mgo.Primary\n}\nupdated go repopackage Mongo\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\n\/\/ InitCollectionFromDatabase - initialize collection from mgo database\nfunc InitCollectionFromDatabase(db *mgo.Database, collectionName string) *mgo.Collection {\n\treturn db.C(collectionName)\n}\n\n\/\/ InitCollectionFromSession - initialize collection from mgo session\nfunc InitCollectionFromSession(session *mgo.Session, databaseName string, collectionName string) *mgo.Collection {\n\tdb := session.DB(databaseName)\n\treturn db.C(collectionName)\n}\n\n\/\/ InitCollectionFromConnectionString - initialize collection from a connection string and passin database\nfunc InitCollectionFromConnectionString(connectionString string, databaseName string, collectionName string) *mgo.Collection {\n\tdb, err := InitDatabaseFromConnection(connectionString, databaseName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn db.C(collectionName)\n}\n\n\/\/ InitCollectionAndDatabaseFromConnectionString - initialize collection from a connection string\nfunc InitCollectionAndDatabaseFromConnectionString(connectionString string, collectionName string) *mgo.Collection {\n\tdialInformation, _, err := GetDialInformation(connectionString)\n\tif err != nil {\n\t\tfmt.Println(\"error getting dial information\", err)\n\t\tpanic(err)\n\t}\n\n\tdb, dbErr := InitDatabaseFromConnection(connectionString, dialInformation.Database)\n\tif dbErr != nil {\n\t\tpanic(dbErr)\n\t}\n\treturn db.C(collectionName)\n}\n\n\/\/ InitDatabaseFromConnection sets session informaiton from a connection string\nfunc InitDatabaseFromConnection(connectionString string, databaseName string) (*mgo.Database, error) {\n\tsession, err := InitSessionFromConnectionString(connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn session.DB(databaseName), nil\n}\n\n\/\/ InitDatabaseFromSession sets session informaiton from a connection string\nfunc InitDatabaseFromSession(session *mgo.Session, databaseName string) (*mgo.Database, error) {\n\treturn session.DB(databaseName), nil\n}\n\n\/\/ InitSessionFromConnectionString get the session information for a conneciton\nfunc InitSessionFromConnectionString(connectionString string) (*mgo.Session, error) {\n\tdialInformation, sessionMode, err := GetDialInformation(connectionString)\n\tif err != nil {\n\t\tfmt.Println(\"error getting dial information\", err)\n\t\tpanic(err)\n\t}\n\treturn InitSessionFromDialInfo(dialInformation, sessionMode)\n}\n\n\/\/ InitSessionFromDialInfo get the session information for a conneciton\nfunc InitSessionFromDialInfo(dialInfo *mgo.DialInfo, sessionMode mgo.Mode) (*mgo.Session, error) {\n\tsession, err1 := mgo.DialWithInfo(dialInfo)\n\tif err1 != nil {\n\t\tpanic(err1)\n\t}\n\n\t\/\/ Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(sessionMode, true)\n\treturn session, nil\n}\n\n\/\/ GetDialInformation get the dial information\nfunc GetDialInformation(connectionString string) (*mgo.DialInfo, mgo.Mode, error) {\n\tinfo := &mgo.DialInfo{\n\t\tAddrs: []string{},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: \"\",\n\t\tUsername: \"\",\n\t\tPassword: \"\",\n\t}\n\n\tresult := getConnectionStringItems(connectionString)\n\t\/\/fmt.Println(len(result))\n\tif len(result) < 2 {\n\t\treturn info, 2, errors.New(\"could not parse connecitonString\")\n\t}\n\n\thost := strings.Split(result[0], \",\")\n\tdatabaseName := result[1]\n\tinfo.Addrs = host\n\tinfo.Database = databaseName\n\tsessionMode := mgo.Primary\n\t\/\/ get options\n\tif len(result) == 3 {\n\t\tresultOptions := getOptionStringItems(result[2])\n\t\tfor i := range resultOptions {\n\t\t\tswitch {\n\t\t\tcase strings.Contains(resultOptions[i], \"replicaSet\"):\n\t\t\t\toptionValue := getValueFromPairString(resultOptions[i])\n\t\t\t\tif optionValue != \"\" {\n\t\t\t\t\tinfo.ReplicaSetName = optionValue\n\t\t\t\t}\n\t\t\tcase strings.Contains(resultOptions[i], \"maxPoolSize\"):\n\t\t\t\toptionValue := getValueFromPairInt(resultOptions[i])\n\t\t\t\tinfo.PoolLimit = optionValue\n\t\t\tcase strings.Contains(resultOptions[i], \"readPreference\"):\n\t\t\t\toptionValue := getValueFromPairString(resultOptions[i])\n\t\t\t\tsessionMode = getReadPreference(optionValue)\n\n\t\t\t} \/\/ end switch\n\n\t\t} \/\/ end for loop\n\t}\n\treturn info, sessionMode, nil\n}\n\nfunc getConnectionStringItems(connectionString string) []string {\n\tconnectionString = strings.Replace(connectionString, \"mongodb:\/\/\", \"\", 1)\n\treturn strings.Split(connectionString, \"\/\")\n}\n\nfunc getOptionStringItems(optionString string) []string {\n\toptionString = strings.Replace(optionString, \"?\", \"\", 1)\n\treturn strings.Split(optionString, \";\")\n}\n\nfunc getValueFromPairString(optionString string) string {\n\tkeyValue := strings.Split(optionString, \"=\")\n\tif len(keyValue) == 2 {\n\t\treturn keyValue[1]\n\t}\n\treturn \"\"\n}\n\nfunc getValueFromPairInt(optionString string) int {\n\tkeyValue := strings.Split(optionString, \"=\")\n\tif len(keyValue) == 2 {\n\t\tvalue, err := strconv.Atoi(keyValue[1])\n\t\tif err != nil {\n\t\t\treturn 100\n\t\t}\n\n\t\treturn value\n\t}\n\treturn 100\n}\n\nfunc getReadPreference(optionString string) mgo.Mode {\n\t\/\/ Primary Mode = 2 \/\/ Default mode. All operations read from the current replica set primary.\n\t\/\/ PrimaryPreferred Mode = 3 \/\/ Read from the primary if available. Read from the secondary otherwise.\n\t\/\/ Secondary Mode = 4 \/\/ Read from one of the nearest secondary members of the replica set.\n\t\/\/ SecondaryPreferred Mode = 5 \/\/ Read from one of the nearest secondaries if available. Read from primary otherwise.\n\t\/\/ Nearest Mode = 6 \/\/ Read from one of the nearest members, irrespective of it being primary or secondary.\n\t\/\/\n\t\/\/ \/\/ Read preference modes are specific to mgo:\n\t\/\/ Eventual Mode = 0 \/\/ Same as Nearest, but may change servers between reads.\n\t\/\/ Monotonic Mode = 1 \/\/ Same as SecondaryPreferred before first write. Same as Primary after first write.\n\t\/\/ Strong Mode = 2 \/\/ Same as Primary.\n\tswitch optionString {\n\tcase \"Primary\":\n\t\treturn mgo.Primary\n\n\t}\n\treturn mgo.Primary\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"github.com\/jacobsa\/aws\/exp\/sdb\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"sync\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype integrationTest struct {\n\tdb sdb.SimpleDB\n\n\tmutex sync.Mutex\n\tdomainsToDelete []sdb.Domain \/\/ Protected by mutex\n}\n\nfunc (t *integrationTest) SetUp(i *TestInfo) {\n\tvar err error\n\n\t\/\/ Open a connection.\n\tt.db, err = sdb.NewSimpleDB(sdb.Region(*g_region), g_accessKey)\n\tAssertEq(nil, err)\n}\n\nfunc (t *integrationTest) ensureDeleted(d sdb.Domain)\n\nfunc (t *integrationTest) TearDown() {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\t\/\/ Delete each of the domains created during the test.\n\tfor _, d := range t.domainsToDelete {\n\t\tExpectEq(nil, t.db.DeleteDomain(d), \"Domain: %s\", d.Name())\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Domains\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype DomainsTest struct {\n\tintegrationTest\n}\n\nfunc init() { RegisterTestSuite(&DomainsTest{}) }\n\nfunc (t *DomainsTest) InvalidAccessKey() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) DomainsHaveIndependentItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) Delete() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) OpeningTwiceDoesntDeleteExistingItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) DeleteTwice() {\n\tExpectEq(\"TODO\", \"\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Items\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ItemsTest struct {\n\tintegrationTest\n}\n\nfunc init() { RegisterTestSuite(&ItemsTest{}) }\n\nfunc (t *ItemsTest) WrongAccessKeySecret() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8ItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8AttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8AttributeValue() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongAttributeValue() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) PutThenGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchPutThenGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchPutThenBatchGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetForNonExistentItem() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetForNonExistentItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetNonExistentAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetNonExistentAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedValuePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedExistencePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedNonExistencePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SuccessfulPreconditions() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) DeleteParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) DeleteAllAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchDelete() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidSelectQuery() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectAll() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectCount() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithPredicates() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithSortOrder() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithLimit() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectEmptyResultSet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectLargeResultSet() {\n\tExpectEq(\"TODO\", \"\")\n}\nFixed another error.\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"github.com\/jacobsa\/aws\/exp\/sdb\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"sync\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype integrationTest struct {\n\tdb sdb.SimpleDB\n\n\tmutex sync.Mutex\n\tdomainsToDelete []sdb.Domain \/\/ Protected by mutex\n}\n\nfunc (t *integrationTest) SetUp(i *TestInfo) {\n\tvar err error\n\n\t\/\/ Open a connection.\n\tt.db, err = sdb.NewSimpleDB(sdb.Region(*g_region), g_accessKey)\n\tAssertEq(nil, err)\n}\n\nfunc (t *integrationTest) ensureDeleted(d sdb.Domain) {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\tt.domainsToDelete = append(t.domainsToDelete, d)\n}\n\nfunc (t *integrationTest) TearDown() {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\t\/\/ Delete each of the domains created during the test.\n\tfor _, d := range t.domainsToDelete {\n\t\tExpectEq(nil, t.db.DeleteDomain(d), \"Domain: %s\", d.Name())\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Domains\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype DomainsTest struct {\n\tintegrationTest\n}\n\nfunc init() { RegisterTestSuite(&DomainsTest{}) }\n\nfunc (t *DomainsTest) InvalidAccessKey() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) DomainsHaveIndependentItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) Delete() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) OpeningTwiceDoesntDeleteExistingItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *DomainsTest) DeleteTwice() {\n\tExpectEq(\"TODO\", \"\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Items\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ItemsTest struct {\n\tintegrationTest\n}\n\nfunc init() { RegisterTestSuite(&ItemsTest{}) }\n\nfunc (t *ItemsTest) WrongAccessKeySecret() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8ItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8AttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidUtf8AttributeValue() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) LongAttributeValue() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) PutThenGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchPutThenGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchPutThenBatchGet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetForNonExistentItem() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetForNonExistentItems() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) GetNonExistentAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchGetNonExistentAttributeName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedValuePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedExistencePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) FailedNonExistencePrecondition() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SuccessfulPreconditions() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) DeleteParticularAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) DeleteAllAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) BatchDelete() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) InvalidSelectQuery() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectAll() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectItemName() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectCount() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithPredicates() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithSortOrder() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectWithLimit() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectEmptyResultSet() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *ItemsTest) SelectLargeResultSet() {\n\tExpectEq(\"TODO\", \"\")\n}\n<|endoftext|>"} {"text":"package ident\n\nimport (\n\t\"bytes\"\n\t\"github.com\/ionous\/sashimi\/util\/lang\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n\/\/ Id uniquely identifies some resource.\n\/\/ NOTE: go is not capable of comparing slices(!?), and there is no interface for making a type hashable(!?)\n\/\/ ALSO: the default json encoding --- even when the marshal is implemented!? -- doesnt seem to support a struct as a map key.\n\/\/ Therefore, Id cannot store its parts as an []string. they must be joined first.\ntype Id string\n\n\/\/ String representation of the id (ex. for fmt), currently TitleCase.\nfunc (id Id) String() (ret string) {\n\tif id.Empty() {\n\t\tret = \"\"\n\t} else {\n\t\tparts := id.Split()\n\t\tfor i, s := range parts {\n\t\t\tparts[i] = lang.Capitalize(s)\n\t\t}\n\t\tret = strings.Join(parts, \"\")\n\t}\n\treturn ret\n}\n\nfunc (id Id) Empty() bool {\n\treturn len(id) == 0\n}\n\nfunc Compare(a, b Id) int {\n\treturn strings.Compare(string(a), string(b))\n}\n\nfunc Join(a, b Id) Id {\n\treturn Id(strings.Join(append(a.Split(), b.Split()...), \"-\"))\n}\n\nfunc Empty() (ret Id) {\n\treturn\n}\n\nfunc MakeUniqueId() Id {\n\tstr := uuid.NewV4().String()\n\treturn Id(str)\n}\n\n\/\/ MakeId creates a new string id from the passed raw string.\n\/\/ Dashes and spaces are treated as word separators.\n\/\/ Other Illegal characters ( leading digits and non-word characters ) are stripped.\n\/\/ NOTE: Articles ( the, etc. ) are stripped for easier matching at the script\/table\/level.\nfunc MakeId(name string) Id {\n\tvar parts parts\n\tstarted, inword, wasUpper := false, false, false\n\n\tfor _, r := range name {\n\t\tif r == '-' || r == '_' || r == '=' || unicode.IsSpace(r) {\n\t\t\tinword = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ this test similar to go scanner's is identifier alg:\n\t\t\/\/ it has _, which we treat as a space, and\n\t\t\/\/ its i>0, but since we are stripping spaces, 'started' is better.\n\t\tif unicode.IsLetter(r) || (started && unicode.IsDigit(r)) {\n\t\t\tstarted = true\n\t\t\tnowUpper := unicode.IsUpper(r)\n\t\t\t\/\/ classify some common word changes\n\t\t\tsameWord := inword && ((wasUpper == nowUpper) || (wasUpper && !nowUpper))\n\t\t\tif nowUpper {\n\t\t\t\tr = unicode.ToLower(r)\n\t\t\t}\n\t\t\tif !sameWord {\n\t\t\t\tparts.flush()\n\t\t\t}\n\t\t\tparts.WriteRune(r) \/\/ docs say err is always nil\n\t\t\twasUpper = nowUpper\n\t\t\tinword = true\n\t\t}\n\t}\n\n\tdashed := strings.Join(parts.flush(), \"-\")\n\treturn Id(dashed)\n}\n\n\/\/ Split the id into separated, lower cased components.\nfunc (id Id) Split() []string {\n\tdashed := string(id)\n\treturn strings.Split(dashed, \"-\")\n}\n\ntype parts struct {\n\tbytes.Buffer\n\tarr []string\n}\n\nfunc (p *parts) flush() []string {\n\tif p.Len() > 0 {\n\t\tp.arr = append(p.arr, p.String())\n\t\tp.Reset()\n\t}\n\treturn p.arr\n}\nfix: strings.Compare doesnt exist in appenginepackage ident\n\nimport (\n\t\"bytes\"\n\t\"github.com\/ionous\/sashimi\/util\/lang\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n\/\/ Id uniquely identifies some resource.\n\/\/ NOTE: go is not capable of comparing slices(!?), and there is no interface for making a type hashable(!?)\n\/\/ ALSO: the default json encoding --- even when the marshal is implemented!? -- doesnt seem to support a struct as a map key.\n\/\/ Therefore, Id cannot store its parts as an []string. they must be joined first.\ntype Id string\n\n\/\/ String representation of the id (ex. for fmt), currently TitleCase.\nfunc (id Id) String() (ret string) {\n\tif id.Empty() {\n\t\tret = \"\"\n\t} else {\n\t\tparts := id.Split()\n\t\tfor i, s := range parts {\n\t\t\tparts[i] = lang.Capitalize(s)\n\t\t}\n\t\tret = strings.Join(parts, \"\")\n\t}\n\treturn ret\n}\n\nfunc (id Id) Empty() bool {\n\treturn len(id) == 0\n}\n\n\/\/ for some reason strings.Compare doesnt exist in go\/appengine:\n\/\/ theres this comment in the string source:\n\/\/ NOTE(rsc): ... Basically no one should use strings.Compare.\nfunc Compare(a, b Id) int {\n\tif a == b {\n\t\treturn 0\n\t}\n\tif a < b {\n\t\treturn -1\n\t}\n\treturn +1\n}\n\nfunc Join(a, b Id) Id {\n\treturn Id(strings.Join(append(a.Split(), b.Split()...), \"-\"))\n}\n\nfunc Empty() (ret Id) {\n\treturn\n}\n\nfunc MakeUniqueId() Id {\n\tstr := uuid.NewV4().String()\n\treturn Id(str)\n}\n\n\/\/ MakeId creates a new string id from the passed raw string.\n\/\/ Dashes and spaces are treated as word separators.\n\/\/ Other Illegal characters ( leading digits and non-word characters ) are stripped.\n\/\/ NOTE: Articles ( the, etc. ) are stripped for easier matching at the script\/table\/level.\nfunc MakeId(name string) Id {\n\tvar parts parts\n\tstarted, inword, wasUpper := false, false, false\n\n\tfor _, r := range name {\n\t\tif r == '-' || r == '_' || r == '=' || unicode.IsSpace(r) {\n\t\t\tinword = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ this test similar to go scanner's is identifier alg:\n\t\t\/\/ it has _, which we treat as a space, and\n\t\t\/\/ its i>0, but since we are stripping spaces, 'started' is better.\n\t\tif unicode.IsLetter(r) || (started && unicode.IsDigit(r)) {\n\t\t\tstarted = true\n\t\t\tnowUpper := unicode.IsUpper(r)\n\t\t\t\/\/ classify some common word changes\n\t\t\tsameWord := inword && ((wasUpper == nowUpper) || (wasUpper && !nowUpper))\n\t\t\tif nowUpper {\n\t\t\t\tr = unicode.ToLower(r)\n\t\t\t}\n\t\t\tif !sameWord {\n\t\t\t\tparts.flush()\n\t\t\t}\n\t\t\tparts.WriteRune(r) \/\/ docs say err is always nil\n\t\t\twasUpper = nowUpper\n\t\t\tinword = true\n\t\t}\n\t}\n\n\tdashed := strings.Join(parts.flush(), \"-\")\n\treturn Id(dashed)\n}\n\n\/\/ Split the id into separated, lower cased components.\nfunc (id Id) Split() []string {\n\tdashed := string(id)\n\treturn strings.Split(dashed, \"-\")\n}\n\ntype parts struct {\n\tbytes.Buffer\n\tarr []string\n}\n\nfunc (p *parts) flush() []string {\n\tif p.Len() > 0 {\n\t\tp.arr = append(p.arr, p.String())\n\t\tp.Reset()\n\t}\n\treturn p.arr\n}\n<|endoftext|>"} {"text":"\/\/ $G $D\/$F.go && $L $F.$A && .\/$A.out\n\n\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport \"runtime\"\n\nconst\tN\t= 1000;\t\t\/\/ sent messages\nconst\tM\t= 10;\t\t\/\/ receiving goroutines\nconst\tW\t= 2;\t\t\/\/ channel buffering\nvar\th\t[N]int;\t\t\/\/ marking of send\/recv\n\nfunc\nr(c chan int, m int) {\n\tfor {\n\t\tselect {\n\t\tcase r := <- c:\n\t\t\tif h[r] != 1 {\n\t\t\t\tpanicln(\"r\",\n\t\t\t\t\t\"m=\", m,\n\t\t\t\t\t\"r=\", r,\n\t\t\t\t\t\"h=\", h[r],\n\t\t\t\t);\n\t\t\t}\n\t\t\th[r] = 2;\n\t\t}\n\t}\n}\n\nfunc\ns(c chan int) {\n\tfor n:=0; ntrailing comma's are not accepted with current syntax\/\/ $G $D\/$F.go && $L $F.$A && .\/$A.out\n\n\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport \"runtime\"\n\nconst\tN\t= 1000;\t\t\/\/ sent messages\nconst\tM\t= 10;\t\t\/\/ receiving goroutines\nconst\tW\t= 2;\t\t\/\/ channel buffering\nvar\th\t[N]int;\t\t\/\/ marking of send\/recv\n\nfunc\nr(c chan int, m int) {\n\tfor {\n\t\tselect {\n\t\tcase r := <- c:\n\t\t\tif h[r] != 1 {\n\t\t\t\tpanicln(\"r\",\n\t\t\t\t\t\"m=\", m,\n\t\t\t\t\t\"r=\", r,\n\t\t\t\t\t\"h=\", h[r]);\n\t\t\t}\n\t\t\th[r] = 2;\n\t\t}\n\t}\n}\n\nfunc\ns(c chan int) {\n\tfor n:=0; n"} {"text":"package hive\n\nimport (\n\t\/\/\"fmt\"\n\t\"github.com\/eaciit\/toolkit\"\n\t. \"github.com\/frezadev\/hdc\/hive\"\n\t\/\/. \"github.com\/eaciit\/hdc\/hive\"\n\t\/\/ \"reflect\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n)\n\nvar h *Hive\nvar e error\n\ntype Sample7 struct {\n\tCode string `tag_name:\"code\"`\n\tDescription string `tag_name:\"description\"`\n\tTotal_emp string `tag_name:\"total_emp\"`\n\tSalary string `tag_name:\"salary\"`\n}\n\nfunc killApp(code int) {\n\tif h != nil {\n\t\th.Conn.Close()\n\t}\n\tos.Exit(code)\n}\n\nfunc fatalCheck(t *testing.T, what string, e error) {\n\tif e != nil {\n\t\tt.Fatalf(\"%s: %s\", what, e.Error())\n\t}\n}\n\n\/*func TestHiveConnect(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n}*\/\n\nfunc TestHiveExec(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\th.Conn.Open()\n\n\tresult, e := h.Exec(q)\n\n\tif e != nil {\n\t\tlog.Printf(\"error: \\n%v\\n\", e)\n\n\t} else {\n\t\tlog.Printf(\"result: \\n%v\\n\", result)\n\n\t\tfor _, res := range result {\n\t\t\tvar tmp toolkit.M\n\t\t\th.ParseOutput(res, &tmp)\n\t\t\tlog.Println(tmp)\n\t\t}\n\t}\n\n\th.Conn.Close()\n}\n\n\/* Populate will exec query and immidiately return the value into object\nPopulate is suitable for short type query that return limited data,\nExec is suitable for long type query that return massive amount of data and require time to produce it\n\nIdeally Populate should call Exec as well but already have predefined function on it receiving process\n*\/\nfunc TestHivePopulate(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\tvar obj toolkit.M\n\n\th.Conn.Open()\n\n\tresult, e := h.Populate(q, &obj)\n\tfatalCheck(t, \"Populate\", e)\n\n\tif len(result) != 5 {\n\t\tlog.Printf(\"Error want %d got %d\", 5, len(result))\n\t}\n\n\tlog.Printf(\"Result: %s\", toolkit.JsonString(result))\n\n\th.Conn.Close()\n}\n\n\/*func TestExecLine(t *testing.T) {\n\tvar e error\n\th = HiveConfig(\"192.168.0.223:10000\", \"hdfs\", \"\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\tlog.Println(\"---------------------- EXEC LINE ----------------\")\n\n\th.Conn.Open()\n\n\t\/\/ var DoSomething = func(res string) {\n\t\/\/ \ttmp := Sample7{}\n\t\/\/ \th.ParseOutput(res, &tmp)\n\t\/\/ \tfmt.Println(tmp)\n\t\/\/ }\n\n\t\/\/ e = h.ExecLine(q, DoSomething)\n\t\/\/ fmt.Printf(\"error: \\n%v\\n\", e)\n\n\th.Conn.Close()\n}*\/\n\n\/\/func main() {\n\n\/*fmt.Println(\"---------------------- EXEC LINE ----------------\")\n\n\/\/to execute query and read the result per line and then process its result\n\nvar DoSomething = func(res string) {\n\ttmp := Sample7{}\n\th.ParseOutput(res, &tmp)\n\tfmt.Println(tmp)\n}\n\ne = h.ExecLine(q, DoSomething)\nfmt.Printf(\"error: \\n%v\\n\", e)*\/\n\n\/*h = HiveConfig(\"192.168.0.223:10000\", \"default\", \"developer\", \"b1gD@T@\", nil)\nh.Header = []string{\"code\", \"description\", \"total_emp\", \"salary\"}\n\/\/ qTest := \"00-0000\tAll Occupations, asdfa,a dadsfasd\t134354250\t40690\"\n\/\/qTest := \"00-0000 All Occupations 134354250 40690\"\nqTest := \"00-0000,All Occupations asdfa a dadsfasd,134354250,40690\"\nvar result = Sample7{}\nh.ParseOutput(qTest, &result)\nfmt.Printf(\"result: %s\\n\", result.Code)\nfmt.Printf(\"result: %s\\n\", result.Description)\nfmt.Printf(\"result: %s\\n\", result.Total_emp)\nfmt.Printf(\"result: %s\\n\", result.Salary)*\/\n\/\/}\n\n\/\/ test := \"00-0000,All Occupations,134354250,40690\"\n\n\/*var x = Sample7{}\nvar z interface{}\nz = x\ns := reflect.ValueOf(&z).Elem()\ntypeOfT := s.Type()\nfmt.Println(reflect.ValueOf(&z).Interface())\nfor i := 0; i < s.NumField(); i++ {\n\tf := s.Field(i)\n\ttag := s.Type().Field(i).Tag\n\tfmt.Printf(\"%d: %s %s = %v | tag %s \\n\", i, typeOfT.Field(i).Name, f.Type(), f.Interface(), tag.Get(\"tag_name\"))\n\n}*\/\ntestingpackage hive\n\nimport (\n\t\/\/\"fmt\"\n\t\"github.com\/eaciit\/toolkit\"\n\t. \"github.com\/frezadev\/hdc\/hive\"\n\t\/\/. \"github.com\/eaciit\/hdc\/hive\"\n\t\/\/ \"reflect\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n)\n\nvar h *Hive\nvar e error\n\ntype Sample7 struct {\n\tCode string `tag_name:\"code\"`\n\tDescription string `tag_name:\"description\"`\n\tTotal_emp string `tag_name:\"total_emp\"`\n\tSalary string `tag_name:\"salary\"`\n}\n\nfunc killApp(code int) {\n\tif h != nil {\n\t\th.Conn.Close()\n\t}\n\tos.Exit(code)\n}\n\nfunc fatalCheck(t *testing.T, what string, e error) {\n\tif e != nil {\n\t\tt.Fatalf(\"%s: %s\", what, e.Error())\n\t}\n}\n\n\/*func TestHiveConnect(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n}*\/\n\nfunc TestHiveExec(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\th.Conn.Open()\n\n\tresult, e := h.Exec(q)\n\n\tif e != nil {\n\t\tlog.Printf(\"error: \\n%v\\n\", e)\n\n\t} else {\n\t\tlog.Printf(\"result: \\n%v\\n\", result)\n\n\t\tfor _, res := range result {\n\t\t\tvar tmp toolkit.M\n\t\t\th.ParseOutput(res, &tmp)\n\t\t\tlog.Println(tmp)\n\t\t}\n\t}\n\n\th.Conn.Close()\n}\n\n\/* Populate will exec query and immidiately return the value into object\nPopulate is suitable for short type query that return limited data,\nExec is suitable for long type query that return massive amount of data and require time to produce it\n\nIdeally Populate should call Exec as well but already have predefined function on it receiving process\n*\/\nfunc TestHivePopulate(t *testing.T) {\n\th = HiveConfig(\"192.168.0.223:10000\", \"default\", \"hdfs\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\tvar obj toolkit.M\n\n\th.Conn.Open()\n\n\tresult, e = h.Populate(q, &obj)\n\tfatalCheck(t, \"Populate\", e)\n\n\tif len(result) != 5 {\n\t\tlog.Printf(\"Error want %d got %d\", 5, len(result))\n\t}\n\n\tlog.Printf(\"Result: \\n%s\", toolkit.JsonString(result))\n\n\th.Conn.Close()\n}\n\n\/*func TestExecLine(t *testing.T) {\n\tvar e error\n\th = HiveConfig(\"192.168.0.223:10000\", \"hdfs\", \"\", \"\", \"\")\n\tq := \"select * from sample_07 limit 5;\"\n\n\tlog.Println(\"---------------------- EXEC LINE ----------------\")\n\n\th.Conn.Open()\n\n\t\/\/ var DoSomething = func(res string) {\n\t\/\/ \ttmp := Sample7{}\n\t\/\/ \th.ParseOutput(res, &tmp)\n\t\/\/ \tfmt.Println(tmp)\n\t\/\/ }\n\n\t\/\/ e = h.ExecLine(q, DoSomething)\n\t\/\/ fmt.Printf(\"error: \\n%v\\n\", e)\n\n\th.Conn.Close()\n}*\/\n\n\/\/func main() {\n\n\/*fmt.Println(\"---------------------- EXEC LINE ----------------\")\n\n\/\/to execute query and read the result per line and then process its result\n\nvar DoSomething = func(res string) {\n\ttmp := Sample7{}\n\th.ParseOutput(res, &tmp)\n\tfmt.Println(tmp)\n}\n\ne = h.ExecLine(q, DoSomething)\nfmt.Printf(\"error: \\n%v\\n\", e)*\/\n\n\/*h = HiveConfig(\"192.168.0.223:10000\", \"default\", \"developer\", \"b1gD@T@\", nil)\nh.Header = []string{\"code\", \"description\", \"total_emp\", \"salary\"}\n\/\/ qTest := \"00-0000\tAll Occupations, asdfa,a dadsfasd\t134354250\t40690\"\n\/\/qTest := \"00-0000 All Occupations 134354250 40690\"\nqTest := \"00-0000,All Occupations asdfa a dadsfasd,134354250,40690\"\nvar result = Sample7{}\nh.ParseOutput(qTest, &result)\nfmt.Printf(\"result: %s\\n\", result.Code)\nfmt.Printf(\"result: %s\\n\", result.Description)\nfmt.Printf(\"result: %s\\n\", result.Total_emp)\nfmt.Printf(\"result: %s\\n\", result.Salary)*\/\n\/\/}\n\n\/\/ test := \"00-0000,All Occupations,134354250,40690\"\n\n\/*var x = Sample7{}\nvar z interface{}\nz = x\ns := reflect.ValueOf(&z).Elem()\ntypeOfT := s.Type()\nfmt.Println(reflect.ValueOf(&z).Interface())\nfor i := 0; i < s.NumField(); i++ {\n\tf := s.Field(i)\n\ttag := s.Type().Field(i).Tag\n\tfmt.Printf(\"%d: %s %s = %v | tag %s \\n\", i, typeOfT.Field(i).Name, f.Type(), f.Interface(), tag.Get(\"tag_name\"))\n\n}*\/\n<|endoftext|>"} {"text":"\/\/ +build !appengine\n\npackage aetest\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\/internal\"\n)\n\n\/\/ NewInstance launches a running instance of api_server.py which can be used\n\/\/ for multiple test Contexts that delegate all App Engine API calls to that\n\/\/ instance.\n\/\/ If opts is nil the default values are used.\nfunc NewInstance(opts *Options) (Instance, error) {\n\ti := &instance{\n\t\topts: opts,\n\t\tappID: \"testapp\",\n\t}\n\tif opts != nil && opts.AppID != \"\" {\n\t\ti.appID = opts.AppID\n\t}\n\tif err := i.startChild(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn i, nil\n}\n\nfunc newSessionID() string {\n\tvar buf [16]byte\n\tio.ReadFull(rand.Reader, buf[:])\n\treturn fmt.Sprintf(\"%x\", buf[:])\n}\n\n\/\/ instance implements the Instance interface.\ntype instance struct {\n\topts *Options\n\tchild *exec.Cmd\n\tapiURL *url.URL \/\/ base URL of API HTTP server\n\tadminURL string \/\/ base URL of admin HTTP server\n\tappDir string\n\tappID string\n\trelFuncs []func() \/\/ funcs to release any associated contexts\n}\n\n\/\/ NewRequest returns an *http.Request associated with this instance.\nfunc (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Associate this request.\n\trelease := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {\n\t\tctx = internal.WithAppIDOverride(ctx, \"dev~\"+i.appID)\n\t\treturn ctx\n\t})\n\ti.relFuncs = append(i.relFuncs, release)\n\n\treturn req, nil\n}\n\n\/\/ Close kills the child api_server.py process, releasing its resources.\nfunc (i *instance) Close() (err error) {\n\tfor _, rel := range i.relFuncs {\n\t\trel()\n\t}\n\ti.relFuncs = nil\n\tif i.child == nil {\n\t\treturn nil\n\t}\n\tdefer func() {\n\t\ti.child = nil\n\t\terr1 := os.RemoveAll(i.appDir)\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tif p := i.child.Process; p != nil {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terrc <- i.child.Wait()\n\t\t}()\n\n\t\t\/\/ Call the quit handler on the admin server.\n\t\tres, err := http.Get(i.adminURL + \"\/quit\")\n\t\tif err != nil {\n\t\t\tp.Kill()\n\t\t\treturn fmt.Errorf(\"unable to call \/quit handler: %v\", err)\n\t\t}\n\t\tres.Body.Close()\n\n\t\tselect {\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tp.Kill()\n\t\t\treturn errors.New(\"timeout killing child process\")\n\t\tcase err = <-errc:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n\treturn\n}\n\nfunc fileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}\n\nfunc findPython() (path string, err error) {\n\tfor _, name := range []string{\"python2.7\", \"python\"} {\n\t\tpath, err = exec.LookPath(name)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc findDevAppserver() (string, error) {\n\tif p := os.Getenv(\"APPENGINE_DEV_APPSERVER\"); p != \"\" {\n\t\tif fileExists(p) {\n\t\t\treturn p, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist\", p)\n\t}\n\treturn exec.LookPath(\"dev_appserver.py\")\n}\n\nvar apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\\S+)`)\nvar adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\\S+)`)\n\nfunc (i *instance) startChild() (err error) {\n\tif PrepareDevAppserver != nil {\n\t\tif err := PrepareDevAppserver(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpython, err := findPython()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find python interpreter: %v\", err)\n\t}\n\tdevAppserver, err := findDevAppserver()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find dev_appserver.py: %v\", err)\n\t}\n\n\ti.appDir, err = ioutil.TempDir(\"\", \"appengine-aetest\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.RemoveAll(i.appDir)\n\t\t}\n\t}()\n\terr = os.Mkdir(filepath.Join(i.appDir, \"app\"), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"app.yaml\"), []byte(i.appYAML()), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"stubapp.go\"), []byte(appSource), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tappserverArgs := []string{\n\t\tdevAppserver,\n\t\t\"--port=0\",\n\t\t\"--api_port=0\",\n\t\t\"--admin_port=0\",\n\t\t\"--skip_sdk_update_check=true\",\n\t\t\"--clear_datastore=true\",\n\t\t\"--clear_search_indexes=true\",\n\t\t\"--datastore_path\", filepath.Join(i.appDir, \"datastore\"),\n\t}\n\tif i.opts != nil && i.opts.StronglyConsistentDatastore {\n\t\tappserverArgs = append(appserverArgs, \"--datastore_consistency_policy=consistent\")\n\t}\n\tappserverArgs = append(appserverArgs, filepath.Join(i.appDir, \"app\"))\n\n\ti.child = exec.Command(python,\n\t\tappserverArgs...,\n\t)\n\ti.child.Stdout = os.Stdout\n\tvar stderr io.Reader\n\tstderr, err = i.child.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr = io.TeeReader(stderr, os.Stderr)\n\tif err = i.child.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Wait until we have read the URLs of the API server and admin interface.\n\terrc := make(chan error, 1)\n\tapic := make(chan *url.URL)\n\tadminc := make(chan string)\n\tgo func() {\n\t\ts := bufio.NewScanner(stderr)\n\t\tfor s.Scan() {\n\t\t\tif match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {\n\t\t\t\tu, err := url.Parse(match[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrc <- fmt.Errorf(\"failed to parse API URL %q: %v\", match[1], err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tapic <- u\n\t\t\t}\n\t\t\tif match := adminServerAddrRE.FindSubmatch(s.Bytes()); match != nil {\n\t\t\t\tadminc <- string(match[1])\n\t\t\t}\n\t\t}\n\t\tif err = s.Err(); err != nil {\n\t\t\terrc <- err\n\t\t}\n\t}()\n\n\tfor i.apiURL == nil || i.adminURL == \"\" {\n\t\tselect {\n\t\tcase i.apiURL = <-apic:\n\t\tcase i.adminURL = <-adminc:\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tif p := i.child.Process; p != nil {\n\t\t\t\tp.Kill()\n\t\t\t}\n\t\t\treturn errors.New(\"timeout starting child process\")\n\t\tcase err := <-errc:\n\t\t\treturn fmt.Errorf(\"error reading child process stderr: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *instance) appYAML() string {\n\treturn fmt.Sprintf(appYAMLTemplate, i.appID)\n}\n\nconst appYAMLTemplate = `\napplication: %s\nversion: 1\nruntime: go\napi_version: go1\n\nhandlers:\n- url: \/.*\n script: _go_app\n`\n\nconst appSource = `\npackage nihilist\n\nfunc init() {}\n`\nappengine\/aetest: make aetest work with \"vm: true\" apps\/\/ +build !appengine\n\npackage aetest\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\/internal\"\n)\n\n\/\/ NewInstance launches a running instance of api_server.py which can be used\n\/\/ for multiple test Contexts that delegate all App Engine API calls to that\n\/\/ instance.\n\/\/ If opts is nil the default values are used.\nfunc NewInstance(opts *Options) (Instance, error) {\n\ti := &instance{\n\t\topts: opts,\n\t\tappID: \"testapp\",\n\t}\n\tif opts != nil && opts.AppID != \"\" {\n\t\ti.appID = opts.AppID\n\t}\n\tif err := i.startChild(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn i, nil\n}\n\nfunc newSessionID() string {\n\tvar buf [16]byte\n\tio.ReadFull(rand.Reader, buf[:])\n\treturn fmt.Sprintf(\"%x\", buf[:])\n}\n\n\/\/ instance implements the Instance interface.\ntype instance struct {\n\topts *Options\n\tchild *exec.Cmd\n\tapiURL *url.URL \/\/ base URL of API HTTP server\n\tadminURL string \/\/ base URL of admin HTTP server\n\tappDir string\n\tappID string\n\trelFuncs []func() \/\/ funcs to release any associated contexts\n}\n\n\/\/ NewRequest returns an *http.Request associated with this instance.\nfunc (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Associate this request.\n\trelease := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {\n\t\tctx = internal.WithAppIDOverride(ctx, \"dev~\"+i.appID)\n\t\treturn ctx\n\t})\n\ti.relFuncs = append(i.relFuncs, release)\n\n\treturn req, nil\n}\n\n\/\/ Close kills the child api_server.py process, releasing its resources.\nfunc (i *instance) Close() (err error) {\n\tfor _, rel := range i.relFuncs {\n\t\trel()\n\t}\n\ti.relFuncs = nil\n\tif i.child == nil {\n\t\treturn nil\n\t}\n\tdefer func() {\n\t\ti.child = nil\n\t\terr1 := os.RemoveAll(i.appDir)\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tif p := i.child.Process; p != nil {\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\terrc <- i.child.Wait()\n\t\t}()\n\n\t\t\/\/ Call the quit handler on the admin server.\n\t\tres, err := http.Get(i.adminURL + \"\/quit\")\n\t\tif err != nil {\n\t\t\tp.Kill()\n\t\t\treturn fmt.Errorf(\"unable to call \/quit handler: %v\", err)\n\t\t}\n\t\tres.Body.Close()\n\n\t\tselect {\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tp.Kill()\n\t\t\treturn errors.New(\"timeout killing child process\")\n\t\tcase err = <-errc:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n\treturn\n}\n\nfunc fileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}\n\nfunc findPython() (path string, err error) {\n\tfor _, name := range []string{\"python2.7\", \"python\"} {\n\t\tpath, err = exec.LookPath(name)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc findDevAppserver() (string, error) {\n\tif p := os.Getenv(\"APPENGINE_DEV_APPSERVER\"); p != \"\" {\n\t\tif fileExists(p) {\n\t\t\treturn p, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist\", p)\n\t}\n\treturn exec.LookPath(\"dev_appserver.py\")\n}\n\nvar apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\\S+)`)\nvar adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\\S+)`)\n\nfunc (i *instance) startChild() (err error) {\n\tif PrepareDevAppserver != nil {\n\t\tif err := PrepareDevAppserver(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpython, err := findPython()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find python interpreter: %v\", err)\n\t}\n\tdevAppserver, err := findDevAppserver()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find dev_appserver.py: %v\", err)\n\t}\n\n\ti.appDir, err = ioutil.TempDir(\"\", \"appengine-aetest\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.RemoveAll(i.appDir)\n\t\t}\n\t}()\n\terr = os.Mkdir(filepath.Join(i.appDir, \"app\"), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"app.yaml\"), []byte(i.appYAML()), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(filepath.Join(i.appDir, \"app\", \"stubapp.go\"), []byte(appSource), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tappserverArgs := []string{\n\t\tdevAppserver,\n\t\t\"--port=0\",\n\t\t\"--api_port=0\",\n\t\t\"--admin_port=0\",\n\t\t\"--skip_sdk_update_check=true\",\n\t\t\"--clear_datastore=true\",\n\t\t\"--clear_search_indexes=true\",\n\t\t\"--datastore_path\", filepath.Join(i.appDir, \"datastore\"),\n\t}\n\tif i.opts != nil && i.opts.StronglyConsistentDatastore {\n\t\tappserverArgs = append(appserverArgs, \"--datastore_consistency_policy=consistent\")\n\t}\n\tappserverArgs = append(appserverArgs, filepath.Join(i.appDir, \"app\"))\n\n\ti.child = exec.Command(python,\n\t\tappserverArgs...,\n\t)\n\ti.child.Stdout = os.Stdout\n\tvar stderr io.Reader\n\tstderr, err = i.child.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr = io.TeeReader(stderr, os.Stderr)\n\tif err = i.child.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Wait until we have read the URLs of the API server and admin interface.\n\terrc := make(chan error, 1)\n\tapic := make(chan *url.URL)\n\tadminc := make(chan string)\n\tgo func() {\n\t\ts := bufio.NewScanner(stderr)\n\t\tfor s.Scan() {\n\t\t\tif match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {\n\t\t\t\tu, err := url.Parse(match[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrc <- fmt.Errorf(\"failed to parse API URL %q: %v\", match[1], err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tapic <- u\n\t\t\t}\n\t\t\tif match := adminServerAddrRE.FindSubmatch(s.Bytes()); match != nil {\n\t\t\t\tadminc <- string(match[1])\n\t\t\t}\n\t\t}\n\t\tif err = s.Err(); err != nil {\n\t\t\terrc <- err\n\t\t}\n\t}()\n\n\tfor i.apiURL == nil || i.adminURL == \"\" {\n\t\tselect {\n\t\tcase i.apiURL = <-apic:\n\t\tcase i.adminURL = <-adminc:\n\t\tcase <-time.After(15 * time.Second):\n\t\t\tif p := i.child.Process; p != nil {\n\t\t\t\tp.Kill()\n\t\t\t}\n\t\t\treturn errors.New(\"timeout starting child process\")\n\t\tcase err := <-errc:\n\t\t\treturn fmt.Errorf(\"error reading child process stderr: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *instance) appYAML() string {\n\treturn fmt.Sprintf(appYAMLTemplate, i.appID)\n}\n\nconst appYAMLTemplate = `\napplication: %s\nversion: 1\nruntime: go\napi_version: go1\nvm: true\n\nhandlers:\n- url: \/.*\n script: _go_app\n`\n\nconst appSource = `\npackage main\nimport \"google.golang.org\/appengine\"\nfunc main() { appengine.Main() }\n`\n<|endoftext|>"} {"text":"package agent\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n)\n\nconst KeyPrefix = \"\/discover\"\n\ntype EtcdBackend struct {\n\tClient *etcd.Client\n}\n\nfunc servicePath(name, addr string) string {\n\tif addr == \"\" {\n\t\treturn KeyPrefix + \"\/services\/\" + name\n\t}\n\treturn KeyPrefix + \"\/services\/\" + name + \"\/\" + addr\n}\n\nfunc (b *EtcdBackend) Subscribe(name string) (UpdateStream, error) {\n\tstream := &etcdStream{ch: make(chan *ServiceUpdate), stop: make(chan bool)}\n\twatch := b.getStateChanges(name, stream.stop)\n\tresponse, _ := b.getCurrentState(name)\n\tgo func() {\n\t\tif response != nil {\n\t\t\tfor _, n := range response.Node.Nodes {\n\t\t\t\tif update := b.responseToUpdate(response, &n); update != nil {\n\t\t\t\t\tstream.ch <- update\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tstream.ch <- &ServiceUpdate{}\n\t\tfor resp := range watch {\n\t\t\tif update := b.responseToUpdate(resp, resp.Node); update != nil {\n\t\t\t\tstream.ch <- update\n\t\t\t}\n\t\t}\n\t}()\n\treturn stream, nil\n}\n\ntype etcdStream struct {\n\tch chan *ServiceUpdate\n\tstop chan bool\n\tstopOnce sync.Once\n}\n\nfunc (s *etcdStream) Chan() chan *ServiceUpdate { return s.ch }\n\nfunc (s *etcdStream) Close() { s.stopOnce.Do(func() { close(s.stop) }) }\n\nfunc (b *EtcdBackend) responseToUpdate(resp *etcd.Response, node *etcd.Node) *ServiceUpdate {\n\t\/\/ expected key structure: \/PREFIX\/services\/NAME\/ADDR\n\tsplitKey := strings.SplitN(node.Key, \"\/\", 5)\n\tif len(splitKey) < 5 {\n\t\treturn nil\n\t}\n\tserviceName := splitKey[3]\n\tserviceAddr := splitKey[4]\n\t\/\/if \"get\" == resp.Action || (\"set\" == resp.Action && node.Value != node.PrevValue) {\n\t\/\/ TODO: the above is broken right now. until then what happens if we don't ignore heartbeats?\n\t\/\/ should be resolved soon: http:\/\/thread.gmane.org\/gmane.comp.distributed.etcd\/56\n\tif \"get\" == resp.Action || \"set\" == resp.Action {\n\t\t\/\/ GET is because getCurrentState returns responses of Action GET.\n\t\t\/\/ some SETs are heartbeats, so we ignore SETs where value didn't change.\n\t\tvar serviceAttrs map[string]string\n\t\terr := json.Unmarshal([]byte(node.Value), &serviceAttrs)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &ServiceUpdate{\n\t\t\tName: serviceName,\n\t\t\tAddr: serviceAddr,\n\t\t\tOnline: true,\n\t\t\tAttrs: serviceAttrs,\n\t\t\tCreated: uint(node.CreatedIndex),\n\t\t}\n\t} else if \"delete\" == resp.Action || \"expire\" == resp.Action {\n\t\treturn &ServiceUpdate{\n\t\t\tName: serviceName,\n\t\t\tAddr: serviceAddr,\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (b *EtcdBackend) getCurrentState(name string) (*etcd.Response, error) {\n\treturn b.Client.Get(servicePath(name, \"\"), false, true)\n}\n\nfunc (b *EtcdBackend) getStateChanges(name string, stop chan bool) chan *etcd.Response {\n\twatch := make(chan *etcd.Response)\n\tgo b.Client.Watch(servicePath(name, \"\"), 0, true, watch, stop)\n\treturn watch\n}\n\nfunc (b *EtcdBackend) Register(name, addr string, attrs map[string]string) error {\n\tattrsJson, err := json.Marshal(attrs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = b.Client.Set(servicePath(name, addr), string(attrsJson), HeartbeatIntervalSecs+MissedHearbeatTTL)\n\treturn err\n}\n\nfunc (b *EtcdBackend) Heartbeat(name, addr string) error {\n\tresp, err := b.Client.Get(servicePath(name, addr), false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ ignore test failure, it doesn't need a heartbeat if it was just set.\n\t_, err = b.Client.CompareAndSwap(servicePath(name, addr), resp.Node.Value, HeartbeatIntervalSecs+MissedHearbeatTTL, resp.Node.Value, resp.Node.ModifiedIndex)\n\treturn err\n}\n\nfunc (b *EtcdBackend) Unregister(name, addr string) error {\n\t_, err := b.Client.Delete(servicePath(name, addr), false)\n\treturn err\n}\ndiscoverd: Restore logic to ignore heartbeatspackage agent\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n)\n\nconst KeyPrefix = \"\/discover\"\n\ntype EtcdBackend struct {\n\tClient *etcd.Client\n}\n\nfunc servicePath(name, addr string) string {\n\tif addr == \"\" {\n\t\treturn KeyPrefix + \"\/services\/\" + name\n\t}\n\treturn KeyPrefix + \"\/services\/\" + name + \"\/\" + addr\n}\n\nfunc (b *EtcdBackend) Subscribe(name string) (UpdateStream, error) {\n\tstream := &etcdStream{ch: make(chan *ServiceUpdate), stop: make(chan bool)}\n\twatch := b.getStateChanges(name, stream.stop)\n\tresponse, _ := b.getCurrentState(name)\n\tgo func() {\n\t\tif response != nil {\n\t\t\tfor _, n := range response.Node.Nodes {\n\t\t\t\tif update := b.responseToUpdate(response, &n); update != nil {\n\t\t\t\t\tstream.ch <- update\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tstream.ch <- &ServiceUpdate{}\n\t\tfor resp := range watch {\n\t\t\tif update := b.responseToUpdate(resp, resp.Node); update != nil {\n\t\t\t\tstream.ch <- update\n\t\t\t}\n\t\t}\n\t}()\n\treturn stream, nil\n}\n\ntype etcdStream struct {\n\tch chan *ServiceUpdate\n\tstop chan bool\n\tstopOnce sync.Once\n}\n\nfunc (s *etcdStream) Chan() chan *ServiceUpdate { return s.ch }\n\nfunc (s *etcdStream) Close() { s.stopOnce.Do(func() { close(s.stop) }) }\n\nfunc (b *EtcdBackend) responseToUpdate(resp *etcd.Response, node *etcd.Node) *ServiceUpdate {\n\t\/\/ expected key structure: \/PREFIX\/services\/NAME\/ADDR\n\tsplitKey := strings.SplitN(node.Key, \"\/\", 5)\n\tif len(splitKey) < 5 {\n\t\treturn nil\n\t}\n\tserviceName := splitKey[3]\n\tserviceAddr := splitKey[4]\n\tif \"get\" == resp.Action || \"set\" == resp.Action && (resp.PrevNode == nil || node.Value != resp.PrevNode.Value) {\n\t\t\/\/ GET is because getCurrentState returns responses of Action GET.\n\t\t\/\/ some SETs are heartbeats, so we ignore SETs where value didn't change.\n\t\tvar serviceAttrs map[string]string\n\t\terr := json.Unmarshal([]byte(node.Value), &serviceAttrs)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &ServiceUpdate{\n\t\t\tName: serviceName,\n\t\t\tAddr: serviceAddr,\n\t\t\tOnline: true,\n\t\t\tAttrs: serviceAttrs,\n\t\t\tCreated: uint(node.CreatedIndex),\n\t\t}\n\t} else if \"delete\" == resp.Action || \"expire\" == resp.Action {\n\t\treturn &ServiceUpdate{\n\t\t\tName: serviceName,\n\t\t\tAddr: serviceAddr,\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (b *EtcdBackend) getCurrentState(name string) (*etcd.Response, error) {\n\treturn b.Client.Get(servicePath(name, \"\"), false, true)\n}\n\nfunc (b *EtcdBackend) getStateChanges(name string, stop chan bool) chan *etcd.Response {\n\twatch := make(chan *etcd.Response)\n\tgo b.Client.Watch(servicePath(name, \"\"), 0, true, watch, stop)\n\treturn watch\n}\n\nfunc (b *EtcdBackend) Register(name, addr string, attrs map[string]string) error {\n\tattrsJson, err := json.Marshal(attrs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = b.Client.Set(servicePath(name, addr), string(attrsJson), HeartbeatIntervalSecs+MissedHearbeatTTL)\n\treturn err\n}\n\nfunc (b *EtcdBackend) Heartbeat(name, addr string) error {\n\tresp, err := b.Client.Get(servicePath(name, addr), false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ ignore test failure, it doesn't need a heartbeat if it was just set.\n\t_, err = b.Client.CompareAndSwap(servicePath(name, addr), resp.Node.Value, HeartbeatIntervalSecs+MissedHearbeatTTL, resp.Node.Value, resp.Node.ModifiedIndex)\n\treturn err\n}\n\nfunc (b *EtcdBackend) Unregister(name, addr string) error {\n\t_, err := b.Client.Delete(servicePath(name, addr), false)\n\treturn err\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"exec\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ run is a simple wrapper for exec.Run\/Close\nfunc run(envv []string, dir string, argv ...string) os.Error {\n\tif *verbose {\n\t\tlog.Println(\"run\", argv)\n\t}\n\tif runtime.GOOS == \"windows\" && isBash(argv[0]) {\n\t\t\/\/ shell script cannot be executed directly on Windows.\n\t\targv = append([]string{\"bash\", \"-c\"}, argv...)\n\t}\n\tbin, err := lookPath(argv[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\tp, err := exec.Run(bin, argv, envv, dir,\n\t\texec.DevNull, exec.DevNull, exec.PassThrough)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.Close()\n}\n\n\/\/ runLog runs a process and returns the combined stdout\/stderr, \n\/\/ as well as writing it to logfile (if specified).\nfunc runLog(envv []string, logfile, dir string, argv ...string) (output string, exitStatus int, err os.Error) {\n\tif *verbose {\n\t\tlog.Println(\"runLog\", argv)\n\t}\n\tif runtime.GOOS == \"windows\" && isBash(argv[0]) {\n\t\t\/\/ shell script cannot be executed directly on Windows.\n\t\targv = append([]string{\"bash\", \"-c\"}, argv...)\n\t}\n\tbin, err := lookPath(argv[0])\n\tif err != nil {\n\t\treturn\n\t}\n\tp, err := exec.Run(bin, argv, envv, dir,\n\t\texec.DevNull, exec.Pipe, exec.MergeWithStdout)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer p.Close()\n\tb := new(bytes.Buffer)\n\tvar w io.Writer = b\n\tif logfile != \"\" {\n\t\tf, err := os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t\tw = io.MultiWriter(f, b)\n\t}\n\t_, err = io.Copy(w, p.Stdout)\n\tif err != nil {\n\t\treturn\n\t}\n\twait, err := p.Wait(0)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn b.String(), wait.WaitStatus.ExitStatus(), nil\n}\n\n\/\/ lookPath looks for cmd in $PATH if cmd does not begin with \/ or .\/ or ..\/.\nfunc lookPath(cmd string) (string, os.Error) {\n\tif strings.HasPrefix(cmd, \"\/\") || strings.HasPrefix(cmd, \".\/\") || strings.HasPrefix(cmd, \"..\/\") {\n\t\treturn cmd, nil\n\t}\n\treturn exec.LookPath(cmd)\n}\n\n\/\/ isBash determines if name refers to a shell script.\nfunc isBash(name string) bool {\n\t\/\/ TODO(brainman): perhaps it is too simple and needs better check.\n\treturn strings.HasSuffix(name, \".bash\")\n}\ngobuilder: remove some windows-specificity\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"exec\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ run is a simple wrapper for exec.Run\/Close\nfunc run(envv []string, dir string, argv ...string) os.Error {\n\tif *verbose {\n\t\tlog.Println(\"run\", argv)\n\t}\n\targv = useBash(argv)\n\tbin, err := lookPath(argv[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\tp, err := exec.Run(bin, argv, envv, dir,\n\t\texec.DevNull, exec.DevNull, exec.PassThrough)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.Close()\n}\n\n\/\/ runLog runs a process and returns the combined stdout\/stderr, \n\/\/ as well as writing it to logfile (if specified).\nfunc runLog(envv []string, logfile, dir string, argv ...string) (output string, exitStatus int, err os.Error) {\n\tif *verbose {\n\t\tlog.Println(\"runLog\", argv)\n\t}\n\targv = useBash(argv)\n\tbin, err := lookPath(argv[0])\n\tif err != nil {\n\t\treturn\n\t}\n\tp, err := exec.Run(bin, argv, envv, dir,\n\t\texec.DevNull, exec.Pipe, exec.MergeWithStdout)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer p.Close()\n\tb := new(bytes.Buffer)\n\tvar w io.Writer = b\n\tif logfile != \"\" {\n\t\tf, err := os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t\tw = io.MultiWriter(f, b)\n\t}\n\t_, err = io.Copy(w, p.Stdout)\n\tif err != nil {\n\t\treturn\n\t}\n\twait, err := p.Wait(0)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn b.String(), wait.WaitStatus.ExitStatus(), nil\n}\n\n\/\/ lookPath looks for cmd in $PATH if cmd does not begin with \/ or .\/ or ..\/.\nfunc lookPath(cmd string) (string, os.Error) {\n\tif strings.HasPrefix(cmd, \"\/\") || strings.HasPrefix(cmd, \".\/\") || strings.HasPrefix(cmd, \"..\/\") {\n\t\treturn cmd, nil\n\t}\n\treturn exec.LookPath(cmd)\n}\n\n\/\/ useBash prefixes a list of args with 'bash' if the first argument\n\/\/ is a bash script.\nfunc useBash(argv []string) []string {\n\t\/\/ TODO(brainman): choose a more reliable heuristic here.\n\tif strings.HasSuffix(argv[0], \".bash\") {\n\t\targv = append([]string{\"bash\"}, argv...)\n\t}\n\treturn argv\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"container\/vector\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tcodeProject = \"go\"\n\tcodePyScript = \"misc\/dashboard\/googlecode_upload.py\"\n\thgUrl = \"https:\/\/go.googlecode.com\/hg\/\"\n\twaitInterval = 10e9 \/\/ time to wait before checking for new revs\n\tmkdirPerm = 0750\n)\n\ntype Builder struct {\n\tname string\n\tgoos, goarch string\n\tkey string\n\tcodeUsername string\n\tcodePassword string\n}\n\ntype BenchRequest struct {\n\tbuilder *Builder\n\tcommit Commit\n\tpath string\n}\n\nvar (\n\tdashboard = flag.String(\"dashboard\", \"godashboard.appspot.com\", \"Go Dashboard Host\")\n\trunBenchmarks = flag.Bool(\"bench\", false, \"Run benchmarks\")\n\tbuildRelease = flag.Bool(\"release\", false, \"Build and upload binary release archives\")\n\tbuildRevision = flag.String(\"rev\", \"\", \"Build specified revision and exit\")\n\tbuildCmd = flag.String(\"cmd\", \".\/all.bash\", \"Build command (specify absolute or relative to go\/src\/)\")\n)\n\nvar (\n\tbuildroot = path.Join(os.TempDir(), \"gobuilder\")\n\tgoroot = path.Join(buildroot, \"goroot\")\n\treleaseRegexp = regexp.MustCompile(`^release\\.[0-9\\-]+`)\n\tbenchRequests vector.Vector\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s goos-goarch...\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tos.Exit(2)\n\t}\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t}\n\tbuilders := make([]*Builder, len(flag.Args()))\n\tfor i, builder := range flag.Args() {\n\t\tb, err := NewBuilder(builder)\n\t\tif err != nil {\n\t\t\tlog.Exit(err)\n\t\t}\n\t\tbuilders[i] = b\n\t}\n\tif err := os.RemoveAll(buildroot); err != nil {\n\t\tlog.Exitf(\"Error removing build root (%s): %s\", buildroot, err)\n\t}\n\tif err := os.Mkdir(buildroot, mkdirPerm); err != nil {\n\t\tlog.Exitf(\"Error making build root (%s): %s\", buildroot, err)\n\t}\n\tif err := run(nil, buildroot, \"hg\", \"clone\", hgUrl, goroot); err != nil {\n\t\tlog.Exit(\"Error cloning repository:\", err)\n\t}\n\t\/\/ if specified, build revision and return\n\tif *buildRevision != \"\" {\n\t\tc, err := getCommit(*buildRevision)\n\t\tif err != nil {\n\t\t\tlog.Exit(\"Error finding revision: \", err)\n\t\t}\n\t\tfor _, b := range builders {\n\t\t\tif err := b.buildCommit(c); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\trunQueuedBenchmark()\n\t\t}\n\t\treturn\n\t}\n\t\/\/ check for new commits and build them\n\tfor {\n\t\terr := run(nil, goroot, \"hg\", \"pull\", \"-u\")\n\t\tif err != nil {\n\t\t\tlog.Println(\"hg pull failed:\", err)\n\t\t\ttime.Sleep(waitInterval)\n\t\t\tcontinue\n\t\t}\n\t\tbuilt := false\n\t\tfor _, b := range builders {\n\t\t\tif b.build() {\n\t\t\t\tbuilt = true\n\t\t\t}\n\t\t}\n\t\t\/\/ only run benchmarks if we didn't build anything\n\t\t\/\/ so that they don't hold up the builder queue\n\t\tif !built {\n\t\t\tif !runQueuedBenchmark() {\n\t\t\t\t\/\/ if we have no benchmarks to do, pause\n\t\t\t\ttime.Sleep(waitInterval)\n\t\t\t}\n\t\t\t\/\/ after running one benchmark, \n\t\t\t\/\/ continue to find and build new revisions.\n\t\t}\n\t}\n}\n\nfunc runQueuedBenchmark() bool {\n\tif benchRequests.Len() == 0 {\n\t\treturn false\n\t}\n\trunBenchmark(benchRequests.Pop().(BenchRequest))\n\treturn true\n}\n\nfunc runBenchmark(r BenchRequest) {\n\t\/\/ run benchmarks and send to dashboard\n\tlog.Println(r.builder.name, \"benchmarking\", r.commit.num)\n\tdefer os.RemoveAll(r.path)\n\tpkg := path.Join(r.path, \"go\", \"src\", \"pkg\")\n\tbin := path.Join(r.path, \"go\", \"bin\")\n\tenv := []string{\n\t\t\"GOOS=\" + r.builder.goos,\n\t\t\"GOARCH=\" + r.builder.goarch,\n\t\t\"PATH=\" + bin + \":\" + os.Getenv(\"PATH\"),\n\t}\n\tlogfile := path.Join(r.path, \"bench.log\")\n\tbenchLog, _, err := runLog(env, logfile, pkg, \"gomake\", \"bench\")\n\tif err != nil {\n\t\tlog.Println(r.builder.name, \"gomake bench:\", err)\n\t\treturn\n\t}\n\tif err = r.builder.recordBenchmarks(benchLog, r.commit); err != nil {\n\t\tlog.Println(\"recordBenchmarks:\", err)\n\t}\n}\n\nfunc NewBuilder(builder string) (*Builder, os.Error) {\n\tb := &Builder{name: builder}\n\n\t\/\/ get goos\/goarch from builder string\n\ts := strings.Split(builder, \"-\", 3)\n\tif len(s) == 2 {\n\t\tb.goos, b.goarch = s[0], s[1]\n\t} else {\n\t\treturn nil, fmt.Errorf(\"unsupported builder form: %s\", builder)\n\t}\n\n\t\/\/ read keys from keyfile\n\tfn := path.Join(os.Getenv(\"HOME\"), \".gobuildkey\")\n\tif s := fn + \"-\" + b.name; isFile(s) { \/\/ builder-specific file\n\t\tfn = s\n\t}\n\tc, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"readKeys %s (%s): %s\", b.name, fn, err)\n\t}\n\tv := strings.Split(string(c), \"\\n\", -1)\n\tb.key = v[0]\n\tif len(v) >= 3 {\n\t\tb.codeUsername, b.codePassword = v[1], v[2]\n\t}\n\n\treturn b, nil\n}\n\n\/\/ build checks for a new commit for this builder\n\/\/ and builds it if one is found. \n\/\/ It returns true if a build was attempted.\nfunc (b *Builder) build() bool {\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tlog.Println(b.name, \"build:\", err)\n\t\t}\n\t}()\n\tc, err := b.nextCommit()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false\n\t}\n\tif c == nil {\n\t\treturn false\n\t}\n\terr = b.buildCommit(*c)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn true\n}\n\n\/\/ nextCommit returns the next unbuilt Commit for this builder\nfunc (b *Builder) nextCommit() (nextC *Commit, err os.Error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s nextCommit: %s\", b.name, err)\n\t\t}\n\t}()\n\thw, err := b.getHighWater()\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := getCommit(hw)\n\tif err != nil {\n\t\treturn\n\t}\n\tnext := c.num + 1\n\tc, err = getCommit(strconv.Itoa(next))\n\tif err == nil && c.num == next {\n\t\treturn &c, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (b *Builder) buildCommit(c Commit) (err os.Error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s buildCommit: %d: %s\", b.name, c.num, err)\n\t\t}\n\t}()\n\n\tlog.Println(b.name, \"building\", c.num)\n\n\t\/\/ create place in which to do work\n\tworkpath := path.Join(buildroot, b.name+\"-\"+strconv.Itoa(c.num))\n\terr = os.Mkdir(workpath, mkdirPerm)\n\tif err != nil {\n\t\treturn\n\t}\n\tbenchRequested := false\n\tdefer func() {\n\t\tif !benchRequested {\n\t\t\tos.RemoveAll(workpath)\n\t\t}\n\t}()\n\n\t\/\/ clone repo\n\terr = run(nil, workpath, \"hg\", \"clone\", goroot, \"go\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ update to specified revision\n\terr = run(nil, path.Join(workpath, \"go\"),\n\t\t\"hg\", \"update\", \"-r\", strconv.Itoa(c.num))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ set up environment for build\/bench execution\n\tenv := []string{\n\t\t\"GOOS=\" + b.goos,\n\t\t\"GOARCH=\" + b.goarch,\n\t\t\"GOROOT_FINAL=\/usr\/local\/go\",\n\t\t\"PATH=\" + os.Getenv(\"PATH\"),\n\t}\n\tsrcDir := path.Join(workpath, \"go\", \"src\")\n\n\t\/\/ build\n\tlogfile := path.Join(workpath, \"build.log\")\n\tbuildLog, status, err := runLog(env, logfile, srcDir, *buildCmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"all.bash: %s\", err)\n\t}\n\tif status != 0 {\n\t\t\/\/ record failure\n\t\treturn b.recordResult(buildLog, c)\n\t}\n\n\t\/\/ record success\n\tif err = b.recordResult(\"\", c); err != nil {\n\t\treturn fmt.Errorf(\"recordResult: %s\", err)\n\t}\n\n\t\/\/ send benchmark request if benchmarks are enabled\n\tif *runBenchmarks {\n\t\tbenchRequests.Insert(0, BenchRequest{\n\t\t\tbuilder: b,\n\t\t\tcommit: c,\n\t\t\tpath: workpath,\n\t\t})\n\t\tbenchRequested = true\n\t}\n\n\t\/\/ finish here if codeUsername and codePassword aren't set\n\tif b.codeUsername == \"\" || b.codePassword == \"\" || !*buildRelease {\n\t\treturn\n\t}\n\n\t\/\/ if this is a release, create tgz and upload to google code\n\tif release := releaseRegexp.FindString(c.desc); release != \"\" {\n\t\t\/\/ clean out build state\n\t\terr = run(env, srcDir, \".\/clean.bash\", \"--nopkg\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"clean.bash: %s\", err)\n\t\t}\n\t\t\/\/ upload binary release\n\t\tfn := fmt.Sprintf(\"%s.%s-%s.tar.gz\", release, b.goos, b.goarch)\n\t\terr = run(nil, workpath, \"tar\", \"czf\", fn, \"go\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tar: %s\", err)\n\t\t}\n\t\terr = run(nil, workpath, path.Join(goroot, codePyScript),\n\t\t\t\"-s\", release,\n\t\t\t\"-p\", codeProject,\n\t\t\t\"-u\", b.codeUsername,\n\t\t\t\"-w\", b.codePassword,\n\t\t\t\"-l\", fmt.Sprintf(\"%s,%s\", b.goos, b.goarch),\n\t\t\tfn)\n\t}\n\n\treturn\n}\n\nfunc isDirectory(name string) bool {\n\ts, err := os.Stat(name)\n\treturn err == nil && s.IsDirectory()\n}\n\nfunc isFile(name string) bool {\n\ts, err := os.Stat(name)\n\treturn err == nil && (s.IsRegular() || s.IsSymlink())\n}\nbuilder: pass GOHOSTOS and GOHOSTARCH to buildpackage main\n\nimport (\n\t\"container\/vector\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tcodeProject = \"go\"\n\tcodePyScript = \"misc\/dashboard\/googlecode_upload.py\"\n\thgUrl = \"https:\/\/go.googlecode.com\/hg\/\"\n\twaitInterval = 10e9 \/\/ time to wait before checking for new revs\n\tmkdirPerm = 0750\n)\n\ntype Builder struct {\n\tname string\n\tgoos, goarch string\n\tkey string\n\tcodeUsername string\n\tcodePassword string\n}\n\ntype BenchRequest struct {\n\tbuilder *Builder\n\tcommit Commit\n\tpath string\n}\n\nvar (\n\tdashboard = flag.String(\"dashboard\", \"godashboard.appspot.com\", \"Go Dashboard Host\")\n\trunBenchmarks = flag.Bool(\"bench\", false, \"Run benchmarks\")\n\tbuildRelease = flag.Bool(\"release\", false, \"Build and upload binary release archives\")\n\tbuildRevision = flag.String(\"rev\", \"\", \"Build specified revision and exit\")\n\tbuildCmd = flag.String(\"cmd\", \".\/all.bash\", \"Build command (specify absolute or relative to go\/src\/)\")\n)\n\nvar (\n\tbuildroot = path.Join(os.TempDir(), \"gobuilder\")\n\tgoroot = path.Join(buildroot, \"goroot\")\n\treleaseRegexp = regexp.MustCompile(`^release\\.[0-9\\-]+`)\n\tbenchRequests vector.Vector\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s goos-goarch...\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tos.Exit(2)\n\t}\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t}\n\tbuilders := make([]*Builder, len(flag.Args()))\n\tfor i, builder := range flag.Args() {\n\t\tb, err := NewBuilder(builder)\n\t\tif err != nil {\n\t\t\tlog.Exit(err)\n\t\t}\n\t\tbuilders[i] = b\n\t}\n\tif err := os.RemoveAll(buildroot); err != nil {\n\t\tlog.Exitf(\"Error removing build root (%s): %s\", buildroot, err)\n\t}\n\tif err := os.Mkdir(buildroot, mkdirPerm); err != nil {\n\t\tlog.Exitf(\"Error making build root (%s): %s\", buildroot, err)\n\t}\n\tif err := run(nil, buildroot, \"hg\", \"clone\", hgUrl, goroot); err != nil {\n\t\tlog.Exit(\"Error cloning repository:\", err)\n\t}\n\t\/\/ if specified, build revision and return\n\tif *buildRevision != \"\" {\n\t\tc, err := getCommit(*buildRevision)\n\t\tif err != nil {\n\t\t\tlog.Exit(\"Error finding revision: \", err)\n\t\t}\n\t\tfor _, b := range builders {\n\t\t\tif err := b.buildCommit(c); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\trunQueuedBenchmark()\n\t\t}\n\t\treturn\n\t}\n\t\/\/ check for new commits and build them\n\tfor {\n\t\terr := run(nil, goroot, \"hg\", \"pull\", \"-u\")\n\t\tif err != nil {\n\t\t\tlog.Println(\"hg pull failed:\", err)\n\t\t\ttime.Sleep(waitInterval)\n\t\t\tcontinue\n\t\t}\n\t\tbuilt := false\n\t\tfor _, b := range builders {\n\t\t\tif b.build() {\n\t\t\t\tbuilt = true\n\t\t\t}\n\t\t}\n\t\t\/\/ only run benchmarks if we didn't build anything\n\t\t\/\/ so that they don't hold up the builder queue\n\t\tif !built {\n\t\t\tif !runQueuedBenchmark() {\n\t\t\t\t\/\/ if we have no benchmarks to do, pause\n\t\t\t\ttime.Sleep(waitInterval)\n\t\t\t}\n\t\t\t\/\/ after running one benchmark, \n\t\t\t\/\/ continue to find and build new revisions.\n\t\t}\n\t}\n}\n\nfunc runQueuedBenchmark() bool {\n\tif benchRequests.Len() == 0 {\n\t\treturn false\n\t}\n\trunBenchmark(benchRequests.Pop().(BenchRequest))\n\treturn true\n}\n\nfunc runBenchmark(r BenchRequest) {\n\t\/\/ run benchmarks and send to dashboard\n\tlog.Println(r.builder.name, \"benchmarking\", r.commit.num)\n\tdefer os.RemoveAll(r.path)\n\tpkg := path.Join(r.path, \"go\", \"src\", \"pkg\")\n\tbin := path.Join(r.path, \"go\", \"bin\")\n\tenv := []string{\n\t\t\"GOOS=\" + r.builder.goos,\n\t\t\"GOARCH=\" + r.builder.goarch,\n\t\t\"PATH=\" + bin + \":\" + os.Getenv(\"PATH\"),\n\t}\n\tlogfile := path.Join(r.path, \"bench.log\")\n\tbenchLog, _, err := runLog(env, logfile, pkg, \"gomake\", \"bench\")\n\tif err != nil {\n\t\tlog.Println(r.builder.name, \"gomake bench:\", err)\n\t\treturn\n\t}\n\tif err = r.builder.recordBenchmarks(benchLog, r.commit); err != nil {\n\t\tlog.Println(\"recordBenchmarks:\", err)\n\t}\n}\n\nfunc NewBuilder(builder string) (*Builder, os.Error) {\n\tb := &Builder{name: builder}\n\n\t\/\/ get goos\/goarch from builder string\n\ts := strings.Split(builder, \"-\", 3)\n\tif len(s) == 2 {\n\t\tb.goos, b.goarch = s[0], s[1]\n\t} else {\n\t\treturn nil, fmt.Errorf(\"unsupported builder form: %s\", builder)\n\t}\n\n\t\/\/ read keys from keyfile\n\tfn := path.Join(os.Getenv(\"HOME\"), \".gobuildkey\")\n\tif s := fn + \"-\" + b.name; isFile(s) { \/\/ builder-specific file\n\t\tfn = s\n\t}\n\tc, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"readKeys %s (%s): %s\", b.name, fn, err)\n\t}\n\tv := strings.Split(string(c), \"\\n\", -1)\n\tb.key = v[0]\n\tif len(v) >= 3 {\n\t\tb.codeUsername, b.codePassword = v[1], v[2]\n\t}\n\n\treturn b, nil\n}\n\n\/\/ build checks for a new commit for this builder\n\/\/ and builds it if one is found. \n\/\/ It returns true if a build was attempted.\nfunc (b *Builder) build() bool {\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tlog.Println(b.name, \"build:\", err)\n\t\t}\n\t}()\n\tc, err := b.nextCommit()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false\n\t}\n\tif c == nil {\n\t\treturn false\n\t}\n\terr = b.buildCommit(*c)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn true\n}\n\n\/\/ nextCommit returns the next unbuilt Commit for this builder\nfunc (b *Builder) nextCommit() (nextC *Commit, err os.Error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s nextCommit: %s\", b.name, err)\n\t\t}\n\t}()\n\thw, err := b.getHighWater()\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := getCommit(hw)\n\tif err != nil {\n\t\treturn\n\t}\n\tnext := c.num + 1\n\tc, err = getCommit(strconv.Itoa(next))\n\tif err == nil && c.num == next {\n\t\treturn &c, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (b *Builder) buildCommit(c Commit) (err os.Error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s buildCommit: %d: %s\", b.name, c.num, err)\n\t\t}\n\t}()\n\n\tlog.Println(b.name, \"building\", c.num)\n\n\t\/\/ create place in which to do work\n\tworkpath := path.Join(buildroot, b.name+\"-\"+strconv.Itoa(c.num))\n\terr = os.Mkdir(workpath, mkdirPerm)\n\tif err != nil {\n\t\treturn\n\t}\n\tbenchRequested := false\n\tdefer func() {\n\t\tif !benchRequested {\n\t\t\tos.RemoveAll(workpath)\n\t\t}\n\t}()\n\n\t\/\/ clone repo\n\terr = run(nil, workpath, \"hg\", \"clone\", goroot, \"go\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ update to specified revision\n\terr = run(nil, path.Join(workpath, \"go\"),\n\t\t\"hg\", \"update\", \"-r\", strconv.Itoa(c.num))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ set up environment for build\/bench execution\n\tenv := []string{\n\t\t\"GOOS=\" + b.goos,\n\t\t\"GOARCH=\" + b.goarch,\n\t\t\"GOHOSTOS=\" + os.Getenv(\"GOHOSTOS\"),\n\t\t\"GOHOSTARCH=\" + os.Getenv(\"GOHOSTARCH\"),\n\t\t\"GOROOT_FINAL=\/usr\/local\/go\",\n\t\t\"PATH=\" + os.Getenv(\"PATH\"),\n\t}\n\tsrcDir := path.Join(workpath, \"go\", \"src\")\n\n\t\/\/ build\n\tlogfile := path.Join(workpath, \"build.log\")\n\tbuildLog, status, err := runLog(env, logfile, srcDir, *buildCmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"all.bash: %s\", err)\n\t}\n\tif status != 0 {\n\t\t\/\/ record failure\n\t\treturn b.recordResult(buildLog, c)\n\t}\n\n\t\/\/ record success\n\tif err = b.recordResult(\"\", c); err != nil {\n\t\treturn fmt.Errorf(\"recordResult: %s\", err)\n\t}\n\n\t\/\/ send benchmark request if benchmarks are enabled\n\tif *runBenchmarks {\n\t\tbenchRequests.Insert(0, BenchRequest{\n\t\t\tbuilder: b,\n\t\t\tcommit: c,\n\t\t\tpath: workpath,\n\t\t})\n\t\tbenchRequested = true\n\t}\n\n\t\/\/ finish here if codeUsername and codePassword aren't set\n\tif b.codeUsername == \"\" || b.codePassword == \"\" || !*buildRelease {\n\t\treturn\n\t}\n\n\t\/\/ if this is a release, create tgz and upload to google code\n\tif release := releaseRegexp.FindString(c.desc); release != \"\" {\n\t\t\/\/ clean out build state\n\t\terr = run(env, srcDir, \".\/clean.bash\", \"--nopkg\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"clean.bash: %s\", err)\n\t\t}\n\t\t\/\/ upload binary release\n\t\tfn := fmt.Sprintf(\"%s.%s-%s.tar.gz\", release, b.goos, b.goarch)\n\t\terr = run(nil, workpath, \"tar\", \"czf\", fn, \"go\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tar: %s\", err)\n\t\t}\n\t\terr = run(nil, workpath, path.Join(goroot, codePyScript),\n\t\t\t\"-s\", release,\n\t\t\t\"-p\", codeProject,\n\t\t\t\"-u\", b.codeUsername,\n\t\t\t\"-w\", b.codePassword,\n\t\t\t\"-l\", fmt.Sprintf(\"%s,%s\", b.goos, b.goarch),\n\t\t\tfn)\n\t}\n\n\treturn\n}\n\nfunc isDirectory(name string) bool {\n\ts, err := os.Stat(name)\n\treturn err == nil && s.IsDirectory()\n}\n\nfunc isFile(name string) bool {\n\ts, err := os.Stat(name)\n\treturn err == nil && (s.IsRegular() || s.IsSymlink())\n}\n<|endoftext|>"} {"text":"\npackage crypto\nimport (\n\t\"testing\"\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestEncrypt(t *testing.T) {\n\tauthKey := []byte(\"545e0716f2fea0c7a9c46c74fec46c71\")\n\texpected := map[string]interface{}{\"entity\": \"something2\"}\n\tres, _ := Encrypt(expected, authKey, authKey)\n\tr, _ := Decrypt(res, authKey, authKey)\n\tassert.Equal(t, expected, r)\n}\n\nfunc TestDecrypt(t *testing.T) {\n\tauthKey := []byte(\"545e0716f2fea0c7a9c46c74fec46c71\")\n\tinput := \"WmtlMxl4d_VTfUYnl-A0Uycpr2e3VswKDwoPd03XtoY=.JZMk6FZloYh5BL0K7dHGSyTqB4lTgd9annrFEgLTELnxR3bHweL2\"\n\texpected := map[string]interface{}{\"entity\": \"something2\"}\n\tr, _ := Decrypt(input, authKey, authKey)\n\tassert.Equal(t, expected, r)\n\t\n}\nRemove fmt import since no more use, need to add git hooks :-\/\npackage crypto\nimport (\n\t\"testing\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestEncrypt(t *testing.T) {\n\tauthKey := []byte(\"545e0716f2fea0c7a9c46c74fec46c71\")\n\texpected := map[string]interface{}{\"entity\": \"something2\"}\n\tres, _ := Encrypt(expected, authKey, authKey)\n\tr, _ := Decrypt(res, authKey, authKey)\n\tassert.Equal(t, expected, r)\n}\n\nfunc TestDecrypt(t *testing.T) {\n\tauthKey := []byte(\"545e0716f2fea0c7a9c46c74fec46c71\")\n\tinput := \"WmtlMxl4d_VTfUYnl-A0Uycpr2e3VswKDwoPd03XtoY=.JZMk6FZloYh5BL0K7dHGSyTqB4lTgd9annrFEgLTELnxR3bHweL2\"\n\texpected := map[string]interface{}{\"entity\": \"something2\"}\n\tr, _ := Decrypt(input, authKey, authKey)\n\tassert.Equal(t, expected, r)\n\t\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"testing\"\n)\n\nfunc TestParseMaintainer(t *testing.T) {\n\tcases := []struct {\n\t\tin []byte\n\t\twant *Maintainer\n\t}{\n\t\t{[]byte(\"JohnDoe\"), &Maintainer{Name: \"JohnDoe\"}},\n\t\t{[]byte(\"John Doe\"), &Maintainer{Name: \"John Doe\"}},\n\t\t{[]byte(\" John Doe\"), &Maintainer{Name: \"John Doe\"}},\n\t\t{[]byte(\"John Doe \"), &Maintainer{Name: \"John Doe\"}},\n\t}\n\tfor _, c := range cases {\n\t\tgot, _ := ParseMaintainer(c.in)\n\t\tif got != c.want {\n\t\t\tt.Errorf(\"ParseMaintainer(%q) == %v, want %v\", c.in, got, c.want)\n\t\t}\n\t}\n}\nFix a test not to compare structs directlypackage main\n\nimport (\n\t\"testing\"\n)\n\nfunc TestParseMaintainer(t *testing.T) {\n\tcases := []struct {\n\t\tin []byte\n\t\twant *Maintainer\n\t}{\n\t\t{[]byte(\"JohnDoe\"), &Maintainer{Name: \"JohnDoe\"}},\n\t\t{[]byte(\"John Doe\"), &Maintainer{Name: \"John Doe\"}},\n\t\t{[]byte(\" John Doe\"), &Maintainer{Name: \"John Doe\"}},\n\t\t{[]byte(\"John Doe \"), &Maintainer{Name: \"John Doe\"}},\n\t}\n\tfor _, c := range cases {\n\t\tgot, _ := ParseMaintainer(c.in)\n\t\tif got.Name != c.want.Name {\n\t\t\tt.Errorf(\n\t\t\t\t\"ParseMaintainer(%q).Name == %q, want %q\",\n\t\t\t\tc.in,\n\t\t\t\tgot.Name,\n\t\t\t\tc.want.Name,\n\t\t\t)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage updater\n\nimport (\n\t\"time\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\t\"github.com\/keybase\/client\/go\/updater\/sources\"\n)\n\ntype UpdateChecker struct {\n\tupdater *Updater\n\tui UI\n\tticker *time.Ticker\n\tlog logger.Logger\n}\n\nfunc NewUpdateChecker(updater *Updater, ui UI, log logger.Logger) UpdateChecker {\n\treturn UpdateChecker{\n\t\tupdater: updater,\n\t\tui: ui,\n\t\tlog: log,\n\t}\n}\n\nfunc (u *UpdateChecker) Check(force bool, requested bool) error {\n\tif !requested && !force {\n\t\tif lastCheckedPTime := u.updater.config.GetUpdateLastChecked(); lastCheckedPTime > 0 {\n\t\t\tlastChecked := keybase1.FromTime(lastCheckedPTime)\n\t\t\tif time.Now().Before(lastChecked.Add(checkDuration())) {\n\t\t\t\tu.log.Debug(\"Already checked: %s\", lastChecked)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\t_, err := u.updater.Update(u.ui, force, requested)\n\tif err != nil {\n\t\treturn err\n\t}\n\tu.log.Debug(\"Updater checked\")\n\tu.updater.config.SetUpdateLastChecked(keybase1.ToTime(time.Now()))\n\treturn nil\n}\n\nfunc (u *UpdateChecker) Start() {\n\tif u.ticker != nil {\n\t\treturn\n\t}\n\tu.ticker = time.NewTicker(tickDuration())\n\tgo func() {\n\t\tfor _ = range u.ticker.C {\n\t\t\tu.log.Debug(\"Checking for update (ticker)\")\n\t\t\tu.Check(false, false)\n\t\t}\n\t}()\n}\n\nfunc (u *UpdateChecker) Stop() {\n\tu.ticker.Stop()\n\tu.ticker = nil\n}\n\n\/\/ checkDuration is how often to check for updates\nfunc checkDuration() time.Duration {\n\tif sources.IsPrerelease {\n\t\treturn time.Hour\n\t}\n\treturn 24 * time.Hour\n}\n\n\/\/ tickDuration is how often to call check (should be less than checkDuration or snooze min)\nfunc tickDuration() time.Duration {\n\tif sources.IsPrerelease {\n\t\treturn 15 * time.Minute\n\t}\n\treturn time.Hour\n}\nRetry update check if took a long time\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage updater\n\nimport (\n\t\"time\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\t\"github.com\/keybase\/client\/go\/updater\/sources\"\n)\n\ntype UpdateChecker struct {\n\tupdater *Updater\n\tui UI\n\tticker *time.Ticker\n\tlog logger.Logger\n}\n\nfunc NewUpdateChecker(updater *Updater, ui UI, log logger.Logger) UpdateChecker {\n\treturn UpdateChecker{\n\t\tupdater: updater,\n\t\tui: ui,\n\t\tlog: log,\n\t}\n}\n\nfunc (u *UpdateChecker) check(force bool, requested bool) error {\n\tupdateStart := time.Now()\n\t_, err := u.updater.Update(u.ui, force, requested)\n\tupdateEnd := time.Now()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Because updates might have been single-flighted, an update could have been\n\t\/\/ made available while the update prompt was showing. For example, if the\n\t\/\/ update prompt showed at 3am on Saturday morning and the person came back\n\t\/\/ Monday, they could have missed an update that happened in the meantime.\n\t\/\/ A quick fix for this situation is to run the updater check again if the\n\t\/\/ update took along time.\n\tif updateEnd.Sub(updateStart) > time.Minute*15 {\n\t\tu.log.Info(\"Update took a long time to run, so let's check again\")\n\t\treturn u.check(force, requested)\n\t}\n\treturn nil\n}\n\n\/\/ Check checks for an update. If not requested (by user) or forced it will\n\/\/ exit early if check has already been applied within checkDuration().\nfunc (u *UpdateChecker) Check(force bool, requested bool) error {\n\tif !requested && !force {\n\t\tif lastCheckedPTime := u.updater.config.GetUpdateLastChecked(); lastCheckedPTime > 0 {\n\t\t\tlastChecked := keybase1.FromTime(lastCheckedPTime)\n\t\t\tif time.Now().Before(lastChecked.Add(checkDuration())) {\n\t\t\t\tu.log.Debug(\"Already checked: %s\", lastChecked)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tcheckTime := time.Now()\n\t\/\/ This may block awhile if the updater prompt is left shown for a long time\n\terr := u.check(force, requested)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.log.Debug(\"Saving updater last checked: %s\", checkTime)\n\tu.updater.config.SetUpdateLastChecked(keybase1.ToTime(checkTime))\n\treturn nil\n}\n\nfunc (u *UpdateChecker) Start() {\n\tif u.ticker != nil {\n\t\treturn\n\t}\n\tu.ticker = time.NewTicker(tickDuration())\n\tgo func() {\n\t\tfor _ = range u.ticker.C {\n\t\t\tu.log.Debug(\"Checking for update (ticker)\")\n\t\t\terr := u.Check(false, false)\n\t\t\tif err != nil {\n\t\t\t\tu.log.Errorf(\"Error in update: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (u *UpdateChecker) Stop() {\n\tu.ticker.Stop()\n\tu.ticker = nil\n}\n\n\/\/ checkDuration is how often to check for updates\nfunc checkDuration() time.Duration {\n\tif sources.IsPrerelease {\n\t\treturn time.Hour\n\t}\n\treturn 24 * time.Hour\n}\n\n\/\/ tickDuration is how often to call check (should be less than checkDuration or snooze min)\nfunc tickDuration() time.Duration {\n\tif sources.IsPrerelease {\n\t\treturn 15 * time.Minute\n\t}\n\treturn time.Hour\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/asamy45\/steam\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\ttimeTip, err := steam.GetTimeTip()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Time tip: %#v\\n\", timeTip)\n\n\ttimeDiff := time.Duration(timeTip.Time - time.Now().Unix())\n\tsession := steam.Session{}\n\tif err := session.Login(os.Getenv(\"steamAccount\"), os.Getenv(\"steamPassword\"), os.Getenv(\"steamSharedSecret\"), timeDiff); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Login successful\")\n\n\tprofileURL, err := session.GetProfileURL()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Profile URL: %s\\n\", profileURL)\n\n\tprofileSetting := uint8( \/*Profile*\/ steam.PrivacyStatePublic | \/*Inventory*\/ steam.PrivacyStatePublic<<2 | \/*Gifts*\/ steam.PrivacyStatePublic<<4)\n\tif err = session.SetProfilePrivacy(profileURL, steam.CommentSettingSelf, profileSetting); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Done editing profile: %d\", profileSetting)\n\n\tprofileInfo := map[string][]string{\n\t\t\"personaName\": {\"MasterOfTests\"},\n\t\t\"summary\": {\"i am just a test, go away\"},\n\t\t\"customURL\": {\"therealtesterOFDOOM\"},\n\t}\n\tif err = session.SetProfileInfo(profileURL, &profileInfo); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Done editing profile info\")\n\n\tmyToken, err := session.GetMyTradeToken()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Trade offer token: %s\\n\", myToken)\n\n\tkey, err := session.GetWebAPIKey()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Key: \", key)\n\n\tsid := steam.SteamID(76561198078821986)\n\tinven, err := session.GetInventory(sid, 730, 2, false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, item := range inven {\n\t\tlog.Printf(\"Item: %s = %d\\n\", item.Desc.MarketHashName, item.AssetID)\n\t}\n\n\tmarketPrices, err := session.GetMarketItemPriceHistory(730, \"P90 | Asiimov (Factory New)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, v := range marketPrices {\n\t\tlog.Printf(\"%s -> %.2f (%s of same price)\\n\", v.Date, v.Price, v.Count)\n\t}\n\n\toverview, err := session.GetMarketItemPriceOverview(730, \"DE\", \"3\", \"P90 | Asiimov (Factory New)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif overview.Success {\n\t\tlog.Println(\"Price overfiew for P90 Asiimov FN:\")\n\t\tlog.Printf(\"Volume: %s\\n\", overview.Volume)\n\t\tlog.Printf(\"Lowest price: %s Median Price: %s\", overview.LowestPrice, overview.MedianPrice)\n\t}\n\n\tresp, err := session.GetTradeOffers(steam.TradeFilterSentOffers, time.Now())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar receiptID uint64\n\tfor _, offer := range resp.SentOffers {\n\t\tvar sid steam.SteamID\n\t\tsid.Parse(offer.Partner, steam.AccountInstanceDesktop, steam.AccountTypeIndividual, steam.UniversePublic)\n\n\t\tif receiptID == 0 && len(offer.ReceiveItems) != 0 && offer.State == steam.TradeStateAccepted {\n\t\t\treceiptID = offer.ReceiptID\n\t\t}\n\n\t\tlog.Printf(\"Offer id: %d, Receipt ID: %d\", offer.ID, offer.ReceiptID)\n\t\tlog.Printf(\"Offer partner SteamID 64: %d\", uint64(sid))\n\t}\n\n\titems, err := session.GetTradeReceivedItems(receiptID)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, item := range items {\n\t\tlog.Printf(\"New asset id: %d\", item.AssetID)\n\t}\n\n\tidentity := os.Getenv(\"steamIdentitySecret\")\n\tconfirmations, err := session.GetConfirmations(identity, time.Now().Add(timeDiff).Unix())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor i := range confirmations {\n\t\tc := confirmations[i]\n\t\tlog.Printf(\"Confirmation ID: %d, Key: %d\\n\", c.ID, c.Key)\n\t\tlog.Printf(\"-> Title %s\\n\", c.Title)\n\t\tlog.Printf(\"-> Receiving %s\\n\", c.Receiving)\n\t\tlog.Printf(\"-> Since %s\\n\", c.Since)\n\n\t\ttid, err := session.GetConfirmationOfferID(key, c.ID, time.Now().Add(timeDiff).Unix())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Printf(\"-> OfferID %d\\n\", tid)\n\n\t\terr = session.AnswerConfirmation(c, key, \"allow\", time.Now().Add(timeDiff).Unix())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"Accepted %d\\n\", c.ID)\n\t}\n\n\tlog.Println(\"Bye!\")\n}\ntypopackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/asamy45\/steam\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\ttimeTip, err := steam.GetTimeTip()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Time tip: %#v\\n\", timeTip)\n\n\ttimeDiff := time.Duration(timeTip.Time - time.Now().Unix())\n\tsession := steam.Session{}\n\tif err := session.Login(os.Getenv(\"steamAccount\"), os.Getenv(\"steamPassword\"), os.Getenv(\"steamSharedSecret\"), timeDiff); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Login successful\")\n\n\tprofileURL, err := session.GetProfileURL()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Profile URL: %s\\n\", profileURL)\n\n\tprofileSetting := uint8( \/*Profile*\/ steam.PrivacyStatePublic | \/*Inventory*\/ steam.PrivacyStatePublic<<2 | \/*Gifts*\/ steam.PrivacyStatePublic<<4)\n\tif err = session.SetProfilePrivacy(profileURL, steam.CommentSettingSelf, profileSetting); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Done editing profile: %d\", profileSetting)\n\n\tprofileInfo := map[string][]string{\n\t\t\"personaName\": {\"MasterOfTests\"},\n\t\t\"summary\": {\"i am just a test, go away\"},\n\t\t\"customURL\": {\"therealtesterOFDOOM\"},\n\t}\n\tif err = session.SetProfileInfo(profileURL, &profileInfo); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Done editing profile info\")\n\n\tmyToken, err := session.GetMyTradeToken()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Trade offer token: %s\\n\", myToken)\n\n\tkey, err := session.GetWebAPIKey()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Key: \", key)\n\n\tsid := steam.SteamID(76561198078821986)\n\tinven, err := session.GetInventory(sid, 730, 2, false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, item := range inven {\n\t\tlog.Printf(\"Item: %s = %d\\n\", item.Desc.MarketHashName, item.AssetID)\n\t}\n\n\tmarketPrices, err := session.GetMarketItemPriceHistory(730, \"P90 | Asiimov (Factory New)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, v := range marketPrices {\n\t\tlog.Printf(\"%s -> %.2f (%s of same price)\\n\", v.Date, v.Price, v.Count)\n\t}\n\n\toverview, err := session.GetMarketItemPriceOverview(730, \"DE\", \"3\", \"P90 | Asiimov (Factory New)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif overview.Success {\n\t\tlog.Println(\"Price overfiew for P90 Asiimov FN:\")\n\t\tlog.Printf(\"Volume: %s\\n\", overview.Volume)\n\t\tlog.Printf(\"Lowest price: %s Median Price: %s\", overview.LowestPrice, overview.MedianPrice)\n\t}\n\n\tresp, err := session.GetTradeOffers(steam.TradeFilterSentOffers, time.Now())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar receiptID uint64\n\tfor _, offer := range resp.SentOffers {\n\t\tvar sid steam.SteamID\n\t\tsid.Parse(offer.Partner, steam.AccountInstanceDesktop, steam.AccountTypeIndividual, steam.UniversePublic)\n\n\t\tif receiptID == 0 && len(offer.RecvItems) != 0 && offer.State == steam.TradeStateAccepted {\n\t\t\treceiptID = offer.ReceiptID\n\t\t}\n\n\t\tlog.Printf(\"Offer id: %d, Receipt ID: %d\", offer.ID, offer.ReceiptID)\n\t\tlog.Printf(\"Offer partner SteamID 64: %d\", uint64(sid))\n\t}\n\n\titems, err := session.GetTradeReceivedItems(receiptID)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, item := range items {\n\t\tlog.Printf(\"New asset id: %d\", item.AssetID)\n\t}\n\n\tidentity := os.Getenv(\"steamIdentitySecret\")\n\tconfirmations, err := session.GetConfirmations(identity, time.Now().Add(timeDiff).Unix())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor i := range confirmations {\n\t\tc := confirmations[i]\n\t\tlog.Printf(\"Confirmation ID: %d, Key: %d\\n\", c.ID, c.Key)\n\t\tlog.Printf(\"-> Title %s\\n\", c.Title)\n\t\tlog.Printf(\"-> Receiving %s\\n\", c.Receiving)\n\t\tlog.Printf(\"-> Since %s\\n\", c.Since)\n\n\t\ttid, err := session.GetConfirmationOfferID(key, c.ID, time.Now().Add(timeDiff).Unix())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Printf(\"-> OfferID %d\\n\", tid)\n\n\t\terr = session.AnswerConfirmation(c, key, \"allow\", time.Now().Add(timeDiff).Unix())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"Accepted %d\\n\", c.ID)\n\t}\n\n\tlog.Println(\"Bye!\")\n}\n<|endoftext|>"} {"text":"package runnerpool\n\nimport (\n\t\"context\"\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/stats\/view\"\n\t\"go.opencensus.io\/tag\"\n)\n\nvar (\n\tattemptCountMeasure = stats.Int64(\"lb_placer_attempt_count\", \"LB Placer Number of Runners Attempted Count\", \"\")\n\terrorPoolCountMeasure = stats.Int64(\"lb_placer_rp_error_count\", \"LB Placer RunnerPool RunnerList Error Count\", \"\")\n\temptyPoolCountMeasure = stats.Int64(\"lb_placer_rp_empty_count\", \"LB Placer RunnerPool RunnerList Empty Count\", \"\")\n\tcancelCountMeasure = stats.Int64(\"lb_placer_client_cancelled_count\", \"LB Placer Client Cancel Count\", \"\")\n\tplacedErrorCountMeasure = stats.Int64(\"lb_placer_placed_error_count\", \"LB Placer Placed Call Count With Errors\", \"\")\n\tplacedOKCountMeasure = stats.Int64(\"lb_placer_placed_ok_count\", \"LB Placer Placed Call Count Without Errors\", \"\")\n\tretryTooBusyCountMeasure = stats.Int64(\"lb_placer_retry_busy_count\", \"LB Placer Retry Count - Too Busy\", \"\")\n\tretryErrorCountMeasure = stats.Int64(\"lb_placer_retry_error_count\", \"LB Placer Retry Count - Errors\", \"\")\n\tplacerLatencyMeasure = stats.Int64(\"lb_placer_latency\", \"LB Placer Latency\", \"msecs\")\n)\n\n\/\/ Helper struct for tracking LB Placer latency and attempt counts\ntype attemptTracker struct {\n\tctx context.Context\n\tstartTime time.Time\n\tlastAttemptTime time.Time\n\tattemptCount int64\n}\n\nfunc newAttemptTracker(ctx context.Context) *attemptTracker {\n\treturn &attemptTracker{\n\t\tctx: ctx,\n\t\tstartTime: time.Now(),\n\t}\n}\n\nfunc (data *attemptTracker) finalizeAttempts(isSuccess bool) {\n\tstats.Record(data.ctx, attemptCountMeasure.M(data.attemptCount))\n\n\t\/\/ IMPORTANT: here we use (lastAttemptTime - startTime). We want to exclude TryExec\n\t\/\/ latency *if* TryExec() goes through with success. Placer latency metric only shows\n\t\/\/ how much time are spending in Placer loop\/retries. The metric includes rtt\/latency of\n\t\/\/ *all* unsuccessful NACK (retriable) responses from runners as well. For example, if\n\t\/\/ Placer loop here retries 4 runners (which takes 5 msecs each) and then 5th runner\n\t\/\/ succeeds (but takes 35 seconds to finish execution), we report 20 msecs as our LB\n\t\/\/ latency.\n\tendTime := data.lastAttemptTime\n\tif !isSuccess {\n\t\tendTime = time.Now()\n\t}\n\n\tstats.Record(data.ctx, placerLatencyMeasure.M(int64(endTime.Sub(data.startTime)\/time.Millisecond)))\n}\n\nfunc (data *attemptTracker) recordAttempt() {\n\tdata.lastAttemptTime = time.Now()\n\tif data.attemptCount != math.MaxInt64 {\n\t\tdata.attemptCount++\n\t}\n}\n\nfunc makeKeys(names []string) []tag.Key {\n\tvar tagKeys []tag.Key\n\tfor _, name := range names {\n\t\tkey, err := tag.NewKey(name)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatal(\"cannot create tag key for %v\", name)\n\t\t}\n\t\ttagKeys = append(tagKeys, key)\n\t}\n\treturn tagKeys\n}\n\nfunc createView(measure stats.Measure, agg *view.Aggregation, tagKeys []string) *view.View {\n\treturn &view.View{\n\t\tName: measure.Name(),\n\t\tDescription: measure.Description(),\n\t\tTagKeys: makeKeys(tagKeys),\n\t\tMeasure: measure,\n\t\tAggregation: agg,\n\t}\n}\n\nfunc RegisterPlacerViews(tagKeys []string) {\n\terr := view.Register(\n\t\tcreateView(attemptCountMeasure, view.Distribution(0, 1, 2, 4, 8, 32, 64, 256), tagKeys),\n\t\tcreateView(errorPoolCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(emptyPoolCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(cancelCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placedErrorCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placedOKCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(retryTooBusyCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(retryErrorCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placerLatencyMeasure, view.Distribution(1, 10, 25, 50, 200, 1000, 10000, 60000), tagKeys),\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"cannot create view\")\n\t}\n}\nAdd a finer-grained view for placer latency metrics (#1085)package runnerpool\n\nimport (\n\t\"context\"\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/stats\/view\"\n\t\"go.opencensus.io\/tag\"\n)\n\nvar (\n\tattemptCountMeasure = stats.Int64(\"lb_placer_attempt_count\", \"LB Placer Number of Runners Attempted Count\", \"\")\n\terrorPoolCountMeasure = stats.Int64(\"lb_placer_rp_error_count\", \"LB Placer RunnerPool RunnerList Error Count\", \"\")\n\temptyPoolCountMeasure = stats.Int64(\"lb_placer_rp_empty_count\", \"LB Placer RunnerPool RunnerList Empty Count\", \"\")\n\tcancelCountMeasure = stats.Int64(\"lb_placer_client_cancelled_count\", \"LB Placer Client Cancel Count\", \"\")\n\tplacedErrorCountMeasure = stats.Int64(\"lb_placer_placed_error_count\", \"LB Placer Placed Call Count With Errors\", \"\")\n\tplacedOKCountMeasure = stats.Int64(\"lb_placer_placed_ok_count\", \"LB Placer Placed Call Count Without Errors\", \"\")\n\tretryTooBusyCountMeasure = stats.Int64(\"lb_placer_retry_busy_count\", \"LB Placer Retry Count - Too Busy\", \"\")\n\tretryErrorCountMeasure = stats.Int64(\"lb_placer_retry_error_count\", \"LB Placer Retry Count - Errors\", \"\")\n\tplacerLatencyMeasure = stats.Int64(\"lb_placer_latency\", \"LB Placer Latency\", \"msecs\")\n)\n\n\/\/ Helper struct for tracking LB Placer latency and attempt counts\ntype attemptTracker struct {\n\tctx context.Context\n\tstartTime time.Time\n\tlastAttemptTime time.Time\n\tattemptCount int64\n}\n\nfunc newAttemptTracker(ctx context.Context) *attemptTracker {\n\treturn &attemptTracker{\n\t\tctx: ctx,\n\t\tstartTime: time.Now(),\n\t}\n}\n\nfunc (data *attemptTracker) finalizeAttempts(isSuccess bool) {\n\tstats.Record(data.ctx, attemptCountMeasure.M(data.attemptCount))\n\n\t\/\/ IMPORTANT: here we use (lastAttemptTime - startTime). We want to exclude TryExec\n\t\/\/ latency *if* TryExec() goes through with success. Placer latency metric only shows\n\t\/\/ how much time are spending in Placer loop\/retries. The metric includes rtt\/latency of\n\t\/\/ *all* unsuccessful NACK (retriable) responses from runners as well. For example, if\n\t\/\/ Placer loop here retries 4 runners (which takes 5 msecs each) and then 5th runner\n\t\/\/ succeeds (but takes 35 seconds to finish execution), we report 20 msecs as our LB\n\t\/\/ latency.\n\tendTime := data.lastAttemptTime\n\tif !isSuccess {\n\t\tendTime = time.Now()\n\t}\n\n\tstats.Record(data.ctx, placerLatencyMeasure.M(int64(endTime.Sub(data.startTime)\/time.Millisecond)))\n}\n\nfunc (data *attemptTracker) recordAttempt() {\n\tdata.lastAttemptTime = time.Now()\n\tif data.attemptCount != math.MaxInt64 {\n\t\tdata.attemptCount++\n\t}\n}\n\nfunc makeKeys(names []string) []tag.Key {\n\tvar tagKeys []tag.Key\n\tfor _, name := range names {\n\t\tkey, err := tag.NewKey(name)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatal(\"cannot create tag key for %v\", name)\n\t\t}\n\t\ttagKeys = append(tagKeys, key)\n\t}\n\treturn tagKeys\n}\n\nfunc createView(measure stats.Measure, agg *view.Aggregation, tagKeys []string) *view.View {\n\treturn &view.View{\n\t\tName: measure.Name(),\n\t\tDescription: measure.Description(),\n\t\tTagKeys: makeKeys(tagKeys),\n\t\tMeasure: measure,\n\t\tAggregation: agg,\n\t}\n}\n\nfunc RegisterPlacerViews(tagKeys []string) {\n\terr := view.Register(\n\t\tcreateView(attemptCountMeasure, view.Distribution(0, 1, 2, 4, 8, 32, 64, 256), tagKeys),\n\t\tcreateView(errorPoolCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(emptyPoolCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(cancelCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placedErrorCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placedOKCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(retryTooBusyCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(retryErrorCountMeasure, view.Count(), tagKeys),\n\t\tcreateView(placerLatencyMeasure, view.Distribution(1, 10, 25, 50, 200, 1000, 1500, 2000, 2500, 3000, 10000, 60000), tagKeys),\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"cannot create view\")\n\t}\n}\n<|endoftext|>"} {"text":"package utils\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\n\t\"github.com\/zenoss\/glog\"\n\tdocker \"github.com\/zenoss\/go-dockerclient\"\n)\n\n\/\/ connection to the docker client\nvar dockerClient *docker.Client\n\n\/\/ Opens a connection to docker if not already connected\nfunc connectDocker() (*docker.Client, error) {\n\tif dockerClient == nil {\n\t\tconst DockerEndpoint string = \"unix:\/\/\/var\/run\/docker.sock\"\n\t\tvar err error\n\t\tif dockerClient, err = docker.NewClient(DockerEndpoint); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not create a client to docker: %s\", err)\n\t\t}\n\t}\n\treturn dockerClient, nil\n}\n\n\/\/ getPIDFromDockerID returns the pid of a docker container\nfunc getPIDFromDockerID(containerID string) (string, error) {\n\t\/\/ retrieve host PID from containerID\n\tdockerClient, err := connectDocker()\n\tif err != nil {\n\t\tglog.Errorf(\"could not attach to docker client error:%v\\n\\n\", err)\n\t\treturn \"\", err\n\t}\n\tcontainer, err := dockerClient.InspectContainer(containerID)\n\tif err != nil {\n\t\tglog.Errorf(\"could not inspect container error:%v\\n\\n\", err)\n\t\treturn \"\", err\n\t}\n\n\tpid := fmt.Sprintf(\"%d\", container.State.Pid)\n\treturn pid, nil\n}\n\n\/\/ ExecNSEnter execs the command using nsenter\nfunc ExecNSEnter(containerID string, bashcmd []string) error {\n\tcommand, err := generateNSEnterCommand(containerID, bashcmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglog.V(1).Infof(\"exec command for container:%v command: %v\\n\", containerID, command)\n\treturn syscall.Exec(command[0], command[0:], os.Environ())\n}\n\n\/\/ RunNSEnter runs the command using nsenter\nfunc RunNSEnter(containerID string, bashcmd []string) ([]byte, error) {\n\tcommand, err := generateNSEnterCommand(containerID, bashcmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tthecmd := exec.Command(command[0], command[1:]...)\n\toutput, err := thecmd.CombinedOutput()\n\tif err != nil {\n\t\tglog.Errorf(\"Error running command:'%s' output: %s error: %s\\n\", command, output, err)\n\t\treturn output, err\n\t}\n\tglog.V(1).Infof(\"Successfully ran command:'%s' output: %s\\n\", command, output)\n\treturn output, nil\n}\n\n\/\/ generateNSEnterCommand returns a slice containing nsenter command to exec\nfunc generateNSEnterCommand(containerID string, bashcmd []string) ([]string, error) {\n\tif containerID == \"\" {\n\t\treturn []string{}, fmt.Errorf(\"will not attach to container with empty containerID\")\n\t}\n\n\texeMap, err := exePaths([]string{\"sudo\", \"nsenter\"})\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tpid, err := getPIDFromDockerID(containerID)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tattachCmd := []string{exeMap[\"sudo\"], exeMap[\"nsenter\"], \"-m\", \"-u\", \"-i\", \"-n\", \"-p\", \"-t\", pid, \"--\"}\n\tattachCmd = append(attachCmd, bashcmd...)\n\tglog.V(1).Infof(\"attach command for container:%v command: %v\\n\", containerID, attachCmd)\n\treturn attachCmd, nil\n}\n\n\/\/ AttachAndRun attaches to a container and runs the command\nfunc AttachAndRun(containerID string, bashcmd []string) ([]byte, error) {\n\t_, err := exec.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn RunNSInitWithRetry(containerID, bashcmd)\n\t}\n\n\treturn RunNSEnter(containerID, bashcmd)\n}\n\n\/\/ AttachAndExec attaches to a container and execs the command\nfunc AttachAndExec(containerID string, bashcmd []string) error {\n\t_, err := exec.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn ExecNSInitWithRetry(containerID, bashcmd)\n\t}\n\n\treturn ExecNSEnter(containerID, bashcmd)\n}prepend bash -c for RunNSEnter() - leave ExecNSEnter alonepackage utils\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/zenoss\/glog\"\n\tdocker \"github.com\/zenoss\/go-dockerclient\"\n)\n\n\/\/ connection to the docker client\nvar dockerClient *docker.Client\n\n\/\/ Opens a connection to docker if not already connected\nfunc connectDocker() (*docker.Client, error) {\n\tif dockerClient == nil {\n\t\tconst DockerEndpoint string = \"unix:\/\/\/var\/run\/docker.sock\"\n\t\tvar err error\n\t\tif dockerClient, err = docker.NewClient(DockerEndpoint); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not create a client to docker: %s\", err)\n\t\t}\n\t}\n\treturn dockerClient, nil\n}\n\n\/\/ getPIDFromDockerID returns the pid of a docker container\nfunc getPIDFromDockerID(containerID string) (string, error) {\n\t\/\/ retrieve host PID from containerID\n\tdockerClient, err := connectDocker()\n\tif err != nil {\n\t\tglog.Errorf(\"could not attach to docker client error:%v\\n\\n\", err)\n\t\treturn \"\", err\n\t}\n\tcontainer, err := dockerClient.InspectContainer(containerID)\n\tif err != nil {\n\t\tglog.Errorf(\"could not inspect container error:%v\\n\\n\", err)\n\t\treturn \"\", err\n\t}\n\n\tpid := fmt.Sprintf(\"%d\", container.State.Pid)\n\treturn pid, nil\n}\n\n\/\/ ExecNSEnter execs the command using nsenter\nfunc ExecNSEnter(containerID string, bashcmd []string) error {\n\tcommand, err := generateNSEnterCommand(containerID, bashcmd, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglog.V(1).Infof(\"exec command for container:%v command: %v\\n\", containerID, command)\n\treturn syscall.Exec(command[0], command[0:], os.Environ())\n}\n\n\/\/ RunNSEnter runs the command using nsenter\nfunc RunNSEnter(containerID string, bashcmd []string) ([]byte, error) {\n\tcommand, err := generateNSEnterCommand(containerID, bashcmd, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tthecmd := exec.Command(command[0], command[1:]...)\n\toutput, err := thecmd.CombinedOutput()\n\tif err != nil {\n\t\tglog.Errorf(\"Error running command:'%s' output: %s error: %s\\n\", command, output, err)\n\t\treturn output, err\n\t}\n\tglog.V(1).Infof(\"Successfully ran command:'%s' output: %s\\n\", command, output)\n\treturn output, nil\n}\n\n\/\/ generateNSEnterCommand returns a slice containing nsenter command to exec\nfunc generateNSEnterCommand(containerID string, bashcmd []string, prependBash bool) ([]string, error) {\n\tif containerID == \"\" {\n\t\treturn []string{}, fmt.Errorf(\"will not attach to container with empty containerID\")\n\t}\n\n\texeMap, err := exePaths([]string{\"sudo\", \"nsenter\"})\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tpid, err := getPIDFromDockerID(containerID)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tattachCmd := []string{exeMap[\"sudo\"], exeMap[\"nsenter\"], \"-m\", \"-u\", \"-i\", \"-n\", \"-p\", \"-t\", pid, \"--\"}\n\tif prependBash {\n\t\tattachCmd = append(attachCmd, \"\/bin\/bash\", \"-c\", fmt.Sprintf(\"%s\", strings.Join(bashcmd, \" \")))\n\t} else {\n\t\tattachCmd = append(attachCmd, bashcmd...)\n\t}\n\tglog.V(1).Infof(\"attach command for container:%v command: %v\\n\", containerID, attachCmd)\n\treturn attachCmd, nil\n}\n\n\/\/ AttachAndRun attaches to a container and runs the command\nfunc AttachAndRun(containerID string, bashcmd []string) ([]byte, error) {\n\t_, err := exec.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn RunNSInitWithRetry(containerID, bashcmd)\n\t}\n\n\treturn RunNSEnter(containerID, bashcmd)\n}\n\n\/\/ AttachAndExec attaches to a container and execs the command\nfunc AttachAndExec(containerID string, bashcmd []string) error {\n\t_, err := exec.LookPath(\"nsenter\")\n\tif err != nil {\n\t\treturn ExecNSInitWithRetry(containerID, bashcmd)\n\t}\n\n\treturn ExecNSEnter(containerID, bashcmd)\n}<|endoftext|>"} {"text":"package utils\n\nimport (\n \"runtime\"\n \"strings\"\n \"strconv\"\n)\n\n\/\/ Get the version of go.\nfunc GoVersion() string {\n return runtime.Version()\n}\n\n\/\/ Determine whether the version is 1.3+ to support syscall.FcntlFlock.\nfunc IsSupportFLock() bool {\n versions := strings.Split(GoVersion()[2:], \".\")\n\n majorVersion, _ := strconv.Atoi(versions[0])\n if (majorVersion > 1) {\n return true\n } else if (majorVersion == 1) {\n minorVersion, _ := strconv.Atoi(versions[1])\n if (minorVersion >= 3) {\n return true\n } else {\n return false\n }\n } else {\n return false\n }\n}\nFormat version.gopackage utils\n\nimport (\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Get the version of go.\nfunc GoVersion() string {\n\treturn runtime.Version()\n}\n\n\/\/ Determine whether the version is 1.3+ to support syscall.FcntlFlock.\nfunc IsSupportFLock() bool {\n\tversions := strings.Split(GoVersion()[2:], \".\")\n\n\tmajorVersion, _ := strconv.Atoi(versions[0])\n\tif majorVersion > 1 {\n\t\treturn true\n\t} else if majorVersion == 1 {\n\t\tminorVersion, _ := strconv.Atoi(versions[1])\n\t\tif minorVersion >= 3 {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"package yaml_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/go-playground\/validator\/v10\"\n\t\"github.com\/goccy\/go-yaml\"\n)\n\nfunc TestStructValidator(t *testing.T) {\n\n\tcases := []struct {\n\t\tTestName string\n\t\tYAMLContent string\n\t\tExpectedErr string\n\t\tInstance interface{}\n\t}{\n\t\t{\n\t\t\tTestName: \"Test Simple Validation\",\n\t\t\tYAMLContent: `---\n- name: john\n age: 20\n- name: tom\n age: -1\n- name: ken\n age: 10`,\n\t\t\tExpectedErr: `[5:8] Key: 'Age' Error:Field validation for 'Age' failed on the 'gte' tag\n 2 | - name: john\n 3 | age: 20\n 4 | - name: tom\n> 5 | age: -1\n ^\n 6 | - name: ken\n 7 | age: 10`,\n\t\t\tInstance: &[]struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t}{},\n\t\t},\n\t\t{\n\t\t\tTestName: \"Test Nested Validation Missing Internal Required\",\n\t\t\tYAMLContent: `---\nname: john\nage: 10\naddr:\n number: seven`,\n\t\t\tExpectedErr: `[4:5] Key: 'State' Error:Field validation for 'State' failed on the 'required' tag\n 1 | ---\n 2 | name: john\n 3 | age: 10\n> 4 | addr:\n ^\n 5 | number: seven`,\n\t\t\tInstance: &struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t\tAddr struct {\n\t\t\t\t\tNumber string `yaml:\"number\" validate:\"required\"`\n\t\t\t\t\tState string `yaml:\"state\" validate:\"required\"`\n\t\t\t\t} `yaml:\"addr\"`\n\t\t\t}{},\n\t\t},\n\t\t{\n\t\t\tTestName: \"Test nested Validation with unknown field\",\n\t\t\tYAMLContent: `---\nname: john\nage: 20\naddr:\n number: seven\n state: washington\n error: error\n`,\n\t\t\tExpectedErr: `[7:3] unknown field \"error\"\n 4 | addr:\n 5 | number: seven\n 6 | state: washington\n> 7 | error: error\n ^\n`,\n\t\t\tInstance: &struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t\tAddr *struct {\n\t\t\t\t\tNumber string `yaml:\"number\" validate:\"required\"`\n\t\t\t\t\tState string `yaml:\"state\" validate:\"required\"`\n\t\t\t\t} `yaml:\"addr\" validate:\"required\"`\n\t\t\t}{},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\ttc := tc \/\/ NOTE: https:\/\/github.com\/golang\/go\/wiki\/CommonMistakes#using-goroutines-on-loop-iterator-variables\n\t\tt.Run(tc.TestName, func(t *testing.T) {\n\t\t\tvalidate := validator.New()\n\t\t\tdec := yaml.NewDecoder(\n\t\t\t\tstrings.NewReader(tc.YAMLContent),\n\t\t\t\tyaml.Validator(validate),\n\t\t\t\tyaml.Strict(),\n\t\t\t)\n\t\t\terr := dec.Decode(tc.Instance)\n\t\t\tswitch {\n\t\t\tcase tc.ExpectedErr != \"\" && err == nil:\n\t\t\t\tt.Fatal(\"expected error\")\n\t\t\tcase tc.ExpectedErr == \"\" && err != nil:\n\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\tcase tc.ExpectedErr != \"\" && tc.ExpectedErr != err.Error():\n\t\t\t\tt.Fatalf(\"expected `%s` but actual `%s`\", tc.ExpectedErr, err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\nAdded a test case with top level required fieldpackage yaml_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/go-playground\/validator\/v10\"\n\t\"github.com\/goccy\/go-yaml\"\n)\n\nfunc TestStructValidator(t *testing.T) {\n\n\tcases := []struct {\n\t\tTestName string\n\t\tYAMLContent string\n\t\tExpectedErr string\n\t\tInstance interface{}\n\t}{\n\t\t{\n\t\t\tTestName: \"Test Simple Validation\",\n\t\t\tYAMLContent: `---\n- name: john\n age: 20\n- name: tom\n age: -1\n- name: ken\n age: 10`,\n\t\t\tExpectedErr: `[5:8] Key: 'Age' Error:Field validation for 'Age' failed on the 'gte' tag\n 2 | - name: john\n 3 | age: 20\n 4 | - name: tom\n> 5 | age: -1\n ^\n 6 | - name: ken\n 7 | age: 10`,\n\t\t\tInstance: &[]struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t}{},\n\t\t},\n\t\t{\n\t\t\tTestName: \"Test Missing Required Field\",\n\t\t\tYAMLContent: `---\n- name: john\n age: 20\n- age: 10`,\n\t\t\tExpectedErr: `[4:1] Key: 'Name' Error:Field validation for 'Name' failed on the 'required' tag\n 1 | ---\n 2 | - name: john\n 3 | age: 20\n> 4 | - age: 10\n ^\n`,\n\t\t\tInstance: &[]struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t}{},\n\t\t},\n\t\t{\n\t\t\tTestName: \"Test Nested Validation Missing Internal Required\",\n\t\t\tYAMLContent: `---\nname: john\nage: 10\naddr:\n number: seven`,\n\t\t\tExpectedErr: `[4:5] Key: 'State' Error:Field validation for 'State' failed on the 'required' tag\n 1 | ---\n 2 | name: john\n 3 | age: 10\n> 4 | addr:\n ^\n 5 | number: seven`,\n\t\t\tInstance: &struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t\tAddr struct {\n\t\t\t\t\tNumber string `yaml:\"number\" validate:\"required\"`\n\t\t\t\t\tState string `yaml:\"state\" validate:\"required\"`\n\t\t\t\t} `yaml:\"addr\"`\n\t\t\t}{},\n\t\t},\n\t\t{\n\t\t\tTestName: \"Test nested Validation with unknown field\",\n\t\t\tYAMLContent: `---\nname: john\nage: 20\naddr:\n number: seven\n state: washington\n error: error\n`,\n\t\t\tExpectedErr: `[7:3] unknown field \"error\"\n 4 | addr:\n 5 | number: seven\n 6 | state: washington\n> 7 | error: error\n ^\n`,\n\t\t\tInstance: &struct {\n\t\t\t\tName string `yaml:\"name\" validate:\"required\"`\n\t\t\t\tAge int `yaml:\"age\" validate:\"gte=0,lt=120\"`\n\t\t\t\tAddr *struct {\n\t\t\t\t\tNumber string `yaml:\"number\" validate:\"required\"`\n\t\t\t\t\tState string `yaml:\"state\" validate:\"required\"`\n\t\t\t\t} `yaml:\"addr\" validate:\"required\"`\n\t\t\t}{},\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\ttc := tc \/\/ NOTE: https:\/\/github.com\/golang\/go\/wiki\/CommonMistakes#using-goroutines-on-loop-iterator-variables\n\t\tt.Run(tc.TestName, func(t *testing.T) {\n\t\t\tvalidate := validator.New()\n\t\t\tdec := yaml.NewDecoder(\n\t\t\t\tstrings.NewReader(tc.YAMLContent),\n\t\t\t\tyaml.Validator(validate),\n\t\t\t\tyaml.Strict(),\n\t\t\t)\n\t\t\terr := dec.Decode(tc.Instance)\n\t\t\tswitch {\n\t\t\tcase tc.ExpectedErr != \"\" && err == nil:\n\t\t\t\tt.Fatal(\"expected error\")\n\t\t\tcase tc.ExpectedErr == \"\" && err != nil:\n\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\tcase tc.ExpectedErr != \"\" && tc.ExpectedErr != err.Error():\n\t\t\t\tt.Fatalf(\"expected `%s` but actual `%s`\", tc.ExpectedErr, err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"package luar\n\nimport (\n\t\"container\/list\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\nvar (\n\tmu sync.Mutex\n\tcache = map[reflect.Type]lua.LValue{}\n\ttypeCache = map[reflect.Type]lua.LValue{}\n)\n\nfunc addMethods(L *lua.LState, value reflect.Value, tbl *lua.LTable) {\n\tvtype := value.Type()\n\tfor i := 0; i < vtype.NumMethod(); i++ {\n\t\tmethod := vtype.Method(i)\n\t\tif method.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfn := New(L, method.Func.Interface())\n\t\ttbl.RawSetString(method.Name, fn)\n\t\ttbl.RawSetString(getUnexportedName(method.Name), fn)\n\t}\n}\n\nfunc addFields(L *lua.LState, value reflect.Value, tbl *lua.LTable) {\n\ttype element struct {\n\t\tType reflect.Type\n\t\tIndex []int\n\t}\n\n\tqueue := list.New()\n\tvtype := value.Type()\n\tqueue.PushFront(element{\n\t\tType: vtype,\n\t})\n\n\tfor queue.Len() > 0 {\n\t\te := queue.Back()\n\t\telem := e.Value.(element)\n\t\tvtype := elem.Type\n\t\tif vtype.Kind() == reflect.Ptr {\n\t\t\tvtype = vtype.Elem()\n\t\t}\n\tfields:\n\t\tfor i := 0; i < vtype.NumField(); i++ {\n\t\t\tfield := vtype.Field(i)\n\t\t\tif field.PkgPath != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar names []string\n\t\t\ttag := field.Tag.Get(\"luar\")\n\t\t\tif tag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif tag != \"\" {\n\t\t\t\tnames = []string{\n\t\t\t\t\ttag,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnames = []string{\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tgetUnexportedName(field.Name),\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, key := range names {\n\t\t\t\tif tbl.RawGetString(key) != lua.LNil {\n\t\t\t\t\tcontinue fields\n\t\t\t\t}\n\t\t\t}\n\t\t\tindex := make([]int, len(elem.Index)+1)\n\t\t\tcopy(index, elem.Index)\n\t\t\tindex[len(elem.Index)] = i\n\n\t\t\tud := L.NewUserData()\n\t\t\tud.Value = index\n\t\t\tfor _, key := range names {\n\t\t\t\ttbl.RawSetString(key, ud)\n\t\t\t}\n\t\t\tif field.Anonymous {\n\t\t\t\tindex := make([]int, len(elem.Index)+1)\n\t\t\t\tcopy(index, elem.Index)\n\t\t\t\tindex[len(elem.Index)] = i\n\t\t\t\tqueue.PushFront(element{\n\t\t\t\t\tType: field.Type,\n\t\t\t\t\tIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tqueue.Remove(e)\n\t}\n}\n\nfunc getMetatable(L *lua.LState, value reflect.Value) lua.LValue {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tvtype := value.Type()\n\tif v := cache[vtype]; v != nil {\n\t\treturn v\n\t}\n\n\tmt := L.NewTable()\n\n\tswitch vtype.Kind() {\n\tcase reflect.Array:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(arrayIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(arrayLen))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(arrayEq))\n\tcase reflect.Chan:\n\t\tmethods := L.NewTable()\n\t\tmethods.RawSetString(\"send\", L.NewFunction(chanSend))\n\t\tmethods.RawSetString(\"receive\", L.NewFunction(chanReceive))\n\t\tmethods.RawSetString(\"close\", L.NewFunction(chanClose))\n\t\taddMethods(L, value, methods)\n\n\t\tmt.RawSetString(\"__index\", methods)\n\t\tmt.RawSetString(\"__len\", L.NewFunction(chanLen))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(chanEq))\n\tcase reflect.Map:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(mapIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(mapNewIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(mapLen))\n\t\tmt.RawSetString(\"__call\", L.NewFunction(mapCall))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(mapEq))\n\tcase reflect.Ptr:\n\t\tptrMethods := L.NewTable()\n\t\taddMethods(L, value, ptrMethods)\n\t\tmt.RawSetString(\"ptr_methods\", ptrMethods)\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value.Elem(), methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\t\tif value.Elem().Kind() == reflect.Struct {\n\t\t\tfields := L.NewTable()\n\t\t\taddFields(L, value.Elem(), fields)\n\t\t\tmt.RawSetString(\"fields\", fields)\n\t\t}\n\n\t\tif value.Elem().Kind() == reflect.Array {\n\t\t\tmt.RawSetString(\"__index\", L.NewFunction(arrayIndex))\n\t\t} else {\n\t\t\tmt.RawSetString(\"__index\", L.NewFunction(ptrIndex))\n\t\t}\n\t\tswitch value.Elem().Kind() {\n\t\tcase reflect.Array:\n\t\t\tmt.RawSetString(\"__newindex\", L.NewFunction(arrayNewIndex))\n\t\tcase reflect.Struct:\n\t\t\tmt.RawSetString(\"__newindex\", L.NewFunction(structNewIndex))\n\t\t}\n\t\tmt.RawSetString(\"__pow\", L.NewFunction(ptrPow))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__unm\", L.NewFunction(ptrUnm))\n\t\tif value.Elem().Kind() == reflect.Array {\n\t\t\tmt.RawSetString(\"__len\", L.NewFunction(arrayLen))\n\t\t}\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(ptrEq))\n\tcase reflect.Slice:\n\t\tmethods := L.NewTable()\n\t\tmethods.RawSetString(\"capacity\", L.NewFunction(sliceCapacity))\n\t\tmethods.RawSetString(\"append\", L.NewFunction(sliceAppend))\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(sliceIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(sliceNewIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(sliceLen))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(sliceEq))\n\tcase reflect.Struct:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\t\tfields := L.NewTable()\n\t\taddFields(L, value, fields)\n\t\tmt.RawSetString(\"fields\", fields)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(structIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(structNewIndex))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t}\n\n\tcache[vtype] = mt\n\treturn mt\n}\n\nfunc getTypeMetatable(L *lua.LState, t reflect.Type) lua.LValue {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tif v := typeCache[t]; v != nil {\n\t\treturn v\n\t}\n\n\tmt := L.NewTable()\n\tmt.RawSetString(\"__call\", L.NewFunction(typeCall))\n\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\tmt.RawSetString(\"__eq\", L.NewFunction(typeEq))\n\n\ttypeCache[t] = mt\n\treturn mt\n}\nstore cached metatables with Lua statepackage luar\n\nimport (\n\t\"container\/list\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\nconst cacheKey = \"github.com\/layeh\/gopher-luar\"\n\nvar mu sync.Mutex\n\ntype mtCache struct {\n\tregular, types map[reflect.Type]lua.LValue\n}\n\nfunc newMTCache() *mtCache {\n\treturn &mtCache{\n\t\tregular: make(map[reflect.Type]lua.LValue),\n\t\ttypes: make(map[reflect.Type]lua.LValue),\n\t}\n}\n\nfunc getMTCache(L *lua.LState) *mtCache {\n\tregistry, ok := L.Get(lua.RegistryIndex).(*lua.LTable)\n\tif !ok {\n\t\tpanic(\"gopher-luar: corrupt lua registry\")\n\t}\n\tlCache, ok := registry.RawGetString(cacheKey).(*lua.LUserData)\n\tif !ok {\n\t\tlCache = L.NewUserData()\n\t\tlCache.Value = newMTCache()\n\t\tregistry.RawSetString(cacheKey, lCache)\n\t}\n\tcache, ok := lCache.Value.(*mtCache)\n\tif !ok {\n\t\tpanic(\"gopher-luar: corrupt luar metatable cache\")\n\t}\n\treturn cache\n}\n\nfunc addMethods(L *lua.LState, value reflect.Value, tbl *lua.LTable) {\n\tvtype := value.Type()\n\tfor i := 0; i < vtype.NumMethod(); i++ {\n\t\tmethod := vtype.Method(i)\n\t\tif method.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfn := New(L, method.Func.Interface())\n\t\ttbl.RawSetString(method.Name, fn)\n\t\ttbl.RawSetString(getUnexportedName(method.Name), fn)\n\t}\n}\n\nfunc addFields(L *lua.LState, value reflect.Value, tbl *lua.LTable) {\n\ttype element struct {\n\t\tType reflect.Type\n\t\tIndex []int\n\t}\n\n\tqueue := list.New()\n\tvtype := value.Type()\n\tqueue.PushFront(element{\n\t\tType: vtype,\n\t})\n\n\tfor queue.Len() > 0 {\n\t\te := queue.Back()\n\t\telem := e.Value.(element)\n\t\tvtype := elem.Type\n\t\tif vtype.Kind() == reflect.Ptr {\n\t\t\tvtype = vtype.Elem()\n\t\t}\n\tfields:\n\t\tfor i := 0; i < vtype.NumField(); i++ {\n\t\t\tfield := vtype.Field(i)\n\t\t\tif field.PkgPath != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar names []string\n\t\t\ttag := field.Tag.Get(\"luar\")\n\t\t\tif tag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif tag != \"\" {\n\t\t\t\tnames = []string{\n\t\t\t\t\ttag,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnames = []string{\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tgetUnexportedName(field.Name),\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, key := range names {\n\t\t\t\tif tbl.RawGetString(key) != lua.LNil {\n\t\t\t\t\tcontinue fields\n\t\t\t\t}\n\t\t\t}\n\t\t\tindex := make([]int, len(elem.Index)+1)\n\t\t\tcopy(index, elem.Index)\n\t\t\tindex[len(elem.Index)] = i\n\n\t\t\tud := L.NewUserData()\n\t\t\tud.Value = index\n\t\t\tfor _, key := range names {\n\t\t\t\ttbl.RawSetString(key, ud)\n\t\t\t}\n\t\t\tif field.Anonymous {\n\t\t\t\tindex := make([]int, len(elem.Index)+1)\n\t\t\t\tcopy(index, elem.Index)\n\t\t\t\tindex[len(elem.Index)] = i\n\t\t\t\tqueue.PushFront(element{\n\t\t\t\t\tType: field.Type,\n\t\t\t\t\tIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tqueue.Remove(e)\n\t}\n}\n\nfunc getMetatable(L *lua.LState, value reflect.Value) lua.LValue {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tcache := getMTCache(L)\n\n\tvtype := value.Type()\n\tif v := cache.regular[vtype]; v != nil {\n\t\treturn v\n\t}\n\n\tmt := L.NewTable()\n\n\tswitch vtype.Kind() {\n\tcase reflect.Array:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(arrayIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(arrayLen))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(arrayEq))\n\tcase reflect.Chan:\n\t\tmethods := L.NewTable()\n\t\tmethods.RawSetString(\"send\", L.NewFunction(chanSend))\n\t\tmethods.RawSetString(\"receive\", L.NewFunction(chanReceive))\n\t\tmethods.RawSetString(\"close\", L.NewFunction(chanClose))\n\t\taddMethods(L, value, methods)\n\n\t\tmt.RawSetString(\"__index\", methods)\n\t\tmt.RawSetString(\"__len\", L.NewFunction(chanLen))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(chanEq))\n\tcase reflect.Map:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(mapIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(mapNewIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(mapLen))\n\t\tmt.RawSetString(\"__call\", L.NewFunction(mapCall))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(mapEq))\n\tcase reflect.Ptr:\n\t\tptrMethods := L.NewTable()\n\t\taddMethods(L, value, ptrMethods)\n\t\tmt.RawSetString(\"ptr_methods\", ptrMethods)\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value.Elem(), methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\t\tif value.Elem().Kind() == reflect.Struct {\n\t\t\tfields := L.NewTable()\n\t\t\taddFields(L, value.Elem(), fields)\n\t\t\tmt.RawSetString(\"fields\", fields)\n\t\t}\n\n\t\tif value.Elem().Kind() == reflect.Array {\n\t\t\tmt.RawSetString(\"__index\", L.NewFunction(arrayIndex))\n\t\t} else {\n\t\t\tmt.RawSetString(\"__index\", L.NewFunction(ptrIndex))\n\t\t}\n\t\tswitch value.Elem().Kind() {\n\t\tcase reflect.Array:\n\t\t\tmt.RawSetString(\"__newindex\", L.NewFunction(arrayNewIndex))\n\t\tcase reflect.Struct:\n\t\t\tmt.RawSetString(\"__newindex\", L.NewFunction(structNewIndex))\n\t\t}\n\t\tmt.RawSetString(\"__pow\", L.NewFunction(ptrPow))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__unm\", L.NewFunction(ptrUnm))\n\t\tif value.Elem().Kind() == reflect.Array {\n\t\t\tmt.RawSetString(\"__len\", L.NewFunction(arrayLen))\n\t\t}\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(ptrEq))\n\tcase reflect.Slice:\n\t\tmethods := L.NewTable()\n\t\tmethods.RawSetString(\"capacity\", L.NewFunction(sliceCapacity))\n\t\tmethods.RawSetString(\"append\", L.NewFunction(sliceAppend))\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(sliceIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(sliceNewIndex))\n\t\tmt.RawSetString(\"__len\", L.NewFunction(sliceLen))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t\tmt.RawSetString(\"__eq\", L.NewFunction(sliceEq))\n\tcase reflect.Struct:\n\t\tmethods := L.NewTable()\n\t\taddMethods(L, value, methods)\n\t\tmt.RawSetString(\"methods\", methods)\n\t\tfields := L.NewTable()\n\t\taddFields(L, value, fields)\n\t\tmt.RawSetString(\"fields\", fields)\n\n\t\tmt.RawSetString(\"__index\", L.NewFunction(structIndex))\n\t\tmt.RawSetString(\"__newindex\", L.NewFunction(structNewIndex))\n\t\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\t}\n\n\tcache.regular[vtype] = mt\n\treturn mt\n}\n\nfunc getTypeMetatable(L *lua.LState, t reflect.Type) lua.LValue {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tcache := getMTCache(L)\n\n\tif v := cache.types[t]; v != nil {\n\t\treturn v\n\t}\n\n\tmt := L.NewTable()\n\tmt.RawSetString(\"__call\", L.NewFunction(typeCall))\n\tmt.RawSetString(\"__tostring\", L.NewFunction(allTostring))\n\tmt.RawSetString(\"__eq\", L.NewFunction(typeEq))\n\n\tcache.types[t] = mt\n\treturn mt\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"os\"\n\t\"strconv\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\ntype Cache struct {\n\tpool *redis.Pool\n}\n\n\/\/ set records a key in cache (redis)\nfunc (c *Cache) set(key string, value []byte) error {\n\tclient := c.pool.Get()\n\tdefer client.Close()\n\n\t_, err := client.Do(\"SET\", key, value)\n\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Failed to record request...\")\n\t} else {\n\t\tlog.WithFields(log.Fields{}).Info(\"Request recorded!\")\n\t}\n}\n\n\/\/ getRedisPool returns thread safe Redis connection pool\nfunc getRedisPool() *redis.Pool {\n\n\t\/\/ getting redis connection\n\tmaxConnections := 10\n\tmc := os.Getenv(\"MaxConnections\")\n\tif mc != \"\" {\n\t\tmaxCons, err := strconv.Atoi(mc)\n\t\tif err != nil {\n\t\t\tmaxConnections = 10\n\t\t} else {\n\t\t\tmaxConnections = maxCons\n\t\t}\n\t}\n\t\/\/ getting redis client for state storing\n\tredisPool := redis.NewPool(func() (redis.Conn, error) {\n\t\tc, err := redis.Dial(\"tcp\", AppConfig.redisAddress)\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\"Error\": err.Error()}).Panic(\"Failed to create Redis connection pool!\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif AppConfig.redisPassword != \"\" {\n\t\t\tif _, err := c.Do(\"AUTH\", AppConfig.redisPassword); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"Error\": err.Error(),\n\t\t\t\t\t\"PasswordUsed\": AppConfig.redisPassword,\n\t\t\t\t}).Panic(\"Failed to authenticate to Redis!\")\n\t\t\t\tc.Close()\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Info(\"Authenticated to Redis successfully! \")\n\t\t\t}\n\t\t}\n\n\t\treturn c, err\n\t}, maxConnections)\n\n\tdefer redisPool.Close()\n\n\treturn redisPool\n}\nmore loggingpackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nconst prefix = \"genproxy:\"\n\ntype Cache struct {\n\tpool *redis.Pool\n}\n\n\/\/ set records a key in cache (redis)\nfunc (c *Cache) set(key string, value []byte) error {\n\tclient := c.pool.Get()\n\tdefer client.Close()\n\n\t_, err := client.Do(\"SET\", fmt.Sprintf(prefix+key), value)\n\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Failed to record request...\")\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"key\": fmt.Sprintf(prefix + key),\n\t\t}).Info(\"Request recorded!\")\n\t}\n\n\treturn err\n}\n\n\/\/ getRedisPool returns thread safe Redis connection pool\nfunc getRedisPool() *redis.Pool {\n\n\t\/\/ getting redis connection\n\tmaxConnections := 10\n\tmc := os.Getenv(\"MaxConnections\")\n\tif mc != \"\" {\n\t\tmaxCons, err := strconv.Atoi(mc)\n\t\tif err != nil {\n\t\t\tmaxConnections = 10\n\t\t} else {\n\t\t\tmaxConnections = maxCons\n\t\t}\n\t}\n\t\/\/ getting redis client for state storing\n\tredisPool := redis.NewPool(func() (redis.Conn, error) {\n\t\tc, err := redis.Dial(\"tcp\", AppConfig.redisAddress)\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\"Error\": err.Error()}).Panic(\"Failed to create Redis connection pool!\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif AppConfig.redisPassword != \"\" {\n\t\t\tif _, err := c.Do(\"AUTH\", AppConfig.redisPassword); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"Error\": err.Error(),\n\t\t\t\t\t\"PasswordUsed\": AppConfig.redisPassword,\n\t\t\t\t}).Panic(\"Failed to authenticate to Redis!\")\n\t\t\t\tc.Close()\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Info(\"Authenticated to Redis successfully! \")\n\t\t\t}\n\t\t}\n\n\t\treturn c, err\n\t}, maxConnections)\n\n\tdefer redisPool.Close()\n\n\treturn redisPool\n}\n<|endoftext|>"} {"text":"package kubernetes\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/rusenask\/cron\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\n\t\"github.com\/keel-hq\/keel\/approvals\"\n\t\"github.com\/keel-hq\/keel\/extension\/notification\"\n\t\"github.com\/keel-hq\/keel\/internal\/k8s\"\n\t\"github.com\/keel-hq\/keel\/internal\/policy\"\n\t\"github.com\/keel-hq\/keel\/types\"\n\t\"github.com\/keel-hq\/keel\/util\/image\"\n\t\"github.com\/keel-hq\/keel\/util\/policies\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar kubernetesVersionedUpdatesCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"kubernetes_versioned_updates_total\",\n\t\tHelp: \"How many versioned deployments were updated, partitioned by deployment name.\",\n\t},\n\t[]string{\"kubernetes\"},\n)\n\nvar kubernetesUnversionedUpdatesCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"kubernetes_unversioned_updates_total\",\n\t\tHelp: \"How many unversioned deployments were updated, partitioned by deployment name.\",\n\t},\n\t[]string{\"kubernetes\"},\n)\n\nfunc init() {\n\tprometheus.MustRegister(kubernetesVersionedUpdatesCounter)\n\tprometheus.MustRegister(kubernetesUnversionedUpdatesCounter)\n}\n\n\/\/ ProviderName - provider name\nconst ProviderName = \"kubernetes\"\n\nvar versionreg = regexp.MustCompile(`:[^:]*$`)\n\n\/\/ GenericResourceCache an interface for generic resource cache.\ntype GenericResourceCache interface {\n\t\/\/ Values returns a copy of the contents of the cache.\n\t\/\/ The slice and its contents should be treated as read-only.\n\tValues() []*k8s.GenericResource\n\n\t\/\/ Register registers ch to receive a value when Notify is called.\n\tRegister(chan int, int)\n}\n\n\/\/ UpdatePlan - deployment update plan\ntype UpdatePlan struct {\n\t\/\/ Updated deployment version\n\t\/\/ Deployment v1beta1.Deployment\n\tResource *k8s.GenericResource\n\n\t\/\/ Current (last seen cluster version)\n\tCurrentVersion string\n\t\/\/ New version that's already in the deployment\n\tNewVersion string\n}\n\nfunc (p *UpdatePlan) String() string {\n\tif p.Resource != nil {\n\t\treturn fmt.Sprintf(\"%s %s->%s\", p.Resource.Identifier, p.CurrentVersion, p.NewVersion)\n\t}\n\treturn \"empty plan\"\n}\n\n\/\/ Provider - kubernetes provider for auto update\ntype Provider struct {\n\timplementer Implementer\n\n\tsender notification.Sender\n\n\tapprovalManager approvals.Manager\n\n\tcache GenericResourceCache\n\n\tevents chan *types.Event\n\tstop chan struct{}\n}\n\n\/\/ NewProvider - create new kubernetes based provider\nfunc NewProvider(implementer Implementer, sender notification.Sender, approvalManager approvals.Manager, cache GenericResourceCache) (*Provider, error) {\n\treturn &Provider{\n\t\timplementer: implementer,\n\t\tcache: cache,\n\t\tapprovalManager: approvalManager,\n\t\tevents: make(chan *types.Event, 100),\n\t\tstop: make(chan struct{}),\n\t\tsender: sender,\n\t}, nil\n}\n\n\/\/ Submit - submit event to provider\nfunc (p *Provider) Submit(event types.Event) error {\n\tp.events <- &event\n\treturn nil\n}\n\n\/\/ GetName - get provider name\nfunc (p *Provider) GetName() string {\n\treturn ProviderName\n}\n\n\/\/ Start - starts kubernetes provider, waits for events\nfunc (p *Provider) Start() error {\n\treturn p.startInternal()\n}\n\n\/\/ Stop - stops kubernetes provider\nfunc (p *Provider) Stop() {\n\tclose(p.stop)\n}\n\n\/\/ TrackedImages returns a list of tracked images.\nfunc (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {\n\tvar trackedImages []*types.TrackedImage\n\n\tfor _, gr := range p.cache.Values() {\n\t\tlabels := gr.GetLabels()\n\t\tannotations := gr.GetAnnotations()\n\n\t\t\/\/ ignoring unlabelled deployments\n\t\tplc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)\n\t\tif plc.Type() == policy.PolicyTypeNone {\n\t\t\tcontinue\n\t\t}\n\n\t\tschedule, ok := annotations[types.KeelPollScheduleAnnotation]\n\t\tif ok {\n\t\t\t_, err := cron.Parse(schedule)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"schedule\": schedule,\n\t\t\t\t\t\"name\": gr.Name,\n\t\t\t\t\t\"namespace\": gr.Namespace,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to parse poll schedule, setting default schedule\")\n\t\t\t\tschedule = types.KeelPollDefaultSchedule\n\t\t\t}\n\t\t} else {\n\t\t\tschedule = types.KeelPollDefaultSchedule\n\t\t}\n\n\t\t\/\/ trigger type, we only care for \"poll\" type triggers\n\t\ttrigger := policies.GetTriggerPolicy(labels, annotations)\n\t\tsecrets := gr.GetImagePullSecrets()\n\t\timages := gr.GetImages()\n\t\tfor _, img := range images {\n\t\t\tref, err := image.Parse(img)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": img,\n\t\t\t\t\t\"namespace\": gr.Namespace,\n\t\t\t\t\t\"name\": gr.Name,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to parse image\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrackedImages = append(trackedImages, &types.TrackedImage{\n\t\t\t\tImage: ref,\n\t\t\t\tPollSchedule: schedule,\n\t\t\t\tTrigger: trigger,\n\t\t\t\tProvider: ProviderName,\n\t\t\t\tNamespace: gr.Namespace,\n\t\t\t\tSecrets: secrets,\n\t\t\t\tMeta: make(map[string]string),\n\t\t\t\tSemverPreReleaseTags: make(map[string]string),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn trackedImages, nil\n}\n\nfunc (p *Provider) startInternal() error {\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.events:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"repository\": event.Repository.Name,\n\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t\"registry\": event.Repository.Host,\n\t\t\t}).Info(\"provider.kubernetes: processing event\")\n\t\t\t_, err := p.processEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": event.Repository.Name,\n\t\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to process event\")\n\t\t\t}\n\t\tcase <-p.stop:\n\t\t\tlog.Info(\"provider.kubernetes: got shutdown signal, stopping...\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (p *Provider) processEvent(event *types.Event) (updated []*k8s.GenericResource, err error) {\n\tplans, err := p.createUpdatePlans(&event.Repository)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(plans) == 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"image\": event.Repository.Name,\n\t\t\t\"tag\": event.Repository.Tag,\n\t\t}).Info(\"provider.kubernetes: no plans for deployment updates found for this event\")\n\t\treturn\n\t}\n\n\tapprovedPlans := p.checkForApprovals(event, plans)\n\n\treturn p.updateDeployments(approvedPlans)\n}\n\nfunc (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*k8s.GenericResource, err error) {\n\tfor _, plan := range plans {\n\t\tresource := plan.Resource\n\n\t\tannotations := resource.GetAnnotations()\n\n\t\tnotificationChannels := types.ParseEventNotificationChannels(annotations)\n\n\t\tp.sender.Send(types.EventNotification{\n\t\t\tName: \"preparing to update resource\",\n\t\t\tMessage: fmt.Sprintf(\"Preparing to update %s %s\/%s %s->%s (%s)\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \")),\n\t\t\tCreatedAt: time.Now(),\n\t\t\tType: types.NotificationPreDeploymentUpdate,\n\t\t\tLevel: types.LevelDebug,\n\t\t\tChannels: notificationChannels,\n\t\t})\n\n\t\tvar err error\n\n\t\ttimestamp := time.Now().Format(time.RFC3339)\n\t\tannotations[\"kubernetes.io\/change-cause\"] = fmt.Sprintf(\"keel automated update, version %s -> %s [%s]\", plan.CurrentVersion, plan.NewVersion, timestamp)\n\n\t\tresource.SetAnnotations(annotations)\n\n\t\terr = p.implementer.Update(resource)\n\t\tkubernetesVersionedUpdatesCounter.With(prometheus.Labels{\"kubernetes\": fmt.Sprintf(\"%s\/%s\", resource.Namespace, resource.Name)}).Inc()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t\t\"deployment\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"update\": fmt.Sprintf(\"%s->%s\", plan.CurrentVersion, plan.NewVersion),\n\t\t\t}).Error(\"provider.kubernetes: got error while updating resource\")\n\n\t\t\tp.sender.Send(types.EventNotification{\n\t\t\t\tName: \"update resource\",\n\t\t\t\tMessage: fmt.Sprintf(\"%s %s\/%s update %s->%s failed, error: %s\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, err),\n\t\t\t\tCreatedAt: time.Now(),\n\t\t\t\tType: types.NotificationDeploymentUpdate,\n\t\t\t\tLevel: types.LevelError,\n\t\t\t\tChannels: notificationChannels,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\terr = p.updateComplete(plan)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"name\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t}).Warn(\"provider.kubernetes: got error while resetting approvals counter after successful update\")\n\t\t}\n\n\t\tvar msg string\n\t\treleaseNotes := types.ParseReleaseNotesURL(resource.GetAnnotations())\n\t\tif releaseNotes != \"\" {\n\t\t\tmsg = fmt.Sprintf(\"Successfully updated %s %s\/%s %s->%s (%s). Release notes: %s\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \"), releaseNotes)\n\t\t} else {\n\t\t\tmsg = fmt.Sprintf(\"Successfully updated %s %s\/%s %s->%s (%s)\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \"))\n\t\t}\n\n\t\tp.sender.Send(types.EventNotification{\n\t\t\tName: \"update resource\",\n\t\t\tMessage: msg,\n\t\t\tCreatedAt: time.Now(),\n\t\t\tType: types.NotificationDeploymentUpdate,\n\t\t\tLevel: types.LevelSuccess,\n\t\t\tChannels: notificationChannels,\n\t\t})\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": resource.Name,\n\t\t\t\"kind\": resource.Kind(),\n\t\t\t\"previous\": plan.CurrentVersion,\n\t\t\t\"new\": plan.NewVersion,\n\t\t\t\"namespace\": resource.Namespace,\n\t\t}).Info(\"provider.kubernetes: resource updated\")\n\t\tupdated = append(updated, resource)\n\t}\n\n\treturn\n}\n\nfunc getDesiredImage(delta map[string]string, currentImage string) (string, error) {\n\tcurrentRef, err := image.Parse(currentImage)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor repository, tag := range delta {\n\t\tif repository == currentRef.Repository() {\n\t\t\tref, err := image.Parse(repository)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\t\/\/ updating image\n\t\t\tif ref.Registry() == image.DefaultRegistryHostname {\n\t\t\t\treturn fmt.Sprintf(\"%s:%s\", ref.ShortName(), tag), nil\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"%s:%s\", ref.Repository(), tag), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"image %s not found in deltas\", currentImage)\n}\n\n\/\/ createUpdatePlans - impacted deployments by changed repository\nfunc (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, error) {\n\timpacted := []*UpdatePlan{}\n\n\tfor _, resource := range p.cache.Values() {\n\n\t\tlabels := resource.GetLabels()\n\t\tannotations := resource.GetAnnotations()\n\n\t\tplc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)\n\t\tif plc.Type() == policy.PolicyTypeNone {\n\t\t\tlog.Debugf(\"no policy defined, skipping: %s, labels: %s, annotations: %s\", resource.Identifier, labels, annotations)\n\t\t\tcontinue\n\t\t}\n\n\t\tupdated, shouldUpdateDeployment, err := checkForUpdate(plc, repo, resource)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"deployment\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t}).Error(\"provider.kubernetes: got error while checking versioned resource\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif shouldUpdateDeployment {\n\t\t\timpacted = append(impacted, updated)\n\t\t}\n\t}\n\n\treturn impacted, nil\n}\n\nfunc (p *Provider) namespaces() (*v1.NamespaceList, error) {\n\treturn p.implementer.Namespaces()\n}\nincluding pre-releasespackage kubernetes\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/rusenask\/cron\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\n\t\"github.com\/keel-hq\/keel\/approvals\"\n\t\"github.com\/keel-hq\/keel\/extension\/notification\"\n\t\"github.com\/keel-hq\/keel\/internal\/k8s\"\n\t\"github.com\/keel-hq\/keel\/internal\/policy\"\n\t\"github.com\/keel-hq\/keel\/types\"\n\t\"github.com\/keel-hq\/keel\/util\/image\"\n\t\"github.com\/keel-hq\/keel\/util\/policies\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar kubernetesVersionedUpdatesCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"kubernetes_versioned_updates_total\",\n\t\tHelp: \"How many versioned deployments were updated, partitioned by deployment name.\",\n\t},\n\t[]string{\"kubernetes\"},\n)\n\nvar kubernetesUnversionedUpdatesCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"kubernetes_unversioned_updates_total\",\n\t\tHelp: \"How many unversioned deployments were updated, partitioned by deployment name.\",\n\t},\n\t[]string{\"kubernetes\"},\n)\n\nfunc init() {\n\tprometheus.MustRegister(kubernetesVersionedUpdatesCounter)\n\tprometheus.MustRegister(kubernetesUnversionedUpdatesCounter)\n}\n\n\/\/ ProviderName - provider name\nconst ProviderName = \"kubernetes\"\n\nvar versionreg = regexp.MustCompile(`:[^:]*$`)\n\n\/\/ GenericResourceCache an interface for generic resource cache.\ntype GenericResourceCache interface {\n\t\/\/ Values returns a copy of the contents of the cache.\n\t\/\/ The slice and its contents should be treated as read-only.\n\tValues() []*k8s.GenericResource\n\n\t\/\/ Register registers ch to receive a value when Notify is called.\n\tRegister(chan int, int)\n}\n\n\/\/ UpdatePlan - deployment update plan\ntype UpdatePlan struct {\n\t\/\/ Updated deployment version\n\t\/\/ Deployment v1beta1.Deployment\n\tResource *k8s.GenericResource\n\n\t\/\/ Current (last seen cluster version)\n\tCurrentVersion string\n\t\/\/ New version that's already in the deployment\n\tNewVersion string\n}\n\nfunc (p *UpdatePlan) String() string {\n\tif p.Resource != nil {\n\t\treturn fmt.Sprintf(\"%s %s->%s\", p.Resource.Identifier, p.CurrentVersion, p.NewVersion)\n\t}\n\treturn \"empty plan\"\n}\n\n\/\/ Provider - kubernetes provider for auto update\ntype Provider struct {\n\timplementer Implementer\n\n\tsender notification.Sender\n\n\tapprovalManager approvals.Manager\n\n\tcache GenericResourceCache\n\n\tevents chan *types.Event\n\tstop chan struct{}\n}\n\n\/\/ NewProvider - create new kubernetes based provider\nfunc NewProvider(implementer Implementer, sender notification.Sender, approvalManager approvals.Manager, cache GenericResourceCache) (*Provider, error) {\n\treturn &Provider{\n\t\timplementer: implementer,\n\t\tcache: cache,\n\t\tapprovalManager: approvalManager,\n\t\tevents: make(chan *types.Event, 100),\n\t\tstop: make(chan struct{}),\n\t\tsender: sender,\n\t}, nil\n}\n\n\/\/ Submit - submit event to provider\nfunc (p *Provider) Submit(event types.Event) error {\n\tp.events <- &event\n\treturn nil\n}\n\n\/\/ GetName - get provider name\nfunc (p *Provider) GetName() string {\n\treturn ProviderName\n}\n\n\/\/ Start - starts kubernetes provider, waits for events\nfunc (p *Provider) Start() error {\n\treturn p.startInternal()\n}\n\n\/\/ Stop - stops kubernetes provider\nfunc (p *Provider) Stop() {\n\tclose(p.stop)\n}\n\n\/\/ TrackedImages returns a list of tracked images.\nfunc (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {\n\tvar trackedImages []*types.TrackedImage\n\n\tfor _, gr := range p.cache.Values() {\n\t\tlabels := gr.GetLabels()\n\t\tannotations := gr.GetAnnotations()\n\n\t\t\/\/ ignoring unlabelled deployments\n\t\tplc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)\n\t\tif plc.Type() == policy.PolicyTypeNone {\n\t\t\tcontinue\n\t\t}\n\n\t\tschedule, ok := annotations[types.KeelPollScheduleAnnotation]\n\t\tif ok {\n\t\t\t_, err := cron.Parse(schedule)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"schedule\": schedule,\n\t\t\t\t\t\"name\": gr.Name,\n\t\t\t\t\t\"namespace\": gr.Namespace,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to parse poll schedule, setting default schedule\")\n\t\t\t\tschedule = types.KeelPollDefaultSchedule\n\t\t\t}\n\t\t} else {\n\t\t\tschedule = types.KeelPollDefaultSchedule\n\t\t}\n\n\t\t\/\/ trigger type, we only care for \"poll\" type triggers\n\t\ttrigger := policies.GetTriggerPolicy(labels, annotations)\n\t\tsecrets := gr.GetImagePullSecrets()\n\t\timages := gr.GetImages()\n\t\tfor _, img := range images {\n\t\t\tref, err := image.Parse(img)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": img,\n\t\t\t\t\t\"namespace\": gr.Namespace,\n\t\t\t\t\t\"name\": gr.Name,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to parse image\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsvp := make(map[string]string)\n\n\t\t\tsemverTag, err := semver.NewVersion(ref.Tag())\n\t\t\tif err == nil {\n\t\t\t\tif semverTag.Prerelease() != \"\" {\n\t\t\t\t\tsvp[semverTag.Prerelease()] = ref.Tag()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttrackedImages = append(trackedImages, &types.TrackedImage{\n\t\t\t\tImage: ref,\n\t\t\t\tPollSchedule: schedule,\n\t\t\t\tTrigger: trigger,\n\t\t\t\tProvider: ProviderName,\n\t\t\t\tNamespace: gr.Namespace,\n\t\t\t\tSecrets: secrets,\n\t\t\t\tMeta: make(map[string]string),\n\t\t\t\tSemverPreReleaseTags: svp,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn trackedImages, nil\n}\n\nfunc (p *Provider) startInternal() error {\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.events:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"repository\": event.Repository.Name,\n\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t\"registry\": event.Repository.Host,\n\t\t\t}).Info(\"provider.kubernetes: processing event\")\n\t\t\t_, err := p.processEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": event.Repository.Name,\n\t\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t}).Error(\"provider.kubernetes: failed to process event\")\n\t\t\t}\n\t\tcase <-p.stop:\n\t\t\tlog.Info(\"provider.kubernetes: got shutdown signal, stopping...\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (p *Provider) processEvent(event *types.Event) (updated []*k8s.GenericResource, err error) {\n\tplans, err := p.createUpdatePlans(&event.Repository)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(plans) == 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"image\": event.Repository.Name,\n\t\t\t\"tag\": event.Repository.Tag,\n\t\t}).Info(\"provider.kubernetes: no plans for deployment updates found for this event\")\n\t\treturn\n\t}\n\n\tapprovedPlans := p.checkForApprovals(event, plans)\n\n\treturn p.updateDeployments(approvedPlans)\n}\n\nfunc (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*k8s.GenericResource, err error) {\n\tfor _, plan := range plans {\n\t\tresource := plan.Resource\n\n\t\tannotations := resource.GetAnnotations()\n\n\t\tnotificationChannels := types.ParseEventNotificationChannels(annotations)\n\n\t\tp.sender.Send(types.EventNotification{\n\t\t\tName: \"preparing to update resource\",\n\t\t\tMessage: fmt.Sprintf(\"Preparing to update %s %s\/%s %s->%s (%s)\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \")),\n\t\t\tCreatedAt: time.Now(),\n\t\t\tType: types.NotificationPreDeploymentUpdate,\n\t\t\tLevel: types.LevelDebug,\n\t\t\tChannels: notificationChannels,\n\t\t})\n\n\t\tvar err error\n\n\t\ttimestamp := time.Now().Format(time.RFC3339)\n\t\tannotations[\"kubernetes.io\/change-cause\"] = fmt.Sprintf(\"keel automated update, version %s -> %s [%s]\", plan.CurrentVersion, plan.NewVersion, timestamp)\n\n\t\tresource.SetAnnotations(annotations)\n\n\t\terr = p.implementer.Update(resource)\n\t\tkubernetesVersionedUpdatesCounter.With(prometheus.Labels{\"kubernetes\": fmt.Sprintf(\"%s\/%s\", resource.Namespace, resource.Name)}).Inc()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t\t\"deployment\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"update\": fmt.Sprintf(\"%s->%s\", plan.CurrentVersion, plan.NewVersion),\n\t\t\t}).Error(\"provider.kubernetes: got error while updating resource\")\n\n\t\t\tp.sender.Send(types.EventNotification{\n\t\t\t\tName: \"update resource\",\n\t\t\t\tMessage: fmt.Sprintf(\"%s %s\/%s update %s->%s failed, error: %s\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, err),\n\t\t\t\tCreatedAt: time.Now(),\n\t\t\t\tType: types.NotificationDeploymentUpdate,\n\t\t\t\tLevel: types.LevelError,\n\t\t\t\tChannels: notificationChannels,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\terr = p.updateComplete(plan)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"name\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t}).Warn(\"provider.kubernetes: got error while resetting approvals counter after successful update\")\n\t\t}\n\n\t\tvar msg string\n\t\treleaseNotes := types.ParseReleaseNotesURL(resource.GetAnnotations())\n\t\tif releaseNotes != \"\" {\n\t\t\tmsg = fmt.Sprintf(\"Successfully updated %s %s\/%s %s->%s (%s). Release notes: %s\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \"), releaseNotes)\n\t\t} else {\n\t\t\tmsg = fmt.Sprintf(\"Successfully updated %s %s\/%s %s->%s (%s)\", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), \", \"))\n\t\t}\n\n\t\tp.sender.Send(types.EventNotification{\n\t\t\tName: \"update resource\",\n\t\t\tMessage: msg,\n\t\t\tCreatedAt: time.Now(),\n\t\t\tType: types.NotificationDeploymentUpdate,\n\t\t\tLevel: types.LevelSuccess,\n\t\t\tChannels: notificationChannels,\n\t\t})\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": resource.Name,\n\t\t\t\"kind\": resource.Kind(),\n\t\t\t\"previous\": plan.CurrentVersion,\n\t\t\t\"new\": plan.NewVersion,\n\t\t\t\"namespace\": resource.Namespace,\n\t\t}).Info(\"provider.kubernetes: resource updated\")\n\t\tupdated = append(updated, resource)\n\t}\n\n\treturn\n}\n\nfunc getDesiredImage(delta map[string]string, currentImage string) (string, error) {\n\tcurrentRef, err := image.Parse(currentImage)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor repository, tag := range delta {\n\t\tif repository == currentRef.Repository() {\n\t\t\tref, err := image.Parse(repository)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\t\/\/ updating image\n\t\t\tif ref.Registry() == image.DefaultRegistryHostname {\n\t\t\t\treturn fmt.Sprintf(\"%s:%s\", ref.ShortName(), tag), nil\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"%s:%s\", ref.Repository(), tag), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"image %s not found in deltas\", currentImage)\n}\n\n\/\/ createUpdatePlans - impacted deployments by changed repository\nfunc (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, error) {\n\timpacted := []*UpdatePlan{}\n\n\tfor _, resource := range p.cache.Values() {\n\n\t\tlabels := resource.GetLabels()\n\t\tannotations := resource.GetAnnotations()\n\n\t\tplc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)\n\t\tif plc.Type() == policy.PolicyTypeNone {\n\t\t\tlog.Debugf(\"no policy defined, skipping: %s, labels: %s, annotations: %s\", resource.Identifier, labels, annotations)\n\t\t\tcontinue\n\t\t}\n\n\t\tupdated, shouldUpdateDeployment, err := checkForUpdate(plc, repo, resource)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"deployment\": resource.Name,\n\t\t\t\t\"kind\": resource.Kind(),\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t}).Error(\"provider.kubernetes: got error while checking versioned resource\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif shouldUpdateDeployment {\n\t\t\timpacted = append(impacted, updated)\n\t\t}\n\t}\n\n\treturn impacted, nil\n}\n\nfunc (p *Provider) namespaces() (*v1.NamespaceList, error) {\n\treturn p.implementer.Namespaces()\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017-2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/inctimer\"\n\tcilium \"github.com\/cilium\/proxy\/go\/cilium\/api\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\ntype AccessLogServer struct {\n\tPath string\n\tLogs chan cilium.EntryType\n\tclosing uint32 \/\/ non-zero if closing, accessed atomically\n\tlistener *net.UnixListener\n\tconns []*net.UnixConn\n}\n\n\/\/ Close removes the unix domain socket from the filesystem\nfunc (s *AccessLogServer) Close() {\n\tif s != nil {\n\t\tatomic.StoreUint32(&s.closing, 1)\n\t\ts.listener.Close()\n\t\tfor _, conn := range s.conns {\n\t\t\tconn.Close()\n\t\t}\n\t\tos.Remove(s.Path)\n\t}\n}\n\n\/\/ Clear empties the access log server buffer, counting the passes and drops\nfunc (s *AccessLogServer) Clear() (passed, drops int) {\n\tpasses, drops := 0, 0\n\tempty := false\n\tfor !empty {\n\t\tselect {\n\t\tcase entryType := <-s.Logs:\n\t\t\tif entryType == cilium.EntryType_Denied {\n\t\t\t\tdrops++\n\t\t\t} else {\n\t\t\t\tpasses++\n\t\t\t}\n\t\tcase <-inctimer.After(10 * time.Millisecond):\n\t\t\tempty = true\n\t\t}\n\t}\n\treturn passes, drops\n}\n\n\/\/ StartAccessLogServer starts the access log server.\nfunc StartAccessLogServer(accessLogName string, bufSize int) *AccessLogServer {\n\taccessLogPath := filepath.Join(Tmpdir, accessLogName)\n\n\tserver := &AccessLogServer{\n\t\tPath: accessLogPath,\n\t\tLogs: make(chan cilium.EntryType, bufSize),\n\t}\n\n\t\/\/ Create the access log listener\n\tos.Remove(accessLogPath) \/\/ Remove\/Unlink the old unix domain socket, if any.\n\tvar err error\n\tserver.listener, err = net.ListenUnix(\"unixpacket\", &net.UnixAddr{Name: accessLogPath, Net: \"unixpacket\"})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open access log listen socket at %s: %v\", accessLogPath, err)\n\t}\n\tserver.listener.SetUnlinkOnClose(true)\n\n\t\/\/ Make the socket accessible by non-root Envoy proxies, e.g. running in\n\t\/\/ sidecar containers.\n\tif err = os.Chmod(accessLogPath, 0777); err != nil {\n\t\tlog.Fatalf(\"Failed to change mode of access log listen socket at %s: %v\", accessLogPath, err)\n\t}\n\n\tlog.Debug(\"Starting Access Log Server\")\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ Each Envoy listener opens a new connection over the Unix domain socket.\n\t\t\t\/\/ Multiple worker threads serving the listener share that same connection\n\t\t\tuc, err := server.listener.AcceptUnix()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ These errors are expected when we are closing down\n\t\t\t\tif atomic.LoadUint32(&server.closing) != 0 ||\n\t\t\t\t\terrors.Is(err, net.ErrClosed) ||\n\t\t\t\t\terrors.Is(err, syscall.EINVAL) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tlog.WithError(err).Warn(\"Failed to accept access log connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Debug(\"Accepted access log connection\")\n\n\t\t\tserver.conns = append(server.conns, uc)\n\t\t\t\/\/ Serve this access log socket in a goroutine, so we can serve multiple\n\t\t\t\/\/ connections concurrently.\n\t\t\tgo server.accessLogger(uc)\n\t\t}\n\t}()\n\n\treturn server\n}\n\n\/\/ isEOF returns true if the error message ends in \"EOF\". ReadMsgUnix returns extra info in the beginning.\nfunc isEOF(err error) bool {\n\tstrerr := err.Error()\n\terrlen := len(strerr)\n\treturn errlen >= 3 && strerr[errlen-3:] == io.EOF.Error()\n}\n\nfunc (s *AccessLogServer) accessLogger(conn *net.UnixConn) {\n\tdefer func() {\n\t\tlog.Debug(\"Closing access log connection\")\n\t\tconn.Close()\n\t}()\n\n\tbuf := make([]byte, 4096)\n\tfor {\n\t\tn, _, flags, _, err := conn.ReadMsgUnix(buf, nil)\n\t\tif err != nil {\n\t\t\tif !isEOF(err) && atomic.LoadUint32(&s.closing) == 0 {\n\t\t\t\tlog.WithError(err).Error(\"Error while reading from access log connection\")\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif flags&unix.MSG_TRUNC != 0 {\n\t\t\tlog.Warning(\"Discarded truncated access log message\")\n\t\t\tcontinue\n\t\t}\n\t\tpblog := cilium.LogEntry{}\n\t\terr = proto.Unmarshal(buf[:n], &pblog)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Discarded invalid access log message\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Access log message: %s\", pblog.String())\n\t\ts.Logs <- pblog.EntryType\n\t}\n}\nproxylib\/test: fix data race between StartAccessLogServer and Close\/\/ Copyright 2017-2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/inctimer\"\n\tcilium \"github.com\/cilium\/proxy\/go\/cilium\/api\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\ntype AccessLogServer struct {\n\tPath string\n\tLogs chan cilium.EntryType\n\tclosing uint32 \/\/ non-zero if closing, accessed atomically\n\tlistener *net.UnixListener\n\tconns []*net.UnixConn\n}\n\n\/\/ Close removes the unix domain socket from the filesystem\nfunc (s *AccessLogServer) Close() {\n\tif s != nil {\n\t\tatomic.StoreUint32(&s.closing, 1)\n\t\ts.listener.Close()\n\t\tfor _, conn := range s.conns {\n\t\t\tconn.Close()\n\t\t}\n\t\tos.Remove(s.Path)\n\t}\n}\n\n\/\/ Clear empties the access log server buffer, counting the passes and drops\nfunc (s *AccessLogServer) Clear() (passed, drops int) {\n\tpasses, drops := 0, 0\n\tempty := false\n\tfor !empty {\n\t\tselect {\n\t\tcase entryType := <-s.Logs:\n\t\t\tif entryType == cilium.EntryType_Denied {\n\t\t\t\tdrops++\n\t\t\t} else {\n\t\t\t\tpasses++\n\t\t\t}\n\t\tcase <-inctimer.After(10 * time.Millisecond):\n\t\t\tempty = true\n\t\t}\n\t}\n\treturn passes, drops\n}\n\n\/\/ StartAccessLogServer starts the access log server.\nfunc StartAccessLogServer(accessLogName string, bufSize int) *AccessLogServer {\n\taccessLogPath := filepath.Join(Tmpdir, accessLogName)\n\n\tserver := &AccessLogServer{\n\t\tPath: accessLogPath,\n\t\tLogs: make(chan cilium.EntryType, bufSize),\n\t}\n\n\t\/\/ Create the access log listener\n\tos.Remove(accessLogPath) \/\/ Remove\/Unlink the old unix domain socket, if any.\n\tvar err error\n\tserver.listener, err = net.ListenUnix(\"unixpacket\", &net.UnixAddr{Name: accessLogPath, Net: \"unixpacket\"})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open access log listen socket at %s: %v\", accessLogPath, err)\n\t}\n\tserver.listener.SetUnlinkOnClose(true)\n\n\t\/\/ Make the socket accessible by non-root Envoy proxies, e.g. running in\n\t\/\/ sidecar containers.\n\tif err = os.Chmod(accessLogPath, 0777); err != nil {\n\t\tlog.Fatalf(\"Failed to change mode of access log listen socket at %s: %v\", accessLogPath, err)\n\t}\n\n\tlog.Debug(\"Starting Access Log Server\")\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ Each Envoy listener opens a new connection over the Unix domain socket.\n\t\t\t\/\/ Multiple worker threads serving the listener share that same connection\n\t\t\tuc, err := server.listener.AcceptUnix()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ These errors are expected when we are closing down\n\t\t\t\tif atomic.LoadUint32(&server.closing) != 0 ||\n\t\t\t\t\terrors.Is(err, net.ErrClosed) ||\n\t\t\t\t\terrors.Is(err, syscall.EINVAL) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tlog.WithError(err).Warn(\"Failed to accept access log connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif atomic.LoadUint32(&server.closing) != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Debug(\"Accepted access log connection\")\n\n\t\t\tserver.conns = append(server.conns, uc)\n\t\t\t\/\/ Serve this access log socket in a goroutine, so we can serve multiple\n\t\t\t\/\/ connections concurrently.\n\t\t\tgo server.accessLogger(uc)\n\t\t}\n\t}()\n\n\treturn server\n}\n\n\/\/ isEOF returns true if the error message ends in \"EOF\". ReadMsgUnix returns extra info in the beginning.\nfunc isEOF(err error) bool {\n\tstrerr := err.Error()\n\terrlen := len(strerr)\n\treturn errlen >= 3 && strerr[errlen-3:] == io.EOF.Error()\n}\n\nfunc (s *AccessLogServer) accessLogger(conn *net.UnixConn) {\n\tdefer func() {\n\t\tlog.Debug(\"Closing access log connection\")\n\t\tconn.Close()\n\t}()\n\n\tbuf := make([]byte, 4096)\n\tfor {\n\t\tn, _, flags, _, err := conn.ReadMsgUnix(buf, nil)\n\t\tif err != nil {\n\t\t\tif !isEOF(err) && atomic.LoadUint32(&s.closing) == 0 {\n\t\t\t\tlog.WithError(err).Error(\"Error while reading from access log connection\")\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif flags&unix.MSG_TRUNC != 0 {\n\t\t\tlog.Warning(\"Discarded truncated access log message\")\n\t\t\tcontinue\n\t\t}\n\t\tpblog := cilium.LogEntry{}\n\t\terr = proto.Unmarshal(buf[:n], &pblog)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Discarded invalid access log message\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Access log message: %s\", pblog.String())\n\t\ts.Logs <- pblog.EntryType\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/couchbaselabs\/cbfs\/client\"\n\t\"github.com\/couchbaselabs\/cbfs\/tools\"\n\t\"github.com\/dustin\/httputil\"\n)\n\ntype findType int\n\nconst (\n\tfindTypeAny = findType(iota)\n\tfindTypeFile\n\tfindTypeDir\n)\n\nvar findFlags = flag.NewFlagSet(\"find\", flag.ExitOnError)\nvar findTemplate = findFlags.String(\"t\", \"\", \"Display template\")\nvar findTemplateFile = findFlags.String(\"T\", \"\", \"Display template filename\")\nvar findDashName = findFlags.String(\"name\", \"\", \"Glob name to match\")\nvar findDashIName = findFlags.String(\"iname\", \"\",\n\t\"Case insensitive glob name to match\")\nvar findDashMTime = findFlags.Duration(\"mtime\", 0, \"Find by mod time\")\n\nvar findDashType findType\n\nconst defaultFindTemplate = `{{.Name}}\n`\n\nfunc (t findType) String() string {\n\tswitch t {\n\tcase findTypeAny:\n\t\treturn \"\"\n\tcase findTypeFile:\n\t\treturn \"f\"\n\tcase findTypeDir:\n\t\treturn \"d\"\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (t *findType) Set(s string) error {\n\tswitch s {\n\tcase \"\":\n\t\t*t = findTypeAny\n\tcase \"f\":\n\t\t*t = findTypeFile\n\tcase \"d\":\n\t\t*t = findTypeDir\n\tdefault:\n\t\treturn fmt.Errorf(\"must be 'f' or 'd'\")\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfindFlags.Var(&findDashType, \"type\", \"Type to match (f or d)\")\n}\n\ntype dirAndFileMatcher struct {\n\tm map[string]struct{}\n\tp string\n\ti bool \/\/ if true, case insensitive\n}\n\nfunc newDirAndFileMatcher() dirAndFileMatcher {\n\trv := dirAndFileMatcher{map[string]struct{}{}, *findDashName, false}\n\tif *findDashIName != \"\" {\n\t\trv.i = true\n\t\trv.p = strings.ToLower(*findDashIName)\n\t}\n\n\treturn rv\n}\n\ntype findMatch struct {\n\tpath string\n\tisDir bool\n}\n\nfunc (d dirAndFileMatcher) match(name string, isdir bool) bool {\n\tswitch findDashType {\n\tcase findTypeAny:\n\tcase findTypeFile:\n\t\tif isdir {\n\t\t\treturn false\n\t\t}\n\tcase findTypeDir:\n\t\tif !isdir {\n\t\t\treturn false\n\t\t}\n\t}\n\tm := name\n\tif d.i {\n\t\tm = strings.ToLower(m)\n\t}\n\tmatched, err := filepath.Match(d.p, m)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error globbing: %v\", err)\n\t}\n\treturn matched\n}\n\nfunc (d dirAndFileMatcher) matches(name string) []findMatch {\n\tif d.p == \"\" {\n\t\treturn []findMatch{{name, false}}\n\t}\n\tvar matches []findMatch\n\n\tdir := filepath.Dir(name)\n\tfor dir != \".\" {\n\t\tif _, seen := d.m[dir]; !seen {\n\t\t\td.m[dir] = struct{}{}\n\t\t\tif d.match(filepath.Base(dir), true) {\n\t\t\t\tmatches = append(matches, findMatch{dir, true})\n\t\t\t}\n\t\t}\n\t\tdir = filepath.Dir(dir)\n\t}\n\t\/\/ Reverse these so the traversal order makes sense\n\tfor i := 0; i < len(matches)\/2; i++ {\n\t\tj := len(matches) - i - 1\n\t\tmatches[i], matches[j] = matches[j], matches[i]\n\t}\n\n\tif d.match(filepath.Base(name), false) {\n\t\tmatches = append(matches, findMatch{name, false})\n\t}\n\treturn matches\n}\n\nfunc findMetaMatch(ref func(time.Time) bool, c cbfsclient.FileMeta) bool {\n\treturn true\n}\n\nfunc findGetRefTimeMatch(now time.Time) func(time.Time) bool {\n\tswitch {\n\tcase *findDashMTime > 0:\n\t\treturn now.Add(-*findDashMTime).Before\n\tcase *findDashMTime < 0:\n\t\treturn now.Add(*findDashMTime).After\n\t}\n\treturn func(time.Time) bool { return true }\n}\n\nfunc findCommand(u string, args []string) {\n\tif *findDashName != \"\" && *findDashIName != \"\" {\n\t\tlog.Fatalf(\"Can't specify both -name and -iname\")\n\t}\n\tsrc := findFlags.Arg(0)\n\tfor src[len(src)-1] == '\/' {\n\t\tsrc = src[:len(src)-1]\n\t}\n\n\ttmpl := cbfstool.GetTemplate(*findTemplate, *findTemplateFile,\n\t\tdefaultFindTemplate)\n\n\thttputil.InitHTTPTracker(false)\n\n\tclient, err := cbfsclient.New(u)\n\tcbfstool.MaybeFatal(err, \"Can't build a client: %v\", err)\n\n\tthings, err := client.ListDepth(src, 4096)\n\tcbfstool.MaybeFatal(err, \"Can't list things: %v\", err)\n\n\tmetaMatcher := findGetRefTimeMatch(time.Now())\n\tmatcher := newDirAndFileMatcher()\n\tfor fn, inf := range things.Files {\n\t\tif !metaMatcher(inf.Modified) {\n\t\t\tcontinue\n\t\t}\n\t\tfn = fn[len(src)+1:]\n\t\tfor _, match := range matcher.matches(fn) {\n\t\t\tif err := tmpl.Execute(os.Stdout, struct {\n\t\t\t\tName string\n\t\t\t\tIsDir bool\n\t\t\t\tMeta cbfsclient.FileMeta\n\t\t\t}{match.path, match.isDir, inf}); err != nil {\n\t\t\t\tlog.Fatalf(\"Error executing template: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\nAdded find -depth, which seems to demonstate a bug. :(package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/couchbaselabs\/cbfs\/client\"\n\t\"github.com\/couchbaselabs\/cbfs\/tools\"\n\t\"github.com\/dustin\/httputil\"\n)\n\ntype findType int\n\nconst (\n\tfindTypeAny = findType(iota)\n\tfindTypeFile\n\tfindTypeDir\n)\n\nvar findFlags = flag.NewFlagSet(\"find\", flag.ExitOnError)\nvar findTemplate = findFlags.String(\"t\", \"\", \"Display template\")\nvar findTemplateFile = findFlags.String(\"T\", \"\", \"Display template filename\")\nvar findDashName = findFlags.String(\"name\", \"\", \"Glob name to match\")\nvar findDashIName = findFlags.String(\"iname\", \"\",\n\t\"Case insensitive glob name to match\")\nvar findDashMTime = findFlags.Duration(\"mtime\", 0, \"Find by mod time\")\nvar findDashDepth = findFlags.Int(\"depth\", 4096, \"Maximum search depth\")\n\nvar findDashType findType\n\nconst defaultFindTemplate = `{{.Name}}\n`\n\nfunc (t findType) String() string {\n\tswitch t {\n\tcase findTypeAny:\n\t\treturn \"\"\n\tcase findTypeFile:\n\t\treturn \"f\"\n\tcase findTypeDir:\n\t\treturn \"d\"\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (t *findType) Set(s string) error {\n\tswitch s {\n\tcase \"\":\n\t\t*t = findTypeAny\n\tcase \"f\":\n\t\t*t = findTypeFile\n\tcase \"d\":\n\t\t*t = findTypeDir\n\tdefault:\n\t\treturn fmt.Errorf(\"must be 'f' or 'd'\")\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfindFlags.Var(&findDashType, \"type\", \"Type to match (f or d)\")\n}\n\ntype dirAndFileMatcher struct {\n\tm map[string]struct{}\n\tp string\n\ti bool \/\/ if true, case insensitive\n}\n\nfunc newDirAndFileMatcher() dirAndFileMatcher {\n\trv := dirAndFileMatcher{map[string]struct{}{}, *findDashName, false}\n\tif *findDashIName != \"\" {\n\t\trv.i = true\n\t\trv.p = strings.ToLower(*findDashIName)\n\t}\n\n\treturn rv\n}\n\ntype findMatch struct {\n\tpath string\n\tisDir bool\n}\n\nfunc (d dirAndFileMatcher) match(name string, isdir bool) bool {\n\tswitch findDashType {\n\tcase findTypeAny:\n\tcase findTypeFile:\n\t\tif isdir {\n\t\t\treturn false\n\t\t}\n\tcase findTypeDir:\n\t\tif !isdir {\n\t\t\treturn false\n\t\t}\n\t}\n\tm := name\n\tif d.i {\n\t\tm = strings.ToLower(m)\n\t}\n\tmatched, err := filepath.Match(d.p, m)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error globbing: %v\", err)\n\t}\n\treturn matched\n}\n\nfunc (d dirAndFileMatcher) matches(name string) []findMatch {\n\tif d.p == \"\" {\n\t\treturn []findMatch{{name, false}}\n\t}\n\tvar matches []findMatch\n\n\tdir := filepath.Dir(name)\n\tfor dir != \".\" {\n\t\tif _, seen := d.m[dir]; !seen {\n\t\t\td.m[dir] = struct{}{}\n\t\t\tif d.match(filepath.Base(dir), true) {\n\t\t\t\tmatches = append(matches, findMatch{dir, true})\n\t\t\t}\n\t\t}\n\t\tdir = filepath.Dir(dir)\n\t}\n\t\/\/ Reverse these so the traversal order makes sense\n\tfor i := 0; i < len(matches)\/2; i++ {\n\t\tj := len(matches) - i - 1\n\t\tmatches[i], matches[j] = matches[j], matches[i]\n\t}\n\n\tif d.match(filepath.Base(name), false) {\n\t\tmatches = append(matches, findMatch{name, false})\n\t}\n\treturn matches\n}\n\nfunc findMetaMatch(ref func(time.Time) bool, c cbfsclient.FileMeta) bool {\n\treturn true\n}\n\nfunc findGetRefTimeMatch(now time.Time) func(time.Time) bool {\n\tswitch {\n\tcase *findDashMTime > 0:\n\t\treturn now.Add(-*findDashMTime).Before\n\tcase *findDashMTime < 0:\n\t\treturn now.Add(*findDashMTime).After\n\t}\n\treturn func(time.Time) bool { return true }\n}\n\nfunc findCommand(u string, args []string) {\n\tif *findDashName != \"\" && *findDashIName != \"\" {\n\t\tlog.Fatalf(\"Can't specify both -name and -iname\")\n\t}\n\tsrc := findFlags.Arg(0)\n\tfor src[len(src)-1] == '\/' {\n\t\tsrc = src[:len(src)-1]\n\t}\n\n\ttmpl := cbfstool.GetTemplate(*findTemplate, *findTemplateFile,\n\t\tdefaultFindTemplate)\n\n\thttputil.InitHTTPTracker(false)\n\n\tclient, err := cbfsclient.New(u)\n\tcbfstool.MaybeFatal(err, \"Can't build a client: %v\", err)\n\n\tthings, err := client.ListDepth(src, *findDashDepth)\n\tcbfstool.MaybeFatal(err, \"Can't list things: %v\", err)\n\n\tmetaMatcher := findGetRefTimeMatch(time.Now())\n\tmatcher := newDirAndFileMatcher()\n\tfor fn, inf := range things.Files {\n\t\tif !metaMatcher(inf.Modified) {\n\t\t\tcontinue\n\t\t}\n\t\tfn = fn[len(src)+1:]\n\t\tfor _, match := range matcher.matches(fn) {\n\t\t\tif err := tmpl.Execute(os.Stdout, struct {\n\t\t\t\tName string\n\t\t\t\tIsDir bool\n\t\t\t\tMeta cbfsclient.FileMeta\n\t\t\t}{match.path, match.isDir, inf}); err != nil {\n\t\t\t\tlog.Fatalf(\"Error executing template: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2012, The gohg Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD style license\n\/\/ that can be found in the LICENSE.txt file.\n\n\/\/ This program is an example of how you can use the gohg library.\n\npackage main\n\nimport (\n\t. \"bitbucket.org\/gohg\/gohg\"\n\t\"fmt\"\n\t\"log\"\n)\n\nfunc main() {\n\t\/\/ Set var hgexe to whatever is appropriate for your situation.\n\t\/\/ You can also change it to test with different versions of Mercurial.\n\thgexe := \"hg\"\n\trepo := \".\"\n\tvar cfg []string\n\n\thgcl := NewHgClient()\n\tif err := hgcl.Connect(hgexe, repo, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer hgcl.Disconnect()\n\n\thc, _ := NewHgCmd(\"log\")\n\to := make([]Option, 2)\n\tvar lim Limit = 2\n\to[0] = lim\n\tvar verb Verbose = true\n\to[1] = verb\n\thc.SetOptions(o)\n\t\/\/ hc.SetParams([]string{\"\\\"my param\\\"\"})\n\tcl, _ := hc.CmdLine(hgcl)\n\tfmt.Printf(\"%s\\n\", cl)\n\tres, _ := hc.Exec(hgcl)\n\tfmt.Printf(\"result:\\n%s\", string(res))\n}\nexample2: corrected call to NewHgCmd()\/\/ Copyright 2012, The gohg Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD style license\n\/\/ that can be found in the LICENSE.txt file.\n\n\/\/ This program is an example of how you can use the gohg library.\n\npackage main\n\nimport (\n\t. \"bitbucket.org\/gohg\/gohg\"\n\t\"fmt\"\n\t\"log\"\n)\n\nfunc main() {\n\t\/\/ Set var hgexe to whatever is appropriate for your situation.\n\t\/\/ You can also change it to test with different versions of Mercurial.\n\thgexe := \"hg\"\n\trepo := \".\"\n\tvar cfg []string\n\n\thgcl := NewHgClient()\n\tif err := hgcl.Connect(hgexe, repo, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer hgcl.Disconnect()\n\n\thc, _ := NewHgCmd(\"log\", nil, nil)\n\to := make([]Option, 2)\n\tvar lim Limit = 2\n\to[0] = lim\n\tvar verb Verbose = true\n\to[1] = verb\n\thc.SetOptions(o)\n\t\/\/ hc.SetParams([]string{\"\\\"my param\\\"\"})\n\tcl, _ := hc.CmdLine(hgcl)\n\tfmt.Printf(\"%s\\n\", cl)\n\tres, _ := hc.Exec(hgcl)\n\tfmt.Printf(\"result:\\n%s\", string(res))\n}\n<|endoftext|>"} {"text":"package eventapi\n\nimport (\n\t\"github.com\/agoravoting\/authapi\/middleware\"\n\ts \"github.com\/agoravoting\/authapi\/server\"\n\t\"github.com\/agoravoting\/authapi\/util\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\/\/ \t\"net\/http\/httputil\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\nconst (\n\tSESSION_EXPIRE = 3600\n)\n\ntype EventApi struct {\n\trouter *httprouter.Router\n\tname string\n\n\tinsertStmt *sqlx.NamedStmt\n\tgetStmt *sqlx.Stmt\n}\n\nfunc (ea *EventApi) Name() string {\n\treturn ea.name\n}\n\nfunc (ea *EventApi) Init() (err error) {\n\t\/\/ setup the routes\n\tea.router = httprouter.New()\n\tea.router.GET(\"\/\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.list),\n\t\ts.Server.CheckPerms(\"superuser\", SESSION_EXPIRE)))\n\tea.router.POST(\"\/\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.post),\n\t\ts.Server.CheckPerms(\"superuser\", SESSION_EXPIRE)))\n\n\tea.router.GET(\"\/:id\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.get),\n\t\ts.Server.CheckPerms(\"(superuser|admin-auth-event-${id})\", SESSION_EXPIRE)))\n\n\t\/\/ setup prepared sql queries\n\tif ea.insertStmt, err = s.Server.Db.PrepareNamed(\"INSERT INTO event (name, auth_method, auth_method_config) VALUES (:name, :auth_method, :auth_method_config) RETURNING id\"); err != nil {\n\t\treturn\n\t}\n\n\tif ea.getStmt, err = s.Server.Db.Preparex(\"SELECT * FROM event WHERE id = $1\"); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ add the routes to the server\n\thandler := negroni.New(negroni.Wrap(ea.router))\n\ts.Server.Mux.OnMux(\"api\/v1\/event\", handler)\n\treturn\n}\n\n\/\/ lists the available events\nfunc (ea *EventApi) list(w http.ResponseWriter, r *http.Request, _ httprouter.Params) *middleware.HandledError {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Hello world!\"))\n\treturn nil\n}\n\n\/\/ returns an event\nfunc (ea *EventApi) get(w http.ResponseWriter, r *http.Request, p httprouter.Params) *middleware.HandledError {\n\tvar (\n\t\te []Event\n\t\terr error\n\t\tid int\n\t)\n\tif id, err := strconv.ParseInt(p.ByName(\"id\"), 10, 32); err != nil || id <= 0 {\n\t\treturn &middleware.HandledError{err, 400, \"Invalid id format\", \"invalid-format\"}\n\t}\n\n\tif err = ea.getStmt.Select(&e, id); err != nil {\n\t\treturn &middleware.HandledError{err, 500, \"Database error\", \"error-select\"}\n\t}\n\n\tif len(e) == 0 {\n\t\treturn &middleware.HandledError{err, 404, \"Not found\", \"not-found\"}\n\t}\n\n\tb, err := e[0].Marshal()\n\tif err != nil {\n\t\treturn &middleware.HandledError{err, 500, \"Error marshalling the data\", \"marshall-error\"}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n\treturn nil\n}\n\n\/\/ parses an event from a request.\n\/\/ TODO: generalize and move to utils pkg\nfunc parseEvent(r *http.Request) (e Event, err error) {\n\t\/\/ \trb, err := httputil.DumpRequest(r, true)\n\tif err != nil {\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(r.Body)\n\terr = decoder.Decode(&e)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ add a new event\nfunc (ea *EventApi) post(w http.ResponseWriter, r *http.Request, _ httprouter.Params) *middleware.HandledError {\n\tvar (\n\t\ttx = s.Server.Db.MustBegin()\n\t\tevent Event\n\t\tid int\n\t\terr error\n\t)\n\tevent, err = parseEvent(r)\n\tif err != nil {\n\t\treturn &middleware.HandledError{err, 400, \"Invalid json-encoded event\", \"invalid-json\"}\n\t}\n\tevent_json, err := event.Json()\n\tif err != nil {\n\t\treturn &middleware.HandledError{err, 500, \"Error re-writing the data to json\", \"error-json-encode\"}\n\t}\n\n\tif err = tx.NamedStmt(ea.insertStmt).QueryRowx(event_json).Scan(&id); err != nil {\n\t\ttx.Rollback()\n\t\treturn &middleware.HandledError{err, 500, \"Error inserting the event\", \"error-insert\"}\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn &middleware.HandledError{err, 500, \"Error comitting the event\", \"error-commit\"}\n\t}\n\n\t\/\/ return id\n\tif err = util.WriteIdJson(w, id); err != nil {\n\t\treturn &middleware.HandledError{err, 500, \"Error returing the id\", \"error-return\"}\n\t}\n\treturn nil\n}\n\n\/\/ add the modules to available modules on startup\nfunc init() {\n\ts.Server.AvailableModules = append(s.Server.AvailableModules, &EventApi{name: \"github.com\/agoravoting\/authapi\/eventapi\"})\n}\nfixing composite literal uses unkeyed fields with HandledError, flagged by go vet toolpackage eventapi\n\nimport (\n\t\"github.com\/agoravoting\/authapi\/middleware\"\n\ts \"github.com\/agoravoting\/authapi\/server\"\n\t\"github.com\/agoravoting\/authapi\/util\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\/\/ \t\"net\/http\/httputil\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\nconst (\n\tSESSION_EXPIRE = 3600\n)\n\ntype EventApi struct {\n\trouter *httprouter.Router\n\tname string\n\n\tinsertStmt *sqlx.NamedStmt\n\tgetStmt *sqlx.Stmt\n}\n\nfunc (ea *EventApi) Name() string {\n\treturn ea.name\n}\n\nfunc (ea *EventApi) Init() (err error) {\n\t\/\/ setup the routes\n\tea.router = httprouter.New()\n\tea.router.GET(\"\/\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.list),\n\t\ts.Server.CheckPerms(\"superuser\", SESSION_EXPIRE)))\n\tea.router.POST(\"\/\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.post),\n\t\ts.Server.CheckPerms(\"superuser\", SESSION_EXPIRE)))\n\n\tea.router.GET(\"\/:id\", middleware.Join(\n\t\ts.Server.ErrorWrap.Do(ea.get),\n\t\ts.Server.CheckPerms(\"(superuser|admin-auth-event-${id})\", SESSION_EXPIRE)))\n\n\t\/\/ setup prepared sql queries\n\tif ea.insertStmt, err = s.Server.Db.PrepareNamed(\"INSERT INTO event (name, auth_method, auth_method_config) VALUES (:name, :auth_method, :auth_method_config) RETURNING id\"); err != nil {\n\t\treturn\n\t}\n\n\tif ea.getStmt, err = s.Server.Db.Preparex(\"SELECT * FROM event WHERE id = $1\"); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ add the routes to the server\n\thandler := negroni.New(negroni.Wrap(ea.router))\n\ts.Server.Mux.OnMux(\"api\/v1\/event\", handler)\n\treturn\n}\n\n\/\/ lists the available events\nfunc (ea *EventApi) list(w http.ResponseWriter, r *http.Request, _ httprouter.Params) *middleware.HandledError {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Hello world!\"))\n\treturn nil\n}\n\n\/\/ returns an event\nfunc (ea *EventApi) get(w http.ResponseWriter, r *http.Request, p httprouter.Params) *middleware.HandledError {\n\tvar (\n\t\te []Event\n\t\terr error\n\t\tid int\n\t)\n\tif id, err := strconv.ParseInt(p.ByName(\"id\"), 10, 32); err != nil || id <= 0 {\n\t\treturn &middleware.HandledError{Err: err, Code: 400, Message: \"Invalid id format\", CodedMessage: \"invalid-format\"}\n\t}\n\n\tif err = ea.getStmt.Select(&e, id); err != nil {\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Database error\", CodedMessage: \"error-select\"}\n\t}\n\n\tif len(e) == 0 {\n\t\treturn &middleware.HandledError{Err: err, Code: 404, Message: \"Not found\", CodedMessage: \"not-found\"}\n\t}\n\n\tb, err := e[0].Marshal()\n\tif err != nil {\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Error marshalling the data\", CodedMessage: \"marshall-error\"}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n\treturn nil\n}\n\n\/\/ parses an event from a request.\n\/\/ TODO: generalize and move to utils pkg\nfunc parseEvent(r *http.Request) (e Event, err error) {\n\t\/\/ \trb, err := httputil.DumpRequest(r, true)\n\tif err != nil {\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(r.Body)\n\terr = decoder.Decode(&e)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ add a new event\nfunc (ea *EventApi) post(w http.ResponseWriter, r *http.Request, _ httprouter.Params) *middleware.HandledError {\n\tvar (\n\t\ttx = s.Server.Db.MustBegin()\n\t\tevent Event\n\t\tid int\n\t\terr error\n\t)\n\tevent, err = parseEvent(r)\n\tif err != nil {\n\t\treturn &middleware.HandledError{Err: err, Code: 400, Message: \"Invalid json-encoded event\", CodedMessage: \"invalid-json\"}\n\t}\n\tevent_json, err := event.Json()\n\tif err != nil {\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Error re-writing the data to json\", CodedMessage: \"error-json-encode\"}\n\t}\n\n\tif err = tx.NamedStmt(ea.insertStmt).QueryRowx(event_json).Scan(&id); err != nil {\n\t\ttx.Rollback()\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Error inserting the event\", CodedMessage: \"error-insert\"}\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Error comitting the event\", CodedMessage: \"error-commit\"}\n\t}\n\n\t\/\/ return id\n\tif err = util.WriteIdJson(w, id); err != nil {\n\t\treturn &middleware.HandledError{Err: err, Code: 500, Message: \"Error returing the id\", CodedMessage: \"error-return\"}\n\t}\n\treturn nil\n}\n\n\/\/ add the modules to available modules on startup\nfunc init() {\n\ts.Server.AvailableModules = append(s.Server.AvailableModules, &EventApi{name: \"github.com\/agoravoting\/authapi\/eventapi\"})\n}\n<|endoftext|>"} {"text":"package eventsocketclient\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar d websocket.Dialer\n\nfunc init() {\n\td = websocket.Dialer{\n\t\tNetDial: nil,\n\t\tTLSClientConfig: nil,\n\t\tHandshakeTimeout: time.Second * 5,\n\t\tReadBufferSize: 4096,\n\t\tWriteBufferSize: 4096,\n\t}\n}\n\n\/\/ Client is the main eventsocket client\ntype Client struct {\n\t\/\/ The client Id, provided by the server\n\tId string `json:\"Id\"`\n\n\t\/\/ channels for receiving messages\n\tRecvBroadcast chan *Received `json:\"-\"`\n\tRecvRequest chan *Received `json:\"-\"`\n\tRecvError chan error `json:\"-\"`\n\n\t\/\/ the URL to the server\n\turl string\n\n\t\/\/ the actual websocket connection, after we've connected\n\tws *websocket.Conn\n\n\t\/\/ a mapping of all requests that are currently inflight\n\tliveRequests map[string]chan *Received\n\n\t\/\/ a mapping of the subscription return channels\n\tsubscriptions map[string]chan *Received\n}\n\n\/\/ NewClient registers a new client with the server\nfunc NewClient(url string) (*Client, error) {\n\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/v1\/clients\", url), \"application\/json\", strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tclient := Client{\n\t\turl: url,\n\t\tliveRequests: make(map[string]chan *Received),\n\t\tsubscriptions: make(map[string]chan *Received),\n\t\tRecvBroadcast: make(chan *Received),\n\t\tRecvRequest: make(chan *Received),\n\t\tRecvError: make(chan error),\n\t}\n\n\t\/\/ unmarshal the response body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjson.Unmarshal(body, &client)\n\n\treturn &client, nil\n}\n\n\/\/ DialWs will dial the websocket server, and get a websocket connection in return\nfunc (client *Client) DialWs() error {\n\theaders := http.Header{}\n\tws, _, err := d.Dial(fmt.Sprintf(\"ws:\/\/%s\/v1\/clients\/%s\/ws\", client.url, client.Id), headers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.ws = ws\n\n\treturn nil\n}\n\n\/\/ Reconnect attempts a reconnect with the server\nfunc (client *Client) Reconnect() error {\n\tclient.ws.Close()\n\n\treturn client.DialWs()\n}\n\n\/\/ Close closes the connections\nfunc (client *Client) Close() {\n\tclient.ws.Close()\n}\n\n\/\/ Recv reads from the socket\nfunc (client *Client) Recv() error {\n\tfor {\n\t\tm := &Message{}\n\t\tclient.SetReadDeadline(30 * time.Second)\n\t\tif err := client.ws.ReadJSON(m); err != nil {\n\t\t\t\/\/ TODO: something other than panic\n\t\t\tclient.RecvError <- err\n\t\t\tcontinue\n\t\t}\n\n\t\tr := &Received{\n\t\t\tMessage: m,\n\t\t\tErr: nil,\n\t\t}\n\n\t\tswitch m.MessageType {\n\t\tcase MESSAGE_TYPE_BROADCAST:\n\t\t\tclient.RecvBroadcast <- r\n\t\tcase MESSAGE_TYPE_STANDARD:\n\t\t\tclient.subscriptions[r.Message.Event] <- r\n\t\tcase MESSAGE_TYPE_REQUEST:\n\t\t\tclient.RecvRequest <- r\n\t\tcase MESSAGE_TYPE_REPLY:\n\t\t\tclient.liveRequests[m.RequestId] <- r\n\t\t\tclose(client.liveRequests[m.RequestId])\n\t\t\tdelete(client.liveRequests, m.RequestId)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Broadcast a payload\nfunc (client *Client) Broadcast(p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_BROADCAST,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n}\n\n\/\/ Emit an event\nfunc (client *Client) Emit(event string, p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_STANDARD,\n\t\tEvent: event,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n}\n\n\/\/ Suscribe to an event(s)\nfunc (client *Client) Suscribe(events ...string) (<-chan *Received, error) {\n\tp := NewPayload()\n\tp[\"Events\"] = events\n\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_SUSCRIBE,\n\t\tPayload: &p,\n\t}\n\n\tif err := client.write(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create the return channel\n\tc := make(chan *Received)\n\tfor _, event := range events {\n\t\tclient.subscriptions[event] = c\n\t}\n\n\treturn c, nil\n}\n\n\/\/ Request will execute a request against a client\nfunc (client *Client) Request(id string, p *Payload) (<-chan *Received, error) {\n\trequestID := makeUuid()\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_REQUEST,\n\t\tRequestId: requestID,\n\t\tRequestClientId: id,\n\t\tPayload: p,\n\t}\n\n\tif err := client.write(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := make(chan *Received)\n\tclient.liveRequests[requestID] = c\n\treturn c, nil\n}\n\n\/\/ Reply sends a reply to a request\nfunc (client *Client) Reply(requestID, cid string, p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_REPLY,\n\t\tReplyClientId: cid,\n\t\tRequestId: requestID,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n\n}\n\n\/\/ write a message to the socket\nfunc (client *Client) write(m *Message) error {\n\treturn client.ws.WriteJSON(m)\n}\n\n\/\/ SetMaxMessageSize sets the max message size. Note: this is not the max frame size\nfunc (client *Client) SetMaxMessageSize(limit int64) {\n\tclient.ws.SetReadLimit(limit)\n}\n\n\/\/ SetReadDeadline sets the deadline to receive a response on the socket\nfunc (client *Client) SetReadDeadline(t time.Duration) {\n\tclient.ws.SetReadDeadline(time.Now().Add(t))\n}\nRemove 30 second ReadDeadlinepackage eventsocketclient\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar d websocket.Dialer\n\nfunc init() {\n\td = websocket.Dialer{\n\t\tNetDial: nil,\n\t\tTLSClientConfig: nil,\n\t\tHandshakeTimeout: time.Second * 5,\n\t\tReadBufferSize: 4096,\n\t\tWriteBufferSize: 4096,\n\t}\n}\n\n\/\/ Client is the main eventsocket client\ntype Client struct {\n\t\/\/ The client Id, provided by the server\n\tId string `json:\"Id\"`\n\n\t\/\/ channels for receiving messages\n\tRecvBroadcast chan *Received `json:\"-\"`\n\tRecvRequest chan *Received `json:\"-\"`\n\tRecvError chan error `json:\"-\"`\n\n\t\/\/ the URL to the server\n\turl string\n\n\t\/\/ the actual websocket connection, after we've connected\n\tws *websocket.Conn\n\n\t\/\/ a mapping of all requests that are currently inflight\n\tliveRequests map[string]chan *Received\n\n\t\/\/ a mapping of the subscription return channels\n\tsubscriptions map[string]chan *Received\n}\n\n\/\/ NewClient registers a new client with the server\nfunc NewClient(url string) (*Client, error) {\n\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/v1\/clients\", url), \"application\/json\", strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tclient := Client{\n\t\turl: url,\n\t\tliveRequests: make(map[string]chan *Received),\n\t\tsubscriptions: make(map[string]chan *Received),\n\t\tRecvBroadcast: make(chan *Received),\n\t\tRecvRequest: make(chan *Received),\n\t\tRecvError: make(chan error),\n\t}\n\n\t\/\/ unmarshal the response body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjson.Unmarshal(body, &client)\n\n\treturn &client, nil\n}\n\n\/\/ DialWs will dial the websocket server, and get a websocket connection in return\nfunc (client *Client) DialWs() error {\n\theaders := http.Header{}\n\tws, _, err := d.Dial(fmt.Sprintf(\"ws:\/\/%s\/v1\/clients\/%s\/ws\", client.url, client.Id), headers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.ws = ws\n\n\treturn nil\n}\n\n\/\/ Reconnect attempts a reconnect with the server\nfunc (client *Client) Reconnect() error {\n\tclient.ws.Close()\n\n\treturn client.DialWs()\n}\n\n\/\/ Close closes the connections\nfunc (client *Client) Close() {\n\tclient.ws.Close()\n}\n\n\/\/ Recv reads from the socket\nfunc (client *Client) Recv() error {\n\tfor {\n\t\tm := &Message{}\n\t\tif err := client.ws.ReadJSON(m); err != nil {\n\t\t\t\/\/ TODO: something other than panic\n\t\t\tclient.RecvError <- err\n\t\t\tcontinue\n\t\t}\n\n\t\tr := &Received{\n\t\t\tMessage: m,\n\t\t\tErr: nil,\n\t\t}\n\n\t\tswitch m.MessageType {\n\t\tcase MESSAGE_TYPE_BROADCAST:\n\t\t\tclient.RecvBroadcast <- r\n\t\tcase MESSAGE_TYPE_STANDARD:\n\t\t\tclient.subscriptions[r.Message.Event] <- r\n\t\tcase MESSAGE_TYPE_REQUEST:\n\t\t\tclient.RecvRequest <- r\n\t\tcase MESSAGE_TYPE_REPLY:\n\t\t\tclient.liveRequests[m.RequestId] <- r\n\t\t\tclose(client.liveRequests[m.RequestId])\n\t\t\tdelete(client.liveRequests, m.RequestId)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Broadcast a payload\nfunc (client *Client) Broadcast(p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_BROADCAST,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n}\n\n\/\/ Emit an event\nfunc (client *Client) Emit(event string, p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_STANDARD,\n\t\tEvent: event,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n}\n\n\/\/ Suscribe to an event(s)\nfunc (client *Client) Suscribe(events ...string) (<-chan *Received, error) {\n\tp := NewPayload()\n\tp[\"Events\"] = events\n\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_SUSCRIBE,\n\t\tPayload: &p,\n\t}\n\n\tif err := client.write(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create the return channel\n\tc := make(chan *Received)\n\tfor _, event := range events {\n\t\tclient.subscriptions[event] = c\n\t}\n\n\treturn c, nil\n}\n\n\/\/ Request will execute a request against a client\nfunc (client *Client) Request(id string, p *Payload) (<-chan *Received, error) {\n\trequestID := makeUuid()\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_REQUEST,\n\t\tRequestId: requestID,\n\t\tRequestClientId: id,\n\t\tPayload: p,\n\t}\n\n\tif err := client.write(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := make(chan *Received)\n\tclient.liveRequests[requestID] = c\n\treturn c, nil\n}\n\n\/\/ Reply sends a reply to a request\nfunc (client *Client) Reply(requestID, cid string, p *Payload) error {\n\tm := &Message{\n\t\tMessageType: MESSAGE_TYPE_REPLY,\n\t\tReplyClientId: cid,\n\t\tRequestId: requestID,\n\t\tPayload: p,\n\t}\n\n\treturn client.write(m)\n\n}\n\n\/\/ write a message to the socket\nfunc (client *Client) write(m *Message) error {\n\treturn client.ws.WriteJSON(m)\n}\n\n\/\/ SetMaxMessageSize sets the max message size. Note: this is not the max frame size\nfunc (client *Client) SetMaxMessageSize(limit int64) {\n\tclient.ws.SetReadLimit(limit)\n}\n\n\/\/ SetReadDeadline sets the deadline to receive a response on the socket\nfunc (client *Client) SetReadDeadline(t time.Duration) {\n\tclient.ws.SetReadDeadline(time.Now().Add(t))\n}\n<|endoftext|>"} {"text":"package algorithm\n\n\/\/SkipList\n\/\/author:Xiong Chuan Liang\n\/\/date:2014-1-27\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n)\n\nconst SKIPLIST_MAXLEVEL = 8 \/\/32\nconst SKIPLIST_P = 4\n\ntype SkipList struct {\n\tHeader []List\n\tLevel int\n}\n\nfunc NewSkipList() *SkipList {\n\treturn &SkipList{Level: 1, Header: make([]List, SKIPLIST_MAXLEVEL)}\n\n}\n\nfunc (skipList *SkipList) Insert(key int) {\n\tupdate := make(map[int]*Node)\n\n\tfor i := len(skipList.Header) - 1; i >= 0; i-- {\n\t\tif skipList.Header[i].Len() > 0 {\n\t\t\tfor e := skipList.Header[i].Front(); e != nil; e = e.Next() {\n\t\t\t\tif e.Value.(int) >= key {\n\t\t\t\t\tupdate[i] = e\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t} \/\/Heaer[lv].List\n\t} \/\/Header Level\n\n\tlevel := skipList.Random_level()\n\tif level > skipList.Level {\n\t\tskipList.Level = level\n\t}\n\n\tfor i := 0; i < level; i++ {\n\t\tif v, ok := update[i]; ok {\n\t\t\tskipList.Header[i].InsertBefore(key, v)\n\t\t} else {\n\t\t\tskipList.Header[i].PushBack(key)\n\t\t}\n\t}\n\n}\n\nfunc (skipList *SkipList) Search(key int) *Node {\n\n\tfor i := len(skipList.Header) - 1; i >= 0; i-- {\n\t\tif skipList.Header[i].Len() > 0 {\n\t\t\tfor e := skipList.Header[i].Front(); e != nil; e = e.Next() {\n\t\t\t\tswitch {\n\t\t\t\tcase e.Value.(int) == key:\n\t\t\t\t\tfmt.Println(\"Found level=\", i, \" key=\", key)\n\t\t\t\t\treturn e\n\t\t\t\tcase e.Value.(int) > key:\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} \/\/end for\n\n\t\t} \/\/end if\n\n\t} \/\/end for\n\treturn nil\n}\n\nfunc (skipList *SkipList) Delete(key int) {\n\tfor i := len(skipList.Header) - 1; i >= 0; i-- {\n\t\tif skipList.Header[i].Len() > 0 {\n\t\t\tfor e := skipList.Header[i].Front(); e != nil; e = e.Next() {\n\t\t\t\tswitch {\n\t\t\t\tcase e.Value.(int) == key:\n\t\t\t\t\tfmt.Println(\"Delete level=\", i, \" key=\", key)\n\t\t\t\t\tskipList.Header[i].remove(e)\n\n\t\t\t\tcase e.Value.(int) > key:\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} \/\/end for\n\n\t\t} \/\/end if\n\n\t} \/\/end for\n\n}\n\nfunc (skipList *SkipList) PrintSkipList() {\n\tfmt.Println(\"\\nSkipList-------------------------------------------\")\n\tfor i := SKIPLIST_MAXLEVEL - 1; i >= 0; i-- {\n\t\tfmt.Println(\"level:\", i)\n\t\tif skipList.Header[i].Len() > 0 {\n\t\t\tfor e := skipList.Header[i].Front(); e != nil; e = e.Next() {\n\t\t\t\tfmt.Printf(\"%d \", e.Value)\n\t\t\t} \/\/end for\n\t\t} \/\/end if\n\t\tfmt.Println(\"\\n--------------------------------------------------------\")\n\t} \/\/end for\n}\n\nfunc (skipList *SkipList) Random_level() int {\n\n\tlevel := 1\n\tfor (rand.Int31()&0xFFFF)%SKIPLIST_P == 0 {\n\t\tlevel += 1\n\t}\n\tif level < SKIPLIST_MAXLEVEL {\n\t\treturn level\n\t} else {\n\t\treturn SKIPLIST_MAXLEVEL\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype Node struct {\n\tnext, prev *Node\n\tlist *List\n\n\tValue interface{}\n}\n\ntype List struct {\n\troot Node\n\tlen int\n}\n\nfunc (node *Node) Next() *Node {\n\tif p := node.next; node.list != nil && p != &node.list.root {\n\t\treturn p\n\t}\n\treturn nil\n}\n\nfunc (node *Node) Prev() *Node {\n\tif p := node.prev; node.list != nil && p != &node.list.root {\n\t\treturn p\n\t}\n\treturn nil\n}\n\nfunc (list *List) Init() *List {\n\tlist.root.next = &list.root\n\tlist.root.prev = &list.root\n\tlist.len = 0\n\treturn list\n}\n\nfunc New() *List {\n\treturn new(List).Init()\n}\n\nfunc (list *List) lazyInit() {\n\tif list.root.next == nil {\n\t\tlist.Init()\n\t}\n}\n\nfunc (list *List) Len() int {\n\treturn list.len\n}\n\nfunc (list *List) remove(e *Node) *Node {\n\te.prev.next = e.next\n\te.next.prev = e.prev\n\te.next = nil\n\te.prev = nil\n\te.list = nil\n\tlist.len--\n\treturn e\n}\n\nfunc (list *List) Remove(e *Node) interface{} {\n\tif e.list == list {\n\t\tlist.remove(e)\n\t}\n\treturn e.Value\n}\n\nfunc (list *List) Front() *Node {\n\tif list.len == 0 {\n\t\treturn nil\n\t}\n\treturn list.root.next\n}\n\nfunc (list *List) Back() *Node {\n\tif list.len == 0 {\n\t\treturn nil\n\t}\n\treturn list.root.prev\n}\n\nfunc (list *List) insert(e, at *Node) *Node {\n\tn := at.next\n\tat.next = e\n\te.prev = at\n\te.next = n\n\tn.prev = e\n\te.list = list\n\n\tlist.len++\n\treturn e\n}\n\nfunc (list *List) insertValue(v interface{}, at *Node) *Node {\n\n\treturn list.insert(&Node{Value: v}, at)\n}\n\nfunc (list *List) InsertBefore(v interface{}, mark *Node) *Node {\n\tif mark.list != list {\n\t\treturn nil\n\t}\n\treturn list.insertValue(v, mark.prev)\n}\n\nfunc (list *List) PushBack(v interface{}) *Node {\n\tlist.lazyInit()\n\treturn list.insertValue(v, list.root.prev)\n}\nUpdate skiplist.gopackage algorithm\n\n\/\/SkipList\n\/\/author:Xiong Chuan Liang\n\/\/date:2014-1-28\n\nimport (\n\t\"fmt\"\n\t\/\/\"github.com\/xclpkg\/algorithm\"\n\t\"math\/rand\"\n)\n\n\nconst SKIPLIST_MAXLEVEL = 8 \/\/32\nconst SKIPLIST_P = 4\n\n\ntype Node struct {\n\tForward []Node\n\tValue interface{}\n}\n\n\nfunc NewNode(v interface{},level int) *Node {\n\treturn &Node{Value: v, Forward: make([]Node, level)} \n}\n\n\ntype SkipList struct {\n\tHeader *Node\n\tLevel int\n}\n\nfunc NewSkipList() *SkipList {\n\treturn &SkipList{Level: 1, Header: NewNode(0,SKIPLIST_MAXLEVEL)} \n}\n\nfunc (skipList *SkipList) Insert(key int) {\n\n\tupdate := make(map[int]*Node)\n\tnode := skipList.Header\n\n\tfor i := skipList.Level - 1; i >= 0; i-- {\n\t\t for {\n\t\t \t if node.Forward[i].Value != nil && node.Forward[i].Value.(int) < key {\n\t\t \t \tnode = &node.Forward[i]\n\t\t \t }else{\n\t\t \t \tbreak;\n\t\t \t }\n\t\t }\n\t\t update[i] = node\n\t}\n\n\tlevel := skipList.Random_level()\n\tif level > skipList.Level {\n\t\tfor i := skipList.Level; i < level; i++ {\n\t\t\tupdate[i] = skipList.Header\n\t\t}\n\t\tskipList.Level = level\n\t}\n\n\tnewNode := NewNode(key,level)\n\n\tfor i := 0; i < level; i++ {\n\t\tnewNode.Forward[i] = update[i].Forward[i]\t\t\n\t\tupdate[i].Forward[i] = *newNode\t\t\t\n\t}\n\n}\n\nfunc (skipList *SkipList) Random_level() int {\n\n\tlevel := 1\n\tfor (rand.Int31()&0xFFFF)%SKIPLIST_P == 0 {\n\t\tlevel += 1\n\t}\n\tif level < SKIPLIST_MAXLEVEL {\n\t\treturn level\n\t} else {\n\t\treturn SKIPLIST_MAXLEVEL\n\t}\n}\n\nfunc (skipList *SkipList) PrintSkipList() {\n\n\tfmt.Println(\"\\nSkipList-------------------------------------------\")\n\tfor i := SKIPLIST_MAXLEVEL - 1; i >= 0; i-- {\n\t\t\n\t\t fmt.Println(\"level:\", i)\n\t\t node := skipList.Header.Forward[i]\n\t\t for {\n\t\t \t if node.Value != nil {\n\t\t \t \tfmt.Printf(\"%d \", node.Value.(int))\n\t\t \t \tnode = node.Forward[i]\n\t\t \t }else{\n\t\t \t \tbreak;\n\t\t \t }\n\t\t }\n\t\t fmt.Println(\"\\n--------------------------------------------------------\")\n\t} \/\/end for\n\n\tfmt.Println(\"Current MaxLevel:\", skipList.Level)\n}\n\n\nfunc (skipList *SkipList) Search(key int) *Node {\n\n\tnode := skipList.Header\n\tfor i := skipList.Level - 1; i >= 0; i-- {\n\n\t\tfmt.Println(\"\\n Search() Level=\", i)\n\t\tfor {\n\t\t\tif node.Forward[i].Value == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfmt.Printf(\" %d \", node.Forward[i].Value)\n\t\t\tif node.Forward[i].Value.(int) == key {\n\t\t\t\tfmt.Println(\"\\nFound level=\", i, \" key=\", key)\t\t\t\n\t\t\t\treturn &node.Forward[i]\n\t\t\t}\n\n\t\t\tif node.Forward[i].Value.(int) < key {\n\t\t\t\tnode = &node.Forward[i]\n\t\t\t\tcontinue\n\t\t\t}else{ \/\/ > key \n\t\t\t\tbreak\n\t\t\t}\n\t\t } \/\/end for find\n\n\t} \/\/end level\n\treturn nil\n}\n\n\nfunc (skipList *SkipList)Remove(key int) {\n\n\tupdate := make(map[int]*Node)\n\tnode := skipList.Header\n\tfor i := skipList.Level - 1; i >= 0; i-- {\n\t\t\n\t\tfor {\n\n\t\t\tif node.Forward[i].Value == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif node.Forward[i].Value.(int) == key {\n\t\t\t\tfmt.Println(\"Remove() level=\", i, \" key=\", key)\t\t\t\n\t\t\t\tupdate[i] = node \n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif node.Forward[i].Value.(int) < key {\n\t\t\t\tnode = &node.Forward[i]\n\t\t\t\tcontinue\n\t\t\t}else{ \/\/ > key \n\t\t\t\tbreak\n\t\t\t}\n\n\t\t } \/\/end for find\n\n\t} \/\/end level\n\n\tfor i,v := range update {\n\t\tif v == skipList.Header {\n\t\t\tskipList.Level --\n\t\t}\n \t\tv.Forward[i] = v.Forward[i].Forward[i]\t\t\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Documize Inc. . All rights reserved.\n\/\/\n\/\/ This software (Documize Community Edition) is licensed under\n\/\/ GNU AGPL v3 http:\/\/www.gnu.org\/licenses\/agpl-3.0.en.html\n\/\/\n\/\/ You can operate outside the AGPL restrictions by purchasing\n\/\/ Documize Enterprise Edition and obtaining a commercial license\n\/\/ by contacting .\n\/\/\n\/\/ https:\/\/documize.com\n\n\/\/ Package storage sets up database persistence providers.\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/documize\/community\/core\/env\"\n\taccount \"github.com\/documize\/community\/domain\/account\"\n\tactivity \"github.com\/documize\/community\/domain\/activity\"\n\tattachment \"github.com\/documize\/community\/domain\/attachment\"\n\taudit \"github.com\/documize\/community\/domain\/audit\"\n\tblock \"github.com\/documize\/community\/domain\/block\"\n\tcategory \"github.com\/documize\/community\/domain\/category\"\n\tdocument \"github.com\/documize\/community\/domain\/document\"\n\tgroup \"github.com\/documize\/community\/domain\/group\"\n\tlabel \"github.com\/documize\/community\/domain\/label\"\n\tlink \"github.com\/documize\/community\/domain\/link\"\n\tmeta \"github.com\/documize\/community\/domain\/meta\"\n\torg \"github.com\/documize\/community\/domain\/organization\"\n\tpage \"github.com\/documize\/community\/domain\/page\"\n\tpermission \"github.com\/documize\/community\/domain\/permission\"\n\tpin \"github.com\/documize\/community\/domain\/pin\"\n\tsearch \"github.com\/documize\/community\/domain\/search\"\n\tsetting \"github.com\/documize\/community\/domain\/setting\"\n\tspace \"github.com\/documize\/community\/domain\/space\"\n\t\"github.com\/documize\/community\/domain\/store\"\n\tuser \"github.com\/documize\/community\/domain\/user\"\n\t_ \"github.com\/lib\/pq\" \/\/ the PostgreSQL driver is required behind the scenes\n)\n\n\/\/ PostgreSQLProvider supports by popular demand.\ntype PostgreSQLProvider struct {\n\t\/\/ User specified connection string.\n\tConnectionString string\n\n\t\/\/ Unused for this provider.\n\tVariant env.StoreType\n}\n\n\/\/ SetPostgreSQLProvider creates PostgreSQL provider\nfunc SetPostgreSQLProvider(r *env.Runtime, s *store.Store) {\n\t\/\/ Set up provider specific details.\n\tr.StoreProvider = PostgreSQLProvider{\n\t\tConnectionString: r.Flags.DBConn,\n\t\tVariant: env.StoreTypePostgreSQL,\n\t}\n\n\t\/\/ Wire up data providers.\n\n\t\/\/ Account\n\taccountStore := account.Store{}\n\taccountStore.Runtime = r\n\ts.Account = accountStore\n\n\t\/\/ Activity\n\tactivityStore := activity.Store{}\n\tactivityStore.Runtime = r\n\ts.Activity = activityStore\n\n\t\/\/ Attachment\n\tattachmentStore := attachment.Store{}\n\tattachmentStore.Runtime = r\n\ts.Attachment = attachmentStore\n\n\t\/\/ Audit\n\tauditStore := audit.Store{}\n\tauditStore.Runtime = r\n\ts.Audit = auditStore\n\n\t\/\/ Section Template\n\tblockStore := block.Store{}\n\tblockStore.Runtime = r\n\ts.Block = blockStore\n\n\t\/\/ Category\n\tcategoryStore := category.Store{}\n\tcategoryStore.Runtime = r\n\ts.Category = categoryStore\n\n\t\/\/ Document\n\tdocumentStore := document.Store{}\n\tdocumentStore.Runtime = r\n\ts.Document = documentStore\n\n\t\/\/ Group\n\tgroupStore := group.Store{}\n\tgroupStore.Runtime = r\n\ts.Group = groupStore\n\n\t\/\/ Link\n\tlinkStore := link.Store{}\n\tlinkStore.Runtime = r\n\ts.Link = linkStore\n\n\t\/\/ Meta\n\tmetaStore := meta.Store{}\n\tmetaStore.Runtime = r\n\ts.Meta = metaStore\n\n\t\/\/ Organization (tenant)\n\torgStore := org.Store{}\n\torgStore.Runtime = r\n\ts.Organization = orgStore\n\n\t\/\/ Page (section)\n\tpageStore := page.Store{}\n\tpageStore.Runtime = r\n\ts.Page = pageStore\n\n\t\/\/ Permission\n\tpermissionStore := permission.Store{}\n\tpermissionStore.Runtime = r\n\ts.Permission = permissionStore\n\n\t\/\/ Pin\n\tpinStore := pin.Store{}\n\tpinStore.Runtime = r\n\ts.Pin = pinStore\n\n\t\/\/ Search\n\tsearchStore := search.Store{}\n\tsearchStore.Runtime = r\n\ts.Search = searchStore\n\n\t\/\/ Setting\n\tsettingStore := setting.Store{}\n\tsettingStore.Runtime = r\n\ts.Setting = settingStore\n\n\t\/\/ Space\n\tspaceStore := space.Store{}\n\tspaceStore.Runtime = r\n\ts.Space = spaceStore\n\n\t\/\/ User\n\tuserStore := user.Store{}\n\tuserStore.Runtime = r\n\ts.User = userStore\n\n\t\/\/ Space Label\n\tlabelStore := label.Store{}\n\tlabelStore.Runtime = r\n\ts.Label = labelStore\n}\n\n\/\/ Type returns name of provider\nfunc (p PostgreSQLProvider) Type() env.StoreType {\n\treturn env.StoreTypePostgreSQL\n}\n\n\/\/ TypeVariant returns databse flavor\nfunc (p PostgreSQLProvider) TypeVariant() env.StoreType {\n\treturn p.Variant\n}\n\n\/\/ DriverName returns database\/sql driver name.\nfunc (p PostgreSQLProvider) DriverName() string {\n\treturn \"postgres\"\n}\n\n\/\/ Params returns connection string parameters that must be present before connecting to DB.\nfunc (p PostgreSQLProvider) Params() map[string]string {\n\t\/\/ Not used for this provider.\n\treturn map[string]string{}\n}\n\n\/\/ Example holds storage provider specific connection string format\n\/\/ used in error messages.\nfunc (p PostgreSQLProvider) Example() string {\n\treturn \"database connection string format is 'host=localhost port=5432 sslmode=disable user=admin password=secret dbname=documize'\"\n}\n\n\/\/ DatabaseName holds the SQL database name where Documize tables live.\nfunc (p PostgreSQLProvider) DatabaseName() string {\n\tbits := strings.Split(p.ConnectionString, \" \")\n\tfor _, s := range bits {\n\t\ts = strings.TrimSpace(s)\n\t\tif strings.Contains(s, \"dbname=\") {\n\t\t\ts = strings.Replace(s, \"dbname=\", \"\", 1)\n\n\t\t\treturn s\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\n\/\/ MakeConnectionString returns provider specific DB connection string\n\/\/ complete with default parameters.\nfunc (p PostgreSQLProvider) MakeConnectionString() string {\n\t\/\/ No special processing so return as-is.\n\treturn p.ConnectionString\n}\n\n\/\/ QueryMeta is how to extract version number, collation, character set from database provider.\nfunc (p PostgreSQLProvider) QueryMeta() string {\n\t\/\/ SELECT version() as vstring, current_setting('server_version_num') as vnumber, pg_encoding_to_char(encoding) AS charset FROM pg_database WHERE datname = 'documize';\n\n\treturn fmt.Sprintf(`SELECT cast(current_setting('server_version_num') AS TEXT) AS version, version() AS comment, pg_encoding_to_char(encoding) AS charset, '' AS collation\n FROM pg_database WHERE datname = '%s'`, p.DatabaseName())\n}\n\n\/\/ QueryRecordVersionUpgrade returns database specific insert statement\n\/\/ that records the database version number.\nfunc (p PostgreSQLProvider) QueryRecordVersionUpgrade(version int) string {\n\t\/\/ Make record that holds new database version number.\n\tjson := fmt.Sprintf(\"{\\\"database\\\": \\\"%d\\\"}\", version)\n\n\treturn fmt.Sprintf(`INSERT INTO dmz_config (c_key,c_config) VALUES ('META','%s')\n ON CONFLICT (c_key) DO UPDATE SET c_config='%s' WHERE dmz_config.c_key='META'`, json, json)\n}\n\n\/\/ QueryRecordVersionUpgradeLegacy returns database specific insert statement\n\/\/ that records the database version number.\nfunc (p PostgreSQLProvider) QueryRecordVersionUpgradeLegacy(version int) string {\n\t\/\/ This provider has no legacy schema.\n\treturn p.QueryRecordVersionUpgrade(version)\n}\n\n\/\/ QueryGetDatabaseVersion returns the schema version number.\nfunc (p PostgreSQLProvider) QueryGetDatabaseVersion() string {\n\treturn \"SELECT c_config -> 'database' FROM dmz_config WHERE c_key = 'META';\"\n}\n\n\/\/ QueryGetDatabaseVersionLegacy returns the schema version number before The Great Schema Migration (v25, MySQL).\nfunc (p PostgreSQLProvider) QueryGetDatabaseVersionLegacy() string {\n\t\/\/ This provider has no legacy schema.\n\treturn p.QueryGetDatabaseVersion()\n}\n\n\/\/ QueryTableList returns a list tables in Documize database.\nfunc (p PostgreSQLProvider) QueryTableList() string {\n\treturn fmt.Sprintf(`select table_name\n FROM information_schema.tables\n WHERE table_type='BASE TABLE' AND table_schema NOT IN ('pg_catalog', 'information_schema') AND table_catalog='%s'`, p.DatabaseName())\n}\n\n\/\/ QueryDateInterval returns provider specific interval style\n\/\/ date SQL.\nfunc (p PostgreSQLProvider) QueryDateInterval(days int64) string {\n\treturn fmt.Sprintf(\"DATE(NOW()) - INTERVAL '%d day'\", days)\n}\n\n\/\/ JSONEmpty returns empty SQL JSON object.\n\/\/ Typically used as 2nd parameter to COALESCE().\nfunc (p PostgreSQLProvider) JSONEmpty() string {\n\treturn \"'{}'::json\"\n}\n\n\/\/ JSONGetValue returns JSON attribute selection syntax.\n\/\/ Typically used in SELECT query.\nfunc (p PostgreSQLProvider) JSONGetValue(column, attribute string) string {\n\tif len(attribute) > 0 {\n\t\treturn fmt.Sprintf(\"%s -> '%s'\", column, attribute)\n\t}\n\n\treturn fmt.Sprintf(\"%s\", column)\n}\n\n\/\/ VerfiyVersion checks to see if actual database meets\n\/\/ minimum version requirements.``\nfunc (p PostgreSQLProvider) VerfiyVersion(dbVersion string) (bool, string) {\n\t\/\/ All versions supported.\n\treturn true, \"\"\n}\n\n\/\/ VerfiyCharacterCollation needs to ensure utf8.\nfunc (p PostgreSQLProvider) VerfiyCharacterCollation(charset, collation string) (charOK bool, requirements string) {\n\tif strings.ToLower(charset) != \"utf8\" {\n\t\treturn false, fmt.Sprintf(\"PostgreSQL character set needs to be utf8, found %s\", charset)\n\t}\n\n\t\/\/ Collation check ignored.\n\n\treturn true, \"\"\n}\n\n\/\/ ConvertTimestamp returns SQL function to correctly convert\n\/\/ ISO 8601 format (e.g. '2016-09-08T06:37:23Z') to SQL specific\n\/\/ timestamp value (e.g. 2016-09-08 06:37:23).\n\/\/ Must use ? for parameter placeholder character as DB layer\n\/\/ will convert to database specific parameter placeholder character.\nfunc (p PostgreSQLProvider) ConvertTimestamp() (statement string) {\n\treturn `to_timestamp(?,'YYYY-MM-DD HH24:MI:SS')`\n}\n\n\/\/ IsTrue returns \"true\"\nfunc (p PostgreSQLProvider) IsTrue() string {\n\treturn \"true\"\n}\n\n\/\/ IsFalse returns \"false\"\nfunc (p PostgreSQLProvider) IsFalse() string {\n\treturn \"false\"\n}\n\n\/\/ RowLimit returns SQL for limiting number of rows returned.\nfunc (p PostgreSQLProvider) RowLimit(max int) string {\n\treturn fmt.Sprintf(\"LIMIT %d\", max)\n}\nIgnore PostGIS when determining if PostgreSQL is empty\/\/ Copyright 2016 Documize Inc. . All rights reserved.\n\/\/\n\/\/ This software (Documize Community Edition) is licensed under\n\/\/ GNU AGPL v3 http:\/\/www.gnu.org\/licenses\/agpl-3.0.en.html\n\/\/\n\/\/ You can operate outside the AGPL restrictions by purchasing\n\/\/ Documize Enterprise Edition and obtaining a commercial license\n\/\/ by contacting .\n\/\/\n\/\/ https:\/\/documize.com\n\n\/\/ Package storage sets up database persistence providers.\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/documize\/community\/core\/env\"\n\taccount \"github.com\/documize\/community\/domain\/account\"\n\tactivity \"github.com\/documize\/community\/domain\/activity\"\n\tattachment \"github.com\/documize\/community\/domain\/attachment\"\n\taudit \"github.com\/documize\/community\/domain\/audit\"\n\tblock \"github.com\/documize\/community\/domain\/block\"\n\tcategory \"github.com\/documize\/community\/domain\/category\"\n\tdocument \"github.com\/documize\/community\/domain\/document\"\n\tgroup \"github.com\/documize\/community\/domain\/group\"\n\tlabel \"github.com\/documize\/community\/domain\/label\"\n\tlink \"github.com\/documize\/community\/domain\/link\"\n\tmeta \"github.com\/documize\/community\/domain\/meta\"\n\torg \"github.com\/documize\/community\/domain\/organization\"\n\tpage \"github.com\/documize\/community\/domain\/page\"\n\tpermission \"github.com\/documize\/community\/domain\/permission\"\n\tpin \"github.com\/documize\/community\/domain\/pin\"\n\tsearch \"github.com\/documize\/community\/domain\/search\"\n\tsetting \"github.com\/documize\/community\/domain\/setting\"\n\tspace \"github.com\/documize\/community\/domain\/space\"\n\t\"github.com\/documize\/community\/domain\/store\"\n\tuser \"github.com\/documize\/community\/domain\/user\"\n\t_ \"github.com\/lib\/pq\" \/\/ the PostgreSQL driver is required behind the scenes\n)\n\n\/\/ PostgreSQLProvider supports by popular demand.\ntype PostgreSQLProvider struct {\n\t\/\/ User specified connection string.\n\tConnectionString string\n\n\t\/\/ Unused for this provider.\n\tVariant env.StoreType\n}\n\n\/\/ SetPostgreSQLProvider creates PostgreSQL provider\nfunc SetPostgreSQLProvider(r *env.Runtime, s *store.Store) {\n\t\/\/ Set up provider specific details.\n\tr.StoreProvider = PostgreSQLProvider{\n\t\tConnectionString: r.Flags.DBConn,\n\t\tVariant: env.StoreTypePostgreSQL,\n\t}\n\n\t\/\/ Wire up data providers.\n\n\t\/\/ Account\n\taccountStore := account.Store{}\n\taccountStore.Runtime = r\n\ts.Account = accountStore\n\n\t\/\/ Activity\n\tactivityStore := activity.Store{}\n\tactivityStore.Runtime = r\n\ts.Activity = activityStore\n\n\t\/\/ Attachment\n\tattachmentStore := attachment.Store{}\n\tattachmentStore.Runtime = r\n\ts.Attachment = attachmentStore\n\n\t\/\/ Audit\n\tauditStore := audit.Store{}\n\tauditStore.Runtime = r\n\ts.Audit = auditStore\n\n\t\/\/ Section Template\n\tblockStore := block.Store{}\n\tblockStore.Runtime = r\n\ts.Block = blockStore\n\n\t\/\/ Category\n\tcategoryStore := category.Store{}\n\tcategoryStore.Runtime = r\n\ts.Category = categoryStore\n\n\t\/\/ Document\n\tdocumentStore := document.Store{}\n\tdocumentStore.Runtime = r\n\ts.Document = documentStore\n\n\t\/\/ Group\n\tgroupStore := group.Store{}\n\tgroupStore.Runtime = r\n\ts.Group = groupStore\n\n\t\/\/ Link\n\tlinkStore := link.Store{}\n\tlinkStore.Runtime = r\n\ts.Link = linkStore\n\n\t\/\/ Meta\n\tmetaStore := meta.Store{}\n\tmetaStore.Runtime = r\n\ts.Meta = metaStore\n\n\t\/\/ Organization (tenant)\n\torgStore := org.Store{}\n\torgStore.Runtime = r\n\ts.Organization = orgStore\n\n\t\/\/ Page (section)\n\tpageStore := page.Store{}\n\tpageStore.Runtime = r\n\ts.Page = pageStore\n\n\t\/\/ Permission\n\tpermissionStore := permission.Store{}\n\tpermissionStore.Runtime = r\n\ts.Permission = permissionStore\n\n\t\/\/ Pin\n\tpinStore := pin.Store{}\n\tpinStore.Runtime = r\n\ts.Pin = pinStore\n\n\t\/\/ Search\n\tsearchStore := search.Store{}\n\tsearchStore.Runtime = r\n\ts.Search = searchStore\n\n\t\/\/ Setting\n\tsettingStore := setting.Store{}\n\tsettingStore.Runtime = r\n\ts.Setting = settingStore\n\n\t\/\/ Space\n\tspaceStore := space.Store{}\n\tspaceStore.Runtime = r\n\ts.Space = spaceStore\n\n\t\/\/ User\n\tuserStore := user.Store{}\n\tuserStore.Runtime = r\n\ts.User = userStore\n\n\t\/\/ Space Label\n\tlabelStore := label.Store{}\n\tlabelStore.Runtime = r\n\ts.Label = labelStore\n}\n\n\/\/ Type returns name of provider\nfunc (p PostgreSQLProvider) Type() env.StoreType {\n\treturn env.StoreTypePostgreSQL\n}\n\n\/\/ TypeVariant returns databse flavor\nfunc (p PostgreSQLProvider) TypeVariant() env.StoreType {\n\treturn p.Variant\n}\n\n\/\/ DriverName returns database\/sql driver name.\nfunc (p PostgreSQLProvider) DriverName() string {\n\treturn \"postgres\"\n}\n\n\/\/ Params returns connection string parameters that must be present before connecting to DB.\nfunc (p PostgreSQLProvider) Params() map[string]string {\n\t\/\/ Not used for this provider.\n\treturn map[string]string{}\n}\n\n\/\/ Example holds storage provider specific connection string format\n\/\/ used in error messages.\nfunc (p PostgreSQLProvider) Example() string {\n\treturn \"database connection string format is 'host=localhost port=5432 sslmode=disable user=admin password=secret dbname=documize'\"\n}\n\n\/\/ DatabaseName holds the SQL database name where Documize tables live.\nfunc (p PostgreSQLProvider) DatabaseName() string {\n\tbits := strings.Split(p.ConnectionString, \" \")\n\tfor _, s := range bits {\n\t\ts = strings.TrimSpace(s)\n\t\tif strings.Contains(s, \"dbname=\") {\n\t\t\ts = strings.Replace(s, \"dbname=\", \"\", 1)\n\n\t\t\treturn s\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\n\/\/ MakeConnectionString returns provider specific DB connection string\n\/\/ complete with default parameters.\nfunc (p PostgreSQLProvider) MakeConnectionString() string {\n\t\/\/ No special processing so return as-is.\n\treturn p.ConnectionString\n}\n\n\/\/ QueryMeta is how to extract version number, collation, character set from database provider.\nfunc (p PostgreSQLProvider) QueryMeta() string {\n\t\/\/ SELECT version() as vstring, current_setting('server_version_num') as vnumber, pg_encoding_to_char(encoding) AS charset FROM pg_database WHERE datname = 'documize';\n\n\treturn fmt.Sprintf(`SELECT cast(current_setting('server_version_num') AS TEXT) AS version, version() AS comment, pg_encoding_to_char(encoding) AS charset, '' AS collation\n FROM pg_database WHERE datname = '%s'`, p.DatabaseName())\n}\n\n\/\/ QueryRecordVersionUpgrade returns database specific insert statement\n\/\/ that records the database version number.\nfunc (p PostgreSQLProvider) QueryRecordVersionUpgrade(version int) string {\n\t\/\/ Make record that holds new database version number.\n\tjson := fmt.Sprintf(\"{\\\"database\\\": \\\"%d\\\"}\", version)\n\n\treturn fmt.Sprintf(`INSERT INTO dmz_config (c_key,c_config) VALUES ('META','%s')\n ON CONFLICT (c_key) DO UPDATE SET c_config='%s' WHERE dmz_config.c_key='META'`, json, json)\n}\n\n\/\/ QueryRecordVersionUpgradeLegacy returns database specific insert statement\n\/\/ that records the database version number.\nfunc (p PostgreSQLProvider) QueryRecordVersionUpgradeLegacy(version int) string {\n\t\/\/ This provider has no legacy schema.\n\treturn p.QueryRecordVersionUpgrade(version)\n}\n\n\/\/ QueryGetDatabaseVersion returns the schema version number.\nfunc (p PostgreSQLProvider) QueryGetDatabaseVersion() string {\n\treturn \"SELECT c_config -> 'database' FROM dmz_config WHERE c_key = 'META';\"\n}\n\n\/\/ QueryGetDatabaseVersionLegacy returns the schema version number before The Great Schema Migration (v25, MySQL).\nfunc (p PostgreSQLProvider) QueryGetDatabaseVersionLegacy() string {\n\t\/\/ This provider has no legacy schema.\n\treturn p.QueryGetDatabaseVersion()\n}\n\n\/\/ QueryTableList returns a list tables in Documize database.\nfunc (p PostgreSQLProvider) QueryTableList() string {\n\treturn fmt.Sprintf(`select table_name\n FROM information_schema.tables\n WHERE table_type='BASE TABLE' AND table_schema NOT IN ('pg_catalog', 'information_schema') AND table_name != 'spatial_ref_sys' AND table_catalog='%s'`, p.DatabaseName())\n}\n\n\/\/ QueryDateInterval returns provider specific interval style\n\/\/ date SQL.\nfunc (p PostgreSQLProvider) QueryDateInterval(days int64) string {\n\treturn fmt.Sprintf(\"DATE(NOW()) - INTERVAL '%d day'\", days)\n}\n\n\/\/ JSONEmpty returns empty SQL JSON object.\n\/\/ Typically used as 2nd parameter to COALESCE().\nfunc (p PostgreSQLProvider) JSONEmpty() string {\n\treturn \"'{}'::json\"\n}\n\n\/\/ JSONGetValue returns JSON attribute selection syntax.\n\/\/ Typically used in SELECT query.\nfunc (p PostgreSQLProvider) JSONGetValue(column, attribute string) string {\n\tif len(attribute) > 0 {\n\t\treturn fmt.Sprintf(\"%s -> '%s'\", column, attribute)\n\t}\n\n\treturn fmt.Sprintf(\"%s\", column)\n}\n\n\/\/ VerfiyVersion checks to see if actual database meets\n\/\/ minimum version requirements.``\nfunc (p PostgreSQLProvider) VerfiyVersion(dbVersion string) (bool, string) {\n\t\/\/ All versions supported.\n\treturn true, \"\"\n}\n\n\/\/ VerfiyCharacterCollation needs to ensure utf8.\nfunc (p PostgreSQLProvider) VerfiyCharacterCollation(charset, collation string) (charOK bool, requirements string) {\n\tif strings.ToLower(charset) != \"utf8\" {\n\t\treturn false, fmt.Sprintf(\"PostgreSQL character set needs to be utf8, found %s\", charset)\n\t}\n\n\t\/\/ Collation check ignored.\n\n\treturn true, \"\"\n}\n\n\/\/ ConvertTimestamp returns SQL function to correctly convert\n\/\/ ISO 8601 format (e.g. '2016-09-08T06:37:23Z') to SQL specific\n\/\/ timestamp value (e.g. 2016-09-08 06:37:23).\n\/\/ Must use ? for parameter placeholder character as DB layer\n\/\/ will convert to database specific parameter placeholder character.\nfunc (p PostgreSQLProvider) ConvertTimestamp() (statement string) {\n\treturn `to_timestamp(?,'YYYY-MM-DD HH24:MI:SS')`\n}\n\n\/\/ IsTrue returns \"true\"\nfunc (p PostgreSQLProvider) IsTrue() string {\n\treturn \"true\"\n}\n\n\/\/ IsFalse returns \"false\"\nfunc (p PostgreSQLProvider) IsFalse() string {\n\treturn \"false\"\n}\n\n\/\/ RowLimit returns SQL for limiting number of rows returned.\nfunc (p PostgreSQLProvider) RowLimit(max int) string {\n\treturn fmt.Sprintf(\"LIMIT %d\", max)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 The go-vgo Project Developers. See the COPYRIGHT\n\/\/ file at the top-level directory of this distribution and at\n\/\/ https:\/\/github.com\/go-vgo\/robotgo\/blob\/master\/LICENSE\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 or the MIT license\n\/\/ , at your\n\/\/ option. This file may not be copied, modified, or distributed\n\/\/ except according to those terms.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-vgo\/robotgo\"\n\t\/\/ \"go-vgo\/robotgo\"\n)\n\nfunc typeStr() {\n\t\/\/ importing \"Hello World\"\n\trobotgo.TypeStr(\"Hello World!\")\n\n\trobotgo.TypeStr(\"だんしゃり\")\n\trobotgo.MicroSleep(10.2)\n\n\trobotgo.TypeStr(\"Hi galaxy. こんにちは世界.\")\n\trobotgo.Sleep(2)\n\trobotgo.TypeString(\"So, hi, bye!\")\n\trobotgo.MilliSleep(100)\n\n\tustr := uint32(robotgo.CharCodeAt(\"So, hi, bye!\", 0))\n\trobotgo.UnicodeType(ustr)\n\n\trobotgo.PasteStr(\"paste string\")\n}\n\nfunc keyTap() {\n\t\/\/ press \"enter\"\n\trobotgo.KeyTap(\"enter\")\n\trobotgo.KeyTap(\"a\")\n\trobotgo.KeyTap(\"a\", \"ctrl\")\n\n\t\/\/ hide window\n\terr := robotgo.KeyTap(\"h\", \"cmd\")\n\tif err != \"\" {\n\t\tfmt.Println(\"robotgo.KeyTap run error is: \", err)\n\t}\n\n\trobotgo.KeyTap(\"h\", \"cmd\", 12)\n\n\t\/\/ press \"i\", \"alt\", \"command\" Key combination\n\trobotgo.KeyTap(\"i\", \"alt\", \"command\")\n\trobotgo.KeyTap(\"i\", \"alt\", \"cmd\", 11)\n\n\tarr := []string{\"alt\", \"cmd\"}\n\trobotgo.KeyTap(\"i\", arr)\n\trobotgo.KeyTap(\"i\", arr, 12)\n\n\trobotgo.KeyTap(\"i\", \"cmd\", \" alt\", \"shift\")\n\n\t\/\/ close window\n\trobotgo.KeyTap(\"w\", \"cmd\")\n\n\t\/\/ minimize window\n\trobotgo.KeyTap(\"m\", \"cmd\")\n\n\trobotgo.KeyTap(\"f1\", \"ctrl\")\n\trobotgo.KeyTap(\"a\", \"control\")\n}\n\nfunc keyToggle() {\n\trobotgo.KeyToggle(\"a\", \"down\")\n\trobotgo.KeyToggle(\"a\", \"down\", \"alt\")\n\trobotgo.KeyToggle(\"a\", \"up\", \"alt\", \"cmd\")\n\trobotgo.KeyToggle(\"q\", \"up\", \"alt\", \"cmd\", \"shift\")\n\n\terr := robotgo.KeyToggle(\"enter\", \"down\")\n\tif err != \"\" {\n\t\tfmt.Println(\"robotgo.KeyToggle run error is: \", err)\n\t}\n}\n\nfunc cilp() {\n\trobotgo.TypeString(\"en\")\n\n\t\/\/ write string to clipboard\n\trobotgo.WriteAll(\"テストする\")\n\t\/\/ read string from clipboard\n\ttext, err := robotgo.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(\"robotgo.ReadAll err is: \", err)\n\t}\n\tfmt.Println(text)\n}\n\nfunc key() {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Control the keyboard\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\ttypeStr()\n\n\tkeyTap()\n\tkeyToggle()\n\n\tcilp()\n}\n\nfunc main() {\n\tkey()\n}\nupdate keyboard example code, #238\/\/ Copyright 2016 The go-vgo Project Developers. See the COPYRIGHT\n\/\/ file at the top-level directory of this distribution and at\n\/\/ https:\/\/github.com\/go-vgo\/robotgo\/blob\/master\/LICENSE\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 or the MIT license\n\/\/ , at your\n\/\/ option. This file may not be copied, modified, or distributed\n\/\/ except according to those terms.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-vgo\/robotgo\"\n\t\/\/ \"go-vgo\/robotgo\"\n)\n\nfunc typeStr() {\n\t\/\/ importing \"Hello World\"\n\trobotgo.TypeStr(\"Hello World!\")\n\n\trobotgo.TypeStr(\"だんしゃり\")\n\trobotgo.MicroSleep(10.2)\n\n\trobotgo.TypeStr(\"Hi galaxy. こんにちは世界.\")\n\trobotgo.Sleep(2)\n\trobotgo.TypeString(\"So, hi, bye!\")\n\trobotgo.MilliSleep(100)\n\n\tustr := uint32(robotgo.CharCodeAt(\"So, hi, bye!\", 0))\n\trobotgo.UnicodeType(ustr)\n\n\trobotgo.PasteStr(\"paste string\")\n}\n\nfunc keyTap() {\n\t\/\/ press \"enter\"\n\trobotgo.KeyTap(\"enter\")\n\trobotgo.KeyTap(\"a\")\n\trobotgo.MilliSleep(100)\n\trobotgo.KeyTap(\"a\", \"ctrl\")\n\n\t\/\/ hide window\n\terr := robotgo.KeyTap(\"h\", \"cmd\")\n\tif err != \"\" {\n\t\tfmt.Println(\"robotgo.KeyTap run error is: \", err)\n\t}\n\n\trobotgo.KeyTap(\"h\", \"cmd\", 12)\n\n\t\/\/ press \"i\", \"alt\", \"command\" Key combination\n\trobotgo.KeyTap(\"i\", \"alt\", \"command\")\n\trobotgo.KeyTap(\"i\", \"alt\", \"cmd\", 11)\n\n\tarr := []string{\"alt\", \"cmd\"}\n\trobotgo.KeyTap(\"i\", arr)\n\trobotgo.KeyTap(\"i\", arr, 12)\n\n\trobotgo.KeyTap(\"i\", \"cmd\", \" alt\", \"shift\")\n\n\t\/\/ close window\n\trobotgo.KeyTap(\"w\", \"cmd\")\n\n\t\/\/ minimize window\n\trobotgo.KeyTap(\"m\", \"cmd\")\n\n\trobotgo.KeyTap(\"f1\", \"ctrl\")\n\trobotgo.KeyTap(\"a\", \"control\")\n}\n\nfunc keyToggle() {\n\trobotgo.KeyToggle(\"a\", \"down\")\n\trobotgo.KeyToggle(\"a\", \"down\", \"alt\")\n\trobotgo.Sleep(1)\n\trobotgo.KeyToggle(\"a\", \"up\", \"alt\", \"cmd\")\n\trobotgo.MilliSleep(100)\n\trobotgo.KeyToggle(\"q\", \"up\", \"alt\", \"cmd\", \"shift\")\n\n\terr := robotgo.KeyToggle(\"enter\", \"down\")\n\tif err != \"\" {\n\t\tfmt.Println(\"robotgo.KeyToggle run error is: \", err)\n\t}\n}\n\nfunc cilp() {\n\trobotgo.TypeString(\"en\")\n\n\t\/\/ write string to clipboard\n\trobotgo.WriteAll(\"テストする\")\n\t\/\/ read string from clipboard\n\ttext, err := robotgo.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(\"robotgo.ReadAll err is: \", err)\n\t}\n\tfmt.Println(text)\n}\n\nfunc key() {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Control the keyboard\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\ttypeStr()\n\n\tkeyTap()\n\tkeyToggle()\n\n\tcilp()\n}\n\nfunc main() {\n\tkey()\n}\n<|endoftext|>"} {"text":"package vault\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/99designs\/aws-vault\/prompt\"\n\t\"github.com\/99designs\/keyring\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/iam\"\n)\n\ntype Rotator struct {\n\tKeyring keyring.Keyring\n\tMfaToken string\n\tMfaPrompt prompt.PromptFunc\n\tConfig *Config\n}\n\n\/\/ Rotate creates a new key and deletes the old one\nfunc (r *Rotator) Rotate(profile string) error {\n\tvar err error\n\n\tsource, _ := r.Config.SourceProfile(profile)\n\n\t\/\/ --------------------------------\n\t\/\/ Get the existing credentials\n\n\tprovider := &KeyringProvider{\n\t\tKeyring: r.Keyring,\n\t\tProfile: source.Name,\n\t\tRegion: source.Region,\n\t}\n\n\toldMasterCreds, err := provider.Retrieve()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldSess := session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: oldMasterCreds}),\n\t})\n\n\tcurrentUserName, err := GetUsernameFromSession(oldSess)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Found old access key ****************%s for user %s\",\n\t\toldMasterCreds.AccessKeyID[len(oldMasterCreds.AccessKeyID)-4:],\n\t\tcurrentUserName)\n\n\t\/\/ We need to use a session as some credentials will requiring assuming a role to\n\t\/\/ get permission to create creds\n\toldSessionCreds, err := NewVaultCredentials(r.Keyring, profile, VaultOptions{\n\t\tMfaToken: r.MfaToken,\n\t\tMfaPrompt: r.MfaPrompt,\n\t\tConfig: r.Config,\n\t\tNoSession: false,\n\t\tMasterCreds: &oldMasterCreds,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldSessionVal, err := oldSessionCreds.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Create new access key\n\n\tlog.Println(\"Using old credentials to create a new access key\")\n\n\tvar iamUserName *string\n\n\t\/\/ A username is needed for some IAM calls if the credentials have assumed a role\n\tif oldSessionVal.SessionToken != \"\" || currentUserName != \"root\" {\n\t\tiamUserName = aws.String(currentUserName)\n\t}\n\n\toldSessionClient := iam.New(session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: oldSessionVal}),\n\t}))\n\n\tcreateOut, err := oldSessionClient.CreateAccessKey(&iam.CreateAccessKeyInput{\n\t\tUserName: iamUserName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Created new access key\")\n\n\tnewMasterCreds := credentials.Value{\n\t\tAccessKeyID: *createOut.AccessKey.AccessKeyId,\n\t\tSecretAccessKey: *createOut.AccessKey.SecretAccessKey,\n\t}\n\n\tif err := provider.Store(newMasterCreds); err != nil {\n\t\treturn fmt.Errorf(\"Error storing new access key %v: %v\",\n\t\t\tnewMasterCreds.AccessKeyID, err)\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Use new credentials to delete old access key\n\n\tlog.Println(\"Using new credentials to delete the old new access key\")\n\n\tnewSessionCreds, err := NewVaultCredentials(r.Keyring, profile, VaultOptions{\n\t\tMfaToken: r.MfaToken,\n\t\tMfaPrompt: r.MfaPrompt,\n\t\tConfig: r.Config,\n\t\tNoSession: false,\n\t\tMasterCreds: &newMasterCreds,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Waiting for new IAM credentials to propagate (takes up to 10 seconds)\")\n\n\terr = retry(time.Second*20, time.Second*5, func() error {\n\t\tnewVal, err := newSessionCreds.Get()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnewClient := iam.New(session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: newVal}),\n\t\t}))\n\n\t\t_, err = newClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{\n\t\t\tAccessKeyId: aws.String(oldMasterCreds.AccessKeyID),\n\t\t\tUserName: iamUserName,\n\t\t})\n\t\treturn err\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can't delete old access key %v: %v\", oldMasterCreds.AccessKeyID, err)\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Delete old sessions\n\n\tsessions, err := NewKeyringSessions(r.Keyring, r.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif n, _ := sessions.Delete(profile); n > 0 {\n\t\tlog.Printf(\"Deleted %d existing sessions.\", n)\n\t}\n\n\tlog.Printf(\"Rotated credentials for profile %q in vault\", profile)\n\treturn nil\n}\n\nvar (\n\tgetUserErrorRegexp = regexp.MustCompile(`^AccessDenied: User: arn:aws:iam::(\\d+):user\/(.+) is not`)\n)\n\n\/\/ GetUsernameFromSession returns the IAM username (or root) associated with the current aws session\nfunc GetUsernameFromSession(sess *session.Session) (string, error) {\n\tclient := iam.New(sess)\n\n\tresp, err := client.GetUser(&iam.GetUserInput{})\n\tif err != nil {\n\t\t\/\/ Even if GetUser fails, the current user is included in the error. This happens when you have o IAM permissions\n\t\t\/\/ on the master credentials, but have permission to use assumeRole later\n\t\tmatches := getUserErrorRegexp.FindStringSubmatch(err.Error())\n\t\tif len(matches) > 0 {\n\t\t\treturn matches[2], nil\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\tif resp.User.UserName != nil {\n\t\treturn *resp.User.UserName, nil\n\t}\n\n\tif resp.User.Arn != nil {\n\t\tarnParts := strings.Split(*resp.User.Arn, \":\")\n\t\treturn arnParts[len(arnParts)-1], nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"Couldn't determine current username\")\n}\n\nfunc retry(duration time.Duration, sleep time.Duration, callback func() error) (err error) {\n\tt0 := time.Now()\n\ti := 0\n\tfor {\n\t\ti++\n\n\t\terr = callback()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\tdelta := time.Now().Sub(t0)\n\t\tif delta > duration {\n\t\t\treturn fmt.Errorf(\"After %d attempts (during %s), last error: %s\", i, delta, err)\n\t\t}\n\n\t\ttime.Sleep(sleep)\n\t\tlog.Println(\"Retrying after error:\", err)\n\t}\n}\nBest guess stab at IAM vs STS for Rotationpackage vault\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/99designs\/aws-vault\/prompt\"\n\t\"github.com\/99designs\/keyring\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/iam\"\n)\n\ntype Rotator struct {\n\tKeyring keyring.Keyring\n\tMfaToken string\n\tMfaPrompt prompt.PromptFunc\n\tConfig *Config\n}\n\n\/\/ Rotate creates a new key and deletes the old one\nfunc (r *Rotator) Rotate(profile string) error {\n\tvar err error\n\n\tsource, _ := r.Config.SourceProfile(profile)\n\n\t\/\/ --------------------------------\n\t\/\/ Get the existing credentials\n\n\tprovider := &KeyringProvider{\n\t\tKeyring: r.Keyring,\n\t\tProfile: source.Name,\n\t\tRegion: source.Region,\n\t}\n\n\toldMasterCreds, err := provider.Retrieve()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldSess := session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: oldMasterCreds}),\n\t})\n\n\tcurrentUserName, err := GetUsernameFromSession(oldSess)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Found old access key ****************%s for user %s\",\n\t\toldMasterCreds.AccessKeyID[len(oldMasterCreds.AccessKeyID)-4:],\n\t\tcurrentUserName)\n\n\toldSessionCreds, err := NewVaultCredentials(r.Keyring, profile, VaultOptions{\n\t\tMfaToken: r.MfaToken,\n\t\tMfaPrompt: r.MfaPrompt,\n\t\tConfig: r.Config,\n\t\tNoSession: !r.needsSessionToRotate(profile),\n\t\tMasterCreds: &oldMasterCreds,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toldSessionVal, err := oldSessionCreds.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Create new access key\n\n\tlog.Println(\"Using old credentials to create a new access key\")\n\n\tvar iamUserName *string\n\n\t\/\/ A username is needed for some IAM calls if the credentials have assumed a role\n\tif oldSessionVal.SessionToken != \"\" || currentUserName != \"root\" {\n\t\tiamUserName = aws.String(currentUserName)\n\t}\n\n\toldSessionClient := iam.New(session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: oldSessionVal}),\n\t}))\n\n\tcreateOut, err := oldSessionClient.CreateAccessKey(&iam.CreateAccessKeyInput{\n\t\tUserName: iamUserName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Created new access key\")\n\n\tnewMasterCreds := credentials.Value{\n\t\tAccessKeyID: *createOut.AccessKey.AccessKeyId,\n\t\tSecretAccessKey: *createOut.AccessKey.SecretAccessKey,\n\t}\n\n\tif err := provider.Store(newMasterCreds); err != nil {\n\t\treturn fmt.Errorf(\"Error storing new access key %v: %v\",\n\t\t\tnewMasterCreds.AccessKeyID, err)\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Use new credentials to delete old access key\n\n\tlog.Println(\"Using new credentials to delete the old new access key\")\n\n\tnewSessionCreds, err := NewVaultCredentials(r.Keyring, profile, VaultOptions{\n\t\tMfaToken: r.MfaToken,\n\t\tMfaPrompt: r.MfaPrompt,\n\t\tConfig: r.Config,\n\t\tNoSession: !r.needsSessionToRotate(profile),\n\t\tMasterCreds: &newMasterCreds,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Waiting for new IAM credentials to propagate (takes up to 10 seconds)\")\n\n\terr = retry(time.Second*20, time.Second*5, func() error {\n\t\tnewVal, err := newSessionCreds.Get()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnewClient := iam.New(session.New(&aws.Config{Region: aws.String(provider.Region),\n\t\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: newVal}),\n\t\t}))\n\n\t\t_, err = newClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{\n\t\t\tAccessKeyId: aws.String(oldMasterCreds.AccessKeyID),\n\t\t\tUserName: iamUserName,\n\t\t})\n\t\treturn err\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can't delete old access key %v: %v\", oldMasterCreds.AccessKeyID, err)\n\t}\n\n\t\/\/ --------------------------------\n\t\/\/ Delete old sessions\n\n\tsessions, err := NewKeyringSessions(r.Keyring, r.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif n, _ := sessions.Delete(profile); n > 0 {\n\t\tlog.Printf(\"Deleted %d existing sessions.\", n)\n\t}\n\n\tlog.Printf(\"Rotated credentials for profile %q in vault\", profile)\n\treturn nil\n}\n\nvar (\n\tgetUserErrorRegexp = regexp.MustCompile(`^AccessDenied: User: arn:aws:iam::(\\d+):user\/(.+) is not`)\n)\n\n\/\/ GetUsernameFromSession returns the IAM username (or root) associated with the current aws session\nfunc GetUsernameFromSession(sess *session.Session) (string, error) {\n\tclient := iam.New(sess)\n\n\tresp, err := client.GetUser(&iam.GetUserInput{})\n\tif err != nil {\n\t\t\/\/ Even if GetUser fails, the current user is included in the error. This happens when you have o IAM permissions\n\t\t\/\/ on the master credentials, but have permission to use assumeRole later\n\t\tmatches := getUserErrorRegexp.FindStringSubmatch(err.Error())\n\t\tif len(matches) > 0 {\n\t\t\treturn matches[2], nil\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\tif resp.User.UserName != nil {\n\t\treturn *resp.User.UserName, nil\n\t}\n\n\tif resp.User.Arn != nil {\n\t\tarnParts := strings.Split(*resp.User.Arn, \":\")\n\t\treturn arnParts[len(arnParts)-1], nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"Couldn't determine current username\")\n}\n\nfunc retry(duration time.Duration, sleep time.Duration, callback func() error) (err error) {\n\tt0 := time.Now()\n\ti := 0\n\tfor {\n\t\ti++\n\n\t\terr = callback()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\tdelta := time.Now().Sub(t0)\n\t\tif delta > duration {\n\t\t\treturn fmt.Errorf(\"After %d attempts (during %s), last error: %s\", i, delta, err)\n\t\t}\n\n\t\ttime.Sleep(sleep)\n\t\tlog.Println(\"Retrying after error:\", err)\n\t}\n}\n\n\/\/ needsSessionToRotate attempts to resolve the dilemma around whether or not\n\/\/ profiles should use a session to be able to rotate.\n\/\/\n\/\/ Some profiles require assuming a role to get permission to create new\n\/\/ credentials. Alas, others which don't use a role are pure IAM and will\n\/\/ fail to create credentials when using an STS role, because AWS's IAM\n\/\/ systems hard-fail early when given STS credentials.\n\/\/\n\/\/ This is a heuristic which might need to continue to evolve. :(\nfunc (r *Rotator) needsSessionToRotate(profileName string) bool {\n\tif r.MfaToken != \"\" {\n\t\treturn true\n\t}\n\tsourceProfile, known := r.Config.SourceProfile(profileName)\n\tif !known {\n\t\t\/\/ best guess\n\t\treturn false\n\t}\n\tif sourceProfile.Name != profileName {\n\t\t\/\/ TODO: should this comparison be case-insensitive?\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"package vcs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/mail\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/knieriem\/hgo\"\n\thg_changelog \"github.com\/knieriem\/hgo\/changelog\"\n\thg_revlog \"github.com\/knieriem\/hgo\/revlog\"\n\thg_store \"github.com\/knieriem\/hgo\/store\"\n)\n\ntype HgRepositoryNative struct {\n\tdir string\n\tu *hgo.Repository\n\tst *hg_store.Store\n\tcl *hg_revlog.Index\n\tallTags *hgo.Tags\n\tbranchHeads *hgo.BranchHeads\n}\n\nfunc OpenHgRepositoryNative(dir string) (*HgRepositoryNative, error) {\n\tr, err := hgo.OpenRepository(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tst := r.NewStore()\n\tcl, err := st.OpenChangeLog()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglobalTags, allTags := r.Tags()\n\tglobalTags.Sort()\n\tallTags.Sort()\n\tallTags.Add(\"tip\", cl.Tip().Id().Node())\n\n\tbh, err := r.BranchHeads()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &HgRepositoryNative{dir, r, st, cl, allTags, bh}, nil\n}\n\nfunc (r *HgRepositoryNative) ResolveRevision(spec string) (CommitID, error) {\n\tif id, err := r.ResolveBranch(spec); err == nil {\n\t\treturn id, nil\n\t}\n\tif id, err := r.ResolveTag(spec); err == nil {\n\t\treturn id, nil\n\t}\n\n\trec, err := r.parseRevisionSpec(spec).Lookup(r.cl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn CommitID(hex.EncodeToString(rec.Id())), nil\n}\n\nfunc (r *HgRepositoryNative) ResolveTag(name string) (CommitID, error) {\n\tif id, ok := r.allTags.IdByName[name]; ok {\n\t\treturn CommitID(id), nil\n\t}\n\treturn \"\", ErrTagNotFound\n}\n\nfunc (r *HgRepositoryNative) ResolveBranch(name string) (CommitID, error) {\n\tif id, ok := r.branchHeads.IdByName[name]; ok {\n\t\treturn CommitID(id), nil\n\t}\n\treturn \"\", ErrBranchNotFound\n}\n\nfunc (r *HgRepositoryNative) GetCommit(id CommitID) (*Commit, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(id).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.makeCommit(rec)\n}\n\nfunc (r *HgRepositoryNative) CommitLog(to CommitID) ([]*Commit, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(to).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar commits []*Commit\n\tfor ; ; rec = rec.Prev() {\n\t\tc, err := r.makeCommit(rec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcommits = append(commits, c)\n\n\t\tif rec.IsStartOfBranch() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn commits, nil\n}\n\nfunc (r *HgRepositoryNative) makeCommit(rec *hg_revlog.Rec) (*Commit, error) {\n\tfb := hg_revlog.NewFileBuilder()\n\tce, err := hg_changelog.BuildEntry(rec, fb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr, err := mail.ParseAddress(ce.Committer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parents []CommitID\n\tif !rec.IsStartOfBranch() {\n\t\tif p := rec.Parent(); p != nil {\n\t\t\tparents = append(parents, CommitID(hex.EncodeToString(rec.Parent().Id())))\n\t\t}\n\t\tif rec.Parent2Present() {\n\t\t\tparents = append(parents, CommitID(hex.EncodeToString(rec.Parent2().Id())))\n\t\t}\n\t}\n\n\treturn &Commit{\n\t\tID: CommitID(ce.Id),\n\t\tAuthor: Signature{addr.Name, addr.Address, ce.Date},\n\t\tMessage: ce.Comment,\n\t\tParents: parents,\n\t}, nil\n}\n\nfunc (r *HgRepositoryNative) FileSystem(at CommitID) (FileSystem, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(at).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &hgFSNative{\n\t\tdir: r.dir,\n\t\tat: hg_revlog.FileRevSpec(rec.FileRev()),\n\t\trepo: r.u,\n\t\tst: r.st,\n\t\tcl: r.cl,\n\t\tfb: hg_revlog.NewFileBuilder(),\n\t}, nil\n}\n\nfunc (r *HgRepositoryNative) parseRevisionSpec(s string) hg_revlog.RevisionSpec {\n\tif s == \"\" {\n\t\ts = \"tip\"\n\t\t\/\/ TODO(sqs): determine per-repository default branch name (not always \"default\"?)\n\t}\n\tif s == \"tip\" {\n\t\treturn hg_revlog.TipRevSpec{}\n\t}\n\tif s == \"null\" {\n\t\treturn hg_revlog.NullRevSpec{}\n\t}\n\tif id, ok := r.allTags.IdByName[s]; ok {\n\t\ts = id\n\t} else if i, err := strconv.Atoi(s); err == nil {\n\t\treturn hg_revlog.FileRevSpec(i)\n\t}\n\n\treturn hg_revlog.NodeIdRevSpec(s)\n}\n\ntype hgFSNative struct {\n\tdir string\n\tat hg_revlog.FileRevSpec\n\trepo *hgo.Repository\n\tst *hg_store.Store\n\tcl *hg_revlog.Index\n\tfb *hg_revlog.FileBuilder\n}\n\nfunc (fs *hgFSNative) manifestEntry(chgId hg_revlog.FileRevSpec, fileName string) (me *hg_store.ManifestEnt, err error) {\n\tm, err := fs.getManifest(chgId)\n\tif err != nil {\n\t\treturn\n\t}\n\tme = m.Map()[fileName]\n\tif me == nil {\n\t\terr = errors.New(\"file does not exist in given revision\")\n\t}\n\treturn\n}\n\nfunc (fs *hgFSNative) getManifest(chgId hg_revlog.FileRevSpec) (m hg_store.Manifest, err error) {\n\trec, err := chgId.Lookup(fs.cl)\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := hg_changelog.BuildEntry(rec, fs.fb)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ st := fs.repo.NewStore()\n\tmlog, err := fs.st.OpenManifests()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec2, err := mlog.LookupRevision(int(c.Linkrev), c.ManifestNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hg_store.BuildManifest(rec2, fs.fb)\n}\n\nfunc (fs *hgFSNative) getEntry(path string) (*hg_revlog.Rec, *hg_store.ManifestEnt, error) {\n\tfileLog, err := fs.st.OpenRevlog(path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Get file entry from manifest (so we can look up the correct record in the file revlog)\n\tent, err := fs.manifestEntry(fs.at, path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Lookup record in revlog\n\tentId, err := ent.Id()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tlinkRevSpec := hg_revlog.LinkRevSpec{\n\t\tRev: int(fs.at),\n\t\tFindPresent: func(maybeAncestors []*hg_revlog.Rec) (index int, err error) {\n\t\t\t\/\/ Find the present file record by matching against the manifest entry.\n\t\t\t\/\/\n\t\t\t\/\/ Note: this is necessary due to some edge case where the returned file record nodeid does not match the\n\t\t\t\/\/ entry in the manifest. So far, have not been able to repro this outside the Python standard library (try\n\t\t\t\/\/ Stat() on the README)\n\t\t\tfor a, anc := range maybeAncestors {\n\t\t\t\tif anc.Id().Eq(entId) {\n\t\t\t\t\treturn a, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tancIds := make([]hg_revlog.NodeId, len(maybeAncestors))\n\t\t\tfor a, anc := range maybeAncestors {\n\t\t\t\tancIds[a] = anc.Id()\n\t\t\t}\n\t\t\treturn 0, fmt.Errorf(\"failed to find file record with nodeid matching manifest entry nodeid %v, candidates were %v\",\n\t\t\t\tentId, ancIds)\n\t\t},\n\t}\n\trec, err := linkRevSpec.Lookup(fileLog)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif rec.FileRev() == -1 {\n\t\treturn nil, nil, hg_revlog.ErrRevisionNotFound\n\t}\n\n\tif int(rec.Linkrev) == int(fs.at) {\n\t\t\/\/ The requested revision matches this record, which can be\n\t\t\/\/ used as a sign that the file exists. (TODO(sqs): original comments\n\t\t\/\/ say maybe this means the file is NOT existent yet? the word \"not\" is\n\t\t\/\/ not there but that seems to be a mistake.)\n\t\treturn rec, ent, nil\n\t}\n\n\tif !rec.IsLeaf() {\n\t\t\/\/ There are other records that have the current record as a parent.\n\t\t\/\/ This means, the file was existent, no need to check the manifest.\n\t\treturn rec, ent, nil\n\t}\n\n\treturn rec, ent, nil\n}\n\nfunc (fs *hgFSNative) Open(name string) (ReadSeekCloser, error) {\n\trec, _, err := fs.getEntry(name)\n\tif err != nil {\n\t\treturn nil, standardizeHgError(err)\n\t}\n\n\tdata, err := fs.readFile(rec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nopCloser{bytes.NewReader(data)}, nil\n}\n\nfunc (fs *hgFSNative) readFile(rec *hg_revlog.Rec) ([]byte, error) {\n\tfb := hg_revlog.NewFileBuilder()\n\treturn fb.Build(rec)\n}\n\nfunc (fs *hgFSNative) Lstat(path string) (os.FileInfo, error) {\n\tfi, _, err := fs.lstat(path)\n\treturn fi, err\n}\n\nfunc (fs *hgFSNative) lstat(path string) (os.FileInfo, []byte, error) {\n\tpath = filepath.Clean(path)\n\n\trec, ent, err := fs.getEntry(path)\n\tif os.IsNotExist(err) {\n\t\t\/\/ check if path is a dir (dirs are not in hg's manifest, so we need to\n\t\t\/\/ hack around to get them).\n\t\tfi, err := fs.dirStat(path)\n\t\treturn fi, nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, standardizeHgError(err)\n\t}\n\n\tfi := fs.fileInfo(ent)\n\n\t\/\/ read data to determine file size\n\tdata, err := fs.readFile(rec)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfi.size = int64(len(data))\n\n\treturn fi, data, nil\n}\n\nfunc (fs *hgFSNative) Stat(path string) (os.FileInfo, error) {\n\tfi, data, err := fs.lstat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif fi.Mode()&os.ModeSymlink != 0 {\n\t\t\/\/ derefence symlink\n\t\tderefPath := string(data)\n\t\tfi, err := fs.Lstat(derefPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfi.(*fileInfo).name = filepath.Base(path)\n\t\treturn fi, nil\n\t}\n\n\treturn fi, nil\n}\n\n\/\/ dirStat determines whether a directory exists at path by listing files\n\/\/ underneath it. If it has files, then it's a directory. We must do it this way\n\/\/ because hg doesn't track directories in the manifest.\nfunc (fs *hgFSNative) dirStat(path string) (os.FileInfo, error) {\n\tif path == \".\" {\n\t\treturn &fileInfo{\n\t\t\tname: \".\",\n\t\t\tmode: os.ModeDir,\n\t\t}, nil\n\t}\n\n\tm, err := fs.getManifest(fs.at)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdirPrefix := filepath.Clean(path) + \"\/\"\n\tfor _, e := range m {\n\t\tif strings.HasPrefix(e.FileName, dirPrefix) {\n\t\t\treturn &fileInfo{\n\t\t\t\tname: filepath.Base(path),\n\t\t\t\tmode: os.ModeDir,\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn nil, os.ErrNotExist\n}\n\nfunc (fs *hgFSNative) fileInfo(ent *hg_store.ManifestEnt) *fileInfo {\n\tvar mode os.FileMode\n\tif ent.IsExecutable() {\n\t\tmode |= 0111 \/\/ +x\n\t}\n\tif ent.IsLink() {\n\t\tmode |= os.ModeSymlink\n\t}\n\n\treturn &fileInfo{\n\t\tname: filepath.Base(ent.FileName),\n\t\tmode: mode,\n\t}\n}\n\nfunc (fs *hgFSNative) ReadDir(path string) ([]os.FileInfo, error) {\n\tm, err := fs.getManifest(fs.at)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar fis []os.FileInfo\n\tsubdirs := make(map[string]struct{})\n\n\tvar dirPrefix string\n\tif path := filepath.Clean(path); path == \".\" {\n\t\tdirPrefix = \"\"\n\t} else {\n\t\tdirPrefix = path + \"\/\"\n\t}\n\tfor _, e := range m {\n\t\tif !strings.HasPrefix(e.FileName, dirPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tname := strings.TrimPrefix(e.FileName, dirPrefix)\n\t\tdir := filepath.Dir(name)\n\t\tif dir == \".\" {\n\t\t\tfis = append(fis, fs.fileInfo(&e))\n\t\t} else {\n\t\t\tsubdir := strings.SplitN(dir, \"\/\", 2)[0]\n\t\t\tif _, seen := subdirs[subdir]; !seen {\n\t\t\t\tfis = append(fis, &fileInfo{name: subdir, mode: os.ModeDir})\n\t\t\t\tsubdirs[subdir] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\treturn fis, nil\n}\n\nfunc (fs *hgFSNative) String() string {\n\treturn fmt.Sprintf(\"hg repository %s commit %s (native)\", fs.dir, fs.at)\n}\n\nfunc standardizeHgError(err error) error {\n\tif err == hg_revlog.ErrRevisionNotFound {\n\t\treturn os.ErrNotExist\n\t}\n\treturn err\n}\n\ntype fileInfo struct {\n\tname string\n\tmode os.FileMode\n\tsize int64\n\tmtime time.Time\n}\n\nfunc (fi *fileInfo) Name() string { return fi.name }\nfunc (fi *fileInfo) Size() int64 { return fi.size }\nfunc (fi *fileInfo) Mode() os.FileMode { return fi.mode }\nfunc (fi *fileInfo) ModTime() time.Time { return fi.mtime }\nfunc (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() }\nfunc (fi *fileInfo) Sys() interface{} { return nil }\nupdate dependency: github.com\/knieriem\/hgo -> github.com\/beyang\/hgopackage vcs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/mail\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/beyang\/hgo\"\n\thg_changelog \"github.com\/beyang\/hgo\/changelog\"\n\thg_revlog \"github.com\/beyang\/hgo\/revlog\"\n\thg_store \"github.com\/beyang\/hgo\/store\"\n)\n\ntype HgRepositoryNative struct {\n\tdir string\n\tu *hgo.Repository\n\tst *hg_store.Store\n\tcl *hg_revlog.Index\n\tallTags *hgo.Tags\n\tbranchHeads *hgo.BranchHeads\n}\n\nfunc OpenHgRepositoryNative(dir string) (*HgRepositoryNative, error) {\n\tr, err := hgo.OpenRepository(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tst := r.NewStore()\n\tcl, err := st.OpenChangeLog()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglobalTags, allTags := r.Tags()\n\tglobalTags.Sort()\n\tallTags.Sort()\n\tallTags.Add(\"tip\", cl.Tip().Id().Node())\n\n\tbh, err := r.BranchHeads()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &HgRepositoryNative{dir, r, st, cl, allTags, bh}, nil\n}\n\nfunc (r *HgRepositoryNative) ResolveRevision(spec string) (CommitID, error) {\n\tif id, err := r.ResolveBranch(spec); err == nil {\n\t\treturn id, nil\n\t}\n\tif id, err := r.ResolveTag(spec); err == nil {\n\t\treturn id, nil\n\t}\n\n\trec, err := r.parseRevisionSpec(spec).Lookup(r.cl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn CommitID(hex.EncodeToString(rec.Id())), nil\n}\n\nfunc (r *HgRepositoryNative) ResolveTag(name string) (CommitID, error) {\n\tif id, ok := r.allTags.IdByName[name]; ok {\n\t\treturn CommitID(id), nil\n\t}\n\treturn \"\", ErrTagNotFound\n}\n\nfunc (r *HgRepositoryNative) ResolveBranch(name string) (CommitID, error) {\n\tif id, ok := r.branchHeads.IdByName[name]; ok {\n\t\treturn CommitID(id), nil\n\t}\n\treturn \"\", ErrBranchNotFound\n}\n\nfunc (r *HgRepositoryNative) GetCommit(id CommitID) (*Commit, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(id).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.makeCommit(rec)\n}\n\nfunc (r *HgRepositoryNative) CommitLog(to CommitID) ([]*Commit, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(to).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar commits []*Commit\n\tfor ; ; rec = rec.Prev() {\n\t\tc, err := r.makeCommit(rec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcommits = append(commits, c)\n\n\t\tif rec.IsStartOfBranch() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn commits, nil\n}\n\nfunc (r *HgRepositoryNative) makeCommit(rec *hg_revlog.Rec) (*Commit, error) {\n\tfb := hg_revlog.NewFileBuilder()\n\tce, err := hg_changelog.BuildEntry(rec, fb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr, err := mail.ParseAddress(ce.Committer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parents []CommitID\n\tif !rec.IsStartOfBranch() {\n\t\tif p := rec.Parent(); p != nil {\n\t\t\tparents = append(parents, CommitID(hex.EncodeToString(rec.Parent().Id())))\n\t\t}\n\t\tif rec.Parent2Present() {\n\t\t\tparents = append(parents, CommitID(hex.EncodeToString(rec.Parent2().Id())))\n\t\t}\n\t}\n\n\treturn &Commit{\n\t\tID: CommitID(ce.Id),\n\t\tAuthor: Signature{addr.Name, addr.Address, ce.Date},\n\t\tMessage: ce.Comment,\n\t\tParents: parents,\n\t}, nil\n}\n\nfunc (r *HgRepositoryNative) FileSystem(at CommitID) (FileSystem, error) {\n\trec, err := hg_revlog.NodeIdRevSpec(at).Lookup(r.cl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &hgFSNative{\n\t\tdir: r.dir,\n\t\tat: hg_revlog.FileRevSpec(rec.FileRev()),\n\t\trepo: r.u,\n\t\tst: r.st,\n\t\tcl: r.cl,\n\t\tfb: hg_revlog.NewFileBuilder(),\n\t}, nil\n}\n\nfunc (r *HgRepositoryNative) parseRevisionSpec(s string) hg_revlog.RevisionSpec {\n\tif s == \"\" {\n\t\ts = \"tip\"\n\t\t\/\/ TODO(sqs): determine per-repository default branch name (not always \"default\"?)\n\t}\n\tif s == \"tip\" {\n\t\treturn hg_revlog.TipRevSpec{}\n\t}\n\tif s == \"null\" {\n\t\treturn hg_revlog.NullRevSpec{}\n\t}\n\tif id, ok := r.allTags.IdByName[s]; ok {\n\t\ts = id\n\t} else if i, err := strconv.Atoi(s); err == nil {\n\t\treturn hg_revlog.FileRevSpec(i)\n\t}\n\n\treturn hg_revlog.NodeIdRevSpec(s)\n}\n\ntype hgFSNative struct {\n\tdir string\n\tat hg_revlog.FileRevSpec\n\trepo *hgo.Repository\n\tst *hg_store.Store\n\tcl *hg_revlog.Index\n\tfb *hg_revlog.FileBuilder\n}\n\nfunc (fs *hgFSNative) manifestEntry(chgId hg_revlog.FileRevSpec, fileName string) (me *hg_store.ManifestEnt, err error) {\n\tm, err := fs.getManifest(chgId)\n\tif err != nil {\n\t\treturn\n\t}\n\tme = m.Map()[fileName]\n\tif me == nil {\n\t\terr = errors.New(\"file does not exist in given revision\")\n\t}\n\treturn\n}\n\nfunc (fs *hgFSNative) getManifest(chgId hg_revlog.FileRevSpec) (m hg_store.Manifest, err error) {\n\trec, err := chgId.Lookup(fs.cl)\n\tif err != nil {\n\t\treturn\n\t}\n\tc, err := hg_changelog.BuildEntry(rec, fs.fb)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ st := fs.repo.NewStore()\n\tmlog, err := fs.st.OpenManifests()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trec2, err := mlog.LookupRevision(int(c.Linkrev), c.ManifestNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hg_store.BuildManifest(rec2, fs.fb)\n}\n\nfunc (fs *hgFSNative) getEntry(path string) (*hg_revlog.Rec, *hg_store.ManifestEnt, error) {\n\tfileLog, err := fs.st.OpenRevlog(path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Get file entry from manifest (so we can look up the correct record in the file revlog)\n\tent, err := fs.manifestEntry(fs.at, path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Lookup record in revlog\n\tentId, err := ent.Id()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tlinkRevSpec := hg_revlog.LinkRevSpec{\n\t\tRev: int(fs.at),\n\t\tFindPresent: func(maybeAncestors []*hg_revlog.Rec) (index int, err error) {\n\t\t\t\/\/ Find the present file record by matching against the manifest entry.\n\t\t\t\/\/\n\t\t\t\/\/ Note: this is necessary due to some edge case where the returned file record nodeid does not match the\n\t\t\t\/\/ entry in the manifest. So far, have not been able to repro this outside the Python standard library (try\n\t\t\t\/\/ Stat() on the README)\n\t\t\tfor a, anc := range maybeAncestors {\n\t\t\t\tif anc.Id().Eq(entId) {\n\t\t\t\t\treturn a, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tancIds := make([]hg_revlog.NodeId, len(maybeAncestors))\n\t\t\tfor a, anc := range maybeAncestors {\n\t\t\t\tancIds[a] = anc.Id()\n\t\t\t}\n\t\t\treturn 0, fmt.Errorf(\"failed to find file record with nodeid matching manifest entry nodeid %v, candidates were %v\",\n\t\t\t\tentId, ancIds)\n\t\t},\n\t}\n\trec, err := linkRevSpec.Lookup(fileLog)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif rec.FileRev() == -1 {\n\t\treturn nil, nil, hg_revlog.ErrRevisionNotFound\n\t}\n\n\tif int(rec.Linkrev) == int(fs.at) {\n\t\t\/\/ The requested revision matches this record, which can be\n\t\t\/\/ used as a sign that the file exists. (TODO(sqs): original comments\n\t\t\/\/ say maybe this means the file is NOT existent yet? the word \"not\" is\n\t\t\/\/ not there but that seems to be a mistake.)\n\t\treturn rec, ent, nil\n\t}\n\n\tif !rec.IsLeaf() {\n\t\t\/\/ There are other records that have the current record as a parent.\n\t\t\/\/ This means, the file was existent, no need to check the manifest.\n\t\treturn rec, ent, nil\n\t}\n\n\treturn rec, ent, nil\n}\n\nfunc (fs *hgFSNative) Open(name string) (ReadSeekCloser, error) {\n\trec, _, err := fs.getEntry(name)\n\tif err != nil {\n\t\treturn nil, standardizeHgError(err)\n\t}\n\n\tdata, err := fs.readFile(rec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nopCloser{bytes.NewReader(data)}, nil\n}\n\nfunc (fs *hgFSNative) readFile(rec *hg_revlog.Rec) ([]byte, error) {\n\tfb := hg_revlog.NewFileBuilder()\n\treturn fb.Build(rec)\n}\n\nfunc (fs *hgFSNative) Lstat(path string) (os.FileInfo, error) {\n\tfi, _, err := fs.lstat(path)\n\treturn fi, err\n}\n\nfunc (fs *hgFSNative) lstat(path string) (os.FileInfo, []byte, error) {\n\tpath = filepath.Clean(path)\n\n\trec, ent, err := fs.getEntry(path)\n\tif os.IsNotExist(err) {\n\t\t\/\/ check if path is a dir (dirs are not in hg's manifest, so we need to\n\t\t\/\/ hack around to get them).\n\t\tfi, err := fs.dirStat(path)\n\t\treturn fi, nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, standardizeHgError(err)\n\t}\n\n\tfi := fs.fileInfo(ent)\n\n\t\/\/ read data to determine file size\n\tdata, err := fs.readFile(rec)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfi.size = int64(len(data))\n\n\treturn fi, data, nil\n}\n\nfunc (fs *hgFSNative) Stat(path string) (os.FileInfo, error) {\n\tfi, data, err := fs.lstat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif fi.Mode()&os.ModeSymlink != 0 {\n\t\t\/\/ derefence symlink\n\t\tderefPath := string(data)\n\t\tfi, err := fs.Lstat(derefPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfi.(*fileInfo).name = filepath.Base(path)\n\t\treturn fi, nil\n\t}\n\n\treturn fi, nil\n}\n\n\/\/ dirStat determines whether a directory exists at path by listing files\n\/\/ underneath it. If it has files, then it's a directory. We must do it this way\n\/\/ because hg doesn't track directories in the manifest.\nfunc (fs *hgFSNative) dirStat(path string) (os.FileInfo, error) {\n\tif path == \".\" {\n\t\treturn &fileInfo{\n\t\t\tname: \".\",\n\t\t\tmode: os.ModeDir,\n\t\t}, nil\n\t}\n\n\tm, err := fs.getManifest(fs.at)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdirPrefix := filepath.Clean(path) + \"\/\"\n\tfor _, e := range m {\n\t\tif strings.HasPrefix(e.FileName, dirPrefix) {\n\t\t\treturn &fileInfo{\n\t\t\t\tname: filepath.Base(path),\n\t\t\t\tmode: os.ModeDir,\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn nil, os.ErrNotExist\n}\n\nfunc (fs *hgFSNative) fileInfo(ent *hg_store.ManifestEnt) *fileInfo {\n\tvar mode os.FileMode\n\tif ent.IsExecutable() {\n\t\tmode |= 0111 \/\/ +x\n\t}\n\tif ent.IsLink() {\n\t\tmode |= os.ModeSymlink\n\t}\n\n\treturn &fileInfo{\n\t\tname: filepath.Base(ent.FileName),\n\t\tmode: mode,\n\t}\n}\n\nfunc (fs *hgFSNative) ReadDir(path string) ([]os.FileInfo, error) {\n\tm, err := fs.getManifest(fs.at)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar fis []os.FileInfo\n\tsubdirs := make(map[string]struct{})\n\n\tvar dirPrefix string\n\tif path := filepath.Clean(path); path == \".\" {\n\t\tdirPrefix = \"\"\n\t} else {\n\t\tdirPrefix = path + \"\/\"\n\t}\n\tfor _, e := range m {\n\t\tif !strings.HasPrefix(e.FileName, dirPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tname := strings.TrimPrefix(e.FileName, dirPrefix)\n\t\tdir := filepath.Dir(name)\n\t\tif dir == \".\" {\n\t\t\tfis = append(fis, fs.fileInfo(&e))\n\t\t} else {\n\t\t\tsubdir := strings.SplitN(dir, \"\/\", 2)[0]\n\t\t\tif _, seen := subdirs[subdir]; !seen {\n\t\t\t\tfis = append(fis, &fileInfo{name: subdir, mode: os.ModeDir})\n\t\t\t\tsubdirs[subdir] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\treturn fis, nil\n}\n\nfunc (fs *hgFSNative) String() string {\n\treturn fmt.Sprintf(\"hg repository %s commit %s (native)\", fs.dir, fs.at)\n}\n\nfunc standardizeHgError(err error) error {\n\tif err == hg_revlog.ErrRevisionNotFound {\n\t\treturn os.ErrNotExist\n\t}\n\treturn err\n}\n\ntype fileInfo struct {\n\tname string\n\tmode os.FileMode\n\tsize int64\n\tmtime time.Time\n}\n\nfunc (fi *fileInfo) Name() string { return fi.name }\nfunc (fi *fileInfo) Size() int64 { return fi.size }\nfunc (fi *fileInfo) Mode() os.FileMode { return fi.mode }\nfunc (fi *fileInfo) ModTime() time.Time { return fi.mtime }\nfunc (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() }\nfunc (fi *fileInfo) Sys() interface{} { return nil }\n<|endoftext|>"} {"text":"package callback\n\n\/\/#include \"callback.h\"\nimport \"C\"\nimport (\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/export newCallbackRunner\nfunc newCallbackRunner() {\n\tgo C.runCallbacks()\n}\n\nfunc init() {\n\t\/\/ work around issue 1560.\n\tif runtime.GOMAXPROCS(0) < 2 {\n\t\truntime.GOMAXPROCS(2)\n\t}\n\n\tC.callbackInit()\n\tgo C.runCallbacks()\n}\n\n\/\/ Func holds a pointer to the C callback function.\n\/\/ It can be used by converting it to a function pointer\n\/\/ with type void (*callback)(void (*f)(void*), void *arg);\n\/\/ When called, it calls the provided function f in a\n\/\/ a Go context.\nvar Func = callbackFunc\n\nvar callbackFunc = unsafe.Pointer(C.callbackFunc())\nupdate doc comments for callback.package callback\n\n\/\/#include \"callback.h\"\nimport \"C\"\nimport (\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/export newCallbackRunner\nfunc newCallbackRunner() {\n\tgo C.runCallbacks()\n}\n\nfunc init() {\n\t\/\/ work around issue 1560.\n\tif runtime.GOMAXPROCS(0) < 2 {\n\t\truntime.GOMAXPROCS(2)\n\t}\n\n\tC.callbackInit()\n\tgo C.runCallbacks()\n}\n\n\/\/ Func holds a pointer to the C callback function.\n\/\/ When called, it calls the provided function f in a\n\/\/ a Go context with the given argument.\n\/\/\n\/\/ It can be used by first converting it to a function pointer\n\/\/ and then calling from C.\n\/\/ Here is an example that sets up the callback function:\n\/\/ \t\/\/static void (*callback)(void (*f)(void*), void *arg);\n\/\/\t\/\/void setCallback(void *c){\n\/\/\t\/\/\tcallback = c;\n\/\/\t\/\/}\n\/\/ import \"C\"\n\/\/\timport \"rog-go.googlecode.com\/hg\/exp\/callback\"\n\/\/\t\n\/\/\tfunc init() {\n\/\/\t\tC.setCallback(callback.Func)\n\/\/\t}\n\/\/\nvar Func = callbackFunc\n\nvar callbackFunc = unsafe.Pointer(C.callbackFunc())\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 e-Xpert Solutions SA. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage f5\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ErrNoToken is the error returned when the Client does not have a token.\nvar ErrNoToken = errors.New(\"no token\")\n\n\/\/ An authFunc is function responsible for setting necessary headers to\n\/\/ perform authenticated requests.\ntype authFunc func(req *http.Request) error\n\n\/\/ A Client manages communication with the F5 API.\ntype Client struct {\n\tc http.Client\n\tbaseURL string\n\tmakeAuth authFunc\n\tt *http.Transport\n\n\ttoken string\n\n\t\/\/ Transaction\n\ttxID string \/\/ transaction ID to send for every request\n}\n\n\/\/ NewBasicClient creates a new F5 client with HTTP Basic Authentication.\n\/\/\n\/\/ baseURL is the base URL of the F5 API server.\nfunc NewBasicClient(baseURL, user, password string) (*Client, error) {\n\tt := &http.Transport{}\n\treturn &Client{\n\t\tc: http.Client{Transport: t},\n\t\tbaseURL: baseURL,\n\t\tt: t,\n\t\tmakeAuth: authFunc(func(req *http.Request) error {\n\t\t\treq.SetBasicAuth(user, password)\n\t\t\treturn nil\n\t\t}),\n\t}, nil\n}\n\n\/\/TokenClientConnection creates a new client with the given token.\nfunc TokenClientConnection(baseURL, token string) (*Client, error) {\n\tt := &http.Transport{}\n\tc := &Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\tc.token = token\n\tc.makeAuth = authFunc(func(req *http.Request) error {\n\t\treq.Header.Add(\"X-F5-Auth-Token\", c.token)\n\t\treturn nil\n\t})\n\n\treturn c, nil\n}\n\n\/\/ CreateToken creates a new token with the given baseURL, user, password and loginProvName.\nfunc CreateToken(baseURL, user, password, loginProvName string) (string, error) {\n\tt := &http.Transport{}\n\tc := &Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\tc.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t\/\/ Negociate token with a pair of username\/password.\n\tdata, err := json.Marshal(map[string]string{\"username\": user, \"password\": password, \"loginProviderName\": loginProvName})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (cannot marshal user credentials): %v\", err)\n\t}\n\n\ttokReq, err := http.NewRequest(\"POST\", c.makeURL(\"\/mgmt\/shared\/authn\/login\"), bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client, (cannot create login request): %v\", err)\n\t}\n\n\ttokReq.Header.Add(\"Content-Type\", \"application\/json\")\n\ttokReq.SetBasicAuth(user, password)\n\n\tresp, err := c.c.Do(tokReq)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (token negociation failed): %v\", err)\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (token negociation failed): http status %s\", resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\ttok := struct {\n\t\tToken struct {\n\t\t\tToken string `json:\"token\"`\n\t\t} `json:\"token\"`\n\t}{}\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(&tok); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (cannot decode token): %v\", err)\n\t}\n\treturn tok.Token.Token, nil\n}\n\n\/\/ NewTokenClient creates a new F5 client with token based authentication.\n\/\/\n\/\/ baseURL is the base URL of the F5 API server.\nfunc NewTokenClient(baseURL, user, password, loginProvName string) (*Client, error) {\n\tt := &http.Transport{}\n\tc := Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\n\t\/\/ Create auth function for token based authentication.\n\tc.makeAuth = authFunc(func(req *http.Request) error {\n\t\tif c.token == \"\" {\n\t\t\t\/\/ Negociate token with a pair of username\/password.\n\t\t\tdata, err := json.Marshal(map[string]string{\"username\": user, \"password\": password, \"loginProviderName\": loginProvName})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (cannot marshal user credentials): %v\", err)\n\t\t\t}\n\n\t\t\ttokReq, err := http.NewRequest(\"POST\", c.makeURL(\"\/mgmt\/shared\/authn\/login\"), bytes.NewBuffer(data))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client, (cannot create login request): %v\", err)\n\t\t\t}\n\n\t\t\ttokReq.Header.Add(\"Content-Type\", \"application\/json\")\n\t\t\ttokReq.SetBasicAuth(user, password)\n\n\t\t\tresp, err := c.c.Do(tokReq)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (token negociation failed): %v\", err)\n\t\t\t}\n\t\t\tif resp.StatusCode >= 400 {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (token negociation failed): http status %s\", resp.Status)\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\ttok := struct {\n\t\t\t\tToken struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t} `json:\"token\"`\n\t\t\t}{}\n\t\t\tdec := json.NewDecoder(resp.Body)\n\t\t\tif err := dec.Decode(&tok); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (cannot decode token): %v\", err)\n\t\t\t}\n\n\t\t\tc.token = tok.Token.Token\n\t\t}\n\t\treq.Header.Add(\"X-F5-Auth-Token\", c.token)\n\t\treturn nil\n\t})\n\n\treturn &c, nil\n}\n\n\/\/ DisableCertCheck disables certificate verification, meaning that insecure\n\/\/ certificate will not cause any error.\nfunc (c *Client) DisableCertCheck() {\n\tc.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n}\n\n\/\/ RevokeToken revokes the current token. If the Client has not been initialized\n\/\/ with NewTokenClient, ErrNoToken is returned.\nfunc (c *Client) RevokeToken() error {\n\tif c.token == \"\" {\n\t\treturn ErrNoToken\n\t}\n\n\tresp, err := c.SendRequest(\"DELETE\", \"\/mgmt\/shared\/authz\/tokens\/\"+c.token, nil)\n\tif err != nil {\n\t\treturn errors.New(\"token revocation request failed: \" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\tvar respData struct {\n\t\tToken string `json:\"token\"`\n\t}\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(&respData); err != nil {\n\t\treturn errors.New(\"cannot decode token revocation response: \" + err.Error())\n\t}\n\tif respData.Token != c.token {\n\t\treturn errors.New(\"invalid token revocation response\")\n\t}\n\n\tc.token = \"\"\n\n\treturn nil\n}\n\n\/\/ UseProxy configures a proxy to use for outbound connections\nfunc (c *Client) UseProxy(proxy string) error {\n\tproxyURL, err := url.Parse(proxy)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.t.Proxy = http.ProxyURL(proxyURL)\n\treturn nil\n}\n\n\/\/ UseSystemProxy configures the client to use the system proxy\nfunc (c *Client) UseSystemProxy() error {\n\tproxy := os.Getenv(\"HTTP_PROXY\")\n\tif proxy != \"\" {\n\t\tproxyURL, err := url.Parse(proxy)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.t.Proxy = http.ProxyURL(proxyURL)\n\t}\n\treturn nil\n}\n\n\/\/ MakeRequest creates a request with headers appropriately set to make\n\/\/ authenticated requests. This method must be called for every new request.\nfunc (c *Client) MakeRequest(method, restPath string, data interface{}) (*http.Request, error) {\n\tvar (\n\t\treq *http.Request\n\t\terr error\n\t)\n\tif data != nil {\n\t\tbs, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal data into json: %v\", err)\n\t\t}\n\t\treq, err = http.NewRequest(method, c.makeURL(restPath), bytes.NewBuffer(bs))\n\t} else {\n\t\treq, err = http.NewRequest(method, c.makeURL(restPath), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create F5 authenticated request: %v\", err)\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tif c.txID != \"\" {\n\t\treq.Header.Add(\"X-F5-REST-Coordination-Id\", c.txID)\n\t}\n\tif err := c.makeAuth(req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n\n\/\/ Do sends an HTTP request and returns an HTTP response. It is just a wrapper\n\/\/ arround http.Client Do method.\n\/\/\n\/\/ Callers should close resp.Body when done reading from it.\n\/\/\n\/\/ See http package documentation for more information:\n\/\/ https:\/\/golang.org\/pkg\/net\/http\/#Client.Do\nfunc (c *Client) Do(req *http.Request) (*http.Response, error) {\n\treturn c.c.Do(req)\n}\n\n\/\/ SendRequest is a shortcut for MakeRequest() + Do() + ReadError().\nfunc (c *Client) SendRequest(method, restPath string, data interface{}) (*http.Response, error) {\n\treq, err := c.MakeRequest(method, restPath, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := c.ReadError(resp); err != nil {\n\t\tresp.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/ ReadQuery performs a GET query and unmarshal the response (from JSON) into outputData.\n\/\/\n\/\/ outputData must be a pointer.\nfunc (c *Client) ReadQuery(restPath string, outputData interface{}) error {\n\tresp, err := c.SendRequest(\"GET\", restPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(outputData); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ModQuery performs a modification query such as POST, PUT or DELETE.\nfunc (c *Client) ModQuery(method, restPath string, inputData interface{}) error {\n\tif method != \"POST\" && method != \"PUT\" && method != \"DELETE\" {\n\t\treturn errors.New(\"invalid method \" + method)\n\t}\n\tresp, err := c.SendRequest(method, restPath, inputData)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\treturn nil\n}\n\n\/\/ ReadError checks if a HTTP response contains an error and returns it.\nfunc (c *Client) ReadError(resp *http.Response) error {\n\tif resp.StatusCode >= 400 {\n\t\tif contentType := resp.Header.Get(\"Content-Type\"); !strings.Contains(contentType, \"application\/json\") {\n\t\t\treturn errors.New(\"http response error: \" + resp.Status)\n\t\t}\n\t\terrResp, err := NewRequestError(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errResp\n\t}\n\treturn nil\n}\n\n\/\/ makeURL creates an URL from the client base URL and a given REST path. What\n\/\/ this function actually does is to concatenate the base URL and the REST path\n\/\/ by handling trailing slashes.\nfunc (c *Client) makeURL(restPath string) string {\n\treturn strings.TrimSuffix(c.baseURL, \"\/\") + \"\/\" + strings.TrimPrefix(restPath, \"\/\")\n}\n\nfunc (c *Client) clone() *Client {\n\treturn &Client{\n\t\tc: c.c,\n\t\tbaseURL: c.baseURL,\n\t\tmakeAuth: c.makeAuth,\n\t}\n}\nf5: add missing leading space in comment\/\/ Copyright 2016 e-Xpert Solutions SA. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage f5\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ErrNoToken is the error returned when the Client does not have a token.\nvar ErrNoToken = errors.New(\"no token\")\n\n\/\/ An authFunc is function responsible for setting necessary headers to\n\/\/ perform authenticated requests.\ntype authFunc func(req *http.Request) error\n\n\/\/ A Client manages communication with the F5 API.\ntype Client struct {\n\tc http.Client\n\tbaseURL string\n\tmakeAuth authFunc\n\tt *http.Transport\n\n\ttoken string\n\n\t\/\/ Transaction\n\ttxID string \/\/ transaction ID to send for every request\n}\n\n\/\/ NewBasicClient creates a new F5 client with HTTP Basic Authentication.\n\/\/\n\/\/ baseURL is the base URL of the F5 API server.\nfunc NewBasicClient(baseURL, user, password string) (*Client, error) {\n\tt := &http.Transport{}\n\treturn &Client{\n\t\tc: http.Client{Transport: t},\n\t\tbaseURL: baseURL,\n\t\tt: t,\n\t\tmakeAuth: authFunc(func(req *http.Request) error {\n\t\t\treq.SetBasicAuth(user, password)\n\t\t\treturn nil\n\t\t}),\n\t}, nil\n}\n\n\/\/ TokenClientConnection creates a new client with the given token.\nfunc TokenClientConnection(baseURL, token string) (*Client, error) {\n\tt := &http.Transport{}\n\tc := &Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\tc.token = token\n\tc.makeAuth = authFunc(func(req *http.Request) error {\n\t\treq.Header.Add(\"X-F5-Auth-Token\", c.token)\n\t\treturn nil\n\t})\n\n\treturn c, nil\n}\n\n\/\/ CreateToken creates a new token with the given baseURL, user, password and loginProvName.\nfunc CreateToken(baseURL, user, password, loginProvName string) (string, error) {\n\tt := &http.Transport{}\n\tc := &Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\tc.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t\/\/ Negociate token with a pair of username\/password.\n\tdata, err := json.Marshal(map[string]string{\"username\": user, \"password\": password, \"loginProviderName\": loginProvName})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (cannot marshal user credentials): %v\", err)\n\t}\n\n\ttokReq, err := http.NewRequest(\"POST\", c.makeURL(\"\/mgmt\/shared\/authn\/login\"), bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client, (cannot create login request): %v\", err)\n\t}\n\n\ttokReq.Header.Add(\"Content-Type\", \"application\/json\")\n\ttokReq.SetBasicAuth(user, password)\n\n\tresp, err := c.c.Do(tokReq)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (token negociation failed): %v\", err)\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (token negociation failed): http status %s\", resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\ttok := struct {\n\t\tToken struct {\n\t\t\tToken string `json:\"token\"`\n\t\t} `json:\"token\"`\n\t}{}\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(&tok); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create token client (cannot decode token): %v\", err)\n\t}\n\treturn tok.Token.Token, nil\n}\n\n\/\/ NewTokenClient creates a new F5 client with token based authentication.\n\/\/\n\/\/ baseURL is the base URL of the F5 API server.\nfunc NewTokenClient(baseURL, user, password, loginProvName string) (*Client, error) {\n\tt := &http.Transport{}\n\tc := Client{c: http.Client{Transport: t}, baseURL: baseURL, t: t}\n\n\t\/\/ Create auth function for token based authentication.\n\tc.makeAuth = authFunc(func(req *http.Request) error {\n\t\tif c.token == \"\" {\n\t\t\t\/\/ Negociate token with a pair of username\/password.\n\t\t\tdata, err := json.Marshal(map[string]string{\"username\": user, \"password\": password, \"loginProviderName\": loginProvName})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (cannot marshal user credentials): %v\", err)\n\t\t\t}\n\n\t\t\ttokReq, err := http.NewRequest(\"POST\", c.makeURL(\"\/mgmt\/shared\/authn\/login\"), bytes.NewBuffer(data))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client, (cannot create login request): %v\", err)\n\t\t\t}\n\n\t\t\ttokReq.Header.Add(\"Content-Type\", \"application\/json\")\n\t\t\ttokReq.SetBasicAuth(user, password)\n\n\t\t\tresp, err := c.c.Do(tokReq)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (token negociation failed): %v\", err)\n\t\t\t}\n\t\t\tif resp.StatusCode >= 400 {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (token negociation failed): http status %s\", resp.Status)\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\ttok := struct {\n\t\t\t\tToken struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t} `json:\"token\"`\n\t\t\t}{}\n\t\t\tdec := json.NewDecoder(resp.Body)\n\t\t\tif err := dec.Decode(&tok); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create token client (cannot decode token): %v\", err)\n\t\t\t}\n\n\t\t\tc.token = tok.Token.Token\n\t\t}\n\t\treq.Header.Add(\"X-F5-Auth-Token\", c.token)\n\t\treturn nil\n\t})\n\n\treturn &c, nil\n}\n\n\/\/ DisableCertCheck disables certificate verification, meaning that insecure\n\/\/ certificate will not cause any error.\nfunc (c *Client) DisableCertCheck() {\n\tc.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n}\n\n\/\/ RevokeToken revokes the current token. If the Client has not been initialized\n\/\/ with NewTokenClient, ErrNoToken is returned.\nfunc (c *Client) RevokeToken() error {\n\tif c.token == \"\" {\n\t\treturn ErrNoToken\n\t}\n\n\tresp, err := c.SendRequest(\"DELETE\", \"\/mgmt\/shared\/authz\/tokens\/\"+c.token, nil)\n\tif err != nil {\n\t\treturn errors.New(\"token revocation request failed: \" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\tvar respData struct {\n\t\tToken string `json:\"token\"`\n\t}\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(&respData); err != nil {\n\t\treturn errors.New(\"cannot decode token revocation response: \" + err.Error())\n\t}\n\tif respData.Token != c.token {\n\t\treturn errors.New(\"invalid token revocation response\")\n\t}\n\n\tc.token = \"\"\n\n\treturn nil\n}\n\n\/\/ UseProxy configures a proxy to use for outbound connections\nfunc (c *Client) UseProxy(proxy string) error {\n\tproxyURL, err := url.Parse(proxy)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.t.Proxy = http.ProxyURL(proxyURL)\n\treturn nil\n}\n\n\/\/ UseSystemProxy configures the client to use the system proxy\nfunc (c *Client) UseSystemProxy() error {\n\tproxy := os.Getenv(\"HTTP_PROXY\")\n\tif proxy != \"\" {\n\t\tproxyURL, err := url.Parse(proxy)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.t.Proxy = http.ProxyURL(proxyURL)\n\t}\n\treturn nil\n}\n\n\/\/ MakeRequest creates a request with headers appropriately set to make\n\/\/ authenticated requests. This method must be called for every new request.\nfunc (c *Client) MakeRequest(method, restPath string, data interface{}) (*http.Request, error) {\n\tvar (\n\t\treq *http.Request\n\t\terr error\n\t)\n\tif data != nil {\n\t\tbs, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal data into json: %v\", err)\n\t\t}\n\t\treq, err = http.NewRequest(method, c.makeURL(restPath), bytes.NewBuffer(bs))\n\t} else {\n\t\treq, err = http.NewRequest(method, c.makeURL(restPath), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create F5 authenticated request: %v\", err)\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tif c.txID != \"\" {\n\t\treq.Header.Add(\"X-F5-REST-Coordination-Id\", c.txID)\n\t}\n\tif err := c.makeAuth(req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n\n\/\/ Do sends an HTTP request and returns an HTTP response. It is just a wrapper\n\/\/ arround http.Client Do method.\n\/\/\n\/\/ Callers should close resp.Body when done reading from it.\n\/\/\n\/\/ See http package documentation for more information:\n\/\/ https:\/\/golang.org\/pkg\/net\/http\/#Client.Do\nfunc (c *Client) Do(req *http.Request) (*http.Response, error) {\n\treturn c.c.Do(req)\n}\n\n\/\/ SendRequest is a shortcut for MakeRequest() + Do() + ReadError().\nfunc (c *Client) SendRequest(method, restPath string, data interface{}) (*http.Response, error) {\n\treq, err := c.MakeRequest(method, restPath, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := c.ReadError(resp); err != nil {\n\t\tresp.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/ ReadQuery performs a GET query and unmarshal the response (from JSON) into outputData.\n\/\/\n\/\/ outputData must be a pointer.\nfunc (c *Client) ReadQuery(restPath string, outputData interface{}) error {\n\tresp, err := c.SendRequest(\"GET\", restPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(outputData); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ModQuery performs a modification query such as POST, PUT or DELETE.\nfunc (c *Client) ModQuery(method, restPath string, inputData interface{}) error {\n\tif method != \"POST\" && method != \"PUT\" && method != \"DELETE\" {\n\t\treturn errors.New(\"invalid method \" + method)\n\t}\n\tresp, err := c.SendRequest(method, restPath, inputData)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\treturn nil\n}\n\n\/\/ ReadError checks if a HTTP response contains an error and returns it.\nfunc (c *Client) ReadError(resp *http.Response) error {\n\tif resp.StatusCode >= 400 {\n\t\tif contentType := resp.Header.Get(\"Content-Type\"); !strings.Contains(contentType, \"application\/json\") {\n\t\t\treturn errors.New(\"http response error: \" + resp.Status)\n\t\t}\n\t\terrResp, err := NewRequestError(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errResp\n\t}\n\treturn nil\n}\n\n\/\/ makeURL creates an URL from the client base URL and a given REST path. What\n\/\/ this function actually does is to concatenate the base URL and the REST path\n\/\/ by handling trailing slashes.\nfunc (c *Client) makeURL(restPath string) string {\n\treturn strings.TrimSuffix(c.baseURL, \"\/\") + \"\/\" + strings.TrimPrefix(restPath, \"\/\")\n}\n\nfunc (c *Client) clone() *Client {\n\treturn &Client{\n\t\tc: c.c,\n\t\tbaseURL: c.baseURL,\n\t\tmakeAuth: c.makeAuth,\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see .\n\npackage util\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/getgauge\/common\"\n)\n\n\/\/ progressReader is for indicating the download \/ upload progress on the console\ntype progressReader struct {\n\tio.Reader\n\tbytesTransfered int64\n\ttotalBytes int64\n\tprogress float64\n\tprogressDisplayed bool\n}\n\n\/\/ Read overrides the underlying io.Reader's Read method.\n\/\/ io.Copy() will be calling this method.\nfunc (w *progressReader) Read(p []byte) (int, error) {\n\tn, err := w.Reader.Read(p)\n\tif n > 0 {\n\t\tw.bytesTransfered += int64(n)\n\t\tpercent := float64(w.bytesTransfered) * float64(100) \/ float64(w.totalBytes)\n\t\tif percent-w.progress > 4 {\n\t\t\tfmt.Print(\".\")\n\t\t\tw.progress = percent\n\t\t\tw.progressDisplayed = true\n\t\t}\n\t}\n\treturn n, err\n}\n\n\/\/ Download fires a HTTP GET request to download a resource to target directory\nfunc Download(url, targetDir, fileName string, silent bool) (string, error) {\n\tif !common.DirExists(targetDir) {\n\t\treturn \"\", fmt.Errorf(\"Error downloading file: %s\\nTarget dir %s doesn't exists.\", url, targetDir)\n\t}\n\n\tif fileName == \"\" {\n\t\tfileName = filepath.Base(url)\n\t}\n\ttargetFile := filepath.Join(targetDir, fileName)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn \"\", fmt.Errorf(\"Error downloading file: %s.\\n%s\", url, resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\tout, err := os.Create(targetFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer out.Close()\n\tif silent {\n\t\t_, err = io.Copy(out, resp.Body)\n\t} else {\n\t\tprogressReader := &progressReader{Reader: resp.Body, totalBytes: resp.ContentLength}\n\t\t_, err = io.Copy(out, progressReader)\n\t\tif progressReader.progressDisplayed {\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\treturn targetFile, err\n}\nadded debug message while download\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see .\n\npackage util\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/getgauge\/common\"\n\t\"github.com\/getgauge\/gauge\/logger\"\n)\n\n\/\/ progressReader is for indicating the download \/ upload progress on the console\ntype progressReader struct {\n\tio.Reader\n\tbytesTransfered int64\n\ttotalBytes int64\n\tprogress float64\n\tprogressDisplayed bool\n}\n\n\/\/ Read overrides the underlying io.Reader's Read method.\n\/\/ io.Copy() will be calling this method.\nfunc (w *progressReader) Read(p []byte) (int, error) {\n\tn, err := w.Reader.Read(p)\n\tif n > 0 {\n\t\tw.bytesTransfered += int64(n)\n\t\tpercent := float64(w.bytesTransfered) * float64(100) \/ float64(w.totalBytes)\n\t\tif percent-w.progress > 4 {\n\t\t\tfmt.Print(\".\")\n\t\t\tw.progress = percent\n\t\t\tw.progressDisplayed = true\n\t\t}\n\t}\n\treturn n, err\n}\n\n\/\/ Download fires a HTTP GET request to download a resource to target directory\nfunc Download(url, targetDir, fileName string, silent bool) (string, error) {\n\tif !common.DirExists(targetDir) {\n\t\treturn \"\", fmt.Errorf(\"Error downloading file: %s\\nTarget dir %s doesn't exists.\", url, targetDir)\n\t}\n\n\tif fileName == \"\" {\n\t\tfileName = filepath.Base(url)\n\t}\n\ttargetFile := filepath.Join(targetDir, fileName)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn \"\", fmt.Errorf(\"Error downloading file: %s.\\n%s\", url, resp.Status)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tlogger.Debugf(true, \"Unexpected status code while download. downloading file: %s.\\n%s\", url, resp.Status)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tout, err := os.Create(targetFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer out.Close()\n\tif silent {\n\t\t_, err = io.Copy(out, resp.Body)\n\t} else {\n\t\tprogressReader := &progressReader{Reader: resp.Body, totalBytes: resp.ContentLength}\n\t\t_, err = io.Copy(out, progressReader)\n\t\tif progressReader.progressDisplayed {\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\treturn targetFile, err\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage images\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/bazel\"\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/flags\"\n\tlogf \"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/log\"\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/util\"\n)\n\nvar (\n\tDefault = &Plugin{}\n\n\tsupportedGoArch = []string{\"amd64\", \"arm64\", \"arm\"}\n\tsupportedComponents = []string{\"acmesolver\", \"controller\", \"webhook\", \"cainjector\"}\n\tlog = logf.Log.WithName(\"images\")\n)\n\ntype Plugin struct {\n\t\/\/ The list of images to build (e.g. acmesolver, controller, webhook)\n\tComponents []string\n\n\t\/\/ If true, the built images will be exported to the configured docker\n\t\/\/ daemon when the Build() method is called.\n\tExportToDocker bool\n\n\t\/\/ List of architectures to build images for\n\tGoArch []string\n\n\t\/\/ DockerConfig is a path to a directory containing a config.json file that\n\t\/\/ is used for Docker authentication\n\tDockerConfig string\n\n\t\/\/ TODO: add GOOS support once the build system supports more than linux\n\n\t\/\/ built is set to true if Build() has completed successfully\n\tbuilt bool\n\t\/\/ configFileName is computed based on the DockerConfig field\n\tconfigFileName string\n}\n\nfunc (g *Plugin) AddFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&g.ExportToDocker, \"images.export\", false, \"if true, images will be exported to the currently configured docker daemon\")\n\tfs.StringSliceVar(&g.Components, \"images.components\", []string{\"acmesolver\", \"controller\", \"webhook\"}, \"the list of components to build images for\")\n\tfs.StringSliceVar(&g.GoArch, \"images.goarch\", []string{\"amd64\", \"arm64\", \"arm\"}, \"list of architectures to build images for\")\n\tfs.StringVar(&g.DockerConfig, \"images.docker-config\", \"\", \"path to a directory containing a docker config.json file used when pushing images\")\n}\n\nfunc (g *Plugin) Validate() []error {\n\tvar errs []error\n\n\t\/\/ validate goarch flag\n\tfor _, a := range g.GoArch {\n\t\tvalid := false\n\t\tfor _, sa := range supportedGoArch {\n\t\t\tif a == sa {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !valid {\n\t\t\terrs = append(errs, fmt.Errorf(\"invalid goarch value %q\", a))\n\t\t}\n\t}\n\n\t\/\/ validate components flag\n\tfor _, a := range g.Components {\n\t\tvalid := false\n\t\tfor _, sa := range supportedComponents {\n\t\t\tif a == sa {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !valid {\n\t\t\terrs = append(errs, fmt.Errorf(\"invalid component name %q\", a))\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc (g *Plugin) InitPublish() []error {\n\tvar errs []error\n\n\tif g.DockerConfig != \"\" {\n\t\tconfigFileName := path.Join(g.DockerConfig, \"config.json\")\n\t\tf, err := os.Stat(configFileName)\n\t\tif err != nil {\n\t\t\treturn []error{fmt.Errorf(\"error checking config file: %v\", err)}\n\t\t}\n\t\tif f.IsDir() {\n\t\t\treturn []error{fmt.Errorf(\"docker config.json is not a file\")}\n\t\t}\n\t\tg.configFileName = g.DockerConfig\n\t}\n\n\treturn errs\n}\n\nfunc (g *Plugin) Build(ctx context.Context) error {\n\t_, err := g.build(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif g.ExportToDocker {\n\t\tlog.Info(\"Exporting docker images to local docker daemon\")\n\t\tif err := g.exportToDocker(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Info(\"skipping exporting docker images to docker daemon\")\n\t}\n\n\treturn nil\n}\n\nfunc (g *Plugin) Publish(ctx context.Context) error {\n\tlog.Info(\"running publish for image plugin\")\n\t\/\/ this case should never be reached, but we check it to be safe\n\tif !g.built {\n\t\tif _, err := g.build(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Info(\"pushing images\")\n\ttargets := g.generateTargets()\n\terr := g.pushImages(ctx, targets)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"published all docker images\")\n\n\treturn nil\n}\n\nfunc (g *Plugin) Complete() error {\n\tlog = log.WithName(\"default-flags\")\n\n\tif g.DockerConfig == \"\" {\n\t\tg.DockerConfig = os.Getenv(\"DOCKER_CONFIG\")\n\t\tif g.DockerConfig != \"\" {\n\t\t\tlog.Info(\"set default value\", \"flag\", \"images.docker-config\", \"value\", g.DockerConfig)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Plugin) build(ctx context.Context) (imageTargets, error) {\n\ttargets := g.generateTargets()\n\n\tbazelTargets := targets.bazelTargets()\n\tlog := log.WithValues(\"images\", bazelTargets)\n\tlog.Info(\"building bazel image targets\")\n\n\t\/\/ set the os and arch to linux\/amd64\n\t\/\/ whilst we might be building cross-arch binaries, cgo only depends on\n\t\/\/ particular OS settings and not arch, so just by setting this to 'linux'\n\t\/\/ we can fix cross builds on platforms other than linux\n\t\/\/ if we support alternate OS values in future, this will need updating\n\t\/\/ with a call to BuildPlatformE per *OS*.\n\terr := bazel.Default.BuildPlatformE(ctx, log, \"linux\", \"amd64\", bazelTargets...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building docker images (%v): %v\", targets, err)\n\t}\n\n\tg.built = true\n\treturn targets, nil\n}\n\nfunc (g *Plugin) exportToDocker(ctx context.Context) error {\n\ttargets := g.generateTargets()\n\tlog.WithValues(\"images\", targets.bazelExportTargets()).Info(\"exporting images to docker daemon\")\n\tfor _, target := range targets.bazelExportTargets() {\n\t\tlog := log.WithValues(\"target\", target)\n\t\tlog.Info(\"exporting image to docker daemon\")\n\t\t\/\/ set the os and arch to linux\/amd64\n\t\t\/\/ whilst we might be building cross-arch binaries, cgo only depends on\n\t\t\/\/ particular OS settings and not arch, so just by setting this to 'linux'\n\t\t\/\/ we can fix cross builds on platforms other than linux\n\t\t\/\/ if we support alternate OS values in future, this will need updating\n\t\t\/\/ with a call to BuildPlatformE per *OS*.\n\t\terr := bazel.Default.RunPlatformE(ctx, log, \"linux\", \"amd64\", target)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error exporting image %q to docker daemon: %v\", target, err)\n\t\t}\n\t}\n\n\tlog.WithValues(\"images\", targets.exportedImageNames()).Info(\"exported all docker images\")\n\n\treturn nil\n}\n\n\/\/ generateTargets generates a list of Bazel target names that must be\n\/\/ built for this invocation of the image builder\nfunc (g *Plugin) generateTargets() imageTargets {\n\tvar targets []imageTarget\n\tfor _, c := range g.Components {\n\t\tfor _, a := range g.GoArch {\n\t\t\ttargets = append(targets, imageTarget{c, \"linux\", a})\n\t\t}\n\t}\n\treturn targets\n}\n\n\/\/ pushImages will push the images built for this release to the registry\n\/\/ TODO: add support for calling container_push targets instead of just 'docker push'\nfunc (p *Plugin) pushImages(ctx context.Context, targets imageTargets) error {\n\terr := p.exportToDocker(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar images []string\n\tfor _, t := range targets {\n\t\timages = append(images, t.exportedImageNames()...)\n\t}\n\n\tlog.WithValues(\"images\", images).Info(\"pushing docker images\")\n\tfor _, img := range images {\n\t\tlog := log.WithValues(\"image\", img)\n\t\tlog.Info(\"pushing docker image\")\n\t\targs := []string{}\n\t\tif p.configFileName != \"\" {\n\t\t\targs = append(args, \"--config\", p.configFileName)\n\t\t}\n\t\targs = append(args, \"push\", img)\n\t\tcmd := exec.CommandContext(ctx, \"docker\", args...)\n\t\terr := util.RunE(log, cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype imageTargets []imageTarget\n\nfunc (i imageTargets) bazelTargets() []string {\n\tout := make([]string, len(i))\n\tfor idx, target := range i {\n\t\tout[idx] = target.bazelTarget()\n\t}\n\treturn out\n}\n\nfunc (i imageTargets) bazelExportTargets() []string {\n\tout := make([]string, len(i))\n\tfor idx, target := range i {\n\t\tout[idx] = target.bazelExportTarget()\n\t}\n\treturn out\n}\n\nfunc (i imageTargets) exportedImageNames() []string {\n\tout := make([]string, 0)\n\tfor _, target := range i {\n\t\tout = append(out, target.exportedImageNames()...)\n\t}\n\treturn out\n}\n\ntype imageTarget struct {\n\tname, os, arch string\n}\n\nfunc (i imageTarget) bazelTarget() string {\n\treturn fmt.Sprintf(\"\/\/cmd\/%s:image.%s-%s\", i.name, i.os, i.arch)\n}\n\nfunc (i imageTarget) bazelExportTarget() string {\n\treturn fmt.Sprintf(\"\/\/cmd\/%s:image.%s-%s.export\", i.name, i.os, i.arch)\n}\n\nfunc (i imageTarget) exportedImageNames() []string {\n\tif i.arch == \"amd64\" {\n\t\treturn []string{\n\t\t\tfmt.Sprintf(\"%s\/cert-manager-%s:%s\", flags.Default.DockerRepo, i.name, flags.Default.AppVersion),\n\t\t\tfmt.Sprintf(\"%s\/cert-manager-%s:%s\", flags.Default.DockerRepo, i.name, flags.Default.GitCommitRef),\n\t\t}\n\t}\n\n\treturn []string{\n\t\tfmt.Sprintf(\"%s\/cert-manager-%s-%s:%s\", flags.Default.DockerRepo, i.name, i.arch, flags.Default.AppVersion),\n\t\tfmt.Sprintf(\"%s\/cert-manager-%s-%s:%s\", flags.Default.DockerRepo, i.name, i.arch, flags.Default.GitCommitRef),\n\t}\n}\nAdd cainjector to list of images to build\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage images\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/bazel\"\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/flags\"\n\tlogf \"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/log\"\n\t\"github.com\/jetstack\/cert-manager\/hack\/release\/pkg\/util\"\n)\n\nvar (\n\tDefault = &Plugin{}\n\n\tsupportedGoArch = []string{\"amd64\", \"arm64\", \"arm\"}\n\tsupportedComponents = []string{\"acmesolver\", \"controller\", \"webhook\", \"cainjector\"}\n\tlog = logf.Log.WithName(\"images\")\n)\n\ntype Plugin struct {\n\t\/\/ The list of images to build (e.g. acmesolver, controller, webhook)\n\tComponents []string\n\n\t\/\/ If true, the built images will be exported to the configured docker\n\t\/\/ daemon when the Build() method is called.\n\tExportToDocker bool\n\n\t\/\/ List of architectures to build images for\n\tGoArch []string\n\n\t\/\/ DockerConfig is a path to a directory containing a config.json file that\n\t\/\/ is used for Docker authentication\n\tDockerConfig string\n\n\t\/\/ TODO: add GOOS support once the build system supports more than linux\n\n\t\/\/ built is set to true if Build() has completed successfully\n\tbuilt bool\n\t\/\/ configFileName is computed based on the DockerConfig field\n\tconfigFileName string\n}\n\nfunc (g *Plugin) AddFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&g.ExportToDocker, \"images.export\", false, \"if true, images will be exported to the currently configured docker daemon\")\n\tfs.StringSliceVar(&g.Components, \"images.components\", []string{\"acmesolver\", \"controller\", \"webhook\", \"cainjector\"}, \"the list of components to build images for\")\n\tfs.StringSliceVar(&g.GoArch, \"images.goarch\", []string{\"amd64\", \"arm64\", \"arm\"}, \"list of architectures to build images for\")\n\tfs.StringVar(&g.DockerConfig, \"images.docker-config\", \"\", \"path to a directory containing a docker config.json file used when pushing images\")\n}\n\nfunc (g *Plugin) Validate() []error {\n\tvar errs []error\n\n\t\/\/ validate goarch flag\n\tfor _, a := range g.GoArch {\n\t\tvalid := false\n\t\tfor _, sa := range supportedGoArch {\n\t\t\tif a == sa {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !valid {\n\t\t\terrs = append(errs, fmt.Errorf(\"invalid goarch value %q\", a))\n\t\t}\n\t}\n\n\t\/\/ validate components flag\n\tfor _, a := range g.Components {\n\t\tvalid := false\n\t\tfor _, sa := range supportedComponents {\n\t\t\tif a == sa {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !valid {\n\t\t\terrs = append(errs, fmt.Errorf(\"invalid component name %q\", a))\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc (g *Plugin) InitPublish() []error {\n\tvar errs []error\n\n\tif g.DockerConfig != \"\" {\n\t\tconfigFileName := path.Join(g.DockerConfig, \"config.json\")\n\t\tf, err := os.Stat(configFileName)\n\t\tif err != nil {\n\t\t\treturn []error{fmt.Errorf(\"error checking config file: %v\", err)}\n\t\t}\n\t\tif f.IsDir() {\n\t\t\treturn []error{fmt.Errorf(\"docker config.json is not a file\")}\n\t\t}\n\t\tg.configFileName = g.DockerConfig\n\t}\n\n\treturn errs\n}\n\nfunc (g *Plugin) Build(ctx context.Context) error {\n\t_, err := g.build(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif g.ExportToDocker {\n\t\tlog.Info(\"Exporting docker images to local docker daemon\")\n\t\tif err := g.exportToDocker(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Info(\"skipping exporting docker images to docker daemon\")\n\t}\n\n\treturn nil\n}\n\nfunc (g *Plugin) Publish(ctx context.Context) error {\n\tlog.Info(\"running publish for image plugin\")\n\t\/\/ this case should never be reached, but we check it to be safe\n\tif !g.built {\n\t\tif _, err := g.build(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Info(\"pushing images\")\n\ttargets := g.generateTargets()\n\terr := g.pushImages(ctx, targets)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"published all docker images\")\n\n\treturn nil\n}\n\nfunc (g *Plugin) Complete() error {\n\tlog = log.WithName(\"default-flags\")\n\n\tif g.DockerConfig == \"\" {\n\t\tg.DockerConfig = os.Getenv(\"DOCKER_CONFIG\")\n\t\tif g.DockerConfig != \"\" {\n\t\t\tlog.Info(\"set default value\", \"flag\", \"images.docker-config\", \"value\", g.DockerConfig)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Plugin) build(ctx context.Context) (imageTargets, error) {\n\ttargets := g.generateTargets()\n\n\tbazelTargets := targets.bazelTargets()\n\tlog := log.WithValues(\"images\", bazelTargets)\n\tlog.Info(\"building bazel image targets\")\n\n\t\/\/ set the os and arch to linux\/amd64\n\t\/\/ whilst we might be building cross-arch binaries, cgo only depends on\n\t\/\/ particular OS settings and not arch, so just by setting this to 'linux'\n\t\/\/ we can fix cross builds on platforms other than linux\n\t\/\/ if we support alternate OS values in future, this will need updating\n\t\/\/ with a call to BuildPlatformE per *OS*.\n\terr := bazel.Default.BuildPlatformE(ctx, log, \"linux\", \"amd64\", bazelTargets...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error building docker images (%v): %v\", targets, err)\n\t}\n\n\tg.built = true\n\treturn targets, nil\n}\n\nfunc (g *Plugin) exportToDocker(ctx context.Context) error {\n\ttargets := g.generateTargets()\n\tlog.WithValues(\"images\", targets.bazelExportTargets()).Info(\"exporting images to docker daemon\")\n\tfor _, target := range targets.bazelExportTargets() {\n\t\tlog := log.WithValues(\"target\", target)\n\t\tlog.Info(\"exporting image to docker daemon\")\n\t\t\/\/ set the os and arch to linux\/amd64\n\t\t\/\/ whilst we might be building cross-arch binaries, cgo only depends on\n\t\t\/\/ particular OS settings and not arch, so just by setting this to 'linux'\n\t\t\/\/ we can fix cross builds on platforms other than linux\n\t\t\/\/ if we support alternate OS values in future, this will need updating\n\t\t\/\/ with a call to BuildPlatformE per *OS*.\n\t\terr := bazel.Default.RunPlatformE(ctx, log, \"linux\", \"amd64\", target)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error exporting image %q to docker daemon: %v\", target, err)\n\t\t}\n\t}\n\n\tlog.WithValues(\"images\", targets.exportedImageNames()).Info(\"exported all docker images\")\n\n\treturn nil\n}\n\n\/\/ generateTargets generates a list of Bazel target names that must be\n\/\/ built for this invocation of the image builder\nfunc (g *Plugin) generateTargets() imageTargets {\n\tvar targets []imageTarget\n\tfor _, c := range g.Components {\n\t\tfor _, a := range g.GoArch {\n\t\t\ttargets = append(targets, imageTarget{c, \"linux\", a})\n\t\t}\n\t}\n\treturn targets\n}\n\n\/\/ pushImages will push the images built for this release to the registry\n\/\/ TODO: add support for calling container_push targets instead of just 'docker push'\nfunc (p *Plugin) pushImages(ctx context.Context, targets imageTargets) error {\n\terr := p.exportToDocker(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar images []string\n\tfor _, t := range targets {\n\t\timages = append(images, t.exportedImageNames()...)\n\t}\n\n\tlog.WithValues(\"images\", images).Info(\"pushing docker images\")\n\tfor _, img := range images {\n\t\tlog := log.WithValues(\"image\", img)\n\t\tlog.Info(\"pushing docker image\")\n\t\targs := []string{}\n\t\tif p.configFileName != \"\" {\n\t\t\targs = append(args, \"--config\", p.configFileName)\n\t\t}\n\t\targs = append(args, \"push\", img)\n\t\tcmd := exec.CommandContext(ctx, \"docker\", args...)\n\t\terr := util.RunE(log, cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype imageTargets []imageTarget\n\nfunc (i imageTargets) bazelTargets() []string {\n\tout := make([]string, len(i))\n\tfor idx, target := range i {\n\t\tout[idx] = target.bazelTarget()\n\t}\n\treturn out\n}\n\nfunc (i imageTargets) bazelExportTargets() []string {\n\tout := make([]string, len(i))\n\tfor idx, target := range i {\n\t\tout[idx] = target.bazelExportTarget()\n\t}\n\treturn out\n}\n\nfunc (i imageTargets) exportedImageNames() []string {\n\tout := make([]string, 0)\n\tfor _, target := range i {\n\t\tout = append(out, target.exportedImageNames()...)\n\t}\n\treturn out\n}\n\ntype imageTarget struct {\n\tname, os, arch string\n}\n\nfunc (i imageTarget) bazelTarget() string {\n\treturn fmt.Sprintf(\"\/\/cmd\/%s:image.%s-%s\", i.name, i.os, i.arch)\n}\n\nfunc (i imageTarget) bazelExportTarget() string {\n\treturn fmt.Sprintf(\"\/\/cmd\/%s:image.%s-%s.export\", i.name, i.os, i.arch)\n}\n\nfunc (i imageTarget) exportedImageNames() []string {\n\tif i.arch == \"amd64\" {\n\t\treturn []string{\n\t\t\tfmt.Sprintf(\"%s\/cert-manager-%s:%s\", flags.Default.DockerRepo, i.name, flags.Default.AppVersion),\n\t\t\tfmt.Sprintf(\"%s\/cert-manager-%s:%s\", flags.Default.DockerRepo, i.name, flags.Default.GitCommitRef),\n\t\t}\n\t}\n\n\treturn []string{\n\t\tfmt.Sprintf(\"%s\/cert-manager-%s-%s:%s\", flags.Default.DockerRepo, i.name, i.arch, flags.Default.AppVersion),\n\t\tfmt.Sprintf(\"%s\/cert-manager-%s-%s:%s\", flags.Default.DockerRepo, i.name, i.arch, flags.Default.GitCommitRef),\n\t}\n}\n<|endoftext|>"} {"text":"package algorithmia\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype FileAttributes struct {\n\tFileName string `json:\"filename\"`\n\tLastModified string `json:\"last_modified\"`\n\tSize int64 `json:\"size\"`\n}\n\ntype DataFile struct {\n\tDataObjectType\n\n\tpath string\n\turl string\n\tlastModified time.Time\n\tsize int64\n\n\tclient *Client\n}\n\nfunc NewDataFile(client *Client, dataUrl string) *DataFile {\n\tp := strings.TrimSpace(dataUrl)\n\tif strings.HasPrefix(p, \"data:\/\/\") {\n\t\tp = p[len(\"data:\/\/\"):]\n\t} else if strings.HasPrefix(p, \"\/\") {\n\t\tp = p[1:]\n\t}\n\treturn &DataFile{\n\t\tDataObjectType: File,\n\t\tclient: client,\n\t\tpath: p,\n\t\turl: getUrl(p),\n\t}\n}\n\nfunc (f *DataFile) SetAttributes(attr *FileAttributes) error {\n\t\/\/%Y-%m-%dT%H:%M:%S.000Z\n\tt, err := time.Parse(\"2006-01-02T15:04:05.000Z\", attr.LastModified)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.lastModified = t\n\tf.size = attr.Size\n\treturn nil\n}\n\n\/\/Get file from the data api\nfunc (f *DataFile) File() (*os.File, error) {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trf, err := ioutil.TempFile(os.TempDir(), \"algorithmia\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = io.Copy(rf, resp.Body)\n\tif err != nil {\n\t\trf.Close()\n\t\treturn nil, err\n\t}\n\treturn rf, nil\n}\n\nfunc (f *DataFile) Exists() (bool, error) {\n\tresp, err := f.client.headHelper(f.url)\n\treturn resp.StatusCode == http.StatusOK, err\n}\n\nfunc (f *DataFile) Name() (string, error) {\n\t_, name, err := getParentAndBase(f.path)\n\treturn name, err\n}\n\nfunc (f *DataFile) Bytes() ([]byte, error) {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}\n\nfunc (f *DataFile) StringContents() (string, error) {\n\tif b, err := f.Bytes(); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(b), nil\n\t}\n}\n\nfunc (f *DataFile) Json(x interface{}) error {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn getJson(resp, x)\n}\n\n\/\/Post to data api\nfunc (f *DataFile) Put(data []byte) error {\n\tresp, err := f.client.putHelper(f.url, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := getRaw(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = errorFromJsonData(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/Post json to data api\nfunc (f *DataFile) PutJson(data interface{}) error {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.Put(b)\n}\n\n\/\/Post file to data api\nfunc (f *DataFile) PutFile(fpath string) error {\n\tb, err := ioutil.ReadFile(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.Put(b)\n}\n\n\/\/Delete from data api\nfunc (f *DataFile) Delete() error {\n\tresp, err := f.client.deleteHelper(f.url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := errorFromResponse(resp); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *DataFile) Path() string {\n\treturn f.path\n}\n\nfunc (f *DataFile) Url() string {\n\treturn f.url\n}\n\nfunc (f *DataFile) LastModified() time.Time {\n\treturn f.lastModified\n}\n\nfunc (f *DataFile) Size() int64 {\n\treturn f.size\n}\nDataFile Put argument type is now interface{}, for []byte use PutBytes insteadpackage algorithmia\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype FileAttributes struct {\n\tFileName string `json:\"filename\"`\n\tLastModified string `json:\"last_modified\"`\n\tSize int64 `json:\"size\"`\n}\n\ntype DataFile struct {\n\tDataObjectType\n\n\tpath string\n\turl string\n\tlastModified time.Time\n\tsize int64\n\n\tclient *Client\n}\n\nfunc NewDataFile(client *Client, dataUrl string) *DataFile {\n\tp := strings.TrimSpace(dataUrl)\n\tif strings.HasPrefix(p, \"data:\/\/\") {\n\t\tp = p[len(\"data:\/\/\"):]\n\t} else if strings.HasPrefix(p, \"\/\") {\n\t\tp = p[1:]\n\t}\n\treturn &DataFile{\n\t\tDataObjectType: File,\n\t\tclient: client,\n\t\tpath: p,\n\t\turl: getUrl(p),\n\t}\n}\n\nfunc (f *DataFile) SetAttributes(attr *FileAttributes) error {\n\t\/\/%Y-%m-%dT%H:%M:%S.000Z\n\tt, err := time.Parse(\"2006-01-02T15:04:05.000Z\", attr.LastModified)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.lastModified = t\n\tf.size = attr.Size\n\treturn nil\n}\n\n\/\/Get file from the data api\nfunc (f *DataFile) File() (*os.File, error) {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trf, err := ioutil.TempFile(os.TempDir(), \"algorithmia\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = io.Copy(rf, resp.Body)\n\tif err != nil {\n\t\trf.Close()\n\t\treturn nil, err\n\t}\n\tname := rf.Name()\n\tif rf.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn os.Open(name)\n}\n\nfunc (f *DataFile) Exists() (bool, error) {\n\tresp, err := f.client.headHelper(f.url)\n\treturn resp.StatusCode == http.StatusOK, err\n}\n\nfunc (f *DataFile) Name() (string, error) {\n\t_, name, err := getParentAndBase(f.path)\n\treturn name, err\n}\n\nfunc (f *DataFile) Bytes() ([]byte, error) {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}\n\nfunc (f *DataFile) StringContents() (string, error) {\n\tif b, err := f.Bytes(); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(b), nil\n\t}\n}\n\nfunc (f *DataFile) Json(x interface{}) error {\n\tif exists, err := f.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn errors.New(fmt.Sprint(\"file does not exist -\", f.path))\n\t}\n\n\tresp, err := f.client.getHelper(f.url, url.Values{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn getJson(resp, x)\n}\n\n\/\/Post to data api\nfunc (f *DataFile) Put(data interface{}) error {\n\tswitch dt := data.(type) {\n\tcase string:\n\t\treturn f.PutBytes([]byte(dt))\n\tcase []byte:\n\t\treturn f.PutBytes(dt)\n\tdefault:\n\t\treturn f.PutJson(data)\n\t}\n}\n\nfunc (f *DataFile) PutBytes(data []byte) error {\n\tresp, err := f.client.putHelper(f.url, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := getRaw(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = errorFromJsonData(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/Post json to data api\nfunc (f *DataFile) PutJson(data interface{}) error {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.PutBytes(b)\n}\n\n\/\/Post file to data api\nfunc (f *DataFile) PutFile(fpath string) error {\n\tb, err := ioutil.ReadFile(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.PutBytes(b)\n}\n\n\/\/Delete from data api\nfunc (f *DataFile) Delete() error {\n\tresp, err := f.client.deleteHelper(f.url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := errorFromResponse(resp); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *DataFile) Path() string {\n\treturn f.path\n}\n\nfunc (f *DataFile) Url() string {\n\treturn f.url\n}\n\nfunc (f *DataFile) LastModified() time.Time {\n\treturn f.lastModified\n}\n\nfunc (f *DataFile) Size() int64 {\n\treturn f.size\n}\n<|endoftext|>"} {"text":"package main\r\n\r\nimport (\r\n\t\"bytes\"\r\n\t\"encoding\/json\"\r\n\t\"fmt\"\r\n\t\"golang.org\/x\/net\/context\"\r\n\t\"google.golang.org\/appengine\"\r\n\t\"google.golang.org\/appengine\/log\"\r\n\t\"google.golang.org\/appengine\/urlfetch\"\r\n\t\"io\/ioutil\"\r\n\t\"net\/http\"\r\n)\r\n\r\ntype FacebookAttachment struct {\r\n\t\/* image, audio, video, file or location *\/\r\n\tType string `json:\"type,omitempty\"`\r\n\r\n\t\/* multimedia or location payload *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookQuickReply struct {\r\n\t\/* Custom data provided by the app *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookMessage struct {\r\n\r\n\t\/* Indicates the message sent from the page itself *\/\r\n\tIsEcho bool `json:\"is_echo,omitempty\"`\r\n\r\n\t\/* ID of the app from which the message was sent *\/\r\n\tAppId string `json:\"app_id,omitempty\"`\r\n\r\n\t\/* Custom string passed to the Send API as the metadata field *\/\r\n\tMetadata string `json:\"metadata,omitempty\"`\r\n\r\n\t\/* Message ID *\/\r\n\tMid string `json:\"mid,omitempty\"`\r\n\r\n\t\/* Message sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n\r\n\t\/* Text of message *\/\r\n\tText string `json:\"text,omitempty\"`\r\n\r\n\t\/* Array containing attachment data *\/\r\n\tAttachments *[]FacebookAttachment `json:\"attachment,omitempty\"`\r\n\r\n\t\/* Optional custom data provided by the sending app *\/\r\n\tQuickReply *FacebookQuickReply `json:\"quick_reply,omitempty\"`\r\n}\r\n\r\ntype FacebookPerson struct {\r\n\tId string `json:\"id,omitempty\"`\r\n}\r\n\r\ntype FacebookPostback struct {\r\n\t\/* payload parameter that was defined with the button *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookOptIn struct {\r\n\t\/* data-ref parameter that was defined with the entry point *\/\r\n\tRef string `json:\"ref,omitempty\"`\r\n}\r\n\r\ntype FacebookAccountLinking struct {\r\n\t\/* linked or unlinked *\/\r\n\tstatus string `json:\"status,omitempty\"`\r\n\r\n\t\/* Value of pass-through authorization_code provided in the Linking Account flow *\/\r\n\tAuthorizationCode string `json:\"authorization_code,omitempty\"`\r\n}\r\n\r\ntype FacebookDelivery struct {\r\n\t\/* Array containing message IDs of messages that were delivered. Field may not be present. *\/\r\n\tMids []string `json:\"mids,omitempty\"`\r\n\r\n\t\/* All messages that were sent before this timestamp were delivered *\/\r\n\tWatermark int64 `json:\"watermark,omitempty\"`\r\n\r\n\t\/* Sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n}\r\n\r\ntype FacebookRead struct {\r\n\t\/* All messages that were sent before this timestamp were read *\/\r\n\tWatermark int64 `json:\"watermark,omitempty\"`\r\n\r\n\t\/* Sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n}\r\n\r\ntype FacebookMessaging struct {\r\n\t\/* Sender user ID *\/\r\n\tSender *FacebookPerson `json:\"sender,omitempty\"`\r\n\r\n\t\/* Recipient user ID *\/\r\n\tRecipient *FacebookPerson `json:\"recipient,omitempty\"`\r\n\r\n\t\/* Time of messaging (epoch time in milliseconds) *\/\r\n\tTimestamp int64 `json:\"timestamp,omitempty\"`\r\n\r\n\t\/* Message Received Payload *\/\r\n\tMessage *FacebookMessage `json:\"message,omitempty\"`\r\n\r\n\t\/* Postback Received Payload *\/\r\n\tPostback *FacebookPostback `json:\"postback,omitempty\"`\r\n\r\n\t\/* Authentification Payload *\/\r\n\tOptIn *FacebookOptIn `json:\"optin,omitempty\"`\r\n\r\n\t\/* Account Linking Payload *\/\r\n\tAccountLinking *FacebookAccountLinking `json:\"account_linking,omitempty\"`\r\n\r\n\t\/* Message Delivered Payload *\/\r\n\tDelivery *FacebookDelivery `json:\"delivery,omitempty\"`\r\n\r\n\t\/* Message Read Payload *\/\r\n\tRead *FacebookRead `json:\"read,omitempty\"`\r\n}\r\n\r\ntype FacebookEntry struct {\r\n\t\/* Page ID of page *\/\r\n\tId string `json:\"id,omitempty\"`\r\n\r\n\t\/* Time of update (epoch time in milliseconds) *\/\r\n\tTime int64 `json:\"time,omitempty\"`\r\n\r\n\t\/* Array containing objects related to messaging *\/\r\n\tMessaging []FacebookMessaging `json:\"messaging,omitempty\"`\r\n}\r\n\r\ntype FacebookPayload struct {\r\n\tObject string `json:\"object,omitempty\"`\r\n\r\n\t\/* Array containing event data *\/\r\n\tEntry []FacebookEntry `json:\"entry,omitempty\"`\r\n}\r\n\r\nfunc FacebookCallbackHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>> Facebook Callback Handler\")\r\n\r\n\tif r.Method == \"GET\" {\r\n\t\tFacebookCallbackGETHandler(w, r)\r\n\t} else if r.Method == \"POST\" {\r\n\t\tFacebookCallbackPOSTHandler(w, r)\r\n\t} else {\r\n\t\tlog.Errorf(c, \"Error, unkown method: %v\", r.Method)\r\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\r\n\t\treturn\r\n\t}\r\n\r\n}\r\n\r\nfunc FacebookCallbackGETHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> FacebookCallbackGETHandler\")\r\n\r\n\tmode := r.FormValue(\"hub.mode\")\r\n\tlog.Debugf(c, \"Hub Mode: %v\", mode)\r\n\r\n\tchallenge := r.FormValue(\"hub.challenge\")\r\n\tlog.Debugf(c, \"Hub Challenge: %v\", challenge)\r\n\r\n\tverify_token := r.FormValue(\"hub.verify_token\")\r\n\tlog.Debugf(c, \"Hub Verify Token: %v\", verify_token)\r\n\r\n\tif verify_token != VERIFY_TOKEN {\r\n\t\tlog.Errorf(c, \"Error, bad verification token: %v\", verify_token)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tif mode != \"subscribe\" {\r\n\t\tlog.Errorf(c, \"Error, bad mode: %v\", mode)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"%v\", challenge)\r\n}\r\n\r\nfunc FacebookCallbackPOSTHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> FacebookCallbackPOSTHandler\")\r\n\r\n\tvar payload FacebookPayload\r\n\terr := UnmarshalRequest(c, r, &payload)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error reading JSON: %v\", err)\r\n\t\thttp.Error(w, \"Internal Server Error: \"+err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tif payload.Object == \"page\" {\r\n\t\tlog.Debugf(c, \"Received %v entries\", len(payload.Entry))\r\n\r\n\t\tfor i, e := range payload.Entry {\r\n\t\t\tlog.Debugf(c, \"Entry #%v\", i)\r\n\t\t\tlog.Debugf(c, \"Message on page %v\", e.Id)\r\n\t\t\tlog.Debugf(c, \"Time: %v\", e.Time)\r\n\r\n\t\t\tif len(e.Messaging) > 0 {\r\n\t\t\t\tlog.Debugf(c, \"Received %v messages\", len(e.Messaging))\r\n\r\n\t\t\t\tfor _, m := range e.Messaging {\r\n\t\t\t\t\tlog.Debugf(c, \"Sender %v Recipient %v\", m.Sender.Id, m.Recipient.Id)\r\n\r\n\t\t\t\t\tif m.Message != nil {\r\n\t\t\t\t\t\tlog.Debugf(c, \" --- Message Received\")\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Id: %v\", m.Message.Mid)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Seq: %v\", m.Message.Seq)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message: %v\", m.Message.Text)\r\n\r\n\t\t\t\t\t\terr = TreatMessage(c, m.Sender.Id, m.Message.Text)\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tlog.Errorf(c, \"Error, sending message: %v\", err)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tif m.Delivery != nil {\r\n\t\t\t\t\t\tlog.Debugf(c, \" --- Message Delivered\")\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Ids: %v\", m.Delivery.Mids)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Seq: %v\", m.Delivery.Seq)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Watermark: %v\", m.Delivery.Watermark)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tlog.Warningf(c, \"No message in payload - feature not developed!\")\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t} else {\r\n\t\tlog.Errorf(c, \"Error, unkown Object type: %v\", payload.Object)\r\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"\")\r\n}\r\n\r\nfunc TreatMessage(c context.Context, recipientId string, message string) error {\r\n\tresponse := \"Hello \" + message\r\n\treturn SendFacebookMessage(c, recipientId, response)\r\n}\r\n\r\nfunc SendFacebookMessage(c context.Context, recipientId string, message string) error {\r\n\r\n\tmessaging := FacebookMessaging{\r\n\t\tRecipient: &FacebookPerson{\r\n\t\t\tId: recipientId,\r\n\t\t},\r\n\t\tMessage: &FacebookMessage{\r\n\t\t\tText: message,\r\n\t\t},\r\n\t}\r\n\r\n\tjsonData, err := json.Marshal(messaging)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error converting to JSON: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tURL := \"https:\/\/graph.facebook.com\/v2.6\/me\/messages?access_token=\" + PAGE_ACCESS_TOKEN\r\n\tlog.Debugf(c, \"Calling %v\", URL)\r\n\tresp, err := urlfetch.Client(c).Post(URL, \"application\/json\", bytes.NewReader(jsonData))\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error posting message: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tdefer resp.Body.Close()\r\n\tbody, err := ioutil.ReadAll(resp.Body)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error reading response: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tlog.Debugf(c, \"Facebook response: %s\", body)\r\n\r\n\treturn nil\r\n\r\n}\r\nsimplified TreatMessage and rename GetResponsepackage main\r\n\r\nimport (\r\n\t\"bytes\"\r\n\t\"encoding\/json\"\r\n\t\"fmt\"\r\n\t\"golang.org\/x\/net\/context\"\r\n\t\"google.golang.org\/appengine\"\r\n\t\"google.golang.org\/appengine\/log\"\r\n\t\"google.golang.org\/appengine\/urlfetch\"\r\n\t\"io\/ioutil\"\r\n\t\"net\/http\"\r\n)\r\n\r\ntype FacebookAttachment struct {\r\n\t\/* image, audio, video, file or location *\/\r\n\tType string `json:\"type,omitempty\"`\r\n\r\n\t\/* multimedia or location payload *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookQuickReply struct {\r\n\t\/* Custom data provided by the app *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookMessage struct {\r\n\r\n\t\/* Indicates the message sent from the page itself *\/\r\n\tIsEcho bool `json:\"is_echo,omitempty\"`\r\n\r\n\t\/* ID of the app from which the message was sent *\/\r\n\tAppId string `json:\"app_id,omitempty\"`\r\n\r\n\t\/* Custom string passed to the Send API as the metadata field *\/\r\n\tMetadata string `json:\"metadata,omitempty\"`\r\n\r\n\t\/* Message ID *\/\r\n\tMid string `json:\"mid,omitempty\"`\r\n\r\n\t\/* Message sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n\r\n\t\/* Text of message *\/\r\n\tText string `json:\"text,omitempty\"`\r\n\r\n\t\/* Array containing attachment data *\/\r\n\tAttachments *[]FacebookAttachment `json:\"attachment,omitempty\"`\r\n\r\n\t\/* Optional custom data provided by the sending app *\/\r\n\tQuickReply *FacebookQuickReply `json:\"quick_reply,omitempty\"`\r\n}\r\n\r\ntype FacebookPerson struct {\r\n\tId string `json:\"id,omitempty\"`\r\n}\r\n\r\ntype FacebookPostback struct {\r\n\t\/* payload parameter that was defined with the button *\/\r\n\tPayload string `json:\"payload,omitempty\"`\r\n}\r\n\r\ntype FacebookOptIn struct {\r\n\t\/* data-ref parameter that was defined with the entry point *\/\r\n\tRef string `json:\"ref,omitempty\"`\r\n}\r\n\r\ntype FacebookAccountLinking struct {\r\n\t\/* linked or unlinked *\/\r\n\tstatus string `json:\"status,omitempty\"`\r\n\r\n\t\/* Value of pass-through authorization_code provided in the Linking Account flow *\/\r\n\tAuthorizationCode string `json:\"authorization_code,omitempty\"`\r\n}\r\n\r\ntype FacebookDelivery struct {\r\n\t\/* Array containing message IDs of messages that were delivered. Field may not be present. *\/\r\n\tMids []string `json:\"mids,omitempty\"`\r\n\r\n\t\/* All messages that were sent before this timestamp were delivered *\/\r\n\tWatermark int64 `json:\"watermark,omitempty\"`\r\n\r\n\t\/* Sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n}\r\n\r\ntype FacebookRead struct {\r\n\t\/* All messages that were sent before this timestamp were read *\/\r\n\tWatermark int64 `json:\"watermark,omitempty\"`\r\n\r\n\t\/* Sequence number *\/\r\n\tSeq int64 `json:\"seq,omitempty\"`\r\n}\r\n\r\ntype FacebookMessaging struct {\r\n\t\/* Sender user ID *\/\r\n\tSender *FacebookPerson `json:\"sender,omitempty\"`\r\n\r\n\t\/* Recipient user ID *\/\r\n\tRecipient *FacebookPerson `json:\"recipient,omitempty\"`\r\n\r\n\t\/* Time of messaging (epoch time in milliseconds) *\/\r\n\tTimestamp int64 `json:\"timestamp,omitempty\"`\r\n\r\n\t\/* Message Received Payload *\/\r\n\tMessage *FacebookMessage `json:\"message,omitempty\"`\r\n\r\n\t\/* Postback Received Payload *\/\r\n\tPostback *FacebookPostback `json:\"postback,omitempty\"`\r\n\r\n\t\/* Authentification Payload *\/\r\n\tOptIn *FacebookOptIn `json:\"optin,omitempty\"`\r\n\r\n\t\/* Account Linking Payload *\/\r\n\tAccountLinking *FacebookAccountLinking `json:\"account_linking,omitempty\"`\r\n\r\n\t\/* Message Delivered Payload *\/\r\n\tDelivery *FacebookDelivery `json:\"delivery,omitempty\"`\r\n\r\n\t\/* Message Read Payload *\/\r\n\tRead *FacebookRead `json:\"read,omitempty\"`\r\n}\r\n\r\ntype FacebookEntry struct {\r\n\t\/* Page ID of page *\/\r\n\tId string `json:\"id,omitempty\"`\r\n\r\n\t\/* Time of update (epoch time in milliseconds) *\/\r\n\tTime int64 `json:\"time,omitempty\"`\r\n\r\n\t\/* Array containing objects related to messaging *\/\r\n\tMessaging []FacebookMessaging `json:\"messaging,omitempty\"`\r\n}\r\n\r\ntype FacebookPayload struct {\r\n\tObject string `json:\"object,omitempty\"`\r\n\r\n\t\/* Array containing event data *\/\r\n\tEntry []FacebookEntry `json:\"entry,omitempty\"`\r\n}\r\n\r\nfunc FacebookCallbackHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>> Facebook Callback Handler\")\r\n\r\n\tif r.Method == \"GET\" {\r\n\t\tFacebookCallbackGETHandler(w, r)\r\n\t} else if r.Method == \"POST\" {\r\n\t\tFacebookCallbackPOSTHandler(w, r)\r\n\t} else {\r\n\t\tlog.Errorf(c, \"Error, unkown method: %v\", r.Method)\r\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\r\n\t\treturn\r\n\t}\r\n\r\n}\r\n\r\nfunc FacebookCallbackGETHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> FacebookCallbackGETHandler\")\r\n\r\n\tmode := r.FormValue(\"hub.mode\")\r\n\tlog.Debugf(c, \"Hub Mode: %v\", mode)\r\n\r\n\tchallenge := r.FormValue(\"hub.challenge\")\r\n\tlog.Debugf(c, \"Hub Challenge: %v\", challenge)\r\n\r\n\tverify_token := r.FormValue(\"hub.verify_token\")\r\n\tlog.Debugf(c, \"Hub Verify Token: %v\", verify_token)\r\n\r\n\tif verify_token != VERIFY_TOKEN {\r\n\t\tlog.Errorf(c, \"Error, bad verification token: %v\", verify_token)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tif mode != \"subscribe\" {\r\n\t\tlog.Errorf(c, \"Error, bad mode: %v\", mode)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"%v\", challenge)\r\n}\r\n\r\nfunc FacebookCallbackPOSTHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> FacebookCallbackPOSTHandler\")\r\n\r\n\tvar payload FacebookPayload\r\n\terr := UnmarshalRequest(c, r, &payload)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error reading JSON: %v\", err)\r\n\t\thttp.Error(w, \"Internal Server Error: \"+err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tif payload.Object == \"page\" {\r\n\t\tlog.Debugf(c, \"Received %v entries\", len(payload.Entry))\r\n\r\n\t\tfor i, e := range payload.Entry {\r\n\t\t\tlog.Debugf(c, \"Entry #%v\", i)\r\n\t\t\tlog.Debugf(c, \"Message on page %v\", e.Id)\r\n\t\t\tlog.Debugf(c, \"Time: %v\", e.Time)\r\n\r\n\t\t\tif len(e.Messaging) > 0 {\r\n\t\t\t\tlog.Debugf(c, \"Received %v messages\", len(e.Messaging))\r\n\r\n\t\t\t\tfor _, m := range e.Messaging {\r\n\t\t\t\t\tlog.Debugf(c, \"Sender %v Recipient %v\", m.Sender.Id, m.Recipient.Id)\r\n\r\n\t\t\t\t\tif m.Message != nil {\r\n\t\t\t\t\t\tlog.Debugf(c, \" --- Message Received\")\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Id: %v\", m.Message.Mid)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Seq: %v\", m.Message.Seq)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message: %v\", m.Message.Text)\r\n\r\n\t\t\t\t\t\tresponse := GetResponse(c, m.Sender.Id, m.Message.Text)\r\n\r\n\t\t\t\t\t\terr = SendFacebookMessage(c, m.Sender.Id, response)\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tlog.Errorf(c, \"Error, sending message: %v\", err)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tif m.Delivery != nil {\r\n\t\t\t\t\t\tlog.Debugf(c, \" --- Message Delivered\")\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Ids: %v\", m.Delivery.Mids)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Message Seq: %v\", m.Delivery.Seq)\r\n\t\t\t\t\t\tlog.Debugf(c, \"Watermark: %v\", m.Delivery.Watermark)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tlog.Warningf(c, \"No message in payload - feature not developed!\")\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t} else {\r\n\t\tlog.Errorf(c, \"Error, unkown Object type: %v\", payload.Object)\r\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"\")\r\n}\r\n\r\nfunc GetResponse(c context.Context, recipientId string, message string) string {\r\n\tresponse := \"Hello \" + message\r\n\treturn response\r\n}\r\n\r\nfunc SendFacebookMessage(c context.Context, recipientId string, message string) error {\r\n\r\n\tmessaging := FacebookMessaging{\r\n\t\tRecipient: &FacebookPerson{\r\n\t\t\tId: recipientId,\r\n\t\t},\r\n\t\tMessage: &FacebookMessage{\r\n\t\t\tText: message,\r\n\t\t},\r\n\t}\r\n\r\n\tjsonData, err := json.Marshal(messaging)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error converting to JSON: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tURL := \"https:\/\/graph.facebook.com\/v2.6\/me\/messages?access_token=\" + PAGE_ACCESS_TOKEN\r\n\tlog.Debugf(c, \"Calling %v\", URL)\r\n\tresp, err := urlfetch.Client(c).Post(URL, \"application\/json\", bytes.NewReader(jsonData))\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error posting message: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tdefer resp.Body.Close()\r\n\tbody, err := ioutil.ReadAll(resp.Body)\r\n\tif err != nil {\r\n\t\tlog.Errorf(c, \"Error reading response: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tlog.Debugf(c, \"Facebook response: %s\", body)\r\n\r\n\treturn nil\r\n\r\n}\r\n<|endoftext|>"} {"text":"package reservation\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar redisTestURL = os.Getenv(\"REDIS_TEST_URL\")\n\nfunc TestNewManager(t *testing.T) {\n\tmanager, err := NewManager(redisTestURL, \"test-worker\")\n\tassert.Nil(t, err)\n\t\/\/ If manager is not nil, NewManager was able to succesfully ping local redis\n\tassert.NotNil(t, manager)\n}\n\nfunc setUp(t *testing.T) (*Manager, string) {\n\tconn, err := redis.DialTimeout(\"tcp\", redisTestURL, 15*time.Second, 10*time.Second, 10*time.Second)\n\tmessage := fmt.Sprintf(\"Could not connect to redis at %s. Ensure redis is running at this location (maybe locally)?\", redisTestURL)\n\tassert.Nil(t, err, message)\n\tdefer conn.Close()\n\tconn.Do(\"FLUSHALL\")\n\tmanager, err := NewManager(redisTestURL, \"test-worker\")\n\tassert.Nil(t, err)\n\tresourceID := \"12345\"\n\treturn manager, resourceID\n}\n\nfunc TestManagerLockCreate(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\t\/\/ Make a new reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert reservation created with correct field values\n\tassert.NotNil(t, reservation)\n\n\t\/\/ Make the same reservation again and ensure error returned\n\tdupeReservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, dupeReservation)\n\tassert.EqualError(t, err, fmt.Sprintf(\"Reservation already exists for resource %s\", resourceID))\n\n\t\/\/ Release the first reservation and ensure we can make a second reservation\n\terr = reservation.Release()\n\tassert.Nil(t, err)\n\treservation, err = manager.Lock(resourceID)\n\tassert.NotNil(t, reservation)\n}\n\nfunc TestManagerLockConcurrentRequests(t *testing.T) {\n\t\/\/ Test to ensure no race conditions when making many concurrent lock requests\n\tmanager, resourceID := setUp(t)\n\n\tvar wg sync.WaitGroup\n\n\thold := make(chan struct{})\n\n\t\/\/ Make 100 simultaneous requests for locks\n\tnumErrors := 0\n\tnumReservations := 0\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-hold \/\/ try to read from channel to block the goroutine\n\t\t\treservation, err := manager.Lock(resourceID)\n\t\t\tif err != nil {\n\t\t\t\tnumErrors++\n\t\t\t}\n\t\t\tif reservation != nil {\n\t\t\t\tnumReservations++\n\t\t\t}\n\t\t}()\n\t}\n\tclose(hold) \/\/ close channel so all goroutines make requests at once\n\twg.Wait()\n\n\tassert.Equal(t, numReservations, 1, \"Expected only one reservation to be made\")\n\tassert.Equal(t, numErrors, 99, \"Expected 99 errors\")\n}\n\nfunc TestReservationTTL(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\t\/\/ This setup would never happen in production, but simulates orphaned reservations\n\t\/\/ being left in redis if (for example) the process dies unexpectedly without releasing\n\t\/\/ the reservation\n\tmanager.Heartbeat = 100 * time.Second\n\tmanager.TTL = 1 * time.Second\n\n\t\/\/ Create a reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert we can make a new reservation after TTL expires\n\ttime.Sleep(2 * time.Second)\n\treservation, err = manager.Lock(resourceID)\n\tassert.NotNil(t, reservation)\n\tassert.Nil(t, err)\n}\n\nfunc TestReservationExtend(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\tmanager.Heartbeat = 500 * time.Millisecond\n\tmanager.TTL = 1 * time.Second\n\n\t\/\/ Create a reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert we cannot make a new reservation because TTL has been extended\n\ttime.Sleep(2 * time.Second)\n\treservation, err = manager.Lock(resourceID)\n\tassert.Nil(t, reservation)\n\tassert.EqualError(t, err, fmt.Sprintf(\"Reservation already exists for resource %s\", resourceID))\n}\nrace condition test: check for exact error messagepackage reservation\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar redisTestURL = os.Getenv(\"REDIS_TEST_URL\")\n\nfunc TestNewManager(t *testing.T) {\n\tmanager, err := NewManager(redisTestURL, \"test-worker\")\n\tassert.Nil(t, err)\n\t\/\/ If manager is not nil, NewManager was able to succesfully ping local redis\n\tassert.NotNil(t, manager)\n}\n\nfunc setUp(t *testing.T) (*Manager, string) {\n\tconn, err := redis.DialTimeout(\"tcp\", redisTestURL, 15*time.Second, 10*time.Second, 10*time.Second)\n\tmessage := fmt.Sprintf(\"Could not connect to redis at %s. Ensure redis is running at this location (maybe locally)?\", redisTestURL)\n\tassert.Nil(t, err, message)\n\tdefer conn.Close()\n\tconn.Do(\"FLUSHALL\")\n\tmanager, err := NewManager(redisTestURL, \"test-worker\")\n\tassert.Nil(t, err)\n\tresourceID := \"12345\"\n\treturn manager, resourceID\n}\n\nfunc TestManagerLockCreate(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\t\/\/ Make a new reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert reservation created with correct field values\n\tassert.NotNil(t, reservation)\n\n\t\/\/ Make the same reservation again and ensure error returned\n\tdupeReservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, dupeReservation)\n\tassert.EqualError(t, err, fmt.Sprintf(\"Reservation already exists for resource %s\", resourceID))\n\n\t\/\/ Release the first reservation and ensure we can make a second reservation\n\terr = reservation.Release()\n\tassert.Nil(t, err)\n\treservation, err = manager.Lock(resourceID)\n\tassert.NotNil(t, reservation)\n}\n\nfunc TestManagerLockConcurrentRequests(t *testing.T) {\n\t\/\/ Test to ensure no race conditions when making many concurrent lock requests\n\tmanager, resourceID := setUp(t)\n\n\tvar wg sync.WaitGroup\n\n\thold := make(chan struct{})\n\n\t\/\/ Make 100 simultaneous requests for locks\n\tnumErrors := 0\n\tnumReservations := 0\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-hold \/\/ try to read from channel to block the goroutine\n\t\t\treservation, err := manager.Lock(resourceID)\n\t\t\texpectedErr := fmt.Sprintf(\"Reservation already exists for resource %s\", resourceID)\n\t\t\tif err != nil && err.Error() == expectedErr {\n\t\t\t\tnumErrors++\n\t\t\t}\n\t\t\tif reservation != nil {\n\t\t\t\tnumReservations++\n\t\t\t}\n\t\t}()\n\t}\n\tclose(hold) \/\/ close channel so all goroutines make requests at once\n\twg.Wait()\n\n\tassert.Equal(t, numReservations, 1, \"Expected only one reservation to be made\")\n\tassert.Equal(t, numErrors, 99, \"Expected 99 errors\")\n}\n\nfunc TestReservationTTL(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\t\/\/ This setup would never happen in production, but simulates orphaned reservations\n\t\/\/ being left in redis if (for example) the process dies unexpectedly without releasing\n\t\/\/ the reservation\n\tmanager.Heartbeat = 100 * time.Second\n\tmanager.TTL = 1 * time.Second\n\n\t\/\/ Create a reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert we can make a new reservation after TTL expires\n\ttime.Sleep(2 * time.Second)\n\treservation, err = manager.Lock(resourceID)\n\tassert.NotNil(t, reservation)\n\tassert.Nil(t, err)\n}\n\nfunc TestReservationExtend(t *testing.T) {\n\tmanager, resourceID := setUp(t)\n\n\tmanager.Heartbeat = 500 * time.Millisecond\n\tmanager.TTL = 1 * time.Second\n\n\t\/\/ Create a reservation\n\treservation, err := manager.Lock(resourceID)\n\tassert.Nil(t, err)\n\n\t\/\/ Assert we cannot make a new reservation because TTL has been extended\n\ttime.Sleep(2 * time.Second)\n\treservation, err = manager.Lock(resourceID)\n\tassert.Nil(t, reservation)\n\tassert.EqualError(t, err, fmt.Sprintf(\"Reservation already exists for resource %s\", resourceID))\n}\n<|endoftext|>"} {"text":"package firehosenozzle\n\nimport (\n\t\"crypto\/tls\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry\/noaa\/consumer\"\n\tnoaerrors \"github.com\/cloudfoundry\/noaa\/errors\"\n\t\"github.com\/cloudfoundry\/sonde-go\/events\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/prometheus\/common\/log\"\n\n\t\"github.com\/cloudfoundry-community\/firehose_exporter\/metrics\"\n)\n\ntype FirehoseNozzle struct {\n\turl string\n\tskipSSLValidation bool\n\tsubscriptionID string\n\tidleTimeout time.Duration\n\tminRetryDelay time.Duration\n\tmaxRetryDelay time.Duration\n\tmaxRetryCount int\n\tauthTokenRefresher consumer.TokenRefresher\n\tmetricsStore *metrics.Store\n\terrs <-chan error\n\tmessages <-chan *events.Envelope\n\tconsumer *consumer.Consumer\n}\n\nfunc New(\n\turl string,\n\tskipSSLValidation bool,\n\tsubscriptionID string,\n\tidleTimeout time.Duration,\n\tminRetryDelay time.Duration,\n\tmaxRetryDelay time.Duration,\n\tmaxRetryCount int,\n\tauthTokenRefresher consumer.TokenRefresher,\n\tmetricsStore *metrics.Store,\n) *FirehoseNozzle {\n\treturn &FirehoseNozzle{\n\t\turl: url,\n\t\tskipSSLValidation: skipSSLValidation,\n\t\tsubscriptionID: subscriptionID,\n\t\tidleTimeout: idleTimeout,\n\t\tminRetryDelay: minRetryDelay,\n\t\tmaxRetryDelay: maxRetryDelay,\n\t\tmaxRetryCount: maxRetryCount,\n\t\tauthTokenRefresher: authTokenRefresher,\n\t\tmetricsStore: metricsStore,\n\t\terrs: make(<-chan error),\n\t\tmessages: make(<-chan *events.Envelope),\n\t}\n}\n\nfunc (n *FirehoseNozzle) Start() error {\n\tlog.Info(\"Starting Firehose Nozzle...\")\n\tn.consumeFirehose()\n\terr := n.parseEnvelopes()\n\tlog.Info(\"Firehose Nozzle shutting down...\")\n\treturn err\n}\n\nfunc (n *FirehoseNozzle) consumeFirehose() {\n\tn.consumer = consumer.New(\n\t\tn.url,\n\t\t&tls.Config{InsecureSkipVerify: n.skipSSLValidation},\n\t\tnil,\n\t)\n\tn.consumer.RefreshTokenFrom(n.authTokenRefresher)\n\tn.consumer.SetDebugPrinter(DebugPrinter{})\n\tif n.idleTimeout > 0 {\n\t\tn.consumer.SetIdleTimeout(n.idleTimeout)\n\t}\n\tif n.minRetryDelay > 0 {\n\t\tn.consumer.SetMinRetryDelay(n.minRetryDelay)\n\t}\n\tif n.maxRetryDelay > 0 {\n\t\tn.consumer.SetMaxRetryDelay(n.maxRetryDelay)\n\t}\n\tif n.maxRetryCount > 0 {\n\t\tn.consumer.SetMaxRetryCount(n.maxRetryCount)\n\t}\n\tn.messages, n.errs = n.consumer.Firehose(n.subscriptionID, \"\")\n}\n\nfunc (n *FirehoseNozzle) parseEnvelopes() error {\n\tfor {\n\t\tselect {\n\t\tcase envelope := <-n.messages:\n\t\t\tn.handleMessage(envelope)\n\t\t\tn.metricsStore.AddMetric(envelope)\n\t\tcase err := <-n.errs:\n\t\t\tretryError := n.handleError(err)\n\t\t\tif !retryError {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (n *FirehoseNozzle) handleMessage(envelope *events.Envelope) {\n\tif envelope.GetEventType() == events.Envelope_CounterEvent && envelope.CounterEvent.GetName() == \"TruncatingBuffer.DroppedMessages\" && envelope.GetOrigin() == \"doppler\" {\n\t\tlog.Infof(\"We've intercepted an upstream message which indicates that the Nozzle or the TrafficController is not keeping up. Please try scaling up the Nozzle.\")\n\t\tn.metricsStore.AlertSlowConsumerError()\n\t}\n}\n\nfunc (n *FirehoseNozzle) handleError(err error) bool {\n\tlog.Errorf(\"Error while reading from the Firehose: %v\", err)\n\n\tswitch err.(type) {\n\tcase noaerrors.RetryError:\n\t\tswitch noaRetryError := err.(noaerrors.RetryError).Err.(type) {\n\t\tcase *websocket.CloseError:\n\t\t\tswitch noaRetryError.Code {\n\t\t\tcase websocket.ClosePolicyViolation:\n\t\t\t\tlog.Errorf(\"Nozzle couldn't keep up. Please try scaling up the Nozzle.\")\n\t\t\t\tn.metricsStore.AlertSlowConsumerError()\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tlog.Info(\"Closing connection with Firehose...\")\n\tn.consumer.Close()\n\n\treturn false\n}\n\ntype DebugPrinter struct{}\n\nfunc (dp DebugPrinter) Print(title, dump string) {\n\tlog.Debugf(\"%s: %s\", title, dump)\n}\nSubscribe to only to metrics feed from firehosepackage firehosenozzle\n\nimport (\n\t\"crypto\/tls\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry\/noaa\/consumer\"\n\tnoaerrors \"github.com\/cloudfoundry\/noaa\/errors\"\n\t\"github.com\/cloudfoundry\/sonde-go\/events\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/prometheus\/common\/log\"\n\n\t\"github.com\/cloudfoundry-community\/firehose_exporter\/metrics\"\n)\n\ntype FirehoseNozzle struct {\n\turl string\n\tskipSSLValidation bool\n\tsubscriptionID string\n\tidleTimeout time.Duration\n\tminRetryDelay time.Duration\n\tmaxRetryDelay time.Duration\n\tmaxRetryCount int\n\tauthTokenRefresher consumer.TokenRefresher\n\tmetricsStore *metrics.Store\n\terrs <-chan error\n\tmessages <-chan *events.Envelope\n\tconsumer *consumer.Consumer\n}\n\nfunc New(\n\turl string,\n\tskipSSLValidation bool,\n\tsubscriptionID string,\n\tidleTimeout time.Duration,\n\tminRetryDelay time.Duration,\n\tmaxRetryDelay time.Duration,\n\tmaxRetryCount int,\n\tauthTokenRefresher consumer.TokenRefresher,\n\tmetricsStore *metrics.Store,\n) *FirehoseNozzle {\n\treturn &FirehoseNozzle{\n\t\turl: url,\n\t\tskipSSLValidation: skipSSLValidation,\n\t\tsubscriptionID: subscriptionID,\n\t\tidleTimeout: idleTimeout,\n\t\tminRetryDelay: minRetryDelay,\n\t\tmaxRetryDelay: maxRetryDelay,\n\t\tmaxRetryCount: maxRetryCount,\n\t\tauthTokenRefresher: authTokenRefresher,\n\t\tmetricsStore: metricsStore,\n\t\terrs: make(<-chan error),\n\t\tmessages: make(<-chan *events.Envelope),\n\t}\n}\n\nfunc (n *FirehoseNozzle) Start() error {\n\tlog.Info(\"Starting Firehose Nozzle...\")\n\tn.consumeFirehose()\n\terr := n.parseEnvelopes()\n\tlog.Info(\"Firehose Nozzle shutting down...\")\n\treturn err\n}\n\nfunc (n *FirehoseNozzle) consumeFirehose() {\n\tn.consumer = consumer.New(\n\t\tn.url,\n\t\t&tls.Config{InsecureSkipVerify: n.skipSSLValidation},\n\t\tnil,\n\t)\n\tn.consumer.RefreshTokenFrom(n.authTokenRefresher)\n\tn.consumer.SetDebugPrinter(DebugPrinter{})\n\tif n.idleTimeout > 0 {\n\t\tn.consumer.SetIdleTimeout(n.idleTimeout)\n\t}\n\tif n.minRetryDelay > 0 {\n\t\tn.consumer.SetMinRetryDelay(n.minRetryDelay)\n\t}\n\tif n.maxRetryDelay > 0 {\n\t\tn.consumer.SetMaxRetryDelay(n.maxRetryDelay)\n\t}\n\tif n.maxRetryCount > 0 {\n\t\tn.consumer.SetMaxRetryCount(n.maxRetryCount)\n\t}\n\tn.messages, n.errs = n.consumer.FilteredFirehose(n.subscriptionID, \"\", consumer.Metrics)\n}\n\nfunc (n *FirehoseNozzle) parseEnvelopes() error {\n\tfor {\n\t\tselect {\n\t\tcase envelope := <-n.messages:\n\t\t\tn.handleMessage(envelope)\n\t\t\tn.metricsStore.AddMetric(envelope)\n\t\tcase err := <-n.errs:\n\t\t\tretryError := n.handleError(err)\n\t\t\tif !retryError {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (n *FirehoseNozzle) handleMessage(envelope *events.Envelope) {\n\tif envelope.GetEventType() == events.Envelope_CounterEvent && envelope.CounterEvent.GetName() == \"TruncatingBuffer.DroppedMessages\" && envelope.GetOrigin() == \"doppler\" {\n\t\tlog.Infof(\"We've intercepted an upstream message which indicates that the Nozzle or the TrafficController is not keeping up. Please try scaling up the Nozzle.\")\n\t\tn.metricsStore.AlertSlowConsumerError()\n\t}\n}\n\nfunc (n *FirehoseNozzle) handleError(err error) bool {\n\tlog.Errorf(\"Error while reading from the Firehose: %v\", err)\n\n\tswitch err.(type) {\n\tcase noaerrors.RetryError:\n\t\tswitch noaRetryError := err.(noaerrors.RetryError).Err.(type) {\n\t\tcase *websocket.CloseError:\n\t\t\tswitch noaRetryError.Code {\n\t\t\tcase websocket.ClosePolicyViolation:\n\t\t\t\tlog.Errorf(\"Nozzle couldn't keep up. Please try scaling up the Nozzle.\")\n\t\t\t\tn.metricsStore.AlertSlowConsumerError()\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tlog.Info(\"Closing connection with Firehose...\")\n\tn.consumer.Close()\n\n\treturn false\n}\n\ntype DebugPrinter struct{}\n\nfunc (dp DebugPrinter) Print(title, dump string) {\n\tlog.Debugf(\"%s: %s\", title, dump)\n}\n<|endoftext|>"} {"text":"package file\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/IsFileExists 目录或文件是否存在\nfunc IsFileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n\/\/IsFile 判断是否为文件\nfunc IsFile(path string) (bool, error) {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn info.Mode().IsRegular(), nil\n}\n\n\/\/CopyFile 文件拷贝,优先使用hard link\nfunc CopyFile(src, dest string) (err error) {\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !srcInfo.Mode().IsRegular() {\n\t\treturn fmt.Errorf(\"CopyFile: non-regular source file %s (%q)\", srcInfo.Name(), srcInfo.Mode().String())\n\t}\n\tdestInfo, err := os.Stat(dest)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !(destInfo.Mode().IsRegular()) {\n\t\t\treturn fmt.Errorf(\"CopyFile: non-regular destination file %s (%q)\", destInfo.Name(), destInfo.Mode().String())\n\t\t}\n\t\tif os.SameFile(srcInfo, destInfo) {\n\t\t\treturn\n\t\t}\n\t}\n\tif err = os.Link(src, dest); err == nil {\n\t\treturn\n\t}\n\terr = CopyFileContents(src, dest)\n\treturn\n}\n\n\/\/CopyFileContents 文件内容拷贝\nfunc CopyFileContents(src, dest string) (err error) {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer in.Close()\n\tout, err := os.Create(dest)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tcerr := out.Close()\n\t\tif err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\tif _, err = io.Copy(out, in); err != nil {\n\t\treturn\n\t}\n\terr = out.Sync()\n\treturn\n}\n更新部分方法命名package file\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/IsExists 目录或文件是否存在\nfunc IsExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n\/\/IsFile 判断是否为文件\nfunc IsFile(path string) (bool, error) {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn info.Mode().IsRegular(), nil\n}\n\n\/\/Copy 文件拷贝,优先使用hard link\nfunc Copy(src, dest string) (err error) {\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !srcInfo.Mode().IsRegular() {\n\t\treturn fmt.Errorf(\"CopyFile: non-regular source file %s (%q)\", srcInfo.Name(), srcInfo.Mode().String())\n\t}\n\tdestInfo, err := os.Stat(dest)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !(destInfo.Mode().IsRegular()) {\n\t\t\treturn fmt.Errorf(\"CopyFile: non-regular destination file %s (%q)\", destInfo.Name(), destInfo.Mode().String())\n\t\t}\n\t\tif os.SameFile(srcInfo, destInfo) {\n\t\t\treturn\n\t\t}\n\t}\n\tif err = os.Link(src, dest); err == nil {\n\t\treturn\n\t}\n\terr = CopyContents(src, dest)\n\treturn\n}\n\n\/\/CopyFileContents 文件内容拷贝\nfunc CopyContents(src, dest string) (err error) {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer in.Close()\n\tout, err := os.Create(dest)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tcerr := out.Close()\n\t\tif err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\tif _, err = io.Copy(out, in); err != nil {\n\t\treturn\n\t}\n\terr = out.Sync()\n\treturn\n}\n<|endoftext|>"} {"text":"package file\n\nimport \"strings\"\nimport \"regexp\"\n\ntype Line []rune\n\nfunc (line Line) Dup() Line {\n\tstrLine := string(line)\n\treturn Line(strLine)\n}\n\nfunc (line Line) ToString() string {\n\treturn string(line)\n}\n\nfunc (line Line) CommonStart(other Line) Line {\n\tfor k, r := range line {\n\t\tif k >= len(other) || other[k] != r {\n\t\t\treturn line[:k].Dup()\n\t\t}\n\t}\n\treturn line.Dup()\n}\n\nfunc (line Line) Search(term string, start, end int) (int, int) {\n\tif len(line) == 0 {\n\t\treturn -1, -1\n\t}\n\tif end < 0 || end >= len(line) {\n\t\tend = len(line) + end\n\t\tif end < 0 || end < start {\n\t\t\treturn -1, -1\n\t\t}\n\t}\n\tn := len(term)\n\tvar startCol, endCol int\n\ttarget := string(line[start : end+1])\n\tif term[0:1] == \"\/\" && term[n-1:n] == \"\/\" {\n\t\tre, err := regexp.Compile(term[1 : n-1])\n\t\tif err != nil {\n\t\t\treturn -1, -1\n\t\t}\n\t\tcols := re.FindStringIndex(target)\n\t\tif cols == nil {\n\t\t\treturn -1, -1\n\t\t}\n\t\tstartCol = cols[0]\n\t\tendCol = cols[1]\n\t} else {\n\t\tstrLine := strings.ToLower(target)\n\t\tterm = strings.ToLower(term)\n\t\tstartCol = strings.Index(strLine, term)\n\t\tendCol = startCol + len(term)\n\t}\n\tif startCol < 0 {\n\t\treturn startCol, endCol\n\t} else {\n\t\treturn startCol + start, endCol + start\n\t}\n}\n\nfunc (line Line) tabs2spaces() Line {\n\tstrLine := string(line)\n\tstrLine = strings.Replace(strLine, \"\\t\", \" \", -1)\n\treturn Line(strLine)\n}\ncleanup file\/linepackage file\n\nimport \"strings\"\nimport \"regexp\"\n\ntype Line []rune\n\n\/\/ Dup returns a new Line with the same content.\nfunc (line Line) Dup() Line {\n\tstrLine := string(line)\n\treturn Line(strLine)\n}\n\n\/\/ ToString converts a Line into a string.\nfunc (line Line) ToString() string {\n\treturn string(line)\n}\n\n\/\/ CommonStart returns the sub-line that is common between\n\/\/ two lines.\nfunc (line Line) CommonStart(other Line) Line {\n\tfor k, r := range line {\n\t\tif k >= len(other) || other[k] != r {\n\t\t\treturn line[:k].Dup()\n\t\t}\n\t}\n\treturn line.Dup()\n}\n\n\/\/ Search returns the start\/end positions for a search term.\n\/\/ A -1 indicates no match.\nfunc (line Line) Search(term string, start, end int) (int, int) {\n\n\t\/\/ if line is empty, there is no match.\n\tif len(line) == 0 {\n\t\treturn -1, -1\n\t}\n\n\t\/\/ Negative end indicates \"from end of line\".\n\tif end < 0 || end >= len(line) {\n\t\tend = len(line) + end\n\t\tif end < 0 || end < start {\n\t\t\treturn -1, -1\n\t\t}\n\t}\n\n\treturn line.search(term, start, end)\n\n}\n\nfunc isRegex(term string) bool {\n\tn := len(term)\n\treturn (term[0:1] == \"\/\" && term[n-1:n] == \"\/\")\n}\n\nfunc (line Line) search(term string, start, end int) (int, int) {\n\n\tn := len(term)\n\tvar startCol, endCol int\n\ttarget := string(line[start : end+1])\n\n\tif isRegex(term) {\n\t\tre, err := regexp.Compile(term[1 : n-1])\n\t\tif err != nil {\n\t\t\treturn -1, -1\n\t\t}\n\t\tcols := re.FindStringIndex(target)\n\t\tif cols == nil {\n\t\t\treturn -1, -1\n\t\t}\n\t\tstartCol = cols[0]\n\t\tendCol = cols[1]\n\t} else {\n\t\tstrLine := strings.ToLower(target)\n\t\tterm = strings.ToLower(term)\n\t\tstartCol = strings.Index(strLine, term)\n\t\tendCol = startCol + len(term)\n\t}\n\tif startCol < 0 {\n\t\treturn startCol, endCol\n\t} else {\n\t\treturn startCol + start, endCol + start\n\t}\n}\n\nfunc (line Line) tabs2spaces() Line {\n\tstrLine := string(line)\n\tstrLine = strings.Replace(strLine, \"\\t\", \" \", -1)\n\treturn Line(strLine)\n}\n<|endoftext|>"} {"text":"package anidb\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Kovensky\/go-anidb\/misc\"\n\t\"github.com\/Kovensky\/go-anidb\/udp\"\n\t\"github.com\/Kovensky\/go-fscache\"\n\t\"image\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar _ cacheable = &File{}\n\nfunc (f *File) setCachedTS(ts time.Time) {\n\tf.Cached = ts\n}\n\nfunc (f *File) IsStale() bool {\n\tif f == nil {\n\t\treturn true\n\t}\n\tif f.Incomplete {\n\t\treturn time.Now().Sub(f.Cached) > FileIncompleteCacheDuration\n\t}\n\treturn time.Now().Sub(f.Cached) > FileCacheDuration\n}\n\nfunc cacheFile(f *File) {\n\tCacheSet(f.FID, \"fid\", \"by-ed2k\", f.Ed2kHash, f.Filesize)\n\tCacheSet(f, \"fid\", f.FID)\n}\n\ntype FID int\n\nfunc (fid FID) File() *File {\n\tvar f File\n\tif CacheGet(&f, \"fid\", fid) == nil {\n\t\treturn &f\n\t}\n\treturn nil\n}\n\n\/\/ Prefetches the Anime, Episode and Group that this\n\/\/ file is linked to using the given AniDB instance.\n\/\/\n\/\/ Returns a channel where this file will be sent to\n\/\/ when the prefetching is done; if the file is nil,\n\/\/ the channel will return nil.\nfunc (f *File) Prefetch(adb *AniDB) <-chan *File {\n\tch := make(chan *File, 1)\n\tgo func() {\n\t\tif f != nil {\n\t\t\ta := adb.AnimeByID(f.AID)\n\t\t\tg := adb.GroupByID(f.GID)\n\t\t\t<-a\n\t\t\t<-g\n\t\t\tch <- f\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\n\/\/ Retrieves a File by its FID. Uses the UDP API.\nfunc (adb *AniDB) FileByID(fid FID) <-chan *File {\n\tkey := []fscache.CacheKey{\"fid\", fid}\n\n\tch := make(chan *File, 1)\n\n\tif fid < 1 {\n\t\tch <- nil\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\tic := make(chan notification, 1)\n\tgo func() { ch <- (<-ic).(*File); close(ch) }()\n\tif intentMap.Intent(ic, key...) {\n\t\treturn ch\n\t}\n\n\tif !Cache.IsValid(InvalidKeyCacheDuration, key...) {\n\t\tintentMap.NotifyClose((*File)(nil), key...)\n\t\treturn ch\n\t}\n\n\tf := fid.File()\n\tif !f.IsStale() {\n\t\tintentMap.NotifyClose(f, key...)\n\t\treturn ch\n\t}\n\n\tgo func() {\n\t\treply := <-adb.udp.SendRecv(\"FILE\",\n\t\t\tparamMap{\n\t\t\t\t\"fid\": fid,\n\t\t\t\t\"fmask\": fileFmask,\n\t\t\t\t\"amask\": fileAmask,\n\t\t\t})\n\n\t\tif reply.Error() == nil {\n\t\t\tadb.parseFileResponse(&f, reply, false)\n\n\t\t\tcacheFile(f)\n\t\t} else if reply.Code() == 320 {\n\t\t\tCache.SetInvalid(key...)\n\t\t}\n\n\t\tintentMap.NotifyClose(f, key...)\n\t}()\n\treturn ch\n}\n\nvar validEd2kHash = regexp.MustCompile(`\\A[[:xdigit:]]{32}\\z`)\n\n\/\/ Retrieves a File by its Ed2kHash + Filesize combination. Uses the UDP API.\nfunc (adb *AniDB) FileByEd2kSize(ed2k string, size int64) <-chan *File {\n\tkey := []fscache.CacheKey{\"fid\", \"by-ed2k\", ed2k, size}\n\n\tch := make(chan *File, 1)\n\n\tif size < 1 || !validEd2kHash.MatchString(ed2k) {\n\t\tch <- nil\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\t\/\/ AniDB always uses lower case hashes\n\ted2k = strings.ToLower(ed2k)\n\n\tic := make(chan notification, 1)\n\tgo func() {\n\t\tfid := (<-ic).(FID)\n\t\tif fid > 0 {\n\t\t\tch <- <-adb.FileByID(fid)\n\t\t}\n\t\tclose(ch)\n\t}()\n\tif intentMap.Intent(ic, key...) {\n\t\treturn ch\n\t}\n\n\tif !Cache.IsValid(InvalidKeyCacheDuration, key...) {\n\t\tintentMap.NotifyClose(FID(0), key...)\n\t\treturn ch\n\t}\n\n\tfid := FID(0)\n\n\tswitch ts, err := Cache.Get(&fid, key...); {\n\tcase err == nil && time.Now().Sub(ts) < FileCacheDuration:\n\t\tintentMap.NotifyClose(fid, key...)\n\t\treturn ch\n\t}\n\n\tgo func() {\n\t\treply := <-adb.udp.SendRecv(\"FILE\",\n\t\t\tparamMap{\n\t\t\t\t\"ed2k\": ed2k,\n\t\t\t\t\"size\": size,\n\t\t\t\t\"fmask\": fileFmask,\n\t\t\t\t\"amask\": fileAmask,\n\t\t\t})\n\n\t\tvar f *File\n\t\tif reply.Error() == nil {\n\t\t\tadb.parseFileResponse(&f, reply, false)\n\n\t\t\tfid = f.FID\n\n\t\t\tcacheFile(f)\n\t\t} else if reply.Code() == 320 { \/\/ file not found\n\t\t\tCache.SetInvalid(key...)\n\t\t} else if reply.Code() == 322 { \/\/ multiple files found\n\t\t\tpanic(\"Don't know what to do with \" + strings.Join(reply.Lines(), \"\\n\"))\n\t\t}\n\n\t\tintentMap.NotifyClose(fid, key...)\n\t}()\n\treturn ch\n}\n\nvar fileFmask = \"7fda7fe8\"\nvar fileAmask = \"00008000\"\n\nconst (\n\tstateCRCOK = 1 << iota\n\tstateCRCERR\n\tstateV2\n\tstateV3\n\tstateV4\n\tstateV5\n\tstateUncensored\n\tstateCensored\n)\n\nfunc sanitizeCodec(codec string) string {\n\tswitch codec {\n\tcase \"MP3 CBR\":\n\t\treturn \"MP3\"\n\tcase \"WMV9 (also WMV3)\":\n\t\treturn \"WMV9\"\n\tcase \"Ogg (Vorbis)\":\n\t\treturn \"Vorbis\"\n\tcase \"H264\/AVC\":\n\t\treturn \"H.264\"\n\t}\n\treturn codec\n}\n\nvar opedRE = regexp.MustCompile(`\\A(Opening|Ending)(?: (\\d+))?\\z`)\n\nfunc (adb *AniDB) parseFileResponse(f **File, reply udpapi.APIReply, calledFromFIDsByGID bool) bool {\n\tif reply.Error() != nil {\n\t\treturn false\n\t}\n\tif reply.Truncated() {\n\t\tpanic(\"Truncated\")\n\t}\n\n\tuidChan := make(chan UID, 1)\n\tif adb.udp.credentials != nil {\n\t\tgo func() { uidChan <- <-adb.GetUserUID(decrypt(adb.udp.credentials.username)) }()\n\t} else {\n\t\tuidChan <- 0\n\t\tclose(uidChan)\n\t}\n\n\tparts := strings.Split(reply.Lines()[1], \"|\")\n\tints := make([]int64, len(parts))\n\tfor i, p := range parts {\n\t\tints[i], _ = strconv.ParseInt(p, 10, 64)\n\t}\n\n\tpartial := false\n\n\trels := strings.Split(parts[5], \"'\")\n\trelList := make([]EID, 0, len(parts[5]))\n\trelated := make(RelatedEpisodes, len(parts[5]))\n\tfor _, rel := range rels {\n\t\tr := strings.Split(rel, \",\")\n\t\tif len(r) < 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\teid, _ := strconv.ParseInt(r[0], 10, 32)\n\t\tpct, _ := strconv.ParseInt(r[1], 10, 32)\n\t\trelList = append(relList, EID(eid))\n\t\trelated[EID(eid)] = float32(pct) \/ 100\n\n\t\tif pct != 100 {\n\t\t\tpartial = true\n\t\t}\n\t}\n\n\tepno := misc.ParseEpisodeList(parts[24])\n\tfid := FID(ints[0])\n\taid := AID(ints[1])\n\teid := EID(ints[2])\n\tgid := GID(ints[3])\n\tlid := LID(ints[4])\n\n\tif !epno[0].Start.ContainsEpisodes(epno[0].End) || len(epno) > 1 || len(relList) > 0 {\n\t\t\/\/ epno is broken -- we need to sanitize it\n\t\tthisEp := <-adb.EpisodeByID(eid)\n\t\tbad := false\n\t\tif thisEp != nil {\n\t\t\tparts := make([]string, 1, len(relList)+1)\n\t\t\tparts[0] = thisEp.Episode.String()\n\n\t\t\t\/\/ everything after this SHOULD be cache hits now, unless this is somehow\n\t\t\t\/\/ linked with an EID from a different anime (*stares at Haruhi*).\n\t\t\t\/\/ We don't want to use eps from different AIDs anyway, so that makes\n\t\t\t\/\/ the job easier.\n\n\t\t\t\/\/ We check if the related episodes are all in sequence from this one.\n\t\t\t\/\/ If they are, we build a new epno with the sequence. Otherwise,\n\t\t\t\/\/ our epno will only have the primary episode.\n\n\t\t\t\/\/ gather the episode numbers\n\t\t\tfor _, eid := range relList {\n\t\t\t\tif ep := eid.Episode(); ep != nil && ep.AID == thisEp.AID {\n\t\t\t\t\tparts = append(parts, ep.Episode.String())\n\t\t\t\t} else {\n\t\t\t\t\tbad = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttest := misc.EpisodeList{}\n\t\t\t\/\/ only if we didn't break the loop\n\t\t\tif !bad {\n\t\t\t\ttest = misc.ParseEpisodeList(strings.Join(parts, \",\"))\n\t\t\t}\n\n\t\t\tif partial {\n\t\t\t\tif calledFromFIDsByGID {\n\t\t\t\t\tepno = test\n\t\t\t\t\tadb.Logger.Printf(\"UDP!!! FID %d is only part of episode %s with no complementary files\", fid, epno)\n\t\t\t\t} else if len(test) == 1 && test[0].Start.Number == test[0].End.Number {\n\t\t\t\t\tfids := []int{}\n\n\t\t\t\t\tfor fid := range adb.FIDsByGID(thisEp, gid) {\n\t\t\t\t\t\tfids = append(fids, int(fid))\n\t\t\t\t\t}\n\t\t\t\t\tif len(fids) >= 1 && fids[0] == 0 {\n\t\t\t\t\t\tfids = fids[1:]\n\t\t\t\t\t\t\/\/ Only entry was API error\n\t\t\t\t\t\tif len(fids) == 0 {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tsort.Sort(sort.IntSlice(fids))\n\t\t\t\t\tidx := sort.SearchInts(fids, int(fid))\n\t\t\t\t\tif idx == len(fids) {\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"FID %d couldn't locate itself\", fid))\n\t\t\t\t\t}\n\n\t\t\t\t\tepno = test\n\n\t\t\t\t\t\/\/ equate pointers\n\t\t\t\t\tepno[0].End = epno[0].Start\n\n\t\t\t\t\tepno[0].Start.Parts = len(fids)\n\t\t\t\t\tepno[0].Start.Part = idx\n\t\t\t\t} else {\n\t\t\t\t\tpanic(fmt.Sprintf(\"Don't know what to do with partial episode %s (EID %d)\", test, eid))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ if they're all in sequence, then we'll only have a single range in the list\n\t\t\t\tif len(test) == 1 {\n\t\t\t\t\tepno = test\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ use only the primary epno then\n\t\t\t\t\tepno = misc.ParseEpisodeList(thisEp.Episode.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tepstr := epno.String()\n\tif len(epno) == 1 && epno[0].Type == misc.EpisodeTypeCredits && epno[0].Len() == 1 {\n\t\ttyp := \"\"\n\t\tn := 0\n\n\t\tif ep := <-adb.EpisodeByID(eid); ep == nil {\n\t\t} else if m := opedRE.FindStringSubmatch(ep.Titles[\"en\"]); len(m) > 2 {\n\t\t\tnum, err := strconv.ParseInt(m[2], 10, 32)\n\t\t\tif err == nil {\n\t\t\t\tn = int(num)\n\t\t\t}\n\n\t\t\ttyp = m[1]\n\t\t}\n\n\t\tgobi := fmt.Sprintf(\"%d\", n)\n\t\tif n == 0 {\n\t\t\tgobi = \"\"\n\t\t}\n\n\t\tswitch typ {\n\t\tcase \"Opening\":\n\t\t\tepstr = \"OP\" + gobi\n\t\tcase \"Ending\":\n\t\t\tepstr = \"ED\" + gobi\n\t\t}\n\t}\n\n\tversion := FileVersion(1)\n\tswitch i := ints[7]; {\n\tcase i&stateV5 != 0:\n\t\tversion = 5\n\tcase i&stateV4 != 0:\n\t\tversion = 4\n\tcase i&stateV3 != 0:\n\t\tversion = 3\n\tcase i&stateV2 != 0:\n\t\tversion = 2\n\t}\n\n\tcodecs := strings.Split(parts[14], \"'\")\n\tbitrates := strings.Split(parts[15], \"'\")\n\talangs := strings.Split(parts[20], \"'\")\n\tstreams := make([]AudioStream, len(codecs))\n\tfor i := range streams {\n\t\tbr, _ := strconv.ParseInt(bitrates[i], 10, 32)\n\t\tstreams[i] = AudioStream{\n\t\t\tBitrate: int(br),\n\t\t\tCodec: sanitizeCodec(codecs[i]),\n\t\t\tLanguage: Language(alangs[i]),\n\t\t}\n\t}\n\n\tsl := strings.Split(parts[21], \"'\")\n\tslangs := make([]Language, len(sl))\n\tfor i := range sl {\n\t\tslangs[i] = Language(sl[i])\n\t}\n\n\tdepth := int(ints[12])\n\tif depth == 0 {\n\t\tdepth = 8\n\t}\n\tres := strings.Split(parts[18], \"x\")\n\twidth, _ := strconv.ParseInt(res[0], 10, 32)\n\theight, _ := strconv.ParseInt(res[1], 10, 32)\n\tvideo := VideoInfo{\n\t\tBitrate: int(ints[17]),\n\t\tCodec: sanitizeCodec(parts[16]),\n\t\tColorDepth: depth,\n\t\tResolution: image.Rect(0, 0, int(width), int(height)),\n\t}\n\n\tlidMap := LIDMap{}\n\tif *f != nil {\n\t\tlidMap = (*f).LID\n\t}\n\n\tuid := <-uidChan\n\tif uid != 0 {\n\t\tlidMap[uid] = lid\n\t}\n\n\t*f = &File{\n\t\tFID: fid,\n\n\t\tAID: aid,\n\t\tEID: eid,\n\t\tGID: gid,\n\t\tLID: lidMap,\n\n\t\tEpisodeString: epstr,\n\t\tEpisodeNumber: epno,\n\n\t\tRelatedEpisodes: related,\n\t\tDeprecated: ints[6] != 0,\n\n\t\tCRCMatch: ints[7]&stateCRCOK != 0,\n\t\tBadCRC: ints[7]&stateCRCERR != 0,\n\t\tVersion: version,\n\t\tUncensored: ints[7]&stateUncensored != 0,\n\t\tCensored: ints[7]&stateCensored != 0,\n\n\t\tIncomplete: video.Resolution.Empty(),\n\n\t\tFilesize: ints[8],\n\t\tEd2kHash: parts[9],\n\t\tSHA1Hash: parts[10],\n\t\tCRC32: parts[11],\n\n\t\tSource: FileSource(parts[13]),\n\n\t\tAudioStreams: streams,\n\t\tSubtitleLanguages: slangs,\n\t\tVideoInfo: video,\n\t\tFileExtension: parts[19],\n\n\t\tLength: time.Duration(ints[22]) * time.Second,\n\t\tAirDate: time.Unix(ints[23], 0),\n\t}\n\treturn true\n}\nanidb: Also cache the EID-AID relationship when caching FILE resultspackage anidb\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Kovensky\/go-anidb\/misc\"\n\t\"github.com\/Kovensky\/go-anidb\/udp\"\n\t\"github.com\/Kovensky\/go-fscache\"\n\t\"image\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar _ cacheable = &File{}\n\nfunc (f *File) setCachedTS(ts time.Time) {\n\tf.Cached = ts\n}\n\nfunc (f *File) IsStale() bool {\n\tif f == nil {\n\t\treturn true\n\t}\n\tif f.Incomplete {\n\t\treturn time.Now().Sub(f.Cached) > FileIncompleteCacheDuration\n\t}\n\treturn time.Now().Sub(f.Cached) > FileCacheDuration\n}\n\nfunc cacheFile(f *File) {\n\tCacheSet(f.AID, \"aid\", \"by-eid\", f.EID)\n\tCacheSet(f.FID, \"fid\", \"by-ed2k\", f.Ed2kHash, f.Filesize)\n\tCacheSet(f, \"fid\", f.FID)\n}\n\ntype FID int\n\nfunc (fid FID) File() *File {\n\tvar f File\n\tif CacheGet(&f, \"fid\", fid) == nil {\n\t\treturn &f\n\t}\n\treturn nil\n}\n\n\/\/ Prefetches the Anime, Episode and Group that this\n\/\/ file is linked to using the given AniDB instance.\n\/\/\n\/\/ Returns a channel where this file will be sent to\n\/\/ when the prefetching is done; if the file is nil,\n\/\/ the channel will return nil.\nfunc (f *File) Prefetch(adb *AniDB) <-chan *File {\n\tch := make(chan *File, 1)\n\tgo func() {\n\t\tif f != nil {\n\t\t\ta := adb.AnimeByID(f.AID)\n\t\t\tg := adb.GroupByID(f.GID)\n\t\t\t<-a\n\t\t\t<-g\n\t\t\tch <- f\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\n\/\/ Retrieves a File by its FID. Uses the UDP API.\nfunc (adb *AniDB) FileByID(fid FID) <-chan *File {\n\tkey := []fscache.CacheKey{\"fid\", fid}\n\n\tch := make(chan *File, 1)\n\n\tif fid < 1 {\n\t\tch <- nil\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\tic := make(chan notification, 1)\n\tgo func() { ch <- (<-ic).(*File); close(ch) }()\n\tif intentMap.Intent(ic, key...) {\n\t\treturn ch\n\t}\n\n\tif !Cache.IsValid(InvalidKeyCacheDuration, key...) {\n\t\tintentMap.NotifyClose((*File)(nil), key...)\n\t\treturn ch\n\t}\n\n\tf := fid.File()\n\tif !f.IsStale() {\n\t\tintentMap.NotifyClose(f, key...)\n\t\treturn ch\n\t}\n\n\tgo func() {\n\t\treply := <-adb.udp.SendRecv(\"FILE\",\n\t\t\tparamMap{\n\t\t\t\t\"fid\": fid,\n\t\t\t\t\"fmask\": fileFmask,\n\t\t\t\t\"amask\": fileAmask,\n\t\t\t})\n\n\t\tif reply.Error() == nil {\n\t\t\tadb.parseFileResponse(&f, reply, false)\n\n\t\t\tcacheFile(f)\n\t\t} else if reply.Code() == 320 {\n\t\t\tCache.SetInvalid(key...)\n\t\t}\n\n\t\tintentMap.NotifyClose(f, key...)\n\t}()\n\treturn ch\n}\n\nvar validEd2kHash = regexp.MustCompile(`\\A[[:xdigit:]]{32}\\z`)\n\n\/\/ Retrieves a File by its Ed2kHash + Filesize combination. Uses the UDP API.\nfunc (adb *AniDB) FileByEd2kSize(ed2k string, size int64) <-chan *File {\n\tkey := []fscache.CacheKey{\"fid\", \"by-ed2k\", ed2k, size}\n\n\tch := make(chan *File, 1)\n\n\tif size < 1 || !validEd2kHash.MatchString(ed2k) {\n\t\tch <- nil\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\t\/\/ AniDB always uses lower case hashes\n\ted2k = strings.ToLower(ed2k)\n\n\tic := make(chan notification, 1)\n\tgo func() {\n\t\tfid := (<-ic).(FID)\n\t\tif fid > 0 {\n\t\t\tch <- <-adb.FileByID(fid)\n\t\t}\n\t\tclose(ch)\n\t}()\n\tif intentMap.Intent(ic, key...) {\n\t\treturn ch\n\t}\n\n\tif !Cache.IsValid(InvalidKeyCacheDuration, key...) {\n\t\tintentMap.NotifyClose(FID(0), key...)\n\t\treturn ch\n\t}\n\n\tfid := FID(0)\n\n\tswitch ts, err := Cache.Get(&fid, key...); {\n\tcase err == nil && time.Now().Sub(ts) < FileCacheDuration:\n\t\tintentMap.NotifyClose(fid, key...)\n\t\treturn ch\n\t}\n\n\tgo func() {\n\t\treply := <-adb.udp.SendRecv(\"FILE\",\n\t\t\tparamMap{\n\t\t\t\t\"ed2k\": ed2k,\n\t\t\t\t\"size\": size,\n\t\t\t\t\"fmask\": fileFmask,\n\t\t\t\t\"amask\": fileAmask,\n\t\t\t})\n\n\t\tvar f *File\n\t\tif reply.Error() == nil {\n\t\t\tadb.parseFileResponse(&f, reply, false)\n\n\t\t\tfid = f.FID\n\n\t\t\tcacheFile(f)\n\t\t} else if reply.Code() == 320 { \/\/ file not found\n\t\t\tCache.SetInvalid(key...)\n\t\t} else if reply.Code() == 322 { \/\/ multiple files found\n\t\t\tpanic(\"Don't know what to do with \" + strings.Join(reply.Lines(), \"\\n\"))\n\t\t}\n\n\t\tintentMap.NotifyClose(fid, key...)\n\t}()\n\treturn ch\n}\n\nvar fileFmask = \"7fda7fe8\"\nvar fileAmask = \"00008000\"\n\nconst (\n\tstateCRCOK = 1 << iota\n\tstateCRCERR\n\tstateV2\n\tstateV3\n\tstateV4\n\tstateV5\n\tstateUncensored\n\tstateCensored\n)\n\nfunc sanitizeCodec(codec string) string {\n\tswitch codec {\n\tcase \"MP3 CBR\":\n\t\treturn \"MP3\"\n\tcase \"WMV9 (also WMV3)\":\n\t\treturn \"WMV9\"\n\tcase \"Ogg (Vorbis)\":\n\t\treturn \"Vorbis\"\n\tcase \"H264\/AVC\":\n\t\treturn \"H.264\"\n\t}\n\treturn codec\n}\n\nvar opedRE = regexp.MustCompile(`\\A(Opening|Ending)(?: (\\d+))?\\z`)\n\nfunc (adb *AniDB) parseFileResponse(f **File, reply udpapi.APIReply, calledFromFIDsByGID bool) bool {\n\tif reply.Error() != nil {\n\t\treturn false\n\t}\n\tif reply.Truncated() {\n\t\tpanic(\"Truncated\")\n\t}\n\n\tuidChan := make(chan UID, 1)\n\tif adb.udp.credentials != nil {\n\t\tgo func() { uidChan <- <-adb.GetUserUID(decrypt(adb.udp.credentials.username)) }()\n\t} else {\n\t\tuidChan <- 0\n\t\tclose(uidChan)\n\t}\n\n\tparts := strings.Split(reply.Lines()[1], \"|\")\n\tints := make([]int64, len(parts))\n\tfor i, p := range parts {\n\t\tints[i], _ = strconv.ParseInt(p, 10, 64)\n\t}\n\n\tpartial := false\n\n\trels := strings.Split(parts[5], \"'\")\n\trelList := make([]EID, 0, len(parts[5]))\n\trelated := make(RelatedEpisodes, len(parts[5]))\n\tfor _, rel := range rels {\n\t\tr := strings.Split(rel, \",\")\n\t\tif len(r) < 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\teid, _ := strconv.ParseInt(r[0], 10, 32)\n\t\tpct, _ := strconv.ParseInt(r[1], 10, 32)\n\t\trelList = append(relList, EID(eid))\n\t\trelated[EID(eid)] = float32(pct) \/ 100\n\n\t\tif pct != 100 {\n\t\t\tpartial = true\n\t\t}\n\t}\n\n\tepno := misc.ParseEpisodeList(parts[24])\n\tfid := FID(ints[0])\n\taid := AID(ints[1])\n\teid := EID(ints[2])\n\tgid := GID(ints[3])\n\tlid := LID(ints[4])\n\n\tif !epno[0].Start.ContainsEpisodes(epno[0].End) || len(epno) > 1 || len(relList) > 0 {\n\t\t\/\/ epno is broken -- we need to sanitize it\n\t\tthisEp := <-adb.EpisodeByID(eid)\n\t\tbad := false\n\t\tif thisEp != nil {\n\t\t\tparts := make([]string, 1, len(relList)+1)\n\t\t\tparts[0] = thisEp.Episode.String()\n\n\t\t\t\/\/ everything after this SHOULD be cache hits now, unless this is somehow\n\t\t\t\/\/ linked with an EID from a different anime (*stares at Haruhi*).\n\t\t\t\/\/ We don't want to use eps from different AIDs anyway, so that makes\n\t\t\t\/\/ the job easier.\n\n\t\t\t\/\/ We check if the related episodes are all in sequence from this one.\n\t\t\t\/\/ If they are, we build a new epno with the sequence. Otherwise,\n\t\t\t\/\/ our epno will only have the primary episode.\n\n\t\t\t\/\/ gather the episode numbers\n\t\t\tfor _, eid := range relList {\n\t\t\t\tif ep := eid.Episode(); ep != nil && ep.AID == thisEp.AID {\n\t\t\t\t\tparts = append(parts, ep.Episode.String())\n\t\t\t\t} else {\n\t\t\t\t\tbad = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttest := misc.EpisodeList{}\n\t\t\t\/\/ only if we didn't break the loop\n\t\t\tif !bad {\n\t\t\t\ttest = misc.ParseEpisodeList(strings.Join(parts, \",\"))\n\t\t\t}\n\n\t\t\tif partial {\n\t\t\t\tif calledFromFIDsByGID {\n\t\t\t\t\tepno = test\n\t\t\t\t\tadb.Logger.Printf(\"UDP!!! FID %d is only part of episode %s with no complementary files\", fid, epno)\n\t\t\t\t} else if len(test) == 1 && test[0].Start.Number == test[0].End.Number {\n\t\t\t\t\tfids := []int{}\n\n\t\t\t\t\tfor fid := range adb.FIDsByGID(thisEp, gid) {\n\t\t\t\t\t\tfids = append(fids, int(fid))\n\t\t\t\t\t}\n\t\t\t\t\tif len(fids) >= 1 && fids[0] == 0 {\n\t\t\t\t\t\tfids = fids[1:]\n\t\t\t\t\t\t\/\/ Only entry was API error\n\t\t\t\t\t\tif len(fids) == 0 {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tsort.Sort(sort.IntSlice(fids))\n\t\t\t\t\tidx := sort.SearchInts(fids, int(fid))\n\t\t\t\t\tif idx == len(fids) {\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"FID %d couldn't locate itself\", fid))\n\t\t\t\t\t}\n\n\t\t\t\t\tepno = test\n\n\t\t\t\t\t\/\/ equate pointers\n\t\t\t\t\tepno[0].End = epno[0].Start\n\n\t\t\t\t\tepno[0].Start.Parts = len(fids)\n\t\t\t\t\tepno[0].Start.Part = idx\n\t\t\t\t} else {\n\t\t\t\t\tpanic(fmt.Sprintf(\"Don't know what to do with partial episode %s (EID %d)\", test, eid))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ if they're all in sequence, then we'll only have a single range in the list\n\t\t\t\tif len(test) == 1 {\n\t\t\t\t\tepno = test\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ use only the primary epno then\n\t\t\t\t\tepno = misc.ParseEpisodeList(thisEp.Episode.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tepstr := epno.String()\n\tif len(epno) == 1 && epno[0].Type == misc.EpisodeTypeCredits && epno[0].Len() == 1 {\n\t\ttyp := \"\"\n\t\tn := 0\n\n\t\tif ep := <-adb.EpisodeByID(eid); ep == nil {\n\t\t} else if m := opedRE.FindStringSubmatch(ep.Titles[\"en\"]); len(m) > 2 {\n\t\t\tnum, err := strconv.ParseInt(m[2], 10, 32)\n\t\t\tif err == nil {\n\t\t\t\tn = int(num)\n\t\t\t}\n\n\t\t\ttyp = m[1]\n\t\t}\n\n\t\tgobi := fmt.Sprintf(\"%d\", n)\n\t\tif n == 0 {\n\t\t\tgobi = \"\"\n\t\t}\n\n\t\tswitch typ {\n\t\tcase \"Opening\":\n\t\t\tepstr = \"OP\" + gobi\n\t\tcase \"Ending\":\n\t\t\tepstr = \"ED\" + gobi\n\t\t}\n\t}\n\n\tversion := FileVersion(1)\n\tswitch i := ints[7]; {\n\tcase i&stateV5 != 0:\n\t\tversion = 5\n\tcase i&stateV4 != 0:\n\t\tversion = 4\n\tcase i&stateV3 != 0:\n\t\tversion = 3\n\tcase i&stateV2 != 0:\n\t\tversion = 2\n\t}\n\n\tcodecs := strings.Split(parts[14], \"'\")\n\tbitrates := strings.Split(parts[15], \"'\")\n\talangs := strings.Split(parts[20], \"'\")\n\tstreams := make([]AudioStream, len(codecs))\n\tfor i := range streams {\n\t\tbr, _ := strconv.ParseInt(bitrates[i], 10, 32)\n\t\tstreams[i] = AudioStream{\n\t\t\tBitrate: int(br),\n\t\t\tCodec: sanitizeCodec(codecs[i]),\n\t\t\tLanguage: Language(alangs[i]),\n\t\t}\n\t}\n\n\tsl := strings.Split(parts[21], \"'\")\n\tslangs := make([]Language, len(sl))\n\tfor i := range sl {\n\t\tslangs[i] = Language(sl[i])\n\t}\n\n\tdepth := int(ints[12])\n\tif depth == 0 {\n\t\tdepth = 8\n\t}\n\tres := strings.Split(parts[18], \"x\")\n\twidth, _ := strconv.ParseInt(res[0], 10, 32)\n\theight, _ := strconv.ParseInt(res[1], 10, 32)\n\tvideo := VideoInfo{\n\t\tBitrate: int(ints[17]),\n\t\tCodec: sanitizeCodec(parts[16]),\n\t\tColorDepth: depth,\n\t\tResolution: image.Rect(0, 0, int(width), int(height)),\n\t}\n\n\tlidMap := LIDMap{}\n\tif *f != nil {\n\t\tlidMap = (*f).LID\n\t}\n\n\tuid := <-uidChan\n\tif uid != 0 {\n\t\tlidMap[uid] = lid\n\t}\n\n\t*f = &File{\n\t\tFID: fid,\n\n\t\tAID: aid,\n\t\tEID: eid,\n\t\tGID: gid,\n\t\tLID: lidMap,\n\n\t\tEpisodeString: epstr,\n\t\tEpisodeNumber: epno,\n\n\t\tRelatedEpisodes: related,\n\t\tDeprecated: ints[6] != 0,\n\n\t\tCRCMatch: ints[7]&stateCRCOK != 0,\n\t\tBadCRC: ints[7]&stateCRCERR != 0,\n\t\tVersion: version,\n\t\tUncensored: ints[7]&stateUncensored != 0,\n\t\tCensored: ints[7]&stateCensored != 0,\n\n\t\tIncomplete: video.Resolution.Empty(),\n\n\t\tFilesize: ints[8],\n\t\tEd2kHash: parts[9],\n\t\tSHA1Hash: parts[10],\n\t\tCRC32: parts[11],\n\n\t\tSource: FileSource(parts[13]),\n\n\t\tAudioStreams: streams,\n\t\tSubtitleLanguages: slangs,\n\t\tVideoInfo: video,\n\t\tFileExtension: parts[19],\n\n\t\tLength: time.Duration(ints[22]) * time.Second,\n\t\tAirDate: time.Unix(ints[23], 0),\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"\/* Copyright 2014 Ooyala, Inc. All rights reserved.\n *\n * This file is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n * except in compliance with the License. You may obtain a copy of the License at\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is\n * distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and limitations under the License.\n *\/\n\npackage monitor\n\nimport (\n\t\"atlantis\/supervisor\/containers\/serialize\"\n\t\"atlantis\/supervisor\/rpc\/types\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/jigish\/go-flags\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tOK = iota\n\tWarning\n\tCritical\n\tUknown\n)\n\ntype Config struct {\n\tContainerFile string `toml:\"container_file\"`\n\tContainersDir string `toml:\"container_dir\"`\n\tInventoryDir string `toml:\"inventory_dir\"`\n\tSSHIdentity string `toml:\"ssh_identity\"`\n\tSSHUser string `toml:\"ssh_user\"`\n\tCheckName string `toml:\"check_name\"`\n\tCheckDir string `toml:\"check_dir\"`\n\tDefaultGroup string `toml:\"default_group\"`\n\tTimeoutDuration uint `toml:\"timeout_duration\"`\n\tVerbose bool `toml:\"verbose\"`\n}\n\ntype Opts struct {\n\tContainerFile string `short:\"f\" long:\"container-file\" description:\"file to get contianers information from\"`\n\tContainersDir string `short:\"s\" long:\"containers-dir\" description:\"directory where configs for each container live\"`\n\tSSHIdentity string `short:\"i\" long:\"ssh-identity\" description:\"file containing the SSH key for all containers\"`\n\tSSHUser string `short:\"u\" long:\"ssh-user\" description:\"user account to ssh into containers\"`\n\tCheckName string `short:\"n\" long:\"check-name\" description:\"service name that will appear in Nagios for the monitor\"`\n\tCheckDir string `short:\"d\" long:\"check-dir\" description:\"directory containing all the scripts for the monitoring checks\"`\n\tDefaultGroup string `short:\"g\" long:\"default-group\" description:\"default contact group to use if there is no valid group provided\"`\n\tConfig string `short:\"c\" long:\"config-file\" default:\"\/etc\/atlantis\/supervisor\/monitor.toml\" description:\"the config file to use\"`\n\tTimeoutDuration uint `short:\"t\" long:\"timeout-duration\" description:\"max number of seconds to wait for a monitoring check to finish\"`\n\tVerbose bool `short:\"v\" long:\"verbose\" default:false description:\"print verbose debug information\"`\n}\n\ntype ServiceCheck struct {\n\tService string\n\tUser string\n\tIdentity string\n\tHost string\n\tPort uint16\n\tScript string\n}\n\n\/\/TODO(mchandra):Need defaults defined by constants\nvar config = &Config{\n\tContainerFile: \"\/etc\/atlantis\/supervisor\/save\/containers\",\n\tContainersDir: \"\/etc\/atlantis\/containers\",\n\tInventoryDir: \"\/etc\/atlantis\/supervisor\/inventory\",\n\tSSHIdentity: \"\/opt\/atlantis\/supervisor\/master_id_rsa\",\n\tSSHUser: \"root\",\n\tCheckName: \"ContainerMonitor\",\n\tCheckDir: \"\/check_mk_checks\",\n\tDefaultGroup: \"atlantis_orphan_apps\",\n\tTimeoutDuration: 110,\n\tVerbose: false,\n}\n\nfunc (s *ServiceCheck) cmd() *exec.Cmd {\n\treturn silentSshCmd(s.User, s.Identity, s.Host, s.Script, s.Port)\n}\n\nfunc (s *ServiceCheck) timeOutMsg() string {\n\treturn fmt.Sprintf(\"%d %s - Timeout occured during check\\n\", Critical, s.Service)\n}\n\nfunc (s *ServiceCheck) errMsg(err error) string {\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%d %s - %s\\n\", Critical, s.Service, err.Error())\n\t} else {\n\t\treturn fmt.Sprintf(\"%d %s - Error encountered while monitoring the service\\n\", Critical, s.Service)\n\t}\n}\n\nfunc (s *ServiceCheck) validate(msg string) string {\n\tm := strings.SplitN(msg, \" \", 4)\n\tif len(m) > 1 && m[1] == s.Service {\n\t\treturn msg\n\t}\n\treturn s.errMsg(nil)\n}\n\nfunc (s *ServiceCheck) runCheck(done chan bool) {\n\tout, err := s.cmd().Output()\n\tif err != nil {\n\t\tfmt.Print(s.errMsg(err))\n\t} else {\n\t\tfmt.Print(s.validate(string(out)))\n\t}\n\tdone <- true\n}\n\nfunc (s *ServiceCheck) checkWithTimeout(results chan bool, d time.Duration) {\n\tdone := make(chan bool, 1)\n\tgo s.runCheck(done)\n\tselect {\n\tcase <-done:\n\t\tresults <- true\n\tcase <-time.After(d):\n\t\tfmt.Print(s.timeOutMsg())\n\t\tresults <- true\n\t}\n}\n\ntype ContainerCheck struct {\n\tName string\n\tUser string\n\tIdentity string\n\tDirectory string\n\tInventory string\n\tContactGroup string\n\tcontainer *types.Container\n}\n\ntype ContainerConfig struct {\n\tDependencies map[string]interface{}\n}\n\nfunc (c *ContainerCheck) getContactGroup() {\n\tc.ContactGroup = config.DefaultGroup\n\tconfig_file := filepath.Join(config.ContainersDir, c.container.ID, \"config.json\")\n\tvar cont_config ContainerConfig\n\tif err := serialize.RetrieveObject(config_file, &cont_config); err != nil {\n\t\tfmt.Printf(\"%d %s - Could not retrieve container config %s: %s\\n\", Critical, config.CheckName, config_file, err)\n\t} else {\n\t\tdep, ok := cont_config.Dependencies[\"cmk\"]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep not present, defaulting to %s contact group!\\n\", OK, config.CheckName, config.DefaultGroup)\n\t\t\treturn\n\t\t}\n\t\tcmk_dep, ok := dep.(map[string]interface{})\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep present, but value is not map[string]string!\\n\", Critical, config.CheckName)\n\t\t\treturn\n\t\t}\n\t\tval, ok := cmk_dep[\"contact_group\"]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep present, but no contact_group key!\\n\", Critical, config.CheckName)\n\t\t\treturn\n\t\t}\n\t\tgroup, ok := val.(string)\n\t\tif ok {\n\t\t\tc.ContactGroup = group\n\t\t\tc.verifyContactGroup()\n\t\t} else {\n\t\t\tfmt.Printf(\"%d %s - Value for contact_group key of cmk dep is not a string!\\n\", Critical, config.CheckName)\n\t\t}\n\t}\n}\n\nfunc (c *ContainerCheck) setContactGroup(name string) {\n\tif len(c.ContactGroup) == 0 {\n\t\tc.getContactGroup()\n\t}\n\tinventoryPath := path.Join(c.Inventory, name)\n\tif _, err := os.Stat(inventoryPath); os.IsNotExist(err) {\n\t\toutput, err := exec.Command(\"\/usr\/bin\/cmk_admin\", \"-s\", name, \"-a\", c.ContactGroup).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%d %s - Failure to update contact group for service %s. Error: %s\\n\", OK, config.CheckName, name, err.Error())\n\t\t} else {\n\t\t\tos.Create(inventoryPath)\n\t\t}\n\t\tif config.Verbose {\n\t\t\tfmt.Printf(\"\\n\/usr\/bin\/cmk_admin -s %s -a %s\\n%s\\n\\n\", name, c.ContactGroup, output)\n\t\t}\n\t}\n}\n\nfunc (c *ContainerCheck) verifyContactGroup() {\n\tvar output bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmk := exec.Command(\"\/usr\/bin\/cmk_admin\", \"-l\")\n\tegrep := exec.Command(\"egrep\", \"-x\", fmt.Sprintf(\"[*]?\\\\s+(%s)\", c.ContactGroup))\n\tvar err error\n\tif egrep.Stdin, err = cmk.StdoutPipe(); err != nil {\n\t\tfmt.Printf(\"%d %s - Error piping output while verifying %s. Error: %s\\n\", Critical, config.CheckName, c.ContactGroup, err.Error())\n\t\tc.ContactGroup = config.DefaultGroup\n\t\treturn\n\t}\n\tcmk.Stderr = &stderr\n\tegrep.Stdout, egrep.Stderr = &output, &stderr\n\tcmds := [2]*exec.Cmd{cmk, egrep}\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tfmt.Printf(\"%d %s - Error starting %s while verifying %s. Error: %s\\n\", Critical, config.CheckName, cmd.Path, c.ContactGroup, err.Error())\n\t\t\tc.ContactGroup = config.DefaultGroup\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tfmt.Printf(\"%d %s - Error running %s while verfying %s. Error: %s\\n\", Critical, config.CheckName, cmd.Path, c.ContactGroup, err.Error())\n\t\t\tc.ContactGroup = config.DefaultGroup\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *ContainerCheck) Run(t time.Duration, done chan bool) {\n\tc.setContactGroup(c.Name)\n\tdefer func() { done <- true }()\n\to, err := silentSshCmd(c.User, c.Identity, c.container.Host, \"ls \"+c.Directory, c.container.SSHPort).Output()\n\tif err != nil {\n\t\tfmt.Printf(\"%d %s - Error getting checks for container: %s\\n\", Critical, c.Name, err.Error())\n\t\treturn\n\t}\n\tfmt.Printf(\"%d %s - Got checks for container\\n\", OK, c.Name)\n\tscripts := strings.Split(strings.TrimSpace(string(o)), \"\\n\")\n\tif len(scripts) == 0 || len(scripts[0]) == 0 {\n\t\t\/\/ nothing to check on this container, exit\n\t\treturn\n\t}\n\tc.checkAll(scripts, t)\n}\n\nfunc (c *ContainerCheck) checkAll(scripts []string, t time.Duration) {\n\tresults := make(chan bool, len(scripts))\n\tfor _, s := range scripts {\n\t\tserviceName := fmt.Sprintf(\"%s_%s\", strings.Split(s, \".\")[0], c.container.ID)\n\t\tc.setContactGroup(serviceName)\n\t\tgo c.serviceCheck(s).checkWithTimeout(results, t)\n\t}\n\tfor _ = range scripts {\n\t\t<-results\n\t}\n}\n\nfunc (c *ContainerCheck) serviceCheck(script string) *ServiceCheck {\n\t\/\/ The full path to the script is required\n\tcommand := fmt.Sprintf(\"%s\/%s %d %s\", c.Directory, script, c.container.PrimaryPort, c.container.ID)\n\t\/\/ The service name is obtained be removing the file extension from the script and appending the container\n\t\/\/ id\n\tserviceName := fmt.Sprintf(\"%s_%s\", strings.Split(script, \".\")[0], c.container.ID)\n\treturn &ServiceCheck{serviceName, c.User, c.Identity, c.container.Host, c.container.SSHPort, command}\n}\n\nfunc silentSshCmd(user, identity, host, cmd string, port uint16) *exec.Cmd {\n\targs := []string{\"-q\", user + \"@\" + host, \"-i\", identity, \"-p\", fmt.Sprintf(\"%d\", port), \"-o\", \"StrictHostKeyChecking=no\", cmd}\n\treturn exec.Command(\"ssh\", args...)\n}\n\nfunc overlayConfig() {\n\topts := &Opts{}\n\tflags.Parse(opts)\n\tif opts.Config != \"\" {\n\t\t_, err := toml.DecodeFile(opts.Config, config)\n\t\tif err != nil {\n\t\t\t\/\/ no need to panic here. we have reasonable defaults.\n\t\t}\n\t}\n\tif opts.ContainerFile != \"\" {\n\t\tconfig.ContainerFile = opts.ContainerFile\n\t}\n\tif opts.ContainersDir != \"\" {\n\t\tconfig.ContainersDir = opts.ContainersDir\n\t}\n\tif opts.SSHIdentity != \"\" {\n\t\tconfig.SSHIdentity = opts.SSHIdentity\n\t}\n\tif opts.SSHUser != \"\" {\n\t\tconfig.SSHUser = opts.SSHUser\n\t}\n\tif opts.CheckDir != \"\" {\n\t\tconfig.CheckDir = opts.CheckDir\n\t}\n\tif opts.CheckName != \"\" {\n\t\tconfig.CheckName = opts.CheckName\n\t}\n\tif opts.DefaultGroup != \"\" {\n\t\tconfig.DefaultGroup = opts.DefaultGroup\n\t}\n\tif opts.TimeoutDuration != 0 {\n\t\tconfig.TimeoutDuration = opts.TimeoutDuration\n\t}\n\tif opts.Verbose {\n\t\tconfig.Verbose = true\n\t}\n}\n\n\/\/file containing containers and service name to show in Nagios for the monitor itself\nfunc Run() {\n\toverlayConfig()\n\tvar contMap map[string]*types.Container\n\t\/\/Check if folder exists\n\t_, err := os.Stat(config.ContainerFile)\n\tif os.IsNotExist(err) {\n\t\tfmt.Printf(\"%d %s - Container file does not exists %s. Likely no live containers present.\\n\", OK, config.CheckName, config.ContainerFile)\n\t\treturn\n\t}\n\tif err := serialize.RetrieveObject(config.ContainerFile, &contMap); err != nil {\n\t\tfmt.Printf(\"%d %s - Error retrieving %s: %s\\n\", Critical, config.CheckName, config.ContainerFile, err)\n\t\treturn\n\t}\n\tdone := make(chan bool, len(contMap))\n\tconfig.SSHIdentity = strings.Replace(config.SSHIdentity, \"~\", os.Getenv(\"HOME\"), 1)\n\tfor _, c := range contMap {\n\t\tif c.Host == \"\" {\n\t\t\tc.Host = \"localhost\"\n\t\t}\n\t\tcheck := &ContainerCheck{config.CheckName + \"_\" + c.ID, config.SSHUser, config.SSHIdentity, config.CheckDir, config.InventoryDir, \"\", c}\n\t\tgo check.Run(time.Duration(config.TimeoutDuration)*time.Second, done)\n\t}\n\tfor _ = range contMap {\n\t\t<-done\n\t}\n\t\/\/ Clean up inventories from containers that no longer exist\n\terr = filepath.Walk(config.InventoryDir, func(path string, _ os.FileInfo, _ error) error {\n\t\tif path == config.InventoryDir {\n\t\t\treturn nil\n\t\t}\n\t\tvar err error\n\t\tsplit := strings.Split(path, \"_\")\n\t\tcont := split[len(split)-1]\n\t\tif _, ok := contMap[cont]; !ok {\n\t\t\terr = os.Remove(path)\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"%d %s - Error iterating over inventory to delete obsolete markers. Error: %s\\n\", OK, config.CheckName, err.Error())\n\t}\n}\nsimplify verify as per CR\/* Copyright 2014 Ooyala, Inc. All rights reserved.\n *\n * This file is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n * except in compliance with the License. You may obtain a copy of the License at\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under the License is\n * distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and limitations under the License.\n *\/\n\npackage monitor\n\nimport (\n\t\"atlantis\/supervisor\/containers\/serialize\"\n\t\"atlantis\/supervisor\/rpc\/types\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/jigish\/go-flags\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tOK = iota\n\tWarning\n\tCritical\n\tUknown\n)\n\ntype Config struct {\n\tContainerFile string `toml:\"container_file\"`\n\tContainersDir string `toml:\"container_dir\"`\n\tInventoryDir string `toml:\"inventory_dir\"`\n\tSSHIdentity string `toml:\"ssh_identity\"`\n\tSSHUser string `toml:\"ssh_user\"`\n\tCheckName string `toml:\"check_name\"`\n\tCheckDir string `toml:\"check_dir\"`\n\tDefaultGroup string `toml:\"default_group\"`\n\tTimeoutDuration uint `toml:\"timeout_duration\"`\n\tVerbose bool `toml:\"verbose\"`\n}\n\ntype Opts struct {\n\tContainerFile string `short:\"f\" long:\"container-file\" description:\"file to get container information\"`\n\tContainersDir string `short:\"s\" long:\"containers-dir\" description:\"directory containing configs for each container\"`\n\tSSHIdentity string `short:\"i\" long:\"ssh-identity\" description:\"file containing the SSH key for all containers\"`\n\tSSHUser string `short:\"u\" long:\"ssh-user\" description:\"user account to ssh into containers\"`\n\tCheckName string `short:\"n\" long:\"check-name\" description:\"service name that will appear in Nagios for the monitor\"`\n\tCheckDir string `short:\"d\" long:\"check-dir\" description:\"directory containing all the scripts for the monitoring checks\"`\n\tDefaultGroup string `short:\"g\" long:\"default-group\" description:\"default contact group to use if there is no valid group provided\"`\n\tConfig string `short:\"c\" long:\"config-file\" default:\"\/etc\/atlantis\/supervisor\/monitor.toml\" description:\"the config file to use\"`\n\tTimeoutDuration uint `short:\"t\" long:\"timeout-duration\" description:\"max number of seconds to wait for a monitoring check to finish\"`\n\tVerbose bool `short:\"v\" long:\"verbose\" default:false description:\"print verbose debug information\"`\n}\n\ntype ServiceCheck struct {\n\tService string\n\tUser string\n\tIdentity string\n\tHost string\n\tPort uint16\n\tScript string\n}\n\n\/\/TODO(mchandra):Need defaults defined by constants\nvar config = &Config{\n\tContainerFile: \"\/etc\/atlantis\/supervisor\/save\/containers\",\n\tContainersDir: \"\/etc\/atlantis\/containers\",\n\tInventoryDir: \"\/etc\/atlantis\/supervisor\/inventory\",\n\tSSHIdentity: \"\/opt\/atlantis\/supervisor\/master_id_rsa\",\n\tSSHUser: \"root\",\n\tCheckName: \"ContainerMonitor\",\n\tCheckDir: \"\/check_mk_checks\",\n\tDefaultGroup: \"atlantis_orphan_apps\",\n\tTimeoutDuration: 110,\n\tVerbose: false,\n}\n\nfunc (s *ServiceCheck) cmd() *exec.Cmd {\n\treturn silentSshCmd(s.User, s.Identity, s.Host, s.Script, s.Port)\n}\n\nfunc (s *ServiceCheck) timeOutMsg() string {\n\treturn fmt.Sprintf(\"%d %s - Timeout occured during check\\n\", Critical, s.Service)\n}\n\nfunc (s *ServiceCheck) errMsg(err error) string {\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%d %s - %s\\n\", Critical, s.Service, err.Error())\n\t} else {\n\t\treturn fmt.Sprintf(\"%d %s - Error encountered while monitoring the service\\n\", Critical, s.Service)\n\t}\n}\n\nfunc (s *ServiceCheck) validate(msg string) string {\n\tm := strings.SplitN(msg, \" \", 4)\n\tif len(m) > 1 && m[1] == s.Service {\n\t\treturn msg\n\t}\n\treturn s.errMsg(nil)\n}\n\nfunc (s *ServiceCheck) runCheck(done chan bool) {\n\tout, err := s.cmd().Output()\n\tif err != nil {\n\t\tfmt.Print(s.errMsg(err))\n\t} else {\n\t\tfmt.Print(s.validate(string(out)))\n\t}\n\tdone <- true\n}\n\nfunc (s *ServiceCheck) checkWithTimeout(results chan bool, d time.Duration) {\n\tdone := make(chan bool, 1)\n\tgo s.runCheck(done)\n\tselect {\n\tcase <-done:\n\t\tresults <- true\n\tcase <-time.After(d):\n\t\tfmt.Print(s.timeOutMsg())\n\t\tresults <- true\n\t}\n}\n\ntype ContainerCheck struct {\n\tName string\n\tUser string\n\tIdentity string\n\tDirectory string\n\tInventory string\n\tContactGroup string\n\tcontainer *types.Container\n}\n\ntype ContainerConfig struct {\n\tDependencies map[string]interface{}\n}\n\nfunc (c *ContainerCheck) verifyContactGroup(group string) bool {\n\toutput, err := exec.Command(\"\/usr\/bin\/cmk_admin\", \"-l\").Output()\n\tfor _, l := range strings.Split(output, \"\\n\") {\n\t\tcg := strings.TrimSpace(strings.TrimPrefix(l, \"*\"))\n\t\tif cg == c.ContactGroup {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *ContainerCheck) getContactGroup() {\n\tc.ContactGroup = config.DefaultGroup\n\tconfig_file := filepath.Join(config.ContainersDir, c.container.ID, \"config.json\")\n\tvar cont_config ContainerConfig\n\tif err := serialize.RetrieveObject(config_file, &cont_config); err != nil {\n\t\tfmt.Printf(\"%d %s - Could not retrieve container config %s: %s\\n\", Critical, config.CheckName, config_file, err)\n\t} else {\n\t\tdep, ok := cont_config.Dependencies[\"cmk\"]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep not present, defaulting to %s contact group!\\n\", OK, config.CheckName, config.DefaultGroup)\n\t\t\treturn\n\t\t}\n\t\tcmk_dep, ok := dep.(map[string]interface{})\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep present, but value is not map[string]string!\\n\", Critical, config.CheckName)\n\t\t\treturn\n\t\t}\n\t\tval, ok := cmk_dep[\"contact_group\"]\n\t\tif !ok {\n\t\t\tfmt.Printf(\"%d %s - cmk dep present, but no contact_group key!\\n\", Critical, config.CheckName)\n\t\t\treturn\n\t\t}\n\t\tgroup, ok := val.(string)\n\t\tif ok {\n\t\t\tgroup = strings.ToLower(group)\n\t\t\tif c.verifyContactGroup(group) {\n\t\t\t\tc.ContactGroup = group\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%d %s - Specified contact_group does not exist in cmk! Falling back to default group %s.\\n\", Critical, config.CheckName, config.DefaultGroup)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"%d %s - Value for contact_group key of cmk dep is not a string!\\n\", Critical, config.CheckName)\n\t\t}\n\t}\n}\n\nfunc (c *ContainerCheck) setContactGroup(name string) {\n\tif len(c.ContactGroup) == 0 {\n\t\tc.getContactGroup()\n\t}\n\tinventoryPath := path.Join(c.Inventory, name)\n\tif _, err := os.Stat(inventoryPath); os.IsNotExist(err) {\n\t\toutput, err := exec.Command(\"\/usr\/bin\/cmk_admin\", \"-s\", name, \"-a\", c.ContactGroup).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%d %s - Failure to update contact group for service %s. Error: %s\\n\", OK, config.CheckName, name, err.Error())\n\t\t} else {\n\t\t\tos.Create(inventoryPath)\n\t\t}\n\t\tif config.Verbose {\n\t\t\tfmt.Printf(\"\\n\/usr\/bin\/cmk_admin -s %s -a %s\\n%s\\n\\n\", name, c.ContactGroup, output)\n\t\t}\n\t}\n}\n\nfunc (c *ContainerCheck) Run(t time.Duration, done chan bool) {\n\tc.setContactGroup(c.Name)\n\tdefer func() { done <- true }()\n\to, err := silentSshCmd(c.User, c.Identity, c.container.Host, \"ls \"+c.Directory, c.container.SSHPort).Output()\n\tif err != nil {\n\t\tfmt.Printf(\"%d %s - Error getting checks for container: %s\\n\", Critical, c.Name, err.Error())\n\t\treturn\n\t}\n\tfmt.Printf(\"%d %s - Got checks for container\\n\", OK, c.Name)\n\tscripts := strings.Split(strings.TrimSpace(string(o)), \"\\n\")\n\tif len(scripts) == 0 || len(scripts[0]) == 0 {\n\t\t\/\/ nothing to check on this container, exit\n\t\treturn\n\t}\n\tc.checkAll(scripts, t)\n}\n\nfunc (c *ContainerCheck) checkAll(scripts []string, t time.Duration) {\n\tresults := make(chan bool, len(scripts))\n\tfor _, s := range scripts {\n\t\tserviceName := fmt.Sprintf(\"%s_%s\", strings.Split(s, \".\")[0], c.container.ID)\n\t\tc.setContactGroup(serviceName)\n\t\tgo c.serviceCheck(s).checkWithTimeout(results, t)\n\t}\n\tfor _ = range scripts {\n\t\t<-results\n\t}\n}\n\nfunc (c *ContainerCheck) serviceCheck(script string) *ServiceCheck {\n\t\/\/ The full path to the script is required\n\tcommand := fmt.Sprintf(\"%s\/%s %d %s\", c.Directory, script, c.container.PrimaryPort, c.container.ID)\n\t\/\/ The service name is obtained be removing the file extension from the script and appending the container\n\t\/\/ id\n\tserviceName := fmt.Sprintf(\"%s_%s\", strings.Split(script, \".\")[0], c.container.ID)\n\treturn &ServiceCheck{serviceName, c.User, c.Identity, c.container.Host, c.container.SSHPort, command}\n}\n\nfunc silentSshCmd(user, identity, host, cmd string, port uint16) *exec.Cmd {\n\targs := []string{\"-q\", user + \"@\" + host, \"-i\", identity, \"-p\", fmt.Sprintf(\"%d\", port), \"-o\", \"StrictHostKeyChecking=no\", cmd}\n\treturn exec.Command(\"ssh\", args...)\n}\n\nfunc overlayConfig() {\n\topts := &Opts{}\n\tflags.Parse(opts)\n\tif opts.Config != \"\" {\n\t\t_, err := toml.DecodeFile(opts.Config, config)\n\t\tif err != nil {\n\t\t\t\/\/ no need to panic here. we have reasonable defaults.\n\t\t}\n\t}\n\tif opts.ContainerFile != \"\" {\n\t\tconfig.ContainerFile = opts.ContainerFile\n\t}\n\tif opts.ContainersDir != \"\" {\n\t\tconfig.ContainersDir = opts.ContainersDir\n\t}\n\tif opts.SSHIdentity != \"\" {\n\t\tconfig.SSHIdentity = opts.SSHIdentity\n\t}\n\tif opts.SSHUser != \"\" {\n\t\tconfig.SSHUser = opts.SSHUser\n\t}\n\tif opts.CheckDir != \"\" {\n\t\tconfig.CheckDir = opts.CheckDir\n\t}\n\tif opts.CheckName != \"\" {\n\t\tconfig.CheckName = opts.CheckName\n\t}\n\tif opts.DefaultGroup != \"\" {\n\t\tconfig.DefaultGroup = opts.DefaultGroup\n\t}\n\tif opts.TimeoutDuration != 0 {\n\t\tconfig.TimeoutDuration = opts.TimeoutDuration\n\t}\n\tif opts.Verbose {\n\t\tconfig.Verbose = true\n\t}\n}\n\n\/\/file containing containers and service name to show in Nagios for the monitor itself\nfunc Run() {\n\toverlayConfig()\n\tvar contMap map[string]*types.Container\n\t\/\/Check if folder exists\n\t_, err := os.Stat(config.ContainerFile)\n\tif os.IsNotExist(err) {\n\t\tfmt.Printf(\"%d %s - Container file does not exists %s. Likely no live containers present.\\n\", OK, config.CheckName, config.ContainerFile)\n\t\treturn\n\t}\n\tif err := serialize.RetrieveObject(config.ContainerFile, &contMap); err != nil {\n\t\tfmt.Printf(\"%d %s - Error retrieving %s: %s\\n\", Critical, config.CheckName, config.ContainerFile, err)\n\t\treturn\n\t}\n\tdone := make(chan bool, len(contMap))\n\tconfig.SSHIdentity = strings.Replace(config.SSHIdentity, \"~\", os.Getenv(\"HOME\"), 1)\n\tfor _, c := range contMap {\n\t\tif c.Host == \"\" {\n\t\t\tc.Host = \"localhost\"\n\t\t}\n\t\tcheck := &ContainerCheck{config.CheckName + \"_\" + c.ID, config.SSHUser, config.SSHIdentity, config.CheckDir, config.InventoryDir, \"\", c}\n\t\tgo check.Run(time.Duration(config.TimeoutDuration)*time.Second, done)\n\t}\n\tfor _ = range contMap {\n\t\t<-done\n\t}\n\t\/\/ Clean up inventories from containers that no longer exist\n\terr = filepath.Walk(config.InventoryDir, func(path string, _ os.FileInfo, _ error) error {\n\t\tif path == config.InventoryDir {\n\t\t\treturn nil\n\t\t}\n\t\tvar err error\n\t\tsplit := strings.Split(path, \"_\")\n\t\tcont := split[len(split)-1]\n\t\tif _, ok := contMap[cont]; !ok {\n\t\t\terr = os.Remove(path)\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"%d %s - Error iterating over inventory to delete obsolete markers. Error: %s\\n\", OK, config.CheckName, err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tfileTypeGallery = 0 \/\/only image extension\n\tfileTypeGlobal = 1 \/\/only image extension\n\tfileTypeVideo = 2 \/\/only video extension\n)\n\n\/\/serveFile serves image or video files\nfunc serveFile(w http.ResponseWriter, r *http.Request) {\n\n\tpath := filepath.Join(filepath.Dir(os.Args[0]), galleryPath, r.FormValue(\"gallery\"))\n\tfile := strings.Trim(r.FormValue(\"name\"), `.\/\\`)\n\tfiletype, _ := strconv.Atoi(r.FormValue(\"filetype\"))\n\tthumb, _ := strconv.ParseBool(r.FormValue(\"thumb\"))\n\n\tswitch filetype {\n\tcase fileTypeGlobal:\n\t\t\/\/do nothing\n\tcase fileTypeGallery:\n\t\tpath = filepath.Join(path, galleryImgPath)\n\tcase fileTypeVideo:\n\t\tpath = filepath.Join(path, galleryVidPath)\n\t}\n\tif thumb {\n\t\tpath = filepath.Join(path, thumbPath)\n\t}\n\tpath = filepath.Join(path, file)\n\t\/\/extra safety to serve only allowed filetypes\n\text := filepath.Ext(path)\n\tif (thumb || filetype == fileTypeGallery || filetype == fileTypeGlobal) && !isPictureExt(ext) {\n\t\tfmt.Fprintln(w, \"Not an image file\")\n\t\treturn\n\t}\n\tif (!thumb && filetype == fileTypeVideo) && !isVideoExt(ext) {\n\t\tfmt.Fprintln(w, \"Not a video file\")\n\t\treturn\n\t}\n\n\t\/\/Or ioutil.ReadFile...\n\tfh, err := os.Open(path)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cannot open file %s\", r.FormValue(\"file\"))\n\t\treturn\n\t}\n\tdefer fh.Close()\n\n\t\/\/serve file\n\tstat, err := fh.Stat()\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cannot open file %s\", r.FormValue(\"file\"))\n\t\treturn\n\t}\n\thttp.ServeContent(w, r, r.FormValue(\"file\"), stat.ModTime(), fh)\n\treturn\n}\n\nfunc isPictureExt(ext string) bool {\n\tswitch strings.ToLower(ext) {\n\tcase \".jpg\", \".jpeg\", \".gif\", \".png\":\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isVideoExt(ext string) bool {\n\tswitch strings.ToLower(ext) {\n\tcase \".mp4\", \".webm\", \".avi\", \".mkv\", \".mpeg\", \".mpg\":\n\t\treturn true\n\t}\n\treturn false\n}\nuse filename varpackage main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tfileTypeGallery = 0 \/\/only image extension\n\tfileTypeGlobal = 1 \/\/only image extension\n\tfileTypeVideo = 2 \/\/only video extension\n)\n\n\/\/serveFile serves image or video files\nfunc serveFile(w http.ResponseWriter, r *http.Request) {\n\n\tpath := filepath.Join(filepath.Dir(os.Args[0]), galleryPath, r.FormValue(\"gallery\"))\n\tfile := strings.Trim(r.FormValue(\"name\"), `.\/\\`)\n\tfiletype, _ := strconv.Atoi(r.FormValue(\"filetype\"))\n\tthumb, _ := strconv.ParseBool(r.FormValue(\"thumb\"))\n\n\tswitch filetype {\n\tcase fileTypeGlobal:\n\t\t\/\/do nothing\n\tcase fileTypeGallery:\n\t\tpath = filepath.Join(path, galleryImgPath)\n\tcase fileTypeVideo:\n\t\tpath = filepath.Join(path, galleryVidPath)\n\t}\n\tif thumb {\n\t\tpath = filepath.Join(path, thumbPath)\n\t}\n\tpath = filepath.Join(path, file)\n\t\/\/extra safety to serve only allowed filetypes\n\text := filepath.Ext(path)\n\tif (thumb || filetype == fileTypeGallery || filetype == fileTypeGlobal) && !isPictureExt(ext) {\n\t\tfmt.Fprintln(w, \"Not an image file\")\n\t\treturn\n\t}\n\tif (!thumb && filetype == fileTypeVideo) && !isVideoExt(ext) {\n\t\tfmt.Fprintln(w, \"Not a video file\")\n\t\treturn\n\t}\n\n\t\/\/Open file\n\tfh, err := os.Open(path)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cannot open file %s\", r.FormValue(\"file\"))\n\t\treturn\n\t}\n\tdefer fh.Close()\n\n\t\/\/Serve file\n\tstat, err := fh.Stat()\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cannot open file %s\", r.FormValue(\"file\"))\n\t\treturn\n\t}\n\thttp.ServeContent(w, r, file, stat.ModTime(), fh)\n\treturn\n}\n\nfunc isPictureExt(ext string) bool {\n\tswitch strings.ToLower(ext) {\n\tcase \".jpg\", \".jpeg\", \".gif\", \".png\":\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isVideoExt(ext string) bool {\n\tswitch strings.ToLower(ext) {\n\tcase \".mp4\", \".webm\", \".avi\", \".mkv\", \".mpeg\", \".mpg\":\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"package asset\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/api\/appdb\"\n\tchainjson \"chain\/encoding\/json\"\n\t\"chain\/errors\"\n\t\"chain\/fedchain\/bc\"\n)\n\ntype nodeTx struct {\n\tID bc.Hash `json:\"id\"`\n\tTime time.Time `json:\"transaction_time\"`\n\tInputs []nodeTxInput `json:\"inputs\"`\n\tOutputs []nodeTxOutput `json:\"outputs\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n}\n\ntype nodeTxInput struct {\n\tType string `json:\"type\"`\n\tTxID bc.Hash `json:\"transaction_id,omitempty\"`\n\tTxOut uint32 `json:\"transaction_output,omitempty\"`\n\tAssetID bc.AssetID `json:\"asset_id\"`\n\tAssetLabel string `json:\"asset_label,omitempty\"`\n\tAmount uint64 `json:\"amount\"`\n\tAddress chainjson.HexBytes `json:\"address,omitempty\"`\n\tAccountID string `json:\"account_id,omitempty\"`\n\tAccountLabel string `json:\"account_label\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n\n\tmNodeID string\n}\n\ntype nodeTxOutput struct {\n\tAssetID bc.AssetID `json:\"asset_id\"`\n\tAssetLabel string `json:\"asset_label,omitempty\"`\n\tAmount uint64 `json:\"amount\"`\n\tAddress chainjson.HexBytes `json:\"address,omitempty\"`\n\tAccountID string `json:\"account_id,omitempty\"`\n\tAccountLabel string `json:\"account_label,omitempty\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n\n\tmNodeID string\n}\n\n\/\/ WriteNodeTxs persists a transaction along with its metadata\n\/\/ for every node (issuer, manager) associated with the transaction.\nfunc WriteNodeTxs(ctx context.Context, tx *bc.Tx, ts time.Time) error {\n\tins, outs, err := appdb.GetActUTXOs(ctx, tx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fetching utxo account data\")\n\t}\n\n\tassetMap := make(map[string]*appdb.ActAsset)\n\tvar assetIDs []string\n\tfor _, out := range tx.Outputs {\n\t\tassetIDs = append(assetIDs, out.AssetID.String())\n\t}\n\tassets, err := appdb.GetActAssets(ctx, assetIDs)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting assets\")\n\t}\n\tfor _, asset := range assets {\n\t\tassetMap[asset.ID] = asset\n\t}\n\n\taccountMap := make(map[string]*appdb.ActAccount)\n\tnodeAccounts := make(map[string][]string)\n\tvar accountIDs []string\n\tfor _, utxo := range append(ins, outs...) {\n\t\tif utxo.AccountID != \"\" {\n\t\t\taccountIDs = append(accountIDs, utxo.AccountID)\n\t\t}\n\t}\n\taccounts, err := appdb.GetActAccounts(ctx, accountIDs)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting accounts\")\n\t}\n\tfor _, acc := range accounts {\n\t\taccountMap[acc.ID] = acc\n\t\tnodeAccounts[acc.ManagerNodeID] = append(nodeAccounts[acc.ManagerNodeID], acc.ID)\n\t}\n\n\tnodeTx, err := generateNodeTx(tx, ins, outs, assetMap, accountMap, ts)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"generating master\")\n\t}\n\n\tif tx.IsIssuance() {\n\t\tfilteredTx, err := json.Marshal(filterAccounts(nodeTx, \"\"))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"filtering tx\")\n\t\t}\n\t\terr = appdb.WriteIssuerTx(ctx, tx.Hash.String(), filteredTx, assets[0].IssuerNodeID, assets[0].ID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"writing issuer tx\")\n\t\t}\n\t}\n\n\tfor nodeID, accountIDs := range nodeAccounts {\n\t\tfilteredTx, err := json.Marshal(filterAccounts(nodeTx, nodeID))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"filtering tx\")\n\t\t}\n\t\terr = appdb.WriteManagerTx(ctx, tx.Hash.String(), filteredTx, nodeID, accountIDs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"writing manager tx\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateNodeTx(\n\ttx *bc.Tx,\n\tins []*appdb.ActUTXO,\n\touts []*appdb.ActUTXO,\n\tassetMap map[string]*appdb.ActAsset,\n\taccountMap map[string]*appdb.ActAccount,\n\tts time.Time,\n) (*nodeTx, error) {\n\tactTx := &nodeTx{\n\t\tID: tx.Hash,\n\t\tMetadata: tx.Metadata,\n\t\tTime: ts,\n\t}\n\tfor i, in := range tx.Inputs {\n\t\tif in.IsIssuance() {\n\t\t\tvar (\n\t\t\t\tamt uint64\n\t\t\t\tassetID bc.AssetID\n\t\t\t)\n\t\t\tfor _, o := range tx.Outputs {\n\t\t\t\tamt += o.Value\n\t\t\t\tassetID = o.AssetID\n\t\t\t}\n\t\t\tasset := assetMap[assetID.String()]\n\t\t\tvar label string\n\t\t\tif asset != nil {\n\t\t\t\tlabel = asset.Label\n\t\t\t}\n\t\t\tactTx.Inputs = append(actTx.Inputs, nodeTxInput{\n\t\t\t\tType: \"issuance\",\n\t\t\t\tAssetID: assetID,\n\t\t\t\tAssetLabel: label,\n\t\t\t\tAmount: amt,\n\t\t\t\tMetadata: in.Metadata,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tassetID, err := bc.ParseHash(ins[i].AssetID)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"parsing utxo asset id\")\n\t\t}\n\t\tasset := assetMap[ins[i].AssetID]\n\t\tvar assetLabel string\n\t\tif asset != nil {\n\t\t\tassetLabel = asset.Label\n\t\t}\n\n\t\taccount := accountMap[ins[i].AccountID]\n\t\tvar accountLabel, mNodeID string\n\t\tif account != nil {\n\t\t\taccountLabel = account.Label\n\t\t\tmNodeID = account.ManagerNodeID\n\t\t}\n\n\t\tactTx.Inputs = append(actTx.Inputs, nodeTxInput{\n\t\t\tType: \"transfer\",\n\t\t\tTxID: in.Previous.Hash,\n\t\t\tTxOut: in.Previous.Index,\n\t\t\tAssetID: bc.AssetID(assetID),\n\t\t\tAssetLabel: assetLabel,\n\t\t\tAmount: ins[i].Amount,\n\t\t\tAccountID: ins[i].AccountID,\n\t\t\tAccountLabel: accountLabel,\n\t\t\tAddress: ins[i].Script,\n\t\t\tMetadata: in.Metadata,\n\t\t\tmNodeID: mNodeID,\n\t\t})\n\t}\n\n\tfor i, out := range tx.Outputs {\n\t\tasset := assetMap[out.AssetID.String()]\n\t\tvar assetLabel string\n\t\tif asset != nil {\n\t\t\tassetLabel = asset.Label\n\t\t}\n\n\t\taccount := accountMap[outs[i].AccountID]\n\t\tvar accountLabel, mNodeID string\n\t\tif account != nil {\n\t\t\taccountLabel = account.Label\n\t\t\tmNodeID = account.ManagerNodeID\n\t\t}\n\n\t\tactTx.Outputs = append(actTx.Outputs, nodeTxOutput{\n\t\t\tAssetID: out.AssetID,\n\t\t\tAssetLabel: assetLabel,\n\t\t\tAmount: out.Value,\n\t\t\tAddress: out.Script,\n\t\t\tAccountID: outs[i].AccountID,\n\t\t\tAccountLabel: accountLabel,\n\t\t\tMetadata: out.Metadata,\n\t\t\tmNodeID: mNodeID,\n\t\t})\n\t}\n\treturn actTx, nil\n}\n\nfunc filterAccounts(tx *nodeTx, keepID string) *nodeTx {\n\tfilteredTx := new(nodeTx)\n\t*filteredTx = *tx\n\tfilteredTx.Inputs = make([]nodeTxInput, len(tx.Inputs))\n\tcopy(filteredTx.Inputs, tx.Inputs)\n\tfilteredTx.Outputs = make([]nodeTxOutput, len(tx.Outputs))\n\tcopy(filteredTx.Outputs, tx.Outputs)\n\tfor i := range filteredTx.Inputs {\n\t\tin := &filteredTx.Inputs[i]\n\t\tif in.mNodeID != keepID {\n\t\t\tin.AccountLabel = \"\"\n\t\t\tin.AccountID = \"\"\n\t\t}\n\t}\n\tfor i := range filteredTx.Outputs {\n\t\tout := &filteredTx.Outputs[i]\n\t\tif out.mNodeID != keepID {\n\t\t\tout.AccountLabel = \"\"\n\t\t\tout.AccountID = \"\"\n\t\t}\n\t}\n\treturn filteredTx\n}\napi\/asset: add omitempty json tag to account_labelpackage asset\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/api\/appdb\"\n\tchainjson \"chain\/encoding\/json\"\n\t\"chain\/errors\"\n\t\"chain\/fedchain\/bc\"\n)\n\ntype nodeTx struct {\n\tID bc.Hash `json:\"id\"`\n\tTime time.Time `json:\"transaction_time\"`\n\tInputs []nodeTxInput `json:\"inputs\"`\n\tOutputs []nodeTxOutput `json:\"outputs\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n}\n\ntype nodeTxInput struct {\n\tType string `json:\"type\"`\n\tTxID bc.Hash `json:\"transaction_id,omitempty\"`\n\tTxOut uint32 `json:\"transaction_output,omitempty\"`\n\tAssetID bc.AssetID `json:\"asset_id\"`\n\tAssetLabel string `json:\"asset_label,omitempty\"`\n\tAmount uint64 `json:\"amount\"`\n\tAddress chainjson.HexBytes `json:\"address,omitempty\"`\n\tAccountID string `json:\"account_id,omitempty\"`\n\tAccountLabel string `json:\"account_label,omitempty\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n\n\tmNodeID string\n}\n\ntype nodeTxOutput struct {\n\tAssetID bc.AssetID `json:\"asset_id\"`\n\tAssetLabel string `json:\"asset_label,omitempty\"`\n\tAmount uint64 `json:\"amount\"`\n\tAddress chainjson.HexBytes `json:\"address,omitempty\"`\n\tAccountID string `json:\"account_id,omitempty\"`\n\tAccountLabel string `json:\"account_label,omitempty\"`\n\tMetadata chainjson.HexBytes `json:\"metadata\"`\n\n\tmNodeID string\n}\n\n\/\/ WriteNodeTxs persists a transaction along with its metadata\n\/\/ for every node (issuer, manager) associated with the transaction.\nfunc WriteNodeTxs(ctx context.Context, tx *bc.Tx, ts time.Time) error {\n\tins, outs, err := appdb.GetActUTXOs(ctx, tx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fetching utxo account data\")\n\t}\n\n\tassetMap := make(map[string]*appdb.ActAsset)\n\tvar assetIDs []string\n\tfor _, out := range tx.Outputs {\n\t\tassetIDs = append(assetIDs, out.AssetID.String())\n\t}\n\tassets, err := appdb.GetActAssets(ctx, assetIDs)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting assets\")\n\t}\n\tfor _, asset := range assets {\n\t\tassetMap[asset.ID] = asset\n\t}\n\n\taccountMap := make(map[string]*appdb.ActAccount)\n\tnodeAccounts := make(map[string][]string)\n\tvar accountIDs []string\n\tfor _, utxo := range append(ins, outs...) {\n\t\tif utxo.AccountID != \"\" {\n\t\t\taccountIDs = append(accountIDs, utxo.AccountID)\n\t\t}\n\t}\n\taccounts, err := appdb.GetActAccounts(ctx, accountIDs)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting accounts\")\n\t}\n\tfor _, acc := range accounts {\n\t\taccountMap[acc.ID] = acc\n\t\tnodeAccounts[acc.ManagerNodeID] = append(nodeAccounts[acc.ManagerNodeID], acc.ID)\n\t}\n\n\tnodeTx, err := generateNodeTx(tx, ins, outs, assetMap, accountMap, ts)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"generating master\")\n\t}\n\n\tif tx.IsIssuance() {\n\t\tfilteredTx, err := json.Marshal(filterAccounts(nodeTx, \"\"))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"filtering tx\")\n\t\t}\n\t\terr = appdb.WriteIssuerTx(ctx, tx.Hash.String(), filteredTx, assets[0].IssuerNodeID, assets[0].ID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"writing issuer tx\")\n\t\t}\n\t}\n\n\tfor nodeID, accountIDs := range nodeAccounts {\n\t\tfilteredTx, err := json.Marshal(filterAccounts(nodeTx, nodeID))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"filtering tx\")\n\t\t}\n\t\terr = appdb.WriteManagerTx(ctx, tx.Hash.String(), filteredTx, nodeID, accountIDs)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"writing manager tx\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateNodeTx(\n\ttx *bc.Tx,\n\tins []*appdb.ActUTXO,\n\touts []*appdb.ActUTXO,\n\tassetMap map[string]*appdb.ActAsset,\n\taccountMap map[string]*appdb.ActAccount,\n\tts time.Time,\n) (*nodeTx, error) {\n\tactTx := &nodeTx{\n\t\tID: tx.Hash,\n\t\tMetadata: tx.Metadata,\n\t\tTime: ts,\n\t}\n\tfor i, in := range tx.Inputs {\n\t\tif in.IsIssuance() {\n\t\t\tvar (\n\t\t\t\tamt uint64\n\t\t\t\tassetID bc.AssetID\n\t\t\t)\n\t\t\tfor _, o := range tx.Outputs {\n\t\t\t\tamt += o.Value\n\t\t\t\tassetID = o.AssetID\n\t\t\t}\n\t\t\tasset := assetMap[assetID.String()]\n\t\t\tvar label string\n\t\t\tif asset != nil {\n\t\t\t\tlabel = asset.Label\n\t\t\t}\n\t\t\tactTx.Inputs = append(actTx.Inputs, nodeTxInput{\n\t\t\t\tType: \"issuance\",\n\t\t\t\tAssetID: assetID,\n\t\t\t\tAssetLabel: label,\n\t\t\t\tAmount: amt,\n\t\t\t\tMetadata: in.Metadata,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tassetID, err := bc.ParseHash(ins[i].AssetID)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"parsing utxo asset id\")\n\t\t}\n\t\tasset := assetMap[ins[i].AssetID]\n\t\tvar assetLabel string\n\t\tif asset != nil {\n\t\t\tassetLabel = asset.Label\n\t\t}\n\n\t\taccount := accountMap[ins[i].AccountID]\n\t\tvar accountLabel, mNodeID string\n\t\tif account != nil {\n\t\t\taccountLabel = account.Label\n\t\t\tmNodeID = account.ManagerNodeID\n\t\t}\n\n\t\tactTx.Inputs = append(actTx.Inputs, nodeTxInput{\n\t\t\tType: \"transfer\",\n\t\t\tTxID: in.Previous.Hash,\n\t\t\tTxOut: in.Previous.Index,\n\t\t\tAssetID: bc.AssetID(assetID),\n\t\t\tAssetLabel: assetLabel,\n\t\t\tAmount: ins[i].Amount,\n\t\t\tAccountID: ins[i].AccountID,\n\t\t\tAccountLabel: accountLabel,\n\t\t\tAddress: ins[i].Script,\n\t\t\tMetadata: in.Metadata,\n\t\t\tmNodeID: mNodeID,\n\t\t})\n\t}\n\n\tfor i, out := range tx.Outputs {\n\t\tasset := assetMap[out.AssetID.String()]\n\t\tvar assetLabel string\n\t\tif asset != nil {\n\t\t\tassetLabel = asset.Label\n\t\t}\n\n\t\taccount := accountMap[outs[i].AccountID]\n\t\tvar accountLabel, mNodeID string\n\t\tif account != nil {\n\t\t\taccountLabel = account.Label\n\t\t\tmNodeID = account.ManagerNodeID\n\t\t}\n\n\t\tactTx.Outputs = append(actTx.Outputs, nodeTxOutput{\n\t\t\tAssetID: out.AssetID,\n\t\t\tAssetLabel: assetLabel,\n\t\t\tAmount: out.Value,\n\t\t\tAddress: out.Script,\n\t\t\tAccountID: outs[i].AccountID,\n\t\t\tAccountLabel: accountLabel,\n\t\t\tMetadata: out.Metadata,\n\t\t\tmNodeID: mNodeID,\n\t\t})\n\t}\n\treturn actTx, nil\n}\n\nfunc filterAccounts(tx *nodeTx, keepID string) *nodeTx {\n\tfilteredTx := new(nodeTx)\n\t*filteredTx = *tx\n\tfilteredTx.Inputs = make([]nodeTxInput, len(tx.Inputs))\n\tcopy(filteredTx.Inputs, tx.Inputs)\n\tfilteredTx.Outputs = make([]nodeTxOutput, len(tx.Outputs))\n\tcopy(filteredTx.Outputs, tx.Outputs)\n\tfor i := range filteredTx.Inputs {\n\t\tin := &filteredTx.Inputs[i]\n\t\tif in.mNodeID != keepID {\n\t\t\tin.AccountLabel = \"\"\n\t\t\tin.AccountID = \"\"\n\t\t}\n\t}\n\tfor i := range filteredTx.Outputs {\n\t\tout := &filteredTx.Outputs[i]\n\t\tif out.mNodeID != keepID {\n\t\t\tout.AccountLabel = \"\"\n\t\t\tout.AccountID = \"\"\n\t\t}\n\t}\n\treturn filteredTx\n}\n<|endoftext|>"} {"text":"package database\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/asdine\/storm\"\n\t\"github.com\/asdine\/storm\/index\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/hashicorp\/raft\"\n)\n\ntype (\n\t\/\/ Database is the lowest level of the gansoi database, it represent the\n\t\/\/ on-disk database. Database implements raft.FSM.\n\tDatabase struct {\n\t\tdbMutex sync.RWMutex\n\t\tdb *storm.DB\n\t\tlistenersLock sync.RWMutex\n\t\tlisteners []Listener\n\t}\n)\n\nvar (\n\t\/\/ ErrNotFound is returned when the specified record is not saved.\n\tErrNotFound = storm.ErrNotFound\n)\n\nconst (\n\t\/\/ stableBucket is the bucket name used for raft stable storage.\n\tstableBucket = \"raft.StableStore\"\n\n\t\/\/ logBucket is used for raft log storage.\n\tlogBucket = \"raft.LogStore\"\n)\n\n\/\/ NewDatabase will instantiate a new database placed in filepath.\nfunc NewDatabase(filepath string) (*Database, error) {\n\td := &Database{}\n\n\terr := d.open(path.Join(filepath, \"gansoi.db\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}\n\n\/\/ Close will close the database. Accessing the database after this will\n\/\/ result in a deadlock.\nfunc (d *Database) Close() error {\n\td.dbMutex.RLock()\n\treturn d.db.Close()\n}\n\n\/\/ open will open the underlying file storage.\nfunc (d *Database) open(filepath string) error {\n\tdb, err := storm.Open(\n\t\tfilepath,\n\t\tstorm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}),\n\t\tstorm.AutoIncrement(),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.db = db\n\n\td.Storm().Bolt.Update(func(tx *bolt.Tx) error {\n\t\ttx.CreateBucketIfNotExists([]byte(logBucket))\n\n\t\treturn nil\n\t})\n\n\treturn nil\n}\n\n\/\/ Storm will return the underlying Storm database.\nfunc (d *Database) Storm() *storm.DB {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db\n}\n\n\/\/ ProcessLogEntry will process the log entry and apply whatever needs doing.\nfunc (d *Database) ProcessLogEntry(entry *LogEntry) error {\n\tvar err error\n\n\tvar v interface{}\n\n\tswitch entry.Command {\n\tcase CommandSave:\n\t\tv, _ = entry.Payload()\n\t\terr = d.Save(v)\n\tcase CommandDelete:\n\t\tv, _ = entry.Payload()\n\t\terr = d.db.DeleteStruct(v)\n\tdefault:\n\t\terr = fmt.Errorf(\"not implemented\")\n\t}\n\n\tgo func(command Command, data interface{}, err error) {\n\t\td.listenersLock.RLock()\n\n\t\tfor _, listener := range d.listeners {\n\t\t\tlistener.PostLocalApply(command, data, err)\n\t\t}\n\n\t\td.listenersLock.RUnlock()\n\t}(entry.Command, v, err)\n\n\treturn err\n}\n\n\/\/ Apply implements raft.FSM.\nfunc (d *Database) Apply(l *raft.Log) interface{} {\n\tentry := &LogEntry{}\n\terr := json.Unmarshal(l.Data, entry)\n\tif err != nil {\n\t\t\/\/ This should not happen..?\n\t\tfmt.Printf(\"%s: '%s'\\n\", err.Error(), string(l.Data))\n\t\treturn nil\n\t}\n\n\treturn d.ProcessLogEntry(entry)\n}\n\n\/\/ Snapshot implements raft.FSM.\nfunc (d *Database) Snapshot() (raft.FSMSnapshot, error) {\n\treturn &Snapshot{db: d}, nil\n}\n\n\/\/ Restore implements raft.FSM.\nfunc (d *Database) Restore(source io.ReadCloser) error {\n\tdb := d.Storm().Bolt\n\td.dbMutex.Lock()\n\tdefer d.dbMutex.Unlock()\n\tdefer source.Close()\n\n\tpath := db.Path()\n\trestorePath := path + \".restoretmp\"\n\n\tfile, err := os.Create(restorePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Remove(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Rename(restorePath, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Save will save an object to the database.\nfunc (d *Database) Save(data interface{}) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.Save(data)\n}\n\n\/\/ One will retrieve one (or zero) record from the database.\nfunc (d *Database) One(fieldName string, value interface{}, to interface{}) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.One(fieldName, value, to)\n}\n\n\/\/ All lists all kinds of a type.\nfunc (d *Database) All(to interface{}, limit int, skip int, reverse bool) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.All(to, func(opts *index.Options) {\n\t\topts.Limit = limit\n\t\topts.Skip = skip\n\t\topts.Reverse = reverse\n\t})\n}\n\n\/\/ RegisterListener will register a listener for new changes to the database.\nfunc (d *Database) RegisterListener(listener Listener) {\n\td.listenersLock.Lock()\n\tdefer d.listenersLock.Unlock()\n\n\td.listeners = append(d.listeners, listener)\n}\n\n\/\/ Set implements raft.StableStore.\nfunc (d *Database) Set(k, v []byte) error {\n\treturn d.Storm().Set(stableBucket, k, v)\n}\n\n\/\/ Get implements raft.StableStore.\nfunc (d *Database) Get(k []byte) ([]byte, error) {\n\tvar value []byte\n\terr := d.Storm().Get(stableBucket, k, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn value, nil\n}\n\n\/\/ SetUint64 is like Set, but handles uint64 values\nfunc (d *Database) SetUint64(key []byte, val uint64) error {\n\treturn d.Set(key, uint64ToBytes(val))\n}\n\n\/\/ GetUint64 is like Get, but handles uint64 values\nfunc (d *Database) GetUint64(key []byte) (uint64, error) {\n\tval, err := d.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn bytesToUint64(val), nil\n}\n\n\/\/ FirstIndex implements raft.LogStore.\nfunc (d *Database) FirstIndex() (uint64, error) {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\n\tfirst, _ := curs.First()\n\tif first == nil {\n\t\treturn 0, nil\n\t}\n\n\treturn bytesToUint64(first), nil\n}\n\n\/\/ LastIndex implements raft.LogStore.\nfunc (d *Database) LastIndex() (uint64, error) {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\n\tlast, _ := curs.Last()\n\tif last == nil {\n\t\treturn 0, nil\n\t}\n\n\treturn bytesToUint64(last), nil\n}\n\n\/\/ GetLog implements raft.LogStore.\nfunc (d *Database) GetLog(idx uint64, log *raft.Log) error {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tvalue := tx.Bucket([]byte(logBucket)).Get(uint64ToBytes(idx))\n\n\treturn json.Unmarshal(value, log)\n}\n\n\/\/ StoreLog implements raft.LogStore.\nfunc (d *Database) StoreLog(log *raft.Log) error {\n\treturn d.StoreLogs([]*raft.Log{log})\n}\n\n\/\/ StoreLogs implements raft.LogStore.\nfunc (d *Database) StoreLogs(logs []*raft.Log) error {\n\ttx, err := d.Storm().Bolt.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tfor _, log := range logs {\n\t\tkey := uint64ToBytes(log.Index)\n\t\tvalue, err := json.Marshal(log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbucket := tx.Bucket([]byte(logBucket))\n\t\terr = bucket.Put(key, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\n\n\/\/ DeleteRange implements raft.LogStore.\nfunc (d *Database) DeleteRange(min, max uint64) error {\n\tminKey := uint64ToBytes(min)\n\n\t\/\/ We can safely use standar Bolt functions, we're not interested in the\n\t\/\/ encoded values, we're only using delete, so Storm encoding\/decoding\n\t\/\/ doesn't matter.\n\ttx, err := d.Storm().Bolt.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\tfor k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() {\n\t\tif bytesToUint64(k) > max {\n\t\t\tbreak\n\t\t}\n\n\t\tif err := curs.Delete(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\nAvoid deadlock in open() when restoring from snapshot.package database\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/asdine\/storm\"\n\t\"github.com\/asdine\/storm\/index\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/hashicorp\/raft\"\n)\n\ntype (\n\t\/\/ Database is the lowest level of the gansoi database, it represent the\n\t\/\/ on-disk database. Database implements raft.FSM.\n\tDatabase struct {\n\t\tdbMutex sync.RWMutex\n\t\tdb *storm.DB\n\t\tlistenersLock sync.RWMutex\n\t\tlisteners []Listener\n\t}\n)\n\nvar (\n\t\/\/ ErrNotFound is returned when the specified record is not saved.\n\tErrNotFound = storm.ErrNotFound\n)\n\nconst (\n\t\/\/ stableBucket is the bucket name used for raft stable storage.\n\tstableBucket = \"raft.StableStore\"\n\n\t\/\/ logBucket is used for raft log storage.\n\tlogBucket = \"raft.LogStore\"\n)\n\n\/\/ NewDatabase will instantiate a new database placed in filepath.\nfunc NewDatabase(filepath string) (*Database, error) {\n\td := &Database{}\n\n\terr := d.open(path.Join(filepath, \"gansoi.db\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}\n\n\/\/ Close will close the database. Accessing the database after this will\n\/\/ result in a deadlock.\nfunc (d *Database) Close() error {\n\td.dbMutex.RLock()\n\treturn d.db.Close()\n}\n\n\/\/ open will open the underlying file storage.\nfunc (d *Database) open(filepath string) error {\n\tdb, err := storm.Open(\n\t\tfilepath,\n\t\tstorm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}),\n\t\tstorm.AutoIncrement(),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.db = db\n\n\td.db.Bolt.Update(func(tx *bolt.Tx) error {\n\t\ttx.CreateBucketIfNotExists([]byte(logBucket))\n\n\t\treturn nil\n\t})\n\n\treturn nil\n}\n\n\/\/ Storm will return the underlying Storm database.\nfunc (d *Database) Storm() *storm.DB {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db\n}\n\n\/\/ ProcessLogEntry will process the log entry and apply whatever needs doing.\nfunc (d *Database) ProcessLogEntry(entry *LogEntry) error {\n\tvar err error\n\n\tvar v interface{}\n\n\tswitch entry.Command {\n\tcase CommandSave:\n\t\tv, _ = entry.Payload()\n\t\terr = d.Save(v)\n\tcase CommandDelete:\n\t\tv, _ = entry.Payload()\n\t\terr = d.db.DeleteStruct(v)\n\tdefault:\n\t\terr = fmt.Errorf(\"not implemented\")\n\t}\n\n\tgo func(command Command, data interface{}, err error) {\n\t\td.listenersLock.RLock()\n\n\t\tfor _, listener := range d.listeners {\n\t\t\tlistener.PostLocalApply(command, data, err)\n\t\t}\n\n\t\td.listenersLock.RUnlock()\n\t}(entry.Command, v, err)\n\n\treturn err\n}\n\n\/\/ Apply implements raft.FSM.\nfunc (d *Database) Apply(l *raft.Log) interface{} {\n\tentry := &LogEntry{}\n\terr := json.Unmarshal(l.Data, entry)\n\tif err != nil {\n\t\t\/\/ This should not happen..?\n\t\tfmt.Printf(\"%s: '%s'\\n\", err.Error(), string(l.Data))\n\t\treturn nil\n\t}\n\n\treturn d.ProcessLogEntry(entry)\n}\n\n\/\/ Snapshot implements raft.FSM.\nfunc (d *Database) Snapshot() (raft.FSMSnapshot, error) {\n\treturn &Snapshot{db: d}, nil\n}\n\n\/\/ Restore implements raft.FSM.\nfunc (d *Database) Restore(source io.ReadCloser) error {\n\tdb := d.Storm().Bolt\n\td.dbMutex.Lock()\n\tdefer d.dbMutex.Unlock()\n\tdefer source.Close()\n\n\tpath := db.Path()\n\trestorePath := path + \".restoretmp\"\n\n\tfile, err := os.Create(restorePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Remove(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Rename(restorePath, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Save will save an object to the database.\nfunc (d *Database) Save(data interface{}) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.Save(data)\n}\n\n\/\/ One will retrieve one (or zero) record from the database.\nfunc (d *Database) One(fieldName string, value interface{}, to interface{}) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.One(fieldName, value, to)\n}\n\n\/\/ All lists all kinds of a type.\nfunc (d *Database) All(to interface{}, limit int, skip int, reverse bool) error {\n\td.dbMutex.RLock()\n\tdefer d.dbMutex.RUnlock()\n\n\treturn d.db.All(to, func(opts *index.Options) {\n\t\topts.Limit = limit\n\t\topts.Skip = skip\n\t\topts.Reverse = reverse\n\t})\n}\n\n\/\/ RegisterListener will register a listener for new changes to the database.\nfunc (d *Database) RegisterListener(listener Listener) {\n\td.listenersLock.Lock()\n\tdefer d.listenersLock.Unlock()\n\n\td.listeners = append(d.listeners, listener)\n}\n\n\/\/ Set implements raft.StableStore.\nfunc (d *Database) Set(k, v []byte) error {\n\treturn d.Storm().Set(stableBucket, k, v)\n}\n\n\/\/ Get implements raft.StableStore.\nfunc (d *Database) Get(k []byte) ([]byte, error) {\n\tvar value []byte\n\terr := d.Storm().Get(stableBucket, k, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn value, nil\n}\n\n\/\/ SetUint64 is like Set, but handles uint64 values\nfunc (d *Database) SetUint64(key []byte, val uint64) error {\n\treturn d.Set(key, uint64ToBytes(val))\n}\n\n\/\/ GetUint64 is like Get, but handles uint64 values\nfunc (d *Database) GetUint64(key []byte) (uint64, error) {\n\tval, err := d.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn bytesToUint64(val), nil\n}\n\n\/\/ FirstIndex implements raft.LogStore.\nfunc (d *Database) FirstIndex() (uint64, error) {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\n\tfirst, _ := curs.First()\n\tif first == nil {\n\t\treturn 0, nil\n\t}\n\n\treturn bytesToUint64(first), nil\n}\n\n\/\/ LastIndex implements raft.LogStore.\nfunc (d *Database) LastIndex() (uint64, error) {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\n\tlast, _ := curs.Last()\n\tif last == nil {\n\t\treturn 0, nil\n\t}\n\n\treturn bytesToUint64(last), nil\n}\n\n\/\/ GetLog implements raft.LogStore.\nfunc (d *Database) GetLog(idx uint64, log *raft.Log) error {\n\ttx, err := d.Storm().Bolt.Begin(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tvalue := tx.Bucket([]byte(logBucket)).Get(uint64ToBytes(idx))\n\n\treturn json.Unmarshal(value, log)\n}\n\n\/\/ StoreLog implements raft.LogStore.\nfunc (d *Database) StoreLog(log *raft.Log) error {\n\treturn d.StoreLogs([]*raft.Log{log})\n}\n\n\/\/ StoreLogs implements raft.LogStore.\nfunc (d *Database) StoreLogs(logs []*raft.Log) error {\n\ttx, err := d.Storm().Bolt.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tfor _, log := range logs {\n\t\tkey := uint64ToBytes(log.Index)\n\t\tvalue, err := json.Marshal(log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbucket := tx.Bucket([]byte(logBucket))\n\t\terr = bucket.Put(key, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\n\n\/\/ DeleteRange implements raft.LogStore.\nfunc (d *Database) DeleteRange(min, max uint64) error {\n\tminKey := uint64ToBytes(min)\n\n\t\/\/ We can safely use standar Bolt functions, we're not interested in the\n\t\/\/ encoded values, we're only using delete, so Storm encoding\/decoding\n\t\/\/ doesn't matter.\n\ttx, err := d.Storm().Bolt.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tcurs := tx.Bucket([]byte(logBucket)).Cursor()\n\tfor k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() {\n\t\tif bytesToUint64(k) > max {\n\t\t\tbreak\n\t\t}\n\n\t\tif err := curs.Delete(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype Config struct {\n\tUpstream string\n\tReqFanFactor int\n\tTimeoutMS int\n}\n\nfunc snd (c chan string, s string) {\n\tgo func () { c <- s }()\n}\n\n\/\/ start starts Charm up and returns a done channel for the done message\nfunc start(confPath string) (chan string) {\n\tdone := make(chan string)\n\n\t\/\/ print a welcome message\n\tlog.Print(\"Charm is starting up.\")\n\t\/\/ read the config file\n\tlog.Printf(\". . Reading %s\", confPath)\n\ttomlData, err := ioutil.ReadFile(confPath)\n\tif err != nil {\n\t\tsnd(done, fmt.Sprintf(\"Could not read file at %s\", confPath))\n\t\treturn done\n\t}\n\t\/\/ populate the config struct\n\tlog.Print(\". . Loading config\")\n\tvar conf Config\n\t_, err = toml.Decode(string(tomlData), &conf)\n\tif err != nil {\n\t\tsnd(done, \"Could not decode config\")\n\t\treturn done\n\t}\n\t\/\/ report on the configuration\n\tlog.Print(\"Charm is configured!\")\n\tlog.Printf(\". . Stabilizing %v\", conf.Upstream)\n\tlog.Printf(\". . with %v duplicate requests\", conf.ReqFanFactor)\n\tlog.Printf(\". . and a %v milisecond timeout.\", conf.TimeoutMS)\n\tgo run(conf, done)\n\treturn done\n}\n\nfunc First() {}\n\ntype stableTransport struct {\n\twrappedTransport http.RoundTripper\n\treqFanFactor int\n}\n\n\/\/ stableTransport.RoundTrip makes many round trips and returns the first\n\/\/ response\nfunc (t *stableTransport) RoundTrip(r *http.Request) (*http.Response, error) {\n\tc := make(chan *http.Response)\n\tif t.wrappedTransport == nil {\n\t\tt.wrappedTransport = http.DefaultTransport\n\t}\n\t\/\/ fan out requests, send responses to the channel, log errors\n\tfor i := 0; i < t.reqFanFactor; i++ {\n\t\tgo func () {\n\t\t\tresp, err := t.wrappedTransport.RoundTrip(r)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"stableTransport.RoundTrip error (%)\\n\", err)\n\t\t\t} else {\n\t\t\t\tc <- resp\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ return the first good response\n\treturn <-c, nil\n}\n\ntype Stabilizer struct {\n\tupstreamURL *url.URL\n\treqFanFactor int\n}\n\n\/\/ Stabilizer.ServeHTTP proxies with a stable transport\nfunc (st *Stabilizer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tproxy := httputil.NewSingleHostReverseProxy(st.upstreamURL)\n\tproxy.Transport = &stableTransport{proxy.Transport, st.reqFanFactor}\n\tproxy.ServeHTTP(w, r)\n}\n\n\/\/ run\nfunc run(conf Config, done chan string) {\n\t\/\/ make a stabilizer\n\tupstreamURL := url.Parse(conf.Upstream)\n\tstabilizer := &Stabilizer{upstreamURL, conf.ReqFanFactor}\n\n\t\/\/ serve that stabilizer under a timeout\n\ttimeout := conf.TimeoutMS * time.Milliseconds\n\tlog.Fatal(http.ListenAndServe(\n\t\t\":8000\",\n\t\thttp.TimeoutHandler(stabilizer, timeout, \"upstream timeout\"),\n\t))\n}\n\nfunc main() {\n\t\/\/ start Charm,\n\tdone := start(\"\/secret\/charm.conf\")\n\t\/\/ when Charm is done, log the message and quit.\n log.Print(<-done)\n}\ninitial working versionpackage main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"time\"\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype Config struct {\n\tUpstream string\n\tReqFanFactor int\n\tTimeoutMS int\n}\n\nfunc snd (c chan string, s string) {\n\tgo func () { c <- s }()\n}\n\n\/\/ start starts Charm up and returns a done channel for the done message\nfunc start(confPath string) (chan string) {\n\tdone := make(chan string)\n\n\t\/\/ print a welcome message\n\tlog.Print(\"Charm is starting up.\")\n\t\/\/ read the config file\n\tlog.Printf(\". . Reading %s\", confPath)\n\ttomlData, err := ioutil.ReadFile(confPath)\n\tif err != nil {\n\t\tsnd(done, fmt.Sprintf(\"Could not read file at %s\", confPath))\n\t\treturn done\n\t}\n\t\/\/ populate the config struct\n\tlog.Print(\". . Loading config\")\n\tvar conf Config\n\t_, err = toml.Decode(string(tomlData), &conf)\n\tif err != nil {\n\t\tsnd(done, \"Could not decode config\")\n\t\treturn done\n\t}\n\t\/\/ report on the configuration\n\tlog.Print(\"Charm is configured!\")\n\tlog.Printf(\". . Stabilizing %v\", conf.Upstream)\n\tlog.Printf(\". . with %v duplicate requests\", conf.ReqFanFactor)\n\tlog.Printf(\". . and a %v milisecond timeout.\", conf.TimeoutMS)\n\tgo run(conf, done)\n\treturn done\n}\n\nfunc First() {}\n\ntype stableTransport struct {\n\twrappedTransport http.RoundTripper\n\treqFanFactor int\n}\n\n\/\/ stableTransport.RoundTrip makes many round trips and returns the first\n\/\/ response\nfunc (t *stableTransport) RoundTrip(r *http.Request) (*http.Response, error) {\n\tc := make(chan *http.Response)\n\tif t.wrappedTransport == nil {\n\t\tt.wrappedTransport = http.DefaultTransport\n\t}\n\t\/\/ fan out requests, send responses to the channel, log errors\n\tfor i := 0; i < t.reqFanFactor; i++ {\n\t\ttime.Sleep(5)\n\t\tgo func () {\n\t\t\tresp, err := t.wrappedTransport.RoundTrip(r)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"stableTransport.RoundTrip error (%)\\n\", err)\n\t\t\t} else {\n\t\t\t\tc <- resp\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ return the first good response\n\treturn <-c, nil\n}\n\ntype Stabilizer struct {\n\tupstreamURL *url.URL\n\treqFanFactor int\n}\n\n\/\/ Stabilizer.ServeHTTP proxies with a stable transport\nfunc (st *Stabilizer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tproxy := httputil.NewSingleHostReverseProxy(st.upstreamURL)\n\tproxy.Transport = &stableTransport{proxy.Transport, st.reqFanFactor}\n\tproxy.ServeHTTP(w, r)\n}\n\n\/\/ run\nfunc run(conf Config, done chan string) {\n\t\/\/ make a stabilizer\n\tupstreamURL, err := url.Parse(conf.Upstream)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing Upstream URL\", conf.Upstream)\n\t}\n\tstabilizer := &Stabilizer{upstreamURL, conf.ReqFanFactor}\n\n\t\/\/ serve that stabilizer under a timeout\n\ttimeout := time.Duration(conf.TimeoutMS) * time.Millisecond\n\tlog.Fatal(http.ListenAndServe(\n\t\t\":8000\",\n\t\thttp.TimeoutHandler(stabilizer, timeout, \"upstream timeout\"),\n\t))\n}\n\nfunc main() {\n\t\/\/ start Charm,\n\tdone := start(\"\/secret\/charm.conf\")\n\t\/\/ when Charm is done, log the message and quit.\n log.Print(<-done)\n}\n<|endoftext|>"} {"text":"\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]\n\npackage main\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"go.mozilla.org\/autograph\/signer\"\n)\n\n\/\/ a signaturerequest is sent by an autograph client to request\n\/\/ a signature on input data\ntype signaturerequest struct {\n\tInput string `json:\"input\"`\n\tKeyID string `json:\"keyid,omitempty\"`\n\tOptions interface{}\n}\n\n\/\/ a signatureresponse is returned by autograph to a client with\n\/\/ a signature computed on input data\ntype signatureresponse struct {\n\tRef string `json:\"ref\"`\n\tType string `json:\"type\"`\n\tMode string `json:\"mode\"`\n\tSignerID string `json:\"signer_id\"`\n\tPublicKey string `json:\"public_key\"`\n\tSignature string `json:\"signature,omitempty\"`\n\tSignedFile string `json:\"signed_file,omitempty\"`\n\tX5U string `json:\"x5u,omitempty\"`\n}\n\n\/\/ handleSignature endpoint accepts a list of signature requests in a HAWK authenticated POST request\n\/\/ and calls the signers to generate signature responses.\nfunc (a *autographer) handleSignature(w http.ResponseWriter, r *http.Request) {\n\trid := getRequestID(r)\n\tstarttime := time.Now()\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusBadRequest, \"failed to read request body: %s\", err)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Content-Type\") != \"application\/json\" {\n\t\thttpError(w, r, http.StatusBadRequest, \"invalid content type, expected application\/json\")\n\t\treturn\n\t}\n\tif len(body) < 10 {\n\t\t\/\/ it's impossible to have a valid request body smaller than 10 bytes\n\t\thttpError(w, r, http.StatusBadRequest, \"empty or invalid request request body\")\n\t\treturn\n\t}\n\tif len(body) > 1048576000 {\n\t\t\/\/ the max body size is hardcoded to 1GB. Seriously, what are you trying to sign?\n\t\thttpError(w, r, http.StatusBadRequest, \"request exceeds max size of 1GB\")\n\t\treturn\n\t}\n\tuserid, authorized, err := a.authorize(r, body)\n\tif err != nil || !authorized {\n\t\thttpError(w, r, http.StatusUnauthorized, \"authorization verification failed: %v\", err)\n\t\treturn\n\t}\n\tvar sigreqs []signaturerequest\n\terr = json.Unmarshal(body, &sigreqs)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusBadRequest, \"failed to parse request body: %v\", err)\n\t\treturn\n\t}\n\tfor i, sigreq := range sigreqs {\n\t\tif sigreq.Input == \"\" {\n\t\t\thttpError(w, r, http.StatusBadRequest, fmt.Sprintf(\"missing input in signature request %d\", i))\n\t\t}\n\t}\n\tif a.debug {\n\t\tfmt.Printf(\"signature request\\n-----------------\\n%s\\n\", body)\n\t}\n\tsigresps := make([]signatureresponse, len(sigreqs))\n\t\/\/ Each signature requested in the http request body is processed individually.\n\t\/\/ For each, a signer is looked up, and used to compute a raw signature\n\t\/\/ the signature is then encoded appropriately, and added to the response slice\n\tfor i, sigreq := range sigreqs {\n\t\tvar (\n\t\t\tinput []byte\n\t\t\tsig signer.Signature\n\t\t\tsignedfile []byte\n\t\t\thashlog string\n\t\t)\n\n\t\t\/\/ Decode the base64 input data\n\t\tinput, err = base64.StdEncoding.DecodeString(sigreq.Input)\n\t\tif err != nil {\n\t\t\thttpError(w, r, http.StatusBadRequest, \"%v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Find the ID of the requested signer\n\t\t\/\/ Return an error if the signer is not found or if the user is not allowed\n\t\t\/\/ to use this signer\n\t\tsignerID, err := a.getSignerID(userid, sigreq.KeyID)\n\t\tif err != nil || signerID < 0 {\n\t\t\thttpError(w, r, http.StatusUnauthorized, \"%v\", err)\n\t\t\treturn\n\t\t}\n\t\tsigresps[i] = signatureresponse{\n\t\t\tRef: id(),\n\t\t\tType: a.signers[signerID].Config().Type,\n\t\t\tMode: a.signers[signerID].Config().Mode,\n\t\t\tSignerID: a.signers[signerID].Config().ID,\n\t\t\tPublicKey: a.signers[signerID].Config().PublicKey,\n\t\t\tSignedFile: base64.StdEncoding.EncodeToString(signedfile),\n\t\t\tX5U: a.signers[signerID].Config().X5U,\n\t\t}\n\t\t\/\/ Make sure the signer implements the right interface, then sign the data\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"\/sign\/hash\":\n\t\t\thashSigner, ok := a.signers[signerID].(signer.HashSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement hash signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsig, err = hashSigner.SignHash(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].Signature, err = sig.(signer.Signature).Marshal()\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"encoding failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ convert the input hash to hexadecimal for logging\n\t\t\thashlog = fmt.Sprintf(\"%X\", input)\n\n\t\tcase \"\/sign\/data\":\n\t\t\tdataSigner, ok := a.signers[signerID].(signer.DataSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement data signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsig, err = dataSigner.SignData(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].Signature, err = sig.(signer.Signature).Marshal()\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"encoding failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ calculate a hash of the input to store in the signing logs\n\t\t\tmd := sha256.New()\n\t\t\tmd.Write(input)\n\t\t\thashlog = fmt.Sprintf(\"%X\", md.Sum(nil))\n\n\t\tcase \"\/sign\/file\":\n\t\t\tfileSigner, ok := a.signers[signerID].(signer.FileSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement file signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsignedfile, err = fileSigner.SignFile(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].SignedFile = base64.StdEncoding.EncodeToString(signedfile)\n\t\t\t\/\/ calculate a hash of the input to store in the signing logs\n\t\t\tmd := sha256.New()\n\t\t\tmd.Write(input)\n\t\t\thashlog = fmt.Sprintf(\"%X\", md.Sum(nil))\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"rid\": rid,\n\t\t\t\"ref\": sigresps[i].Ref,\n\t\t\t\"type\": sigresps[i].Type,\n\t\t\t\"signer_id\": sigresps[i].SignerID,\n\t\t\t\"input_hash\": hashlog,\n\t\t\t\"user_id\": userid,\n\t\t\t\"t\": int32(time.Since(starttime) \/ time.Millisecond), \/\/ request processing time in ms\n\t\t}).Info(\"signing operation succeeded\")\n\t}\n\trespdata, err := json.Marshal(sigresps)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\treturn\n\t}\n\tif a.debug {\n\t\tfmt.Printf(\"signature response\\n------------------\\n%s\\n\", respdata)\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(respdata)\n\tlog.WithFields(log.Fields{\"rid\": rid}).Info(\"signing request completed successfully\")\n}\n\n\/\/ handleHeartbeat returns a simple message indicating that the API is alive and well\nfunc handleHeartbeat(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttpError(w, r, http.StatusMethodNotAllowed, \"%s method not allowed; endpoint accepts GET only\", r.Method)\n\t\treturn\n\t}\n\tw.Write([]byte(\"ohai\"))\n}\n\nfunc handleVersion(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttpError(w, r, http.StatusMethodNotAllowed, \"%s method not allowed; endpoint accepts GET only\", r.Method)\n\t\treturn\n\t}\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"Could not get CWD\")\n\t\treturn\n\t}\n\tfilename := path.Clean(dir + string(os.PathSeparator) + \"version.json\")\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusNotFound, \"version.json file not found\")\n\t\treturn\n\t}\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"stat failed on version.json\")\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\thttp.ServeContent(w, r, \"version.json\", stat.ModTime(), f)\n}\nhandlers: time signatures relative to req start time\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]\n\npackage main\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"go.mozilla.org\/autograph\/signer\"\n)\n\n\/\/ a signaturerequest is sent by an autograph client to request\n\/\/ a signature on input data\ntype signaturerequest struct {\n\tInput string `json:\"input\"`\n\tKeyID string `json:\"keyid,omitempty\"`\n\tOptions interface{}\n}\n\n\/\/ a signatureresponse is returned by autograph to a client with\n\/\/ a signature computed on input data\ntype signatureresponse struct {\n\tRef string `json:\"ref\"`\n\tType string `json:\"type\"`\n\tMode string `json:\"mode\"`\n\tSignerID string `json:\"signer_id\"`\n\tPublicKey string `json:\"public_key\"`\n\tSignature string `json:\"signature,omitempty\"`\n\tSignedFile string `json:\"signed_file,omitempty\"`\n\tX5U string `json:\"x5u,omitempty\"`\n}\n\n\/\/ handleSignature endpoint accepts a list of signature requests in a HAWK authenticated POST request\n\/\/ and calls the signers to generate signature responses.\nfunc (a *autographer) handleSignature(w http.ResponseWriter, r *http.Request) {\n\trid := getRequestID(r)\n\tstarttime := getRequestStartTime(r)\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusBadRequest, \"failed to read request body: %s\", err)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Content-Type\") != \"application\/json\" {\n\t\thttpError(w, r, http.StatusBadRequest, \"invalid content type, expected application\/json\")\n\t\treturn\n\t}\n\tif len(body) < 10 {\n\t\t\/\/ it's impossible to have a valid request body smaller than 10 bytes\n\t\thttpError(w, r, http.StatusBadRequest, \"empty or invalid request request body\")\n\t\treturn\n\t}\n\tif len(body) > 1048576000 {\n\t\t\/\/ the max body size is hardcoded to 1GB. Seriously, what are you trying to sign?\n\t\thttpError(w, r, http.StatusBadRequest, \"request exceeds max size of 1GB\")\n\t\treturn\n\t}\n\tuserid, authorized, err := a.authorize(r, body)\n\tif err != nil || !authorized {\n\t\thttpError(w, r, http.StatusUnauthorized, \"authorization verification failed: %v\", err)\n\t\treturn\n\t}\n\tvar sigreqs []signaturerequest\n\terr = json.Unmarshal(body, &sigreqs)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusBadRequest, \"failed to parse request body: %v\", err)\n\t\treturn\n\t}\n\tfor i, sigreq := range sigreqs {\n\t\tif sigreq.Input == \"\" {\n\t\t\thttpError(w, r, http.StatusBadRequest, fmt.Sprintf(\"missing input in signature request %d\", i))\n\t\t}\n\t}\n\tif a.debug {\n\t\tfmt.Printf(\"signature request\\n-----------------\\n%s\\n\", body)\n\t}\n\tsigresps := make([]signatureresponse, len(sigreqs))\n\t\/\/ Each signature requested in the http request body is processed individually.\n\t\/\/ For each, a signer is looked up, and used to compute a raw signature\n\t\/\/ the signature is then encoded appropriately, and added to the response slice\n\tfor i, sigreq := range sigreqs {\n\t\tvar (\n\t\t\tinput []byte\n\t\t\tsig signer.Signature\n\t\t\tsignedfile []byte\n\t\t\thashlog string\n\t\t)\n\n\t\t\/\/ Decode the base64 input data\n\t\tinput, err = base64.StdEncoding.DecodeString(sigreq.Input)\n\t\tif err != nil {\n\t\t\thttpError(w, r, http.StatusBadRequest, \"%v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Find the ID of the requested signer\n\t\t\/\/ Return an error if the signer is not found or if the user is not allowed\n\t\t\/\/ to use this signer\n\t\tsignerID, err := a.getSignerID(userid, sigreq.KeyID)\n\t\tif err != nil || signerID < 0 {\n\t\t\thttpError(w, r, http.StatusUnauthorized, \"%v\", err)\n\t\t\treturn\n\t\t}\n\t\tsigresps[i] = signatureresponse{\n\t\t\tRef: id(),\n\t\t\tType: a.signers[signerID].Config().Type,\n\t\t\tMode: a.signers[signerID].Config().Mode,\n\t\t\tSignerID: a.signers[signerID].Config().ID,\n\t\t\tPublicKey: a.signers[signerID].Config().PublicKey,\n\t\t\tSignedFile: base64.StdEncoding.EncodeToString(signedfile),\n\t\t\tX5U: a.signers[signerID].Config().X5U,\n\t\t}\n\t\t\/\/ Make sure the signer implements the right interface, then sign the data\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"\/sign\/hash\":\n\t\t\thashSigner, ok := a.signers[signerID].(signer.HashSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement hash signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsig, err = hashSigner.SignHash(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].Signature, err = sig.(signer.Signature).Marshal()\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"encoding failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ convert the input hash to hexadecimal for logging\n\t\t\thashlog = fmt.Sprintf(\"%X\", input)\n\n\t\tcase \"\/sign\/data\":\n\t\t\tdataSigner, ok := a.signers[signerID].(signer.DataSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement data signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsig, err = dataSigner.SignData(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].Signature, err = sig.(signer.Signature).Marshal()\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"encoding failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ calculate a hash of the input to store in the signing logs\n\t\t\tmd := sha256.New()\n\t\t\tmd.Write(input)\n\t\t\thashlog = fmt.Sprintf(\"%X\", md.Sum(nil))\n\n\t\tcase \"\/sign\/file\":\n\t\t\tfileSigner, ok := a.signers[signerID].(signer.FileSigner)\n\t\t\tif !ok {\n\t\t\t\thttpError(w, r, http.StatusBadRequest, \"requested signer does not implement file signing\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsignedfile, err = fileSigner.SignFile(input, sigreq.Options)\n\t\t\tif err != nil {\n\t\t\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsigresps[i].SignedFile = base64.StdEncoding.EncodeToString(signedfile)\n\t\t\t\/\/ calculate a hash of the input to store in the signing logs\n\t\t\tmd := sha256.New()\n\t\t\tmd.Write(input)\n\t\t\thashlog = fmt.Sprintf(\"%X\", md.Sum(nil))\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"rid\": rid,\n\t\t\t\"ref\": sigresps[i].Ref,\n\t\t\t\"type\": sigresps[i].Type,\n\t\t\t\"signer_id\": sigresps[i].SignerID,\n\t\t\t\"input_hash\": hashlog,\n\t\t\t\"user_id\": userid,\n\t\t\t\"t\": int32(time.Since(starttime) \/ time.Millisecond), \/\/ request processing time in ms\n\t\t}).Info(\"signing operation succeeded\")\n\t}\n\trespdata, err := json.Marshal(sigresps)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"signing failed with error: %v\", err)\n\t\treturn\n\t}\n\tif a.debug {\n\t\tfmt.Printf(\"signature response\\n------------------\\n%s\\n\", respdata)\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(respdata)\n\tlog.WithFields(log.Fields{\"rid\": rid}).Info(\"signing request completed successfully\")\n}\n\n\/\/ handleHeartbeat returns a simple message indicating that the API is alive and well\nfunc handleHeartbeat(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttpError(w, r, http.StatusMethodNotAllowed, \"%s method not allowed; endpoint accepts GET only\", r.Method)\n\t\treturn\n\t}\n\tw.Write([]byte(\"ohai\"))\n}\n\nfunc handleVersion(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttpError(w, r, http.StatusMethodNotAllowed, \"%s method not allowed; endpoint accepts GET only\", r.Method)\n\t\treturn\n\t}\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"Could not get CWD\")\n\t\treturn\n\t}\n\tfilename := path.Clean(dir + string(os.PathSeparator) + \"version.json\")\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\thttpError(w, r, http.StatusNotFound, \"version.json file not found\")\n\t\treturn\n\t}\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\thttpError(w, r, http.StatusInternalServerError, \"stat failed on version.json\")\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\thttp.ServeContent(w, r, \"version.json\", stat.ModTime(), f)\n}\n<|endoftext|>"} {"text":"package exporter\n\nimport (\n\t\"io\"\n\t\"encoding\/json\"\n\t\"strconv\"\n\n\t\"github.com\/google\/cayley\/graph\"\n)\n\ntype Exporter struct {\n\twr io.Writer\n\tqstore graph.QuadStore\n\tqi graph.Iterator\n\terr error\n\tcount int32\n}\n\nfunc NewExporter(writer io.Writer, qstore graph.QuadStore) *Exporter {\n\treturn NewExporterForIterator(writer, qstore, qstore.QuadsAllIterator())\n}\n\nfunc NewExporterForIterator(writer io.Writer, qstore graph.QuadStore, qi graph.Iterator) *Exporter {\n\treturn &Exporter{wr: writer, qstore: qstore, qi: qi}\n}\n\n\/\/ number of records\nfunc (exp *Exporter) Count() int32 {\n\treturn exp.count\n}\n\nfunc (exp *Exporter) ExportQuad() {\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\texp.count++\n\t\tquad := exp.qstore.Quad(it.Result())\n\t\t\n\t\texp.WriteEscString(quad.Subject)\n\t\texp.Write(\" \")\n\t\texp.WriteEscString(quad.Predicate)\n\t\texp.Write(\" \")\n\t\texp.WriteEscString(quad.Object)\n\t\tif quad.Label != \"\" {\n\t\t\texp.Write(\" \")\n\t\t\texp.WriteEscString(quad.Label)\n\t\t}\n\t\texp.Write(\" .\\n\")\n\t} \n}\n\nfunc (exp *Exporter) ExportJson() {\n\tvar jstr []byte \n\texp.Write(\"[\")\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\texp.count++\n\t\tif exp.count > 1 {\n\t\t\texp.Write(\",\")\n\t\t}\n\n\t\tjstr, exp.err = json.Marshal(exp.qstore.Quad(it.Result()))\n\t\tif exp.err != nil {\n\t\t\treturn\n\t\t}\n\t\texp.Write(string(jstr[:]))\n\t}\n\texp.Write(\"]\\n\")\n}\n\n\/\/experimental\nfunc (exp *Exporter) ExportGml() {\n\tvar seen map[string]int32 \/\/ todo eliminate this for large dbs\n\tvar id int32\n\n\texp.Write(\"Creator Cayley\\ngraph\\n[\\n\")\n\n\tseen = make(map[string]int32)\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\tif _, ok := seen[cur.Subject]; !ok {\n\t\t\texp.Write(\" node\\n [\\n id \")\n\t\t\tseen[cur.Subject] = id\n\t\t\texp.Write(strconv.FormatInt(int64(id), 10))\n\t\t\texp.Write(\"\\n label \")\n\t\t\texp.WriteEscString(cur.Subject)\n\t\t\texp.Write(\"\\n ]\\n\")\n\t\t\tid++\n\t\t}\n\t\tif _, ok := seen[cur.Object]; !ok {\n\t\t\texp.Write(\" node\\n [\\n id \")\n\t\t\tseen[cur.Object] = id\n\t\t\texp.Write(strconv.FormatInt(int64(id), 10))\n\t\t\texp.Write(\"\\n label \")\n\t\t\texp.WriteEscString(cur.Object)\n\t\t\texp.Write(\"\\n ]\\n\")\n\t\t\tid++\n\t\t}\n\t\texp.count++\n\t}\n\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\texp.Write(\" edge\\n [\\n source \")\n\t\texp.Write(strconv.FormatInt(int64(seen[cur.Subject]), 10))\n\t\texp.Write(\"\\n target \")\n\t\texp.Write(strconv.FormatInt(int64(seen[cur.Object]), 10))\n\t\texp.Write(\"\\n label \")\n\t\texp.WriteEscString(cur.Predicate)\n\t\texp.Write(\"\\n ]\\n\")\n\t\texp.count++\n\t}\n\texp.Write(\"]\\n\")\n}\n\n\/\/experimental\nfunc (exp *Exporter) ExportGraphml() {\n\tvar seen map[string]bool \/\/ eliminate this for large databases\n\n\texp.Write(\"\\n\")\n\texp.Write(\"\\n\")\n\texp.Write(\" \\n\")\n\n\tseen = make(map[string]bool)\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\tif found := seen[cur.Subject]; !found {\n\t\t\tseen[cur.Subject] = true\n\t\t\texp.Write(\" \\n\")\n\t\t}\n\t\tif found := seen[cur.Object]; !found {\n\t\t\tseen[cur.Object] = true\n\t\t\texp.Write(\" \\n\")\n\t\t}\n\t\texp.count++\n\t}\n\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\texp.Write(\" \\n\")\n\t\texp.Write(\" \")\n\t\texp.Write(cur.Predicate)\n\t\texp.Write(\"<\/data>\\n <\/edge>\\n\")\n\t\texp.count++\n\t}\n\texp.Write(\" <\/graph>\\n<\/graphml>\\n\");\n}\n\n\/\/print out the string quoted, escaped\nfunc (exp *Exporter) WriteEscString(str string) {\n\tvar esc []byte\n \n\tif exp.err != nil {\n\t\treturn\n\t} \n\tesc, exp.err = json.Marshal(str)\n\tif exp.err != nil {\n\t\treturn\n\t} \n\t_, exp.err = exp.wr.Write(esc)\t\n}\n\nfunc (exp *Exporter) Write(str string) {\n\tif exp.err != nil {\n\t\treturn\n\t}\n\t_, exp.err = exp.wr.Write([]byte(str))\n}\n\nfunc (exp *Exporter) Err() error {\n\treturn exp.err\n}\nchanged count to just intpackage exporter\n\nimport (\n\t\"io\"\n\t\"encoding\/json\"\n\t\"strconv\"\n\n\t\"github.com\/google\/cayley\/graph\"\n)\n\ntype Exporter struct {\n\twr io.Writer\n\tqstore graph.QuadStore\n\tqi graph.Iterator\n\terr error\n\tcount int\n}\n\nfunc NewExporter(writer io.Writer, qstore graph.QuadStore) *Exporter {\n\treturn NewExporterForIterator(writer, qstore, qstore.QuadsAllIterator())\n}\n\nfunc NewExporterForIterator(writer io.Writer, qstore graph.QuadStore, qi graph.Iterator) *Exporter {\n\treturn &Exporter{wr: writer, qstore: qstore, qi: qi}\n}\n\n\/\/ number of records\nfunc (exp *Exporter) Count() int {\n\treturn exp.count\n}\n\nfunc (exp *Exporter) ExportQuad() {\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\texp.count++\n\t\tquad := exp.qstore.Quad(it.Result())\n\t\t\n\t\texp.WriteEscString(quad.Subject)\n\t\texp.Write(\" \")\n\t\texp.WriteEscString(quad.Predicate)\n\t\texp.Write(\" \")\n\t\texp.WriteEscString(quad.Object)\n\t\tif quad.Label != \"\" {\n\t\t\texp.Write(\" \")\n\t\t\texp.WriteEscString(quad.Label)\n\t\t}\n\t\texp.Write(\" .\\n\")\n\t} \n}\n\nfunc (exp *Exporter) ExportJson() {\n\tvar jstr []byte \n\texp.Write(\"[\")\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\texp.count++\n\t\tif exp.count > 1 {\n\t\t\texp.Write(\",\")\n\t\t}\n\n\t\tjstr, exp.err = json.Marshal(exp.qstore.Quad(it.Result()))\n\t\tif exp.err != nil {\n\t\t\treturn\n\t\t}\n\t\texp.Write(string(jstr[:]))\n\t}\n\texp.Write(\"]\\n\")\n}\n\n\/\/experimental\nfunc (exp *Exporter) ExportGml() {\n\tvar seen map[string]int32 \/\/ todo eliminate this for large dbs\n\tvar id int32\n\n\texp.Write(\"Creator Cayley\\ngraph\\n[\\n\")\n\n\tseen = make(map[string]int32)\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\tif _, ok := seen[cur.Subject]; !ok {\n\t\t\texp.Write(\" node\\n [\\n id \")\n\t\t\tseen[cur.Subject] = id\n\t\t\texp.Write(strconv.FormatInt(int64(id), 10))\n\t\t\texp.Write(\"\\n label \")\n\t\t\texp.WriteEscString(cur.Subject)\n\t\t\texp.Write(\"\\n ]\\n\")\n\t\t\tid++\n\t\t}\n\t\tif _, ok := seen[cur.Object]; !ok {\n\t\t\texp.Write(\" node\\n [\\n id \")\n\t\t\tseen[cur.Object] = id\n\t\t\texp.Write(strconv.FormatInt(int64(id), 10))\n\t\t\texp.Write(\"\\n label \")\n\t\t\texp.WriteEscString(cur.Object)\n\t\t\texp.Write(\"\\n ]\\n\")\n\t\t\tid++\n\t\t}\n\t\texp.count++\n\t}\n\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\texp.Write(\" edge\\n [\\n source \")\n\t\texp.Write(strconv.FormatInt(int64(seen[cur.Subject]), 10))\n\t\texp.Write(\"\\n target \")\n\t\texp.Write(strconv.FormatInt(int64(seen[cur.Object]), 10))\n\t\texp.Write(\"\\n label \")\n\t\texp.WriteEscString(cur.Predicate)\n\t\texp.Write(\"\\n ]\\n\")\n\t\texp.count++\n\t}\n\texp.Write(\"]\\n\")\n}\n\n\/\/experimental\nfunc (exp *Exporter) ExportGraphml() {\n\tvar seen map[string]bool \/\/ eliminate this for large databases\n\n\texp.Write(\"\\n\")\n\texp.Write(\"\\n\")\n\texp.Write(\" \\n\")\n\n\tseen = make(map[string]bool)\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\tif found := seen[cur.Subject]; !found {\n\t\t\tseen[cur.Subject] = true\n\t\t\texp.Write(\" \\n\")\n\t\t}\n\t\tif found := seen[cur.Object]; !found {\n\t\t\tseen[cur.Object] = true\n\t\t\texp.Write(\" \\n\")\n\t\t}\n\t\texp.count++\n\t}\n\n\texp.qi.Reset()\n\tfor it := exp.qi; graph.Next(it); {\n\t\tcur := exp.qstore.Quad(it.Result())\n\t\texp.Write(\" \\n\")\n\t\texp.Write(\" \")\n\t\texp.Write(cur.Predicate)\n\t\texp.Write(\"<\/data>\\n <\/edge>\\n\")\n\t\texp.count++\n\t}\n\texp.Write(\" <\/graph>\\n<\/graphml>\\n\");\n}\n\n\/\/print out the string quoted, escaped\nfunc (exp *Exporter) WriteEscString(str string) {\n\tvar esc []byte\n \n\tif exp.err != nil {\n\t\treturn\n\t} \n\tesc, exp.err = json.Marshal(str)\n\tif exp.err != nil {\n\t\treturn\n\t} \n\t_, exp.err = exp.wr.Write(esc)\t\n}\n\nfunc (exp *Exporter) Write(str string) {\n\tif exp.err != nil {\n\t\treturn\n\t}\n\t_, exp.err = exp.wr.Write([]byte(str))\n}\n\nfunc (exp *Exporter) Err() error {\n\treturn exp.err\n}\n<|endoftext|>"} {"text":"package executor\n\nimport (\n\tlog \"github.com\/mesosphere\/kubernetes-mesos\/3rdparty\/github.com\/golang\/glog\"\n\t\"github.com\/mesosphere\/kubernetes-mesos\/3rdparty\/github.com\/mesosphere\/mesos-go\/mesos\"\n)\n\n\/\/ KuberneteExecutor is an mesos executor that runs pods\n\/\/ in a minion machine.\ntype KuberneteExecutor struct {\n}\n\n\/\/ New creates a new kubernete executor.\nfunc New() *KuberneteExecutor {\n\treturn &KuberneteExecutor{}\n}\n\n\/\/ Registered is called when the executor is successfully registered with the slave.\nfunc (k *KuberneteExecutor) Registered(driver mesos.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tlog.Infof(\"Executor %v of framework %v registered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\n\t\/\/ TODO(yifan): Update the executor's status.\n}\n\n\/\/ Reregistered is called when the executor is successfully re-registered with the slave.\n\/\/ This can happen when the slave fails over.\nfunc (k *KuberneteExecutor) Reregistered(driver mesos.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tlog.Infof(\"Executor %v of framework %v reregistered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\n\t\/\/ TODO(yifan): Update the executor's status.\n}\n\n\/\/ Disconnected is called when the executor is disconnected with the slave.\nfunc (k *KuberneteExecutor) Disconnected(driver mesos.ExecutorDriver) {\n\tlog.Infof(\"Slave is disconnected\\n\")\n\n\t\/\/ TODO(yifan): Update the executor's status.\n}\n\n\/\/ LaunchTask is called when the executor receives a request to launch a task.\nfunc (k *KuberneteExecutor) LaunchTask(driver mesos.ExecutorDriver, taskInfo *mesos.TaskInfo) {\n\tlog.Infof(\"Launch task %v\\n\", taskInfo)\n\n\t\/\/ TODO(yifan): Launch a pod by update the pod manifest, and do a sync.\n\t\/\/ Then send back status update.\n}\n\n\/\/ KillTask is called when the executor receives a request to kill a task.\nfunc (k *KuberneteExecutor) KillTask(driver mesos.ExecutorDriver, taskId *mesos.TaskID) {\n\tlog.Infof(\"Kill task %v\\n\", taskId)\n\n\t\/\/ TODO(yifan): kill the container by updating the pod manifest, and do a sync.\n\t\/\/ Then send back status update.\n}\n\n\/\/ FrameworkMessage is called when the framework sends some message to the executor\nfunc (k *KuberneteExecutor) FrameworkMessage(driver mesos.ExecutorDriver, message string) {\n\tlog.Infof(\"Receives message from framework %v\\n\", message)\n}\n\n\/\/ Shutdown is called when the executor receives a shutdown request.\nfunc (k *KuberneteExecutor) Shutdown(driver mesos.ExecutorDriver) {\n\tlog.Infof(\"Shutdown the executor\\n\")\n\n\t\/\/ TODO(yifan): kill all running containers by updating the pod manifest, and do a sync.\n\t\/\/ Then send back status update.\n}\n\n\/\/ Error is called when some error happens.\nfunc (k *KuberneteExecutor) Error(driver mesos.ExecutorDriver, message string) {\n\tlog.Errorf(\"Executor error: %v\\n\", message)\n}\nEnable kuberlete executor to interact with kuberlete.package executor\n\nimport (\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/kubelet\"\n\tlog \"github.com\/mesosphere\/kubernetes-mesos\/3rdparty\/github.com\/golang\/glog\"\n\t\"github.com\/mesosphere\/kubernetes-mesos\/3rdparty\/github.com\/mesosphere\/mesos-go\/mesos\"\n)\n\ntype kuberTask struct {\n\tmesosTaskInfo *mesos.TaskInfo\n\tcontainerManifest *api.ContainerManifest\n}\n\n\/\/ KuberneteExecutor is an mesos executor that runs pods\n\/\/ in a minion machine.\ntype KuberneteExecutor struct {\n\t*kubelet.Kubelet \/\/ the kubelet instance.\n\tregistered bool\n\ttasks map[string]*kuberTask\n}\n\n\/\/ New creates a new kubernete executor.\nfunc New() *KuberneteExecutor {\n\treturn &KuberneteExecutor{\n\t\tkubelet.New(),\n\t\tfalse,\n\t\tmake(map[string]*kuberTask),\n\t}\n}\n\nfunc (k *KuberneteExecutor) gatherContainerManifests() []api.ContainerManifest {\n\tvar manifests []api.ContainerManifest\n\tfor _, task := range k.tasks {\n\t\tmanifests = append(manifests, *task.containerManifest)\n\t}\n\treturn manifests\n}\n\n\/\/ Registered is called when the executor is successfully registered with the slave.\nfunc (k *KuberneteExecutor) Registered(driver mesos.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tlog.Infof(\"Executor %v of framework %v registered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\n\tk.registered = true\n}\n\n\/\/ Reregistered is called when the executor is successfully re-registered with the slave.\n\/\/ This can happen when the slave fails over.\nfunc (k *KuberneteExecutor) Reregistered(driver mesos.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tlog.Infof(\"Executor %v of framework %v reregistered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\n\tk.registered = true\n}\n\n\/\/ Disconnected is called when the executor is disconnected with the slave.\nfunc (k *KuberneteExecutor) Disconnected(driver mesos.ExecutorDriver) {\n\tlog.Infof(\"Slave is disconnected\\n\")\n\n\tk.registered = false\n}\n\n\/\/ LaunchTask is called when the executor receives a request to launch a task.\nfunc (k *KuberneteExecutor) LaunchTask(driver mesos.ExecutorDriver, taskInfo *mesos.TaskInfo) {\n\tlog.Infof(\"Launch task %v\\n\", taskInfo)\n\n\tif !k.registered {\n\t\t\/\/ TODO(yifan): Send back status update task lost.\n\t\treturn\n\t}\n\n\ttaskId := taskInfo.GetTaskId().GetValue()\n\tif _, found := k.tasks[taskId]; found {\n\t\tlog.Warningf(\"Task already launched\\n\")\n\t\t\/\/ Not to send back TASK_RUNNING here, because\n\t\t\/\/ may be duplicated messages or duplicated task id.\n\t\treturn\n\t}\n\n\t\/\/ Get the container manifest from the taskInfo.\n\tvar manifest api.ContainerManifest\n\tif err := k.ExtractYAMLData(taskInfo.GetData(), &manifest); err != nil {\n\t\tlog.Warningf(\"Failed to extract yaml data from the taskInfo.data %v\\n\", err)\n\n\t\t\/\/ TODO(yifan): Send back status update task lost.\n\t\treturn\n\t}\n\n\t\/\/ Add the task.\n\tk.tasks[taskId] = &kuberTask{\n\t\tmesosTaskInfo: taskInfo,\n\t\tcontainerManifest: &manifest,\n\t}\n\n\t\/\/ Call SyncManifests to force kuberlete to run docker containers.\n\tif err := k.SyncManifests(k.gatherContainerManifests()); err != nil {\n\t\tlog.Warningf(\"Failed to extract yaml data from the taskInfo.data %v\\n\", err)\n\n\t\t\/\/ TODO(yifan): Send back status update task lost.\n\t\treturn\n\t}\n\n\t\/\/ TODO(yifan): Send back TASK_RUNNING.\n}\n\n\/\/ KillTask is called when the executor receives a request to kill a task.\nfunc (k *KuberneteExecutor) KillTask(driver mesos.ExecutorDriver, taskId *mesos.TaskID) {\n\tlog.Infof(\"Kill task %v\\n\", taskId)\n\n\ttid := taskId.GetValue()\n\tif _, ok := k.tasks[tid]; !ok {\n\t\tlog.Infof(\"Failed to kill task, unknown task %v\\n\", tid)\n\t\treturn\n\t}\n\tdelete(k.tasks, tid)\n\n\t\/\/ Call SyncManifests to force kuberlete to kill deleted containers.\n\tif err := k.SyncManifests(k.gatherContainerManifests()); err != nil {\n\t\tlog.Warningf(\"Failed to extract yaml data from the taskInfo.data %v\\n\", err)\n\n\t\t\/\/ TODO(yifan): Check the error type, (if already killed?)\n\t\treturn\n\t}\n\n\t\/\/ TODO(yifan): Send back TASK_LOST.\n}\n\n\/\/ FrameworkMessage is called when the framework sends some message to the executor\nfunc (k *KuberneteExecutor) FrameworkMessage(driver mesos.ExecutorDriver, message string) {\n\tlog.Infof(\"Receives message from framework %v\\n\", message)\n}\n\n\/\/ Shutdown is called when the executor receives a shutdown request.\nfunc (k *KuberneteExecutor) Shutdown(driver mesos.ExecutorDriver) {\n\tlog.Infof(\"Shutdown the executor\\n\")\n\n\tfor tid := range k.tasks {\n\t\tdelete(k.tasks, tid)\n\t\t\/\/ Call SyncManifests to force kuberlete to kill deleted containers.\n\t\tif err := k.SyncManifests(k.gatherContainerManifests()); err != nil {\n\t\t\tlog.Warningf(\"Failed to extract yaml data from the taskInfo.data %v\\n\", err)\n\n\t\t\t\/\/ TODO(yifan): Check the error type, (if already killed?)\n\t\t}\n\t}\n\t\/\/ Then send back TASK_LOST.\n}\n\n\/\/ Error is called when some error happens.\nfunc (k *KuberneteExecutor) Error(driver mesos.ExecutorDriver, message string) {\n\tlog.Errorf(\"Executor error: %v\\n\", message)\n}\n<|endoftext|>"} {"text":"package host\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/persist\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ compat04Metadata is the header used by 0.4.x versions of the host.\nvar compat04Metadata = persist.Metadata{\n\tHeader: \"Sia Host\",\n\tVersion: \"0.4\",\n}\n\n\/\/ compat04Obligation contains the fields that were used by the 0.4.x to\n\/\/ represent contract obligations.\ntype compat04Obligation struct {\n\tID types.FileContractID\n\tFileContract types.FileContract\n\tPath string\n}\n\n\/\/ compat04Host contains the fields that were saved to disk by 0.4.x hosts.\ntype compat04Host struct {\n\tSpaceRemaining int64\n\tFileCounter int\n\tProfit types.Currency\n\tHostSettings modules.HostSettings\n\tObligations []compat04Obligation\n\tSecretKey crypto.SecretKey\n\tPublicKey types.SiaPublicKey\n}\n\n\/\/ loadCompat04Obligations loads all of the file contract obligations found by\nfunc (h *Host) loadCompat04Obligations(c04os []compat04Obligation) []*contractObligation {\n\tcos := make([]*contractObligation, 0, len(c04os))\n\tfor _, c04o := range c04os {\n\t\t\/\/ Create an upgraded contract obligation out of the compatibility\n\t\t\/\/ obligation and add it to the set of upgraded obligations.\n\t\tco := &contractObligation{\n\t\t\tID: c04o.ID,\n\t\t\tOriginTransaction: types.Transaction{\n\t\t\t\tFileContracts: []types.FileContract{\n\t\t\t\t\tc04o.FileContract,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\tPath: c04o.Path,\n\t\t}\n\n\t\t\/\/ Update the statistics of the obligation, but do not add any action\n\t\t\/\/ items for the obligation. Action items will be added after the\n\t\t\/\/ consensus set has been scanned. Scanning the consensus set will\n\t\t\/\/ reveal which obligations have transactions on the blockchain, all\n\t\t\/\/ other obligations will be discarded.\n\t\th.obligationsByID[co.ID] = co\n\t\th.spaceRemaining -= int64(co.fileSize())\n\t\th.anticipatedRevenue = h.anticipatedRevenue.Add(co.value())\n\t\tcos = append(cos, co)\n\t}\n\treturn cos\n}\n\n\/\/ compatibilityLoad tries to load the file as a compatible version.\nfunc (h *Host) compatibilityLoad() error {\n\t\/\/ Try loading the file as a 0.4 file.\n\tc04h := new(compat04Host)\n\terr := persist.LoadFile(compat04Metadata, c04h, filepath.Join(h.persistDir, \"settings.json\"))\n\tif err != nil {\n\t\t\/\/ 0.4.x is the only backwards compatibility provided. File could not\n\t\t\/\/ be loaded.\n\t\treturn err\n\t}\n\n\t\/\/ Copy over host identity.\n\th.publicKey = c04h.PublicKey\n\th.secretKey = c04h.SecretKey\n\n\t\/\/ Copy file management, including providing compatibility for old\n\t\/\/ obligations.\n\th.fileCounter = int64(c04h.FileCounter)\n\th.spaceRemaining = c04h.HostSettings.TotalStorage\n\tupgradedObligations := h.loadCompat04Obligations(c04h.Obligations)\n\n\t\/\/ Copy over statistics.\n\th.revenue = c04h.Profit\n\n\t\/\/ Copy over utilities.\n\th.settings = c04h.HostSettings\n\n\t\/\/ Subscribe to the consensus set.\n\tif build.DEBUG && h.recentChange != (modules.ConsensusChangeID{}) {\n\t\tpanic(\"compatibility loading is not starting from blank consensus?\")\n\t}\n\t\/\/ initConsensusSubscription will scan through the consensus set and set\n\t\/\/ 'OriginConfirmed' and 'RevisionConfirmed' when the accociated file\n\t\/\/ contract and file contract revisions are found.\n\terr = h.initConsensusSubscription()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Remove all obligations that have not had their origin transactions\n\t\/\/ confirmed on the blockchain, and add action items for the rest.\n\tfor _, uo := range upgradedObligations {\n\t\tif !uo.OriginConfirmed {\n\t\t\t\/\/ Because there is no transaction on the blockchain, and because\n\t\t\t\/\/ the old host did not keep the transaction, it's highly unlikely\n\t\t\t\/\/ that a transaction will appear - the obligation should be\n\t\t\t\/\/ removed.\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the file Merkle root matches the Merkle root found in the\n\t\t\/\/ blockchain.\n\t\tfile, err := os.Open(uo.Path)\n\t\tif err != nil {\n\t\t\th.log.Println(\"Compatibility contract file could not be opened.\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\tmerkleRoot, err := crypto.ReaderMerkleRoot(file)\n\t\tfile.Close() \/\/ Close the file after use, to prevent buildup if the loop iterates many times.\n\t\tif err != nil {\n\t\t\th.log.Println(\"Compatibility contract file could not be checksummed\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\tif merkleRoot != uo.merkleRoot() {\n\t\t\th.log.Println(\"Compatibility contract file has the wrong merkle root\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ No action items have been created for this contract yet, create\n\t\t\/\/ them now.\n\t\th.handleActionItem(uo)\n\t}\n\treturn nil\n}\nset AcceptingContracts to true in compat loadpackage host\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/persist\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ compat04Metadata is the header used by 0.4.x versions of the host.\nvar compat04Metadata = persist.Metadata{\n\tHeader: \"Sia Host\",\n\tVersion: \"0.4\",\n}\n\n\/\/ compat04Obligation contains the fields that were used by the 0.4.x to\n\/\/ represent contract obligations.\ntype compat04Obligation struct {\n\tID types.FileContractID\n\tFileContract types.FileContract\n\tPath string\n}\n\n\/\/ compat04Host contains the fields that were saved to disk by 0.4.x hosts.\ntype compat04Host struct {\n\tSpaceRemaining int64\n\tFileCounter int\n\tProfit types.Currency\n\tHostSettings modules.HostSettings\n\tObligations []compat04Obligation\n\tSecretKey crypto.SecretKey\n\tPublicKey types.SiaPublicKey\n}\n\n\/\/ loadCompat04Obligations loads all of the file contract obligations found by\nfunc (h *Host) loadCompat04Obligations(c04os []compat04Obligation) []*contractObligation {\n\tcos := make([]*contractObligation, 0, len(c04os))\n\tfor _, c04o := range c04os {\n\t\t\/\/ Create an upgraded contract obligation out of the compatibility\n\t\t\/\/ obligation and add it to the set of upgraded obligations.\n\t\tco := &contractObligation{\n\t\t\tID: c04o.ID,\n\t\t\tOriginTransaction: types.Transaction{\n\t\t\t\tFileContracts: []types.FileContract{\n\t\t\t\t\tc04o.FileContract,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\tPath: c04o.Path,\n\t\t}\n\n\t\t\/\/ Update the statistics of the obligation, but do not add any action\n\t\t\/\/ items for the obligation. Action items will be added after the\n\t\t\/\/ consensus set has been scanned. Scanning the consensus set will\n\t\t\/\/ reveal which obligations have transactions on the blockchain, all\n\t\t\/\/ other obligations will be discarded.\n\t\th.obligationsByID[co.ID] = co\n\t\th.spaceRemaining -= int64(co.fileSize())\n\t\th.anticipatedRevenue = h.anticipatedRevenue.Add(co.value())\n\t\tcos = append(cos, co)\n\t}\n\treturn cos\n}\n\n\/\/ compatibilityLoad tries to load the file as a compatible version.\nfunc (h *Host) compatibilityLoad() error {\n\t\/\/ Try loading the file as a 0.4 file.\n\tc04h := new(compat04Host)\n\terr := persist.LoadFile(compat04Metadata, c04h, filepath.Join(h.persistDir, \"settings.json\"))\n\tif err != nil {\n\t\t\/\/ 0.4.x is the only backwards compatibility provided. File could not\n\t\t\/\/ be loaded.\n\t\treturn err\n\t}\n\n\t\/\/ Copy over host identity.\n\th.publicKey = c04h.PublicKey\n\th.secretKey = c04h.SecretKey\n\n\t\/\/ Copy file management, including providing compatibility for old\n\t\/\/ obligations.\n\th.fileCounter = int64(c04h.FileCounter)\n\th.spaceRemaining = c04h.HostSettings.TotalStorage\n\tupgradedObligations := h.loadCompat04Obligations(c04h.Obligations)\n\n\t\/\/ Copy over statistics.\n\th.revenue = c04h.Profit\n\n\t\/\/ Copy over utilities.\n\th.settings = c04h.HostSettings\n\t\/\/ AcceptingContracts should be true by default\n\th.settings.AcceptingContracts = true\n\n\t\/\/ Subscribe to the consensus set.\n\tif build.DEBUG && h.recentChange != (modules.ConsensusChangeID{}) {\n\t\tpanic(\"compatibility loading is not starting from blank consensus?\")\n\t}\n\t\/\/ initConsensusSubscription will scan through the consensus set and set\n\t\/\/ 'OriginConfirmed' and 'RevisionConfirmed' when the accociated file\n\t\/\/ contract and file contract revisions are found.\n\terr = h.initConsensusSubscription()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Remove all obligations that have not had their origin transactions\n\t\/\/ confirmed on the blockchain, and add action items for the rest.\n\tfor _, uo := range upgradedObligations {\n\t\tif !uo.OriginConfirmed {\n\t\t\t\/\/ Because there is no transaction on the blockchain, and because\n\t\t\t\/\/ the old host did not keep the transaction, it's highly unlikely\n\t\t\t\/\/ that a transaction will appear - the obligation should be\n\t\t\t\/\/ removed.\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the file Merkle root matches the Merkle root found in the\n\t\t\/\/ blockchain.\n\t\tfile, err := os.Open(uo.Path)\n\t\tif err != nil {\n\t\t\th.log.Println(\"Compatibility contract file could not be opened.\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\tmerkleRoot, err := crypto.ReaderMerkleRoot(file)\n\t\tfile.Close() \/\/ Close the file after use, to prevent buildup if the loop iterates many times.\n\t\tif err != nil {\n\t\t\th.log.Println(\"Compatibility contract file could not be checksummed\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\t\tif merkleRoot != uo.merkleRoot() {\n\t\t\th.log.Println(\"Compatibility contract file has the wrong merkle root\")\n\t\t\th.removeObligation(uo, obligationFailed)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ No action items have been created for this contract yet, create\n\t\t\/\/ them now.\n\t\th.handleActionItem(uo)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package exporter\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst (\n\tnamespace = \"clickhouse\" \/\/ For Prometheus metrics.\n)\n\n\/\/ Exporter collects clickhouse stats from the given URI and exports them using\n\/\/ the prometheus metrics package.\ntype Exporter struct {\n\tmetricsURI string\n\tasyncMetricsURI string\n\teventsURI string\n\tpartsURI string\n\tclient *http.Client\n\n\tscrapeFailures prometheus.Counter\n\n\tuser string\n\tpassword string\n}\n\n\/\/ NewExporter returns an initialized Exporter.\nfunc NewExporter(uri url.URL, insecure bool, user, password string) *Exporter {\n\tq := uri.Query()\n\tmetricsURI := uri\n\tq.Set(\"query\", \"select metric, value from system.metrics\")\n\tmetricsURI.RawQuery = q.Encode()\n\n\tasyncMetricsURI := uri\n\tq.Set(\"query\", \"select metric, value from system.asynchronous_metrics\")\n\tasyncMetricsURI.RawQuery = q.Encode()\n\n\teventsURI := uri\n\tq.Set(\"query\", \"select event, value from system.events\")\n\teventsURI.RawQuery = q.Encode()\n\n\tpartsURI := uri\n\tq.Set(\"query\", \"select database, table, sum(bytes) as bytes, count() as parts, sum(rows) as rows from system.parts where active = 1 group by database, table\")\n\tpartsURI.RawQuery = q.Encode()\n\n\treturn &Exporter{\n\t\tmetricsURI: metricsURI.String(),\n\t\tasyncMetricsURI: asyncMetricsURI.String(),\n\t\teventsURI: eventsURI.String(),\n\t\tpartsURI: partsURI.String(),\n\t\tscrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_scrape_failures_total\",\n\t\t\tHelp: \"Number of errors while scraping clickhouse.\",\n\t\t}),\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},\n\t\t\t},\n\t\t\tTimeout: 30 * time.Second,\n\t\t},\n\t\tuser: user,\n\t\tpassword: password,\n\t}\n}\n\n\/\/ Describe describes all the metrics ever exported by the clickhouse exporter. It\n\/\/ implements prometheus.Collector.\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\t\/\/ We cannot know in advance what metrics the exporter will generate\n\t\/\/ from clickhouse. So we use the poor man's describe method: Run a collect\n\t\/\/ and send the descriptors of all the collected metrics.\n\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}\n\nfunc (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tmetrics, err := e.parseKeyValueResponse(e.metricsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.metricsURI, err)\n\t}\n\n\tfor _, m := range metrics {\n\t\tnewMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: metricName(m.key),\n\t\t\tHelp: \"Number of \" + m.key + \" currently processed\",\n\t\t}, []string{}).WithLabelValues()\n\t\tnewMetric.Set(float64(m.value))\n\t\tnewMetric.Collect(ch)\n\t}\n\n\tasyncMetrics, err := e.parseKeyValueResponse(e.asyncMetricsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.asyncMetricsURI, err)\n\t}\n\n\tfor _, am := range asyncMetrics {\n\t\tnewMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: metricName(am.key),\n\t\t\tHelp: \"Number of \" + am.key + \" async processed\",\n\t\t}, []string{}).WithLabelValues()\n\t\tnewMetric.Set(float64(am.value))\n\t\tnewMetric.Collect(ch)\n\t}\n\n\tevents, err := e.parseKeyValueResponse(e.eventsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.eventsURI, err)\n\t}\n\n\tfor _, ev := range events {\n\t\tnewMetric, _ := prometheus.NewConstMetric(\n\t\t\tprometheus.NewDesc(\n\t\t\t\tnamespace+\"_\"+metricName(ev.key)+\"_total\",\n\t\t\t\t\"Number of \"+ev.key+\" total processed\", []string{}, nil),\n\t\t\tprometheus.CounterValue, float64(ev.value))\n\t\tch <- newMetric\n\t}\n\n\tparts, err := e.parsePartsResponse(e.partsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.partsURI, err)\n\t}\n\n\tfor _, part := range parts {\n\t\tnewBytesMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_bytes\",\n\t\t\tHelp: \"Table size in bytes\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewBytesMetric.Set(float64(part.bytes))\n\t\tnewBytesMetric.Collect(ch)\n\n\t\tnewCountMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_count\",\n\t\t\tHelp: \"Number of parts of the table\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewCountMetric.Set(float64(part.parts))\n\t\tnewCountMetric.Collect(ch)\n\n\t\tnewRowsMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_rows\",\n\t\t\tHelp: \"Number of rows in the table\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewRowsMetric.Set(float64(part.rows))\n\t\tnewRowsMetric.Collect(ch)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Exporter) handleResponse(uri string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.user != \"\" && e.password != \"\" {\n\t\treq.Header.Set(\"X-ClickHouse-User\", e.user)\n\t\treq.Header.Set(\"X-ClickHouse-Key\", e.password)\n\t}\n\tresp, err := e.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error scraping clickhouse: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode < 200 || resp.StatusCode >= 400 {\n\t\tif err != nil {\n\t\t\tdata = []byte(err.Error())\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Status %s (%d): %s\", resp.Status, resp.StatusCode, data)\n\t}\n\n\treturn data, nil\n}\n\ntype lineResult struct {\n\tkey string\n\tvalue int\n}\n\nfunc parseNumber(s string) (int, error) {\n\tv, err := strconv.Atoi(s)\n\tif err == nil {\n\t\treturn v, nil\n\t}\n\tif !errors.Is(err, strconv.ErrSyntax) {\n\t\treturn 0, err\n\t}\n\n\tvv, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(vv), nil\n}\n\nfunc (e *Exporter) parseKeyValueResponse(uri string) ([]lineResult, error) {\n\tdata, err := e.handleResponse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Parsing results\n\tlines := strings.Split(string(data), \"\\n\")\n\tvar results []lineResult = make([]lineResult, 0)\n\n\tfor i, line := range lines {\n\t\tparts := strings.Fields(line)\n\t\tif len(parts) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"parseKeyValueResponse: unexpected %d line: %s\", i, line)\n\t\t}\n\t\tk := strings.TrimSpace(parts[0])\n\t\tv, err := parseNumber(strings.TrimSpace(parts[1]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresults = append(results, lineResult{k, v})\n\n\t}\n\treturn results, nil\n}\n\ntype partsResult struct {\n\tdatabase string\n\ttable string\n\tbytes int\n\tparts int\n\trows int\n}\n\nfunc (e *Exporter) parsePartsResponse(uri string) ([]partsResult, error) {\n\tdata, err := e.handleResponse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Parsing results\n\tlines := strings.Split(string(data), \"\\n\")\n\tvar results []partsResult = make([]partsResult, 0)\n\n\tfor i, line := range lines {\n\t\tparts := strings.Fields(line)\n\t\tif len(parts) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(parts) != 5 {\n\t\t\treturn nil, fmt.Errorf(\"parsePartsResponse: unexpected %d line: %s\", i, line)\n\t\t}\n\t\tdatabase := strings.TrimSpace(parts[0])\n\t\ttable := strings.TrimSpace(parts[1])\n\n\t\tbytes, err := strconv.Atoi(strings.TrimSpace(parts[2]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcount, err := strconv.Atoi(strings.TrimSpace(parts[3]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trows, err := strconv.Atoi(strings.TrimSpace(parts[4]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, partsResult{database, table, bytes, count, rows})\n\t}\n\n\treturn results, nil\n}\n\n\/\/ Collect fetches the stats from configured clickhouse location and delivers them\n\/\/ as Prometheus metrics. It implements prometheus.Collector.\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tupValue := 1\n\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Printf(\"Error scraping clickhouse: %s\", err)\n\t\te.scrapeFailures.Inc()\n\t\te.scrapeFailures.Collect(ch)\n\n\t\tupValue = 0\n\t}\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tprometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, \"\", \"up\"),\n\t\t\t\"Was the last query of ClickHouse successful.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tprometheus.GaugeValue, float64(upValue),\n\t)\n\n}\n\nfunc metricName(in string) string {\n\tout := toSnake(in)\n\treturn strings.Replace(out, \".\", \"_\", -1)\n}\n\n\/\/ toSnake convert the given string to snake case following the Golang format:\n\/\/ acronyms are converted to lower-case and preceded by an underscore.\nfunc toSnake(in string) string {\n\trunes := []rune(in)\n\tlength := len(runes)\n\n\tvar out []rune\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {\n\t\t\tout = append(out, '_')\n\t\t}\n\t\tout = append(out, unicode.ToLower(runes[i]))\n\t}\n\n\treturn string(out)\n}\n\n\/\/ check interface\nvar _ prometheus.Collector = (*Exporter)(nil)\nsupport float valuespackage exporter\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst (\n\tnamespace = \"clickhouse\" \/\/ For Prometheus metrics.\n)\n\n\/\/ Exporter collects clickhouse stats from the given URI and exports them using\n\/\/ the prometheus metrics package.\ntype Exporter struct {\n\tmetricsURI string\n\tasyncMetricsURI string\n\teventsURI string\n\tpartsURI string\n\tclient *http.Client\n\n\tscrapeFailures prometheus.Counter\n\n\tuser string\n\tpassword string\n}\n\n\/\/ NewExporter returns an initialized Exporter.\nfunc NewExporter(uri url.URL, insecure bool, user, password string) *Exporter {\n\tq := uri.Query()\n\tmetricsURI := uri\n\tq.Set(\"query\", \"select metric, value from system.metrics\")\n\tmetricsURI.RawQuery = q.Encode()\n\n\tasyncMetricsURI := uri\n\tq.Set(\"query\", \"select metric, value from system.asynchronous_metrics\")\n\tasyncMetricsURI.RawQuery = q.Encode()\n\n\teventsURI := uri\n\tq.Set(\"query\", \"select event, value from system.events\")\n\teventsURI.RawQuery = q.Encode()\n\n\tpartsURI := uri\n\tq.Set(\"query\", \"select database, table, sum(bytes) as bytes, count() as parts, sum(rows) as rows from system.parts where active = 1 group by database, table\")\n\tpartsURI.RawQuery = q.Encode()\n\n\treturn &Exporter{\n\t\tmetricsURI: metricsURI.String(),\n\t\tasyncMetricsURI: asyncMetricsURI.String(),\n\t\teventsURI: eventsURI.String(),\n\t\tpartsURI: partsURI.String(),\n\t\tscrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_scrape_failures_total\",\n\t\t\tHelp: \"Number of errors while scraping clickhouse.\",\n\t\t}),\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},\n\t\t\t},\n\t\t\tTimeout: 30 * time.Second,\n\t\t},\n\t\tuser: user,\n\t\tpassword: password,\n\t}\n}\n\n\/\/ Describe describes all the metrics ever exported by the clickhouse exporter. It\n\/\/ implements prometheus.Collector.\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\t\/\/ We cannot know in advance what metrics the exporter will generate\n\t\/\/ from clickhouse. So we use the poor man's describe method: Run a collect\n\t\/\/ and send the descriptors of all the collected metrics.\n\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}\n\nfunc (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tmetrics, err := e.parseKeyValueResponse(e.metricsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.metricsURI, err)\n\t}\n\n\tfor _, m := range metrics {\n\t\tnewMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: metricName(m.key),\n\t\t\tHelp: \"Number of \" + m.key + \" currently processed\",\n\t\t}, []string{}).WithLabelValues()\n\t\tnewMetric.Set(m.value)\n\t\tnewMetric.Collect(ch)\n\t}\n\n\tasyncMetrics, err := e.parseKeyValueResponse(e.asyncMetricsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.asyncMetricsURI, err)\n\t}\n\n\tfor _, am := range asyncMetrics {\n\t\tnewMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: metricName(am.key),\n\t\t\tHelp: \"Number of \" + am.key + \" async processed\",\n\t\t}, []string{}).WithLabelValues()\n\t\tnewMetric.Set(am.value)\n\t\tnewMetric.Collect(ch)\n\t}\n\n\tevents, err := e.parseKeyValueResponse(e.eventsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.eventsURI, err)\n\t}\n\n\tfor _, ev := range events {\n\t\tnewMetric, _ := prometheus.NewConstMetric(\n\t\t\tprometheus.NewDesc(\n\t\t\t\tnamespace+\"_\"+metricName(ev.key)+\"_total\",\n\t\t\t\t\"Number of \"+ev.key+\" total processed\", []string{}, nil),\n\t\t\tprometheus.CounterValue, float64(ev.value))\n\t\tch <- newMetric\n\t}\n\n\tparts, err := e.parsePartsResponse(e.partsURI)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error scraping clickhouse url %v: %v\", e.partsURI, err)\n\t}\n\n\tfor _, part := range parts {\n\t\tnewBytesMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_bytes\",\n\t\t\tHelp: \"Table size in bytes\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewBytesMetric.Set(float64(part.bytes))\n\t\tnewBytesMetric.Collect(ch)\n\n\t\tnewCountMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_count\",\n\t\t\tHelp: \"Number of parts of the table\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewCountMetric.Set(float64(part.parts))\n\t\tnewCountMetric.Collect(ch)\n\n\t\tnewRowsMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"table_parts_rows\",\n\t\t\tHelp: \"Number of rows in the table\",\n\t\t}, []string{\"database\", \"table\"}).WithLabelValues(part.database, part.table)\n\t\tnewRowsMetric.Set(float64(part.rows))\n\t\tnewRowsMetric.Collect(ch)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Exporter) handleResponse(uri string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.user != \"\" && e.password != \"\" {\n\t\treq.Header.Set(\"X-ClickHouse-User\", e.user)\n\t\treq.Header.Set(\"X-ClickHouse-Key\", e.password)\n\t}\n\tresp, err := e.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error scraping clickhouse: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode < 200 || resp.StatusCode >= 400 {\n\t\tif err != nil {\n\t\t\tdata = []byte(err.Error())\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Status %s (%d): %s\", resp.Status, resp.StatusCode, data)\n\t}\n\n\treturn data, nil\n}\n\ntype lineResult struct {\n\tkey string\n\tvalue float64\n}\n\nfunc parseNumber(s string) (float64, error) {\n\tv, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn v, nil\n}\n\nfunc (e *Exporter) parseKeyValueResponse(uri string) ([]lineResult, error) {\n\tdata, err := e.handleResponse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Parsing results\n\tlines := strings.Split(string(data), \"\\n\")\n\tvar results []lineResult = make([]lineResult, 0)\n\n\tfor i, line := range lines {\n\t\tparts := strings.Fields(line)\n\t\tif len(parts) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"parseKeyValueResponse: unexpected %d line: %s\", i, line)\n\t\t}\n\t\tk := strings.TrimSpace(parts[0])\n\t\tv, err := parseNumber(strings.TrimSpace(parts[1]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresults = append(results, lineResult{k, v})\n\n\t}\n\treturn results, nil\n}\n\ntype partsResult struct {\n\tdatabase string\n\ttable string\n\tbytes int\n\tparts int\n\trows int\n}\n\nfunc (e *Exporter) parsePartsResponse(uri string) ([]partsResult, error) {\n\tdata, err := e.handleResponse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Parsing results\n\tlines := strings.Split(string(data), \"\\n\")\n\tvar results []partsResult = make([]partsResult, 0)\n\n\tfor i, line := range lines {\n\t\tparts := strings.Fields(line)\n\t\tif len(parts) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(parts) != 5 {\n\t\t\treturn nil, fmt.Errorf(\"parsePartsResponse: unexpected %d line: %s\", i, line)\n\t\t}\n\t\tdatabase := strings.TrimSpace(parts[0])\n\t\ttable := strings.TrimSpace(parts[1])\n\n\t\tbytes, err := strconv.Atoi(strings.TrimSpace(parts[2]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcount, err := strconv.Atoi(strings.TrimSpace(parts[3]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trows, err := strconv.Atoi(strings.TrimSpace(parts[4]))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, partsResult{database, table, bytes, count, rows})\n\t}\n\n\treturn results, nil\n}\n\n\/\/ Collect fetches the stats from configured clickhouse location and delivers them\n\/\/ as Prometheus metrics. It implements prometheus.Collector.\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tupValue := 1\n\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Printf(\"Error scraping clickhouse: %s\", err)\n\t\te.scrapeFailures.Inc()\n\t\te.scrapeFailures.Collect(ch)\n\n\t\tupValue = 0\n\t}\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tprometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, \"\", \"up\"),\n\t\t\t\"Was the last query of ClickHouse successful.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tprometheus.GaugeValue, float64(upValue),\n\t)\n\n}\n\nfunc metricName(in string) string {\n\tout := toSnake(in)\n\treturn strings.Replace(out, \".\", \"_\", -1)\n}\n\n\/\/ toSnake convert the given string to snake case following the Golang format:\n\/\/ acronyms are converted to lower-case and preceded by an underscore.\nfunc toSnake(in string) string {\n\trunes := []rune(in)\n\tlength := len(runes)\n\n\tvar out []rune\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {\n\t\t\tout = append(out, '_')\n\t\t}\n\t\tout = append(out, unicode.ToLower(runes[i]))\n\t}\n\n\treturn string(out)\n}\n\n\/\/ check interface\nvar _ prometheus.Collector = (*Exporter)(nil)\n<|endoftext|>"} {"text":"package executor\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"strings\"\n\t\"strconv\"\n\t\"github.com\/shopspring\/decimal\"\n\t\"fmt\"\n\t\"time\"\n)\n\ntype Publisher interface {\n\tPublish(futureId int, buy, sell map[decimal.Decimal]int)\n\tPublishStatus(id string, status int)\n\tPublishLatestPrice(id string, price decimal.Decimal, t time.Time)\n}\n\ntype publisher struct {\n\tetcd client.Client\n\tkapi client.KeysAPI\n}\n\nconst PublishKeyBuy \t= \"futures\/future_id\/buy\"\nconst PublishKeySell\t= \"futures\/future_id\/sell\"\nconst PublishKeyStatus = \"consignations\/id\/status\"\nconst PublishKeyLatestPrice = \"futures\/id\/latest\"\n\nfunc (p *publisher) Publish(futureId int,buy,sell map[decimal.Decimal]int) {\n\n\tp.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyBuy,\"future_id\",strconv.Itoa(futureId),1),\n\t\tgetPricesString(buy),nil)\n\tp.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeySell,\"future_id\",strconv.Itoa(futureId),1),\n\t\tgetPricesString(sell),nil)\n}\n\nfunc (p *publisher) PublishStatus(id string, status int) () {\n\tp.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyStatus,\"id\",id,1),\n\t\tstrconv.Itoa(status),nil)\n}\n\nfunc (p *publisher) PublishLatestPrice(id string, price decimal.Decimal, t time.Time) () {\n\t_,err := p.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyLatestPrice,\"id\",id,1),\n\t\tprice.String() + \",\" + t.String(),nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\n\n\nfunc NewPublisher(config client.Config) Publisher {\n\tc, err := client.New(config)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &publisher{\n\t\tetcd: c,\n\t\tkapi: client.NewKeysAPI(c),\n\t}\n}\n\nfunc getPricesString(data map[decimal.Decimal]int) string {\n\tres := \"\"\n\tfor price, quantity := range data {\n\t\tres += price.String()+\"=\"+strconv.Itoa(quantity)+ \",\"\n\t}\n\tif res != \"\" {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res\n}\nPublish depth at one keypackage executor\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"strings\"\n\t\"strconv\"\n\t\"github.com\/shopspring\/decimal\"\n\t\"fmt\"\n\t\"time\"\n)\n\ntype Publisher interface {\n\tPublish(futureId int, buy, sell map[decimal.Decimal]int)\n\tPublishStatus(id string, status int)\n\tPublishLatestPrice(id string, price decimal.Decimal, t time.Time)\n}\n\ntype publisher struct {\n\tetcd client.Client\n\tkapi client.KeysAPI\n}\n\nconst PublishKeyBuy \t= \"futures\/future_id\/buy\"\nconst PublishKeySell\t= \"futures\/future_id\/sell\"\nconst PublishKeyStatus = \"consignations\/id\/status\"\nconst PublishKeyLatestPrice = \"futures\/id\/latest\"\nconst PublishKeyFuture = \"futures\/id\"\n\nfunc (p *publisher) Publish(futureId int,buy,sell map[decimal.Decimal]int) {\n\n\tp.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyFuture,\"future_id\",strconv.Itoa(futureId),1),\n\t\tgetPricesString(buy) + \";\" + getPricesString(sell),nil)\n\n}\n\nfunc (p *publisher) PublishStatus(id string, status int) () {\n\tp.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyStatus,\"id\",id,1),\n\t\tstrconv.Itoa(status),nil)\n}\n\nfunc (p *publisher) PublishLatestPrice(id string, price decimal.Decimal, t time.Time) () {\n\t_,err := p.kapi.Set(context.Background(),\n\t\tstrings.Replace(PublishKeyLatestPrice,\"id\",id,1),\n\t\tprice.String() + \",\" + t.String(),nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\n\n\nfunc NewPublisher(config client.Config) Publisher {\n\tc, err := client.New(config)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &publisher{\n\t\tetcd: c,\n\t\tkapi: client.NewKeysAPI(c),\n\t}\n}\n\nfunc getPricesString(data map[decimal.Decimal]int) string {\n\tres := \"\"\n\tfor price, quantity := range data {\n\t\tres += price.String()+\"=\"+strconv.Itoa(quantity)+ \",\"\n\t}\n\tif res != \"\" {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"..\/..\/..\/codec\"\n\t\"..\/..\/..\/internal\"\n\t\"google.golang.org\/grpc\"\n\t\"log\"\n\t\"net\"\n\/\/\t\"time\"\n\t\"fmt\"\n\/\/\t\"math\/rand\"\n\t\"runtime\"\n\t\"sync\"\n)\n\nvar context string = `\n子曰:“学而时习之,不亦悦乎?有朋自远方来,不亦乐乎?人不知而不愠,不亦君子乎?”\n\n有子曰:“其为人也孝悌而好犯上者,鲜矣。不好犯上而好作乱者,未之有也。君子务本,本立而道生。孝悌也者,其为仁之本与?”\n\n子曰:“巧言令色,鲜矣仁。”\n\n曾子曰:吾日三省乎吾身。为人谋而不忠乎?与朋友交而不信乎?传不习乎?\n\n子曰:道千乘之国,敬事而信,节用而爱人,使民以时。\n\n子曰:弟子入则孝,出则悌,谨而信,泛爱众而亲仁,行有余力,则以学文。\n\n子夏曰:贤贤易色,事父母,能竭其力。事君,能致其身。与朋友交,言而有信。虽曰未学,吾必谓之学矣。\n\n子曰:君子不重则不威,学则不固。主忠信,无友不如己者,过则勿惮改。\n\n曾子曰:慎终追远,民德归厚矣。\n\n子禽问于子贡曰:“夫子至于是邦也,必闻其政。求之与?抑与之与?”子贡曰:“夫子温良恭俭让以得之。夫子求之也,其诸异乎人之求之与?”\n\n子曰:父在,观其志。父没,观其行。三年无改于父之道,可谓孝矣。\n\n有子曰:礼之用,和为贵。先王之道斯为美。小大由之,有所不行。知和而和,不以礼节之,亦不可行也。\n\n有子曰:信近于义,言可复也。恭近于礼,远耻辱也。因不失其亲,亦可宗也。\n\n子曰:君子食无求饱,居无求安。敏于事而慎于言,就有道而正焉。可谓好学也已。\n\n子贡曰:“贫而无谄,富而无骄。何如?”子曰:“可也。未若贫而乐,富而好礼者也。”子贡曰:“诗云:如切如磋,如琢如磨。其斯之谓与?”子曰:“赐也,始可与言诗已矣。告诸往而知来者。”\n\n子曰:不患人之不己知,患不知人也。\n`\n\n\nfunc main() {\n\tvar opts []grpc.DialOption\n\topts = append(opts, grpc.WithInsecure())\n\n\t\/\/enable snappy compress\n\tif c, err := codec.New(\"snappy\"); err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t} else {\n\t\tif cc, err := codec.WithProto(c); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\topts = append(opts, grpc.WithCodec(cc))\n\t\t}\n\t}\n\n\tclient, err := internal.NewClient(\"127.0.0.1:8080\", nil, opts...)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tvar conn net.Conn\n\tconn, err = client.Dial(\"tcp\", \"127.0.0.1:8888\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tvar waitGroup sync.WaitGroup\n\n\twaitGroup.Add(1)\n\tgo func() {\n\t\tdefer waitGroup.Done()\n\n\t\tbuff := make([]byte, 1024)\n\n\t\tfor {\n\t\t\tn, err := conn.Read(buff)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/log.Println(\"Read:\", string(buff[:n]))\n\t\t\tfmt.Println(string(buff[:n]))\n\t\t}\n\t}()\n\n\tsender := func() {\n\t\tfor {\n\t\t\t\/\/now := <- time.After(time.Millisecond * 1000)\n\t\t\t\/\/now := time.Now()\n\t\t\t\/\/bytes := []byte(now.String())\n\t\t\tbytes := []byte(blob)\n\n\t\t\t_, err := conn.Write(bytes)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i:=0; i清理longalive.gopackage main\n\nimport (\n\t\"..\/..\/..\/codec\"\n\t\"..\/..\/..\/internal\"\n\t\"google.golang.org\/grpc\"\n\t\"log\"\n\t\"net\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n)\n\nfunc main() {\n\tvar opts []grpc.DialOption\n\topts = append(opts, grpc.WithInsecure())\n\n\t\/\/enable snappy compress\n\tif c, err := codec.New(\"snappy\"); err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t} else {\n\t\tif cc, err := codec.WithProto(c); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\topts = append(opts, grpc.WithCodec(cc))\n\t\t}\n\t}\n\n\tclient, err := internal.NewClient(\"127.0.0.1:8080\", nil, opts...)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tvar conn net.Conn\n\tconn, err = client.Dial(\"tcp\", \"127.0.0.1:8888\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tvar waitGroup sync.WaitGroup\n\n\twaitGroup.Add(1)\n\tgo func() {\n\t\tdefer waitGroup.Done()\n\n\t\tbuff := make([]byte, 1024)\n\n\t\tfor {\n\t\t\tn, err := conn.Read(buff)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfmt.Println(string(buff[:n]))\n\t\t}\n\t}()\n\n\tsender := func() {\n\t\tfor {\n\t\t\tbytes := []byte(blob)\n\n\t\t\t_, err := conn.Write(bytes)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i:=0; i"} {"text":"package middleware\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/DeedleFake\/Go-PhysicsFS\/physfs\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/glue\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/helpers\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/scheduler\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"github.com\/vifino\/golua\/lua\"\n\t\"github.com\/vifino\/luar\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Cache\nvar kvstore *cache.Cache\nvar cbc *cache.Cache\nvar cfe *cache.Cache\nvar LDumper *lua.State\n\n\/\/ cacheDump dumps the source bytecode cached\nfunc cacheDump(file string) (string, error, bool) {\n\tdata_tmp, found := cbc.Get(file)\n\tif found == false {\n\t\tdata, err := fileRead(file)\n\t\tif err != nil {\n\t\t\treturn \"\", err, false\n\t\t}\n\t\tres, err := bcdump(data)\n\t\tif err != nil {\n\t\t\treturn \"\", err, true\n\t\t}\n\t\tcbc.Set(file, res, cache.DefaultExpiration)\n\t\treturn res, nil, false\n\t} else {\n\t\t\/\/debug(\"Using Bytecode-cache for \" + file)\n\t\treturn data_tmp.(string), nil, false\n\t}\n}\n\n\/\/ bcdump actually dumps the bytecode\nfunc bcdump(data string) (string, error) {\n\tif LDumper.LoadString(data) != 0 {\n\t\treturn \"\", errors.New(LDumper.ToString(-1))\n\t}\n\tdefer LDumper.Pop(1)\n\treturn LDumper.FDump(), nil\n}\n\n\/\/ FS\nvar filesystem http.FileSystem\n\nfunc fileExists(file string) bool {\n\tdata_tmp, found := cfe.Get(file)\n\tif found == false {\n\t\texists := physfs.Exists(file)\n\t\tcfe.Set(file, exists, cache.DefaultExpiration)\n\t\treturn exists\n\t} else {\n\t\treturn data_tmp.(bool)\n\t}\n}\n\nfunc fileRead(file string) (string, error) {\n\tf, err := filesystem.Open(file)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr := bufio.NewReader(f)\n\tbuf := make([]byte, fi.Size())\n\t_, err = r.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), err\n}\n\n\/\/ Preloader\/Starter\nvar jobs int\n\n\/\/ Preloaded is the chan that contains the preloaded states\nvar Preloaded chan *lua.State\n\n\/\/ Preloader is the function that preloads the states\nfunc Preloader() {\n\tPreloaded = make(chan *lua.State, jobs)\n\tfor {\n\t\t\/\/fmt.Println(\"preloading\")\n\t\tL := luar.Init()\n\t\tBind(L, webroot)\n\t\terr := L.DoString(glue.MainGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = L.DoString(glue.RouteGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tPreloaded <- L\n\t}\n}\n\n\/\/ GetInstance grabs an instance from the preloaded list\nfunc GetInstance() *lua.State {\n\t\/\/fmt.Println(\"grabbing instance\")\n\tL := <-Preloaded\n\t\/\/fmt.Println(\"Done\")\n\treturn L\n}\n\n\/\/ Init\nfunc Init(j int, cfe_new *cache.Cache, kvstore_new *cache.Cache, root string) {\n\twebroot = root\n\tcfe = cfe_new\n\tkvstore = kvstore_new\n\tjobs = j\n\tfilesystem = physfs.FileSystem()\n\tcbc = cache.New(5*time.Minute, 30*time.Second) \/\/ Initialize cache with 5 minute lifetime and purge every 30 seconds\n\tLDumper = luar.Init()\n}\n\n\/\/ Lua behaves PHP-like, but for Lua\nfunc Lua() func(*gin.Context) {\n\t\/\/LDumper := luar.Init()\n\treturn func(context *gin.Context) {\n\t\tfile := context.Request.URL.Path\n\t\tif fileExists(file) {\n\t\t\t\/\/fmt.Println(\"start\")\n\t\t\tL := GetInstance()\n\t\t\t\/\/fmt.Println(\"after start\")\n\t\t\tdefer scheduler.Add(func() {\n\t\t\t\tL.Close()\n\t\t\t})\n\t\t\t\/\/fmt.Println(\"after after start\")\n\t\t\tBindContext(L, context)\n\t\t\t\/\/fmt.Println(\"before cache\")\n\t\t\tcode, err, lerr := cacheDump(file)\n\t\t\t\/\/fmt.Println(\"after cache\")\n\t\t\tif err != nil {\n\t\t\t\tif lerr == false {\n\t\t\t\t\tcontext.Next()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `\n\t\t\t\t\tSyntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t\t<body>\n\t\t\t\t\t\t<h1>Syntax Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t\t<code>`+string(err.Error())+`<\/code>\n\t\t\t\t\t<\/body>\n\t\t\t\t\t<\/html>`)\n\t\t\t\t\tcontext.Abort()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\tL.LoadBuffer(code, len(code), file) \/\/ This shouldn't error, was checked earlier.\n\t\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t\t<head><title>Runtime Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t<body>\n\t\t\t\t\t<h1>Runtime Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t\t<\/body>\n\t\t\t\t<\/html>`)\n\t\t\t\tcontext.Abort()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/*L.DoString(\"return CONTENT_TO_RETURN\")\n\t\t\tv := luar.CopyTableToMap(L, nil, -1)\n\t\t\tm := v.(map[string]interface{})\n\t\t\ti := int(m[\"code\"].(float64))\n\t\t\tif err != nil {\n\t\t\t\ti = http.StatusOK\n\t\t\t}*\/\n\t\t\t\/\/helpers.HTMLString(context, i, m[\"content\"].(string))\n\t\t} else {\n\t\t\tcontext.Next()\n\t\t}\n\t}\n}\n\n\/\/ DLR_NS does Route creation by lua\nfunc DLR_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) {\n\t\/*code, err := bcdump(code)\n\tif err != nil {\n\t\treturn func(*gin.Context) {}, err\n\t}*\/\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tdefer scheduler.Add(func() {\n\t\t\tL.Close()\n\t\t})\n\t\tBindContext(L, context)\n\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\/*if L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Syntax Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}*\/\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t}, nil\n}\n\n\/\/ DLR_RUS does the same as above, but reuses states\nfunc DLR_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reuses states. Much faster. Higher memory use though, because more states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tif L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\treturn func(context *gin.Context) {}, errors.New(L.ToString(-1))\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}, nil\n}\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n\tCheckOrigin: func(r *http.Request) bool { \/\/ Because it breaks some things.\n\t\treturn true\n\t},\n}\n\n\/\/ DLRWS_NS does the same as the first one, but for websockets.\nfunc DLRWS_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but for websockets.\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tBindContext(L, context)\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"con\": conn,\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"read_con\": (func(con *websocket.Conn) (int, string, error) {\n\t\t\t\tmessageType, p, err := con.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"send_con\": (func(con *websocket.Conn, t int, cnt string) error {\n\t\t\t\treturn con.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"close\": (func() error {\n\t\t\t\treturn conn.Close()\n\t\t\t}),\n\t\t\t\"close_con\": (func(con *websocket.Conn) error {\n\t\t\t\treturn con.Close()\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket Lua error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Close websocket.\n\t\tconn.Close()\n\n\t\tL.Close()\n\t}, nil\n}\n\n\/\/ DLRWS_RUS also does the thing for websockets, but reuses states, not quite handy given how many connections a websocket could take and how long the connection could keep alive.\nfunc DLRWS_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reusing states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"con\": conn,\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"read_con\": (func(con *websocket.Conn) (int, string, error) {\n\t\t\t\tmessageType, p, err := con.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"send_con\": (func(con *websocket.Conn, t int, cnt string) error {\n\t\t\t\treturn con.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"close\": (func() error {\n\t\t\t\treturn conn.Close()\n\t\t\t}),\n\t\t\t\"close_con\": (func(con *websocket.Conn) error {\n\t\t\t\treturn con.Close()\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket Lua error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tschan <- L\n\t\t\/\/ Close websocket.\n\t\tconn.Close()\n\t}, nil\n}\n<commit_msg>Fix formatting in error pages.<commit_after>package middleware\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/DeedleFake\/Go-PhysicsFS\/physfs\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/glue\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/helpers\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/scheduler\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"github.com\/vifino\/golua\/lua\"\n\t\"github.com\/vifino\/luar\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Cache\nvar kvstore *cache.Cache\nvar cbc *cache.Cache\nvar cfe *cache.Cache\nvar LDumper *lua.State\n\n\/\/ cacheDump dumps the source bytecode cached\nfunc cacheDump(file string) (string, error, bool) {\n\tdata_tmp, found := cbc.Get(file)\n\tif found == false {\n\t\tdata, err := fileRead(file)\n\t\tif err != nil {\n\t\t\treturn \"\", err, false\n\t\t}\n\t\tres, err := bcdump(data)\n\t\tif err != nil {\n\t\t\treturn \"\", err, true\n\t\t}\n\t\tcbc.Set(file, res, cache.DefaultExpiration)\n\t\treturn res, nil, false\n\t} else {\n\t\t\/\/debug(\"Using Bytecode-cache for \" + file)\n\t\treturn data_tmp.(string), nil, false\n\t}\n}\n\n\/\/ bcdump actually dumps the bytecode\nfunc bcdump(data string) (string, error) {\n\tif LDumper.LoadString(data) != 0 {\n\t\treturn \"\", errors.New(LDumper.ToString(-1))\n\t}\n\tdefer LDumper.Pop(1)\n\treturn LDumper.FDump(), nil\n}\n\n\/\/ FS\nvar filesystem http.FileSystem\n\nfunc fileExists(file string) bool {\n\tdata_tmp, found := cfe.Get(file)\n\tif found == false {\n\t\texists := physfs.Exists(file)\n\t\tcfe.Set(file, exists, cache.DefaultExpiration)\n\t\treturn exists\n\t} else {\n\t\treturn data_tmp.(bool)\n\t}\n}\n\nfunc fileRead(file string) (string, error) {\n\tf, err := filesystem.Open(file)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr := bufio.NewReader(f)\n\tbuf := make([]byte, fi.Size())\n\t_, err = r.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), err\n}\n\n\/\/ Preloader\/Starter\nvar jobs int\n\n\/\/ Preloaded is the chan that contains the preloaded states\nvar Preloaded chan *lua.State\n\n\/\/ Preloader is the function that preloads the states\nfunc Preloader() {\n\tPreloaded = make(chan *lua.State, jobs)\n\tfor {\n\t\t\/\/fmt.Println(\"preloading\")\n\t\tL := luar.Init()\n\t\tBind(L, webroot)\n\t\terr := L.DoString(glue.MainGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = L.DoString(glue.RouteGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tPreloaded <- L\n\t}\n}\n\n\/\/ GetInstance grabs an instance from the preloaded list\nfunc GetInstance() *lua.State {\n\t\/\/fmt.Println(\"grabbing instance\")\n\tL := <-Preloaded\n\t\/\/fmt.Println(\"Done\")\n\treturn L\n}\n\n\/\/ Init\nfunc Init(j int, cfe_new *cache.Cache, kvstore_new *cache.Cache, root string) {\n\twebroot = root\n\tcfe = cfe_new\n\tkvstore = kvstore_new\n\tjobs = j\n\tfilesystem = physfs.FileSystem()\n\tcbc = cache.New(5*time.Minute, 30*time.Second) \/\/ Initialize cache with 5 minute lifetime and purge every 30 seconds\n\tLDumper = luar.Init()\n}\n\n\/\/ Lua behaves PHP-like, but for Lua\nfunc Lua() func(*gin.Context) {\n\t\/\/LDumper := luar.Init()\n\treturn func(context *gin.Context) {\n\t\tfile := context.Request.URL.Path\n\t\tif fileExists(file) {\n\t\t\t\/\/fmt.Println(\"start\")\n\t\t\tL := GetInstance()\n\t\t\t\/\/fmt.Println(\"after start\")\n\t\t\tdefer scheduler.Add(func() {\n\t\t\t\tL.Close()\n\t\t\t})\n\t\t\t\/\/fmt.Println(\"after after start\")\n\t\t\tBindContext(L, context)\n\t\t\t\/\/fmt.Println(\"before cache\")\n\t\t\tcode, err, lerr := cacheDump(file)\n\t\t\t\/\/fmt.Println(\"after cache\")\n\t\t\tif err != nil {\n\t\t\t\tif lerr == false {\n\t\t\t\t\tcontext.Next()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n<body>\n\t<h1>Syntax Error in file `+context.Request.URL.Path+`:<\/h1>\n\t<pre>`+string(err.Error())+`<\/pre>\n<\/body>\n<\/html>`)\n\t\t\t\t\tcontext.Abort()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\tL.LoadBuffer(code, len(code), file) \/\/ This shouldn't error, was checked earlier.\n\t\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n<head><title>Runtime Error in `+context.Request.URL.Path+`<\/title>\n<body>\n\t<h1>Runtime Error in file `+context.Request.URL.Path+`:<\/h1>\n\t<pre>`+L.ToString(-1)+`<\/pre>\n<\/body>\n<\/html>`)\n\t\t\t\tcontext.Abort()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/*L.DoString(\"return CONTENT_TO_RETURN\")\n\t\t\tv := luar.CopyTableToMap(L, nil, -1)\n\t\t\tm := v.(map[string]interface{})\n\t\t\ti := int(m[\"code\"].(float64))\n\t\t\tif err != nil {\n\t\t\t\ti = http.StatusOK\n\t\t\t}*\/\n\t\t\t\/\/helpers.HTMLString(context, i, m[\"content\"].(string))\n\t\t} else {\n\t\t\tcontext.Next()\n\t\t}\n\t}\n}\n\n\/\/ DLR_NS does Route creation by lua\nfunc DLR_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) {\n\t\/*code, err := bcdump(code)\n\tif err != nil {\n\t\treturn func(*gin.Context) {}, err\n\t}*\/\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tdefer scheduler.Add(func() {\n\t\t\tL.Close()\n\t\t})\n\t\tBindContext(L, context)\n\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\/*if L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Syntax Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}*\/\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n<body>\n\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t<pre>`+L.ToString(-1)+`<\/pre>\n<\/body>\n<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t}, nil\n}\n\n\/\/ DLR_RUS does the same as above, but reuses states\nfunc DLR_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reuses states. Much faster. Higher memory use though, because more states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tif L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\treturn func(context *gin.Context) {}, errors.New(L.ToString(-1))\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n<body>\n\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t<pre>`+L.ToString(-1)+`<\/pre>\n<\/body>\n<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}, nil\n}\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n\tCheckOrigin: func(r *http.Request) bool { \/\/ Because it breaks some things.\n\t\treturn true\n\t},\n}\n\n\/\/ DLRWS_NS does the same as the first one, but for websockets.\nfunc DLRWS_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but for websockets.\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tBindContext(L, context)\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"con\": conn,\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"read_con\": (func(con *websocket.Conn) (int, string, error) {\n\t\t\t\tmessageType, p, err := con.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"send_con\": (func(con *websocket.Conn, t int, cnt string) error {\n\t\t\t\treturn con.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"close\": (func() error {\n\t\t\t\treturn conn.Close()\n\t\t\t}),\n\t\t\t\"close_con\": (func(con *websocket.Conn) error {\n\t\t\t\treturn con.Close()\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket Lua error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Close websocket.\n\t\tconn.Close()\n\n\t\tL.Close()\n\t}, nil\n}\n\n\/\/ DLRWS_RUS also does the thing for websockets, but reuses states, not quite handy given how many connections a websocket could take and how long the connection could keep alive.\nfunc DLRWS_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reusing states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"con\": conn,\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"read_con\": (func(con *websocket.Conn) (int, string, error) {\n\t\t\t\tmessageType, p, err := con.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"send_con\": (func(con *websocket.Conn, t int, cnt string) error {\n\t\t\t\treturn con.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t\t\"close\": (func() error {\n\t\t\t\treturn conn.Close()\n\t\t\t}),\n\t\t\t\"close_con\": (func(con *websocket.Conn) error {\n\t\t\t\treturn con.Close()\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket Lua error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tschan <- L\n\t\t\/\/ Close websocket.\n\t\tconn.Close()\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"..\/gorilla\/mux\/mux\"\n\t\"bytes\"\n\t\"net\/http\"\n)\n\nfunc initHandlers(r *mux.Router) {\n\thttp.HandleFunc(\"\/\", myhandler)\n}\n\nfunc myhandler(writer http.ResponseWriter, req *http.Request) {\n\ttpl, err := loadTemplate(\"index.html\")\n\tif err != nil {\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmydata := Example{Name: \"Joe\"}\n\tbuff := new(bytes.Buffer)\n\n\ttpl.Execute(buff, mydata)\n\twriter.Write(buff.Bytes())\n}\n\ntype Example struct {\n\tName string\n}\n<commit_msg>handlers.go example updated<commit_after>package main\n\nimport (\n\t\"..\/gorilla\/mux\/mux\"\n\t\"bytes\"\n\t\"net\/http\"\n)\n\ntype Example struct {\n\tName string\n}\n\n\n\/\/ in this function, we define all our patterns and their \n\/\/ handler functions\nfunc initHandlers(r *mux.Router) {\n\t\/\/ 1. If gorilla-mux is enabled, r is pointer to mux.Router and we have to use it.\n\t\/\/ Once first match is found, appropriate handler is called, so make sure you \n\t\/\/ order patterns appropriately. Pattern is regexp.\n\t\/\/ usage would be: r.HandleFunc(\"\/some-regexp-pattern\", handlerFn)\n\t\/\/\n\t\/\/ 2. If gorilla-mux is disabled, you will use default http's way of defining patterns.\n\t\/\/ Patterns are not regexp. Instead, longest match will win, so you don't have to worry\n\t\/\/ about ordering in this case.\n\t\/\/ usage: http.HandleFunc(\"\/pattern\", handlerFn)\n\t\/\/\n\t\/\/ this code is based on sample config provided, where gorilla-mux is disabled, so\n\thttp.HandleFunc(\"\/\", indexPage)\n\t\n}\n\n\n\/\/ indexPage() is a handler which will load some template and send the result back to the client\nfunc indexPage(writer http.ResponseWriter, req *http.Request) {\n\ttpl, err := loadTemplate(\"index.html\")\n\tif err != nil {\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tmydata := Example{Name: \"Joe\"}\n\tbuff := new(bytes.Buffer)\n\n\ttpl.Execute(buff, mydata)\n\twriter.Write(buff.Bytes())\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Tom Thorogood. All rights reserved.\n\/\/ Use of this source code is governed by a Modified\n\/\/ BSD License that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/tmthrgd\/id3v2\"\n)\n\nvar dryrun = flag.Bool(\"dry-run\", false, \"does not perform the file renaming\")\n\nfunc scan(work workUnit) error {\n\tf, err := os.Open(work.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tframes, err := id3v2.Scan(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttit2 := frames.Lookup(id3v2.FrameTIT2)\n\ttpe1 := frames.Lookup(id3v2.FrameTPE1)\n\n\tif tit2 == nil || tpe1 == nil {\n\t\tif filepath.Ext(work.path) != \".mp3\" {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn errors.New(\"missing TIT2 or TPE1 frame\")\n\t}\n\n\ttitle, err := tit2.Text()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tartist, err := tpe1.Text()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewName := title + \" - \" + artist + filepath.Ext(work.path)\n\tnewName = strings.Replace(newName, string(filepath.Separator), \"-\", -1)\n\n\tnewPath := filepath.Join(filepath.Dir(work.path), newName)\n\n\tif work.path == newPath {\n\t\treturn nil\n\t}\n\n\tnewURL := (&url.URL{\n\t\tScheme: \"file\",\n\t\tPath: newPath,\n\t}).String()\n\t*work.out = newURL\n\n\tname, padding := filepath.Base(work.path), \" \"\n\tif len(name) < 100 {\n\t\tpadding = strings.Repeat(\" \", 100-len(name))\n\t}\n\n\tfmt.Printf(\"%s%s%s\\n\", name, padding, filepath.Base(newPath))\n\n\tif *dryrun {\n\t\treturn nil\n\t}\n\n\tif _, err := os.Stat(newPath); err == nil {\n\t\treturn errors.New(\"destination file exists\")\n\t}\n\n\treturn os.Rename(work.path, newPath)\n}\n\nfunc worker(ch chan workUnit, wg *sync.WaitGroup) {\n\tfor work := range ch {\n\t\tif err := scan(work); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"<%s>: %v\\n\", work.path, err)\n\t\t}\n\n\t\twg.Done()\n\t}\n}\n\ntype workUnit struct {\n\tpath string\n\tout *string\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tfmt.Println(\"Usage: id3-music-renamer.go [-dry-run] <m3u8 playlist> [<m3u8 playlist out>]\")\n\t\tos.Exit(1)\n\t}\n\n\tr, err := os.Open(flag.Arg(0))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer r.Close()\n\n\tvar wg sync.WaitGroup\n\n\twork := make(chan workUnit, 32)\n\tdefer close(work)\n\n\tfor i := 0; i < cap(work); i++ {\n\t\tgo worker(work, &wg)\n\t}\n\n\ts := bufio.NewScanner(r)\n\n\tvar out []*string\n\n\tfor s.Scan() {\n\t\tline := s.Text()\n\n\t\tpath, err := url.Parse(line)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tout = append(out, &line)\n\n\t\tif path.Scheme != \"file\" {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\twork <- workUnit{path.Path, out[len(out)-1]}\n\t}\n\n\tif s.Err() != nil {\n\t\tpanic(s.Err())\n\t}\n\n\twg.Wait()\n\n\tvar outPath string\n\tif flag.NArg() == 2 {\n\t\toutPath = flag.Arg(1)\n\t} else {\n\t\toutPath = flag.Arg(0) + \"-new.m3u8\"\n\t}\n\n\tw, err := os.Create(outPath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tbw := bufio.NewWriter(w)\n\n\tfor _, line := range out {\n\t\tif _, err := io.WriteString(bw, *line); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif _, err := io.WriteString(bw, \"\\n\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif err := bw.Flush(); err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Use *bufio.Writer methods in main in id3-music-renamer.go<commit_after>\/\/ Copyright 2017 Tom Thorogood. All rights reserved.\n\/\/ Use of this source code is governed by a Modified\n\/\/ BSD License that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/tmthrgd\/id3v2\"\n)\n\nvar dryrun = flag.Bool(\"dry-run\", false, \"does not perform the file renaming\")\n\nfunc scan(work workUnit) error {\n\tf, err := os.Open(work.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tframes, err := id3v2.Scan(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttit2 := frames.Lookup(id3v2.FrameTIT2)\n\ttpe1 := frames.Lookup(id3v2.FrameTPE1)\n\n\tif tit2 == nil || tpe1 == nil {\n\t\tif filepath.Ext(work.path) != \".mp3\" {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn errors.New(\"missing TIT2 or TPE1 frame\")\n\t}\n\n\ttitle, err := tit2.Text()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tartist, err := tpe1.Text()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewName := title + \" - \" + artist + filepath.Ext(work.path)\n\tnewName = strings.Replace(newName, string(filepath.Separator), \"-\", -1)\n\n\tnewPath := filepath.Join(filepath.Dir(work.path), newName)\n\n\tif work.path == newPath {\n\t\treturn nil\n\t}\n\n\tnewURL := (&url.URL{\n\t\tScheme: \"file\",\n\t\tPath: newPath,\n\t}).String()\n\t*work.out = newURL\n\n\tname, padding := filepath.Base(work.path), \" \"\n\tif len(name) < 100 {\n\t\tpadding = strings.Repeat(\" \", 100-len(name))\n\t}\n\n\tfmt.Printf(\"%s%s%s\\n\", name, padding, filepath.Base(newPath))\n\n\tif *dryrun {\n\t\treturn nil\n\t}\n\n\tif _, err := os.Stat(newPath); err == nil {\n\t\treturn errors.New(\"destination file exists\")\n\t}\n\n\treturn os.Rename(work.path, newPath)\n}\n\nfunc worker(ch chan workUnit, wg *sync.WaitGroup) {\n\tfor work := range ch {\n\t\tif err := scan(work); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"<%s>: %v\\n\", work.path, err)\n\t\t}\n\n\t\twg.Done()\n\t}\n}\n\ntype workUnit struct {\n\tpath string\n\tout *string\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tfmt.Println(\"Usage: id3-music-renamer.go [-dry-run] <m3u8 playlist> [<m3u8 playlist out>]\")\n\t\tos.Exit(1)\n\t}\n\n\tr, err := os.Open(flag.Arg(0))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer r.Close()\n\n\tvar wg sync.WaitGroup\n\n\twork := make(chan workUnit, 32)\n\tdefer close(work)\n\n\tfor i := 0; i < cap(work); i++ {\n\t\tgo worker(work, &wg)\n\t}\n\n\ts := bufio.NewScanner(r)\n\n\tvar out []*string\n\n\tfor s.Scan() {\n\t\tline := s.Text()\n\n\t\tpath, err := url.Parse(line)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tout = append(out, &line)\n\n\t\tif path.Scheme != \"file\" {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\twork <- workUnit{path.Path, out[len(out)-1]}\n\t}\n\n\tif s.Err() != nil {\n\t\tpanic(s.Err())\n\t}\n\n\twg.Wait()\n\n\tvar outPath string\n\tif flag.NArg() == 2 {\n\t\toutPath = flag.Arg(1)\n\t} else {\n\t\toutPath = flag.Arg(0) + \"-new.m3u8\"\n\t}\n\n\tw, err := os.Create(outPath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tbw := bufio.NewWriter(w)\n\n\tfor _, line := range out {\n\t\tif _, err := bw.WriteString(*line); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif err := bw.WriteByte('\\n'); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif err := bw.Flush(); err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package scheduler\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/engine\"\n)\n\n\/\/go:generate counterfeiter . PipelineDB\n\ntype PipelineDB interface {\n\tCreateJobBuild(job string) (db.Build, error)\n\tGetJobBuildForInputs(job string, inputs []db.BuildInput) (db.Build, error)\n\tCreateJobBuildIfNoBuildsPending(job string) (db.Build, bool, error)\n\tGetNextPendingBuild(job string) (db.Build, error)\n\tSaveResourceVersions(atc.ResourceConfig, []atc.Version) error\n\tGetLatestInputVersions([]atc.JobInput) ([]db.BuildInput, error)\n\tScheduleBuild(buildID int, jobConfig atc.JobConfig) (bool, error)\n}\n\n\/\/go:generate counterfeiter . BuildsDB\n\ntype BuildsDB interface {\n\tGetAllStartedBuilds() ([]db.Build, error)\n\tErrorBuild(buildID int, err error) error\n}\n\n\/\/go:generate counterfeiter . BuildFactory\n\ntype BuildFactory interface {\n\tCreate(atc.JobConfig, atc.ResourceConfigs, []db.BuildInput) (atc.Plan, error)\n}\n\ntype Waiter interface {\n\tWait()\n}\n\n\/\/go:generate counterfeiter . Scanner\n\ntype Scanner interface {\n\tScan(lager.Logger, string) error\n}\n\ntype Scheduler struct {\n\tPipelineDB PipelineDB\n\tBuildsDB BuildsDB\n\tFactory BuildFactory\n\tEngine engine.Engine\n\tScanner Scanner\n}\n\nfunc (s *Scheduler) BuildLatestInputs(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) error {\n\tlogger = logger.Session(\"build-latest\")\n\n\tinputs := job.Inputs()\n\n\tif len(inputs) == 0 {\n\t\t\/\/ no inputs; no-op\n\t\treturn nil\n\t}\n\n\tlatestInputs, err := s.PipelineDB.GetLatestInputVersions(inputs)\n\tif err != nil {\n\t\tif err == db.ErrNoVersions {\n\t\t\tlogger.Debug(\"no-input-versions-available\")\n\t\t\treturn nil\n\t\t}\n\n\t\tlogger.Error(\"failed-to-get-latest-input-versions\", err)\n\t\treturn err\n\t}\n\n\tcheckInputs := []db.BuildInput{}\n\tfor _, input := range latestInputs {\n\t\tfor _, ji := range inputs {\n\t\t\tif ji.Name == input.Name {\n\t\t\t\tif ji.Trigger {\n\t\t\t\t\tcheckInputs = append(checkInputs, input)\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(checkInputs) == 0 {\n\t\tlogger.Debug(\"no-triggered-input-versions\")\n\t\treturn nil\n\t}\n\n\texistingBuild, err := s.PipelineDB.GetJobBuildForInputs(job.Name, checkInputs)\n\tif err == nil {\n\t\tlogger.Debug(\"build-already-exists-for-inputs\", lager.Data{\n\t\t\t\"existing-build\": existingBuild.ID,\n\t\t})\n\n\t\treturn nil\n\t}\n\n\tbuild, created, err := s.PipelineDB.CreateJobBuildIfNoBuildsPending(job.Name)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn err\n\t}\n\n\tif !created {\n\t\tlogger.Info(\"did-not-create-build-as-it-already-is-pending\")\n\t\treturn nil\n\t}\n\n\tlogger = logger.WithData(lager.Data{\"build\": build.ID})\n\n\tlogger.Debug(\"created-build\")\n\n\ts.TryNextPendingBuild(logger, job, resources).Wait()\n\n\treturn nil\n}\n\nfunc (s *Scheduler) TryNextPendingBuild(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) Waiter {\n\tlogger = logger.Session(\"try-next-pending\")\n\n\twg := new(sync.WaitGroup)\n\n\twg.Add(1)\n\tgo func() {\n\t\tbuild, err := s.PipelineDB.GetNextPendingBuild(job.Name)\n\t\tif err != nil {\n\t\t\tif err == db.ErrNoBuild {\n\t\t\t\twg.Done()\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger.Error(\"failed-to-get-next-pending-build\", err)\n\n\t\t\twg.Done()\n\n\t\t\treturn\n\t\t}\n\n\t\tcreatedBuild := s.scheduleAndResumePendingBuild(logger, build, job, resources)\n\n\t\twg.Done()\n\n\t\tif createdBuild != nil {\n\t\t\tlogger.Info(\"building\")\n\t\t\tcreatedBuild.Resume(logger)\n\t\t}\n\t}()\n\n\treturn wg\n}\n\nfunc (s *Scheduler) TriggerImmediately(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) (db.Build, error) {\n\tlogger = logger.Session(\"trigger-immediately\")\n\n\tbuild, err := s.PipelineDB.CreateJobBuild(job.Name)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn db.Build{}, err\n\t}\n\n\tgo func() {\n\t\tcreatedBuild := s.scheduleAndResumePendingBuild(logger, build, job, resources)\n\t\tif createdBuild != nil {\n\t\t\tlogger.Info(\"building\")\n\t\t\tcreatedBuild.Resume(logger)\n\t\t}\n\t}()\n\n\treturn build, nil\n}\n\nfunc (s *Scheduler) scheduleAndResumePendingBuild(logger lager.Logger, build db.Build, job atc.JobConfig, resources atc.ResourceConfigs) engine.Build {\n\tlogger = logger.WithData(lager.Data{\"build\": build.ID})\n\n\tscheduled, err := s.PipelineDB.ScheduleBuild(build.ID, job)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-schedule-build\", err)\n\t\treturn nil\n\t}\n\n\tif !scheduled {\n\t\tlogger.Debug(\"build-could-not-be-scheduled\")\n\t\treturn nil\n\t}\n\n\tbuildInputs := job.Inputs()\n\n\tfor _, input := range buildInputs {\n\t\tscanLog := logger.Session(\"scan\", lager.Data{\n\t\t\t\"input\": input.Name,\n\t\t\t\"resource\": input.Resource,\n\t\t})\n\n\t\terr := s.Scanner.Scan(scanLog, input.Resource)\n\t\tif err != nil {\n\t\t\tscanLog.Error(\"failed-to-scan\", err)\n\n\t\t\terr := s.BuildsDB.ErrorBuild(build.ID, err)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-mark-build-as-errored\", err)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tscanLog.Info(\"done\")\n\t}\n\n\tinputs, err := s.PipelineDB.GetLatestInputVersions(buildInputs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-get-latest-input-versions\", err)\n\t\treturn nil\n\t}\n\n\tplan, err := s.Factory.Create(job, resources, inputs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build-plan\", err)\n\t\treturn nil\n\t}\n\n\tcreatedBuild, err := s.Engine.CreateBuild(build, plan)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn nil\n\t}\n\n\treturn createdBuild\n}\n<commit_msg>Directly schedule build instead of depending on other methods<commit_after>package scheduler\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/engine\"\n)\n\n\/\/go:generate counterfeiter . PipelineDB\n\ntype PipelineDB interface {\n\tCreateJobBuild(job string) (db.Build, error)\n\tGetJobBuildForInputs(job string, inputs []db.BuildInput) (db.Build, error)\n\tCreateJobBuildIfNoBuildsPending(job string) (db.Build, bool, error)\n\tGetNextPendingBuild(job string) (db.Build, error)\n\tSaveResourceVersions(atc.ResourceConfig, []atc.Version) error\n\tGetLatestInputVersions([]atc.JobInput) ([]db.BuildInput, error)\n\tScheduleBuild(buildID int, jobConfig atc.JobConfig) (bool, error)\n}\n\n\/\/go:generate counterfeiter . BuildsDB\n\ntype BuildsDB interface {\n\tGetAllStartedBuilds() ([]db.Build, error)\n\tErrorBuild(buildID int, err error) error\n}\n\n\/\/go:generate counterfeiter . BuildFactory\n\ntype BuildFactory interface {\n\tCreate(atc.JobConfig, atc.ResourceConfigs, []db.BuildInput) (atc.Plan, error)\n}\n\ntype Waiter interface {\n\tWait()\n}\n\n\/\/go:generate counterfeiter . Scanner\n\ntype Scanner interface {\n\tScan(lager.Logger, string) error\n}\n\ntype Scheduler struct {\n\tPipelineDB PipelineDB\n\tBuildsDB BuildsDB\n\tFactory BuildFactory\n\tEngine engine.Engine\n\tScanner Scanner\n}\n\nfunc (s *Scheduler) BuildLatestInputs(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) error {\n\tlogger = logger.Session(\"build-latest\")\n\n\tinputs := job.Inputs()\n\n\tif len(inputs) == 0 {\n\t\t\/\/ no inputs; no-op\n\t\treturn nil\n\t}\n\n\tlatestInputs, err := s.PipelineDB.GetLatestInputVersions(inputs)\n\tif err != nil {\n\t\tif err == db.ErrNoVersions {\n\t\t\tlogger.Debug(\"no-input-versions-available\")\n\t\t\treturn nil\n\t\t}\n\n\t\tlogger.Error(\"failed-to-get-latest-input-versions\", err)\n\t\treturn err\n\t}\n\n\tcheckInputs := []db.BuildInput{}\n\tfor _, input := range latestInputs {\n\t\tfor _, ji := range inputs {\n\t\t\tif ji.Name == input.Name {\n\t\t\t\tif ji.Trigger {\n\t\t\t\t\tcheckInputs = append(checkInputs, input)\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(checkInputs) == 0 {\n\t\tlogger.Debug(\"no-triggered-input-versions\")\n\t\treturn nil\n\t}\n\n\texistingBuild, err := s.PipelineDB.GetJobBuildForInputs(job.Name, checkInputs)\n\tif err == nil {\n\t\tlogger.Debug(\"build-already-exists-for-inputs\", lager.Data{\n\t\t\t\"existing-build\": existingBuild.ID,\n\t\t})\n\n\t\treturn nil\n\t}\n\n\tbuild, created, err := s.PipelineDB.CreateJobBuildIfNoBuildsPending(job.Name)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn err\n\t}\n\n\tif !created {\n\t\tlogger.Info(\"did-not-create-build-as-it-already-is-pending\")\n\t\treturn nil\n\t}\n\n\tlogger = logger.WithData(lager.Data{\"build\": build.ID})\n\n\tlogger.Debug(\"created-build\")\n\n\tcreatedBuild := s.scheduleAndResumePendingBuild(logger, build, job, resources)\n\n\tif createdBuild != nil {\n\t\tlogger.Info(\"building\")\n\t\tgo createdBuild.Resume(logger)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scheduler) TryNextPendingBuild(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) Waiter {\n\tlogger = logger.Session(\"try-next-pending\")\n\n\twg := new(sync.WaitGroup)\n\n\twg.Add(1)\n\tgo func() {\n\t\tbuild, err := s.PipelineDB.GetNextPendingBuild(job.Name)\n\t\tif err != nil {\n\t\t\tif err == db.ErrNoBuild {\n\t\t\t\twg.Done()\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger.Error(\"failed-to-get-next-pending-build\", err)\n\n\t\t\twg.Done()\n\n\t\t\treturn\n\t\t}\n\n\t\tcreatedBuild := s.scheduleAndResumePendingBuild(logger, build, job, resources)\n\n\t\twg.Done()\n\n\t\tif createdBuild != nil {\n\t\t\tlogger.Info(\"building\")\n\t\t\tcreatedBuild.Resume(logger)\n\t\t}\n\t}()\n\n\treturn wg\n}\n\nfunc (s *Scheduler) TriggerImmediately(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) (db.Build, error) {\n\tlogger = logger.Session(\"trigger-immediately\")\n\n\tbuild, err := s.PipelineDB.CreateJobBuild(job.Name)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn db.Build{}, err\n\t}\n\n\tgo func() {\n\t\tcreatedBuild := s.scheduleAndResumePendingBuild(logger, build, job, resources)\n\t\tif createdBuild != nil {\n\t\t\tlogger.Info(\"building\")\n\t\t\tcreatedBuild.Resume(logger)\n\t\t}\n\t}()\n\n\treturn build, nil\n}\n\nfunc (s *Scheduler) scheduleAndResumePendingBuild(logger lager.Logger, build db.Build, job atc.JobConfig, resources atc.ResourceConfigs) engine.Build {\n\tlogger = logger.WithData(lager.Data{\"build\": build.ID})\n\n\tscheduled, err := s.PipelineDB.ScheduleBuild(build.ID, job)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-schedule-build\", err)\n\t\treturn nil\n\t}\n\n\tif !scheduled {\n\t\tlogger.Debug(\"build-could-not-be-scheduled\")\n\t\treturn nil\n\t}\n\n\tbuildInputs := job.Inputs()\n\n\tfor _, input := range buildInputs {\n\t\tscanLog := logger.Session(\"scan\", lager.Data{\n\t\t\t\"input\": input.Name,\n\t\t\t\"resource\": input.Resource,\n\t\t})\n\n\t\terr := s.Scanner.Scan(scanLog, input.Resource)\n\t\tif err != nil {\n\t\t\tscanLog.Error(\"failed-to-scan\", err)\n\n\t\t\terr := s.BuildsDB.ErrorBuild(build.ID, err)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-mark-build-as-errored\", err)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tscanLog.Info(\"done\")\n\t}\n\n\tinputs, err := s.PipelineDB.GetLatestInputVersions(buildInputs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-get-latest-input-versions\", err)\n\t\treturn nil\n\t}\n\n\tplan, err := s.Factory.Create(job, resources, inputs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build-plan\", err)\n\t\treturn nil\n\t}\n\n\tcreatedBuild, err := s.Engine.CreateBuild(build, plan)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-build\", err)\n\t\treturn nil\n\t}\n\n\treturn createdBuild\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package explorer handles the block explorer subsystem for generating the\n\/\/ explorer pages.\n\/\/ Copyright (c) 2017, The dcrdata developers\n\/\/ See LICENSE for details.\npackage explorer\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/decred\/dcrd\/chaincfg\"\n\t\"github.com\/decred\/dcrd\/dcrjson\"\n\t\"github.com\/decred\/dcrd\/dcrutil\"\n\t\"github.com\/decred\/dcrd\/wire\"\n\t\"github.com\/decred\/dcrdata\/blockdata\"\n\t\"github.com\/decred\/dcrdata\/db\/dbtypes\"\n\t\"github.com\/decred\/dcrdata\/txhelpers\"\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-chi\/chi\/middleware\"\n\t\"github.com\/rs\/cors\"\n)\n\nconst (\n\tmaxExplorerRows = 2000\n\tminExplorerRows = 20\n\tdefaultAddressRows int64 = 20\n\tMaxAddressRows int64 = 1000\n\tMaxUnconfirmedPossible int64 = 1000\n)\n\n\/\/ explorerDataSourceLite implements an interface for collecting data for the\n\/\/ explorer pages\ntype explorerDataSourceLite interface {\n\tGetExplorerBlock(hash string) *BlockInfo\n\tGetExplorerBlocks(start int, end int) []*BlockBasic\n\tGetBlockHeight(hash string) (int64, error)\n\tGetBlockHash(idx int64) (string, error)\n\tGetExplorerTx(txid string) *TxInfo\n\tGetExplorerAddress(address string, count, offset int64) *AddressInfo\n\tDecodeRawTransaction(txhex string) (*dcrjson.TxRawResult, error)\n\tSendRawTransaction(txhex string) (string, error)\n\tGetHeight() int\n\tGetChainParams() *chaincfg.Params\n\tUnconfirmedTxnsForAddress(address string) (*txhelpers.AddressOutpoints, int64, error)\n\tGetMempool() []MempoolTx\n\tTxHeight(txid string) (height int64)\n}\n\n\/\/ explorerDataSource implements extra data retrieval functions that require a\n\/\/ faster solution than RPC, or additional functionality.\ntype explorerDataSource interface {\n\tSpendingTransaction(fundingTx string, vout uint32) (string, uint32, int8, error)\n\tSpendingTransactions(fundingTxID string) ([]string, []uint32, []uint32, error)\n\tPoolStatusForTicket(txid string) (dbtypes.TicketSpendType, dbtypes.TicketPoolStatus, error)\n\tAddressHistory(address string, N, offset int64, txnType dbtypes.AddrTxnType) ([]*dbtypes.AddressRow, *AddressBalance, error)\n\tDevBalance() (*AddressBalance, error)\n\tFillAddressTransactions(addrInfo *AddressInfo) error\n\tBlockMissedVotes(blockHash string) ([]string, error)\n}\n\n\/\/ TicketStatusText generates the text to display on the explorer's transaction\n\/\/ page for the \"POOL STATUS\" field.\nfunc TicketStatusText(s dbtypes.TicketSpendType, p dbtypes.TicketPoolStatus) string {\n\tswitch p {\n\tcase dbtypes.PoolStatusLive:\n\t\treturn \"In Live Ticket Pool\"\n\tcase dbtypes.PoolStatusVoted:\n\t\treturn \"Voted\"\n\tcase dbtypes.PoolStatusExpired:\n\t\tswitch s {\n\t\tcase dbtypes.TicketUnspent:\n\t\t\treturn \"Expired, Unrevoked\"\n\t\tcase dbtypes.TicketRevoked:\n\t\t\treturn \"Expired, Revoked\"\n\t\tdefault:\n\t\t\treturn \"invalid ticket state\"\n\t\t}\n\tcase dbtypes.PoolStatusMissed:\n\t\tswitch s {\n\t\tcase dbtypes.TicketUnspent:\n\t\t\treturn \"Missed, Unrevoked\"\n\t\tcase dbtypes.TicketRevoked:\n\t\t\treturn \"Missed, Reevoked\"\n\t\tdefault:\n\t\t\treturn \"invalid ticket state\"\n\t\t}\n\tdefault:\n\t\treturn \"Immature\"\n\t}\n}\n\ntype explorerUI struct {\n\tMux *chi.Mux\n\tblockData explorerDataSourceLite\n\texplorerSource explorerDataSource\n\tliteMode bool\n\ttemplates templates\n\twsHub *WebsocketHub\n\tNewBlockDataMtx sync.RWMutex\n\tNewBlockData *BlockBasic\n\tExtraInfo *HomeInfo\n\tMempoolData *MempoolInfo\n\tChainParams *chaincfg.Params\n\tVersion string\n}\n\nfunc (exp *explorerUI) reloadTemplates() error {\n\treturn exp.templates.reloadTemplates()\n}\n\n\/\/ See reloadsig*.go for an exported method\nfunc (exp *explorerUI) reloadTemplatesSig(sig os.Signal) {\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, sig)\n\n\tgo func() {\n\t\tfor {\n\t\t\tsigr := <-sigChan\n\t\t\tlog.Infof(\"Received %s\", sig)\n\t\t\tif sigr == sig {\n\t\t\t\tif err := exp.reloadTemplates(); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"Explorer UI html templates reparsed.\")\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ StopWebsocketHub stops the websocket hub\nfunc (exp *explorerUI) StopWebsocketHub() {\n\tif exp == nil {\n\t\treturn\n\t}\n\tlog.Info(\"Stopping websocket hub.\")\n\texp.wsHub.Stop()\n}\n\n\/\/ New returns an initialized instance of explorerUI\nfunc New(dataSource explorerDataSourceLite, primaryDataSource explorerDataSource,\n\tuseRealIP bool, appVersion string) *explorerUI {\n\texp := new(explorerUI)\n\texp.Mux = chi.NewRouter()\n\texp.blockData = dataSource\n\texp.explorerSource = primaryDataSource\n\texp.MempoolData = new(MempoolInfo)\n\texp.Version = appVersion\n\t\/\/ explorerDataSource is an interface that could have a value of pointer\n\t\/\/ type, and if either is nil this means lite mode.\n\tif exp.explorerSource == nil || reflect.ValueOf(exp.explorerSource).IsNil() {\n\t\texp.liteMode = true\n\t}\n\n\tif useRealIP {\n\t\texp.Mux.Use(middleware.RealIP)\n\t}\n\n\tparams := exp.blockData.GetChainParams()\n\texp.ChainParams = params\n\n\t\/\/ Development subsidy address of the current network\n\tdevSubsidyAddress, err := dbtypes.DevSubsidyAddress(params)\n\tif err != nil {\n\t\tlog.Warnf(\"explorer.New: %v\", err)\n\t}\n\tlog.Debugf(\"Organization address: %s\", devSubsidyAddress)\n\texp.ExtraInfo = &HomeInfo{\n\t\tDevAddress: devSubsidyAddress,\n\t}\n\n\tnoTemplateError := func(err error) *explorerUI {\n\t\tlog.Errorf(\"Unable to create new html template: %v\", err)\n\t\treturn nil\n\t}\n\ttmpls := []string{\"home\", \"explorer\", \"mempool\", \"block\", \"tx\", \"address\", \"rawtx\", \"error\", \"parameters\"}\n\n\ttempDefaults := []string{\"extras\"}\n\n\texp.templates = newTemplates(\"views\", tempDefaults, makeTemplateFuncMap(exp.ChainParams))\n\n\tfor _, name := range tmpls {\n\t\tif err := exp.templates.addTemplate(name); err != nil {\n\t\t\treturn noTemplateError(err)\n\t\t}\n\t}\n\n\texp.addRoutes()\n\n\texp.wsHub = NewWebsocketHub()\n\n\tgo exp.wsHub.run()\n\n\treturn exp\n}\n\nfunc (exp *explorerUI) Store(blockData *blockdata.BlockData, _ *wire.MsgBlock) error {\n\texp.NewBlockDataMtx.Lock()\n\tbData := blockData.ToBlockExplorerSummary()\n\tnewBlockData := &BlockBasic{\n\t\tHeight: int64(bData.Height),\n\t\tVoters: bData.Voters,\n\t\tFreshStake: bData.FreshStake,\n\t\tSize: int32(bData.Size),\n\t\tTransactions: bData.TxLen,\n\t\tBlockTime: bData.Time,\n\t\tFormattedTime: bData.FormattedTime,\n\t\tFormattedBytes: humanize.Bytes(uint64(bData.Size)),\n\t\tRevocations: uint32(bData.Revocations),\n\t}\n\texp.NewBlockData = newBlockData\n\tpercentage := func(a float64, b float64) float64 {\n\t\treturn (a \/ b) * 100\n\t}\n\n\texp.ExtraInfo = &HomeInfo{\n\t\tCoinSupply: blockData.ExtraInfo.CoinSupply,\n\t\tStakeDiff: blockData.CurrentStakeDiff.CurrentStakeDifficulty,\n\t\tIdxBlockInWindow: blockData.IdxBlockInWindow,\n\t\tIdxInRewardWindow: int(newBlockData.Height % exp.ChainParams.SubsidyReductionInterval),\n\t\tDevAddress: exp.ExtraInfo.DevAddress,\n\t\tDifficulty: blockData.Header.Difficulty,\n\t\tNBlockSubsidy: BlockSubsidy{\n\t\t\tDev: blockData.ExtraInfo.NextBlockSubsidy.Developer,\n\t\t\tPoS: blockData.ExtraInfo.NextBlockSubsidy.PoS,\n\t\t\tPoW: blockData.ExtraInfo.NextBlockSubsidy.PoW,\n\t\t\tTotal: blockData.ExtraInfo.NextBlockSubsidy.Total,\n\t\t},\n\t\tParams: ChainParams{\n\t\t\tWindowSize: exp.ChainParams.StakeDiffWindowSize,\n\t\t\tRewardWindowSize: exp.ChainParams.SubsidyReductionInterval,\n\t\t\tBlockTime: exp.ChainParams.TargetTimePerBlock.Nanoseconds(),\n\t\t},\n\t\tPoolInfo: TicketPoolInfo{\n\t\t\tSize: blockData.PoolInfo.Size,\n\t\t\tValue: blockData.PoolInfo.Value,\n\t\t\tValAvg: blockData.PoolInfo.ValAvg,\n\t\t\tPercentage: percentage(blockData.PoolInfo.Value, dcrutil.Amount(blockData.ExtraInfo.CoinSupply).ToCoin()),\n\t\t\tTarget: exp.ChainParams.TicketPoolSize * exp.ChainParams.TicketsPerBlock,\n\t\t\tPercentTarget: func() float64 {\n\t\t\t\ttarget := float64(exp.ChainParams.TicketPoolSize * exp.ChainParams.TicketsPerBlock)\n\t\t\t\treturn float64(blockData.PoolInfo.Size) \/ target * 100\n\t\t\t}(),\n\t\t},\n\t\tTicketROI: func() float64 {\n\t\t\tPosSubPerVote := dcrutil.Amount(blockData.ExtraInfo.NextBlockSubsidy.PoS).ToCoin() \/ float64(exp.ChainParams.TicketsPerBlock)\n\t\t\treturn percentage(PosSubPerVote, blockData.CurrentStakeDiff.CurrentStakeDifficulty)\n\t\t}(),\n\n\t\t\/\/ If there are ticketpoolsize*TicketsPerBlock total tickets and\n\t\t\/\/ TicketsPerBlock are drawn every block, and assuming random selection\n\t\t\/\/ of tickets, then any one ticket will, on average, be selected to vote\n\t\t\/\/ once every ticketpoolsize blocks\n\n\t\t\/\/ Small deviations in reality are due to:\n\t\t\/\/ 1. Not all blocks have 5 votes. On average each block in Decred\n\t\t\/\/ currently has about 4.8 votes per block\n\t\t\/\/ 2. Total tickets in the pool varies slightly above and below\n\t\t\/\/ ticketpoolsize*TicketsPerBlock depending on supply and demand\n\n\t\t\/\/ Both minor deviations are not accounted for in the general ROI\n\t\t\/\/ calculation below because the variance they cause would be would be\n\t\t\/\/ extremely small.\n\n\t\t\/\/ The actual ROI of a ticket needs to also take into consideration the\n\t\t\/\/ ticket maturity (time from ticket purchase until its eligible to vote)\n\t\t\/\/ and coinbase maturity (time after vote until funds distributed to\n\t\t\/\/ ticket holder are avaliable to use)\n\t\tROIPeriod: func() string {\n\t\t\tPosAvgTotalBlocks := float64(\n\t\t\t\texp.ChainParams.TicketPoolSize +\n\t\t\t\t\texp.ChainParams.TicketMaturity +\n\t\t\t\t\texp.ChainParams.CoinbaseMaturity)\n\t\t\treturn fmt.Sprintf(\"%.2f days\", exp.ChainParams.TargetTimePerBlock.Seconds()*PosAvgTotalBlocks\/86400)\n\t\t}(),\n\t}\n\n\tif !exp.liteMode {\n\t\tgo exp.updateDevFundBalance()\n\t}\n\n\texp.NewBlockDataMtx.Unlock()\n\n\t\/\/ Signal to the websocket hub that a new block was received, but do not\n\t\/\/ block Store(), and do not hang forever in a goroutine waiting to send.\n\tgo func() {\n\t\tselect {\n\t\tcase exp.wsHub.HubRelay <- sigNewBlock:\n\t\tcase <-time.After(time.Second * 10):\n\t\t\tlog.Errorf(\"sigNewBlock send failed: Timeout waiting for WebsocketHub.\")\n\t\t}\n\t}()\n\n\tlog.Debugf(\"Got new block %d for the explorer.\", newBlockData.Height)\n\n\treturn nil\n}\n\nfunc (exp *explorerUI) updateDevFundBalance() {\n\t\/\/ yield processor to other goroutines\n\truntime.Gosched()\n\texp.NewBlockDataMtx.Lock()\n\tdefer exp.NewBlockDataMtx.Unlock()\n\n\tdevBalance, err := exp.explorerSource.DevBalance()\n\tif err == nil && devBalance != nil {\n\t\texp.ExtraInfo.DevFund = devBalance.TotalUnspent\n\t} else {\n\t\tlog.Errorf(\"explorerUI.updateDevFundBalance failed: %v\", err)\n\t}\n}\n\nfunc (exp *explorerUI) addRoutes() {\n\texp.Mux.Use(middleware.Logger)\n\texp.Mux.Use(middleware.Recoverer)\n\tcorsMW := cors.Default()\n\texp.Mux.Use(corsMW.Handler)\n\n\tredirect := func(url string) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tx := chi.URLParam(r, \"x\")\n\t\t\tif x != \"\" {\n\t\t\t\tx = \"\/\" + x\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"\/\"+url+x, http.StatusPermanentRedirect)\n\t\t}\n\t}\n\texp.Mux.Get(\"\/\", redirect(\"blocks\"))\n\n\texp.Mux.Get(\"\/block\/{x}\", redirect(\"block\"))\n\n\texp.Mux.Get(\"\/tx\/{x}\", redirect(\"tx\"))\n\n\texp.Mux.Get(\"\/address\/{x}\", redirect(\"address\"))\n\n\texp.Mux.Get(\"\/decodetx\", redirect(\"decodetx\"))\n}\n<commit_msg>dev funds zero (#461)<commit_after>\/\/ Package explorer handles the block explorer subsystem for generating the\n\/\/ explorer pages.\n\/\/ Copyright (c) 2017, The dcrdata developers\n\/\/ See LICENSE for details.\npackage explorer\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/decred\/dcrd\/chaincfg\"\n\t\"github.com\/decred\/dcrd\/dcrjson\"\n\t\"github.com\/decred\/dcrd\/dcrutil\"\n\t\"github.com\/decred\/dcrd\/wire\"\n\t\"github.com\/decred\/dcrdata\/blockdata\"\n\t\"github.com\/decred\/dcrdata\/db\/dbtypes\"\n\t\"github.com\/decred\/dcrdata\/txhelpers\"\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-chi\/chi\/middleware\"\n\t\"github.com\/rs\/cors\"\n)\n\nconst (\n\tmaxExplorerRows = 2000\n\tminExplorerRows = 20\n\tdefaultAddressRows int64 = 20\n\tMaxAddressRows int64 = 1000\n\tMaxUnconfirmedPossible int64 = 1000\n)\n\n\/\/ explorerDataSourceLite implements an interface for collecting data for the\n\/\/ explorer pages\ntype explorerDataSourceLite interface {\n\tGetExplorerBlock(hash string) *BlockInfo\n\tGetExplorerBlocks(start int, end int) []*BlockBasic\n\tGetBlockHeight(hash string) (int64, error)\n\tGetBlockHash(idx int64) (string, error)\n\tGetExplorerTx(txid string) *TxInfo\n\tGetExplorerAddress(address string, count, offset int64) *AddressInfo\n\tDecodeRawTransaction(txhex string) (*dcrjson.TxRawResult, error)\n\tSendRawTransaction(txhex string) (string, error)\n\tGetHeight() int\n\tGetChainParams() *chaincfg.Params\n\tUnconfirmedTxnsForAddress(address string) (*txhelpers.AddressOutpoints, int64, error)\n\tGetMempool() []MempoolTx\n\tTxHeight(txid string) (height int64)\n}\n\n\/\/ explorerDataSource implements extra data retrieval functions that require a\n\/\/ faster solution than RPC, or additional functionality.\ntype explorerDataSource interface {\n\tSpendingTransaction(fundingTx string, vout uint32) (string, uint32, int8, error)\n\tSpendingTransactions(fundingTxID string) ([]string, []uint32, []uint32, error)\n\tPoolStatusForTicket(txid string) (dbtypes.TicketSpendType, dbtypes.TicketPoolStatus, error)\n\tAddressHistory(address string, N, offset int64, txnType dbtypes.AddrTxnType) ([]*dbtypes.AddressRow, *AddressBalance, error)\n\tDevBalance() (*AddressBalance, error)\n\tFillAddressTransactions(addrInfo *AddressInfo) error\n\tBlockMissedVotes(blockHash string) ([]string, error)\n}\n\n\/\/ TicketStatusText generates the text to display on the explorer's transaction\n\/\/ page for the \"POOL STATUS\" field.\nfunc TicketStatusText(s dbtypes.TicketSpendType, p dbtypes.TicketPoolStatus) string {\n\tswitch p {\n\tcase dbtypes.PoolStatusLive:\n\t\treturn \"In Live Ticket Pool\"\n\tcase dbtypes.PoolStatusVoted:\n\t\treturn \"Voted\"\n\tcase dbtypes.PoolStatusExpired:\n\t\tswitch s {\n\t\tcase dbtypes.TicketUnspent:\n\t\t\treturn \"Expired, Unrevoked\"\n\t\tcase dbtypes.TicketRevoked:\n\t\t\treturn \"Expired, Revoked\"\n\t\tdefault:\n\t\t\treturn \"invalid ticket state\"\n\t\t}\n\tcase dbtypes.PoolStatusMissed:\n\t\tswitch s {\n\t\tcase dbtypes.TicketUnspent:\n\t\t\treturn \"Missed, Unrevoked\"\n\t\tcase dbtypes.TicketRevoked:\n\t\t\treturn \"Missed, Reevoked\"\n\t\tdefault:\n\t\t\treturn \"invalid ticket state\"\n\t\t}\n\tdefault:\n\t\treturn \"Immature\"\n\t}\n}\n\ntype explorerUI struct {\n\tMux *chi.Mux\n\tblockData explorerDataSourceLite\n\texplorerSource explorerDataSource\n\tliteMode bool\n\ttemplates templates\n\twsHub *WebsocketHub\n\tNewBlockDataMtx sync.RWMutex\n\tNewBlockData *BlockBasic\n\tExtraInfo *HomeInfo\n\tMempoolData *MempoolInfo\n\tChainParams *chaincfg.Params\n\tVersion string\n}\n\nfunc (exp *explorerUI) reloadTemplates() error {\n\treturn exp.templates.reloadTemplates()\n}\n\n\/\/ See reloadsig*.go for an exported method\nfunc (exp *explorerUI) reloadTemplatesSig(sig os.Signal) {\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, sig)\n\n\tgo func() {\n\t\tfor {\n\t\t\tsigr := <-sigChan\n\t\t\tlog.Infof(\"Received %s\", sig)\n\t\t\tif sigr == sig {\n\t\t\t\tif err := exp.reloadTemplates(); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"Explorer UI html templates reparsed.\")\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ StopWebsocketHub stops the websocket hub\nfunc (exp *explorerUI) StopWebsocketHub() {\n\tif exp == nil {\n\t\treturn\n\t}\n\tlog.Info(\"Stopping websocket hub.\")\n\texp.wsHub.Stop()\n}\n\n\/\/ New returns an initialized instance of explorerUI\nfunc New(dataSource explorerDataSourceLite, primaryDataSource explorerDataSource,\n\tuseRealIP bool, appVersion string) *explorerUI {\n\texp := new(explorerUI)\n\texp.Mux = chi.NewRouter()\n\texp.blockData = dataSource\n\texp.explorerSource = primaryDataSource\n\texp.MempoolData = new(MempoolInfo)\n\texp.Version = appVersion\n\t\/\/ explorerDataSource is an interface that could have a value of pointer\n\t\/\/ type, and if either is nil this means lite mode.\n\tif exp.explorerSource == nil || reflect.ValueOf(exp.explorerSource).IsNil() {\n\t\texp.liteMode = true\n\t}\n\n\tif useRealIP {\n\t\texp.Mux.Use(middleware.RealIP)\n\t}\n\n\tparams := exp.blockData.GetChainParams()\n\texp.ChainParams = params\n\n\t\/\/ Development subsidy address of the current network\n\tdevSubsidyAddress, err := dbtypes.DevSubsidyAddress(params)\n\tif err != nil {\n\t\tlog.Warnf(\"explorer.New: %v\", err)\n\t}\n\tlog.Debugf(\"Organization address: %s\", devSubsidyAddress)\n\n\t\/\/ Set default static values for ExtraInfo\n\texp.ExtraInfo = &HomeInfo{\n\t\tDevAddress: devSubsidyAddress,\n\t\tParams: ChainParams{\n\t\t\tWindowSize: exp.ChainParams.StakeDiffWindowSize,\n\t\t\tRewardWindowSize: exp.ChainParams.SubsidyReductionInterval,\n\t\t\tBlockTime: exp.ChainParams.TargetTimePerBlock.Nanoseconds(),\n\t\t},\n\t\tPoolInfo: TicketPoolInfo{\n\t\t\tTarget: exp.ChainParams.TicketPoolSize * exp.ChainParams.TicketsPerBlock,\n\t\t},\n\t}\n\n\tnoTemplateError := func(err error) *explorerUI {\n\t\tlog.Errorf(\"Unable to create new html template: %v\", err)\n\t\treturn nil\n\t}\n\ttmpls := []string{\"home\", \"explorer\", \"mempool\", \"block\", \"tx\", \"address\", \"rawtx\", \"error\", \"parameters\"}\n\n\ttempDefaults := []string{\"extras\"}\n\n\texp.templates = newTemplates(\"views\", tempDefaults, makeTemplateFuncMap(exp.ChainParams))\n\n\tfor _, name := range tmpls {\n\t\tif err := exp.templates.addTemplate(name); err != nil {\n\t\t\treturn noTemplateError(err)\n\t\t}\n\t}\n\n\texp.addRoutes()\n\n\texp.wsHub = NewWebsocketHub()\n\n\tgo exp.wsHub.run()\n\n\treturn exp\n}\n\nfunc (exp *explorerUI) Store(blockData *blockdata.BlockData, _ *wire.MsgBlock) error {\n\texp.NewBlockDataMtx.Lock()\n\tbData := blockData.ToBlockExplorerSummary()\n\tnewBlockData := &BlockBasic{\n\t\tHeight: int64(bData.Height),\n\t\tVoters: bData.Voters,\n\t\tFreshStake: bData.FreshStake,\n\t\tSize: int32(bData.Size),\n\t\tTransactions: bData.TxLen,\n\t\tBlockTime: bData.Time,\n\t\tFormattedTime: bData.FormattedTime,\n\t\tFormattedBytes: humanize.Bytes(uint64(bData.Size)),\n\t\tRevocations: uint32(bData.Revocations),\n\t}\n\texp.NewBlockData = newBlockData\n\tpercentage := func(a float64, b float64) float64 {\n\t\treturn (a \/ b) * 100\n\t}\n\n\t\/\/ Update all ExtraInfo with latest data\n\texp.ExtraInfo.CoinSupply = blockData.ExtraInfo.CoinSupply\n\texp.ExtraInfo.StakeDiff = blockData.CurrentStakeDiff.CurrentStakeDifficulty\n\texp.ExtraInfo.IdxBlockInWindow = blockData.IdxBlockInWindow\n\texp.ExtraInfo.IdxInRewardWindow = int(newBlockData.Height % exp.ChainParams.SubsidyReductionInterval)\n\texp.ExtraInfo.Difficulty = blockData.Header.Difficulty\n\texp.ExtraInfo.NBlockSubsidy.Dev = blockData.ExtraInfo.NextBlockSubsidy.Developer\n\texp.ExtraInfo.NBlockSubsidy.PoS = blockData.ExtraInfo.NextBlockSubsidy.PoS\n\texp.ExtraInfo.NBlockSubsidy.PoW = blockData.ExtraInfo.NextBlockSubsidy.PoW\n\texp.ExtraInfo.NBlockSubsidy.Total = blockData.ExtraInfo.NextBlockSubsidy.Total\n\texp.ExtraInfo.PoolInfo.Size = blockData.PoolInfo.Size\n\texp.ExtraInfo.PoolInfo.Value = blockData.PoolInfo.Value\n\texp.ExtraInfo.PoolInfo.ValAvg = blockData.PoolInfo.ValAvg\n\texp.ExtraInfo.PoolInfo.Percentage = percentage(blockData.PoolInfo.Value, dcrutil.Amount(blockData.ExtraInfo.CoinSupply).ToCoin())\n\n\texp.ExtraInfo.PoolInfo.PercentTarget = func() float64 {\n\t\ttarget := float64(exp.ChainParams.TicketPoolSize * exp.ChainParams.TicketsPerBlock)\n\t\treturn float64(blockData.PoolInfo.Size) \/ target * 100\n\t}()\n\n\texp.ExtraInfo.TicketROI = func() float64 {\n\t\tPosSubPerVote := dcrutil.Amount(blockData.ExtraInfo.NextBlockSubsidy.PoS).ToCoin() \/ float64(exp.ChainParams.TicketsPerBlock)\n\t\treturn percentage(PosSubPerVote, blockData.CurrentStakeDiff.CurrentStakeDifficulty)\n\t}()\n\n\t\/\/ If there are ticketpoolsize*TicketsPerBlock total tickets and\n\t\/\/ TicketsPerBlock are drawn every block, and assuming random selection\n\t\/\/ of tickets, then any one ticket will, on average, be selected to vote\n\t\/\/ once every ticketpoolsize blocks\n\n\t\/\/ Small deviations in reality are due to:\n\t\/\/ 1. Not all blocks have 5 votes. On average each block in Decred\n\t\/\/ currently has about 4.8 votes per block\n\t\/\/ 2. Total tickets in the pool varies slightly above and below\n\t\/\/ ticketpoolsize*TicketsPerBlock depending on supply and demand\n\n\t\/\/ Both minor deviations are not accounted for in the general ROI\n\t\/\/ calculation below because the variance they cause would be would be\n\t\/\/ extremely small.\n\n\t\/\/ The actual ROI of a ticket needs to also take into consideration the\n\t\/\/ ticket maturity (time from ticket purchase until its eligible to vote)\n\t\/\/ and coinbase maturity (time after vote until funds distributed to\n\t\/\/ ticket holder are avaliable to use)\n\texp.ExtraInfo.ROIPeriod = func() string {\n\t\tPosAvgTotalBlocks := float64(\n\t\t\texp.ChainParams.TicketPoolSize +\n\t\t\t\texp.ChainParams.TicketMaturity +\n\t\t\t\texp.ChainParams.CoinbaseMaturity)\n\t\treturn fmt.Sprintf(\"%.2f days\", exp.ChainParams.TargetTimePerBlock.Seconds()*PosAvgTotalBlocks\/86400)\n\t}()\n\n\texp.NewBlockDataMtx.Unlock()\n\n\tif !exp.liteMode {\n\t\tgo exp.updateDevFundBalance()\n\t}\n\n\t\/\/ Signal to the websocket hub that a new block was received, but do not\n\t\/\/ block Store(), and do not hang forever in a goroutine waiting to send.\n\tgo func() {\n\t\tselect {\n\t\tcase exp.wsHub.HubRelay <- sigNewBlock:\n\t\tcase <-time.After(time.Second * 10):\n\t\t\tlog.Errorf(\"sigNewBlock send failed: Timeout waiting for WebsocketHub.\")\n\t\t}\n\t}()\n\n\tlog.Debugf(\"Got new block %d for the explorer.\", newBlockData.Height)\n\n\treturn nil\n}\n\nfunc (exp *explorerUI) updateDevFundBalance() {\n\t\/\/ yield processor to other goroutines\n\truntime.Gosched()\n\texp.NewBlockDataMtx.Lock()\n\tdefer exp.NewBlockDataMtx.Unlock()\n\n\tdevBalance, err := exp.explorerSource.DevBalance()\n\tif err == nil && devBalance != nil {\n\t\texp.ExtraInfo.DevFund = devBalance.TotalUnspent\n\t} else {\n\t\tlog.Errorf(\"explorerUI.updateDevFundBalance failed: %v\", err)\n\t}\n}\n\nfunc (exp *explorerUI) addRoutes() {\n\texp.Mux.Use(middleware.Logger)\n\texp.Mux.Use(middleware.Recoverer)\n\tcorsMW := cors.Default()\n\texp.Mux.Use(corsMW.Handler)\n\n\tredirect := func(url string) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tx := chi.URLParam(r, \"x\")\n\t\t\tif x != \"\" {\n\t\t\t\tx = \"\/\" + x\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"\/\"+url+x, http.StatusPermanentRedirect)\n\t\t}\n\t}\n\texp.Mux.Get(\"\/\", redirect(\"blocks\"))\n\n\texp.Mux.Get(\"\/block\/{x}\", redirect(\"block\"))\n\n\texp.Mux.Get(\"\/tx\/{x}\", redirect(\"tx\"))\n\n\texp.Mux.Get(\"\/address\/{x}\", redirect(\"address\"))\n\n\texp.Mux.Get(\"\/decodetx\", redirect(\"decodetx\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\nconst configEnvVar = \"PACH_CONFIG\"\nconst contextEnvVar = \"PACH_CONTEXT\"\n\nvar defaultConfigDir = filepath.Join(os.Getenv(\"HOME\"), \".pachyderm\")\nvar defaultConfigPath = filepath.Join(defaultConfigDir, \"config.json\")\n\nfunc configPath() string {\n\tif env, ok := os.LookupEnv(configEnvVar); ok {\n\t\treturn env\n\t}\n\treturn defaultConfigPath\n}\n\n\/\/ ActiveContext gets the active context in the config\nfunc (c *Config) ActiveContext() (*Context, error) {\n\tif env, ok := os.LookupEnv(contextEnvVar); ok {\n\t\tcontext := c.V2.Contexts[env]\n\t\tif context == nil {\n\t\t\treturn nil, fmt.Errorf(\"`%s` refers to a context that does not exist\", contextEnvVar)\n\t\t}\n\t\treturn context, nil\n\t}\n\tcontext := c.V2.Contexts[c.V2.ActiveContext]\n\tif context == nil {\n\t\treturn nil, fmt.Errorf(\"the active context references one that does exist; change the active context first\")\n\t}\n\treturn context, nil\n}\n\n\/\/ Read loads the Pachyderm config on this machine.\n\/\/ If an existing configuration cannot be found, it sets up the defaults. Read\n\/\/ returns a nil Config if and only if it returns a non-nil error.\nfunc Read() (*Config, error) {\n\tvar c *Config\n\n\t\/\/ Read json file\n\tp := configPath()\n\tif raw, err := ioutil.ReadFile(p); err == nil {\n\t\terr = json.Unmarshal(raw, &c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\t\/\/ File doesn't exist, so create a new config\n\t\tfmt.Fprintf(os.Stderr, \"No config detected at %q. Generating new config...\\n\", p)\n\t\tc = &Config{}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"fatal: could not read config at %q: %v\", p, err)\n\t}\n\n\tupdated := false\n\n\tif c.UserID == \"\" {\n\t\tupdated = true\n\t\tfmt.Fprintln(os.Stderr, \"No UserID present in config - generating new one.\")\n\t\tuuid, err := uuid.NewV4()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not generate new user ID: %v\", err)\n\t\t}\n\t\tc.UserID = uuid.String()\n\t}\n\n\tif c.V2 == nil {\n\t\tupdated = true\n\t\tif err := c.initV2(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif updated {\n\t\tfmt.Fprintf(os.Stderr, \"Rewriting config at %q.\\n\", p)\n\n\t\tif err := c.Write(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not rewrite config at %q: %v\", p, err)\n\t\t}\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *Config) initV2() error {\n\trules := clientcmd.NewDefaultClientConfigLoadingRules()\n\toverrides := &clientcmd.ConfigOverrides{}\n\tkubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)\n\trawKubeConfig, err := kubeConfig.RawConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tkubeContext := rawKubeConfig.CurrentContext\n\tfmt.Fprintf(os.Stderr, \"No config V2 present in config - generating a new one off of the kube context '%s'.\\n\", kubeContext)\n\n\tc.V2 = &ConfigV2{\n\t\tActiveContext: \"default\",\n\t\tContexts: map[string]*Context{},\n\t}\n\n\tif c.V1 != nil {\n\t\tc.V2.Contexts[\"default\"] = &Context{\n\t\t\tSource: ContextSource_CONFIG_V1,\n\t\t\tPachdAddress: c.V1.PachdAddress,\n\t\t\tServerCAs: c.V1.ServerCAs,\n\t\t\tSessionToken: c.V1.SessionToken,\n\t\t\tActiveTransaction: c.V1.ActiveTransaction,\n\t\t\tKubeContext: kubeContext,\n\t\t}\n\n\t\tc.V1 = nil\n\t} else {\n\t\tc.V2.Contexts[\"default\"] = &Context{\n\t\t\tSource: ContextSource_NONE,\n\t\t\tKubeContext: kubeContext,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Write writes the configuration in 'c' to this machine's Pachyderm config\n\/\/ file.\nfunc (c *Config) Write() error {\n\tif c.V1 != nil {\n\t\tpanic(\"v1 config included, implying a bug\")\n\t}\n\n\trawConfig, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If we're not using a custom config path, create the default config path\n\tp := configPath()\n\tif _, ok := os.LookupEnv(configEnvVar); ok {\n\t\t\/\/ using overridden config path -- just make sure the parent dir exists\n\t\td := filepath.Dir(p)\n\t\tif _, err := os.Stat(d); err != nil {\n\t\t\treturn fmt.Errorf(\"cannot use config at %s: could not stat parent directory (%v)\", p, err)\n\t\t}\n\t} else {\n\t\t\/\/ using the default config path, create the config directory\n\t\terr = os.MkdirAll(defaultConfigDir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ioutil.WriteFile(p, rawConfig, 0644)\n}\n<commit_msg>Ensure config is read only once<commit_after>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\nconst configEnvVar = \"PACH_CONFIG\"\nconst contextEnvVar = \"PACH_CONTEXT\"\n\nvar defaultConfigDir = filepath.Join(os.Getenv(\"HOME\"), \".pachyderm\")\nvar defaultConfigPath = filepath.Join(defaultConfigDir, \"config.json\")\n\nvar readerOnce sync.Once\nvar value *Config\nvar readErr error\n\nfunc configPath() string {\n\tif env, ok := os.LookupEnv(configEnvVar); ok {\n\t\treturn env\n\t}\n\treturn defaultConfigPath\n}\n\n\/\/ ActiveContext gets the active context in the config\nfunc (c *Config) ActiveContext() (*Context, error) {\n\tif env, ok := os.LookupEnv(contextEnvVar); ok {\n\t\tcontext := c.V2.Contexts[env]\n\t\tif context == nil {\n\t\t\treturn nil, fmt.Errorf(\"`%s` refers to a context that does not exist\", contextEnvVar)\n\t\t}\n\t\treturn context, nil\n\t}\n\tcontext := c.V2.Contexts[c.V2.ActiveContext]\n\tif context == nil {\n\t\treturn nil, fmt.Errorf(\"the active context references one that does exist; change the active context first\")\n\t}\n\treturn context, nil\n}\n\n\/\/ Read loads the Pachyderm config on this machine.\n\/\/ If an existing configuration cannot be found, it sets up the defaults. Read\n\/\/ returns a nil Config if and only if it returns a non-nil error.\nfunc Read() (*Config, error) {\n\treaderOnce.Do(func() {\n\t\t\/\/ Read json file\n\t\tp := configPath()\n\t\tif raw, err := ioutil.ReadFile(p); err == nil {\n\t\t\terr = json.Unmarshal(raw, &value)\n\t\t\tif err != nil {\n\t\t\t\treadErr = err\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if os.IsNotExist(err) {\n\t\t\t\/\/ File doesn't exist, so create a new config\n\t\t\tfmt.Fprintf(os.Stderr, \"No config detected at %q. Generating new config...\\n\", p)\n\t\t\tvalue = &Config{}\n\t\t} else {\n\t\t\treadErr = fmt.Errorf(\"fatal: could not read config at %q: %v\", p, err)\n\t\t\treturn\n\t\t}\n\n\t\tupdated := false\n\n\t\tif value.UserID == \"\" {\n\t\t\tupdated = true\n\t\t\tfmt.Fprintln(os.Stderr, \"No UserID present in config - generating new one.\")\n\t\t\tuuid, err := uuid.NewV4()\n\t\t\tif err != nil {\n\t\t\t\treadErr = fmt.Errorf(\"could not generate new user ID: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalue.UserID = uuid.String()\n\t\t}\n\n\t\tif value.V2 == nil {\n\t\t\tupdated = true\n\t\t\tif err := value.initV2(); err != nil {\n\t\t\t\treadErr = err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif updated {\n\t\t\tfmt.Fprintf(os.Stderr, \"Rewriting config at %q.\\n\", p)\n\n\t\t\tif err := value.Write(); err != nil {\n\t\t\t\treadErr = fmt.Errorf(\"could not rewrite config at %q: %v\", p, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t})\n\n\treturn value, readErr\n}\n\nfunc (c *Config) initV2() error {\n\trules := clientcmd.NewDefaultClientConfigLoadingRules()\n\toverrides := &clientcmd.ConfigOverrides{}\n\tkubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)\n\trawKubeConfig, err := kubeConfig.RawConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tkubeContext := rawKubeConfig.CurrentContext\n\tfmt.Fprintf(os.Stderr, \"No config V2 present in config - generating a new one off of the kube context '%s'.\\n\", kubeContext)\n\n\tc.V2 = &ConfigV2{\n\t\tActiveContext: \"default\",\n\t\tContexts: map[string]*Context{},\n\t}\n\n\tif c.V1 != nil {\n\t\tc.V2.Contexts[\"default\"] = &Context{\n\t\t\tSource: ContextSource_CONFIG_V1,\n\t\t\tPachdAddress: c.V1.PachdAddress,\n\t\t\tServerCAs: c.V1.ServerCAs,\n\t\t\tSessionToken: c.V1.SessionToken,\n\t\t\tActiveTransaction: c.V1.ActiveTransaction,\n\t\t\tKubeContext: kubeContext,\n\t\t}\n\n\t\tc.V1 = nil\n\t} else {\n\t\tc.V2.Contexts[\"default\"] = &Context{\n\t\t\tSource: ContextSource_NONE,\n\t\t\tKubeContext: kubeContext,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Write writes the configuration in 'c' to this machine's Pachyderm config\n\/\/ file.\nfunc (c *Config) Write() error {\n\tif c.V1 != nil {\n\t\tpanic(\"v1 config included, implying a bug\")\n\t}\n\n\trawConfig, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If we're not using a custom config path, create the default config path\n\tp := configPath()\n\tif _, ok := os.LookupEnv(configEnvVar); ok {\n\t\t\/\/ using overridden config path -- just make sure the parent dir exists\n\t\td := filepath.Dir(p)\n\t\tif _, err := os.Stat(d); err != nil {\n\t\t\treturn fmt.Errorf(\"cannot use config at %s: could not stat parent directory (%v)\", p, err)\n\t\t}\n\t} else {\n\t\t\/\/ using the default config path, create the config directory\n\t\terr = os.MkdirAll(defaultConfigDir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ioutil.WriteFile(p, rawConfig, 0644)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Bulldozer Framework\n * Copyright (C) DesertBit\n *\/\n\npackage firewall\n\nimport (\n\t\"code.desertbit.com\/bulldozer\/bulldozer\/settings\"\n\t\"code.desertbit.com\/bulldozer\/bulldozer\/utils\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tblockList map[string]int64 = make(map[string]int64)\n\tblockListMutex sync.Mutex\n\n\trequestCounter map[string]int = make(map[string]int)\n\trequestCounterMutex sync.Mutex\n)\n\nfunc init() {\n\t\/\/ Start the loops in an new goroutine\n\tgo cleanupRequestsCountLoop()\n\tgo releaseBlockedRemoteAddrLoop()\n}\n\n\/\/##############\/\/\n\/\/### Public ###\/\/\n\/\/##############\/\/\n\n\/\/ NewRequest tells the firewall, that a new request happened.\n\/\/ False is returned, if this request should be blocked,\n\/\/ because the IP is on the block list.\n\/\/ The remote address is always returned for logging purpose.\nfunc NewRequest(req *http.Request) (bool, string) {\n\t\/\/ Get the remote addresse of the request\n\tremoteAddr, _ := utils.RemoteAddress(req)\n\n\t\/\/ Check if this remote address is blocked\n\tif isBlocked(remoteAddr) {\n\t\treturn false, remoteAddr\n\t}\n\n\t\/\/ Register the new request in a new goroutine\n\tgo addRequest(remoteAddr)\n\n\treturn true, remoteAddr\n}\n\n\/\/###############\/\/\n\/\/### Private ###\/\/\n\/\/###############\/\/\n\n\/\/ isBlocked checks if the remote addres is blocked\nfunc isBlocked(remoteAddr string) bool {\n\t\/\/ Lock the mutex\n\tblockListMutex.Lock()\n\tdefer blockListMutex.Unlock()\n\n\t\/\/ Check if the remote addresse exists in the blocked map\n\t_, exists := blockList[remoteAddr]\n\treturn exists\n}\n\nfunc addRequest(remoteAddr string) {\n\t\/\/ Lock the mutex\n\trequestCounterMutex.Lock()\n\tdefer requestCounterMutex.Unlock()\n\n\tcount, ok := requestCounter[remoteAddr]\n\tif !ok {\n\t\tcount = 1\n\t}\n\n\t\/\/ Add the remote address to the block map, if the count\n\t\/\/ reached the limit.\n\tif count > settings.Settings.FirewallMaxRequestsPerMinute {\n\t\t\/\/ Remove the remote address from the request counter map\n\t\tdelete(requestCounter, remoteAddr)\n\n\t\t\/\/ Get the current timestamp\n\t\ttimestamp := time.Now().Unix()\n\n\t\t\/\/ Lock the mutex\n\t\tblockListMutex.Lock()\n\t\tdefer blockListMutex.Unlock()\n\n\t\t\/\/ Add the remote address with a timestamp to the block map\n\t\tblockList[remoteAddr] = timestamp\n\n\t\treturn\n\t}\n\n\t\/\/ Save the incremented count to the map\n\trequestCounter[remoteAddr] = count + 1\n}\n\nfunc cleanupRequestsCountLoop() {\n\tfor {\n\t\t\/\/ Sleep one minute\n\t\ttime.Sleep(time.Minute)\n\n\t\tfunc() {\n\t\t\t\/\/ Lock the mutex\n\t\t\trequestCounterMutex.Lock()\n\t\t\tdefer requestCounterMutex.Unlock()\n\n\t\t\t\/\/ Clear the map if not emtpy\n\t\t\tif len(requestCounter) > 0 {\n\t\t\t\trequestCounter = make(map[string]int)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc releaseBlockedRemoteAddrLoop() {\n\tfor {\n\t\t\/\/ Sleep one minute\n\t\ttime.Sleep(time.Minute)\n\n\t\tfunc() {\n\t\t\t\/\/ Lock the mutex\n\t\t\tblockListMutex.Lock()\n\t\t\tdefer blockListMutex.Unlock()\n\n\t\t\t\/\/ Return if nothing to do\n\t\t\tif len(blockList) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Create the release timestamp\n\t\t\treleaseTimestamp := time.Now().Unix() - int64(settings.Settings.FirewallReleaseBlockAfter)\n\n\t\t\t\/\/ Remove expired blocks\n\t\t\tfor key, timestamp := range blockList {\n\t\t\t\tif timestamp < releaseTimestamp {\n\t\t\t\t\tdelete(blockList, key)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n<commit_msg>fixed spelling mistake<commit_after>\/*\n * Bulldozer Framework\n * Copyright (C) DesertBit\n *\/\n\npackage firewall\n\nimport (\n\t\"code.desertbit.com\/bulldozer\/bulldozer\/settings\"\n\t\"code.desertbit.com\/bulldozer\/bulldozer\/utils\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tblockList map[string]int64 = make(map[string]int64)\n\tblockListMutex sync.Mutex\n\n\trequestCounter map[string]int = make(map[string]int)\n\trequestCounterMutex sync.Mutex\n)\n\nfunc init() {\n\t\/\/ Start the loops in an new goroutine\n\tgo cleanupRequestsCountLoop()\n\tgo releaseBlockedRemoteAddrLoop()\n}\n\n\/\/##############\/\/\n\/\/### Public ###\/\/\n\/\/##############\/\/\n\n\/\/ NewRequest tells the firewall, that a new request happened.\n\/\/ False is returned, if this request should be blocked,\n\/\/ because the IP is on the block list.\n\/\/ The remote address is always returned for logging purpose.\nfunc NewRequest(req *http.Request) (bool, string) {\n\t\/\/ Get the remote addresse of the request\n\tremoteAddr, _ := utils.RemoteAddress(req)\n\n\t\/\/ Check if this remote address is blocked\n\tif isBlocked(remoteAddr) {\n\t\treturn false, remoteAddr\n\t}\n\n\t\/\/ Register the new request in a new goroutine\n\tgo addRequest(remoteAddr)\n\n\treturn true, remoteAddr\n}\n\n\/\/###############\/\/\n\/\/### Private ###\/\/\n\/\/###############\/\/\n\n\/\/ isBlocked checks if the remote addres is blocked\nfunc isBlocked(remoteAddr string) bool {\n\t\/\/ Lock the mutex\n\tblockListMutex.Lock()\n\tdefer blockListMutex.Unlock()\n\n\t\/\/ Check if the remote addresse exists in the blocked map\n\t_, exists := blockList[remoteAddr]\n\treturn exists\n}\n\nfunc addRequest(remoteAddr string) {\n\t\/\/ Lock the mutex\n\trequestCounterMutex.Lock()\n\tdefer requestCounterMutex.Unlock()\n\n\tcount, ok := requestCounter[remoteAddr]\n\tif !ok {\n\t\tcount = 1\n\t}\n\n\t\/\/ Add the remote address to the block map, if the count\n\t\/\/ reached the limit.\n\tif count > settings.Settings.FirewallMaxRequestsPerMinute {\n\t\t\/\/ Remove the remote address from the request counter map\n\t\tdelete(requestCounter, remoteAddr)\n\n\t\t\/\/ Get the current timestamp\n\t\ttimestamp := time.Now().Unix()\n\n\t\t\/\/ Lock the mutex\n\t\tblockListMutex.Lock()\n\t\tdefer blockListMutex.Unlock()\n\n\t\t\/\/ Add the remote address with the timestamp to the block map\n\t\tblockList[remoteAddr] = timestamp\n\n\t\treturn\n\t}\n\n\t\/\/ Save the incremented count to the map\n\trequestCounter[remoteAddr] = count + 1\n}\n\nfunc cleanupRequestsCountLoop() {\n\tfor {\n\t\t\/\/ Sleep one minute\n\t\ttime.Sleep(time.Minute)\n\n\t\tfunc() {\n\t\t\t\/\/ Lock the mutex\n\t\t\trequestCounterMutex.Lock()\n\t\t\tdefer requestCounterMutex.Unlock()\n\n\t\t\t\/\/ Clear the map if not emtpy\n\t\t\tif len(requestCounter) > 0 {\n\t\t\t\trequestCounter = make(map[string]int)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc releaseBlockedRemoteAddrLoop() {\n\tfor {\n\t\t\/\/ Sleep one minute\n\t\ttime.Sleep(time.Minute)\n\n\t\tfunc() {\n\t\t\t\/\/ Lock the mutex\n\t\t\tblockListMutex.Lock()\n\t\t\tdefer blockListMutex.Unlock()\n\n\t\t\t\/\/ Return if nothing to do\n\t\t\tif len(blockList) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Create the release timestamp\n\t\t\treleaseTimestamp := time.Now().Unix() - int64(settings.Settings.FirewallReleaseBlockAfter)\n\n\t\t\t\/\/ Remove expired blocks\n\t\t\tfor key, timestamp := range blockList {\n\t\t\t\tif timestamp < releaseTimestamp {\n\t\t\t\t\tdelete(blockList, key)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Val is a supporting type to hold and represent enumerated value\ntype Val struct {\n\ti int\n\ts []string\n}\n\n\/\/ String pretty printer for Val type. It outputs either space-joined V.s,\n\/\/ or V.i in string form.\nfunc (v Val) String() string {\n\ts := strconv.Itoa(v.i)\n\tif len(v.s) > 0 {\n\t\ts = strings.Join(v.s, \" \")\n\t}\n\treturn s\n}\n\n\/\/ Start of the program\nfunc main() {\n\t\/\/ Create filtering pipeline\n\tin := createPipeline(\n\t\tmakeGenerator(),\n\t\tmakeLimitFilter(45),\n\t\tmakeValueFilter(3, \"fizz\"),\n\t\tmakeValueFilter(5, \"buzz\"),\n\t\tmakeValueFilter(6, \"boom\"),\n\t\tmakeValueFilter(9, \"bang\"),\n\t)\n\t\/\/ Display all values that are passed through the pipeline\n\tfor v := range in {\n\t\tfmt.Println(v)\n\t}\n}\n\n\/\/ Pipeline factory\nfunc createPipeline(gen Generator, filters ...Filter) <-chan Val {\n\t\/\/ Start generator\n\tch := gen()\n\tfor _, f := range filters {\n\t\t\/\/ Add filter to the chain\n\t\tch = f(ch)\n\t}\n\t\/\/ Return resulting channel\n\treturn ch\n}\n\n\/\/ Generator type\ntype Generator func() <-chan Val\n\n\/\/ Create Val generator\nfunc makeGenerator() Generator {\n\treturn func() <-chan Val {\n\t\tch := make(chan Val)\n\t\tgo func() {\n\t\t\tfor i := 1; ; i++ {\n\t\t\t\tch <- Val{i, make([]string, 0, 1)}\n\t\t\t}\n\t\t\tclose(ch)\n\t\t}()\n\t\treturn ch\n\t}\n}\n\n\/\/ Filter is generic interface to a filtering function\ntype Filter func(in <-chan Val) <-chan Val\n\n\/\/ Logic function that does the actual filtering\ntype FilterLogicFn func(out chan<- Val, in <-chan Val)\n\n\/\/ apply creats Filter by applying specified FilterLogicFn\nfunc apply(fl FilterLogicFn) Filter {\n\treturn func(in <-chan Val) <-chan Val {\n\t\tout := make(chan Val)\n\t\tgo func() {\n\t\t\tdefer close(out) \/\/ Always close channel, even if fl() panics\n\t\t\tfl(out, in)\n\t\t}()\n\t\treturn out\n\t}\n}\n\n\/\/ Limit filter generated implementation.\n\/\/ Must use function to capture parameter in a closure\nfunc makeLimitFilter(limit int) Filter {\n\treturn apply(func(limit int) FilterLogicFn {\n\t\treturn func(out chan<- Val, in <-chan Val) {\n\t\t\tfor i := 0; i < limit; i++ {\n\t\t\t\tv, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ passthrough\n\t\t\t\tout <- v\n\t\t\t}\n\t\t}\n\t}(limit))\n}\n\n\/\/ Value filter generator implementaion\n\/\/ Must use function to capture parameter in a closure\nfunc makeValueFilter(num int, msg string) Filter {\n\treturn apply(func(div int, msg string) FilterLogicFn {\n\t\treturn func(out chan<- Val, in <-chan Val) {\n\t\t\tfor {\n\t\t\t\tv, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ Check if number if divisible by our divizor\n\t\t\t\tif v.i%div == 0 {\n\t\t\t\t\tv.s = append(v.s, msg)\n\t\t\t\t}\n\t\t\t\tout <- v\n\t\t\t}\n\t\t}\n\t}(num, msg))\n}\n<commit_msg>Renamed filter operation type<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Val is a supporting type to hold and represent enumerated value\ntype Val struct {\n\ti int\n\ts []string\n}\n\n\/\/ String pretty printer for Val type. It outputs either space-joined V.s,\n\/\/ or V.i in string form.\nfunc (v Val) String() string {\n\ts := strconv.Itoa(v.i)\n\tif len(v.s) > 0 {\n\t\ts = strings.Join(v.s, \" \")\n\t}\n\treturn s\n}\n\n\/\/ Start of the program\nfunc main() {\n\t\/\/ Create filtering pipeline\n\tin := createPipeline(\n\t\tmakeGenerator(),\n\t\tmakeLimitFilter(45),\n\t\tmakeValueFilter(3, \"fizz\"),\n\t\tmakeValueFilter(5, \"buzz\"),\n\t\tmakeValueFilter(6, \"boom\"),\n\t\tmakeValueFilter(9, \"bang\"),\n\t)\n\t\/\/ Display all values that are passed through the pipeline\n\tfor v := range in {\n\t\tfmt.Println(v)\n\t}\n}\n\n\/\/ Pipeline factory\nfunc createPipeline(gen Generator, filters ...Filter) <-chan Val {\n\t\/\/ Start generator\n\tch := gen()\n\tfor _, f := range filters {\n\t\t\/\/ Add filter to the chain\n\t\tch = f(ch)\n\t}\n\t\/\/ Return resulting channel\n\treturn ch\n}\n\n\/\/ Generator type\ntype Generator func() <-chan Val\n\n\/\/ Create Val generator\nfunc makeGenerator() Generator {\n\treturn func() <-chan Val {\n\t\tch := make(chan Val)\n\t\tgo func() {\n\t\t\tfor i := 1; ; i++ {\n\t\t\t\tch <- Val{i, make([]string, 0, 1)}\n\t\t\t}\n\t\t}()\n\t\treturn ch\n\t}\n}\n\n\/\/ Filter is generic interface to a filtering function\ntype Filter func(in <-chan Val) <-chan Val\n\n\/\/ Logic function that does the actual filtering\ntype FilterOpFn func(out chan<- Val, in <-chan Val)\n\n\/\/ apply creats Filter by applying specified FilterOpFn\nfunc apply(fl FilterOpFn) Filter {\n\treturn func(in <-chan Val) <-chan Val {\n\t\tout := make(chan Val)\n\t\tgo func() {\n\t\t\tdefer close(out) \/\/ Always close channel, even if fl() panics\n\t\t\tfl(out, in)\n\t\t}()\n\t\treturn out\n\t}\n}\n\n\/\/ Limit filter generated implementation.\n\/\/ Must use function to capture parameter in a closure\nfunc makeLimitFilter(limit int) Filter {\n\treturn apply(func(limit int) FilterOpFn {\n\t\treturn func(out chan<- Val, in <-chan Val) {\n\t\t\tfor i := 0; i < limit; i++ {\n\t\t\t\tv, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ passthrough\n\t\t\t\tout <- v\n\t\t\t}\n\t\t}\n\t}(limit))\n}\n\n\/\/ Value filter generator implementaion\n\/\/ Must use function to capture parameter in a closure\nfunc makeValueFilter(num int, msg string) Filter {\n\treturn apply(func(div int, msg string) FilterOpFn {\n\t\treturn func(out chan<- Val, in <-chan Val) {\n\t\t\tfor {\n\t\t\t\tv, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ Check if number if divisible by our divizor\n\t\t\t\tif v.i%div == 0 {\n\t\t\t\t\tv.s = append(v.s, msg)\n\t\t\t\t}\n\t\t\t\tout <- v\n\t\t\t}\n\t\t}\n\t}(num, msg))\n}\n<|endoftext|>"} {"text":"<commit_before>package flagfile\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n)\n\n\/\/ FlagSet is a subset of methods of *flag.FlagSet\ntype FlagSet interface {\n\tParse([]string) error\n\tLookup(name string) *flag.Flag\n}\n\n\/\/ Decoder can decode a stream of bytes into structured values.\n\/\/ The root value for flagfile.File must be of type:\n\/\/ []string\n\/\/ []interface{}\n\/\/ map[string]interface{}\n\/\/\n\/\/ Example: encoding\/json.Decoder\ntype Decoder interface {\n\tDecode(interface{}) error\n}\n\ntype DecoderBuilder func(r io.Reader) Decoder\n\ntype file struct {\n\tflagset FlagSet\n\tdecoder DecoderBuilder\n\tcontextDir []string\n}\n\nfunc File(flagset FlagSet, decoder DecoderBuilder) flag.Value {\n\treturn &file{\n\t\tflagset: flagset,\n\t\tdecoder: decoder,\n\t}\n}\n\nfunc (file) String() string { return \"\" }\n\nfunc (f *file) Set(path string) error {\n\tif !filepath.IsAbs(path) {\n\t\tvar ctxDir string\n\t\tif len(f.contextDir) == 0 {\n\t\t\tvar e error\n\t\t\tctxDir, e = os.Getwd()\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t} else {\n\t\t\tctxDir = f.contextDir[len(f.contextDir)-1]\n\t\t}\n\t\tpath = filepath.Join(ctxDir, path)\n\t}\n\n\tr, err := os.Open(path)\n\tif err != nil {\n\t\t\/\/ TODO return a structured error to allow l18n\n\t\treturn fmt.Errorf(\"%s: %s\", path, err)\n\t}\n\n\tv, err := func(r *os.File) (interface{}, error) {\n\t\tdefer r.Close()\n\t\tdec := f.decoder(r)\n\t\tvar v interface{}\n\t\terr = dec.Decode(&v)\n\t\treturn v, err\n\t}(r)\n\tif err != nil {\n\t\t\/\/ TODO return a structured error to allow l18n\n\t\treturn fmt.Errorf(\"%s: can't decode: %s\", path, err)\n\t}\n\n\t\/\/ push directory of the path\n\tf.contextDir = append(f.contextDir, filepath.Dir(path))\n\tdefer func() {\n\t\t\/\/ pop contextDir\n\t\tf.contextDir = f.contextDir[:len(f.contextDir)-1]\n\t}()\n\n\tswitch v := v.(type) {\n\tcase []interface{}:\n\t\targs := make([]string, 0, len(v))\n\t\tfor _, arg := range v {\n\t\t\targs = append(args, fmt.Sprint(arg))\n\t\t}\n\t\terr = f.flagset.Parse(args)\n\tcase []string:\n\t\terr = f.flagset.Parse(v)\n\tcase map[string]interface{}:\n\t\terr = parseObject(f.flagset, v)\n\t\t\/\/ TODO map[string]string\n\t\t\/\/ TODO map[string][]string\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected type %T\", v)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%s#%s\", path, err)\n\t}\n\treturn err\n}\n\nfunc parseObject(flagset FlagSet, m map[string]interface{}) error {\n\tfor k, v := range m {\n\t\tf := flagset.Lookup(k)\n\t\tif f == nil {\n\t\t\treturn fmt.Errorf(\"\/%s: unknown flag\", k)\n\t\t}\n\t\tvar err error\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\terr = f.Value.Set(v)\n\t\tcase []string:\n\t\t\tfor i, v := range v {\n\t\t\t\terr = f.Value.Set(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"\/%s\/%d: %s\", k, i, err)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\trv := reflect.ValueOf(v)\n\t\t\tif rv.Kind() == reflect.Array || rv.Kind() == reflect.Slice {\n\t\t\t\tfor i := 0; i < rv.Len(); i++ {\n\t\t\t\t\terr = f.Value.Set(fmt.Sprint(rv.Index(i).Interface()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"\/%s\/%d: %s\", k, i, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = f.Value.Set(fmt.Sprint(v))\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"\/%s: %s\", k, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>flagfile: add doc<commit_after>package flagfile\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n)\n\n\/\/ FlagSet is a subset of methods of *flag.FlagSet\ntype FlagSet interface {\n\tParse([]string) error\n\tLookup(name string) *flag.Flag\n}\n\n\/\/ Decoder can decode a stream of bytes into structured values.\n\/\/ The root value for flagfile.File must be of type:\n\/\/ []string\n\/\/ []interface{}\n\/\/ map[string]interface{}\n\/\/\n\/\/ Example: encoding\/json.Decoder\ntype Decoder interface {\n\tDecode(interface{}) error\n}\n\ntype DecoderBuilder func(r io.Reader) Decoder\n\ntype file struct {\n\tflagset FlagSet\n\tdecoder DecoderBuilder\n\tcontextDir []string\n}\n\n\/\/ File allows to define a command-line flag that gives a path to a structured\n\/\/ file whose content will be expanded as command-line arguments and injected into flagset.\n\/\/\n\/\/ The structured data may be:\n\/\/ - an array of strings given to flagset.Parse\n\/\/ - a map where keys are argument names (without leading '-') and keys are values\n\/\/ or arrays of values\n\/\/\n\/\/ This flag is reentrant, so the file may refer to (include) other files by reusing the\n\/\/ same flag.\nfunc File(flagset FlagSet, decoder DecoderBuilder) flag.Value {\n\treturn &file{\n\t\tflagset: flagset,\n\t\tdecoder: decoder,\n\t}\n}\n\nfunc (file) String() string { return \"\" }\n\n\/\/ Set loads the given file, decodes it and interprets\n\/\/ its structured data as a list of arguments.\n\/\/\n\/\/ Set is part of the flag.Value interface.\nfunc (f *file) Set(path string) error {\n\tif !filepath.IsAbs(path) {\n\t\tvar ctxDir string\n\t\tif len(f.contextDir) == 0 {\n\t\t\tvar e error\n\t\t\tctxDir, e = os.Getwd()\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t} else {\n\t\t\tctxDir = f.contextDir[len(f.contextDir)-1]\n\t\t}\n\t\tpath = filepath.Join(ctxDir, path)\n\t}\n\n\tr, err := os.Open(path)\n\tif err != nil {\n\t\t\/\/ TODO return a structured error to allow l18n\n\t\treturn fmt.Errorf(\"%s: %s\", path, err)\n\t}\n\n\tv, err := func(r *os.File) (interface{}, error) {\n\t\tdefer r.Close()\n\t\tdec := f.decoder(r)\n\t\tvar v interface{}\n\t\terr = dec.Decode(&v)\n\t\treturn v, err\n\t}(r)\n\tif err != nil {\n\t\t\/\/ TODO return a structured error to allow l18n\n\t\treturn fmt.Errorf(\"%s: can't decode: %s\", path, err)\n\t}\n\n\t\/\/ push directory of the path\n\tf.contextDir = append(f.contextDir, filepath.Dir(path))\n\tdefer func() {\n\t\t\/\/ pop contextDir\n\t\tf.contextDir = f.contextDir[:len(f.contextDir)-1]\n\t}()\n\n\tswitch v := v.(type) {\n\tcase []interface{}:\n\t\targs := make([]string, 0, len(v))\n\t\tfor _, arg := range v {\n\t\t\targs = append(args, fmt.Sprint(arg))\n\t\t}\n\t\terr = f.flagset.Parse(args)\n\tcase []string:\n\t\terr = f.flagset.Parse(v)\n\tcase map[string]interface{}:\n\t\terr = parseObject(f.flagset, v)\n\t\t\/\/ TODO map[string]string\n\t\t\/\/ TODO map[string][]string\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected type %T\", v)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%s#%s\", path, err)\n\t}\n\treturn err\n}\n\nfunc parseObject(flagset FlagSet, m map[string]interface{}) error {\n\tfor k, v := range m {\n\t\tf := flagset.Lookup(k)\n\t\tif f == nil {\n\t\t\treturn fmt.Errorf(\"\/%s: unknown flag\", k)\n\t\t}\n\t\tvar err error\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\terr = f.Value.Set(v)\n\t\tcase []string:\n\t\t\tfor i, v := range v {\n\t\t\t\terr = f.Value.Set(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"\/%s\/%d: %s\", k, i, err)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\trv := reflect.ValueOf(v)\n\t\t\tif rv.Kind() == reflect.Array || rv.Kind() == reflect.Slice {\n\t\t\t\tfor i := 0; i < rv.Len(); i++ {\n\t\t\t\t\terr = f.Value.Set(fmt.Sprint(rv.Index(i).Interface()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"\/%s\/%d: %s\", k, i, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = f.Value.Set(fmt.Sprint(v))\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"\/%s: %s\", k, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestL0Compaction(t *testing.T) {\n\tpath := \"\/tmp\/compactor_test\/TestL0Compaction\"\n\tos.RemoveAll(path)\n\terr := os.MkdirAll(path, os.ModePerm)\n\tif err != nil {\n\t\tt.Error(\"Fails to create testing dir\")\n\t}\n\n\topt := DbOption{\n\t\tpath: path,\n\t\tenv: MakeNativeEnv(),\n\t\tcomp: ByteOrder(0),\n\t\tnumTblCache: 8,\n\t\tminLogSize: 64,\n\t\tmaxL0Levels: 4,\n\t\tminTableSize: 4 * 1024 * 1024,\n\t}\n\n\tdb := MakeDb(opt)\n\tif db == nil {\n\t\tt.Error(\"Fails to create a DB!\")\n\t}\n\n\tbigVal := make([]byte, 64)\n\tfor idx, _ := range bigVal {\n\t\tbigVal[idx] = 'a'\n\t}\n\n\tvar wopt WriteOptions\n\tvar ropt ReadOptions\n\n\tdb.Put(wopt, []byte(\"hello\"), []byte(\"world\"))\n\n\tfor i := 1000; i < 1002; i++ {\n\t\tkey := fmt.Sprintf(\"%d\", i)\n\t\tstatus, finish := db.PutMore(wopt, []byte(key), bigVal)\n\n\t\tif !status.Ok() {\n\t\t\tt.Error(\"Fails to put an item!\")\n\t\t}\n\n\t\t\/\/ Wait until L0 compaction completes.\n\t\tfor v := range finish {\n\t\t\tif v != true {\n\t\t\t\tt.Error(\"Does not finish compaction successfully\")\n\t\t\t}\n\t\t}\n\t}\n\n\tval, status := db.Get(ropt, []byte(\"hello\"))\n\tif !status.Ok() || string(val) != \"world\" {\n\t\tt.Error(\"Fails to get a key\")\n\t}\n}\n<commit_msg>Extend L0 compaction test case<commit_after>package db\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestL0Compaction(t *testing.T) {\n\tpath := \"\/tmp\/compactor_test\/TestL0Compaction\"\n\tos.RemoveAll(path)\n\terr := os.MkdirAll(path, os.ModePerm)\n\tif err != nil {\n\t\tt.Error(\"Fails to create testing dir\")\n\t}\n\n\topt := DbOption{\n\t\tpath: path,\n\t\tenv: MakeNativeEnv(),\n\t\tcomp: ByteOrder(0),\n\t\tnumTblCache: 8,\n\t\tminLogSize: 64,\n\t\tmaxL0Levels: 4,\n\t\tminTableSize: 4 * 1024 * 1024,\n\t}\n\n\tdb := MakeDb(opt)\n\tif db == nil {\n\t\tt.Error(\"Fails to create a DB!\")\n\t}\n\n\tbigVal := make([]byte, 64)\n\tfor idx, _ := range bigVal {\n\t\tbigVal[idx] = 'a'\n\t}\n\n\tvar wopt WriteOptions\n\tvar ropt ReadOptions\n\n\tdb.Put(wopt, []byte(\"hello\"), []byte(\"world\"))\n\n\tfor i := 1000; i < 1002; i++ {\n\t\tkey := fmt.Sprintf(\"%d\", i)\n\t\tstatus, finish := db.PutMore(wopt, []byte(key), bigVal)\n\n\t\tif !status.Ok() {\n\t\t\tt.Error(\"Fails to put an item!\")\n\t\t}\n\n\t\t\/\/ Wait until L0 compaction completes.\n\t\tfor v := range finish {\n\t\t\tif v != true {\n\t\t\t\tt.Error(\"Does not finish compaction successfully\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Verify a particular key is present.\n\tval, status := db.Get(ropt, []byte(\"hello\"))\n\tif !status.Ok() || string(val) != \"world\" {\n\t\tt.Error(\"Fails to get a key\")\n\t}\n\n\t\/\/ Verify that all keys are present.\n\tit := db.NewIterator(ropt)\n\tit.SeekToFirst()\n\n\tif !it.Valid() || string(it.Key()) != \"1000\" {\n\t\tt.Error(\"Fails to get expected key\")\n\t}\n\n\tit.Next()\n\n\tif !it.Valid() || string(it.Key()) != \"1001\" {\n\t\tt.Error(\"Fails to get expected key\")\n\t}\n\n\tit.Next()\n\n\tif !it.Valid() || string(it.Key()) != \"hello\" {\n\t\tt.Error(\"Fails to get expected key\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cachingfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ Sizes of the files according to the file system.\n\tFooSize = 123\n\tBarSize = 456\n)\n\n\/\/ A file system with a fixed structure that looks like this:\n\/\/\n\/\/ foo\n\/\/ dir\/\n\/\/ bar\n\/\/\n\/\/ The file system is configured with durations that specify how long to allow\n\/\/ inode entries and attributes to be cached, used when responding to fuse\n\/\/ requests. It also exposes methods for renumbering inodes and updating mtimes\n\/\/ that are useful in testing that these durations are honored.\ntype CachingFS interface {\n\tfuse.Server\n\n\t\/\/ Return the current inode ID of the file\/directory with the given name.\n\tFooID() fuseops.InodeID\n\tDirID() fuseops.InodeID\n\tBarID() fuseops.InodeID\n\n\t\/\/ Cause the inode IDs to change to values that have never before been used.\n\tRenumberInodes()\n\n\t\/\/ Cause further queries for the attributes of inodes to use the supplied\n\t\/\/ time as the inode's mtime.\n\tSetMtime(mtime time.Time)\n}\n\n\/\/ Create a file system that issues cacheable responses according to the\n\/\/ following rules:\n\/\/\n\/\/ * LookUpInodeResponse.Entry.EntryExpiration is set according to\n\/\/ lookupEntryTimeout.\n\/\/\n\/\/ * GetInodeAttributesResponse.AttributesExpiration is set according to\n\/\/ getattrTimeout.\n\/\/\n\/\/ * Nothing else is marked cacheable. (In particular, the attributes\n\/\/ returned by LookUpInode are not cacheable.)\n\/\/\nfunc NewCachingFS(\n\tlookupEntryTimeout time.Duration,\n\tgetattrTimeout time.Duration) (fs CachingFS, err error) {\n\troundUp := func(n fuseops.InodeID) fuseops.InodeID {\n\t\treturn numInodes * ((n + numInodes - 1) \/ numInodes)\n\t}\n\n\tcfs := &cachingFS{\n\t\tlookupEntryTimeout: lookupEntryTimeout,\n\t\tgetattrTimeout: getattrTimeout,\n\t\tbaseID: roundUp(fuse.RootInodeID + 1),\n\t\tmtime: time.Now(),\n\t}\n\n\tcfs.mu = syncutil.NewInvariantMutex(cfs.checkInvariants)\n\n\tfs = cfs\n\treturn\n}\n\nconst (\n\t\/\/ Inode IDs are issued such that \"foo\" always receives an ID that is\n\t\/\/ congruent to fooOffset modulo numInodes, etc.\n\tfooOffset = iota\n\tdirOffset\n\tbarOffset\n\tnumInodes\n)\n\ntype cachingFS struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tlookupEntryTimeout time.Duration\n\tgetattrTimeout time.Duration\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current ID of the lowest numbered non-root inode.\n\t\/\/\n\t\/\/ INVARIANT: baseID > fuse.RootInodeID\n\t\/\/ INVARIANT: baseID % numInodes == 0\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tbaseID fuseops.InodeID\n\n\t\/\/ GUARDED_BY(mu)\n\tmtime time.Time\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *cachingFS) checkInvariants() {\n\t\/\/ INVARIANT: baseID > fuse.RootInodeID\n\t\/\/ INVARIANT: baseID % numInodes == 0\n\tif fs.baseID <= fuse.RootInodeID || fs.baseID%numInodes != 0 {\n\t\tpanic(fmt.Sprintf(\"Bad baseID: %v\", fs.baseID))\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) fooID() fuseops.InodeID {\n\treturn fs.baseID + fooOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) dirID() fuseops.InodeID {\n\treturn fs.baseID + dirOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) barID() fuseops.InodeID {\n\treturn fs.baseID + barOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) rootAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tMode: os.ModeDir | 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) fooAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tSize: FooSize,\n\t\tMode: 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) dirAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: os.ModeDir | 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) barAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tSize: BarSize,\n\t\tMode: 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) FooID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.fooID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) DirID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.dirID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) BarID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.barID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) RenumberInodes() {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tfs.baseID += numInodes\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) SetMtime(mtime time.Time) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tfs.mtime = mtime\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ FileSystem methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *cachingFS) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (resp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (resp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Find the ID and attributes.\n\tvar id fuseops.InodeID\n\tvar attrs fuseops.InodeAttributes\n\n\tswitch req.Name {\n\tcase \"foo\":\n\t\t\/\/ Parent must be the root.\n\t\tif req.Parent != fuse.RootInodeID {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.fooID()\n\t\tattrs = fs.fooAttrs()\n\n\tcase \"dir\":\n\t\t\/\/ Parent must be the root.\n\t\tif req.Parent != fuse.RootInodeID {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.dirID()\n\t\tattrs = fs.dirAttrs()\n\n\tcase \"bar\":\n\t\t\/\/ Parent must be dir.\n\t\tif req.Parent == fuse.RootInodeID || req.Parent%numInodes != dirOffset {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.barID()\n\t\tattrs = fs.barAttrs()\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Fill in the response.\n\tresp.Entry.Child = id\n\tresp.Entry.Attributes = attrs\n\tresp.Entry.EntryExpiration = time.Now().Add(fs.lookupEntryTimeout)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Figure out which inode the request is for.\n\tvar attrs fuseops.InodeAttributes\n\n\tswitch {\n\tcase req.Inode == fuse.RootInodeID:\n\t\tattrs = fs.rootAttrs()\n\n\tcase req.Inode%numInodes == fooOffset:\n\t\tattrs = fs.fooAttrs()\n\n\tcase req.Inode%numInodes == dirOffset:\n\t\tattrs = fs.dirAttrs()\n\n\tcase req.Inode%numInodes == barOffset:\n\t\tattrs = fs.barAttrs()\n\t}\n\n\t\/\/ Fill in the response.\n\tresp.Attributes = attrs\n\tresp.AttributesExpiration = time.Now().Add(fs.getattrTimeout)\n\n\treturn\n}\n\nfunc (fs *cachingFS) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (\n\tresp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\treturn\n}\n\nfunc (fs *cachingFS) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\treturn\n}\n<commit_msg>Fixed more errors.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cachingfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ Sizes of the files according to the file system.\n\tFooSize = 123\n\tBarSize = 456\n)\n\n\/\/ A file system with a fixed structure that looks like this:\n\/\/\n\/\/ foo\n\/\/ dir\/\n\/\/ bar\n\/\/\n\/\/ The file system is configured with durations that specify how long to allow\n\/\/ inode entries and attributes to be cached, used when responding to fuse\n\/\/ requests. It also exposes methods for renumbering inodes and updating mtimes\n\/\/ that are useful in testing that these durations are honored.\ntype CachingFS interface {\n\tfuse.Server\n\n\t\/\/ Return the current inode ID of the file\/directory with the given name.\n\tFooID() fuseops.InodeID\n\tDirID() fuseops.InodeID\n\tBarID() fuseops.InodeID\n\n\t\/\/ Cause the inode IDs to change to values that have never before been used.\n\tRenumberInodes()\n\n\t\/\/ Cause further queries for the attributes of inodes to use the supplied\n\t\/\/ time as the inode's mtime.\n\tSetMtime(mtime time.Time)\n}\n\n\/\/ Create a file system that issues cacheable responses according to the\n\/\/ following rules:\n\/\/\n\/\/ * LookUpInodeResponse.Entry.EntryExpiration is set according to\n\/\/ lookupEntryTimeout.\n\/\/\n\/\/ * GetInodeAttributesResponse.AttributesExpiration is set according to\n\/\/ getattrTimeout.\n\/\/\n\/\/ * Nothing else is marked cacheable. (In particular, the attributes\n\/\/ returned by LookUpInode are not cacheable.)\n\/\/\nfunc NewCachingFS(\n\tlookupEntryTimeout time.Duration,\n\tgetattrTimeout time.Duration) (fs CachingFS, err error) {\n\troundUp := func(n fuseops.InodeID) fuseops.InodeID {\n\t\treturn numInodes * ((n + numInodes - 1) \/ numInodes)\n\t}\n\n\tcfs := &cachingFS{\n\t\tlookupEntryTimeout: lookupEntryTimeout,\n\t\tgetattrTimeout: getattrTimeout,\n\t\tbaseID: roundUp(fuseops.RootInodeID + 1),\n\t\tmtime: time.Now(),\n\t}\n\n\tcfs.mu = syncutil.NewInvariantMutex(cfs.checkInvariants)\n\n\tfs = cfs\n\treturn\n}\n\nconst (\n\t\/\/ Inode IDs are issued such that \"foo\" always receives an ID that is\n\t\/\/ congruent to fooOffset modulo numInodes, etc.\n\tfooOffset = iota\n\tdirOffset\n\tbarOffset\n\tnumInodes\n)\n\ntype cachingFS struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tlookupEntryTimeout time.Duration\n\tgetattrTimeout time.Duration\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current ID of the lowest numbered non-root inode.\n\t\/\/\n\t\/\/ INVARIANT: baseID > fuseops.RootInodeID\n\t\/\/ INVARIANT: baseID % numInodes == 0\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tbaseID fuseops.InodeID\n\n\t\/\/ GUARDED_BY(mu)\n\tmtime time.Time\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *cachingFS) checkInvariants() {\n\t\/\/ INVARIANT: baseID > fuseops.RootInodeID\n\t\/\/ INVARIANT: baseID % numInodes == 0\n\tif fs.baseID <= fuseops.RootInodeID || fs.baseID%numInodes != 0 {\n\t\tpanic(fmt.Sprintf(\"Bad baseID: %v\", fs.baseID))\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) fooID() fuseops.InodeID {\n\treturn fs.baseID + fooOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) dirID() fuseops.InodeID {\n\treturn fs.baseID + dirOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) barID() fuseops.InodeID {\n\treturn fs.baseID + barOffset\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) rootAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tMode: os.ModeDir | 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) fooAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tSize: FooSize,\n\t\tMode: 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) dirAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: os.ModeDir | 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *cachingFS) barAttrs() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tSize: BarSize,\n\t\tMode: 0777,\n\t\tMtime: fs.mtime,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) FooID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.fooID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) DirID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.dirID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) BarID() fuseops.InodeID {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\treturn fs.barID()\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) RenumberInodes() {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tfs.baseID += numInodes\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) SetMtime(mtime time.Time) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tfs.mtime = mtime\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Op methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *cachingFS) init(op *fuseops.InitOp) {\n\top.Respond(nil)\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (resp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Find the ID and attributes.\n\tvar id fuseops.InodeID\n\tvar attrs fuseops.InodeAttributes\n\n\tswitch req.Name {\n\tcase \"foo\":\n\t\t\/\/ Parent must be the root.\n\t\tif req.Parent != fuseops.RootInodeID {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.fooID()\n\t\tattrs = fs.fooAttrs()\n\n\tcase \"dir\":\n\t\t\/\/ Parent must be the root.\n\t\tif req.Parent != fuseops.RootInodeID {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.dirID()\n\t\tattrs = fs.dirAttrs()\n\n\tcase \"bar\":\n\t\t\/\/ Parent must be dir.\n\t\tif req.Parent == fuseops.RootInodeID || req.Parent%numInodes != dirOffset {\n\t\t\terr = fuse.ENOENT\n\t\t\treturn\n\t\t}\n\n\t\tid = fs.barID()\n\t\tattrs = fs.barAttrs()\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Fill in the response.\n\tresp.Entry.Child = id\n\tresp.Entry.Attributes = attrs\n\tresp.Entry.EntryExpiration = time.Now().Add(fs.lookupEntryTimeout)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *cachingFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Figure out which inode the request is for.\n\tvar attrs fuseops.InodeAttributes\n\n\tswitch {\n\tcase req.Inode == fuseops.RootInodeID:\n\t\tattrs = fs.rootAttrs()\n\n\tcase req.Inode%numInodes == fooOffset:\n\t\tattrs = fs.fooAttrs()\n\n\tcase req.Inode%numInodes == dirOffset:\n\t\tattrs = fs.dirAttrs()\n\n\tcase req.Inode%numInodes == barOffset:\n\t\tattrs = fs.barAttrs()\n\t}\n\n\t\/\/ Fill in the response.\n\tresp.Attributes = attrs\n\tresp.AttributesExpiration = time.Now().Add(fs.getattrTimeout)\n\n\treturn\n}\n\nfunc (fs *cachingFS) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (\n\tresp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\treturn\n}\n\nfunc (fs *cachingFS) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package volume\n\nimport (\n\t\"errors\"\n\t\"math\"\n\n\t\"github.com\/go-ole\/go-ole\"\n\t\"github.com\/moutend\/go-wca\"\n)\n\n\/\/ GetVolume returns the current volume (0 to 100).\nfunc GetVolume() (int, error) {\n\tvol, err := invoke(func(aev *wca.IAudioEndpointVolume) (interface{}, error) {\n\t\tvar level float32\n\t\terr := aev.GetMasterVolumeLevelScalar(&level)\n\t\tvol := math.Floor(float64(level*100.0 + 0.5))\n\t\treturn vol, err\n\t})\n\tif vol == nil {\n\t\treturn 0, err\n\t}\n\treturn vol.(int), err\n}\n\n\/\/ SetVolume sets the sound volume to the specified value.\nfunc SetVolume(volume int) error {\n\tif volume < 0 || 100 < volume {\n\t\treturn errors.New(\"out of valid volume range\")\n\t}\n\t_, err := invoke(func(aev *wca.IAudioEndpointVolume) (interface{}, error) {\n\t\terr := aev.SetMasterVolumeLevelScalar(float32(volume)\/100, nil)\n\t\treturn nil, err\n\t})\n\treturn err\n}\n\n\/\/ IncreaseVolume increases (or decreases) the audio volume by the specified value.\nfunc IncreaseVolume(diff int) error {\n\tpanic(\"not implemented on Windows\")\n\treturn nil\n}\n\n\/\/ GetMuted returns the current muted status.\nfunc GetMuted() (bool, error) {\n\tpanic(\"not implemented on Windows\")\n\treturn false, nil\n}\n\n\/\/ Mute mutes the audio.\nfunc Mute() error {\n\tpanic(\"not implemented on Windows\")\n\treturn nil\n}\n\n\/\/ Unmute unmutes the audio.\nfunc Unmute() error {\n\tpanic(\"not implemented on Windows\")\n\treturn nil\n}\n\nfunc invoke(f func(aev *wca.IAudioEndpointVolume) (interface{}, error)) (ret interface{}, err error) {\n\tif err = ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED); err != nil {\n\t\treturn\n\t}\n\tdefer ole.CoUninitialize()\n\n\tvar mmde *wca.IMMDeviceEnumerator\n\tif err = wca.CoCreateInstance(wca.CLSID_MMDeviceEnumerator, 0, wca.CLSCTX_ALL, wca.IID_IMMDeviceEnumerator, &mmde); err != nil {\n\t\treturn\n\t}\n\tdefer mmde.Release()\n\n\tvar mmd *wca.IMMDevice\n\tif err = mmde.GetDefaultAudioEndpoint(wca.ERender, wca.EConsole, &mmd); err != nil {\n\t\treturn\n\t}\n\tdefer mmd.Release()\n\n\tvar ps *wca.IPropertyStore\n\tif err = mmd.OpenPropertyStore(wca.STGM_READ, &ps); err != nil {\n\t\treturn\n\t}\n\tdefer ps.Release()\n\n\tvar pv wca.PROPVARIANT\n\tif err = ps.GetValue(&wca.PKEY_Device_FriendlyName, &pv); err != nil {\n\t\treturn\n\t}\n\n\tvar aev *wca.IAudioEndpointVolume\n\tif err = mmd.Activate(wca.IID_IAudioEndpointVolume, wca.CLSCTX_ALL, nil, &aev); err != nil {\n\t\treturn\n\t}\n\tdefer aev.Release()\n\n\tret, err = f(aev)\n\treturn\n}\n<commit_msg>implement IncreaseVolume for Windows<commit_after>package volume\n\nimport (\n\t\"errors\"\n\t\"math\"\n\n\t\"github.com\/go-ole\/go-ole\"\n\t\"github.com\/moutend\/go-wca\"\n)\n\n\/\/ GetVolume returns the current volume (0 to 100).\nfunc GetVolume() (int, error) {\n\tvol, err := invoke(func(aev *wca.IAudioEndpointVolume) (interface{}, error) {\n\t\tvar level float32\n\t\terr := aev.GetMasterVolumeLevelScalar(&level)\n\t\tvol := math.Floor(float64(level*100.0 + 0.5))\n\t\treturn vol, err\n\t})\n\tif vol == nil {\n\t\treturn 0, err\n\t}\n\treturn vol.(int), err\n}\n\n\/\/ SetVolume sets the sound volume to the specified value.\nfunc SetVolume(volume int) error {\n\tif volume < 0 || 100 < volume {\n\t\treturn errors.New(\"out of valid volume range\")\n\t}\n\t_, err := invoke(func(aev *wca.IAudioEndpointVolume) (interface{}, error) {\n\t\terr := aev.SetMasterVolumeLevelScalar(float32(volume)\/100, nil)\n\t\treturn nil, err\n\t})\n\treturn err\n}\n\n\/\/ IncreaseVolume increases (or decreases) the audio volume by the specified value.\nfunc IncreaseVolume(diff int) error {\n\t_, err := invoke(func(aev *wca.IAudioEndpointVolume) (interface{}, error) {\n\t\tvar level float32\n\t\terr := aev.GetMasterVolumeLevelScalar(&level)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvol := math.Min(math.Max(math.Floor(float64(level*100.0+0.5))+float64(diff), 0.0), 100.0)\n\t\terr = aev.SetMasterVolumeLevelScalar(float32(vol)\/100, nil)\n\t\treturn nil, err\n\t})\n\treturn err\n}\n\n\/\/ GetMuted returns the current muted status.\nfunc GetMuted() (bool, error) {\n\tpanic(\"not implemented on Windows\")\n\treturn false, nil\n}\n\n\/\/ Mute mutes the audio.\nfunc Mute() error {\n\tpanic(\"not implemented on Windows\")\n\treturn nil\n}\n\n\/\/ Unmute unmutes the audio.\nfunc Unmute() error {\n\tpanic(\"not implemented on Windows\")\n\treturn nil\n}\n\nfunc invoke(f func(aev *wca.IAudioEndpointVolume) (interface{}, error)) (ret interface{}, err error) {\n\tif err = ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED); err != nil {\n\t\treturn\n\t}\n\tdefer ole.CoUninitialize()\n\n\tvar mmde *wca.IMMDeviceEnumerator\n\tif err = wca.CoCreateInstance(wca.CLSID_MMDeviceEnumerator, 0, wca.CLSCTX_ALL, wca.IID_IMMDeviceEnumerator, &mmde); err != nil {\n\t\treturn\n\t}\n\tdefer mmde.Release()\n\n\tvar mmd *wca.IMMDevice\n\tif err = mmde.GetDefaultAudioEndpoint(wca.ERender, wca.EConsole, &mmd); err != nil {\n\t\treturn\n\t}\n\tdefer mmd.Release()\n\n\tvar ps *wca.IPropertyStore\n\tif err = mmd.OpenPropertyStore(wca.STGM_READ, &ps); err != nil {\n\t\treturn\n\t}\n\tdefer ps.Release()\n\n\tvar pv wca.PROPVARIANT\n\tif err = ps.GetValue(&wca.PKEY_Device_FriendlyName, &pv); err != nil {\n\t\treturn\n\t}\n\n\tvar aev *wca.IAudioEndpointVolume\n\tif err = mmd.Activate(wca.IID_IAudioEndpointVolume, wca.CLSCTX_ALL, nil, &aev); err != nil {\n\t\treturn\n\t}\n\tdefer aev.Release()\n\n\tret, err = f(aev)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Marty Schoch\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage gouchstore\n\nimport (\n\t\"hash\/crc32\"\n\n\t\"code.google.com\/p\/snappy-go\/snappy\"\n)\n\nconst gs_CHUNK_LENGTH_SIZE int64 = 4\nconst gs_CHUNK_CRC_SIZE int64 = 4\n\n\/\/ attempt to read a chunk at the specified location\nfunc (g *Gouchstore) readChunkAt(pos int64, header bool) ([]byte, error) {\n\t\/\/ chunk starts with 8 bytes (32bit length, 32bit crc)\n\tchunkPrefix := make([]byte, gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE)\n\tn, err := g.readAt(chunkPrefix, pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_SHORT_PREFIX\n\t}\n\n\tsize := decode_raw31(chunkPrefix[0:gs_CHUNK_LENGTH_SIZE])\n\tcrc := decode_raw32(chunkPrefix[gs_CHUNK_LENGTH_SIZE : gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE])\n\n\t\/\/ size should at least be the size of the length field + 1\n\tif size < uint32(gs_CHUNK_LENGTH_SIZE+1) {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_SIZE_TOO_SMALL\n\t}\n\tif header {\n\t\tsize -= uint32(gs_CHUNK_LENGTH_SIZE) \/\/ headers include the length of the hash, data does not\n\t}\n\n\tdata := make([]byte, size)\n\tpos += n \/\/ skip the actual number of bytes read for the header (may be more than header size if we crossed a block boundary)\n\tn, err = g.readAt(data, pos)\n\tif uint32(n) < size {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_DATA_LESS_THAN_SIZE\n\t}\n\n\t\/\/ validate crc\n\tactualCRC := crc32.ChecksumIEEE(data)\n\tif actualCRC != crc {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_BAD_CRC\n\t}\n\n\treturn data, nil\n}\n\nfunc (g *Gouchstore) readCompressedDataChunkAt(pos int64) ([]byte, error) {\n\tchunk, err := g.readChunkAt(pos, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecompressedChunk, err := snappy.Decode(nil, chunk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decompressedChunk, nil\n}\n\nfunc (g *Gouchstore) writeChunk(buf []byte, header bool) (int64, int64, error) {\n\t\/\/ always write to the end of the file\n\tstartPos := g.pos\n\tpos := startPos\n\tendpos := pos\n\n\t\/\/ if we're writing a header, advance to the next block size boundary\n\tif header {\n\t\tif pos%gs_BLOCK_SIZE != 0 {\n\t\t\tpos += (gs_BLOCK_SIZE - (pos % gs_BLOCK_SIZE))\n\t\t}\n\t}\n\n\t\/\/ chunk starts with 8 bytes (32bit length, 32bit crc)\n\tsize := uint32(len(buf))\n\tif header {\n\t\tsize += uint32(gs_CHUNK_CRC_SIZE) \/\/ header chunks include the length of the hash\n\t}\n\tcrc := crc32.ChecksumIEEE(buf)\n\n\tvar sizeBytes []byte\n\tif header {\n\t\tsizeBytes = encode_raw32(size)\n\t} else {\n\t\tsizeBytes = encode_raw31_highestbiton(size)\n\t}\n\tcrcBytes := encode_raw32(crc)\n\twritten, err := g.writeAt(sizeBytes, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tpos += written\n\tendpos += written\n\twritten, err = g.writeAt(crcBytes, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tpos += written\n\tendpos += written\n\twritten, err = g.writeAt(buf, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tendpos += written\n\n\treturn startPos, endpos - startPos, nil\n}\n\nfunc (g *Gouchstore) writeCompressedChunk(buf []byte) (int64, int64, error) {\n\tcompressed, err := snappy.Encode(nil, buf)\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\treturn g.writeChunk(compressed, false)\n}\n<commit_msg>fix major bug overwriting file header<commit_after>\/\/ Copyright (c) 2014 Marty Schoch\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage gouchstore\n\nimport (\n\t\"hash\/crc32\"\n\n\t\"code.google.com\/p\/snappy-go\/snappy\"\n)\n\nconst gs_CHUNK_LENGTH_SIZE int64 = 4\nconst gs_CHUNK_CRC_SIZE int64 = 4\n\n\/\/ attempt to read a chunk at the specified location\nfunc (g *Gouchstore) readChunkAt(pos int64, header bool) ([]byte, error) {\n\t\/\/ chunk starts with 8 bytes (32bit length, 32bit crc)\n\tchunkPrefix := make([]byte, gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE)\n\tn, err := g.readAt(chunkPrefix, pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_SHORT_PREFIX\n\t}\n\n\tsize := decode_raw31(chunkPrefix[0:gs_CHUNK_LENGTH_SIZE])\n\tcrc := decode_raw32(chunkPrefix[gs_CHUNK_LENGTH_SIZE : gs_CHUNK_LENGTH_SIZE+gs_CHUNK_CRC_SIZE])\n\n\t\/\/ size should at least be the size of the length field + 1\n\tif size < uint32(gs_CHUNK_LENGTH_SIZE+1) {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_SIZE_TOO_SMALL\n\t}\n\tif header {\n\t\tsize -= uint32(gs_CHUNK_LENGTH_SIZE) \/\/ headers include the length of the hash, data does not\n\t}\n\n\tdata := make([]byte, size)\n\tpos += n \/\/ skip the actual number of bytes read for the header (may be more than header size if we crossed a block boundary)\n\tn, err = g.readAt(data, pos)\n\tif uint32(n) < size {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_DATA_LESS_THAN_SIZE\n\t}\n\n\t\/\/ validate crc\n\tactualCRC := crc32.ChecksumIEEE(data)\n\tif actualCRC != crc {\n\t\treturn nil, gs_ERROR_INVALID_CHUNK_BAD_CRC\n\t}\n\n\treturn data, nil\n}\n\nfunc (g *Gouchstore) readCompressedDataChunkAt(pos int64) ([]byte, error) {\n\tchunk, err := g.readChunkAt(pos, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecompressedChunk, err := snappy.Decode(nil, chunk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decompressedChunk, nil\n}\n\nfunc (g *Gouchstore) writeChunk(buf []byte, header bool) (int64, int64, error) {\n\t\/\/ always write to the end of the file\n\tstartPos := g.pos\n\tpos := startPos\n\tendpos := pos\n\n\t\/\/ if we're writing a header, advance to the next block size boundary\n\tif header {\n\t\tif pos%gs_BLOCK_SIZE != 0 {\n\t\t\tpos += (gs_BLOCK_SIZE - (pos % gs_BLOCK_SIZE))\n\t\t\tg.pos += (gs_BLOCK_SIZE - (pos % gs_BLOCK_SIZE))\n\t\t}\n\t}\n\n\t\/\/ chunk starts with 8 bytes (32bit length, 32bit crc)\n\tsize := uint32(len(buf))\n\tif header {\n\t\tsize += uint32(gs_CHUNK_CRC_SIZE) \/\/ header chunks include the length of the hash\n\t}\n\tcrc := crc32.ChecksumIEEE(buf)\n\n\tvar sizeBytes []byte\n\tif header {\n\t\tsizeBytes = encode_raw32(size)\n\t} else {\n\t\tsizeBytes = encode_raw31_highestbiton(size)\n\t}\n\tcrcBytes := encode_raw32(crc)\n\twritten, err := g.writeAt(sizeBytes, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tpos += written\n\tendpos += written\n\twritten, err = g.writeAt(crcBytes, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tpos += written\n\tendpos += written\n\twritten, err = g.writeAt(buf, pos, header)\n\tif err != nil {\n\t\treturn pos, written, err\n\t}\n\tg.pos += written\n\tendpos += written\n\n\treturn startPos, endpos - startPos, nil\n}\n\nfunc (g *Gouchstore) writeCompressedChunk(buf []byte) (int64, int64, error) {\n\tcompressed, err := snappy.Encode(nil, buf)\n\tif err != nil {\n\t\treturn -1, -1, err\n\t}\n\treturn g.writeChunk(compressed, false)\n}\n<|endoftext|>"} {"text":"<commit_before>package jobs\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/globals\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/scheduler\"\n\t\"github.com\/cozy\/cozy-stack\/web\/jsonapi\"\n\t\"github.com\/cozy\/cozy-stack\/web\/middlewares\"\n\t\"github.com\/cozy\/cozy-stack\/web\/permissions\"\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/labstack\/echo\"\n\n\t\/\/ exec is needed for bad triggers cleanup\n\n\t\/\/ import workers\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/exec\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/log\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/mails\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/sharings\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/thumbnail\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/unzip\"\n)\n\ntype (\n\tapiJob struct {\n\t\tj *jobs.JobInfos\n\t}\n\tapiJobRequest struct {\n\t\tArguments json.RawMessage `json:\"arguments\"`\n\t\tOptions *jobs.JobOptions `json:\"options\"`\n\t}\n\tapiQueue struct {\n\t\tCount int `json:\"count\"`\n\t\tworkerType string\n\t}\n\tapiTrigger struct {\n\t\tt scheduler.Trigger\n\t}\n\tapiTriggerRequest struct {\n\t\tType string `json:\"type\"`\n\t\tArguments string `json:\"arguments\"`\n\t\tWorkerType string `json:\"worker\"`\n\t\tWorkerArguments json.RawMessage `json:\"worker_arguments\"`\n\t\tDebounce string `json:\"debounce\"`\n\t\tOptions *jobs.JobOptions `json:\"options\"`\n\t}\n)\n\nfunc (j *apiJob) ID() string { return j.j.ID() }\nfunc (j *apiJob) Rev() string { return j.j.Rev() }\nfunc (j *apiJob) DocType() string { return consts.Jobs }\nfunc (j *apiJob) Clone() couchdb.Doc { return j }\nfunc (j *apiJob) SetID(_ string) {}\nfunc (j *apiJob) SetRev(_ string) {}\nfunc (j *apiJob) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (j *apiJob) Included() []jsonapi.Object { return nil }\nfunc (j *apiJob) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/\" + j.j.WorkerType + \"\/\" + j.j.ID()}\n}\nfunc (j *apiJob) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(j.j)\n}\n\nfunc (q *apiQueue) ID() string { return q.workerType }\nfunc (q *apiQueue) Rev() string { return \"\" }\nfunc (q *apiQueue) DocType() string { return consts.Queues }\nfunc (q *apiQueue) Clone() couchdb.Doc { return q }\nfunc (q *apiQueue) SetID(_ string) {}\nfunc (q *apiQueue) SetRev(_ string) {}\nfunc (q *apiQueue) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (q *apiQueue) Included() []jsonapi.Object { return nil }\nfunc (q *apiQueue) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/queue\/\" + q.workerType}\n}\nfunc (q *apiQueue) Valid(key, value string) bool {\n\tswitch key {\n\tcase \"worker\":\n\t\treturn q.workerType == value\n\t}\n\treturn false\n}\n\nfunc (t *apiTrigger) ID() string { return t.t.Infos().TID }\nfunc (t *apiTrigger) Rev() string { return \"\" }\nfunc (t *apiTrigger) DocType() string { return consts.Triggers }\nfunc (t *apiTrigger) Clone() couchdb.Doc { return t }\nfunc (t *apiTrigger) SetID(_ string) {}\nfunc (t *apiTrigger) SetRev(_ string) {}\nfunc (t *apiTrigger) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (t *apiTrigger) Included() []jsonapi.Object { return nil }\nfunc (t *apiTrigger) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/triggers\/\" + t.ID()}\n}\nfunc (t *apiTrigger) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(t.t.Infos())\n}\n\nfunc getQueue(c echo.Context) error {\n\tworkerType := c.Param(\"worker-type\")\n\tcount, err := globals.GetBroker().QueueLen(workerType)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\to := &apiQueue{\n\t\tworkerType: workerType,\n\t\tCount: count,\n\t}\n\n\tif err := permissions.Allow(c, permissions.GET, o); err != nil {\n\t\treturn err\n\t}\n\n\treturn jsonapi.Data(c, http.StatusOK, o, nil)\n}\n\nfunc pushJob(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\n\treq := &apiJobRequest{}\n\tif _, err := jsonapi.Bind(c.Request(), &req); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tjr := &jobs.JobRequest{\n\t\tDomain: instance.Domain,\n\t\tWorkerType: c.Param(\"worker-type\"),\n\t\tOptions: req.Options,\n\t\tMessage: &jobs.Message{\n\t\t\tType: jobs.JSONEncoding,\n\t\t\tData: req.Arguments,\n\t\t},\n\t}\n\tif err := permissions.Allow(c, permissions.POST, jr); err != nil {\n\t\treturn err\n\t}\n\n\tjob, err := globals.GetBroker().PushJob(jr)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\treturn jsonapi.Data(c, http.StatusAccepted, &apiJob{job}, nil)\n}\n\nfunc newTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\treq := &apiTriggerRequest{}\n\tif _, err := jsonapi.Bind(c.Request(), &req); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tif req.Debounce != \"\" {\n\t\tif _, err := time.ParseDuration(req.Debounce); err != nil {\n\t\t\treturn jsonapi.InvalidAttribute(\"debounce\", err)\n\t\t}\n\t}\n\n\tt, err := scheduler.NewTrigger(&scheduler.TriggerInfos{\n\t\tType: req.Type,\n\t\tWorkerType: req.WorkerType,\n\t\tDomain: instance.Domain,\n\t\tArguments: req.Arguments,\n\t\tDebounce: req.Debounce,\n\t\tOptions: req.Options,\n\t\tMessage: &jobs.Message{\n\t\t\tType: jobs.JSONEncoding,\n\t\t\tData: req.WorkerArguments,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tif err = permissions.Allow(c, permissions.POST, t); err != nil {\n\t\treturn err\n\t}\n\n\tif err = sched.Add(t); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\treturn jsonapi.Data(c, http.StatusCreated, &apiTrigger{t}, nil)\n}\n\nfunc getTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\tt, err := sched.Get(instance.Domain, c.Param(\"trigger-id\"))\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tif err := permissions.Allow(c, permissions.GET, t); err != nil {\n\t\treturn err\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiTrigger{t}, nil)\n}\n\nfunc deleteTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\tt, err := sched.Get(instance.Domain, c.Param(\"trigger-id\"))\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tif err := permissions.Allow(c, permissions.DELETE, t); err != nil {\n\t\treturn err\n\t}\n\tif err := sched.Delete(instance.Domain, c.Param(\"trigger-id\")); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\treturn c.NoContent(http.StatusNoContent)\n}\n\nfunc getAllTriggers(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tworkerFilter := c.QueryParam(\"Worker\")\n\tsched := globals.GetScheduler()\n\tif err := permissions.AllowWholeType(c, permissions.GET, consts.Triggers); err != nil {\n\t\treturn err\n\t}\n\tts, err := sched.GetAll(instance.Domain)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tobjs := make([]jsonapi.Object, 0, len(ts))\n\tfor _, t := range ts {\n\t\tif workerFilter == \"\" || t.Infos().WorkerType == workerFilter {\n\t\t\tobjs = append(objs, &apiTrigger{t})\n\t\t}\n\t}\n\treturn jsonapi.DataList(c, http.StatusOK, objs, nil)\n}\n\nfunc getJob(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tjob, err := globals.GetBroker().GetJobInfos(instance.Domain, c.Param(\"job-id\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := permissions.Allow(c, permissions.GET, job); err != nil {\n\t\treturn err\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiJob{job}, nil)\n}\n\nfunc cleanJobs(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tif err := permissions.AllowWholeType(c, permissions.GET, consts.Jobs); err != nil {\n\t\treturn err\n\t}\n\tvar ups []*jobs.JobInfos\n\tnow := time.Now()\n\terr := couchdb.ForeachDocs(instance, consts.Jobs, func(data []byte) error {\n\t\tvar job *jobs.JobInfos\n\t\tif err := json.Unmarshal(data, &job); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif job.State != jobs.Running {\n\t\t\treturn nil\n\t\t}\n\t\tif job.StartedAt.Add(1 * time.Hour).Before(now) {\n\t\t\tups = append(ups, job)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil && !couchdb.IsNoDatabaseError(err) {\n\t\treturn err\n\t}\n\tvar errf error\n\tfor _, j := range ups {\n\t\tj.State = jobs.Done\n\t\terr := couchdb.UpdateDoc(instance, j)\n\t\tif err != nil {\n\t\t\terrf = multierror.Append(errf, err)\n\t\t}\n\t}\n\tif errf != nil {\n\t\treturn errf\n\t}\n\treturn c.JSON(200, map[string]int{\"deleted\": len(ups)})\n}\n\n\/\/ Routes sets the routing for the jobs service\nfunc Routes(router *echo.Group) {\n\trouter.GET(\"\/queue\/:worker-type\", getQueue)\n\trouter.POST(\"\/queue\/:worker-type\", pushJob)\n\n\trouter.GET(\"\/triggers\", getAllTriggers)\n\trouter.POST(\"\/triggers\", newTrigger)\n\trouter.GET(\"\/triggers\/:trigger-id\", getTrigger)\n\trouter.DELETE(\"\/triggers\/:trigger-id\", deleteTrigger)\n\n\trouter.POST(\"\/clean\", cleanJobs)\n\trouter.GET(\"\/:job-id\", getJob)\n}\n\nfunc wrapJobsError(err error) error {\n\tswitch err {\n\tcase scheduler.ErrNotFoundTrigger,\n\t\tjobs.ErrNotFoundJob,\n\t\tjobs.ErrUnknownWorker:\n\t\treturn jsonapi.NotFound(err)\n\tcase scheduler.ErrUnknownTrigger:\n\t\treturn jsonapi.InvalidAttribute(\"Type\", err)\n\t}\n\treturn err\n}\n<commit_msg>Remove outdated comment<commit_after>package jobs\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/globals\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/scheduler\"\n\t\"github.com\/cozy\/cozy-stack\/web\/jsonapi\"\n\t\"github.com\/cozy\/cozy-stack\/web\/middlewares\"\n\t\"github.com\/cozy\/cozy-stack\/web\/permissions\"\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/labstack\/echo\"\n\n\t\/\/ import workers\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/exec\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/log\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/mails\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/sharings\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/thumbnail\"\n\t_ \"github.com\/cozy\/cozy-stack\/pkg\/workers\/unzip\"\n)\n\ntype (\n\tapiJob struct {\n\t\tj *jobs.JobInfos\n\t}\n\tapiJobRequest struct {\n\t\tArguments json.RawMessage `json:\"arguments\"`\n\t\tOptions *jobs.JobOptions `json:\"options\"`\n\t}\n\tapiQueue struct {\n\t\tCount int `json:\"count\"`\n\t\tworkerType string\n\t}\n\tapiTrigger struct {\n\t\tt scheduler.Trigger\n\t}\n\tapiTriggerRequest struct {\n\t\tType string `json:\"type\"`\n\t\tArguments string `json:\"arguments\"`\n\t\tWorkerType string `json:\"worker\"`\n\t\tWorkerArguments json.RawMessage `json:\"worker_arguments\"`\n\t\tDebounce string `json:\"debounce\"`\n\t\tOptions *jobs.JobOptions `json:\"options\"`\n\t}\n)\n\nfunc (j *apiJob) ID() string { return j.j.ID() }\nfunc (j *apiJob) Rev() string { return j.j.Rev() }\nfunc (j *apiJob) DocType() string { return consts.Jobs }\nfunc (j *apiJob) Clone() couchdb.Doc { return j }\nfunc (j *apiJob) SetID(_ string) {}\nfunc (j *apiJob) SetRev(_ string) {}\nfunc (j *apiJob) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (j *apiJob) Included() []jsonapi.Object { return nil }\nfunc (j *apiJob) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/\" + j.j.WorkerType + \"\/\" + j.j.ID()}\n}\nfunc (j *apiJob) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(j.j)\n}\n\nfunc (q *apiQueue) ID() string { return q.workerType }\nfunc (q *apiQueue) Rev() string { return \"\" }\nfunc (q *apiQueue) DocType() string { return consts.Queues }\nfunc (q *apiQueue) Clone() couchdb.Doc { return q }\nfunc (q *apiQueue) SetID(_ string) {}\nfunc (q *apiQueue) SetRev(_ string) {}\nfunc (q *apiQueue) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (q *apiQueue) Included() []jsonapi.Object { return nil }\nfunc (q *apiQueue) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/queue\/\" + q.workerType}\n}\nfunc (q *apiQueue) Valid(key, value string) bool {\n\tswitch key {\n\tcase \"worker\":\n\t\treturn q.workerType == value\n\t}\n\treturn false\n}\n\nfunc (t *apiTrigger) ID() string { return t.t.Infos().TID }\nfunc (t *apiTrigger) Rev() string { return \"\" }\nfunc (t *apiTrigger) DocType() string { return consts.Triggers }\nfunc (t *apiTrigger) Clone() couchdb.Doc { return t }\nfunc (t *apiTrigger) SetID(_ string) {}\nfunc (t *apiTrigger) SetRev(_ string) {}\nfunc (t *apiTrigger) Relationships() jsonapi.RelationshipMap { return nil }\nfunc (t *apiTrigger) Included() []jsonapi.Object { return nil }\nfunc (t *apiTrigger) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/jobs\/triggers\/\" + t.ID()}\n}\nfunc (t *apiTrigger) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(t.t.Infos())\n}\n\nfunc getQueue(c echo.Context) error {\n\tworkerType := c.Param(\"worker-type\")\n\tcount, err := globals.GetBroker().QueueLen(workerType)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\to := &apiQueue{\n\t\tworkerType: workerType,\n\t\tCount: count,\n\t}\n\n\tif err := permissions.Allow(c, permissions.GET, o); err != nil {\n\t\treturn err\n\t}\n\n\treturn jsonapi.Data(c, http.StatusOK, o, nil)\n}\n\nfunc pushJob(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\n\treq := &apiJobRequest{}\n\tif _, err := jsonapi.Bind(c.Request(), &req); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tjr := &jobs.JobRequest{\n\t\tDomain: instance.Domain,\n\t\tWorkerType: c.Param(\"worker-type\"),\n\t\tOptions: req.Options,\n\t\tMessage: &jobs.Message{\n\t\t\tType: jobs.JSONEncoding,\n\t\t\tData: req.Arguments,\n\t\t},\n\t}\n\tif err := permissions.Allow(c, permissions.POST, jr); err != nil {\n\t\treturn err\n\t}\n\n\tjob, err := globals.GetBroker().PushJob(jr)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\treturn jsonapi.Data(c, http.StatusAccepted, &apiJob{job}, nil)\n}\n\nfunc newTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\treq := &apiTriggerRequest{}\n\tif _, err := jsonapi.Bind(c.Request(), &req); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tif req.Debounce != \"\" {\n\t\tif _, err := time.ParseDuration(req.Debounce); err != nil {\n\t\t\treturn jsonapi.InvalidAttribute(\"debounce\", err)\n\t\t}\n\t}\n\n\tt, err := scheduler.NewTrigger(&scheduler.TriggerInfos{\n\t\tType: req.Type,\n\t\tWorkerType: req.WorkerType,\n\t\tDomain: instance.Domain,\n\t\tArguments: req.Arguments,\n\t\tDebounce: req.Debounce,\n\t\tOptions: req.Options,\n\t\tMessage: &jobs.Message{\n\t\t\tType: jobs.JSONEncoding,\n\t\t\tData: req.WorkerArguments,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\n\tif err = permissions.Allow(c, permissions.POST, t); err != nil {\n\t\treturn err\n\t}\n\n\tif err = sched.Add(t); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\treturn jsonapi.Data(c, http.StatusCreated, &apiTrigger{t}, nil)\n}\n\nfunc getTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\tt, err := sched.Get(instance.Domain, c.Param(\"trigger-id\"))\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tif err := permissions.Allow(c, permissions.GET, t); err != nil {\n\t\treturn err\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiTrigger{t}, nil)\n}\n\nfunc deleteTrigger(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tsched := globals.GetScheduler()\n\tt, err := sched.Get(instance.Domain, c.Param(\"trigger-id\"))\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tif err := permissions.Allow(c, permissions.DELETE, t); err != nil {\n\t\treturn err\n\t}\n\tif err := sched.Delete(instance.Domain, c.Param(\"trigger-id\")); err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\treturn c.NoContent(http.StatusNoContent)\n}\n\nfunc getAllTriggers(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tworkerFilter := c.QueryParam(\"Worker\")\n\tsched := globals.GetScheduler()\n\tif err := permissions.AllowWholeType(c, permissions.GET, consts.Triggers); err != nil {\n\t\treturn err\n\t}\n\tts, err := sched.GetAll(instance.Domain)\n\tif err != nil {\n\t\treturn wrapJobsError(err)\n\t}\n\tobjs := make([]jsonapi.Object, 0, len(ts))\n\tfor _, t := range ts {\n\t\tif workerFilter == \"\" || t.Infos().WorkerType == workerFilter {\n\t\t\tobjs = append(objs, &apiTrigger{t})\n\t\t}\n\t}\n\treturn jsonapi.DataList(c, http.StatusOK, objs, nil)\n}\n\nfunc getJob(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tjob, err := globals.GetBroker().GetJobInfos(instance.Domain, c.Param(\"job-id\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := permissions.Allow(c, permissions.GET, job); err != nil {\n\t\treturn err\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiJob{job}, nil)\n}\n\nfunc cleanJobs(c echo.Context) error {\n\tinstance := middlewares.GetInstance(c)\n\tif err := permissions.AllowWholeType(c, permissions.GET, consts.Jobs); err != nil {\n\t\treturn err\n\t}\n\tvar ups []*jobs.JobInfos\n\tnow := time.Now()\n\terr := couchdb.ForeachDocs(instance, consts.Jobs, func(data []byte) error {\n\t\tvar job *jobs.JobInfos\n\t\tif err := json.Unmarshal(data, &job); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif job.State != jobs.Running {\n\t\t\treturn nil\n\t\t}\n\t\tif job.StartedAt.Add(1 * time.Hour).Before(now) {\n\t\t\tups = append(ups, job)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil && !couchdb.IsNoDatabaseError(err) {\n\t\treturn err\n\t}\n\tvar errf error\n\tfor _, j := range ups {\n\t\tj.State = jobs.Done\n\t\terr := couchdb.UpdateDoc(instance, j)\n\t\tif err != nil {\n\t\t\terrf = multierror.Append(errf, err)\n\t\t}\n\t}\n\tif errf != nil {\n\t\treturn errf\n\t}\n\treturn c.JSON(200, map[string]int{\"deleted\": len(ups)})\n}\n\n\/\/ Routes sets the routing for the jobs service\nfunc Routes(router *echo.Group) {\n\trouter.GET(\"\/queue\/:worker-type\", getQueue)\n\trouter.POST(\"\/queue\/:worker-type\", pushJob)\n\n\trouter.GET(\"\/triggers\", getAllTriggers)\n\trouter.POST(\"\/triggers\", newTrigger)\n\trouter.GET(\"\/triggers\/:trigger-id\", getTrigger)\n\trouter.DELETE(\"\/triggers\/:trigger-id\", deleteTrigger)\n\n\trouter.POST(\"\/clean\", cleanJobs)\n\trouter.GET(\"\/:job-id\", getJob)\n}\n\nfunc wrapJobsError(err error) error {\n\tswitch err {\n\tcase scheduler.ErrNotFoundTrigger,\n\t\tjobs.ErrNotFoundJob,\n\t\tjobs.ErrUnknownWorker:\n\t\treturn jsonapi.NotFound(err)\n\tcase scheduler.ErrUnknownTrigger:\n\t\treturn jsonapi.InvalidAttribute(\"Type\", err)\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.256\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tlevel, err := logrus.ParseLevel(os.Getenv(\"LOG_LEVEL\"))\n\tif err != nil {\n\t\tlevel = logrus.InfoLevel\n\t}\n\tlogrus.SetLevel(level)\n\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.223\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory; use k8s for kuberneted\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.StringVar(&conf.Namespace, \"namespace\", \"\", \"kubernetes namespace to monitor\")\n\tflag.StringVar(&conf.LabelSelector, \"label-selector\", \"\", \"kubernetes label selector to monitor\")\n\tflag.IntVar(&conf.TargetPort, \"target-port\", 8080, \"kubernetes port to target on selected pods\")\n\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf) \/\/ Handles case where DBurl == \"k8s\"\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\tdefer db.Close()\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<commit_msg>fnlb: 0.0.257 release [skip ci]<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.257\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tlevel, err := logrus.ParseLevel(os.Getenv(\"LOG_LEVEL\"))\n\tif err != nil {\n\t\tlevel = logrus.InfoLevel\n\t}\n\tlogrus.SetLevel(level)\n\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.224\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory; use k8s for kuberneted\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.StringVar(&conf.Namespace, \"namespace\", \"\", \"kubernetes namespace to monitor\")\n\tflag.StringVar(&conf.LabelSelector, \"label-selector\", \"\", \"kubernetes label selector to monitor\")\n\tflag.IntVar(&conf.TargetPort, \"target-port\", 8080, \"kubernetes port to target on selected pods\")\n\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf) \/\/ Handles case where DBurl == \"k8s\"\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\tdefer db.Close()\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.257\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tlevel, err := logrus.ParseLevel(os.Getenv(\"LOG_LEVEL\"))\n\tif err != nil {\n\t\tlevel = logrus.InfoLevel\n\t}\n\tlogrus.SetLevel(level)\n\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.224\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory; use k8s for kuberneted\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.StringVar(&conf.Namespace, \"namespace\", \"\", \"kubernetes namespace to monitor\")\n\tflag.StringVar(&conf.LabelSelector, \"label-selector\", \"\", \"kubernetes label selector to monitor\")\n\tflag.IntVar(&conf.TargetPort, \"target-port\", 8080, \"kubernetes port to target on selected pods\")\n\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf) \/\/ Handles case where DBurl == \"k8s\"\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\tdefer db.Close()\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<commit_msg>fnlb: 0.0.258 release [skip ci]<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.258\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tlevel, err := logrus.ParseLevel(os.Getenv(\"LOG_LEVEL\"))\n\tif err != nil {\n\t\tlevel = logrus.InfoLevel\n\t}\n\tlogrus.SetLevel(level)\n\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.225\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory; use k8s for kuberneted\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.StringVar(&conf.Namespace, \"namespace\", \"\", \"kubernetes namespace to monitor\")\n\tflag.StringVar(&conf.LabelSelector, \"label-selector\", \"\", \"kubernetes label selector to monitor\")\n\tflag.IntVar(&conf.TargetPort, \"target-port\", 8080, \"kubernetes port to target on selected pods\")\n\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf) \/\/ Handles case where DBurl == \"k8s\"\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\tdefer db.Close()\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.58\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.25\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tg, err := lb.NewAllGrouper(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\th := lb.NewProxy(k, g, r, conf)\n\th = g.Wrap(h) \/\/ add\/del\/list endpoints\n\th = r.Wrap(h) \/\/ stats \/ dash endpoint\n\n\terr = serve(conf.Listen, h)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"server error\")\n\t}\n}\n\nfunc serve(addr string, handler http.Handler) error {\n\tserver := &http.Server{Addr: addr, Handler: handler}\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\tgo func() {\n\t\tfor sig := range ch {\n\t\t\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\t\t\tserver.Shutdown(context.Background()) \/\/ safe shutdown\n\t\t\treturn\n\t\t}\n\t}()\n\treturn server.ListenAndServe()\n}\n<commit_msg>fnlb: 0.0.59 release [skip ci]<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.59\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.26\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tg, err := lb.NewAllGrouper(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\th := lb.NewProxy(k, g, r, conf)\n\th = g.Wrap(h) \/\/ add\/del\/list endpoints\n\th = r.Wrap(h) \/\/ stats \/ dash endpoint\n\n\terr = serve(conf.Listen, h)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"server error\")\n\t}\n}\n\nfunc serve(addr string, handler http.Handler) error {\n\tserver := &http.Server{Addr: addr, Handler: handler}\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\tgo func() {\n\t\tfor sig := range ch {\n\t\t\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\t\t\tserver.Shutdown(context.Background()) \/\/ safe shutdown\n\t\t\treturn\n\t\t}\n\t}()\n\treturn server.ListenAndServe()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n)\n\nconst VERSION = \"0.0.3\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.Parse()\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tg, err := lb.NewAllGrouper(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\th := lb.NewProxy(k, g, r, conf)\n\th = g.Wrap(h) \/\/ add\/del\/list endpoints\n\th = r.Wrap(h) \/\/ stats \/ dash endpoint\n\n\terr = serve(conf.Listen, h)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"server error\")\n\t}\n}\n\nfunc serve(addr string, handler http.Handler) error {\n\tserver := &http.Server{Addr: addr, Handler: handler}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tdefer wg.Wait()\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor sig := range ch {\n\t\t\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\t\t\tserver.Shutdown(context.Background()) \/\/ safe shutdown\n\t\t\treturn\n\t\t}\n\t}()\n\treturn server.ListenAndServe()\n}\n<commit_msg>fnlb: 0.0.4 release [skip ci]<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n)\n\nconst VERSION = \"0.0.4\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.Parse()\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tg, err := lb.NewAllGrouper(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\th := lb.NewProxy(k, g, r, conf)\n\th = g.Wrap(h) \/\/ add\/del\/list endpoints\n\th = r.Wrap(h) \/\/ stats \/ dash endpoint\n\n\terr = serve(conf.Listen, h)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"server error\")\n\t}\n}\n\nfunc serve(addr string, handler http.Handler) error {\n\tserver := &http.Server{Addr: addr, Handler: handler}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tdefer wg.Wait()\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor sig := range ch {\n\t\t\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\t\t\tserver.Shutdown(context.Background()) \/\/ safe shutdown\n\t\t\treturn\n\t\t}\n\t}()\n\treturn server.ListenAndServe()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.150\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.117\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<commit_msg>fnlb: 0.0.151 release [skip ci]<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/fnproject\/fn\/fnlb\/lb\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst VERSION = \"0.0.151\"\n\nfunc main() {\n\t\/\/ XXX (reed): normalize\n\tfnodes := flag.String(\"nodes\", \"\", \"comma separated list of functions nodes\")\n\tminAPIVersion := flag.String(\"min-api-version\", \"0.0.118\", \"minimal node API to accept\")\n\n\tvar conf lb.Config\n\tflag.StringVar(&conf.DBurl, \"db\", \"sqlite3:\/\/:memory:\", \"backend to store nodes, default to in memory\")\n\tflag.StringVar(&conf.Listen, \"listen\", \":8081\", \"port to run on\")\n\tflag.StringVar(&conf.MgmtListen, \"mgmt-listen\", \":8081\", \"management port to run on\")\n\tflag.IntVar(&conf.ShutdownTimeout, \"shutdown-timeout\", 0, \"graceful shutdown timeout\")\n\tflag.IntVar(&conf.HealthcheckInterval, \"hc-interval\", 3, \"how often to check f(x) nodes, in seconds\")\n\tflag.StringVar(&conf.HealthcheckEndpoint, \"hc-path\", \"\/version\", \"endpoint to determine node health\")\n\tflag.IntVar(&conf.HealthcheckUnhealthy, \"hc-unhealthy\", 2, \"threshold of failed checks to declare node unhealthy\")\n\tflag.IntVar(&conf.HealthcheckHealthy, \"hc-healthy\", 1, \"threshold of success checks to declare node healthy\")\n\tflag.IntVar(&conf.HealthcheckTimeout, \"hc-timeout\", 5, \"timeout of healthcheck endpoint, in seconds\")\n\tflag.StringVar(&conf.ZipkinURL, \"zipkin\", \"\", \"zipkin endpoint to send traces\")\n\tflag.Parse()\n\n\tconf.MinAPIVersion = semver.New(*minAPIVersion)\n\n\tif len(*fnodes) > 0 {\n\t\t\/\/ starting w\/o nodes is fine too\n\t\tconf.Nodes = strings.Split(*fnodes, \",\")\n\t}\n\n\tconf.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 120 * time.Second,\n\t\t}).Dial,\n\t\tMaxIdleConnsPerHost: 512,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(4096),\n\t\t},\n\t}\n\n\tdb, err := lb.NewDB(conf)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up database\")\n\t}\n\n\tg, err := lb.NewAllGrouper(conf, db)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"error setting up grouper\")\n\t}\n\n\tr := lb.NewConsistentRouter(conf)\n\tk := func(r *http.Request) (string, error) {\n\t\treturn r.URL.Path, nil\n\t}\n\n\tservers := make([]*http.Server, 0, 1)\n\thandler := lb.NewProxy(k, g, r, conf)\n\n\t\/\/ a separate mgmt listener is requested? then let's create a LB traffic only server\n\tif conf.Listen != conf.MgmtListen {\n\t\tservers = append(servers, &http.Server{Addr: conf.Listen, Handler: handler})\n\t\thandler = lb.NullHandler()\n\t}\n\n\t\/\/ add mgmt endpoints to the handler\n\thandler = g.Wrap(handler) \/\/ add\/del\/list endpoints\n\thandler = r.Wrap(handler) \/\/ stats \/ dash endpoint\n\n\tservers = append(servers, &http.Server{Addr: conf.MgmtListen, Handler: handler})\n\tserve(servers, &conf)\n}\n\nfunc serve(servers []*http.Server, conf *lb.Config) {\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGQUIT, syscall.SIGINT)\n\n\tfor i := 0; i < len(servers); i++ {\n\t\tgo func(idx int) {\n\t\t\terr := servers[idx].ListenAndServe()\n\t\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).WithError(err).Fatal(\"server error\")\n\t\t\t} else {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": idx}).Info(\"server stopped\")\n\t\t\t}\n\t\t}(i)\n\t}\n\n\tsig := <-ch\n\tlogrus.WithFields(logrus.Fields{\"signal\": sig}).Info(\"received signal\")\n\n\tfor i := 0; i < len(servers); i++ {\n\n\t\tctx := context.Background()\n\n\t\tif conf.ShutdownTimeout > 0 {\n\t\t\ttmpCtx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.ShutdownTimeout)*time.Second)\n\t\t\tctx = tmpCtx\n\t\t\tdefer cancel()\n\t\t}\n\n\t\terr := servers[i].Shutdown(ctx) \/\/ safe shutdown\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).WithError(err).Fatal(\"server shutdown error\")\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\"server_id\": i}).Info(\"server shutdown\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/hatofmonkeys\/cloudfocker\/focker\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Focker\"\n\tapp.Usage = \"Fock the Cloud, run apps locally!\"\n\tapp.Action = func(c *cli.Context) {\n\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"docker\",\n\t\t\tUsage: \"print the local Docker info\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.DockerVersion(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"this\",\n\t\t\tUsage: \"Download the Cloud Foundry base image\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.ImportRootfsImage(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dockerfile\",\n\t\t\tUsage: \"Create a dockerfile for the application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.WriteDockerfile(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"build\",\n\t\t\tUsage: \"Create an image for the application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.BuildImage(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"up\",\n\t\t\tUsage: \"Start the container for the application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tpwd, err := os.Getwd()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\" %s\", err)\n\t\t\t\t} else {\n\t\t\t\t\tfocker.RunStager(os.Stdout, pwd)\n\t\t\t\t}\n\t\t\t\tfocker.RunRuntime(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"off\",\n\t\t\tUsage: \"Stop the container and remove it\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.StopRuntime()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"add-buildpack\",\n\t\t\tUsage: \"add-buildpack [URL] - add a buildpack from a GitHub URL\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tif url := c.Args().First(); url != \"\" {\n\t\t\t\t\tfocker.AddBuildpack(os.Stdout, url)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Please supply a GitHub URL to download\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stage\",\n\t\t\tUsage: \"stage an application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tpwd, err := os.Getwd()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\" %s\", err)\n\t\t\t\t} else {\n\t\t\t\t\tfocker.RunStager(os.Stdout, pwd)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"Run the staged container\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.RunRuntime(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stage-internal\",\n\t\t\tUsage: \"used to stage the app inside a container\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.StageApp(os.Stdout)\n\t\t\t},\n\t\t\tHideHelp: true,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>Tidied up CLI<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/hatofmonkeys\/cloudfocker\/focker\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Focker\"\n\tapp.Usage = \"Fock the Cloud, run apps locally!\"\n\tapp.Action = func(c *cli.Context) {\n\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"docker\",\n\t\t\tUsage: \"print the local Docker info\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.DockerVersion(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"this\",\n\t\t\tUsage: \"Download the Cloud Foundry base image\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.ImportRootfsImage(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"up\",\n\t\t\tUsage: \"Start the container for the application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tpwd, err := os.Getwd()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\" %s\", err)\n\t\t\t\t} else {\n\t\t\t\t\tfocker.RunStager(os.Stdout, pwd)\n\t\t\t\t}\n\t\t\t\tfocker.RunRuntime(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"off\",\n\t\t\tUsage: \"Stop the container and remove it\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.StopRuntime()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"add-buildpack\",\n\t\t\tUsage: \"add-buildpack [URL] - add a buildpack from a GitHub URL\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tif url := c.Args().First(); url != \"\" {\n\t\t\t\t\tfocker.AddBuildpack(os.Stdout, url)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Please supply a GitHub URL to download\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stage\",\n\t\t\tUsage: \"stage an application\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tpwd, err := os.Getwd()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\" %s\", err)\n\t\t\t\t} else {\n\t\t\t\t\tfocker.RunStager(os.Stdout, pwd)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"run the staged container\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.RunRuntime(os.Stdout)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stage-internal\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfocker := focker.NewFocker()\n\t\t\t\tfocker.StageApp(os.Stdout)\n\t\t\t},\n\t\t\tHideHelp: true,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ parsing mathematical expressions\n\/\/\n\/\/go:generate nex lexer.nex\n\/\/go:generate go tool yacc -o parser.y.go parser.y\npackage parser\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nfunc Formula(n int) string {\n\tif true {\n\t\tvar terms []string\n\t\tfor i := 0; i < n; i++ {\n\t\t\tterms = append(terms, fmt.Sprintf(\"x%d\", i))\n\t\t}\n\t\treturn \"f:=\" + strings.Join(terms, \"*\")\n\t}\n\tvar terms []string\n\tfor i := 0; i < n; i++ {\n\t\tterm := fmt.Sprintf(\"%f * x%d\", rand.Float64(), i)\n\t\tterms = append(terms, term)\n\t}\n\treturn fmt.Sprintf(\"f := sqrt(a*x) + pow(a,b) + a-b+c\/d + log(1 + exp(-y * (%s)))\", strings.Join(terms, \"+\"))\n}\n\nfunc computeDerivatives(w io.Writer, steps []Step, private string) {\n\tn := len(steps)\n\tfor i := 0; i < n; i++ {\n\t\tj := n - i - 1\n\t\tstep := steps[j]\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(w, \"const b_%s = 1.0\\n\", step.lhs)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"b_%s := 0.0\\n\", step.lhs)\n\t\t}\n\t\tfor k := j + 1; k < n; k++ {\n\t\t\ts3 := steps[k]\n\t\t\tif d := derivative(s3, step, private); len(d) > 0 {\n\t\t\t\tfmt.Fprintf(w, \"b_%s += b_%s * (%s)\\n\", step.lhs, s3.lhs, d)\n\t\t\t}\n\t\t}\n\t\tif step.decl {\n\t\t\tfmt.Fprintf(w, \"grad_%s[\\\"%s\\\"] = b_%s\\n\", private, step.rhs, step.lhs)\n\t\t}\n\t}\n}\n\ntype Step struct {\n\tdecl bool\n\tlhs string\n\trhs string\n\tf string\n\targs []string\n}\n\nfunc derivative(num, denom Step, private string) string {\n\tvar args []int\n\tfor i, a := range num.args {\n\t\tif a == denom.lhs {\n\t\t\targs = append(args, i)\n\t\t}\n\t}\n\tif len(args) == 0 {\n\t\treturn \"\"\n\t}\n\tvar list []string\n\tfor _, a := range args {\n\t\tlist = append(list, fmt.Sprintf(\"d_%s_%s(%d,%s)\", num.f, private, a, strings.Join(num.args, \",\")))\n\t}\n\treturn strings.Join(list, \"+\")\n}\n\ntype NodeType string\n\nconst (\n\tnumberNT NodeType = \"NUM\"\n\tidentifierNT NodeType = \"IDENT\"\n\tfunctionNT NodeType = \"FUNC\"\n)\n\ntype Node struct {\n\tType NodeType `json:\"T,omitempty\"`\n\tS string `json:\",omitempty\"`\n\tF float64 `json:\",omitempty\"`\n\tChildren []*Node `json:\"C,omitempty\"`\n\tName string `json:\"N,omitempty\"`\n}\n\nfunc (n Node) String() string {\n\tbuf, _ := json.Marshal(n)\n\treturn string(buf)\n}\n\nfunc Run(args []string) {\n\tvar private, templates, formula, output string\n\tvar dx float64\n\tflags := flag.NewFlagSet(\"parse\", flag.ExitOnError)\n\tflags.StringVar(&formula, \"formula\", Formula(10), \"the formula to parse\")\n\tflags.StringVar(&private, \"private\", defaultPrivateString, \"the private variable string\")\n\tflags.StringVar(&templates, \"templates\", defaultTemplates, \"directory of go template functions\")\n\tflags.StringVar(&output, \"output\", \"compute.go\", \"name of go program to output\")\n\tflags.Float64Var(&dx, \"dx\", defaultDx, \"infinitesimal for numerical differentiation\")\n\tflags.Parse(args)\n\n\tcode, err := Parse(private, templates, formula, 0.00001)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf, err := os.Create(output)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf.Write(code)\n\tf.Close()\n}\n\nconst (\n\tdefaultPrivateString = \"pvt\"\n\tdefaultDx = 0.00001\n\tdefaultTemplates = \"src\/xoba\/ad\/parser\/templates\"\n)\n\nfunc Parse(private, templates, formula string, dx float64) ([]byte, error) {\n\tif len(private) == 0 {\n\t\tprivate = defaultPrivateString\n\t}\n\tif dx == 0 {\n\t\tdx = defaultDx\n\t}\n\tif len(templates) == 0 {\n\t\ttemplates = defaultTemplates\n\t}\n\tlex := NewContext(NewLexer(strings.NewReader(formula)))\n\tyyParse(lex)\n\tif len(lex.errors) > 0 {\n\t\treturn nil, lex.errors[0]\n\t}\n\tif lex.lhs == nil || lex.rhs == nil {\n\t\treturn nil, fmt.Errorf(\"parse error: lhs or rhs is nil\")\n\t}\n\tvars := make(map[string]string)\n\tvp := &VarParser{vars: vars}\n\tsp := &StepParser{vars: vars}\n\tvar steps []Step\n\tsteps = append(steps, vp.getVars(lex.rhs, private)...)\n\tsteps = append(steps, sp.program(lex.rhs, private)...)\n\ty := steps[len(steps)-1].lhs\n\n\tchecker := new(bytes.Buffer)\n\t{\n\t\tfmt.Fprintf(checker, \"const delta_%s = %f\\n\", private, dx)\n\t\tfmt.Fprintf(checker, `calc_%s := func() float64 {\n%s\nreturn %s\n}\n`, private, formula, lex.lhs.S)\n\t\tfmt.Fprintf(checker, \"tmp1_%s := calc_%s()\\n\", private, private)\n\t\tfor k := range vars {\n\t\t\tfmt.Fprintln(checker, \"{\")\n\t\t\tfmt.Fprintf(checker, \"%s += delta_%s\\n\", k, private)\n\t\t\tfmt.Fprintf(checker, \"tmp2_%s := calc_%s()\\n\", private, private)\n\t\t\tfmt.Fprintf(checker, \"%s -= delta_%s\\n\", k, private)\n\t\t\tfmt.Fprintf(checker, \"grad_%s[%q] = (tmp2_%s - tmp1_%s)\/delta_%s\\n\", private, k, private, private, private)\n\t\t\tfmt.Fprintln(checker, \"}\")\n\t\t}\n\t}\n\n\tpgm := new(bytes.Buffer)\n\tfor _, s := range steps {\n\t\tfmt.Fprintln(pgm, s)\n\t}\n\tcomputeDerivatives(pgm, steps, private)\n\tvar list []string\n\tfor v := range vars {\n\t\tlist = append(list, v)\n\t}\n\tsort.Strings(list)\n\n\tf := new(bytes.Buffer)\n\n\tdecls := new(bytes.Buffer)\n\tfor _, v := range list {\n\t\tfmt.Fprintf(decls, \"%s := rand.Float64();\\n\", v)\n\t\tfmt.Fprintf(decls, \"fmt.Printf(\\\"setting %s = %%.20f\\\\n\\\",%s)\\n\", v, v)\n\t}\n\tfmt.Fprintln(decls, `fmt.Println();`)\n\n\tvar imports Imports\n\timports.Add(\"fmt\")\n\timports.Add(\"math\")\n\timports.Add(\"time\")\n\timports.Add(\"math\/rand\")\n\n\tt := template.Must(template.New(\"output.go\").Parse(`package main\n{{.imports}}\n\n\/\/ automatically compute the value and gradient of {{.qformula}}\nfunc ComputeAD({{.vars}} float64) (float64,map[string]float64) {\ngrad_{{.private}} := make(map[string]float64)\n{{.program}} return {{.y}},grad_{{.private}};\n}\n\nfunc main() {\nfmt.Println(\"running autodiff code on {{.formula}}\\n\");\nrand.Seed(time.Now().UTC().UnixNano())\n{{.decls}} \n\n\tc1, grad1 := ComputeAD({{.vars}})\n\tfmt.Printf(\"autodiff value : %f\\n\", c1)\n\tfmt.Printf(\"autodiff gradient: %v\\n\\n\", grad1)\n\n\tc2, grad2 := ComputeNumerical({{.vars}})\n\tfmt.Printf(\"numeric value : %f\\n\", c2)\n\tfmt.Printf(\"numeric gradient: %v\\n\\n\", grad2)\n\n\tvar total float64\n\tadd := func(n string, x float64) {\n\t\tfmt.Printf(\"%s difference: %f\\n\", n, x)\n\t\ttotal += math.Abs(x)\n\t}\n\tadd(\"value\", c1-c2)\n\tfor k, v := range grad2 {\n\t\tadd(fmt.Sprintf(\"grad[%3s]\", k), grad1[k]-v)\n\t}\nfmt.Printf(\"\\nsum of absolute differences: %f\\n\",total);\n}\n\n\/\/ numerically compute the value and gradient of {{.qformula}}\nfunc ComputeNumerical({{.vars}} float64) (float64,map[string]float64) {\ngrad_{{.private}} := make(map[string]float64)\n{{.checker}} return tmp1_{{.private}},grad_{{.private}};\n}\n\n{{.funcs}}\n\n\n`))\n\n\ttemplateImports, code, err := GenTemplates(templates, private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timports.AddAll(templateImports)\n\n\tt.Execute(f, map[string]interface{}{\n\t\t\"decls\": decls.String(),\n\t\t\"formula\": formula,\n\t\t\"qformula\": fmt.Sprintf(\"%q\", formula),\n\t\t\"lhs\": lex.lhs.S,\n\t\t\"vars\": strings.Join(list, \", \"),\n\t\t\"program\": pgm.String(),\n\t\t\"checker\": checker.String(),\n\t\t\"y\": y,\n\t\t\"funcs\": code,\n\t\t\"private\": private,\n\t\t\"imports\": imports.String(),\n\t})\n\n\treturn GofmtBuffer(f.Bytes())\n}\n\ntype Imports struct {\n\tlist []string\n}\n\nfunc (i Imports) String() string {\n\tm := make(map[string]bool)\n\tfor _, x := range i.list {\n\t\tx = strings.Replace(x, `\"`, ``, -1)\n\t\tm[x] = true\n\t}\n\tout := new(bytes.Buffer)\n\tfmt.Fprintf(out, \"import (\\n\")\n\tfor k := range m {\n\t\tfmt.Fprintf(out, \"%q\\n\", k)\n\t}\n\tfmt.Fprintf(out, \")\\n\")\n\treturn out.String()\n}\n\nfunc (i *Imports) Add(x string) {\n\ti.list = append(i.list, x)\n}\n\nfunc (i *Imports) AddAll(list []string) {\n\tfor _, x := range list {\n\t\ti.Add(x)\n\t}\n}\n\nfunc formatImports(list []string) string {\n\tvar out []string\n\tm := make(map[string]bool)\n\tfor _, x := range list {\n\t\tm[x] = true\n\t}\n\tfor k := range m {\n\t\tout = append(out, k)\n\t}\n\treturn strings.Join(out, \"\\n\")\n}\n\nfunc Gofmt(p string) error {\n\tcmd := exec.Command(\"gofmt\", \"-w\", p)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc GofmtBuffer(code []byte) ([]byte, error) {\n\tout := new(bytes.Buffer)\n\tcmd := exec.Command(\"gofmt\")\n\tcmd.Stdin = bytes.NewReader(code)\n\tcmd.Stdout = out\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out.Bytes(), nil\n}\n\ntype VarParser struct {\n\ti int\n\tvars map[string]string\n}\n\n\/\/ gets all the vars and name them in-place\nfunc (v *VarParser) getVars(rhs *Node, private string) (out []Step) {\n\tswitch rhs.Type {\n\tcase identifierNT:\n\t\tif _, ok := v.vars[rhs.S]; !ok {\n\t\t\trhs.Name = fmt.Sprintf(\"v_%d_%s\", v.i, private)\n\t\t\tv.vars[rhs.S] = rhs.Name\n\t\t\tv.i++\n\t\t\tstep := Step{\n\t\t\t\tdecl: true,\n\t\t\t\tlhs: rhs.Name,\n\t\t\t\trhs: rhs.S,\n\t\t\t}\n\t\t\tout = append(out, step)\n\t\t} else {\n\t\t\trhs.Name = v.vars[rhs.S]\n\t\t}\n\tcase functionNT:\n\t\tfor _, n := range rhs.Children {\n\t\t\tout = append(out, v.getVars(n, private)...)\n\t\t}\n\t}\n\treturn\n}\n\ntype StepParser struct {\n\ti int\n\tvars map[string]string\n}\n\nfunc (s Step) String() string {\n\treturn fmt.Sprintf(\"%s := %s;\", s.lhs, s.rhs)\n}\n\nfunc (s *StepParser) program(rhs *Node, private string) (out []Step) {\n\tswitch rhs.Type {\n\tcase numberNT:\n\t\trhs.Name = fmt.Sprintf(\"%f\", rhs.F)\n\tcase functionNT:\n\t\tvar args []string\n\t\tfor _, n := range rhs.Children {\n\t\t\tsteps := s.program(n, private)\n\t\t\tout = append(out, steps...)\n\t\t\targs = append(args, n.Name)\n\t\t}\n\t\tstep := Step{\n\t\t\tlhs: fmt.Sprintf(\"s_%d_%s\", s.i, private),\n\t\t\trhs: fmt.Sprintf(\"%s_%s(%s)\", rhs.S, private, strings.Join(args, \",\")),\n\t\t\tf: rhs.S,\n\t\t\targs: args,\n\t\t}\n\t\tout = append(out, step)\n\t\trhs.Name = step.lhs\n\t\ts.i++\n\t}\n\treturn out\n}\n\nfunc Function(ident string, args ...*Node) *Node {\n\treturn &Node{\n\t\tType: functionNT,\n\t\tS: ident,\n\t\tChildren: args,\n\t}\n}\n\nfunc Number(n float64) *Node {\n\treturn &Node{\n\t\tType: numberNT,\n\t\tF: n,\n\t}\n}\n\nfunc LexIdentifier(s string) *Node {\n\treturn &Node{\n\t\tType: identifierNT,\n\t\tS: s,\n\t}\n}\n\nfunc LexNumber(s string) *Node {\n\tn, _ := strconv.ParseFloat(s, 64)\n\treturn Number(n)\n}\n\nfunc Negate(a *Node) *Node {\n\treturn Function(\"multiply\", Number(-1), a)\n}\n\ntype context struct {\n\tlhs *Node\n\trhs *Node\n\tyyLexer\n\terrors []error\n}\n\nfunc NewContext(y yyLexer) *context {\n\treturn &context{yyLexer: y}\n}\n\nfunc (c *context) Error(e string) {\n\tc.Error2(fmt.Errorf(\"%s\", e))\n}\n\nfunc (c *context) Error2(e error) {\n\tlog.Printf(\"oops: %v\\n\", e)\n\tc.errors = append(c.errors, e)\n}\n<commit_msg>comments and more description<commit_after>\/\/ parsing mathematical expressions\n\/\/\n\/\/go:generate nex lexer.nex\n\/\/go:generate go tool yacc -o parser.y.go parser.y\npackage parser\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nfunc Formula(n int) string {\n\tif true {\n\t\tvar terms []string\n\t\tfor i := 0; i < n; i++ {\n\t\t\tterms = append(terms, fmt.Sprintf(\"x%d\", i))\n\t\t}\n\t\treturn \"f:=\" + strings.Join(terms, \"*\")\n\t}\n\tvar terms []string\n\tfor i := 0; i < n; i++ {\n\t\tterm := fmt.Sprintf(\"%f * x%d\", rand.Float64(), i)\n\t\tterms = append(terms, term)\n\t}\n\treturn fmt.Sprintf(\"f := sqrt(a*x) + pow(a,b) + a-b+c\/d + log(1 + exp(-y * (%s)))\", strings.Join(terms, \"+\"))\n}\n\nfunc computeDerivatives(w io.Writer, steps []Step, private string) {\n\tn := len(steps)\n\tfor i := 0; i < n; i++ {\n\t\tj := n - i - 1\n\t\tstep := steps[j]\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(w, \"const b_%s = 1.0\\n\", step.lhs)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"b_%s := 0.0\\n\", step.lhs)\n\t\t}\n\t\tfor k := j + 1; k < n; k++ {\n\t\t\ts3 := steps[k]\n\t\t\tif d := derivative(s3, step, private); len(d) > 0 {\n\t\t\t\tfmt.Fprintf(w, \"b_%s += b_%s * (%s)\\n\", step.lhs, s3.lhs, d)\n\t\t\t}\n\t\t}\n\t\tif step.decl {\n\t\t\tfmt.Fprintf(w, \"grad_%s[\\\"%s\\\"] = b_%s\\n\", private, step.rhs, step.lhs)\n\t\t}\n\t}\n}\n\ntype Step struct {\n\tdecl bool\n\tlhs string\n\trhs string\n\tf string\n\targs []string\n}\n\nfunc derivative(num, denom Step, private string) string {\n\tvar args []int\n\tfor i, a := range num.args {\n\t\tif a == denom.lhs {\n\t\t\targs = append(args, i)\n\t\t}\n\t}\n\tif len(args) == 0 {\n\t\treturn \"\"\n\t}\n\tvar list []string\n\tfor _, a := range args {\n\t\tlist = append(list, fmt.Sprintf(\"d_%s_%s(%d,%s)\", num.f, private, a, strings.Join(num.args, \",\")))\n\t}\n\treturn strings.Join(list, \"+\")\n}\n\ntype NodeType string\n\nconst (\n\tnumberNT NodeType = \"NUM\"\n\tidentifierNT NodeType = \"IDENT\"\n\tfunctionNT NodeType = \"FUNC\"\n)\n\ntype Node struct {\n\tType NodeType `json:\"T,omitempty\"`\n\tS string `json:\",omitempty\"`\n\tF float64 `json:\",omitempty\"`\n\tChildren []*Node `json:\"C,omitempty\"`\n\tName string `json:\"N,omitempty\"`\n}\n\nfunc (n Node) String() string {\n\tbuf, _ := json.Marshal(n)\n\treturn string(buf)\n}\n\nfunc Run(args []string) {\n\tvar private, templates, formula, output string\n\tvar dx float64\n\tflags := flag.NewFlagSet(\"parse\", flag.ExitOnError)\n\tflags.StringVar(&formula, \"formula\", Formula(10), \"the formula to parse\")\n\tflags.StringVar(&private, \"private\", defaultPrivateString, \"the private variable string\")\n\tflags.StringVar(&templates, \"templates\", defaultTemplates, \"directory of go template functions\")\n\tflags.StringVar(&output, \"output\", \"compute.go\", \"name of go program to output\")\n\tflags.Float64Var(&dx, \"dx\", defaultDx, \"infinitesimal for numerical differentiation\")\n\tflags.Parse(args)\n\n\tcode, err := Parse(private, templates, formula, 0.00001)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf, err := os.Create(output)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf.Write(code)\n\tf.Close()\n}\n\nconst (\n\tdefaultPrivateString = \"pvt\"\n\tdefaultDx = 0.00001\n\tdefaultTemplates = \"src\/xoba\/ad\/parser\/templates\"\n)\n\nfunc Parse(private, templates, formula string, dx float64) ([]byte, error) {\n\tif len(private) == 0 {\n\t\tprivate = defaultPrivateString\n\t}\n\tif dx == 0 {\n\t\tdx = defaultDx\n\t}\n\tif len(templates) == 0 {\n\t\ttemplates = defaultTemplates\n\t}\n\tlex := NewContext(NewLexer(strings.NewReader(formula)))\n\tyyParse(lex)\n\tif len(lex.errors) > 0 {\n\t\treturn nil, lex.errors[0]\n\t}\n\tif lex.lhs == nil || lex.rhs == nil {\n\t\treturn nil, fmt.Errorf(\"parse error: lhs or rhs is nil\")\n\t}\n\tvars := make(map[string]string)\n\tvp := &VarParser{vars: vars}\n\tsp := &StepParser{vars: vars}\n\tvar steps []Step\n\tsteps = append(steps, vp.getVars(lex.rhs, private)...)\n\tsteps = append(steps, sp.program(lex.rhs, private)...)\n\ty := steps[len(steps)-1].lhs\n\n\tchecker := new(bytes.Buffer)\n\t{\n\t\tfmt.Fprintf(checker, \"const delta_%s = %f\\n\", private, dx)\n\t\tfmt.Fprintf(checker, `calc_%s := func() float64 {\n%s\nreturn %s\n}\n`, private, formula, lex.lhs.S)\n\t\tfmt.Fprintf(checker, \"tmp1_%s := calc_%s()\\n\", private, private)\n\t\tfor k := range vars {\n\t\t\tfmt.Fprintln(checker, \"{\")\n\t\t\tfmt.Fprintf(checker, \"%s += delta_%s\\n\", k, private)\n\t\t\tfmt.Fprintf(checker, \"tmp2_%s := calc_%s()\\n\", private, private)\n\t\t\tfmt.Fprintf(checker, \"%s -= delta_%s\\n\", k, private)\n\t\t\tfmt.Fprintf(checker, \"grad_%s[%q] = (tmp2_%s - tmp1_%s)\/delta_%s\\n\", private, k, private, private, private)\n\t\t\tfmt.Fprintln(checker, \"}\")\n\t\t}\n\t}\n\n\tpgm := new(bytes.Buffer)\n\tfor _, s := range steps {\n\t\tfmt.Fprintln(pgm, s)\n\t}\n\tcomputeDerivatives(pgm, steps, private)\n\tvar list []string\n\tfor v := range vars {\n\t\tlist = append(list, v)\n\t}\n\tsort.Strings(list)\n\n\tf := new(bytes.Buffer)\n\n\tdecls := new(bytes.Buffer)\n\tfor _, v := range list {\n\t\tfmt.Fprintf(decls, \"%s := rand.Float64();\\n\", v)\n\t\tfmt.Fprintf(decls, \"fmt.Printf(\\\"setting %s = %%.20f\\\\n\\\",%s)\\n\", v, v)\n\t}\n\tfmt.Fprintln(decls, `fmt.Println();`)\n\n\tvar imports Imports\n\timports.Add(\"fmt\")\n\timports.Add(\"math\")\n\timports.Add(\"time\")\n\timports.Add(\"math\/rand\")\n\n\tt := template.Must(template.New(\"output.go\").Parse(`\/\/ created {{.time}}\n\/\/ see https:\/\/github.com\/xoba\/ad\n\npackage main\n{{.imports}}\n\n\/\/ automatically compute the value and gradient of {{.qformula}}\nfunc ComputeAD({{.vars}} float64) (float64,map[string]float64) {\ngrad_{{.private}} := make(map[string]float64)\n{{.program}} return {{.y}},grad_{{.private}};\n}\n\nfunc main() {\nfmt.Printf(\"running autodiff code of {{.time}} on %q\\n\\n\", {{ printf \"%q\" .formula }});\nrand.Seed(time.Now().UTC().UnixNano())\n{{.decls}} \n\n\tc1, grad1 := ComputeAD({{.vars}})\n\tfmt.Printf(\"autodiff value : %f\\n\", c1)\n\tfmt.Printf(\"autodiff gradient: %v\\n\\n\", grad1)\n\n\tc2, grad2 := ComputeNumerical({{.vars}})\n\tfmt.Printf(\"numeric value : %f\\n\", c2)\n\tfmt.Printf(\"numeric gradient: %v\\n\\n\", grad2)\n\n\tvar total float64\n\tadd := func(n string, x float64) {\n\t\tfmt.Printf(\"%s difference: %f\\n\", n, x)\n\t\ttotal += math.Abs(x)\n\t}\n\tadd(\"value\", c1-c2)\n\tfor k, v := range grad2 {\n\t\tadd(fmt.Sprintf(\"grad[%3s]\", k), grad1[k]-v)\n\t}\nfmt.Printf(\"\\nsum of absolute differences: %f\\n\",total);\n}\n\n\/\/ numerically compute the value and gradient of {{.qformula}}\nfunc ComputeNumerical({{.vars}} float64) (float64,map[string]float64) {\ngrad_{{.private}} := make(map[string]float64)\n{{.checker}} return tmp1_{{.private}},grad_{{.private}};\n}\n\n{{.funcs}}\n\n\n`))\n\n\ttemplateImports, code, err := GenTemplates(templates, private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timports.AddAll(templateImports)\n\n\tt.Execute(f, map[string]interface{}{\n\t\t\"time\": time.Now().UTC().Format(\"2006-01-02T15:04:05.000Z\"),\n\t\t\"decls\": decls.String(),\n\t\t\"formula\": formula,\n\t\t\"qformula\": fmt.Sprintf(\"%q\", formula),\n\t\t\"lhs\": lex.lhs.S,\n\t\t\"vars\": strings.Join(list, \", \"),\n\t\t\"program\": pgm.String(),\n\t\t\"checker\": checker.String(),\n\t\t\"y\": y,\n\t\t\"funcs\": code,\n\t\t\"private\": private,\n\t\t\"imports\": imports.String(),\n\t})\n\n\treturn GofmtBuffer(f.Bytes())\n}\n\ntype Imports struct {\n\tlist []string\n}\n\nfunc (i Imports) String() string {\n\tm := make(map[string]bool)\n\tfor _, x := range i.list {\n\t\tx = strings.Replace(x, `\"`, ``, -1)\n\t\tm[x] = true\n\t}\n\tout := new(bytes.Buffer)\n\tfmt.Fprintf(out, \"import (\\n\")\n\tfor k := range m {\n\t\tfmt.Fprintf(out, \"%q\\n\", k)\n\t}\n\tfmt.Fprintf(out, \")\\n\")\n\treturn out.String()\n}\n\nfunc (i *Imports) Add(x string) {\n\ti.list = append(i.list, x)\n}\n\nfunc (i *Imports) AddAll(list []string) {\n\tfor _, x := range list {\n\t\ti.Add(x)\n\t}\n}\n\nfunc formatImports(list []string) string {\n\tvar out []string\n\tm := make(map[string]bool)\n\tfor _, x := range list {\n\t\tm[x] = true\n\t}\n\tfor k := range m {\n\t\tout = append(out, k)\n\t}\n\treturn strings.Join(out, \"\\n\")\n}\n\nfunc Gofmt(p string) error {\n\tcmd := exec.Command(\"gofmt\", \"-w\", p)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc GofmtBuffer(code []byte) ([]byte, error) {\n\tout := new(bytes.Buffer)\n\tcmd := exec.Command(\"gofmt\")\n\tcmd.Stdin = bytes.NewReader(code)\n\tcmd.Stdout = out\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out.Bytes(), nil\n}\n\ntype VarParser struct {\n\ti int\n\tvars map[string]string\n}\n\n\/\/ gets all the vars and name them in-place\nfunc (v *VarParser) getVars(rhs *Node, private string) (out []Step) {\n\tswitch rhs.Type {\n\tcase identifierNT:\n\t\tif _, ok := v.vars[rhs.S]; !ok {\n\t\t\trhs.Name = fmt.Sprintf(\"v_%d_%s\", v.i, private)\n\t\t\tv.vars[rhs.S] = rhs.Name\n\t\t\tv.i++\n\t\t\tstep := Step{\n\t\t\t\tdecl: true,\n\t\t\t\tlhs: rhs.Name,\n\t\t\t\trhs: rhs.S,\n\t\t\t}\n\t\t\tout = append(out, step)\n\t\t} else {\n\t\t\trhs.Name = v.vars[rhs.S]\n\t\t}\n\tcase functionNT:\n\t\tfor _, n := range rhs.Children {\n\t\t\tout = append(out, v.getVars(n, private)...)\n\t\t}\n\t}\n\treturn\n}\n\ntype StepParser struct {\n\ti int\n\tvars map[string]string\n}\n\nfunc (s Step) String() string {\n\treturn fmt.Sprintf(\"%s := %s;\", s.lhs, s.rhs)\n}\n\nfunc (s *StepParser) program(rhs *Node, private string) (out []Step) {\n\tswitch rhs.Type {\n\tcase numberNT:\n\t\trhs.Name = fmt.Sprintf(\"%f\", rhs.F)\n\tcase functionNT:\n\t\tvar args []string\n\t\tfor _, n := range rhs.Children {\n\t\t\tsteps := s.program(n, private)\n\t\t\tout = append(out, steps...)\n\t\t\targs = append(args, n.Name)\n\t\t}\n\t\tstep := Step{\n\t\t\tlhs: fmt.Sprintf(\"s_%d_%s\", s.i, private),\n\t\t\trhs: fmt.Sprintf(\"%s_%s(%s)\", rhs.S, private, strings.Join(args, \",\")),\n\t\t\tf: rhs.S,\n\t\t\targs: args,\n\t\t}\n\t\tout = append(out, step)\n\t\trhs.Name = step.lhs\n\t\ts.i++\n\t}\n\treturn out\n}\n\nfunc Function(ident string, args ...*Node) *Node {\n\treturn &Node{\n\t\tType: functionNT,\n\t\tS: ident,\n\t\tChildren: args,\n\t}\n}\n\nfunc Number(n float64) *Node {\n\treturn &Node{\n\t\tType: numberNT,\n\t\tF: n,\n\t}\n}\n\nfunc LexIdentifier(s string) *Node {\n\treturn &Node{\n\t\tType: identifierNT,\n\t\tS: s,\n\t}\n}\n\nfunc LexNumber(s string) *Node {\n\tn, _ := strconv.ParseFloat(s, 64)\n\treturn Number(n)\n}\n\nfunc Negate(a *Node) *Node {\n\treturn Function(\"multiply\", Number(-1), a)\n}\n\ntype context struct {\n\tlhs *Node\n\trhs *Node\n\tyyLexer\n\terrors []error\n}\n\nfunc NewContext(y yyLexer) *context {\n\treturn &context{yyLexer: y}\n}\n\nfunc (c *context) Error(e string) {\n\tc.Error2(fmt.Errorf(\"%s\", e))\n}\n\nfunc (c *context) Error2(e error) {\n\tlog.Printf(\"oops: %v\\n\", e)\n\tc.errors = append(c.errors, e)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package jsutil provides utility functions for interacting with\n\/\/ native JavaScript APIs via syscall\/js API.\n\/\/ It has support for common types in honnef.co\/go\/js\/dom\/v2.\npackage jsutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"syscall\/js\"\n\n\t\"honnef.co\/go\/js\/dom\/v2\"\n)\n\n\/\/ Wrap returns a wrapper func that handles the conversion from native JavaScript js.Value parameters\n\/\/ to the following types.\n\/\/\n\/\/ It supports js.Value (left unmodified), dom.Document, dom.Element, dom.Event, dom.HTMLElement, dom.Node.\n\/\/ It has to be one of those types exactly; it can't be another type that implements the interface like *dom.BasicElement.\n\/\/\n\/\/ For other types, the input is assumed to be a JSON string which is then unmarshalled into that type.\n\/\/\n\/\/ If the number of arguments provided to the wrapped func doesn't match\n\/\/ the number of arguments for original func, it panics.\n\/\/\n\/\/ Here is example usage:\n\/\/\n\/\/ \t<span onclick=\"Handler(event, this, {{.SomeStruct | json}});\">Example<\/span>\n\/\/\n\/\/ \tfunc Handler(event dom.Event, htmlElement dom.HTMLElement, data someStruct) {\n\/\/ \t\tdata.Foo = ... \/\/ Use event, htmlElement, data.\n\/\/ \t}\n\/\/\n\/\/ \tfunc main() {\n\/\/ \t\tjs.Global().Set(\"Handler\", jsutil.Wrap(Handler))\n\/\/ \t}\nfunc Wrap(fn interface{}) js.Func {\n\tv := reflect.ValueOf(fn)\n\treturn js.FuncOf(func(_ js.Value, args []js.Value) interface{} {\n\t\tif len(args) != v.Type().NumIn() {\n\t\t\tpanic(fmt.Errorf(\"wrapped %v got %v arguments, want %v\", v.Type().String(), len(args), v.Type().NumIn()))\n\t\t}\n\t\tin := make([]reflect.Value, v.Type().NumIn())\n\t\tfor i := range in {\n\t\t\tswitch t := v.Type().In(i); t {\n\t\t\t\/\/ js.Value is passed through.\n\t\t\tcase typeOf((*js.Value)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(args[i])\n\n\t\t\t\/\/ dom types are wrapped.\n\t\t\tcase typeOf((*dom.Document)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapDocument(args[i]))\n\t\t\tcase typeOf((*dom.Element)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapElement(args[i]))\n\t\t\tcase typeOf((*dom.Event)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapEvent(args[i]))\n\t\t\tcase typeOf((*dom.HTMLElement)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapHTMLElement(args[i]))\n\t\t\tcase typeOf((*dom.Node)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapNode(args[i]))\n\n\t\t\t\/\/ Unmarshal incoming encoded JSON into the Go type.\n\t\t\tdefault:\n\t\t\t\tif args[i].Type() != js.TypeString {\n\t\t\t\t\tpanic(fmt.Errorf(\"jsutil: incoming value type is %s; want a string with JSON content\", args[i].Type()))\n\t\t\t\t}\n\t\t\t\tp := reflect.New(t)\n\t\t\t\terr := json.Unmarshal([]byte(args[i].String()), p.Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(fmt.Errorf(\"jsutil: unmarshaling JSON %q into type %s failed: %v\", args[i], t, err))\n\t\t\t\t}\n\t\t\t\tin[i] = reflect.Indirect(p)\n\t\t\t}\n\t\t}\n\t\tv.Call(in)\n\t\treturn nil\n\t})\n}\n\n\/\/ typeOf returns the reflect.Type of what the pointer points to.\nfunc typeOf(pointer interface{}) reflect.Type {\n\treturn reflect.TypeOf(pointer).Elem()\n}\n<commit_msg>gopherjs_http\/jsutil\/v2: add js build constraint<commit_after>\/\/ +build js\n\n\/\/ Package jsutil provides utility functions for interacting with\n\/\/ native JavaScript APIs via syscall\/js API.\n\/\/ It has support for common types in honnef.co\/go\/js\/dom\/v2.\npackage jsutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"syscall\/js\"\n\n\t\"honnef.co\/go\/js\/dom\/v2\"\n)\n\n\/\/ Wrap returns a wrapper func that handles the conversion from native JavaScript js.Value parameters\n\/\/ to the following types.\n\/\/\n\/\/ It supports js.Value (left unmodified), dom.Document, dom.Element, dom.Event, dom.HTMLElement, dom.Node.\n\/\/ It has to be one of those types exactly; it can't be another type that implements the interface like *dom.BasicElement.\n\/\/\n\/\/ For other types, the input is assumed to be a JSON string which is then unmarshalled into that type.\n\/\/\n\/\/ If the number of arguments provided to the wrapped func doesn't match\n\/\/ the number of arguments for original func, it panics.\n\/\/\n\/\/ Here is example usage:\n\/\/\n\/\/ \t<span onclick=\"Handler(event, this, {{.SomeStruct | json}});\">Example<\/span>\n\/\/\n\/\/ \tfunc Handler(event dom.Event, htmlElement dom.HTMLElement, data someStruct) {\n\/\/ \t\tdata.Foo = ... \/\/ Use event, htmlElement, data.\n\/\/ \t}\n\/\/\n\/\/ \tfunc main() {\n\/\/ \t\tjs.Global().Set(\"Handler\", jsutil.Wrap(Handler))\n\/\/ \t}\nfunc Wrap(fn interface{}) js.Func {\n\tv := reflect.ValueOf(fn)\n\treturn js.FuncOf(func(_ js.Value, args []js.Value) interface{} {\n\t\tif len(args) != v.Type().NumIn() {\n\t\t\tpanic(fmt.Errorf(\"wrapped %v got %v arguments, want %v\", v.Type().String(), len(args), v.Type().NumIn()))\n\t\t}\n\t\tin := make([]reflect.Value, v.Type().NumIn())\n\t\tfor i := range in {\n\t\t\tswitch t := v.Type().In(i); t {\n\t\t\t\/\/ js.Value is passed through.\n\t\t\tcase typeOf((*js.Value)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(args[i])\n\n\t\t\t\/\/ dom types are wrapped.\n\t\t\tcase typeOf((*dom.Document)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapDocument(args[i]))\n\t\t\tcase typeOf((*dom.Element)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapElement(args[i]))\n\t\t\tcase typeOf((*dom.Event)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapEvent(args[i]))\n\t\t\tcase typeOf((*dom.HTMLElement)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapHTMLElement(args[i]))\n\t\t\tcase typeOf((*dom.Node)(nil)):\n\t\t\t\tin[i] = reflect.ValueOf(dom.WrapNode(args[i]))\n\n\t\t\t\/\/ Unmarshal incoming encoded JSON into the Go type.\n\t\t\tdefault:\n\t\t\t\tif args[i].Type() != js.TypeString {\n\t\t\t\t\tpanic(fmt.Errorf(\"jsutil: incoming value type is %s; want a string with JSON content\", args[i].Type()))\n\t\t\t\t}\n\t\t\t\tp := reflect.New(t)\n\t\t\t\terr := json.Unmarshal([]byte(args[i].String()), p.Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(fmt.Errorf(\"jsutil: unmarshaling JSON %q into type %s failed: %v\", args[i], t, err))\n\t\t\t\t}\n\t\t\t\tin[i] = reflect.Indirect(p)\n\t\t\t}\n\t\t}\n\t\tv.Call(in)\n\t\treturn nil\n\t})\n}\n\n\/\/ typeOf returns the reflect.Type of what the pointer points to.\nfunc typeOf(pointer interface{}) reflect.Type {\n\treturn reflect.TypeOf(pointer).Elem()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"code.google.com\/p\/go.exp\/fsnotify\"\n\t\"github.com\/hishboy\/gocommons\/lang\"\n)\n\nfunc isMember(element string, array []string) bool {\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc collectPaths(path []string) []string {\n\t\/\/ paths to be returned\n\tcollectedPaths := make([]string, 1, 1)\n\n\t\/\/\n\tfor _, thisPath := range paths {\n\t\terr := filepath.Walk(thisPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\tcollectedPaths = append(collectedPaths, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\n\treturn collectedPaths\n}\n\nfunc SetupWatch(paths []string, excludes []string) (int, *fsnotify.Watcher) {\n\tvar watchedCount int\n\n\tpaths = collectPaths(paths)\n\texcludes = collectPaths(excludes)\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tfmt.Println(\"Error establishing watcher: \", err)\n\t}\n\n\t\/\/ establish watches\n\tfor _, path := range paths {\n\t\tif !(isMember(path, excludes)) {\n\t\t\terr = watcher.Watch(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error: \", err, \" establishing watch on: \", path)\n\t\t\t}\n\t\t\twatchedCount++\n\t\t}\n\t}\n\treturn watcher, watchedCount\n}\n<commit_msg>Removed more useless imports. Fixed variable name mismatch.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"code.google.com\/p\/go.exp\/fsnotify\"\n)\n\nfunc isMember(element string, array []string) bool {\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc collectPaths(paths []string) []string {\n\t\/\/ paths to be returned\n\tcollectedPaths := make([]string, 1, 1)\n\n\t\/\/\n\tfor _, thisPath := range paths {\n\t\terr := filepath.Walk(thisPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\tcollectedPaths = append(collectedPaths, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\n\treturn collectedPaths\n}\n\nfunc SetupWatch(paths []string, excludes []string) (int, *fsnotify.Watcher) {\n\tvar watchedCount int\n\n\tpaths = collectPaths(paths)\n\texcludes = collectPaths(excludes)\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tfmt.Println(\"Error establishing watcher: \", err)\n\t}\n\n\t\/\/ establish watches\n\tfor _, path := range paths {\n\t\tif !(isMember(path, excludes)) {\n\t\t\terr = watcher.Watch(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error: \", err, \" establishing watch on: \", path)\n\t\t\t}\n\t\t\twatchedCount++\n\t\t}\n\t}\n\treturn watchedCount, watcher\n}\n<|endoftext|>"} {"text":"<commit_before>package clone\n\nimport (\n\t\"errors\"\n\t_ \"fmt\"\n\t\"github.com\/jeffail\/tunny\"\n\tcsv \"github.com\/whosonfirst\/go-whosonfirst-csv\"\n\tlog \"github.com\/whosonfirst\/go-whosonfirst-log\"\n\tpool \"github.com\/whosonfirst\/go-whosonfirst-pool\"\n\tutils \"github.com\/whosonfirst\/go-whosonfirst-utils\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype WOFClone struct {\n\tSource string\n\tDest string\n\tSuccess int64\n\tError int64\n\tSkipped int64\n\tScheduled int64\n\tCompleted int64\n\tMaxRetries float64 \/\/ max percentage of errors over scheduled\n\tFailed []string\n\tLogger *log.WOFLogger\n\tclient *http.Client\n\tretries *pool.LIFOPool\n\tworkpool *tunny.WorkPool\n\ttimer time.Time\n\tdone chan bool\n}\n\nfunc NewWOFClone(source string, dest string, procs int, logger *log.WOFLogger) (*WOFClone, error) {\n\n\t\/\/ https:\/\/golang.org\/src\/net\/http\/filetransport.go\n\n\tu, err := url.Parse(source)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cl *http.Client\n\n\tif u.Scheme == \"file\" {\n\n\t\t\/\/ See also: https:\/\/code.google.com\/p\/go\/issues\/detail?id=2113\n\n\t\troot := u.Path\n\n\t\tif !strings.HasSuffix(root, \"\/\") {\n\t\t\troot = root + \"\/\"\n\t\t}\n\n\t\t\/\/ See this:\n\t\t\/\/ root = \"\/\"\n\t\t\/\/ That's what necessary to make cloning local files work absent\n\t\t\/\/ tweaking the code below to see whether we are a \"file\" source\n\t\t\/\/ and modifying the request URL. The fear of blindly opening up\n\t\t\/\/ the root level directory on the file system in this context\n\t\t\/\/ seems a bit premature (not to mention silly) but measure twice\n\t\t\/\/ and all that good stuff... (20160112\/thisisaaronland)\n\t\t\n\t\tt := &http.Transport{}\n\t\tt.RegisterProtocol(\"file\", http.NewFileTransport(http.Dir(root)))\n\n\t\tcl = &http.Client{Transport: t}\n\t} else {\n\t\tcl = &http.Client{}\n\t}\n\n\truntime.GOMAXPROCS(procs)\n\n\tworkpool, _ := tunny.CreatePoolGeneric(procs).Open()\n\tretries := pool.NewLIFOPool()\n\n\tch := make(chan bool)\n\n\tc := WOFClone{\n\t\tSuccess: 0,\n\t\tError: 0,\n\t\tSkipped: 0,\n\t\tSource: source,\n\t\tDest: dest,\n\t\tLogger: logger,\n\t\tMaxRetries: 25.0, \/\/ maybe allow this to be user-defined ?\n\t\tclient: cl,\n\t\tworkpool: workpool,\n\t\tretries: retries,\n\t\ttimer: time.Now(),\n\t\tdone: ch,\n\t}\n\n\tgo func(c *WOFClone) {\n\n\t\tfor {\n\t\t\tselect {\n\n\t\t\tcase <-c.done:\n\t\t\t\tbreak\n\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\tc.Status()\n\t\t\t}\n\t\t}\n\t}(&c)\n\n\treturn &c, nil\n}\n\nfunc (c *WOFClone) CloneMetaFile(file string, skip_existing bool, force_updates bool) error {\n\n\tabs_path, _ := filepath.Abs(file)\n\n\treader, read_err := csv.NewDictReader(abs_path)\n\n\tif read_err != nil {\n\t\tc.Logger.Error(\"Failed to read %s, because %v\", abs_path, read_err)\n\t\treturn read_err\n\t}\n\n\twg := new(sync.WaitGroup)\n\n\tc.timer = time.Now()\n\n\tfor {\n\n\t\trow, err := reader.Read()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trel_path, ok := row[\"path\"]\n\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tensure_changes := true\n\t\thas_changes := true\n\t\tcarry_on := false\n\n\t\tremote := c.Source + rel_path\n\t\tlocal := path.Join(c.Dest, rel_path)\n\n\t\t_, err = os.Stat(local)\n\n\t\tif !os.IsNotExist(err) {\n\n\t\t\tif force_updates {\n\n\t\t\t\tc.Logger.Debug(\"%s already but we are forcing updates\", local)\n\t\t\t} else if skip_existing {\n\n\t\t\t\tc.Logger.Debug(\"%s already exists and we are skipping things that exist\", local)\n\t\t\t\tcarry_on = true\n\n\t\t\t} else {\n\n\t\t\t\tfile_hash, ok := row[\"file_hash\"]\n\n\t\t\t\tt1 := time.Now()\n\n\t\t\t\tif ok {\n\t\t\t\t\tc.Logger.Debug(\"comparing hardcoded hash (%s) for %s\", file_hash, local)\n\t\t\t\t\thas_changes, _ = c.HasHashChanged(file_hash, remote)\n\t\t\t\t} else {\n\t\t\t\t\thas_changes, _ = c.HasChanged(local, remote)\n\t\t\t\t}\n\n\t\t\t\tif !has_changes {\n\t\t\t\t\tc.Logger.Info(\"no changes to %s\", local)\n\t\t\t\t\tcarry_on = true\n\t\t\t\t}\n\n\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\tc.Logger.Debug(\"time to determine whether %s has changed (%t), %v\", local, has_changes, t2)\n\t\t\t}\n\n\t\t\tif carry_on {\n\n\t\t\t\tatomic.AddInt64(&c.Scheduled, 1)\n\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t\tatomic.AddInt64(&c.Skipped, 1)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tensure_changes = false\n\t\t}\n\n\t\twg.Add(1)\n\t\tatomic.AddInt64(&c.Scheduled, 1)\n\n\t\tgo func(c *WOFClone, rel_path string, ensure_changes bool) {\n\n\t\t\tdefer wg.Done()\n\n\t\t\t_, err = c.workpool.SendWork(func() {\n\n\t\t\t\tt1 := time.Now()\n\t\t\t\tcl_err := c.ClonePath(rel_path, ensure_changes)\n\n\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\tc.Logger.Debug(\"time to process %s : %v\", rel_path, t2)\n\n\t\t\t\tif cl_err != nil {\n\t\t\t\t\tatomic.AddInt64(&c.Error, 1)\n\t\t\t\t\tc.retries.Push(&pool.PoolString{String: rel_path})\n\t\t\t\t} else {\n\t\t\t\t\tatomic.AddInt64(&c.Success, 1)\n\t\t\t\t}\n\n\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t})\n\n\t\t}(c, rel_path, ensure_changes)\n\t}\n\n\twg.Wait()\n\n\tok := c.ProcessRetries()\n\n\tif !ok {\n\t\tc.Logger.Warning(\"failed to process retries\")\n\t\treturn errors.New(\"One of file failed to be cloned\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *WOFClone) ProcessRetries() bool {\n\n\tto_retry := c.retries.Length()\n\n\tif to_retry > 0 {\n\n\t\tscheduled_f := float64(c.Scheduled)\n\t\tretry_f := float64(to_retry)\n\n\t\tpct := (retry_f \/ scheduled_f) * 100.0\n\n\t\tif pct > c.MaxRetries {\n\t\t\tc.Logger.Warning(\"E_EXCESSIVE_ERRORS, %f percent of scheduled processes failed thus undermining our faith that they will work now...\", pct)\n\t\t\treturn false\n\t\t}\n\n\t\tc.Logger.Info(\"There are %d failed requests that will now be retried\", to_retry)\n\n\t\twg := new(sync.WaitGroup)\n\n\t\tfor c.retries.Length() > 0 {\n\n\t\t\tr, ok := c.retries.Pop()\n\n\t\t\tif !ok {\n\t\t\t\tc.Logger.Error(\"failed to pop retries because... computers?\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trel_path := r.StringValue()\n\n\t\t\tatomic.AddInt64(&c.Scheduled, 1)\n\t\t\twg.Add(1)\n\n\t\t\tgo func(c *WOFClone, rel_path string) {\n\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tc.workpool.SendWork(func() {\n\n\t\t\t\t\tensure_changes := true\n\n\t\t\t\t\tt1 := time.Now()\n\n\t\t\t\t\tcl_err := c.ClonePath(rel_path, ensure_changes)\n\n\t\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\t\tc.Logger.Debug(\"time to retry clone %s : %v\\n\", rel_path, t2)\n\n\t\t\t\t\tif cl_err != nil {\n\t\t\t\t\t\tatomic.AddInt64(&c.Error, 1)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tatomic.AddInt64(&c.Error, -1)\n\t\t\t\t\t}\n\n\t\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t\t})\n\n\t\t\t}(c, rel_path)\n\t\t}\n\n\t\twg.Wait()\n\t}\n\n\treturn true\n}\n\nfunc (c *WOFClone) ClonePath(rel_path string, ensure_changes bool) error {\n\n\tremote := c.Source + rel_path\n\tlocal := path.Join(c.Dest, rel_path)\n\n\t_, err := os.Stat(local)\n\n\tif !os.IsNotExist(err) && ensure_changes {\n\n\t\tchange, _ := c.HasChanged(local, remote)\n\n\t\tif !change {\n\n\t\t\tc.Logger.Debug(\"%s has not changed so skipping\", local)\n\t\t\tatomic.AddInt64(&c.Skipped, 1)\n\t\t\treturn nil\n\t\t}\n\n\t}\n\n\tprocess_err := c.Process(remote, local)\n\n\tif process_err != nil {\n\t\treturn process_err\n\t}\n\n\treturn nil\n}\n\n\/\/ don't return true if there's a problem - move that logic up above\n\nfunc (c *WOFClone) HasChanged(local string, remote string) (bool, error) {\n\n\tchange := true\n\n\tlocal_hash, err := utils.HashFile(local)\n\n\tif err != nil {\n\t\tc.Logger.Error(\"Failed to hash %s, becase %v\", local, err)\n\t\treturn change, err\n\t}\n\n\treturn c.HasHashChanged(local_hash, remote)\n}\n\nfunc (c *WOFClone) HasHashChanged(local_hash string, remote string) (bool, error) {\n\n\tchange := true\n\n\trsp, err := c.Fetch(\"HEAD\", remote)\n\n\tif err != nil {\n\t\treturn change, err\n\t}\n\n\trsp.Body.Close()\n\t\/\/ defer rsp.Body.Close()\n\n\tetag := rsp.Header.Get(\"Etag\")\n\tremote_hash := strings.Replace(etag, \"\\\"\", \"\", -1)\n\n\tif local_hash == remote_hash {\n\t\tchange = false\n\t}\n\n\treturn change, nil\n}\n\nfunc (c *WOFClone) Process(remote string, local string) error {\n\n\tc.Logger.Debug(\"fetch %s and store in %s\", remote, local)\n\n\tlocal_root := path.Dir(local)\n\n\t_, err := os.Stat(local_root)\n\n\tif os.IsNotExist(err) {\n\t\tc.Logger.Info(\"create %s\", local_root)\n\t\tos.MkdirAll(local_root, 0755)\n\t}\n\n\tt1 := time.Now()\n\n\trsp, fetch_err := c.Fetch(\"GET\", remote)\n\n\tt2 := time.Since(t1)\n\n\tc.Logger.Debug(\"time to fetch %s: %v\", remote, t2)\n\n\tif fetch_err != nil {\n\t\treturn fetch_err\n\t}\n\n\t\/\/ defer rsp.Body.Close()\n\n\tcontents, read_err := ioutil.ReadAll(rsp.Body)\n\n\tif read_err != nil {\n\t\tc.Logger.Error(\"failed to read body for %s, because %v\", remote, read_err)\n\t\treturn read_err\n\t}\n\n\trsp.Body.Close()\n\n\tgo func(local string, contents []byte) error {\n\n\t\twrite_err := ioutil.WriteFile(local, contents, 0644)\n\n\t\tif write_err != nil {\n\t\t\tc.Logger.Error(\"Failed to write %s, because %v\", local, write_err)\n\n\t\t\tatomic.AddInt64(&c.Success, -1)\n\t\t\tatomic.AddInt64(&c.Error, 1)\n\n\t\t\treturn write_err\n\t\t}\n\n\t\tc.Logger.Debug(\"Wrote %s to disk\", local)\n\t\treturn nil\n\t}(local, contents)\n\n\treturn nil\n}\n\nfunc (c *WOFClone) Fetch(method string, url string) (*http.Response, error) {\n\n\tc.Logger.Debug(\"%s %s\", method, url)\n\n\treq, _ := http.NewRequest(method, url, nil)\n\treq.Close = true\n\n\trsp, err := c.client.Do(req)\n\n\tif err != nil {\n\t\tc.Logger.Error(\"Failed to %s %s, because %v\", method, url, err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Notice how we are not closing rsp.Body - that's because we are passing\n\t\/\/ it (rsp) back up the stack\n\n\t\/\/ See also: https:\/\/github.com\/whosonfirst\/go-whosonfirst-clone\/issues\/6\n\n\texpected := 200\n\n\tif rsp.StatusCode != expected {\n\t\tc.Logger.Error(\"Failed to %s %s, because we expected %d from source and got '%s' instead\", method, url, expected, rsp.Status)\n\t\treturn nil, errors.New(rsp.Status)\n\t}\n\n\treturn rsp, nil\n}\n\nfunc (c *WOFClone) Status() {\n\n\tt2 := time.Since(c.timer)\n\n\tc.Logger.Info(\"scheduled: %d completed: %d success: %d error: %d skipped: %d to retry: %d goroutines: %d time: %v\",\n\t\tc.Scheduled, c.Completed, c.Success, c.Error, c.Skipped, c.retries.Length(), runtime.NumGoroutine(), t2)\n}\n<commit_msg>update Fetch to account for file:\/\/\/ source URLs; notes and comments<commit_after>package clone\n\nimport (\n\t\"errors\"\n\t_ \"fmt\"\n\t\"github.com\/jeffail\/tunny\"\n\tcsv \"github.com\/whosonfirst\/go-whosonfirst-csv\"\n\tlog \"github.com\/whosonfirst\/go-whosonfirst-log\"\n\tpool \"github.com\/whosonfirst\/go-whosonfirst-pool\"\n\tutils \"github.com\/whosonfirst\/go-whosonfirst-utils\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype WOFClone struct {\n\tSource string\n\tDest string\n\tSuccess int64\n\tError int64\n\tSkipped int64\n\tScheduled int64\n\tCompleted int64\n\tMaxRetries float64 \/\/ max percentage of errors over scheduled\n\tFailed []string\n\tLogger *log.WOFLogger\n\tclient *http.Client\n\tretries *pool.LIFOPool\n\tworkpool *tunny.WorkPool\n\ttimer time.Time\n\tdone chan bool\n}\n\nfunc NewWOFClone(source string, dest string, procs int, logger *log.WOFLogger) (*WOFClone, error) {\n\n\t\/\/ https:\/\/golang.org\/src\/net\/http\/filetransport.go\n\n\tu, err := url.Parse(source)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cl *http.Client\n\n\tif u.Scheme == \"file\" {\n\n\t\troot := u.Path\n\n\t\tif !strings.HasSuffix(root, \"\/\") {\n\t\t\troot = root + \"\/\"\n\t\t}\n\n\t\t\/*\n\t\t\tPay attention to what's going here. Absent tweaking the URL to\n\t\t\tfetch in the 'Fetch' method the following will not work. In\n\t\t\torder to make this working *without* tweaking the URL you would\n\t\t\tneed to specifiy the root as '\/' which just seems like a bad\n\t\t\tidea. The fear of blindly opening up the root level directory on\n\t\t\tthe file system in this context may seem a bit premature (not to\n\t\t\tmention silly) but measure twice and all that good stuff...\n\t\t\tSee also: https:\/\/code.google.com\/p\/go\/issues\/detail?id=2113\n\t\t\t(20160112\/thisisaaronland)\n\t\t*\/\n\n\t\tt := &http.Transport{}\n\t\tt.RegisterProtocol(\"file\", http.NewFileTransport(http.Dir(root)))\n\n\t\tcl = &http.Client{Transport: t}\n\t} else {\n\t\tcl = &http.Client{}\n\t}\n\n\truntime.GOMAXPROCS(procs)\n\n\tworkpool, _ := tunny.CreatePoolGeneric(procs).Open()\n\tretries := pool.NewLIFOPool()\n\n\tch := make(chan bool)\n\n\tc := WOFClone{\n\t\tSuccess: 0,\n\t\tError: 0,\n\t\tSkipped: 0,\n\t\tSource: source,\n\t\tDest: dest,\n\t\tLogger: logger,\n\t\tMaxRetries: 25.0, \/\/ maybe allow this to be user-defined ?\n\t\tclient: cl,\n\t\tworkpool: workpool,\n\t\tretries: retries,\n\t\ttimer: time.Now(),\n\t\tdone: ch,\n\t}\n\n\tgo func(c *WOFClone) {\n\n\t\tfor {\n\t\t\tselect {\n\n\t\t\tcase <-c.done:\n\t\t\t\tbreak\n\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\tc.Status()\n\t\t\t}\n\t\t}\n\t}(&c)\n\n\treturn &c, nil\n}\n\nfunc (c *WOFClone) CloneMetaFile(file string, skip_existing bool, force_updates bool) error {\n\n\tabs_path, _ := filepath.Abs(file)\n\n\treader, read_err := csv.NewDictReader(abs_path)\n\n\tif read_err != nil {\n\t\tc.Logger.Error(\"Failed to read %s, because %v\", abs_path, read_err)\n\t\treturn read_err\n\t}\n\n\twg := new(sync.WaitGroup)\n\n\tc.timer = time.Now()\n\n\tfor {\n\n\t\trow, err := reader.Read()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trel_path, ok := row[\"path\"]\n\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tensure_changes := true\n\t\thas_changes := true\n\t\tcarry_on := false\n\n\t\tremote := c.Source + rel_path\n\t\tlocal := path.Join(c.Dest, rel_path)\n\n\t\t_, err = os.Stat(local)\n\n\t\tif !os.IsNotExist(err) {\n\n\t\t\tif force_updates {\n\n\t\t\t\tc.Logger.Debug(\"%s already but we are forcing updates\", local)\n\t\t\t} else if skip_existing {\n\n\t\t\t\tc.Logger.Debug(\"%s already exists and we are skipping things that exist\", local)\n\t\t\t\tcarry_on = true\n\n\t\t\t} else {\n\n\t\t\t\tfile_hash, ok := row[\"file_hash\"]\n\n\t\t\t\tt1 := time.Now()\n\n\t\t\t\tif ok {\n\t\t\t\t\tc.Logger.Debug(\"comparing hardcoded hash (%s) for %s\", file_hash, local)\n\t\t\t\t\thas_changes, _ = c.HasHashChanged(file_hash, remote)\n\t\t\t\t} else {\n\t\t\t\t\thas_changes, _ = c.HasChanged(local, remote)\n\t\t\t\t}\n\n\t\t\t\tif !has_changes {\n\t\t\t\t\tc.Logger.Info(\"no changes to %s\", local)\n\t\t\t\t\tcarry_on = true\n\t\t\t\t}\n\n\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\tc.Logger.Debug(\"time to determine whether %s has changed (%t), %v\", local, has_changes, t2)\n\t\t\t}\n\n\t\t\tif carry_on {\n\n\t\t\t\tatomic.AddInt64(&c.Scheduled, 1)\n\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t\tatomic.AddInt64(&c.Skipped, 1)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tensure_changes = false\n\t\t}\n\n\t\twg.Add(1)\n\t\tatomic.AddInt64(&c.Scheduled, 1)\n\n\t\tgo func(c *WOFClone, rel_path string, ensure_changes bool) {\n\n\t\t\tdefer wg.Done()\n\n\t\t\t_, err = c.workpool.SendWork(func() {\n\n\t\t\t\tt1 := time.Now()\n\t\t\t\tcl_err := c.ClonePath(rel_path, ensure_changes)\n\n\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\tc.Logger.Debug(\"time to process %s : %v\", rel_path, t2)\n\n\t\t\t\tif cl_err != nil {\n\t\t\t\t\tatomic.AddInt64(&c.Error, 1)\n\t\t\t\t\tc.retries.Push(&pool.PoolString{String: rel_path})\n\t\t\t\t} else {\n\t\t\t\t\tatomic.AddInt64(&c.Success, 1)\n\t\t\t\t}\n\n\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t})\n\n\t\t}(c, rel_path, ensure_changes)\n\t}\n\n\twg.Wait()\n\n\tok := c.ProcessRetries()\n\n\tif !ok {\n\t\tc.Logger.Warning(\"failed to process retries\")\n\t\treturn errors.New(\"One of file failed to be cloned\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *WOFClone) ProcessRetries() bool {\n\n\tto_retry := c.retries.Length()\n\n\tif to_retry > 0 {\n\n\t\tscheduled_f := float64(c.Scheduled)\n\t\tretry_f := float64(to_retry)\n\n\t\tpct := (retry_f \/ scheduled_f) * 100.0\n\n\t\tif pct > c.MaxRetries {\n\t\t\tc.Logger.Warning(\"E_EXCESSIVE_ERRORS, %f percent of scheduled processes failed thus undermining our faith that they will work now...\", pct)\n\t\t\treturn false\n\t\t}\n\n\t\tc.Logger.Info(\"There are %d failed requests that will now be retried\", to_retry)\n\n\t\twg := new(sync.WaitGroup)\n\n\t\tfor c.retries.Length() > 0 {\n\n\t\t\tr, ok := c.retries.Pop()\n\n\t\t\tif !ok {\n\t\t\t\tc.Logger.Error(\"failed to pop retries because... computers?\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trel_path := r.StringValue()\n\n\t\t\tatomic.AddInt64(&c.Scheduled, 1)\n\t\t\twg.Add(1)\n\n\t\t\tgo func(c *WOFClone, rel_path string) {\n\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tc.workpool.SendWork(func() {\n\n\t\t\t\t\tensure_changes := true\n\n\t\t\t\t\tt1 := time.Now()\n\n\t\t\t\t\tcl_err := c.ClonePath(rel_path, ensure_changes)\n\n\t\t\t\t\tt2 := time.Since(t1)\n\n\t\t\t\t\tc.Logger.Debug(\"time to retry clone %s : %v\\n\", rel_path, t2)\n\n\t\t\t\t\tif cl_err != nil {\n\t\t\t\t\t\tatomic.AddInt64(&c.Error, 1)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tatomic.AddInt64(&c.Error, -1)\n\t\t\t\t\t}\n\n\t\t\t\t\tatomic.AddInt64(&c.Completed, 1)\n\t\t\t\t})\n\n\t\t\t}(c, rel_path)\n\t\t}\n\n\t\twg.Wait()\n\t}\n\n\treturn true\n}\n\nfunc (c *WOFClone) ClonePath(rel_path string, ensure_changes bool) error {\n\n\tremote := c.Source + rel_path\n\tlocal := path.Join(c.Dest, rel_path)\n\n\t_, err := os.Stat(local)\n\n\tif !os.IsNotExist(err) && ensure_changes {\n\n\t\tchange, _ := c.HasChanged(local, remote)\n\n\t\tif !change {\n\n\t\t\tc.Logger.Debug(\"%s has not changed so skipping\", local)\n\t\t\tatomic.AddInt64(&c.Skipped, 1)\n\t\t\treturn nil\n\t\t}\n\n\t}\n\n\tprocess_err := c.Process(remote, local)\n\n\tif process_err != nil {\n\t\treturn process_err\n\t}\n\n\treturn nil\n}\n\n\/\/ don't return true if there's a problem - move that logic up above\n\nfunc (c *WOFClone) HasChanged(local string, remote string) (bool, error) {\n\n\tchange := true\n\n\tlocal_hash, err := utils.HashFile(local)\n\n\tif err != nil {\n\t\tc.Logger.Error(\"Failed to hash %s, becase %v\", local, err)\n\t\treturn change, err\n\t}\n\n\treturn c.HasHashChanged(local_hash, remote)\n}\n\nfunc (c *WOFClone) HasHashChanged(local_hash string, remote string) (bool, error) {\n\n\tchange := true\n\n\trsp, err := c.Fetch(\"HEAD\", remote)\n\n\tif err != nil {\n\t\treturn change, err\n\t}\n\n\trsp.Body.Close()\n\t\/\/ defer rsp.Body.Close()\n\n\tetag := rsp.Header.Get(\"Etag\")\n\tremote_hash := strings.Replace(etag, \"\\\"\", \"\", -1)\n\n\tif local_hash == remote_hash {\n\t\tchange = false\n\t}\n\n\treturn change, nil\n}\n\nfunc (c *WOFClone) Process(remote string, local string) error {\n\n\tc.Logger.Debug(\"fetch %s and store in %s\", remote, local)\n\n\tlocal_root := path.Dir(local)\n\n\t_, err := os.Stat(local_root)\n\n\tif os.IsNotExist(err) {\n\t\tc.Logger.Debug(\"create %s\", local_root)\n\t\tos.MkdirAll(local_root, 0755)\n\t}\n\n\tt1 := time.Now()\n\n\trsp, fetch_err := c.Fetch(\"GET\", remote)\n\n\tt2 := time.Since(t1)\n\n\tc.Logger.Debug(\"time to fetch %s: %v\", remote, t2)\n\n\tif fetch_err != nil {\n\t\treturn fetch_err\n\t}\n\n\t\/\/ defer rsp.Body.Close()\n\n\tcontents, read_err := ioutil.ReadAll(rsp.Body)\n\n\tif read_err != nil {\n\t\tc.Logger.Error(\"failed to read body for %s, because %v\", remote, read_err)\n\t\treturn read_err\n\t}\n\n\trsp.Body.Close()\n\n\tgo func(local string, contents []byte) error {\n\n\t\twrite_err := ioutil.WriteFile(local, contents, 0644)\n\n\t\tif write_err != nil {\n\t\t\tc.Logger.Error(\"Failed to write %s, because %v\", local, write_err)\n\n\t\t\tatomic.AddInt64(&c.Success, -1)\n\t\t\tatomic.AddInt64(&c.Error, 1)\n\n\t\t\treturn write_err\n\t\t}\n\n\t\tc.Logger.Debug(\"Wrote %s to disk\", local)\n\t\treturn nil\n\t}(local, contents)\n\n\treturn nil\n}\n\nfunc (c *WOFClone) Fetch(method string, remote string) (*http.Response, error) {\n\n\t\/*\n\t See notes in NewWOFClone for details on what's going on here. Given that\n\t we are already testing whether c.Source is a file URI parsing remote here\n\t is probably a bit of a waste. We will live with the cost for now and optimize\n\t as necessary... (20160112\/thisisaaronland)\n\t*\/\n\n\tu, _ := url.Parse(remote)\n\n\tif u.Scheme == \"file\" {\n\t\tremote = strings.Replace(remote, c.Source, \"\", 1)\n\t\tremote = \"file:\/\/\/\" + remote\n\t\tc.Logger.Debug(\"remote is now %s\", remote)\n\t}\n\n\tc.Logger.Debug(\"%s %s\", method, remote)\n\n\treq, _ := http.NewRequest(method, remote, nil)\n\treq.Close = true\n\n\trsp, err := c.client.Do(req)\n\n\tif err != nil {\n\t\tc.Logger.Error(\"Failed to %s %s, because %v\", method, remote, err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Notice how we are not closing rsp.Body - that's because we are passing\n\t\/\/ it (rsp) back up the stack\n\n\t\/\/ See also: https:\/\/github.com\/whosonfirst\/go-whosonfirst-clone\/issues\/6\n\n\texpected := 200\n\n\tif rsp.StatusCode != expected {\n\t\tc.Logger.Error(\"Failed to %s %s, because we expected %d from source and got '%s' instead\", method, remote, expected, rsp.Status)\n\t\treturn nil, errors.New(rsp.Status)\n\t}\n\n\treturn rsp, nil\n}\n\nfunc (c *WOFClone) Status() {\n\n\tt2 := time.Since(c.timer)\n\n\tc.Logger.Info(\"scheduled: %d completed: %d success: %d error: %d skipped: %d to retry: %d goroutines: %d time: %v\",\n\t\tc.Scheduled, c.Completed, c.Success, c.Error, c.Skipped, c.retries.Length(), runtime.NumGoroutine(), t2)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) Clinton Freeman 2016\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n * associated documentation files (the \"Software\"), to deal in the Software without restriction,\n * including without limitation the rights to use, copy, modify, merge, publish, distribute,\n * sublicense, and\/or sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all copies or\n * substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n *\/\n\npackage main\n\nimport (\n\t\"github.com\/akualab\/dmx\"\n\t\"github.com\/kidoman\/embd\"\n\t_ \"github.com\/kidoman\/embd\/host\/all\"\n\t\"log\"\n\t\"time\"\n)\n\n\/\/ WeatherMachine holds connections to everything we need to manipulate the installation.\ntype WeatherMachine struct {\n\tstop chan bool \/\/ Channel for stopping the control elements of the installation.\n\tdmx *dmx.DMX \/\/ The DMX connection for writting messages to the Smoke machine and lights.\n\tconfig Configuration \/\/ The configuration element for the installation.\n\tlastRun time.Time \/\/ The last time the installation was run.\n}\n\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\/\/ Functions for manipulating the installation state; idle, warmup and running.\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\n\/\/ stateFunctions are used to manipulate the WeatherMachine through the various states.\ntype stateFn func(state *WeatherMachine, msg HRMsg) stateFn\n\n\/\/ idle is the state the weathermachine enters when sitting alone, with no one interacting with it.\nfunc idle(state *WeatherMachine, msg HRMsg) (sF stateFn) {\n\tif msg.Contact {\n\t\tlog.Printf(\"INFO: entering warmup\")\n\t\tenableLight(state.config.S1Beat, state.config, state.dmx)\n\t\tgo enablePump(state.config, state.stop)\n\n\t\treturn warmup \/\/ skin contact has been made, enable light and enter warmup.\n\t}\n\n\treturn idle \/\/ remain idle.\n}\n\n\/\/ warmup is the state the weathermachine enters when someone first touches it.\nfunc warmup(state *WeatherMachine, msg HRMsg) stateFn {\n\tif msg.Contact && msg.HeartRate > 0 {\n\t\t\/\/ Wait for the fog to clear from the last run before running again.\n\t\tlog.Printf(\"INFO: Waiting for fog to clear\")\n\t\td := int64(state.config.FanDuration) - time.Since(state.lastRun).Nanoseconds()\n\t\ttime.Sleep(time.Nanosecond * time.Duration(d))\n\n\t\tlog.Printf(\"INFO: entering running\")\n\t\tgo enableLightPulse(state.config, msg.HeartRate, state.stop, state.dmx)\n\t\tgo enableSmoke(state.config, state.stop, state.dmx)\n\t\tgo enableFan(state.config, state.stop)\n\n\t\treturn running \/\/ skin contact and heart rate recieved, start the installation.\n\t} else if !msg.Contact {\n\t\tstate.stop <- true \/\/ Pump starts at initial contact. If we lost contact between\n\t\t\/\/ then and now we need to shut it down.\n\t\tstate.lastRun = time.Now()\n\n\t\tdisableLight(state.config, state.dmx)\n\n\t\tlog.Printf(\"INFO: entering idle\")\n\t\treturn idle \/\/ skin contact lost. Return to idle.\n\t}\n\n\treturn warmup\n}\n\n\/\/ running is the state the weathermachine enters when someone is engaging with it.\nfunc running(state *WeatherMachine, msg HRMsg) stateFn {\n\tif !msg.Contact {\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\n\t\tlog.Printf(\"INFO: entering idle\")\n\t\treturn idle \/\/ skin contact lost. Return to idle.\n\t}\n\n\treturn running \/\/ Keep the installation running.\n}\n\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\/\/ Functions for manipulating the physical installation; lights, smoke and fan.\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\n\/\/ enableLight turns on the light via the supplied DMX connection 'dmx' with the supplied colour 'l'.\nfunc enableLight(l LightColour, c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Light on\")\n\tembd.DigitalWrite(c.GPIOPinLight, embd.High)\n\tdmx.SetChannel(4, byte(l.Red))\n\tdmx.SetChannel(5, byte(l.Green))\n\tdmx.SetChannel(6, byte(l.Blue))\n\tdmx.SetChannel(7, byte(l.Amber))\n\tdmx.SetChannel(8, byte(l.Dimmer))\n\tdmx.Render()\n}\n\n\/\/ disableLight turns off the light via the supplied DMX connection 'dmx'.\nfunc disableLight(c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Light off\")\n\tembd.DigitalWrite(c.GPIOPinLight, embd.Low)\n\tdmx.SetChannel(4, 0)\n\tdmx.SetChannel(5, 0)\n\tdmx.SetChannel(6, 0)\n\tdmx.SetChannel(7, 0)\n\tdmx.SetChannel(8, 0)\n\tdmx.Render()\n}\n\n\/\/ pulseLight pulses the light for a fixed duration.\nfunc pulseLight(c Configuration, dmx *dmx.DMX) {\n\tenableLight(c.S1Beat, c, dmx)\n\ttime.Sleep(time.Millisecond * time.Duration(c.S1Duration))\n\tdisableLight(c, dmx)\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.S1Pause))\n\n\tenableLight(c.S2Beat, c, dmx)\n\ttime.Sleep(time.Millisecond * time.Duration(c.S2Duration))\n\tdisableLight(c, dmx)\n}\n\n\/\/ enableLightPulse starts the light pulsing by the frequency defined by hr. The light remains\n\/\/ pulsing till being notified to stop on d.\nfunc enableLightPulse(c Configuration, hr int, d chan bool, dmx *dmx.DMX) {\n\t\/\/ Perform the first heart beat straight away.\n\tpulseLight(c, dmx)\n\n\tdt := int((60000.0 \/ float32(hr)) * c.BeatRate)\n\tticker := time.NewTicker(time.Millisecond * time.Duration(dt)).C\n\n\t\/\/ Sharp fixed length, pulse of light with variable off gap depending on HR.\n\tfor {\n\t\tselect {\n\t\tcase <-ticker:\n\t\t\tpulseLight(c, dmx)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ pulsePump runs the pump for the duration specified in the configuration.\nfunc pulsePump(c Configuration) {\n\tlog.Printf(\"INFO: Pump on\")\n\tembd.DigitalWrite(c.GPIOPinPump, embd.High)\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.PumpDuration))\n\n\tlog.Printf(\"INFO: Pump Off\")\n\tembd.DigitalWrite(c.GPIOPinPump, embd.Low)\n}\n\n\/\/ enablePump switches the relay on for the water pump after DeltaTPump milliseconds have expired\n\/\/ in the configuration. Pump remains on till being notified to stop on d.\nfunc enablePump(c Configuration, d chan bool) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTPump)).C\n\tvar ticker <-chan time.Time\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tpulsePump(c)\n\t\t\tticker = time.NewTicker(time.Millisecond * time.Duration(c.PumpInterval)).C\n\n\t\tcase <-ticker:\n\t\t\tpulsePump(c)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ enableFan switches the relay on for the fan after DeltaTFan milliseconds have expired\n\/\/ in the configuration. Fan remains on till being notified to stop on d.\nfunc enableFan(c Configuration, d chan bool) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTFan)).C\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tlog.Printf(\"INFO: Fan On\")\n\t\t\tembd.DigitalWrite(c.GPIOPinFan, embd.High)\n\n\t\tcase <-d:\n\t\t\t\/\/ Wait for the fan duration to clear the smoke chamber.\n\t\t\tft := time.NewTimer(time.Millisecond * time.Duration(c.FanDuration)).C\n\t\t\t<-ft\n\t\t\tlog.Printf(\"INFO: Fan Off\")\n\t\t\tembd.DigitalWrite(c.GPIOPinFan, embd.Low)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ puffSmoke enables the smoke machine via the supplied DMX connection 'dmx' for a period of\n\/\/ time and intentsity supplied in configuration.\nfunc puffSmoke(c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Smoke on\")\n\tdmx.SetChannel(1, byte(c.SmokeVolume))\n\tdmx.Render()\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.SmokeDuration))\n\n\tlog.Printf(\"INFO: Smoke off\")\n\tdmx.SetChannel(1, 0)\n\tdmx.Render()\n}\n\n\/\/ enableSmoke enages the DMX smoke machine by the SmokeVolume amount in the configuration.\n\/\/ Smoke Machine remains on till being notified to stop on d.\nfunc enableSmoke(c Configuration, d chan bool, dmx *dmx.DMX) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTSmoke)).C\n\tvar ticker <-chan time.Time\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tpuffSmoke(c, dmx)\n\t\t\tticker = time.NewTicker(time.Millisecond * time.Duration(c.SmokeInterval)).C\n\n\t\tcase <-ticker:\n\t\t\tpuffSmoke(c, dmx)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>milliseconds to nanoseconds.<commit_after>\/*\n * Copyright (c) Clinton Freeman 2016\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n * associated documentation files (the \"Software\"), to deal in the Software without restriction,\n * including without limitation the rights to use, copy, modify, merge, publish, distribute,\n * sublicense, and\/or sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all copies or\n * substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n *\/\n\npackage main\n\nimport (\n\t\"github.com\/akualab\/dmx\"\n\t\"github.com\/kidoman\/embd\"\n\t_ \"github.com\/kidoman\/embd\/host\/all\"\n\t\"log\"\n\t\"time\"\n)\n\n\/\/ WeatherMachine holds connections to everything we need to manipulate the installation.\ntype WeatherMachine struct {\n\tstop chan bool \/\/ Channel for stopping the control elements of the installation.\n\tdmx *dmx.DMX \/\/ The DMX connection for writting messages to the Smoke machine and lights.\n\tconfig Configuration \/\/ The configuration element for the installation.\n\tlastRun time.Time \/\/ The last time the installation was run.\n}\n\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\/\/ Functions for manipulating the installation state; idle, warmup and running.\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\n\/\/ stateFunctions are used to manipulate the WeatherMachine through the various states.\ntype stateFn func(state *WeatherMachine, msg HRMsg) stateFn\n\n\/\/ idle is the state the weathermachine enters when sitting alone, with no one interacting with it.\nfunc idle(state *WeatherMachine, msg HRMsg) (sF stateFn) {\n\tif msg.Contact {\n\t\tlog.Printf(\"INFO: entering warmup\")\n\t\tenableLight(state.config.S1Beat, state.config, state.dmx)\n\t\tgo enablePump(state.config, state.stop)\n\n\t\treturn warmup \/\/ skin contact has been made, enable light and enter warmup.\n\t}\n\n\treturn idle \/\/ remain idle.\n}\n\n\/\/ warmup is the state the weathermachine enters when someone first touches it.\nfunc warmup(state *WeatherMachine, msg HRMsg) stateFn {\n\tif msg.Contact && msg.HeartRate > 0 {\n\t\t\/\/ Wait for the fog to clear from the last run before running again.\n\t\tlog.Printf(\"INFO: Waiting for fog to clear\")\n\t\td := (int64(state.config.FanDuration)*1000000) - time.Since(state.lastRun).Nanoseconds()\n\t\ttime.Sleep(time.Nanosecond * time.Duration(d))\n\n\t\tlog.Printf(\"INFO: entering running\")\n\t\tgo enableLightPulse(state.config, msg.HeartRate, state.stop, state.dmx)\n\t\tgo enableSmoke(state.config, state.stop, state.dmx)\n\t\tgo enableFan(state.config, state.stop)\n\n\t\treturn running \/\/ skin contact and heart rate recieved, start the installation.\n\t} else if !msg.Contact {\n\t\tstate.stop <- true \/\/ Pump starts at initial contact. If we lost contact between\n\t\t\/\/ then and now we need to shut it down.\n\t\tstate.lastRun = time.Now()\n\n\t\tdisableLight(state.config, state.dmx)\n\n\t\tlog.Printf(\"INFO: entering idle\")\n\t\treturn idle \/\/ skin contact lost. Return to idle.\n\t}\n\n\treturn warmup\n}\n\n\/\/ running is the state the weathermachine enters when someone is engaging with it.\nfunc running(state *WeatherMachine, msg HRMsg) stateFn {\n\tif !msg.Contact {\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\t\tstate.stop <- true\n\n\t\tlog.Printf(\"INFO: entering idle\")\n\t\treturn idle \/\/ skin contact lost. Return to idle.\n\t}\n\n\treturn running \/\/ Keep the installation running.\n}\n\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\/\/ Functions for manipulating the physical installation; lights, smoke and fan.\n\/\/ ****************************************************************************\n\/\/ ****************************************************************************\n\n\/\/ enableLight turns on the light via the supplied DMX connection 'dmx' with the supplied colour 'l'.\nfunc enableLight(l LightColour, c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Light on\")\n\tembd.DigitalWrite(c.GPIOPinLight, embd.High)\n\tdmx.SetChannel(4, byte(l.Red))\n\tdmx.SetChannel(5, byte(l.Green))\n\tdmx.SetChannel(6, byte(l.Blue))\n\tdmx.SetChannel(7, byte(l.Amber))\n\tdmx.SetChannel(8, byte(l.Dimmer))\n\tdmx.Render()\n}\n\n\/\/ disableLight turns off the light via the supplied DMX connection 'dmx'.\nfunc disableLight(c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Light off\")\n\tembd.DigitalWrite(c.GPIOPinLight, embd.Low)\n\tdmx.SetChannel(4, 0)\n\tdmx.SetChannel(5, 0)\n\tdmx.SetChannel(6, 0)\n\tdmx.SetChannel(7, 0)\n\tdmx.SetChannel(8, 0)\n\tdmx.Render()\n}\n\n\/\/ pulseLight pulses the light for a fixed duration.\nfunc pulseLight(c Configuration, dmx *dmx.DMX) {\n\tenableLight(c.S1Beat, c, dmx)\n\ttime.Sleep(time.Millisecond * time.Duration(c.S1Duration))\n\tdisableLight(c, dmx)\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.S1Pause))\n\n\tenableLight(c.S2Beat, c, dmx)\n\ttime.Sleep(time.Millisecond * time.Duration(c.S2Duration))\n\tdisableLight(c, dmx)\n}\n\n\/\/ enableLightPulse starts the light pulsing by the frequency defined by hr. The light remains\n\/\/ pulsing till being notified to stop on d.\nfunc enableLightPulse(c Configuration, hr int, d chan bool, dmx *dmx.DMX) {\n\t\/\/ Perform the first heart beat straight away.\n\tpulseLight(c, dmx)\n\n\tdt := int((60000.0 \/ float32(hr)) * c.BeatRate)\n\tticker := time.NewTicker(time.Millisecond * time.Duration(dt)).C\n\n\t\/\/ Sharp fixed length, pulse of light with variable off gap depending on HR.\n\tfor {\n\t\tselect {\n\t\tcase <-ticker:\n\t\t\tpulseLight(c, dmx)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ pulsePump runs the pump for the duration specified in the configuration.\nfunc pulsePump(c Configuration) {\n\tlog.Printf(\"INFO: Pump on\")\n\tembd.DigitalWrite(c.GPIOPinPump, embd.High)\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.PumpDuration))\n\n\tlog.Printf(\"INFO: Pump Off\")\n\tembd.DigitalWrite(c.GPIOPinPump, embd.Low)\n}\n\n\/\/ enablePump switches the relay on for the water pump after DeltaTPump milliseconds have expired\n\/\/ in the configuration. Pump remains on till being notified to stop on d.\nfunc enablePump(c Configuration, d chan bool) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTPump)).C\n\tvar ticker <-chan time.Time\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tpulsePump(c)\n\t\t\tticker = time.NewTicker(time.Millisecond * time.Duration(c.PumpInterval)).C\n\n\t\tcase <-ticker:\n\t\t\tpulsePump(c)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ enableFan switches the relay on for the fan after DeltaTFan milliseconds have expired\n\/\/ in the configuration. Fan remains on till being notified to stop on d.\nfunc enableFan(c Configuration, d chan bool) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTFan)).C\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tlog.Printf(\"INFO: Fan On\")\n\t\t\tembd.DigitalWrite(c.GPIOPinFan, embd.High)\n\n\t\tcase <-d:\n\t\t\t\/\/ Wait for the fan duration to clear the smoke chamber.\n\t\t\tft := time.NewTimer(time.Millisecond * time.Duration(c.FanDuration)).C\n\t\t\t<-ft\n\t\t\tlog.Printf(\"INFO: Fan Off\")\n\t\t\tembd.DigitalWrite(c.GPIOPinFan, embd.Low)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ puffSmoke enables the smoke machine via the supplied DMX connection 'dmx' for a period of\n\/\/ time and intentsity supplied in configuration.\nfunc puffSmoke(c Configuration, dmx *dmx.DMX) {\n\tlog.Printf(\"INFO: Smoke on\")\n\tdmx.SetChannel(1, byte(c.SmokeVolume))\n\tdmx.Render()\n\n\ttime.Sleep(time.Millisecond * time.Duration(c.SmokeDuration))\n\n\tlog.Printf(\"INFO: Smoke off\")\n\tdmx.SetChannel(1, 0)\n\tdmx.Render()\n}\n\n\/\/ enableSmoke enages the DMX smoke machine by the SmokeVolume amount in the configuration.\n\/\/ Smoke Machine remains on till being notified to stop on d.\nfunc enableSmoke(c Configuration, d chan bool, dmx *dmx.DMX) {\n\tdt := time.NewTimer(time.Millisecond * time.Duration(c.DeltaTSmoke)).C\n\tvar ticker <-chan time.Time\n\n\tfor {\n\t\tselect {\n\t\tcase <-dt:\n\t\t\tpuffSmoke(c, dmx)\n\t\t\tticker = time.NewTicker(time.Millisecond * time.Duration(c.SmokeInterval)).C\n\n\t\tcase <-ticker:\n\t\t\tpuffSmoke(c, dmx)\n\n\t\tcase <-d:\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 NDP Systèmes. All Rights Reserved.\n\/\/ See LICENSE file for full licensing details.\n\npackage templates\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/hexya-erp\/hexya\/hexya\/tools\/xmlutils\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nvar tmplDef1 = `\n<template id=\"my_id\" page=\"True\">\n\t<div>\n\t\t<span t-foreach=\"lines\" t-as=\"line\">\n\t\t\t<h1 t-esc=\"line.UserName\"\/>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>\n\t<\/div>\n<\/template>\n`\n\nvar tmplDef2 = `\n<template id=\"my_other_id\" priority=\"12\" optional=\"enabled\">\n\t<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t<t t-esc=\"Function\"\/>\n\t\t<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t<t t-esc=\"Email\"\/>\n\t\t<\/div>\n\t<\/div>\n<\/template>\n`\n\nvar tmplDef3 = `\n<template inherit_id=\"my_other_id\">\n\t<div name=\"position_info\" position=\"inside\">\n\t\t<t t-esc=\"CompanyName\"\/>\n\t<\/div>\n\t<xpath expr=\"\/\/t[@t-esc='Email']\" position=\"after\">\n\t\t<t t-esc=\"Phone\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef4 = `\n<template inherit_id=\"my_other_id\">\n\t<div name=\"contact_data\" position=\"before\">\n\t\t<div>\n\t\t\t<t t-esc=\"Address\"\/>\n\t\t<\/div>\n\t\t<hr\/>\n\t<\/div>\n\t<h1 position=\"replace\">\n\t\t<h2><t t-esc=\"Name\"\/><\/h2>\n\t<\/h1>\n<\/template>\n`\n\nvar tmplDef5 = `\n<template inherit_id=\"my_other_id\">\n\t<xpath expr=\"\/\/t[@t-esc='Address']\/..\" position=\"attributes\">\n\t\t<attribute name=\"name\">address<\/attribute>\n\t\t<attribute name=\"string\">Address<\/attribute>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef8 = `\n<template inherit_id=\"my_other_id\" id=\"new_base_view\">\n\t<xpath expr=\"\/\/t[@t-esc='Email']\" position=\"after\">\n\t\t<t t-raw=\"Fax\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef9 = `\n<template inherit_id=\"new_base_view\">\n\t<xpath expr=\"\/\/t[@t-raw='Fax']\" position=\"before\">\n\t\t<t t-raw=\"Mobile\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nfunc loadView(xml string) {\n\telt, err := xmlutils.XMLToElement(xml)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tLoadFromEtree(elt)\n}\n\nfunc TestViews(t *testing.T) {\n\tConvey(\"Creating Template 1\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 1)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\ttemplate := Registry.collection.GetByID(\"my_id\")\n\t\tSo(template.ID, ShouldEqual, \"my_id\")\n\t\tSo(template.Page, ShouldEqual, true)\n\t\tSo(template.Optional, ShouldEqual, false)\n\t\tSo(template.OptionalDefault, ShouldEqual, false)\n\t\tSo(template.Priority, ShouldEqual, 16)\n\t\tSo(string(template.p2Content), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t{% for line in lines %}<span>\n\t\t\t<h1>{{ line.UserName }}<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>{% endfor %}\n\t<\/div>\n`)\n\t})\n\tConvey(\"Creating Template 2\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(template.ID, ShouldEqual, \"my_other_id\")\n\t\tSo(template.Page, ShouldEqual, false)\n\t\tSo(template.Optional, ShouldEqual, true)\n\t\tSo(template.OptionalDefault, ShouldEqual, true)\n\t\tSo(template.Priority, ShouldEqual, 12)\n\t\tSo(string(template.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t{{ Email }}\n\t\t<\/div>\n\t<\/div>\n`)\n\t})\n\tConvey(\"Inheriting Template 2\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate1 := Registry.collection.GetByID(\"my_id\")\n\t\tSo(string(template1.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t{% for line in lines %}<span>\n\t\t\t<h1>{{ line.UserName }}<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>{% endfor %}\n\t<\/div>\n`)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\n\tConvey(\"More inheritance on Template 2\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div>\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\n\t})\n\tConvey(\"Modifying inherited modifications on Template 2\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\tConvey(\"Create new base template from inheritance\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tloadView(tmplDef8)\n\t\tBootStrap()\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"new_base_view\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tnewTemplate := Registry.collection.GetByID(\"new_base_view\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t\tSo(string(newTemplate.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Fax|safe }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\tConvey(\"Inheriting new base template from inheritance\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tloadView(tmplDef8)\n\t\tloadView(tmplDef9)\n\t\tBootStrap()\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"new_base_view\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tnewTemplate := Registry.collection.GetByID(\"new_base_view\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t\tSo(string(newTemplate.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Mobile|safe }}{{ Fax|safe }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n}\n<commit_msg>Improved test coverage on templates<commit_after>\/\/ Copyright 2018 NDP Systèmes. All Rights Reserved.\n\/\/ See LICENSE file for full licensing details.\n\npackage templates\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/flosch\/pongo2\"\n\t\"github.com\/hexya-erp\/hexya\/hexya\/i18n\"\n\t\"github.com\/hexya-erp\/hexya\/hexya\/tools\/xmlutils\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nvar tmplDef1 = `\n<template id=\"my_id\" page=\"True\">\n\t<div>\n\t\t<span t-foreach=\"lines\" t-as=\"line\">\n\t\t\t<h1 t-esc=\"line.UserName\"\/>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>\n\t<\/div>\n<\/template>\n`\n\nvar tmplDef2 = `\n<template id=\"my_other_id\" priority=\"12\" optional=\"enabled\">\n\t<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t<t t-esc=\"Function\"\/>\n\t\t<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t<t t-esc=\"Email\"\/>\n\t\t<\/div>\n\t<\/div>\n<\/template>\n`\n\nvar tmplDef3 = `\n<template inherit_id=\"my_other_id\">\n\t<div name=\"position_info\" position=\"inside\">\n\t\t<t t-esc=\"CompanyName\"\/>\n\t<\/div>\n\t<xpath expr=\"\/\/t[@t-esc='Email']\" position=\"after\">\n\t\t<t t-esc=\"Phone\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef4 = `\n<template inherit_id=\"my_other_id\">\n\t<div name=\"contact_data\" position=\"before\">\n\t\t<div>\n\t\t\t<t t-esc=\"Address\"\/>\n\t\t<\/div>\n\t\t<hr\/>\n\t<\/div>\n\t<h1 position=\"replace\">\n\t\t<h2><t t-esc=\"Name\"\/><\/h2>\n\t<\/h1>\n<\/template>\n`\n\nvar tmplDef5 = `\n<template inherit_id=\"my_other_id\">\n\t<xpath expr=\"\/\/t[@t-esc='Address']\/..\" position=\"attributes\">\n\t\t<attribute name=\"name\">address<\/attribute>\n\t\t<attribute name=\"string\">Address<\/attribute>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef8 = `\n<template inherit_id=\"my_other_id\" id=\"new_base_view\" priority=\"13\" optional=\"disabled\" page=\"True\">\n\t<xpath expr=\"\/\/t[@t-esc='Email']\" position=\"after\">\n\t\t<t t-raw=\"Fax\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nvar tmplDef9 = `\n<template inherit_id=\"new_base_view\">\n\t<xpath expr=\"\/\/t[@t-raw='Fax']\" position=\"before\">\n\t\t<t t-raw=\"Mobile\"\/>\n\t<\/xpath>\n<\/template>\n`\n\nfunc loadView(xml string) {\n\telt, err := xmlutils.XMLToElement(xml)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tLoadFromEtree(elt)\n}\n\nfunc TestViews(t *testing.T) {\n\tConvey(\"Setting two languages\", t, func() {\n\t\ti18n.Langs = []string{\"fr\", \"de\"}\n\t})\n\tConvey(\"Creating Template 1\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 1)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\ttemplate := Registry.collection.GetByID(\"my_id\")\n\t\tSo(template.ID, ShouldEqual, \"my_id\")\n\t\tSo(template.Page, ShouldEqual, true)\n\t\tSo(template.Optional, ShouldEqual, false)\n\t\tSo(template.OptionalDefault, ShouldEqual, false)\n\t\tSo(template.Priority, ShouldEqual, 16)\n\t\tSo(string(template.p2Content), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t{% for line in lines %}<span>\n\t\t\t<h1>{{ line.UserName }}<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>{% endfor %}\n\t<\/div>\n`)\n\t})\n\tConvey(\"Creating Template 2\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(template.ID, ShouldEqual, \"my_other_id\")\n\t\tSo(template.Page, ShouldEqual, false)\n\t\tSo(template.Optional, ShouldEqual, true)\n\t\tSo(template.OptionalDefault, ShouldEqual, true)\n\t\tSo(template.Priority, ShouldEqual, 12)\n\t\tSo(string(template.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t{{ Email }}\n\t\t<\/div>\n\t<\/div>\n`)\n\t})\n\tConvey(\"Inheriting Template 2\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate1 := Registry.collection.GetByID(\"my_id\")\n\t\tSo(string(template1.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}\n\t<div>\n\t\t{% for line in lines %}<span>\n\t\t\t<h1>{{ line.UserName }}<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>{% endfor %}\n\t<\/div>\n`)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h1>Name<\/h1>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\n\tConvey(\"More inheritance on Template 2\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div>\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\n\t})\n\tConvey(\"Modifying inherited modifications on Template 2\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tBootStrap()\n\t\tSo(len(Registry.collection.templates), ShouldEqual, 2)\n\t\tSo(Registry.collection.GetByID(\"my_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\tConvey(\"Create new base template from inheritance\", t, func() {\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tloadView(tmplDef8)\n\t\tBootStrap()\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"new_base_view\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tnewTemplate := Registry.collection.GetByID(\"new_base_view\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t\tSo(template2.Priority, ShouldEqual, 12)\n\t\tSo(template2.Page, ShouldBeFalse)\n\t\tSo(template2.Optional, ShouldBeTrue)\n\t\tSo(template2.OptionalDefault, ShouldBeTrue)\n\t\tSo(newTemplate.Priority, ShouldEqual, 13)\n\t\tSo(newTemplate.Page, ShouldBeTrue)\n\t\tSo(newTemplate.Optional, ShouldBeTrue)\n\t\tSo(newTemplate.OptionalDefault, ShouldBeFalse)\n\t\tSo(string(newTemplate.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Fax|safe }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\tConvey(\"Inheriting new base template from inheritance\", t, func() {\n\t\tRegistry.collection = newCollection()\n\t\tloadView(tmplDef1)\n\t\tloadView(tmplDef2)\n\t\tloadView(tmplDef3)\n\t\tloadView(tmplDef4)\n\t\tloadView(tmplDef5)\n\t\tloadView(tmplDef8)\n\t\tloadView(tmplDef9)\n\t\tBootStrap()\n\t\tSo(Registry.collection.GetByID(\"my_other_id\"), ShouldNotBeNil)\n\t\tSo(Registry.collection.GetByID(\"new_base_view\"), ShouldNotBeNil)\n\t\ttemplate2 := Registry.collection.GetByID(\"my_other_id\")\n\t\tnewTemplate := Registry.collection.GetByID(\"new_base_view\")\n\t\tSo(string(template2.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t\tSo(string(newTemplate.Content(\"\")), ShouldEqual,\n\t\t\t`{% set _1 = _0 %}<div>\n\t\t<h2>{{ Name }}<\/h2>\n\t\t<div name=\"position_info\">\n\t\t\t{{ Function }}\n\t\t{{ CompanyName }}<\/div>\n\t\t<div name=\"address\" string=\"Address\">\n\t\t\t{{ Address }}\n\t\t<\/div><hr\/><div name=\"contact_data\">\n\t\t\t{{ Email }}{{ Mobile|safe }}{{ Fax|safe }}{{ Phone }}\n\t\t<\/div>\n\t<\/div>`)\n\t})\n\tConvey(\"Testing gin loading\", t, func() {\n\t\tinst := Registry.Instance(\"my_id\", pongo2.Context{\n\t\t\t\"lines\": []map[string]string{\n\t\t\t\t{\"UserName\": \"jsmith\"},\n\t\t\t\t{\"UserName\": \"wsmith\"},\n\t\t\t}})\n\t\tw := httptest.NewRecorder()\n\t\tinst.Render(w)\n\t\tbody, _ := ioutil.ReadAll(w.Result().Body)\n\t\tSo(string(body), ShouldEqual, `\n\t<div>\n\t\t<span>\n\t\t\t<h1>jsmith<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span><span>\n\t\t\t<h1>wsmith<\/h1>\n\t\t\t<label for=\"Age\"\/>\n\t\t\t<p>Hello World<\/p>\n\t\t<\/span>\n\t<\/div>\n`)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mungers\n\nimport (\n\t\"time\"\n\n\t\"k8s.io\/contrib\/mungegithub\/features\"\n\t\"k8s.io\/contrib\/mungegithub\/github\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tvalidBranches = []string{\"master\"}\n)\n\n\/\/ PingCIMunger looks for situations CI (Travis | Shippable) has flaked for some\n\/\/ reason and we want to re-run them. Achieves this by closing and re-opening the pr\ntype PingCIMunger struct{}\n\nfunc init() {\n\tRegisterMungerOrDie(PingCIMunger{})\n}\n\n\/\/ Name is the name usable in --pr-mungers\nfunc (PingCIMunger) Name() string { return \"ping-ci\" }\n\n\/\/ RequiredFeatures is a slice of 'features' that must be provided\nfunc (PingCIMunger) RequiredFeatures() []string { return []string{} }\n\n\/\/ Initialize will initialize the munger\nfunc (PingCIMunger) Initialize(config *github.Config, features *features.Features) error { return nil }\n\n\/\/ EachLoop is called at the start of every munge loop\nfunc (PingCIMunger) EachLoop() error { return nil }\n\n\/\/ AddFlags will add any request flags to the cobra `cmd`\nfunc (PingCIMunger) AddFlags(cmd *cobra.Command, config *github.Config) {}\n\n\/\/ Munge is the workhorse the will actually make updates to the PR\nfunc (PingCIMunger) Munge(obj *github.MungeObject) {\n\tif !obj.IsPR() {\n\t\treturn\n\t}\n\n\t\/\/ This munger only runs on certain branches, since travis\/CI only listens\n\t\/\/ on certain branches\n\tvalidBranch := false\n\tfor _, b := range validBranches {\n\t\tif obj.IsForBranch(b) {\n\t\t\tvalidBranch = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !validBranch {\n\t\treturn\n\t}\n\n\tif !obj.HasLabel(lgtmLabel) {\n\t\treturn\n\t}\n\tmergeable, err := obj.IsMergeable()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"ping CI skipping %d - problem determining mergeability\", *obj.Issue.Number)\n\t\treturn\n\t}\n\tif !mergeable {\n\t\tglog.V(2).Infof(\"ping CI skipping %d - not mergeable\", *obj.Issue.Number)\n\t\treturn\n\t}\n\tif state := obj.GetStatusState([]string{travisContext}); state == \"incomplete\" {\n\t\tmsg := \"Travis continuous integration appears to have missed, closing and re-opening to trigger it\"\n\t\tobj.WriteComment(msg)\n\n\t\tobj.ClosePR()\n\t\ttime.Sleep(5 * time.Second)\n\t\tobj.OpenPR(10)\n\t}\n}\n<commit_msg>Delete ping_ci - We don't use it any more<commit_after><|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"context\"\n\n\t\"v2ray.com\/core\/common\"\n)\n\n\/\/ CreateObject creates a new object based on the given V2Ray instance and config. The V2Ray instance may be nil.\nfunc CreateObject(v *Instance, config interface{}) (interface{}, error) {\n\tctx := context.Background()\n\tif v != nil {\n\t\tctx = context.WithValue(ctx, v2rayKey, v)\n\t}\n\treturn common.CreateObject(ctx, config)\n}\n\nfunc PrintDeprecatedFeatureWarning(feature string) {\n\tnewError(\"You are using a deprecated feature: \" + feature + \". Please update your config file with latest configuration format, or update your client software.\")\n}\n<commit_msg>write error to log<commit_after>package core\n\nimport (\n\t\"context\"\n\n\t\"v2ray.com\/core\/common\"\n)\n\n\/\/ CreateObject creates a new object based on the given V2Ray instance and config. The V2Ray instance may be nil.\nfunc CreateObject(v *Instance, config interface{}) (interface{}, error) {\n\tctx := context.Background()\n\tif v != nil {\n\t\tctx = context.WithValue(ctx, v2rayKey, v)\n\t}\n\treturn common.CreateObject(ctx, config)\n}\n\nfunc PrintDeprecatedFeatureWarning(feature string) {\n\tnewError(\"You are using a deprecated feature: \" + feature + \". Please update your config file with latest configuration format, or update your client software.\").WriteToLog()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\n\t\"github.com\/biogo\/biogo\/feat\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/mnsmar\/htsdb\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nconst prog = \"htsdb-relative-pos-dist\"\nconst version = \"0.2\"\nconst descr = `Measure relative position distribution for reads in database 1\nagainst reads in database 2. For each possible relative position, print the\nnumber of read pairs with this relative positioning, the total number of\npossible pairs and the total number of reads in each database. Read relative\nposition is measured either 5'-5' or 3'-3'. Positive numbers indicate read 1\nis downstream of read 2. Provided SQL\nfilters will apply to all counts.`\n\nvar (\n\tapp = kingpin.New(prog, descr)\n\n\tdbFile1 = app.Flag(\"db1\", \"SQLite file for database 1.\").\n\t\tPlaceHolder(\"<file>\").Required().String()\n\ttab1 = app.Flag(\"table1\", \"Database table name for db1.\").\n\t\tDefault(\"sample\").String()\n\twhere1 = app.Flag(\"where1\", \"SQL filter injected in WHERE clause for db1.\").\n\t\tPlaceHolder(\"<SQL>\").String()\n\tdbFile2 = app.Flag(\"db2\", \"SQLite file for database 2.\").\n\t\tPlaceHolder(\"<file>\").Required().String()\n\ttab2 = app.Flag(\"table2\", \"Database table name for db2.\").\n\t\tDefault(\"sample\").String()\n\twhere2 = app.Flag(\"where2\", \"SQL filter injected in WHERE clause for db2.\").\n\t\tPlaceHolder(\"<SQL>\").String()\n\tfrom = app.Flag(\"pos\", \"Reference point for relative position measurement.\").\n\t\tRequired().PlaceHolder(\"<5p|3p>\").Enum(\"5p\", \"3p\")\n\tanti = app.Flag(\"anti\", \"Compare reads on opposite instead of same orientation.\").\n\t\tBool()\n\tspan = app.Flag(\"span\", \"Maximum distance between compared reads.\").\n\t\tDefault(\"100\").PlaceHolder(\"<int>\").Int()\n\tverbose = app.Flag(\"verbose\", \"Verbose mode.\").Short('v').Bool()\n)\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tapp.Version(version)\n\t_, err := app.Parse(os.Args[1:])\n\tif err != nil {\n\t\tkingpin.Fatalf(\"%s\", err)\n\t}\n\n\t\/\/ assemble sqlx select builders\n\treadsBuilder1 := htsdb.RangeBuilder.From(*tab1)\n\tcountBuilder1 := htsdb.CountBuilder.From(*tab1)\n\tif *where1 != \"\" {\n\t\treadsBuilder1 = readsBuilder1.Where(*where1)\n\t\tcountBuilder1 = countBuilder1.Where(*where1)\n\t}\n\treadsBuilder2 := htsdb.RangeBuilder.From(*tab2)\n\trefsBuilder2 := htsdb.ReferenceBuilder.From(*tab2)\n\tcountBuilder2 := htsdb.CountBuilder.From(*tab2)\n\tif *where2 != \"\" {\n\t\treadsBuilder2 = readsBuilder2.Where(*where2)\n\t\trefsBuilder2 = refsBuilder2.Where(*where2)\n\t\tcountBuilder2 = countBuilder2.Where(*where2)\n\t}\n\n\t\/\/ open database connections.\n\tvar db1, db2 *sqlx.DB\n\tif db1, err = sqlx.Connect(\"sqlite3\", *dbFile1); err != nil {\n\t\tpanic(err)\n\t}\n\tif db2, err = sqlx.Connect(\"sqlite3\", *dbFile2); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ prepare statements.\n\tquery1, _, err := readsBuilder1.Where(\"strand = ? AND rname = ?\").ToSql()\n\tpanicOnError(err)\n\treadsStmt1, err := db1.Preparex(query1)\n\tpanicOnError(err)\n\tquery2, _, err := readsBuilder2.Where(\"strand = ? AND rname = ?\").ToSql()\n\tpanicOnError(err)\n\treadsStmt2, err := db2.Preparex(query2)\n\tpanicOnError(err)\n\n\t\/\/ select reference features\n\trefs, err := htsdb.SelectReferences(db2, refsBuilder2)\n\n\t\/\/ count records\n\tcountQuery1, _, err := countBuilder1.ToSql()\n\tpanicOnError(err)\n\tvar totalCount1 int\n\tif err = db1.Get(&totalCount1, countQuery1); err != nil {\n\t\tpanic(err)\n\t}\n\tcountQuery2, _, err := countBuilder2.ToSql()\n\tpanicOnError(err)\n\tvar totalCount2 int\n\tif err = db2.Get(&totalCount2, countQuery2); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ get position extracting function\n\tgetPos := htsdb.Head\n\tif *from == \"3p\" {\n\t\tgetPos = htsdb.Tail\n\t}\n\n\t\/\/ count histogram around reference.\n\thists := make(chan map[int]uint)\n\tvar wg sync.WaitGroup\n\tfor _, ref := range refs {\n\t\tfor _, ori := range []feat.Orientation{feat.Forward, feat.Reverse} {\n\t\t\twg.Add(1)\n\t\t\tgo func(ori feat.Orientation, ref htsdb.Reference) {\n\t\t\t\tif *verbose == true {\n\t\t\t\t\tlog.Printf(\"orient:%s, chrom:%s\\n\", ori, ref.Chrom)\n\t\t\t\t}\n\t\t\t\tdefer wg.Done()\n\t\t\t\tvar r htsdb.Range\n\n\t\t\t\twig := make(map[int]uint)\n\t\t\t\tori1 := ori\n\t\t\t\tif *anti == true {\n\t\t\t\t\tori1 = -1 * ori1\n\t\t\t\t}\n\t\t\t\trows1, err := readsStmt1.Queryx(ori1, ref.Chrom)\n\t\t\t\tpanicOnError(err)\n\t\t\t\tfor rows1.Next() {\n\t\t\t\t\terr = rows1.StructScan(&r)\n\t\t\t\t\tpanicOnError(err)\n\t\t\t\t\tpos := getPos(&r, ori1)\n\t\t\t\t\twig[pos]++\n\t\t\t\t}\n\n\t\t\t\thist := make(map[int]uint)\n\t\t\t\trows2, err := readsStmt2.Queryx(ori, ref.Chrom)\n\t\t\t\tpanicOnError(err)\n\t\t\t\tfor rows2.Next() {\n\t\t\t\t\terr = rows2.StructScan(&r)\n\t\t\t\t\tpanicOnError(err)\n\t\t\t\t\tpos := getPos(&r, ori)\n\t\t\t\t\tfor relPos := -*span; relPos <= *span; relPos++ {\n\t\t\t\t\t\tif pos+relPos < 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\thist[relPos*int(ori)] += wig[pos+relPos]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\thists <- hist\n\t\t\t}(ori, ref)\n\t\t}\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(hists)\n\n\t}()\n\n\t\/\/ aggregate histograms from goroutines\n\taggrHist := make(map[int]uint)\n\tfor hist := range hists {\n\t\tfor k, v := range hist {\n\t\t\taggrHist[k] += v\n\t\t}\n\t}\n\n\t\/\/ print results.\n\tfmt.Printf(\"pos\\tpairs\\treadCount1\\treadCount2\\n\")\n\tfor i := -*span; i <= *span; i++ {\n\t\tfmt.Printf(\"%d\\t%d\\t%d\\t%d\\n\", i, aggrHist[i], totalCount1, totalCount2)\n\t}\n}\n\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>htsdb-relative-pos-distro: fix program name<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\n\t\"github.com\/biogo\/biogo\/feat\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/mnsmar\/htsdb\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nconst prog = \"htsdb-relative-pos-distro\"\nconst version = \"0.2\"\nconst descr = `Measure relative position distribution for reads in database 1\nagainst reads in database 2. For each possible relative position, print the\nnumber of read pairs with this relative positioning, the total number of\npossible pairs and the total number of reads in each database. Read relative\nposition is measured either 5'-5' or 3'-3'. Positive numbers indicate read 1\nis downstream of read 2. Provided SQL\nfilters will apply to all counts.`\n\nvar (\n\tapp = kingpin.New(prog, descr)\n\n\tdbFile1 = app.Flag(\"db1\", \"SQLite file for database 1.\").\n\t\tPlaceHolder(\"<file>\").Required().String()\n\ttab1 = app.Flag(\"table1\", \"Database table name for db1.\").\n\t\tDefault(\"sample\").String()\n\twhere1 = app.Flag(\"where1\", \"SQL filter injected in WHERE clause for db1.\").\n\t\tPlaceHolder(\"<SQL>\").String()\n\tdbFile2 = app.Flag(\"db2\", \"SQLite file for database 2.\").\n\t\tPlaceHolder(\"<file>\").Required().String()\n\ttab2 = app.Flag(\"table2\", \"Database table name for db2.\").\n\t\tDefault(\"sample\").String()\n\twhere2 = app.Flag(\"where2\", \"SQL filter injected in WHERE clause for db2.\").\n\t\tPlaceHolder(\"<SQL>\").String()\n\tfrom = app.Flag(\"pos\", \"Reference point for relative position measurement.\").\n\t\tRequired().PlaceHolder(\"<5p|3p>\").Enum(\"5p\", \"3p\")\n\tanti = app.Flag(\"anti\", \"Compare reads on opposite instead of same orientation.\").\n\t\tBool()\n\tspan = app.Flag(\"span\", \"Maximum distance between compared reads.\").\n\t\tDefault(\"100\").PlaceHolder(\"<int>\").Int()\n\tverbose = app.Flag(\"verbose\", \"Verbose mode.\").Short('v').Bool()\n)\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tapp.Version(version)\n\t_, err := app.Parse(os.Args[1:])\n\tif err != nil {\n\t\tkingpin.Fatalf(\"%s\", err)\n\t}\n\n\t\/\/ assemble sqlx select builders\n\treadsBuilder1 := htsdb.RangeBuilder.From(*tab1)\n\tcountBuilder1 := htsdb.CountBuilder.From(*tab1)\n\tif *where1 != \"\" {\n\t\treadsBuilder1 = readsBuilder1.Where(*where1)\n\t\tcountBuilder1 = countBuilder1.Where(*where1)\n\t}\n\treadsBuilder2 := htsdb.RangeBuilder.From(*tab2)\n\trefsBuilder2 := htsdb.ReferenceBuilder.From(*tab2)\n\tcountBuilder2 := htsdb.CountBuilder.From(*tab2)\n\tif *where2 != \"\" {\n\t\treadsBuilder2 = readsBuilder2.Where(*where2)\n\t\trefsBuilder2 = refsBuilder2.Where(*where2)\n\t\tcountBuilder2 = countBuilder2.Where(*where2)\n\t}\n\n\t\/\/ open database connections.\n\tvar db1, db2 *sqlx.DB\n\tif db1, err = sqlx.Connect(\"sqlite3\", *dbFile1); err != nil {\n\t\tpanic(err)\n\t}\n\tif db2, err = sqlx.Connect(\"sqlite3\", *dbFile2); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ prepare statements.\n\tquery1, _, err := readsBuilder1.Where(\"strand = ? AND rname = ?\").ToSql()\n\tpanicOnError(err)\n\treadsStmt1, err := db1.Preparex(query1)\n\tpanicOnError(err)\n\tquery2, _, err := readsBuilder2.Where(\"strand = ? AND rname = ?\").ToSql()\n\tpanicOnError(err)\n\treadsStmt2, err := db2.Preparex(query2)\n\tpanicOnError(err)\n\n\t\/\/ select reference features\n\trefs, err := htsdb.SelectReferences(db2, refsBuilder2)\n\n\t\/\/ count records\n\tcountQuery1, _, err := countBuilder1.ToSql()\n\tpanicOnError(err)\n\tvar totalCount1 int\n\tif err = db1.Get(&totalCount1, countQuery1); err != nil {\n\t\tpanic(err)\n\t}\n\tcountQuery2, _, err := countBuilder2.ToSql()\n\tpanicOnError(err)\n\tvar totalCount2 int\n\tif err = db2.Get(&totalCount2, countQuery2); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ get position extracting function\n\tgetPos := htsdb.Head\n\tif *from == \"3p\" {\n\t\tgetPos = htsdb.Tail\n\t}\n\n\t\/\/ count histogram around reference.\n\thists := make(chan map[int]uint)\n\tvar wg sync.WaitGroup\n\tfor _, ref := range refs {\n\t\tfor _, ori := range []feat.Orientation{feat.Forward, feat.Reverse} {\n\t\t\twg.Add(1)\n\t\t\tgo func(ori feat.Orientation, ref htsdb.Reference) {\n\t\t\t\tif *verbose == true {\n\t\t\t\t\tlog.Printf(\"orient:%s, chrom:%s\\n\", ori, ref.Chrom)\n\t\t\t\t}\n\t\t\t\tdefer wg.Done()\n\t\t\t\tvar r htsdb.Range\n\n\t\t\t\twig := make(map[int]uint)\n\t\t\t\tori1 := ori\n\t\t\t\tif *anti == true {\n\t\t\t\t\tori1 = -1 * ori1\n\t\t\t\t}\n\t\t\t\trows1, err := readsStmt1.Queryx(ori1, ref.Chrom)\n\t\t\t\tpanicOnError(err)\n\t\t\t\tfor rows1.Next() {\n\t\t\t\t\terr = rows1.StructScan(&r)\n\t\t\t\t\tpanicOnError(err)\n\t\t\t\t\tpos := getPos(&r, ori1)\n\t\t\t\t\twig[pos]++\n\t\t\t\t}\n\n\t\t\t\thist := make(map[int]uint)\n\t\t\t\trows2, err := readsStmt2.Queryx(ori, ref.Chrom)\n\t\t\t\tpanicOnError(err)\n\t\t\t\tfor rows2.Next() {\n\t\t\t\t\terr = rows2.StructScan(&r)\n\t\t\t\t\tpanicOnError(err)\n\t\t\t\t\tpos := getPos(&r, ori)\n\t\t\t\t\tfor relPos := -*span; relPos <= *span; relPos++ {\n\t\t\t\t\t\tif pos+relPos < 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\thist[relPos*int(ori)] += wig[pos+relPos]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\thists <- hist\n\t\t\t}(ori, ref)\n\t\t}\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(hists)\n\n\t}()\n\n\t\/\/ aggregate histograms from goroutines\n\taggrHist := make(map[int]uint)\n\tfor hist := range hists {\n\t\tfor k, v := range hist {\n\t\t\taggrHist[k] += v\n\t\t}\n\t}\n\n\t\/\/ print results.\n\tfmt.Printf(\"pos\\tpairs\\treadCount1\\treadCount2\\n\")\n\tfor i := -*span; i <= *span; i++ {\n\t\tfmt.Printf(\"%d\\t%d\\t%d\\t%d\\n\", i, aggrHist[i], totalCount1, totalCount2)\n\t}\n}\n\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\npackage webapp\n\nimport (\n\t\"github.com\/eleme\/banshee\/models\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/mattn\/go-sqlite3\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/ getProjects returns all projects.\nfunc getProjects(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar projs []models.Project\n\tif err := db.Admin.DB().Find(&projs).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, projs)\n}\n\n\/\/ getProject returns project by id.\nfunc getProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query db.\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ createProject request\ntype createProjectRequest struct {\n\tName string `json:\"name\"`\n}\n\n\/\/ createProject creates a project.\nfunc createProject(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\/\/ Request\n\treq := &createProjectRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Validate\n\tif err := models.ValidateProjectName(req.Name); err != nil {\n\t\tResponseError(w, NewValidationWebError(err))\n\t\treturn\n\t}\n\t\/\/ Save.\n\tproj := &models.Project{Name: req.Name}\n\tif err := db.Admin.DB().Create(proj).Error; err != nil {\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintNotNull:\n\t\t\t\tResponseError(w, ErrNotNull)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintPrimaryKey:\n\t\t\t\tResponseError(w, ErrPrimaryKey)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ updateProject request\ntype updateProjectRequest struct {\n\tName string `json:\"name\"`\n}\n\n\/\/ updateProject updates a project.\nfunc updateProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Request\n\treq := &updateProjectRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Validate\n\tif err := models.ValidateProjectName(req.Name); err != nil {\n\t\tResponseError(w, NewValidationWebError(err))\n\t\treturn\n\t}\n\t\/\/ Find\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Patch.\n\tproj.Name = req.Name\n\tif err := db.Admin.DB().Save(proj).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\t\/\/ Not found.\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Writer errors.\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintNotNull:\n\t\t\t\tResponseError(w, ErrNotNull)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ deleteProject deletes a project.\nfunc deleteProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Delete.\n\tif err := db.Admin.DB().Delete(&models.Project{ID: id}).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ getProjectRules gets project rules.\nfunc getProjectRules(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query\n\tvar rules []models.Rule\n\tif err := db.Admin.DB().Model(&models.Project{ID: id}).Related(&rules).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tfor i := 0; i < len(rules); i++ {\n\t\trules[i].BuildRepr()\n\t\trules[i].SetNumMetrics(len(db.Index.Filter(rules[i].Pattern)))\n\t}\n\tResponseJSONOK(w, rules)\n}\n\n\/\/ getProjectUsers gets project users.\nfunc getProjectUsers(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query\n\tvar users []models.User\n\tif err := db.Admin.DB().Model(&models.Project{ID: id}).Association(\"Users\").Find(&users).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\t\/\/ Universals\n\tvar univs []models.User\n\tif err := db.Admin.DB().Where(\"universal = ?\", true).Find(&univs).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tusers = append(users, univs...)\n\tResponseJSONOK(w, users)\n}\n\n\/\/ addProjectUserRequest is the request of addProjectUser\ntype addProjectUserRequest struct {\n\tName string `json:\"name\"`\n}\n\n\/\/ projectUser is the tempory select result for table `project_users`\ntype projectUser struct {\n\tUserID int `sql:\"user_id\"`\n\tProjectID int `sql:\"project_id\"`\n}\n\n\/\/ addProjectUser adds a user to a project.\nfunc addProjectUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Request\n\treq := &addProjectUserRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Find user.\n\tuser := &models.User{}\n\tif err := db.Admin.DB().Where(\"name = ?\", req.Name).First(user).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrUserNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\tif user.Universal {\n\t\tResponseError(w, ErrProjectUniversalUser)\n\t\treturn\n\t}\n\t\/\/ Find proj\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\t\/\/ Note: Gorm only insert values to join-table if the primary key not in\n\t\/\/ the join-table. So we select the record at first here.\n\tif err := db.Admin.DB().Table(\"project_users\").Where(\"user_id = ? and project_id = ?\", user.ID, proj.ID).Find(&projectUser{}).Error; err == nil {\n\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\treturn\n\t}\n\t\/\/ Append user.\n\tif err := db.Admin.DB().Model(proj).Association(\"Users\").Append(user).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\t\/\/ User or Project not found.\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Duplicate primay key.\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintPrimaryKey:\n\t\t\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t\/\/ Unexcepted error.\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n}\n\n\/\/ deleteProjectUser deletes a user from a project.\nfunc deleteProjectUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\tuserID, err := strconv.Atoi(ps.ByName(\"user_id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrUserID)\n\t\treturn\n\t}\n\t\/\/ Find user.\n\tuser := &models.User{}\n\tif err := db.Admin.DB().First(user, userID).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrUserNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Find proj.\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Delete user.\n\tif err := db.Admin.DB().Model(proj).Association(\"Users\").Delete(user).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>Add SilentTimeStart[End] to updateProject request<commit_after>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\npackage webapp\n\nimport (\n\t\"github.com\/eleme\/banshee\/models\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/mattn\/go-sqlite3\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/ getProjects returns all projects.\nfunc getProjects(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar projs []models.Project\n\tif err := db.Admin.DB().Find(&projs).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, projs)\n}\n\n\/\/ getProject returns project by id.\nfunc getProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query db.\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ createProject request\ntype createProjectRequest struct {\n\tName string `json:\"name\"`\n}\n\n\/\/ createProject creates a project.\nfunc createProject(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\/\/ Request\n\treq := &createProjectRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Validate\n\tif err := models.ValidateProjectName(req.Name); err != nil {\n\t\tResponseError(w, NewValidationWebError(err))\n\t\treturn\n\t}\n\t\/\/ Save.\n\tproj := &models.Project{Name: req.Name}\n\tif err := db.Admin.DB().Create(proj).Error; err != nil {\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintNotNull:\n\t\t\t\tResponseError(w, ErrNotNull)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintPrimaryKey:\n\t\t\t\tResponseError(w, ErrPrimaryKey)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ updateProject request\ntype updateProjectRequest struct {\n\tName string `json:\"name\"`\n\tEnableSilent bool `json:\"enableSilent\"`\n\tSilentTimeStart int `json:\"silentTimeStart\"`\n\tSilentTimeEnd int `json:\"silentTimeEnd\"`\n}\n\n\/\/ updateProject updates a project.\nfunc updateProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Request\n\treq := &updateProjectRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Validate\n\tif err := models.ValidateProjectName(req.Name); err != nil {\n\t\tResponseError(w, NewValidationWebError(err))\n\t\treturn\n\t}\n\tif err := models.ValidateProjectSilentRange(req.SilentTimeStart, req.SilentTimeEnd); err != nil {\n\t\tResponseError(w, NewValidationWebError(err))\n\t\treturn\n\t}\n\t\/\/ Find\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Patch.\n\tproj.Name = req.Name\n\tproj.EnableSilent = req.EnableSilent\n\tproj.SilentTimeStart = req.SilentTimeStart\n\tproj.SilentTimeEnd = req.SilentTimeEnd\n\tif err := db.Admin.DB().Save(proj).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\t\/\/ Not found.\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Writer errors.\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintNotNull:\n\t\t\t\tResponseError(w, ErrNotNull)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tResponseJSONOK(w, proj)\n}\n\n\/\/ deleteProject deletes a project.\nfunc deleteProject(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Delete.\n\tif err := db.Admin.DB().Delete(&models.Project{ID: id}).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ getProjectRules gets project rules.\nfunc getProjectRules(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query\n\tvar rules []models.Rule\n\tif err := db.Admin.DB().Model(&models.Project{ID: id}).Related(&rules).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tfor i := 0; i < len(rules); i++ {\n\t\trules[i].BuildRepr()\n\t\trules[i].SetNumMetrics(len(db.Index.Filter(rules[i].Pattern)))\n\t}\n\tResponseJSONOK(w, rules)\n}\n\n\/\/ getProjectUsers gets project users.\nfunc getProjectUsers(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Query\n\tvar users []models.User\n\tif err := db.Admin.DB().Model(&models.Project{ID: id}).Association(\"Users\").Find(&users).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\t\/\/ Universals\n\tvar univs []models.User\n\tif err := db.Admin.DB().Where(\"universal = ?\", true).Find(&univs).Error; err != nil {\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\tusers = append(users, univs...)\n\tResponseJSONOK(w, users)\n}\n\n\/\/ addProjectUserRequest is the request of addProjectUser\ntype addProjectUserRequest struct {\n\tName string `json:\"name\"`\n}\n\n\/\/ projectUser is the tempory select result for table `project_users`\ntype projectUser struct {\n\tUserID int `sql:\"user_id\"`\n\tProjectID int `sql:\"project_id\"`\n}\n\n\/\/ addProjectUser adds a user to a project.\nfunc addProjectUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\t\/\/ Request\n\treq := &addProjectUserRequest{}\n\tif err := RequestBind(r, req); err != nil {\n\t\tResponseError(w, ErrBadRequest)\n\t\treturn\n\t}\n\t\/\/ Find user.\n\tuser := &models.User{}\n\tif err := db.Admin.DB().Where(\"name = ?\", req.Name).First(user).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrUserNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\tif user.Universal {\n\t\tResponseError(w, ErrProjectUniversalUser)\n\t\treturn\n\t}\n\t\/\/ Find proj\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\t}\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n\t\/\/ Note: Gorm only insert values to join-table if the primary key not in\n\t\/\/ the join-table. So we select the record at first here.\n\tif err := db.Admin.DB().Table(\"project_users\").Where(\"user_id = ? and project_id = ?\", user.ID, proj.ID).Find(&projectUser{}).Error; err == nil {\n\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\treturn\n\t}\n\t\/\/ Append user.\n\tif err := db.Admin.DB().Model(proj).Association(\"Users\").Append(user).Error; err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\t\/\/ User or Project not found.\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Duplicate primay key.\n\t\tsqliteErr, ok := err.(sqlite3.Error)\n\t\tif ok {\n\t\t\tswitch sqliteErr.ExtendedCode {\n\t\t\tcase sqlite3.ErrConstraintUnique:\n\t\t\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\t\t\treturn\n\t\t\tcase sqlite3.ErrConstraintPrimaryKey:\n\t\t\t\tResponseError(w, ErrDuplicateProjectUser)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t\/\/ Unexcepted error.\n\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\treturn\n\t}\n}\n\n\/\/ deleteProjectUser deletes a user from a project.\nfunc deleteProjectUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/ Params\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrProjectID)\n\t\treturn\n\t}\n\tuserID, err := strconv.Atoi(ps.ByName(\"user_id\"))\n\tif err != nil {\n\t\tResponseError(w, ErrUserID)\n\t\treturn\n\t}\n\t\/\/ Find user.\n\tuser := &models.User{}\n\tif err := db.Admin.DB().First(user, userID).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrUserNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Find proj.\n\tproj := &models.Project{}\n\tif err := db.Admin.DB().First(proj, id).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrProjectNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Delete user.\n\tif err := db.Admin.DB().Model(proj).Association(\"Users\").Delete(user).Error; err != nil {\n\t\tswitch err {\n\t\tcase gorm.RecordNotFound:\n\t\t\tResponseError(w, ErrNotFound)\n\t\t\treturn\n\t\tdefault:\n\t\t\tResponseError(w, NewUnexceptedWebError(err))\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cocoa\r\n\/\/+build darwin\r\n\r\n\/*\r\n#cgo darwin CFLAGS: -DDARWIN -x objective-c -fobjc-arc\r\n#cgo darwin LDFLAGS: -framework Cocoa\r\n\r\n#include \"cocoa.h\"\r\n*\/\r\nimport \"C\"\r\n\r\nimport (\r\n\t\"runtime\"\r\n)\r\n\r\ntype startCallback func()\r\ntype urlCallback func(url string)\r\n\r\nvar (\r\n\tstartupCallbacks = []startCallback{}\r\n\turlCallbacks = []urlCallback{}\r\n)\r\n\r\nfunc AddUrlCallback(c urlCallback) {\r\n\turlCallbacks = append(urlCallbacks,c)\r\n}\r\nfunc AddStartCallback(c startCallback) {\r\n\tstartupCallbacks = append(startupCallbacks, c)\r\n}\r\n\r\nfunc Start(onStart startCallback) {\r\n\tAddStartCallback(onStart)\r\n\r\n\truntime.LockOSThread()\r\n\tC.cocoaMain()\r\n}\r\n\r\nfunc Stop() {\r\n\tC.cocoaExit()\r\n}\r\n\r\nfunc ShowFileDialog(title, defaultDirectory string,\r\n\tfileTypes []string,\r\n\tforFiles bool, multiselect bool) {\r\n\ttitlePtr := C.CString(title)\r\n\tdirPtr := C.CString(defaultDirectory)\r\n\r\n\tvar filesBool C.BOOL\r\n\tif (forFiles) {\r\n\t\tfilesBool = C.BOOL(1)\r\n\t} else {\r\n\t\tfilesBool = C.BOOL(0)\r\n\t}\r\n\r\n\tvar selectBool C.BOOL\r\n\tif (selectBool) {\r\n\t\tselectBool = C.BOOL(1)\r\n\t} else {\r\n\t\tselectBool = C.BOOL(0)\r\n\t}\r\n\r\n\tC.showOpenPanel(titlePtr, dirPtr, filesBool, selectBool)\r\n\r\n\tC.free(unsafe.Pointer(titlePtr))\r\n\tC.free(unsafe.Pointer(dirPtr))\r\n}\r\n\r\nfunc ShowDialog(message string) {\r\n\tmsgStr := C.CString(message)\r\n\tC.showDialog(msgStr)\r\n\tC.free(unsafe.Pointer(msgStr))\r\n}\r\n\r\nfunc Log(message string) {\r\n\tmsgStr := C.CString(message)\r\n\tC.printLog(msgStr)\r\n\tC.free(usnafe.Pointer(msgStr))\r\n}\r\n\r\n\/\/export cocoaStart\r\nfunc cocoastart() {\r\n\tfor _, f := range startupCallbacks {\r\n\t\tf()\r\n\t}\r\n}\r\n\r\n\/\/export cocoaUrl\r\nfunc cocoaUrl(data C.CString) {\r\n\turl := C.GoString(data)\r\n\tfor _, f := range urlCallbacks {\r\n\t\tf(url)\r\n\t}\r\n}\r\n<commit_msg>Typo in export name for cocoaStart<commit_after>package cocoa\r\n\/\/+build darwin\r\n\r\n\/*\r\n#cgo darwin CFLAGS: -DDARWIN -x objective-c -fobjc-arc\r\n#cgo darwin LDFLAGS: -framework Cocoa\r\n\r\n#include \"cocoa.h\"\r\n*\/\r\nimport \"C\"\r\n\r\nimport (\r\n\t\"runtime\"\r\n)\r\n\r\ntype startCallback func()\r\ntype urlCallback func(url string)\r\n\r\nvar (\r\n\tstartupCallbacks = []startCallback{}\r\n\turlCallbacks = []urlCallback{}\r\n)\r\n\r\nfunc AddUrlCallback(c urlCallback) {\r\n\turlCallbacks = append(urlCallbacks,c)\r\n}\r\nfunc AddStartCallback(c startCallback) {\r\n\tstartupCallbacks = append(startupCallbacks, c)\r\n}\r\n\r\nfunc Start(onStart startCallback) {\r\n\tAddStartCallback(onStart)\r\n\r\n\truntime.LockOSThread()\r\n\tC.cocoaMain()\r\n}\r\n\r\nfunc Stop() {\r\n\tC.cocoaExit()\r\n}\r\n\r\nfunc ShowFileDialog(title, defaultDirectory string,\r\n\tfileTypes []string,\r\n\tforFiles bool, multiselect bool) {\r\n\ttitlePtr := C.CString(title)\r\n\tdirPtr := C.CString(defaultDirectory)\r\n\r\n\tvar filesBool C.BOOL\r\n\tif (forFiles) {\r\n\t\tfilesBool = C.BOOL(1)\r\n\t} else {\r\n\t\tfilesBool = C.BOOL(0)\r\n\t}\r\n\r\n\tvar selectBool C.BOOL\r\n\tif (selectBool) {\r\n\t\tselectBool = C.BOOL(1)\r\n\t} else {\r\n\t\tselectBool = C.BOOL(0)\r\n\t}\r\n\r\n\tC.showOpenPanel(titlePtr, dirPtr, filesBool, selectBool)\r\n\r\n\tC.free(unsafe.Pointer(titlePtr))\r\n\tC.free(unsafe.Pointer(dirPtr))\r\n}\r\n\r\nfunc ShowDialog(message string) {\r\n\tmsgStr := C.CString(message)\r\n\tC.showDialog(msgStr)\r\n\tC.free(unsafe.Pointer(msgStr))\r\n}\r\n\r\nfunc Log(message string) {\r\n\tmsgStr := C.CString(message)\r\n\tC.printLog(msgStr)\r\n\tC.free(usnafe.Pointer(msgStr))\r\n}\r\n\r\n\/\/export cocoaStart\r\nfunc cocoaStart() {\r\n\tfor _, f := range startupCallbacks {\r\n\t\tf()\r\n\t}\r\n}\r\n\r\n\/\/export cocoaUrl\r\nfunc cocoaUrl(data C.CString) {\r\n\turl := C.GoString(data)\r\n\tfor _, f := range urlCallbacks {\r\n\t\tf(url)\r\n\t}\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package goridge\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"net\/rpc\"\n\t\"sync\"\n\n\tj \"github.com\/json-iterator\/go\"\n\t\"github.com\/spiral\/errors\"\n\t\"go.uber.org\/multierr\"\n)\n\nvar json = j.ConfigCompatibleWithStandardLibrary\n\n\/\/ Codec represent net\/rpc bridge over Goridge socket relay.\ntype Codec struct {\n\trelay Relay\n\tclosed bool\n\tframe *Frame\n\tcodec sync.Map\n}\n\n\/\/ NewCodec initiates new server rpc codec over socket connection.\nfunc NewCodec(rwc io.ReadWriteCloser) *Codec {\n\treturn &Codec{\n\t\trelay: NewSocketRelay(rwc),\n\t\tcodec: sync.Map{},\n\t}\n}\n\n\/\/ NewCodecWithRelay initiates new server rpc codec with a relay of choice.\nfunc NewCodecWithRelay(relay Relay) *Codec {\n\treturn &Codec{relay: relay}\n}\n\n\/\/ WriteResponse marshals response, byte slice or error to remote party.\nfunc (c *Codec) WriteResponse(r *rpc.Response, body interface{}) error {\n\tconst op = errors.Op(\"codec: write response\")\n\tframe := NewFrame()\n\n\t\/\/ SEQ_ID + METHOD_NAME_LEN\n\tframe.WriteOptions(uint32(r.Seq), uint32(len(r.ServiceMethod)))\n\t\/\/ Write protocol version\n\tframe.WriteVersion(VERSION_1)\n\n\t\/\/ load and delete associated codec to not waste memory\n\t\/\/ because we write it to the frame and don't need more information about it\n\tcodec, ok := c.codec.LoadAndDelete(r.Seq)\n\tif !ok {\n\t\t\/\/ fallback codec\n\t\tframe.WriteFlags(CODEC_GOB)\n\t} else {\n\t\tframe.WriteFlags(codec.(FrameFlag))\n\t}\n\n\t\/\/ initialize buffer\n\tbuf := new(bytes.Buffer)\n\t\/\/ writeServiceMethod to the buffer\n\tbuf.WriteString(r.ServiceMethod)\n\n\t\/\/ if error returned, we sending it via relay and return error from WriteResponse\n\tif r.Error != \"\" {\n\t\t\/\/ Append error flag\n\t\treturn c.handleError(r, frame, buf, errors.Str(r.Error))\n\t}\n\n\t\/\/ read flag previously written\n\t\/\/ TODO might be better to save it to local variable\n\tflags := frame.ReadFlags()\n\n\tswitch {\n\tcase flags&byte(CODEC_JSON) != 0:\n\t\terr := encodeJSON(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_MSGPACK) != 0:\n\t\terr := encodeMsgPack(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_RAW) != 0:\n\t\terr := encodeRaw(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_GOB) != 0:\n\t\terr := encodeGob(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tdefault:\n\t\treturn errors.E(op, errors.Str(\"unknown codec\"))\n\t}\n}\n\nfunc (c *Codec) handleError(r *rpc.Response, frame *Frame, buf *bytes.Buffer, err error) error {\n\t\/\/ just to be sure, remove all data from buffer and write new\n\tbuf.Truncate(0)\n\t\/\/ write all possible errors\n\terr = multierr.Append(err, errors.Str(r.Error))\n\tbuf.WriteString(r.ServiceMethod)\n\n\tconst op = errors.Op(\"handle codec error\")\n\tframe.WriteFlags(ERROR)\n\t\/\/ error should be here\n\tif err != nil {\n\t\tbuf.WriteString(err.Error())\n\t}\n\tframe.WritePayloadLen(uint32(buf.Len()))\n\tframe.WritePayload(buf.Bytes())\n\n\tframe.WriteCRC()\n\t_ = c.relay.Send(frame)\n\treturn errors.E(op, errors.Str(r.Error))\n}\n\n\/\/ ReadRequestHeader receives\nfunc (c *Codec) ReadRequestHeader(r *rpc.Request) error {\n\tconst op = errors.Op(\"codec: read request header\")\n\tframe := NewFrame()\n\terr := c.relay.Receive(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ opts[0] sequence ID\n\t\/\/ opts[1] service method name offset from payload in bytes\n\topts := frame.ReadOptions()\n\tif len(opts) != 2 {\n\t\treturn errors.E(op, errors.Str(\"should be 2 options. SEQ_ID and METHOD_LEN\"))\n\t}\n\n\tr.Seq = uint64(opts[0])\n\tr.ServiceMethod = string(frame.Payload()[:opts[1]])\n\tc.frame = frame\n\treturn c.storeCodec(r, frame.ReadFlags())\n}\n\nfunc (c *Codec) storeCodec(r *rpc.Request, flag byte) error {\n\tswitch {\n\tcase flag&byte(CODEC_JSON) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_JSON)\n\tcase flag&byte(CODEC_RAW) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_RAW)\n\tcase flag&byte(CODEC_MSGPACK) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_MSGPACK)\n\tcase flag&byte(CODEC_GOB) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_GOB)\n\tdefault:\n\t\tc.codec.Store(r.Seq, CODEC_GOB)\n\t}\n\n\treturn nil\n}\n\n\/\/ ReadRequestBody fetches prefixed body data and automatically unmarshal it as json. RawBody flag will populate\n\/\/ []byte lice argument for rpc method.\nfunc (c *Codec) ReadRequestBody(out interface{}) error {\n\tconst op = errors.Op(\"codec read request body\")\n\tif out == nil {\n\t\treturn nil\n\t}\n\n\tdefer func() {\n\t\tc.frame = nil\n\t}()\n\n\tflags := c.frame.ReadFlags()\n\n\tif flags&byte(CODEC_JSON) != byte(0) {\n\t\treturn decodeJSON(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_GOB) != byte(0) {\n\t\treturn decodeGob(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_RAW) != byte(0) {\n\t\treturn decodeRaw(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_MSGPACK) != byte(0) {\n\t\treturn decodeMsgPack(out, c.frame)\n\t}\n\n\treturn errors.E(op, errors.Str(\"unknown decoder used in frame\"))\n}\n\n\/\/ Close underlying socket.\nfunc (c *Codec) Close() error {\n\tif c.closed {\n\t\treturn nil\n\t}\n\n\tc.closed = true\n\treturn c.relay.Close()\n}\n<commit_msg>Split sync.Map load and delete for go1.14<commit_after>package goridge\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"net\/rpc\"\n\t\"sync\"\n\n\tj \"github.com\/json-iterator\/go\"\n\t\"github.com\/spiral\/errors\"\n\t\"go.uber.org\/multierr\"\n)\n\nvar json = j.ConfigCompatibleWithStandardLibrary\n\n\/\/ Codec represent net\/rpc bridge over Goridge socket relay.\ntype Codec struct {\n\trelay Relay\n\tclosed bool\n\tframe *Frame\n\tcodec sync.Map\n}\n\n\/\/ NewCodec initiates new server rpc codec over socket connection.\nfunc NewCodec(rwc io.ReadWriteCloser) *Codec {\n\treturn &Codec{\n\t\trelay: NewSocketRelay(rwc),\n\t\tcodec: sync.Map{},\n\t}\n}\n\n\/\/ NewCodecWithRelay initiates new server rpc codec with a relay of choice.\nfunc NewCodecWithRelay(relay Relay) *Codec {\n\treturn &Codec{relay: relay}\n}\n\n\/\/ WriteResponse marshals response, byte slice or error to remote party.\nfunc (c *Codec) WriteResponse(r *rpc.Response, body interface{}) error {\n\tconst op = errors.Op(\"codec: write response\")\n\tframe := NewFrame()\n\n\t\/\/ SEQ_ID + METHOD_NAME_LEN\n\tframe.WriteOptions(uint32(r.Seq), uint32(len(r.ServiceMethod)))\n\t\/\/ Write protocol version\n\tframe.WriteVersion(VERSION_1)\n\n\t\/\/ load and delete associated codec to not waste memory\n\t\/\/ because we write it to the frame and don't need more information about it\n\t\/\/ as for go.14, Load and Delete are separate methods\n\tcodec, ok := c.codec.Load(r.Seq)\n\tif !ok {\n\t\t\/\/ fallback codec\n\t\tframe.WriteFlags(CODEC_GOB)\n\t} else {\n\t\tframe.WriteFlags(codec.(FrameFlag))\n\t}\n\n\t\/\/ delete the key\n\tc.codec.Delete(r.Seq)\n\n\t\/\/ initialize buffer\n\tbuf := new(bytes.Buffer)\n\t\/\/ writeServiceMethod to the buffer\n\tbuf.WriteString(r.ServiceMethod)\n\n\t\/\/ if error returned, we sending it via relay and return error from WriteResponse\n\tif r.Error != \"\" {\n\t\t\/\/ Append error flag\n\t\treturn c.handleError(r, frame, buf, errors.Str(r.Error))\n\t}\n\n\t\/\/ read flag previously written\n\t\/\/ TODO might be better to save it to local variable\n\tflags := frame.ReadFlags()\n\n\tswitch {\n\tcase flags&byte(CODEC_JSON) != 0:\n\t\terr := encodeJSON(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_MSGPACK) != 0:\n\t\terr := encodeMsgPack(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_RAW) != 0:\n\t\terr := encodeRaw(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tcase flags&byte(CODEC_GOB) != 0:\n\t\terr := encodeGob(buf, body)\n\t\tif err != nil {\n\t\t\treturn c.handleError(r, frame, buf, err)\n\t\t}\n\t\t\/\/ everything written correct\n\t\tframe.WritePayloadLen(uint32(buf.Len()))\n\t\tframe.WritePayload(buf.Bytes())\n\n\t\tframe.WriteCRC()\n\t\treturn c.relay.Send(frame)\n\tdefault:\n\t\treturn errors.E(op, errors.Str(\"unknown codec\"))\n\t}\n}\n\nfunc (c *Codec) handleError(r *rpc.Response, frame *Frame, buf *bytes.Buffer, err error) error {\n\t\/\/ just to be sure, remove all data from buffer and write new\n\tbuf.Truncate(0)\n\t\/\/ write all possible errors\n\terr = multierr.Append(err, errors.Str(r.Error))\n\tbuf.WriteString(r.ServiceMethod)\n\n\tconst op = errors.Op(\"handle codec error\")\n\tframe.WriteFlags(ERROR)\n\t\/\/ error should be here\n\tif err != nil {\n\t\tbuf.WriteString(err.Error())\n\t}\n\tframe.WritePayloadLen(uint32(buf.Len()))\n\tframe.WritePayload(buf.Bytes())\n\n\tframe.WriteCRC()\n\t_ = c.relay.Send(frame)\n\treturn errors.E(op, errors.Str(r.Error))\n}\n\n\/\/ ReadRequestHeader receives\nfunc (c *Codec) ReadRequestHeader(r *rpc.Request) error {\n\tconst op = errors.Op(\"codec: read request header\")\n\tframe := NewFrame()\n\terr := c.relay.Receive(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ opts[0] sequence ID\n\t\/\/ opts[1] service method name offset from payload in bytes\n\topts := frame.ReadOptions()\n\tif len(opts) != 2 {\n\t\treturn errors.E(op, errors.Str(\"should be 2 options. SEQ_ID and METHOD_LEN\"))\n\t}\n\n\tr.Seq = uint64(opts[0])\n\tr.ServiceMethod = string(frame.Payload()[:opts[1]])\n\tc.frame = frame\n\treturn c.storeCodec(r, frame.ReadFlags())\n}\n\nfunc (c *Codec) storeCodec(r *rpc.Request, flag byte) error {\n\tswitch {\n\tcase flag&byte(CODEC_JSON) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_JSON)\n\tcase flag&byte(CODEC_RAW) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_RAW)\n\tcase flag&byte(CODEC_MSGPACK) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_MSGPACK)\n\tcase flag&byte(CODEC_GOB) != 0:\n\t\tc.codec.Store(r.Seq, CODEC_GOB)\n\tdefault:\n\t\tc.codec.Store(r.Seq, CODEC_GOB)\n\t}\n\n\treturn nil\n}\n\n\/\/ ReadRequestBody fetches prefixed body data and automatically unmarshal it as json. RawBody flag will populate\n\/\/ []byte lice argument for rpc method.\nfunc (c *Codec) ReadRequestBody(out interface{}) error {\n\tconst op = errors.Op(\"codec read request body\")\n\tif out == nil {\n\t\treturn nil\n\t}\n\n\tdefer func() {\n\t\tc.frame = nil\n\t}()\n\n\tflags := c.frame.ReadFlags()\n\n\tif flags&byte(CODEC_JSON) != byte(0) {\n\t\treturn decodeJSON(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_GOB) != byte(0) {\n\t\treturn decodeGob(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_RAW) != byte(0) {\n\t\treturn decodeRaw(out, c.frame)\n\t}\n\n\tif flags&byte(CODEC_MSGPACK) != byte(0) {\n\t\treturn decodeMsgPack(out, c.frame)\n\t}\n\n\treturn errors.E(op, errors.Str(\"unknown decoder used in frame\"))\n}\n\n\/\/ Close underlying socket.\nfunc (c *Codec) Close() error {\n\tif c.closed {\n\t\treturn nil\n\t}\n\n\tc.closed = true\n\treturn c.relay.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package obj\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\ntype Suite struct {\n\t\/\/ FIXME this type is still, yes still, wrong: you need a factory for machines,\n\t\/\/ because otherwise you're gonna have a bad day if the same type comes up twice in a stack.\n\t\/\/ We really do need some declaritive type thing for \"machinestrategy\" here, separate from the machine impl.\n\tmappings map[reflect.Type]MarshalMachine\n}\n\n\/*\n\tFolds another behavior dispatch into the suite.\n\n\tThe `typeHint` parameter is an instance of the type you want dispatch\n\tto use this machine for. A zero instance is fine.\n\tThus, calls to this method usually resemble the following:\n\n\t\tsuite.Add(YourType{}, &SomeMachineImpl{})\n*\/\nfunc (s *Suite) Add(typeHint interface{}, mach MarshalMachine) {\n\tif s.mappings == nil {\n\t\ts.mappings = make(map[reflect.Type]MarshalMachine)\n\t}\n\trt := reflect.TypeOf(typeHint)\n\tfor {\n\t\tif rt.Kind() == reflect.Ptr {\n\t\t\trt = rt.Elem()\n\t\t}\n\t}\n\ts.mappings[rt] = mach\n}\n\nfunc (s *Suite) pickMarshalMachine(valp interface{}) MarshalMachine {\n\tmach := s.maybePickMarshalMachine(valp)\n\tif mach == nil {\n\t\tpanic(fmt.Errorf(\"no machine available in suite for type %T\", valp))\n\t}\n\treturn mach\n}\n\n\/*\n\tPicks an unmarshal machine, returning the custom impls for any\n\tcommon\/primitive types, and advanced machines where structs get involved.\n\n\tThe argument should be the address of the actual value of interest.\n\n\tReturns nil if there is no marshal machine in the suite for this type.\n*\/\nfunc (s *Suite) maybePickMarshalMachine(valp interface{}) MarshalMachine {\n\t\/\/ TODO : we can use type switches to do some primitives efficiently here\n\t\/\/ before we turn to the reflective path.\n\tval_rt := reflect.ValueOf(valp).Elem().Type()\n\treturn s.maybeMarshalMachineForType(val_rt)\n}\n\nfunc (s *Suite) marshalMachineForType(rt reflect.Type) MarshalMachine {\n\tmach := s.maybeMarshalMachineForType(rt)\n\tif mach == nil {\n\t\tpanic(fmt.Errorf(\"no machine available in suite for type %s\", rt.Name()))\n\t}\n\treturn mach\n}\n\n\/*\n\tLike `pickMarshalMachine`, but requiring only the reflect type info.\n\tThis is useable when you only have the type info available (rather than an instance);\n\tthis comes up when for example looking up the machine to use for all values\n\tin a slice based on the slice type info.\n\n\t(Using an instance may be able to take faster, non-reflective paths for\n\tprimitive values.)\n\n\tIn contrast to the method that takes a `valp interface{}`, this type info\n\tis understood to already be dereferenced.\n\n\tReturns nil if there is no marshal machine in the suite for this type.\n*\/\nfunc (s *Suite) maybeMarshalMachineForType(rt reflect.Type) MarshalMachine {\n\tswitch rt.Kind() {\n\tcase reflect.Bool:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.String:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Slice:\n\t\t\/\/ TODO also bytes should get a special path\n\t\treturn &MarshalMachineSliceWildcard{}\n\tcase reflect.Array:\n\t\treturn &MarshalMachineSliceWildcard{}\n\tcase reflect.Map:\n\t\t\/\/ Consider (for efficiency in happy paths):\n\t\t\/\/\t\tswitch v2 := v.(type) {\n\t\t\/\/\t\tcase map[string]interface{}:\n\t\t\/\/\t\t\t_ = v2\n\t\t\/\/\t\t\treturn nil \/\/ TODO special\n\t\t\/\/\t\tdefault:\n\t\t\/\/\t\t\treturn &MarshalMachineMapWildcard{}\n\t\t\/\/\t\t}\n\t\t\/\/ but, it's not clear how we'd cache this.\n\t\t\/\/ possibly we should attach this whole method to the Driver,\n\t\t\/\/ so it can have state for cache.\n\t\treturn &MarshalMachineMapWildcard{}\n\tcase reflect.Ptr:\n\t\t\/\/ REVIEW: doing this once, fine. but unbounded? questionable.\n\t\treturn s.maybeMarshalMachineForType(rt.Elem())\n\tcase reflect.Struct:\n\t\treturn s.mappings[rt]\n\tcase reflect.Interface:\n\t\tpanic(\"TODO iface\")\n\tcase reflect.Func:\n\t\tpanic(\"TODO func\") \/\/ hey, if we can find it in the suite\n\tdefault:\n\t\tpanic(fmt.Errorf(\"excursion %s\", rt.Kind()))\n\t}\n}\n<commit_msg>Fix missing loop break suite initializer func.<commit_after>package obj\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\ntype Suite struct {\n\t\/\/ FIXME this type is still, yes still, wrong: you need a factory for machines,\n\t\/\/ because otherwise you're gonna have a bad day if the same type comes up twice in a stack.\n\t\/\/ We really do need some declaritive type thing for \"machinestrategy\" here, separate from the machine impl.\n\tmappings map[reflect.Type]MarshalMachine\n}\n\n\/*\n\tFolds another behavior dispatch into the suite.\n\n\tThe `typeHint` parameter is an instance of the type you want dispatch\n\tto use this machine for. A zero instance is fine.\n\tThus, calls to this method usually resemble the following:\n\n\t\tsuite.Add(YourType{}, &SomeMachineImpl{})\n*\/\nfunc (s *Suite) Add(typeHint interface{}, mach MarshalMachine) {\n\tif s.mappings == nil {\n\t\ts.mappings = make(map[reflect.Type]MarshalMachine)\n\t}\n\trt := reflect.TypeOf(typeHint)\n\tfor rt.Kind() == reflect.Ptr {\n\t\trt = rt.Elem()\n\t}\n\ts.mappings[rt] = mach\n}\n\nfunc (s *Suite) pickMarshalMachine(valp interface{}) MarshalMachine {\n\tmach := s.maybePickMarshalMachine(valp)\n\tif mach == nil {\n\t\tpanic(fmt.Errorf(\"no machine available in suite for type %T\", valp))\n\t}\n\treturn mach\n}\n\n\/*\n\tPicks an unmarshal machine, returning the custom impls for any\n\tcommon\/primitive types, and advanced machines where structs get involved.\n\n\tThe argument should be the address of the actual value of interest.\n\n\tReturns nil if there is no marshal machine in the suite for this type.\n*\/\nfunc (s *Suite) maybePickMarshalMachine(valp interface{}) MarshalMachine {\n\t\/\/ TODO : we can use type switches to do some primitives efficiently here\n\t\/\/ before we turn to the reflective path.\n\tval_rt := reflect.ValueOf(valp).Elem().Type()\n\treturn s.maybeMarshalMachineForType(val_rt)\n}\n\nfunc (s *Suite) marshalMachineForType(rt reflect.Type) MarshalMachine {\n\tmach := s.maybeMarshalMachineForType(rt)\n\tif mach == nil {\n\t\tpanic(fmt.Errorf(\"no machine available in suite for type %s\", rt.Name()))\n\t}\n\treturn mach\n}\n\n\/*\n\tLike `pickMarshalMachine`, but requiring only the reflect type info.\n\tThis is useable when you only have the type info available (rather than an instance);\n\tthis comes up when for example looking up the machine to use for all values\n\tin a slice based on the slice type info.\n\n\t(Using an instance may be able to take faster, non-reflective paths for\n\tprimitive values.)\n\n\tIn contrast to the method that takes a `valp interface{}`, this type info\n\tis understood to already be dereferenced.\n\n\tReturns nil if there is no marshal machine in the suite for this type.\n*\/\nfunc (s *Suite) maybeMarshalMachineForType(rt reflect.Type) MarshalMachine {\n\tswitch rt.Kind() {\n\tcase reflect.Bool:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.String:\n\t\treturn &MarshalMachineLiteral{}\n\tcase reflect.Slice:\n\t\t\/\/ TODO also bytes should get a special path\n\t\treturn &MarshalMachineSliceWildcard{}\n\tcase reflect.Array:\n\t\treturn &MarshalMachineSliceWildcard{}\n\tcase reflect.Map:\n\t\t\/\/ Consider (for efficiency in happy paths):\n\t\t\/\/\t\tswitch v2 := v.(type) {\n\t\t\/\/\t\tcase map[string]interface{}:\n\t\t\/\/\t\t\t_ = v2\n\t\t\/\/\t\t\treturn nil \/\/ TODO special\n\t\t\/\/\t\tdefault:\n\t\t\/\/\t\t\treturn &MarshalMachineMapWildcard{}\n\t\t\/\/\t\t}\n\t\t\/\/ but, it's not clear how we'd cache this.\n\t\t\/\/ possibly we should attach this whole method to the Driver,\n\t\t\/\/ so it can have state for cache.\n\t\treturn &MarshalMachineMapWildcard{}\n\tcase reflect.Ptr:\n\t\t\/\/ REVIEW: doing this once, fine. but unbounded? questionable.\n\t\treturn s.maybeMarshalMachineForType(rt.Elem())\n\tcase reflect.Struct:\n\t\treturn s.mappings[rt]\n\tcase reflect.Interface:\n\t\tpanic(\"TODO iface\")\n\tcase reflect.Func:\n\t\tpanic(\"TODO func\") \/\/ hey, if we can find it in the suite\n\tdefault:\n\t\tpanic(fmt.Errorf(\"excursion %s\", rt.Kind()))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fastclient\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/valyala\/fasthttp\"\n)\n\nvar (\n\thttpClientRequestTimeout = flag.Duration(\"httpClientRequestTimeout\", time.Second*10, \"Maximum time to wait for http response\")\n\thttpClientKeepAlivePeriod = flag.Duration(\"httpClientKeepAlivePeriod\", time.Second*5, \"Interval for sending keep-alive messages on keepalive connections. Zero disables keep-alive messages\")\n\thttpClientReadBufferSize = flag.Int(\"httpClientReadBufferSize\", 8*1024, \"Per-connection read buffer size for httpclient\")\n\thttpClientWriteBufferSize = flag.Int(\"httpClientWriteBufferSize\", 8*1024, \"Per-connection write buffer size for httpclient\")\n)\n\nconst (\n\tjobCapacity = 10000\n\tmaxIdleConnDuration = time.Second\n\tmaxConns = 1<<31 - 1\n)\n\ntype Client struct {\n\t*fasthttp.HostClient\n\twg sync.WaitGroup\n\n\tsync.Mutex\n\tJobsch chan struct{}\n\tworkers int\n}\n\nvar (\n\trequest *fasthttp.Request\n\tt time.Duration\n)\n\nfunc New(r *fasthttp.Request, timeout time.Duration) *Client {\n\tregisterMetrics()\n\trequest = r\n\tt = timeout\n\n\taddr, isTLS := acquireAddr(request)\n\treturn &Client{\n\t\tJobsch: make(chan struct{}, jobCapacity),\n\t\tHostClient: &fasthttp.HostClient{\n\t\t\tAddr: addr,\n\t\t\tIsTLS: isTLS,\n\t\t\tDial: dial,\n\t\t\tMaxIdleConnDuration: maxIdleConnDuration,\n\t\t\tMaxConns: maxConns,\n\t\t},\n\t}\n}\n\nfunc (c *Client) Amount() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn c.workers\n}\n\nfunc (c *Client) Overflow() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn len(c.Jobsch)\n}\n\nfunc drainChan(ch chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *Client) Flush() {\n\tdrainChan(c.Jobsch)\n\tclose(c.Jobsch)\n\n\tc.wg.Wait()\n\tflushMetrics()\n\tc.workers = 0\n\tc.Jobsch = make(chan struct{}, jobCapacity)\n}\n\nfunc (c *Client) RunWorkers(n int) {\n\tfor i := 0; i < n; i++ {\n\t\tc.wg.Add(1)\n\t\tgo func() {\n\t\t\tc.Lock()\n\t\t\tc.workers++\n\t\t\tc.Unlock()\n\n\t\t\tc.run()\n\t\t\tc.wg.Done()\n\t\t}()\n\t}\n}\n\nfunc (c *Client) run() {\n\tvar resp fasthttp.Response\n\tr := new(fasthttp.Request)\n\trequest.CopyTo(r)\n\tfor range c.Jobsch {\n\t\ts := time.Now()\n\t\terr := c.DoTimeout(r, &resp, t)\n\t\tif err != nil {\n\t\t\tif err == fasthttp.ErrTimeout {\n\t\t\t\ttimeouts.Inc()\n\t\t\t}\n\t\t\terrors.Inc()\n\t\t} else {\n\t\t\trequestSuccess.Inc()\n\t\t}\n\t\trequestDuration.Observe(float64(time.Since(s).Seconds()))\n\t\trequestSum.Inc()\n\t}\n}\n\ntype hostConn struct {\n\tnet.Conn\n\taddr string\n\tclosed uint32\n\tconnOpen prometheus.Gauge\n\treadError prometheus.Counter\n\twriteError prometheus.Counter\n\tbytesWritten prometheus.Counter\n\tbytesRead prometheus.Counter\n}\n\nfunc dial(addr string) (net.Conn, error) {\n\tconn, err := fasthttp.DialTimeout(addr, *httpClientRequestTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = setupTCPConn(conn); err != nil {\n\t\tconnError.Inc()\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\tconnOpen.Inc()\n\treturn &hostConn{\n\t\tConn: conn,\n\t\taddr: addr,\n\t\tconnOpen: connOpen,\n\t\treadError: readError,\n\t\twriteError: writeError,\n\t\tbytesWritten: bytesWritten,\n\t\tbytesRead: bytesRead,\n\t}, nil\n}\n\nfunc setupTCPConn(conn net.Conn) error {\n\tc, ok := conn.(*net.TCPConn)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tvar err error\n\tif *httpClientReadBufferSize > 0 {\n\t\tif err = c.SetReadBuffer(*httpClientReadBufferSize); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif *httpClientWriteBufferSize > 0 {\n\t\tif err = c.SetWriteBuffer(*httpClientWriteBufferSize); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif *httpClientKeepAlivePeriod > 0 {\n\t\tif err = c.SetKeepAlive(true); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = c.SetKeepAlivePeriod(*httpClientKeepAlivePeriod); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (hc *hostConn) Close() error {\n\tif atomic.AddUint32(&hc.closed, 1) == 1 {\n\t\thc.connOpen.Dec()\n\t}\n\n\treturn hc.Conn.Close()\n}\n\nfunc (hc *hostConn) Write(p []byte) (int, error) {\n\tn, err := hc.Conn.Write(p)\n\thc.bytesWritten.Add(float64(n))\n\tif err != nil {\n\t\thc.writeError.Inc()\n\t}\n\treturn n, err\n}\n\nfunc (hc *hostConn) Read(p []byte) (int, error) {\n\tn, err := hc.Conn.Read(p)\n\thc.bytesRead.Add(float64(n))\n\tif err != nil && err != io.EOF {\n\t\thc.readError.Inc()\n\t}\n\treturn n, err\n}\n\nfunc acquireAddr(req *fasthttp.Request) (string, bool) {\n\taddr := string(req.URI().Host())\n\tif len(addr) == 0 {\n\t\tlog.Fatalf(\"address cannot be empty\")\n\t}\n\tisTLS := string(request.URI().Scheme()) == \"https\"\n\ttmp := strings.SplitN(addr, \":\", 2)\n\tif len(tmp) != 2 {\n\t\tport := \":80\"\n\t\tif isTLS {\n\t\t\tport = \":443\"\n\t\t}\n\t\treturn tmp[0] + port, isTLS\n\t}\n\tport := tmp[1]\n\tportInt, err := strconv.Atoi(port)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot parse port %q of addr %q: %s\", port, addr, err)\n\t}\n\tif portInt < 0 {\n\t\tlog.Fatalf(\"upstreamHosts port %d cannot be negative: %q\", portInt, addr)\n\t}\n\n\treturn addr, isTLS\n}\n<commit_msg>rm global variables<commit_after>package fastclient\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/valyala\/fasthttp\"\n)\n\nvar (\n\thttpClientRequestTimeout = flag.Duration(\"httpClientRequestTimeout\", time.Second*10, \"Maximum time to wait for http response\")\n\thttpClientKeepAlivePeriod = flag.Duration(\"httpClientKeepAlivePeriod\", time.Second*5, \"Interval for sending keep-alive messages on keepalive connections. Zero disables keep-alive messages\")\n\thttpClientReadBufferSize = flag.Int(\"httpClientReadBufferSize\", 8*1024, \"Per-connection read buffer size for httpclient\")\n\thttpClientWriteBufferSize = flag.Int(\"httpClientWriteBufferSize\", 8*1024, \"Per-connection write buffer size for httpclient\")\n)\n\nconst (\n\tjobCapacity = 10000\n\tmaxIdleConnDuration = time.Second\n\tmaxConns = 1<<31 - 1\n)\n\ntype Client struct {\n\t*fasthttp.HostClient\n\twg sync.WaitGroup\n\ttimeout time.Duration\n\trequest *fasthttp.Request\n\n\tsync.Mutex\n\tJobsch chan struct{}\n\tworkers int\n}\n\nfunc New(request *fasthttp.Request, timeout time.Duration) *Client {\n\tregisterMetrics()\n\taddr, isTLS := acquireAddr(request)\n\treturn &Client{\n\t\tJobsch: make(chan struct{}, jobCapacity),\n\t\ttimeout: timeout,\n\t\trequest: request,\n\t\tHostClient: &fasthttp.HostClient{\n\t\t\tAddr: addr,\n\t\t\tIsTLS: isTLS,\n\t\t\tDial: dial,\n\t\t\tMaxIdleConnDuration: maxIdleConnDuration,\n\t\t\tMaxConns: maxConns,\n\t\t},\n\t}\n}\n\nfunc (c *Client) Amount() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn c.workers\n}\n\nfunc (c *Client) Overflow() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn len(c.Jobsch)\n}\n\nfunc drainChan(ch chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *Client) Flush() {\n\tdrainChan(c.Jobsch)\n\tclose(c.Jobsch)\n\n\tc.wg.Wait()\n\tflushMetrics()\n\tc.workers = 0\n\tc.Jobsch = make(chan struct{}, jobCapacity)\n}\n\nfunc (c *Client) RunWorkers(n int) {\n\tfor i := 0; i < n; i++ {\n\t\tc.wg.Add(1)\n\t\tgo func() {\n\t\t\tc.Lock()\n\t\t\tc.workers++\n\t\t\tc.Unlock()\n\n\t\t\tc.run()\n\t\t\tc.wg.Done()\n\t\t}()\n\t}\n}\n\nfunc (c *Client) run() {\n\tvar resp fasthttp.Response\n\tr := new(fasthttp.Request)\n\tc.request.CopyTo(r)\n\tfor range c.Jobsch {\n\t\ts := time.Now()\n\t\terr := c.DoTimeout(r, &resp, c.timeout)\n\t\tif err != nil {\n\t\t\tif err == fasthttp.ErrTimeout {\n\t\t\t\ttimeouts.Inc()\n\t\t\t}\n\t\t\terrors.Inc()\n\t\t} else {\n\t\t\trequestSuccess.Inc()\n\t\t}\n\t\trequestDuration.Observe(float64(time.Since(s).Seconds()))\n\t\trequestSum.Inc()\n\t}\n}\n\ntype hostConn struct {\n\tnet.Conn\n\taddr string\n\tclosed uint32\n\tconnOpen prometheus.Gauge\n\treadError prometheus.Counter\n\twriteError prometheus.Counter\n\tbytesWritten prometheus.Counter\n\tbytesRead prometheus.Counter\n}\n\nfunc dial(addr string) (net.Conn, error) {\n\tconn, err := fasthttp.DialTimeout(addr, *httpClientRequestTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = setupTCPConn(conn); err != nil {\n\t\tconnError.Inc()\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\tconnOpen.Inc()\n\treturn &hostConn{\n\t\tConn: conn,\n\t\taddr: addr,\n\t\tconnOpen: connOpen,\n\t\treadError: readError,\n\t\twriteError: writeError,\n\t\tbytesWritten: bytesWritten,\n\t\tbytesRead: bytesRead,\n\t}, nil\n}\n\nfunc setupTCPConn(conn net.Conn) error {\n\tc, ok := conn.(*net.TCPConn)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tvar err error\n\tif *httpClientReadBufferSize > 0 {\n\t\tif err = c.SetReadBuffer(*httpClientReadBufferSize); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif *httpClientWriteBufferSize > 0 {\n\t\tif err = c.SetWriteBuffer(*httpClientWriteBufferSize); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif *httpClientKeepAlivePeriod > 0 {\n\t\tif err = c.SetKeepAlive(true); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = c.SetKeepAlivePeriod(*httpClientKeepAlivePeriod); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (hc *hostConn) Close() error {\n\tif atomic.AddUint32(&hc.closed, 1) == 1 {\n\t\thc.connOpen.Dec()\n\t}\n\n\treturn hc.Conn.Close()\n}\n\nfunc (hc *hostConn) Write(p []byte) (int, error) {\n\tn, err := hc.Conn.Write(p)\n\thc.bytesWritten.Add(float64(n))\n\tif err != nil {\n\t\thc.writeError.Inc()\n\t}\n\treturn n, err\n}\n\nfunc (hc *hostConn) Read(p []byte) (int, error) {\n\tn, err := hc.Conn.Read(p)\n\thc.bytesRead.Add(float64(n))\n\tif err != nil && err != io.EOF {\n\t\thc.readError.Inc()\n\t}\n\treturn n, err\n}\n\nfunc acquireAddr(req *fasthttp.Request) (string, bool) {\n\taddr := string(req.URI().Host())\n\tif len(addr) == 0 {\n\t\tlog.Fatalf(\"address cannot be empty\")\n\t}\n\tisTLS := string(req.URI().Scheme()) == \"https\"\n\ttmp := strings.SplitN(addr, \":\", 2)\n\tif len(tmp) != 2 {\n\t\tport := \":80\"\n\t\tif isTLS {\n\t\t\tport = \":443\"\n\t\t}\n\t\treturn tmp[0] + port, isTLS\n\t}\n\tport := tmp[1]\n\tportInt, err := strconv.Atoi(port)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot parse port %q of addr %q: %s\", port, addr, err)\n\t}\n\tif portInt < 0 {\n\t\tlog.Fatalf(\"upstreamHosts port %d cannot be negative: %q\", portInt, addr)\n\t}\n\n\treturn addr, isTLS\n}\n<|endoftext|>"} {"text":"<commit_before>package detector\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n\t\"github.com\/hashicorp\/hcl\/hcl\/token\"\n\t\"github.com\/wata727\/tflint\/config\"\n\t\"github.com\/wata727\/tflint\/evaluator\"\n\t\"github.com\/wata727\/tflint\/issue\"\n\t\"github.com\/wata727\/tflint\/logger\"\n)\n\ntype Detector struct {\n\tListMap map[string]*ast.ObjectList\n\tConfig *config.Config\n\tAwsClient *config.AwsClient\n\tEvalConfig *evaluator.Evaluator\n\tLogger *logger.Logger\n\tError bool\n}\n\nvar detectors = map[string]string{\n\t\"aws_instance_invalid_type\": \"DetectAwsInstanceInvalidType\",\n\t\"aws_instance_previous_type\": \"DetectAwsInstancePreviousType\",\n\t\"aws_instance_not_specified_iam_profile\": \"DetectAwsInstanceNotSpecifiedIamProfile\",\n\t\"aws_instance_default_standard_volume\": \"DetectAwsInstanceDefaultStandardVolume\",\n\t\"aws_db_instance_default_parameter_group\": \"DetectAwsDbInstanceDefaultParameterGroup\",\n\t\"aws_elasticache_cluster_default_parameter_group\": \"DetectAwsElasticacheClusterDefaultParameterGroup\",\n\t\"aws_instance_invalid_iam_profile\": \"DetectAwsInstanceInvalidIamProfile\",\n}\n\nfunc NewDetector(listMap map[string]*ast.ObjectList, c *config.Config) (*Detector, error) {\n\tevalConfig, err := evaluator.NewEvaluator(listMap, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Detector{\n\t\tListMap: listMap,\n\t\tConfig: c,\n\t\tAwsClient: c.NewAwsClient(),\n\t\tEvalConfig: evalConfig,\n\t\tLogger: logger.Init(c.Debug),\n\t\tError: false,\n\t}, nil\n}\n\nfunc hclLiteralToken(item *ast.ObjectItem, k string) (token.Token, error) {\n\tobjItems, err := hclObjectItems(item, k)\n\tif err != nil {\n\t\treturn token.Token{}, err\n\t}\n\n\tif v, ok := objItems[0].Val.(*ast.LiteralType); ok {\n\t\treturn v.Token, nil\n\t}\n\treturn token.Token{}, fmt.Errorf(\"ERROR: `%s` value is not literal\", k)\n}\n\nfunc hclObjectItems(item *ast.ObjectItem, k string) ([]*ast.ObjectItem, error) {\n\titems := item.Val.(*ast.ObjectType).List.Filter(k).Items\n\tif len(items) == 0 {\n\t\treturn []*ast.ObjectItem{}, fmt.Errorf(\"ERROR: key `%s` not found\", k)\n\t}\n\treturn items, nil\n}\n\nfunc IsKeyNotFound(item *ast.ObjectItem, k string) bool {\n\titems := item.Val.(*ast.ObjectType).List.Filter(k).Items\n\treturn len(items) == 0\n}\n\nfunc (d *Detector) Detect() []*issue.Issue {\n\tvar issues = []*issue.Issue{}\n\n\tfor ruleName, detectorMethod := range detectors {\n\t\tif d.Config.IgnoreRule[ruleName] {\n\t\t\td.Logger.Info(fmt.Sprintf(\"ignore rule `%s`\", ruleName))\n\t\t\tcontinue\n\t\t}\n\t\td.Logger.Info(fmt.Sprintf(\"detect by `%s`\", ruleName))\n\t\tmethod := reflect.ValueOf(d).MethodByName(detectorMethod)\n\t\tmethod.Call([]reflect.Value{reflect.ValueOf(&issues)})\n\n\t\tfor name, m := range d.EvalConfig.ModuleConfig {\n\t\t\tif d.Config.IgnoreModule[m.Source] {\n\t\t\t\td.Logger.Info(fmt.Sprintf(\"ignore module `%s`\", name))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.Logger.Info(fmt.Sprintf(\"detect module `%s`\", name))\n\t\t\tmoduleDetector := &Detector{\n\t\t\t\tListMap: m.ListMap,\n\t\t\t\tConfig: d.Config,\n\t\t\t\tEvalConfig: &evaluator.Evaluator{\n\t\t\t\t\tConfig: m.Config,\n\t\t\t\t},\n\t\t\t\tLogger: d.Logger,\n\t\t\t}\n\t\t\tmethod := reflect.ValueOf(moduleDetector).MethodByName(detectorMethod)\n\t\t\tmethod.Call([]reflect.Value{reflect.ValueOf(&issues)})\n\t\t}\n\t}\n\n\treturn issues\n}\n\nfunc (d *Detector) evalToString(v string) (string, error) {\n\tev, err := d.EvalConfig.Eval(strings.Trim(v, \"\\\"\"))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if reflect.TypeOf(ev).Kind() != reflect.String {\n\t\treturn \"\", fmt.Errorf(\"ERROR: `%s` is not string\", v)\n\t} else if ev.(string) == \"[NOT EVALUABLE]\" {\n\t\treturn \"\", fmt.Errorf(\"ERROR; `%s` is not evaluable\", v)\n\t}\n\n\treturn ev.(string), nil\n}\n<commit_msg>add AwsClient to moduleDetector<commit_after>package detector\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n\t\"github.com\/hashicorp\/hcl\/hcl\/token\"\n\t\"github.com\/wata727\/tflint\/config\"\n\t\"github.com\/wata727\/tflint\/evaluator\"\n\t\"github.com\/wata727\/tflint\/issue\"\n\t\"github.com\/wata727\/tflint\/logger\"\n)\n\ntype Detector struct {\n\tListMap map[string]*ast.ObjectList\n\tConfig *config.Config\n\tAwsClient *config.AwsClient\n\tEvalConfig *evaluator.Evaluator\n\tLogger *logger.Logger\n\tError bool\n}\n\nvar detectors = map[string]string{\n\t\"aws_instance_invalid_type\": \"DetectAwsInstanceInvalidType\",\n\t\"aws_instance_previous_type\": \"DetectAwsInstancePreviousType\",\n\t\"aws_instance_not_specified_iam_profile\": \"DetectAwsInstanceNotSpecifiedIamProfile\",\n\t\"aws_instance_default_standard_volume\": \"DetectAwsInstanceDefaultStandardVolume\",\n\t\"aws_db_instance_default_parameter_group\": \"DetectAwsDbInstanceDefaultParameterGroup\",\n\t\"aws_elasticache_cluster_default_parameter_group\": \"DetectAwsElasticacheClusterDefaultParameterGroup\",\n\t\"aws_instance_invalid_iam_profile\": \"DetectAwsInstanceInvalidIamProfile\",\n}\n\nfunc NewDetector(listMap map[string]*ast.ObjectList, c *config.Config) (*Detector, error) {\n\tevalConfig, err := evaluator.NewEvaluator(listMap, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Detector{\n\t\tListMap: listMap,\n\t\tConfig: c,\n\t\tAwsClient: c.NewAwsClient(),\n\t\tEvalConfig: evalConfig,\n\t\tLogger: logger.Init(c.Debug),\n\t\tError: false,\n\t}, nil\n}\n\nfunc hclLiteralToken(item *ast.ObjectItem, k string) (token.Token, error) {\n\tobjItems, err := hclObjectItems(item, k)\n\tif err != nil {\n\t\treturn token.Token{}, err\n\t}\n\n\tif v, ok := objItems[0].Val.(*ast.LiteralType); ok {\n\t\treturn v.Token, nil\n\t}\n\treturn token.Token{}, fmt.Errorf(\"ERROR: `%s` value is not literal\", k)\n}\n\nfunc hclObjectItems(item *ast.ObjectItem, k string) ([]*ast.ObjectItem, error) {\n\titems := item.Val.(*ast.ObjectType).List.Filter(k).Items\n\tif len(items) == 0 {\n\t\treturn []*ast.ObjectItem{}, fmt.Errorf(\"ERROR: key `%s` not found\", k)\n\t}\n\treturn items, nil\n}\n\nfunc IsKeyNotFound(item *ast.ObjectItem, k string) bool {\n\titems := item.Val.(*ast.ObjectType).List.Filter(k).Items\n\treturn len(items) == 0\n}\n\nfunc (d *Detector) Detect() []*issue.Issue {\n\tvar issues = []*issue.Issue{}\n\n\tfor ruleName, detectorMethod := range detectors {\n\t\tif d.Config.IgnoreRule[ruleName] {\n\t\t\td.Logger.Info(fmt.Sprintf(\"ignore rule `%s`\", ruleName))\n\t\t\tcontinue\n\t\t}\n\t\td.Logger.Info(fmt.Sprintf(\"detect by `%s`\", ruleName))\n\t\tmethod := reflect.ValueOf(d).MethodByName(detectorMethod)\n\t\tmethod.Call([]reflect.Value{reflect.ValueOf(&issues)})\n\n\t\tfor name, m := range d.EvalConfig.ModuleConfig {\n\t\t\tif d.Config.IgnoreModule[m.Source] {\n\t\t\t\td.Logger.Info(fmt.Sprintf(\"ignore module `%s`\", name))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.Logger.Info(fmt.Sprintf(\"detect module `%s`\", name))\n\t\t\tmoduleDetector := &Detector{\n\t\t\t\tListMap: m.ListMap,\n\t\t\t\tConfig: d.Config,\n\t\t\t\tAwsClient: d.AwsClient,\n\t\t\t\tEvalConfig: &evaluator.Evaluator{\n\t\t\t\t\tConfig: m.Config,\n\t\t\t\t},\n\t\t\t\tLogger: d.Logger,\n\t\t\t\tError: false,\n\t\t\t}\n\t\t\tmethod := reflect.ValueOf(moduleDetector).MethodByName(detectorMethod)\n\t\t\tmethod.Call([]reflect.Value{reflect.ValueOf(&issues)})\n\t\t}\n\t}\n\n\treturn issues\n}\n\nfunc (d *Detector) evalToString(v string) (string, error) {\n\tev, err := d.EvalConfig.Eval(strings.Trim(v, \"\\\"\"))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if reflect.TypeOf(ev).Kind() != reflect.String {\n\t\treturn \"\", fmt.Errorf(\"ERROR: `%s` is not string\", v)\n\t} else if ev.(string) == \"[NOT EVALUABLE]\" {\n\t\treturn \"\", fmt.Errorf(\"ERROR; `%s` is not evaluable\", v)\n\t}\n\n\treturn ev.(string), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package testcover\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"encoding\/binary\"\n)\n\nfunc Fuzz(data []byte) int {\n\tif len(data) < 20 {\n\t\treturn 0\n\t}\n\tx := binary.BigEndian.Uint32(data[12:])\n\tif x == 0x45839281 {\n\t\tbingo()\n\t}\n\tif data[10] == 0xfd && data[15] == 0x9a && data[17] == 0x71 {\n\t\tbingo()\n\t}\n\tswitch binary.LittleEndian.Uint32(data[10:]) {\n\tdefault:\n\t\tbingo()\n\tcase 0x12345678:\n\t\tbingo()\n\tcase 0x98765432:\n\t\tbingo()\n\t}\n\tswitch {\n\tcase binary.LittleEndian.Uint32(data[8:]) == 0x12345678:\n\t\tbingo()\n\tdefault:\n\t\tbingo()\n\tcase 0x98765432 == binary.BigEndian.Uint32(data[7:]):\n\t\tbingo()\n\t}\n\n\tswitch string(data[5:9]) {\n\tcase \"ABCD\":\n\t\tbingo()\n\tcase \"QWER\":\n\t\tbingo()\n\tcase \"ZXCV\":\n\t\tbingo()\n\t}\n\n\tn := binary.BigEndian.Uint32(data[0:4])\n\tif int(n) <= len(data)-4 {\n\t\ts := string(data[4 : 4+n])\n\t\tif s == \"eat this\" {\n\t\t\tbingo()\n\t\t}\n\t}\n\n\tif f := binary.BigEndian.Uint32(data[9:]) > 0xfffffffd; f {\n\t\tbingo()\n\t}\n\n\tmagic := uint32(data[5]) | uint32(data[6])<<8 | uint32(data[9])<<16 | uint32(data[11])<<24\n\tif magic == 0xabcd1234 {\n\t\tbingo()\n\t}\n\n\ttype Hdr struct {\n\t\tMagic [8]byte\n\t\tN uint32\n\t}\n\tvar hdr Hdr\n\tbinary.Read(bytes.NewReader(data), binary.LittleEndian, &hdr)\n\tif hdr.Magic == [8]byte{'m', 'a', 'g', 'i', 'c', 'h', 'd', 'r'} {\n\t\tbingo()\n\t}\n\n\ttype Name string\n\tname := Name(data[4:9])\n\tif name == \"12345\" {\n\t\tbingo()\n\t}\n\n\tif len(data) > 40 {\n\t\thash1 := sha1.Sum(data[0:20])\n\t\tvar hash2 [20]byte\n\t\tbinary.Read(bytes.NewReader(data[20:40]), binary.LittleEndian, &hash2)\n\t\tif hash1 == hash2 {\n\t\t\tbingo()\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc bingo() {\n\tif theFalse {\n\t\tbingo()\n\t}\n}\n\nvar theFalse = false\n<commit_msg>make afl-style coverage counters slightly finer-grained encoding\/xml matches \"CDATA[\" string byte-by-byte in a loop more coverage for small hit counts should make cracking of such code faster<commit_after>package testcover\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"encoding\/binary\"\n)\n\nfunc Fuzz(data []byte) int {\n\tif len(data) < 20 {\n\t\treturn 0\n\t}\n\tx := binary.BigEndian.Uint32(data[12:])\n\tif x == 0x45839281 {\n\t\tbingo()\n\t}\n\tif data[10] == 0xfd && data[15] == 0x9a && data[17] == 0x71 {\n\t\tbingo()\n\t}\n\tswitch binary.LittleEndian.Uint32(data[10:]) {\n\tdefault:\n\t\tbingo()\n\tcase 0x12345678:\n\t\tbingo()\n\tcase 0x98765432:\n\t\tbingo()\n\t}\n\tswitch {\n\tcase binary.LittleEndian.Uint32(data[8:]) == 0x12345678:\n\t\tbingo()\n\tdefault:\n\t\tbingo()\n\tcase 0x98765432 == binary.BigEndian.Uint32(data[7:]):\n\t\tbingo()\n\t}\n\n\tswitch string(data[5:9]) {\n\tcase \"ABCD\":\n\t\tbingo()\n\tcase \"QWER\":\n\t\tbingo()\n\tcase \"ZXCV\":\n\t\tbingo()\n\t}\n\n\tn := binary.BigEndian.Uint32(data[0:4])\n\tif int(n) <= len(data)-4 {\n\t\ts := string(data[4 : 4+n])\n\t\tif s == \"eat this\" {\n\t\t\tbingo()\n\t\t}\n\t}\n\n\tif f := binary.BigEndian.Uint32(data[9:]) > 0xfffffffd; f {\n\t\tbingo()\n\t}\n\n\tmagic := uint32(data[5]) | uint32(data[6])<<8 | uint32(data[9])<<16 | uint32(data[11])<<24\n\tif magic == 0xabcd1234 {\n\t\tbingo()\n\t}\n\n\ttype Hdr struct {\n\t\tMagic [8]byte\n\t\tN uint32\n\t}\n\tvar hdr Hdr\n\tbinary.Read(bytes.NewReader(data), binary.LittleEndian, &hdr)\n\tif hdr.Magic == [8]byte{'m', 'a', 'g', 'i', 'c', 'h', 'd', 'r'} {\n\t\tbingo()\n\t}\n\n\ttype Name string\n\tname := Name(data[4:9])\n\tif name == \"12345\" {\n\t\tbingo()\n\t}\n\n\tif len(data) > 40 {\n\t\thash1 := sha1.Sum(data[0:20])\n\t\tvar hash2 [20]byte\n\t\tbinary.Read(bytes.NewReader(data[20:40]), binary.LittleEndian, &hash2)\n\t\tif hash1 == hash2 {\n\t\t\tbingo()\n\t\t}\n\t}\n\n\tfor i := 0; i < 6; i++ {\n\t\tif data[i] != \"CDATA[\"[i] {\n\t\t\tgoto fail\n\t\t}\n\t}\n\tbingo()\nfail:\n\n\treturn 0\n}\n\nfunc bingo() {\n\tif theFalse {\n\t\tbingo()\n\t}\n}\n\nvar theFalse = false\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nfunc TestNodeRefreshableManagedResourceDynamicExpand_scaleOut(t *testing.T) {\n\tvar stateLock sync.RWMutex\n\n\taddr, err := ParseResourceAddress(\"aws_instance.foo\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tstate := &State{\n\t\tModules: []*ModuleState{\n\t\t\t&ModuleState{\n\t\t\t\tPath: rootModulePath,\n\t\t\t\tResources: map[string]*ResourceState{\n\t\t\t\t\t\"aws_instance.foo.0\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.1\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"bar\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tn := &NodeRefreshableManagedResource{\n\t\tNodeAbstractCountResource: &NodeAbstractCountResource{\n\t\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\t\tAddr: addr,\n\t\t\t\tConfig: m.Config().Resources[0],\n\t\t\t},\n\t\t},\n\t}\n\n\tg, err := n.DynamicExpand(&MockEvalContext{\n\t\tPathPath: []string{\"root\"},\n\t\tStateState: state,\n\t\tStateLock: &stateLock,\n\t})\n\n\tactual := g.StringWithNodeTypes()\n\texpected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\nroot - terraform.graphNodeRoot\n aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\n`\n\tif expected != actual {\n\t\tt.Fatalf(\"Expected:\\n%s\\nGot:\\n%s\", expected, actual)\n\t}\n}\n\nfunc TestNodeRefreshableManagedResourceDynamicExpand_scaleIn(t *testing.T) {\n\tvar stateLock sync.RWMutex\n\n\taddr, err := ParseResourceAddress(\"aws_instance.foo\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tstate := &State{\n\t\tModules: []*ModuleState{\n\t\t\t&ModuleState{\n\t\t\t\tPath: rootModulePath,\n\t\t\t\tResources: map[string]*ResourceState{\n\t\t\t\t\t\"aws_instance.foo.0\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.1\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"bar\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.2\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"baz\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.3\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"qux\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tn := &NodeRefreshableManagedResource{\n\t\tNodeAbstractCountResource: &NodeAbstractCountResource{\n\t\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\t\tAddr: addr,\n\t\t\t\tConfig: m.Config().Resources[0],\n\t\t\t},\n\t\t},\n\t}\n\n\tg, err := n.DynamicExpand(&MockEvalContext{\n\t\tPathPath: []string{\"root\"},\n\t\tStateState: state,\n\t\tStateLock: &stateLock,\n\t})\n\n\tactual := g.StringWithNodeTypes()\n\texpected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance\nroot - terraform.graphNodeRoot\n aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance\n`\n\tif expected != actual {\n\t\tt.Fatalf(\"Expected:\\n%s\\nGot:\\n%s\", expected, actual)\n\t}\n}\n\nfunc TestNodeRefreshableManagedResourceEvalTree_scaleOut(t *testing.T) {\n\tvar provider ResourceProvider\n\tvar state *InstanceState\n\tvar resourceConfig *ResourceConfig\n\n\taddr, err := ParseResourceAddress(\"aws_instance.foo[2]\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tn := &NodeRefreshableManagedResourceInstance{\n\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\tAddr: addr,\n\t\t\tConfig: m.Config().Resources[0],\n\t\t},\n\t}\n\n\tactual := n.EvalTree()\n\n\texpected := &EvalSequence{\n\t\tNodes: []EvalNode{\n\t\t\t&EvalInterpolate{\n\t\t\t\tConfig: n.Config.RawConfig.Copy(),\n\t\t\t\tResource: &Resource{\n\t\t\t\t\tName: addr.Name,\n\t\t\t\t\tType: addr.Type,\n\t\t\t\t\tCountIndex: addr.Index,\n\t\t\t\t},\n\t\t\t\tOutput: &resourceConfig,\n\t\t\t},\n\t\t\t&EvalGetProvider{\n\t\t\t\tName: n.ProvidedBy()[0],\n\t\t\t\tOutput: &provider,\n\t\t\t},\n\t\t\t&EvalValidateResource{\n\t\t\t\tProvider: &provider,\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResourceName: n.Config.Name,\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tResourceMode: n.Config.Mode,\n\t\t\t\tIgnoreWarnings: true,\n\t\t\t},\n\t\t\t&EvalReadState{\n\t\t\t\tName: addr.stateId(),\n\t\t\t\tOutput: &state,\n\t\t\t},\n\t\t\t&EvalDiff{\n\t\t\t\tName: addr.stateId(),\n\t\t\t\tInfo: &InstanceInfo{\n\t\t\t\t\tId: addr.stateId(),\n\t\t\t\t\tType: addr.Type,\n\t\t\t\t\tModulePath: normalizeModulePath(addr.Path),\n\t\t\t\t},\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResource: n.Config,\n\t\t\t\tProvider: &provider,\n\t\t\t\tState: &state,\n\t\t\t\tOutputState: &state,\n\t\t\t\tStub: true,\n\t\t\t},\n\t\t\t&EvalWriteState{\n\t\t\t\tName: addr.stateId(),\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tProvider: n.Config.Provider,\n\t\t\t\tDependencies: n.StateReferences(),\n\t\t\t\tState: &state,\n\t\t\t},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"Expected:\\n\\n%s\\nGot:\\n\\n%s\\n\", spew.Sdump(expected), spew.Sdump(actual))\n\t}\n}\n<commit_msg>core: Simplify TestNodeRefreshableManagedResourceEvalTree_scaleOut<commit_after>package terraform\n\nimport (\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nfunc TestNodeRefreshableManagedResourceDynamicExpand_scaleOut(t *testing.T) {\n\tvar stateLock sync.RWMutex\n\n\taddr, err := ParseResourceAddress(\"aws_instance.foo\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tstate := &State{\n\t\tModules: []*ModuleState{\n\t\t\t&ModuleState{\n\t\t\t\tPath: rootModulePath,\n\t\t\t\tResources: map[string]*ResourceState{\n\t\t\t\t\t\"aws_instance.foo.0\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.1\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"bar\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tn := &NodeRefreshableManagedResource{\n\t\tNodeAbstractCountResource: &NodeAbstractCountResource{\n\t\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\t\tAddr: addr,\n\t\t\t\tConfig: m.Config().Resources[0],\n\t\t\t},\n\t\t},\n\t}\n\n\tg, err := n.DynamicExpand(&MockEvalContext{\n\t\tPathPath: []string{\"root\"},\n\t\tStateState: state,\n\t\tStateLock: &stateLock,\n\t})\n\n\tactual := g.StringWithNodeTypes()\n\texpected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\nroot - terraform.graphNodeRoot\n aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\n`\n\tif expected != actual {\n\t\tt.Fatalf(\"Expected:\\n%s\\nGot:\\n%s\", expected, actual)\n\t}\n}\n\nfunc TestNodeRefreshableManagedResourceDynamicExpand_scaleIn(t *testing.T) {\n\tvar stateLock sync.RWMutex\n\n\taddr, err := ParseResourceAddress(\"aws_instance.foo\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tstate := &State{\n\t\tModules: []*ModuleState{\n\t\t\t&ModuleState{\n\t\t\t\tPath: rootModulePath,\n\t\t\t\tResources: map[string]*ResourceState{\n\t\t\t\t\t\"aws_instance.foo.0\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.1\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"bar\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.2\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"baz\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"aws_instance.foo.3\": &ResourceState{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tDeposed: []*InstanceState{\n\t\t\t\t\t\t\t&InstanceState{\n\t\t\t\t\t\t\t\tID: \"qux\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tn := &NodeRefreshableManagedResource{\n\t\tNodeAbstractCountResource: &NodeAbstractCountResource{\n\t\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\t\tAddr: addr,\n\t\t\t\tConfig: m.Config().Resources[0],\n\t\t\t},\n\t\t},\n\t}\n\n\tg, err := n.DynamicExpand(&MockEvalContext{\n\t\tPathPath: []string{\"root\"},\n\t\tStateState: state,\n\t\tStateLock: &stateLock,\n\t})\n\n\tactual := g.StringWithNodeTypes()\n\texpected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\naws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance\nroot - terraform.graphNodeRoot\n aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance\n aws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance\n`\n\tif expected != actual {\n\t\tt.Fatalf(\"Expected:\\n%s\\nGot:\\n%s\", expected, actual)\n\t}\n}\n\nfunc TestNodeRefreshableManagedResourceEvalTree_scaleOut(t *testing.T) {\n\taddr, err := ParseResourceAddress(\"aws_instance.foo[2]\")\n\tif err != nil {\n\t\tt.Fatalf(\"bad: %s\", err)\n\t}\n\n\tm := testModule(t, \"refresh-resource-scale-inout\")\n\n\tn := &NodeRefreshableManagedResourceInstance{\n\t\tNodeAbstractResource: &NodeAbstractResource{\n\t\t\tAddr: addr,\n\t\t\tConfig: m.Config().Resources[0],\n\t\t},\n\t}\n\n\tactual := n.EvalTree()\n\texpected := n.evalTreeManagedResourceNoState()\n\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"Expected:\\n\\n%s\\nGot:\\n\\n%s\\n\", spew.Sdump(expected), spew.Sdump(actual))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package dokugen\n\nimport (\n\t\"testing\"\n)\n\ntype SimpleRankedObject struct {\n\trank int\n\tid string\n}\n\nfunc (self *SimpleRankedObject) Rank() int {\n\treturn self.rank\n}\n\nfunc TestFiniteQueue(t *testing.T) {\n\tqueue := NewFiniteQueue(1, DIM)\n\tif queue == nil {\n\t\tt.Log(\"We didn't get a queue back from the constructor\")\n\t\tt.Fail()\n\t}\n\t\/\/Note that the first item does not fit in the first bucket on purpose.\n\tobjects := [...]*SimpleRankedObject{{3, \"a\"}, {4, \"b\"}, {4, \"c\"}, {5, \"d\"}}\n\tfor _, object := range objects {\n\t\tqueue.Insert(object)\n\t}\n\tfor _, obj := range objects {\n\t\tretrievedObj := queue.Get()\n\t\tif retrievedObj == nil {\n\t\t\tt.Log(\"We got back a nil before we were expecting to\")\n\t\t\tt.Fail()\n\t\t\tcontinue\n\t\t}\n\t\tif retrievedObj.Rank() != obj.Rank() {\n\t\t\tt.Log(\"We got back an object with the wrong rank: \", retrievedObj.Rank(), \" is not \", obj.Rank())\n\t\t\tt.Fail()\n\t\t}\n\t\tconvertedObj, _ := retrievedObj.(*SimpleRankedObject)\n\t\t\/\/We tried comparing addresses here, but they weren't the same. Why? Are we copying something somewhere?\n\t\tif convertedObj.id != obj.id {\n\t\t\t\/\/Note that technically the API doesn't require that items with the same rank come back out in the same order.\n\t\t\t\/\/So this test will fail even in some valid cases.\n\t\t\tt.Log(\"We didn't get back the objects we put in.\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\tif queue.Get() != nil {\n\t\tt.Log(\"We were able to get back more objects than what we put in.\")\n\t\tt.Fail()\n\t}\n\n\t\/\/Now test changing rank.\n\tfor _, obj := range objects {\n\t\tqueue.Insert(obj)\n\t}\n\n\tobjects[1].rank = 4\n\tqueue.Insert(objects[1])\n\t\/\/We'll sneak in a test for double-inserting here.\n\tqueue.Insert(objects[1])\n\t\/\/Keep track of our golden set, too.\n\ttemp := objects[1]\n\tobjects[1] = objects[2]\n\tobjects[2] = objects[3]\n\tobjects[3] = temp\n\n\tfor _, obj := range objects {\n\t\tretrievedObj := queue.Get()\n\t\tif retrievedObj == nil {\n\t\t\tt.Log(\"We got back a nil before we were expecting to\")\n\t\t\tt.Fail()\n\t\t\tcontinue\n\t\t}\n\t\tif retrievedObj.Rank() != obj.Rank() {\n\t\t\tt.Log(\"We got back an object with the wrong rank: \", retrievedObj.Rank(), \" is not \", obj.Rank())\n\t\t\tt.Fail()\n\t\t}\n\t\tconvertedObj, _ := retrievedObj.(*SimpleRankedObject)\n\t\t\/\/We tried comparing addresses here, but they weren't the same. Why? Are we copying something somewhere?\n\t\tif convertedObj.id != obj.id {\n\t\t\t\/\/Note that technically the API doesn't require that items with the same rank come back out in the same order.\n\t\t\t\/\/So this test will fail even in some valid cases.\n\t\t\tt.Log(\"We didn't get back the objects we put in.\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\tif queue.Get() != nil {\n\t\tt.Log(\"We were able to get back more objects than what we put in.\")\n\t\tt.Fail()\n\t}\n}\n<commit_msg>Nevermind, I didn't update the full test case. TESTS PASS now.<commit_after>package dokugen\n\nimport (\n\t\"testing\"\n)\n\ntype SimpleRankedObject struct {\n\trank int\n\tid string\n}\n\nfunc (self *SimpleRankedObject) Rank() int {\n\treturn self.rank\n}\n\nfunc TestFiniteQueue(t *testing.T) {\n\tqueue := NewFiniteQueue(1, DIM)\n\tif queue == nil {\n\t\tt.Log(\"We didn't get a queue back from the constructor\")\n\t\tt.Fail()\n\t}\n\t\/\/Note that the first item does not fit in the first bucket on purpose.\n\tobjects := [...]*SimpleRankedObject{{3, \"a\"}, {4, \"b\"}, {4, \"c\"}, {5, \"d\"}}\n\tfor _, object := range objects {\n\t\tqueue.Insert(object)\n\t}\n\tfor _, obj := range objects {\n\t\tretrievedObj := queue.Get()\n\t\tif retrievedObj == nil {\n\t\t\tt.Log(\"We got back a nil before we were expecting to\")\n\t\t\tt.Fail()\n\t\t\tcontinue\n\t\t}\n\t\tif retrievedObj.Rank() != obj.Rank() {\n\t\t\tt.Log(\"We got back an object with the wrong rank: \", retrievedObj.Rank(), \" is not \", obj.Rank())\n\t\t\tt.Fail()\n\t\t}\n\t\tconvertedObj, _ := retrievedObj.(*SimpleRankedObject)\n\t\t\/\/We tried comparing addresses here, but they weren't the same. Why? Are we copying something somewhere?\n\t\tif convertedObj.id != obj.id {\n\t\t\t\/\/Note that technically the API doesn't require that items with the same rank come back out in the same order.\n\t\t\t\/\/So this test will fail even in some valid cases.\n\t\t\tt.Log(\"We didn't get back the objects we put in.\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\tif queue.Get() != nil {\n\t\tt.Log(\"We were able to get back more objects than what we put in.\")\n\t\tt.Fail()\n\t}\n\n\t\/\/Now test changing rank.\n\tfor _, obj := range objects {\n\t\tqueue.Insert(obj)\n\t}\n\n\tobjects[1].rank = 6\n\tqueue.Insert(objects[1])\n\t\/\/We'll sneak in a test for double-inserting here.\n\tqueue.Insert(objects[1])\n\t\/\/Keep track of our golden set, too.\n\ttemp := objects[1]\n\tobjects[1] = objects[2]\n\tobjects[2] = objects[3]\n\tobjects[3] = temp\n\n\tfor _, obj := range objects {\n\t\tretrievedObj := queue.Get()\n\t\tif retrievedObj == nil {\n\t\t\tt.Log(\"We got back a nil before we were expecting to\")\n\t\t\tt.Fail()\n\t\t\tcontinue\n\t\t}\n\t\tif retrievedObj.Rank() != obj.Rank() {\n\t\t\tt.Log(\"We got back an object with the wrong rank: \", retrievedObj.Rank(), \" is not \", obj.Rank())\n\t\t\tt.Fail()\n\t\t}\n\t\tconvertedObj, _ := retrievedObj.(*SimpleRankedObject)\n\t\t\/\/We tried comparing addresses here, but they weren't the same. Why? Are we copying something somewhere?\n\t\tif convertedObj.id != obj.id {\n\t\t\t\/\/Note that technically the API doesn't require that items with the same rank come back out in the same order.\n\t\t\t\/\/So this test will fail even in some valid cases.\n\t\t\tt.Log(\"We didn't get back the objects we put in.\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\tif queue.Get() != nil {\n\t\tt.Log(\"We were able to get back more objects than what we put in.\")\n\t\tt.Fail()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nconst (\n\t\/\/ Name of this command.\n\tName string = \"logcatf\"\n\t\/\/ Version of this command.\n\tVersion string = \"0.1.0\"\n\t\/\/ DefaultFormat will be used if format wasn't specified.\n\tDefaultFormat string = \"%time %tag %priority %message\"\n)\n\n\/\/ Message has message strings.\nvar Message = map[string]string{\n\t\"commandDescription\": \"A command to format Android Logcat\",\n\t\"helpFormat\": `Format of output\n - Available keyword:\n %t (%time), %a (%tag), %p (%priority)\n %i (%pid), %I (%tid), %m (%message)\n - Example:\n adb logcat -v time | logcatf \"%t %4i %m\"\n adb logcat -v threadtime | logcatf \"%t, %a, %m\" > logcat.csv\n adb logcat -v threadtime | logcatf \"%t\" -o \"Exception\" -c \"adb shell screencap -p \/data\/local\/tmp\/screen.png\"\n - Default Format:\n \"%time %tag %priority %message\"`,\n\t\"helpTrigger\": \"regex to trigger a COMMAND.\",\n\t\"helpCommand\": \"COMMAND will be executed on regex mathed. In COMMAND, you can use parsed logcat as shell variables. ex) `\\\\${message}` You need to escape a dollar mark.\",\n\t\"helpEncode\": \"output character encode.\",\n\t\"helpToCsv\": \"output to CSV format. double-quote will be escaped.\",\n\t\"msgUnavailableKeyword\": \"error: %s is not available. Please check `Availavle Keyword:` on help.\",\n\t\"msgDuplicatedKeyword\": \"error: %s or %s is duplicated.\",\n}\n\nvar (\n\t\/\/ UTF8 represents encode `utf-8`\n\tUTF8 = \"utf-8\"\n)\n<commit_msg>updage help message<commit_after>package main\n\nconst (\n\t\/\/ Name of this command.\n\tName string = \"logcatf\"\n\t\/\/ Version of this command.\n\tVersion string = \"0.1.0\"\n\t\/\/ DefaultFormat will be used if format wasn't specified.\n\tDefaultFormat string = \"%time %tag %priority %message\"\n)\n\n\/\/ Message has message strings.\nvar Message = map[string]string{\n\t\"commandDescription\": \"A command line tool for format Android Logcat\",\n\t\"helpFormat\": `Format of output\n - Available keyword:\n %t (%time), %a (%tag), %p (%priority)\n %i (%pid), %I (%tid), %m (%message)\n - Example:\n adb logcat -v time | logcatf \"%t %4i %m\"\n adb logcat -v threadtime | logcatf \"%t, %a, %m\" --to-csv > logcat.csv\n adb logcat -v | logcatf -o \"Exception\" -c \"adb shell screencap -p \/data\/local\/tmp\/s.png\"\n - Default Format:\n \"%time %tag %priority %message\"`,\n\t\"helpTrigger\": \"regex to trigger a COMMAND.\",\n\t\"helpCommand\": \"COMMAND will be executed on regex mathed. In COMMAND, you can use parsed logcat as shell variables. ex) `\\\\${message}` You need to escape a dollar mark.\",\n\t\"helpEncode\": \"output character encode.\",\n\t\"helpToCsv\": \"output to CSV format. double-quote will be escaped.\",\n\t\"msgUnavailableKeyword\": \"error: %s is not available. Please check `Availavle Keyword:` on help.\",\n\t\"msgDuplicatedKeyword\": \"error: %s or %s is duplicated.\",\n}\n\nvar (\n\t\/\/ UTF8 represents encode `utf-8`\n\tUTF8 = \"utf-8\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/gorilla\/websocket\"\n\n\t\"github.com\/la5nta\/pat\/internal\/osutil\"\n\t\"github.com\/la5nta\/wl2k-go\/mailbox\"\n)\n\nconst KeepaliveInterval = 4 * time.Minute\n\n\/\/ WSConn represent one connection in the WSHub pool\ntype WSConn struct {\n\tconn *websocket.Conn\n\tout chan interface{}\n}\n\n\/\/ WSHub is a hub for broadcasting data to several websocket connections\ntype WSHub struct {\n\tmu sync.Mutex\n\tpool map[*WSConn]struct{}\n}\n\nfunc NewWSHub() *WSHub {\n\tw := &WSHub{pool: map[*WSConn]struct{}{}}\n\tgo w.watchMBox()\n\treturn w\n}\n\nfunc (w *WSHub) UpdateStatus() { w.WriteJSON(struct{ Status Status }{getStatus()}) }\nfunc (w *WSHub) WriteProgress(p Progress) { w.WriteJSON(struct{ Progress Progress }{p}) }\nfunc (w *WSHub) WriteNotification(n Notification) {\n\tw.WriteJSON(struct{ Notification Notification }{n})\n}\n\nfunc (w *WSHub) Prompt(p Prompt) {\n\tw.WriteJSON(struct{ Prompt Prompt }{p})\n\tgo func() { <-p.cancel; w.WriteJSON(struct{ PromptAbort Prompt }{p}) }()\n}\n\nfunc (w *WSHub) WriteJSON(v interface{}) {\n\tif w == nil {\n\t\treturn\n\t}\n\n\tw.mu.Lock()\n\tfor c := range w.pool {\n\t\tselect {\n\t\tcase c.out <- v:\n\t\tcase <-time.After(3 * time.Second):\n\t\t\tlog.Println(\"Closing one unresponsive web socket\")\n\t\t\tc.conn.Close()\n\t\t\tdelete(w.pool, c)\n\t\t}\n\t}\n\tw.mu.Unlock()\n}\n\nfunc (w *WSHub) ClientAddrs() []string {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\taddrs := make([]string, 0, len(w.pool))\n\tfor c := range w.pool {\n\t\taddrs = append(addrs, c.conn.RemoteAddr().String())\n\t}\n\treturn addrs\n}\n\nfunc (w *WSHub) watchMBox() {\n\t\/\/ Maximise ulimit -n:\n\t\/\/ fsnotify opens a file descriptor for every file in the directories it watches, which\n\t\/\/ may more files than the current soft limit. The is especially a problem on macOS which\n\t\/\/ has a default soft limit of only 256 files. Windows does not have a such a limit.\n\tif runtime.GOOS != \"windows\" {\n\t\tif err := osutil.RaiseOpenFileLimit(4096); err != nil {\n\t\t\tlog.Printf(\"Unable to raise open file limit: %v\", err)\n\t\t}\n\t}\n\n\tfsWatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Println(\"Unable to start fs watcher: \", err)\n\t} else {\n\t\tp := path.Join(mbox.MBoxPath, mailbox.DIR_INBOX)\n\t\tif err := fsWatcher.Add(p); err != nil {\n\t\t\tlog.Printf(\"Unable to add path '%s' to fs watcher: %s\", p, err)\n\t\t}\n\n\t\t\/\/ These will probably fail if the first failed, but it's not important to log all.\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_OUTBOX))\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_SENT))\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_ARCHIVE))\n\t\tdefer fsWatcher.Close()\n\t}\n\n\tfor {\n\t\tselect {\n\t\t\/\/ Filesystem events\n\t\tcase <-fsWatcher.Events:\n\t\t\tdrainEvents(fsWatcher)\n\t\t\twebsocketHub.WriteJSON(struct {\n\t\t\t\tUpdateMailbox bool\n\t\t\t}{true})\n\t\tcase err := <-fsWatcher.Errors:\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n\n\/\/ Handle adds a new websocket to the hub\n\/\/\n\/\/ It will block until the client either stops responding or closes the connection.\nfunc (w *WSHub) Handle(conn *websocket.Conn) {\n\tc := &WSConn{\n\t\tconn: conn,\n\t\tout: make(chan interface{}, 1),\n\t}\n\n\tw.mu.Lock()\n\tw.pool[c] = struct{}{}\n\tw.mu.Unlock()\n\n\t\/\/ Initial status update\n\t\/\/ (broadcasted as it includes info to other clients about this new one)\n\tw.UpdateStatus()\n\n\tquit := wsReadLoop(conn)\n\n\tlines, done, err := tailFile(fOptions.LogPath)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer close(done)\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(KeepaliveInterval):\n\t\t\tc.conn.WriteJSON(struct {\n\t\t\t\tPing bool\n\t\t\t}{true})\n\t\tcase line := <-lines:\n\t\t\tc.conn.WriteJSON(struct {\n\t\t\t\tLogLine string\n\t\t\t}{string(line)})\n\t\tcase v := <-c.out:\n\t\t\terr := c.conn.WriteJSON(v)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\tcase <-quit:\n\t\t\t\/\/ The read loop failed\/disconnected. Remove from hub.\n\t\t\tc.conn.Close()\n\t\t\tw.mu.Lock()\n\t\t\tdelete(w.pool, c)\n\t\t\tw.mu.Unlock()\n\t\t\tw.UpdateStatus()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc drainEvents(w *fsnotify.Watcher) {\n\tfor {\n\t\tselect {\n\t\tcase <-w.Events:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Expects the file to never get renamed\/truncated or deleted\nfunc tailFile(path string) (<-chan []byte, chan<- struct{}, error) {\n\tlines := make(chan []byte)\n\tdone := make(chan struct{})\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgo func() {\n\t\trd := bufio.NewReader(file)\n\t\tfor {\n\t\t\tdata, _, err := rd.ReadLine()\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tfile.Close()\n\t\t\t\treturn\n\t\t\tcase lines <- data:\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn lines, done, nil\n}\n\nfunc handleWSMessage(v map[string]json.RawMessage) {\n\traw, ok := v[\"prompt_response\"]\n\tif !ok {\n\t\treturn\n\t}\n\tvar resp PromptResponse\n\tjson.Unmarshal(raw, &resp)\n\tpromptHub.Respond(resp.ID, resp.Value, resp.Err)\n}\n\nfunc wsReadLoop(c *websocket.Conn) <-chan struct{} {\n\tquit := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tv := map[string]json.RawMessage{}\n\t\t\terr := c.ReadJSON(&v)\n\t\t\tif err != nil {\n\t\t\t\tclose(quit)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := v[\"Pong\"]; ok {\n\t\t\t\t\/\/ For Keepalive only. Could use a delayed ping response to detect dead connections in the future.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo handleWSMessage(v)\n\t\t}\n\t}()\n\treturn quit\n}\n<commit_msg>Improve mailbox fsnotify handling<commit_after>\/\/ Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/gorilla\/websocket\"\n\n\t\"github.com\/la5nta\/pat\/internal\/osutil\"\n\t\"github.com\/la5nta\/wl2k-go\/mailbox\"\n)\n\nconst KeepaliveInterval = 4 * time.Minute\n\n\/\/ WSConn represent one connection in the WSHub pool\ntype WSConn struct {\n\tconn *websocket.Conn\n\tout chan interface{}\n}\n\n\/\/ WSHub is a hub for broadcasting data to several websocket connections\ntype WSHub struct {\n\tmu sync.Mutex\n\tpool map[*WSConn]struct{}\n}\n\nfunc NewWSHub() *WSHub {\n\tw := &WSHub{pool: map[*WSConn]struct{}{}}\n\tgo w.watchMBox()\n\treturn w\n}\n\nfunc (w *WSHub) UpdateStatus() { w.WriteJSON(struct{ Status Status }{getStatus()}) }\nfunc (w *WSHub) WriteProgress(p Progress) { w.WriteJSON(struct{ Progress Progress }{p}) }\nfunc (w *WSHub) WriteNotification(n Notification) {\n\tw.WriteJSON(struct{ Notification Notification }{n})\n}\n\nfunc (w *WSHub) Prompt(p Prompt) {\n\tw.WriteJSON(struct{ Prompt Prompt }{p})\n\tgo func() { <-p.cancel; w.WriteJSON(struct{ PromptAbort Prompt }{p}) }()\n}\n\nfunc (w *WSHub) WriteJSON(v interface{}) {\n\tif w == nil {\n\t\treturn\n\t}\n\n\tw.mu.Lock()\n\tfor c := range w.pool {\n\t\tselect {\n\t\tcase c.out <- v:\n\t\tcase <-time.After(3 * time.Second):\n\t\t\tlog.Println(\"Closing one unresponsive web socket\")\n\t\t\tc.conn.Close()\n\t\t\tdelete(w.pool, c)\n\t\t}\n\t}\n\tw.mu.Unlock()\n}\n\nfunc (w *WSHub) ClientAddrs() []string {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\taddrs := make([]string, 0, len(w.pool))\n\tfor c := range w.pool {\n\t\taddrs = append(addrs, c.conn.RemoteAddr().String())\n\t}\n\treturn addrs\n}\n\nfunc (w *WSHub) watchMBox() {\n\t\/\/ Maximise ulimit -n:\n\t\/\/ fsnotify opens a file descriptor for every file in the directories it watches, which\n\t\/\/ may more files than the current soft limit. The is especially a problem on macOS which\n\t\/\/ has a default soft limit of only 256 files. Windows does not have a such a limit.\n\tif runtime.GOOS != \"windows\" {\n\t\tif err := osutil.RaiseOpenFileLimit(4096); err != nil {\n\t\t\tlog.Printf(\"Unable to raise open file limit: %v\", err)\n\t\t}\n\t}\n\n\tfsWatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Println(\"Unable to start fs watcher: \", err)\n\t} else {\n\t\tp := path.Join(mbox.MBoxPath, mailbox.DIR_INBOX)\n\t\tif err := fsWatcher.Add(p); err != nil {\n\t\t\tlog.Printf(\"Unable to add path '%s' to fs watcher: %s\", p, err)\n\t\t}\n\n\t\t\/\/ These will probably fail if the first failed, but it's not important to log all.\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_OUTBOX))\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_SENT))\n\t\tfsWatcher.Add(path.Join(mbox.MBoxPath, mailbox.DIR_ARCHIVE))\n\t\tdefer fsWatcher.Close()\n\t}\n\n\tfor {\n\t\tselect {\n\t\t\/\/ Filesystem events\n\t\tcase e := <-fsWatcher.Events:\n\t\t\tif e.Op == fsnotify.Chmod {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ Make sure we don't send many of these events over a short period.\n\t\t\tdrainUntilSilence(fsWatcher, 100*time.Millisecond)\n\t\t\twebsocketHub.WriteJSON(struct {\n\t\t\t\tUpdateMailbox bool\n\t\t\t}{true})\n\t\tcase err := <-fsWatcher.Errors:\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n\n\/\/ Handle adds a new websocket to the hub\n\/\/\n\/\/ It will block until the client either stops responding or closes the connection.\nfunc (w *WSHub) Handle(conn *websocket.Conn) {\n\tc := &WSConn{\n\t\tconn: conn,\n\t\tout: make(chan interface{}, 1),\n\t}\n\n\tw.mu.Lock()\n\tw.pool[c] = struct{}{}\n\tw.mu.Unlock()\n\n\t\/\/ Initial status update\n\t\/\/ (broadcasted as it includes info to other clients about this new one)\n\tw.UpdateStatus()\n\n\tquit := wsReadLoop(conn)\n\n\tlines, done, err := tailFile(fOptions.LogPath)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer close(done)\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(KeepaliveInterval):\n\t\t\tc.conn.WriteJSON(struct {\n\t\t\t\tPing bool\n\t\t\t}{true})\n\t\tcase line := <-lines:\n\t\t\tc.conn.WriteJSON(struct {\n\t\t\t\tLogLine string\n\t\t\t}{string(line)})\n\t\tcase v := <-c.out:\n\t\t\terr := c.conn.WriteJSON(v)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\tcase <-quit:\n\t\t\t\/\/ The read loop failed\/disconnected. Remove from hub.\n\t\t\tc.conn.Close()\n\t\t\tw.mu.Lock()\n\t\t\tdelete(w.pool, c)\n\t\t\tw.mu.Unlock()\n\t\t\tw.UpdateStatus()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ drainEvents reads from w.Events and blocks until the channel has been silent for at least 50 ms.\nfunc drainUntilSilence(w *fsnotify.Watcher, silenceDur time.Duration) {\n\ttimer := time.NewTimer(silenceDur)\n\tdefer timer.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-w.Events:\n\t\t\tif !timer.Stop() {\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t\ttimer.Reset(silenceDur)\n\t\tcase <-timer.C:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Expects the file to never get renamed\/truncated or deleted\nfunc tailFile(path string) (<-chan []byte, chan<- struct{}, error) {\n\tlines := make(chan []byte)\n\tdone := make(chan struct{})\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgo func() {\n\t\trd := bufio.NewReader(file)\n\t\tfor {\n\t\t\tdata, _, err := rd.ReadLine()\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tfile.Close()\n\t\t\t\treturn\n\t\t\tcase lines <- data:\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn lines, done, nil\n}\n\nfunc handleWSMessage(v map[string]json.RawMessage) {\n\traw, ok := v[\"prompt_response\"]\n\tif !ok {\n\t\treturn\n\t}\n\tvar resp PromptResponse\n\tjson.Unmarshal(raw, &resp)\n\tpromptHub.Respond(resp.ID, resp.Value, resp.Err)\n}\n\nfunc wsReadLoop(c *websocket.Conn) <-chan struct{} {\n\tquit := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tv := map[string]json.RawMessage{}\n\t\t\terr := c.ReadJSON(&v)\n\t\t\tif err != nil {\n\t\t\t\tclose(quit)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := v[\"Pong\"]; ok {\n\t\t\t\t\/\/ For Keepalive only. Could use a delayed ping response to detect dead connections in the future.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo handleWSMessage(v)\n\t\t}\n\t}()\n\treturn quit\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ from time to time see https:\/\/godoc.org\/golang.org\/x\/crypto\/acme\/autocert\n\/\/ it isn't ready for usage now, but can simple code in future.\n\nimport (\n\t\"crypto\/rsa\"\n\n\tcryptorand \"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"errors\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hlandau\/acme\/acmeapi\"\n\t\"github.com\/hlandau\/acme\/acmeapi\/acmeutils\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"encoding\/pem\"\n)\n\nconst (\n\tSNI01_EXPIRE_TOKEN time.Duration = time.Minute * 10\n\tACME_DOMAIN_SUFFIX = \".acme.invalid\"\n)\n\ntype acmeStruct struct {\n\tserverAddress string\n\tprivateKey *rsa.PrivateKey\n\tclient *acmeapi.Client\n\n\tmutex *sync.Mutex\n\tacmeauthDomainsMutex *sync.Mutex\n\tacmeAuthDomains map[string]time.Time\n}\n\nfunc (this *acmeStruct) Init() {\n\tthis.client = &acmeapi.Client{\n\t\tAccountKey: this.privateKey,\n\t\tDirectoryURL: this.serverAddress,\n\t}\n\n\tthis.mutex = &sync.Mutex{}\n\n\tthis.acmeauthDomainsMutex = &sync.Mutex{}\n\tthis.acmeAuthDomains = make(map[string]time.Time)\n\tthis.CleanupTimer()\n}\n\nfunc (this *acmeStruct) RegisterEnsure(ctx context.Context) (err error) {\n\treg := &acmeapi.Registration{}\n\tfor i := 0; i < TRY_COUNT+1; i++ { \/\/ +1 count need for request latest agreement uri\n\t\treg.AgreementURI = reg.LatestAgreementURI\n\t\tif reg.AgreementURI != \"\" {\n\t\t\tlogrus.Info(\"Auto agree with terms:\", reg.LatestAgreementURI)\n\t\t}\n\t\terr = this.client.UpsertRegistration(reg, ctx)\n\t\tif reg.AgreementURI != \"\" && err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (this *acmeStruct) Cleanup() {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tnow := time.Now()\n\tfor token, expire := range this.acmeAuthDomains {\n\t\tif expire.Before(now) {\n\t\t\tdelete(this.acmeAuthDomains, token)\n\t\t}\n\t}\n}\n\nfunc (this *acmeStruct) CleanupTimer() {\n\tthis.Cleanup()\n\ttime.AfterFunc(SNI01_EXPIRE_TOKEN, this.Cleanup)\n}\n\nfunc (this *acmeStruct) CreateCertificate(domain string) (cert *tls.Certificate, err error) {\n\t\/\/ Check suffix for avoid mutex sync in DeleteAcmeAuthDomain\n\tif strings.HasSuffix(domain, ACME_DOMAIN_SUFFIX) {\n\t\tlogrus.Debugf(\"Detect auth-domain mode for domain '%v'\", domain)\n\t\tif this.DeleteAcmeAuthDomain(domain) {\n\t\t\tlogrus.Debugf(\"Return self-signed certificate for domain '%v'\", domain)\n\t\t\treturn this.createCertificateSelfSigned(domain)\n\t\t} else {\n\t\t\tlogrus.Debugf(\"Detect auth-domain is not present in list '%v'\", domain)\n\t\t\treturn nil, errors.New(\"Now allowed auth-domain\")\n\t\t}\n\t}\n\n\t\/\/ check about we serve the domain\n\tips, err := net.LookupIP(domain)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Can't lookup ip for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't lookup ip of the domain\")\n\t}\n\tisLocalIP := false\ncheckLocalIP:\n\tfor _, ip := range ips {\n\t\tfor _, localIP := range localIPs {\n\t\t\tif ip.Equal(localIP) {\n\t\t\t\tisLocalIP = true\n\t\t\t\tbreak checkLocalIP\n\t\t\t}\n\t\t}\n\t}\n\tif !isLocalIP {\n\t\tlogrus.Warnf(\"Domain have ip of other server. Domain '%v', Domain ips: %v, Server ips: %v\", domain, ips, localIPs)\n\t\treturn nil, errors.New(\"Domain have ip of other server.\")\n\t}\n\n\treturn this.createCertificateAcme(domain)\n}\n\nfunc (this *acmeStruct) createCertificateAcme(domain string) (cert *tls.Certificate, err error) {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tvar auth *acmeapi.Authorization\n\n\tctx, cancelFunc := context.WithTimeout(context.Background(), LETSENCRYPT_CREATE_CERTIFICATE_TIMEOUT)\n\tdefer cancelFunc()\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tauth, err = this.client.NewAuthorization(domain, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Infof(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Create authorization for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create new authorization for domain\")\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\tchallengeTypes := make([]string, len(auth.Challenges))\n\t\tfor i := range auth.Challenges {\n\t\t\tchallengeTypes[i] = auth.Challenges[i].Type\n\t\t}\n\t\tlogrus.Debugf(\"Challenge types for domain '%v': %v. Challenge combinations: %v\", domain, challengeTypes, auth.Combinations)\n\t}\n\n\tcanAuthorize := false\n\tvar challenge *acmeapi.Challenge\n\tfor _, cmb := range auth.Combinations {\n\t\tif len(cmb) == 1 && auth.Challenges[cmb[0]].Type == \"tls-sni-01\" {\n\t\t\tcanAuthorize = true\n\t\t\tchallenge = auth.Challenges[cmb[0]]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !canAuthorize {\n\t\tlogrus.Errorf(\"Can't find good challange combination for domain: '%v'\", domain)\n\t\treturn nil, errors.New(\"Can't find good challange combination\")\n\t}\n\n\tacmeHostName, err := acmeutils.TLSSNIHostname(this.privateKey, challenge.Token)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create acme-auth hostname for domain '%v': %v\", domain, acmeHostName)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create acme domain for domain '%v' token '%v': %v\", domain, challenge.Token, err)\n\t\treturn nil, errors.New(\"Can't create acme domain\")\n\t}\n\tthis.PutAcmeAuthDomain(acmeHostName)\n\n\tchallengeResponse, err := acmeutils.ChallengeResponseJSON(this.privateKey, challenge.Token, challenge.Type)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create challenge response for domain '%v', token '%v', challenge type %v: %v\",\n\t\t\tdomain, challenge.Token, challenge.Type, err)\n\t\treturn nil, errors.New(\"Can't create challenge response\")\n\t}\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.RespondToChallenge(challenge, challengeResponse, this.privateKey, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Info(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Send challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't send response for challenge\")\n\t}\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.LoadChallenge(challenge, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Load challenge for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't load challenge\")\n\t}\n\n\t\/\/ Generate CSR\n\tcertKey, err := rsa.GenerateKey(cryptorand.Reader, PRIVATE_KEY_BITS)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create private key for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create rsa key for domain '%v': %v\", err)\n\t\treturn nil, errors.New(\"Can't create rsa key\")\n\t}\n\tcertRequest := &x509.CertificateRequest{\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tPublicKeyAlgorithm: x509.RSA,\n\t\tPublicKey: &certKey.PublicKey,\n\t\tSubject: pkix.Name{CommonName: domain},\n\t\tDNSNames: []string{domain},\n\t}\n\tcsrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, certRequest, certKey)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create CSR for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create csr for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create csr\")\n\t}\n\n\tvar certResponse *acmeapi.Certificate\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tcertResponse, err = this.client.RequestCertificate(csrDER, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Get certificate for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't request certificate\")\n\t}\n\n\tpemEncode := func(b []byte, t string) []byte {\n\t\treturn pem.EncodeToMemory(&pem.Block{Bytes: b, Type: t})\n\t}\n\tcertPEM := pem.EncodeToMemory(&pem.Block{Bytes: certResponse.Certificate, Type: \"CERTIFICATE\"})\n\tfor _, extraCert := range certResponse.ExtraCertificates {\n\t\textraCertPEM := pem.EncodeToMemory(&pem.Block{Bytes: extraCert, Type: \"CERTIFICATE\"})\n\t\tcertPEM = append(certPEM, '\\n')\n\t\tcertPEM = append(certPEM, extraCertPEM...)\n\t}\n\tlogrus.Debugf(\"CERT PEM:\\n%s\", certPEM)\n\tcertKeyPEM := pemEncode(x509.MarshalPKCS1PrivateKey(certKey), \"RSA PRIVATE KEY\")\n\n\ttmpCert, err := tls.X509KeyPair(certPEM, certKeyPEM)\n\tlogrus.Debugf(\"Parsed cert count for domain '%v':\", len(tmpCert.Certificate))\n\tif err == nil {\n\t\tlogrus.Infof(\"Cert for domain '%v' parsed.\", domain)\n\t\tcert = &tmpCert\n\t} else {\n\t\tlogrus.Errorf(\"Can't parse cert for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't parse cert for domain\")\n\t}\n\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) createCertificateSelfSigned(domain string) (cert *tls.Certificate, err error) {\n\tderCert, privateKey, err := acmeutils.CreateTLSSNICertificate(domain)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can't create tls-sni-01 self-signed certificate for '%v': %v\", domain, err)\n\t\treturn nil, err\n\t}\n\n\tcert = &tls.Certificate{}\n\tcert.Certificate = [][]byte{derCert}\n\tcert.PrivateKey = privateKey\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) PutAcmeAuthDomain(domain string) {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Put acme auth domain:\", domain)\n\tthis.acmeAuthDomains[domain] = time.Now().Add(SNI01_EXPIRE_TOKEN)\n}\n\nfunc (this *acmeStruct) DeleteAcmeAuthDomain(domain string) bool {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Delete acme auth domain:\", domain)\n\t_, ok := this.acmeAuthDomains[domain]\n\tif ok {\n\t\tdelete(this.acmeAuthDomains, domain)\n\t}\n\treturn ok\n}\n<commit_msg>allow check auth domain many times<commit_after>package main\n\n\/\/ from time to time see https:\/\/godoc.org\/golang.org\/x\/crypto\/acme\/autocert\n\/\/ it isn't ready for usage now, but can simple code in future.\n\nimport (\n\t\"crypto\/rsa\"\n\n\tcryptorand \"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"errors\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hlandau\/acme\/acmeapi\"\n\t\"github.com\/hlandau\/acme\/acmeapi\/acmeutils\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"encoding\/pem\"\n)\n\nconst (\n\tSNI01_EXPIRE_TOKEN time.Duration = time.Minute * 10\n\tACME_DOMAIN_SUFFIX = \".acme.invalid\"\n)\n\ntype acmeStruct struct {\n\tserverAddress string\n\tprivateKey *rsa.PrivateKey\n\tclient *acmeapi.Client\n\n\tmutex *sync.Mutex\n\tauthDomainsMutex *sync.Mutex\n\tauthDomains map[string]time.Time\n}\n\nfunc (this *acmeStruct) authDomainPut(domain string) {\n\tthis.authDomainsMutex.Lock()\n\tdefer this.authDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Put acme auth domain:\", domain)\n\tthis.authDomains[domain] = time.Now().Add(SNI01_EXPIRE_TOKEN)\n}\nfunc (this *acmeStruct) authDomainCheck(domain string)bool {\n\tthis.authDomainsMutex.Lock()\n\tdefer this.authDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Check acme auth domain:\", domain)\n\t_, ok := this.authDomains[domain]\n\treturn ok\n}\n\nfunc (this *acmeStruct) authDomainDelete(domain string) {\n\tthis.authDomainsMutex.Lock()\n\tdefer this.authDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Delete acme auth domain:\", domain)\n\tdelete(this.authDomains, domain)\n}\n\nfunc (this *acmeStruct) Init() {\n\tthis.client = &acmeapi.Client{\n\t\tAccountKey: this.privateKey,\n\t\tDirectoryURL: this.serverAddress,\n\t}\n\n\tthis.mutex = &sync.Mutex{}\n\n\tthis.authDomainsMutex = &sync.Mutex{}\n\tthis.authDomains = make(map[string]time.Time)\n\tthis.CleanupTimer()\n}\n\nfunc (this *acmeStruct) RegisterEnsure(ctx context.Context) (err error) {\n\treg := &acmeapi.Registration{}\n\tfor i := 0; i < TRY_COUNT+1; i++ { \/\/ +1 count need for request latest agreement uri\n\t\treg.AgreementURI = reg.LatestAgreementURI\n\t\tif reg.AgreementURI != \"\" {\n\t\t\tlogrus.Info(\"Auto agree with terms:\", reg.LatestAgreementURI)\n\t\t}\n\t\terr = this.client.UpsertRegistration(reg, ctx)\n\t\tif reg.AgreementURI != \"\" && err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (this *acmeStruct) Cleanup() {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tnow := time.Now()\n\tfor token, expire := range this.authDomains {\n\t\tif expire.Before(now) {\n\t\t\tdelete(this.authDomains, token)\n\t\t}\n\t}\n}\n\nfunc (this *acmeStruct) CleanupTimer() {\n\tthis.Cleanup()\n\ttime.AfterFunc(SNI01_EXPIRE_TOKEN, this.Cleanup)\n}\n\nfunc (this *acmeStruct) CreateCertificate(domain string) (cert *tls.Certificate, err error) {\n\t\/\/ Check suffix for avoid mutex sync in DeleteAcmeAuthDomain\n\tif strings.HasSuffix(domain, ACME_DOMAIN_SUFFIX) {\n\t\tlogrus.Debugf(\"Detect auth-domain mode for domain '%v'\", domain)\n\t\tif this.authDomainCheck(domain) {\n\t\t\tlogrus.Debugf(\"Return self-signed certificate for domain '%v'\", domain)\n\t\t\treturn this.createCertificateSelfSigned(domain)\n\t\t} else {\n\t\t\tlogrus.Debugf(\"Detect auth-domain is not present in list '%v'\", domain)\n\t\t\treturn nil, errors.New(\"Now allowed auth-domain\")\n\t\t}\n\t}\n\n\t\/\/ check about we serve the domain\n\tips, err := net.LookupIP(domain)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Can't lookup ip for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't lookup ip of the domain\")\n\t}\n\tisLocalIP := false\ncheckLocalIP:\n\tfor _, ip := range ips {\n\t\tfor _, localIP := range localIPs {\n\t\t\tif ip.Equal(localIP) {\n\t\t\t\tisLocalIP = true\n\t\t\t\tbreak checkLocalIP\n\t\t\t}\n\t\t}\n\t}\n\tif !isLocalIP {\n\t\tlogrus.Warnf(\"Domain have ip of other server. Domain '%v', Domain ips: %v, Server ips: %v\", domain, ips, localIPs)\n\t\treturn nil, errors.New(\"Domain have ip of other server.\")\n\t}\n\n\treturn this.createCertificateAcme(domain)\n}\n\nfunc (this *acmeStruct) createCertificateAcme(domain string) (cert *tls.Certificate, err error) {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tvar auth *acmeapi.Authorization\n\n\tctx, cancelFunc := context.WithTimeout(context.Background(), LETSENCRYPT_CREATE_CERTIFICATE_TIMEOUT)\n\tdefer cancelFunc()\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tauth, err = this.client.NewAuthorization(domain, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Infof(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Create authorization for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create new authorization for domain\")\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\tchallengeTypes := make([]string, len(auth.Challenges))\n\t\tfor i := range auth.Challenges {\n\t\t\tchallengeTypes[i] = auth.Challenges[i].Type\n\t\t}\n\t\tlogrus.Debugf(\"Challenge types for domain '%v': %v. Challenge combinations: %v\", domain, challengeTypes, auth.Combinations)\n\t}\n\n\tcanAuthorize := false\n\tvar challenge *acmeapi.Challenge\n\tfor _, cmb := range auth.Combinations {\n\t\tif len(cmb) == 1 && auth.Challenges[cmb[0]].Type == \"tls-sni-01\" {\n\t\t\tcanAuthorize = true\n\t\t\tchallenge = auth.Challenges[cmb[0]]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !canAuthorize {\n\t\tlogrus.Errorf(\"Can't find good challange combination for domain: '%v'\", domain)\n\t\treturn nil, errors.New(\"Can't find good challange combination\")\n\t}\n\n\tacmeHostName, err := acmeutils.TLSSNIHostname(this.privateKey, challenge.Token)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create acme-auth hostname for domain '%v': %v\", domain, acmeHostName)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create acme domain for domain '%v' token '%v': %v\", domain, challenge.Token, err)\n\t\treturn nil, errors.New(\"Can't create acme domain\")\n\t}\n\tthis.authDomainPut(acmeHostName)\n\tdefer this.authDomainDelete(acmeHostName)\n\n\tchallengeResponse, err := acmeutils.ChallengeResponseJSON(this.privateKey, challenge.Token, challenge.Type)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create challenge response for domain '%v', token '%v', challenge type %v: %v\",\n\t\t\tdomain, challenge.Token, challenge.Type, err)\n\t\treturn nil, errors.New(\"Can't create challenge response\")\n\t}\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.RespondToChallenge(challenge, challengeResponse, this.privateKey, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Info(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Send challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't send response for challenge\")\n\t}\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.LoadChallenge(challenge, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Load challenge for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't load challenge\")\n\t}\n\n\t\/\/ Generate CSR\n\tcertKey, err := rsa.GenerateKey(cryptorand.Reader, PRIVATE_KEY_BITS)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create private key for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create rsa key for domain '%v': %v\", err)\n\t\treturn nil, errors.New(\"Can't create rsa key\")\n\t}\n\tcertRequest := &x509.CertificateRequest{\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tPublicKeyAlgorithm: x509.RSA,\n\t\tPublicKey: &certKey.PublicKey,\n\t\tSubject: pkix.Name{CommonName: domain},\n\t\tDNSNames: []string{domain},\n\t}\n\tcsrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, certRequest, certKey)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create CSR for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create csr for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create csr\")\n\t}\n\n\tvar certResponse *acmeapi.Certificate\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tcertResponse, err = this.client.RequestCertificate(csrDER, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Get certificate for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't request certificate\")\n\t}\n\n\tpemEncode := func(b []byte, t string) []byte {\n\t\treturn pem.EncodeToMemory(&pem.Block{Bytes: b, Type: t})\n\t}\n\tcertPEM := pem.EncodeToMemory(&pem.Block{Bytes: certResponse.Certificate, Type: \"CERTIFICATE\"})\n\tfor _, extraCert := range certResponse.ExtraCertificates {\n\t\textraCertPEM := pem.EncodeToMemory(&pem.Block{Bytes: extraCert, Type: \"CERTIFICATE\"})\n\t\tcertPEM = append(certPEM, '\\n')\n\t\tcertPEM = append(certPEM, extraCertPEM...)\n\t}\n\tlogrus.Debugf(\"CERT PEM:\\n%s\", certPEM)\n\tcertKeyPEM := pemEncode(x509.MarshalPKCS1PrivateKey(certKey), \"RSA PRIVATE KEY\")\n\n\ttmpCert, err := tls.X509KeyPair(certPEM, certKeyPEM)\n\tlogrus.Debugf(\"Parsed cert count for domain '%v':\", len(tmpCert.Certificate))\n\tif err == nil {\n\t\tlogrus.Infof(\"Cert for domain '%v' parsed.\", domain)\n\t\tcert = &tmpCert\n\t} else {\n\t\tlogrus.Errorf(\"Can't parse cert for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't parse cert for domain\")\n\t}\n\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) createCertificateSelfSigned(domain string) (cert *tls.Certificate, err error) {\n\tderCert, privateKey, err := acmeutils.CreateTLSSNICertificate(domain)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can't create tls-sni-01 self-signed certificate for '%v': %v\", domain, err)\n\t\treturn nil, err\n\t}\n\n\tcert = &tls.Certificate{}\n\tcert.Certificate = [][]byte{derCert}\n\tcert.PrivateKey = privateKey\n\treturn cert, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rsa\"\n\n\tcryptorand \"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"errors\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hlandau\/acme\/acmeapi\"\n\t\"github.com\/hlandau\/acme\/acmeapi\/acmeutils\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"encoding\/pem\"\n)\n\nconst (\n\tSNI01_EXPIRE_TOKEN time.Duration = time.Minute * 10\n)\n\ntype acmeStruct struct {\n\tserverAddress string\n\tprivateKey *rsa.PrivateKey\n\tclient *acmeapi.Client\n\n\tmutex *sync.Mutex\n\tacmeauthDomainsMutex *sync.Mutex\n\tacmeAuthDomains map[string]time.Time\n}\n\nfunc (this *acmeStruct) Init() {\n\tthis.client = &acmeapi.Client{\n\t\tAccountKey: this.privateKey,\n\t\tDirectoryURL: this.serverAddress,\n\t}\n\n\tthis.mutex = &sync.Mutex{}\n\n\tthis.acmeauthDomainsMutex = &sync.Mutex{}\n\tthis.acmeAuthDomains = make(map[string]time.Time)\n\tthis.CleanupTimer()\n}\n\nfunc (this *acmeStruct) RegisterEnsure(ctx context.Context) (err error) {\n\treg := &acmeapi.Registration{}\n\tfor i := 0; i < TRY_COUNT+1; i++ { \/\/ +1 count need for request latest agreement uri\n\t\treg.AgreementURI = reg.LatestAgreementURI\n\t\tif reg.AgreementURI != \"\" {\n\t\t\tlogrus.Info(\"Auto agree with terms:\", reg.LatestAgreementURI)\n\t\t}\n\t\terr = this.client.UpsertRegistration(reg, ctx)\n\t\tif reg.AgreementURI != \"\" && err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (this *acmeStruct) Cleanup() {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tnow := time.Now()\n\tfor token, expire := range this.acmeAuthDomains {\n\t\tif expire.Before(now) {\n\t\t\tdelete(this.acmeAuthDomains, token)\n\t\t}\n\t}\n}\n\nfunc (this *acmeStruct) CleanupTimer() {\n\tthis.Cleanup()\n\ttime.AfterFunc(SNI01_EXPIRE_TOKEN, this.Cleanup)\n}\n\nfunc (this *acmeStruct) CreateCertificate(domain string) (cert *tls.Certificate, err error) {\n\t\/\/ Check suffix for avoid mutex sync in DeleteAcmeAuthDomain\n\tif strings.HasSuffix(domain, \".acme.invalid\") {\n\t\tlogrus.Debugf(\"Detect auth-domain mode for domain '%v'\", domain)\n\t\tif this.DeleteAcmeAuthDomain(domain) {\n\t\t\tlogrus.Debugf(\"Return self-signed certificate for domain '%v'\", domain)\n\t\t\treturn this.createCertificateSelfSigned(domain)\n\t\t} else {\n\t\t\tlogrus.Debugf(\"Detect auth-domain is not present in list '%v'\", domain)\n\t\t\treturn nil, errors.New(\"Now allowed auth-domain\")\n\t\t}\n\t}\n\n\t\/\/ check about we serve the domain\n\tips, err := net.LookupIP(domain)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Can't lookup ip for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't lookup ip of the domain\")\n\t}\n\tisLocalIP := false\ncheckLocalIP:\n\tfor _, ip := range ips {\n\t\tfor _, localIP := range localIPs {\n\t\t\tif ip.Equal(localIP) {\n\t\t\t\tisLocalIP = true\n\t\t\t\tbreak checkLocalIP\n\t\t\t}\n\t\t}\n\t}\n\tif !isLocalIP {\n\t\tlogrus.Warnf(\"Domain have ip of other server. Domain '%v', Domain ips: %v, Server ips: %v\", domain, ips, localIPs)\n\t\treturn nil, errors.New(\"Domain have ip of other server.\")\n\t}\n\n\treturn this.createCertificateAcme(domain)\n}\n\nfunc (this *acmeStruct) createCertificateAcme(domain string) (cert *tls.Certificate, err error) {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tvar auth *acmeapi.Authorization\n\n\tctx, cancelFunc := context.WithTimeout(context.Background(), LETSENCRYPT_CREATE_CERTIFICATE_TIMEOUT)\n\tdefer cancelFunc()\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tauth, err = this.client.NewAuthorization(domain, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Infof(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Create authorization for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create new authorization for domain\")\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\tchallengeTypes := make([]string, len(auth.Challenges))\n\t\tfor i := range auth.Challenges {\n\t\t\tchallengeTypes[i] = auth.Challenges[i].Type\n\t\t}\n\t\tlogrus.Debugf(\"Challenge types for domain '%v': %v. Challenge combinations: %v\", domain, challengeTypes, auth.Combinations)\n\t}\n\n\tcanAuthorize := false\n\tvar challenge *acmeapi.Challenge\n\tfor _, cmb := range auth.Combinations {\n\t\tif len(cmb) == 1 && auth.Challenges[cmb[0]].Type == \"tls-sni-01\" {\n\t\t\tcanAuthorize = true\n\t\t\tchallenge = auth.Challenges[cmb[0]]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !canAuthorize {\n\t\tlogrus.Errorf(\"Can't find good challange combination for domain: '%v'\", domain)\n\t\treturn nil, errors.New(\"Can't find good challange combination\")\n\t}\n\n\tacmeHostName, err := acmeutils.TLSSNIHostname(this.privateKey, challenge.Token)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create acme-auth hostname for domain '%v': %v\", domain, acmeHostName)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create acme domain for domain '%v' token '%v': %v\", domain, challenge.Token, err)\n\t\treturn nil, errors.New(\"Can't create acme domain\")\n\t}\n\tthis.PutAcmeAuthDomain(acmeHostName)\n\n\tchallengeResponse, err := acmeutils.ChallengeResponseJSON(this.privateKey, challenge.Token, challenge.Type)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create challenge response for domain '%v', token '%v', challenge type %v: %v\",\n\t\t\tdomain, challenge.Token, challenge.Type, err)\n\t\treturn nil, errors.New(\"Can't create challenge response\")\n\t}\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.RespondToChallenge(challenge, challengeResponse, this.privateKey, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Info(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Send challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't send response for challenge\")\n\t}\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.LoadChallenge(challenge, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Load challenge for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't load challenge\")\n\t}\n\n\t\/\/ Generate CSR\n\tcertKey, err := rsa.GenerateKey(cryptorand.Reader, PRIVATE_KEY_BITS)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create private key for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create rsa key for domain '%v': %v\", err)\n\t\treturn nil, errors.New(\"Can't create rsa key\")\n\t}\n\tcertRequest := &x509.CertificateRequest{\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tPublicKeyAlgorithm: x509.RSA,\n\t\tPublicKey: &certKey.PublicKey,\n\t\tSubject: pkix.Name{CommonName: domain},\n\t\tDNSNames: []string{domain},\n\t}\n\tcsrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, certRequest, certKey)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create CSR for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create csr for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create csr\")\n\t}\n\n\tvar certResponse *acmeapi.Certificate\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tcertResponse, err = this.client.RequestCertificate(csrDER, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Get certificate for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't request certificate\")\n\t}\n\n\tpemEncode := func(b []byte, t string) []byte {\n\t\treturn pem.EncodeToMemory(&pem.Block{Bytes: b, Type: t})\n\t}\n\tcertPEM := pem.EncodeToMemory(&pem.Block{Bytes: certResponse.Certificate, Type: \"CERTIFICATE\"})\n\tfor _, extraCert := range certResponse.ExtraCertificates {\n\t\textraCertPEM := pem.EncodeToMemory(&pem.Block{Bytes: extraCert, Type: \"CERTIFICATE\"})\n\t\tcertPEM = append(certPEM, '\\n')\n\t\tcertPEM = append(certPEM, extraCertPEM...)\n\t}\n\tlogrus.Debugf(\"CERT PEM:\\n%s\", certPEM)\n\tcertKeyPEM := pemEncode(x509.MarshalPKCS1PrivateKey(certKey), \"RSA PRIVATE KEY\")\n\n\ttmpCert, err := tls.X509KeyPair(certPEM, certKeyPEM)\n\tif err == nil {\n\t\tlogrus.Infof(\"Cert for domain '%v' parsed.\", domain)\n\t\tcert = &tmpCert\n\t} else {\n\t\tlogrus.Errorf(\"Can't parse cert for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't parse cert for domain\")\n\t}\n\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) createCertificateSelfSigned(domain string) (cert *tls.Certificate, err error) {\n\tderCert, privateKey, err := acmeutils.CreateTLSSNICertificate(domain)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can't create tls-sni-01 self-signed certificate for '%v': %v\", domain, err)\n\t\treturn nil, err\n\t}\n\n\tcert = &tls.Certificate{}\n\tcert.Certificate = [][]byte{derCert}\n\tcert.PrivateKey = privateKey\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) PutAcmeAuthDomain(domain string) {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Put acme auth domain:\", domain)\n\tthis.acmeAuthDomains[domain] = time.Now().Add(SNI01_EXPIRE_TOKEN)\n}\n\nfunc (this *acmeStruct) DeleteAcmeAuthDomain(domain string) bool {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Delete acme auth domain:\", domain)\n\t_, ok := this.acmeAuthDomains[domain]\n\tif ok {\n\t\tdelete(this.acmeAuthDomains, domain)\n\t}\n\treturn ok\n}\n<commit_msg>paced cert count<commit_after>package main\n\nimport (\n\t\"crypto\/rsa\"\n\n\tcryptorand \"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"errors\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/hlandau\/acme\/acmeapi\"\n\t\"github.com\/hlandau\/acme\/acmeapi\/acmeutils\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"encoding\/pem\"\n)\n\nconst (\n\tSNI01_EXPIRE_TOKEN time.Duration = time.Minute * 10\n)\n\ntype acmeStruct struct {\n\tserverAddress string\n\tprivateKey *rsa.PrivateKey\n\tclient *acmeapi.Client\n\n\tmutex *sync.Mutex\n\tacmeauthDomainsMutex *sync.Mutex\n\tacmeAuthDomains map[string]time.Time\n}\n\nfunc (this *acmeStruct) Init() {\n\tthis.client = &acmeapi.Client{\n\t\tAccountKey: this.privateKey,\n\t\tDirectoryURL: this.serverAddress,\n\t}\n\n\tthis.mutex = &sync.Mutex{}\n\n\tthis.acmeauthDomainsMutex = &sync.Mutex{}\n\tthis.acmeAuthDomains = make(map[string]time.Time)\n\tthis.CleanupTimer()\n}\n\nfunc (this *acmeStruct) RegisterEnsure(ctx context.Context) (err error) {\n\treg := &acmeapi.Registration{}\n\tfor i := 0; i < TRY_COUNT+1; i++ { \/\/ +1 count need for request latest agreement uri\n\t\treg.AgreementURI = reg.LatestAgreementURI\n\t\tif reg.AgreementURI != \"\" {\n\t\t\tlogrus.Info(\"Auto agree with terms:\", reg.LatestAgreementURI)\n\t\t}\n\t\terr = this.client.UpsertRegistration(reg, ctx)\n\t\tif reg.AgreementURI != \"\" && err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (this *acmeStruct) Cleanup() {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tnow := time.Now()\n\tfor token, expire := range this.acmeAuthDomains {\n\t\tif expire.Before(now) {\n\t\t\tdelete(this.acmeAuthDomains, token)\n\t\t}\n\t}\n}\n\nfunc (this *acmeStruct) CleanupTimer() {\n\tthis.Cleanup()\n\ttime.AfterFunc(SNI01_EXPIRE_TOKEN, this.Cleanup)\n}\n\nfunc (this *acmeStruct) CreateCertificate(domain string) (cert *tls.Certificate, err error) {\n\t\/\/ Check suffix for avoid mutex sync in DeleteAcmeAuthDomain\n\tif strings.HasSuffix(domain, \".acme.invalid\") {\n\t\tlogrus.Debugf(\"Detect auth-domain mode for domain '%v'\", domain)\n\t\tif this.DeleteAcmeAuthDomain(domain) {\n\t\t\tlogrus.Debugf(\"Return self-signed certificate for domain '%v'\", domain)\n\t\t\treturn this.createCertificateSelfSigned(domain)\n\t\t} else {\n\t\t\tlogrus.Debugf(\"Detect auth-domain is not present in list '%v'\", domain)\n\t\t\treturn nil, errors.New(\"Now allowed auth-domain\")\n\t\t}\n\t}\n\n\t\/\/ check about we serve the domain\n\tips, err := net.LookupIP(domain)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Can't lookup ip for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't lookup ip of the domain\")\n\t}\n\tisLocalIP := false\ncheckLocalIP:\n\tfor _, ip := range ips {\n\t\tfor _, localIP := range localIPs {\n\t\t\tif ip.Equal(localIP) {\n\t\t\t\tisLocalIP = true\n\t\t\t\tbreak checkLocalIP\n\t\t\t}\n\t\t}\n\t}\n\tif !isLocalIP {\n\t\tlogrus.Warnf(\"Domain have ip of other server. Domain '%v', Domain ips: %v, Server ips: %v\", domain, ips, localIPs)\n\t\treturn nil, errors.New(\"Domain have ip of other server.\")\n\t}\n\n\treturn this.createCertificateAcme(domain)\n}\n\nfunc (this *acmeStruct) createCertificateAcme(domain string) (cert *tls.Certificate, err error) {\n\tthis.mutex.Lock()\n\tdefer this.mutex.Unlock()\n\n\tvar auth *acmeapi.Authorization\n\n\tctx, cancelFunc := context.WithTimeout(context.Background(), LETSENCRYPT_CREATE_CERTIFICATE_TIMEOUT)\n\tdefer cancelFunc()\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tauth, err = this.client.NewAuthorization(domain, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Infof(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Create authorization for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create new authorization for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create new authorization for domain\")\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\tchallengeTypes := make([]string, len(auth.Challenges))\n\t\tfor i := range auth.Challenges {\n\t\t\tchallengeTypes[i] = auth.Challenges[i].Type\n\t\t}\n\t\tlogrus.Debugf(\"Challenge types for domain '%v': %v. Challenge combinations: %v\", domain, challengeTypes, auth.Combinations)\n\t}\n\n\tcanAuthorize := false\n\tvar challenge *acmeapi.Challenge\n\tfor _, cmb := range auth.Combinations {\n\t\tif len(cmb) == 1 && auth.Challenges[cmb[0]].Type == \"tls-sni-01\" {\n\t\t\tcanAuthorize = true\n\t\t\tchallenge = auth.Challenges[cmb[0]]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !canAuthorize {\n\t\tlogrus.Errorf(\"Can't find good challange combination for domain: '%v'\", domain)\n\t\treturn nil, errors.New(\"Can't find good challange combination\")\n\t}\n\n\tacmeHostName, err := acmeutils.TLSSNIHostname(this.privateKey, challenge.Token)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create acme-auth hostname for domain '%v': %v\", domain, acmeHostName)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create acme domain for domain '%v' token '%v': %v\", domain, challenge.Token, err)\n\t\treturn nil, errors.New(\"Can't create acme domain\")\n\t}\n\tthis.PutAcmeAuthDomain(acmeHostName)\n\n\tchallengeResponse, err := acmeutils.ChallengeResponseJSON(this.privateKey, challenge.Token, challenge.Type)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create challenge response for domain '%v', token '%v', challenge type %v: %v\",\n\t\t\tdomain, challenge.Token, challenge.Type, err)\n\t\treturn nil, errors.New(\"Can't create challenge response\")\n\t}\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.RespondToChallenge(challenge, challengeResponse, this.privateKey, ctx)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlogrus.Info(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Send challenge response for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't send response for challenge of domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't send response for challenge\")\n\t}\n\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\terr = this.client.LoadChallenge(challenge, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Debugf(\"Load challenge for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't load challenge for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't load challenge\")\n\t}\n\n\t\/\/ Generate CSR\n\tcertKey, err := rsa.GenerateKey(cryptorand.Reader, PRIVATE_KEY_BITS)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create private key for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create rsa key for domain '%v': %v\", err)\n\t\treturn nil, errors.New(\"Can't create rsa key\")\n\t}\n\tcertRequest := &x509.CertificateRequest{\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\tPublicKeyAlgorithm: x509.RSA,\n\t\tPublicKey: &certKey.PublicKey,\n\t\tSubject: pkix.Name{CommonName: domain},\n\t\tDNSNames: []string{domain},\n\t}\n\tcsrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, certRequest, certKey)\n\tif err == nil {\n\t\tlogrus.Debugf(\"Create CSR for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't create csr for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't create csr\")\n\t}\n\n\tvar certResponse *acmeapi.Certificate\n\tfor i := 0; i < TRY_COUNT; i++ {\n\t\tcertResponse, err = this.client.RequestCertificate(csrDER, ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\t\ttime.Sleep(RETRY_SLEEP)\n\t\t}\n\t}\n\tif err == nil {\n\t\tlogrus.Infof(\"Get certificate for domain '%v'\", domain)\n\t} else {\n\t\tlogrus.Errorf(\"Can't get certificate for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't request certificate\")\n\t}\n\n\tpemEncode := func(b []byte, t string) []byte {\n\t\treturn pem.EncodeToMemory(&pem.Block{Bytes: b, Type: t})\n\t}\n\tcertPEM := pem.EncodeToMemory(&pem.Block{Bytes: certResponse.Certificate, Type: \"CERTIFICATE\"})\n\tfor _, extraCert := range certResponse.ExtraCertificates {\n\t\textraCertPEM := pem.EncodeToMemory(&pem.Block{Bytes: extraCert, Type: \"CERTIFICATE\"})\n\t\tcertPEM = append(certPEM, '\\n')\n\t\tcertPEM = append(certPEM, extraCertPEM...)\n\t}\n\tlogrus.Debugf(\"CERT PEM:\\n%s\", certPEM)\n\tcertKeyPEM := pemEncode(x509.MarshalPKCS1PrivateKey(certKey), \"RSA PRIVATE KEY\")\n\n\ttmpCert, err := tls.X509KeyPair(certPEM, certKeyPEM)\n\tlogrus.Debugf(\"Parsed cert count for domain '%v':\", len(tmpCert.Certificate))\n\tif err == nil {\n\t\tlogrus.Infof(\"Cert for domain '%v' parsed.\", domain)\n\t\tcert = &tmpCert\n\t} else {\n\t\tlogrus.Errorf(\"Can't parse cert for domain '%v': %v\", domain, err)\n\t\treturn nil, errors.New(\"Can't parse cert for domain\")\n\t}\n\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) createCertificateSelfSigned(domain string) (cert *tls.Certificate, err error) {\n\tderCert, privateKey, err := acmeutils.CreateTLSSNICertificate(domain)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can't create tls-sni-01 self-signed certificate for '%v': %v\", domain, err)\n\t\treturn nil, err\n\t}\n\n\tcert = &tls.Certificate{}\n\tcert.Certificate = [][]byte{derCert}\n\tcert.PrivateKey = privateKey\n\treturn cert, nil\n}\n\nfunc (this *acmeStruct) PutAcmeAuthDomain(domain string) {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Put acme auth domain:\", domain)\n\tthis.acmeAuthDomains[domain] = time.Now().Add(SNI01_EXPIRE_TOKEN)\n}\n\nfunc (this *acmeStruct) DeleteAcmeAuthDomain(domain string) bool {\n\tthis.acmeauthDomainsMutex.Lock()\n\tdefer this.acmeauthDomainsMutex.Unlock()\n\n\tlogrus.Debug(\"Delete acme auth domain:\", domain)\n\t_, ok := this.acmeAuthDomains[domain]\n\tif ok {\n\t\tdelete(this.acmeAuthDomains, domain)\n\t}\n\treturn ok\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Random odds and ends.\n\npackage fuse\n\nimport (\n\t\"os\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"syscall\"\n\t\"unsafe\"\n\t\"io\/ioutil\"\n)\n\n\nfunc (code Status) String() string {\n\tif code == OK {\n\t\treturn \"OK\"\n\t}\n\treturn fmt.Sprintf(\"%d=%v\", int(code), os.Errno(code))\n}\n\n\/\/ Make a temporary directory securely.\nfunc MakeTempDir() string {\n\tnm, err := ioutil.TempDir(\"\", \"go-fuse\")\n\tif err != nil {\n\t\tpanic(\"TempDir() failed: \" + err.String())\n\t}\n\treturn nm\n}\n\n\/\/ Convert os.Error back to Errno based errors.\nfunc OsErrorToErrno(err os.Error) Status {\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase os.Errno:\n\t\t\treturn Status(t)\n\t\tcase *os.SyscallError:\n\t\t\treturn Status(t.Errno)\n\t\tcase *os.PathError:\n\t\t\treturn OsErrorToErrno(t.Error)\n\t\tcase *os.LinkError:\n\t\t\treturn OsErrorToErrno(t.Error)\n\t\tdefault:\n\t\t\tlog.Println(\"can't convert error type:\", err)\n\t\t\treturn ENOSYS\n\t\t}\n\t}\n\treturn OK\n}\n\nfunc SplitNs(time float64, secs *uint64, nsecs *uint32) {\n\t*nsecs = uint32(1e9 * (time - math.Trunc(time)))\n\t*secs = uint64(math.Trunc(time))\n}\n\nfunc CopyFileInfo(fi *os.FileInfo, attr *Attr) {\n\tattr.Ino = uint64(fi.Ino)\n\tattr.Size = uint64(fi.Size)\n\tattr.Blocks = uint64(fi.Blocks)\n\n\tattr.Atime = uint64(fi.Atime_ns \/ 1e9)\n\tattr.Atimensec = uint32(fi.Atime_ns % 1e9)\n\n\tattr.Mtime = uint64(fi.Mtime_ns \/ 1e9)\n\tattr.Mtimensec = uint32(fi.Mtime_ns % 1e9)\n\n\tattr.Ctime = uint64(fi.Ctime_ns \/ 1e9)\n\tattr.Ctimensec = uint32(fi.Ctime_ns % 1e9)\n\n\tattr.Mode = fi.Mode\n\tattr.Nlink = uint32(fi.Nlink)\n\tattr.Uid = uint32(fi.Uid)\n\tattr.Gid = uint32(fi.Gid)\n\tattr.Rdev = uint32(fi.Rdev)\n\tattr.Blksize = uint32(fi.Blksize)\n}\n\n\nfunc writev(fd int, iovecs *syscall.Iovec, cnt int) (n int, errno int) {\n\tn1, _, e1 := syscall.Syscall(\n\t\tsyscall.SYS_WRITEV,\n\t\tuintptr(fd), uintptr(unsafe.Pointer(iovecs)), uintptr(cnt))\n\treturn int(n1), int(e1)\n}\n\nfunc Writev(fd int, packet [][]byte) (n int, err os.Error) {\n\tiovecs := make([]syscall.Iovec, 0, len(packet))\n\n\tfor _, v := range packet {\n\t\tif v == nil || len(v) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvec := syscall.Iovec{\n\t\t\tBase: &v[0],\n\t\t}\n\t\tvec.SetLen(len(v))\n\t\tiovecs = append(iovecs, vec)\n\t}\n\n\tif len(iovecs) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tn, errno := writev(fd, &iovecs[0], len(iovecs))\n\tif errno != 0 {\n\t\terr = os.NewSyscallError(\"writev\", errno)\n\t}\n\treturn n, err\n}\n\nfunc CountCpus() int {\n\tvar contents [10240]byte\n\n\tf, err := os.Open(\"\/proc\/stat\")\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tn, _ := f.Read(contents[:])\n\tre, _ := regexp.Compile(\"\\ncpu[0-9]\")\n\n\treturn len(re.FindAllString(string(contents[:n]), 100))\n}\n\n\/\/ Creates a return entry for a non-existent path.\nfunc NegativeEntry(time float64) *EntryOut {\n\tout := new(EntryOut)\n\tout.NodeId = 0\n\tSplitNs(time, &out.EntryValid, &out.EntryValidNsec)\n\treturn out\n}\n\nfunc ModeToType(mode uint32) uint32 {\n\treturn (mode & 0170000) >> 12\n}\n\n\nfunc CheckSuccess(e os.Error) {\n\tif e != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected error: %v\", e))\n\t}\n}\n\nfunc MyPID() string {\n\tv, _ := os.Readlink(\"\/proc\/self\")\n\treturn v\n}\n\n\/\/ Thanks to Andrew Gerrand for this hack.\nfunc asSlice(ptr unsafe.Pointer, byteCount int) []byte {\n\th := &reflect.SliceHeader{uintptr(ptr), byteCount, byteCount}\n\treturn *(*[]byte)(unsafe.Pointer(h))\n}\n<commit_msg>Introduce Status.Ok().<commit_after>\/\/ Random odds and ends.\n\npackage fuse\n\nimport (\n\t\"os\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"syscall\"\n\t\"unsafe\"\n\t\"io\/ioutil\"\n)\n\n\nfunc (code Status) String() string {\n\tif code == OK {\n\t\treturn \"OK\"\n\t}\n\treturn fmt.Sprintf(\"%d=%v\", int(code), os.Errno(code))\n}\n\nfunc (code Status) Ok() bool {\n\treturn code == OK\n}\n\n\/\/ Make a temporary directory securely.\nfunc MakeTempDir() string {\n\tnm, err := ioutil.TempDir(\"\", \"go-fuse\")\n\tif err != nil {\n\t\tpanic(\"TempDir() failed: \" + err.String())\n\t}\n\treturn nm\n}\n\n\/\/ Convert os.Error back to Errno based errors.\nfunc OsErrorToErrno(err os.Error) Status {\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase os.Errno:\n\t\t\treturn Status(t)\n\t\tcase *os.SyscallError:\n\t\t\treturn Status(t.Errno)\n\t\tcase *os.PathError:\n\t\t\treturn OsErrorToErrno(t.Error)\n\t\tcase *os.LinkError:\n\t\t\treturn OsErrorToErrno(t.Error)\n\t\tdefault:\n\t\t\tlog.Println(\"can't convert error type:\", err)\n\t\t\treturn ENOSYS\n\t\t}\n\t}\n\treturn OK\n}\n\nfunc SplitNs(time float64, secs *uint64, nsecs *uint32) {\n\t*nsecs = uint32(1e9 * (time - math.Trunc(time)))\n\t*secs = uint64(math.Trunc(time))\n}\n\nfunc CopyFileInfo(fi *os.FileInfo, attr *Attr) {\n\tattr.Ino = uint64(fi.Ino)\n\tattr.Size = uint64(fi.Size)\n\tattr.Blocks = uint64(fi.Blocks)\n\n\tattr.Atime = uint64(fi.Atime_ns \/ 1e9)\n\tattr.Atimensec = uint32(fi.Atime_ns % 1e9)\n\n\tattr.Mtime = uint64(fi.Mtime_ns \/ 1e9)\n\tattr.Mtimensec = uint32(fi.Mtime_ns % 1e9)\n\n\tattr.Ctime = uint64(fi.Ctime_ns \/ 1e9)\n\tattr.Ctimensec = uint32(fi.Ctime_ns % 1e9)\n\n\tattr.Mode = fi.Mode\n\tattr.Nlink = uint32(fi.Nlink)\n\tattr.Uid = uint32(fi.Uid)\n\tattr.Gid = uint32(fi.Gid)\n\tattr.Rdev = uint32(fi.Rdev)\n\tattr.Blksize = uint32(fi.Blksize)\n}\n\n\nfunc writev(fd int, iovecs *syscall.Iovec, cnt int) (n int, errno int) {\n\tn1, _, e1 := syscall.Syscall(\n\t\tsyscall.SYS_WRITEV,\n\t\tuintptr(fd), uintptr(unsafe.Pointer(iovecs)), uintptr(cnt))\n\treturn int(n1), int(e1)\n}\n\nfunc Writev(fd int, packet [][]byte) (n int, err os.Error) {\n\tiovecs := make([]syscall.Iovec, 0, len(packet))\n\n\tfor _, v := range packet {\n\t\tif v == nil || len(v) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvec := syscall.Iovec{\n\t\t\tBase: &v[0],\n\t\t}\n\t\tvec.SetLen(len(v))\n\t\tiovecs = append(iovecs, vec)\n\t}\n\n\tif len(iovecs) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tn, errno := writev(fd, &iovecs[0], len(iovecs))\n\tif errno != 0 {\n\t\terr = os.NewSyscallError(\"writev\", errno)\n\t}\n\treturn n, err\n}\n\nfunc CountCpus() int {\n\tvar contents [10240]byte\n\n\tf, err := os.Open(\"\/proc\/stat\")\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tn, _ := f.Read(contents[:])\n\tre, _ := regexp.Compile(\"\\ncpu[0-9]\")\n\n\treturn len(re.FindAllString(string(contents[:n]), 100))\n}\n\n\/\/ Creates a return entry for a non-existent path.\nfunc NegativeEntry(time float64) *EntryOut {\n\tout := new(EntryOut)\n\tout.NodeId = 0\n\tSplitNs(time, &out.EntryValid, &out.EntryValidNsec)\n\treturn out\n}\n\nfunc ModeToType(mode uint32) uint32 {\n\treturn (mode & 0170000) >> 12\n}\n\n\nfunc CheckSuccess(e os.Error) {\n\tif e != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected error: %v\", e))\n\t}\n}\n\nfunc MyPID() string {\n\tv, _ := os.Readlink(\"\/proc\/self\")\n\treturn v\n}\n\n\/\/ Thanks to Andrew Gerrand for this hack.\nfunc asSlice(ptr unsafe.Pointer, byteCount int) []byte {\n\th := &reflect.SliceHeader{uintptr(ptr), byteCount, byteCount}\n\treturn *(*[]byte)(unsafe.Pointer(h))\n}\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype Account struct {\n\tID int `json:\"id\" example:\"1\" format:\"int64\"`\n\tName string `json:\"name\" example:\"account name\"`\n\tUUID uuid.UUID `json:\"uuid\" example:\"550e8400-e29b-41d4-a716-446655440000\" format:\"uuid\"`\n}\n\nvar (\n\tErrNameInvalid = errors.New(\"name is empty\")\n)\n\ntype AddAccount struct {\n\tName string `json:\"name\" example:\"account name\"`\n}\n\nfunc (a AddAccount) Validation() error {\n\tswitch {\n\tcase len(a.Name) == 0:\n\t\treturn ErrNameInvalid\n\tdefault:\n\t\treturn nil\n\t}\n}\n\ntype UpdateAccount struct {\n\tName string `json:\"name\" example:\"account name\"`\n}\n\nfunc (a UpdateAccount) Validation() error {\n\tswitch {\n\tcase len(a.Name) == 0:\n\t\treturn ErrNameInvalid\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc AccountsAll(q string) ([]Account, error) {\n\tif q == \"\" {\n\t\treturn accounts, nil\n\t}\n\tas := []Account{}\n\tfor k, v := range accounts {\n\t\tif q == v.Name {\n\t\t\tas = append(as, accounts[k])\n\t\t}\n\t}\n\treturn as, nil\n}\n\nfunc AccountOne(id int) (Account, error) {\n\tfor _, v := range accounts {\n\t\tif id == v.ID {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn Account{}, ErrNoRow\n}\n\nfunc (a Account) Insert() (int, error) {\n\taccountMaxID++\n\ta.ID = accountMaxID\n\taccounts = append(accounts, a)\n\treturn accountMaxID, nil\n}\n\nfunc Delete(id int) error {\n\tfor k, v := range accounts {\n\t\tif id == v.ID {\n\t\t\taccounts = append(accounts[:k], accounts[k+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"account id=%d is not found\", id)\n}\n\nfunc (a Account) Update() error {\n\tfor k, v := range accounts {\n\t\tif a.ID == v.ID {\n\t\t\taccounts[k].Name = a.Name\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"account id=%d is not found\", a.ID)\n}\n\nvar accountMaxID = 3\nvar accounts = []Account{\n\tAccount{ID: 1, Name: \"account_1\"},\n\tAccount{ID: 2, Name: \"account_2\"},\n\tAccount{ID: 3, Name: \"account_3\"},\n}\n<commit_msg>example: update account.go (#196)<commit_after>package model\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype Account struct {\n\tID int `json:\"id\" example:\"1\" format:\"int64\"`\n\tName string `json:\"name\" example:\"account name\"`\n\tUUID uuid.UUID `json:\"uuid\" example:\"550e8400-e29b-41d4-a716-446655440000\" format:\"uuid\"`\n}\n\nvar (\n\tErrNameInvalid = errors.New(\"name is empty\")\n)\n\ntype AddAccount struct {\n\tName string `json:\"name\" example:\"account name\"`\n}\n\nfunc (a AddAccount) Validation() error {\n\tswitch {\n\tcase len(a.Name) == 0:\n\t\treturn ErrNameInvalid\n\tdefault:\n\t\treturn nil\n\t}\n}\n\ntype UpdateAccount struct {\n\tName string `json:\"name\" example:\"account name\"`\n}\n\nfunc (a UpdateAccount) Validation() error {\n\tswitch {\n\tcase len(a.Name) == 0:\n\t\treturn ErrNameInvalid\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc AccountsAll(q string) ([]Account, error) {\n\tif q == \"\" {\n\t\treturn accounts, nil\n\t}\n\tas := []Account{}\n\tfor k, v := range accounts {\n\t\tif q == v.Name {\n\t\t\tas = append(as, accounts[k])\n\t\t}\n\t}\n\treturn as, nil\n}\n\nfunc AccountOne(id int) (Account, error) {\n\tfor _, v := range accounts {\n\t\tif id == v.ID {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn Account{}, ErrNoRow\n}\n\nfunc (a Account) Insert() (int, error) {\n\taccountMaxID++\n\ta.ID = accountMaxID\n\ta.Name = fmt.Sprintf(\"account_%d\", accountMaxID)\n\taccounts = append(accounts, a)\n\treturn accountMaxID, nil\n}\n\nfunc Delete(id int) error {\n\tfor k, v := range accounts {\n\t\tif id == v.ID {\n\t\t\taccounts = append(accounts[:k], accounts[k+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"account id=%d is not found\", id)\n}\n\nfunc (a Account) Update() error {\n\tfor k, v := range accounts {\n\t\tif a.ID == v.ID {\n\t\t\taccounts[k].Name = a.Name\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"account id=%d is not found\", a.ID)\n}\n\nvar accountMaxID = 3\nvar accounts = []Account{\n\tAccount{ID: 1, Name: \"account_1\"},\n\tAccount{ID: 2, Name: \"account_2\"},\n\tAccount{ID: 3, Name: \"account_3\"},\n}\n<|endoftext|>"} {"text":"<commit_before>package jwd\n\nimport (\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com\/9uuso\/unidecode\"\n)\n\n\/\/ According to this tool: http:\/\/www.csun.edu\/english\/edit_distance.php\n\/\/ parameter order should have no difference in the result. Therefore,\n\/\/ to avoid panicing later on, we will order the strings according to\n\/\/ their length.\nfunc sort(s1, s2 string) (string, string) {\n\tif strings.Count(s1, \"\")-1 <= strings.Count(s2, \"\")-1 {\n\t\treturn s1, s2\n\t}\n\treturn s2, s1\n}\n\n\/\/ Calculate calculates Jaro-Winkler distance of two strings. The function lowercases and sorts the parameters\n\/\/ so that that the longest string is evaluated against the shorter one.\nfunc Calculate(s1, s2 string) float64 {\n\n\ts1 = unidecode.Unidecode(s1)\n\ts2 = unidecode.Unidecode(s2)\n\n\ts1, s2 = sort(strings.ToLower(s1), strings.ToLower(s2))\n\n\t\/\/ This avoids the function to return NaN.\n\tif strings.Count(s1, \"\") == 1 || strings.Count(s2, \"\") == 1 {\n\t\treturn float64(0)\n\t}\n\n\t\/\/ m as `matching characters`\n\t\/\/ t as `transposition`\n\t\/\/ l as `the length of common prefix at the start of the string up to a maximum of 4 characters`.\n\t\/\/ See more: https:\/\/en.wikipedia.org\/wiki\/Jaro%E2%80%93Winkler_distance\n\tm := 0\n\tt := 0\n\tl := 0\n\n\twindow := math.Floor(float64(math.Max(float64(len(s1)), float64(len(s2)))\/2) - 1)\n\n\t\/\/debug:\n\t\/\/fmt.Println(\"s1:\", s1, \"s2:\", s2)\n\t\/\/fmt.Println(\"Match window:\", window)\n\t\/\/fmt.Println(\"len(s1):\", len(s1), \"len(s2):\", len(s2))\n\n\tfor i := 0; i < len(s1); i++ {\n\t\t\/\/ Exact match\n\t\tif s1[i] == s2[i] {\n\t\t\tm++\n\t\t\t\/\/ Common prefix limitter\n\t\t\tif i == l && i < 4 {\n\t\t\t\tl++\n\t\t\t}\n\t\t} else {\n\t\t\tif strings.Contains(s2, string(s1[i])) {\n\t\t\t\t\/\/ The character is also considered matching if the amount of characters between the occurances in s1 and s2\n\t\t\t\t\/\/ is less than match window\n\t\t\t\tgap := strings.Index(s2, string(s1[i])) - strings.Index(s1, string(s1[i]))\n\t\t\t\tif gap <= int(window) {\n\t\t\t\t\tm++\n\t\t\t\t\t\/\/ Check if transposition is in reach of window\n\t\t\t\t\tfor k := i; k < len(s1); k++ {\n\t\t\t\t\t\tif strings.Index(s2, string(s1[k])) <= i {\n\t\t\t\t\t\t\tt++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdistance := (float64(m)\/float64(len(s1)) + float64(m)\/float64(len(s2)) + (float64(m)-math.Floor(float64(t)\/float64(2)))\/float64(m)) \/ float64(3)\n\tjwd := distance + (float64(l) * float64(0.1) * (float64(1) - distance))\n\n\t\/\/debug:\n\t\/\/fmt.Println(\"- transpositions:\", t)\n\t\/\/fmt.Println(\"- matches:\", m)\n\t\/\/fmt.Println(\"- l:\", l)\n\t\/\/fmt.Println(jwd)\n\n\treturn jwd\n\n}\n<commit_msg>update unidecode package import path<commit_after>package jwd\n\nimport (\n\t\"math\"\n\t\"strings\"\n\n\t\"gopkgs.com\/unidecode.v1\"\n)\n\n\/\/ According to this tool: http:\/\/www.csun.edu\/english\/edit_distance.php\n\/\/ parameter order should have no difference in the result. Therefore,\n\/\/ to avoid panicing later on, we will order the strings according to\n\/\/ their length.\nfunc sort(s1, s2 string) (string, string) {\n\tif strings.Count(s1, \"\")-1 <= strings.Count(s2, \"\")-1 {\n\t\treturn s1, s2\n\t}\n\treturn s2, s1\n}\n\n\/\/ Calculate calculates Jaro-Winkler distance of two strings. The function lowercases and sorts the parameters\n\/\/ so that that the longest string is evaluated against the shorter one.\nfunc Calculate(s1, s2 string) float64 {\n\n\ts1 = unidecode.Unidecode(s1)\n\ts2 = unidecode.Unidecode(s2)\n\n\ts1, s2 = sort(strings.ToLower(s1), strings.ToLower(s2))\n\n\t\/\/ This avoids the function to return NaN.\n\tif strings.Count(s1, \"\") == 1 || strings.Count(s2, \"\") == 1 {\n\t\treturn float64(0)\n\t}\n\n\t\/\/ m as `matching characters`\n\t\/\/ t as `transposition`\n\t\/\/ l as `the length of common prefix at the start of the string up to a maximum of 4 characters`.\n\t\/\/ See more: https:\/\/en.wikipedia.org\/wiki\/Jaro%E2%80%93Winkler_distance\n\tm := 0\n\tt := 0\n\tl := 0\n\n\twindow := math.Floor(float64(math.Max(float64(len(s1)), float64(len(s2)))\/2) - 1)\n\n\t\/\/debug:\n\t\/\/fmt.Println(\"s1:\", s1, \"s2:\", s2)\n\t\/\/fmt.Println(\"Match window:\", window)\n\t\/\/fmt.Println(\"len(s1):\", len(s1), \"len(s2):\", len(s2))\n\n\tfor i := 0; i < len(s1); i++ {\n\t\t\/\/ Exact match\n\t\tif s1[i] == s2[i] {\n\t\t\tm++\n\t\t\t\/\/ Common prefix limitter\n\t\t\tif i == l && i < 4 {\n\t\t\t\tl++\n\t\t\t}\n\t\t} else {\n\t\t\tif strings.Contains(s2, string(s1[i])) {\n\t\t\t\t\/\/ The character is also considered matching if the amount of characters between the occurances in s1 and s2\n\t\t\t\t\/\/ is less than match window\n\t\t\t\tgap := strings.Index(s2, string(s1[i])) - strings.Index(s1, string(s1[i]))\n\t\t\t\tif gap <= int(window) {\n\t\t\t\t\tm++\n\t\t\t\t\t\/\/ Check if transposition is in reach of window\n\t\t\t\t\tfor k := i; k < len(s1); k++ {\n\t\t\t\t\t\tif strings.Index(s2, string(s1[k])) <= i {\n\t\t\t\t\t\t\tt++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdistance := (float64(m)\/float64(len(s1)) + float64(m)\/float64(len(s2)) + (float64(m)-math.Floor(float64(t)\/float64(2)))\/float64(m)) \/ float64(3)\n\tjwd := distance + (float64(l) * float64(0.1) * (float64(1) - distance))\n\n\t\/\/debug:\n\t\/\/fmt.Println(\"- transpositions:\", t)\n\t\/\/fmt.Println(\"- matches:\", m)\n\t\/\/fmt.Println(\"- l:\", l)\n\t\/\/fmt.Println(jwd)\n\n\treturn jwd\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 gandalf authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/gandalf\/api\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nconst version = \"0.4.1\"\n\nfunc main() {\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tconfigFile := flag.String(\"config\", \"\/etc\/gandalf.conf\", \"Gandalf configuration file\")\n\tgVersion := flag.Bool(\"version\", false, \"Print version and exit\")\n\tflag.Parse()\n\n\tif *gVersion {\n\t\tfmt.Printf(\"gandalf-webserver version %s\\n\", version)\n\t\treturn\n\t}\n\n\terr := config.ReadAndWatchConfigFile(*configFile)\n\tif err != nil {\n\t\tmsg := `Could not find gandalf config file. Searched on %s.\nFor an example conf check gandalf\/etc\/gandalf.conf file.\\n %s`\n\t\tlog.Panicf(msg, *configFile, err)\n\t}\n\trouter := pat.New()\n\trouter.Post(\"\/user\/:name\/key\", http.HandlerFunc(api.AddKey))\n\trouter.Del(\"\/user\/:name\/key\/:keyname\", http.HandlerFunc(api.RemoveKey))\n\trouter.Get(\"\/user\/:name\/keys\", http.HandlerFunc(api.ListKeys))\n\trouter.Post(\"\/user\", http.HandlerFunc(api.NewUser))\n\trouter.Del(\"\/user\/:name\", http.HandlerFunc(api.RemoveUser))\n\trouter.Post(\"\/repository\", http.HandlerFunc(api.NewRepository))\n\trouter.Post(\"\/repository\/grant\", http.HandlerFunc(api.GrantAccess))\n\trouter.Del(\"\/repository\/revoke\", http.HandlerFunc(api.RevokeAccess))\n\trouter.Del(\"\/repository\/:name\", http.HandlerFunc(api.RemoveRepository))\n\trouter.Get(\"\/repository\/:name\", http.HandlerFunc(api.GetRepository))\n\trouter.Put(\"\/repository\/:name\", http.HandlerFunc(api.RenameRepository))\n\trouter.Get(\"\/repository\/:name\/archive\/:ref.:format\", http.HandlerFunc(api.GetArchive))\n\trouter.Get(\"\/repository\/:name\/contents\", http.HandlerFunc(api.GetFileContents))\n\trouter.Get(\"\/repository\/:name\/tree\/:path\", http.HandlerFunc(api.GetTree))\n\trouter.Get(\"\/repository\/:name\/tree\", http.HandlerFunc(api.GetTree))\n\trouter.Get(\"\/repository\/:name\/branch\", http.HandlerFunc(api.GetTree))\n\trouter.Get(\"\/healthcheck\/\", http.HandlerFunc(api.HealthCheck))\n\trouter.Post(\"\/hook\/:name\", http.HandlerFunc(api.AddHook))\n\n\tbind, err := config.GetString(\"bind\")\n\tif err != nil {\n\t\tvar perr error\n\t\tbind, perr = config.GetString(\"webserver:port\")\n\t\tif perr != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(bind, router))\n\t}\n}\n<commit_msg>Main: Fixing Branch handler<commit_after>\/\/ Copyright 2014 gandalf authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/gandalf\/api\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nconst version = \"0.4.1\"\n\nfunc main() {\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server (for testing purpose)\")\n\tconfigFile := flag.String(\"config\", \"\/etc\/gandalf.conf\", \"Gandalf configuration file\")\n\tgVersion := flag.Bool(\"version\", false, \"Print version and exit\")\n\tflag.Parse()\n\n\tif *gVersion {\n\t\tfmt.Printf(\"gandalf-webserver version %s\\n\", version)\n\t\treturn\n\t}\n\n\terr := config.ReadAndWatchConfigFile(*configFile)\n\tif err != nil {\n\t\tmsg := `Could not find gandalf config file. Searched on %s.\nFor an example conf check gandalf\/etc\/gandalf.conf file.\\n %s`\n\t\tlog.Panicf(msg, *configFile, err)\n\t}\n\trouter := pat.New()\n\trouter.Post(\"\/user\/:name\/key\", http.HandlerFunc(api.AddKey))\n\trouter.Del(\"\/user\/:name\/key\/:keyname\", http.HandlerFunc(api.RemoveKey))\n\trouter.Get(\"\/user\/:name\/keys\", http.HandlerFunc(api.ListKeys))\n\trouter.Post(\"\/user\", http.HandlerFunc(api.NewUser))\n\trouter.Del(\"\/user\/:name\", http.HandlerFunc(api.RemoveUser))\n\trouter.Post(\"\/repository\", http.HandlerFunc(api.NewRepository))\n\trouter.Post(\"\/repository\/grant\", http.HandlerFunc(api.GrantAccess))\n\trouter.Del(\"\/repository\/revoke\", http.HandlerFunc(api.RevokeAccess))\n\trouter.Del(\"\/repository\/:name\", http.HandlerFunc(api.RemoveRepository))\n\trouter.Get(\"\/repository\/:name\", http.HandlerFunc(api.GetRepository))\n\trouter.Put(\"\/repository\/:name\", http.HandlerFunc(api.RenameRepository))\n\trouter.Get(\"\/repository\/:name\/archive\/:ref.:format\", http.HandlerFunc(api.GetArchive))\n\trouter.Get(\"\/repository\/:name\/contents\", http.HandlerFunc(api.GetFileContents))\n\trouter.Get(\"\/repository\/:name\/tree\/:path\", http.HandlerFunc(api.GetTree))\n\trouter.Get(\"\/repository\/:name\/tree\", http.HandlerFunc(api.GetTree))\n\trouter.Get(\"\/repository\/:name\/branch\", http.HandlerFunc(api.GetBranch))\n\trouter.Get(\"\/healthcheck\/\", http.HandlerFunc(api.HealthCheck))\n\trouter.Post(\"\/hook\/:name\", http.HandlerFunc(api.AddHook))\n\n\tbind, err := config.GetString(\"bind\")\n\tif err != nil {\n\t\tvar perr error\n\t\tbind, perr = config.GetString(\"webserver:port\")\n\t\tif perr != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tif !*dry {\n\t\tlog.Fatal(http.ListenAndServe(bind, router))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/currantlabs\/ble\"\n\t\"github.com\/currantlabs\/ble\/examples\/lib\/dev\"\n)\n\nvar (\n\tdevice = flag.String(\"device\", \"default\", \"implementation of ble\")\n\tname = flag.String(\"name\", \"Gopher\", \"name of remote peripheral\")\n\taddr = flag.String(\"addr\", \"\", \"address of remote peripheral (MAC on Linux, UUID on OS X)\")\n\tsub = flag.Duration(\"sub\", 0, \"subscribe to notification and indication for a specified period\")\n\tsd = flag.Duration(\"sd\", 5*time.Second, \"scanning duration, 0 for indefinitely\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\td, err := dev.NewDevice(*device)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't new device : %s\", err)\n\t}\n\tble.SetDefaultDevice(d)\n\n\t\/\/ Default to search device with name of Gopher (or specified by user).\n\tfilter := func(a ble.Advertisement) bool {\n\t\treturn strings.ToUpper(a.LocalName()) == strings.ToUpper(*name)\n\t}\n\n\t\/\/ If addr is specified, search for addr instead.\n\tif len(*addr) != 0 {\n\t\tfilter = func(a ble.Advertisement) bool {\n\t\t\treturn strings.ToUpper(a.Address().String()) == strings.ToUpper(*addr)\n\t\t}\n\t}\n\n\t\/\/ Scan for specified durantion, or until interrupted by user.\n\tfmt.Printf(\"Scanning for %s...\\n\", *sd)\n\tctx := ble.WithSigHandler(context.WithTimeout(context.Background(), *sd))\n\tcln, err := ble.Connect(ctx, filter)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't connect : %s\", err)\n\t}\n\n\tgo func() {\n\t\tcln.Disconnected()\n\t\tfmt.Printf(\"[ %s ] is disconnected \\n\", cln.Address())\n\t}()\n\n\tfmt.Printf(\"Discovering profile...\\n\")\n\tp, err := cln.DiscoverProfile(true)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't discover profile: %s\", err)\n\t}\n\n\t\/\/ Start the exploration.\n\texplore(cln, p)\n\n\t\/\/ Disconnect the connection. (On OS X, this might take a while.)\n\tfmt.Printf(\"Disconnecting [ %s ]... (this might take up to few seconds on OS X)\\n\", cln.Address())\n\tcln.CancelConnection()\n}\n\nfunc explore(cln ble.Client, p *ble.Profile) error {\n\tfor _, s := range p.Services {\n\t\tfmt.Printf(\" Service: %s %s, Handle (0x%02X)\\n\", s.UUID, ble.Name(s.UUID), s.Handle)\n\n\t\tfor _, c := range s.Characteristics {\n\t\t\tfmt.Printf(\" Characteristic: %s %s, Property: 0x%02X (%s), Handle(0x%02X), VHandle(0x%02X)\\n\",\n\t\t\t\tc.UUID, ble.Name(c.UUID), c.Property, propString(c.Property), c.Handle, c.ValueHandle)\n\t\t\tif (c.Property & ble.CharRead) != 0 {\n\t\t\t\tb, err := cln.ReadCharacteristic(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to read characteristic: %s\\n\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" Value %x | %q\\n\", b, b)\n\t\t\t}\n\n\t\t\tfor _, d := range c.Descriptors {\n\t\t\t\tfmt.Printf(\" Descriptor: %s %s, Handle(0x%02x)\\n\", d.UUID, ble.Name(d.UUID), d.Handle)\n\t\t\t\tb, err := cln.ReadDescriptor(d)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to read descriptor: %s\\n\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" Value %x | %q\\n\", b, b)\n\t\t\t}\n\n\t\t\tif *sub != 0 {\n\t\t\t\t\/\/ Don't bother to subscribe the Service Changed characteristics.\n\t\t\t\tif c.UUID.Equal(ble.ServiceChangedUUID) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Don't touch the Apple-specific Service\/Characteristic.\n\t\t\t\t\/\/ Service: D0611E78BBB44591A5F8487910AE4366\n\t\t\t\t\/\/ Characteristic: 8667556C9A374C9184ED54EE27D90049, Property: 0x18 (WN),\n\t\t\t\t\/\/ Descriptor: 2902, Client Characteristic Configuration\n\t\t\t\t\/\/ Value 0000 | \"\\x00\\x00\"\n\t\t\t\tif c.UUID.Equal(ble.MustParse(\"8667556C9A374C9184ED54EE27D90049\")) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif (c.Property & ble.CharNotify) != 0 {\n\t\t\t\t\tfmt.Printf(\"\\n-- Subscribe to notification for %s --\\n\", *sub)\n\t\t\t\t\th := func(req []byte) { fmt.Printf(\"Notified: %q [ % X ]\\n\", string(req), req) }\n\t\t\t\t\tif err := cln.Subscribe(c, false, h); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"subscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(*sub)\n\t\t\t\t\tif err := cln.Unsubscribe(c, false); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"unsubscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"-- Unsubscribe to notification --\\n\")\n\t\t\t\t}\n\t\t\t\tif (c.Property & ble.CharIndicate) != 0 {\n\t\t\t\t\tfmt.Printf(\"\\n-- Subscribe to indication of %s --\\n\", *sub)\n\t\t\t\t\th := func(req []byte) { fmt.Printf(\"Indicated: %q [ % X ]\\n\", string(req), req) }\n\t\t\t\t\tif err := cln.Subscribe(c, true, h); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"subscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(*sub)\n\t\t\t\t\tif err := cln.Unsubscribe(c, true); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"unsubscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"-- Unsubscribe to indication --\\n\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n\treturn nil\n}\n\nfunc propString(p ble.Property) string {\n\tvar s string\n\tfor k, v := range map[ble.Property]string{\n\t\tble.CharBroadcast: \"B\",\n\t\tble.CharRead: \"R\",\n\t\tble.CharWriteNR: \"w\",\n\t\tble.CharWrite: \"W\",\n\t\tble.CharNotify: \"N\",\n\t\tble.CharIndicate: \"I\",\n\t\tble.CharSignedWrite: \"S\",\n\t\tble.CharExtended: \"E\",\n\t} {\n\t\tif p&k != 0 {\n\t\t\ts += v\n\t\t}\n\t}\n\treturn s\n}\n\nfunc chkErr(err error) {\n\tswitch errors.Cause(err) {\n\tcase nil:\n\tcase context.DeadlineExceeded:\n\t\tfmt.Printf(\"done\\n\")\n\tcase context.Canceled:\n\t\tfmt.Printf(\"canceled\\n\")\n\tdefault:\n\t\tlog.Fatalf(err.Error())\n\t}\n}\n<commit_msg>example: update explorer to wait disconnection complete.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/currantlabs\/ble\"\n\t\"github.com\/currantlabs\/ble\/examples\/lib\/dev\"\n)\n\nvar (\n\tdevice = flag.String(\"device\", \"default\", \"implementation of ble\")\n\tname = flag.String(\"name\", \"Gopher\", \"name of remote peripheral\")\n\taddr = flag.String(\"addr\", \"\", \"address of remote peripheral (MAC on Linux, UUID on OS X)\")\n\tsub = flag.Duration(\"sub\", 0, \"subscribe to notification and indication for a specified period\")\n\tsd = flag.Duration(\"sd\", 5*time.Second, \"scanning duration, 0 for indefinitely\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\td, err := dev.NewDevice(*device)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't new device : %s\", err)\n\t}\n\tble.SetDefaultDevice(d)\n\n\t\/\/ Default to search device with name of Gopher (or specified by user).\n\tfilter := func(a ble.Advertisement) bool {\n\t\treturn strings.ToUpper(a.LocalName()) == strings.ToUpper(*name)\n\t}\n\n\t\/\/ If addr is specified, search for addr instead.\n\tif len(*addr) != 0 {\n\t\tfilter = func(a ble.Advertisement) bool {\n\t\t\treturn strings.ToUpper(a.Address().String()) == strings.ToUpper(*addr)\n\t\t}\n\t}\n\n\t\/\/ Scan for specified durantion, or until interrupted by user.\n\tfmt.Printf(\"Scanning for %s...\\n\", *sd)\n\tctx := ble.WithSigHandler(context.WithTimeout(context.Background(), *sd))\n\tcln, err := ble.Connect(ctx, filter)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't connect : %s\", err)\n\t}\n\n\t\/\/ Make sure we had the chance to print out the message.\n\tdone := make(chan struct{})\n\t\/\/ Normally, the connection is disconnected by us after our exploration.\n\t\/\/ However, it can be asynchronously disconnected by the remote peripheral.\n\t\/\/ So we wait(detect) the disconnection in the go routine.\n\tgo func() {\n\t\t<-cln.Disconnected()\n\t\tfmt.Printf(\"[ %s ] is disconnected \\n\", cln.Address())\n\t\tclose(done)\n\t}()\n\n\tfmt.Printf(\"Discovering profile...\\n\")\n\tp, err := cln.DiscoverProfile(true)\n\tif err != nil {\n\t\tlog.Fatalf(\"can't discover profile: %s\", err)\n\t}\n\n\t\/\/ Start the exploration.\n\texplore(cln, p)\n\n\t\/\/ Disconnect the connection. (On OS X, this might take a while.)\n\tfmt.Printf(\"Disconnecting [ %s ]... (this might take up to few seconds on OS X)\\n\", cln.Address())\n\tcln.CancelConnection()\n\n\t<-done\n}\n\nfunc explore(cln ble.Client, p *ble.Profile) error {\n\tfor _, s := range p.Services {\n\t\tfmt.Printf(\" Service: %s %s, Handle (0x%02X)\\n\", s.UUID, ble.Name(s.UUID), s.Handle)\n\n\t\tfor _, c := range s.Characteristics {\n\t\t\tfmt.Printf(\" Characteristic: %s %s, Property: 0x%02X (%s), Handle(0x%02X), VHandle(0x%02X)\\n\",\n\t\t\t\tc.UUID, ble.Name(c.UUID), c.Property, propString(c.Property), c.Handle, c.ValueHandle)\n\t\t\tif (c.Property & ble.CharRead) != 0 {\n\t\t\t\tb, err := cln.ReadCharacteristic(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to read characteristic: %s\\n\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" Value %x | %q\\n\", b, b)\n\t\t\t}\n\n\t\t\tfor _, d := range c.Descriptors {\n\t\t\t\tfmt.Printf(\" Descriptor: %s %s, Handle(0x%02x)\\n\", d.UUID, ble.Name(d.UUID), d.Handle)\n\t\t\t\tb, err := cln.ReadDescriptor(d)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to read descriptor: %s\\n\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" Value %x | %q\\n\", b, b)\n\t\t\t}\n\n\t\t\tif *sub != 0 {\n\t\t\t\t\/\/ Don't bother to subscribe the Service Changed characteristics.\n\t\t\t\tif c.UUID.Equal(ble.ServiceChangedUUID) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Don't touch the Apple-specific Service\/Characteristic.\n\t\t\t\t\/\/ Service: D0611E78BBB44591A5F8487910AE4366\n\t\t\t\t\/\/ Characteristic: 8667556C9A374C9184ED54EE27D90049, Property: 0x18 (WN),\n\t\t\t\t\/\/ Descriptor: 2902, Client Characteristic Configuration\n\t\t\t\t\/\/ Value 0000 | \"\\x00\\x00\"\n\t\t\t\tif c.UUID.Equal(ble.MustParse(\"8667556C9A374C9184ED54EE27D90049\")) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif (c.Property & ble.CharNotify) != 0 {\n\t\t\t\t\tfmt.Printf(\"\\n-- Subscribe to notification for %s --\\n\", *sub)\n\t\t\t\t\th := func(req []byte) { fmt.Printf(\"Notified: %q [ % X ]\\n\", string(req), req) }\n\t\t\t\t\tif err := cln.Subscribe(c, false, h); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"subscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(*sub)\n\t\t\t\t\tif err := cln.Unsubscribe(c, false); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"unsubscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"-- Unsubscribe to notification --\\n\")\n\t\t\t\t}\n\t\t\t\tif (c.Property & ble.CharIndicate) != 0 {\n\t\t\t\t\tfmt.Printf(\"\\n-- Subscribe to indication of %s --\\n\", *sub)\n\t\t\t\t\th := func(req []byte) { fmt.Printf(\"Indicated: %q [ % X ]\\n\", string(req), req) }\n\t\t\t\t\tif err := cln.Subscribe(c, true, h); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"subscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(*sub)\n\t\t\t\t\tif err := cln.Unsubscribe(c, true); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"unsubscribe failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"-- Unsubscribe to indication --\\n\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n\treturn nil\n}\n\nfunc propString(p ble.Property) string {\n\tvar s string\n\tfor k, v := range map[ble.Property]string{\n\t\tble.CharBroadcast: \"B\",\n\t\tble.CharRead: \"R\",\n\t\tble.CharWriteNR: \"w\",\n\t\tble.CharWrite: \"W\",\n\t\tble.CharNotify: \"N\",\n\t\tble.CharIndicate: \"I\",\n\t\tble.CharSignedWrite: \"S\",\n\t\tble.CharExtended: \"E\",\n\t} {\n\t\tif p&k != 0 {\n\t\t\ts += v\n\t\t}\n\t}\n\treturn s\n}\n\nfunc chkErr(err error) {\n\tswitch errors.Cause(err) {\n\tcase nil:\n\tcase context.DeadlineExceeded:\n\t\tfmt.Printf(\"done\\n\")\n\tcase context.Canceled:\n\t\tfmt.Printf(\"canceled\\n\")\n\tdefault:\n\t\tlog.Fatalf(err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gass\n\nimport (\n\t\"testing\"\n)\n\nfunc testFile(t *testing.T, file string) {\n\tif _, err := CompileFile(file, true); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestParser(t *testing.T) {\n\ttestFile(\"test\/parser.gass\", \"test\/parser.css\")\n\ttestFile(\"test\/variables.gass\", \"test\/variables.css\")\n\ttestFile(\"test\/functions.gass\", \"test\/functions.css\")\n}\n\n\/*\nfunc processExpect(path string, info os.FileInfo, err error) error {\n\tif filepath.Ext(path) == \".gass\" && !strings.HasPrefix(path, \"_\") {\n\t\tin, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tout := CompileString(string(in))\n\t\texp, err := ioutil.ReadFile(path[:len(path)-5] + \".expect\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tioutil.WriteFile(path[:len(path)-5]+\".log\", []byte(out), 0)\n\t\tif out != string(exp) {\n\t\t\treturn errors.New(\"mismatch:\" + path)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc TestExpect(t *testing.T) {\n\tif err := filepath.Walk(\"test\/expect\", processExpect); err != nil {\n\t\tt.Error(err)\n\t}\n}\n*\/\n<commit_msg>fix gass_test error<commit_after>package gass\n\nimport (\n\t\"testing\"\n)\n\nfunc testFile(t *testing.T, file string) {\n\tif _, err := CompileFile(file, true); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestParser(t *testing.T) {\n\ttestFile(t, \"test\/parser.gass\")\n\ttestFile(t, \"test\/variables.gass\")\n\ttestFile(t, \"test\/functions.gass\")\n}\n\n\/*\nfunc processExpect(path string, info os.FileInfo, err error) error {\n\tif filepath.Ext(path) == \".gass\" && !strings.HasPrefix(path, \"_\") {\n\t\tin, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tout := CompileString(string(in))\n\t\texp, err := ioutil.ReadFile(path[:len(path)-5] + \".expect\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tioutil.WriteFile(path[:len(path)-5]+\".log\", []byte(out), 0)\n\t\tif out != string(exp) {\n\t\t\treturn errors.New(\"mismatch:\" + path)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc TestExpect(t *testing.T) {\n\tif err := filepath.Walk(\"test\/expect\", processExpect); err != nil {\n\t\tt.Error(err)\n\t}\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fix (api): sql: no rows in result set on project.Load (#288)<commit_after><|endoftext|>"} {"text":"<commit_before>package mpb\n\nimport (\n\t\"container\/heap\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/vbauerster\/mpb\/v4\/cwriter\"\n)\n\nconst (\n\t\/\/ default RefreshRate\n\tprr = 120 * time.Millisecond\n\t\/\/ default width\n\tpwidth = 80\n)\n\n\/\/ Progress represents the container that renders Progress bars\ntype Progress struct {\n\tuwg *sync.WaitGroup\n\tcwg *sync.WaitGroup\n\tbwg *sync.WaitGroup\n\toperateState chan func(*pState)\n\tdone chan struct{}\n}\n\ntype pState struct {\n\tbHeap *priorityQueue\n\tshutdownPending []*Bar\n\theapUpdated bool\n\tidCounter int\n\twidth int\n\trr time.Duration\n\tpMatrix map[int][]chan int\n\taMatrix map[int][]chan int\n\tforceRefreshCh chan time.Time\n\toutput io.Writer\n\n\t\/\/ following are provided\/overrided by user\n\tctx context.Context\n\tuwg *sync.WaitGroup\n\tmanualRefreshCh <-chan time.Time\n\tshutdownNotifier chan struct{}\n\twaitBars map[*Bar]*Bar\n\tdebugOut io.Writer\n}\n\n\/\/ New creates new Progress instance, which orchestrates bars rendering\n\/\/ process. Accepts mpb.ContainerOption funcs for customization.\nfunc New(options ...ContainerOption) *Progress {\n\tpq := make(priorityQueue, 0)\n\theap.Init(&pq)\n\n\ts := &pState{\n\t\tctx: context.Background(),\n\t\tbHeap: &pq,\n\t\twidth: pwidth,\n\t\trr: prr,\n\t\twaitBars: make(map[*Bar]*Bar),\n\t\tdebugOut: ioutil.Discard,\n\t\tforceRefreshCh: make(chan time.Time),\n\t\toutput: os.Stdout,\n\t}\n\n\tfor _, opt := range options {\n\t\tif opt != nil {\n\t\t\topt(s)\n\t\t}\n\t}\n\n\tp := &Progress{\n\t\tuwg: s.uwg,\n\t\tcwg: new(sync.WaitGroup),\n\t\tbwg: new(sync.WaitGroup),\n\t\toperateState: make(chan func(*pState)),\n\t\tdone: make(chan struct{}),\n\t}\n\tp.cwg.Add(1)\n\tgo p.serve(s, cwriter.New(s.output))\n\treturn p\n}\n\n\/\/ AddBar creates a new progress bar and adds to the container.\nfunc (p *Progress) AddBar(total int64, options ...BarOption) *Bar {\n\treturn p.Add(total, newDefaultBarFiller(), options...)\n}\n\n\/\/ AddSpinner creates a new spinner bar and adds to the container.\nfunc (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {\n\tfiller := &spinnerFiller{\n\t\tframes: defaultSpinnerStyle,\n\t\talignment: alignment,\n\t}\n\treturn p.Add(total, filler, options...)\n}\n\n\/\/ Add creates a bar which renders itself by provided filler.\nfunc (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {\n\tp.bwg.Add(1)\n\tresult := make(chan *Bar)\n\tselect {\n\tcase p.operateState <- func(s *pState) {\n\t\tb := newBar(s.ctx, p.bwg, filler, s.idCounter, s.width, total, options...)\n\t\tif b.runningBar != nil {\n\t\t\ts.waitBars[b.runningBar] = b\n\t\t} else {\n\t\t\theap.Push(s.bHeap, b)\n\t\t\ts.heapUpdated = true\n\t\t}\n\t\ts.idCounter++\n\t\tresult <- b\n\t}:\n\t\treturn <-result\n\tcase <-p.done:\n\t\tp.bwg.Done()\n\t\treturn nil\n\t}\n}\n\n\/\/ Abort is only effective while bar progress is running, it means\n\/\/ remove bar now without waiting for its completion. If bar is already\n\/\/ completed, there is nothing to abort. If you need to remove bar\n\/\/ after completion, use BarRemoveOnComplete BarOption.\nfunc (p *Progress) Abort(b *Bar, remove bool) {\n\tselect {\n\tcase p.operateState <- func(s *pState) {\n\t\tif b.index < 0 {\n\t\t\treturn\n\t\t}\n\t\tif remove {\n\t\t\ts.heapUpdated = heap.Remove(s.bHeap, b.index) != nil\n\t\t}\n\t\ts.shutdownPending = append(s.shutdownPending, b)\n\t}:\n\tcase <-p.done:\n\t}\n}\n\n\/\/ UpdateBarPriority provides a way to change bar's order position.\n\/\/ Zero is highest priority, i.e. bar will be on top.\nfunc (p *Progress) UpdateBarPriority(b *Bar, priority int) {\n\tselect {\n\tcase p.operateState <- func(s *pState) { s.bHeap.update(b, priority) }:\n\tcase <-p.done:\n\t}\n}\n\n\/\/ BarCount returns bars count\nfunc (p *Progress) BarCount() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:\n\t\treturn <-result\n\tcase <-p.done:\n\t\treturn 0\n\t}\n}\n\n\/\/ Wait waits far all bars to complete and finally shutdowns container.\n\/\/ After this method has been called, there is no way to reuse *Progress\n\/\/ instance.\nfunc (p *Progress) Wait() {\n\tif p.uwg != nil {\n\t\t\/\/ wait for user wg\n\t\tp.uwg.Wait()\n\t}\n\n\t\/\/ wait for bars to quit, if any\n\tp.bwg.Wait()\n\n\tclose(p.done)\n\n\t\/\/ wait for container to quit\n\tp.cwg.Wait()\n}\n\nfunc (p *Progress) serve(s *pState, cw *cwriter.Writer) {\n\tdefer p.cwg.Done()\n\n\tmanualOrTickCh, cleanUp := s.manualOrTick()\n\tdefer cleanUp()\n\n\trefreshCh := fanInRefreshSrc(p.done, s.forceRefreshCh, manualOrTickCh)\n\n\tfor {\n\t\tselect {\n\t\tcase op := <-p.operateState:\n\t\t\top(s)\n\t\tcase _, ok := <-refreshCh:\n\t\t\tif !ok {\n\t\t\t\tif s.shutdownNotifier != nil {\n\t\t\t\t\tclose(s.shutdownNotifier)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := s.render(p.done, cw); err != nil {\n\t\t\t\tfmt.Fprintf(s.debugOut, \"[mpb] %s %v\\n\", time.Now(), err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *pState) render(done <-chan struct{}, cw *cwriter.Writer) error {\n\tif s.heapUpdated {\n\t\ts.updateSyncMatrix()\n\t\ts.heapUpdated = false\n\t}\n\tsyncWidth(s.pMatrix)\n\tsyncWidth(s.aMatrix)\n\n\ttw, err := cw.GetWidth()\n\tif err != nil {\n\t\ttw = s.width\n\t}\n\tfor i := 0; i < s.bHeap.Len(); i++ {\n\t\tbar := (*s.bHeap)[i]\n\t\tgo bar.render(s.debugOut, tw)\n\t}\n\n\treturn s.flush(done, cw)\n}\n\nfunc (s *pState) flush(done <-chan struct{}, cw *cwriter.Writer) error {\n\tvar lineCount int\n\tfor s.bHeap.Len() > 0 {\n\t\tbar := heap.Pop(s.bHeap).(*Bar)\n\t\tframe := <-bar.bFrameCh\n\t\tdefer func() {\n\t\t\tif frame.toShutdown {\n\t\t\t\t\/\/ force next refresh asap, without waiting for ticker\n\t\t\t\tgo func() {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase s.forceRefreshCh <- time.Now():\n\t\t\t\t\tcase <-done:\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\t\/\/ shutdown at next flush, in other words decrement underlying WaitGroup\n\t\t\t\t\/\/ only after the bar with completed state has been flushed. this\n\t\t\t\t\/\/ ensures no bar ends up with less than 100% rendered.\n\t\t\t\ts.shutdownPending = append(s.shutdownPending, bar)\n\t\t\t\tif replacementBar, ok := s.waitBars[bar]; ok {\n\t\t\t\t\theap.Push(s.bHeap, replacementBar)\n\t\t\t\t\ts.heapUpdated = true\n\t\t\t\t\tdelete(s.waitBars, bar)\n\t\t\t\t}\n\t\t\t\tif frame.removeOnComplete {\n\t\t\t\t\ts.heapUpdated = true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\theap.Push(s.bHeap, bar)\n\t\t}()\n\t\tcw.ReadFrom(frame.rd)\n\t\tlineCount += frame.extendedLines + 1\n\t}\n\n\tfor i := len(s.shutdownPending) - 1; i >= 0; i-- {\n\t\tclose(s.shutdownPending[i].shutdown)\n\t\ts.shutdownPending = s.shutdownPending[:i]\n\t}\n\n\treturn cw.Flush(lineCount)\n}\n\nfunc (s *pState) manualOrTick() (<-chan time.Time, func()) {\n\tif s.manualRefreshCh != nil {\n\t\treturn s.manualRefreshCh, func() {}\n\t}\n\tticker := time.NewTicker(s.rr)\n\treturn ticker.C, ticker.Stop\n}\n\nfunc (s *pState) updateSyncMatrix() {\n\ts.pMatrix = make(map[int][]chan int)\n\ts.aMatrix = make(map[int][]chan int)\n\tfor i := 0; i < s.bHeap.Len(); i++ {\n\t\tbar := (*s.bHeap)[i]\n\t\ttable := bar.wSyncTable()\n\t\tpRow, aRow := table[0], table[1]\n\n\t\tfor i, ch := range pRow {\n\t\t\ts.pMatrix[i] = append(s.pMatrix[i], ch)\n\t\t}\n\n\t\tfor i, ch := range aRow {\n\t\t\ts.aMatrix[i] = append(s.aMatrix[i], ch)\n\t\t}\n\t}\n}\n\nfunc syncWidth(matrix map[int][]chan int) {\n\tfor _, column := range matrix {\n\t\tcolumn := column\n\t\tgo func() {\n\t\t\tvar maxWidth int\n\t\t\tfor _, ch := range column {\n\t\t\t\tw := <-ch\n\t\t\t\tif w > maxWidth {\n\t\t\t\t\tmaxWidth = w\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ch := range column {\n\t\t\t\tch <- maxWidth\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc fanInRefreshSrc(done <-chan struct{}, channels ...<-chan time.Time) <-chan time.Time {\n\tvar wg sync.WaitGroup\n\tmultiplexedStream := make(chan time.Time)\n\n\tmultiplex := func(c <-chan time.Time) {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase v := <-c:\n\t\t\t\tmultiplexedStream <- v\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\twg.Add(len(channels))\n\tfor _, c := range channels {\n\t\tgo multiplex(c)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(multiplexedStream)\n\t}()\n\n\treturn multiplexedStream\n}\n<commit_msg>case bar.done<commit_after>package mpb\n\nimport (\n\t\"container\/heap\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/vbauerster\/mpb\/v4\/cwriter\"\n)\n\nconst (\n\t\/\/ default RefreshRate\n\tprr = 120 * time.Millisecond\n\t\/\/ default width\n\tpwidth = 80\n)\n\n\/\/ Progress represents the container that renders Progress bars\ntype Progress struct {\n\tuwg *sync.WaitGroup\n\tcwg *sync.WaitGroup\n\tbwg *sync.WaitGroup\n\toperateState chan func(*pState)\n\tdone chan struct{}\n}\n\ntype pState struct {\n\tbHeap *priorityQueue\n\tshutdownPending []*Bar\n\theapUpdated bool\n\tidCounter int\n\twidth int\n\trr time.Duration\n\tpMatrix map[int][]chan int\n\taMatrix map[int][]chan int\n\tforceRefreshCh chan time.Time\n\toutput io.Writer\n\n\t\/\/ following are provided\/overrided by user\n\tctx context.Context\n\tuwg *sync.WaitGroup\n\tmanualRefreshCh <-chan time.Time\n\tshutdownNotifier chan struct{}\n\twaitBars map[*Bar]*Bar\n\tdebugOut io.Writer\n}\n\n\/\/ New creates new Progress instance, which orchestrates bars rendering\n\/\/ process. Accepts mpb.ContainerOption funcs for customization.\nfunc New(options ...ContainerOption) *Progress {\n\tpq := make(priorityQueue, 0)\n\theap.Init(&pq)\n\n\ts := &pState{\n\t\tctx: context.Background(),\n\t\tbHeap: &pq,\n\t\twidth: pwidth,\n\t\trr: prr,\n\t\twaitBars: make(map[*Bar]*Bar),\n\t\tdebugOut: ioutil.Discard,\n\t\tforceRefreshCh: make(chan time.Time),\n\t\toutput: os.Stdout,\n\t}\n\n\tfor _, opt := range options {\n\t\tif opt != nil {\n\t\t\topt(s)\n\t\t}\n\t}\n\n\tp := &Progress{\n\t\tuwg: s.uwg,\n\t\tcwg: new(sync.WaitGroup),\n\t\tbwg: new(sync.WaitGroup),\n\t\toperateState: make(chan func(*pState)),\n\t\tdone: make(chan struct{}),\n\t}\n\tp.cwg.Add(1)\n\tgo p.serve(s, cwriter.New(s.output))\n\treturn p\n}\n\n\/\/ AddBar creates a new progress bar and adds to the container.\nfunc (p *Progress) AddBar(total int64, options ...BarOption) *Bar {\n\treturn p.Add(total, newDefaultBarFiller(), options...)\n}\n\n\/\/ AddSpinner creates a new spinner bar and adds to the container.\nfunc (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {\n\tfiller := &spinnerFiller{\n\t\tframes: defaultSpinnerStyle,\n\t\talignment: alignment,\n\t}\n\treturn p.Add(total, filler, options...)\n}\n\n\/\/ Add creates a bar which renders itself by provided filler.\nfunc (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {\n\tp.bwg.Add(1)\n\tresult := make(chan *Bar)\n\tselect {\n\tcase p.operateState <- func(s *pState) {\n\t\tb := newBar(s.ctx, p.bwg, filler, s.idCounter, s.width, total, options...)\n\t\tif b.runningBar != nil {\n\t\t\ts.waitBars[b.runningBar] = b\n\t\t} else {\n\t\t\theap.Push(s.bHeap, b)\n\t\t\ts.heapUpdated = true\n\t\t}\n\t\ts.idCounter++\n\t\tresult <- b\n\t}:\n\t\treturn <-result\n\tcase <-p.done:\n\t\tp.bwg.Done()\n\t\treturn nil\n\t}\n}\n\n\/\/ Abort is only effective while bar progress is running, it means\n\/\/ remove bar now without waiting for its completion. If bar is already\n\/\/ completed, there is nothing to abort. If you need to remove bar\n\/\/ after completion, use BarRemoveOnComplete BarOption.\nfunc (p *Progress) Abort(b *Bar, remove bool) {\n\tselect {\n\tcase p.operateState <- func(s *pState) {\n\t\tif b.index < 0 {\n\t\t\treturn\n\t\t}\n\t\tif remove {\n\t\t\ts.heapUpdated = heap.Remove(s.bHeap, b.index) != nil\n\t\t}\n\t\ts.shutdownPending = append(s.shutdownPending, b)\n\t}:\n\tcase <-p.done:\n\t}\n}\n\n\/\/ UpdateBarPriority provides a way to change bar's order position.\n\/\/ Zero is highest priority, i.e. bar will be on top.\nfunc (p *Progress) UpdateBarPriority(b *Bar, priority int) {\n\tselect {\n\tcase p.operateState <- func(s *pState) { s.bHeap.update(b, priority) }:\n\tcase <-p.done:\n\t}\n}\n\n\/\/ BarCount returns bars count\nfunc (p *Progress) BarCount() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:\n\t\treturn <-result\n\tcase <-p.done:\n\t\treturn 0\n\t}\n}\n\n\/\/ Wait waits far all bars to complete and finally shutdowns container.\n\/\/ After this method has been called, there is no way to reuse *Progress\n\/\/ instance.\nfunc (p *Progress) Wait() {\n\tif p.uwg != nil {\n\t\t\/\/ wait for user wg\n\t\tp.uwg.Wait()\n\t}\n\n\t\/\/ wait for bars to quit, if any\n\tp.bwg.Wait()\n\n\tclose(p.done)\n\n\t\/\/ wait for container to quit\n\tp.cwg.Wait()\n}\n\nfunc (p *Progress) serve(s *pState, cw *cwriter.Writer) {\n\tdefer p.cwg.Done()\n\n\tmanualOrTickCh, cleanUp := s.manualOrTick()\n\tdefer cleanUp()\n\n\trefreshCh := fanInRefreshSrc(p.done, s.forceRefreshCh, manualOrTickCh)\n\n\tfor {\n\t\tselect {\n\t\tcase op := <-p.operateState:\n\t\t\top(s)\n\t\tcase _, ok := <-refreshCh:\n\t\t\tif !ok {\n\t\t\t\tif s.shutdownNotifier != nil {\n\t\t\t\t\tclose(s.shutdownNotifier)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := s.render(cw); err != nil {\n\t\t\t\tfmt.Fprintf(s.debugOut, \"[mpb] %s %v\\n\", time.Now(), err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *pState) render(cw *cwriter.Writer) error {\n\tif s.heapUpdated {\n\t\ts.updateSyncMatrix()\n\t\ts.heapUpdated = false\n\t}\n\tsyncWidth(s.pMatrix)\n\tsyncWidth(s.aMatrix)\n\n\ttw, err := cw.GetWidth()\n\tif err != nil {\n\t\ttw = s.width\n\t}\n\tfor i := 0; i < s.bHeap.Len(); i++ {\n\t\tbar := (*s.bHeap)[i]\n\t\tgo bar.render(s.debugOut, tw)\n\t}\n\n\treturn s.flush(cw)\n}\n\nfunc (s *pState) flush(cw *cwriter.Writer) error {\n\tvar lineCount int\n\tfor s.bHeap.Len() > 0 {\n\t\tbar := heap.Pop(s.bHeap).(*Bar)\n\t\tframe := <-bar.bFrameCh\n\t\tdefer func() {\n\t\t\tif frame.toShutdown {\n\t\t\t\tgo func() {\n\t\t\t\t\t\/\/ force next refresh, so it will be triggered either by ticker or by\n\t\t\t\t\t\/\/ this goroutine, whichever comes first\n\t\t\t\t\tselect {\n\t\t\t\t\tcase s.forceRefreshCh <- time.Now():\n\t\t\t\t\tcase <-bar.done:\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\t\/\/ shutdown at next flush, in other words decrement underlying WaitGroup\n\t\t\t\t\/\/ only after the bar with completed state has been flushed. this\n\t\t\t\t\/\/ ensures no bar ends up with less than 100% rendered.\n\t\t\t\ts.shutdownPending = append(s.shutdownPending, bar)\n\t\t\t\tif replacementBar, ok := s.waitBars[bar]; ok {\n\t\t\t\t\theap.Push(s.bHeap, replacementBar)\n\t\t\t\t\ts.heapUpdated = true\n\t\t\t\t\tdelete(s.waitBars, bar)\n\t\t\t\t}\n\t\t\t\tif frame.removeOnComplete {\n\t\t\t\t\ts.heapUpdated = true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\theap.Push(s.bHeap, bar)\n\t\t}()\n\t\tcw.ReadFrom(frame.rd)\n\t\tlineCount += frame.extendedLines + 1\n\t}\n\n\tfor i := len(s.shutdownPending) - 1; i >= 0; i-- {\n\t\tclose(s.shutdownPending[i].shutdown)\n\t\ts.shutdownPending = s.shutdownPending[:i]\n\t}\n\n\treturn cw.Flush(lineCount)\n}\n\nfunc (s *pState) manualOrTick() (<-chan time.Time, func()) {\n\tif s.manualRefreshCh != nil {\n\t\treturn s.manualRefreshCh, func() {}\n\t}\n\tticker := time.NewTicker(s.rr)\n\treturn ticker.C, ticker.Stop\n}\n\nfunc (s *pState) updateSyncMatrix() {\n\ts.pMatrix = make(map[int][]chan int)\n\ts.aMatrix = make(map[int][]chan int)\n\tfor i := 0; i < s.bHeap.Len(); i++ {\n\t\tbar := (*s.bHeap)[i]\n\t\ttable := bar.wSyncTable()\n\t\tpRow, aRow := table[0], table[1]\n\n\t\tfor i, ch := range pRow {\n\t\t\ts.pMatrix[i] = append(s.pMatrix[i], ch)\n\t\t}\n\n\t\tfor i, ch := range aRow {\n\t\t\ts.aMatrix[i] = append(s.aMatrix[i], ch)\n\t\t}\n\t}\n}\n\nfunc syncWidth(matrix map[int][]chan int) {\n\tfor _, column := range matrix {\n\t\tcolumn := column\n\t\tgo func() {\n\t\t\tvar maxWidth int\n\t\t\tfor _, ch := range column {\n\t\t\t\tw := <-ch\n\t\t\t\tif w > maxWidth {\n\t\t\t\t\tmaxWidth = w\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ch := range column {\n\t\t\t\tch <- maxWidth\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc fanInRefreshSrc(done <-chan struct{}, channels ...<-chan time.Time) <-chan time.Time {\n\tvar wg sync.WaitGroup\n\tmultiplexedStream := make(chan time.Time)\n\n\tmultiplex := func(c <-chan time.Time) {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase v := <-c:\n\t\t\t\tmultiplexedStream <- v\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\twg.Add(len(channels))\n\tfor _, c := range channels {\n\t\tgo multiplex(c)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(multiplexedStream)\n\t}()\n\n\treturn multiplexedStream\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/xml\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/geo-stanciu\/go-utils\/utils\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t_ \"github.com\/denisenkom\/go-mssqldb\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t_ \"github.com\/lib\/pq\"\n\t\/\/_ \"github.com\/mattn\/go-oci8\"\n)\n\nvar (\n\tappName = \"GoExchRates\"\n\tappVersion = \"0.0.0.2\"\n\tlog = logrus.New()\n\taudit = utils.AuditLog{}\n\tdb *sql.DB\n\tdbUtils *utils.DbUtils\n\tconfig = Configuration{}\n)\n\nfunc init() {\n\t\/\/ Log as JSON instead of the default ASCII formatter.\n\tlog.Formatter = new(logrus.JSONFormatter)\n\tlog.Level = logrus.DebugLevel\n\n\tdbUtils = new(utils.DbUtils)\n}\n\n\/\/ Rate - Exchange rate struct\ntype Rate struct {\n\tCurrency string `xml:\"currency,attr\"`\n\tMultiplier string `xml:\"multiplier,attr\"`\n\tRate string `xml:\",chardata\"`\n}\n\n\/\/ Cube - colection of exchange rates\ntype Cube struct {\n\tDate string `xml:\"date,attr\"`\n\tRate []Rate\n}\n\n\/\/ ParseSourceStream - Parse Source Stream\ntype ParseSourceStream func(source io.Reader) error\n\nfunc main() {\n\tvar err error\n\tvar wg sync.WaitGroup\n\n\tcfgFile := \".\/conf.json\"\n\terr = config.ReadFromFile(cfgFile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\terr = dbUtils.Connect2Database(&db, config.DbType, config.DbURL)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\taudit.SetLogger(appName+\"\/\"+appVersion, log, dbUtils)\n\taudit.SetWaitGroup(&wg)\n\tdefer audit.Close()\n\n\tmw := io.MultiWriter(os.Stdout, audit)\n\tlog.Out = mw\n\n\terr = prepareCurrencies()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tif len(os.Args) >= 2 {\n\t\terr = getStreamFromFile(os.Args[1], parseXMLSource)\n\t} else {\n\t\terr = getStreamFromURL(config.RatesXMLUrl, parseXMLSource)\n\t}\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\taudit.Log(nil, \"import exchange rates\", \"Import done.\")\n\twg.Wait()\n}\n\nfunc getStreamFromURL(url string, callback ParseSourceStream) error {\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\terr = callback(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc getStreamFromFile(filename string, callback ParseSourceStream) error {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = callback(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc parseXMLSource(source io.Reader) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tdecoder := xml.NewDecoder(source)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"Cube\" {\n\t\t\t\tvar cube Cube\n\t\t\t\tdecoder.DecodeElement(&cube, &se)\n\n\t\t\t\terr := storeRates(tx, cube)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ttx.Commit()\n\n\treturn nil\n}\n\nfunc addCurrencyIfNotExists(tx *sql.Tx, currency string) (int32, error) {\n\tvar currencyID int32\n\n\tpq := dbUtils.PQuery(`\n\t\tSELECT currency_id FROM currency WHERE currency = ?\n\t`, currency)\n\n\terr := tx.QueryRow(pq.Query, pq.Args...).Scan(¤cyID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn -1, err\n\t} else if currencyID > 0 {\n\t\treturn currencyID, nil\n\t}\n\n\tpq = dbUtils.PQuery(`\n\t\tINSERT INTO currency (currency) VALUES (?)\n\t`, currency)\n\n\t_, err = dbUtils.ExecTx(tx, pq)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn addCurrencyIfNotExists(tx, currency)\n}\n\nfunc prepareCurrencies() error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\trefCurrencyID, err := addCurrencyIfNotExists(tx, \"RON\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"EUR\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"USD\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"CHF\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = storeRate(tx, \"1970-01-01\", refCurrencyID, \"RON\", 1.0, 1.0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx.Commit()\n\n\treturn nil\n}\n\nfunc storeRates(tx *sql.Tx, cube Cube) error {\n\tvar err error\n\tvar refCurrencyID int32\n\n\taudit.Log(nil, \"import exchange rates\", \"Importing exchange rates...\", \"data\", cube.Date)\n\n\tpq := dbUtils.PQuery(`\n\t\tSELECT currency_id FROM currency WHERE currency = ?\n\t`, \"RON\")\n\n\terr = tx.QueryRow(pq.Query, pq.Args...).Scan(&refCurrencyID)\n\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\t}\n\n\tfor _, rate := range cube.Rate {\n\t\tmultiplier := 1.0\n\t\texchRate := 1.0\n\n\t\tif rate.Multiplier == \"-\" {\n\t\t\tmultiplier = 1.0\n\t\t} else if len(rate.Multiplier) > 0 {\n\t\t\tmultiplier, err = strconv.ParseFloat(rate.Multiplier, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif rate.Rate == \"-\" {\n\t\t\tcontinue\n\t\t} else {\n\t\t\texchRate, err = strconv.ParseFloat(rate.Rate, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = storeRate(tx, cube.Date, refCurrencyID, rate.Currency, multiplier, exchRate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc storeRate(tx *sql.Tx, date string, refCurrencyID int32, currency string, multiplier float64, exchRate float64) error {\n\tvar found bool\n\tvar currencyID int32\n\tvar err error\n\trate := exchRate \/ multiplier\n\n\tif config.AddMissingCurrencies {\n\t\tcurrencyID, err = addCurrencyIfNotExists(tx, currency)\n\t} else {\n\t\t\/\/ get\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpq := dbUtils.PQuery(`\n\t\tSELECT CASE WHEN EXISTS (\n\t\t\tSELECT 1\n\t\t\tFROM exchange_rate \n\t\t WHERE currency_id = ? \n\t\t AND exchange_date = DATE ?\n\t\t) THEN 1 ELSE 0 END\n\t\tFROM dual\n\t`, currencyID,\n\t\tdate)\n\n\terr = tx.QueryRow(pq.Query, pq.Args...).Scan(&found)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !found {\n\t\tpq = dbUtils.PQuery(`\n\t\t\tINSERT INTO exchange_rate (\n\t\t\t\treference_currency_id,\n\t\t\t\tcurrency_id,\n\t\t\t\texchange_date,\n\t\t\t\trate\n\t\t\t)\n\t\t\tVALUES (?, ?, DATE ?, ?)\n\t\t`, refCurrencyID,\n\t\t\tcurrencyID,\n\t\t\tdate,\n\t\t\trate)\n\n\t\t_, err = dbUtils.ExecTx(tx, pq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\taudit.Log(nil,\n\t\t\t\"import exchange rates\",\n\t\t\t\"add exchange rate\",\n\t\t\t\"data\", date,\n\t\t\t\"currency\", currency,\n\t\t\t\"rate\", rate)\n\t}\n\n\treturn nil\n}\n<commit_msg>finish with getting new currencies in get exchange rates<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/xml\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/geo-stanciu\/go-utils\/utils\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t_ \"github.com\/denisenkom\/go-mssqldb\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t_ \"github.com\/lib\/pq\"\n\t\/\/_ \"github.com\/mattn\/go-oci8\"\n)\n\nvar (\n\tappName = \"GoExchRates\"\n\tappVersion = \"0.0.0.2\"\n\tlog = logrus.New()\n\taudit = utils.AuditLog{}\n\tdb *sql.DB\n\tdbUtils *utils.DbUtils\n\tconfig = Configuration{}\n)\n\nfunc init() {\n\t\/\/ Log as JSON instead of the default ASCII formatter.\n\tlog.Formatter = new(logrus.JSONFormatter)\n\tlog.Level = logrus.DebugLevel\n\n\tdbUtils = new(utils.DbUtils)\n}\n\n\/\/ Rate - Exchange rate struct\ntype Rate struct {\n\tCurrency string `xml:\"currency,attr\"`\n\tMultiplier string `xml:\"multiplier,attr\"`\n\tRate string `xml:\",chardata\"`\n}\n\n\/\/ Cube - colection of exchange rates\ntype Cube struct {\n\tDate string `xml:\"date,attr\"`\n\tRate []Rate\n}\n\n\/\/ ParseSourceStream - Parse Source Stream\ntype ParseSourceStream func(source io.Reader) error\n\nfunc main() {\n\tvar err error\n\tvar wg sync.WaitGroup\n\n\tcfgFile := \".\/conf.json\"\n\terr = config.ReadFromFile(cfgFile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\terr = dbUtils.Connect2Database(&db, config.DbType, config.DbURL)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\taudit.SetLogger(appName+\"\/\"+appVersion, log, dbUtils)\n\taudit.SetWaitGroup(&wg)\n\tdefer audit.Close()\n\n\tmw := io.MultiWriter(os.Stdout, audit)\n\tlog.Out = mw\n\n\terr = prepareCurrencies()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tif len(os.Args) >= 2 {\n\t\terr = getStreamFromFile(os.Args[1], parseXMLSource)\n\t} else {\n\t\terr = getStreamFromURL(config.RatesXMLUrl, parseXMLSource)\n\t}\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\taudit.Log(nil, \"import exchange rates\", \"Import done.\")\n\twg.Wait()\n}\n\nfunc getStreamFromURL(url string, callback ParseSourceStream) error {\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\terr = callback(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc getStreamFromFile(filename string, callback ParseSourceStream) error {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = callback(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc parseXMLSource(source io.Reader) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tdecoder := xml.NewDecoder(source)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"Cube\" {\n\t\t\t\tvar cube Cube\n\t\t\t\tdecoder.DecodeElement(&cube, &se)\n\n\t\t\t\terr := storeRates(tx, cube)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ttx.Commit()\n\n\treturn nil\n}\n\nfunc getCurrencyIfExists(tx *sql.Tx, currency string) (int32, error) {\n\tvar currencyID int32\n\n\tpq := dbUtils.PQuery(`\n\t\tSELECT currency_id FROM currency WHERE currency = ?\n\t`, currency)\n\n\terr := tx.QueryRow(pq.Query, pq.Args...).Scan(¤cyID)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn -1, err\n\t}\n\treturn currencyID, nil\n}\n\nfunc addCurrencyIfNotExists(tx *sql.Tx, currency string) (int32, error) {\n\tcurrencyID, err := getCurrencyIfExists(tx, currency)\n\tif err != nil {\n\t\treturn -1, err\n\t} else if currencyID > 0 {\n\t\treturn currencyID, nil\n\t}\n\n\tpq := dbUtils.PQuery(`\n\t\tINSERT INTO currency (currency) VALUES (?)\n\t`, currency)\n\n\t_, err = dbUtils.ExecTx(tx, pq)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\taudit.Log(nil, \"add currency\", \"Adding missing currency...\", \"currency\", currency)\n\n\treturn addCurrencyIfNotExists(tx, currency)\n}\n\nfunc prepareCurrencies() error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\trefCurrencyID, err := addCurrencyIfNotExists(tx, \"RON\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"EUR\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"USD\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = addCurrencyIfNotExists(tx, \"CHF\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = storeRate(tx, \"1970-01-01\", refCurrencyID, \"RON\", 1.0, 1.0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx.Commit()\n\n\treturn nil\n}\n\nfunc storeRates(tx *sql.Tx, cube Cube) error {\n\tvar err error\n\tvar refCurrencyID int32\n\n\taudit.Log(nil, \"exchange rates\", \"Importing exchange rates...\", \"date\", cube.Date)\n\n\trefCurrencyID, err = addCurrencyIfNotExists(tx, \"RON\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, rate := range cube.Rate {\n\t\tmultiplier := 1.0\n\t\texchRate := 1.0\n\n\t\tif rate.Multiplier == \"-\" {\n\t\t\tmultiplier = 1.0\n\t\t} else if len(rate.Multiplier) > 0 {\n\t\t\tmultiplier, err = strconv.ParseFloat(rate.Multiplier, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif rate.Rate == \"-\" {\n\t\t\tcontinue\n\t\t} else {\n\t\t\texchRate, err = strconv.ParseFloat(rate.Rate, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = storeRate(tx, cube.Date, refCurrencyID, rate.Currency, multiplier, exchRate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc storeRate(tx *sql.Tx, date string, refCurrencyID int32, currency string, multiplier float64, exchRate float64) error {\n\tvar found bool\n\tvar currencyID int32\n\tvar err error\n\trate := exchRate \/ multiplier\n\n\tif config.AddMissingCurrencies {\n\t\tcurrencyID, err = addCurrencyIfNotExists(tx, currency)\n\t} else {\n\t\tcurrencyID, err = getCurrencyIfExists(tx, currency)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpq := dbUtils.PQuery(`\n\t\tSELECT CASE WHEN EXISTS (\n\t\t\tSELECT 1\n\t\t\tFROM exchange_rate \n\t\t WHERE currency_id = ? \n\t\t AND exchange_date = DATE ?\n\t\t) THEN 1 ELSE 0 END\n\t\tFROM dual\n\t`, currencyID,\n\t\tdate)\n\n\terr = tx.QueryRow(pq.Query, pq.Args...).Scan(&found)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !found {\n\t\tpq = dbUtils.PQuery(`\n\t\t\tINSERT INTO exchange_rate (\n\t\t\t\treference_currency_id,\n\t\t\t\tcurrency_id,\n\t\t\t\texchange_date,\n\t\t\t\trate\n\t\t\t)\n\t\t\tVALUES (?, ?, DATE ?, ?)\n\t\t`, refCurrencyID,\n\t\t\tcurrencyID,\n\t\t\tdate,\n\t\t\trate)\n\n\t\t_, err = dbUtils.ExecTx(tx, pq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\taudit.Log(nil,\n\t\t\t\"add exchange rate\",\n\t\t\t\"added value\",\n\t\t\t\"data\", date,\n\t\t\t\"currency\", currency,\n\t\t\t\"rate\", rate)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/vbauerster\/mpb\"\n)\n\nfunc main() {\n\t\/\/ Star mpb's rendering goroutine.\n\t\/\/ If you don't plan to cancel, feed with nil\n\t\/\/ otherwise provide context.Context, see cancel example\n\tp := mpb.New(nil)\n\t\/\/ Set custom width for every bar, which mpb will contain\n\t\/\/ The default one in 70\n\tp.SetWidth(80)\n\t\/\/ Set custom format for every bar, the default one is \"[=>-]\"\n\tp.Format(\"╢▌▌░╟\")\n\t\/\/ Set custom refresh rate, the default one is 100 ms\n\tp.RefreshRate(120 * time.Millisecond)\n\n\t\/\/ Add a bar. You're not limited to just one bar, add many if you need.\n\tbar := p.AddBar(100).PrependName(\"Single Bar:\", 0).AppendPercentage()\n\n\tfor i := 0; i < 100; i++ {\n\t\tbar.Incr(1) \/\/ increment progress bar\n\t\ttime.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)\n\t}\n\n\t\/\/ Don't forget to stop mpb's rendering goroutine\n\tp.Stop()\n\n\t\/\/ You cannot add bars after p.Stop() has been called\n\t\/\/ p.AddBar(100) \/\/ will panic\n}\n<commit_msg>refactor: singleBar<commit_after>package main\n\nimport (\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/vbauerster\/mpb\"\n)\n\nfunc main() {\n\t\/\/ Star mpb's rendering goroutine.\n\t\/\/ If you don't plan to cancel, feed with nil\n\t\/\/ otherwise provide context.Context, see cancel example\n\tp := mpb.New(nil)\n\t\/\/ Set custom width for every bar, which mpb will contain\n\t\/\/ The default one in 70\n\tp.SetWidth(80)\n\t\/\/ Set custom format for every bar, the default one is \"[=>-]\"\n\tp.Format(\"╢▌▌░╟\")\n\t\/\/ Set custom refresh rate, the default one is 100 ms\n\tp.RefreshRate(120 * time.Millisecond)\n\n\t\/\/ Add a bar. You're not limited to just one bar, add many if you need.\n\tbar := p.AddBar(100).PrependName(\"Single Bar:\", 0, 0).AppendPercentage(5, 0)\n\n\tfor i := 0; i < 100; i++ {\n\t\tbar.Incr(1) \/\/ increment progress bar\n\t\ttime.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)\n\t}\n\n\t\/\/ Don't forget to stop mpb's rendering goroutine\n\tp.Stop()\n\n\t\/\/ You cannot add bars after p.Stop() has been called\n\t\/\/ p.AddBar(100) \/\/ will panic\n}\n<|endoftext|>"} {"text":"<commit_before>package main\nimport (\n\t\"net\"\n\t\"crypto\/rc4\"\n\t\"fmt\"\n)\n\n\/\/ PWClient объект игрового клиента низкого уровня\ntype PWClient struct {\n\tlogin\t\t\tstring\n\tpassword\t\tstring\n\tuid \t\t\t[]byte\n\tuid2\t\t\t[]byte\n\ttoken \t\t\t[]byte\n\tconn \t\t\tnet.Conn\n\tkey \t\t\t[]byte\n\tisLoginCompleted\tbool\n\tloginResult\t\t\tbool\n\trc4encode\t\t\t[]byte\n\trc4decode\t\t\t[]byte\n\trccipherenc\t\t\t*rc4.Cipher\n\trccipherdec\t\t\t*rc4.Cipher\n\tmppc\t\t\t\t*MPPC\n\taccountkey\t\t\t[]byte\n\tunkIDOnlineAnnounce []byte\n\tunkDataOnlineAnnounce []byte\n}\n\n\/\/ newPWClient создает новый игровой клиент низкого уровня\nfunc newPWClient() *PWClient {\n\tpw := &PWClient{}\n\treturn pw\n}\n\n\/\/ Connect метод подключается к игровому серверу\nfunc (pw *PWClient) Connect(server, login, pass string) error {\n\tpw.login = login\n\tpw.password = pass\n\n\t\/\/ отправить запрос серверу mail.ru и получить уиды и токен\n\tuid,uid2,token,err := getLoginPass(login, pass)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpw.uid = []byte(uid)\n\tpw.uid2 = []byte(uid2)\n\tpw.token = []byte(token)\n\n\tconnectViaSocks := 0\t\/\/ флаг определяющий соединенеие с игровым сервером (напрямую, через прокси-сокет)\n\tvar conn net.Conn\n\tif connectViaSocks == 1 {\n\t\tfmt.Println(\"Connect to socks.\")\n\t\tconn, err = dialSocks4(SOCKS4, \"localhost:30000\", server)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"socks error:\", err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tconn, err = net.Dial(\"tcp\", server)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"dial error:\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tpw.conn = conn\n\n\tgo getFromServer(pw)\n\n\treturn nil\n\n}<commit_msg>Добавлены обработчики приема и отправки данных между ботом и сервером Добавлены обработчики пакетов 1, 2, 4 и 8f<commit_after>package main\nimport (\n\t\"net\"\n\t\"crypto\/rc4\"\n\t\"fmt\"\n\t\"strings\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"net\/http\"\n\t\"bytes\"\n\t\"strconv\"\n\t\"io\/ioutil\"\n\t\"encoding\/xml\"\n\t\"encoding\/binary\"\n\t\"io\"\n)\n\n\/\/ PWClient объект игрового клиента низкого уровня\ntype PWClient struct {\n\tlogin\t\t\tstring\n\tpassword\t\tstring\n\tuid \t\t\t[]byte\n\tuid2\t\t\t[]byte\n\ttoken \t\t\t[]byte\n\tconn \t\t\tnet.Conn\n\tkey \t\t\t[]byte\n\tisLoginCompleted\tbool\n\tloginResult\t\t\tbool\n\trc4encode\t\t\t[]byte\n\trc4decode\t\t\t[]byte\n\trccipherenc\t\t\t*rc4.Cipher\n\trccipherdec\t\t\t*rc4.Cipher\n\tmppc\t\t\t\t*MPPC\n\taccountkey\t\t\t[]byte\n\tunkIDOnlineAnnounce []byte\n\tunkDataOnlineAnnounce []byte\n}\n\n\/\/ newPWClient создает новый игровой клиент низкого уровня\nfunc newPWClient() *PWClient {\n\tpw := &PWClient{}\n\treturn pw\n}\n\n\/\/ Connect метод подключается к игровому серверу\nfunc (pw *PWClient) Connect(server, login, pass string) error {\n\tpw.login = login\n\tpw.password = pass\n\n\t\/\/ отправить запрос серверу mail.ru и получить уиды и токен\n\tuid,uid2,token,err := getLoginAndPass(login, pass)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpw.uid = []byte(uid)\n\tpw.uid2 = []byte(uid2)\n\tpw.token = []byte(token)\n\n\tconnectViaSocks := 0\t\/\/ флаг определяющий соединенеие с игровым сервером (напрямую, через прокси-сокет)\n\tvar conn net.Conn\n\tif connectViaSocks == 1 {\n\t\tfmt.Println(\"Connect to socks.\")\n\t\tconn, err = dialSocks4(SOCKS4, \"localhost:30000\", server)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"socks error:\", err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tconn, err = net.Dial(\"tcp\", server)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"dial error:\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tpw.conn = conn\n\n\tgo getFromServer(pw)\n\n\treturn nil\n\n}\n\n\/\/ getLoginAndPass функция отправляет запрос на сайт mail.ru и получает уиды и токен для авторизации на игровом сайте\nfunc getLoginAndPass(email, pass string) (string, string, string, error) {\n\tsplit := strings.Split(email, \"@\")\n\tif len(split) < 2 {\n\t\treturn \"\", \"\", \"\", errors.New(\"Неправильный e-mail.\")\n\t}\n\n\tdomain := split[1]\n\n\tmailDomains := []string{\"mail.ru\", \"inbox.ru\", \"bk.ru\", \"list.ru\"}\n\tfoundMailDomains := false\n\tfor _, v := range mailDomains {\n\t\tif v == domain {\n\t\t\tfoundMailDomains = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar uid, uid2, token string\n\n\tif foundMailDomains == true {\n\t\tfmt.Println(\"Found mail domain\")\n\/\/\t\tTODO обработка ситуации, когда e-mail заведен в mail.ru\n\t} else {\n\t\tapiURL := \"http:\/\/authdl.mail.ru\"\n\t\tresource := \"\/sz.php\"\n\t\tdata := url.Values{}\n\t\tparams := fmt.Sprintf(`<?xml version=\"1.0\" encoding=\"UTF-8\"?><AutoLogin ProjectId=\"61\" SubProjectId=\"0\" ShardId=\"0\" Username=\"%s\" Password=\"%s\"\/>`, email, pass)\n\t\tu, _ := url.ParseRequestURI(apiURL)\n\t\tu.Path = resource\n\t\turlStr := fmt.Sprintf(\"%v\", u)\n\n\t\tclient := &http.Client{}\n\t\tr, _ := http.NewRequest(\"POST\", urlStr, bytes.NewBufferString(params)) \/\/ <-- URL-encoded payload\n\t\tr.Header.Add(\"User-Agent\", \"Downloader\/4260\")\n\t\tr.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\t\tr.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\tresp, _ := client.Do(r)\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\n\t\ttype AuthPers struct {\n\t\t\tXMLName xml.Name `xml:\"AutoLogin\"`\n\t\t\tUID2 string `xml:\"PersId,attr\"`\n\t\t\tToken string `xml:\"Key,attr\"`\n\t\t}\n\t\tvar q AuthPers\n\t\txml.Unmarshal(body, &q)\n\t\tuid2 = q.UID2\n\t\ttoken = q.Token\n\n\t\t\/\/\n\t\tparams = fmt.Sprintf(`<?xml version=\"1.0\" encoding=\"UTF-8\"?><PersList ProjectId=\"61\" SubProjectId=\"0\" ShardId=\"0\" Username=\"%s\" Password=\"%s\"\/>`, email, pass)\n\t\tr, _ = http.NewRequest(\"POST\", urlStr, bytes.NewBufferString(params)) \/\/ <-- URL-encoded payload\n\t\tr.Header.Add(\"User-Agent\", \"Downloader\/4260\")\n\t\tr.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\t\tr.Header.Add(\"Accept-Encoding\", \"identity\")\n\t\tresp, _ = client.Do(r)\n\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\t\/\/fmt.Println(string(body))\n\n\t\ttype Pers struct {\n\t\t\tXMLName xml.Name `xml:\"Pers\"`\n\t\t\tID string `xml:\"Id,attr\"`\n\t\t\tTitle string `xml:\"Title,attr\"`\n\t\t\tCli string `xml:\"Cli,attr\"`\n\t\t}\n\n\t\ttype PersList struct {\n\t\t\tXMLName xml.Name `xml:\"PersList\"`\n\t\t\tPersID string `xml:\"PersId,attr\"`\n\t\t\tPersList []Pers `xml:\"Pers\"`\n\t\t}\n\n\t\tvar q1 PersList\n\t\txml.Unmarshal(body, &q1)\n\t\tif len(q1.PersList) == 0 {\n\t\t\treturn \"\", \"\", \"\", errors.New(\"У учетной записи нет игровых аккаунтов.\")\n\t\t}\n\t\tuid = q1.PersList[0].ID\n\n\t}\n\n\t\/\/fmt.Println(\"Ответ от сервера\", uid, uid2, token)\n\treturn uid, uid2, token, nil\n}\n\n\/\/ getFromServer Обработчик данных от сервера. Получает данные и вызывает обработчики пакетов\nfunc getFromServer(pwclient *PWClient) {\n\t\/\/ если выходим, то закрываем соединение\n\tdefer pwclient.conn.Close()\n\n\tconn := pwclient.conn\n\tinBuf := make([]byte, 1024)\n\tfor {\n\n\t\tn, err := conn.Read(inBuf[0:])\n\t\tif err != nil && err != io.EOF {\n\t\t\tpanic(fmt.Sprintf(\"Ошибка при получении данных из соединения. %v\", err))\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tpanic(fmt.Sprintf(\"Ошибка при получении данных из соединения. %v. bufer = %x\", err, inBuf))\n\t\t}\n\t\tif n < 2 {\n\t\t\tpanic(fmt.Sprintf(\"Пакет 0-ой длины.\"))\n\t\t}\n\t\tbuf := make([]byte, n)\n\t\tcopy(buf, inBuf)\n\n\t\tif pwclient.isLoginCompleted {\n\t\t\tfmt.Printf(\"До дешифрования (получение). Длина буфера = %d, [% X]\\n\", len(buf), buf)\n\t\t\tbufsrc := make([]byte, len(buf))\n\t\t\tcopy(bufsrc, buf)\n\t\t\tpwclient.rccipherdec.XORKeyStream(buf, bufsrc)\n\t\t\tfmt.Printf(\"После дешифрования (получение). Длина буфера = %d, [% X]\\n\", len(buf), buf)\n\t\t\tbufsrc = make([]byte, len(buf))\n\t\t\tcopy(bufsrc, buf)\n\t\t\tbuf = pwclient.mppc.Unpack(bufsrc)\n\t\t\tfmt.Printf(\"После распаковки. Длина буфера = %d, [% X]\\n\", len(buf), buf)\n\n\t\t}\n\n\t\treader := bytes.NewBuffer(buf)\n\t\tvar firstbyte, secondbyte byte\n\t\tvar kod, length uint16\n\n\t\tfirstbyte, _ = reader.ReadByte()\n\t\tfmt.Printf(\"firstbyte (1) = %x\\n\", firstbyte)\n\t\tvar src []byte\n\t\tif firstbyte >= 0x80 {\n\t\t\tfirstbyte -= 0x80\n\t\t\tfmt.Printf(\"firstbyte (2) = %x\\n\", firstbyte)\n\t\t\tsecondbyte, _ = reader.ReadByte()\n\t\t\tfmt.Printf(\"secondbyte = %x\\n\", secondbyte)\n\t\t\tsrc = append(src, firstbyte, secondbyte)\n\t\t\tkod = binary.BigEndian.Uint16(src)\n\t\t} else {\n\t\t\tkod = uint16(firstbyte)\n\t\t}\n\n\t\tfmt.Printf(\"kod = %x\\n\", kod)\n\n\t\t\/\/kod := buf[0] \/\/ код пакета\n\t\t\/\/var length uint16\n\n\t\tfirstbyte = 0\n\t\tsecondbyte = 0\n\n\t\tsrc = make([]byte, 0)\n\t\tfirstbyte, _ = reader.ReadByte()\n\t\tif firstbyte >= 0x80 {\n\t\t\tfirstbyte -= 0x80\n\t\t\tsecondbyte, _ = reader.ReadByte()\n\t\t\tsrc = append(src, firstbyte, secondbyte)\n\t\t\tlength = binary.BigEndian.Uint16(src)\n\t\t} else {\n\t\t\tlength = uint16(firstbyte)\n\t\t}\n\n\t\t\/\/if kod == 1 || kod == 2 || kod == 4 {\n\t\t\/\/\tlength = int(buf[1]) + 2 \/\/ длина пакета\n\n\t\tif int(length) < reader.Len() {\n\t\t\tpanic(fmt.Sprintf(\"Значение длины пакета больше, чем фактическая длина пакета.\"))\n\t\t}\n\t\t\/\/}\n\n\t\tswitch kod {\n\t\tcase 1: \/\/ 0x01 ServerInfo\n\t\t\tfmt.Println(\"Код 1. ServerInfo\", buf)\n\t\t\tpacketServerInfo(pwclient, reader.Bytes())\n\n\t\tcase 2:\n\t\t\tfmt.Println(\"Код 2. SMKey\", buf)\n\t\t\tpacketSMKey(pwclient, reader.Bytes())\n\t\tcase 4:\n\t\t\tfmt.Println(\"Код 4. OnlineAnnounce\", buf)\n\t\t\tpacketOnlineAnnounce(pwclient, reader.Bytes())\n\t\tcase 0x8f:\n\t\t\tfmt.Println(\"Код 0x8f. LastLogin\", buf)\n\t\tcase 0x53:\n\t\t\tfmt.Println(\"Код 0x53. LastLogin\", buf)\n\t\t\t\/\/packetRoleListRe(pwclient, reader.Bytes())\n\t\tdefault:\n\t\t\tfmt.Println(\"default\", kod, reader.Len(), reader.Bytes())\n\t\t}\n\n\t}\n}\n\nfunc sendToServer(pwclient *PWClient, pkt []byte) error {\n\tconn := pwclient.conn\n\tfmt.Printf(\"До шифрования (отправка). [% X]\\n\", pkt)\n\tif pwclient.isLoginCompleted {\n\t\tpktsrc := pkt\n\t\tpwclient.rccipherenc.XORKeyStream(pkt, pktsrc)\n\t\tfmt.Printf(\"После шифрования (отправка). [% X]\\n\", pkt)\n\t}\n\t_, err := conn.Write(pkt)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn nil\n}\n\n\/\/ packetServerInfo обработчик пакета ServerInfo\nfunc packetServerInfo(pwclient *PWClient, pkt []byte) {\n\tif pwclient.key != nil {\n\t\treturn\n\t}\n\tkeylen := pkt[0]\n\tkey := pkt[1:keylen]\n\tpwclient.key = key\n\tfmt.Printf(\"%v, %v, % X\\n\", pkt, key, key)\n\tsendLogginAnnounce(pwclient)\n}\n\n\/\/ sendLoggingAnnounce Подготовка пакета LoggingAnnounce для отправки серверу\nfunc sendLogginAnnounce(pwclient *PWClient) {\n\thash := pwclient.token\n\n\t\/\/\thmacsha256 := hmac.New(sha256.New, pwclient.key)\n\t\/\/\thmacsha256.Write(append(pwclient.uid, pwclient.uid2...))\n\t\/\/\tresult1 := hmacsha256.Sum(nil)\n\t\/\/\tresult2 := hex.EncodeToString(result1)\n\t\/\/\tresult3 := []byte(result2)\n\t\/\/\thash := result3\n\t\/\/\tfmt.Printf(\"[% X]\\n\", pwclient.token)\n\t\/\/\tfmt.Printf(\"[% X]\\n\", hash)\n\n\tuid := pwclient.uid\n\n\tvar data []byte\n\tdata = append(data, uint8ToSlicebyte(uint8(len(uid)))...)\n\tdata = append(data, []byte(uid)...)\n\tdata = append(data, uint8ToSlicebyte(uint8(len(hash)))...)\n\tdata = append(data, hash...)\n\tdata = append(data, 0x02, 0x04, 0xFF, 0xFF, 0xFF, 0xFF)\n\n\tvar pkt []byte\n\tpkt = append(pkt, byte(3))\n\tpkt = append(pkt, uint8ToSlicebyte(uint8(len(data)))...)\n\tpkt = append(pkt, data...)\n\n\terr := sendToServer(pwclient, pkt)\n\tif err != nil {\n\n\t}\n}\n\nfunc packetSMKey(pwclient *PWClient, pkt []byte) {\n\tfmt.Println(pkt)\n\tencHashLen := pkt[0]\n\tencHash := pkt[1 : encHashLen+1]\n\tforce := pkt[encHashLen+1]\n\tfmt.Printf(\"%v\\n\", pkt)\n\tfmt.Printf(\"%v, %v, %v\\n\", encHashLen, encHash, force)\n\n\tpwclient.isLoginCompleted = true\n\tpwclient.loginResult = true\n\n\t\/\/ дек хэш\n\tdecHash, err := randomNextBytes(16)\n\t\/\/decHash := []byte{0x4D, 0x64, 0x6E, 0xD2, 0xCF, 0x4B, 0xB1, 0x5B, 0x7B, 0xB3, 0x70, 0x7B, 0x46, 0x10, 0x5C, 0xEB}\n\t\/\/err := error(nil)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error: %v\", err))\n\t}\n\n\trc4encode := getRC4Key(encHash, pwclient.uid, pwclient.token)\n\trc4decode := getRC4Key(decHash, pwclient.uid, pwclient.token)\n\n\tpwclient.rc4encode = rc4encode\n\tpwclient.rc4decode = rc4decode\n\n\trccipher, _ := rc4.NewCipher(rc4encode)\n\tpwclient.rccipherenc = rccipher\n\trccipher, _ = rc4.NewCipher(rc4decode)\n\tpwclient.rccipherdec = rccipher\n\n\tmppc := newMPPC()\n\tpwclient.mppc = mppc\n\n\t\/\/fmt.Printf(\"packetSMKey: [% X], [% X], [% X], [% X]\\n\", rc4encode, rc4decode, encode.m_Table, decode.m_Table)\n\terr = sendCMKey(pwclient, decHash)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Ошибка при отправке пакета CMKey\"))\n\t}\n}\n\nfunc sendCMKey(pwclient *PWClient, decHash []byte) error {\n\tforce := 1\n\tvar data []byte\n\tdata = append(data, uint8ToSlicebyte(uint8(len(decHash)))...)\n\tdata = append(data, decHash...)\n\tdata = append(data, byte(force))\n\n\tvar pkt []byte\n\tpkt = append(pkt, byte(2))\n\tpkt = append(pkt, uint8ToSlicebyte(uint8(len(data)))...)\n\tpkt = append(pkt, data...)\n\n\terr := sendToServer(pwclient, pkt)\n\tif err != nil {\n\n\t}\n\treturn nil\n}\n\nfunc packetOnlineAnnounce(pwclient *PWClient, pkt []byte) {\n\taccountkey := pkt[:4]\n\t\/\/unkID := pkt[4:8]\n\t\/\/unkData := pkt[8:]\n\n\tpwclient.accountkey = accountkey\n\n\terr := sendRoleList(pwclient, []byte{0xFF, 0xFF, 0xFF, 0xFF})\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Ошибка при отправке пакета RoleList\"))\n\t}\n}\n\nfunc sendRoleList(pwclient *PWClient, slot []byte) error {\n\tvar data []byte\n\tdata = append(data, pwclient.accountkey...)\n\tdata = append(data, []byte{0, 0, 0, 0}...)\n\tdata = append(data, slot...)\n\n\tvar pkt []byte\n\tpkt = append(pkt, byte(0x52))\n\tpkt = append(pkt, uint8ToSlicebyte(uint8(len(data)))...)\n\tpkt = append(pkt, data...)\n\n\terr := sendToServer(pwclient, pkt)\n\tif err != nil {\n\n\t}\n\treturn nil\n}\n\nfunc uint8ToSlicebyte(num uint8) []byte {\n\n\tbuf := new(bytes.Buffer)\n\terr := binary.Write(buf, binary.LittleEndian, num)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tresult := buf.Bytes()\n\treturn result[:]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !static\n\npackage inspectors\n\nimport (\n\t\"flag\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/mitchellh\/cli\"\n\n\tinspector \"github.com\/osrg\/namazu\/nmz\/inspector\/ethernet\"\n)\n\ntype etherFlags struct {\n\tcommonFlags\n\tHookSwitchZMQAddr string\n\tNFQNumber int\n}\n\nvar (\n\tetherFlagset = flag.NewFlagSet(\"ethernet\", flag.ExitOnError)\n\t_etherFlags = etherFlags{}\n)\n\nfunc init() {\n\tinitCommon(etherFlagset, &_etherFlags.commonFlags, \"_namazu_ethernet_inspector\")\n\tetherFlagset.StringVar(&_etherFlags.HookSwitchZMQAddr, \"hookswitch\",\n\t\t\"ipc:\/\/\/tmp\/namazu-hookswitch-zmq\", \"HookSwitch ZeroMQ addr\")\n\tetherFlagset.IntVar(&_etherFlags.NFQNumber, \"nfq-number\",\n\t\t-1, \"netfilter_queue number\")\n}\n\ntype etherCmd struct {\n}\n\nfunc EtherCommandFactory() (cli.Command, error) {\n\treturn etherCmd{}, nil\n}\n\nfunc (cmd etherCmd) Help() string {\n\treturn \"Please run `nmz --help inspectors` instead\"\n}\n\nfunc (cmd etherCmd) Synopsis() string {\n\treturn \"Start Ethernet inspector\"\n}\n\nfunc (cmd etherCmd) Run(args []string) int {\n\tif err := etherFlagset.Parse(args); err != nil {\n\t\tlog.Critical(err)\n\t\treturn 1\n\t}\n\n\tuseHookSwitch := _etherFlags.NFQNumber < 0\n\n\tif useHookSwitch && _etherFlags.HookSwitchZMQAddr == \"\" {\n\t\tlog.Critical(\"hookswitch is invalid\")\n\t\treturn 1\n\t}\n\tif !useHookSwitch && _etherFlags.NFQNumber > 0xFFFF {\n\t\tlog.Critical(\"nfq-number is invalid\")\n\t\treturn 1\n\t}\n\n\tautopilot, err := conditionalStartAutopilotOrchestrator(_etherFlags.commonFlags)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\treturn 1\n\t}\n\tlog.Infof(\"Autopilot-mode: %t\", autopilot)\n\n\tvar etherInspector inspector.EthernetInspector\n\tif useHookSwitch {\n\t\tlog.Infof(\"Using hookswitch %s\", _etherFlags.HookSwitchZMQAddr)\n\t\tetherInspector = &inspector.HookSwitchInspector{\n\t\t\tOrchestratorURL: _etherFlags.OrchestratorURL,\n\t\t\tEntityID: _etherFlags.EntityID,\n\t\t\tHookSwitchZMQAddr: _etherFlags.HookSwitchZMQAddr,\n\t\t\tEnableTCPWatcher: true,\n\t\t}\n\t} else {\n\t\tlog.Infof(\"Using NFQ %s\", _etherFlags.HookSwitchZMQAddr)\n\t\tetherInspector = &inspector.NFQInspector{\n\t\t\tOrchestratorURL: _etherFlags.OrchestratorURL,\n\t\t\tEntityID: _etherFlags.EntityID,\n\t\t\tNFQNumber: uint16(_etherFlags.NFQNumber),\n\t\t\tEnableTCPWatcher: true,\n\t\t}\n\t}\n\n\tif err := etherInspector.Serve(); err != nil {\n\t\tpanic(log.Critical(err))\n\t}\n\n\t\/\/ NOTREACHED\n\treturn 0\n}\n<commit_msg>cli: not print zmq addr in netfilter mode (#176)<commit_after>\/\/ Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !static\n\npackage inspectors\n\nimport (\n\t\"flag\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/mitchellh\/cli\"\n\n\tinspector \"github.com\/osrg\/namazu\/nmz\/inspector\/ethernet\"\n)\n\ntype etherFlags struct {\n\tcommonFlags\n\tHookSwitchZMQAddr string\n\tNFQNumber int\n}\n\nvar (\n\tetherFlagset = flag.NewFlagSet(\"ethernet\", flag.ExitOnError)\n\t_etherFlags = etherFlags{}\n)\n\nfunc init() {\n\tinitCommon(etherFlagset, &_etherFlags.commonFlags, \"_namazu_ethernet_inspector\")\n\tetherFlagset.StringVar(&_etherFlags.HookSwitchZMQAddr, \"hookswitch\",\n\t\t\"ipc:\/\/\/tmp\/namazu-hookswitch-zmq\", \"HookSwitch ZeroMQ addr\")\n\tetherFlagset.IntVar(&_etherFlags.NFQNumber, \"nfq-number\",\n\t\t-1, \"netfilter_queue number\")\n}\n\ntype etherCmd struct {\n}\n\nfunc EtherCommandFactory() (cli.Command, error) {\n\treturn etherCmd{}, nil\n}\n\nfunc (cmd etherCmd) Help() string {\n\treturn \"Please run `nmz --help inspectors` instead\"\n}\n\nfunc (cmd etherCmd) Synopsis() string {\n\treturn \"Start Ethernet inspector\"\n}\n\nfunc (cmd etherCmd) Run(args []string) int {\n\tif err := etherFlagset.Parse(args); err != nil {\n\t\tlog.Critical(err)\n\t\treturn 1\n\t}\n\n\tuseHookSwitch := _etherFlags.NFQNumber < 0\n\n\tif useHookSwitch && _etherFlags.HookSwitchZMQAddr == \"\" {\n\t\tlog.Critical(\"hookswitch is invalid\")\n\t\treturn 1\n\t}\n\tif !useHookSwitch && _etherFlags.NFQNumber > 0xFFFF {\n\t\tlog.Critical(\"nfq-number is invalid\")\n\t\treturn 1\n\t}\n\n\tautopilot, err := conditionalStartAutopilotOrchestrator(_etherFlags.commonFlags)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\treturn 1\n\t}\n\tlog.Infof(\"Autopilot-mode: %t\", autopilot)\n\n\tvar etherInspector inspector.EthernetInspector\n\tif useHookSwitch {\n\t\tlog.Infof(\"Using hookswitch %s\", _etherFlags.HookSwitchZMQAddr)\n\t\tetherInspector = &inspector.HookSwitchInspector{\n\t\t\tOrchestratorURL: _etherFlags.OrchestratorURL,\n\t\t\tEntityID: _etherFlags.EntityID,\n\t\t\tHookSwitchZMQAddr: _etherFlags.HookSwitchZMQAddr,\n\t\t\tEnableTCPWatcher: true,\n\t\t}\n\t} else {\n\t\tlog.Infof(\"Using NFQ %d\", _etherFlags.NFQNumber)\n\t\tetherInspector = &inspector.NFQInspector{\n\t\t\tOrchestratorURL: _etherFlags.OrchestratorURL,\n\t\t\tEntityID: _etherFlags.EntityID,\n\t\t\tNFQNumber: uint16(_etherFlags.NFQNumber),\n\t\t\tEnableTCPWatcher: true,\n\t\t}\n\t}\n\n\tif err := etherInspector.Serve(); err != nil {\n\t\tpanic(log.Critical(err))\n\t}\n\n\t\/\/ NOTREACHED\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package apns\r\n\r\nimport (\r\n\t\"bytes\"\r\n\t\"crypto\/tls\"\r\n\t\"encoding\/binary\"\r\n\t\"errors\"\r\n\t\"github.com\/zhangpeihao\/log\"\r\n\t\"net\"\r\n\t\"time\"\r\n)\r\n\r\nconst (\r\n\tMIN_QUEUE_SIZE = 8\r\n)\r\n\r\nvar (\r\n\tLOG_HEADERS = []string{\"Out\", \"Out_E\"}\r\n\tlogger *log.Logger\r\n\tREAD_TIMEOUT = time.Second * 3600\r\n)\r\n\r\nvar (\r\n\tErrClosed = errors.New(\"Closed\")\r\n\tErrBlocked = errors.New(\"Blocked\")\r\n\tErrTimeout = errors.New(\"Timeout\")\r\n)\r\n\r\nfunc InitLog(l *log.Logger) {\r\n\tlogger = l\r\n\tlogger.Println(\"InitLog()\")\r\n}\r\n\r\ntype Conn struct {\r\n\tc *tls.Conn\r\n\tsendTimeout time.Duration\r\n\texit bool\r\n}\r\n\r\nfunc Dial(serverAddress string, cert []tls.Certificate,\r\n\tsendTimeout time.Duration) (c *Conn, err error) {\r\n\tvar conn net.Conn\r\n\tif conn, err = net.DialTimeout(\"tcp\", serverAddress, sendTimeout); err != nil {\r\n\t\treturn\r\n\t}\r\n\ttlsConn := tls.Client(conn, &tls.Config{\r\n\t\tCertificates: cert,\r\n\t\tInsecureSkipVerify: true,\r\n\t})\r\n\tif err = tlsConn.SetWriteDeadline(time.Now().Add(sendTimeout)); err != nil {\r\n\t\treturn\r\n\t}\r\n\thandshakeChan := make(chan bool, 1)\r\n\tgo func(ch chan<- bool) {\r\n\t\tlogger.Debugln(\"apns.Dial() Handshake\")\r\n\t\tif err = tlsConn.Handshake(); err != nil {\r\n\t\t\tlogger.Debugln(\"apnd.Dial() Handshake failed\")\r\n\t\t\tch <- false\r\n\t\t\treturn\r\n\t\t}\r\n\t\tlogger.Debugln(\"apns.Dial() Handshake success\")\r\n\t\tch <- true\r\n\t}(handshakeChan)\r\n\tselect {\r\n\tcase b := <-handshakeChan:\r\n\t\tif !b {\r\n\t\t\treturn\r\n\t\t}\r\n\tcase <-time.After(time.Second * time.Duration(5)):\r\n\t\tlogger.Debugln(\"apnd.Dial() Handshake timeout\")\r\n\t\tconn.Close()\r\n\t\ttlsConn.Close()\r\n\t\terr = ErrTimeout\r\n\t\treturn\r\n\t}\r\n\tc = &Conn{\r\n\t\tc: tlsConn,\r\n\t\tsendTimeout: sendTimeout,\r\n\t}\r\n\r\n\tgo c.readLoop()\r\n\treturn\r\n}\r\n\r\nfunc (c *Conn) Closed() bool {\r\n\treturn c.exit\r\n}\r\n\r\nfunc (c *Conn) Close() {\r\n\tc.exit = true\r\n\tc.c.Close()\r\n}\r\n\r\nfunc (c *Conn) readLoop() {\r\n\t\/\/\tvar err error\r\n\tif err := c.c.SetReadDeadline(time.Unix(9999999999, 0)); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::readLoop() SetReadDeadline err:\", err)\r\n\t\tc.Close()\r\n\t\treturn\r\n\t}\r\n\tbuf := make([]byte, 6)\r\n\tfor !c.exit {\r\n\t\t\/\/ read response\r\n\t\tif n, err := c.c.Read(buf); err != nil {\r\n\t\t\tnetErr, ok := err.(net.Error)\r\n\t\t\tif ok && netErr.Temporary() {\r\n\t\t\t\ttime.Sleep(time.Second)\r\n\t\t\t} else {\r\n\t\t\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\t\t\tlogger.Debugln(\"apns.Conn::readLoop() Read err:\", err)\r\n\t\t\t\tif n > 0 {\r\n\t\t\t\t\tlogger.Debugf(\"APNS read %02X\\n\", buf)\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tlogger.Debugf(\"APNS read %02X\\n\", buf)\r\n\t\t}\r\n\t}\r\n\tc.Close()\r\n}\r\n\r\nfunc (c *Conn) Send(data []byte) (err error) {\r\n\tif c.exit {\r\n\t\treturn ErrClosed\r\n\t}\r\n\tif err = c.c.SetWriteDeadline(time.Now().Add(c.sendTimeout)); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::Send() SetWriteDeadline err:\", err)\r\n\t\treturn\r\n\t}\r\n\tlogger.Debugf(\"sendLoop() data: % 02X\\n\", data)\r\n\tif _, err = c.c.Write(data); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::Send() Write err:\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\tlogger.Add(\"Out\", int64(1))\r\n\treturn\r\n}\r\n\r\nfunc (c *Conn) SendMessage(deviceToken []byte, message []byte) (err error) {\r\n\tbuf := new(bytes.Buffer)\r\n\tif _, err = buf.Write([]byte{0, 0, 32}); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(len(message))); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(message); err != nil {\r\n\t\treturn\r\n\t}\r\n\treturn c.Send(buf.Bytes())\r\n}\r\n<commit_msg>Append version 1 and 2<commit_after>package apns\r\n\r\nimport (\r\n\t\"bytes\"\r\n\t\"crypto\/tls\"\r\n\t\"encoding\/binary\"\r\n\t\"errors\"\r\n\t\"github.com\/zhangpeihao\/log\"\r\n\t\"net\"\r\n\t\"time\"\r\n)\r\n\r\nconst (\r\n\tMIN_QUEUE_SIZE = 8\r\n)\r\n\r\nvar (\r\n\tLOG_HEADERS = []string{\"Out\", \"Out_E\"}\r\n\tlogger *log.Logger\r\n\tREAD_TIMEOUT = time.Second * 3600\r\n)\r\n\r\nvar (\r\n\tErrClosed = errors.New(\"Closed\")\r\n\tErrBlocked = errors.New(\"Blocked\")\r\n\tErrTimeout = errors.New(\"Timeout\")\r\n)\r\n\r\nfunc InitLog(l *log.Logger) {\r\n\tlogger = l\r\n\tlogger.Println(\"InitLog()\")\r\n}\r\n\r\ntype Conn struct {\r\n\tc *tls.Conn\r\n\tsendTimeout time.Duration\r\n\texit bool\r\n}\r\n\r\nfunc Dial(serverAddress string, cert []tls.Certificate,\r\n\tsendTimeout time.Duration) (c *Conn, err error) {\r\n\tvar conn net.Conn\r\n\tif conn, err = net.DialTimeout(\"tcp\", serverAddress, sendTimeout); err != nil {\r\n\t\treturn\r\n\t}\r\n\ttlsConn := tls.Client(conn, &tls.Config{\r\n\t\tCertificates: cert,\r\n\t\tInsecureSkipVerify: true,\r\n\t})\r\n\tif err = tlsConn.SetWriteDeadline(time.Now().Add(sendTimeout)); err != nil {\r\n\t\treturn\r\n\t}\r\n\thandshakeChan := make(chan bool, 1)\r\n\tgo func(ch chan<- bool) {\r\n\t\tlogger.Debugln(\"apns.Dial() Handshake\")\r\n\t\tif err = tlsConn.Handshake(); err != nil {\r\n\t\t\tlogger.Debugln(\"apnd.Dial() Handshake failed\")\r\n\t\t\tch <- false\r\n\t\t\treturn\r\n\t\t}\r\n\t\tlogger.Debugln(\"apns.Dial() Handshake success\")\r\n\t\tch <- true\r\n\t}(handshakeChan)\r\n\tselect {\r\n\tcase b := <-handshakeChan:\r\n\t\tif !b {\r\n\t\t\treturn\r\n\t\t}\r\n\tcase <-time.After(time.Second * time.Duration(5)):\r\n\t\tlogger.Debugln(\"apnd.Dial() Handshake timeout\")\r\n\t\tconn.Close()\r\n\t\ttlsConn.Close()\r\n\t\terr = ErrTimeout\r\n\t\treturn\r\n\t}\r\n\tc = &Conn{\r\n\t\tc: tlsConn,\r\n\t\tsendTimeout: sendTimeout,\r\n\t}\r\n\r\n\tgo c.readLoop()\r\n\treturn\r\n}\r\n\r\nfunc (c *Conn) Closed() bool {\r\n\treturn c.exit\r\n}\r\n\r\nfunc (c *Conn) Close() {\r\n\tc.exit = true\r\n\tc.c.Close()\r\n}\r\n\r\nfunc (c *Conn) readLoop() {\r\n\t\/\/\tvar err error\r\n\tif err := c.c.SetReadDeadline(time.Unix(9999999999, 0)); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::readLoop() SetReadDeadline err:\", err)\r\n\t\tc.Close()\r\n\t\treturn\r\n\t}\r\n\tbuf := make([]byte, 6, 6)\r\n\tfor !c.exit {\r\n\t\t\/\/ read response\r\n\t\tif n, err := c.c.Read(buf); err != nil {\r\n\t\t\tnetErr, ok := err.(net.Error)\r\n\t\t\tif ok && netErr.Temporary() {\r\n\t\t\t\ttime.Sleep(time.Second)\r\n\t\t\t} else {\r\n\t\t\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\t\t\tlogger.Debugln(\"apns.Conn::readLoop() Read err:\", err)\r\n\t\t\t\tif n > 0 {\r\n\t\t\t\t\tlogger.Debugf(\"APNS read %02X\\n\", buf)\r\n\t\t\t\t}\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tlogger.Debugf(\"APNS read %02X\\n\", buf)\r\n\t\t}\r\n\t}\r\n\tc.Close()\r\n}\r\n\r\nfunc (c *Conn) Send(data []byte) (err error) {\r\n\tif c.exit {\r\n\t\treturn ErrClosed\r\n\t}\r\n\tif err = c.c.SetWriteDeadline(time.Now().Add(c.sendTimeout)); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::Send() SetWriteDeadline err:\", err)\r\n\t\treturn\r\n\t}\r\n\tlogger.Debugf(\"sendLoop() data: % 02X\\n\", data)\r\n\tif _, err = c.c.Write(data); err != nil {\r\n\t\tlogger.Add(\"Out_E\", int64(1))\r\n\t\tlogger.Warningln(\"apns.Conn::Send() Write err:\", err)\r\n\t\treturn\r\n\t}\r\n\r\n\tlogger.Add(\"Out\", int64(1))\r\n\treturn\r\n}\r\n\r\nfunc (c *Conn) SendMessage(deviceToken []byte, message []byte) (err error) {\r\n\tbuf := new(bytes.Buffer)\r\n\tif _, err = buf.Write([]byte{0, 0, 32}); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(len(message))); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(message); err != nil {\r\n\t\treturn\r\n\t}\r\n\treturn c.Send(buf.Bytes())\r\n}\r\n\r\nfunc (c *Conn) SendMessage1(deviceToken []byte, message []byte) (err error) {\r\n\tbuf := new(bytes.Buffer)\r\n\tif err = buf.WriteByte(byte(1)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken[:4]); err != nil {\r\n\t\treturn\r\n\t}\r\n\texpiration := uint32(time.Now().Unix()) + 7*24*3600\r\n\tif err = binary.Write(buf, binary.BigEndian, expiration); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(len(deviceToken))); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(len(message))); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(message); err != nil {\r\n\t\treturn\r\n\t}\r\n\treturn c.Send(buf.Bytes())\r\n}\r\n\r\nfunc (c *Conn) SendMessage2(deviceToken []byte, message []byte) (err error) {\r\n\tbuf := new(bytes.Buffer)\r\n\tif err = buf.WriteByte(byte(2)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tmsglen := len(message)\r\n\tframelen := uint32(msglen+3) + (32 + 3) + (4 + 3) + (4 + 3)\r\n\tif err = binary.Write(buf, binary.BigEndian, framelen); err != nil {\r\n\t\treturn\r\n\t}\r\n\t\/\/ Token\r\n\tif err = buf.WriteByte(byte(1)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(32)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken); err != nil {\r\n\t\treturn\r\n\t}\r\n\t\/\/ Payload\r\n\tif err = buf.WriteByte(byte(2)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(msglen)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(message); err != nil {\r\n\t\treturn\r\n\t}\r\n\t\/\/ Notification identifier\r\n\tif err = buf.WriteByte(byte(3)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(4)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif _, err = buf.Write(deviceToken[:4]); err != nil {\r\n\t\treturn\r\n\t}\r\n\t\/\/ Expiration\r\n\texpiration := uint32(time.Now().Unix()) + 7*24*3600\r\n\tif err = buf.WriteByte(byte(4)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, uint16(4)); err != nil {\r\n\t\treturn\r\n\t}\r\n\tif err = binary.Write(buf, binary.BigEndian, expiration); err != nil {\r\n\t\treturn\r\n\t}\r\n\treturn c.Send(buf.Bytes())\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package gfx\n\nimport (\n\t\"math\"\n\n\t\"github.com\/tanema\/amore\/gfx\/gl\"\n)\n\ntype vertexBuffer struct {\n\tisBound bool \/\/ Whether the buffer is currently bound.\n\tusage Usage \/\/ Usage hint. GL_[DYNAMIC, STATIC, STREAM]_DRAW.\n\tvbo gl.Buffer \/\/ The VBO identifier. Assigned by OpenGL.\n\tdata []float32 \/\/ A pointer to mapped memory.\n\tmodifiedOffset int\n\tmodifiedSize int\n}\n\nfunc newVertexBuffer(size int, data []float32, usage Usage) *vertexBuffer {\n\tnewBuffer := &vertexBuffer{\n\t\tusage: usage,\n\t\tdata: make([]float32, size),\n\t}\n\tif len(data) > 0 {\n\t\tcopy(newBuffer.data, data[:size])\n\t}\n\tregisterVolatile(newBuffer)\n\treturn newBuffer\n}\n\nfunc (buffer *vertexBuffer) bufferStatic() {\n\tif buffer.modifiedSize == 0 {\n\t\treturn\n\t}\n\t\/\/ Upload the mapped data to the buffer.\n\tgl.BufferSubData(gl.ARRAY_BUFFER, buffer.modifiedOffset*4, buffer.modifiedSize*4, gl.Ptr(&buffer.data[buffer.modifiedOffset]))\n}\n\nfunc (buffer *vertexBuffer) bufferStream() {\n\t\/\/ \"orphan\" current buffer to avoid implicit synchronisation on the GPU:\n\t\/\/ http:\/\/www.seas.upenn.edu\/~pcozzi\/OpenGLInsights\/OpenGLInsights-AsynchronousBufferTransfers.pdf\n\tgl.BufferData(gl.ARRAY_BUFFER, len(buffer.data)*4, gl.Ptr(nil), uint32(buffer.usage))\n\tgl.BufferData(gl.ARRAY_BUFFER, len(buffer.data)*4, gl.Ptr(buffer.data), uint32(buffer.usage))\n}\n\nfunc (buffer *vertexBuffer) bufferData() {\n\tif buffer.modifiedSize != 0 { \/\/if there is no modified size might as well do the whole buffer\n\t\tbuffer.modifiedOffset = int(math.Min(float64(buffer.modifiedOffset), float64(len(buffer.data)-1)))\n\t\tbuffer.modifiedSize = int(math.Min(float64(buffer.modifiedSize), float64(len(buffer.data)-buffer.modifiedOffset)))\n\t} else {\n\t\tbuffer.modifiedOffset = 0\n\t\tbuffer.modifiedSize = len(buffer.data)\n\t}\n\n\tbuffer.bind()\n\tif buffer.modifiedSize > 0 {\n\t\tswitch buffer.usage {\n\t\tcase UsageStatic:\n\t\t\tbuffer.bufferStatic()\n\t\tcase UsageStream:\n\t\t\tbuffer.bufferStream()\n\t\tcase UsageDynamic:\n\t\t\t\/\/ It's probably more efficient to treat it like a streaming buffer if\n\t\t\t\/\/ at least a third of its contents have been modified during the map().\n\t\t\tif buffer.modifiedSize >= len(buffer.data)\/3 {\n\t\t\t\tbuffer.bufferStream()\n\t\t\t} else {\n\t\t\t\tbuffer.bufferStatic()\n\t\t\t}\n\t\t}\n\t}\n\tbuffer.modifiedOffset = 0\n\tbuffer.modifiedSize = 0\n}\n\nfunc (buffer *vertexBuffer) bind() {\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buffer.vbo)\n\tbuffer.isBound = true\n}\n\nfunc (buffer *vertexBuffer) unbind() {\n\tif buffer.isBound {\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, gl.Buffer{})\n\t}\n\tbuffer.isBound = false\n}\n\nfunc (buffer *vertexBuffer) fill(offset int, data []float32) {\n\tcopy(buffer.data[offset:], data[:])\n\tif !buffer.vbo.Valid() {\n\t\treturn\n\t}\n\t\/\/ We're being conservative right now by internally marking the whole range\n\t\/\/ from the start of section a to the end of section b as modified if both\n\t\/\/ a and b are marked as modified.\n\toldRangeEnd := buffer.modifiedOffset + buffer.modifiedSize\n\tbuffer.modifiedOffset = int(math.Min(float64(buffer.modifiedOffset), float64(offset)))\n\tnewRangeEnd := int(math.Max(float64(offset+len(data)), float64(oldRangeEnd)))\n\tbuffer.modifiedSize = newRangeEnd - buffer.modifiedOffset\n\tbuffer.bufferData()\n}\n\nfunc (buffer *vertexBuffer) loadVolatile() bool {\n\tbuffer.vbo = gl.CreateBuffer()\n\tbuffer.bind()\n\tdefer buffer.unbind()\n\tgl.BufferData(gl.ARRAY_BUFFER, len(buffer.data)*4, gl.Ptr(buffer.data), uint32(buffer.usage))\n\treturn true\n}\n\nfunc (buffer *vertexBuffer) unloadVolatile() {\n\tgl.DeleteBuffer(buffer.vbo)\n\tbuffer.vbo.Value = 0\n}\n<commit_msg>better vars<commit_after>package gfx\n\nimport (\n\t\"encoding\/binary\"\n\t\"math\"\n\n\t\"github.com\/goxjs\/gl\"\n\t\"golang.org\/x\/mobile\/exp\/f32\"\n)\n\ntype vertexBuffer struct {\n\tisBound bool \/\/ Whether the buffer is currently bound.\n\tusage Usage \/\/ Usage hint. GL_[DYNAMIC, STATIC, STREAM]_DRAW.\n\tvbo gl.Buffer \/\/ The VBO identifier. Assigned by OpenGL.\n\tdata []float32 \/\/ A pointer to mapped memory.\n\tmodifiedOffset int\n\tmodifiedSize int\n}\n\nfunc newVertexBuffer(size int, data []float32, usage Usage) *vertexBuffer {\n\tnewBuffer := &vertexBuffer{\n\t\tusage: usage,\n\t\tdata: make([]float32, size),\n\t}\n\tif len(data) > 0 {\n\t\tcopy(newBuffer.data, data[:size])\n\t}\n\tregisterVolatile(newBuffer)\n\treturn newBuffer\n}\n\nfunc (buffer *vertexBuffer) bufferStatic() {\n\tif buffer.modifiedSize == 0 {\n\t\treturn\n\t}\n\t\/\/ Upload the mapped data to the buffer.\n\tf32.Bytes(binary.LittleEndian, buffer.data[buffer.modifiedOffset:buffer.modifiedSize]...)\n}\n\nfunc (buffer *vertexBuffer) bufferStream() {\n\tgl.BufferData(gl.ARRAY_BUFFER, f32.Bytes(binary.LittleEndian, buffer.data...), gl.Enum(buffer.usage))\n}\n\nfunc (buffer *vertexBuffer) bufferData() {\n\tif buffer.modifiedSize != 0 { \/\/if there is no modified size might as well do the whole buffer\n\t\tbuffer.modifiedOffset = int(math.Min(float64(buffer.modifiedOffset), float64(len(buffer.data)-1)))\n\t\tbuffer.modifiedSize = int(math.Min(float64(buffer.modifiedSize), float64(len(buffer.data)-buffer.modifiedOffset)))\n\t} else {\n\t\tbuffer.modifiedOffset = 0\n\t\tbuffer.modifiedSize = len(buffer.data)\n\t}\n\n\tbuffer.bind()\n\tif buffer.modifiedSize > 0 {\n\t\tswitch buffer.usage {\n\t\tcase UsageStatic:\n\t\t\tbuffer.bufferStatic()\n\t\tcase UsageStream:\n\t\t\tbuffer.bufferStream()\n\t\tcase UsageDynamic:\n\t\t\t\/\/ It's probably more efficient to treat it like a streaming buffer if\n\t\t\t\/\/ at least a third of its contents have been modified during the map().\n\t\t\tif buffer.modifiedSize >= len(buffer.data)\/3 {\n\t\t\t\tbuffer.bufferStream()\n\t\t\t} else {\n\t\t\t\tbuffer.bufferStatic()\n\t\t\t}\n\t\t}\n\t}\n\tbuffer.modifiedOffset = 0\n\tbuffer.modifiedSize = 0\n}\n\nfunc (buffer *vertexBuffer) bind() {\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buffer.vbo)\n\tbuffer.isBound = true\n}\n\nfunc (buffer *vertexBuffer) unbind() {\n\tif buffer.isBound {\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, gl.Buffer{})\n\t}\n\tbuffer.isBound = false\n}\n\nfunc (buffer *vertexBuffer) fill(offset int, data []float32) {\n\tcopy(buffer.data[offset:], data[:])\n\tif !buffer.vbo.Valid() {\n\t\treturn\n\t}\n\t\/\/ We're being conservative right now by internally marking the whole range\n\t\/\/ from the start of section a to the end of section b as modified if both\n\t\/\/ a and b are marked as modified.\n\toldRangeEnd := buffer.modifiedOffset + buffer.modifiedSize\n\tbuffer.modifiedOffset = int(math.Min(float64(buffer.modifiedOffset), float64(offset)))\n\tnewRangeEnd := int(math.Max(float64(offset+len(data)), float64(oldRangeEnd)))\n\tbuffer.modifiedSize = newRangeEnd - buffer.modifiedOffset\n\tbuffer.bufferData()\n}\n\nfunc (buffer *vertexBuffer) loadVolatile() bool {\n\tbuffer.vbo = gl.CreateBuffer()\n\tbuffer.bind()\n\tdefer buffer.unbind()\n\tgl.BufferData(gl.ARRAY_BUFFER, f32.Bytes(binary.LittleEndian, buffer.data...), gl.Enum(buffer.usage))\n\treturn true\n}\n\nfunc (buffer *vertexBuffer) unloadVolatile() {\n\tgl.DeleteBuffer(buffer.vbo)\n\tbuffer.vbo.Value = 0\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar types = []string{\n\t\"int\",\n\t\"float64\",\n\t\"string\",\n}\n\nfunc genFoldLeft(t1 string, t2 string) {\n\tfoldLeftTemplate := fmt.Sprintf(\"func FoldLeft%s%s(l []%s, z %s, f func(%s, %s) %s) %s {\\n\\tacc := z\\n\\tthese := l\\n\\tfor len(these) != 0 {\\n\\t\\tacc = f(acc, these[0])\\n\\t\\tthese = these[1:]\\n\\t}\\n\\treturn acc\\n}\\n\", t1, t2, t1, t2, t2, t1, t2, t2)\n\tfmt.Println(foldLeftTemplate)\n}\n\nfunc genSelfGen(t string) {\n\tvar selfTemplate = []string{\n\t\t\"func Self\",\n\t\t\"(x \",\n\t\t\") \",\n\t\t\" {\\n\\treturn x\\n}\\n\",\n\t}\n\tfmt.Println(strings.Join(selfTemplate, t))\n}\n\nfunc main() {\n\tfmt.Println(\"package generics\")\n\tfmt.Println(\"\")\n\tfor _, t := range types {\n\t\tgenSelfGen(t)\n\t}\n\tfor _, t1 := range types {\n\t\tfor _, t2 := range types {\n\t\t\tif t1 != t2 {\n\t\t\t\tgenFoldLeft(t1, t2)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, t := range types {\n\t\tgenFoldLeft(t, t)\n\t}\n}\n<commit_msg>added map :)<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar types = []string{\n\t\"int\",\n\t\"float64\",\n\t\"string\",\n}\n\nfunc genMap(t1 string, t2 string) {\n\tmapTemplate := fmt.Sprintf(\"func Map%s%s(l []%s, f func(%s) %s) []%s {\\n\\tvar b []%s\\n\\tthese := l\\n\\tfor len(these) != 0 {\\n\\t\\tb = append(b, f(these[0]))\\n\\t\\tthese = these[1:]\\n\\t}\\n\\treturn b\\n}\\n\", t1, t2, t1, t1, t2, t2, t2)\n\tfmt.Println(mapTemplate)\n}\n\nfunc genFoldLeft(t1 string, t2 string) {\n\tfoldLeftTemplate := fmt.Sprintf(\"func FoldLeft%s%s(l []%s, z %s, f func(%s, %s) %s) %s {\\n\\tacc := z\\n\\tthese := l\\n\\tfor len(these) != 0 {\\n\\t\\tacc = f(acc, these[0])\\n\\t\\tthese = these[1:]\\n\\t}\\n\\treturn acc\\n}\\n\", t1, t2, t1, t2, t2, t1, t2, t2)\n\tfmt.Println(foldLeftTemplate)\n}\n\nfunc genSelf(t string) {\n\tvar selfTemplate = []string{\n\t\t\"func Self\",\n\t\t\"(x \",\n\t\t\") \",\n\t\t\" {\\n\\treturn x\\n}\\n\",\n\t}\n\tfmt.Println(strings.Join(selfTemplate, t))\n}\n\nfunc main() {\n\tfmt.Println(\"package generics\")\n\tfmt.Println(\"\")\n\tfor _, t := range types {\n\t\tgenSelf(t)\n\t}\n\tfor _, t1 := range types {\n\t\tfor _, t2 := range types {\n\t\t\tif t1 != t2 {\n\t\t\t\tgenFoldLeft(t1, t2)\n\t\t\t\tgenMap(t1, t2)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, t := range types {\n\t\tgenFoldLeft(t, t)\n\t\tgenMap(t, t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"github.com\/sugyan\/ttyread\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"j4k.co\/terminal\"\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ GifGenerator type\ntype GifGenerator struct {\n\tSpeed float64\n\tCol int\n\tRow int\n\tNoLoop bool\n}\n\n\/\/ NewGifGenerator returns GifGenerator instance\nfunc NewGifGenerator() *GifGenerator {\n\treturn &GifGenerator{\n\t\tSpeed: 1.0,\n\t\tCol: 80,\n\t\tRow: 24,\n\t\tNoLoop: false,\n\t}\n}\n\n\/\/ Generate writes to outFile an animated GIF\nfunc (g *GifGenerator) Generate(input string, output string) (err error) {\n\t\/\/ input\n\tinFile, err := os.Open(input)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer inFile.Close()\n\n\t\/\/ virtual terminal\n\tvar state = terminal.State{}\n\tvt, err := terminal.Create(&state, ioutil.NopCloser(bytes.NewBuffer([]byte{})))\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer vt.Close()\n\tvt.Resize(g.Col, g.Row)\n\n\t\/\/ read ttyrecord\n\treader := ttyread.NewTtyReader(inFile)\n\tvar (\n\t\tprevTv *ttyread.TimeVal\n\t\timages []*image.Paletted\n\t\tdelays []int\n\t)\n\tfor {\n\t\tvar data *ttyread.TtyData\n\t\tdata, err = reader.ReadData()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tvar diff ttyread.TimeVal\n\t\tif prevTv != nil {\n\t\t\tdiff = data.TimeVal.Subtract(*prevTv)\n\t\t}\n\t\tprevTv = &data.TimeVal\n\n\t\t\/\/ calc delay and capture\n\t\tdelay := int(float64(diff.Sec*1000000+diff.Usec)\/g.Speed) \/ 10000\n\t\tif delay > 0 {\n\t\t\tlog.Println(delay)\n\t\t\tvar img *image.Paletted\n\t\t\timg, err = g.Capture(&state)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\timages = append(images, img)\n\t\t\tdelays = append(delays, delay)\n\t\t}\n\t\t\/\/ write to vt\n\t\t_, err = vt.Write(*data.Buffer)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\toutFile, err := os.Create(output)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer outFile.Close()\n\topts := gif.GIF{\n\t\tImage: images,\n\t\tDelay: delays,\n\t}\n\tif g.NoLoop {\n\t\topts.LoopCount = 1\n\t}\n\terr = gif.EncodeAll(outFile, &opts)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn nil\n}\n<commit_msg>remove debug log<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"github.com\/sugyan\/ttyread\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"j4k.co\/terminal\"\n\t\"os\"\n)\n\n\/\/ GifGenerator type\ntype GifGenerator struct {\n\tSpeed float64\n\tCol int\n\tRow int\n\tNoLoop bool\n}\n\n\/\/ NewGifGenerator returns GifGenerator instance\nfunc NewGifGenerator() *GifGenerator {\n\treturn &GifGenerator{\n\t\tSpeed: 1.0,\n\t\tCol: 80,\n\t\tRow: 24,\n\t\tNoLoop: false,\n\t}\n}\n\n\/\/ Generate writes to outFile an animated GIF\nfunc (g *GifGenerator) Generate(input string, output string) (err error) {\n\t\/\/ input\n\tinFile, err := os.Open(input)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer inFile.Close()\n\n\t\/\/ virtual terminal\n\tvar state = terminal.State{}\n\tvt, err := terminal.Create(&state, ioutil.NopCloser(bytes.NewBuffer([]byte{})))\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer vt.Close()\n\tvt.Resize(g.Col, g.Row)\n\n\t\/\/ read ttyrecord\n\treader := ttyread.NewTtyReader(inFile)\n\tvar (\n\t\tprevTv *ttyread.TimeVal\n\t\timages []*image.Paletted\n\t\tdelays []int\n\t)\n\tfor {\n\t\tvar data *ttyread.TtyData\n\t\tdata, err = reader.ReadData()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tvar diff ttyread.TimeVal\n\t\tif prevTv != nil {\n\t\t\tdiff = data.TimeVal.Subtract(*prevTv)\n\t\t}\n\t\tprevTv = &data.TimeVal\n\n\t\t\/\/ calc delay and capture\n\t\tdelay := int(float64(diff.Sec*1000000+diff.Usec)\/g.Speed) \/ 10000\n\t\tif delay > 0 {\n\t\t\tvar img *image.Paletted\n\t\t\timg, err = g.Capture(&state)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\timages = append(images, img)\n\t\t\tdelays = append(delays, delay)\n\t\t}\n\t\t\/\/ write to vt\n\t\t_, err = vt.Write(*data.Buffer)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\toutFile, err := os.Create(output)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer outFile.Close()\n\topts := gif.GIF{\n\t\tImage: images,\n\t\tDelay: delays,\n\t}\n\tif g.NoLoop {\n\t\topts.LoopCount = 1\n\t}\n\terr = gif.EncodeAll(outFile, &opts)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package fireauth\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ Version used for creating token\n\tVersion = 0\n\t\/\/ TokenSep used as a delimiter for the token\n\tTokenSep = \".\"\n)\n\n\/\/ Generator represents a token generator\ntype Generator struct {\n\tsecret string\n}\n\n\/\/ Option represent the claims used when creating an authentication token\n\/\/ https:\/\/www.firebase.com\/docs\/rest\/guide\/user-auth.html#section-rest-tokens-without-helpers\ntype Option struct {\n\t\/\/ NotBefote is the token \"not before\" date as a number of seconds since the Unix epoch.\n\t\/\/ If specified, the token will not be considered valid until after this date.\n\tNotBefore int `json:\"nbf,omitempty\"`\n\n\t\/\/ Expiration is the token expiration date as a number of seconds since the Unix epoch.\n\t\/\/ If not specified, by default the token will expire 24 hours after the \"issued at\" date (iat).\n\tExpiration int `json:\"exp,omitempty\"`\n\n\t\/\/ Admin when set to true to make this an \"admin\" token, which grants full read and\n\t\/\/ write access to all data.\n\tAdmin bool `json:\"admin,omitempty\"`\n\n\t\/\/ Debug when set to true to enable debug mode, which provides verbose error messages\n\t\/\/ when Security and Firebase Rules fail.\n\tDebug bool `json:\"debug,omitempty\"`\n}\n\n\/\/ Data is used to create a token. The token data can contain any data of your choosing,\n\/\/ however it must contain a `uid` key, which must be a string of less than 256 characters\ntype Data map[string]interface{}\n\n\/\/ New creates a new Generator\nfunc New(secret string) *Generator {\n\treturn &Generator{\n\t\tsecret: secret,\n\t}\n}\n\n\/\/ CreateToken generates a new token with the given Data and options\nfunc (t *Generator) CreateToken(data Data, options *Option) (string, error) {\n\t\/\/ make sure we have valid parameters\n\tif data == nil && (options == nil || (!options.Admin && !options.Debug)) {\n\t\treturn \"\", errors.New(\"Data is empty and no options are set. This token will have no effect on Firebase.\")\n\t}\n\n\t\/\/ validate the data\n\tif err := validate(data, (options != nil && options.Admin)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ generate the encoded headers\n\tencodedHeader, err := encodedHeader()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ setup the claims for the token\n\tclaim := struct {\n\t\t*Option\n\t\tVersion int `json:\"v\"`\n\t\tData Data `json:\"d\"`\n\t\tIssuedAt int64 `json:\"iat\"`\n\t}{\n\t\tOption: options,\n\t\tVersion: Version,\n\t\tData: data,\n\t\tIssuedAt: time.Now().Unix(),\n\t}\n\n\t\/\/ generate the encoded claims\n\tclaimBytes, err := json.Marshal(claim)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tencodedClaim := encode(claimBytes)\n\n\t\/\/ create the token\n\tsecureString := fmt.Sprintf(\"%s%s%s\", encodedHeader, TokenSep, encodedClaim)\n\tsignature := sign(secureString, t.secret)\n\ttoken := fmt.Sprintf(\"%s%s%s\", secureString, TokenSep, signature)\n\n\tif len(token) > 1024 {\n\t\treturn \"\", errors.New(\"Generated token is too long. The token cannot be longer than 1024 bytes.\")\n\t}\n\treturn token, nil\n}\n\nfunc encodedHeader() (string, error) {\n\theaders := struct {\n\t\tAlgorithm string `json:\"alg\"`\n\t\tType string `json:\"typ\"`\n\t}{\n\t\tAlgorithm: \"HS256\",\n\t\tType: \"JWT\",\n\t}\n\n\theaderBytes, err := json.Marshal(headers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn encode(headerBytes), nil\n}\n\nfunc validate(data Data, isAdmind bool) error {\n\tuid, containsID := data[\"uid\"]\n\tif !containsID && !isAdmind {\n\t\treturn errors.New(`Data payload must contain a \"uid\" key`)\n\t}\n\n\tif _, isString := uid.(string); containsID && !isString {\n\t\treturn errors.New(`Data payload key \"uid\" must be a string`)\n\t}\n\n\tif containsID && len(uid.(string)) > 256 {\n\t\treturn errors.New(`Data payload key \"uid\" must not be longer than 256 characters`)\n\t}\n\treturn nil\n}\n\nfunc encode(data []byte) string {\n\treturn strings.Replace(base64.URLEncoding.EncodeToString(data), \"=\", \"\", -1)\n}\n\nfunc sign(message, secret string) string {\n\tkey := []byte(secret)\n\th := hmac.New(sha256.New, key)\n\th.Write([]byte(message))\n\treturn encode(h.Sum(nil))\n}\n<commit_msg>using int64 instead of int to help with setting the number of seconds for expiration\/timeAfter<commit_after>package fireauth\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ Version used for creating token\n\tVersion = 0\n\t\/\/ TokenSep used as a delimiter for the token\n\tTokenSep = \".\"\n)\n\n\/\/ Generator represents a token generator\ntype Generator struct {\n\tsecret string\n}\n\n\/\/ Option represent the claims used when creating an authentication token\n\/\/ https:\/\/www.firebase.com\/docs\/rest\/guide\/user-auth.html#section-rest-tokens-without-helpers\ntype Option struct {\n\t\/\/ NotBefote is the token \"not before\" date as a number of seconds since the Unix epoch.\n\t\/\/ If specified, the token will not be considered valid until after this date.\n\tNotBefore int64 `json:\"nbf,omitempty\"`\n\n\t\/\/ Expiration is the token expiration date as a number of seconds since the Unix epoch.\n\t\/\/ If not specified, by default the token will expire 24 hours after the \"issued at\" date (iat).\n\tExpiration int64 `json:\"exp,omitempty\"`\n\n\t\/\/ Admin when set to true to make this an \"admin\" token, which grants full read and\n\t\/\/ write access to all data.\n\tAdmin bool `json:\"admin,omitempty\"`\n\n\t\/\/ Debug when set to true to enable debug mode, which provides verbose error messages\n\t\/\/ when Security and Firebase Rules fail.\n\tDebug bool `json:\"debug,omitempty\"`\n}\n\n\/\/ Data is used to create a token. The token data can contain any data of your choosing,\n\/\/ however it must contain a `uid` key, which must be a string of less than 256 characters\ntype Data map[string]interface{}\n\n\/\/ New creates a new Generator\nfunc New(secret string) *Generator {\n\treturn &Generator{\n\t\tsecret: secret,\n\t}\n}\n\n\/\/ CreateToken generates a new token with the given Data and options\nfunc (t *Generator) CreateToken(data Data, options *Option) (string, error) {\n\t\/\/ make sure we have valid parameters\n\tif data == nil && (options == nil || (!options.Admin && !options.Debug)) {\n\t\treturn \"\", errors.New(\"Data is empty and no options are set. This token will have no effect on Firebase.\")\n\t}\n\n\t\/\/ validate the data\n\tif err := validate(data, (options != nil && options.Admin)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ generate the encoded headers\n\tencodedHeader, err := encodedHeader()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ setup the claims for the token\n\tclaim := struct {\n\t\t*Option\n\t\tVersion int `json:\"v\"`\n\t\tData Data `json:\"d\"`\n\t\tIssuedAt int64 `json:\"iat\"`\n\t}{\n\t\tOption: options,\n\t\tVersion: Version,\n\t\tData: data,\n\t\tIssuedAt: time.Now().Unix(),\n\t}\n\n\t\/\/ generate the encoded claims\n\tclaimBytes, err := json.Marshal(claim)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tencodedClaim := encode(claimBytes)\n\n\t\/\/ create the token\n\tsecureString := fmt.Sprintf(\"%s%s%s\", encodedHeader, TokenSep, encodedClaim)\n\tsignature := sign(secureString, t.secret)\n\ttoken := fmt.Sprintf(\"%s%s%s\", secureString, TokenSep, signature)\n\n\tif len(token) > 1024 {\n\t\treturn \"\", errors.New(\"Generated token is too long. The token cannot be longer than 1024 bytes.\")\n\t}\n\treturn token, nil\n}\n\nfunc encodedHeader() (string, error) {\n\theaders := struct {\n\t\tAlgorithm string `json:\"alg\"`\n\t\tType string `json:\"typ\"`\n\t}{\n\t\tAlgorithm: \"HS256\",\n\t\tType: \"JWT\",\n\t}\n\n\theaderBytes, err := json.Marshal(headers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn encode(headerBytes), nil\n}\n\nfunc validate(data Data, isAdmind bool) error {\n\tuid, containsID := data[\"uid\"]\n\tif !containsID && !isAdmind {\n\t\treturn errors.New(`Data payload must contain a \"uid\" key`)\n\t}\n\n\tif _, isString := uid.(string); containsID && !isString {\n\t\treturn errors.New(`Data payload key \"uid\" must be a string`)\n\t}\n\n\tif containsID && len(uid.(string)) > 256 {\n\t\treturn errors.New(`Data payload key \"uid\" must not be longer than 256 characters`)\n\t}\n\treturn nil\n}\n\nfunc encode(data []byte) string {\n\treturn strings.Replace(base64.URLEncoding.EncodeToString(data), \"=\", \"\", -1)\n}\n\nfunc sign(message, secret string) string {\n\tkey := []byte(secret)\n\th := hmac.New(sha256.New, key)\n\th.Write([]byte(message))\n\treturn encode(h.Sum(nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package gurgling\n\nimport (\n \"net\/http\"\n \"sync\"\n \"io\"\n . \"github.com\/levythu\/gurgling\/definition\"\n \"github.com\/levythu\/gurgling\/encoding\"\n fp \"path\/filepath\"\n MIME \"mime\"\n \"os\"\n)\n\ntype Response interface {\n \/\/ Quick send with code 200. While done, any other operation except Write is not allowed anymore.\n \/\/ However, due to the framework of net\/http, the response will not be closed until\n \/\/ the function returns. So it is suggested to return immediately.\n Send(string) error\n\n \/\/ Set headers.\n Set(string, string) error\n\n \/\/ Get the value in the headers. If nothing, returns \"\".\n Get(string) string\n\n \/\/ Write data to response body. It allows any corresponding operation.\n Write([]byte) (int, error)\n\n \/\/ Send files without any extra headers except contenttype and encrypt.\n \/\/ if contenttype is \"\", it will be inferred from file extension.\n \/\/ if encoder is nil, no encoder is used\n SendFileEx(string, string, encoding.Encoder, int) error\n \/\/ Shorthand for SendFileEx, infer mime, using gzip and return 200.\n SendFile(string) error\n\n \/\/ While done, any other operation except Write is not allowed anymore.\n Status(string, int) error\n\n \/\/ get the Original resonse, only use it for advanced purpose\n R() http.ResponseWriter\n\n \/\/ extra use for midwares. Most of time the value is a function.\n F() map[string]Tout\n}\nfunc NewResponse(w http.ResponseWriter) Response {\n return &OriResponse{\n r: w,\n haveSent: false,\n lock: &sync.Mutex{},\n f: make(map[string]Tout),\n }\n}\n\ntype OriResponse struct {\n \/\/ the Original resonse, only use it for advanced purpose\n r http.ResponseWriter\n \/\/ to guarantee the send action is only triggered once\n haveSent bool\n f map[string]Tout\n\n lock *sync.Mutex\n}\n\nfunc (this *OriResponse)Send(content string) error {\n return this.Status(content, 200)\n}\nfunc (this *OriResponse)Status(content string, code int) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n this.r.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n this.r.WriteHeader(code)\n this.haveSent=true\n _, err:=io.WriteString(this.r, content)\n\n return err\n}\nfunc (this *OriResponse)Set(key string, val string) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n this.r.Header().Set(key, val)\n return nil\n}\nfunc (this *OriResponse)Get(key string) string {\n return this.r.Header().Get(key)\n}\nfunc (this *OriResponse)Write(content []byte) (int, error) {\n return this.r.Write(content)\n}\nfunc (this *OriResponse)R() http.ResponseWriter {\n return this.r\n}\nfunc (this *OriResponse)F() map[string]Tout {\n return this.f\n}\nfunc (this *OriResponse)SendFile(filepath string) error {\n return this.SendFileEx(filepath, \"\", encoding.GZipEncoder, 200)\n}\nfunc (this *OriResponse)SendFileEx(filepath string, mime string, encoder encoding.Encoder, httpCode int) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n\n \/\/ prepare file\n var fileHandler, fileErr=os.Open(filepath)\n if fileErr!=nil {\n return SENDFILE_FILEPATH_ERROR\n }\n\n \/\/ init mime\n if mime==\"\" {\n \/\/ infer the content type\n mime=MIME.TypeByExtension(fp.Ext(filepath))\n if mime==\"\" {\n mime=DEFAULT_CONTENT_TYPE\n }\n }\n \/\/ init encoder\n if encoder==nil {\n encoder=encoding.NOEncoder\n }\n var desWriter=encoder.WriterWrapper(this.r)\n if desWriter==nil {\n \/\/ fail to create. return a safe encoder or error? Now returns error.\n return SENDFILE_ENCODER_NOT_READY\n }\n\n this.haveSent=true\n\n \/\/ set headers\n this.r.Header().Set(CONTENT_TYPE_KEY, mime)\n if encoder.ContentEncoding()!=\"\" {\n this.r.Header().Set(CONTENT_ENCODING, encoder.ContentEncoding())\n }\n this.r.Header().Set(TRANSFER_ENCODING, CHUNCKED_TRANSFER_ENCODING)\n\n this.r.WriteHeader(httpCode)\n\n \/\/ trnsfer file\n _, copyError:=io.Copy(desWriter, fileHandler)\n desWriter.Close()\n fileHandler.Close()\n \/\/ No need to close request writer. There's no such an interface.\n if copyError!=nil {\n return SENDFILE_SENT_BUT_ABORT\n }\n\n return nil\n}\n<commit_msg>bugs fix<commit_after>package gurgling\n\nimport (\n \"net\/http\"\n \"sync\"\n \"io\"\n . \"github.com\/levythu\/gurgling\/definition\"\n \"github.com\/levythu\/gurgling\/encoding\"\n fp \"path\/filepath\"\n MIME \"mime\"\n \"os\"\n)\n\ntype Response interface {\n \/\/ Quick send with code 200. While done, any other operation except Write is not allowed anymore.\n \/\/ However, due to the framework of net\/http, the response will not be closed until\n \/\/ the function returns. So it is suggested to return immediately.\n Send(string) error\n\n \/\/ Set headers.\n Set(string, string) error\n\n \/\/ Get the value in the headers. If nothing, returns \"\".\n Get(string) string\n\n \/\/ Write data to response body. It allows any corresponding operation.\n Write([]byte) (int, error)\n\n \/\/ Send files without any extra headers except contenttype and encrypt.\n \/\/ if contenttype is \"\", it will be inferred from file extension.\n \/\/ if encoder is nil, no encoder is used\n SendFileEx(string, string, encoding.Encoder, int) error\n \/\/ Shorthand for SendFileEx, infer mime, using gzip and return 200.\n SendFile(string) error\n\n \/\/ While done, any other operation except Write is not allowed anymore.\n Status(string, int) error\n\n \/\/ get the Original resonse, only use it for advanced purpose\n R() http.ResponseWriter\n\n \/\/ extra use for midwares. Most of time the value is a function.\n F() map[string]Tout\n}\nfunc NewResponse(w http.ResponseWriter) Response {\n return &OriResponse{\n r: w,\n haveSent: false,\n lock: &sync.Mutex{},\n f: make(map[string]Tout),\n }\n}\n\ntype OriResponse struct {\n \/\/ the Original resonse, only use it for advanced purpose\n r http.ResponseWriter\n \/\/ to guarantee the send action is only triggered once\n haveSent bool\n f map[string]Tout\n\n lock *sync.Mutex\n}\n\nfunc (this *OriResponse)Send(content string) error {\n return this.Status(content, 200)\n}\nfunc (this *OriResponse)Status(content string, code int) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n this.r.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n this.r.WriteHeader(code)\n this.haveSent=true\n _, err:=io.WriteString(this.r, content)\n\n return err\n}\nfunc (this *OriResponse)Set(key string, val string) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n this.r.Header().Set(key, val)\n return nil\n}\nfunc (this *OriResponse)Get(key string) string {\n return this.r.Header().Get(key)\n}\nfunc (this *OriResponse)Write(content []byte) (int, error) {\n return this.r.Write(content)\n}\nfunc (this *OriResponse)R() http.ResponseWriter {\n return this.r\n}\nfunc (this *OriResponse)F() map[string]Tout {\n return this.f\n}\nfunc (this *OriResponse)SendFile(filepath string) error {\n return this.SendFileEx(filepath, \"\", encoding.GZipEncoder, 200)\n}\nfunc (this *OriResponse)SendFileEx(filepath string, mime string, encoder encoding.Encoder, httpCode int) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n if (this.haveSent) {\n return RES_HEAD_ALREADY_SENT\n }\n\n \/\/ prepare file\n var fileHandler, fileErr=os.Open(filepath)\n if fileErr!=nil {\n return SENDFILE_FILEPATH_ERROR\n }\n\n \/\/ init mime\n if mime==\"\" {\n \/\/ infer the content type\n mime=MIME.TypeByExtension(fp.Ext(filepath))\n if mime==\"\" {\n mime=DEFAULT_CONTENT_TYPE\n }\n }\n \/\/ init encoder\n if encoder==nil {\n encoder=encoding.NOEncoder\n }\n var desWriter=encoder.WriterWrapper(this.r)\n if desWriter==nil {\n \/\/ fail to create. return a safe encoder or error? Now returns error.\n return SENDFILE_ENCODER_NOT_READY\n }\n\n this.haveSent=true\n\n \/\/ set headers\n this.r.Header().Set(CONTENT_TYPE_KEY, mime)\n if encoder.ContentEncoding()!=\"\" {\n this.r.Header().Set(CONTENT_ENCODING, encoder.ContentEncoding())\n }\n\n this.r.WriteHeader(httpCode)\n\n \/\/ trnsfer file\n _, copyError:=io.Copy(desWriter, fileHandler)\n desWriter.Close()\n fileHandler.Close()\n \/\/ No need to close request writer. There's no such an interface.\n if copyError!=nil {\n return SENDFILE_SENT_BUT_ABORT\n }\n\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst (\n\tdefaultRepository = \"quay.io\/coreos\/etcd\"\n\tDefaultEtcdVersion = \"3.2.13\"\n)\n\nvar (\n\t\/\/ TODO: move validation code into separate package.\n\tErrBackupUnsetRestoreSet = errors.New(\"spec: backup policy must be set if restore policy is set\")\n)\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdClusterList is a list of etcd clusters.\ntype EtcdClusterList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard list metadata\n\t\/\/ More info: http:\/\/releases.k8s.io\/HEAD\/docs\/devel\/api-conventions.md#metadata\n\tmetav1.ListMeta `json:\"metadata,omitempty\"`\n\tItems []EtcdCluster `json:\"items\"`\n}\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EtcdCluster struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\tSpec ClusterSpec `json:\"spec\"`\n\tStatus ClusterStatus `json:\"status\"`\n}\n\nfunc (c *EtcdCluster) AsOwner() metav1.OwnerReference {\n\ttrueVar := true\n\treturn metav1.OwnerReference{\n\t\tAPIVersion: SchemeGroupVersion.String(),\n\t\tKind: EtcdClusterResourceKind,\n\t\tName: c.Name,\n\t\tUID: c.UID,\n\t\tController: &trueVar,\n\t}\n}\n\ntype ClusterSpec struct {\n\t\/\/ Size is the expected size of the etcd cluster.\n\t\/\/ The etcd-operator will eventually make the size of the running\n\t\/\/ cluster equal to the expected size.\n\t\/\/ The vaild range of the size is from 1 to 7.\n\tSize int `json:\"size\"`\n\t\/\/ Repository is the name of the repository that hosts\n\t\/\/ etcd container images. It should be direct clone of the repository in official\n\t\/\/ release:\n\t\/\/ https:\/\/github.com\/coreos\/etcd\/releases\n\t\/\/ That means, it should have exact same tags and the same meaning for the tags.\n\t\/\/\n\t\/\/ By default, it is `quay.io\/coreos\/etcd`.\n\tRepository string `json:\"repository,omitempty\"`\n\t\/\/ **DEPRECATED**. Use Repository instead.\n\t\/\/ TODO: remove this field in v0.7.2 .\n\tBaseImage string `json:\"baseImage,omitempty\"`\n\n\t\/\/ Version is the expected version of the etcd cluster.\n\t\/\/ The etcd-operator will eventually make the etcd cluster version\n\t\/\/ equal to the expected version.\n\t\/\/\n\t\/\/ The version must follow the [semver]( http:\/\/semver.org) format, for example \"3.2.11\".\n\t\/\/ Only etcd released versions are supported: https:\/\/github.com\/coreos\/etcd\/releases\n\t\/\/\n\t\/\/ If version is not set, default is \"3.2.13\".\n\tVersion string `json:\"version,omitempty\"`\n\n\t\/\/ Paused is to pause the control of the operator for the etcd cluster.\n\tPaused bool `json:\"paused,omitempty\"`\n\n\t\/\/ Pod defines the policy to create pod for the etcd pod.\n\t\/\/\n\t\/\/ Updating Pod does not take effect on any existing etcd pods.\n\tPod *PodPolicy `json:\"pod,omitempty\"`\n\n\t\/\/ SelfHosted determines if the etcd cluster is used for a self-hosted\n\t\/\/ Kubernetes cluster.\n\t\/\/\n\t\/\/ SelfHosted is a cluster initialization configuration. It cannot be updated.\n\tSelfHosted *SelfHostedPolicy `json:\"selfHosted,omitempty\"`\n\n\t\/\/ etcd cluster TLS configuration\n\tTLS *TLSPolicy `json:\"TLS,omitempty\"`\n}\n\n\/\/ PodPolicy defines the policy to create pod for the etcd container.\ntype PodPolicy struct {\n\t\/\/ Labels specifies the labels to attach to pods the operator creates for the\n\t\/\/ etcd cluster.\n\t\/\/ \"app\" and \"etcd_*\" labels are reserved for the internal use of the etcd operator.\n\t\/\/ Do not overwrite them.\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\n\t\/\/ NodeSelector specifies a map of key-value pairs. For the pod to be eligible\n\t\/\/ to run on a node, the node must have each of the indicated key-value pairs as\n\t\/\/ labels.\n\tNodeSelector map[string]string `json:\"nodeSelector,omitempty\"`\n\n\t\/\/ The scheduling constraints on etcd pods.\n\tAffinity *v1.Affinity `json:\"affinity,omitempty\"`\n\t\/\/ **DEPRECATED**. Use Affinity instead.\n\tAntiAffinity bool `json:\"antiAffinity,omitempty\"`\n\n\t\/\/ Resources is the resource requirements for the etcd container.\n\t\/\/ This field cannot be updated once the cluster is created.\n\tResources v1.ResourceRequirements `json:\"resources,omitempty\"`\n\n\t\/\/ Tolerations specifies the pod's tolerations.\n\tTolerations []v1.Toleration `json:\"tolerations,omitempty\"`\n\n\t\/\/ List of environment variables to set in the etcd container.\n\t\/\/ This is used to configure etcd process. etcd cluster cannot be created, when\n\t\/\/ bad environement variables are provided. Do not overwrite any flags used to\n\t\/\/ bootstrap the cluster (for example `--initial-cluster` flag).\n\t\/\/ This field cannot be updated.\n\tEtcdEnv []v1.EnvVar `json:\"etcdEnv,omitempty\"`\n}\n\nfunc (c *ClusterSpec) Validate() error {\n\tif c.TLS != nil {\n\t\tif err := c.TLS.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.Pod != nil {\n\t\tfor k := range c.Pod.Labels {\n\t\t\tif k == \"app\" || strings.HasPrefix(k, \"etcd_\") {\n\t\t\t\treturn errors.New(\"spec: pod labels contains reserved label\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetDefaults cleans up user passed spec, e.g. defaulting, transforming fields.\n\/\/ TODO: move this to admission controller\nfunc (e *EtcdCluster) SetDefaults() {\n\tc := &e.Spec\n\tif len(c.Repository) == 0 {\n\t\tif len(c.BaseImage) != 0 {\n\t\t\tc.Repository = c.BaseImage\n\t\t} else {\n\t\t\tc.Repository = defaultRepository\n\t\t}\n\t}\n\n\tif len(c.Version) == 0 {\n\t\tc.Version = DefaultEtcdVersion\n\t}\n\n\tc.Version = strings.TrimLeft(c.Version, \"v\")\n\n\t\/\/ convert PodPolicy.AntiAffinity to Pod.Affinity.PodAntiAffinity\n\t\/\/ TODO: Remove this once PodPolicy.AntiAffinity is removed\n\tif c.Pod != nil && c.Pod.AntiAffinity && c.Pod.Affinity == nil {\n\t\tc.Pod.Affinity = &v1.Affinity{\n\t\t\tPodAntiAffinity: &v1.PodAntiAffinity{\n\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{\n\t\t\t\t\t{\n\t\t\t\t\t\t\/\/ set anti-affinity to the etcd pods that belongs to the same cluster\n\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{\n\t\t\t\t\t\t\t\"etcd_cluster\": e.Name,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tTopologyKey: \"kubernetes.io\/hostname\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n}\n<commit_msg>pkg\/apis: change usage of 3.2.11 to 3.2.13 in comments<commit_after>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst (\n\tdefaultRepository = \"quay.io\/coreos\/etcd\"\n\tDefaultEtcdVersion = \"3.2.13\"\n)\n\nvar (\n\t\/\/ TODO: move validation code into separate package.\n\tErrBackupUnsetRestoreSet = errors.New(\"spec: backup policy must be set if restore policy is set\")\n)\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdClusterList is a list of etcd clusters.\ntype EtcdClusterList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard list metadata\n\t\/\/ More info: http:\/\/releases.k8s.io\/HEAD\/docs\/devel\/api-conventions.md#metadata\n\tmetav1.ListMeta `json:\"metadata,omitempty\"`\n\tItems []EtcdCluster `json:\"items\"`\n}\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EtcdCluster struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\tSpec ClusterSpec `json:\"spec\"`\n\tStatus ClusterStatus `json:\"status\"`\n}\n\nfunc (c *EtcdCluster) AsOwner() metav1.OwnerReference {\n\ttrueVar := true\n\treturn metav1.OwnerReference{\n\t\tAPIVersion: SchemeGroupVersion.String(),\n\t\tKind: EtcdClusterResourceKind,\n\t\tName: c.Name,\n\t\tUID: c.UID,\n\t\tController: &trueVar,\n\t}\n}\n\ntype ClusterSpec struct {\n\t\/\/ Size is the expected size of the etcd cluster.\n\t\/\/ The etcd-operator will eventually make the size of the running\n\t\/\/ cluster equal to the expected size.\n\t\/\/ The vaild range of the size is from 1 to 7.\n\tSize int `json:\"size\"`\n\t\/\/ Repository is the name of the repository that hosts\n\t\/\/ etcd container images. It should be direct clone of the repository in official\n\t\/\/ release:\n\t\/\/ https:\/\/github.com\/coreos\/etcd\/releases\n\t\/\/ That means, it should have exact same tags and the same meaning for the tags.\n\t\/\/\n\t\/\/ By default, it is `quay.io\/coreos\/etcd`.\n\tRepository string `json:\"repository,omitempty\"`\n\t\/\/ **DEPRECATED**. Use Repository instead.\n\t\/\/ TODO: remove this field in v0.7.2 .\n\tBaseImage string `json:\"baseImage,omitempty\"`\n\n\t\/\/ Version is the expected version of the etcd cluster.\n\t\/\/ The etcd-operator will eventually make the etcd cluster version\n\t\/\/ equal to the expected version.\n\t\/\/\n\t\/\/ The version must follow the [semver]( http:\/\/semver.org) format, for example \"3.2.13\".\n\t\/\/ Only etcd released versions are supported: https:\/\/github.com\/coreos\/etcd\/releases\n\t\/\/\n\t\/\/ If version is not set, default is \"3.2.13\".\n\tVersion string `json:\"version,omitempty\"`\n\n\t\/\/ Paused is to pause the control of the operator for the etcd cluster.\n\tPaused bool `json:\"paused,omitempty\"`\n\n\t\/\/ Pod defines the policy to create pod for the etcd pod.\n\t\/\/\n\t\/\/ Updating Pod does not take effect on any existing etcd pods.\n\tPod *PodPolicy `json:\"pod,omitempty\"`\n\n\t\/\/ SelfHosted determines if the etcd cluster is used for a self-hosted\n\t\/\/ Kubernetes cluster.\n\t\/\/\n\t\/\/ SelfHosted is a cluster initialization configuration. It cannot be updated.\n\tSelfHosted *SelfHostedPolicy `json:\"selfHosted,omitempty\"`\n\n\t\/\/ etcd cluster TLS configuration\n\tTLS *TLSPolicy `json:\"TLS,omitempty\"`\n}\n\n\/\/ PodPolicy defines the policy to create pod for the etcd container.\ntype PodPolicy struct {\n\t\/\/ Labels specifies the labels to attach to pods the operator creates for the\n\t\/\/ etcd cluster.\n\t\/\/ \"app\" and \"etcd_*\" labels are reserved for the internal use of the etcd operator.\n\t\/\/ Do not overwrite them.\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\n\t\/\/ NodeSelector specifies a map of key-value pairs. For the pod to be eligible\n\t\/\/ to run on a node, the node must have each of the indicated key-value pairs as\n\t\/\/ labels.\n\tNodeSelector map[string]string `json:\"nodeSelector,omitempty\"`\n\n\t\/\/ The scheduling constraints on etcd pods.\n\tAffinity *v1.Affinity `json:\"affinity,omitempty\"`\n\t\/\/ **DEPRECATED**. Use Affinity instead.\n\tAntiAffinity bool `json:\"antiAffinity,omitempty\"`\n\n\t\/\/ Resources is the resource requirements for the etcd container.\n\t\/\/ This field cannot be updated once the cluster is created.\n\tResources v1.ResourceRequirements `json:\"resources,omitempty\"`\n\n\t\/\/ Tolerations specifies the pod's tolerations.\n\tTolerations []v1.Toleration `json:\"tolerations,omitempty\"`\n\n\t\/\/ List of environment variables to set in the etcd container.\n\t\/\/ This is used to configure etcd process. etcd cluster cannot be created, when\n\t\/\/ bad environement variables are provided. Do not overwrite any flags used to\n\t\/\/ bootstrap the cluster (for example `--initial-cluster` flag).\n\t\/\/ This field cannot be updated.\n\tEtcdEnv []v1.EnvVar `json:\"etcdEnv,omitempty\"`\n}\n\nfunc (c *ClusterSpec) Validate() error {\n\tif c.TLS != nil {\n\t\tif err := c.TLS.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.Pod != nil {\n\t\tfor k := range c.Pod.Labels {\n\t\t\tif k == \"app\" || strings.HasPrefix(k, \"etcd_\") {\n\t\t\t\treturn errors.New(\"spec: pod labels contains reserved label\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetDefaults cleans up user passed spec, e.g. defaulting, transforming fields.\n\/\/ TODO: move this to admission controller\nfunc (e *EtcdCluster) SetDefaults() {\n\tc := &e.Spec\n\tif len(c.Repository) == 0 {\n\t\tif len(c.BaseImage) != 0 {\n\t\t\tc.Repository = c.BaseImage\n\t\t} else {\n\t\t\tc.Repository = defaultRepository\n\t\t}\n\t}\n\n\tif len(c.Version) == 0 {\n\t\tc.Version = DefaultEtcdVersion\n\t}\n\n\tc.Version = strings.TrimLeft(c.Version, \"v\")\n\n\t\/\/ convert PodPolicy.AntiAffinity to Pod.Affinity.PodAntiAffinity\n\t\/\/ TODO: Remove this once PodPolicy.AntiAffinity is removed\n\tif c.Pod != nil && c.Pod.AntiAffinity && c.Pod.Affinity == nil {\n\t\tc.Pod.Affinity = &v1.Affinity{\n\t\t\tPodAntiAffinity: &v1.PodAntiAffinity{\n\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{\n\t\t\t\t\t{\n\t\t\t\t\t\t\/\/ set anti-affinity to the etcd pods that belongs to the same cluster\n\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{\n\t\t\t\t\t\t\t\"etcd_cluster\": e.Name,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tTopologyKey: \"kubernetes.io\/hostname\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package echo\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/labstack\/gommon\/color\"\n)\n\ntype (\n\tResponse struct {\n\t\twriter http.ResponseWriter\n\t\tstatus int\n\t\tsize int64\n\t\tcommitted bool\n\t}\n)\n\nfunc NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{writer: w}\n}\n\nfunc (r *Response) SetWriter(w http.ResponseWriter) {\n\tr.writer = w\n}\n\nfunc (r *Response) Header() http.Header {\n\treturn r.writer.Header()\n}\n\nfunc (r *Response) Writer() http.ResponseWriter {\n\treturn r.writer\n}\n\nfunc (r *Response) WriteHeader(code int) {\n\tif r.committed {\n\t\t\/\/ TODO: Warning\n\t\tlog.Printf(\"echo => %s\", color.Yellow(\"response already committed\"))\n\t\treturn\n\t}\n\tr.status = code\n\tr.writer.WriteHeader(code)\n\tr.committed = true\n}\n\nfunc (r *Response) Write(b []byte) (n int, err error) {\n\tn, err = r.writer.Write(b)\n\tr.size += int64(n)\n\treturn n, err\n}\n\n\/\/ Flush wraps response writer's Flush function.\nfunc (r *Response) Flush() {\n\tr.writer.(http.Flusher).Flush()\n}\n\n\/\/ Hijack wraps response writer's Hijack function.\nfunc (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn r.writer.(http.Hijacker).Hijack()\n}\n\n\/\/ CloseNotify wraps response writer's CloseNotify function.\nfunc (r *Response) CloseNotify() <-chan bool {\n\treturn r.writer.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (r *Response) Status() int {\n\treturn r.status\n}\n\nfunc (r *Response) Size() int64 {\n\treturn r.size\n}\n\nfunc (r *Response) reset(w http.ResponseWriter) {\n\tr.writer = w\n\tr.size = 0\n\tr.status = http.StatusOK\n\tr.committed = false\n}\n\n\/\/func (r *Response) clear() {\n\/\/\tr.Header().Del(ContentType)\n\/\/\tr.committed = false\n\/\/}\n<commit_msg>adding response.committed getter<commit_after>package echo\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/labstack\/gommon\/color\"\n)\n\ntype (\n\tResponse struct {\n\t\twriter http.ResponseWriter\n\t\tstatus int\n\t\tsize int64\n\t\tcommitted bool\n\t}\n)\n\nfunc NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{writer: w}\n}\n\nfunc (r *Response) SetWriter(w http.ResponseWriter) {\n\tr.writer = w\n}\n\nfunc (r *Response) Header() http.Header {\n\treturn r.writer.Header()\n}\n\nfunc (r *Response) Writer() http.ResponseWriter {\n\treturn r.writer\n}\n\nfunc (r *Response) WriteHeader(code int) {\n\tif r.committed {\n\t\t\/\/ TODO: Warning\n\t\tlog.Printf(\"echo => %s\", color.Yellow(\"response already committed\"))\n\t\treturn\n\t}\n\tr.status = code\n\tr.writer.WriteHeader(code)\n\tr.committed = true\n}\n\nfunc (r *Response) Write(b []byte) (n int, err error) {\n\tn, err = r.writer.Write(b)\n\tr.size += int64(n)\n\treturn n, err\n}\n\n\/\/ Flush wraps response writer's Flush function.\nfunc (r *Response) Flush() {\n\tr.writer.(http.Flusher).Flush()\n}\n\n\/\/ Hijack wraps response writer's Hijack function.\nfunc (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn r.writer.(http.Hijacker).Hijack()\n}\n\n\/\/ CloseNotify wraps response writer's CloseNotify function.\nfunc (r *Response) CloseNotify() <-chan bool {\n\treturn r.writer.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (r *Response) Status() int {\n\treturn r.status\n}\n\nfunc (r *Response) Size() int64 {\n\treturn r.size\n}\n\nfunc (r *Response) Committed() bool {\n\treturn r.committed\n}\n\nfunc (r *Response) reset(w http.ResponseWriter) {\n\tr.writer = w\n\tr.size = 0\n\tr.status = http.StatusOK\n\tr.committed = false\n}\n\n\/\/func (r *Response) clear() {\n\/\/\tr.Header().Del(ContentType)\n\/\/\tr.committed = false\n\/\/}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage capabilities\n\nimport (\n\t\"sync\"\n)\n\n\/\/ Capabilities defines the set of capabilities available within the system.\n\/\/ For now these are global. Eventually they may be per-user\ntype Capabilities struct {\n\tAllowPrivileged bool\n\n\t\/\/ List of pod sources for which using host network is allowed.\n\tHostNetworkSources []string\n}\n\nvar once sync.Once\nvar capabilities *Capabilities\n\n\/\/ Initialize the capability set. This can only be done once per binary, subsequent calls are ignored.\nfunc Initialize(c Capabilities) {\n\t\/\/ Only do this once\n\tonce.Do(func() {\n\t\tcapabilities = &c\n\t})\n}\n\n\/\/ Setup the capability set. It wraps Initialize for improving usibility.\nfunc Setup(allowPrivileged bool, hostNetworkSources []string) {\n\tInitialize(Capabilities{\n\t\tAllowPrivileged: allowPrivileged,\n\t\tHostNetworkSources: hostNetworkSources,\n\t})\n}\n\n\/\/ SetCapabilitiesForTests. Convenience method for testing. This should only be called from tests.\nfunc SetForTests(c Capabilities) {\n\tcapabilities = &c\n}\n\n\/\/ Returns a read-only copy of the system capabilities.\nfunc Get() Capabilities {\n\tif capabilities == nil {\n\t\tInitialize(Capabilities{\n\t\t\tAllowPrivileged: false,\n\t\t\tHostNetworkSources: []string{},\n\t\t})\n\t}\n\treturn *capabilities\n}\n<commit_msg>Fix benign data race in pod workers.<commit_after>\/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage capabilities\n\nimport (\n\t\"sync\"\n)\n\n\/\/ Capabilities defines the set of capabilities available within the system.\n\/\/ For now these are global. Eventually they may be per-user\ntype Capabilities struct {\n\tAllowPrivileged bool\n\n\t\/\/ List of pod sources for which using host network is allowed.\n\tHostNetworkSources []string\n}\n\n\/\/ TODO: Clean these up into a singleton\nvar once sync.Once\nvar lock sync.Mutex\nvar capabilities *Capabilities\n\n\/\/ Initialize the capability set. This can only be done once per binary, subsequent calls are ignored.\nfunc Initialize(c Capabilities) {\n\t\/\/ Only do this once\n\tonce.Do(func() {\n\t\tcapabilities = &c\n\t})\n}\n\n\/\/ Setup the capability set. It wraps Initialize for improving usibility.\nfunc Setup(allowPrivileged bool, hostNetworkSources []string) {\n\tInitialize(Capabilities{\n\t\tAllowPrivileged: allowPrivileged,\n\t\tHostNetworkSources: hostNetworkSources,\n\t})\n}\n\n\/\/ SetCapabilitiesForTests. Convenience method for testing. This should only be called from tests.\nfunc SetForTests(c Capabilities) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tcapabilities = &c\n}\n\n\/\/ Returns a read-only copy of the system capabilities.\nfunc Get() Capabilities {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\t\/\/ This check prevents clobbering of capabilities that might've been set via SetForTests\n\tif capabilities == nil {\n\t\tInitialize(Capabilities{\n\t\t\tAllowPrivileged: false,\n\t\t\tHostNetworkSources: []string{},\n\t\t})\n\t}\n\treturn *capabilities\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/ http:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage capabilities\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\t\/\/ Command is the option for the plugin to print version\n\tCommand = \"capabilities\"\n\t\/\/ TaskENICapability is the capability to support basic task-eni network mode\n\tTaskENICapability = \"awsvpc-network-mode\"\n)\n\n\/\/ Capability indicates the capability of a plugin\ntype Capability struct {\n\tCapabilities []string `json:\"capabilities,omitempty\"`\n}\n\n\/\/ New returns a Capability object with specified capabilities\nfunc New(capabilities ...string) *Capability {\n\treturn &Capability{\n\t\tCapabilities: capabilities,\n\t}\n}\n\n\/\/ String returns the JSON string of the Capability struct\nfunc (cap *Capability) String() (string, error) {\n\tdata, err := json.Marshal(cap)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"capabilities: failed to marshal capabilities info: %v\", cap.Capabilities)\n\t}\n\n\treturn string(data), nil\n}\n\n\/\/ Print writes the supported capabilities info into the stdout\nfunc (cap *Capability) Print() error {\n\tinfo, err := cap.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(info)\n\treturn nil\n}\n<commit_msg>pkg\/capabilities: Rename cap to non built-in name 'capability'<commit_after>\/\/ Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/ http:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage capabilities\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\t\/\/ Command is the option for the plugin to print version\n\tCommand = \"capabilities\"\n\t\/\/ TaskENICapability is the capability to support basic task-eni network mode\n\tTaskENICapability = \"awsvpc-network-mode\"\n)\n\n\/\/ Capability indicates the capability of a plugin\ntype Capability struct {\n\tCapabilities []string `json:\"capabilities,omitempty\"`\n}\n\n\/\/ New returns a Capability object with specified capabilities\nfunc New(capabilities ...string) *Capability {\n\treturn &Capability{\n\t\tCapabilities: capabilities,\n\t}\n}\n\n\/\/ String returns the JSON string of the Capability struct\nfunc (capability *Capability) String() (string, error) {\n\tdata, err := json.Marshal(capability)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"capabilities: failed to marshal capabilities info: %v\", capability.Capabilities)\n\t}\n\n\treturn string(data), nil\n}\n\n\/\/ Print writes the supported capabilities info into the stdout\nfunc (capability *Capability) Print() error {\n\tinfo, err := capability.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(info)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gccxml\n\n\/\/\n\/\/ static int _go_gccxml_char_is_signed()\n\/\/ {\n\/\/ char c = 255;\n\/\/ if (c > 128) {\n\/\/ return 0;\n\/\/ }\n\/\/ return 1;\n\/\/ }\nimport \"C\"\n\nimport (\n\t\"bitbucket.org\/binet\/go-cxxdict\/pkg\/cxxtypes\"\n)\n\nfunc gccxml_get_char_type() cxxtypes.TypeKind {\n\n\tchar_tk := cxxtypes.TK_Char_S\n\tif C._go_gccxml_char_is_signed() == C.int(0) {\n\t\tchar_tk = cxxtypes.TK_Char_U\n\t}\n\n\treturn char_tk\n}\n\n\/\/ EOF\n<commit_msg>use CHAR_MIN to detect if char is signed or not<commit_after>package gccxml\n\n\/\/ #include <limits.h>\n\/\/ static int _go_gccxml_char_is_signed()\n\/\/ {\n\/\/ return CHAR_MIN < 0;\n\/\/ }\nimport \"C\"\n\nimport (\n\t\"bitbucket.org\/binet\/go-cxxdict\/pkg\/cxxtypes\"\n)\n\nfunc gccxml_get_char_type() cxxtypes.TypeKind {\n\n\tchar_tk := cxxtypes.TK_Char_S\n\tif C._go_gccxml_char_is_signed() == C.int(0) {\n\t\tchar_tk = cxxtypes.TK_Char_U\n\t}\n\n\treturn char_tk\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"bufio\"\n\t\/\/\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\/\/utilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\t\/\/ TODO(a-robinson): Add unit tests for the handling of these metrics once\n\t\/\/ the upstream library supports it.\n\trequestCounter = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"apiserver_request_count\",\n\t\t\tHelp: \"Counter of apiserver requests broken out for each verb, API resource, client, and HTTP response contentType and code.\",\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\", \"client\", \"contentType\", \"code\"},\n\t)\n\trequestLatencies = prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"apiserver_request_latencies\",\n\t\t\tHelp: \"Response latency distribution in microseconds for each verb, resource and subresource.\",\n\t\t\t\/\/ Use buckets ranging from 125 ms to 8 seconds.\n\t\t\tBuckets: prometheus.ExponentialBuckets(125000, 2.0, 7),\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\trequestLatenciesSummary = prometheus.NewSummaryVec(\n\t\tprometheus.SummaryOpts{\n\t\t\tName: \"apiserver_request_latencies_summary\",\n\t\t\tHelp: \"Response latency summary in microseconds for each verb, resource and subresource.\",\n\t\t\t\/\/ Make the sliding window of 1h.\n\t\t\tMaxAge: time.Hour,\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\tresponseSizes = prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"apiserver_response_sizes\",\n\t\t\tHelp: \"Response size distribution in bytes for each verb, resource, subresource and scope (namespace\/cluster).\",\n\t\t\t\/\/ Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB).\n\t\t\tBuckets: prometheus.ExponentialBuckets(1000, 10.0, 7),\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\tkubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\\.exe))`)\n)\n\n\/\/ Register all metrics.\nfunc Register() {\n\tprometheus.MustRegister(requestCounter)\n\tprometheus.MustRegister(requestLatencies)\n\tprometheus.MustRegister(requestLatenciesSummary)\n\tprometheus.MustRegister(responseSizes)\n}\n\n\/\/ Monitor records a request to the apiserver endpoints that follow the Kubernetes API conventions. verb must be\n\/\/ uppercase to be backwards compatible with existing monitoring tooling.\nfunc Monitor(verb, resource, subresource, scope, client, contentType string, httpCode, respSize int, reqStart time.Time) {\n\telapsed := float64((time.Since(reqStart)) \/ time.Microsecond)\n\trequestCounter.WithLabelValues(verb, resource, subresource, scope, client, contentType, codeToString(httpCode)).Inc()\n\trequestLatencies.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed)\n\trequestLatenciesSummary.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed)\n\t\/\/ We are only interested in response sizes of read requests.\n\tif verb == \"GET\" || verb == \"LIST\" {\n\t\tresponseSizes.WithLabelValues(verb, resource, subresource, scope).Observe(float64(respSize))\n\t}\n}\n\n\/\/ MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record\n\/\/ a request. verb must be uppercase to be backwards compatible with existing monitoring tooling.\nfunc MonitorRequest(request *http.Request, verb, resource, subresource, scope, contentType string, httpCode, respSize int, reqStart time.Time) {\n\treportedVerb := verb\n\tif verb == \"LIST\" {\n\t\t\/\/ see apimachinery\/pkg\/runtime\/conversion.go Convert_Slice_string_To_bool\n\t\tif values := request.URL.Query()[\"watch\"]; len(values) > 0 {\n\t\t\tif value := strings.ToLower(values[0]); value != \"0\" && value != \"false\" {\n\t\t\t\treportedVerb = \"WATCH\"\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics\n\tif verb == \"WATCHLIST\" {\n\t\treportedVerb = \"WATCH\"\n\t}\n\n\tclient := cleanUserAgent(utilnet.GetHTTPClient(request))\n\tMonitor(reportedVerb, resource, subresource, scope, client, contentType, httpCode, respSize, reqStart)\n}\n\nfunc Reset() {\n\trequestCounter.Reset()\n\trequestLatencies.Reset()\n\trequestLatenciesSummary.Reset()\n\tresponseSizes.Reset()\n}\n\n\/\/ InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps\n\/\/ the go-restful RouteFunction instead of a HandlerFunc\nfunc InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc restful.RouteFunction) restful.RouteFunction {\n\treturn restful.RouteFunction(func(request *restful.Request, response *restful.Response) {\n\t\tnow := time.Now()\n\n\t\tdelegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter}\n\n\t\t_, cn := response.ResponseWriter.(http.CloseNotifier)\n\t\t_, fl := response.ResponseWriter.(http.Flusher)\n\t\t_, hj := response.ResponseWriter.(http.Hijacker)\n\t\tvar rw http.ResponseWriter\n\t\tif cn && fl && hj {\n\t\t\trw = &fancyResponseWriterDelegator{delegate}\n\t\t} else {\n\t\t\trw = delegate\n\t\t}\n\t\tresponse.ResponseWriter = rw\n\n\t\trouteFunc(request, response)\n\n\t\tMonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get(\"Content-Type\"), delegate.Status(), delegate.ContentLength(), now)\n\t})\n}\n\nfunc cleanUserAgent(ua string) string {\n\t\/\/ We collapse all \"web browser\"-type user agents into one \"browser\" to reduce metric cardinality.\n\tif strings.HasPrefix(ua, \"Mozilla\/\") {\n\t\treturn \"Browser\"\n\t}\n\t\/\/ If an old \"kubectl.exe\" has passed us its full path, we discard the path portion.\n\tua = kubectlExeRegexp.ReplaceAllString(ua, \"$1\")\n\treturn ua\n}\n\n\/\/ ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc.\ntype ResponseWriterDelegator struct {\n\thttp.ResponseWriter\n\n\tstatus int\n\twritten int64\n\twroteHeader bool\n}\n\nfunc (r *ResponseWriterDelegator) WriteHeader(code int) {\n\tr.status = code\n\tr.wroteHeader = true\n\tr.ResponseWriter.WriteHeader(code)\n}\n\nfunc (r *ResponseWriterDelegator) Write(b []byte) (int, error) {\n\tif !r.wroteHeader {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\tn, err := r.ResponseWriter.Write(b)\n\tr.written += int64(n)\n\treturn n, err\n}\n\nfunc (r *ResponseWriterDelegator) Status() int {\n\treturn r.status\n}\n\nfunc (r *ResponseWriterDelegator) ContentLength() int {\n\treturn int(r.written)\n}\n\ntype fancyResponseWriterDelegator struct {\n\t*ResponseWriterDelegator\n}\n\nfunc (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {\n\treturn f.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (f *fancyResponseWriterDelegator) Flush() {\n\tf.ResponseWriter.(http.Flusher).Flush()\n}\n\nfunc (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn f.ResponseWriter.(http.Hijacker).Hijack()\n}\n\n\/\/ Small optimization over Itoa\nfunc codeToString(s int) string {\n\tswitch s {\n\tcase 100:\n\t\treturn \"100\"\n\tcase 101:\n\t\treturn \"101\"\n\n\tcase 200:\n\t\treturn \"200\"\n\tcase 201:\n\t\treturn \"201\"\n\tcase 202:\n\t\treturn \"202\"\n\tcase 203:\n\t\treturn \"203\"\n\tcase 204:\n\t\treturn \"204\"\n\tcase 205:\n\t\treturn \"205\"\n\tcase 206:\n\t\treturn \"206\"\n\n\tcase 300:\n\t\treturn \"300\"\n\tcase 301:\n\t\treturn \"301\"\n\tcase 302:\n\t\treturn \"302\"\n\tcase 304:\n\t\treturn \"304\"\n\tcase 305:\n\t\treturn \"305\"\n\tcase 307:\n\t\treturn \"307\"\n\n\tcase 400:\n\t\treturn \"400\"\n\tcase 401:\n\t\treturn \"401\"\n\tcase 402:\n\t\treturn \"402\"\n\tcase 403:\n\t\treturn \"403\"\n\tcase 404:\n\t\treturn \"404\"\n\tcase 405:\n\t\treturn \"405\"\n\tcase 406:\n\t\treturn \"406\"\n\tcase 407:\n\t\treturn \"407\"\n\tcase 408:\n\t\treturn \"408\"\n\tcase 409:\n\t\treturn \"409\"\n\tcase 410:\n\t\treturn \"410\"\n\tcase 411:\n\t\treturn \"411\"\n\tcase 412:\n\t\treturn \"412\"\n\tcase 413:\n\t\treturn \"413\"\n\tcase 414:\n\t\treturn \"414\"\n\tcase 415:\n\t\treturn \"415\"\n\tcase 416:\n\t\treturn \"416\"\n\tcase 417:\n\t\treturn \"417\"\n\tcase 418:\n\t\treturn \"418\"\n\n\tcase 500:\n\t\treturn \"500\"\n\tcase 501:\n\t\treturn \"501\"\n\tcase 502:\n\t\treturn \"502\"\n\tcase 503:\n\t\treturn \"503\"\n\tcase 504:\n\t\treturn \"504\"\n\tcase 505:\n\t\treturn \"505\"\n\n\tcase 428:\n\t\treturn \"428\"\n\tcase 429:\n\t\treturn \"429\"\n\tcase 431:\n\t\treturn \"431\"\n\tcase 511:\n\t\treturn \"511\"\n\n\tdefault:\n\t\treturn strconv.Itoa(s)\n\t}\n}\n<commit_msg>Increase sliding window to 5hr for request_latencies metric<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"bufio\"\n\t\/\/\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\/\/utilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\t\/\/ TODO(a-robinson): Add unit tests for the handling of these metrics once\n\t\/\/ the upstream library supports it.\n\trequestCounter = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"apiserver_request_count\",\n\t\t\tHelp: \"Counter of apiserver requests broken out for each verb, API resource, client, and HTTP response contentType and code.\",\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\", \"client\", \"contentType\", \"code\"},\n\t)\n\trequestLatencies = prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"apiserver_request_latencies\",\n\t\t\tHelp: \"Response latency distribution in microseconds for each verb, resource and subresource.\",\n\t\t\t\/\/ Use buckets ranging from 125 ms to 8 seconds.\n\t\t\tBuckets: prometheus.ExponentialBuckets(125000, 2.0, 7),\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\trequestLatenciesSummary = prometheus.NewSummaryVec(\n\t\tprometheus.SummaryOpts{\n\t\t\tName: \"apiserver_request_latencies_summary\",\n\t\t\tHelp: \"Response latency summary in microseconds for each verb, resource and subresource.\",\n\t\t\t\/\/ Make the sliding window of 5h.\n\t\t\t\/\/ TODO: The value for this should be based on our SLI definition (medium term).\n\t\t\tMaxAge: 5 * time.Hour,\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\tresponseSizes = prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"apiserver_response_sizes\",\n\t\t\tHelp: \"Response size distribution in bytes for each verb, resource, subresource and scope (namespace\/cluster).\",\n\t\t\t\/\/ Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB).\n\t\t\tBuckets: prometheus.ExponentialBuckets(1000, 10.0, 7),\n\t\t},\n\t\t[]string{\"verb\", \"resource\", \"subresource\", \"scope\"},\n\t)\n\tkubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\\.exe))`)\n)\n\n\/\/ Register all metrics.\nfunc Register() {\n\tprometheus.MustRegister(requestCounter)\n\tprometheus.MustRegister(requestLatencies)\n\tprometheus.MustRegister(requestLatenciesSummary)\n\tprometheus.MustRegister(responseSizes)\n}\n\n\/\/ Monitor records a request to the apiserver endpoints that follow the Kubernetes API conventions. verb must be\n\/\/ uppercase to be backwards compatible with existing monitoring tooling.\nfunc Monitor(verb, resource, subresource, scope, client, contentType string, httpCode, respSize int, reqStart time.Time) {\n\telapsed := float64((time.Since(reqStart)) \/ time.Microsecond)\n\trequestCounter.WithLabelValues(verb, resource, subresource, scope, client, contentType, codeToString(httpCode)).Inc()\n\trequestLatencies.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed)\n\trequestLatenciesSummary.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed)\n\t\/\/ We are only interested in response sizes of read requests.\n\tif verb == \"GET\" || verb == \"LIST\" {\n\t\tresponseSizes.WithLabelValues(verb, resource, subresource, scope).Observe(float64(respSize))\n\t}\n}\n\n\/\/ MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record\n\/\/ a request. verb must be uppercase to be backwards compatible with existing monitoring tooling.\nfunc MonitorRequest(request *http.Request, verb, resource, subresource, scope, contentType string, httpCode, respSize int, reqStart time.Time) {\n\treportedVerb := verb\n\tif verb == \"LIST\" {\n\t\t\/\/ see apimachinery\/pkg\/runtime\/conversion.go Convert_Slice_string_To_bool\n\t\tif values := request.URL.Query()[\"watch\"]; len(values) > 0 {\n\t\t\tif value := strings.ToLower(values[0]); value != \"0\" && value != \"false\" {\n\t\t\t\treportedVerb = \"WATCH\"\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics\n\tif verb == \"WATCHLIST\" {\n\t\treportedVerb = \"WATCH\"\n\t}\n\n\tclient := cleanUserAgent(utilnet.GetHTTPClient(request))\n\tMonitor(reportedVerb, resource, subresource, scope, client, contentType, httpCode, respSize, reqStart)\n}\n\nfunc Reset() {\n\trequestCounter.Reset()\n\trequestLatencies.Reset()\n\trequestLatenciesSummary.Reset()\n\tresponseSizes.Reset()\n}\n\n\/\/ InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps\n\/\/ the go-restful RouteFunction instead of a HandlerFunc\nfunc InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc restful.RouteFunction) restful.RouteFunction {\n\treturn restful.RouteFunction(func(request *restful.Request, response *restful.Response) {\n\t\tnow := time.Now()\n\n\t\tdelegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter}\n\n\t\t_, cn := response.ResponseWriter.(http.CloseNotifier)\n\t\t_, fl := response.ResponseWriter.(http.Flusher)\n\t\t_, hj := response.ResponseWriter.(http.Hijacker)\n\t\tvar rw http.ResponseWriter\n\t\tif cn && fl && hj {\n\t\t\trw = &fancyResponseWriterDelegator{delegate}\n\t\t} else {\n\t\t\trw = delegate\n\t\t}\n\t\tresponse.ResponseWriter = rw\n\n\t\trouteFunc(request, response)\n\n\t\tMonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get(\"Content-Type\"), delegate.Status(), delegate.ContentLength(), now)\n\t})\n}\n\nfunc cleanUserAgent(ua string) string {\n\t\/\/ We collapse all \"web browser\"-type user agents into one \"browser\" to reduce metric cardinality.\n\tif strings.HasPrefix(ua, \"Mozilla\/\") {\n\t\treturn \"Browser\"\n\t}\n\t\/\/ If an old \"kubectl.exe\" has passed us its full path, we discard the path portion.\n\tua = kubectlExeRegexp.ReplaceAllString(ua, \"$1\")\n\treturn ua\n}\n\n\/\/ ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc.\ntype ResponseWriterDelegator struct {\n\thttp.ResponseWriter\n\n\tstatus int\n\twritten int64\n\twroteHeader bool\n}\n\nfunc (r *ResponseWriterDelegator) WriteHeader(code int) {\n\tr.status = code\n\tr.wroteHeader = true\n\tr.ResponseWriter.WriteHeader(code)\n}\n\nfunc (r *ResponseWriterDelegator) Write(b []byte) (int, error) {\n\tif !r.wroteHeader {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\tn, err := r.ResponseWriter.Write(b)\n\tr.written += int64(n)\n\treturn n, err\n}\n\nfunc (r *ResponseWriterDelegator) Status() int {\n\treturn r.status\n}\n\nfunc (r *ResponseWriterDelegator) ContentLength() int {\n\treturn int(r.written)\n}\n\ntype fancyResponseWriterDelegator struct {\n\t*ResponseWriterDelegator\n}\n\nfunc (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {\n\treturn f.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (f *fancyResponseWriterDelegator) Flush() {\n\tf.ResponseWriter.(http.Flusher).Flush()\n}\n\nfunc (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn f.ResponseWriter.(http.Hijacker).Hijack()\n}\n\n\/\/ Small optimization over Itoa\nfunc codeToString(s int) string {\n\tswitch s {\n\tcase 100:\n\t\treturn \"100\"\n\tcase 101:\n\t\treturn \"101\"\n\n\tcase 200:\n\t\treturn \"200\"\n\tcase 201:\n\t\treturn \"201\"\n\tcase 202:\n\t\treturn \"202\"\n\tcase 203:\n\t\treturn \"203\"\n\tcase 204:\n\t\treturn \"204\"\n\tcase 205:\n\t\treturn \"205\"\n\tcase 206:\n\t\treturn \"206\"\n\n\tcase 300:\n\t\treturn \"300\"\n\tcase 301:\n\t\treturn \"301\"\n\tcase 302:\n\t\treturn \"302\"\n\tcase 304:\n\t\treturn \"304\"\n\tcase 305:\n\t\treturn \"305\"\n\tcase 307:\n\t\treturn \"307\"\n\n\tcase 400:\n\t\treturn \"400\"\n\tcase 401:\n\t\treturn \"401\"\n\tcase 402:\n\t\treturn \"402\"\n\tcase 403:\n\t\treturn \"403\"\n\tcase 404:\n\t\treturn \"404\"\n\tcase 405:\n\t\treturn \"405\"\n\tcase 406:\n\t\treturn \"406\"\n\tcase 407:\n\t\treturn \"407\"\n\tcase 408:\n\t\treturn \"408\"\n\tcase 409:\n\t\treturn \"409\"\n\tcase 410:\n\t\treturn \"410\"\n\tcase 411:\n\t\treturn \"411\"\n\tcase 412:\n\t\treturn \"412\"\n\tcase 413:\n\t\treturn \"413\"\n\tcase 414:\n\t\treturn \"414\"\n\tcase 415:\n\t\treturn \"415\"\n\tcase 416:\n\t\treturn \"416\"\n\tcase 417:\n\t\treturn \"417\"\n\tcase 418:\n\t\treturn \"418\"\n\n\tcase 500:\n\t\treturn \"500\"\n\tcase 501:\n\t\treturn \"501\"\n\tcase 502:\n\t\treturn \"502\"\n\tcase 503:\n\t\treturn \"503\"\n\tcase 504:\n\t\treturn \"504\"\n\tcase 505:\n\t\treturn \"505\"\n\n\tcase 428:\n\t\treturn \"428\"\n\tcase 429:\n\t\treturn \"429\"\n\tcase 431:\n\t\treturn \"431\"\n\tcase 511:\n\t\treturn \"511\"\n\n\tdefault:\n\t\treturn strconv.Itoa(s)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2019 The Knative Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage ingress\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"kourier\/pkg\/config\"\n\t\"net\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"knative.dev\/serving\/pkg\/reconciler\/ingress\/resources\"\n\n\t\"knative.dev\/pkg\/system\"\n\n\t\"knative.dev\/serving\/pkg\/apis\/networking\/v1alpha1\"\n\n\t\"go.uber.org\/zap\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/selection\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tcorev1listers \"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\n\t\"knative.dev\/serving\/pkg\/network\/prober\"\n)\n\nconst (\n\tprobeConcurrency = 5\n\tprobeInterval = 1 * time.Second\n\tstateExpiration = 5 * time.Minute\n\tcleanupPeriod = 1 * time.Minute\n\tgatewayLabelSelectorKey = \"app\"\n\tgatewayLabelSelectorValue = \"3scale-kourier-gateway\"\n)\n\nvar dialContext = (&net.Dialer{}).DialContext\n\n\/\/ IngressState represents the probing progress at the Ingress scope\ntype IngressState struct {\n\tid string\n\tingress *v1alpha1.Ingress\n\t\/\/ pendingCount is the number of pods that haven't been successfully probed yet\n\tpendingCount int32\n\tlastAccessed time.Time\n\n\tcancel func()\n}\n\n\/\/ podState represents the probing progress at the Pod scope\ntype podState struct {\n\tsuccessCount int32\n\n\tcontext context.Context\n\tcancel func()\n}\n\ntype workItem struct {\n\tingressState *IngressState\n\tpodState *podState\n\turl string\n\tpodIP string\n\thostname string\n}\n\n\/\/ StatusManager provides a way to check if a VirtualService is ready\ntype StatusManager interface {\n\tIsReady(snapshotID string) (bool, error)\n}\n\n\/\/ StatusProber provides a way to check if a VirtualService is ready by probing the Envoy pods\n\/\/ handling that VirtualService.\ntype StatusProber struct {\n\tlogger *zap.SugaredLogger\n\n\t\/\/ mu guards snapshotStates and podStates\n\tmu sync.Mutex\n\tingressStates map[string]*IngressState\n\tpodStates map[string]*podState\n\n\tworkQueue workqueue.RateLimitingInterface\n\tpodLister corev1listers.PodLister\n\n\treadyCallback func(ingress *v1alpha1.Ingress)\n\tprobeConcurrency int\n\tstateExpiration time.Duration\n\tcleanupPeriod time.Duration\n}\n\n\/\/ NewStatusProber creates a new instance of StatusProber\nfunc NewStatusProber(\n\tlogger *zap.SugaredLogger,\n\tpodLister corev1listers.PodLister,\n\treadyCallback func(ingress *v1alpha1.Ingress)) *StatusProber {\n\treturn &StatusProber{\n\t\tlogger: logger,\n\t\tingressStates: make(map[string]*IngressState),\n\t\tpodStates: make(map[string]*podState),\n\t\tworkQueue: workqueue.NewNamedRateLimitingQueue(\n\t\t\tworkqueue.DefaultControllerRateLimiter(),\n\t\t\t\"ProbingQueue\"),\n\t\treadyCallback: readyCallback,\n\t\tpodLister: podLister,\n\t\tprobeConcurrency: probeConcurrency,\n\t\tstateExpiration: stateExpiration,\n\t\tcleanupPeriod: cleanupPeriod,\n\t}\n}\n\n\/\/ IsReady checks if the provided Ingress is ready, i.e. the Envoy pods serving the Ingress\n\/\/ have all been updated. This function is designed to be used by the Ingress controller, i.e. it\n\/\/ will be called in the order of reconciliation. This means that if IsReady is called on an Ingress,\n\/\/ this Ingress is the latest known version and therefore anything related to older versions can be ignored.\n\/\/ Also, it means that IsReady is not called concurrently.\nfunc (m *StatusProber) IsReady(ingress *v1alpha1.Ingress) (bool, error) {\n\n\thash, err := resources.ComputeIngressHash(ingress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tingressKey := fmt.Sprintf(\"%x\", hash)\n\n\tif ready, ok := func() (bool, bool) {\n\t\tm.mu.Lock()\n\t\tdefer m.mu.Unlock()\n\t\tif state, ok := m.ingressStates[ingressKey]; ok {\n\t\t\tif state.id == ingressKey {\n\t\t\t\tstate.lastAccessed = time.Now()\n\t\t\t\treturn atomic.LoadInt32(&state.pendingCount) == 0, true\n\t\t\t}\n\n\t\t\t\/\/ Cancel the polling for the outdated version\n\t\t\tstate.cancel()\n\t\t\tdelete(m.ingressStates, ingressKey)\n\t\t}\n\t\treturn false, false\n\t}(); ok {\n\t\treturn ready, nil\n\t}\n\n\tingCtx, cancel := context.WithCancel(context.Background())\n\tsnapshotState := &IngressState{\n\t\tid: ingressKey,\n\t\tingress: ingress,\n\t\tpendingCount: 0,\n\t\tlastAccessed: time.Now(),\n\t\tcancel: cancel,\n\t}\n\n\tvar workItems []*workItem\n\tselector := labels.NewSelector()\n\tlabelSelector, err := labels.NewRequirement(gatewayLabelSelectorKey, selection.Equals, []string{gatewayLabelSelectorValue})\n\tif err != nil {\n\t\tm.logger.Errorf(\"Failed to create 'Equals' requirement from %q=%q: %w\", gatewayLabelSelectorKey, gatewayLabelSelectorValue, err)\n\t}\n\tselector = selector.Add(*labelSelector)\n\n\tgatewayPods, err := m.podLister.Pods(system.Namespace()).List(selector)\n\tif err != nil {\n\t\tm.logger.Errorf(\"failed to get gateway pods: %w\", err)\n\t}\n\n\tif len(gatewayPods) == 0 {\n\t\treturn false, nil\n\t}\n\n\tfor _, gwPod := range gatewayPods {\n\t\tip := gwPod.Status.PodIP\n\t\tctx, cancel := context.WithCancel(ingCtx)\n\t\tpodState := &podState{\n\t\t\tsuccessCount: 0,\n\t\t\tcontext: ctx,\n\t\t\tcancel: cancel,\n\t\t}\n\t\t\/\/ Save the podState to be able to cancel it in case of Pod deletion\n\t\tfunc() {\n\t\t\tm.mu.Lock()\n\t\t\tdefer m.mu.Unlock()\n\t\t\tm.podStates[ip] = podState\n\t\t}()\n\n\t\t\/\/ Update states and cleanup m.podStates when probing is done or cancelled\n\t\tgo func(ip string) {\n\t\t\t<-podState.context.Done()\n\t\t\tm.updateStates(snapshotState, podState)\n\n\t\t\tm.mu.Lock()\n\t\t\tdefer m.mu.Unlock()\n\t\t\t\/\/ It is critical to check that the current podState is also the one stored in the map\n\t\t\t\/\/ before deleting it because it could have been replaced if a new version of the ingress\n\t\t\t\/\/ has started being probed.\n\t\t\tif state, ok := m.podStates[ip]; ok && state == podState {\n\t\t\t\tdelete(m.podStates, ip)\n\t\t\t}\n\t\t}(ip)\n\n\t\tport := strconv.Itoa(int(config.HttpPortInternal))\n\n\t\tworkItem := &workItem{\n\t\t\tingressState: snapshotState,\n\t\t\tpodState: podState,\n\t\t\turl: \"http:\/\/\" + gwPod.Status.PodIP + \":\" + port + config.InternalKourierPath + \"\/\" + ingressKey,\n\t\t\tpodIP: ip,\n\t\t\thostname: config.InternalKourierDomain,\n\t\t}\n\t\tworkItems = append(workItems, workItem)\n\n\t}\n\n\tsnapshotState.pendingCount += int32(len(gatewayPods))\n\n\tfunc() {\n\t\tm.mu.Lock()\n\t\tdefer m.mu.Unlock()\n\t\tm.ingressStates[ingressKey] = snapshotState\n\t}()\n\tfor _, workItem := range workItems {\n\t\tm.workQueue.AddRateLimited(workItem)\n\t\tm.logger.Infof(\"Queuing probe for %s, IP: %s (depth: %d)\", workItem.url, workItem.podIP, m.workQueue.Len())\n\t}\n\treturn len(workItems) == 0, nil\n}\n\n\/\/ Start starts the StatusManager background operations\nfunc (m *StatusProber) Start(done <-chan struct{}) {\n\t\/\/ Start the worker goroutines\n\tfor i := 0; i < m.probeConcurrency; i++ {\n\t\tgo func() {\n\t\t\tfor m.processWorkItem() {\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Cleanup the states periodically\n\tgo wait.Until(m.expireOldStates, m.cleanupPeriod, done)\n\n\t\/\/ Stop processing the queue when cancelled\n\tgo func() {\n\t\t<-done\n\t\tm.workQueue.ShutDown()\n\t}()\n}\n\n\/\/ CancelIngress cancels probing of the provided Ingress.\nfunc (m *StatusProber) CancelIngress(ingress *v1alpha1.Ingress) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\thash, err := resources.ComputeIngressHash(ingress)\n\tif err != nil {\n\t\tlogrus.Error(\"failed to compute ingress Hash: %s\", err)\n\t}\n\tingressKey := fmt.Sprintf(\"%x\", hash)\n\tif state, ok := m.ingressStates[ingressKey]; ok {\n\t\tstate.cancel()\n\t}\n}\n\n\/\/ CancelPodProbing cancels probing of the provided Pod IP.\nfunc (m *StatusProber) CancelPodProbing(pod *corev1.Pod) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tif state, ok := m.podStates[pod.Status.PodIP]; ok {\n\t\tstate.cancel()\n\t}\n}\n\n\/\/ expireOldStates removes the states that haven't been accessed in a while.\nfunc (m *StatusProber) expireOldStates() {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor key, state := range m.ingressStates {\n\t\tif time.Since(state.lastAccessed) > m.stateExpiration {\n\t\t\tstate.cancel()\n\t\t\tdelete(m.ingressStates, key)\n\t\t}\n\t}\n}\n\n\/\/ processWorkItem processes a single work item from workQueue.\n\/\/ It returns false when there is no more items to process, true otherwise.\nfunc (m *StatusProber) processWorkItem() bool {\n\tobj, shutdown := m.workQueue.Get()\n\tif shutdown {\n\t\treturn false\n\t}\n\n\tdefer m.workQueue.Done(obj)\n\n\t\/\/ Crash if the item is not of the expected type\n\titem, ok := obj.(*workItem)\n\tif !ok {\n\t\tm.logger.Fatalf(\"Unexpected work item type: want: %s, got: %s\\n\", reflect.TypeOf(&workItem{}).Name(), reflect.TypeOf(obj).Name())\n\t}\n\tm.logger.Infof(\"Processing probe for %s, IP: %s (depth: %d)\", item.url, item.podIP, m.workQueue.Len())\n\n\ttransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\t\/\/ We only want to know that the Gateway is configured, not that the configuration is valid.\n\t\t\t\/\/ Therefore, we can safely ignore any TLS certificate validation.\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t\tDisableKeepAlives: true,\n\t\tDialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) {\n\t\t\t\/\/ Requests with the IP as hostname and the Host header set do no pass client-side validation\n\t\t\t\/\/ because the HTTP client validates that the hostname (not the Host header) matches the server\n\t\t\t\/\/ TLS certificate Common Name or Alternative Names. Therefore, http.Request.URL is set to the\n\t\t\t\/\/ hostname and it is substituted it here with the target IP.\n\t\t\treturn dialContext(ctx, network, net.JoinHostPort(item.podIP, strconv.Itoa(int(config.HttpPortInternal))))\n\t\t}}\n\n\tok, err := prober.Do(\n\t\titem.podState.context,\n\t\ttransport,\n\t\titem.url,\n\t\tprober.WithHost(config.InternalKourierDomain),\n\t\tprober.ExpectsStatusCodes([]int{http.StatusOK}),\n\t)\n\n\t\/\/ In case of cancellation, drop the work item\n\tselect {\n\tcase <-item.podState.context.Done():\n\t\tm.workQueue.Forget(obj)\n\t\treturn true\n\tdefault:\n\t}\n\n\tif err != nil || !ok {\n\t\t\/\/ In case of error, enqueue for retry\n\t\tm.workQueue.AddRateLimited(obj)\n\t\tm.logger.Errorf(\"Probing of %s failed, IP: %s, ready: %t, error: %v (depth: %d)\", item.url, item.podIP, ok, err, m.workQueue.Len())\n\t} else {\n\t\tm.updateStates(item.ingressState, item.podState)\n\t}\n\treturn true\n}\n\nfunc (m *StatusProber) updateStates(ingressState *IngressState, podState *podState) {\n\tif atomic.AddInt32(&podState.successCount, 1) == 1 {\n\t\t\/\/ This is the first successful probe call for the pod, cancel all other work items for this pod\n\t\tpodState.cancel()\n\n\t\t\/\/ This is the last pod being successfully probed, the Ingress is ready\n\t\tif atomic.AddInt32(&ingressState.pendingCount, -1) == 0 {\n\t\t\tm.readyCallback(ingressState.ingress)\n\t\t}\n\t}\n}\n<commit_msg>fixup: Use internal logger and fix format<commit_after>\/*\n Copyright 2019 The Knative Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage ingress\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"kourier\/pkg\/config\"\n\t\"net\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"knative.dev\/serving\/pkg\/reconciler\/ingress\/resources\"\n\n\t\"knative.dev\/pkg\/system\"\n\n\t\"knative.dev\/serving\/pkg\/apis\/networking\/v1alpha1\"\n\n\t\"go.uber.org\/zap\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/selection\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tcorev1listers \"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\n\t\"knative.dev\/serving\/pkg\/network\/prober\"\n)\n\nconst (\n\tprobeConcurrency = 5\n\tprobeInterval = 1 * time.Second\n\tstateExpiration = 5 * time.Minute\n\tcleanupPeriod = 1 * time.Minute\n\tgatewayLabelSelectorKey = \"app\"\n\tgatewayLabelSelectorValue = \"3scale-kourier-gateway\"\n)\n\nvar dialContext = (&net.Dialer{}).DialContext\n\n\/\/ IngressState represents the probing progress at the Ingress scope\ntype IngressState struct {\n\tid string\n\tingress *v1alpha1.Ingress\n\t\/\/ pendingCount is the number of pods that haven't been successfully probed yet\n\tpendingCount int32\n\tlastAccessed time.Time\n\n\tcancel func()\n}\n\n\/\/ podState represents the probing progress at the Pod scope\ntype podState struct {\n\tsuccessCount int32\n\n\tcontext context.Context\n\tcancel func()\n}\n\ntype workItem struct {\n\tingressState *IngressState\n\tpodState *podState\n\turl string\n\tpodIP string\n\thostname string\n}\n\n\/\/ StatusManager provides a way to check if a VirtualService is ready\ntype StatusManager interface {\n\tIsReady(snapshotID string) (bool, error)\n}\n\n\/\/ StatusProber provides a way to check if a VirtualService is ready by probing the Envoy pods\n\/\/ handling that VirtualService.\ntype StatusProber struct {\n\tlogger *zap.SugaredLogger\n\n\t\/\/ mu guards snapshotStates and podStates\n\tmu sync.Mutex\n\tingressStates map[string]*IngressState\n\tpodStates map[string]*podState\n\n\tworkQueue workqueue.RateLimitingInterface\n\tpodLister corev1listers.PodLister\n\n\treadyCallback func(ingress *v1alpha1.Ingress)\n\tprobeConcurrency int\n\tstateExpiration time.Duration\n\tcleanupPeriod time.Duration\n}\n\n\/\/ NewStatusProber creates a new instance of StatusProber\nfunc NewStatusProber(\n\tlogger *zap.SugaredLogger,\n\tpodLister corev1listers.PodLister,\n\treadyCallback func(ingress *v1alpha1.Ingress)) *StatusProber {\n\treturn &StatusProber{\n\t\tlogger: logger,\n\t\tingressStates: make(map[string]*IngressState),\n\t\tpodStates: make(map[string]*podState),\n\t\tworkQueue: workqueue.NewNamedRateLimitingQueue(\n\t\t\tworkqueue.DefaultControllerRateLimiter(),\n\t\t\t\"ProbingQueue\"),\n\t\treadyCallback: readyCallback,\n\t\tpodLister: podLister,\n\t\tprobeConcurrency: probeConcurrency,\n\t\tstateExpiration: stateExpiration,\n\t\tcleanupPeriod: cleanupPeriod,\n\t}\n}\n\n\/\/ IsReady checks if the provided Ingress is ready, i.e. the Envoy pods serving the Ingress\n\/\/ have all been updated. This function is designed to be used by the Ingress controller, i.e. it\n\/\/ will be called in the order of reconciliation. This means that if IsReady is called on an Ingress,\n\/\/ this Ingress is the latest known version and therefore anything related to older versions can be ignored.\n\/\/ Also, it means that IsReady is not called concurrently.\nfunc (m *StatusProber) IsReady(ingress *v1alpha1.Ingress) (bool, error) {\n\n\thash, err := resources.ComputeIngressHash(ingress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tingressKey := fmt.Sprintf(\"%x\", hash)\n\n\tif ready, ok := func() (bool, bool) {\n\t\tm.mu.Lock()\n\t\tdefer m.mu.Unlock()\n\t\tif state, ok := m.ingressStates[ingressKey]; ok {\n\t\t\tif state.id == ingressKey {\n\t\t\t\tstate.lastAccessed = time.Now()\n\t\t\t\treturn atomic.LoadInt32(&state.pendingCount) == 0, true\n\t\t\t}\n\n\t\t\t\/\/ Cancel the polling for the outdated version\n\t\t\tstate.cancel()\n\t\t\tdelete(m.ingressStates, ingressKey)\n\t\t}\n\t\treturn false, false\n\t}(); ok {\n\t\treturn ready, nil\n\t}\n\n\tingCtx, cancel := context.WithCancel(context.Background())\n\tsnapshotState := &IngressState{\n\t\tid: ingressKey,\n\t\tingress: ingress,\n\t\tpendingCount: 0,\n\t\tlastAccessed: time.Now(),\n\t\tcancel: cancel,\n\t}\n\n\tvar workItems []*workItem\n\tselector := labels.NewSelector()\n\tlabelSelector, err := labels.NewRequirement(gatewayLabelSelectorKey, selection.Equals, []string{gatewayLabelSelectorValue})\n\tif err != nil {\n\t\tm.logger.Errorf(\"Failed to create 'Equals' requirement from %q=%q: %w\", gatewayLabelSelectorKey, gatewayLabelSelectorValue, err)\n\t}\n\tselector = selector.Add(*labelSelector)\n\n\tgatewayPods, err := m.podLister.Pods(system.Namespace()).List(selector)\n\tif err != nil {\n\t\tm.logger.Errorf(\"failed to get gateway pods: %w\", err)\n\t}\n\n\tif len(gatewayPods) == 0 {\n\t\treturn false, nil\n\t}\n\n\tfor _, gwPod := range gatewayPods {\n\t\tip := gwPod.Status.PodIP\n\t\tctx, cancel := context.WithCancel(ingCtx)\n\t\tpodState := &podState{\n\t\t\tsuccessCount: 0,\n\t\t\tcontext: ctx,\n\t\t\tcancel: cancel,\n\t\t}\n\t\t\/\/ Save the podState to be able to cancel it in case of Pod deletion\n\t\tfunc() {\n\t\t\tm.mu.Lock()\n\t\t\tdefer m.mu.Unlock()\n\t\t\tm.podStates[ip] = podState\n\t\t}()\n\n\t\t\/\/ Update states and cleanup m.podStates when probing is done or cancelled\n\t\tgo func(ip string) {\n\t\t\t<-podState.context.Done()\n\t\t\tm.updateStates(snapshotState, podState)\n\n\t\t\tm.mu.Lock()\n\t\t\tdefer m.mu.Unlock()\n\t\t\t\/\/ It is critical to check that the current podState is also the one stored in the map\n\t\t\t\/\/ before deleting it because it could have been replaced if a new version of the ingress\n\t\t\t\/\/ has started being probed.\n\t\t\tif state, ok := m.podStates[ip]; ok && state == podState {\n\t\t\t\tdelete(m.podStates, ip)\n\t\t\t}\n\t\t}(ip)\n\n\t\tport := strconv.Itoa(int(config.HttpPortInternal))\n\n\t\tworkItem := &workItem{\n\t\t\tingressState: snapshotState,\n\t\t\tpodState: podState,\n\t\t\turl: \"http:\/\/\" + gwPod.Status.PodIP + \":\" + port + config.InternalKourierPath + \"\/\" + ingressKey,\n\t\t\tpodIP: ip,\n\t\t\thostname: config.InternalKourierDomain,\n\t\t}\n\t\tworkItems = append(workItems, workItem)\n\n\t}\n\n\tsnapshotState.pendingCount += int32(len(gatewayPods))\n\n\tfunc() {\n\t\tm.mu.Lock()\n\t\tdefer m.mu.Unlock()\n\t\tm.ingressStates[ingressKey] = snapshotState\n\t}()\n\tfor _, workItem := range workItems {\n\t\tm.workQueue.AddRateLimited(workItem)\n\t\tm.logger.Infof(\"Queuing probe for %s, IP: %s (depth: %d)\", workItem.url, workItem.podIP, m.workQueue.Len())\n\t}\n\treturn len(workItems) == 0, nil\n}\n\n\/\/ Start starts the StatusManager background operations\nfunc (m *StatusProber) Start(done <-chan struct{}) {\n\t\/\/ Start the worker goroutines\n\tfor i := 0; i < m.probeConcurrency; i++ {\n\t\tgo func() {\n\t\t\tfor m.processWorkItem() {\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Cleanup the states periodically\n\tgo wait.Until(m.expireOldStates, m.cleanupPeriod, done)\n\n\t\/\/ Stop processing the queue when cancelled\n\tgo func() {\n\t\t<-done\n\t\tm.workQueue.ShutDown()\n\t}()\n}\n\n\/\/ CancelIngress cancels probing of the provided Ingress.\nfunc (m *StatusProber) CancelIngress(ingress *v1alpha1.Ingress) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\thash, err := resources.ComputeIngressHash(ingress)\n\tif err != nil {\n\t\tm.logger.Errorf(\"failed to compute ingress Hash: %s\", err)\n\t}\n\tingressKey := fmt.Sprintf(\"%x\", hash)\n\tif state, ok := m.ingressStates[ingressKey]; ok {\n\t\tstate.cancel()\n\t}\n}\n\n\/\/ CancelPodProbing cancels probing of the provided Pod IP.\nfunc (m *StatusProber) CancelPodProbing(pod *corev1.Pod) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tif state, ok := m.podStates[pod.Status.PodIP]; ok {\n\t\tstate.cancel()\n\t}\n}\n\n\/\/ expireOldStates removes the states that haven't been accessed in a while.\nfunc (m *StatusProber) expireOldStates() {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor key, state := range m.ingressStates {\n\t\tif time.Since(state.lastAccessed) > m.stateExpiration {\n\t\t\tstate.cancel()\n\t\t\tdelete(m.ingressStates, key)\n\t\t}\n\t}\n}\n\n\/\/ processWorkItem processes a single work item from workQueue.\n\/\/ It returns false when there is no more items to process, true otherwise.\nfunc (m *StatusProber) processWorkItem() bool {\n\tobj, shutdown := m.workQueue.Get()\n\tif shutdown {\n\t\treturn false\n\t}\n\n\tdefer m.workQueue.Done(obj)\n\n\t\/\/ Crash if the item is not of the expected type\n\titem, ok := obj.(*workItem)\n\tif !ok {\n\t\tm.logger.Fatalf(\"Unexpected work item type: want: %s, got: %s\\n\", reflect.TypeOf(&workItem{}).Name(), reflect.TypeOf(obj).Name())\n\t}\n\tm.logger.Infof(\"Processing probe for %s, IP: %s (depth: %d)\", item.url, item.podIP, m.workQueue.Len())\n\n\ttransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\t\/\/ We only want to know that the Gateway is configured, not that the configuration is valid.\n\t\t\t\/\/ Therefore, we can safely ignore any TLS certificate validation.\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t\tDisableKeepAlives: true,\n\t\tDialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) {\n\t\t\t\/\/ Requests with the IP as hostname and the Host header set do no pass client-side validation\n\t\t\t\/\/ because the HTTP client validates that the hostname (not the Host header) matches the server\n\t\t\t\/\/ TLS certificate Common Name or Alternative Names. Therefore, http.Request.URL is set to the\n\t\t\t\/\/ hostname and it is substituted it here with the target IP.\n\t\t\treturn dialContext(ctx, network, net.JoinHostPort(item.podIP, strconv.Itoa(int(config.HttpPortInternal))))\n\t\t}}\n\n\tok, err := prober.Do(\n\t\titem.podState.context,\n\t\ttransport,\n\t\titem.url,\n\t\tprober.WithHost(config.InternalKourierDomain),\n\t\tprober.ExpectsStatusCodes([]int{http.StatusOK}),\n\t)\n\n\t\/\/ In case of cancellation, drop the work item\n\tselect {\n\tcase <-item.podState.context.Done():\n\t\tm.workQueue.Forget(obj)\n\t\treturn true\n\tdefault:\n\t}\n\n\tif err != nil || !ok {\n\t\t\/\/ In case of error, enqueue for retry\n\t\tm.workQueue.AddRateLimited(obj)\n\t\tm.logger.Errorf(\"Probing of %s failed, IP: %s, ready: %t, error: %v (depth: %d)\", item.url, item.podIP, ok, err, m.workQueue.Len())\n\t} else {\n\t\tm.updateStates(item.ingressState, item.podState)\n\t}\n\treturn true\n}\n\nfunc (m *StatusProber) updateStates(ingressState *IngressState, podState *podState) {\n\tif atomic.AddInt32(&podState.successCount, 1) == 1 {\n\t\t\/\/ This is the first successful probe call for the pod, cancel all other work items for this pod\n\t\tpodState.cancel()\n\n\t\t\/\/ This is the last pod being successfully probed, the Ingress is ready\n\t\tif atomic.AddInt32(&ingressState.pendingCount, -1) == 0 {\n\t\t\tm.readyCallback(ingressState.ingress)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/groupcache\/lru\"\n)\n\ntype LRUExpireCache struct {\n\tcache *lru.Cache\n\tlock sync.RWMutex\n}\n\nfunc NewLRUExpireCache(maxSize int) *LRUExpireCache {\n\treturn &LRUExpireCache{cache: lru.New(maxSize)}\n}\n\ntype cacheEntry struct {\n\tvalue interface{}\n\texpireTime time.Time\n}\n\nfunc (c *LRUExpireCache) Add(key lru.Key, value interface{}, ttl time.Duration) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tc.cache.Add(key, &cacheEntry{value, time.Now().Add(ttl)})\n\t\/\/ Remove entry from cache after ttl.\n\ttime.AfterFunc(ttl, func() { c.remove(key) })\n}\n\nfunc (c *LRUExpireCache) Get(key lru.Key) (interface{}, bool) {\n\tc.lock.RLock()\n\tdefer c.lock.RUnlock()\n\te, ok := c.cache.Get(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tif time.Now().After(e.(*cacheEntry).expireTime) {\n\t\tgo c.remove(key)\n\t\treturn nil, false\n\t}\n\treturn e.(*cacheEntry).value, true\n}\n\nfunc (c *LRUExpireCache) remove(key lru.Key) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tc.cache.Remove(key)\n}\n<commit_msg>LRUExpireCache#Get requires write lock<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/groupcache\/lru\"\n)\n\ntype LRUExpireCache struct {\n\tcache *lru.Cache\n\tlock sync.Mutex\n}\n\nfunc NewLRUExpireCache(maxSize int) *LRUExpireCache {\n\treturn &LRUExpireCache{cache: lru.New(maxSize)}\n}\n\ntype cacheEntry struct {\n\tvalue interface{}\n\texpireTime time.Time\n}\n\nfunc (c *LRUExpireCache) Add(key lru.Key, value interface{}, ttl time.Duration) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tc.cache.Add(key, &cacheEntry{value, time.Now().Add(ttl)})\n\t\/\/ Remove entry from cache after ttl.\n\ttime.AfterFunc(ttl, func() { c.remove(key) })\n}\n\nfunc (c *LRUExpireCache) Get(key lru.Key) (interface{}, bool) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\te, ok := c.cache.Get(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tif time.Now().After(e.(*cacheEntry).expireTime) {\n\t\tgo c.remove(key)\n\t\treturn nil, false\n\t}\n\treturn e.(*cacheEntry).value, true\n}\n\nfunc (c *LRUExpireCache) remove(key lru.Key) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tc.cache.Remove(key)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/goauth2\/oauth\"\n\t\"code.google.com\/p\/goauth2\/oauth\/jwt\"\n\t\"encoding\/gob\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n)\n\nconst tokensFile = \"~tokens.gob\"\n\nfunc authStart() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.start <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tfmt.Println(\"Couldn't load API \", args[1])\n\t\treturn\n\t}\n\tm := findMethod(args[2], *api)\n\tif m.Scopes == nil {\n\t\tfmt.Println(\"Method doesn't require auth\")\n\t\treturn\n\t}\n\toauthConfig.Scope = strings.Join(m.Scopes, \" \")\n\tfmt.Println(\"Open a browser and visit the following URL:\")\n\tfmt.Println(oauthConfig.AuthCodeURL(\"\"))\n\tfmt.Println(\"Then run the following command with the resulting auth code:\")\n\tfmt.Println(\"gapi auth.finish <code>\")\n}\nfunc authFinish() {\n\targs := endpointFs.Args()\n\tif len(args) != 2 {\n\t\tfmt.Println(\"Invalid arguments, must provide code, e.g.:\")\n\t\tfmt.Println(\" gapi auth.finish <code>\")\n\t\treturn\n\t}\n\tcode := args[1]\n\tt := &oauth.Transport{Config: oauthConfig}\n\ttok, err := t.Exchange(code)\n\tmaybeFatal(\"error exchanging code\", err)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tinf, err := getTokenInfo(tok.AccessToken)\n\tmaybeFatal(\"error getting tokeninfo\", err)\n\ttoks.Tok[inf.Scope] = token{\n\t\tAccessToken: tok.AccessToken,\n\t\tRefreshToken: tok.RefreshToken,\n\t}\n\terr = toks.save()\n\tmaybeFatal(\"error saving tokens\", err)\n\tfmt.Println(\"Token saved\")\n}\nfunc authPrint() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.print <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tlog.Fatal(\"Couldn't load API \", args[1])\n\t}\n\tm := findMethod(args[2], *api)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tif tok, found := toks.Tok[strings.Join(m.Scopes, \" \")]; found {\n\t\t\/\/ TODO: If necessary, refresh, store, and print the new token.\n\t\tfmt.Println(tok.AccessToken)\n\t} else {\n\t\tfmt.Println(\"No token found. Run the following command to store a token:\")\n\t\tfmt.Println(\"gapi auth.start\", api.Name, m.ID[len(api.Name)+1:])\n\t}\n}\nfunc authRevoke() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.print <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tlog.Fatal(\"Couldn't load API \", args[1])\n\t}\n\tm := findMethod(args[2], *api)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tif tok, found := toks.Tok[strings.Join(m.Scopes, \" \")]; found {\n\t\t_, err := http.Get(\"https:\/\/accounts.google.com\/o\/oauth2\/revoke?token=\" + tok)\n\t\tmaybeFatal(\"error revoking token\", err)\n\t\tdelete(toks.Tok, strings.Join(m.Scopes, \" \"))\n\t\ttoks.save()\n\t}\n}\n\ntype tokenInfo struct {\n\tScope string\n\tExpiresIn int `json:\"expires_in\"`\n\tAccessType string `json:\"access_type\"`\n}\n\nfunc (inf tokenInfo) expired() bool {\n\treturn inf.ExpiresIn < 0\n}\nfunc getTokenInfo(tok string) (*tokenInfo, error) {\n\tr, err := http.Post(\"https:\/\/www.googleapis.com\/oauth2\/v2\/tokeninfo?access_token=\"+tok, \"\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\tvar info tokenInfo\n\terr = json.NewDecoder(r.Body).Decode(&info)\n\treturn &info, err\n}\n\ntype tokens struct {\n\tTok map[string]token\n}\n\ntype token struct {\n\tAccessToken, RefreshToken string\n}\n\nfunc loadTokens() (*tokens, error) {\n\tfi, err := os.Stat(tokensFile)\n\tif err == os.ErrNotExist || err == io.EOF {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t} else if err != nil {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tif fi.Size() == 0 {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tf, err := os.Open(tokensFile)\n\tif err != nil && err != io.EOF {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tdefer f.Close()\n\tvar t tokens\n\t_ = gob.NewDecoder(f).Decode(&t)\n\treturn &t, nil\n}\n\nfunc (t *tokens) save() error {\n\tf, err := os.Create(tokensFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn gob.NewEncoder(f).Encode(t)\n}\n\nfunc accessTokenFromPemFile(scope string) string {\n\tsecretBytes, err := ioutil.ReadFile(*flagSecrets)\n\tmaybeFatal(\"error reading secrets file:\", err)\n\tvar config struct {\n\t\tWeb struct {\n\t\t\tClientEmail string `json:\"client_email\"`\n\t\t\tTokenURI string `json:\"token_uri\"`\n\t\t}\n\t}\n\terr = json.Unmarshal(secretBytes, &config)\n\tmaybeFatal(\"error unmarshalling secrets:\", err)\n\n\tkeyBytes, err := ioutil.ReadFile(*flagPem)\n\tmaybeFatal(\"error reading private key file:\", err)\n\n\t\/\/ Craft the ClaimSet and JWT token.\n\tt := jwt.NewToken(config.Web.ClientEmail, scope, keyBytes)\n\tt.ClaimSet.Aud = config.Web.TokenURI\n\n\t\/\/ We need to provide a client.\n\tc := &http.Client{}\n\n\t\/\/ Get the access token.\n\to, err := t.Assert(c)\n\tmaybeFatal(\"assertion error:\", err)\n\n\treturn o.AccessToken\n}\n<commit_msg>Fix revokeToken<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/goauth2\/oauth\"\n\t\"code.google.com\/p\/goauth2\/oauth\/jwt\"\n\t\"encoding\/gob\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n)\n\nconst tokensFile = \"~tokens.gob\"\n\nfunc authStart() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.start <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tfmt.Println(\"Couldn't load API \", args[1])\n\t\treturn\n\t}\n\tm := findMethod(args[2], *api)\n\tif m.Scopes == nil {\n\t\tfmt.Println(\"Method doesn't require auth\")\n\t\treturn\n\t}\n\toauthConfig.Scope = strings.Join(m.Scopes, \" \")\n\tfmt.Println(\"Open a browser and visit the following URL:\")\n\tfmt.Println(oauthConfig.AuthCodeURL(\"\"))\n\tfmt.Println(\"Then run the following command with the resulting auth code:\")\n\tfmt.Println(\"gapi auth.finish <code>\")\n}\nfunc authFinish() {\n\targs := endpointFs.Args()\n\tif len(args) != 2 {\n\t\tfmt.Println(\"Invalid arguments, must provide code, e.g.:\")\n\t\tfmt.Println(\" gapi auth.finish <code>\")\n\t\treturn\n\t}\n\tcode := args[1]\n\tt := &oauth.Transport{Config: oauthConfig}\n\ttok, err := t.Exchange(code)\n\tmaybeFatal(\"error exchanging code\", err)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tinf, err := getTokenInfo(tok.AccessToken)\n\tmaybeFatal(\"error getting tokeninfo\", err)\n\ttoks.Tok[inf.Scope] = token{\n\t\tAccessToken: tok.AccessToken,\n\t\tRefreshToken: tok.RefreshToken,\n\t}\n\terr = toks.save()\n\tmaybeFatal(\"error saving tokens\", err)\n\tfmt.Println(\"Token saved\")\n}\nfunc authPrint() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.print <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tlog.Fatal(\"Couldn't load API \", args[1])\n\t}\n\tm := findMethod(args[2], *api)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tif tok, found := toks.Tok[strings.Join(m.Scopes, \" \")]; found {\n\t\t\/\/ TODO: If necessary, refresh, store, and print the new token.\n\t\tfmt.Println(tok.AccessToken)\n\t} else {\n\t\tfmt.Println(\"No token found. Run the following command to store a token:\")\n\t\tfmt.Println(\"gapi auth.start\", api.Name, m.ID[len(api.Name)+1:])\n\t}\n}\nfunc authRevoke() {\n\targs := endpointFs.Args()\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Invalid arguments, must provide API and method, e.g.:\")\n\t\tfmt.Println(\" gapi auth.print <api> <method>\")\n\t\treturn\n\t}\n\tapi := loadAPI(args[1])\n\tif api == nil || (len(api.Resources) == 0 && len(api.Methods) == 0) {\n\t\tlog.Fatal(\"Couldn't load API \", args[1])\n\t}\n\tm := findMethod(args[2], *api)\n\ttoks, err := loadTokens()\n\tmaybeFatal(\"error loading tokens\", err)\n\tif tok, found := toks.Tok[strings.Join(m.Scopes, \" \")]; found {\n\t\t_, err := http.Get(\"https:\/\/accounts.google.com\/o\/oauth2\/revoke?token=\" + tok.AccessToken)\n\t\tmaybeFatal(\"error revoking token\", err)\n\t\tdelete(toks.Tok, strings.Join(m.Scopes, \" \"))\n\t\ttoks.save()\n\t}\n}\n\ntype tokenInfo struct {\n\tScope string\n\tExpiresIn int `json:\"expires_in\"`\n\tAccessType string `json:\"access_type\"`\n}\n\nfunc (inf tokenInfo) expired() bool {\n\treturn inf.ExpiresIn < 0\n}\nfunc getTokenInfo(tok string) (*tokenInfo, error) {\n\tr, err := http.Post(\"https:\/\/www.googleapis.com\/oauth2\/v2\/tokeninfo?access_token=\"+tok, \"\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\tvar info tokenInfo\n\terr = json.NewDecoder(r.Body).Decode(&info)\n\treturn &info, err\n}\n\ntype tokens struct {\n\tTok map[string]token\n}\n\ntype token struct {\n\tAccessToken, RefreshToken string\n}\n\nfunc loadTokens() (*tokens, error) {\n\tfi, err := os.Stat(tokensFile)\n\tif err == os.ErrNotExist || err == io.EOF {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t} else if err != nil {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tif fi.Size() == 0 {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tf, err := os.Open(tokensFile)\n\tif err != nil && err != io.EOF {\n\t\treturn &tokens{make(map[string]token)}, nil\n\t}\n\tdefer f.Close()\n\tvar t tokens\n\t_ = gob.NewDecoder(f).Decode(&t)\n\treturn &t, nil\n}\n\nfunc (t *tokens) save() error {\n\tf, err := os.Create(tokensFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn gob.NewEncoder(f).Encode(t)\n}\n\nfunc accessTokenFromPemFile(scope string) string {\n\tsecretBytes, err := ioutil.ReadFile(*flagSecrets)\n\tmaybeFatal(\"error reading secrets file:\", err)\n\tvar config struct {\n\t\tWeb struct {\n\t\t\tClientEmail string `json:\"client_email\"`\n\t\t\tTokenURI string `json:\"token_uri\"`\n\t\t}\n\t}\n\terr = json.Unmarshal(secretBytes, &config)\n\tmaybeFatal(\"error unmarshalling secrets:\", err)\n\n\tkeyBytes, err := ioutil.ReadFile(*flagPem)\n\tmaybeFatal(\"error reading private key file:\", err)\n\n\t\/\/ Craft the ClaimSet and JWT token.\n\tt := jwt.NewToken(config.Web.ClientEmail, scope, keyBytes)\n\tt.ClaimSet.Aud = config.Web.TokenURI\n\n\t\/\/ We need to provide a client.\n\tc := &http.Client{}\n\n\t\/\/ Get the access token.\n\to, err := t.Assert(c)\n\tmaybeFatal(\"assertion error:\", err)\n\n\treturn o.AccessToken\n}\n<|endoftext|>"} {"text":"<commit_before>package dnsimple\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ ServicesService handles communication with the service related\n\/\/ methods of the DNSimple API.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/\ntype ServicesService struct {\n\tclient *Client\n}\n\n\/\/ ServiceSetting represents a single group of settings for a DNSimple Service.\ntype ServiceSetting struct {\n\tName string `json:\"name,omitempty\"`\n\tLabel string `json:\"label,omitempty\"`\n\tAppend string `json:\"append,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tExample string `json:\"example,omitempty\"`\n\tPassword bool `json:\"password,omitempty\"`\n}\n\n\/\/ Service represents a Service in DNSimple.\ntype Service struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tShortName string `json:\"short_name,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tSetupDescription string `json:\"setup_description,omitempty\"`\n\tRequiresSetup bool `json:\"requires_setup,omitempty\"`\n\tDefaultSubdomain string `json:\"default_subdomain,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n\tSettings []ServiceSetting `json:\"settings,omitempty\"`\n}\n\nfunc servicePath(serviceID string) string {\n\tif serviceID != \"\" {\n\t\treturn fmt.Sprintf(\"\/services\/%v\", serviceID)\n\t}\n\treturn fmt.Sprintf(\"\/services\")\n}\n\n\/\/ ServiceResponse represents a response from an API method that returns a Service struct.\ntype ServiceResponse struct {\n\tResponse\n\tData *Service `json:\"data\"`\n}\n\n\/\/ ServicesResponse represents a response from an API method that returns a collection of Service struct.\ntype ServicesResponse struct {\n\tResponse\n\tData []Service `json:\"data\"`\n}\n\n\/\/ ListServices list the services for an account.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/#list\nfunc (s *ServicesService) ListServices(options *ListOptions) (*ServicesResponse, error) {\n\tpath := versioned(servicePath(\"\"))\n\tservicesResponse := &ServicesResponse{}\n\n\tpath, err := addURLQueryOptions(path, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.get(path, servicesResponse)\n\tif err != nil {\n\t\treturn servicesResponse, err\n\t}\n\n\tservicesResponse.HttpResponse = resp\n\treturn servicesResponse, nil\n}\n\n\/\/ GetService fetches a service.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/#get\nfunc (s *ServicesService) GetService(serviceID string) (*ServiceResponse, error) {\n\tpath := versioned(servicePath(serviceID))\n\tserviceResponse := &ServiceResponse{}\n\n\tresp, err := s.client.get(path, serviceResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserviceResponse.HttpResponse = resp\n\treturn serviceResponse, nil\n}\n<commit_msg>Fix misleading method comment<commit_after>package dnsimple\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ ServicesService handles communication with the service related\n\/\/ methods of the DNSimple API.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/\ntype ServicesService struct {\n\tclient *Client\n}\n\n\/\/ ServiceSetting represents a single group of settings for a DNSimple Service.\ntype ServiceSetting struct {\n\tName string `json:\"name,omitempty\"`\n\tLabel string `json:\"label,omitempty\"`\n\tAppend string `json:\"append,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tExample string `json:\"example,omitempty\"`\n\tPassword bool `json:\"password,omitempty\"`\n}\n\n\/\/ Service represents a Service in DNSimple.\ntype Service struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tShortName string `json:\"short_name,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tSetupDescription string `json:\"setup_description,omitempty\"`\n\tRequiresSetup bool `json:\"requires_setup,omitempty\"`\n\tDefaultSubdomain string `json:\"default_subdomain,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n\tSettings []ServiceSetting `json:\"settings,omitempty\"`\n}\n\nfunc servicePath(serviceID string) string {\n\tif serviceID != \"\" {\n\t\treturn fmt.Sprintf(\"\/services\/%v\", serviceID)\n\t}\n\treturn fmt.Sprintf(\"\/services\")\n}\n\n\/\/ ServiceResponse represents a response from an API method that returns a Service struct.\ntype ServiceResponse struct {\n\tResponse\n\tData *Service `json:\"data\"`\n}\n\n\/\/ ServicesResponse represents a response from an API method that returns a collection of Service struct.\ntype ServicesResponse struct {\n\tResponse\n\tData []Service `json:\"data\"`\n}\n\n\/\/ ListServices list the one-click services available in DNSimple.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/#list\nfunc (s *ServicesService) ListServices(options *ListOptions) (*ServicesResponse, error) {\n\tpath := versioned(servicePath(\"\"))\n\tservicesResponse := &ServicesResponse{}\n\n\tpath, err := addURLQueryOptions(path, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.get(path, servicesResponse)\n\tif err != nil {\n\t\treturn servicesResponse, err\n\t}\n\n\tservicesResponse.HttpResponse = resp\n\treturn servicesResponse, nil\n}\n\n\/\/ GetService fetches a one-click service.\n\/\/\n\/\/ See https:\/\/developer.dnsimple.com\/v2\/services\/#get\nfunc (s *ServicesService) GetService(serviceID string) (*ServiceResponse, error) {\n\tpath := versioned(servicePath(serviceID))\n\tserviceResponse := &ServiceResponse{}\n\n\tresp, err := s.client.get(path, serviceResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserviceResponse.HttpResponse = resp\n\treturn serviceResponse, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package leancloud\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/oxfeeefeee\/appgo\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"strings\"\n)\n\nconst (\n\tsendMethod = \"POST\"\n\tsendUrl = \"https:\/\/leancloud.cn\/1.1\/push\"\n\tpush_batch_size = 5000\n)\n\nvar (\n\tappId string\n\tappKey string\n\tandroidAction string\n\texpiration int\n\tprod string\n)\n\nfunc init() {\n\tappId = appgo.Conf.Leancloud.AppId\n\tappKey = appgo.Conf.Leancloud.AppKey\n\tandroidAction = appgo.Conf.Leancloud.AndroidAction\n\texpiration = appgo.Conf.Leancloud.ExpirationInterval\n\tprod = \"prod\"\n\tif appgo.Conf.DevMode {\n\t\tprod = \"dev\"\n\t}\n}\n\ntype IosApnsAlert struct {\n\tTitle string `json:\"title,omitempty\"`\n\tBody string `json:\"body,omitempty\"`\n\tActionLocKey string `json:\"action-loc-key,omitempty\"`\n\tLocKey string `json:\"loc-key,omitempty\"`\n\tLocArgs []string `json:\"loc-args,omitempty\"`\n\tLaunchImage string `json:\"launch-image,omitempty\"`\n}\n\ntype IosData struct {\n\tAlert *IosApnsAlert `json:\"alert,omitempty\"`\n\tBadge int `json:\"badge,omitempty\"`\n\tSound string `json:\"sound,omitempty\"`\n\tContentAvailable int `json:\"content-available,omitempty\"`\n\tCustom interface{} `json:\"custom,omitempty\"`\n}\n\ntype AndroidData struct {\n\tAlert string `json:\"alert,omitempty\"`\n\tAlert2 string `json:\"alert2,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tPlaySound bool `json:\"play_sound\"`\n\tSilent bool `json:\"silent\"`\n\tCustom interface{} `json:\"custom,omitempty\"`\n}\n\ntype IosAndroidData struct {\n\tIos *IosData `json:\"ios,omitempty\"`\n\tAndroid *AndroidData `json:\"android,omitempty\"`\n}\n\ntype LeancloudPush struct {\n\tExpirationInterval int `json:\"expiration_interval,omitempty\"`\n\tProd string `json:\"prod,omitempty\"`\n\tData *IosAndroidData `json:\"data,omitempty\"`\n\tCql string `json:\"cql,omitempty\"`\n}\n\ntype Leancloud struct{}\n\nfunc (_ Leancloud) Name() string {\n\treturn \"leancloud\"\n}\n\nfunc (l Leancloud) PushNotif(pushInfo map[appgo.Id]*appgo.PushInfo, content *appgo.PushData) {\n\tuserIds := make([]string, 0, len(pushInfo))\n\tfor uid, _ := range pushInfo {\n\t\tuserIds = append(userIds, \"\\\"\"+uid.String()+\"\\\"\")\n\t}\n\tfor i := 0; i < len(userIds); i += push_batch_size {\n\t\tend := i + push_batch_size\n\t\tif end > len(userIds) {\n\t\t\tend = len(userIds)\n\t\t}\n\t\tidstr := strings.Join(userIds[i:end], \",\")\n\t\tgo l.doPushNotif(idstr, content)\n\t}\n}\n\nfunc (_ Leancloud) doPushNotif(ids string, content *appgo.PushData) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Errorln(\"doPushNotif paniced: \", r)\n\t\t}\n\t}()\n\tcql := \"select * from _Installation where userId in ( \" + ids + \" )\"\n\tpl := buildPayload(content)\n\trequest(&LeancloudPush{\n\t\tExpirationInterval: expiration,\n\t\tProd: prod,\n\t\tData: pl,\n\t\tCql: cql,\n\t})\n}\n\nfunc buildPayload(content *appgo.PushData) *IosAndroidData {\n\treturn &IosAndroidData{\n\t\t&IosData{\n\t\t\tAlert: &IosApnsAlert{\n\t\t\t\tTitle: content.Title,\n\t\t\t\tBody: content.Message,\n\t\t\t},\n\t\t\tBadge: content.Badge,\n\t\t\tSound: content.Sound,\n\t\t\tCustom: content.Custom,\n\t\t},\n\t\t&AndroidData{\n\t\t\tAlert: content.Message,\n\t\t\tAlert2: content.Message,\n\t\t\tTitle: content.Title,\n\t\t\tAction: androidAction,\n\t\t\tPlaySound: len(content.Sound) > 0,\n\t\t\tSilent: true,\n\t\t\tCustom: content.Custom,\n\t\t},\n\t}\n}\n\nfunc request(p *LeancloudPush) {\n\t_, ret, errs := gorequest.New().SetDebug(true).\n\t\tPost(sendUrl).\n\t\tSet(\"X-LC-Id\", appId).\n\t\tSet(\"X-LC-Key\", appKey).\n\t\tSendStruct(p).\n\t\tEnd()\n\tif errs != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"errors\": errs,\n\t\t\t\"url\": sendUrl,\n\t\t\t\"body\": ret,\n\t\t}).Error(\"Failed to request leancloud push\")\n\t\treturn\n\t}\n\tlog.Infoln(\"leancloud push return: \", ret)\n}\n<commit_msg>leave iOS push title as default<commit_after>package leancloud\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/oxfeeefeee\/appgo\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"strings\"\n)\n\nconst (\n\tsendMethod = \"POST\"\n\tsendUrl = \"https:\/\/leancloud.cn\/1.1\/push\"\n\tpush_batch_size = 5000\n)\n\nvar (\n\tappId string\n\tappKey string\n\tandroidAction string\n\texpiration int\n\tprod string\n)\n\nfunc init() {\n\tappId = appgo.Conf.Leancloud.AppId\n\tappKey = appgo.Conf.Leancloud.AppKey\n\tandroidAction = appgo.Conf.Leancloud.AndroidAction\n\texpiration = appgo.Conf.Leancloud.ExpirationInterval\n\tprod = \"prod\"\n\tif appgo.Conf.DevMode {\n\t\tprod = \"dev\"\n\t}\n}\n\ntype IosApnsAlert struct {\n\tTitle string `json:\"title,omitempty\"`\n\tBody string `json:\"body,omitempty\"`\n\tActionLocKey string `json:\"action-loc-key,omitempty\"`\n\tLocKey string `json:\"loc-key,omitempty\"`\n\tLocArgs []string `json:\"loc-args,omitempty\"`\n\tLaunchImage string `json:\"launch-image,omitempty\"`\n}\n\ntype IosData struct {\n\tAlert *IosApnsAlert `json:\"alert,omitempty\"`\n\tBadge int `json:\"badge,omitempty\"`\n\tSound string `json:\"sound,omitempty\"`\n\tContentAvailable int `json:\"content-available,omitempty\"`\n\tCustom interface{} `json:\"custom,omitempty\"`\n}\n\ntype AndroidData struct {\n\tAlert string `json:\"alert,omitempty\"`\n\tAlert2 string `json:\"alert2,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tPlaySound bool `json:\"play_sound\"`\n\tSilent bool `json:\"silent\"`\n\tCustom interface{} `json:\"custom,omitempty\"`\n}\n\ntype IosAndroidData struct {\n\tIos *IosData `json:\"ios,omitempty\"`\n\tAndroid *AndroidData `json:\"android,omitempty\"`\n}\n\ntype LeancloudPush struct {\n\tExpirationInterval int `json:\"expiration_interval,omitempty\"`\n\tProd string `json:\"prod,omitempty\"`\n\tData *IosAndroidData `json:\"data,omitempty\"`\n\tCql string `json:\"cql,omitempty\"`\n}\n\ntype Leancloud struct{}\n\nfunc (_ Leancloud) Name() string {\n\treturn \"leancloud\"\n}\n\nfunc (l Leancloud) PushNotif(pushInfo map[appgo.Id]*appgo.PushInfo, content *appgo.PushData) {\n\tuserIds := make([]string, 0, len(pushInfo))\n\tfor uid, _ := range pushInfo {\n\t\tuserIds = append(userIds, \"\\\"\"+uid.String()+\"\\\"\")\n\t}\n\tfor i := 0; i < len(userIds); i += push_batch_size {\n\t\tend := i + push_batch_size\n\t\tif end > len(userIds) {\n\t\t\tend = len(userIds)\n\t\t}\n\t\tidstr := strings.Join(userIds[i:end], \",\")\n\t\tgo l.doPushNotif(idstr, content)\n\t}\n}\n\nfunc (_ Leancloud) doPushNotif(ids string, content *appgo.PushData) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Errorln(\"doPushNotif paniced: \", r)\n\t\t}\n\t}()\n\tcql := \"select * from _Installation where userId in ( \" + ids + \" )\"\n\tpl := buildPayload(content)\n\trequest(&LeancloudPush{\n\t\tExpirationInterval: expiration,\n\t\tProd: prod,\n\t\tData: pl,\n\t\tCql: cql,\n\t})\n}\n\nfunc buildPayload(content *appgo.PushData) *IosAndroidData {\n\treturn &IosAndroidData{\n\t\t&IosData{\n\t\t\tAlert: &IosApnsAlert{\n\t\t\t\tTitle: \"\",\n\t\t\t\tBody: content.Message,\n\t\t\t},\n\t\t\tBadge: content.Badge,\n\t\t\tSound: content.Sound,\n\t\t\tCustom: content.Custom,\n\t\t},\n\t\t&AndroidData{\n\t\t\tAlert: content.Message,\n\t\t\tAlert2: content.Message,\n\t\t\tTitle: content.Title,\n\t\t\tAction: androidAction,\n\t\t\tPlaySound: len(content.Sound) > 0,\n\t\t\tSilent: true,\n\t\t\tCustom: content.Custom,\n\t\t},\n\t}\n}\n\nfunc request(p *LeancloudPush) {\n\t_, ret, errs := gorequest.New().SetDebug(true).\n\t\tPost(sendUrl).\n\t\tSet(\"X-LC-Id\", appId).\n\t\tSet(\"X-LC-Key\", appKey).\n\t\tSendStruct(p).\n\t\tEnd()\n\tif errs != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"errors\": errs,\n\t\t\t\"url\": sendUrl,\n\t\t\t\"body\": ret,\n\t\t}).Error(\"Failed to request leancloud push\")\n\t\treturn\n\t}\n\tlog.Infoln(\"leancloud push return: \", ret)\n}\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"net\/url\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/dex\/db\"\n\t\"github.com\/coreos\/dex\/session\"\n\t\"github.com\/coreos\/go-oidc\/oidc\"\n)\n\nfunc staticGenerateCodeFunc(code string) GenerateCodeFunc {\n\treturn func() (string, error) {\n\t\treturn code, nil\n\t}\n}\n\nfunc newManager(t *testing.T) *SessionManager {\n\tdbMap := db.NewMemDB()\n\treturn NewSessionManager(db.NewSessionRepo(dbMap), db.NewSessionKeyRepo(dbMap))\n}\n\nfunc TestSessionManagerNewSession(t *testing.T) {\n\tsm := newManager(t)\n\tsm.GenerateCode = staticGenerateCodeFunc(\"boo\")\n\tgot, err := sm.NewSession(\"bogus_idpc\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tif got != \"boo\" {\n\t\tt.Fatalf(\"Incorrect Session ID: want=%s got=%s\", \"boo\", got)\n\t}\n}\n\nfunc TestSessionAttachRemoteIdentityTwice(t *testing.T) {\n\tsm := newManager(t)\n\tsessionID, err := sm.NewSession(\"bogus_idpc\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tident := oidc.Identity{ID: \"YYY\", Name: \"elroy\", Email: \"elroy@example.com\"}\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err == nil {\n\t\tt.Fatalf(\"Expected non-nil error\")\n\t}\n}\n\nfunc TestSessionManagerExchangeKey(t *testing.T) {\n\tsm := newManager(t)\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tkey, err := sm.NewSessionKey(sessionID)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tgot, err := sm.ExchangeKey(key)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tif got != sessionID {\n\t\tt.Fatalf(\"Incorrect Session ID: want=%s got=%s\", sessionID, got)\n\t}\n\n\tif _, err := sm.ExchangeKey(key); err == nil {\n\t\tt.Fatalf(\"Received nil response from attempt with spent Session key\")\n\t}\n}\n\nfunc TestSessionManagerGetSessionInStateNoExist(t *testing.T) {\n\tsm := newManager(t)\n\tses, err := sm.getSessionInState(\"123\", session.SessionStateNew)\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error\")\n\t}\n\tif ses != nil {\n\t\tt.Errorf(\"Expected nil Session\")\n\t}\n}\n\nfunc TestSessionManagerGetSessionInStateWrongState(t *testing.T) {\n\tsm := newManager(t)\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tses, err := sm.getSessionInState(sessionID, session.SessionStateDead)\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error\")\n\t}\n\tif ses != nil {\n\t\tt.Errorf(\"Expected nil Session\")\n\t}\n}\n\nfunc TestSessionManagerKill(t *testing.T) {\n\tsm := newManager(t)\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tident := oidc.Identity{ID: \"YYY\", Name: \"elroy\", Email: \"elroy@example.com\"}\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tses, err := sm.Kill(sessionID)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif ses == nil {\n\t\tt.Fatalf(\"Expected non-nil Session\")\n\t}\n\n\tif ses.ClientState != \"bogus\" {\n\t\tt.Errorf(\"Unexpected Session: %#v\", ses)\n\t}\n}\n<commit_msg>session: remove unused function argument<commit_after>package manager\n\nimport (\n\t\"net\/url\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/dex\/db\"\n\t\"github.com\/coreos\/dex\/session\"\n\t\"github.com\/coreos\/go-oidc\/oidc\"\n)\n\nfunc staticGenerateCodeFunc(code string) GenerateCodeFunc {\n\treturn func() (string, error) {\n\t\treturn code, nil\n\t}\n}\n\nfunc newManager() *SessionManager {\n\tdbMap := db.NewMemDB()\n\treturn NewSessionManager(db.NewSessionRepo(dbMap), db.NewSessionKeyRepo(dbMap))\n}\n\nfunc TestSessionManagerNewSession(t *testing.T) {\n\tsm := newManager()\n\tsm.GenerateCode = staticGenerateCodeFunc(\"boo\")\n\tgot, err := sm.NewSession(\"bogus_idpc\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tif got != \"boo\" {\n\t\tt.Fatalf(\"Incorrect Session ID: want=%s got=%s\", \"boo\", got)\n\t}\n}\n\nfunc TestSessionAttachRemoteIdentityTwice(t *testing.T) {\n\tsm := newManager()\n\tsessionID, err := sm.NewSession(\"bogus_idpc\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tident := oidc.Identity{ID: \"YYY\", Name: \"elroy\", Email: \"elroy@example.com\"}\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err == nil {\n\t\tt.Fatalf(\"Expected non-nil error\")\n\t}\n}\n\nfunc TestSessionManagerExchangeKey(t *testing.T) {\n\tsm := newManager()\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tkey, err := sm.NewSessionKey(sessionID)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tgot, err := sm.ExchangeKey(key)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tif got != sessionID {\n\t\tt.Fatalf(\"Incorrect Session ID: want=%s got=%s\", sessionID, got)\n\t}\n\n\tif _, err := sm.ExchangeKey(key); err == nil {\n\t\tt.Fatalf(\"Received nil response from attempt with spent Session key\")\n\t}\n}\n\nfunc TestSessionManagerGetSessionInStateNoExist(t *testing.T) {\n\tsm := newManager()\n\tses, err := sm.getSessionInState(\"123\", session.SessionStateNew)\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error\")\n\t}\n\tif ses != nil {\n\t\tt.Errorf(\"Expected nil Session\")\n\t}\n}\n\nfunc TestSessionManagerGetSessionInStateWrongState(t *testing.T) {\n\tsm := newManager()\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\tses, err := sm.getSessionInState(sessionID, session.SessionStateDead)\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error\")\n\t}\n\tif ses != nil {\n\t\tt.Errorf(\"Expected nil Session\")\n\t}\n}\n\nfunc TestSessionManagerKill(t *testing.T) {\n\tsm := newManager()\n\tsessionID, err := sm.NewSession(\"connector_id\", \"XXX\", \"bogus\", url.URL{}, \"\", false, []string{\"openid\"})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tident := oidc.Identity{ID: \"YYY\", Name: \"elroy\", Email: \"elroy@example.com\"}\n\tif _, err := sm.AttachRemoteIdentity(sessionID, ident); err != nil {\n\t\tt.Fatalf(\"Unexpected error: %v\", err)\n\t}\n\n\tses, err := sm.Kill(sessionID)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif ses == nil {\n\t\tt.Fatalf(\"Expected non-nil Session\")\n\t}\n\n\tif ses.ClientState != \"bogus\" {\n\t\tt.Errorf(\"Unexpected Session: %#v\", ses)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package odb\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/git-lfs\/git-lfs\/errors\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/git-lfs\/git-lfs\/lfs\"\n)\n\n\/\/ ObjectDatabase enables the reading and writing of objects against a storage\n\/\/ backend.\ntype ObjectDatabase struct {\n\t\/\/ s is the storage backend which opens\/creates\/reads\/writes.\n\ts storer\n\n\t\/\/ closed is a uint32 managed by sync\/atomic's <X>Uint32 methods. It\n\t\/\/ yields a value of 0 if the *ObjectDatabase it is stored upon is open,\n\t\/\/ and a value of 1 if it is closed.\n\tclosed uint32\n\t\/\/ objectScanner is the running instance of `*git.ObjectScanner` used to\n\t\/\/ scan packed objects not found in .git\/objects\/xx\/... directly.\n\tobjectScanner *git.ObjectScanner\n}\n\n\/\/ FromFilesystem constructs an *ObjectDatabase instance that is backed by a\n\/\/ directory on the filesystem. Specifically, this should point to:\n\/\/\n\/\/ \/absolute\/repo\/path\/.git\/objects\nfunc FromFilesystem(root string) (*ObjectDatabase, error) {\n\tos, err := git.NewObjectScanner()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ObjectDatabase{\n\t\ts: newFileStorer(root),\n\t\tobjectScanner: os,\n\t}, nil\n}\n\n\/\/ Close closes the *ObjectDatabase, freeing any open resources (namely: the\n\/\/ `*git.ObjectScanner instance), and returning any errors encountered in\n\/\/ closing them.\n\/\/\n\/\/ If Close() has already been called, this function will return an error.\nfunc (o *ObjectDatabase) Close() error {\n\tif !atomic.CompareAndSwapUint32(&o.closed, 0, 1) {\n\t\treturn errors.New(\"git\/odb: *ObjectDatabase already closed\")\n\t}\n\n\tif err := o.objectScanner.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Blob returns a *Blob as identified by the SHA given, or an error if one was\n\/\/ encountered.\nfunc (o *ObjectDatabase) Blob(sha []byte) (*Blob, error) {\n\tvar b Blob\n\n\tif err := o.decode(sha, &b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &b, nil\n}\n\n\/\/ Tree returns a *Tree as identified by the SHA given, or an error if one was\n\/\/ encountered.\nfunc (o *ObjectDatabase) Tree(sha []byte) (*Tree, error) {\n\tvar t Tree\n\tif err := o.decode(sha, &t); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &t, nil\n}\n\n\/\/ Commit returns a *Commit as identified by the SHA given, or an error if one\n\/\/ was encountered.\nfunc (o *ObjectDatabase) Commit(sha []byte) (*Commit, error) {\n\tvar c Commit\n\n\tif err := o.decode(sha, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\n\/\/ WriteBlob stores a *Blob on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteBlob(b *Blob) ([]byte, error) {\n\tbuf, err := lfs.TempFile(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer buf.Close()\n\n\tsha, _, err := o.encodeBuffer(b, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = b.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sha, nil\n}\n\n\/\/ WriteTree stores a *Tree on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteTree(t *Tree) ([]byte, error) {\n\tsha, _, err := o.encode(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sha, nil\n}\n\n\/\/ WriteCommit stores a *Commit on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteCommit(c *Commit) ([]byte, error) {\n\tsha, _, err := o.encode(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sha, nil\n}\n\n\/\/ Root returns the filesystem root that this *ObjectDatabase works within, if\n\/\/ backed by a fileStorer (constructed by FromFilesystem). If so, it returns\n\/\/ the fully-qualified path on a disk and a value of true.\n\/\/\n\/\/ Otherwise, it returns empty-string and a value of false.\nfunc (o *ObjectDatabase) Root() (string, bool) {\n\ttype rooter interface {\n\t\tRoot() string\n\t}\n\n\tif root, ok := o.s.(rooter); ok {\n\t\treturn root.Root(), true\n\t}\n\treturn \"\", false\n}\n\n\/\/ encode encodes and saves an object to the storage backend and uses an\n\/\/ in-memory buffer to calculate the object's encoded body.\nfunc (d *ObjectDatabase) encode(object Object) (sha []byte, n int64, err error) {\n\treturn d.encodeBuffer(object, bytes.NewBuffer(nil))\n}\n\n\/\/ encodeBuffer encodes and saves an object to the storage backend by using the\n\/\/ given buffer to calculate and store the object's encoded body.\nfunc (d *ObjectDatabase) encodeBuffer(object Object, buf io.ReadWriter) (sha []byte, n int64, err error) {\n\tcn, err := object.Encode(buf)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\ttmp, err := lfs.TempFile(\"\")\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tdefer tmp.Close()\n\n\tto := NewObjectWriter(tmp)\n\tif _, err = to.WriteHeader(object.Type(), int64(cn)); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif seek, ok := buf.(io.Seeker); ok {\n\t\tif _, err = seek.Seek(0, io.SeekStart); err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t}\n\n\tif _, err = io.Copy(to, buf); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif err = to.Close(); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif _, err := tmp.Seek(0, io.SeekStart); err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn d.save(to.Sha(), tmp)\n}\n\n\/\/ save writes the given buffer to the location given by the storer \"o.s\" as\n\/\/ identified by the sha []byte.\nfunc (o *ObjectDatabase) save(sha []byte, buf io.Reader) ([]byte, int64, error) {\n\tn, err := o.s.Store(sha, buf)\n\n\treturn sha, n, err\n}\n\n\/\/ open gives an `*ObjectReader` for the given loose object keyed by the given\n\/\/ \"sha\" []byte, or an error.\nfunc (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) {\n\tf, err := o.s.Open(sha)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\t\/\/ If there was some other issue beyond not being able\n\t\t\t\/\/ to find the object, return that immediately and don't\n\t\t\t\/\/ try and fallback to the *git.ObjectScanner.\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Otherwise, if the file simply couldn't be found, attempt to\n\t\t\/\/ load its contents from the *git.ObjectScanner by leveraging\n\t\t\/\/ `git-cat-file --batch`.\n\t\tif atomic.LoadUint32(&o.closed) == 1 {\n\t\t\treturn nil, errors.New(\"git\/odb: cannot use closed *git.ObjectScanner\")\n\t\t}\n\n\t\tif !o.objectScanner.Scan(hex.EncodeToString(sha)) {\n\t\t\treturn nil, o.objectScanner.Err()\n\t\t}\n\n\t\treturn NewUncompressedObjectReader(io.MultiReader(\n\t\t\t\/\/ Git object header:\n\t\t\tstrings.NewReader(fmt.Sprintf(\"%s %d\\x00\",\n\t\t\t\to.objectScanner.Type(), o.objectScanner.Size(),\n\t\t\t)),\n\n\t\t\t\/\/ Git object (uncompressed) contents:\n\t\t\to.objectScanner.Contents(),\n\t\t))\n\t}\n\n\treturn NewObjectReadCloser(f)\n}\n\n\/\/ decode decodes an object given by the sha \"sha []byte\" into the given object\n\/\/ \"into\", or returns an error if one was encountered.\n\/\/\n\/\/ Ordinarily, it closes the object's underlying io.ReadCloser (if it implements\n\/\/ the `io.Closer` interface), but skips this if the \"into\" Object is of type\n\/\/ BlobObjectType. Blob's don't exhaust the buffer completely (they instead\n\/\/ maintain a handle on the blob's contents via an io.LimitedReader) and\n\/\/ therefore cannot be closed until signaled explicitly by git\/odb.Blob.Close().\nfunc (o *ObjectDatabase) decode(sha []byte, into Object) error {\n\tr, err := o.open(sha)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttyp, size, err := r.Header()\n\tif err != nil {\n\t\treturn err\n\t} else if typ != into.Type() {\n\t\treturn &UnexpectedObjectType{Got: typ, Wanted: into.Type()}\n\t}\n\n\tif _, err = into.Decode(r, size); err != nil {\n\t\treturn err\n\t}\n\n\tif into.Type() == BlobObjectType {\n\t\treturn nil\n\t}\n\treturn r.Close()\n}\n<commit_msg>git\/odb: remove temporary files after migration<commit_after>package odb\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/git-lfs\/git-lfs\/errors\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/git-lfs\/git-lfs\/lfs\"\n)\n\n\/\/ ObjectDatabase enables the reading and writing of objects against a storage\n\/\/ backend.\ntype ObjectDatabase struct {\n\t\/\/ s is the storage backend which opens\/creates\/reads\/writes.\n\ts storer\n\n\t\/\/ closed is a uint32 managed by sync\/atomic's <X>Uint32 methods. It\n\t\/\/ yields a value of 0 if the *ObjectDatabase it is stored upon is open,\n\t\/\/ and a value of 1 if it is closed.\n\tclosed uint32\n\t\/\/ objectScanner is the running instance of `*git.ObjectScanner` used to\n\t\/\/ scan packed objects not found in .git\/objects\/xx\/... directly.\n\tobjectScanner *git.ObjectScanner\n}\n\n\/\/ FromFilesystem constructs an *ObjectDatabase instance that is backed by a\n\/\/ directory on the filesystem. Specifically, this should point to:\n\/\/\n\/\/ \/absolute\/repo\/path\/.git\/objects\nfunc FromFilesystem(root string) (*ObjectDatabase, error) {\n\tos, err := git.NewObjectScanner()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ObjectDatabase{\n\t\ts: newFileStorer(root),\n\t\tobjectScanner: os,\n\t}, nil\n}\n\n\/\/ Close closes the *ObjectDatabase, freeing any open resources (namely: the\n\/\/ `*git.ObjectScanner instance), and returning any errors encountered in\n\/\/ closing them.\n\/\/\n\/\/ If Close() has already been called, this function will return an error.\nfunc (o *ObjectDatabase) Close() error {\n\tif !atomic.CompareAndSwapUint32(&o.closed, 0, 1) {\n\t\treturn errors.New(\"git\/odb: *ObjectDatabase already closed\")\n\t}\n\n\tif err := o.objectScanner.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Blob returns a *Blob as identified by the SHA given, or an error if one was\n\/\/ encountered.\nfunc (o *ObjectDatabase) Blob(sha []byte) (*Blob, error) {\n\tvar b Blob\n\n\tif err := o.decode(sha, &b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &b, nil\n}\n\n\/\/ Tree returns a *Tree as identified by the SHA given, or an error if one was\n\/\/ encountered.\nfunc (o *ObjectDatabase) Tree(sha []byte) (*Tree, error) {\n\tvar t Tree\n\tif err := o.decode(sha, &t); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &t, nil\n}\n\n\/\/ Commit returns a *Commit as identified by the SHA given, or an error if one\n\/\/ was encountered.\nfunc (o *ObjectDatabase) Commit(sha []byte) (*Commit, error) {\n\tvar c Commit\n\n\tif err := o.decode(sha, &c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\n\/\/ WriteBlob stores a *Blob on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteBlob(b *Blob) ([]byte, error) {\n\tbuf, err := lfs.TempFile(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Rename(buf.Name())\n\n\tsha, _, err := o.encodeBuffer(b, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = b.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sha, nil\n}\n\n\/\/ WriteTree stores a *Tree on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteTree(t *Tree) ([]byte, error) {\n\tsha, _, err := o.encode(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sha, nil\n}\n\n\/\/ WriteCommit stores a *Commit on disk and returns the SHA it is uniquely\n\/\/ identified by, or an error if one was encountered.\nfunc (o *ObjectDatabase) WriteCommit(c *Commit) ([]byte, error) {\n\tsha, _, err := o.encode(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sha, nil\n}\n\n\/\/ Root returns the filesystem root that this *ObjectDatabase works within, if\n\/\/ backed by a fileStorer (constructed by FromFilesystem). If so, it returns\n\/\/ the fully-qualified path on a disk and a value of true.\n\/\/\n\/\/ Otherwise, it returns empty-string and a value of false.\nfunc (o *ObjectDatabase) Root() (string, bool) {\n\ttype rooter interface {\n\t\tRoot() string\n\t}\n\n\tif root, ok := o.s.(rooter); ok {\n\t\treturn root.Root(), true\n\t}\n\treturn \"\", false\n}\n\n\/\/ encode encodes and saves an object to the storage backend and uses an\n\/\/ in-memory buffer to calculate the object's encoded body.\nfunc (d *ObjectDatabase) encode(object Object) (sha []byte, n int64, err error) {\n\treturn d.encodeBuffer(object, bytes.NewBuffer(nil))\n}\n\n\/\/ encodeBuffer encodes and saves an object to the storage backend by using the\n\/\/ given buffer to calculate and store the object's encoded body.\nfunc (d *ObjectDatabase) encodeBuffer(object Object, buf io.ReadWriter) (sha []byte, n int64, err error) {\n\tcn, err := object.Encode(buf)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\ttmp, err := lfs.TempFile(\"\")\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tdefer os.Remove(tmp.Name())\n\n\tto := NewObjectWriter(tmp)\n\tif _, err = to.WriteHeader(object.Type(), int64(cn)); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif seek, ok := buf.(io.Seeker); ok {\n\t\tif _, err = seek.Seek(0, io.SeekStart); err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t}\n\n\tif _, err = io.Copy(to, buf); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif err = to.Close(); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif _, err := tmp.Seek(0, io.SeekStart); err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn d.save(to.Sha(), tmp)\n}\n\n\/\/ save writes the given buffer to the location given by the storer \"o.s\" as\n\/\/ identified by the sha []byte.\nfunc (o *ObjectDatabase) save(sha []byte, buf io.Reader) ([]byte, int64, error) {\n\tn, err := o.s.Store(sha, buf)\n\n\treturn sha, n, err\n}\n\n\/\/ open gives an `*ObjectReader` for the given loose object keyed by the given\n\/\/ \"sha\" []byte, or an error.\nfunc (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) {\n\tf, err := o.s.Open(sha)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\t\/\/ If there was some other issue beyond not being able\n\t\t\t\/\/ to find the object, return that immediately and don't\n\t\t\t\/\/ try and fallback to the *git.ObjectScanner.\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Otherwise, if the file simply couldn't be found, attempt to\n\t\t\/\/ load its contents from the *git.ObjectScanner by leveraging\n\t\t\/\/ `git-cat-file --batch`.\n\t\tif atomic.LoadUint32(&o.closed) == 1 {\n\t\t\treturn nil, errors.New(\"git\/odb: cannot use closed *git.ObjectScanner\")\n\t\t}\n\n\t\tif !o.objectScanner.Scan(hex.EncodeToString(sha)) {\n\t\t\treturn nil, o.objectScanner.Err()\n\t\t}\n\n\t\treturn NewUncompressedObjectReader(io.MultiReader(\n\t\t\t\/\/ Git object header:\n\t\t\tstrings.NewReader(fmt.Sprintf(\"%s %d\\x00\",\n\t\t\t\to.objectScanner.Type(), o.objectScanner.Size(),\n\t\t\t)),\n\n\t\t\t\/\/ Git object (uncompressed) contents:\n\t\t\to.objectScanner.Contents(),\n\t\t))\n\t}\n\n\treturn NewObjectReadCloser(f)\n}\n\n\/\/ decode decodes an object given by the sha \"sha []byte\" into the given object\n\/\/ \"into\", or returns an error if one was encountered.\n\/\/\n\/\/ Ordinarily, it closes the object's underlying io.ReadCloser (if it implements\n\/\/ the `io.Closer` interface), but skips this if the \"into\" Object is of type\n\/\/ BlobObjectType. Blob's don't exhaust the buffer completely (they instead\n\/\/ maintain a handle on the blob's contents via an io.LimitedReader) and\n\/\/ therefore cannot be closed until signaled explicitly by git\/odb.Blob.Close().\nfunc (o *ObjectDatabase) decode(sha []byte, into Object) error {\n\tr, err := o.open(sha)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttyp, size, err := r.Header()\n\tif err != nil {\n\t\treturn err\n\t} else if typ != into.Type() {\n\t\treturn &UnexpectedObjectType{Got: typ, Wanted: into.Type()}\n\t}\n\n\tif _, err = into.Decode(r, size); err != nil {\n\t\treturn err\n\t}\n\n\tif into.Type() == BlobObjectType {\n\t\treturn nil\n\t}\n\treturn r.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"git.lukas.moe\/sn0w\/Karen\/logger\"\n \"github.com\/sn0w\/discordgo\"\n \"time\"\n)\n\nvar BETA_GUILDS = [...]string{\n \"259831440064118784\", \/\/ FADED's Sandbox (Me)\n \"180818466847064065\", \/\/ Karen's Sandbox (Me)\n \"172041631258640384\", \/\/ P0WERPLANT (Me)\n \"161637499939192832\", \/\/ Coding Lounge (Devsome)\n \"225168913808228352\", \/\/ Emily's Space (Kaaz)\n \"267186654312136704\", \/\/ Shinda Sekai Sensen (黒ゲロロロ)\n \"244110097599430667\", \/\/ S E K A I (Senpai \/「 ステ 」Abuse)\n \"268279577598492672\", \/\/ ZAKINET (Senpai \/「 ステ 」Abuse)\n \"266326434505687041\", \/\/ Bot Test (quoththeraven)\n \"267658193407049728\", \/\/ Bot Creation (quoththeraven)\n \"106029722458136576\", \/\/ Shadow Realm (WhereIsMyAim)\n \"268143270520029187\", \/\/ Joel's Beasts (Joel)\n \"271346578189582339\", \/\/ Universe Internet Ltd. (Inside24)\n}\n\n\/\/ Automatically leaves guilds that are not registered beta testers\nfunc autoLeaver(session *discordgo.Session) {\n for {\n for _, guild := range session.State.Guilds {\n match := false\n\n for _, betaGuild := range BETA_GUILDS {\n if guild.ID == betaGuild {\n match = true\n break\n }\n }\n\n if !match {\n logger.WARNING.L(\"beta\", \"Leaving guild \" + guild.ID + \" (\" + guild.Name + \") because it didn't apply for the beta\")\n session.GuildLeave(guild.ID)\n }\n }\n\n time.Sleep(10 * time.Second)\n }\n}\n<commit_msg>Re add turdy-rep. (GIT Y U NO WORK?)<commit_after>package main\n\nimport (\n \"git.lukas.moe\/sn0w\/Karen\/logger\"\n \"github.com\/sn0w\/discordgo\"\n \"time\"\n)\n\nvar BETA_GUILDS = [...]string{\n \"259831440064118784\", \/\/ FADED's Sandbox (Me)\n \"180818466847064065\", \/\/ Karen's Sandbox (Me)\n \"172041631258640384\", \/\/ P0WERPLANT (Me)\n \"161637499939192832\", \/\/ Coding Lounge (Devsome)\n \"225168913808228352\", \/\/ Emily's Space (Kaaz)\n \"267186654312136704\", \/\/ Shinda Sekai Sensen (黒ゲロロロ)\n \"244110097599430667\", \/\/ S E K A I (Senpai \/「 ステ 」Abuse)\n \"268279577598492672\", \/\/ ZAKINET (Senpai \/「 ステ 」Abuse)\n \"266326434505687041\", \/\/ Bot Test (quoththeraven)\n \"267658193407049728\", \/\/ Bot Creation (quoththeraven)\n \"106029722458136576\", \/\/ Shadow Realm (WhereIsMyAim)\n \"268143270520029187\", \/\/ Joel's Beasts (Joel)\n \"271346578189582339\", \/\/ Universe Internet Ltd. (Inside24)\n \"270353850085408780\", \/\/ Turdy Republic (Moopdedoop)\n}\n\n\/\/ Automatically leaves guilds that are not registered beta testers\nfunc autoLeaver(session *discordgo.Session) {\n for {\n for _, guild := range session.State.Guilds {\n match := false\n\n for _, betaGuild := range BETA_GUILDS {\n if guild.ID == betaGuild {\n match = true\n break\n }\n }\n\n if !match {\n logger.WARNING.L(\"beta\", \"Leaving guild \" + guild.ID + \" (\" + guild.Name + \") because it didn't apply for the beta\")\n session.GuildLeave(guild.ID)\n }\n }\n\n time.Sleep(10 * time.Second)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package sfv\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nconst sfv = `\n; Generated by BSDSFV for UNiX, $Revision: 1.18 $ on 2014-08-10 at 20:47.28\n;\n{{.File1}} 7E3265A8\n{{.File2}} 04A2B3E9\n`\n\ntype values struct {\n\tFile1 string\n\tFile2 string\n}\n\nfunc tempFile(content string) (*os.File, error) {\n\tf, err := ioutil.TempFile(\"\", \"gosfv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := []byte(content)\n\tif err := ioutil.WriteFile(f.Name(), b, 0600); err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, err\n}\n\nfunc createSFVFile() (*os.File, error) {\n\tf1, err := tempFile(\"foo\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf2, err := tempFile(\"bar\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalues := values{\n\t\tFile1: path.Base(f1.Name()),\n\t\tFile2: path.Base(f2.Name()),\n\t}\n\tvar b bytes.Buffer\n\tt := template.Must(template.New(\"sfv\").Parse(sfv))\n\tif err := t.Execute(&b, values); err != nil {\n\t\treturn nil, err\n\t}\n\tsfvFile, err := tempFile(b.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sfvFile, nil\n}\n\nfunc TestParseChecksum(t *testing.T) {\n\tline := \"foo 7E3265A8\"\n\tchecksum, err := parseChecksum(\"\/tmp\", line)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif expected := \"\/tmp\/foo\"; checksum.Path != expected {\n\t\tt.Fatalf(\"Expected %s, got %s\", expected, checksum.Path)\n\t}\n\tif expected := uint32(2117232040); checksum.CRC32 != expected {\n\t\tt.Fatalf(\"Expected %d, got %d\", expected, checksum.CRC32)\n\t}\n}\n\nfunc TestParseChecksums(t *testing.T) {\n\tin := \"; comment\\n\" +\n\t\t\"file1 7E3265A8\\n\" +\n\t\t\"file2 04A2B3E7\\r\\n\" +\n\t\t\"file3 04A2B3E9\\n\" +\n\t\t\"file4 \\t04A2B3E6\"\n\tout := []Checksum{\n\t\tChecksum{Path: \"\/tmp\/file1\", Filename: \"file1\", CRC32: 2117232040},\n\t\tChecksum{Path: \"\/tmp\/file2\", Filename: \"file2\", CRC32: 77771751},\n\t\tChecksum{Path: \"\/tmp\/file3\", Filename: \"file3\", CRC32: 77771753},\n\t\tChecksum{Path: \"\/tmp\/file4\", Filename: \"file4\", CRC32: 77771750},\n\t}\n\tchecksums, err := parseChecksums(\"\/tmp\", strings.NewReader(in))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(checksums, out) {\n\t\tt.Fatalf(\"Expected %+v, got %+v\", out, checksums)\n\t}\n}\n\nfunc TestRead(t *testing.T) {\n\tf, err := createSFVFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfv, err := Read(f.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tfor _, c := range sfv.Checksums {\n\t\t\tos.Remove(c.Path) \/\/ Ignore error\n\t\t}\n\t\tos.Remove(sfv.Path) \/\/ Ignore error\n\t}()\n\tif path := f.Name(); sfv.Path != path {\n\t\tt.Fatalf(\"Expected %s, got %s\", path, sfv.Path)\n\t}\n\tif exist := sfv.IsExist(); !exist {\n\t\tt.Fatal(\"Expected true, got false\")\n\t}\n\tok, err := sfv.Verify()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"Expected true, got false\")\n\t}\n}\n\nfunc TestEmptySFV(t *testing.T) {\n\tsfv := SFV{Path: \"\/tmp\/sfv.sfv\"}\n\tif _, err := sfv.Verify(); err == nil {\n\t\tt.Fatal(\"Expected error\")\n\t}\n}\n<commit_msg>Test Find<commit_after>package sfv\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nconst sfv = `\n; Generated by BSDSFV for UNiX, $Revision: 1.18 $ on 2014-08-10 at 20:47.28\n;\n{{.File1}} 7E3265A8\n{{.File2}} 04A2B3E9\n`\n\ntype values struct {\n\tFile1 string\n\tFile2 string\n}\n\nfunc tempFile(content string) (*os.File, error) {\n\tf, err := ioutil.TempFile(\"\", \"gosfv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := []byte(content)\n\tif err := ioutil.WriteFile(f.Name(), b, 0600); err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, err\n}\n\nfunc createSFVFile() (*os.File, error) {\n\tf1, err := tempFile(\"foo\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf2, err := tempFile(\"bar\\n\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalues := values{\n\t\tFile1: path.Base(f1.Name()),\n\t\tFile2: path.Base(f2.Name()),\n\t}\n\tvar b bytes.Buffer\n\tt := template.Must(template.New(\"sfv\").Parse(sfv))\n\tif err := t.Execute(&b, values); err != nil {\n\t\treturn nil, err\n\t}\n\tsfvFile, err := tempFile(b.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sfvFile, nil\n}\n\nfunc TestParseChecksum(t *testing.T) {\n\tline := \"foo 7E3265A8\"\n\tchecksum, err := parseChecksum(\"\/tmp\", line)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif expected := \"\/tmp\/foo\"; checksum.Path != expected {\n\t\tt.Fatalf(\"Expected %s, got %s\", expected, checksum.Path)\n\t}\n\tif expected := uint32(2117232040); checksum.CRC32 != expected {\n\t\tt.Fatalf(\"Expected %d, got %d\", expected, checksum.CRC32)\n\t}\n}\n\nfunc TestParseChecksums(t *testing.T) {\n\tin := \"; comment\\n\" +\n\t\t\"file1 7E3265A8\\n\" +\n\t\t\"file2 04A2B3E7\\r\\n\" +\n\t\t\"file3 04A2B3E9\\n\" +\n\t\t\"file4 \\t04A2B3E6\"\n\tout := []Checksum{\n\t\tChecksum{Path: \"\/tmp\/file1\", Filename: \"file1\", CRC32: 2117232040},\n\t\tChecksum{Path: \"\/tmp\/file2\", Filename: \"file2\", CRC32: 77771751},\n\t\tChecksum{Path: \"\/tmp\/file3\", Filename: \"file3\", CRC32: 77771753},\n\t\tChecksum{Path: \"\/tmp\/file4\", Filename: \"file4\", CRC32: 77771750},\n\t}\n\tchecksums, err := parseChecksums(\"\/tmp\", strings.NewReader(in))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(checksums, out) {\n\t\tt.Fatalf(\"Expected %+v, got %+v\", out, checksums)\n\t}\n}\n\nfunc TestRead(t *testing.T) {\n\tf, err := createSFVFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfv, err := Read(f.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tfor _, c := range sfv.Checksums {\n\t\t\tos.Remove(c.Path) \/\/ Ignore error\n\t\t}\n\t\tos.Remove(sfv.Path) \/\/ Ignore error\n\t}()\n\tif path := f.Name(); sfv.Path != path {\n\t\tt.Fatalf(\"Expected %s, got %s\", path, sfv.Path)\n\t}\n\tif exist := sfv.IsExist(); !exist {\n\t\tt.Fatal(\"Expected true, got false\")\n\t}\n\tok, err := sfv.Verify()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"Expected true, got false\")\n\t}\n}\n\nfunc TestFind(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"gosfv\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t\/\/ No SFV file in dir should result in error\n\tif _, err := Find(dir); err == nil {\n\t\tt.Fatal(\"Expected error\")\n\t}\n\tfilepath := filepath.Join(dir, \"test.sfv\")\n\tif err := ioutil.WriteFile(filepath, []byte{}, 0600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfv, err := Find(dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif sfv.Path != filepath {\n\t\tt.Fatalf(\"Expected %q, got %q\", filepath, sfv.Path)\n\t}\n}\n\nfunc TestEmptySFV(t *testing.T) {\n\tsfv := SFV{Path: \"\/tmp\/sfv.sfv\"}\n\tif _, err := sfv.Verify(); err == nil {\n\t\tt.Fatal(\"Expected error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\tiniconf \"code.google.com\/p\/goconf\/conf\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\"github.com\/gamelost\/bot3server\/server\"\n\tirc \"github.com\/gamelost\/goirc\/client\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tBOT_CONFIG = \"bot3.config\"\n\tPRIVMSG = \"PRIVMSG\"\n)\n\ntype Request struct {\n\tRawLine *irc.Line\n\tTimestamp time.Time\n}\n\nfunc main() {\n\n\t\/\/ the quit channel\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)\n\n\t\/\/ read in necessary configuration\n\tconfigFile, err := iniconf.ReadConfigFile(BOT_CONFIG)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to read configuration file. Exiting now.\")\n\t}\n\n\t\/\/ convert to bot3config\n\tbot3config, err := Bot3ConfigFromConfigFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(\"Incomplete or invalid config file for bot3config. Exiting now.\")\n\t}\n\n\t\/\/ set up Bot3 instnace\n\tbot3 := &Bot3{}\n\tbot3.QuitChan = sigChan\n\tbot3.init(bot3config)\n\tbot3.connect()\n\n\t\/\/ receiving quit shuts down\n\t<-sigChan\n}\n\n\/\/ struct type for Bot3\ntype Bot3 struct {\n\tConfig *Bot3Config\n\tConnection *irc.Conn\n\t\/\/ NSQ input\/output to bot3Server\n\tBotServerOutputReader *nsq.Consumer\n\tBotServerInputWriter *nsq.Producer\n\tBotServerHeartbeatReader *nsq.Consumer\n\t\/\/ program quit chan\n\tQuitChan chan os.Signal\n\t\/\/ bot state\n\tSilenced bool\n\tBotServerOnline bool\n\tLastBotServerHeartbeat *server.Bot3ServerHeartbeat\n\tBot3ServerHeartbeatChan chan *server.Bot3ServerHeartbeat\n\tMagicIdentifier string\n\t\/\/ messagehandlers\n\tIRCMessageHandler *MessageHandler\n}\n\nfunc (b *Bot3) init(config *Bot3Config) error {\n\n\tb.Config = config\n\tb.Silenced = false\n\n\t\/\/ set up magicid\n\tb.MagicIdentifier = config.MagicIdentifier\n\n\tlog.Printf(\"Bot nick will be: %s and will join %s\\n\", b.Config.BotNick, b.Config.BotChannelToJoin)\n\tcfg := irc.NewConfig(b.Config.BotNick)\n\tcfg.SSL = false\n\tcfg.Server = b.Config.BotIRCServer\n\tcfg.NewNick = func(n string) string { return n + \"^\" }\n\tcfg.Pass = b.Config.BotPass\n\tc := irc.Client(cfg)\n\n\t\/\/ assign connection\n\tb.Connection = c\n\tb.Bot3ServerHeartbeatChan = make(chan *server.Bot3ServerHeartbeat)\n\tb.BotServerOnline = true\n\n\t\/\/ set up listener for heartbeat from bot3server\n\theartbeatReader, err := nsq.NewConsumer(\"bot3server-heartbeat\", \"main#ephemeral\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\tb.BotServerHeartbeatReader = heartbeatReader\n\thbmh := &HeartbeatMessageHandler{Bot3ServerHeartbeatChan: b.Bot3ServerHeartbeatChan}\n\tb.BotServerHeartbeatReader.SetHandler(hbmh)\n\tb.BotServerHeartbeatReader.ConnectToNSQLookupd(\"127.0.0.1:4161\")\n\n\t\/\/ set up goroutine to listen for heartbeat\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-b.Bot3ServerHeartbeatChan:\n\t\t\t\t\/\/ if we're coming back online, broadcast message\n\t\t\t\tif b.BotServerOnline == false {\n\t\t\t\t\tb.BotServerOnline = true\n\t\t\t\t\tc.Nick(b.Config.BotNick)\n\t\t\t\t\tc.Privmsg(b.Config.BotChannelToJoin, \"Wheee. Restored connection to bot3server!\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tcase <-time.After(time.Second * 5):\n\t\t\t\t\/\/ if initially going offline, broadcast message\n\t\t\t\tif b.BotServerOnline == true {\n\t\t\t\t\tc.Privmsg(b.Config.BotChannelToJoin, \"Welp! I seem to have lost connection to the bot3server. Will continue to look for it.\")\n\t\t\t\t\tc.Nick(b.Config.BotOfflinePrefix + \"_SERVEROFFLINE\")\n\t\t\t\t\tb.BotServerOnline = false\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ set up reader and message handler for botserver-output\n\toutputReader, err := nsq.NewConsumer(b.Config.Bot3ServerOutputTopic, \"main\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\tb.BotServerOutputReader = outputReader\n\tb.IRCMessageHandler = &MessageHandler{Connection: c, MagicIdentifier: b.Config.MagicIdentifier, Requests: map[string]*Request{}}\n\tb.BotServerOutputReader.SetHandler(b.IRCMessageHandler)\n\tb.BotServerOutputReader.ConnectToNSQLookupd(\"127.0.0.1:4161\")\n\n\t\/\/ set up writer for botserver-input\n\twriter, err := nsq.NewProducer(\"127.0.0.1:4150\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\n\tb.BotServerInputWriter = writer\n\n\t\/\/ Add handlers to do things here!\n\t\/\/ e.g. join a channel on connect.\n\tc.HandleFunc(\"connected\",\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tlog.Printf(\"Joining channel %s\", b.Config.BotChannelToJoin)\n\t\t\tconn.Join(b.Config.BotChannelToJoin)\n\t\t})\n\n\t\/\/ And a signal on disconnect\n\tc.HandleFunc(\"disconnected\",\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tlog.Printf(\"Received quit command\")\n\t\t\tb.QuitChan <- syscall.SIGINT\n\t\t})\n\n\t\/\/ hardcoded kill command just in case\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tif strings.HasPrefix(\"!quit \"+b.Config.BotNick, line.Text()) {\n\t\t\t\tif b.Config.IsAdminNick(line.Nick) {\n\t\t\t\t\tb.QuitChan <- syscall.SIGINT\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\/\/ handle !silent\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tif strings.HasPrefix(\"!silent\", line.Text()) {\n\t\t\t\tif b.Config.IsAdminNick(line.Nick) {\n\t\t\t\t\tif !b.Silenced {\n\t\t\t\t\t\tc.Nick(b.Config.BotOfflinePrefix + \"_SILENCED\")\n\t\t\t\t\t\tb.Silenced = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Nick(b.Config.BotNick)\n\t\t\t\t\t\tb.Silenced = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\/\/ handle privmsgs\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\treqid := uuid.NewUUID().String()\n\t\t\tb.IRCMessageHandler.Requests[reqid] = &Request{RawLine: line, Timestamp: time.Now()}\n\t\t\tbotRequest := &server.BotRequest{Identifier: reqid, Nick: line.Nick, Channel: line.Target(), ChatText: line.Text()}\n\t\t\tencodedRequest, err := json.Marshal(botRequest)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error encoding request: %s\\n\", err.Error())\n\t\t\t}\n\n\t\t\t\/\/ write to nsq only if not silenced, otherwise drop message\n\t\t\tif !b.Silenced {\n\t\t\t\terr := b.BotServerInputWriter.Publish(b.Config.Bot3ServerInputTopic, encodedRequest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Silenced - will not output message.\")\n\t\t\t}\n\t\t})\n\n\t\/\/ disabling this for now since it breaks remindme\n\t\/\/ go func() {\n\t\/\/ \tfor {\n\t\/\/ \t\texp := time.Now()\n\t\/\/ \t\t<-time.After(time.Second * 5)\n\t\/\/ \t\tfor k, v := range b.IRCMessageHandler.Requests {\n\t\/\/ \t\t\tif v.Timestamp.Unix() < exp.Unix() {\n\t\/\/ \t\t\t\tlog.Printf(\"Expiring %+v\\n\", v)\n\t\/\/ \t\t\t\tdelete(b.IRCMessageHandler.Requests, k)\n\t\/\/ \t\t\t}\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ }()\n\n\treturn nil\n}\n\nfunc (b *Bot3) connect() {\n\n\t\/\/ Tell client to connect.\n\tif err := b.Connection.Connect(); err != nil {\n\t\tfmt.Printf(\"Connection error: %s\\n\", err.Error())\n\t\tb.QuitChan <- syscall.SIGINT\n\t} else {\n\t\tlog.Printf(\"Successfully connected.\")\n\t}\n\n}\n<commit_msg>updates for newer nsq api<commit_after>package main\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\tiniconf \"code.google.com\/p\/goconf\/conf\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\"github.com\/gamelost\/bot3server\/server\"\n\tirc \"github.com\/gamelost\/goirc\/client\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tBOT_CONFIG = \"bot3.config\"\n\tPRIVMSG = \"PRIVMSG\"\n)\n\ntype Request struct {\n\tRawLine *irc.Line\n\tTimestamp time.Time\n}\n\nfunc main() {\n\n\t\/\/ the quit channel\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)\n\n\t\/\/ read in necessary configuration\n\tconfigFile, err := iniconf.ReadConfigFile(BOT_CONFIG)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to read configuration file. Exiting now.\")\n\t}\n\n\t\/\/ convert to bot3config\n\tbot3config, err := Bot3ConfigFromConfigFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(\"Incomplete or invalid config file for bot3config. Exiting now.\")\n\t}\n\n\t\/\/ set up Bot3 instnace\n\tbot3 := &Bot3{}\n\tbot3.QuitChan = sigChan\n\tbot3.init(bot3config)\n\tbot3.connect()\n\n\t\/\/ receiving quit shuts down\n\t<-sigChan\n}\n\n\/\/ struct type for Bot3\ntype Bot3 struct {\n\tConfig *Bot3Config\n\tConnection *irc.Conn\n\t\/\/ NSQ input\/output to bot3Server\n\tBotServerOutputReader *nsq.Consumer\n\tBotServerInputWriter *nsq.Producer\n\tBotServerHeartbeatReader *nsq.Consumer\n\t\/\/ program quit chan\n\tQuitChan chan os.Signal\n\t\/\/ bot state\n\tSilenced bool\n\tBotServerOnline bool\n\tLastBotServerHeartbeat *server.Bot3ServerHeartbeat\n\tBot3ServerHeartbeatChan chan *server.Bot3ServerHeartbeat\n\tMagicIdentifier string\n\t\/\/ messagehandlers\n\tIRCMessageHandler *MessageHandler\n}\n\nfunc (b *Bot3) init(config *Bot3Config) error {\n\n\tb.Config = config\n\tb.Silenced = false\n\n\t\/\/ set up magicid\n\tb.MagicIdentifier = config.MagicIdentifier\n\n\tlog.Printf(\"Bot nick will be: %s and will join %s\\n\", b.Config.BotNick, b.Config.BotChannelToJoin)\n\tcfg := irc.NewConfig(b.Config.BotNick)\n\tcfg.SSL = false\n\tcfg.Server = b.Config.BotIRCServer\n\tcfg.NewNick = func(n string) string { return n + \"^\" }\n\tcfg.Pass = b.Config.BotPass\n\tc := irc.Client(cfg)\n\n\t\/\/ assign connection\n\tb.Connection = c\n\tb.Bot3ServerHeartbeatChan = make(chan *server.Bot3ServerHeartbeat)\n\tb.BotServerOnline = true\n\n\t\/\/ set up listener for heartbeat from bot3server\n\theartbeatReader, err := nsq.NewConsumer(\"bot3server-heartbeat\", \"main\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\tb.BotServerHeartbeatReader = heartbeatReader\n\thbmh := &HeartbeatMessageHandler{Bot3ServerHeartbeatChan: b.Bot3ServerHeartbeatChan}\n\tb.BotServerHeartbeatReader.AddHandler(hbmh)\n\tb.BotServerHeartbeatReader.ConnectToNSQLookupd(\"127.0.0.1:4161\")\n\n\t\/\/ set up goroutine to listen for heartbeat\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-b.Bot3ServerHeartbeatChan:\n\t\t\t\t\/\/ if we're coming back online, broadcast message\n\t\t\t\tif b.BotServerOnline == false {\n\t\t\t\t\tb.BotServerOnline = true\n\t\t\t\t\tc.Nick(b.Config.BotNick)\n\t\t\t\t\tc.Privmsg(b.Config.BotChannelToJoin, \"Wheee. Restored connection to bot3server!\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tcase <-time.After(time.Second * 5):\n\t\t\t\t\/\/ if initially going offline, broadcast message\n\t\t\t\tif b.BotServerOnline == true {\n\t\t\t\t\tc.Privmsg(b.Config.BotChannelToJoin, \"Welp! I seem to have lost connection to the bot3server. Will continue to look for it.\")\n\t\t\t\t\tc.Nick(b.Config.BotOfflinePrefix + \"_SERVEROFFLINE\")\n\t\t\t\t\tb.BotServerOnline = false\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ set up reader and message handler for botserver-output\n\toutputReader, err := nsq.NewConsumer(b.Config.Bot3ServerOutputTopic, \"main\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\tb.BotServerOutputReader = outputReader\n\tb.IRCMessageHandler = &MessageHandler{Connection: c, MagicIdentifier: b.Config.MagicIdentifier, Requests: map[string]*Request{}}\n\tb.BotServerOutputReader.AddHandler(b.IRCMessageHandler)\n\tb.BotServerOutputReader.ConnectToNSQLookupd(\"127.0.0.1:4161\")\n\n\t\/\/ set up writer for botserver-input\n\twriter, err := nsq.NewProducer(\"127.0.0.1:4150\", nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tb.QuitChan <- syscall.SIGINT\n\t}\n\n\tb.BotServerInputWriter = writer\n\n\t\/\/ Add handlers to do things here!\n\t\/\/ e.g. join a channel on connect.\n\tc.HandleFunc(\"connected\",\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tlog.Printf(\"Joining channel %s\", b.Config.BotChannelToJoin)\n\t\t\tconn.Join(b.Config.BotChannelToJoin)\n\t\t})\n\n\t\/\/ And a signal on disconnect\n\tc.HandleFunc(\"disconnected\",\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tlog.Printf(\"Received quit command\")\n\t\t\tb.QuitChan <- syscall.SIGINT\n\t\t})\n\n\t\/\/ hardcoded kill command just in case\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tif strings.HasPrefix(\"!quit \"+b.Config.BotNick, line.Text()) {\n\t\t\t\tif b.Config.IsAdminNick(line.Nick) {\n\t\t\t\t\tb.QuitChan <- syscall.SIGINT\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\/\/ handle !silent\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tif strings.HasPrefix(\"!silent\", line.Text()) {\n\t\t\t\tif b.Config.IsAdminNick(line.Nick) {\n\t\t\t\t\tif !b.Silenced {\n\t\t\t\t\t\tc.Nick(b.Config.BotOfflinePrefix + \"_SILENCED\")\n\t\t\t\t\t\tb.Silenced = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Nick(b.Config.BotNick)\n\t\t\t\t\t\tb.Silenced = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\/\/ handle privmsgs\n\tc.HandleFunc(PRIVMSG,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\treqid := uuid.NewUUID().String()\n\t\t\tb.IRCMessageHandler.Requests[reqid] = &Request{RawLine: line, Timestamp: time.Now()}\n\t\t\tbotRequest := &server.BotRequest{Identifier: reqid, Nick: line.Nick, Channel: line.Target(), ChatText: line.Text()}\n\t\t\tencodedRequest, err := json.Marshal(botRequest)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error encoding request: %s\\n\", err.Error())\n\t\t\t}\n\n\t\t\t\/\/ write to nsq only if not silenced, otherwise drop message\n\t\t\tif !b.Silenced {\n\t\t\t\terr := b.BotServerInputWriter.Publish(b.Config.Bot3ServerInputTopic, encodedRequest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Silenced - will not output message.\")\n\t\t\t}\n\t\t})\n\n\t\/\/ disabling this for now since it breaks remindme\n\t\/\/ go func() {\n\t\/\/ \tfor {\n\t\/\/ \t\texp := time.Now()\n\t\/\/ \t\t<-time.After(time.Second * 5)\n\t\/\/ \t\tfor k, v := range b.IRCMessageHandler.Requests {\n\t\/\/ \t\t\tif v.Timestamp.Unix() < exp.Unix() {\n\t\/\/ \t\t\t\tlog.Printf(\"Expiring %+v\\n\", v)\n\t\/\/ \t\t\t\tdelete(b.IRCMessageHandler.Requests, k)\n\t\/\/ \t\t\t}\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ }()\n\n\treturn nil\n}\n\nfunc (b *Bot3) connect() {\n\n\t\/\/ Tell client to connect.\n\tif err := b.Connection.Connect(); err != nil {\n\t\tfmt.Printf(\"Connection error: %s\\n\", err.Error())\n\t\tb.QuitChan <- syscall.SIGINT\n\t} else {\n\t\tlog.Printf(\"Successfully connected.\")\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Mathias Fiedler\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gl\n\n\/\/ #include <GLES3\/gl3.h>\n\/\/ #include <stdlib.h>\nimport \"C\"\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\ntype Shader C.GLuint\ntype ShaderType C.GLenum\ntype ShaderBinaryFormat C.GLint\n\nconst (\n\tVERTEX_SHADER ShaderType = C.GL_VERTEX_SHADER\n\tFRAGMENT_SHADER = C.GL_FRAGMENT_SHADER\n)\n\ntype Program struct {\n\tid C.GLuint\n\n\tshadersValid bool\n\tshaders []Shader\n}\n\nfunc CreateProgram() Program {\n\treturn Program{id: C.glCreateProgram()}\n}\n\nfunc CreateShader(stype ShaderType) Shader {\n\treturn Shader(C.glCreateShader(C.GLenum(stype)))\n}\n\nfunc (program *Program) Shaders() []Shader {\n\tif !program.shadersValid {\n\t\tnshaders := C.GLint(0)\n\t\tC.glGetProgramiv(program.id, C.GL_ATTACHED_SHADERS, &nshaders)\n\t\tprogram.shaders = make([]Shader, nshaders)\n\t\tC.glGetAttachedShaders(program.id, C.GLsizei(nshaders), nil, (*C.GLuint)(&program.shaders[0]))\n\t\tprogram.shadersValid = true\n\t}\n\treturn program.shaders\n}\n\nfunc (program *Program) AttachShader(shader Shader) {\n\tC.glAttachShader(program.id, C.GLuint(shader))\n\tprogram.shadersValid = false\n}\n\nfunc (program *Program) BindAttribLocation(attrib VertexAttrib, name string) {\n\n\tcstr := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.glBindAttribLocation(program.id, attrib.Index(), (*C.GLchar)(cstr))\n}\n\nfunc (program *Program) Delete() {\n\tC.glDeleteProgram(program.id)\n}\n\nfunc (shader Shader) Delete() {\n\tC.glDeleteShader(C.GLuint(shader))\n}\n\nfunc (program *Program) DetachShader(shader Shader) {\n\tC.glDetachShader(program.id, C.GLuint(shader))\n\tprogram.shadersValid = false\n}\n\n\/\/ func (program *Program) GetActiveAttrib(index uint) (size int, atype int, name string) {\n\n\/\/ \tnameBuf := make([]byte, MAX_ATTRIBUTE_LENGTH)\n\/\/ \tsizeBuf := C.GLint(0)\n\/\/ \ttypeBuf := C.GLenum(0)\n\n\/\/ \tC.glGetActiveAttrib(program.id, C.GLuint(index), C.GLsizei(len(nameBuf)), C.NULL, &sizeBuf, &typeBuf, (*C.GLchar)(&nameBuf[0]))\n\n\/\/ \tsize = int(sizeBuf)\n\/\/ \tatype = int(typeBuf)\n\/\/ \tname = string(nameBuf)\n\/\/ }\n\n\/\/ func (program *Program) GetActiveUniform(index uint) (size int, utype int, name string) {\n\n\/\/ \tnameBuf := make([]byte, MAX_UNIFORM_LENGTH)\n\/\/ \tsizeBuf := C.GLint(0)\n\/\/ \ttypeBuf := C.GLenum(0)\n\n\/\/ \tC.glGetActiveUniform(program.id, C.GLuint(index), C.GLsizei(len(nameBuf)), C.NULL, &sizeBuf, &typeBuf, (*C.GLchar)(&nameBuf[0]))\n\n\/\/ \tsize = int(sizeBuf)\n\/\/ \tutype = int(typeBuf)\n\/\/ \tname = string(nameBuf)\n\/\/ }\n\n\/\/ func (program *Program) GetAttachedShaders(GLsizei maxcount, GLsizei* count) []uint {\n\/\/ \tC.glGetAttachedShaders(program.id, GLsizei maxcount, GLsizei* count, GLuint* shaders)\n\/\/ }\n\n\/\/ func (program *Program) GetProgramiv(pname int, params int) {\n\/\/ \tC.glGetProgramiv(program.id, GLenum pname, GLint* params)\n\/\/ }\n\n\/\/ func (program *Program) InfoLog(GLsizei bufsize, GLsizei* length, GLchar* infolog) {\n\/\/ \tC.glGetProgramInfoLog(program.id, GLsizei bufsize, GLsizei* length, GLchar* infolog)\n\/\/ }\n\n\/\/ func GetShaderiv(shader Shader, pname int, params int) {\n\/\/ \tC.glGetShaderiv(C.GLuint(shader), GLenum pname, GLint* params)\n\/\/ }\n\n\/\/ func GetShaderInfoLog(shader Shader, GLsizei bufsize, GLsizei* length, GLchar* infolog) {\n\/\/ \tC.glGetShaderInfoLog(C.GLuint(shader), GLsizei bufsize, GLsizei* length, GLchar* infolog)\n\/\/ }\n\n\/\/ func GetShaderPrecisionFormat(shadertype ShaderType, precisiontype int, GLint* range, precision int) {\n\/\/ \tC.glGetShaderPrecisionFormat(GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision)\n\/\/ }\n\n\/\/ func GetShaderSource(shader Shader, GLsizei bufsize, GLsizei* length, GLchar* source) {\n\/\/ \tC.glGetShaderSource(C.GLuint(shader), GLsizei bufsize, GLsizei* length, GLchar* source)\n\/\/ }\n\n\/\/ func (program *Program) GetUniformfv(location int, GLfloat* params) {\n\/\/ \tC.glGetUniformfv(program.id, GLint location, GLfloat* params)\n\/\/ }\n\n\/\/ func (program *Program) GetUniformiv(GLint location, params int) {\n\/\/ \tC.glGetUniformiv(program.id, GLint location, GLint* params)\n\/\/ }\n\n\/\/ GL_APICALL int GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar* name);\n\/\/ GL_APICALL int GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar* name);\n\n\/\/ func ShaderBinary(GLsizei n, shaders []uint, binaryformat int, const GLvoid* binary, GLsizei length) {\n\/\/ \tC.glShaderBinary(GLsizei n, const GLuint* shaders, GLenum binaryformat, const GLvoid* binary, GLsizei length)\n\/\/ }\n\nfunc GetShaderBinaryFormats() (formats []ShaderBinaryFormat) {\n\tnumFormats := C.GLint(0)\n\tC.glGetIntegerv(C.GL_NUM_SHADER_BINARY_FORMATS, &numFormats)\n\tif numFormats > 0 {\n\t\tformats = make([]ShaderBinaryFormat, numFormats)\n\t\tC.glGetIntegerv(C.GL_SHADER_BINARY_FORMATS, (*C.GLint)(&formats[0]))\n\t}\n\treturn\n}\n\nfunc ShaderBinary(shaders []Shader, format ShaderBinaryFormat, binary []byte) {\n\tC.glShaderBinary(C.GLsizei(len(shaders)), (*C.GLuint)(&shaders[0]), C.GLenum(format), unsafe.Pointer(&binary[0]), C.GLsizei(len(binary)))\n}\n\nfunc (shader Shader) Source(src string) {\n\tcstr := (*C.GLchar)(C.CString(src))\n\tdefer C.free(unsafe.Pointer(cstr))\n\tslen := C.GLint(len(src))\n\tC.glShaderSource(C.GLuint(shader), 1, &cstr, &slen)\n}\n\nfunc (shader Shader) Compile() {\n\tC.glCompileShader(C.GLuint(shader))\n\n\tstatus := C.GLint(0)\n\tC.glGetShaderiv(C.GLuint(shader), C.GL_COMPILE_STATUS, &status)\n\n\tif status != C.GL_TRUE {\n\t\tloglen := C.GLint(0)\n\t\tC.glGetShaderiv(C.GLuint(shader), C.GL_INFO_LOG_LENGTH, &loglen)\n\t\tlog := (*C.GLchar)(C.malloc(C.size_t(loglen)))\n\t\tdefer C.free(unsafe.Pointer(log))\n\t\tC.glGetShaderInfoLog(C.GLuint(shader), C.GLsizei(loglen), nil, log)\n\t\tpanic(fmt.Errorf(\"Failed to compile shader: %s\", C.GoString((*C.char)(log))))\n\t}\n}\n\nfunc (program *Program) Link() {\n\tC.glLinkProgram(program.id)\n\n\tstatus := C.GLint(0)\n\tC.glGetProgramiv(program.id, C.GL_LINK_STATUS, &status)\n\n\tif status != C.GL_TRUE {\n\t\tloglen := C.GLint(0)\n\t\tC.glGetProgramiv(program.id, C.GL_INFO_LOG_LENGTH, &loglen)\n\t\tlog := (*C.GLchar)(C.malloc(C.size_t(loglen)))\n\t\tdefer C.free(unsafe.Pointer(log))\n\t\tC.glGetProgramInfoLog(program.id, C.GLsizei(loglen), nil, log)\n\t\tpanic(fmt.Errorf(\"Failed to link shader: %s\", C.GoString((*C.char)(log))))\n\t}\n}\n\nfunc (program *Program) Use() {\n\tC.glUseProgram(program.id)\n}\n\nfunc (program *Program) Validate() {\n\tC.glValidateProgram(program.id)\n}\n\nfunc ReleaseShaderCompiler() {\n\tC.glReleaseShaderCompiler()\n}\n\nfunc (t ShaderType) String() string {\n\tswitch t {\n\tcase VERTEX_SHADER:\n\t\treturn \"VERTEX_SHADER\"\n\tcase FRAGMENT_SHADER:\n\t\treturn \"FRAGMENT_SHADER\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"ShaderType(%#x)\", t)\n\t}\n}\n\nfunc (program *Program) BindVariables(s interface{}) {\n\n\tut := reflect.TypeOf((*Uniform)(nil)).Elem()\n\tat := reflect.TypeOf((*VertexAttrib)(nil)).Elem()\n\n\tv := reflect.ValueOf(s).Elem()\n\tt := v.Type()\n\n\tn := t.NumField()\n\n\tfor i := 0; i < n; i++ {\n\t\tft := t.Field(i)\n\t\tname := ft.Tag.Get(\"glsl\")\n\t\tif len(name) > 0 {\n\t\t\tfv := v.Field(i)\n\t\t\tif fv.Type().Implements(ut) {\n\t\t\t\tfv.Set(reflect.ValueOf(program.GetUniform(name)))\n\t\t\t} else if fv.Type().Implements(at) {\n\t\t\t\tfv.Set(reflect.ValueOf(program.GetAttrib(name)))\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Errorf(\"invalid type of '%s': %s\", ft.Name, fv.Type().Name()))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (program *Program) GetAttrib(name string) VertexAttrib {\n\n\tcname := (*C.GLchar)(C.CString(name))\n\tdefer C.free(unsafe.Pointer(cname))\n\n\tiloc := C.glGetAttribLocation(program.id, cname)\n\tif iloc < 0 {\n\t\tpanic(fmt.Errorf(\"not an active uniform: %s\", name))\n\t}\n\n\tloc := C.GLuint(iloc)\n\n\tdatatype := C.GLenum(0)\n\tC.glGetActiveAttrib(program.id, C.GLuint(loc), 0, nil, nil, &datatype, nil)\n\n\tswitch datatype {\n\tcase C.GL_FLOAT:\n\t\treturn FloatAttrib{vattrib{loc}}\n\tcase C.GL_FLOAT_VEC2:\n\t\treturn Vec2Attrib{FloatAttrib{vattrib{loc}}}\n\tcase C.GL_FLOAT_VEC3:\n\t\treturn Vec3Attrib{FloatAttrib{vattrib{loc}}}\n\tcase C.GL_FLOAT_VEC4:\n\t\treturn Vec4Attrib{FloatAttrib{vattrib{loc}}}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported attribute type: %#x\", datatype))\n\t}\n}\n\nfunc (program *Program) GetUniform(name string) Uniform {\n\n\tcname := (*C.GLchar)(C.CString(name))\n\tdefer C.free(unsafe.Pointer(cname))\n\n\tloc := C.GLint(C.glGetUniformLocation(program.id, cname))\n\tif loc < 0 {\n\t\tpanic(fmt.Errorf(\"not an active attribute: %s\", name))\n\t}\n\n\tdatatype := C.GLenum(0)\n\tC.glGetActiveUniform(program.id, C.GLuint(loc), 0, nil, nil, &datatype, nil)\n\n\tswitch datatype {\n\tcase C.GL_FLOAT:\n\t\treturn Uniform1f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC2:\n\t\treturn Uniform2f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC3:\n\t\treturn Uniform3f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC4:\n\t\treturn Uniform4f{uniformBase{loc}}\n\tcase C.GL_INT:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_INT_VEC2:\n\t\treturn Uniform2i{uniformBase{loc}}\n\tcase C.GL_INT_VEC3:\n\t\treturn Uniform3i{uniformBase{loc}}\n\tcase C.GL_INT_VEC4:\n\t\treturn Uniform4i{uniformBase{loc}}\n\tcase C.GL_BOOL:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC2:\n\t\treturn Uniform2i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC3:\n\t\treturn Uniform3i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC4:\n\t\treturn Uniform4i{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT2:\n\t\treturn UniformMatrix2f{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT3:\n\t\treturn UniformMatrix3f{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT4:\n\t\treturn UniformMatrix4f{uniformBase{loc}}\n\tcase C.GL_SAMPLER_2D:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_SAMPLER_CUBE:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported uniform type: %#x\", datatype))\n\t}\n}\n<commit_msg>fix error messages for inactive uniforms and attributes<commit_after>\/\/ Copyright 2014 Mathias Fiedler\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gl\n\n\/\/ #include <GLES3\/gl3.h>\n\/\/ #include <stdlib.h>\nimport \"C\"\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\ntype Shader C.GLuint\ntype ShaderType C.GLenum\ntype ShaderBinaryFormat C.GLint\n\nconst (\n\tVERTEX_SHADER ShaderType = C.GL_VERTEX_SHADER\n\tFRAGMENT_SHADER = C.GL_FRAGMENT_SHADER\n)\n\ntype Program struct {\n\tid C.GLuint\n\n\tshadersValid bool\n\tshaders []Shader\n}\n\nfunc CreateProgram() Program {\n\treturn Program{id: C.glCreateProgram()}\n}\n\nfunc CreateShader(stype ShaderType) Shader {\n\treturn Shader(C.glCreateShader(C.GLenum(stype)))\n}\n\nfunc (program *Program) Shaders() []Shader {\n\tif !program.shadersValid {\n\t\tnshaders := C.GLint(0)\n\t\tC.glGetProgramiv(program.id, C.GL_ATTACHED_SHADERS, &nshaders)\n\t\tprogram.shaders = make([]Shader, nshaders)\n\t\tC.glGetAttachedShaders(program.id, C.GLsizei(nshaders), nil, (*C.GLuint)(&program.shaders[0]))\n\t\tprogram.shadersValid = true\n\t}\n\treturn program.shaders\n}\n\nfunc (program *Program) AttachShader(shader Shader) {\n\tC.glAttachShader(program.id, C.GLuint(shader))\n\tprogram.shadersValid = false\n}\n\nfunc (program *Program) BindAttribLocation(attrib VertexAttrib, name string) {\n\n\tcstr := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.glBindAttribLocation(program.id, attrib.Index(), (*C.GLchar)(cstr))\n}\n\nfunc (program *Program) Delete() {\n\tC.glDeleteProgram(program.id)\n}\n\nfunc (shader Shader) Delete() {\n\tC.glDeleteShader(C.GLuint(shader))\n}\n\nfunc (program *Program) DetachShader(shader Shader) {\n\tC.glDetachShader(program.id, C.GLuint(shader))\n\tprogram.shadersValid = false\n}\n\n\/\/ func (program *Program) GetActiveAttrib(index uint) (size int, atype int, name string) {\n\n\/\/ \tnameBuf := make([]byte, MAX_ATTRIBUTE_LENGTH)\n\/\/ \tsizeBuf := C.GLint(0)\n\/\/ \ttypeBuf := C.GLenum(0)\n\n\/\/ \tC.glGetActiveAttrib(program.id, C.GLuint(index), C.GLsizei(len(nameBuf)), C.NULL, &sizeBuf, &typeBuf, (*C.GLchar)(&nameBuf[0]))\n\n\/\/ \tsize = int(sizeBuf)\n\/\/ \tatype = int(typeBuf)\n\/\/ \tname = string(nameBuf)\n\/\/ }\n\n\/\/ func (program *Program) GetActiveUniform(index uint) (size int, utype int, name string) {\n\n\/\/ \tnameBuf := make([]byte, MAX_UNIFORM_LENGTH)\n\/\/ \tsizeBuf := C.GLint(0)\n\/\/ \ttypeBuf := C.GLenum(0)\n\n\/\/ \tC.glGetActiveUniform(program.id, C.GLuint(index), C.GLsizei(len(nameBuf)), C.NULL, &sizeBuf, &typeBuf, (*C.GLchar)(&nameBuf[0]))\n\n\/\/ \tsize = int(sizeBuf)\n\/\/ \tutype = int(typeBuf)\n\/\/ \tname = string(nameBuf)\n\/\/ }\n\n\/\/ func (program *Program) GetAttachedShaders(GLsizei maxcount, GLsizei* count) []uint {\n\/\/ \tC.glGetAttachedShaders(program.id, GLsizei maxcount, GLsizei* count, GLuint* shaders)\n\/\/ }\n\n\/\/ func (program *Program) GetProgramiv(pname int, params int) {\n\/\/ \tC.glGetProgramiv(program.id, GLenum pname, GLint* params)\n\/\/ }\n\n\/\/ func (program *Program) InfoLog(GLsizei bufsize, GLsizei* length, GLchar* infolog) {\n\/\/ \tC.glGetProgramInfoLog(program.id, GLsizei bufsize, GLsizei* length, GLchar* infolog)\n\/\/ }\n\n\/\/ func GetShaderiv(shader Shader, pname int, params int) {\n\/\/ \tC.glGetShaderiv(C.GLuint(shader), GLenum pname, GLint* params)\n\/\/ }\n\n\/\/ func GetShaderInfoLog(shader Shader, GLsizei bufsize, GLsizei* length, GLchar* infolog) {\n\/\/ \tC.glGetShaderInfoLog(C.GLuint(shader), GLsizei bufsize, GLsizei* length, GLchar* infolog)\n\/\/ }\n\n\/\/ func GetShaderPrecisionFormat(shadertype ShaderType, precisiontype int, GLint* range, precision int) {\n\/\/ \tC.glGetShaderPrecisionFormat(GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision)\n\/\/ }\n\n\/\/ func GetShaderSource(shader Shader, GLsizei bufsize, GLsizei* length, GLchar* source) {\n\/\/ \tC.glGetShaderSource(C.GLuint(shader), GLsizei bufsize, GLsizei* length, GLchar* source)\n\/\/ }\n\n\/\/ func (program *Program) GetUniformfv(location int, GLfloat* params) {\n\/\/ \tC.glGetUniformfv(program.id, GLint location, GLfloat* params)\n\/\/ }\n\n\/\/ func (program *Program) GetUniformiv(GLint location, params int) {\n\/\/ \tC.glGetUniformiv(program.id, GLint location, GLint* params)\n\/\/ }\n\n\/\/ GL_APICALL int GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar* name);\n\/\/ GL_APICALL int GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar* name);\n\n\/\/ func ShaderBinary(GLsizei n, shaders []uint, binaryformat int, const GLvoid* binary, GLsizei length) {\n\/\/ \tC.glShaderBinary(GLsizei n, const GLuint* shaders, GLenum binaryformat, const GLvoid* binary, GLsizei length)\n\/\/ }\n\nfunc GetShaderBinaryFormats() (formats []ShaderBinaryFormat) {\n\tnumFormats := C.GLint(0)\n\tC.glGetIntegerv(C.GL_NUM_SHADER_BINARY_FORMATS, &numFormats)\n\tif numFormats > 0 {\n\t\tformats = make([]ShaderBinaryFormat, numFormats)\n\t\tC.glGetIntegerv(C.GL_SHADER_BINARY_FORMATS, (*C.GLint)(&formats[0]))\n\t}\n\treturn\n}\n\nfunc ShaderBinary(shaders []Shader, format ShaderBinaryFormat, binary []byte) {\n\tC.glShaderBinary(C.GLsizei(len(shaders)), (*C.GLuint)(&shaders[0]), C.GLenum(format), unsafe.Pointer(&binary[0]), C.GLsizei(len(binary)))\n}\n\nfunc (shader Shader) Source(src string) {\n\tcstr := (*C.GLchar)(C.CString(src))\n\tdefer C.free(unsafe.Pointer(cstr))\n\tslen := C.GLint(len(src))\n\tC.glShaderSource(C.GLuint(shader), 1, &cstr, &slen)\n}\n\nfunc (shader Shader) Compile() {\n\tC.glCompileShader(C.GLuint(shader))\n\n\tstatus := C.GLint(0)\n\tC.glGetShaderiv(C.GLuint(shader), C.GL_COMPILE_STATUS, &status)\n\n\tif status != C.GL_TRUE {\n\t\tloglen := C.GLint(0)\n\t\tC.glGetShaderiv(C.GLuint(shader), C.GL_INFO_LOG_LENGTH, &loglen)\n\t\tlog := (*C.GLchar)(C.malloc(C.size_t(loglen)))\n\t\tdefer C.free(unsafe.Pointer(log))\n\t\tC.glGetShaderInfoLog(C.GLuint(shader), C.GLsizei(loglen), nil, log)\n\t\tpanic(fmt.Errorf(\"Failed to compile shader: %s\", C.GoString((*C.char)(log))))\n\t}\n}\n\nfunc (program *Program) Link() {\n\tC.glLinkProgram(program.id)\n\n\tstatus := C.GLint(0)\n\tC.glGetProgramiv(program.id, C.GL_LINK_STATUS, &status)\n\n\tif status != C.GL_TRUE {\n\t\tloglen := C.GLint(0)\n\t\tC.glGetProgramiv(program.id, C.GL_INFO_LOG_LENGTH, &loglen)\n\t\tlog := (*C.GLchar)(C.malloc(C.size_t(loglen)))\n\t\tdefer C.free(unsafe.Pointer(log))\n\t\tC.glGetProgramInfoLog(program.id, C.GLsizei(loglen), nil, log)\n\t\tpanic(fmt.Errorf(\"Failed to link shader: %s\", C.GoString((*C.char)(log))))\n\t}\n}\n\nfunc (program *Program) Use() {\n\tC.glUseProgram(program.id)\n}\n\nfunc (program *Program) Validate() {\n\tC.glValidateProgram(program.id)\n}\n\nfunc ReleaseShaderCompiler() {\n\tC.glReleaseShaderCompiler()\n}\n\nfunc (t ShaderType) String() string {\n\tswitch t {\n\tcase VERTEX_SHADER:\n\t\treturn \"VERTEX_SHADER\"\n\tcase FRAGMENT_SHADER:\n\t\treturn \"FRAGMENT_SHADER\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"ShaderType(%#x)\", t)\n\t}\n}\n\nfunc (program *Program) BindVariables(s interface{}) {\n\n\tut := reflect.TypeOf((*Uniform)(nil)).Elem()\n\tat := reflect.TypeOf((*VertexAttrib)(nil)).Elem()\n\n\tv := reflect.ValueOf(s).Elem()\n\tt := v.Type()\n\n\tn := t.NumField()\n\n\tfor i := 0; i < n; i++ {\n\t\tft := t.Field(i)\n\t\tname := ft.Tag.Get(\"glsl\")\n\t\tif len(name) > 0 {\n\t\t\tfv := v.Field(i)\n\t\t\tif fv.Type().Implements(ut) {\n\t\t\t\tfv.Set(reflect.ValueOf(program.GetUniform(name)))\n\t\t\t} else if fv.Type().Implements(at) {\n\t\t\t\tfv.Set(reflect.ValueOf(program.GetAttrib(name)))\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Errorf(\"invalid type of '%s': %s\", ft.Name, fv.Type().Name()))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (program *Program) GetAttrib(name string) VertexAttrib {\n\n\tcname := (*C.GLchar)(C.CString(name))\n\tdefer C.free(unsafe.Pointer(cname))\n\n\tiloc := C.glGetAttribLocation(program.id, cname)\n\tif iloc < 0 {\n\t\tpanic(fmt.Errorf(\"not an active attribute: %s\", name))\n\t}\n\n\tloc := C.GLuint(iloc)\n\n\tdatatype := C.GLenum(0)\n\tC.glGetActiveAttrib(program.id, C.GLuint(loc), 0, nil, nil, &datatype, nil)\n\n\tswitch datatype {\n\tcase C.GL_FLOAT:\n\t\treturn FloatAttrib{vattrib{loc}}\n\tcase C.GL_FLOAT_VEC2:\n\t\treturn Vec2Attrib{FloatAttrib{vattrib{loc}}}\n\tcase C.GL_FLOAT_VEC3:\n\t\treturn Vec3Attrib{FloatAttrib{vattrib{loc}}}\n\tcase C.GL_FLOAT_VEC4:\n\t\treturn Vec4Attrib{FloatAttrib{vattrib{loc}}}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported attribute type: %#x\", datatype))\n\t}\n}\n\nfunc (program *Program) GetUniform(name string) Uniform {\n\n\tcname := (*C.GLchar)(C.CString(name))\n\tdefer C.free(unsafe.Pointer(cname))\n\n\tloc := C.GLint(C.glGetUniformLocation(program.id, cname))\n\tif loc < 0 {\n\t\tpanic(fmt.Errorf(\"not an active uniform: %s\", name))\n\t}\n\n\tdatatype := C.GLenum(0)\n\tC.glGetActiveUniform(program.id, C.GLuint(loc), 0, nil, nil, &datatype, nil)\n\n\tswitch datatype {\n\tcase C.GL_FLOAT:\n\t\treturn Uniform1f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC2:\n\t\treturn Uniform2f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC3:\n\t\treturn Uniform3f{uniformBase{loc}}\n\tcase C.GL_FLOAT_VEC4:\n\t\treturn Uniform4f{uniformBase{loc}}\n\tcase C.GL_INT:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_INT_VEC2:\n\t\treturn Uniform2i{uniformBase{loc}}\n\tcase C.GL_INT_VEC3:\n\t\treturn Uniform3i{uniformBase{loc}}\n\tcase C.GL_INT_VEC4:\n\t\treturn Uniform4i{uniformBase{loc}}\n\tcase C.GL_BOOL:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC2:\n\t\treturn Uniform2i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC3:\n\t\treturn Uniform3i{uniformBase{loc}}\n\tcase C.GL_BOOL_VEC4:\n\t\treturn Uniform4i{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT2:\n\t\treturn UniformMatrix2f{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT3:\n\t\treturn UniformMatrix3f{uniformBase{loc}}\n\tcase C.GL_FLOAT_MAT4:\n\t\treturn UniformMatrix4f{uniformBase{loc}}\n\tcase C.GL_SAMPLER_2D:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tcase C.GL_SAMPLER_CUBE:\n\t\treturn Uniform1i{uniformBase{loc}}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported uniform type: %#x\", datatype))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/globalaccelerator\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n)\n\nfunc resourceAwsGlobalAcceleratorListener() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsGlobalAcceleratorListenerCreate,\n\t\tRead: resourceAwsGlobalAcceleratorListenerRead,\n\t\tUpdate: resourceAwsGlobalAcceleratorListenerUpdate,\n\t\tDelete: resourceAwsGlobalAcceleratorListenerDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"accelerator_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"client_affinity\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: globalaccelerator.ClientAffinityNone,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tglobalaccelerator.ClientAffinityNone,\n\t\t\t\t\tglobalaccelerator.ClientAffinitySourceIp,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"protocol\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tglobalaccelerator.ProtocolTcp,\n\t\t\t\t\tglobalaccelerator.ProtocolUdp,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"port_range\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tMinItems: 1,\n\t\t\t\tMaxItems: 10,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": {\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tValidateFunc: validation.IntBetween(0, 65535),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"to_port\": {\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tValidateFunc: validation.IntBetween(0, 65535),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsGlobalAcceleratorListenerCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.CreateListenerInput{\n\t\tAcceleratorArn: aws.String(d.Get(\"accelerator_arn\").(string)),\n\t\tClientAffinity: aws.String(d.Get(\"client_affinity\").(string)),\n\t\tIdempotencyToken: aws.String(resource.UniqueId()),\n\t\tProtocol: aws.String(d.Get(\"protocol\").(string)),\n\t\tPortRanges: resourceAwsGlobalAcceleratorListenerExpandPortRanges(d.Get(\"port_range\").(*schema.Set).List()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Create Global Accelerator listener: %s\", opts)\n\n\tresp, err := conn.CreateListener(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Global Accelerator listener: %s\", err)\n\t}\n\n\td.SetId(*resp.Listener.ListenerArn)\n\n\t\/\/ Creating a listener triggers the accelerator to change status to InPending\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{globalaccelerator.AcceleratorStatusInProgress},\n\t\tTarget: []string{globalaccelerator.AcceleratorStatusDeployed},\n\t\tRefresh: resourceAwsGlobalAcceleratorAcceleratorStateRefreshFunc(conn, d.Get(\"accelerator_arn\").(string)),\n\t\tTimeout: 5 * time.Minute,\n\t}\n\n\tlog.Printf(\"[DEBUG] Waiting for Global Accelerator listener (%s) availability\", d.Id())\n\t_, err = stateConf.WaitForState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for Global Accelerator listener (%s) availability: %s\", d.Id(), err)\n\t}\n\n\treturn resourceAwsGlobalAcceleratorListenerRead(d, meta)\n}\n\nfunc resourceAwsGlobalAcceleratorListenerRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\tlistener, err := resourceAwsGlobalAcceleratorListenerRetrieve(conn, d.Id())\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading Global Accelerator listener: %s\", err)\n\t}\n\n\tif listener == nil {\n\t\tlog.Printf(\"[WARN] Global Accelerator listener (%s) not found, removing from state\", d.Id())\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tacceleratorArn, err := resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(d.Id())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"accelerator_arn\", acceleratorArn)\n\td.Set(\"client_affinity\", listener.ClientAffinity)\n\td.Set(\"protocol\", listener.Protocol)\n\tif err := d.Set(\"port_range\", resourceAwsGlobalAcceleratorListenerFlattenPortRanges(listener.PortRanges)); err != nil {\n\t\treturn fmt.Errorf(\"error setting port_range: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(listenerArn string) (string, error) {\n\tparts := strings.Split(listenerArn, \"\/\")\n\tif len(parts) < 4 {\n\t\treturn \"\", fmt.Errorf(\"Unable to parse accelerator ARN from %s\", listenerArn)\n\t}\n\treturn strings.Join(parts[0:2], \"\/\"), nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerExpandPortRanges(portRanges []interface{}) []*globalaccelerator.PortRange {\n\tout := make([]*globalaccelerator.PortRange, len(portRanges))\n\n\tfor i, raw := range portRanges {\n\t\tportRange := raw.(map[string]interface{})\n\t\tm := globalaccelerator.PortRange{}\n\n\t\tm.FromPort = aws.Int64(int64(portRange[\"from_port\"].(int)))\n\t\tm.ToPort = aws.Int64(int64(portRange[\"to_port\"].(int)))\n\n\t\tout[i] = &m\n\t}\n\n\treturn out\n}\n\nfunc resourceAwsGlobalAcceleratorListenerFlattenPortRanges(portRanges []*globalaccelerator.PortRange) []interface{} {\n\tout := make([]interface{}, len(portRanges))\n\n\tfor i, portRange := range portRanges {\n\t\tm := make(map[string]interface{})\n\n\t\tm[\"from_port\"] = aws.Int64Value(portRange.FromPort)\n\t\tm[\"to_port\"] = aws.Int64Value(portRange.ToPort)\n\n\t\tout[i] = m\n\t}\n\n\treturn out\n}\n\nfunc resourceAwsGlobalAcceleratorListenerRetrieve(conn *globalaccelerator.GlobalAccelerator, listenerArn string) (*globalaccelerator.Listener, error) {\n\tresp, err := conn.DescribeListener(&globalaccelerator.DescribeListenerInput{\n\t\tListenerArn: aws.String(listenerArn),\n\t})\n\n\tif err != nil {\n\t\tif isAWSErr(err, globalaccelerator.ErrCodeListenerNotFoundException, \"\") {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn resp.Listener, nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.UpdateListenerInput{\n\t\tClientAffinity: aws.String(d.Get(\"client_affinity\").(string)),\n\t\tListenerArn: aws.String(d.Id()),\n\t\tProtocol: aws.String(d.Get(\"protocol\").(string)),\n\t\tPortRanges: resourceAwsGlobalAcceleratorListenerExpandPortRanges(d.Get(\"port_range\").(*schema.Set).List()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Update Global Accelerator listener: %s\", opts)\n\n\t_, err := conn.UpdateListener(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating Global Accelerator listener: %s\", err)\n\t}\n\n\t\/\/ Creating a listener triggers the accelerator to change status to InPending\n\terr = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get(\"accelerator_arn\").(string))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn resourceAwsGlobalAcceleratorListenerRead(d, meta)\n}\n\nfunc resourceAwsGlobalAcceleratorListenerDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.DeleteListenerInput{\n\t\tListenerArn: aws.String(d.Id()),\n\t}\n\n\t_, err := conn.DeleteListener(opts)\n\tif err != nil {\n\t\tif isAWSErr(err, globalaccelerator.ErrCodeListenerNotFoundException, \"\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Error deleting Global Accelerator listener: %s\", err)\n\t}\n\n\t\/\/ Deleting a listener triggers the accelerator to change status to InPending\n\t\/\/ }\n\terr = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get(\"accelerator_arn\").(string))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Returns error where expected<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/globalaccelerator\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n)\n\nfunc resourceAwsGlobalAcceleratorListener() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsGlobalAcceleratorListenerCreate,\n\t\tRead: resourceAwsGlobalAcceleratorListenerRead,\n\t\tUpdate: resourceAwsGlobalAcceleratorListenerUpdate,\n\t\tDelete: resourceAwsGlobalAcceleratorListenerDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"accelerator_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"client_affinity\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: globalaccelerator.ClientAffinityNone,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tglobalaccelerator.ClientAffinityNone,\n\t\t\t\t\tglobalaccelerator.ClientAffinitySourceIp,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"protocol\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tglobalaccelerator.ProtocolTcp,\n\t\t\t\t\tglobalaccelerator.ProtocolUdp,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"port_range\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tMinItems: 1,\n\t\t\t\tMaxItems: 10,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": {\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tValidateFunc: validation.IntBetween(0, 65535),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"to_port\": {\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tValidateFunc: validation.IntBetween(0, 65535),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsGlobalAcceleratorListenerCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.CreateListenerInput{\n\t\tAcceleratorArn: aws.String(d.Get(\"accelerator_arn\").(string)),\n\t\tClientAffinity: aws.String(d.Get(\"client_affinity\").(string)),\n\t\tIdempotencyToken: aws.String(resource.UniqueId()),\n\t\tProtocol: aws.String(d.Get(\"protocol\").(string)),\n\t\tPortRanges: resourceAwsGlobalAcceleratorListenerExpandPortRanges(d.Get(\"port_range\").(*schema.Set).List()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Create Global Accelerator listener: %s\", opts)\n\n\tresp, err := conn.CreateListener(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Global Accelerator listener: %s\", err)\n\t}\n\n\td.SetId(*resp.Listener.ListenerArn)\n\n\t\/\/ Creating a listener triggers the accelerator to change status to InPending\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{globalaccelerator.AcceleratorStatusInProgress},\n\t\tTarget: []string{globalaccelerator.AcceleratorStatusDeployed},\n\t\tRefresh: resourceAwsGlobalAcceleratorAcceleratorStateRefreshFunc(conn, d.Get(\"accelerator_arn\").(string)),\n\t\tTimeout: 5 * time.Minute,\n\t}\n\n\tlog.Printf(\"[DEBUG] Waiting for Global Accelerator listener (%s) availability\", d.Id())\n\t_, err = stateConf.WaitForState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for Global Accelerator listener (%s) availability: %s\", d.Id(), err)\n\t}\n\n\treturn resourceAwsGlobalAcceleratorListenerRead(d, meta)\n}\n\nfunc resourceAwsGlobalAcceleratorListenerRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\tlistener, err := resourceAwsGlobalAcceleratorListenerRetrieve(conn, d.Id())\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading Global Accelerator listener: %s\", err)\n\t}\n\n\tif listener == nil {\n\t\tlog.Printf(\"[WARN] Global Accelerator listener (%s) not found, removing from state\", d.Id())\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tacceleratorArn, err := resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(d.Id())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"accelerator_arn\", acceleratorArn)\n\td.Set(\"client_affinity\", listener.ClientAffinity)\n\td.Set(\"protocol\", listener.Protocol)\n\tif err := d.Set(\"port_range\", resourceAwsGlobalAcceleratorListenerFlattenPortRanges(listener.PortRanges)); err != nil {\n\t\treturn fmt.Errorf(\"error setting port_range: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerParseAcceleratorArn(listenerArn string) (string, error) {\n\tparts := strings.Split(listenerArn, \"\/\")\n\tif len(parts) < 4 {\n\t\treturn \"\", fmt.Errorf(\"Unable to parse accelerator ARN from %s\", listenerArn)\n\t}\n\treturn strings.Join(parts[0:2], \"\/\"), nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerExpandPortRanges(portRanges []interface{}) []*globalaccelerator.PortRange {\n\tout := make([]*globalaccelerator.PortRange, len(portRanges))\n\n\tfor i, raw := range portRanges {\n\t\tportRange := raw.(map[string]interface{})\n\t\tm := globalaccelerator.PortRange{}\n\n\t\tm.FromPort = aws.Int64(int64(portRange[\"from_port\"].(int)))\n\t\tm.ToPort = aws.Int64(int64(portRange[\"to_port\"].(int)))\n\n\t\tout[i] = &m\n\t}\n\n\treturn out\n}\n\nfunc resourceAwsGlobalAcceleratorListenerFlattenPortRanges(portRanges []*globalaccelerator.PortRange) []interface{} {\n\tout := make([]interface{}, len(portRanges))\n\n\tfor i, portRange := range portRanges {\n\t\tm := make(map[string]interface{})\n\n\t\tm[\"from_port\"] = aws.Int64Value(portRange.FromPort)\n\t\tm[\"to_port\"] = aws.Int64Value(portRange.ToPort)\n\n\t\tout[i] = m\n\t}\n\n\treturn out\n}\n\nfunc resourceAwsGlobalAcceleratorListenerRetrieve(conn *globalaccelerator.GlobalAccelerator, listenerArn string) (*globalaccelerator.Listener, error) {\n\tresp, err := conn.DescribeListener(&globalaccelerator.DescribeListenerInput{\n\t\tListenerArn: aws.String(listenerArn),\n\t})\n\n\tif err != nil {\n\t\tif isAWSErr(err, globalaccelerator.ErrCodeListenerNotFoundException, \"\") {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn resp.Listener, nil\n}\n\nfunc resourceAwsGlobalAcceleratorListenerUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.UpdateListenerInput{\n\t\tClientAffinity: aws.String(d.Get(\"client_affinity\").(string)),\n\t\tListenerArn: aws.String(d.Id()),\n\t\tProtocol: aws.String(d.Get(\"protocol\").(string)),\n\t\tPortRanges: resourceAwsGlobalAcceleratorListenerExpandPortRanges(d.Get(\"port_range\").(*schema.Set).List()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Update Global Accelerator listener: %s\", opts)\n\n\t_, err := conn.UpdateListener(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error updating Global Accelerator listener: %s\", err)\n\t}\n\n\t\/\/ Creating a listener triggers the accelerator to change status to InPending\n\terr = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get(\"accelerator_arn\").(string))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn resourceAwsGlobalAcceleratorListenerRead(d, meta)\n}\n\nfunc resourceAwsGlobalAcceleratorListenerDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).globalacceleratorconn\n\n\topts := &globalaccelerator.DeleteListenerInput{\n\t\tListenerArn: aws.String(d.Id()),\n\t}\n\n\t_, err := conn.DeleteListener(opts)\n\tif err != nil {\n\t\tif isAWSErr(err, globalaccelerator.ErrCodeListenerNotFoundException, \"\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Error deleting Global Accelerator listener: %s\", err)\n\t}\n\n\t\/\/ Deleting a listener triggers the accelerator to change status to InPending\n\t\/\/ }\n\terr = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get(\"accelerator_arn\").(string))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\t\"github.com\/keybase\/client\/go\/protocol\/chat1\"\n\tgregor1 \"github.com\/keybase\/client\/go\/protocol\/gregor1\"\n)\n\ntype ChatUI struct {\n\tlibkb.Contextified\n\tterminal libkb.TerminalUI\n\tnoOutput bool\n\tlastPercentReported int\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadOutboxID(context.Context, chat1.ChatAttachmentUploadOutboxIDArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadStart(context.Context, chat1.ChatAttachmentUploadStartArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment upload \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadProgress(ctx context.Context, arg chat1.ChatAttachmentUploadProgressArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tpercent := int((100 * arg.BytesComplete) \/ arg.BytesTotal)\n\tif c.lastPercentReported == 0 || percent == 100 || percent-c.lastPercentReported >= 10 {\n\t\tw := c.terminal.ErrorWriter()\n\t\tfmt.Fprintf(w, \"Attachment upload progress %d%% (%d of %d bytes uploaded)\\n\", percent, arg.BytesComplete, arg.BytesTotal)\n\t\tc.lastPercentReported = percent\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment upload \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentPreviewUploadStart(context.Context, chat1.ChatAttachmentPreviewUploadStartArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment preview upload \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentPreviewUploadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment preview upload \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadStart(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment download \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadProgress(ctx context.Context, arg chat1.ChatAttachmentDownloadProgressArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tpercent := int((100 * arg.BytesComplete) \/ arg.BytesTotal)\n\tif c.lastPercentReported == 0 || percent == 100 || percent-c.lastPercentReported >= 10 {\n\t\tw := c.terminal.ErrorWriter()\n\t\tfmt.Fprintf(w, \"Attachment download progress %d%% (%d of %d bytes downloaded)\\n\", percent, arg.BytesComplete, arg.BytesTotal)\n\t\tc.lastPercentReported = percent\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment download \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxConversation(ctx context.Context, arg chat1.ChatInboxConversationArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxFailed(ctx context.Context, arg chat1.ChatInboxFailedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxUnverified(ctx context.Context, arg chat1.ChatInboxUnverifiedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatThreadCached(ctx context.Context, arg chat1.ChatThreadCachedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatThreadFull(ctx context.Context, arg chat1.ChatThreadFullArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatConfirmChannelDelete(ctx context.Context, arg chat1.ChatConfirmChannelDeleteArg) (bool, error) {\n\tterm := c.G().UI.GetTerminalUI()\n\tterm.Printf(\"WARNING: This will destroy this chat channel and remove it from all members' inbox\\n\\n\")\n\tconfirm := fmt.Sprintf(\"nuke %s\", arg.Channel)\n\tresponse, err := term.Prompt(PromptDescriptorDeleteRootTeam,\n\t\tfmt.Sprintf(\"** if you are sure, please type: %q > \", confirm))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn strings.TrimSpace(response) == confirm, nil\n}\n\nfunc (c *ChatUI) ChatSearchHit(ctx context.Context, arg chat1.ChatSearchHitArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tsearchHit := arg.SearchHit\n\tgetMsgPrefix := func(uiMsg *chat1.UIMessage) string {\n\t\tm := uiMsg.Valid()\n\t\tt := gregor1.FromTime(m.Ctime)\n\t\treturn fmt.Sprintf(\"[%s %s] \", m.SenderUsername, shortDurationFromNow(t))\n\t}\n\n\tgetContext := func(uiMsg *chat1.UIMessage) string {\n\t\tif uiMsg != nil && uiMsg.IsValid() && uiMsg.GetMessageType() == chat1.MessageType_TEXT {\n\t\t\tmsgBody := uiMsg.Valid().MessageBody.Text().Body\n\t\t\treturn getMsgPrefix(uiMsg) + msgBody + \"\\n\"\n\t\t}\n\t\treturn \"\"\n\t}\n\n\thighlightHits := func(uiMsg *chat1.UIMessage, hits []string) string {\n\t\tif uiMsg != nil && uiMsg.IsValid() && uiMsg.GetMessageType() == chat1.MessageType_TEXT {\n\t\t\tmsgBody := uiMsg.Valid().MessageBody.Text().Body\n\t\t\tvar hitText string\n\t\t\tfor _, hit := range hits {\n\t\t\t\thitText = strings.Replace(msgBody, hit, ColorString(c.G(), \"red\", hit), -1)\n\t\t\t}\n\t\t\treturn getMsgPrefix(uiMsg) + hitText\n\t\t}\n\t\treturn \"\"\n\t}\n\n\t\/\/ TODO: This should really use chat_cli_rendering.messageView, but we need\n\t\/\/ to refactor for UIMessage\n\thitText := highlightHits(searchHit.HitMessage, searchHit.Matches)\n\tif hitText != \"\" {\n\t\tw := c.terminal.ErrorWriter()\n\t\tfmt.Fprintf(w, getContext(searchHit.PrevMessage))\n\t\tfmt.Fprintln(w, hitText)\n\t\tfmt.Fprintf(w, getContext(searchHit.NextMessage))\n\t\tfmt.Fprintln(w, \"\")\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatSearchDone(ctx context.Context, arg chat1.ChatSearchDoneArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Search complete. Found %d results.\", arg.NumHits)\n\tfmt.Fprintln(w, \"\")\n\treturn nil\n}\n<commit_msg>Print chat search hits to stdout (#11739)<commit_after>\/\/ Copyright 2016 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\t\"github.com\/keybase\/client\/go\/protocol\/chat1\"\n\tgregor1 \"github.com\/keybase\/client\/go\/protocol\/gregor1\"\n)\n\ntype ChatUI struct {\n\tlibkb.Contextified\n\tterminal libkb.TerminalUI\n\tnoOutput bool\n\tlastPercentReported int\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadOutboxID(context.Context, chat1.ChatAttachmentUploadOutboxIDArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadStart(context.Context, chat1.ChatAttachmentUploadStartArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment upload \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadProgress(ctx context.Context, arg chat1.ChatAttachmentUploadProgressArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tpercent := int((100 * arg.BytesComplete) \/ arg.BytesTotal)\n\tif c.lastPercentReported == 0 || percent == 100 || percent-c.lastPercentReported >= 10 {\n\t\tw := c.terminal.ErrorWriter()\n\t\tfmt.Fprintf(w, \"Attachment upload progress %d%% (%d of %d bytes uploaded)\\n\", percent, arg.BytesComplete, arg.BytesTotal)\n\t\tc.lastPercentReported = percent\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentUploadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment upload \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentPreviewUploadStart(context.Context, chat1.ChatAttachmentPreviewUploadStartArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment preview upload \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentPreviewUploadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment preview upload \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadStart(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment download \"+ColorString(c.G(), \"green\", \"starting\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadProgress(ctx context.Context, arg chat1.ChatAttachmentDownloadProgressArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tpercent := int((100 * arg.BytesComplete) \/ arg.BytesTotal)\n\tif c.lastPercentReported == 0 || percent == 100 || percent-c.lastPercentReported >= 10 {\n\t\tw := c.terminal.ErrorWriter()\n\t\tfmt.Fprintf(w, \"Attachment download progress %d%% (%d of %d bytes downloaded)\\n\", percent, arg.BytesComplete, arg.BytesTotal)\n\t\tc.lastPercentReported = percent\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatAttachmentDownloadDone(context.Context, int) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Attachment download \"+ColorString(c.G(), \"magenta\", \"finished\")+\"\\n\")\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxConversation(ctx context.Context, arg chat1.ChatInboxConversationArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxFailed(ctx context.Context, arg chat1.ChatInboxFailedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatInboxUnverified(ctx context.Context, arg chat1.ChatInboxUnverifiedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatThreadCached(ctx context.Context, arg chat1.ChatThreadCachedArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatThreadFull(ctx context.Context, arg chat1.ChatThreadFullArg) error {\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatConfirmChannelDelete(ctx context.Context, arg chat1.ChatConfirmChannelDeleteArg) (bool, error) {\n\tterm := c.G().UI.GetTerminalUI()\n\tterm.Printf(\"WARNING: This will destroy this chat channel and remove it from all members' inbox\\n\\n\")\n\tconfirm := fmt.Sprintf(\"nuke %s\", arg.Channel)\n\tresponse, err := term.Prompt(PromptDescriptorDeleteRootTeam,\n\t\tfmt.Sprintf(\"** if you are sure, please type: %q > \", confirm))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn strings.TrimSpace(response) == confirm, nil\n}\n\nfunc (c *ChatUI) ChatSearchHit(ctx context.Context, arg chat1.ChatSearchHitArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tsearchHit := arg.SearchHit\n\tgetMsgPrefix := func(uiMsg *chat1.UIMessage) string {\n\t\tm := uiMsg.Valid()\n\t\tt := gregor1.FromTime(m.Ctime)\n\t\treturn fmt.Sprintf(\"[%s %s] \", m.SenderUsername, shortDurationFromNow(t))\n\t}\n\n\tgetContext := func(uiMsg *chat1.UIMessage) string {\n\t\tif uiMsg != nil && uiMsg.IsValid() && uiMsg.GetMessageType() == chat1.MessageType_TEXT {\n\t\t\tmsgBody := uiMsg.Valid().MessageBody.Text().Body\n\t\t\treturn getMsgPrefix(uiMsg) + msgBody + \"\\n\"\n\t\t}\n\t\treturn \"\"\n\t}\n\n\thighlightHits := func(uiMsg *chat1.UIMessage, hits []string) string {\n\t\tif uiMsg != nil && uiMsg.IsValid() && uiMsg.GetMessageType() == chat1.MessageType_TEXT {\n\t\t\tmsgBody := uiMsg.Valid().MessageBody.Text().Body\n\t\t\tvar hitText string\n\t\t\tfor _, hit := range hits {\n\t\t\t\thitText = strings.Replace(msgBody, hit, ColorString(c.G(), \"red\", hit), -1)\n\t\t\t}\n\t\t\treturn getMsgPrefix(uiMsg) + hitText\n\t\t}\n\t\treturn \"\"\n\t}\n\n\t\/\/ TODO: This should really use chat_cli_rendering.messageView, but we need\n\t\/\/ to refactor for UIMessage\n\thitText := highlightHits(searchHit.HitMessage, searchHit.Matches)\n\tif hitText != \"\" {\n\t\tw := c.terminal.OutputWriter()\n\t\tfmt.Fprintf(w, getContext(searchHit.PrevMessage))\n\t\tfmt.Fprintln(w, hitText)\n\t\tfmt.Fprintf(w, getContext(searchHit.NextMessage))\n\t\tfmt.Fprintln(w, \"\")\n\t}\n\treturn nil\n}\n\nfunc (c *ChatUI) ChatSearchDone(ctx context.Context, arg chat1.ChatSearchDoneArg) error {\n\tif c.noOutput {\n\t\treturn nil\n\t}\n\tw := c.terminal.ErrorWriter()\n\tfmt.Fprintf(w, \"Search complete. Found %d results.\", arg.NumHits)\n\tfmt.Fprintln(w, \"\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\npackage keyset\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"google.golang.org\/protobuf\/encoding\/protojson\"\n\t\"google.golang.org\/protobuf\/proto\"\n\n\ttinkpb \"github.com\/google\/tink\/go\/proto\/tink_go_proto\"\n)\n\n\/\/ JSONReader deserializes a keyset from json format.\ntype JSONReader struct {\n\tr io.Reader\n\tj *protojson.UnmarshalOptions\n}\n\n\/\/ NewJSONReader returns new JSONReader that will read from r.\nfunc NewJSONReader(r io.Reader) *JSONReader {\n\treturn &JSONReader{\n\t\tr: r,\n\t\tj: &protojson.UnmarshalOptions{},\n\t}\n}\n\n\/\/ Read parses a (cleartext) keyset from the underlying io.Reader.\nfunc (bkr *JSONReader) Read() (*tinkpb.Keyset, error) {\n\tkeyset := &tinkpb.Keyset{}\n\n\tif err := bkr.readJSON(bkr.r, keyset); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keyset, nil\n}\n\n\/\/ ReadEncrypted parses an EncryptedKeyset from the underlying io.Reader.\nfunc (bkr *JSONReader) ReadEncrypted() (*tinkpb.EncryptedKeyset, error) {\n\tkeyset := &tinkpb.EncryptedKeyset{}\n\n\tif err := bkr.readJSON(bkr.r, keyset); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keyset, nil\n}\n\nfunc (bkr *JSONReader) readJSON(r io.Reader, msg proto.Message) error {\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn bkr.j.Unmarshal(b, msg)\n}\n\n\/\/ JSONWriter serializes a keyset into binary proto format.\ntype JSONWriter struct {\n\tw io.Writer\n\tj *protojson.MarshalOptions\n}\n\n\/\/ NewJSONWriter returns a new JSONWriter that will write to w.\nfunc NewJSONWriter(w io.Writer) *JSONWriter {\n\treturn &JSONWriter{\n\t\tw: w,\n\t\tj: &protojson.MarshalOptions{\n\t\t\tEmitUnpopulated: true,\n\t\t\tIndent: \"\",\n\t\t},\n\t}\n}\n\n\/\/ Write writes the keyset to the underlying io.Writer.\nfunc (bkw *JSONWriter) Write(keyset *tinkpb.Keyset) error {\n\treturn bkw.writeJSON(bkw.w, keyset)\n}\n\n\/\/ WriteEncrypted writes the encrypted keyset to the underlying io.Writer.\nfunc (bkw *JSONWriter) WriteEncrypted(keyset *tinkpb.EncryptedKeyset) error {\n\treturn bkw.writeJSON(bkw.w, keyset)\n}\n\nfunc (bkw *JSONWriter) writeJSON(w io.Writer, msg proto.Message) error {\n\tb, err := bkw.j.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(b)\n\treturn err\n}\n<commit_msg>Fix typo in JSONWriter's doc comment<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\npackage keyset\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"google.golang.org\/protobuf\/encoding\/protojson\"\n\t\"google.golang.org\/protobuf\/proto\"\n\n\ttinkpb \"github.com\/google\/tink\/go\/proto\/tink_go_proto\"\n)\n\n\/\/ JSONReader deserializes a keyset from json format.\ntype JSONReader struct {\n\tr io.Reader\n\tj *protojson.UnmarshalOptions\n}\n\n\/\/ NewJSONReader returns new JSONReader that will read from r.\nfunc NewJSONReader(r io.Reader) *JSONReader {\n\treturn &JSONReader{\n\t\tr: r,\n\t\tj: &protojson.UnmarshalOptions{},\n\t}\n}\n\n\/\/ Read parses a (cleartext) keyset from the underlying io.Reader.\nfunc (bkr *JSONReader) Read() (*tinkpb.Keyset, error) {\n\tkeyset := &tinkpb.Keyset{}\n\n\tif err := bkr.readJSON(bkr.r, keyset); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keyset, nil\n}\n\n\/\/ ReadEncrypted parses an EncryptedKeyset from the underlying io.Reader.\nfunc (bkr *JSONReader) ReadEncrypted() (*tinkpb.EncryptedKeyset, error) {\n\tkeyset := &tinkpb.EncryptedKeyset{}\n\n\tif err := bkr.readJSON(bkr.r, keyset); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keyset, nil\n}\n\nfunc (bkr *JSONReader) readJSON(r io.Reader, msg proto.Message) error {\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn bkr.j.Unmarshal(b, msg)\n}\n\n\/\/ JSONWriter serializes a keyset into json format.\ntype JSONWriter struct {\n\tw io.Writer\n\tj *protojson.MarshalOptions\n}\n\n\/\/ NewJSONWriter returns a new JSONWriter that will write to w.\nfunc NewJSONWriter(w io.Writer) *JSONWriter {\n\treturn &JSONWriter{\n\t\tw: w,\n\t\tj: &protojson.MarshalOptions{\n\t\t\tEmitUnpopulated: true,\n\t\t\tIndent: \"\",\n\t\t},\n\t}\n}\n\n\/\/ Write writes the keyset to the underlying io.Writer.\nfunc (bkw *JSONWriter) Write(keyset *tinkpb.Keyset) error {\n\treturn bkw.writeJSON(bkw.w, keyset)\n}\n\n\/\/ WriteEncrypted writes the encrypted keyset to the underlying io.Writer.\nfunc (bkw *JSONWriter) WriteEncrypted(keyset *tinkpb.EncryptedKeyset) error {\n\treturn bkw.writeJSON(bkw.w, keyset)\n}\n\nfunc (bkw *JSONWriter) writeJSON(w io.Writer, msg proto.Message) error {\n\tb, err := bkw.j.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(b)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package libkb\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n)\n\ntype Feature string\ntype FeatureFlags []Feature\n\nconst (\n\tEnvironmentFeatureAllowHighSkips = Feature(\"env_allow_high_skips\")\n\tEnvironmentFeatureMerkleCheckpoint = Feature(\"merkle_checkpoint\")\n)\n\n\/\/ StringToFeatureFlags returns a set of feature flags\nfunc StringToFeatureFlags(s string) (ret FeatureFlags) {\n\ts = strings.TrimSpace(s)\n\tif len(s) == 0 {\n\t\treturn ret\n\t}\n\tv := strings.Split(s, \",\")\n\tfor _, f := range v {\n\t\tret = append(ret, Feature(strings.TrimSpace(f)))\n\t}\n\treturn ret\n}\n\n\/\/ Admin returns true if the admin feature set is on or the user is a keybase\n\/\/ admin.\nfunc (set FeatureFlags) Admin(uid keybase1.UID) bool {\n\tfor _, f := range set {\n\t\tif f == Feature(\"admin\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn IsKeybaseAdmin(uid)\n}\n\nfunc (set FeatureFlags) HasFeature(feature Feature) bool {\n\tfor _, f := range set {\n\t\tif f == feature {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (set FeatureFlags) Empty() bool {\n\treturn len(set) == 0\n}\n\ntype featureSlot struct {\n\tsync.Mutex\n\ton bool\n\tcacheUntil time.Time\n}\n\n\/\/ FeatureFlagSet is a set of feature flags for a given user. It will keep track\n\/\/ of whether a feature is on or off, and how long until we should check to\n\/\/ update\ntype FeatureFlagSet struct {\n\tsync.Mutex\n\tfeatures map[Feature]*featureSlot\n}\n\nconst (\n\tFeatureFTL = Feature(\"ftl\")\n\tFeatureBoxAuditor = Feature(\"box_auditor3\")\n\tExperimentalGenericProofs = Feature(\"experimental_generic_proofs\")\n\tFeatureCheckForHiddenChainSupport = Feature(\"check_for_hidden_chain_support\")\n\n\t\/\/ Show journeycards. This 'preview' flag is for development and admin testing.\n\t\/\/ This 'preview' flag is known to clients with old buggy journeycard code. For that reason, don't enable it for external users.\n\tFeatureJourneycardPreview = Feature(\"journeycard_preview\")\n\tFeatureJourneycard = Feature(\"journeycard\")\n)\n\n\/\/ NewFeatureFlagSet makes a new set of feature flags.\nfunc NewFeatureFlagSet() *FeatureFlagSet {\n\treturn &FeatureFlagSet{\n\t\tfeatures: make(map[Feature]*featureSlot),\n\t}\n}\n\nfunc (s *FeatureFlagSet) getOrMakeSlot(f Feature) *featureSlot {\n\ts.Lock()\n\tdefer s.Unlock()\n\tret := s.features[f]\n\tif ret != nil {\n\t\treturn ret\n\t}\n\tret = &featureSlot{}\n\ts.features[f] = ret\n\treturn ret\n}\n\ntype rawFeatureSlot struct {\n\tValue bool `json:\"value\"`\n\tCacheSec int `json:\"cache_sec\"`\n}\n\ntype rawFeatures struct {\n\tStatus AppStatus `json:\"status\"`\n\tFeatures map[string]rawFeatureSlot `json:\"features\"`\n}\n\nfunc (r *rawFeatures) GetAppStatus() *AppStatus {\n\treturn &r.Status\n}\n\nfunc (f *featureSlot) readFrom(m MetaContext, r rawFeatureSlot) {\n\tf.on = r.Value\n\tf.cacheUntil = m.G().Clock().Now().Add(time.Duration(r.CacheSec) * time.Second)\n}\n\nfunc (s *FeatureFlagSet) InvalidateCache(m MetaContext, f Feature) {\n\tfeatureSlot := s.getOrMakeSlot(f)\n\tfeatureSlot.Lock()\n\tdefer featureSlot.Unlock()\n\tfeatureSlot.cacheUntil = m.G().Clock().Now().Add(time.Duration(-1) * time.Second)\n}\n\nfunc (s *FeatureFlagSet) EnableImmediately(m MetaContext, f Feature) error {\n\tif m.G().Env.GetRunMode() == ProductionRunMode {\n\t\treturn errors.New(\"EnableImmediately is a dev\/test-only path\")\n\t}\n\ts.InvalidateCache(m, f)\n\t_, err := m.G().API.Post(m, APIArg{\n\t\tEndpoint: \"test\/feature\",\n\t\tSessionType: APISessionTypeREQUIRED,\n\t\tArgs: HTTPArgs{\n\t\t\t\"feature\": S{Val: string(f)},\n\t\t\t\"value\": I{Val: 1},\n\t\t\t\"cache_sec\": I{Val: 100},\n\t\t},\n\t})\n\treturn err\n}\n\n\/\/ EnabledWithError returns if the given feature is enabled, it will return true if it's\n\/\/ enabled, and an error if one occurred.\nfunc (s *FeatureFlagSet) EnabledWithError(m MetaContext, f Feature) (on bool, err error) {\n\tm = m.WithLogTag(\"FEAT\")\n\tslot := s.getOrMakeSlot(f)\n\tslot.Lock()\n\tdefer slot.Unlock()\n\tif m.G().Clock().Now().Before(slot.cacheUntil) {\n\t\tm.Debug(\"Feature (cached) %q -> %v\", f, slot.on)\n\t\treturn slot.on, nil\n\t}\n\tvar raw rawFeatures\n\targ := NewAPIArg(\"user\/features\")\n\targ.SessionType = APISessionTypeREQUIRED\n\targ.Args = HTTPArgs{\n\t\t\"features\": S{Val: string(f)},\n\t}\n\terr = m.G().API.GetDecode(m, arg, &raw)\n\tswitch err.(type) {\n\tcase nil:\n\tcase LoginRequiredError:\n\t\t\/\/ No features for logged-out users\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, err\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\trawFeature, ok := raw.Features[string(f)]\n\tif !ok {\n\t\tm.Info(\"Feature %q wasn't returned from server\", f)\n\t\treturn false, nil\n\t}\n\tslot.readFrom(m, rawFeature)\n\tm.Debug(\"Feature (fetched) %q -> %v (will cache for %ds)\", f, slot.on, rawFeature.CacheSec)\n\treturn slot.on, nil\n}\n\n\/\/ Enabled returns if the feature flag is enabled. It ignore errors and just acts\n\/\/ as if the feature is off.\nfunc (s *FeatureFlagSet) Enabled(m MetaContext, f Feature) (on bool) {\n\ton, err := s.EnabledWithError(m, f)\n\tif err != nil {\n\t\tm.Info(\"Error checking feature %q: %v\", f, err)\n\t\treturn false\n\t}\n\treturn on\n}\n\n\/\/ Clear clears out the cached feature flags, for instance if the user\n\/\/ is going to logout.\nfunc (s *FeatureFlagSet) Clear() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.features = make(map[Feature]*featureSlot)\n}\n\n\/\/ FeatureFlagGate allows the server to disable certain features by replying with a\n\/\/ FEATURE_FLAG API status code, which is then translated into a FeatureFlagError.\n\/\/ We cache these errors for a given amount of time, so we're not spamming the\n\/\/ same attempt over and over again.\ntype FeatureFlagGate struct {\n\tsync.Mutex\n\tlastCheck time.Time\n\tlastError error\n\tfeature Feature\n\tcacheFor time.Duration\n}\n\n\/\/ NewFeatureFlagGate makes a gate for the given feature that will cache for the given\n\/\/ duration.\nfunc NewFeatureFlagGate(f Feature, d time.Duration) *FeatureFlagGate {\n\treturn &FeatureFlagGate{\n\t\tfeature: f,\n\t\tcacheFor: d,\n\t}\n}\n\n\/\/ DigestError should be called on the result of an API call. It will allow this gate\n\/\/ to digest the error and maybe set up its internal caching for when to retry this\n\/\/ feature.\nfunc (f *FeatureFlagGate) DigestError(m MetaContext, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tffe, ok := err.(FeatureFlagError)\n\tif !ok {\n\t\treturn\n\t}\n\tif ffe.Feature() != f.feature {\n\t\tm.Debug(\"Got feature flag error for wrong feature: %v\", err)\n\t\treturn\n\t}\n\n\tm.Debug(\"Server reports feature %q is flagged off\", f.feature)\n\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.lastCheck = m.G().Clock().Now()\n\tf.lastError = err\n}\n\n\/\/ ErrorIfFlagged should be called to avoid a feature if it's recently\n\/\/ been feature-flagged \"off\" by the server. In that case, it will return\n\/\/ the error that was originally returned by the server.\nfunc (f *FeatureFlagGate) ErrorIfFlagged(m MetaContext) (err error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tif f.lastError == nil {\n\t\treturn nil\n\t}\n\tdiff := m.G().Clock().Now().Sub(f.lastCheck)\n\tif diff > f.cacheFor {\n\t\tm.Debug(\"Feature flag %q expired %d ago, let's give it another try\", f.feature, diff)\n\t\tf.lastError = nil\n\t\tf.lastCheck = time.Time{}\n\t}\n\treturn f.lastError\n}\n\nfunc (f *FeatureFlagGate) Clear() {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.lastError = nil\n}\n<commit_msg>downgrade log level (#22859)<commit_after>package libkb\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n)\n\ntype Feature string\ntype FeatureFlags []Feature\n\nconst (\n\tEnvironmentFeatureAllowHighSkips = Feature(\"env_allow_high_skips\")\n\tEnvironmentFeatureMerkleCheckpoint = Feature(\"merkle_checkpoint\")\n)\n\n\/\/ StringToFeatureFlags returns a set of feature flags\nfunc StringToFeatureFlags(s string) (ret FeatureFlags) {\n\ts = strings.TrimSpace(s)\n\tif len(s) == 0 {\n\t\treturn ret\n\t}\n\tv := strings.Split(s, \",\")\n\tfor _, f := range v {\n\t\tret = append(ret, Feature(strings.TrimSpace(f)))\n\t}\n\treturn ret\n}\n\n\/\/ Admin returns true if the admin feature set is on or the user is a keybase\n\/\/ admin.\nfunc (set FeatureFlags) Admin(uid keybase1.UID) bool {\n\tfor _, f := range set {\n\t\tif f == Feature(\"admin\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn IsKeybaseAdmin(uid)\n}\n\nfunc (set FeatureFlags) HasFeature(feature Feature) bool {\n\tfor _, f := range set {\n\t\tif f == feature {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (set FeatureFlags) Empty() bool {\n\treturn len(set) == 0\n}\n\ntype featureSlot struct {\n\tsync.Mutex\n\ton bool\n\tcacheUntil time.Time\n}\n\n\/\/ FeatureFlagSet is a set of feature flags for a given user. It will keep track\n\/\/ of whether a feature is on or off, and how long until we should check to\n\/\/ update\ntype FeatureFlagSet struct {\n\tsync.Mutex\n\tfeatures map[Feature]*featureSlot\n}\n\nconst (\n\tFeatureFTL = Feature(\"ftl\")\n\tFeatureBoxAuditor = Feature(\"box_auditor3\")\n\tExperimentalGenericProofs = Feature(\"experimental_generic_proofs\")\n\tFeatureCheckForHiddenChainSupport = Feature(\"check_for_hidden_chain_support\")\n\n\t\/\/ Show journeycards. This 'preview' flag is for development and admin testing.\n\t\/\/ This 'preview' flag is known to clients with old buggy journeycard code. For that reason, don't enable it for external users.\n\tFeatureJourneycardPreview = Feature(\"journeycard_preview\")\n\tFeatureJourneycard = Feature(\"journeycard\")\n)\n\n\/\/ NewFeatureFlagSet makes a new set of feature flags.\nfunc NewFeatureFlagSet() *FeatureFlagSet {\n\treturn &FeatureFlagSet{\n\t\tfeatures: make(map[Feature]*featureSlot),\n\t}\n}\n\nfunc (s *FeatureFlagSet) getOrMakeSlot(f Feature) *featureSlot {\n\ts.Lock()\n\tdefer s.Unlock()\n\tret := s.features[f]\n\tif ret != nil {\n\t\treturn ret\n\t}\n\tret = &featureSlot{}\n\ts.features[f] = ret\n\treturn ret\n}\n\ntype rawFeatureSlot struct {\n\tValue bool `json:\"value\"`\n\tCacheSec int `json:\"cache_sec\"`\n}\n\ntype rawFeatures struct {\n\tStatus AppStatus `json:\"status\"`\n\tFeatures map[string]rawFeatureSlot `json:\"features\"`\n}\n\nfunc (r *rawFeatures) GetAppStatus() *AppStatus {\n\treturn &r.Status\n}\n\nfunc (f *featureSlot) readFrom(m MetaContext, r rawFeatureSlot) {\n\tf.on = r.Value\n\tf.cacheUntil = m.G().Clock().Now().Add(time.Duration(r.CacheSec) * time.Second)\n}\n\nfunc (s *FeatureFlagSet) InvalidateCache(m MetaContext, f Feature) {\n\tfeatureSlot := s.getOrMakeSlot(f)\n\tfeatureSlot.Lock()\n\tdefer featureSlot.Unlock()\n\tfeatureSlot.cacheUntil = m.G().Clock().Now().Add(time.Duration(-1) * time.Second)\n}\n\nfunc (s *FeatureFlagSet) EnableImmediately(m MetaContext, f Feature) error {\n\tif m.G().Env.GetRunMode() == ProductionRunMode {\n\t\treturn errors.New(\"EnableImmediately is a dev\/test-only path\")\n\t}\n\ts.InvalidateCache(m, f)\n\t_, err := m.G().API.Post(m, APIArg{\n\t\tEndpoint: \"test\/feature\",\n\t\tSessionType: APISessionTypeREQUIRED,\n\t\tArgs: HTTPArgs{\n\t\t\t\"feature\": S{Val: string(f)},\n\t\t\t\"value\": I{Val: 1},\n\t\t\t\"cache_sec\": I{Val: 100},\n\t\t},\n\t})\n\treturn err\n}\n\n\/\/ EnabledWithError returns if the given feature is enabled, it will return true if it's\n\/\/ enabled, and an error if one occurred.\nfunc (s *FeatureFlagSet) EnabledWithError(m MetaContext, f Feature) (on bool, err error) {\n\tm = m.WithLogTag(\"FEAT\")\n\tslot := s.getOrMakeSlot(f)\n\tslot.Lock()\n\tdefer slot.Unlock()\n\tif m.G().Clock().Now().Before(slot.cacheUntil) {\n\t\tm.Debug(\"Feature (cached) %q -> %v\", f, slot.on)\n\t\treturn slot.on, nil\n\t}\n\tvar raw rawFeatures\n\targ := NewAPIArg(\"user\/features\")\n\targ.SessionType = APISessionTypeREQUIRED\n\targ.Args = HTTPArgs{\n\t\t\"features\": S{Val: string(f)},\n\t}\n\terr = m.G().API.GetDecode(m, arg, &raw)\n\tswitch err.(type) {\n\tcase nil:\n\tcase LoginRequiredError:\n\t\t\/\/ No features for logged-out users\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, err\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\trawFeature, ok := raw.Features[string(f)]\n\tif !ok {\n\t\tm.Debug(\"Feature %q wasn't returned from server\", f)\n\t\treturn false, nil\n\t}\n\tslot.readFrom(m, rawFeature)\n\tm.Debug(\"Feature (fetched) %q -> %v (will cache for %ds)\", f, slot.on, rawFeature.CacheSec)\n\treturn slot.on, nil\n}\n\n\/\/ Enabled returns if the feature flag is enabled. It ignore errors and just acts\n\/\/ as if the feature is off.\nfunc (s *FeatureFlagSet) Enabled(m MetaContext, f Feature) (on bool) {\n\ton, err := s.EnabledWithError(m, f)\n\tif err != nil {\n\t\tm.Debug(\"Error checking feature %q: %v\", f, err)\n\t\treturn false\n\t}\n\treturn on\n}\n\n\/\/ Clear clears out the cached feature flags, for instance if the user\n\/\/ is going to logout.\nfunc (s *FeatureFlagSet) Clear() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.features = make(map[Feature]*featureSlot)\n}\n\n\/\/ FeatureFlagGate allows the server to disable certain features by replying with a\n\/\/ FEATURE_FLAG API status code, which is then translated into a FeatureFlagError.\n\/\/ We cache these errors for a given amount of time, so we're not spamming the\n\/\/ same attempt over and over again.\ntype FeatureFlagGate struct {\n\tsync.Mutex\n\tlastCheck time.Time\n\tlastError error\n\tfeature Feature\n\tcacheFor time.Duration\n}\n\n\/\/ NewFeatureFlagGate makes a gate for the given feature that will cache for the given\n\/\/ duration.\nfunc NewFeatureFlagGate(f Feature, d time.Duration) *FeatureFlagGate {\n\treturn &FeatureFlagGate{\n\t\tfeature: f,\n\t\tcacheFor: d,\n\t}\n}\n\n\/\/ DigestError should be called on the result of an API call. It will allow this gate\n\/\/ to digest the error and maybe set up its internal caching for when to retry this\n\/\/ feature.\nfunc (f *FeatureFlagGate) DigestError(m MetaContext, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tffe, ok := err.(FeatureFlagError)\n\tif !ok {\n\t\treturn\n\t}\n\tif ffe.Feature() != f.feature {\n\t\tm.Debug(\"Got feature flag error for wrong feature: %v\", err)\n\t\treturn\n\t}\n\n\tm.Debug(\"Server reports feature %q is flagged off\", f.feature)\n\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.lastCheck = m.G().Clock().Now()\n\tf.lastError = err\n}\n\n\/\/ ErrorIfFlagged should be called to avoid a feature if it's recently\n\/\/ been feature-flagged \"off\" by the server. In that case, it will return\n\/\/ the error that was originally returned by the server.\nfunc (f *FeatureFlagGate) ErrorIfFlagged(m MetaContext) (err error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tif f.lastError == nil {\n\t\treturn nil\n\t}\n\tdiff := m.G().Clock().Now().Sub(f.lastCheck)\n\tif diff > f.cacheFor {\n\t\tm.Debug(\"Feature flag %q expired %d ago, let's give it another try\", f.feature, diff)\n\t\tf.lastError = nil\n\t\tf.lastCheck = time.Time{}\n\t}\n\treturn f.lastError\n}\n\nfunc (f *FeatureFlagGate) Clear() {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.lastError = nil\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\nconst (\n\tSuperBlockSize = 8\n)\n\ntype SuperBlock struct {\n\tVersion Version\n\tReplicaType ReplicationType\n}\n\nfunc (s *SuperBlock) Bytes() []byte {\n\theader := make([]byte, SuperBlockSize)\n\theader[0] = byte(s.Version)\n\theader[1] = s.ReplicaType.Byte()\n\treturn header\n}\n\ntype Volume struct {\n\tId VolumeId\n\tdir string\n\tdataFile *os.File\n\tnm NeedleMapper\n\treadOnly bool\n\n\tSuperBlock\n\n\taccessLock sync.Mutex\n}\n\nfunc NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: replicationType}\n\te = v.load(true)\n\treturn\n}\nfunc LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: CopyNil}\n\te = v.load(false)\n\treturn\n}\nfunc (v *Volume) load(alsoLoadIndex bool) error {\n\tvar e error\n\tfileName := path.Join(v.dir, v.Id.String())\n\tv.dataFile, e = os.OpenFile(fileName+\".dat\", os.O_RDWR|os.O_CREATE, 0644)\n\tif e != nil {\n\t\tif !os.IsPermission(e) {\n\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tif v.dataFile, e = os.Open(fileName + \".dat\"); e != nil {\n\t\t\treturn fmt.Errorf(\"cannot open Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tlog.Printf(\"opening \" + fileName + \".dat in READONLY mode\")\n\t\tv.readOnly = true\n\t}\n\tif v.ReplicaType == CopyNil {\n\t\te = v.readSuperBlock()\n\t} else {\n\t\te = v.maybeWriteSuperBlock()\n\t}\n\tif e == nil && alsoLoadIndex {\n\t\tvar indexFile *os.File\n\t\tif v.readOnly {\n\t\t\tif indexFile, e = os.Open(fileName + \".idx\"); e != nil && !os.IsNotExist(e) {\n\t\t\t\treturn fmt.Errorf(\"cannot open index file %s.idx: %s\", fileName, e)\n\t\t\t}\n\t\t\tif indexFile != nil {\n\t\t\t\tlog.Printf(\"converting %s.idx to %s.cdb\", fileName, fileName)\n\t\t\t\tif e = ConvertIndexToCdb(fileName+\".cdb\", indexFile); e != nil {\n\t\t\t\t\tlog.Printf(\"error converting %s.idx to %s.cdb: %s\", fileName, fileName)\n\t\t\t\t} else {\n\t\t\t\t\tindexFile.Close()\n\t\t\t\t\tos.Remove(indexFile.Name())\n\t\t\t\t\tindexFile = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tv.nm, e = OpenCdbMap(fileName + \".cdb\")\n\t\t\treturn e\n\t\t} else {\n\t\t\tindexFile, e = os.OpenFile(fileName+\".idx\", os.O_RDWR|os.O_CREATE, 0644)\n\t\t\tif e != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t\t}\n\t\t}\n\t\tv.nm, e = LoadNeedleMap(indexFile)\n\t}\n\treturn e\n}\nfunc (v *Volume) Version() Version {\n\treturn v.SuperBlock.Version\n}\nfunc (v *Volume) Size() int64 {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tstat, e := v.dataFile.Stat()\n\tif e == nil {\n\t\treturn stat.Size()\n\t}\n\tfmt.Printf(\"Failed to read file size %s %s\\n\", v.dataFile.Name(), e.Error())\n\treturn -1\n}\nfunc (v *Volume) Close() {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.nm.Close()\n\t_ = v.dataFile.Close()\n}\nfunc (v *Volume) maybeWriteSuperBlock() error {\n\tstat, e := v.dataFile.Stat()\n\tif e != nil {\n\t\tfmt.Printf(\"failed to stat datafile %s: %s\", v.dataFile, e)\n\t\treturn e\n\t}\n\tif stat.Size() == 0 {\n\t\tv.SuperBlock.Version = CurrentVersion\n\t\t_, e = v.dataFile.Write(v.SuperBlock.Bytes())\n\t\tif e != nil && os.IsPermission(e) {\n\t\t\t\/\/read-only, but zero length - recreate it!\n\t\t\tif v.dataFile, e = os.Create(v.dataFile.Name()); e == nil {\n\t\t\t\tif _, e = v.dataFile.Write(v.SuperBlock.Bytes()); e == nil {\n\t\t\t\t\tv.readOnly = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn e\n}\nfunc (v *Volume) readSuperBlock() (err error) {\n\tif _, err = v.dataFile.Seek(0, 0); err != nil {\n\t\treturn fmt.Errorf(\"cannot seek to the beginning of %s: %s\", v.dataFile, err)\n\t}\n\theader := make([]byte, SuperBlockSize)\n\tif _, e := v.dataFile.Read(header); e != nil {\n\t\treturn fmt.Errorf(\"cannot read superblock: %s\", e)\n\t}\n\tv.SuperBlock, err = ParseSuperBlock(header)\n\treturn err\n}\nfunc ParseSuperBlock(header []byte) (superBlock SuperBlock, err error) {\n\tsuperBlock.Version = Version(header[0])\n\tif superBlock.ReplicaType, err = NewReplicationTypeFromByte(header[1]); err != nil {\n\t\terr = fmt.Errorf(\"cannot read replica type: %s\", err)\n\t}\n\treturn\n}\nfunc (v *Volume) NeedToReplicate() bool {\n\treturn v.ReplicaType.GetCopyCount() > 1\n}\n\nfunc (v *Volume) isFileUnchanged(n *Needle) bool {\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tif _, err := v.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0); err != nil {\n\t\t\treturn false\n\t\t}\n\t\toldNeedle := new(Needle)\n\t\toldNeedle.Read(v.dataFile, nv.Size, v.Version())\n\t\tif len(oldNeedle.Data) == len(n.Data) && oldNeedle.Checksum == n.Checksum {\n\t\t\tlength := len(n.Data)\n\t\t\tfor i := 0; i < length; i++ {\n\t\t\t\tif n.Data[i] != oldNeedle.Data[i] {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\tn.Size = oldNeedle.Size\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\nfunc (v *Volume) write(n *Needle) (size uint32, err error) {\n\tif v.readOnly {\n\t\terr = fmt.Errorf(\"%s is read-only\", v.dataFile)\n\t\treturn\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tif v.isFileUnchanged(n) {\n\t\tsize = n.Size\n\t\treturn\n\t}\n\tvar offset int64\n\tif offset, err = v.dataFile.Seek(0, 2); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ensure file writing starting from aligned positions\n\tif offset%NeedlePaddingSize != 0 {\n\t\toffset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)\n\t\tif offset, err = v.dataFile.Seek(offset, 0); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif size, err = n.Append(v.dataFile, v.Version()); err != nil {\n\t\tif e := v.dataFile.Truncate(offset); e != nil {\n\t\t\terr = fmt.Errorf(\"%s\\ncannot truncate %s: %s\", err, v.dataFile, e)\n\t\t}\n\t\treturn\n\t}\n\tnv, ok := v.nm.Get(n.Id)\n\tif !ok || int64(nv.Offset)*NeedlePaddingSize < offset {\n\t\t_, err = v.nm.Put(n.Id, uint32(offset\/NeedlePaddingSize), n.Size)\n\t}\n\treturn\n}\n\nfunc (v *Volume) delete(n *Needle) (uint32, error) {\n\tif v.readOnly {\n\t\treturn 0, fmt.Errorf(\"%s is read-only\", v.dataFile)\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\t\/\/fmt.Println(\"key\", n.Id, \"volume offset\", nv.Offset, \"data_size\", n.Size, \"cached size\", nv.Size)\n\tif ok {\n\t\tvar err error\n\t\tif err = v.nm.Delete(n.Id); err != nil {\n\t\t\treturn nv.Size, err\n\t\t}\n\t\tif _, err = v.dataFile.Seek(0, 2); err != nil {\n\t\t\treturn nv.Size, err\n\t\t}\n\t\tn.Data = make([]byte, 0)\n\t\t_, err = n.Append(v.dataFile, v.Version())\n\t\treturn nv.Size, err\n\t}\n\treturn 0, nil\n}\n\nfunc (v *Volume) read(n *Needle) (int, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tif _, err := v.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\treturn n.Read(v.dataFile, nv.Size, v.Version())\n\t}\n\treturn -1, errors.New(\"Not Found\")\n}\n\nfunc (v *Volume) garbageLevel() float64 {\n\treturn float64(v.nm.DeletedSize()) \/ float64(v.ContentSize())\n}\n\nfunc (v *Volume) compact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\n\tfilePath := path.Join(v.dir, v.Id.String())\n\treturn v.copyDataAndGenerateIndexFile(filePath+\".cpd\", filePath+\".cpx\")\n}\nfunc (v *Volume) commitCompact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\t_ = v.dataFile.Close()\n\tvar e error\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpd\"), path.Join(v.dir, v.Id.String()+\".dat\")); e != nil {\n\t\treturn e\n\t}\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpx\"), path.Join(v.dir, v.Id.String()+\".idx\")); e != nil {\n\t\treturn e\n\t}\n\tif e = v.load(true); e != nil {\n\t\treturn e\n\t}\n\treturn nil\n}\nfunc (v *Volume) freeze() error {\n\tif v.readOnly {\n\t\treturn nil\n\t}\n\tnm, ok := v.nm.(*NeedleMap)\n\tif !ok {\n\t\treturn nil\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tbn, _ := nakeFilename(v.dataFile.Name())\n\tcdbFn := bn + \".cdb\"\n\tlog.Printf(\"converting %s to %s\", nm.indexFile.Name(), cdbFn)\n\terr := DumpNeedleMapToCdb(cdbFn, nm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif v.nm, err = OpenCdbMap(cdbFn); err != nil {\n\t\treturn err\n\t}\n\tnm.indexFile.Close()\n\tos.Remove(nm.indexFile.Name())\n\tv.readOnly = true\n\treturn nil\n}\n\nfunc ScanVolumeFile(dirname string, id VolumeId,\n\tvisitSuperBlock func(SuperBlock) error,\n\tvisitNeedle func(n *Needle, offset uint32) error) (err error) {\n\tvar v *Volume\n\tif v, err = LoadVolumeOnly(dirname, id); err != nil {\n\t\treturn\n\t}\n\tif err = visitSuperBlock(v.SuperBlock); err != nil {\n\t\treturn\n\t}\n\n\tversion := v.Version()\n\n\toffset := uint32(SuperBlockSize)\n\tn, rest, e := ReadNeedleHeader(v.dataFile, version)\n\tif e != nil {\n\t\terr = fmt.Errorf(\"cannot read needle header: %s\", e)\n\t\treturn\n\t}\n\tfor n != nil {\n\t\tif err = n.ReadNeedleBody(v.dataFile, version, rest); err != nil {\n\t\t\terr = fmt.Errorf(\"cannot read needle body: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = visitNeedle(n, offset); err != nil {\n\t\t\treturn\n\t\t}\n\t\toffset += NeedleHeaderSize + rest\n\t\tif n, rest, err = ReadNeedleHeader(v.dataFile, version); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot read needle header: %s\", err)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) {\n\tvar (\n\t\tdst, idx *os.File\n\t)\n\tif dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer dst.Close()\n\n\tif idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer idx.Close()\n\n\tnm := NewNeedleMap(idx)\n\tnew_offset := uint32(SuperBlockSize)\n\n\terr = ScanVolumeFile(v.dir, v.Id, func(superBlock SuperBlock) error {\n\t\t_, err = dst.Write(superBlock.Bytes())\n\t\treturn err\n\t}, func(n *Needle, offset uint32) error {\n\t\tnv, ok := v.nm.Get(n.Id)\n\t\t\/\/log.Println(\"file size is\", n.Size, \"rest\", rest)\n\t\tif ok && nv.Offset*NeedlePaddingSize == offset {\n\t\t\tif nv.Size > 0 {\n\t\t\t\tif _, err = nm.Put(n.Id, new_offset\/NeedlePaddingSize, n.Size); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot put needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tif _, err = n.Append(dst, v.Version()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot append needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tnew_offset += n.DiskSize()\n\t\t\t\t\/\/log.Println(\"saving key\", n.Id, \"volume offset\", old_offset, \"=>\", new_offset, \"data_size\", n.Size, \"rest\", rest)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}\nfunc (v *Volume) ContentSize() uint64 {\n\treturn v.nm.ContentSize()\n}\n<commit_msg>use bytes.Equal() instead, Thanks for Thomas' suggestion<commit_after>package storage\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\nconst (\n\tSuperBlockSize = 8\n)\n\ntype SuperBlock struct {\n\tVersion Version\n\tReplicaType ReplicationType\n}\n\nfunc (s *SuperBlock) Bytes() []byte {\n\theader := make([]byte, SuperBlockSize)\n\theader[0] = byte(s.Version)\n\theader[1] = s.ReplicaType.Byte()\n\treturn header\n}\n\ntype Volume struct {\n\tId VolumeId\n\tdir string\n\tdataFile *os.File\n\tnm NeedleMapper\n\treadOnly bool\n\n\tSuperBlock\n\n\taccessLock sync.Mutex\n}\n\nfunc NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: replicationType}\n\te = v.load(true)\n\treturn\n}\nfunc LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: CopyNil}\n\te = v.load(false)\n\treturn\n}\nfunc (v *Volume) load(alsoLoadIndex bool) error {\n\tvar e error\n\tfileName := path.Join(v.dir, v.Id.String())\n\tv.dataFile, e = os.OpenFile(fileName+\".dat\", os.O_RDWR|os.O_CREATE, 0644)\n\tif e != nil {\n\t\tif !os.IsPermission(e) {\n\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tif v.dataFile, e = os.Open(fileName + \".dat\"); e != nil {\n\t\t\treturn fmt.Errorf(\"cannot open Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tlog.Printf(\"opening \" + fileName + \".dat in READONLY mode\")\n\t\tv.readOnly = true\n\t}\n\tif v.ReplicaType == CopyNil {\n\t\te = v.readSuperBlock()\n\t} else {\n\t\te = v.maybeWriteSuperBlock()\n\t}\n\tif e == nil && alsoLoadIndex {\n\t\tvar indexFile *os.File\n\t\tif v.readOnly {\n\t\t\tif indexFile, e = os.Open(fileName + \".idx\"); e != nil && !os.IsNotExist(e) {\n\t\t\t\treturn fmt.Errorf(\"cannot open index file %s.idx: %s\", fileName, e)\n\t\t\t}\n\t\t\tif indexFile != nil {\n\t\t\t\tlog.Printf(\"converting %s.idx to %s.cdb\", fileName, fileName)\n\t\t\t\tif e = ConvertIndexToCdb(fileName+\".cdb\", indexFile); e != nil {\n\t\t\t\t\tlog.Printf(\"error converting %s.idx to %s.cdb: %s\", fileName, fileName)\n\t\t\t\t} else {\n\t\t\t\t\tindexFile.Close()\n\t\t\t\t\tos.Remove(indexFile.Name())\n\t\t\t\t\tindexFile = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tv.nm, e = OpenCdbMap(fileName + \".cdb\")\n\t\t\treturn e\n\t\t} else {\n\t\t\tindexFile, e = os.OpenFile(fileName+\".idx\", os.O_RDWR|os.O_CREATE, 0644)\n\t\t\tif e != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t\t}\n\t\t}\n\t\tv.nm, e = LoadNeedleMap(indexFile)\n\t}\n\treturn e\n}\nfunc (v *Volume) Version() Version {\n\treturn v.SuperBlock.Version\n}\nfunc (v *Volume) Size() int64 {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tstat, e := v.dataFile.Stat()\n\tif e == nil {\n\t\treturn stat.Size()\n\t}\n\tfmt.Printf(\"Failed to read file size %s %s\\n\", v.dataFile.Name(), e.Error())\n\treturn -1\n}\nfunc (v *Volume) Close() {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.nm.Close()\n\t_ = v.dataFile.Close()\n}\nfunc (v *Volume) maybeWriteSuperBlock() error {\n\tstat, e := v.dataFile.Stat()\n\tif e != nil {\n\t\tfmt.Printf(\"failed to stat datafile %s: %s\", v.dataFile, e)\n\t\treturn e\n\t}\n\tif stat.Size() == 0 {\n\t\tv.SuperBlock.Version = CurrentVersion\n\t\t_, e = v.dataFile.Write(v.SuperBlock.Bytes())\n\t\tif e != nil && os.IsPermission(e) {\n\t\t\t\/\/read-only, but zero length - recreate it!\n\t\t\tif v.dataFile, e = os.Create(v.dataFile.Name()); e == nil {\n\t\t\t\tif _, e = v.dataFile.Write(v.SuperBlock.Bytes()); e == nil {\n\t\t\t\t\tv.readOnly = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn e\n}\nfunc (v *Volume) readSuperBlock() (err error) {\n\tif _, err = v.dataFile.Seek(0, 0); err != nil {\n\t\treturn fmt.Errorf(\"cannot seek to the beginning of %s: %s\", v.dataFile, err)\n\t}\n\theader := make([]byte, SuperBlockSize)\n\tif _, e := v.dataFile.Read(header); e != nil {\n\t\treturn fmt.Errorf(\"cannot read superblock: %s\", e)\n\t}\n\tv.SuperBlock, err = ParseSuperBlock(header)\n\treturn err\n}\nfunc ParseSuperBlock(header []byte) (superBlock SuperBlock, err error) {\n\tsuperBlock.Version = Version(header[0])\n\tif superBlock.ReplicaType, err = NewReplicationTypeFromByte(header[1]); err != nil {\n\t\terr = fmt.Errorf(\"cannot read replica type: %s\", err)\n\t}\n\treturn\n}\nfunc (v *Volume) NeedToReplicate() bool {\n\treturn v.ReplicaType.GetCopyCount() > 1\n}\n\nfunc (v *Volume) isFileUnchanged(n *Needle) bool {\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tif _, err := v.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0); err != nil {\n\t\t\treturn false\n\t\t}\n\t\toldNeedle := new(Needle)\n\t\toldNeedle.Read(v.dataFile, nv.Size, v.Version())\n\t\tif oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {\n\t\t\tn.Size = oldNeedle.Size\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\nfunc (v *Volume) write(n *Needle) (size uint32, err error) {\n\tif v.readOnly {\n\t\terr = fmt.Errorf(\"%s is read-only\", v.dataFile)\n\t\treturn\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tif v.isFileUnchanged(n) {\n\t\tsize = n.Size\n\t\treturn\n\t}\n\tvar offset int64\n\tif offset, err = v.dataFile.Seek(0, 2); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ensure file writing starting from aligned positions\n\tif offset%NeedlePaddingSize != 0 {\n\t\toffset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)\n\t\tif offset, err = v.dataFile.Seek(offset, 0); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif size, err = n.Append(v.dataFile, v.Version()); err != nil {\n\t\tif e := v.dataFile.Truncate(offset); e != nil {\n\t\t\terr = fmt.Errorf(\"%s\\ncannot truncate %s: %s\", err, v.dataFile, e)\n\t\t}\n\t\treturn\n\t}\n\tnv, ok := v.nm.Get(n.Id)\n\tif !ok || int64(nv.Offset)*NeedlePaddingSize < offset {\n\t\t_, err = v.nm.Put(n.Id, uint32(offset\/NeedlePaddingSize), n.Size)\n\t}\n\treturn\n}\n\nfunc (v *Volume) delete(n *Needle) (uint32, error) {\n\tif v.readOnly {\n\t\treturn 0, fmt.Errorf(\"%s is read-only\", v.dataFile)\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\t\/\/fmt.Println(\"key\", n.Id, \"volume offset\", nv.Offset, \"data_size\", n.Size, \"cached size\", nv.Size)\n\tif ok {\n\t\tvar err error\n\t\tif err = v.nm.Delete(n.Id); err != nil {\n\t\t\treturn nv.Size, err\n\t\t}\n\t\tif _, err = v.dataFile.Seek(0, 2); err != nil {\n\t\t\treturn nv.Size, err\n\t\t}\n\t\tn.Data = make([]byte, 0)\n\t\t_, err = n.Append(v.dataFile, v.Version())\n\t\treturn nv.Size, err\n\t}\n\treturn 0, nil\n}\n\nfunc (v *Volume) read(n *Needle) (int, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tif _, err := v.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\treturn n.Read(v.dataFile, nv.Size, v.Version())\n\t}\n\treturn -1, errors.New(\"Not Found\")\n}\n\nfunc (v *Volume) garbageLevel() float64 {\n\treturn float64(v.nm.DeletedSize()) \/ float64(v.ContentSize())\n}\n\nfunc (v *Volume) compact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\n\tfilePath := path.Join(v.dir, v.Id.String())\n\treturn v.copyDataAndGenerateIndexFile(filePath+\".cpd\", filePath+\".cpx\")\n}\nfunc (v *Volume) commitCompact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\t_ = v.dataFile.Close()\n\tvar e error\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpd\"), path.Join(v.dir, v.Id.String()+\".dat\")); e != nil {\n\t\treturn e\n\t}\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpx\"), path.Join(v.dir, v.Id.String()+\".idx\")); e != nil {\n\t\treturn e\n\t}\n\tif e = v.load(true); e != nil {\n\t\treturn e\n\t}\n\treturn nil\n}\nfunc (v *Volume) freeze() error {\n\tif v.readOnly {\n\t\treturn nil\n\t}\n\tnm, ok := v.nm.(*NeedleMap)\n\tif !ok {\n\t\treturn nil\n\t}\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tbn, _ := nakeFilename(v.dataFile.Name())\n\tcdbFn := bn + \".cdb\"\n\tlog.Printf(\"converting %s to %s\", nm.indexFile.Name(), cdbFn)\n\terr := DumpNeedleMapToCdb(cdbFn, nm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif v.nm, err = OpenCdbMap(cdbFn); err != nil {\n\t\treturn err\n\t}\n\tnm.indexFile.Close()\n\tos.Remove(nm.indexFile.Name())\n\tv.readOnly = true\n\treturn nil\n}\n\nfunc ScanVolumeFile(dirname string, id VolumeId,\n\tvisitSuperBlock func(SuperBlock) error,\n\tvisitNeedle func(n *Needle, offset uint32) error) (err error) {\n\tvar v *Volume\n\tif v, err = LoadVolumeOnly(dirname, id); err != nil {\n\t\treturn\n\t}\n\tif err = visitSuperBlock(v.SuperBlock); err != nil {\n\t\treturn\n\t}\n\n\tversion := v.Version()\n\n\toffset := uint32(SuperBlockSize)\n\tn, rest, e := ReadNeedleHeader(v.dataFile, version)\n\tif e != nil {\n\t\terr = fmt.Errorf(\"cannot read needle header: %s\", e)\n\t\treturn\n\t}\n\tfor n != nil {\n\t\tif err = n.ReadNeedleBody(v.dataFile, version, rest); err != nil {\n\t\t\terr = fmt.Errorf(\"cannot read needle body: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = visitNeedle(n, offset); err != nil {\n\t\t\treturn\n\t\t}\n\t\toffset += NeedleHeaderSize + rest\n\t\tif n, rest, err = ReadNeedleHeader(v.dataFile, version); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot read needle header: %s\", err)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) {\n\tvar (\n\t\tdst, idx *os.File\n\t)\n\tif dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer dst.Close()\n\n\tif idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer idx.Close()\n\n\tnm := NewNeedleMap(idx)\n\tnew_offset := uint32(SuperBlockSize)\n\n\terr = ScanVolumeFile(v.dir, v.Id, func(superBlock SuperBlock) error {\n\t\t_, err = dst.Write(superBlock.Bytes())\n\t\treturn err\n\t}, func(n *Needle, offset uint32) error {\n\t\tnv, ok := v.nm.Get(n.Id)\n\t\t\/\/log.Println(\"file size is\", n.Size, \"rest\", rest)\n\t\tif ok && nv.Offset*NeedlePaddingSize == offset {\n\t\t\tif nv.Size > 0 {\n\t\t\t\tif _, err = nm.Put(n.Id, new_offset\/NeedlePaddingSize, n.Size); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot put needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tif _, err = n.Append(dst, v.Version()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot append needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tnew_offset += n.DiskSize()\n\t\t\t\t\/\/log.Println(\"saving key\", n.Id, \"volume offset\", old_offset, \"=>\", new_offset, \"data_size\", n.Size, \"rest\", rest)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}\nfunc (v *Volume) ContentSize() uint64 {\n\treturn v.nm.ContentSize()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\t\"github.com\/brotherlogic\/goserver\/utils\"\n\tpbt \"github.com\/brotherlogic\/tracer\/proto\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tpendWait = time.Minute\n)\n\nfunc (s *Server) runTransition(ctx context.Context, job *pb.JobAssignment) {\n\tutils.SendTrace(ctx, fmt.Sprintf(\"run_transition_%v\", job.State), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n\tstState := job.State\n\tswitch job.State {\n\tcase pb.State_ACKNOWLEDGED:\n\t\tkey := s.scheduleBuild(ctx, job.Job)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"SCHED: %v\", key)\n\t\ts.stateMutex.Unlock()\n\t\tif job.Job.NonBootstrap {\n\t\t\tif key != \"\" {\n\t\t\t\tjob.Server = s.Registry.Identifier\n\t\t\t\tjob.State = pb.State_BUILT\n\t\t\t\tjob.RunningVersion = key\n\t\t\t}\n\t\t} else {\n\t\t\tjob.CommandKey = key\n\t\t\tjob.State = pb.State_BUILDING\n\t\t\tjob.Server = s.Registry.Identifier\n\t\t}\n\tcase pb.State_BUILDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILD(%v): %v\", job.CommandKey, s.scheduler.getState(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif s.taskComplete(job.CommandKey) {\n\t\t\tjob.State = pb.State_BUILT\n\t\t}\n\tcase pb.State_BUILT:\n\t\toutput := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILT(%v): (%v): %v\", job.CommandKey, len(output), output)\n\t\ts.stateMutex.Unlock()\n\t\tif !job.Job.NonBootstrap && len(output) > 0 {\n\t\t\tif job.BuildFail == 5 {\n\t\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\t\tjob.BuildFail = 0\n\t\t\t}\n\t\t\tjob.BuildFail++\n\t\t\tjob.State = pb.State_DIED\n\t\t} else {\n\t\t\tjob.BuildFail = 0\n\t\t\tkey := s.scheduleRun(job.Job)\n\t\t\tjob.CommandKey = key\n\t\t\tjob.StartTime = time.Now().Unix()\n\t\t\tjob.State = pb.State_PENDING\n\t\t\tif _, ok := s.pendingMap[time.Now().Weekday()]; !ok {\n\t\t\t\ts.pendingMap[time.Now().Weekday()] = make(map[string]int)\n\t\t\t}\n\t\t\ts.pendingMap[time.Now().Weekday()][job.Job.Name]++\n\t\t}\n\tcase pb.State_PENDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"OUTPUT = %v\", s.scheduler.getOutput(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif time.Now().Add(-time.Minute).Unix() > job.StartTime {\n\t\t\tjob.State = pb.State_RUNNING\n\t\t}\n\tcase pb.State_RUNNING:\n\t\toutput := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"ROUTPUT = %v\", s.scheduler.getOutput(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif s.taskComplete(job.CommandKey) {\n\t\t\ts.stateMutex.Lock()\n\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"COMPLETE = %v\", output)\n\t\t\ts.stateMutex.Unlock()\n\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\tjob.State = pb.State_DIED\n\t\t}\n\n\t\terr := s.discover.discover(job.Job.Name, s.Registry.Identifier)\n\t\tif err != nil {\n\t\t\tif job.DiscoverCount > 30 {\n\t\t\t\ts.RaiseIssue(ctx, \"Cannot Discover Running Server\", fmt.Sprintf(\"%v on %v is not discoverable, despite running (%v) the output says %v\", job.Job.Name, s.Registry.Identifier, err, output), false)\n\t\t\t}\n\t\t\tjob.DiscoverCount++\n\t\t} else {\n\t\t\tjob.DiscoverCount = 0\n\t\t}\n\n\t\t\/\/ Restart this job if we need to\n\t\tif job.Job.NonBootstrap {\n\t\t\tversion, err := s.getVersion(ctx, job.Job)\n\t\t\tif err == nil && version != job.RunningVersion {\n\t\t\t\ts.stateMutex.Lock()\n\t\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"VERSION_MISMATCH = %v,%v\", version, job.RunningVersion)\n\t\t\t\ts.stateMutex.Unlock()\n\t\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\t\tjob.State = pb.State_ACKNOWLEDGED\n\t\t\t}\n\t\t}\n\tcase pb.State_DIED:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"DIED %v\", job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\ts.scheduler.removeJob(job.CommandKey)\n\t\tjob.State = pb.State_ACKNOWLEDGED\n\t}\n\n\tif job.State != stState {\n\t\ts.stateTime[job.Job.Name] = time.Now()\n\t}\n\n\tif job.State == pb.State_DIED {\n\t}\n\n\tutils.SendTrace(ctx, fmt.Sprintf(\"end_transition_%v_%v\", job.State, stState), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n\tutils.SendTrace(ctx, fmt.Sprintf(\"end_transition_func_%v\", job.State), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n}\n\ntype translator interface {\n\tbuild(job *pb.Job) *exec.Cmd\n\trun(job *pb.Job) *exec.Cmd\n}\n\ntype checker interface {\n\tisAlive(ctx context.Context, job *pb.JobAssignment) bool\n}\n\nfunc (s *Server) getVersion(ctx context.Context, job *pb.Job) (string, error) {\n\tversions, err := s.builder.build(ctx, job)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(versions) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn versions[0].Version, nil\n\n}\n\n\/\/ scheduleBuild builds out the job, returning the current version\nfunc (s *Server) scheduleBuild(ctx context.Context, job *pb.Job) string {\n\tutils.SendTrace(ctx, fmt.Sprintf(\"schedule_build_%v\", job.NonBootstrap), time.Now(), pbt.Milestone_MARKER, job.Name)\n\tif !job.NonBootstrap {\n\t\tc := s.translator.build(job)\n\t\treturn s.scheduler.Schedule(&rCommand{command: c})\n\t}\n\n\tversions, err := s.builder.build(ctx, job)\n\n\tif len(versions) == 0 {\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Name] = fmt.Sprintf(\"No Versions: %v\", err)\n\t\ts.stateMutex.Unlock()\n\t\treturn \"\"\n\t}\n\n\tt := time.Now()\n\terr = s.builder.copy(ctx, versions[0])\n\ts.lastCopyTime = time.Now().Sub(t)\n\ts.lastCopyStatus = fmt.Sprintf(\"%v\", err)\n\tif err != nil {\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Name] = fmt.Sprintf(\"Copy fail (%v) -> %v\", time.Now().Sub(t), err)\n\t\ts.stateMutex.Unlock()\n\t\treturn \"\"\n\t}\n\ts.stateMutex.Lock()\n\ts.stateMap[job.Name] = fmt.Sprintf(\"Found version %v\", versions[0].Version)\n\ts.stateMutex.Unlock()\n\treturn versions[0].Version\n}\n\nfunc (s *Server) scheduleRun(job *pb.Job) string {\n\tc := s.translator.run(job)\n\treturn s.scheduler.Schedule(&rCommand{command: c})\n}\n\nfunc (s *Server) taskComplete(key string) bool {\n\treturn s.scheduler.schedulerComplete(key)\n}\n<commit_msg>Adds status line<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\t\"github.com\/brotherlogic\/goserver\/utils\"\n\tpbt \"github.com\/brotherlogic\/tracer\/proto\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tpendWait = time.Minute\n)\n\nfunc (s *Server) runTransition(ctx context.Context, job *pb.JobAssignment) {\n\tutils.SendTrace(ctx, fmt.Sprintf(\"run_transition_%v\", job.State), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n\tstState := job.State\n\tswitch job.State {\n\tcase pb.State_ACKNOWLEDGED:\n\t\tkey := s.scheduleBuild(ctx, job.Job)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"SCHED: %v\", key)\n\t\ts.stateMutex.Unlock()\n\t\tif job.Job.NonBootstrap {\n\t\t\tif key != \"\" {\n\t\t\t\tjob.Server = s.Registry.Identifier\n\t\t\t\tjob.State = pb.State_BUILT\n\t\t\t\tjob.RunningVersion = key\n\t\t\t}\n\t\t} else {\n\t\t\tjob.CommandKey = key\n\t\t\tjob.State = pb.State_BUILDING\n\t\t\tjob.Server = s.Registry.Identifier\n\t\t}\n\tcase pb.State_BUILDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILD(%v): %v\", job.CommandKey, s.scheduler.getState(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif s.taskComplete(job.CommandKey) {\n\t\t\tjob.State = pb.State_BUILT\n\t\t}\n\tcase pb.State_BUILT:\n\t\toutput := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILT(%v): (%v): %v\", job.CommandKey, len(output), output)\n\t\ts.stateMutex.Unlock()\n\t\tif !job.Job.NonBootstrap && len(output) > 0 {\n\t\t\tif job.BuildFail == 5 {\n\t\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\t\tjob.BuildFail = 0\n\t\t\t}\n\t\t\tjob.BuildFail++\n\t\t\tjob.State = pb.State_DIED\n\t\t} else {\n\t\t\tjob.BuildFail = 0\n\t\t\tkey := s.scheduleRun(job.Job)\n\t\t\tjob.CommandKey = key\n\t\t\tjob.StartTime = time.Now().Unix()\n\t\t\tjob.State = pb.State_PENDING\n\t\t\tif _, ok := s.pendingMap[time.Now().Weekday()]; !ok {\n\t\t\t\ts.pendingMap[time.Now().Weekday()] = make(map[string]int)\n\t\t\t}\n\t\t\ts.pendingMap[time.Now().Weekday()][job.Job.Name]++\n\t\t}\n\tcase pb.State_PENDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"OUTPUT = %v\", s.scheduler.getOutput(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif time.Now().Add(-time.Minute).Unix() > job.StartTime {\n\t\t\tjob.State = pb.State_RUNNING\n\t\t}\n\tcase pb.State_RUNNING:\n\t\toutput := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"ROUTPUT = %v\", s.scheduler.getOutput(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\tif s.taskComplete(job.CommandKey) {\n\t\t\ts.stateMutex.Lock()\n\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"COMPLETE = %v\", output)\n\t\t\ts.stateMutex.Unlock()\n\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\tjob.State = pb.State_DIED\n\t\t}\n\n\t\terr := s.discover.discover(job.Job.Name, s.Registry.Identifier)\n\t\tif err != nil {\n\t\t\tif job.DiscoverCount > 30 {\n\t\t\t\ts.RaiseIssue(ctx, \"Cannot Discover Running Server\", fmt.Sprintf(\"%v on %v is not discoverable, despite running (%v) the output says %v\", job.Job.Name, s.Registry.Identifier, err, output), false)\n\t\t\t}\n\t\t\tjob.DiscoverCount++\n\t\t} else {\n\t\t\tjob.DiscoverCount = 0\n\t\t}\n\n\t\t\/\/ Restart this job if we need to\n\t\tif job.Job.NonBootstrap {\n\t\t\tversion, err := s.getVersion(ctx, job.Job)\n\t\t\tif err == nil && version != job.RunningVersion {\n\t\t\t\ts.stateMutex.Lock()\n\t\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"VERSION_MISMATCH = %v,%v\", version, job.RunningVersion)\n\t\t\t\ts.stateMutex.Unlock()\n\t\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\t\tjob.State = pb.State_ACKNOWLEDGED\n\t\t\t}\n\t\t}\n\tcase pb.State_DIED:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"DIED %v\", job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\ts.scheduler.removeJob(job.CommandKey)\n\t\tjob.State = pb.State_ACKNOWLEDGED\n\t}\n\n\tif job.State != stState {\n\t\ts.stateTime[job.Job.Name] = time.Now()\n\t}\n\n\tif job.State == pb.State_DIED {\n\t}\n\n\tutils.SendTrace(ctx, fmt.Sprintf(\"end_transition_%v_%v\", job.State, stState), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n\tutils.SendTrace(ctx, fmt.Sprintf(\"end_transition_func_%v\", job.State), time.Now(), pbt.Milestone_MARKER, job.Job.Name)\n}\n\ntype translator interface {\n\tbuild(job *pb.Job) *exec.Cmd\n\trun(job *pb.Job) *exec.Cmd\n}\n\ntype checker interface {\n\tisAlive(ctx context.Context, job *pb.JobAssignment) bool\n}\n\nfunc (s *Server) getVersion(ctx context.Context, job *pb.Job) (string, error) {\n\tversions, err := s.builder.build(ctx, job)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(versions) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn versions[0].Version, nil\n\n}\n\n\/\/ scheduleBuild builds out the job, returning the current version\nfunc (s *Server) scheduleBuild(ctx context.Context, job *pb.Job) string {\n\tutils.SendTrace(ctx, fmt.Sprintf(\"schedule_build_%v\", job.NonBootstrap), time.Now(), pbt.Milestone_MARKER, job.Name)\n\tif !job.NonBootstrap {\n\t\tc := s.translator.build(job)\n\t\treturn s.scheduler.Schedule(&rCommand{command: c})\n\t}\n\n\tversions, err := s.builder.build(ctx, job)\n\n\ts.lastCopyStatus = fmt.Sprintf(\"%v\", err)\n\tif len(versions) == 0 {\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Name] = fmt.Sprintf(\"No Versions: %v\", err)\n\t\ts.stateMutex.Unlock()\n\t\treturn \"\"\n\t}\n\n\tt := time.Now()\n\terr = s.builder.copy(ctx, versions[0])\n\ts.lastCopyTime = time.Now().Sub(t)\n\ts.lastCopyStatus = fmt.Sprintf(\"%v\", err)\n\tif err != nil {\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Name] = fmt.Sprintf(\"Copy fail (%v) -> %v\", time.Now().Sub(t), err)\n\t\ts.stateMutex.Unlock()\n\t\treturn \"\"\n\t}\n\ts.stateMutex.Lock()\n\ts.stateMap[job.Name] = fmt.Sprintf(\"Found version %v\", versions[0].Version)\n\ts.stateMutex.Unlock()\n\treturn versions[0].Version\n}\n\nfunc (s *Server) scheduleRun(job *pb.Job) string {\n\tc := s.translator.run(job)\n\treturn s.scheduler.Schedule(&rCommand{command: c})\n}\n\nfunc (s *Server) taskComplete(key string) bool {\n\treturn s.scheduler.schedulerComplete(key)\n}\n<|endoftext|>"} {"text":"<commit_before>package snickers_test\n\nimport (\n\t\"os\"\n\n\t\"github.com\/flavioribeiro\/gonfig\"\n\t\"github.com\/flavioribeiro\/snickers\/db\"\n\t\"github.com\/flavioribeiro\/snickers\/lib\"\n\t\"github.com\/flavioribeiro\/snickers\/types\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Library\", func() {\n\tContext(\"HTTP Downloader\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t\tcfg gonfig.Gonfig\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t\tcfg, _ = gonfig.FromJsonFile(\"..\/config.json\")\n\t\t})\n\n\t\tIt(\"Should change job status and details on error\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.HTTPDownload(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(SatisfyAny(ContainSubstring(\"no such host\"), ContainSubstring(\"No filename could be determined\")))\n\t\t})\n\n\t\tIt(\"Should set the local source and local destination on Job\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/flv.io\/source_here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.HTTPDownload(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tswapDir, _ := cfg.GetString(\"SWAP_DIRECTORY\", \"\")\n\t\t\tsourceExpected := swapDir + \"123\/src\/source_here.mp4\"\n\t\t\tExpect(changedJob.LocalSource).To(Equal(sourceExpected))\n\n\t\t\tdestinationExpected := swapDir + \"123\/dst\/source_here.mp4\"\n\t\t\tExpect(changedJob.LocalDestination).To(Equal(destinationExpected))\n\t\t})\n\t})\n\n\tContext(\"FFMPEG Encoder\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t\tcfg gonfig.Gonfig\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t\tcfg, _ = gonfig.FromJsonFile(\"..\/config.json\")\n\t\t})\n\n\t\tIt(\"Should change job status and details if input is not found\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: \"notfound.mp4\",\n\t\t\t\tLocalDestination: \"anywhere\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(Equal(\"Error opening input 'notfound.mp4': No such file or directory\"))\n\t\t})\n\n\t\tIt(\"Should change job status and details if output path doesn't exists\", func() {\n\t\t\tprojectPath, _ := os.Getwd()\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: projectPath + \"\/videos\/comingsoon.mov\",\n\t\t\t\tLocalDestination: \"\/nowhere\",\n\t\t\t}\n\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(Equal(\"output format is not initialized. Unable to allocate context\"))\n\t\t})\n\n\t\tIt(\"Should change job status and details when encoding\", func() {\n\t\t\tprojectPath, _ := os.Getwd()\n\t\t\tswapDir, _ := cfg.GetString(\"SWAP_DIRECTORY\", \"\")\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: projectPath + \"\/videos\/nyt.mp4\",\n\t\t\t\tLocalDestination: swapDir + \"\/output.mp4\",\n\t\t\t}\n\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Details).To(Equal(\"0%\"))\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobEncoding))\n\t\t})\n\t})\n\n\tContext(\"S3 Uploader\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t})\n\n\t\tIt(\"Should get bucket from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\"\n\t\t\tbucket, _ := lib.GetAWSBucket(destination)\n\t\t\tExpect(bucket).To(Equal(\"BUCKET\"))\n\t\t})\n\n\t\tIt(\"Should set credentials from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\"\n\t\t\tlib.SetAWSCredentials(destination)\n\t\t\tExpect(os.Getenv(\"AWS_ACCESS_KEY_ID\")).To(Equal(\"AWSKEY\"))\n\t\t\tExpect(os.Getenv(\"AWS_SECRET_ACCESS_KEY\")).To(Equal(\"AWSSECRET\"))\n\t\t})\n\n\t\tIt(\"Should get path and filename from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\/HERE.mp4\"\n\t\t\tkey, _ := lib.GetAWSKey(destination)\n\t\t\tExpect(key).To(Equal(\"\/OBJECT\/HERE.mp4\"))\n\t\t})\n\t})\n})\n<commit_msg>tests: fix encoding status test<commit_after>package snickers_test\n\nimport (\n\t\"os\"\n\n\t\"github.com\/flavioribeiro\/gonfig\"\n\t\"github.com\/flavioribeiro\/snickers\/db\"\n\t\"github.com\/flavioribeiro\/snickers\/lib\"\n\t\"github.com\/flavioribeiro\/snickers\/types\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Library\", func() {\n\tContext(\"HTTP Downloader\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t\tcfg gonfig.Gonfig\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t\tcfg, _ = gonfig.FromJsonFile(\"..\/config.json\")\n\t\t})\n\n\t\tIt(\"Should change job status and details on error\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.HTTPDownload(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(SatisfyAny(ContainSubstring(\"no such host\"), ContainSubstring(\"No filename could be determined\")))\n\t\t})\n\n\t\tIt(\"Should set the local source and local destination on Job\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/flv.io\/source_here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.HTTPDownload(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tswapDir, _ := cfg.GetString(\"SWAP_DIRECTORY\", \"\")\n\t\t\tsourceExpected := swapDir + \"123\/src\/source_here.mp4\"\n\t\t\tExpect(changedJob.LocalSource).To(Equal(sourceExpected))\n\n\t\t\tdestinationExpected := swapDir + \"123\/dst\/source_here.mp4\"\n\t\t\tExpect(changedJob.LocalDestination).To(Equal(destinationExpected))\n\t\t})\n\t})\n\n\tContext(\"FFMPEG Encoder\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t\tcfg gonfig.Gonfig\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t\tcfg, _ = gonfig.FromJsonFile(\"..\/config.json\")\n\t\t})\n\n\t\tIt(\"Should change job status and details if input is not found\", func() {\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: \"notfound.mp4\",\n\t\t\t\tLocalDestination: \"anywhere\",\n\t\t\t}\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(Equal(\"Error opening input 'notfound.mp4': No such file or directory\"))\n\t\t})\n\n\t\tIt(\"Should change job status and details if output path doesn't exists\", func() {\n\t\t\tprojectPath, _ := os.Getwd()\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: projectPath + \"\/videos\/comingsoon.mov\",\n\t\t\t\tLocalDestination: \"\/nowhere\",\n\t\t\t}\n\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobError))\n\t\t\tExpect(changedJob.Details).To(Equal(\"output format is not initialized. Unable to allocate context\"))\n\t\t})\n\n\t\tIt(\"Should change job status and details when encoding\", func() {\n\t\t\tprojectPath, _ := os.Getwd()\n\t\t\tswapDir, _ := cfg.GetString(\"SWAP_DIRECTORY\", \"\")\n\t\t\texampleJob := types.Job{\n\t\t\t\tID: \"123\",\n\t\t\t\tSource: \"http:\/\/source.here.mp4\",\n\t\t\t\tDestination: \"s3:\/\/user@pass:\/bucket\/destination.mp4\",\n\t\t\t\tPreset: types.Preset{Name: \"presetHere\"},\n\t\t\t\tStatus: types.JobCreated,\n\t\t\t\tDetails: \"\",\n\t\t\t\tLocalSource: projectPath + \"\/videos\/nyt.mp4\",\n\t\t\t\tLocalDestination: swapDir + \"\/output.mp4\",\n\t\t\t}\n\n\t\t\tdbInstance.StoreJob(exampleJob)\n\n\t\t\tlib.FFMPEGEncode(exampleJob.ID)\n\t\t\tchangedJob, _ := dbInstance.RetrieveJob(\"123\")\n\n\t\t\tExpect(changedJob.Details).To(Equal(\"100\"))\n\t\t\tExpect(changedJob.Status).To(Equal(types.JobEncoding))\n\t\t})\n\t})\n\n\tContext(\"S3 Uploader\", func() {\n\t\tvar (\n\t\t\tdbInstance db.DatabaseInterface\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdbInstance, _ = db.GetDatabase()\n\t\t\tdbInstance.ClearDatabase()\n\t\t})\n\n\t\tIt(\"Should get bucket from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\"\n\t\t\tbucket, _ := lib.GetAWSBucket(destination)\n\t\t\tExpect(bucket).To(Equal(\"BUCKET\"))\n\t\t})\n\n\t\tIt(\"Should set credentials from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\"\n\t\t\tlib.SetAWSCredentials(destination)\n\t\t\tExpect(os.Getenv(\"AWS_ACCESS_KEY_ID\")).To(Equal(\"AWSKEY\"))\n\t\t\tExpect(os.Getenv(\"AWS_SECRET_ACCESS_KEY\")).To(Equal(\"AWSSECRET\"))\n\t\t})\n\n\t\tIt(\"Should get path and filename from URL Destination\", func() {\n\t\t\tdestination := \"http:\/\/AWSKEY:AWSSECRET@BUCKET.s3.amazonaws.com\/OBJECT\/HERE.mp4\"\n\t\t\tkey, _ := lib.GetAWSKey(destination)\n\t\t\tExpect(key).To(Equal(\"\/OBJECT\/HERE.mp4\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/* +build cgo *\/\n\npackage gmssl\n\n\/*\n#include <openssl\/hmac.h>\n#include <openssl\/cmac.h>\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nfunc GetMacs(aliases bool) []string {\n\treturn []string{\"hello\", \"world\"}\n}\n\nfunc GetMacKeyLength(name string) (int, error) {\n\treturn 0, nil\n}\n\nfunc GetMacLength(name string) (int, error) {\n\treturn 0, nil\n}\n\ntype MACContext struct {\n\thctx *C.HMAC_CTX\n}\n\nfunc NewMACContext(name string, args map[string]string, key []byte) (*MACContext, error) {\n\tcname := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cname))\n\tmd := C.EVP_get_digestbyname(cname)\n\tif md == nil {\n\t\treturn nil, fmt.Errorf(\"shit\")\n\t}\n\n\tctx := C.HMAC_CTX_new()\n\tif ctx == nil {\n\t\treturn nil, fmt.Errorf(\"shit\")\n\t}\n\n\tret := &MACContext{ctx}\n\truntime.SetFinalizer(ret, func(ret *MACContext) {\n\t\tC.HMAC_CTX_free(ret.hctx)\n\t})\n\n\tif 1 != C.HMAC_Init_ex(ctx, unsafe.Pointer(&key[0]), C.int(len(key)), md, nil) {\n\t\treturn nil, fmt.Errorf(\"shit\")\n\t}\n\n\treturn ret, nil\n}\n\nfunc (ctx *MACContext) Update(data []byte) error {\n\tfmt.Printf(\"byte: %x \\n\", data)\n\tif len(data) == 0 {\n\t\tfmt.Println(\"len(data) ==0\")\n\t\treturn nil\n\t}\n\t\n\tfmt.Println(unsafe.Pointer(&data[0]))\n\tfmt.Println( C.size_t(len(data)) )\n\t\n\tfmt.Printf(\"ctx: %+v \\n\", ctx)\n\t\n\tfmt.Println(\"call C.HMAC_Update\")\n\t\n\tif 1 != C.HMAC_Update(ctx.hctx, (*C.uchar)(unsafe.Pointer(&data[0])), C.size_t(len(data))) {\n\t\tfmt.Println(\"errors...\")\n\t\treturn errors.New(\"hello\")\n\t}\n\tfmt.Println(\"return nil\")\n\treturn nil\n}\n\nfunc (ctx *MACContext) Final() ([]byte, error) {\n\toutbuf := make([]byte, 64)\n\toutlen := C.uint(len(outbuf))\n\tif 1 != C.HMAC_Final(ctx.hctx, (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outlen) {\n\t\treturn nil, errors.New(\"error\")\n\t}\n\treturn outbuf[:outlen], nil\n}\n<commit_msg>Delete mac.go<commit_after><|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\ttypes \"github.com\/openfaas\/faas-provider\/types\"\n)\n\n\/\/ AddMetricsHandler wraps a http.HandlerFunc with Prometheus metrics\nfunc AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQueryFetcher) http.HandlerFunc {\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\trecorder := httptest.NewRecorder()\n\t\thandler.ServeHTTP(recorder, r)\n\t\tupstreamCall := recorder.Result()\n\n\t\tif upstreamCall.Body == nil {\n\t\t\tlog.Println(\"Upstream call had empty body.\")\n\t\t\treturn\n\t\t}\n\n\t\tdefer upstreamCall.Body.Close()\n\t\tupstreamBody, _ := ioutil.ReadAll(upstreamCall.Body)\n\n\t\tif recorder.Code != http.StatusOK {\n\t\t\tlog.Printf(\"List functions responded with code %d, body: %s\",\n\t\t\t\trecorder.Code,\n\t\t\t\tstring(upstreamBody))\n\n\t\t\thttp.Error(w, \"Unexpected status code retriving functions from backend\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tvar functions []types.FunctionStatus\n\n\t\terr := json.Unmarshal(upstreamBody, &functions)\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Metrics upstream error: %s\", err)\n\n\t\t\thttp.Error(w, \"Error parsing metrics from upstream provider\/backend\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Ensure values are empty first.\n\t\tfor i := range functions {\n\t\t\tfunctions[i].InvocationCount = 0\n\t\t}\n\n\t\tif len(functions) > 0 {\n\n\t\t\tns := functions[0].Namespace\n\t\t\tq := fmt.Sprintf(`sum(gateway_function_invocation_total{function_name=~\".*.%s\"}) by (function_name)`, ns)\n\t\t\t\/\/ Restrict query results to only function names matching namespace suffix.\n\n\t\t\tresults, err := prometheusQuery.Fetch(url.QueryEscape(q))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error querying Prometheus: %s\\n\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmixIn(&functions, results)\n\t\t}\n\n\t\tbytesOut, err := json.Marshal(functions)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error serializing functions: %s\", err)\n\t\t\thttp.Error(w, \"error writing response after adding metrics\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(bytesOut)\n\t}\n}\n\nfunc mixIn(functions *[]types.FunctionStatus, metrics *VectorQueryResponse) {\n\n\tif functions == nil {\n\t\treturn\n\t}\n\n\tfor i, function := range *functions {\n\t\tfor _, v := range metrics.Data.Result {\n\n\t\t\tif v.Metric.FunctionName == fmt.Sprintf(\"%s.%s\", function.Name, function.Namespace) {\n\t\t\t\tmetricValue := v.Value[1]\n\t\t\t\tswitch value := metricValue.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tf, err := strconv.ParseFloat(value, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"add_metrics: unable to convert value %q for metric: %s\", value, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t(*functions)[i].InvocationCount += f\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fix typo in error message<commit_after>package metrics\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\ttypes \"github.com\/openfaas\/faas-provider\/types\"\n)\n\n\/\/ AddMetricsHandler wraps a http.HandlerFunc with Prometheus metrics\nfunc AddMetricsHandler(handler http.HandlerFunc, prometheusQuery PrometheusQueryFetcher) http.HandlerFunc {\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\trecorder := httptest.NewRecorder()\n\t\thandler.ServeHTTP(recorder, r)\n\t\tupstreamCall := recorder.Result()\n\n\t\tif upstreamCall.Body == nil {\n\t\t\tlog.Println(\"Upstream call had empty body.\")\n\t\t\treturn\n\t\t}\n\n\t\tdefer upstreamCall.Body.Close()\n\t\tupstreamBody, _ := ioutil.ReadAll(upstreamCall.Body)\n\n\t\tif recorder.Code != http.StatusOK {\n\t\t\tlog.Printf(\"List functions responded with code %d, body: %s\",\n\t\t\t\trecorder.Code,\n\t\t\t\tstring(upstreamBody))\n\n\t\t\thttp.Error(w, \"Metrics hander: unexpected status code retrieving functions from backend\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tvar functions []types.FunctionStatus\n\n\t\terr := json.Unmarshal(upstreamBody, &functions)\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Metrics upstream error: %s\", err)\n\n\t\t\thttp.Error(w, \"Error parsing metrics from upstream provider\/backend\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Ensure values are empty first.\n\t\tfor i := range functions {\n\t\t\tfunctions[i].InvocationCount = 0\n\t\t}\n\n\t\tif len(functions) > 0 {\n\n\t\t\tns := functions[0].Namespace\n\t\t\tq := fmt.Sprintf(`sum(gateway_function_invocation_total{function_name=~\".*.%s\"}) by (function_name)`, ns)\n\t\t\t\/\/ Restrict query results to only function names matching namespace suffix.\n\n\t\t\tresults, err := prometheusQuery.Fetch(url.QueryEscape(q))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error querying Prometheus: %s\\n\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmixIn(&functions, results)\n\t\t}\n\n\t\tbytesOut, err := json.Marshal(functions)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error serializing functions: %s\", err)\n\t\t\thttp.Error(w, \"error writing response after adding metrics\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(bytesOut)\n\t}\n}\n\nfunc mixIn(functions *[]types.FunctionStatus, metrics *VectorQueryResponse) {\n\n\tif functions == nil {\n\t\treturn\n\t}\n\n\tfor i, function := range *functions {\n\t\tfor _, v := range metrics.Data.Result {\n\n\t\t\tif v.Metric.FunctionName == fmt.Sprintf(\"%s.%s\", function.Name, function.Namespace) {\n\t\t\t\tmetricValue := v.Value[1]\n\t\t\t\tswitch value := metricValue.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tf, err := strconv.ParseFloat(value, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"add_metrics: unable to convert value %q for metric: %s\", value, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t(*functions)[i].InvocationCount += f\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/go-martini\/martini\"\n\t\"net\/http\"\n \"io\"\n\t\"os\"\n)\n\nfunc main() {\n\tm := martini.Classic()\n\tm.Post(\"\/upload\", func(r *http.Request) (int, string) {\n\t\terr := r.ParseMultipartForm(100000)\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\t\tfiles := r.MultipartForm.File[\"files\"]\n\t\tfile, err := files[0].Open()\n\t\tdefer file.Close()\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\t\tdst, err := os.Create(\".\/uploads\/\" + files[0].Filename)\n\t\tdefer dst.Close()\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n if _, err := io.Copy(dst, file); err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\n\t\treturn 204, \"ok\"\n\t})\n\n\tm.Run()\n}\n<commit_msg>update go<commit_after>package main\n\nimport (\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n \"io\"\n\t\"os\"\n)\n\nfunc main() {\n\tm := martini.Classic()\n\tm.Post(\"\/upload\", func(req *http.Request) (int, string) {\n\t\terr := req.ParseMultipartForm(100000)\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\t\tfiles := req.MultipartForm.File[\"files\"]\n\t\tfile, err := files[0].Open()\n\t\tdefer file.Close()\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\t\tfileName, err := uuid.NewV4()\n\t\tdst, err := os.Create(\".\/uploads\/\" + fileName.String())\n\t\tdefer dst.Close()\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n if _, err := io.Copy(dst, file); err != nil {\n\t\t\treturn http.StatusInternalServerError, err.Error()\n\t\t}\n\t\tjsonResponse, err := json.Marshal(map[string]string{\"file_name\": fileName.String()})\n\t\treturn 200,string(jsonResponse)\n\t})\n\tm.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\t\"golang.org\/x\/crypto\/openpgp\/armor\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rakoo\/goax\"\n)\n\nfunc send(peer string) {\n\tr, err := openRatchet(peer)\n\tif err != nil {\n\t\tif err == errNoRatchet {\n\t\t\tfmt.Printf(\"No ratchet for %s, please send this to the peer and \\\"receive\\\" what they send you back\", peer)\n\t\t\tfmt.Println(\"\\n\")\n\t\t\tsendRatchet(peer)\n\t\t\tfmt.Println(\"\")\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tmsg, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't read all stdin\")\n\t}\n\tcipherText := r.Encrypt(msg)\n\tif err := saveRatchet(r, peer); err != nil {\n\t\tlog.Println(\"Couldn't save ratchet:\", err)\n\t\tos.Remove(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\t\tos.Exit(1)\n\t}\n\n\tlog.Println(len(cipherText))\n}\n\nvar errNoRatchet = errors.New(\"No ratchet\")\n\nvar errInvalidRatchet = errors.New(\"Invalid ratchet\")\n\nfunc openRatchet(peer string) (r *goax.Ratchet, err error) {\n\tf, err := os.Open(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\tif err != nil {\n\t\treturn nil, errNoRatchet\n\t}\n\tdefer f.Close()\n\n\tr = new(goax.Ratchet)\n\tarmorDecoder, err := armor.Decode(f)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Error opening decoder\")\n\t}\n\terr = json.NewDecoder(armorDecoder.Body).Decode(r)\n\tif err != nil {\n\t\treturn nil, errInvalidRatchet\n\t}\n\n\treturn r, nil\n}\n\nfunc createRatchet(peer string) (r *goax.Ratchet, err error) {\n\tmyIdentityKeyPrivate := getPrivateKey()\n\tvar asArray [32]byte\n\tcopy(asArray[:], myIdentityKeyPrivate)\n\tr = goax.New(rand.Reader, asArray)\n\terr = saveRatchet(r, peer)\n\treturn r, err\n}\n\nfunc saveRatchet(r *goax.Ratchet, peer string) error {\n\tos.MkdirAll(\"ratchets\", 0755)\n\tf, err := os.Create(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't create ratchet file\")\n\t}\n\tdefer f.Close()\n\n\tarmorEncoder, err := armor.Encode(f, \"GOAX RATCHET\", nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't create armor encoder\")\n\t}\n\terr = json.NewEncoder(armorEncoder).Encode(r)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't marshall ratchet\")\n\t}\n\terr = armorEncoder.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't close armor encoder\")\n\t}\n\treturn nil\n}\n\nfunc sendRatchet(peer string) {\n\tr, err := createRatchet(peer)\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't create ratchet for %s: %s\", peer, err)\n\t}\n\terr = saveRatchet(r, peer)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't save ratchet, will have to try another time\", err)\n\t}\n\tkx, err := r.GetKeyExchangeMaterial()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get key exchange material\", err)\n\t}\n\tencoder, err := armor.Encode(os.Stdout, \"KEY EXCHANGE MATERIAL\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get armor encoder\")\n\t}\n\n\tjson.NewEncoder(encoder).Encode(kx)\n\tencoder.Close()\n}\n<commit_msg>Print encrypted message in armor encoding<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\t\"golang.org\/x\/crypto\/openpgp\/armor\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rakoo\/goax\"\n)\n\nfunc send(peer string) {\n\tr, err := openRatchet(peer)\n\tif err != nil {\n\t\tif err == errNoRatchet {\n\t\t\tfmt.Printf(\"No ratchet for %s, please send this to the peer and \\\"receive\\\" what they send you back\", peer)\n\t\t\tfmt.Println(\"\\n\")\n\t\t\tsendRatchet(peer)\n\t\t\tfmt.Println(\"\")\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tmsg, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't read all stdin\")\n\t}\n\tcipherText := r.Encrypt(msg)\n\tif err := saveRatchet(r, peer); err != nil {\n\t\tlog.Println(\"Couldn't save ratchet:\", err)\n\t\tos.Remove(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\t\tos.Exit(1)\n\t}\n\n\tencoder, err := armor.Encode(os.Stdout, \"GOAX ENCRYPTED MESSAGE\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't create armor encoder: \", err)\n\t}\n\n\tio.Copy(encoder, bytes.NewReader(cipherText))\n\tencoder.Close()\n\tfmt.Println(\"\")\n}\n\nvar errNoRatchet = errors.New(\"No ratchet\")\n\nvar errInvalidRatchet = errors.New(\"Invalid ratchet\")\n\nfunc openRatchet(peer string) (r *goax.Ratchet, err error) {\n\tf, err := os.Open(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\tif err != nil {\n\t\treturn nil, errNoRatchet\n\t}\n\tdefer f.Close()\n\n\tvar q goax.Ratchet\n\tarmorDecoder, err := armor.Decode(f)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Error opening decoder\")\n\t}\n\terr = json.NewDecoder(armorDecoder.Body).Decode(&q)\n\tif err != nil {\n\t\treturn nil, errInvalidRatchet\n\t}\n\n\treturn &q, nil\n}\n\nfunc createRatchet(peer string) (r *goax.Ratchet, err error) {\n\tmyIdentityKeyPrivate := getPrivateKey()\n\tvar asArray [32]byte\n\tcopy(asArray[:], myIdentityKeyPrivate)\n\tr = goax.New(rand.Reader, asArray)\n\terr = saveRatchet(r, peer)\n\treturn r, err\n}\n\nfunc saveRatchet(r *goax.Ratchet, peer string) error {\n\tos.MkdirAll(\"ratchets\", 0755)\n\tf, err := os.Create(path.Join(\"ratchets\", hex.EncodeToString([]byte(peer))))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't create ratchet file\")\n\t}\n\tdefer f.Close()\n\n\tarmorEncoder, err := armor.Encode(f, \"GOAX RATCHET\", nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't create armor encoder\")\n\t}\n\terr = json.NewEncoder(armorEncoder).Encode(r)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't marshall ratchet\")\n\t}\n\terr = armorEncoder.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Couldn't close armor encoder\")\n\t}\n\treturn nil\n}\n\nfunc sendRatchet(peer string) {\n\tr, err := createRatchet(peer)\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't create ratchet for %s: %s\", peer, err)\n\t}\n\terr = saveRatchet(r, peer)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't save ratchet, will have to try another time\", err)\n\t}\n\tkx, err := r.GetKeyExchangeMaterial()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get key exchange material\", err)\n\t}\n\tencoder, err := armor.Encode(os.Stdout, \"KEY EXCHANGE MATERIAL\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get armor encoder\")\n\t}\n\n\tjson.NewEncoder(encoder).Encode(kx)\n\tencoder.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/yhat\/gobenchdb\/benchdb\"\n)\n\nvar (\n\tconn = flag.String(\"conn\", \"\", \"postgres database connection string\")\n\ttable = flag.String(\"table\", \"\", \"postgres table name\")\n\ttestBench = flag.String(\"test.bench\", \".\",\n\t\t\"run only those benchmarks matching the regular expression\")\n)\n\nconst Postgres = \"postgres\"\n\nvar usage = `Usage: gobenchdb [options...]\n\nOptions:\n -conn postgres database connection string\n -table postgres table name\n -test.bench run only those benchmarks matching the regular expression`\n\nfunc main() {\n\t\/\/ Parse command line args\n\tflag.Parse()\n\tc := *conn\n\tt := *table\n\ttregex := *testBench\n\n\tif c == \"\" {\n\t\tUsageExit(\"database conn must be specified\")\n\n\t}\n\tif t == \"\" {\n\t\tUsageExit(\"database table must be specified\")\n\t}\n\n\t\/\/ write data in csv format to stdout\n\t\/\/ TODO: convert this to write to a sql db\n\twriter := csv.NewWriter(os.Stdout)\n\tdefer writer.Flush()\n\t_, err := (&benchdb.BenchDB{\n\t\tRegex: tregex,\n\t\tDriver: Postgres,\n\t\tConnStr: c,\n\t\tTableName: t,\n\t\tCsvWriter: *writer,\n\t}).Run()\n\tif err != nil {\n\t\tUsageExit(err.Error())\n\t}\n}\n\nfunc UsageExit(message string) {\n\tif message != \"\" {\n\t\tfmt.Fprintf(os.Stderr, message)\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t}\n\tfmt.Fprintf(os.Stderr, usage)\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tos.Exit(1)\n}\n<commit_msg>no longer writing usage to stderr when there is an error<commit_after>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/yhat\/gobenchdb\/benchdb\"\n)\n\nvar (\n\tconn = flag.String(\"conn\", \"\", \"postgres database connection string\")\n\ttable = flag.String(\"table\", \"\", \"postgres table name\")\n\ttestBench = flag.String(\"test.bench\", \".\",\n\t\t\"run only those benchmarks matching the regular expression\")\n)\n\nconst Postgres = \"postgres\"\n\nvar usage = `Usage: gobenchdb [options...]\n\nOptions:\n -conn postgres database connection string\n -table postgres table name\n -test.bench run only those benchmarks matching the regular expression`\n\nfunc main() {\n\t\/\/ Parse command line args\n\tflag.Parse()\n\tc := *conn\n\tt := *table\n\ttregex := *testBench\n\n\tif c == \"\" {\n\t\tUsageExit(\"database conn must be specified\")\n\n\t}\n\tif t == \"\" {\n\t\tUsageExit(\"database table must be specified\")\n\t}\n\n\t\/\/ write data in csv format to stdout\n\t\/\/ TODO: convert this to write to a sql db\n\twriter := csv.NewWriter(os.Stdout)\n\tdefer writer.Flush()\n\t_, err := (&benchdb.BenchDB{\n\t\tRegex: tregex,\n\t\tDriver: Postgres,\n\t\tConnStr: c,\n\t\tTableName: t,\n\t\tCsvWriter: *writer,\n\t}).Run()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc UsageExit(message string) {\n\tif message != \"\" {\n\t\tfmt.Fprintf(os.Stderr, message)\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t}\n\tfmt.Fprintf(os.Stderr, usage)\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/datastore\"\n\n\tgcutil \"github.com\/nyaxt\/otaru\/gcloud\/util\"\n\t\"github.com\/nyaxt\/otaru\/logger\"\n)\n\nvar lklog = logger.Registry().Category(\"globallock\")\n\nconst kindGlobalLock = \"OtaruGlobalLock\"\n\ntype lockEntry struct {\n\tCreatedAt time.Time `datastore:,noindex`\n\tHostName string `datastore:,noindex`\n\tInfo string `datastore:,noindex`\n}\n\ntype GlobalLocker struct {\n\tcfg *Config\n\trootKey *datastore.Key\n\tlockEntryKey *datastore.Key\n\n\tlockEntry\n}\n\nfunc NewGlobalLocker(cfg *Config, hostname string, info string) *GlobalLocker {\n\tl := &GlobalLocker{\n\t\tcfg: cfg,\n\t\trootKey: datastore.NewKey(ctxNoNamespace, kindGlobalLock, cfg.rootKeyStr, 0, nil),\n\t\tlockEntry: lockEntry{\n\t\t\tHostName: hostname,\n\t\t\tInfo: info,\n\t\t},\n\t}\n\tl.lockEntryKey = datastore.NewKey(ctxNoNamespace, kindGlobalLock, \"\", 1, l.rootKey)\n\treturn l\n}\n\ntype ErrLockTaken struct {\n\tCreatedAt time.Time\n\tHostName string\n\tInfo string\n}\n\nvar _ = error(&ErrLockTaken{})\n\nfunc (e *ErrLockTaken) Error() string {\n\treturn fmt.Sprintf(\"GlobalLock is taken by host \\\"%s\\\" at %s. Info: %s\", e.HostName, e.CreatedAt, e.Info)\n}\n\n\/\/ Lock attempts to acquire the global lock.\n\/\/ If the lock was already taken by other GlobalLocker instance, it will return an ErrLockTaken.\nfunc (l *GlobalLocker) tryLockOnce() error {\n\tstart := time.Now()\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != datastore.ErrNoSuchEntity {\n\t\tdstx.Rollback()\n\t\tif err == nil {\n\t\t\treturn &ErrLockTaken{CreatedAt: e.CreatedAt, HostName: e.HostName, Info: e.Info}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tl.lockEntry.CreatedAt = start\n\tif _, err := dstx.Put(l.lockEntryKey, &l.lockEntry); err != nil {\n\t\tdstx.Rollback()\n\t\treturn err\n\t}\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.tryLockOnce(%+v) took %s.\", l.lockEntry, time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) Lock() (err error) {\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.tryLockOnce()\n\t}, lklog)\n}\n\n\/\/ ForceUnlock releases the global lock entry forcibly, even if it was held by other GlobalLocker instance.\n\/\/ If there was no lock, ForceUnlock will log an warning, but return no error.\nfunc (l *GlobalLocker) forceUnlockOnce() error {\n\tstart := time.Now()\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tlogger.Warningf(lklog, \"GlobalLocker.ForceUnlock(): Force unlocking existing lock entry: %+v\", e)\n\t}\n\tif err := dstx.Delete(l.lockEntryKey); err != nil {\n\t\tdstx.Rollback()\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\tlogger.Warningf(lklog, \"GlobalLocker.ForceUnlock(): Warning: There was no global lock taken.\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.ForceUnlock() took %s.\", time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) ForceUnlock() error {\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.forceUnlockOnce()\n\t}, lklog)\n}\n\nvar ErrNoLock = errors.New(\"Attempted unlock, but couldn't find any lock entry.\")\n\nfunc closeEnough(a, b time.Time) bool {\n\treturn math.Abs(a.Sub(b).Seconds()) < 2\n}\n\nconst (\n\tcheckCreatedAt = false\n\tignoreCreatedAt = true\n)\n\n\/\/ Unlock releases the global lock previously taken by this GlobalLocker.\n\/\/ If the lock was taken by other GlobalLocker, Unlock will fail with ErrLockTaken.\n\/\/ If there was no lock, Unlock will fail with ErrNoLock.\nfunc (l *GlobalLocker) Unlock() error {\n\treturn l.unlockInternal(checkCreatedAt)\n}\n\nfunc (l *GlobalLocker) UnlockIgnoreCreatedAt() error {\n\treturn l.unlockInternal(ignoreCreatedAt)\n}\n\nfunc checkLock(a, b lockEntry, checkCreatedAtFlag bool) bool {\n\tif checkCreatedAt && !closeEnough(a.CreatedAt, b.CreatedAt) {\n\t\treturn false\n\t}\n\tif a.HostName != b.HostName {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (l *GlobalLocker) unlockInternalOnce(checkCreatedAtFlag bool) error {\n\tstart := time.Now()\n\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tdstx.Rollback()\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\treturn ErrNoLock\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !checkLock(l.lockEntry, e, checkCreatedAtFlag) {\n\t\tdstx.Rollback()\n\t\treturn &ErrLockTaken{CreatedAt: e.CreatedAt, HostName: e.HostName, Info: e.Info}\n\t}\n\tif err := dstx.Delete(l.lockEntryKey); err != nil {\n\t\tdstx.Rollback()\n\t\treturn err\n\t}\n\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.Unlock(%+v) took %s.\", l.lockEntry, time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) unlockInternal(checkCreatedAtFlag bool) error {\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.unlockInternalOnce(checkCreatedAtFlag)\n\t}, lklog)\n}\n\nfunc (l *GlobalLocker) tryQueryOnce() (lockEntry, error) {\n\tvar e lockEntry\n\tstart := time.Now()\n\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn e, err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn e, err\n\t}\n\tdefer dstx.Rollback()\n\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\treturn e, ErrNoLock\n\t\t} else {\n\t\t\treturn e, err\n\t\t}\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.Query() took %s.\", time.Since(start))\n\treturn e, nil\n}\n\nfunc (l *GlobalLocker) Query() (le lockEntry, err error) {\n\terr = gcutil.RetryIfNeeded(func() error {\n\t\tle, err = l.tryQueryOnce()\n\t\treturn err\n\t}, lklog)\n\treturn\n}\n<commit_msg>datastore ops start log<commit_after>package datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/datastore\"\n\n\tgcutil \"github.com\/nyaxt\/otaru\/gcloud\/util\"\n\t\"github.com\/nyaxt\/otaru\/logger\"\n)\n\nvar lklog = logger.Registry().Category(\"globallock\")\n\nconst kindGlobalLock = \"OtaruGlobalLock\"\n\ntype lockEntry struct {\n\tCreatedAt time.Time `datastore:,noindex`\n\tHostName string `datastore:,noindex`\n\tInfo string `datastore:,noindex`\n}\n\ntype GlobalLocker struct {\n\tcfg *Config\n\trootKey *datastore.Key\n\tlockEntryKey *datastore.Key\n\n\tlockEntry\n}\n\nfunc NewGlobalLocker(cfg *Config, hostname string, info string) *GlobalLocker {\n\tl := &GlobalLocker{\n\t\tcfg: cfg,\n\t\trootKey: datastore.NewKey(ctxNoNamespace, kindGlobalLock, cfg.rootKeyStr, 0, nil),\n\t\tlockEntry: lockEntry{\n\t\t\tHostName: hostname,\n\t\t\tInfo: info,\n\t\t},\n\t}\n\tl.lockEntryKey = datastore.NewKey(ctxNoNamespace, kindGlobalLock, \"\", 1, l.rootKey)\n\treturn l\n}\n\ntype ErrLockTaken struct {\n\tCreatedAt time.Time\n\tHostName string\n\tInfo string\n}\n\nvar _ = error(&ErrLockTaken{})\n\nfunc (e *ErrLockTaken) Error() string {\n\treturn fmt.Sprintf(\"GlobalLock is taken by host \\\"%s\\\" at %s. Info: %s\", e.HostName, e.CreatedAt, e.Info)\n}\n\n\/\/ Lock attempts to acquire the global lock.\n\/\/ If the lock was already taken by other GlobalLocker instance, it will return an ErrLockTaken.\nfunc (l *GlobalLocker) tryLockOnce() error {\n\tstart := time.Now()\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != datastore.ErrNoSuchEntity {\n\t\tdstx.Rollback()\n\t\tif err == nil {\n\t\t\treturn &ErrLockTaken{CreatedAt: e.CreatedAt, HostName: e.HostName, Info: e.Info}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tl.lockEntry.CreatedAt = start\n\tif _, err := dstx.Put(l.lockEntryKey, &l.lockEntry); err != nil {\n\t\tdstx.Rollback()\n\t\treturn err\n\t}\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.tryLockOnce(%+v) took %s.\", l.lockEntry, time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) Lock() (err error) {\n\tlogger.Infof(lklog, \"GlobalLocker.Lock() started.\")\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.tryLockOnce()\n\t}, lklog)\n}\n\n\/\/ ForceUnlock releases the global lock entry forcibly, even if it was held by other GlobalLocker instance.\n\/\/ If there was no lock, ForceUnlock will log an warning, but return no error.\nfunc (l *GlobalLocker) forceUnlockOnce() error {\n\tstart := time.Now()\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tlogger.Warningf(lklog, \"GlobalLocker.ForceUnlock(): Force unlocking existing lock entry: %+v\", e)\n\t}\n\tif err := dstx.Delete(l.lockEntryKey); err != nil {\n\t\tdstx.Rollback()\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\tlogger.Warningf(lklog, \"GlobalLocker.ForceUnlock(): Warning: There was no global lock taken.\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.forceUnlockOnce() took %s.\", time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) ForceUnlock() error {\n\tlogger.Infof(lklog, \"GlobalLocker.ForceUnlock() started.\")\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.forceUnlockOnce()\n\t}, lklog)\n}\n\nvar ErrNoLock = errors.New(\"Attempted unlock, but couldn't find any lock entry.\")\n\nfunc closeEnough(a, b time.Time) bool {\n\treturn math.Abs(a.Sub(b).Seconds()) < 2\n}\n\nconst (\n\tcheckCreatedAt = false\n\tignoreCreatedAt = true\n)\n\n\/\/ Unlock releases the global lock previously taken by this GlobalLocker.\n\/\/ If the lock was taken by other GlobalLocker, Unlock will fail with ErrLockTaken.\n\/\/ If there was no lock, Unlock will fail with ErrNoLock.\nfunc (l *GlobalLocker) Unlock() error {\n\tlogger.Infof(lklog, \"GlobalLocker.Unlock() started.\")\n\treturn l.unlockInternal(checkCreatedAt)\n}\n\nfunc (l *GlobalLocker) UnlockIgnoreCreatedAt() error {\n\tlogger.Infof(lklog, \"GlobalLocker.UnlockIgnoreCreatedAt() started.\")\n\treturn l.unlockInternal(ignoreCreatedAt)\n}\n\nfunc checkLock(a, b lockEntry, checkCreatedAtFlag bool) bool {\n\tif checkCreatedAt && !closeEnough(a.CreatedAt, b.CreatedAt) {\n\t\treturn false\n\t}\n\tif a.HostName != b.HostName {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (l *GlobalLocker) unlockInternalOnce(checkCreatedAtFlag bool) error {\n\tstart := time.Now()\n\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e lockEntry\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tdstx.Rollback()\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\treturn ErrNoLock\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !checkLock(l.lockEntry, e, checkCreatedAtFlag) {\n\t\tdstx.Rollback()\n\t\treturn &ErrLockTaken{CreatedAt: e.CreatedAt, HostName: e.HostName, Info: e.Info}\n\t}\n\tif err := dstx.Delete(l.lockEntryKey); err != nil {\n\t\tdstx.Rollback()\n\t\treturn err\n\t}\n\n\tif _, err := dstx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.unlockInternalOnce(%+v) took %s.\", l.lockEntry, time.Since(start))\n\treturn nil\n}\n\nfunc (l *GlobalLocker) unlockInternal(checkCreatedAtFlag bool) error {\n\treturn gcutil.RetryIfNeeded(func() error {\n\t\treturn l.unlockInternalOnce(checkCreatedAtFlag)\n\t}, lklog)\n}\n\nfunc (l *GlobalLocker) tryQueryOnce() (lockEntry, error) {\n\tvar e lockEntry\n\tstart := time.Now()\n\n\tcli, err := l.cfg.getClient(context.Background())\n\tif err != nil {\n\t\treturn e, err\n\t}\n\tdstx, err := cli.NewTransaction(context.Background(), datastore.Serializable)\n\tif err != nil {\n\t\treturn e, err\n\t}\n\tdefer dstx.Rollback()\n\n\tif err := dstx.Get(l.lockEntryKey, &e); err != nil {\n\t\tif err == datastore.ErrNoSuchEntity {\n\t\t\treturn e, ErrNoLock\n\t\t} else {\n\t\t\treturn e, err\n\t\t}\n\t}\n\n\tlogger.Infof(lklog, \"GlobalLocker.tryQueryOnce() took %s.\", time.Since(start))\n\treturn e, nil\n}\n\nfunc (l *GlobalLocker) Query() (le lockEntry, err error) {\n\tlogger.Infof(lklog, \"GlobalLocker.Query() started.\")\n\terr = gcutil.RetryIfNeeded(func() error {\n\t\tle, err = l.tryQueryOnce()\n\t\treturn err\n\t}, lklog)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package gode\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n)\n\nconst registry = \"https:\/\/cli-npm.heroku.com\"\n\nvar rootPath string\nvar lockPath string\nvar nodePath string\nvar npmPath string\n\n\/\/ SetRootPath sets the root for gode\nfunc SetRootPath(root string) {\n\trootPath = root\n\tbase := filepath.Join(root, getLatestInstalledNode())\n\tnodePath = nodePathFromBase(base)\n\tnpmPath = npmPathFromBase(base)\n\tlockPath = filepath.Join(rootPath, \"node.lock\")\n}\n\nfunc getLatestInstalledNode() string {\n\tnodes := getNodeInstalls()\n\tif len(nodes) == 0 {\n\t\treturn \"\"\n\t}\n\tlatest := nodes[len(nodes)-1]\n\t\/\/ ignore ancient versions\n\tif strings.HasPrefix(latest, \"node-v0\") {\n\t\treturn \"\"\n\t}\n\treturn latest\n}\n\nfunc getNodeInstalls() []string {\n\tnodes := []string{}\n\tfiles, _ := ioutil.ReadDir(rootPath)\n\tfor _, f := range files {\n\t\tname := f.Name()\n\t\tif f.IsDir() && strings.HasPrefix(name, \"node-v\") {\n\t\t\tnodes = append(nodes, name)\n\t\t}\n\t}\n\tsort.Strings(nodes)\n\treturn nodes\n}\n\nfunc nodePathFromBase(base string) string {\n\tpath := filepath.Join(base, \"bin\", \"node\")\n\tif runtime.GOOS == \"windows\" {\n\t\treturn path + \".exe\"\n\t}\n\treturn path\n}\n\nfunc npmPathFromBase(base string) string {\n\treturn filepath.Join(base, \"lib\", \"node_modules\", \"npm\", \"cli.js\")\n}\n<commit_msg>use HEROKU_NODE_PATH<commit_after>package gode\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n)\n\nconst registry = \"https:\/\/cli-npm.heroku.com\"\n\nvar rootPath string\nvar lockPath string\nvar nodePath string\nvar npmPath string\n\n\/\/ SetRootPath sets the root for gode\nfunc SetRootPath(root string) {\n\trootPath = root\n\tbase := filepath.Join(root, getLatestInstalledNode())\n\tnodePath = nodePathFromBase(base)\n\therokuNodePath := os.Getenv(\"HEROKU_NODE_PATH\")\n\tif herokuNodePath != \"\" {\n\t\tnodePath = herokuNodePath\n\t}\n\tnpmPath = npmPathFromBase(base)\n\tlockPath = filepath.Join(rootPath, \"node.lock\")\n}\n\nfunc getLatestInstalledNode() string {\n\tnodes := getNodeInstalls()\n\tif len(nodes) == 0 {\n\t\treturn \"\"\n\t}\n\tlatest := nodes[len(nodes)-1]\n\t\/\/ ignore ancient versions\n\tif strings.HasPrefix(latest, \"node-v0\") {\n\t\treturn \"\"\n\t}\n\treturn latest\n}\n\nfunc getNodeInstalls() []string {\n\tnodes := []string{}\n\tfiles, _ := ioutil.ReadDir(rootPath)\n\tfor _, f := range files {\n\t\tname := f.Name()\n\t\tif f.IsDir() && strings.HasPrefix(name, \"node-v\") {\n\t\t\tnodes = append(nodes, name)\n\t\t}\n\t}\n\tsort.Strings(nodes)\n\treturn nodes\n}\n\nfunc nodePathFromBase(base string) string {\n\tpath := filepath.Join(base, \"bin\", \"node\")\n\tif runtime.GOOS == \"windows\" {\n\t\treturn path + \".exe\"\n\t}\n\treturn path\n}\n\nfunc npmPathFromBase(base string) string {\n\treturn filepath.Join(base, \"lib\", \"node_modules\", \"npm\", \"cli.js\")\n}\n<|endoftext|>"} {"text":"<commit_before>package gogadgets\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\nvar (\n\tinputs = map[string]CreateInputDevice{\n\t\t\"thermometer\": NewThermometer,\n\t\t\"switch\": NewSwitch,\n\t\t\"flow_meter\": NewFlowMeter,\n\t\t\"xbee\": NewXBee,\n\t}\n\n\toutputs = map[string]CreateOutputDevice{\n\t\t\"alarm\": NewAlarm,\n\t\t\"heater\": NewHeater,\n\t\t\"cooler\": NewCooler,\n\t\t\"thermostat\": NewThermostat,\n\t\t\"boiler\": NewBoiler,\n\t\t\"gpio\": NewGPIO,\n\t\t\"recorder\": NewRecorder,\n\t\t\"pwm\": NewPWM,\n\t\t\"motor\": NewMotor,\n\t\t\"file\": NewFile,\n\t\t\"sms\": NewSMS,\n\t}\n)\n\n\/\/There are several types of Input\/Output devices build into\n\/\/GoGadgets (eg: header, cooler, gpio, thermometer and switch)\n\/\/NewGadget reads a GadgetConfig and creates the correct\n\/\/type of Gadget.\nfunc NewGadget(config *GadgetConfig) (Gadgeter, error) {\n\tif config.Type == \"cron\" {\n\t\treturn newSystemGadget(config)\n\t}\n\tswitch deviceType(config.Pin.Type) {\n\tcase \"input\":\n\t\treturn newInputGadget(config)\n\tcase \"output\":\n\t\treturn newOutputGadget(config)\n\t}\n\treturn nil, fmt.Errorf(\n\t\t\"couldn't build a gadget based on config: %v\",\n\t\tconfig,\n\t)\n}\n\nfunc newSystemGadget(config *GadgetConfig) (Gadgeter, error) {\n\tif config.Type == \"cron\" {\n\t\treturn NewCron(config)\n\t}\n\treturn nil, fmt.Errorf(\"don't know how to build %s\", config.Name)\n}\n\n\/\/Input Gadgets read from input devices and report their values (thermometer\n\/\/is an example).\nfunc newInputGadget(config *GadgetConfig) (gadget *Gadget, err error) {\n\tdev, err := NewInputDevice(&config.Pin)\n\tm := map[bool]string{}\n\tif config.OnValue != \"\" {\n\t\tm[true] = config.OnValue\n\t}\n\n\tif config.OffValue != \"\" {\n\t\tm[false] = config.OffValue\n\t}\n\n\tif err == nil {\n\t\tgadget = &Gadget{\n\t\t\tType: config.Pin.Type,\n\t\t\tLocation: config.Location,\n\t\t\tName: config.Name,\n\t\t\tInput: dev,\n\t\t\tDirection: \"input\",\n\t\t\tUID: fmt.Sprintf(\"%s %s\", config.Location, config.Name),\n\t\t\tinputLookup: m,\n\t\t}\n\t}\n\treturn gadget, err\n}\n\n\/\/Output Gadgets turn devices on and off.\nfunc newOutputGadget(config *GadgetConfig) (gadget *Gadget, err error) {\n\tdev, err := NewOutputDevice(&config.Pin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif cmds := dev.Commands(config.Location, config.Name); cmds != nil {\n\t\tconfig.OnCommands = cmds.On\n\t\tconfig.OffCommands = cmds.Off\n\t} else {\n\t\tif len(config.OnCommands) == 0 {\n\t\t\tconfig.OnCommands = []string{fmt.Sprintf(\"turn on %s %s\", config.Location, config.Name)}\n\t\t}\n\t\tif len(config.OffCommands) == 0 {\n\t\t\tconfig.OffCommands = []string{fmt.Sprintf(\"turn off %s %s\", config.Location, config.Name)}\n\t\t}\n\t}\n\tgadget = &Gadget{\n\t\tType: config.Pin.Type,\n\t\tLocation: config.Location,\n\t\tName: config.Name,\n\t\tDirection: \"output\",\n\t\tOnCommands: config.OnCommands,\n\t\tOffCommands: config.OffCommands,\n\t\tInitialValue: config.InitialValue,\n\t\tOutput: dev,\n\t\tOperator: \">=\",\n\t\tUID: fmt.Sprintf(\"%s %s\", config.Location, config.Name),\n\t\tfilterMessages: config.Pin.Type != \"recorder\" && config.Pin.Type != \"alarm\",\n\t}\n\treturn gadget, nil\n}\n\nfunc RegisterInput(name string, f CreateInputDevice) {\n\tinputs[name] = f\n}\n\nfunc RegisterOutput(name string, f CreateOutputDevice) {\n\toutputs[name] = f\n}\n\ntype CreateInputDevice func(pin *Pin, opts ...func(InputDevice) error) (InputDevice, error)\n\n\/\/Inputdevices are started as goroutines by the Gadget\n\/\/that contains it.\ntype InputDevice interface {\n\tStart(<-chan Message, chan<- Value)\n\tGetValue() *Value\n\tConfig() ConfigHelper\n}\n\ntype Poller interface {\n\tWait() error\n\tStatus() map[string]bool\n}\n\nfunc deviceType(t string) string {\n\t_, ok := inputs[t]\n\tif ok {\n\t\treturn \"input\"\n\t}\n\t_, ok = outputs[t]\n\tif ok {\n\t\treturn \"output\"\n\t}\n\treturn \"\"\n}\n\nfunc NewInputDevice(pin *Pin) (dev InputDevice, err error) {\n\tf, ok := inputs[pin.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"invalid pin type\")\n\t}\n\treturn f(pin)\n}\n\ntype CreateOutputDevice func(pin *Pin) (OutputDevice, error)\n\ntype Commands struct {\n\tOn []string\n\tOff []string\n}\n\n\/\/ OutputDevice turns things on and off. Currently the\ntype OutputDevice interface {\n\tOn(val *Value) error\n\tOff() error\n\tUpdate(msg *Message) bool\n\tStatus() map[string]bool\n\tConfig() ConfigHelper\n\tCommands(string, string) *Commands\n}\n\nfunc NewOutputDevice(pin *Pin) (dev OutputDevice, err error) {\n\tf, ok := outputs[pin.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"invalid pin type\")\n\t}\n\n\tfor k, p := range pin.Pins {\n\t\tf, ok := outputs[p.Type]\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid pin type\")\n\t\t}\n\t\tp.new = f\n\t\tpin.Pins[k] = p\n\t}\n\n\treturn f(pin)\n}\n<commit_msg>comments<commit_after>package gogadgets\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\nvar (\n\tinputs = map[string]CreateInputDevice{\n\t\t\"thermometer\": NewThermometer,\n\t\t\"switch\": NewSwitch,\n\t\t\"flow_meter\": NewFlowMeter,\n\t\t\"xbee\": NewXBee,\n\t}\n\n\toutputs = map[string]CreateOutputDevice{\n\t\t\"alarm\": NewAlarm,\n\t\t\"heater\": NewHeater,\n\t\t\"cooler\": NewCooler,\n\t\t\"thermostat\": NewThermostat,\n\t\t\"boiler\": NewBoiler,\n\t\t\"gpio\": NewGPIO,\n\t\t\"recorder\": NewRecorder,\n\t\t\"pwm\": NewPWM,\n\t\t\"motor\": NewMotor,\n\t\t\"file\": NewFile,\n\t\t\"sms\": NewSMS,\n\t}\n)\n\n\/\/ NewGadget returns a gadget with an input or output device\n\/\/ There are several types of Input\/Output devices build into\n\/\/ GoGadgets (eg: header, cooler, gpio, thermometer and switch)\n\/\/ NewGadget reads a GadgetConfig and creates the correct\n\/\/ type of Gadget.\nfunc NewGadget(config *GadgetConfig) (Gadgeter, error) {\n\tif config.Type == \"cron\" {\n\t\treturn newSystemGadget(config)\n\t}\n\tswitch deviceType(config.Pin.Type) {\n\tcase \"input\":\n\t\treturn newInputGadget(config)\n\tcase \"output\":\n\t\treturn newOutputGadget(config)\n\t}\n\treturn nil, fmt.Errorf(\n\t\t\"couldn't build a gadget based on config: %v\",\n\t\tconfig,\n\t)\n}\n\nfunc newSystemGadget(config *GadgetConfig) (Gadgeter, error) {\n\tif config.Type == \"cron\" {\n\t\treturn NewCron(config)\n\t}\n\treturn nil, fmt.Errorf(\"don't know how to build %s\", config.Name)\n}\n\n\/\/ InputGadgets read from input devices and report their values (thermometer\n\/\/ is an example).\nfunc newInputGadget(config *GadgetConfig) (gadget *Gadget, err error) {\n\tdev, err := NewInputDevice(&config.Pin)\n\tm := map[bool]string{}\n\tif config.OnValue != \"\" {\n\t\tm[true] = config.OnValue\n\t}\n\n\tif config.OffValue != \"\" {\n\t\tm[false] = config.OffValue\n\t}\n\n\tif err == nil {\n\t\tgadget = &Gadget{\n\t\t\tType: config.Pin.Type,\n\t\t\tLocation: config.Location,\n\t\t\tName: config.Name,\n\t\t\tInput: dev,\n\t\t\tDirection: \"input\",\n\t\t\tUID: fmt.Sprintf(\"%s %s\", config.Location, config.Name),\n\t\t\tinputLookup: m,\n\t\t}\n\t}\n\treturn gadget, err\n}\n\n\/\/Output Gadgets turn devices on and off.\nfunc newOutputGadget(config *GadgetConfig) (gadget *Gadget, err error) {\n\tdev, err := NewOutputDevice(&config.Pin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif cmds := dev.Commands(config.Location, config.Name); cmds != nil {\n\t\tconfig.OnCommands = cmds.On\n\t\tconfig.OffCommands = cmds.Off\n\t} else {\n\t\tif len(config.OnCommands) == 0 {\n\t\t\tconfig.OnCommands = []string{fmt.Sprintf(\"turn on %s %s\", config.Location, config.Name)}\n\t\t}\n\t\tif len(config.OffCommands) == 0 {\n\t\t\tconfig.OffCommands = []string{fmt.Sprintf(\"turn off %s %s\", config.Location, config.Name)}\n\t\t}\n\t}\n\tgadget = &Gadget{\n\t\tType: config.Pin.Type,\n\t\tLocation: config.Location,\n\t\tName: config.Name,\n\t\tDirection: \"output\",\n\t\tOnCommands: config.OnCommands,\n\t\tOffCommands: config.OffCommands,\n\t\tInitialValue: config.InitialValue,\n\t\tOutput: dev,\n\t\tOperator: \">=\",\n\t\tUID: fmt.Sprintf(\"%s %s\", config.Location, config.Name),\n\t\tfilterMessages: config.Pin.Type != \"recorder\" && config.Pin.Type != \"alarm\",\n\t}\n\treturn gadget, nil\n}\n\nfunc RegisterInput(name string, f CreateInputDevice) {\n\tinputs[name] = f\n}\n\nfunc RegisterOutput(name string, f CreateOutputDevice) {\n\toutputs[name] = f\n}\n\ntype CreateInputDevice func(pin *Pin, opts ...func(InputDevice) error) (InputDevice, error)\n\n\/\/Inputdevices are started as goroutines by the Gadget\n\/\/that contains it.\ntype InputDevice interface {\n\tStart(<-chan Message, chan<- Value)\n\tGetValue() *Value\n\tConfig() ConfigHelper\n}\n\ntype Poller interface {\n\tWait() error\n\tStatus() map[string]bool\n}\n\nfunc deviceType(t string) string {\n\t_, ok := inputs[t]\n\tif ok {\n\t\treturn \"input\"\n\t}\n\t_, ok = outputs[t]\n\tif ok {\n\t\treturn \"output\"\n\t}\n\treturn \"\"\n}\n\nfunc NewInputDevice(pin *Pin) (dev InputDevice, err error) {\n\tf, ok := inputs[pin.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"invalid pin type\")\n\t}\n\treturn f(pin)\n}\n\ntype CreateOutputDevice func(pin *Pin) (OutputDevice, error)\n\ntype Commands struct {\n\tOn []string\n\tOff []string\n}\n\n\/\/ OutputDevice turns things on and off. Currently the\ntype OutputDevice interface {\n\tOn(val *Value) error\n\tOff() error\n\tUpdate(msg *Message) bool\n\tStatus() map[string]bool\n\tConfig() ConfigHelper\n\tCommands(string, string) *Commands\n}\n\nfunc NewOutputDevice(pin *Pin) (dev OutputDevice, err error) {\n\tf, ok := outputs[pin.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"invalid pin type\")\n\t}\n\n\tfor k, p := range pin.Pins {\n\t\tf, ok := outputs[p.Type]\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid pin type\")\n\t\t}\n\t\tp.new = f\n\t\tpin.Pins[k] = p\n\t}\n\n\treturn f(pin)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"greet\"\n\tapp.Usage = \"fight the loneliness!\"\n\tapp.Action = func(c *cli.Context) error {\n\t\tfmt.Println(\"Hello friend!\")\n\t\treturn nil\n\t}\n\n\tapp.Run(os.Args)\n}\n\nconst Version = \"0.0.1\"\nconst Usage = `\nNAME:\n gorending - Show github trending in Terminal!\n\nUSAGE:\n gorending [language]\n\nVERSION:\n 0.0.1\n\nCOMMANDS:\n help, h Shows a list of commands or help for one command\n\nGLOBAL OPTIONS\n --version Shows version information\n --usage Shows usages\n`\n\n\/\/ func main() {\n\/\/ \tlang := flag.String(\"lang\", \"\", \"Language that you want\")\n\/\/ \tversion := flag.Bool(\"version\", false, \"Shows version information\")\n\/\/ \tusage := flag.Bool(\"usage\", false, \"Shows usages\")\n\n\/\/ \tflag.Parse()\n\n\/\/ \tif *version {\n\/\/ \t\tfmt.Println(Version)\n\/\/ \t\tos.Exit(0)\n\/\/ \t} else if *usage {\n\/\/ \t\tfmt.Println(Usage)\n\/\/ \t\tos.Exit(0)\n\/\/ \t}\n\n\/\/ \tresp, err := http.Get(\"https:\/\/github.com\/trending\/\" + *lang)\n\/\/ \tif err != nil {\n\/\/ \t\tfmt.Println(\"http transport error is: \", err)\n\/\/ \t}\n\n\/\/ \tbody, err := ioutil.ReadAll(resp.Body)\n\/\/ \tif err != nil {\n\/\/ \t\tfmt.Println(\"read error is: \", err)\n\/\/ \t}\n\n\/\/ \tbodyStr := string(body)\n\n\/\/ \tfmt.Println(bodyStr)\n\/\/ \tfmt.Println(\"lang:\", *lang)\n\/\/ }\n<commit_msg>대략적인 틀은 갖춤. html에서 파싱하는 로직만 작성하면 됨.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gorending\"\n\tapp.Usage = \"Show Github trending in Terminal!\"\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"lang, l\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"language that you want\",\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tlang := c.String(\"lang\")\n\n\t\tresp, err := http.Get(\"https:\/\/github.com\/trending\/\" + lang)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"http error is: \", err)\n\t\t\treturn err\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"read error is: \", err)\n\t\t\treturn err\n\t\t}\n\n\t\tbodyStr := string(body)\n\n\t\tfmt.Println(bodyStr)\n\t\tfmt.Println(\"lang:\", lang)\n\n\t\treturn nil\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n* @Author: Administrator\n* @Date: 2017-07-27 08:47:31\n* @Last Modified by: Administrator\n* @Last Modified time: 2017-07-27 09:06:03\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\/*\n\thttps:\/\/golang.org\/pkg\/math\/rand\/\n\t *\/\n\t\"math\/rand\"\n\t\/*\n\thttps:\/\/golang.org\/pkg\/sync\/\n\t *\/\n\t\"sync\"\n\t\/*\n\t\n\t https:\/\/golang.org\/pkg\/time\/\n\t \n\t*\/\n\t\"time\"\n)\n\n\/\/ WaitGroup 用于等待结束 goroutines\nvar wg sync.WaitGroup\n\nfunc addTable() {\n\n\t\/\/ 结束时把 goroutine 的计数器减一\n\tdefer wg.Done()\n\tfor i := 1; i <= 10; i++ {\n\t\tsleep := rand.Int63n(1000)\n\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\tfmt.Println(\"加法表:\", i)\n\t\tfor j := 1; j <= 10; j++ {\n\t\t\tfmt.Printf(\"\\t%d+%d=%d\\t\", i, j, i+j)\n\t\t}\n\t\tfmt.Println(\"\\n\")\n\t}\n}\n\nfunc multiTable() {\n\n\t\/\/ 结束时把 goroutine 的计数器减一\n\tdefer wg.Done()\n\tfor i := 1; i <= 10; i++ {\n\t\tsleep := rand.Int63n(1000)\n\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\tfmt.Println(\"乘法表:\", i)\n\t\tfor j := 1; j <= 10; j++ {\n\t\t\tfmt.Printf(\"\\t%d*%d=%d\\t\", i, j, i*j)\n\t\t}\n\t\tfmt.Println(\"\\n\")\n\t}\n}\n\nfunc main() {\n\n\t\/\/ WaitGroup 设置为2, 表示有两个goroutine。\n\twg.Add(2)\n\tfmt.Println(\"goroutines 开始\")\n\t\n\tgo addTable()\n\tgo multiTable()\n\n\n\tfmt.Println(\"等待结束\")\n\twg.Wait()\n\tfmt.Println(\"\\n结束了\")\n\n\n}<commit_msg>update goroutine.go<commit_after>\/*\n* @Author: Administrator\n* @Date: 2017-07-27 08:47:31\n* @Last Modified by: Administrator\n* @Last Modified time: 2017-07-27 09:13:17\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\/*\n\thttps:\/\/golang.org\/pkg\/math\/rand\/\n\t *\/\n\t\"math\/rand\"\n\t\/*\n\thttps:\/\/golang.org\/pkg\/sync\/\n\t *\/\n\t\"sync\"\n\t\/*\n\t\n\t https:\/\/golang.org\/pkg\/time\/\n\t \n\t*\/\n\t\"time\"\n)\n\n\/\/ WaitGroup 用于等待结束 goroutines\nvar wg sync.WaitGroup\n\n\/*\n加法表\n *\/\nfunc addTable() {\n\n\t\/\/ 结束时把 goroutine 的计数器减一\n\tdefer wg.Done()\n\tfor i := 1; i <= 10; i++ {\n\t\tsleep := rand.Int63n(1000)\n\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\tfmt.Println(\"加法表:\", i)\n\t\tfor j := 1; j <= 10; j++ {\n\t\t\tfmt.Printf(\"\\t%d+%d=%d\\t\", i, j, i+j)\n\t\t}\n\t\tfmt.Println(\"\\n\")\n\t}\n}\n\n\/*\n乘法表\n *\/\nfunc multiTable() {\n\n\t\/\/ 结束时把 goroutine 的计数器减一\n\tdefer wg.Done()\n\tfor i := 1; i <= 10; i++ {\n\t\tsleep := rand.Int63n(1000)\n\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\tfmt.Println(\"乘法表:\", i)\n\t\tfor j := 1; j <= 10; j++ {\n\t\t\tfmt.Printf(\"\\t%d*%d=%d\\t\", i, j, i*j)\n\t\t}\n\t\tfmt.Println(\"\\n\")\n\t}\n}\n\nfunc main() {\n\n\t\/\/ WaitGroup 设置为2, 表示有两个goroutine。\n\twg.Add(2)\n\tfmt.Println(\"goroutines 开始\")\n\t\n\tgo addTable()\n\tgo multiTable()\n\n\n\tfmt.Println(\"等待结束\")\n\twg.Wait()\n\tfmt.Println(\"\\n结束了\")\n\n\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"os\"\n \"fmt\"\n \"flag\"\n \"io\/ioutil\"\n \"strings\"\n)\n\nconst gravizoBegin string = \"(http:\/\/g.gravizo.com\/svg?\"\nconst gravizoEnd string = \"enduml)\"\n\nvar encoder = strings.NewReplacer(\";\", \"%3B\", \" \", \"%20\", \"\\n\", \"%0A\", \"@\", \"%40\",\n \"(\", \"%28\", \")\", \"%29\", \"*\", \"%2A\", \"\\\\\", \"%5C\")\nvar decoder = strings.NewReplacer(\"%3B\", \";\", \"%20\", \" \", \"%0A\", \"\\n\", \"%40\", \"@\",\n \"%2A\", \"*\", \"%5C\", \"\\\\\")\n\nfunc check(err error) {\n if err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n}\n\nfunc convert(filename string, replacer *strings.Replacer, backup bool) {\n buffer, err := ioutil.ReadFile(filename)\n check(err)\n\n if backup {\n err = ioutil.WriteFile(fmt.Sprint(filename + \".bak\"), buffer, 0644)\n check(err)\n }\n\n text := string(buffer)\n\n for _, slice := range strings.Split(text, gravizoBegin) {\n if strings.Contains(slice, gravizoEnd) {\n subSlice := strings.Split(slice, gravizoEnd)\n if len(subSlice) > 0 {\n gravizoText := subSlice[0]\n convertedText := replacer.Replace(gravizoText)\n text = strings.Replace(text, gravizoText, convertedText, -1)\n }\n }\n }\n\n err = ioutil.WriteFile(filename, []byte(text), 0644)\n check(err)\n}\n\nfunc main() {\n encode := flag.String(\"e\", \"\", \"Encode the given GitHub Markdown file\")\n decode := flag.String(\"d\", \"\", \"Decode the given GitHub Markdown file\")\n backup := flag.Bool(\"b\", true, \"Backup GitHub Markdown file before encode\/decode\")\n\n flag.Parse()\n\n if len(*encode) > 0 {\n convert(*encode, encoder, *backup)\n } else if len(*decode) > 0 {\n convert(*decode, decoder, *backup)\n }\n}\n<commit_msg>find closing brace balanced<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst gravizoBegin string = \"(http:\/\/g.gravizo.com\/svg?\"\n\n\/\/const gravizoEnd string = \"enduml)\"\n\nvar encoder = strings.NewReplacer(\";\", \"%3B\", \" \", \"%20\", \"\\n\", \"%0A\", \"@\", \"%40\",\n\t\"(\", \"%28\", \")\", \"%29\", \"*\", \"%2A\", \"\\\\\", \"%5C\")\nvar decoder = strings.NewReplacer(\"%3B\", \";\", \"%20\", \" \", \"%0A\", \"\\n\", \"%40\", \"@\",\n\t\"%2A\", \"*\", \"%5C\", \"\\\\\")\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc findMatchingClose(text string, opening rune, closing rune) int {\n\topeningCount := 1\n\tfor i, ch := range text {\n\t\tif ch == opening {\n\t\t\topeningCount++\n\t\t} else if ch == closing {\n\t\t\topeningCount--\n\t\t\tif openingCount == 0 {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc convert(filename string, replacer *strings.Replacer, backup bool) {\n\tbuffer, err := ioutil.ReadFile(filename)\n\tcheck(err)\n\n\tif backup {\n\t\terr = ioutil.WriteFile(fmt.Sprint(filename+\".bak\"), buffer, 0644)\n\t\tcheck(err)\n\t}\n\n\ttext := string(buffer)\n\n\tfor offset, slice := range strings.Split(text, gravizoBegin) {\n\t\tif offset == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tcloseOffset := findMatchingClose(slice, '(', ')')\n\t\tif closeOffset > 0 {\n\t\t\tgravizoText := slice[:closeOffset]\n\t\t\tif len(gravizoText) > 0 {\n\t\t\t\tconvertedText := replacer.Replace(gravizoText)\n\t\t\t\ttext = strings.Replace(text, gravizoText, convertedText, -1)\n\t\t\t}\n\t\t}\n\t}\n\n\terr = ioutil.WriteFile(filename, []byte(text), 0644)\n\tcheck(err)\n}\n\nfunc main() {\n\tencode := flag.String(\"e\", \"\", \"Encode the given GitHub Markdown file\")\n\tdecode := flag.String(\"d\", \"\", \"Decode the given GitHub Markdown file\")\n\tbackup := flag.Bool(\"b\", true, \"Backup GitHub Markdown file before encode\/decode\")\n\n\tflag.Parse()\n\n\tif len(*encode) > 0 {\n\t\tconvert(*encode, encoder, *backup)\n\t} else if len(*decode) > 0 {\n\t\tconvert(*decode, decoder, *backup)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/jacobsa\/gcloud\/httputil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/googleapi\"\n\tstoragev1 \"google.golang.org\/api\/storage\/v1\"\n)\n\nfunc (b *bucket) makeUpdateObjectBody(\n\treq *UpdateObjectRequest) (rc io.ReadCloser, err error) {\n\t\/\/ Set up a map representing the JSON object we want to send to GCS. For now,\n\t\/\/ we don't treat empty strings specially.\n\tjsonMap := make(map[string]interface{})\n\n\tif req.ContentType != nil {\n\t\tjsonMap[\"contentType\"] = req.ContentType\n\t}\n\n\tif req.ContentEncoding != nil {\n\t\tjsonMap[\"contentEncoding\"] = req.ContentEncoding\n\t}\n\n\tif req.ContentLanguage != nil {\n\t\tjsonMap[\"contentLanguage\"] = req.ContentLanguage\n\t}\n\n\tif req.CacheControl != nil {\n\t\tjsonMap[\"cacheControl\"] = req.CacheControl\n\t}\n\n\t\/\/ Implement the convention that a pointer to an empty string means to delete\n\t\/\/ the field (communicated to GCS by setting it to null in the JSON).\n\tfor k, v := range jsonMap {\n\t\tif *(v.(*string)) == \"\" {\n\t\t\tjsonMap[k] = nil\n\t\t}\n\t}\n\n\t\/\/ Add a field for user metadata if appropriate.\n\tif req.Metadata != nil {\n\t\tjsonMap[\"metadata\"] = req.Metadata\n\t}\n\n\t\/\/ Set up a reader.\n\tr, err := googleapi.WithoutDataWrapper.JSONReader(jsonMap)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"JSONReader\", err)\n\t\treturn\n\t}\n\n\t\/\/ Set up a ReadCloser.\n\trc = ioutil.NopCloser(r)\n\n\treturn\n}\n\nfunc (b *bucket) UpdateObject(\n\tctx context.Context,\n\treq *UpdateObjectRequest) (o *Object, err error) {\n\t\/\/ Construct an appropriate URL (cf. http:\/\/goo.gl\/B46IDy).\n\topaque := fmt.Sprintf(\n\t\t\"\/\/www.googleapis.com\/storage\/v1\/b\/%s\/o\/%s\",\n\t\thttputil.EncodePathSegment(b.Name()),\n\t\thttputil.EncodePathSegment(req.Name))\n\n\tquery := make(url.Values)\n\tquery.Set(\"projection\", \"full\")\n\tif req.Generation != 0 {\n\t\tquery.Set(\"generation\", fmt.Sprintf(\"%d\", req.Generation))\n\t}\n\n\turl := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"www.googleapis.com\",\n\t\tOpaque: opaque,\n\t\tRawQuery: query.Encode(),\n\t}\n\n\t\/\/ Set up the request body.\n\tbody, err := b.makeUpdateObjectBody(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeUpdateObjectBody: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create an HTTP request.\n\thttpReq, err := httputil.NewRequest(ctx, \"PATCH\", url, body, b.userAgent)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"httputil.NewRequest: %v\", err)\n\t\treturn\n\t}\n\n\thttpReq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Execute the HTTP request.\n\thttpRes, err := b.client.Do(httpReq)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer googleapi.CloseBody(httpRes)\n\n\t\/\/ Check for HTTP-level errors.\n\tif err = googleapi.CheckResponse(httpRes); err != nil {\n\t\t\/\/ Special case: handle not found errors.\n\t\tif typed, ok := err.(*googleapi.Error); ok {\n\t\t\tif typed.Code == http.StatusNotFound {\n\t\t\t\terr = &NotFoundError{Err: typed}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Parse the response.\n\tvar rawObject *storagev1.Object\n\tif err = json.NewDecoder(httpRes.Body).Decode(&rawObject); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Convert the response.\n\tif o, err = toObject(rawObject); err != nil {\n\t\terr = fmt.Errorf(\"toObject: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n<commit_msg>Added update meta-generation precondition support.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/jacobsa\/gcloud\/httputil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/googleapi\"\n\tstoragev1 \"google.golang.org\/api\/storage\/v1\"\n)\n\nfunc (b *bucket) makeUpdateObjectBody(\n\treq *UpdateObjectRequest) (rc io.ReadCloser, err error) {\n\t\/\/ Set up a map representing the JSON object we want to send to GCS. For now,\n\t\/\/ we don't treat empty strings specially.\n\tjsonMap := make(map[string]interface{})\n\n\tif req.ContentType != nil {\n\t\tjsonMap[\"contentType\"] = req.ContentType\n\t}\n\n\tif req.ContentEncoding != nil {\n\t\tjsonMap[\"contentEncoding\"] = req.ContentEncoding\n\t}\n\n\tif req.ContentLanguage != nil {\n\t\tjsonMap[\"contentLanguage\"] = req.ContentLanguage\n\t}\n\n\tif req.CacheControl != nil {\n\t\tjsonMap[\"cacheControl\"] = req.CacheControl\n\t}\n\n\t\/\/ Implement the convention that a pointer to an empty string means to delete\n\t\/\/ the field (communicated to GCS by setting it to null in the JSON).\n\tfor k, v := range jsonMap {\n\t\tif *(v.(*string)) == \"\" {\n\t\t\tjsonMap[k] = nil\n\t\t}\n\t}\n\n\t\/\/ Add a field for user metadata if appropriate.\n\tif req.Metadata != nil {\n\t\tjsonMap[\"metadata\"] = req.Metadata\n\t}\n\n\t\/\/ Set up a reader.\n\tr, err := googleapi.WithoutDataWrapper.JSONReader(jsonMap)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"JSONReader\", err)\n\t\treturn\n\t}\n\n\t\/\/ Set up a ReadCloser.\n\trc = ioutil.NopCloser(r)\n\n\treturn\n}\n\nfunc (b *bucket) UpdateObject(\n\tctx context.Context,\n\treq *UpdateObjectRequest) (o *Object, err error) {\n\t\/\/ Construct an appropriate URL (cf. http:\/\/goo.gl\/B46IDy).\n\topaque := fmt.Sprintf(\n\t\t\"\/\/www.googleapis.com\/storage\/v1\/b\/%s\/o\/%s\",\n\t\thttputil.EncodePathSegment(b.Name()),\n\t\thttputil.EncodePathSegment(req.Name))\n\n\tquery := make(url.Values)\n\tquery.Set(\"projection\", \"full\")\n\n\tif req.Generation != 0 {\n\t\tquery.Set(\"generation\", fmt.Sprintf(\"%d\", req.Generation))\n\t}\n\n\tif req.MetaGenerationPrecondition != nil {\n\t\tquery.Set(\n\t\t\t\"ifMetagenerationMatch\",\n\t\t\tfmt.Sprintf(\"%d\", *req.MetaGenerationPrecondition))\n\t}\n\n\turl := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"www.googleapis.com\",\n\t\tOpaque: opaque,\n\t\tRawQuery: query.Encode(),\n\t}\n\n\t\/\/ Set up the request body.\n\tbody, err := b.makeUpdateObjectBody(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeUpdateObjectBody: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create an HTTP request.\n\thttpReq, err := httputil.NewRequest(ctx, \"PATCH\", url, body, b.userAgent)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"httputil.NewRequest: %v\", err)\n\t\treturn\n\t}\n\n\thttpReq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Execute the HTTP request.\n\thttpRes, err := b.client.Do(httpReq)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer googleapi.CloseBody(httpRes)\n\n\t\/\/ Check for HTTP-level errors.\n\tif err = googleapi.CheckResponse(httpRes); err != nil {\n\t\t\/\/ Special case: handle not found errors.\n\t\tif typed, ok := err.(*googleapi.Error); ok {\n\t\t\tif typed.Code == http.StatusNotFound {\n\t\t\t\terr = &NotFoundError{Err: typed}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Special case: handle precondition errors.\n\t\tif typed, ok := err.(*googleapi.Error); ok {\n\t\t\tif typed.Code == http.StatusPreconditionFailed {\n\t\t\t\terr = &PreconditionError{Err: typed}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Parse the response.\n\tvar rawObject *storagev1.Object\n\tif err = json.NewDecoder(httpRes.Body).Decode(&rawObject); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Convert the response.\n\tif o, err = toObject(rawObject); err != nil {\n\t\terr = fmt.Errorf(\"toObject: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package yum\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/gonuts\/logger\"\n)\n\n\/\/ RepositoryXMLBackend is a Backend querying YUM XML repositories\ntype RepositoryXMLBackend struct {\n\tName string\n\tPackages map[string][]*Package\n\tProvides map[string][]*Provides\n\tDBName string\n\tPrimary string\n\tRepository *Repository\n\tmsg *logger.Logger\n}\n\nfunc NewRepositoryXMLBackend(repo *Repository) (Backend, error) {\n\tconst dbname = \"primary.xml.gz\"\n\treturn &RepositoryXMLBackend{\n\t\tName: \"RepositoryXMLBackend\",\n\t\tPackages: make(map[string][]*Package),\n\t\tProvides: make(map[string][]*Provides),\n\t\tDBName: dbname,\n\t\tPrimary: filepath.Join(repo.CacheDir, dbname),\n\t\tRepository: repo,\n\t\tmsg: repo.msg,\n\t}, nil\n}\n\n\/\/ YumDataType returns the ID for the data type as used in the repomd.xml file\nfunc (repo *RepositoryXMLBackend) YumDataType() string {\n\treturn \"primary\"\n}\n\n\/\/ Download the DB from server\nfunc (repo *RepositoryXMLBackend) GetLatestDB(url string) error {\n\tvar err error\n\tout, err := os.Create(repo.Primary)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(out, resp.Body)\n\treturn err\n}\n\n\/\/ Check whether the DB is there\nfunc (repo *RepositoryXMLBackend) HasDB() bool {\n\treturn path_exists(repo.Primary)\n}\n\n\/\/ Load loads the DB\nfunc (repo *RepositoryXMLBackend) LoadDB() error {\n\tvar err error\n\n\trepo.msg.Debugf(\"start parsing metadata XML file... (%s)\\n\", repo.Primary)\n\ttype xmlTree struct {\n\t\tXMLName xml.Name `xml:\"metadata\"`\n\t\tPackages []struct {\n\t\t\tType string `xml:\"type,attr\"`\n\t\t\tName string `xml:\"name\"`\n\t\t\tArch string `xml:\"arch\"`\n\n\t\t\tVersion struct {\n\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t} `xml:\"version\"`\n\n\t\t\tChecksum struct {\n\t\t\t\tValue string `xml:\",innerxml\"`\n\t\t\t\tType string `xml:\"type,attr\"`\n\t\t\t\tPkgId string `xml:\"pkgid,attr\"`\n\t\t\t} `xml:\"checksum\"`\n\n\t\t\tSummary string `xml:\"summary\"`\n\t\t\tDescr string `xml:\"description\"`\n\t\t\tPackager string `xml:\"packager\"`\n\t\t\tUrl string `xml:\"url\"`\n\n\t\t\tTime struct {\n\t\t\t\tFile string `xml:\"file,attr\"`\n\t\t\t\tBuild string `xml:\"build,attr\"`\n\t\t\t} `xml:\"time\"`\n\n\t\t\tSize struct {\n\t\t\t\tPackage int64 `xml:\"package,attr\"`\n\t\t\t\tInstalled int64 `xml:\"installed,attr\"`\n\t\t\t\tArchive int64 `xml:\"archive,attr\"`\n\t\t\t} `xml:\"size\"`\n\n\t\t\tLocation struct {\n\t\t\t\tHref string `xml:\"href,attr\"`\n\t\t\t} `xml:\"location\"`\n\n\t\t\tFormat struct {\n\t\t\t\tLicense string `xml:\"license\"`\n\t\t\t\tVendor string `xml:\"vendor\"`\n\t\t\t\tGroup string `xml:\"group\"`\n\t\t\t\tBuildHost string `xml:\"buildhost\"`\n\t\t\t\tSourceRpm string `xml:\"sourcerpm\"`\n\n\t\t\t\tHeaderRange struct {\n\t\t\t\t\tBeg int64 `xml:\"start,attr\"`\n\t\t\t\t\tEnd int64 `xml:\"end,attr\"`\n\t\t\t\t} `xml:\"header-range\"`\n\n\t\t\t\tProvides struct {\n\t\t\t\t\tItems []struct {\n\t\t\t\t\t\tName string `xml:\"name,attr\"`\n\t\t\t\t\t\tFlags string `xml:\"flags,attr\"`\n\t\t\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t\t\t} `xml:\"entry\"`\n\t\t\t\t} `xml:\"provides\"`\n\n\t\t\t\tRequires struct {\n\t\t\t\t\tItems []struct{\n\t\t\t\t\t\tName string `xml:\"name,attr\"`\n\t\t\t\t\t\tFlags string `xml:\"flags,attr\"`\n\t\t\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t\t\t\tPre string `xml:\"pre,attr\"`\n\t\t\t\t\t} `xml:\"entry\"`\n\t\t\t\t} `xml:\"requires\"`\n\n\t\t\t\tFiles []string `xml:\"file\"`\n\t\t\t} `xml:\"format\"`\n\t\t} `xml:\"package\"`\n\t}\n\n\t\/\/ load the yum XML package list\n\tf, err := os.Open(repo.Primary)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tvar r io.Reader\n\tif rr, err := gzip.NewReader(f); err != nil {\n\t\tif err != gzip.ErrHeader {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ perhaps not a compressed file after all...\n\t\t_, err = f.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr = f\n\t} else {\n\t\tr = rr\n\t\tdefer rr.Close()\n\t}\n\n\tvar tree xmlTree\n\terr = xml.NewDecoder(r).Decode(&tree)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, xml := range tree.Packages {\n\t\tpkg := NewPackage(xml.Name, xml.Version.Version, xml.Version.Release, xml.Version.Epoch)\n\t\tpkg.arch = xml.Arch\n\t\tpkg.group = xml.Format.Group\n\t\tpkg.location = xml.Location.Href\n\t\tfor _, v := range xml.Format.Provides.Items {\n\t\t\tprov := NewProvides(\n\t\t\t\tv.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.Release,\n\t\t\t\tv.Epoch,\n\t\t\t\tv.Flags,\n\t\t\t\tpkg,\n\t\t\t)\n\t\t\tpkg.provides = append(pkg.provides, prov)\n\n\t\t\tif !str_in_slice(prov.Name(), g_IGNORED_PACKAGES) {\n\t\t\t\trepo.Provides[prov.Name()] = append(repo.Provides[prov.Name()], prov)\n\t\t\t}\n\t\t}\n\n\t\tfor _, v := range xml.Format.Requires.Items {\n\t\t\treq := NewRequires(\n\t\t\t\tv.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.Release,\n\t\t\t\tv.Epoch,\n\t\t\t\tv.Flags,\n\t\t\t\tv.Pre,\n\t\t\t)\n\t\t\tpkg.requires = append(pkg.requires, req)\n\t\t}\n\t\tpkg.repository = repo.Repository\n\n\t\t\/\/ add package to repository\n\t\trepo.Packages[pkg.Name()] = append(repo.Packages[pkg.Name()], pkg)\n\t}\n\n\trepo.msg.Debugf(\"start parsing metadata XML file... (%s) [done]\\n\", repo.Primary)\n\treturn err\n}\n\n\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\nfunc (repo *RepositoryXMLBackend) FindLatestMatchingName(name, version, release string) (*Package, error) {\n\tvar pkg *Package\n\tvar err error\n\n\tpkgs, ok := repo.Packages[name]\n\tif !ok {\n\t\trepo.msg.Debugf(\"could not find package %q\\n\", name)\n\t\treturn nil, fmt.Errorf(\"no such package %q\", name)\n\t}\n\n\tif version == \"\" && len(pkgs) > 0 {\n\t\t\/\/ return latest\n\t\tsorted := make([]*Package, len(pkgs))\n\t\tcopy(sorted, pkgs)\n\t\tsort.Sort(Packages(sorted))\n\t\tpkg = sorted[len(sorted)-1]\n\t} else {\n\t\t\/\/ trying to match the requirements\n\t\trel := 0\n\t\tif release != \"\" {\n\t\t\trel, err = strconv.Atoi(release)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treq := NewRequires(name, version, rel, 0, \"EQ\", \"\")\n\t\tsorted := make(Packages, 0, len(pkgs))\n\t\tfor _, p := range pkgs {\n\t\t\tif req.ProvideMatches(p) {\n\t\t\t\tsorted = append(sorted, p)\n\t\t\t}\n\t\t}\n\t\tif len(sorted) > 0 {\n\t\t\tsort.Sort(sorted)\n\t\t\tpkg = sorted[len(sorted)-1]\n\t\t}\n\t}\n\n\treturn pkg, err\n}\n\n\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\nfunc (repo *RepositoryXMLBackend) FindLatestMatchingRequire(requirement *Requires) (*Package, error) {\n\tvar pkg *Package\n\tvar err error\n\n\treturn pkg, err\n}\n\n\/\/ GetPackages returns all the packages known by a YUM repository\nfunc (repo *RepositoryXMLBackend) GetPackages() []*Package {\n\tpkgs := make([]*Package, 0, len(repo.Packages))\n\tfor _, pkg := range repo.Packages {\n\t\tpkgs = append(pkgs, pkg...)\n\t}\n\treturn pkgs\n}\n\nfunc init() {\n\tg_backends[\"RepositoryXMLBackend\"] = NewRepositoryXMLBackend\n}\n<commit_msg>yum.xml: implement FindLatestMatchingRequire<commit_after>package yum\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/gonuts\/logger\"\n)\n\n\/\/ RepositoryXMLBackend is a Backend querying YUM XML repositories\ntype RepositoryXMLBackend struct {\n\tName string\n\tPackages map[string][]*Package\n\tProvides map[string][]*Provides\n\tDBName string\n\tPrimary string\n\tRepository *Repository\n\tmsg *logger.Logger\n}\n\nfunc NewRepositoryXMLBackend(repo *Repository) (Backend, error) {\n\tconst dbname = \"primary.xml.gz\"\n\treturn &RepositoryXMLBackend{\n\t\tName: \"RepositoryXMLBackend\",\n\t\tPackages: make(map[string][]*Package),\n\t\tProvides: make(map[string][]*Provides),\n\t\tDBName: dbname,\n\t\tPrimary: filepath.Join(repo.CacheDir, dbname),\n\t\tRepository: repo,\n\t\tmsg: repo.msg,\n\t}, nil\n}\n\n\/\/ YumDataType returns the ID for the data type as used in the repomd.xml file\nfunc (repo *RepositoryXMLBackend) YumDataType() string {\n\treturn \"primary\"\n}\n\n\/\/ Download the DB from server\nfunc (repo *RepositoryXMLBackend) GetLatestDB(url string) error {\n\tvar err error\n\tout, err := os.Create(repo.Primary)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(out, resp.Body)\n\treturn err\n}\n\n\/\/ Check whether the DB is there\nfunc (repo *RepositoryXMLBackend) HasDB() bool {\n\treturn path_exists(repo.Primary)\n}\n\n\/\/ Load loads the DB\nfunc (repo *RepositoryXMLBackend) LoadDB() error {\n\tvar err error\n\n\trepo.msg.Debugf(\"start parsing metadata XML file... (%s)\\n\", repo.Primary)\n\ttype xmlTree struct {\n\t\tXMLName xml.Name `xml:\"metadata\"`\n\t\tPackages []struct {\n\t\t\tType string `xml:\"type,attr\"`\n\t\t\tName string `xml:\"name\"`\n\t\t\tArch string `xml:\"arch\"`\n\n\t\t\tVersion struct {\n\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t} `xml:\"version\"`\n\n\t\t\tChecksum struct {\n\t\t\t\tValue string `xml:\",innerxml\"`\n\t\t\t\tType string `xml:\"type,attr\"`\n\t\t\t\tPkgId string `xml:\"pkgid,attr\"`\n\t\t\t} `xml:\"checksum\"`\n\n\t\t\tSummary string `xml:\"summary\"`\n\t\t\tDescr string `xml:\"description\"`\n\t\t\tPackager string `xml:\"packager\"`\n\t\t\tUrl string `xml:\"url\"`\n\n\t\t\tTime struct {\n\t\t\t\tFile string `xml:\"file,attr\"`\n\t\t\t\tBuild string `xml:\"build,attr\"`\n\t\t\t} `xml:\"time\"`\n\n\t\t\tSize struct {\n\t\t\t\tPackage int64 `xml:\"package,attr\"`\n\t\t\t\tInstalled int64 `xml:\"installed,attr\"`\n\t\t\t\tArchive int64 `xml:\"archive,attr\"`\n\t\t\t} `xml:\"size\"`\n\n\t\t\tLocation struct {\n\t\t\t\tHref string `xml:\"href,attr\"`\n\t\t\t} `xml:\"location\"`\n\n\t\t\tFormat struct {\n\t\t\t\tLicense string `xml:\"license\"`\n\t\t\t\tVendor string `xml:\"vendor\"`\n\t\t\t\tGroup string `xml:\"group\"`\n\t\t\t\tBuildHost string `xml:\"buildhost\"`\n\t\t\t\tSourceRpm string `xml:\"sourcerpm\"`\n\n\t\t\t\tHeaderRange struct {\n\t\t\t\t\tBeg int64 `xml:\"start,attr\"`\n\t\t\t\t\tEnd int64 `xml:\"end,attr\"`\n\t\t\t\t} `xml:\"header-range\"`\n\n\t\t\t\tProvides struct {\n\t\t\t\t\tItems []struct {\n\t\t\t\t\t\tName string `xml:\"name,attr\"`\n\t\t\t\t\t\tFlags string `xml:\"flags,attr\"`\n\t\t\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t\t\t} `xml:\"entry\"`\n\t\t\t\t} `xml:\"provides\"`\n\n\t\t\t\tRequires struct {\n\t\t\t\t\tItems []struct{\n\t\t\t\t\t\tName string `xml:\"name,attr\"`\n\t\t\t\t\t\tFlags string `xml:\"flags,attr\"`\n\t\t\t\t\t\tEpoch int `xml:\"epoch,attr\"`\n\t\t\t\t\t\tVersion string `xml:\"ver,attr\"`\n\t\t\t\t\t\tRelease int `xml:\"rel,attr\"`\n\t\t\t\t\t\tPre string `xml:\"pre,attr\"`\n\t\t\t\t\t} `xml:\"entry\"`\n\t\t\t\t} `xml:\"requires\"`\n\n\t\t\t\tFiles []string `xml:\"file\"`\n\t\t\t} `xml:\"format\"`\n\t\t} `xml:\"package\"`\n\t}\n\n\t\/\/ load the yum XML package list\n\tf, err := os.Open(repo.Primary)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tvar r io.Reader\n\tif rr, err := gzip.NewReader(f); err != nil {\n\t\tif err != gzip.ErrHeader {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ perhaps not a compressed file after all...\n\t\t_, err = f.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr = f\n\t} else {\n\t\tr = rr\n\t\tdefer rr.Close()\n\t}\n\n\tvar tree xmlTree\n\terr = xml.NewDecoder(r).Decode(&tree)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, xml := range tree.Packages {\n\t\tpkg := NewPackage(xml.Name, xml.Version.Version, xml.Version.Release, xml.Version.Epoch)\n\t\tpkg.arch = xml.Arch\n\t\tpkg.group = xml.Format.Group\n\t\tpkg.location = xml.Location.Href\n\t\tfor _, v := range xml.Format.Provides.Items {\n\t\t\tprov := NewProvides(\n\t\t\t\tv.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.Release,\n\t\t\t\tv.Epoch,\n\t\t\t\tv.Flags,\n\t\t\t\tpkg,\n\t\t\t)\n\t\t\tpkg.provides = append(pkg.provides, prov)\n\n\t\t\tif !str_in_slice(prov.Name(), g_IGNORED_PACKAGES) {\n\t\t\t\trepo.Provides[prov.Name()] = append(repo.Provides[prov.Name()], prov)\n\t\t\t}\n\t\t}\n\n\t\tfor _, v := range xml.Format.Requires.Items {\n\t\t\treq := NewRequires(\n\t\t\t\tv.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.Release,\n\t\t\t\tv.Epoch,\n\t\t\t\tv.Flags,\n\t\t\t\tv.Pre,\n\t\t\t)\n\t\t\tpkg.requires = append(pkg.requires, req)\n\t\t}\n\t\tpkg.repository = repo.Repository\n\n\t\t\/\/ add package to repository\n\t\trepo.Packages[pkg.Name()] = append(repo.Packages[pkg.Name()], pkg)\n\t}\n\n\trepo.msg.Debugf(\"start parsing metadata XML file... (%s) [done]\\n\", repo.Primary)\n\treturn err\n}\n\n\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\nfunc (repo *RepositoryXMLBackend) FindLatestMatchingName(name, version, release string) (*Package, error) {\n\tvar pkg *Package\n\tvar err error\n\n\tpkgs, ok := repo.Packages[name]\n\tif !ok {\n\t\trepo.msg.Debugf(\"could not find package %q\\n\", name)\n\t\treturn nil, fmt.Errorf(\"no such package %q\", name)\n\t}\n\n\tif version == \"\" && len(pkgs) > 0 {\n\t\t\/\/ return latest\n\t\tsorted := make([]*Package, len(pkgs))\n\t\tcopy(sorted, pkgs)\n\t\tsort.Sort(Packages(sorted))\n\t\tpkg = sorted[len(sorted)-1]\n\t} else {\n\t\t\/\/ trying to match the requirements\n\t\trel := 0\n\t\tif release != \"\" {\n\t\t\trel, err = strconv.Atoi(release)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treq := NewRequires(name, version, rel, 0, \"EQ\", \"\")\n\t\tsorted := make(Packages, 0, len(pkgs))\n\t\tfor _, p := range pkgs {\n\t\t\tif req.ProvideMatches(p) {\n\t\t\t\tsorted = append(sorted, p)\n\t\t\t}\n\t\t}\n\t\tif len(sorted) > 0 {\n\t\t\tsort.Sort(sorted)\n\t\t\tpkg = sorted[len(sorted)-1]\n\t\t}\n\t}\n\n\treturn pkg, err\n}\n\n\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\nfunc (repo *RepositoryXMLBackend) FindLatestMatchingRequire(requirement *Requires) (*Package, error) {\n\tvar pkg *Package\n\tvar err error\n\n\tpkgs, ok := repo.Provides[requirement.Name()]\n\tif !ok {\n\t\trepo.msg.Debugf(\"could not find package providing %s-%s\\n\", requirement.Name(), requirement.Version())\n\t\treturn nil, fmt.Errorf(\"no package providing %s-%s\", requirement.Name(), requirement.Version())\n\t}\n\n\tif requirement.Version() == \"\" && len(pkgs) > 0 {\n\t\t\/\/ return latest\n\t\tsorted := make([]RPM, 0, len(pkgs))\n\t\tfor _, p := range pkgs {\n\t\t\tsorted = append(sorted, p)\n\t\t}\n\t\tsort.Sort(RPMSlice(sorted))\n\t\tpkg = sorted[len(sorted)-1].(*Provides).Package\n\t} else {\n\t\t\/\/ trying to match the requirements\n\t\tsorted := make(RPMSlice, 0, len(pkgs))\n\t\tfor _, p := range pkgs {\n\t\t\tif requirement.ProvideMatches(p) {\n\t\t\t\tsorted = append(sorted, p)\n\t\t\t}\n\t\t}\n\t\tif len(sorted) > 0 {\n\t\t\tsort.Sort(sorted)\n\t\t\tpkg = sorted[len(sorted)-1].(*Provides).Package\n\t\t}\n\t}\n\n\treturn pkg, err\n}\n\n\/\/ GetPackages returns all the packages known by a YUM repository\nfunc (repo *RepositoryXMLBackend) GetPackages() []*Package {\n\tpkgs := make([]*Package, 0, len(repo.Packages))\n\tfor _, pkg := range repo.Packages {\n\t\tpkgs = append(pkgs, pkg...)\n\t}\n\treturn pkgs\n}\n\nfunc init() {\n\tg_backends[\"RepositoryXMLBackend\"] = NewRepositoryXMLBackend\n}\n<|endoftext|>"} {"text":"<commit_before>package objectstore\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"fmt\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/rancher\/convoy\/convoydriver\"\n\t\"github.com\/rancher\/convoy\/metadata\"\n\t\"github.com\/rancher\/convoy\/util\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. \"github.com\/rancher\/convoy\/logging\"\n)\n\ntype BlockMapping struct {\n\tOffset int64\n\tBlockChecksum string\n}\n\ntype DeltaBlockBackupOperations interface {\n\tGetVolumeDevice(id string) (string, error)\n\tHasSnapshot(id, volumeID string) bool\n\tCompareSnapshot(id, compareID, volumeID string) (*metadata.Mappings, error)\n\tOpenSnapshot(id, volumeID string) error\n\tReadSnapshot(id, volumeID string, start int64, data []byte) error\n\tCloseSnapshot(id, volumeID string) error\n}\n\nconst (\n\tDEFAULT_BLOCK_SIZE = 2097152\n\n\tBLOCKS_DIRECTORY = \"blocks\"\n\tBLOCK_SEPARATE_LAYER1 = 2\n\tBLOCK_SEPARATE_LAYER2 = 4\n)\n\nfunc CreateDeltaBlockBackup(volume *Volume, snapshot *Snapshot, destURL string, sDriver convoydriver.ConvoyDriver) (string, error) {\n\tdeltaOps, ok := sDriver.(DeltaBlockBackupOperations)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Driver %s doesn't implemented DeltaBlockBackupOperations interface\", sDriver.Name())\n\t}\n\n\tbsDriver, err := GetObjectStoreDriver(destURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := addVolume(volume, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Update volume from objectstore\n\tvolume, err = loadVolume(volume.UUID, bsDriver)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlastBackupUUID := volume.LastBackupUUID\n\n\tvar lastSnapshotUUID string\n\tvar lastBackup *Backup\n\tif lastBackupUUID != \"\" {\n\t\tlastBackup, err = loadBackup(lastBackupUUID, volume.UUID, bsDriver)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlastSnapshotUUID = lastBackup.SnapshotUUID\n\t\tif lastSnapshotUUID == snapshot.UUID {\n\t\t\t\/\/Generate full snapshot if the snapshot has been backed up last time\n\t\t\tlastSnapshotUUID = \"\"\n\t\t\tlog.Debug(\"Would create full snapshot metadata\")\n\t\t} else if !deltaOps.HasSnapshot(lastSnapshotUUID, volume.UUID) {\n\t\t\t\/\/ It's possible that the snapshot in objectstore doesn't exist\n\t\t\t\/\/ in local storage\n\t\t\tlastSnapshotUUID = \"\"\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\tLOG_FIELD_REASON: LOG_REASON_FALLBACK,\n\t\t\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\t\t\tLOG_FIELD_SNAPSHOT: lastSnapshotUUID,\n\t\t\t\tLOG_FIELD_VOLUME: volume.UUID,\n\t\t\t}).Debug(\"Cannot find last snapshot in local storage, would process with full backup\")\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_COMPARE,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t\tLOG_FIELD_LAST_SNAPSHOT: lastSnapshotUUID,\n\t}).Debug(\"Generating snapshot changed blocks metadata\")\n\tdelta, err := deltaOps.CompareSnapshot(snapshot.UUID, lastSnapshotUUID, volume.UUID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif delta.BlockSize != DEFAULT_BLOCK_SIZE {\n\t\treturn \"\", fmt.Errorf(\"Currently doesn't support different block sizes driver other than %v\", DEFAULT_BLOCK_SIZE)\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_COMPLETE,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_COMPARE,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t\tLOG_FIELD_LAST_SNAPSHOT: lastSnapshotUUID,\n\t}).Debug(\"Generated snapshot changed blocks metadata\")\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_BACKUP,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t}).Debug(\"Creating backup\")\n\n\tdeltaBackup := &Backup{\n\t\tUUID: uuid.New(),\n\t\tVolumeUUID: volume.UUID,\n\t\tSnapshotUUID: snapshot.UUID,\n\t\tBlocks: []BlockMapping{},\n\t}\n\tif err := deltaOps.OpenSnapshot(snapshot.UUID, volume.UUID); err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer deltaOps.CloseSnapshot(snapshot.UUID, volume.UUID)\n\tfor _, d := range delta.Mappings {\n\t\tblock := make([]byte, DEFAULT_BLOCK_SIZE)\n\t\tfor i := int64(0); i < d.Size\/delta.BlockSize; i++ {\n\t\t\toffset := d.Offset + i*delta.BlockSize\n\t\t\terr := deltaOps.ReadSnapshot(snapshot.UUID, volume.UUID, offset, block)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tchecksum := util.GetChecksum(block)\n\t\t\tblkFile := getBlockFilePath(volume.UUID, checksum)\n\t\t\tif bsDriver.FileSize(blkFile) >= 0 {\n\t\t\t\tblockMapping := BlockMapping{\n\t\t\t\t\tOffset: offset,\n\t\t\t\t\tBlockChecksum: checksum,\n\t\t\t\t}\n\t\t\t\tdeltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)\n\t\t\t\tlog.Debugf(\"Found existed block match at %v\", blkFile)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trs, err := util.CompressData(block)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif err := bsDriver.Write(blkFile, rs); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tlog.Debugf(\"Created new block file at %v\", blkFile)\n\n\t\t\tblockMapping := BlockMapping{\n\t\t\t\tOffset: offset,\n\t\t\t\tBlockChecksum: checksum,\n\t\t\t}\n\t\t\tdeltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_COMPLETE,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_BACKUP,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t}).Debug(\"Created snapshot changed blocks\")\n\n\tbackup := mergeSnapshotMap(deltaBackup, lastBackup)\n\tbackup.SnapshotName = snapshot.Name\n\tbackup.SnapshotCreatedAt = snapshot.CreatedTime\n\tbackup.CreatedTime = util.Now()\n\n\tif err := saveBackup(backup, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvolume.LastBackupUUID = backup.UUID\n\tif err := saveVolume(volume, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn encodeBackupURL(backup.UUID, volume.UUID, destURL), nil\n}\n\nfunc mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup {\n\tif lastBackup == nil {\n\t\treturn deltaBackup\n\t}\n\tbackup := &Backup{\n\t\tUUID: deltaBackup.UUID,\n\t\tVolumeUUID: deltaBackup.VolumeUUID,\n\t\tSnapshotUUID: deltaBackup.SnapshotUUID,\n\t\tBlocks: []BlockMapping{},\n\t}\n\tvar d, l int\n\tfor d, l = 0, 0; d < len(deltaBackup.Blocks) && l < len(lastBackup.Blocks); {\n\t\tdB := deltaBackup.Blocks[d]\n\t\tlB := lastBackup.Blocks[l]\n\t\tif dB.Offset == lB.Offset {\n\t\t\tbackup.Blocks = append(backup.Blocks, dB)\n\t\t\td++\n\t\t\tl++\n\t\t} else if dB.Offset < lB.Offset {\n\t\t\tbackup.Blocks = append(backup.Blocks, dB)\n\t\t\td++\n\t\t} else {\n\t\t\t\/\/dB.Offset > lB.offset\n\t\t\tbackup.Blocks = append(backup.Blocks, lB)\n\t\t\tl++\n\t\t}\n\t}\n\n\tif d == len(deltaBackup.Blocks) {\n\t\tbackup.Blocks = append(backup.Blocks, lastBackup.Blocks[l:]...)\n\t} else {\n\t\tbackup.Blocks = append(backup.Blocks, deltaBackup.Blocks[d:]...)\n\t}\n\n\treturn backup\n}\n\nfunc RestoreDeltaBlockBackup(backupURL, volDevName string) error {\n\tbsDriver, err := GetObjectStoreDriver(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcBackupUUID, srcVolumeUUID, err := decodeBackupURL(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := loadVolume(srcVolumeUUID, bsDriver); err != nil {\n\t\treturn generateError(logrus.Fields{\n\t\t\tLOG_FIELD_VOLUME: srcVolumeUUID,\n\t\t\tLOG_FIELD_BACKUP_URL: backupURL,\n\t\t}, \"Volume doesn't exist in objectstore: %v\", err)\n\t}\n\n\tvolDev, err := os.Create(volDevName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer volDev.Close()\n\n\tbackup, err := loadBackup(srcBackupUUID, srcVolumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_RESTORE,\n\t\tLOG_FIELD_OBJECT: LOG_FIELD_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: srcBackupUUID,\n\t\tLOG_FIELD_ORIN_VOLUME: srcVolumeUUID,\n\t\tLOG_FIELD_VOLUME_DEV: volDevName,\n\t\tLOG_FIELD_BACKUP_URL: backupURL,\n\t}).Debug()\n\tblkCounts := len(backup.Blocks)\n\tfor i, block := range backup.Blocks {\n\t\tlog.Debugf(\"Restore for %v: block %v, %v\/%v\", volDevName, block.BlockChecksum, i+1, blkCounts)\n\t\tblkFile := getBlockFilePath(srcVolumeUUID, block.BlockChecksum)\n\t\trc, err := bsDriver.Read(blkFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr, err := util.DecompressAndVerify(rc, block.BlockChecksum)\n\t\trc.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := volDev.Seek(block.Offset, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc DeleteDeltaBlockBackup(backupURL string) error {\n\tbsDriver, err := GetObjectStoreDriver(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackupUUID, volumeUUID, err := decodeBackupURL(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv, err := loadVolume(volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot find volume %v in objectstore\", volumeUUID, err)\n\t}\n\n\tbackup, err := loadBackup(backupUUID, volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdiscardBlockSet := make(map[string]bool)\n\tfor _, blk := range backup.Blocks {\n\t\tdiscardBlockSet[blk.BlockChecksum] = true\n\t}\n\tdiscardBlockCounts := len(discardBlockSet)\n\n\tif err := removeBackup(backup, bsDriver); err != nil {\n\t\treturn err\n\t}\n\n\tif backup.UUID == v.LastBackupUUID {\n\t\tv.LastBackupUUID = \"\"\n\t\tif err := saveVolume(v, bsDriver); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tbackupUUIDs, err := getBackupUUIDsForVolume(volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(backupUUIDs) == 0 {\n\t\tlog.Debugf(\"No snapshot existed for the volume %v, removing volume\", volumeUUID)\n\t\tif err := removeVolume(volumeUUID, bsDriver); err != nil {\n\t\t\tlog.Warningf(\"Failed to remove volume %v due to: %v\", volumeUUID, err.Error())\n\t\t}\n\t\treturn nil\n\t}\n\n\tlog.Debug(\"GC started\")\n\tfor _, backupUUID := range backupUUIDs {\n\t\tbackup, err := loadBackup(backupUUID, volumeUUID, bsDriver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, blk := range backup.Blocks {\n\t\t\tif _, exists := discardBlockSet[blk.BlockChecksum]; exists {\n\t\t\t\tdelete(discardBlockSet, blk.BlockChecksum)\n\t\t\t\tdiscardBlockCounts--\n\t\t\t\tif discardBlockCounts == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif discardBlockCounts == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar blkFileList []string\n\tfor blk := range discardBlockSet {\n\t\tblkFileList = append(blkFileList, getBlockFilePath(volumeUUID, blk))\n\t\tlog.Debugf(\"Found unused blocks %v for volume %v\", blk, volumeUUID)\n\t}\n\tif err := bsDriver.Remove(blkFileList...); err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Removed unused blocks for volume \", volumeUUID)\n\n\tlog.Debug(\"GC completed\")\n\tlog.Debug(\"Removed objectstore backup \", backupUUID)\n\n\treturn nil\n}\n\nfunc getBlockPath(volumeUUID string) string {\n\treturn filepath.Join(getVolumePath(volumeUUID), BLOCKS_DIRECTORY) + \"\/\"\n}\n\nfunc getBlockFilePath(volumeUUID, checksum string) string {\n\tblockSubDirLayer1 := checksum[0:BLOCK_SEPARATE_LAYER1]\n\tblockSubDirLayer2 := checksum[BLOCK_SEPARATE_LAYER1:BLOCK_SEPARATE_LAYER2]\n\tpath := filepath.Join(getBlockPath(volumeUUID), blockSubDirLayer1, blockSubDirLayer2)\n\tfileName := checksum + \".blk\"\n\n\treturn filepath.Join(path, fileName)\n}\n<commit_msg>backup: Add progress info in log for backup<commit_after>package objectstore\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"fmt\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/rancher\/convoy\/convoydriver\"\n\t\"github.com\/rancher\/convoy\/metadata\"\n\t\"github.com\/rancher\/convoy\/util\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. \"github.com\/rancher\/convoy\/logging\"\n)\n\ntype BlockMapping struct {\n\tOffset int64\n\tBlockChecksum string\n}\n\ntype DeltaBlockBackupOperations interface {\n\tGetVolumeDevice(id string) (string, error)\n\tHasSnapshot(id, volumeID string) bool\n\tCompareSnapshot(id, compareID, volumeID string) (*metadata.Mappings, error)\n\tOpenSnapshot(id, volumeID string) error\n\tReadSnapshot(id, volumeID string, start int64, data []byte) error\n\tCloseSnapshot(id, volumeID string) error\n}\n\nconst (\n\tDEFAULT_BLOCK_SIZE = 2097152\n\n\tBLOCKS_DIRECTORY = \"blocks\"\n\tBLOCK_SEPARATE_LAYER1 = 2\n\tBLOCK_SEPARATE_LAYER2 = 4\n)\n\nfunc CreateDeltaBlockBackup(volume *Volume, snapshot *Snapshot, destURL string, sDriver convoydriver.ConvoyDriver) (string, error) {\n\tdeltaOps, ok := sDriver.(DeltaBlockBackupOperations)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Driver %s doesn't implemented DeltaBlockBackupOperations interface\", sDriver.Name())\n\t}\n\n\tbsDriver, err := GetObjectStoreDriver(destURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := addVolume(volume, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Update volume from objectstore\n\tvolume, err = loadVolume(volume.UUID, bsDriver)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlastBackupUUID := volume.LastBackupUUID\n\n\tvar lastSnapshotUUID string\n\tvar lastBackup *Backup\n\tif lastBackupUUID != \"\" {\n\t\tlastBackup, err = loadBackup(lastBackupUUID, volume.UUID, bsDriver)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlastSnapshotUUID = lastBackup.SnapshotUUID\n\t\tif lastSnapshotUUID == snapshot.UUID {\n\t\t\t\/\/Generate full snapshot if the snapshot has been backed up last time\n\t\t\tlastSnapshotUUID = \"\"\n\t\t\tlog.Debug(\"Would create full snapshot metadata\")\n\t\t} else if !deltaOps.HasSnapshot(lastSnapshotUUID, volume.UUID) {\n\t\t\t\/\/ It's possible that the snapshot in objectstore doesn't exist\n\t\t\t\/\/ in local storage\n\t\t\tlastSnapshotUUID = \"\"\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\tLOG_FIELD_REASON: LOG_REASON_FALLBACK,\n\t\t\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\t\t\tLOG_FIELD_SNAPSHOT: lastSnapshotUUID,\n\t\t\t\tLOG_FIELD_VOLUME: volume.UUID,\n\t\t\t}).Debug(\"Cannot find last snapshot in local storage, would process with full backup\")\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_COMPARE,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t\tLOG_FIELD_LAST_SNAPSHOT: lastSnapshotUUID,\n\t}).Debug(\"Generating snapshot changed blocks metadata\")\n\tdelta, err := deltaOps.CompareSnapshot(snapshot.UUID, lastSnapshotUUID, volume.UUID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif delta.BlockSize != DEFAULT_BLOCK_SIZE {\n\t\treturn \"\", fmt.Errorf(\"Currently doesn't support different block sizes driver other than %v\", DEFAULT_BLOCK_SIZE)\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_COMPLETE,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_COMPARE,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t\tLOG_FIELD_LAST_SNAPSHOT: lastSnapshotUUID,\n\t}).Debug(\"Generated snapshot changed blocks metadata\")\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_BACKUP,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t}).Debug(\"Creating backup\")\n\n\tdeltaBackup := &Backup{\n\t\tUUID: uuid.New(),\n\t\tVolumeUUID: volume.UUID,\n\t\tSnapshotUUID: snapshot.UUID,\n\t\tBlocks: []BlockMapping{},\n\t}\n\tif err := deltaOps.OpenSnapshot(snapshot.UUID, volume.UUID); err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer deltaOps.CloseSnapshot(snapshot.UUID, volume.UUID)\n\tmCounts := len(delta.Mappings)\n\tfor m, d := range delta.Mappings {\n\t\tblock := make([]byte, DEFAULT_BLOCK_SIZE)\n\t\tblkCounts := d.Size \/ delta.BlockSize\n\t\tfor i := int64(0); i < blkCounts; i++ {\n\t\t\toffset := d.Offset + i*delta.BlockSize\n\t\t\tlog.Debugf(\"Backup for %v: segment %v\/%v, blocks %v\/%v\", snapshot.UUID, m+1, mCounts, i+1, blkCounts)\n\t\t\terr := deltaOps.ReadSnapshot(snapshot.UUID, volume.UUID, offset, block)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tchecksum := util.GetChecksum(block)\n\t\t\tblkFile := getBlockFilePath(volume.UUID, checksum)\n\t\t\tif bsDriver.FileSize(blkFile) >= 0 {\n\t\t\t\tblockMapping := BlockMapping{\n\t\t\t\t\tOffset: offset,\n\t\t\t\t\tBlockChecksum: checksum,\n\t\t\t\t}\n\t\t\t\tdeltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)\n\t\t\t\tlog.Debugf(\"Found existed block match at %v\", blkFile)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trs, err := util.CompressData(block)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif err := bsDriver.Write(blkFile, rs); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tlog.Debugf(\"Created new block file at %v\", blkFile)\n\n\t\t\tblockMapping := BlockMapping{\n\t\t\t\tOffset: offset,\n\t\t\t\tBlockChecksum: checksum,\n\t\t\t}\n\t\t\tdeltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_COMPLETE,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_BACKUP,\n\t\tLOG_FIELD_OBJECT: LOG_OBJECT_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: snapshot.UUID,\n\t}).Debug(\"Created snapshot changed blocks\")\n\n\tbackup := mergeSnapshotMap(deltaBackup, lastBackup)\n\tbackup.SnapshotName = snapshot.Name\n\tbackup.SnapshotCreatedAt = snapshot.CreatedTime\n\tbackup.CreatedTime = util.Now()\n\n\tif err := saveBackup(backup, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvolume.LastBackupUUID = backup.UUID\n\tif err := saveVolume(volume, bsDriver); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn encodeBackupURL(backup.UUID, volume.UUID, destURL), nil\n}\n\nfunc mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup {\n\tif lastBackup == nil {\n\t\treturn deltaBackup\n\t}\n\tbackup := &Backup{\n\t\tUUID: deltaBackup.UUID,\n\t\tVolumeUUID: deltaBackup.VolumeUUID,\n\t\tSnapshotUUID: deltaBackup.SnapshotUUID,\n\t\tBlocks: []BlockMapping{},\n\t}\n\tvar d, l int\n\tfor d, l = 0, 0; d < len(deltaBackup.Blocks) && l < len(lastBackup.Blocks); {\n\t\tdB := deltaBackup.Blocks[d]\n\t\tlB := lastBackup.Blocks[l]\n\t\tif dB.Offset == lB.Offset {\n\t\t\tbackup.Blocks = append(backup.Blocks, dB)\n\t\t\td++\n\t\t\tl++\n\t\t} else if dB.Offset < lB.Offset {\n\t\t\tbackup.Blocks = append(backup.Blocks, dB)\n\t\t\td++\n\t\t} else {\n\t\t\t\/\/dB.Offset > lB.offset\n\t\t\tbackup.Blocks = append(backup.Blocks, lB)\n\t\t\tl++\n\t\t}\n\t}\n\n\tif d == len(deltaBackup.Blocks) {\n\t\tbackup.Blocks = append(backup.Blocks, lastBackup.Blocks[l:]...)\n\t} else {\n\t\tbackup.Blocks = append(backup.Blocks, deltaBackup.Blocks[d:]...)\n\t}\n\n\treturn backup\n}\n\nfunc RestoreDeltaBlockBackup(backupURL, volDevName string) error {\n\tbsDriver, err := GetObjectStoreDriver(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcBackupUUID, srcVolumeUUID, err := decodeBackupURL(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := loadVolume(srcVolumeUUID, bsDriver); err != nil {\n\t\treturn generateError(logrus.Fields{\n\t\t\tLOG_FIELD_VOLUME: srcVolumeUUID,\n\t\t\tLOG_FIELD_BACKUP_URL: backupURL,\n\t\t}, \"Volume doesn't exist in objectstore: %v\", err)\n\t}\n\n\tvolDev, err := os.Create(volDevName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer volDev.Close()\n\n\tbackup, err := loadBackup(srcBackupUUID, srcVolumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\tLOG_FIELD_REASON: LOG_REASON_START,\n\t\tLOG_FIELD_EVENT: LOG_EVENT_RESTORE,\n\t\tLOG_FIELD_OBJECT: LOG_FIELD_SNAPSHOT,\n\t\tLOG_FIELD_SNAPSHOT: srcBackupUUID,\n\t\tLOG_FIELD_ORIN_VOLUME: srcVolumeUUID,\n\t\tLOG_FIELD_VOLUME_DEV: volDevName,\n\t\tLOG_FIELD_BACKUP_URL: backupURL,\n\t}).Debug()\n\tblkCounts := len(backup.Blocks)\n\tfor i, block := range backup.Blocks {\n\t\tlog.Debugf(\"Restore for %v: block %v, %v\/%v\", volDevName, block.BlockChecksum, i+1, blkCounts)\n\t\tblkFile := getBlockFilePath(srcVolumeUUID, block.BlockChecksum)\n\t\trc, err := bsDriver.Read(blkFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr, err := util.DecompressAndVerify(rc, block.BlockChecksum)\n\t\trc.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := volDev.Seek(block.Offset, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc DeleteDeltaBlockBackup(backupURL string) error {\n\tbsDriver, err := GetObjectStoreDriver(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbackupUUID, volumeUUID, err := decodeBackupURL(backupURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv, err := loadVolume(volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot find volume %v in objectstore\", volumeUUID, err)\n\t}\n\n\tbackup, err := loadBackup(backupUUID, volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdiscardBlockSet := make(map[string]bool)\n\tfor _, blk := range backup.Blocks {\n\t\tdiscardBlockSet[blk.BlockChecksum] = true\n\t}\n\tdiscardBlockCounts := len(discardBlockSet)\n\n\tif err := removeBackup(backup, bsDriver); err != nil {\n\t\treturn err\n\t}\n\n\tif backup.UUID == v.LastBackupUUID {\n\t\tv.LastBackupUUID = \"\"\n\t\tif err := saveVolume(v, bsDriver); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tbackupUUIDs, err := getBackupUUIDsForVolume(volumeUUID, bsDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(backupUUIDs) == 0 {\n\t\tlog.Debugf(\"No snapshot existed for the volume %v, removing volume\", volumeUUID)\n\t\tif err := removeVolume(volumeUUID, bsDriver); err != nil {\n\t\t\tlog.Warningf(\"Failed to remove volume %v due to: %v\", volumeUUID, err.Error())\n\t\t}\n\t\treturn nil\n\t}\n\n\tlog.Debug(\"GC started\")\n\tfor _, backupUUID := range backupUUIDs {\n\t\tbackup, err := loadBackup(backupUUID, volumeUUID, bsDriver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, blk := range backup.Blocks {\n\t\t\tif _, exists := discardBlockSet[blk.BlockChecksum]; exists {\n\t\t\t\tdelete(discardBlockSet, blk.BlockChecksum)\n\t\t\t\tdiscardBlockCounts--\n\t\t\t\tif discardBlockCounts == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif discardBlockCounts == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar blkFileList []string\n\tfor blk := range discardBlockSet {\n\t\tblkFileList = append(blkFileList, getBlockFilePath(volumeUUID, blk))\n\t\tlog.Debugf(\"Found unused blocks %v for volume %v\", blk, volumeUUID)\n\t}\n\tif err := bsDriver.Remove(blkFileList...); err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Removed unused blocks for volume \", volumeUUID)\n\n\tlog.Debug(\"GC completed\")\n\tlog.Debug(\"Removed objectstore backup \", backupUUID)\n\n\treturn nil\n}\n\nfunc getBlockPath(volumeUUID string) string {\n\treturn filepath.Join(getVolumePath(volumeUUID), BLOCKS_DIRECTORY) + \"\/\"\n}\n\nfunc getBlockFilePath(volumeUUID, checksum string) string {\n\tblockSubDirLayer1 := checksum[0:BLOCK_SEPARATE_LAYER1]\n\tblockSubDirLayer2 := checksum[BLOCK_SEPARATE_LAYER1:BLOCK_SEPARATE_LAYER2]\n\tpath := filepath.Join(getBlockPath(volumeUUID), blockSubDirLayer1, blockSubDirLayer2)\n\tfileName := checksum + \".blk\"\n\n\treturn filepath.Join(path, fileName)\n}\n<|endoftext|>"} {"text":"<commit_before>package docindexing\n\nimport (\n\t\"log\"\n\t\"runtime\"\n\t\"sync\"\n)\n\n\/\/ Highest level inverted index structure\ntype InvertedIndex struct {\n\tTerms map[string]*TermEntry\n\tTermCount int64\n\tIndexLock *sync.Mutex\n}\n\n\/\/ Stores terms and their respective document entries\ntype TermEntry struct {\n\tFrequency int\n\tDocuments []*DocumentEntry\n\tEntryLock *sync.Mutex\n}\n\n\/\/ Stores document metadata as well as additional term information\ntype DocumentEntry struct {\n\tPath string\n\tID int64\n\tFrequency int\n\tPositions []int\n}\n\n\/\/ Constructs new inverted index\nfunc NewIndex() *InvertedIndex {\n\treturn &InvertedIndex{Terms: make(map[string]*TermEntry), TermCount: int64(0), IndexLock: &sync.Mutex{}}\n}\n\n\/\/ Adds new term entry to the index\nfunc (ind *InvertedIndex) AddTerm(term string, verbose bool) {\n\tind.IndexLock.Lock()\n\t\/\/ Make sure we aren't overwriting existing term\n\tif _, found := ind.Terms[term]; !found {\n\t\tif verbose {\n\t\t\tlog.Printf(\"Adding new term %s to index\", term)\n\t\t}\n\t\tind.Terms[term] = &TermEntry{Frequency: 0, Documents: make([]*DocumentEntry, 0), EntryLock: &sync.Mutex{}}\n\t\tind.TermCount++\n\t}\n\tind.IndexLock.Unlock()\n\truntime.Gosched()\n}\n\n\/\/ Adds new document entry to given term entry\nfunc (ind *InvertedIndex) AddDocument(term string, document *DocumentEntry, verbose bool) {\n\t\/\/ Make sure term is in index\n\tind.AddTerm(term, verbose)\n\tif verbose {\n\t\tlog.Printf(\"Adding new document %s to term %s's document list\", document.Path, term)\n\t}\n\t\/\/ Get index and term locks\n\tind.IndexLock.Lock();\n\tind.Terms[term].EntryLock.Lock()\n\t\n\t\/\/ Safely update values\n\tind.Terms[term].Frequency += document.Frequency\n\tind.Terms[term].Documents = append(ind.Terms[term].Documents, document)\n\t\n\t\/\/ Release locks\n\tind.Terms[term].EntryLock.Unlock()\n\tind.IndexLock.Unlock()\n\truntime.Gosched()\n}\n<commit_msg>Removed superfluous entry lock<commit_after>package docindexing\n\nimport (\n\t\"log\"\n\t\"runtime\"\n\t\"sync\"\n)\n\n\/\/ Highest level inverted index structure\ntype InvertedIndex struct {\n\tTerms map[string]*TermEntry\n\tTermCount int64\n\tIndexLock *sync.Mutex\n}\n\n\/\/ Stores terms and their respective document entries\ntype TermEntry struct {\n\tFrequency int\n\tDocuments []*DocumentEntry\n}\n\n\/\/ Stores document metadata as well as additional term information\ntype DocumentEntry struct {\n\tPath string\n\tID int64\n\tFrequency int\n\tPositions []int\n}\n\n\/\/ Constructs new inverted index\nfunc NewIndex() *InvertedIndex {\n\treturn &InvertedIndex{Terms: make(map[string]*TermEntry), TermCount: int64(0), IndexLock: &sync.Mutex{}}\n}\n\n\/\/ Adds new term entry to the index\nfunc (ind *InvertedIndex) AddTerm(term string, verbose bool) {\n\tind.IndexLock.Lock()\n\t\/\/ Make sure we aren't overwriting existing term\n\tif _, found := ind.Terms[term]; !found {\n\t\tif verbose {\n\t\t\tlog.Printf(\"Adding new term %s to index\", term)\n\t\t}\n\t\tind.Terms[term] = &TermEntry{Frequency: 0, Documents: make([]*DocumentEntry, 0)}\n\t\tind.TermCount++\n\t}\n\tind.IndexLock.Unlock()\n\truntime.Gosched()\n}\n\n\/\/ Adds new document entry to given term entry\nfunc (ind *InvertedIndex) AddDocument(term string, document *DocumentEntry, verbose bool) {\n\t\/\/ Make sure term is in index\n\tind.AddTerm(term, verbose)\n\tif verbose {\n\t\tlog.Printf(\"Adding new document %s to term %s's document list\", document.Path, term)\n\t}\n\t\/\/ Get index lock\n\tind.IndexLock.Lock()\n\t\n\t\/\/ Safely update values\n\tind.Terms[term].Frequency += document.Frequency\n\tind.Terms[term].Documents = append(ind.Terms[term].Documents, document)\n\t\n\t\/\/ Release lock\n\tind.IndexLock.Unlock()\n\truntime.Gosched()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tabletserver\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/pools\"\n\t\"github.com\/youtube\/vitess\/go\/sqldb\"\n\t\"github.com\/youtube\/vitess\/go\/sqltypes\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n\t\"github.com\/youtube\/vitess\/go\/streamlog\"\n\t\"github.com\/youtube\/vitess\/go\/sync2\"\n\t\"github.com\/youtube\/vitess\/go\/timer\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/callerid\"\n\tquerypb \"github.com\/youtube\/vitess\/go\/vt\/proto\/query\"\n\tvtrpcpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ TxLogger can be used to enable logging of transactions.\n\/\/ Call TxLogger.ServeLogs in your main program to enable logging.\n\/\/ The log format can be inferred by looking at TxConnection.Format.\nvar TxLogger = streamlog.New(\"TxLog\", 10)\n\n\/\/ These consts identify how a transaction was resolved.\nconst (\n\tTxClose = \"close\"\n\tTxCommit = \"commit\"\n\tTxRollback = \"rollback\"\n\tTxPrepare = \"prepare\"\n\tTxKill = \"kill\"\n)\n\nconst txLogInterval = time.Duration(1 * time.Minute)\n\n\/\/ TxPool is the transaction pool for the query service.\ntype TxPool struct {\n\tpool *ConnPool\n\tactivePool *pools.Numbered\n\tlastID sync2.AtomicInt64\n\ttimeout sync2.AtomicDuration\n\tticks *timer.Timer\n\ttxStats *stats.Timings\n\tqueryServiceStats *QueryServiceStats\n\tchecker MySQLChecker\n\t\/\/ Tracking culprits that cause tx pool full errors.\n\tlogMu sync.Mutex\n\tlastLog time.Time\n}\n\n\/\/ NewTxPool creates a new TxPool. It's not operational until it's Open'd.\nfunc NewTxPool(\n\tname string,\n\ttxStatsPrefix string,\n\tcapacity int,\n\ttimeout time.Duration,\n\tidleTimeout time.Duration,\n\tenablePublishStats bool,\n\tqStats *QueryServiceStats,\n\tchecker MySQLChecker) *TxPool {\n\n\ttxStatsName := \"\"\n\tif enablePublishStats {\n\t\ttxStatsName = txStatsPrefix + \"Transactions\"\n\t}\n\n\taxp := &TxPool{\n\t\tpool: NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats, checker),\n\t\tactivePool: pools.NewNumbered(),\n\t\tlastID: sync2.NewAtomicInt64(time.Now().UnixNano()),\n\t\ttimeout: sync2.NewAtomicDuration(timeout),\n\t\tticks: timer.NewTimer(timeout \/ 10),\n\t\ttxStats: stats.NewTimings(txStatsName),\n\t\tchecker: checker,\n\t\tqueryServiceStats: qStats,\n\t}\n\t\/\/ Careful: pool also exports name+\"xxx\" vars,\n\t\/\/ but we know it doesn't export Timeout.\n\tif enablePublishStats {\n\t\tstats.Publish(name+\"Timeout\", stats.DurationFunc(axp.timeout.Get))\n\t}\n\treturn axp\n}\n\n\/\/ Open makes the TxPool operational. This also starts the transaction killer\n\/\/ that will kill long-running transactions.\nfunc (axp *TxPool) Open(appParams, dbaParams *sqldb.ConnParams) {\n\tlog.Infof(\"Starting transaction id: %d\", axp.lastID)\n\taxp.pool.Open(appParams, dbaParams)\n\taxp.ticks.Start(func() { axp.transactionKiller() })\n}\n\n\/\/ Close closes the TxPool. A closed pool can be reopened.\nfunc (axp *TxPool) Close() {\n\taxp.ticks.Stop()\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(0), \"for closing\") {\n\t\tconn := v.(*TxConnection)\n\t\tlog.Warningf(\"killing transaction for shutdown: %s\", conn.Format(nil))\n\t\taxp.queryServiceStats.InternalErrors.Add(\"StrayTransactions\", 1)\n\t\tconn.Close()\n\t\tconn.conclude(TxClose)\n\t}\n\taxp.pool.Close()\n}\n\n\/\/ RollbackNonBusy rolls back all transactions that are not in use.\n\/\/ Transactions can be in use for situations like executing statements\n\/\/ or in prepared state.\nfunc (axp *TxPool) RollbackNonBusy(ctx context.Context) {\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(0), \"for transition\") {\n\t\tconn := v.(*TxConnection)\n\t\tlog.Warningf(\"rolling back transaction for transition: %s\", conn.Format(nil))\n\t\taxp.queryServiceStats.InternalErrors.Add(\"StrayTransactions\", 1)\n\t\taxp.LocalConclude(ctx, conn)\n\t}\n}\n\nfunc (axp *TxPool) transactionKiller() {\n\tdefer logError(axp.queryServiceStats)\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(axp.Timeout()), \"for rollback\") {\n\t\tconn := v.(*TxConnection)\n\t\tlog.Warningf(\"killing transaction (exceeded timeout: %v): %s\", axp.Timeout(), conn.Format(nil))\n\t\taxp.queryServiceStats.KillStats.Add(\"Transactions\", 1)\n\t\tconn.Close()\n\t\tconn.conclude(TxKill)\n\t}\n}\n\n\/\/ WaitForEmpty waits until all active transactions are completed.\nfunc (axp *TxPool) WaitForEmpty() {\n\taxp.activePool.WaitForEmpty()\n}\n\n\/\/ Begin begins a transaction, and returns the associated transaction id.\n\/\/ Subsequent statements can access the connection through the transaction id.\nfunc (axp *TxPool) Begin(ctx context.Context) (int64, error) {\n\tpoolCtx := ctx\n\tif deadline, ok := ctx.Deadline(); ok {\n\t\tvar cancel func()\n\t\tpoolCtx, cancel = context.WithDeadline(ctx, deadline.Add(-10*time.Millisecond))\n\t\tdefer cancel()\n\t}\n\tconn, err := axp.pool.Get(poolCtx)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrConnPoolClosed:\n\t\t\treturn 0, err\n\t\tcase pools.ErrTimeout:\n\t\t\taxp.LogActive()\n\t\t\treturn 0, NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, \"Transaction pool connection limit exceeded\")\n\t\t}\n\t\treturn 0, NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err)\n\t}\n\tif _, err := conn.Exec(ctx, \"begin\", 1, false); err != nil {\n\t\tconn.Recycle()\n\t\treturn 0, NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\ttransactionID := axp.lastID.Add(1)\n\taxp.activePool.Register(\n\t\ttransactionID,\n\t\tnewTxConnection(\n\t\t\tconn,\n\t\t\ttransactionID,\n\t\t\taxp,\n\t\t\tcallerid.ImmediateCallerIDFromContext(ctx),\n\t\t\tcallerid.EffectiveCallerIDFromContext(ctx),\n\t\t),\n\t)\n\treturn transactionID, nil\n}\n\n\/\/ Commit commits the specified transaction.\nfunc (axp *TxPool) Commit(ctx context.Context, transactionID int64) error {\n\tconn, err := axp.Get(transactionID, \"for commit\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn axp.LocalCommit(ctx, conn)\n}\n\n\/\/ Rollback rolls back the specified transaction.\nfunc (axp *TxPool) Rollback(ctx context.Context, transactionID int64) error {\n\tconn, err := axp.Get(transactionID, \"for rollback\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn axp.localRollback(ctx, conn)\n}\n\n\/\/ Get fetches the connection associated to the transactionID.\n\/\/ You must call Recycle on TxConnection once done.\nfunc (axp *TxPool) Get(transactionID int64, reason string) (*TxConnection, error) {\n\tv, err := axp.activePool.Get(transactionID, reason)\n\tif err != nil {\n\t\treturn nil, NewTabletError(vtrpcpb.ErrorCode_NOT_IN_TX, \"Transaction %d: %v\", transactionID, err)\n\t}\n\treturn v.(*TxConnection), nil\n}\n\n\/\/ LocalBegin is equivalent to Begin->Get.\n\/\/ It's used for executing transactions within a request. It's safe\n\/\/ to always call LocalConclude at the end.\nfunc (axp *TxPool) LocalBegin(ctx context.Context) (*TxConnection, error) {\n\ttransactionID, err := axp.Begin(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn axp.Get(transactionID, \"for local query\")\n}\n\n\/\/ LocalCommit is the commit function for LocalBegin.\nfunc (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection) error {\n\tdefer conn.conclude(TxCommit)\n\taxp.txStats.Add(\"Completed\", time.Now().Sub(conn.StartTime))\n\tif _, err := conn.Exec(ctx, \"commit\", 1, false); err != nil {\n\t\tconn.Close()\n\t\treturn NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn nil\n}\n\n\/\/ LocalConclude concludes a transaction started by LocalBegin.\n\/\/ If the transaction was not previously concluded, it's rolled back.\nfunc (axp *TxPool) LocalConclude(ctx context.Context, conn *TxConnection) {\n\tif conn.DBConn != nil {\n\t\t_ = axp.localRollback(ctx, conn)\n\t}\n}\n\nfunc (axp *TxPool) localRollback(ctx context.Context, conn *TxConnection) error {\n\tdefer conn.conclude(TxRollback)\n\taxp.txStats.Add(\"Aborted\", time.Now().Sub(conn.StartTime))\n\tif _, err := conn.Exec(ctx, \"rollback\", 1, false); err != nil {\n\t\tconn.Close()\n\t\treturn NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn nil\n}\n\n\/\/ LogActive causes all existing transactions to be logged when they complete.\n\/\/ The logging is throttled to no more than once every txLogInterval.\nfunc (axp *TxPool) LogActive() {\n\taxp.logMu.Lock()\n\tdefer axp.logMu.Unlock()\n\tif time.Now().Sub(axp.lastLog) < txLogInterval {\n\t\treturn\n\t}\n\taxp.lastLog = time.Now()\n\tconns := axp.activePool.GetAll()\n\tfor _, c := range conns {\n\t\tc.(*TxConnection).LogToFile.Set(1)\n\t}\n}\n\n\/\/ Timeout returns the transaction timeout.\nfunc (axp *TxPool) Timeout() time.Duration {\n\treturn axp.timeout.Get()\n}\n\n\/\/ SetTimeout sets the transaction timeout.\nfunc (axp *TxPool) SetTimeout(timeout time.Duration) {\n\taxp.timeout.Set(timeout)\n\taxp.ticks.SetInterval(timeout \/ 10)\n}\n\n\/\/ TxConnection is meant for executing transactions. It can return itself to\n\/\/ the tx pool correctly. It also does not retry statements if there\n\/\/ are failures.\ntype TxConnection struct {\n\t*DBConn\n\tTransactionID int64\n\tpool *TxPool\n\tStartTime time.Time\n\tEndTime time.Time\n\tQueries []string\n\tConclusion string\n\tLogToFile sync2.AtomicInt32\n\tImmediateCallerID *querypb.VTGateCallerID\n\tEffectiveCallerID *vtrpcpb.CallerID\n}\n\nfunc newTxConnection(conn *DBConn, transactionID int64, pool *TxPool, immediate *querypb.VTGateCallerID, effective *vtrpcpb.CallerID) *TxConnection {\n\treturn &TxConnection{\n\t\tDBConn: conn,\n\t\tTransactionID: transactionID,\n\t\tpool: pool,\n\t\tStartTime: time.Now(),\n\t\tQueries: make([]string, 0, 8),\n\t\tImmediateCallerID: immediate,\n\t\tEffectiveCallerID: effective,\n\t}\n}\n\n\/\/ Exec executes the statement for the current transaction.\nfunc (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) {\n\tr, err := txc.DBConn.ExecOnce(ctx, query, maxrows, wantfields)\n\tif err != nil {\n\t\tif IsConnErr(err) {\n\t\t\ttxc.pool.checker.CheckMySQL()\n\t\t\treturn nil, NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err)\n\t\t}\n\t\treturn nil, NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn r, nil\n}\n\n\/\/ Recycle returns the connection to the pool. The transaction remains\n\/\/ active.\nfunc (txc *TxConnection) Recycle() {\n\tif txc.IsClosed() {\n\t\ttxc.conclude(TxClose)\n\t} else {\n\t\ttxc.pool.activePool.Put(txc.TransactionID)\n\t}\n}\n\n\/\/ RecordQuery records the query against this transaction.\nfunc (txc *TxConnection) RecordQuery(query string) {\n\ttxc.Queries = append(txc.Queries, query)\n}\n\nfunc (txc *TxConnection) conclude(conclusion string) {\n\ttxc.pool.activePool.Unregister(txc.TransactionID)\n\ttxc.DBConn.Recycle()\n\ttxc.DBConn = nil\n\ttxc.log(conclusion)\n}\n\nfunc (txc *TxConnection) log(conclusion string) {\n\ttxc.Conclusion = conclusion\n\ttxc.EndTime = time.Now()\n\n\tusername := callerid.GetPrincipal(txc.EffectiveCallerID)\n\tif username == \"\" {\n\t\tusername = callerid.GetUsername(txc.ImmediateCallerID)\n\t}\n\tduration := txc.EndTime.Sub(txc.StartTime)\n\ttxc.pool.queryServiceStats.UserTransactionCount.Add([]string{username, conclusion}, 1)\n\ttxc.pool.queryServiceStats.UserTransactionTimesNs.Add([]string{username, conclusion}, int64(duration))\n\tif txc.LogToFile.Get() != 0 {\n\t\tlog.Infof(\"Logged transaction: %s\", txc.Format(nil))\n\t}\n\tTxLogger.Send(txc)\n}\n\n\/\/ EventTime returns the time the event was created.\nfunc (txc *TxConnection) EventTime() time.Time {\n\treturn txc.EndTime\n}\n\n\/\/ Format returns a printable version of the connection info.\nfunc (txc *TxConnection) Format(params url.Values) string {\n\treturn fmt.Sprintf(\n\t\t\"%v\\t'%v'\\t'%v'\\t%v\\t%v\\t%.6f\\t%v\\t%v\\t\\n\",\n\t\ttxc.TransactionID,\n\t\tcallerid.GetPrincipal(txc.EffectiveCallerID),\n\t\tcallerid.GetUsername(txc.ImmediateCallerID),\n\t\ttxc.StartTime.Format(time.StampMicro),\n\t\ttxc.EndTime.Format(time.StampMicro),\n\t\ttxc.EndTime.Sub(txc.StartTime).Seconds(),\n\t\ttxc.Conclusion,\n\t\tstrings.Join(txc.Queries, \";\"),\n\t)\n}\n<commit_msg>b\/32560113: Remove false alarm (#2220)<commit_after>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tabletserver\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/pools\"\n\t\"github.com\/youtube\/vitess\/go\/sqldb\"\n\t\"github.com\/youtube\/vitess\/go\/sqltypes\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n\t\"github.com\/youtube\/vitess\/go\/streamlog\"\n\t\"github.com\/youtube\/vitess\/go\/sync2\"\n\t\"github.com\/youtube\/vitess\/go\/timer\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/callerid\"\n\tquerypb \"github.com\/youtube\/vitess\/go\/vt\/proto\/query\"\n\tvtrpcpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ TxLogger can be used to enable logging of transactions.\n\/\/ Call TxLogger.ServeLogs in your main program to enable logging.\n\/\/ The log format can be inferred by looking at TxConnection.Format.\nvar TxLogger = streamlog.New(\"TxLog\", 10)\n\n\/\/ These consts identify how a transaction was resolved.\nconst (\n\tTxClose = \"close\"\n\tTxCommit = \"commit\"\n\tTxRollback = \"rollback\"\n\tTxPrepare = \"prepare\"\n\tTxKill = \"kill\"\n)\n\nconst txLogInterval = time.Duration(1 * time.Minute)\n\n\/\/ TxPool is the transaction pool for the query service.\ntype TxPool struct {\n\tpool *ConnPool\n\tactivePool *pools.Numbered\n\tlastID sync2.AtomicInt64\n\ttimeout sync2.AtomicDuration\n\tticks *timer.Timer\n\ttxStats *stats.Timings\n\tqueryServiceStats *QueryServiceStats\n\tchecker MySQLChecker\n\t\/\/ Tracking culprits that cause tx pool full errors.\n\tlogMu sync.Mutex\n\tlastLog time.Time\n}\n\n\/\/ NewTxPool creates a new TxPool. It's not operational until it's Open'd.\nfunc NewTxPool(\n\tname string,\n\ttxStatsPrefix string,\n\tcapacity int,\n\ttimeout time.Duration,\n\tidleTimeout time.Duration,\n\tenablePublishStats bool,\n\tqStats *QueryServiceStats,\n\tchecker MySQLChecker) *TxPool {\n\n\ttxStatsName := \"\"\n\tif enablePublishStats {\n\t\ttxStatsName = txStatsPrefix + \"Transactions\"\n\t}\n\n\taxp := &TxPool{\n\t\tpool: NewConnPool(name, capacity, idleTimeout, enablePublishStats, qStats, checker),\n\t\tactivePool: pools.NewNumbered(),\n\t\tlastID: sync2.NewAtomicInt64(time.Now().UnixNano()),\n\t\ttimeout: sync2.NewAtomicDuration(timeout),\n\t\tticks: timer.NewTimer(timeout \/ 10),\n\t\ttxStats: stats.NewTimings(txStatsName),\n\t\tchecker: checker,\n\t\tqueryServiceStats: qStats,\n\t}\n\t\/\/ Careful: pool also exports name+\"xxx\" vars,\n\t\/\/ but we know it doesn't export Timeout.\n\tif enablePublishStats {\n\t\tstats.Publish(name+\"Timeout\", stats.DurationFunc(axp.timeout.Get))\n\t}\n\treturn axp\n}\n\n\/\/ Open makes the TxPool operational. This also starts the transaction killer\n\/\/ that will kill long-running transactions.\nfunc (axp *TxPool) Open(appParams, dbaParams *sqldb.ConnParams) {\n\tlog.Infof(\"Starting transaction id: %d\", axp.lastID)\n\taxp.pool.Open(appParams, dbaParams)\n\taxp.ticks.Start(func() { axp.transactionKiller() })\n}\n\n\/\/ Close closes the TxPool. A closed pool can be reopened.\nfunc (axp *TxPool) Close() {\n\taxp.ticks.Stop()\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(0), \"for closing\") {\n\t\tconn := v.(*TxConnection)\n\t\tlog.Warningf(\"killing transaction for shutdown: %s\", conn.Format(nil))\n\t\taxp.queryServiceStats.InternalErrors.Add(\"StrayTransactions\", 1)\n\t\tconn.Close()\n\t\tconn.conclude(TxClose)\n\t}\n\taxp.pool.Close()\n}\n\n\/\/ RollbackNonBusy rolls back all transactions that are not in use.\n\/\/ Transactions can be in use for situations like executing statements\n\/\/ or in prepared state.\nfunc (axp *TxPool) RollbackNonBusy(ctx context.Context) {\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(0), \"for transition\") {\n\t\taxp.LocalConclude(ctx, v.(*TxConnection))\n\t}\n}\n\nfunc (axp *TxPool) transactionKiller() {\n\tdefer logError(axp.queryServiceStats)\n\tfor _, v := range axp.activePool.GetOutdated(time.Duration(axp.Timeout()), \"for rollback\") {\n\t\tconn := v.(*TxConnection)\n\t\tlog.Warningf(\"killing transaction (exceeded timeout: %v): %s\", axp.Timeout(), conn.Format(nil))\n\t\taxp.queryServiceStats.KillStats.Add(\"Transactions\", 1)\n\t\tconn.Close()\n\t\tconn.conclude(TxKill)\n\t}\n}\n\n\/\/ WaitForEmpty waits until all active transactions are completed.\nfunc (axp *TxPool) WaitForEmpty() {\n\taxp.activePool.WaitForEmpty()\n}\n\n\/\/ Begin begins a transaction, and returns the associated transaction id.\n\/\/ Subsequent statements can access the connection through the transaction id.\nfunc (axp *TxPool) Begin(ctx context.Context) (int64, error) {\n\tpoolCtx := ctx\n\tif deadline, ok := ctx.Deadline(); ok {\n\t\tvar cancel func()\n\t\tpoolCtx, cancel = context.WithDeadline(ctx, deadline.Add(-10*time.Millisecond))\n\t\tdefer cancel()\n\t}\n\tconn, err := axp.pool.Get(poolCtx)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrConnPoolClosed:\n\t\t\treturn 0, err\n\t\tcase pools.ErrTimeout:\n\t\t\taxp.LogActive()\n\t\t\treturn 0, NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, \"Transaction pool connection limit exceeded\")\n\t\t}\n\t\treturn 0, NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err)\n\t}\n\tif _, err := conn.Exec(ctx, \"begin\", 1, false); err != nil {\n\t\tconn.Recycle()\n\t\treturn 0, NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\ttransactionID := axp.lastID.Add(1)\n\taxp.activePool.Register(\n\t\ttransactionID,\n\t\tnewTxConnection(\n\t\t\tconn,\n\t\t\ttransactionID,\n\t\t\taxp,\n\t\t\tcallerid.ImmediateCallerIDFromContext(ctx),\n\t\t\tcallerid.EffectiveCallerIDFromContext(ctx),\n\t\t),\n\t)\n\treturn transactionID, nil\n}\n\n\/\/ Commit commits the specified transaction.\nfunc (axp *TxPool) Commit(ctx context.Context, transactionID int64) error {\n\tconn, err := axp.Get(transactionID, \"for commit\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn axp.LocalCommit(ctx, conn)\n}\n\n\/\/ Rollback rolls back the specified transaction.\nfunc (axp *TxPool) Rollback(ctx context.Context, transactionID int64) error {\n\tconn, err := axp.Get(transactionID, \"for rollback\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn axp.localRollback(ctx, conn)\n}\n\n\/\/ Get fetches the connection associated to the transactionID.\n\/\/ You must call Recycle on TxConnection once done.\nfunc (axp *TxPool) Get(transactionID int64, reason string) (*TxConnection, error) {\n\tv, err := axp.activePool.Get(transactionID, reason)\n\tif err != nil {\n\t\treturn nil, NewTabletError(vtrpcpb.ErrorCode_NOT_IN_TX, \"Transaction %d: %v\", transactionID, err)\n\t}\n\treturn v.(*TxConnection), nil\n}\n\n\/\/ LocalBegin is equivalent to Begin->Get.\n\/\/ It's used for executing transactions within a request. It's safe\n\/\/ to always call LocalConclude at the end.\nfunc (axp *TxPool) LocalBegin(ctx context.Context) (*TxConnection, error) {\n\ttransactionID, err := axp.Begin(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn axp.Get(transactionID, \"for local query\")\n}\n\n\/\/ LocalCommit is the commit function for LocalBegin.\nfunc (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection) error {\n\tdefer conn.conclude(TxCommit)\n\taxp.txStats.Add(\"Completed\", time.Now().Sub(conn.StartTime))\n\tif _, err := conn.Exec(ctx, \"commit\", 1, false); err != nil {\n\t\tconn.Close()\n\t\treturn NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn nil\n}\n\n\/\/ LocalConclude concludes a transaction started by LocalBegin.\n\/\/ If the transaction was not previously concluded, it's rolled back.\nfunc (axp *TxPool) LocalConclude(ctx context.Context, conn *TxConnection) {\n\tif conn.DBConn != nil {\n\t\t_ = axp.localRollback(ctx, conn)\n\t}\n}\n\nfunc (axp *TxPool) localRollback(ctx context.Context, conn *TxConnection) error {\n\tdefer conn.conclude(TxRollback)\n\taxp.txStats.Add(\"Aborted\", time.Now().Sub(conn.StartTime))\n\tif _, err := conn.Exec(ctx, \"rollback\", 1, false); err != nil {\n\t\tconn.Close()\n\t\treturn NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn nil\n}\n\n\/\/ LogActive causes all existing transactions to be logged when they complete.\n\/\/ The logging is throttled to no more than once every txLogInterval.\nfunc (axp *TxPool) LogActive() {\n\taxp.logMu.Lock()\n\tdefer axp.logMu.Unlock()\n\tif time.Now().Sub(axp.lastLog) < txLogInterval {\n\t\treturn\n\t}\n\taxp.lastLog = time.Now()\n\tconns := axp.activePool.GetAll()\n\tfor _, c := range conns {\n\t\tc.(*TxConnection).LogToFile.Set(1)\n\t}\n}\n\n\/\/ Timeout returns the transaction timeout.\nfunc (axp *TxPool) Timeout() time.Duration {\n\treturn axp.timeout.Get()\n}\n\n\/\/ SetTimeout sets the transaction timeout.\nfunc (axp *TxPool) SetTimeout(timeout time.Duration) {\n\taxp.timeout.Set(timeout)\n\taxp.ticks.SetInterval(timeout \/ 10)\n}\n\n\/\/ TxConnection is meant for executing transactions. It can return itself to\n\/\/ the tx pool correctly. It also does not retry statements if there\n\/\/ are failures.\ntype TxConnection struct {\n\t*DBConn\n\tTransactionID int64\n\tpool *TxPool\n\tStartTime time.Time\n\tEndTime time.Time\n\tQueries []string\n\tConclusion string\n\tLogToFile sync2.AtomicInt32\n\tImmediateCallerID *querypb.VTGateCallerID\n\tEffectiveCallerID *vtrpcpb.CallerID\n}\n\nfunc newTxConnection(conn *DBConn, transactionID int64, pool *TxPool, immediate *querypb.VTGateCallerID, effective *vtrpcpb.CallerID) *TxConnection {\n\treturn &TxConnection{\n\t\tDBConn: conn,\n\t\tTransactionID: transactionID,\n\t\tpool: pool,\n\t\tStartTime: time.Now(),\n\t\tQueries: make([]string, 0, 8),\n\t\tImmediateCallerID: immediate,\n\t\tEffectiveCallerID: effective,\n\t}\n}\n\n\/\/ Exec executes the statement for the current transaction.\nfunc (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) {\n\tr, err := txc.DBConn.ExecOnce(ctx, query, maxrows, wantfields)\n\tif err != nil {\n\t\tif IsConnErr(err) {\n\t\t\ttxc.pool.checker.CheckMySQL()\n\t\t\treturn nil, NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err)\n\t\t}\n\t\treturn nil, NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err)\n\t}\n\treturn r, nil\n}\n\n\/\/ Recycle returns the connection to the pool. The transaction remains\n\/\/ active.\nfunc (txc *TxConnection) Recycle() {\n\tif txc.IsClosed() {\n\t\ttxc.conclude(TxClose)\n\t} else {\n\t\ttxc.pool.activePool.Put(txc.TransactionID)\n\t}\n}\n\n\/\/ RecordQuery records the query against this transaction.\nfunc (txc *TxConnection) RecordQuery(query string) {\n\ttxc.Queries = append(txc.Queries, query)\n}\n\nfunc (txc *TxConnection) conclude(conclusion string) {\n\ttxc.pool.activePool.Unregister(txc.TransactionID)\n\ttxc.DBConn.Recycle()\n\ttxc.DBConn = nil\n\ttxc.log(conclusion)\n}\n\nfunc (txc *TxConnection) log(conclusion string) {\n\ttxc.Conclusion = conclusion\n\ttxc.EndTime = time.Now()\n\n\tusername := callerid.GetPrincipal(txc.EffectiveCallerID)\n\tif username == \"\" {\n\t\tusername = callerid.GetUsername(txc.ImmediateCallerID)\n\t}\n\tduration := txc.EndTime.Sub(txc.StartTime)\n\ttxc.pool.queryServiceStats.UserTransactionCount.Add([]string{username, conclusion}, 1)\n\ttxc.pool.queryServiceStats.UserTransactionTimesNs.Add([]string{username, conclusion}, int64(duration))\n\tif txc.LogToFile.Get() != 0 {\n\t\tlog.Infof(\"Logged transaction: %s\", txc.Format(nil))\n\t}\n\tTxLogger.Send(txc)\n}\n\n\/\/ EventTime returns the time the event was created.\nfunc (txc *TxConnection) EventTime() time.Time {\n\treturn txc.EndTime\n}\n\n\/\/ Format returns a printable version of the connection info.\nfunc (txc *TxConnection) Format(params url.Values) string {\n\treturn fmt.Sprintf(\n\t\t\"%v\\t'%v'\\t'%v'\\t%v\\t%v\\t%.6f\\t%v\\t%v\\t\\n\",\n\t\ttxc.TransactionID,\n\t\tcallerid.GetPrincipal(txc.EffectiveCallerID),\n\t\tcallerid.GetUsername(txc.ImmediateCallerID),\n\t\ttxc.StartTime.Format(time.StampMicro),\n\t\ttxc.EndTime.Format(time.StampMicro),\n\t\ttxc.EndTime.Sub(txc.StartTime).Seconds(),\n\t\ttxc.Conclusion,\n\t\tstrings.Join(txc.Queries, \";\"),\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cni\n\nimport (\n\t\"testing\"\n\n\t\"istio.io\/istio\/pkg\/test\/framework\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/echo\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/environment\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/galley\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/istio\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/namespace\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/pilot\"\n\t\"istio.io\/istio\/tests\/integration\/security\/util\/reachability\"\n)\n\nfunc TestMain(m *testing.M) {\n\tframework.\n\t\tNewSuite(\"cni\", m).\n\t\tSkip(\"https:\/\/github.com\/istio\/istio\/issues\/20487\").\n\t\tSetupOnEnv(environment.Kube, istio.Setup(nil, func(cfg *istio.Config) {\n\t\t\tcfg.ControlPlaneValues = `\ncomponents:\n cni:\n enabled: true\n hub: gcr.io\/istio-testing\n tag: latest\n`\n\t\t})).\n\t\tRun()\n}\n\n\/\/ This test verifies reachability under different authN scenario:\n\/\/ - app A to app B using mTLS.\n\/\/ In each test, the steps are:\n\/\/ - Configure authn policy.\n\/\/ - Wait for config propagation.\n\/\/ - Send HTTP\/gRPC requests between apps.\nfunc TestCNIReachability(t *testing.T) {\n\tframework.NewTest(t).\n\t\tRun(func(ctx framework.TestContext) {\n\t\t\tg, err := galley.New(ctx, galley.Config{})\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatal(err)\n\t\t\t}\n\t\t\tp, err := pilot.New(ctx, pilot.Config{\n\t\t\t\tGalley: g,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatal(err)\n\t\t\t}\n\t\t\trctx := reachability.CreateContext(ctx, g, p)\n\t\t\tsystemNM := namespace.ClaimSystemNamespaceOrFail(ctx, ctx)\n\n\t\t\ttestCases := []reachability.TestCase{\n\t\t\t\t{\n\t\t\t\t\tConfigFile: \"global-mtls-on.yaml\",\n\t\t\t\t\tNamespace: systemNM,\n\t\t\t\t\tRequiredEnvironment: environment.Kube,\n\t\t\t\t\tInclude: func(src echo.Instance, opts echo.CallOptions) bool {\n\t\t\t\t\t\t\/\/ Exclude calls to the headless TCP port.\n\t\t\t\t\t\tif opts.Target == rctx.Headless && opts.PortName == \"tcp\" {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true\n\t\t\t\t\t},\n\t\t\t\t\tExpectSuccess: func(src echo.Instance, opts echo.CallOptions) bool {\n\t\t\t\t\t\tif src == rctx.Naked && opts.Target == rctx.Naked {\n\t\t\t\t\t\t\t\/\/ naked->naked should always succeed.\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ If one of the two endpoints is naked, expect failure.\n\t\t\t\t\t\treturn src != rctx.Naked && opts.Target != rctx.Naked\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\trctx.Run(testCases)\n\t\t})\n}\n<commit_msg>Re-enable CNI test (#20826)<commit_after>\/\/ Copyright 2020 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cni\n\nimport (\n\t\"testing\"\n\n\t\"istio.io\/istio\/pkg\/test\/framework\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/echo\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/environment\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/environment\/kube\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/galley\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/istio\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/namespace\"\n\t\"istio.io\/istio\/pkg\/test\/framework\/components\/pilot\"\n\t\"istio.io\/istio\/tests\/integration\/security\/util\/reachability\"\n)\n\nfunc TestMain(m *testing.M) {\n\tframework.\n\t\tNewSuite(\"cni\", m).\n\t\tRequireEnvironment(environment.Kube).\n\t\tSetupOnEnv(environment.Kube, istio.Setup(nil, func(cfg *istio.Config) {\n\t\t\tcfg.ControlPlaneValues = `\ncomponents:\n cni:\n enabled: true\n hub: gcr.io\/istio-testing\n tag: latest\n namespace: kube-system\n`\n\t\t})).\n\t\tRun()\n}\n\n\/\/ This test verifies reachability under different authN scenario:\n\/\/ - app A to app B using mTLS.\n\/\/ In each test, the steps are:\n\/\/ - Configure authn policy.\n\/\/ - Wait for config propagation.\n\/\/ - Send HTTP\/gRPC requests between apps.\nfunc TestCNIReachability(t *testing.T) {\n\tframework.NewTest(t).\n\t\tRun(func(ctx framework.TestContext) {\n\t\t\tg, err := galley.New(ctx, galley.Config{})\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatal(err)\n\t\t\t}\n\t\t\tp, err := pilot.New(ctx, pilot.Config{\n\t\t\t\tGalley: g,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatal(err)\n\t\t\t}\n\t\t\tkenv := ctx.Environment().(*kube.Environment)\n\t\t\t_, err = kenv.WaitUntilPodsAreReady(kenv.NewSinglePodFetch(\"kube-system\", \"k8s-app=istio-cni-node\"))\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatal(err)\n\t\t\t}\n\t\t\trctx := reachability.CreateContext(ctx, g, p)\n\t\t\tsystemNM := namespace.ClaimSystemNamespaceOrFail(ctx, ctx)\n\n\t\t\ttestCases := []reachability.TestCase{\n\t\t\t\t{\n\t\t\t\t\tConfigFile: \"global-mtls-on.yaml\",\n\t\t\t\t\tNamespace: systemNM,\n\t\t\t\t\tRequiredEnvironment: environment.Kube,\n\t\t\t\t\tInclude: func(src echo.Instance, opts echo.CallOptions) bool {\n\t\t\t\t\t\t\/\/ Exclude calls to the headless TCP port.\n\t\t\t\t\t\tif opts.Target == rctx.Headless && opts.PortName == \"tcp\" {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true\n\t\t\t\t\t},\n\t\t\t\t\tExpectSuccess: func(src echo.Instance, opts echo.CallOptions) bool {\n\t\t\t\t\t\tif src == rctx.Naked && opts.Target == rctx.Naked {\n\t\t\t\t\t\t\t\/\/ naked->naked should always succeed.\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ If one of the two endpoints is naked, expect failure.\n\t\t\t\t\t\treturn src != rctx.Naked && opts.Target != rctx.Naked\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\trctx.Run(testCases)\n\t\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Same copyright and license as the rest of the files in this project\n\/\/ This file contains style related functions and structures\n\npackage gtk\n\n\/\/ #include <gtk\/gtk.h>\n\/\/ #include \"gtk.go.h\"\nimport \"C\"\nimport (\n\t\"unsafe\"\n\n\t\"github.com\/gotk3\/gotk3\/gdk\"\n\t\"github.com\/gotk3\/gotk3\/glib\"\n)\n\ntype StyleProviderPriority int\n\nconst (\n\tSTYLE_PROVIDER_PRIORITY_FALLBACK StyleProviderPriority = C.GTK_STYLE_PROVIDER_PRIORITY_FALLBACK\n\tSTYLE_PROVIDER_PRIORITY_THEME = C.GTK_STYLE_PROVIDER_PRIORITY_THEME\n\tSTYLE_PROVIDER_PRIORITY_SETTINGS = C.GTK_STYLE_PROVIDER_PRIORITY_SETTINGS\n\tSTYLE_PROVIDER_PRIORITY_APPLICATION = C.GTK_STYLE_PROVIDER_PRIORITY_APPLICATION\n\tSTYLE_PROVIDER_PRIORITY_USER = C.GTK_STYLE_PROVIDER_PRIORITY_USER\n)\n\n\/*\n * GtkStyleContext\n *\/\n\n\/\/ StyleContext is a representation of GTK's GtkStyleContext.\ntype StyleContext struct {\n\t*glib.Object\n}\n\n\/\/ native returns a pointer to the underlying GtkStyleContext.\nfunc (v *StyleContext) native() *C.GtkStyleContext {\n\tif v == nil || v.Object == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkStyleContext(p)\n}\n\nfunc wrapStyleContext(obj *glib.Object) *StyleContext {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\treturn &StyleContext{obj}\n}\n\nfunc (v *StyleContext) AddClass(class_name string) {\n\tcstr := C.CString(class_name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.gtk_style_context_add_class(v.native(), (*C.gchar)(cstr))\n}\n\nfunc (v *StyleContext) RemoveClass(class_name string) {\n\tcstr := C.CString(class_name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.gtk_style_context_remove_class(v.native(), (*C.gchar)(cstr))\n}\n\nfunc fromNativeStyleContext(c *C.GtkStyleContext) (*StyleContext, error) {\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapStyleContext(obj), nil\n}\n\n\/\/ GetStyleContext is a wrapper around gtk_widget_get_style_context().\nfunc (v *Widget) GetStyleContext() (*StyleContext, error) {\n\treturn fromNativeStyleContext(C.gtk_widget_get_style_context(v.native()))\n}\n\n\/\/ GetParent is a wrapper around gtk_style_context_get_parent().\nfunc (v *StyleContext) GetParent() (*StyleContext, error) {\n\treturn fromNativeStyleContext(C.gtk_style_context_get_parent(v.native()))\n}\n\n\/\/ GetProperty is a wrapper around gtk_style_context_get_property().\nfunc (v *StyleContext) GetProperty(property string, state StateFlags) (interface{}, error) {\n\tcstr := (*C.gchar)(C.CString(property))\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tvar gval C.GValue\n\tC.gtk_style_context_get_property(v.native(), cstr, C.GtkStateFlags(state), &gval)\n\tval := glib.ValueFromNative(unsafe.Pointer(&gval))\n\treturn val.GoValue()\n}\n\n\/\/ GetStyleProperty is a wrapper around gtk_style_context_get_style_property().\nfunc (v *StyleContext) GetStyleProperty(property string) (interface{}, error) {\n\tcstr := (*C.gchar)(C.CString(property))\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tvar gval C.GValue\n\tC.gtk_style_context_get_style_property(v.native(), cstr, &gval)\n\tval := glib.ValueFromNative(unsafe.Pointer(&gval))\n\treturn val.GoValue()\n}\n\n\/\/ GetScreen is a wrapper around gtk_style_context_get_screen().\nfunc (v *StyleContext) GetScreen() (*gdk.Screen, error) {\n\tc := C.gtk_style_context_get_screen(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\n\td := &gdk.Screen{glib.Take(unsafe.Pointer(c))}\n\treturn d, nil\n}\n\n\/\/ GetState is a wrapper around gtk_style_context_get_state().\nfunc (v *StyleContext) GetState() StateFlags {\n\treturn StateFlags(C.gtk_style_context_get_state(v.native()))\n}\n\n\/\/ GetColor is a wrapper around gtk_style_context_get_color().\nfunc (v *StyleContext) GetColor(state StateFlags) *gdk.RGBA {\n\tgdkColor := gdk.NewRGBA()\n\tC.gtk_style_context_get_color(v.native(), C.GtkStateFlags(state), (*C.GdkRGBA)(unsafe.Pointer(gdkColor.Native())))\n\treturn gdkColor\n}\n\n\/\/ LookupColor is a wrapper around gtk_style_context_lookup_color().\nfunc (v *StyleContext) LookupColor(colorName string) (*gdk.RGBA, bool) {\n\tcstr := (*C.gchar)(C.CString(colorName))\n\tdefer C.free(unsafe.Pointer(cstr))\n\tgdkColor := gdk.NewRGBA()\n\tret := C.gtk_style_context_lookup_color(v.native(), cstr, (*C.GdkRGBA)(unsafe.Pointer(gdkColor.Native())))\n\treturn gdkColor, gobool(ret)\n}\n\n\/\/ StyleContextResetWidgets is a wrapper around gtk_style_context_reset_widgets().\nfunc StyleContextResetWidgets(v *gdk.Screen) {\n\tC.gtk_style_context_reset_widgets((*C.GdkScreen)(unsafe.Pointer(v.Native())))\n}\n\n\/\/ Restore is a wrapper around gtk_style_context_restore().\nfunc (v *StyleContext) Restore() {\n\tC.gtk_style_context_restore(v.native())\n}\n\n\/\/ Save is a wrapper around gtk_style_context_save().\nfunc (v *StyleContext) Save() {\n\tC.gtk_style_context_save(v.native())\n}\n\n\/\/ SetParent is a wrapper around gtk_style_context_set_parent().\nfunc (v *StyleContext) SetParent(p *StyleContext) {\n\tC.gtk_style_context_set_parent(v.native(), p.native())\n}\n\n\/\/ HasClass is a wrapper around gtk_style_context_has_class().\nfunc (v *StyleContext) HasClass(className string) bool {\n\tcstr := C.CString(className)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\treturn gobool(C.gtk_style_context_has_class(v.native(), (*C.gchar)(cstr)))\n}\n\n\/\/ SetScreen is a wrapper around gtk_style_context_set_screen().\nfunc (v *StyleContext) SetScreen(s *gdk.Screen) {\n\tC.gtk_style_context_set_screen(v.native(), (*C.GdkScreen)(unsafe.Pointer(s.Native())))\n}\n\n\/\/ SetState is a wrapper around gtk_style_context_set_state().\nfunc (v *StyleContext) SetState(state StateFlags) {\n\tC.gtk_style_context_set_state(v.native(), C.GtkStateFlags(state))\n}\n\ntype IStyleProvider interface {\n\ttoStyleProvider() *C.GtkStyleProvider\n}\n\n\/\/ AddProvider is a wrapper around gtk_style_context_add_provider().\nfunc (v *StyleContext) AddProvider(provider IStyleProvider, prio uint) {\n\tC.gtk_style_context_add_provider(v.native(), provider.toStyleProvider(), C.guint(prio))\n}\n\n\/\/ AddProviderForScreen is a wrapper around gtk_style_context_add_provider_for_screen().\nfunc AddProviderForScreen(s *gdk.Screen, provider IStyleProvider, prio uint) {\n\tC.gtk_style_context_add_provider_for_screen((*C.GdkScreen)(unsafe.Pointer(s.Native())), provider.toStyleProvider(), C.guint(prio))\n}\n\n\/\/ RemoveProvider is a wrapper around gtk_style_context_remove_provider().\nfunc (v *StyleContext) RemoveProvider(provider IStyleProvider) {\n\tC.gtk_style_context_remove_provider(v.native(), provider.toStyleProvider())\n}\n\n\/\/ RemoveProviderForScreen is a wrapper around gtk_style_context_remove_provider_for_screen().\nfunc RemoveProviderForScreen(s *gdk.Screen, provider IStyleProvider) {\n\tC.gtk_style_context_remove_provider_for_screen((*C.GdkScreen)(unsafe.Pointer(s.Native())), provider.toStyleProvider())\n}\n\n\/\/ GtkStyleContext * \tgtk_style_context_new ()\n\/\/ void \tgtk_style_context_get ()\n\/\/ GtkTextDirection \tgtk_style_context_get_direction ()\n\/\/ GtkJunctionSides \tgtk_style_context_get_junction_sides ()\n\/\/ const GtkWidgetPath * \tgtk_style_context_get_path ()\n\/\/ GdkFrameClock * \tgtk_style_context_get_frame_clock ()\n\/\/ void \tgtk_style_context_get_style ()\n\/\/ void \tgtk_style_context_get_style_valist ()\n\/\/ void \tgtk_style_context_get_valist ()\n\/\/ GtkCssSection * \tgtk_style_context_get_section ()\n\/\/ void \tgtk_style_context_get_background_color ()\n\/\/ void \tgtk_style_context_get_border_color ()\n\/\/ void \tgtk_style_context_get_border ()\n\/\/ void \tgtk_style_context_get_padding ()\n\/\/ void \tgtk_style_context_get_margin ()\n\/\/ const PangoFontDescription * \tgtk_style_context_get_font ()\n\/\/ void \tgtk_style_context_invalidate ()\n\/\/ gboolean \tgtk_style_context_state_is_running ()\n\/\/ GtkIconSet * \tgtk_style_context_lookup_icon_set ()\n\/\/ void \tgtk_style_context_cancel_animations ()\n\/\/ void \tgtk_style_context_scroll_animations ()\n\/\/ void \tgtk_style_context_notify_state_change ()\n\/\/ void \tgtk_style_context_pop_animatable_region ()\n\/\/ void \tgtk_style_context_push_animatable_region ()\n\/\/ void \tgtk_style_context_set_background ()\n\/\/ void \tgtk_style_context_set_direction ()\n\/\/ void \tgtk_style_context_set_junction_sides ()\n\/\/ void \tgtk_style_context_set_path ()\n\/\/ void \tgtk_style_context_add_region ()\n\/\/ void \tgtk_style_context_remove_region ()\n\/\/ gboolean \tgtk_style_context_has_region ()\n\/\/ GList * \tgtk_style_context_list_regions ()\n\/\/ void \tgtk_style_context_set_frame_clock ()\n\/\/ void \tgtk_style_context_set_scale ()\n\/\/ gint \tgtk_style_context_get_scale ()\n\/\/ GList * \tgtk_style_context_list_classes ()\n<commit_msg>Add gtk.RenderBackground<commit_after>\/\/ Same copyright and license as the rest of the files in this project\n\/\/ This file contains style related functions and structures\n\npackage gtk\n\n\/\/ #include <gtk\/gtk.h>\n\/\/ #include \"gtk.go.h\"\nimport \"C\"\nimport (\n\t\"unsafe\"\n\n\t\"github.com\/gotk3\/gotk3\/cairo\"\n\t\"github.com\/gotk3\/gotk3\/gdk\"\n\t\"github.com\/gotk3\/gotk3\/glib\"\n)\n\ntype StyleProviderPriority int\n\nconst (\n\tSTYLE_PROVIDER_PRIORITY_FALLBACK StyleProviderPriority = C.GTK_STYLE_PROVIDER_PRIORITY_FALLBACK\n\tSTYLE_PROVIDER_PRIORITY_THEME = C.GTK_STYLE_PROVIDER_PRIORITY_THEME\n\tSTYLE_PROVIDER_PRIORITY_SETTINGS = C.GTK_STYLE_PROVIDER_PRIORITY_SETTINGS\n\tSTYLE_PROVIDER_PRIORITY_APPLICATION = C.GTK_STYLE_PROVIDER_PRIORITY_APPLICATION\n\tSTYLE_PROVIDER_PRIORITY_USER = C.GTK_STYLE_PROVIDER_PRIORITY_USER\n)\n\n\/\/ RenderBackground is a wrapper around gtk_render_background().\nfunc RenderBackground(context *StyleContext, cr *cairo.Context, x, y, w, h float64) {\n\tC.gtk_render_background(\n\t\tcontext.native(),\n\t\t(*C.cairo_t)(unsafe.Pointer(cr.Native())),\n\t\tC.gdouble(x),\n\t\tC.gdouble(y),\n\t\tC.gdouble(w),\n\t\tC.gdouble(h),\n\t)\n}\n\n\/*\n * GtkStyleContext\n *\/\n\n\/\/ StyleContext is a representation of GTK's GtkStyleContext.\ntype StyleContext struct {\n\t*glib.Object\n}\n\n\/\/ native returns a pointer to the underlying GtkStyleContext.\nfunc (v *StyleContext) native() *C.GtkStyleContext {\n\tif v == nil || v.Object == nil {\n\t\treturn nil\n\t}\n\tp := unsafe.Pointer(v.GObject)\n\treturn C.toGtkStyleContext(p)\n}\n\nfunc wrapStyleContext(obj *glib.Object) *StyleContext {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\treturn &StyleContext{obj}\n}\n\nfunc (v *StyleContext) AddClass(class_name string) {\n\tcstr := C.CString(class_name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.gtk_style_context_add_class(v.native(), (*C.gchar)(cstr))\n}\n\nfunc (v *StyleContext) RemoveClass(class_name string) {\n\tcstr := C.CString(class_name)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tC.gtk_style_context_remove_class(v.native(), (*C.gchar)(cstr))\n}\n\nfunc fromNativeStyleContext(c *C.GtkStyleContext) (*StyleContext, error) {\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapStyleContext(obj), nil\n}\n\n\/\/ GetStyleContext is a wrapper around gtk_widget_get_style_context().\nfunc (v *Widget) GetStyleContext() (*StyleContext, error) {\n\treturn fromNativeStyleContext(C.gtk_widget_get_style_context(v.native()))\n}\n\n\/\/ GetParent is a wrapper around gtk_style_context_get_parent().\nfunc (v *StyleContext) GetParent() (*StyleContext, error) {\n\treturn fromNativeStyleContext(C.gtk_style_context_get_parent(v.native()))\n}\n\n\/\/ GetProperty is a wrapper around gtk_style_context_get_property().\nfunc (v *StyleContext) GetProperty(property string, state StateFlags) (interface{}, error) {\n\tcstr := (*C.gchar)(C.CString(property))\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tvar gval C.GValue\n\tC.gtk_style_context_get_property(v.native(), cstr, C.GtkStateFlags(state), &gval)\n\tval := glib.ValueFromNative(unsafe.Pointer(&gval))\n\treturn val.GoValue()\n}\n\n\/\/ GetStyleProperty is a wrapper around gtk_style_context_get_style_property().\nfunc (v *StyleContext) GetStyleProperty(property string) (interface{}, error) {\n\tcstr := (*C.gchar)(C.CString(property))\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\tvar gval C.GValue\n\tC.gtk_style_context_get_style_property(v.native(), cstr, &gval)\n\tval := glib.ValueFromNative(unsafe.Pointer(&gval))\n\treturn val.GoValue()\n}\n\n\/\/ GetScreen is a wrapper around gtk_style_context_get_screen().\nfunc (v *StyleContext) GetScreen() (*gdk.Screen, error) {\n\tc := C.gtk_style_context_get_screen(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\n\td := &gdk.Screen{glib.Take(unsafe.Pointer(c))}\n\treturn d, nil\n}\n\n\/\/ GetState is a wrapper around gtk_style_context_get_state().\nfunc (v *StyleContext) GetState() StateFlags {\n\treturn StateFlags(C.gtk_style_context_get_state(v.native()))\n}\n\n\/\/ GetColor is a wrapper around gtk_style_context_get_color().\nfunc (v *StyleContext) GetColor(state StateFlags) *gdk.RGBA {\n\tgdkColor := gdk.NewRGBA()\n\tC.gtk_style_context_get_color(v.native(), C.GtkStateFlags(state), (*C.GdkRGBA)(unsafe.Pointer(gdkColor.Native())))\n\treturn gdkColor\n}\n\n\/\/ LookupColor is a wrapper around gtk_style_context_lookup_color().\nfunc (v *StyleContext) LookupColor(colorName string) (*gdk.RGBA, bool) {\n\tcstr := (*C.gchar)(C.CString(colorName))\n\tdefer C.free(unsafe.Pointer(cstr))\n\tgdkColor := gdk.NewRGBA()\n\tret := C.gtk_style_context_lookup_color(v.native(), cstr, (*C.GdkRGBA)(unsafe.Pointer(gdkColor.Native())))\n\treturn gdkColor, gobool(ret)\n}\n\n\/\/ StyleContextResetWidgets is a wrapper around gtk_style_context_reset_widgets().\nfunc StyleContextResetWidgets(v *gdk.Screen) {\n\tC.gtk_style_context_reset_widgets((*C.GdkScreen)(unsafe.Pointer(v.Native())))\n}\n\n\/\/ Restore is a wrapper around gtk_style_context_restore().\nfunc (v *StyleContext) Restore() {\n\tC.gtk_style_context_restore(v.native())\n}\n\n\/\/ Save is a wrapper around gtk_style_context_save().\nfunc (v *StyleContext) Save() {\n\tC.gtk_style_context_save(v.native())\n}\n\n\/\/ SetParent is a wrapper around gtk_style_context_set_parent().\nfunc (v *StyleContext) SetParent(p *StyleContext) {\n\tC.gtk_style_context_set_parent(v.native(), p.native())\n}\n\n\/\/ HasClass is a wrapper around gtk_style_context_has_class().\nfunc (v *StyleContext) HasClass(className string) bool {\n\tcstr := C.CString(className)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\treturn gobool(C.gtk_style_context_has_class(v.native(), (*C.gchar)(cstr)))\n}\n\n\/\/ SetScreen is a wrapper around gtk_style_context_set_screen().\nfunc (v *StyleContext) SetScreen(s *gdk.Screen) {\n\tC.gtk_style_context_set_screen(v.native(), (*C.GdkScreen)(unsafe.Pointer(s.Native())))\n}\n\n\/\/ SetState is a wrapper around gtk_style_context_set_state().\nfunc (v *StyleContext) SetState(state StateFlags) {\n\tC.gtk_style_context_set_state(v.native(), C.GtkStateFlags(state))\n}\n\ntype IStyleProvider interface {\n\ttoStyleProvider() *C.GtkStyleProvider\n}\n\n\/\/ AddProvider is a wrapper around gtk_style_context_add_provider().\nfunc (v *StyleContext) AddProvider(provider IStyleProvider, prio uint) {\n\tC.gtk_style_context_add_provider(v.native(), provider.toStyleProvider(), C.guint(prio))\n}\n\n\/\/ AddProviderForScreen is a wrapper around gtk_style_context_add_provider_for_screen().\nfunc AddProviderForScreen(s *gdk.Screen, provider IStyleProvider, prio uint) {\n\tC.gtk_style_context_add_provider_for_screen((*C.GdkScreen)(unsafe.Pointer(s.Native())), provider.toStyleProvider(), C.guint(prio))\n}\n\n\/\/ RemoveProvider is a wrapper around gtk_style_context_remove_provider().\nfunc (v *StyleContext) RemoveProvider(provider IStyleProvider) {\n\tC.gtk_style_context_remove_provider(v.native(), provider.toStyleProvider())\n}\n\n\/\/ RemoveProviderForScreen is a wrapper around gtk_style_context_remove_provider_for_screen().\nfunc RemoveProviderForScreen(s *gdk.Screen, provider IStyleProvider) {\n\tC.gtk_style_context_remove_provider_for_screen((*C.GdkScreen)(unsafe.Pointer(s.Native())), provider.toStyleProvider())\n}\n\n\/\/ GtkStyleContext * \tgtk_style_context_new ()\n\/\/ void \tgtk_style_context_get ()\n\/\/ GtkTextDirection \tgtk_style_context_get_direction ()\n\/\/ GtkJunctionSides \tgtk_style_context_get_junction_sides ()\n\/\/ const GtkWidgetPath * \tgtk_style_context_get_path ()\n\/\/ GdkFrameClock * \tgtk_style_context_get_frame_clock ()\n\/\/ void \tgtk_style_context_get_style ()\n\/\/ void \tgtk_style_context_get_style_valist ()\n\/\/ void \tgtk_style_context_get_valist ()\n\/\/ GtkCssSection * \tgtk_style_context_get_section ()\n\/\/ void \tgtk_style_context_get_background_color ()\n\/\/ void \tgtk_style_context_get_border_color ()\n\/\/ void \tgtk_style_context_get_border ()\n\/\/ void \tgtk_style_context_get_padding ()\n\/\/ void \tgtk_style_context_get_margin ()\n\/\/ const PangoFontDescription * \tgtk_style_context_get_font ()\n\/\/ void \tgtk_style_context_invalidate ()\n\/\/ gboolean \tgtk_style_context_state_is_running ()\n\/\/ GtkIconSet * \tgtk_style_context_lookup_icon_set ()\n\/\/ void \tgtk_style_context_cancel_animations ()\n\/\/ void \tgtk_style_context_scroll_animations ()\n\/\/ void \tgtk_style_context_notify_state_change ()\n\/\/ void \tgtk_style_context_pop_animatable_region ()\n\/\/ void \tgtk_style_context_push_animatable_region ()\n\/\/ void \tgtk_style_context_set_background ()\n\/\/ void \tgtk_style_context_set_direction ()\n\/\/ void \tgtk_style_context_set_junction_sides ()\n\/\/ void \tgtk_style_context_set_path ()\n\/\/ void \tgtk_style_context_add_region ()\n\/\/ void \tgtk_style_context_remove_region ()\n\/\/ gboolean \tgtk_style_context_has_region ()\n\/\/ GList * \tgtk_style_context_list_regions ()\n\/\/ void \tgtk_style_context_set_frame_clock ()\n\/\/ void \tgtk_style_context_set_scale ()\n\/\/ gint \tgtk_style_context_get_scale ()\n\/\/ GList * \tgtk_style_context_list_classes ()\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Volker Dobler. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gui\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/vdobler\/ht\/errorlist\"\n)\n\ntype Message struct {\n\tType string\n\tText string\n}\n\n\/\/ ----------------------------------------------------------------------------\n\/\/ Value\n\n\/\/ Value contains a value to be displayed and updated through a HTML GUI.\ntype Value struct {\n\t\/\/ Current is the current value.\n\tCurrent interface{}\n\n\t\/\/ Last contains the last values\n\tLast []interface{}\n\n\t\/\/ Path is the path prefix applied to this value.\n\tPath string\n\n\t\/\/ Messages contains messages to be rendered for paths. E.g.:\n\t\/\/ Test.Request.Timeout => Message{typ:error, txt=wrong}\n\tMessages map[string][]Message\n\n\tbuf *bytes.Buffer\n\n\tnextfieldinfo Fieldinfo\n}\n\n\/\/ NewValue creates a new Value from val.\nfunc NewValue(val interface{}, path string) *Value {\n\treturn &Value{\n\t\tCurrent: val,\n\t\tPath: path,\n\t\tMessages: make(map[string][]Message),\n\t\tbuf: &bytes.Buffer{},\n\t}\n}\n\n\/\/ Render v's Current value. The returned byte slice must neither be modified\n\/\/ nor kept longer then up to the next call of Render.\nfunc (v *Value) Render() ([]byte, error) {\n\tval := reflect.ValueOf(v.Current)\n\tv.buf.Reset()\n\terr := v.render(v.Path, 0, false, val) \/\/ TODO: type based readonly\n\treturn v.buf.Bytes(), err\n}\n\n\/\/ Update v with data from the received HTML form. It returns the path of the\n\/\/ most prominent field (TODO: explain better).\nfunc (v *Value) Update(form url.Values) (string, errorlist.List) {\n\tval := reflect.ValueOf(v.Current)\n\tv.Messages = make(map[string][]Message) \/\/ clear errors \/\/ TODO: really automaticall here?\n\tfirstErrorPath := \"\"\n\n\tupdated, err := walk(form, v.Path, val)\n\n\tif err == nil {\n\t\tv.Last = append(v.Last, v.Current)\n\t} else {\n\t\t\/\/ Process validation errors\n\t\tfor _, e := range err {\n\t\t\tfmt.Println(e)\n\t\t\tswitch ve := e.(type) {\n\t\t\tcase ValueError:\n\t\t\t\tif firstErrorPath == \"\" {\n\t\t\t\t\tfirstErrorPath = ve.Path\n\t\t\t\t}\n\t\t\t\tv.Messages[ve.Path] = []Message{{\n\t\t\t\t\tType: \"error\",\n\t\t\t\t\tText: ve.Err.Error(),\n\t\t\t\t}}\n\t\t\tcase addNoticeError:\n\t\t\t\tfirstErrorPath = string(ve)\n\t\t\t}\n\t\t}\n\t}\n\n\tv.Current = updated.Interface()\n\n\treturn firstErrorPath, err\n}\n\n\/\/ BinaryData returns the string or byte slice addressed by path.\nfunc (v *Value) BinaryData(path string) ([]byte, error) {\n\tif !strings.HasPrefix(path, v.Path) {\n\t\treturn nil, fmt.Errorf(\"gui: bad path\") \/\/ TODO error message\n\t}\n\n\tpath = path[len(v.Path)+1:]\n\treturn binData(path, reflect.ValueOf(v.Current))\n}\n\nfunc binData(path string, val reflect.Value) ([]byte, error) {\n\tif path == \"\" {\n\t\tswitch val.Kind() {\n\t\tcase reflect.String:\n\t\t\treturn []byte(val.String()), nil\n\t\tcase reflect.Slice:\n\t\t\telKind := val.Type().Elem().Kind()\n\t\t\tif elKind == reflect.Uint8 {\n\t\t\t\treturn val.Bytes(), nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"gui: bin data of %s slice\", elKind.String())\n\t\t}\n\t}\n\tpart := strings.SplitN(path, \".\", 2)\n\tif len(part) == 1 {\n\t\tpart = append(part, \"\")\n\t}\n\tswitch val.Kind() {\n\tcase reflect.Struct:\n\t\tfield := val.FieldByName(part[0])\n\t\tif !field.IsValid() {\n\t\t\treturn nil, fmt.Errorf(\"gui: no such field %s\", part[0])\n\t\t}\n\t\treturn binData(part[1], field)\n\tcase reflect.Map:\n\t\tname := demangleKey(part[0])\n\t\tkey := reflect.ValueOf(name)\n\t\t\/\/ TODO handel maps index by other stuff han strings\n\t\tv := val.MapIndex(key)\n\t\tif !v.IsValid() {\n\t\t\treturn nil, fmt.Errorf(\"gui: no such key %s\", part[0])\n\t\t}\n\n\t\treturn binData(part[1], v)\n\tcase reflect.Slice:\n\t\ti, err := strconv.Atoi(part[0])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gui: bad index: %s\", err)\n\t\t}\n\t\tif i < 0 || i > val.Len() {\n\t\t\treturn nil, fmt.Errorf(\"gui: index %d out of range\", i)\n\t\t}\n\t\treturn binData(part[1], val.Index(i))\n\tcase reflect.Interface, reflect.Ptr:\n\t\tif val.IsNil() {\n\t\t\treturn nil, fmt.Errorf(\"gui: cannot traverse nil ptr\/interface\")\n\t\t}\n\t\treturn binData(part[1], val.Elem())\n\t}\n\treturn nil, fmt.Errorf(\"gui: cannot traverse %s in %s\", val.Kind().String(), path)\n}\n\n\/\/ ----------------------------------------------------------------------------\n\n\/\/ printf to internal buf.\nfunc (v *Value) printf(format string, val ...interface{}) {\n\tfmt.Fprintf(v.buf, format, val...)\n}\n\n\/\/ typeinfo returns name and tooltip for val's type.\nfunc (v *Value) typeinfo(val interface{}) (string, string) {\n\tvar typ reflect.Type\n\tif v, ok := val.(reflect.Value); ok {\n\t\ttyp = v.Type()\n\t} else {\n\t\ttyp = val.(reflect.Type)\n\t}\n\tname := typ.Name()\n\tif name == \"\" {\n\t\tname = \"-anonymous-\"\n\t}\n\n\ttooltip := \"??\"\n\tif info, ok := Typedata[typ]; ok {\n\t\ttooltip = info.Doc\n\t}\n\n\treturn name, tooltip\n}\n\n\/\/ fieldinfo returns the name and optional data for field nr i of val.\nfunc (v *Value) fieldinfo(val reflect.Value, i int) (string, Fieldinfo) {\n\ttyp := val.Type()\n\tname := typ.Field(i).Name\n\n\tif tinfo, ok := Typedata[typ]; ok {\n\t\tif finfo, ok := tinfo.Field[name]; ok {\n\t\t\treturn name, finfo\n\t\t}\n\t}\n\n\treturn name, Fieldinfo{Doc: \"-??-\"}\n}\n<commit_msg>gui: remove debug statement<commit_after>\/\/ Copyright 2017 Volker Dobler. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gui\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/vdobler\/ht\/errorlist\"\n)\n\ntype Message struct {\n\tType string\n\tText string\n}\n\n\/\/ ----------------------------------------------------------------------------\n\/\/ Value\n\n\/\/ Value contains a value to be displayed and updated through a HTML GUI.\ntype Value struct {\n\t\/\/ Current is the current value.\n\tCurrent interface{}\n\n\t\/\/ Last contains the last values\n\tLast []interface{}\n\n\t\/\/ Path is the path prefix applied to this value.\n\tPath string\n\n\t\/\/ Messages contains messages to be rendered for paths. E.g.:\n\t\/\/ Test.Request.Timeout => Message{typ:error, txt=wrong}\n\tMessages map[string][]Message\n\n\tbuf *bytes.Buffer\n\n\tnextfieldinfo Fieldinfo\n}\n\n\/\/ NewValue creates a new Value from val.\nfunc NewValue(val interface{}, path string) *Value {\n\treturn &Value{\n\t\tCurrent: val,\n\t\tPath: path,\n\t\tMessages: make(map[string][]Message),\n\t\tbuf: &bytes.Buffer{},\n\t}\n}\n\n\/\/ Render v's Current value. The returned byte slice must neither be modified\n\/\/ nor kept longer then up to the next call of Render.\nfunc (v *Value) Render() ([]byte, error) {\n\tval := reflect.ValueOf(v.Current)\n\tv.buf.Reset()\n\terr := v.render(v.Path, 0, false, val) \/\/ TODO: type based readonly\n\treturn v.buf.Bytes(), err\n}\n\n\/\/ Update v with data from the received HTML form. It returns the path of the\n\/\/ most prominent field (TODO: explain better).\nfunc (v *Value) Update(form url.Values) (string, errorlist.List) {\n\tval := reflect.ValueOf(v.Current)\n\tv.Messages = make(map[string][]Message) \/\/ clear errors \/\/ TODO: really automaticall here?\n\tfirstErrorPath := \"\"\n\n\tupdated, err := walk(form, v.Path, val)\n\n\tif err == nil {\n\t\tv.Last = append(v.Last, v.Current)\n\t} else {\n\t\t\/\/ Process validation errors\n\t\tfor _, e := range err {\n\t\t\tswitch ve := e.(type) {\n\t\t\tcase ValueError:\n\t\t\t\tif firstErrorPath == \"\" {\n\t\t\t\t\tfirstErrorPath = ve.Path\n\t\t\t\t}\n\t\t\t\tv.Messages[ve.Path] = []Message{{\n\t\t\t\t\tType: \"error\",\n\t\t\t\t\tText: ve.Err.Error(),\n\t\t\t\t}}\n\t\t\tcase addNoticeError:\n\t\t\t\tfirstErrorPath = string(ve)\n\t\t\t}\n\t\t}\n\t}\n\n\tv.Current = updated.Interface()\n\n\treturn firstErrorPath, err\n}\n\n\/\/ BinaryData returns the string or byte slice addressed by path.\nfunc (v *Value) BinaryData(path string) ([]byte, error) {\n\tif !strings.HasPrefix(path, v.Path) {\n\t\treturn nil, fmt.Errorf(\"gui: bad path\") \/\/ TODO error message\n\t}\n\n\tpath = path[len(v.Path)+1:]\n\treturn binData(path, reflect.ValueOf(v.Current))\n}\n\nfunc binData(path string, val reflect.Value) ([]byte, error) {\n\tif path == \"\" {\n\t\tswitch val.Kind() {\n\t\tcase reflect.String:\n\t\t\treturn []byte(val.String()), nil\n\t\tcase reflect.Slice:\n\t\t\telKind := val.Type().Elem().Kind()\n\t\t\tif elKind == reflect.Uint8 {\n\t\t\t\treturn val.Bytes(), nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"gui: bin data of %s slice\", elKind.String())\n\t\t}\n\t}\n\tpart := strings.SplitN(path, \".\", 2)\n\tif len(part) == 1 {\n\t\tpart = append(part, \"\")\n\t}\n\tswitch val.Kind() {\n\tcase reflect.Struct:\n\t\tfield := val.FieldByName(part[0])\n\t\tif !field.IsValid() {\n\t\t\treturn nil, fmt.Errorf(\"gui: no such field %s\", part[0])\n\t\t}\n\t\treturn binData(part[1], field)\n\tcase reflect.Map:\n\t\tname := demangleKey(part[0])\n\t\tkey := reflect.ValueOf(name)\n\t\t\/\/ TODO handel maps index by other stuff han strings\n\t\tv := val.MapIndex(key)\n\t\tif !v.IsValid() {\n\t\t\treturn nil, fmt.Errorf(\"gui: no such key %s\", part[0])\n\t\t}\n\n\t\treturn binData(part[1], v)\n\tcase reflect.Slice:\n\t\ti, err := strconv.Atoi(part[0])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gui: bad index: %s\", err)\n\t\t}\n\t\tif i < 0 || i > val.Len() {\n\t\t\treturn nil, fmt.Errorf(\"gui: index %d out of range\", i)\n\t\t}\n\t\treturn binData(part[1], val.Index(i))\n\tcase reflect.Interface, reflect.Ptr:\n\t\tif val.IsNil() {\n\t\t\treturn nil, fmt.Errorf(\"gui: cannot traverse nil ptr\/interface\")\n\t\t}\n\t\treturn binData(part[1], val.Elem())\n\t}\n\treturn nil, fmt.Errorf(\"gui: cannot traverse %s in %s\", val.Kind().String(), path)\n}\n\n\/\/ ----------------------------------------------------------------------------\n\n\/\/ printf to internal buf.\nfunc (v *Value) printf(format string, val ...interface{}) {\n\tfmt.Fprintf(v.buf, format, val...)\n}\n\n\/\/ typeinfo returns name and tooltip for val's type.\nfunc (v *Value) typeinfo(val interface{}) (string, string) {\n\tvar typ reflect.Type\n\tif v, ok := val.(reflect.Value); ok {\n\t\ttyp = v.Type()\n\t} else {\n\t\ttyp = val.(reflect.Type)\n\t}\n\tname := typ.Name()\n\tif name == \"\" {\n\t\tname = \"-anonymous-\"\n\t}\n\n\ttooltip := \"??\"\n\tif info, ok := Typedata[typ]; ok {\n\t\ttooltip = info.Doc\n\t}\n\n\treturn name, tooltip\n}\n\n\/\/ fieldinfo returns the name and optional data for field nr i of val.\nfunc (v *Value) fieldinfo(val reflect.Value, i int) (string, Fieldinfo) {\n\ttyp := val.Type()\n\tname := typ.Field(i).Name\n\n\tif tinfo, ok := Typedata[typ]; ok {\n\t\tif finfo, ok := tinfo.Field[name]; ok {\n\t\t\treturn name, finfo\n\t\t}\n\t}\n\n\treturn name, Fieldinfo{Doc: \"-??-\"}\n}\n<|endoftext|>"} {"text":"<commit_before>package gxutil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\"\n\t\"strings\"\n\n\tmh \"github.com\/jbenet\/go-multihash\"\n\tish \"github.com\/whyrusleeping\/fallback-ipfs-shell\"\n\t. \"github.com\/whyrusleeping\/stump\"\n)\n\nconst PkgFileName = \"package.json\"\n\ntype PM struct {\n\tshell ish.Shell\n\n\tcfg *Config\n\n\t\/\/ hash of the 'empty' ipfs dir to avoid extra calls to object new\n\tblankDir string\n}\n\nfunc NewPM(cfg *Config) (*PM, error) {\n\tsh, err := ish.NewShell()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PM{\n\t\tshell: sh,\n\t\tcfg: cfg,\n\t}, nil\n}\n\n\/\/ InstallDeps recursively installs all dependencies for the given package\nfunc (pm *PM) InstallDeps(pkg *Package, location string) error {\n\tLog(\"installing package: %s-%s\", pkg.Name, pkg.Version)\n\tfor _, dep := range pkg.Dependencies {\n\n\t\t\/\/ if its already local, skip it\n\t\tpkgdir := path.Join(location, dep.Hash)\n\t\terr := FindPackageInDir(&Package{}, pkgdir)\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdeppkg, err := pm.GetPackageLocalDaemon(dep.Hash, location)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch package: %s (%s):%s\", dep.Name,\n\t\t\t\tdep.Hash, err)\n\t\t}\n\n\t\terr = RunReqCheckHook(deppkg.Language, dep.Hash)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = pm.InstallDeps(deppkg, location)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pm *PM) InitPkg(dir, name, lang string) error {\n\t\/\/ check for existing packagefile\n\tp := path.Join(dir, PkgFileName)\n\t_, err := os.Stat(p)\n\tif err == nil {\n\t\treturn errors.New(\"package file already exists in working dir\")\n\t}\n\n\tusername := pm.cfg.User.Name\n\tif username == \"\" {\n\t\tu, err := user.Current()\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"error looking up current user: %s\", err)\n\t\t}\n\t\tusername = u.Username\n\t}\n\n\tpkg := new(Package)\n\tpkg.Name = name\n\tpkg.Language = lang\n\tpkg.Author = username\n\tpkg.Version = \"1.0.0\"\n\n\t\/\/ check if the user has a tool installed for the selected language\n\tCheckForHelperTools(lang)\n\n\terr = SavePackageFile(pkg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = TryRunHook(\"post-init\", lang, dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc CheckForHelperTools(lang string) {\n\t_, err := exec.LookPath(\"gx-\" + lang)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tif strings.Contains(err.Error(), \"file not found\") {\n\t\tLog(\"notice: no helper tool found for\", lang)\n\t\treturn\n\t}\n\n\tError(\"checking for helper tool:\", err)\n}\n\nfunc (pm *PM) ImportPackage(dir, dephash string) (*Dependency, error) {\n\tndep, err := pm.GetPackage(dephash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, child := range ndep.Dependencies {\n\t\t_, err := pm.ImportPackage(dir, child.Hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr = RunPostImportHook(ndep.Language, dephash)\n\tif err != nil {\n\t\tFatal(err)\n\t}\n\n\treturn &Dependency{\n\t\tName: ndep.Name,\n\t\tHash: dephash,\n\t\tVersion: ndep.Version,\n\t}, nil\n}\n\nfunc (pm *PM) ResolveDepName(name string) (string, error) {\n\t_, err := mh.FromB58String(name)\n\tif err == nil {\n\t\treturn name, nil\n\t}\n\n\tif strings.Contains(name, \"\/\") {\n\t\tparts := strings.Split(name, \"\/\")\n\t\trpath, ok := pm.cfg.GetRepos()[parts[0]]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"unknown repo: '%s'\", parts[0])\n\t\t}\n\n\t\tpkgs, err := pm.FetchRepo(rpath, true)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tval, ok := pkgs[parts[1]]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"package %s not found in repo %s\", parts[1], parts[0])\n\t\t}\n\n\t\treturn val, nil\n\t}\n\n\tout, err := pm.QueryRepos(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"could not find package by name: %s\", name)\n\t}\n\n\tif len(out) == 1 {\n\t\tfor _, v := range out {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"ambiguous ref, appears in multiple repos\")\n}\n\nfunc RunPostImportHook(env, pkg string) error {\n\treturn TryRunHook(\"post-import\", env, pkg)\n}\n\nfunc RunReqCheckHook(env, pkg string) error {\n\treturn TryRunHook(\"req-check\", env, pkg)\n}\n\nfunc TryRunHook(hook, env string, args ...string) error {\n\tif env == \"\" {\n\t\treturn nil\n\t}\n\n\tbinname := \"gx-\" + env\n\t_, err := exec.LookPath(binname)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tLog(\"No gx helper tool found for\", env)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\targs = append([]string{\"hook\", hook}, args...)\n\tcmd := exec.Command(binname, args...)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"hook failed: %s\", err)\n\t}\n\n\treturn nil\n}\n<commit_msg>better support global installs and resuming installs<commit_after>package gxutil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\"\n\t\"strings\"\n\n\tmh \"github.com\/jbenet\/go-multihash\"\n\tish \"github.com\/whyrusleeping\/fallback-ipfs-shell\"\n\t. \"github.com\/whyrusleeping\/stump\"\n)\n\nconst PkgFileName = \"package.json\"\n\ntype PM struct {\n\tshell ish.Shell\n\n\tcfg *Config\n\n\t\/\/ hash of the 'empty' ipfs dir to avoid extra calls to object new\n\tblankDir string\n}\n\nfunc NewPM(cfg *Config) (*PM, error) {\n\tsh, err := ish.NewShell()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PM{\n\t\tshell: sh,\n\t\tcfg: cfg,\n\t}, nil\n}\n\n\/\/ InstallDeps recursively installs all dependencies for the given package\nfunc (pm *PM) InstallDeps(pkg *Package, location string) error {\n\tLog(\"installing package: %s-%s\", pkg.Name, pkg.Version)\n\tfor _, dep := range pkg.Dependencies {\n\n\t\t\/\/ if its already local, skip it\n\t\tpkgdir := path.Join(location, dep.Hash)\n\t\tpkg := new(Package)\n\t\terr := FindPackageInDir(pkg, pkgdir)\n\t\tif err != nil {\n\t\t\tdeppkg, err := pm.GetPackageLocalDaemon(dep.Hash, location)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to fetch package: %s (%s):%s\", dep.Name,\n\t\t\t\t\tdep.Hash, err)\n\t\t\t}\n\t\t\tpkg = deppkg\n\t\t}\n\n\t\terr = pm.InstallDeps(pkg, location)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pm *PM) InitPkg(dir, name, lang string) error {\n\t\/\/ check for existing packagefile\n\tp := path.Join(dir, PkgFileName)\n\t_, err := os.Stat(p)\n\tif err == nil {\n\t\treturn errors.New(\"package file already exists in working dir\")\n\t}\n\n\tusername := pm.cfg.User.Name\n\tif username == \"\" {\n\t\tu, err := user.Current()\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"error looking up current user: %s\", err)\n\t\t}\n\t\tusername = u.Username\n\t}\n\n\tpkg := new(Package)\n\tpkg.Name = name\n\tpkg.Language = lang\n\tpkg.Author = username\n\tpkg.Version = \"1.0.0\"\n\n\t\/\/ check if the user has a tool installed for the selected language\n\tCheckForHelperTools(lang)\n\n\terr = SavePackageFile(pkg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = TryRunHook(\"post-init\", lang, dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc CheckForHelperTools(lang string) {\n\t_, err := exec.LookPath(\"gx-\" + lang)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tif strings.Contains(err.Error(), \"file not found\") {\n\t\tLog(\"notice: no helper tool found for\", lang)\n\t\treturn\n\t}\n\n\tError(\"checking for helper tool:\", err)\n}\n\nfunc (pm *PM) ImportPackage(dir, dephash string) (*Dependency, error) {\n\tndep, err := pm.GetPackage(dephash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, child := range ndep.Dependencies {\n\t\t_, err := pm.ImportPackage(dir, child.Hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr = RunPostImportHook(ndep.Language, dephash)\n\tif err != nil {\n\t\tFatal(err)\n\t}\n\n\treturn &Dependency{\n\t\tName: ndep.Name,\n\t\tHash: dephash,\n\t\tVersion: ndep.Version,\n\t}, nil\n}\n\nfunc (pm *PM) ResolveDepName(name string) (string, error) {\n\t_, err := mh.FromB58String(name)\n\tif err == nil {\n\t\treturn name, nil\n\t}\n\n\tif strings.Contains(name, \"\/\") {\n\t\tparts := strings.Split(name, \"\/\")\n\t\trpath, ok := pm.cfg.GetRepos()[parts[0]]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"unknown repo: '%s'\", parts[0])\n\t\t}\n\n\t\tpkgs, err := pm.FetchRepo(rpath, true)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tval, ok := pkgs[parts[1]]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"package %s not found in repo %s\", parts[1], parts[0])\n\t\t}\n\n\t\treturn val, nil\n\t}\n\n\tout, err := pm.QueryRepos(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"could not find package by name: %s\", name)\n\t}\n\n\tif len(out) == 1 {\n\t\tfor _, v := range out {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"ambiguous ref, appears in multiple repos\")\n}\n\nfunc RunPostImportHook(env, pkg string) error {\n\treturn TryRunHook(\"post-import\", env, pkg)\n}\n\nfunc RunReqCheckHook(env, pkg string) error {\n\treturn TryRunHook(\"req-check\", env, pkg)\n}\n\nfunc TryRunHook(hook, env string, args ...string) error {\n\tif env == \"\" {\n\t\treturn nil\n\t}\n\n\tbinname := \"gx-\" + env\n\t_, err := exec.LookPath(binname)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tLog(\"No gx helper tool found for\", env)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\targs = append([]string{\"hook\", hook}, args...)\n\tcmd := exec.Command(binname, args...)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"hook failed: %s\", err)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package logrus\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tnocolor = 0\n\tred = 31\n\tgreen = 32\n\tyellow = 33\n\tblue = 34\n)\n\nfunc init() {\n\tbaseTimestamp = time.Now()\n}\n\nfunc miniTS() int {\n\treturn int(time.Since(baseTimestamp) \/ time.Second)\n}\n\ntype TextFormatter struct {\n\t\/\/ Set to true to bypass checking for a TTY before outputting colors.\n\tForceColors bool\n}\n\nfunc (f *TextFormatter) Format(entry *Entry) ([]byte, error) {\n\tvar serialized []byte\n\n\tif f.ForceColors || IsTerminal() {\n\t\tlevelText := strings.ToUpper(entry.Data[\"level\"].(string))[0:4]\n\n\t\tlevelColor := blue\n\n\t\tif entry.Data[\"level\"] == \"warning\" {\n\t\t\tlevelColor = yellow\n\t\t} else if entry.Data[\"level\"] == \"error\" ||\n\t\t\tentry.Data[\"level\"] == \"fatal\" ||\n\t\t\tentry.Data[\"level\"] == \"panic\" {\n\t\t\tlevelColor = red\n\t\t}\n\n\t\tserialized = append(serialized, []byte(fmt.Sprintf(\"\\x1b[%dm%s\\x1b[0m[%04d] %-45s \", levelColor, levelText, miniTS(), entry.Data[\"msg\"]))...)\n\n\t\tkeys := make([]string, 0)\n\t\tfor k, _ := range entry.Data {\n\t\t\tif k != \"level\" && k != \"time\" && k != \"msg\" {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfirst := true\n\t\tfor _, k := range keys {\n\t\t\tv := entry.Data[k]\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t} else {\n\t\t\t\tserialized = append(serialized, ' ')\n\t\t\t}\n\t\t\tserialized = append(serialized, []byte(fmt.Sprintf(\"\\x1b[%dm%s\\x1b[0m=%v\", levelColor, k, v))...)\n\t\t}\n\t} else {\n\t\tserialized = f.AppendKeyValue(serialized, \"time\", entry.Data[\"time\"].(string))\n\t\tserialized = f.AppendKeyValue(serialized, \"level\", entry.Data[\"level\"].(string))\n\t\tserialized = f.AppendKeyValue(serialized, \"msg\", entry.Data[\"msg\"].(string))\n\n\t\tfor key, value := range entry.Data {\n\t\t\tif key != \"time\" && key != \"level\" && key != \"msg\" {\n\t\t\t\tserialized = f.AppendKeyValue(serialized, key, value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn append(serialized, '\\n'), nil\n}\n\nfunc (f *TextFormatter) AppendKeyValue(serialized []byte, key, value interface{}) []byte {\n\tif _, ok := value.(string); ok {\n\t\treturn append(serialized, []byte(fmt.Sprintf(\"%v=%q \", key, value))...)\n\t} else {\n\t\treturn append(serialized, []byte(fmt.Sprintf(\"%v=%v \", key, value))...)\n\t}\n}\n<commit_msg>Clean up text_formatter code with a byte.Buffer<commit_after>package logrus\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tnocolor = 0\n\tred = 31\n\tgreen = 32\n\tyellow = 33\n\tblue = 34\n)\n\nfunc init() {\n\tbaseTimestamp = time.Now()\n}\n\nfunc miniTS() int {\n\treturn int(time.Since(baseTimestamp) \/ time.Second)\n}\n\ntype TextFormatter struct {\n\t\/\/ Set to true to bypass checking for a TTY before outputting colors.\n\tForceColors bool\n}\n\nfunc (f *TextFormatter) Format(entry *Entry) ([]byte, error) {\n\tb := &bytes.Buffer{}\n\n\tif f.ForceColors || IsTerminal() {\n\t\tlevelText := strings.ToUpper(entry.Data[\"level\"].(string))[0:4]\n\n\t\tlevelColor := blue\n\n\t\tif entry.Data[\"level\"] == \"warning\" {\n\t\t\tlevelColor = yellow\n\t\t} else if entry.Data[\"level\"] == \"error\" ||\n\t\t\tentry.Data[\"level\"] == \"fatal\" ||\n\t\t\tentry.Data[\"level\"] == \"panic\" {\n\t\t\tlevelColor = red\n\t\t}\n\n\t\tfmt.Fprintf(b, \"\\x1b[%dm%s\\x1b[0m[%04d] %-44s \", levelColor, levelText, miniTS(), entry.Data[\"msg\"])\n\n\t\tkeys := make([]string, 0)\n\t\tfor k, _ := range entry.Data {\n\t\t\tif k != \"level\" && k != \"time\" && k != \"msg\" {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfor _, k := range keys {\n\t\t\tv := entry.Data[k]\n\t\t\tfmt.Fprintf(b, \" \\x1b[%dm%s\\x1b[0m=%v\", levelColor, k, v)\n\t\t}\n\t} else {\n\t\tf.AppendKeyValue(b, \"time\", entry.Data[\"time\"].(string))\n\t\tf.AppendKeyValue(b, \"level\", entry.Data[\"level\"].(string))\n\t\tf.AppendKeyValue(b, \"msg\", entry.Data[\"msg\"].(string))\n\n\t\tfor key, value := range entry.Data {\n\t\t\tif key != \"time\" && key != \"level\" && key != \"msg\" {\n\t\t\t\tf.AppendKeyValue(b, key, value)\n\t\t\t}\n\t\t}\n\t}\n\n\tb.WriteByte('\\n')\n\treturn b.Bytes(), nil\n}\n\nfunc (f *TextFormatter) AppendKeyValue(b *bytes.Buffer, key, value interface{}) {\n\tif _, ok := value.(string); ok {\n\t\tfmt.Fprintf(b, \"%v=%q \", key, value)\n\t} else {\n\t\tfmt.Fprintf(b, \"%v=%v \", key, value)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package azure\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n)\n\nconst (\n\tactiveDirectoryAPIVersion = \"1.0\"\n)\n\n\/\/ Environment represents a set of endpoints for each of Azure's Clouds.\ntype Environment struct {\n\tName string `json:\"name\"`\n\tManagementPortalURL string `json:\"managementPortalURL\"`\n\tPublishSettingsURL string `json:\"publishSettingsURL\"`\n\tServiceManagementEndpoint string `json:\"serviceManagementEndpoint\"`\n\tResourceManagerEndpoint string `json:\"resourceManagerEndpoint\"`\n\tActiveDirectoryEndpoint string `json:\"activeDirectoryEndpoint\"`\n\tGalleryEndpoint string `json:\"galleryEndpoint\"`\n\tKeyVaultEndpoint string `json:\"keyVaultEndpoint\"`\n\tGraphEndpoint string `json:\"graphEndpoint\"`\n\tStorageEndpointSuffix string `json:\"storageEndpointSuffix\"`\n\tSQLDatabaseDNSSuffix string `json:\"sqlDatabaseDNSSuffix\"`\n\tTrafficManagerDNSSuffix string `json:\"trafficManagerDNSSuffix\"`\n\tKeyVaultDNSSuffix string `json:\"keyVaultDNSSuffix\"`\n\tServiceBusEndpointSuffix string `json:\"serviceBusEndpointSuffix\"`\n}\n\nvar (\n\t\/\/ PublicCloud is the default public Azure cloud environment\n\tPublicCloud = Environment{\n\t\tName: \"AzurePublicCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.windowsazure.com\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.windowsazure.com\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.windows.net\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.azure.com\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.com\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.azure.com\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.azure.net\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.windows.net\/\",\n\t\tStorageEndpointSuffix: \"core.windows.net\",\n\t\tSQLDatabaseDNSSuffix: \"database.windows.net\",\n\t\tTrafficManagerDNSSuffix: \"trafficmanager.net\",\n\t\tKeyVaultDNSSuffix: \"vault.azure.net\",\n\t\tServiceBusEndpointSuffix: \"servicebus.azure.com\",\n\t}\n\n\t\/\/ USGovernmentCloud is the cloud environment for the US Government\n\tUSGovernmentCloud = Environment{\n\t\tName: \"AzureUSGovernmentCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.windowsazure.us\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.windowsazure.us\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.usgovcloudapi.net\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.usgovcloudapi.net\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.com\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.usgovcloudapi.net\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.usgovcloudapi.net\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.usgovcloudapi.net\/\",\n\t\tStorageEndpointSuffix: \"core.usgovcloudapi.net\",\n\t\tSQLDatabaseDNSSuffix: \"database.usgovcloudapi.net\",\n\t\tTrafficManagerDNSSuffix: \"usgovtrafficmanager.net\",\n\t\tKeyVaultDNSSuffix: \"vault.usgovcloudapi.net\",\n\t\tServiceBusEndpointSuffix: \"servicebus.usgovcloudapi.net\",\n\t}\n\n\t\/\/ ChinaCloud is the cloud environment operated in China\n\tChinaCloud = Environment{\n\t\tName: \"AzureChinaCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.chinacloudapi.com\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.chinacloudapi.com\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.chinacloudapi.cn\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.chinacloudapi.cn\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.chinacloudapi.cn\/?api-version=1.0\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.chinacloudapi.cn\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.azure.cn\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.chinacloudapi.cn\/\",\n\t\tStorageEndpointSuffix: \"core.chinacloudapi.cn\",\n\t\tSQLDatabaseDNSSuffix: \"database.chinacloudapi.cn\",\n\t\tTrafficManagerDNSSuffix: \"trafficmanager.cn\",\n\t\tKeyVaultDNSSuffix: \"vault.azure.cn\",\n\t\tServiceBusEndpointSuffix: \"servicebus.chinacloudapi.net\",\n\t}\n\n\t\/\/ GermanCloud is the cloud environment operated in Germany\n\tGermanCloud = Environment{\n\t\tName: \"AzureGermanCloud\",\n\t\tManagementPortalURL: \"http:\/\/portal.microsoftazure.de\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.microsoftazure.de\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.cloudapi.de\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.microsoftazure.de\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.de\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.cloudapi.de\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.microsoftazure.de\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.cloudapi.de\/\",\n\t\tStorageEndpointSuffix: \"core.cloudapi.de\",\n\t\tSQLDatabaseDNSSuffix: \"database.cloudapi.de\",\n\t\tTrafficManagerDNSSuffix: \"azuretrafficmanager.de\",\n\t\tKeyVaultDNSSuffix: \"vault.microsoftazure.de\",\n\t\tServiceBusEndpointSuffix: \"servicebus.cloudapi.de\",\n\t}\n)\n\n\/\/ OAuthConfigForTenant returns an OAuthConfig with tenant specific urls\nfunc (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {\n\ttemplate := \"%s\/oauth2\/%s?api-version=%s\"\n\tu, err := url.Parse(env.ActiveDirectoryEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tauthorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"authorize\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"token\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"devicecode\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &OAuthConfig{\n\t\tAuthorizeEndpoint: *authorizeURL,\n\t\tTokenEndpoint: *tokenURL,\n\t\tDeviceCodeEndpoint: *deviceCodeURL,\n\t}, nil\n}\n<commit_msg>Add trailing \/ to endpoint<commit_after>package azure\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n)\n\nconst (\n\tactiveDirectoryAPIVersion = \"1.0\"\n)\n\n\/\/ Environment represents a set of endpoints for each of Azure's Clouds.\ntype Environment struct {\n\tName string `json:\"name\"`\n\tManagementPortalURL string `json:\"managementPortalURL\"`\n\tPublishSettingsURL string `json:\"publishSettingsURL\"`\n\tServiceManagementEndpoint string `json:\"serviceManagementEndpoint\"`\n\tResourceManagerEndpoint string `json:\"resourceManagerEndpoint\"`\n\tActiveDirectoryEndpoint string `json:\"activeDirectoryEndpoint\"`\n\tGalleryEndpoint string `json:\"galleryEndpoint\"`\n\tKeyVaultEndpoint string `json:\"keyVaultEndpoint\"`\n\tGraphEndpoint string `json:\"graphEndpoint\"`\n\tStorageEndpointSuffix string `json:\"storageEndpointSuffix\"`\n\tSQLDatabaseDNSSuffix string `json:\"sqlDatabaseDNSSuffix\"`\n\tTrafficManagerDNSSuffix string `json:\"trafficManagerDNSSuffix\"`\n\tKeyVaultDNSSuffix string `json:\"keyVaultDNSSuffix\"`\n\tServiceBusEndpointSuffix string `json:\"serviceBusEndpointSuffix\"`\n}\n\nvar (\n\t\/\/ PublicCloud is the default public Azure cloud environment\n\tPublicCloud = Environment{\n\t\tName: \"AzurePublicCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.windowsazure.com\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.windowsazure.com\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.windows.net\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.azure.com\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.com\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.azure.com\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.azure.net\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.windows.net\/\",\n\t\tStorageEndpointSuffix: \"core.windows.net\",\n\t\tSQLDatabaseDNSSuffix: \"database.windows.net\",\n\t\tTrafficManagerDNSSuffix: \"trafficmanager.net\",\n\t\tKeyVaultDNSSuffix: \"vault.azure.net\",\n\t\tServiceBusEndpointSuffix: \"servicebus.azure.com\",\n\t}\n\n\t\/\/ USGovernmentCloud is the cloud environment for the US Government\n\tUSGovernmentCloud = Environment{\n\t\tName: \"AzureUSGovernmentCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.windowsazure.us\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.windowsazure.us\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.usgovcloudapi.net\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.usgovcloudapi.net\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.com\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.usgovcloudapi.net\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.usgovcloudapi.net\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.usgovcloudapi.net\/\",\n\t\tStorageEndpointSuffix: \"core.usgovcloudapi.net\",\n\t\tSQLDatabaseDNSSuffix: \"database.usgovcloudapi.net\",\n\t\tTrafficManagerDNSSuffix: \"usgovtrafficmanager.net\",\n\t\tKeyVaultDNSSuffix: \"vault.usgovcloudapi.net\",\n\t\tServiceBusEndpointSuffix: \"servicebus.usgovcloudapi.net\",\n\t}\n\n\t\/\/ ChinaCloud is the cloud environment operated in China\n\tChinaCloud = Environment{\n\t\tName: \"AzureChinaCloud\",\n\t\tManagementPortalURL: \"https:\/\/manage.chinacloudapi.com\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.chinacloudapi.com\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.chinacloudapi.cn\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.chinacloudapi.cn\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.chinacloudapi.cn\/?api-version=1.0\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.chinacloudapi.cn\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.azure.cn\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.chinacloudapi.cn\/\",\n\t\tStorageEndpointSuffix: \"core.chinacloudapi.cn\",\n\t\tSQLDatabaseDNSSuffix: \"database.chinacloudapi.cn\",\n\t\tTrafficManagerDNSSuffix: \"trafficmanager.cn\",\n\t\tKeyVaultDNSSuffix: \"vault.azure.cn\",\n\t\tServiceBusEndpointSuffix: \"servicebus.chinacloudapi.net\",\n\t}\n\n\t\/\/ GermanCloud is the cloud environment operated in Germany\n\tGermanCloud = Environment{\n\t\tName: \"AzureGermanCloud\",\n\t\tManagementPortalURL: \"http:\/\/portal.microsoftazure.de\/\",\n\t\tPublishSettingsURL: \"https:\/\/manage.microsoftazure.de\/publishsettings\/index\",\n\t\tServiceManagementEndpoint: \"https:\/\/management.core.cloudapi.de\/\",\n\t\tResourceManagerEndpoint: \"https:\/\/management.microsoftazure.de\/\",\n\t\tActiveDirectoryEndpoint: \"https:\/\/login.microsoftonline.de\/\",\n\t\tGalleryEndpoint: \"https:\/\/gallery.cloudapi.de\/\",\n\t\tKeyVaultEndpoint: \"https:\/\/vault.microsoftazure.de\/\",\n\t\tGraphEndpoint: \"https:\/\/graph.cloudapi.de\/\",\n\t\tStorageEndpointSuffix: \"core.cloudapi.de\",\n\t\tSQLDatabaseDNSSuffix: \"database.cloudapi.de\",\n\t\tTrafficManagerDNSSuffix: \"azuretrafficmanager.de\",\n\t\tKeyVaultDNSSuffix: \"vault.microsoftazure.de\",\n\t\tServiceBusEndpointSuffix: \"servicebus.cloudapi.de\",\n\t}\n)\n\n\/\/ OAuthConfigForTenant returns an OAuthConfig with tenant specific urls\nfunc (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {\n\ttemplate := \"%s\/oauth2\/%s?api-version=%s\"\n\tu, err := url.Parse(env.ActiveDirectoryEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tauthorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"authorize\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"token\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, \"devicecode\", activeDirectoryAPIVersion))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &OAuthConfig{\n\t\tAuthorizeEndpoint: *authorizeURL,\n\t\tTokenEndpoint: *tokenURL,\n\t\tDeviceCodeEndpoint: *deviceCodeURL,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>wasm: Update generated binaries<commit_after><|endoftext|>"} {"text":"<commit_before>package leafnodes\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\/types\"\n)\n\ntype benchmarker struct {\n\tmeasurements map[string]*types.SpecMeasurement\n\torderCounter int\n}\n\nfunc newBenchmarker() *benchmarker {\n\treturn &benchmarker{\n\t\tmeasurements: make(map[string]*types.SpecMeasurement, 0),\n\t}\n}\n\nfunc (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {\n\tt := time.Now()\n\tbody()\n\telapsedTime = time.Since(t)\n\n\tmeasurement := b.getMeasurement(name, \"Fastest Time\", \"Slowest Time\", \"Average Time\", \"s\", info...)\n\tmeasurement.Results = append(measurement.Results, elapsedTime.Seconds())\n\n\treturn\n}\n\nfunc (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {\n\tmeasurement := b.getMeasurement(name, \"Smallest\", \" Largest\", \" Average\", \"\", info...)\n\tmeasurement.Results = append(measurement.Results, value)\n}\n\nfunc (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {\n\tmeasurement, ok := b.measurements[name]\n\tif !ok {\n\t\tvar computedInfo interface{}\n\t\tcomputedInfo = nil\n\t\tif len(info) > 0 {\n\t\t\tcomputedInfo = info[0]\n\t\t}\n\t\tmeasurement = &types.SpecMeasurement{\n\t\t\tName: name,\n\t\t\tInfo: computedInfo,\n\t\t\tOrder: b.orderCounter,\n\t\t\tSmallestLabel: smallestLabel,\n\t\t\tLargestLabel: largestLabel,\n\t\t\tAverageLabel: averageLabel,\n\t\t\tUnits: units,\n\t\t\tResults: make([]float64, 0),\n\t\t}\n\t\tb.measurements[name] = measurement\n\t\tb.orderCounter++\n\t}\n\n\treturn measurement\n}\n\nfunc (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {\n\tfor _, measurement := range b.measurements {\n\t\tmeasurement.Smallest = math.MaxFloat64\n\t\tmeasurement.Largest = -math.MaxFloat64\n\t\tsum := float64(0)\n\t\tsumOfSquares := float64(0)\n\n\t\tfor _, result := range measurement.Results {\n\t\t\tif result > measurement.Largest {\n\t\t\t\tmeasurement.Largest = result\n\t\t\t}\n\t\t\tif result < measurement.Smallest {\n\t\t\t\tmeasurement.Smallest = result\n\t\t\t}\n\t\t\tsum += result\n\t\t\tsumOfSquares += result * result\n\t\t}\n\n\t\tn := float64(len(measurement.Results))\n\t\tmeasurement.Average = sum \/ n\n\t\tmeasurement.StdDeviation = math.Sqrt(sumOfSquares\/n - (sum\/n)*(sum\/n))\n\t}\n\n\treturn b.measurements\n}\n<commit_msg>Fix data race<commit_after>package leafnodes\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\t\"sync\"\n\n\t\"github.com\/onsi\/ginkgo\/types\"\n)\n\ntype benchmarker struct {\n\tmu sync.Mutex\n\tmeasurements map[string]*types.SpecMeasurement\n\torderCounter int\n}\n\nfunc newBenchmarker() *benchmarker {\n\treturn &benchmarker{\n\t\tmeasurements: make(map[string]*types.SpecMeasurement, 0),\n\t}\n}\n\nfunc (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {\n\tt := time.Now()\n\tbody()\n\telapsedTime = time.Since(t)\n\n b.mu.Lock()\n defer b.mu.Unlock()\n\tmeasurement := b.getMeasurement(name, \"Fastest Time\", \"Slowest Time\", \"Average Time\", \"s\", info...)\n\tmeasurement.Results = append(measurement.Results, elapsedTime.Seconds())\n\n\treturn\n}\n\nfunc (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {\n\tmeasurement := b.getMeasurement(name, \"Smallest\", \" Largest\", \" Average\", \"\", info...)\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tmeasurement.Results = append(measurement.Results, value)\n}\n\nfunc (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {\n\tmeasurement, ok := b.measurements[name]\n\tif !ok {\n\t\tvar computedInfo interface{}\n\t\tcomputedInfo = nil\n\t\tif len(info) > 0 {\n\t\t\tcomputedInfo = info[0]\n\t\t}\n\t\tmeasurement = &types.SpecMeasurement{\n\t\t\tName: name,\n\t\t\tInfo: computedInfo,\n\t\t\tOrder: b.orderCounter,\n\t\t\tSmallestLabel: smallestLabel,\n\t\t\tLargestLabel: largestLabel,\n\t\t\tAverageLabel: averageLabel,\n\t\t\tUnits: units,\n\t\t\tResults: make([]float64, 0),\n\t\t}\n\t\tb.measurements[name] = measurement\n\t\tb.orderCounter++\n\t}\n\n\treturn measurement\n}\n\nfunc (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tfor _, measurement := range b.measurements {\n\t\tmeasurement.Smallest = math.MaxFloat64\n\t\tmeasurement.Largest = -math.MaxFloat64\n\t\tsum := float64(0)\n\t\tsumOfSquares := float64(0)\n\n\t\tfor _, result := range measurement.Results {\n\t\t\tif result > measurement.Largest {\n\t\t\t\tmeasurement.Largest = result\n\t\t\t}\n\t\t\tif result < measurement.Smallest {\n\t\t\t\tmeasurement.Smallest = result\n\t\t\t}\n\t\t\tsum += result\n\t\t\tsumOfSquares += result * result\n\t\t}\n\n\t\tn := float64(len(measurement.Results))\n\t\tmeasurement.Average = sum \/ n\n\t\tmeasurement.StdDeviation = math.Sqrt(sumOfSquares\/n - (sum\/n)*(sum\/n))\n\t}\n\n\treturn b.measurements\n}\n<|endoftext|>"} {"text":"<commit_before>package remote\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/states\/statefile\"\n\t\"github.com\/hashicorp\/terraform\/internal\/states\/statemgr\"\n)\n\n\/\/ TestClient is a generic function to test any client.\nfunc TestClient(t *testing.T, c Client) {\n\tvar buf bytes.Buffer\n\ts := statemgr.TestFullInitialState()\n\tsf := statefile.New(s, \"stub-lineage\", 2)\n\terr := statefile.Write(sf, &buf)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdata := buf.Bytes()\n\n\tif err := c.Put(data); err != nil {\n\t\tt.Fatalf(\"put: %s\", err)\n\t}\n\n\tp, err := c.Get()\n\tif err != nil {\n\t\tt.Fatalf(\"get: %s\", err)\n\t}\n\tif !bytes.Equal(p.Data, data) {\n\t\tt.Fatalf(\"expected full state %q\\n\\ngot: %q\", string(p.Data), string(data))\n\t}\n\n\tif err := c.Delete(); err != nil {\n\t\tt.Fatalf(\"delete: %s\", err)\n\t}\n\n\tp, err = c.Get()\n\tif err != nil {\n\t\tt.Fatalf(\"get: %s\", err)\n\t}\n\tif p != nil {\n\t\tt.Fatalf(\"expected empty state, got: %q\", string(p.Data))\n\t}\n}\n\n\/\/ Test the lock implementation for a remote.Client.\n\/\/ This test requires 2 client instances, in oder to have multiple remote\n\/\/ clients since some implementations may tie the client to the lock, or may\n\/\/ have reentrant locks.\nfunc TestRemoteLocks(t *testing.T, a, b Client) {\n\tlockerA, ok := a.(statemgr.Locker)\n\tif !ok {\n\t\tt.Fatal(\"client A not a statemgr.Locker\")\n\t}\n\n\tlockerB, ok := b.(statemgr.Locker)\n\tif !ok {\n\t\tt.Fatal(\"client B not a statemgr.Locker\")\n\t}\n\n\tinfoA := statemgr.NewLockInfo()\n\tinfoA.Operation = \"test\"\n\tinfoA.Who = \"clientA\"\n\n\tinfoB := statemgr.NewLockInfo()\n\tinfoB.Operation = \"test\"\n\tinfoB.Who = \"clientB\"\n\n\tlockIDA, err := lockerA.Lock(infoA)\n\tif err != nil {\n\t\tt.Fatal(\"unable to get initial lock:\", err)\n\t}\n\n\t_, err = lockerB.Lock(infoB)\n\tif err == nil {\n\t\tlockerA.Unlock(lockIDA)\n\t\tt.Fatal(\"client B obtained lock while held by client A\")\n\t}\n\n\tif err := lockerA.Unlock(lockIDA); err != nil {\n\t\tt.Fatal(\"error unlocking client A\", err)\n\t}\n\n\tlockIDB, err := lockerB.Lock(infoB)\n\tif err != nil {\n\t\tt.Fatal(\"unable to obtain lock from client B\")\n\t}\n\n\tif lockIDB == lockIDA {\n\t\tt.Fatalf(\"duplicate lock IDs: %q\", lockIDB)\n\t}\n\n\tif err = lockerB.Unlock(lockIDB); err != nil {\n\t\tt.Fatal(\"error unlocking client B:\", err)\n\t}\n\n\t\/\/ TODO: Should we enforce that Unlock requires the correct ID?\n}\n<commit_msg>states\/remote: Check for LockError error type<commit_after>package remote\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/states\/statefile\"\n\t\"github.com\/hashicorp\/terraform\/internal\/states\/statemgr\"\n)\n\n\/\/ TestClient is a generic function to test any client.\nfunc TestClient(t *testing.T, c Client) {\n\tvar buf bytes.Buffer\n\ts := statemgr.TestFullInitialState()\n\tsf := statefile.New(s, \"stub-lineage\", 2)\n\terr := statefile.Write(sf, &buf)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdata := buf.Bytes()\n\n\tif err := c.Put(data); err != nil {\n\t\tt.Fatalf(\"put: %s\", err)\n\t}\n\n\tp, err := c.Get()\n\tif err != nil {\n\t\tt.Fatalf(\"get: %s\", err)\n\t}\n\tif !bytes.Equal(p.Data, data) {\n\t\tt.Fatalf(\"expected full state %q\\n\\ngot: %q\", string(p.Data), string(data))\n\t}\n\n\tif err := c.Delete(); err != nil {\n\t\tt.Fatalf(\"delete: %s\", err)\n\t}\n\n\tp, err = c.Get()\n\tif err != nil {\n\t\tt.Fatalf(\"get: %s\", err)\n\t}\n\tif p != nil {\n\t\tt.Fatalf(\"expected empty state, got: %q\", string(p.Data))\n\t}\n}\n\n\/\/ Test the lock implementation for a remote.Client.\n\/\/ This test requires 2 client instances, in oder to have multiple remote\n\/\/ clients since some implementations may tie the client to the lock, or may\n\/\/ have reentrant locks.\nfunc TestRemoteLocks(t *testing.T, a, b Client) {\n\tlockerA, ok := a.(statemgr.Locker)\n\tif !ok {\n\t\tt.Fatal(\"client A not a statemgr.Locker\")\n\t}\n\n\tlockerB, ok := b.(statemgr.Locker)\n\tif !ok {\n\t\tt.Fatal(\"client B not a statemgr.Locker\")\n\t}\n\n\tinfoA := statemgr.NewLockInfo()\n\tinfoA.Operation = \"test\"\n\tinfoA.Who = \"clientA\"\n\n\tinfoB := statemgr.NewLockInfo()\n\tinfoB.Operation = \"test\"\n\tinfoB.Who = \"clientB\"\n\n\tlockIDA, err := lockerA.Lock(infoA)\n\tif err != nil {\n\t\tt.Fatal(\"unable to get initial lock:\", err)\n\t}\n\n\t_, err = lockerB.Lock(infoB)\n\tif err == nil {\n\t\tlockerA.Unlock(lockIDA)\n\t\tt.Fatal(\"client B obtained lock while held by client A\")\n\t}\n\tif _, ok := err.(*statemgr.LockError); !ok {\n\t\tt.Errorf(\"expected a LockError, but was %t: %s\", err, err)\n\t}\n\n\tif err := lockerA.Unlock(lockIDA); err != nil {\n\t\tt.Fatal(\"error unlocking client A\", err)\n\t}\n\n\tlockIDB, err := lockerB.Lock(infoB)\n\tif err != nil {\n\t\tt.Fatal(\"unable to obtain lock from client B\")\n\t}\n\n\tif lockIDB == lockIDA {\n\t\tt.Fatalf(\"duplicate lock IDs: %q\", lockIDB)\n\t}\n\n\tif err = lockerB.Unlock(lockIDB); err != nil {\n\t\tt.Fatal(\"error unlocking client B:\", err)\n\t}\n\n\t\/\/ TODO: Should we enforce that Unlock requires the correct ID?\n}\n<|endoftext|>"} {"text":"<commit_before>package stupidgcm\n\nimport (\n\t\"bytes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"log\"\n\t\"testing\"\n)\n\nfunc testCiphers(t *testing.T, our cipher.AEAD, ref cipher.AEAD) {\n\tt.Run(\"testEncryptDecrypt\", func(t *testing.T) { testEncryptDecrypt(t, our, ref) })\n\tt.Run(\"testInplaceSeal\", func(t *testing.T) { testInplaceSeal(t, our, ref) })\n\tt.Run(\"testInplaceOpen\", func(t *testing.T) { testInplaceOpen(t, our, ref) })\n\tt.Run(\"testCorruption_c1\", func(t *testing.T) { testCorruption(t, our) })\n\tt.Run(\"testCorruption_c2\", func(t *testing.T) { testCorruption(t, ref) })\n\tt.Run(\"testWipe\", func(t *testing.T) { testWipe(t, our) })\n}\n\n\/\/ testEncryptDecrypt encrypts and decrypts using both stupidgcm and Go's built-in\n\/\/ GCM implementation and verifies that the results are identical.\nfunc testEncryptDecrypt(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tif c1.NonceSize() != c2.NonceSize() {\n\t\tt.Fatal(\"different NonceSize\")\n\t}\n\tif c1.Overhead() != c2.Overhead() {\n\t\tt.Fatal(\"different Overhead\")\n\t}\n\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tdst := make([]byte, 71) \/\/ 71 = arbitrary length\n\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < 5000; i++ {\n\t\tin := make([]byte, i)\n\n\t\tc1out := c1.Seal(dst, iv, in, authData)\n\t\tc2out := c2.Seal(dst, iv, in, authData)\n\n\t\t\/\/ Ciphertext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out, c2out) {\n\t\t\tt.Fatalf(\"Compare failed for encryption, size %d\", i)\n\t\t\tt.Log(\"c1out:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c1out))\n\t\t\tt.Log(\"c2out:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c2out))\n\t\t}\n\n\t\tc1out2, sErr := c1.Open(dst, iv, c1out[len(dst):], authData)\n\t\tif sErr != nil {\n\t\t\tt.Fatal(sErr)\n\t\t}\n\t\tc2out2, gErr := c2.Open(dst, iv, c2out[len(dst):], authData)\n\t\tif gErr != nil {\n\t\t\tt.Fatal(gErr)\n\t\t}\n\n\t\t\/\/ Plaintext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out2, c2out2) {\n\t\t\tt.Fatalf(\"Compare failed for decryption, size %d\", i)\n\t\t}\n\t}\n}\n\n\/\/ Seal re-uses the \"dst\" buffer it is large enough.\n\/\/ Check that this works correctly by testing different \"dst\" capacities from\n\/\/ 5000 to 16 and \"in\" lengths from 1 to 5000.\nfunc testInplaceSeal(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tmax := 5016\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < max-16; i++ {\n\t\tin := make([]byte, i)\n\t\tdst := make([]byte, max-i)\n\t\tdst = dst[:16]\n\n\t\tc1out := c1.Seal(dst, iv, in, authData)\n\t\tdst2 := make([]byte, 16)\n\t\tc2out := c2.Seal(dst2, iv, in, authData)\n\n\t\t\/\/ Ciphertext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out, c2out) {\n\t\t\tt.Fatalf(\"Compare failed for encryption, size %d\", i)\n\t\t\tt.Log(\"sOut:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c1out))\n\t\t\tt.Log(\"gOut:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c2out))\n\t\t}\n\t}\n}\n\n\/\/ testInplaceOpen - Open re-uses the \"dst\" buffer it is large enough.\n\/\/ Check that this works correctly by testing different \"dst\" capacities from\n\/\/ 5000 to 16 and \"in\" lengths from 1 to 5000.\nfunc testInplaceOpen(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tmax := 5016\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < max-c1.NonceSize(); i++ {\n\t\tin := make([]byte, i)\n\n\t\tc2ciphertext := c2.Seal(iv, iv, in, authData)\n\n\t\tdst := make([]byte, max-i)\n\t\t\/\/ sPlaintext ... stupidgcm plaintext\n\t\tc1plaintext, err := c1.Open(dst[:0], iv, c2ciphertext[c1.NonceSize():], authData)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t\/\/ Plaintext must be identical to Go GCM\n\t\tif !bytes.Equal(in, c1plaintext) {\n\t\t\tt.Fatalf(\"Compare failed, i=%d\", i)\n\t\t}\n\t}\n}\n\n\/\/ testCorruption verifies that changes in the ciphertext result in a decryption\n\/\/ error\nfunc testCorruption(t *testing.T, c cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c.NonceSize())\n\n\tin := make([]byte, 354)\n\tout := c.Seal(nil, iv, in, authData)\n\tout2, sErr := c.Open(nil, iv, out, authData)\n\tif sErr != nil {\n\t\tt.Fatal(sErr)\n\t}\n\tif !bytes.Equal(in, out2) {\n\t\tt.Fatalf(\"Compare failed\")\n\t}\n\n\t\/\/ Corrupt first byte\n\tout[0]++\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n\tout[0]--\n\n\t\/\/ Corrupt last byte\n\tout[len(out)-1]++\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n\tout[len(out)-1]--\n\n\t\/\/ Append one byte\n\tout = append(out, 0)\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n}\n\nfunc testWipe(t *testing.T, c cipher.AEAD) {\n\tswitch c2 := c.(type) {\n\tcase *StupidGCM:\n\t\tc2.Wipe()\n\t\tif !c2.Wiped() {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2._key is not zeroed\")\n\t\t\t}\n\t\t}\n\tcase *stupidChacha20poly1305:\n\t\tc2.Wipe()\n\t\tif !c2.Wiped() {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2._key is not zeroed\")\n\t\t\t}\n\t\t}\n\tcase *stupidXchacha20poly1305:\n\t\tc2.Wipe()\n\t\tif !c2.wiped {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2.key is not zeroed\")\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tt.Fatalf(\"BUG: unhandled type %T\", c2)\n\t}\n}\n\n\/\/ Get \"n\" random bytes from \/dev\/urandom or panic\nfunc randBytes(n int) []byte {\n\tb := make([]byte, n)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tlog.Panic(\"Failed to read random bytes: \" + err.Error())\n\t}\n\treturn b\n}\n\n\/*\nBenchmarkCCall benchmarks the overhead of calling from Go into C.\nLooks like things improved a bit compared to\nhttps:\/\/www.cockroachlabs.com\/blog\/the-cost-and-complexity-of-cgo\/\nwhere they measured 171ns\/op:\n\n$ go test -bench .\ngoos: linux\ngoarch: amd64\npkg: github.com\/rfjakob\/gocryptfs\/v2\/internal\/stupidgcm\ncpu: Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz\nBenchmarkCCall-4 \t13989364\t 76.72 ns\/op\nPASS\nok \tgithub.com\/rfjakob\/gocryptfs\/v2\/internal\/stupidgcm\t1.735s\n*\/\nfunc BenchmarkCCall(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tnoopCFunction()\n\t}\n}\n<commit_msg>stupidgcm: add testConcurrency<commit_after>\/\/ +build cgo,!without_openssl\n\npackage stupidgcm\n\nimport (\n\t\"bytes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"log\"\n\t\"sync\"\n\t\"testing\"\n)\n\nfunc testCiphers(t *testing.T, our cipher.AEAD, ref cipher.AEAD) {\n\tt.Run(\"testEncryptDecrypt\", func(t *testing.T) { testEncryptDecrypt(t, our, ref) })\n\tt.Run(\"testInplaceSeal\", func(t *testing.T) { testInplaceSeal(t, our, ref) })\n\tt.Run(\"testInplaceOpen\", func(t *testing.T) { testInplaceOpen(t, our, ref) })\n\tt.Run(\"testCorruption_c1\", func(t *testing.T) { testCorruption(t, our) })\n\tt.Run(\"testCorruption_c2\", func(t *testing.T) { testCorruption(t, ref) })\n\tt.Run(\"testConcurrency\", func(t *testing.T) { testConcurrency(t, our, ref) })\n\tt.Run(\"testWipe\", func(t *testing.T) { testWipe(t, our) })\n}\n\n\/\/ testEncryptDecrypt encrypts and decrypts using both stupidgcm and Go's built-in\n\/\/ GCM implementation and verifies that the results are identical.\nfunc testEncryptDecrypt(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tif c1.NonceSize() != c2.NonceSize() {\n\t\tt.Fatal(\"different NonceSize\")\n\t}\n\tif c1.Overhead() != c2.Overhead() {\n\t\tt.Fatal(\"different Overhead\")\n\t}\n\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tdst := make([]byte, 71) \/\/ 71 = arbitrary length\n\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < 5000; i++ {\n\t\tin := make([]byte, i)\n\n\t\tc1out := c1.Seal(dst, iv, in, authData)\n\t\tc2out := c2.Seal(dst, iv, in, authData)\n\n\t\t\/\/ Ciphertext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out, c2out) {\n\t\t\tt.Fatalf(\"Compare failed for encryption, size %d\", i)\n\t\t\tt.Log(\"c1out:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c1out))\n\t\t\tt.Log(\"c2out:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c2out))\n\t\t}\n\n\t\tc1out2, sErr := c1.Open(dst, iv, c1out[len(dst):], authData)\n\t\tif sErr != nil {\n\t\t\tt.Fatal(sErr)\n\t\t}\n\t\tc2out2, gErr := c2.Open(dst, iv, c2out[len(dst):], authData)\n\t\tif gErr != nil {\n\t\t\tt.Fatal(gErr)\n\t\t}\n\n\t\t\/\/ Plaintext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out2, c2out2) {\n\t\t\tt.Fatalf(\"Compare failed for decryption, size %d\", i)\n\t\t}\n\t}\n}\n\n\/\/ testConcurrency verifies that we don't corrupt data when called concurrently\nfunc testConcurrency(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tconst loopCount = 2\n\tconst goroutineCount = 4\n\n\tfor h := 0; h < loopCount; h++ {\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < goroutineCount; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\ttestEncryptDecrypt(t, c1, c2)\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\twg.Wait()\n\t\t}\n\t}\n}\n\n\/\/ testInplaceSeal:\n\/\/ Seal re-uses the \"dst\" buffer it is large enough.\n\/\/ Check that this works correctly by testing different \"dst\" capacities from\n\/\/ 5000 to 16 and \"in\" lengths from 1 to 5000.\nfunc testInplaceSeal(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tmax := 5016\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < max-16; i++ {\n\t\tin := make([]byte, i)\n\t\tdst := make([]byte, max-i)\n\t\tdst = dst[:16]\n\n\t\tc1out := c1.Seal(dst, iv, in, authData)\n\t\tdst2 := make([]byte, 16)\n\t\tc2out := c2.Seal(dst2, iv, in, authData)\n\n\t\t\/\/ Ciphertext must be identical to Go GCM\n\t\tif !bytes.Equal(c1out, c2out) {\n\t\t\tt.Fatalf(\"Compare failed for encryption, size %d\", i)\n\t\t\tt.Log(\"sOut:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c1out))\n\t\t\tt.Log(\"gOut:\")\n\t\t\tt.Log(\"\\n\" + hex.Dump(c2out))\n\t\t}\n\t}\n}\n\n\/\/ testInplaceOpen - Open re-uses the \"dst\" buffer it is large enough.\n\/\/ Check that this works correctly by testing different \"dst\" capacities from\n\/\/ 5000 to 16 and \"in\" lengths from 1 to 5000.\nfunc testInplaceOpen(t *testing.T, c1 cipher.AEAD, c2 cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c1.NonceSize())\n\n\tmax := 5016\n\t\/\/ Check all block sizes from 1 to 5000\n\tfor i := 1; i < max-c1.NonceSize(); i++ {\n\t\tin := make([]byte, i)\n\n\t\tc2ciphertext := c2.Seal(iv, iv, in, authData)\n\n\t\tdst := make([]byte, max-i)\n\t\t\/\/ sPlaintext ... stupidgcm plaintext\n\t\tc1plaintext, err := c1.Open(dst[:0], iv, c2ciphertext[c1.NonceSize():], authData)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t\/\/ Plaintext must be identical to Go GCM\n\t\tif !bytes.Equal(in, c1plaintext) {\n\t\t\tt.Fatalf(\"Compare failed, i=%d\", i)\n\t\t}\n\t}\n}\n\n\/\/ testCorruption verifies that changes in the ciphertext result in a decryption\n\/\/ error\nfunc testCorruption(t *testing.T, c cipher.AEAD) {\n\tauthData := randBytes(24)\n\tiv := randBytes(c.NonceSize())\n\n\tin := make([]byte, 354)\n\tout := c.Seal(nil, iv, in, authData)\n\tout2, sErr := c.Open(nil, iv, out, authData)\n\tif sErr != nil {\n\t\tt.Fatal(sErr)\n\t}\n\tif !bytes.Equal(in, out2) {\n\t\tt.Fatalf(\"Compare failed\")\n\t}\n\n\t\/\/ Corrupt first byte\n\tout[0]++\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n\tout[0]--\n\n\t\/\/ Corrupt last byte\n\tout[len(out)-1]++\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n\tout[len(out)-1]--\n\n\t\/\/ Append one byte\n\tout = append(out, 0)\n\tout2, sErr = c.Open(nil, iv, out, authData)\n\tif sErr == nil || out2 != nil {\n\t\tt.Fatalf(\"Should have gotten error\")\n\t}\n}\n\nfunc testWipe(t *testing.T, c cipher.AEAD) {\n\tswitch c2 := c.(type) {\n\tcase *StupidGCM:\n\t\tc2.Wipe()\n\t\tif !c2.Wiped() {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2._key is not zeroed\")\n\t\t\t}\n\t\t}\n\tcase *stupidChacha20poly1305:\n\t\tc2.Wipe()\n\t\tif !c2.Wiped() {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2._key is not zeroed\")\n\t\t\t}\n\t\t}\n\tcase *stupidXchacha20poly1305:\n\t\tc2.Wipe()\n\t\tif !c2.wiped {\n\t\t\tt.Error(\"c2.wiped is not set\")\n\t\t}\n\t\tfor _, v := range c2.key {\n\t\t\tif v != 0 {\n\t\t\t\tt.Fatal(\"c2.key is not zeroed\")\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tt.Fatalf(\"BUG: unhandled type %T\", c2)\n\t}\n}\n\n\/\/ Get \"n\" random bytes from \/dev\/urandom or panic\nfunc randBytes(n int) []byte {\n\tb := make([]byte, n)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tlog.Panic(\"Failed to read random bytes: \" + err.Error())\n\t}\n\treturn b\n}\n\n\/*\nBenchmarkCCall benchmarks the overhead of calling from Go into C.\nLooks like things improved a bit compared to\nhttps:\/\/www.cockroachlabs.com\/blog\/the-cost-and-complexity-of-cgo\/\nwhere they measured 171ns\/op:\n\n$ go test -bench .\ngoos: linux\ngoarch: amd64\npkg: github.com\/rfjakob\/gocryptfs\/v2\/internal\/stupidgcm\ncpu: Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz\nBenchmarkCCall-4 \t13989364\t 76.72 ns\/op\nPASS\nok \tgithub.com\/rfjakob\/gocryptfs\/v2\/internal\/stupidgcm\t1.735s\n*\/\nfunc BenchmarkCCall(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tnoopCFunction()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. All Rights Reserved.\n\nSPDX-License-Identifier: Apache-2.0\n*\/\npackage factory\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"plugin\"\n\n\t\"github.com\/hyperledger\/fabric\/bccsp\"\n)\n\nconst (\n\t\/\/ PluginFactoryName is the factory name for BCCSP plugins\n\tPluginFactoryName = \"PLUGIN\"\n)\n\n\/\/ PluginOpts contains the options for the PluginFactory\ntype PluginOpts struct {\n\t\/\/ Path to plugin library\n\tLibrary string\n\t\/\/ Config map for the plugin library\n\tConfig map[string]interface{}\n}\n\n\/\/ PluginFactory is the factory for BCCSP plugins\ntype PluginFactory struct{}\n\n\/\/ Name returns the name of this factory\nfunc (f *PluginFactory) Name() string {\n\treturn PluginFactoryName\n}\n\n\/\/ Get returns an instance of BCCSP using Opts.\nfunc (f *PluginFactory) Get(config *FactoryOpts) (bccsp.BCCSP, error) {\n\t\/\/ check for valid config\n\tif config == nil || config.PluginOpts == nil {\n\t\treturn nil, errors.New(\"Invalid config. It must not be nil.\")\n\t}\n\n\t\/\/ Library is required property\n\tif config.PluginOpts.Library == \"\" {\n\t\treturn nil, errors.New(\"Invalid config: missing property 'Library'\")\n\t}\n\n\t\/\/ make sure the library exists\n\tif _, err := os.Stat(config.PluginOpts.Library); err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not find library '%s' [%s]\", config.PluginOpts.Library, err)\n\t}\n\n\t\/\/ attempt to load the library as a plugin\n\tplug, err := plugin.Open(config.PluginOpts.Library)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load plugin '%s' [%s]\", config.PluginOpts.Library, err)\n\t}\n\n\t\/\/ lookup the required symbol 'New'\n\tsym, err := plug.Lookup(\"New\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not find required symbol 'CryptoServiceProvider' [%s]\", err)\n\t}\n\n\t\/\/ check to make sure symbol New meets the required function signature\n\tnew, ok := sym.(func(config map[string]interface{}) (bccsp.BCCSP, error))\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Plugin does not implement the required function signature for 'New'\")\n\t}\n\n\treturn new(config.PluginOpts.Config)\n}\n<commit_msg>FAB-15403 avoid using Go key word for var<commit_after>\/*\nCopyright IBM Corp. All Rights Reserved.\n\nSPDX-License-Identifier: Apache-2.0\n*\/\npackage factory\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"plugin\"\n\n\t\"github.com\/hyperledger\/fabric\/bccsp\"\n)\n\nconst (\n\t\/\/ PluginFactoryName is the factory name for BCCSP plugins\n\tPluginFactoryName = \"PLUGIN\"\n)\n\n\/\/ PluginOpts contains the options for the PluginFactory\ntype PluginOpts struct {\n\t\/\/ Path to plugin library\n\tLibrary string\n\t\/\/ Config map for the plugin library\n\tConfig map[string]interface{}\n}\n\n\/\/ PluginFactory is the factory for BCCSP plugins\ntype PluginFactory struct{}\n\n\/\/ Name returns the name of this factory\nfunc (f *PluginFactory) Name() string {\n\treturn PluginFactoryName\n}\n\n\/\/ Get returns an instance of BCCSP using Opts.\nfunc (f *PluginFactory) Get(config *FactoryOpts) (bccsp.BCCSP, error) {\n\t\/\/ check for valid config\n\tif config == nil || config.PluginOpts == nil {\n\t\treturn nil, errors.New(\"Invalid config. It must not be nil.\")\n\t}\n\n\t\/\/ Library is required property\n\tif config.PluginOpts.Library == \"\" {\n\t\treturn nil, errors.New(\"Invalid config: missing property 'Library'\")\n\t}\n\n\t\/\/ make sure the library exists\n\tif _, err := os.Stat(config.PluginOpts.Library); err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not find library '%s' [%s]\", config.PluginOpts.Library, err)\n\t}\n\n\t\/\/ attempt to load the library as a plugin\n\tplug, err := plugin.Open(config.PluginOpts.Library)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load plugin '%s' [%s]\", config.PluginOpts.Library, err)\n\t}\n\n\t\/\/ lookup the required symbol 'New'\n\tsym, err := plug.Lookup(\"New\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not find required symbol 'CryptoServiceProvider' [%s]\", err)\n\t}\n\n\t\/\/ check to make sure symbol New meets the required function signature\n\tsymNew, ok := sym.(func(config map[string]interface{}) (bccsp.BCCSP, error))\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Plugin does not implement the required function signature for 'New'\")\n\t}\n\n\treturn symNew(config.PluginOpts.Config)\n}\n<|endoftext|>"} {"text":"<commit_before>package relay\n\nimport (\n\t\"encoding\/binary\"\n\n\tcircuit \"github.com\/libp2p\/go-libp2p-circuit\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n)\n\n\/\/ This function cleans up a relay's address set to remove private addresses and curtail\n\/\/ addrsplosion.\nfunc cleanupAddressSet(pi pstore.PeerInfo) pstore.PeerInfo {\n\tvar public, private []ma.Multiaddr\n\n\tfor _, a := range pi.Addrs {\n\t\tif isRelayAddr(a) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif manet.IsPublicAddr(a) || isDNSAddr(a) {\n\t\t\tpublic = append(public, a)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ discard unroutable addrs\n\t\tif manet.IsPrivateAddr(a) {\n\t\t\tprivate = append(private, a)\n\t\t}\n\t}\n\n\tif !hasAddrsplosion(public) {\n\t\treturn pstore.PeerInfo{ID: pi.ID, Addrs: public}\n\t}\n\n\taddrs := sanitizeAddrsplodedSet(public, private)\n\treturn pstore.PeerInfo{ID: pi.ID, Addrs: addrs}\n}\n\nfunc isRelayAddr(a ma.Multiaddr) bool {\n\tisRelay := false\n\n\tma.ForEach(a, func(c ma.Component) bool {\n\t\tswitch c.Protocol().Code {\n\t\tcase circuit.P_CIRCUIT:\n\t\t\tisRelay = true\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn true\n\t\t}\n\t})\n\n\treturn isRelay\n}\n\nfunc isDNSAddr(a ma.Multiaddr) bool {\n\tif first, _ := ma.SplitFirst(a); first != nil {\n\t\tswitch first.Protocol().Code {\n\t\tcase 54, 55, 56:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ we have addrsplosion if for some protocol we advertise multiple ports on\n\/\/ the same base address.\nfunc hasAddrsplosion(addrs []ma.Multiaddr) bool {\n\taset := make(map[string]int)\n\n\tfor _, a := range addrs {\n\t\tkey, port := addrKeyAndPort(a)\n\t\txport, ok := aset[key]\n\t\tif ok && port != xport {\n\t\t\treturn true\n\t\t}\n\t\taset[key] = port\n\t}\n\n\treturn false\n}\n\nfunc addrKeyAndPort(a ma.Multiaddr) (string, int) {\n\tvar (\n\t\tkey string\n\t\tport int\n\t)\n\n\tma.ForEach(a, func(c ma.Component) bool {\n\t\tswitch c.Protocol().Code {\n\t\tcase ma.P_TCP, ma.P_UDP:\n\t\t\tport = int(binary.BigEndian.Uint16(c.RawValue()))\n\t\t\tkey += \"\/\" + c.Protocol().Name\n\t\tdefault:\n\t\t\tval := c.Value()\n\t\t\tif val == \"\" {\n\t\t\t\tval = c.Protocol().Name\n\t\t\t}\n\t\t\tkey += \"\/\" + val\n\t\t}\n\t\treturn true\n\t})\n\n\treturn key, port\n}\n\n\/\/ clean up addrsplosion\n\/\/ the following heuristic is used:\n\/\/ - for each base address\/protocol combination, if there are multiple ports advertised then\n\/\/ only accept the default port if present.\n\/\/ - If the default port is not present, we check for non-standard ports by tracking\n\/\/ private port bindings if present.\n\/\/ - If there is no default or private port binding, then we can't infer the correct\n\/\/ port and give up and return all addrs (for that base address)\nfunc sanitizeAddrsplodedSet(public, private []ma.Multiaddr) []ma.Multiaddr {\n\ttype portAndAddr struct {\n\t\taddr ma.Multiaddr\n\t\tport int\n\t}\n\n\tprivports := make(map[int]struct{})\n\tpubaddrs := make(map[string][]portAndAddr)\n\n\tfor _, a := range private {\n\t\t_, port := addrKeyAndPort(a)\n\t\tprivports[port] = struct{}{}\n\t}\n\n\tfor _, a := range public {\n\t\tkey, port := addrKeyAndPort(a)\n\t\tpubaddrs[key] = append(pubaddrs[key], portAndAddr{addr: a, port: port})\n\t}\n\n\tvar result []ma.Multiaddr\n\tfor _, pas := range pubaddrs {\n\t\tif len(pas) == 1 {\n\t\t\t\/\/ it's not addrsploded\n\t\t\tresult = append(result, pas[0].addr)\n\t\t\tcontinue\n\t\t}\n\n\t\thaveAddr := false\n\t\tfor _, pa := range pas {\n\t\t\tif pa.port == 4001 || pa.port == 4002 {\n\t\t\t\t\/\/ it's a default port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, ok := privports[pa.port]; ok {\n\t\t\t\t\/\/ it matches a privately bound port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t}\n\t\t}\n\n\t\tif !haveAddr {\n\t\t\t\/\/ we weren't able to select a port; bite the bullet and use them all\n\t\t\tfor _, pa := range pas {\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result\n}\n<commit_msg>test for privately bound port first when cleaning up addrsplosion<commit_after>package relay\n\nimport (\n\t\"encoding\/binary\"\n\n\tcircuit \"github.com\/libp2p\/go-libp2p-circuit\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n)\n\n\/\/ This function cleans up a relay's address set to remove private addresses and curtail\n\/\/ addrsplosion.\nfunc cleanupAddressSet(pi pstore.PeerInfo) pstore.PeerInfo {\n\tvar public, private []ma.Multiaddr\n\n\tfor _, a := range pi.Addrs {\n\t\tif isRelayAddr(a) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif manet.IsPublicAddr(a) || isDNSAddr(a) {\n\t\t\tpublic = append(public, a)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ discard unroutable addrs\n\t\tif manet.IsPrivateAddr(a) {\n\t\t\tprivate = append(private, a)\n\t\t}\n\t}\n\n\tif !hasAddrsplosion(public) {\n\t\treturn pstore.PeerInfo{ID: pi.ID, Addrs: public}\n\t}\n\n\taddrs := sanitizeAddrsplodedSet(public, private)\n\treturn pstore.PeerInfo{ID: pi.ID, Addrs: addrs}\n}\n\nfunc isRelayAddr(a ma.Multiaddr) bool {\n\tisRelay := false\n\n\tma.ForEach(a, func(c ma.Component) bool {\n\t\tswitch c.Protocol().Code {\n\t\tcase circuit.P_CIRCUIT:\n\t\t\tisRelay = true\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn true\n\t\t}\n\t})\n\n\treturn isRelay\n}\n\nfunc isDNSAddr(a ma.Multiaddr) bool {\n\tif first, _ := ma.SplitFirst(a); first != nil {\n\t\tswitch first.Protocol().Code {\n\t\tcase 54, 55, 56:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ we have addrsplosion if for some protocol we advertise multiple ports on\n\/\/ the same base address.\nfunc hasAddrsplosion(addrs []ma.Multiaddr) bool {\n\taset := make(map[string]int)\n\n\tfor _, a := range addrs {\n\t\tkey, port := addrKeyAndPort(a)\n\t\txport, ok := aset[key]\n\t\tif ok && port != xport {\n\t\t\treturn true\n\t\t}\n\t\taset[key] = port\n\t}\n\n\treturn false\n}\n\nfunc addrKeyAndPort(a ma.Multiaddr) (string, int) {\n\tvar (\n\t\tkey string\n\t\tport int\n\t)\n\n\tma.ForEach(a, func(c ma.Component) bool {\n\t\tswitch c.Protocol().Code {\n\t\tcase ma.P_TCP, ma.P_UDP:\n\t\t\tport = int(binary.BigEndian.Uint16(c.RawValue()))\n\t\t\tkey += \"\/\" + c.Protocol().Name\n\t\tdefault:\n\t\t\tval := c.Value()\n\t\t\tif val == \"\" {\n\t\t\t\tval = c.Protocol().Name\n\t\t\t}\n\t\t\tkey += \"\/\" + val\n\t\t}\n\t\treturn true\n\t})\n\n\treturn key, port\n}\n\n\/\/ clean up addrsplosion\n\/\/ the following heuristic is used:\n\/\/ - for each base address\/protocol combination, if there are multiple ports advertised then\n\/\/ only accept the default port if present.\n\/\/ - If the default port is not present, we check for non-standard ports by tracking\n\/\/ private port bindings if present.\n\/\/ - If there is no default or private port binding, then we can't infer the correct\n\/\/ port and give up and return all addrs (for that base address)\nfunc sanitizeAddrsplodedSet(public, private []ma.Multiaddr) []ma.Multiaddr {\n\ttype portAndAddr struct {\n\t\taddr ma.Multiaddr\n\t\tport int\n\t}\n\n\tprivports := make(map[int]struct{})\n\tpubaddrs := make(map[string][]portAndAddr)\n\n\tfor _, a := range private {\n\t\t_, port := addrKeyAndPort(a)\n\t\tprivports[port] = struct{}{}\n\t}\n\n\tfor _, a := range public {\n\t\tkey, port := addrKeyAndPort(a)\n\t\tpubaddrs[key] = append(pubaddrs[key], portAndAddr{addr: a, port: port})\n\t}\n\n\tvar result []ma.Multiaddr\n\tfor _, pas := range pubaddrs {\n\t\tif len(pas) == 1 {\n\t\t\t\/\/ it's not addrsploded\n\t\t\tresult = append(result, pas[0].addr)\n\t\t\tcontinue\n\t\t}\n\n\t\thaveAddr := false\n\t\tfor _, pa := range pas {\n\t\t\tif _, ok := privports[pa.port]; ok {\n\t\t\t\t\/\/ it matches a privately bound port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pa.port == 4001 || pa.port == 4002 {\n\t\t\t\t\/\/ it's a default port, use it\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t\thaveAddr = true\n\t\t\t}\n\t\t}\n\n\t\tif !haveAddr {\n\t\t\t\/\/ we weren't able to select a port; bite the bullet and use them all\n\t\t\tfor _, pa := range pas {\n\t\t\t\tresult = append(result, pa.addr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package noise\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tproto \"github.com\/gogo\/protobuf\/proto\"\n\tlogging \"github.com\/ipfs\/go-log\"\n\t\"github.com\/libp2p\/go-libp2p-core\/crypto\"\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\n\tik \"github.com\/libp2p\/go-libp2p-noise\/ik\"\n\tpb \"github.com\/libp2p\/go-libp2p-noise\/pb\"\n\txx \"github.com\/libp2p\/go-libp2p-noise\/xx\"\n)\n\nconst payload_string = \"noise-libp2p-static-key:\"\n\n\/\/ Each encrypted transport message must be <= 65,535 bytes, including 16\n\/\/ bytes of authentication data. To write larger plaintexts, we split them\n\/\/ into fragments of maxPlaintextLength before encrypting.\nconst maxPlaintextLength = 65519\n\nvar log = logging.Logger(\"noise\")\n\nvar errNoKeypair = errors.New(\"cannot initiate secureSession - transport has no noise keypair\")\n\ntype secureSession struct {\n\tinsecure net.Conn\n\n\tinitiator bool\n\tprologue []byte\n\n\tlocalKey crypto.PrivKey\n\tlocalPeer peer.ID\n\tremotePeer peer.ID\n\n\tlocal peerInfo\n\tremote peerInfo\n\n\txx_ns *xx.NoiseSession\n\tik_ns *ik.NoiseSession\n\n\txx_complete bool\n\tik_complete bool\n\n\tnoisePipesSupport bool\n\tnoiseStaticKeyCache *KeyCache\n\n\tnoiseKeypair *Keypair\n\n\tmsgBuffer []byte\n\trwLock sync.Mutex\n}\n\ntype peerInfo struct {\n\tnoiseKey [32]byte \/\/ static noise public key\n\tlibp2pKey crypto.PubKey\n}\n\n\/\/ newSecureSession creates a noise session over the given insecure Conn, using the static\n\/\/ Noise keypair and libp2p identity keypair from the given Transport.\n\/\/\n\/\/ If tpt.noisePipesSupport == true, the Noise Pipes handshake protocol will be used,\n\/\/ which consists of the IK and XXfallback handshake patterns. With Noise Pipes on, we first try IK,\n\/\/ if that fails, move to XXfallback. With Noise Pipes off, we always do XX.\nfunc newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, initiator bool) (*secureSession, error) {\n\tif tpt.noiseKeypair == nil {\n\t\treturn nil, errNoKeypair\n\t}\n\n\t\/\/ if the transport doesn't have a key cache, we make a new one just for\n\t\/\/ this session. it's a bit of a waste, but saves us having to check if\n\t\/\/ it's nil later\n\tkeyCache := tpt.noiseStaticKeyCache\n\tif keyCache == nil {\n\t\tkeyCache = NewKeyCache()\n\t}\n\n\tlocalPeerInfo := peerInfo{\n\t\tnoiseKey: tpt.noiseKeypair.publicKey,\n\t\tlibp2pKey: tpt.privateKey.GetPublic(),\n\t}\n\n\ts := &secureSession{\n\t\tinsecure: insecure,\n\t\tinitiator: initiator,\n\t\tprologue: []byte(ID),\n\t\tlocalKey: tpt.privateKey,\n\t\tlocalPeer: tpt.localID,\n\t\tremotePeer: remote,\n\t\tlocal: localPeerInfo,\n\t\tnoisePipesSupport: tpt.noisePipesSupport,\n\t\tnoiseStaticKeyCache: keyCache,\n\t\tmsgBuffer: []byte{},\n\t\tnoiseKeypair: tpt.noiseKeypair,\n\t}\n\n\terr := s.runHandshake(ctx)\n\n\treturn s, err\n}\n\nfunc (s *secureSession) NoiseStaticKeyCache() *KeyCache {\n\treturn s.noiseStaticKeyCache\n}\n\nfunc (s *secureSession) NoisePublicKey() [32]byte {\n\treturn s.noiseKeypair.publicKey\n}\n\nfunc (s *secureSession) NoisePrivateKey() [32]byte {\n\treturn s.noiseKeypair.privateKey\n}\n\nfunc (s *secureSession) readLength() (int, error) {\n\tbuf := make([]byte, 2)\n\t_, err := s.insecure.Read(buf)\n\treturn int(binary.BigEndian.Uint16(buf)), err\n}\n\nfunc (s *secureSession) writeLength(length int) error {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(length))\n\t_, err := s.insecure.Write(buf)\n\treturn err\n}\n\nfunc (s *secureSession) setRemotePeerInfo(key []byte) (err error) {\n\ts.remote.libp2pKey, err = crypto.UnmarshalPublicKey(key)\n\treturn err\n}\n\nfunc (s *secureSession) setRemotePeerID(key crypto.PubKey) (err error) {\n\ts.remotePeer, err = peer.IDFromPublicKey(key)\n\treturn err\n}\n\nfunc (s *secureSession) verifyPayload(payload *pb.NoiseHandshakePayload, noiseKey [32]byte) (err error) {\n\tsig := payload.GetNoiseStaticKeySignature()\n\tmsg := append([]byte(payload_string), noiseKey[:]...)\n\n\tlog.Debugf(\"verifyPayload msg=%x\", msg)\n\n\tok, err := s.RemotePublicKey().Verify(msg, sig)\n\tif err != nil {\n\t\treturn err\n\t} else if !ok {\n\t\treturn fmt.Errorf(\"did not verify payload\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *secureSession) runHandshake(ctx context.Context) error {\n\t\/\/ setup libp2p keys\n\tlocalKeyRaw, err := s.LocalPublicKey().Bytes()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"runHandshake err getting raw pubkey: %s\", err)\n\t}\n\n\t\/\/ sign noise data for payload\n\tnoise_pub := s.noiseKeypair.publicKey\n\tsignedPayload, err := s.localKey.Sign(append([]byte(payload_string), noise_pub[:]...))\n\tif err != nil {\n\t\tlog.Errorf(\"runHandshake signing payload err=%s\", err)\n\t\treturn fmt.Errorf(\"runHandshake signing payload err=%s\", err)\n\t}\n\n\t\/\/ create payload\n\tpayload := new(pb.NoiseHandshakePayload)\n\tpayload.Libp2PKey = localKeyRaw\n\tpayload.NoiseStaticKeySignature = signedPayload\n\tpayloadEnc, err := proto.Marshal(payload)\n\tif err != nil {\n\t\tlog.Errorf(\"runHandshake marshal payload err=%s\", err)\n\t\treturn fmt.Errorf(\"runHandshake proto marshal payload err=%s\", err)\n\t}\n\n\t\/\/ If we support Noise pipes, we try IK first, falling back to XX if IK fails.\n\t\/\/ The exception is when we're the initiator and don't know the other party's\n\t\/\/ static Noise key. Then IK will always fail, so we go straight to XX.\n\ttryIK := s.noisePipesSupport\n\tif s.initiator && s.noiseStaticKeyCache.Load(s.remotePeer) == [32]byte{} {\n\t\ttryIK = false\n\t}\n\tif tryIK {\n\t\t\/\/ we're either a responder or an initiator with a known static key for the remote peer, try IK\n\t\tbuf, err := s.runHandshake_ik(ctx, payloadEnc)\n\t\tif err != nil {\n\t\t\tlog.Error(\"runHandshake ik err=%s\", err)\n\n\t\t\t\/\/ IK failed, pipe to XXfallback\n\t\t\terr = s.runHandshake_xx(ctx, true, payloadEnc, buf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"runHandshake xx err=err\", err)\n\t\t\t\treturn fmt.Errorf(\"runHandshake xx err=%s\", err)\n\t\t\t}\n\n\t\t\ts.xx_complete = true\n\t\t} else {\n\t\t\ts.ik_complete = true\n\t\t}\n\t} else {\n\t\t\/\/ unknown static key for peer, try XX\n\t\terr := s.runHandshake_xx(ctx, false, payloadEnc, nil)\n\t\tif err != nil {\n\t\t\tlog.Error(\"runHandshake xx err=%s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\ts.xx_complete = true\n\t}\n\n\treturn nil\n}\n\nfunc (s *secureSession) LocalAddr() net.Addr {\n\treturn s.insecure.LocalAddr()\n}\n\nfunc (s *secureSession) LocalPeer() peer.ID {\n\treturn s.localPeer\n}\n\nfunc (s *secureSession) LocalPrivateKey() crypto.PrivKey {\n\treturn s.localKey\n}\n\nfunc (s *secureSession) LocalPublicKey() crypto.PubKey {\n\treturn s.localKey.GetPublic()\n}\n\nfunc (s *secureSession) Read(buf []byte) (int, error) {\n\tl := len(buf)\n\n\t\/\/ if we have previously unread bytes, and they fit into the buf, copy them over and return\n\tif l <= len(s.msgBuffer) {\n\t\tcopy(buf, s.msgBuffer)\n\t\ts.msgBuffer = s.msgBuffer[l:]\n\t\treturn l, nil\n\t}\n\n\treadChunk := func(buf []byte) (int, error) {\n\t\t\/\/ read length of encrypted message\n\t\tl, err := s.readLength()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ read and decrypt ciphertext\n\t\tciphertext := make([]byte, l)\n\t\t_, err = s.insecure.Read(ciphertext)\n\t\tif err != nil {\n\t\t\tlog.Error(\"read ciphertext err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\tplaintext, err := s.Decrypt(ciphertext)\n\t\tif err != nil {\n\t\t\tlog.Error(\"decrypt err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ append plaintext to message buffer, copy over what can fit in the buf\n\t\t\/\/ then advance message buffer to remove what was copied\n\t\ts.msgBuffer = append(s.msgBuffer, plaintext...)\n\t\tc := copy(buf, s.msgBuffer)\n\t\ts.msgBuffer = s.msgBuffer[c:]\n\t\treturn c, nil\n\t}\n\n\ttotal := 0\n\tfor i := 0; i < len(buf); i += maxPlaintextLength {\n\t\tend := i + maxPlaintextLength\n\t\tif end > len(buf) {\n\t\t\tend = len(buf)\n\t\t}\n\n\t\tc, err := readChunk(buf[i:end])\n\t\ttotal += c\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\n\treturn total, nil\n}\n\nfunc (s *secureSession) RemoteAddr() net.Addr {\n\treturn s.insecure.RemoteAddr()\n}\n\nfunc (s *secureSession) RemotePeer() peer.ID {\n\treturn s.remotePeer\n}\n\nfunc (s *secureSession) RemotePublicKey() crypto.PubKey {\n\treturn s.remote.libp2pKey\n}\n\nfunc (s *secureSession) SetDeadline(t time.Time) error {\n\treturn s.insecure.SetDeadline(t)\n}\n\nfunc (s *secureSession) SetReadDeadline(t time.Time) error {\n\treturn s.insecure.SetReadDeadline(t)\n}\n\nfunc (s *secureSession) SetWriteDeadline(t time.Time) error {\n\treturn s.insecure.SetWriteDeadline(t)\n}\n\nfunc (s *secureSession) Write(in []byte) (int, error) {\n\ts.rwLock.Lock()\n\tdefer s.rwLock.Unlock()\n\n\twriteChunk := func(in []byte) (int, error) {\n\t\tciphertext, err := s.Encrypt(in)\n\t\tif err != nil {\n\t\t\tlog.Error(\"encrypt error\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\terr = s.writeLength(len(ciphertext))\n\t\tif err != nil {\n\t\t\tlog.Error(\"write length err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\t_, err = s.insecure.Write(ciphertext)\n\t\treturn len(in), err\n\t}\n\n\twritten := 0\n\tfor i := 0; i < len(in); i += maxPlaintextLength {\n\t\tend := i + maxPlaintextLength\n\t\tif end > len(in) {\n\t\t\tend = len(in)\n\t\t}\n\n\t\tl, err := writeChunk(in[i:end])\n\t\twritten += l\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\treturn written, nil\n}\n\nfunc (s *secureSession) Close() error {\n\treturn s.insecure.Close()\n}\n<commit_msg>use empty prologue<commit_after>package noise\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tproto \"github.com\/gogo\/protobuf\/proto\"\n\tlogging \"github.com\/ipfs\/go-log\"\n\t\"github.com\/libp2p\/go-libp2p-core\/crypto\"\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\n\tik \"github.com\/libp2p\/go-libp2p-noise\/ik\"\n\tpb \"github.com\/libp2p\/go-libp2p-noise\/pb\"\n\txx \"github.com\/libp2p\/go-libp2p-noise\/xx\"\n)\n\nconst payload_string = \"noise-libp2p-static-key:\"\n\n\/\/ Each encrypted transport message must be <= 65,535 bytes, including 16\n\/\/ bytes of authentication data. To write larger plaintexts, we split them\n\/\/ into fragments of maxPlaintextLength before encrypting.\nconst maxPlaintextLength = 65519\n\nvar log = logging.Logger(\"noise\")\n\nvar errNoKeypair = errors.New(\"cannot initiate secureSession - transport has no noise keypair\")\n\ntype secureSession struct {\n\tinsecure net.Conn\n\n\tinitiator bool\n\tprologue []byte\n\n\tlocalKey crypto.PrivKey\n\tlocalPeer peer.ID\n\tremotePeer peer.ID\n\n\tlocal peerInfo\n\tremote peerInfo\n\n\txx_ns *xx.NoiseSession\n\tik_ns *ik.NoiseSession\n\n\txx_complete bool\n\tik_complete bool\n\n\tnoisePipesSupport bool\n\tnoiseStaticKeyCache *KeyCache\n\n\tnoiseKeypair *Keypair\n\n\tmsgBuffer []byte\n\trwLock sync.Mutex\n}\n\ntype peerInfo struct {\n\tnoiseKey [32]byte \/\/ static noise public key\n\tlibp2pKey crypto.PubKey\n}\n\n\/\/ newSecureSession creates a noise session over the given insecure Conn, using the static\n\/\/ Noise keypair and libp2p identity keypair from the given Transport.\n\/\/\n\/\/ If tpt.noisePipesSupport == true, the Noise Pipes handshake protocol will be used,\n\/\/ which consists of the IK and XXfallback handshake patterns. With Noise Pipes on, we first try IK,\n\/\/ if that fails, move to XXfallback. With Noise Pipes off, we always do XX.\nfunc newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, initiator bool) (*secureSession, error) {\n\tif tpt.noiseKeypair == nil {\n\t\treturn nil, errNoKeypair\n\t}\n\n\t\/\/ if the transport doesn't have a key cache, we make a new one just for\n\t\/\/ this session. it's a bit of a waste, but saves us having to check if\n\t\/\/ it's nil later\n\tkeyCache := tpt.noiseStaticKeyCache\n\tif keyCache == nil {\n\t\tkeyCache = NewKeyCache()\n\t}\n\n\tlocalPeerInfo := peerInfo{\n\t\tnoiseKey: tpt.noiseKeypair.publicKey,\n\t\tlibp2pKey: tpt.privateKey.GetPublic(),\n\t}\n\n\ts := &secureSession{\n\t\tinsecure: insecure,\n\t\tinitiator: initiator,\n\t\tprologue: []byte{},\n\t\tlocalKey: tpt.privateKey,\n\t\tlocalPeer: tpt.localID,\n\t\tremotePeer: remote,\n\t\tlocal: localPeerInfo,\n\t\tnoisePipesSupport: tpt.noisePipesSupport,\n\t\tnoiseStaticKeyCache: keyCache,\n\t\tmsgBuffer: []byte{},\n\t\tnoiseKeypair: tpt.noiseKeypair,\n\t}\n\n\terr := s.runHandshake(ctx)\n\n\treturn s, err\n}\n\nfunc (s *secureSession) NoiseStaticKeyCache() *KeyCache {\n\treturn s.noiseStaticKeyCache\n}\n\nfunc (s *secureSession) NoisePublicKey() [32]byte {\n\treturn s.noiseKeypair.publicKey\n}\n\nfunc (s *secureSession) NoisePrivateKey() [32]byte {\n\treturn s.noiseKeypair.privateKey\n}\n\nfunc (s *secureSession) readLength() (int, error) {\n\tbuf := make([]byte, 2)\n\t_, err := s.insecure.Read(buf)\n\treturn int(binary.BigEndian.Uint16(buf)), err\n}\n\nfunc (s *secureSession) writeLength(length int) error {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(length))\n\t_, err := s.insecure.Write(buf)\n\treturn err\n}\n\nfunc (s *secureSession) setRemotePeerInfo(key []byte) (err error) {\n\ts.remote.libp2pKey, err = crypto.UnmarshalPublicKey(key)\n\treturn err\n}\n\nfunc (s *secureSession) setRemotePeerID(key crypto.PubKey) (err error) {\n\ts.remotePeer, err = peer.IDFromPublicKey(key)\n\treturn err\n}\n\nfunc (s *secureSession) verifyPayload(payload *pb.NoiseHandshakePayload, noiseKey [32]byte) (err error) {\n\tsig := payload.GetNoiseStaticKeySignature()\n\tmsg := append([]byte(payload_string), noiseKey[:]...)\n\n\tlog.Debugf(\"verifyPayload msg=%x\", msg)\n\n\tok, err := s.RemotePublicKey().Verify(msg, sig)\n\tif err != nil {\n\t\treturn err\n\t} else if !ok {\n\t\treturn fmt.Errorf(\"did not verify payload\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *secureSession) runHandshake(ctx context.Context) error {\n\t\/\/ setup libp2p keys\n\tlocalKeyRaw, err := s.LocalPublicKey().Bytes()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"runHandshake err getting raw pubkey: %s\", err)\n\t}\n\n\t\/\/ sign noise data for payload\n\tnoise_pub := s.noiseKeypair.publicKey\n\tsignedPayload, err := s.localKey.Sign(append([]byte(payload_string), noise_pub[:]...))\n\tif err != nil {\n\t\tlog.Errorf(\"runHandshake signing payload err=%s\", err)\n\t\treturn fmt.Errorf(\"runHandshake signing payload err=%s\", err)\n\t}\n\n\t\/\/ create payload\n\tpayload := new(pb.NoiseHandshakePayload)\n\tpayload.Libp2PKey = localKeyRaw\n\tpayload.NoiseStaticKeySignature = signedPayload\n\tpayloadEnc, err := proto.Marshal(payload)\n\tif err != nil {\n\t\tlog.Errorf(\"runHandshake marshal payload err=%s\", err)\n\t\treturn fmt.Errorf(\"runHandshake proto marshal payload err=%s\", err)\n\t}\n\n\t\/\/ If we support Noise pipes, we try IK first, falling back to XX if IK fails.\n\t\/\/ The exception is when we're the initiator and don't know the other party's\n\t\/\/ static Noise key. Then IK will always fail, so we go straight to XX.\n\ttryIK := s.noisePipesSupport\n\tif s.initiator && s.noiseStaticKeyCache.Load(s.remotePeer) == [32]byte{} {\n\t\ttryIK = false\n\t}\n\tif tryIK {\n\t\t\/\/ we're either a responder or an initiator with a known static key for the remote peer, try IK\n\t\tbuf, err := s.runHandshake_ik(ctx, payloadEnc)\n\t\tif err != nil {\n\t\t\tlog.Error(\"runHandshake ik err=%s\", err)\n\n\t\t\t\/\/ IK failed, pipe to XXfallback\n\t\t\terr = s.runHandshake_xx(ctx, true, payloadEnc, buf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"runHandshake xx err=err\", err)\n\t\t\t\treturn fmt.Errorf(\"runHandshake xx err=%s\", err)\n\t\t\t}\n\n\t\t\ts.xx_complete = true\n\t\t} else {\n\t\t\ts.ik_complete = true\n\t\t}\n\t} else {\n\t\t\/\/ unknown static key for peer, try XX\n\t\terr := s.runHandshake_xx(ctx, false, payloadEnc, nil)\n\t\tif err != nil {\n\t\t\tlog.Error(\"runHandshake xx err=%s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\ts.xx_complete = true\n\t}\n\n\treturn nil\n}\n\nfunc (s *secureSession) LocalAddr() net.Addr {\n\treturn s.insecure.LocalAddr()\n}\n\nfunc (s *secureSession) LocalPeer() peer.ID {\n\treturn s.localPeer\n}\n\nfunc (s *secureSession) LocalPrivateKey() crypto.PrivKey {\n\treturn s.localKey\n}\n\nfunc (s *secureSession) LocalPublicKey() crypto.PubKey {\n\treturn s.localKey.GetPublic()\n}\n\nfunc (s *secureSession) Read(buf []byte) (int, error) {\n\tl := len(buf)\n\n\t\/\/ if we have previously unread bytes, and they fit into the buf, copy them over and return\n\tif l <= len(s.msgBuffer) {\n\t\tcopy(buf, s.msgBuffer)\n\t\ts.msgBuffer = s.msgBuffer[l:]\n\t\treturn l, nil\n\t}\n\n\treadChunk := func(buf []byte) (int, error) {\n\t\t\/\/ read length of encrypted message\n\t\tl, err := s.readLength()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ read and decrypt ciphertext\n\t\tciphertext := make([]byte, l)\n\t\t_, err = s.insecure.Read(ciphertext)\n\t\tif err != nil {\n\t\t\tlog.Error(\"read ciphertext err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\tplaintext, err := s.Decrypt(ciphertext)\n\t\tif err != nil {\n\t\t\tlog.Error(\"decrypt err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ append plaintext to message buffer, copy over what can fit in the buf\n\t\t\/\/ then advance message buffer to remove what was copied\n\t\ts.msgBuffer = append(s.msgBuffer, plaintext...)\n\t\tc := copy(buf, s.msgBuffer)\n\t\ts.msgBuffer = s.msgBuffer[c:]\n\t\treturn c, nil\n\t}\n\n\ttotal := 0\n\tfor i := 0; i < len(buf); i += maxPlaintextLength {\n\t\tend := i + maxPlaintextLength\n\t\tif end > len(buf) {\n\t\t\tend = len(buf)\n\t\t}\n\n\t\tc, err := readChunk(buf[i:end])\n\t\ttotal += c\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\n\treturn total, nil\n}\n\nfunc (s *secureSession) RemoteAddr() net.Addr {\n\treturn s.insecure.RemoteAddr()\n}\n\nfunc (s *secureSession) RemotePeer() peer.ID {\n\treturn s.remotePeer\n}\n\nfunc (s *secureSession) RemotePublicKey() crypto.PubKey {\n\treturn s.remote.libp2pKey\n}\n\nfunc (s *secureSession) SetDeadline(t time.Time) error {\n\treturn s.insecure.SetDeadline(t)\n}\n\nfunc (s *secureSession) SetReadDeadline(t time.Time) error {\n\treturn s.insecure.SetReadDeadline(t)\n}\n\nfunc (s *secureSession) SetWriteDeadline(t time.Time) error {\n\treturn s.insecure.SetWriteDeadline(t)\n}\n\nfunc (s *secureSession) Write(in []byte) (int, error) {\n\ts.rwLock.Lock()\n\tdefer s.rwLock.Unlock()\n\n\twriteChunk := func(in []byte) (int, error) {\n\t\tciphertext, err := s.Encrypt(in)\n\t\tif err != nil {\n\t\t\tlog.Error(\"encrypt error\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\terr = s.writeLength(len(ciphertext))\n\t\tif err != nil {\n\t\t\tlog.Error(\"write length err\", err)\n\t\t\treturn 0, err\n\t\t}\n\n\t\t_, err = s.insecure.Write(ciphertext)\n\t\treturn len(in), err\n\t}\n\n\twritten := 0\n\tfor i := 0; i < len(in); i += maxPlaintextLength {\n\t\tend := i + maxPlaintextLength\n\t\tif end > len(in) {\n\t\t\tend = len(in)\n\t\t}\n\n\t\tl, err := writeChunk(in[i:end])\n\t\twritten += l\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\treturn written, nil\n}\n\nfunc (s *secureSession) Close() error {\n\treturn s.insecure.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"fmt\"\n\n\tcontainer \"google.golang.org\/api\/container\/v1beta1\"\n)\n\ntype ContainerOperationWaiter struct {\n\tService *container.Service\n\tOp *container.Operation\n\tProject string\n\tLocation string\n}\n\nfunc (w *ContainerOperationWaiter) State() string {\n\tif w == nil || w.Op == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn w.Op.Status\n}\n\nfunc (w *ContainerOperationWaiter) Error() error {\n\tif w == nil || w.Op == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Error gets called during operation polling to see if there is an error.\n\t\/\/ Since container's operation doesn't have an \"error\" field, we must wait\n\t\/\/ until it's done and check the status message\n\tfor _, pending := range w.PendingStates() {\n\t\tif w.Op.Status == pending {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif w.Op.StatusMessage != \"\" {\n\t\treturn fmt.Errorf(w.Op.StatusMessage)\n\t}\n\n\treturn nil\n}\n\nfunc (w *ContainerOperationWaiter) SetOp(op interface{}) error {\n\tvar ok bool\n\tw.Op, ok = op.(*container.Operation)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Unable to set operation. Bad type!\")\n\t}\n\treturn nil\n}\n\nfunc (w *ContainerOperationWaiter) QueryOp() (interface{}, error) {\n\tif w == nil || w.Op == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot query operation, it's unset or nil.\")\n\t}\n\tname := fmt.Sprintf(\"projects\/%s\/locations\/%s\/operations\/%s\",\n\t\tw.Project, w.Location, w.Op.Name)\n\treturn w.Service.Projects.Locations.Operations.Get(name).Do()\n}\n\nfunc (w *ContainerOperationWaiter) OpName() string {\n\tif w == nil || w.Op == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn w.Op.Name\n}\n\nfunc (w *ContainerOperationWaiter) PendingStates() []string {\n\treturn []string{\"PENDING\", \"RUNNING\"}\n}\n\nfunc (w *ContainerOperationWaiter) TargetStates() []string {\n\treturn []string{\"DONE\"}\n}\n\nfunc containerOperationWait(config *Config, op *container.Operation, project, location, activity string, timeoutMinutes int) error {\n\tw := &ContainerOperationWaiter{\n\t\tService: config.clientContainerBeta,\n\t\tOp: op,\n\t\tProject: project,\n\t\tLocation: location,\n\t}\n\n\tif err := w.SetOp(op); err != nil {\n\t\treturn err\n\t}\n\n\treturn OperationWait(w, activity, timeoutMinutes)\n}\n<commit_msg>Add retries to container Operations (#3801)<commit_after>package google\n\nimport (\n\t\"fmt\"\n\n\tcontainer \"google.golang.org\/api\/container\/v1beta1\"\n)\n\ntype ContainerOperationWaiter struct {\n\tService *container.Service\n\tOp *container.Operation\n\tProject string\n\tLocation string\n}\n\nfunc (w *ContainerOperationWaiter) State() string {\n\tif w == nil || w.Op == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn w.Op.Status\n}\n\nfunc (w *ContainerOperationWaiter) Error() error {\n\tif w == nil || w.Op == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Error gets called during operation polling to see if there is an error.\n\t\/\/ Since container's operation doesn't have an \"error\" field, we must wait\n\t\/\/ until it's done and check the status message\n\tfor _, pending := range w.PendingStates() {\n\t\tif w.Op.Status == pending {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif w.Op.StatusMessage != \"\" {\n\t\treturn fmt.Errorf(w.Op.StatusMessage)\n\t}\n\n\treturn nil\n}\n\nfunc (w *ContainerOperationWaiter) SetOp(op interface{}) error {\n\tvar ok bool\n\tw.Op, ok = op.(*container.Operation)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Unable to set operation. Bad type!\")\n\t}\n\treturn nil\n}\n\nfunc (w *ContainerOperationWaiter) QueryOp() (interface{}, error) {\n\tif w == nil || w.Op == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot query operation, it's unset or nil.\")\n\t}\n\tname := fmt.Sprintf(\"projects\/%s\/locations\/%s\/operations\/%s\",\n\t\tw.Project, w.Location, w.Op.Name)\n\n\tvar op *container.Operation\n\terr := retryTimeDuration(func() (opErr error) {\n\t\top, opErr = w.Service.Projects.Locations.Operations.Get(name).Do()\n\t\treturn opErr\n\t}, DefaultRequestTimeout)\n\n\treturn op, err\n}\n\nfunc (w *ContainerOperationWaiter) OpName() string {\n\tif w == nil || w.Op == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn w.Op.Name\n}\n\nfunc (w *ContainerOperationWaiter) PendingStates() []string {\n\treturn []string{\"PENDING\", \"RUNNING\"}\n}\n\nfunc (w *ContainerOperationWaiter) TargetStates() []string {\n\treturn []string{\"DONE\"}\n}\n\nfunc containerOperationWait(config *Config, op *container.Operation, project, location, activity string, timeoutMinutes int) error {\n\tw := &ContainerOperationWaiter{\n\t\tService: config.clientContainerBeta,\n\t\tOp: op,\n\t\tProject: project,\n\t\tLocation: location,\n\t}\n\n\tif err := w.SetOp(op); err != nil {\n\t\treturn err\n\t}\n\n\treturn OperationWait(w, activity, timeoutMinutes)\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/jeffbmartinez\/log\"\n)\n\n\/\/ BasicResponse creates a handler which responds with a standard response\n\/\/ code and message string.\nfunc BasicResponse(code int) func(response http.ResponseWriter, request *http.Request) {\n\treturn func(response http.ResponseWriter, request *http.Request) {\n\t\tresponse.WriteHeader(code)\n\t\tresponse.Write([]byte(http.StatusText(code)))\n\t}\n}\n\n\/\/ WriteBasicResponse responds the the request with a BasicResponse\nfunc WriteBasicResponse(code int, response http.ResponseWriter) {\n\tBasicResponse(code)(response, nil)\n}\n\n\/*\nWriteJSONResponse writes a json response (with correct http header).\n*\/\nfunc WriteJSONResponse(response http.ResponseWriter, message interface{}, statusCode int) {\n\tresponseString, err := json.Marshal(message)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't marshal json: %v\", err)\n\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write([]byte(\"\"))\n\t\treturn\n\t}\n\n\tresponse.Header().Set(\"Content-Type\", \"application\/json\")\n\tresponse.WriteHeader(statusCode)\n\tresponse.Write([]byte(responseString))\n}\n<commit_msg>mini refactor<commit_after>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/jeffbmartinez\/log\"\n)\n\n\/\/ BasicResponse creates a handler which responds with a standard response\n\/\/ code and message string.\nfunc BasicResponse(code int) func(response http.ResponseWriter, request *http.Request) {\n\treturn func(response http.ResponseWriter, request *http.Request) {\n\t\tresponse.WriteHeader(code)\n\t\tresponse.Write([]byte(http.StatusText(code)))\n\t}\n}\n\n\/\/ WriteBasicResponse responds the the request with a BasicResponse\nfunc WriteBasicResponse(code int, response http.ResponseWriter) {\n\tBasicResponse(code)(response, nil)\n}\n\n\/*\nWriteJSONResponse writes a json response (with correct http header).\n*\/\nfunc WriteJSONResponse(response http.ResponseWriter, message interface{}, statusCode int) {\n\tresponseString, err := json.Marshal(message)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't marshal json: %v\", err)\n\n\t\tWriteBasicResponse(http.StatusInternalServerError, response)\n\t\treturn\n\t}\n\n\tresponse.Header().Set(\"Content-Type\", \"application\/json\")\n\tresponse.WriteHeader(statusCode)\n\tresponse.Write([]byte(responseString))\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/thermokarst\/bactdb\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n\t\"github.com\/thermokarst\/bactdb\/api\"\n\t\"github.com\/thermokarst\/bactdb\/helpers\"\n\t\"github.com\/thermokarst\/bactdb\/types\"\n)\n\nfunc handleGetter(g api.Getter) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\te, appErr := g.Get(id, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleLister(l api.Lister) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\topt := r.URL.Query()\n\t\topt.Add(\"Genus\", mux.Vars(r)[\"genus\"])\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tes, appErr := l.List(&opt, &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\t\tdata, err := es.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleUpdater(u api.Updater) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\te, err := u.Unmarshal(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := u.Update(id, &e, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleCreater(c api.Creater) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\te, err := c.Unmarshal(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := c.Create(&e, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleDeleter(d api.Deleter) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := d.Delete(id, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\treturn nil\n\t}\n}\n<commit_msg>Specify response status codes (delete, create)<commit_after>package handlers\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/thermokarst\/bactdb\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n\t\"github.com\/thermokarst\/bactdb\/api\"\n\t\"github.com\/thermokarst\/bactdb\/helpers\"\n\t\"github.com\/thermokarst\/bactdb\/types\"\n)\n\nfunc handleGetter(g api.Getter) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\te, appErr := g.Get(id, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleLister(l api.Lister) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\topt := r.URL.Query()\n\t\topt.Add(\"Genus\", mux.Vars(r)[\"genus\"])\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tes, appErr := l.List(&opt, &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\t\tdata, err := es.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleUpdater(u api.Updater) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\te, err := u.Unmarshal(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := u.Update(id, &e, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\t\tw.Write(data)\n\t\treturn nil\n\t}\n}\n\nfunc handleCreater(c api.Creater) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\te, err := c.Unmarshal(bodyBytes)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := c.Create(&e, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tdata, err := e.Marshal()\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write(data)\n\n\t\treturn nil\n\t}\n}\n\nfunc handleDeleter(d api.Deleter) errorHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) *types.AppError {\n\t\tid, err := strconv.ParseInt(mux.Vars(r)[\"ID\"], 10, 0)\n\t\tif err != nil {\n\t\t\treturn newJSONError(err, http.StatusInternalServerError)\n\t\t}\n\n\t\tclaims := helpers.GetClaims(r)\n\n\t\tappErr := d.Delete(id, mux.Vars(r)[\"genus\"], &claims)\n\t\tif appErr != nil {\n\t\t\treturn appErr\n\t\t}\n\n\t\tw.WriteHeader(http.StatusNoContent)\n\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/*\nThrottleTimer fires an event at most \"dur\" after each .Set() call.\nIf a short burst of .Set() calls happens, ThrottleTimer fires once.\nIf a long continuous burst of .Set() calls happens, ThrottleTimer fires\nat most once every \"dur\".\n*\/\ntype ThrottleTimer struct {\n\tName string\n\tCh chan struct{}\n\tquit chan struct{}\n\tdur time.Duration\n\ttimer *time.Timer\n\tisSet uint32\n}\n\nfunc NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer {\n\tvar ch = make(chan struct{})\n\tvar quit = make(chan struct{})\n\tvar t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit}\n\tt.timer = time.AfterFunc(dur, t.fireRoutine)\n\tt.timer.Stop()\n\treturn t\n}\n\nfunc (t *ThrottleTimer) fireRoutine() {\n\tselect {\n\tcase t.Ch <- struct{}{}:\n\t\tatomic.StoreUint32(&t.isSet, 0)\n\tcase <-t.quit:\n\t\t\/\/ do nothing\n\tdefault:\n\t\tt.timer.Reset(t.dur)\n\t}\n}\n\nfunc (t *ThrottleTimer) Set() {\n\tif atomic.CompareAndSwapUint32(&t.isSet, 0, 1) {\n\t\tt.timer.Reset(t.dur)\n\t}\n}\n\n\/\/ For ease of .Stop()'ing services before .Start()'ing them,\n\/\/ we ignore .Stop()'s on nil ThrottleTimers\nfunc (t *ThrottleTimer) Stop() bool {\n\tif t == nil {\n\t\treturn false\n\t}\n\tclose(t.quit)\n\treturn t.timer.Stop()\n}\n<commit_msg>Add ThrottleTimer.Unset<commit_after>package common\n\nimport (\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/*\nThrottleTimer fires an event at most \"dur\" after each .Set() call.\nIf a short burst of .Set() calls happens, ThrottleTimer fires once.\nIf a long continuous burst of .Set() calls happens, ThrottleTimer fires\nat most once every \"dur\".\n*\/\ntype ThrottleTimer struct {\n\tName string\n\tCh chan struct{}\n\tquit chan struct{}\n\tdur time.Duration\n\ttimer *time.Timer\n\tisSet uint32\n}\n\nfunc NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer {\n\tvar ch = make(chan struct{})\n\tvar quit = make(chan struct{})\n\tvar t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit}\n\tt.timer = time.AfterFunc(dur, t.fireRoutine)\n\tt.timer.Stop()\n\treturn t\n}\n\nfunc (t *ThrottleTimer) fireRoutine() {\n\tselect {\n\tcase t.Ch <- struct{}{}:\n\t\tatomic.StoreUint32(&t.isSet, 0)\n\tcase <-t.quit:\n\t\t\/\/ do nothing\n\tdefault:\n\t\tt.timer.Reset(t.dur)\n\t}\n}\n\nfunc (t *ThrottleTimer) Set() {\n\tif atomic.CompareAndSwapUint32(&t.isSet, 0, 1) {\n\t\tt.timer.Reset(t.dur)\n\t}\n}\n\nfunc (t *ThrottleTimer) Unset() {\n\tt.timer.Stop()\n}\n\n\/\/ For ease of .Stop()'ing services before .Start()'ing them,\n\/\/ we ignore .Stop()'s on nil ThrottleTimers\nfunc (t *ThrottleTimer) Stop() bool {\n\tif t == nil {\n\t\treturn false\n\t}\n\tclose(t.quit)\n\treturn t.timer.Stop()\n}\n<|endoftext|>"} {"text":"<commit_before>package binance\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\n\t\"github.com\/adshao\/go-binance\/common\"\n)\n\n\/\/ ListBookTickersService list best price\/qty on the order book for a symbol or symbols\ntype ListBookTickersService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListBookTickersService) Symbol(symbol string) *ListBookTickersService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListBookTickersService) Do(ctx context.Context, opts ...RequestOption) (res []*BookTicker, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/ticker\/bookTicker\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tdata = common.ToJSONList(data)\n\tif err != nil {\n\t\treturn []*BookTicker{}, err\n\t}\n\tres = make([]*BookTicker, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn []*BookTicker{}, err\n\t}\n\treturn res, nil\n}\n\n\/\/ BookTicker define book ticker info\ntype BookTicker struct {\n\tSymbol string `json:\"symbol\"`\n\tBidPrice string `json:\"bidPrice\"`\n\tBidQuantity string `json:\"bidQty\"`\n\tAskPrice string `json:\"askPrice\"`\n\tAskQuantity string `json:\"askQty\"`\n}\n\n\/\/ ListPricesService list latest price for a symbol or symbols\ntype ListPricesService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListPricesService) Symbol(symbol string) *ListPricesService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListPricesService) Do(ctx context.Context, opts ...RequestOption) (res []*SymbolPrice, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/ticker\/price\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn []*SymbolPrice{}, err\n\t}\n\tdata = common.ToJSONList(data)\n\tres = make([]*SymbolPrice, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn []*SymbolPrice{}, err\n\t}\n\treturn res, nil\n}\n\n\/\/ SymbolPrice define symbol and price pair\ntype SymbolPrice struct {\n\tSymbol string `json:\"symbol\"`\n\tPrice string `json:\"price\"`\n}\n\n\/\/ ListPriceChangeStatsService show stats of price change in last 24 hours for all symbols\ntype ListPriceChangeStatsService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListPriceChangeStatsService) Symbol(symbol string) *ListPriceChangeStatsService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListPriceChangeStatsService) Do(ctx context.Context, opts ...RequestOption) (res []*PriceChangeStats, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v1\/ticker\/24hr\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tdata = common.ToJSONList(data)\n\tres = make([]*PriceChangeStats, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ PriceChangeStats define price change stats\ntype PriceChangeStats struct {\n\tSymbol string `json:\"symbol\"`\n\tPriceChange string `json:\"priceChange\"`\n\tPriceChangePercent string `json:\"priceChangePercent\"`\n\tWeightedAvgPrice string `json:\"weightedAvgPrice\"`\n\tPrevClosePrice string `json:\"prevClosePrice\"`\n\tLastPrice string `json:\"lastPrice\"`\n\tBidPrice string `json:\"bidPrice\"`\n\tAskPrice string `json:\"askPrice\"`\n\tOpenPrice string `json:\"openPrice\"`\n\tHighPrice string `json:\"highPrice\"`\n\tLowPrice string `json:\"lowPrice\"`\n\tVolume string `json:\"volume\"`\n\tQuoteVolume string `json:\"quoteVolume\"`\n\tOpenTime int64 `json:\"openTime\"`\n\tCloseTime int64 `json:\"closeTime\"`\n\tFristID int64 `json:\"firstId\"`\n\tLastID int64 `json:\"lastId\"`\n\tCount int64 `json:\"count\"`\n}\n\n\/\/ AveragePriceService show current average price for a symbol\ntype AveragePriceService struct {\n\tc *Client\n\tsymbol string\n}\n\n\/\/ Symbol set symbol\nfunc (s *AveragePriceService) Symbol(symbol string) *AveragePriceService {\n\ts.symbol = symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *AveragePriceService) Do(ctx context.Context, opts ...RequestOption) (res *AvgPrice, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/avgPrice\",\n\t}\n\tr.setParam(\"symbol\", s.symbol)\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tres = new(AvgPrice)\n\terr = json.Unmarshal(data, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ AvgPrice define average price\ntype AvgPrice struct {\n\tMins int64 `json:\"mins\"`\n\tPrice string `json:\"price\"`\n}\n<commit_msg>Old endpoint: \"\/api\/v1\/ticker\/24hr\" replaced with a new endpoint: \"\/api\/v3\/ticker\/24hr\" according to documentation: \"24hr Ticker Price Change Statistics\" (#110)<commit_after>package binance\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\n\t\"github.com\/adshao\/go-binance\/common\"\n)\n\n\/\/ ListBookTickersService list best price\/qty on the order book for a symbol or symbols\ntype ListBookTickersService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListBookTickersService) Symbol(symbol string) *ListBookTickersService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListBookTickersService) Do(ctx context.Context, opts ...RequestOption) (res []*BookTicker, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/ticker\/bookTicker\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tdata = common.ToJSONList(data)\n\tif err != nil {\n\t\treturn []*BookTicker{}, err\n\t}\n\tres = make([]*BookTicker, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn []*BookTicker{}, err\n\t}\n\treturn res, nil\n}\n\n\/\/ BookTicker define book ticker info\ntype BookTicker struct {\n\tSymbol string `json:\"symbol\"`\n\tBidPrice string `json:\"bidPrice\"`\n\tBidQuantity string `json:\"bidQty\"`\n\tAskPrice string `json:\"askPrice\"`\n\tAskQuantity string `json:\"askQty\"`\n}\n\n\/\/ ListPricesService list latest price for a symbol or symbols\ntype ListPricesService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListPricesService) Symbol(symbol string) *ListPricesService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListPricesService) Do(ctx context.Context, opts ...RequestOption) (res []*SymbolPrice, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/ticker\/price\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn []*SymbolPrice{}, err\n\t}\n\tdata = common.ToJSONList(data)\n\tres = make([]*SymbolPrice, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn []*SymbolPrice{}, err\n\t}\n\treturn res, nil\n}\n\n\/\/ SymbolPrice define symbol and price pair\ntype SymbolPrice struct {\n\tSymbol string `json:\"symbol\"`\n\tPrice string `json:\"price\"`\n}\n\n\/\/ ListPriceChangeStatsService show stats of price change in last 24 hours for all symbols\ntype ListPriceChangeStatsService struct {\n\tc *Client\n\tsymbol *string\n}\n\n\/\/ Symbol set symbol\nfunc (s *ListPriceChangeStatsService) Symbol(symbol string) *ListPriceChangeStatsService {\n\ts.symbol = &symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *ListPriceChangeStatsService) Do(ctx context.Context, opts ...RequestOption) (res []*PriceChangeStats, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/ticker\/24hr\",\n\t}\n\tif s.symbol != nil {\n\t\tr.setParam(\"symbol\", *s.symbol)\n\t}\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tdata = common.ToJSONList(data)\n\tres = make([]*PriceChangeStats, 0)\n\terr = json.Unmarshal(data, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ PriceChangeStats define price change stats\ntype PriceChangeStats struct {\n\tSymbol string `json:\"symbol\"`\n\tPriceChange string `json:\"priceChange\"`\n\tPriceChangePercent string `json:\"priceChangePercent\"`\n\tWeightedAvgPrice string `json:\"weightedAvgPrice\"`\n\tPrevClosePrice string `json:\"prevClosePrice\"`\n\tLastPrice string `json:\"lastPrice\"`\n\tBidPrice string `json:\"bidPrice\"`\n\tAskPrice string `json:\"askPrice\"`\n\tOpenPrice string `json:\"openPrice\"`\n\tHighPrice string `json:\"highPrice\"`\n\tLowPrice string `json:\"lowPrice\"`\n\tVolume string `json:\"volume\"`\n\tQuoteVolume string `json:\"quoteVolume\"`\n\tOpenTime int64 `json:\"openTime\"`\n\tCloseTime int64 `json:\"closeTime\"`\n\tFristID int64 `json:\"firstId\"`\n\tLastID int64 `json:\"lastId\"`\n\tCount int64 `json:\"count\"`\n}\n\n\/\/ AveragePriceService show current average price for a symbol\ntype AveragePriceService struct {\n\tc *Client\n\tsymbol string\n}\n\n\/\/ Symbol set symbol\nfunc (s *AveragePriceService) Symbol(symbol string) *AveragePriceService {\n\ts.symbol = symbol\n\treturn s\n}\n\n\/\/ Do send request\nfunc (s *AveragePriceService) Do(ctx context.Context, opts ...RequestOption) (res *AvgPrice, err error) {\n\tr := &request{\n\t\tmethod: \"GET\",\n\t\tendpoint: \"\/api\/v3\/avgPrice\",\n\t}\n\tr.setParam(\"symbol\", s.symbol)\n\tdata, err := s.c.callAPI(ctx, r, opts...)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tres = new(AvgPrice)\n\terr = json.Unmarshal(data, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ AvgPrice define average price\ntype AvgPrice struct {\n\tMins int64 `json:\"mins\"`\n\tPrice string `json:\"price\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\/debug\"\n\t\"time\"\n\n\t\"github.com\/TimothyYe\/godns\"\n\t\"golang.org\/x\/net\/proxy\"\n)\n\n\/\/ CloudflareHandler struct definition\ntype CloudflareHandler struct {\n\tConfiguration *godns.Settings\n\tAPI string\n}\n\n\/\/ DNSRecordResponse struct\ntype DNSRecordResponse struct {\n\tRecords []DNSRecord `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ DNSRecordUpdateResponse struct\ntype DNSRecordUpdateResponse struct {\n\tRecord DNSRecord `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ DNSRecord for Cloudflare API\ntype DNSRecord struct {\n\tID string `json:\"id\"`\n\tIP string `json:\"content\"`\n\tName string `json:\"name\"`\n\tProxied bool `json:\"proxied\"`\n\tType string `json:\"type\"`\n\tZoneID string `json:\"zone_id\"`\n}\n\n\/\/ SetIP updates DNSRecord.IP\nfunc (r *DNSRecord) SetIP(ip string) {\n\tr.IP = ip\n}\n\n\/\/ ZoneResponse is a wrapper for Zones\ntype ZoneResponse struct {\n\tZones []Zone `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ Zone object with id and name\ntype Zone struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ SetConfiguration pass dns settings and store it to handler instance\nfunc (handler *CloudflareHandler) SetConfiguration(conf *godns.Settings) {\n\thandler.Configuration = conf\n\thandler.API = \"https:\/\/api.cloudflare.com\/client\/v4\"\n}\n\n\/\/ DomainLoop the main logic loop\nfunc (handler *CloudflareHandler) DomainLoop(domain *godns.Domain, panicChan chan<- godns.Domain) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Printf(\"Recovered in %v: %v\\n\", err, debug.Stack())\n\t\t\tpanicChan <- *domain\n\t\t}\n\t}()\n\n\tvar lastIP string\n\tfor {\n\t\tcurrentIP, err := godns.GetCurrentIP(handler.Configuration)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error in GetCurrentIP:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Current IP is:\", currentIP)\n\t\t\/\/check against locally cached IP, if no change, skip update\n\t\tif currentIP == lastIP {\n\t\t\tlog.Printf(\"IP is the same as cached one. Skip update.\\n\")\n\t\t} else {\n\t\t\tlastIP = currentIP\n\n\t\t\tlog.Println(\"Checking IP for domain\", domain.DomainName)\n\t\t\tzoneID := handler.getZone(domain.DomainName)\n\t\t\tif zoneID != \"\" {\n\t\t\t\trecords := handler.getDNSRecords(zoneID)\n\n\t\t\t\t\/\/ update records\n\t\t\t\tfor _, rec := range records {\n\t\t\t\t\tif !recordTracked(domain, &rec) {\n\t\t\t\t\t\tlog.Println(\"Skiping record:\", rec.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif rec.IP != currentIP {\n\t\t\t\t\t\tlog.Printf(\"IP mismatch: Current(%+v) vs Cloudflare(%+v)\\r\\n\", currentIP, rec.IP)\n\t\t\t\t\t\thandler.updateRecord(rec, currentIP)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"Record OK: %+v - %+v\\r\\n\", rec.Name, rec.IP)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Failed to find zone for domain:\", domain.DomainName)\n\t\t\t}\n\t\t}\n\t\t\/\/ Interval is 5 minutes\n\t\tlog.Printf(\"Going to sleep, will start next checking in %d minutes...\\r\\n\", godns.INTERVAL)\n\t\ttime.Sleep(time.Minute * godns.INTERVAL)\n\t}\n}\n\n\/\/ Check if record is present in domain conf\nfunc recordTracked(domain *godns.Domain, record *DNSRecord) bool {\n\tfor _, subDomain := range domain.SubDomains {\n\t\tsd := fmt.Sprintf(\"%s.%s\", subDomain, domain.DomainName)\n\t\tif record.Name == sd {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Create a new request with auth in place and optional proxy\nfunc (handler *CloudflareHandler) newRequest(method, url string, body io.Reader) (*http.Request, *http.Client) {\n\tclient := &http.Client{}\n\n\tif handler.Configuration.Socks5Proxy != \"\" {\n\t\tlog.Println(\"use socks5 proxy:\" + handler.Configuration.Socks5Proxy)\n\t\tdialer, err := proxy.SOCKS5(\"tcp\", handler.Configuration.Socks5Proxy, nil, proxy.Direct)\n\t\tif err != nil {\n\t\t\tlog.Println(\"can't connect to the proxy:\", err)\n\t\t} else {\n\t\t\thttpTransport := &http.Transport{}\n\t\t\tclient.Transport = httpTransport\n\t\t\thttpTransport.Dial = dialer.Dial\n\t\t}\n\t}\n\n\treq, _ := http.NewRequest(method, handler.API+url, body)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\treq.Header.Set(\"X-Auth-Email\", handler.Configuration.Email)\n\treq.Header.Set(\"X-Auth-Key\", handler.Configuration.Password)\n\treturn req, client\n}\n\n\/\/ Find the correct zone via domain name\nfunc (handler *CloudflareHandler) getZone(domain string) string {\n\n\tvar z ZoneResponse\n\n\treq, client := handler.newRequest(\"GET\", \"\/zones\", nil)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn \"\"\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &z)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn \"\"\n\t}\n\tif z.Success != true {\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t\treturn \"\"\n\t}\n\n\tfor _, zone := range z.Zones {\n\t\tif zone.Name == domain {\n\t\t\treturn zone.ID\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Get all DNS A records for a zone\nfunc (handler *CloudflareHandler) getDNSRecords(zoneID string) []DNSRecord {\n\n\tvar empty []DNSRecord\n\tvar r DNSRecordResponse\n\n\treq, client := handler.newRequest(\"GET\", \"\/zones\/\"+zoneID+\"\/dns_records?type=A\", nil)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn empty\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn empty\n\t}\n\tif r.Success != true {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t\treturn empty\n\n\t}\n\treturn r.Records\n}\n\n\/\/ Update DNS A Record with new IP\nfunc (handler *CloudflareHandler) updateRecord(record DNSRecord, newIP string) {\n\n\tvar r DNSRecordUpdateResponse\n\trecord.SetIP(newIP)\n\n\tj, _ := json.Marshal(record)\n\treq, client := handler.newRequest(\"PUT\",\n\t\t\"\/zones\/\"+record.ZoneID+\"\/dns_records\/\"+record.ID,\n\t\tbytes.NewBuffer(j),\n\t)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn\n\t}\n\tif r.Success != true {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t} else {\n\t\tlog.Printf(\"Record updated: %+v - %+v\", record.Name, record.IP)\n\t}\n}\n<commit_msg>send mail notification<commit_after>package handler\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\/debug\"\n\t\"time\"\n\n\t\"github.com\/TimothyYe\/godns\"\n\t\"golang.org\/x\/net\/proxy\"\n)\n\n\/\/ CloudflareHandler struct definition\ntype CloudflareHandler struct {\n\tConfiguration *godns.Settings\n\tAPI string\n}\n\n\/\/ DNSRecordResponse struct\ntype DNSRecordResponse struct {\n\tRecords []DNSRecord `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ DNSRecordUpdateResponse struct\ntype DNSRecordUpdateResponse struct {\n\tRecord DNSRecord `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ DNSRecord for Cloudflare API\ntype DNSRecord struct {\n\tID string `json:\"id\"`\n\tIP string `json:\"content\"`\n\tName string `json:\"name\"`\n\tProxied bool `json:\"proxied\"`\n\tType string `json:\"type\"`\n\tZoneID string `json:\"zone_id\"`\n}\n\n\/\/ SetIP updates DNSRecord.IP\nfunc (r *DNSRecord) SetIP(ip string) {\n\tr.IP = ip\n}\n\n\/\/ ZoneResponse is a wrapper for Zones\ntype ZoneResponse struct {\n\tZones []Zone `json:\"result\"`\n\tSuccess bool `json:\"success\"`\n}\n\n\/\/ Zone object with id and name\ntype Zone struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ SetConfiguration pass dns settings and store it to handler instance\nfunc (handler *CloudflareHandler) SetConfiguration(conf *godns.Settings) {\n\thandler.Configuration = conf\n\thandler.API = \"https:\/\/api.cloudflare.com\/client\/v4\"\n}\n\n\/\/ DomainLoop the main logic loop\nfunc (handler *CloudflareHandler) DomainLoop(domain *godns.Domain, panicChan chan<- godns.Domain) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Printf(\"Recovered in %v: %v\\n\", err, debug.Stack())\n\t\t\tpanicChan <- *domain\n\t\t}\n\t}()\n\n\tvar lastIP string\n\tfor {\n\t\tcurrentIP, err := godns.GetCurrentIP(handler.Configuration)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error in GetCurrentIP:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Current IP is:\", currentIP)\n\t\t\/\/check against locally cached IP, if no change, skip update\n\t\tif currentIP == lastIP {\n\t\t\tlog.Printf(\"IP is the same as cached one. Skip update.\\n\")\n\t\t} else {\n\t\t\tlastIP = currentIP\n\n\t\t\tlog.Println(\"Checking IP for domain\", domain.DomainName)\n\t\t\tzoneID := handler.getZone(domain.DomainName)\n\t\t\tif zoneID != \"\" {\n\t\t\t\trecords := handler.getDNSRecords(zoneID)\n\n\t\t\t\t\/\/ update records\n\t\t\t\tfor _, rec := range records {\n\t\t\t\t\tif !recordTracked(domain, &rec) {\n\t\t\t\t\t\tlog.Println(\"Skiping record:\", rec.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif rec.IP != currentIP {\n\t\t\t\t\t\tlog.Printf(\"IP mismatch: Current(%+v) vs Cloudflare(%+v)\\r\\n\", currentIP, rec.IP)\n\t\t\t\t\t\thandler.updateRecord(rec, currentIP)\n\n\t\t\t\t\t\t\/\/ Send mail notification if notify is enabled\n\t\t\t\t\t\tif handler.Configuration.Notify.Enabled {\n\t\t\t\t\t\t\tlog.Print(\"Sending notification to:\", handler.Configuration.Notify.SendTo)\n\t\t\t\t\t\t\tgodns.SendNotify(handler.Configuration, rec.Name, currentIP)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"Record OK: %+v - %+v\\r\\n\", rec.Name, rec.IP)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Failed to find zone for domain:\", domain.DomainName)\n\t\t\t}\n\t\t}\n\t\t\/\/ Interval is 5 minutes\n\t\tlog.Printf(\"Going to sleep, will start next checking in %d minutes...\\r\\n\", godns.INTERVAL)\n\t\ttime.Sleep(time.Minute * godns.INTERVAL)\n\t}\n}\n\n\/\/ Check if record is present in domain conf\nfunc recordTracked(domain *godns.Domain, record *DNSRecord) bool {\n\tfor _, subDomain := range domain.SubDomains {\n\t\tsd := fmt.Sprintf(\"%s.%s\", subDomain, domain.DomainName)\n\t\tif record.Name == sd {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Create a new request with auth in place and optional proxy\nfunc (handler *CloudflareHandler) newRequest(method, url string, body io.Reader) (*http.Request, *http.Client) {\n\tclient := &http.Client{}\n\n\tif handler.Configuration.Socks5Proxy != \"\" {\n\t\tlog.Println(\"use socks5 proxy:\" + handler.Configuration.Socks5Proxy)\n\t\tdialer, err := proxy.SOCKS5(\"tcp\", handler.Configuration.Socks5Proxy, nil, proxy.Direct)\n\t\tif err != nil {\n\t\t\tlog.Println(\"can't connect to the proxy:\", err)\n\t\t} else {\n\t\t\thttpTransport := &http.Transport{}\n\t\t\tclient.Transport = httpTransport\n\t\t\thttpTransport.Dial = dialer.Dial\n\t\t}\n\t}\n\n\treq, _ := http.NewRequest(method, handler.API+url, body)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\treq.Header.Set(\"X-Auth-Email\", handler.Configuration.Email)\n\treq.Header.Set(\"X-Auth-Key\", handler.Configuration.Password)\n\treturn req, client\n}\n\n\/\/ Find the correct zone via domain name\nfunc (handler *CloudflareHandler) getZone(domain string) string {\n\n\tvar z ZoneResponse\n\n\treq, client := handler.newRequest(\"GET\", \"\/zones\", nil)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn \"\"\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &z)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn \"\"\n\t}\n\tif z.Success != true {\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t\treturn \"\"\n\t}\n\n\tfor _, zone := range z.Zones {\n\t\tif zone.Name == domain {\n\t\t\treturn zone.ID\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Get all DNS A records for a zone\nfunc (handler *CloudflareHandler) getDNSRecords(zoneID string) []DNSRecord {\n\n\tvar empty []DNSRecord\n\tvar r DNSRecordResponse\n\n\treq, client := handler.newRequest(\"GET\", \"\/zones\/\"+zoneID+\"\/dns_records?type=A\", nil)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn empty\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn empty\n\t}\n\tif r.Success != true {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t\treturn empty\n\n\t}\n\treturn r.Records\n}\n\n\/\/ Update DNS A Record with new IP\nfunc (handler *CloudflareHandler) updateRecord(record DNSRecord, newIP string) {\n\n\tvar r DNSRecordUpdateResponse\n\trecord.SetIP(newIP)\n\n\tj, _ := json.Marshal(record)\n\treq, client := handler.newRequest(\"PUT\",\n\t\t\"\/zones\/\"+record.ZoneID+\"\/dns_records\/\"+record.ID,\n\t\tbytes.NewBuffer(j),\n\t)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Request error:\", err.Error())\n\t\treturn\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\tlog.Printf(\"Decoder error: %+v\\n\", err)\n\t\tlog.Printf(\"Response body: %+v\\n\", string(body))\n\t\treturn\n\t}\n\tif r.Success != true {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tlog.Printf(\"Response failed: %+v\\n\", string(body))\n\t} else {\n\t\tlog.Printf(\"Record updated: %+v - %+v\", record.Name, record.IP)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package communicator\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\tpackerssh \"github.com\/hashicorp\/packer\/communicator\/ssh\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\thelperssh \"github.com\/hashicorp\/packer\/helper\/ssh\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/masterzen\/winrm\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n)\n\n\/\/ Config is the common configuration that communicators allow within\n\/\/ a builder.\ntype Config struct {\n\tType string `mapstructure:\"communicator\"`\n\n\t\/\/ SSH\n\tSSHHost string `mapstructure:\"ssh_host\"`\n\tSSHPort int `mapstructure:\"ssh_port\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPassword string `mapstructure:\"ssh_password\"`\n\tSSHPublicKey []byte `mapstructure:\"ssh_public_key\"`\n\tSSHPrivateKey []byte `mapstructure:\"ssh_private_key\"`\n\tSSHKeyPairName string `mapstructure:\"ssh_keypair_name\"`\n\tSSHTemporaryKeyPairName string `mapstructure:\"temporary_key_pair_name\"`\n\tSSHClearAuthorizedKeys bool `mapstructure:\"ssh_clear_authorized_keys\"`\n\tSSHPrivateKeyFile string `mapstructure:\"ssh_private_key_file\"`\n\tSSHInterface string `mapstructure:\"ssh_interface\"`\n\tSSHIPVersion string `mapstructure:\"ssh_ip_version\"`\n\tSSHPty bool `mapstructure:\"ssh_pty\"`\n\tSSHTimeout time.Duration `mapstructure:\"ssh_timeout\"`\n\tSSHAgentAuth bool `mapstructure:\"ssh_agent_auth\"`\n\tSSHDisableAgentForwarding bool `mapstructure:\"ssh_disable_agent_forwarding\"`\n\tSSHHandshakeAttempts int `mapstructure:\"ssh_handshake_attempts\"`\n\tSSHBastionHost string `mapstructure:\"ssh_bastion_host\"`\n\tSSHBastionPort int `mapstructure:\"ssh_bastion_port\"`\n\tSSHBastionAgentAuth bool `mapstructure:\"ssh_bastion_agent_auth\"`\n\tSSHBastionUsername string `mapstructure:\"ssh_bastion_username\"`\n\tSSHBastionPassword string `mapstructure:\"ssh_bastion_password\"`\n\tSSHBastionPrivateKeyFile string `mapstructure:\"ssh_bastion_private_key_file\"`\n\tSSHFileTransferMethod string `mapstructure:\"ssh_file_transfer_method\"`\n\tSSHProxyHost string `mapstructure:\"ssh_proxy_host\"`\n\tSSHProxyPort int `mapstructure:\"ssh_proxy_port\"`\n\tSSHProxyUsername string `mapstructure:\"ssh_proxy_username\"`\n\tSSHProxyPassword string `mapstructure:\"ssh_proxy_password\"`\n\tSSHKeepAliveInterval time.Duration `mapstructure:\"ssh_keep_alive_interval\"`\n\tSSHReadWriteTimeout time.Duration `mapstructure:\"ssh_read_write_timeout\"`\n\n\t\/\/ WinRM\n\tWinRMUser string `mapstructure:\"winrm_username\"`\n\tWinRMPassword string `mapstructure:\"winrm_password\"`\n\tWinRMHost string `mapstructure:\"winrm_host\"`\n\tWinRMPort int `mapstructure:\"winrm_port\"`\n\tWinRMTimeout time.Duration `mapstructure:\"winrm_timeout\"`\n\tWinRMUseSSL bool `mapstructure:\"winrm_use_ssl\"`\n\tWinRMInsecure bool `mapstructure:\"winrm_insecure\"`\n\tWinRMUseNTLM bool `mapstructure:\"winrm_use_ntlm\"`\n\tWinRMTransportDecorator func() winrm.Transporter\n}\n\n\/\/ ReadSSHPrivateKeyFile returns the SSH private key bytes\nfunc (c *Config) ReadSSHPrivateKeyFile() ([]byte, error) {\n\tvar privateKey []byte\n\n\tif c.SSHPrivateKeyFile != \"\" {\n\t\tkeyPath, err := homedir.Expand(c.SSHPrivateKeyFile)\n\t\tif err != nil {\n\t\t\treturn privateKey, fmt.Errorf(\"Error expanding path for SSH private key: %s\", err)\n\t\t}\n\t\tprivateKey, err = ioutil.ReadFile(keyPath)\n\t\tif err != nil {\n\t\t\treturn privateKey, fmt.Errorf(\"Error on reading SSH private key: %s\", err)\n\t\t}\n\t}\n\treturn privateKey, nil\n}\n\n\/\/ SSHConfigFunc returns a function that can be used for the SSH communicator\n\/\/ config for connecting to the instance created over SSH using the private key\n\/\/ or password.\nfunc (c *Config) SSHConfigFunc() func(multistep.StateBag) (*ssh.ClientConfig, error) {\n\treturn func(state multistep.StateBag) (*ssh.ClientConfig, error) {\n\t\tsshConfig := &ssh.ClientConfig{\n\t\t\tUser: c.SSHUsername,\n\t\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t\t}\n\n\t\tif c.SSHAgentAuth {\n\t\t\tauthSock := os.Getenv(\"SSH_AUTH_SOCK\")\n\t\t\tif authSock == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"SSH_AUTH_SOCK is not set\")\n\t\t\t}\n\n\t\t\tsshAgent, err := net.Dial(\"unix\", authSock)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot connect to SSH Agent socket %q: %s\", authSock, err)\n\t\t\t}\n\n\t\t\tsshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))\n\t\t}\n\n\t\tvar privateKeys [][]byte\n\t\tif c.SSHPrivateKeyFile != \"\" {\n\t\t\tprivateKey, err := c.ReadSSHPrivateKeyFile()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tprivateKeys = append(privateKeys, privateKey)\n\t\t}\n\n\t\t\/\/ aws,alicloud,cloudstack,digitalOcean,oneAndOne,openstack,oracle & profitbricks key\n\t\tif iKey, hasKey := state.GetOk(\"privateKey\"); hasKey {\n\t\t\tprivateKeys = append(privateKeys, []byte(iKey.(string)))\n\t\t}\n\n\t\tif len(c.SSHPrivateKey) != 0 {\n\t\t\tprivateKeys = append(privateKeys, c.SSHPrivateKey)\n\t\t}\n\n\t\tfor _, key := range privateKeys {\n\t\t\tsigner, err := ssh.ParsePrivateKey(key)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error setting up SSH config: %s\", err)\n\t\t\t}\n\t\t\tsshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))\n\t\t}\n\n\t\tif c.SSHPassword != \"\" {\n\t\t\tsshConfig.Auth = append(sshConfig.Auth,\n\t\t\t\tssh.Password(c.SSHPassword),\n\t\t\t\tssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(c.SSHPassword)),\n\t\t\t)\n\t\t}\n\t\treturn sshConfig, nil\n\t}\n}\n\n\/\/ Port returns the port that will be used for access based on config.\nfunc (c *Config) Port() int {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHPort\n\tcase \"winrm\":\n\t\treturn c.WinRMPort\n\tdefault:\n\t\treturn 0\n\t}\n}\n\n\/\/ Host returns the port that will be used for access based on config.\nfunc (c *Config) Host() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHHost\n\tcase \"winrm\":\n\t\treturn c.WinRMHost\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ User returns the port that will be used for access based on config.\nfunc (c *Config) User() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHUsername\n\tcase \"winrm\":\n\t\treturn c.WinRMUser\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ Password returns the port that will be used for access based on config.\nfunc (c *Config) Password() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHPassword\n\tcase \"winrm\":\n\t\treturn c.WinRMPassword\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc (c *Config) Prepare(ctx *interpolate.Context) []error {\n\tif c.Type == \"\" {\n\t\tc.Type = \"ssh\"\n\t}\n\n\tvar errs []error\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\tif es := c.prepareSSH(ctx); len(es) > 0 {\n\t\t\terrs = append(errs, es...)\n\t\t}\n\tcase \"winrm\":\n\t\tif es := c.prepareWinRM(ctx); len(es) > 0 {\n\t\t\terrs = append(errs, es...)\n\t\t}\n\tcase \"docker\", \"none\":\n\t\tbreak\n\tdefault:\n\t\treturn []error{fmt.Errorf(\"Communicator type %s is invalid\", c.Type)}\n\t}\n\n\treturn errs\n}\n\nfunc (c *Config) prepareSSH(ctx *interpolate.Context) []error {\n\tif c.SSHPort == 0 {\n\t\tc.SSHPort = 22\n\t}\n\n\tif c.SSHTimeout == 0 {\n\t\tc.SSHTimeout = 5 * time.Minute\n\t}\n\n\tif c.SSHKeepAliveInterval == 0 {\n\t\tc.SSHKeepAliveInterval = 5 * time.Second\n\t}\n\n\tif c.SSHHandshakeAttempts == 0 {\n\t\tc.SSHHandshakeAttempts = 10\n\t}\n\n\tif c.SSHBastionHost != \"\" {\n\t\tif c.SSHBastionPort == 0 {\n\t\t\tc.SSHBastionPort = 22\n\t\t}\n\n\t\tif c.SSHBastionPrivateKeyFile == \"\" && c.SSHPrivateKeyFile != \"\" {\n\t\t\tc.SSHBastionPrivateKeyFile = c.SSHPrivateKeyFile\n\t\t}\n\t}\n\n\tif c.SSHProxyHost != \"\" {\n\t\tif c.SSHProxyPort == 0 {\n\t\t\tc.SSHProxyPort = 1080\n\t\t}\n\t}\n\n\tif c.SSHFileTransferMethod == \"\" {\n\t\tc.SSHFileTransferMethod = \"scp\"\n\t}\n\n\t\/\/ Validation\n\tvar errs []error\n\tif c.SSHUsername == \"\" {\n\t\terrs = append(errs, errors.New(\"An ssh_username must be specified\\n Note: some builders used to default ssh_username to \\\"root\\\".\"))\n\t}\n\n\tif c.SSHPrivateKeyFile != \"\" {\n\t\tpath, err := homedir.Expand(c.SSHPrivateKeyFile)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t} else if _, err := os.Stat(path); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t} else if _, err := helperssh.FileSigner(path); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t}\n\t}\n\n\tif c.SSHBastionHost != \"\" && !c.SSHBastionAgentAuth {\n\t\tif c.SSHBastionPassword == \"\" && c.SSHBastionPrivateKeyFile == \"\" {\n\t\t\terrs = append(errs, errors.New(\n\t\t\t\t\"ssh_bastion_password or ssh_bastion_private_key_file must be specified\"))\n\t\t} else if c.SSHBastionPrivateKeyFile != \"\" {\n\t\t\tpath, err := homedir.Expand(c.SSHBastionPrivateKeyFile)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t} else if _, err := os.Stat(path); err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t} else if _, err := helperssh.FileSigner(path); err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.SSHFileTransferMethod != \"scp\" && c.SSHFileTransferMethod != \"sftp\" {\n\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\"ssh_file_transfer_method ('%s') is invalid, valid methods: sftp, scp\",\n\t\t\tc.SSHFileTransferMethod))\n\t}\n\n\tif c.SSHBastionHost != \"\" && c.SSHProxyHost != \"\" {\n\t\terrs = append(errs, errors.New(\"please specify either ssh_bastion_host or ssh_proxy_host, not both\"))\n\t}\n\n\treturn errs\n}\n\nfunc (c *Config) prepareWinRM(ctx *interpolate.Context) []error {\n\tif c.WinRMPort == 0 && c.WinRMUseSSL {\n\t\tc.WinRMPort = 5986\n\t} else if c.WinRMPort == 0 {\n\t\tc.WinRMPort = 5985\n\t}\n\n\tif c.WinRMTimeout == 0 {\n\t\tc.WinRMTimeout = 30 * time.Minute\n\t}\n\n\tif c.WinRMUseNTLM == true {\n\t\tc.WinRMTransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} }\n\t}\n\n\tvar errs []error\n\tif c.WinRMUser == \"\" {\n\t\terrs = append(errs, errors.New(\"winrm_username must be specified.\"))\n\t}\n\n\treturn errs\n}\n<commit_msg>communicator\/ssh: make ssh keys payload internal<commit_after>package communicator\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\tpackerssh \"github.com\/hashicorp\/packer\/communicator\/ssh\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\thelperssh \"github.com\/hashicorp\/packer\/helper\/ssh\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/masterzen\/winrm\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n)\n\n\/\/ Config is the common configuration that communicators allow within\n\/\/ a builder.\ntype Config struct {\n\tType string `mapstructure:\"communicator\"`\n\n\t\/\/ SSH\n\tSSHHost string `mapstructure:\"ssh_host\"`\n\tSSHPort int `mapstructure:\"ssh_port\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPassword string `mapstructure:\"ssh_password\"`\n\tSSHKeyPairName string `mapstructure:\"ssh_keypair_name\"`\n\tSSHTemporaryKeyPairName string `mapstructure:\"temporary_key_pair_name\"`\n\tSSHClearAuthorizedKeys bool `mapstructure:\"ssh_clear_authorized_keys\"`\n\tSSHPrivateKeyFile string `mapstructure:\"ssh_private_key_file\"`\n\tSSHInterface string `mapstructure:\"ssh_interface\"`\n\tSSHIPVersion string `mapstructure:\"ssh_ip_version\"`\n\tSSHPty bool `mapstructure:\"ssh_pty\"`\n\tSSHTimeout time.Duration `mapstructure:\"ssh_timeout\"`\n\tSSHAgentAuth bool `mapstructure:\"ssh_agent_auth\"`\n\tSSHDisableAgentForwarding bool `mapstructure:\"ssh_disable_agent_forwarding\"`\n\tSSHHandshakeAttempts int `mapstructure:\"ssh_handshake_attempts\"`\n\tSSHBastionHost string `mapstructure:\"ssh_bastion_host\"`\n\tSSHBastionPort int `mapstructure:\"ssh_bastion_port\"`\n\tSSHBastionAgentAuth bool `mapstructure:\"ssh_bastion_agent_auth\"`\n\tSSHBastionUsername string `mapstructure:\"ssh_bastion_username\"`\n\tSSHBastionPassword string `mapstructure:\"ssh_bastion_password\"`\n\tSSHBastionPrivateKeyFile string `mapstructure:\"ssh_bastion_private_key_file\"`\n\tSSHFileTransferMethod string `mapstructure:\"ssh_file_transfer_method\"`\n\tSSHProxyHost string `mapstructure:\"ssh_proxy_host\"`\n\tSSHProxyPort int `mapstructure:\"ssh_proxy_port\"`\n\tSSHProxyUsername string `mapstructure:\"ssh_proxy_username\"`\n\tSSHProxyPassword string `mapstructure:\"ssh_proxy_password\"`\n\tSSHKeepAliveInterval time.Duration `mapstructure:\"ssh_keep_alive_interval\"`\n\tSSHReadWriteTimeout time.Duration `mapstructure:\"ssh_read_write_timeout\"`\n\t\/\/ SSH Internals\n\tSSHPublicKey []byte\n\tSSHPrivateKey []byte\n\n\t\/\/ WinRM\n\tWinRMUser string `mapstructure:\"winrm_username\"`\n\tWinRMPassword string `mapstructure:\"winrm_password\"`\n\tWinRMHost string `mapstructure:\"winrm_host\"`\n\tWinRMPort int `mapstructure:\"winrm_port\"`\n\tWinRMTimeout time.Duration `mapstructure:\"winrm_timeout\"`\n\tWinRMUseSSL bool `mapstructure:\"winrm_use_ssl\"`\n\tWinRMInsecure bool `mapstructure:\"winrm_insecure\"`\n\tWinRMUseNTLM bool `mapstructure:\"winrm_use_ntlm\"`\n\tWinRMTransportDecorator func() winrm.Transporter\n}\n\n\/\/ ReadSSHPrivateKeyFile returns the SSH private key bytes\nfunc (c *Config) ReadSSHPrivateKeyFile() ([]byte, error) {\n\tvar privateKey []byte\n\n\tif c.SSHPrivateKeyFile != \"\" {\n\t\tkeyPath, err := homedir.Expand(c.SSHPrivateKeyFile)\n\t\tif err != nil {\n\t\t\treturn privateKey, fmt.Errorf(\"Error expanding path for SSH private key: %s\", err)\n\t\t}\n\t\tprivateKey, err = ioutil.ReadFile(keyPath)\n\t\tif err != nil {\n\t\t\treturn privateKey, fmt.Errorf(\"Error on reading SSH private key: %s\", err)\n\t\t}\n\t}\n\treturn privateKey, nil\n}\n\n\/\/ SSHConfigFunc returns a function that can be used for the SSH communicator\n\/\/ config for connecting to the instance created over SSH using the private key\n\/\/ or password.\nfunc (c *Config) SSHConfigFunc() func(multistep.StateBag) (*ssh.ClientConfig, error) {\n\treturn func(state multistep.StateBag) (*ssh.ClientConfig, error) {\n\t\tsshConfig := &ssh.ClientConfig{\n\t\t\tUser: c.SSHUsername,\n\t\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t\t}\n\n\t\tif c.SSHAgentAuth {\n\t\t\tauthSock := os.Getenv(\"SSH_AUTH_SOCK\")\n\t\t\tif authSock == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"SSH_AUTH_SOCK is not set\")\n\t\t\t}\n\n\t\t\tsshAgent, err := net.Dial(\"unix\", authSock)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot connect to SSH Agent socket %q: %s\", authSock, err)\n\t\t\t}\n\n\t\t\tsshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))\n\t\t}\n\n\t\tvar privateKeys [][]byte\n\t\tif c.SSHPrivateKeyFile != \"\" {\n\t\t\tprivateKey, err := c.ReadSSHPrivateKeyFile()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tprivateKeys = append(privateKeys, privateKey)\n\t\t}\n\n\t\t\/\/ aws,alicloud,cloudstack,digitalOcean,oneAndOne,openstack,oracle & profitbricks key\n\t\tif iKey, hasKey := state.GetOk(\"privateKey\"); hasKey {\n\t\t\tprivateKeys = append(privateKeys, []byte(iKey.(string)))\n\t\t}\n\n\t\tif len(c.SSHPrivateKey) != 0 {\n\t\t\tprivateKeys = append(privateKeys, c.SSHPrivateKey)\n\t\t}\n\n\t\tfor _, key := range privateKeys {\n\t\t\tsigner, err := ssh.ParsePrivateKey(key)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error setting up SSH config: %s\", err)\n\t\t\t}\n\t\t\tsshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))\n\t\t}\n\n\t\tif c.SSHPassword != \"\" {\n\t\t\tsshConfig.Auth = append(sshConfig.Auth,\n\t\t\t\tssh.Password(c.SSHPassword),\n\t\t\t\tssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(c.SSHPassword)),\n\t\t\t)\n\t\t}\n\t\treturn sshConfig, nil\n\t}\n}\n\n\/\/ Port returns the port that will be used for access based on config.\nfunc (c *Config) Port() int {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHPort\n\tcase \"winrm\":\n\t\treturn c.WinRMPort\n\tdefault:\n\t\treturn 0\n\t}\n}\n\n\/\/ Host returns the port that will be used for access based on config.\nfunc (c *Config) Host() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHHost\n\tcase \"winrm\":\n\t\treturn c.WinRMHost\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ User returns the port that will be used for access based on config.\nfunc (c *Config) User() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHUsername\n\tcase \"winrm\":\n\t\treturn c.WinRMUser\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ Password returns the port that will be used for access based on config.\nfunc (c *Config) Password() string {\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\treturn c.SSHPassword\n\tcase \"winrm\":\n\t\treturn c.WinRMPassword\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc (c *Config) Prepare(ctx *interpolate.Context) []error {\n\tif c.Type == \"\" {\n\t\tc.Type = \"ssh\"\n\t}\n\n\tvar errs []error\n\tswitch c.Type {\n\tcase \"ssh\":\n\t\tif es := c.prepareSSH(ctx); len(es) > 0 {\n\t\t\terrs = append(errs, es...)\n\t\t}\n\tcase \"winrm\":\n\t\tif es := c.prepareWinRM(ctx); len(es) > 0 {\n\t\t\terrs = append(errs, es...)\n\t\t}\n\tcase \"docker\", \"none\":\n\t\tbreak\n\tdefault:\n\t\treturn []error{fmt.Errorf(\"Communicator type %s is invalid\", c.Type)}\n\t}\n\n\treturn errs\n}\n\nfunc (c *Config) prepareSSH(ctx *interpolate.Context) []error {\n\tif c.SSHPort == 0 {\n\t\tc.SSHPort = 22\n\t}\n\n\tif c.SSHTimeout == 0 {\n\t\tc.SSHTimeout = 5 * time.Minute\n\t}\n\n\tif c.SSHKeepAliveInterval == 0 {\n\t\tc.SSHKeepAliveInterval = 5 * time.Second\n\t}\n\n\tif c.SSHHandshakeAttempts == 0 {\n\t\tc.SSHHandshakeAttempts = 10\n\t}\n\n\tif c.SSHBastionHost != \"\" {\n\t\tif c.SSHBastionPort == 0 {\n\t\t\tc.SSHBastionPort = 22\n\t\t}\n\n\t\tif c.SSHBastionPrivateKeyFile == \"\" && c.SSHPrivateKeyFile != \"\" {\n\t\t\tc.SSHBastionPrivateKeyFile = c.SSHPrivateKeyFile\n\t\t}\n\t}\n\n\tif c.SSHProxyHost != \"\" {\n\t\tif c.SSHProxyPort == 0 {\n\t\t\tc.SSHProxyPort = 1080\n\t\t}\n\t}\n\n\tif c.SSHFileTransferMethod == \"\" {\n\t\tc.SSHFileTransferMethod = \"scp\"\n\t}\n\n\t\/\/ Validation\n\tvar errs []error\n\tif c.SSHUsername == \"\" {\n\t\terrs = append(errs, errors.New(\"An ssh_username must be specified\\n Note: some builders used to default ssh_username to \\\"root\\\".\"))\n\t}\n\n\tif c.SSHPrivateKeyFile != \"\" {\n\t\tpath, err := homedir.Expand(c.SSHPrivateKeyFile)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t} else if _, err := os.Stat(path); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t} else if _, err := helperssh.FileSigner(path); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"ssh_private_key_file is invalid: %s\", err))\n\t\t}\n\t}\n\n\tif c.SSHBastionHost != \"\" && !c.SSHBastionAgentAuth {\n\t\tif c.SSHBastionPassword == \"\" && c.SSHBastionPrivateKeyFile == \"\" {\n\t\t\terrs = append(errs, errors.New(\n\t\t\t\t\"ssh_bastion_password or ssh_bastion_private_key_file must be specified\"))\n\t\t} else if c.SSHBastionPrivateKeyFile != \"\" {\n\t\t\tpath, err := homedir.Expand(c.SSHBastionPrivateKeyFile)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t} else if _, err := os.Stat(path); err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t} else if _, err := helperssh.FileSigner(path); err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"ssh_bastion_private_key_file is invalid: %s\", err))\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.SSHFileTransferMethod != \"scp\" && c.SSHFileTransferMethod != \"sftp\" {\n\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\"ssh_file_transfer_method ('%s') is invalid, valid methods: sftp, scp\",\n\t\t\tc.SSHFileTransferMethod))\n\t}\n\n\tif c.SSHBastionHost != \"\" && c.SSHProxyHost != \"\" {\n\t\terrs = append(errs, errors.New(\"please specify either ssh_bastion_host or ssh_proxy_host, not both\"))\n\t}\n\n\treturn errs\n}\n\nfunc (c *Config) prepareWinRM(ctx *interpolate.Context) []error {\n\tif c.WinRMPort == 0 && c.WinRMUseSSL {\n\t\tc.WinRMPort = 5986\n\t} else if c.WinRMPort == 0 {\n\t\tc.WinRMPort = 5985\n\t}\n\n\tif c.WinRMTimeout == 0 {\n\t\tc.WinRMTimeout = 30 * time.Minute\n\t}\n\n\tif c.WinRMUseNTLM == true {\n\t\tc.WinRMTransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} }\n\t}\n\n\tvar errs []error\n\tif c.WinRMUser == \"\" {\n\t\terrs = append(errs, errors.New(\"winrm_username must be specified.\"))\n\t}\n\n\treturn errs\n}\n<|endoftext|>"} {"text":"<commit_before>package httputils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\/\/ Below is a port of the exponential backoff implementation from\n\t\/\/ google-http-java-client.\n\t\"github.com\/cenkalti\/backoff\"\n\t\"github.com\/fiorix\/go-web\/autogzip\"\n\t\"github.com\/skia-dev\/glog\"\n\t\"go.skia.org\/infra\/go\/metrics2\"\n\t\"go.skia.org\/infra\/go\/timer\"\n\t\"go.skia.org\/infra\/go\/util\"\n)\n\nconst (\n\tDIAL_TIMEOUT = time.Minute\n\tREQUEST_TIMEOUT = 5 * time.Minute\n\n\t\/\/ Exponential backoff defaults.\n\tINITIAL_INTERVAL = 500 * time.Millisecond\n\tRANDOMIZATION_FACTOR = 0.5\n\tBACKOFF_MULTIPLIER = 1.5\n\tMAX_INTERVAL = 60 * time.Second\n\tMAX_ELAPSED_TIME = 5 * time.Minute\n\n\tMAX_BYTES_IN_RESPONSE_BODY = 10 * 1024 \/\/10 KB\n)\n\n\/\/ DialTimeout is a dialer that sets a timeout.\nfunc DialTimeout(network, addr string) (net.Conn, error) {\n\treturn net.DialTimeout(network, addr, DIAL_TIMEOUT)\n}\n\n\/\/ NewTimeoutClient creates a new http.Client with both a dial timeout and a\n\/\/ request timeout.\nfunc NewTimeoutClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: DialTimeout,\n\t\t},\n\t\tTimeout: REQUEST_TIMEOUT,\n\t}\n}\n\ntype BackOffConfig struct {\n\tinitialInterval time.Duration\n\tmaxInterval time.Duration\n\tmaxElapsedTime time.Duration\n\trandomizationFactor float64\n\tbackOffMultiplier float64\n}\n\n\/\/ NewBackOffTransport creates a BackOffTransport with default values. Look at\n\/\/ NewConfiguredBackOffTransport for an example of how the values impact behavior.\nfunc NewBackOffTransport() http.RoundTripper {\n\tconfig := &BackOffConfig{\n\t\tinitialInterval: INITIAL_INTERVAL,\n\t\tmaxInterval: MAX_INTERVAL,\n\t\tmaxElapsedTime: MAX_ELAPSED_TIME,\n\t\trandomizationFactor: RANDOMIZATION_FACTOR,\n\t\tbackOffMultiplier: BACKOFF_MULTIPLIER,\n\t}\n\treturn NewConfiguredBackOffTransport(config)\n}\n\ntype BackOffTransport struct {\n\thttp.Transport\n\tbackOffConfig *BackOffConfig\n}\n\ntype ResponsePagination struct {\n\tOffset int `json:\"offset\"`\n\tSize int `json:\"size\"`\n\tTotal int `json:\"total\"`\n}\n\n\/\/ NewBackOffTransport creates a BackOffTransport with the specified config.\n\/\/\n\/\/ Example: The default retry_interval is .5 seconds, default randomization_factor\n\/\/ is 0.5, default multiplier is 1.5 and the default max_interval is 1 minute. For\n\/\/ 10 tries the sequence will be (values in seconds) and assuming we go over the\n\/\/ max_elapsed_time on the 10th try:\n\/\/\n\/\/ request# retry_interval randomized_interval\n\/\/ 1 0.5 [0.25, 0.75]\n\/\/ 2 0.75 [0.375, 1.125]\n\/\/ 3 1.125 [0.562, 1.687]\n\/\/ 4 1.687 [0.8435, 2.53]\n\/\/ 5 2.53 [1.265, 3.795]\n\/\/ 6 3.795 [1.897, 5.692]\n\/\/ 7 5.692 [2.846, 8.538]\n\/\/ 8 8.538 [4.269, 12.807]\n\/\/ 9 12.807 [6.403, 19.210]\n\/\/ 10 19.210 backoff.Stop\nfunc NewConfiguredBackOffTransport(config *BackOffConfig) http.RoundTripper {\n\treturn &BackOffTransport{\n\t\tTransport: http.Transport{Dial: DialTimeout},\n\t\tbackOffConfig: config,\n\t}\n}\n\n\/\/ RoundTrip implements the RoundTripper interface.\nfunc (t *BackOffTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Initialize the exponential backoff client.\n\tbackOffClient := &backoff.ExponentialBackOff{\n\t\tInitialInterval: t.backOffConfig.initialInterval,\n\t\tRandomizationFactor: t.backOffConfig.randomizationFactor,\n\t\tMultiplier: t.backOffConfig.backOffMultiplier,\n\t\tMaxInterval: t.backOffConfig.maxInterval,\n\t\tMaxElapsedTime: t.backOffConfig.maxElapsedTime,\n\t\tClock: backoff.SystemClock,\n\t}\n\t\/\/ Make a copy of the request's Body so that we can reuse it if the request\n\t\/\/ needs to be backed off and retried.\n\tbodyBuf := bytes.Buffer{}\n\tif req.Body != nil {\n\t\tif _, err := bodyBuf.ReadFrom(req.Body); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read request body: %v\", err)\n\t\t}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\troundTripOp := func() error {\n\t\tif req.Body != nil {\n\t\t\treq.Body = ioutil.NopCloser(bytes.NewBufferString(bodyBuf.String()))\n\t\t}\n\t\tresp, err = t.Transport.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error while making the round trip: %s\", err)\n\t\t}\n\t\tif resp != nil {\n\t\t\tif resp.StatusCode >= 500 && resp.StatusCode <= 599 {\n\t\t\t\t\/\/ We can't close the resp.Body on success, so we must do it in each of the failure cases.\n\t\t\t\treturn fmt.Errorf(\"Got server error statuscode %d while making the HTTP %s request to %s\\nResponse: %s\", resp.StatusCode, req.Method, req.URL, readAndClose(resp.Body))\n\t\t\t} else if resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t\t\t\/\/ We can't close the resp.Body on success, so we must do it in each of the failure cases.\n\t\t\t\t\/\/ Stop backing off if there are non server errors.\n\t\t\t\tbackOffClient.MaxElapsedTime = backoff.Stop\n\t\t\t\treturn fmt.Errorf(\"Got non server error statuscode %d while making the HTTP %s request to %s\\nResponse: %s\", resp.StatusCode, req.Method, req.URL, readAndClose(resp.Body))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tnotifyFunc := func(err error, wait time.Duration) {\n\t\tglog.Warningf(\"Got error: %s. Retrying HTTP request after sleeping for %s\", err, wait)\n\t}\n\n\tif err := backoff.RetryNotify(roundTripOp, backOffClient, notifyFunc); err != nil {\n\t\treturn nil, fmt.Errorf(\"HTTP request failed inspite of exponential backoff: %s\", err)\n\t}\n\treturn resp, nil\n}\n\n\/\/ readAndClose reads the content of a ReadCloser (e.g. http Response), and returns it as a string.\n\/\/ If the response was nil or there was a problem, it will return empty string. The reader,\n\/\/if non-null, will be closed by this function.\nfunc readAndClose(r io.ReadCloser) string {\n\tif r != nil {\n\t\tdefer util.Close(r)\n\t\tif b, err := ioutil.ReadAll(io.LimitReader(r, MAX_BYTES_IN_RESPONSE_BODY)); err != nil {\n\t\t\tglog.Warningf(\"There was a potential problem reading the response body: %s\", err)\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"%q\", string(b))\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ TODO(stephana): Remove 'r' from the argument list since it's not used. It would\n\/\/ be also useful if we could specify a return status explicitly.\n\n\/\/ ReportError formats an HTTP error response and also logs the detailed error message.\n\/\/ The message parameter is returned in the HTTP response. If it is not provided then\n\/\/ \"Unknown error\" will be returned instead.\nfunc ReportError(w http.ResponseWriter, r *http.Request, err error, message string) {\n\tglog.Errorln(message, err)\n\tif err != io.ErrClosedPipe {\n\t\thttpErrMsg := message\n\t\tif message == \"\" {\n\t\t\thttpErrMsg = \"Unknown error\"\n\t\t}\n\t\thttp.Error(w, httpErrMsg, 500)\n\t}\n}\n\n\/\/ responseProxy implements http.ResponseWriter and records the status codes.\ntype responseProxy struct {\n\thttp.ResponseWriter\n\twroteHeader bool\n}\n\nfunc (rp *responseProxy) WriteHeader(code int) {\n\tif !rp.wroteHeader {\n\t\tglog.Infof(\"Response Code: %d\", code)\n\t\tmetrics2.GetCounter(\"http.response\", map[string]string{\"statuscode\": strconv.Itoa(code)}).Inc(1)\n\t\trp.ResponseWriter.WriteHeader(code)\n\t\trp.wroteHeader = true\n\t}\n}\n\n\/\/ recordResponse returns a wrapped http.Handler that records the status codes of the\n\/\/ responses.\n\/\/\n\/\/ Note that if a handler doesn't explicitly set a response code and goes with\n\/\/ the default of 200 then this will never record anything.\nfunc recordResponse(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(&responseProxy{ResponseWriter: w}, r)\n\t})\n}\n\n\/\/ LoggingGzipRequestResponse records parts of the request and the response to\n\/\/ the logs and gzips responses when appropriate.\nfunc LoggingGzipRequestResponse(h http.Handler) http.Handler {\n\treturn autogzip.Handle(LoggingRequestResponse(h))\n}\n\n\/\/ LoggingRequestResponse records parts of the request and the response to the logs.\nfunc LoggingRequestResponse(h http.Handler) http.Handler {\n\t\/\/ Closure to capture the request.\n\tf := func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tconst size = 64 << 10\n\t\t\t\tbuf := make([]byte, size)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tglog.Errorf(\"panic serving %v: %v\\n%s\", r.URL.Path, err, buf)\n\t\t\t}\n\t\t}()\n\t\tdefer timer.New(fmt.Sprintf(\"Request: %s %s %#v Content Length: %d Latency:\", r.URL.Path, r.Method, r.URL, r.ContentLength)).Stop()\n\t\th.ServeHTTP(w, r)\n\t}\n\n\treturn recordResponse(http.HandlerFunc(f))\n}\n\n\/\/ MakeResourceHandler is an HTTP handler function designed for serving files.\nfunc MakeResourceHandler(resourcesDir string) func(http.ResponseWriter, *http.Request) {\n\tfileServer := http.FileServer(http.Dir(resourcesDir))\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Cache-Control\", \"max-age=300\")\n\t\tfileServer.ServeHTTP(w, r)\n\t}\n}\n\n\/\/ CorsHandler is an HTTP handler function which adds the necessary header for CORS.\nfunc CorsHandler(h func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Access-Control-Allow-Origin\", \"*\")\n\t\th(w, r)\n\t}\n}\n\n\/\/ PaginationParams is helper function to extract pagination parameters from a\n\/\/ URL query string. It assumes that 'offset' and 'size' are the query parameters\n\/\/ used for pagination. It parses the values and returns an error if they are\n\/\/ not integers. If the params are not set the defaults are proviced.\n\/\/ Further it ensures that size is never above max size.\nfunc PaginationParams(query url.Values, defaultOffset, defaultSize, maxSize int) (int, int, error) {\n\tsize, err := getPositiveInt(query, \"size\", defaultSize)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\toffset, err := getPositiveInt(query, \"offset\", defaultOffset)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\treturn offset, util.MinInt(size, maxSize), nil\n}\n\n\/\/ getPositiveInt parses the param in query and ensures it is >= 0 using\n\/\/ default value when necessary.\nfunc getPositiveInt(query url.Values, param string, defaultVal int) (int, error) {\n\tvar val int\n\tvar err error\n\tif valStr := query.Get(param); valStr == \"\" {\n\t\treturn defaultVal, nil\n\t} else {\n\t\tval, err = strconv.Atoi(valStr)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"Not a valid integer value.\")\n\t\t}\n\t}\n\tif val < 0 {\n\t\treturn defaultVal, nil\n\t}\n\treturn val, nil\n}\n<commit_msg>Log request URL before handling the request, to aid in debugging.<commit_after>package httputils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\/\/ Below is a port of the exponential backoff implementation from\n\t\/\/ google-http-java-client.\n\t\"github.com\/cenkalti\/backoff\"\n\t\"github.com\/fiorix\/go-web\/autogzip\"\n\t\"github.com\/skia-dev\/glog\"\n\t\"go.skia.org\/infra\/go\/metrics2\"\n\t\"go.skia.org\/infra\/go\/timer\"\n\t\"go.skia.org\/infra\/go\/util\"\n)\n\nconst (\n\tDIAL_TIMEOUT = time.Minute\n\tREQUEST_TIMEOUT = 5 * time.Minute\n\n\t\/\/ Exponential backoff defaults.\n\tINITIAL_INTERVAL = 500 * time.Millisecond\n\tRANDOMIZATION_FACTOR = 0.5\n\tBACKOFF_MULTIPLIER = 1.5\n\tMAX_INTERVAL = 60 * time.Second\n\tMAX_ELAPSED_TIME = 5 * time.Minute\n\n\tMAX_BYTES_IN_RESPONSE_BODY = 10 * 1024 \/\/10 KB\n)\n\n\/\/ DialTimeout is a dialer that sets a timeout.\nfunc DialTimeout(network, addr string) (net.Conn, error) {\n\treturn net.DialTimeout(network, addr, DIAL_TIMEOUT)\n}\n\n\/\/ NewTimeoutClient creates a new http.Client with both a dial timeout and a\n\/\/ request timeout.\nfunc NewTimeoutClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: DialTimeout,\n\t\t},\n\t\tTimeout: REQUEST_TIMEOUT,\n\t}\n}\n\ntype BackOffConfig struct {\n\tinitialInterval time.Duration\n\tmaxInterval time.Duration\n\tmaxElapsedTime time.Duration\n\trandomizationFactor float64\n\tbackOffMultiplier float64\n}\n\n\/\/ NewBackOffTransport creates a BackOffTransport with default values. Look at\n\/\/ NewConfiguredBackOffTransport for an example of how the values impact behavior.\nfunc NewBackOffTransport() http.RoundTripper {\n\tconfig := &BackOffConfig{\n\t\tinitialInterval: INITIAL_INTERVAL,\n\t\tmaxInterval: MAX_INTERVAL,\n\t\tmaxElapsedTime: MAX_ELAPSED_TIME,\n\t\trandomizationFactor: RANDOMIZATION_FACTOR,\n\t\tbackOffMultiplier: BACKOFF_MULTIPLIER,\n\t}\n\treturn NewConfiguredBackOffTransport(config)\n}\n\ntype BackOffTransport struct {\n\thttp.Transport\n\tbackOffConfig *BackOffConfig\n}\n\ntype ResponsePagination struct {\n\tOffset int `json:\"offset\"`\n\tSize int `json:\"size\"`\n\tTotal int `json:\"total\"`\n}\n\n\/\/ NewBackOffTransport creates a BackOffTransport with the specified config.\n\/\/\n\/\/ Example: The default retry_interval is .5 seconds, default randomization_factor\n\/\/ is 0.5, default multiplier is 1.5 and the default max_interval is 1 minute. For\n\/\/ 10 tries the sequence will be (values in seconds) and assuming we go over the\n\/\/ max_elapsed_time on the 10th try:\n\/\/\n\/\/ request# retry_interval randomized_interval\n\/\/ 1 0.5 [0.25, 0.75]\n\/\/ 2 0.75 [0.375, 1.125]\n\/\/ 3 1.125 [0.562, 1.687]\n\/\/ 4 1.687 [0.8435, 2.53]\n\/\/ 5 2.53 [1.265, 3.795]\n\/\/ 6 3.795 [1.897, 5.692]\n\/\/ 7 5.692 [2.846, 8.538]\n\/\/ 8 8.538 [4.269, 12.807]\n\/\/ 9 12.807 [6.403, 19.210]\n\/\/ 10 19.210 backoff.Stop\nfunc NewConfiguredBackOffTransport(config *BackOffConfig) http.RoundTripper {\n\treturn &BackOffTransport{\n\t\tTransport: http.Transport{Dial: DialTimeout},\n\t\tbackOffConfig: config,\n\t}\n}\n\n\/\/ RoundTrip implements the RoundTripper interface.\nfunc (t *BackOffTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Initialize the exponential backoff client.\n\tbackOffClient := &backoff.ExponentialBackOff{\n\t\tInitialInterval: t.backOffConfig.initialInterval,\n\t\tRandomizationFactor: t.backOffConfig.randomizationFactor,\n\t\tMultiplier: t.backOffConfig.backOffMultiplier,\n\t\tMaxInterval: t.backOffConfig.maxInterval,\n\t\tMaxElapsedTime: t.backOffConfig.maxElapsedTime,\n\t\tClock: backoff.SystemClock,\n\t}\n\t\/\/ Make a copy of the request's Body so that we can reuse it if the request\n\t\/\/ needs to be backed off and retried.\n\tbodyBuf := bytes.Buffer{}\n\tif req.Body != nil {\n\t\tif _, err := bodyBuf.ReadFrom(req.Body); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read request body: %v\", err)\n\t\t}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\troundTripOp := func() error {\n\t\tif req.Body != nil {\n\t\t\treq.Body = ioutil.NopCloser(bytes.NewBufferString(bodyBuf.String()))\n\t\t}\n\t\tresp, err = t.Transport.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error while making the round trip: %s\", err)\n\t\t}\n\t\tif resp != nil {\n\t\t\tif resp.StatusCode >= 500 && resp.StatusCode <= 599 {\n\t\t\t\t\/\/ We can't close the resp.Body on success, so we must do it in each of the failure cases.\n\t\t\t\treturn fmt.Errorf(\"Got server error statuscode %d while making the HTTP %s request to %s\\nResponse: %s\", resp.StatusCode, req.Method, req.URL, readAndClose(resp.Body))\n\t\t\t} else if resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t\t\t\/\/ We can't close the resp.Body on success, so we must do it in each of the failure cases.\n\t\t\t\t\/\/ Stop backing off if there are non server errors.\n\t\t\t\tbackOffClient.MaxElapsedTime = backoff.Stop\n\t\t\t\treturn fmt.Errorf(\"Got non server error statuscode %d while making the HTTP %s request to %s\\nResponse: %s\", resp.StatusCode, req.Method, req.URL, readAndClose(resp.Body))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tnotifyFunc := func(err error, wait time.Duration) {\n\t\tglog.Warningf(\"Got error: %s. Retrying HTTP request after sleeping for %s\", err, wait)\n\t}\n\n\tif err := backoff.RetryNotify(roundTripOp, backOffClient, notifyFunc); err != nil {\n\t\treturn nil, fmt.Errorf(\"HTTP request failed inspite of exponential backoff: %s\", err)\n\t}\n\treturn resp, nil\n}\n\n\/\/ readAndClose reads the content of a ReadCloser (e.g. http Response), and returns it as a string.\n\/\/ If the response was nil or there was a problem, it will return empty string. The reader,\n\/\/if non-null, will be closed by this function.\nfunc readAndClose(r io.ReadCloser) string {\n\tif r != nil {\n\t\tdefer util.Close(r)\n\t\tif b, err := ioutil.ReadAll(io.LimitReader(r, MAX_BYTES_IN_RESPONSE_BODY)); err != nil {\n\t\t\tglog.Warningf(\"There was a potential problem reading the response body: %s\", err)\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"%q\", string(b))\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ TODO(stephana): Remove 'r' from the argument list since it's not used. It would\n\/\/ be also useful if we could specify a return status explicitly.\n\n\/\/ ReportError formats an HTTP error response and also logs the detailed error message.\n\/\/ The message parameter is returned in the HTTP response. If it is not provided then\n\/\/ \"Unknown error\" will be returned instead.\nfunc ReportError(w http.ResponseWriter, r *http.Request, err error, message string) {\n\tglog.Errorln(message, err)\n\tif err != io.ErrClosedPipe {\n\t\thttpErrMsg := message\n\t\tif message == \"\" {\n\t\t\thttpErrMsg = \"Unknown error\"\n\t\t}\n\t\thttp.Error(w, httpErrMsg, 500)\n\t}\n}\n\n\/\/ responseProxy implements http.ResponseWriter and records the status codes.\ntype responseProxy struct {\n\thttp.ResponseWriter\n\twroteHeader bool\n}\n\nfunc (rp *responseProxy) WriteHeader(code int) {\n\tif !rp.wroteHeader {\n\t\tglog.Infof(\"Response Code: %d\", code)\n\t\tmetrics2.GetCounter(\"http.response\", map[string]string{\"statuscode\": strconv.Itoa(code)}).Inc(1)\n\t\trp.ResponseWriter.WriteHeader(code)\n\t\trp.wroteHeader = true\n\t}\n}\n\n\/\/ recordResponse returns a wrapped http.Handler that records the status codes of the\n\/\/ responses.\n\/\/\n\/\/ Note that if a handler doesn't explicitly set a response code and goes with\n\/\/ the default of 200 then this will never record anything.\nfunc recordResponse(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(&responseProxy{ResponseWriter: w}, r)\n\t})\n}\n\n\/\/ LoggingGzipRequestResponse records parts of the request and the response to\n\/\/ the logs and gzips responses when appropriate.\nfunc LoggingGzipRequestResponse(h http.Handler) http.Handler {\n\treturn autogzip.Handle(LoggingRequestResponse(h))\n}\n\n\/\/ LoggingRequestResponse records parts of the request and the response to the logs.\nfunc LoggingRequestResponse(h http.Handler) http.Handler {\n\t\/\/ Closure to capture the request.\n\tf := func(w http.ResponseWriter, r *http.Request) {\n\t\tglog.Infof(\"Incoming request: %s %s %#v \", r.URL.Path, r.Method, *(r.URL))\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tconst size = 64 << 10\n\t\t\t\tbuf := make([]byte, size)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tglog.Errorf(\"panic serving %v: %v\\n%s\", r.URL.Path, err, buf)\n\t\t\t}\n\t\t}()\n\t\tdefer timer.New(fmt.Sprintf(\"Request: %s Latency:\", r.URL.Path)).Stop()\n\t\th.ServeHTTP(w, r)\n\t}\n\n\treturn recordResponse(http.HandlerFunc(f))\n}\n\n\/\/ MakeResourceHandler is an HTTP handler function designed for serving files.\nfunc MakeResourceHandler(resourcesDir string) func(http.ResponseWriter, *http.Request) {\n\tfileServer := http.FileServer(http.Dir(resourcesDir))\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Cache-Control\", \"max-age=300\")\n\t\tfileServer.ServeHTTP(w, r)\n\t}\n}\n\n\/\/ CorsHandler is an HTTP handler function which adds the necessary header for CORS.\nfunc CorsHandler(h func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Access-Control-Allow-Origin\", \"*\")\n\t\th(w, r)\n\t}\n}\n\n\/\/ PaginationParams is helper function to extract pagination parameters from a\n\/\/ URL query string. It assumes that 'offset' and 'size' are the query parameters\n\/\/ used for pagination. It parses the values and returns an error if they are\n\/\/ not integers. If the params are not set the defaults are proviced.\n\/\/ Further it ensures that size is never above max size.\nfunc PaginationParams(query url.Values, defaultOffset, defaultSize, maxSize int) (int, int, error) {\n\tsize, err := getPositiveInt(query, \"size\", defaultSize)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\toffset, err := getPositiveInt(query, \"offset\", defaultOffset)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\treturn offset, util.MinInt(size, maxSize), nil\n}\n\n\/\/ getPositiveInt parses the param in query and ensures it is >= 0 using\n\/\/ default value when necessary.\nfunc getPositiveInt(query url.Values, param string, defaultVal int) (int, error) {\n\tvar val int\n\tvar err error\n\tif valStr := query.Get(param); valStr == \"\" {\n\t\treturn defaultVal, nil\n\t} else {\n\t\tval, err = strconv.Atoi(valStr)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"Not a valid integer value.\")\n\t\t}\n\t}\n\tif val < 0 {\n\t\treturn defaultVal, nil\n\t}\n\treturn val, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/geobe\/gostip\/go\/controller\"\n\t\"github.com\/geobe\/gostip\/go\/model\"\n\t\"net\/http\"\n\t\"log\"\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"crypto\/tls\"\n\t\"flag\"\n)\n\n\/\/ Server Ports, zu denen Ports 80 und 443\n\/\/ vom Internet Router (z.B. FritzBox) mit Port Forwarding weitergeleitet wird\nconst httpport = \":8070\"\nconst tlsport = \":8443\"\n\nfunc main() {\n\t\/\/ read command line parameters\n\taccount := flag.String(\"mail\", \"\", \"a mail account\")\n\tmailpw := flag.String(\"mailpw\", \"\", \"password of the mail account\")\n\tcfgfile := flag.String(\"cfgfile\", \"\", \"name of config file\")\n\tflag.Parse()\n\t\/\/ setup mailer info\n\tmodel.SetMailer(*account, *mailpw)\n\t\/\/ prepare database\n\tmodel.Setup(*cfgfile)\n\tdb := model.Db()\n\tdefer db.Close()\n\/\/\tmodel.ClearTestDb(db)\n\tmodel.InitProdDb(db)\n\n\t\/\/ mux verwaltet die Routen\n\tmux := controller.SetRouting()\n\n\t\/\/ die zugelassenen host namen\n\tallowedHosts := []string{\"dkfai.spdns.org\", \"geobe.spdns.org\"}\n\n\t\/\/ der Verwalter der LetsEncrypt Zertifikate\n\tcertManager := autocert.Manager{\n\t\tPrompt: autocert.AcceptTOS,\n\t\tHostPolicy: autocert.HostWhitelist(allowedHosts...), \/\/your domain here\n\t\tEmail: \t \"georg.beier@fh-zwickau.de\",\n\t\tCache: autocert.DirCache(\"certs\"), \/\/folder for storing certificates\n\t}\n\n\n\n\t\/\/ konfiguriere server\n\tserver := &http.Server{\n\t\tAddr: \"0.0.0.0\" + tlsport,\n\t\tTLSConfig: &tls.Config{\n\t\t\tGetCertificate: certManager.GetCertificate,\n\t\t},\n\t\tHandler: mux,\n\t}\n\n\t\/\/ switching redirect handler\n\thandlerSwitch := &controller.HandlerSwitch{\n\t\tMux: mux,\n\t\tRedirect: http.HandlerFunc(controller.RedirectHTTP),\n\t\tAllowedHosts: allowedHosts,\n\t}\n\n\t\/\/ konfiguriere redirect server\n\tredirectserver := &http.Server{\n\t\tAddr: \"0.0.0.0\" + httpport,\n\t\tHandler: handlerSwitch, \/\/http.HandlerFunc(RedirectHTTP),\n\t}\n\t\/\/ starte den redirect server auf HTTP\n\tgo redirectserver.ListenAndServe()\n\n\t\/\/ und starte den primären server auf HTTPS\n\tlog.Printf(\"server starting\\n\")\n\tserver.ListenAndServeTLS(\"\", \"\")\n}\n<commit_msg>changed devserv ports to 18070 and 18443<commit_after>package main\n\nimport (\n\t\"github.com\/geobe\/gostip\/go\/controller\"\n\t\"github.com\/geobe\/gostip\/go\/model\"\n\t\"net\/http\"\n\t\"log\"\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"crypto\/tls\"\n\t\"flag\"\n)\n\n\/\/ Server Ports, zu denen Ports 80 und 443\n\/\/ vom Internet Router (z.B. FritzBox) mit Port Forwarding weitergeleitet wird\nconst httpport = \":18070\"\nconst tlsport = \":18443\"\n\nfunc main() {\n\t\/\/ read command line parameters\n\taccount := flag.String(\"mail\", \"\", \"a mail account\")\n\tmailpw := flag.String(\"mailpw\", \"\", \"password of the mail account\")\n\tcfgfile := flag.String(\"cfgfile\", \"\", \"name of config file\")\n\tflag.Parse()\n\t\/\/ setup mailer info\n\tmodel.SetMailer(*account, *mailpw)\n\t\/\/ prepare database\n\tmodel.Setup(*cfgfile)\n\tdb := model.Db()\n\tdefer db.Close()\n\/\/\tmodel.ClearTestDb(db)\n\tmodel.InitProdDb(db)\n\n\t\/\/ mux verwaltet die Routen\n\tmux := controller.SetRouting()\n\n\t\/\/ die zugelassenen host namen\n\tallowedHosts := []string{\"dkfai.spdns.org\", \"geobe.spdns.org\"}\n\n\t\/\/ der Verwalter der LetsEncrypt Zertifikate\n\tcertManager := autocert.Manager{\n\t\tPrompt: autocert.AcceptTOS,\n\t\tHostPolicy: autocert.HostWhitelist(allowedHosts...), \/\/your domain here\n\t\tEmail: \t \"georg.beier@fh-zwickau.de\",\n\t\tCache: autocert.DirCache(\"certs\"), \/\/folder for storing certificates\n\t}\n\n\n\n\t\/\/ konfiguriere server\n\tserver := &http.Server{\n\t\tAddr: \"0.0.0.0\" + tlsport,\n\t\tTLSConfig: &tls.Config{\n\t\t\tGetCertificate: certManager.GetCertificate,\n\t\t},\n\t\tHandler: mux,\n\t}\n\n\t\/\/ switching redirect handler\n\thandlerSwitch := &controller.HandlerSwitch{\n\t\tMux: mux,\n\t\tRedirect: http.HandlerFunc(controller.RedirectHTTP),\n\t\tAllowedHosts: allowedHosts,\n\t}\n\n\t\/\/ konfiguriere redirect server\n\tredirectserver := &http.Server{\n\t\tAddr: \"0.0.0.0\" + httpport,\n\t\tHandler: handlerSwitch, \/\/http.HandlerFunc(RedirectHTTP),\n\t}\n\t\/\/ starte den redirect server auf HTTP\n\tgo redirectserver.ListenAndServe()\n\n\t\/\/ und starte den primären server auf HTTPS\n\tlog.Printf(\"server starting\\n\")\n\tserver.ListenAndServeTLS(\"\", \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/orktes\/orlang\/ast\"\n\t\"github.com\/orktes\/orlang\/linter\"\n\t\"github.com\/orktes\/orlang\/scanner\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/orktes\/orlang\/analyser\"\n\tjscodegen \"github.com\/orktes\/orlang\/codegen\/js\"\n\t\"github.com\/orktes\/orlang\/parser\"\n\t\"github.com\/orktes\/orlang\/types\"\n)\n\nfunc configureAnalyzer(analsr *analyser.Analyser) {\n\tanalsr.AddExternalFunc(\"print\", &types.SignatureType{\n\t\tArgumentNames: []string{\"str\"},\n\t\tArgumentTypes: []types.Type{&types.InterfaceType{\n\t\t\tName: \"buildin(stringer)\",\n\t\t\tFunctions: []struct {\n\t\t\t\tName string\n\t\t\t\tType *types.SignatureType\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tName: \"toString\",\n\t\t\t\t\tType: &types.SignatureType{\n\t\t\t\t\t\tArgumentNames: []string{},\n\t\t\t\t\t\tArgumentTypes: []types.Type{},\n\t\t\t\t\t\tReturnType: types.StringType,\n\t\t\t\t\t\tExtern: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t\tReturnType: types.VoidType,\n\t\tExtern: true,\n\t})\n}\n\nfunc main() {\n\n\tjs.Module.Get(\"exports\").Set(\"Lint\", func(input string) []linter.LintIssue {\n\t\terrors, err := linter.Lint(strings.NewReader(input), configureAnalyzer)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn errors\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"AutoComplete\", func(input string, line int, column int) (items []map[string]interface{}) {\n\t\tscanner := scanner.NewScanner(strings.NewReader(input))\n\t\tpars := parser.NewParser(analyser.NewAutoCompleteScanner(scanner, []ast.Position{ast.Position{Line: line, Column: column}}))\n\n\t\tfile, err := pars.Parse()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvar result []analyser.AutoCompleteInfo\n\t\talys, err := analyser.New(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tconfigureAnalyzer(alys)\n\n\t\talys.AutoCompleteInfoCallback = func(res []analyser.AutoCompleteInfo) {\n\t\t\tresult = res\n\t\t}\n\n\t\talys.Analyse()\n\n\titemLoop:\n\t\tfor _, item := range result {\n\t\t\tswitch item.Label {\n\t\t\tcase \"+\", \"-\", \"*\", \"\/\":\n\t\t\t\tcontinue itemLoop\n\t\t\t}\n\n\t\t\tinsertText := item.Label\n\t\t\tswitch t := item.Type.(type) {\n\t\t\tcase *types.SignatureType:\n\t\t\t\tinsertText = fmt.Sprintf(\"%s(%s)\", insertText, strings.Join(t.ArgumentNames, \", \"))\n\t\t\tcase *types.StructType:\n\t\t\t\tif item.Kind == \"Class\" {\n\t\t\t\t\tinsertText = fmt.Sprintf(\"%s{}\", insertText)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\titems = append(items, map[string]interface{}{\n\t\t\t\t\"label\": item.Label,\n\t\t\t\t\"insertText\": insertText,\n\t\t\t\t\"kind\": item.Kind,\n\t\t\t\t\"documentation\": item.Type.GetName(),\n\t\t\t\t\"detail\": item.Type.GetName(),\n\t\t\t\t\"type\": fmt.Sprintf(\"%T\", item.Type),\n\t\t\t})\n\t\t}\n\n\t\treturn\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"Tokenize\", func(input string) []scanner.Token {\n\t\tscan := scanner.NewScanner(strings.NewReader(input))\n\n\t\ttokens := []scanner.Token{}\n\t\tfor {\n\t\t\ttoken := scan.Scan()\n\t\t\ttokens = append(tokens, token)\n\t\t\tif token.Type == scanner.TokenTypeEOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn tokens\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"Compile\", func(input string) string {\n\t\tfile, err := parser.Parse(strings.NewReader(input))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tanalyser, err := analyser.New(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tconfigureAnalyzer(analyser)\n\n\t\tvar analyErr error\n\t\tanalyser.Error = func(node ast.Node, msg string, fatal bool) {\n\t\t\tif fatal {\n\t\t\t\terrStr := fmt.Sprintf(\n\t\t\t\t\t\"%d:%d %s\",\n\t\t\t\t\tnode.StartPos().Line+1,\n\t\t\t\t\tnode.StartPos().Column+1,\n\t\t\t\t\tmsg,\n\t\t\t\t)\n\t\t\t\tanalyErr = errors.New(errStr)\n\t\t\t}\n\t\t}\n\n\t\tinfo, err := analyser.Analyse()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif analyErr != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcode := jscodegen.New(info).Generate(file)\n\n\t\treturn string(code)\n\t})\n}\n<commit_msg>Add macros to autocomplete<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/orktes\/orlang\/ast\"\n\t\"github.com\/orktes\/orlang\/linter\"\n\t\"github.com\/orktes\/orlang\/scanner\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/orktes\/orlang\/analyser\"\n\tjscodegen \"github.com\/orktes\/orlang\/codegen\/js\"\n\t\"github.com\/orktes\/orlang\/parser\"\n\t\"github.com\/orktes\/orlang\/types\"\n)\n\nfunc configureAnalyzer(analsr *analyser.Analyser) {\n\tanalsr.AddExternalFunc(\"print\", &types.SignatureType{\n\t\tArgumentNames: []string{\"str\"},\n\t\tArgumentTypes: []types.Type{&types.InterfaceType{\n\t\t\tName: \"buildin(stringer)\",\n\t\t\tFunctions: []struct {\n\t\t\t\tName string\n\t\t\t\tType *types.SignatureType\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tName: \"toString\",\n\t\t\t\t\tType: &types.SignatureType{\n\t\t\t\t\t\tArgumentNames: []string{},\n\t\t\t\t\t\tArgumentTypes: []types.Type{},\n\t\t\t\t\t\tReturnType: types.StringType,\n\t\t\t\t\t\tExtern: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t\tReturnType: types.VoidType,\n\t\tExtern: true,\n\t})\n}\n\nfunc main() {\n\n\tjs.Module.Get(\"exports\").Set(\"Lint\", func(input string) []linter.LintIssue {\n\t\terrors, err := linter.Lint(strings.NewReader(input), configureAnalyzer)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn errors\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"AutoComplete\", func(input string, line int, column int) (items []map[string]interface{}) {\n\t\tscanner := scanner.NewScanner(strings.NewReader(input))\n\t\tpars := parser.NewParser(analyser.NewAutoCompleteScanner(scanner, []ast.Position{ast.Position{Line: line, Column: column}}))\n\n\t\tfile, err := pars.Parse()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvar result []analyser.AutoCompleteInfo\n\t\talys, err := analyser.New(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tconfigureAnalyzer(alys)\n\n\t\talys.AutoCompleteInfoCallback = func(res []analyser.AutoCompleteInfo) {\n\t\t\tresult = res\n\t\t}\n\n\t\talys.Analyse()\n\n\titemLoop:\n\t\tfor _, item := range result {\n\t\t\tswitch item.Label {\n\t\t\tcase \"+\", \"-\", \"*\", \"\/\":\n\t\t\t\tcontinue itemLoop\n\t\t\t}\n\n\t\t\tinsertText := item.Label\n\t\t\tswitch t := item.Type.(type) {\n\t\t\tcase *types.SignatureType:\n\t\t\t\tinsertText = fmt.Sprintf(\"%s(%s)\", insertText, strings.Join(t.ArgumentNames, \", \"))\n\t\t\tcase *types.StructType:\n\t\t\t\tif item.Kind == \"Class\" {\n\t\t\t\t\tinsertText = fmt.Sprintf(\"%s{}\", insertText)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\titems = append(items, map[string]interface{}{\n\t\t\t\t\"label\": item.Label,\n\t\t\t\t\"insertText\": insertText,\n\t\t\t\t\"kind\": item.Kind,\n\t\t\t\t\"documentation\": item.Type.GetName(),\n\t\t\t\t\"detail\": item.Type.GetName(),\n\t\t\t\t\"type\": fmt.Sprintf(\"%T\", item.Type),\n\t\t\t})\n\t\t}\n\n\t\tfor macroName, _ := range file.Macros {\n\t\t\titems = append(items, map[string]interface{}{\n\t\t\t\t\"label\": macroName + \"!\",\n\t\t\t\t\"insertText\": macroName + \"!\",\n\t\t\t\t\"kind\": \"Reference\",\n\t\t\t\t\"documentation\": \"macro \" + macroName,\n\t\t\t\t\"detail\": \"macro \" + macroName,\n\t\t\t\t\"type\": \"macro\",\n\t\t\t})\n\t\t}\n\n\t\treturn\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"Tokenize\", func(input string) []scanner.Token {\n\t\tscan := scanner.NewScanner(strings.NewReader(input))\n\n\t\ttokens := []scanner.Token{}\n\t\tfor {\n\t\t\ttoken := scan.Scan()\n\t\t\ttokens = append(tokens, token)\n\t\t\tif token.Type == scanner.TokenTypeEOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn tokens\n\t})\n\n\tjs.Module.Get(\"exports\").Set(\"Compile\", func(input string) string {\n\t\tfile, err := parser.Parse(strings.NewReader(input))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tanalyser, err := analyser.New(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tconfigureAnalyzer(analyser)\n\n\t\tvar analyErr error\n\t\tanalyser.Error = func(node ast.Node, msg string, fatal bool) {\n\t\t\tif fatal {\n\t\t\t\terrStr := fmt.Sprintf(\n\t\t\t\t\t\"%d:%d %s\",\n\t\t\t\t\tnode.StartPos().Line+1,\n\t\t\t\t\tnode.StartPos().Column+1,\n\t\t\t\t\tmsg,\n\t\t\t\t)\n\t\t\t\tanalyErr = errors.New(errStr)\n\t\t\t}\n\t\t}\n\n\t\tinfo, err := analyser.Analyse()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif analyErr != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcode := jscodegen.New(info).Generate(file)\n\n\t\treturn string(code)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Cayley Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage graph\n\nimport \"github.com\/cayleygraph\/cayley\/quad\"\n\n\/\/ Transaction stores a bunch of Deltas to apply automatically on the database.\ntype Transaction struct {\n\t\/\/ Deltas stores the deltas in the right order\n\tDeltas []Delta\n\t\/\/ deltas stores the deltas in a map to avoid duplications\n\tdeltas map[Delta]struct{}\n}\n\n\/\/ NewTransaction initialize a new transaction.\nfunc NewTransaction() *Transaction {\n\treturn &Transaction{Deltas: make([]Delta, 0, 10), deltas: make(map[Delta]struct{}, 10)}\n}\n\n\/\/ AddQuad adds a new quad to the transaction if it is not already present in it.\n\/\/ If there is a 'remove' delta for that quad, it will remove that delta from\n\/\/ the transaction instead of actually adding the quad.\nfunc (t *Transaction) AddQuad(q quad.Quad) {\n\tad, rd := createDeltas(q)\n\n\tif _, adExists := t.deltas[ad]; !adExists {\n\t\tif _, rdExists := t.deltas[rd]; rdExists {\n\t\t\tt.deleteDelta(rd)\n\t\t} else {\n\t\t\tt.addDelta(ad)\n\t\t}\n\t}\n}\n\n\/\/ RemoveQuad adds a quad to remove to the transaction.\n\/\/ The quad will be removed from the database if it is not present in the\n\/\/ transaction, otherwise it simply remove it from the transaction.\nfunc (t *Transaction) RemoveQuad(q quad.Quad) {\n\tad, rd := createDeltas(q)\n\n\tif _, adExists := t.deltas[ad]; adExists {\n\t\tt.deleteDelta(ad)\n\t} else {\n\t\tif _, rdExists := t.deltas[rd]; !rdExists {\n\t\t\tt.addDelta(rd)\n\t\t}\n\t}\n}\n\nfunc createDeltas(q quad.Quad) (ad, rd Delta) {\n\tad = Delta{\n\t\tQuad: q,\n\t\tAction: Add,\n\t}\n\trd = Delta{\n\t\tQuad: q,\n\t\tAction: Delete,\n\t}\n\treturn\n}\n\nfunc (t *Transaction) addDelta(d Delta) {\n\tt.Deltas = append(t.Deltas, d)\n\tt.deltas[d] = struct{}{}\n}\n\nfunc (t *Transaction) deleteDelta(d Delta) {\n\tdelete(t.deltas, d)\n\n\tfor i, id := range t.Deltas {\n\t\tif id == d {\n\t\t\tt.Deltas = append(t.Deltas[:i], t.Deltas[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>Update transaction.go<commit_after>\/\/ Copyright 2015 The Cayley Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage graph\n\nimport \"github.com\/cayleygraph\/cayley\/quad\"\n\n\/\/ Transaction stores a bunch of Deltas to apply together in an atomic step on the database.\ntype Transaction struct {\n\t\/\/ Deltas stores the deltas in the right order\n\tDeltas []Delta\n\t\/\/ deltas stores the deltas in a map to avoid duplications\n\tdeltas map[Delta]struct{}\n}\n\n\/\/ NewTransaction initialize a new transaction.\nfunc NewTransaction() *Transaction {\n\treturn &Transaction{Deltas: make([]Delta, 0, 10), deltas: make(map[Delta]struct{}, 10)}\n}\n\n\/\/ AddQuad adds a new quad to the transaction if it is not already present in it.\n\/\/ If there is a 'remove' delta for that quad, it will remove that delta from\n\/\/ the transaction instead of actually adding the quad.\nfunc (t *Transaction) AddQuad(q quad.Quad) {\n\tad, rd := createDeltas(q)\n\n\tif _, adExists := t.deltas[ad]; !adExists {\n\t\tif _, rdExists := t.deltas[rd]; rdExists {\n\t\t\tt.deleteDelta(rd)\n\t\t} else {\n\t\t\tt.addDelta(ad)\n\t\t}\n\t}\n}\n\n\/\/ RemoveQuad adds a quad to remove to the transaction.\n\/\/ The quad will be removed from the database if it is not present in the\n\/\/ transaction, otherwise it simply remove it from the transaction.\nfunc (t *Transaction) RemoveQuad(q quad.Quad) {\n\tad, rd := createDeltas(q)\n\n\tif _, adExists := t.deltas[ad]; adExists {\n\t\tt.deleteDelta(ad)\n\t} else {\n\t\tif _, rdExists := t.deltas[rd]; !rdExists {\n\t\t\tt.addDelta(rd)\n\t\t}\n\t}\n}\n\nfunc createDeltas(q quad.Quad) (ad, rd Delta) {\n\tad = Delta{\n\t\tQuad: q,\n\t\tAction: Add,\n\t}\n\trd = Delta{\n\t\tQuad: q,\n\t\tAction: Delete,\n\t}\n\treturn\n}\n\nfunc (t *Transaction) addDelta(d Delta) {\n\tt.Deltas = append(t.Deltas, d)\n\tt.deltas[d] = struct{}{}\n}\n\nfunc (t *Transaction) deleteDelta(d Delta) {\n\tdelete(t.deltas, d)\n\n\tfor i, id := range t.Deltas {\n\t\tif id == d {\n\t\t\tt.Deltas = append(t.Deltas[:i], t.Deltas[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package vxlan\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"syscall\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/vishvananda\/netlink\"\n\n\t\"github.com\/flynn\/flynn\/flannel\/pkg\/ip\"\n)\n\ntype vxlanDeviceAttrs struct {\n\tvni uint32\n\tname string\n\tvtepIndex int\n\tvtepAddr net.IP\n\tvtepPort int\n}\n\ntype vxlanDevice struct {\n\tlink *netlink.Vxlan\n}\n\nfunc newVXLANDevice(devAttrs *vxlanDeviceAttrs) (*vxlanDevice, error) {\n\tlink := &netlink.Vxlan{\n\t\tLinkAttrs: netlink.LinkAttrs{\n\t\t\tName: devAttrs.name,\n\t\t},\n\t\tVxlanId: int(devAttrs.vni),\n\t\tVtepDevIndex: devAttrs.vtepIndex,\n\t\tSrcAddr: devAttrs.vtepAddr,\n\t\tPort: devAttrs.vtepPort,\n\t\tLearning: false,\n\t}\n\n\tlink, err := ensureLink(link)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &vxlanDevice{\n\t\tlink: link,\n\t}, nil\n}\n\nfunc ensureLink(vxlan *netlink.Vxlan) (*netlink.Vxlan, error) {\n\terr := netlink.LinkAdd(vxlan)\n\tif err == syscall.EEXIST {\n\t\t\/\/ it's ok if the device already exists as long as config is similar\n\t\texisting, err := netlink.LinkByName(vxlan.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tincompat := vxlanLinksIncompat(vxlan, existing)\n\t\tif incompat == \"\" {\n\t\t\treturn existing.(*netlink.Vxlan), nil\n\t\t}\n\n\t\t\/\/ delete existing\n\t\tlog.Warningf(\"%q already exists with incompatible configuration: %v; recreating device\", vxlan.Name, incompat)\n\t\tif err = netlink.LinkDel(existing); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to delete interface: %v\", err)\n\t\t}\n\n\t\t\/\/ create new\n\t\tif err = netlink.LinkAdd(vxlan); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create vxlan interface: %v\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tifindex := vxlan.Index\n\tlink, err := netlink.LinkByIndex(vxlan.Index)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't locate created vxlan device with index %v\", ifindex)\n\t}\n\tvar ok bool\n\tif vxlan, ok = link.(*netlink.Vxlan); !ok {\n\t\treturn nil, fmt.Errorf(\"created vxlan device with index %v is not vxlan\", ifindex)\n\t}\n\n\treturn vxlan, nil\n}\n\nfunc (dev *vxlanDevice) Configure(ipn ip.IP4Net) error {\n\tif err := setAddr4(dev.link, ipn.ToIPNet()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := netlink.LinkSetUp(dev.link); err != nil {\n\t\treturn fmt.Errorf(\"failed to set interface %s to UP state: %s\", dev.link.Attrs().Name, err)\n\t}\n\n\t\/\/ explicitly add a route since there might be a route for a subnet already\n\t\/\/ installed by Docker and then it won't get auto added\n\troute := netlink.Route{\n\t\tLinkIndex: dev.link.Attrs().Index,\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: ipn.Network().ToIPNet(),\n\t}\n\tif err := netlink.RouteAdd(&route); err != nil && err != syscall.EEXIST {\n\t\treturn fmt.Errorf(\"failed to add route (%s -> %s): %v\", ipn.Network().String(), dev.link.Attrs().Name, err)\n\t}\n\n\treturn nil\n}\n\nfunc (dev *vxlanDevice) Destroy() {\n\tnetlink.LinkDel(dev.link)\n}\n\nfunc (dev *vxlanDevice) MACAddr() net.HardwareAddr {\n\treturn dev.link.HardwareAddr\n}\n\nfunc (dev *vxlanDevice) MTU() int {\n\treturn dev.link.MTU\n}\n\ntype neigh struct {\n\tMAC net.HardwareAddr\n\tIP ip.IP4\n}\n\nfunc (dev *vxlanDevice) AddL2(n neigh) error {\n\tlog.Infof(\"calling NeighAdd: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighAdd(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tFamily: syscall.AF_BRIDGE,\n\t\tFlags: netlink.NTF_SELF,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) DelL2(n neigh) error {\n\tlog.Infof(\"calling NeighDel: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighDel(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tFamily: syscall.AF_BRIDGE,\n\t\tFlags: netlink.NTF_SELF,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) AddL3(n neigh) error {\n\tlog.Infof(\"calling NeighSet: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighSet(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tType: syscall.RTN_UNICAST,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) DelL3(n neigh) error {\n\tlog.Infof(\"calling NeighDel: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighDel(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tType: syscall.RTN_UNICAST,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) AddRoute(subnet ip.IP4Net) error {\n\troute := &netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: subnet.ToIPNet(),\n\t\tGw: subnet.IP.ToIP(),\n\t}\n\n\tlog.Infof(\"calling RouteAdd: %s\", subnet)\n\treturn netlink.RouteAdd(route)\n}\n\nfunc (dev *vxlanDevice) DelRoute(subnet ip.IP4Net) error {\n\troute := &netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: subnet.ToIPNet(),\n\t\tGw: subnet.IP.ToIP(),\n\t}\n\tlog.Infof(\"calling RouteDel: %s\", subnet)\n\treturn netlink.RouteDel(route)\n}\n\nfunc vxlanLinksIncompat(l1, l2 netlink.Link) string {\n\tif l1.Type() != l2.Type() {\n\t\treturn fmt.Sprintf(\"link type: %v vs %v\", l1.Type(), l2.Type())\n\t}\n\n\tv1 := l1.(*netlink.Vxlan)\n\tv2 := l2.(*netlink.Vxlan)\n\n\tif v1.VxlanId != v2.VxlanId {\n\t\treturn fmt.Sprintf(\"vni: %v vs %v\", v1.VxlanId, v2.VxlanId)\n\t}\n\n\tif v1.VtepDevIndex > 0 && v2.VtepDevIndex > 0 && v1.VtepDevIndex != v2.VtepDevIndex {\n\t\treturn fmt.Sprintf(\"vtep (external) interface: %v vs %v\", v1.VtepDevIndex, v2.VtepDevIndex)\n\t}\n\n\tif len(v1.SrcAddr) > 0 && len(v2.SrcAddr) > 0 && !v1.SrcAddr.Equal(v2.SrcAddr) {\n\t\treturn fmt.Sprintf(\"vtep (external) IP: %v vs %v\", v1.SrcAddr, v2.SrcAddr)\n\t}\n\n\tif len(v1.Group) > 0 && len(v2.Group) > 0 && !v1.Group.Equal(v2.Group) {\n\t\treturn fmt.Sprintf(\"group address: %v vs %v\", v1.Group, v2.Group)\n\t}\n\n\tif v1.L2miss != v2.L2miss {\n\t\treturn fmt.Sprintf(\"l2miss: %v vs %v\", v1.L2miss, v2.L2miss)\n\t}\n\n\tif v1.Port > 0 && v2.Port > 0 && v1.Port != v2.Port {\n\t\treturn fmt.Sprintf(\"port: %v vs %v\", v1.Port, v2.Port)\n\t}\n\n\treturn \"\"\n}\n\n\/\/ sets IP4 addr on link removing any existing ones first\nfunc setAddr4(link *netlink.Vxlan, ipn *net.IPNet) error {\n\taddrs, err := netlink.AddrList(link, syscall.AF_INET)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taddr := netlink.Addr{ipn, \"\"}\n\texisting := false\n\tfor _, old := range addrs {\n\t\tif old.IPNet.String() == addr.IPNet.String() {\n\t\t\texisting = true\n\t\t\tcontinue\n\t\t}\n\t\tif err = netlink.AddrDel(link, &old); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete IPv4 addr %s from %s\", old.String(), link.Attrs().Name)\n\t\t}\n\t}\n\n\tif !existing {\n\t\tif err = netlink.AddrAdd(link, &addr); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add IP address %s to %s: %s\", ipn.String(), link.Attrs().Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>flannel: Use field names in netlink addr<commit_after>package vxlan\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"syscall\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/vishvananda\/netlink\"\n\n\t\"github.com\/flynn\/flynn\/flannel\/pkg\/ip\"\n)\n\ntype vxlanDeviceAttrs struct {\n\tvni uint32\n\tname string\n\tvtepIndex int\n\tvtepAddr net.IP\n\tvtepPort int\n}\n\ntype vxlanDevice struct {\n\tlink *netlink.Vxlan\n}\n\nfunc newVXLANDevice(devAttrs *vxlanDeviceAttrs) (*vxlanDevice, error) {\n\tlink := &netlink.Vxlan{\n\t\tLinkAttrs: netlink.LinkAttrs{\n\t\t\tName: devAttrs.name,\n\t\t},\n\t\tVxlanId: int(devAttrs.vni),\n\t\tVtepDevIndex: devAttrs.vtepIndex,\n\t\tSrcAddr: devAttrs.vtepAddr,\n\t\tPort: devAttrs.vtepPort,\n\t\tLearning: false,\n\t}\n\n\tlink, err := ensureLink(link)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &vxlanDevice{\n\t\tlink: link,\n\t}, nil\n}\n\nfunc ensureLink(vxlan *netlink.Vxlan) (*netlink.Vxlan, error) {\n\terr := netlink.LinkAdd(vxlan)\n\tif err == syscall.EEXIST {\n\t\t\/\/ it's ok if the device already exists as long as config is similar\n\t\texisting, err := netlink.LinkByName(vxlan.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tincompat := vxlanLinksIncompat(vxlan, existing)\n\t\tif incompat == \"\" {\n\t\t\treturn existing.(*netlink.Vxlan), nil\n\t\t}\n\n\t\t\/\/ delete existing\n\t\tlog.Warningf(\"%q already exists with incompatible configuration: %v; recreating device\", vxlan.Name, incompat)\n\t\tif err = netlink.LinkDel(existing); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to delete interface: %v\", err)\n\t\t}\n\n\t\t\/\/ create new\n\t\tif err = netlink.LinkAdd(vxlan); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create vxlan interface: %v\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tifindex := vxlan.Index\n\tlink, err := netlink.LinkByIndex(vxlan.Index)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't locate created vxlan device with index %v\", ifindex)\n\t}\n\tvar ok bool\n\tif vxlan, ok = link.(*netlink.Vxlan); !ok {\n\t\treturn nil, fmt.Errorf(\"created vxlan device with index %v is not vxlan\", ifindex)\n\t}\n\n\treturn vxlan, nil\n}\n\nfunc (dev *vxlanDevice) Configure(ipn ip.IP4Net) error {\n\tif err := setAddr4(dev.link, ipn.ToIPNet()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := netlink.LinkSetUp(dev.link); err != nil {\n\t\treturn fmt.Errorf(\"failed to set interface %s to UP state: %s\", dev.link.Attrs().Name, err)\n\t}\n\n\t\/\/ explicitly add a route since there might be a route for a subnet already\n\t\/\/ installed by Docker and then it won't get auto added\n\troute := netlink.Route{\n\t\tLinkIndex: dev.link.Attrs().Index,\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: ipn.Network().ToIPNet(),\n\t}\n\tif err := netlink.RouteAdd(&route); err != nil && err != syscall.EEXIST {\n\t\treturn fmt.Errorf(\"failed to add route (%s -> %s): %v\", ipn.Network().String(), dev.link.Attrs().Name, err)\n\t}\n\n\treturn nil\n}\n\nfunc (dev *vxlanDevice) Destroy() {\n\tnetlink.LinkDel(dev.link)\n}\n\nfunc (dev *vxlanDevice) MACAddr() net.HardwareAddr {\n\treturn dev.link.HardwareAddr\n}\n\nfunc (dev *vxlanDevice) MTU() int {\n\treturn dev.link.MTU\n}\n\ntype neigh struct {\n\tMAC net.HardwareAddr\n\tIP ip.IP4\n}\n\nfunc (dev *vxlanDevice) AddL2(n neigh) error {\n\tlog.Infof(\"calling NeighAdd: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighAdd(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tFamily: syscall.AF_BRIDGE,\n\t\tFlags: netlink.NTF_SELF,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) DelL2(n neigh) error {\n\tlog.Infof(\"calling NeighDel: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighDel(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tFamily: syscall.AF_BRIDGE,\n\t\tFlags: netlink.NTF_SELF,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) AddL3(n neigh) error {\n\tlog.Infof(\"calling NeighSet: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighSet(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tType: syscall.RTN_UNICAST,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) DelL3(n neigh) error {\n\tlog.Infof(\"calling NeighDel: %v, %v\", n.IP, n.MAC)\n\treturn netlink.NeighDel(&netlink.Neigh{\n\t\tLinkIndex: dev.link.Index,\n\t\tState: netlink.NUD_PERMANENT,\n\t\tType: syscall.RTN_UNICAST,\n\t\tIP: n.IP.ToIP(),\n\t\tHardwareAddr: n.MAC,\n\t})\n}\n\nfunc (dev *vxlanDevice) AddRoute(subnet ip.IP4Net) error {\n\troute := &netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: subnet.ToIPNet(),\n\t\tGw: subnet.IP.ToIP(),\n\t}\n\n\tlog.Infof(\"calling RouteAdd: %s\", subnet)\n\treturn netlink.RouteAdd(route)\n}\n\nfunc (dev *vxlanDevice) DelRoute(subnet ip.IP4Net) error {\n\troute := &netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: subnet.ToIPNet(),\n\t\tGw: subnet.IP.ToIP(),\n\t}\n\tlog.Infof(\"calling RouteDel: %s\", subnet)\n\treturn netlink.RouteDel(route)\n}\n\nfunc vxlanLinksIncompat(l1, l2 netlink.Link) string {\n\tif l1.Type() != l2.Type() {\n\t\treturn fmt.Sprintf(\"link type: %v vs %v\", l1.Type(), l2.Type())\n\t}\n\n\tv1 := l1.(*netlink.Vxlan)\n\tv2 := l2.(*netlink.Vxlan)\n\n\tif v1.VxlanId != v2.VxlanId {\n\t\treturn fmt.Sprintf(\"vni: %v vs %v\", v1.VxlanId, v2.VxlanId)\n\t}\n\n\tif v1.VtepDevIndex > 0 && v2.VtepDevIndex > 0 && v1.VtepDevIndex != v2.VtepDevIndex {\n\t\treturn fmt.Sprintf(\"vtep (external) interface: %v vs %v\", v1.VtepDevIndex, v2.VtepDevIndex)\n\t}\n\n\tif len(v1.SrcAddr) > 0 && len(v2.SrcAddr) > 0 && !v1.SrcAddr.Equal(v2.SrcAddr) {\n\t\treturn fmt.Sprintf(\"vtep (external) IP: %v vs %v\", v1.SrcAddr, v2.SrcAddr)\n\t}\n\n\tif len(v1.Group) > 0 && len(v2.Group) > 0 && !v1.Group.Equal(v2.Group) {\n\t\treturn fmt.Sprintf(\"group address: %v vs %v\", v1.Group, v2.Group)\n\t}\n\n\tif v1.L2miss != v2.L2miss {\n\t\treturn fmt.Sprintf(\"l2miss: %v vs %v\", v1.L2miss, v2.L2miss)\n\t}\n\n\tif v1.Port > 0 && v2.Port > 0 && v1.Port != v2.Port {\n\t\treturn fmt.Sprintf(\"port: %v vs %v\", v1.Port, v2.Port)\n\t}\n\n\treturn \"\"\n}\n\n\/\/ sets IP4 addr on link removing any existing ones first\nfunc setAddr4(link *netlink.Vxlan, ipn *net.IPNet) error {\n\taddrs, err := netlink.AddrList(link, syscall.AF_INET)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taddr := netlink.Addr{IPNet: ipn}\n\texisting := false\n\tfor _, old := range addrs {\n\t\tif old.IPNet.String() == addr.IPNet.String() {\n\t\t\texisting = true\n\t\t\tcontinue\n\t\t}\n\t\tif err = netlink.AddrDel(link, &old); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete IPv4 addr %s from %s\", old.String(), link.Attrs().Name)\n\t\t}\n\t}\n\n\tif !existing {\n\t\tif err = netlink.AddrAdd(link, &addr); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add IP address %s to %s: %s\", ipn.String(), link.Attrs().Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Stefan Nyman.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage hellosign\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/ajg\/form\"\n)\n\nconst (\n\tbaseURL string = \"https:\/\/api.hellosign.com\/v3\"\n\tcontentType = \"content-type\"\n\txRatelimitLimit = \"x-Ratelimit-Limit\"\n\txRatelimitLimitRemaining = \"x-Ratelimit-Limit-Remaining\"\n\txRateLimitReset = \"x-Ratelimit-Reset\"\n)\n\n\/\/ ListInfo struct with properties for all list epts.\ntype ListInfo struct {\n\tPage uint64 `json:\"page\"`\n\tNumPages uint64 `json:\"num_pages\"`\n\tNumResults uint64 `json:\"num_results\"`\n\tPageSize uint64 `json:\"page_size\"`\n}\n\n\/\/ ListParms struct with options for performing list operations.\ntype ListParms struct {\n\tAccountID *string `form:\"account_id,omitempty\"`\n\tPage *uint64 `form:\"page,omitempty\"`\n\tPageSize *uint64 `form:\"page_size,omitempty\"`\n\tQuery *string `form:\"query,omitempty\"`\n}\n\n\/\/ FormField a field where some kind of action needs to be taken.\ntype FormField struct {\n\tAPIID string `json:\"api_id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tX uint64 `json:\"x\"`\n\tY uint64 `json:\"y\"`\n\tWidth uint64 `json:\"width\"`\n\tHeight uint64 `json:\"height\"`\n\tRequired bool `json:\"required\"`\n}\n\n\/\/ APIErr an error returned from the Hellosign API.\ntype APIErr struct {\n\tCode int \/\/ HTTP response code\n\tMessage string\n\tName string\n}\n\n\/\/ APIWarn a list of warnings returned from the HelloSign API.\ntype APIWarn struct {\n\tCode int \/\/ HTTP response code\n\tWarnings []struct {\n\t\tMessage string\n\t\tName string\n\t}\n}\n\nfunc (a APIErr) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", a.Name, a.Message)\n}\n\nfunc (a APIWarn) Error() string {\n\toutMsg := \"\"\n\tfor _, w := range a.Warnings {\n\t\toutMsg += fmt.Sprintf(\"%s: %s\\n\", w.Name, w.Message)\n\t}\n\treturn outMsg\n}\n\ntype hellosign struct {\n\tapiKey string\n\tRateLimit uint64 \/\/ Number of requests allowed per hour\n\tRateLimitRemaining uint64 \/\/ Remaining number of requests this hour\n\tRateLimitReset uint64 \/\/ When the limit will be reset. In seconds from epoch\n\tLastStatusCode int\n}\n\n\/\/ Initializes a new Hellosign API client.\nfunc newHellosign(apiKey string) *hellosign {\n\treturn &hellosign{\n\t\tapiKey: apiKey,\n\t}\n}\n\nfunc (c *hellosign) perform(req *http.Request) (*http.Response, error) {\n\treq.Header.Add(\"accept\", \"application\/json\")\n\treq.SetBasicAuth(c.apiKey, \"\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.LastStatusCode = resp.StatusCode\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, c.parseResponseError(resp)\n\t}\n\tfor _, hk := range []string{xRatelimitLimit, xRatelimitLimitRemaining, xRateLimitReset} {\n\t\thv := resp.Header.Get(hk)\n\t\tif hv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\thvui, pErr := strconv.ParseUint(hv, 10, 64)\n\t\tif pErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch hk {\n\t\tcase xRatelimitLimit:\n\t\t\tc.RateLimit = hvui\n\t\tcase xRatelimitLimitRemaining:\n\t\t\tc.RateLimitRemaining = hvui\n\t\tcase xRateLimitReset:\n\t\t\tc.RateLimitReset = hvui\n\t\t}\n\t}\n\treturn resp, err\n}\n\nfunc (c *hellosign) parseResponseError(resp *http.Response) error {\n\te := &struct {\n\t\tErr struct {\n\t\t\tMsg *string `json:\"error_msg\"`\n\t\t\tName *string `json:\"error_name\"`\n\t\t} `json:\"error\"`\n\t}{}\n\tw := &struct {\n\t\tWarnings []struct {\n\t\t\tMsg *string `json:\"warning_msg\"`\n\t\t\tName *string `json:\"warning_name\"`\n\t\t} `json:\"warnings\"`\n\t}{}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(b, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif e.Err.Name != nil {\n\t\treturn APIErr{Code: resp.StatusCode, Message: *e.Err.Msg, Name: *e.Err.Name}\n\t}\n\terr = json.Unmarshal(b, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(w.Warnings) == 0 {\n\t\treturn errors.New(\"Could not parse response error or warning\")\n\t}\n\tretErr := APIWarn{}\n\twarns := []struct {\n\t\tName string\n\t\tMessage string\n\t}{}\n\tfor _, w := range w.Warnings {\n\t\twarns = append(warns, struct {\n\t\t\tName string\n\t\t\tMessage string\n\t\t}{\n\t\t\tName: *w.Name,\n\t\t\tMessage: *w.Msg,\n\t\t})\n\t}\n\treturn retErr\n}\n\nfunc (c *hellosign) parseResponse(resp *http.Response, dst interface{}) error {\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\td := json.NewDecoder(resp.Body)\n\t\td.UseNumber()\n\t\treturn d.Decode(dst)\n\t}\n\treturn errors.New(\"Status code invalid\")\n}\n\nfunc (c *hellosign) postForm(ept string, o interface{}) (*http.Response, error) {\n\tv := \"\"\n\tif o != nil {\n\t\tencoded, err := form.EncodeToString(o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tv = encoded\n\t}\n\treq, err := http.NewRequest(http.MethodPost, c.getEptURL(ept), strings.NewReader(v))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(contentType, \"application\/x-www-form-urlencoded\")\n\treturn c.perform(req)\n}\n\nfunc (c *hellosign) postFormAndParse(ept string, inp, dst interface{}) (err error) {\n\tresp, err := c.postForm(ept, inp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\treturn c.parseResponse(resp, dst)\n}\n\nfunc (c *hellosign) postEmptyExpect(ept string, expected int) (ok bool, err error) {\n\tresp, err := c.postForm(ept, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\tif resp.StatusCode != expected {\n\t\treturn false, errors.New(resp.Status)\n\t}\n\treturn true, nil\n}\n\nfunc (c *hellosign) delete(ept string) (*http.Response, error) {\n\treq, err := http.NewRequest(http.MethodDelete, c.getEptURL(ept), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.perform(req)\n}\n\n\/\/ GetEptURL returns the full HelloSign api url for a given endpoint.\nfunc GetEptURL(ept string) string {\n\treturn fmt.Sprintf(\"%s\/%s\", baseURL, ept)\n}\n\nfunc (c *hellosign) getEptURL(ept string) string {\n\treturn GetEptURL(ept)\n}\n\nfunc (c *hellosign) get(ept string, params *string) (*http.Response, error) {\n\turl := c.getEptURL(ept)\n\tif params != nil && *params == \"\" {\n\t\turl = fmt.Sprintf(\"%s?%s\", url, *params)\n\t}\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.perform(req)\n\treturn resp, err\n}\n\nfunc (c *hellosign) getAndParse(ept string, params *string, dst interface{}) (err error) {\n\tresp, err := c.get(ept, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\treturn c.parseResponse(resp, dst)\n}\n\nfunc (c *hellosign) getFiles(ept, fileType string, getURL bool) (body []byte, fileURL *FileURL, err error) {\n\tif fileType != \"\" && fileType != \"pdf\" && fileType != \"zip\" {\n\t\treturn []byte{}, nil, errors.New(\"Invalid file type specified, pdf or zip\")\n\t}\n\tparms, err := form.EncodeToString(&struct {\n\t\tFileType string `form:\"file_type,omitempty\"`\n\t\tGetURL bool `form:\"get_url,omitempty\"`\n\t}{\n\t\tFileType: fileType,\n\t\tGetURL: getURL,\n\t})\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\tresp, err := c.get(ept, &parms)\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn []byte{}, nil, errors.New(resp.Status)\n\t}\n\tif getURL {\n\t\tmsg := &FileURL{}\n\t\tif respErr := c.parseResponse(resp, msg); respErr != nil {\n\t\t\treturn []byte{}, nil, respErr\n\t\t}\n\t\treturn []byte{}, msg, nil\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\treturn b, nil, nil\n}\n\nfunc (c *hellosign) list(ept string, parms ListParms, out interface{}) error {\n\tparamString, err := form.EncodeToString(parms)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.getAndParse(ept, ¶mString, out); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>change list parms into non-pointers, fix logic error in get<commit_after>\/\/ Copyright 2016 Stefan Nyman.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage hellosign\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/ajg\/form\"\n)\n\nconst (\n\tbaseURL string = \"https:\/\/api.hellosign.com\/v3\"\n\tcontentType = \"content-type\"\n\txRatelimitLimit = \"x-Ratelimit-Limit\"\n\txRatelimitLimitRemaining = \"x-Ratelimit-Limit-Remaining\"\n\txRateLimitReset = \"x-Ratelimit-Reset\"\n)\n\n\/\/ ListInfo struct with properties for all list epts.\ntype ListInfo struct {\n\tPage uint64 `json:\"page\"`\n\tNumPages uint64 `json:\"num_pages\"`\n\tNumResults uint64 `json:\"num_results\"`\n\tPageSize uint64 `json:\"page_size\"`\n}\n\n\/\/ ListParms struct with options for performing list operations.\ntype ListParms struct {\n\tAccountID string `form:\"account_id,omitempty\"`\n\tPage uint64 `form:\"page,omitempty\"`\n\tPageSize uint64 `form:\"page_size,omitempty\"`\n\tQuery string `form:\"query,omitempty\"`\n}\n\n\/\/ FormField a field where some kind of action needs to be taken.\ntype FormField struct {\n\tAPIID string `json:\"api_id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tX uint64 `json:\"x\"`\n\tY uint64 `json:\"y\"`\n\tWidth uint64 `json:\"width\"`\n\tHeight uint64 `json:\"height\"`\n\tRequired bool `json:\"required\"`\n}\n\n\/\/ APIErr an error returned from the Hellosign API.\ntype APIErr struct {\n\tCode int \/\/ HTTP response code\n\tMessage string\n\tName string\n}\n\n\/\/ APIWarn a list of warnings returned from the HelloSign API.\ntype APIWarn struct {\n\tCode int \/\/ HTTP response code\n\tWarnings []struct {\n\t\tMessage string\n\t\tName string\n\t}\n}\n\nfunc (a APIErr) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", a.Name, a.Message)\n}\n\nfunc (a APIWarn) Error() string {\n\toutMsg := \"\"\n\tfor _, w := range a.Warnings {\n\t\toutMsg += fmt.Sprintf(\"%s: %s\\n\", w.Name, w.Message)\n\t}\n\treturn outMsg\n}\n\ntype hellosign struct {\n\tapiKey string\n\tRateLimit uint64 \/\/ Number of requests allowed per hour\n\tRateLimitRemaining uint64 \/\/ Remaining number of requests this hour\n\tRateLimitReset uint64 \/\/ When the limit will be reset. In seconds from epoch\n\tLastStatusCode int\n}\n\n\/\/ Initializes a new Hellosign API client.\nfunc newHellosign(apiKey string) *hellosign {\n\treturn &hellosign{\n\t\tapiKey: apiKey,\n\t}\n}\n\nfunc (c *hellosign) perform(req *http.Request) (*http.Response, error) {\n\treq.Header.Add(\"accept\", \"application\/json\")\n\treq.SetBasicAuth(c.apiKey, \"\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.LastStatusCode = resp.StatusCode\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, c.parseResponseError(resp)\n\t}\n\tfor _, hk := range []string{xRatelimitLimit, xRatelimitLimitRemaining, xRateLimitReset} {\n\t\thv := resp.Header.Get(hk)\n\t\tif hv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\thvui, pErr := strconv.ParseUint(hv, 10, 64)\n\t\tif pErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch hk {\n\t\tcase xRatelimitLimit:\n\t\t\tc.RateLimit = hvui\n\t\tcase xRatelimitLimitRemaining:\n\t\t\tc.RateLimitRemaining = hvui\n\t\tcase xRateLimitReset:\n\t\t\tc.RateLimitReset = hvui\n\t\t}\n\t}\n\treturn resp, err\n}\n\nfunc (c *hellosign) parseResponseError(resp *http.Response) error {\n\te := &struct {\n\t\tErr struct {\n\t\t\tMsg *string `json:\"error_msg\"`\n\t\t\tName *string `json:\"error_name\"`\n\t\t} `json:\"error\"`\n\t}{}\n\tw := &struct {\n\t\tWarnings []struct {\n\t\t\tMsg *string `json:\"warning_msg\"`\n\t\t\tName *string `json:\"warning_name\"`\n\t\t} `json:\"warnings\"`\n\t}{}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(b, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif e.Err.Name != nil {\n\t\treturn APIErr{Code: resp.StatusCode, Message: *e.Err.Msg, Name: *e.Err.Name}\n\t}\n\terr = json.Unmarshal(b, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(w.Warnings) == 0 {\n\t\treturn errors.New(\"Could not parse response error or warning\")\n\t}\n\tretErr := APIWarn{}\n\twarns := []struct {\n\t\tName string\n\t\tMessage string\n\t}{}\n\tfor _, w := range w.Warnings {\n\t\twarns = append(warns, struct {\n\t\t\tName string\n\t\t\tMessage string\n\t\t}{\n\t\t\tName: *w.Name,\n\t\t\tMessage: *w.Msg,\n\t\t})\n\t}\n\treturn retErr\n}\n\nfunc (c *hellosign) parseResponse(resp *http.Response, dst interface{}) error {\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\td := json.NewDecoder(resp.Body)\n\t\td.UseNumber()\n\t\treturn d.Decode(dst)\n\t}\n\treturn errors.New(\"Status code invalid\")\n}\n\nfunc (c *hellosign) postForm(ept string, o interface{}) (*http.Response, error) {\n\tv := \"\"\n\tif o != nil {\n\t\tencoded, err := form.EncodeToString(o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tv = encoded\n\t}\n\treq, err := http.NewRequest(http.MethodPost, c.getEptURL(ept), strings.NewReader(v))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(contentType, \"application\/x-www-form-urlencoded\")\n\treturn c.perform(req)\n}\n\nfunc (c *hellosign) postFormAndParse(ept string, inp, dst interface{}) (err error) {\n\tresp, err := c.postForm(ept, inp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\treturn c.parseResponse(resp, dst)\n}\n\nfunc (c *hellosign) postEmptyExpect(ept string, expected int) (ok bool, err error) {\n\tresp, err := c.postForm(ept, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\tif resp.StatusCode != expected {\n\t\treturn false, errors.New(resp.Status)\n\t}\n\treturn true, nil\n}\n\nfunc (c *hellosign) delete(ept string) (*http.Response, error) {\n\treq, err := http.NewRequest(http.MethodDelete, c.getEptURL(ept), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.perform(req)\n}\n\n\/\/ GetEptURL returns the full HelloSign api url for a given endpoint.\nfunc GetEptURL(ept string) string {\n\treturn fmt.Sprintf(\"%s\/%s\", baseURL, ept)\n}\n\nfunc (c *hellosign) getEptURL(ept string) string {\n\treturn GetEptURL(ept)\n}\n\nfunc (c *hellosign) get(ept string, params *string) (*http.Response, error) {\n\turl := c.getEptURL(ept)\n\tif params != nil && *params != \"\" {\n\t\turl = fmt.Sprintf(\"%s?%s\", url, *params)\n\t}\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.perform(req)\n\treturn resp, err\n}\n\nfunc (c *hellosign) getAndParse(ept string, params *string, dst interface{}) (err error) {\n\tresp, err := c.get(ept, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\treturn c.parseResponse(resp, dst)\n}\n\nfunc (c *hellosign) getFiles(ept, fileType string, getURL bool) (body []byte, fileURL *FileURL, err error) {\n\tif fileType != \"\" && fileType != \"pdf\" && fileType != \"zip\" {\n\t\treturn []byte{}, nil, errors.New(\"Invalid file type specified, pdf or zip\")\n\t}\n\tparms, err := form.EncodeToString(&struct {\n\t\tFileType string `form:\"file_type,omitempty\"`\n\t\tGetURL bool `form:\"get_url,omitempty\"`\n\t}{\n\t\tFileType: fileType,\n\t\tGetURL: getURL,\n\t})\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\tresp, err := c.get(ept, &parms)\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\tdefer func() { err = resp.Body.Close() }()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn []byte{}, nil, errors.New(resp.Status)\n\t}\n\tif getURL {\n\t\tmsg := &FileURL{}\n\t\tif respErr := c.parseResponse(resp, msg); respErr != nil {\n\t\t\treturn []byte{}, nil, respErr\n\t\t}\n\t\treturn []byte{}, msg, nil\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, nil, err\n\t}\n\treturn b, nil, nil\n}\n\nfunc (c *hellosign) list(ept string, parms ListParms, out interface{}) error {\n\tparamString, err := form.EncodeToString(parms)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.getAndParse(ept, ¶mString, out); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package rpcdriver\n\nimport (\n\t\"encoding\/gob\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"runtime\/debug\"\n\n\t\"github.com\/rancher\/machine\/libmachine\/drivers\"\n\t\"github.com\/rancher\/machine\/libmachine\/log\"\n\t\"github.com\/rancher\/machine\/libmachine\/mcnflag\"\n\t\"github.com\/rancher\/machine\/libmachine\/state\"\n\t\"github.com\/rancher\/machine\/libmachine\/version\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype Stacker interface {\n\tStack() []byte\n}\n\ntype StandardStack struct{}\n\nfunc (ss *StandardStack) Stack() []byte {\n\treturn debug.Stack()\n}\n\nvar (\n\tstdStacker Stacker = &StandardStack{}\n)\n\nfunc init() {\n\tgob.Register(new(RPCFlags))\n\tgob.Register(new(mcnflag.IntFlag))\n\tgob.Register(new(mcnflag.StringFlag))\n\tgob.Register(new(mcnflag.StringSliceFlag))\n\tgob.Register(new(mcnflag.BoolFlag))\n\tgob.Register(new(cli.StringSlice))\n}\n\ntype RPCFlags struct {\n\tValues map[string]interface{}\n}\n\nfunc (r RPCFlags) Get(key string) interface{} {\n\tval, ok := r.Values[key]\n\tif !ok {\n\t\tlog.Warnf(\"Trying to access option %s which does not exist\", key)\n\t\tlog.Warn(\"THIS ***WILL*** CAUSE UNEXPECTED BEHAVIOR\")\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) String(key string) string {\n\tval, ok := r.Get(key).(string)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to string for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) StringSlice(key string) []string {\n\tval, ok := r.Get(key).([]string)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to string slice for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) Int(key string) int {\n\tval, ok := r.Get(key).(int)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to int for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) Bool(key string) bool {\n\tval, ok := r.Get(key).(bool)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to bool for key %s\", key)\n\t}\n\treturn val\n}\n\ntype RPCServerDriver struct {\n\tActualDriver drivers.Driver\n\tCloseCh chan bool\n\tHeartbeatCh chan bool\n}\n\nfunc NewRPCServerDriver(d drivers.Driver) *RPCServerDriver {\n\treturn &RPCServerDriver{\n\t\tActualDriver: d,\n\t\tCloseCh: make(chan bool),\n\t\tHeartbeatCh: make(chan bool),\n\t}\n}\n\nfunc (r *RPCServerDriver) Close(_, _ *struct{}) error {\n\tr.CloseCh <- true\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetVersion(_ *struct{}, reply *int) error {\n\t*reply = version.APIVersion\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetConfigRaw(_ *struct{}, reply *[]byte) error {\n\tdriverData, err := json.Marshal(r.ActualDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*reply = driverData\n\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetCreateFlags(_ *struct{}, reply *[]mcnflag.Flag) error {\n\t*reply = r.ActualDriver.GetCreateFlags()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) SetConfigRaw(data []byte, _ *struct{}) error {\n\treturn json.Unmarshal(data, &r.ActualDriver)\n}\n\nfunc trapPanic(err *error) {\n\tif r := recover(); r != nil {\n\t\t*err = fmt.Errorf(\"Panic in the driver: %s\\n%s\", r.(error), stdStacker.Stack())\n\t}\n}\n\nfunc (r *RPCServerDriver) Create(_, _ *struct{}) (err error) {\n\t\/\/ In an ideal world, plugins wouldn't ever panic. However, panics\n\t\/\/ have been known to happen and cause issues. Therefore, we recover\n\t\/\/ and do not crash the RPC server completely in the case of a panic\n\t\/\/ during create.\n\tdefer trapPanic(&err)\n\n\terr = r.ActualDriver.Create()\n\n\treturn err\n}\n\nfunc (r *RPCServerDriver) DriverName(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.DriverName()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetIP(_ *struct{}, reply *string) error {\n\tip, err := r.ActualDriver.GetIP()\n\t*reply = ip\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetMachineName(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetMachineName()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetSSHHostname(_ *struct{}, reply *string) error {\n\thostname, err := r.ActualDriver.GetSSHHostname()\n\t*reply = hostname\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetSSHKeyPath(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetSSHKeyPath()\n\treturn nil\n}\n\n\/\/ GetSSHPort returns port for use with ssh\nfunc (r *RPCServerDriver) GetSSHPort(_ *struct{}, reply *int) error {\n\tport, err := r.ActualDriver.GetSSHPort()\n\t*reply = port\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetSSHUsername(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetSSHUsername()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetURL(_ *struct{}, reply *string) error {\n\tinfo, err := r.ActualDriver.GetURL()\n\t*reply = info\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetState(_ *struct{}, reply *state.State) error {\n\ts, err := r.ActualDriver.GetState()\n\t*reply = s\n\treturn err\n}\n\nfunc (r *RPCServerDriver) Kill(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Kill()\n}\n\nfunc (r *RPCServerDriver) PreCreateCheck(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.PreCreateCheck()\n}\n\nfunc (r *RPCServerDriver) Remove(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Remove()\n}\n\nfunc (r *RPCServerDriver) Restart(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Restart()\n}\n\nfunc (r *RPCServerDriver) SetConfigFromFlags(flags *drivers.DriverOptions, _ *struct{}) error {\n\treturn r.ActualDriver.SetConfigFromFlags(*flags)\n}\n\nfunc (r *RPCServerDriver) Start(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Start()\n}\n\nfunc (r *RPCServerDriver) Stop(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Stop()\n}\n\nfunc (r *RPCServerDriver) Heartbeat(_ *struct{}, _ *struct{}) error {\n\tr.HeartbeatCh <- true\n\treturn nil\n}\n<commit_msg>Revert \"New version of urfave cli embeds StringSlice so type must be registered\"<commit_after>package rpcdriver\n\nimport (\n\t\"encoding\/gob\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"runtime\/debug\"\n\n\t\"github.com\/rancher\/machine\/libmachine\/drivers\"\n\t\"github.com\/rancher\/machine\/libmachine\/log\"\n\t\"github.com\/rancher\/machine\/libmachine\/mcnflag\"\n\t\"github.com\/rancher\/machine\/libmachine\/state\"\n\t\"github.com\/rancher\/machine\/libmachine\/version\"\n)\n\ntype Stacker interface {\n\tStack() []byte\n}\n\ntype StandardStack struct{}\n\nfunc (ss *StandardStack) Stack() []byte {\n\treturn debug.Stack()\n}\n\nvar (\n\tstdStacker Stacker = &StandardStack{}\n)\n\nfunc init() {\n\tgob.Register(new(RPCFlags))\n\tgob.Register(new(mcnflag.IntFlag))\n\tgob.Register(new(mcnflag.StringFlag))\n\tgob.Register(new(mcnflag.StringSliceFlag))\n\tgob.Register(new(mcnflag.BoolFlag))\n}\n\ntype RPCFlags struct {\n\tValues map[string]interface{}\n}\n\nfunc (r RPCFlags) Get(key string) interface{} {\n\tval, ok := r.Values[key]\n\tif !ok {\n\t\tlog.Warnf(\"Trying to access option %s which does not exist\", key)\n\t\tlog.Warn(\"THIS ***WILL*** CAUSE UNEXPECTED BEHAVIOR\")\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) String(key string) string {\n\tval, ok := r.Get(key).(string)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to string for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) StringSlice(key string) []string {\n\tval, ok := r.Get(key).([]string)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to string slice for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) Int(key string) int {\n\tval, ok := r.Get(key).(int)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to int for key %s\", key)\n\t}\n\treturn val\n}\n\nfunc (r RPCFlags) Bool(key string) bool {\n\tval, ok := r.Get(key).(bool)\n\tif !ok {\n\t\tlog.Warnf(\"Type assertion did not go smoothly to bool for key %s\", key)\n\t}\n\treturn val\n}\n\ntype RPCServerDriver struct {\n\tActualDriver drivers.Driver\n\tCloseCh chan bool\n\tHeartbeatCh chan bool\n}\n\nfunc NewRPCServerDriver(d drivers.Driver) *RPCServerDriver {\n\treturn &RPCServerDriver{\n\t\tActualDriver: d,\n\t\tCloseCh: make(chan bool),\n\t\tHeartbeatCh: make(chan bool),\n\t}\n}\n\nfunc (r *RPCServerDriver) Close(_, _ *struct{}) error {\n\tr.CloseCh <- true\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetVersion(_ *struct{}, reply *int) error {\n\t*reply = version.APIVersion\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetConfigRaw(_ *struct{}, reply *[]byte) error {\n\tdriverData, err := json.Marshal(r.ActualDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*reply = driverData\n\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetCreateFlags(_ *struct{}, reply *[]mcnflag.Flag) error {\n\t*reply = r.ActualDriver.GetCreateFlags()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) SetConfigRaw(data []byte, _ *struct{}) error {\n\treturn json.Unmarshal(data, &r.ActualDriver)\n}\n\nfunc trapPanic(err *error) {\n\tif r := recover(); r != nil {\n\t\t*err = fmt.Errorf(\"Panic in the driver: %s\\n%s\", r.(error), stdStacker.Stack())\n\t}\n}\n\nfunc (r *RPCServerDriver) Create(_, _ *struct{}) (err error) {\n\t\/\/ In an ideal world, plugins wouldn't ever panic. However, panics\n\t\/\/ have been known to happen and cause issues. Therefore, we recover\n\t\/\/ and do not crash the RPC server completely in the case of a panic\n\t\/\/ during create.\n\tdefer trapPanic(&err)\n\n\terr = r.ActualDriver.Create()\n\n\treturn err\n}\n\nfunc (r *RPCServerDriver) DriverName(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.DriverName()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetIP(_ *struct{}, reply *string) error {\n\tip, err := r.ActualDriver.GetIP()\n\t*reply = ip\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetMachineName(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetMachineName()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetSSHHostname(_ *struct{}, reply *string) error {\n\thostname, err := r.ActualDriver.GetSSHHostname()\n\t*reply = hostname\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetSSHKeyPath(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetSSHKeyPath()\n\treturn nil\n}\n\n\/\/ GetSSHPort returns port for use with ssh\nfunc (r *RPCServerDriver) GetSSHPort(_ *struct{}, reply *int) error {\n\tport, err := r.ActualDriver.GetSSHPort()\n\t*reply = port\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetSSHUsername(_ *struct{}, reply *string) error {\n\t*reply = r.ActualDriver.GetSSHUsername()\n\treturn nil\n}\n\nfunc (r *RPCServerDriver) GetURL(_ *struct{}, reply *string) error {\n\tinfo, err := r.ActualDriver.GetURL()\n\t*reply = info\n\treturn err\n}\n\nfunc (r *RPCServerDriver) GetState(_ *struct{}, reply *state.State) error {\n\ts, err := r.ActualDriver.GetState()\n\t*reply = s\n\treturn err\n}\n\nfunc (r *RPCServerDriver) Kill(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Kill()\n}\n\nfunc (r *RPCServerDriver) PreCreateCheck(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.PreCreateCheck()\n}\n\nfunc (r *RPCServerDriver) Remove(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Remove()\n}\n\nfunc (r *RPCServerDriver) Restart(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Restart()\n}\n\nfunc (r *RPCServerDriver) SetConfigFromFlags(flags *drivers.DriverOptions, _ *struct{}) error {\n\treturn r.ActualDriver.SetConfigFromFlags(*flags)\n}\n\nfunc (r *RPCServerDriver) Start(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Start()\n}\n\nfunc (r *RPCServerDriver) Stop(_ *struct{}, _ *struct{}) error {\n\treturn r.ActualDriver.Stop()\n}\n\nfunc (r *RPCServerDriver) Heartbeat(_ *struct{}, _ *struct{}) error {\n\tr.HeartbeatCh <- true\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package keys\n\nimport \"testing\"\n\nfunc TestKeyContextUnmarshalError(t *testing.T) {\n\tvar context KeyContext\n\tif err := context.UnmarshalJSON([]byte(``)); err == nil {\n\t\tt.Errorf(\"Expected error on loading empty string\")\n\t}\n}\n<commit_msg>Added license<commit_after>\/\/ Copyright 2014 The lime Authors.\n\/\/ Use of this source code is governed by a 2-clause\n\/\/ BSD-style license that can be found in the LICENSE file.\n\npackage keys\n\nimport \"testing\"\n\nfunc TestKeyContextUnmarshalError(t *testing.T) {\n\tvar context KeyContext\n\tif err := context.UnmarshalJSON([]byte(``)); err == nil {\n\t\tt.Errorf(\"Expected error on loading empty string\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package localfs\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"time\"\n\n\t\"os\"\n\n\t\"git.timschuster.info\/rls.moe\/catgi\/backend\/compl_test\"\n)\n\nfunc TestCompliance(t *testing.T) {\n\tctx := compltest.GetTestCtx()\n\ttmpDir := \"\/tmp\/test-catgi-\" + fmt.Sprintf(\"%d\", time.Now().Unix()) + \"\/\"\n\tos.MkdirAll(tmpDir, 0700)\n\tt.Log(\"Using TmpDir: \", tmpDir)\n\tlocalfs, err := NewLocalFSBackend(map[string]interface{}{\n\t\t\"root\": tmpDir,\n\t\t\"abs_root\": true,\n\t}, ctx)\n\n\tif err != nil {\n\t\tt.Log(\"Error on creating Testing Backend: \", err)\n\t\tt.FailNow()\n\t\treturn\n\t}\n\n\tcompltest.RunTestSuite(localfs, t)\n}\n<commit_msg>Skipping localfs test<commit_after>package localfs\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"time\"\n\n\t\"os\"\n\n\t\"git.timschuster.info\/rls.moe\/catgi\/backend\/compl_test\"\n)\n\nfunc TestCompliance(t *testing.T) {\n\tctx := compltest.GetTestCtx()\n\ttmpDir := \"\/tmp\/test-catgi-\" + fmt.Sprintf(\"%d\", time.Now().Unix()) + \"\/\"\n\tos.MkdirAll(tmpDir, 0700)\n\tt.Log(\"Using TmpDir: \", tmpDir)\n\tlocalfs, err := NewLocalFSBackend(map[string]interface{}{\n\t\t\"root\": tmpDir,\n\t\t\"abs_root\": true,\n\t}, ctx)\n\n\tif err != nil {\n\t\tt.Log(\"Error on creating Testing Backend: \", err)\n\t\tt.FailNow()\n\t\treturn\n\t}\n\n\tt.Skip(\"Skipping test due to incomplete implementation.\")\n\tcompltest.RunTestSuite(localfs, t)\n}\n<|endoftext|>"} {"text":"<commit_before>package statistics\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\ntype stats map[string]float64\n\n\/\/ Get ...\nfunc Get(ctx *aero.Context) string {\n\tpieCharts := getUserStats()\n\treturn ctx.HTML(components.Statistics(pieCharts))\n}\n\nfunc getUserStats() []*arn.PieChart {\n\tscreenSize := stats{}\n\tpixelRatio := stats{}\n\tbrowser := stats{}\n\tcountry := stats{}\n\tgender := stats{}\n\tos := stats{}\n\tnotifications := stats{}\n\tavatar := stats{}\n\tip := stats{}\n\tpro := stats{}\n\tconnectionType := stats{}\n\troundTripTime := stats{}\n\tdownLink := stats{}\n\n\tfor info := range arn.StreamAnalytics() {\n\t\tuser, err := arn.GetUser(info.UserID)\n\t\tarn.PanicOnError(err)\n\n\t\tif !user.IsActive() {\n\t\t\tcontinue\n\t\t}\n\n\t\tpixelRatio[fmt.Sprintf(\"%.0f\", info.Screen.PixelRatio)]++\n\n\t\tsize := arn.ToString(info.Screen.Width) + \" x \" + arn.ToString(info.Screen.Height)\n\t\tscreenSize[size]++\n\n\t\tif info.Connection.EffectiveType != \"\" {\n\t\t\tconnectionType[info.Connection.EffectiveType]++\n\t\t}\n\n\t\tif info.Connection.DownLink != 0 {\n\t\t\tdownLink[fmt.Sprintf(\"%.0f Mb\/s\", info.Connection.DownLink)]++\n\t\t}\n\n\t\tif info.Connection.RoundTripTime != 0 {\n\t\t\troundTripTime[fmt.Sprintf(\"%.0f ms\", info.Connection.RoundTripTime)]++\n\t\t}\n\t}\n\n\tfor user := range arn.StreamUsers() {\n\t\tif !user.IsActive() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif user.Gender != \"\" && user.Gender != \"other\" {\n\t\t\tgender[user.Gender]++\n\t\t}\n\n\t\tif user.Browser.Name != \"\" {\n\t\t\tbrowser[user.Browser.Name]++\n\t\t}\n\n\t\tif user.Location.CountryName != \"\" {\n\t\t\tcountry[user.Location.CountryName]++\n\t\t}\n\n\t\tif user.OS.Name != \"\" {\n\t\t\tif strings.HasPrefix(user.OS.Name, \"CrOS\") {\n\t\t\t\tuser.OS.Name = \"Chrome OS\"\n\t\t\t}\n\n\t\t\tos[user.OS.Name]++\n\t\t}\n\n\t\tif len(user.PushSubscriptions().Items) > 0 {\n\t\t\tnotifications[\"Enabled\"]++\n\t\t} else {\n\t\t\tnotifications[\"Disabled\"]++\n\t\t}\n\n\t\tif user.Avatar.Source == \"\" {\n\t\t\tavatar[\"none\"]++\n\t\t} else {\n\t\t\tavatar[user.Avatar.Source]++\n\t\t}\n\n\t\tif arn.IsIPv6(user.IP) {\n\t\t\tip[\"IPv6\"]++\n\t\t} else {\n\t\t\tip[\"IPv4\"]++\n\t\t}\n\n\t\tif user.IsPro() {\n\t\t\tpro[\"PRO accounts\"]++\n\t\t} else {\n\t\t\tpro[\"Free accounts\"]++\n\t\t}\n\t}\n\n\treturn []*arn.PieChart{\n\t\tarn.NewPieChart(\"OS\", os),\n\t\tarn.NewPieChart(\"Screen size\", screenSize),\n\t\tarn.NewPieChart(\"Browser\", browser),\n\t\tarn.NewPieChart(\"Country\", country),\n\t\tarn.NewPieChart(\"Avatar\", avatar),\n\t\tarn.NewPieChart(\"Notifications\", notifications),\n\t\tarn.NewPieChart(\"Gender\", gender),\n\t\tarn.NewPieChart(\"Pixel ratio\", pixelRatio),\n\t\tarn.NewPieChart(\"Connection\", connectionType),\n\t\tarn.NewPieChart(\"Ping\", roundTripTime),\n\t\tarn.NewPieChart(\"Download speed\", downLink),\n\t\tarn.NewPieChart(\"IP version\", ip),\n\t\tarn.NewPieChart(\"PRO accounts\", pro),\n\t}\n}\n<commit_msg>Changed order of statistics<commit_after>package statistics\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\ntype stats map[string]float64\n\n\/\/ Get ...\nfunc Get(ctx *aero.Context) string {\n\tpieCharts := getUserStats()\n\treturn ctx.HTML(components.Statistics(pieCharts))\n}\n\nfunc getUserStats() []*arn.PieChart {\n\tscreenSize := stats{}\n\tpixelRatio := stats{}\n\tbrowser := stats{}\n\tcountry := stats{}\n\tgender := stats{}\n\tos := stats{}\n\tnotifications := stats{}\n\tavatar := stats{}\n\tip := stats{}\n\tpro := stats{}\n\tconnectionType := stats{}\n\troundTripTime := stats{}\n\tdownLink := stats{}\n\n\tfor info := range arn.StreamAnalytics() {\n\t\tuser, err := arn.GetUser(info.UserID)\n\t\tarn.PanicOnError(err)\n\n\t\tif !user.IsActive() {\n\t\t\tcontinue\n\t\t}\n\n\t\tpixelRatio[fmt.Sprintf(\"%.0f\", info.Screen.PixelRatio)]++\n\n\t\tsize := arn.ToString(info.Screen.Width) + \" x \" + arn.ToString(info.Screen.Height)\n\t\tscreenSize[size]++\n\n\t\tif info.Connection.EffectiveType != \"\" {\n\t\t\tconnectionType[info.Connection.EffectiveType]++\n\t\t}\n\n\t\tif info.Connection.DownLink != 0 {\n\t\t\tdownLink[fmt.Sprintf(\"%.0f Mb\/s\", info.Connection.DownLink)]++\n\t\t}\n\n\t\tif info.Connection.RoundTripTime != 0 {\n\t\t\troundTripTime[fmt.Sprintf(\"%.0f ms\", info.Connection.RoundTripTime)]++\n\t\t}\n\t}\n\n\tfor user := range arn.StreamUsers() {\n\t\tif !user.IsActive() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif user.Gender != \"\" && user.Gender != \"other\" {\n\t\t\tgender[user.Gender]++\n\t\t}\n\n\t\tif user.Browser.Name != \"\" {\n\t\t\tbrowser[user.Browser.Name]++\n\t\t}\n\n\t\tif user.Location.CountryName != \"\" {\n\t\t\tcountry[user.Location.CountryName]++\n\t\t}\n\n\t\tif user.OS.Name != \"\" {\n\t\t\tif strings.HasPrefix(user.OS.Name, \"CrOS\") {\n\t\t\t\tuser.OS.Name = \"Chrome OS\"\n\t\t\t}\n\n\t\t\tos[user.OS.Name]++\n\t\t}\n\n\t\tif len(user.PushSubscriptions().Items) > 0 {\n\t\t\tnotifications[\"Enabled\"]++\n\t\t} else {\n\t\t\tnotifications[\"Disabled\"]++\n\t\t}\n\n\t\tif user.Avatar.Source == \"\" {\n\t\t\tavatar[\"none\"]++\n\t\t} else {\n\t\t\tavatar[user.Avatar.Source]++\n\t\t}\n\n\t\tif arn.IsIPv6(user.IP) {\n\t\t\tip[\"IPv6\"]++\n\t\t} else {\n\t\t\tip[\"IPv4\"]++\n\t\t}\n\n\t\tif user.IsPro() {\n\t\t\tpro[\"PRO accounts\"]++\n\t\t} else {\n\t\t\tpro[\"Free accounts\"]++\n\t\t}\n\t}\n\n\treturn []*arn.PieChart{\n\t\tarn.NewPieChart(\"OS\", os),\n\t\tarn.NewPieChart(\"Screen size\", screenSize),\n\t\tarn.NewPieChart(\"Browser\", browser),\n\t\tarn.NewPieChart(\"Country\", country),\n\t\tarn.NewPieChart(\"Avatar\", avatar),\n\t\tarn.NewPieChart(\"Notifications\", notifications),\n\t\tarn.NewPieChart(\"Gender\", gender),\n\t\tarn.NewPieChart(\"Pixel ratio\", pixelRatio),\n\t\tarn.NewPieChart(\"Download speed\", downLink),\n\t\tarn.NewPieChart(\"Ping\", roundTripTime),\n\t\tarn.NewPieChart(\"Connection\", connectionType),\n\t\tarn.NewPieChart(\"IP version\", ip),\n\t\tarn.NewPieChart(\"PRO accounts\", pro),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/+build linux\n\npackage audio\n\nimport (\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/yobert\/alsa\"\n)\n\ntype alsaAudio struct {\n\t*Encoding\n\t*alsa.Device\n\tbytesPerPeriod int\n\tperiod int\n\tplayProgress int\n\tstopCh chan struct{}\n\tplaying bool\n}\n\nfunc (aa *alsaAudio) Play() <-chan error {\n\tch := make(chan error)\n\t\/\/ If currently playing, restart\n\tif aa.playing {\n\t\taa.playProgress = 0\n\t\treturn\n\t}\n\taa.playing = true\n\tgo func() {\n\t\tfor {\n\t\t\tvar data []byte\n\t\t\tif len(aa.Encoding.Data)-aa.playProgress >= aa.period {\n\t\t\t\tdata = aa.Encoding.Data[aa.playProgress:]\n\t\t\t\tif aa.Loop {\n\t\t\t\t\tdelta := aa.period - (len(aa.Encoding.Data) - aa.playProgress)\n\t\t\t\t\tdata = append(data, aa.Encoding.Data[:delta])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdata = aa.Encoding.Data[aa.playProgress : aa.playProgress+aa.period]\n\t\t\t}\n\t\t\terr := aa.Device.Write(data, aa.period)\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase ch <- err:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\taa.playProgress += aa.period\n\t\t\tif aa.playProgress > len(aa.Encoding.Data) {\n\t\t\t\tif aa.Loop {\n\t\t\t\t\taa.playProgress %= len(aa.Encoding.Data)\n\t\t\t\t} else {\n\t\t\t\t\taa.playMutex.Unlock()\n\t\t\t\t\tselect {\n\t\t\t\t\tcase ch <- nil:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-aa.stopCh:\n\t\t\t\tselect {\n\t\t\t\tcase ch <- nil:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\taa.playing = false\n\t\taa.playProgress = 0\n\t}()\n\treturn ch\n}\n\nfunc (aa *alsaAudio) Stop() error {\n\tif aa.playing {\n\t\tgo func() {\n\t\t\taa.stopCh <- struct{}{}\n\t\t}()\n\t} else {\n\t\treturn errors.New(\"Audio not playing, cannot stop\")\n\t}\n\treturn nil\n}\n\nfunc (aa *alsaAudio) Filter(fs ...Filter) (Audio, error) {\n\tvar a Audio = aa\n\tvar err, consErr error\n\tfor _, f := range fs {\n\t\ta, err = f.Apply(a)\n\t\tif err != nil {\n\t\t\tif consErr == nil {\n\t\t\t\tconsErr = err\n\t\t\t} else {\n\t\t\t\tconsErr = errors.New(err.Error() + \":\" + consErr.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn aa, consErr\n}\n\n\/\/ MustFilter acts like Filter, but ignores errors (it does not panic,\n\/\/ as filter errors are expected to be non-fatal)\nfunc (aa *alsaAudio) MustFilter(fs ...Filter) Audio {\n\ta, _ := aa.Filter(fs...)\n\treturn a\n}\n\nfunc EncodeBytes(enc Encoding) (Audio, error) {\n\thandle, err := openDevice()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/err := handle.Open(\"default\", alsa.StreamTypePlayback, alsa.ModeBlock)\n\t\/\/if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/}\n\t\/\/ Todo: annotate these errors with more info\n\tformat, err := alsaFormat(enc.Bits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateFormat(format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateRate(int(enc.SampleRate))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateChannels(int(enc.Channels))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Default value at recommendation of library\n\tperiod, err := handle.NegotiatePeriodSize(2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateBufferSize(4096)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = handle.Prepare()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &alsaAudio{\n\t\tEncoding: &enc,\n\t\tDevice: handle,\n\t\tperiod: period,\n\t\tstopCh: make(chan struct{}),\n\t\tbytesPerPeriod: period * (enc.Bits \/ 8),\n\t}, nil\n}\n\nfunc openDevice() (*alsa.Device, error) {\n\tcards, err := alsa.OpenCards()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, c := range cards {\n\t\tdvcs, err := c.Devices()\n\t\tif err != nil {\n\t\t\talsa.CloseCards([]*alsa.Card{c})\n\t\t\tcontinue\n\t\t}\n\t\tfor j, d := range dvcs {\n\t\t\tif d.Type != alsa.PCM || !d.Play {\n\t\t\t\td.Close()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ We've a found a device we can hypothetically use\n\t\t\t\/\/ Close all other cards and devices\n\t\t\tfor h := j + 1; h < len(dvcs); h++ {\n\t\t\t\tdvcs[h].Close()\n\t\t\t}\n\t\t\talsa.CloseCards(cards[i+1:])\n\t\t\treturn d, d.Open()\n\t\t}\n\t\talsa.CloseCards([]*alsa.Card{c})\n\t}\n}\n\nfunc alsaFormat(bits uint16) (alsa.FormatType, error) {\n\tswitch bits {\n\tcase 8:\n\t\treturn alsa.S8, nil\n\tcase 16:\n\t\treturn alsa.S16_LE, nil\n\t}\n\treturn 0, errors.New(\"Undefined alsa format for encoding bits\")\n}\n<commit_msg>Syntax errors. Keep track of what channel play signals are on<commit_after>\/\/+build linux\n\npackage audio\n\nimport (\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/yobert\/alsa\"\n)\n\ntype alsaAudio struct {\n\t*Encoding\n\t*alsa.Device\n\tbytesPerPeriod int\n\tperiod int\n\tplayProgress int\n\tstopCh chan struct{}\n\tplaying bool\n\tplayCh chan error\n}\n\nfunc (aa *alsaAudio) Play() <-chan error {\n\t\/\/ If currently playing, restart\n\tif aa.playing {\n\t\taa.playProgress = 0\n\t\treturn aa.playCh\n\t}\n\taa.playing = true\n\taa.playCh = make(chan error)\n\tgo func() {\n\t\tfor {\n\t\t\tvar data []byte\n\t\t\tif len(aa.Encoding.Data)-aa.playProgress >= aa.period {\n\t\t\t\tdata = aa.Encoding.Data[aa.playProgress:]\n\t\t\t\tif aa.Loop {\n\t\t\t\t\tdelta := aa.period - (len(aa.Encoding.Data) - aa.playProgress)\n\t\t\t\t\tdata = append(data, aa.Encoding.Data[:delta]...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdata = aa.Encoding.Data[aa.playProgress : aa.playProgress+aa.period]\n\t\t\t}\n\t\t\terr := aa.Device.Write(data, aa.period)\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase aa.playCh <- err:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\taa.playProgress += aa.period\n\t\t\tif aa.playProgress > len(aa.Encoding.Data) {\n\t\t\t\tif aa.Loop {\n\t\t\t\t\taa.playProgress %= len(aa.Encoding.Data)\n\t\t\t\t} else {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase aa.playCh <- nil:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-aa.stopCh:\n\t\t\t\tselect {\n\t\t\t\tcase aa.playCh <- nil:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\taa.playing = false\n\t\taa.playProgress = 0\n\t}()\n\treturn aa.playCh\n}\n\nfunc (aa *alsaAudio) Stop() error {\n\tif aa.playing {\n\t\tgo func() {\n\t\t\taa.stopCh <- struct{}{}\n\t\t}()\n\t} else {\n\t\treturn errors.New(\"Audio not playing, cannot stop\")\n\t}\n\treturn nil\n}\n\nfunc (aa *alsaAudio) Filter(fs ...Filter) (Audio, error) {\n\tvar a Audio = aa\n\tvar err, consErr error\n\tfor _, f := range fs {\n\t\ta, err = f.Apply(a)\n\t\tif err != nil {\n\t\t\tif consErr == nil {\n\t\t\t\tconsErr = err\n\t\t\t} else {\n\t\t\t\tconsErr = errors.New(err.Error() + \":\" + consErr.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn aa, consErr\n}\n\n\/\/ MustFilter acts like Filter, but ignores errors (it does not panic,\n\/\/ as filter errors are expected to be non-fatal)\nfunc (aa *alsaAudio) MustFilter(fs ...Filter) Audio {\n\ta, _ := aa.Filter(fs...)\n\treturn a\n}\n\nfunc EncodeBytes(enc Encoding) (Audio, error) {\n\thandle, err := openDevice()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/err := handle.Open(\"default\", alsa.StreamTypePlayback, alsa.ModeBlock)\n\t\/\/if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/}\n\t\/\/ Todo: annotate these errors with more info\n\tformat, err := alsaFormat(enc.Bits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateFormat(format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateRate(int(enc.SampleRate))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateChannels(int(enc.Channels))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Default value at recommendation of library\n\tperiod, err := handle.NegotiatePeriodSize(2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = handle.NegotiateBufferSize(4096)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = handle.Prepare()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &alsaAudio{\n\t\tEncoding: &enc,\n\t\tDevice: handle,\n\t\tperiod: period,\n\t\tstopCh: make(chan struct{}),\n\t\tbytesPerPeriod: period * (enc.Bits \/ 8),\n\t}, nil\n}\n\nfunc openDevice() (*alsa.Device, error) {\n\tcards, err := alsa.OpenCards()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, c := range cards {\n\t\tdvcs, err := c.Devices()\n\t\tif err != nil {\n\t\t\talsa.CloseCards([]*alsa.Card{c})\n\t\t\tcontinue\n\t\t}\n\t\tfor j, d := range dvcs {\n\t\t\tif d.Type != alsa.PCM || !d.Play {\n\t\t\t\td.Close()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ We've a found a device we can hypothetically use\n\t\t\t\/\/ Close all other cards and devices\n\t\t\tfor h := j + 1; h < len(dvcs); h++ {\n\t\t\t\tdvcs[h].Close()\n\t\t\t}\n\t\t\talsa.CloseCards(cards[i+1:])\n\t\t\treturn d, d.Open()\n\t\t}\n\t\talsa.CloseCards([]*alsa.Card{c})\n\t}\n\treturn nil, errors.New(\"No valid device found\")\n}\n\nfunc alsaFormat(bits uint16) (alsa.FormatType, error) {\n\tswitch bits {\n\tcase 8:\n\t\treturn alsa.S8, nil\n\tcase 16:\n\t\treturn alsa.S16_LE, nil\n\t}\n\treturn 0, errors.New(\"Undefined alsa format for encoding bits\")\n}\n<|endoftext|>"} {"text":"<commit_before>package parser_test\n\nimport (\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/parser\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parserTestParam struct {\n\tinput []string\n\terr string\n\tres map[string]interface{}\n}\n\nvar testCases = []parserTestParam{\n\t{input: []string{}, res: map[string]interface{}{}},\n\t{\n\t\tinput: []string{`{\"P1\":\"val1\",\"P2\":[1,2],\"P3\":true,\"P4\":{\"P41\":\"val41\",\"P42\":[\"str\"]}}`},\n\t\tres: map[string]interface{}{\n\t\t\t\"P1\": \"val1\",\n\t\t\t\"P2\": []interface{}{1., 2.},\n\t\t\t\"P3\": true,\n\t\t\t\"P4\": map[string]interface{}{\"P41\": \"val41\", \"P42\": []interface{}{\"str\"}},\n\t\t},\n\t},\n\t\/\/ Parses root values without keys from JSON.\n\t{input: []string{`{\"some-key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{`{\"some-key\": \"value2\"}`, `{\"some-key\": \"value1\"}`}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Parses --some-key=value.\n\t{input: []string{\"--some-key\", \"value\"}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{\"--some-key\", \"value1\", \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t{input: []string{`{\"some-key\": \"value\"}`, \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Does not parse root values not in JSON format.\n\t{input: []string{\"value\", \"value2\"}, err: \"Invalid option format, options without keys must be in JSON format.\"},\n\t\/\/ Parses keys without values.\n\t{input: []string{\"--some-key\"}, res: map[string]interface{}{\"SomeKey\": nil}},\n\t\/\/ Does not parse key values from JSON or key1=value1,key2=value2,.. notation.\n\t{input: []string{\"--some-key\", `{\"key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": `{\"key\": \"value\"}`}},\n\t{input: []string{\"--some-key\", \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\",\n\t}},\n\t\/\/ Parses --key element1 element2 element3.\n\t{input: []string{\"--some-key\", \"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{\"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"},\n\t}},\n\t\/\/ Parses --key element1 element2 --another-key.\n\t{input: []string{\"--some-key\", `{\"key\":\"value\"}`, \"value2\", \"--another-key\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{`{\"key\":\"value\"}`, \"value2\"}, \"AnotherKey\": nil,\n\t}},\n\t\/\/ Parses a complex case.\n\t{\n\t\tinput: []string{`{\"a\":{\"b\":\"c\"}}`, \"--some-long-key\", \"--another-key\", `{\"a\":\"b\"}`, \"a=b?,c=d\", \"--yet-another-key\"},\n\t\tres: map[string]interface{}{\n\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\"B\": \"c\",\n\t\t\t},\n\t\t\t\"SomeLongKey\": nil,\n\t\t\t\"AnotherKey\": []interface{}{`{\"a\":\"b\"}`, \"a=b?,c=d\"},\n\t\t\t\"YetAnotherKey\": nil,\n\t\t},\n\t},\n}\n\nfunc TestArgumentParser(t *testing.T) {\n\tfor i, testCase := range testCases {\n\t\tt.Logf(\"Executing %d test case.\", i+1)\n\t\tres, err := parser.ParseArguments(testCase.input)\n\t\tif testCase.err != \"\" && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"Invalid error. Expected: %s, obtained %s\", testCase.err, err.Error())\n\t\t}\n\t\tif testCase.res != nil && !reflect.DeepEqual(testCase.res, res) {\n\t\t\tt.Errorf(\"Invalid result. expected %#v, obtained %#v\", testCase.res, res)\n\t\t}\n\t}\n}\n<commit_msg>fix error message parser has to return<commit_after>package parser_test\n\nimport (\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/parser\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parserTestParam struct {\n\tinput []string\n\terr string\n\tres map[string]interface{}\n}\n\nvar testCases = []parserTestParam{\n\t{input: []string{}, res: map[string]interface{}{}},\n\t{\n\t\tinput: []string{`{\"P1\":\"val1\",\"P2\":[1,2],\"P3\":true,\"P4\":{\"P41\":\"val41\",\"P42\":[\"str\"]}}`},\n\t\tres: map[string]interface{}{\n\t\t\t\"P1\": \"val1\",\n\t\t\t\"P2\": []interface{}{1., 2.},\n\t\t\t\"P3\": true,\n\t\t\t\"P4\": map[string]interface{}{\"P41\": \"val41\", \"P42\": []interface{}{\"str\"}},\n\t\t},\n\t},\n\t\/\/ Parses root values without keys from JSON.\n\t{input: []string{`{\"some-key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{`{\"some-key\": \"value2\"}`, `{\"some-key\": \"value1\"}`}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Parses --some-key=value.\n\t{input: []string{\"--some-key\", \"value\"}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{\"--some-key\", \"value1\", \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t{input: []string{`{\"some-key\": \"value\"}`, \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Does not parse root values not in JSON format.\n\t{input: []string{\"value\", \"value2\"}, err: \"Invalid JSON: value.\"},\n\t\/\/ Parses keys without values.\n\t{input: []string{\"--some-key\"}, res: map[string]interface{}{\"SomeKey\": nil}},\n\t\/\/ Does not parse key values from JSON or key1=value1,key2=value2,.. notation.\n\t{input: []string{\"--some-key\", `{\"key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": `{\"key\": \"value\"}`}},\n\t{input: []string{\"--some-key\", \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\",\n\t}},\n\t\/\/ Parses --key element1 element2 element3.\n\t{input: []string{\"--some-key\", \"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{\"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"},\n\t}},\n\t\/\/ Parses --key element1 element2 --another-key.\n\t{input: []string{\"--some-key\", `{\"key\":\"value\"}`, \"value2\", \"--another-key\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{`{\"key\":\"value\"}`, \"value2\"}, \"AnotherKey\": nil,\n\t}},\n\t\/\/ Parses a complex case.\n\t{\n\t\tinput: []string{`{\"a\":{\"b\":\"c\"}}`, \"--some-long-key\", \"--another-key\", `{\"a\":\"b\"}`, \"a=b?,c=d\", \"--yet-another-key\"},\n\t\tres: map[string]interface{}{\n\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\"B\": \"c\",\n\t\t\t},\n\t\t\t\"SomeLongKey\": nil,\n\t\t\t\"AnotherKey\": []interface{}{`{\"a\":\"b\"}`, \"a=b?,c=d\"},\n\t\t\t\"YetAnotherKey\": nil,\n\t\t},\n\t},\n}\n\nfunc TestArgumentParser(t *testing.T) {\n\tfor i, testCase := range testCases {\n\t\tt.Logf(\"Executing %d test case.\", i+1)\n\t\tres, err := parser.ParseArguments(testCase.input)\n\t\tif testCase.err != \"\" && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"Invalid error. Expected: %s, obtained %s\", testCase.err, err.Error())\n\t\t}\n\t\tif testCase.res != nil && !reflect.DeepEqual(testCase.res, res) {\n\t\t\tt.Errorf(\"Invalid result. expected %#v, obtained %#v\", testCase.res, res)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package stconverter\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype stTestCase struct {\n\tname string\n\tprogString string\n\tprog []STInstruction\n\terr error\n}\n\nvar stTestCases = []stTestCase{\n\t{\n\t\tname: \"assignment 1\",\n\t\tprogString: \"x := 1;\",\n\t\tprog: []STInstruction{\n\t\t\tSTExpressionOperator{\n\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\tArguments: []STExpression{\n\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"assignment 2\",\n\t\tprogString: \"y := x + 2;\",\n\t\tprog: []STInstruction{\n\t\t\tSTExpressionOperator{\n\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\tArguments: []STExpression{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"if\/then 1\",\n\t\tprogString: \"if y > x then y := x; end_if;\",\n\t\tprog: []STInstruction{\n\t\t\tSTIfElsIfElse{\n\t\t\t\tIfThens: []STIfThen{\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"if\/elsif\/else 1\",\n\t\tprogString: \"if y > x then y := x;\\n print(\\\"hello\\\");\\n elsif x > y then \\n a := 1 + 2 * 3; \\n else print(\\\"hi\\\"); \\n print(\\\"yes\\\"); \\n end_if;\",\n\t\tprog: []STInstruction{\n\t\t\tSTIfElsIfElse{\n\t\t\t\tIfThens: []STIfThen{\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\t\t\t\t\tOperator: findOp(stMultiply),\n\t\t\t\t\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"3\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"a\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hi\\\"\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"\\\"yes\\\"\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"switchcase 1\",\n\t\tprogString: \"case x + 1 of \\n 1: print(\\\"hello\\\"); \\n y := 2; \\n 2, 3: print(\\\"many\\\");\\nelse\\nz := 2 + 2;end_case;\",\n\t\tprog: []STInstruction{\n\t\t\tSTSwitchCase{\n\t\t\t\tSwitchOn: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCases: []STCase{\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"1\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"2\", \"3\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"many\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionValue{\"z\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"switchcase 2\",\n\t\tprogString: \"\" +\n\t\t\t\"case (x + 1) of \\n\" +\n\t\t\t\"1: print(\\\"hello\\\"); \\n\" +\n\t\t\t\"y := 2; \\n\" +\n\t\t\t\"2: \\n\" +\n\t\t\t\"3: print(\\\"many\\\");\\n\" +\n\t\t\t\"else\\n\" +\n\t\t\t\"z := 2 + 2;\\n\" +\n\t\t\t\"end_case;\",\n\t\tprog: []STInstruction{\n\t\t\tSTSwitchCase{\n\t\t\t\tSwitchOn: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCases: []STCase{\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"1\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"3\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"many\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionValue{\"z\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"for loop 1\",\n\t\tprogString: \"\" +\n\t\t\t\"for i := 1 to 10 by 2 do\\n\" +\n\t\t\t\"print(i);\\n\" +\n\t\t\t\"end_for;\\n\",\n\t\tprog: []STInstruction{\n\t\t\tSTForLoop{\n\t\t\t\tForAssignment: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tToValue: STExpressionValue{\"10\"},\n\t\t\t\tByIncrement: STExpressionValue{\"2\"},\n\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"for loop 2\",\n\t\tprogString: \"\" +\n\t\t\t\"for i := 1 to (2+10)*5 do\\n\" +\n\t\t\t\"print(i);\\n\" +\n\t\t\t\"end_for;\\n\",\n\t\tprog: []STInstruction{\n\t\t\tSTForLoop{\n\t\t\t\tForAssignment: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tToValue: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stMultiply),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"5\"},\n\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"10\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestCases(t *testing.T) {\n\tfor i := 0; i < len(stTestCases); i++ {\n\t\tprog, err := ParseString(stTestCases[i].name, stTestCases[i].progString)\n\t\tif err != nil && stTestCases[i].err != nil {\n\t\t\t\/\/TODO check if errors are the same\n\t\t\tif stTestCases[i].err.Error() != err.Err.Error() {\n\t\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nError mismatch. Expecting %s, but received:%s\", i, stTestCases[i].name, stTestCases[i].err.Error(), err.Err.Error())\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nNot expecting error, but received:%s\", i, stTestCases[i].name, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif stTestCases[i].err != nil {\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nWas expecting error, but did not receive.\", i, stTestCases[i].name)\n\t\t}\n\t\tif !reflect.DeepEqual(prog, stTestCases[i].prog) {\n\t\t\texpected, _ := json.MarshalIndent(stTestCases[i].prog, \"\\t\", \"\\t\")\n\t\t\treceived, _ := json.MarshalIndent(prog, \"\\t\", \"\\t\")\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\n:Expected:\\n\\t%s\\n\\nReceived:\\n\\t%s\\n\\n\", i, stTestCases[i].name, expected, received)\n\t\t}\n\t}\n}\n<commit_msg>tidy test cases<commit_after>package stconverter\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype stTestCase struct {\n\tname string\n\tprogString string\n\tprog []STInstruction\n\terr error\n}\n\nvar stTestCases = []stTestCase{\n\t{\n\t\tname: \"assignment 1\",\n\t\tprogString: \"x := 1;\",\n\t\tprog: []STInstruction{\n\t\t\tSTExpressionOperator{\n\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\tArguments: []STExpression{\n\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"assignment 2\",\n\t\tprogString: \"y := x + 2;\",\n\t\tprog: []STInstruction{\n\t\t\tSTExpressionOperator{\n\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\tArguments: []STExpression{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"if\/then 1\",\n\t\tprogString: \"if y > x then y := x; end_if;\",\n\t\tprog: []STInstruction{\n\t\t\tSTIfElsIfElse{\n\t\t\t\tIfThens: []STIfThen{\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"if\/elsif\/else 1\",\n\t\tprogString: \"\" +\n\t\t\t\"if y > x then\" +\n\t\t\t\"\ty := x;\\n\" +\n\t\t\t\"\tprint(\\\"hello\\\");\\n\" +\n\t\t\t\"elsif x > y then\\n\" +\n\t\t\t\"\ta := 1 + 2 * 3;\\n\" +\n\t\t\t\"else\\n\" +\n\t\t\t\"\tprint(\\\"hi\\\"); \\n\" +\n\t\t\t\"\tprint(\\\"yes\\\"); \\n\" +\n\t\t\t\"end_if;\",\n\t\tprog: []STInstruction{\n\t\t\tSTIfElsIfElse{\n\t\t\t\tIfThens: []STIfThen{\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIfExpression: STExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stGreaterThan),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tThenSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\t\t\t\t\tOperator: findOp(stMultiply),\n\t\t\t\t\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"3\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"a\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hi\\\"\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"\\\"yes\\\"\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"switchcase 1\",\n\t\tprogString: \"\" +\n\t\t\t\"case x + 1 of \\n\" +\n\t\t\t\"1:\tprint(\\\"hello\\\"); \\n\" +\n\t\t\t\"\ty := 2; \\n\" +\n\t\t\t\"2, 3: print(\\\"many\\\");\\n\" +\n\t\t\t\"else\\n\" +\n\t\t\t\"\tz := 2 + 2;\" +\n\t\t\t\"end_case;\",\n\t\tprog: []STInstruction{\n\t\t\tSTSwitchCase{\n\t\t\t\tSwitchOn: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCases: []STCase{\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"1\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"2\", \"3\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"many\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionValue{\"z\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"switchcase 2\",\n\t\tprogString: \"\" +\n\t\t\t\"case (x + 1) of \\n\" +\n\t\t\t\"1: print(\\\"hello\\\"); \\n\" +\n\t\t\t\"\ty := 2; \\n\" +\n\t\t\t\"2: \\n\" +\n\t\t\t\"3: print(\\\"many\\\");\\n\" +\n\t\t\t\"else\\n\" +\n\t\t\t\"\tz := 2 + 2;\\n\" +\n\t\t\t\"end_case;\",\n\t\tprog: []STInstruction{\n\t\t\tSTSwitchCase{\n\t\t\t\tSwitchOn: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"x\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCases: []STCase{\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"1\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"hello\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"y\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"2\"},\n\t\t\t\t\t},\n\t\t\t\t\tSTCase{\n\t\t\t\t\t\tCaseValues: []string{\"3\"},\n\t\t\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"\\\"many\\\"\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tElseSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSTExpressionValue{\"z\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"for loop 1\",\n\t\tprogString: \"\" +\n\t\t\t\"for i := 1 to 10 by 2 do\\n\" +\n\t\t\t\"\tprint(i);\\n\" +\n\t\t\t\"end_for;\\n\",\n\t\tprog: []STInstruction{\n\t\t\tSTForLoop{\n\t\t\t\tForAssignment: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tToValue: STExpressionValue{\"10\"},\n\t\t\t\tByIncrement: STExpressionValue{\"2\"},\n\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"for loop 2\",\n\t\tprogString: \"\" +\n\t\t\t\"for i := 1 to (2+10)*5 do\\n\" +\n\t\t\t\"\tprint(i);\\n\" +\n\t\t\t\"end_for;\\n\",\n\t\tprog: []STInstruction{\n\t\t\tSTForLoop{\n\t\t\t\tForAssignment: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stAssignment),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"1\"},\n\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tToValue: STExpressionOperator{\n\t\t\t\t\tOperator: findOp(stMultiply),\n\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\tSTExpressionValue{\"5\"},\n\t\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\t\tOperator: findOp(stAdd),\n\t\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\t\tSTExpressionValue{\"10\"},\n\t\t\t\t\t\t\t\tSTExpressionValue{\"2\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSequence: []STInstruction{\n\t\t\t\t\tSTExpressionOperator{\n\t\t\t\t\t\tOperator: findOp(\"print<1>\"),\n\t\t\t\t\t\tArguments: []STExpression{\n\t\t\t\t\t\t\tSTExpressionValue{\"i\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestCases(t *testing.T) {\n\tfor i := 0; i < len(stTestCases); i++ {\n\t\tprog, err := ParseString(stTestCases[i].name, stTestCases[i].progString)\n\t\tif err != nil && stTestCases[i].err != nil {\n\t\t\t\/\/TODO check if errors are the same\n\t\t\tif stTestCases[i].err.Error() != err.Err.Error() {\n\t\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nError mismatch. Expecting %s, but received:%s\", i, stTestCases[i].name, stTestCases[i].err.Error(), err.Err.Error())\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nNot expecting error, but received:%s\", i, stTestCases[i].name, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif stTestCases[i].err != nil {\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\nWas expecting error, but did not receive.\", i, stTestCases[i].name)\n\t\t}\n\t\tif !reflect.DeepEqual(prog, stTestCases[i].prog) {\n\t\t\texpected, _ := json.MarshalIndent(stTestCases[i].prog, \"\\t\", \"\\t\")\n\t\t\treceived, _ := json.MarshalIndent(prog, \"\\t\", \"\\t\")\n\t\t\tt.Errorf(\"Test %d (%s) FAIL.\\n:Expected:\\n\\t%s\\n\\nReceived:\\n\\t%s\\n\\n\", i, stTestCases[i].name, expected, received)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage searchers\n\nimport (\n\t\"math\"\n\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/search\"\n)\n\ntype PhraseSearcher struct {\n\tinitialized bool\n\tindexReader index.IndexReader\n\tmustSearcher *ConjunctionSearcher\n\tqueryNorm float64\n\tcurrMust *search.DocumentMatch\n\tslop int\n\tterms []string\n}\n\nfunc NewPhraseSearcher(indexReader index.IndexReader, mustSearcher *ConjunctionSearcher, terms []string) (*PhraseSearcher, error) {\n\t\/\/ build our searcher\n\trv := PhraseSearcher{\n\t\tindexReader: indexReader,\n\t\tmustSearcher: mustSearcher,\n\t\tterms: terms,\n\t}\n\trv.computeQueryNorm()\n\treturn &rv, nil\n}\n\nfunc (s *PhraseSearcher) computeQueryNorm() {\n\t\/\/ first calculate sum of squared weights\n\tsumOfSquaredWeights := 0.0\n\tif s.mustSearcher != nil {\n\t\tsumOfSquaredWeights += s.mustSearcher.Weight()\n\t}\n\n\t\/\/ now compute query norm from this\n\ts.queryNorm = 1.0 \/ math.Sqrt(sumOfSquaredWeights)\n\t\/\/ finally tell all the downstream searchers the norm\n\tif s.mustSearcher != nil {\n\t\ts.mustSearcher.SetQueryNorm(s.queryNorm)\n\t}\n}\n\nfunc (s *PhraseSearcher) initSearchers() error {\n\tvar err error\n\t\/\/ get all searchers pointing at their first match\n\tif s.mustSearcher != nil {\n\t\ts.currMust, err = s.mustSearcher.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.initialized = true\n\treturn nil\n}\n\nfunc (s *PhraseSearcher) advanceNextMust() error {\n\tvar err error\n\n\tif s.mustSearcher != nil {\n\t\ts.currMust, err = s.mustSearcher.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *PhraseSearcher) Weight() float64 {\n\tvar rv float64\n\trv += s.mustSearcher.Weight()\n\n\treturn rv\n}\n\nfunc (s *PhraseSearcher) SetQueryNorm(qnorm float64) {\n\ts.mustSearcher.SetQueryNorm(qnorm)\n}\n\nfunc (s *PhraseSearcher) Next() (*search.DocumentMatch, error) {\n\tif !s.initialized {\n\t\terr := s.initSearchers()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar rv *search.DocumentMatch\n\tfor s.currMust != nil {\n\t\trvftlm := make(search.FieldTermLocationMap, 0)\n\t\tfreq := 0\n\t\tfirstTerm := s.terms[0]\n\t\tfor field, termLocMap := range s.currMust.Locations {\n\t\t\trvtlm := make(search.TermLocationMap, 0)\n\t\t\tlocations, ok := termLocMap[firstTerm]\n\t\t\tif ok {\n\t\t\tOUTER:\n\t\t\t\tfor _, location := range locations {\n\t\t\t\t\tcrvtlm := make(search.TermLocationMap, 0)\n\t\t\t\tINNER:\n\t\t\t\t\tfor i := 0; i < len(s.terms); i++ {\n\t\t\t\t\t\tnextTerm := s.terms[i]\n\t\t\t\t\t\tif nextTerm != \"\" {\n\t\t\t\t\t\t\t\/\/ look through all these term locations\n\t\t\t\t\t\t\t\/\/ to try and find the correct offsets\n\t\t\t\t\t\t\tnextLocations, ok := termLocMap[nextTerm]\n\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\tfor _, nextLocation := range nextLocations {\n\t\t\t\t\t\t\t\t\tif nextLocation.Pos == location.Pos+float64(i) {\n\t\t\t\t\t\t\t\t\t\t\/\/ found a location match for this term\n\t\t\t\t\t\t\t\t\t\tcrvtlm.AddLocation(nextTerm, nextLocation)\n\t\t\t\t\t\t\t\t\t\tcontinue INNER\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\/\/ if we got here we didn't find a location match for this term\n\t\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ if we got here all the terms matched\n\t\t\t\t\tfreq++\n\t\t\t\t\tsearch.MergeTermLocationMaps(rvtlm, crvtlm)\n\t\t\t\t\trvftlm[field] = rvtlm\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif freq > 0 {\n\t\t\t\/\/ return match\n\t\t\trv = s.currMust\n\t\t\trv.Locations = rvftlm\n\t\t\ts.advanceNextMust()\n\t\t\treturn rv, nil\n\t\t}\n\n\t\ts.advanceNextMust()\n\t}\n\n\treturn nil, nil\n}\n\nfunc (s *PhraseSearcher) Advance(ID string) (*search.DocumentMatch, error) {\n\tif !s.initialized {\n\t\terr := s.initSearchers()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\ts.mustSearcher.Advance(ID)\n\treturn s.Next()\n}\n\nfunc (s *PhraseSearcher) Count() uint64 {\n\t\/\/ for now return a worst case\n\tvar sum uint64\n\tsum += s.mustSearcher.Count()\n\treturn sum\n}\n\nfunc (s *PhraseSearcher) Close() {\n\tif s.mustSearcher != nil {\n\t\ts.mustSearcher.Close()\n\t}\n}\n\nfunc (s *PhraseSearcher) Min() int {\n\treturn 0\n}\n<commit_msg>fix advance logic to not skip over result<commit_after>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage searchers\n\nimport (\n\t\"math\"\n\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/search\"\n)\n\ntype PhraseSearcher struct {\n\tinitialized bool\n\tindexReader index.IndexReader\n\tmustSearcher *ConjunctionSearcher\n\tqueryNorm float64\n\tcurrMust *search.DocumentMatch\n\tslop int\n\tterms []string\n}\n\nfunc NewPhraseSearcher(indexReader index.IndexReader, mustSearcher *ConjunctionSearcher, terms []string) (*PhraseSearcher, error) {\n\t\/\/ build our searcher\n\trv := PhraseSearcher{\n\t\tindexReader: indexReader,\n\t\tmustSearcher: mustSearcher,\n\t\tterms: terms,\n\t}\n\trv.computeQueryNorm()\n\treturn &rv, nil\n}\n\nfunc (s *PhraseSearcher) computeQueryNorm() {\n\t\/\/ first calculate sum of squared weights\n\tsumOfSquaredWeights := 0.0\n\tif s.mustSearcher != nil {\n\t\tsumOfSquaredWeights += s.mustSearcher.Weight()\n\t}\n\n\t\/\/ now compute query norm from this\n\ts.queryNorm = 1.0 \/ math.Sqrt(sumOfSquaredWeights)\n\t\/\/ finally tell all the downstream searchers the norm\n\tif s.mustSearcher != nil {\n\t\ts.mustSearcher.SetQueryNorm(s.queryNorm)\n\t}\n}\n\nfunc (s *PhraseSearcher) initSearchers() error {\n\tvar err error\n\t\/\/ get all searchers pointing at their first match\n\tif s.mustSearcher != nil {\n\t\ts.currMust, err = s.mustSearcher.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.initialized = true\n\treturn nil\n}\n\nfunc (s *PhraseSearcher) advanceNextMust() error {\n\tvar err error\n\n\tif s.mustSearcher != nil {\n\t\ts.currMust, err = s.mustSearcher.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *PhraseSearcher) Weight() float64 {\n\tvar rv float64\n\trv += s.mustSearcher.Weight()\n\n\treturn rv\n}\n\nfunc (s *PhraseSearcher) SetQueryNorm(qnorm float64) {\n\ts.mustSearcher.SetQueryNorm(qnorm)\n}\n\nfunc (s *PhraseSearcher) Next() (*search.DocumentMatch, error) {\n\tif !s.initialized {\n\t\terr := s.initSearchers()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar rv *search.DocumentMatch\n\tfor s.currMust != nil {\n\t\trvftlm := make(search.FieldTermLocationMap, 0)\n\t\tfreq := 0\n\t\tfirstTerm := s.terms[0]\n\t\tfor field, termLocMap := range s.currMust.Locations {\n\t\t\trvtlm := make(search.TermLocationMap, 0)\n\t\t\tlocations, ok := termLocMap[firstTerm]\n\t\t\tif ok {\n\t\t\tOUTER:\n\t\t\t\tfor _, location := range locations {\n\t\t\t\t\tcrvtlm := make(search.TermLocationMap, 0)\n\t\t\t\tINNER:\n\t\t\t\t\tfor i := 0; i < len(s.terms); i++ {\n\t\t\t\t\t\tnextTerm := s.terms[i]\n\t\t\t\t\t\tif nextTerm != \"\" {\n\t\t\t\t\t\t\t\/\/ look through all these term locations\n\t\t\t\t\t\t\t\/\/ to try and find the correct offsets\n\t\t\t\t\t\t\tnextLocations, ok := termLocMap[nextTerm]\n\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\tfor _, nextLocation := range nextLocations {\n\t\t\t\t\t\t\t\t\tif nextLocation.Pos == location.Pos+float64(i) {\n\t\t\t\t\t\t\t\t\t\t\/\/ found a location match for this term\n\t\t\t\t\t\t\t\t\t\tcrvtlm.AddLocation(nextTerm, nextLocation)\n\t\t\t\t\t\t\t\t\t\tcontinue INNER\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\/\/ if we got here we didn't find a location match for this term\n\t\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ if we got here all the terms matched\n\t\t\t\t\tfreq++\n\t\t\t\t\tsearch.MergeTermLocationMaps(rvtlm, crvtlm)\n\t\t\t\t\trvftlm[field] = rvtlm\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif freq > 0 {\n\t\t\t\/\/ return match\n\t\t\trv = s.currMust\n\t\t\trv.Locations = rvftlm\n\t\t\ts.advanceNextMust()\n\t\t\treturn rv, nil\n\t\t}\n\n\t\ts.advanceNextMust()\n\t}\n\n\treturn nil, nil\n}\n\nfunc (s *PhraseSearcher) Advance(ID string) (*search.DocumentMatch, error) {\n\tif !s.initialized {\n\t\terr := s.initSearchers()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar err error\n\ts.currMust, err = s.mustSearcher.Advance(ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Next()\n}\n\nfunc (s *PhraseSearcher) Count() uint64 {\n\t\/\/ for now return a worst case\n\tvar sum uint64\n\tsum += s.mustSearcher.Count()\n\treturn sum\n}\n\nfunc (s *PhraseSearcher) Close() {\n\tif s.mustSearcher != nil {\n\t\ts.mustSearcher.Close()\n\t}\n}\n\nfunc (s *PhraseSearcher) Min() int {\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package gcm\n\nimport (\n\t\"github.com\/alexjlockwood\/gcm\"\n\t\"github.com\/smancke\/guble\/protocol\"\n\t\"github.com\/smancke\/guble\/server\"\n\t\"github.com\/smancke\/guble\/store\"\n\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ REGISTRATIONS_SCHEMA is the default sqlite schema for GCM\nconst REGISTRATIONS_SCHEMA = \"gcm_registration\"\nconst MESSAGE_RETRIES = 5\nconst BROADCAST_RETRIES = 3\n\n\/\/ GCMConnector is the structure for handling the communication with Google Cloud Messaging\ntype GCMConnector struct {\n\trouter server.Router\n\tkvStore store.KVStore\n\tprefix string\n\trouterC chan server.MsgAndRoute\n\tcloseRouteByRouter chan server.Route\n\tstopC chan bool\n\tsender *gcm.Sender\n\tnWorkers int\n\twaitGroup sync.WaitGroup\n}\n\n\/\/ NewGCMConnector creates a new GCMConnector without starting it\nfunc NewGCMConnector(router server.Router, prefix string, gcmAPIKey string, nWorkers int) (*GCMConnector, error) {\n\n\tkvStore, err := router.KVStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/TODO Cosmin: check with dev-team the default number of GCM workers, below\n\tgcm := &GCMConnector{\n\t\trouter: router,\n\t\tkvStore: kvStore,\n\t\tprefix: prefix,\n\t\trouterC: make(chan server.MsgAndRoute, 1000),\n\t\tstopC: make(chan bool, 1),\n\t\tsender: &gcm.Sender{ApiKey: gcmAPIKey},\n\t\tnWorkers: nWorkers,\n\t}\n\n\treturn gcm, nil\n}\n\n\/\/ Start opens the connector, creates more goroutines \/ workers to handle messages coming from the router\nfunc (conn *GCMConnector) Start() error {\n\tbroadcastRoute := server.NewRoute(removeTrailingSlash(conn.prefix)+\"\/broadcast\", conn.routerC, \"gcm_connector\", \"gcm_connector\")\n\tconn.router.Subscribe(broadcastRoute)\n\tgo func() {\n\t\t\/\/TODO Cosmin: should loadSubscriptions() be taken out of this goroutine, and executed before ?\n\t\t\/\/ (even if startup-time is longer, the routes are guaranteed to be there right after Start() returns)\n\t\tconn.loadSubscriptions()\n\n\t\tfor id := 1; id <= conn.nWorkers; id++ {\n\t\t\tgo conn.loopSendOrBroadcastMessage(id)\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ Stop signals the closing of GCMConnector\nfunc (conn *GCMConnector) Stop() error {\n\tprotocol.Debug(\"GCM Stop()\")\n\tclose(conn.stopC)\n\tconn.waitGroup.Wait()\n\treturn nil\n}\n\n\/\/ Check returns nil if health-check succeeds, or an error if health-check fails\n\/\/curl --header \"Authorization: key=$api_key\" \\\n\/\/--header Content-Type:\"application\/json\" \\\n\/\/https:\/\/gcm-http.googleapis.com\/gcm\/send \\\n\/\/-d \"{\\\"registration_ids\\\":[\\\"ABC\\\"]}\"\nfunc (conn *GCMConnector) Check() error {\n\tpay := `{\"registration_ids\":[\"ABC\"]}`\n\tpayload := conn.parseMessageToMap(&protocol.Message{Body: []byte(pay)})\n\t_, err := conn.sender.Send(gcm.NewMessage(payload, \"\"), MESSAGE_RETRIES)\n\tif err != nil {\n\t\tprotocol.Err(\"error sending ping message\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ loopSendOrBroadcastMessage awaits in a loop for messages from router to be forwarded to GCM,\n\/\/ until the stop-channel is closed\nfunc (conn *GCMConnector) loopSendOrBroadcastMessage(id int) {\n\tdefer conn.waitGroup.Done()\n\tconn.waitGroup.Add(1)\n\tprotocol.Debug(\"starting GCM worker %v\", id)\n\tfor {\n\t\tselect {\n\t\tcase msg, opened := <-conn.routerC:\n\t\t\tif opened {\n\t\t\t\tif string(msg.Message.Path) == removeTrailingSlash(conn.prefix)+\"\/broadcast\" {\n\t\t\t\t\tgo conn.broadcastMessage(msg)\n\t\t\t\t} else {\n\t\t\t\t\tgo conn.sendMessage(msg)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-conn.stopC:\n\t\t\tprotocol.Debug(\"stopping GCM worker %v\", id)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (conn *GCMConnector) sendMessage(msg server.MsgAndRoute) {\n\tgcmID := msg.Route.ApplicationID\n\n\tpayload := conn.parseMessageToMap(msg.Message)\n\n\tvar messageToGcm = gcm.NewMessage(payload, gcmID)\n\tprotocol.Info(\"sending message to %v ...\", gcmID)\n\tresult, err := conn.sender.Send(messageToGcm, MESSAGE_RETRIES)\n\tif err != nil {\n\t\tprotocol.Err(\"error sending message to GCM gcmID=%v: %v\", gcmID, err.Error())\n\t\treturn\n\t}\n\n\terrorJSON := result.Results[0].Error\n\tif errorJSON != \"\" {\n\t\tconn.handleJSONError(errorJSON, gcmID, msg.Route)\n\t} else {\n\t\tprotocol.Debug(\"delivered message to GCM gcmID=%v: %v\", gcmID, errorJSON)\n\t}\n\n\t\/\/ we only send to one receiver,\n\t\/\/ so we know that we can replace the old id with the first registration id (=canonical id)\n\tif result.CanonicalIDs != 0 {\n\t\tconn.replaceSubscriptionWithCanonicalID(msg.Route, result.Results[0].RegistrationID)\n\t}\n}\n\nfunc (conn *GCMConnector) parseMessageToMap(msg *protocol.Message) map[string]interface{} {\n\tpayload := map[string]interface{}{}\n\tif msg.Body[0] == '{' {\n\t\tjson.Unmarshal(msg.Body, &payload)\n\t} else {\n\t\tpayload[\"message\"] = msg.BodyAsString()\n\t}\n\tprotocol.Debug(\"gcm: parsed message is: %v\", payload)\n\treturn payload\n}\n\nfunc (conn *GCMConnector) broadcastMessage(msg server.MsgAndRoute) {\n\ttopic := msg.Message.Path\n\tpayload := conn.parseMessageToMap(msg.Message)\n\tprotocol.Info(\"gcm: broadcasting message with topic %v ...\", string(topic))\n\n\tsubscriptions := conn.kvStore.Iterate(REGISTRATIONS_SCHEMA, \"\")\n\tcount := 0\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-subscriptions:\n\t\t\tif !ok {\n\t\t\t\tprotocol.Info(\"gcm: sent message to %v receivers\", count)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgcmID := entry[0]\n\t\t\t\/\/TODO collect 1000 gcmIds and send them in one request!\n\t\t\tbroadcastMessage := gcm.NewMessage(payload, gcmID)\n\t\t\tgo func() {\n\t\t\t\t\/\/TODO error handling of response!\n\t\t\t\t_, err := conn.sender.Send(broadcastMessage, BROADCAST_RETRIES)\n\t\t\t\tprotocol.Debug(\"gcm: sent broadcast message to gcmID=%v\", gcmID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tprotocol.Err(\"gcm: error sending broadcast message to gcmID=%v: %v\", gcmID, err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tcount++\n\t\t}\n\t}\n}\n\nfunc (conn *GCMConnector) replaceSubscriptionWithCanonicalID(route *server.Route, newGcmID string) {\n\toldGcmID := route.ApplicationID\n\ttopic := string(route.Path)\n\tuserID := route.UserID\n\n\tprotocol.Info(\"gcm: replacing old gcmID %v with canonicalId %v\", oldGcmID, newGcmID)\n\n\tconn.removeSubscription(route, oldGcmID)\n\tconn.subscribe(topic, userID, newGcmID)\n}\n\nfunc (conn *GCMConnector) handleJSONError(jsonError string, gcmID string, route *server.Route) {\n\tif jsonError == \"NotRegistered\" {\n\t\tprotocol.Debug(\"remove not registered GCM registration gcmID=%v\", gcmID)\n\t\tconn.removeSubscription(route, gcmID)\n\t} else if jsonError == \"InvalidRegistration\" {\n\t\tprotocol.Err(\"the gcmID=%v is not registered. %v\", gcmID, jsonError)\n\t} else {\n\t\tprotocol.Err(\"unexpected error while sending to GCM gcmID=%v: %v\", gcmID, jsonError)\n\t}\n}\n\n\/\/ GetPrefix is used to satisfy the HTTP handler interface\nfunc (conn *GCMConnector) GetPrefix() string {\n\treturn conn.prefix\n}\n\nfunc (conn *GCMConnector) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\tprotocol.Err(\"Only HTTP POST METHOD SUPPORTED but received type=[%s]\", r.Method)\n\t\thttp.Error(w, \"Permission Denied\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tuserID, gcmID, topic, err := conn.parseParams(r.URL.Path)\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid Parameters in request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tconn.subscribe(topic, userID, gcmID)\n\n\tfmt.Fprintf(w, \"registered: %v\\n\", topic)\n}\n\n\/\/ parseParams will parse the HTTP URL with format \/gcm\/:userid\/:gcmid\/subscribe\/*topic\n\/\/ returning the parsed Params, or error if the request is not in the correct format\nfunc (conn *GCMConnector) parseParams(path string) (userID, gcmID, topic string, err error) {\n\tsubscribePrefixPath := \"subscribe\"\n\tcurrentURLPath := removeTrailingSlash(path)\n\n\tif strings.HasPrefix(currentURLPath, conn.prefix) != true {\n\t\terr = errors.New(\"GCM request is not starting with gcm prefix\")\n\t\treturn\n\t}\n\tpathAfterPrefix := strings.TrimPrefix(currentURLPath, conn.prefix)\n\n\tsplitParams := strings.SplitN(pathAfterPrefix, \"\/\", 3)\n\tif len(splitParams) != 3 {\n\t\terr = errors.New(\"GCM request has wrong number of params\")\n\t\treturn\n\t}\n\tuserID = splitParams[0]\n\tgcmID = splitParams[1]\n\n\tif strings.HasPrefix(splitParams[2], subscribePrefixPath+\"\/\") != true {\n\t\terr = errors.New(\"GCM request third param is not subscribe\")\n\t\treturn\n\t}\n\ttopic = strings.TrimPrefix(splitParams[2], subscribePrefixPath)\n\treturn userID, gcmID, topic, nil\n}\n\nfunc (conn *GCMConnector) subscribe(topic string, userID string, gcmID string) {\n\tprotocol.Info(\"GCM connector registration to userID=%q, gcmID=%q: %q\", userID, gcmID, topic)\n\n\troute := server.NewRoute(topic, conn.routerC, gcmID, userID)\n\n\tconn.router.Subscribe(route)\n\tconn.saveSubscription(userID, topic, gcmID)\n}\n\nfunc (conn *GCMConnector) removeSubscription(route *server.Route, gcmID string) {\n\tconn.router.Unsubscribe(route)\n\tconn.kvStore.Delete(REGISTRATIONS_SCHEMA, gcmID)\n}\n\nfunc (conn *GCMConnector) saveSubscription(userID, topic, gcmID string) {\n\tconn.kvStore.Put(REGISTRATIONS_SCHEMA, gcmID, []byte(userID+\":\"+topic))\n}\n\nfunc (conn *GCMConnector) loadSubscriptions() {\n\tsubscriptions := conn.kvStore.Iterate(REGISTRATIONS_SCHEMA, \"\")\n\tcount := 0\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-subscriptions:\n\t\t\tif !ok {\n\t\t\t\tprotocol.Info(\"renewed %v GCM subscriptions\", count)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgcmID := entry[0]\n\t\t\tsplitValue := strings.SplitN(entry[1], \":\", 2)\n\t\t\tuserID := splitValue[0]\n\t\t\ttopic := splitValue[1]\n\n\t\t\tprotocol.Debug(\"renewing GCM subscription: userID=%v, topic=%v, gcmID=%v\", userID, topic, gcmID)\n\t\t\troute := server.NewRoute(topic, conn.routerC, gcmID, userID)\n\t\t\tconn.router.Subscribe(route)\n\t\t\tcount++\n\t\t}\n\t}\n}\n\nfunc removeTrailingSlash(path string) string {\n\tif len(path) > 1 && path[len(path)-1] == '\/' {\n\t\treturn path[:len(path)-1]\n\t}\n\treturn path\n}\n<commit_msg>FIx for spelling and added some more comments<commit_after>package gcm\n\nimport (\n\t\"github.com\/alexjlockwood\/gcm\"\n\t\"github.com\/smancke\/guble\/protocol\"\n\t\"github.com\/smancke\/guble\/server\"\n\t\"github.com\/smancke\/guble\/store\"\n\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ REGISTRATIONS_SCHEMA is the default sqlite schema for GCM\nconst REGISTRATIONS_SCHEMA = \"gcm_registration\"\nconst MESSAGE_RETRIES = 5\nconst BROADCAST_RETRIES = 3\n\n\/\/ GCMConnector is the structure for handling the communication with Google Cloud Messaging\ntype GCMConnector struct {\n\trouter server.Router\n\tkvStore store.KVStore\n\tprefix string\n\trouterC chan server.MsgAndRoute\n\tcloseRouteByRouter chan server.Route\n\tstopC chan bool\n\tsender *gcm.Sender\n\tnWorkers int\n\twaitGroup sync.WaitGroup\n}\n\n\/\/ NewGCMConnector creates a new GCMConnector without starting it\nfunc NewGCMConnector(router server.Router, prefix string, gcmAPIKey string, nWorkers int) (*GCMConnector, error) {\n\n\tkvStore, err := router.KVStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/TODO Cosmin: check with dev-team the default number of GCM workers, below\n\tgcm := &GCMConnector{\n\t\trouter: router,\n\t\tkvStore: kvStore,\n\t\tprefix: prefix,\n\t\trouterC: make(chan server.MsgAndRoute, 1000),\n\t\tstopC: make(chan bool, 1),\n\t\tsender: &gcm.Sender{ApiKey: gcmAPIKey},\n\t\tnWorkers: nWorkers,\n\t}\n\n\treturn gcm, nil\n}\n\n\/\/ Start opens the connector, creates more goroutines \/ workers to handle messages coming from the router\nfunc (conn *GCMConnector) Start() error {\n\tbroadcastRoute := server.NewRoute(removeTrailingSlash(conn.prefix)+\"\/broadcast\", conn.routerC, \"gcm_connector\", \"gcm_connector\")\n\tconn.router.Subscribe(broadcastRoute)\n\tgo func() {\n\t\t\/\/TODO Cosmin: should loadSubscriptions() be taken out of this goroutine, and executed before ?\n\t\t\/\/ (even if startup-time is longer, the routes are guaranteed to be there right after Start() returns)\n\t\tconn.loadSubscriptions()\n\n\t\tfor id := 1; id <= conn.nWorkers; id++ {\n\t\t\tgo conn.loopSendOrBroadcastMessage(id)\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ Stop signals the closing of GCMConnector\nfunc (conn *GCMConnector) Stop() error {\n\tprotocol.Debug(\"GCM Stop()\")\n\tclose(conn.stopC)\n\tconn.waitGroup.Wait()\n\treturn nil\n}\n\n\/\/ Check returns nil if health-check succeeds, or an error if health-check fails\n\/\/ by sending a request with only apikey. If the response is processed by the GCM endpoint\n\/\/ the gcmStatus will pe up.Otherwise the error from sending the message will be returned\nfunc (conn *GCMConnector) Check() error {\n\tpayload := conn.parseMessageToMap(&protocol.Message{Body: []byte(`{\"registration_ids\":[\"ABC\"]}`)})\n\t_, err := conn.sender.Send(gcm.NewMessage(payload, \"\"), MESSAGE_RETRIES)\n\tif err != nil {\n\t\tprotocol.Err(\"error sending ping message\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ loopSendOrBroadcastMessage awaits in a loop for messages from router to be forwarded to GCM,\n\/\/ until the stop-channel is closed\nfunc (conn *GCMConnector) loopSendOrBroadcastMessage(id int) {\n\tdefer conn.waitGroup.Done()\n\tconn.waitGroup.Add(1)\n\tprotocol.Debug(\"starting GCM worker %v\", id)\n\tfor {\n\t\tselect {\n\t\tcase msg, opened := <-conn.routerC:\n\t\t\tif opened {\n\t\t\t\tif string(msg.Message.Path) == removeTrailingSlash(conn.prefix)+\"\/broadcast\" {\n\t\t\t\t\tgo conn.broadcastMessage(msg)\n\t\t\t\t} else {\n\t\t\t\t\tgo conn.sendMessage(msg)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-conn.stopC:\n\t\t\tprotocol.Debug(\"stopping GCM worker %v\", id)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (conn *GCMConnector) sendMessage(msg server.MsgAndRoute) {\n\tgcmID := msg.Route.ApplicationID\n\n\tpayload := conn.parseMessageToMap(msg.Message)\n\n\tvar messageToGcm = gcm.NewMessage(payload, gcmID)\n\tprotocol.Info(\"sending message to %v ...\", gcmID)\n\tresult, err := conn.sender.Send(messageToGcm, MESSAGE_RETRIES)\n\tif err != nil {\n\t\tprotocol.Err(\"error sending message to GCM gcmID=%v: %v\", gcmID, err.Error())\n\t\treturn\n\t}\n\n\terrorJSON := result.Results[0].Error\n\tif errorJSON != \"\" {\n\t\tconn.handleJSONError(errorJSON, gcmID, msg.Route)\n\t} else {\n\t\tprotocol.Debug(\"delivered message to GCM gcmID=%v: %v\", gcmID, errorJSON)\n\t}\n\n\t\/\/ we only send to one receiver,\n\t\/\/ so we know that we can replace the old id with the first registration id (=canonical id)\n\tif result.CanonicalIDs != 0 {\n\t\tconn.replaceSubscriptionWithCanonicalID(msg.Route, result.Results[0].RegistrationID)\n\t}\n}\n\nfunc (conn *GCMConnector) parseMessageToMap(msg *protocol.Message) map[string]interface{} {\n\tpayload := map[string]interface{}{}\n\tif msg.Body[0] == '{' {\n\t\tjson.Unmarshal(msg.Body, &payload)\n\t} else {\n\t\tpayload[\"message\"] = msg.BodyAsString()\n\t}\n\tprotocol.Debug(\"gcm: parsed message is: %v\", payload)\n\treturn payload\n}\n\nfunc (conn *GCMConnector) broadcastMessage(msg server.MsgAndRoute) {\n\ttopic := msg.Message.Path\n\tpayload := conn.parseMessageToMap(msg.Message)\n\tprotocol.Info(\"gcm: broadcasting message with topic %v ...\", string(topic))\n\n\tsubscriptions := conn.kvStore.Iterate(REGISTRATIONS_SCHEMA, \"\")\n\tcount := 0\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-subscriptions:\n\t\t\tif !ok {\n\t\t\t\tprotocol.Info(\"gcm: sent message to %v receivers\", count)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgcmID := entry[0]\n\t\t\t\/\/TODO collect 1000 gcmIds and send them in one request!\n\t\t\tbroadcastMessage := gcm.NewMessage(payload, gcmID)\n\t\t\tgo func() {\n\t\t\t\t\/\/TODO error handling of response!\n\t\t\t\t_, err := conn.sender.Send(broadcastMessage, BROADCAST_RETRIES)\n\t\t\t\tprotocol.Debug(\"gcm: sent broadcast message to gcmID=%v\", gcmID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tprotocol.Err(\"gcm: error sending broadcast message to gcmID=%v: %v\", gcmID, err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tcount++\n\t\t}\n\t}\n}\n\nfunc (conn *GCMConnector) replaceSubscriptionWithCanonicalID(route *server.Route, newGcmID string) {\n\toldGcmID := route.ApplicationID\n\ttopic := string(route.Path)\n\tuserID := route.UserID\n\n\tprotocol.Info(\"gcm: replacing old gcmID %v with canonicalId %v\", oldGcmID, newGcmID)\n\n\tconn.removeSubscription(route, oldGcmID)\n\tconn.subscribe(topic, userID, newGcmID)\n}\n\nfunc (conn *GCMConnector) handleJSONError(jsonError string, gcmID string, route *server.Route) {\n\tif jsonError == \"NotRegistered\" {\n\t\tprotocol.Debug(\"remove not registered GCM registration gcmID=%v\", gcmID)\n\t\tconn.removeSubscription(route, gcmID)\n\t} else if jsonError == \"InvalidRegistration\" {\n\t\tprotocol.Err(\"the gcmID=%v is not registered. %v\", gcmID, jsonError)\n\t} else {\n\t\tprotocol.Err(\"unexpected error while sending to GCM gcmID=%v: %v\", gcmID, jsonError)\n\t}\n}\n\n\/\/ GetPrefix is used to satisfy the HTTP handler interface\nfunc (conn *GCMConnector) GetPrefix() string {\n\treturn conn.prefix\n}\n\nfunc (conn *GCMConnector) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\tprotocol.Err(\"Only HTTP POST METHOD SUPPORTED but received type=[%s]\", r.Method)\n\t\thttp.Error(w, \"Permission Denied\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tuserID, gcmID, topic, err := conn.parseParams(r.URL.Path)\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid Parameters in request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tconn.subscribe(topic, userID, gcmID)\n\n\tfmt.Fprintf(w, \"registered: %v\\n\", topic)\n}\n\n\/\/ parseParams will parse the HTTP URL with format \/gcm\/:userid\/:gcmid\/subscribe\/*topic\n\/\/ returning the parsed Params, or error if the request is not in the correct format\nfunc (conn *GCMConnector) parseParams(path string) (userID, gcmID, topic string, err error) {\n\tsubscribePrefixPath := \"subscribe\"\n\tcurrentURLPath := removeTrailingSlash(path)\n\n\tif strings.HasPrefix(currentURLPath, conn.prefix) != true {\n\t\terr = errors.New(\"GCM request is not starting with gcm prefix\")\n\t\treturn\n\t}\n\tpathAfterPrefix := strings.TrimPrefix(currentURLPath, conn.prefix)\n\n\tsplitParams := strings.SplitN(pathAfterPrefix, \"\/\", 3)\n\tif len(splitParams) != 3 {\n\t\terr = errors.New(\"GCM request has wrong number of params\")\n\t\treturn\n\t}\n\tuserID = splitParams[0]\n\tgcmID = splitParams[1]\n\n\tif strings.HasPrefix(splitParams[2], subscribePrefixPath+\"\/\") != true {\n\t\terr = errors.New(\"GCM request third param is not subscribe\")\n\t\treturn\n\t}\n\ttopic = strings.TrimPrefix(splitParams[2], subscribePrefixPath)\n\treturn userID, gcmID, topic, nil\n}\n\nfunc (conn *GCMConnector) subscribe(topic string, userID string, gcmID string) {\n\tprotocol.Info(\"GCM connector registration to userID=%q, gcmID=%q: %q\", userID, gcmID, topic)\n\n\troute := server.NewRoute(topic, conn.routerC, gcmID, userID)\n\n\tconn.router.Subscribe(route)\n\tconn.saveSubscription(userID, topic, gcmID)\n}\n\nfunc (conn *GCMConnector) removeSubscription(route *server.Route, gcmID string) {\n\tconn.router.Unsubscribe(route)\n\tconn.kvStore.Delete(REGISTRATIONS_SCHEMA, gcmID)\n}\n\nfunc (conn *GCMConnector) saveSubscription(userID, topic, gcmID string) {\n\tconn.kvStore.Put(REGISTRATIONS_SCHEMA, gcmID, []byte(userID+\":\"+topic))\n}\n\nfunc (conn *GCMConnector) loadSubscriptions() {\n\tsubscriptions := conn.kvStore.Iterate(REGISTRATIONS_SCHEMA, \"\")\n\tcount := 0\n\tfor {\n\t\tselect {\n\t\tcase entry, ok := <-subscriptions:\n\t\t\tif !ok {\n\t\t\t\tprotocol.Info(\"renewed %v GCM subscriptions\", count)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgcmID := entry[0]\n\t\t\tsplitValue := strings.SplitN(entry[1], \":\", 2)\n\t\t\tuserID := splitValue[0]\n\t\t\ttopic := splitValue[1]\n\n\t\t\tprotocol.Debug(\"renewing GCM subscription: userID=%v, topic=%v, gcmID=%v\", userID, topic, gcmID)\n\t\t\troute := server.NewRoute(topic, conn.routerC, gcmID, userID)\n\t\t\tconn.router.Subscribe(route)\n\t\t\tcount++\n\t\t}\n\t}\n}\n\nfunc removeTrailingSlash(path string) string {\n\tif len(path) > 1 && path[len(path)-1] == '\/' {\n\t\treturn path[:len(path)-1]\n\t}\n\treturn path\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage draw\n\n\/\/ braille_line.go contains code that draws lines on a braille canvas.\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\"\n\t\"github.com\/mum4k\/termdash\/cell\"\n)\n\n\/\/ braillePixelChange represents an action on a pixel on the braille canvas.\ntype braillePixelChange int\n\n\/\/ String implements fmt.Stringer()\nfunc (bpc braillePixelChange) String() string {\n\tif n, ok := braillePixelChangeNames[bpc]; ok {\n\t\treturn n\n\t}\n\treturn \"braillePixelChangeUnknown\"\n}\n\n\/\/ braillePixelChangeNames maps braillePixelChange values to human readable names.\nvar braillePixelChangeNames = map[braillePixelChange]string{\n\tbraillePixelChangeSet: \"braillePixelChangeSet\",\n\tbraillePixelChangeClear: \"braillePixelChangeClear\",\n}\n\nconst (\n\tbraillePixelChangeUnknown braillePixelChange = iota\n\n\tbraillePixelChangeSet\n\tbraillePixelChangeClear\n)\n\n\/\/ BrailleLineOption is used to provide options to BrailleLine().\ntype BrailleLineOption interface {\n\t\/\/ set sets the provided option.\n\tset(*brailleLineOptions)\n}\n\n\/\/ brailleLineOptions stores the provided options.\ntype brailleLineOptions struct {\n\tcellOpts []cell.Option\n\tpixelChange braillePixelChange\n}\n\n\/\/ newBrailleLineOptions returns a new brailleLineOptions instance.\nfunc newBrailleLineOptions() *brailleLineOptions {\n\treturn &brailleLineOptions{\n\t\tpixelChange: braillePixelChangeSet,\n\t}\n}\n\n\/\/ brailleLineOption implements BrailleLineOption.\ntype brailleLineOption func(*brailleLineOptions)\n\n\/\/ set implements BrailleLineOption.set.\nfunc (o brailleLineOption) set(opts *brailleLineOptions) {\n\to(opts)\n}\n\n\/\/ BrailleLineCellOpts sets options on the cells that contain the line.\n\/\/ Cell options on a braille canvas can only be set on the entire cell, not per\n\/\/ pixel.\nfunc BrailleLineCellOpts(cOpts ...cell.Option) BrailleLineOption {\n\treturn brailleLineOption(func(opts *brailleLineOptions) {\n\t\topts.cellOpts = cOpts\n\t})\n}\n\n\/\/ BrailleLineClearPixels changes the behavior of BrailleLine, so that it\n\/\/ clears the pixels belonging to the line instead of setting them.\n\/\/ Useful in order to \"erase\" a line from the canvas as opposed to drawing one.\nfunc BrailleLineClearPixels() BrailleLineOption {\n\treturn brailleLineOption(func(opts *brailleLineOptions) {\n\t\topts.pixelChange = braillePixelChangeClear\n\t})\n}\n\n\/\/ BrailleLine draws an approximated line segment on the braille canvas between\n\/\/ the two provided points.\n\/\/ Both start and end must be valid points within the canvas. Start and end can\n\/\/ be the same point in which case only one pixel will be set on the braille\n\/\/ canvas.\n\/\/ The start or end coordinates must not be negative.\nfunc BrailleLine(bc *braille.Canvas, start, end image.Point, opts ...BrailleLineOption) error {\n\tif start.X < 0 || start.Y < 0 {\n\t\treturn fmt.Errorf(\"the start coordinates cannot be negative, got: %v\", start)\n\t}\n\tif end.X < 0 || end.Y < 0 {\n\t\treturn fmt.Errorf(\"the end coordinates cannot be negative, got: %v\", end)\n\t}\n\n\topt := newBrailleLineOptions()\n\tfor _, o := range opts {\n\t\to.set(opt)\n\t}\n\n\tpoints := brailleLinePoints(start, end)\n\tfor _, p := range points {\n\t\tswitch opt.pixelChange {\n\t\tcase braillePixelChangeSet:\n\t\t\tif err := bc.SetPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"bc.SetPixel(%v) => %v\", p, err)\n\t\t\t}\n\t\tcase braillePixelChangeClear:\n\t\t\tif err := bc.ClearPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"bc.ClearPixel(%v) => %v\", p, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ brailleLinePoints returns the points to set when drawing the line.\nfunc brailleLinePoints(start, end image.Point) []image.Point {\n\t\/\/ Implements Bresenham's line algorithm.\n\t\/\/ https:\/\/en.wikipedia.org\/wiki\/Bresenham%27s_line_algorithm\n\n\tvertProj := abs(end.Y - start.Y)\n\thorizProj := abs(end.X - start.X)\n\tif vertProj < horizProj {\n\t\tif start.X > end.X {\n\t\t\treturn lineLow(end.X, end.Y, start.X, start.Y)\n\t\t}\n\t\treturn lineLow(start.X, start.Y, end.X, end.Y)\n\t} else {\n\t\tif start.Y > end.Y {\n\t\t\treturn lineHigh(end.X, end.Y, start.X, start.Y)\n\t\t}\n\t\treturn lineHigh(start.X, start.Y, end.X, end.Y)\n\t}\n}\n\n\/\/ lineLow returns points that create a line whose horizontal projection\n\/\/ (end.X - start.X) is longer than its vertical projection\n\/\/ (end.Y - start.Y).\nfunc lineLow(x0, y0, x1, y1 int) []image.Point {\n\tdeltaX := x1 - x0\n\tdeltaY := y1 - y0\n\n\tstepY := 1\n\tif deltaY < 0 {\n\t\tstepY = -1\n\t\tdeltaY = -deltaY\n\t}\n\n\tvar res []image.Point\n\tdiff := 2*deltaY - deltaX\n\ty := y0\n\tfor x := x0; x <= x1; x++ {\n\t\tres = append(res, image.Point{x, y})\n\t\tif diff > 0 {\n\t\t\ty += stepY\n\t\t\tdiff -= 2 * deltaX\n\t\t}\n\t\tdiff += 2 * deltaY\n\t}\n\treturn res\n}\n\n\/\/ lineHigh returns points that createa line whose vertical projection\n\/\/ (end.Y - start.Y) is longer than its horizontal projection\n\/\/ (end.X - start.X).\nfunc lineHigh(x0, y0, x1, y1 int) []image.Point {\n\tdeltaX := x1 - x0\n\tdeltaY := y1 - y0\n\n\tstepX := 1\n\tif deltaX < 0 {\n\t\tstepX = -1\n\t\tdeltaX = -deltaX\n\t}\n\n\tvar res []image.Point\n\tdiff := 2*deltaX - deltaY\n\tx := x0\n\tfor y := y0; y <= y1; y++ {\n\t\tres = append(res, image.Point{x, y})\n\n\t\tif diff > 0 {\n\t\t\tx += stepX\n\t\t\tdiff -= 2 * deltaY\n\t\t}\n\t\tdiff += 2 * deltaX\n\t}\n\treturn res\n}\n\n\/\/ abs returns the absolute value of x.\nfunc abs(x int) int {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}\n<commit_msg>Fixed last golint<commit_after>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage draw\n\n\/\/ braille_line.go contains code that draws lines on a braille canvas.\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\"\n\t\"github.com\/mum4k\/termdash\/cell\"\n)\n\n\/\/ braillePixelChange represents an action on a pixel on the braille canvas.\ntype braillePixelChange int\n\n\/\/ String implements fmt.Stringer()\nfunc (bpc braillePixelChange) String() string {\n\tif n, ok := braillePixelChangeNames[bpc]; ok {\n\t\treturn n\n\t}\n\treturn \"braillePixelChangeUnknown\"\n}\n\n\/\/ braillePixelChangeNames maps braillePixelChange values to human readable names.\nvar braillePixelChangeNames = map[braillePixelChange]string{\n\tbraillePixelChangeSet: \"braillePixelChangeSet\",\n\tbraillePixelChangeClear: \"braillePixelChangeClear\",\n}\n\nconst (\n\tbraillePixelChangeUnknown braillePixelChange = iota\n\n\tbraillePixelChangeSet\n\tbraillePixelChangeClear\n)\n\n\/\/ BrailleLineOption is used to provide options to BrailleLine().\ntype BrailleLineOption interface {\n\t\/\/ set sets the provided option.\n\tset(*brailleLineOptions)\n}\n\n\/\/ brailleLineOptions stores the provided options.\ntype brailleLineOptions struct {\n\tcellOpts []cell.Option\n\tpixelChange braillePixelChange\n}\n\n\/\/ newBrailleLineOptions returns a new brailleLineOptions instance.\nfunc newBrailleLineOptions() *brailleLineOptions {\n\treturn &brailleLineOptions{\n\t\tpixelChange: braillePixelChangeSet,\n\t}\n}\n\n\/\/ brailleLineOption implements BrailleLineOption.\ntype brailleLineOption func(*brailleLineOptions)\n\n\/\/ set implements BrailleLineOption.set.\nfunc (o brailleLineOption) set(opts *brailleLineOptions) {\n\to(opts)\n}\n\n\/\/ BrailleLineCellOpts sets options on the cells that contain the line.\n\/\/ Cell options on a braille canvas can only be set on the entire cell, not per\n\/\/ pixel.\nfunc BrailleLineCellOpts(cOpts ...cell.Option) BrailleLineOption {\n\treturn brailleLineOption(func(opts *brailleLineOptions) {\n\t\topts.cellOpts = cOpts\n\t})\n}\n\n\/\/ BrailleLineClearPixels changes the behavior of BrailleLine, so that it\n\/\/ clears the pixels belonging to the line instead of setting them.\n\/\/ Useful in order to \"erase\" a line from the canvas as opposed to drawing one.\nfunc BrailleLineClearPixels() BrailleLineOption {\n\treturn brailleLineOption(func(opts *brailleLineOptions) {\n\t\topts.pixelChange = braillePixelChangeClear\n\t})\n}\n\n\/\/ BrailleLine draws an approximated line segment on the braille canvas between\n\/\/ the two provided points.\n\/\/ Both start and end must be valid points within the canvas. Start and end can\n\/\/ be the same point in which case only one pixel will be set on the braille\n\/\/ canvas.\n\/\/ The start or end coordinates must not be negative.\nfunc BrailleLine(bc *braille.Canvas, start, end image.Point, opts ...BrailleLineOption) error {\n\tif start.X < 0 || start.Y < 0 {\n\t\treturn fmt.Errorf(\"the start coordinates cannot be negative, got: %v\", start)\n\t}\n\tif end.X < 0 || end.Y < 0 {\n\t\treturn fmt.Errorf(\"the end coordinates cannot be negative, got: %v\", end)\n\t}\n\n\topt := newBrailleLineOptions()\n\tfor _, o := range opts {\n\t\to.set(opt)\n\t}\n\n\tpoints := brailleLinePoints(start, end)\n\tfor _, p := range points {\n\t\tswitch opt.pixelChange {\n\t\tcase braillePixelChangeSet:\n\t\t\tif err := bc.SetPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"bc.SetPixel(%v) => %v\", p, err)\n\t\t\t}\n\t\tcase braillePixelChangeClear:\n\t\t\tif err := bc.ClearPixel(p, opt.cellOpts...); err != nil {\n\t\t\t\treturn fmt.Errorf(\"bc.ClearPixel(%v) => %v\", p, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ brailleLinePoints returns the points to set when drawing the line.\nfunc brailleLinePoints(start, end image.Point) []image.Point {\n\t\/\/ Implements Bresenham's line algorithm.\n\t\/\/ https:\/\/en.wikipedia.org\/wiki\/Bresenham%27s_line_algorithm\n\n\tvertProj := abs(end.Y - start.Y)\n\thorizProj := abs(end.X - start.X)\n\tif vertProj < horizProj {\n\t\tif start.X > end.X {\n\t\t\treturn lineLow(end.X, end.Y, start.X, start.Y)\n\t\t}\n\t\treturn lineLow(start.X, start.Y, end.X, end.Y)\n\t}\n\tif start.Y > end.Y {\n\t\treturn lineHigh(end.X, end.Y, start.X, start.Y)\n\t}\n\treturn lineHigh(start.X, start.Y, end.X, end.Y)\n}\n\n\/\/ lineLow returns points that create a line whose horizontal projection\n\/\/ (end.X - start.X) is longer than its vertical projection\n\/\/ (end.Y - start.Y).\nfunc lineLow(x0, y0, x1, y1 int) []image.Point {\n\tdeltaX := x1 - x0\n\tdeltaY := y1 - y0\n\n\tstepY := 1\n\tif deltaY < 0 {\n\t\tstepY = -1\n\t\tdeltaY = -deltaY\n\t}\n\n\tvar res []image.Point\n\tdiff := 2*deltaY - deltaX\n\ty := y0\n\tfor x := x0; x <= x1; x++ {\n\t\tres = append(res, image.Point{x, y})\n\t\tif diff > 0 {\n\t\t\ty += stepY\n\t\t\tdiff -= 2 * deltaX\n\t\t}\n\t\tdiff += 2 * deltaY\n\t}\n\treturn res\n}\n\n\/\/ lineHigh returns points that createa line whose vertical projection\n\/\/ (end.Y - start.Y) is longer than its horizontal projection\n\/\/ (end.X - start.X).\nfunc lineHigh(x0, y0, x1, y1 int) []image.Point {\n\tdeltaX := x1 - x0\n\tdeltaY := y1 - y0\n\n\tstepX := 1\n\tif deltaX < 0 {\n\t\tstepX = -1\n\t\tdeltaX = -deltaX\n\t}\n\n\tvar res []image.Point\n\tdiff := 2*deltaX - deltaY\n\tx := x0\n\tfor y := y0; y <= y1; y++ {\n\t\tres = append(res, image.Point{x, y})\n\n\t\tif diff > 0 {\n\t\t\tx += stepX\n\t\t\tdiff -= 2 * deltaY\n\t\t}\n\t\tdiff += 2 * deltaX\n\t}\n\treturn res\n}\n\n\/\/ abs returns the absolute value of x.\nfunc abs(x int) int {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}\n<|endoftext|>"} {"text":"<commit_before>package gumble_ffmpeg\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/layeh\/gumble\/gumble\"\n)\n\ntype Stream struct {\n\tDone func()\n\n\tclient *gumble.Client\n\tcmd *exec.Cmd\n\tpipe io.ReadCloser\n\tsourceStop chan bool\n}\n\nfunc New(client *gumble.Client) (*Stream, error) {\n\tstream := &Stream{\n\t\tclient: client,\n\t}\n\treturn stream, nil\n}\n\nfunc (s *Stream) Play(file string) error {\n\tif s.sourceStop != nil {\n\t\treturn errors.New(\"already playing\")\n\t}\n\ts.cmd = exec.Command(\"ffmpeg\", \"-i\", file, \"-ac\", \"1\", \"-ar\", strconv.Itoa(gumble.AudioSampleRate), \"-f\", \"s16le\", \"-\")\n\tif pipe, err := s.cmd.StdoutPipe(); err != nil {\n\t\ts.cmd = nil\n\t\treturn nil\n\t} else {\n\t\ts.pipe = pipe\n\t}\n\ts.sourceStop = make(chan bool)\n\tgo s.sourceRoutine()\n\ts.cmd.Start()\n\treturn nil\n}\n\nfunc (s *Stream) IsPlaying() bool {\n\treturn s.sourceStop != nil\n}\n\nfunc (s *Stream) Stop() error {\n\tif s.sourceStop != nil {\n\t\tclose(s.sourceStop)\n\t}\n\treturn nil\n}\n\nfunc (s *Stream) sourceRoutine() {\n\tticker := time.NewTicker(10 * time.Millisecond)\n\tdefer ticker.Stop()\n\n\tdefer func() {\n\t\ts.cmd.Process.Kill()\n\t\ts.cmd.Wait()\n\t\ts.cmd = nil\n\t\ts.sourceStop = nil\n\t\tif done := s.Done; done != nil {\n\t\t\tdone()\n\t\t}\n\t}()\n\n\tstop := s.sourceStop\n\tint16Buffer := make([]int16, gumble.AudioDefaultFrameSize)\n\tbyteBuffer := make([]byte, gumble.AudioDefaultFrameSize*2)\n\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif _, err := io.ReadFull(s.pipe, byteBuffer); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor i := range int16Buffer {\n\t\t\t\tint16Buffer[i] = int16(binary.LittleEndian.Uint16(byteBuffer[i*2 : (i+1)*2]))\n\t\t\t}\n\t\t\ts.client.Send(gumble.AudioBuffer(int16Buffer))\n\t\t}\n\t}\n}\n<commit_msg>gumble_ffmpeg: add volume control<commit_after>package gumble_ffmpeg\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/layeh\/gumble\/gumble\"\n)\n\ntype Stream struct {\n\tDone func()\n\n\tclient *gumble.Client\n\tcmd *exec.Cmd\n\tpipe io.ReadCloser\n\tsourceStop chan bool\n\tvolume float32\n}\n\nfunc New(client *gumble.Client) (*Stream, error) {\n\tstream := &Stream{\n\t\tclient: client,\n\t\tvolume: 1.0,\n\t}\n\treturn stream, nil\n}\n\nfunc (s *Stream) Play(file string) error {\n\tif s.sourceStop != nil {\n\t\treturn errors.New(\"already playing\")\n\t}\n\ts.cmd = exec.Command(\"ffmpeg\", \"-i\", file, \"-ac\", \"1\", \"-ar\", strconv.Itoa(gumble.AudioSampleRate), \"-f\", \"s16le\", \"-\")\n\tif pipe, err := s.cmd.StdoutPipe(); err != nil {\n\t\ts.cmd = nil\n\t\treturn nil\n\t} else {\n\t\ts.pipe = pipe\n\t}\n\ts.sourceStop = make(chan bool)\n\tgo s.sourceRoutine()\n\ts.cmd.Start()\n\treturn nil\n}\n\nfunc (s *Stream) IsPlaying() bool {\n\treturn s.sourceStop != nil\n}\n\nfunc (s *Stream) Stop() error {\n\tif s.sourceStop != nil {\n\t\tclose(s.sourceStop)\n\t}\n\treturn nil\n}\n\nfunc (s *Stream) Volume() float32 {\n\treturn s.volume\n}\n\nfunc (s *Stream) SetVolume(volume float32) {\n\ts.volume = volume\n}\n\nfunc (s *Stream) sourceRoutine() {\n\tticker := time.NewTicker(10 * time.Millisecond)\n\tdefer ticker.Stop()\n\n\tdefer func() {\n\t\ts.cmd.Process.Kill()\n\t\ts.cmd.Wait()\n\t\ts.cmd = nil\n\t\ts.sourceStop = nil\n\t\tif done := s.Done; done != nil {\n\t\t\tdone()\n\t\t}\n\t}()\n\n\tstop := s.sourceStop\n\tint16Buffer := make([]int16, gumble.AudioDefaultFrameSize)\n\tbyteBuffer := make([]byte, gumble.AudioDefaultFrameSize*2)\n\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif _, err := io.ReadFull(s.pipe, byteBuffer); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor i := range int16Buffer {\n\t\t\t\tfloat := float32(int16(binary.LittleEndian.Uint16(byteBuffer[i*2 : (i+1)*2])))\n\t\t\t\tint16Buffer[i] = int16(s.volume * float)\n\t\t\t}\n\t\t\ts.client.Send(gumble.AudioBuffer(int16Buffer))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Aaron Longwell\n\/\/\n\/\/ Use of this source code is governed by an MIT licese.\n\/\/ Details in the LICENSE file.\n\npackage trello\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Card struct {\n\tclient *Client\n\n\t\/\/ Key metadata\n\tID string `json:\"id\"`\n\tIDShort int `json:\"idShort\"`\n\tName string `json:\"name\"`\n\tPos float64 `json:\"pos\"`\n\tEmail string `json:\"email\"`\n\tShortLink string `json:\"shortLink\"`\n\tShortUrl string `json:\"shortUrl\"`\n\tUrl string `json:\"url\"`\n\tDesc string `json:\"desc\"`\n\tDue *time.Time `json:\"due\"`\n\tDueComplete bool `json:\"dueComplete\"`\n\tClosed bool `json:\"closed\"`\n\tSubscribed bool `json:\"subscribed\"`\n\tDateLastActivity *time.Time `json:\"dateLastActivity\"`\n\n\t\/\/ Board\n\tBoard *Board\n\tIDBoard string `json:\"idBoard\"`\n\n\t\/\/ List\n\tList *List\n\tIDList string `json:\"idList\"`\n\n\t\/\/ Badges\n\tBadges struct {\n\t\tVotes int `json:\"votes\"`\n\t\tViewingMemberVoted bool `json:\"viewingMemberVoted\"`\n\t\tSubscribed bool `json:\"subscribed\"`\n\t\tFogbugz string `json:\"fogbugz,omitempty\"`\n\t\tCheckItems int `json:\"checkItems\"`\n\t\tCheckItemsChecked int `json:\"checkItemsChecked\"`\n\t\tComments int `json:\"comments\"`\n\t\tAttachments int `json:\"attachments\"`\n\t\tDescription bool `json:\"description\"`\n\t\tDue *time.Time `json:\"due,omitempty\"`\n\t} `json:\"badges\"`\n\n\t\/\/ Actions\n\tActions ActionCollection `json:\"actions,omitempty\"`\n\n\t\/\/ Checklists\n\tIDCheckLists []string `json:\"idCheckLists\"`\n\tChecklists []*Checklist `json:\"checklists,omitempty\"`\n\tCheckItemStates []*CheckItemState `json:\"checkItemStates,omitempty\"`\n\n\t\/\/ Members\n\tIDMembers []string `json:\"idMembers,omitempty\"`\n\tIDMembersVoted []string `json:\"idMembersVoted,omitempty\"`\n\tMembers []*Member `json:\"members,omitempty\"`\n\n\t\/\/ Attachments\n\tIDAttachmentCover string `json:\"idAttachmentCover\"`\n\tManualCoverAttachment bool `json:\"manualCoverAttachment\"`\n\tAttachments []*Attachment `json:\"attachments,omitempty\"`\n\n\t\/\/ Labels\n\tLabels []*Label `json:\"labels,omitempty\"`\n}\n\nfunc (c *Card) CreatedAt() time.Time {\n\tt, _ := IDToTime(c.ID)\n\treturn t\n}\n\nfunc (c *Card) MoveToList(listID string, args Arguments) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\targs[\"idList\"] = listID\n\treturn c.client.Put(path, args, &c)\n}\n\nfunc (c *Card) SetPos(newPos float64) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": fmt.Sprintf(\"%f\", newPos)}, c)\n}\n\nfunc (c *Card) MoveToTopOfList() error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": \"top\"}, c)\n}\n\nfunc (c *Card) MoveToBottomOfList() error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": \"bottom\"}, c)\n}\n\nfunc (c *Card) Update(args Arguments) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, args, c)\n}\n\nfunc (c *Client) CreateCard(card *Card, extraArgs Arguments) error {\n\tpath := \"cards\"\n\targs := Arguments{\n\t\t\"name\": card.Name,\n\t\t\"desc\": card.Desc,\n\t\t\"pos\": strconv.FormatFloat(card.Pos, 'g', -1, 64),\n\t\t\"idList\": card.IDList,\n\t\t\"idMembers\": strings.Join(card.IDMembers, \",\"),\n\t}\n\tif card.Due != nil {\n\t\targs[\"due\"] = card.Due.Format(time.RFC3339)\n\t}\n\t\/\/ Allow overriding the creation position with 'top' or 'botttom'\n\tif pos, ok := extraArgs[\"pos\"]; ok {\n\t\targs[\"pos\"] = pos\n\t}\n\terr := c.Post(path, args, &card)\n\tif err == nil {\n\t\tcard.client = c\n\t}\n\treturn err\n}\n\nfunc (l *List) AddCard(card *Card, extraArgs Arguments) error {\n\tpath := fmt.Sprintf(\"lists\/%s\/cards\", l.ID)\n\targs := Arguments{\n\t\t\"name\": card.Name,\n\t\t\"desc\": card.Desc,\n\t\t\"idMembers\": strings.Join(card.IDMembers, \",\"),\n\t}\n\tif card.Due != nil {\n\t\targs[\"due\"] = card.Due.Format(time.RFC3339)\n\t}\n\t\/\/ Allow overwriting the creation position with 'top' or 'bottom'\n\tif pos, ok := extraArgs[\"pos\"]; ok {\n\t\targs[\"pos\"] = pos\n\t}\n\terr := l.client.Post(path, args, &card)\n\tif err == nil {\n\t\tcard.client = l.client\n\t} else {\n\t\terr = errors.Wrapf(err, \"Error adding card to list %s\", l.ID)\n\t}\n\treturn err\n}\n\n\/\/ Try these Arguments\n\/\/\n\/\/ \tArguments[\"keepFromSource\"] = \"all\"\n\/\/ Arguments[\"keepFromSource\"] = \"none\"\n\/\/ \tArguments[\"keepFromSource\"] = \"attachments,checklists,comments\"\n\/\/\nfunc (c *Card) CopyToList(listID string, args Arguments) (*Card, error) {\n\tpath := \"cards\"\n\targs[\"idList\"] = listID\n\targs[\"idCardSource\"] = c.ID\n\tnewCard := Card{}\n\terr := c.client.Post(path, args, &newCard)\n\tif err == nil {\n\t\tnewCard.client = c.client\n\t} else {\n\t\terr = errors.Wrapf(err, \"Error copying card '%s' to list '%s'.\", c.ID, listID)\n\t}\n\treturn &newCard, err\n}\n\nfunc (c *Card) AddComment(comment string, args Arguments) (*Action, error) {\n\tpath := fmt.Sprintf(\"cards\/%s\/actions\/comments\", c.ID)\n\targs[\"text\"] = comment\n\taction := Action{}\n\terr := c.client.Post(path, args, &action)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"Error commenting on card %s\", c.ID)\n\t}\n\treturn &action, err\n}\n\n\/\/ If this Card was created from a copy of another Card, this func retrieves\n\/\/ the originating Card. Returns an error only when a low-level failure occurred.\n\/\/ If this Card has no parent, a nil card and nil error are returned. In other words, the\n\/\/ non-existence of a parent is not treated as an error.\n\/\/\nfunc (c *Card) GetParentCard(args Arguments) (*Card, error) {\n\n\t\/\/ Hopefully the card came pre-loaded with Actions including the card creation\n\taction := c.Actions.FirstCardCreateAction()\n\n\tif action == nil {\n\t\t\/\/ No luck. Go get copyCard actions for this card.\n\t\tc.client.log(\"Creation action wasn't supplied before GetParentCard() on '%s'. Getting copyCard actions.\", c.ID)\n\t\tactions, err := c.GetActions(Arguments{\"filter\": \"copyCard\"})\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetParentCard() failed to GetActions() for card '%s'\", c.ID)\n\t\t\treturn nil, err\n\t\t}\n\t\taction = actions.FirstCardCreateAction()\n\t}\n\n\tif action != nil && action.Data != nil && action.Data.CardSource != nil {\n\t\tcard, err := c.client.GetCard(action.Data.CardSource.ID, args)\n\t\treturn card, err\n\t}\n\n\treturn nil, nil\n}\n\nfunc (c *Card) GetAncestorCards(args Arguments) (ancestors []*Card, err error) {\n\n\t\/\/ Get the first parent\n\tparent, err := c.GetParentCard(args)\n\tif IsNotFound(err) || IsPermissionDenied(err) {\n\t\tc.client.log(\"[trello] Can't get details about the parent of card '%s' due to lack of permissions or card deleted.\", c.ID)\n\t\treturn ancestors, nil\n\t}\n\n\tfor parent != nil {\n\t\tancestors = append(ancestors, parent)\n\t\tparent, err = parent.GetParentCard(args)\n\t\tif IsNotFound(err) || IsPermissionDenied(err) {\n\t\t\tc.client.log(\"[trello] Can't get details about the parent of card '%s' due to lack of permissions or card deleted.\", c.ID)\n\t\t\treturn ancestors, nil\n\t\t} else if err != nil {\n\t\t\treturn ancestors, err\n\t\t}\n\t}\n\n\treturn ancestors, err\n}\n\nfunc (c *Card) GetOriginatingCard(args Arguments) (*Card, error) {\n\tancestors, err := c.GetAncestorCards(args)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\tif len(ancestors) > 0 {\n\t\treturn ancestors[len(ancestors)-1], nil\n\t} else {\n\t\treturn c, nil\n\t}\n}\n\nfunc (c *Card) CreatorMember() (*Member, error) {\n\tvar actions ActionCollection\n\tvar err error\n\n\tif len(c.Actions) == 0 {\n\t\tc.Actions, err = c.GetActions(Arguments{\"filter\": \"all\", \"limit\": \"1000\", \"memberCreator_fields\": \"all\"})\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetActions() call failed.\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tactions = c.Actions.FilterToCardCreationActions()\n\n\tif len(actions) > 0 {\n\t\treturn actions[0].MemberCreator, nil\n\t}\n\treturn nil, errors.Errorf(\"No card creation actions on Card %s with a .MemberCreator\", c.ID)\n}\n\nfunc (c *Card) CreatorMemberID() (string, error) {\n\n\tvar actions ActionCollection\n\tvar err error\n\n\tif len(c.Actions) == 0 {\n\t\tc.client.log(\"[trello] CreatorMemberID() called on card '%s' without any Card.Actions. Fetching fresh.\", c.ID)\n\t\tc.Actions, err = c.GetActions(Defaults())\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetActions() call failed.\")\n\t\t}\n\t}\n\tactions = c.Actions.FilterToCardCreationActions()\n\n\tif len(actions) > 0 {\n\t\tif actions[0].IDMemberCreator != \"\" {\n\t\t\treturn actions[0].IDMemberCreator, err\n\t\t}\n\t}\n\n\treturn \"\", errors.Wrapf(err, \"No Actions on card '%s' could be used to find its creator.\", c.ID)\n}\n\nfunc (b *Board) ContainsCopyOfCard(cardID string, args Arguments) (bool, error) {\n\targs[\"filter\"] = \"copyCard\"\n\tactions, err := b.GetActions(args)\n\tif err != nil {\n\t\terr := errors.Wrapf(err, \"GetCards() failed inside ContainsCopyOf() for board '%s' and card '%s'.\", b.ID, cardID)\n\t\treturn false, err\n\t}\n\tfor _, action := range actions {\n\t\tif action.Data != nil && action.Data.CardSource != nil && action.Data.CardSource.ID == cardID {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (c *Client) GetCard(cardID string, args Arguments) (card *Card, err error) {\n\tpath := fmt.Sprintf(\"cards\/%s\", cardID)\n\terr = c.Get(path, args, &card)\n\tif card != nil {\n\t\tcard.client = c\n\t}\n\treturn card, err\n}\n\n\/**\n * Retrieves all Cards on a Board\n *\n * If before\n *\/\nfunc (b *Board) GetCards(args Arguments) (cards []*Card, err error) {\n\tpath := fmt.Sprintf(\"boards\/%s\/cards\", b.ID)\n\n\terr = b.client.Get(path, args, &cards)\n\n\t\/\/ Naive implementation would return here. To make sure we get all\n\t\/\/ cards, we begin\n\tif len(cards) > 0 {\n\t\tmoreCards := true\n\t\tfor moreCards == true {\n\t\t\tnextCardBatch := make([]*Card, 0)\n\t\t\targs[\"before\"] = EarliestCardID(cards)\n\t\t\terr = b.client.Get(path, args, &nextCardBatch)\n\t\t\tif len(nextCardBatch) > 0 {\n\t\t\t\tcards = append(cards, nextCardBatch...)\n\t\t\t} else {\n\t\t\t\tmoreCards = false\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := range cards {\n\t\tcards[i].client = b.client\n\t}\n\n\treturn\n}\n\n\/**\n * Retrieves all Cards in a List\n *\/\nfunc (l *List) GetCards(args Arguments) (cards []*Card, err error) {\n\tpath := fmt.Sprintf(\"lists\/%s\/cards\", l.ID)\n\terr = l.client.Get(path, args, &cards)\n\tfor i := range cards {\n\t\tcards[i].client = l.client\n\t}\n\treturn\n}\n\nfunc EarliestCardID(cards []*Card) string {\n\tif len(cards) == 0 {\n\t\treturn \"\"\n\t}\n\tearliest := cards[0].ID\n\tfor _, card := range cards {\n\t\tif card.ID < earliest {\n\t\t\tearliest = card.ID\n\t\t}\n\t}\n\treturn earliest\n}\n<commit_msg>Adds Client.RemoveMember().<commit_after>\/\/ Copyright © 2016 Aaron Longwell\n\/\/\n\/\/ Use of this source code is governed by an MIT licese.\n\/\/ Details in the LICENSE file.\n\npackage trello\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Card struct {\n\tclient *Client\n\n\t\/\/ Key metadata\n\tID string `json:\"id\"`\n\tIDShort int `json:\"idShort\"`\n\tName string `json:\"name\"`\n\tPos float64 `json:\"pos\"`\n\tEmail string `json:\"email\"`\n\tShortLink string `json:\"shortLink\"`\n\tShortUrl string `json:\"shortUrl\"`\n\tUrl string `json:\"url\"`\n\tDesc string `json:\"desc\"`\n\tDue *time.Time `json:\"due\"`\n\tDueComplete bool `json:\"dueComplete\"`\n\tClosed bool `json:\"closed\"`\n\tSubscribed bool `json:\"subscribed\"`\n\tDateLastActivity *time.Time `json:\"dateLastActivity\"`\n\n\t\/\/ Board\n\tBoard *Board\n\tIDBoard string `json:\"idBoard\"`\n\n\t\/\/ List\n\tList *List\n\tIDList string `json:\"idList\"`\n\n\t\/\/ Badges\n\tBadges struct {\n\t\tVotes int `json:\"votes\"`\n\t\tViewingMemberVoted bool `json:\"viewingMemberVoted\"`\n\t\tSubscribed bool `json:\"subscribed\"`\n\t\tFogbugz string `json:\"fogbugz,omitempty\"`\n\t\tCheckItems int `json:\"checkItems\"`\n\t\tCheckItemsChecked int `json:\"checkItemsChecked\"`\n\t\tComments int `json:\"comments\"`\n\t\tAttachments int `json:\"attachments\"`\n\t\tDescription bool `json:\"description\"`\n\t\tDue *time.Time `json:\"due,omitempty\"`\n\t} `json:\"badges\"`\n\n\t\/\/ Actions\n\tActions ActionCollection `json:\"actions,omitempty\"`\n\n\t\/\/ Checklists\n\tIDCheckLists []string `json:\"idCheckLists\"`\n\tChecklists []*Checklist `json:\"checklists,omitempty\"`\n\tCheckItemStates []*CheckItemState `json:\"checkItemStates,omitempty\"`\n\n\t\/\/ Members\n\tIDMembers []string `json:\"idMembers,omitempty\"`\n\tIDMembersVoted []string `json:\"idMembersVoted,omitempty\"`\n\tMembers []*Member `json:\"members,omitempty\"`\n\n\t\/\/ Attachments\n\tIDAttachmentCover string `json:\"idAttachmentCover\"`\n\tManualCoverAttachment bool `json:\"manualCoverAttachment\"`\n\tAttachments []*Attachment `json:\"attachments,omitempty\"`\n\n\t\/\/ Labels\n\tLabels []*Label `json:\"labels,omitempty\"`\n}\n\nfunc (c *Card) CreatedAt() time.Time {\n\tt, _ := IDToTime(c.ID)\n\treturn t\n}\n\nfunc (c *Card) MoveToList(listID string, args Arguments) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\targs[\"idList\"] = listID\n\treturn c.client.Put(path, args, &c)\n}\n\nfunc (c *Card) SetPos(newPos float64) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": fmt.Sprintf(\"%f\", newPos)}, c)\n}\n\nfunc (c *Card) RemoveMember(memberID string) error {\n\tpath := fmt.Sprintf(\"cards\/%s\/idMembers\/%s\", c.ID, memberID)\n\treturn c.client.Delete(path, Defaults(), nil)\n}\n\nfunc (c *Card) MoveToTopOfList() error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": \"top\"}, c)\n}\n\nfunc (c *Card) MoveToBottomOfList() error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, Arguments{\"pos\": \"bottom\"}, c)\n}\n\nfunc (c *Card) Update(args Arguments) error {\n\tpath := fmt.Sprintf(\"cards\/%s\", c.ID)\n\treturn c.client.Put(path, args, c)\n}\n\nfunc (c *Client) CreateCard(card *Card, extraArgs Arguments) error {\n\tpath := \"cards\"\n\targs := Arguments{\n\t\t\"name\": card.Name,\n\t\t\"desc\": card.Desc,\n\t\t\"pos\": strconv.FormatFloat(card.Pos, 'g', -1, 64),\n\t\t\"idList\": card.IDList,\n\t\t\"idMembers\": strings.Join(card.IDMembers, \",\"),\n\t}\n\tif card.Due != nil {\n\t\targs[\"due\"] = card.Due.Format(time.RFC3339)\n\t}\n\t\/\/ Allow overriding the creation position with 'top' or 'botttom'\n\tif pos, ok := extraArgs[\"pos\"]; ok {\n\t\targs[\"pos\"] = pos\n\t}\n\terr := c.Post(path, args, &card)\n\tif err == nil {\n\t\tcard.client = c\n\t}\n\treturn err\n}\n\nfunc (l *List) AddCard(card *Card, extraArgs Arguments) error {\n\tpath := fmt.Sprintf(\"lists\/%s\/cards\", l.ID)\n\targs := Arguments{\n\t\t\"name\": card.Name,\n\t\t\"desc\": card.Desc,\n\t\t\"idMembers\": strings.Join(card.IDMembers, \",\"),\n\t}\n\tif card.Due != nil {\n\t\targs[\"due\"] = card.Due.Format(time.RFC3339)\n\t}\n\t\/\/ Allow overwriting the creation position with 'top' or 'bottom'\n\tif pos, ok := extraArgs[\"pos\"]; ok {\n\t\targs[\"pos\"] = pos\n\t}\n\terr := l.client.Post(path, args, &card)\n\tif err == nil {\n\t\tcard.client = l.client\n\t} else {\n\t\terr = errors.Wrapf(err, \"Error adding card to list %s\", l.ID)\n\t}\n\treturn err\n}\n\n\/\/ Try these Arguments\n\/\/\n\/\/ \tArguments[\"keepFromSource\"] = \"all\"\n\/\/ Arguments[\"keepFromSource\"] = \"none\"\n\/\/ \tArguments[\"keepFromSource\"] = \"attachments,checklists,comments\"\n\/\/\nfunc (c *Card) CopyToList(listID string, args Arguments) (*Card, error) {\n\tpath := \"cards\"\n\targs[\"idList\"] = listID\n\targs[\"idCardSource\"] = c.ID\n\tnewCard := Card{}\n\terr := c.client.Post(path, args, &newCard)\n\tif err == nil {\n\t\tnewCard.client = c.client\n\t} else {\n\t\terr = errors.Wrapf(err, \"Error copying card '%s' to list '%s'.\", c.ID, listID)\n\t}\n\treturn &newCard, err\n}\n\nfunc (c *Card) AddComment(comment string, args Arguments) (*Action, error) {\n\tpath := fmt.Sprintf(\"cards\/%s\/actions\/comments\", c.ID)\n\targs[\"text\"] = comment\n\taction := Action{}\n\terr := c.client.Post(path, args, &action)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"Error commenting on card %s\", c.ID)\n\t}\n\treturn &action, err\n}\n\n\/\/ If this Card was created from a copy of another Card, this func retrieves\n\/\/ the originating Card. Returns an error only when a low-level failure occurred.\n\/\/ If this Card has no parent, a nil card and nil error are returned. In other words, the\n\/\/ non-existence of a parent is not treated as an error.\n\/\/\nfunc (c *Card) GetParentCard(args Arguments) (*Card, error) {\n\n\t\/\/ Hopefully the card came pre-loaded with Actions including the card creation\n\taction := c.Actions.FirstCardCreateAction()\n\n\tif action == nil {\n\t\t\/\/ No luck. Go get copyCard actions for this card.\n\t\tc.client.log(\"Creation action wasn't supplied before GetParentCard() on '%s'. Getting copyCard actions.\", c.ID)\n\t\tactions, err := c.GetActions(Arguments{\"filter\": \"copyCard\"})\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetParentCard() failed to GetActions() for card '%s'\", c.ID)\n\t\t\treturn nil, err\n\t\t}\n\t\taction = actions.FirstCardCreateAction()\n\t}\n\n\tif action != nil && action.Data != nil && action.Data.CardSource != nil {\n\t\tcard, err := c.client.GetCard(action.Data.CardSource.ID, args)\n\t\treturn card, err\n\t}\n\n\treturn nil, nil\n}\n\nfunc (c *Card) GetAncestorCards(args Arguments) (ancestors []*Card, err error) {\n\n\t\/\/ Get the first parent\n\tparent, err := c.GetParentCard(args)\n\tif IsNotFound(err) || IsPermissionDenied(err) {\n\t\tc.client.log(\"[trello] Can't get details about the parent of card '%s' due to lack of permissions or card deleted.\", c.ID)\n\t\treturn ancestors, nil\n\t}\n\n\tfor parent != nil {\n\t\tancestors = append(ancestors, parent)\n\t\tparent, err = parent.GetParentCard(args)\n\t\tif IsNotFound(err) || IsPermissionDenied(err) {\n\t\t\tc.client.log(\"[trello] Can't get details about the parent of card '%s' due to lack of permissions or card deleted.\", c.ID)\n\t\t\treturn ancestors, nil\n\t\t} else if err != nil {\n\t\t\treturn ancestors, err\n\t\t}\n\t}\n\n\treturn ancestors, err\n}\n\nfunc (c *Card) GetOriginatingCard(args Arguments) (*Card, error) {\n\tancestors, err := c.GetAncestorCards(args)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\tif len(ancestors) > 0 {\n\t\treturn ancestors[len(ancestors)-1], nil\n\t} else {\n\t\treturn c, nil\n\t}\n}\n\nfunc (c *Card) CreatorMember() (*Member, error) {\n\tvar actions ActionCollection\n\tvar err error\n\n\tif len(c.Actions) == 0 {\n\t\tc.Actions, err = c.GetActions(Arguments{\"filter\": \"all\", \"limit\": \"1000\", \"memberCreator_fields\": \"all\"})\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetActions() call failed.\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tactions = c.Actions.FilterToCardCreationActions()\n\n\tif len(actions) > 0 {\n\t\treturn actions[0].MemberCreator, nil\n\t}\n\treturn nil, errors.Errorf(\"No card creation actions on Card %s with a .MemberCreator\", c.ID)\n}\n\nfunc (c *Card) CreatorMemberID() (string, error) {\n\n\tvar actions ActionCollection\n\tvar err error\n\n\tif len(c.Actions) == 0 {\n\t\tc.client.log(\"[trello] CreatorMemberID() called on card '%s' without any Card.Actions. Fetching fresh.\", c.ID)\n\t\tc.Actions, err = c.GetActions(Defaults())\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"GetActions() call failed.\")\n\t\t}\n\t}\n\tactions = c.Actions.FilterToCardCreationActions()\n\n\tif len(actions) > 0 {\n\t\tif actions[0].IDMemberCreator != \"\" {\n\t\t\treturn actions[0].IDMemberCreator, err\n\t\t}\n\t}\n\n\treturn \"\", errors.Wrapf(err, \"No Actions on card '%s' could be used to find its creator.\", c.ID)\n}\n\nfunc (b *Board) ContainsCopyOfCard(cardID string, args Arguments) (bool, error) {\n\targs[\"filter\"] = \"copyCard\"\n\tactions, err := b.GetActions(args)\n\tif err != nil {\n\t\terr := errors.Wrapf(err, \"GetCards() failed inside ContainsCopyOf() for board '%s' and card '%s'.\", b.ID, cardID)\n\t\treturn false, err\n\t}\n\tfor _, action := range actions {\n\t\tif action.Data != nil && action.Data.CardSource != nil && action.Data.CardSource.ID == cardID {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (c *Client) GetCard(cardID string, args Arguments) (card *Card, err error) {\n\tpath := fmt.Sprintf(\"cards\/%s\", cardID)\n\terr = c.Get(path, args, &card)\n\tif card != nil {\n\t\tcard.client = c\n\t}\n\treturn card, err\n}\n\n\/**\n * Retrieves all Cards on a Board\n *\n * If before\n *\/\nfunc (b *Board) GetCards(args Arguments) (cards []*Card, err error) {\n\tpath := fmt.Sprintf(\"boards\/%s\/cards\", b.ID)\n\n\terr = b.client.Get(path, args, &cards)\n\n\t\/\/ Naive implementation would return here. To make sure we get all\n\t\/\/ cards, we begin\n\tif len(cards) > 0 {\n\t\tmoreCards := true\n\t\tfor moreCards == true {\n\t\t\tnextCardBatch := make([]*Card, 0)\n\t\t\targs[\"before\"] = EarliestCardID(cards)\n\t\t\terr = b.client.Get(path, args, &nextCardBatch)\n\t\t\tif len(nextCardBatch) > 0 {\n\t\t\t\tcards = append(cards, nextCardBatch...)\n\t\t\t} else {\n\t\t\t\tmoreCards = false\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := range cards {\n\t\tcards[i].client = b.client\n\t}\n\n\treturn\n}\n\n\/**\n * Retrieves all Cards in a List\n *\/\nfunc (l *List) GetCards(args Arguments) (cards []*Card, err error) {\n\tpath := fmt.Sprintf(\"lists\/%s\/cards\", l.ID)\n\terr = l.client.Get(path, args, &cards)\n\tfor i := range cards {\n\t\tcards[i].client = l.client\n\t}\n\treturn\n}\n\nfunc EarliestCardID(cards []*Card) string {\n\tif len(cards) == 0 {\n\t\treturn \"\"\n\t}\n\tearliest := cards[0].ID\n\tfor _, card := range cards {\n\t\tif card.ID < earliest {\n\t\t\tearliest = card.ID\n\t\t}\n\t}\n\treturn earliest\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"time\"\n\n\t\"github.com\/gsdocker\/gslogger\"\n\t\"github.com\/gsrpc\/gorpc\"\n)\n\ntype _HeartbeatHandler struct {\n\tgslogger.Log \/\/ Mixin log APIs\n\ttimeout time.Duration \/\/ timeout\n\tcontext gorpc.Context \/\/ handler context\n\texitflag chan bool \/\/ exit flag\n\tflag uint32 \/\/ flag\n\ttimestamp time.Time \/\/ last received heartbeat timestamp\n}\n\n\/\/ NewHeartbeatHandler create new heartbeat handler\nfunc NewHeartbeatHandler(timeout time.Duration) gorpc.Handler {\n\treturn &_HeartbeatHandler{\n\t\tLog: gslogger.Get(\"heartbeat\"),\n\t\ttimeout: timeout,\n\t\ttimestamp: time.Now(),\n\t}\n}\n\nfunc (handler *_HeartbeatHandler) Register(context gorpc.Context) error {\n\treturn nil\n}\n\nfunc (handler *_HeartbeatHandler) Active(context gorpc.Context) error {\n\n\tif handler.exitflag == nil {\n\n\t\thandler.context = context\n\n\t\thandler.exitflag = make(chan bool)\n\n\t\thandler.timestamp = time.Now()\n\n\t\tif handler.timeout != 0 {\n\t\t\tgo handler.timeoutLoop(context, handler.exitflag)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (handler *_HeartbeatHandler) Unregister(context gorpc.Context) {\n}\n\nfunc (handler *_HeartbeatHandler) Inactive(context gorpc.Context) {\n\n\tif handler.exitflag != nil {\n\n\t\tclose(handler.exitflag)\n\n\t\thandler.exitflag = nil\n\t}\n\n}\n\nfunc (handler *_HeartbeatHandler) timeoutLoop(context gorpc.Context, exitflag chan bool) {\n\n\twheel := context.Pipeline().EventLoop().TimeWheel()\n\n\tticker := wheel.NewTicker(handler.timeout)\n\n\tdefer ticker.Stop()\n\n\tfor {\n\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\tif time.Now().Sub(handler.timestamp) > handler.timeout*2 {\n\t\t\t\thandler.context.Close()\n\t\t\t\thandler.W(\"heartbeat timeout(%s), close current pipeline(%s)\", handler.timeout*2, handler.context.Pipeline())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmessage := gorpc.NewMessage()\n\n\t\t\tmessage.Code = gorpc.CodeHeartbeat\n\n\t\t\thandler.context.Send(message)\n\n\t\t\thandler.V(\"%s send heartbeat message\", handler.context.Pipeline())\n\n\t\tcase <-exitflag:\n\t\t\thandler.I(\"exit heartbeat loop .....................\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (handler *_HeartbeatHandler) MessageReceived(context gorpc.Context, message *gorpc.Message) (*gorpc.Message, error) {\n\n\tif message.Code == gorpc.CodeHeartbeat {\n\n\t\thandler.V(\"%s recv heartbeat message\", handler.context.Pipeline())\n\n\t\tif handler.timeout != 0 {\n\t\t\thandler.timestamp = time.Now()\n\t\t}\n\n\t\treturn nil, nil\n\t}\n\n\treturn message, nil\n}\nfunc (handler *_HeartbeatHandler) MessageSending(context gorpc.Context, message *gorpc.Message) (*gorpc.Message, error) {\n\treturn message, nil\n}\n\nfunc (handler *_HeartbeatHandler) Panic(context gorpc.Context, err error) {\n\n}\n<commit_msg>fix bug<commit_after>package handler\n\nimport (\n\t\"time\"\n\n\t\"github.com\/gsdocker\/gslogger\"\n\t\"github.com\/gsrpc\/gorpc\"\n)\n\ntype _HeartbeatHandler struct {\n\tgslogger.Log \/\/ Mixin log APIs\n\ttimeout time.Duration \/\/ timeout\n\tcontext gorpc.Context \/\/ handler context\n\texitflag chan bool \/\/ exit flag\n\tflag uint32 \/\/ flag\n\ttimestamp time.Time \/\/ last received heartbeat timestamp\n}\n\n\/\/ NewHeartbeatHandler create new heartbeat handler\nfunc NewHeartbeatHandler(timeout time.Duration) gorpc.Handler {\n\treturn &_HeartbeatHandler{\n\t\tLog: gslogger.Get(\"heartbeat\"),\n\t\ttimeout: timeout,\n\t\ttimestamp: time.Now(),\n\t}\n}\n\nfunc (handler *_HeartbeatHandler) Register(context gorpc.Context) error {\n\treturn nil\n}\n\nfunc (handler *_HeartbeatHandler) Active(context gorpc.Context) error {\n\n\tif handler.exitflag == nil {\n\n\t\thandler.context = context\n\n\t\thandler.exitflag = make(chan bool)\n\n\t\thandler.timestamp = time.Now()\n\n\t\tif handler.timeout != 0 {\n\t\t\tgo handler.timeoutLoop(context, handler.exitflag)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (handler *_HeartbeatHandler) Unregister(context gorpc.Context) {\n}\n\nfunc (handler *_HeartbeatHandler) Inactive(context gorpc.Context) {\n\n\tif handler.exitflag != nil {\n\n\t\tclose(handler.exitflag)\n\n\t\thandler.exitflag = nil\n\t}\n\n}\n\nfunc (handler *_HeartbeatHandler) timeoutLoop(context gorpc.Context, exitflag chan bool) {\n\n\twheel := context.Pipeline().EventLoop().TimeWheel()\n\n\tticker := wheel.NewTicker(handler.timeout)\n\n\tdefer ticker.Stop()\n\n\tfor {\n\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\tif time.Now().Sub(handler.timestamp) > handler.timeout*2 {\n\t\t\t\thandler.context.Close()\n\t\t\t\thandler.W(\"heartbeat timeout(%s), close current pipeline(%s)\", handler.timeout*2, handler.context.Pipeline())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmessage := gorpc.NewMessage()\n\n\t\t\tmessage.Code = gorpc.CodeHeartbeat\n\n\t\t\thandler.context.Send(message)\n\n\t\t\thandler.V(\"%s send heartbeat message\", handler.context.Pipeline())\n\n\t\tcase <-exitflag:\n\t\t\thandler.I(\"exit heartbeat loop .....................\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (handler *_HeartbeatHandler) MessageReceived(context gorpc.Context, message *gorpc.Message) (*gorpc.Message, error) {\n\n\tif message.Code == gorpc.CodeHeartbeat {\n\n\t\thandler.V(\"%s recv heartbeat message\", context.Pipeline())\n\n\t\tif handler.timeout != 0 {\n\t\t\thandler.timestamp = time.Now()\n\t\t}\n\n\t\treturn nil, nil\n\t}\n\n\treturn message, nil\n}\nfunc (handler *_HeartbeatHandler) MessageSending(context gorpc.Context, message *gorpc.Message) (*gorpc.Message, error) {\n\treturn message, nil\n}\n\nfunc (handler *_HeartbeatHandler) Panic(context gorpc.Context, err error) {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package gcs_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/nyaxt\/otaru\/flags\"\n\tauthtu \"github.com\/nyaxt\/otaru\/gcloud\/auth\/testutils\"\n\t\"github.com\/nyaxt\/otaru\/gcloud\/gcs\"\n\ttu \"github.com\/nyaxt\/otaru\/testutils\"\n)\n\nfunc testGCSBlobStore() *gcs.GCSBlobStore {\n\tbs, err := gcs.NewGCSBlobStore(\n\t\tauthtu.TestConfig().ProjectName,\n\t\tauthtu.TestBucketName(),\n\t\tauthtu.TestTokenSource(),\n\t\tflags.O_RDWR,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create GCSBlobStore: %v\", err)\n\t}\n\treturn bs\n}\n\nfunc TestGCSBlobStore_WriteReadDelete(t *testing.T) {\n\tbs := testGCSBlobStore()\n\n\t\/\/ Write\n\t{\n\t\tw, err := bs.OpenWriter(\"hoge\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to open writer: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tn, err := w.Write(tu.HelloWorld)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Write failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(tu.HelloWorld) {\n\t\t\tt.Errorf(\"Write returned unexpected len: %d\", n)\n\t\t\treturn\n\t\t}\n\t\tif err := w.Close(); err != nil {\n\t\t\tt.Errorf(\"Failed to close writer: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Read\n\t{\n\t\tr, err := bs.OpenReader(\"hoge\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to open reader: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbuf := make([]byte, len(tu.HelloWorld))\n\t\tif _, err = io.ReadFull(r, buf); err != nil {\n\t\t\tt.Errorf(\"ReadFull failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(tu.HelloWorld, buf) {\n\t\t\tt.Errorf(\"Read content != Write content\")\n\t\t}\n\n\t\tif err := r.Close(); err != nil {\n\t\t\tt.Errorf(\"Failed to close reader: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ ListBlobs\n\t{\n\t\tbpaths, err := bs.ListBlobs()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to ListBlobs(): %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !reflect.DeepEqual([]string{\"hoge\"}, bpaths) {\n\t\t\tt.Errorf(\"Unexpected BlobList: %v\", bpaths)\n\t\t}\n\t}\n\n\t\/\/ Delete\n\tif err := bs.RemoveBlob(\"hoge\"); err != nil {\n\t\tt.Errorf(\"Failed to remove blob: %v\", err)\n\t}\n}\n<commit_msg>TestGCSBlobStore_ReadOnly<commit_after>package gcs_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com\/nyaxt\/otaru\/flags\"\n\tauthtu \"github.com\/nyaxt\/otaru\/gcloud\/auth\/testutils\"\n\t\"github.com\/nyaxt\/otaru\/gcloud\/gcs\"\n\ttu \"github.com\/nyaxt\/otaru\/testutils\"\n\t\"github.com\/nyaxt\/otaru\/util\"\n)\n\nfunc testGCSBlobStore(f int) *gcs.GCSBlobStore {\n\tbs, err := gcs.NewGCSBlobStore(\n\t\tauthtu.TestConfig().ProjectName,\n\t\tauthtu.TestBucketName(),\n\t\tauthtu.TestTokenSource(),\n\t\tf,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create GCSBlobStore: %v\", err)\n\t}\n\treturn bs\n}\n\nfunc TestGCSBlobStore_WriteReadDelete(t *testing.T) {\n\tbs := testGCSBlobStore(flags.O_RDWR)\n\n\t\/\/ Write\n\t{\n\t\tw, err := bs.OpenWriter(\"hoge\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to open writer: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tn, err := w.Write(tu.HelloWorld)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Write failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(tu.HelloWorld) {\n\t\t\tt.Errorf(\"Write returned unexpected len: %d\", n)\n\t\t\treturn\n\t\t}\n\t\tif err := w.Close(); err != nil {\n\t\t\tt.Errorf(\"Failed to close writer: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Read\n\t{\n\t\tr, err := bs.OpenReader(\"hoge\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to open reader: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbuf := make([]byte, len(tu.HelloWorld))\n\t\tif _, err = io.ReadFull(r, buf); err != nil {\n\t\t\tt.Errorf(\"ReadFull failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(tu.HelloWorld, buf) {\n\t\t\tt.Errorf(\"Read content != Write content\")\n\t\t}\n\n\t\tif err := r.Close(); err != nil {\n\t\t\tt.Errorf(\"Failed to close reader: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ ListBlobs\n\t{\n\t\tbpaths, err := bs.ListBlobs()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to ListBlobs(): %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !reflect.DeepEqual([]string{\"hoge\"}, bpaths) {\n\t\t\tt.Errorf(\"Unexpected BlobList: %v\", bpaths)\n\t\t}\n\t}\n\n\t\/\/ Delete\n\tif err := bs.RemoveBlob(\"hoge\"); err != nil {\n\t\tt.Errorf(\"Failed to remove blob: %v\", err)\n\t}\n}\n\nfunc TestGCSBlobStore_ReadOnly(t *testing.T) {\n\twbs := testGCSBlobStore(flags.O_RDWR)\n\tdefer func() {\n\t\t_ = wbs.RemoveBlob(\"hoge\")\n\t\t_ = wbs.RemoveBlob(\"fuga\")\n\t}()\n\tif err := tu.WriteVersionedBlob(wbs, \"hoge\", 42); err != nil {\n\t\tt.Errorf(\"%v\", err)\n\t\treturn\n\t}\n\tif err := tu.WriteVersionedBlob(wbs, \"fuga\", 123); err != nil {\n\t\tt.Errorf(\"%v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Read should work just fine\n\trbs := testGCSBlobStore(flags.O_RDONLY)\n\tif err := tu.AssertBlobVersion(rbs, \"hoge\", 42); err != nil {\n\t\tt.Errorf(\"assert hoge 42. err: %v\", err)\n\t}\n\tif err := tu.AssertBlobVersion(rbs, \"fuga\", 123); err != nil {\n\t\tt.Errorf(\"assert fuga 123. err: %v\", err)\n\t}\n\n\t\/\/ Delete should fail\n\terr := rbs.RemoveBlob(\"hoge\")\n\tif err == nil {\n\t\tt.Errorf(\"Unexpected RemoveBlob success.\")\n\t\treturn\n\t}\n\tif err != util.EPERM {\n\t\tt.Errorf(\"Expected EPERM. got %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Write should fail\n\t_, err = rbs.OpenWriter(\"new\")\n\tif err == nil {\n\t\tt.Errorf(\"Unexpected OpenWriter success.\")\n\t\treturn\n\t}\n\tif err != util.EPERM {\n\t\tt.Errorf(\"Expected EPERM. got %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ ListBlobs should work\n\t{\n\t\tbpaths, err := rbs.ListBlobs()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to ListBlobs(): %v\", err)\n\t\t\treturn\n\t\t}\n\t\tsort.Strings(bpaths)\n\t\tif !reflect.DeepEqual([]string{\"fuga\", \"hoge\"}, bpaths) {\n\t\t\tt.Errorf(\"Unexpected BlobList: %v\", bpaths)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Tool to get xDS configs from pilot. This tool simulate envoy sidecar gRPC call to get config,\n\/\/ so it will work even when sidecar haswhen sidecar hasn't connected (e.g in the case of pilot running on local machine))\n\/\/\n\/\/ Usage:\n\/\/\n\/\/ First, you can either manually expose pilot gRPC port or rely on this tool to port-forward pilot by omitting -pilot_url flag:\n\/\/\n\/\/ * By port-forward existing pilot:\n\/\/ ```bash\n\/\/ kubectl port-forward $(kubectl get pod -l app=istiod -o jsonpath='{.items[0].metadata.name}' -n istio-system) -n istio-system 15010\n\/\/ ```\n\/\/ * Or run local pilot using the same k8s config.\n\/\/ ```bash\n\/\/ pilot-discovery discovery --kubeconfig=${HOME}\/.kube\/config\n\/\/ ```\n\/\/\n\/\/ To get LDS or CDS, use -type lds or -type cds, and provide the pod id or app label. For example:\n\/\/ ```bash\n\/\/ go run pilot_cli.go --type lds --proxytag httpbin-5766dd474b-2hlnx # --res will be ignored\n\/\/ go run pilot_cli.go --type lds --proxytag httpbin\n\/\/ ```\n\/\/ Note If more than one pod match with the app label, one will be picked arbitrarily.\n\/\/\n\/\/ For EDS\/RDS, provide comma-separated-list of corresponding clusters or routes name. For example:\n\/\/ ```bash\n\/\/ go run .\/pilot\/tools\/debug\/pilot_cli.go --type eds --proxytag httpbin \\\n\/\/ --res \"inbound|http||sleep.default.svc.cluster.local,outbound|http||httpbin.default.svc.cluster.local\"\n\/\/ ```\n\/\/\n\/\/ Script requires kube config in order to connect to k8s registry to get pod information (for LDS and CDS type). The default\n\/\/ value for kubeconfig path is .kube\/config in home folder (works for Linux only). It can be changed via -kubeconfig flag.\n\/\/ ```bash\n\/\/ go run .\/pilot\/debug\/pilot_cli.go --type lds --proxytag httpbin --kubeconfig path\/to\/kube\/config\n\/\/ ```\n\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tcore \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/core\/v3\"\n\tdiscovery \"github.com\/envoyproxy\/go-control-plane\/envoy\/service\/discovery\/v3\"\n\t\"google.golang.org\/grpc\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t_ \"k8s.io\/client-go\/plugin\/pkg\/client\/auth\/gcp\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\tv3 \"istio.io\/istio\/pilot\/pkg\/xds\/v3\"\n\t\"istio.io\/istio\/pkg\/util\/gogoprotomarshal\"\n\t\"istio.io\/pkg\/env\"\n\t\"istio.io\/pkg\/log\"\n)\n\nconst (\n\tLocalPortStart = 50000\n\tLocalPortEnd = 60000\n)\n\n\/\/ PodInfo holds information to identify pod.\ntype PodInfo struct {\n\tName string\n\tNamespace string\n\tIP string\n\tProxyType string\n}\n\nfunc getAllPods(kubeconfig string) (*v1.PodList, error) {\n\tcfg, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientset.CoreV1().Pods(meta_v1.NamespaceAll).List(context.TODO(), meta_v1.ListOptions{})\n}\n\nfunc NewPodInfo(nameOrAppLabel string, kubeconfig string, proxyType string) *PodInfo {\n\tlog.Infof(\"Using kube config at %s\", kubeconfig)\n\tpods, err := getAllPods(kubeconfig)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn nil\n\t}\n\n\tfor _, pod := range pods.Items {\n\t\tlog.Infof(\"pod %q\", pod.Name)\n\t\tif pod.Name == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching name %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t\tProxyType: proxyType,\n\t\t\t}\n\t\t}\n\t\tif app, ok := pod.ObjectMeta.Labels[\"app\"]; ok && app == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching app label %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t\tProxyType: proxyType,\n\t\t\t}\n\t\t}\n\t\tif istio, ok := pod.ObjectMeta.Labels[\"istio\"]; ok && istio == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching app label %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t}\n\t\t}\n\t}\n\tlog.Warnf(\"Cannot find pod with name or app label matching %q in registry.\", nameOrAppLabel)\n\treturn nil\n}\n\nfunc (p PodInfo) makeNodeID() string {\n\tif p.ProxyType != \"\" {\n\t\treturn fmt.Sprintf(\"%s~%s~%s.%s~%s.svc.cluster.local\", p.ProxyType, p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\tif strings.HasPrefix(p.Name, \"istio-ingressgateway\") || strings.HasPrefix(p.Name, \"istio-egressgateway\") {\n\t\treturn fmt.Sprintf(\"router~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\tif strings.HasPrefix(p.Name, \"istio-ingress\") {\n\t\treturn fmt.Sprintf(\"ingress~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\treturn fmt.Sprintf(\"sidecar~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n}\n\nfunc configTypeToTypeURL(configType string) string {\n\tswitch configType {\n\tcase \"lds\":\n\t\treturn v3.ListenerType\n\tcase \"cds\":\n\t\treturn v3.ClusterType\n\tcase \"rds\":\n\t\treturn v3.RouteType\n\tcase \"eds\":\n\t\treturn v3.EndpointType\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type %s\", configType))\n\t}\n}\n\nfunc (p PodInfo) makeRequest(configType string) *discovery.DiscoveryRequest {\n\treturn &discovery.DiscoveryRequest{\n\t\tNode: &core.Node{\n\t\t\tId: p.makeNodeID(),\n\t\t},\n\t\tTypeUrl: configTypeToTypeURL(configType),\n\t}\n}\n\nfunc (p PodInfo) appendResources(req *discovery.DiscoveryRequest, resources []string) *discovery.DiscoveryRequest {\n\treq.ResourceNames = resources\n\treturn req\n}\n\nfunc (p PodInfo) getXdsResponse(pilotURL string, req *discovery.DiscoveryRequest) (*discovery.DiscoveryResponse, error) {\n\tconn, err := grpc.Dial(pilotURL, grpc.WithInsecure())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer func() { _ = conn.Close() }()\n\n\tadsClient := discovery.NewAggregatedDiscoveryServiceClient(conn)\n\tstream, err := adsClient.StreamAggregatedResources(context.Background())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\terr = stream.Send(req)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tres, err := stream.Recv()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn res, err\n}\n\nvar homeVar = env.RegisterStringVar(\"HOME\", \"\", \"\")\n\nfunc resolveKubeConfigPath(kubeConfig string) string {\n\tpath := strings.Replace(kubeConfig, \"~\", homeVar.Get(), 1)\n\tret, err := filepath.Abs(path)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn ret\n}\n\n\/\/ nolint: golint\nfunc portForwardPilot(kubeConfig, pilotURL string) (*os.Process, string, error) {\n\tif pilotURL != \"\" {\n\t\t\/\/ No need to port-forward, url is already provided.\n\t\treturn nil, pilotURL, nil\n\t}\n\tlog.Info(\"Pilot url is not provided, try to port-forward pilot pod.\")\n\n\tpodName := \"\"\n\tpods, err := getAllPods(kubeConfig)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tfor _, pod := range pods.Items {\n\t\tif app, ok := pod.ObjectMeta.Labels[\"istio\"]; ok && app == \"istiod\" {\n\t\t\tpodName = pod.Name\n\t\t}\n\t}\n\tif podName == \"\" {\n\t\treturn nil, \"\", fmt.Errorf(\"cannot find istio-pilot pod\")\n\t}\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tlocalPort := r.Intn(LocalPortEnd-LocalPortStart) + LocalPortStart\n\tcmd := fmt.Sprintf(\"kubectl port-forward %s -n istio-system %d:15010\", podName, localPort)\n\tparts := strings.Split(cmd, \" \")\n\tc := exec.Command(parts[0], parts[1:]...)\n\terr = c.Start()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ Make sure istio-pilot is reachable.\n\treachable := false\n\turl := fmt.Sprintf(\"localhost:%d\", localPort)\n\tfor i := 0; i < 10 && !reachable; i++ {\n\t\tconn, err := net.Dial(\"tcp\", url)\n\t\tif err == nil {\n\t\t\t_ = conn.Close()\n\t\t\treachable = true\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tif !reachable {\n\t\treturn nil, \"\", fmt.Errorf(\"cannot reach local pilot url: %s\", url)\n\t}\n\treturn c.Process, fmt.Sprintf(\"localhost:%d\", localPort), nil\n}\n\nfunc main() {\n\tkubeConfig := flag.String(\"kubeconfig\", \"~\/.kube\/config\", \"path to the kubeconfig file. Default is ~\/.kube\/config\")\n\tpilotURL := flag.String(\"pilot\", \"\", \"pilot address. Will try port forward if not provided.\")\n\tconfigType := flag.String(\"type\", \"lds\", \"lds, cds, or eds. Default lds.\")\n\tproxyType := flag.String(\"proxytype\", \"\", \"sidecar, ingress, router.\")\n\tproxyTag := flag.String(\"proxytag\", \"\", \"Pod name or app label or istio label to identify the proxy.\")\n\tresources := flag.String(\"res\", \"\", \"Resource(s) to get config for. LDS\/CDS should leave it empty.\")\n\toutputFile := flag.String(\"out\", \"\", \"output file. Leave blank to go to stdout\")\n\tflag.Parse()\n\n\tprocess, pilot, err := portForwardPilot(resolveKubeConfigPath(*kubeConfig), *pilotURL)\n\tif err != nil {\n\t\tlog.Errorf(\"pilot port forward failed: %v\", err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif process != nil {\n\t\t\terr := process.Kill()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to kill port-forward process, pid: %d\", process.Pid)\n\t\t\t}\n\t\t}\n\t}()\n\tpod := NewPodInfo(*proxyTag, resolveKubeConfigPath(*kubeConfig), *proxyType)\n\n\tvar resp *discovery.DiscoveryResponse\n\tswitch *configType {\n\tcase \"lds\", \"cds\":\n\t\tresp, err = pod.getXdsResponse(pilot, pod.makeRequest(*configType))\n\tcase \"rds\", \"eds\":\n\t\tresp, err = pod.getXdsResponse(pilot, pod.appendResources(pod.makeRequest(*configType), strings.Split(*resources, \",\")))\n\tdefault:\n\t\tlog.Errorf(\"Unknown config type: %q\", *configType)\n\t\tos.Exit(1)\n\t}\n\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get Xds response for %v. Error: %v\", *resources, err)\n\t\treturn\n\t}\n\tstrResponse, _ := gogoprotomarshal.ToJSONWithIndent(resp, \" \")\n\tif outputFile == nil || *outputFile == \"\" {\n\t\tfmt.Printf(\"%v\\n\", strResponse)\n\t} else if err := ioutil.WriteFile(*outputFile, []byte(strResponse), 0644); err != nil {\n\t\tlog.Errorf(\"Cannot write output to file %q\", *outputFile)\n\t}\n}\n<commit_msg>fix labels (#31543)<commit_after>\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Tool to get xDS configs from pilot. This tool simulate envoy sidecar gRPC call to get config,\n\/\/ so it will work even when sidecar haswhen sidecar hasn't connected (e.g in the case of pilot running on local machine))\n\/\/\n\/\/ Usage:\n\/\/\n\/\/ First, you can either manually expose pilot gRPC port or rely on this tool to port-forward pilot by omitting -pilot_url flag:\n\/\/\n\/\/ * By port-forward existing pilot:\n\/\/ ```bash\n\/\/ kubectl port-forward $(kubectl get pod -l app=istiod -o jsonpath='{.items[0].metadata.name}' -n istio-system) -n istio-system 15010\n\/\/ ```\n\/\/ * Or run local pilot using the same k8s config.\n\/\/ ```bash\n\/\/ pilot-discovery discovery --kubeconfig=${HOME}\/.kube\/config\n\/\/ ```\n\/\/\n\/\/ To get LDS or CDS, use -type lds or -type cds, and provide the pod id or app label. For example:\n\/\/ ```bash\n\/\/ go run pilot_cli.go --type lds --proxytag httpbin-5766dd474b-2hlnx # --res will be ignored\n\/\/ go run pilot_cli.go --type lds --proxytag httpbin\n\/\/ ```\n\/\/ Note If more than one pod match with the app label, one will be picked arbitrarily.\n\/\/\n\/\/ For EDS\/RDS, provide comma-separated-list of corresponding clusters or routes name. For example:\n\/\/ ```bash\n\/\/ go run .\/pilot\/tools\/debug\/pilot_cli.go --type eds --proxytag httpbin \\\n\/\/ --res \"inbound|http||sleep.default.svc.cluster.local,outbound|http||httpbin.default.svc.cluster.local\"\n\/\/ ```\n\/\/\n\/\/ Script requires kube config in order to connect to k8s registry to get pod information (for LDS and CDS type). The default\n\/\/ value for kubeconfig path is .kube\/config in home folder (works for Linux only). It can be changed via -kubeconfig flag.\n\/\/ ```bash\n\/\/ go run .\/pilot\/debug\/pilot_cli.go --type lds --proxytag httpbin --kubeconfig path\/to\/kube\/config\n\/\/ ```\n\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tcore \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/core\/v3\"\n\tdiscovery \"github.com\/envoyproxy\/go-control-plane\/envoy\/service\/discovery\/v3\"\n\t\"google.golang.org\/grpc\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t_ \"k8s.io\/client-go\/plugin\/pkg\/client\/auth\/gcp\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\tv3 \"istio.io\/istio\/pilot\/pkg\/xds\/v3\"\n\t\"istio.io\/istio\/pkg\/util\/gogoprotomarshal\"\n\t\"istio.io\/pkg\/env\"\n\t\"istio.io\/pkg\/log\"\n)\n\nconst (\n\tLocalPortStart = 50000\n\tLocalPortEnd = 60000\n)\n\n\/\/ PodInfo holds information to identify pod.\ntype PodInfo struct {\n\tName string\n\tNamespace string\n\tIP string\n\tProxyType string\n}\n\nfunc getAllPods(kubeconfig string) (*v1.PodList, error) {\n\tcfg, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientset.CoreV1().Pods(meta_v1.NamespaceAll).List(context.TODO(), meta_v1.ListOptions{})\n}\n\nfunc NewPodInfo(nameOrAppLabel string, kubeconfig string, proxyType string) *PodInfo {\n\tlog.Infof(\"Using kube config at %s\", kubeconfig)\n\tpods, err := getAllPods(kubeconfig)\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn nil\n\t}\n\n\tfor _, pod := range pods.Items {\n\t\tlog.Infof(\"pod %q\", pod.Name)\n\t\tif pod.Name == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching name %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t\tProxyType: proxyType,\n\t\t\t}\n\t\t}\n\t\tif app, ok := pod.ObjectMeta.Labels[\"app\"]; ok && app == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching app label %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t\tProxyType: proxyType,\n\t\t\t}\n\t\t}\n\t\tif istio, ok := pod.ObjectMeta.Labels[\"istio\"]; ok && istio == nameOrAppLabel {\n\t\t\tlog.Infof(\"Found pod %s.%s~%s matching app label %q\", pod.Name, pod.Namespace, pod.Status.PodIP, nameOrAppLabel)\n\t\t\treturn &PodInfo{\n\t\t\t\tName: pod.Name,\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tIP: pod.Status.PodIP,\n\t\t\t}\n\t\t}\n\t}\n\tlog.Warnf(\"Cannot find pod with name or app label matching %q in registry.\", nameOrAppLabel)\n\treturn nil\n}\n\nfunc (p PodInfo) makeNodeID() string {\n\tif p.ProxyType != \"\" {\n\t\treturn fmt.Sprintf(\"%s~%s~%s.%s~%s.svc.cluster.local\", p.ProxyType, p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\tif strings.HasPrefix(p.Name, \"istio-ingressgateway\") || strings.HasPrefix(p.Name, \"istio-egressgateway\") {\n\t\treturn fmt.Sprintf(\"router~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\tif strings.HasPrefix(p.Name, \"istio-ingress\") {\n\t\treturn fmt.Sprintf(\"ingress~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n\t}\n\treturn fmt.Sprintf(\"sidecar~%s~%s.%s~%s.svc.cluster.local\", p.IP, p.Name, p.Namespace, p.Namespace)\n}\n\nfunc configTypeToTypeURL(configType string) string {\n\tswitch configType {\n\tcase \"lds\":\n\t\treturn v3.ListenerType\n\tcase \"cds\":\n\t\treturn v3.ClusterType\n\tcase \"rds\":\n\t\treturn v3.RouteType\n\tcase \"eds\":\n\t\treturn v3.EndpointType\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type %s\", configType))\n\t}\n}\n\nfunc (p PodInfo) makeRequest(configType string) *discovery.DiscoveryRequest {\n\treturn &discovery.DiscoveryRequest{\n\t\tNode: &core.Node{\n\t\t\tId: p.makeNodeID(),\n\t\t},\n\t\tTypeUrl: configTypeToTypeURL(configType),\n\t}\n}\n\nfunc (p PodInfo) appendResources(req *discovery.DiscoveryRequest, resources []string) *discovery.DiscoveryRequest {\n\treq.ResourceNames = resources\n\treturn req\n}\n\nfunc (p PodInfo) getXdsResponse(pilotURL string, req *discovery.DiscoveryRequest) (*discovery.DiscoveryResponse, error) {\n\tconn, err := grpc.Dial(pilotURL, grpc.WithInsecure())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer func() { _ = conn.Close() }()\n\n\tadsClient := discovery.NewAggregatedDiscoveryServiceClient(conn)\n\tstream, err := adsClient.StreamAggregatedResources(context.Background())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\terr = stream.Send(req)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tres, err := stream.Recv()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn res, err\n}\n\nvar homeVar = env.RegisterStringVar(\"HOME\", \"\", \"\")\n\nfunc resolveKubeConfigPath(kubeConfig string) string {\n\tpath := strings.Replace(kubeConfig, \"~\", homeVar.Get(), 1)\n\tret, err := filepath.Abs(path)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn ret\n}\n\n\/\/ nolint: golint\nfunc portForwardPilot(kubeConfig, pilotURL string) (*os.Process, string, error) {\n\tif pilotURL != \"\" {\n\t\t\/\/ No need to port-forward, url is already provided.\n\t\treturn nil, pilotURL, nil\n\t}\n\tlog.Info(\"Pilot url is not provided, try to port-forward pilot pod.\")\n\n\tpodName := \"\"\n\tpods, err := getAllPods(kubeConfig)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tfor _, pod := range pods.Items {\n\t\tif app, ok := pod.ObjectMeta.Labels[\"app\"]; ok && app == \"istiod\" {\n\t\t\tpodName = pod.Name\n\t\t}\n\t}\n\tif podName == \"\" {\n\t\treturn nil, \"\", fmt.Errorf(\"cannot find istio-pilot pod\")\n\t}\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tlocalPort := r.Intn(LocalPortEnd-LocalPortStart) + LocalPortStart\n\tcmd := fmt.Sprintf(\"kubectl port-forward %s -n istio-system %d:15010\", podName, localPort)\n\tparts := strings.Split(cmd, \" \")\n\tc := exec.Command(parts[0], parts[1:]...)\n\terr = c.Start()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ Make sure istio-pilot is reachable.\n\treachable := false\n\turl := fmt.Sprintf(\"localhost:%d\", localPort)\n\tfor i := 0; i < 10 && !reachable; i++ {\n\t\tconn, err := net.Dial(\"tcp\", url)\n\t\tif err == nil {\n\t\t\t_ = conn.Close()\n\t\t\treachable = true\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tif !reachable {\n\t\treturn nil, \"\", fmt.Errorf(\"cannot reach local pilot url: %s\", url)\n\t}\n\treturn c.Process, fmt.Sprintf(\"localhost:%d\", localPort), nil\n}\n\nfunc main() {\n\tkubeConfig := flag.String(\"kubeconfig\", \"~\/.kube\/config\", \"path to the kubeconfig file. Default is ~\/.kube\/config\")\n\tpilotURL := flag.String(\"pilot\", \"\", \"pilot address. Will try port forward if not provided.\")\n\tconfigType := flag.String(\"type\", \"lds\", \"lds, cds, or eds. Default lds.\")\n\tproxyType := flag.String(\"proxytype\", \"\", \"sidecar, ingress, router.\")\n\tproxyTag := flag.String(\"proxytag\", \"\", \"Pod name or app label or istio label to identify the proxy.\")\n\tresources := flag.String(\"res\", \"\", \"Resource(s) to get config for. LDS\/CDS should leave it empty.\")\n\toutputFile := flag.String(\"out\", \"\", \"output file. Leave blank to go to stdout\")\n\tflag.Parse()\n\n\tprocess, pilot, err := portForwardPilot(resolveKubeConfigPath(*kubeConfig), *pilotURL)\n\tif err != nil {\n\t\tlog.Errorf(\"pilot port forward failed: %v\", err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif process != nil {\n\t\t\terr := process.Kill()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to kill port-forward process, pid: %d\", process.Pid)\n\t\t\t}\n\t\t}\n\t}()\n\tpod := NewPodInfo(*proxyTag, resolveKubeConfigPath(*kubeConfig), *proxyType)\n\n\tvar resp *discovery.DiscoveryResponse\n\tswitch *configType {\n\tcase \"lds\", \"cds\":\n\t\tresp, err = pod.getXdsResponse(pilot, pod.makeRequest(*configType))\n\tcase \"rds\", \"eds\":\n\t\tresp, err = pod.getXdsResponse(pilot, pod.appendResources(pod.makeRequest(*configType), strings.Split(*resources, \",\")))\n\tdefault:\n\t\tlog.Errorf(\"Unknown config type: %q\", *configType)\n\t\tos.Exit(1)\n\t}\n\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get Xds response for %v. Error: %v\", *resources, err)\n\t\treturn\n\t}\n\tstrResponse, _ := gogoprotomarshal.ToJSONWithIndent(resp, \" \")\n\tif outputFile == nil || *outputFile == \"\" {\n\t\tfmt.Printf(\"%v\\n\", strResponse)\n\t} else if err := ioutil.WriteFile(*outputFile, []byte(strResponse), 0644); err != nil {\n\t\tlog.Errorf(\"Cannot write output to file %q\", *outputFile)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"github.com\/danryan\/hal\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nconst okStr = \"OK\"\n\n\/\/ GetHandler pulls a key out of the store, when addressed like so:\n\/\/ foobar?\n\/\/ bawt> get: foobar=baz\nvar GetHandler = hal.Hear(`^([:word]{1})\\?$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\tval, err := res.Robot.Store.Get(key)\n\tif err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(string(val))\n})\n\n\/\/ SetHandler does a simple match\/set into the Store\nvar SetHandler = hal.Hear(`^([:word]{1}) is ([:word]+)$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\tval := res.Match[0][2]\n\terr := res.Robot.Store.Set(key, []byte(val))\n\tif err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(okStr)\n})\n\n\/\/ DeleteHandler nukes keys in the Store\nvar DeleteHandler = hal.Hear(`^forget ([:word]{1})$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\n\tif err := res.Robot.Store.Delete(key); err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(okStr)\n})\n\n\/\/ UsersHandler provides insite into the Robots user storage\nvar UsersHandler = hal.Hear(`^!show users$`, func(res *hal.Response) error {\n\tlines := []string{}\n\tfor _, user := range res.Robot.Users.All() {\n\t\tlines = append(lines, spew.Sdump(user))\n\t}\n\treturn res.Send(lines...)\n})\n\n\/\/ UserHandler is the singular form of UsersHandler\nvar UserHandler = hal.Hear(`^!show user (.+)$`, func(res *hal.Response) error {\n\tid := res.Match[0][1]\n\tuser, _ := res.Robot.Users.Get(id)\n\tline := spew.Sdump(user)\n\treturn res.Send(line)\n})\n<commit_msg>ascii word typos<commit_after>package handlers\n\nimport (\n\t\"github.com\/danryan\/hal\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nconst okStr = \"OK\"\n\n\/\/ GetHandler pulls a key out of the store, when addressed like so:\n\/\/ foobar?\n\/\/ bawt> get: foobar=baz\nvar GetHandler = hal.Hear(`^([:word:]{1})\\?$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\tval, err := res.Robot.Store.Get(key)\n\tif err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(string(val))\n})\n\n\/\/ SetHandler does a simple match\/set into the Store\nvar SetHandler = hal.Hear(`^([:word:]{1}) is ([:word]+)$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\tval := res.Match[0][2]\n\terr := res.Robot.Store.Set(key, []byte(val))\n\tif err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(okStr)\n})\n\n\/\/ DeleteHandler nukes keys in the Store\nvar DeleteHandler = hal.Hear(`^forget ([:word:]{1})$`, func(res *hal.Response) error {\n\tkey := res.Match[0][1]\n\n\tif err := res.Robot.Store.Delete(key); err != nil {\n\t\tres.Send(err.Error())\n\t\treturn err\n\t}\n\treturn res.Send(okStr)\n})\n\n\/\/ UsersHandler provides insite into the Robots user storage\nvar UsersHandler = hal.Hear(`^!show users$`, func(res *hal.Response) error {\n\tlines := []string{}\n\tfor _, user := range res.Robot.Users.All() {\n\t\tlines = append(lines, spew.Sdump(user))\n\t}\n\treturn res.Send(lines...)\n})\n\n\/\/ UserHandler is the singular form of UsersHandler\nvar UserHandler = hal.Hear(`^!show user (.+)$`, func(res *hal.Response) error {\n\tid := res.Match[0][1]\n\tuser, _ := res.Robot.Users.Get(id)\n\tline := spew.Sdump(user)\n\treturn res.Send(line)\n})\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\t\"k8s.io\/apimachinery\/pkg\/conversion\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n)\n\nfunc AddConversionFuncs(scheme *runtime.Scheme) error {\n\treturn scheme.AddConversionFuncs(\n\t\tConvert_v1_TypeMeta_To_v1_TypeMeta,\n\n\t\tConvert_unversioned_ListMeta_To_unversioned_ListMeta,\n\n\t\tConvert_intstr_IntOrString_To_intstr_IntOrString,\n\n\t\tConvert_unversioned_Time_To_unversioned_Time,\n\n\t\tConvert_Slice_string_To_unversioned_Time,\n\n\t\tConvert_resource_Quantity_To_resource_Quantity,\n\n\t\tConvert_string_To_labels_Selector,\n\t\tConvert_labels_Selector_To_string,\n\n\t\tConvert_string_To_fields_Selector,\n\t\tConvert_fields_Selector_To_string,\n\n\t\tConvert_Pointer_bool_To_bool,\n\t\tConvert_bool_To_Pointer_bool,\n\n\t\tConvert_Pointer_string_To_string,\n\t\tConvert_string_To_Pointer_string,\n\n\t\tConvert_Pointer_int64_To_int,\n\t\tConvert_int_To_Pointer_int64,\n\n\t\tConvert_Pointer_int32_To_int32,\n\t\tConvert_int32_To_Pointer_int32,\n\n\t\tConvert_Pointer_float64_To_float64,\n\t\tConvert_float64_To_Pointer_float64,\n\n\t\tConvert_map_to_unversioned_LabelSelector,\n\t\tConvert_unversioned_LabelSelector_to_map,\n\n\t\tConvert_Slice_string_To_Slice_int32,\n\t)\n}\n\nfunc Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = float64(**in)\n\treturn nil\n}\n\nfunc Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error {\n\ttemp := float64(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = int32(**in)\n\treturn nil\n}\n\nfunc Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error {\n\ttemp := int32(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = int(**in)\n\treturn nil\n}\n\nfunc Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error {\n\ttemp := int64(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = \"\"\n\t\treturn nil\n\t}\n\t*out = **in\n\treturn nil\n}\n\nfunc Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {\n\tif in == nil {\n\t\tstringVar := \"\"\n\t\t*out = &stringVar\n\t\treturn nil\n\t}\n\t*out = in\n\treturn nil\n}\n\nfunc Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = false\n\t\treturn nil\n\t}\n\t*out = **in\n\treturn nil\n}\n\nfunc Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error {\n\tif in == nil {\n\t\tboolVar := false\n\t\t*out = &boolVar\n\t\treturn nil\n\t}\n\t*out = in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=drop\nfunc Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error {\n\t\/\/ These values are explicitly not copied\n\t\/\/out.APIVersion = in.APIVersion\n\t\/\/out.Kind = in.Kind\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error {\n\t\/\/ Cannot deep copy these, because time.Time has unexported fields.\n\t*out = *in\n\treturn nil\n}\n\n\/\/ Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value\nfunc Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error {\n\tstr := \"\"\n\tif len(*input) > 0 {\n\t\tstr = (*input)[0]\n\t}\n\treturn out.UnmarshalQueryParameter(str)\n}\n\nfunc Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {\n\tselector, err := labels.Parse(*in)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*out = selector\n\treturn nil\n}\n\nfunc Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error {\n\tselector, err := fields.ParseSelector(*in)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*out = selector\n\treturn nil\n}\n\nfunc Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\treturn nil\n\t}\n\t*out = (*in).String()\n\treturn nil\n}\n\nfunc Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\treturn nil\n\t}\n\t*out = (*in).String()\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\nfunc Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout = new(LabelSelector)\n\tfor labelKey, labelValue := range *in {\n\t\tAddLabelToSelector(out, labelKey, labelValue)\n\t}\n\treturn nil\n}\n\nfunc Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error {\n\tvar err error\n\t*out, err = LabelSelectorAsMap(in)\n\treturn err\n}\n\n\/\/ Convert_Slice_string_To_Slice_int32 converts multiple query parameters or\n\/\/ a single query parameter with a comma delimited value to multiple int32.\n\/\/ This is used for port forwarding which needs the ports as int32.\nfunc Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error {\n\tfor _, s := range *in {\n\t\tfor _, v := range strings.Split(s, \",\") {\n\t\t\tx, err := strconv.ParseUint(v, 10, 16)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot convert to []int32: %v\", err)\n\t\t\t}\n\t\t\t*out = append(*out, int32(x))\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Dynamic Kubelet Configuration<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\t\"k8s.io\/apimachinery\/pkg\/conversion\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n)\n\nfunc AddConversionFuncs(scheme *runtime.Scheme) error {\n\treturn scheme.AddConversionFuncs(\n\t\tConvert_v1_TypeMeta_To_v1_TypeMeta,\n\n\t\tConvert_unversioned_ListMeta_To_unversioned_ListMeta,\n\n\t\tConvert_intstr_IntOrString_To_intstr_IntOrString,\n\n\t\tConvert_unversioned_Time_To_unversioned_Time,\n\n\t\tConvert_Pointer_v1_Duration_To_v1_Duration,\n\t\tConvert_v1_Duration_To_Pointer_v1_Duration,\n\n\t\tConvert_Slice_string_To_unversioned_Time,\n\n\t\tConvert_resource_Quantity_To_resource_Quantity,\n\n\t\tConvert_string_To_labels_Selector,\n\t\tConvert_labels_Selector_To_string,\n\n\t\tConvert_string_To_fields_Selector,\n\t\tConvert_fields_Selector_To_string,\n\n\t\tConvert_Pointer_bool_To_bool,\n\t\tConvert_bool_To_Pointer_bool,\n\n\t\tConvert_Pointer_string_To_string,\n\t\tConvert_string_To_Pointer_string,\n\n\t\tConvert_Pointer_int64_To_int,\n\t\tConvert_int_To_Pointer_int64,\n\n\t\tConvert_Pointer_int32_To_int32,\n\t\tConvert_int32_To_Pointer_int32,\n\n\t\tConvert_Pointer_float64_To_float64,\n\t\tConvert_float64_To_Pointer_float64,\n\n\t\tConvert_map_to_unversioned_LabelSelector,\n\t\tConvert_unversioned_LabelSelector_to_map,\n\n\t\tConvert_Slice_string_To_Slice_int32,\n\t)\n}\n\nfunc Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = float64(**in)\n\treturn nil\n}\n\nfunc Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error {\n\ttemp := float64(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = int32(**in)\n\treturn nil\n}\n\nfunc Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error {\n\ttemp := int32(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = 0\n\t\treturn nil\n\t}\n\t*out = int(**in)\n\treturn nil\n}\n\nfunc Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error {\n\ttemp := int64(*in)\n\t*out = &temp\n\treturn nil\n}\n\nfunc Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = \"\"\n\t\treturn nil\n\t}\n\t*out = **in\n\treturn nil\n}\n\nfunc Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {\n\tif in == nil {\n\t\tstringVar := \"\"\n\t\t*out = &stringVar\n\t\treturn nil\n\t}\n\t*out = in\n\treturn nil\n}\n\nfunc Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = false\n\t\treturn nil\n\t}\n\t*out = **in\n\treturn nil\n}\n\nfunc Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error {\n\tif in == nil {\n\t\tboolVar := false\n\t\t*out = &boolVar\n\t\treturn nil\n\t}\n\t*out = in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=drop\nfunc Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error {\n\t\/\/ These values are explicitly not copied\n\t\/\/out.APIVersion = in.APIVersion\n\t\/\/out.Kind = in.Kind\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error {\n\t\/\/ Cannot deep copy these, because time.Time has unexported fields.\n\t*out = *in\n\treturn nil\n}\n\nfunc Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error {\n\tif *in == nil {\n\t\t*out = Duration{} \/\/ zero duration\n\t\treturn nil\n\t}\n\t*out = **in \/\/ copy\n\treturn nil\n}\n\nfunc Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error {\n\ttemp := *in \/\/copy\n\t*out = &temp\n\treturn nil\n}\n\n\/\/ Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value\nfunc Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error {\n\tstr := \"\"\n\tif len(*input) > 0 {\n\t\tstr = (*input)[0]\n\t}\n\treturn out.UnmarshalQueryParameter(str)\n}\n\nfunc Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {\n\tselector, err := labels.Parse(*in)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*out = selector\n\treturn nil\n}\n\nfunc Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error {\n\tselector, err := fields.ParseSelector(*in)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*out = selector\n\treturn nil\n}\n\nfunc Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\treturn nil\n\t}\n\t*out = (*in).String()\n\treturn nil\n}\n\nfunc Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error {\n\tif *in == nil {\n\t\treturn nil\n\t}\n\t*out = (*in).String()\n\treturn nil\n}\n\n\/\/ +k8s:conversion-fn=copy-only\nfunc Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {\n\t*out = *in\n\treturn nil\n}\n\nfunc Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout = new(LabelSelector)\n\tfor labelKey, labelValue := range *in {\n\t\tAddLabelToSelector(out, labelKey, labelValue)\n\t}\n\treturn nil\n}\n\nfunc Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error {\n\tvar err error\n\t*out, err = LabelSelectorAsMap(in)\n\treturn err\n}\n\n\/\/ Convert_Slice_string_To_Slice_int32 converts multiple query parameters or\n\/\/ a single query parameter with a comma delimited value to multiple int32.\n\/\/ This is used for port forwarding which needs the ports as int32.\nfunc Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error {\n\tfor _, s := range *in {\n\t\tfor _, v := range strings.Split(s, \",\") {\n\t\t\tx, err := strconv.ParseUint(v, 10, 16)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot convert to []int32: %v\", err)\n\t\t\t}\n\t\t\t*out = append(*out, int32(x))\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package rollrus\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stvp\/roll\"\n)\n\nfunc TestIntConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = 5\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"5\" {\n\t\tt.Fatal(\"Expected value to equal 5, but instead it is: \", v)\n\t}\n}\n\nfunc TestErrConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = fmt.Errorf(\"This is an error\")\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"This is an error\" {\n\t\tt.Fatal(\"Expected value to be a string of the error but instead it is: \", v)\n\t}\n}\n\nfunc TestStringConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = \"This is a string\"\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"This is a string\" {\n\t\tt.Fatal(\"Expected value to equal a certain string, but instead it is: \", v)\n\t}\n}\n\nfunc TestTimeConversion(t *testing.T) {\n\tnow := time.Now()\n\ti := make(logrus.Fields)\n\ti[\"test\"] = now\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != now.Format(time.RFC3339) {\n\t\tt.Fatal(\"Expected value to equal, but instead it is: \", v)\n\t}\n}\n\nfunc TestExtractError(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"err\"] = fmt.Errorf(\"foo bar baz\")\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause)\n\t}\n}\n\ntype CauserError struct {\n\tfakeCause string\n\terror\n}\n\nfunc (ce CauserError) Cause() error {\n\treturn fmt.Errorf(ce.fakeCause)\n}\n\nfunc TestExtractCauserErrorWithCause(t *testing.T) {\n\n\tentry := logrus.NewEntry(nil)\n\n\tentry.Data[\"err\"] = CauserError{fakeCause: \"fbb cause\", error: fmt.Errorf(\"foo bar baz\")}\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"fbb cause\" {\n\t\tt.Fatalf(\"Expected error as string to be 'fbb cause', but was instead: %q\", cause)\n\t}\n}\n\ntype NilCauserError struct {\n\terror\n}\n\nfunc (ce NilCauserError) Cause() error {\n\treturn nil\n}\n\nfunc TestExtractCauserErrorWithNilCause(t *testing.T) {\n\n\tentry := logrus.NewEntry(nil)\n\n\tentry.Data[\"err\"] = NilCauserError{error: fmt.Errorf(\"foo bar baz\")}\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause)\n\t}\n}\n\nfunc TestExtractErrorDefault(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"no-err\"] = fmt.Errorf(\"foo bar baz\")\n\tentry.Message = \"message error\"\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"message error\" {\n\t\tt.Fatalf(\"Expected error as string to be 'message error', but was instead: %q\", cause)\n\t}\n}\n\nfunc TestExtractErrorFromStackTracer(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"err\"] = errors.Errorf(\"foo bar baz\")\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 3 {\n\t\tt.Fatal(\"Expected length of trace to be == 3, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause.Error())\n\t}\n}\n\nfunc TestTriggerLevels(t *testing.T) {\n\tclient := roll.New(\"\", \"testing\")\n\tunderTest := &Hook{Client: client}\n\tif !reflect.DeepEqual(underTest.Levels(), defaultTriggerLevels) {\n\t\tt.Fatal(\"Expected Levels() to return defaultTriggerLevels\")\n\t}\n\n\tnewLevels := []logrus.Level{logrus.InfoLevel}\n\tunderTest.triggers = newLevels\n\tif !reflect.DeepEqual(underTest.Levels(), newLevels) {\n\t\tt.Fatal(\"Expected Levels() to return newLevels\")\n\t}\n}\n\nfunc TestWithMinLevelInfo(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.InfoLevel))\n\texpectedLevels := []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t\tlogrus.InfoLevel,\n\t}\n\tif !reflect.DeepEqual(h.Levels(), expectedLevels) {\n\t\tt.Fatal(\"Expected Levels() to return all levels above Info\")\n\t}\n}\n\nfunc TestWithMinLevelFatal(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.FatalLevel))\n\texpectedLevels := []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t}\n\tif !reflect.DeepEqual(h.Levels(), expectedLevels) {\n\t\tt.Fatal(\"Expected Levels() to return all levels above Fatal\")\n\t}\n}\n\nfunc TestLoggingBelowTheMinimumLevelDoesNotFire(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.FatalLevel))\n\tl := logrus.New()\n\tl.AddHook(h)\n\n\tl.Error(\"This is a test\")\n\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n}\nfunc TestLoggingAboveTheMinimumLevelDoesFire(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.WarnLevel))\n\tl := logrus.New()\n\tl.AddHook(h)\n\n\tl.Warn(\"This is a test\")\n\n\tif !h.reported {\n\t\tt.Fatal(\"expected report to have happened\")\n\t}\n}\n\nfunc TestWithIgnoredErrors(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithIgnoredErrors(io.EOF))\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\n\t\/\/ Exact error is skipped.\n\tentry.Data[\"err\"] = io.EOF\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Wrapped error is also skipped.\n\tentry.Data[\"err\"] = errors.Wrap(io.EOF, \"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Non blacklisted errors get reported.\n\tentry.Data[\"err\"] = errors.New(\"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n\n\t\/\/ no err gets reported.\n\tdelete(entry.Data, \"err\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n}\n\ntype isTemporary interface {\n\tTemporary() bool\n}\n\n\/\/ https:\/\/github.com\/go-pg\/pg\/blob\/2ebb4d1d9b890619de2dc9e1dc0085b86f93fc91\/internal\/error.go#L22\ntype PGError struct {\n\tm map[byte]string\n}\n\nfunc (err PGError) Error() string {\n\treturn \"error\"\n}\n\n\/\/ https:\/\/github.com\/heroku\/rollrus\/issues\/26\nfunc TestWithErrorHandlesUnhashableErrors(t *testing.T) {\n\t_ = NewHook(\"\", \"\", WithIgnoredErrors(PGError{m: make(map[byte]string)}))\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\tentry.Data[\"err\"] = PGError{m: make(map[byte]string)}\n\n\th := NewHook(\"\", \"testing\")\n\t\/\/ actually panics\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n}\n\nfunc TestWithIgnoreErrorFunc(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithIgnoreErrorFunc(func(err error) bool {\n\t\tif err == io.EOF {\n\t\t\treturn true\n\t\t}\n\n\t\tif e, ok := err.(isTemporary); ok && e.Temporary() {\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t}))\n\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\n\t\/\/ Exact error is skipped.\n\tentry.Data[\"err\"] = io.EOF\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Wrapped error is also skipped.\n\tentry.Data[\"err\"] = errors.Wrap(io.EOF, \"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\tsrv := httptest.NewServer(nil)\n\tsrv.Close()\n\n\t\/\/ Temporary error skipped\n\t_, err := http.Get(srv.URL)\n\n\tentry.Data[\"err\"] = err\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\n\t\/\/ Non blacklisted errors get reported.\n\tentry.Data[\"err\"] = errors.New(\"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n\n\t\/\/ no err gets reported.\n\tdelete(entry.Data, \"err\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n}\n\nfunc TestWithIgnoreFunc(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tfields logrus.Fields\n\t\tskipReport bool\n\t}{\n\t\t{\n\t\t\tname: \"extract error is skipped\",\n\t\t\tfields: map[string]interface{}{\"err\": io.EOF},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped error is skipped\",\n\t\t\tfields: map[string]interface{}{\"err\": errors.Wrap(io.EOF, \"hello\")},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ignored field is skipped\",\n\t\t\tfields: map[string]interface{}{\"ignore\": \"true\"},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"error is not skipped\",\n\t\t\tfields: map[string]interface{}{},\n\t\t\tskipReport: false,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tc := c \/\/ capture local var for parallel tests\n\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\th := NewHook(\"\", \"testing\", WithIgnoreFunc(func(err error, m map[string]string) bool {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\tif m[\"ignore\"] == \"true\" {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t}))\n\n\t\t\tentry := logrus.NewEntry(nil)\n\t\t\tentry.Message = \"This is a test\"\n\t\t\tentry.Data = c.fields\n\n\t\t\tif err := h.Fire(entry); err != nil {\n\t\t\t\tt.Errorf(\"unexpected error %s\", err)\n\t\t\t}\n\n\t\t\tif c.skipReport && h.reported {\n\t\t\t\tt.Errorf(\"expected report to be skipped\")\n\t\t\t}\n\n\t\t\tif !c.skipReport && !h.reported {\n\t\t\t\tt.Errorf(\"expected report to be fired\")\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>test nil cause<commit_after>package rollrus\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stvp\/roll\"\n)\n\nfunc TestIntConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = 5\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"5\" {\n\t\tt.Fatal(\"Expected value to equal 5, but instead it is: \", v)\n\t}\n}\n\nfunc TestErrConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = fmt.Errorf(\"This is an error\")\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"This is an error\" {\n\t\tt.Fatal(\"Expected value to be a string of the error but instead it is: \", v)\n\t}\n}\n\nfunc TestStringConversion(t *testing.T) {\n\ti := make(logrus.Fields)\n\ti[\"test\"] = \"This is a string\"\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != \"This is a string\" {\n\t\tt.Fatal(\"Expected value to equal a certain string, but instead it is: \", v)\n\t}\n}\n\nfunc TestTimeConversion(t *testing.T) {\n\tnow := time.Now()\n\ti := make(logrus.Fields)\n\ti[\"test\"] = now\n\n\tr := convertFields(i)\n\n\tv, ok := r[\"test\"]\n\tif !ok {\n\t\tt.Fatal(\"Expected test key, but did not find it\")\n\t}\n\n\tif v != now.Format(time.RFC3339) {\n\t\tt.Fatal(\"Expected value to equal, but instead it is: \", v)\n\t}\n}\n\nfunc TestExtractError(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"err\"] = fmt.Errorf(\"foo bar baz\")\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause)\n\t}\n}\n\ntype CauserError struct {\n\tfakeCause string\n\terror\n}\n\nfunc (ce CauserError) Cause() error {\n\treturn fmt.Errorf(ce.fakeCause)\n}\n\nfunc TestExtractCauserErrorWithCause(t *testing.T) {\n\n\tentry := logrus.NewEntry(nil)\n\n\tentry.Data[\"err\"] = CauserError{fakeCause: \"fbb cause\", error: fmt.Errorf(\"foo bar baz\")}\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"fbb cause\" {\n\t\tt.Fatalf(\"Expected error as string to be 'fbb cause', but was instead: %q\", cause)\n\t}\n}\n\ntype NilCauserError struct {\n\terror\n}\n\nfunc (ce NilCauserError) Cause() error {\n\treturn nil\n}\n\nfunc TestExtractCauserErrorWithNilCause(t *testing.T) {\n\n\tentry := logrus.NewEntry(nil)\n\n\tentry.Data[\"err\"] = NilCauserError{error: fmt.Errorf(\"foo bar baz\")}\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause)\n\t}\n}\n\nfunc TestExtractErrorDefault(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"no-err\"] = fmt.Errorf(\"foo bar baz\")\n\tentry.Message = \"message error\"\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 0 {\n\t\tt.Fatal(\"Expected length of trace to be equal to 0, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"message error\" {\n\t\tt.Fatalf(\"Expected error as string to be 'message error', but was instead: %q\", cause)\n\t}\n}\n\nfunc TestExtractErrorFromStackTracer(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tentry.Data[\"err\"] = errors.Errorf(\"foo bar baz\")\n\n\ttrace, cause := extractError(entry)\n\tif len(trace) != 3 {\n\t\tt.Fatal(\"Expected length of trace to be == 3, but instead is: \", len(trace))\n\t}\n\n\tif cause.Error() != \"foo bar baz\" {\n\t\tt.Fatalf(\"Expected error as string to be 'foo bar baz', but was instead: %q\", cause.Error())\n\t}\n}\n\ntype merr []error\n\nfunc (merr) Error() string { return \"boom?\" }\nfunc (e merr) Cause() error {\n\tif len(e) == 0 {\n\t\treturn nil\n\t}\n\treturn e[0]\n}\nfunc TestExtractErrorWithNilCause(t *testing.T) {\n\tentry := logrus.NewEntry(nil)\n\tvar err error = merr{}\n\tentry.Data[\"err\"] = err\n\n\t\/\/ len(err) == 0, so Cause(err) will return nil\n\t_, cause := extractError(entry)\n\tif cause == nil {\n\t\tt.Fatalf(\"unexpected nil cause\")\n\t}\n\n\tif cause.Error() != \"boom?\" {\n\t\tt.Fatalf(\"Expected error as string to be 'boom?', but was instead: %q\", cause.Error())\n\t}\n}\n\nfunc TestTriggerLevels(t *testing.T) {\n\tclient := roll.New(\"\", \"testing\")\n\tunderTest := &Hook{Client: client}\n\tif !reflect.DeepEqual(underTest.Levels(), defaultTriggerLevels) {\n\t\tt.Fatal(\"Expected Levels() to return defaultTriggerLevels\")\n\t}\n\n\tnewLevels := []logrus.Level{logrus.InfoLevel}\n\tunderTest.triggers = newLevels\n\tif !reflect.DeepEqual(underTest.Levels(), newLevels) {\n\t\tt.Fatal(\"Expected Levels() to return newLevels\")\n\t}\n}\n\nfunc TestWithMinLevelInfo(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.InfoLevel))\n\texpectedLevels := []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t\tlogrus.InfoLevel,\n\t}\n\tif !reflect.DeepEqual(h.Levels(), expectedLevels) {\n\t\tt.Fatal(\"Expected Levels() to return all levels above Info\")\n\t}\n}\n\nfunc TestWithMinLevelFatal(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.FatalLevel))\n\texpectedLevels := []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t}\n\tif !reflect.DeepEqual(h.Levels(), expectedLevels) {\n\t\tt.Fatal(\"Expected Levels() to return all levels above Fatal\")\n\t}\n}\n\nfunc TestLoggingBelowTheMinimumLevelDoesNotFire(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.FatalLevel))\n\tl := logrus.New()\n\tl.AddHook(h)\n\n\tl.Error(\"This is a test\")\n\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n}\nfunc TestLoggingAboveTheMinimumLevelDoesFire(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithMinLevel(logrus.WarnLevel))\n\tl := logrus.New()\n\tl.AddHook(h)\n\n\tl.Warn(\"This is a test\")\n\n\tif !h.reported {\n\t\tt.Fatal(\"expected report to have happened\")\n\t}\n}\n\nfunc TestWithIgnoredErrors(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithIgnoredErrors(io.EOF))\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\n\t\/\/ Exact error is skipped.\n\tentry.Data[\"err\"] = io.EOF\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Wrapped error is also skipped.\n\tentry.Data[\"err\"] = errors.Wrap(io.EOF, \"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Non blacklisted errors get reported.\n\tentry.Data[\"err\"] = errors.New(\"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n\n\t\/\/ no err gets reported.\n\tdelete(entry.Data, \"err\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n}\n\ntype isTemporary interface {\n\tTemporary() bool\n}\n\n\/\/ https:\/\/github.com\/go-pg\/pg\/blob\/2ebb4d1d9b890619de2dc9e1dc0085b86f93fc91\/internal\/error.go#L22\ntype PGError struct {\n\tm map[byte]string\n}\n\nfunc (err PGError) Error() string {\n\treturn \"error\"\n}\n\n\/\/ https:\/\/github.com\/heroku\/rollrus\/issues\/26\nfunc TestWithErrorHandlesUnhashableErrors(t *testing.T) {\n\t_ = NewHook(\"\", \"\", WithIgnoredErrors(PGError{m: make(map[byte]string)}))\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\tentry.Data[\"err\"] = PGError{m: make(map[byte]string)}\n\n\th := NewHook(\"\", \"testing\")\n\t\/\/ actually panics\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n}\n\nfunc TestWithIgnoreErrorFunc(t *testing.T) {\n\th := NewHook(\"\", \"testing\", WithIgnoreErrorFunc(func(err error) bool {\n\t\tif err == io.EOF {\n\t\t\treturn true\n\t\t}\n\n\t\tif e, ok := err.(isTemporary); ok && e.Temporary() {\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t}))\n\n\tentry := logrus.NewEntry(nil)\n\tentry.Message = \"This is a test\"\n\n\t\/\/ Exact error is skipped.\n\tentry.Data[\"err\"] = io.EOF\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\t\/\/ Wrapped error is also skipped.\n\tentry.Data[\"err\"] = errors.Wrap(io.EOF, \"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif h.reported {\n\t\tt.Fatal(\"expected no report to have happened\")\n\t}\n\n\tsrv := httptest.NewServer(nil)\n\tsrv.Close()\n\n\t\/\/ Temporary error skipped\n\t_, err := http.Get(srv.URL)\n\n\tentry.Data[\"err\"] = err\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\n\t\/\/ Non blacklisted errors get reported.\n\tentry.Data[\"err\"] = errors.New(\"hello\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n\n\t\/\/ no err gets reported.\n\tdelete(entry.Data, \"err\")\n\tif err := h.Fire(entry); err != nil {\n\t\tt.Fatal(\"unexpected error \", err)\n\t}\n\tif !h.reported {\n\t\tt.Fatal(\"expected a report to have happened\")\n\t}\n}\n\nfunc TestWithIgnoreFunc(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tfields logrus.Fields\n\t\tskipReport bool\n\t}{\n\t\t{\n\t\t\tname: \"extract error is skipped\",\n\t\t\tfields: map[string]interface{}{\"err\": io.EOF},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped error is skipped\",\n\t\t\tfields: map[string]interface{}{\"err\": errors.Wrap(io.EOF, \"hello\")},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ignored field is skipped\",\n\t\t\tfields: map[string]interface{}{\"ignore\": \"true\"},\n\t\t\tskipReport: true,\n\t\t},\n\t\t{\n\t\t\tname: \"error is not skipped\",\n\t\t\tfields: map[string]interface{}{},\n\t\t\tskipReport: false,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tc := c \/\/ capture local var for parallel tests\n\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\th := NewHook(\"\", \"testing\", WithIgnoreFunc(func(err error, m map[string]string) bool {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\tif m[\"ignore\"] == \"true\" {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\treturn false\n\t\t\t}))\n\n\t\t\tentry := logrus.NewEntry(nil)\n\t\t\tentry.Message = \"This is a test\"\n\t\t\tentry.Data = c.fields\n\n\t\t\tif err := h.Fire(entry); err != nil {\n\t\t\t\tt.Errorf(\"unexpected error %s\", err)\n\t\t\t}\n\n\t\t\tif c.skipReport && h.reported {\n\t\t\t\tt.Errorf(\"expected report to be skipped\")\n\t\t\t}\n\n\t\t\tif !c.skipReport && !h.reported {\n\t\t\t\tt.Errorf(\"expected report to be fired\")\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage apiserver\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/httpstream\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/proxy\"\n\tauditinternal \"k8s.io\/apiserver\/pkg\/apis\/audit\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/handlers\/responsewriters\"\n\tendpointmetrics \"k8s.io\/apiserver\/pkg\/endpoints\/metrics\"\n\tgenericapirequest \"k8s.io\/apiserver\/pkg\/endpoints\/request\"\n\t\"k8s.io\/apiserver\/pkg\/server\/egressselector\"\n\tutilflowcontrol \"k8s.io\/apiserver\/pkg\/util\/flowcontrol\"\n\t\"k8s.io\/apiserver\/pkg\/util\/x509metrics\"\n\trestclient \"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/transport\"\n\t\"k8s.io\/klog\/v2\"\n\tapiregistrationv1api \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\"\n\tapiregistrationv1apihelper \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\/helper\"\n)\n\nconst (\n\taggregatorComponent string = \"aggregator\"\n\n\taggregatedDiscoveryTimeout = 5 * time.Second\n)\n\ntype certKeyFunc func() ([]byte, []byte)\n\n\/\/ proxyHandler provides a http.Handler which will proxy traffic to locations\n\/\/ specified by items implementing Redirector.\ntype proxyHandler struct {\n\t\/\/ localDelegate is used to satisfy local APIServices\n\tlocalDelegate http.Handler\n\n\t\/\/ proxyCurrentCertKeyContent holds the client cert used to identify this proxy. Backing APIServices use this to confirm the proxy's identity\n\tproxyCurrentCertKeyContent certKeyFunc\n\tproxyTransport *http.Transport\n\n\t\/\/ Endpoints based routing to map from cluster IP to routable IP\n\tserviceResolver ServiceResolver\n\n\thandlingInfo atomic.Value\n\n\t\/\/ egressSelector selects the proper egress dialer to communicate with the custom apiserver\n\t\/\/ overwrites proxyTransport dialer if not nil\n\tegressSelector *egressselector.EgressSelector\n}\n\ntype proxyHandlingInfo struct {\n\t\/\/ local indicates that this APIService is locally satisfied\n\tlocal bool\n\n\t\/\/ name is the name of the APIService\n\tname string\n\t\/\/ restConfig holds the information for building a roundtripper\n\trestConfig *restclient.Config\n\t\/\/ transportBuildingError is an error produced while building the transport. If this\n\t\/\/ is non-nil, it will be reported to clients.\n\ttransportBuildingError error\n\t\/\/ proxyRoundTripper is the re-useable portion of the transport. It does not vary with any request.\n\tproxyRoundTripper http.RoundTripper\n\t\/\/ serviceName is the name of the service this handler proxies to\n\tserviceName string\n\t\/\/ namespace is the namespace the service lives in\n\tserviceNamespace string\n\t\/\/ serviceAvailable indicates this APIService is available or not\n\tserviceAvailable bool\n\t\/\/ servicePort is the port of the service this handler proxies to\n\tservicePort int32\n}\n\nfunc proxyError(w http.ResponseWriter, req *http.Request, error string, code int) {\n\thttp.Error(w, error, code)\n\n\tctx := req.Context()\n\tinfo, ok := genericapirequest.RequestInfoFrom(ctx)\n\tif !ok {\n\t\tklog.Warning(\"no RequestInfo found in the context\")\n\t\treturn\n\t}\n\t\/\/ TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver\n\tendpointmetrics.RecordRequestTermination(req, info, aggregatorComponent, code)\n}\n\nfunc (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvalue := r.handlingInfo.Load()\n\tif value == nil {\n\t\tr.localDelegate.ServeHTTP(w, req)\n\t\treturn\n\t}\n\thandlingInfo := value.(proxyHandlingInfo)\n\tif handlingInfo.local {\n\t\tif r.localDelegate == nil {\n\t\t\thttp.Error(w, \"\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tr.localDelegate.ServeHTTP(w, req)\n\t\treturn\n\t}\n\n\tif !handlingInfo.serviceAvailable {\n\t\tproxyError(w, req, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tif handlingInfo.transportBuildingError != nil {\n\t\tproxyError(w, req, handlingInfo.transportBuildingError.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tuser, ok := genericapirequest.UserFrom(req.Context())\n\tif !ok {\n\t\tproxyError(w, req, \"missing user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ write a new location based on the existing request pointed at the target service\n\tlocation := &url.URL{}\n\tlocation.Scheme = \"https\"\n\trloc, err := r.serviceResolver.ResolveEndpoint(handlingInfo.serviceNamespace, handlingInfo.serviceName, handlingInfo.servicePort)\n\tif err != nil {\n\t\tklog.Errorf(\"error resolving %s\/%s: %v\", handlingInfo.serviceNamespace, handlingInfo.serviceName, err)\n\t\tproxyError(w, req, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tlocation.Host = rloc.Host\n\tlocation.Path = req.URL.Path\n\tlocation.RawQuery = req.URL.Query().Encode()\n\n\tnewReq, cancelFn := newRequestForProxy(location, req)\n\tdefer cancelFn()\n\n\tif handlingInfo.proxyRoundTripper == nil {\n\t\tproxyError(w, req, \"\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tproxyRoundTripper := handlingInfo.proxyRoundTripper\n\tupgrade := httpstream.IsUpgradeRequest(req)\n\n\tproxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper)\n\n\t\/\/ If we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does\n\t\/\/ NOT use the proxyRoundTripper. It's a direct dial that bypasses the proxyRoundTripper. This means that we have to\n\t\/\/ attach the \"correct\" user headers to the request ahead of time.\n\tif upgrade {\n\t\ttransport.SetAuthProxyHeaders(newReq, user.GetName(), user.GetGroups(), user.GetExtra())\n\t}\n\n\thandler := proxy.NewUpgradeAwareHandler(location, proxyRoundTripper, true, upgrade, &responder{w: w})\n\thandler.InterceptRedirects = false\n\thandler.RequireSameHostRedirects = false\n\tutilflowcontrol.RequestDelegated(req.Context())\n\thandler.ServeHTTP(w, newReq)\n}\n\n\/\/ newRequestForProxy returns a shallow copy of the original request with a context that may include a timeout for discovery requests\nfunc newRequestForProxy(location *url.URL, req *http.Request) (*http.Request, context.CancelFunc) {\n\tnewCtx := req.Context()\n\tcancelFn := func() {}\n\n\tif requestInfo, ok := genericapirequest.RequestInfoFrom(req.Context()); ok {\n\t\t\/\/ trim leading and trailing slashes. Then \"\/apis\/group\/version\" requests are for discovery, so if we have exactly three\n\t\t\/\/ segments that we are going to proxy, we have a discovery request.\n\t\tif !requestInfo.IsResourceRequest && len(strings.Split(strings.Trim(requestInfo.Path, \"\/\"), \"\/\")) == 3 {\n\t\t\t\/\/ discovery requests are used by kubectl and others to determine which resources a server has. This is a cheap call that\n\t\t\t\/\/ should be fast for every aggregated apiserver. Latency for aggregation is expected to be low (as for all extensions)\n\t\t\t\/\/ so forcing a short timeout here helps responsiveness of all clients.\n\t\t\tnewCtx, cancelFn = context.WithTimeout(newCtx, aggregatedDiscoveryTimeout)\n\t\t}\n\t}\n\n\t\/\/ WithContext creates a shallow clone of the request with the same context.\n\tnewReq := req.WithContext(newCtx)\n\tnewReq.Header = utilnet.CloneHeader(req.Header)\n\tnewReq.URL = location\n\tnewReq.Host = location.Host\n\n\t\/\/ If the original request has an audit ID, let's make sure we propagate this\n\t\/\/ to the aggregated server.\n\tif auditID, found := genericapirequest.AuditIDFrom(req.Context()); found {\n\t\tnewReq.Header.Set(auditinternal.HeaderAuditID, string(auditID))\n\t}\n\n\treturn newReq, cancelFn\n}\n\n\/\/ responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tw http.ResponseWriter\n}\n\n\/\/ TODO this should properly handle content type negotiation\n\/\/ if the caller asked for protobuf and you write JSON bad things happen.\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteRawJSON(statusCode, obj, r.w)\n}\n\nfunc (r *responder) Error(_ http.ResponseWriter, _ *http.Request, err error) {\n\thttp.Error(r.w, err.Error(), http.StatusServiceUnavailable)\n}\n\n\/\/ these methods provide locked access to fields\n\nfunc (r *proxyHandler) updateAPIService(apiService *apiregistrationv1api.APIService) {\n\tif apiService.Spec.Service == nil {\n\t\tr.handlingInfo.Store(proxyHandlingInfo{local: true})\n\t\treturn\n\t}\n\n\tproxyClientCert, proxyClientKey := r.proxyCurrentCertKeyContent()\n\n\tclientConfig := &restclient.Config{\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tInsecure: apiService.Spec.InsecureSkipTLSVerify,\n\t\t\tServerName: apiService.Spec.Service.Name + \".\" + apiService.Spec.Service.Namespace + \".svc\",\n\t\t\tCertData: proxyClientCert,\n\t\t\tKeyData: proxyClientKey,\n\t\t\tCAData: apiService.Spec.CABundle,\n\t\t},\n\t}\n\tclientConfig.Wrap(x509metrics.NewMissingSANRoundTripperWrapperConstructor(x509MissingSANCounter))\n\n\tnewInfo := proxyHandlingInfo{\n\t\tname: apiService.Name,\n\t\trestConfig: clientConfig,\n\t\tserviceName: apiService.Spec.Service.Name,\n\t\tserviceNamespace: apiService.Spec.Service.Namespace,\n\t\tservicePort: *apiService.Spec.Service.Port,\n\t\tserviceAvailable: apiregistrationv1apihelper.IsAPIServiceConditionTrue(apiService, apiregistrationv1api.Available),\n\t}\n\tif r.egressSelector != nil {\n\t\tnetworkContext := egressselector.Cluster.AsNetworkContext()\n\t\tvar egressDialer utilnet.DialFunc\n\t\tegressDialer, err := r.egressSelector.Lookup(networkContext)\n\t\tif err != nil {\n\t\t\tklog.Warning(err.Error())\n\t\t} else {\n\t\t\tnewInfo.restConfig.Dial = egressDialer\n\t\t}\n\t} else if r.proxyTransport != nil && r.proxyTransport.DialContext != nil {\n\t\tnewInfo.restConfig.Dial = r.proxyTransport.DialContext\n\t}\n\tnewInfo.proxyRoundTripper, newInfo.transportBuildingError = restclient.TransportFor(newInfo.restConfig)\n\tif newInfo.transportBuildingError != nil {\n\t\tklog.Warning(newInfo.transportBuildingError.Error())\n\t}\n\tr.handlingInfo.Store(newInfo)\n}\n<commit_msg>remove unused parameter: intercceptRedirects & RequireSameHostRedirects<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage apiserver\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/httpstream\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/proxy\"\n\tauditinternal \"k8s.io\/apiserver\/pkg\/apis\/audit\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/handlers\/responsewriters\"\n\tendpointmetrics \"k8s.io\/apiserver\/pkg\/endpoints\/metrics\"\n\tgenericapirequest \"k8s.io\/apiserver\/pkg\/endpoints\/request\"\n\t\"k8s.io\/apiserver\/pkg\/server\/egressselector\"\n\tutilflowcontrol \"k8s.io\/apiserver\/pkg\/util\/flowcontrol\"\n\t\"k8s.io\/apiserver\/pkg\/util\/x509metrics\"\n\trestclient \"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/transport\"\n\t\"k8s.io\/klog\/v2\"\n\tapiregistrationv1api \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\"\n\tapiregistrationv1apihelper \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\/helper\"\n)\n\nconst (\n\taggregatorComponent string = \"aggregator\"\n\n\taggregatedDiscoveryTimeout = 5 * time.Second\n)\n\ntype certKeyFunc func() ([]byte, []byte)\n\n\/\/ proxyHandler provides a http.Handler which will proxy traffic to locations\n\/\/ specified by items implementing Redirector.\ntype proxyHandler struct {\n\t\/\/ localDelegate is used to satisfy local APIServices\n\tlocalDelegate http.Handler\n\n\t\/\/ proxyCurrentCertKeyContent holds the client cert used to identify this proxy. Backing APIServices use this to confirm the proxy's identity\n\tproxyCurrentCertKeyContent certKeyFunc\n\tproxyTransport *http.Transport\n\n\t\/\/ Endpoints based routing to map from cluster IP to routable IP\n\tserviceResolver ServiceResolver\n\n\thandlingInfo atomic.Value\n\n\t\/\/ egressSelector selects the proper egress dialer to communicate with the custom apiserver\n\t\/\/ overwrites proxyTransport dialer if not nil\n\tegressSelector *egressselector.EgressSelector\n}\n\ntype proxyHandlingInfo struct {\n\t\/\/ local indicates that this APIService is locally satisfied\n\tlocal bool\n\n\t\/\/ name is the name of the APIService\n\tname string\n\t\/\/ restConfig holds the information for building a roundtripper\n\trestConfig *restclient.Config\n\t\/\/ transportBuildingError is an error produced while building the transport. If this\n\t\/\/ is non-nil, it will be reported to clients.\n\ttransportBuildingError error\n\t\/\/ proxyRoundTripper is the re-useable portion of the transport. It does not vary with any request.\n\tproxyRoundTripper http.RoundTripper\n\t\/\/ serviceName is the name of the service this handler proxies to\n\tserviceName string\n\t\/\/ namespace is the namespace the service lives in\n\tserviceNamespace string\n\t\/\/ serviceAvailable indicates this APIService is available or not\n\tserviceAvailable bool\n\t\/\/ servicePort is the port of the service this handler proxies to\n\tservicePort int32\n}\n\nfunc proxyError(w http.ResponseWriter, req *http.Request, error string, code int) {\n\thttp.Error(w, error, code)\n\n\tctx := req.Context()\n\tinfo, ok := genericapirequest.RequestInfoFrom(ctx)\n\tif !ok {\n\t\tklog.Warning(\"no RequestInfo found in the context\")\n\t\treturn\n\t}\n\t\/\/ TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver\n\tendpointmetrics.RecordRequestTermination(req, info, aggregatorComponent, code)\n}\n\nfunc (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvalue := r.handlingInfo.Load()\n\tif value == nil {\n\t\tr.localDelegate.ServeHTTP(w, req)\n\t\treturn\n\t}\n\thandlingInfo := value.(proxyHandlingInfo)\n\tif handlingInfo.local {\n\t\tif r.localDelegate == nil {\n\t\t\thttp.Error(w, \"\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tr.localDelegate.ServeHTTP(w, req)\n\t\treturn\n\t}\n\n\tif !handlingInfo.serviceAvailable {\n\t\tproxyError(w, req, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tif handlingInfo.transportBuildingError != nil {\n\t\tproxyError(w, req, handlingInfo.transportBuildingError.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tuser, ok := genericapirequest.UserFrom(req.Context())\n\tif !ok {\n\t\tproxyError(w, req, \"missing user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ write a new location based on the existing request pointed at the target service\n\tlocation := &url.URL{}\n\tlocation.Scheme = \"https\"\n\trloc, err := r.serviceResolver.ResolveEndpoint(handlingInfo.serviceNamespace, handlingInfo.serviceName, handlingInfo.servicePort)\n\tif err != nil {\n\t\tklog.Errorf(\"error resolving %s\/%s: %v\", handlingInfo.serviceNamespace, handlingInfo.serviceName, err)\n\t\tproxyError(w, req, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tlocation.Host = rloc.Host\n\tlocation.Path = req.URL.Path\n\tlocation.RawQuery = req.URL.Query().Encode()\n\n\tnewReq, cancelFn := newRequestForProxy(location, req)\n\tdefer cancelFn()\n\n\tif handlingInfo.proxyRoundTripper == nil {\n\t\tproxyError(w, req, \"\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tproxyRoundTripper := handlingInfo.proxyRoundTripper\n\tupgrade := httpstream.IsUpgradeRequest(req)\n\n\tproxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper)\n\n\t\/\/ If we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does\n\t\/\/ NOT use the proxyRoundTripper. It's a direct dial that bypasses the proxyRoundTripper. This means that we have to\n\t\/\/ attach the \"correct\" user headers to the request ahead of time.\n\tif upgrade {\n\t\ttransport.SetAuthProxyHeaders(newReq, user.GetName(), user.GetGroups(), user.GetExtra())\n\t}\n\n\thandler := proxy.NewUpgradeAwareHandler(location, proxyRoundTripper, true, upgrade, &responder{w: w})\n\tutilflowcontrol.RequestDelegated(req.Context())\n\thandler.ServeHTTP(w, newReq)\n}\n\n\/\/ newRequestForProxy returns a shallow copy of the original request with a context that may include a timeout for discovery requests\nfunc newRequestForProxy(location *url.URL, req *http.Request) (*http.Request, context.CancelFunc) {\n\tnewCtx := req.Context()\n\tcancelFn := func() {}\n\n\tif requestInfo, ok := genericapirequest.RequestInfoFrom(req.Context()); ok {\n\t\t\/\/ trim leading and trailing slashes. Then \"\/apis\/group\/version\" requests are for discovery, so if we have exactly three\n\t\t\/\/ segments that we are going to proxy, we have a discovery request.\n\t\tif !requestInfo.IsResourceRequest && len(strings.Split(strings.Trim(requestInfo.Path, \"\/\"), \"\/\")) == 3 {\n\t\t\t\/\/ discovery requests are used by kubectl and others to determine which resources a server has. This is a cheap call that\n\t\t\t\/\/ should be fast for every aggregated apiserver. Latency for aggregation is expected to be low (as for all extensions)\n\t\t\t\/\/ so forcing a short timeout here helps responsiveness of all clients.\n\t\t\tnewCtx, cancelFn = context.WithTimeout(newCtx, aggregatedDiscoveryTimeout)\n\t\t}\n\t}\n\n\t\/\/ WithContext creates a shallow clone of the request with the same context.\n\tnewReq := req.WithContext(newCtx)\n\tnewReq.Header = utilnet.CloneHeader(req.Header)\n\tnewReq.URL = location\n\tnewReq.Host = location.Host\n\n\t\/\/ If the original request has an audit ID, let's make sure we propagate this\n\t\/\/ to the aggregated server.\n\tif auditID, found := genericapirequest.AuditIDFrom(req.Context()); found {\n\t\tnewReq.Header.Set(auditinternal.HeaderAuditID, string(auditID))\n\t}\n\n\treturn newReq, cancelFn\n}\n\n\/\/ responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tw http.ResponseWriter\n}\n\n\/\/ TODO this should properly handle content type negotiation\n\/\/ if the caller asked for protobuf and you write JSON bad things happen.\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteRawJSON(statusCode, obj, r.w)\n}\n\nfunc (r *responder) Error(_ http.ResponseWriter, _ *http.Request, err error) {\n\thttp.Error(r.w, err.Error(), http.StatusServiceUnavailable)\n}\n\n\/\/ these methods provide locked access to fields\n\nfunc (r *proxyHandler) updateAPIService(apiService *apiregistrationv1api.APIService) {\n\tif apiService.Spec.Service == nil {\n\t\tr.handlingInfo.Store(proxyHandlingInfo{local: true})\n\t\treturn\n\t}\n\n\tproxyClientCert, proxyClientKey := r.proxyCurrentCertKeyContent()\n\n\tclientConfig := &restclient.Config{\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tInsecure: apiService.Spec.InsecureSkipTLSVerify,\n\t\t\tServerName: apiService.Spec.Service.Name + \".\" + apiService.Spec.Service.Namespace + \".svc\",\n\t\t\tCertData: proxyClientCert,\n\t\t\tKeyData: proxyClientKey,\n\t\t\tCAData: apiService.Spec.CABundle,\n\t\t},\n\t}\n\tclientConfig.Wrap(x509metrics.NewMissingSANRoundTripperWrapperConstructor(x509MissingSANCounter))\n\n\tnewInfo := proxyHandlingInfo{\n\t\tname: apiService.Name,\n\t\trestConfig: clientConfig,\n\t\tserviceName: apiService.Spec.Service.Name,\n\t\tserviceNamespace: apiService.Spec.Service.Namespace,\n\t\tservicePort: *apiService.Spec.Service.Port,\n\t\tserviceAvailable: apiregistrationv1apihelper.IsAPIServiceConditionTrue(apiService, apiregistrationv1api.Available),\n\t}\n\tif r.egressSelector != nil {\n\t\tnetworkContext := egressselector.Cluster.AsNetworkContext()\n\t\tvar egressDialer utilnet.DialFunc\n\t\tegressDialer, err := r.egressSelector.Lookup(networkContext)\n\t\tif err != nil {\n\t\t\tklog.Warning(err.Error())\n\t\t} else {\n\t\t\tnewInfo.restConfig.Dial = egressDialer\n\t\t}\n\t} else if r.proxyTransport != nil && r.proxyTransport.DialContext != nil {\n\t\tnewInfo.restConfig.Dial = r.proxyTransport.DialContext\n\t}\n\tnewInfo.proxyRoundTripper, newInfo.transportBuildingError = restclient.TransportFor(newInfo.restConfig)\n\tif newInfo.transportBuildingError != nil {\n\t\tklog.Warning(newInfo.transportBuildingError.Error())\n\t}\n\tr.handlingInfo.Store(newInfo)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/technoweenie\/grohl\"\n\t\"github.com\/flynn\/flynn\/discoverd\/client\"\n\t\"github.com\/flynn\/flynn\/host\/cli\"\n\t\"github.com\/flynn\/flynn\/host\/config\"\n\t\"github.com\/flynn\/flynn\/host\/ports\"\n\t\"github.com\/flynn\/flynn\/host\/sampi\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/attempt\"\n\t\"github.com\/flynn\/flynn\/pkg\/cluster\"\n\t\"github.com\/flynn\/flynn\/pkg\/shutdown\"\n\t\"github.com\/flynn\/flynn\/pkg\/stream\"\n)\n\n\/\/ discoverdAttempts is the attempt strategy that is used to connect to discoverd.\nvar discoverdAttempts = attempt.Strategy{\n\tMin: 5,\n\tTotal: 10 * time.Minute,\n\tDelay: 200 * time.Millisecond,\n}\n\nconst configFile = \"\/etc\/flynn\/host.json\"\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile | log.Lmicroseconds)\n\n\tcli.Register(\"daemon\", runDaemon, `\nusage: flynn-host daemon [options] [--meta=<KEY=VAL>...]\n\noptions:\n --external=IP external IP of host\n --manifest=PATH path to manifest file [default: \/etc\/flynn-host.json]\n --state=PATH path to state file [default: \/var\/lib\/flynn\/host-state.bolt]\n --id=ID host id\n --force kill all containers booted by flynn-host before starting\n --volpath=PATH directory to create volumes in [default: \/var\/lib\/flynn\/host-volumes]\n --backend=BACKEND runner backend [default: libvirt-lxc]\n --meta=<KEY=VAL>... key=value pair to add as metadata\n --bind=IP bind containers to IP\n --flynn-init=PATH path to flynn-init binary [default: \/usr\/bin\/flynn-init]\n\t`)\n}\n\nfunc main() {\n\tusage := `usage: flynn-host [-h|--help] <command> [<args>...]\n\nOptions:\n -h, --help Show this message\n\nCommands:\n help Show usage for a specific command\n init Create cluster configuration for daemon\n daemon Start the daemon\n download Download container images\n bootstrap Bootstrap layer 1\n inspect Get low-level information about a job\n log Get the logs of a job\n ps List jobs\n stop Stop running jobs\n upload-debug-info Upload debug information to an anonymous gist\n\nSee 'flynn-host help <command>' for more information on a specific command.\n`\n\n\targs, _ := docopt.Parse(usage, nil, true, \"\", true)\n\tcmd := args.String[\"<command>\"]\n\tcmdArgs := args.All[\"<args>\"].([]string)\n\n\tif cmd == \"help\" {\n\t\tif len(cmdArgs) == 0 { \/\/ `flynn help`\n\t\t\tfmt.Println(usage)\n\t\t\treturn\n\t\t} else { \/\/ `flynn help <command>`\n\t\t\tcmd = cmdArgs[0]\n\t\t\tcmdArgs = []string{\"--help\"}\n\t\t}\n\t}\n\n\tif cmd == \"daemon\" {\n\t\t\/\/ merge in args and env from config file, if available\n\t\tvar c *config.Config\n\t\tvar err error\n\t\tif n := os.Getenv(\"FLYNN_HOST_CONFIG\"); n != \"\" {\n\t\t\tc, err = config.Open(n)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error opening config file %s: %s\", n, err)\n\t\t\t}\n\t\t} else {\n\t\t\tc, err = config.Open(configFile)\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tlog.Fatalf(\"error opening config file %s: %s\", configFile, err)\n\t\t\t}\n\t\t\tif c == nil {\n\t\t\t\tc = &config.Config{}\n\t\t\t}\n\t\t}\n\t\tcmdArgs = append(cmdArgs, c.Args...)\n\t\tfor k, v := range c.Env {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}\n\n\tif err := cli.Run(cmd, cmdArgs); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc runDaemon(args *docopt.Args) {\n\thostname, _ := os.Hostname()\n\texternalAddr := args.String[\"--external\"]\n\tbindAddr := args.String[\"--bind\"]\n\tmanifestFile := args.String[\"--manifest\"]\n\tstateFile := args.String[\"--state\"]\n\thostID := args.String[\"--id\"]\n\tforce := args.Bool[\"--force\"]\n\tvolPath := args.String[\"--volpath\"]\n\tbackendName := args.String[\"--backend\"]\n\tflynnInit := args.String[\"--flynn-init\"]\n\tmetadata := args.All[\"--meta\"].([]string)\n\n\tgrohl.AddContext(\"app\", \"host\")\n\tgrohl.Log(grohl.Data{\"at\": \"start\"})\n\tg := grohl.NewContext(grohl.Data{\"fn\": \"main\"})\n\n\tif hostID == \"\" {\n\t\thostID = strings.Replace(hostname, \"-\", \"\", -1)\n\t}\n\tif strings.Contains(hostID, \"-\") {\n\t\tlog.Fatal(\"host id must not contain dashes\")\n\t}\n\tif externalAddr == \"\" {\n\t\tvar err error\n\t\texternalAddr, err = config.DefaultExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tportAlloc := map[string]*ports.Allocator{\n\t\t\"tcp\": ports.NewAllocator(55000, 65535),\n\t\t\"udp\": ports.NewAllocator(55000, 65535),\n\t}\n\n\tsh := shutdown.NewHandler()\n\tstate := NewState(hostID, stateFile)\n\tvar backend Backend\n\tvar err error\n\n\tswitch backendName {\n\tcase \"libvirt-lxc\":\n\t\tbackend, err = NewLibvirtLXCBackend(state, volPath, \"\/tmp\/flynn-host-logs\", flynnInit)\n\tdefault:\n\t\tlog.Fatalf(\"unknown backend %q\", backendName)\n\t}\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\trouter, err := serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend}, sh)\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\tif err := state.Restore(backend); err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\tvar jobStream stream.Stream\n\tsh.BeforeExit(func() {\n\t\tif jobStream != nil {\n\t\t\tjobStream.Close()\n\t\t}\n\t\tbackend.Cleanup()\n\t})\n\n\tif force {\n\t\tif err := backend.Cleanup(); err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t}\n\n\trunner := &manifestRunner{\n\t\tenv: parseEnviron(),\n\t\texternalAddr: externalAddr,\n\t\tbindAddr: bindAddr,\n\t\tbackend: backend,\n\t\tstate: state,\n\t\tports: portAlloc,\n\t}\n\n\tdiscAddr := os.Getenv(\"DISCOVERD\")\n\tvar disc *discoverd.Client\n\tif manifestFile != \"\" {\n\t\tvar r io.Reader\n\t\tvar f *os.File\n\t\tif manifestFile == \"-\" {\n\t\t\tr = os.Stdin\n\t\t} else {\n\t\t\tf, err = os.Open(manifestFile)\n\t\t\tif err != nil {\n\t\t\t\tsh.Fatal(err)\n\t\t\t}\n\t\t\tr = f\n\t\t}\n\t\tservices, err := runner.runManifest(r)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Close()\n\t\t}\n\n\t\tif d, ok := services[\"discoverd\"]; ok {\n\t\t\tdiscAddr = fmt.Sprintf(\"%s:%d\", d.ExternalIP, d.TCPPorts[0])\n\t\t\tvar disc *discoverd.Client\n\t\t\terr = discoverdAttempts.Run(func() (err error) {\n\t\t\t\tdisc, err = discoverd.NewClientWithAddr(discAddr)\n\t\t\t\treturn\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tsh.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif discAddr == \"\" && externalAddr != \"\" {\n\t\tdiscAddr = externalAddr + \":1111\"\n\t}\n\t\/\/ HACK: use env as global for discoverd connection in sampic\n\tos.Setenv(\"DISCOVERD\", discAddr)\n\tif disc == nil {\n\t\tdisc, err = discoverd.NewClientWithAddr(discAddr)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t}\n\tsh.BeforeExit(func() { disc.UnregisterAll() })\n\tsampiStandby, err := disc.RegisterAndStandby(\"flynn-host\", externalAddr+\":1113\", map[string]string{\"id\": hostID})\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\t\/\/ Check if we are the leader so that we can use the cluster functions directly\n\tsampiCluster := sampi.NewCluster(sampi.NewState())\n\tsampiAPI := &sampi.HTTPAPI{Cluster: sampiCluster}\n\tselect {\n\tcase <-sampiStandby:\n\t\tg.Log(grohl.Data{\"at\": \"sampi_leader\"})\n\t\tsampiAPI.RegisterRoutes(router, sh)\n\tcase <-time.After(5 * time.Millisecond):\n\t\tgo func() {\n\t\t\t<-sampiStandby\n\t\t\tg.Log(grohl.Data{\"at\": \"sampi_leader\"})\n\t\t\tsampiAPI.RegisterRoutes(router, sh)\n\t\t}()\n\t}\n\tcluster, err := cluster.NewClient()\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\tsh.BeforeExit(func() { cluster.Close() })\n\n\tg.Log(grohl.Data{\"at\": \"sampi_connected\"})\n\n\tevents := state.AddListener(\"all\")\n\tgo syncScheduler(cluster, hostID, events)\n\n\th := &host.Host{ID: hostID, Metadata: make(map[string]string)}\n\tfor _, s := range metadata {\n\t\tkv := strings.SplitN(s, \"=\", 2)\n\t\th.Metadata[kv[0]] = kv[1]\n\t}\n\n\tfor {\n\t\tnewLeader := cluster.NewLeaderSignal()\n\n\t\th.Jobs = state.ClusterJobs()\n\t\tjobs := make(chan *host.Job)\n\t\tjobStream, err = cluster.RegisterHost(h, jobs)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t\tg.Log(grohl.Data{\"at\": \"host_registered\"})\n\t\tfor job := range jobs {\n\t\t\tif externalAddr != \"\" {\n\t\t\t\tif job.Config.Env == nil {\n\t\t\t\t\tjob.Config.Env = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tjob.Config.Env[\"EXTERNAL_IP\"] = externalAddr\n\t\t\t\tjob.Config.Env[\"DISCOVERD\"] = discAddr\n\t\t\t}\n\t\t\tif err := backend.Run(job); err != nil {\n\t\t\t\tstate.SetStatusFailed(job.ID, err)\n\t\t\t}\n\t\t}\n\t\tg.Log(grohl.Data{\"at\": \"sampi_disconnected\", \"err\": jobStream.Err})\n\n\t\t\/\/ if the process is shutting down, just block\n\t\tif sh.Active {\n\t\t\t<-make(chan struct{})\n\t\t}\n\n\t\t<-newLeader\n\t}\n}\n\nfunc syncScheduler(scheduler *cluster.Client, hostID string, events <-chan host.Event) {\n\tfor event := range events {\n\t\tif event.Event != \"stop\" {\n\t\t\tcontinue\n\t\t}\n\t\tgrohl.Log(grohl.Data{\"fn\": \"scheduler_event\", \"at\": \"remove_job\", \"job.id\": event.JobID})\n\t\tif err := scheduler.RemoveJob(hostID, event.JobID); err != nil {\n\t\t\tgrohl.Log(grohl.Data{\"fn\": \"scheduler_event\", \"at\": \"remove_job\", \"status\": \"error\", \"err\": err, \"job.id\": event.JobID})\n\t\t}\n\t}\n}\n<commit_msg>host: Drop unnecessary shutdown cleanup.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/technoweenie\/grohl\"\n\t\"github.com\/flynn\/flynn\/discoverd\/client\"\n\t\"github.com\/flynn\/flynn\/host\/cli\"\n\t\"github.com\/flynn\/flynn\/host\/config\"\n\t\"github.com\/flynn\/flynn\/host\/ports\"\n\t\"github.com\/flynn\/flynn\/host\/sampi\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/attempt\"\n\t\"github.com\/flynn\/flynn\/pkg\/cluster\"\n\t\"github.com\/flynn\/flynn\/pkg\/shutdown\"\n)\n\n\/\/ discoverdAttempts is the attempt strategy that is used to connect to discoverd.\nvar discoverdAttempts = attempt.Strategy{\n\tMin: 5,\n\tTotal: 10 * time.Minute,\n\tDelay: 200 * time.Millisecond,\n}\n\nconst configFile = \"\/etc\/flynn\/host.json\"\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile | log.Lmicroseconds)\n\n\tcli.Register(\"daemon\", runDaemon, `\nusage: flynn-host daemon [options] [--meta=<KEY=VAL>...]\n\noptions:\n --external=IP external IP of host\n --manifest=PATH path to manifest file [default: \/etc\/flynn-host.json]\n --state=PATH path to state file [default: \/var\/lib\/flynn\/host-state.bolt]\n --id=ID host id\n --force kill all containers booted by flynn-host before starting\n --volpath=PATH directory to create volumes in [default: \/var\/lib\/flynn\/host-volumes]\n --backend=BACKEND runner backend [default: libvirt-lxc]\n --meta=<KEY=VAL>... key=value pair to add as metadata\n --bind=IP bind containers to IP\n --flynn-init=PATH path to flynn-init binary [default: \/usr\/bin\/flynn-init]\n\t`)\n}\n\nfunc main() {\n\tusage := `usage: flynn-host [-h|--help] <command> [<args>...]\n\nOptions:\n -h, --help Show this message\n\nCommands:\n help Show usage for a specific command\n init Create cluster configuration for daemon\n daemon Start the daemon\n download Download container images\n bootstrap Bootstrap layer 1\n inspect Get low-level information about a job\n log Get the logs of a job\n ps List jobs\n stop Stop running jobs\n upload-debug-info Upload debug information to an anonymous gist\n\nSee 'flynn-host help <command>' for more information on a specific command.\n`\n\n\targs, _ := docopt.Parse(usage, nil, true, \"\", true)\n\tcmd := args.String[\"<command>\"]\n\tcmdArgs := args.All[\"<args>\"].([]string)\n\n\tif cmd == \"help\" {\n\t\tif len(cmdArgs) == 0 { \/\/ `flynn help`\n\t\t\tfmt.Println(usage)\n\t\t\treturn\n\t\t} else { \/\/ `flynn help <command>`\n\t\t\tcmd = cmdArgs[0]\n\t\t\tcmdArgs = []string{\"--help\"}\n\t\t}\n\t}\n\n\tif cmd == \"daemon\" {\n\t\t\/\/ merge in args and env from config file, if available\n\t\tvar c *config.Config\n\t\tvar err error\n\t\tif n := os.Getenv(\"FLYNN_HOST_CONFIG\"); n != \"\" {\n\t\t\tc, err = config.Open(n)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error opening config file %s: %s\", n, err)\n\t\t\t}\n\t\t} else {\n\t\t\tc, err = config.Open(configFile)\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tlog.Fatalf(\"error opening config file %s: %s\", configFile, err)\n\t\t\t}\n\t\t\tif c == nil {\n\t\t\t\tc = &config.Config{}\n\t\t\t}\n\t\t}\n\t\tcmdArgs = append(cmdArgs, c.Args...)\n\t\tfor k, v := range c.Env {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}\n\n\tif err := cli.Run(cmd, cmdArgs); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc runDaemon(args *docopt.Args) {\n\thostname, _ := os.Hostname()\n\texternalAddr := args.String[\"--external\"]\n\tbindAddr := args.String[\"--bind\"]\n\tmanifestFile := args.String[\"--manifest\"]\n\tstateFile := args.String[\"--state\"]\n\thostID := args.String[\"--id\"]\n\tforce := args.Bool[\"--force\"]\n\tvolPath := args.String[\"--volpath\"]\n\tbackendName := args.String[\"--backend\"]\n\tflynnInit := args.String[\"--flynn-init\"]\n\tmetadata := args.All[\"--meta\"].([]string)\n\n\tgrohl.AddContext(\"app\", \"host\")\n\tgrohl.Log(grohl.Data{\"at\": \"start\"})\n\tg := grohl.NewContext(grohl.Data{\"fn\": \"main\"})\n\n\tif hostID == \"\" {\n\t\thostID = strings.Replace(hostname, \"-\", \"\", -1)\n\t}\n\tif strings.Contains(hostID, \"-\") {\n\t\tlog.Fatal(\"host id must not contain dashes\")\n\t}\n\tif externalAddr == \"\" {\n\t\tvar err error\n\t\texternalAddr, err = config.DefaultExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tportAlloc := map[string]*ports.Allocator{\n\t\t\"tcp\": ports.NewAllocator(55000, 65535),\n\t\t\"udp\": ports.NewAllocator(55000, 65535),\n\t}\n\n\tsh := shutdown.NewHandler()\n\tstate := NewState(hostID, stateFile)\n\tvar backend Backend\n\tvar err error\n\n\tswitch backendName {\n\tcase \"libvirt-lxc\":\n\t\tbackend, err = NewLibvirtLXCBackend(state, volPath, \"\/tmp\/flynn-host-logs\", flynnInit)\n\tdefault:\n\t\tlog.Fatalf(\"unknown backend %q\", backendName)\n\t}\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\trouter, err := serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend}, sh)\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\tif err := state.Restore(backend); err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\tsh.BeforeExit(func() { backend.Cleanup() })\n\n\tif force {\n\t\tif err := backend.Cleanup(); err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t}\n\n\trunner := &manifestRunner{\n\t\tenv: parseEnviron(),\n\t\texternalAddr: externalAddr,\n\t\tbindAddr: bindAddr,\n\t\tbackend: backend,\n\t\tstate: state,\n\t\tports: portAlloc,\n\t}\n\n\tdiscAddr := os.Getenv(\"DISCOVERD\")\n\tvar disc *discoverd.Client\n\tif manifestFile != \"\" {\n\t\tvar r io.Reader\n\t\tvar f *os.File\n\t\tif manifestFile == \"-\" {\n\t\t\tr = os.Stdin\n\t\t} else {\n\t\t\tf, err = os.Open(manifestFile)\n\t\t\tif err != nil {\n\t\t\t\tsh.Fatal(err)\n\t\t\t}\n\t\t\tr = f\n\t\t}\n\t\tservices, err := runner.runManifest(r)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Close()\n\t\t}\n\n\t\tif d, ok := services[\"discoverd\"]; ok {\n\t\t\tdiscAddr = fmt.Sprintf(\"%s:%d\", d.ExternalIP, d.TCPPorts[0])\n\t\t\tvar disc *discoverd.Client\n\t\t\terr = discoverdAttempts.Run(func() (err error) {\n\t\t\t\tdisc, err = discoverd.NewClientWithAddr(discAddr)\n\t\t\t\treturn\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tsh.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif discAddr == \"\" && externalAddr != \"\" {\n\t\tdiscAddr = externalAddr + \":1111\"\n\t}\n\t\/\/ HACK: use env as global for discoverd connection in sampic\n\tos.Setenv(\"DISCOVERD\", discAddr)\n\tif disc == nil {\n\t\tdisc, err = discoverd.NewClientWithAddr(discAddr)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t}\n\tsh.BeforeExit(func() { disc.UnregisterAll() })\n\tsampiStandby, err := disc.RegisterAndStandby(\"flynn-host\", externalAddr+\":1113\", map[string]string{\"id\": hostID})\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\n\t\/\/ Check if we are the leader so that we can use the cluster functions directly\n\tsampiCluster := sampi.NewCluster(sampi.NewState())\n\tsampiAPI := &sampi.HTTPAPI{Cluster: sampiCluster}\n\tselect {\n\tcase <-sampiStandby:\n\t\tg.Log(grohl.Data{\"at\": \"sampi_leader\"})\n\t\tsampiAPI.RegisterRoutes(router, sh)\n\tcase <-time.After(5 * time.Millisecond):\n\t\tgo func() {\n\t\t\t<-sampiStandby\n\t\t\tg.Log(grohl.Data{\"at\": \"sampi_leader\"})\n\t\t\tsampiAPI.RegisterRoutes(router, sh)\n\t\t}()\n\t}\n\tcluster, err := cluster.NewClient()\n\tif err != nil {\n\t\tsh.Fatal(err)\n\t}\n\tsh.BeforeExit(func() { cluster.Close() })\n\n\tg.Log(grohl.Data{\"at\": \"sampi_connected\"})\n\n\tevents := state.AddListener(\"all\")\n\tgo syncScheduler(cluster, hostID, events)\n\n\th := &host.Host{ID: hostID, Metadata: make(map[string]string)}\n\tfor _, s := range metadata {\n\t\tkv := strings.SplitN(s, \"=\", 2)\n\t\th.Metadata[kv[0]] = kv[1]\n\t}\n\n\tfor {\n\t\tnewLeader := cluster.NewLeaderSignal()\n\n\t\th.Jobs = state.ClusterJobs()\n\t\tjobs := make(chan *host.Job)\n\t\tjobStream, err := cluster.RegisterHost(h, jobs)\n\t\tif err != nil {\n\t\t\tsh.Fatal(err)\n\t\t}\n\t\tg.Log(grohl.Data{\"at\": \"host_registered\"})\n\t\tfor job := range jobs {\n\t\t\tif externalAddr != \"\" {\n\t\t\t\tif job.Config.Env == nil {\n\t\t\t\t\tjob.Config.Env = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tjob.Config.Env[\"EXTERNAL_IP\"] = externalAddr\n\t\t\t\tjob.Config.Env[\"DISCOVERD\"] = discAddr\n\t\t\t}\n\t\t\tif err := backend.Run(job); err != nil {\n\t\t\t\tstate.SetStatusFailed(job.ID, err)\n\t\t\t}\n\t\t}\n\t\tg.Log(grohl.Data{\"at\": \"sampi_disconnected\", \"err\": jobStream.Err})\n\n\t\t\/\/ if the process is shutting down, just block\n\t\tif sh.Active {\n\t\t\t<-make(chan struct{})\n\t\t}\n\n\t\t<-newLeader\n\t}\n}\n\nfunc syncScheduler(scheduler *cluster.Client, hostID string, events <-chan host.Event) {\n\tfor event := range events {\n\t\tif event.Event != \"stop\" {\n\t\t\tcontinue\n\t\t}\n\t\tgrohl.Log(grohl.Data{\"fn\": \"scheduler_event\", \"at\": \"remove_job\", \"job.id\": event.JobID})\n\t\tif err := scheduler.RemoveJob(hostID, event.JobID); err != nil {\n\t\t\tgrohl.Log(grohl.Data{\"fn\": \"scheduler_event\", \"at\": \"remove_job\", \"status\": \"error\", \"err\": err, \"job.id\": event.JobID})\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Rook Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\tcephver \"github.com\/rook\/rook\/pkg\/operator\/ceph\/version\"\n)\n\n\/\/ ClusterInfo is a collection of information about a particular Ceph cluster. Rook uses information\n\/\/ about the cluster to configure daemons to connect to the desired cluster.\ntype ClusterInfo struct {\n\tFSID string\n\tMonitorSecret string\n\tAdminSecret string\n\tName string\n\tMonitors map[string]*MonInfo\n\tCephVersion cephver.CephVersion\n}\n\n\/\/ MonInfo is a collection of information about a Ceph mon.\ntype MonInfo struct {\n\tName string `json:\"name\"`\n\tEndpoint string `json:\"endpoint\"`\n}\n\n\/\/ IsInitialized returns true if the critical information in the ClusterInfo struct has been filled\n\/\/ in. This method exists less out of necessity than the desire to be explicit about the lifecycle\n\/\/ of the ClusterInfo struct during startup, specifically that it is expected to exist after the\n\/\/ Rook operator has started up or connected to the first components of the Ceph cluster.\nfunc (c *ClusterInfo) IsInitialized() bool {\n\tif c == nil || c.FSID == \"\" || c.MonitorSecret == \"\" || c.AdminSecret == \"\" {\n\t\tlogger.Errorf(\"clusterInfo: %+v\", c)\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ NewMonInfo returns a new Ceph mon info struct from the given inputs.\nfunc NewMonInfo(name, ip string, port int32) *MonInfo {\n\treturn &MonInfo{Name: name, Endpoint: net.JoinHostPort(ip, fmt.Sprintf(\"%d\", port))}\n}\n\n\/\/ Log writes the cluster info struct to the logger\nfunc (c *ClusterInfo) Log(logger *capnslog.PackageLogger) {\n\tmons := []string{}\n\tfor _, m := range c.Monitors {\n\t\t\/\/ Sprintf formatting is safe as user input isn't being used. Issue https:\/\/github.com\/rook\/rook\/issues\/4575\n\t\tmons = append(mons, fmt.Sprintf(\"{Name: %s, Endpoint: %s}\", m.Name, m.Endpoint))\n\t}\n\tmonsec := \"\"\n\tif c.MonitorSecret != \"\" {\n\t\tmonsec = \"<hidden>\"\n\t}\n\tadmsec := \"\"\n\tif c.AdminSecret != \"\" {\n\t\tadmsec = \"<hidden>\"\n\t}\n\ts := fmt.Sprintf(\n\t\t\"ClusterInfo: {FSID: %s, MonitorSecret: %s, AdminSecret: %s, Name: %s, Monitors: %s}\",\n\t\tc.FSID, monsec, admsec, c.Name, strings.Join(mons, \" \"))\n\tlogger.Info(s)\n}\n<commit_msg>ceph: stop potential secret leak<commit_after>\/*\nCopyright 2016 The Rook Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\tcephver \"github.com\/rook\/rook\/pkg\/operator\/ceph\/version\"\n)\n\n\/\/ ClusterInfo is a collection of information about a particular Ceph cluster. Rook uses information\n\/\/ about the cluster to configure daemons to connect to the desired cluster.\ntype ClusterInfo struct {\n\tFSID string\n\tMonitorSecret string\n\tAdminSecret string\n\tName string\n\tMonitors map[string]*MonInfo\n\tCephVersion cephver.CephVersion\n}\n\n\/\/ MonInfo is a collection of information about a Ceph mon.\ntype MonInfo struct {\n\tName string `json:\"name\"`\n\tEndpoint string `json:\"endpoint\"`\n}\n\n\/\/ IsInitialized returns true if the critical information in the ClusterInfo struct has been filled\n\/\/ in. This method exists less out of necessity than the desire to be explicit about the lifecycle\n\/\/ of the ClusterInfo struct during startup, specifically that it is expected to exist after the\n\/\/ Rook operator has started up or connected to the first components of the Ceph cluster.\nfunc (c *ClusterInfo) IsInitialized() bool {\n\tvar isInitialized bool\n\n\tif c == nil {\n\t\tlogger.Error(\"clusterInfo is nil\")\n\t} else if c.FSID == \"\" {\n\t\tlogger.Error(\"cluster fsid is empty\")\n\t} else if c.MonitorSecret == \"\" {\n\t\tlogger.Error(\"monitor secret is empty\")\n\t} else if c.AdminSecret == \"\" {\n\t\tlogger.Error(\"admin secret is empty\")\n\t} else {\n\t\tisInitialized = true\n\t}\n\n\treturn isInitialized\n}\n\n\/\/ NewMonInfo returns a new Ceph mon info struct from the given inputs.\nfunc NewMonInfo(name, ip string, port int32) *MonInfo {\n\treturn &MonInfo{Name: name, Endpoint: net.JoinHostPort(ip, fmt.Sprintf(\"%d\", port))}\n}\n\n\/\/ Log writes the cluster info struct to the logger\nfunc (c *ClusterInfo) Log(logger *capnslog.PackageLogger) {\n\tmons := []string{}\n\tfor _, m := range c.Monitors {\n\t\t\/\/ Sprintf formatting is safe as user input isn't being used. Issue https:\/\/github.com\/rook\/rook\/issues\/4575\n\t\tmons = append(mons, fmt.Sprintf(\"{Name: %s, Endpoint: %s}\", m.Name, m.Endpoint))\n\t}\n\tmonsec := \"\"\n\tif c.MonitorSecret != \"\" {\n\t\tmonsec = \"<hidden>\"\n\t}\n\tadmsec := \"\"\n\tif c.AdminSecret != \"\" {\n\t\tadmsec = \"<hidden>\"\n\t}\n\ts := fmt.Sprintf(\n\t\t\"ClusterInfo: {FSID: %s, MonitorSecret: %s, AdminSecret: %s, Name: %s, Monitors: %s}\",\n\t\tc.FSID, monsec, admsec, c.Name, strings.Join(mons, \" \"))\n\tlogger.Info(s)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport(\n\t\"fmt\"\n\t\"os\"\n\t\"flag\"\n)\n\nvar repoRoot = flag.String(\"d\", \".\", \"path to repo\")\n\n\ntype Commit struct {\n\tTree string\n\tParent string\n\tAuthor string\n\tCommitter string\n}\n\n\n\nfunc ParseCommit(in io.Reader) (*Commit, error) {\n\n\tcommit := new(Commit)\n\n\treturn commit, nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tf, err := GetArgInputFile()\n\n\tcmt, err := ParseCommit(f)\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"ERROR:\", err)\t\n\t\treturn\n\t}\n\n\tfmt.Fprintln(cmt)\n}\n<commit_msg>FIX: compile err<commit_after>package main\n\nimport(\n\t\"fmt\"\n\t\"os\"\n\t\"flag\"\n\t\"io\"\n)\n\nvar repoRoot = flag.String(\"d\", \".\", \"path to repo\")\n\n\ntype Commit struct {\n\tTree string\n\tParent string\n\tAuthor string\n\tCommitter string\n}\n\n\n\nfunc ParseCommit(in io.Reader) (*Commit, error) {\n\n\tcommit := new(Commit)\n\n\treturn commit, nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tf, err := GetArgInputFile()\n\n\tcmt, err := ParseCommit(f)\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"ERROR:\", err)\t\n\t\treturn\n\t}\n\n\tfmt.Fprintln(os.Stdout, cmt)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017-2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage loader\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"runtime\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/command\/exec\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ OutputType determines the type to be generated by the compilation steps.\ntype OutputType string\n\nconst (\n\toutputObject = OutputType(\"obj\")\n\toutputAssembly = OutputType(\"asm\")\n\toutputSource = OutputType(\"c\")\n)\n\n\/\/ progInfo describes a program to be compiled with the expected output format\ntype progInfo struct {\n\t\/\/ Source is the program source (base) filename to be compiled\n\tSource string\n\t\/\/ Output is the expected (base) filename produced from the source\n\tOutput string\n\t\/\/ OutputType to be created by LLVM\n\tOutputType OutputType\n}\n\n\/\/ directoryInfo includes relevant directories for compilation and linking\ntype directoryInfo struct {\n\t\/\/ Library contains the library code to be used for compilation\n\tLibrary string\n\t\/\/ Runtime contains headers for compilation\n\tRuntime string\n\t\/\/ State contains node, lxc, and features headers for templatization\n\tState string\n\t\/\/ Output is the directory where the files will be stored\n\tOutput string\n}\n\nvar (\n\tcompiler = \"clang\"\n\tlinker = \"llc\"\n\tstandardCFlags = []string{\"-O2\", \"-target\", \"bpf\",\n\t\tfmt.Sprintf(\"-D__NR_CPUS__=%d\", runtime.NumCPU()),\n\t\t\"-Wno-address-of-packed-member\", \"-Wno-unknown-warning-option\"}\n\tstandardLDFlags = []string{\"-march=bpf\", \"-mcpu=probe\"}\n\n\tendpointPrefix = \"bpf_lxc\"\n\tendpointProg = fmt.Sprintf(\"%s.%s\", endpointPrefix, outputSource)\n\tendpointObj = fmt.Sprintf(\"%s.o\", endpointPrefix)\n\tendpointObjDebug = fmt.Sprintf(\"%s.dbg.o\", endpointPrefix)\n\tendpointAsm = fmt.Sprintf(\"%s.%s\", endpointPrefix, outputAssembly)\n\n\t\/\/ testIncludes allows the unit tests to inject additional include\n\t\/\/ paths into the compile command at test time. It is usually nil.\n\ttestIncludes string\n\n\tdebugProgs = []*progInfo{\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointObjDebug,\n\t\t\tOutputType: outputObject,\n\t\t},\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointAsm,\n\t\t\tOutputType: outputAssembly,\n\t\t},\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointProg,\n\t\t\tOutputType: outputSource,\n\t\t},\n\t}\n\tdatapathProg = &progInfo{\n\t\tSource: endpointProg,\n\t\tOutput: endpointObj,\n\t\tOutputType: outputObject,\n\t}\n)\n\n\/\/ progLDFlags determines the loader flags for the specified prog and paths.\nfunc progLDFlags(prog *progInfo, dir *directoryInfo) []string {\n\treturn []string{\n\t\tfmt.Sprintf(\"-filetype=%s\", prog.OutputType),\n\t\t\"-o\", path.Join(dir.Output, prog.Output),\n\t}\n}\n\n\/\/ prepareCmdPipes attaches pipes to the stdout and stderr of the specified\n\/\/ command, and returns the stdout, stderr, and any error that may have\n\/\/ occurred while creating the pipes.\nfunc prepareCmdPipes(cmd *exec.Cmd) (io.ReadCloser, io.ReadCloser, error) {\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Failed to get stdout pipe: %s\", err)\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tstdout.Close()\n\t\treturn nil, nil, fmt.Errorf(\"Failed to get stderr pipe: %s\", err)\n\t}\n\n\treturn stdout, stderr, nil\n}\n\n\/\/ compileAndLink links the specified program from the specified path to the\n\/\/ intermediate representation, to the output specified in the prog's info.\nfunc compileAndLink(ctx context.Context, prog *progInfo, dir *directoryInfo, debug bool, compileArgs ...string) error {\n\tcompileCmd, cancelCompile := exec.WithCancel(ctx, compiler, compileArgs...)\n\tdefer cancelCompile()\n\tcompilerStdout, compilerStderr, err := prepareCmdPipes(compileCmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlinkArgs := make([]string, 0, 8)\n\tif debug {\n\t\tlinkArgs = append(linkArgs, \"-mattr=dwarfris\")\n\t}\n\tlinkArgs = append(linkArgs, standardLDFlags...)\n\tlinkArgs = append(linkArgs, progLDFlags(prog, dir)...)\n\n\tlinkCmd := exec.CommandContext(ctx, linker, linkArgs...)\n\tlinkCmd.Stdin = compilerStdout\n\tif err := compileCmd.Start(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to start command %s: %s\", compileCmd.Args, err)\n\t}\n\n\tvar compileOut []byte\n\t\/* Ignoring the output here because pkg\/command\/exec will log it. *\/\n\t_, err = linkCmd.CombinedOutput(log, true)\n\tif err == nil {\n\t\tcompileOut, _ = ioutil.ReadAll(compilerStderr)\n\t\terr = compileCmd.Wait()\n\t} else {\n\t\tcancelCompile()\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to compile %s: %s\", prog.Output, err)\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"compiler-pid\": compileCmd.Process.Pid,\n\t\t\t\"linker-pid\": linkCmd.Process.Pid,\n\t\t}).Error(err)\n\t\tif compileOut != nil {\n\t\t\tscopedLog := log.Warn\n\t\t\tif debug {\n\t\t\t\tscopedLog = log.Debug\n\t\t\t}\n\t\t\tscanner := bufio.NewScanner(bytes.NewReader(compileOut))\n\t\t\tfor scanner.Scan() {\n\t\t\t\tscopedLog(scanner.Text())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ progLDFlags determines the compiler flags for the specified prog and paths.\nfunc progCFlags(prog *progInfo, dir *directoryInfo) []string {\n\tvar output string\n\n\tif prog.OutputType == outputSource {\n\t\toutput = path.Join(dir.Output, prog.Output)\n\t} else {\n\t\toutput = \"-\" \/\/ stdout\n\t}\n\n\treturn []string{\n\t\ttestIncludes,\n\t\tfmt.Sprintf(\"-I%s\", path.Join(dir.Runtime, \"globals\")),\n\t\tfmt.Sprintf(\"-I%s\", dir.State),\n\t\tfmt.Sprintf(\"-I%s\", path.Join(dir.Library, \"include\")),\n\t\t\"-c\", path.Join(dir.Library, prog.Source),\n\t\t\"-o\", output,\n\t}\n}\n\n\/\/ compile and link a program.\nfunc compile(ctx context.Context, prog *progInfo, dir *directoryInfo, debug bool) (err error) {\n\targs := make([]string, 0, 16)\n\tif prog.OutputType == outputSource {\n\t\targs = append(args, \"-E\") \/\/ Preprocessor\n\t} else {\n\t\targs = append(args, \"-emit-llvm\")\n\t\tif debug {\n\t\t\targs = append(args, \"-g\")\n\t\t}\n\t}\n\targs = append(args, standardCFlags...)\n\targs = append(args, progCFlags(prog, dir)...)\n\n\t\/\/ Compilation is split between two exec calls. First clang generates\n\t\/\/ LLVM bitcode and then later llc compiles it to byte-code.\n\tlog.WithFields(logrus.Fields{\n\t\t\"target\": compiler,\n\t\t\"args\": args,\n\t}).Debug(\"Launching compiler\")\n\tif prog.OutputType == outputSource {\n\t\tcompileCmd := exec.CommandContext(ctx, compiler, args...)\n\t\t_, err = compileCmd.CombinedOutput(log, debug)\n\t} else {\n\t\tswitch prog.OutputType {\n\t\tcase outputObject:\n\t\t\terr = compileAndLink(ctx, prog, dir, debug, args...)\n\t\tcase outputAssembly:\n\t\t\terr = compileAndLink(ctx, prog, dir, false, args...)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unhandled progInfo.OutputType %s\", prog.OutputType)\n\t\t}\n\t}\n\n\treturn err\n}\n<commit_msg>datapath: Fix nil dereference in logging statement<commit_after>\/\/ Copyright 2017-2019 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage loader\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/command\/exec\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ OutputType determines the type to be generated by the compilation steps.\ntype OutputType string\n\nconst (\n\toutputObject = OutputType(\"obj\")\n\toutputAssembly = OutputType(\"asm\")\n\toutputSource = OutputType(\"c\")\n)\n\n\/\/ progInfo describes a program to be compiled with the expected output format\ntype progInfo struct {\n\t\/\/ Source is the program source (base) filename to be compiled\n\tSource string\n\t\/\/ Output is the expected (base) filename produced from the source\n\tOutput string\n\t\/\/ OutputType to be created by LLVM\n\tOutputType OutputType\n}\n\n\/\/ directoryInfo includes relevant directories for compilation and linking\ntype directoryInfo struct {\n\t\/\/ Library contains the library code to be used for compilation\n\tLibrary string\n\t\/\/ Runtime contains headers for compilation\n\tRuntime string\n\t\/\/ State contains node, lxc, and features headers for templatization\n\tState string\n\t\/\/ Output is the directory where the files will be stored\n\tOutput string\n}\n\nvar (\n\tcompiler = \"clang\"\n\tlinker = \"llc\"\n\tstandardCFlags = []string{\"-O2\", \"-target\", \"bpf\",\n\t\tfmt.Sprintf(\"-D__NR_CPUS__=%d\", runtime.NumCPU()),\n\t\t\"-Wno-address-of-packed-member\", \"-Wno-unknown-warning-option\"}\n\tstandardLDFlags = []string{\"-march=bpf\", \"-mcpu=probe\"}\n\n\tendpointPrefix = \"bpf_lxc\"\n\tendpointProg = fmt.Sprintf(\"%s.%s\", endpointPrefix, outputSource)\n\tendpointObj = fmt.Sprintf(\"%s.o\", endpointPrefix)\n\tendpointObjDebug = fmt.Sprintf(\"%s.dbg.o\", endpointPrefix)\n\tendpointAsm = fmt.Sprintf(\"%s.%s\", endpointPrefix, outputAssembly)\n\n\t\/\/ testIncludes allows the unit tests to inject additional include\n\t\/\/ paths into the compile command at test time. It is usually nil.\n\ttestIncludes string\n\n\tdebugProgs = []*progInfo{\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointObjDebug,\n\t\t\tOutputType: outputObject,\n\t\t},\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointAsm,\n\t\t\tOutputType: outputAssembly,\n\t\t},\n\t\t{\n\t\t\tSource: endpointProg,\n\t\t\tOutput: endpointProg,\n\t\t\tOutputType: outputSource,\n\t\t},\n\t}\n\tdatapathProg = &progInfo{\n\t\tSource: endpointProg,\n\t\tOutput: endpointObj,\n\t\tOutputType: outputObject,\n\t}\n)\n\n\/\/ progLDFlags determines the loader flags for the specified prog and paths.\nfunc progLDFlags(prog *progInfo, dir *directoryInfo) []string {\n\treturn []string{\n\t\tfmt.Sprintf(\"-filetype=%s\", prog.OutputType),\n\t\t\"-o\", path.Join(dir.Output, prog.Output),\n\t}\n}\n\n\/\/ prepareCmdPipes attaches pipes to the stdout and stderr of the specified\n\/\/ command, and returns the stdout, stderr, and any error that may have\n\/\/ occurred while creating the pipes.\nfunc prepareCmdPipes(cmd *exec.Cmd) (io.ReadCloser, io.ReadCloser, error) {\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Failed to get stdout pipe: %s\", err)\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tstdout.Close()\n\t\treturn nil, nil, fmt.Errorf(\"Failed to get stderr pipe: %s\", err)\n\t}\n\n\treturn stdout, stderr, nil\n}\n\nfunc pidFromProcess(proc *os.Process) string {\n\tresult := \"not-started\"\n\tif proc != nil {\n\t\tresult = fmt.Sprintf(\"%d\", proc.Pid)\n\t}\n\treturn result\n}\n\n\/\/ compileAndLink links the specified program from the specified path to the\n\/\/ intermediate representation, to the output specified in the prog's info.\nfunc compileAndLink(ctx context.Context, prog *progInfo, dir *directoryInfo, debug bool, compileArgs ...string) error {\n\tcompileCmd, cancelCompile := exec.WithCancel(ctx, compiler, compileArgs...)\n\tdefer cancelCompile()\n\tcompilerStdout, compilerStderr, err := prepareCmdPipes(compileCmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlinkArgs := make([]string, 0, 8)\n\tif debug {\n\t\tlinkArgs = append(linkArgs, \"-mattr=dwarfris\")\n\t}\n\tlinkArgs = append(linkArgs, standardLDFlags...)\n\tlinkArgs = append(linkArgs, progLDFlags(prog, dir)...)\n\n\tlinkCmd := exec.CommandContext(ctx, linker, linkArgs...)\n\tlinkCmd.Stdin = compilerStdout\n\tif err := compileCmd.Start(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to start command %s: %s\", compileCmd.Args, err)\n\t}\n\n\tvar compileOut []byte\n\t\/* Ignoring the output here because pkg\/command\/exec will log it. *\/\n\t_, err = linkCmd.CombinedOutput(log, true)\n\tif err == nil {\n\t\tcompileOut, _ = ioutil.ReadAll(compilerStderr)\n\t\terr = compileCmd.Wait()\n\t} else {\n\t\tcancelCompile()\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to compile %s: %s\", prog.Output, err)\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"compiler-pid\": pidFromProcess(compileCmd.Process),\n\t\t\t\"linker-pid\": pidFromProcess(linkCmd.Process),\n\t\t}).Error(err)\n\t\tif compileOut != nil {\n\t\t\tscopedLog := log.Warn\n\t\t\tif debug {\n\t\t\t\tscopedLog = log.Debug\n\t\t\t}\n\t\t\tscanner := bufio.NewScanner(bytes.NewReader(compileOut))\n\t\t\tfor scanner.Scan() {\n\t\t\t\tscopedLog(scanner.Text())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ progLDFlags determines the compiler flags for the specified prog and paths.\nfunc progCFlags(prog *progInfo, dir *directoryInfo) []string {\n\tvar output string\n\n\tif prog.OutputType == outputSource {\n\t\toutput = path.Join(dir.Output, prog.Output)\n\t} else {\n\t\toutput = \"-\" \/\/ stdout\n\t}\n\n\treturn []string{\n\t\ttestIncludes,\n\t\tfmt.Sprintf(\"-I%s\", path.Join(dir.Runtime, \"globals\")),\n\t\tfmt.Sprintf(\"-I%s\", dir.State),\n\t\tfmt.Sprintf(\"-I%s\", path.Join(dir.Library, \"include\")),\n\t\t\"-c\", path.Join(dir.Library, prog.Source),\n\t\t\"-o\", output,\n\t}\n}\n\n\/\/ compile and link a program.\nfunc compile(ctx context.Context, prog *progInfo, dir *directoryInfo, debug bool) (err error) {\n\targs := make([]string, 0, 16)\n\tif prog.OutputType == outputSource {\n\t\targs = append(args, \"-E\") \/\/ Preprocessor\n\t} else {\n\t\targs = append(args, \"-emit-llvm\")\n\t\tif debug {\n\t\t\targs = append(args, \"-g\")\n\t\t}\n\t}\n\targs = append(args, standardCFlags...)\n\targs = append(args, progCFlags(prog, dir)...)\n\n\t\/\/ Compilation is split between two exec calls. First clang generates\n\t\/\/ LLVM bitcode and then later llc compiles it to byte-code.\n\tlog.WithFields(logrus.Fields{\n\t\t\"target\": compiler,\n\t\t\"args\": args,\n\t}).Debug(\"Launching compiler\")\n\tif prog.OutputType == outputSource {\n\t\tcompileCmd := exec.CommandContext(ctx, compiler, args...)\n\t\t_, err = compileCmd.CombinedOutput(log, debug)\n\t} else {\n\t\tswitch prog.OutputType {\n\t\tcase outputObject:\n\t\t\terr = compileAndLink(ctx, prog, dir, debug, args...)\n\t\tcase outputAssembly:\n\t\t\terr = compileAndLink(ctx, prog, dir, false, args...)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unhandled progInfo.OutputType %s\", prog.OutputType)\n\t\t}\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\thtmlTemplate \"html\/template\"\n\t\"net\/mail\"\n\t\"net\/url\"\n\ttextTemplate \"text\/template\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n\n\t\"github.com\/mozilla-services\/reaper\/filters\"\n\t\"github.com\/mozilla-services\/reaper\/reapable\"\n\tlog \"github.com\/mozilla-services\/reaper\/reaperlog\"\n\t\"github.com\/mozilla-services\/reaper\/state\"\n)\n\ntype CloudformationStack struct {\n\tAWSResource\n\tcloudformation.Stack\n}\n\nfunc NewCloudformationStack(region string, stack *cloudformation.Stack) *CloudformationStack {\n\ta := CloudformationStack{\n\t\tAWSResource: AWSResource{\n\t\t\tRegion: reapable.Region(region),\n\t\t\tID: reapable.ID(*stack.StackID),\n\t\t\tName: *stack.StackName,\n\t\t\tTags: make(map[string]string),\n\t\t\treaperState: state.NewStateWithUntil(time.Now().Add(config.Notifications.FirstStateDuration.Duration)),\n\t\t},\n\t\tStack: *stack,\n\t}\n\n\tfor i := 0; i < len(stack.Tags); i++ {\n\t\ta.AWSResource.Tags[*stack.Tags[i].Key] = *stack.Tags[i].Value\n\t}\n\n\tif a.Tagged(reaperTag) {\n\t\t\/\/ restore previously tagged state\n\t\ta.reaperState = state.NewStateWithTag(a.AWSResource.Tags[reaperTag])\n\t} else {\n\t\t\/\/ initial state\n\t\ta.reaperState = state.NewStateWithUntilAndState(\n\t\t\ttime.Now().Add(config.Notifications.FirstStateDuration.Duration),\n\t\t\tstate.FirstState)\n\t}\n\n\treturn &a\n}\n\nfunc (a *CloudformationStack) reapableEventHTML(text string) *bytes.Buffer {\n\tt := htmlTemplate.Must(htmlTemplate.New(\"reapable\").Parse(text))\n\tbuf := bytes.NewBuffer(nil)\n\n\tdata, err := a.getTemplateData()\n\terr = t.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Debug(fmt.Sprintf(\"Template generation error: %s\", err))\n\t}\n\treturn buf\n}\n\nfunc (a *CloudformationStack) reapableEventText(text string) *bytes.Buffer {\n\tt := textTemplate.Must(textTemplate.New(\"reapable\").Parse(text))\n\tbuf := bytes.NewBuffer(nil)\n\n\tdata, err := a.getTemplateData()\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"%s\", err.Error()))\n\t}\n\terr = t.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Debug(fmt.Sprintf(\"Template generation error: %s\", err))\n\t}\n\treturn buf\n}\n\nfunc (a *CloudformationStack) ReapableEventText() *bytes.Buffer {\n\treturn a.reapableEventText(reapableCloudformationEventText)\n}\n\nfunc (a *CloudformationStack) ReapableEventTextShort() *bytes.Buffer {\n\treturn a.reapableEventText(reapableCloudformationEventTextShort)\n}\n\nfunc (a *CloudformationStack) ReapableEventEmail() (owner mail.Address, subject string, body string, err error) {\n\t\/\/ if unowned, return unowned error\n\tif !a.Owned() {\n\t\terr = reapable.UnownedError{fmt.Sprintf(\"%s does not have an owner tag\", a.ReapableDescriptionShort())}\n\t\treturn\n\t}\n\n\tsubject = fmt.Sprintf(\"AWS Resource %s is going to be Reaped!\", a.ReapableDescriptionTiny())\n\towner = *a.Owner()\n\tbody = a.reapableEventHTML(reapableCloudformationEventHTML).String()\n\treturn\n}\n\nfunc (a *CloudformationStack) ReapableEventEmailShort() (owner mail.Address, body string, err error) {\n\t\/\/ if unowned, return unowned error\n\tif !a.Owned() {\n\t\terr = reapable.UnownedError{fmt.Sprintf(\"%s does not have an owner tag\", a.ReapableDescriptionShort())}\n\t\treturn\n\t}\n\towner = *a.Owner()\n\tbody = a.reapableEventHTML(reapableCloudformationEventHTMLShort).String()\n\treturn\n}\n\ntype CloudformationStackEventData struct {\n\tConfig *AWSConfig\n\tCloudformationStack *CloudformationStack\n\tTerminateLink string\n\tStopLink string\n\tForceStopLink string\n\tWhitelistLink string\n\tIgnoreLink1 string\n\tIgnoreLink3 string\n\tIgnoreLink7 string\n}\n\nfunc (a *CloudformationStack) getTemplateData() (*CloudformationStackEventData, error) {\n\tignore1, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(1*24*time.Hour))\n\tignore3, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(3*24*time.Hour))\n\tignore7, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(7*24*time.Hour))\n\tterminate, err := MakeTerminateLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\tstop, err := MakeStopLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\tforcestop, err := MakeForceStopLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\twhitelist, err := MakeWhitelistLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &CloudformationStackEventData{\n\t\tConfig: config,\n\t\tCloudformationStack: a,\n\t\tTerminateLink: terminate,\n\t\tStopLink: stop,\n\t\tForceStopLink: forcestop,\n\t\tWhitelistLink: whitelist,\n\t\tIgnoreLink1: ignore1,\n\t\tIgnoreLink3: ignore3,\n\t\tIgnoreLink7: ignore7,\n\t}, nil\n}\n\nconst reapableCloudformationEventHTML = `\n<html>\n<body>\n\t<p>CloudformationStack <a href=\"{{ .CloudformationStack.AWSConsoleURL }}\">{{ if .CloudformationStack.Name }}\"{{.CloudformationStack.Name}}\" {{ end }} in {{.CloudformationStack.Region}}<\/a> is scheduled to be terminated.<\/p>\n\n\t<p>\n\t\tYou can ignore this message and your CloudformationStack will advance to the next state after <strong>{{.CloudformationStack.ReaperState.Until}}<\/strong>. If you do not take action it will be terminated!\n\t<\/p>\n\n\t<p>\n\t\tYou may also choose to:\n\t\t<ul>\n\t\t\t<li><a href=\"{{ .TerminateLink }}\">Terminate it now<\/a><\/li>\n\t\t\t<li><a href=\"{{ .StopLink }}\">Scale it to 0<\/a><\/li>\n\t\t\t<li><a href=\"{{ .ForceStopLink }}\">ForceScale it to 0<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink1 }}\">Ignore it for 1 more day<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink3 }}\">Ignore it for 3 more days<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink7}}\">Ignore it for 7 more days<\/a><\/li>\n\t\t<\/ul>\n\t<\/p>\n\n\t<p>\n\t\tIf you want the Reaper to ignore this CloudformationStack tag it with {{ .Config.WhitelistTag }} with any value, or click <a href=\"{{ .WhitelistLink }}\">here<\/a>.\n\t<\/p>\n<\/body>\n<\/html>\n`\n\nconst reapableCloudformationEventHTMLShort = `\n<html>\n<body>\n\t<p>CloudformationStack <a href=\"{{ .CloudformationStack.AWSConsoleURL }}\">{{ if .CloudformationStack.Name }}\"{{.CloudformationStack.Name}}\" {{ end }}<\/a> in {{.CloudformationStack.Region}}<\/a> is scheduled to be terminated after <strong>{{.CloudformationStack.ReaperState.Until}}<\/strong>.\n\t\t<br \/>\n\t\t<a href=\"{{ .TerminateLink }}\">Terminate<\/a>, \n\t\t<a href=\"{{ .StopLink }}\">Stop<\/a>, \n\t\t<a href=\"{{ .IgnoreLink1 }}\">Ignore it for 1 more day<\/a>, \n\t\t<a href=\"{{ .IgnoreLink3 }}\">3 days<\/a>, \n\t\t<a href=\"{{ .IgnoreLink7}}\"> 7 days<\/a>, or \n\t\t<a href=\"{{ .WhitelistLink }}\">Whitelist<\/a> it.\n\t<\/p>\n<\/body>\n<\/html>\n`\n\nconst reapableCloudformationEventTextShort = `%%%\nCloudformationStack [{{.CloudformationStack.ID}}]({{.CloudformationStack.AWSConsoleURL}}) in region: [{{.CloudformationStack.Region}}](https:\/\/{{.CloudformationStack.Region}}.console.aws.amazon.com\/ec2\/v2\/home?region={{.CloudformationStack.Region}}).{{if .CloudformationStack.Owned}} Owned by {{.CloudformationStack.Owner}}.\\n{{end}}\n[Whitelist]({{ .WhitelistLink }}), or [Terminate]({{ .TerminateLink }}) this CloudformationStack.\n%%%`\n\nconst reapableCloudformationEventText = `%%%\nReaper has discovered an CloudformationStack qualified as reapable: [{{.CloudformationStack.ID}}]({{.CloudformationStack.AWSConsoleURL}}) in region: [{{.CloudformationStack.Region}}](https:\/\/{{.CloudformationStack.Region}}.console.aws.amazon.com\/ec2\/v2\/home?region={{.CloudformationStack.Region}}).\\n\n{{if .CloudformationStack.Owned}}Owned by {{.CloudformationStack.Owner}}.\\n{{end}}\n{{ if .CloudformationStack.AWSConsoleURL}}{{.CloudformationStack.AWSConsoleURL}}\\n{{end}}\n[AWS Console URL]({{.CloudformationStack.AWSConsoleURL}})\\n\n[Whitelist]({{ .WhitelistLink }}) this CloudformationStack.\n[Terminate]({{ .TerminateLink }}) this CloudformationStack.\n%%%`\n\n\/\/ method for reapable -> overrides promoted AWSResource method of same name?\nfunc (a *CloudformationStack) Save(s *state.State) (bool, error) {\n\treturn a.tagReaperState(a.Region, a.ID, a.ReaperState())\n}\n\n\/\/ method for reapable -> overrides promoted AWSResource method of same name?\nfunc (a *CloudformationStack) Unsave() (bool, error) {\n\tlog.Notice(\"Unsaving %s\", a.ReapableDescriptionTiny())\n\treturn a.untagReaperState(a.Region, a.ID, a.ReaperState())\n}\n\nfunc (a *CloudformationStack) untagReaperState(region reapable.Region, id reapable.ID, newState *state.State) (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) tagReaperState(region reapable.Region, id reapable.ID, newState *state.State) (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) Filter(filter filters.Filter) bool {\n\tmatched := false\n\t\/\/ map function names to function calls\n\tswitch filter.Function {\n\tcase \"Status\":\n\t\tif *a.StackStatus == filter.Arguments[0] {\n\t\t\t\/\/ one of:\n\t\t\t\/\/ CREATE_COMPLETE\n\t\t\t\/\/ CREATE_IN_PROGRESS\n\t\t\t\/\/ CREATE_FAILED\n\t\t\t\/\/ DELETE_COMPLETE\n\t\t\t\/\/ DELETE_FAILED\n\t\t\t\/\/ DELETE_IN_PROGRESS\n\t\t\t\/\/ ROLLBACK_COMPLETE\n\t\t\t\/\/ ROLLBACK_FAILED\n\t\t\t\/\/ ROLLBACK_IN_PROGRESS\n\t\t\t\/\/ UPDATE_COMPLETE\n\t\t\t\/\/ UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\n\t\t\t\/\/ UPDATE_IN_PROGRESS\n\t\t\t\/\/ UPDATE_ROLLBACK_COMPLETE\n\t\t\t\/\/ UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS\n\t\t\t\/\/ UPDATE_ROLLBACK_FAILED\n\t\t\t\/\/ UPDATE_ROLLBACK_IN_PROGRESS\n\t\t\tmatched = true\n\t\t}\n\tcase \"NotStatus\":\n\t\tif *a.StackStatus != filter.Arguments[0] {\n\t\t\tmatched = true\n\t\t}\n\tcase \"Tagged\":\n\t\tif a.Tagged(filter.Arguments[0]) {\n\t\t\tmatched = true\n\t\t}\n\tcase \"NotTagged\":\n\t\tif !a.Tagged(filter.Arguments[0]) {\n\t\t\tmatched = true\n\t\t}\n\tcase \"TagNotEqual\":\n\t\tif a.Tag(filter.Arguments[0]) != filter.Arguments[1] {\n\t\t\tmatched = true\n\t\t}\n\tcase \"CreatedTimeInTheLast\":\n\t\td, err := time.ParseDuration(filter.Arguments[0])\n\t\tif err == nil && time.Since(*a.CreationTime) < d {\n\t\t\tmatched = true\n\t\t}\n\tcase \"CreatedTimeNotInTheLast\":\n\t\td, err := time.ParseDuration(filter.Arguments[0])\n\t\tif err == nil && time.Since(*a.CreationTime) > d {\n\t\t\tmatched = true\n\t\t}\n\tdefault:\n\t\tlog.Error(fmt.Sprintf(\"No function %s could be found for filtering CloudformationStacks.\", filter.Function))\n\t}\n\treturn matched\n}\n\nfunc (a *CloudformationStack) AWSConsoleURL() *url.URL {\n\turl, err := url.Parse(fmt.Sprintf(\"https:\/\/%s.console.aws.amazon.com\/cloudformation\/home?region=%s#\/stacks?filter=active&tab=overview&stackId=%s\",\n\t\tstring(a.Region), string(a.Region), url.QueryEscape(string(a.ID))))\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Error generating AWSConsoleURL. %s\", err))\n\t}\n\treturn url\n}\n\nfunc (a *CloudformationStack) Terminate() (bool, error) {\n\tlog.Notice(\"Terminating CloudformationStack %s\", a.ReapableDescriptionTiny())\n\tas := cloudformation.New(&aws.Config{Region: string(a.Region)})\n\n\tstringID := string(a.ID)\n\n\tinput := &cloudformation.DeleteStackInput{\n\t\tStackName: &stringID,\n\t}\n\t_, err := as.DeleteStack(input)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"could not delete CloudformationStack %s\", a.ReapableDescriptionTiny()))\n\t\treturn false, err\n\t}\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) Stop() (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) ForceStop() (bool, error) {\n\treturn false, nil\n}\n<commit_msg>update notification text for cloudformations<commit_after>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\thtmlTemplate \"html\/template\"\n\t\"net\/mail\"\n\t\"net\/url\"\n\ttextTemplate \"text\/template\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n\n\t\"github.com\/mozilla-services\/reaper\/filters\"\n\t\"github.com\/mozilla-services\/reaper\/reapable\"\n\tlog \"github.com\/mozilla-services\/reaper\/reaperlog\"\n\t\"github.com\/mozilla-services\/reaper\/state\"\n)\n\ntype CloudformationStack struct {\n\tAWSResource\n\tcloudformation.Stack\n}\n\nfunc NewCloudformationStack(region string, stack *cloudformation.Stack) *CloudformationStack {\n\ta := CloudformationStack{\n\t\tAWSResource: AWSResource{\n\t\t\tRegion: reapable.Region(region),\n\t\t\tID: reapable.ID(*stack.StackID),\n\t\t\tName: *stack.StackName,\n\t\t\tTags: make(map[string]string),\n\t\t\treaperState: state.NewStateWithUntil(time.Now().Add(config.Notifications.FirstStateDuration.Duration)),\n\t\t},\n\t\tStack: *stack,\n\t}\n\n\tfor i := 0; i < len(stack.Tags); i++ {\n\t\ta.AWSResource.Tags[*stack.Tags[i].Key] = *stack.Tags[i].Value\n\t}\n\n\tif a.Tagged(reaperTag) {\n\t\t\/\/ restore previously tagged state\n\t\ta.reaperState = state.NewStateWithTag(a.AWSResource.Tags[reaperTag])\n\t} else {\n\t\t\/\/ initial state\n\t\ta.reaperState = state.NewStateWithUntilAndState(\n\t\t\ttime.Now().Add(config.Notifications.FirstStateDuration.Duration),\n\t\t\tstate.FirstState)\n\t}\n\n\treturn &a\n}\n\nfunc (a *CloudformationStack) reapableEventHTML(text string) *bytes.Buffer {\n\tt := htmlTemplate.Must(htmlTemplate.New(\"reapable\").Parse(text))\n\tbuf := bytes.NewBuffer(nil)\n\n\tdata, err := a.getTemplateData()\n\terr = t.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Debug(fmt.Sprintf(\"Template generation error: %s\", err))\n\t}\n\treturn buf\n}\n\nfunc (a *CloudformationStack) reapableEventText(text string) *bytes.Buffer {\n\tt := textTemplate.Must(textTemplate.New(\"reapable\").Parse(text))\n\tbuf := bytes.NewBuffer(nil)\n\n\tdata, err := a.getTemplateData()\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"%s\", err.Error()))\n\t}\n\terr = t.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Debug(fmt.Sprintf(\"Template generation error: %s\", err))\n\t}\n\treturn buf\n}\n\nfunc (a *CloudformationStack) ReapableEventText() *bytes.Buffer {\n\treturn a.reapableEventText(reapableCloudformationEventText)\n}\n\nfunc (a *CloudformationStack) ReapableEventTextShort() *bytes.Buffer {\n\treturn a.reapableEventText(reapableCloudformationEventTextShort)\n}\n\nfunc (a *CloudformationStack) ReapableEventEmail() (owner mail.Address, subject string, body string, err error) {\n\t\/\/ if unowned, return unowned error\n\tif !a.Owned() {\n\t\terr = reapable.UnownedError{fmt.Sprintf(\"%s does not have an owner tag\", a.ReapableDescriptionShort())}\n\t\treturn\n\t}\n\n\tsubject = fmt.Sprintf(\"AWS Resource %s is going to be Reaped!\", a.ReapableDescriptionTiny())\n\towner = *a.Owner()\n\tbody = a.reapableEventHTML(reapableCloudformationEventHTML).String()\n\treturn\n}\n\nfunc (a *CloudformationStack) ReapableEventEmailShort() (owner mail.Address, body string, err error) {\n\t\/\/ if unowned, return unowned error\n\tif !a.Owned() {\n\t\terr = reapable.UnownedError{fmt.Sprintf(\"%s does not have an owner tag\", a.ReapableDescriptionShort())}\n\t\treturn\n\t}\n\towner = *a.Owner()\n\tbody = a.reapableEventHTML(reapableCloudformationEventHTMLShort).String()\n\treturn\n}\n\ntype CloudformationStackEventData struct {\n\tConfig *AWSConfig\n\tCloudformationStack *CloudformationStack\n\tTerminateLink string\n\tStopLink string\n\tForceStopLink string\n\tWhitelistLink string\n\tIgnoreLink1 string\n\tIgnoreLink3 string\n\tIgnoreLink7 string\n}\n\nfunc (a *CloudformationStack) getTemplateData() (*CloudformationStackEventData, error) {\n\tignore1, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(1*24*time.Hour))\n\tignore3, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(3*24*time.Hour))\n\tignore7, err := MakeIgnoreLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL, time.Duration(7*24*time.Hour))\n\tterminate, err := MakeTerminateLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\tstop, err := MakeStopLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\tforcestop, err := MakeForceStopLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\twhitelist, err := MakeWhitelistLink(a.Region, a.ID, config.HTTP.TokenSecret, config.HTTP.ApiURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &CloudformationStackEventData{\n\t\tConfig: config,\n\t\tCloudformationStack: a,\n\t\tTerminateLink: terminate,\n\t\tStopLink: stop,\n\t\tForceStopLink: forcestop,\n\t\tWhitelistLink: whitelist,\n\t\tIgnoreLink1: ignore1,\n\t\tIgnoreLink3: ignore3,\n\t\tIgnoreLink7: ignore7,\n\t}, nil\n}\n\nconst reapableCloudformationEventHTML = `\n<html>\n<body>\n\t<p>Cloudformation <a href=\"{{ .CloudformationStack.AWSConsoleURL }}\">{{ if .CloudformationStack.Name }}\"{{.CloudformationStack.Name}}\" {{ end }} in {{.CloudformationStack.Region}}<\/a> is scheduled to be terminated.<\/p>\n\n\t<p>\n\t\tYou can ignore this message and your Cloudformation will advance to the next state after <strong>{{.CloudformationStack.ReaperState.Until}}<\/strong>. If you do not take action it will be terminated!\n\t<\/p>\n\n\t<p>\n\t\tYou may also choose to:\n\t\t<ul>\n\t\t\t<li><a href=\"{{ .TerminateLink }}\">Terminate it now<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink1 }}\">Ignore it for 1 more day<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink3 }}\">Ignore it for 3 more days<\/a><\/li>\n\t\t\t<li><a href=\"{{ .IgnoreLink7}}\">Ignore it for 7 more days<\/a><\/li>\n\t\t<\/ul>\n\t<\/p>\n\n\t<p>\n\t\tIf you want the Reaper to ignore this Cloudformation tag it with {{ .Config.WhitelistTag }} with any value, or click <a href=\"{{ .WhitelistLink }}\">here<\/a>.\n\t<\/p>\n<\/body>\n<\/html>\n`\n\nconst reapableCloudformationEventHTMLShort = `\n<html>\n<body>\n\t<p>Cloudformation <a href=\"{{ .CloudformationStack.AWSConsoleURL }}\">{{ if .CloudformationStack.Name }}\"{{.CloudformationStack.Name}}\" {{ end }}<\/a> in {{.CloudformationStack.Region}}<\/a> is scheduled to be terminated after <strong>{{.CloudformationStack.ReaperState.Until}}<\/strong>.\n\t\t<br \/>\n\t\t<a href=\"{{ .TerminateLink }}\">Terminate<\/a>, \n\t\t<a href=\"{{ .IgnoreLink1 }}\">Ignore it for 1 more day<\/a>, \n\t\t<a href=\"{{ .IgnoreLink3 }}\">3 days<\/a>, \n\t\t<a href=\"{{ .IgnoreLink7}}\"> 7 days<\/a>, or \n\t\t<a href=\"{{ .WhitelistLink }}\">Whitelist<\/a> it.\n\t<\/p>\n<\/body>\n<\/html>\n`\n\nconst reapableCloudformationEventTextShort = `%%%\nCloudformation [{{.CloudformationStack.ID}}]({{.CloudformationStack.AWSConsoleURL}}) in region: [{{.CloudformationStack.Region}}](https:\/\/{{.CloudformationStack.Region}}.console.aws.amazon.com\/ec2\/v2\/home?region={{.CloudformationStack.Region}}).{{if .CloudformationStack.Owned}} Owned by {{.CloudformationStack.Owner}}.\\n{{end}}\n[Whitelist]({{ .WhitelistLink }}), or [Terminate]({{ .TerminateLink }}) this Cloudformation.\n%%%`\n\nconst reapableCloudformationEventText = `%%%\nReaper has discovered a Cloudformation qualified as reapable: [{{.CloudformationStack.ID}}]({{.CloudformationStack.AWSConsoleURL}}) in region: [{{.CloudformationStack.Region}}](https:\/\/{{.CloudformationStack.Region}}.console.aws.amazon.com\/ec2\/v2\/home?region={{.CloudformationStack.Region}}).\\n\n{{if .CloudformationStack.Owned}}Owned by {{.CloudformationStack.Owner}}.\\n{{end}}\n{{ if .CloudformationStack.AWSConsoleURL}}{{.CloudformationStack.AWSConsoleURL}}\\n{{end}}\n[AWS Console URL]({{.CloudformationStack.AWSConsoleURL}})\\n\n[Whitelist]({{ .WhitelistLink }}) this Cloudformation.\n[Terminate]({{ .TerminateLink }}) this Cloudformation.\n%%%`\n\n\/\/ method for reapable -> overrides promoted AWSResource method of same name?\nfunc (a *CloudformationStack) Save(s *state.State) (bool, error) {\n\treturn a.tagReaperState(a.Region, a.ID, a.ReaperState())\n}\n\n\/\/ method for reapable -> overrides promoted AWSResource method of same name?\nfunc (a *CloudformationStack) Unsave() (bool, error) {\n\tlog.Notice(\"Unsaving %s\", a.ReapableDescriptionTiny())\n\treturn a.untagReaperState(a.Region, a.ID, a.ReaperState())\n}\n\nfunc (a *CloudformationStack) untagReaperState(region reapable.Region, id reapable.ID, newState *state.State) (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) tagReaperState(region reapable.Region, id reapable.ID, newState *state.State) (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) Filter(filter filters.Filter) bool {\n\tmatched := false\n\t\/\/ map function names to function calls\n\tswitch filter.Function {\n\tcase \"Status\":\n\t\tif *a.StackStatus == filter.Arguments[0] {\n\t\t\t\/\/ one of:\n\t\t\t\/\/ CREATE_COMPLETE\n\t\t\t\/\/ CREATE_IN_PROGRESS\n\t\t\t\/\/ CREATE_FAILED\n\t\t\t\/\/ DELETE_COMPLETE\n\t\t\t\/\/ DELETE_FAILED\n\t\t\t\/\/ DELETE_IN_PROGRESS\n\t\t\t\/\/ ROLLBACK_COMPLETE\n\t\t\t\/\/ ROLLBACK_FAILED\n\t\t\t\/\/ ROLLBACK_IN_PROGRESS\n\t\t\t\/\/ UPDATE_COMPLETE\n\t\t\t\/\/ UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\n\t\t\t\/\/ UPDATE_IN_PROGRESS\n\t\t\t\/\/ UPDATE_ROLLBACK_COMPLETE\n\t\t\t\/\/ UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS\n\t\t\t\/\/ UPDATE_ROLLBACK_FAILED\n\t\t\t\/\/ UPDATE_ROLLBACK_IN_PROGRESS\n\t\t\tmatched = true\n\t\t}\n\tcase \"NotStatus\":\n\t\tif *a.StackStatus != filter.Arguments[0] {\n\t\t\tmatched = true\n\t\t}\n\tcase \"Tagged\":\n\t\tif a.Tagged(filter.Arguments[0]) {\n\t\t\tmatched = true\n\t\t}\n\tcase \"NotTagged\":\n\t\tif !a.Tagged(filter.Arguments[0]) {\n\t\t\tmatched = true\n\t\t}\n\tcase \"TagNotEqual\":\n\t\tif a.Tag(filter.Arguments[0]) != filter.Arguments[1] {\n\t\t\tmatched = true\n\t\t}\n\tcase \"CreatedTimeInTheLast\":\n\t\td, err := time.ParseDuration(filter.Arguments[0])\n\t\tif err == nil && time.Since(*a.CreationTime) < d {\n\t\t\tmatched = true\n\t\t}\n\tcase \"CreatedTimeNotInTheLast\":\n\t\td, err := time.ParseDuration(filter.Arguments[0])\n\t\tif err == nil && time.Since(*a.CreationTime) > d {\n\t\t\tmatched = true\n\t\t}\n\tdefault:\n\t\tlog.Error(fmt.Sprintf(\"No function %s could be found for filtering CloudformationStacks.\", filter.Function))\n\t}\n\treturn matched\n}\n\nfunc (a *CloudformationStack) AWSConsoleURL() *url.URL {\n\turl, err := url.Parse(fmt.Sprintf(\"https:\/\/%s.console.aws.amazon.com\/cloudformation\/home?region=%s#\/stacks?filter=active&tab=overview&stackId=%s\",\n\t\tstring(a.Region), string(a.Region), url.QueryEscape(string(a.ID))))\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Error generating AWSConsoleURL. %s\", err))\n\t}\n\treturn url\n}\n\nfunc (a *CloudformationStack) Terminate() (bool, error) {\n\tlog.Notice(\"Terminating CloudformationStack %s\", a.ReapableDescriptionTiny())\n\tas := cloudformation.New(&aws.Config{Region: string(a.Region)})\n\n\tstringID := string(a.ID)\n\n\tinput := &cloudformation.DeleteStackInput{\n\t\tStackName: &stringID,\n\t}\n\t_, err := as.DeleteStack(input)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"could not delete CloudformationStack %s\", a.ReapableDescriptionTiny()))\n\t\treturn false, err\n\t}\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) Stop() (bool, error) {\n\treturn false, nil\n}\n\nfunc (a *CloudformationStack) ForceStop() (bool, error) {\n\treturn false, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package response\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\ntype jsonResponse struct {\n\tName string `json:\"name\"`\n}\n\nfunc TestAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithPayload(r.Context(), jsonResponse{\"John\"})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tcmp := bytes.Compare(w.Body.Bytes(), append([]byte(`{\"name\":\"John\"}`), 10))\n\tif cmp != 0 {\n\t\tt.Errorf(\"AsJSON returned wrong body: %s | %d\", w.Body.String(), cmp)\n\t}\n}\n\nfunc TestErrorAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithError(r.Context(), HTTPError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tError: errors.New(\"response error\"),\n\t\t\tMessage: \"Invalid request\",\n\t\t})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\t}\n}\n\nfunc TestErrorPointerAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithPayload(r.Context(), &HTTPError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tError: errors.New(\"response error\"),\n\t\t\tMessage: \"Invalid request\",\n\t\t})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\t}\n}\n\nfunc TestInvalidPayloadAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithPayload(r.Context(), nil)\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tif w.Code != http.StatusInternalServerError {\n\t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\t}\n}\n<commit_msg>Comment out unfinished test<commit_after>package response\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\ntype jsonResponse struct {\n\tName string `json:\"name\"`\n}\n\nfunc TestAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithPayload(r.Context(), jsonResponse{\"John\"})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tcmp := bytes.Compare(w.Body.Bytes(), append([]byte(`{\"name\":\"John\"}`), 10))\n\tif cmp != 0 {\n\t\tt.Errorf(\"AsJSON returned wrong body: %s | %d\", w.Body.String(), cmp)\n\t}\n}\n\nfunc TestErrorAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithError(r.Context(), HTTPError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tError: errors.New(\"response error\"),\n\t\t\tMessage: \"Invalid request\",\n\t\t})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\t}\n}\n\nfunc TestErrorPointerAsJSON(t *testing.T) {\n\th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\t\tWithPayload(r.Context(), &HTTPError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tError: errors.New(\"response error\"),\n\t\t\tMessage: \"Invalid request\",\n\t\t})\n\t}))\n\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\th.ServeHTTP(w, req)\n\n\theader := w.Header()\n\tif header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tt.Error(\"AsJSON did not set proper headers\")\n\t}\n\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\t}\n}\n\n\/\/ func TestInvalidPayloadAsJSON(t *testing.T) {\n\/\/ \th := AsJSON(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {\n\/\/ \t\tWithPayload(r.Context(), nil)\n\/\/ \t}))\n\n\/\/ \tw := httptest.NewRecorder()\n\/\/ \treq, err := http.NewRequest(\"GET\", \"\/x\", nil)\n\/\/ \tif err != nil {\n\/\/ \t\tt.Fatal(err)\n\/\/ \t}\n\n\/\/ \th.ServeHTTP(w, req)\n\n\/\/ \theader := w.Header()\n\/\/ \tif header.Get(\"Content-Type\") != \"application\/json\" {\n\/\/ \t\tt.Error(\"AsJSON did not set proper headers\")\n\/\/ \t}\n\n\/\/ \tif w.Code != http.StatusInternalServerError {\n\/\/ \t\tt.Errorf(\"AsJSON error code not handled %d\", w.Code)\n\/\/ \t}\n\/\/ }\n<|endoftext|>"} {"text":"<commit_before>package tracer\n\nimport (\n\t\"context\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/opentracing\/opentracing-go\/log\"\n)\n\nvar DebugEnabled = int64(0)\n\nvar (\n\tNopSegment Segment = nopSegment{}\n)\n\nfunc SetDebug(enabled bool) {\n\tif enabled {\n\t\tatomic.StoreInt64(&DebugEnabled, 1)\n\t} else {\n\t\tatomic.StoreInt64(&DebugEnabled, 0)\n\t}\n}\n\ntype logger interface {\n\tInfo(msg string, fields ...log.Field)\n\tDebug(msg string, fields ...log.Field)\n}\n\ntype Segment interface {\n\tFinish()\n\tLogFields(fields ...log.Field)\n\tSetBaggageItem(key, value string)\n\tInfo(msg string, fields ...log.Field)\n\tDebug(msg string, fields ...log.Field)\n}\n\ntype segment struct {\n\tspan opentracing.Span\n\trecords []opentracing.LogRecord\n}\n\nfunc (s *segment) Finish() {\n\tif s == nil {\n\t\treturn\n\t}\n\n\ts.span.FinishWithOptions(opentracing.FinishOptions{\n\t\tFinishTime: time.Now(),\n\t\tLogRecords: s.records,\n\t})\n}\n\nfunc (s *segment) LogFields(fields ...log.Field) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\ts.span.LogFields(fields...)\n}\n\nfunc (s *segment) SetBaggageItem(key, value string) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\ts.span.SetBaggageItem(key, value)\n}\n\nfunc (s *segment) Info(msg string, fields ...log.Field) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tif v, ok := s.span.(logger); ok {\n\t\tv.Info(msg, fields...)\n\t}\n}\n\nfunc (s *segment) Debug(msg string, fields ...log.Field) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tif v, ok := s.span.(logger); ok {\n\t\tv.Debug(msg, fields...)\n\t}\n}\n\nfunc Caller(key string, skip int) log.Field {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn log.String(key, filepath.Base(filepath.Dir(file))+\"\/\"+filepath.Base(file)+\":\"+strconv.Itoa(line))\n}\n\nfunc NewSegment(ctx context.Context, operationName string, fields ...log.Field) (Segment, context.Context) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, operationName)\n\tspan.LogFields(fields...)\n\treturn &segment{span: span}, ctx\n}\n\n\/\/ SegmentFromContext returns an existing segment from the context\nfunc SegmentFromContext(ctx context.Context) Segment {\n\tspan := opentracing.SpanFromContext(ctx)\n\treturn &segment{span: span}\n}\n\ntype nopSegment struct{}\n\nfunc (nopSegment) Finish() {}\nfunc (nopSegment) LogFields(fields ...log.Field) {}\nfunc (nopSegment) SetBaggageItem(key, value string) {}\nfunc (nopSegment) Info(msg string, fields ...log.Field) {}\nfunc (nopSegment) Debug(msg string, fields ...log.Field) {}\n<commit_msg>- segment now handles nil underlying target<commit_after>package tracer\n\nimport (\n\t\"context\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/opentracing\/opentracing-go\/log\"\n)\n\nvar DebugEnabled = int64(0)\n\nvar (\n\tNopSegment Segment = nopSegment{}\n)\n\nfunc SetDebug(enabled bool) {\n\tif enabled {\n\t\tatomic.StoreInt64(&DebugEnabled, 1)\n\t} else {\n\t\tatomic.StoreInt64(&DebugEnabled, 0)\n\t}\n}\n\ntype logger interface {\n\tInfo(msg string, fields ...log.Field)\n\tDebug(msg string, fields ...log.Field)\n}\n\ntype Segment interface {\n\tFinish()\n\tLogFields(fields ...log.Field)\n\tSetBaggageItem(key, value string)\n\tInfo(msg string, fields ...log.Field)\n\tDebug(msg string, fields ...log.Field)\n}\n\ntype segment struct {\n\tspan opentracing.Span\n\trecords []opentracing.LogRecord\n}\n\nfunc (s *segment) Finish() {\n\tif s == nil || s.span == nil {\n\t\treturn\n\t}\n\n\ts.span.FinishWithOptions(opentracing.FinishOptions{\n\t\tFinishTime: time.Now(),\n\t\tLogRecords: s.records,\n\t})\n}\n\nfunc (s *segment) LogFields(fields ...log.Field) {\n\tif s == nil || s.span == nil {\n\t\treturn\n\t}\n\n\ts.span.LogFields(fields...)\n}\n\nfunc (s *segment) SetBaggageItem(key, value string) {\n\tif s == nil || s.span == nil {\n\t\treturn\n\t}\n\n\ts.span.SetBaggageItem(key, value)\n}\n\nfunc (s *segment) Info(msg string, fields ...log.Field) {\n\tif s == nil || s.span == nil {\n\t\treturn\n\t}\n\n\tif v, ok := s.span.(logger); ok {\n\t\tv.Info(msg, fields...)\n\t}\n}\n\nfunc (s *segment) Debug(msg string, fields ...log.Field) {\n\tif s == nil || s.span == nil {\n\t\treturn\n\t}\n\n\tif v, ok := s.span.(logger); ok {\n\t\tv.Debug(msg, fields...)\n\t}\n}\n\nfunc Caller(key string, skip int) log.Field {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn log.String(key, filepath.Base(filepath.Dir(file))+\"\/\"+filepath.Base(file)+\":\"+strconv.Itoa(line))\n}\n\nfunc NewSegment(ctx context.Context, operationName string, fields ...log.Field) (Segment, context.Context) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, operationName)\n\tspan.LogFields(fields...)\n\treturn &segment{span: span}, ctx\n}\n\n\/\/ SegmentFromContext returns an existing segment from the context\nfunc SegmentFromContext(ctx context.Context) Segment {\n\tspan := opentracing.SpanFromContext(ctx)\n\treturn &segment{span: span}\n}\n\ntype nopSegment struct{}\n\nfunc (nopSegment) Finish() {}\nfunc (nopSegment) LogFields(fields ...log.Field) {}\nfunc (nopSegment) SetBaggageItem(key, value string) {}\nfunc (nopSegment) Info(msg string, fields ...log.Field) {}\nfunc (nopSegment) Debug(msg string, fields ...log.Field) {}\n<|endoftext|>"} {"text":"<commit_before>package secrets\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\ttestclient \"k8s.io\/client-go\/kubernetes\/fake\"\n)\n\nfunc TestClient_Create(t *testing.T) {\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []CreateOption\n\t\tCreateErr error\n\t\tExpectErr error\n\t\tExpectNamespace string\n\t\tExpectStringData map[string]string\n\t\tExpectData map[string][]byte\n\t}{\n\t\t\"use default namespace by default\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tExpectNamespace: \"default\",\n\t\t\tExpectStringData: map[string]string{\"username\": \"user\", \"password\": \"pass\"},\n\t\t},\n\t\t\"basicauth\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"supersecret\"),\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tExpectNamespace: \"supersecret\",\n\t\t\tExpectStringData: map[string]string{\"username\": \"user\", \"password\": \"pass\"},\n\t\t},\n\t\t\"error\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"default\"),\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tCreateErr: errors.New(\"some-error\"),\n\t\t\tExpectErr: errors.New(\"some-error\"),\n\t\t},\n\t\t\"non-string data\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"default\"),\n\t\t\t\tWithCreateData(map[string][]byte{\"username\": []byte(\"user\"), \"password\": []byte(\"pass\")}),\n\t\t\t},\n\t\t\tExpectData: map[string][]byte{\"username\": []byte(\"user\"), \"password\": []byte(\"pass\")},\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.CreateErr\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\n\t\t\tactualErr := secretsClient.Create(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsecret, err := mockK8s.CoreV1().Secrets(tc.ExpectNamespace).Get(tc.Name, v1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(secret.StringData, tc.ExpectStringData) {\n\t\t\t\tt.Errorf(\"Expected StringData %#v got: %#v\", tc.ExpectStringData, secret.StringData)\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(secret.Data, tc.ExpectData) {\n\t\t\t\tt.Errorf(\"Expected data %#v got: %#v\", tc.ExpectData, secret.Data)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_Get(t *testing.T) {\n\tdummySecret := map[string][]byte{\n\t\t\"foo\": []byte(\"foo\"),\n\t}\n\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []GetOption\n\t\tFactoryErr error\n\t\tExpectErr error\n\t\tExpectSecrets map[string][]byte\n\t\tSetup func(mockK8s kubernetes.Interface)\n\t}{\n\t\t\"uses default namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tExpectSecrets: dummySecret,\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{Data: dummySecret}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"default\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"custom namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{\n\t\t\t\tWithGetNamespace(\"custom-namespace\"),\n\t\t\t},\n\t\t\tExpectSecrets: dummySecret,\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{Data: dummySecret}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"custom-namespace\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"secret does not exist\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tExpectErr: errors.New(`secrets \"some-secret\" not found`),\n\t\t},\n\t\t\"client creation error\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tFactoryErr: errors.New(`some-error`),\n\t\t\tExpectErr: errors.New(`some-error`),\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.FactoryErr\n\t\t\t}\n\n\t\t\tif tc.Setup != nil {\n\t\t\t\ttc.Setup(mockK8s)\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\t\t\tactualSecrets, actualErr := secretsClient.Get(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(tc.ExpectSecrets, actualSecrets) {\n\t\t\t\tt.Errorf(\"wanted secrets: %v, got: %v\", tc.ExpectSecrets, actualSecrets)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_Delete(t *testing.T) {\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []DeleteOption\n\t\tDeleteErr error\n\t\tExpectErr error\n\t\tSetup func(mockK8s kubernetes.Interface)\n\t}{\n\t\t\"secret does not exist\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"default\"),\n\t\t\t},\n\t\t\tExpectErr: errors.New(`secrets \"some-secret\" not found`),\n\t\t},\n\t\t\"client creation error\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"default\"),\n\t\t\t},\n\t\t\tDeleteErr: errors.New(`some-error`),\n\t\t\tExpectErr: errors.New(`some-error`),\n\t\t},\n\t\t\"secret exists\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"my-namespace\"),\n\t\t\t},\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"my-namespace\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"uses default namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{},\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"default\").Create(secret)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.DeleteErr\n\t\t\t}\n\n\t\t\tif tc.Setup != nil {\n\t\t\t\ttc.Setup(mockK8s)\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\t\t\tactualErr := secretsClient.Delete(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsecrets, err := mockK8s.CoreV1().Secrets(DeleteOptions(tc.Options).Namespace()).List(v1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tfor _, s := range secrets.Items {\n\t\t\t\tif s.Name == tc.Name {\n\t\t\t\t\tt.Fatal(\"The secret wasn't deleted\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>secrets: add t.Parallel() to each test<commit_after>package secrets\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\ttestclient \"k8s.io\/client-go\/kubernetes\/fake\"\n)\n\nfunc TestClient_Create(t *testing.T) {\n\tt.Parallel()\n\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []CreateOption\n\t\tCreateErr error\n\t\tExpectErr error\n\t\tExpectNamespace string\n\t\tExpectStringData map[string]string\n\t\tExpectData map[string][]byte\n\t}{\n\t\t\"use default namespace by default\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tExpectNamespace: \"default\",\n\t\t\tExpectStringData: map[string]string{\"username\": \"user\", \"password\": \"pass\"},\n\t\t},\n\t\t\"basicauth\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"supersecret\"),\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tExpectNamespace: \"supersecret\",\n\t\t\tExpectStringData: map[string]string{\"username\": \"user\", \"password\": \"pass\"},\n\t\t},\n\t\t\"error\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"default\"),\n\t\t\t\tWithCreateStringData(map[string]string{\"username\": \"user\", \"password\": \"pass\"}),\n\t\t\t},\n\t\t\tCreateErr: errors.New(\"some-error\"),\n\t\t\tExpectErr: errors.New(\"some-error\"),\n\t\t},\n\t\t\"non-string data\": {\n\t\t\tName: \"broker-secret\",\n\t\t\tOptions: []CreateOption{\n\t\t\t\tWithCreateNamespace(\"default\"),\n\t\t\t\tWithCreateData(map[string][]byte{\"username\": []byte(\"user\"), \"password\": []byte(\"pass\")}),\n\t\t\t},\n\t\t\tExpectData: map[string][]byte{\"username\": []byte(\"user\"), \"password\": []byte(\"pass\")},\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.CreateErr\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\n\t\t\tactualErr := secretsClient.Create(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsecret, err := mockK8s.CoreV1().Secrets(tc.ExpectNamespace).Get(tc.Name, v1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(secret.StringData, tc.ExpectStringData) {\n\t\t\t\tt.Errorf(\"Expected StringData %#v got: %#v\", tc.ExpectStringData, secret.StringData)\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(secret.Data, tc.ExpectData) {\n\t\t\t\tt.Errorf(\"Expected data %#v got: %#v\", tc.ExpectData, secret.Data)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_Get(t *testing.T) {\n\tt.Parallel()\n\n\tdummySecret := map[string][]byte{\n\t\t\"foo\": []byte(\"foo\"),\n\t}\n\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []GetOption\n\t\tFactoryErr error\n\t\tExpectErr error\n\t\tExpectSecrets map[string][]byte\n\t\tSetup func(mockK8s kubernetes.Interface)\n\t}{\n\t\t\"uses default namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tExpectSecrets: dummySecret,\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{Data: dummySecret}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"default\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"custom namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{\n\t\t\t\tWithGetNamespace(\"custom-namespace\"),\n\t\t\t},\n\t\t\tExpectSecrets: dummySecret,\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{Data: dummySecret}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"custom-namespace\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"secret does not exist\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tExpectErr: errors.New(`secrets \"some-secret\" not found`),\n\t\t},\n\t\t\"client creation error\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []GetOption{},\n\t\t\tFactoryErr: errors.New(`some-error`),\n\t\t\tExpectErr: errors.New(`some-error`),\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.FactoryErr\n\t\t\t}\n\n\t\t\tif tc.Setup != nil {\n\t\t\t\ttc.Setup(mockK8s)\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\t\t\tactualSecrets, actualErr := secretsClient.Get(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(tc.ExpectSecrets, actualSecrets) {\n\t\t\t\tt.Errorf(\"wanted secrets: %v, got: %v\", tc.ExpectSecrets, actualSecrets)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_Delete(t *testing.T) {\n\tt.Parallel()\n\n\tcases := map[string]struct {\n\t\tName string\n\t\tOptions []DeleteOption\n\t\tDeleteErr error\n\t\tExpectErr error\n\t\tSetup func(mockK8s kubernetes.Interface)\n\t}{\n\t\t\"secret does not exist\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"default\"),\n\t\t\t},\n\t\t\tExpectErr: errors.New(`secrets \"some-secret\" not found`),\n\t\t},\n\t\t\"client creation error\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"default\"),\n\t\t\t},\n\t\t\tDeleteErr: errors.New(`some-error`),\n\t\t\tExpectErr: errors.New(`some-error`),\n\t\t},\n\t\t\"secret exists\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{\n\t\t\t\tWithDeleteNamespace(\"my-namespace\"),\n\t\t\t},\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"my-namespace\").Create(secret)\n\t\t\t},\n\t\t},\n\t\t\"uses default namespace\": {\n\t\t\tName: \"some-secret\",\n\t\t\tOptions: []DeleteOption{},\n\t\t\tSetup: func(mockK8s kubernetes.Interface) {\n\t\t\t\tsecret := &corev1.Secret{}\n\t\t\t\tsecret.Name = \"some-secret\"\n\t\t\t\tmockK8s.CoreV1().Secrets(\"default\").Create(secret)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\n\t\t\tmockK8s := testclient.NewSimpleClientset()\n\t\t\tk8sFactory := func() (kubernetes.Interface, error) {\n\t\t\t\treturn mockK8s, tc.DeleteErr\n\t\t\t}\n\n\t\t\tif tc.Setup != nil {\n\t\t\t\ttc.Setup(mockK8s)\n\t\t\t}\n\n\t\t\tsecretsClient := NewClient(k8sFactory)\n\t\t\tactualErr := secretsClient.Delete(tc.Name, tc.Options...)\n\t\t\tif tc.ExpectErr != nil || actualErr != nil {\n\t\t\t\tif fmt.Sprint(tc.ExpectErr) != fmt.Sprint(actualErr) {\n\t\t\t\t\tt.Fatalf(\"wanted err: %v, got: %v\", tc.ExpectErr, actualErr)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsecrets, err := mockK8s.CoreV1().Secrets(DeleteOptions(tc.Options).Namespace()).List(v1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tfor _, s := range secrets.Items {\n\t\t\t\tif s.Name == tc.Name {\n\t\t\t\t\tt.Fatal(\"The secret wasn't deleted\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package apps\n\nimport (\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/vito\/cmdtest\/matchers\"\n\n\t. \"github.com\/vito\/runtime-integration\/helpers\"\n)\n\nvar _ = Describe(\"An application printing a bunch of output\", func() {\n\tBeforeEach(func() {\n\t\tAppName = RandomName()\n\n\t\tExpect(Cf(\"push\", AppName, \"-p\", doraPath, \"-d\", IntegrationConfig.AppsDomain)).To(Say(\"Started\"))\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(Cf(\"delete\", AppName, \"-f\")).To(Say(\"OK\"))\n\t})\n\n\tIt(\"doesn't die when printing 32MB\", func() {\n\t\tbeforeId := Curl(AppUri(\"\/id\")).FullOutput()\n\n\t\tExpect(Curl(AppUri(\"\/logspew\/33554432\"))).To(\n\t\t\tSay(\"Just wrote 33554432 random bytes to the log\"),\n\t\t)\n\n\t\t\/\/ Give time for components (i.e. Warden) to react to the output\n\t\t\/\/ and potentially make bad decisions (like killing the app)\n\t\ttime.Sleep(10 * time.Second)\n\n\t\tafterId := Curl(AppUri(\"\/id\")).FullOutput()\n\n\t\tExpect(beforeId).To(Equal(afterId))\n\n\t\tExpect(Curl(AppUri(\"\/logspew\/2\"))).To(\n\t\t\tSay(\"Just wrote 2 random bytes to the log\"),\n\t\t)\n\t})\n})\n<commit_msg>stringify ids to improve failure message<commit_after>package apps\n\nimport (\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/vito\/cmdtest\/matchers\"\n\n\t. \"github.com\/vito\/runtime-integration\/helpers\"\n)\n\nvar _ = Describe(\"An application printing a bunch of output\", func() {\n\tBeforeEach(func() {\n\t\tAppName = RandomName()\n\n\t\tExpect(Cf(\"push\", AppName, \"-p\", doraPath, \"-d\", IntegrationConfig.AppsDomain)).To(Say(\"Started\"))\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(Cf(\"delete\", AppName, \"-f\")).To(Say(\"OK\"))\n\t})\n\n\tIt(\"doesn't die when printing 32MB\", func() {\n\t\tbeforeId := string(Curl(AppUri(\"\/id\")).FullOutput())\n\n\t\tExpect(Curl(AppUri(\"\/logspew\/33554432\"))).To(\n\t\t\tSay(\"Just wrote 33554432 random bytes to the log\"),\n\t\t)\n\n\t\t\/\/ Give time for components (i.e. Warden) to react to the output\n\t\t\/\/ and potentially make bad decisions (like killing the app)\n\t\ttime.Sleep(10 * time.Second)\n\n\t\tafterId := string(Curl(AppUri(\"\/id\")).FullOutput())\n\n\t\tExpect(beforeId).To(Equal(afterId))\n\n\t\tExpect(Curl(AppUri(\"\/logspew\/2\"))).To(\n\t\t\tSay(\"Just wrote 2 random bytes to the log\"),\n\t\t)\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package sorting\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nfunc DescribeNumber(f float64) string {\n\treturn fmt.Sprintf(\"This is the number %.1f\", f)\n}\n\ntype NumberBox interface {\n\tNumber() int\n}\n\nfunc DescribeNumberBox(nb NumberBox) string {\n\treturn fmt.Sprintf(\"This is a box containing the number %.1f\", float64(nb.Number()))\n}\n\ntype FancyNumber struct {\n\tn string\n}\n\nfunc (i FancyNumber) Value() string {\n\treturn i.n\n}\n\ntype FancyNumberBox interface {\n\tValue() string\n}\n\nfunc ExtractFancyNumber(fnb FancyNumberBox) int {\n\tresult, err := strconv.Atoi(fnb.Value())\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn result\n}\n\n\/\/ DescribeFancyNumberBox should return a string describing the FancyNumberBox.\nfunc DescribeFancyNumberBox(fnb FancyNumberBox) string {\n\tpanic(\"Please implement DescribeFancyNumberBox\")\n}\n\n\/\/ DescribeAnything should return a string describing whatever it contains.\nfunc DescribeAnything(i interface{}) string {\n\tpanic(\"Please implement DescribeAnything\")\n}\n<commit_msg>Implement DescribeFancyNumberBox<commit_after>package sorting\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nfunc DescribeNumber(f float64) string {\n\treturn fmt.Sprintf(\"This is the number %.1f\", f)\n}\n\ntype NumberBox interface {\n\tNumber() int\n}\n\nfunc DescribeNumberBox(nb NumberBox) string {\n\treturn fmt.Sprintf(\"This is a box containing the number %.1f\", float64(nb.Number()))\n}\n\ntype FancyNumber struct {\n\tn string\n}\n\nfunc (i FancyNumber) Value() string {\n\treturn i.n\n}\n\ntype FancyNumberBox interface {\n\tValue() string\n}\n\nfunc ExtractFancyNumber(fnb FancyNumberBox) int {\n\tresult, err := strconv.Atoi(fnb.Value())\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn result\n}\n\nfunc DescribeFancyNumberBox(fnb FancyNumberBox) string {\n\treturn fmt.Sprintf(\"This is a fancy box containing the number %.1f\", float64(ExtractFancyNumber(fnb)))\n}\n\n\/\/ DescribeAnything should return a string describing whatever it contains.\nfunc DescribeAnything(i interface{}) string {\n\tpanic(\"Please implement DescribeAnything\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Copyright 2017 Google Inc.\n * https:\/\/github.com\/NeilFraser\/CodeCity\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage data\n\n\/\/ Object represents any JavaScript value (primitive, object, etc.).\ntype Object interface {\n\t\/\/\/ Any Object is a valid Value.\n\tValue\n\n\t\/\/ Proto returns the prototype (parent) object for this object.\n\t\/\/ N.B. this is the value returned by Object.getPrototypeOf(foo)\n\t\/\/ (and by foo.__proto__, if not overridden); it is unrelated to\n\t\/\/ the .prototype property on Functions.\n\tProto() Object\n\n\t\/\/ Get returns the current value of the given property or an\n\t\/\/ NativeError if that was not possible.\n\tGet(key string) (Value, *NativeError)\n\n\t\/\/ Set sets the given property to the specified value or returns\n\t\/\/ an NativeError if that was not possible.\n\tSet(key string, value Value) *NativeError\n\n\t\/\/ Delete attempts to remove the specified property. If the\n\t\/\/ property exists but can't be removed for some reason an\n\t\/\/ NativeError is returned. (Removing a non-existing property\n\t\/\/ \"succeeds\" silently.)\n\tDelete(key string) *NativeError\n\n\t\/\/ OwnPropertyKeys returns the list of (own) property keys as a\n\t\/\/ slice of strings.\n\tOwnPropertyKeys() []string\n\n\t\/\/ HasOwnProperty returns true if the specified property key\n\t\/\/ exists on the object itself.\n\tHasOwnProperty(string) bool\n\n\t\/\/ HasProperty returns true if the specified property key\n\t\/\/ exists on the object or its prototype chain.\n\tHasProperty(string) bool\n}\n\n\/\/ object represents typical plain old JavaScript objects with\n\/\/ prototype, properties, etc.; this struct is also embedded in other,\n\/\/ less-plain object types like Array.\ntype object struct {\n\towner *Owner\n\tproto Object\n\tproperties map[string]Property\n\tf bool\n}\n\n\/\/ Property is a property descriptor, per §8.10 of ES5.1\n\/\/ Value: The actual value of the property.\n\/\/ Owner: Who owns the property (has permission to write it)?\n\/\/ W: Is the property writeable?\n\/\/ E: Is the property enumerable?\n\/\/ C: Is the property configurable?\n\/\/ R: Is the property world-readable?\n\/\/ I: Is the property ownership inherited on children?\ntype Property struct {\n\tValue Value\n\tOwner *Owner\n\tW, E, C bool\n\tR, I bool\n}\n\n\/\/ *object must satisfy Object.\nvar _ Object = (*object)(nil)\n\n\/\/ Type always returns OBJECT for regular objects.\nfunc (object) Type() Type {\n\treturn OBJECT\n}\n\n\/\/ Typeof always returns \"object\" for regular objects.\nfunc (object) Typeof() string {\n\treturn \"object\"\n}\n\n\/\/ IsPrimitive always returns false for regular objects.\nfunc (object) IsPrimitive() bool {\n\treturn false\n}\n\n\/\/ Proto returns the prototype (parent) object for this object.\nfunc (obj object) Proto() Object {\n\treturn obj.proto\n}\n\n\/\/ Get returns the current value of the given property or an\n\/\/ NativeError if that was not possible.\nfunc (obj object) Get(key string) (Value, *NativeError) {\n\tpd, ok := obj.properties[key]\n\t\/\/ FIXME: permissions check for property readability goes here\n\tif ok {\n\t\treturn pd.Value, nil\n\t}\n\t\/\/ Try the prototype?\n\tproto := obj.Proto()\n\tif proto != nil {\n\t\treturn proto.Get(key)\n\t}\n\treturn Undefined{}, nil\n}\n\n\/\/ Set sets the given property to the specified value or returns an\n\/\/ NativeError if that was not possible.\nfunc (obj *object) Set(key string, value Value) *NativeError {\n\tpd, ok := obj.properties[key]\n\tif !ok { \/\/ Creating new property\n\t\t\/\/ FIXME: permissions check for object writability goes here\n\t\tobj.properties[key] = Property{\n\t\t\tValue: value,\n\t\t\tOwner: obj.owner, \/\/ FIXME: should this be caller?\n\t\t\tW: true,\n\t\t\tE: true,\n\t\t\tC: true,\n\t\t\tR: true,\n\t\t\tI: false,\n\t\t}\n\t\treturn nil\n\t}\n\t\/\/ Updating existing property\n\t\/\/ FIXME: permissions check for property writeability goes here\n\t\/\/ FIXME: recurse if necessary\n\tpd.Value = value\n\tobj.properties[key] = pd\n\treturn nil\n}\n\n\/\/ Delete removes the specified property if possible.\n\/\/\n\/\/ FIXME: perm \/ immutability checks!\nfunc (obj *object) Delete(key string) *NativeError {\n\tdelete(obj.properties, key)\n\treturn nil\n}\n\n\/\/ OwnPropertyKeys returns the list of (own) property keys as a slice\n\/\/ of strings.\nfunc (obj *object) OwnPropertyKeys() []string {\n\tkeys := make([]string, len(obj.properties))\n\ti := 0\n\tfor k := range obj.properties {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}\n\n\/\/ HasOwnProperty returns true if the specified property key exists on\n\/\/ the object itself.\nfunc (obj *object) HasOwnProperty(key string) bool {\n\t_, exists := obj.properties[key]\n\treturn exists\n}\n\n\/\/ HasProperty returns true if the specified property key exists on\n\/\/ the object or its prototype chain.\nfunc (obj *object) HasProperty(key string) bool {\n\treturn obj.HasOwnProperty(key) ||\n\t\tobj.proto != nil && obj.proto.HasProperty(key)\n}\n\n\/\/ ToBoolean always returns true for regular objects.\nfunc (object) ToBoolean() Boolean {\n\treturn true\n}\n\n\/\/ ToNumber returns the numeric equivalent of the object.\n\/\/\n\/\/ BUG(cpcallen): object.ToNumber is not strictly compliant with ES5.1\n\/\/ spec; it just returns .ToString().ToNumber().\nfunc (obj object) ToNumber() Number {\n\treturn obj.ToString().ToNumber()\n}\n\n\/\/ ToString returns a string representation of the object. By default\n\/\/ this is \"[object Object]\" for plain objects.\n\/\/\n\/\/ BUG(cpcallen): object.ToString should call a user-code toString()\n\/\/ method if present.\nfunc (object) ToString() String {\n\treturn \"[object Object]\"\n}\n\n\/\/ ToPrimitive defaults to ToNumber on objects.\n\/\/\n\/\/ BUG(cpcallen): object.ToPrimitive should prefer to return the\n\/\/ result of ToString() on date objects.\nfunc (obj *object) ToPrimitive() Value {\n\treturn obj.ToNumber()\n}\n\n\/\/ NewObject creates a new object with the specified owner and\n\/\/ prototype, initialises it as appropriate, and returns a pointer to\n\/\/ the newly-created object.\nfunc NewObject(owner *Owner, proto Object) *object {\n\tvar obj = new(object)\n\tobj.init(owner, proto)\n\tobj.f = true\n\treturn obj\n}\n\n\/\/ init is an internal initialisation routine, called from New and\n\/\/ also called when constructing other types of objects such as\n\/\/ Arrays, Owners, etc.\nfunc (obj *object) init(owner *Owner, proto Object) {\n\tobj.owner = owner\n\tobj.proto = proto\n\tobj.properties = make(map[string]Property)\n}\n<commit_msg>Add Object.DefineOwnProperty and Object.GetOwnProperty methods<commit_after>\/* Copyright 2017 Google Inc.\n * https:\/\/github.com\/NeilFraser\/CodeCity\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage data\n\n\/\/ Object represents any JavaScript value (primitive, object, etc.).\ntype Object interface {\n\t\/\/\/ Any Object is a valid Value.\n\tValue\n\n\t\/\/ Proto returns the prototype (parent) object for this object.\n\t\/\/ N.B. this is the value returned by Object.getPrototypeOf(foo)\n\t\/\/ (and by foo.__proto__, if not overridden); it is unrelated to\n\t\/\/ the .prototype property on Functions.\n\tProto() Object\n\n\t\/\/ DefineOwnProperty creates a new property (or updates an\n\t\/\/ existing one, if possible) with the specified property\n\t\/\/ descriptor.\n\tDefineOwnProperty(key string, pd Property) *NativeError\n\n\t\/\/ GetOwnProperty returns the property descriptor for the\n\t\/\/ specified key and ok == true (if a property with the specified\n\t\/\/ key exists), or ok == false (if it doesn't).\n\tGetOwnProperty(key string) (pd Property, ok bool)\n\n\t\/\/ Get returns the current value of the given property or an\n\t\/\/ NativeError if that was not possible.\n\tGet(key string) (Value, *NativeError)\n\n\t\/\/ Set sets the given property to the specified value or returns\n\t\/\/ an NativeError if that was not possible.\n\tSet(key string, value Value) *NativeError\n\n\t\/\/ Delete attempts to remove the specified property. If the\n\t\/\/ property exists but can't be removed for some reason an\n\t\/\/ NativeError is returned. (Removing a non-existing property\n\t\/\/ \"succeeds\" silently.)\n\tDelete(key string) *NativeError\n\n\t\/\/ OwnPropertyKeys returns the list of (own) property keys as a\n\t\/\/ slice of strings.\n\tOwnPropertyKeys() []string\n\n\t\/\/ HasOwnProperty returns true if the specified property key\n\t\/\/ exists on the object itself.\n\tHasOwnProperty(string) bool\n\n\t\/\/ HasProperty returns true if the specified property key\n\t\/\/ exists on the object or its prototype chain.\n\tHasProperty(string) bool\n}\n\n\/\/ object represents typical plain old JavaScript objects with\n\/\/ prototype, properties, etc.; this struct is also embedded in other,\n\/\/ less-plain object types like Array.\ntype object struct {\n\towner *Owner\n\tproto Object\n\tproperties map[string]Property\n\tf bool\n}\n\n\/\/ Property is a property descriptor, per §8.10 of ES5.1\n\/\/ Value: The actual value of the property.\n\/\/ Owner: Who owns the property (has permission to write it)?\n\/\/ W: Is the property writeable?\n\/\/ E: Is the property enumerable?\n\/\/ C: Is the property configurable?\n\/\/ R: Is the property world-readable?\n\/\/ I: Is the property ownership inherited on children?\ntype Property struct {\n\tValue Value\n\tOwner *Owner\n\tW, E, C bool\n\tR, I bool\n}\n\n\/\/ *object must satisfy Object.\nvar _ Object = (*object)(nil)\n\n\/\/ Type always returns OBJECT for regular objects.\nfunc (object) Type() Type {\n\treturn OBJECT\n}\n\n\/\/ Typeof always returns \"object\" for regular objects.\nfunc (object) Typeof() string {\n\treturn \"object\"\n}\n\n\/\/ IsPrimitive always returns false for regular objects.\nfunc (object) IsPrimitive() bool {\n\treturn false\n}\n\n\/\/ Proto returns the prototype (parent) object for this object.\nfunc (obj object) Proto() Object {\n\treturn obj.proto\n}\n\n\/\/ DefineOwnProperty creates a new property (or updates an existing\n\/\/ one, if possible) with the specified property descriptor.\nfunc (obj *object) DefineOwnProperty(key string, pd Property) *NativeError {\n\t\/\/ FIXME: perm \/ configurability checks!\n\tobj.properties[key] = pd\n\treturn nil\n}\n\n\/\/ GetOwnProperty returns the property descriptor for the specified\n\/\/ key and ok == true (if a property with the specified key exists),\n\/\/ or ok == false (if it doesn't).\n\/\/\n\/\/ FIXME: this needs to be redefined more carefully on Array,\n\/\/ BoxedString, etc.\nfunc (obj *object) GetOwnProperty(key string) (pd Property, ok bool) {\n\t\/\/ FIXME: perm check?\n\tpd, ok = obj.properties[key]\n\treturn pd, ok\n}\n\n\/\/ Get returns the current value of the given property or an\n\/\/ NativeError if that was not possible.\n\/\/\n\/\/ FIXME: this needs to be redefined more carefully on Array,\n\/\/ BoxedString, etc.\nfunc (obj object) Get(key string) (Value, *NativeError) {\n\tpd, ok := obj.properties[key]\n\t\/\/ FIXME: permissions check for property readability goes here\n\tif ok {\n\t\treturn pd.Value, nil\n\t}\n\t\/\/ Try the prototype?\n\tproto := obj.Proto()\n\tif proto != nil {\n\t\treturn proto.Get(key)\n\t}\n\treturn Undefined{}, nil\n}\n\n\/\/ Set sets the given property to the specified value or returns an\n\/\/ NativeError if that was not possible.\nfunc (obj *object) Set(key string, value Value) *NativeError {\n\tpd, ok := obj.properties[key]\n\tif !ok { \/\/ Creating new property\n\t\tpd := Property{\n\t\t\tValue: value,\n\t\t\tOwner: obj.owner, \/\/ FIXME: should this be caller?\n\t\t\tW: true,\n\t\t\tE: true,\n\t\t\tC: true,\n\t\t\tR: true,\n\t\t\tI: false,\n\t\t}\n\t\treturn obj.DefineOwnProperty(key, pd)\n\t}\n\t\/\/ Updating existing property\n\t\/\/ FIXME: permissions check for property writeability goes here\n\t\/\/ FIXME: recurse if necessary\n\tpd.Value = value\n\tobj.properties[key] = pd\n\treturn nil\n}\n\n\/\/ Delete removes the specified property if possible.\n\/\/\n\/\/ FIXME: perm \/ immutability checks!\nfunc (obj *object) Delete(key string) *NativeError {\n\tdelete(obj.properties, key)\n\treturn nil\n}\n\n\/\/ OwnPropertyKeys returns the list of (own) property keys as a slice\n\/\/ of strings.\nfunc (obj *object) OwnPropertyKeys() []string {\n\tkeys := make([]string, len(obj.properties))\n\ti := 0\n\tfor k := range obj.properties {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}\n\n\/\/ HasOwnProperty returns true if the specified property key exists on\n\/\/ the object itself.\nfunc (obj *object) HasOwnProperty(key string) bool {\n\t_, exists := obj.properties[key]\n\treturn exists\n}\n\n\/\/ HasProperty returns true if the specified property key exists on\n\/\/ the object or its prototype chain.\nfunc (obj *object) HasProperty(key string) bool {\n\treturn obj.HasOwnProperty(key) ||\n\t\tobj.proto != nil && obj.proto.HasProperty(key)\n}\n\n\/\/ ToBoolean always returns true for regular objects.\nfunc (object) ToBoolean() Boolean {\n\treturn true\n}\n\n\/\/ ToNumber returns the numeric equivalent of the object.\n\/\/\n\/\/ BUG(cpcallen): object.ToNumber is not strictly compliant with ES5.1\n\/\/ spec; it just returns .ToString().ToNumber().\nfunc (obj object) ToNumber() Number {\n\treturn obj.ToString().ToNumber()\n}\n\n\/\/ ToString returns a string representation of the object. By default\n\/\/ this is \"[object Object]\" for plain objects.\n\/\/\n\/\/ BUG(cpcallen): object.ToString should call a user-code toString()\n\/\/ method if present.\nfunc (object) ToString() String {\n\treturn \"[object Object]\"\n}\n\n\/\/ ToPrimitive defaults to ToNumber on objects.\n\/\/\n\/\/ BUG(cpcallen): object.ToPrimitive should prefer to return the\n\/\/ result of ToString() on date objects.\nfunc (obj *object) ToPrimitive() Value {\n\treturn obj.ToNumber()\n}\n\n\/\/ NewObject creates a new object with the specified owner and\n\/\/ prototype, initialises it as appropriate, and returns a pointer to\n\/\/ the newly-created object.\nfunc NewObject(owner *Owner, proto Object) *object {\n\tvar obj = new(object)\n\tobj.init(owner, proto)\n\tobj.f = true\n\treturn obj\n}\n\n\/\/ init is an internal initialisation routine, called from New and\n\/\/ also called when constructing other types of objects such as\n\/\/ Arrays, Owners, etc.\nfunc (obj *object) init(owner *Owner, proto Object) {\n\tobj.owner = owner\n\tobj.proto = proto\n\tobj.properties = make(map[string]Property)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/unixpickle\/essentials\"\n\t\"github.com\/unixpickle\/muniverse\"\n\t\"github.com\/unixpickle\/muniverse\/chrome\"\n)\n\nfunc main() {\n\tessentials.Die((&Server{\n\t\tInput: os.Stdin,\n\t\tOutput: os.Stdout,\n\t}).Run())\n}\n\ntype Server struct {\n\tInput io.Reader\n\tOutput io.Writer\n\n\tenvsLock sync.RWMutex\n\tcurrentUID int64\n\tenvsByUID map[string]muniverse.Env\n}\n\n\/\/ Run runs the server until an unrecoverable error is\n\/\/ encountered.\n\/\/ Typically, Run will not return until the input stream\n\/\/ has been closed or produces an error.\nfunc (b *Server) Run() error {\n\tb.envsByUID = map[string]muniverse.Env{}\n\n\tdone := make(chan struct{})\n\terrChan := make(chan error, 1)\n\tgotError := func(e error) {\n\t\tselect {\n\t\tcase errChan <- e:\n\t\t\tclose(done)\n\t\tdefault:\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\n\t\/\/ Synchronize all responses to prevent overlapping\n\t\/\/ writes to one io.Writer.\n\trespChan := make(chan *Response, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-respChan:\n\t\t\t\tif err := WriteResponse(b.Output, r); err != nil {\n\t\t\t\t\tgotError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\nReadLoop:\n\tfor {\n\t\tcall, err := ReadCall(b.Input)\n\t\tselect {\n\t\tcase <-done:\n\t\t\tbreak ReadLoop\n\t\tdefault:\n\t\t}\n\t\tif err != nil {\n\t\t\tgotError(err)\n\t\t\tbreak\n\t\t}\n\t\tgo func() {\n\t\t\tresponse := b.handleCall(call)\n\t\t\tresponse.ID = call.ID\n\t\t\tselect {\n\t\t\tcase respChan <- response:\n\t\t\tcase <-done:\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\treturn <-errChan\n}\n\nfunc (b *Server) handleCall(call *Call) *Response {\n\tswitch true {\n\tcase call.SpecForName != nil:\n\t\treturn b.specForName(call)\n\tcase call.NewEnv != nil, call.NewEnvContainer != nil, call.NewEnvChrome != nil:\n\t\treturn b.newEnv(call)\n\tcase call.CloseEnv != nil:\n\t\treturn b.closeEnv(call)\n\tcase call.Reset != nil:\n\t\treturn b.reset(call)\n\tcase call.Step != nil:\n\t\treturn b.step(call)\n\tcase call.Observe != nil:\n\t\treturn b.observe(call)\n\tcase call.KeyForCode != nil:\n\t\treturn b.keyForCode(call)\n\tdefault:\n\t\treturn ErrorResponse(errors.New(\"malformed call\"))\n\t}\n}\n\nfunc (b *Server) specForName(call *Call) *Response {\n\treturn &Response{\n\t\tSpec: muniverse.SpecForName(call.SpecForName.Name),\n\t}\n}\n\nfunc (b *Server) newEnv(call *Call) *Response {\n\tvar spec *muniverse.EnvSpec\n\tvar opts *muniverse.Options\n\tvar err error\n\tswitch true {\n\tcase call.NewEnv != nil:\n\t\tspec = call.NewEnv.Spec\n\t\topts = &muniverse.Options{}\n\tcase call.NewEnvContainer != nil:\n\t\tspec = call.NewEnvContainer.Spec\n\t\topts = &muniverse.Options{\n\t\t\tCustomImage: call.NewEnvContainer.Container,\n\t\t}\n\tcase call.NewEnvChrome != nil:\n\t\tspec = call.NewEnvContainer.Spec\n\t\topts = &muniverse.Options{\n\t\t\tDevtoolsHost: call.NewEnvChrome.Host,\n\t\t\tGameHost: call.NewEnvChrome.GameHost,\n\t\t}\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n\n\tvar env muniverse.Env\n\tif spec == nil {\n\t\terr = errors.New(\"null specification\")\n\t} else {\n\t\tenv, err = muniverse.NewEnvOptions(spec, opts)\n\t}\n\n\tresponse := &Response{}\n\tif err != nil {\n\t\tmessage := err.Error()\n\t\tresponse.Error = &message\n\t} else {\n\t\tb.envsLock.Lock()\n\t\tuid := strconv.FormatInt(b.currentUID, 10)\n\t\tb.currentUID++\n\t\tb.envsByUID[uid] = env\n\t\tb.envsLock.Unlock()\n\t\tresponse.UID = &uid\n\t}\n\treturn response\n}\n\nfunc (b *Server) closeEnv(call *Call) *Response {\n\tb.envsLock.Lock()\n\tenv, ok := b.envsByUID[call.CloseEnv.UID]\n\tdelete(b.envsByUID, call.CloseEnv.UID)\n\tb.envsLock.Unlock()\n\n\tif !ok {\n\t\treturn ErrorResponse(errors.New(\"environment does not exist\"))\n\t} else if err := env.Close(); err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{}\n}\n\nfunc (b *Server) reset(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Reset.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tif err := env.Reset(); err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{}\n}\n\nfunc (b *Server) step(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Step.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tt := time.Duration(float64(time.Second) * call.Step.Seconds)\n\tvar events []interface{}\n\tfor _, evt := range call.Step.Events {\n\t\tif evt.KeyEvent != nil {\n\t\t\tevents = append(events, evt.KeyEvent)\n\t\t} else if evt.MouseEvent != nil {\n\t\t\tevents = append(events, evt.MouseEvent)\n\t\t}\n\t}\n\treward, done, err := env.Step(t, events...)\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{\n\t\tStepResult: &StepResult{\n\t\t\tReward: reward,\n\t\t\tDone: done,\n\t\t},\n\t}\n}\n\nfunc (b *Server) observe(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Observe.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tobs, err := env.Observe()\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\tdata, width, height, err := muniverse.RGB(obs)\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{\n\t\tObservation: &Observation{\n\t\t\tWidth: width,\n\t\t\tHeight: height,\n\t\t\tRGB: data,\n\t\t},\n\t}\n}\n\nfunc (b *Server) keyForCode(call *Call) *Response {\n\tevt, ok := chrome.KeyEvents[call.KeyForCode.Code]\n\tif ok {\n\t\treturn &Response{\n\t\t\tKeyEvent: &evt,\n\t\t}\n\t} else {\n\t\treturn &Response{}\n\t}\n}\n\nfunc (b *Server) lookupEnv(uid string) (env muniverse.Env, errResp *Response) {\n\tb.envsLock.RLock()\n\tdefer b.envsLock.RUnlock()\n\tif env, ok := b.envsByUID[uid]; ok {\n\t\treturn env, nil\n\t} else {\n\t\treturn nil, ErrorResponse(errors.New(\"environment does not exist\"))\n\t}\n}\n<commit_msg>Fix nil pointer in NewEnvChrome case<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/unixpickle\/essentials\"\n\t\"github.com\/unixpickle\/muniverse\"\n\t\"github.com\/unixpickle\/muniverse\/chrome\"\n)\n\nfunc main() {\n\tessentials.Die((&Server{\n\t\tInput: os.Stdin,\n\t\tOutput: os.Stdout,\n\t}).Run())\n}\n\ntype Server struct {\n\tInput io.Reader\n\tOutput io.Writer\n\n\tenvsLock sync.RWMutex\n\tcurrentUID int64\n\tenvsByUID map[string]muniverse.Env\n}\n\n\/\/ Run runs the server until an unrecoverable error is\n\/\/ encountered.\n\/\/ Typically, Run will not return until the input stream\n\/\/ has been closed or produces an error.\nfunc (b *Server) Run() error {\n\tb.envsByUID = map[string]muniverse.Env{}\n\n\tdone := make(chan struct{})\n\terrChan := make(chan error, 1)\n\tgotError := func(e error) {\n\t\tselect {\n\t\tcase errChan <- e:\n\t\t\tclose(done)\n\t\tdefault:\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\n\t\/\/ Synchronize all responses to prevent overlapping\n\t\/\/ writes to one io.Writer.\n\trespChan := make(chan *Response, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-respChan:\n\t\t\t\tif err := WriteResponse(b.Output, r); err != nil {\n\t\t\t\t\tgotError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\nReadLoop:\n\tfor {\n\t\tcall, err := ReadCall(b.Input)\n\t\tselect {\n\t\tcase <-done:\n\t\t\tbreak ReadLoop\n\t\tdefault:\n\t\t}\n\t\tif err != nil {\n\t\t\tgotError(err)\n\t\t\tbreak\n\t\t}\n\t\tgo func() {\n\t\t\tresponse := b.handleCall(call)\n\t\t\tresponse.ID = call.ID\n\t\t\tselect {\n\t\t\tcase respChan <- response:\n\t\t\tcase <-done:\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\treturn <-errChan\n}\n\nfunc (b *Server) handleCall(call *Call) *Response {\n\tswitch true {\n\tcase call.SpecForName != nil:\n\t\treturn b.specForName(call)\n\tcase call.NewEnv != nil, call.NewEnvContainer != nil, call.NewEnvChrome != nil:\n\t\treturn b.newEnv(call)\n\tcase call.CloseEnv != nil:\n\t\treturn b.closeEnv(call)\n\tcase call.Reset != nil:\n\t\treturn b.reset(call)\n\tcase call.Step != nil:\n\t\treturn b.step(call)\n\tcase call.Observe != nil:\n\t\treturn b.observe(call)\n\tcase call.KeyForCode != nil:\n\t\treturn b.keyForCode(call)\n\tdefault:\n\t\treturn ErrorResponse(errors.New(\"malformed call\"))\n\t}\n}\n\nfunc (b *Server) specForName(call *Call) *Response {\n\treturn &Response{\n\t\tSpec: muniverse.SpecForName(call.SpecForName.Name),\n\t}\n}\n\nfunc (b *Server) newEnv(call *Call) *Response {\n\tvar spec *muniverse.EnvSpec\n\tvar opts *muniverse.Options\n\tvar err error\n\tswitch true {\n\tcase call.NewEnv != nil:\n\t\tspec = call.NewEnv.Spec\n\t\topts = &muniverse.Options{}\n\tcase call.NewEnvContainer != nil:\n\t\tspec = call.NewEnvContainer.Spec\n\t\topts = &muniverse.Options{\n\t\t\tCustomImage: call.NewEnvContainer.Container,\n\t\t}\n\tcase call.NewEnvChrome != nil:\n\t\tspec = call.NewEnvChrome.Spec\n\t\topts = &muniverse.Options{\n\t\t\tDevtoolsHost: call.NewEnvChrome.Host,\n\t\t\tGameHost: call.NewEnvChrome.GameHost,\n\t\t}\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n\n\tvar env muniverse.Env\n\tif spec == nil {\n\t\terr = errors.New(\"null specification\")\n\t} else {\n\t\tenv, err = muniverse.NewEnvOptions(spec, opts)\n\t}\n\n\tresponse := &Response{}\n\tif err != nil {\n\t\tmessage := err.Error()\n\t\tresponse.Error = &message\n\t} else {\n\t\tb.envsLock.Lock()\n\t\tuid := strconv.FormatInt(b.currentUID, 10)\n\t\tb.currentUID++\n\t\tb.envsByUID[uid] = env\n\t\tb.envsLock.Unlock()\n\t\tresponse.UID = &uid\n\t}\n\treturn response\n}\n\nfunc (b *Server) closeEnv(call *Call) *Response {\n\tb.envsLock.Lock()\n\tenv, ok := b.envsByUID[call.CloseEnv.UID]\n\tdelete(b.envsByUID, call.CloseEnv.UID)\n\tb.envsLock.Unlock()\n\n\tif !ok {\n\t\treturn ErrorResponse(errors.New(\"environment does not exist\"))\n\t} else if err := env.Close(); err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{}\n}\n\nfunc (b *Server) reset(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Reset.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tif err := env.Reset(); err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{}\n}\n\nfunc (b *Server) step(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Step.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tt := time.Duration(float64(time.Second) * call.Step.Seconds)\n\tvar events []interface{}\n\tfor _, evt := range call.Step.Events {\n\t\tif evt.KeyEvent != nil {\n\t\t\tevents = append(events, evt.KeyEvent)\n\t\t} else if evt.MouseEvent != nil {\n\t\t\tevents = append(events, evt.MouseEvent)\n\t\t}\n\t}\n\treward, done, err := env.Step(t, events...)\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{\n\t\tStepResult: &StepResult{\n\t\t\tReward: reward,\n\t\t\tDone: done,\n\t\t},\n\t}\n}\n\nfunc (b *Server) observe(call *Call) *Response {\n\tenv, errResp := b.lookupEnv(call.Observe.UID)\n\tif errResp != nil {\n\t\treturn errResp\n\t}\n\tobs, err := env.Observe()\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\tdata, width, height, err := muniverse.RGB(obs)\n\tif err != nil {\n\t\treturn ErrorResponse(err)\n\t}\n\treturn &Response{\n\t\tObservation: &Observation{\n\t\t\tWidth: width,\n\t\t\tHeight: height,\n\t\t\tRGB: data,\n\t\t},\n\t}\n}\n\nfunc (b *Server) keyForCode(call *Call) *Response {\n\tevt, ok := chrome.KeyEvents[call.KeyForCode.Code]\n\tif ok {\n\t\treturn &Response{\n\t\t\tKeyEvent: &evt,\n\t\t}\n\t} else {\n\t\treturn &Response{}\n\t}\n}\n\nfunc (b *Server) lookupEnv(uid string) (env muniverse.Env, errResp *Response) {\n\tb.envsLock.RLock()\n\tdefer b.envsLock.RUnlock()\n\tif env, ok := b.envsByUID[uid]; ok {\n\t\treturn env, nil\n\t} else {\n\t\treturn nil, ErrorResponse(errors.New(\"environment does not exist\"))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package http\n\nimport (\n\t\"fmt\"\n\t\"github.com\/franela\/goreq\"\n\t\"github.com\/thoas\/gostorages\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n)\n\ntype HTTPStorage struct {\n\tgostorages.Storage\n}\n\nvar HeaderKeys = []string{\n\t\"Age\",\n\t\"Content-Type\",\n\t\"Last-Modified\",\n\t\"Date\",\n\t\"Etag\",\n}\n\nfunc (s *HTTPStorage) Open(filepath string) (gostorages.File, error) {\n\tu, err := url.Parse(s.URL(filepath))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err := s.OpenFromURL(u)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gostorages.NewContentFile(content), nil\n}\n\nfunc (s *HTTPStorage) OpenFromURL(u *url.URL) ([]byte, error) {\n\tcontent, err := goreq.Request{Uri: u.String()}.Do()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif content.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%s [status: %d]\", u.String(), content.StatusCode)\n\t}\n\n\treturn ioutil.ReadAll(content.Body)\n}\n\nfunc (s *HTTPStorage) HeadersFromURL(u *url.URL) (map[string]string, error) {\n\tvar headers = make(map[string]string)\n\n\tcontent, err := goreq.Request{\n\t\tUri: u.String(),\n\t\tMethod: \"GET\",\n\t}.Do()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, key := range HeaderKeys {\n\t\tif value, ok := content.Header[key]; ok && len(value) > 0 {\n\t\t\theaders[key] = value[0]\n\t\t}\n\t}\n\n\treturn headers, nil\n}\n\nfunc (s *HTTPStorage) Headers(filepath string) (map[string]string, error) {\n\tu, err := url.Parse(s.URL(filepath))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.HeadersFromURL(u)\n}\n\nfunc (s *HTTPStorage) ModifiedTime(filepath string) (time.Time, error) {\n\theaders, err := s.Headers(filepath)\n\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\tlastModified, ok := headers[\"Last-Modified\"]\n\n\tif !ok {\n\t\treturn time.Time{}, fmt.Errorf(\"Last-Modified header not found\")\n\t}\n\n\treturn time.Parse(gostorages.LastModifiedFormat, lastModified)\n}\n<commit_msg>Fix HTTP leak on HTTPStorage (thanks @alexd6631)<commit_after>package http\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/franela\/goreq\"\n\t\"github.com\/thoas\/gostorages\"\n)\n\ntype HTTPStorage struct {\n\tgostorages.Storage\n}\n\nvar HeaderKeys = []string{\n\t\"Age\",\n\t\"Content-Type\",\n\t\"Last-Modified\",\n\t\"Date\",\n\t\"Etag\",\n}\n\nfunc (s *HTTPStorage) Open(filepath string) (gostorages.File, error) {\n\tu, err := url.Parse(s.URL(filepath))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err := s.OpenFromURL(u)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gostorages.NewContentFile(content), nil\n}\n\nfunc (s *HTTPStorage) OpenFromURL(u *url.URL) ([]byte, error) {\n\tcontent, err := goreq.Request{Uri: u.String()}.Do()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer content.Body.Close()\n\n\tif content.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%s [status: %d]\", u.String(), content.StatusCode)\n\t}\n\n\treturn ioutil.ReadAll(content.Body)\n}\n\nfunc (s *HTTPStorage) HeadersFromURL(u *url.URL) (map[string]string, error) {\n\tvar headers = make(map[string]string)\n\n\tcontent, err := goreq.Request{\n\t\tUri: u.String(),\n\t\tMethod: \"GET\",\n\t}.Do()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer content.Body.Close()\n\n\tfor _, key := range HeaderKeys {\n\t\tif value, ok := content.Header[key]; ok && len(value) > 0 {\n\t\t\theaders[key] = value[0]\n\t\t}\n\t}\n\n\treturn headers, nil\n}\n\nfunc (s *HTTPStorage) Headers(filepath string) (map[string]string, error) {\n\tu, err := url.Parse(s.URL(filepath))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.HeadersFromURL(u)\n}\n\nfunc (s *HTTPStorage) ModifiedTime(filepath string) (time.Time, error) {\n\theaders, err := s.Headers(filepath)\n\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\tlastModified, ok := headers[\"Last-Modified\"]\n\n\tif !ok {\n\t\treturn time.Time{}, fmt.Errorf(\"Last-Modified header not found\")\n\t}\n\n\treturn time.Parse(gostorages.LastModifiedFormat, lastModified)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>added test for compression<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage barnacle\n\n\/\/ Imports and register the Zookeeper TopologyServer\n\nimport (\n\t_ \"github.com\/youtube\/vitess\/go\/vt\/zktopo\"\n)\n<commit_msg>remove plugin_zktopo<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build linux\n\npackage inotify\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestInotifyEvents(t *testing.T) {\n\t\/\/ Create an inotify watcher instance and initialize it\n\twatcher, err := NewWatcher()\n\tif err != nil {\n\t\tt.Fatalf(\"NewWatcher failed: %s\", err)\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"inotify\")\n\tif err != nil {\n\t\tt.Fatalf(\"TempDir failed: %s\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t\/\/ Add a watch for \"_test\"\n\terr = watcher.Watch(dir)\n\tif err != nil {\n\t\tt.Fatalf(\"Watch failed: %s\", err)\n\t}\n\n\t\/\/ Receive errors on the error channel on a separate goroutine\n\tgo func() {\n\t\tfor err := range watcher.Error {\n\t\t\tt.Fatalf(\"error received: %s\", err)\n\t\t}\n\t}()\n\n\ttestFile := dir + \"\/TestInotifyEvents.testfile\"\n\n\t\/\/ Receive events on the event channel on a separate goroutine\n\teventstream := watcher.Event\n\tvar eventsReceived int32\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor event := range eventstream {\n\t\t\t\/\/ Only count relevant events\n\t\t\tif event.Name == testFile {\n\t\t\t\tatomic.AddInt32(&eventsReceived, 1)\n\t\t\t\tt.Logf(\"event received: %s\", event)\n\t\t\t} else {\n\t\t\t\tt.Logf(\"unexpected event received: %s\", event)\n\t\t\t}\n\t\t}\n\t\tdone <- true\n\t}()\n\n\t\/\/ Create a file\n\t\/\/ This should add at least one event to the inotify event queue\n\t_, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tt.Fatalf(\"creating test file: %s\", err)\n\t}\n\n\t\/\/ We expect this event to be received almost immediately, but let's wait 1 s to be sure\n\ttime.Sleep(1 * time.Second)\n\tif atomic.AddInt32(&eventsReceived, 0) == 0 {\n\t\tt.Fatal(\"inotify event hasn't been received after 1 second\")\n\t}\n\n\t\/\/ Try closing the inotify instance\n\tt.Log(\"calling Close()\")\n\twatcher.Close()\n\tt.Log(\"waiting for the event channel to become closed...\")\n\tselect {\n\tcase <-done:\n\t\tt.Log(\"event channel closed\")\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"event stream was not closed after 1 second\")\n\t}\n}\n\nfunc TestInotifyClose(t *testing.T) {\n\twatcher, _ := NewWatcher()\n\twatcher.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\twatcher.Close()\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(50 * time.Millisecond):\n\t\tt.Fatal(\"double Close() test failed: second Close() call didn't return\")\n\t}\n\n\terr := watcher.Watch(os.TempDir())\n\tif err == nil {\n\t\tt.Fatal(\"expected error on Watch() after Close(), got nil\")\n\t}\n}\n<commit_msg>call to (*T).Fatalf from a non-test goroutine (govet)<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build linux\n\/\/ +build linux\n\npackage inotify\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestInotifyEvents(t *testing.T) {\n\t\/\/ Create an inotify watcher instance and initialize it\n\twatcher, err := NewWatcher()\n\tif err != nil {\n\t\tt.Fatalf(\"NewWatcher failed: %s\", err)\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"inotify\")\n\tif err != nil {\n\t\tt.Fatalf(\"TempDir failed: %s\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t\/\/ Add a watch for \"_test\"\n\terr = watcher.Watch(dir)\n\tif err != nil {\n\t\tt.Fatalf(\"Watch failed: %s\", err)\n\t}\n\n\t\/\/ Receive errors on the error channel on a separate goroutine\n\tgo func() {\n\t\tfor err := range watcher.Error {\n\t\t\tt.Errorf(\"error received: %s\", err)\n\t\t}\n\t}()\n\n\ttestFile := dir + \"\/TestInotifyEvents.testfile\"\n\n\t\/\/ Receive events on the event channel on a separate goroutine\n\teventstream := watcher.Event\n\tvar eventsReceived int32\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor event := range eventstream {\n\t\t\t\/\/ Only count relevant events\n\t\t\tif event.Name == testFile {\n\t\t\t\tatomic.AddInt32(&eventsReceived, 1)\n\t\t\t\tt.Logf(\"event received: %s\", event)\n\t\t\t} else {\n\t\t\t\tt.Logf(\"unexpected event received: %s\", event)\n\t\t\t}\n\t\t}\n\t\tdone <- true\n\t}()\n\n\t\/\/ Create a file\n\t\/\/ This should add at least one event to the inotify event queue\n\t_, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tt.Fatalf(\"creating test file: %s\", err)\n\t}\n\n\t\/\/ We expect this event to be received almost immediately, but let's wait 1 s to be sure\n\ttime.Sleep(1 * time.Second)\n\tif atomic.AddInt32(&eventsReceived, 0) == 0 {\n\t\tt.Fatal(\"inotify event hasn't been received after 1 second\")\n\t}\n\n\t\/\/ Try closing the inotify instance\n\tt.Log(\"calling Close()\")\n\twatcher.Close()\n\tt.Log(\"waiting for the event channel to become closed...\")\n\tselect {\n\tcase <-done:\n\t\tt.Log(\"event channel closed\")\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"event stream was not closed after 1 second\")\n\t}\n}\n\nfunc TestInotifyClose(t *testing.T) {\n\twatcher, _ := NewWatcher()\n\twatcher.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\twatcher.Close()\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(50 * time.Millisecond):\n\t\tt.Fatal(\"double Close() test failed: second Close() call didn't return\")\n\t}\n\n\terr := watcher.Watch(os.TempDir())\n\tif err == nil {\n\t\tt.Fatal(\"expected error on Watch() after Close(), got nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage machine\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/cert\"\n\t\"github.com\/docker\/machine\/libmachine\/check\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\/plugin\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\/plugin\/localbinary\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\t\"github.com\/docker\/machine\/libmachine\/persist\"\n\tlmssh \"github.com\/docker\/machine\/libmachine\/ssh\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n\t\"github.com\/docker\/machine\/libmachine\/version\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/registry\"\n)\n\n\/\/ NewRPCClient gets a new client.\nfunc NewRPCClient(storePath, certsDir string) libmachine.API {\n\tc := libmachine.NewClient(storePath, certsDir)\n\tc.SSHClientType = lmssh.Native\n\treturn c\n}\n\n\/\/ NewAPIClient gets a new client.\nfunc NewAPIClient() (libmachine.API, error) {\n\tstorePath := localpath.MiniPath()\n\tcertsDir := localpath.MakeMiniPath(\"certs\")\n\n\treturn &LocalClient{\n\t\tcertsDir: certsDir,\n\t\tstorePath: storePath,\n\t\tFilestore: persist.NewFilestore(storePath, certsDir, certsDir),\n\t\tlegacyClient: NewRPCClient(storePath, certsDir),\n\t}, nil\n}\n\n\/\/ LocalClient is a non-RPC implementation\n\/\/ of the libmachine API\ntype LocalClient struct {\n\tcertsDir string\n\tstorePath string\n\t*persist.Filestore\n\tlegacyClient libmachine.API\n}\n\n\/\/ NewHost creates a new Host\nfunc (api *LocalClient) NewHost(drvName string, rawDriver []byte) (*host.Host, error) {\n\tdef := registry.Driver(drvName)\n\tif def.Empty() {\n\t\treturn nil, fmt.Errorf(\"driver %q does not exist\", drvName)\n\t}\n\tif def.Init == nil {\n\t\treturn api.legacyClient.NewHost(drvName, rawDriver)\n\t}\n\td := def.Init()\n\terr := json.Unmarshal(rawDriver, d)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Error getting driver %s\", string(rawDriver))\n\t}\n\n\treturn &host.Host{\n\t\tConfigVersion: version.ConfigVersion,\n\t\tName: d.GetMachineName(),\n\t\tDriver: d,\n\t\tDriverName: d.DriverName(),\n\t\tHostOptions: &host.Options{\n\t\t\tAuthOptions: &auth.Options{\n\t\t\t\tCertDir: api.certsDir,\n\t\t\t\tCaCertPath: filepath.Join(api.certsDir, \"ca.pem\"),\n\t\t\t\tCaPrivateKeyPath: filepath.Join(api.certsDir, \"ca-key.pem\"),\n\t\t\t\tClientCertPath: filepath.Join(api.certsDir, \"cert.pem\"),\n\t\t\t\tClientKeyPath: filepath.Join(api.certsDir, \"key.pem\"),\n\t\t\t\tServerCertPath: filepath.Join(api.GetMachinesDir(), \"server.pem\"),\n\t\t\t\tServerKeyPath: filepath.Join(api.GetMachinesDir(), \"server-key.pem\"),\n\t\t\t},\n\t\t\tEngineOptions: &engine.Options{\n\t\t\t\tStorageDriver: \"overlay2\",\n\t\t\t\tTLSVerify: true,\n\t\t\t},\n\t\t\tSwarmOptions: &swarm.Options{},\n\t\t},\n\t}, nil\n}\n\n\/\/ Load a new client, creating driver\nfunc (api *LocalClient) Load(name string) (*host.Host, error) {\n\th, err := api.Filestore.Load(name)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"filestore %q\", name)\n\t}\n\n\tdef := registry.Driver(h.DriverName)\n\tif def.Empty() {\n\t\treturn nil, fmt.Errorf(\"driver %q does not exist\", h.DriverName)\n\t}\n\tif def.Init == nil {\n\t\treturn api.legacyClient.Load(name)\n\t}\n\th.Driver = def.Init()\n\treturn h, json.Unmarshal(h.RawDriver, h.Driver)\n}\n\n\/\/ Close closes the client\nfunc (api *LocalClient) Close() error {\n\tif api.legacyClient != nil {\n\t\treturn api.legacyClient.Close()\n\t}\n\treturn nil\n}\n\n\/\/ CommandRunner returns best available command runner for this host\nfunc CommandRunner(h *host.Host) (command.Runner, error) {\n\tif h.DriverName == driver.Mock {\n\t\treturn &command.FakeCommandRunner{}, nil\n\t}\n\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\treturn command.NewExecRunner(), nil\n\t}\n\n\tcr, err := SSHRunner(h)\n\n\t\/\/ We have a slower backup runner, so we may as well use it.\n\tif err != nil && driver.IsKIC(h.Driver.DriverName()) {\n\t\tglog.Errorf(\"SSH runner failed, using KIC runner as backup: %v\", err)\n\t\treturn command.NewKICRunner(h.Name, h.Driver.DriverName()), nil\n\t}\n\n\treturn cr, err\n}\n\n\/\/ SSHRunner returns an SSH runner for the host\nfunc SSHRunner(h *host.Host) (command.Runner, error) {\n\treturn command.NewSSHRunner(h.Driver), nil\n}\n\n\/\/ Create creates the host\nfunc (api *LocalClient) Create(h *host.Host) error {\n\tglog.Infof(\"LocalClient.Create starting\")\n\tstart := time.Now()\n\tdefer func() {\n\t\tglog.Infof(\"LocalClient.Create took %s\", time.Since(start))\n\t}()\n\n\tdef := registry.Driver(h.DriverName)\n\tif def.Empty() {\n\t\treturn fmt.Errorf(\"driver %q does not exist\", h.DriverName)\n\t}\n\tif def.Init == nil {\n\t\t\/\/ NOTE: This will call provision.DetectProvisioner\n\t\treturn api.legacyClient.Create(h)\n\t}\n\n\tsteps := []struct {\n\t\tname string\n\t\tf func() error\n\t}{\n\t\t{\n\t\t\t\"bootstrapping certificates\",\n\t\t\tfunc() error { return cert.BootstrapCertificates(h.AuthOptions()) },\n\t\t},\n\t\t{\n\t\t\t\"precreate\",\n\t\t\th.Driver.PreCreateCheck,\n\t\t},\n\t\t{\n\t\t\t\"saving\",\n\t\t\tfunc() error {\n\t\t\t\treturn api.Save(h)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"creating\",\n\t\t\th.Driver.Create,\n\t\t},\n\t\t{\n\t\t\t\"waiting\",\n\t\t\tfunc() error {\n\t\t\t\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"provisioning\",\n\t\t\tfunc() error {\n\t\t\t\t\/\/ Skippable because we don't reconfigure Docker?\n\t\t\t\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn provisionDockerMachine(h)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, step := range steps {\n\n\t\tif err := step.f(); err != nil {\n\t\t\treturn errors.Wrap(err, step.name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ StartDriver starts the driver\nfunc StartDriver() {\n\tcert.SetCertGenerator(&CertGenerator{})\n\tcheck.DefaultConnChecker = &ConnChecker{}\n\tif os.Getenv(localbinary.PluginEnvKey) == localbinary.PluginEnvVal {\n\t\tregisterDriver(os.Getenv(localbinary.PluginEnvDriverName))\n\t}\n\n\tlocalbinary.CurrentBinaryIsDockerMachine = true\n}\n\n\/\/ ConnChecker can check the connection\ntype ConnChecker struct {\n}\n\n\/\/ Check checks the connection\nfunc (cc *ConnChecker) Check(h *host.Host, swarm bool) (string, *auth.Options, error) {\n\tauthOptions := h.AuthOptions()\n\tdockerHost, err := h.Driver.GetURL()\n\tif err != nil {\n\t\treturn \"\", &auth.Options{}, err\n\t}\n\treturn dockerHost, authOptions, nil\n}\n\n\/\/ CertGenerator is used to override the default machine CertGenerator with a longer timeout.\ntype CertGenerator struct {\n\tcert.X509CertGenerator\n}\n\n\/\/ ValidateCertificate is a reimplementation of the default generator with a longer timeout.\nfunc (cg *CertGenerator) ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) {\n\ttlsConfig, err := cg.ReadTLSConfig(addr, authOptions)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdialer := &net.Dialer{\n\t\tTimeout: time.Second * 40,\n\t}\n\n\t_, err = tls.DialWithDialer(dialer, \"tcp\", addr, tlsConfig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc registerDriver(drvName string) {\n\tdef := registry.Driver(drvName)\n\tif def.Empty() {\n\t\texit.UsageT(\"unsupported or missing driver: {{.name}}\", out.V{\"name\": drvName})\n\t}\n\tplugin.RegisterDriver(def.Init())\n}\n<commit_msg>Remove unused code<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage machine\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/cert\"\n\t\"github.com\/docker\/machine\/libmachine\/check\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\/plugin\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\/plugin\/localbinary\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\t\"github.com\/docker\/machine\/libmachine\/persist\"\n\tlmssh \"github.com\/docker\/machine\/libmachine\/ssh\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n\t\"github.com\/docker\/machine\/libmachine\/version\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/registry\"\n)\n\n\/\/ NewRPCClient gets a new client.\nfunc NewRPCClient(storePath, certsDir string) libmachine.API {\n\tc := libmachine.NewClient(storePath, certsDir)\n\tc.SSHClientType = lmssh.Native\n\treturn c\n}\n\n\/\/ NewAPIClient gets a new client.\nfunc NewAPIClient() (libmachine.API, error) {\n\tstorePath := localpath.MiniPath()\n\tcertsDir := localpath.MakeMiniPath(\"certs\")\n\n\treturn &LocalClient{\n\t\tcertsDir: certsDir,\n\t\tstorePath: storePath,\n\t\tFilestore: persist.NewFilestore(storePath, certsDir, certsDir),\n\t\tlegacyClient: NewRPCClient(storePath, certsDir),\n\t}, nil\n}\n\n\/\/ LocalClient is a non-RPC implementation\n\/\/ of the libmachine API\ntype LocalClient struct {\n\tcertsDir string\n\tstorePath string\n\t*persist.Filestore\n\tlegacyClient libmachine.API\n}\n\n\/\/ NewHost creates a new Host\nfunc (api *LocalClient) NewHost(drvName string, rawDriver []byte) (*host.Host, error) {\n\tdef := registry.Driver(drvName)\n\tif def.Empty() {\n\t\treturn nil, fmt.Errorf(\"driver %q does not exist\", drvName)\n\t}\n\tif def.Init == nil {\n\t\treturn api.legacyClient.NewHost(drvName, rawDriver)\n\t}\n\td := def.Init()\n\terr := json.Unmarshal(rawDriver, d)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Error getting driver %s\", string(rawDriver))\n\t}\n\n\treturn &host.Host{\n\t\tConfigVersion: version.ConfigVersion,\n\t\tName: d.GetMachineName(),\n\t\tDriver: d,\n\t\tDriverName: d.DriverName(),\n\t\tHostOptions: &host.Options{\n\t\t\tAuthOptions: &auth.Options{\n\t\t\t\tCertDir: api.certsDir,\n\t\t\t\tCaCertPath: filepath.Join(api.certsDir, \"ca.pem\"),\n\t\t\t\tCaPrivateKeyPath: filepath.Join(api.certsDir, \"ca-key.pem\"),\n\t\t\t\tClientCertPath: filepath.Join(api.certsDir, \"cert.pem\"),\n\t\t\t\tClientKeyPath: filepath.Join(api.certsDir, \"key.pem\"),\n\t\t\t\tServerCertPath: filepath.Join(api.GetMachinesDir(), \"server.pem\"),\n\t\t\t\tServerKeyPath: filepath.Join(api.GetMachinesDir(), \"server-key.pem\"),\n\t\t\t},\n\t\t\tEngineOptions: &engine.Options{\n\t\t\t\tStorageDriver: \"overlay2\",\n\t\t\t\tTLSVerify: true,\n\t\t\t},\n\t\t\tSwarmOptions: &swarm.Options{},\n\t\t},\n\t}, nil\n}\n\n\/\/ Load a new client, creating driver\nfunc (api *LocalClient) Load(name string) (*host.Host, error) {\n\th, err := api.Filestore.Load(name)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"filestore %q\", name)\n\t}\n\n\tdef := registry.Driver(h.DriverName)\n\tif def.Empty() {\n\t\treturn nil, fmt.Errorf(\"driver %q does not exist\", h.DriverName)\n\t}\n\tif def.Init == nil {\n\t\treturn api.legacyClient.Load(name)\n\t}\n\th.Driver = def.Init()\n\treturn h, json.Unmarshal(h.RawDriver, h.Driver)\n}\n\n\/\/ Close closes the client\nfunc (api *LocalClient) Close() error {\n\tif api.legacyClient != nil {\n\t\treturn api.legacyClient.Close()\n\t}\n\treturn nil\n}\n\n\/\/ CommandRunner returns best available command runner for this host\nfunc CommandRunner(h *host.Host) (command.Runner, error) {\n\tif h.DriverName == driver.Mock {\n\t\treturn &command.FakeCommandRunner{}, nil\n\t}\n\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\treturn command.NewExecRunner(), nil\n\t}\n\n\treturn command.NewSSHRunner(h.Driver), nil\n}\n\n\/\/ Create creates the host\nfunc (api *LocalClient) Create(h *host.Host) error {\n\tglog.Infof(\"LocalClient.Create starting\")\n\tstart := time.Now()\n\tdefer func() {\n\t\tglog.Infof(\"LocalClient.Create took %s\", time.Since(start))\n\t}()\n\n\tdef := registry.Driver(h.DriverName)\n\tif def.Empty() {\n\t\treturn fmt.Errorf(\"driver %q does not exist\", h.DriverName)\n\t}\n\tif def.Init == nil {\n\t\t\/\/ NOTE: This will call provision.DetectProvisioner\n\t\treturn api.legacyClient.Create(h)\n\t}\n\n\tsteps := []struct {\n\t\tname string\n\t\tf func() error\n\t}{\n\t\t{\n\t\t\t\"bootstrapping certificates\",\n\t\t\tfunc() error { return cert.BootstrapCertificates(h.AuthOptions()) },\n\t\t},\n\t\t{\n\t\t\t\"precreate\",\n\t\t\th.Driver.PreCreateCheck,\n\t\t},\n\t\t{\n\t\t\t\"saving\",\n\t\t\tfunc() error {\n\t\t\t\treturn api.Save(h)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"creating\",\n\t\t\th.Driver.Create,\n\t\t},\n\t\t{\n\t\t\t\"waiting\",\n\t\t\tfunc() error {\n\t\t\t\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn mcnutils.WaitFor(drivers.MachineInState(h.Driver, state.Running))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"provisioning\",\n\t\t\tfunc() error {\n\t\t\t\t\/\/ Skippable because we don't reconfigure Docker?\n\t\t\t\tif driver.BareMetal(h.Driver.DriverName()) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn provisionDockerMachine(h)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, step := range steps {\n\n\t\tif err := step.f(); err != nil {\n\t\t\treturn errors.Wrap(err, step.name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ StartDriver starts the driver\nfunc StartDriver() {\n\tcert.SetCertGenerator(&CertGenerator{})\n\tcheck.DefaultConnChecker = &ConnChecker{}\n\tif os.Getenv(localbinary.PluginEnvKey) == localbinary.PluginEnvVal {\n\t\tregisterDriver(os.Getenv(localbinary.PluginEnvDriverName))\n\t}\n\n\tlocalbinary.CurrentBinaryIsDockerMachine = true\n}\n\n\/\/ ConnChecker can check the connection\ntype ConnChecker struct {\n}\n\n\/\/ Check checks the connection\nfunc (cc *ConnChecker) Check(h *host.Host, swarm bool) (string, *auth.Options, error) {\n\tauthOptions := h.AuthOptions()\n\tdockerHost, err := h.Driver.GetURL()\n\tif err != nil {\n\t\treturn \"\", &auth.Options{}, err\n\t}\n\treturn dockerHost, authOptions, nil\n}\n\n\/\/ CertGenerator is used to override the default machine CertGenerator with a longer timeout.\ntype CertGenerator struct {\n\tcert.X509CertGenerator\n}\n\n\/\/ ValidateCertificate is a reimplementation of the default generator with a longer timeout.\nfunc (cg *CertGenerator) ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) {\n\ttlsConfig, err := cg.ReadTLSConfig(addr, authOptions)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdialer := &net.Dialer{\n\t\tTimeout: time.Second * 40,\n\t}\n\n\t_, err = tls.DialWithDialer(dialer, \"tcp\", addr, tlsConfig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc registerDriver(drvName string) {\n\tdef := registry.Driver(drvName)\n\tif def.Empty() {\n\t\texit.UsageT(\"unsupported or missing driver: {{.name}}\", out.V{\"name\": drvName})\n\t}\n\tplugin.RegisterDriver(def.Init())\n}\n<|endoftext|>"} {"text":"<commit_before>package http\n\nimport (\n\t\"strings\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/Cepave\/query\/g\"\n)\n\ntype Dto struct {\n\tMsg string `json:\"msg\"`\n\tData interface{} `json:\"data\"`\n}\n\nfunc InitDatabase() {\n\t\/\/ set default database\n\tconfig := g.Config()\n\torm.RegisterDataBase(\"default\", \"mysql\", config.Db.Addr, config.Db.Idle, config.Db.Max)\n\t\/\/ register model\n\torm.RegisterModel(new(Endpoint))\n\n\tstrConn := strings.Replace(config.Db.Addr, \"graph\", \"falcon_portal\", 1)\n\torm.RegisterDataBase(\"falcon_portal\", \"mysql\", strConn, config.Db.Idle, config.Db.Max)\n\torm.RegisterModel(new(Grp), new(Grp_host), new(Grp_tpl), new(Tpl))\n\n\tstrConn = strings.Replace(config.Db.Addr, \"graph\", \"grafana\", 1)\n\torm.RegisterDataBase(\"grafana\", \"mysql\", strConn, config.Db.Idle, config.Db.Max)\n\torm.RegisterModel(new(Province), new(City), new(Idc))\n\n\tif config.Debug == true {\n\t\torm.Debug = true\n\t}\n}\n\nfunc Start() {\n\tif !g.Config().Http.Enable {\n\t\tlog.Println(\"http.Start warning, not enable\")\n\t\treturn\n\t}\n\n\t\/\/ config http routes\n\tconfigCommonRoutes()\n\tconfigProcHttpRoutes()\n\tconfigGraphRoutes()\n\tconfigApiRoutes()\n\tconfigGrafanaRoutes()\n\tconfigZabbixRoutes()\n\n\t\/\/ start mysql database\n\tInitDatabase()\n\n\t\/\/ start http server\n\taddr := g.Config().Http.Listen\n\ts := &http.Server{\n\t\tAddr: addr,\n\t\tMaxHeaderBytes: 1 << 30,\n\t}\n\n\tlog.Println(\"http.Start ok, listening on\", addr)\n\tlog.Fatalln(s.ListenAndServe())\n}\n\nfunc RenderJson(w http.ResponseWriter, v interface{}) {\n\tbs, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.Write(bs)\n}\n\nfunc RenderDataJson(w http.ResponseWriter, data interface{}) {\n\tRenderJson(w, Dto{Msg: \"success\", Data: data})\n}\n\nfunc RenderMsgJson(w http.ResponseWriter, msg string) {\n\tRenderJson(w, map[string]string{\"msg\": msg})\n}\n\nfunc AutoRender(w http.ResponseWriter, data interface{}, err error) {\n\tif err != nil {\n\t\tRenderMsgJson(w, err.Error())\n\t\treturn\n\t}\n\tRenderDataJson(w, data)\n}\n\nfunc StdRender(w http.ResponseWriter, data interface{}, err error) {\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tRenderMsgJson(w, err.Error())\n\t\treturn\n\t}\n\tRenderJson(w, data)\n}\n<commit_msg>[OWL-262] update func InitDatabase()<commit_after>package http\n\nimport (\n\t\"strings\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/Cepave\/query\/g\"\n)\n\ntype Dto struct {\n\tMsg string `json:\"msg\"`\n\tData interface{} `json:\"data\"`\n}\n\nfunc InitDatabase() {\n\t\/\/ set default database\n\tconfig := g.Config()\n\torm.RegisterDataBase(\"default\", \"mysql\", config.Db.Addr, config.Db.Idle, config.Db.Max)\n\t\/\/ register model\n\torm.RegisterModel(new(Host), new(Grp), new(Grp_host), new(Grp_tpl), new(Tpl))\n\n\tstrConn := strings.Replace(config.Db.Addr, \"graph\", \"grafana\", 1)\n\torm.RegisterDataBase(\"grafana\", \"mysql\", strConn, config.Db.Idle, config.Db.Max)\n\torm.RegisterModel(new(Province), new(City), new(Idc))\n\n\tif config.Debug == true {\n\t\torm.Debug = true\n\t}\n}\n\nfunc Start() {\n\tif !g.Config().Http.Enable {\n\t\tlog.Println(\"http.Start warning, not enable\")\n\t\treturn\n\t}\n\n\t\/\/ config http routes\n\tconfigCommonRoutes()\n\tconfigProcHttpRoutes()\n\tconfigGraphRoutes()\n\tconfigApiRoutes()\n\tconfigGrafanaRoutes()\n\tconfigZabbixRoutes()\n\n\t\/\/ start mysql database\n\tInitDatabase()\n\n\t\/\/ start http server\n\taddr := g.Config().Http.Listen\n\ts := &http.Server{\n\t\tAddr: addr,\n\t\tMaxHeaderBytes: 1 << 30,\n\t}\n\n\tlog.Println(\"http.Start ok, listening on\", addr)\n\tlog.Fatalln(s.ListenAndServe())\n}\n\nfunc RenderJson(w http.ResponseWriter, v interface{}) {\n\tbs, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.Write(bs)\n}\n\nfunc RenderDataJson(w http.ResponseWriter, data interface{}) {\n\tRenderJson(w, Dto{Msg: \"success\", Data: data})\n}\n\nfunc RenderMsgJson(w http.ResponseWriter, msg string) {\n\tRenderJson(w, map[string]string{\"msg\": msg})\n}\n\nfunc AutoRender(w http.ResponseWriter, data interface{}, err error) {\n\tif err != nil {\n\t\tRenderMsgJson(w, err.Error())\n\t\treturn\n\t}\n\tRenderDataJson(w, data)\n}\n\nfunc StdRender(w http.ResponseWriter, data interface{}, err error) {\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tRenderMsgJson(w, err.Error())\n\t\treturn\n\t}\n\tRenderJson(w, data)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ File: .\/blockfreight\/bft\/bf_tx\/bf_tx.go\n\/\/ Summary: Application code for Blockfreight™ | The blockchain of global freight.\n\/\/ License: MIT License\n\/\/ Company: Blockfreight, Inc.\n\/\/ Author: Julian Nunez, Neil Tran, Julian Smith & contributors\n\/\/ Site: https:\/\/blockfreight.com\n\/\/ Support: <support@blockfreight.com>\n\n\/\/ Copyright 2017 Blockfreight, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the \"Software\"),\n\/\/ to deal in the Software without restriction, including without limitation\n\/\/ the rights to use, copy, modify, merge, publish, distribute, sublicense,\n\/\/ and\/or sell copies of the Software, and to permit persons to whom the\n\/\/ Software is furnished to do so, subject to the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n\/\/ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n\/\/ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\/\/ =================================================================================================================================================\n\/\/ =================================================================================================================================================\n\/\/\n\/\/ BBBBBBBBBBBb lll kkk ffff iii hhh ttt\n\/\/ BBBB``````BBBB lll kkk fff ``` hhh ttt\n\/\/ BBBB BBBB lll oooooo ccccccc kkk kkkk fffffff rrr rrr eeeee iii gggggg ggg hhh hhhhh tttttttt\n\/\/ BBBBBBBBBBBB lll ooo oooo ccc ccc kkk kkk fffffff rrrrrrrr eee eeee iii gggg ggggg hhhh hhhh tttttttt\n\/\/ BBBBBBBBBBBBBB lll ooo ooo ccc kkkkkkk fff rrrr eeeeeeeeeeeee iii gggg ggg hhh hhh ttt\n\/\/ BBBB BBB lll ooo ooo ccc kkkk kkkk fff rrr eeeeeeeeeeeee iii ggg ggg hhh hhh ttt\n\/\/ BBBB BBBB lll oooo oooo cccc ccc kkk kkkk fff rrr eee eee iii ggg gggg hhh hhh tttt ....\n\/\/ BBBBBBBBBBBBB lll oooooooo ccccccc kkk kkkk fff rrr eeeeeeeee iii gggggg ggg hhh hhh ttttt ....\n\/\/ ggg ggg\n\/\/ Blockfreight™ | The blockchain of global freight. ggggggggg\n\/\/\n\/\/ =================================================================================================================================================\n\/\/ =================================================================================================================================================\n\n\/\/ Package bf_tx is a package that defines the Blockfreight™ Transaction (BF_TX) transaction standard \n\/\/ and provides some useful functions to work with the BF_TX.\npackage bf_tx\n\nimport (\n \/\/ =======================\n \/\/ Golang Standard library\n \/\/ =======================\n \"crypto\/ecdsa\" \/\/ Implements the Elliptic Curve Digital Signature Algorithm, as defined in FIPS 186-3.\n \"encoding\/json\" \/\/ Implements encoding and decoding of JSON as defined in RFC 4627.\n\n \/\/ ======================\n \/\/ Blockfreight™ packages\n \/\/ ======================\n \"github.com\/blockfreight\/blockfreight-alpha\/blockfreight\/bft\/common\" \/\/ Implements common functions for Blockfreight™\n)\n\n\/\/ SetBF_TX receives the path of a JSON, reads it and returns the BF_TX structure with all attributes. \nfunc SetBF_TX(jsonpath string) BF_TX {\n var bf_tx BF_TX\n json.Unmarshal(common.ReadJSON(jsonpath), &bf_tx)\n return bf_tx\n}\n\n\/\/ BF_TXContent receives the BF_TX structure, applies it the json.Marshal procedure and return the content of the BF_TX JSON.\nfunc BF_TXContent(bf_tx BF_TX) string {\n jsonContent, _ := json.Marshal(bf_tx)\n return string(jsonContent)\n}\n\n\/\/ BF_TX structure respresents an logical abstraction of a Blockfreight™ Transaction.\ntype BF_TX struct {\n \/\/ =========================\n \/\/ Bill of Lading attributes\n \/\/ =========================\n Type string\n Properties Properties\n\n \/\/ ===================================\n \/\/ Blockfreight Transaction attributes\n \/\/ ===================================\n Id int\n PrivateKey ecdsa.PrivateKey\n Signhash []uint8\n Signature string\n Verified bool\n \n}\n\ntype Properties struct {\n Shipper Shipper\n Bol_Num BolNum\n Ref_Num RefNum\n Consignee Consignee\n Vessel Vessel\n Port_of_Loading PortLoading\n Port_of_Discharge PortDischarge\n Notify_Address NotifyAddress\n Desc_of_Goods DescGoods\n Gross_Weight GrossWeight\n Freight_Payable_Amt FreightPayableAmt\n Freight_Adv_Amt FreightAdvAmt\n General_Instructions GeneralInstructions\n Date_Shipped Date\n Issue_Details IssueDetails\n Num_Bol NumBol\n Master_Info MasterInfo\n Agent_for_Master AgentMaster\n Agent_for_Owner AgentOwner\n}\n\ntype Shipper struct {\n Type string\n}\n\ntype BolNum struct {\n Type int\n}\n\ntype RefNum struct {\n Type int\n}\n\ntype Consignee struct {\n Type string \/\/Null\n}\n\ntype Vessel struct {\n Type int\n}\n\ntype PortLoading struct {\n Type int\n}\n\ntype PortDischarge struct {\n Type int\n}\n\ntype NotifyAddress struct {\n Type string\n}\n\ntype DescGoods struct {\n Type string\n}\n\ntype GrossWeight struct {\n Type int\n}\n\ntype FreightPayableAmt struct {\n Type int\n}\n\ntype FreightAdvAmt struct {\n Type int\n}\n\ntype GeneralInstructions struct {\n Type string\n}\n\ntype Date struct {\n Type int\n Format string\n}\n\ntype IssueDetails struct {\n Type string\n Properties IssueDetailsProperties\n}\n\ntype IssueDetailsProperties struct {\n Place_of_Issue PlaceIssue\n Date_of_Issue Date\n}\n\ntype PlaceIssue struct {\n Type string\n}\n\ntype NumBol struct {\n Type int\n}\n\ntype MasterInfo struct {\n Type string\n Properties MasterInfoProperties\n}\n\ntype MasterInfoProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n}\n\ntype AgentMaster struct {\n Type string\n Properties AgentMasterProperties\n}\n\ntype AgentMasterProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n}\n\ntype AgentOwner struct {\n Type string\n Properties AgentOwnerProperties\n}\n\ntype AgentOwnerProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n Conditions_for_Carriage ConditionsCarriage\n}\n\ntype FirstName struct {\n Type string\n}\n\ntype LastName struct {\n Type string\n}\n\ntype Sig struct {\n Type string\n}\n\ntype ConditionsCarriage struct {\n Type string\n}\n\n\/\/ =================================================\n\/\/ Blockfreight™ | The blockchain of global freight.\n\/\/ =================================================\n\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBB BBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBB BBBBB\n\/\/ BBBBBBB BBBB BBBBB\n\/\/ BBBBBBB BBBBBBB BBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\n\/\/ ==================================================\n\/\/ Blockfreight™ | The blockchain for global freight.\n\/\/ ==================================================\n<commit_msg>Add attribute Amendment to BF_TX structure<commit_after>\/\/ File: .\/blockfreight\/bft\/bf_tx\/bf_tx.go\n\/\/ Summary: Application code for Blockfreight™ | The blockchain of global freight.\n\/\/ License: MIT License\n\/\/ Company: Blockfreight, Inc.\n\/\/ Author: Julian Nunez, Neil Tran, Julian Smith & contributors\n\/\/ Site: https:\/\/blockfreight.com\n\/\/ Support: <support@blockfreight.com>\n\n\/\/ Copyright 2017 Blockfreight, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the \"Software\"),\n\/\/ to deal in the Software without restriction, including without limitation\n\/\/ the rights to use, copy, modify, merge, publish, distribute, sublicense,\n\/\/ and\/or sell copies of the Software, and to permit persons to whom the\n\/\/ Software is furnished to do so, subject to the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n\/\/ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n\/\/ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\/\/ =================================================================================================================================================\n\/\/ =================================================================================================================================================\n\/\/\n\/\/ BBBBBBBBBBBb lll kkk ffff iii hhh ttt\n\/\/ BBBB``````BBBB lll kkk fff ``` hhh ttt\n\/\/ BBBB BBBB lll oooooo ccccccc kkk kkkk fffffff rrr rrr eeeee iii gggggg ggg hhh hhhhh tttttttt\n\/\/ BBBBBBBBBBBB lll ooo oooo ccc ccc kkk kkk fffffff rrrrrrrr eee eeee iii gggg ggggg hhhh hhhh tttttttt\n\/\/ BBBBBBBBBBBBBB lll ooo ooo ccc kkkkkkk fff rrrr eeeeeeeeeeeee iii gggg ggg hhh hhh ttt\n\/\/ BBBB BBB lll ooo ooo ccc kkkk kkkk fff rrr eeeeeeeeeeeee iii ggg ggg hhh hhh ttt\n\/\/ BBBB BBBB lll oooo oooo cccc ccc kkk kkkk fff rrr eee eee iii ggg gggg hhh hhh tttt ....\n\/\/ BBBBBBBBBBBBB lll oooooooo ccccccc kkk kkkk fff rrr eeeeeeeee iii gggggg ggg hhh hhh ttttt ....\n\/\/ ggg ggg\n\/\/ Blockfreight™ | The blockchain of global freight. ggggggggg\n\/\/\n\/\/ =================================================================================================================================================\n\/\/ =================================================================================================================================================\n\n\/\/ Package bf_tx is a package that defines the Blockfreight™ Transaction (BF_TX) transaction standard \n\/\/ and provides some useful functions to work with the BF_TX.\npackage bf_tx\n\nimport (\n \/\/ =======================\n \/\/ Golang Standard library\n \/\/ =======================\n \"crypto\/ecdsa\" \/\/ Implements the Elliptic Curve Digital Signature Algorithm, as defined in FIPS 186-3.\n \"encoding\/json\" \/\/ Implements encoding and decoding of JSON as defined in RFC 4627.\n\n \/\/ ====================\n \/\/ Third-party packages\n \/\/ ====================\n \"github.com\/davecgh\/go-spew\/spew\" \/\/ Implements a deep pretty printer for Go data structures to aid in debugging.\n\n \/\/ ======================\n \/\/ Blockfreight™ packages\n \/\/ ======================\n \"github.com\/blockfreight\/blockfreight-alpha\/blockfreight\/bft\/common\" \/\/ Implements common functions for Blockfreight™\n)\n\n\/\/ SetBF_TX receives the path of a JSON, reads it and returns the BF_TX structure with all attributes. \nfunc SetBF_TX(jsonpath string) BF_TX {\n var bf_tx BF_TX\n json.Unmarshal(common.ReadJSON(jsonpath), &bf_tx)\n return bf_tx\n}\n\n\/\/ BF_TXContent receives the BF_TX structure, applies it the json.Marshal procedure and return the content of the BF_TX JSON.\nfunc BF_TXContent(bf_tx BF_TX) string {\n jsonContent, _ := json.Marshal(bf_tx)\n return string(jsonContent)\n}\n\n\/\/ PrintBF_TX receives a BF_TX and prints it clearly.\nfunc PrintBF_TX(bf_tx BF_TX){\n spew.Dump(bf_tx)\n}\n\n\/\/ BF_TX structure respresents an logical abstraction of a Blockfreight™ Transaction.\ntype BF_TX struct {\n \/\/ =========================\n \/\/ Bill of Lading attributes\n \/\/ =========================\n Type string\n Properties Properties\n\n \/\/ ===================================\n \/\/ Blockfreight Transaction attributes\n \/\/ ===================================\n Id int\n PrivateKey ecdsa.PrivateKey\n Signhash []uint8\n Signature string\n Verified bool\n Transmitted bool\n Amendment int\n}\n\ntype Properties struct {\n Shipper Shipper\n Bol_Num BolNum\n Ref_Num RefNum\n Consignee Consignee\n Vessel Vessel\n Port_of_Loading PortLoading\n Port_of_Discharge PortDischarge\n Notify_Address NotifyAddress\n Desc_of_Goods DescGoods\n Gross_Weight GrossWeight\n Freight_Payable_Amt FreightPayableAmt\n Freight_Adv_Amt FreightAdvAmt\n General_Instructions GeneralInstructions\n Date_Shipped Date\n Issue_Details IssueDetails\n Num_Bol NumBol\n Master_Info MasterInfo\n Agent_for_Master AgentMaster\n Agent_for_Owner AgentOwner\n}\n\ntype Shipper struct {\n Type string\n}\n\ntype BolNum struct {\n Type int\n}\n\ntype RefNum struct {\n Type int\n}\n\ntype Consignee struct {\n Type string \/\/Null\n}\n\ntype Vessel struct {\n Type int\n}\n\ntype PortLoading struct {\n Type int\n}\n\ntype PortDischarge struct {\n Type int\n}\n\ntype NotifyAddress struct {\n Type string\n}\n\ntype DescGoods struct {\n Type string\n}\n\ntype GrossWeight struct {\n Type int\n}\n\ntype FreightPayableAmt struct {\n Type int\n}\n\ntype FreightAdvAmt struct {\n Type int\n}\n\ntype GeneralInstructions struct {\n Type string\n}\n\ntype Date struct {\n Type int\n Format string\n}\n\ntype IssueDetails struct {\n Type string\n Properties IssueDetailsProperties\n}\n\ntype IssueDetailsProperties struct {\n Place_of_Issue PlaceIssue\n Date_of_Issue Date\n}\n\ntype PlaceIssue struct {\n Type string\n}\n\ntype NumBol struct {\n Type int\n}\n\ntype MasterInfo struct {\n Type string\n Properties MasterInfoProperties\n}\n\ntype MasterInfoProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n}\n\ntype AgentMaster struct {\n Type string\n Properties AgentMasterProperties\n}\n\ntype AgentMasterProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n}\n\ntype AgentOwner struct {\n Type string\n Properties AgentOwnerProperties\n}\n\ntype AgentOwnerProperties struct {\n First_Name FirstName\n Last_Name LastName\n Sig Sig\n Conditions_for_Carriage ConditionsCarriage\n}\n\ntype FirstName struct {\n Type string\n}\n\ntype LastName struct {\n Type string\n}\n\ntype Sig struct {\n Type string\n}\n\ntype ConditionsCarriage struct {\n Type string\n}\n\n\/\/ =================================================\n\/\/ Blockfreight™ | The blockchain of global freight.\n\/\/ =================================================\n\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBB BBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBBB BBBBBBBBBBBBBB\n\/\/ BBBBBBB BBBBBBBBB BBB BBBBB\n\/\/ BBBBBBB BBBB BBBBB\n\/\/ BBBBBBB BBBBBBB BBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\/\/ BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\n\/\/ ==================================================\n\/\/ Blockfreight™ | The blockchain for global freight.\n\/\/ ==================================================\n<|endoftext|>"} {"text":"<commit_before>package tmp_manager\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flant\/werf\/pkg\/dappdeps\"\n\t\"github.com\/flant\/werf\/pkg\/docker\"\n\t\"github.com\/flant\/werf\/pkg\/lock\"\n\t\"github.com\/flant\/werf\/pkg\/logger\"\n\t\"github.com\/flant\/werf\/pkg\/werf\"\n\t\"github.com\/otiai10\/copy\"\n)\n\nconst (\n\tprojectsDir = \"projects\"\n\tdockerConfigsDir = \"docker_configs\"\n)\n\nfunc GetCreatedTmpDirs() string {\n\treturn filepath.Join(werf.GetServiceDir(), \"tmp\", \"created\")\n}\n\nfunc GetReleasedTmpDirs() string {\n\treturn filepath.Join(werf.GetServiceDir(), \"tmp\", \"released\")\n}\n\nfunc registerCreatedDir(newDir, createdDirs string) error {\n\tif err := os.MkdirAll(createdDirs, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"unable to create dir %s: %s\", createdDirs, err)\n\t}\n\n\tcreatedDir := filepath.Join(createdDirs, filepath.Base(newDir))\n\tif err := os.Symlink(newDir, createdDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc releaseDir(dir, createdDirs, releasedDirs string) error {\n\tif err := os.MkdirAll(releasedDirs, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"unable to create dir %s: %s\", releaseDir, err)\n\t}\n\n\treleasedDir := filepath.Join(releasedDirs, filepath.Base(dir))\n\tif err := os.Symlink(dir, releasedDir); err != nil {\n\t\treturn err\n\t}\n\n\tcreatedDir := filepath.Join(createdDirs, filepath.Base(dir))\n\tif err := os.Remove(createdDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc checkShouldRunGC() (bool, error) {\n\tvar releasedProjectsDirs []os.FileInfo\n\tvar releasedDockerConfigsDirs []os.FileInfo\n\n\treleasedProjectsDir := filepath.Join(GetReleasedTmpDirs(), projectsDir)\n\tif _, err := os.Stat(releasedProjectsDir); !os.IsNotExist(err) {\n\t\tvar err error\n\t\treleasedProjectsDirs, err = ioutil.ReadDir(releasedProjectsDir)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"unable to list released projects tmp dirs in %s: %s\", releasedProjectsDir, err)\n\t\t}\n\t}\n\n\treleasedDockerConfigsDir := filepath.Join(GetReleasedTmpDirs(), projectsDir)\n\tif _, err := os.Stat(releasedProjectsDir); !os.IsNotExist(err) {\n\t\tvar err error\n\t\treleasedDockerConfigsDirs, err = ioutil.ReadDir(releasedDockerConfigsDir)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"unable to list released docker configs tmp dirs in %s: %s\", releasedDockerConfigsDir, err)\n\t\t}\n\t}\n\n\tif len(releasedProjectsDirs) > 50 || len(releasedDockerConfigsDirs) > 50 {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc newTmpDir(prefix string) (string, error) {\n\tnewDir, err := ioutil.TempDir(werf.GetTmpDir(), prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tdir, err := filepath.EvalSymlinks(newDir)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"eval symlink %s failed: %s\", newDir, err)\n\t\t}\n\t\tnewDir = dir\n\t}\n\n\treturn newDir, nil\n}\n\nfunc CreateProjectDir() (string, error) {\n\tnewDir, err := newTmpDir(\"werf-project-data-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := registerCreatedDir(newDir, filepath.Join(GetCreatedTmpDirs(), projectsDir)); err != nil {\n\t\tos.RemoveAll(newDir)\n\t\treturn \"\", err\n\t}\n\n\tshouldRunGC, err := checkShouldRunGC()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif shouldRunGC {\n\t\terr := lock.WithLock(\"gc\", lock.LockOptions{}, GC)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"GC failed: %s\", err)\n\t\t}\n\t}\n\n\treturn newDir, nil\n}\n\nfunc ReleaseProjectDir(dir string) error {\n\treturn releaseDir(dir, filepath.Join(GetCreatedTmpDirs(), projectsDir), filepath.Join(GetReleasedTmpDirs(), projectsDir))\n}\n\nfunc CreateDockerConfigDir(fromDockerConfig string) (string, error) {\n\tnewDir, err := newTmpDir(\"werf-docker-config-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := os.Stat(fromDockerConfig); !os.IsNotExist(err) {\n\t\terr := copy.Copy(fromDockerConfig, newDir)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to copy %s to %s: %s\", fromDockerConfig, newDir, err)\n\t\t}\n\t}\n\n\tif err := registerCreatedDir(newDir, filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir)); err != nil {\n\t\tos.RemoveAll(newDir)\n\t\treturn \"\", err\n\t}\n\n\tshouldRunGC, err := checkShouldRunGC()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif shouldRunGC {\n\t\terr := lock.WithLock(\"gc\", lock.LockOptions{}, GC)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"GC failed: %s\", err)\n\t\t}\n\t}\n\n\treturn newDir, nil\n}\n\nfunc ReleaseDockerConfigDir(dir string) error {\n\treturn releaseDir(dir, filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir), filepath.Join(GetReleasedTmpDirs(), dockerConfigsDir))\n}\n\ntype DirDesc struct {\n\tFileInfo os.FileInfo\n\tLinkPath string\n}\n\nfunc getDirsDescs(dir string) ([]*DirDesc, error) {\n\tvar res []*DirDesc\n\n\tif _, err := os.Stat(dir); !os.IsNotExist(err) {\n\t\tinfos, err := ioutil.ReadDir(dir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to list dirs in %s: %s\", dir, err)\n\t\t}\n\n\t\tfor _, info := range infos {\n\t\t\tres = append(res, &DirDesc{FileInfo: info, LinkPath: filepath.Join(dir, info.Name())})\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc getReleasedDirsToRemove(releasedDirs []*DirDesc) ([]string, error) {\n\tvar res []string\n\n\tfor _, desc := range releasedDirs {\n\t\torigDir, err := os.Readlink(desc.LinkPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read link %s: %s\", desc.LinkPath, err)\n\t\t}\n\t\tres = append(res, origDir)\n\t}\n\n\treturn res, nil\n}\n\nfunc getCreatedDirsToRemove(createdDirs []*DirDesc) ([]string, error) {\n\tvar res []string\n\n\tnow := time.Now()\n\tfor _, desc := range createdDirs {\n\t\tif now.Sub(desc.FileInfo.ModTime()) < 2*time.Hour {\n\t\t\tcontinue\n\t\t}\n\n\t\torigDir, err := os.Readlink(desc.LinkPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read link %s: %s\", desc.LinkPath, err)\n\t\t}\n\n\t\tres = append(res, origDir)\n\t}\n\n\treturn res, nil\n}\n\nfunc GC() error {\n\tlogger.LogInfoF(\"Running GC\\n\")\n\n\treleasedProjectsDescs, err := getDirsDescs(filepath.Join(GetReleasedTmpDirs(), projectsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp projects dirs: %s\", err)\n\t}\n\n\tcreatedProjectsDescs, err := getDirsDescs(filepath.Join(GetCreatedTmpDirs(), projectsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp projects dirs: %s\", err)\n\t}\n\n\treleasedDockerConfigsDescs, err := getDirsDescs(filepath.Join(GetReleasedTmpDirs(), dockerConfigsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp docker configs dirs: %s\", err)\n\t}\n\n\tcreatedDockerConfigsDescs, err := getDirsDescs(filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp docker configs dirs: %s\", err)\n\t}\n\n\tvar dirs []string\n\n\tprojectsToRemove := []string{}\n\tdockerConfigsToRemove := []string{}\n\n\tdirs, err = getReleasedDirsToRemove(releasedProjectsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp projects dirs to remove: %s\", err)\n\t}\n\tprojectsToRemove = append(projectsToRemove, dirs...)\n\n\tdirs, err = getCreatedDirsToRemove(createdProjectsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp projects dirs to remove: %s\", err)\n\t}\n\tprojectsToRemove = append(projectsToRemove, dirs...)\n\n\tdirs, err = getReleasedDirsToRemove(releasedDockerConfigsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp docker configs dirs to remove: %s\", err)\n\t}\n\tdockerConfigsToRemove = append(dockerConfigsToRemove, dirs...)\n\n\tdirs, err = getCreatedDirsToRemove(createdDockerConfigsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp docker configs dirs to remove: %s\", err)\n\t}\n\tdockerConfigsToRemove = append(dockerConfigsToRemove, dirs...)\n\n\tvar removeErrors []error\n\n\tif len(projectsToRemove) > 0 {\n\t\tif err := removeProjectDirs(projectsToRemove); err != nil {\n\t\t\tremoveErrors = append(removeErrors, fmt.Errorf(\"unable to remove tmp projects dirs %s: %s\", strings.Join(projectsToRemove, \", \"), err))\n\t\t}\n\t}\n\n\tfor _, dir := range dockerConfigsToRemove {\n\t\terr := os.RemoveAll(dir)\n\t\tif err != nil {\n\t\t\tremoveErrors = append(removeErrors, fmt.Errorf(\"unable to remove %s: %s\", dir, err))\n\t\t}\n\t}\n\n\tfor _, descs := range [][]*DirDesc{\n\t\treleasedProjectsDescs, releasedDockerConfigsDescs,\n\t\tcreatedProjectsDescs, createdDockerConfigsDescs,\n\t} {\n\t\tfor _, desc := range descs {\n\t\t\tif err := os.Remove(desc.LinkPath); err != nil {\n\t\t\t\tremoveErrors = append(removeErrors, fmt.Errorf(\"unable to remove %s: %s\", desc.LinkPath, err))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc Purge() error {\n\ttmpFiles, err := ioutil.ReadDir(werf.GetTmpDir())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to list tmp files in %s: %s\", werf.GetTmpDir(), err)\n\t}\n\n\tfilesToRemove := []string{}\n\tfor _, finfo := range tmpFiles {\n\t\tif strings.HasPrefix(finfo.Name(), \"werf\") {\n\t\t\tfilesToRemove = append(filesToRemove, filepath.Join(werf.GetTmpDir(), finfo.Name()))\n\t\t}\n\t}\n\n\tfor _, file := range filesToRemove {\n\t\terr := os.RemoveAll(file)\n\t\tif err != nil {\n\t\t\tlogger.LogErrorF(\"WARNING: unable to remove %s: %s\\n\", file, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc removeProjectDirs(dirs []string) error {\n\ttoolchainContainerName, err := dappdeps.ToolchainContainer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := []string{\n\t\t\"--rm\",\n\t\t\"--volumes-from\", toolchainContainerName,\n\t\t\"--volume\", fmt.Sprintf(\"%s:%s\", werf.GetTmpDir(), werf.GetTmpDir()),\n\t\tdappdeps.BaseImageName(),\n\t\tdappdeps.RmBinPath(), \"-rf\",\n\t}\n\n\targs = append(args, dirs...)\n\n\treturn docker.CliRun(args...)\n}\n<commit_msg>[way2alpha13] Docker configs gc and permissions fixed<commit_after>package tmp_manager\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flant\/werf\/pkg\/dappdeps\"\n\t\"github.com\/flant\/werf\/pkg\/docker\"\n\t\"github.com\/flant\/werf\/pkg\/lock\"\n\t\"github.com\/flant\/werf\/pkg\/logger\"\n\t\"github.com\/flant\/werf\/pkg\/werf\"\n\t\"github.com\/otiai10\/copy\"\n)\n\nconst (\n\tprojectsDir = \"projects\"\n\tdockerConfigsDir = \"docker_configs\"\n)\n\nfunc GetCreatedTmpDirs() string {\n\treturn filepath.Join(werf.GetServiceDir(), \"tmp\", \"created\")\n}\n\nfunc GetReleasedTmpDirs() string {\n\treturn filepath.Join(werf.GetServiceDir(), \"tmp\", \"released\")\n}\n\nfunc registerCreatedDir(newDir, createdDirs string) error {\n\tif err := os.MkdirAll(createdDirs, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"unable to create dir %s: %s\", createdDirs, err)\n\t}\n\n\tcreatedDir := filepath.Join(createdDirs, filepath.Base(newDir))\n\tif err := os.Symlink(newDir, createdDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc releaseDir(dir, createdDirs, releasedDirs string) error {\n\tif err := os.MkdirAll(releasedDirs, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"unable to create dir %s: %s\", releaseDir, err)\n\t}\n\n\treleasedDir := filepath.Join(releasedDirs, filepath.Base(dir))\n\tif err := os.Symlink(dir, releasedDir); err != nil {\n\t\treturn err\n\t}\n\n\tcreatedDir := filepath.Join(createdDirs, filepath.Base(dir))\n\tif err := os.Remove(createdDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc checkShouldRunGC() (bool, error) {\n\treleasedProjectsDir := filepath.Join(GetReleasedTmpDirs(), projectsDir)\n\tif _, err := os.Stat(releasedProjectsDir); !os.IsNotExist(err) {\n\t\tvar err error\n\t\treleasedProjectsDirs, err := ioutil.ReadDir(releasedProjectsDir)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"unable to list released projects tmp dirs in %s: %s\", releasedProjectsDir, err)\n\t\t}\n\n\t\tif len(releasedProjectsDirs) > 50 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tcreatedDockerConfigsDir := filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir)\n\tif _, err := os.Stat(createdDockerConfigsDir); !os.IsNotExist(err) {\n\t\tvar err error\n\t\tcreatedDockerConfigsDirs, err := ioutil.ReadDir(createdDockerConfigsDir)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"unable to list released docker configs tmp dirs in %s: %s\", createdDockerConfigsDir, err)\n\t\t}\n\n\t\tnow := time.Now()\n\t\tfor _, info := range createdDockerConfigsDirs {\n\t\t\tif now.Sub(info.ModTime()) > 24*time.Hour {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc newTmpDir(prefix string) (string, error) {\n\tnewDir, err := ioutil.TempDir(werf.GetTmpDir(), prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tdir, err := filepath.EvalSymlinks(newDir)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"eval symlink %s failed: %s\", newDir, err)\n\t\t}\n\t\tnewDir = dir\n\t}\n\n\treturn newDir, nil\n}\n\nfunc CreateProjectDir() (string, error) {\n\tnewDir, err := newTmpDir(\"werf-project-data-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := registerCreatedDir(newDir, filepath.Join(GetCreatedTmpDirs(), projectsDir)); err != nil {\n\t\tos.RemoveAll(newDir)\n\t\treturn \"\", err\n\t}\n\n\tshouldRunGC, err := checkShouldRunGC()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif shouldRunGC {\n\t\terr := lock.WithLock(\"gc\", lock.LockOptions{}, GC)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"GC failed: %s\", err)\n\t\t}\n\t}\n\n\treturn newDir, nil\n}\n\nfunc ReleaseProjectDir(dir string) error {\n\treturn releaseDir(dir, filepath.Join(GetCreatedTmpDirs(), projectsDir), filepath.Join(GetReleasedTmpDirs(), projectsDir))\n}\n\nfunc CreateDockerConfigDir(fromDockerConfig string) (string, error) {\n\tnewDir, err := newTmpDir(\"werf-docker-config-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := os.Chmod(newDir, 0700); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := os.Stat(fromDockerConfig); !os.IsNotExist(err) {\n\t\terr := copy.Copy(fromDockerConfig, newDir)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to copy %s to %s: %s\", fromDockerConfig, newDir, err)\n\t\t}\n\t}\n\n\tif err := registerCreatedDir(newDir, filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir)); err != nil {\n\t\tos.RemoveAll(newDir)\n\t\treturn \"\", err\n\t}\n\n\tshouldRunGC, err := checkShouldRunGC()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif shouldRunGC {\n\t\terr := lock.WithLock(\"gc\", lock.LockOptions{}, GC)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"GC failed: %s\", err)\n\t\t}\n\t}\n\n\treturn newDir, nil\n}\n\ntype DirDesc struct {\n\tFileInfo os.FileInfo\n\tLinkPath string\n}\n\nfunc getDirsDescs(dir string) ([]*DirDesc, error) {\n\tvar res []*DirDesc\n\n\tif _, err := os.Stat(dir); !os.IsNotExist(err) {\n\t\tinfos, err := ioutil.ReadDir(dir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to list dirs in %s: %s\", dir, err)\n\t\t}\n\n\t\tfor _, info := range infos {\n\t\t\tres = append(res, &DirDesc{FileInfo: info, LinkPath: filepath.Join(dir, info.Name())})\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc getReleasedDirsToRemove(releasedDirs []*DirDesc) ([]string, error) {\n\tvar res []string\n\n\tfor _, desc := range releasedDirs {\n\t\torigDir, err := os.Readlink(desc.LinkPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read link %s: %s\", desc.LinkPath, err)\n\t\t}\n\t\tres = append(res, origDir, desc.LinkPath)\n\t}\n\n\treturn res, nil\n}\n\nfunc getCreatedDirsToRemove(createdDirs []*DirDesc) ([]string, error) {\n\tvar res []string\n\n\tnow := time.Now()\n\tfor _, desc := range createdDirs {\n\t\tif now.Sub(desc.FileInfo.ModTime()) < 2*time.Hour {\n\t\t\tcontinue\n\t\t}\n\n\t\torigDir, err := os.Readlink(desc.LinkPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read link %s: %s\", desc.LinkPath, err)\n\t\t}\n\n\t\tres = append(res, origDir, desc.LinkPath)\n\t}\n\n\treturn res, nil\n}\n\nfunc GC() error {\n\tlogger.LogInfoF(\"Running GC\\n\")\n\n\treleasedProjectsDescs, err := getDirsDescs(filepath.Join(GetReleasedTmpDirs(), projectsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp projects dirs: %s\", err)\n\t}\n\n\tcreatedProjectsDescs, err := getDirsDescs(filepath.Join(GetCreatedTmpDirs(), projectsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp projects dirs: %s\", err)\n\t}\n\n\tcreatedDockerConfigsDescs, err := getDirsDescs(filepath.Join(GetCreatedTmpDirs(), dockerConfigsDir))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp docker configs dirs: %s\", err)\n\t}\n\n\tvar dirs []string\n\n\tprojectsToRemove := []string{}\n\tdockerConfigsToRemove := []string{}\n\n\tdirs, err = getReleasedDirsToRemove(releasedProjectsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get released tmp projects dirs to remove: %s\", err)\n\t}\n\tprojectsToRemove = append(projectsToRemove, dirs...)\n\n\tdirs, err = getCreatedDirsToRemove(createdProjectsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp projects dirs to remove: %s\", err)\n\t}\n\tprojectsToRemove = append(projectsToRemove, dirs...)\n\n\tdirs, err = getCreatedDirsToRemove(createdDockerConfigsDescs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get created tmp docker configs dirs to remove: %s\", err)\n\t}\n\tdockerConfigsToRemove = append(dockerConfigsToRemove, dirs...)\n\n\tvar removeErrors []error\n\n\tif len(projectsToRemove) > 0 {\n\t\tif err := removeProjectDirs(projectsToRemove); err != nil {\n\t\t\tremoveErrors = append(removeErrors, fmt.Errorf(\"unable to remove tmp projects dirs %s: %s\", strings.Join(projectsToRemove, \", \"), err))\n\t\t}\n\t}\n\n\tfor _, dir := range dockerConfigsToRemove {\n\t\terr := os.RemoveAll(dir)\n\t\tif err != nil {\n\t\t\tremoveErrors = append(removeErrors, fmt.Errorf(\"unable to remove %s: %s\", dir, err))\n\t\t}\n\t}\n\n\tif len(removeErrors) > 0 {\n\t\tmsg := \"\"\n\t\tfor _, err := range removeErrors {\n\t\t\tmsg += fmt.Sprintf(\"%s\\n\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", msg)\n\t}\n\n\treturn nil\n}\n\nfunc Purge() error {\n\ttmpFiles, err := ioutil.ReadDir(werf.GetTmpDir())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to list tmp files in %s: %s\", werf.GetTmpDir(), err)\n\t}\n\n\tfilesToRemove := []string{}\n\tfor _, finfo := range tmpFiles {\n\t\tif strings.HasPrefix(finfo.Name(), \"werf\") {\n\t\t\tfilesToRemove = append(filesToRemove, filepath.Join(werf.GetTmpDir(), finfo.Name()))\n\t\t}\n\t}\n\n\tfor _, file := range filesToRemove {\n\t\terr := os.RemoveAll(file)\n\t\tif err != nil {\n\t\t\tlogger.LogErrorF(\"WARNING: unable to remove %s: %s\\n\", file, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc removeProjectDirs(dirs []string) error {\n\ttoolchainContainerName, err := dappdeps.ToolchainContainer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := []string{\n\t\t\"--rm\",\n\t\t\"--volumes-from\", toolchainContainerName,\n\t\t\"--volume\", fmt.Sprintf(\"%s:%s\", werf.GetTmpDir(), werf.GetTmpDir()),\n\t\tdappdeps.BaseImageName(),\n\t\tdappdeps.RmBinPath(), \"-rf\",\n\t}\n\n\targs = append(args, dirs...)\n\n\treturn docker.CliRun(args...)\n}\n<|endoftext|>"} {"text":"<commit_before>package input\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\tLenBuffEnd = \":\"\n\tValBuffEnd = \";\"\n\tNoResult = false\n)\n\nvar (\n\terr error\n\tmutex *sync.Mutex = &sync.Mutex{}\n\tbrokenErr = errors.New(\"broken\")\n\tlenIncErr = errors.New(\"length-buffer-incomplete\")\n\tlenInvErr = errors.New(\"length-buffer-invalid-byte\")\n\tlenConvErr = errors.New(\"length-buffer-conversion-error\")\n\tvalIncErr = errors.New(\"value-buffer-incomplete\")\n)\n\n\/\/ A NetstrDelimiter detects when message lines start.\ntype NetstrDelimiter struct {\n\tResult string\n\tlenBuff bytes.Buffer\n\tvalBuff bytes.Buffer\n\tvalBuffLen int\n\tvalBuffMode bool\n\tignoreMode bool\n\tbrokenMode bool\n}\n\n\/\/ NewNetstrDelimiter returns an initialized NetstrDelimiter.\nfunc NewNetstrDelimiter() *NetstrDelimiter {\n\treturn &NetstrDelimiter{\n\t\tlenBuff: *bytes.NewBuffer([]byte{}),\n\t\tvalBuff: *bytes.NewBuffer([]byte{}),\n\t}\n}\n\n\/\/ Push the given byte into a buffer, return when a new result is available,\n\/\/ as well as the first occurring error (if any occurred).\nfunc (d *NetstrDelimiter) Push(b byte) (bool, error) {\n\tif d.brokenMode {\n\t\treturn NoResult, errors.New(\"broken\")\n\t}\n\treturn d.processByte(b)\n}\n\n\/\/ Reset the NetstrDelimiter instance to its initial state.\nfunc (d *NetstrDelimiter) Reset() {\n\tmutex.Lock()\n\td.useLenBuff()\n\tmutex.Unlock()\n}\n\n\/\/ processByte checks if a byte must be processed as \"length byte\" or as \"value byte\".\nfunc (d *NetstrDelimiter) processByte(b byte) (bool, error) {\n\tif d.valBuffMode {\n\t\treturn d.processValByte(b)\n\t}\n\treturn d.processLenByte(b)\n}\n\n\/\/ processLenBytes writes the passed byte to the \"length buffer\",\n\/\/ unless the passed byte is the end of the \"length buffer\".\nfunc (d *NetstrDelimiter) processLenByte(b byte) (bool, error) {\n\tif b == LenBuffEnd[0] {\n\t\treturn NoResult, d.useValBuff()\n\t}\n\tif d.checkLenByte(b) {\n\t\tif err = d.lenBuff.WriteByte(b); err != nil {\n\t\t\td.brokenMode = true\n\t\t\treturn NoResult, lenIncErr\n\t\t}\n\t\treturn NoResult, nil\n\t}\n\td.brokenMode = true\n\treturn NoResult, lenInvErr\n}\n\n\/\/ checkLenByte checks that the current byte is a digit.\nfunc (d *NetstrDelimiter) checkLenByte(b byte) bool {\n\tfor i := 0; i < 10; i++ {\n\t\tif strconv.Itoa(i)[0] == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ processValByte writess the passed byte to the \"value buffer\",\n\/\/ unless the \"value buffer length\" is equal to 0.\nfunc (d *NetstrDelimiter) processValByte(b byte) (bool, error) {\n\tif d.valBuffLen == 0 {\n\t\td.useLenBuff()\n\t\treturn true, nil\n\t}\n\td.valBuffLen--\n\tif d.ignoreMode {\n\t\treturn NoResult, nil\n\t}\n\t\/\/ If an error occurs, while writing to the buffer,\n\t\/\/ the current \"value buffer\" gets ignored.\n\tif err = d.valBuff.WriteByte(b); err != nil {\n\t\td.ignoreMode = true\n\t\treturn NoResult, valIncErr\n\t}\n\treturn NoResult, nil\n}\n\n\/\/ useLenBuff overwrites the old result and resets values.\nfunc (d *NetstrDelimiter) useLenBuff() {\n\tif d.ignoreMode {\n\t\td.Result = \"\"\n\t\td.ignoreMode = false\n\t} else {\n\t\td.Result = d.valBuff.String()\n\t}\n\td.valBuff.Reset()\n\td.valBuffMode = false\n}\n\n\/\/ useValBuff converts the \"length buffer\" value to an integer,\n\/\/ representing the \"value buffer length\" and resets values.\nfunc (d *NetstrDelimiter) useValBuff() error {\n\tif d.valBuffLen, err = strconv.Atoi(d.lenBuff.String()); err != nil {\n\t\td.brokenMode = true\n\t\treturn lenConvErr\n\t}\n\td.lenBuff.Reset()\n\td.valBuffMode = true\n\treturn nil\n}\n<commit_msg>Better error naming<commit_after>package input\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\tLenBuffEnd = \":\"\n\tValBuffEnd = \";\"\n\tNoResult = false\n)\n\nvar (\n\terr error\n\tmutex *sync.Mutex = &sync.Mutex{}\n\terrBroken = errors.New(\"broken\")\n\terrLenInc = errors.New(\"length-buffer-incomplete\")\n\terrLenInv = errors.New(\"length-buffer-invalid-byte\")\n\terrLenConv = errors.New(\"length-buffer-conversion-error\")\n\terrValInc = errors.New(\"value-buffer-incomplete\")\n)\n\n\/\/ A NetstrDelimiter detects when message lines start.\ntype NetstrDelimiter struct {\n\tResult string\n\tlenBuff bytes.Buffer\n\tvalBuff bytes.Buffer\n\tvalBuffLen int\n\tvalBuffMode bool\n\tignoreMode bool\n\tbrokenMode bool\n}\n\n\/\/ NewNetstrDelimiter returns an initialized NetstrDelimiter.\nfunc NewNetstrDelimiter() *NetstrDelimiter {\n\treturn &NetstrDelimiter{\n\t\tlenBuff: *bytes.NewBuffer([]byte{}),\n\t\tvalBuff: *bytes.NewBuffer([]byte{}),\n\t}\n}\n\n\/\/ Push the given byte into a buffer, return when a new result is available,\n\/\/ as well as the first occurring error (if any occurred).\nfunc (d *NetstrDelimiter) Push(b byte) (bool, error) {\n\tif d.brokenMode {\n\t\treturn NoResult, errors.New(\"broken\")\n\t}\n\treturn d.processByte(b)\n}\n\n\/\/ Reset the NetstrDelimiter instance to its initial state.\nfunc (d *NetstrDelimiter) Reset() {\n\tmutex.Lock()\n\td.useLenBuff()\n\tmutex.Unlock()\n}\n\n\/\/ processByte checks if a byte must be processed as \"length byte\" or as \"value byte\".\nfunc (d *NetstrDelimiter) processByte(b byte) (bool, error) {\n\tif d.valBuffMode {\n\t\treturn d.processValByte(b)\n\t}\n\treturn d.processLenByte(b)\n}\n\n\/\/ processLenBytes writes the passed byte to the \"length buffer\",\n\/\/ unless the passed byte is the end of the \"length buffer\".\nfunc (d *NetstrDelimiter) processLenByte(b byte) (bool, error) {\n\tif b == LenBuffEnd[0] {\n\t\treturn NoResult, d.useValBuff()\n\t}\n\tif d.checkLenByte(b) {\n\t\tif err = d.lenBuff.WriteByte(b); err != nil {\n\t\t\td.brokenMode = true\n\t\t\treturn NoResult, errLenInc\n\t\t}\n\t\treturn NoResult, nil\n\t}\n\td.brokenMode = true\n\treturn NoResult, errLenInv\n}\n\n\/\/ checkLenByte checks that the current byte is a digit.\nfunc (d *NetstrDelimiter) checkLenByte(b byte) bool {\n\tfor i := 0; i < 10; i++ {\n\t\tif strconv.Itoa(i)[0] == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ processValByte writess the passed byte to the \"value buffer\",\n\/\/ unless the \"value buffer length\" is equal to 0.\nfunc (d *NetstrDelimiter) processValByte(b byte) (bool, error) {\n\tif d.valBuffLen == 0 {\n\t\td.useLenBuff()\n\t\treturn true, nil\n\t}\n\td.valBuffLen--\n\tif d.ignoreMode {\n\t\treturn NoResult, nil\n\t}\n\t\/\/ If an error occurs, while writing to the buffer,\n\t\/\/ the current \"value buffer\" gets ignored.\n\tif err = d.valBuff.WriteByte(b); err != nil {\n\t\td.ignoreMode = true\n\t\treturn NoResult, errValInc\n\t}\n\treturn NoResult, nil\n}\n\n\/\/ useLenBuff overwrites the old result and resets values.\nfunc (d *NetstrDelimiter) useLenBuff() {\n\tif d.ignoreMode {\n\t\td.Result = \"\"\n\t\td.ignoreMode = false\n\t} else {\n\t\td.Result = d.valBuff.String()\n\t}\n\td.valBuff.Reset()\n\td.valBuffMode = false\n}\n\n\/\/ useValBuff converts the \"length buffer\" value to an integer,\n\/\/ representing the \"value buffer length\" and resets values.\nfunc (d *NetstrDelimiter) useValBuff() error {\n\tif d.valBuffLen, err = strconv.Atoi(d.lenBuff.String()); err != nil {\n\t\td.brokenMode = true\n\t\treturn errLenConv\n\t}\n\td.lenBuff.Reset()\n\td.valBuffMode = true\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package vitali\n\nimport (\n \"testing\"\n \"net\/http\"\n \"net\/url\"\n \"net\/http\/httptest\"\n)\n\nfunc TestNotFound(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{})\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusNotFound {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Root struct {\n Ctx\n}\n\nfunc (c Root) Get() interface{} {\n return \"root\"\n}\n\nfunc TestOK(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"root\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\nfunc TestMethodNotAllowed(t *testing.T) {\n r := &http.Request{\n Method: \"POST\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusMethodNotAllowed {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n allowed := rr.HeaderMap.Get(\"Allow\")\n if allowed != \"GET, HEAD\" {\n t.Errorf(\"allow header is %s\", allowed)\n }\n}\n\nfunc TestNotImplemented(t *testing.T) {\n r := &http.Request{\n Method: \"WTF\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusNotImplemented {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Something struct {\n Ctx\n}\n\nfunc (c Something) Get() interface{} {\n return c.PathParam(\"id1\") + c.PathParam(\"id2\")\n}\n\nfunc TestPathParam(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/foo\/123\/bar\/456\/something\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/foo\/{id1}\/bar\/{id2}\/something\", Something{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"123456\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\ntype NeedAuth struct {\n Ctx\n Perm\n}\n\nfunc (c NeedAuth) Get() interface{} {\n return c.Username\n}\n\nfunc (c NeedAuth) Post() interface{} {\n return \"public post\"\n}\n\nfunc (c NeedAuth) Delete() interface{} {\n return c.Username\n}\n\nfunc TestNeedAuth(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/needauth\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/needauth\", NeedAuth{\n Perm: Perm{\"GET\": AUTHENTICATED, \"POST\": PUBLIC, \"*\": AUTHENTICATED},\n }},\n })\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnauthorized {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n\n r.Method = \"POST\"\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"public post\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n\n r.Method = \"DELETE\"\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnauthorized {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Auther struct {\n}\n\nfunc (c Auther) User(r *http.Request) string {\n return \"bob\"\n}\n\nfunc (c Auther) AuthHeader(r *http.Request) string {\n return `Basic realm=\"test\"`\n}\n\nfunc TestAuthed(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/needauth\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/needauth\", NeedAuth{\n Perm: Perm{\"GET\": AUTHENTICATED, \"POST\": PUBLIC, \"*\": AUTHENTICATED},\n }},\n })\n webapp.UserProvider = Auther{}\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"bob\" {\n t.Errorf(\"user is `%s`\", entity)\n }\n}\n\ntype Panikr struct {\n Ctx\n}\n\nfunc (c Panikr) Get() interface{} {\n panic(\"panic!!\")\n return \"\"\n}\n\nfunc TestRecoverPanic(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/panic\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/panic\", Panikr{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusInternalServerError {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n<commit_msg>add more tests<commit_after>package vitali\n\nimport (\n \"testing\"\n \"net\/http\"\n \"net\/url\"\n \"net\/http\/httptest\"\n)\n\nfunc TestNotFound(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{})\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusNotFound {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Root struct {\n Ctx\n}\n\nfunc (c Root) Get() interface{} {\n return \"root\"\n}\n\nfunc TestOK(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"root\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\ntype Info struct {\n Ctx\n Provided string\n Provided2 string\n}\n\nfunc (c Info) Get() interface{} {\n return c.Provided + c.Provided2\n}\n\nfunc TestProvided(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/info\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/info\", Info{\n Provided: \"foo\",\n Provided2: \"bar\",\n }},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"foobar\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\nfunc TestMethodNotAllowed(t *testing.T) {\n r := &http.Request{\n Method: \"POST\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusMethodNotAllowed {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n allowed := rr.HeaderMap.Get(\"Allow\")\n if allowed != \"GET, HEAD\" {\n t.Errorf(\"allow header is %s\", allowed)\n }\n}\n\nfunc TestNotImplemented(t *testing.T) {\n r := &http.Request{\n Method: \"WTF\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/\", Root{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusNotImplemented {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Something struct {\n Ctx\n}\n\nfunc (c Something) Get() interface{} {\n return c.PathParam(\"id1\") + c.PathParam(\"id2\")\n}\n\nfunc TestPathParam(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/foo\/123\/bar\/456\/something\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/foo\/{id1}\/bar\/{id2}\/something\", Something{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"123456\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\ntype Something2 struct {\n Ctx\n}\n\nfunc (c Something2) Get() interface{} {\n return c.Param(\"id\")\n}\n\nfunc TestForm(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/something2\",\n },\n Form: make(url.Values),\n }\n r.Form.Add(\"id\", \"5566\")\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/something2\", Something2{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"5566\" {\n t.Errorf(\"id is `%s`\", entity)\n }\n}\n\ntype NeedAuth struct {\n Ctx\n Perm\n}\n\nfunc (c NeedAuth) Get() interface{} {\n return c.Username\n}\n\nfunc (c NeedAuth) Post() interface{} {\n return \"public post\"\n}\n\nfunc (c NeedAuth) Delete() interface{} {\n return c.Username\n}\n\nfunc TestNeedAuth(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/needauth\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/needauth\", NeedAuth{\n Perm: Perm{\"GET\": AUTHENTICATED, \"POST\": PUBLIC, \"*\": AUTHENTICATED},\n }},\n })\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnauthorized {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n\n r.Method = \"POST\"\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"public post\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n\n r.Method = \"DELETE\"\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnauthorized {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Auther struct {\n}\n\nfunc (c Auther) User(r *http.Request) string {\n return \"bob\"\n}\n\nfunc (c Auther) AuthHeader(r *http.Request) string {\n return `Basic realm=\"test\"`\n}\n\nfunc TestAuthed(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/needauth\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/needauth\", NeedAuth{\n Perm: Perm{\"GET\": AUTHENTICATED, \"POST\": PUBLIC, \"*\": AUTHENTICATED},\n }},\n })\n webapp.UserProvider = Auther{}\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"bob\" {\n t.Errorf(\"user is `%s`\", entity)\n }\n}\n\ntype Bad struct {\n Ctx\n}\n\nfunc (c Bad) Get() interface{} {\n return c.BadRequest(\"reason\")\n}\n\nfunc TestBadRequest(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/bad\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/bad\", Bad{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusBadRequest {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n entity := rr.Body.String()\n if entity != \"reason\\n\" {\n t.Errorf(\"entity is `%s`\", entity)\n }\n}\n\ntype AcceptSomething struct {\n Ctx\n Accept\n}\n\nfunc (c AcceptSomething) Post() interface{} {\n return \"success\"\n}\n\nfunc TestAccept(t *testing.T) {\n r := &http.Request{\n Method: \"POST\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/accept\",\n },\n Header: make(http.Header),\n }\n webapp := CreateWebApp([]RouteRule{\n {\"\/accept\", AcceptSomething{\n Accept: Accept{\"POST\": MediaTypes{\"application\/json\", \"application\/xml\"}},\n }},\n })\n\n rr := httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnsupportedMediaType {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n\n r.Header.Set(\"Content-Type\", \"application\/json\")\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n\n r.Header.Set(\"Content-Type\", \"application\/xml\")\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusOK {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n\n r.Header.Set(\"Content-Type\", \"text\/plain\")\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusUnsupportedMediaType {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n\ntype Redirects struct {\n Ctx\n}\n\nfunc (c Redirects) Get() interface{} {\n switch c.Param(\"type\") {\n case \"found\":\n return c.Found(\"\/found\")\n case \"seeother\":\n return c.SeeOther(\"\/seeother\")\n }\n return \"\"\n}\n\nfunc TestRedirects(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/redirects\",\n },\n Form: make(url.Values),\n }\n webapp := CreateWebApp([]RouteRule{\n {\"\/redirects\", Redirects{}},\n })\n\n r.Form.Set(\"type\", \"found\")\n rr := httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusFound {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n location := rr.Header().Get(\"Location\")\n if location != \"\/found\" {\n t.Errorf(\"location is `%s`\", location)\n }\n\n r.Form.Set(\"type\", \"seeother\")\n rr = httptest.NewRecorder()\n webapp.ServeHTTP(rr, r)\n if rr.Code != http.StatusSeeOther {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n location = rr.Header().Get(\"Location\")\n if location != \"\/seeother\" {\n t.Errorf(\"location is `%s`\", location)\n }\n}\n\ntype Panikr struct {\n Ctx\n}\n\nfunc (c Panikr) Get() interface{} {\n panic(\"panic!!\")\n return \"\"\n}\n\nfunc TestRecoverPanic(t *testing.T) {\n r := &http.Request{\n Method: \"GET\",\n Host: \"lunastorm.tw\",\n URL: &url.URL{\n Path: \"\/panic\",\n },\n }\n rr := httptest.NewRecorder()\n webapp := CreateWebApp([]RouteRule{\n {\"\/panic\", Panikr{}},\n })\n webapp.ServeHTTP(rr, r)\n\n if rr.Code != http.StatusInternalServerError {\n t.Errorf(\"response code is %d\", rr.Code)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package helpers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"code.cloudfoundry.org\/ykk\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\ntype Package struct {\n\tChecksum string\n}\n\nfunc (p *Package) UnmarshalJSON(data []byte) error {\n\tvar ccPackage struct {\n\t\tData struct {\n\t\t\tChecksum struct {\n\t\t\t\tValue string `json:\"value\"`\n\t\t\t} `json:\"checksum\"`\n\t\t} `json:\"data\"`\n\t}\n\n\tif err := json.Unmarshal(data, &ccPackage); err != nil {\n\t\treturn err\n\t}\n\n\tp.Checksum = ccPackage.Data.Checksum.Value\n\n\treturn nil\n}\n\nfunc VerifyAppPackageContentsV3(appName string, files ...string) {\n\ttmpZipFilepath, err := ioutil.TempFile(\"\", \"\")\n\tdefer os.Remove(tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tdownloadFirstAppPackage(appName, tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tinfo, err := tmpZipFilepath.Stat()\n\tExpect(err).ToNot(HaveOccurred())\n\treader, err := ykk.NewReader(tmpZipFilepath, info.Size())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tseenFiles := []string{}\n\tfor _, file := range reader.File {\n\t\tseenFiles = append(seenFiles, file.Name)\n\t}\n\n\tExpect(seenFiles).To(ConsistOf(files))\n}\n\nfunc getFirstAppPackageGuid(appName string) string {\n\tsession := CF(\"v3-packages\", appName)\n\tEventually(session).Should(Exit(0))\n\n\tmyRegexp, err := regexp.Compile(GUIDRegex)\n\tExpect(err).NotTo(HaveOccurred())\n\tmatches := myRegexp.FindAll(session.Out.Contents(), -1)\n\tpackageGUID := matches[3]\n\n\treturn string(packageGUID)\n}\n\nfunc downloadFirstAppPackage(appName string, tmpZipFilepath string) {\n\tappGUID := getFirstAppPackageGuid(appName)\n\tsession := CF(\"curl\", fmt.Sprintf(\"\/v3\/packages\/%s\/download\", appGUID), \"--output\", tmpZipFilepath)\n\tEventually(session).Should(Exit(0))\n}\n\nfunc VerifyAppPackageContentsV2(appName string, files ...string) {\n\ttmpZipFilepath, err := ioutil.TempFile(\"\", \"\")\n\tdefer os.Remove(tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tdownloadFirstAppBits(appName, tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tinfo, err := tmpZipFilepath.Stat()\n\tExpect(err).ToNot(HaveOccurred())\n\treader, err := ykk.NewReader(tmpZipFilepath, info.Size())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tseenFiles := []string{}\n\tfor _, file := range reader.File {\n\t\tseenFiles = append(seenFiles, file.Name)\n\t}\n\n\tExpect(seenFiles).To(ConsistOf(files))\n}\n\nfunc downloadFirstAppBits(appName string, tmpZipFilepath string) {\n\tappGUID := AppGUID(appName)\n\tsession := CF(\"curl\", fmt.Sprintf(\"\/v2\/apps\/%s\/download\", appGUID), \"--output\", tmpZipFilepath)\n\tEventually(session).Should(Exit(0))\n}\n<commit_msg>Remove unsused helper package struct<commit_after>package helpers\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"code.cloudfoundry.org\/ykk\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nfunc VerifyAppPackageContentsV3(appName string, files ...string) {\n\ttmpZipFilepath, err := ioutil.TempFile(\"\", \"\")\n\tdefer os.Remove(tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tdownloadFirstAppPackage(appName, tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tinfo, err := tmpZipFilepath.Stat()\n\tExpect(err).ToNot(HaveOccurred())\n\treader, err := ykk.NewReader(tmpZipFilepath, info.Size())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tseenFiles := []string{}\n\tfor _, file := range reader.File {\n\t\tseenFiles = append(seenFiles, file.Name)\n\t}\n\n\tExpect(seenFiles).To(ConsistOf(files))\n}\n\nfunc getFirstAppPackageGuid(appName string) string {\n\tsession := CF(\"v3-packages\", appName)\n\tEventually(session).Should(Exit(0))\n\n\tmyRegexp, err := regexp.Compile(GUIDRegex)\n\tExpect(err).NotTo(HaveOccurred())\n\tmatches := myRegexp.FindAll(session.Out.Contents(), -1)\n\tpackageGUID := matches[3]\n\n\treturn string(packageGUID)\n}\n\nfunc downloadFirstAppPackage(appName string, tmpZipFilepath string) {\n\tappGUID := getFirstAppPackageGuid(appName)\n\tsession := CF(\"curl\", fmt.Sprintf(\"\/v3\/packages\/%s\/download\", appGUID), \"--output\", tmpZipFilepath)\n\tEventually(session).Should(Exit(0))\n}\n\nfunc VerifyAppPackageContentsV2(appName string, files ...string) {\n\ttmpZipFilepath, err := ioutil.TempFile(\"\", \"\")\n\tdefer os.Remove(tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tdownloadFirstAppBits(appName, tmpZipFilepath.Name())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tinfo, err := tmpZipFilepath.Stat()\n\tExpect(err).ToNot(HaveOccurred())\n\treader, err := ykk.NewReader(tmpZipFilepath, info.Size())\n\tExpect(err).ToNot(HaveOccurred())\n\n\tseenFiles := []string{}\n\tfor _, file := range reader.File {\n\t\tseenFiles = append(seenFiles, file.Name)\n\t}\n\n\tExpect(seenFiles).To(ConsistOf(files))\n}\n\nfunc downloadFirstAppBits(appName string, tmpZipFilepath string) {\n\tappGUID := AppGUID(appName)\n\tsession := CF(\"curl\", fmt.Sprintf(\"\/v2\/apps\/%s\/download\", appGUID), \"--output\", tmpZipFilepath)\n\tEventually(session).Should(Exit(0))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage net\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\nfunc TestCloneTLSConfig(t *testing.T) {\n\texpected := sets.NewString(\n\t\t\/\/ These fields are copied in CloneTLSConfig\n\t\t\"Rand\",\n\t\t\"Time\",\n\t\t\"Certificates\",\n\t\t\"RootCAs\",\n\t\t\"NextProtos\",\n\t\t\"ServerName\",\n\t\t\"InsecureSkipVerify\",\n\t\t\"CipherSuites\",\n\t\t\"PreferServerCipherSuites\",\n\t\t\"MinVersion\",\n\t\t\"MaxVersion\",\n\t\t\"CurvePreferences\",\n\t\t\"NameToCertificate\",\n\t\t\"GetCertificate\",\n\t\t\"ClientAuth\",\n\t\t\"ClientCAs\",\n\t\t\"ClientSessionCache\",\n\n\t\t\/\/ These fields are not copied\n\t\t\"SessionTicketsDisabled\",\n\t\t\"SessionTicketKey\",\n\t\t\"DynamicRecordSizingDisabled\",\n\t\t\"Renegotiation\",\n\n\t\t\/\/ These fields are unexported\n\t\t\"serverInitOnce\",\n\t\t\"mutex\",\n\t\t\"sessionTicketKeys\",\n\t)\n\n\tfields := sets.NewString()\n\tstructType := reflect.TypeOf(tls.Config{})\n\tfor i := 0; i < structType.NumField(); i++ {\n\t\tfields.Insert(structType.Field(i).Name)\n\t}\n\n\tif missing := expected.Difference(fields); len(missing) > 0 {\n\t\tt.Errorf(\"Expected fields that were not seen in http.Transport: %v\", missing.List())\n\t}\n\tif extra := fields.Difference(expected); len(extra) > 0 {\n\t\tt.Errorf(\"New fields seen in http.Transport: %v\\nAdd to CopyClientTLSConfig if client-relevant, then add to expected list in TestCopyClientTLSConfig\", extra.List())\n\t}\n}\n\nfunc TestGetClientIP(t *testing.T) {\n\tipString := \"10.0.0.1\"\n\tip := net.ParseIP(ipString)\n\tinvalidIPString := \"invalidIPString\"\n\ttestCases := []struct {\n\t\tRequest http.Request\n\t\tExpectedIP net.IP\n\t}{\n\t\t{\n\t\t\tRequest: http.Request{},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Real-Ip\": {ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Real-Ip\": {invalidIPString},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString + \",\" + ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\t\/\/ RemoteAddr is in the form host:port\n\t\t\t\tRemoteAddr: ipString + \":1234\",\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tRemoteAddr: invalidIPString,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString},\n\t\t\t\t},\n\t\t\t\t\/\/ RemoteAddr is in the form host:port\n\t\t\t\tRemoteAddr: ipString,\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t}\n\n\tfor i, test := range testCases {\n\t\tif a, e := GetClientIP(&test.Request), test.ExpectedIP; reflect.DeepEqual(e, a) != true {\n\t\t\tt.Fatalf(\"test case %d failed. expected: %v, actual: %v\", i, e, a)\n\t\t}\n\t}\n}\n\nfunc TestProxierWithNoProxyCIDR(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tnoProxy string\n\t\turl string\n\n\t\texpectedDelegated bool\n\t}{\n\t\t{\n\t\t\tname: \"no env\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no cidr\",\n\t\t\tnoProxy: \"192.168.63.1\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"hostname\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/my-hostname\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"match second cidr\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: false,\n\t\t},\n\t\t{\n\t\t\tname: \"match second cidr with host:port\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/192.168.143.1:8443\/api\",\n\t\t\texpectedDelegated: false,\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tos.Setenv(\"NO_PROXY\", test.noProxy)\n\t\tactualDelegated := false\n\t\tproxyFunc := NewProxierWithNoProxyCIDR(func(req *http.Request) (*url.URL, error) {\n\t\t\tactualDelegated = true\n\t\t\treturn nil, nil\n\t\t})\n\n\t\treq, err := http.NewRequest(\"GET\", test.url, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: unexpected err: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := proxyFunc(req); err != nil {\n\t\t\tt.Errorf(\"%s: unexpected err: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif test.expectedDelegated != actualDelegated {\n\t\t\tt.Errorf(\"%s: expected %v, got %v\", test.name, test.expectedDelegated, actualDelegated)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype fakeTLSClientConfigHolder struct {\n\tcalled bool\n}\n\nfunc (f *fakeTLSClientConfigHolder) TLSClientConfig() *tls.Config {\n\tf.called = true\n\treturn nil\n}\nfunc (f *fakeTLSClientConfigHolder) RoundTrip(*http.Request) (*http.Response, error) {\n\treturn nil, nil\n}\n\nfunc TestTLSClientConfigHolder(t *testing.T) {\n\trt := &fakeTLSClientConfigHolder{}\n\tTLSClientConfig(rt)\n\n\tif !rt.called {\n\t\tt.Errorf(\"didn't find tls config\")\n\t}\n}\n<commit_msg>Add TLS conf for Go1.7<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage net\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\nfunc TestCloneTLSConfig(t *testing.T) {\n\texpected := sets.NewString(\n\t\t\/\/ These fields are copied in CloneTLSConfig\n\t\t\"Rand\",\n\t\t\"Time\",\n\t\t\"Certificates\",\n\t\t\"RootCAs\",\n\t\t\"NextProtos\",\n\t\t\"ServerName\",\n\t\t\"InsecureSkipVerify\",\n\t\t\"CipherSuites\",\n\t\t\"PreferServerCipherSuites\",\n\t\t\"MinVersion\",\n\t\t\"MaxVersion\",\n\t\t\"CurvePreferences\",\n\t\t\"NameToCertificate\",\n\t\t\"GetCertificate\",\n\t\t\"ClientAuth\",\n\t\t\"ClientCAs\",\n\t\t\"ClientSessionCache\",\n\n\t\t\/\/ These fields are not copied\n\t\t\"SessionTicketsDisabled\",\n\t\t\"SessionTicketKey\",\n\n\t\t\/\/ These fields are unexported\n\t\t\"serverInitOnce\",\n\t\t\"mutex\",\n\t\t\"sessionTicketKeys\",\n\t)\n\n\t\/\/ See #33936.\n\tif strings.HasPrefix(runtime.Version(), \"go1.7\") {\n\t\texpected.Insert(\"DynamicRecordSizingDisabled\", \"Renegotiation\")\n\t}\n\n\tfields := sets.NewString()\n\tstructType := reflect.TypeOf(tls.Config{})\n\tfor i := 0; i < structType.NumField(); i++ {\n\t\tfields.Insert(structType.Field(i).Name)\n\t}\n\n\tif missing := expected.Difference(fields); len(missing) > 0 {\n\t\tt.Errorf(\"Expected fields that were not seen in http.Transport: %v\", missing.List())\n\t}\n\tif extra := fields.Difference(expected); len(extra) > 0 {\n\t\tt.Errorf(\"New fields seen in http.Transport: %v\\nAdd to CopyClientTLSConfig if client-relevant, then add to expected list in TestCopyClientTLSConfig\", extra.List())\n\t}\n}\n\nfunc TestGetClientIP(t *testing.T) {\n\tipString := \"10.0.0.1\"\n\tip := net.ParseIP(ipString)\n\tinvalidIPString := \"invalidIPString\"\n\ttestCases := []struct {\n\t\tRequest http.Request\n\t\tExpectedIP net.IP\n\t}{\n\t\t{\n\t\t\tRequest: http.Request{},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Real-Ip\": {ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Real-Ip\": {invalidIPString},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString + \",\" + ipString},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\t\/\/ RemoteAddr is in the form host:port\n\t\t\t\tRemoteAddr: ipString + \":1234\",\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tRemoteAddr: invalidIPString,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tRequest: http.Request{\n\t\t\t\tHeader: map[string][]string{\n\t\t\t\t\t\"X-Forwarded-For\": {invalidIPString},\n\t\t\t\t},\n\t\t\t\t\/\/ RemoteAddr is in the form host:port\n\t\t\t\tRemoteAddr: ipString,\n\t\t\t},\n\t\t\tExpectedIP: ip,\n\t\t},\n\t}\n\n\tfor i, test := range testCases {\n\t\tif a, e := GetClientIP(&test.Request), test.ExpectedIP; reflect.DeepEqual(e, a) != true {\n\t\t\tt.Fatalf(\"test case %d failed. expected: %v, actual: %v\", i, e, a)\n\t\t}\n\t}\n}\n\nfunc TestProxierWithNoProxyCIDR(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tnoProxy string\n\t\turl string\n\n\t\texpectedDelegated bool\n\t}{\n\t\t{\n\t\t\tname: \"no env\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no cidr\",\n\t\t\tnoProxy: \"192.168.63.1\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"hostname\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/my-hostname\/api\",\n\t\t\texpectedDelegated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"match second cidr\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/192.168.143.1\/api\",\n\t\t\texpectedDelegated: false,\n\t\t},\n\t\t{\n\t\t\tname: \"match second cidr with host:port\",\n\t\t\tnoProxy: \"192.168.63.0\/24,192.168.143.0\/24\",\n\t\t\turl: \"https:\/\/192.168.143.1:8443\/api\",\n\t\t\texpectedDelegated: false,\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tos.Setenv(\"NO_PROXY\", test.noProxy)\n\t\tactualDelegated := false\n\t\tproxyFunc := NewProxierWithNoProxyCIDR(func(req *http.Request) (*url.URL, error) {\n\t\t\tactualDelegated = true\n\t\t\treturn nil, nil\n\t\t})\n\n\t\treq, err := http.NewRequest(\"GET\", test.url, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: unexpected err: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := proxyFunc(req); err != nil {\n\t\t\tt.Errorf(\"%s: unexpected err: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif test.expectedDelegated != actualDelegated {\n\t\t\tt.Errorf(\"%s: expected %v, got %v\", test.name, test.expectedDelegated, actualDelegated)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype fakeTLSClientConfigHolder struct {\n\tcalled bool\n}\n\nfunc (f *fakeTLSClientConfigHolder) TLSClientConfig() *tls.Config {\n\tf.called = true\n\treturn nil\n}\nfunc (f *fakeTLSClientConfigHolder) RoundTrip(*http.Request) (*http.Response, error) {\n\treturn nil, nil\n}\n\nfunc TestTLSClientConfigHolder(t *testing.T) {\n\trt := &fakeTLSClientConfigHolder{}\n\tTLSClientConfig(rt)\n\n\tif !rt.called {\n\t\tt.Errorf(\"didn't find tls config\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package testutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n)\n\ntype testCoverage struct {\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tTimeStamp int64 `xml:\"timestamp,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tPackages []testCoveragePkg `xml:\"packages>package\"`\n\tSources []string `xml:\"sources>source\"`\n\tXMLName xml.Name `xml:\"coverage\"`\n}\n\ntype testCoveragePkg struct {\n\tName string `xml:\"name,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tComplexity int `xml:\"complexity,attr\"`\n\tClasses []testCoverageClass `xml:\"classes>class\"`\n}\n\ntype testCoverageClass struct {\n\tName string `xml:\"name,attr\"`\n\tFilename string `xml:\"filename,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tComplexity int `xml:\"complexity,attr\"`\n\tMethods []testCoverageMethod `xml:\"methods>method\"`\n}\n\ntype testCoverageMethod struct {\n\tName string `xml:\"name,attr\"`\n\tSignature string `xml:\"signature,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tLines []testCoverageLine `xml:\"lines>line\"`\n}\n\ntype testCoverageLine struct {\n\tNumber int `xml:\"number,attr\"`\n\tHits int `xml:\"hits,attr\"`\n}\n\n\/\/ coberturaReportPath returns the path to the cobertura report.\nfunc coberturaReportPath(testName string) string {\n\tworkspace, fileName := os.Getenv(\"WORKSPACE\"), \"cobertura_report.xml\"\n\tif workspace == \"\" {\n\t\treturn filepath.Join(os.Getenv(\"HOME\"), \"tmp\", testName, fileName)\n\t} else {\n\t\treturn filepath.Join(workspace, fileName)\n\t}\n}\n\n\/\/ coverageFromGoTestOutput reads data from the given input, assuming\n\/\/ it contains the coverage information generated by \"go test -cover\",\n\/\/ and returns it as an in-memory data structure.\nfunc coverageFromGoTestOutput(ctx *tool.Context, testOutput io.Reader) (*testCoverage, error) {\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbin := filepath.Join(root, \"third_party\", \"go\", \"bin\", \"gocover-cobertura\")\n\tenv, err := util.VanadiumEnvironment(ctx, util.HostPlatform())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Stdin = testOutput\n\topts.Stdout = &out\n\topts.Env = env.Map()\n\tif err := ctx.Run().CommandWithOpts(opts, bin); err != nil {\n\t\treturn nil, err\n\t}\n\tvar coverage testCoverage\n\tif err := xml.Unmarshal(out.Bytes(), &coverage); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unmarshal() failed: %v\\n%v\", err, out.String())\n\t}\n\treturn &coverage, nil\n}\n\n\/\/ createCoberturaReport generates a cobertura report using the given\n\/\/ coverage information.\nfunc createCoberturaReport(ctx *tool.Context, testName string, data *testCoverage) error {\n\tbytes, err := xml.MarshalIndent(*data, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"MarshalIndent(%v) failed: %v\", *data, err)\n\t}\n\tif err := ctx.Run().WriteFile(coberturaReportPath(testName), bytes, os.FileMode(0644)); err != nil {\n\t\treturn fmt.Errorf(\"WriteFile(%v) failed: %v\", coberturaReportPath(testName), err)\n\t}\n\treturn nil\n}\n<commit_msg>TBR: devtools\/internal\/testutil: fix path in cobertura.go<commit_after>package testutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n)\n\ntype testCoverage struct {\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tTimeStamp int64 `xml:\"timestamp,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tPackages []testCoveragePkg `xml:\"packages>package\"`\n\tSources []string `xml:\"sources>source\"`\n\tXMLName xml.Name `xml:\"coverage\"`\n}\n\ntype testCoveragePkg struct {\n\tName string `xml:\"name,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tComplexity int `xml:\"complexity,attr\"`\n\tClasses []testCoverageClass `xml:\"classes>class\"`\n}\n\ntype testCoverageClass struct {\n\tName string `xml:\"name,attr\"`\n\tFilename string `xml:\"filename,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tComplexity int `xml:\"complexity,attr\"`\n\tMethods []testCoverageMethod `xml:\"methods>method\"`\n}\n\ntype testCoverageMethod struct {\n\tName string `xml:\"name,attr\"`\n\tSignature string `xml:\"signature,attr\"`\n\tBranchRate int `xml:\"branch-rate,attr\"`\n\tLineRate int `xml:\"line-rate,attr\"`\n\tLines []testCoverageLine `xml:\"lines>line\"`\n}\n\ntype testCoverageLine struct {\n\tNumber int `xml:\"number,attr\"`\n\tHits int `xml:\"hits,attr\"`\n}\n\n\/\/ coberturaReportPath returns the path to the cobertura report.\nfunc coberturaReportPath(testName string) string {\n\tworkspace, fileName := os.Getenv(\"WORKSPACE\"), \"cobertura_report.xml\"\n\tif workspace == \"\" {\n\t\treturn filepath.Join(os.Getenv(\"HOME\"), \"tmp\", testName, fileName)\n\t} else {\n\t\treturn filepath.Join(workspace, fileName)\n\t}\n}\n\n\/\/ coverageFromGoTestOutput reads data from the given input, assuming\n\/\/ it contains the coverage information generated by \"go test -cover\",\n\/\/ and returns it as an in-memory data structure.\nfunc coverageFromGoTestOutput(ctx *tool.Context, testOutput io.Reader) (*testCoverage, error) {\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbin := util.ThirdPartyBinPath(root, \"gocover-cobertura\")\n\tenv, err := util.VanadiumEnvironment(ctx, util.HostPlatform())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Stdin = testOutput\n\topts.Stdout = &out\n\topts.Env = env.Map()\n\tif err := ctx.Run().CommandWithOpts(opts, bin); err != nil {\n\t\treturn nil, err\n\t}\n\tvar coverage testCoverage\n\tif err := xml.Unmarshal(out.Bytes(), &coverage); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unmarshal() failed: %v\\n%v\", err, out.String())\n\t}\n\treturn &coverage, nil\n}\n\n\/\/ createCoberturaReport generates a cobertura report using the given\n\/\/ coverage information.\nfunc createCoberturaReport(ctx *tool.Context, testName string, data *testCoverage) error {\n\tbytes, err := xml.MarshalIndent(*data, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"MarshalIndent(%v) failed: %v\", *data, err)\n\t}\n\tif err := ctx.Run().WriteFile(coberturaReportPath(testName), bytes, os.FileMode(0644)); err != nil {\n\t\treturn fmt.Errorf(\"WriteFile(%v) failed: %v\", coberturaReportPath(testName), err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"GCSclient\", func() {\n\tvar (\n\t\terr error\n\t\ttempDir string\n\t\ttempFile *os.File\n\t\ttempVerDir string\n\t\ttempVerFile *os.File\n\t\truntime string\n\t\tdirectoryPrefix string\n\t)\n\n\tBeforeEach(func() {\n\t\tdirectoryPrefix = \"gcsclient-tests\"\n\t\truntime = fmt.Sprintf(\"%d\", time.Now().Unix())\n\t})\n\n\tDescribe(\"with a non versioned bucket\", func() {\n\t\tBeforeEach(func() {\n\t\t\ttempDir, err = ioutil.TempDir(\"\", \"gcs_client_integration_test\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempFile, err = ioutil.TempFile(tempDir, \"file-to-upload\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempFile.Write([]byte(\"hello-\" + runtime))\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := os.RemoveAll(tempDir)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"can interact with buckets\", func() {\n\t\t\t_, err := gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), \"application\/zip\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileObject, err := gcsClient.GetBucketObjectInfo(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fakeZipFileObject.ContentType).To(Equal(\"application\/zip\"))\n\n\t\t\tfiles, err := gcsClient.BucketObjects(bucketName, directoryPrefix)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(files).To(ConsistOf([]string{filepath.Join(directoryPrefix, \"file-to-upload-1\"), filepath.Join(directoryPrefix, \"file-to-upload-2\"), filepath.Join(directoryPrefix, \"zip-to-upload.zip\")}))\n\n\t\t\t_, err = gcsClient.ObjectGenerations(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).To(HaveOccurred())\n\t\t\tExpect(err.Error()).To(ContainSubstring(\"bucket is not versioned\"))\n\n\t\t\tfileOneURL, err := gcsClient.URL(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))))\n\n\t\t\t_, err = gcsClient.ObjectGenerations(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).To(HaveOccurred())\n\t\t\tExpect(err.Error()).To(ContainSubstring(\"bucket is not versioned\"))\n\n\t\t\tfileTwoURL, err := gcsClient.URL(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))))\n\n\t\t\terr = gcsClient.DownloadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0, filepath.Join(tempDir, \"downloaded-file\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tread, err := ioutil.ReadFile(filepath.Join(tempDir, \"downloaded-file\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(read).To(Equal([]byte(\"hello-\" + runtime)))\n\t\t})\n\t})\n\n\tDescribe(\"with a versioned bucket\", func() {\n\t\tBeforeEach(func() {\n\t\t\ttempVerDir, err = ioutil.TempDir(\"\", \"gcs-versioned-upload-dir\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempVerFile, err = ioutil.TempFile(tempVerDir, \"file-to-upload\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempVerFile.Write([]byte(\"hello-\" + runtime))\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := os.RemoveAll(tempVerDir)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileOneGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fileOneGeneration := range fileOneGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\n\t\t\tfileTwoGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fileTwoGeneration := range fileTwoGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\n\t\t\tfakeZipFileGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fakeZipFileGeneration := range fakeZipFileGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), fakeZipFileGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\t\t})\n\n\t\tIt(\"can interact with buckets\", func() {\n\t\t\tfileOneGeneration, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileTwoGeneration1, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileTwoGeneration2, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileGeneration, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), \"application\/zip\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileObject, err := gcsClient.GetBucketObjectInfo(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fakeZipFileObject.ContentType).To(Equal(\"application\/zip\"))\n\t\t\tExpect(fakeZipFileGeneration).To(Equal(fakeZipFileObject.Generation))\n\n\t\t\tfiles, err := gcsClient.BucketObjects(versionedBucketName, directoryPrefix)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(files).To(ConsistOf([]string{filepath.Join(directoryPrefix, \"file-to-upload-1\"), filepath.Join(directoryPrefix, \"file-to-upload-2\"), filepath.Join(directoryPrefix, \"zip-to-upload.zip\")}))\n\n\t\t\tfileOneGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneGenerations).To(ConsistOf([]int64{fileOneGeneration}))\n\n\t\t\tfileOneGenerationsObject, err := gcsClient.GetBucketObjectInfo(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneGenerations).To(ConsistOf([]int64{fileOneGenerationsObject.Generation}))\n\n\t\t\tfileOneURL, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))))\n\n\t\t\tfileOneURLGeneration, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURLGeneration).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)))\n\n\t\t\tfileTwoGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoGenerations).To(ConsistOf([]int64{fileTwoGeneration1, fileTwoGeneration2}))\n\n\t\t\tfileTwoURL, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))))\n\n\t\t\tfileTwoURLGeneration1, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration1)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURLGeneration1).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration1)))\n\n\t\t\tfileTwoURLGeneration2, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration2)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURLGeneration2).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration2)))\n\n\t\t\terr = gcsClient.DownloadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0, filepath.Join(tempVerDir, \"downloaded-file\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tread, err := ioutil.ReadFile(filepath.Join(tempVerDir, \"downloaded-file\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(read).To(Equal([]byte(\"hello-\" + runtime)))\n\t\t})\n\t})\n})\n<commit_msg>Update gcsClient.DownloadFile signature in integration\/gcsclient_test.go<commit_after>package integration_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"GCSclient\", func() {\n\tvar (\n\t\terr error\n\t\ttempDir string\n\t\ttempFile *os.File\n\t\ttempVerDir string\n\t\ttempVerFile *os.File\n\t\truntime string\n\t\tdirectoryPrefix string\n\t)\n\n\tBeforeEach(func() {\n\t\tdirectoryPrefix = \"gcsclient-tests\"\n\t\truntime = fmt.Sprintf(\"%d\", time.Now().Unix())\n\t})\n\n\tDescribe(\"with a non versioned bucket\", func() {\n\t\tBeforeEach(func() {\n\t\t\ttempDir, err = ioutil.TempDir(\"\", \"gcs_client_integration_test\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempFile, err = ioutil.TempFile(tempDir, \"file-to-upload\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempFile.Write([]byte(\"hello-\" + runtime))\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := os.RemoveAll(tempDir)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\terr = gcsClient.DeleteObject(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"can interact with buckets\", func() {\n\t\t\t_, err := gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = gcsClient.UploadFile(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), \"application\/zip\", tempFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileObject, err := gcsClient.GetBucketObjectInfo(bucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fakeZipFileObject.ContentType).To(Equal(\"application\/zip\"))\n\n\t\t\tfiles, err := gcsClient.BucketObjects(bucketName, directoryPrefix)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(files).To(ConsistOf([]string{filepath.Join(directoryPrefix, \"file-to-upload-1\"), filepath.Join(directoryPrefix, \"file-to-upload-2\"), filepath.Join(directoryPrefix, \"zip-to-upload.zip\")}))\n\n\t\t\t_, err = gcsClient.ObjectGenerations(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).To(HaveOccurred())\n\t\t\tExpect(err.Error()).To(ContainSubstring(\"bucket is not versioned\"))\n\n\t\t\tfileOneURL, err := gcsClient.URL(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))))\n\n\t\t\t_, err = gcsClient.ObjectGenerations(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).To(HaveOccurred())\n\t\t\tExpect(err.Error()).To(ContainSubstring(\"bucket is not versioned\"))\n\n\t\t\tfileTwoURL, err := gcsClient.URL(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", bucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))))\n\n\t\t\ttempFile, err := gcsClient.DownloadFile(bucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tread, err := ioutil.ReadFile(tempFile)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(read).To(Equal([]byte(\"hello-\" + runtime)))\n\t\t})\n\t})\n\n\tDescribe(\"with a versioned bucket\", func() {\n\t\tBeforeEach(func() {\n\t\t\ttempVerDir, err = ioutil.TempDir(\"\", \"gcs-versioned-upload-dir\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempVerFile, err = ioutil.TempFile(tempVerDir, \"file-to-upload\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\ttempVerFile.Write([]byte(\"hello-\" + runtime))\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := os.RemoveAll(tempVerDir)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileOneGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fileOneGeneration := range fileOneGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\n\t\t\tfileTwoGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fileTwoGeneration := range fileTwoGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\n\t\t\tfakeZipFileGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfor _, fakeZipFileGeneration := range fakeZipFileGenerations {\n\t\t\t\terr := gcsClient.DeleteObject(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), fakeZipFileGeneration)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t}\n\t\t})\n\n\t\tIt(\"can interact with buckets\", func() {\n\t\t\tfileOneGeneration, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileTwoGeneration1, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfileTwoGeneration2, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), \"\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileGeneration, err := gcsClient.UploadFile(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"), \"application\/zip\", tempVerFile.Name(), \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakeZipFileObject, err := gcsClient.GetBucketObjectInfo(versionedBucketName, filepath.Join(directoryPrefix, \"zip-to-upload.zip\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fakeZipFileObject.ContentType).To(Equal(\"application\/zip\"))\n\t\t\tExpect(fakeZipFileGeneration).To(Equal(fakeZipFileObject.Generation))\n\n\t\t\tfiles, err := gcsClient.BucketObjects(versionedBucketName, directoryPrefix)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(files).To(ConsistOf([]string{filepath.Join(directoryPrefix, \"file-to-upload-1\"), filepath.Join(directoryPrefix, \"file-to-upload-2\"), filepath.Join(directoryPrefix, \"zip-to-upload.zip\")}))\n\n\t\t\tfileOneGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneGenerations).To(ConsistOf([]int64{fileOneGeneration}))\n\n\t\t\tfileOneGenerationsObject, err := gcsClient.GetBucketObjectInfo(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneGenerations).To(ConsistOf([]int64{fileOneGenerationsObject.Generation}))\n\n\t\t\tfileOneURL, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"))))\n\n\t\t\tfileOneURLGeneration, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileOneURLGeneration).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), fileOneGeneration)))\n\n\t\t\tfileTwoGenerations, err := gcsClient.ObjectGenerations(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoGenerations).To(ConsistOf([]int64{fileTwoGeneration1, fileTwoGeneration2}))\n\n\t\t\tfileTwoURL, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURL).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"))))\n\n\t\t\tfileTwoURLGeneration1, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration1)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURLGeneration1).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration1)))\n\n\t\t\tfileTwoURLGeneration2, err := gcsClient.URL(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration2)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(fileTwoURLGeneration2).To(Equal(fmt.Sprintf(\"gs:\/\/%s\/%s#%d\", versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-2\"), fileTwoGeneration2)))\n\n\t\t\ttempFile, err := gcsClient.DownloadFile(versionedBucketName, filepath.Join(directoryPrefix, \"file-to-upload-1\"), 0)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tread, err := ioutil.ReadFile(tempFile)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(read).To(Equal([]byte(\"hello-\" + runtime)))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"os\/exec\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/fly\/ui\"\n\t\"github.com\/fatih\/color\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n)\n\nvar _ = Describe(\"Fly CLI\", func() {\n\tDescribe(\"pipelines\", func() {\n\t\tvar (\n\t\t\tflyCmd *exec.Cmd\n\t\t)\n\n\t\tContext(\"when pipelines are returned from the API\", func() {\n\t\t\tContext(\"when no --all flag is given\", func() {\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\")\n\t\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/teams\/main\/pipelines\"),\n\t\t\t\t\t\t\tghttp.RespondWithJSONEncoded(200, []atc.Pipeline{\n\t\t\t\t\t\t\t\t{Name: \"pipeline-1-longer\", URL: \"\/pipelines\/pipeline-1\", Paused: false, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-2\", URL: \"\/pipelines\/pipeline-2\", Paused: true, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-3\", URL: \"\/pipelines\/pipeline-3\", Paused: false, Public: true},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t})\n\n\t\t\t\tIt(\"only shows the team's pipelines\", func() {\n\t\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(sess).Should(gexec.Exit(0))\n\n\t\t\t\t\tExpect(sess.Out).To(PrintTable(ui.Table{\n\t\t\t\t\t\tHeaders: ui.TableRow{\n\t\t\t\t\t\t\t{Contents: \"name\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"paused\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"public\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: []ui.TableRow{\n\t\t\t\t\t\t\t{{Contents: \"pipeline-1-longer\"}, {Contents: \"no\"}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-2\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-3\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when --all is specified\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\", \"--all\")\n\t\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/pipelines\"),\n\t\t\t\t\t\t\tghttp.RespondWithJSONEncoded(200, []atc.Pipeline{\n\t\t\t\t\t\t\t\t{Name: \"pipeline-1-longer\", URL: \"\/pipelines\/pipeline-1\", Paused: false, Public: false, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-2\", URL: \"\/pipelines\/pipeline-2\", Paused: true, Public: false, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-3\", URL: \"\/pipelines\/pipeline-3\", Paused: false, Public: true, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"foreign-pipeline-1\", URL: \"\/pipelines\/foreign-pipeline-1\", Paused: false, Public: true, TeamName: \"other\"},\n\t\t\t\t\t\t\t\t{Name: \"foreign-pipeline-2\", URL: \"\/pipelines\/foreign-pipeline-2\", Paused: false, Public: true, TeamName: \"other\"},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes team and shared pipelines, with a team name column\", func() {\n\t\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(sess).Should(gexec.Exit(0))\n\n\t\t\t\t\tExpect(sess.Out).To(PrintTable(ui.Table{\n\t\t\t\t\t\tHeaders: ui.TableRow{\n\t\t\t\t\t\t\t{Contents: \"name\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"team\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"paused\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"public\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: []ui.TableRow{\n\t\t\t\t\t\t\t{{Contents: \"pipeline-1-longer\"}, {Contents: \"main\"}, {Contents: \"no\"}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-2\"}, {Contents: \"main\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-3\"}, {Contents: \"main\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t\t{{Contents: \"foreign-pipeline-1\"}, {Contents: \"other\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t\t{{Contents: \"foreign-pipeline-2\"}, {Contents: \"other\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"and the api returns an internal server error\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\")\n\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/teams\/main\/pipelines\"),\n\t\t\t\t\t\tghttp.RespondWith(500, \"\"),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t})\n\n\t\t\tIt(\"writes an error message to stderr\", func() {\n\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tEventually(sess).Should(gexec.Exit(1))\n\t\t\t\tEventually(sess.Err).Should(gbytes.Say(\"Unexpected Response\"))\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Test pipeline completion.<commit_after>package integration_test\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/fly\/ui\"\n\t\"github.com\/fatih\/color\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n)\n\nvar _ = Describe(\"Fly CLI\", func() {\n\tDescribe(\"pipelines\", func() {\n\t\tvar (\n\t\t\tflyCmd *exec.Cmd\n\t\t)\n\n\t\tContext(\"when pipelines are returned from the API\", func() {\n\t\t\tContext(\"when no --all flag is given\", func() {\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\")\n\t\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/teams\/main\/pipelines\"),\n\t\t\t\t\t\t\tghttp.RespondWithJSONEncoded(200, []atc.Pipeline{\n\t\t\t\t\t\t\t\t{Name: \"pipeline-1-longer\", URL: \"\/pipelines\/pipeline-1\", Paused: false, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-2\", URL: \"\/pipelines\/pipeline-2\", Paused: true, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-3\", URL: \"\/pipelines\/pipeline-3\", Paused: false, Public: true},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t})\n\n\t\t\t\tIt(\"only shows the team's pipelines\", func() {\n\t\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(sess).Should(gexec.Exit(0))\n\n\t\t\t\t\tExpect(sess.Out).To(PrintTable(ui.Table{\n\t\t\t\t\t\tHeaders: ui.TableRow{\n\t\t\t\t\t\t\t{Contents: \"name\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"paused\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"public\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: []ui.TableRow{\n\t\t\t\t\t\t\t{{Contents: \"pipeline-1-longer\"}, {Contents: \"no\"}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-2\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-3\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when --all is specified\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\", \"--all\")\n\t\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/pipelines\"),\n\t\t\t\t\t\t\tghttp.RespondWithJSONEncoded(200, []atc.Pipeline{\n\t\t\t\t\t\t\t\t{Name: \"pipeline-1-longer\", URL: \"\/pipelines\/pipeline-1\", Paused: false, Public: false, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-2\", URL: \"\/pipelines\/pipeline-2\", Paused: true, Public: false, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"pipeline-3\", URL: \"\/pipelines\/pipeline-3\", Paused: false, Public: true, TeamName: \"main\"},\n\t\t\t\t\t\t\t\t{Name: \"foreign-pipeline-1\", URL: \"\/pipelines\/foreign-pipeline-1\", Paused: false, Public: true, TeamName: \"other\"},\n\t\t\t\t\t\t\t\t{Name: \"foreign-pipeline-2\", URL: \"\/pipelines\/foreign-pipeline-2\", Paused: false, Public: true, TeamName: \"other\"},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes team and shared pipelines, with a team name column\", func() {\n\t\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(sess).Should(gexec.Exit(0))\n\n\t\t\t\t\tExpect(sess.Out).To(PrintTable(ui.Table{\n\t\t\t\t\t\tHeaders: ui.TableRow{\n\t\t\t\t\t\t\t{Contents: \"name\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"team\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"paused\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t\t{Contents: \"public\", Color: color.New(color.Bold)},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: []ui.TableRow{\n\t\t\t\t\t\t\t{{Contents: \"pipeline-1-longer\"}, {Contents: \"main\"}, {Contents: \"no\"}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-2\"}, {Contents: \"main\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}, {Contents: \"no\"}},\n\t\t\t\t\t\t\t{{Contents: \"pipeline-3\"}, {Contents: \"main\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t\t{{Contents: \"foreign-pipeline-1\"}, {Contents: \"other\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t\t{{Contents: \"foreign-pipeline-2\"}, {Contents: \"other\"}, {Contents: \"no\"}, {Contents: \"yes\", Color: color.New(color.FgCyan)}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"completion\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tos.Setenv(\"GO_FLAGS_COMPLETION\", \"1\")\n\t\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/teams\/main\/pipelines\"),\n\t\t\t\t\t\t\tghttp.RespondWithJSONEncoded(200, []atc.Pipeline{\n\t\t\t\t\t\t\t\t{Name: \"some-pipeline-1\", URL: \"\/pipelines\/some-pipeline-1\", Paused: false, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"some-pipeline-2\", URL: \"\/pipelines\/some-pipeline-2\", Paused: false, Public: false},\n\t\t\t\t\t\t\t\t{Name: \"another-pipeline\", URL: \"\/pipelines\/another-pipeline\", Paused: false, Public: false},\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tos.Unsetenv(\"GO_FLAGS_COMPLETION\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns all matching pipelines\", func() {\n\t\t\t\t\tflyCmd := exec.Command(flyPath, \"-t\", targetName, \"get-pipeline\", \"-p\", \"some-\")\n\t\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(sess).Should(gexec.Exit(0))\n\t\t\t\t\tEventually(sess.Out).Should(gbytes.Say(\"some-pipeline-1\"))\n\t\t\t\t\tEventually(sess.Out).Should(gbytes.Say(\"some-pipeline-2\"))\n\t\t\t\t\tEventually(sess.Out).ShouldNot(gbytes.Say(\"another-pipeline\"))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"and the api returns an internal server error\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tflyCmd = exec.Command(flyPath, \"-t\", targetName, \"pipelines\")\n\t\t\t\tatcServer.AppendHandlers(\n\t\t\t\t\tghttp.CombineHandlers(\n\t\t\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/api\/v1\/teams\/main\/pipelines\"),\n\t\t\t\t\t\tghttp.RespondWith(500, \"\"),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t})\n\n\t\t\tIt(\"writes an error message to stderr\", func() {\n\t\t\t\tsess, err := gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tEventually(sess).Should(gexec.Exit(1))\n\t\t\t\tEventually(sess.Err).Should(gbytes.Say(\"Unexpected Response\"))\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2019 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS-IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\npackage schedbt\n\nimport (\n\t\"bufio\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\teventpb \"github.com\/google\/schedviz\/tracedata\/schedviz_events_go_proto\"\n\t\"github.com\/google\/schedviz\/tracedata\/testeventsetbuilder\"\n)\n\nfunc TestParsing(t *testing.T) {\n\ttests := []struct {\n\t\tdescription string\n\t\tinput string\n\t\twant *eventpb.EventSet\n\t}{{\n\t\t\"Valid trace\",\n\t\t`Attaching 5 probes...\nST:0:beef\nP:a:64:70:Thread 1\nP:a:c8:70:Thread 2\nS:a:0:64:0:c8\nM:14:0:0:1:64\nW:1e:0:1:64`,\n\t\ttesteventsetbuilder.TestProtobuf(t, emptyEventSet().\n\t\t\tWithEvent(\"sched_switch\", 0, 0xbeef+10, false,\n\t\t\t\t100, \"Thread 1\", 112, 0,\n\t\t\t\t200, \"Thread 2\", 112).\n\t\t\tWithEvent(\"sched_migrate_task\", 0, 0xbeef+20, false,\n\t\t\t\t100, \"Thread 1\", 112,\n\t\t\t\t0, 1).\n\t\t\tWithEvent(\"sched_wakeup\", 0, 0xbeef+30, false,\n\t\t\t\t100, \"Thread 1\", 112, 1)),\n\t}, {\n\t\t\"Process name with colons\",\n\t\t`Attaching 5 probes...\nST:0:beef\nP:a:64:70:Thread:1\nM:a:0:0:1:64`,\n\t\ttesteventsetbuilder.TestProtobuf(t, emptyEventSet().\n\t\t\tWithEvent(\"sched_migrate_task\", 0, 0xbeef+10, false,\n\t\t\t\t100, \"Thread:1\", 112,\n\t\t\t\t0, 1)),\n\t}}\n\tfor _, test := range tests {\n\t\tt.Run(test.description, func(t *testing.T) {\n\t\t\tp := NewParser()\n\t\t\tr := strings.NewReader(test.input)\n\t\t\tbufr := bufio.NewReader(r)\n\t\t\tif err := p.Parse(bufr); err != nil {\n\t\t\t\tt.Fatalf(\"Parse() yielded unexpected error %v\", err)\n\t\t\t}\n\t\t\tgot, err := p.EventSet()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"EventSet() yielded unexpected error %v\", err)\n\t\t\t}\n\t\t\tif d := cmp.Diff(test.want, got); d != \"\" {\n\t\t\t\tt.Errorf(\"Parser produced %s, diff(want->got) %s\", proto.MarshalTextString(got), d)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Internal change<commit_after>\/\/\n\/\/ Copyright 2019 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS-IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\npackage schedbt\n\nimport (\n\t\"bufio\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\teventpb \"github.com\/google\/schedviz\/tracedata\/schedviz_events_go_proto\"\n\t\"github.com\/google\/schedviz\/tracedata\/testeventsetbuilder\"\n)\n\nfunc TestParsing(t *testing.T) {\n\ttests := []struct {\n\t\tdescription string\n\t\tinput string\n\t\twant *eventpb.EventSet\n\t}{{\n\t\t\"Valid trace\",\n\t\t`Attaching 5 probes...\nST:0:beef\nP:a:64:70:Thread 1\nP:a:c8:70:Thread 2\nS:a:0:64:0:c8\nM:14:0:0:1:64\nW:1e:0:1:64`,\n\t\ttesteventsetbuilder.TestProtobuf(t, emptyEventSet().\n\t\t\tWithEvent(\"sched_switch\", 0, 0xbeef+10, false,\n\t\t\t\t100, \"Thread 1\", 112, 0,\n\t\t\t\t200, \"Thread 2\", 112).\n\t\t\tWithEvent(\"sched_migrate_task\", 0, 0xbeef+20, false,\n\t\t\t\t100, \"Thread 1\", 112,\n\t\t\t\t0, 1).\n\t\t\tWithEvent(\"sched_wakeup\", 0, 0xbeef+30, false,\n\t\t\t\t100, \"Thread 1\", 112, 1)),\n\t}, {\n\t\t\"Process name with colons\",\n\t\t`Attaching 5 probes...\nST:0:beef\nP:a:64:70:Thread:1\nM:a:0:0:1:64`,\n\t\ttesteventsetbuilder.TestProtobuf(t, emptyEventSet().\n\t\t\tWithEvent(\"sched_migrate_task\", 0, 0xbeef+10, false,\n\t\t\t\t100, \"Thread:1\", 112,\n\t\t\t\t0, 1)),\n\t}}\n\tfor _, test := range tests {\n\t\tt.Run(test.description, func(t *testing.T) {\n\t\t\tp := NewParser()\n\t\t\tr := strings.NewReader(test.input)\n\t\t\tbufr := bufio.NewReader(r)\n\t\t\tif err := p.Parse(bufr); err != nil {\n\t\t\t\tt.Fatalf(\"Parse() yielded unexpected error %v\", err)\n\t\t\t}\n\t\t\tgot, err := p.EventSet()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"EventSet() yielded unexpected error %v\", err)\n\t\t\t}\n\t\t\tif d := cmp.Diff(test.want, got, cmp.Comparer(proto.Equal)); d != \"\" {\n\t\t\t\tt.Errorf(\"Parser produced %s, diff(want->got) %s\", proto.MarshalTextString(got), d)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/configs\"\n\t\"github.com\/hashicorp\/terraform\/internal\/getproviders\"\n\t\"github.com\/hashicorp\/terraform\/internal\/tfdiags\"\n\t\"github.com\/xlab\/treeprint\"\n)\n\n\/\/ ProvidersCommand is a Command implementation that prints out information\n\/\/ about the providers used in the current configuration\/state.\ntype ProvidersCommand struct {\n\tMeta\n}\n\nfunc (c *ProvidersCommand) Help() string {\n\treturn providersCommandHelp\n}\n\nfunc (c *ProvidersCommand) Synopsis() string {\n\treturn \"Show the providers required for this configuration\"\n}\n\nfunc (c *ProvidersCommand) Run(args []string) int {\n\targs = c.Meta.process(args)\n\tcmdFlags := c.Meta.defaultFlagSet(\"providers\")\n\tcmdFlags.Usage = func() { c.Ui.Error(c.Help()) }\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error parsing command-line flags: %s\\n\", err.Error()))\n\t\treturn 1\n\t}\n\n\tconfigPath, err := ModulePath(cmdFlags.Args())\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\treturn 1\n\t}\n\n\tvar diags tfdiags.Diagnostics\n\n\tempty, err := configs.IsEmptyDir(configPath)\n\tif err != nil {\n\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\ttfdiags.Error,\n\t\t\t\"Error validating configuration directory\",\n\t\t\tfmt.Sprintf(\"Terraform encountered an unexpected error while verifying that the given configuration directory is valid: %s.\", err),\n\t\t))\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\tif empty {\n\t\tabsPath, err := filepath.Abs(configPath)\n\t\tif err != nil {\n\t\t\tabsPath = configPath\n\t\t}\n\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\ttfdiags.Error,\n\t\t\t\"No configuration files\",\n\t\t\tfmt.Sprintf(\"The directory %s contains no Terraform configuration files.\", absPath),\n\t\t))\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\tconfig, configDiags := c.loadConfig(configPath)\n\tdiags = diags.Append(configDiags)\n\tif configDiags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\t\/\/ Load the backend\n\tb, backendDiags := c.Backend(&BackendOpts{\n\t\tConfig: config.Module.Backend,\n\t})\n\tdiags = diags.Append(backendDiags)\n\tif backendDiags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\t\/\/ This is a read-only command\n\tc.ignoreRemoteVersionConflict(b)\n\n\t\/\/ Get the state\n\tenv, err := c.Workspace()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error selecting workspace: %s\", err))\n\t\treturn 1\n\t}\n\ts, err := b.StateMgr(env)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to load state: %s\", err))\n\t\treturn 1\n\t}\n\tif err := s.RefreshState(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to load state: %s\", err))\n\t\treturn 1\n\t}\n\n\treqs, reqDiags := config.ProviderRequirementsByModule()\n\tdiags = diags.Append(reqDiags)\n\tif diags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\tstate := s.State()\n\tvar stateReqs getproviders.Requirements\n\tif state != nil {\n\t\tstateReqs = state.ProviderRequirements()\n\t}\n\n\tprintRoot := treeprint.New()\n\tc.populateTreeNode(printRoot, reqs)\n\n\tc.Ui.Output(\"\\nProviders required by configuration:\")\n\tc.Ui.Output(printRoot.String())\n\n\tif len(stateReqs) > 0 {\n\t\tc.Ui.Output(\"Providers required by state:\\n\")\n\t\tfor fqn := range stateReqs {\n\t\t\tc.Ui.Output(fmt.Sprintf(\" provider[%s]\\n\", fqn.String()))\n\t\t}\n\t}\n\n\tc.showDiagnostics(diags)\n\tif diags.HasErrors() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (c *ProvidersCommand) populateTreeNode(tree treeprint.Tree, node *configs.ModuleRequirements) {\n\tfor fqn, dep := range node.Requirements {\n\t\tversionsStr := getproviders.VersionConstraintsString(dep)\n\t\tif versionsStr != \"\" {\n\t\t\tversionsStr = \" \" + versionsStr\n\t\t}\n\t\ttree.AddNode(fmt.Sprintf(\"provider[%s]%s\", fqn.String(), versionsStr))\n\t}\n\tfor name, childNode := range node.Children {\n\t\tbranch := tree.AddBranch(fmt.Sprintf(\"module.%s\", name))\n\t\tc.populateTreeNode(branch, childNode)\n\t}\n}\n\nconst providersCommandHelp = `\nUsage: terraform [global options] providers [DIR]\n\n Prints out a tree of modules in the referenced configuration annotated with\n their provider requirements.\n\n This provides an overview of all of the provider requirements across all\n referenced modules, as an aid to understanding why particular provider\n plugins are needed and why particular versions are selected.\n`\n<commit_msg>goimports on providers.go only<commit_after>package command\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/xlab\/treeprint\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/configs\"\n\t\"github.com\/hashicorp\/terraform\/internal\/getproviders\"\n\t\"github.com\/hashicorp\/terraform\/internal\/tfdiags\"\n)\n\n\/\/ ProvidersCommand is a Command implementation that prints out information\n\/\/ about the providers used in the current configuration\/state.\ntype ProvidersCommand struct {\n\tMeta\n}\n\nfunc (c *ProvidersCommand) Help() string {\n\treturn providersCommandHelp\n}\n\nfunc (c *ProvidersCommand) Synopsis() string {\n\treturn \"Show the providers required for this configuration\"\n}\n\nfunc (c *ProvidersCommand) Run(args []string) int {\n\targs = c.Meta.process(args)\n\tcmdFlags := c.Meta.defaultFlagSet(\"providers\")\n\tcmdFlags.Usage = func() { c.Ui.Error(c.Help()) }\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error parsing command-line flags: %s\\n\", err.Error()))\n\t\treturn 1\n\t}\n\n\tconfigPath, err := ModulePath(cmdFlags.Args())\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\treturn 1\n\t}\n\n\tvar diags tfdiags.Diagnostics\n\n\tempty, err := configs.IsEmptyDir(configPath)\n\tif err != nil {\n\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\ttfdiags.Error,\n\t\t\t\"Error validating configuration directory\",\n\t\t\tfmt.Sprintf(\"Terraform encountered an unexpected error while verifying that the given configuration directory is valid: %s.\", err),\n\t\t))\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\tif empty {\n\t\tabsPath, err := filepath.Abs(configPath)\n\t\tif err != nil {\n\t\t\tabsPath = configPath\n\t\t}\n\t\tdiags = diags.Append(tfdiags.Sourceless(\n\t\t\ttfdiags.Error,\n\t\t\t\"No configuration files\",\n\t\t\tfmt.Sprintf(\"The directory %s contains no Terraform configuration files.\", absPath),\n\t\t))\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\tconfig, configDiags := c.loadConfig(configPath)\n\tdiags = diags.Append(configDiags)\n\tif configDiags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\t\/\/ Load the backend\n\tb, backendDiags := c.Backend(&BackendOpts{\n\t\tConfig: config.Module.Backend,\n\t})\n\tdiags = diags.Append(backendDiags)\n\tif backendDiags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\t\/\/ This is a read-only command\n\tc.ignoreRemoteVersionConflict(b)\n\n\t\/\/ Get the state\n\tenv, err := c.Workspace()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error selecting workspace: %s\", err))\n\t\treturn 1\n\t}\n\ts, err := b.StateMgr(env)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to load state: %s\", err))\n\t\treturn 1\n\t}\n\tif err := s.RefreshState(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to load state: %s\", err))\n\t\treturn 1\n\t}\n\n\treqs, reqDiags := config.ProviderRequirementsByModule()\n\tdiags = diags.Append(reqDiags)\n\tif diags.HasErrors() {\n\t\tc.showDiagnostics(diags)\n\t\treturn 1\n\t}\n\n\tstate := s.State()\n\tvar stateReqs getproviders.Requirements\n\tif state != nil {\n\t\tstateReqs = state.ProviderRequirements()\n\t}\n\n\tprintRoot := treeprint.New()\n\tc.populateTreeNode(printRoot, reqs)\n\n\tc.Ui.Output(\"\\nProviders required by configuration:\")\n\tc.Ui.Output(printRoot.String())\n\n\tif len(stateReqs) > 0 {\n\t\tc.Ui.Output(\"Providers required by state:\\n\")\n\t\tfor fqn := range stateReqs {\n\t\t\tc.Ui.Output(fmt.Sprintf(\" provider[%s]\\n\", fqn.String()))\n\t\t}\n\t}\n\n\tc.showDiagnostics(diags)\n\tif diags.HasErrors() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (c *ProvidersCommand) populateTreeNode(tree treeprint.Tree, node *configs.ModuleRequirements) {\n\tfor fqn, dep := range node.Requirements {\n\t\tversionsStr := getproviders.VersionConstraintsString(dep)\n\t\tif versionsStr != \"\" {\n\t\t\tversionsStr = \" \" + versionsStr\n\t\t}\n\t\ttree.AddNode(fmt.Sprintf(\"provider[%s]%s\", fqn.String(), versionsStr))\n\t}\n\tfor name, childNode := range node.Children {\n\t\tbranch := tree.AddBranch(fmt.Sprintf(\"module.%s\", name))\n\t\tc.populateTreeNode(branch, childNode)\n\t}\n}\n\nconst providersCommandHelp = `\nUsage: terraform [global options] providers [DIR]\n\n Prints out a tree of modules in the referenced configuration annotated with\n their provider requirements.\n\n This provides an overview of all of the provider requirements across all\n referenced modules, as an aid to understanding why particular provider\n plugins are needed and why particular versions are selected.\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage frontend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/mod\/semver\"\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/log\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n\t\"golang.org\/x\/pkgsite\/internal\/version\"\n)\n\n\/\/ VersionsDetails contains the hierarchy of version summary information used\n\/\/ to populate the version tab. Version information is organized into separate\n\/\/ lists, one for each (ModulePath, Major Version) pair.\ntype VersionsDetails struct {\n\t\/\/ ThisModule is the slice of VersionLists with the same module path as the\n\t\/\/ current package.\n\tThisModule []*VersionList\n\n\t\/\/ OtherModules is the slice of VersionLists with a different module path\n\t\/\/ from the current package.\n\tOtherModules []*VersionList\n}\n\n\/\/ VersionListKey identifies a version list on the versions tab. We have a\n\/\/ separate VersionList for each major version of a module series. Notably we\n\/\/ have more version lists than module paths: v0 and v1 module versions are in\n\/\/ separate version lists, despite having the same module path.\ntype VersionListKey struct {\n\t\/\/ ModulePath is the module path of this major version.\n\tModulePath string\n\t\/\/ Major is the major version string (e.g. v1, v2)\n\tMajor string\n}\n\n\/\/ VersionList holds all versions corresponding to a unique (module path,\n\/\/ major version) tuple in the version hierarchy.\ntype VersionList struct {\n\tVersionListKey\n\t\/\/ Versions holds the nested version summaries, organized in descending\n\t\/\/ semver order.\n\tVersions []*VersionSummary\n}\n\n\/\/ VersionSummary holds data required to format the version link on the\n\/\/ versions tab.\ntype VersionSummary struct {\n\tTooltipVersion string\n\tDisplayVersion string\n\tCommitTime string\n\t\/\/ Link to this version, for use in the anchor href.\n\tLink string\n}\n\n\/\/ fetchModuleVersionsDetails builds a version hierarchy for module versions\n\/\/ with the same series path as the given version.\nfunc fetchModuleVersionsDetails(ctx context.Context, ds internal.DataSource, mi *internal.ModuleInfo) (*VersionsDetails, error) {\n\tversions, err := ds.GetTaggedVersionsForModule(ctx, mi.ModulePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If no tagged versions of the module are found, fetch pseudo-versions\n\t\/\/ instead.\n\tif len(versions) == 0 {\n\t\tversions, err = ds.GetPseudoVersionsForModule(ctx, mi.ModulePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlinkify := func(m *internal.ModuleInfo) string {\n\t\treturn constructModuleURL(m.ModulePath, linkVersion(m.Version, m.ModulePath))\n\t}\n\treturn buildVersionDetails(mi.ModulePath, versions, linkify), nil\n}\n\n\/\/ fetchPackageVersionsDetails builds a version hierarchy for all module\n\/\/ versions containing a package path with v1 import path matching the given v1 path.\nfunc fetchPackageVersionsDetails(ctx context.Context, ds internal.DataSource, pkgPath, v1Path, modulePath string) (*VersionsDetails, error) {\n\tversions, err := ds.GetTaggedVersionsForPackageSeries(ctx, pkgPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If no tagged versions for the package series are found, fetch the\n\t\/\/ pseudo-versions instead.\n\tif len(versions) == 0 {\n\t\tversions, err = ds.GetPseudoVersionsForPackageSeries(ctx, pkgPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar filteredVersions []*internal.ModuleInfo\n\t\/\/ TODO(rfindley): remove this filtering, as it should not be necessary and\n\t\/\/ is probably a relic of earlier version query implementations.\n\tfor _, v := range versions {\n\t\tif seriesPath := v.SeriesPath(); strings.HasPrefix(v1Path, seriesPath) || seriesPath == stdlib.ModulePath {\n\t\t\tfilteredVersions = append(filteredVersions, v)\n\t\t} else {\n\t\t\tlog.Errorf(ctx, \"got version with mismatching series: %q\", seriesPath)\n\t\t}\n\t}\n\n\tlinkify := func(mi *internal.ModuleInfo) string {\n\t\t\/\/ Here we have only version information, but need to construct the full\n\t\t\/\/ import path of the package corresponding to this version.\n\t\tvar versionPath string\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tversionPath = pkgPath\n\t\t} else {\n\t\t\tversionPath = pathInVersion(v1Path, mi)\n\t\t}\n\t\treturn constructPackageURL(versionPath, mi.ModulePath, linkVersion(mi.Version, mi.ModulePath))\n\t}\n\treturn buildVersionDetails(modulePath, filteredVersions, linkify), nil\n}\n\n\/\/ pathInVersion constructs the full import path of the package corresponding\n\/\/ to mi, given its v1 path. To do this, we first compute the suffix of the\n\/\/ package path in the given module series, and then append it to the real\n\/\/ (versioned) module path.\n\/\/\n\/\/ For example: if we're considering package foo.com\/v3\/bar\/baz, and encounter\n\/\/ module version foo.com\/bar\/v2, we do the following:\n\/\/ 1) Start with the v1Path foo.com\/bar\/baz.\n\/\/ 2) Trim off the version series path foo.com\/bar to get 'baz'.\n\/\/ 3) Join with the versioned module path foo.com\/bar\/v2 to get\n\/\/ foo.com\/bar\/v2\/baz.\n\/\/ ...being careful about slashes along the way.\nfunc pathInVersion(v1Path string, mi *internal.ModuleInfo) string {\n\tsuffix := strings.TrimPrefix(strings.TrimPrefix(v1Path, mi.SeriesPath()), \"\/\")\n\tif suffix == \"\" {\n\t\treturn mi.ModulePath\n\t}\n\treturn path.Join(mi.ModulePath, suffix)\n}\n\n\/\/ buildVersionDetails constructs the version hierarchy to be rendered on the\n\/\/ versions tab, organizing major versions into those that have the same module\n\/\/ path as the package version under consideration, and those that don't. The\n\/\/ given versions MUST be sorted first by module path and then by semver.\nfunc buildVersionDetails(currentModulePath string, modInfos []*internal.ModuleInfo, linkify func(v *internal.ModuleInfo) string) *VersionsDetails {\n\n\t\/\/ lists organizes versions by VersionListKey. Note that major version isn't\n\t\/\/ sufficient as a key: there are packages contained in the same major\n\t\/\/ version of different modules, for example github.com\/hashicorp\/vault\/api,\n\t\/\/ which exists in v1 of both of github.com\/hashicorp\/vault and\n\t\/\/ github.com\/hashicorp\/vault\/api.\n\tlists := make(map[VersionListKey][]*VersionSummary)\n\t\/\/ seenLists tracks the order in which we encounter entries of each version\n\t\/\/ list. We want to preserve this order.\n\tvar seenLists []VersionListKey\n\tfor _, mi := range modInfos {\n\t\t\/\/ Try to resolve the most appropriate major version for this version. If\n\t\t\/\/ we detect a +incompatible version (when the path version does not match\n\t\t\/\/ the sematic version), we prefer the path version.\n\t\tmajor := semver.Major(mi.Version)\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tvar err error\n\t\t\tmajor, err = stdlib.MajorVersionForVersion(mi.Version)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\tif _, pathMajor, ok := module.SplitPathVersion(mi.ModulePath); ok {\n\t\t\t\/\/ We prefer the path major version except for v1 import paths where the\n\t\t\t\/\/ semver major version is v0. In this case, we prefer the more specific\n\t\t\t\/\/ semver version.\n\t\t\tif pathMajor != \"\" {\n\t\t\t\t\/\/ Trim both '\/' and '.' from the path major version to account for\n\t\t\t\t\/\/ standard and gopkg.in module paths.\n\t\t\t\tmajor = strings.TrimLeft(pathMajor, \"\/.\")\n\t\t\t} else if major != \"v0\" && !strings.HasPrefix(major, \"go\") {\n\t\t\t\tmajor = \"v1\"\n\t\t\t}\n\t\t}\n\t\tkey := VersionListKey{ModulePath: mi.ModulePath, Major: major}\n\t\tttversion := mi.Version\n\t\tfmtVersion := displayVersion(mi.Version, mi.ModulePath)\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tttversion = fmtVersion \/\/ tooltips will show the Go tag\n\t\t}\n\t\tvs := &VersionSummary{\n\t\t\tTooltipVersion: ttversion,\n\t\t\tLink: linkify(mi),\n\t\t\tCommitTime: elapsedTime(mi.CommitTime),\n\t\t\tDisplayVersion: fmtVersion,\n\t\t}\n\t\tif _, ok := lists[key]; !ok {\n\t\t\tseenLists = append(seenLists, key)\n\t\t}\n\t\tlists[key] = append(lists[key], vs)\n\t}\n\n\tvar details VersionsDetails\n\tfor _, key := range seenLists {\n\t\tvl := &VersionList{\n\t\t\tVersionListKey: key,\n\t\t\tVersions: lists[key],\n\t\t}\n\t\tif key.ModulePath == currentModulePath {\n\t\t\tdetails.ThisModule = append(details.ThisModule, vl)\n\t\t} else {\n\t\t\tdetails.OtherModules = append(details.OtherModules, vl)\n\t\t}\n\t}\n\treturn &details\n}\n\n\/\/ formatVersion formats a more readable representation of the given version\n\/\/ string. On any parsing error, it simply returns the input unmodified.\n\/\/\n\/\/ For prerelease versions, formatVersion separates the prerelease portion of\n\/\/ the version string into a parenthetical. i.e.\n\/\/ formatVersion(\"v1.2.3-alpha\") = \"v1.2.3 (alpha)\"\n\/\/\n\/\/ For pseudo versions, formatVersion uses a short commit hash to identify the\n\/\/ version. i.e.\n\/\/ formatVersion(\"v1.2.3-20190311183353-d8887717615a\") = \"v.1.2.3 (d888771)\"\nfunc formatVersion(v string) string {\n\tvType, err := version.ParseType(v)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"Error parsing version %q: %v\", v, err)\n\t\treturn v\n\t}\n\tpre := semver.Prerelease(v)\n\tbase := strings.TrimSuffix(v, pre)\n\tpre = strings.TrimPrefix(pre, \"-\")\n\tswitch vType {\n\tcase version.TypePrerelease:\n\t\treturn fmt.Sprintf(\"%s (%s)\", base, pre)\n\tcase version.TypePseudo:\n\t\trev := pseudoVersionRev(v)\n\t\tcommitLen := 7\n\t\tif len(rev) < commitLen {\n\t\t\tcommitLen = len(rev)\n\t\t}\n\t\treturn fmt.Sprintf(\"%s (%s)\", base, rev[0:commitLen])\n\tdefault:\n\t\treturn v\n\t}\n}\n\n\/\/ pseudoVersionRev extracts the commit identifier from a pseudo version\n\/\/ string. It assumes the pseudo version is correctly formatted.\nfunc pseudoVersionRev(v string) string {\n\tv = strings.TrimSuffix(v, \"+incompatible\")\n\tj := strings.LastIndex(v, \"-\")\n\treturn v[j+1:]\n}\n\n\/\/ displayVersion returns the version string, formatted for display.\nfunc displayVersion(v string, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn goTagForVersion(v)\n\t}\n\treturn formatVersion(v)\n}\n\n\/\/ linkVersion returns the version string, suitable for use in\n\/\/ a link to this site.\nfunc linkVersion(v string, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn goTagForVersion(v)\n\t}\n\treturn v\n}\n\n\/\/ goTagForVersion returns the Go tag corresponding to a given semantic\n\/\/ version. It should only be used if we are 100% sure the version will\n\/\/ correspond to a Go tag, such as when we are fetching the version from the\n\/\/ database.\nfunc goTagForVersion(v string) string {\n\ttag, err := stdlib.TagForVersion(v)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"goTagForVersion(%q): %v\", v, err)\n\t\treturn \"unknown\"\n\t}\n\treturn tag\n}\n<commit_msg>internal\/frontend: remove unnecessary version filtering<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage frontend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/mod\/semver\"\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/log\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n\t\"golang.org\/x\/pkgsite\/internal\/version\"\n)\n\n\/\/ VersionsDetails contains the hierarchy of version summary information used\n\/\/ to populate the version tab. Version information is organized into separate\n\/\/ lists, one for each (ModulePath, Major Version) pair.\ntype VersionsDetails struct {\n\t\/\/ ThisModule is the slice of VersionLists with the same module path as the\n\t\/\/ current package.\n\tThisModule []*VersionList\n\n\t\/\/ OtherModules is the slice of VersionLists with a different module path\n\t\/\/ from the current package.\n\tOtherModules []*VersionList\n}\n\n\/\/ VersionListKey identifies a version list on the versions tab. We have a\n\/\/ separate VersionList for each major version of a module series. Notably we\n\/\/ have more version lists than module paths: v0 and v1 module versions are in\n\/\/ separate version lists, despite having the same module path.\ntype VersionListKey struct {\n\t\/\/ ModulePath is the module path of this major version.\n\tModulePath string\n\t\/\/ Major is the major version string (e.g. v1, v2)\n\tMajor string\n}\n\n\/\/ VersionList holds all versions corresponding to a unique (module path,\n\/\/ major version) tuple in the version hierarchy.\ntype VersionList struct {\n\tVersionListKey\n\t\/\/ Versions holds the nested version summaries, organized in descending\n\t\/\/ semver order.\n\tVersions []*VersionSummary\n}\n\n\/\/ VersionSummary holds data required to format the version link on the\n\/\/ versions tab.\ntype VersionSummary struct {\n\tTooltipVersion string\n\tDisplayVersion string\n\tCommitTime string\n\t\/\/ Link to this version, for use in the anchor href.\n\tLink string\n}\n\n\/\/ fetchModuleVersionsDetails builds a version hierarchy for module versions\n\/\/ with the same series path as the given version.\nfunc fetchModuleVersionsDetails(ctx context.Context, ds internal.DataSource, mi *internal.ModuleInfo) (*VersionsDetails, error) {\n\tversions, err := ds.GetTaggedVersionsForModule(ctx, mi.ModulePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If no tagged versions of the module are found, fetch pseudo-versions\n\t\/\/ instead.\n\tif len(versions) == 0 {\n\t\tversions, err = ds.GetPseudoVersionsForModule(ctx, mi.ModulePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlinkify := func(m *internal.ModuleInfo) string {\n\t\treturn constructModuleURL(m.ModulePath, linkVersion(m.Version, m.ModulePath))\n\t}\n\treturn buildVersionDetails(mi.ModulePath, versions, linkify), nil\n}\n\n\/\/ fetchPackageVersionsDetails builds a version hierarchy for all module\n\/\/ versions containing a package path with v1 import path matching the given v1 path.\nfunc fetchPackageVersionsDetails(ctx context.Context, ds internal.DataSource, pkgPath, v1Path, modulePath string) (*VersionsDetails, error) {\n\tversions, err := ds.GetTaggedVersionsForPackageSeries(ctx, pkgPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If no tagged versions for the package series are found, fetch the\n\t\/\/ pseudo-versions instead.\n\tif len(versions) == 0 {\n\t\tversions, err = ds.GetPseudoVersionsForPackageSeries(ctx, pkgPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlinkify := func(mi *internal.ModuleInfo) string {\n\t\t\/\/ Here we have only version information, but need to construct the full\n\t\t\/\/ import path of the package corresponding to this version.\n\t\tvar versionPath string\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tversionPath = pkgPath\n\t\t} else {\n\t\t\tversionPath = pathInVersion(v1Path, mi)\n\t\t}\n\t\treturn constructPackageURL(versionPath, mi.ModulePath, linkVersion(mi.Version, mi.ModulePath))\n\t}\n\treturn buildVersionDetails(modulePath, versions, linkify), nil\n}\n\n\/\/ pathInVersion constructs the full import path of the package corresponding\n\/\/ to mi, given its v1 path. To do this, we first compute the suffix of the\n\/\/ package path in the given module series, and then append it to the real\n\/\/ (versioned) module path.\n\/\/\n\/\/ For example: if we're considering package foo.com\/v3\/bar\/baz, and encounter\n\/\/ module version foo.com\/bar\/v2, we do the following:\n\/\/ 1) Start with the v1Path foo.com\/bar\/baz.\n\/\/ 2) Trim off the version series path foo.com\/bar to get 'baz'.\n\/\/ 3) Join with the versioned module path foo.com\/bar\/v2 to get\n\/\/ foo.com\/bar\/v2\/baz.\n\/\/ ...being careful about slashes along the way.\nfunc pathInVersion(v1Path string, mi *internal.ModuleInfo) string {\n\tsuffix := strings.TrimPrefix(strings.TrimPrefix(v1Path, mi.SeriesPath()), \"\/\")\n\tif suffix == \"\" {\n\t\treturn mi.ModulePath\n\t}\n\treturn path.Join(mi.ModulePath, suffix)\n}\n\n\/\/ buildVersionDetails constructs the version hierarchy to be rendered on the\n\/\/ versions tab, organizing major versions into those that have the same module\n\/\/ path as the package version under consideration, and those that don't. The\n\/\/ given versions MUST be sorted first by module path and then by semver.\nfunc buildVersionDetails(currentModulePath string, modInfos []*internal.ModuleInfo, linkify func(v *internal.ModuleInfo) string) *VersionsDetails {\n\n\t\/\/ lists organizes versions by VersionListKey. Note that major version isn't\n\t\/\/ sufficient as a key: there are packages contained in the same major\n\t\/\/ version of different modules, for example github.com\/hashicorp\/vault\/api,\n\t\/\/ which exists in v1 of both of github.com\/hashicorp\/vault and\n\t\/\/ github.com\/hashicorp\/vault\/api.\n\tlists := make(map[VersionListKey][]*VersionSummary)\n\t\/\/ seenLists tracks the order in which we encounter entries of each version\n\t\/\/ list. We want to preserve this order.\n\tvar seenLists []VersionListKey\n\tfor _, mi := range modInfos {\n\t\t\/\/ Try to resolve the most appropriate major version for this version. If\n\t\t\/\/ we detect a +incompatible version (when the path version does not match\n\t\t\/\/ the sematic version), we prefer the path version.\n\t\tmajor := semver.Major(mi.Version)\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tvar err error\n\t\t\tmajor, err = stdlib.MajorVersionForVersion(mi.Version)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\tif _, pathMajor, ok := module.SplitPathVersion(mi.ModulePath); ok {\n\t\t\t\/\/ We prefer the path major version except for v1 import paths where the\n\t\t\t\/\/ semver major version is v0. In this case, we prefer the more specific\n\t\t\t\/\/ semver version.\n\t\t\tif pathMajor != \"\" {\n\t\t\t\t\/\/ Trim both '\/' and '.' from the path major version to account for\n\t\t\t\t\/\/ standard and gopkg.in module paths.\n\t\t\t\tmajor = strings.TrimLeft(pathMajor, \"\/.\")\n\t\t\t} else if major != \"v0\" && !strings.HasPrefix(major, \"go\") {\n\t\t\t\tmajor = \"v1\"\n\t\t\t}\n\t\t}\n\t\tkey := VersionListKey{ModulePath: mi.ModulePath, Major: major}\n\t\tttversion := mi.Version\n\t\tfmtVersion := displayVersion(mi.Version, mi.ModulePath)\n\t\tif mi.ModulePath == stdlib.ModulePath {\n\t\t\tttversion = fmtVersion \/\/ tooltips will show the Go tag\n\t\t}\n\t\tvs := &VersionSummary{\n\t\t\tTooltipVersion: ttversion,\n\t\t\tLink: linkify(mi),\n\t\t\tCommitTime: elapsedTime(mi.CommitTime),\n\t\t\tDisplayVersion: fmtVersion,\n\t\t}\n\t\tif _, ok := lists[key]; !ok {\n\t\t\tseenLists = append(seenLists, key)\n\t\t}\n\t\tlists[key] = append(lists[key], vs)\n\t}\n\n\tvar details VersionsDetails\n\tfor _, key := range seenLists {\n\t\tvl := &VersionList{\n\t\t\tVersionListKey: key,\n\t\t\tVersions: lists[key],\n\t\t}\n\t\tif key.ModulePath == currentModulePath {\n\t\t\tdetails.ThisModule = append(details.ThisModule, vl)\n\t\t} else {\n\t\t\tdetails.OtherModules = append(details.OtherModules, vl)\n\t\t}\n\t}\n\treturn &details\n}\n\n\/\/ formatVersion formats a more readable representation of the given version\n\/\/ string. On any parsing error, it simply returns the input unmodified.\n\/\/\n\/\/ For prerelease versions, formatVersion separates the prerelease portion of\n\/\/ the version string into a parenthetical. i.e.\n\/\/ formatVersion(\"v1.2.3-alpha\") = \"v1.2.3 (alpha)\"\n\/\/\n\/\/ For pseudo versions, formatVersion uses a short commit hash to identify the\n\/\/ version. i.e.\n\/\/ formatVersion(\"v1.2.3-20190311183353-d8887717615a\") = \"v.1.2.3 (d888771)\"\nfunc formatVersion(v string) string {\n\tvType, err := version.ParseType(v)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"Error parsing version %q: %v\", v, err)\n\t\treturn v\n\t}\n\tpre := semver.Prerelease(v)\n\tbase := strings.TrimSuffix(v, pre)\n\tpre = strings.TrimPrefix(pre, \"-\")\n\tswitch vType {\n\tcase version.TypePrerelease:\n\t\treturn fmt.Sprintf(\"%s (%s)\", base, pre)\n\tcase version.TypePseudo:\n\t\trev := pseudoVersionRev(v)\n\t\tcommitLen := 7\n\t\tif len(rev) < commitLen {\n\t\t\tcommitLen = len(rev)\n\t\t}\n\t\treturn fmt.Sprintf(\"%s (%s)\", base, rev[0:commitLen])\n\tdefault:\n\t\treturn v\n\t}\n}\n\n\/\/ pseudoVersionRev extracts the commit identifier from a pseudo version\n\/\/ string. It assumes the pseudo version is correctly formatted.\nfunc pseudoVersionRev(v string) string {\n\tv = strings.TrimSuffix(v, \"+incompatible\")\n\tj := strings.LastIndex(v, \"-\")\n\treturn v[j+1:]\n}\n\n\/\/ displayVersion returns the version string, formatted for display.\nfunc displayVersion(v string, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn goTagForVersion(v)\n\t}\n\treturn formatVersion(v)\n}\n\n\/\/ linkVersion returns the version string, suitable for use in\n\/\/ a link to this site.\nfunc linkVersion(v string, modulePath string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn goTagForVersion(v)\n\t}\n\treturn v\n}\n\n\/\/ goTagForVersion returns the Go tag corresponding to a given semantic\n\/\/ version. It should only be used if we are 100% sure the version will\n\/\/ correspond to a Go tag, such as when we are fetching the version from the\n\/\/ database.\nfunc goTagForVersion(v string) string {\n\ttag, err := stdlib.TagForVersion(v)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"goTagForVersion(%q): %v\", v, err)\n\t\treturn \"unknown\"\n\t}\n\treturn tag\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package platform manages the runtime services as a platform\npackage platform\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\/config\/cmd\"\n\tgorun \"github.com\/micro\/go-micro\/runtime\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\n\t\/\/ include usage\n\n\t\"github.com\/micro\/micro\/internal\/update\"\n\t_ \"github.com\/micro\/micro\/internal\/usage\"\n)\n\nvar (\n\t\/\/ Date of the build\n\t\/\/ TODO: move elsewhere\n\tVersion string\n\n\t\/\/ list of services managed\n\tservices = []string{\n\t\t\/\/ runtime services\n\t\t\"network\", \/\/ :8085\n\t\t\"runtime\", \/\/ :8088\n\t\t\"registry\", \/\/ :8000\n\t\t\"broker\", \/\/ :8001\n\t\t\"store\", \/\/ :8002\n\t\t\"tunnel\", \/\/ :8083\n\t\t\"router\", \/\/ :8084\n\t\t\"monitor\", \/\/ :????\n\t\t\"debug\", \/\/ :????\n\t\t\"proxy\", \/\/ :8081\n\t\t\"api\", \/\/ :8080\n\t\t\"web\", \/\/ :8082\n\t\t\"bot\", \/\/ :????\n\t\t\"init\", \/\/ no port, manage self\n\t}\n\n\t\/\/ list of web apps\n\tdashboards = []string{\n\t\t\"network.web\",\n\t\t\"debug.web\",\n\t}\n\n\t\/\/ list of apis\n\tapis = []string{\n\t\t\"network.dns\",\n\t\t\"network.api\",\n\t}\n)\n\ntype initScheduler struct {\n\tgorun.Scheduler\n\tservices []string\n}\n\nfunc (i *initScheduler) Notify() (<-chan gorun.Event, error) {\n\tch, err := i.Scheduler.Notify()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create new event channel\n\tevChan := make(chan gorun.Event, 32)\n\n\tgo func() {\n\t\tfor ev := range ch {\n\t\t\t\/\/ fire an event per service\n\t\t\tfor _, service := range i.services {\n\t\t\t\tevChan <- gorun.Event{\n\t\t\t\t\tService: service,\n\t\t\t\t\tVersion: ev.Version,\n\t\t\t\t\tTimestamp: ev.Timestamp,\n\t\t\t\t\tType: ev.Type,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ we've reached the end\n\t\tclose(evChan)\n\t}()\n\n\treturn evChan, nil\n}\n\nfunc initNotify(n gorun.Scheduler, services []string) gorun.Scheduler {\n\treturn &initScheduler{n, services}\n}\n\n\/\/ Init is the `micro init` command which manages the lifecycle\n\/\/ of all the services. It does not start the services.\nfunc Init(context *cli.Context) {\n\tlog.Name(\"init\")\n\n\tif len(context.Args()) > 0 {\n\t\tcli.ShowSubcommandHelp(context)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ create the combined list of services\n\tinitServices := append(services, dashboards...)\n\tinitServices = append(services, apis...)\n\n\t\/\/ get the service prefix\n\tif namespace := context.GlobalString(\"namespace\"); len(namespace) > 0 {\n\t\tfor i, service := range initServices {\n\t\t\tinitServices[i] = fmt.Sprintf(\"%s.%s\", namespace, service)\n\t\t}\n\t}\n\n\t\/\/ create new micro runtime\n\tmuRuntime := cmd.DefaultCmd.Options().Runtime\n\n\t\/\/ Use default update notifier\n\tnotifier := update.NewScheduler(Version)\n\twrapped := initNotify(notifier, initServices)\n\n\t\/\/ specify with a notifier that fires\n\t\/\/ individual events for each service\n\toptions := []gorun.Option{\n\t\tgorun.WithScheduler(wrapped),\n\t\tgorun.WithType(\"runtime\"),\n\t}\n\t(*muRuntime).Init(options...)\n\n\t\/\/ used to signal when to shutdown\n\tshutdown := make(chan os.Signal, 1)\n\tsignal.Notify(shutdown, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\n\tlog.Info(\"Starting service runtime\")\n\n\t\/\/ start the runtime\n\tif err := (*muRuntime).Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime started\")\n\n\tselect {\n\tcase <-shutdown:\n\t\tlog.Info(\"Shutdown signal received\")\n\t\tlog.Info(\"Stopping service runtime\")\n\t}\n\n\t\/\/ stop all the things\n\tif err := (*muRuntime).Stop(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime shutdown\")\n\n\t\/\/ exit success\n\tos.Exit(0)\n}\n\n\/\/ Run runs the entire platform\nfunc Run(context *cli.Context) {\n\tlog.Name(\"micro\")\n\n\tif len(context.Args()) > 0 {\n\t\tcli.ShowSubcommandHelp(context)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ get the network flag\n\tnetwork := context.GlobalString(\"network\")\n\tlocal := context.GlobalBool(\"local\")\n\n\t\/\/ pass through the environment\n\t\/\/ TODO: perhaps don't do this\n\tenv := os.Environ()\n\n\tif network == \"local\" || local {\n\t\t\/\/ no op for now\n\t\tlog.Info(\"Setting local network\")\n\t} else {\n\t\tlog.Info(\"Setting global network\")\n\n\t\tif v := os.Getenv(\"MICRO_NETWORK_NODES\"); len(v) == 0 {\n\t\t\t\/\/ set the resolver to use https:\/\/micro.mu\/network\n\t\t\tenv = append(env, \"MICRO_NETWORK_NODES=network.micro.mu\")\n\t\t\tlog.Log(\"Setting default network micro.mu\")\n\t\t}\n\t\tif v := os.Getenv(\"MICRO_NETWORK_TOKEN\"); len(v) == 0 {\n\t\t\t\/\/ set the network token\n\t\t\tenv = append(env, \"MICRO_NETWORK_TOKEN=micro.mu\")\n\t\t\tlog.Log(\"Setting default network token\")\n\t\t}\n\t}\n\n\tlog.Info(\"Loading core services\")\n\n\t\/\/ create new micro runtime\n\tmuRuntime := cmd.DefaultCmd.Options().Runtime\n\n\t\/\/ Use default update notifier\n\tif context.GlobalBool(\"auto_update\") {\n\t\toptions := []gorun.Option{\n\t\t\tgorun.WithScheduler(update.NewScheduler(Version)),\n\t\t}\n\t\t(*muRuntime).Init(options...)\n\t}\n\n\tfor _, service := range services {\n\t\tname := service\n\n\t\tif namespace := context.GlobalString(\"namespace\"); len(namespace) > 0 {\n\t\t\tname = fmt.Sprintf(\"%s.%s\", namespace, service)\n\t\t}\n\n\t\tlog.Infof(\"Registering %s\", name)\n\n\t\t\/\/ runtime based on environment we run the service in\n\t\targs := []gorun.CreateOption{\n\t\t\tgorun.WithCommand(os.Args[0], service),\n\t\t\tgorun.WithEnv(env),\n\t\t\tgorun.WithOutput(os.Stdout),\n\t\t}\n\n\t\t\/\/ NOTE: we use Version right now to check for the latest release\n\t\tmuService := &gorun.Service{Name: name, Version: Version}\n\t\tif err := (*muRuntime).Create(muService, args...); err != nil {\n\t\t\tlog.Errorf(\"Failed to create runtime enviroment: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tshutdown := make(chan os.Signal, 1)\n\tsignal.Notify(shutdown, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\n\tlog.Info(\"Starting service runtime\")\n\n\t\/\/ start the runtime\n\tif err := (*muRuntime).Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime started\")\n\n\t\/\/ TODO: should we launch the console?\n\t\/\/ start the console\n\t\/\/ cli.Init(context)\n\n\tselect {\n\tcase <-shutdown:\n\t\tlog.Info(\"Shutdown signal received\")\n\t}\n\n\tlog.Info(\"Stopping service runtime\")\n\n\t\/\/ stop all the things\n\tif err := (*muRuntime).Stop(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime shutdown\")\n\n\t\/\/ exit success\n\tos.Exit(0)\n}\n<commit_msg>reorder loading so they update dashboards<commit_after>\/\/ Package platform manages the runtime services as a platform\npackage platform\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\/config\/cmd\"\n\tgorun \"github.com\/micro\/go-micro\/runtime\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\n\t\/\/ include usage\n\n\t\"github.com\/micro\/micro\/internal\/update\"\n\t_ \"github.com\/micro\/micro\/internal\/usage\"\n)\n\nvar (\n\t\/\/ Date of the build\n\t\/\/ TODO: move elsewhere\n\tVersion string\n\n\t\/\/ list of services managed\n\tservices = []string{\n\t\t\/\/ runtime services\n\t\t\"network\", \/\/ :8085\n\t\t\"runtime\", \/\/ :8088\n\t\t\"registry\", \/\/ :8000\n\t\t\"broker\", \/\/ :8001\n\t\t\"store\", \/\/ :8002\n\t\t\"tunnel\", \/\/ :8083\n\t\t\"router\", \/\/ :8084\n\t\t\"monitor\", \/\/ :????\n\t\t\"debug\", \/\/ :????\n\t\t\"proxy\", \/\/ :8081\n\t\t\"api\", \/\/ :8080\n\t\t\"web\", \/\/ :8082\n\t\t\"bot\", \/\/ :????\n\t\t\"init\", \/\/ no port, manage self\n\t}\n\n\t\/\/ list of web apps\n\tdashboards = []string{\n\t\t\"network.web\",\n\t\t\"debug.web\",\n\t}\n\n\t\/\/ list of apis\n\tapis = []string{\n\t\t\"network.dns\",\n\t\t\"network.api\",\n\t}\n)\n\ntype initScheduler struct {\n\tgorun.Scheduler\n\tservices []string\n}\n\nfunc (i *initScheduler) Notify() (<-chan gorun.Event, error) {\n\tch, err := i.Scheduler.Notify()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create new event channel\n\tevChan := make(chan gorun.Event, 32)\n\n\tgo func() {\n\t\tfor ev := range ch {\n\t\t\t\/\/ fire an event per service\n\t\t\tfor _, service := range i.services {\n\t\t\t\tevChan <- gorun.Event{\n\t\t\t\t\tService: service,\n\t\t\t\t\tVersion: ev.Version,\n\t\t\t\t\tTimestamp: ev.Timestamp,\n\t\t\t\t\tType: ev.Type,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ we've reached the end\n\t\tclose(evChan)\n\t}()\n\n\treturn evChan, nil\n}\n\nfunc initNotify(n gorun.Scheduler, services []string) gorun.Scheduler {\n\treturn &initScheduler{n, services}\n}\n\n\/\/ Init is the `micro init` command which manages the lifecycle\n\/\/ of all the services. It does not start the services.\nfunc Init(context *cli.Context) {\n\tlog.Name(\"init\")\n\n\tif len(context.Args()) > 0 {\n\t\tcli.ShowSubcommandHelp(context)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ create the combined list of services\n\tinitServices := append(dashboards, apis...)\n\tinitServices = append(initServices, services...)\n\n\t\/\/ get the service prefix\n\tif namespace := context.GlobalString(\"namespace\"); len(namespace) > 0 {\n\t\tfor i, service := range initServices {\n\t\t\tinitServices[i] = fmt.Sprintf(\"%s.%s\", namespace, service)\n\t\t}\n\t}\n\n\t\/\/ create new micro runtime\n\tmuRuntime := cmd.DefaultCmd.Options().Runtime\n\n\t\/\/ Use default update notifier\n\tnotifier := update.NewScheduler(Version)\n\twrapped := initNotify(notifier, initServices)\n\n\t\/\/ specify with a notifier that fires\n\t\/\/ individual events for each service\n\toptions := []gorun.Option{\n\t\tgorun.WithScheduler(wrapped),\n\t\tgorun.WithType(\"runtime\"),\n\t}\n\t(*muRuntime).Init(options...)\n\n\t\/\/ used to signal when to shutdown\n\tshutdown := make(chan os.Signal, 1)\n\tsignal.Notify(shutdown, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\n\tlog.Info(\"Starting service runtime\")\n\n\t\/\/ start the runtime\n\tif err := (*muRuntime).Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime started\")\n\n\tselect {\n\tcase <-shutdown:\n\t\tlog.Info(\"Shutdown signal received\")\n\t\tlog.Info(\"Stopping service runtime\")\n\t}\n\n\t\/\/ stop all the things\n\tif err := (*muRuntime).Stop(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime shutdown\")\n\n\t\/\/ exit success\n\tos.Exit(0)\n}\n\n\/\/ Run runs the entire platform\nfunc Run(context *cli.Context) {\n\tlog.Name(\"micro\")\n\n\tif len(context.Args()) > 0 {\n\t\tcli.ShowSubcommandHelp(context)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ get the network flag\n\tnetwork := context.GlobalString(\"network\")\n\tlocal := context.GlobalBool(\"local\")\n\n\t\/\/ pass through the environment\n\t\/\/ TODO: perhaps don't do this\n\tenv := os.Environ()\n\n\tif network == \"local\" || local {\n\t\t\/\/ no op for now\n\t\tlog.Info(\"Setting local network\")\n\t} else {\n\t\tlog.Info(\"Setting global network\")\n\n\t\tif v := os.Getenv(\"MICRO_NETWORK_NODES\"); len(v) == 0 {\n\t\t\t\/\/ set the resolver to use https:\/\/micro.mu\/network\n\t\t\tenv = append(env, \"MICRO_NETWORK_NODES=network.micro.mu\")\n\t\t\tlog.Log(\"Setting default network micro.mu\")\n\t\t}\n\t\tif v := os.Getenv(\"MICRO_NETWORK_TOKEN\"); len(v) == 0 {\n\t\t\t\/\/ set the network token\n\t\t\tenv = append(env, \"MICRO_NETWORK_TOKEN=micro.mu\")\n\t\t\tlog.Log(\"Setting default network token\")\n\t\t}\n\t}\n\n\tlog.Info(\"Loading core services\")\n\n\t\/\/ create new micro runtime\n\tmuRuntime := cmd.DefaultCmd.Options().Runtime\n\n\t\/\/ Use default update notifier\n\tif context.GlobalBool(\"auto_update\") {\n\t\toptions := []gorun.Option{\n\t\t\tgorun.WithScheduler(update.NewScheduler(Version)),\n\t\t}\n\t\t(*muRuntime).Init(options...)\n\t}\n\n\tfor _, service := range services {\n\t\tname := service\n\n\t\tif namespace := context.GlobalString(\"namespace\"); len(namespace) > 0 {\n\t\t\tname = fmt.Sprintf(\"%s.%s\", namespace, service)\n\t\t}\n\n\t\tlog.Infof(\"Registering %s\", name)\n\n\t\t\/\/ runtime based on environment we run the service in\n\t\targs := []gorun.CreateOption{\n\t\t\tgorun.WithCommand(os.Args[0], service),\n\t\t\tgorun.WithEnv(env),\n\t\t\tgorun.WithOutput(os.Stdout),\n\t\t}\n\n\t\t\/\/ NOTE: we use Version right now to check for the latest release\n\t\tmuService := &gorun.Service{Name: name, Version: Version}\n\t\tif err := (*muRuntime).Create(muService, args...); err != nil {\n\t\t\tlog.Errorf(\"Failed to create runtime enviroment: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tshutdown := make(chan os.Signal, 1)\n\tsignal.Notify(shutdown, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\n\tlog.Info(\"Starting service runtime\")\n\n\t\/\/ start the runtime\n\tif err := (*muRuntime).Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime started\")\n\n\t\/\/ TODO: should we launch the console?\n\t\/\/ start the console\n\t\/\/ cli.Init(context)\n\n\tselect {\n\tcase <-shutdown:\n\t\tlog.Info(\"Shutdown signal received\")\n\t}\n\n\tlog.Info(\"Stopping service runtime\")\n\n\t\/\/ stop all the things\n\tif err := (*muRuntime).Stop(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Info(\"Service runtime shutdown\")\n\n\t\/\/ exit success\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package repository\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"io\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n\t\"github.com\/restic\/restic\/internal\/fs\"\n\t\"github.com\/restic\/restic\/internal\/hashing\"\n\t\"github.com\/restic\/restic\/internal\/pack\"\n\t\"github.com\/restic\/restic\/internal\/restic\"\n\n\t\"github.com\/restic\/restic\/internal\/errors\"\n)\n\n\/\/ Repack takes a list of packs together with a list of blobs contained in\n\/\/ these packs. Each pack is loaded and the blobs listed in keepBlobs is saved\n\/\/ into a new pack. Returned is the list of obsolete packs which can then\n\/\/ be removed.\nfunc Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *restic.Progress) (obsoletePacks restic.IDSet, err error) {\n\tdebug.Log(\"repacking %d packs while keeping %d blobs\", len(packs), len(keepBlobs))\n\n\tfor packID := range packs {\n\t\t\/\/ load the complete pack into a temp file\n\t\th := restic.Handle{Type: restic.DataFile, Name: packID.String()}\n\n\t\ttempfile, err := fs.TempFile(\"\", \"restic-temp-repack-\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"TempFile\")\n\t\t}\n\n\t\tbeRd, err := repo.Backend().Load(ctx, h, 0, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thrd := hashing.NewReader(beRd, sha256.New())\n\t\tpackLength, err := io.Copy(tempfile, hrd)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Copy\")\n\t\t}\n\n\t\tif err = beRd.Close(); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Close\")\n\t\t}\n\n\t\thash := restic.IDFromHash(hrd.Sum(nil))\n\t\tdebug.Log(\"pack %v loaded (%d bytes), hash %v\", packID.Str(), packLength, hash.Str())\n\n\t\tif !packID.Equal(hash) {\n\t\t\treturn nil, errors.Errorf(\"hash does not match id: want %v, got %v\", packID, hash)\n\t\t}\n\n\t\t_, err = tempfile.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Seek\")\n\t\t}\n\n\t\tblobs, err := pack.List(repo.Key(), tempfile, packLength)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdebug.Log(\"processing pack %v, blobs: %v\", packID.Str(), len(blobs))\n\t\tvar buf []byte\n\t\tfor _, entry := range blobs {\n\t\t\th := restic.BlobHandle{ID: entry.ID, Type: entry.Type}\n\t\t\tif !keepBlobs.Has(h) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdebug.Log(\" process blob %v\", h)\n\n\t\t\tbuf = buf[:]\n\t\t\tif uint(len(buf)) < entry.Length {\n\t\t\t\tbuf = make([]byte, entry.Length)\n\t\t\t}\n\t\t\tbuf = buf[:entry.Length]\n\n\t\t\tn, err := tempfile.ReadAt(buf, int64(entry.Offset))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"ReadAt\")\n\t\t\t}\n\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, errors.Errorf(\"read blob %v from %v: not enough bytes read, want %v, got %v\",\n\t\t\t\t\th, tempfile.Name(), len(buf), n)\n\t\t\t}\n\n\t\t\tn, err = repo.Key().Decrypt(buf, buf)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbuf = buf[:n]\n\n\t\t\tid := restic.Hash(buf)\n\t\t\tif !id.Equal(entry.ID) {\n\t\t\t\treturn nil, errors.Errorf(\"read blob %v from %v: wrong data returned, hash is %v\",\n\t\t\t\t\th, tempfile.Name(), id)\n\t\t\t}\n\n\t\t\t_, err = repo.SaveBlob(ctx, entry.Type, buf, entry.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdebug.Log(\" saved blob %v\", entry.ID.Str())\n\n\t\t\tkeepBlobs.Delete(h)\n\t\t}\n\n\t\tif err = tempfile.Close(); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Close\")\n\t\t}\n\n\t\tif err = fs.RemoveIfExists(tempfile.Name()); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Remove\")\n\t\t}\n\t\tif p != nil {\n\t\t\tp.Report(restic.Stat{Blobs: 1})\n\t\t}\n\t}\n\n\tif err := repo.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn packs, nil\n}\n<commit_msg>prune: Warn about wrong plaintext blob ID<commit_after>package repository\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n\t\"github.com\/restic\/restic\/internal\/fs\"\n\t\"github.com\/restic\/restic\/internal\/hashing\"\n\t\"github.com\/restic\/restic\/internal\/pack\"\n\t\"github.com\/restic\/restic\/internal\/restic\"\n\n\t\"github.com\/restic\/restic\/internal\/errors\"\n)\n\n\/\/ Repack takes a list of packs together with a list of blobs contained in\n\/\/ these packs. Each pack is loaded and the blobs listed in keepBlobs is saved\n\/\/ into a new pack. Returned is the list of obsolete packs which can then\n\/\/ be removed.\nfunc Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *restic.Progress) (obsoletePacks restic.IDSet, err error) {\n\tdebug.Log(\"repacking %d packs while keeping %d blobs\", len(packs), len(keepBlobs))\n\n\tfor packID := range packs {\n\t\t\/\/ load the complete pack into a temp file\n\t\th := restic.Handle{Type: restic.DataFile, Name: packID.String()}\n\n\t\ttempfile, err := fs.TempFile(\"\", \"restic-temp-repack-\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"TempFile\")\n\t\t}\n\n\t\tbeRd, err := repo.Backend().Load(ctx, h, 0, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thrd := hashing.NewReader(beRd, sha256.New())\n\t\tpackLength, err := io.Copy(tempfile, hrd)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Copy\")\n\t\t}\n\n\t\tif err = beRd.Close(); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Close\")\n\t\t}\n\n\t\thash := restic.IDFromHash(hrd.Sum(nil))\n\t\tdebug.Log(\"pack %v loaded (%d bytes), hash %v\", packID.Str(), packLength, hash.Str())\n\n\t\tif !packID.Equal(hash) {\n\t\t\treturn nil, errors.Errorf(\"hash does not match id: want %v, got %v\", packID, hash)\n\t\t}\n\n\t\t_, err = tempfile.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Seek\")\n\t\t}\n\n\t\tblobs, err := pack.List(repo.Key(), tempfile, packLength)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdebug.Log(\"processing pack %v, blobs: %v\", packID.Str(), len(blobs))\n\t\tvar buf []byte\n\t\tfor _, entry := range blobs {\n\t\t\th := restic.BlobHandle{ID: entry.ID, Type: entry.Type}\n\t\t\tif !keepBlobs.Has(h) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdebug.Log(\" process blob %v\", h)\n\n\t\t\tbuf = buf[:]\n\t\t\tif uint(len(buf)) < entry.Length {\n\t\t\t\tbuf = make([]byte, entry.Length)\n\t\t\t}\n\t\t\tbuf = buf[:entry.Length]\n\n\t\t\tn, err := tempfile.ReadAt(buf, int64(entry.Offset))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"ReadAt\")\n\t\t\t}\n\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, errors.Errorf(\"read blob %v from %v: not enough bytes read, want %v, got %v\",\n\t\t\t\t\th, tempfile.Name(), len(buf), n)\n\t\t\t}\n\n\t\t\tn, err = repo.Key().Decrypt(buf, buf)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbuf = buf[:n]\n\n\t\t\tid := restic.Hash(buf)\n\t\t\tif !id.Equal(entry.ID) {\n\t\t\t\tdebug.Log(\"read blob %v\/%v from %v: wrong data returned, hash is %v\",\n\t\t\t\t\th.Type, h.ID, tempfile.Name(), id)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"read blob %v from %v: wrong data returned, hash is %v\",\n\t\t\t\t\th, tempfile.Name(), id)\n\t\t\t}\n\n\t\t\t_, err = repo.SaveBlob(ctx, entry.Type, buf, entry.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdebug.Log(\" saved blob %v\", entry.ID.Str())\n\n\t\t\tkeepBlobs.Delete(h)\n\t\t}\n\n\t\tif err = tempfile.Close(); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Close\")\n\t\t}\n\n\t\tif err = fs.RemoveIfExists(tempfile.Name()); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Remove\")\n\t\t}\n\t\tif p != nil {\n\t\t\tp.Report(restic.Stat{Blobs: 1})\n\t\t}\n\t}\n\n\tif err := repo.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn packs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\/mock\"\n\t\"github.com\/jacobsa\/comeback\/internal\/fs\"\n\t\"github.com\/jacobsa\/comeback\/internal\/repr\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestBlobStore(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Match *blob.StoreRequest where the Blob field's contents are equal to the\n\/\/ given slice.\nfunc blobEquals(expected []byte) Matcher {\n\tpred := func(c interface{}) (err error) {\n\t\treq, ok := c.(*blob.StoreRequest)\n\t\tif !ok {\n\t\t\terr = fmt.Errorf(\"which has type %T\", c)\n\t\t\treturn\n\t\t}\n\n\t\tif !bytes.Equal(expected, req.Blob) {\n\t\t\terr = errors.New(\"which has different contents\")\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t}\n\n\treturn NewMatcher(pred, \"*blob.StoreRequest with appropriate content\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype VisitorTest struct {\n\tctx context.Context\n\tchunkSize int\n\tblobStore mock_blob.MockStore\n\n\tnode fsNode\n\n\t\/\/ A temporary directory removed at the end of the test.\n\tdir string\n}\n\nfunc init() { RegisterTestSuite(&VisitorTest{}) }\n\nvar _ SetUpInterface = &VisitorTest{}\nvar _ TearDownInterface = &VisitorTest{}\n\nfunc (t *VisitorTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\tt.ctx = ti.Ctx\n\tt.chunkSize = 8\n\tt.blobStore = mock_blob.NewMockStore(ti.MockController, \"blobStore\")\n\n\t\/\/ Set up the directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"score_map_test\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) TearDown() {\n\tvar err error\n\n\terr = os.RemoveAll(t.dir)\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) call() (err error) {\n\tvisitor := newVisitor(\n\t\tt.chunkSize,\n\t\tt.dir,\n\t\tt.blobStore,\n\t\tlog.New(ioutil.Discard, \"\", 0),\n\t\tmake(chan *fsNode, 1))\n\n\terr = visitor.Visit(t.ctx, &t.node)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_Empty() {\n\tscores := []blob.Score{}\n\tt.node.Info.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Info.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_NonEmpty() {\n\tscores := []blob.Score{blob.ComputeScore([]byte(\"taco\"))}\n\tt.node.Info.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Info.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) Symlink() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info = fs.DirectoryEntry{\n\t\tType: fs.TypeSymlink,\n\t}\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectEq(nil, t.node.Info.Scores)\n}\n\nfunc (t *VisitorTest) Directory() {\n\tvar err error\n\n\t\/\/ Children\n\tchild0 := &fsNode{\n\t\tInfo: fs.DirectoryEntry{\n\t\t\tName: \"taco\",\n\t\t\tMTime: time.Date(2012, time.August, 15, 12, 56, 00, 0, time.Local),\n\t\t},\n\t}\n\n\tchild1 := &fsNode{\n\t\tInfo: fs.DirectoryEntry{\n\t\t\tName: \"burrito\",\n\t\t\tMTime: time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local),\n\t\t},\n\t}\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"\"\n\tt.node.Info = fs.DirectoryEntry{\n\t\tType: fs.TypeDirectory,\n\t}\n\n\tt.node.Children = []*fsNode{child0, child1}\n\n\t\/\/ Snoop on the call to the blob store.\n\tvar savedReq *blob.StoreRequest\n\texpectedScore := blob.ComputeScore([]byte(\"taco\"))\n\n\tExpectCall(t.blobStore, \"Store\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &savedReq), Return(expectedScore, nil)))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\tAssertThat(t.node.Info.Scores, ElementsAre(expectedScore))\n\n\t\/\/ Parse the blob.\n\tentries, err := repr.UnmarshalDir(savedReq.Blob)\n\tAssertEq(nil, err)\n\tAssertEq(2, len(entries))\n\n\tExpectThat(*entries[0], DeepEquals(child0.Info))\n\tExpectThat(*entries[1], DeepEquals(child1.Info))\n}\n\nfunc (t *VisitorTest) File_Empty() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info.Type = fs.TypeFile\n\tp := path.Join(t.dir, t.node.RelPath)\n\n\terr = ioutil.WriteFile(p, []byte(\"\"), 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tAssertNe(nil, t.node.Info.Scores)\n\tExpectThat(t.node.Info.Scores, ElementsAre())\n}\n\nfunc (t *VisitorTest) File_LastChunkIsFull() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info.Type = fs.TypeFile\n\tp := path.Join(t.dir, t.node.RelPath)\n\n\tchunk0 := bytes.Repeat([]byte{0}, t.chunkSize)\n\tchunk1 := bytes.Repeat([]byte{1}, t.chunkSize)\n\n\tvar contents []byte\n\tcontents = append(contents, chunk0...)\n\tcontents = append(contents, chunk1...)\n\n\terr = ioutil.WriteFile(p, contents, 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Blob store (chunk 0)\n\texpected0, err := repr.MarshalFile(chunk0)\n\tAssertEq(nil, err)\n\n\tscore0 := blob.ComputeScore(expected0)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected0)).\n\t\tWillOnce(Return(score0, nil))\n\n\t\/\/ Blob store (chunk 1)\n\texpected1, err := repr.MarshalFile(chunk1)\n\tAssertEq(nil, err)\n\n\tscore1 := blob.ComputeScore(expected1)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected1)).\n\t\tWillOnce(Return(score1, nil))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectThat(t.node.Info.Scores, ElementsAre(score0, score1))\n}\n\nfunc (t *VisitorTest) File_LastChunkIsPartial() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tp := path.Join(t.dir, t.node.RelPath)\n\tt.node.Info.Type = fs.TypeFile\n\n\tchunk0 := bytes.Repeat([]byte{0}, t.chunkSize)\n\tchunk1 := bytes.Repeat([]byte{1}, t.chunkSize-1)\n\n\tvar contents []byte\n\tcontents = append(contents, chunk0...)\n\tcontents = append(contents, chunk1...)\n\n\terr = ioutil.WriteFile(p, contents, 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Blob store (chunk 0)\n\texpected0, err := repr.MarshalFile(chunk0)\n\tAssertEq(nil, err)\n\n\tscore0 := blob.ComputeScore(expected0)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected0)).\n\t\tWillOnce(Return(score0, nil))\n\n\t\/\/ Blob store (chunk 1)\n\texpected1, err := repr.MarshalFile(chunk1)\n\tAssertEq(nil, err)\n\n\tscore1 := blob.ComputeScore(expected1)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected1)).\n\t\tWillOnce(Return(score1, nil))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectThat(t.node.Info.Scores, ElementsAre(score0, score1))\n}\n<commit_msg>Updated test names.<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\/mock\"\n\t\"github.com\/jacobsa\/comeback\/internal\/fs\"\n\t\"github.com\/jacobsa\/comeback\/internal\/repr\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestBlobStore(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Match *blob.StoreRequest where the Blob field's contents are equal to the\n\/\/ given slice.\nfunc blobEquals(expected []byte) Matcher {\n\tpred := func(c interface{}) (err error) {\n\t\treq, ok := c.(*blob.StoreRequest)\n\t\tif !ok {\n\t\t\terr = fmt.Errorf(\"which has type %T\", c)\n\t\t\treturn\n\t\t}\n\n\t\tif !bytes.Equal(expected, req.Blob) {\n\t\t\terr = errors.New(\"which has different contents\")\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t}\n\n\treturn NewMatcher(pred, \"*blob.StoreRequest with appropriate content\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype VisitorTest struct {\n\tctx context.Context\n\tchunkSize int\n\tblobStore mock_blob.MockStore\n\n\tnode fsNode\n\n\t\/\/ A temporary directory removed at the end of the test.\n\tdir string\n}\n\nfunc init() { RegisterTestSuite(&VisitorTest{}) }\n\nvar _ SetUpInterface = &VisitorTest{}\nvar _ TearDownInterface = &VisitorTest{}\n\nfunc (t *VisitorTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\tt.ctx = ti.Ctx\n\tt.chunkSize = 8\n\tt.blobStore = mock_blob.NewMockStore(ti.MockController, \"blobStore\")\n\n\t\/\/ Set up the directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"score_map_test\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) TearDown() {\n\tvar err error\n\n\terr = os.RemoveAll(t.dir)\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) call() (err error) {\n\tvisitor := newVisitor(\n\t\tt.chunkSize,\n\t\tt.dir,\n\t\tt.blobStore,\n\t\tlog.New(ioutil.Discard, \"\", 0),\n\t\tmake(chan *fsNode, 1))\n\n\terr = visitor.Visit(t.ctx, &t.node)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *VisitorTest) PresentInScoreMap_Empty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) PresentInScoreMap_NonEmpty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) Symlink() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info = fs.DirectoryEntry{\n\t\tType: fs.TypeSymlink,\n\t}\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectEq(nil, t.node.Info.Scores)\n}\n\nfunc (t *VisitorTest) Directory() {\n\tvar err error\n\n\t\/\/ Children\n\tchild0 := &fsNode{\n\t\tInfo: fs.DirectoryEntry{\n\t\t\tName: \"taco\",\n\t\t\tMTime: time.Date(2012, time.August, 15, 12, 56, 00, 0, time.Local),\n\t\t},\n\t}\n\n\tchild1 := &fsNode{\n\t\tInfo: fs.DirectoryEntry{\n\t\t\tName: \"burrito\",\n\t\t\tMTime: time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local),\n\t\t},\n\t}\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"\"\n\tt.node.Info = fs.DirectoryEntry{\n\t\tType: fs.TypeDirectory,\n\t}\n\n\tt.node.Children = []*fsNode{child0, child1}\n\n\t\/\/ Snoop on the call to the blob store.\n\tvar savedReq *blob.StoreRequest\n\texpectedScore := blob.ComputeScore([]byte(\"taco\"))\n\n\tExpectCall(t.blobStore, \"Store\")(Any(), Any()).\n\t\tWillOnce(DoAll(SaveArg(1, &savedReq), Return(expectedScore, nil)))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\tAssertThat(t.node.Info.Scores, ElementsAre(expectedScore))\n\n\t\/\/ Parse the blob.\n\tentries, err := repr.UnmarshalDir(savedReq.Blob)\n\tAssertEq(nil, err)\n\tAssertEq(2, len(entries))\n\n\tExpectThat(*entries[0], DeepEquals(child0.Info))\n\tExpectThat(*entries[1], DeepEquals(child1.Info))\n}\n\nfunc (t *VisitorTest) File_Empty() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info.Type = fs.TypeFile\n\tp := path.Join(t.dir, t.node.RelPath)\n\n\terr = ioutil.WriteFile(p, []byte(\"\"), 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tAssertNe(nil, t.node.Info.Scores)\n\tExpectThat(t.node.Info.Scores, ElementsAre())\n}\n\nfunc (t *VisitorTest) File_LastChunkIsFull() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tt.node.Info.Type = fs.TypeFile\n\tp := path.Join(t.dir, t.node.RelPath)\n\n\tchunk0 := bytes.Repeat([]byte{0}, t.chunkSize)\n\tchunk1 := bytes.Repeat([]byte{1}, t.chunkSize)\n\n\tvar contents []byte\n\tcontents = append(contents, chunk0...)\n\tcontents = append(contents, chunk1...)\n\n\terr = ioutil.WriteFile(p, contents, 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Blob store (chunk 0)\n\texpected0, err := repr.MarshalFile(chunk0)\n\tAssertEq(nil, err)\n\n\tscore0 := blob.ComputeScore(expected0)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected0)).\n\t\tWillOnce(Return(score0, nil))\n\n\t\/\/ Blob store (chunk 1)\n\texpected1, err := repr.MarshalFile(chunk1)\n\tAssertEq(nil, err)\n\n\tscore1 := blob.ComputeScore(expected1)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected1)).\n\t\tWillOnce(Return(score1, nil))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectThat(t.node.Info.Scores, ElementsAre(score0, score1))\n}\n\nfunc (t *VisitorTest) File_LastChunkIsPartial() {\n\tvar err error\n\n\t\/\/ Node setup\n\tt.node.RelPath = \"foo\"\n\tp := path.Join(t.dir, t.node.RelPath)\n\tt.node.Info.Type = fs.TypeFile\n\n\tchunk0 := bytes.Repeat([]byte{0}, t.chunkSize)\n\tchunk1 := bytes.Repeat([]byte{1}, t.chunkSize-1)\n\n\tvar contents []byte\n\tcontents = append(contents, chunk0...)\n\tcontents = append(contents, chunk1...)\n\n\terr = ioutil.WriteFile(p, contents, 0700)\n\tAssertEq(nil, err)\n\n\t\/\/ Blob store (chunk 0)\n\texpected0, err := repr.MarshalFile(chunk0)\n\tAssertEq(nil, err)\n\n\tscore0 := blob.ComputeScore(expected0)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected0)).\n\t\tWillOnce(Return(score0, nil))\n\n\t\/\/ Blob store (chunk 1)\n\texpected1, err := repr.MarshalFile(chunk1)\n\tAssertEq(nil, err)\n\n\tscore1 := blob.ComputeScore(expected1)\n\tExpectCall(t.blobStore, \"Store\")(Any(), blobEquals(expected1)).\n\t\tWillOnce(Return(score1, nil))\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectThat(t.node.Info.Scores, ElementsAre(score0, score1))\n}\n\nfunc (t *VisitorTest) File_IneligibleForScoreMap() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_UpdatesScoreMap() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/osrg\/gobgp\/api\"\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/net\/context\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc NewMonitorCmd() *cobra.Command {\n\tribCmd := &cobra.Command{\n\t\tUse: CMD_RIB,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\trf, err := checkAddressFamily(nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\targ := &gobgpapi.Arguments{\n\t\t\t\tResource: gobgpapi.Resource_GLOBAL,\n\t\t\t\tRf: uint32(rf),\n\t\t\t}\n\n\t\t\tstream, err := client.MonitorBestChanged(context.Background(), arg)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor {\n\t\t\t\td, err := stream.Recv()\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tp, err := ApiStruct2Path(d.Paths[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\n\t\t\t\tif globalOpts.Json {\n\t\t\t\t\tj, _ := json.Marshal(p)\n\t\t\t\t\tfmt.Println(string(j))\n\t\t\t\t} else {\n\t\t\t\t\tshowRoute([]*Path{p}, false, false, false, true, true)\n\t\t\t\t}\n\t\t\t}\n\n\t\t},\n\t}\n\tribCmd.PersistentFlags().StringVarP(&subOpts.AddressFamily, \"address-family\", \"a\", \"\", \"address family\")\n\n\tglobalCmd := &cobra.Command{\n\t\tUse: CMD_GLOBAL,\n\t}\n\tglobalCmd.AddCommand(ribCmd)\n\n\tneighborCmd := &cobra.Command{\n\t\tUse: CMD_NEIGHBOR,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tvar arg *gobgpapi.Arguments\n\t\t\tif len(args) > 0 {\n\t\t\t\targ = &gobgpapi.Arguments{\n\t\t\t\t\tName: args[0],\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\targ = &gobgpapi.Arguments{}\n\t\t\t}\n\n\t\t\tstream, err := client.MonitorPeerState(context.Background(), arg)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor {\n\t\t\t\ts, err := stream.Recv()\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif globalOpts.Json {\n\t\t\t\t\tj, _ := json.Marshal(s)\n\t\t\t\t\tfmt.Println(string(j))\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"[NEIGH] %s fsm: %s admin: %s\\n\", s.Conf.RemoteIp, s.Info.BgpState, s.Info.AdminState)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\tmonitorCmd := &cobra.Command{\n\t\tUse: CMD_MONITOR,\n\t}\n\tmonitorCmd.AddCommand(globalCmd)\n\tmonitorCmd.AddCommand(neighborCmd)\n\n\treturn monitorCmd\n}\n<commit_msg>cli: fix not to show corrupted header in monitor command<commit_after>\/\/ Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/osrg\/gobgp\/api\"\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/net\/context\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc NewMonitorCmd() *cobra.Command {\n\tribCmd := &cobra.Command{\n\t\tUse: CMD_RIB,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\trf, err := checkAddressFamily(nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\targ := &gobgpapi.Arguments{\n\t\t\t\tResource: gobgpapi.Resource_GLOBAL,\n\t\t\t\tRf: uint32(rf),\n\t\t\t}\n\n\t\t\tstream, err := client.MonitorBestChanged(context.Background(), arg)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor {\n\t\t\t\td, err := stream.Recv()\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tp, err := ApiStruct2Path(d.Paths[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\n\t\t\t\tif globalOpts.Json {\n\t\t\t\t\tj, _ := json.Marshal(p)\n\t\t\t\t\tfmt.Println(string(j))\n\t\t\t\t} else {\n\t\t\t\t\tshowRoute([]*Path{p}, false, false, false, true, false)\n\t\t\t\t}\n\t\t\t}\n\n\t\t},\n\t}\n\tribCmd.PersistentFlags().StringVarP(&subOpts.AddressFamily, \"address-family\", \"a\", \"\", \"address family\")\n\n\tglobalCmd := &cobra.Command{\n\t\tUse: CMD_GLOBAL,\n\t}\n\tglobalCmd.AddCommand(ribCmd)\n\n\tneighborCmd := &cobra.Command{\n\t\tUse: CMD_NEIGHBOR,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tvar arg *gobgpapi.Arguments\n\t\t\tif len(args) > 0 {\n\t\t\t\targ = &gobgpapi.Arguments{\n\t\t\t\t\tName: args[0],\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\targ = &gobgpapi.Arguments{}\n\t\t\t}\n\n\t\t\tstream, err := client.MonitorPeerState(context.Background(), arg)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor {\n\t\t\t\ts, err := stream.Recv()\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif globalOpts.Json {\n\t\t\t\t\tj, _ := json.Marshal(s)\n\t\t\t\t\tfmt.Println(string(j))\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"[NEIGH] %s fsm: %s admin: %s\\n\", s.Conf.RemoteIp, s.Info.BgpState, s.Info.AdminState)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\tmonitorCmd := &cobra.Command{\n\t\tUse: CMD_MONITOR,\n\t}\n\tmonitorCmd.AddCommand(globalCmd)\n\tmonitorCmd.AddCommand(neighborCmd)\n\n\treturn monitorCmd\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\tpbb \"github.com\/brotherlogic\/buildserver\/proto\"\n\tpbfc \"github.com\/brotherlogic\/filecopier\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n)\n\nconst (\n\tpendWait = time.Minute\n)\n\nfunc (s *Server) runTransition(ctx context.Context, job *pb.JobAssignment) {\n\tstartState := job.State\n\tjob.LastUpdateTime = time.Now().Unix()\n\tswitch job.State {\n\tcase pb.State_ACKNOWLEDGED:\n\t\tkey := s.scheduleBuild(ctx, job)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"SCHED: %v @ %v\", key, time.Now())\n\t\ts.stateMutex.Unlock()\n\t\tif !job.Job.Bootstrap {\n\t\t\tif key != \"\" {\n\t\t\t\tjob.State = pb.State_BUILT\n\t\t\t\tjob.RunningVersion = key\n\t\t\t} else {\n\t\t\t\t\/\/ Bootstrap this job since we don't have an initial version\n\t\t\t\tif job.Job.PartialBootstrap {\n\t\t\t\t\tjob.Job.Bootstrap = true\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tjob.CommandKey = key\n\t\t\tjob.State = pb.State_BUILDING\n\t\t}\n\tcase pb.State_BUILDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILD(%v): %v\", job.CommandKey, s.scheduler.getState(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\ts.scheduler.wait(job.CommandKey)\n\t\tjob.State = pb.State_BUILT\n\tcase pb.State_BUILT:\n\t\tjob.SubState = \"Getting Output\"\n\t\toutput, _ := s.scheduler.getOutput(job.CommandKey)\n\t\tjob.SubState = \"Entering Lock\"\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILT(%v): (%v): %v\", job.CommandKey, len(output), output)\n\t\ts.stateMutex.Unlock()\n\t\tif job.Job.Bootstrap && len(output) > 0 {\n\t\t\tif job.BuildFail == 5 {\n\t\t\t\tjob.SubState = \"Sending Report\"\n\t\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\t\tjob.BuildFail = 0\n\t\t\t}\n\t\t\tjob.BuildFail++\n\t\t\tjob.State = pb.State_DIED\n\t\t} else {\n\t\t\tjob.BuildFail = 0\n\t\t\tjob.SubState = \"Scheduling Run\"\n\t\t\tkey := s.scheduleRun(job)\n\t\t\tjob.CommandKey = key\n\t\t\tjob.StartTime = time.Now().Unix()\n\t\t\tjob.State = pb.State_PENDING\n\t\t\tif _, ok := s.pendingMap[time.Now().Weekday()]; !ok {\n\t\t\t\ts.pendingMap[time.Now().Weekday()] = make(map[string]int)\n\t\t\t}\n\t\t\ts.pendingMap[time.Now().Weekday()][job.Job.Name]++\n\t\t}\n\t\tjob.SubState = \"Out of case\"\n\tcase pb.State_PENDING:\n\t\tif job.Job.PartialBootstrap && job.Job.Bootstrap {\n\t\t\tjob.Job.Bootstrap = false\n\t\t}\n\t\ts.stateMutex.Lock()\n\t\tout, _ := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"OUTPUT = %v\", out)\n\t\ts.stateMutex.Unlock()\n\t\tif time.Now().Add(-time.Minute).Unix() > job.StartTime {\n\t\t\tjob.State = pb.State_RUNNING\n\t\t}\n\tcase pb.State_RUNNING:\n\t\toutput, errout := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"ROUTPUT = %v, %v\", output, s.scheduler.getStatus(job.CommandKey))\n\t\tjob.Status = s.scheduler.getStatus(job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\tif len(job.CommandKey) > 0 {\n\t\t\ts.scheduler.wait(job.CommandKey)\n\t\t\ts.stateMutex.Lock()\n\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"COMPLETE = (%v, %v)\", job, output)\n\t\t\ts.stateMutex.Unlock()\n\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\tjob.State = pb.State_DIED\n\t\t}\n\n\t\tif s.discover != nil && s.Registry != nil {\n\t\t\tport, err := s.discover.discover(job.Job.Name, s.Registry.Identifier)\n\t\t\tif err != nil {\n\t\t\t\tif job.DiscoverCount > 30 {\n\t\t\t\t\toutput2, errout2 := s.scheduler.getErrOutput(job.CommandKey)\n\t\t\t\t\ts.RaiseIssue(\"Cannot Discover Running Server\", fmt.Sprintf(\"%v on %v is not discoverable, despite running (%v) the output says %v (%v), %v, %v\", job.Job.Name, s.Registry.Identifier, err, output, errout, output2, errout2))\n\t\t\t\t}\n\t\t\t\tjob.DiscoverCount++\n\t\t\t} else {\n\t\t\t\tjob.Port = port\n\t\t\t\tjob.DiscoverCount = 0\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Restart this job if we need to\n\t\tif !job.Job.Bootstrap {\n\t\t\tif time.Now().Sub(time.Unix(job.LastVersionPull, 0)) > time.Minute*5 {\n\t\t\t\tversion, err := s.getVersion(ctx, job.Job)\n\t\t\t\tjob.LastVersionPull = time.Now().Unix()\n\n\t\t\t\tif err == nil && version.Version != job.RunningVersion {\n\t\t\t\t\ts.stateMutex.Lock()\n\t\t\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"VERSION_MISMATCH = %v,%v\", version, job.RunningVersion)\n\t\t\t\t\ts.stateMutex.Unlock()\n\t\t\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase pb.State_BRINK_OF_DEATH:\n\t\tif s.version.confirm(ctx, job.Job.Name) {\n\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\tjob.State = pb.State_ACKNOWLEDGED\n\t\t}\n\tcase pb.State_DIED:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"DIED %v\", job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\tjob.State = pb.State_ACKNOWLEDGED\n\t}\n\n\tif job.State != startState {\n\t\tjob.LastTransitionTime = time.Now().Unix()\n\t}\n}\n\ntype translator interface {\n\tbuild(job *pb.Job) *exec.Cmd\n\trun(job *pb.Job) *exec.Cmd\n}\n\ntype checker interface {\n\tisAlive(ctx context.Context, job *pb.JobAssignment) bool\n}\n\nfunc (s *Server) getVersion(ctx context.Context, job *pb.Job) (*pbb.Version, error) {\n\tversion, err := s.builder.build(ctx, job)\n\tif err != nil {\n\t\treturn &pbb.Version{}, err\n\t}\n\n\treturn version, nil\n\n}\n\nfunc updateJob(err error, job *pb.JobAssignment, resp *pbfc.CopyResponse) {\n\tif err == nil {\n\t\tjob.QueuePos = resp.IndexInQueue\n\t}\n\n}\n\n\/\/ scheduleBuild builds out the job, returning the current version\nfunc (s *Server) scheduleBuild(ctx context.Context, job *pb.JobAssignment) string {\n\tif job.Job.Bootstrap {\n\t\tc := s.translator.build(job.Job)\n\t\treturn s.scheduler.Schedule(&rCommand{command: c, base: job.Job.Name})\n\t}\n\n\tval, err := s.builder.build(ctx, job.Job)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn val.Version\n}\n\nfunc (s *Server) scheduleRun(job *pb.JobAssignment) string {\n\t\/\/Copy over any existing new versions\n\tkey := s.scheduler.Schedule(&rCommand{command: exec.Command(\"mv\", \"$GOPATH\/bin\/\"+job.GetJob().GetName()+\".new\", \"$GOPATH\/bin\/\"+job.GetJob().GetName()), base: job.GetJob().GetName()})\n\ts.scheduler.wait(key)\n\n\tc := s.translator.run(job.GetJob())\n\treturn s.scheduler.Schedule(&rCommand{command: c, base: job.GetJob().GetName()})\n}\n<commit_msg>Blocks builds<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\tpbb \"github.com\/brotherlogic\/buildserver\/proto\"\n\tpbfc \"github.com\/brotherlogic\/filecopier\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n)\n\nconst (\n\tpendWait = time.Minute\n)\n\nfunc (s *Server) runTransition(ctx context.Context, job *pb.JobAssignment) {\n\tstartState := job.State\n\tjob.LastUpdateTime = time.Now().Unix()\n\tswitch job.State {\n\tcase pb.State_ACKNOWLEDGED:\n\t\tkey := s.scheduleBuild(ctx, job)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"SCHED: %v @ %v\", key, time.Now())\n\t\ts.stateMutex.Unlock()\n\t\tif !job.Job.Bootstrap {\n\t\t\tif key != \"\" {\n\t\t\t\tjob.State = pb.State_BUILT\n\t\t\t\tjob.RunningVersion = key\n\t\t\t} else {\n\t\t\t\t\/\/ Bootstrap this job since we don't have an initial version\n\t\t\t\tif job.Job.PartialBootstrap {\n\t\t\t\t\tjob.Job.Bootstrap = true\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tjob.CommandKey = key\n\t\t\tjob.State = pb.State_BUILDING\n\t\t}\n\tcase pb.State_BUILDING:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILD(%v): %v\", job.CommandKey, s.scheduler.getState(job.CommandKey))\n\t\ts.stateMutex.Unlock()\n\t\ts.scheduler.wait(job.CommandKey)\n\t\tjob.State = pb.State_BUILT\n\tcase pb.State_BUILT:\n\t\tjob.SubState = \"Getting Output\"\n\t\toutput, _ := s.scheduler.getOutput(job.CommandKey)\n\t\tjob.SubState = \"Entering Lock\"\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"BUILT(%v): (%v): %v\", job.CommandKey, len(output), output)\n\t\ts.stateMutex.Unlock()\n\t\tif job.Job.Bootstrap && len(output) > 0 {\n\t\t\tif job.BuildFail == 5 {\n\t\t\t\tjob.SubState = \"Sending Report\"\n\t\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\t\tjob.BuildFail = 0\n\t\t\t}\n\t\t\tjob.BuildFail++\n\t\t\tjob.State = pb.State_DIED\n\t\t} else {\n\t\t\tjob.BuildFail = 0\n\t\t\tjob.SubState = \"Scheduling Run\"\n\t\t\tkey := s.scheduleRun(job)\n\t\t\tjob.CommandKey = key\n\t\t\tjob.StartTime = time.Now().Unix()\n\t\t\tjob.State = pb.State_PENDING\n\t\t\tif _, ok := s.pendingMap[time.Now().Weekday()]; !ok {\n\t\t\t\ts.pendingMap[time.Now().Weekday()] = make(map[string]int)\n\t\t\t}\n\t\t\ts.pendingMap[time.Now().Weekday()][job.Job.Name]++\n\t\t}\n\t\tjob.SubState = \"Out of case\"\n\tcase pb.State_PENDING:\n\t\tif job.Job.PartialBootstrap && job.Job.Bootstrap {\n\t\t\tjob.Job.Bootstrap = false\n\t\t}\n\t\ts.stateMutex.Lock()\n\t\tout, _ := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"OUTPUT = %v\", out)\n\t\ts.stateMutex.Unlock()\n\t\tif time.Now().Add(-time.Minute).Unix() > job.StartTime {\n\t\t\tjob.State = pb.State_RUNNING\n\t\t}\n\tcase pb.State_RUNNING:\n\t\toutput, errout := s.scheduler.getOutput(job.CommandKey)\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"ROUTPUT = %v, %v\", output, s.scheduler.getStatus(job.CommandKey))\n\t\tjob.Status = s.scheduler.getStatus(job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\tif len(job.CommandKey) > 0 {\n\t\t\ts.scheduler.wait(job.CommandKey)\n\t\t\ts.stateMutex.Lock()\n\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"COMPLETE = (%v, %v)\", job, output)\n\t\t\ts.stateMutex.Unlock()\n\t\t\ts.deliverCrashReport(ctx, job, output)\n\t\t\tjob.State = pb.State_DIED\n\t\t}\n\n\t\tif s.discover != nil && s.Registry != nil {\n\t\t\tport, err := s.discover.discover(job.Job.Name, s.Registry.Identifier)\n\t\t\tif err != nil {\n\t\t\t\tif job.DiscoverCount > 30 {\n\t\t\t\t\toutput2, errout2 := s.scheduler.getErrOutput(job.CommandKey)\n\t\t\t\t\ts.RaiseIssue(\"Cannot Discover Running Server\", fmt.Sprintf(\"%v on %v is not discoverable, despite running (%v) the output says %v (%v), %v, %v\", job.Job.Name, s.Registry.Identifier, err, output, errout, output2, errout2))\n\t\t\t\t}\n\t\t\t\tjob.DiscoverCount++\n\t\t\t} else {\n\t\t\t\tjob.Port = port\n\t\t\t\tjob.DiscoverCount = 0\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Restart this job if we need to\n\t\tif !job.Job.Bootstrap {\n\t\t\tif time.Now().Sub(time.Unix(job.LastVersionPull, 0)) > time.Minute*5 {\n\t\t\t\tversion, err := s.getVersion(ctx, job.Job)\n\t\t\t\tjob.LastVersionPull = time.Now().Unix()\n\n\t\t\t\tif err == nil && version.Version != job.RunningVersion {\n\t\t\t\t\ts.stateMutex.Lock()\n\t\t\t\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"VERSION_MISMATCH = %v,%v\", version, job.RunningVersion)\n\t\t\t\t\ts.stateMutex.Unlock()\n\t\t\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase pb.State_BRINK_OF_DEATH:\n\t\tif s.version.confirm(ctx, job.Job.Name) {\n\t\t\ts.scheduler.killJob(job.CommandKey)\n\t\t\tjob.State = pb.State_ACKNOWLEDGED\n\t\t}\n\tcase pb.State_DIED:\n\t\ts.stateMutex.Lock()\n\t\ts.stateMap[job.Job.Name] = fmt.Sprintf(\"DIED %v\", job.CommandKey)\n\t\ts.stateMutex.Unlock()\n\t\tjob.State = pb.State_ACKNOWLEDGED\n\t}\n\n\tif job.State != startState {\n\t\tjob.LastTransitionTime = time.Now().Unix()\n\t}\n}\n\ntype translator interface {\n\tbuild(job *pb.Job) *exec.Cmd\n\trun(job *pb.Job) *exec.Cmd\n}\n\ntype checker interface {\n\tisAlive(ctx context.Context, job *pb.JobAssignment) bool\n}\n\nfunc (s *Server) getVersion(ctx context.Context, job *pb.Job) (*pbb.Version, error) {\n\tversion, err := s.builder.build(ctx, job)\n\tif err != nil {\n\t\treturn &pbb.Version{}, err\n\t}\n\n\treturn version, nil\n\n}\n\nfunc updateJob(err error, job *pb.JobAssignment, resp *pbfc.CopyResponse) {\n\tif err == nil {\n\t\tjob.QueuePos = resp.IndexInQueue\n\t}\n\n}\n\n\/\/ scheduleBuild builds out the job, returning the current version\nfunc (s *Server) scheduleBuild(ctx context.Context, job *pb.JobAssignment) string {\n\tif job.Job.Bootstrap {\n\t\tc := s.translator.build(job.Job)\n\t\t\/\/ Block builds to prevent clashes\n\t\treturn s.scheduler.Schedule(&rCommand{command: c, base: job.Job.Name, block: true})\n\t}\n\n\tval, err := s.builder.build(ctx, job.Job)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn val.Version\n}\n\nfunc (s *Server) scheduleRun(job *pb.JobAssignment) string {\n\t\/\/Copy over any existing new versions\n\tkey := s.scheduler.Schedule(&rCommand{command: exec.Command(\"mv\", \"$GOPATH\/bin\/\"+job.GetJob().GetName()+\".new\", \"$GOPATH\/bin\/\"+job.GetJob().GetName()), base: job.GetJob().GetName()})\n\ts.scheduler.wait(key)\n\n\tc := s.translator.run(job.GetJob())\n\treturn s.scheduler.Schedule(&rCommand{command: c, base: job.GetJob().GetName()})\n}\n<|endoftext|>"} {"text":"<commit_before>package goroutine\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_go(t *testing.T) {\n\tpid, err := Start(new(Test))\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tgo Cast(pid, \"+\", []interface{}{1, 2})\n\tadd := func() {\n\t\tret, err := Call(pid, \"+\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 + 2 = \", ret[0])\n\t\tif ret[0] != 3 {\n\t\t\tt.Errorf(\"1 + 2 != 3 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo add()\n\tsub := func() {\n\t\tret, err := Call(pid, \"-\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 - 2 = \", ret[0])\n\t\tif ret[0] != -1 {\n\t\t\tt.Errorf(\"1 - 2 != -1 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo sub()\n\tmul := func() {\n\t\tret, err := Call(pid, \"*\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 * 2 = \", ret[0])\n\t\tif ret[0] != 2 {\n\t\t\tt.Errorf(\"1 * 2 != 2 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo mul()\n\ttime.Sleep(1e9)\n\tif err := StopById(pid); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tStopByName(\"calc\")\n}\n\n\/*****************************实现进程装载器********************************\/\n\ntype Test struct {\n}\n\nfunc (t *Test) name() string {\n\treturn \"calc\"\n}\n\nfunc (t *Test) timer() time.Duration {\n\treturn time.Millisecond * 200\n}\n\nfunc (t *Test) timer_work() {\n\tfmt.Println(\"11111111111111111111内置定时器1111111111111111111111\")\n}\n\nfunc (t *Test) initGo() {\n\tfmt.Println(\"init..............\")\n}\n\nfunc (t *Test) handler(msg string, args []interface{}, ret chan []interface{}) {\n\tfmt.Println(\"handler..............\", msg, args)\n\t\/\/ 异步的嘛\n\tif ret == nil {\n\t\t\/\/...........do something...........\n\t\treturn\n\t}\n\t\/\/ 同步的嘛\n\tswitch msg {\n\tcase \"+\":\n\t\tret <- []interface{}{args[0].(int) + args[1].(int)}\n\tcase \"-\":\n\t\tret <- []interface{}{args[0].(int) - args[1].(int)}\n\tcase \"*\":\n\t\tret <- []interface{}{args[0].(int) * args[1].(int)}\n\t}\n}\n\nfunc (t *Test) closeGo() {\n\tfmt.Println(\"close..............\")\n}\n<commit_msg>update goroutine test<commit_after>package goroutine\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_go(t *testing.T) {\n\tpid, err := Start(new(Test))\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tgo Cast(pid, \"+\", []interface{}{1, 2})\n\tadd := func() {\n\t\tret, err := Call(pid, \"+\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 + 2 = \", ret[0])\n\t\tif ret[0] != 3 {\n\t\t\tt.Errorf(\"1 + 2 != 3 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo add()\n\tsub := func() {\n\t\tret, err := Call(pid, \"-\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 - 2 = \", ret[0])\n\t\tif ret[0] != -1 {\n\t\t\tt.Errorf(\"1 - 2 != -1 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo sub()\n\tmul := func() {\n\t\tret, err := Call(pid, \"*\", []interface{}{1, 2}, 2)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"1 * 2 = \", ret[0])\n\t\tif ret[0] != 2 {\n\t\t\tt.Errorf(\"1 * 2 != 2 ?????????????\")\n\t\t\treturn\n\t\t}\n\t}\n\tgo mul()\n\ttime.Sleep(1e9)\n\tif err := StopById(pid); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tStopByName(\"calc\")\n}\n\n\/*****************************实现进程装载器********************************\/\n\ntype Test struct {\n}\n\nfunc (t *Test) name() string {\n\treturn \"calc\"\n}\n\nfunc (t *Test) timer() time.Duration {\n\treturn time.Millisecond * 200\n\t\/\/ return 0\n}\n\nfunc (t *Test) timer_work() {\n\tfmt.Println(\"11111111111111111111内置定时器1111111111111111111111\")\n}\n\nfunc (t *Test) initGo() {\n\tfmt.Println(\"init..............\")\n}\n\nfunc (t *Test) handler(msg string, args []interface{}, ret chan []interface{}) {\n\tfmt.Println(\"handler..............\", msg, args)\n\t\/\/ 异步的嘛\n\tif ret == nil {\n\t\t\/\/...........do something...........\n\t\treturn\n\t}\n\t\/\/ 同步的嘛\n\tswitch msg {\n\tcase \"+\":\n\t\tret <- []interface{}{args[0].(int) + args[1].(int)}\n\tcase \"-\":\n\t\tret <- []interface{}{args[0].(int) - args[1].(int)}\n\tcase \"*\":\n\t\tret <- []interface{}{args[0].(int) * args[1].(int)}\n\t}\n}\n\nfunc (t *Test) closeGo() {\n\tfmt.Println(\"close..............\")\n}\n<|endoftext|>"} {"text":"<commit_before>package cert\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nvar SkipVerify = false\n\nvar UTC = false\n\nvar userTempl string\n\nfunc SetUserTempl(templ string) error {\n\tif templ == \"\" {\n\t\treturn nil\n\t}\n\n\tpath, err := filepath.Abs(templ)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tuserTempl = templ\n\t\treturn nil\n\t}\n\n\tuserTempl = string(content)\n\n\treturn nil\n}\n\nconst defaultPort = \"443\"\n\nfunc SplitHostPort(hostport string) (string, string, error) {\n\tif !strings.Contains(hostport, \":\") {\n\t\treturn hostport, defaultPort, nil\n\t}\n\n\thost, port, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\n\treturn host, port, nil\n}\n\ntype Cert struct {\n\tDomainName string `json:\"domainName\"`\n\tIP string `json:\"ip\"`\n\tIssuer string `json:\"issuer\"`\n\tCommonName string `json:\"commonName\"`\n\tSANs []string `json:\"sans\"`\n\tNotBefore string `json:\"notBefore\"`\n\tNotAfter string `json:\"notAfter\"`\n\tError string `json:\"error\"`\n\tcertChain []*x509.Certificate\n}\n\nvar serverCert = func(host, port string) ([]*x509.Certificate, string, error) {\n\tconn, err := tls.Dial(\"tcp\", host+\":\"+port, &tls.Config{\n\t\tInsecureSkipVerify: SkipVerify,\n\t})\n\tif err != nil {\n\t\treturn []*x509.Certificate{&x509.Certificate{}}, \"\", err\n\t}\n\tdefer conn.Close()\n\taddr := conn.RemoteAddr()\n\tip, _, _ := net.SplitHostPort(addr.String())\n\tcert := conn.ConnectionState().PeerCertificates\n\n\treturn cert, ip, nil\n}\n\nfunc NewCert(hostport string) *Cert {\n\thost, port, err := SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn &Cert{DomainName: host, Error: err.Error()}\n\t}\n\tcertChain, ip, err := serverCert(host, port)\n\tif err != nil {\n\t\treturn &Cert{DomainName: host, Error: err.Error()}\n\t}\n\tcert := certChain[0]\n\n\tvar loc *time.Location\n\tloc = time.Local\n\tif UTC {\n\t\tloc = time.UTC\n\t}\n\n\treturn &Cert{\n\t\tDomainName: host,\n\t\tIP: ip,\n\t\tIssuer: cert.Issuer.CommonName,\n\t\tCommonName: cert.Subject.CommonName,\n\t\tSANs: cert.DNSNames,\n\t\tNotBefore: cert.NotBefore.In(loc).String(),\n\t\tNotAfter: cert.NotAfter.In(loc).String(),\n\t\tError: \"\",\n\t\tcertChain: certChain,\n\t}\n}\n\nfunc (c *Cert) Detail() *x509.Certificate {\n\treturn c.certChain[0]\n}\n\nfunc (c *Cert) CertChain() []*x509.Certificate {\n\treturn c.certChain\n}\n\ntype Certs []*Cert\n\nvar tokens = make(chan struct{}, 128)\n\nfunc validate(s []string) error {\n\tif len(s) < 1 {\n\t\treturn fmt.Errorf(\"Input at least one domain name.\")\n\t}\n\treturn nil\n}\n\nfunc NewCerts(s []string) (Certs, error) {\n\tif err := validate(s); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype indexer struct {\n\t\tindex int\n\t\tcert *Cert\n\t}\n\n\tch := make(chan *indexer)\n\tfor i, d := range s {\n\t\tgo func(i int, d string) {\n\t\t\ttokens <- struct{}{}\n\t\t\tch <- &indexer{i, NewCert(d)}\n\t\t\t<-tokens\n\t\t}(i, d)\n\t}\n\n\tcerts := make(Certs, len(s))\n\tfor range s {\n\t\ti := <-ch\n\t\tcerts[i.index] = i.cert\n\t}\n\treturn certs, nil\n}\n\nconst defaultTempl = `{{range .}}DomainName: {{.DomainName}}\nIP: {{.IP}}\nIssuer: {{.Issuer}}\nNotBefore: {{.NotBefore}}\nNotAfter: {{.NotAfter}}\nCommonName: {{.CommonName}}\nSANs: {{.SANs}}\nError: {{.Error}}\n\n{{end}}\n`\n\nfunc (certs Certs) String() string {\n\tvar b bytes.Buffer\n\n\ttempl := defaultTempl\n\tif userTempl != \"\" {\n\t\ttempl = userTempl\n\t}\n\n\tt := template.Must(template.New(\"default\").Parse(templ))\n\tif err := t.Execute(&b, certs); err != nil {\n\t\tpanic(err)\n\t}\n\treturn b.String()\n}\n\nconst markdownTempl = `DomainName | IP | Issuer | NotBefore | NotAfter | CN | SANs | Error\n--- | --- | --- | --- | --- | --- | --- | ---\n{{range .}}{{.DomainName}} | {{.IP}} | {{.Issuer}} | {{.NotBefore}} | {{.NotAfter}} | {{.CommonName}} | {{range .SANs}}{{.}}<br\/>{{end}} | {{.Error}}\n{{end}}\n`\n\nfunc (certs Certs) escapeStar() Certs {\n\tfor _, cert := range certs {\n\t\tfor i, san := range cert.SANs {\n\t\t\tcert.SANs[i] = strings.Replace(san, \"*\", \"\\\\*\", -1)\n\t\t}\n\t}\n\treturn certs\n}\n\nfunc (certs Certs) Markdown() string {\n\tvar b bytes.Buffer\n\tt := template.Must(template.New(\"markdown\").Parse(markdownTempl))\n\tif err := t.Execute(&b, certs.escapeStar()); err != nil {\n\t\tpanic(err)\n\t}\n\treturn b.String()\n}\n\nfunc (certs Certs) JSON() []byte {\n\tdata, err := json.Marshal(certs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn data\n}\n<commit_msg>cert.JSON return string<commit_after>package cert\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nvar SkipVerify = false\n\nvar UTC = false\n\nvar userTempl string\n\nfunc SetUserTempl(templ string) error {\n\tif templ == \"\" {\n\t\treturn nil\n\t}\n\n\tpath, err := filepath.Abs(templ)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tuserTempl = templ\n\t\treturn nil\n\t}\n\n\tuserTempl = string(content)\n\n\treturn nil\n}\n\nconst defaultPort = \"443\"\n\nfunc SplitHostPort(hostport string) (string, string, error) {\n\tif !strings.Contains(hostport, \":\") {\n\t\treturn hostport, defaultPort, nil\n\t}\n\n\thost, port, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\n\treturn host, port, nil\n}\n\ntype Cert struct {\n\tDomainName string `json:\"domainName\"`\n\tIP string `json:\"ip\"`\n\tIssuer string `json:\"issuer\"`\n\tCommonName string `json:\"commonName\"`\n\tSANs []string `json:\"sans\"`\n\tNotBefore string `json:\"notBefore\"`\n\tNotAfter string `json:\"notAfter\"`\n\tError string `json:\"error\"`\n\tcertChain []*x509.Certificate\n}\n\nvar serverCert = func(host, port string) ([]*x509.Certificate, string, error) {\n\tconn, err := tls.Dial(\"tcp\", host+\":\"+port, &tls.Config{\n\t\tInsecureSkipVerify: SkipVerify,\n\t})\n\tif err != nil {\n\t\treturn []*x509.Certificate{&x509.Certificate{}}, \"\", err\n\t}\n\tdefer conn.Close()\n\taddr := conn.RemoteAddr()\n\tip, _, _ := net.SplitHostPort(addr.String())\n\tcert := conn.ConnectionState().PeerCertificates\n\n\treturn cert, ip, nil\n}\n\nfunc NewCert(hostport string) *Cert {\n\thost, port, err := SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn &Cert{DomainName: host, Error: err.Error()}\n\t}\n\tcertChain, ip, err := serverCert(host, port)\n\tif err != nil {\n\t\treturn &Cert{DomainName: host, Error: err.Error()}\n\t}\n\tcert := certChain[0]\n\n\tvar loc *time.Location\n\tloc = time.Local\n\tif UTC {\n\t\tloc = time.UTC\n\t}\n\n\treturn &Cert{\n\t\tDomainName: host,\n\t\tIP: ip,\n\t\tIssuer: cert.Issuer.CommonName,\n\t\tCommonName: cert.Subject.CommonName,\n\t\tSANs: cert.DNSNames,\n\t\tNotBefore: cert.NotBefore.In(loc).String(),\n\t\tNotAfter: cert.NotAfter.In(loc).String(),\n\t\tError: \"\",\n\t\tcertChain: certChain,\n\t}\n}\n\nfunc (c *Cert) Detail() *x509.Certificate {\n\treturn c.certChain[0]\n}\n\nfunc (c *Cert) CertChain() []*x509.Certificate {\n\treturn c.certChain\n}\n\ntype Certs []*Cert\n\nvar tokens = make(chan struct{}, 128)\n\nfunc validate(s []string) error {\n\tif len(s) < 1 {\n\t\treturn fmt.Errorf(\"Input at least one domain name.\")\n\t}\n\treturn nil\n}\n\nfunc NewCerts(s []string) (Certs, error) {\n\tif err := validate(s); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype indexer struct {\n\t\tindex int\n\t\tcert *Cert\n\t}\n\n\tch := make(chan *indexer)\n\tfor i, d := range s {\n\t\tgo func(i int, d string) {\n\t\t\ttokens <- struct{}{}\n\t\t\tch <- &indexer{i, NewCert(d)}\n\t\t\t<-tokens\n\t\t}(i, d)\n\t}\n\n\tcerts := make(Certs, len(s))\n\tfor range s {\n\t\ti := <-ch\n\t\tcerts[i.index] = i.cert\n\t}\n\treturn certs, nil\n}\n\nconst defaultTempl = `{{range .}}DomainName: {{.DomainName}}\nIP: {{.IP}}\nIssuer: {{.Issuer}}\nNotBefore: {{.NotBefore}}\nNotAfter: {{.NotAfter}}\nCommonName: {{.CommonName}}\nSANs: {{.SANs}}\nError: {{.Error}}\n\n{{end}}\n`\n\nfunc (certs Certs) String() string {\n\tvar b bytes.Buffer\n\n\ttempl := defaultTempl\n\tif userTempl != \"\" {\n\t\ttempl = userTempl\n\t}\n\n\tt := template.Must(template.New(\"default\").Parse(templ))\n\tif err := t.Execute(&b, certs); err != nil {\n\t\tpanic(err)\n\t}\n\treturn b.String()\n}\n\nconst markdownTempl = `DomainName | IP | Issuer | NotBefore | NotAfter | CN | SANs | Error\n--- | --- | --- | --- | --- | --- | --- | ---\n{{range .}}{{.DomainName}} | {{.IP}} | {{.Issuer}} | {{.NotBefore}} | {{.NotAfter}} | {{.CommonName}} | {{range .SANs}}{{.}}<br\/>{{end}} | {{.Error}}\n{{end}}\n`\n\nfunc (certs Certs) escapeStar() Certs {\n\tfor _, cert := range certs {\n\t\tfor i, san := range cert.SANs {\n\t\t\tcert.SANs[i] = strings.Replace(san, \"*\", \"\\\\*\", -1)\n\t\t}\n\t}\n\treturn certs\n}\n\nfunc (certs Certs) Markdown() string {\n\tvar b bytes.Buffer\n\tt := template.Must(template.New(\"markdown\").Parse(markdownTempl))\n\tif err := t.Execute(&b, certs.escapeStar()); err != nil {\n\t\tpanic(err)\n\t}\n\treturn b.String()\n}\n\nfunc (certs Certs) JSON() string {\n\tdata, err := json.Marshal(certs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(data)\n}\n<|endoftext|>"} {"text":"<commit_before>package trello\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/joonasmyhrberg\/go-trello\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype WebhookResponse struct {\n\tAction trello.Action `json:\"action\"`\n}\n\nfunc WebhookHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"HEAD\":\n\t\thandlePing(w)\n\tcase \"POST\":\n\t\tfmt.Println(\"Handler post\")\n\t\tresponse, err := handleUpdate(w, r)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error handling update: \", err)\n\t\t}\n\t\thandleAction(response.Action)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}\n\nfunc handlePing(w http.ResponseWriter) {\n\tfmt.Println(\"Received webhook creation ping, responding with 200\")\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc handleUpdate(w http.ResponseWriter, r *http.Request) (webhookResponse WebhookResponse, err error) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\terr = json.Unmarshal(body, &webhookResponse)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to decode action JSON with error: \", err.Error())\n\t\treturn WebhookResponse{}, err\n\t}\n\treturn\n}\n<commit_msg>Remove extra debug printing<commit_after>package trello\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/joonasmyhrberg\/go-trello\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype WebhookResponse struct {\n\tAction trello.Action `json:\"action\"`\n}\n\nfunc WebhookHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"HEAD\":\n\t\thandlePing(w)\n\tcase \"POST\":\n\t\tresponse, err := handleUpdate(w, r)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error handling update: \", err)\n\t\t}\n\t\thandleAction(response.Action)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}\n\nfunc handlePing(w http.ResponseWriter) {\n\tfmt.Println(\"Received webhook creation ping, responding with 200\")\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc handleUpdate(w http.ResponseWriter, r *http.Request) (webhookResponse WebhookResponse, err error) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\terr = json.Unmarshal(body, &webhookResponse)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to decode action JSON with error: \", err.Error())\n\t\treturn WebhookResponse{}, err\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage isolation_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/failpoint\"\n\t\"github.com\/pingcap\/tidb\/infoschema\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\"\n\t\"github.com\/pingcap\/tidb\/sessiontxn\"\n\t\"github.com\/pingcap\/tidb\/testkit\"\n\t\"github.com\/pingcap\/tidb\/testkit\/testfork\"\n\t\"github.com\/pingcap\/tidb\/testkit\/testsetup\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/tikv\/client-go\/v2\/oracle\"\n\t\"github.com\/tikv\/client-go\/v2\/tikv\"\n\t\"go.uber.org\/goleak\"\n)\n\nfunc TestMain(m *testing.M) {\n\ttestsetup.SetupForCommonTest()\n\ttikv.EnableFailpoints()\n\topts := []goleak.Option{\n\t\tgoleak.IgnoreTopFunction(\"github.com\/golang\/glog.(*loggingT).flushDaemon\"),\n\t\tgoleak.IgnoreTopFunction(\"go.etcd.io\/etcd\/client\/pkg\/v3\/logutil.(*MergeLogger).outputLoop\"),\n\t\tgoleak.IgnoreTopFunction(\"go.opencensus.io\/stats\/view.(*worker).start\"),\n\t}\n\tgoleak.VerifyTestMain(m, opts...)\n}\n\nfunc getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 {\n\tts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})\n\trequire.NoError(t, err)\n\treturn ts\n}\n\ntype txnAssert[T sessiontxn.TxnContextProvider] struct {\n\tsctx sessionctx.Context\n\tisolation string\n\tminStartTime time.Time\n\tactive bool\n\tinTxn bool\n\tminStartTS uint64\n\tstartTS uint64\n\tcausalConsistencyOnly bool\n\tcouldRetry bool\n}\n\nfunc (a *txnAssert[T]) Check(t testing.TB) {\n\tprovider := sessiontxn.GetTxnManager(a.sctx).GetContextProvider()\n\tsessVars := a.sctx.GetSessionVars()\n\ttxnCtx := sessVars.TxnCtx\n\n\tif sessVars.SnapshotInfoschema == nil {\n\t\trequire.Same(t, provider.GetTxnInfoSchema(), txnCtx.InfoSchema)\n\t} else {\n\t\trequire.Equal(t, sessVars.SnapshotInfoschema.(infoschema.InfoSchema).SchemaMetaVersion(), provider.GetTxnInfoSchema().SchemaMetaVersion())\n\t}\n\trequire.Equal(t, a.isolation, txnCtx.Isolation)\n\trequire.Equal(t, a.isolation != \"\", txnCtx.IsPessimistic)\n\trequire.Equal(t, sessVars.CheckAndGetTxnScope(), txnCtx.TxnScope)\n\trequire.Equal(t, sessVars.ShardAllocateStep, int64(txnCtx.ShardStep))\n\trequire.False(t, txnCtx.IsStaleness)\n\trequire.GreaterOrEqual(t, txnCtx.CreateTime.Nanosecond(), a.minStartTime.Nanosecond())\n\trequire.Equal(t, a.inTxn, sessVars.InTxn())\n\trequire.Equal(t, a.inTxn, txnCtx.IsExplicit)\n\trequire.Equal(t, a.couldRetry, txnCtx.CouldRetry)\n\trequire.Equal(t, assertTxnScope, txnCtx.TxnScope)\n\trequire.Equal(t, assertTxnScope, provider.GetTxnScope())\n\trequire.Equal(t, assertReplicaReadScope, provider.GetReadReplicaScope())\n\n\ttxn, err := a.sctx.Txn(false)\n\trequire.NoError(t, err)\n\trequire.Equal(t, a.active, txn.Valid())\n\tif !a.active {\n\t\trequire.False(t, a.inTxn)\n\t\trequire.Zero(t, a.startTS)\n\t\trequire.Zero(t, txnCtx.StartTS)\n\t} else {\n\t\trequire.True(t, a.minStartTS != 0 || a.startTS != 0)\n\t\trequire.Greater(t, txnCtx.StartTS, a.minStartTS)\n\t\tif a.startTS != 0 {\n\t\t\trequire.Equal(t, a.startTS, txnCtx.StartTS)\n\t\t}\n\t\trequire.Equal(t, txnCtx.StartTS, txn.StartTS())\n\t\trequire.Same(t, sessVars.KVVars, txn.GetVars())\n\t\trequire.Equal(t, txnCtx.TxnScope, txn.GetOption(kv.TxnScope))\n\t\trequire.Equal(t, a.causalConsistencyOnly, !txn.GetOption(kv.GuaranteeLinearizability).(bool))\n\t\trequire.Equal(t, txnCtx.IsPessimistic, txn.IsPessimistic())\n\t}\n\t\/\/ The next line is testing the provider has the type T, if not, the cast will panic\n\t_ = provider.(T)\n}\n\nfunc activeSnapshotTxnAssert(sctx sessionctx.Context, ts uint64, isolation string) *txnAssert[sessiontxn.TxnContextProvider] {\n\treturn &txnAssert[sessiontxn.TxnContextProvider]{\n\t\tsctx: sctx,\n\t\tminStartTime: time.Now(),\n\t\tstartTS: ts,\n\t\tactive: true,\n\t\tinTxn: false,\n\t\tisolation: isolation,\n\t}\n}\n\nfunc (a *txnAssert[T]) CheckAndGetProvider(t testing.TB) T {\n\ta.Check(t)\n\treturn sessiontxn.GetTxnManager(a.sctx).GetContextProvider().(T)\n}\n\nvar assertTxnScope = kv.GlobalTxnScope\nvar assertReplicaReadScope = kv.GlobalReplicaScope\n\nfunc forkScopeSettings(t *testfork.T, store kv.Storage) func() {\n\ttk := testkit.NewTestKit(t, store)\n\tfailPointEnabled := false\n\tclearFunc := func() {\n\t\tassertTxnScope = kv.GlobalTxnScope\n\t\tassertReplicaReadScope = kv.GlobalReplicaScope\n\t\ttk.MustExec(\"set @@global.tidb_replica_read='leader'\")\n\t\ttk.MustExec(\"set @@global.tidb_enable_local_txn=0\")\n\t\tif failPointEnabled {\n\t\t\trequire.NoError(t, failpoint.Disable(\"tikvclient\/injectTxnScope\"))\n\t\t}\n\t}\n\n\tclearFunc()\n\tsuccess := false\n\tdefer func() {\n\t\tif !success {\n\t\t\tclearFunc()\n\t\t}\n\t}()\n\n\tzone := testfork.PickEnum(t, \"\", \"bj\")\n\tif zone != \"\" {\n\t\trequire.NoError(t, failpoint.Enable(\"tikvclient\/injectTxnScope\", fmt.Sprintf(`return(\"%v\")`, zone)))\n\t\tfailPointEnabled = true\n\t\tif testfork.PickEnum(t, \"\", \"enableLocalTxn\") != \"\" {\n\t\t\ttk.MustExec(\"set @@global.tidb_enable_local_txn=1\")\n\t\t\tassertTxnScope = zone\n\t\t\tassertReplicaReadScope = zone\n\t\t}\n\t}\n\n\tif testfork.PickEnum(t, \"\", \"closetRead\") != \"\" {\n\t\ttk.MustExec(\"set @@global.tidb_replica_read='closest-replicas'\")\n\t\tif zone != \"\" {\n\t\t\tassertReplicaReadScope = zone\n\t\t}\n\t}\n\n\tsuccess = true\n\treturn clearFunc\n}\n<commit_msg>*: fix unstable test in `sessiontxn` (#36072)<commit_after>\/\/ Copyright 2022 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage isolation_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/failpoint\"\n\t\"github.com\/pingcap\/tidb\/infoschema\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\"\n\t\"github.com\/pingcap\/tidb\/sessiontxn\"\n\t\"github.com\/pingcap\/tidb\/testkit\"\n\t\"github.com\/pingcap\/tidb\/testkit\/testfork\"\n\t\"github.com\/pingcap\/tidb\/testkit\/testsetup\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/tikv\/client-go\/v2\/oracle\"\n\t\"github.com\/tikv\/client-go\/v2\/tikv\"\n\t\"go.uber.org\/goleak\"\n)\n\nfunc TestMain(m *testing.M) {\n\ttestsetup.SetupForCommonTest()\n\ttikv.EnableFailpoints()\n\topts := []goleak.Option{\n\t\tgoleak.IgnoreTopFunction(\"github.com\/golang\/glog.(*loggingT).flushDaemon\"),\n\t\tgoleak.IgnoreTopFunction(\"go.etcd.io\/etcd\/client\/pkg\/v3\/logutil.(*MergeLogger).outputLoop\"),\n\t\tgoleak.IgnoreTopFunction(\"go.opencensus.io\/stats\/view.(*worker).start\"),\n\t}\n\tgoleak.VerifyTestMain(m, opts...)\n}\n\nfunc getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 {\n\tts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})\n\trequire.NoError(t, err)\n\treturn ts\n}\n\ntype txnAssert[T sessiontxn.TxnContextProvider] struct {\n\tsctx sessionctx.Context\n\tisolation string\n\tminStartTime time.Time\n\tactive bool\n\tinTxn bool\n\tminStartTS uint64\n\tstartTS uint64\n\tcausalConsistencyOnly bool\n\tcouldRetry bool\n}\n\nfunc (a *txnAssert[T]) Check(t testing.TB) {\n\tprovider := sessiontxn.GetTxnManager(a.sctx).GetContextProvider()\n\tsessVars := a.sctx.GetSessionVars()\n\ttxnCtx := sessVars.TxnCtx\n\n\tif sessVars.SnapshotInfoschema == nil {\n\t\trequire.Same(t, provider.GetTxnInfoSchema(), txnCtx.InfoSchema)\n\t} else {\n\t\trequire.Equal(t, sessVars.SnapshotInfoschema.(infoschema.InfoSchema).SchemaMetaVersion(), provider.GetTxnInfoSchema().SchemaMetaVersion())\n\t}\n\trequire.Equal(t, a.isolation, txnCtx.Isolation)\n\trequire.Equal(t, a.isolation != \"\", txnCtx.IsPessimistic)\n\trequire.Equal(t, sessVars.CheckAndGetTxnScope(), txnCtx.TxnScope)\n\trequire.Equal(t, sessVars.ShardAllocateStep, int64(txnCtx.ShardStep))\n\trequire.False(t, txnCtx.IsStaleness)\n\trequire.GreaterOrEqual(t, txnCtx.CreateTime.UnixNano(), a.minStartTime.UnixNano())\n\trequire.Equal(t, a.inTxn, sessVars.InTxn())\n\trequire.Equal(t, a.inTxn, txnCtx.IsExplicit)\n\trequire.Equal(t, a.couldRetry, txnCtx.CouldRetry)\n\trequire.Equal(t, assertTxnScope, txnCtx.TxnScope)\n\trequire.Equal(t, assertTxnScope, provider.GetTxnScope())\n\trequire.Equal(t, assertReplicaReadScope, provider.GetReadReplicaScope())\n\n\ttxn, err := a.sctx.Txn(false)\n\trequire.NoError(t, err)\n\trequire.Equal(t, a.active, txn.Valid())\n\tif !a.active {\n\t\trequire.False(t, a.inTxn)\n\t\trequire.Zero(t, a.startTS)\n\t\trequire.Zero(t, txnCtx.StartTS)\n\t} else {\n\t\trequire.True(t, a.minStartTS != 0 || a.startTS != 0)\n\t\trequire.Greater(t, txnCtx.StartTS, a.minStartTS)\n\t\tif a.startTS != 0 {\n\t\t\trequire.Equal(t, a.startTS, txnCtx.StartTS)\n\t\t}\n\t\trequire.Equal(t, txnCtx.StartTS, txn.StartTS())\n\t\trequire.Same(t, sessVars.KVVars, txn.GetVars())\n\t\trequire.Equal(t, txnCtx.TxnScope, txn.GetOption(kv.TxnScope))\n\t\trequire.Equal(t, a.causalConsistencyOnly, !txn.GetOption(kv.GuaranteeLinearizability).(bool))\n\t\trequire.Equal(t, txnCtx.IsPessimistic, txn.IsPessimistic())\n\t}\n\t\/\/ The next line is testing the provider has the type T, if not, the cast will panic\n\t_ = provider.(T)\n}\n\nfunc activeSnapshotTxnAssert(sctx sessionctx.Context, ts uint64, isolation string) *txnAssert[sessiontxn.TxnContextProvider] {\n\treturn &txnAssert[sessiontxn.TxnContextProvider]{\n\t\tsctx: sctx,\n\t\tminStartTime: time.Now(),\n\t\tstartTS: ts,\n\t\tactive: true,\n\t\tinTxn: false,\n\t\tisolation: isolation,\n\t}\n}\n\nfunc (a *txnAssert[T]) CheckAndGetProvider(t testing.TB) T {\n\ta.Check(t)\n\treturn sessiontxn.GetTxnManager(a.sctx).GetContextProvider().(T)\n}\n\nvar assertTxnScope = kv.GlobalTxnScope\nvar assertReplicaReadScope = kv.GlobalReplicaScope\n\nfunc forkScopeSettings(t *testfork.T, store kv.Storage) func() {\n\ttk := testkit.NewTestKit(t, store)\n\tfailPointEnabled := false\n\tclearFunc := func() {\n\t\tassertTxnScope = kv.GlobalTxnScope\n\t\tassertReplicaReadScope = kv.GlobalReplicaScope\n\t\ttk.MustExec(\"set @@global.tidb_replica_read='leader'\")\n\t\ttk.MustExec(\"set @@global.tidb_enable_local_txn=0\")\n\t\tif failPointEnabled {\n\t\t\trequire.NoError(t, failpoint.Disable(\"tikvclient\/injectTxnScope\"))\n\t\t}\n\t}\n\n\tclearFunc()\n\tsuccess := false\n\tdefer func() {\n\t\tif !success {\n\t\t\tclearFunc()\n\t\t}\n\t}()\n\n\tzone := testfork.PickEnum(t, \"\", \"bj\")\n\tif zone != \"\" {\n\t\trequire.NoError(t, failpoint.Enable(\"tikvclient\/injectTxnScope\", fmt.Sprintf(`return(\"%v\")`, zone)))\n\t\tfailPointEnabled = true\n\t\tif testfork.PickEnum(t, \"\", \"enableLocalTxn\") != \"\" {\n\t\t\ttk.MustExec(\"set @@global.tidb_enable_local_txn=1\")\n\t\t\tassertTxnScope = zone\n\t\t\tassertReplicaReadScope = zone\n\t\t}\n\t}\n\n\tif testfork.PickEnum(t, \"\", \"closetRead\") != \"\" {\n\t\ttk.MustExec(\"set @@global.tidb_replica_read='closest-replicas'\")\n\t\tif zone != \"\" {\n\t\t\tassertReplicaReadScope = zone\n\t\t}\n\t}\n\n\tsuccess = true\n\treturn clearFunc\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Mark Bates <mark@markbates.com>\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage generate\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/markbates\/gentronics\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar publicLogo = &gentronics.RemoteFile{\n\tFile: gentronics.NewFile(\"public\/assets\/images\/logo.svg\", \"\"),\n\tRemotePath: \"https:\/\/raw.githubusercontent.com\/gobuffalo\/buffalo\/master\/logo.svg\",\n}\n\nvar assetsLogo = &gentronics.RemoteFile{\n\tFile: gentronics.NewFile(\"assets\/images\/logo.svg\", \"\"),\n\tRemotePath: \"https:\/\/raw.githubusercontent.com\/gobuffalo\/buffalo\/master\/logo.svg\",\n}\n\n\/\/ WebpackCmd generates a new actions\/resource file and a stub test.\nvar WebpackCmd = &cobra.Command{\n\tUse: \"webpack\",\n\tShort: \"Generates a webpack asset pipeline.\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tdata := gentronics.Data{\n\t\t\t\"withWebpack\": true,\n\t\t}\n\t\treturn NewWebpackGenerator(data).Run(\".\", data)\n\t},\n}\n\n\/\/ NewWebpackGenerator generates a new actions\/resource file and a stub test.\nfunc NewWebpackGenerator(data gentronics.Data) *gentronics.Generator {\n\tg := gentronics.New()\n\n\tshould := func(data gentronics.Data) bool {\n\t\tif b, ok := data[\"withWebpack\"]; ok {\n\t\t\treturn b.(bool)\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ if we're not using web pack save the logo and return\n\tif !should(data) {\n\t\tg.Add(publicLogo)\n\t\tg.Add(gentronics.NewFile(\"public\/assets\/application.js\", \"\"))\n\t\tg.Add(gentronics.NewFile(\"public\/assets\/application.css\", nwApplicationCSS))\n\t\treturn g\n\t}\n\n\t\/\/ if there's no npm, return!\n\t_, err := exec.LookPath(\"npm\")\n\tif err != nil {\n\t\tfmt.Println(\"Could not find npm\/node. Skipping webpack generation.\")\n\t\tg.Add(publicLogo)\n\t\treturn g\n\t}\n\n\tg.Should = should\n\tg.Add(assetsLogo)\n\tg.Add(gentronics.NewFile(\"webpack.config.js\", nWebpack))\n\tg.Add(gentronics.NewFile(\"public\/assets\/.gitignore\", \"\"))\n\tg.Add(gentronics.NewFile(\"assets\/js\/application.js\", wApplicationJS))\n\tg.Add(gentronics.NewFile(\"assets\/css\/application.scss\", wApplicationCSS))\n\tc := gentronics.NewCommand(exec.Command(\"npm\", \"install\", \"webpack\", \"-g\"))\n\tg.Add(c)\n\tc = gentronics.NewCommand(exec.Command(\"npm\", \"init\", \"-y\"))\n\tg.Add(c)\n\n\tmodules := []string{\"webpack\", \"sass-loader\", \"css-loader\", \"style-loader\", \"node-sass\",\n\t\t\"babel-loader\", \"extract-text-webpack-plugin\", \"babel\", \"babel-core\", \"url-loader\", \"file-loader\",\n\t\t\"jquery\", \"bootstrap\", \"path\", \"font-awesome\", \"npm-install-webpack-plugin\", \"jquery-ujs\",\n\t\t\"copy-webpack-plugin\",\n\t}\n\targs := []string{\"install\", \"--save\"}\n\targs = append(args, modules...)\n\tg.Add(gentronics.NewCommand(exec.Command(\"npm\", args...)))\n\treturn g\n}\n\nvar nWebpack = `var webpack = require(\"webpack\");\nvar CopyWebpackPlugin = require('copy-webpack-plugin');\nvar ExtractTextPlugin = require(\"extract-text-webpack-plugin\");\n\nmodule.exports = {\n entry: [\n \".\/assets\/js\/application.js\",\n \".\/assets\/css\/application.scss\",\n \".\/node_modules\/jquery-ujs\/src\/rails.js\"\n ],\n output: {\n filename: \"application.js\",\n path: \".\/public\/assets\"\n },\n plugins: [\n new webpack.ProvidePlugin({\n $: \"jquery\",\n jQuery: \"jquery\"\n }),\n new ExtractTextPlugin(\"application.css\"),\n new CopyWebpackPlugin([{\n from: \".\/assets\",\n to: \"\"\n }], {\n ignore: [\n \"css\/*\",\n \"js\/*\",\n ]\n })\n ],\n module: {\n loaders: [{\n test: \/\\.jsx?$\/,\n loader: \"babel\",\n exclude: \/node_modules\/\n }, {\n test: \/\\.scss$\/,\n loader: ExtractTextPlugin.extract(\n \"style\",\n \"css?sourceMap!sass?sourceMap\"\n )\n }, {\n test: \/\\.woff(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/font-woff\"\n }, {\n test: \/\\.woff2(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/font-woff\"\n }, {\n test: \/\\.ttf(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/octet-stream\"\n }, {\n test: \/\\.eot(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"file\"\n }, {\n test: \/\\.svg(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=image\/svg+xml\"\n }]\n }\n};\n`\n\nconst wApplicationJS = `require(\"bootstrap\/dist\/js\/bootstrap.js\");\n\n$(() => {\n\n});`\nconst wApplicationCSS = `@import \"~bootstrap\/dist\/css\/bootstrap.css\";\n@import \"~font-awesome\/css\/font-awesome.css\";\n`\n\nconst nwApplicationCSS = `* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333;\n background-color: #fff;\n margin: 0;\n}\n\nh1, h2 {\n margin-top: 20px;\n margin-bottom: 10px;\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\n\nh1 {\n font-size: 36px;\n}\n\nh2 {\n font-size: 30px;\n}\n\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eee;\n height: 0;\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n}\n\na {\n color: #337ab7;\n text-decoration: none;\n}\n\na:hover {\n color: #23527c;\n}\n\n.container {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n background-color: transparent;\n border-spacing: 0;\n border-collapse: collapse;\n}\n\n.table-striped > tbody {\n background-color: #f9f9f9;\n}\n\n.table > thead > tr > th, .table > tbody > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n\n.table > thead > tr > th {\n border-top: 0;\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n text-align: left;\n}\n\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\n\n.row {\n margin-right: -15px;\n margin-left: -15px;\n}\n\n.col-md-2, .col-md-10 {\n float: left;\n position: relative;\n min-height: 1px;\n padding-right: 15px;\n padding-left: 15px;\n}\n\n.col-md-2 {\n width: 16.66666667%;\n}\n\n.col-md-10 {\n width: 83.33333333%;\n}\n\nimg {\n vertical-align: middle;\n border: 0;\n}\n`\n<commit_msg>point people to npm docs if there is an issue running npm<commit_after>\/\/ Copyright © 2016 Mark Bates <mark@markbates.com>\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage generate\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/markbates\/gentronics\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar publicLogo = &gentronics.RemoteFile{\n\tFile: gentronics.NewFile(\"public\/assets\/images\/logo.svg\", \"\"),\n\tRemotePath: \"https:\/\/raw.githubusercontent.com\/gobuffalo\/buffalo\/master\/logo.svg\",\n}\n\nvar assetsLogo = &gentronics.RemoteFile{\n\tFile: gentronics.NewFile(\"assets\/images\/logo.svg\", \"\"),\n\tRemotePath: \"https:\/\/raw.githubusercontent.com\/gobuffalo\/buffalo\/master\/logo.svg\",\n}\n\n\/\/ WebpackCmd generates a new actions\/resource file and a stub test.\nvar WebpackCmd = &cobra.Command{\n\tUse: \"webpack\",\n\tShort: \"Generates a webpack asset pipeline.\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tdata := gentronics.Data{\n\t\t\t\"withWebpack\": true,\n\t\t}\n\t\treturn NewWebpackGenerator(data).Run(\".\", data)\n\t},\n}\n\n\/\/ NewWebpackGenerator generates a new actions\/resource file and a stub test.\nfunc NewWebpackGenerator(data gentronics.Data) *gentronics.Generator {\n\tg := gentronics.New()\n\n\tshould := func(data gentronics.Data) bool {\n\t\tif b, ok := data[\"withWebpack\"]; ok {\n\t\t\treturn b.(bool)\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ if we're not using web pack save the logo and return\n\tif !should(data) {\n\t\tg.Add(publicLogo)\n\t\tg.Add(gentronics.NewFile(\"public\/assets\/application.js\", \"\"))\n\t\tg.Add(gentronics.NewFile(\"public\/assets\/application.css\", nwApplicationCSS))\n\t\treturn g\n\t}\n\n\t\/\/ if there's no npm, return!\n\t_, err := exec.LookPath(\"npm\")\n\tif err != nil {\n\t\tfmt.Println(\"Could not find npm\/node. Skipping webpack generation.\")\n\t\tg.Add(publicLogo)\n\t\treturn g\n\t}\n\n\tg.Should = should\n\tg.Add(assetsLogo)\n\tg.Add(gentronics.NewFile(\"webpack.config.js\", nWebpack))\n\tg.Add(gentronics.NewFile(\"public\/assets\/.gitignore\", \"\"))\n\tg.Add(gentronics.NewFile(\"assets\/js\/application.js\", wApplicationJS))\n\tg.Add(gentronics.NewFile(\"assets\/css\/application.scss\", wApplicationCSS))\n\n\tc := gentronics.NewCommand(exec.Command(\"npm\", \"install\", \"webpack\", \"-g\"))\n\trf := gentronics.RunFn(func(path string, data gentronics.Data) error {\n\t\terr := c.Run(path, data)\n\t\tif err != nil {\n\t\t\tu := \"https:\/\/docs.npmjs.com\/getting-started\/fixing-npm-permissions\"\n\t\t\tfmt.Printf(\"There was a problem running npm install. You may want to checkout their docs for help!\\n%s\\n\", u)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tg.Add(&gentronics.Func{\n\t\tRunner: rf,\n\t\tShould: should,\n\t})\n\n\tc = gentronics.NewCommand(exec.Command(\"npm\", \"init\", \"-y\"))\n\tg.Add(c)\n\n\tmodules := []string{\"webpack\", \"sass-loader\", \"css-loader\", \"style-loader\", \"node-sass\",\n\t\t\"babel-loader\", \"extract-text-webpack-plugin\", \"babel\", \"babel-core\", \"url-loader\", \"file-loader\",\n\t\t\"jquery\", \"bootstrap\", \"path\", \"font-awesome\", \"npm-install-webpack-plugin\", \"jquery-ujs\",\n\t\t\"copy-webpack-plugin\",\n\t}\n\targs := []string{\"install\", \"--save\"}\n\targs = append(args, modules...)\n\tg.Add(gentronics.NewCommand(exec.Command(\"npm\", args...)))\n\treturn g\n}\n\nvar nWebpack = `var webpack = require(\"webpack\");\nvar CopyWebpackPlugin = require('copy-webpack-plugin');\nvar ExtractTextPlugin = require(\"extract-text-webpack-plugin\");\n\nmodule.exports = {\n entry: [\n \".\/assets\/js\/application.js\",\n \".\/assets\/css\/application.scss\",\n \".\/node_modules\/jquery-ujs\/src\/rails.js\"\n ],\n output: {\n filename: \"application.js\",\n path: \".\/public\/assets\"\n },\n plugins: [\n new webpack.ProvidePlugin({\n $: \"jquery\",\n jQuery: \"jquery\"\n }),\n new ExtractTextPlugin(\"application.css\"),\n new CopyWebpackPlugin([{\n from: \".\/assets\",\n to: \"\"\n }], {\n ignore: [\n \"css\/*\",\n \"js\/*\",\n ]\n })\n ],\n module: {\n loaders: [{\n test: \/\\.jsx?$\/,\n loader: \"babel\",\n exclude: \/node_modules\/\n }, {\n test: \/\\.scss$\/,\n loader: ExtractTextPlugin.extract(\n \"style\",\n \"css?sourceMap!sass?sourceMap\"\n )\n }, {\n test: \/\\.woff(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/font-woff\"\n }, {\n test: \/\\.woff2(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/font-woff\"\n }, {\n test: \/\\.ttf(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=application\/octet-stream\"\n }, {\n test: \/\\.eot(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"file\"\n }, {\n test: \/\\.svg(\\?v=\\d+\\.\\d+\\.\\d+)?$\/,\n loader: \"url?limit=10000&mimetype=image\/svg+xml\"\n }]\n }\n};\n`\n\nconst wApplicationJS = `require(\"bootstrap\/dist\/js\/bootstrap.js\");\n\n$(() => {\n\n});`\nconst wApplicationCSS = `@import \"~bootstrap\/dist\/css\/bootstrap.css\";\n@import \"~font-awesome\/css\/font-awesome.css\";\n`\n\nconst nwApplicationCSS = `* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333;\n background-color: #fff;\n margin: 0;\n}\n\nh1, h2 {\n margin-top: 20px;\n margin-bottom: 10px;\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\n\nh1 {\n font-size: 36px;\n}\n\nh2 {\n font-size: 30px;\n}\n\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eee;\n height: 0;\n -webkit-box-sizing: content-box;\n -moz-box-sizing: content-box;\n box-sizing: content-box;\n}\n\na {\n color: #337ab7;\n text-decoration: none;\n}\n\na:hover {\n color: #23527c;\n}\n\n.container {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n background-color: transparent;\n border-spacing: 0;\n border-collapse: collapse;\n}\n\n.table-striped > tbody {\n background-color: #f9f9f9;\n}\n\n.table > thead > tr > th, .table > tbody > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n\n.table > thead > tr > th {\n border-top: 0;\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n text-align: left;\n}\n\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\n\n.row {\n margin-right: -15px;\n margin-left: -15px;\n}\n\n.col-md-2, .col-md-10 {\n float: left;\n position: relative;\n min-height: 1px;\n padding-right: 15px;\n padding-left: 15px;\n}\n\n.col-md-2 {\n width: 16.66666667%;\n}\n\n.col-md-10 {\n width: 83.33333333%;\n}\n\nimg {\n vertical-align: middle;\n border: 0;\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\n\/* Read functions\n *\/\nfunc GetHostDeployment(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer PanicCatcher(w)\n\tvar (\n\t\terr error\n\t\tassetid int64\n\t)\n\n\tif _, err = uuid.FromString(params.ByName(\"system\")); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\tif assetid, err = strconv.ParseInt(params.ByName(\"assetid\"), 10, 64); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\treturnChannel := make(chan somaResult)\n\thandler := handlerMap[\"hostDeploymentHandler\"].(somaHostDeploymentHandler)\n\thandler.input <- somaHostDeploymentRequest{\n\t\taction: \"get\",\n\t\treply: returnChannel,\n\t\tsystem: params.ByName(\"system\"),\n\t\tassetid: assetid,\n\t}\n\tresult := <-returnChannel\n\tSendHostDeploymentReply(&w, &result)\n}\n\nfunc AssembleHostUpdate(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer PanicCatcher(w)\n\tvar (\n\t\terr error\n\t\tassetid int64\n\t)\n\n\tif _, err = uuid.FromString(params.ByName(\"system\")); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\tif assetid, err = strconv.ParseInt(params.ByName(\"assetid\"), 10, 64); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\tcReq := proto.Request{}\n\tif err = DecodeJsonBody(r, &cReq); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\tif cReq.HostDeployment == nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\treturnChannel := make(chan somaResult)\n\thandler := handlerMap[\"hostDeploymentHandler\"].(somaHostDeploymentHandler)\n\thandler.input <- somaHostDeploymentRequest{\n\t\taction: \"assemble\",\n\t\treply: returnChannel,\n\t\tsystem: params.ByName(\"system\"),\n\t\tassetid: assetid,\n\t\tidlist: cReq.HostDeployment.CurrentCheckInstanceIdList,\n\t}\n\tresult := <-returnChannel\n\tSendHostDeploymentReply(&w, &result)\n}\n\n\/* Utility\n *\/\nfunc SendHostDeploymentReply(w *http.ResponseWriter, r *somaResult) {\n\tresult := proto.NewHostDeploymentResult()\n\tif r.MarkErrors(&result) {\n\t\tgoto dispatch\n\t}\n\tfor _, i := range (*r).HostDeployments {\n\t\tif i.Delete {\n\t\t\t*result.HostDeployments = append(*result.HostDeployments, proto.HostDeployment{\n\t\t\t\tDeleteInstance: true,\n\t\t\t\tCheckInstanceId: i.DeleteId,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\t*result.Deployments = append(*result.Deployments, i.Deployment)\n\t}\n\ndispatch:\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tDispatchInternalError(w, err)\n\t\treturn\n\t}\n\tDispatchJsonReply(w, &json)\n\treturn\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<commit_msg>Send better error message than nil<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\n\/* Read functions\n *\/\nfunc GetHostDeployment(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer PanicCatcher(w)\n\tvar (\n\t\terr error\n\t\tassetid int64\n\t)\n\n\tif _, err = uuid.FromString(params.ByName(\"system\")); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\tif assetid, err = strconv.ParseInt(params.ByName(\"assetid\"), 10, 64); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\treturnChannel := make(chan somaResult)\n\thandler := handlerMap[\"hostDeploymentHandler\"].(somaHostDeploymentHandler)\n\thandler.input <- somaHostDeploymentRequest{\n\t\taction: \"get\",\n\t\treply: returnChannel,\n\t\tsystem: params.ByName(\"system\"),\n\t\tassetid: assetid,\n\t}\n\tresult := <-returnChannel\n\tSendHostDeploymentReply(&w, &result)\n}\n\nfunc AssembleHostUpdate(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer PanicCatcher(w)\n\tvar (\n\t\terr error\n\t\tassetid int64\n\t)\n\n\tif _, err = uuid.FromString(params.ByName(\"system\")); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\tif assetid, err = strconv.ParseInt(params.ByName(\"assetid\"), 10, 64); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\tcReq := proto.Request{}\n\tif err = DecodeJsonBody(r, &cReq); err != nil {\n\t\tDispatchBadRequest(&w, err)\n\t\treturn\n\t}\n\n\tif cReq.HostDeployment == nil {\n\t\tDispatchBadRequest(&w, fmt.Errorf(`HostDeployment section missing`))\n\t\treturn\n\t}\n\n\treturnChannel := make(chan somaResult)\n\thandler := handlerMap[\"hostDeploymentHandler\"].(somaHostDeploymentHandler)\n\thandler.input <- somaHostDeploymentRequest{\n\t\taction: \"assemble\",\n\t\treply: returnChannel,\n\t\tsystem: params.ByName(\"system\"),\n\t\tassetid: assetid,\n\t\tidlist: cReq.HostDeployment.CurrentCheckInstanceIdList,\n\t}\n\tresult := <-returnChannel\n\tSendHostDeploymentReply(&w, &result)\n}\n\n\/* Utility\n *\/\nfunc SendHostDeploymentReply(w *http.ResponseWriter, r *somaResult) {\n\tresult := proto.NewHostDeploymentResult()\n\tif r.MarkErrors(&result) {\n\t\tgoto dispatch\n\t}\n\tfor _, i := range (*r).HostDeployments {\n\t\tif i.Delete {\n\t\t\t*result.HostDeployments = append(*result.HostDeployments, proto.HostDeployment{\n\t\t\t\tDeleteInstance: true,\n\t\t\t\tCheckInstanceId: i.DeleteId,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\t*result.Deployments = append(*result.Deployments, i.Deployment)\n\t}\n\ndispatch:\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tDispatchInternalError(w, err)\n\t\treturn\n\t}\n\tDispatchJsonReply(w, &json)\n\treturn\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>\/* https:\/\/leetcode.com\/problems\/majority-element\/#\/description\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n\/2 ⌋ times.\n\nYou may assume that the array is non-empty and the majority element always exist in the array.\n\nCredits:\nSpecial thanks to @ts for adding this problem and creating all test cases.\n*\/\n\npackage leetcode\n\nfunc majorityElement(nums []int) int {\n\tmaps := make(map[int]int)\n\tmid := len(nums) \/ 2\n\tfor i := 0; i < len(nums); i++ {\n\t\tif v, ok := maps[nums[i]]; ok {\n\t\t\tv += 1\n\t\t\tmaps[nums[i]] = v\n\t\t} else {\n\t\t\tmaps[nums[i]] = 1\n\t\t}\n\t\tif v, ok := maps[nums[i]]; ok && v > mid {\n\t\t\treturn nums[i]\n\t\t}\n\t}\n\tpanic(\"nums cannot be empty\")\n}\n<commit_msg>don't use hash table<commit_after>\/* https:\/\/leetcode.com\/problems\/majority-element\/#\/description\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n\/2 ⌋ times.\n\nYou may assume that the array is non-empty and the majority element always exist in the array.\n\nCredits:\nSpecial thanks to @ts for adding this problem and creating all test cases.\n*\/\n\npackage leetcode\n\nfunc majorityElement(nums []int) int {\n\t\/*maps := make(map[int]int)\n\t mid := len(nums) \/ 2\n\t for i := 0; i < len(nums); i++ {\n\t if v, ok := maps[nums[i]]; ok {\n\t v += 1\n\t maps[nums[i]] = v\n\t } else {\n\t maps[nums[i]t ] = 1\n\t }\n\t if v, ok := maps[nums[i]]; ok && v > mid {\n\t return nums[i]\n\t }\n\t }\n\t panic(\"nums cannot be empty\")\n\t*\/\n\tcur, count := nums[0], 1\n\tfor i := 1; i < len(nums); i++ {\n\t\tif count == 0 {\n\t\t\tcount = 1\n\t\t\tcur = nums[i]\n\t\t} else if cur == nums[i] {\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount--\n\t\t}\n\t}\n\treturn cur\n}\n<|endoftext|>"} {"text":"<commit_before>\/* https:\/\/leetcode.com\/problems\/partition-list\/description\/\nGiven a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\nFor example,\nGiven 1->4->3->2->5->2 and x = 3,\nreturn 1->2->2->4->3->5.\n*\/\n\npackage leetcode\n\nfunc partition(head *ListNode, x int) *ListNode {\n\tltHead, gtHead := &ListNode{Val: 0}, &ListNode{Val: 0}\n\tltCur, gtCur := ltHead, gtHead\n\n\tfor cur := head; cur != nil; cur = cur.Next {\n\t\ttmp := &ListNode{Val: cur.Val}\n\t\tif cur.Val < x {\n\t\t\tltCur.Next, ltCur = tmp, tmp\n\t\t} else {\n\t\t\tgtCur.Next, gtCur = tmp, tmp\n\t\t}\n\t}\n\tltCur.Next = gtHead.Next\n\treturn ltHead.Next\n}\n<commit_msg>fix var name<commit_after>\/* https:\/\/leetcode.com\/problems\/partition-list\/description\/\nGiven a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\nFor example,\nGiven 1->4->3->2->5->2 and x = 3,\nreturn 1->2->2->4->3->5.\n*\/\n\npackage leetcode\n\nfunc partition(head *ListNode, x int) *ListNode {\n\tltHead, gteHead := &ListNode{Val: 0}, &ListNode{Val: 0}\n\tltCur, gteCur := ltHead, gteHead\n\n\tfor cur := head; cur != nil; cur = cur.Next {\n\t\ttmp := &ListNode{Val: cur.Val}\n\t\tif cur.Val < x {\n\t\t\tltCur.Next, ltCur = tmp, tmp\n\t\t} else {\n\t\t\tgteCur.Next, gteCur = tmp, tmp\n\t\t}\n\t}\n\tltCur.Next = gteHead.Next\n\treturn ltHead.Next\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n)\n\nvar serverStartTimeout = flag.Duration(\"server-start-timeout\", time.Second*120, \"Time to wait for each server to become healthy.\")\nvar reportDir = flag.String(\"report-dir\", \"\", \"Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.\")\n\ntype e2eService struct {\n\tkillCmds []*killCmd\n\trmDirs []string\n\n\tetcdDataDir string\n\tkubeletStaticPodDir string\n\tnodeName string\n\tlogFiles map[string]logFileData\n\tcgroupsPerQOS bool\n}\n\ntype logFileData struct {\n\tfiles []string\n\tjournalctlCommand []string\n}\n\nconst (\n\t\/\/ This is consistent with the level used in a cluster e2e test.\n\tLOG_VERBOSITY_LEVEL = \"4\"\n)\n\nfunc newE2eService(nodeName string, cgroupsPerQOS bool) *e2eService {\n\t\/\/ Special log files that need to be collected for additional debugging.\n\tvar logFiles = map[string]logFileData{\n\t\t\"kern.log\": {[]string{\"\/var\/log\/kern.log\"}, []string{\"-k\"}},\n\t\t\"docker.log\": {[]string{\"\/var\/log\/docker.log\", \"\/var\/log\/upstart\/docker.log\"}, []string{\"-u\", \"docker\"}},\n\t}\n\n\treturn &e2eService{\n\t\tnodeName: nodeName,\n\t\tlogFiles: logFiles,\n\t\tcgroupsPerQOS: cgroupsPerQOS,\n\t}\n}\n\nfunc (es *e2eService) start() error {\n\tif _, err := getK8sBin(\"kubelet\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := getK8sBin(\"kube-apiserver\"); err != nil {\n\t\treturn err\n\t}\n\n\tcmd, err := es.startEtcd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\tes.rmDirs = append(es.rmDirs, es.etcdDataDir)\n\n\tcmd, err = es.startApiServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\n\tcmd, err = es.startKubeletServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\tes.rmDirs = append(es.rmDirs, es.kubeletStaticPodDir)\n\n\treturn nil\n}\n\n\/\/ Get logs of interest either via journalctl or by creating sym links.\n\/\/ Since we scp files from the remote directory, symlinks will be treated as normal files and file contents will be copied over.\nfunc (es *e2eService) getLogFiles() {\n\t\/\/ Nothing to do if report dir is not specified.\n\tif *reportDir == \"\" {\n\t\treturn\n\t}\n\tjournaldFound := isJournaldAvailable()\n\tfor targetFileName, logFileData := range es.logFiles {\n\t\ttargetLink := path.Join(*reportDir, targetFileName)\n\t\tif journaldFound {\n\t\t\t\/\/ Skip log files that do not have an equivalent in journald based machines.\n\t\t\tif len(logFileData.journalctlCommand) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout, err := exec.Command(\"sudo\", append([]string{\"journalctl\"}, logFileData.journalctlCommand...)...).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"failed to get %q from journald: %v, %v\", targetFileName, string(out), err)\n\t\t\t} else {\n\t\t\t\tif err = ioutil.WriteFile(targetLink, out, 0755); err != nil {\n\t\t\t\t\tglog.Errorf(\"failed to write logs to %q: %v\", targetLink, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfor _, file := range logFileData.files {\n\t\t\tif _, err := os.Stat(file); err != nil {\n\t\t\t\t\/\/ Expected file not found on this distro.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := copyLogFile(file, targetLink); err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc copyLogFile(src, target string) error {\n\t\/\/ If not a journald based distro, then just symlink files.\n\tif out, err := exec.Command(\"sudo\", \"cp\", src, target).CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"failed to copy %q to %q: %v, %v\", src, target, out, err)\n\t}\n\tif out, err := exec.Command(\"sudo\", \"chmod\", \"a+r\", target).CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"failed to make log file %q world readable: %v, %v\", target, out, err)\n\t}\n\treturn nil\n}\n\nfunc isJournaldAvailable() bool {\n\t_, err := exec.LookPath(\"journalctl\")\n\treturn err == nil\n}\n\nfunc (es *e2eService) stop() {\n\tfor _, k := range es.killCmds {\n\t\tif err := k.Kill(); err != nil {\n\t\t\tglog.Errorf(\"Failed to stop %v: %v\", k.name, err)\n\t\t}\n\t}\n\tfor _, d := range es.rmDirs {\n\t\terr := os.RemoveAll(d)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to delete directory %s.\\n%v\", d, err)\n\t\t}\n\t}\n}\n\nfunc (es *e2eService) startEtcd() (*killCmd, error) {\n\tdataDir, err := ioutil.TempDir(\"\", \"node-e2e\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tes.etcdDataDir = dataDir\n\tcmd := exec.Command(\"etcd\")\n\t\/\/ Execute etcd in the data directory instead of using --data-dir because the flag sometimes requires additional\n\t\/\/ configuration (e.g. --name in version 0.4.9)\n\tcmd.Dir = es.etcdDataDir\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:4001\/v2\/keys\/\", \/\/ Trailing slash is required,\n\t\tcmd,\n\t\t\"etcd.log\")\n\treturn &killCmd{name: \"etcd\", cmd: cmd}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startApiServer() (*killCmd, error) {\n\tcmd := exec.Command(\"sudo\", getApiServerBin(),\n\t\t\"--etcd-servers\", \"http:\/\/127.0.0.1:4001\",\n\t\t\"--insecure-bind-address\", \"0.0.0.0\",\n\t\t\"--service-cluster-ip-range\", \"10.0.0.1\/24\",\n\t\t\"--kubelet-port\", \"10250\",\n\t\t\"--allow-privileged\", \"true\",\n\t\t\"--v\", LOG_VERBOSITY_LEVEL, \"--logtostderr\",\n\t)\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:8080\/healthz\",\n\t\tcmd,\n\t\t\"kube-apiserver.log\")\n\treturn &killCmd{name: \"kube-apiserver\", cmd: cmd}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startKubeletServer() (*killCmd, error) {\n\tdataDir, err := ioutil.TempDir(\"\", \"node-e2e-pod\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tes.kubeletStaticPodDir = dataDir\n\tvar killOverride *exec.Cmd\n\tcmdArgs := []string{}\n\tif systemdRun, err := exec.LookPath(\"systemd-run\"); err == nil {\n\t\t\/\/ On systemd services, detection of a service \/ unit works reliably while\n\t\t\/\/ detection of a process started from an ssh session does not work.\n\t\t\/\/ Since kubelet will typically be run as a service it also makes more\n\t\t\/\/ sense to test it that way\n\t\tunitName := fmt.Sprintf(\"kubelet-%d.service\", rand.Int31())\n\t\tcmdArgs = append(cmdArgs, systemdRun, \"--unit=\"+unitName, getKubeletServerBin())\n\t\tkillOverride = exec.Command(\"sudo\", \"systemctl\", \"kill\", unitName)\n\t\tes.logFiles[\"kubelet.log\"] = logFileData{\n\t\t\tjournalctlCommand: []string{\"-u\", unitName},\n\t\t}\n\t} else {\n\t\tcmdArgs = append(cmdArgs, getKubeletServerBin())\n\t}\n\tcmdArgs = append(cmdArgs,\n\t\t\"--api-servers\", \"http:\/\/127.0.0.1:8080\",\n\t\t\"--address\", \"0.0.0.0\",\n\t\t\"--port\", \"10250\",\n\t\t\"--hostname-override\", es.nodeName, \/\/ Required because hostname is inconsistent across hosts\n\t\t\"--volume-stats-agg-period\", \"10s\", \/\/ Aggregate volumes frequently so tests don't need to wait as long\n\t\t\"--allow-privileged\", \"true\",\n\t\t\"--serialize-image-pulls\", \"false\",\n\t\t\"--config\", es.kubeletStaticPodDir,\n\t\t\"--file-check-frequency\", \"10s\", \/\/ Check file frequently so tests won't wait too long\n\t\t\"--v\", LOG_VERBOSITY_LEVEL, \"--logtostderr\",\n\t\t\"--pod-cidr=10.180.0.0\/24\", \/\/ Assign a fixed CIDR to the node because there is no node controller.\n\t)\n\tif es.cgroupsPerQOS {\n\t\tcmdArgs = append(cmdArgs,\n\t\t\t\"--cgroups-per-qos\", \"true\",\n\t\t\t\"--cgroup-root\", \"\/\",\n\t\t)\n\t}\n\tif !*disableKubenet {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcmdArgs = append(cmdArgs,\n\t\t\t\"--network-plugin=kubenet\",\n\t\t\t\"--network-plugin-dir\", filepath.Join(cwd, CNIDirectory, \"bin\")) \/\/ Enable kubenet\n\t}\n\n\tcmd := exec.Command(\"sudo\", cmdArgs...)\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:10255\/healthz\",\n\t\tcmd,\n\t\t\"kubelet.log\")\n\treturn &killCmd{name: \"kubelet\", cmd: cmd, override: killOverride}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startServer(cmd *healthCheckCommand) error {\n\tcmdErrorChan := make(chan error)\n\tgo func() {\n\t\tdefer close(cmdErrorChan)\n\n\t\t\/\/ Create the output filename\n\t\toutPath := path.Join(*reportDir, cmd.outputFilename)\n\t\toutfile, err := os.Create(outPath)\n\t\tif err != nil {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"Failed to create file %s for `%s` %v.\", outPath, cmd, err)\n\t\t\treturn\n\t\t}\n\t\tdefer outfile.Close()\n\t\tdefer outfile.Sync()\n\n\t\t\/\/ Set the command to write the output file\n\t\tcmd.Cmd.Stdout = outfile\n\t\tcmd.Cmd.Stderr = outfile\n\n\t\t\/\/ Killing the sudo command should kill the server as well.\n\t\tattrs := &syscall.SysProcAttr{}\n\t\t\/\/ Hack to set linux-only field without build tags.\n\t\tdeathSigField := reflect.ValueOf(attrs).Elem().FieldByName(\"Pdeathsig\")\n\t\tif deathSigField.IsValid() {\n\t\t\tdeathSigField.Set(reflect.ValueOf(syscall.SIGKILL))\n\t\t} else {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"Failed to set Pdeathsig field (non-linux build)\")\n\t\t\treturn\n\t\t}\n\t\tcmd.Cmd.SysProcAttr = attrs\n\n\t\t\/\/ Run the command\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"%s Failed with error \\\"%v\\\". Output written to: %s\", cmd, err, outPath)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tendTime := time.Now().Add(*serverStartTimeout)\n\tfor endTime.After(time.Now()) {\n\t\tselect {\n\t\tcase err := <-cmdErrorChan:\n\t\t\treturn err\n\t\tcase <-time.After(time.Second):\n\t\t\tresp, err := http.Get(cmd.HealthCheckUrl)\n\t\t\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Timeout waiting for service %s\", cmd)\n}\n\n\/\/ killCmd is a struct to kill a given cmd. The cmd member specifies a command\n\/\/ to find the pid of and attempt to kill.\n\/\/ If the override field is set, that will be used instead to kill the command.\n\/\/ name is only used for logging\ntype killCmd struct {\n\tname string\n\tcmd *exec.Cmd\n\toverride *exec.Cmd\n}\n\nfunc (k *killCmd) Kill() error {\n\tname := k.name\n\tcmd := k.cmd\n\n\tif k.override != nil {\n\t\treturn k.override.Run()\n\t}\n\n\tif cmd == nil {\n\t\treturn fmt.Errorf(\"Could not kill %s because both `override` and `cmd` are nil\", name)\n\t}\n\n\tif cmd.Process == nil {\n\t\tglog.V(2).Infof(\"%s not running\", name)\n\t\treturn nil\n\t}\n\tpid := cmd.Process.Pid\n\tif pid <= 1 {\n\t\treturn fmt.Errorf(\"invalid PID %d for %s\", pid, name)\n\t}\n\n\t\/\/ Attempt to shut down the process in a friendly manner before forcing it.\n\twaitChan := make(chan error)\n\tgo func() {\n\t\t_, err := cmd.Process.Wait()\n\t\twaitChan <- err\n\t\tclose(waitChan)\n\t}()\n\n\tconst timeout = 10 * time.Second\n\tfor _, signal := range []string{\"-TERM\", \"-KILL\"} {\n\t\tglog.V(2).Infof(\"Killing process %d (%s) with %s\", pid, name, signal)\n\t\t_, err := exec.Command(\"sudo\", \"kill\", signal, strconv.Itoa(pid)).Output()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error signaling process %d (%s) with %s: %v\", pid, name, signal, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase err := <-waitChan:\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error stopping %s: %v\", name, err)\n\t\t\t}\n\t\t\t\/\/ Success!\n\t\t\treturn nil\n\t\tcase <-time.After(timeout):\n\t\t\t\/\/ Continue.\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"unable to stop %s\", name)\n}\n\ntype healthCheckCommand struct {\n\t*exec.Cmd\n\tHealthCheckUrl string\n\toutputFilename string\n}\n\nfunc newHealthCheckCommand(healthCheckUrl string, cmd *exec.Cmd, filename string) *healthCheckCommand {\n\treturn &healthCheckCommand{\n\t\tHealthCheckUrl: healthCheckUrl,\n\t\tCmd: cmd,\n\t\toutputFilename: filename,\n\t}\n}\n\nfunc (hcc *healthCheckCommand) String() string {\n\treturn fmt.Sprintf(\"`%s` health-check: %s\", strings.Join(append([]string{hcc.Path}, hcc.Args[1:]...), \" \"), hcc.HealthCheckUrl)\n}\n<commit_msg>Change process group when sending kill signal<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n)\n\nvar serverStartTimeout = flag.Duration(\"server-start-timeout\", time.Second*120, \"Time to wait for each server to become healthy.\")\nvar reportDir = flag.String(\"report-dir\", \"\", \"Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.\")\n\ntype e2eService struct {\n\tkillCmds []*killCmd\n\trmDirs []string\n\n\tetcdDataDir string\n\tkubeletStaticPodDir string\n\tnodeName string\n\tlogFiles map[string]logFileData\n\tcgroupsPerQOS bool\n}\n\ntype logFileData struct {\n\tfiles []string\n\tjournalctlCommand []string\n}\n\nconst (\n\t\/\/ This is consistent with the level used in a cluster e2e test.\n\tLOG_VERBOSITY_LEVEL = \"4\"\n)\n\nfunc newE2eService(nodeName string, cgroupsPerQOS bool) *e2eService {\n\t\/\/ Special log files that need to be collected for additional debugging.\n\tvar logFiles = map[string]logFileData{\n\t\t\"kern.log\": {[]string{\"\/var\/log\/kern.log\"}, []string{\"-k\"}},\n\t\t\"docker.log\": {[]string{\"\/var\/log\/docker.log\", \"\/var\/log\/upstart\/docker.log\"}, []string{\"-u\", \"docker\"}},\n\t}\n\n\treturn &e2eService{\n\t\tnodeName: nodeName,\n\t\tlogFiles: logFiles,\n\t\tcgroupsPerQOS: cgroupsPerQOS,\n\t}\n}\n\nfunc (es *e2eService) start() error {\n\tif _, err := getK8sBin(\"kubelet\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := getK8sBin(\"kube-apiserver\"); err != nil {\n\t\treturn err\n\t}\n\n\tcmd, err := es.startEtcd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\tes.rmDirs = append(es.rmDirs, es.etcdDataDir)\n\n\tcmd, err = es.startApiServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\n\tcmd, err = es.startKubeletServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tes.killCmds = append(es.killCmds, cmd)\n\tes.rmDirs = append(es.rmDirs, es.kubeletStaticPodDir)\n\n\treturn nil\n}\n\n\/\/ Get logs of interest either via journalctl or by creating sym links.\n\/\/ Since we scp files from the remote directory, symlinks will be treated as normal files and file contents will be copied over.\nfunc (es *e2eService) getLogFiles() {\n\t\/\/ Nothing to do if report dir is not specified.\n\tif *reportDir == \"\" {\n\t\treturn\n\t}\n\tjournaldFound := isJournaldAvailable()\n\tfor targetFileName, logFileData := range es.logFiles {\n\t\ttargetLink := path.Join(*reportDir, targetFileName)\n\t\tif journaldFound {\n\t\t\t\/\/ Skip log files that do not have an equivalent in journald based machines.\n\t\t\tif len(logFileData.journalctlCommand) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout, err := exec.Command(\"sudo\", append([]string{\"journalctl\"}, logFileData.journalctlCommand...)...).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"failed to get %q from journald: %v, %v\", targetFileName, string(out), err)\n\t\t\t} else {\n\t\t\t\tif err = ioutil.WriteFile(targetLink, out, 0755); err != nil {\n\t\t\t\t\tglog.Errorf(\"failed to write logs to %q: %v\", targetLink, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfor _, file := range logFileData.files {\n\t\t\tif _, err := os.Stat(file); err != nil {\n\t\t\t\t\/\/ Expected file not found on this distro.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := copyLogFile(file, targetLink); err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc copyLogFile(src, target string) error {\n\t\/\/ If not a journald based distro, then just symlink files.\n\tif out, err := exec.Command(\"sudo\", \"cp\", src, target).CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"failed to copy %q to %q: %v, %v\", src, target, out, err)\n\t}\n\tif out, err := exec.Command(\"sudo\", \"chmod\", \"a+r\", target).CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"failed to make log file %q world readable: %v, %v\", target, out, err)\n\t}\n\treturn nil\n}\n\nfunc isJournaldAvailable() bool {\n\t_, err := exec.LookPath(\"journalctl\")\n\treturn err == nil\n}\n\nfunc (es *e2eService) stop() {\n\tfor _, k := range es.killCmds {\n\t\tif err := k.Kill(); err != nil {\n\t\t\tglog.Errorf(\"Failed to stop %v: %v\", k.name, err)\n\t\t}\n\t}\n\tfor _, d := range es.rmDirs {\n\t\terr := os.RemoveAll(d)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to delete directory %s.\\n%v\", d, err)\n\t\t}\n\t}\n}\n\nfunc (es *e2eService) startEtcd() (*killCmd, error) {\n\tdataDir, err := ioutil.TempDir(\"\", \"node-e2e\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tes.etcdDataDir = dataDir\n\tcmd := exec.Command(\"etcd\")\n\t\/\/ Execute etcd in the data directory instead of using --data-dir because the flag sometimes requires additional\n\t\/\/ configuration (e.g. --name in version 0.4.9)\n\tcmd.Dir = es.etcdDataDir\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:4001\/v2\/keys\/\", \/\/ Trailing slash is required,\n\t\tcmd,\n\t\t\"etcd.log\")\n\treturn &killCmd{name: \"etcd\", cmd: cmd}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startApiServer() (*killCmd, error) {\n\tcmd := exec.Command(\"sudo\", getApiServerBin(),\n\t\t\"--etcd-servers\", \"http:\/\/127.0.0.1:4001\",\n\t\t\"--insecure-bind-address\", \"0.0.0.0\",\n\t\t\"--service-cluster-ip-range\", \"10.0.0.1\/24\",\n\t\t\"--kubelet-port\", \"10250\",\n\t\t\"--allow-privileged\", \"true\",\n\t\t\"--v\", LOG_VERBOSITY_LEVEL, \"--logtostderr\",\n\t)\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:8080\/healthz\",\n\t\tcmd,\n\t\t\"kube-apiserver.log\")\n\treturn &killCmd{name: \"kube-apiserver\", cmd: cmd}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startKubeletServer() (*killCmd, error) {\n\tdataDir, err := ioutil.TempDir(\"\", \"node-e2e-pod\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tes.kubeletStaticPodDir = dataDir\n\tvar killOverride *exec.Cmd\n\tcmdArgs := []string{}\n\tif systemdRun, err := exec.LookPath(\"systemd-run\"); err == nil {\n\t\t\/\/ On systemd services, detection of a service \/ unit works reliably while\n\t\t\/\/ detection of a process started from an ssh session does not work.\n\t\t\/\/ Since kubelet will typically be run as a service it also makes more\n\t\t\/\/ sense to test it that way\n\t\tunitName := fmt.Sprintf(\"kubelet-%d.service\", rand.Int31())\n\t\tcmdArgs = append(cmdArgs, systemdRun, \"--unit=\"+unitName, getKubeletServerBin())\n\t\tkillOverride = exec.Command(\"sudo\", \"systemctl\", \"kill\", unitName)\n\t\tes.logFiles[\"kubelet.log\"] = logFileData{\n\t\t\tjournalctlCommand: []string{\"-u\", unitName},\n\t\t}\n\t} else {\n\t\tcmdArgs = append(cmdArgs, getKubeletServerBin())\n\t}\n\tcmdArgs = append(cmdArgs,\n\t\t\"--api-servers\", \"http:\/\/127.0.0.1:8080\",\n\t\t\"--address\", \"0.0.0.0\",\n\t\t\"--port\", \"10250\",\n\t\t\"--hostname-override\", es.nodeName, \/\/ Required because hostname is inconsistent across hosts\n\t\t\"--volume-stats-agg-period\", \"10s\", \/\/ Aggregate volumes frequently so tests don't need to wait as long\n\t\t\"--allow-privileged\", \"true\",\n\t\t\"--serialize-image-pulls\", \"false\",\n\t\t\"--config\", es.kubeletStaticPodDir,\n\t\t\"--file-check-frequency\", \"10s\", \/\/ Check file frequently so tests won't wait too long\n\t\t\"--v\", LOG_VERBOSITY_LEVEL, \"--logtostderr\",\n\t\t\"--pod-cidr=10.180.0.0\/24\", \/\/ Assign a fixed CIDR to the node because there is no node controller.\n\t)\n\tif es.cgroupsPerQOS {\n\t\tcmdArgs = append(cmdArgs,\n\t\t\t\"--cgroups-per-qos\", \"true\",\n\t\t\t\"--cgroup-root\", \"\/\",\n\t\t)\n\t}\n\tif !*disableKubenet {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcmdArgs = append(cmdArgs,\n\t\t\t\"--network-plugin=kubenet\",\n\t\t\t\"--network-plugin-dir\", filepath.Join(cwd, CNIDirectory, \"bin\")) \/\/ Enable kubenet\n\t}\n\n\tcmd := exec.Command(\"sudo\", cmdArgs...)\n\thcc := newHealthCheckCommand(\n\t\t\"http:\/\/127.0.0.1:10255\/healthz\",\n\t\tcmd,\n\t\t\"kubelet.log\")\n\treturn &killCmd{name: \"kubelet\", cmd: cmd, override: killOverride}, es.startServer(hcc)\n}\n\nfunc (es *e2eService) startServer(cmd *healthCheckCommand) error {\n\tcmdErrorChan := make(chan error)\n\tgo func() {\n\t\tdefer close(cmdErrorChan)\n\n\t\t\/\/ Create the output filename\n\t\toutPath := path.Join(*reportDir, cmd.outputFilename)\n\t\toutfile, err := os.Create(outPath)\n\t\tif err != nil {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"Failed to create file %s for `%s` %v.\", outPath, cmd, err)\n\t\t\treturn\n\t\t}\n\t\tdefer outfile.Close()\n\t\tdefer outfile.Sync()\n\n\t\t\/\/ Set the command to write the output file\n\t\tcmd.Cmd.Stdout = outfile\n\t\tcmd.Cmd.Stderr = outfile\n\n\t\t\/\/ Killing the sudo command should kill the server as well.\n\t\tattrs := &syscall.SysProcAttr{}\n\t\t\/\/ Hack to set linux-only field without build tags.\n\t\tdeathSigField := reflect.ValueOf(attrs).Elem().FieldByName(\"Pdeathsig\")\n\t\tif deathSigField.IsValid() {\n\t\t\tdeathSigField.Set(reflect.ValueOf(syscall.SIGKILL))\n\t\t} else {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"Failed to set Pdeathsig field (non-linux build)\")\n\t\t\treturn\n\t\t}\n\t\tcmd.Cmd.SysProcAttr = attrs\n\n\t\t\/\/ Run the command\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\tcmdErrorChan <- fmt.Errorf(\"%s Failed with error \\\"%v\\\". Output written to: %s\", cmd, err, outPath)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tendTime := time.Now().Add(*serverStartTimeout)\n\tfor endTime.After(time.Now()) {\n\t\tselect {\n\t\tcase err := <-cmdErrorChan:\n\t\t\treturn err\n\t\tcase <-time.After(time.Second):\n\t\t\tresp, err := http.Get(cmd.HealthCheckUrl)\n\t\t\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Timeout waiting for service %s\", cmd)\n}\n\n\/\/ killCmd is a struct to kill a given cmd. The cmd member specifies a command\n\/\/ to find the pid of and attempt to kill.\n\/\/ If the override field is set, that will be used instead to kill the command.\n\/\/ name is only used for logging\ntype killCmd struct {\n\tname string\n\tcmd *exec.Cmd\n\toverride *exec.Cmd\n}\n\nfunc (k *killCmd) Kill() error {\n\tname := k.name\n\tcmd := k.cmd\n\n\tif k.override != nil {\n\t\treturn k.override.Run()\n\t}\n\n\tif cmd == nil {\n\t\treturn fmt.Errorf(\"Could not kill %s because both `override` and `cmd` are nil\", name)\n\t}\n\n\tif cmd.Process == nil {\n\t\tglog.V(2).Infof(\"%s not running\", name)\n\t\treturn nil\n\t}\n\tpid := cmd.Process.Pid\n\tif pid <= 1 {\n\t\treturn fmt.Errorf(\"invalid PID %d for %s\", pid, name)\n\t}\n\n\t\/\/ Attempt to shut down the process in a friendly manner before forcing it.\n\twaitChan := make(chan error)\n\tgo func() {\n\t\t_, err := cmd.Process.Wait()\n\t\twaitChan <- err\n\t\tclose(waitChan)\n\t}()\n\n\tconst timeout = 10 * time.Second\n\tfor _, signal := range []string{\"-TERM\", \"-KILL\"} {\n\t\tglog.V(2).Infof(\"Killing process %d (%s) with %s\", pid, name, signal)\n\t\tcmd := exec.Command(\"sudo\", \"kill\", signal, strconv.Itoa(pid))\n\t\t\/\/ Run the 'kill' command in a separate process group so sudo doesn't ignore it\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n\t\t_, err := cmd.Output()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error signaling process %d (%s) with %s: %v\", pid, name, signal, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase err := <-waitChan:\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error stopping %s: %v\", name, err)\n\t\t\t}\n\t\t\t\/\/ Success!\n\t\t\treturn nil\n\t\tcase <-time.After(timeout):\n\t\t\t\/\/ Continue.\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"unable to stop %s\", name)\n}\n\ntype healthCheckCommand struct {\n\t*exec.Cmd\n\tHealthCheckUrl string\n\toutputFilename string\n}\n\nfunc newHealthCheckCommand(healthCheckUrl string, cmd *exec.Cmd, filename string) *healthCheckCommand {\n\treturn &healthCheckCommand{\n\t\tHealthCheckUrl: healthCheckUrl,\n\t\tCmd: cmd,\n\t\toutputFilename: filename,\n\t}\n}\n\nfunc (hcc *healthCheckCommand) String() string {\n\treturn fmt.Sprintf(\"`%s` health-check: %s\", strings.Join(append([]string{hcc.Path}, hcc.Args[1:]...), \" \"), hcc.HealthCheckUrl)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ compile\n\n\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage p\n\n\/\/ any is now permitted instead of interface{}\nvar x any\n<commit_msg>test\/fixedbugs: adjust test case (fix longtest builders)<commit_after>\/\/ compile -G=3\n\n\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage p\n\n\/\/ any is now permitted instead of interface{}\nvar x any\n<|endoftext|>"} {"text":"<commit_before>\/\/ 11 february 2014\n\/\/package ui\npackage main\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n\t\"runtime\"\n)\n\nvar uitask chan *uimsg\n\ntype uimsg struct {\n\tcall\t\t*syscall.LazyProc\n\tp\t\t[]uintptr\n\tret\t\tchan uiret\n}\n\ntype uiret struct {\n\tret\t\tuintptr\n\terr\t\terror\n}\n\nfunc ui(initDone chan error) {\n\truntime.LockOSThread()\n\n\tuitask = make(chan *uimsg)\n\tinitDone <- doWindowsInit()\n\n\tfor m := range uitask {\n\t\tr1, _, err := m.msg.Call(m.p...)\n\t\tm.ret <- uiret{\n\t\t\tret:\tr1,\n\t\t\terr:\terr,\n\t\t}\n\t}\n}\n<commit_msg>Added the Windows message loop.<commit_after>\/\/ 11 february 2014\n\/\/package ui\npackage main\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\t\"unsafe\"\n\t\"runtime\"\n)\n\nvar uitask chan *uimsg\n\ntype uimsg struct {\n\tcall\t\t*syscall.LazyProc\n\tp\t\t[]uintptr\n\tret\t\tchan uiret\n}\n\ntype uiret struct {\n\tret\t\tuintptr\n\terr\t\terror\n}\n\nfunc ui(initDone chan error) {\n\truntime.LockOSThread()\n\n\tuitask = make(chan *uimsg)\n\tinitDone <- doWindowsInit()\n\n\tgo msgloop()\n\n\tfor m := range uitask {\n\t\tr1, _, err := m.msg.Call(m.p...)\n\t\tm.ret <- uiret{\n\t\t\tret:\tr1,\n\t\t\terr:\terr,\n\t\t}\n\t}\n}\n\nvar (\n\t_dispatchMessage = user32.NewProc(\"DispatchMessageW\")\n\t_getMessage = user32.NewProc(\"GetMessageW\")\n\t_postQuitMessage = user32.NewProc(\"PostQuitMessage\")\n\t_sendMessage = user32.NewProc(\"SendMessageW\")\n\t_translateMessage = user32.NewProc(\"TranslateMessage\")\n)\n\nfunc msgloop() {\n\tvar msg struct {\n\t\tHwnd\t_HWND\n\t\tMessage\tuint32\n\t\tWParam\t_WPARAM\n\t\tLParam\t_LPARAM\n\t\tTime\t\tuint32\n\t\tPt\t\t_POINT\n\t}\n\n\tfor {\n\t\tr1, _, err := getMessage.Call(\n\t\t\tuintptr(unsafe.Pointer(&msg)),\n\t\t\tuintptr(_NULL),\n\t\t\tuintptr(0),\n\t\t\tuintptr(0))\n\t\tif r1 == uintptr(getMessageFail) {\t\t\/\/ failure\n\t\t\tpanic(fmt.Sprintf(\"GetMessage failed: %v\", err))\n\t\t} else if r1 == 0 {\t\/\/ quit\n\t\t\tbreak\n\t\t}\n\t\t\/\/ TODO handle potential errors in TranslateMessage() and DispatchMessage()\n\t\t_translateMessage.Call(uintptr(unsafe.Pointer(&msg)))\n\t\t_dispatchMessage.Call(uintptr(unsafe.Pointer(&msg)))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package sdp\n\nimport (\n\t\"testing\"\n)\n\nconst (\n\tBaseSDP = \"v=0\\r\\n\" +\n\t\t\"o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\\r\\n\" +\n\t\t\"s=SDP Seminar\\r\\n\"\n\n\tSessionInformationSDP = BaseSDP +\n\t\t\"i=A Seminar on the session description protocol\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tURISDP = BaseSDP +\n\t\t\"u=http:\/\/www.example.com\/seminars\/sdp.pdf\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tEmailAddressSDP = BaseSDP +\n\t\t\"e=j.doe@example.com (Jane Doe)\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tPhoneNumberSDP = BaseSDP +\n\t\t\"p=+1 617 555-6011\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tSessionConnectionInformationSDP = BaseSDP +\n\t\t\"c=IN IP4 224.2.17.12\/127\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tSessionBandwidthSDP = BaseSDP +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tTimingSDP = BaseSDP +\n\t\t\"t=2873397496 2873404696\\r\\n\"\n\n\t\/\/ Short hand time notation is converted into NTP timestamp format in\n\t\/\/ seconds. Because of that unittest comparisons will fail as the same time\n\t\/\/ will be expressed in different units.\n\tRepeatTimesSDP = TimingSDP +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"r=3d 2h 0 21h\\r\\n\"\n\n\tRepeatTimesSDPExpected = TimingSDP +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"r=259200 7200 0 75600\\r\\n\"\n\n\t\/\/ The expected value looks a bit different for the same reason as mentioned\n\t\/\/ above regarding RepeatTimes.\n\tTimeZonesSDP = TimingSDP +\n\t\t\"r=2882844526 -1h 2898848070 0\\r\\n\"\n\n\tTimeZonesSDPExpected = TimingSDP +\n\t\t\"r=2882844526 -3600 2898848070 0\\r\\n\"\n\n\tSessionEncryptionKeySDP = TimingSDP +\n\t\t\"k=prompt\\r\\n\"\n\n\tSessionAttributesSDP = TimingSDP +\n\t\t\"a=rtpmap:96 opus\/48000\\r\\n\"\n\n\tMediaNameSDP = TimingSDP +\n\t\t\"m=video 51372 RTP\/AVP 99\\r\\n\" +\n\t\t\"m=audio 54400 RTP\/SAVPF 0 96\\r\\n\"\n\n\tMediaTitleSDP = MediaNameSDP +\n\t\t\"i=Vivamus a posuere nisl\\r\\n\"\n\n\tMediaConnectionInformationSDP = MediaNameSDP +\n\t\t\"c=IN IP4 203.0.113.1\\r\\n\"\n\n\tMediaBandwidthSDP = MediaNameSDP +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\"\n\n\tMediaEncryptionKeySDP = MediaNameSDP +\n\t\t\"k=prompt\\r\\n\"\n\n\tMediaAttributesSDP = MediaNameSDP +\n\t\t\"a=rtpmap:99 h263-1998\/90000\\r\\n\" +\n\t\t\"a=candidate:0 1 UDP 2113667327 203.0.113.1 54400 typ host\\r\\n\"\n\n\tCanonicalUnmarshalSDP = \"v=0\\r\\n\" +\n\t\t\"o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\\r\\n\" +\n\t\t\"s=SDP Seminar\\r\\n\" +\n\t\t\"i=A Seminar on the session description protocol\\r\\n\" +\n\t\t\"u=http:\/\/www.example.com\/seminars\/sdp.pdf\\r\\n\" +\n\t\t\"e=j.doe@example.com (Jane Doe)\\r\\n\" +\n\t\t\"p=+1 617 555-6011\\r\\n\" +\n\t\t\"c=IN IP4 224.2.17.12\/127\\r\\n\" +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\" +\n\t\t\"t=2873397496 2873404696\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\" +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"z=2882844526 -3600 2898848070 0\\r\\n\" +\n\t\t\"k=prompt\\r\\n\" +\n\t\t\"a=candidate:0 1 UDP 2113667327 203.0.113.1 54400 typ host\\r\\n\" +\n\t\t\"a=recvonly\\r\\n\" +\n\t\t\"m=audio 49170 RTP\/AVP 0\\r\\n\" +\n\t\t\"i=Vivamus a posuere nisl\\r\\n\" +\n\t\t\"c=IN IP4 203.0.113.1\\r\\n\" +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"k=prompt\\r\\n\" +\n\t\t\"a=sendrecv\\r\\n\" +\n\t\t\"m=video 51372 RTP\/AVP 99\\r\\n\" +\n\t\t\"a=rtpmap:99 h263-1998\/90000\\r\\n\"\n)\n\nfunc TestUnmarshalSessionInformation(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(SessionInformationSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != SessionInformationSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", SessionInformationSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalURI(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(URISDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != URISDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", URISDP, actual)\n\t}\n}\n\nfunc TestUnmarshalEmailAddress(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(EmailAddressSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != EmailAddressSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", EmailAddressSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalPhoneNumber(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(PhoneNumberSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != PhoneNumberSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", PhoneNumberSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalSessionConnectionInformation(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(SessionConnectionInformationSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != SessionConnectionInformationSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", SessionConnectionInformationSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalSessionBandwidth(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(SessionBandwidthSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != SessionBandwidthSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", SessionBandwidthSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalRepeatTimes(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(RepeatTimesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != RepeatTimesSDPExpected {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", RepeatTimesSDPExpected, actual)\n\t}\n}\n\nfunc TestUnmarshalTimeZones(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(TimeZonesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != TimeZonesSDPExpected {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", TimeZonesSDPExpected, actual)\n\t}\n}\n\nfunc TestUnmarshalSessionEncryptionKey(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(SessionEncryptionKeySDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != SessionEncryptionKeySDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", SessionEncryptionKeySDP, actual)\n\t}\n}\n\nfunc TestUnmarshalSessionAttributes(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(SessionAttributesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != SessionAttributesSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", SessionAttributesSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaName(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaNameSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaNameSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaNameSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaTitle(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaTitleSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaTitleSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaTitleSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaConnectionInformation(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaConnectionInformationSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaConnectionInformationSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaConnectionInformationSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaBandwidth(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaBandwidthSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaBandwidthSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaBandwidthSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaEncryptionKey(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaEncryptionKeySDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaEncryptionKeySDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaEncryptionKeySDP, actual)\n\t}\n}\n\nfunc TestUnmarshalMediaAttributes(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(MediaAttributesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != MediaAttributesSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", MediaAttributesSDP, actual)\n\t}\n}\n\nfunc TestUnmarshalCanonical(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(CanonicalUnmarshalSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != CanonicalUnmarshalSDP {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", CanonicalUnmarshalSDP, actual)\n\t}\n}\n<commit_msg>Refactor into table-driven tests<commit_after>package sdp\n\nimport (\n\t\"testing\"\n)\n\nconst (\n\tBaseSDP = \"v=0\\r\\n\" +\n\t\t\"o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\\r\\n\" +\n\t\t\"s=SDP Seminar\\r\\n\"\n\n\tSessionInformationSDP = BaseSDP +\n\t\t\"i=A Seminar on the session description protocol\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tURISDP = BaseSDP +\n\t\t\"u=http:\/\/www.example.com\/seminars\/sdp.pdf\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tEmailAddressSDP = BaseSDP +\n\t\t\"e=j.doe@example.com (Jane Doe)\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tPhoneNumberSDP = BaseSDP +\n\t\t\"p=+1 617 555-6011\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tSessionConnectionInformationSDP = BaseSDP +\n\t\t\"c=IN IP4 224.2.17.12\/127\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tSessionBandwidthSDP = BaseSDP +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\"\n\n\tTimingSDP = BaseSDP +\n\t\t\"t=2873397496 2873404696\\r\\n\"\n\n\t\/\/ Short hand time notation is converted into NTP timestamp format in\n\t\/\/ seconds. Because of that unittest comparisons will fail as the same time\n\t\/\/ will be expressed in different units.\n\tRepeatTimesSDP = TimingSDP +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"r=3d 2h 0 21h\\r\\n\"\n\n\tRepeatTimesSDPExpected = TimingSDP +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"r=259200 7200 0 75600\\r\\n\"\n\n\t\/\/ The expected value looks a bit different for the same reason as mentioned\n\t\/\/ above regarding RepeatTimes.\n\tTimeZonesSDP = TimingSDP +\n\t\t\"r=2882844526 -1h 2898848070 0\\r\\n\"\n\n\tTimeZonesSDPExpected = TimingSDP +\n\t\t\"r=2882844526 -3600 2898848070 0\\r\\n\"\n\n\tSessionEncryptionKeySDP = TimingSDP +\n\t\t\"k=prompt\\r\\n\"\n\n\tSessionAttributesSDP = TimingSDP +\n\t\t\"a=rtpmap:96 opus\/48000\\r\\n\"\n\n\tMediaNameSDP = TimingSDP +\n\t\t\"m=video 51372 RTP\/AVP 99\\r\\n\" +\n\t\t\"m=audio 54400 RTP\/SAVPF 0 96\\r\\n\"\n\n\tMediaTitleSDP = MediaNameSDP +\n\t\t\"i=Vivamus a posuere nisl\\r\\n\"\n\n\tMediaConnectionInformationSDP = MediaNameSDP +\n\t\t\"c=IN IP4 203.0.113.1\\r\\n\"\n\n\tMediaBandwidthSDP = MediaNameSDP +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\"\n\n\tMediaEncryptionKeySDP = MediaNameSDP +\n\t\t\"k=prompt\\r\\n\"\n\n\tMediaAttributesSDP = MediaNameSDP +\n\t\t\"a=rtpmap:99 h263-1998\/90000\\r\\n\" +\n\t\t\"a=candidate:0 1 UDP 2113667327 203.0.113.1 54400 typ host\\r\\n\"\n\n\tCanonicalUnmarshalSDP = \"v=0\\r\\n\" +\n\t\t\"o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\\r\\n\" +\n\t\t\"s=SDP Seminar\\r\\n\" +\n\t\t\"i=A Seminar on the session description protocol\\r\\n\" +\n\t\t\"u=http:\/\/www.example.com\/seminars\/sdp.pdf\\r\\n\" +\n\t\t\"e=j.doe@example.com (Jane Doe)\\r\\n\" +\n\t\t\"p=+1 617 555-6011\\r\\n\" +\n\t\t\"c=IN IP4 224.2.17.12\/127\\r\\n\" +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"b=AS:12345\\r\\n\" +\n\t\t\"t=2873397496 2873404696\\r\\n\" +\n\t\t\"t=3034423619 3042462419\\r\\n\" +\n\t\t\"r=604800 3600 0 90000\\r\\n\" +\n\t\t\"z=2882844526 -3600 2898848070 0\\r\\n\" +\n\t\t\"k=prompt\\r\\n\" +\n\t\t\"a=candidate:0 1 UDP 2113667327 203.0.113.1 54400 typ host\\r\\n\" +\n\t\t\"a=recvonly\\r\\n\" +\n\t\t\"m=audio 49170 RTP\/AVP 0\\r\\n\" +\n\t\t\"i=Vivamus a posuere nisl\\r\\n\" +\n\t\t\"c=IN IP4 203.0.113.1\\r\\n\" +\n\t\t\"b=X-YZ:128\\r\\n\" +\n\t\t\"k=prompt\\r\\n\" +\n\t\t\"a=sendrecv\\r\\n\" +\n\t\t\"m=video 51372 RTP\/AVP 99\\r\\n\" +\n\t\t\"a=rtpmap:99 h263-1998\/90000\\r\\n\"\n)\n\nfunc TestRoundTrip(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tName string\n\t\tSDP string\n\t}{\n\t\t{\n\t\t\tName: \"SessionInformation\",\n\t\t\tSDP: SessionInformationSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"URI\",\n\t\t\tSDP: URISDP,\n\t\t},\n\t\t{\n\t\t\tName: \"EmailAddress\",\n\t\t\tSDP: string(EmailAddressSDP),\n\t\t},\n\t\t{\n\t\t\tName: \"PhoneNumber\",\n\t\t\tSDP: PhoneNumberSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"SessionConnectionInformation\",\n\t\t\tSDP: SessionConnectionInformationSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"SessionConnectionInformation\",\n\t\t\tSDP: SessionConnectionInformationSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"SessionBandwidth\",\n\t\t\tSDP: SessionBandwidthSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"SessionEncryptionKey\",\n\t\t\tSDP: SessionEncryptionKeySDP,\n\t\t},\n\t\t{\n\t\t\tName: \"SessionAttributes\",\n\t\t\tSDP: SessionAttributesSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaName\",\n\t\t\tSDP: MediaNameSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaTitle\",\n\t\t\tSDP: MediaTitleSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaConnectionInformation\",\n\t\t\tSDP: MediaConnectionInformationSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaConnectionInformation\",\n\t\t\tSDP: MediaConnectionInformationSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaBandwidth\",\n\t\t\tSDP: MediaBandwidthSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaEncryptionKey\",\n\t\t\tSDP: MediaEncryptionKeySDP,\n\t\t},\n\t\t{\n\t\t\tName: \"MediaAttributes\",\n\t\t\tSDP: MediaAttributesSDP,\n\t\t},\n\t\t{\n\t\t\tName: \"CanonicalUnmarshal\",\n\t\t\tSDP: CanonicalUnmarshalSDP,\n\t\t},\n\t} {\n\t\tsd := &SessionDescription{}\n\n\t\terr := sd.Unmarshal(test.SDP)\n\t\tif got, want := err, error(nil); got != want {\n\t\t\tt.Fatalf(\"Unmarshal(%s): err=%v, want %v\", test.Name, got, want)\n\t\t}\n\n\t\tdata := sd.Marshal()\n\t\tif got, want := data, test.SDP; got != want {\n\t\t\tt.Fatalf(\"Marshal(%s) = %q, want %q\", test.Name, got, want)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalRepeatTimes(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(RepeatTimesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != RepeatTimesSDPExpected {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", RepeatTimesSDPExpected, actual)\n\t}\n}\n\nfunc TestUnmarshalTimeZones(t *testing.T) {\n\tsd := &SessionDescription{}\n\tif err := sd.Unmarshal(TimeZonesSDP); err != nil {\n\t\tt.Errorf(\"error: %v\", err)\n\t}\n\n\tactual := sd.Marshal()\n\tif actual != TimeZonesSDPExpected {\n\t\tt.Errorf(\"error:\\n\\nEXPECTED:\\n%v\\nACTUAL:\\n%v\", TimeZonesSDPExpected, actual)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Tideland Go Cells - Behaviors - Unit Tests - Finite State Machine\n\/\/\n\/\/ Copyright (C) 2010-2017 Frank Mueller \/ Tideland \/ Oldenburg \/ Germany\n\/\/\n\/\/ All rights reserved. Use of this source code is governed\n\/\/ by the new BSD license.\n\npackage behaviors_test\n\n\/\/--------------------\n\/\/ IMPORTS\n\/\/--------------------\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/tideland\/golib\/audit\"\n\n\t\"github.com\/tideland\/gocells\/behaviors\"\n\t\"github.com\/tideland\/gocells\/cells\"\n)\n\n\/\/--------------------\n\/\/ TESTS\n\/\/--------------------\n\n\/\/ TestFSMBehavior tests the finite state machine behavior.\nfunc TestFSMBehavior(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\tctx := context.Background()\n\tenv := cells.NewEnvironment(\"fsm-behavior\")\n\tdefer env.Stop()\n\n\tcheckCents := func(id string) int {\n\t\tpayload, err := env.Request(ctx, id, \"cents?\", cells.DefaultTimeout)\n\t\tassert.Nil(err)\n\t\treturn payload.GetDefault(0).(int)\n\t}\n\tinfo := func(id string) string {\n\t\tpayload, err := env.Request(ctx, id, \"info?\", cells.DefaultTimeout)\n\t\tassert.Nil(err)\n\t\treturn payload.GetDefault(\"\").(string)\n\t}\n\tgrabCents := func() int {\n\t\tpayload, err := env.Request(ctx, \"restorer\", \"grab!\", cells.DefaultTimeout)\n\t\tassert.Nil(err)\n\t\treturn payload.GetDefault(0).(int)\n\t}\n\n\tlockA := lockMachine{}\n\tlockB := lockMachine{}\n\n\tenv.StartCell(\"lock-a\", behaviors.NewFSMBehavior(lockA.Locked))\n\tenv.StartCell(\"lock-b\", behaviors.NewFSMBehavior(lockB.Locked))\n\tenv.StartCell(\"restorer\", newRestorerBehavior())\n\n\tenv.Subscribe(\"lock-a\", \"restorer\")\n\tenv.Subscribe(\"lock-b\", \"restorer\")\n\n\t\/\/ 1st run: emit not enough and press button.\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 20)\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 20)\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 20)\n\tenv.EmitNew(ctx, \"lock-a\", \"button-press!\", nil)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tassert.Equal(checkCents(\"lock-a\"), 0)\n\tassert.Equal(grabCents(), 60)\n\n\t\/\/ 2nd run: unlock the lock and lock it again.\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 50)\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 20)\n\tenv.EmitNew(ctx, \"lock-a\", \"coin!\", 50)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tassert.Equal(info(\"lock-a\"), \"state 'unlocked' with 20 cents\")\n\n\tenv.EmitNew(ctx, \"lock-a\", \"button-press!\", nil)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tassert.Equal(checkCents(\"lock-a\"), 00)\n\tassert.Equal(info(\"lock-a\"), \"state 'locked' with 0 cents\")\n\tassert.Equal(grabCents(), 20)\n\n\t\/\/ 3rd run: put a screwdriwer in the lock.\n\tenv.EmitNew(ctx, \"lock-a\", \"screwdriver!\", nil)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tdone, err := behaviors.RequestFSMStatus(ctx, env, \"lock-a\", time.Second)\n\tassert.Nil(err)\n\tassert.Equal(done, true)\n\n\t\/\/ 4th run: try an illegal action.\n\tenv.EmitNew(ctx, \"lock-b\", \"chewing-gum\", nil)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tdone, err = behaviors.RequestFSMStatus(ctx, env, \"lock-b\", time.Second)\n\tassert.Equal(done, true)\n\tassert.ErrorMatch(err, \"illegal topic in state 'locked': chewing-gum\")\n}\n\n\/\/--------------------\n\/\/ HELPERS\n\/\/--------------------\n\n\/\/ cents retrieves the cents out of the payload of an event.\nfunc payloadCents(event cells.Event) int {\n\treturn event.Payload().GetInt(cells.PayloadDefault, -1)\n}\n\n\/\/ lockMachine will be unlocked if enough money is inserted. After\n\/\/ that it can be locked again.\ntype lockMachine struct {\n\tcents int\n}\n\n\/\/ Locked represents the locked state receiving coins.\nfunc (m *lockMachine) Locked(cell cells.Cell, event cells.Event) (behaviors.FSMState, error) {\n\tswitch event.Topic() {\n\tcase \"cents?\":\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(m.cents)\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"info?\":\n\t\tinfo := fmt.Sprintf(\"state 'locked' with %d cents\", m.cents)\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(info)\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"coin!\":\n\t\tcents := payloadCents(event)\n\t\tif cents < 1 {\n\t\t\treturn nil, fmt.Errorf(\"do not insert buttons\")\n\t\t}\n\t\tm.cents += cents\n\t\tif m.cents > 100 {\n\t\t\tm.cents -= 100\n\t\t\treturn m.Unlocked, nil\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"button-press!\":\n\t\tif m.cents > 0 {\n\t\t\tcell.Environment().EmitNew(event.Context(), \"restorer\", \"drop!\", m.cents)\n\t\t\tm.cents = 0\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"screwdriver!\":\n\t\t\/\/ Allow a screwdriver to bring the lock into an undefined state.\n\t\treturn nil, nil\n\t}\n\treturn m.Locked, fmt.Errorf(\"illegal topic in state 'locked': %s\", event.Topic())\n}\n\n\/\/ Unlocked represents the unlocked state receiving coins.\nfunc (m *lockMachine) Unlocked(cell cells.Cell, event cells.Event) (behaviors.FSMState, error) {\n\tswitch event.Topic() {\n\tcase \"cents?\":\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(m.cents)\n\t\t}\n\t\treturn m.Unlocked, nil\n\tcase \"info?\":\n\t\tinfo := fmt.Sprintf(\"state 'unlocked' with %d cents\", m.cents)\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(info)\n\t\t}\n\t\treturn m.Unlocked, nil\n\tcase \"coin!\":\n\t\tcents := payloadCents(event)\n\t\tcell.EmitNew(event.Context(), \"return\", cents)\n\t\treturn m.Unlocked, nil\n\tcase \"button-press!\":\n\t\tcell.Environment().EmitNew(event.Context(), \"restorer\", \"drop!\", m.cents)\n\t\tm.cents = 0\n\t\treturn m.Locked, nil\n\t}\n\treturn m.Unlocked, fmt.Errorf(\"illegal topic in state 'unlocked': %s\", event.Topic())\n}\n\ntype restorerBehavior struct {\n\tcell cells.Cell\n\tcents int\n}\n\nfunc newRestorerBehavior() cells.Behavior {\n\treturn &restorerBehavior{nil, 0}\n}\n\nfunc (b *restorerBehavior) Init(c cells.Cell) error {\n\tb.cell = c\n\treturn nil\n}\n\nfunc (b *restorerBehavior) Terminate() error {\n\treturn nil\n}\n\nfunc (b *restorerBehavior) ProcessEvent(event cells.Event) error {\n\tswitch event.Topic() {\n\tcase \"grab!\":\n\t\tcents := b.cents\n\t\tb.cents = 0\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(cents)\n\t\t}\n\tcase \"drop!\":\n\t\tb.cents += payloadCents(event)\n\t}\n\treturn nil\n}\n\nfunc (b *restorerBehavior) Recover(err interface{}) error {\n\treturn nil\n}\n\n\/\/ EOF\n<commit_msg>Began refactoring FSM tests<commit_after>\/\/ Tideland Go Cells - Behaviors - Unit Tests - Finite State Machine\n\/\/\n\/\/ Copyright (C) 2010-2017 Frank Mueller \/ Tideland \/ Oldenburg \/ Germany\n\/\/\n\/\/ All rights reserved. Use of this source code is governed\n\/\/ by the new BSD license.\n\npackage behaviors_test\n\n\/\/--------------------\n\/\/ IMPORTS\n\/\/--------------------\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/tideland\/golib\/audit\"\n\n\t\"github.com\/tideland\/gocells\/behaviors\"\n\t\"github.com\/tideland\/gocells\/cells\"\n)\n\n\/\/--------------------\n\/\/ TESTS\n\/\/--------------------\n\n\/\/ TestFSMBehavior tests the finite state machine behavior.\nfunc TestFSMBehavior(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\tenv := cells.NewEnvironment(\"fsm-behavior\")\n\tdefer env.Stop()\n\n\tprocessor := func(accessor cells.EventSinkAccessor) error {\n\t\treturn nil\n\t}\n\n\tlockA := lockMachine{}\n\tlockB := lockMachine{}\n\n\tenv.StartCell(\"lock-a\", behaviors.NewFSMBehavior(lockA.Locked))\n\tenv.StartCell(\"lock-b\", behaviors.NewFSMBehavior(lockB.Locked))\n\tenv.StartCell(\"restorer\", newRestorerBehavior())\n\tenv.StartCell(\"collector\", behaviors.NewCollectorBehavior(10, processor))\n\n\tenv.Subscribe(\"lock-a\", \"restorer\", \"collector\")\n\tenv.Subscribe(\"lock-b\", \"restorer\", \"collector\")\n\n\t\/\/ 1st run: emit not enough and press button.\n\tenv.EmitNew(\"lock-a\", \"coin\", 20)\n\tenv.EmitNew(\"lock-a\", \"coin\", 20)\n\tenv.EmitNew(\"lock-a\", \"coin\", 20)\n\tenv.EmitNew(\"lock-a\", \"button-press\", nil)\n\tenv.EmitNew(\"lock-a\", \"check-cents\", nil)\n\tenv.EmitNew(\"restorer\", \"grab\", nil)\n\n\t\/\/ TODO 2017-06-07 Mue Add asserts.\n\n\t\/\/ 2nd run: unlock the lock and lock it again.\n\tenv.EmitNew(\"lock-a\", \"coin\", 50)\n\tenv.EmitNew(\"lock-a\", \"coin\", 20)\n\tenv.EmitNew(\"lock-a\", \"coin\", 50)\n\tenv.EmitNew(\"lock-a\", \"info\", nil)\n\tenv.EmitNew(\"lock-a\", \"button-press\", nil)\n\n\t\/\/ TODO 2017-06-07 Mue Add asserts.\n\n\t\/\/ 3rd run: put a screwdriwer in the lock.\n\tenv.EmitNew(\"lock-a\", \"screwdriver\", nil)\n\n\t\/\/ TODO 2017-06-07 Mue Add asserts.\n\n\t\/\/ 4th run: try an illegal action.\n\tenv.EmitNew(\"lock-b\", \"chewing-gum\", nil)\n\n\t\/\/ TODO 2017-06-07 Mue Add asserts.\n}\n\n\/\/--------------------\n\/\/ HELPERS\n\/\/--------------------\n\n\/\/ cents retrieves the cents out of the payload of an event.\nfunc payloadCents(event cells.Event) int {\n\treturn event.Payload().GetInt(cells.PayloadDefault, -1)\n}\n\n\/\/ lockMachine will be unlocked if enough money is inserted. After\n\/\/ that it can be locked again.\ntype lockMachine struct {\n\tcents int\n}\n\n\/\/ Locked represents the locked state receiving coins.\nfunc (m *lockMachine) Locked(cell cells.Cell, event cells.Event) (behaviors.FSMState, string, error) {\n\tswitch event.Topic() {\n\tcase \"check-cents\":\n\t\tcell.EmitNew(\"cents\", fmt.Sprintf(\"check-cents %s: %d\", cell.ID(), m.cents))\n\t\treturn m.Locked, \"locked\", nil\n\tcase \"info?\":\n\t\tinfo := fmt.Sprintf(\"state 'locked' with %d cents\", m.cents)\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(info)\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"coin!\":\n\t\tcents := payloadCents(event)\n\t\tif cents < 1 {\n\t\t\treturn nil, fmt.Errorf(\"do not insert buttons\")\n\t\t}\n\t\tm.cents += cents\n\t\tif m.cents > 100 {\n\t\t\tm.cents -= 100\n\t\t\treturn m.Unlocked, nil\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"button-press!\":\n\t\tif m.cents > 0 {\n\t\t\tcell.Environment().EmitNew(event.Context(), \"restorer\", \"drop!\", m.cents)\n\t\t\tm.cents = 0\n\t\t}\n\t\treturn m.Locked, nil\n\tcase \"screwdriver!\":\n\t\t\/\/ Allow a screwdriver to bring the lock into an undefined state.\n\t\treturn nil, nil\n\t}\n\treturn m.Locked, fmt.Errorf(\"illegal topic in state 'locked': %s\", event.Topic())\n}\n\n\/\/ Unlocked represents the unlocked state receiving coins.\nfunc (m *lockMachine) Unlocked(cell cells.Cell, event cells.Event) (behaviors.FSMState, error) {\n\tswitch event.Topic() {\n\tcase \"cents?\":\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(m.cents)\n\t\t}\n\t\treturn m.Unlocked, nil\n\tcase \"info?\":\n\t\tinfo := fmt.Sprintf(\"state 'unlocked' with %d cents\", m.cents)\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(info)\n\t\t}\n\t\treturn m.Unlocked, nil\n\tcase \"coin!\":\n\t\tcents := payloadCents(event)\n\t\tcell.EmitNew(event.Context(), \"return\", cents)\n\t\treturn m.Unlocked, nil\n\tcase \"button-press!\":\n\t\tcell.Environment().EmitNew(event.Context(), \"restorer\", \"drop!\", m.cents)\n\t\tm.cents = 0\n\t\treturn m.Locked, nil\n\t}\n\treturn m.Unlocked, fmt.Errorf(\"illegal topic in state 'unlocked': %s\", event.Topic())\n}\n\ntype restorerBehavior struct {\n\tcell cells.Cell\n\tcents int\n}\n\nfunc newRestorerBehavior() cells.Behavior {\n\treturn &restorerBehavior{nil, 0}\n}\n\nfunc (b *restorerBehavior) Init(c cells.Cell) error {\n\tb.cell = c\n\treturn nil\n}\n\nfunc (b *restorerBehavior) Terminate() error {\n\treturn nil\n}\n\nfunc (b *restorerBehavior) ProcessEvent(event cells.Event) error {\n\tswitch event.Topic() {\n\tcase \"grab!\":\n\t\tcents := b.cents\n\t\tb.cents = 0\n\t\tpayload, ok := cells.HasWaiterPayload(event)\n\t\tif ok {\n\t\t\tpayload.GetWaiter().Set(cents)\n\t\t}\n\tcase \"drop!\":\n\t\tb.cents += payloadCents(event)\n\t}\n\treturn nil\n}\n\nfunc (b *restorerBehavior) Recover(err interface{}) error {\n\treturn nil\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>package sourcegraph\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\ntype contextKey int\n\nconst (\n\tgrpcEndpointKey contextKey = iota\n\thttpEndpointKey\n\tcredentialsKey\n)\n\n\/\/ WithGRPCEndpoint returns a copy of parent whose clients (obtained\n\/\/ using FromContext) communicate with the given gRPC API endpoint\n\/\/ URL.\nfunc WithGRPCEndpoint(parent context.Context, url *url.URL) context.Context {\n\treturn context.WithValue(parent, grpcEndpointKey, url)\n}\n\n\/\/ GRPCEndpoint returns the context's gRPC endpoint URL that was\n\/\/ previously configured using WithGRPCEndpoint.\nfunc GRPCEndpoint(ctx context.Context) *url.URL {\n\turl, _ := ctx.Value(grpcEndpointKey).(*url.URL)\n\tif url == nil {\n\t\tpanic(\"no gRPC API endpoint URL set in context\")\n\t}\n\treturn url\n}\n\n\/\/ WithHTTPEndpoint returns a copy of parent whose clients (obtained\n\/\/ using FromContext) communicate with the given HTTP API endpoint\n\/\/ URL.\nfunc WithHTTPEndpoint(parent context.Context, url *url.URL) context.Context {\n\treturn context.WithValue(parent, httpEndpointKey, url)\n}\n\n\/\/ HTTPEndpoint returns the context's HTTP API endpoint URL that was\n\/\/ previously configured using WithHTTPEndpoint.\nfunc HTTPEndpoint(ctx context.Context) *url.URL {\n\turl, _ := ctx.Value(httpEndpointKey).(*url.URL)\n\tif url == nil {\n\t\tpanic(\"no HTTP API endpoint URL set in context\")\n\t}\n\treturn url\n}\n\n\/\/ Credentials authenticate gRPC and HTTP requests made by an API\n\/\/ client.\ntype Credentials interface {\n\toauth2.TokenSource\n}\n\n\/\/ WithCredentials returns a copy of the parent context that uses cred\n\/\/ as the credentials for future API clients constructed using this\n\/\/ context (with NewClientFromContext). It replaces (shadows) any\n\/\/ previously set credentials in the context.\nfunc WithCredentials(parent context.Context, cred Credentials) context.Context {\n\treturn context.WithValue(parent, credentialsKey, cred)\n}\n\n\/\/ CredentialsFromContext returns the credentials (if any) previously\n\/\/ set in the context by WithCredentials.\nfunc CredentialsFromContext(ctx context.Context) Credentials {\n\tcred, ok := ctx.Value(credentialsKey).(Credentials)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn cred\n}\n\nvar maxDialTimeout = 3 * time.Second\n\n\/\/ NewClientFromContext returns a Sourcegraph API client configured\n\/\/ using the context (e.g., authenticated using the context's\n\/\/ credentials).\nvar NewClientFromContext = func(ctx context.Context) *Client {\n\ttransport := keepAliveTransport\n\n\topts := []grpc.DialOption{\n\t\tgrpc.WithCodec(GRPCCodec),\n\t}\n\n\tgrpcEndpoint := GRPCEndpoint(ctx)\n\tif grpcEndpoint.Scheme == \"https\" {\n\t\topts = append(opts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\")))\n\t}\n\n\tcred := CredentialsFromContext(ctx)\n\n\t\/\/ oauth2.NewClient retrieves the underlying transport from\n\t\/\/ its passed-in context, so we need to create a dummy context\n\t\/\/ using that transport.\n\tctxWithTransport := context.WithValue(ctx, oauth2.HTTPClient, &http.Client{Transport: transport})\n\ttransport = oauth2.NewClient(ctxWithTransport, cred).Transport\n\n\t\/\/ Use contextCredentials instead of directly using the cred\n\t\/\/ so that we can use different credentials for the same\n\t\/\/ connection (in the pool).\n\topts = append(opts, grpc.WithPerRPCCredentials(contextCredentials{}))\n\n\t\/\/ Dial timeout is the lesser of the ctx deadline or\n\t\/\/ maxDialTimeout.\n\tvar timeout time.Duration\n\tif d, ok := ctx.Deadline(); ok && time.Now().Add(maxDialTimeout).After(d) {\n\t\ttimeout = d.Sub(time.Now())\n\t} else {\n\t\ttimeout = maxDialTimeout\n\t}\n\topts = append(opts, grpc.WithTimeout(timeout))\n\n\tconn, err := pooledGRPCDial(grpcEndpoint.Host, opts...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc := NewClient(&http.Client{Transport: transport}, conn)\n\tc.BaseURL = HTTPEndpoint(ctx)\n\treturn c\n}\n\ntype contextCredentials struct{}\n\nfunc (contextCredentials) GetRequestMetadata(ctx context.Context) (map[string]string, error) {\n\tif cred := CredentialsFromContext(ctx); cred != nil {\n\t\ts := credentials.TokenSource{TokenSource: cred}\n\t\treturn s.GetRequestMetadata(ctx)\n\t}\n\treturn nil, nil\n}\n<commit_msg>allow passing client metadata<commit_after>package sourcegraph\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\ntype contextKey int\n\nconst (\n\tgrpcEndpointKey contextKey = iota\n\thttpEndpointKey\n\tcredentialsKey\n\tclientMetadataKey\n)\n\n\/\/ WithGRPCEndpoint returns a copy of parent whose clients (obtained\n\/\/ using FromContext) communicate with the given gRPC API endpoint\n\/\/ URL.\nfunc WithGRPCEndpoint(parent context.Context, url *url.URL) context.Context {\n\treturn context.WithValue(parent, grpcEndpointKey, url)\n}\n\n\/\/ GRPCEndpoint returns the context's gRPC endpoint URL that was\n\/\/ previously configured using WithGRPCEndpoint.\nfunc GRPCEndpoint(ctx context.Context) *url.URL {\n\turl, _ := ctx.Value(grpcEndpointKey).(*url.URL)\n\tif url == nil {\n\t\tpanic(\"no gRPC API endpoint URL set in context\")\n\t}\n\treturn url\n}\n\n\/\/ WithHTTPEndpoint returns a copy of parent whose clients (obtained\n\/\/ using FromContext) communicate with the given HTTP API endpoint\n\/\/ URL.\nfunc WithHTTPEndpoint(parent context.Context, url *url.URL) context.Context {\n\treturn context.WithValue(parent, httpEndpointKey, url)\n}\n\n\/\/ HTTPEndpoint returns the context's HTTP API endpoint URL that was\n\/\/ previously configured using WithHTTPEndpoint.\nfunc HTTPEndpoint(ctx context.Context) *url.URL {\n\turl, _ := ctx.Value(httpEndpointKey).(*url.URL)\n\tif url == nil {\n\t\tpanic(\"no HTTP API endpoint URL set in context\")\n\t}\n\treturn url\n}\n\n\/\/ Credentials authenticate gRPC and HTTP requests made by an API\n\/\/ client.\ntype Credentials interface {\n\toauth2.TokenSource\n}\n\n\/\/ WithCredentials returns a copy of the parent context that uses cred\n\/\/ as the credentials for future API clients constructed using this\n\/\/ context (with NewClientFromContext). It replaces (shadows) any\n\/\/ previously set credentials in the context.\n\/\/\n\/\/ It can be used to add, e.g., trace\/span ID metadata for request\n\/\/ tracing.\nfunc WithCredentials(parent context.Context, cred Credentials) context.Context {\n\treturn context.WithValue(parent, credentialsKey, cred)\n}\n\n\/\/ CredentialsFromContext returns the credentials (if any) previously\n\/\/ set in the context by WithCredentials.\nfunc CredentialsFromContext(ctx context.Context) Credentials {\n\tcred, ok := ctx.Value(credentialsKey).(Credentials)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn cred\n}\n\n\/\/ WithClientMetadata returns a copy of the parent context that merges\n\/\/ in the specified metadata to future API clients constructed using\n\/\/ this context (with NewClientFromContext). It replaces (shadows) any\n\/\/ previously set metadata in the context.\nfunc WithClientMetadata(parent context.Context, md map[string]string) context.Context {\n\treturn context.WithValue(parent, clientMetadataKey, md)\n}\n\n\/\/ clientMetadataFromContext returns the metadata (if any) previously\n\/\/ set in the context by WithClientMetadata.\nfunc clientMetadataFromContext(ctx context.Context) map[string]string {\n\tcred, ok := ctx.Value(clientMetadataKey).(map[string]string)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn cred\n}\n\nvar maxDialTimeout = 3 * time.Second\n\n\/\/ NewClientFromContext returns a Sourcegraph API client configured\n\/\/ using the context (e.g., authenticated using the context's\n\/\/ credentials).\nvar NewClientFromContext = func(ctx context.Context) *Client {\n\ttransport := keepAliveTransport\n\n\topts := []grpc.DialOption{\n\t\tgrpc.WithCodec(GRPCCodec),\n\t}\n\n\tgrpcEndpoint := GRPCEndpoint(ctx)\n\tif grpcEndpoint.Scheme == \"https\" {\n\t\topts = append(opts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, \"\")))\n\t}\n\n\tcred := CredentialsFromContext(ctx)\n\n\t\/\/ oauth2.NewClient retrieves the underlying transport from\n\t\/\/ its passed-in context, so we need to create a dummy context\n\t\/\/ using that transport.\n\tctxWithTransport := context.WithValue(ctx, oauth2.HTTPClient, &http.Client{Transport: transport})\n\ttransport = oauth2.NewClient(ctxWithTransport, cred).Transport\n\n\t\/\/ Use contextCredentials instead of directly using the cred\n\t\/\/ so that we can use different credentials for the same\n\t\/\/ connection (in the pool).\n\topts = append(opts, grpc.WithPerRPCCredentials(contextCredentials{}))\n\n\t\/\/ Dial timeout is the lesser of the ctx deadline or\n\t\/\/ maxDialTimeout.\n\tvar timeout time.Duration\n\tif d, ok := ctx.Deadline(); ok && time.Now().Add(maxDialTimeout).After(d) {\n\t\ttimeout = d.Sub(time.Now())\n\t} else {\n\t\ttimeout = maxDialTimeout\n\t}\n\topts = append(opts, grpc.WithTimeout(timeout))\n\n\tconn, err := pooledGRPCDial(grpcEndpoint.Host, opts...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc := NewClient(&http.Client{Transport: transport}, conn)\n\tc.BaseURL = HTTPEndpoint(ctx)\n\treturn c\n}\n\ntype contextCredentials struct{}\n\nfunc (contextCredentials) GetRequestMetadata(ctx context.Context) (map[string]string, error) {\n\tm := clientMetadataFromContext(ctx)\n\n\tif cred := CredentialsFromContext(ctx); cred != nil {\n\t\tcredMD, err := (credentials.TokenSource{TokenSource: cred}).GetRequestMetadata(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif m == nil {\n\t\t\tm = credMD\n\t\t} else {\n\t\t\tfor k, v := range credMD {\n\t\t\t\tm[k] = v\n\t\t\t}\n\t\t}\n\t}\n\treturn m, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ CMAC message authentication code, defined in\n\/\/ NIST Special Publication SP 800-38B.\n\npackage cmac\n\nimport (\n\t\"crypto\/cipher\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"hash\"\n)\n\nconst (\n\t\/\/ minimal irreducible polynomial of degree b\n\tr64 = 0x1b\n\tr128 = 0x87\n)\n\ntype cmac struct {\n\tk1, k2, ci, digest []byte\n\tp int \/\/ position in ci\n\tc cipher.Block\n}\n\n\/\/ New returns a new instance of a CMAC message authentication code\n\/\/ digest using the given cipher.Block.\nfunc New(c cipher.Block) (hash.Hash, error) {\n\tvar r int\n\tn := c.BlockSize()\n\tswitch n {\n\tcase 64 \/ 8:\n\t\tr = r64\n\tcase 128 \/ 8:\n\t\tr = r128\n\tdefault:\n\t\treturn nil, errors.New(\"NewCMAC: invalid cipher block size\")\n\t}\n\n\td := new(cmac)\n\td.c = c\n\td.k1 = make([]byte, n)\n\td.k2 = make([]byte, n)\n\td.ci = make([]byte, n)\n\td.digest = make([]byte, n)\n\n\t\/\/ Subkey generation, p. 7\n\tc.Encrypt(d.k1, d.k1)\n\n\tv1 := shift1(d.k1, d.k1)\n\td.k1[n-1] ^= byte(subtle.ConstantTimeSelect(v1, r, 0))\n\n\tv2 := shift1(d.k1, d.k2)\n\td.k2[n-1] ^= byte(subtle.ConstantTimeSelect(v2, r, 0))\n\n\treturn d, nil\n}\n\n\/\/ Reset clears the digest state, starting a new digest.\nfunc (d *cmac) Reset() {\n\tfor i := range d.ci {\n\t\td.ci[i] = 0\n\t}\n\td.p = 0\n}\n\n\/\/ Write adds the given data to the digest state.\nfunc (d *cmac) Write(p []byte) (nn int, err error) {\n\tnn = len(p)\n\tbs := len(d.ci)\n\tleft := bs - d.p\n\n\tif len(p) > left {\n\t\txor(d.ci[d.p:], p[:left])\n\t\tp = p[left:]\n\t\td.c.Encrypt(d.ci, d.ci)\n\t\td.p = 0\n\t}\n\n\tfor len(p) > bs {\n\t\txor(d.ci, p[:bs])\n\t\tp = p[bs:]\n\t\td.c.Encrypt(d.ci, d.ci)\n\t}\n\n\tif len(p) > 0 {\n\t\txor(d.ci[d.p:], p)\n\t\td.p += len(p)\n\t}\n\treturn\n}\n\n\/\/ Sum returns the CMAC digest, one cipher block in length,\n\/\/ of the data written with Write.\nfunc (d *cmac) Sum(in []byte) []byte {\n\t\/\/ Finish last block, mix in key, encrypt.\n\t\/\/ Don't edit ci, in case caller wants\n\t\/\/ to keep digesting after call to Sum.\n\tk := d.k1\n\tif d.p < len(d.digest) {\n\t\tk = d.k2\n\t}\n\tfor i := 0; i < len(d.ci); i++ {\n\t\td.digest[i] = d.ci[i] ^ k[i]\n\t}\n\tif d.p < len(d.digest) {\n\t\td.digest[d.p] ^= 0x80\n\t}\n\td.c.Encrypt(d.digest, d.digest)\n\treturn append(in, d.digest...)\n}\n\nfunc (d *cmac) Size() int { return len(d.digest) }\n\nfunc (d *cmac) BlockSize() int { return d.c.BlockSize() }\n\nfunc shift1(src, dst []byte) int {\n\tvar b byte\n\tfor i := len(src) - 1; i >= 0; i-- {\n\t\tbb := src[i] >> 7\n\t\tdst[i] = src[i]<<1 | b\n\t\tb = bb\n\t}\n\treturn int(b)\n}\n\nfunc xor(a, b []byte) {\n\tfor i, v := range b {\n\t\ta[i] ^= v\n\t}\n}\n<commit_msg>Fix error message<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ CMAC message authentication code, defined in\n\/\/ NIST Special Publication SP 800-38B.\n\npackage cmac\n\nimport (\n\t\"crypto\/cipher\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"hash\"\n)\n\nconst (\n\t\/\/ minimal irreducible polynomial of degree b\n\tr64 = 0x1b\n\tr128 = 0x87\n)\n\ntype cmac struct {\n\tk1, k2, ci, digest []byte\n\tp int \/\/ position in ci\n\tc cipher.Block\n}\n\n\/\/ New returns a new instance of a CMAC message authentication code\n\/\/ digest using the given cipher.Block.\nfunc New(c cipher.Block) (hash.Hash, error) {\n\tvar r int\n\tn := c.BlockSize()\n\tswitch n {\n\tcase 64 \/ 8:\n\t\tr = r64\n\tcase 128 \/ 8:\n\t\tr = r128\n\tdefault:\n\t\treturn nil, errors.New(\"cmac: invalid cipher block size\")\n\t}\n\n\td := new(cmac)\n\td.c = c\n\td.k1 = make([]byte, n)\n\td.k2 = make([]byte, n)\n\td.ci = make([]byte, n)\n\td.digest = make([]byte, n)\n\n\t\/\/ Subkey generation, p. 7\n\tc.Encrypt(d.k1, d.k1)\n\n\tv1 := shift1(d.k1, d.k1)\n\td.k1[n-1] ^= byte(subtle.ConstantTimeSelect(v1, r, 0))\n\n\tv2 := shift1(d.k1, d.k2)\n\td.k2[n-1] ^= byte(subtle.ConstantTimeSelect(v2, r, 0))\n\n\treturn d, nil\n}\n\n\/\/ Reset clears the digest state, starting a new digest.\nfunc (d *cmac) Reset() {\n\tfor i := range d.ci {\n\t\td.ci[i] = 0\n\t}\n\td.p = 0\n}\n\n\/\/ Write adds the given data to the digest state.\nfunc (d *cmac) Write(p []byte) (nn int, err error) {\n\tnn = len(p)\n\tbs := len(d.ci)\n\tleft := bs - d.p\n\n\tif len(p) > left {\n\t\txor(d.ci[d.p:], p[:left])\n\t\tp = p[left:]\n\t\td.c.Encrypt(d.ci, d.ci)\n\t\td.p = 0\n\t}\n\n\tfor len(p) > bs {\n\t\txor(d.ci, p[:bs])\n\t\tp = p[bs:]\n\t\td.c.Encrypt(d.ci, d.ci)\n\t}\n\n\tif len(p) > 0 {\n\t\txor(d.ci[d.p:], p)\n\t\td.p += len(p)\n\t}\n\treturn\n}\n\n\/\/ Sum returns the CMAC digest, one cipher block in length,\n\/\/ of the data written with Write.\nfunc (d *cmac) Sum(in []byte) []byte {\n\t\/\/ Finish last block, mix in key, encrypt.\n\t\/\/ Don't edit ci, in case caller wants\n\t\/\/ to keep digesting after call to Sum.\n\tk := d.k1\n\tif d.p < len(d.digest) {\n\t\tk = d.k2\n\t}\n\tfor i := 0; i < len(d.ci); i++ {\n\t\td.digest[i] = d.ci[i] ^ k[i]\n\t}\n\tif d.p < len(d.digest) {\n\t\td.digest[d.p] ^= 0x80\n\t}\n\td.c.Encrypt(d.digest, d.digest)\n\treturn append(in, d.digest...)\n}\n\nfunc (d *cmac) Size() int { return len(d.digest) }\n\nfunc (d *cmac) BlockSize() int { return d.c.BlockSize() }\n\nfunc shift1(src, dst []byte) int {\n\tvar b byte\n\tfor i := len(src) - 1; i >= 0; i-- {\n\t\tbb := src[i] >> 7\n\t\tdst[i] = src[i]<<1 | b\n\t\tb = bb\n\t}\n\treturn int(b)\n}\n\nfunc xor(a, b []byte) {\n\tfor i, v := range b {\n\t\ta[i] ^= v\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build 386 arm arm64 mips64 mips64le ppc64 ppc64le\n\npackage crc32\n\n\/\/ The file contains the generic version of updateCastagnoli which just calls\n\/\/ the software implementation.\n\nfunc updateCastagnoli(crc uint32, p []byte) uint32 {\n\treturn update(crc, castagnoliTable, p)\n}\n\nfunc updateIEEE(crc uint32, p []byte) uint32 {\n\t\/\/ only use slicing-by-8 when input is >= 4KB\n\tif len(p) >= 4096 {\n\t\tieeeTable8Once.Do(func() {\n\t\t\tieeeTable8 = makeTable8(IEEE)\n\t\t})\n\t\treturn updateSlicingBy8(crc, ieeeTable8, p)\n\t}\n\treturn update(crc, IEEETable, p)\n}\n<commit_msg>hash\/crc32: add sparc64 build tags<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build 386 arm arm64 mips64 mips64le ppc64 ppc64le sparc64\n\npackage crc32\n\n\/\/ The file contains the generic version of updateCastagnoli which just calls\n\/\/ the software implementation.\n\nfunc updateCastagnoli(crc uint32, p []byte) uint32 {\n\treturn update(crc, castagnoliTable, p)\n}\n\nfunc updateIEEE(crc uint32, p []byte) uint32 {\n\t\/\/ only use slicing-by-8 when input is >= 4KB\n\tif len(p) >= 4096 {\n\t\tieeeTable8Once.Do(func() {\n\t\t\tieeeTable8 = makeTable8(IEEE)\n\t\t})\n\t\treturn updateSlicingBy8(crc, ieeeTable8, p)\n\t}\n\treturn update(crc, IEEETable, p)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage util\n\nimport (\n\t\"github.com\/getgauge\/gauge\/config\"\n\t. \"gopkg.in\/check.v1\"\n\t\"path\/filepath\"\n)\n\nfunc (s *MySuite) TestPrefixingMessage(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"Hello\\nWorld\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\")\n}\n\nfunc (s *MySuite) TestPrefixingMessageEndingWithNewLine(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"Hello\\nWorld\\n\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\\n\")\n\n}\n\nfunc (s *MySuite) TestPrefixingMultiLineMessagWithNewLine(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"\\nHello\\nWorld\\n\\nFoo bar\\n\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : \\n\"+\n\t\t\"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\\n\"+\n\t\t\"[my-plugin Plugin] : \\n\"+\n\t\t\"[my-plugin Plugin] : Foo bar\\n\")\n\n}\n\nfunc (s *MySuite) TestGetPathToFile(c *C) {\n\tvar path string\n\tconfig.ProjectRoot = \"PROJECT_ROOT\"\n\n\tpath = GetPathToFile(string(filepath.Separator) + \"resources\")\n\tc.Assert(path, Equals, string(filepath.Separator) + \"resources\")\n\n\tpath = GetPathToFile(\"resources\")\n\tc.Assert(path, Equals, filepath.Join(config.ProjectRoot,\"resources\"))\n}\n<commit_msg>fixing util tests<commit_after>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage util\n\nimport (\n\t\"github.com\/getgauge\/gauge\/config\"\n\t. \"gopkg.in\/check.v1\"\n\t\"path\/filepath\"\n)\n\nfunc (s *MySuite) TestPrefixingMessage(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"Hello\\nWorld\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\")\n}\n\nfunc (s *MySuite) TestPrefixingMessageEndingWithNewLine(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"Hello\\nWorld\\n\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\\n\")\n\n}\n\nfunc (s *MySuite) TestPrefixingMultiLineMessagWithNewLine(c *C) {\n\tprefixedLines := AddPrefixToEachLine(\"\\nHello\\nWorld\\n\\nFoo bar\\n\", \"[my-plugin Plugin] : \")\n\tc.Assert(prefixedLines, Equals, \"[my-plugin Plugin] : \\n\"+\n\t\t\"[my-plugin Plugin] : Hello\\n\"+\n\t\t\"[my-plugin Plugin] : World\\n\"+\n\t\t\"[my-plugin Plugin] : \\n\"+\n\t\t\"[my-plugin Plugin] : Foo bar\\n\")\n\n}\n\nfunc (s *MySuite) TestGetPathToFile(c *C) {\n\tvar path string\n\tconfig.ProjectRoot = \"PROJECT_ROOT\"\n\tabsPath, _ := filepath.Abs(\"resources\")\n\tpath = GetPathToFile(absPath)\n\tc.Assert(path, Equals, absPath)\n\n\tpath = GetPathToFile(\"resources\")\n\tc.Assert(path, Equals, filepath.Join(config.ProjectRoot,\"resources\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"bytes\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\tmh \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multihash\"\n)\n\nfunc TestKey(t *testing.T) {\n\n\th1, err := mh.Sum([]byte(\"beep boop\"), mh.SHA2_256, -1)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tk1 := Key(h1)\n\th2 := mh.Multihash(k1)\n\tk2 := Key(h2)\n\n\tif !bytes.Equal(h1, h2) {\n\t\tt.Error(\"Multihashes not equal.\")\n\t}\n\n\tif k1 != k2 {\n\t\tt.Error(\"Keys not equal.\")\n\t}\n}\n\nfunc TestByteChanReader(t *testing.T) {\n\n\tvar data bytes.Buffer\n\tvar data2 bytes.Buffer\n\tdch := make(chan []byte, 8)\n\trandr := NewTimeSeededRand()\n\n\tgo func() {\n\t\tdefer close(dch)\n\t\tfor i := 0; i < rand.Intn(100)+100; i++ {\n\t\t\tchunk := make([]byte, rand.Intn(100000)+10)\n\t\t\trandr.Read(chunk)\n\t\t\tdata.Write(chunk)\n\t\t\t\/\/ fmt.Printf(\"chunk: %6.d %v\\n\", len(chunk), chunk[:10])\n\t\t\tdch <- chunk\n\t\t}\n\t}()\n\n\tread := NewByteChanReader(dch)\n\n\t\/\/ read in random, weird sizes to exercise saving buffer.\n\tfor {\n\t\tbuf := make([]byte, rand.Intn(10)*10)\n\t\tn, err := read.Read(buf)\n\t\tdata2.Write(buf[:n])\n\t\t\/\/ fmt.Printf(\"read: %6.d\\n\", n)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ fmt.Printf(\"lens: %d == %d\\n\", len(out), len(data.Bytes()))\n\tif !bytes.Equal(data2.Bytes(), data.Bytes()) {\n\t\tt.Fatal(\"Reader failed to stream correct bytes\")\n\t}\n}\n\nfunc TestXOR(t *testing.T) {\n\tcases := [][3][]byte{\n\t\t[3][]byte{\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0x00, 0x00, 0x00},\n\t\t},\n\t\t[3][]byte{\n\t\t\t[]byte{0x00, 0xFF, 0x00},\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0xFF, 0x00, 0xFF},\n\t\t},\n\t\t[3][]byte{\n\t\t\t[]byte{0x55, 0x55, 0x55},\n\t\t\t[]byte{0x55, 0xFF, 0xAA},\n\t\t\t[]byte{0x00, 0xAA, 0xFF},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tr := XOR(c[0], c[1])\n\t\tif !bytes.Equal(r, c[2]) {\n\t\t\tt.Error(\"XOR failed\")\n\t\t}\n\t}\n}\n<commit_msg>remove comment<commit_after>package util\n\nimport (\n\t\"bytes\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\tmh \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multihash\"\n)\n\nfunc TestKey(t *testing.T) {\n\n\th1, err := mh.Sum([]byte(\"beep boop\"), mh.SHA2_256, -1)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tk1 := Key(h1)\n\th2 := mh.Multihash(k1)\n\tk2 := Key(h2)\n\n\tif !bytes.Equal(h1, h2) {\n\t\tt.Error(\"Multihashes not equal.\")\n\t}\n\n\tif k1 != k2 {\n\t\tt.Error(\"Keys not equal.\")\n\t}\n}\n\nfunc TestByteChanReader(t *testing.T) {\n\n\tvar data bytes.Buffer\n\tvar data2 bytes.Buffer\n\tdch := make(chan []byte, 8)\n\trandr := NewTimeSeededRand()\n\n\tgo func() {\n\t\tdefer close(dch)\n\t\tfor i := 0; i < rand.Intn(100)+100; i++ {\n\t\t\tchunk := make([]byte, rand.Intn(100000)+10)\n\t\t\trandr.Read(chunk)\n\t\t\tdata.Write(chunk)\n\t\t\tdch <- chunk\n\t\t}\n\t}()\n\n\tread := NewByteChanReader(dch)\n\n\t\/\/ read in random, weird sizes to exercise saving buffer.\n\tfor {\n\t\tbuf := make([]byte, rand.Intn(10)*10)\n\t\tn, err := read.Read(buf)\n\t\tdata2.Write(buf[:n])\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !bytes.Equal(data2.Bytes(), data.Bytes()) {\n\t\tt.Fatal(\"Reader failed to stream correct bytes\")\n\t}\n}\n\nfunc TestXOR(t *testing.T) {\n\tcases := [][3][]byte{\n\t\t[3][]byte{\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0x00, 0x00, 0x00},\n\t\t},\n\t\t[3][]byte{\n\t\t\t[]byte{0x00, 0xFF, 0x00},\n\t\t\t[]byte{0xFF, 0xFF, 0xFF},\n\t\t\t[]byte{0xFF, 0x00, 0xFF},\n\t\t},\n\t\t[3][]byte{\n\t\t\t[]byte{0x55, 0x55, 0x55},\n\t\t\t[]byte{0x55, 0xFF, 0xAA},\n\t\t\t[]byte{0x00, 0xAA, 0xFF},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tr := XOR(c[0], c[1])\n\t\tif !bytes.Equal(r, c[2]) {\n\t\t\tt.Error(\"XOR failed\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package neptulon\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/neptulon\/cmap\"\n\t\"github.com\/neptulon\/shortid\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Conn is a client connection.\ntype Conn struct {\n\tID string \/\/ Randomly generated unique client connection ID.\n\tSession *cmap.CMap \/\/ Thread-safe data store for storing arbitrary data for this connection session.\n\tmiddleware []func(ctx *ReqCtx) error\n\tresRoutes *cmap.CMap \/\/ message ID (string) -> handler func(ctx *ResCtx) error : expected responses for requests that we've sent\n\tws *websocket.Conn\n\twg sync.WaitGroup\n\tdeadline time.Duration\n\tisClientConn bool\n\tconnected atomic.Value\n}\n\n\/\/ NewConn creates a new Conn object.\nfunc NewConn() (*Conn, error) {\n\tid, err := shortid.UUID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Conn{\n\t\tID: id,\n\t\tSession: cmap.New(),\n\t\tresRoutes: cmap.New(),\n\t\tdeadline: time.Second * time.Duration(300),\n\t}, nil\n}\n\n\/\/ SetDeadline set the read\/write deadlines for the connection, in seconds.\n\/\/ Default value for read\/write deadline is 300 seconds.\nfunc (c *Conn) SetDeadline(seconds int) {\n\tc.deadline = time.Second * time.Duration(seconds)\n}\n\n\/\/ Middleware registers middleware to handle incoming request messages.\nfunc (c *Conn) Middleware(middleware ...Middleware) {\n\tfor _, m := range middleware {\n\t\tc.MiddlewareFunc(m.Middleware)\n\t}\n}\n\n\/\/ MiddlewareFunc registers middleware function to handle incoming request messages.\nfunc (c *Conn) MiddlewareFunc(middleware ...func(ctx *ReqCtx) error) {\n\tc.middleware = append(c.middleware, middleware...)\n}\n\n\/\/ Connect connects to the given WebSocket server.\n\/\/ addr should be formatted as ws:\/\/host:port -or- wss:\/\/host:port (i.e. ws:\/\/127.0.0.1:3000 -or- wss:\/\/localhost:3000)\nfunc (c *Conn) Connect(addr string) error {\n\tws, err := websocket.Dial(addr, \"\", \"http:\/\/localhost\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.setConn(ws); err != nil {\n\t\treturn err\n\t}\n\n\tc.isClientConn = true\n\tc.wg.Add(1)\n\tgo func() {\n\t\tdefer recoverAndLog(c, &c.wg)\n\t\tc.startReceive()\n\t}()\n\ttime.Sleep(time.Millisecond) \/\/ give receive goroutine a few cycles to start\n\treturn nil\n}\n\n\/\/ RemoteAddr returns the remote network address.\nfunc (c *Conn) RemoteAddr() net.Addr {\n\tif c.ws == nil {\n\t\treturn nil\n\t}\n\n\treturn c.ws.RemoteAddr()\n}\n\n\/\/ SendRequest sends a JSON-RPC request through the connection with an auto generated request ID.\n\/\/ resHandler is called when a response is returned.\nfunc (c *Conn) SendRequest(method string, params interface{}, resHandler func(res *ResCtx) error) (reqID string, err error) {\n\tid, err := shortid.UUID()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treq := request{ID: id, Method: method, Params: params}\n\tif err = c.send(req); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tc.resRoutes.Set(req.ID, resHandler)\n\treturn id, nil\n}\n\n\/\/ SendRequestArr sends a JSON-RPC request through the connection, with array params and auto generated request ID.\n\/\/ resHandler is called when a response is returned.\nfunc (c *Conn) SendRequestArr(method string, resHandler func(res *ResCtx) error, params ...interface{}) (reqID string, err error) {\n\treturn c.SendRequest(method, params, resHandler)\n}\n\n\/\/ Close closes the connection.\nfunc (c *Conn) Close() error {\n\tc.connected.Store(false)\n\tif c.ws != nil {\n\t\tc.ws.Close()\n\t}\n\treturn nil\n}\n\n\/\/ Wait waits for all message\/connection handler goroutines to exit.\nfunc (c *Conn) Wait() {\n\tc.wg.Wait()\n}\n\n\/\/ SendResponse sends a JSON-RPC response message through the connection.\nfunc (c *Conn) sendResponse(id string, result interface{}, err *ResError) error {\n\treturn c.send(response{ID: id, Result: result, Error: err})\n}\n\n\/\/ Send sends the given message through the connection.\nfunc (c *Conn) send(msg interface{}) error {\n\tif !c.connected.Load().(bool) {\n\t\treturn errors.New(\"use of closed connection\")\n\t}\n\n\treturn websocket.JSON.Send(c.ws, msg)\n}\n\n\/\/ Receive receives message from the connection.\nfunc (c *Conn) receive(msg *message) error {\n\tif !c.connected.Load().(bool) {\n\t\treturn errors.New(\"use of closed connection\")\n\t}\n\n\treturn websocket.JSON.Receive(c.ws, &msg)\n}\n\n\/\/ Reuse an established websocket.Conn.\nfunc (c *Conn) setConn(ws *websocket.Conn) error {\n\tc.ws = ws\n\tc.connected.Store(true)\n\tif err := ws.SetDeadline(time.Now().Add(c.deadline)); err != nil {\n\t\treturn fmt.Errorf(\"conn: error while setting websocket connection deadline: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ startReceive starts receiving messages. This method blocks and does not return until the connection is closed.\nfunc (c *Conn) startReceive() {\n\tdefer c.Close()\n\n\tfor {\n\t\tvar m message\n\t\terr := c.receive(&m)\n\t\tif err != nil {\n\t\t\t\/\/ if we closed the connection\n\t\t\tif !c.connected.Load().(bool) {\n\t\t\t\tlog.Printf(\"conn: closed %v: %v\", c.ID, c.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if peer closed the connection\n\t\t\tif err == io.EOF {\n\t\t\t\tlog.Printf(\"conn: peer disconnected %v: %v\", c.ID, c.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"conn: error while receiving message: %v\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the message is a request\n\t\tif m.Method != \"\" {\n\t\t\tc.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer recoverAndLog(c, &c.wg)\n\t\t\t\tif err := newReqCtx(c, m.ID, m.Method, m.Params, c.middleware).Next(); err != nil {\n\t\t\t\t\tlog.Printf(\"conn: error while handling request: %v\", err)\n\t\t\t\t\tc.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if the message is not a JSON-RPC message\n\t\tif m.ID == \"\" || (m.Result == nil && m.Error == nil) {\n\t\t\tlog.Printf(\"conn: received an unknown message %v: %v, %v\", c.ID, c.RemoteAddr(), m)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the message is a response\n\t\tif resHandler, ok := c.resRoutes.GetOk(m.ID); ok {\n\t\t\tc.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer recoverAndLog(c, &c.wg)\n\t\t\t\terr := resHandler.(func(ctx *ResCtx) error)(newResCtx(c, m.ID, m.Result, m.Error))\n\t\t\t\tc.resRoutes.Delete(m.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"conn: error while handling response: %v\", err)\n\t\t\t\t\tc.Close()\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tlog.Printf(\"conn: error while handling response: got response to a request with unknown ID: %v\", m.ID)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc recoverAndLog(c *Conn, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif err := recover(); err != nil {\n\t\tconst size = 64 << 10\n\t\tbuf := make([]byte, size)\n\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\tlog.Printf(\"conn: panic handling response %v: %v\\n%s\", c.RemoteAddr(), err, buf)\n\t}\n}\n<commit_msg>add Conn.DisconnHandler<commit_after>package neptulon\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/neptulon\/cmap\"\n\t\"github.com\/neptulon\/shortid\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Conn is a client connection.\ntype Conn struct {\n\tID string \/\/ Randomly generated unique client connection ID.\n\tSession *cmap.CMap \/\/ Thread-safe data store for storing arbitrary data for this connection session.\n\tmiddleware []func(ctx *ReqCtx) error\n\tresRoutes *cmap.CMap \/\/ message ID (string) -> handler func(ctx *ResCtx) error : expected responses for requests that we've sent\n\tws *websocket.Conn\n\twg sync.WaitGroup\n\tdeadline time.Duration\n\tisClientConn bool\n\tconnected atomic.Value\n\tdisconnHandler func(c *Conn)\n}\n\n\/\/ NewConn creates a new Conn object.\nfunc NewConn() (*Conn, error) {\n\tid, err := shortid.UUID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Conn{\n\t\tID: id,\n\t\tSession: cmap.New(),\n\t\tresRoutes: cmap.New(),\n\t\tdeadline: time.Second * time.Duration(300),\n\t\tdisconnHandler: func(c *Conn) {},\n\t}, nil\n}\n\n\/\/ SetDeadline set the read\/write deadlines for the connection, in seconds.\n\/\/ Default value for read\/write deadline is 300 seconds.\nfunc (c *Conn) SetDeadline(seconds int) {\n\tc.deadline = time.Second * time.Duration(seconds)\n}\n\n\/\/ Middleware registers middleware to handle incoming request messages.\nfunc (c *Conn) Middleware(middleware ...Middleware) {\n\tfor _, m := range middleware {\n\t\tc.MiddlewareFunc(m.Middleware)\n\t}\n}\n\n\/\/ MiddlewareFunc registers middleware function to handle incoming request messages.\nfunc (c *Conn) MiddlewareFunc(middleware ...func(ctx *ReqCtx) error) {\n\tc.middleware = append(c.middleware, middleware...)\n}\n\n\/\/ DisconnHandler registers a function to handle disconnection event.\nfunc (c *Conn) DisconnHandler(handler func(c *Conn)) {\n\tc.disconnHandler = handler\n}\n\n\/\/ Connect connects to the given WebSocket server.\n\/\/ addr should be formatted as ws:\/\/host:port -or- wss:\/\/host:port (i.e. ws:\/\/127.0.0.1:3000 -or- wss:\/\/localhost:3000)\nfunc (c *Conn) Connect(addr string) error {\n\tws, err := websocket.Dial(addr, \"\", \"http:\/\/localhost\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.setConn(ws); err != nil {\n\t\treturn err\n\t}\n\n\tc.isClientConn = true\n\tc.wg.Add(1)\n\tgo func() {\n\t\tdefer recoverAndLog(c, &c.wg)\n\t\tc.startReceive()\n\t}()\n\ttime.Sleep(time.Millisecond) \/\/ give receive goroutine a few cycles to start\n\treturn nil\n}\n\n\/\/ RemoteAddr returns the remote network address.\nfunc (c *Conn) RemoteAddr() net.Addr {\n\tif c.ws == nil {\n\t\treturn nil\n\t}\n\n\treturn c.ws.RemoteAddr()\n}\n\n\/\/ SendRequest sends a JSON-RPC request through the connection with an auto generated request ID.\n\/\/ resHandler is called when a response is returned.\nfunc (c *Conn) SendRequest(method string, params interface{}, resHandler func(res *ResCtx) error) (reqID string, err error) {\n\tid, err := shortid.UUID()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treq := request{ID: id, Method: method, Params: params}\n\tif err = c.send(req); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tc.resRoutes.Set(req.ID, resHandler)\n\treturn id, nil\n}\n\n\/\/ SendRequestArr sends a JSON-RPC request through the connection, with array params and auto generated request ID.\n\/\/ resHandler is called when a response is returned.\nfunc (c *Conn) SendRequestArr(method string, resHandler func(res *ResCtx) error, params ...interface{}) (reqID string, err error) {\n\treturn c.SendRequest(method, params, resHandler)\n}\n\n\/\/ Close closes the connection.\nfunc (c *Conn) Close() error {\n\tc.connected.Store(false)\n\tif c.ws != nil {\n\t\tc.ws.Close()\n\t}\n\treturn nil\n}\n\n\/\/ Wait waits for all message\/connection handler goroutines to exit.\nfunc (c *Conn) Wait() {\n\tc.wg.Wait()\n}\n\n\/\/ SendResponse sends a JSON-RPC response message through the connection.\nfunc (c *Conn) sendResponse(id string, result interface{}, err *ResError) error {\n\treturn c.send(response{ID: id, Result: result, Error: err})\n}\n\n\/\/ Send sends the given message through the connection.\nfunc (c *Conn) send(msg interface{}) error {\n\tif !c.connected.Load().(bool) {\n\t\treturn errors.New(\"use of closed connection\")\n\t}\n\n\treturn websocket.JSON.Send(c.ws, msg)\n}\n\n\/\/ Receive receives message from the connection.\nfunc (c *Conn) receive(msg *message) error {\n\tif !c.connected.Load().(bool) {\n\t\treturn errors.New(\"use of closed connection\")\n\t}\n\n\treturn websocket.JSON.Receive(c.ws, &msg)\n}\n\n\/\/ Reuse an established websocket.Conn.\nfunc (c *Conn) setConn(ws *websocket.Conn) error {\n\tc.ws = ws\n\tc.connected.Store(true)\n\tif err := ws.SetDeadline(time.Now().Add(c.deadline)); err != nil {\n\t\treturn fmt.Errorf(\"conn: error while setting websocket connection deadline: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ startReceive starts receiving messages. This method blocks and does not return until the connection is closed.\nfunc (c *Conn) startReceive() {\n\tdefer c.disconnHandler(c)\n\tdefer c.Close()\n\n\tfor {\n\t\tvar m message\n\t\terr := c.receive(&m)\n\t\tif err != nil {\n\t\t\t\/\/ if we closed the connection\n\t\t\tif !c.connected.Load().(bool) {\n\t\t\t\tlog.Printf(\"conn: closed %v: %v\", c.ID, c.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if peer closed the connection\n\t\t\tif err == io.EOF {\n\t\t\t\tlog.Printf(\"conn: peer disconnected %v: %v\", c.ID, c.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"conn: error while receiving message: %v\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the message is a request\n\t\tif m.Method != \"\" {\n\t\t\tc.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer recoverAndLog(c, &c.wg)\n\t\t\t\tif err := newReqCtx(c, m.ID, m.Method, m.Params, c.middleware).Next(); err != nil {\n\t\t\t\t\tlog.Printf(\"conn: error while handling request: %v\", err)\n\t\t\t\t\tc.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if the message is not a JSON-RPC message\n\t\tif m.ID == \"\" || (m.Result == nil && m.Error == nil) {\n\t\t\tlog.Printf(\"conn: received an unknown message %v: %v, %v\", c.ID, c.RemoteAddr(), m)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the message is a response\n\t\tif resHandler, ok := c.resRoutes.GetOk(m.ID); ok {\n\t\t\tc.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer recoverAndLog(c, &c.wg)\n\t\t\t\terr := resHandler.(func(ctx *ResCtx) error)(newResCtx(c, m.ID, m.Result, m.Error))\n\t\t\t\tc.resRoutes.Delete(m.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"conn: error while handling response: %v\", err)\n\t\t\t\t\tc.Close()\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tlog.Printf(\"conn: error while handling response: got response to a request with unknown ID: %v\", m.ID)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc recoverAndLog(c *Conn, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif err := recover(); err != nil {\n\t\tconst size = 64 << 10\n\t\tbuf := make([]byte, size)\n\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\tlog.Printf(\"conn: panic handling response %v: %v\\n%s\", c.RemoteAddr(), err, buf)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gosocks5\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Config struct {\n\tMethods []uint8\n\tSelectMethod func(methods ...uint8) uint8\n\tMethodSelected func(method uint8, conn net.Conn) (net.Conn, error)\n}\n\nfunc defaultConfig() *Config {\n\treturn &Config{}\n}\n\ntype Conn struct {\n\tc net.Conn\n\tconfig *Config\n\tmethod uint8\n\tisClient bool\n\thandshaked bool\n\thandshakeMutex sync.Mutex\n\thandshakeErr error\n}\n\nfunc ClientConn(conn net.Conn, config *Config) *Conn {\n\treturn &Conn{\n\t\tc: conn,\n\t\tconfig: config,\n\t\tisClient: true,\n\t}\n}\n\nfunc ServerConn(conn net.Conn, config *Config) *Conn {\n\treturn &Conn{\n\t\tc: conn,\n\t\tconfig: config,\n\t}\n}\n\nfunc (conn *Conn) Handleshake() error {\n\tconn.handshakeMutex.Lock()\n\tdefer conn.handshakeMutex.Unlock()\n\n\tif err := conn.handshakeErr; err != nil {\n\t\treturn err\n\t}\n\tif conn.handshaked {\n\t\treturn nil\n\t}\n\n\tif conn.isClient {\n\t\tconn.handshakeErr = conn.clientHandshake()\n\t} else {\n\t\tconn.handshakeErr = conn.serverHandshake()\n\t}\n\n\treturn conn.handshakeErr\n}\n\nfunc (conn *Conn) clientHandshake() error {\n\tif conn.config == nil {\n\t\tconn.config = defaultConfig()\n\t}\n\n\tnm := len(conn.config.Methods)\n\tif nm == 0 {\n\t\tnm = 1\n\t}\n\n\tb := make([]byte, 2+nm)\n\tb[0] = Ver5\n\tb[1] = uint8(nm)\n\tcopy(b[2:], conn.config.Methods)\n\n\tif _, err := conn.c.Write(b); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := io.ReadFull(conn.c, b[:2]); err != nil {\n\t\treturn err\n\t}\n\n\tif b[0] != Ver5 {\n\t\treturn ErrBadVersion\n\t}\n\n\tif conn.config.MethodSelected != nil {\n\t\tc, err := conn.config.MethodSelected(b[1], conn.c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn.c = c\n\t}\n\tconn.method = b[1]\n\tlog.Println(\"method:\", conn.method)\n\tconn.handshaked = true\n\treturn nil\n}\n\nfunc (conn *Conn) serverHandshake() error {\n\tif conn.config == nil {\n\t\tconn.config = defaultConfig()\n\t}\n\n\tmethods, err := ReadMethods(conn.c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmethod := MethodNoAuth\n\tif conn.config.SelectMethod != nil {\n\t\tmethod = conn.config.SelectMethod(methods...)\n\t}\n\n\tif _, err := conn.c.Write([]byte{Ver5, method}); err != nil {\n\t\treturn err\n\t}\n\n\tif conn.config.MethodSelected != nil {\n\t\tc, err := conn.config.MethodSelected(method, conn.c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn.c = c\n\t}\n\tconn.method = method\n\tlog.Println(\"method:\", method)\n\tconn.handshaked = true\n\treturn nil\n}\n\nfunc (conn *Conn) Read(b []byte) (n int, err error) {\n\tif err = conn.Handleshake(); err != nil {\n\t\treturn\n\t}\n\treturn conn.c.Read(b)\n}\n\nfunc (conn *Conn) Write(b []byte) (n int, err error) {\n\tif err = conn.Handleshake(); err != nil {\n\t\treturn\n\t}\n\treturn conn.c.Write(b)\n}\n\nfunc (conn *Conn) Close() error {\n\treturn conn.c.Close()\n}\n\nfunc (conn *Conn) LocalAddr() net.Addr {\n\treturn conn.c.LocalAddr()\n}\n\nfunc (conn *Conn) RemoteAddr() net.Addr {\n\treturn conn.c.RemoteAddr()\n}\n\nfunc (conn *Conn) SetDeadline(t time.Time) error {\n\treturn conn.c.SetDeadline(t)\n}\n\nfunc (conn *Conn) SetReadDeadline(t time.Time) error {\n\treturn conn.c.SetReadDeadline(t)\n}\n\nfunc (conn *Conn) SetWriteDeadline(t time.Time) error {\n\treturn conn.c.SetWriteDeadline(t)\n}\n<commit_msg>rm print<commit_after>package gosocks5\n\nimport (\n\t\"io\"\n\t\/\/\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Config struct {\n\tMethods []uint8\n\tSelectMethod func(methods ...uint8) uint8\n\tMethodSelected func(method uint8, conn net.Conn) (net.Conn, error)\n}\n\nfunc defaultConfig() *Config {\n\treturn &Config{}\n}\n\ntype Conn struct {\n\tc net.Conn\n\tconfig *Config\n\tmethod uint8\n\tisClient bool\n\thandshaked bool\n\thandshakeMutex sync.Mutex\n\thandshakeErr error\n}\n\nfunc ClientConn(conn net.Conn, config *Config) *Conn {\n\treturn &Conn{\n\t\tc: conn,\n\t\tconfig: config,\n\t\tisClient: true,\n\t}\n}\n\nfunc ServerConn(conn net.Conn, config *Config) *Conn {\n\treturn &Conn{\n\t\tc: conn,\n\t\tconfig: config,\n\t}\n}\n\nfunc (conn *Conn) Handleshake() error {\n\tconn.handshakeMutex.Lock()\n\tdefer conn.handshakeMutex.Unlock()\n\n\tif err := conn.handshakeErr; err != nil {\n\t\treturn err\n\t}\n\tif conn.handshaked {\n\t\treturn nil\n\t}\n\n\tif conn.isClient {\n\t\tconn.handshakeErr = conn.clientHandshake()\n\t} else {\n\t\tconn.handshakeErr = conn.serverHandshake()\n\t}\n\n\treturn conn.handshakeErr\n}\n\nfunc (conn *Conn) clientHandshake() error {\n\tif conn.config == nil {\n\t\tconn.config = defaultConfig()\n\t}\n\n\tnm := len(conn.config.Methods)\n\tif nm == 0 {\n\t\tnm = 1\n\t}\n\n\tb := make([]byte, 2+nm)\n\tb[0] = Ver5\n\tb[1] = uint8(nm)\n\tcopy(b[2:], conn.config.Methods)\n\n\tif _, err := conn.c.Write(b); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := io.ReadFull(conn.c, b[:2]); err != nil {\n\t\treturn err\n\t}\n\n\tif b[0] != Ver5 {\n\t\treturn ErrBadVersion\n\t}\n\n\tif conn.config.MethodSelected != nil {\n\t\tc, err := conn.config.MethodSelected(b[1], conn.c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn.c = c\n\t}\n\tconn.method = b[1]\n\t\/\/log.Println(\"method:\", conn.method)\n\tconn.handshaked = true\n\treturn nil\n}\n\nfunc (conn *Conn) serverHandshake() error {\n\tif conn.config == nil {\n\t\tconn.config = defaultConfig()\n\t}\n\n\tmethods, err := ReadMethods(conn.c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmethod := MethodNoAuth\n\tif conn.config.SelectMethod != nil {\n\t\tmethod = conn.config.SelectMethod(methods...)\n\t}\n\n\tif _, err := conn.c.Write([]byte{Ver5, method}); err != nil {\n\t\treturn err\n\t}\n\n\tif conn.config.MethodSelected != nil {\n\t\tc, err := conn.config.MethodSelected(method, conn.c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn.c = c\n\t}\n\tconn.method = method\n\t\/\/log.Println(\"method:\", method)\n\tconn.handshaked = true\n\treturn nil\n}\n\nfunc (conn *Conn) Read(b []byte) (n int, err error) {\n\tif err = conn.Handleshake(); err != nil {\n\t\treturn\n\t}\n\treturn conn.c.Read(b)\n}\n\nfunc (conn *Conn) Write(b []byte) (n int, err error) {\n\tif err = conn.Handleshake(); err != nil {\n\t\treturn\n\t}\n\treturn conn.c.Write(b)\n}\n\nfunc (conn *Conn) Close() error {\n\treturn conn.c.Close()\n}\n\nfunc (conn *Conn) LocalAddr() net.Addr {\n\treturn conn.c.LocalAddr()\n}\n\nfunc (conn *Conn) RemoteAddr() net.Addr {\n\treturn conn.c.RemoteAddr()\n}\n\nfunc (conn *Conn) SetDeadline(t time.Time) error {\n\treturn conn.c.SetDeadline(t)\n}\n\nfunc (conn *Conn) SetReadDeadline(t time.Time) error {\n\treturn conn.c.SetReadDeadline(t)\n}\n\nfunc (conn *Conn) SetWriteDeadline(t time.Time) error {\n\treturn conn.c.SetWriteDeadline(t)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype connType uint32\n\nconst (\n\tconnWhisperConn = iota\n\tconnReadConn\n\tconnSendConn\n\tconnDelete\n)\n\ntype connection struct {\n\tsync.Mutex\n\tconn net.Conn\n\tactive bool\n\tanon bool\n\tjoins []string\n\tmsgCount int\n\tlastUse time.Time\n\talive bool\n\tconntype connType\n\tbot *bot\n\tname string\n}\n\nfunc newConnection(t connType) *connection {\n\treturn &connection{\n\t\tjoins: make([]string, 0),\n\t\tconntype: t,\n\t\tlastUse: time.Now(),\n\t\tname: randomHash(),\n\t}\n}\n\nfunc (conn *connection) login(pass string, nick string) {\n\tconn.anon = pass == \"\"\n\tif !conn.anon {\n\t\tconn.send(\"PASS \" + pass)\n\t\tconn.send(\"NICK \" + nick)\n\t\treturn\n\t}\n\tconn.send(\"NICK justinfan123\")\n}\n\nfunc (conn *connection) close() {\n\tif conn.conn != nil {\n\t\tconn.conn.Close()\n\t}\n\tfor _, channel := range conn.joins {\n\t\tconn.part(channel)\n\t}\n\tconn.alive = false\n}\n\nfunc (conn *connection) part(channel string) {\n\tchannel = strings.ToLower(channel)\n\tfor i, ch := range conn.joins {\n\t\tif ch == channel {\n\t\t\tconn.joins = append(conn.joins[:i], conn.joins[i+1:]...)\n\t\t}\n\t}\n}\n\nfunc (conn *connection) restore() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tLog.Error(\"cannot restore connection\")\n\t\t}\n\t}()\n\tif conn.conntype == connReadConn {\n\t\tvar i int\n\t\tvar channels []string\n\t\tfor index, co := range conn.bot.readconns {\n\t\t\tif conn == co {\n\t\t\t\ti = index\n\t\t\t\tchannels = co.joins\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tLog.Error(\"readconn died, lost joins:\", channels)\n\t\tconn.bot.Lock()\n\t\tconn.bot.readconns = append(conn.bot.readconns[:i], conn.bot.readconns[i+1:]...)\n\t\tconn.bot.Unlock()\n\t\tfor _, channel := range channels {\n\t\t\tconns := conn.bot.channels[channel]\n\t\t\tfor i, co := range conns {\n\t\t\t\tif conn == co {\n\t\t\t\t\tconn.bot.Lock()\n\t\t\t\t\tconn.bot.channels[channel] = append(conns[:i], conns[i+1:]...)\n\t\t\t\t\tconn.bot.Unlock()\n\t\t\t\t\tconn.part(channel)\n\t\t\t\t}\n\t\t\t}\n\t\t\tconn.bot.join <- channel\n\n\t\t}\n\n\t} else if conn.conntype == connSendConn {\n\t\tLog.Error(\"sendconn died\")\n\t\tvar i int\n\t\tfor index, co := range conn.bot.sendconns {\n\t\t\tif conn == co {\n\t\t\t\ti = index\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tconn.bot.Lock()\n\t\tconn.bot.sendconns = append(conn.bot.sendconns[:i], conn.bot.sendconns[i+1:]...)\n\t\tconn.bot.Unlock()\n\t} else if conn.conntype == connWhisperConn {\n\t\tLog.Error(\"whisperconn died, reconnecting\")\n\t\tconn.close()\n\t\tconn.bot.newConn(connWhisperConn)\n\t}\n\tconn.conntype = connDelete\n}\n\nfunc (conn *connection) connect(client *Client, pass string, nick string) {\n\tdialer := &net.Dialer{\n\t\tKeepAlive: time.Second * 10,\n\t}\n\n\tconn.bot = client.bot\n\tc, err := tls.DialWithDialer(dialer, \"tcp\", *addr, &tls.Config{})\n\tif err != nil {\n\t\tLog.Error(\"unable to connect to irc server\", err)\n\t\ttime.Sleep(2 * time.Second)\n\t\tconn.restore()\n\t\treturn\n\t}\n\tconn.conn = c\n\n\tconn.send(\"CAP REQ :twitch.tv\/tags twitch.tv\/commands\")\n\tconn.login(pass, nick)\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tLog.Error(\"error connecting\")\n\t\t}\n\t\tconn.restore()\n\t}()\n\ttp := textproto.NewReader(bufio.NewReader(conn.conn))\n\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tLog.Fatalf(\"[READERROR:%s] %s\", conn.name, err.Error())\n\t\t\t\/\/ conn.restore()\n\t\t\treturn\n\t\t}\n\t\tLog.Debugf(\"[TWITCH:%s] %s\", conn.name, line)\n\t\tif conn.conntype == connDelete {\n\t\t\tconn.restore()\n\t\t}\n\t\tif strings.HasPrefix(line, \"PING\") {\n\t\t\tconn.send(strings.Replace(line, \"PING\", \"PONG\", 1))\n\t\t} else if strings.HasPrefix(line, \"PONG\") {\n\t\t\tLog.Debug(\"PONG\")\n\t\t} else {\n\t\t\tif isWhisper(line) && conn.conntype != connWhisperConn {\n\t\t\t\t\/\/ throw away message\n\t\t\t} else {\n\t\t\t\tclient.toClient <- line\n\t\t\t}\n\t\t}\n\t\tconn.active = true\n\t}\n}\n\nfunc isWhisper(line string) bool {\n\tif !strings.Contains(line, \".tmi.twitch.tv WHISPER \") {\n\t\treturn false\n\t}\n\tspl := strings.SplitN(line, \" :\", 3)\n\tif strings.Contains(spl[1], \".tmi.twitch.tv WHISPER \") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (conn *connection) send(msg string) error {\n\tif conn.conn == nil {\n\t\tLog.Error(\"conn is nil\", conn, conn.conn)\n\t\treturn errors.New(\"connection is nil\")\n\t}\n\t_, err := fmt.Fprint(conn.conn, msg+\"\\r\\n\")\n\tif err != nil {\n\t\tLog.Error(\"error sending message\")\n\t\treturn err\n\t}\n\tLog.Debugf(\"[OUTGOING:%s] %s\", conn.name, msg)\n\treturn nil\n}\n\nfunc (conn *connection) reduceMsgCount() {\n\tconn.msgCount--\n}\n\nfunc (conn *connection) countMsg() {\n\tconn.msgCount++\n\ttime.AfterFunc(30*time.Second, conn.reduceMsgCount)\n}\n\nfunc randomHash() string {\n\tn := 5\n\tb := make([]byte, n)\n\tif _, err := rand.Read(b); err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%X\", b)\n}\n<commit_msg>readd restore<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype connType uint32\n\nconst (\n\tconnWhisperConn = iota\n\tconnReadConn\n\tconnSendConn\n\tconnDelete\n)\n\ntype connection struct {\n\tsync.Mutex\n\tconn net.Conn\n\tactive bool\n\tanon bool\n\tjoins []string\n\tmsgCount int\n\tlastUse time.Time\n\talive bool\n\tconntype connType\n\tbot *bot\n\tname string\n}\n\nfunc newConnection(t connType) *connection {\n\treturn &connection{\n\t\tjoins: make([]string, 0),\n\t\tconntype: t,\n\t\tlastUse: time.Now(),\n\t\tname: randomHash(),\n\t}\n}\n\nfunc (conn *connection) login(pass string, nick string) {\n\tconn.anon = pass == \"\"\n\tif !conn.anon {\n\t\tconn.send(\"PASS \" + pass)\n\t\tconn.send(\"NICK \" + nick)\n\t\treturn\n\t}\n\tconn.send(\"NICK justinfan123\")\n}\n\nfunc (conn *connection) close() {\n\tif conn.conn != nil {\n\t\tconn.conn.Close()\n\t}\n\tfor _, channel := range conn.joins {\n\t\tconn.part(channel)\n\t}\n\tconn.alive = false\n}\n\nfunc (conn *connection) part(channel string) {\n\tchannel = strings.ToLower(channel)\n\tfor i, ch := range conn.joins {\n\t\tif ch == channel {\n\t\t\tconn.joins = append(conn.joins[:i], conn.joins[i+1:]...)\n\t\t}\n\t}\n}\n\nfunc (conn *connection) restore() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tLog.Error(\"cannot restore connection\")\n\t\t}\n\t}()\n\tif conn.conntype == connReadConn {\n\t\tvar i int\n\t\tvar channels []string\n\t\tfor index, co := range conn.bot.readconns {\n\t\t\tif conn == co {\n\t\t\t\ti = index\n\t\t\t\tchannels = co.joins\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tLog.Error(\"readconn died, lost joins:\", channels)\n\t\tconn.bot.Lock()\n\t\tconn.bot.readconns = append(conn.bot.readconns[:i], conn.bot.readconns[i+1:]...)\n\t\tconn.bot.Unlock()\n\t\tfor _, channel := range channels {\n\t\t\tconns := conn.bot.channels[channel]\n\t\t\tfor i, co := range conns {\n\t\t\t\tif conn == co {\n\t\t\t\t\tconn.bot.Lock()\n\t\t\t\t\tconn.bot.channels[channel] = append(conns[:i], conns[i+1:]...)\n\t\t\t\t\tconn.bot.Unlock()\n\t\t\t\t\tconn.part(channel)\n\t\t\t\t}\n\t\t\t}\n\t\t\tconn.bot.join <- channel\n\n\t\t}\n\n\t} else if conn.conntype == connSendConn {\n\t\tLog.Error(\"sendconn died\")\n\t\tvar i int\n\t\tfor index, co := range conn.bot.sendconns {\n\t\t\tif conn == co {\n\t\t\t\ti = index\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tconn.bot.Lock()\n\t\tconn.bot.sendconns = append(conn.bot.sendconns[:i], conn.bot.sendconns[i+1:]...)\n\t\tconn.bot.Unlock()\n\t} else if conn.conntype == connWhisperConn {\n\t\tLog.Error(\"whisperconn died, reconnecting\")\n\t\tconn.close()\n\t\tconn.bot.newConn(connWhisperConn)\n\t}\n\tconn.conntype = connDelete\n}\n\nfunc (conn *connection) connect(client *Client, pass string, nick string) {\n\tdialer := &net.Dialer{\n\t\tKeepAlive: time.Second * 10,\n\t}\n\n\tconn.bot = client.bot\n\tc, err := tls.DialWithDialer(dialer, \"tcp\", *addr, &tls.Config{})\n\tif err != nil {\n\t\tLog.Error(\"unable to connect to irc server\", err)\n\t\ttime.Sleep(2 * time.Second)\n\t\tconn.restore()\n\t\treturn\n\t}\n\tconn.conn = c\n\n\tconn.send(\"CAP REQ :twitch.tv\/tags twitch.tv\/commands\")\n\tconn.login(pass, nick)\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tLog.Error(\"error connecting\")\n\t\t}\n\t\tconn.restore()\n\t}()\n\ttp := textproto.NewReader(bufio.NewReader(conn.conn))\n\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tLog.Errorf(\"[READERROR:%s] %s\", conn.name, err.Error())\n\t\t\tconn.restore()\n\t\t\treturn\n\t\t}\n\t\tLog.Debugf(\"[TWITCH:%s] %s\", conn.name, line)\n\t\tif conn.conntype == connDelete {\n\t\t\tconn.restore()\n\t\t}\n\t\tif strings.HasPrefix(line, \"PING\") {\n\t\t\tconn.send(strings.Replace(line, \"PING\", \"PONG\", 1))\n\t\t} else if strings.HasPrefix(line, \"PONG\") {\n\t\t\tLog.Debug(\"PONG\")\n\t\t} else {\n\t\t\tif isWhisper(line) && conn.conntype != connWhisperConn {\n\t\t\t\t\/\/ throw away message\n\t\t\t} else {\n\t\t\t\tclient.toClient <- line\n\t\t\t}\n\t\t}\n\t\tconn.active = true\n\t}\n}\n\nfunc isWhisper(line string) bool {\n\tif !strings.Contains(line, \".tmi.twitch.tv WHISPER \") {\n\t\treturn false\n\t}\n\tspl := strings.SplitN(line, \" :\", 3)\n\tif strings.Contains(spl[1], \".tmi.twitch.tv WHISPER \") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (conn *connection) send(msg string) error {\n\tif conn.conn == nil {\n\t\tLog.Error(\"conn is nil\", conn, conn.conn)\n\t\treturn errors.New(\"connection is nil\")\n\t}\n\t_, err := fmt.Fprint(conn.conn, msg+\"\\r\\n\")\n\tif err != nil {\n\t\tLog.Error(\"error sending message\")\n\t\treturn err\n\t}\n\tLog.Debugf(\"[OUTGOING:%s] %s\", conn.name, msg)\n\treturn nil\n}\n\nfunc (conn *connection) reduceMsgCount() {\n\tconn.msgCount--\n}\n\nfunc (conn *connection) countMsg() {\n\tconn.msgCount++\n\ttime.AfterFunc(30*time.Second, conn.reduceMsgCount)\n}\n\nfunc randomHash() string {\n\tn := 5\n\tb := make([]byte, n)\n\tif _, err := rand.Read(b); err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%X\", b)\n}\n<|endoftext|>"} {"text":"<commit_before>package gotcp\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/\/ Error type\nvar (\n\tErrConnClosing = errors.New(\"use of closed network connection\")\n\tErrWriteBlocking = errors.New(\"write packet was blocking\")\n\tErrReadBlocking = errors.New(\"read packet was blocking\")\n)\n\n\/\/ Conn exposes a set of callbacks for the various events that occur on a connection\ntype Conn struct {\n\tsrv *Server\n\tconn *net.TCPConn \/\/ the raw connection\n\textraData interface{} \/\/ to save extra data\n\tcloseOnce sync.Once \/\/ close the conn, once, per instance\n\tcloseFlag int32 \/\/ close flag\n\tcloseChan chan struct{} \/\/ close chanel\n\tpacketSendChan chan Packet \/\/ packet send chanel\n\tpacketReceiveChan chan Packet \/\/ packeet receive chanel\n}\n\n\/\/ ConnCallback is an interface of methods that are used as callbacks on a connection\ntype ConnCallback interface {\n\t\/\/ OnConnect is called when the connection was accepted,\n\t\/\/ If the return value of false is closed\n\tOnConnect(*Conn) bool\n\n\t\/\/ OnMessage is called when the connection receives a packet,\n\t\/\/ If the return value of false is closed\n\tOnMessage(*Conn, Packet) bool\n\n\t\/\/ OnClose is called when the connection closed\n\tOnClose(*Conn)\n}\n\n\/\/ newConn returns a wrapper of raw conn\nfunc newConn(conn *net.TCPConn, srv *Server) *Conn {\n\treturn &Conn{\n\t\tsrv: srv,\n\t\tconn: conn,\n\t\tcloseChan: make(chan struct{}),\n\t\tpacketSendChan: make(chan Packet, srv.config.PacketSendChanLimit),\n\t\tpacketReceiveChan: make(chan Packet, srv.config.PacketReceiveChanLimit),\n\t}\n}\n\n\/\/ GetExtraData gets the extra data from the Conn\nfunc (c *Conn) GetExtraData() interface{} {\n\treturn c.extraData\n}\n\n\/\/ PutExtraData puts the extra data with the Conn\nfunc (c *Conn) PutExtraData(data interface{}) {\n\tc.extraData = data\n}\n\n\/\/ GetRawConn returns the raw net.TCPConn from the Conn\nfunc (c *Conn) GetRawConn() *net.TCPConn {\n\treturn c.conn\n}\n\n\/\/ Close closes the connection\nfunc (c *Conn) Close() {\n\tc.closeOnce.Do(func() {\n\t\tatomic.StoreInt32(&c.closeFlag, 1)\n\t\tclose(c.closeChan)\n\t\tc.conn.Close()\n\t\tc.srv.callback.OnClose(c)\n\t})\n}\n\n\/\/ IsClosed indicates whether or not the connection is closed\nfunc (c *Conn) IsClosed() bool {\n\treturn atomic.LoadInt32(&c.closeFlag) == 1\n}\n\n\/\/ AsyncReadPacket async reads a packet, this method will never block\nfunc (c *Conn) AsyncReadPacket(timeout time.Duration) (Packet, error) {\n\tif c.IsClosed() {\n\t\treturn nil, ErrConnClosing\n\t}\n\n\tif timeout == 0 {\n\t\tselect {\n\t\tcase p := <-c.packetReceiveChan:\n\t\t\treturn p, nil\n\n\t\tdefault:\n\t\t\treturn nil, ErrReadBlocking\n\t\t}\n\n\t} else {\n\t\tselect {\n\t\tcase p := <-c.packetReceiveChan:\n\t\t\treturn p, nil\n\n\t\tcase <-c.closeChan:\n\t\t\treturn nil, ErrConnClosing\n\n\t\tcase <-time.After(timeout):\n\t\t\treturn nil, ErrReadBlocking\n\t\t}\n\t}\n}\n\n\/\/ AsyncWritePacket async writes a packet, this method will never block\nfunc (c *Conn) AsyncWritePacket(p Packet, timeout time.Duration) error {\n\tif c.IsClosed() {\n\t\treturn ErrConnClosing\n\t}\n\n\tif timeout == 0 {\n\t\tselect {\n\t\tcase c.packetSendChan <- p:\n\t\t\treturn nil\n\n\t\tdefault:\n\t\t\treturn ErrWriteBlocking\n\t\t}\n\n\t} else {\n\t\tselect {\n\t\tcase c.packetSendChan <- p:\n\t\t\treturn nil\n\n\t\tcase <-c.closeChan:\n\t\t\treturn ErrConnClosing\n\n\t\tcase <-time.After(timeout):\n\t\t\treturn ErrWriteBlocking\n\t\t}\n\t}\n}\n\n\/\/ Do it\nfunc (c *Conn) Do() {\n\tif !c.srv.callback.OnConnect(c) {\n\t\treturn\n\t}\n\n\tgo c.handleLoop()\n\tgo c.readLoop()\n\tgo c.writeLoop()\n}\n\nfunc (c *Conn) readLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tdefault:\n\t\t}\n\n\t\tp, err := c.srv.protocol.ReadPacket(c.conn)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tc.packetReceiveChan <- p\n\t}\n}\n\nfunc (c *Conn) writeLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tcase p := <-c.packetSendChan:\n\t\t\tif _, err := c.conn.Write(p.Serialize()); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *Conn) handleLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tcase p := <-c.packetReceiveChan:\n\t\t\tif !c.srv.callback.OnMessage(c, p) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>fix possible panic and blocking<commit_after>package gotcp\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\n\/\/ Error type\nvar (\n\tErrConnClosing = errors.New(\"use of closed network connection\")\n\tErrWriteBlocking = errors.New(\"write packet was blocking\")\n\tErrReadBlocking = errors.New(\"read packet was blocking\")\n)\n\n\/\/ Conn exposes a set of callbacks for the various events that occur on a connection\ntype Conn struct {\n\tsrv *Server\n\tconn *net.TCPConn \/\/ the raw connection\n\textraData interface{} \/\/ to save extra data\n\tcloseOnce sync.Once \/\/ close the conn, once, per instance\n\tcloseFlag int32 \/\/ close flag\n\tcloseChan chan struct{} \/\/ close chanel\n\tpacketSendChan chan Packet \/\/ packet send chanel\n\tpacketReceiveChan chan Packet \/\/ packeet receive chanel\n}\n\n\/\/ ConnCallback is an interface of methods that are used as callbacks on a connection\ntype ConnCallback interface {\n\t\/\/ OnConnect is called when the connection was accepted,\n\t\/\/ If the return value of false is closed\n\tOnConnect(*Conn) bool\n\n\t\/\/ OnMessage is called when the connection receives a packet,\n\t\/\/ If the return value of false is closed\n\tOnMessage(*Conn, Packet) bool\n\n\t\/\/ OnClose is called when the connection closed\n\tOnClose(*Conn)\n}\n\n\/\/ newConn returns a wrapper of raw conn\nfunc newConn(conn *net.TCPConn, srv *Server) *Conn {\n\treturn &Conn{\n\t\tsrv: srv,\n\t\tconn: conn,\n\t\tcloseChan: make(chan struct{}),\n\t\tpacketSendChan: make(chan Packet, srv.config.PacketSendChanLimit),\n\t\tpacketReceiveChan: make(chan Packet, srv.config.PacketReceiveChanLimit),\n\t}\n}\n\n\/\/ GetExtraData gets the extra data from the Conn\nfunc (c *Conn) GetExtraData() interface{} {\n\treturn c.extraData\n}\n\n\/\/ PutExtraData puts the extra data with the Conn\nfunc (c *Conn) PutExtraData(data interface{}) {\n\tc.extraData = data\n}\n\n\/\/ GetRawConn returns the raw net.TCPConn from the Conn\nfunc (c *Conn) GetRawConn() *net.TCPConn {\n\treturn c.conn\n}\n\n\/\/ Close closes the connection\nfunc (c *Conn) Close() {\n\tc.closeOnce.Do(func() {\n\t\tatomic.StoreInt32(&c.closeFlag, 1)\n\t\tclose(c.closeChan)\n\t\tclose(c.packetSendChan)\n\t\tclose(c.packetReceiveChan)\n\t\tc.conn.Close()\n\t\tc.srv.callback.OnClose(c)\n\t})\n}\n\n\/\/ IsClosed indicates whether or not the connection is closed\nfunc (c *Conn) IsClosed() bool {\n\treturn atomic.LoadInt32(&c.closeFlag) == 1\n}\n\n\/\/ AsyncWritePacket async writes a packet, this method will never block\nfunc (c *Conn) AsyncWritePacket(p Packet, timeout time.Duration) (err error) {\n\tif c.IsClosed() {\n\t\treturn ErrConnClosing\n\t}\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = ErrConnClosing\n\t\t}\n\t}()\n\n\tif timeout == 0 {\n\t\tselect {\n\t\tcase c.packetSendChan <- p:\n\t\t\treturn nil\n\n\t\tdefault:\n\t\t\treturn ErrWriteBlocking\n\t\t}\n\n\t} else {\n\t\tselect {\n\t\tcase c.packetSendChan <- p:\n\t\t\treturn nil\n\n\t\tcase <-c.closeChan:\n\t\t\treturn ErrConnClosing\n\n\t\tcase <-time.After(timeout):\n\t\t\treturn ErrWriteBlocking\n\t\t}\n\t}\n}\n\n\/\/ Do it\nfunc (c *Conn) Do() {\n\tif !c.srv.callback.OnConnect(c) {\n\t\treturn\n\t}\n\n\tgo c.handleLoop()\n\tgo c.readLoop()\n\tgo c.writeLoop()\n}\n\nfunc (c *Conn) readLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tdefault:\n\t\t}\n\n\t\tp, err := c.srv.protocol.ReadPacket(c.conn)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tc.packetReceiveChan <- p\n\t}\n}\n\nfunc (c *Conn) writeLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tcase p := <-c.packetSendChan:\n\t\t\tif c.IsClosed() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err := c.conn.Write(p.Serialize()); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *Conn) handleLoop() {\n\tc.srv.waitGroup.Add(1)\n\tdefer func() {\n\t\trecover()\n\t\tc.Close()\n\t\tc.srv.waitGroup.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.srv.exitChan:\n\t\t\treturn\n\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\n\t\tcase p := <-c.packetReceiveChan:\n\t\t\tif c.IsClosed() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !c.srv.callback.OnMessage(c, p) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package testblas\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\ntype Dgerer interface {\n\tDger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int)\n}\n\nfunc DgerTest(t *testing.T, blasser Dgerer) {\n\tfor _, test := range []struct {\n\t\tname string\n\t\ta [][]float64\n\t\tm int\n\t\tn int\n\t\tx []float64\n\t\ty []float64\n\t\tincX int\n\t\tincY int\n\n\t\ttrueAns [][]float64\n\t}{\n\t\t{\n\t\t\tname: \"M gt N inc 1\",\n\t\t\tm: 5,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t\t{8, 9, -10},\n\t\t\t\t{-12, -14, -6},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 1, 2},\n\t\t\ty: []float64{-1.1, 5, 0},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\ttrueAns: [][]float64{{3.5, -7.6, 3.5}, {5.9, -12.2, 3.3}, {-1.3, -4.3, -9.7}, {6.9, 14, -10}, {-14.2, -4, -6}},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N inc 1\",\n\t\t\tm: 3,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0},\n\t\t\ty: []float64{-1.1, 5, 0},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\ttrueAns: [][]float64{{3.5, -7.6, 3.5}, {5.9, -12.2, 3.3}, {-1.3, -4.3, -9.7}},\n\t\t},\n\n\t\t{\n\t\t\tname: \"M lt N inc 1\",\n\t\t\tm: 3,\n\t\t\tn: 6,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5, 4.8, 1.11, -9},\n\t\t\t\t{2.6, 2.8, 3.3, -3.4, 6.2, -8.7},\n\t\t\t\t{-1.3, -4.3, -9.7, -3.1, 8.9, 8.9},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0},\n\t\t\ty: []float64{-1.1, 5, 0, 9, 19, 22},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\ttrueAns: [][]float64{{3.5, -7.6, 3.5, -13.2, -36.89, -53}, {5.9, -12.2, 3.3, -30.4, -50.8, -74.7}, {-1.3, -4.3, -9.7, -3.1, 8.9, 8.9}},\n\t\t},\n\t\t{\n\t\t\tname: \"M gt N inc not 1\",\n\t\t\tm: 5,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t\t{8, 9, -10},\n\t\t\t\t{-12, -14, -6},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 1, 2, 6, 0, 9, 7},\n\t\t\ty: []float64{-1.1, 5, 0, 8, 7, -5, 7},\n\t\t\tincX: 2,\n\t\t\tincY: 3,\n\t\t\ttrueAns: [][]float64{{3.5, -13.6, -10.5}, {2.6, 2.8, 3.3}, {-3.5, 11.7, 4.3}, {8, 9, -10}, {-19.700000000000003, 42, 43}},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N inc not 1\",\n\t\t\tm: 3,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 8, 7, -9, 7, -6, 12, 6, 6, 6, -11},\n\t\t\ty: []float64{-1.1, 5, 0, 0, 9, 8, 6},\n\t\t\tincX: 4,\n\t\t\tincY: 3,\n\t\t\ttrueAns: [][]float64{{3.5, 2.4, -8.5}, {-5.1, 2.8, 45.3}, {-14.5, -4.3, 62.3}},\n\t\t},\n\t\t{\n\t\t\tname: \"M lt N inc not 1\",\n\t\t\tm: 3,\n\t\t\tn: 6,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5, 4.8, 1.11, -9},\n\t\t\t\t{2.6, 2.8, 3.3, -3.4, 6.2, -8.7},\n\t\t\t\t{-1.3, -4.3, -9.7, -3.1, 8.9, 8.9},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 0, 8, 0, 9, -3},\n\t\t\ty: []float64{-1.1, 5, 0, 9, 19, 22, 11, -8.11, -9.22, 9.87, 7},\n\t\t\tincX: 3,\n\t\t\tincY: 2,\n\t\t\ttrueAns: [][]float64{{3.5, 2.4, -34.5, -17.2, 19.55, -23}, {2.6, 2.8, 3.3, -3.4, 6.2, -8.7}, {-11.2, -4.3, 161.3, 95.9, -74.08, 71.9}},\n\t\t},\n\t\t{\n\t\t\tname: \"Y NaN element\",\n\t\t\tm: 1,\n\t\t\tn: 1,\n\t\t\ta: [][]float64{{1.3}},\n\t\t\tx: []float64{1.3},\n\t\t\ty: []float64{math.NaN()},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\ttrueAns: [][]float64{{math.NaN()}},\n\t\t},\n\t} {\n\t\t\/\/ TODO: Add tests where a is longer\n\t\t\/\/ TODO: Add panic tests\n\t\t\/\/ TODO: Add negative increment tests\n\n\t\tx := sliceCopy(test.x)\n\t\ty := sliceCopy(test.y)\n\n\t\ta := sliceOfSliceCopy(test.a)\n\n\t\t\/\/ Test with row major\n\t\talpha := 1.0\n\t\taFlat := flatten(a)\n\t\tblasser.Dger(test.m, test.n, alpha, x, test.incX, y, test.incY, aFlat, test.n)\n\t\tans := unflatten(aFlat, test.m, test.n)\n\t\tdgercomp(t, x, test.x, y, test.y, ans, test.trueAns, test.name+\" row maj\")\n\n\t\t\/\/ Test with different alpha\n\t\talpha = 4.0\n\t\taFlat = flatten(a)\n\t\tblasser.Dger(test.m, test.n, alpha, x, test.incX, y, test.incY, aFlat, test.n)\n\t\tans = unflatten(aFlat, test.m, test.n)\n\t\ttrueCopy := sliceOfSliceCopy(test.trueAns)\n\t\tfor i := range trueCopy {\n\t\t\tfor j := range trueCopy[i] {\n\t\t\t\ttrueCopy[i][j] = alpha*(trueCopy[i][j]-a[i][j]) + a[i][j]\n\t\t\t}\n\t\t}\n\t\tdgercomp(t, x, test.x, y, test.y, ans, trueCopy, test.name+\" row maj alpha\")\n\t}\n}\n\nfunc dgercomp(t *testing.T, x, xCopy, y, yCopy []float64, ans [][]float64, trueAns [][]float64, name string) {\n\tif !dSliceEqual(x, xCopy) {\n\t\tt.Errorf(\"case %v: x modified during call to dger\\n%v\\n%v\", name, x, xCopy)\n\t}\n\tif !dSliceEqual(y, yCopy) {\n\t\tt.Errorf(\"case %v: y modified during call to dger\\n%v\\n%v\", name, y, yCopy)\n\t}\n\n\tfor i := range ans {\n\t\tif !dSliceTolEqual(ans[i], trueAns[i]) {\n\t\t\tt.Errorf(\"case %v: answer mismatch at %v. Expected %v, Found %v\", name, i, trueAns, ans)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>blas\/testblas: Added 7x7 tests to dger.<commit_after>package testblas\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\ntype Dgerer interface {\n\tDger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int)\n}\n\nfunc DgerTest(t *testing.T, blasser Dgerer) {\n\tfor _, test := range []struct {\n\t\tname string\n\t\ta [][]float64\n\t\tm int\n\t\tn int\n\t\tx []float64\n\t\ty []float64\n\t\tincX int\n\t\tincY int\n\n\t\twant [][]float64\n\t}{\n\t\t{\n\t\t\tname: \"M gt N inc 1\",\n\t\t\tm: 5,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t\t{8, 9, -10},\n\t\t\t\t{-12, -14, -6},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 1, 2},\n\t\t\ty: []float64{-1.1, 5, 0},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\twant: [][]float64{{3.5, -7.6, 3.5}, {5.9, -12.2, 3.3}, {-1.3, -4.3, -9.7}, {6.9, 14, -10}, {-14.2, -4, -6}},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N inc 1\",\n\t\t\tm: 3,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0},\n\t\t\ty: []float64{-1.1, 5, 0},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\twant: [][]float64{{3.5, -7.6, 3.5}, {5.9, -12.2, 3.3}, {-1.3, -4.3, -9.7}},\n\t\t},\n\n\t\t{\n\t\t\tname: \"M lt N inc 1\",\n\t\t\tm: 3,\n\t\t\tn: 6,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5, 4.8, 1.11, -9},\n\t\t\t\t{2.6, 2.8, 3.3, -3.4, 6.2, -8.7},\n\t\t\t\t{-1.3, -4.3, -9.7, -3.1, 8.9, 8.9},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0},\n\t\t\ty: []float64{-1.1, 5, 0, 9, 19, 22},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\twant: [][]float64{{3.5, -7.6, 3.5, -13.2, -36.89, -53}, {5.9, -12.2, 3.3, -30.4, -50.8, -74.7}, {-1.3, -4.3, -9.7, -3.1, 8.9, 8.9}},\n\t\t},\n\t\t{\n\t\t\tname: \"M gt N inc not 1\",\n\t\t\tm: 5,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t\t{8, 9, -10},\n\t\t\t\t{-12, -14, -6},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 1, 2, 6, 0, 9, 7},\n\t\t\ty: []float64{-1.1, 5, 0, 8, 7, -5, 7},\n\t\t\tincX: 2,\n\t\t\tincY: 3,\n\t\t\twant: [][]float64{{3.5, -13.6, -10.5}, {2.6, 2.8, 3.3}, {-3.5, 11.7, 4.3}, {8, 9, -10}, {-19.700000000000003, 42, 43}},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N inc not 1\",\n\t\t\tm: 3,\n\t\t\tn: 3,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5},\n\t\t\t\t{2.6, 2.8, 3.3},\n\t\t\t\t{-1.3, -4.3, -9.7},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 8, 7, -9, 7, -6, 12, 6, 6, 6, -11},\n\t\t\ty: []float64{-1.1, 5, 0, 0, 9, 8, 6},\n\t\t\tincX: 4,\n\t\t\tincY: 3,\n\t\t\twant: [][]float64{{3.5, 2.4, -8.5}, {-5.1, 2.8, 45.3}, {-14.5, -4.3, 62.3}},\n\t\t},\n\t\t{\n\t\t\tname: \"M lt N inc not 1\",\n\t\t\tm: 3,\n\t\t\tn: 6,\n\t\t\ta: [][]float64{\n\t\t\t\t{1.3, 2.4, 3.5, 4.8, 1.11, -9},\n\t\t\t\t{2.6, 2.8, 3.3, -3.4, 6.2, -8.7},\n\t\t\t\t{-1.3, -4.3, -9.7, -3.1, 8.9, 8.9},\n\t\t\t},\n\t\t\tx: []float64{-2, -3, 0, 0, 8, 0, 9, -3},\n\t\t\ty: []float64{-1.1, 5, 0, 9, 19, 22, 11, -8.11, -9.22, 9.87, 7},\n\t\t\tincX: 3,\n\t\t\tincY: 2,\n\t\t\twant: [][]float64{{3.5, 2.4, -34.5, -17.2, 19.55, -23}, {2.6, 2.8, 3.3, -3.4, 6.2, -8.7}, {-11.2, -4.3, 161.3, 95.9, -74.08, 71.9}},\n\t\t},\n\t\t{\n\t\t\tname: \"Y NaN element\",\n\t\t\tm: 1,\n\t\t\tn: 1,\n\t\t\ta: [][]float64{{1.3}},\n\t\t\tx: []float64{1.3},\n\t\t\ty: []float64{math.NaN()},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\twant: [][]float64{{math.NaN()}},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N large inc 1\",\n\t\t\tm: 7,\n\t\t\tn: 7,\n\t\t\tx: []float64{6.2, -5, 88.68, 43.4, -30.5, -40.2, 19.9},\n\t\t\ty: []float64{1.5, 21.7, -28.7, -11.9, 18.1, 3.1, 21},\n\t\t\ta: [][]float64{\n\t\t\t\t{-20.5, 17.1, -8.4, -23.8, 3.9, 7.7, 6.25},\n\t\t\t\t{2.9, -0.29, 25.6, -9.4, 36.5, 9.7, 2.3},\n\t\t\t\t{4.1, -34.1, 10.3, 4.5, -42.05, 9.4, 4},\n\t\t\t\t{19.2, 9.8, -32.7, 4.1, 4.4, -22.5, -7.8},\n\t\t\t\t{3.6, -24.5, 21.7, 8.6, -13.82, 38.05, -2.29},\n\t\t\t\t{39.4, -40.5, 7.9, -2.5, -7.7, 18.1, -25.5},\n\t\t\t\t{-18.5, 43.2, 2.1, 30.1, 3.02, -31.1, -7.6},\n\t\t\t},\n\t\t\tincX: 1,\n\t\t\tincY: 1,\n\t\t\twant: [][]float64{\n\t\t\t\t{-11.2, 151.64, -186.34, -97.58, 116.12, 26.92, 136.45},\n\t\t\t\t{-4.6, -108.79, 169.1, 50.1, -54, -5.8, -102.7},\n\t\t\t\t{137.12, 1890.256, -2534.816, -1050.792, 1563.058, 284.308, 1866.28},\n\t\t\t\t{84.3, 951.58, -1278.28, -512.36, 789.94, 112.04, 903.6},\n\t\t\t\t{-42.15, -686.35, 897.05, 371.55, -565.87, -56.5, -642.79},\n\t\t\t\t{-20.9, -912.84, 1161.64, 475.88, -735.32, -106.52, -869.7},\n\t\t\t\t{11.35, 475.03, -569.03, -206.71, 363.21, 30.59, 410.3},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"M eq N large inc not 1\",\n\t\t\tm: 7,\n\t\t\tn: 7,\n\t\t\tx: []float64{6.2, 100, 200, -5, 300, 400, 88.68, 100, 200, 43.4, 300, 400, -30.5, 100, 200, -40.2, 300, 400, 19.9},\n\t\t\ty: []float64{1.5, 100, 200, 300, 21.7, 100, 200, 300, -28.7, 100, 200, 300, -11.9, 100, 200, 300, 18.1, 100, 200, 300, 3.1, 100, 200, 300, 21},\n\t\t\ta: [][]float64{\n\t\t\t\t{-20.5, 17.1, -8.4, -23.8, 3.9, 7.7, 6.25},\n\t\t\t\t{2.9, -0.29, 25.6, -9.4, 36.5, 9.7, 2.3},\n\t\t\t\t{4.1, -34.1, 10.3, 4.5, -42.05, 9.4, 4},\n\t\t\t\t{19.2, 9.8, -32.7, 4.1, 4.4, -22.5, -7.8},\n\t\t\t\t{3.6, -24.5, 21.7, 8.6, -13.82, 38.05, -2.29},\n\t\t\t\t{39.4, -40.5, 7.9, -2.5, -7.7, 18.1, -25.5},\n\t\t\t\t{-18.5, 43.2, 2.1, 30.1, 3.02, -31.1, -7.6},\n\t\t\t},\n\t\t\tincX: 3,\n\t\t\tincY: 4,\n\t\t\twant: [][]float64{\n\t\t\t\t{-11.2, 151.64, -186.34, -97.58, 116.12, 26.92, 136.45},\n\t\t\t\t{-4.6, -108.79, 169.1, 50.1, -54, -5.8, -102.7},\n\t\t\t\t{137.12, 1890.256, -2534.816, -1050.792, 1563.058, 284.308, 1866.28},\n\t\t\t\t{84.3, 951.58, -1278.28, -512.36, 789.94, 112.04, 903.6},\n\t\t\t\t{-42.15, -686.35, 897.05, 371.55, -565.87, -56.5, -642.79},\n\t\t\t\t{-20.9, -912.84, 1161.64, 475.88, -735.32, -106.52, -869.7},\n\t\t\t\t{11.35, 475.03, -569.03, -206.71, 363.21, 30.59, 410.3},\n\t\t\t},\n\t\t},\n\t} {\n\t\t\/\/ TODO: Add tests where a is longer\n\t\t\/\/ TODO: Add panic tests\n\t\t\/\/ TODO: Add negative increment tests\n\n\t\tx := sliceCopy(test.x)\n\t\ty := sliceCopy(test.y)\n\n\t\ta := sliceOfSliceCopy(test.a)\n\n\t\t\/\/ Test with row major\n\t\talpha := 1.0\n\t\taFlat := flatten(a)\n\t\tblasser.Dger(test.m, test.n, alpha, x, test.incX, y, test.incY, aFlat, test.n)\n\t\tans := unflatten(aFlat, test.m, test.n)\n\t\tdgercomp(t, x, test.x, y, test.y, ans, test.want, test.name+\" row maj\")\n\n\t\t\/\/ Test with different alpha\n\t\talpha = 4.0\n\t\taFlat = flatten(a)\n\t\tblasser.Dger(test.m, test.n, alpha, x, test.incX, y, test.incY, aFlat, test.n)\n\t\tans = unflatten(aFlat, test.m, test.n)\n\t\ttrueCopy := sliceOfSliceCopy(test.want)\n\t\tfor i := range trueCopy {\n\t\t\tfor j := range trueCopy[i] {\n\t\t\t\ttrueCopy[i][j] = alpha*(trueCopy[i][j]-a[i][j]) + a[i][j]\n\t\t\t}\n\t\t}\n\t\tdgercomp(t, x, test.x, y, test.y, ans, trueCopy, test.name+\" row maj alpha\")\n\t}\n}\n\nfunc dgercomp(t *testing.T, x, xCopy, y, yCopy []float64, ans [][]float64, trueAns [][]float64, name string) {\n\tif !dSliceEqual(x, xCopy) {\n\t\tt.Errorf(\"case %v: x modified during call to dger\\n%v\\n%v\", name, x, xCopy)\n\t}\n\tif !dSliceEqual(y, yCopy) {\n\t\tt.Errorf(\"case %v: y modified during call to dger\\n%v\\n%v\", name, y, yCopy)\n\t}\n\n\tfor i := range ans {\n\t\tif !dSliceTolEqual(ans[i], trueAns[i]) {\n\t\t\tt.Errorf(\"case %v: answer mismatch at %v.\\nExpected %v,\\nFound %v\", name, i, trueAns, ans)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Qubit Digital Ltd.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ Package logspray is a collection of tools for streaming and indexing\n\/\/ large volumes of dynamic logs.\n\npackage indexer\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/QubitProducts\/logspray\/proto\/logspray\"\n\t\"github.com\/QubitProducts\/logspray\/ql\"\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Search searches the shard file for messages in the provided time range,\n\/\/ matched by matcher, and passes them to msgFunc. If the reverse is true the\n\/\/ file will be searched in reverse order\nfunc (s *ShardFile) Search(ctx context.Context, msgFunc logspray.MessageFunc, matcher ql.MatchFunc, from, to time.Time, count, offset uint64, reverse bool) error {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\tr, err := os.Open(s.fn)\n\tdefer r.Close()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read file %s\", s.fn)\n\t}\n\n\thdr, err := readMessageFromFile(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read file header %s\", s.fn)\n\t}\n\terr = msgFunc(hdr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif reverse {\n\t\tr.Seek(0, io.SeekEnd)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tvar nmsg *logspray.Message\n\t\t\tvar err error\n\t\t\tif !reverse {\n\t\t\t\tnmsg, err = readMessageFromFile(r)\n\t\t\t} else {\n\t\t\t\tnmsg, err = readMessageFromFileEnd(r)\n\t\t\t}\n\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to read message from %s\", s.fn)\n\t\t\t}\n\n\t\t\tt, _ := ptypes.Timestamp(nmsg.Time)\n\t\t\tif t.Before(from) || t.After(to) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !matcher(hdr, nmsg) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif offset > 0 {\n\t\t\t\toffset--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgFunc(nmsg)\n\t\t\tcount--\n\t\t\tif count == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *ShardFile) writeMessageToFile(ctx context.Context, m *logspray.Message) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\tvar err error\n\n\tif s.writer == nil {\n\t\tdir := filepath.Dir(s.fn)\n\t\terr := os.MkdirAll(dir, 0777)\n\t\ts.writer, err = os.Create(s.fn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s.pb {\n\t\t\thm := &logspray.Message{\n\t\t\t\tControlMessage: logspray.Message_SETHEADER,\n\t\t\t\tStreamID: m.StreamID,\n\t\t\t\tTime: m.Time,\n\t\t\t\tLabels: s.labels,\n\t\t\t}\n\t\t\tsz, err := writePBMessageToFile(s.writer, hm)\n\t\t\tif err != nil {\n\t\t\t\ts.writer = nil\n\t\t\t\treturn errors.Wrapf(err, \"failed writing file header\")\n\t\t\t}\n\t\t\ts.offset += int64(sz)\n\t\t}\n\t}\n\n\tif !s.pb {\n\t\tmt, _ := ptypes.Timestamp(m.Time)\n\t\t_, err = fmt.Fprintf(s.writer, \"%s %s\\n\", mt.Format(time.RFC3339Nano), m.Text)\n\t\treturn errors.Wrapf(err, \"failed writing raw file %s\", s.fn)\n\t}\n\n\tsz, err := writePBMessageToFile(s.writer, m)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed writing to %s\", s.fn)\n\t}\n\ts.offset += int64(sz)\n\n\treturn err\n}\n\nfunc writePBMessageToFile(w io.Writer, msg *logspray.Message) (uint32, error) {\n\tbs, err := proto.Marshal(msg)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"marshal protobuf failed\")\n\t}\n\n\t\/\/ We use a uin16 for the size field, we'll drop anything that's\n\t\/\/ too big here.\n\tif len(bs) > 65535 {\n\t\treturn 0, errors.Errorf(\"marshaled protobuf is %d bytes too large\", len(bs)-65535)\n\t}\n\n\tbuf := bytes.NewBuffer([]byte{})\n\tbinary.Write(buf, binary.LittleEndian, uint16(len(bs)))\n\tbuf.Write(bs)\n\tbinary.Write(buf, binary.LittleEndian, uint16(len(bs)))\n\n\tn, err := w.Write(buf.Bytes())\n\tif n != len(buf.Bytes()) || err != nil {\n\t\treturn 0, errors.Wrap(err, \"write pb to file failed\")\n\t}\n\n\treturn uint32(len(buf.Bytes())), nil\n}\n\nfunc readMessageFromFile(r io.Reader) (*logspray.Message, error) {\n\tszbs := make([]byte, 2)\n\tszbuf := bytes.NewBuffer(szbs)\n\tn, err := r.Read(szbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message size header\")\n\t}\n\tif n != len(szbs) {\n\t\treturn nil, errors.New(\"short read for message size header\")\n\t}\n\n\tszbytes := uint16(0)\n\terr = binary.Read(szbuf, binary.LittleEndian, &szbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to decode message size\")\n\t}\n\n\t\/\/ We read the protobuf, and the trailing size bytes.\n\tpbbs := make([]byte, szbytes+2)\n\tn, err = r.Read(pbbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message\")\n\t}\n\tif n != len(pbbs) {\n\t\treturn nil, errors.New(\"short read for message\")\n\t}\n\n\ttrailSzbytes := uint16(0)\n\tszbuf = bytes.NewBuffer(pbbs[len(pbbs)-2:])\n\terr = binary.Read(szbuf, binary.LittleEndian, &trailSzbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed reading trail size\")\n\t}\n\n\tif szbytes != trailSzbytes {\n\t\treturn nil, errors.Wrap(err, \"header and trail size mismatch\")\n\t}\n\n\tmsg := logspray.Message{}\n\terr = proto.Unmarshal(pbbs[:len(pbbs)-2], &msg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to umarshal log message proto\")\n\t}\n\n\treturn &msg, nil\n}\n\n\/\/ readMessageFromFileEnd tries to read a message before the current position\n\/\/ of the io.ReadSeeker\nfunc readMessageFromFileEnd(r io.ReadSeeker) (*logspray.Message, error) {\n\tif off, _ := r.Seek(0, io.SeekCurrent); off == 0 {\n\t\treturn nil, io.EOF\n\t}\n\n\t_, err := r.Seek(-2, io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not seek back to message trailer\")\n\t}\n\n\tszbs := make([]byte, 2)\n\tszbuf := bytes.NewBuffer(szbs)\n\tn, err := r.Read(szbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message size trailer\")\n\t}\n\tif n != len(szbs) {\n\t\treturn nil, errors.New(\"short read for message size trailer\")\n\t}\n\n\tszbytes := uint16(0)\n\terr = binary.Read(szbuf, binary.LittleEndian, &szbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to decode message size trailer\")\n\t}\n\n\tstart, err := r.Seek(-1*int64(4+szbytes), io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not seek back to message header\")\n\t}\n\n\tmsg, err := readMessageFromFile(r)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to read message\")\n\t}\n\n\t_, err = r.Seek(start, io.SeekStart)\n\n\treturn msg, nil\n}\n<commit_msg>shardfile: fix crash on nil timestamp<commit_after>\/\/ Copyright 2016 Qubit Digital Ltd.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ Package logspray is a collection of tools for streaming and indexing\n\/\/ large volumes of dynamic logs.\n\npackage indexer\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/QubitProducts\/logspray\/proto\/logspray\"\n\t\"github.com\/QubitProducts\/logspray\/ql\"\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Search searches the shard file for messages in the provided time range,\n\/\/ matched by matcher, and passes them to msgFunc. If the reverse is true the\n\/\/ file will be searched in reverse order\nfunc (s *ShardFile) Search(ctx context.Context, msgFunc logspray.MessageFunc, matcher ql.MatchFunc, from, to time.Time, count, offset uint64, reverse bool) error {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\tr, err := os.Open(s.fn)\n\tdefer r.Close()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read file %s\", s.fn)\n\t}\n\n\thdr, err := readMessageFromFile(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read file header %s\", s.fn)\n\t}\n\terr = msgFunc(hdr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif reverse {\n\t\tr.Seek(0, io.SeekEnd)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tvar nmsg *logspray.Message\n\t\t\tvar err error\n\t\t\tif !reverse {\n\t\t\t\tnmsg, err = readMessageFromFile(r)\n\t\t\t} else {\n\t\t\t\tnmsg, err = readMessageFromFileEnd(r)\n\t\t\t}\n\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to read message from %s\", s.fn)\n\t\t\t}\n\n\t\t\tif nmsg.Time == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt, _ := ptypes.Timestamp(nmsg.Time)\n\t\t\tif t.Before(from) || t.After(to) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !matcher(hdr, nmsg) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif offset > 0 {\n\t\t\t\toffset--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgFunc(nmsg)\n\t\t\tcount--\n\t\t\tif count == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *ShardFile) writeMessageToFile(ctx context.Context, m *logspray.Message) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\tvar err error\n\n\tif s.writer == nil {\n\t\tdir := filepath.Dir(s.fn)\n\t\terr := os.MkdirAll(dir, 0777)\n\t\ts.writer, err = os.Create(s.fn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s.pb {\n\t\t\thm := &logspray.Message{\n\t\t\t\tControlMessage: logspray.Message_SETHEADER,\n\t\t\t\tStreamID: m.StreamID,\n\t\t\t\tTime: m.Time,\n\t\t\t\tLabels: s.labels,\n\t\t\t}\n\t\t\tsz, err := writePBMessageToFile(s.writer, hm)\n\t\t\tif err != nil {\n\t\t\t\ts.writer = nil\n\t\t\t\treturn errors.Wrapf(err, \"failed writing file header\")\n\t\t\t}\n\t\t\ts.offset += int64(sz)\n\t\t}\n\t}\n\n\tif !s.pb {\n\t\tmt, _ := ptypes.Timestamp(m.Time)\n\t\t_, err = fmt.Fprintf(s.writer, \"%s %s\\n\", mt.Format(time.RFC3339Nano), m.Text)\n\t\treturn errors.Wrapf(err, \"failed writing raw file %s\", s.fn)\n\t}\n\n\tsz, err := writePBMessageToFile(s.writer, m)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed writing to %s\", s.fn)\n\t}\n\ts.offset += int64(sz)\n\n\treturn err\n}\n\nfunc writePBMessageToFile(w io.Writer, msg *logspray.Message) (uint32, error) {\n\tbs, err := proto.Marshal(msg)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"marshal protobuf failed\")\n\t}\n\n\t\/\/ We use a uin16 for the size field, we'll drop anything that's\n\t\/\/ too big here.\n\tif len(bs) > 65535 {\n\t\treturn 0, errors.Errorf(\"marshaled protobuf is %d bytes too large\", len(bs)-65535)\n\t}\n\n\tbuf := bytes.NewBuffer([]byte{})\n\tbinary.Write(buf, binary.LittleEndian, uint16(len(bs)))\n\tbuf.Write(bs)\n\tbinary.Write(buf, binary.LittleEndian, uint16(len(bs)))\n\n\tn, err := w.Write(buf.Bytes())\n\tif n != len(buf.Bytes()) || err != nil {\n\t\treturn 0, errors.Wrap(err, \"write pb to file failed\")\n\t}\n\n\treturn uint32(len(buf.Bytes())), nil\n}\n\nfunc readMessageFromFile(r io.Reader) (*logspray.Message, error) {\n\tszbs := make([]byte, 2)\n\tszbuf := bytes.NewBuffer(szbs)\n\tn, err := r.Read(szbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message size header\")\n\t}\n\tif n != len(szbs) {\n\t\treturn nil, errors.New(\"short read for message size header\")\n\t}\n\n\tszbytes := uint16(0)\n\terr = binary.Read(szbuf, binary.LittleEndian, &szbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to decode message size\")\n\t}\n\n\t\/\/ We read the protobuf, and the trailing size bytes.\n\tpbbs := make([]byte, szbytes+2)\n\tn, err = r.Read(pbbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message\")\n\t}\n\tif n != len(pbbs) {\n\t\treturn nil, errors.New(\"short read for message\")\n\t}\n\n\ttrailSzbytes := uint16(0)\n\tszbuf = bytes.NewBuffer(pbbs[len(pbbs)-2:])\n\terr = binary.Read(szbuf, binary.LittleEndian, &trailSzbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed reading trail size\")\n\t}\n\n\tif szbytes != trailSzbytes {\n\t\treturn nil, errors.Wrap(err, \"header and trail size mismatch\")\n\t}\n\n\tmsg := logspray.Message{}\n\terr = proto.Unmarshal(pbbs[:len(pbbs)-2], &msg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to umarshal log message proto\")\n\t}\n\n\treturn &msg, nil\n}\n\n\/\/ readMessageFromFileEnd tries to read a message before the current position\n\/\/ of the io.ReadSeeker\nfunc readMessageFromFileEnd(r io.ReadSeeker) (*logspray.Message, error) {\n\tif off, _ := r.Seek(0, io.SeekCurrent); off == 0 {\n\t\treturn nil, io.EOF\n\t}\n\n\t_, err := r.Seek(-2, io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not seek back to message trailer\")\n\t}\n\n\tszbs := make([]byte, 2)\n\tszbuf := bytes.NewBuffer(szbs)\n\tn, err := r.Read(szbs)\n\tif err == io.EOF {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed read message size trailer\")\n\t}\n\tif n != len(szbs) {\n\t\treturn nil, errors.New(\"short read for message size trailer\")\n\t}\n\n\tszbytes := uint16(0)\n\terr = binary.Read(szbuf, binary.LittleEndian, &szbytes)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to decode message size trailer\")\n\t}\n\n\tstart, err := r.Seek(-1*int64(4+szbytes), io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not seek back to message header\")\n\t}\n\n\tmsg, err := readMessageFromFile(r)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to read message\")\n\t}\n\n\t_, err = r.Seek(start, io.SeekStart)\n\n\treturn msg, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pq\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n)\n\nvar (\n\terrCopyInClosed = errors.New(\"pq: copyin statement has already been closed\")\n\terrBinaryCopyNotSupported = errors.New(\"pq: only text format supported for COPY\")\n\terrCopyToNotSupported = errors.New(\"pq: COPY TO is not supported\")\n\terrCopyNotSupportedOutsideTxn = errors.New(\"pq: COPY is only allowed inside a transaction\")\n\terrCopyInProgress = errors.New(\"pq: COPY in progress\")\n)\n\n\/\/ CopyIn creates a COPY FROM statement which can be prepared with\n\/\/ Tx.Prepare(). The target table should be visible in search_path.\nfunc CopyIn(table string, columns ...string) string {\n\tstmt := \"COPY \" + QuoteIdentifier(table) + \" (\"\n\tfor i, col := range columns {\n\t\tif i != 0 {\n\t\t\tstmt += \", \"\n\t\t}\n\t\tstmt += QuoteIdentifier(col)\n\t}\n\tstmt += \") FROM STDIN\"\n\treturn stmt\n}\n\n\/\/ CopyInSchema creates a COPY FROM statement which can be prepared with\n\/\/ Tx.Prepare().\nfunc CopyInSchema(schema, table string, columns ...string) string {\n\tstmt := \"COPY \" + QuoteIdentifier(schema) + \".\" + QuoteIdentifier(table) + \" (\"\n\tfor i, col := range columns {\n\t\tif i != 0 {\n\t\t\tstmt += \", \"\n\t\t}\n\t\tstmt += QuoteIdentifier(col)\n\t}\n\tstmt += \") FROM STDIN\"\n\treturn stmt\n}\n\ntype copyin struct {\n\tcn *conn\n\tbuffer []byte\n\trowData chan []byte\n\tdone chan bool\n\n\tclosed bool\n\n\tmu struct {\n\t\tsync.Mutex\n\t\terr error\n\t\tdriver.Result\n\t}\n}\n\nconst ciBufferSize = 64 * 1024\n\n\/\/ flush buffer before the buffer is filled up and needs reallocation\nconst ciBufferFlushSize = 63 * 1024\n\nfunc (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {\n\tif !cn.isInTransaction() {\n\t\treturn nil, errCopyNotSupportedOutsideTxn\n\t}\n\n\tci := ©in{\n\t\tcn: cn,\n\t\tbuffer: make([]byte, 0, ciBufferSize),\n\t\trowData: make(chan []byte),\n\t\tdone: make(chan bool, 1),\n\t}\n\t\/\/ add CopyData identifier + 4 bytes for message length\n\tci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)\n\n\tb := cn.writeBuf('Q')\n\tb.string(q)\n\tcn.send(b)\n\nawaitCopyInResponse:\n\tfor {\n\t\tt, r := cn.recv1()\n\t\tswitch t {\n\t\tcase 'G':\n\t\t\tif r.byte() != 0 {\n\t\t\t\terr = errBinaryCopyNotSupported\n\t\t\t\tbreak awaitCopyInResponse\n\t\t\t}\n\t\t\tgo ci.resploop()\n\t\t\treturn ci, nil\n\t\tcase 'H':\n\t\t\terr = errCopyToNotSupported\n\t\t\tbreak awaitCopyInResponse\n\t\tcase 'E':\n\t\t\terr = parseError(r)\n\t\tcase 'Z':\n\t\t\tif err == nil {\n\t\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\t\terrorf(\"unexpected ReadyForQuery in response to COPY\")\n\t\t\t}\n\t\t\tcn.processReadyForQuery(r)\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\terrorf(\"unknown response for copy query: %q\", t)\n\t\t}\n\t}\n\n\t\/\/ something went wrong, abort COPY before we return\n\tb = cn.writeBuf('f')\n\tb.string(err.Error())\n\tcn.send(b)\n\n\tfor {\n\t\tt, r := cn.recv1()\n\t\tswitch t {\n\t\tcase 'c', 'C', 'E':\n\t\tcase 'Z':\n\t\t\t\/\/ correctly aborted, we're done\n\t\t\tcn.processReadyForQuery(r)\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\terrorf(\"unknown response for CopyFail: %q\", t)\n\t\t}\n\t}\n}\n\nfunc (ci *copyin) flush(buf []byte) {\n\t\/\/ set message length (without message identifier)\n\tbinary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))\n\n\t_, err := ci.cn.c.Write(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (ci *copyin) resploop() {\n\tfor {\n\t\tvar r readBuf\n\t\tt, err := ci.cn.recvMessage(&r)\n\t\tif err != nil {\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\tci.setError(err)\n\t\t\tci.done <- true\n\t\t\treturn\n\t\t}\n\t\tswitch t {\n\t\tcase 'C':\n\t\t\t\/\/ complete\n\t\t\tres, _ := ci.cn.parseComplete(r.string())\n\t\t\tci.setResult(res)\n\t\tcase 'N':\n\t\t\tif n := ci.cn.noticeHandler; n != nil {\n\t\t\t\tn(parseError(&r))\n\t\t\t}\n\t\tcase 'Z':\n\t\t\tci.cn.processReadyForQuery(&r)\n\t\t\tci.done <- true\n\t\t\treturn\n\t\tcase 'E':\n\t\t\terr := parseError(&r)\n\t\t\tci.setError(err)\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\tci.setError(fmt.Errorf(\"unknown response during CopyIn: %q\", t))\n\t\t\tci.done <- true\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ci *copyin) setBad(err error) {\n\tci.cn.err.set(err)\n}\n\nfunc (ci *copyin) getBad() error {\n\treturn ci.cn.err.get()\n}\n\nfunc (ci *copyin) err() error {\n\tci.mu.Lock()\n\terr := ci.mu.err\n\tci.mu.Unlock()\n\treturn err\n}\n\n\/\/ setError() sets ci.err if one has not been set already. Caller must not be\n\/\/ holding ci.Mutex.\nfunc (ci *copyin) setError(err error) {\n\tci.mu.Lock()\n\tif ci.mu.err == nil {\n\t\tci.mu.err = err\n\t}\n\tci.mu.Unlock()\n}\n\nfunc (ci *copyin) setResult(result driver.Result) {\n\tci.mu.Lock()\n\tci.mu.Result = result\n\tci.mu.Unlock()\n}\n\nfunc (ci *copyin) getResult() driver.Result {\n\tci.mu.Lock()\n\tresult := ci.mu.Result\n\tci.mu.Unlock()\n\tif result == nil {\n\t\treturn driver.RowsAffected(0)\n\t}\n\treturn result\n}\n\nfunc (ci *copyin) NumInput() int {\n\treturn -1\n}\n\nfunc (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {\n\treturn nil, ErrNotSupported\n}\n\n\/\/ Exec inserts values into the COPY stream. The insert is asynchronous\n\/\/ and Exec can return errors from previous Exec calls to the same\n\/\/ COPY stmt.\n\/\/\n\/\/ You need to call Exec(nil) to sync the COPY stream and to get any\n\/\/ errors from pending data, since Stmt.Close() doesn't return errors\n\/\/ to the user.\nfunc (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {\n\tif ci.closed {\n\t\treturn nil, errCopyInClosed\n\t}\n\n\tif err := ci.getBad(); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ci.cn.errRecover(&err)\n\n\tif err := ci.err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(v) == 0 {\n\t\tif err := ci.Close(); err != nil {\n\t\t\treturn driver.RowsAffected(0), err\n\t\t}\n\n\t\treturn ci.getResult(), nil\n\t}\n\n\tnumValues := len(v)\n\tfor i, value := range v {\n\t\tci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)\n\t\tif i < numValues-1 {\n\t\t\tci.buffer = append(ci.buffer, '\\t')\n\t\t}\n\t}\n\n\tci.buffer = append(ci.buffer, '\\n')\n\n\tif len(ci.buffer) > ciBufferFlushSize {\n\t\tci.flush(ci.buffer)\n\t\t\/\/ reset buffer, keep bytes for message identifier and length\n\t\tci.buffer = ci.buffer[:5]\n\t}\n\n\treturn driver.RowsAffected(0), nil\n}\n\nfunc (ci *copyin) Close() (err error) {\n\tif ci.closed { \/\/ Don't do anything, we're already closed\n\t\treturn nil\n\t}\n\tci.closed = true\n\n\tif err := ci.getBad(); err != nil {\n\t\treturn err\n\t}\n\tdefer ci.cn.errRecover(&err)\n\n\tif len(ci.buffer) > 0 {\n\t\tci.flush(ci.buffer)\n\t}\n\t\/\/ Avoid touching the scratch buffer as resploop could be using it.\n\terr = ci.cn.sendSimpleMessage('c')\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t<-ci.done\n\tci.cn.inCopy = false\n\n\tif err := ci.err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>expose raw CopyData command (#1077)<commit_after>package pq\n\nimport (\n\t\"context\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n)\n\nvar (\n\terrCopyInClosed = errors.New(\"pq: copyin statement has already been closed\")\n\terrBinaryCopyNotSupported = errors.New(\"pq: only text format supported for COPY\")\n\terrCopyToNotSupported = errors.New(\"pq: COPY TO is not supported\")\n\terrCopyNotSupportedOutsideTxn = errors.New(\"pq: COPY is only allowed inside a transaction\")\n\terrCopyInProgress = errors.New(\"pq: COPY in progress\")\n)\n\n\/\/ CopyIn creates a COPY FROM statement which can be prepared with\n\/\/ Tx.Prepare(). The target table should be visible in search_path.\nfunc CopyIn(table string, columns ...string) string {\n\tstmt := \"COPY \" + QuoteIdentifier(table) + \" (\"\n\tfor i, col := range columns {\n\t\tif i != 0 {\n\t\t\tstmt += \", \"\n\t\t}\n\t\tstmt += QuoteIdentifier(col)\n\t}\n\tstmt += \") FROM STDIN\"\n\treturn stmt\n}\n\n\/\/ CopyInSchema creates a COPY FROM statement which can be prepared with\n\/\/ Tx.Prepare().\nfunc CopyInSchema(schema, table string, columns ...string) string {\n\tstmt := \"COPY \" + QuoteIdentifier(schema) + \".\" + QuoteIdentifier(table) + \" (\"\n\tfor i, col := range columns {\n\t\tif i != 0 {\n\t\t\tstmt += \", \"\n\t\t}\n\t\tstmt += QuoteIdentifier(col)\n\t}\n\tstmt += \") FROM STDIN\"\n\treturn stmt\n}\n\ntype copyin struct {\n\tcn *conn\n\tbuffer []byte\n\trowData chan []byte\n\tdone chan bool\n\n\tclosed bool\n\n\tmu struct {\n\t\tsync.Mutex\n\t\terr error\n\t\tdriver.Result\n\t}\n}\n\nconst ciBufferSize = 64 * 1024\n\n\/\/ flush buffer before the buffer is filled up and needs reallocation\nconst ciBufferFlushSize = 63 * 1024\n\nfunc (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {\n\tif !cn.isInTransaction() {\n\t\treturn nil, errCopyNotSupportedOutsideTxn\n\t}\n\n\tci := ©in{\n\t\tcn: cn,\n\t\tbuffer: make([]byte, 0, ciBufferSize),\n\t\trowData: make(chan []byte),\n\t\tdone: make(chan bool, 1),\n\t}\n\t\/\/ add CopyData identifier + 4 bytes for message length\n\tci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)\n\n\tb := cn.writeBuf('Q')\n\tb.string(q)\n\tcn.send(b)\n\nawaitCopyInResponse:\n\tfor {\n\t\tt, r := cn.recv1()\n\t\tswitch t {\n\t\tcase 'G':\n\t\t\tif r.byte() != 0 {\n\t\t\t\terr = errBinaryCopyNotSupported\n\t\t\t\tbreak awaitCopyInResponse\n\t\t\t}\n\t\t\tgo ci.resploop()\n\t\t\treturn ci, nil\n\t\tcase 'H':\n\t\t\terr = errCopyToNotSupported\n\t\t\tbreak awaitCopyInResponse\n\t\tcase 'E':\n\t\t\terr = parseError(r)\n\t\tcase 'Z':\n\t\t\tif err == nil {\n\t\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\t\terrorf(\"unexpected ReadyForQuery in response to COPY\")\n\t\t\t}\n\t\t\tcn.processReadyForQuery(r)\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\terrorf(\"unknown response for copy query: %q\", t)\n\t\t}\n\t}\n\n\t\/\/ something went wrong, abort COPY before we return\n\tb = cn.writeBuf('f')\n\tb.string(err.Error())\n\tcn.send(b)\n\n\tfor {\n\t\tt, r := cn.recv1()\n\t\tswitch t {\n\t\tcase 'c', 'C', 'E':\n\t\tcase 'Z':\n\t\t\t\/\/ correctly aborted, we're done\n\t\t\tcn.processReadyForQuery(r)\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\terrorf(\"unknown response for CopyFail: %q\", t)\n\t\t}\n\t}\n}\n\nfunc (ci *copyin) flush(buf []byte) {\n\t\/\/ set message length (without message identifier)\n\tbinary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))\n\n\t_, err := ci.cn.c.Write(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (ci *copyin) resploop() {\n\tfor {\n\t\tvar r readBuf\n\t\tt, err := ci.cn.recvMessage(&r)\n\t\tif err != nil {\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\tci.setError(err)\n\t\t\tci.done <- true\n\t\t\treturn\n\t\t}\n\t\tswitch t {\n\t\tcase 'C':\n\t\t\t\/\/ complete\n\t\t\tres, _ := ci.cn.parseComplete(r.string())\n\t\t\tci.setResult(res)\n\t\tcase 'N':\n\t\t\tif n := ci.cn.noticeHandler; n != nil {\n\t\t\t\tn(parseError(&r))\n\t\t\t}\n\t\tcase 'Z':\n\t\t\tci.cn.processReadyForQuery(&r)\n\t\t\tci.done <- true\n\t\t\treturn\n\t\tcase 'E':\n\t\t\terr := parseError(&r)\n\t\t\tci.setError(err)\n\t\tdefault:\n\t\t\tci.setBad(driver.ErrBadConn)\n\t\t\tci.setError(fmt.Errorf(\"unknown response during CopyIn: %q\", t))\n\t\t\tci.done <- true\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ci *copyin) setBad(err error) {\n\tci.cn.err.set(err)\n}\n\nfunc (ci *copyin) getBad() error {\n\treturn ci.cn.err.get()\n}\n\nfunc (ci *copyin) err() error {\n\tci.mu.Lock()\n\terr := ci.mu.err\n\tci.mu.Unlock()\n\treturn err\n}\n\n\/\/ setError() sets ci.err if one has not been set already. Caller must not be\n\/\/ holding ci.Mutex.\nfunc (ci *copyin) setError(err error) {\n\tci.mu.Lock()\n\tif ci.mu.err == nil {\n\t\tci.mu.err = err\n\t}\n\tci.mu.Unlock()\n}\n\nfunc (ci *copyin) setResult(result driver.Result) {\n\tci.mu.Lock()\n\tci.mu.Result = result\n\tci.mu.Unlock()\n}\n\nfunc (ci *copyin) getResult() driver.Result {\n\tci.mu.Lock()\n\tresult := ci.mu.Result\n\tci.mu.Unlock()\n\tif result == nil {\n\t\treturn driver.RowsAffected(0)\n\t}\n\treturn result\n}\n\nfunc (ci *copyin) NumInput() int {\n\treturn -1\n}\n\nfunc (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {\n\treturn nil, ErrNotSupported\n}\n\n\/\/ Exec inserts values into the COPY stream. The insert is asynchronous\n\/\/ and Exec can return errors from previous Exec calls to the same\n\/\/ COPY stmt.\n\/\/\n\/\/ You need to call Exec(nil) to sync the COPY stream and to get any\n\/\/ errors from pending data, since Stmt.Close() doesn't return errors\n\/\/ to the user.\nfunc (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {\n\tif ci.closed {\n\t\treturn nil, errCopyInClosed\n\t}\n\n\tif err := ci.getBad(); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ci.cn.errRecover(&err)\n\n\tif err := ci.err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(v) == 0 {\n\t\tif err := ci.Close(); err != nil {\n\t\t\treturn driver.RowsAffected(0), err\n\t\t}\n\n\t\treturn ci.getResult(), nil\n\t}\n\n\tnumValues := len(v)\n\tfor i, value := range v {\n\t\tci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)\n\t\tif i < numValues-1 {\n\t\t\tci.buffer = append(ci.buffer, '\\t')\n\t\t}\n\t}\n\n\tci.buffer = append(ci.buffer, '\\n')\n\n\tif len(ci.buffer) > ciBufferFlushSize {\n\t\tci.flush(ci.buffer)\n\t\t\/\/ reset buffer, keep bytes for message identifier and length\n\t\tci.buffer = ci.buffer[:5]\n\t}\n\n\treturn driver.RowsAffected(0), nil\n}\n\n\/\/ CopyData executes a raw CopyData command using the PostgreSQL Frontend\/Backend\n\/\/ protocol. Use Exec(nil) to finish the command.\nfunc (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) {\n\tif ci.closed {\n\t\treturn nil, errCopyInClosed\n\t}\n\n\tif finish := ci.cn.watchCancel(ctx); finish != nil {\n\t\tdefer finish()\n\t}\n\n\tif err := ci.getBad(); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ci.cn.errRecover(&err)\n\n\tif err := ci.err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tci.buffer = append(ci.buffer, []byte(line)...)\n\tci.buffer = append(ci.buffer, '\\n')\n\n\tif len(ci.buffer) > ciBufferFlushSize {\n\t\tci.flush(ci.buffer)\n\t\t\/\/ reset buffer, keep bytes for message identifier and length\n\t\tci.buffer = ci.buffer[:5]\n\t}\n\n\treturn driver.RowsAffected(0), nil\n}\n\nfunc (ci *copyin) Close() (err error) {\n\tif ci.closed { \/\/ Don't do anything, we're already closed\n\t\treturn nil\n\t}\n\tci.closed = true\n\n\tif err := ci.getBad(); err != nil {\n\t\treturn err\n\t}\n\tdefer ci.cn.errRecover(&err)\n\n\tif len(ci.buffer) > 0 {\n\t\tci.flush(ci.buffer)\n\t}\n\t\/\/ Avoid touching the scratch buffer as resploop could be using it.\n\terr = ci.cn.sendSimpleMessage('c')\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t<-ci.done\n\tci.cn.inCopy = false\n\n\tif err := ci.err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package zapdriver\n\nimport (\n\t\"strings\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Core is a zapdriver specific core wrapped around the default zap core. It\n\/\/ allows to merge all defined labels\ntype core struct {\n\tzapcore.Core\n\n\t\/\/ permLabels is a collection of labels that have been added to the logger\n\t\/\/ through the use of `With()`. These labels should never be cleared after\n\t\/\/ logging a single entry, unlike `tempLabel`.\n\tpermLabels *labels\n\n\t\/\/ tempLabels keeps a record of all the labels that need to be applied to the\n\t\/\/ current log entry. Zap serializes log fields at different parts of the\n\t\/\/ stack, one such location is when calling `core.With` and the other one is\n\t\/\/ when calling `core.Write`. This makes it impossible to (for example) take\n\t\/\/ all `labels.xxx` fields, and wrap them in the `labels` namespace in one go.\n\t\/\/\n\t\/\/ Instead, we have to filter out these labels at both locations, and then add\n\t\/\/ them back in the proper format right before we call `Write` on the original\n\t\/\/ Zap core.\n\ttempLabels *labels\n}\n\n\/\/ WrapCore returns a `zap.Option` that wraps the default core with the\n\/\/ zapdriver one.\nfunc WrapCore() zap.Option {\n\treturn zap.WrapCore(func(c zapcore.Core) zapcore.Core {\n\t\treturn &core{c, newLabels(), newLabels()}\n\t})\n}\n\n\/\/ With adds structured context to the Core.\nfunc (c *core) With(fields []zap.Field) zapcore.Core {\n\tvar lbls *labels\n\tlbls, fields = c.extractLabels(fields)\n\n\tlbls.mutex.Lock()\n\tfor k, v := range lbls.store {\n\t\tc.permLabels.store[k] = v\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn &core{c.Core.With(fields), c.permLabels, newLabels()}\n}\n\n\/\/ Check determines whether the supplied Entry should be logged (using the\n\/\/ embedded LevelEnabler and possibly some extra logic). If the entry\n\/\/ should be logged, the Core adds itself to the CheckedEntry and returns\n\/\/ the result.\n\/\/\n\/\/ Callers must use Check before calling Write.\nfunc (c *core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {\n\tif c.Enabled(ent.Level) {\n\t\treturn ce.AddCore(ent, c)\n\t}\n\n\treturn ce\n}\n\nfunc (c *core) Write(ent zapcore.Entry, fields []zapcore.Field) error {\n\tvar lbls *labels\n\tlbls, fields = c.extractLabels(fields)\n\n\tlbls.mutex.Lock()\n\tfor k, v := range lbls.store {\n\t\tc.tempLabels.store[k] = v\n\t}\n\tlbls.mutex.Unlock()\n\n\tfields = append(fields, labelsField(c.allLabels()))\n\tfields = c.withSourceLocation(ent, fields)\n\n\tc.tempLabels = newLabels()\n\n\treturn c.Core.Write(ent, fields)\n}\n\n\/\/ Sync flushes buffered logs (if any).\nfunc (c *core) Sync() error {\n\treturn c.Core.Sync()\n}\n\nfunc (c *core) allLabels() *labels {\n\tlbls := newLabels()\n\n\tlbls.mutex.Lock()\n\tfor k, v := range c.permLabels.store {\n\t\tlbls.store[k] = v\n\t}\n\n\tfor k, v := range c.tempLabels.store {\n\t\tlbls.store[k] = v\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn lbls\n}\n\nfunc (c *core) extractLabels(fields []zapcore.Field) (*labels, []zapcore.Field) {\n\tlbls := newLabels()\n\tout := []zapcore.Field{}\n\n\tlbls.mutex.Lock()\n\tfor i := range fields {\n\t\tif !isLabelField(fields[i]) {\n\t\t\tout = append(out, fields[i])\n\t\t\tcontinue\n\t\t}\n\n\t\tlbls.store[strings.Replace(fields[i].Key, \"labels.\", \"\", 1)] = fields[i].String\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn lbls, out\n}\n\nfunc (c *core) withLabels(fields []zapcore.Field) []zapcore.Field {\n\tlbls := newLabels()\n\tout := []zapcore.Field{}\n\n\tlbls.mutex.Lock()\n\tfor i := range fields {\n\t\tif isLabelField(fields[i]) {\n\t\t\tlbls.store[strings.Replace(fields[i].Key, \"labels.\", \"\", 1)] = fields[i].String\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, fields[i])\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn append(out, labelsField(lbls))\n}\n\nfunc (c *core) withSourceLocation(ent zapcore.Entry, fields []zapcore.Field) []zapcore.Field {\n\t\/\/ If the source location was manually set, don't overwrite it\n\tfor i := range fields {\n\t\tif fields[i].Key == sourceKey {\n\t\t\treturn fields\n\t\t}\n\t}\n\n\tif !ent.Caller.Defined {\n\t\treturn fields\n\t}\n\n\treturn append(fields, SourceLocation(ent.Caller.PC, ent.Caller.File, ent.Caller.Line, true))\n}\n<commit_msg>add missing mutex locks<commit_after>package zapdriver\n\nimport (\n\t\"strings\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Core is a zapdriver specific core wrapped around the default zap core. It\n\/\/ allows to merge all defined labels\ntype core struct {\n\tzapcore.Core\n\n\t\/\/ permLabels is a collection of labels that have been added to the logger\n\t\/\/ through the use of `With()`. These labels should never be cleared after\n\t\/\/ logging a single entry, unlike `tempLabel`.\n\tpermLabels *labels\n\n\t\/\/ tempLabels keeps a record of all the labels that need to be applied to the\n\t\/\/ current log entry. Zap serializes log fields at different parts of the\n\t\/\/ stack, one such location is when calling `core.With` and the other one is\n\t\/\/ when calling `core.Write`. This makes it impossible to (for example) take\n\t\/\/ all `labels.xxx` fields, and wrap them in the `labels` namespace in one go.\n\t\/\/\n\t\/\/ Instead, we have to filter out these labels at both locations, and then add\n\t\/\/ them back in the proper format right before we call `Write` on the original\n\t\/\/ Zap core.\n\ttempLabels *labels\n}\n\n\/\/ WrapCore returns a `zap.Option` that wraps the default core with the\n\/\/ zapdriver one.\nfunc WrapCore() zap.Option {\n\treturn zap.WrapCore(func(c zapcore.Core) zapcore.Core {\n\t\treturn &core{c, newLabels(), newLabels()}\n\t})\n}\n\n\/\/ With adds structured context to the Core.\nfunc (c *core) With(fields []zap.Field) zapcore.Core {\n\tvar lbls *labels\n\tlbls, fields = c.extractLabels(fields)\n\n\tlbls.mutex.RLock()\n\tc.permLabels.mutex.Lock()\n\tfor k, v := range lbls.store {\n\t\tc.permLabels.store[k] = v\n\t}\n\tc.permLabels.mutex.Unlock()\n\tlbls.mutex.RUnlock()\n\n\treturn &core{c.Core.With(fields), c.permLabels, newLabels()}\n}\n\n\/\/ Check determines whether the supplied Entry should be logged (using the\n\/\/ embedded LevelEnabler and possibly some extra logic). If the entry\n\/\/ should be logged, the Core adds itself to the CheckedEntry and returns\n\/\/ the result.\n\/\/\n\/\/ Callers must use Check before calling Write.\nfunc (c *core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {\n\tif c.Enabled(ent.Level) {\n\t\treturn ce.AddCore(ent, c)\n\t}\n\n\treturn ce\n}\n\nfunc (c *core) Write(ent zapcore.Entry, fields []zapcore.Field) error {\n\tvar lbls *labels\n\tlbls, fields = c.extractLabels(fields)\n\n\tlbls.mutex.RLock()\n\tc.tempLabels.mutex.Lock()\n\tfor k, v := range lbls.store {\n\t\tc.tempLabels.store[k] = v\n\t}\n\tc.tempLabels.mutex.Unlock()\n\tlbls.mutex.RUnlock()\n\n\tfields = append(fields, labelsField(c.allLabels()))\n\tfields = c.withSourceLocation(ent, fields)\n\n\tc.tempLabels = newLabels()\n\n\treturn c.Core.Write(ent, fields)\n}\n\n\/\/ Sync flushes buffered logs (if any).\nfunc (c *core) Sync() error {\n\treturn c.Core.Sync()\n}\n\nfunc (c *core) allLabels() *labels {\n\tlbls := newLabels()\n\n\tlbls.mutex.Lock()\n\tc.permLabels.mutex.RLock()\n\tfor k, v := range c.permLabels.store {\n\t\tlbls.store[k] = v\n\t}\n\tc.permLabels.mutex.RUnlock()\n\n\tc.tempLabels.mutex.RLock()\n\tfor k, v := range c.tempLabels.store {\n\t\tlbls.store[k] = v\n\t}\n\tc.tempLabels.mutex.RUnlock()\n\tlbls.mutex.Unlock()\n\n\treturn lbls\n}\n\nfunc (c *core) extractLabels(fields []zapcore.Field) (*labels, []zapcore.Field) {\n\tlbls := newLabels()\n\tout := []zapcore.Field{}\n\n\tlbls.mutex.Lock()\n\tfor i := range fields {\n\t\tif !isLabelField(fields[i]) {\n\t\t\tout = append(out, fields[i])\n\t\t\tcontinue\n\t\t}\n\n\t\tlbls.store[strings.Replace(fields[i].Key, \"labels.\", \"\", 1)] = fields[i].String\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn lbls, out\n}\n\nfunc (c *core) withLabels(fields []zapcore.Field) []zapcore.Field {\n\tlbls := newLabels()\n\tout := []zapcore.Field{}\n\n\tlbls.mutex.Lock()\n\tfor i := range fields {\n\t\tif isLabelField(fields[i]) {\n\t\t\tlbls.store[strings.Replace(fields[i].Key, \"labels.\", \"\", 1)] = fields[i].String\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, fields[i])\n\t}\n\tlbls.mutex.Unlock()\n\n\treturn append(out, labelsField(lbls))\n}\n\nfunc (c *core) withSourceLocation(ent zapcore.Entry, fields []zapcore.Field) []zapcore.Field {\n\t\/\/ If the source location was manually set, don't overwrite it\n\tfor i := range fields {\n\t\tif fields[i].Key == sourceKey {\n\t\t\treturn fields\n\t\t}\n\t}\n\n\tif !ent.Caller.Defined {\n\t\treturn fields\n\t}\n\n\treturn append(fields, SourceLocation(ent.Caller.PC, ent.Caller.File, ent.Caller.Line, true))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage minio\n\nimport \"io\"\nimport \"encoding\/hex\"\n\n\/\/ Inherits Client and adds new methods to expose the low level S3 APIs.\ntype Core struct {\n\t*Client\n}\n\n\/\/ NewCoreClient - Returns new Core.\nfunc NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {\n\tvar s3Client Core\n\tclient, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts3Client.Client = client\n\treturn &s3Client, nil\n}\n\n\/\/ ListObjects - List the objects.\nfunc (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {\n\treturn c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)\n}\n\n\/\/ PutObject - Upload object. Uploads using single PUT call.\nfunc (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum string, sha256Sum string, metadata map[string][]string) (ObjectInfo, error) {\n\tvar md5SumBytes []byte\n\tvar sha256SumBytes []byte\n\tvar err error\n\tif md5Sum != \"\" {\n\t\tmd5SumBytes, err = hex.DecodeString(md5Sum)\n\t\tif err != nil {\n\t\t\treturn ObjectInfo{}, err\n\t\t}\n\t}\n\tif sha256Sum != \"\" {\n\t\tsha256SumBytes, err = hex.DecodeString(sha256Sum)\n\t\tif err != nil {\n\t\t\treturn ObjectInfo{}, err\n\t\t}\n\t}\n\treturn c.putObjectDo(bucket, object, data, md5SumBytes, sha256SumBytes, size, metadata)\n}\n\n\/\/ NewMultipartUpload - Initiates new multipart upload and returns the new uploaID.\nfunc (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {\n\tresult, err := c.initiateMultipartUpload(bucket, object, metadata)\n\treturn result.UploadID, err\n}\n\n\/\/ ListMultipartUploads - List incomplete uploads.\nfunc (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {\n\treturn c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)\n}\n\n\/\/ PutObjectPart - Upload an object part.\nfunc (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum string) (ObjectPart, error) {\n\tvar md5SumBytes []byte\n\tvar sha256SumBytes []byte\n\tvar err error\n\tif md5Sum != \"\" {\n\t\tmd5SumBytes, err = hex.DecodeString(md5Sum)\n\t\tif err != nil {\n\t\t\treturn ObjectPart{}, err\n\t\t}\n\t}\n\tif sha256Sum != \"\" {\n\t\tsha256SumBytes, err = hex.DecodeString(sha256Sum)\n\t\tif err != nil {\n\t\t\treturn ObjectPart{}, err\n\t\t}\n\t}\n\treturn c.uploadPart(bucket, object, uploadID, data, partID, md5SumBytes, sha256SumBytes, size)\n}\n\n\/\/ ListObjectParts - List uploaded parts of an incomplete upload.\nfunc (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {\n\treturn c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)\n}\n\n\/\/ CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.\nfunc (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {\n\t_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})\n\treturn err\n}\n\n\/\/ AbortMultipartUpload - Abort an incomplete upload.\nfunc (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {\n\treturn c.abortMultipartUpload(bucket, object, uploadID)\n}\n<commit_msg>Take md5 and sha256 arguments as []byte (#637)<commit_after>\/*\n * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage minio\n\nimport \"io\"\n\n\/\/ Inherits Client and adds new methods to expose the low level S3 APIs.\ntype Core struct {\n\t*Client\n}\n\n\/\/ NewCoreClient - Returns new Core.\nfunc NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {\n\tvar s3Client Core\n\tclient, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts3Client.Client = client\n\treturn &s3Client, nil\n}\n\n\/\/ ListObjects - List the objects.\nfunc (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {\n\treturn c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)\n}\n\n\/\/ PutObject - Upload object. Uploads using single PUT call.\nfunc (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {\n\treturn c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)\n}\n\n\/\/ NewMultipartUpload - Initiates new multipart upload and returns the new uploaID.\nfunc (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {\n\tresult, err := c.initiateMultipartUpload(bucket, object, metadata)\n\treturn result.UploadID, err\n}\n\n\/\/ ListMultipartUploads - List incomplete uploads.\nfunc (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {\n\treturn c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)\n}\n\n\/\/ PutObjectPart - Upload an object part.\nfunc (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {\n\treturn c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)\n}\n\n\/\/ ListObjectParts - List uploaded parts of an incomplete upload.\nfunc (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {\n\treturn c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)\n}\n\n\/\/ CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.\nfunc (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {\n\t_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})\n\treturn err\n}\n\n\/\/ AbortMultipartUpload - Abort an incomplete upload.\nfunc (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {\n\treturn c.abortMultipartUpload(bucket, object, uploadID)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\" \/\/ nolint\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/getsentry\/sentry-go\"\n\t\"github.com\/projecteru2\/core\/auth\"\n\t\"github.com\/projecteru2\/core\/cluster\/calcium\"\n\t\"github.com\/projecteru2\/core\/log\"\n\t\"github.com\/projecteru2\/core\/metrics\"\n\t\"github.com\/projecteru2\/core\/rpc\"\n\tpb \"github.com\/projecteru2\/core\/rpc\/gen\"\n\t\"github.com\/projecteru2\/core\/utils\"\n\t\"github.com\/projecteru2\/core\/version\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/sethvargo\/go-signalcontext\"\n\tcli \"github.com\/urfave\/cli\/v2\"\n\t\"google.golang.org\/grpc\"\n\n\t_ \"go.uber.org\/automaxprocs\"\n)\n\nvar (\n\tconfigPath string\n\tembeddedStorage bool\n)\n\nfunc setupSentry(dsn string) (func(), error) {\n\tif dsn == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tsentryDefer := func() {\n\t\tdefer sentry.Flush(2 * time.Second)\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tsentry.CaptureMessage(fmt.Sprintf(\"%+v\", err))\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn sentryDefer, sentry.Init(sentry.ClientOptions{\n\t\tDsn: dsn,\n\t})\n}\n\nfunc serve(c *cli.Context) error {\n\tconfig, err := utils.LoadConfig(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"[main] %v\", err)\n\t}\n\n\tif err := log.SetupLog(config.LogLevel); err != nil {\n\t\tlog.Fatalf(\"[main] %v\", err)\n\t}\n\n\tif sentryDefer, err := setupSentry(config.SentryDSN); err != nil {\n\t\tlog.Warnf(context.TODO(), \"[main] sentry %v\", err)\n\t} else if sentryDefer != nil {\n\t\tdefer sentryDefer()\n\t}\n\n\tif err := metrics.InitMetrics(config); err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\n\tcluster, err := calcium.New(config, nil)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\tdefer cluster.Finalizer()\n\tcluster.DisasterRecover()\n\n\tstop := make(chan struct{}, 1)\n\tvibranium := rpc.New(cluster, config, stop)\n\ts, err := net.Listen(\"tcp\", config.Bind)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\n\topts := []grpc.ServerOption{\n\t\tgrpc.MaxConcurrentStreams(uint32(config.GRPCConfig.MaxConcurrentStreams)),\n\t\tgrpc.MaxRecvMsgSize(config.GRPCConfig.MaxRecvMsgSize),\n\t}\n\n\tif config.Auth.Username != \"\" {\n\t\tlog.Info(\"[main] Cluster auth enable.\")\n\t\tauth := auth.NewAuth(config.Auth)\n\t\topts = append(opts, grpc.StreamInterceptor(auth.StreamInterceptor))\n\t\topts = append(opts, grpc.UnaryInterceptor(auth.UnaryInterceptor))\n\t\tlog.Infof(context.TODO(), \"[main] Username %s Password %s\", config.Auth.Username, config.Auth.Password)\n\t}\n\n\tgrpcServer := grpc.NewServer(opts...)\n\tpb.RegisterCoreRPCServer(grpcServer, vibranium)\n\tutils.SentryGo(func() {\n\t\tif err := grpcServer.Serve(s); err != nil {\n\t\t\tlog.Errorf(context.TODO(), \"[main] start grpc failed %v\", err)\n\t\t}\n\t})\n\n\tif config.Profile != \"\" {\n\t\thttp.Handle(\"\/metrics\", metrics.Client.ResourceMiddleware(cluster)(promhttp.Handler()))\n\t\tutils.SentryGo(func() {\n\t\t\tif err := http.ListenAndServe(config.Profile, nil); err != nil {\n\t\t\t\tlog.Errorf(context.TODO(), \"[main] start http failed %v\", err)\n\t\t\t}\n\t\t})\n\t}\n\n\tunregisterService, err := cluster.RegisterService(c.Context)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] failed to register service: %v\", err)\n\t\treturn err\n\t}\n\tlog.Info(\"[main] Cluster started successfully.\")\n\n\t\/\/ wait for unix signals and try to GracefulStop\n\tctx, cancel := signalcontext.OnInterrupt()\n\tdefer cancel()\n\t<-ctx.Done()\n\n\tlog.Info(\"[main] Interrupt by signal\")\n\tclose(stop)\n\tunregisterService()\n\tgrpcServer.GracefulStop()\n\tlog.Info(\"[main] gRPC server gracefully stopped.\")\n\n\tlog.Info(\"[main] Check if cluster still have running tasks.\")\n\tvibranium.Wait()\n\tlog.Info(\"[main] cluster gracefully stopped.\")\n\treturn nil\n}\n\nfunc main() {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Print(version.String())\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = version.NAME\n\tapp.Usage = \"Run eru core\"\n\tapp.Version = version.VERSION\n\tapp.Flags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"config\",\n\t\t\tValue: \"\/etc\/eru\/core.yaml\",\n\t\t\tUsage: \"config file path for core, in yaml\",\n\t\t\tDestination: &configPath,\n\t\t\tEnvVars: []string{\"ERU_CONFIG_PATH\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"embedded-storage\",\n\t\t\tUsage: \"active embedded storage\",\n\t\t\tDestination: &embeddedStorage,\n\t\t},\n\t}\n\tapp.Action = serve\n\t_ = app.Run(os.Args)\n}\n<commit_msg>can use testing storage when someone like it (#435)<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\" \/\/ nolint\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/getsentry\/sentry-go\"\n\t\"github.com\/projecteru2\/core\/auth\"\n\t\"github.com\/projecteru2\/core\/cluster\/calcium\"\n\t\"github.com\/projecteru2\/core\/log\"\n\t\"github.com\/projecteru2\/core\/metrics\"\n\t\"github.com\/projecteru2\/core\/rpc\"\n\tpb \"github.com\/projecteru2\/core\/rpc\/gen\"\n\t\"github.com\/projecteru2\/core\/utils\"\n\t\"github.com\/projecteru2\/core\/version\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/sethvargo\/go-signalcontext\"\n\tcli \"github.com\/urfave\/cli\/v2\"\n\t\"google.golang.org\/grpc\"\n\n\t_ \"go.uber.org\/automaxprocs\"\n)\n\nvar (\n\tconfigPath string\n\tembeddedStorage bool\n)\n\nfunc setupSentry(dsn string) (func(), error) {\n\tif dsn == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tsentryDefer := func() {\n\t\tdefer sentry.Flush(2 * time.Second)\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tsentry.CaptureMessage(fmt.Sprintf(\"%+v\", err))\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn sentryDefer, sentry.Init(sentry.ClientOptions{\n\t\tDsn: dsn,\n\t})\n}\n\nfunc serve(c *cli.Context) error {\n\tconfig, err := utils.LoadConfig(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"[main] %v\", err)\n\t}\n\n\tif err := log.SetupLog(config.LogLevel); err != nil {\n\t\tlog.Fatalf(\"[main] %v\", err)\n\t}\n\n\tif sentryDefer, err := setupSentry(config.SentryDSN); err != nil {\n\t\tlog.Warnf(context.TODO(), \"[main] sentry %v\", err)\n\t} else if sentryDefer != nil {\n\t\tdefer sentryDefer()\n\t}\n\n\tif err := metrics.InitMetrics(config); err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\n\tvar t *testing.T\n\tif embeddedStorage {\n\t\tt = &testing.T{}\n\t}\n\tcluster, err := calcium.New(config, t)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\tdefer cluster.Finalizer()\n\tcluster.DisasterRecover()\n\n\tstop := make(chan struct{}, 1)\n\tvibranium := rpc.New(cluster, config, stop)\n\ts, err := net.Listen(\"tcp\", config.Bind)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] %v\", err)\n\t\treturn err\n\t}\n\n\topts := []grpc.ServerOption{\n\t\tgrpc.MaxConcurrentStreams(uint32(config.GRPCConfig.MaxConcurrentStreams)),\n\t\tgrpc.MaxRecvMsgSize(config.GRPCConfig.MaxRecvMsgSize),\n\t}\n\n\tif config.Auth.Username != \"\" {\n\t\tlog.Info(\"[main] Cluster auth enable.\")\n\t\tauth := auth.NewAuth(config.Auth)\n\t\topts = append(opts, grpc.StreamInterceptor(auth.StreamInterceptor))\n\t\topts = append(opts, grpc.UnaryInterceptor(auth.UnaryInterceptor))\n\t\tlog.Infof(context.TODO(), \"[main] Username %s Password %s\", config.Auth.Username, config.Auth.Password)\n\t}\n\n\tgrpcServer := grpc.NewServer(opts...)\n\tpb.RegisterCoreRPCServer(grpcServer, vibranium)\n\tutils.SentryGo(func() {\n\t\tif err := grpcServer.Serve(s); err != nil {\n\t\t\tlog.Errorf(context.TODO(), \"[main] start grpc failed %v\", err)\n\t\t}\n\t})\n\n\tif config.Profile != \"\" {\n\t\thttp.Handle(\"\/metrics\", metrics.Client.ResourceMiddleware(cluster)(promhttp.Handler()))\n\t\tutils.SentryGo(func() {\n\t\t\tif err := http.ListenAndServe(config.Profile, nil); err != nil {\n\t\t\t\tlog.Errorf(context.TODO(), \"[main] start http failed %v\", err)\n\t\t\t}\n\t\t})\n\t}\n\n\tunregisterService, err := cluster.RegisterService(c.Context)\n\tif err != nil {\n\t\tlog.Errorf(context.TODO(), \"[main] failed to register service: %v\", err)\n\t\treturn err\n\t}\n\tlog.Info(\"[main] Cluster started successfully.\")\n\n\t\/\/ wait for unix signals and try to GracefulStop\n\tctx, cancel := signalcontext.OnInterrupt()\n\tdefer cancel()\n\t<-ctx.Done()\n\n\tlog.Info(\"[main] Interrupt by signal\")\n\tclose(stop)\n\tunregisterService()\n\tgrpcServer.GracefulStop()\n\tlog.Info(\"[main] gRPC server gracefully stopped.\")\n\n\tlog.Info(\"[main] Check if cluster still have running tasks.\")\n\tvibranium.Wait()\n\tlog.Info(\"[main] cluster gracefully stopped.\")\n\treturn nil\n}\n\nfunc main() {\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tfmt.Print(version.String())\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = version.NAME\n\tapp.Usage = \"Run eru core\"\n\tapp.Version = version.VERSION\n\tapp.Flags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"config\",\n\t\t\tValue: \"\/etc\/eru\/core.yaml\",\n\t\t\tUsage: \"config file path for core, in yaml\",\n\t\t\tDestination: &configPath,\n\t\t\tEnvVars: []string{\"ERU_CONFIG_PATH\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"embedded-storage\",\n\t\t\tUsage: \"active embedded storage\",\n\t\t\tDestination: &embeddedStorage,\n\t\t},\n\t}\n\tapp.Action = serve\n\t_ = app.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"github.com\/oursky\/ourd\/handler\/handlertest\"\n\t. \"github.com\/oursky\/ourd\/ourtest\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\t\"testing\"\n\n\t\"github.com\/oursky\/ourd\/oddb\"\n\t\"github.com\/oursky\/ourd\/push\"\n\t\"github.com\/oursky\/ourd\/router\"\n)\n\nfunc TestPushToDevice(t *testing.T) {\n\tConvey(\"push to device\", t, func() {\n\t\ttestdevice := oddb.Device{\n\t\t\tID: \"device\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"userid\",\n\t\t}\n\t\tconn := simpleDeviceConn{\n\t\t\tdevices: []oddb.Device{testdevice},\n\t\t}\n\n\t\tr := handlertest.NewSingleRouteRouter(PushToDeviceHandler, func(p *router.Payload) {\n\t\t\tp.DBConn = &conn\n\t\t})\n\n\t\toriginalSendFunc := sendPushNotification\n\t\tdefer func() {\n\t\t\tsendPushNotification = originalSendFunc\n\t\t}()\n\n\t\tConvey(\"push to single device\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender *push.APNSPusher, device *oddb.Device, m push.Mapper) {\n\t\t\t\tSo(device, ShouldResemble, &testdevice)\n\t\t\t\tSo(m.Map(), ShouldResemble, map[string]interface{}{\n\t\t\t\t\t\"aps\": map[string]interface{}{\n\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\"sound\": \"sosumi.mp3\",\n\t\t\t\t\t},\n\t\t\t\t\t\"acme\": \"interesting\",\n\t\t\t\t})\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"device_ids\": [\"device\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"device\"\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeTrue)\n\t\t})\n\n\t\tConvey(\"push to non-existent device\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender *push.APNSPusher, device *oddb.Device, m push.Mapper) {\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\t\"device_ids\": [\"nonexistent\"],\n\t\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"nonexistent\",\n\t\t\"_type\": \"error\",\n\t\t\"message\": \"cannot find device \\\"nonexistent\\\"\",\n\t\t\"type\": \"ResourceNotFound\",\n\t\t\"code\": 101,\n\t\t\"info\": {\"id\": \"nonexistent\"}\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeFalse)\n\t\t})\n\t})\n\n}\n\nfunc TestPushToUser(t *testing.T) {\n\tConvey(\"push to user\", t, func() {\n\t\ttestdevice1 := oddb.Device{\n\t\t\tID: \"device1\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"johndoe\",\n\t\t}\n\t\ttestdevice2 := oddb.Device{\n\t\t\tID: \"device2\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"johndoe\",\n\t\t}\n\t\ttestdevice3 := oddb.Device{\n\t\t\tID: \"device2\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"janedoe\",\n\t\t}\n\t\tconn := simpleDeviceConn{\n\t\t\tdevices: []oddb.Device{testdevice1, testdevice2, testdevice3},\n\t\t}\n\n\t\tr := handlertest.NewSingleRouteRouter(PushToUserHandler, func(p *router.Payload) {\n\t\t\tp.DBConn = &conn\n\t\t})\n\n\t\toriginalSendFunc := sendPushNotification\n\t\tdefer func() {\n\t\t\tsendPushNotification = originalSendFunc\n\t\t}()\n\n\t\tConvey(\"push to single user\", func() {\n\t\t\tsentDevices := make([]oddb.Device, 0)\n\t\t\tsendPushNotification = func(sender *push.APNSPusher, device *oddb.Device, m push.Mapper) {\n\t\t\t\tSo(m.Map(), ShouldResemble, map[string]interface{}{\n\t\t\t\t\t\"aps\": map[string]interface{}{\n\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\"sound\": \"sosumi.mp3\",\n\t\t\t\t\t},\n\t\t\t\t\t\"acme\": \"interesting\",\n\t\t\t\t})\n\t\t\t\tsentDevices = append(sentDevices, *device)\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"user_ids\": [\"johndoe\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\"_id\":\"johndoe\"}]\n}`)\n\n\t\t\tSo(len(sentDevices), ShouldEqual, 2)\n\t\t\tSo(sentDevices[0], ShouldResemble, testdevice1)\n\t\t\tSo(sentDevices[1], ShouldResemble, testdevice2)\n\t\t})\n\n\t\tConvey(\"push to non-existent user\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender *push.APNSPusher, device *oddb.Device, m push.Mapper) {\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"user_ids\": [\"nonexistent\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"nonexistent\",\n\t\t\"_type\": \"error\",\n\t\t\"message\": \"cannot find user \\\"nonexistent\\\"\",\n\t\t\"type\": \"ResourceNotFound\",\n\t\t\"code\": 101,\n\t\t\"info\": {\"id\": \"nonexistent\"}\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeFalse)\n\t\t})\n\t})\n\n}\n\ntype simpleDeviceConn struct {\n\tdevices []oddb.Device\n\toddb.Conn\n}\n\nfunc (conn *simpleDeviceConn) GetDevice(id string, device *oddb.Device) error {\n\tfor _, prospectiveDevice := range conn.devices {\n\t\tif prospectiveDevice.ID == id {\n\t\t\t*device = prospectiveDevice\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn oddb.ErrDeviceNotFound\n}\n\nfunc (conn *simpleDeviceConn) QueryDevicesByUser(user string) ([]oddb.Device, error) {\n\tresult := make([]oddb.Device, 0)\n\tfor _, prospectiveDevice := range conn.devices {\n\t\tif prospectiveDevice.UserInfoID == user {\n\t\t\tresult = append(result, prospectiveDevice)\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\treturn nil, oddb.ErrUserNotFound\n\t} else {\n\t\treturn result, nil\n\t}\n}\n<commit_msg>subscription: fix push_test<commit_after>package handler\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/oursky\/ourd\/handler\/handlertest\"\n\t. \"github.com\/oursky\/ourd\/ourtest\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\n\t\"github.com\/oursky\/ourd\/oddb\"\n\t\"github.com\/oursky\/ourd\/push\"\n\t\"github.com\/oursky\/ourd\/router\"\n)\n\nfunc TestPushToDevice(t *testing.T) {\n\tConvey(\"push to device\", t, func() {\n\t\ttestdevice := oddb.Device{\n\t\t\tID: \"device\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"userid\",\n\t\t}\n\t\tconn := simpleDeviceConn{\n\t\t\tdevices: []oddb.Device{testdevice},\n\t\t}\n\n\t\tr := handlertest.NewSingleRouteRouter(PushToDeviceHandler, func(p *router.Payload) {\n\t\t\tp.DBConn = &conn\n\t\t})\n\n\t\toriginalSendFunc := sendPushNotification\n\t\tdefer func() {\n\t\t\tsendPushNotification = originalSendFunc\n\t\t}()\n\n\t\tConvey(\"push to single device\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender push.Sender, device *oddb.Device, m push.Mapper) {\n\t\t\t\tSo(device, ShouldResemble, &testdevice)\n\t\t\t\tSo(m.Map(), ShouldResemble, map[string]interface{}{\n\t\t\t\t\t\"aps\": map[string]interface{}{\n\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\"sound\": \"sosumi.mp3\",\n\t\t\t\t\t},\n\t\t\t\t\t\"acme\": \"interesting\",\n\t\t\t\t})\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"device_ids\": [\"device\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"device\"\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeTrue)\n\t\t})\n\n\t\tConvey(\"push to non-existent device\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender push.Sender, device *oddb.Device, m push.Mapper) {\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\t\"device_ids\": [\"nonexistent\"],\n\t\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"nonexistent\",\n\t\t\"_type\": \"error\",\n\t\t\"message\": \"cannot find device \\\"nonexistent\\\"\",\n\t\t\"type\": \"ResourceNotFound\",\n\t\t\"code\": 101,\n\t\t\"info\": {\"id\": \"nonexistent\"}\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeFalse)\n\t\t})\n\t})\n\n}\n\nfunc TestPushToUser(t *testing.T) {\n\tConvey(\"push to user\", t, func() {\n\t\ttestdevice1 := oddb.Device{\n\t\t\tID: \"device1\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"johndoe\",\n\t\t}\n\t\ttestdevice2 := oddb.Device{\n\t\t\tID: \"device2\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"johndoe\",\n\t\t}\n\t\ttestdevice3 := oddb.Device{\n\t\t\tID: \"device2\",\n\t\t\tType: \"ios\",\n\t\t\tToken: \"token\",\n\t\t\tUserInfoID: \"janedoe\",\n\t\t}\n\t\tconn := simpleDeviceConn{\n\t\t\tdevices: []oddb.Device{testdevice1, testdevice2, testdevice3},\n\t\t}\n\n\t\tr := handlertest.NewSingleRouteRouter(PushToUserHandler, func(p *router.Payload) {\n\t\t\tp.DBConn = &conn\n\t\t})\n\n\t\toriginalSendFunc := sendPushNotification\n\t\tdefer func() {\n\t\t\tsendPushNotification = originalSendFunc\n\t\t}()\n\n\t\tConvey(\"push to single user\", func() {\n\t\t\tsentDevices := make([]oddb.Device, 0)\n\t\t\tsendPushNotification = func(sender push.Sender, device *oddb.Device, m push.Mapper) {\n\t\t\t\tSo(m.Map(), ShouldResemble, map[string]interface{}{\n\t\t\t\t\t\"aps\": map[string]interface{}{\n\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\"sound\": \"sosumi.mp3\",\n\t\t\t\t\t},\n\t\t\t\t\t\"acme\": \"interesting\",\n\t\t\t\t})\n\t\t\t\tsentDevices = append(sentDevices, *device)\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"user_ids\": [\"johndoe\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\"_id\":\"johndoe\"}]\n}`)\n\n\t\t\tSo(len(sentDevices), ShouldEqual, 2)\n\t\t\tSo(sentDevices[0], ShouldResemble, testdevice1)\n\t\t\tSo(sentDevices[1], ShouldResemble, testdevice2)\n\t\t})\n\n\t\tConvey(\"push to non-existent user\", func() {\n\t\t\tcalled := false\n\t\t\tsendPushNotification = func(sender push.Sender, device *oddb.Device, m push.Mapper) {\n\t\t\t\tcalled = true\n\t\t\t}\n\t\t\tresp := r.POST(`{\n\t\t\t\t\t\"user_ids\": [\"nonexistent\"],\n\t\t\t\t\t\"notification\": {\n\t\t\t\t\t\t\"aps\": {\n\t\t\t\t\t\t\t\"alert\": \"This is a message.\",\n\t\t\t\t\t\t\t\"sound\": \"sosumi.mp3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"acme\": \"interesting\"\n\t\t\t\t\t}\n\t\t\t\t}`)\n\t\t\tSo(resp.Code, ShouldEqual, 200)\n\t\t\tSo(resp.Body.Bytes(), ShouldEqualJSON, `{\n\t\"result\": [{\n\t\t\"_id\": \"nonexistent\",\n\t\t\"_type\": \"error\",\n\t\t\"message\": \"cannot find user \\\"nonexistent\\\"\",\n\t\t\"type\": \"ResourceNotFound\",\n\t\t\"code\": 101,\n\t\t\"info\": {\"id\": \"nonexistent\"}\n\t}]\n}`)\n\t\t\tSo(called, ShouldBeFalse)\n\t\t})\n\t})\n\n}\n\ntype simpleDeviceConn struct {\n\tdevices []oddb.Device\n\toddb.Conn\n}\n\nfunc (conn *simpleDeviceConn) GetDevice(id string, device *oddb.Device) error {\n\tfor _, prospectiveDevice := range conn.devices {\n\t\tif prospectiveDevice.ID == id {\n\t\t\t*device = prospectiveDevice\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn oddb.ErrDeviceNotFound\n}\n\nfunc (conn *simpleDeviceConn) QueryDevicesByUser(user string) ([]oddb.Device, error) {\n\tresult := make([]oddb.Device, 0)\n\tfor _, prospectiveDevice := range conn.devices {\n\t\tif prospectiveDevice.UserInfoID == user {\n\t\t\tresult = append(result, prospectiveDevice)\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\treturn nil, oddb.ErrUserNotFound\n\t} else {\n\t\treturn result, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n*\/\n\n\/\/sqlParser.go\n\/\/interacts with database\npackage sqlParser\n\nimport (\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/jmoiron\/sqlx\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\n\/\/connects to and returns a pointer to the database\nfunc ConnectToDatabase(username string, password string, environment string) sqlx.DB {\n\tdb, err := sqlx.Open(\"mysql\", username+\":\"+password+\"@tcp(localhost:3306)\/\"+environment)\n\tcheck(err)\n\n\treturn *db\n}\n\n\/\/returns array of table name strings from queried database\nfunc GetTableNames(db sqlx.DB) []string {\n\tvar tableNames []string\n\n\ttableRawBytes := make([]byte, 1)\n\ttableInterface := make([]interface{}, 1)\n\n\ttableInterface[0] = &tableRawBytes\n\n\trows, err := db.Query(\"SELECT TABLE_NAME FROM information_schema.tables WHERE TABLE_TYPE='BASE TABLE' and TABLE_SCHEMA='to_development'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(tableInterface...)\n\t\tcheck(err)\n\n\t\ttableNames = append(tableNames, string(tableRawBytes))\n\t}\n\n\treturn tableNames\n}\n\n\/\/returns *Rows from given table (name) from queried database\nfunc GetRows(db sqlx.DB, tableName string) *sqlx.Rows {\n\trows, err := db.Queryx(\"SELECT * from \" + tableName)\n\tcheck(err)\n\n\treturn rows\n}\n\n\/\/returns array of column names from table in database\nfunc GetColumnNames(db sqlx.DB, tableName string) []string {\n\tvar colNames []string\n\n\tcolRawBytes := make([]byte, 1)\n\tcolInterface := make([]interface{}, 1)\n\n\tcolInterface[0] = &colRawBytes\n\n\trows, err := db.Query(\"SELECT COLUMN_NAME FROM information_schema.columns WHERE TABLE_NAME='\" + tableName + \"' and TABLE_SCHEMA='to_development'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(colInterface...)\n\t\tcheck(err)\n\n\t\tcolNames = append(colNames, string(colRawBytes))\n\t}\n\n\treturn colNames\n}\n\n\/\/returns array of column names from table in database\nfunc GetColumnTypes(db sqlx.DB, tableName string) []string {\n\tvar colTypes []string\n\n\tcolRawBytes := make([]byte, 1)\n\tcolInterface := make([]interface{}, 1)\n\n\tcolInterface[0] = &colRawBytes\n\n\trows, err := db.Query(\"SELECT COLUMN_TYPE FROM information_schema.columns WHERE TABLE_NAME='\" + tableName + \"' and TABLE_SCHEMA='to_development'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(colInterface...)\n\t\tcheck(err)\n\n\t\tcolTypes = append(colTypes, string(colRawBytes))\n\t}\n\n\treturn colTypes\n}\n<commit_msg>Removed hard coded table schema<commit_after>\/*\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n*\/\n\n\/\/sqlParser.go\n\/\/interacts with database\npackage sqlParser\n\nimport (\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/jmoiron\/sqlx\"\n)\n\nvar (\n\tglobalEnvironment = \"\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\n\/\/connects to and returns a pointer to the database\nfunc ConnectToDatabase(username string, password string, environment string) sqlx.DB {\n\tdb, err := sqlx.Open(\"mysql\", username+\":\"+password+\"@tcp(localhost:3306)\/\"+environment)\n\tcheck(err)\n\n\t\/\/set globalEnvironment\n\tglobalEnvironment = environment\n\n\treturn *db\n}\n\n\/\/returns array of table name strings from queried database\nfunc GetTableNames(db sqlx.DB) []string {\n\tvar tableNames []string\n\n\ttableRawBytes := make([]byte, 1)\n\ttableInterface := make([]interface{}, 1)\n\n\ttableInterface[0] = &tableRawBytes\n\n\trows, err := db.Query(\"SELECT TABLE_NAME FROM information_schema.tables WHERE TABLE_TYPE='BASE TABLE' and TABLE_SCHEMA='\" + globalEnvironment + \"'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(tableInterface...)\n\t\tcheck(err)\n\n\t\ttableNames = append(tableNames, string(tableRawBytes))\n\t}\n\n\treturn tableNames\n}\n\n\/\/returns *Rows from given table (name) from queried database\nfunc GetRows(db sqlx.DB, tableName string) *sqlx.Rows {\n\trows, err := db.Queryx(\"SELECT * from \" + tableName)\n\tcheck(err)\n\n\treturn rows\n}\n\n\/\/returns array of column names from table in database\nfunc GetColumnNames(db sqlx.DB, tableName string) []string {\n\tvar colNames []string\n\n\tcolRawBytes := make([]byte, 1)\n\tcolInterface := make([]interface{}, 1)\n\n\tcolInterface[0] = &colRawBytes\n\n\trows, err := db.Query(\"SELECT COLUMN_NAME FROM information_schema.columns WHERE TABLE_NAME='\" + tableName + \"' and TABLE_SCHEMA='\" + globalEnvironment + \"'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(colInterface...)\n\t\tcheck(err)\n\n\t\tcolNames = append(colNames, string(colRawBytes))\n\t}\n\n\treturn colNames\n}\n\n\/\/returns array of column names from table in database\nfunc GetColumnTypes(db sqlx.DB, tableName string) []string {\n\tvar colTypes []string\n\n\tcolRawBytes := make([]byte, 1)\n\tcolInterface := make([]interface{}, 1)\n\n\tcolInterface[0] = &colRawBytes\n\n\trows, err := db.Query(\"SELECT COLUMN_TYPE FROM information_schema.columns WHERE TABLE_NAME='\" + tableName + \"' and TABLE_SCHEMA='\" + globalEnvironment + \"'\")\n\tcheck(err)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(colInterface...)\n\t\tcheck(err)\n\n\t\tcolTypes = append(colTypes, string(colRawBytes))\n\t}\n\n\treturn colTypes\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This library implements a cron spec parser and runner. See the README for\n\/\/ more details.\npackage cron\n\nimport (\n\t\"log\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n)\n\n\/\/ Cron keeps track of any number of entries, invoking the associated func as\n\/\/ specified by the schedule. It may be started, stopped, and the entries may\n\/\/ be inspected while running.\ntype Cron struct {\n\tentries []*Entry\n\tstop chan struct{}\n\tadd chan *Entry\n\tsnapshot chan []*Entry\n\trunning bool\n\tErrorLog *log.Logger\n\tlocation *time.Location\n}\n\n\/\/ Job is an interface for submitted cron jobs.\ntype Job interface {\n\tRun()\n}\n\n\/\/ The Schedule describes a job's duty cycle.\ntype Schedule interface {\n\t\/\/ Return the next activation time, later than the given time.\n\t\/\/ Next is invoked initially, and then each time the job is run.\n\tNext(time.Time) time.Time\n}\n\n\/\/ Entry consists of a schedule and the func to execute on that schedule.\ntype Entry struct {\n\t\/\/ The schedule on which this job should be run.\n\tSchedule Schedule\n\n\t\/\/ The next time the job will run. This is the zero time if Cron has not been\n\t\/\/ started or this entry's schedule is unsatisfiable\n\tNext time.Time\n\n\t\/\/ The last time this job was run. This is the zero time if the job has never\n\t\/\/ been run.\n\tPrev time.Time\n\n\t\/\/ The Job to run.\n\tJob Job\n}\n\n\/\/ byTime is a wrapper for sorting the entry array by time\n\/\/ (with zero time at the end).\ntype byTime []*Entry\n\nfunc (s byTime) Len() int { return len(s) }\nfunc (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s byTime) Less(i, j int) bool {\n\t\/\/ Two zero times should return false.\n\t\/\/ Otherwise, zero is \"greater\" than any other time.\n\t\/\/ (To sort it at the end of the list.)\n\tif s[i].Next.IsZero() {\n\t\treturn false\n\t}\n\tif s[j].Next.IsZero() {\n\t\treturn true\n\t}\n\treturn s[i].Next.Before(s[j].Next)\n}\n\n\/\/ New returns a new Cron job runner.\nfunc New() *Cron {\n\treturn NewWithLocation(time.Now().Location())\n}\n\n\/\/ NewWithLocation returns a new Cron job runner.\nfunc NewWithLocation(location *time.Location) *Cron {\n\treturn &Cron{\n\t\tentries: nil,\n\t\tadd: make(chan *Entry),\n\t\tstop: make(chan struct{}),\n\t\tsnapshot: make(chan []*Entry),\n\t\trunning: false,\n\t\tErrorLog: nil,\n\t\tlocation: location,\n\t}\n}\n\n\/\/ A wrapper that turns a func() into a cron.Job\ntype FuncJob func()\n\nfunc (f FuncJob) Run() { f() }\n\n\/\/ AddFunc adds a func to the Cron to be run on the given schedule.\nfunc (c *Cron) AddFunc(spec string, cmd func()) error {\n\treturn c.AddJob(spec, FuncJob(cmd))\n}\n\n\/\/ AddJob adds a Job to the Cron to be run on the given schedule.\nfunc (c *Cron) AddJob(spec string, cmd Job) error {\n\tschedule, err := Parse(spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Schedule(schedule, cmd)\n\treturn nil\n}\n\n\/\/ Schedule adds a Job to the Cron to be run on the given schedule.\nfunc (c *Cron) Schedule(schedule Schedule, cmd Job) {\n\tentry := &Entry{\n\t\tSchedule: schedule,\n\t\tJob: cmd,\n\t}\n\tif !c.running {\n\t\tc.entries = append(c.entries, entry)\n\t\treturn\n\t}\n\n\tc.add <- entry\n}\n\n\/\/ Entries returns a snapshot of the cron entries.\nfunc (c *Cron) Entries() []*Entry {\n\tif c.running {\n\t\tc.snapshot <- nil\n\t\tx := <-c.snapshot\n\t\treturn x\n\t}\n\treturn c.entrySnapshot()\n}\n\n\/\/ Location gets the time zone location\nfunc (c *Cron) Location() *time.Location {\n\treturn c.location\n}\n\n\/\/ Start the cron scheduler in its own go-routine.\nfunc (c *Cron) Start() {\n\tc.running = true\n\tgo c.run()\n}\n\nfunc (c *Cron) runWithRecovery(j Job) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tconst size = 64 << 10\n\t\t\tbuf := make([]byte, size)\n\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\tc.logf(\"cron: panic running job: %v\\n%s\", r, buf)\n\t\t}\n\t}()\n\tj.Run()\n}\n\n\/\/ Run the scheduler.. this is private just due to the need to synchronize\n\/\/ access to the 'running' state variable.\nfunc (c *Cron) run() {\n\t\/\/ Figure out the next activation times for each entry.\n\tnow := time.Now().In(c.location)\n\tfor _, entry := range c.entries {\n\t\tentry.Next = entry.Schedule.Next(now)\n\t}\n\n\tfor {\n\t\t\/\/ Determine the next entry to run.\n\t\tsort.Sort(byTime(c.entries))\n\n\t\tvar effective time.Time\n\t\tif len(c.entries) == 0 || c.entries[0].Next.IsZero() {\n\t\t\t\/\/ If there are no entries yet, just sleep - it still handles new entries\n\t\t\t\/\/ and stop requests.\n\t\t\teffective = now.AddDate(10, 0, 0)\n\t\t} else {\n\t\t\teffective = c.entries[0].Next\n\t\t}\n\n\t\tselect {\n\t\tcase now = <-time.After(effective.Sub(now)):\n\t\t\t\/\/ Run every entry whose next time was this effective time.\n\t\t\tfor _, e := range c.entries {\n\t\t\t\tif e.Next != effective {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tgo c.runWithRecovery(e.Job)\n\t\t\t\te.Prev = e.Next\n\t\t\t\te.Next = e.Schedule.Next(now)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase newEntry := <-c.add:\n\t\t\tc.entries = append(c.entries, newEntry)\n\t\t\tnewEntry.Next = newEntry.Schedule.Next(time.Now().Local())\n\n\t\tcase <-c.snapshot:\n\t\t\tc.snapshot <- c.entrySnapshot()\n\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ 'now' should be updated after newEntry and snapshot cases.\n\t\tnow = time.Now().In(c.location)\n\t}\n}\n\n\/\/ Logs an error to stderr or to the configured error log\nfunc (c *Cron) logf(format string, args ...interface{}) {\n\tif c.ErrorLog != nil {\n\t\tc.ErrorLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\n\/\/ Stop stops the cron scheduler if it is running; otherwise it does nothing.\nfunc (c *Cron) Stop() {\n\tif !c.running {\n\t\treturn\n\t}\n\tc.stop <- struct{}{}\n\tc.running = false\n}\n\n\/\/ entrySnapshot returns a copy of the current cron entry list.\nfunc (c *Cron) entrySnapshot() []*Entry {\n\tentries := []*Entry{}\n\tfor _, e := range c.entries {\n\t\tentries = append(entries, &Entry{\n\t\t\tSchedule: e.Schedule,\n\t\t\tNext: e.Next,\n\t\t\tPrev: e.Prev,\n\t\t\tJob: e.Job,\n\t\t})\n\t}\n\treturn entries\n}\n<commit_msg>Comment update to clarify local time zone usage<commit_after>\/\/ This library implements a cron spec parser and runner. See the README for\n\/\/ more details.\npackage cron\n\nimport (\n\t\"log\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n)\n\n\/\/ Cron keeps track of any number of entries, invoking the associated func as\n\/\/ specified by the schedule. It may be started, stopped, and the entries may\n\/\/ be inspected while running.\ntype Cron struct {\n\tentries []*Entry\n\tstop chan struct{}\n\tadd chan *Entry\n\tsnapshot chan []*Entry\n\trunning bool\n\tErrorLog *log.Logger\n\tlocation *time.Location\n}\n\n\/\/ Job is an interface for submitted cron jobs.\ntype Job interface {\n\tRun()\n}\n\n\/\/ The Schedule describes a job's duty cycle.\ntype Schedule interface {\n\t\/\/ Return the next activation time, later than the given time.\n\t\/\/ Next is invoked initially, and then each time the job is run.\n\tNext(time.Time) time.Time\n}\n\n\/\/ Entry consists of a schedule and the func to execute on that schedule.\ntype Entry struct {\n\t\/\/ The schedule on which this job should be run.\n\tSchedule Schedule\n\n\t\/\/ The next time the job will run. This is the zero time if Cron has not been\n\t\/\/ started or this entry's schedule is unsatisfiable\n\tNext time.Time\n\n\t\/\/ The last time this job was run. This is the zero time if the job has never\n\t\/\/ been run.\n\tPrev time.Time\n\n\t\/\/ The Job to run.\n\tJob Job\n}\n\n\/\/ byTime is a wrapper for sorting the entry array by time\n\/\/ (with zero time at the end).\ntype byTime []*Entry\n\nfunc (s byTime) Len() int { return len(s) }\nfunc (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s byTime) Less(i, j int) bool {\n\t\/\/ Two zero times should return false.\n\t\/\/ Otherwise, zero is \"greater\" than any other time.\n\t\/\/ (To sort it at the end of the list.)\n\tif s[i].Next.IsZero() {\n\t\treturn false\n\t}\n\tif s[j].Next.IsZero() {\n\t\treturn true\n\t}\n\treturn s[i].Next.Before(s[j].Next)\n}\n\n\/\/ New returns a new Cron job runner, in the Local time zone.\nfunc New() *Cron {\n\treturn NewWithLocation(time.Now().Location())\n}\n\n\/\/ NewWithLocation returns a new Cron job runner.\nfunc NewWithLocation(location *time.Location) *Cron {\n\treturn &Cron{\n\t\tentries: nil,\n\t\tadd: make(chan *Entry),\n\t\tstop: make(chan struct{}),\n\t\tsnapshot: make(chan []*Entry),\n\t\trunning: false,\n\t\tErrorLog: nil,\n\t\tlocation: location,\n\t}\n}\n\n\/\/ A wrapper that turns a func() into a cron.Job\ntype FuncJob func()\n\nfunc (f FuncJob) Run() { f() }\n\n\/\/ AddFunc adds a func to the Cron to be run on the given schedule.\nfunc (c *Cron) AddFunc(spec string, cmd func()) error {\n\treturn c.AddJob(spec, FuncJob(cmd))\n}\n\n\/\/ AddJob adds a Job to the Cron to be run on the given schedule.\nfunc (c *Cron) AddJob(spec string, cmd Job) error {\n\tschedule, err := Parse(spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Schedule(schedule, cmd)\n\treturn nil\n}\n\n\/\/ Schedule adds a Job to the Cron to be run on the given schedule.\nfunc (c *Cron) Schedule(schedule Schedule, cmd Job) {\n\tentry := &Entry{\n\t\tSchedule: schedule,\n\t\tJob: cmd,\n\t}\n\tif !c.running {\n\t\tc.entries = append(c.entries, entry)\n\t\treturn\n\t}\n\n\tc.add <- entry\n}\n\n\/\/ Entries returns a snapshot of the cron entries.\nfunc (c *Cron) Entries() []*Entry {\n\tif c.running {\n\t\tc.snapshot <- nil\n\t\tx := <-c.snapshot\n\t\treturn x\n\t}\n\treturn c.entrySnapshot()\n}\n\n\/\/ Location gets the time zone location\nfunc (c *Cron) Location() *time.Location {\n\treturn c.location\n}\n\n\/\/ Start the cron scheduler in its own go-routine.\nfunc (c *Cron) Start() {\n\tc.running = true\n\tgo c.run()\n}\n\nfunc (c *Cron) runWithRecovery(j Job) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tconst size = 64 << 10\n\t\t\tbuf := make([]byte, size)\n\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\tc.logf(\"cron: panic running job: %v\\n%s\", r, buf)\n\t\t}\n\t}()\n\tj.Run()\n}\n\n\/\/ Run the scheduler.. this is private just due to the need to synchronize\n\/\/ access to the 'running' state variable.\nfunc (c *Cron) run() {\n\t\/\/ Figure out the next activation times for each entry.\n\tnow := time.Now().In(c.location)\n\tfor _, entry := range c.entries {\n\t\tentry.Next = entry.Schedule.Next(now)\n\t}\n\n\tfor {\n\t\t\/\/ Determine the next entry to run.\n\t\tsort.Sort(byTime(c.entries))\n\n\t\tvar effective time.Time\n\t\tif len(c.entries) == 0 || c.entries[0].Next.IsZero() {\n\t\t\t\/\/ If there are no entries yet, just sleep - it still handles new entries\n\t\t\t\/\/ and stop requests.\n\t\t\teffective = now.AddDate(10, 0, 0)\n\t\t} else {\n\t\t\teffective = c.entries[0].Next\n\t\t}\n\n\t\tselect {\n\t\tcase now = <-time.After(effective.Sub(now)):\n\t\t\t\/\/ Run every entry whose next time was this effective time.\n\t\t\tfor _, e := range c.entries {\n\t\t\t\tif e.Next != effective {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tgo c.runWithRecovery(e.Job)\n\t\t\t\te.Prev = e.Next\n\t\t\t\te.Next = e.Schedule.Next(now)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase newEntry := <-c.add:\n\t\t\tc.entries = append(c.entries, newEntry)\n\t\t\tnewEntry.Next = newEntry.Schedule.Next(time.Now().Local())\n\n\t\tcase <-c.snapshot:\n\t\t\tc.snapshot <- c.entrySnapshot()\n\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ 'now' should be updated after newEntry and snapshot cases.\n\t\tnow = time.Now().In(c.location)\n\t}\n}\n\n\/\/ Logs an error to stderr or to the configured error log\nfunc (c *Cron) logf(format string, args ...interface{}) {\n\tif c.ErrorLog != nil {\n\t\tc.ErrorLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\n\/\/ Stop stops the cron scheduler if it is running; otherwise it does nothing.\nfunc (c *Cron) Stop() {\n\tif !c.running {\n\t\treturn\n\t}\n\tc.stop <- struct{}{}\n\tc.running = false\n}\n\n\/\/ entrySnapshot returns a copy of the current cron entry list.\nfunc (c *Cron) entrySnapshot() []*Entry {\n\tentries := []*Entry{}\n\tfor _, e := range c.entries {\n\t\tentries = append(entries, &Entry{\n\t\t\tSchedule: e.Schedule,\n\t\t\tNext: e.Next,\n\t\t\tPrev: e.Prev,\n\t\t\tJob: e.Job,\n\t\t})\n\t}\n\treturn entries\n}\n<|endoftext|>"} {"text":"<commit_before>package weed_server\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n)\n\nfunc (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) {\n\n\tresp := &volume_server_pb.BatchDeleteResponse{}\n\n\tnow := uint64(time.Now().Unix())\n\n\tfor _, fid := range req.FileIds {\n\t\tvid, id_cookie, err := operation.ParseFileId(fid)\n\t\tif err != nil {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError: err.Error()})\n\t\t\tcontinue\n\t\t}\n\n\t\tn := new(needle.Needle)\n\t\tvolumeId, _ := needle.NewVolumeId(vid)\n\t\tif req.SkipCookieCheck {\n\t\t\tn.Id, err = types.ParseNeedleId(id_cookie)\n\t\t\tif err != nil {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError: err.Error()})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tn.ParsePath(id_cookie)\n\t\t\tcookie := n.Cookie\n\t\t\tif _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil); err != nil {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\t\tError: err.Error(),\n\t\t\t\t})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n.Cookie != cookie {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError: \"File Random Cookie does not match.\",\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif n.IsChunkedManifest() {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusNotAcceptable,\n\t\t\t\tError: \"ChunkManifest: not allowed in batch delete mode.\",\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tn.LastModified = now\n\t\tif size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t\tError: err.Error()},\n\t\t\t)\n\t\t} else {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusAccepted,\n\t\t\t\tSize: uint32(size)},\n\t\t\t)\n\t\t}\n\t}\n\n\treturn resp, nil\n\n}\n<commit_msg>mount: fix for deletion stopped working since 2.53<commit_after>package weed_server\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n)\n\nfunc (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) {\n\n\tresp := &volume_server_pb.BatchDeleteResponse{}\n\n\tnow := uint64(time.Now().Unix())\n\n\tfor _, fid := range req.FileIds {\n\t\tvid, id_cookie, err := operation.ParseFileId(fid)\n\t\tif err != nil {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\tError: err.Error()})\n\t\t\tcontinue\n\t\t}\n\n\t\tn := new(needle.Needle)\n\t\tvolumeId, _ := needle.NewVolumeId(vid)\n\t\tif req.SkipCookieCheck {\n\t\t\tn.Id, _, err = needle.ParseNeedleIdCookie(id_cookie)\n\t\t\tif err != nil {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError: err.Error()})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tn.ParsePath(id_cookie)\n\t\t\tcookie := n.Cookie\n\t\t\tif _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil); err != nil {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\t\tError: err.Error(),\n\t\t\t\t})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n.Cookie != cookie {\n\t\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\t\tFileId: fid,\n\t\t\t\t\tStatus: http.StatusBadRequest,\n\t\t\t\t\tError: \"File Random Cookie does not match.\",\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif n.IsChunkedManifest() {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusNotAcceptable,\n\t\t\t\tError: \"ChunkManifest: not allowed in batch delete mode.\",\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tn.LastModified = now\n\t\tif size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t\tError: err.Error()},\n\t\t\t)\n\t\t} else {\n\t\t\tresp.Results = append(resp.Results, &volume_server_pb.DeleteResult{\n\t\t\t\tFileId: fid,\n\t\t\t\tStatus: http.StatusAccepted,\n\t\t\t\tSize: uint32(size)},\n\t\t\t)\n\t\t}\n\t}\n\n\treturn resp, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage discoverspaces\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"github.com\/juju\/names\"\n\t\"github.com\/juju\/utils\/set\"\n\t\"launchpad.net\/tomb\"\n\n\t\"github.com\/juju\/juju\/api\/discoverspaces\"\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/network\"\n\t\"github.com\/juju\/juju\/worker\"\n)\n\nvar logger = loggo.GetLogger(\"juju.discoverspaces\")\n\ntype discoverspacesWorker struct {\n\tapi *discoverspaces.API\n\ttomb tomb.Tomb\n\tdiscoveringSpaces chan struct{}\n}\n\nvar dashPrefix = regexp.MustCompile(\"^-*\")\nvar dashSuffix = regexp.MustCompile(\"-*$\")\nvar multipleDashes = regexp.MustCompile(\"--+\")\n\nfunc convertSpaceName(name string, existing set.Strings) string {\n\t\/\/ First lower case and replace spaces with dashes.\n\tname = strings.Replace(name, \" \", \"-\", -1)\n\tname = strings.ToLower(name)\n\t\/\/ Replace any character that isn't in the set \"-\", \"a-z\", \"0-9\".\n\tname = network.SpaceInvalidChars.ReplaceAllString(name, \"\")\n\t\/\/ Get rid of any dashes at the start as that isn't valid.\n\tname = dashPrefix.ReplaceAllString(name, \"\")\n\t\/\/ And any at the end.\n\tname = dashSuffix.ReplaceAllString(name, \"\")\n\t\/\/ Repleace multiple dashes with a single dash.\n\tname = multipleDashes.ReplaceAllString(name, \"-\")\n\t\/\/ Special case of when the space name was only dashes or invalid\n\t\/\/ characters!\n\tif name == \"\" {\n\t\tname = \"empty\"\n\t}\n\t\/\/ If this name is in use add a numerical suffix.\n\tif existing.Contains(name) {\n\t\tcounter := 2\n\t\tfor existing.Contains(name + fmt.Sprintf(\"-%d\", counter)) {\n\t\t\tcounter += 1\n\t\t}\n\t\tname = name + fmt.Sprintf(\"-%d\", counter)\n\t}\n\treturn name\n}\n\n\/\/ NewWorker returns a worker\nfunc NewWorker(api *discoverspaces.API) (worker.Worker, chan struct{}) {\n\tdw := &discoverspacesWorker{\n\t\tapi: api,\n\t\tdiscoveringSpaces: make(chan struct{}),\n\t}\n\tgo func() {\n\t\tdefer dw.tomb.Done()\n\t\tdw.tomb.Kill(dw.loop())\n\t}()\n\treturn dw, dw.discoveringSpaces\n}\n\nfunc (dw *discoverspacesWorker) Kill() {\n\tdw.tomb.Kill(nil)\n}\n\nfunc (dw *discoverspacesWorker) Wait() error {\n\treturn dw.tomb.Wait()\n}\n\nfunc (dw *discoverspacesWorker) loop() (err error) {\n\tmodelCfg, err := dw.api.ModelConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tmodel, err := environs.New(modelCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnetworkingModel, ok := environs.SupportsNetworking(model)\n\n\tif ok {\n\t\terr = dw.handleSubnets(networkingModel)\n\t\tif err != nil {\n\t\t\tclose(dw.discoveringSpaces)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\tclose(dw.discoveringSpaces)\n\n\t\/\/ TODO(mfoord): we'll have a watcher here checking if we need to\n\t\/\/ update the spaces\/subnets definition.\n\tdying := dw.tomb.Dying()\n\tfor {\n\t\tselect {\n\t\tcase <-dying:\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (dw *discoverspacesWorker) handleSubnets(env environs.NetworkingEnviron) error {\n\tok, err := env.SupportsSpaceDiscovery()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !ok {\n\t\t\/\/ Nothing to do.\n\t\treturn nil\n\t}\n\tproviderSpaces, err := env.Spaces()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tlistSpacesResult, err := dw.api.ListSpaces()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tstateSubnets, err := dw.api.ListSubnets(params.SubnetsFilters{})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tstateSubnetIds := make(set.Strings)\n\tfor _, subnet := range stateSubnets.Results {\n\t\tstateSubnetIds.Add(subnet.ProviderId)\n\t}\n\tstateSpaceMap := make(map[string]params.ProviderSpace)\n\tspaceNames := make(set.Strings)\n\tfor _, space := range listSpacesResult.Results {\n\t\tstateSpaceMap[space.ProviderId] = space\n\t\tspaceNames.Add(space.Name)\n\t}\n\n\t\/\/ TODO(mfoord): we need to delete spaces and subnets that no longer\n\t\/\/ exist, so long as they're not in use.\n\tfor _, space := range providerSpaces {\n\t\t\/\/ Check if the space is already in state, in which case we know\n\t\t\/\/ its name.\n\t\tstateSpace, ok := stateSpaceMap[string(space.ProviderId)]\n\t\tvar spaceTag names.SpaceTag\n\t\tif ok {\n\t\t\tspaceName := stateSpace.Name\n\t\t\tif !names.IsValidSpace(spaceName) {\n\t\t\t\t\/\/ Can only happen if an invalid name is stored\n\t\t\t\t\/\/ in state.\n\t\t\t\tlogger.Errorf(\"space %q has an invalid name, ignoring\", spaceName)\n\t\t\t\tcontinue\n\n\t\t\t}\n\t\t\tspaceTag = names.NewSpaceTag(spaceName)\n\n\t\t} else {\n\t\t\t\/\/ The space is new, we need to create a valid name for it\n\t\t\t\/\/ in state.\n\t\t\tspaceName := string(space.ProviderId)\n\t\t\t\/\/ Convert the name into a valid name that isn't already in\n\t\t\t\/\/ use.\n\t\t\tspaceName = convertSpaceName(spaceName, spaceNames)\n\t\t\tspaceNames.Add(spaceName)\n\t\t\tspaceTag = names.NewSpaceTag(spaceName)\n\t\t\t\/\/ We need to create the space.\n\t\t\targs := params.CreateSpacesParams{\n\t\t\t\tSpaces: []params.CreateSpaceParams{{\n\t\t\t\t\tPublic: false,\n\t\t\t\t\tSpaceTag: spaceTag.String(),\n\t\t\t\t\tProviderId: string(space.ProviderId),\n\t\t\t\t}}}\n\t\t\tresult, err := dw.api.CreateSpaces(args)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"error creating space %v\", err)\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tif len(result.Results) != 1 {\n\t\t\t\treturn errors.Errorf(\"unexpected number of results from CreateSpaces, should be 1: %v\", result)\n\t\t\t}\n\t\t\tif result.Results[0].Error != nil {\n\t\t\t\treturn errors.Errorf(\"error from CreateSpaces: %v\", result.Results[0].Error)\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO(mfoord): currently no way of removing subnets, or\n\t\t\/\/ changing the space they're in, so we can only add ones we\n\t\t\/\/ don't already know about.\n\t\tlogger.Debugf(\"Created space %v with %v subnets\", spaceTag.String(), len(space.Subnets))\n\t\tfor _, subnet := range space.Subnets {\n\t\t\tif stateSubnetIds.Contains(string(subnet.ProviderId)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tzones := subnet.AvailabilityZones\n\t\t\tif len(zones) == 0 {\n\t\t\t\tzones = []string{\"default\"}\n\t\t\t}\n\t\t\targs := params.AddSubnetsParams{\n\t\t\t\tSubnets: []params.AddSubnetParams{{\n\t\t\t\t\tSubnetProviderId: string(subnet.ProviderId),\n\t\t\t\t\tSpaceTag: spaceTag.String(),\n\t\t\t\t\tZones: zones,\n\t\t\t\t}}}\n\t\t\tlogger.Tracef(\"Adding subnet %v\", subnet.CIDR)\n\t\t\tresult, err := dw.api.AddSubnets(args)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"invalid creating subnet %v\", err)\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tif len(result.Results) != 1 {\n\t\t\t\treturn errors.Errorf(\"unexpected number of results from AddSubnets, should be 1: %v\", result)\n\t\t\t}\n\t\t\tif result.Results[0].Error != nil {\n\t\t\t\tlogger.Errorf(\"error creating subnet %v\", result.Results[0].Error)\n\t\t\t\treturn errors.Errorf(\"error creating subnet %v\", result.Results[0].Error)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Close discovery channel in event of errors<commit_after>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage discoverspaces\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"github.com\/juju\/names\"\n\t\"github.com\/juju\/utils\/set\"\n\t\"launchpad.net\/tomb\"\n\n\t\"github.com\/juju\/juju\/api\/discoverspaces\"\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/network\"\n\t\"github.com\/juju\/juju\/worker\"\n)\n\nvar logger = loggo.GetLogger(\"juju.discoverspaces\")\n\ntype discoverspacesWorker struct {\n\tapi *discoverspaces.API\n\ttomb tomb.Tomb\n\tdiscoveringSpaces chan struct{}\n}\n\nvar dashPrefix = regexp.MustCompile(\"^-*\")\nvar dashSuffix = regexp.MustCompile(\"-*$\")\nvar multipleDashes = regexp.MustCompile(\"--+\")\n\nfunc convertSpaceName(name string, existing set.Strings) string {\n\t\/\/ First lower case and replace spaces with dashes.\n\tname = strings.Replace(name, \" \", \"-\", -1)\n\tname = strings.ToLower(name)\n\t\/\/ Replace any character that isn't in the set \"-\", \"a-z\", \"0-9\".\n\tname = network.SpaceInvalidChars.ReplaceAllString(name, \"\")\n\t\/\/ Get rid of any dashes at the start as that isn't valid.\n\tname = dashPrefix.ReplaceAllString(name, \"\")\n\t\/\/ And any at the end.\n\tname = dashSuffix.ReplaceAllString(name, \"\")\n\t\/\/ Repleace multiple dashes with a single dash.\n\tname = multipleDashes.ReplaceAllString(name, \"-\")\n\t\/\/ Special case of when the space name was only dashes or invalid\n\t\/\/ characters!\n\tif name == \"\" {\n\t\tname = \"empty\"\n\t}\n\t\/\/ If this name is in use add a numerical suffix.\n\tif existing.Contains(name) {\n\t\tcounter := 2\n\t\tfor existing.Contains(name + fmt.Sprintf(\"-%d\", counter)) {\n\t\t\tcounter += 1\n\t\t}\n\t\tname = name + fmt.Sprintf(\"-%d\", counter)\n\t}\n\treturn name\n}\n\n\/\/ NewWorker returns a worker\nfunc NewWorker(api *discoverspaces.API) (worker.Worker, chan struct{}) {\n\tdw := &discoverspacesWorker{\n\t\tapi: api,\n\t\tdiscoveringSpaces: make(chan struct{}),\n\t}\n\tgo func() {\n\t\tdefer dw.tomb.Done()\n\t\tdw.tomb.Kill(dw.loop())\n\t}()\n\treturn dw, dw.discoveringSpaces\n}\n\nfunc (dw *discoverspacesWorker) Kill() {\n\tdw.tomb.Kill(nil)\n}\n\nfunc (dw *discoverspacesWorker) Wait() error {\n\treturn dw.tomb.Wait()\n}\n\nfunc (dw *discoverspacesWorker) loop() (err error) {\n\tmodelCfg, err := dw.api.ModelConfig()\n\tif err != nil {\n\t\tclose(dw.discoveringSpaces)\n\t\treturn err\n\t}\n\tmodel, err := environs.New(modelCfg)\n\tif err != nil {\n\t\tclose(dw.discoveringSpaces)\n\t\treturn err\n\t}\n\tnetworkingModel, ok := environs.SupportsNetworking(model)\n\n\tif ok {\n\t\terr = dw.handleSubnets(networkingModel)\n\t\tif err != nil {\n\t\t\tclose(dw.discoveringSpaces)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\tclose(dw.discoveringSpaces)\n\n\t\/\/ TODO(mfoord): we'll have a watcher here checking if we need to\n\t\/\/ update the spaces\/subnets definition.\n\tdying := dw.tomb.Dying()\n\tfor {\n\t\tselect {\n\t\tcase <-dying:\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (dw *discoverspacesWorker) handleSubnets(env environs.NetworkingEnviron) error {\n\tok, err := env.SupportsSpaceDiscovery()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !ok {\n\t\t\/\/ Nothing to do.\n\t\treturn nil\n\t}\n\tproviderSpaces, err := env.Spaces()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tlistSpacesResult, err := dw.api.ListSpaces()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tstateSubnets, err := dw.api.ListSubnets(params.SubnetsFilters{})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tstateSubnetIds := make(set.Strings)\n\tfor _, subnet := range stateSubnets.Results {\n\t\tstateSubnetIds.Add(subnet.ProviderId)\n\t}\n\tstateSpaceMap := make(map[string]params.ProviderSpace)\n\tspaceNames := make(set.Strings)\n\tfor _, space := range listSpacesResult.Results {\n\t\tstateSpaceMap[space.ProviderId] = space\n\t\tspaceNames.Add(space.Name)\n\t}\n\n\t\/\/ TODO(mfoord): we need to delete spaces and subnets that no longer\n\t\/\/ exist, so long as they're not in use.\n\tfor _, space := range providerSpaces {\n\t\t\/\/ Check if the space is already in state, in which case we know\n\t\t\/\/ its name.\n\t\tstateSpace, ok := stateSpaceMap[string(space.ProviderId)]\n\t\tvar spaceTag names.SpaceTag\n\t\tif ok {\n\t\t\tspaceName := stateSpace.Name\n\t\t\tif !names.IsValidSpace(spaceName) {\n\t\t\t\t\/\/ Can only happen if an invalid name is stored\n\t\t\t\t\/\/ in state.\n\t\t\t\tlogger.Errorf(\"space %q has an invalid name, ignoring\", spaceName)\n\t\t\t\tcontinue\n\n\t\t\t}\n\t\t\tspaceTag = names.NewSpaceTag(spaceName)\n\n\t\t} else {\n\t\t\t\/\/ The space is new, we need to create a valid name for it\n\t\t\t\/\/ in state.\n\t\t\tspaceName := string(space.ProviderId)\n\t\t\t\/\/ Convert the name into a valid name that isn't already in\n\t\t\t\/\/ use.\n\t\t\tspaceName = convertSpaceName(spaceName, spaceNames)\n\t\t\tspaceNames.Add(spaceName)\n\t\t\tspaceTag = names.NewSpaceTag(spaceName)\n\t\t\t\/\/ We need to create the space.\n\t\t\targs := params.CreateSpacesParams{\n\t\t\t\tSpaces: []params.CreateSpaceParams{{\n\t\t\t\t\tPublic: false,\n\t\t\t\t\tSpaceTag: spaceTag.String(),\n\t\t\t\t\tProviderId: string(space.ProviderId),\n\t\t\t\t}}}\n\t\t\tresult, err := dw.api.CreateSpaces(args)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"error creating space %v\", err)\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tif len(result.Results) != 1 {\n\t\t\t\treturn errors.Errorf(\"unexpected number of results from CreateSpaces, should be 1: %v\", result)\n\t\t\t}\n\t\t\tif result.Results[0].Error != nil {\n\t\t\t\treturn errors.Errorf(\"error from CreateSpaces: %v\", result.Results[0].Error)\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO(mfoord): currently no way of removing subnets, or\n\t\t\/\/ changing the space they're in, so we can only add ones we\n\t\t\/\/ don't already know about.\n\t\tlogger.Debugf(\"Created space %v with %v subnets\", spaceTag.String(), len(space.Subnets))\n\t\tfor _, subnet := range space.Subnets {\n\t\t\tif stateSubnetIds.Contains(string(subnet.ProviderId)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tzones := subnet.AvailabilityZones\n\t\t\tif len(zones) == 0 {\n\t\t\t\tzones = []string{\"default\"}\n\t\t\t}\n\t\t\targs := params.AddSubnetsParams{\n\t\t\t\tSubnets: []params.AddSubnetParams{{\n\t\t\t\t\tSubnetProviderId: string(subnet.ProviderId),\n\t\t\t\t\tSpaceTag: spaceTag.String(),\n\t\t\t\t\tZones: zones,\n\t\t\t\t}}}\n\t\t\tlogger.Tracef(\"Adding subnet %v\", subnet.CIDR)\n\t\t\tresult, err := dw.api.AddSubnets(args)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"invalid creating subnet %v\", err)\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tif len(result.Results) != 1 {\n\t\t\t\treturn errors.Errorf(\"unexpected number of results from AddSubnets, should be 1: %v\", result)\n\t\t\t}\n\t\t\tif result.Results[0].Error != nil {\n\t\t\t\tlogger.Errorf(\"error creating subnet %v\", result.Results[0].Error)\n\t\t\t\treturn errors.Errorf(\"error creating subnet %v\", result.Results[0].Error)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ohetcd\n\nimport (\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"golang.org\/x\/net\/context\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"log\"\n\t\"strings\"\n)\n\ntype Linkable interface {\n\tSet()\n\tUpdate()\n\tSave()\n\tLink()\n\tUnlink()\n\tWatch()\n\tUnwatch()\n}\n\ntype Data struct {\n\tDirectory string\n\tObject interface{}\n\tDeep bool\n}\n\nfunc NewData() *Data {\n\treturn &Data{}\n}\n\n\/\/ set etcd path for data\nfunc (d *Data) Set(dir string, object interface{}, deep bool) {\n\td.Directory = dir\n\td.Object = object\n\td.Deep = deep\n}\n\n\/\/ get up-to-date value from etcd\nfunc (d *Data) Update() {\n\tif d.Deep {\n\t\tdeepRetrieve(d.Directory, d.Object)\n\t\treturn\n\t}\n\tresp, err := kapi.Get(context.Background(), d.Directory, &client.GetOptions{\n\t\tRecursive: true,\n\t})\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), string(client.ErrorCodeKeyNotFound)) {\n\t\t\t\/\/ if key not found, init etcd with current variable\n\t\t\td.Save()\n\t\t\treturn\n\t\t}\n\t\tlog.Println(err)\n\t}\n\tdata := resp.Node.Value\n\terr = yaml.Unmarshal([]byte(data), d.Object)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\n\/\/ changes made, save to etcd\nfunc (d *Data) Save() error {\n\tif d.Deep {\n\t\tdeepSave(d.Directory, d.Object)\n\t\treturn\n\t}\n\tval, err := yaml.Marshal(d.Object)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = kapi.Set(context.Background(), d.Directory, string(val), &client.SetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (d *Data) Link() {\n\tch := make(chan int)\n\t\/\/ save ch into global chMap\n\theartBeatChMap[d] = ch\n\tgo func(d *Data, ch chan int) {\n\t\tvar i int\n\t\tfor {\n\t\t\ti = <-ch\n\t\t\tif i == CH_CLOSE_SIG {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\td.Update()\n\t\t\t}\n\t\t}\n\t}(d, ch)\n}\n\nfunc (d *Data) Unlink() {\n\theartBeatChMap[d] <- CH_CLOSE_SIG\n}\n\nfunc (d *Data) Watch() {\n\twatcher := kapi.Watcher(d.Directory, &client.WatcherOptions{\n\t\tRecursive: true,\n\t})\n\tch := make(chan int)\n\twatchChMap[d] = ch\n\tgo func(watcher client.Watcher, d *Data, ch chan int) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tresp, err := watcher.Next(context.Background())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(resp)\n\t\t\t\t\td.Update()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(watcher, d, ch)\n}\n\nfunc (d *Data) Unwatch() {\n\twatchChMap[d] <- CH_CLOSE_SIG\n}\n<commit_msg>fix return err<commit_after>package ohetcd\n\nimport (\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"golang.org\/x\/net\/context\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"log\"\n\t\"strings\"\n)\n\ntype Linkable interface {\n\tSet()\n\tUpdate()\n\tSave()\n\tLink()\n\tUnlink()\n\tWatch()\n\tUnwatch()\n}\n\ntype Data struct {\n\tDirectory string\n\tObject interface{}\n\tDeep bool\n}\n\nfunc NewData() *Data {\n\treturn &Data{}\n}\n\n\/\/ set etcd path for data\nfunc (d *Data) Set(dir string, object interface{}, deep bool) {\n\td.Directory = dir\n\td.Object = object\n\td.Deep = deep\n}\n\n\/\/ get up-to-date value from etcd\nfunc (d *Data) Update() {\n\tif d.Deep {\n\t\tdeepRetrieve(d.Directory, d.Object)\n\t\treturn\n\t}\n\tresp, err := kapi.Get(context.Background(), d.Directory, &client.GetOptions{\n\t\tRecursive: true,\n\t})\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), string(client.ErrorCodeKeyNotFound)) {\n\t\t\t\/\/ if key not found, init etcd with current variable\n\t\t\td.Save()\n\t\t\treturn\n\t\t}\n\t\tlog.Println(err)\n\t}\n\tdata := resp.Node.Value\n\terr = yaml.Unmarshal([]byte(data), d.Object)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\n\/\/ changes made, save to etcd\nfunc (d *Data) Save() error {\n\tif d.Deep {\n\t\treturn deepSave(d.Directory, d.Object)\n\t}\n\tval, err := yaml.Marshal(d.Object)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = kapi.Set(context.Background(), d.Directory, string(val), &client.SetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (d *Data) Link() {\n\tch := make(chan int)\n\t\/\/ save ch into global chMap\n\theartBeatChMap[d] = ch\n\tgo func(d *Data, ch chan int) {\n\t\tvar i int\n\t\tfor {\n\t\t\ti = <-ch\n\t\t\tif i == CH_CLOSE_SIG {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\td.Update()\n\t\t\t}\n\t\t}\n\t}(d, ch)\n}\n\nfunc (d *Data) Unlink() {\n\theartBeatChMap[d] <- CH_CLOSE_SIG\n}\n\nfunc (d *Data) Watch() {\n\twatcher := kapi.Watcher(d.Directory, &client.WatcherOptions{\n\t\tRecursive: true,\n\t})\n\tch := make(chan int)\n\twatchChMap[d] = ch\n\tgo func(watcher client.Watcher, d *Data, ch chan int) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tresp, err := watcher.Next(context.Background())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(resp)\n\t\t\t\t\td.Update()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(watcher, d, ch)\n}\n\nfunc (d *Data) Unwatch() {\n\twatchChMap[d] <- CH_CLOSE_SIG\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage sh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tenableUserNS bool\n)\n\nfunc init() {\n\tconst usernsOk = \"1\"\n\tconst kernelcfg = \"CONFIG_USER_NS\"\n\n\tlogUsernsDetection := func(err error) {\n\t\tif enableUserNS {\n\t\t\tfmt.Printf(\"Linux user namespaces enabled!\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"Warning: Impossible to know if kernel support USER namespace.\\n\")\n\t\tfmt.Printf(\"Warning: USER namespace tests will not run.\\n\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: %s\\n\", err)\n\t\t}\n\t}\n\n\tusernsCfg := \"\/proc\/sys\/kernel\/unprivileged_userns_clone\"\n\tval, permerr := ioutil.ReadFile(usernsCfg)\n\n\t\/\/ Travis build doesn't support \/proc\/config.gz but kernel has userns\n\tif os.Getenv(\"TRAVIS_BUILD\") == usernsOk {\n\t\tenableUserNS = permerr == nil && string(val) == usernsOk\n\t\tlogUsernsDetection(permerr)\n\t\treturn\n\t}\n\n\tif permerr == nil {\n\t\tenableUserNS = string(val) == usernsOk\n\t\tlogUsernsDetection(permerr)\n\t\treturn\n\t}\n\n\t\/\/ old kernels dont has sysctl configurations\n\t\/\/ than just checking the \/proc\/config suffices\n\tusernsCmd := exec.Command(\"zgrep\", kernelcfg, \"\/proc\/config.gz\")\n\n\tcontent, err := usernsCmd.CombinedOutput()\n\tif err != nil {\n\t\tenableUserNS = false\n\t\tlogUsernsDetection(fmt.Errorf(\"Failed to get kernel config: %s\", err))\n\t\treturn\n\t}\n\n\tcfgVal := strings.Trim(string(content), \"\\n\\t \")\n\tenableUserNS = cfgVal == kernelcfg+\"=y\"\n\tlogUsernsDetection(fmt.Errorf(\"%s not enabled in kernel config\", kernelcfg))\n}\n\nfunc TestExecuteRforkUserNS(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tvar out bytes.Buffer\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\tsh.SetStdout(&out)\n\n\terr = sh.Exec(\"rfork test\", `\n rfork u {\n id -u\n }\n `)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif string(out.Bytes()) != \"0\\n\" {\n\t\tt.Errorf(\"User namespace not supported in your kernel: %s\", string(out.Bytes()))\n\t\treturn\n\t}\n}\n\nfunc TestExecuteRforkEnvVars(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\n\terr = sh.Exec(\"test env\", `abra = \"cadabra\"\nsetenv abra\nrfork up {\n\techo $abra\n}`)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n\nfunc TestExecuteRforkUserNSNested(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tvar out bytes.Buffer\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\tsh.SetStdout(&out)\n\n\terr = sh.Exec(\"rfork userns nested\", `\n rfork u {\n id -u\n rfork u {\n id -u\n }\n }\n `)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif string(out.Bytes()) != \"0\\n0\\n\" {\n\t\tt.Errorf(\"User namespace not supported in your kernel\")\n\t\treturn\n\t}\n}\n<commit_msg>fix travis_build detection<commit_after>\/\/ +build linux\n\npackage sh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tenableUserNS bool\n)\n\nfunc init() {\n\tconst usernsOk = \"1\"\n\tconst kernelcfg = \"CONFIG_USER_NS\"\n\n\tlogUsernsDetection := func(err error) {\n\t\tif enableUserNS {\n\t\t\tfmt.Printf(\"Linux user namespaces enabled!\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"Warning: Impossible to know if kernel support USER namespace.\\n\")\n\t\tfmt.Printf(\"Warning: USER namespace tests will not run.\\n\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: %s\\n\", err)\n\t\t}\n\t}\n\n\tusernsCfg := \"\/proc\/sys\/kernel\/unprivileged_userns_clone\"\n\tval, permerr := ioutil.ReadFile(usernsCfg)\n\n\t\/\/ Travis build doesn't support \/proc\/config.gz but kernel has userns\n\tif os.Getenv(\"TRAVIS_BUILD\") == \"1\" {\n\t\tenableUserNS = permerr == nil && string(val) == usernsOk\n\t\tlogUsernsDetection(permerr)\n\t\treturn\n\t}\n\n\tif permerr == nil {\n\t\tenableUserNS = string(val) == usernsOk\n\t\tlogUsernsDetection(permerr)\n\t\treturn\n\t}\n\n\t\/\/ old kernels dont has sysctl configurations\n\t\/\/ than just checking the \/proc\/config suffices\n\tusernsCmd := exec.Command(\"zgrep\", kernelcfg, \"\/proc\/config.gz\")\n\n\tcontent, err := usernsCmd.CombinedOutput()\n\tif err != nil {\n\t\tenableUserNS = false\n\t\tlogUsernsDetection(fmt.Errorf(\"Failed to get kernel config: %s\", err))\n\t\treturn\n\t}\n\n\tcfgVal := strings.Trim(string(content), \"\\n\\t \")\n\tenableUserNS = cfgVal == kernelcfg+\"=y\"\n\tlogUsernsDetection(fmt.Errorf(\"%s not enabled in kernel config\", kernelcfg))\n}\n\nfunc TestExecuteRforkUserNS(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tvar out bytes.Buffer\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\tsh.SetStdout(&out)\n\n\terr = sh.Exec(\"rfork test\", `\n rfork u {\n id -u\n }\n `)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif string(out.Bytes()) != \"0\\n\" {\n\t\tt.Errorf(\"User namespace not supported in your kernel: %s\", string(out.Bytes()))\n\t\treturn\n\t}\n}\n\nfunc TestExecuteRforkEnvVars(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\n\terr = sh.Exec(\"test env\", `abra = \"cadabra\"\nsetenv abra\nrfork up {\n\techo $abra\n}`)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n\nfunc TestExecuteRforkUserNSNested(t *testing.T) {\n\tif !enableUserNS {\n\t\tt.Skip(\"User namespace not enabled\")\n\t\treturn\n\t}\n\n\tvar out bytes.Buffer\n\tf, teardown := setup(t)\n\tdefer teardown()\n\n\tsh, err := NewShell()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tsh.SetNashdPath(f.nashdPath)\n\tsh.SetStdout(&out)\n\n\terr = sh.Exec(\"rfork userns nested\", `\n rfork u {\n id -u\n rfork u {\n id -u\n }\n }\n `)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif string(out.Bytes()) != \"0\\n0\\n\" {\n\t\tt.Errorf(\"User namespace not supported in your kernel\")\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package stmtcache is a cache that can be used to implement lazy prepared statements.\npackage stmtcache\n\nimport (\n\t\"strconv\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/jackc\/pgx\/v5\/pgconn\"\n)\n\nvar stmtCounter int64\n\n\/\/ NextStatementName returns a statement name that will be unique for the lifetime of the program.\nfunc NextStatementName() string {\n\tn := atomic.AddInt64(&stmtCounter, 1)\n\treturn \"stmtcache_\" + strconv.FormatInt(n, 10)\n}\n\n\/\/ Cache caches statement descriptions.\ntype Cache interface {\n\t\/\/ Get returns the statement description for sql. Returns nil if not found.\n\tGet(sql string) *pgconn.StatementDescription\n\n\t\/\/ Put stores sd in the cache. Put panics if sd.SQL is \"\". Put does nothing if sd.SQL already exists in the cache.\n\tPut(sd *pgconn.StatementDescription)\n\n\t\/\/ Invalidate invalidates statement description identified by sql. Does nothing if not found.\n\tInvalidate(sql string)\n\n\t\/\/ InvalidateAll invalidates all statement descriptions.\n\tInvalidateAll()\n\n\t\/\/ HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.\n\tHandleInvalidated() []*pgconn.StatementDescription\n\n\t\/\/ Len returns the number of cached prepared statement descriptions.\n\tLen() int\n\n\t\/\/ Cap returns the maximum number of cached prepared statement descriptions.\n\tCap() int\n}\n\nfunc IsStatementInvalid(err error) bool {\n\tpgErr, ok := err.(*pgconn.PgError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\t\/\/ https:\/\/github.com\/jackc\/pgx\/issues\/1162\n\t\/\/\n\t\/\/ We used to look for the message \"cached plan must not change result type\". However, that message can be localized.\n\t\/\/ Unfortunately, error code \"0A000\" - \"FEATURE NOT SUPPORTED\" is used for many different errors and the only way to\n\t\/\/ tell the difference is by the message. But all that happens is we clear a statement that we otherwise wouldn't\n\t\/\/ have so it should be safe.\n\tpossibleInvalidCachedPlanError := pgErr.Code == \"0A000\"\n\treturn possibleInvalidCachedPlanError\n}\n<commit_msg>Update docs<commit_after>\/\/ Package stmtcache is a cache for statement descriptions.\npackage stmtcache\n\nimport (\n\t\"strconv\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/jackc\/pgx\/v5\/pgconn\"\n)\n\nvar stmtCounter int64\n\n\/\/ NextStatementName returns a statement name that will be unique for the lifetime of the program.\nfunc NextStatementName() string {\n\tn := atomic.AddInt64(&stmtCounter, 1)\n\treturn \"stmtcache_\" + strconv.FormatInt(n, 10)\n}\n\n\/\/ Cache caches statement descriptions.\ntype Cache interface {\n\t\/\/ Get returns the statement description for sql. Returns nil if not found.\n\tGet(sql string) *pgconn.StatementDescription\n\n\t\/\/ Put stores sd in the cache. Put panics if sd.SQL is \"\". Put does nothing if sd.SQL already exists in the cache.\n\tPut(sd *pgconn.StatementDescription)\n\n\t\/\/ Invalidate invalidates statement description identified by sql. Does nothing if not found.\n\tInvalidate(sql string)\n\n\t\/\/ InvalidateAll invalidates all statement descriptions.\n\tInvalidateAll()\n\n\t\/\/ HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.\n\tHandleInvalidated() []*pgconn.StatementDescription\n\n\t\/\/ Len returns the number of cached prepared statement descriptions.\n\tLen() int\n\n\t\/\/ Cap returns the maximum number of cached prepared statement descriptions.\n\tCap() int\n}\n\nfunc IsStatementInvalid(err error) bool {\n\tpgErr, ok := err.(*pgconn.PgError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\t\/\/ https:\/\/github.com\/jackc\/pgx\/issues\/1162\n\t\/\/\n\t\/\/ We used to look for the message \"cached plan must not change result type\". However, that message can be localized.\n\t\/\/ Unfortunately, error code \"0A000\" - \"FEATURE NOT SUPPORTED\" is used for many different errors and the only way to\n\t\/\/ tell the difference is by the message. But all that happens is we clear a statement that we otherwise wouldn't\n\t\/\/ have so it should be safe.\n\tpossibleInvalidCachedPlanError := pgErr.Code == \"0A000\"\n\treturn possibleInvalidCachedPlanError\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage sync\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"go.uber.org\/yarpc\/internal\/testtime\"\n)\n\nfunc TestLifecycleOnce(t *testing.T) {\n\ttype testStruct struct {\n\t\tmsg string\n\n\t\t\/\/ A list of actions that will be applied on the LifecycleOnce\n\t\tactions []LifecycleAction\n\n\t\t\/\/ expected state at the end of the actions\n\t\texpectedFinalState LifecycleState\n\t}\n\ttests := []testStruct{\n\t\t{\n\t\t\tmsg: \"setup\",\n\t\t\texpectedFinalState: Idle,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start and Started\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStartAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Running},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Error and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"abort\"),\n\t\t\t\t\t\t\tExpectedErr: fmt.Errorf(\"abort\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Errored},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Errored},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start, Stop, and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Starting\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{ExpectedState: Running, Wait: 20 * testtime.Millisecond},\n\t\t\t\t\t\tGetStateAction{ExpectedState: Starting},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stopping\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{ExpectedState: Stopped, Wait: 20 * testtime.Millisecond},\n\t\t\t\t\t\tGetStateAction{ExpectedState: Stopping},\n\t\t\t\t\t\tGetStateAction{ExpectedState: Stopped},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Delayed stop, wait for stopping\",\n\t\t\t\/\/ The purpose of this test is to verify that the stopping state is\n\t\t\t\/\/ occupied for the duration of the Stop() call.\n\t\t\t\/\/\n\t\t\t\/\/ Timeline:\n\t\t\t\/\/ - 0ms Idle\n\t\t\t\/\/ - 0ms Starting\n\t\t\t\/\/ - 0–20ms Running\n\t\t\t\/\/ - 20–40ms Stopping\n\t\t\t\/\/ - 40–ms Stopped\n\t\t\t\/\/\n\t\t\t\/\/ First group:\n\t\t\t\/\/ - 0ms Start (0ms)\n\t\t\t\/\/ - 0ms Wait (20ms)\n\t\t\t\/\/ - 20ms Stop (20ms)\n\t\t\t\/\/\n\t\t\t\/\/ Second group:\n\t\t\t\/\/ - 0ms Wait until Stopping\n\t\t\t\/\/ - 20ms Resume\n\t\t\t\/\/ - 20ms Wait (10ms)\n\t\t\t\/\/ - 30ms Expect Stopping state\n\t\t\t\/\/ - 30ms Wait (20ms)\n\t\t\t\/\/ - 50ms Expect Stopped state\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\t\tWaitAction(20 * testtime.Millisecond),\n\t\t\t\t\t\t\tStopAction{ExpectedState: Stopped, Wait: 20 * testtime.Millisecond},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\t\tWaitAction(10 * testtime.Millisecond),\n\t\t\t\t\t\t\tExactStateAction{ExpectedState: Stopping},\n\t\t\t\t\t\t\tWaitAction(20 * testtime.Millisecond),\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Stopped},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start assure only called once and propagates the same error\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 1\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Successful Start followed by failed Stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop assure only called once and returns the same error\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{\n\t\t\t\t\tExpectedState: Running,\n\t\t\t\t},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 1\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop before start goes directly to 'stopped'\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStopAction{\n\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Pre-empting start after stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tWait: 500 * time.Second,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 20 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping stop after start\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start..............starting\n\t\t\t\/\/ 10: | 1. stop\n\t\t\t\/\/ 50: X..|..................running\n\t\t\t\/\/ |..................stopping\n\t\t\t\/\/ 60: X..................stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 50 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping stop after start error\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start..............starting\n\t\t\t\/\/ 10: | 1. stop............stopping\n\t\t\t\/\/ 50: X X..................errored\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 50 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping start after stop\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start.............starting\n\t\t\t\/\/ 10: |\n\t\t\t\/\/ 20: | 1: stop\n\t\t\t\/\/ 30: X -.................running\n\t\t\t\/\/ 30+Δ: |.................stopping\n\t\t\t\/\/ 40: | 2: start\n\t\t\t\/\/ 40+Δ: | X\n\t\t\t\/\/ 50: |\n\t\t\t\/\/ 60: |\n\t\t\t\/\/ 70: X.................stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Stopping,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 20 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start completes before overlapping stop completes\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start............starting\n\t\t\t\/\/ 10: | 1: start\n\t\t\t\/\/ 20: | - 2: stop\n\t\t\t\/\/ 30: X - - 3: start...running\n\t\t\t\/\/ 30+Δ: X | X..........stopping\n\t\t\t\/\/ 40: | 4: stop\n\t\t\t\/\/ | -\n\t\t\t\/\/ | -\n\t\t\t\/\/ 60: X X.......stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"stop action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.msg, func(t *testing.T) {\n\t\t\tmockCtrl := gomock.NewController(t)\n\t\t\tdefer mockCtrl.Finish()\n\n\t\t\tonce := Once()\n\t\t\tApplyLifecycleActions(t, once, tt.actions)\n\n\t\t\tassert.Equal(t, tt.expectedFinalState, once.LifecycleState())\n\t\t})\n\t}\n}\n\n\/\/ TestStopping verifies that a lifecycle object can spawn a goroutine and wait\n\/\/ for that goroutine to exit in its stopping state. The goroutine must wrap\n\/\/ up its work when it detects that the lifecycle has begun stopping. If it\n\/\/ waited for the stopped channel, the stop callback would deadlock.\nfunc TestStopping(t *testing.T) {\n\tl := Once()\n\tl.Start(nil)\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-l.Stopping():\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.Fail(t, \"deadlock\")\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tl.Stop(func() error {\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.Fail(t, \"deadlock\")\n\t\t}\n\t\treturn nil\n\t})\n}\n<commit_msg>make lifecycle test less flaky (#1153)<commit_after>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage sync\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"go.uber.org\/yarpc\/internal\/testtime\"\n)\n\nfunc TestLifecycleOnce(t *testing.T) {\n\ttype testStruct struct {\n\t\tmsg string\n\n\t\t\/\/ A list of actions that will be applied on the LifecycleOnce\n\t\tactions []LifecycleAction\n\n\t\t\/\/ expected state at the end of the actions\n\t\texpectedFinalState LifecycleState\n\t}\n\ttests := []testStruct{\n\t\t{\n\t\t\tmsg: \"setup\",\n\t\t\texpectedFinalState: Idle,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start and Started\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStartAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Running},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Error and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"abort\"),\n\t\t\t\t\t\t\tExpectedErr: fmt.Errorf(\"abort\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Errored},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Errored},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start, Stop, and Stopped\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\t\tStopAction{ExpectedState: Stopped},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\tWaitForStopAction,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Starting\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{ExpectedState: Running, Wait: 20 * testtime.Millisecond},\n\t\t\t\t\t\tGetStateAction{ExpectedState: Starting},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Running,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stopping\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{ExpectedState: Stopped, Wait: 50 * testtime.Millisecond},\n\t\t\t\t\t\tGetStateAction{ExpectedState: Stopping},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Delayed stop, wait for stopping\",\n\t\t\t\/\/ The purpose of this test is to verify that the stopping state is\n\t\t\t\/\/ occupied for the duration of the Stop() call.\n\t\t\t\/\/\n\t\t\t\/\/ Timeline:\n\t\t\t\/\/ - 0ms Idle\n\t\t\t\/\/ - 0ms Starting\n\t\t\t\/\/ - 0–20ms Running\n\t\t\t\/\/ - 20–40ms Stopping\n\t\t\t\/\/ - 40–ms Stopped\n\t\t\t\/\/\n\t\t\t\/\/ First group:\n\t\t\t\/\/ - 0ms Start (0ms)\n\t\t\t\/\/ - 0ms Wait (20ms)\n\t\t\t\/\/ - 20ms Stop (20ms)\n\t\t\t\/\/\n\t\t\t\/\/ Second group:\n\t\t\t\/\/ - 0ms Wait until Stopping\n\t\t\t\/\/ - 20ms Resume\n\t\t\t\/\/ - 20ms Wait (10ms)\n\t\t\t\/\/ - 30ms Expect Stopping state\n\t\t\t\/\/ - 30ms Wait (20ms)\n\t\t\t\/\/ - 50ms Expect Stopped state\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tStartAction{ExpectedState: Running},\n\t\t\t\t\t\t\tWaitAction(20 * testtime.Millisecond),\n\t\t\t\t\t\t\tStopAction{ExpectedState: Stopped, Wait: 20 * testtime.Millisecond},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tActions{\n\t\t\t\t\t\t\tWaitForStoppingAction,\n\t\t\t\t\t\t\tWaitAction(10 * testtime.Millisecond),\n\t\t\t\t\t\t\tExactStateAction{ExpectedState: Stopping},\n\t\t\t\t\t\t\tWaitAction(20 * testtime.Millisecond),\n\t\t\t\t\t\t\tGetStateAction{ExpectedState: Stopped},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start assure only called once and propagates the same error\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 1\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Successful Start followed by failed Stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tErr: errors.New(\"not expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop assure only called once and returns the same error\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStartAction{\n\t\t\t\t\tExpectedState: Running,\n\t\t\t\t},\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 1\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"not an expected error 2\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Stop before start goes directly to 'stopped'\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tStopAction{\n\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Pre-empting start after stop\",\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tWait: 500 * time.Second,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 20 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping stop after start\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start..............starting\n\t\t\t\/\/ 10: | 1. stop\n\t\t\t\/\/ 50: X..|..................running\n\t\t\t\/\/ |..................stopping\n\t\t\t\/\/ 60: X..................stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 50 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping stop after start error\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start..............starting\n\t\t\t\/\/ 10: | 1. stop............stopping\n\t\t\t\/\/ 50: X X..................errored\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 50 * testtime.Millisecond,\n\t\t\t\t\t\t\tErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Errored,\n\t\t\t\t\t\t\tExpectedErr: errors.New(\"expected error\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Errored,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Overlapping start after stop\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start.............starting\n\t\t\t\/\/ 10: |\n\t\t\t\/\/ 20: | 1: stop\n\t\t\t\/\/ 30: X -.................running\n\t\t\t\/\/ 30+Δ: |.................stopping\n\t\t\t\/\/ 40: | 2: start\n\t\t\t\/\/ 40+Δ: | X\n\t\t\t\/\/ 50: |\n\t\t\t\/\/ 60: |\n\t\t\t\/\/ 70: X.................stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Stopping,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 20 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t\t{\n\t\t\tmsg: \"Start completes before overlapping stop completes\",\n\t\t\t\/\/ ms: timeline\n\t\t\t\/\/ 00: 0: start............starting\n\t\t\t\/\/ 10: | 1: start\n\t\t\t\/\/ 20: | - 2: stop\n\t\t\t\/\/ 30: X - - 3: start...running\n\t\t\t\/\/ 30+Δ: X | X..........stopping\n\t\t\t\/\/ 40: | 4: stop\n\t\t\t\/\/ | -\n\t\t\t\/\/ | -\n\t\t\t\/\/ 60: X X.......stopped\n\t\t\tactions: []LifecycleAction{\n\t\t\t\tConcurrentAction{\n\t\t\t\t\tActions: []LifecycleAction{\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tWait: 30 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tWait: 40 * testtime.Millisecond,\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"start action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Running,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStopAction{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"stop action should not run\"),\n\t\t\t\t\t\t\tExpectedState: Stopped,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tWait: 10 * testtime.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedFinalState: Stopped,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.msg, func(t *testing.T) {\n\t\t\tmockCtrl := gomock.NewController(t)\n\t\t\tdefer mockCtrl.Finish()\n\n\t\t\tonce := Once()\n\t\t\tApplyLifecycleActions(t, once, tt.actions)\n\n\t\t\tassert.Equal(t, tt.expectedFinalState, once.LifecycleState())\n\t\t})\n\t}\n}\n\n\/\/ TestStopping verifies that a lifecycle object can spawn a goroutine and wait\n\/\/ for that goroutine to exit in its stopping state. The goroutine must wrap\n\/\/ up its work when it detects that the lifecycle has begun stopping. If it\n\/\/ waited for the stopped channel, the stop callback would deadlock.\nfunc TestStopping(t *testing.T) {\n\tl := Once()\n\tl.Start(nil)\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-l.Stopping():\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.Fail(t, \"deadlock\")\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tl.Stop(func() error {\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.Fail(t, \"deadlock\")\n\t\t}\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package builtins\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\ntype file struct {\n\tvalueStub\n}\n\nfunc NewFileClass() Value {\n\tf := &file{}\n\tf.initialize()\n\tf.AddMethod(NewMethod(\"expand_path\", func(args ...Value) (Value, error) {\n\t\targ1 := args[0].(*StringValue).String()\n\n\t\tif arg1[0] == '~' {\n\t\t\targ1 = os.Getenv(\"HOME\") + arg1[1:]\n\t\t}\n\n\t\tvar path string\n\t\tif len(args) == 2 {\n\t\t\tpath = filepath.Join(args[1].(*StringValue).String(), arg1)\n\t\t} else {\n\t\t\tpath, _ = filepath.Abs(arg1)\n\t\t}\n\n\t\treturn NewString(path), nil\n\t}))\n\n\treturn f\n}\n\nfunc (file *file) String() string {\n\treturn \"File\"\n}\n\nfunc (file *file) New() Value {\n\treturn file\n}\n<commit_msg>File class should have \"Class\" as its superclass<commit_after>package builtins\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\ntype fileClass struct {\n\tvalueStub\n}\n\nfunc NewFileClass() Value {\n\tf := &fileClass{}\n\tf.initialize()\n\tf.class = NewClassValue() \/\/ FIXME: this should be a global reference\n\tf.AddMethod(NewMethod(\"expand_path\", func(args ...Value) (Value, error) {\n\t\targ1 := args[0].(*StringValue).String()\n\n\t\tif arg1[0] == '~' {\n\t\t\targ1 = os.Getenv(\"HOME\") + arg1[1:]\n\t\t}\n\n\t\tvar path string\n\t\tif len(args) == 2 {\n\t\t\tpath = filepath.Join(args[1].(*StringValue).String(), arg1)\n\t\t} else {\n\t\t\tpath, _ = filepath.Abs(arg1)\n\t\t}\n\n\t\treturn NewString(path), nil\n\t}))\n\n\treturn f\n}\n\nfunc (file *fileClass) String() string {\n\treturn \"File\"\n}\n\nfunc (file *fileClass) New() Value {\n\treturn file\n}\n<|endoftext|>"} {"text":"<commit_before>package git_comment\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tgit \"gopkg.in\/libgit2\/git2go.v22\"\n)\n\nconst (\n\tauthorNotFoundError = \"No name or email found in git config for commenting\"\n\tcommitNotFoundError = \"Commit not found\"\n\tcommentNotFoundError = \"Comment not found\"\n\tuserNameKey = \"user.name\"\n\tuserEmailKey = \"user.email\"\n\theadCommit = \"HEAD\"\n)\n\n\/\/ Create a new comment on a commit, optionally with a file and line\nfunc CreateComment(repoPath string, commit *string, fileRef *FileRef, message string) (*string, error) {\n\tvar hash = commit\n\tauthor, cErr := author(repoPath)\n\tif cErr != nil {\n\t\treturn nil, cErr\n\t}\n\tif *hash == \"HEAD\" || hash == nil {\n\t\thead, hErr := head(repoPath)\n\t\tif hErr != nil {\n\t\t\treturn nil, hErr\n\t\t}\n\t\thash = head\n\t} else {\n\t\t\/\/ check if commit hash exists\n\t}\n\tcomment, err := NewComment(message, *hash, fileRef, author)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Println(comment.Serialize())\n\twriteErr := writeCommentToDisk(repoPath, comment)\n\tif writeErr != nil {\n\t\treturn nil, writeErr\n\t}\n\treturn &comment.ID, nil\n}\n\nfunc UpdateComment(repoPath string, ID string, message string) error {\n\tcomment, err := CommentByID(repoPath, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauthor, cErr := author(repoPath)\n\tif cErr != nil {\n\t\treturn cErr\n\t}\n\tcomment.Amend(message, author)\n\treturn writeCommentToDisk(repoPath, comment)\n}\n\n\/\/ Remove a comment from a commit\nfunc DeleteComment(repoPath string, ID string) error {\n\tcomment, err := CommentByID(repoPath, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcomment.Deleted = true\n\treturn writeCommentToDisk(repoPath, comment)\n}\n\n\/\/ Finds a comment by a given ID\nfunc CommentByID(repoPath string, identifier string) (*Comment, error) {\n\treturn &Comment{}, errors.New(commentNotFoundError)\n}\n\n\/\/ Finds all comments on a given commit\nfunc CommentsOnCommit(repoPath string, commit string) []*Comment {\n\treturn []*Comment{}\n}\n\n\/\/ Write git object for a given comment and update the\n\/\/ comment refs\nfunc writeCommentToDisk(repoPath string, comment *Comment) error {\n\treturn nil\n}\n\nfunc repo(repoPath string) (*git.Repository, error) {\n\treturn git.OpenRepository(repoPath)\n}\n\nfunc author(repoPath string) (*Person, error) {\n\trepo, err := repo(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig, cErr := repo.Config()\n\tif cErr != nil {\n\t\treturn nil, cErr\n\t}\n\tname, nErr := config.LookupString(userNameKey)\n\temail, eErr := config.LookupString(userEmailKey)\n\tif nErr != nil && eErr != nil {\n\t\treturn nil, errors.New(authorNotFoundError)\n\t}\n\treturn &Person{name, email}, nil\n}\n\nfunc head(repoPath string) (*string, error) {\n\trepo, err := repo(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thead, hErr := repo.Head()\n\tif hErr != nil {\n\t\treturn nil, hErr\n\t}\n\thash := head.Name()\n\treturn &hash, nil\n}\n<commit_msg>Cleanup<commit_after>package git_comment\n\nimport (\n\t\"errors\"\n\tgit \"gopkg.in\/libgit2\/git2go.v22\"\n)\n\nconst (\n\tauthorNotFoundError = \"No name or email found in git config for commenting\"\n\tcommitNotFoundError = \"Commit not found\"\n\tcommentNotFoundError = \"Comment not found\"\n\tuserNameKey = \"user.name\"\n\tuserEmailKey = \"user.email\"\n\theadCommit = \"HEAD\"\n)\n\n\/\/ Create a new comment on a commit, optionally with a file and line\nfunc CreateComment(repoPath string, commit *string, fileRef *FileRef, message string) (*string, error) {\n\tvar hash = commit\n\tauthor, cErr := author(repoPath)\n\tif cErr != nil {\n\t\treturn nil, cErr\n\t}\n\tif *hash == \"HEAD\" || hash == nil {\n\t\thead, hErr := head(repoPath)\n\t\tif hErr != nil {\n\t\t\treturn nil, hErr\n\t\t}\n\t\thash = head\n\t} else {\n\t\t\/\/ check if commit hash exists\n\t}\n\tcomment, err := NewComment(message, *hash, fileRef, author)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif writeErr := writeCommentToDisk(repoPath, comment); writeErr != nil {\n\t\treturn nil, writeErr\n\t}\n\treturn &comment.ID, nil\n}\n\nfunc UpdateComment(repoPath string, ID string, message string) error {\n\tcomment, err := CommentByID(repoPath, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauthor, cErr := author(repoPath)\n\tif cErr != nil {\n\t\treturn cErr\n\t}\n\tcomment.Amend(message, author)\n\treturn writeCommentToDisk(repoPath, comment)\n}\n\n\/\/ Remove a comment from a commit\nfunc DeleteComment(repoPath string, ID string) error {\n\tcomment, err := CommentByID(repoPath, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcomment.Deleted = true\n\treturn writeCommentToDisk(repoPath, comment)\n}\n\n\/\/ Finds a comment by a given ID\nfunc CommentByID(repoPath string, identifier string) (*Comment, error) {\n\treturn &Comment{}, errors.New(commentNotFoundError)\n}\n\n\/\/ Finds all comments on a given commit\nfunc CommentsOnCommit(repoPath string, commit string) []*Comment {\n\treturn []*Comment{}\n}\n\n\/\/ Write git object for a given comment and update the\n\/\/ comment refs\nfunc writeCommentToDisk(repoPath string, comment *Comment) error {\n\treturn nil\n}\n\nfunc repo(repoPath string) (*git.Repository, error) {\n\treturn git.OpenRepository(repoPath)\n}\n\nfunc author(repoPath string) (*Person, error) {\n\trepo, err := repo(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig, cErr := repo.Config()\n\tif cErr != nil {\n\t\treturn nil, cErr\n\t}\n\tname, nErr := config.LookupString(userNameKey)\n\temail, eErr := config.LookupString(userEmailKey)\n\tif nErr != nil && eErr != nil {\n\t\treturn nil, errors.New(authorNotFoundError)\n\t}\n\treturn &Person{name, email}, nil\n}\n\nfunc head(repoPath string) (*string, error) {\n\trepo, err := repo(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thead, hErr := repo.Head()\n\tif hErr != nil {\n\t\treturn nil, hErr\n\t}\n\thash := head.Name()\n\treturn &hash, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ioprogress\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ DrawFunc is the callback type for drawing progress.\ntype DrawFunc func(int64, int64) error\n\n\/\/ DrawTextFormatFunc is a callback used by DrawFuncs that draw text in\n\/\/ order to format the text into some more human friendly format.\ntype DrawTextFormatFunc func(int64, int64) string\n\nvar defaultDrawFunc DrawFunc\n\nfunc init() {\n\tdefaultDrawFunc = DrawTerminal(os.Stdout)\n}\n\n\/\/ DrawTerminal returns a DrawFunc that draws a progress bar to an io.Writer\n\/\/ that is assumed to be a terminal (and therefore respects carriage returns).\nfunc DrawTerminal(w io.Writer) DrawFunc {\n\tvar maxLength int\n\treturn DrawTerminalf(w, func(progress, total int64) string {\n\t\tline := fmt.Sprintf(\"%d\/%d\", progress, total)\n\n\t\t\/\/ Make sure we pad it to the max length we've ever drawn so that\n\t\t\/\/ we don't have trailing characters.\n\t\tif len(line) < maxLength {\n\t\t\tline = fmt.Sprintf(\n\t\t\t\t\"%s%s\", line, strings.Repeat(\" \", maxLength-len(line)))\n\t\t}\n\t\tmaxLength = len(line)\n\n\t\treturn line\n\t})\n}\n\n\/\/ DrawTerminalf returns a DrawFunc that draws a progress bar to an io.Writer\n\/\/ that is formatted with the given formatting function.\nfunc DrawTerminalf(w io.Writer, f DrawTextFormatFunc) DrawFunc {\n\treturn func(progress, total int64) error {\n\t\tif progress == -1 || total == -1 {\n\t\t\t_, err := fmt.Fprintf(w, \"\\n\")\n\t\t\treturn err\n\t\t}\n\n\t\t_, err := fmt.Fprint(w, f(progress, total)+\"\\r\")\n\t\treturn err\n\t}\n}\n\nvar byteUnits = []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"}\n\n\/\/ DrawTextFormatBytes is a DrawTextFormatFunc that formats the progress\n\/\/ and total into human-friendly byte formats.\nfunc DrawTextFormatBytes(progress, total int64) string {\n\treturn fmt.Sprintf(\"%s\/%s\", byteUnitStr(progress), byteUnitStr(total))\n}\n\nfunc byteUnitStr(n int64) string {\n\tvar unit string\n\tsize := float64(n)\n\tfor i := 1; i < len(byteUnits); i++ {\n\t\tmult := float64(i * 1000)\n\t\tif size < mult {\n\t\t\tunit = byteUnits[i-1]\n\t\t\tbreak\n\t\t}\n\n\t\tsize = size \/ mult\n\t}\n\n\treturn fmt.Sprintf(\"%.3g %s\", size, unit)\n}\n<commit_msg>Fix byte formatting<commit_after>package ioprogress\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ DrawFunc is the callback type for drawing progress.\ntype DrawFunc func(int64, int64) error\n\n\/\/ DrawTextFormatFunc is a callback used by DrawFuncs that draw text in\n\/\/ order to format the text into some more human friendly format.\ntype DrawTextFormatFunc func(int64, int64) string\n\nvar defaultDrawFunc DrawFunc\n\nfunc init() {\n\tdefaultDrawFunc = DrawTerminal(os.Stdout)\n}\n\n\/\/ DrawTerminal returns a DrawFunc that draws a progress bar to an io.Writer\n\/\/ that is assumed to be a terminal (and therefore respects carriage returns).\nfunc DrawTerminal(w io.Writer) DrawFunc {\n\tvar maxLength int\n\treturn DrawTerminalf(w, func(progress, total int64) string {\n\t\tline := fmt.Sprintf(\"%d\/%d\", progress, total)\n\n\t\t\/\/ Make sure we pad it to the max length we've ever drawn so that\n\t\t\/\/ we don't have trailing characters.\n\t\tif len(line) < maxLength {\n\t\t\tline = fmt.Sprintf(\n\t\t\t\t\"%s%s\", line, strings.Repeat(\" \", maxLength-len(line)))\n\t\t}\n\t\tmaxLength = len(line)\n\n\t\treturn line\n\t})\n}\n\n\/\/ DrawTerminalf returns a DrawFunc that draws a progress bar to an io.Writer\n\/\/ that is formatted with the given formatting function.\nfunc DrawTerminalf(w io.Writer, f DrawTextFormatFunc) DrawFunc {\n\treturn func(progress, total int64) error {\n\t\tif progress == -1 || total == -1 {\n\t\t\t_, err := fmt.Fprintf(w, \"\\n\")\n\t\t\treturn err\n\t\t}\n\n\t\t_, err := fmt.Fprint(w, f(progress, total)+\"\\r\")\n\t\treturn err\n\t}\n}\n\nvar byteUnits = []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"}\n\n\/\/ DrawTextFormatBytes is a DrawTextFormatFunc that formats the progress\n\/\/ and total into human-friendly byte formats.\nfunc DrawTextFormatBytes(progress, total int64) string {\n\treturn fmt.Sprintf(\"%s\/%s\", byteUnitStr(progress), byteUnitStr(total))\n}\n\nfunc byteUnitStr(n int64) string {\n\tvar unit string\n\tsize := float64(n)\n\tfor i := 1; i < len(byteUnits); i++ {\n\t\tif size < 1000 {\n\t\t\tunit = byteUnits[i-1]\n\t\t\tbreak\n\t\t}\n\n\t\tsize = size \/ 1000\n\t}\n\n\treturn fmt.Sprintf(\"%.3g %s\", size, unit)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"bytes\"\n \"config\"\n \"fmt\"\n \"io\"\n _ \"io\/ioutil\"\n \"log\"\n \"net\"\n \"net\/http\"\n \"net\/url\"\n \"os\"\n \"os\/signal\"\n \"strings\"\n \"syscall\"\n \"time\"\n)\n\nconst (\n CLR_N = \"\\x1b[0m\"\n \/* you use codes 30+i to specify foreground color, 40+i to specify background color *\/\n BLACK = 0\n RED = 1\n GREEN = 2\n YELLO = 3\n BLUE = 4\n MAGENTA = 5\n CYAN = 6\n WHITE = 7\n)\n\ntype Server struct {\n}\n\n\/\/返回ANSI 控制台颜色格式的字符串\n\/\/bc 背景颜色\n\/\/fc 前景(文字)颜色\nfunc ansi_color(bc int, fc int, s string) string {\n return fmt.Sprintf(\"\\x1b[%d;%dm%s%s\", 40+bc, 30+fc, s, CLR_N)\n}\n\n\/**\nhttp Header Copay\n*\/\nfunc header_copy(s http.Header, d *http.Header) {\n for hk, _ := range s {\n d.Set(hk, s.Get(hk))\n }\n}\n\nfunc access_log(w http.ResponseWriter, r *http.Request, query_url string, startTime time.Time) {\n remoteAddr := strings.Split(r.RemoteAddr, \":\")[0] \/\/客户端地址\n\n switch {\n case len(r.Header.Get(\"X-Real-Ip\")) > 0:\n remoteAddr = r.Header.Get(\"X-Real-Ip\")\n case len(r.Header.Get(\"Remote-Addr\")) > 0:\n remoteAddr = r.Header.Get(\"Remote-Addr\")\n case len(r.Header.Get(\"X-Forwarded-For\")) > 0:\n remoteAddr = r.Header.Get(\"X-Forwarded-For\")\n }\n\n if remoteAddr == \"[\" || len(remoteAddr) == 0 {\n remoteAddr = \"127.0.0.1\"\n }\n\n r.ParseForm()\n var postValues []string\n for k, _ := range r.PostForm {\n postValues = append(postValues, fmt.Sprintf(\"%s=%s\", url.QueryEscape(k), url.QueryEscape(r.FormValue(k))))\n }\n\n if len(postValues) == 0 {\n postValues = append(postValues, \"-\")\n }\n\n content_len := w.Header().Get(\"Content-Length\")\n if len(content_len) == 0 {\n content_len = \"-\"\n }\n\n logLine := fmt.Sprintf(`[%s] [%s] S:\"%s %s %s F:{%s}\" D:\"%s F:{%s} %s %s\",%0.5fs`,\n ansi_color(WHITE, BLACK, remoteAddr),\n time.Now().Format(\"2006-01-02 15:04:05.999999999 -0700 MST\"),\n r.Method,\n ansi_color(GREEN, BLACK, r.RequestURI),\n r.Proto,\n ansi_color(GREEN, BLACK, strings.Join(postValues, \"&\")),\n ansi_color(YELLO, BLACK, query_url),\n ansi_color(YELLO, BLACK, strings.Join(postValues, \"&\")),\n w.Header().Get(\"Status\"),\n content_len,\n time.Now().Sub(startTime).Seconds(),\n )\n g_env.AccessLog.Println(logLine)\n}\n\nfunc parse_querys(r *http.Request) (raw_query []string) {\n r.ParseForm()\n check_key_map := make(map[string]bool)\n check_and_append := func(_k, _v string) {\n if _, ok := check_key_map[_k]; ok {\n return\n }\n check_key_map[_k] = true\n raw_query = append(raw_query, fmt.Sprintf(\"%s=%s\", url.QueryEscape(_k), url.QueryEscape(_v)))\n }\n\n for k, _ := range r.Form {\n check_and_append(k, r.Form.Get(k))\n }\n\n if len(r.Referer()) > 0 {\n if uri, err := url.Parse(r.Referer()); err == nil {\n for k, _ := range uri.Query() {\n check_and_append(k, uri.Query().Get(k))\n }\n }\n }\n return\n}\n\n\/**\n获取目标地址\n*\/\nfunc target_path(r *http.Request, cfg *config.Config) (t string) {\n if len(cfg.TargetPath) > 0 {\n t = cfg.TargetPath\n } else {\n t = r.URL.Path\n }\n return\n}\n\n\/**\n获取目标服务服务器\n*\/\nfunc target_server(cfg *config.Config) (s string) {\n if len(cfg.TargetServer) > 0 {\n s = cfg.TargetServer\n } else {\n s = g_config.Default.TargetServer\n }\n return\n}\n\nfunc parse_query_key_value(v string) (kv map[string]string) {\n kv = make(map[string]string)\n var _k, _v string\n si := strings.IndexByte(v, '=')\n if si < 0 {\n _k = v\n } else {\n _k = v[0:si]\n if si+1 <= len(v) {\n _v = v[si+1:]\n }\n }\n kv[url.QueryEscape(_k)] = url.QueryEscape(_v)\n return\n}\n\n\/**\n标准的 http 协议,会将 & 和 ;看成参数对的分割符\n*\/\nfunc parse_query_values(r *http.Request) (vs []map[string]string) {\n for _, v := range strings.Split(r.URL.RawQuery, \"&\") {\n vs = append(vs, parse_query_key_value(v))\n }\n return\n}\n\n\/**\n获取查询参数并做替换\n对GET参数不做标准分割 &; 这两个字符\n*\/\nfunc __swap_raw_query(r *http.Request, cfg *config.Config) (q string) {\n var tmp_slice []string\n raw_querys := parse_query_values(r)\n append_slict := func(key string, value string) {\n if len(key) == 0 {\n return\n }\n tmp_slice = append(tmp_slice, fmt.Sprintf(\"%s=%s\", key, value))\n }\n if len(cfg.TargetParamNameSwap) == 0 {\n for _, vs := range raw_querys {\n for k1, v1 := range vs {\n append_slict(k1, v1)\n }\n }\n q = strings.Join(tmp_slice, \"&\")\n return\n }\n\n for _, vs := range raw_querys {\n for k1, v1 := range vs {\n if k2, ok := cfg.TargetParamNameSwap[k1]; ok {\n append_slict(k2, v1)\n } else {\n append_slict(k1, v1)\n }\n }\n }\n\n q = strings.Join(tmp_slice, \"&\")\n return\n}\n\n\/*\n标准http解析库实现\n*\/\nfunc swap_raw_query(r *http.Request, cfg *config.Config) (q string) {\n var tmp_slice []string\n append_slict := func(key string, value string) {\n tmp_slice = append(tmp_slice, fmt.Sprintf(\"%s=%s\", url.QueryEscape(key), url.QueryEscape(r.URL.Query().Get(value))))\n }\n \/\/如果配置段没有需要交换的参数,则直接返回查询字符串\n if len(cfg.TargetParamNameSwap) == 0 {\n for k, _ := range r.URL.Query() {\n append_slict(k, k)\n }\n q = strings.Join(tmp_slice, \"&\")\n return\n }\n\n for k, _ := range r.URL.Query() {\n if v, ok := cfg.TargetParamNameSwap[k]; ok {\n append_slict(v, k)\n } else {\n append_slict(k, k)\n }\n }\n q = strings.Join(tmp_slice, \"&\")\n return\n}\n\nfunc timeout_dialer(conn_timeout int, rw_timeout int) func(net, addr string) (c net.Conn, err error) {\n return func(netw, addr string) (net.Conn, error) {\n conn, err := net.DialTimeout(netw, addr, time.Duration(conn_timeout)*time.Second)\n if err != nil {\n log.Printf(\"Failed to connect to [%s]. Timed out after %d seconds\\n\", addr, rw_timeout)\n return nil, err\n }\n conn.SetDeadline(time.Now().Add(time.Duration(rw_timeout) * time.Second))\n return conn, nil\n }\n}\n\nfunc (s Server) handler_func(w http.ResponseWriter, r *http.Request) {\n var (\n cfg *config.Config\n cfg_err *config.ConfigErr\n conntction_timeout, response_timeout int\n req *http.Request\n err error\n raw_query []string\n )\n defer func() {\n if re := recover(); re != nil {\n g_env.ErrorLog.Println(\"Recovered in backendServer:\", re)\n }\n }()\n\n defer r.Body.Close()\n\n start_at := time.Now()\n raw_query = parse_querys(r)\n\n if err != nil {\n g_env.ErrorLog.Println(req, err)\n http.Error(w, \"Read Body Error.\", http.StatusInternalServerError)\n return\n }\n\n \/\/获取配置文件\n if cfg, cfg_err = g_config.FindBySourcePathAndParams(raw_query, r.URL.Path); cfg_err != nil {\n cfg = g_config.FindByParamsOrSourcePath(raw_query, r.URL.Path)\n }\n\n if conntction_timeout = cfg.ConnectionTimeout; conntction_timeout <= 0 {\n conntction_timeout = 15\n }\n\n if response_timeout = cfg.ResponseTimeout; response_timeout <= 0 {\n response_timeout = 120\n }\n\n transport := http.Transport{\n Dial: timeout_dialer(conntction_timeout, response_timeout),\n ResponseHeaderTimeout: time.Duration(response_timeout) * time.Second,\n DisableCompression: false,\n DisableKeepAlives: true,\n MaxIdleConnsPerHost: 2,\n }\n defer transport.CloseIdleConnections()\n\n client := &http.Client{\n Transport: &transport,\n }\n\n query_url, _ := url.Parse(target_server(cfg) + target_path(r, cfg) + \"?\" + swap_raw_query(r, cfg))\n\n switch r.Method {\n case \"GET\", \"HEAD\":\n req, err = http.NewRequest(r.Method, query_url.String(), nil)\n case \"POST\":\n req, err = http.NewRequest(r.Method, query_url.String(), bytes.NewBufferString(strings.Join(raw_query, \"&\")))\n req.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n default:\n http.Error(w, \"MethodNotAllowed\", http.StatusMethodNotAllowed)\n return\n }\n req.Close = true\n\n header_copy(r.Header, &req.Header)\n\n if err != nil {\n g_env.ErrorLog.Println(err)\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n resp, err := client.Do(req)\n defer resp.Body.Close()\n\n if err != nil {\n g_env.ErrorLog.Println(req, err)\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n for hk, _ := range resp.Header {\n w.Header().Set(hk, resp.Header.Get(hk))\n }\n w.Header().Set(\"X-Transit-Ver\", \"0.0.1\")\n w.Header().Set(\"Server\", \"X-Transit\")\n\n w.WriteHeader(resp.StatusCode)\n io.Copy(w, resp.Body)\n access_log(w, r, query_url.String(), start_at)\n}\n\n\/\/TODO\n\/\/ add unix socket listener\n\nfunc Run() {\n g_env.ErrorLog.Printf(\"start@ %s:%d %v \\n\", g_config.Listen.Host, g_config.Listen.Port, time.Now())\n fmt.Printf(\"start@ %s:%d %v \\n\", g_config.Listen.Host, g_config.Listen.Port, time.Now())\n\n sigchan := make(chan os.Signal, 1)\n signal.Notify(sigchan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT)\n\n server := Server{}\n go func() {\n s := &http.Server{\n Addr: fmt.Sprintf(\"%s:%d\", g_config.Listen.Host, g_config.Listen.Port),\n Handler: http.HandlerFunc(server.handler_func),\n ReadTimeout: 120 * time.Second,\n WriteTimeout: 120 * time.Second,\n MaxHeaderBytes: 1 << 20,\n }\n log.Fatal(s.ListenAndServe())\n }()\n\n <-sigchan\n}\n<commit_msg>add access begin logger<commit_after>package main\n\nimport (\n \"bytes\"\n \"config\"\n \"crypto\/md5\"\n \"fmt\"\n \"io\"\n _ \"io\/ioutil\"\n \"log\"\n \"math\/rand\"\n \"net\"\n \"net\/http\"\n \"net\/url\"\n \"os\"\n \"os\/signal\"\n \"strconv\"\n \"strings\"\n \"syscall\"\n \"time\"\n)\n\nconst (\n CLR_N = \"\\x1b[0m\"\n \/* you use codes 30+i to specify foreground color, 40+i to specify background color *\/\n BLACK = 0\n RED = 1\n GREEN = 2\n YELLO = 3\n BLUE = 4\n MAGENTA = 5\n CYAN = 6\n WHITE = 7\n)\n\ntype Server struct {\n}\n\n\/\/返回ANSI 控制台颜色格式的字符串\n\/\/bc 背景颜色\n\/\/fc 前景(文字)颜色\nfunc ansi_color(bc int, fc int, s string) string {\n return fmt.Sprintf(\"\\x1b[%d;%dm%s%s\", 40+bc, 30+fc, s, CLR_N)\n}\n\n\/**\nhttp Header Copay\n*\/\nfunc header_copy(s http.Header, d *http.Header) {\n for hk, _ := range s {\n d.Set(hk, s.Get(hk))\n }\n}\n\nfunc access_id(as string) string {\n md5 := func(s string) string {\n hash_md5 := md5.New()\n io.WriteString(hash_md5, s)\n return fmt.Sprintf(\"%x\", hash_md5.Sum(nil))\n }\n nano := time.Now().UnixNano()\n rand.Seed(nano)\n rnd_num := rand.Int63()\n fs := md5(md5(as) + md5(strconv.FormatInt(nano, 10)) + md5(strconv.FormatInt(rnd_num, 10)))\n return strings.ToUpper(fmt.Sprintf(\"%s.%s\", fs[0:4], fs[28:]))\n}\n\nfunc access_ip(r *http.Request) string {\n remote_addr := strings.Split(r.RemoteAddr, \":\")[0] \/\/客户端地址\n switch {\n case len(r.Header.Get(\"X-Real-Ip\")) > 0:\n remote_addr = r.Header.Get(\"X-Real-Ip\")\n case len(r.Header.Get(\"Remote-Addr\")) > 0:\n remote_addr = r.Header.Get(\"Remote-Addr\")\n case len(r.Header.Get(\"X-Forwarded-For\")) > 0:\n remote_addr = r.Header.Get(\"X-Forwarded-For\")\n }\n\n if remote_addr == \"[\" || len(remote_addr) == 0 {\n remote_addr = \"127.0.0.1\"\n }\n return remote_addr\n}\n\nfunc post_values(r *http.Request) (ps []string) {\n r.ParseForm()\n for k, _ := range r.PostForm {\n ps = append(ps, fmt.Sprintf(\"%s=%s\", url.QueryEscape(k), url.QueryEscape(r.FormValue(k))))\n }\n return\n}\n\n\/\/access begin logger\nfunc access_log_begin(aid string, r *http.Request, query_url string) {\n post_values := post_values(r)\n\n if len(post_values) == 0 {\n post_values = append(post_values, \"-\")\n }\n log_line := fmt.Sprintf(`Begin [%s] \"%s\" [%s] S:\"%s %s %s F:{%s}\" D:\"%s F:{%s}\"`,\n ansi_color(WHITE, BLACK, access_ip(r)),\n ansi_color(WHITE, RED, aid),\n time.Now().Format(\"2006-01-02 15:04:05.999999999 -0700 MST\"),\n r.Method,\n ansi_color(GREEN, BLACK, r.RequestURI),\n r.Proto,\n ansi_color(GREEN, BLACK, strings.Join(post_values, \"&\")),\n ansi_color(YELLO, BLACK, query_url),\n ansi_color(YELLO, BLACK, strings.Join(post_values, \"&\")),\n )\n g_env.AccessLog.Println(log_line)\n}\n\n\/\/transit complete logger\nfunc access_log(aid string, w http.ResponseWriter, r *http.Request, query_url string, startTime time.Time) {\n post_values := post_values(r)\n\n if len(post_values) == 0 {\n post_values = append(post_values, \"-\")\n }\n\n content_len := w.Header().Get(\"Content-Length\")\n if len(content_len) == 0 {\n content_len = \"-\"\n }\n\n log_line := fmt.Sprintf(`Complete [%s] \"%s\" [%s] S:\"%s %s %s F:{%s}\" D:\"%s F:{%s} %s %s\",%0.5fs`,\n ansi_color(WHITE, BLACK, access_ip(r)),\n ansi_color(WHITE, RED, aid),\n time.Now().Format(\"2006-01-02 15:04:05.999999999 -0700 MST\"),\n r.Method,\n ansi_color(GREEN, BLACK, r.RequestURI),\n r.Proto,\n ansi_color(GREEN, BLACK, strings.Join(post_values, \"&\")),\n ansi_color(YELLO, BLACK, query_url),\n ansi_color(YELLO, BLACK, strings.Join(post_values, \"&\")),\n w.Header().Get(\"Status\"),\n content_len,\n time.Now().Sub(startTime).Seconds(),\n )\n g_env.AccessLog.Println(log_line)\n}\n\nfunc parse_querys(r *http.Request) (raw_query []string) {\n r.ParseForm()\n check_key_map := make(map[string]bool)\n check_and_append := func(_k, _v string) {\n if _, ok := check_key_map[_k]; ok {\n return\n }\n check_key_map[_k] = true\n raw_query = append(raw_query, fmt.Sprintf(\"%s=%s\", url.QueryEscape(_k), url.QueryEscape(_v)))\n }\n\n for k, _ := range r.Form {\n check_and_append(k, r.Form.Get(k))\n }\n\n if len(r.Referer()) > 0 {\n if uri, err := url.Parse(r.Referer()); err == nil {\n for k, _ := range uri.Query() {\n check_and_append(k, uri.Query().Get(k))\n }\n }\n }\n return\n}\n\n\/**\n获取目标地址\n*\/\nfunc target_path(r *http.Request, cfg *config.Config) (t string) {\n if len(cfg.TargetPath) > 0 {\n t = cfg.TargetPath\n } else {\n t = r.URL.Path\n }\n return\n}\n\n\/**\n获取目标服务服务器\n*\/\nfunc target_server(cfg *config.Config) (s string) {\n if len(cfg.TargetServer) > 0 {\n s = cfg.TargetServer\n } else {\n s = g_config.Default.TargetServer\n }\n return\n}\n\n\/*\n标准http解析库实现\n*\/\nfunc swap_raw_query(r *http.Request, cfg *config.Config) (q string) {\n var tmp_slice []string\n append_slict := func(key string, value string) {\n tmp_slice = append(tmp_slice, fmt.Sprintf(\"%s=%s\", url.QueryEscape(key), url.QueryEscape(r.URL.Query().Get(value))))\n }\n \/\/如果配置段没有需要交换的参数,则直接返回查询字符串\n if len(cfg.TargetParamNameSwap) == 0 {\n for k, _ := range r.URL.Query() {\n append_slict(k, k)\n }\n q = strings.Join(tmp_slice, \"&\")\n return\n }\n\n for k, _ := range r.URL.Query() {\n if v, ok := cfg.TargetParamNameSwap[k]; ok {\n append_slict(v, k)\n } else {\n append_slict(k, k)\n }\n }\n q = strings.Join(tmp_slice, \"&\")\n return\n}\n\nfunc timeout_dialer(conn_timeout int, rw_timeout int) func(net, addr string) (c net.Conn, err error) {\n return func(netw, addr string) (net.Conn, error) {\n conn, err := net.DialTimeout(netw, addr, time.Duration(conn_timeout)*time.Second)\n if err != nil {\n log.Printf(\"Failed to connect to [%s]. Timed out after %d seconds\\n\", addr, rw_timeout)\n return nil, err\n }\n conn.SetDeadline(time.Now().Add(time.Duration(rw_timeout) * time.Second))\n return conn, nil\n }\n}\n\nfunc (s Server) handler_func(w http.ResponseWriter, r *http.Request) {\n var (\n cfg *config.Config\n cfg_err *config.ConfigErr\n conntction_timeout, response_timeout int\n req *http.Request\n err error\n raw_query []string\n )\n defer func() {\n if re := recover(); re != nil {\n g_env.ErrorLog.Println(\"Recovered in backendServer:\", re)\n }\n }()\n\n defer r.Body.Close()\n\n start_at := time.Now()\n raw_query = parse_querys(r)\n aid := access_id(r.RequestURI)\n\n if err != nil {\n g_env.ErrorLog.Println(req, err)\n http.Error(w, \"Read Body Error.\", http.StatusInternalServerError)\n return\n }\n\n \/\/获取配置文件\n if cfg, cfg_err = g_config.FindBySourcePathAndParams(raw_query, r.URL.Path); cfg_err != nil {\n cfg = g_config.FindByParamsOrSourcePath(raw_query, r.URL.Path)\n }\n\n if conntction_timeout = cfg.ConnectionTimeout; conntction_timeout <= 0 {\n conntction_timeout = 15\n }\n\n if response_timeout = cfg.ResponseTimeout; response_timeout <= 0 {\n response_timeout = 120\n }\n\n transport := http.Transport{\n Dial: timeout_dialer(conntction_timeout, response_timeout),\n ResponseHeaderTimeout: time.Duration(response_timeout) * time.Second,\n DisableCompression: false,\n DisableKeepAlives: true,\n MaxIdleConnsPerHost: 2,\n }\n defer transport.CloseIdleConnections()\n\n client := &http.Client{\n Transport: &transport,\n }\n\n query_url, _ := url.Parse(target_server(cfg) + target_path(r, cfg) + \"?\" + swap_raw_query(r, cfg))\n\n access_log_begin(aid, r, query_url.String())\n\n switch r.Method {\n case \"GET\", \"HEAD\":\n req, err = http.NewRequest(r.Method, query_url.String(), nil)\n case \"POST\":\n req, err = http.NewRequest(r.Method, query_url.String(), bytes.NewBufferString(strings.Join(raw_query, \"&\")))\n req.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n default:\n http.Error(w, \"MethodNotAllowed\", http.StatusMethodNotAllowed)\n return\n }\n req.Close = true\n\n header_copy(r.Header, &req.Header)\n\n if err != nil {\n g_env.ErrorLog.Println(err)\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n resp, err := client.Do(req)\n defer resp.Body.Close()\n\n if err != nil {\n g_env.ErrorLog.Println(req, err)\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n for hk, _ := range resp.Header {\n w.Header().Set(hk, resp.Header.Get(hk))\n }\n w.Header().Set(\"X-Transit-Ver\", \"0.0.1\")\n w.Header().Set(\"Server\", \"X-Transit\")\n\n w.WriteHeader(resp.StatusCode)\n io.Copy(w, resp.Body)\n access_log(aid, w, r, query_url.String(), start_at)\n}\n\n\/\/TODO\n\/\/ add unix socket listener\n\nfunc Run() {\n g_env.ErrorLog.Printf(\"start@ %s:%d %v \\n\", g_config.Listen.Host, g_config.Listen.Port, time.Now())\n fmt.Printf(\"start@ %s:%d %v \\n\", g_config.Listen.Host, g_config.Listen.Port, time.Now())\n\n sigchan := make(chan os.Signal, 1)\n signal.Notify(sigchan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT)\n\n server := Server{}\n go func() {\n s := &http.Server{\n Addr: fmt.Sprintf(\"%s:%d\", g_config.Listen.Host, g_config.Listen.Port),\n Handler: http.HandlerFunc(server.handler_func),\n ReadTimeout: 120 * time.Second,\n WriteTimeout: 120 * time.Second,\n MaxHeaderBytes: 1 << 20,\n }\n log.Fatal(s.ListenAndServe())\n }()\n\n <-sigchan\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (C) Max Romanov\n * Copyright (C) NGINX, Inc.\n *\/\n\npackage unit\n\n\/*\n#include \"nxt_cgo_lib.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"net\/http\"\n)\n\ntype response struct {\n\theader http.Header\n\theaderSent bool\n\treq *http.Request\n\tc_req C.uintptr_t\n}\n\nfunc new_response(c_req C.uintptr_t, req *http.Request) *response {\n\tresp := &response{\n\t\theader: http.Header{},\n\t\treq: req,\n\t\tc_req: c_req,\n\t}\n\n\treturn resp\n}\n\nfunc (r *response) Header() http.Header {\n\treturn r.header\n}\n\nfunc (r *response) Write(p []byte) (n int, err error) {\n\tif !r.headerSent {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\n\tres := C.nxt_cgo_response_write(r.c_req, buf_ref(p), C.uint32_t(len(p)))\n\treturn int(res), nil\n}\n\nfunc (r *response) WriteHeader(code int) {\n\tif r.headerSent {\n\t\t\/\/ Note: explicitly using Stderr, as Stdout is our HTTP output.\n\t\tnxt_go_warn(\"multiple response.WriteHeader calls\")\n\t\treturn\n\t}\n\tr.headerSent = true\n\n\t\/\/ Set a default Content-Type\n\tif _, hasType := r.header[\"Content-Type\"]; !hasType {\n\t\tr.header.Add(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t}\n\n\tfields := 0\n\tfields_size := 0\n\n\tfor k, vv := range r.header {\n\t\tfor _, v := range vv {\n\t\t\tfields++\n\t\t\tfields_size += len(k) + len(v)\n\t\t}\n\t}\n\n\tC.nxt_cgo_response_create(r.c_req, C.int(code), C.int(fields),\n\t\tC.uint32_t(fields_size))\n\n\tfor k, vv := range r.header {\n\t\tfor _, v := range vv {\n\t\t\tC.nxt_cgo_response_add_field(r.c_req, str_ref(k), C.uint8_t(len(k)),\n\t\t\t\tstr_ref(v), C.uint32_t(len(v)))\n\t\t}\n\t}\n\n\tC.nxt_cgo_response_send(r.c_req)\n}\n\nfunc (r *response) Flush() {\n\tif !r.headerSent {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n}\n<commit_msg>Go: fixing header buffer size calculation.<commit_after>\/*\n * Copyright (C) Max Romanov\n * Copyright (C) NGINX, Inc.\n *\/\n\npackage unit\n\n\/*\n#include \"nxt_cgo_lib.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"net\/http\"\n)\n\ntype response struct {\n\theader http.Header\n\theaderSent bool\n\treq *http.Request\n\tc_req C.uintptr_t\n}\n\nfunc new_response(c_req C.uintptr_t, req *http.Request) *response {\n\tresp := &response{\n\t\theader: http.Header{},\n\t\treq: req,\n\t\tc_req: c_req,\n\t}\n\n\treturn resp\n}\n\nfunc (r *response) Header() http.Header {\n\treturn r.header\n}\n\nfunc (r *response) Write(p []byte) (n int, err error) {\n\tif !r.headerSent {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\n\tres := C.nxt_cgo_response_write(r.c_req, buf_ref(p), C.uint32_t(len(p)))\n\treturn int(res), nil\n}\n\nfunc (r *response) WriteHeader(code int) {\n\tif r.headerSent {\n\t\t\/\/ Note: explicitly using Stderr, as Stdout is our HTTP output.\n\t\tnxt_go_warn(\"multiple response.WriteHeader calls\")\n\t\treturn\n\t}\n\tr.headerSent = true\n\n\t\/\/ Set a default Content-Type\n\tif _, hasType := r.header[\"Content-Type\"]; !hasType {\n\t\tr.header.Add(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t}\n\n\tfields := 0\n\tfields_size := 0\n\n\tfor k, vv := range r.header {\n\t\tfor _, v := range vv {\n\t\t\tfields++\n\t\t\tfields_size += len(k) + len(v) + 2\n\t\t}\n\t}\n\n\tC.nxt_cgo_response_create(r.c_req, C.int(code), C.int(fields),\n\t\tC.uint32_t(fields_size))\n\n\tfor k, vv := range r.header {\n\t\tfor _, v := range vv {\n\t\t\tC.nxt_cgo_response_add_field(r.c_req, str_ref(k), C.uint8_t(len(k)),\n\t\t\t\tstr_ref(v), C.uint32_t(len(v)))\n\t\t}\n\t}\n\n\tC.nxt_cgo_response_send(r.c_req)\n}\n\nfunc (r *response) Flush() {\n\tif !r.headerSent {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage thrift\n\nimport (\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"sync\"\n)\n\n\/*\n * This is not a typical TSimpleServer as it is not blocked after accept a socket.\n * It is more like a TThreadedServer that can handle different connections in different goroutines.\n * This will work if golang user implements a conn-pool like thing in client side.\n *\/\ntype TSimpleServer struct {\n\tquit chan struct{}\n\n\tprocessorFactory TProcessorFactory\n\tserverTransport TServerTransport\n\tinputTransportFactory TTransportFactory\n\toutputTransportFactory TTransportFactory\n\tinputProtocolFactory TProtocolFactory\n\toutputProtocolFactory TProtocolFactory\n}\n\nfunc NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {\n\treturn NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)\n}\n\nfunc NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory4(NewTProcessorFactory(processor),\n\t\tserverTransport,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(NewTProcessorFactory(processor),\n\t\tserverTransport,\n\t\tinputTransportFactory,\n\t\toutputTransportFactory,\n\t\tinputProtocolFactory,\n\t\toutputProtocolFactory,\n\t)\n}\n\nfunc NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(processorFactory,\n\t\tserverTransport,\n\t\tNewTTransportFactory(),\n\t\tNewTTransportFactory(),\n\t\tNewTBinaryProtocolFactoryDefault(),\n\t\tNewTBinaryProtocolFactoryDefault(),\n\t)\n}\n\nfunc NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(processorFactory,\n\t\tserverTransport,\n\t\ttransportFactory,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {\n\treturn &TSimpleServer{\n\t\tprocessorFactory: processorFactory,\n\t\tserverTransport: serverTransport,\n\t\tinputTransportFactory: inputTransportFactory,\n\t\toutputTransportFactory: outputTransportFactory,\n\t\tinputProtocolFactory: inputProtocolFactory,\n\t\toutputProtocolFactory: outputProtocolFactory,\n\t\tquit: make(chan struct{}, 1),\n\t}\n}\n\nfunc (p *TSimpleServer) ProcessorFactory() TProcessorFactory {\n\treturn p.processorFactory\n}\n\nfunc (p *TSimpleServer) ServerTransport() TServerTransport {\n\treturn p.serverTransport\n}\n\nfunc (p *TSimpleServer) InputTransportFactory() TTransportFactory {\n\treturn p.inputTransportFactory\n}\n\nfunc (p *TSimpleServer) OutputTransportFactory() TTransportFactory {\n\treturn p.outputTransportFactory\n}\n\nfunc (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {\n\treturn p.inputProtocolFactory\n}\n\nfunc (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {\n\treturn p.outputProtocolFactory\n}\n\nfunc (p *TSimpleServer) Listen() error {\n\treturn p.serverTransport.Listen()\n}\n\nfunc (p *TSimpleServer) AcceptLoop() error {\n\tfor {\n\t\tclient, err := p.serverTransport.Accept()\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-p.quit:\n\t\t\t\treturn nil\n\t\t\tdefault:\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif client != nil {\n\t\t\tgo func() {\n\t\t\t\tif err := p.processRequests(client); err != nil {\n\t\t\t\t\tlog.Println(\"error processing request:\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (p *TSimpleServer) Serve() error {\n\terr := p.Listen()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.AcceptLoop()\n\treturn nil\n}\n\nvar once sync.Once\n\nfunc (p *TSimpleServer) Stop() error {\n\tq := func() {\n\t\tp.quit <- struct{}{}\n\t\tp.serverTransport.Interrupt()\n\t}\n\tonce.Do(q)\n\treturn nil\n}\n\nfunc (p *TSimpleServer) processRequests(client TTransport) error {\n\tprocessor := p.processorFactory.GetProcessor(client)\n\tinputTransport := p.inputTransportFactory.GetTransport(client)\n\toutputTransport := p.outputTransportFactory.GetTransport(client)\n\tinputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)\n\toutputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"panic in processor: %s: %s\", e, debug.Stack())\n\t\t}\n\t}()\n\tif inputTransport != nil {\n\t\tdefer inputTransport.Close()\n\t}\n\tif outputTransport != nil {\n\t\tdefer outputTransport.Close()\n\t}\n\tfor {\n\t\tok, err := processor.Process(inputProtocol, outputProtocol)\n\t\tif err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\tlog.Printf(\"error processing request: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tif err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {\n\t\t\tcontinue\n\t\t}\n \t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>THRIFT-3730: go server errors are logged twice Client: go Patch: zhujun2006 <stone1342006 at gmail dot com><commit_after>\/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage thrift\n\nimport (\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"sync\"\n)\n\n\/*\n * This is not a typical TSimpleServer as it is not blocked after accept a socket.\n * It is more like a TThreadedServer that can handle different connections in different goroutines.\n * This will work if golang user implements a conn-pool like thing in client side.\n *\/\ntype TSimpleServer struct {\n\tquit chan struct{}\n\n\tprocessorFactory TProcessorFactory\n\tserverTransport TServerTransport\n\tinputTransportFactory TTransportFactory\n\toutputTransportFactory TTransportFactory\n\tinputProtocolFactory TProtocolFactory\n\toutputProtocolFactory TProtocolFactory\n}\n\nfunc NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {\n\treturn NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)\n}\n\nfunc NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory4(NewTProcessorFactory(processor),\n\t\tserverTransport,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(NewTProcessorFactory(processor),\n\t\tserverTransport,\n\t\tinputTransportFactory,\n\t\toutputTransportFactory,\n\t\tinputProtocolFactory,\n\t\toutputProtocolFactory,\n\t)\n}\n\nfunc NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(processorFactory,\n\t\tserverTransport,\n\t\tNewTTransportFactory(),\n\t\tNewTTransportFactory(),\n\t\tNewTBinaryProtocolFactoryDefault(),\n\t\tNewTBinaryProtocolFactoryDefault(),\n\t)\n}\n\nfunc NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {\n\treturn NewTSimpleServerFactory6(processorFactory,\n\t\tserverTransport,\n\t\ttransportFactory,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {\n\treturn &TSimpleServer{\n\t\tprocessorFactory: processorFactory,\n\t\tserverTransport: serverTransport,\n\t\tinputTransportFactory: inputTransportFactory,\n\t\toutputTransportFactory: outputTransportFactory,\n\t\tinputProtocolFactory: inputProtocolFactory,\n\t\toutputProtocolFactory: outputProtocolFactory,\n\t\tquit: make(chan struct{}, 1),\n\t}\n}\n\nfunc (p *TSimpleServer) ProcessorFactory() TProcessorFactory {\n\treturn p.processorFactory\n}\n\nfunc (p *TSimpleServer) ServerTransport() TServerTransport {\n\treturn p.serverTransport\n}\n\nfunc (p *TSimpleServer) InputTransportFactory() TTransportFactory {\n\treturn p.inputTransportFactory\n}\n\nfunc (p *TSimpleServer) OutputTransportFactory() TTransportFactory {\n\treturn p.outputTransportFactory\n}\n\nfunc (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {\n\treturn p.inputProtocolFactory\n}\n\nfunc (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {\n\treturn p.outputProtocolFactory\n}\n\nfunc (p *TSimpleServer) Listen() error {\n\treturn p.serverTransport.Listen()\n}\n\nfunc (p *TSimpleServer) AcceptLoop() error {\n\tfor {\n\t\tclient, err := p.serverTransport.Accept()\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-p.quit:\n\t\t\t\treturn nil\n\t\t\tdefault:\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif client != nil {\n\t\t\tgo func() {\n\t\t\t\tif err := p.processRequests(client); err != nil {\n\t\t\t\t\tlog.Println(\"error processing request:\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (p *TSimpleServer) Serve() error {\n\terr := p.Listen()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.AcceptLoop()\n\treturn nil\n}\n\nvar once sync.Once\n\nfunc (p *TSimpleServer) Stop() error {\n\tq := func() {\n\t\tp.quit <- struct{}{}\n\t\tp.serverTransport.Interrupt()\n\t}\n\tonce.Do(q)\n\treturn nil\n}\n\nfunc (p *TSimpleServer) processRequests(client TTransport) error {\n\tprocessor := p.processorFactory.GetProcessor(client)\n\tinputTransport := p.inputTransportFactory.GetTransport(client)\n\toutputTransport := p.outputTransportFactory.GetTransport(client)\n\tinputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)\n\toutputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"panic in processor: %s: %s\", e, debug.Stack())\n\t\t}\n\t}()\n\tif inputTransport != nil {\n\t\tdefer inputTransport.Close()\n\t}\n\tif outputTransport != nil {\n\t\tdefer outputTransport.Close()\n\t}\n\tfor {\n\t\tok, err := processor.Process(inputProtocol, outputProtocol)\n\t\tif err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {\n\t\t\tcontinue\n\t\t}\n \t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2020 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage types\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestHostnameTrieInsert(t *testing.T) {\n\thostnames := HostnameTrie{}\n\tassert.NoError(t, hostnames.insert(\"test.k6.io\"))\n\tassert.Error(t, hostnames.insert(\"inval*d.pattern\"))\n\tassert.NoError(t, hostnames.insert(\"*valid.pattern\"))\n}\n\nfunc TestHostnameTrieContains(t *testing.T) {\n\ttrie, err := NewHostnameTrie([]string{\"test.k6.io\", \"*valid.pattern\"})\n\trequire.NoError(t, err)\n\t_, matches := trie.Contains(\"K6.Io\")\n\tassert.False(t, matches)\n\tmatch, matches := trie.Contains(\"tEsT.k6.Io\")\n\tassert.True(t, matches)\n\tassert.Equal(t, \"test.k6.io\", match)\n\tmatch, matches = trie.Contains(\"TEST.K6.IO\")\n\tassert.True(t, matches)\n\tassert.Equal(t, \"test.k6.io\", match)\n\tmatch, matches = trie.Contains(\"blocked.valId.paTtern\")\n\tassert.True(t, matches)\n\tassert.Equal(t, \"*valid.pattern\", match)\n\t_, matches = trie.Contains(\"example.test.k6.io\")\n\tassert.False(t, matches)\n}\n<commit_msg>rewrite TestHostnameTrieContains as table test<commit_after>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2020 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage types\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestHostnameTrieInsert(t *testing.T) {\n\thostnames := HostnameTrie{}\n\tassert.NoError(t, hostnames.insert(\"test.k6.io\"))\n\tassert.Error(t, hostnames.insert(\"inval*d.pattern\"))\n\tassert.NoError(t, hostnames.insert(\"*valid.pattern\"))\n}\n\nfunc TestHostnameTrieContains(t *testing.T) {\n\ttrie, err := NewHostnameTrie([]string{\"test.k6.io\", \"*valid.pattern\"})\n\trequire.NoError(t, err)\n\tcases := map[string]string{\n\t\t\"K6.Io\": \"\",\n\t\t\"tEsT.k6.Io\": \"test.k6.io\",\n\t\t\"TESt.K6.IO\": \"test.k6.io\",\n\t\t\"blocked.valId.paTtern\": \"*valid.pattern\",\n\t\t\"example.test.k6.io\": \"\",\n\t}\n\tfor key, value := range cases {\n\t\thost, pattern := key, value\n\t\tt.Run(host, func(t *testing.T) {\n\t\t\tmatch, matches := trie.Contains(host)\n\t\t\tif pattern == \"\" {\n\t\t\t\tassert.False(t, matches)\n\t\t\t\tassert.Empty(t, match)\n\t\t\t} else {\n\t\t\t\tassert.True(t, matches)\n\t\t\t\tassert.Equal(t, pattern, match)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package configs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\ntype Rlimit struct {\n\tType int `json:\"type\"`\n\tHard uint64 `json:\"hard\"`\n\tSoft uint64 `json:\"soft\"`\n}\n\n\/\/ IDMap represents UID\/GID Mappings for User Namespaces.\ntype IDMap struct {\n\tContainerID int `json:\"container_id\"`\n\tHostID int `json:\"host_id\"`\n\tSize int `json:\"size\"`\n}\n\n\/\/ Seccomp represents syscall restrictions\n\/\/ By default, only the native architecture of the kernel is allowed to be used\n\/\/ for syscalls. Additional architectures can be added by specifying them in\n\/\/ Architectures.\ntype Seccomp struct {\n\tDefaultAction Action `json:\"default_action\"`\n\tArchitectures []string `json:\"architectures\"`\n\tSyscalls []*Syscall `json:\"syscalls\"`\n}\n\n\/\/ An action to be taken upon rule match in Seccomp\ntype Action int\n\nconst (\n\tKill Action = iota + 1\n\tErrno\n\tTrap\n\tAllow\n\tTrace\n)\n\n\/\/ A comparison operator to be used when matching syscall arguments in Seccomp\ntype Operator int\n\nconst (\n\tEqualTo Operator = iota + 1\n\tNotEqualTo\n\tGreaterThan\n\tGreaterThanOrEqualTo\n\tLessThan\n\tLessThanOrEqualTo\n\tMaskEqualTo\n)\n\n\/\/ A rule to match a specific syscall argument in Seccomp\ntype Arg struct {\n\tIndex uint `json:\"index\"`\n\tValue uint64 `json:\"value\"`\n\tValueTwo uint64 `json:\"value_two\"`\n\tOp Operator `json:\"op\"`\n}\n\n\/\/ An rule to match a syscall in Seccomp\ntype Syscall struct {\n\tName string `json:\"name\"`\n\tAction Action `json:\"action\"`\n\tArgs []*Arg `json:\"args\"`\n}\n\n\/\/ TODO Windows. Many of these fields should be factored out into those parts\n\/\/ which are common across platforms, and those which are platform specific.\n\n\/\/ Config defines configuration options for executing a process inside a contained environment.\ntype Config struct {\n\t\/\/ NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs\n\t\/\/ This is a common option when the container is running in ramdisk\n\tNoPivotRoot bool `json:\"no_pivot_root\"`\n\n\t\/\/ ParentDeathSignal specifies the signal that is sent to the container's process in the case\n\t\/\/ that the parent process dies.\n\tParentDeathSignal int `json:\"parent_death_signal\"`\n\n\t\/\/ PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.\n\t\/\/ When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.\n\t\/\/ This is required when using read only root filesystems. In these cases, a read\/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.\n\tPivotDir string `json:\"pivot_dir\"`\n\n\t\/\/ Path to a directory containing the container's root filesystem.\n\tRootfs string `json:\"rootfs\"`\n\n\t\/\/ Readonlyfs will remount the container's rootfs as readonly where only externally mounted\n\t\/\/ bind mounts are writtable.\n\tReadonlyfs bool `json:\"readonlyfs\"`\n\n\t\/\/ Specifies the mount propagation flags to be applied to \/.\n\tRootPropagation int `json:\"rootPropagation\"`\n\n\t\/\/ Mounts specify additional source and destination paths that will be mounted inside the container's\n\t\/\/ rootfs and mount namespace if specified\n\tMounts []*Mount `json:\"mounts\"`\n\n\t\/\/ The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!\n\tDevices []*Device `json:\"devices\"`\n\n\tMountLabel string `json:\"mount_label\"`\n\n\t\/\/ Hostname optionally sets the container's hostname if provided\n\tHostname string `json:\"hostname\"`\n\n\t\/\/ Namespaces specifies the container's namespaces that it should setup when cloning the init process\n\t\/\/ If a namespace is not provided that namespace is shared from the container's parent process\n\tNamespaces Namespaces `json:\"namespaces\"`\n\n\t\/\/ Capabilities specify the capabilities to keep when executing the process inside the container\n\t\/\/ All capbilities not specified will be dropped from the processes capability mask\n\tCapabilities []string `json:\"capabilities\"`\n\n\t\/\/ Networks specifies the container's network setup to be created\n\tNetworks []*Network `json:\"networks\"`\n\n\t\/\/ Routes can be specified to create entries in the route table as the container is started\n\tRoutes []*Route `json:\"routes\"`\n\n\t\/\/ Cgroups specifies specific cgroup settings for the various subsystems that the container is\n\t\/\/ placed into to limit the resources the container has available\n\tCgroups *Cgroup `json:\"cgroups\"`\n\n\t\/\/ AppArmorProfile specifies the profile to apply to the process running in the container and is\n\t\/\/ change at the time the process is execed\n\tAppArmorProfile string `json:\"apparmor_profile,omitempty\"`\n\n\t\/\/ ProcessLabel specifies the label to apply to the process running in the container. It is\n\t\/\/ commonly used by selinux\n\tProcessLabel string `json:\"process_label,omitempty\"`\n\n\t\/\/ Rlimits specifies the resource limits, such as max open files, to set in the container\n\t\/\/ If Rlimits are not set, the container will inherit rlimits from the parent process\n\tRlimits []Rlimit `json:\"rlimits,omitempty\"`\n\n\t\/\/ OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores\n\t\/\/ for a process. Valid values are between the range [-1000, '1000'], where processes with\n\t\/\/ higher scores are preferred for being killed.\n\t\/\/ More information about kernel oom score calculation here: https:\/\/lwn.net\/Articles\/317814\/\n\tOomScoreAdj int `json:\"oom_score_adj\"`\n\n\t\/\/ AdditionalGroups specifies the gids that should be added to supplementary groups\n\t\/\/ in addition to those that the user belongs to.\n\tAdditionalGroups []string `json:\"additional_groups\"`\n\n\t\/\/ UidMappings is an array of User ID mappings for User Namespaces\n\tUidMappings []IDMap `json:\"uid_mappings\"`\n\n\t\/\/ GidMappings is an array of Group ID mappings for User Namespaces\n\tGidMappings []IDMap `json:\"gid_mappings\"`\n\n\t\/\/ MaskPaths specifies paths within the container's rootfs to mask over with a bind\n\t\/\/ mount pointing to \/dev\/null as to prevent reads of the file.\n\tMaskPaths []string `json:\"mask_paths\"`\n\n\t\/\/ ReadonlyPaths specifies paths within the container's rootfs to remount as read-only\n\t\/\/ so that these files prevent any writes.\n\tReadonlyPaths []string `json:\"readonly_paths\"`\n\n\t\/\/ Sysctl is a map of properties and their values. It is the equivalent of using\n\t\/\/ sysctl -w my.property.name value in Linux.\n\tSysctl map[string]string `json:\"sysctl\"`\n\n\t\/\/ Seccomp allows actions to be taken whenever a syscall is made within the container.\n\t\/\/ A number of rules are given, each having an action to be taken if a syscall matches it.\n\t\/\/ A default action to be taken if no rules match is also given.\n\tSeccomp *Seccomp `json:\"seccomp\"`\n\n\t\/\/ NoNewPrivileges controls whether processes in the container can gain additional privileges.\n\tNoNewPrivileges bool `json:\"no_new_privileges,omitempty\"`\n\n\t\/\/ Hooks are a collection of actions to perform at various container lifecycle events.\n\t\/\/ CommandHooks are serialized to JSON, but other hooks are not.\n\tHooks *Hooks\n\n\t\/\/ Version is the version of opencontainer specification that is supported.\n\tVersion string `json:\"version\"`\n\n\t\/\/ Labels are user defined metadata that is stored in the config and populated on the state\n\tLabels []string `json:\"labels\"`\n}\n\ntype Hooks struct {\n\t\/\/ Prestart commands are executed after the container namespaces are created,\n\t\/\/ but before the user supplied command is executed from init.\n\tPrestart []Hook\n\n\t\/\/ Poststart commands are executed after the container init process starts.\n\tPoststart []Hook\n\n\t\/\/ Poststop commands are executed after the container init process exits.\n\tPoststop []Hook\n}\n\nfunc (hooks *Hooks) UnmarshalJSON(b []byte) error {\n\tvar state struct {\n\t\tPrestart []CommandHook\n\t\tPoststart []CommandHook\n\t\tPoststop []CommandHook\n\t}\n\n\tif err := json.Unmarshal(b, &state); err != nil {\n\t\treturn err\n\t}\n\n\tdeserialize := func(shooks []CommandHook) (hooks []Hook) {\n\t\tfor _, shook := range shooks {\n\t\t\thooks = append(hooks, shook)\n\t\t}\n\n\t\treturn hooks\n\t}\n\n\thooks.Prestart = deserialize(state.Prestart)\n\thooks.Poststart = deserialize(state.Poststart)\n\thooks.Poststop = deserialize(state.Poststop)\n\treturn nil\n}\n\nfunc (hooks Hooks) MarshalJSON() ([]byte, error) {\n\tserialize := func(hooks []Hook) (serializableHooks []CommandHook) {\n\t\tfor _, hook := range hooks {\n\t\t\tswitch chook := hook.(type) {\n\t\t\tcase CommandHook:\n\t\t\t\tserializableHooks = append(serializableHooks, chook)\n\t\t\tdefault:\n\t\t\t\tlogrus.Warnf(\"cannot serialize hook of type %T, skipping\", hook)\n\t\t\t}\n\t\t}\n\n\t\treturn serializableHooks\n\t}\n\n\treturn json.Marshal(map[string]interface{}{\n\t\t\"prestart\": serialize(hooks.Prestart),\n\t\t\"poststart\": serialize(hooks.Poststart),\n\t\t\"poststop\": serialize(hooks.Poststop),\n\t})\n}\n\n\/\/ HookState is the payload provided to a hook on execution.\ntype HookState struct {\n\tVersion string `json:\"version\"`\n\tID string `json:\"id\"`\n\tPid int `json:\"pid\"`\n\tRoot string `json:\"root\"`\n}\n\ntype Hook interface {\n\t\/\/ Run executes the hook with the provided state.\n\tRun(HookState) error\n}\n\n\/\/ NewFunctionHooks will call the provided function when the hook is run.\nfunc NewFunctionHook(f func(HookState) error) FuncHook {\n\treturn FuncHook{\n\t\trun: f,\n\t}\n}\n\ntype FuncHook struct {\n\trun func(HookState) error\n}\n\nfunc (f FuncHook) Run(s HookState) error {\n\treturn f.run(s)\n}\n\ntype Command struct {\n\tPath string `json:\"path\"`\n\tArgs []string `json:\"args\"`\n\tEnv []string `json:\"env\"`\n\tDir string `json:\"dir\"`\n\tTimeout *time.Duration `json:\"timeout\"`\n}\n\n\/\/ NewCommandHooks will execute the provided command when the hook is run.\nfunc NewCommandHook(cmd Command) CommandHook {\n\treturn CommandHook{\n\t\tCommand: cmd,\n\t}\n}\n\ntype CommandHook struct {\n\tCommand\n}\n\nfunc (c Command) Run(s HookState) error {\n\tb, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Cmd{\n\t\tPath: c.Path,\n\t\tArgs: c.Args,\n\t\tEnv: c.Env,\n\t\tStdin: bytes.NewReader(b),\n\t}\n\terrC := make(chan error, 1)\n\tgo func() {\n\t\terrC <- cmd.Run()\n\t}()\n\tif c.Timeout != nil {\n\t\tselect {\n\t\tcase err := <-errC:\n\t\t\treturn err\n\t\tcase <-time.After(*c.Timeout):\n\t\t\tcmd.Process.Kill()\n\t\t\tcmd.Wait()\n\t\t\treturn fmt.Errorf(\"hook ran past specified timeout of %.1fs\", c.Timeout.Seconds())\n\t\t}\n\t}\n\treturn <-errC\n}\n<commit_msg>Report hook output on error<commit_after>package configs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\ntype Rlimit struct {\n\tType int `json:\"type\"`\n\tHard uint64 `json:\"hard\"`\n\tSoft uint64 `json:\"soft\"`\n}\n\n\/\/ IDMap represents UID\/GID Mappings for User Namespaces.\ntype IDMap struct {\n\tContainerID int `json:\"container_id\"`\n\tHostID int `json:\"host_id\"`\n\tSize int `json:\"size\"`\n}\n\n\/\/ Seccomp represents syscall restrictions\n\/\/ By default, only the native architecture of the kernel is allowed to be used\n\/\/ for syscalls. Additional architectures can be added by specifying them in\n\/\/ Architectures.\ntype Seccomp struct {\n\tDefaultAction Action `json:\"default_action\"`\n\tArchitectures []string `json:\"architectures\"`\n\tSyscalls []*Syscall `json:\"syscalls\"`\n}\n\n\/\/ An action to be taken upon rule match in Seccomp\ntype Action int\n\nconst (\n\tKill Action = iota + 1\n\tErrno\n\tTrap\n\tAllow\n\tTrace\n)\n\n\/\/ A comparison operator to be used when matching syscall arguments in Seccomp\ntype Operator int\n\nconst (\n\tEqualTo Operator = iota + 1\n\tNotEqualTo\n\tGreaterThan\n\tGreaterThanOrEqualTo\n\tLessThan\n\tLessThanOrEqualTo\n\tMaskEqualTo\n)\n\n\/\/ A rule to match a specific syscall argument in Seccomp\ntype Arg struct {\n\tIndex uint `json:\"index\"`\n\tValue uint64 `json:\"value\"`\n\tValueTwo uint64 `json:\"value_two\"`\n\tOp Operator `json:\"op\"`\n}\n\n\/\/ An rule to match a syscall in Seccomp\ntype Syscall struct {\n\tName string `json:\"name\"`\n\tAction Action `json:\"action\"`\n\tArgs []*Arg `json:\"args\"`\n}\n\n\/\/ TODO Windows. Many of these fields should be factored out into those parts\n\/\/ which are common across platforms, and those which are platform specific.\n\n\/\/ Config defines configuration options for executing a process inside a contained environment.\ntype Config struct {\n\t\/\/ NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs\n\t\/\/ This is a common option when the container is running in ramdisk\n\tNoPivotRoot bool `json:\"no_pivot_root\"`\n\n\t\/\/ ParentDeathSignal specifies the signal that is sent to the container's process in the case\n\t\/\/ that the parent process dies.\n\tParentDeathSignal int `json:\"parent_death_signal\"`\n\n\t\/\/ PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.\n\t\/\/ When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.\n\t\/\/ This is required when using read only root filesystems. In these cases, a read\/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.\n\tPivotDir string `json:\"pivot_dir\"`\n\n\t\/\/ Path to a directory containing the container's root filesystem.\n\tRootfs string `json:\"rootfs\"`\n\n\t\/\/ Readonlyfs will remount the container's rootfs as readonly where only externally mounted\n\t\/\/ bind mounts are writtable.\n\tReadonlyfs bool `json:\"readonlyfs\"`\n\n\t\/\/ Specifies the mount propagation flags to be applied to \/.\n\tRootPropagation int `json:\"rootPropagation\"`\n\n\t\/\/ Mounts specify additional source and destination paths that will be mounted inside the container's\n\t\/\/ rootfs and mount namespace if specified\n\tMounts []*Mount `json:\"mounts\"`\n\n\t\/\/ The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!\n\tDevices []*Device `json:\"devices\"`\n\n\tMountLabel string `json:\"mount_label\"`\n\n\t\/\/ Hostname optionally sets the container's hostname if provided\n\tHostname string `json:\"hostname\"`\n\n\t\/\/ Namespaces specifies the container's namespaces that it should setup when cloning the init process\n\t\/\/ If a namespace is not provided that namespace is shared from the container's parent process\n\tNamespaces Namespaces `json:\"namespaces\"`\n\n\t\/\/ Capabilities specify the capabilities to keep when executing the process inside the container\n\t\/\/ All capbilities not specified will be dropped from the processes capability mask\n\tCapabilities []string `json:\"capabilities\"`\n\n\t\/\/ Networks specifies the container's network setup to be created\n\tNetworks []*Network `json:\"networks\"`\n\n\t\/\/ Routes can be specified to create entries in the route table as the container is started\n\tRoutes []*Route `json:\"routes\"`\n\n\t\/\/ Cgroups specifies specific cgroup settings for the various subsystems that the container is\n\t\/\/ placed into to limit the resources the container has available\n\tCgroups *Cgroup `json:\"cgroups\"`\n\n\t\/\/ AppArmorProfile specifies the profile to apply to the process running in the container and is\n\t\/\/ change at the time the process is execed\n\tAppArmorProfile string `json:\"apparmor_profile,omitempty\"`\n\n\t\/\/ ProcessLabel specifies the label to apply to the process running in the container. It is\n\t\/\/ commonly used by selinux\n\tProcessLabel string `json:\"process_label,omitempty\"`\n\n\t\/\/ Rlimits specifies the resource limits, such as max open files, to set in the container\n\t\/\/ If Rlimits are not set, the container will inherit rlimits from the parent process\n\tRlimits []Rlimit `json:\"rlimits,omitempty\"`\n\n\t\/\/ OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores\n\t\/\/ for a process. Valid values are between the range [-1000, '1000'], where processes with\n\t\/\/ higher scores are preferred for being killed.\n\t\/\/ More information about kernel oom score calculation here: https:\/\/lwn.net\/Articles\/317814\/\n\tOomScoreAdj int `json:\"oom_score_adj\"`\n\n\t\/\/ AdditionalGroups specifies the gids that should be added to supplementary groups\n\t\/\/ in addition to those that the user belongs to.\n\tAdditionalGroups []string `json:\"additional_groups\"`\n\n\t\/\/ UidMappings is an array of User ID mappings for User Namespaces\n\tUidMappings []IDMap `json:\"uid_mappings\"`\n\n\t\/\/ GidMappings is an array of Group ID mappings for User Namespaces\n\tGidMappings []IDMap `json:\"gid_mappings\"`\n\n\t\/\/ MaskPaths specifies paths within the container's rootfs to mask over with a bind\n\t\/\/ mount pointing to \/dev\/null as to prevent reads of the file.\n\tMaskPaths []string `json:\"mask_paths\"`\n\n\t\/\/ ReadonlyPaths specifies paths within the container's rootfs to remount as read-only\n\t\/\/ so that these files prevent any writes.\n\tReadonlyPaths []string `json:\"readonly_paths\"`\n\n\t\/\/ Sysctl is a map of properties and their values. It is the equivalent of using\n\t\/\/ sysctl -w my.property.name value in Linux.\n\tSysctl map[string]string `json:\"sysctl\"`\n\n\t\/\/ Seccomp allows actions to be taken whenever a syscall is made within the container.\n\t\/\/ A number of rules are given, each having an action to be taken if a syscall matches it.\n\t\/\/ A default action to be taken if no rules match is also given.\n\tSeccomp *Seccomp `json:\"seccomp\"`\n\n\t\/\/ NoNewPrivileges controls whether processes in the container can gain additional privileges.\n\tNoNewPrivileges bool `json:\"no_new_privileges,omitempty\"`\n\n\t\/\/ Hooks are a collection of actions to perform at various container lifecycle events.\n\t\/\/ CommandHooks are serialized to JSON, but other hooks are not.\n\tHooks *Hooks\n\n\t\/\/ Version is the version of opencontainer specification that is supported.\n\tVersion string `json:\"version\"`\n\n\t\/\/ Labels are user defined metadata that is stored in the config and populated on the state\n\tLabels []string `json:\"labels\"`\n}\n\ntype Hooks struct {\n\t\/\/ Prestart commands are executed after the container namespaces are created,\n\t\/\/ but before the user supplied command is executed from init.\n\tPrestart []Hook\n\n\t\/\/ Poststart commands are executed after the container init process starts.\n\tPoststart []Hook\n\n\t\/\/ Poststop commands are executed after the container init process exits.\n\tPoststop []Hook\n}\n\nfunc (hooks *Hooks) UnmarshalJSON(b []byte) error {\n\tvar state struct {\n\t\tPrestart []CommandHook\n\t\tPoststart []CommandHook\n\t\tPoststop []CommandHook\n\t}\n\n\tif err := json.Unmarshal(b, &state); err != nil {\n\t\treturn err\n\t}\n\n\tdeserialize := func(shooks []CommandHook) (hooks []Hook) {\n\t\tfor _, shook := range shooks {\n\t\t\thooks = append(hooks, shook)\n\t\t}\n\n\t\treturn hooks\n\t}\n\n\thooks.Prestart = deserialize(state.Prestart)\n\thooks.Poststart = deserialize(state.Poststart)\n\thooks.Poststop = deserialize(state.Poststop)\n\treturn nil\n}\n\nfunc (hooks Hooks) MarshalJSON() ([]byte, error) {\n\tserialize := func(hooks []Hook) (serializableHooks []CommandHook) {\n\t\tfor _, hook := range hooks {\n\t\t\tswitch chook := hook.(type) {\n\t\t\tcase CommandHook:\n\t\t\t\tserializableHooks = append(serializableHooks, chook)\n\t\t\tdefault:\n\t\t\t\tlogrus.Warnf(\"cannot serialize hook of type %T, skipping\", hook)\n\t\t\t}\n\t\t}\n\n\t\treturn serializableHooks\n\t}\n\n\treturn json.Marshal(map[string]interface{}{\n\t\t\"prestart\": serialize(hooks.Prestart),\n\t\t\"poststart\": serialize(hooks.Poststart),\n\t\t\"poststop\": serialize(hooks.Poststop),\n\t})\n}\n\n\/\/ HookState is the payload provided to a hook on execution.\ntype HookState struct {\n\tVersion string `json:\"version\"`\n\tID string `json:\"id\"`\n\tPid int `json:\"pid\"`\n\tRoot string `json:\"root\"`\n}\n\ntype Hook interface {\n\t\/\/ Run executes the hook with the provided state.\n\tRun(HookState) error\n}\n\n\/\/ NewFunctionHooks will call the provided function when the hook is run.\nfunc NewFunctionHook(f func(HookState) error) FuncHook {\n\treturn FuncHook{\n\t\trun: f,\n\t}\n}\n\ntype FuncHook struct {\n\trun func(HookState) error\n}\n\nfunc (f FuncHook) Run(s HookState) error {\n\treturn f.run(s)\n}\n\ntype Command struct {\n\tPath string `json:\"path\"`\n\tArgs []string `json:\"args\"`\n\tEnv []string `json:\"env\"`\n\tDir string `json:\"dir\"`\n\tTimeout *time.Duration `json:\"timeout\"`\n}\n\n\/\/ NewCommandHooks will execute the provided command when the hook is run.\nfunc NewCommandHook(cmd Command) CommandHook {\n\treturn CommandHook{\n\t\tCommand: cmd,\n\t}\n}\n\ntype CommandHook struct {\n\tCommand\n}\n\nfunc (c Command) Run(s HookState) error {\n\tb, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Cmd{\n\t\tPath: c.Path,\n\t\tArgs: c.Args,\n\t\tEnv: c.Env,\n\t\tStdin: bytes.NewReader(b),\n\t}\n\terrC := make(chan error, 1)\n\tgo func() {\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"%s: %s\", err, out)\n\t\t}\n\t\terrC <- err\n\t}()\n\tif c.Timeout != nil {\n\t\tselect {\n\t\tcase err := <-errC:\n\t\t\treturn err\n\t\tcase <-time.After(*c.Timeout):\n\t\t\tcmd.Process.Kill()\n\t\t\tcmd.Wait()\n\t\t\treturn fmt.Errorf(\"hook ran past specified timeout of %.1fs\", c.Timeout.Seconds())\n\t\t}\n\t}\n\treturn <-errC\n}\n<|endoftext|>"} {"text":"<commit_before>package anteater\n\nimport (\n\t\"time\"\n\t\"runtime\"\n\t\"sort\"\n\t\"sync\/atomic\"\n\t\"fmt\"\n)\n\n\nvar FiveSecondsCounters []*StateHttpCounters = make([]*StateHttpCounters, 60)\nvar FiveSecondsCursor int\n\ntype State struct {\n\tMain *StateMain\n\tFiles *StateFiles\n\tCounters *StateHttpCounters\n\tCountersLast5Seconds *StateHttpCounters\n\tCountersLastMinute *StateHttpCounters\n\tCountersLast5Minutes *StateHttpCounters\n\tAlloc *StateAllocateCounters\n}\n\ntype StateMain struct {\n\tTime int64\n\tStartTime int64\n\tGoroutines int\n\tMemoryUsage uint64\n\tLastDump int64\n\tLastDumpTime int64\n\tIndexFileSize int64\n}\n\ntype StateFiles struct {\n\tContainersCount int\n\tFilesCount int64\n\tFilesSize int64\t\n\tSpacesCount int64\n\tSpacesSize int64\n}\n\ntype StateHttpCounters struct {\n\tGet int64\n\tAdd int64\n\tDelete int64\n\tNotFound int64\n}\n\ntype StateAllocateCounters struct {\n\tReplaceSpace int64\n\tAddToSpace int64\n\tToEnd int64\n}\n\n\nfunc init() {\n\tFiveSecondsTick()\n\tgo func() { \n\t\tch := time.Tick(5 * time.Second)\n\t\tfor _ = range ch {\n\t\t\tfunc () {\n\t\t\t\tFiveSecondsTick()\n\t\t\t}()\n\t\t}\n\t}()\n}\n\n\nfunc GetState() *State {\n\tms := &runtime.MemStats{}\n\truntime.ReadMemStats(ms)\n\tm := &StateMain{\n\t\tTime : time.Now().Unix(),\n\t\tStartTime : StartTime.Unix(),\n\t\tGoroutines : runtime.NumGoroutine(),\n\t\tMemoryUsage : ms.TotalAlloc,\n\t\tLastDump : LastDump.Unix(),\n\t\tLastDumpTime : LastDumpTime,\n\t\tIndexFileSize: IndexFileSize,\n\t}\n\t\n\tGetFileLock.Lock()\n\tdefer GetFileLock.Unlock()\n\n\tvar ids []int = make([]int, 0)\n\tfor _, cn := range(FileContainers) {\n\t\tids = append(ids, int(cn.Id))\n\t}\n\tsort.Ints(ids)\n\tvar cnt *Container\n\t\n\tvar totalSize, totalFileSize, fileCount, spacesCount, spacesTotalSize int64\n\t\n\tfor _, id := range(ids) {\n\t\tcnt = FileContainers[int32(id)]\n\t\t_, sc, st := cnt.Spaces.ToHtml(10, cnt.Size)\n\t\ttotalSize += cnt.Size\n\t\tfileCount += cnt.Count\n\t\tspacesCount += sc\n\t\tspacesTotalSize += st\t\t\n\t\tallocated := cnt.Size - (cnt.Size - cnt.Offset) - st \n\t\ttotalFileSize += allocated\n\t}\n\n\tf := &StateFiles{\n\t\tContainersCount : len(ids),\n\t\tFilesCount : fileCount,\n\t\tFilesSize : totalFileSize,\n\t\tSpacesCount : spacesCount,\n\t\tSpacesSize : spacesTotalSize,\n\t}\n\t\n\t\n\treturn &State{\n\t\tMain : m,\n\t\tFiles : f,\n\t\tCounters : HttpCn,\n\t\tCountersLast5Seconds : GetHttpStateByPeriod(1),\n\t\tCountersLastMinute : GetHttpStateByPeriod(12),\n\t\tCountersLast5Minutes : GetHttpStateByPeriod(60),\n\t\tAlloc : AllocCn,\n\t}\n}\n\n\nfunc FiveSecondsTick() {\n\tif FiveSecondsCursor == 60 {\n\t\tFiveSecondsCursor = 0\n\t}\n\tif FiveSecondsCounters[FiveSecondsCursor] == nil {\n\t\tFiveSecondsCounters[FiveSecondsCursor] = &StateHttpCounters{}\n\t}\n\tFiveSecondsCounters[FiveSecondsCursor].SetData(HttpCn)\t\n\tFiveSecondsCursor++\n\t\n\tfor k, v := range(FiveSecondsCounters) {\n\t\tfmt.Println(k, v)\n\t}\n}\n\nfunc GetHttpStateByPeriod(period int) (result *StateHttpCounters) {\n\tcurCursor := FiveSecondsCursor - 1\n\tdiffCursor := curCursor\n\tresult = &StateHttpCounters{}\n\ti := 0\n\tfor {\n\t\tdiffCursor--\n\t\ti++\n\t\tif diffCursor == -1 {\n\t\t\tdiffCursor = 59\n\t\t}\t\t\n\t\tif FiveSecondsCounters[diffCursor] == nil {\n\t\t\tdiffCursor++\n\t\t\tif diffCursor == 60 {\n\t\t\t\tdiffCursor = 0\n\t\t\t} \n\t\t\tbreak\n\t\t}\n\t\tif i >= period {\n\t\t\tbreak\n\t\t}\n\t} \n\t\/\/fmt.Println(period, curCursor, diffCursor)\n\tcur := FiveSecondsCounters[curCursor]\n\tdiff := FiveSecondsCounters[diffCursor]\n\n\tresult.Add = cur.Add - diff.Add\n\tresult.Get = cur.Get - diff.Add\n\tresult.Delete = cur.Delete - diff.Delete\n\tresult.NotFound = cur.NotFound - diff.NotFound\n\treturn\n}\n \nfunc (s *StateHttpCounters) CGet() {\n\tatomic.AddInt64(&s.Get, 1)\n}\n\nfunc (s *StateHttpCounters) CAdd() {\n\tatomic.AddInt64(&s.Add, 1)\n}\n\nfunc (s *StateHttpCounters) CDelete() {\n\tatomic.AddInt64(&s.Delete, 1)\n}\n\nfunc (s *StateHttpCounters) CNotFound() {\n\tatomic.AddInt64(&s.NotFound, 1)\n}\n\nfunc (s *StateHttpCounters) SetData(otherS *StateHttpCounters) {\n\ts.Get = otherS.Get\n\ts.Add = otherS.Add\n\ts.Delete = otherS.Delete\n\ts.NotFound = otherS.NotFound\n}\n\nfunc (s *StateAllocateCounters) CTarget(target int) {\n\tswitch target {\n\t\tcase TARGET_SPACE_EQ:\n\t\t\tatomic.AddInt64(&s.ReplaceSpace, 1)\n\t\tcase TARGET_SPACE_FREE:\n\t\t\tatomic.AddInt64(&s.AddToSpace, 1)\n\t \tcase TARGET_NEW:\n\t \t\tatomic.AddInt64(&s.ToEnd, 1)\n\t}\n}\n<commit_msg>Debug<commit_after>package anteater\n\nimport (\n\t\"time\"\n\t\"runtime\"\n\t\"sort\"\n\t\"sync\/atomic\"\n\t\"fmt\"\n)\n\n\nvar FiveSecondsCounters []*StateHttpCounters = make([]*StateHttpCounters, 60)\nvar FiveSecondsCursor int\n\ntype State struct {\n\tMain *StateMain\n\tFiles *StateFiles\n\tCounters *StateHttpCounters\n\tCountersLast5Seconds *StateHttpCounters\n\tCountersLastMinute *StateHttpCounters\n\tCountersLast5Minutes *StateHttpCounters\n\tAlloc *StateAllocateCounters\n}\n\ntype StateMain struct {\n\tTime int64\n\tStartTime int64\n\tGoroutines int\n\tMemoryUsage uint64\n\tLastDump int64\n\tLastDumpTime int64\n\tIndexFileSize int64\n}\n\ntype StateFiles struct {\n\tContainersCount int\n\tFilesCount int64\n\tFilesSize int64\t\n\tSpacesCount int64\n\tSpacesSize int64\n}\n\ntype StateHttpCounters struct {\n\tGet int64\n\tAdd int64\n\tDelete int64\n\tNotFound int64\n}\n\ntype StateAllocateCounters struct {\n\tReplaceSpace int64\n\tAddToSpace int64\n\tToEnd int64\n}\n\n\nfunc init() {\n\tFiveSecondsTick()\n\tgo func() { \n\t\tch := time.Tick(5 * time.Second)\n\t\tfor _ = range ch {\n\t\t\tfunc () {\n\t\t\t\tFiveSecondsTick()\n\t\t\t}()\n\t\t}\n\t}()\n}\n\n\nfunc GetState() *State {\n\tms := &runtime.MemStats{}\n\truntime.ReadMemStats(ms)\n\tm := &StateMain{\n\t\tTime : time.Now().Unix(),\n\t\tStartTime : StartTime.Unix(),\n\t\tGoroutines : runtime.NumGoroutine(),\n\t\tMemoryUsage : ms.TotalAlloc,\n\t\tLastDump : LastDump.Unix(),\n\t\tLastDumpTime : LastDumpTime,\n\t\tIndexFileSize: IndexFileSize,\n\t}\n\t\n\tGetFileLock.Lock()\n\tdefer GetFileLock.Unlock()\n\n\tvar ids []int = make([]int, 0)\n\tfor _, cn := range(FileContainers) {\n\t\tids = append(ids, int(cn.Id))\n\t}\n\tsort.Ints(ids)\n\tvar cnt *Container\n\t\n\tvar totalSize, totalFileSize, fileCount, spacesCount, spacesTotalSize int64\n\t\n\tfor _, id := range(ids) {\n\t\tcnt = FileContainers[int32(id)]\n\t\t_, sc, st := cnt.Spaces.ToHtml(10, cnt.Size)\n\t\ttotalSize += cnt.Size\n\t\tfileCount += cnt.Count\n\t\tspacesCount += sc\n\t\tspacesTotalSize += st\t\t\n\t\tallocated := cnt.Size - (cnt.Size - cnt.Offset) - st \n\t\ttotalFileSize += allocated\n\t}\n\n\tf := &StateFiles{\n\t\tContainersCount : len(ids),\n\t\tFilesCount : fileCount,\n\t\tFilesSize : totalFileSize,\n\t\tSpacesCount : spacesCount,\n\t\tSpacesSize : spacesTotalSize,\n\t}\n\t\n\t\n\treturn &State{\n\t\tMain : m,\n\t\tFiles : f,\n\t\tCounters : HttpCn,\n\t\tCountersLast5Seconds : GetHttpStateByPeriod(1),\n\t\tCountersLastMinute : GetHttpStateByPeriod(12),\n\t\tCountersLast5Minutes : GetHttpStateByPeriod(60),\n\t\tAlloc : AllocCn,\n\t}\n}\n\n\nfunc FiveSecondsTick() {\n\tif FiveSecondsCursor == 60 {\n\t\tFiveSecondsCursor = 0\n\t}\n\tif FiveSecondsCounters[FiveSecondsCursor] == nil {\n\t\tFiveSecondsCounters[FiveSecondsCursor] = &StateHttpCounters{}\n\t}\n\tFiveSecondsCounters[FiveSecondsCursor].SetData(HttpCn)\t\n\tFiveSecondsCursor++\n\t\n\tfor k, v := range(FiveSecondsCounters) {\n\t\tif v != nil {\n\t\t\tfmt.Println(k, v)\n\t\t}\n\t}\n}\n\nfunc GetHttpStateByPeriod(period int) (result *StateHttpCounters) {\n\tcurCursor := FiveSecondsCursor - 1\n\tdiffCursor := curCursor\n\tresult = &StateHttpCounters{}\n\ti := 0\n\tfor {\n\t\tdiffCursor--\n\t\ti++\n\t\tif diffCursor == -1 {\n\t\t\tdiffCursor = 59\n\t\t}\t\t\n\t\tif FiveSecondsCounters[diffCursor] == nil {\n\t\t\tdiffCursor++\n\t\t\tif diffCursor == 60 {\n\t\t\t\tdiffCursor = 0\n\t\t\t} \n\t\t\tbreak\n\t\t}\n\t\tif i >= period {\n\t\t\tbreak\n\t\t}\n\t} \n\t\/\/fmt.Println(period, curCursor, diffCursor)\n\tcur := FiveSecondsCounters[curCursor]\n\tdiff := FiveSecondsCounters[diffCursor]\n\n\tresult.Add = cur.Add - diff.Add\n\tresult.Get = cur.Get - diff.Add\n\tresult.Delete = cur.Delete - diff.Delete\n\tresult.NotFound = cur.NotFound - diff.NotFound\n\treturn\n}\n \nfunc (s *StateHttpCounters) CGet() {\n\tatomic.AddInt64(&s.Get, 1)\n}\n\nfunc (s *StateHttpCounters) CAdd() {\n\tatomic.AddInt64(&s.Add, 1)\n}\n\nfunc (s *StateHttpCounters) CDelete() {\n\tatomic.AddInt64(&s.Delete, 1)\n}\n\nfunc (s *StateHttpCounters) CNotFound() {\n\tatomic.AddInt64(&s.NotFound, 1)\n}\n\nfunc (s *StateHttpCounters) SetData(otherS *StateHttpCounters) {\n\ts.Get = otherS.Get\n\ts.Add = otherS.Add\n\ts.Delete = otherS.Delete\n\ts.NotFound = otherS.NotFound\n}\n\nfunc (s *StateAllocateCounters) CTarget(target int) {\n\tswitch target {\n\t\tcase TARGET_SPACE_EQ:\n\t\t\tatomic.AddInt64(&s.ReplaceSpace, 1)\n\t\tcase TARGET_SPACE_FREE:\n\t\t\tatomic.AddInt64(&s.AddToSpace, 1)\n\t \tcase TARGET_NEW:\n\t \t\tatomic.AddInt64(&s.ToEnd, 1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Ajit Yagaty\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/ajityagaty\/go-kairosdb\/builder\"\n\t\"github.com\/ajityagaty\/go-kairosdb\/response\"\n)\n\nvar (\n\tapi_version = \"\/api\/v1\"\n\tdatapoints_ep = api_version + \"\/datapoints\"\n\tquery_ep = api_version + \"\/datapoints\/query\"\n\tversion_ep = api_version + \"\/version\"\n\thealth_ep = api_version + \"\/health\/check\"\n\tmetricnames_ep = api_version + \"\/metricnames\"\n\ttagnames_ep = api_version + \"\/tagnames\"\n\ttagvalues_ep = api_version + \"\/tagvalues\"\n\tdelmetric_ep = api_version + \"\/metric\/\"\n\tdeldatapoints_ep = api_version + \"\/datapoints\/delete\"\n)\n\n\/\/ This is the type that implements the Client interface.\ntype httpClient struct {\n\tserverAddress string\n}\n\nfunc NewHttpClient(serverAddress string) Client {\n\treturn &httpClient{\n\t\tserverAddress: serverAddress,\n\t}\n}\n\n\/\/ Returns a list of all metrics names.\nfunc (hc *httpClient) GetMetricNames() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + metricnames_ep)\n}\n\n\/\/ Returns a list of all tag names.\nfunc (hc *httpClient) GetTagNames() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + tagnames_ep)\n}\n\n\/\/ Returns a list of all tag values.\nfunc (hc *httpClient) GetTagValues() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + tagvalues_ep)\n}\n\n\/\/ Queries KairosDB using the query built using builder.\nfunc (hc *httpClient) Query(qb builder.QueryBuilder) (*response.QueryResponse, error) {\n\t\/\/ Get the JSON representation of the query.\n\tdata, err := qb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postQuery(hc.serverAddress+query_ep, data)\n}\n\n\/\/ Sends metrics from the builder to the KairosDB server.\nfunc (hc *httpClient) PushMetrics(mb builder.MetricBuilder) (*response.Response, error) {\n\tdata, err := mb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postData(hc.serverAddress+datapoints_ep, data)\n}\n\n\/\/ Deletes a metric. This is the metric and all its datapoints.\nfunc (hc *httpClient) DeleteMetric(name string) (*response.Response, error) {\n\treturn hc.delete(hc.serverAddress + delmetric_ep + name)\n}\n\n\/\/ TODO: Deletes data in KairosDB using the query built by the builder.\nfunc (hc *httpClient) Delete(qb builder.QueryBuilder) (*response.Response, error) {\n\tdata, err := qb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postData(hc.serverAddress+deldatapoints_ep, data)\n}\n\n\/\/ Checks the health of the KairosDB Server.\nfunc (hc *httpClient) HealthCheck() (*response.Response, error) {\n\tresp, err := hc.sendRequest(hc.serverAddress+health_ep, \"GET\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &response.Response{}\n\tr.SetStatusCode(resp.StatusCode)\n\treturn r, nil\n}\n\nfunc (hc *httpClient) sendRequest(url, method string) (*http.Response, error) {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"accept\", \"application\/json\")\n\tcli := &http.Client{}\n\n\treturn cli.Do(req)\n}\n\nfunc (hc *httpClient) httpRespToResponse(httpResp *http.Response) (*response.Response, error) {\n\tresp := &response.Response{}\n\tresp.SetStatusCode(httpResp.StatusCode)\n\n\tif httpResp.StatusCode != http.StatusNoContent {\n\t\t\/\/ If the request has failed, then read the response body.\n\t\tdefer httpResp.Body.Close()\n\t\tcontents, err := ioutil.ReadAll(httpResp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t\/\/ Unmarshal the contents into Response object.\n\t\t\terr = json.Unmarshal(contents, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resp, nil\n}\n\nfunc (hc *httpClient) httpRespToQueryResponse(httpResp *http.Response) (*response.QueryResponse, error) {\n\t\/\/ Read the HTTP response body.\n\tdefer httpResp.Body.Close()\n\tcontents, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqr := response.NewQueryResponse(httpResp.StatusCode)\n\n\t\/\/ Unmarshal the contents into QueryResponse object.\n\terr = json.Unmarshal(contents, qr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn qr, nil\n}\n\nfunc (hc *httpClient) get(url string) (*response.GetResponse, error) {\n\tresp, err := hc.sendRequest(url, \"GET\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgr := response.NewGetResponse(resp.StatusCode)\n\n\t\terr = json.Unmarshal(contents, gr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn gr, nil\n\t}\n}\n\nfunc (hc *httpClient) postData(url string, data []byte) (*response.Response, error) {\n\tresp, err := http.Post(url, \"application\/json\", bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToResponse(resp)\n}\n\nfunc (hc *httpClient) postQuery(url string, data []byte) (*response.QueryResponse, error) {\n\tresp, err := http.Post(url, \"application\/json\", bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToQueryResponse(resp)\n}\n\nfunc (hc *httpClient) delete(url string) (*response.Response, error) {\n\tresp, err := hc.sendRequest(url, \"DELETE\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToResponse(resp)\n}\n<commit_msg>Update the comment for the Delete API.<commit_after>\/\/ Copyright 2016 Ajit Yagaty\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/ajityagaty\/go-kairosdb\/builder\"\n\t\"github.com\/ajityagaty\/go-kairosdb\/response\"\n)\n\nvar (\n\tapi_version = \"\/api\/v1\"\n\tdatapoints_ep = api_version + \"\/datapoints\"\n\tquery_ep = api_version + \"\/datapoints\/query\"\n\tversion_ep = api_version + \"\/version\"\n\thealth_ep = api_version + \"\/health\/check\"\n\tmetricnames_ep = api_version + \"\/metricnames\"\n\ttagnames_ep = api_version + \"\/tagnames\"\n\ttagvalues_ep = api_version + \"\/tagvalues\"\n\tdelmetric_ep = api_version + \"\/metric\/\"\n\tdeldatapoints_ep = api_version + \"\/datapoints\/delete\"\n)\n\n\/\/ This is the type that implements the Client interface.\ntype httpClient struct {\n\tserverAddress string\n}\n\nfunc NewHttpClient(serverAddress string) Client {\n\treturn &httpClient{\n\t\tserverAddress: serverAddress,\n\t}\n}\n\n\/\/ Returns a list of all metrics names.\nfunc (hc *httpClient) GetMetricNames() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + metricnames_ep)\n}\n\n\/\/ Returns a list of all tag names.\nfunc (hc *httpClient) GetTagNames() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + tagnames_ep)\n}\n\n\/\/ Returns a list of all tag values.\nfunc (hc *httpClient) GetTagValues() (*response.GetResponse, error) {\n\treturn hc.get(hc.serverAddress + tagvalues_ep)\n}\n\n\/\/ Queries KairosDB using the query built using builder.\nfunc (hc *httpClient) Query(qb builder.QueryBuilder) (*response.QueryResponse, error) {\n\t\/\/ Get the JSON representation of the query.\n\tdata, err := qb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postQuery(hc.serverAddress+query_ep, data)\n}\n\n\/\/ Sends metrics from the builder to the KairosDB server.\nfunc (hc *httpClient) PushMetrics(mb builder.MetricBuilder) (*response.Response, error) {\n\tdata, err := mb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postData(hc.serverAddress+datapoints_ep, data)\n}\n\n\/\/ Deletes a metric. This is the metric and all its datapoints.\nfunc (hc *httpClient) DeleteMetric(name string) (*response.Response, error) {\n\treturn hc.delete(hc.serverAddress + delmetric_ep + name)\n}\n\n\/\/ Deletes data in KairosDB using the query built by the builder.\nfunc (hc *httpClient) Delete(qb builder.QueryBuilder) (*response.Response, error) {\n\tdata, err := qb.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.postData(hc.serverAddress+deldatapoints_ep, data)\n}\n\n\/\/ Checks the health of the KairosDB Server.\nfunc (hc *httpClient) HealthCheck() (*response.Response, error) {\n\tresp, err := hc.sendRequest(hc.serverAddress+health_ep, \"GET\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &response.Response{}\n\tr.SetStatusCode(resp.StatusCode)\n\treturn r, nil\n}\n\nfunc (hc *httpClient) sendRequest(url, method string) (*http.Response, error) {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"accept\", \"application\/json\")\n\tcli := &http.Client{}\n\n\treturn cli.Do(req)\n}\n\nfunc (hc *httpClient) httpRespToResponse(httpResp *http.Response) (*response.Response, error) {\n\tresp := &response.Response{}\n\tresp.SetStatusCode(httpResp.StatusCode)\n\n\tif httpResp.StatusCode != http.StatusNoContent {\n\t\t\/\/ If the request has failed, then read the response body.\n\t\tdefer httpResp.Body.Close()\n\t\tcontents, err := ioutil.ReadAll(httpResp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t\/\/ Unmarshal the contents into Response object.\n\t\t\terr = json.Unmarshal(contents, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resp, nil\n}\n\nfunc (hc *httpClient) httpRespToQueryResponse(httpResp *http.Response) (*response.QueryResponse, error) {\n\t\/\/ Read the HTTP response body.\n\tdefer httpResp.Body.Close()\n\tcontents, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqr := response.NewQueryResponse(httpResp.StatusCode)\n\n\t\/\/ Unmarshal the contents into QueryResponse object.\n\terr = json.Unmarshal(contents, qr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn qr, nil\n}\n\nfunc (hc *httpClient) get(url string) (*response.GetResponse, error) {\n\tresp, err := hc.sendRequest(url, \"GET\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tgr := response.NewGetResponse(resp.StatusCode)\n\n\t\terr = json.Unmarshal(contents, gr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn gr, nil\n\t}\n}\n\nfunc (hc *httpClient) postData(url string, data []byte) (*response.Response, error) {\n\tresp, err := http.Post(url, \"application\/json\", bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToResponse(resp)\n}\n\nfunc (hc *httpClient) postQuery(url string, data []byte) (*response.QueryResponse, error) {\n\tresp, err := http.Post(url, \"application\/json\", bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToQueryResponse(resp)\n}\n\nfunc (hc *httpClient) delete(url string) (*response.Response, error) {\n\tresp, err := hc.sendRequest(url, \"DELETE\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hc.httpRespToResponse(resp)\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"github.com\/ActiveState\/httpapi\/client\"\n\t\"net\/http\"\n)\n\ntype RestClient struct {\n\tTargetURL string\n\tToken string\n\tGroup string\n\tclient *client.Client\n}\n\nfunc NewRestClient(targetUrl, token, group string) *RestClient {\n\treturn &RestClient{targetUrl, token, group, getInsecureHttpRestClient()}\n}\n\ntype App struct {\n\tName string\n\tURIs []string\n\tInstances int\n\tRunningInstances *int\n\tState string\n\tVersion string\n\tStaging struct {\n\t\tModel string\n\t\tStack string\n\t}\n\tResources struct {\n\t\tMemory int\n\t\tDisk int\n\t\tFDs int\n\t\tSudo bool\n\t}\n\tMeta struct {\n\t\tVersion int\n\t\tCreated int \/\/ timestamp\n\t}\n}\n\nfunc (c *RestClient) ListApps() (apps []App, err error) {\n\terr = c.MakeRequest(\"GET\", \"\/apps\", nil, &apps)\n\treturn\n}\n\n\/\/ CreateApp only creates the application. It is an equivalent of `s\n\/\/ create-app --json`.\nfunc (c *RestClient) CreateApp(name string) (int, error) {\n\t\/\/ Ensure that app name is unique for this user. We do this as\n\t\/\/ unfortunately the server doesn't enforce it.\n\tapps, err := c.ListApps()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tfor _, app := range apps {\n\t\tif app.Name == name {\n\t\t\treturn -1, fmt.Errorf(\"App by that name (%s) already exists\", name)\n\t\t}\n\t}\n\n\t\/\/ The CC requires that a POST on \/apps passes, at minimum, these\n\t\/\/ fields. The values for framework\/runtime doesn't matter for our\n\t\/\/ purposes (they will get overwritten by a subsequent app push).\n\tcreateArgs := map[string]interface{}{\n\t\t\"name\": name,\n\t\t\"staging\": map[string]string{\n\t\t\t\"framework\": \"buildpack\",\n\t\t\t\"runtime\": \"python27\",\n\t\t},\n\t}\n\n\tvar resp struct{ App_ID int }\n\terr = c.MakeRequest(\"POST\", \"\/apps\", createArgs, &resp)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tif resp.App_ID < 1 {\n\t\treturn -1, fmt.Errorf(\"Invalid or missing AppID from CC: %d\", resp.App_ID)\n\t}\n\n\treturn resp.App_ID, nil\n}\n\nfunc (c *RestClient) MakeRequest(method string, path string, params interface{}, response interface{}) error {\n\treq, err := client.NewRequest(method, c.TargetURL+path, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", c.Token)\n\tif c.Group != \"\" {\n\t\treq.Header.Set(\"X-Stackato-Group\", c.Group)\n\t}\n\treturn c.client.DoRequest(req, response)\n}\n\n\/\/ emulate `curl -k ...`\nfunc getInsecureHttpRestClient() *client.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n\treturn &client.Client{Transport: tr}\n}\n<commit_msg>wrap http api errors<commit_after>package client\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"github.com\/ActiveState\/httpapi\/client\"\n\t\"net\/http\"\n)\n\ntype RestClient struct {\n\tTargetURL string\n\tToken string\n\tGroup string\n\tclient *client.Client\n}\n\nfunc NewRestClient(targetUrl, token, group string) *RestClient {\n\treturn &RestClient{targetUrl, token, group, getInsecureHttpRestClient()}\n}\n\ntype App struct {\n\tName string\n\tURIs []string\n\tInstances int\n\tRunningInstances *int\n\tState string\n\tVersion string\n\tStaging struct {\n\t\tModel string\n\t\tStack string\n\t}\n\tResources struct {\n\t\tMemory int\n\t\tDisk int\n\t\tFDs int\n\t\tSudo bool\n\t}\n\tMeta struct {\n\t\tVersion int\n\t\tCreated int \/\/ timestamp\n\t}\n}\n\nfunc (c *RestClient) ListApps() (apps []App, err error) {\n\terr = c.MakeRequest(\"GET\", \"\/apps\", nil, &apps)\n\treturn\n}\n\n\/\/ CreateApp only creates the application. It is an equivalent of `s\n\/\/ create-app --json`.\nfunc (c *RestClient) CreateApp(name string) (int, error) {\n\t\/\/ Ensure that app name is unique for this user. We do this as\n\t\/\/ unfortunately the server doesn't enforce it.\n\tapps, err := c.ListApps()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tfor _, app := range apps {\n\t\tif app.Name == name {\n\t\t\treturn -1, fmt.Errorf(\"App by that name (%s) already exists\", name)\n\t\t}\n\t}\n\n\t\/\/ The CC requires that a POST on \/apps sends, at minimum, these\n\t\/\/ fields. The values for framework\/runtime doesn't matter for our\n\t\/\/ purpose (they will get overwritten by a subsequent app push).\n\tcreateArgs := map[string]interface{}{\n\t\t\"name\": name,\n\t\t\"staging\": map[string]string{\n\t\t\t\"framework\": \"buildpack\",\n\t\t\t\"runtime\": \"python27\",\n\t\t},\n\t}\n\n\tvar resp struct{ App_ID int }\n\terr = c.MakeRequest(\"POST\", \"\/apps\", createArgs, &resp)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tif resp.App_ID < 1 {\n\t\treturn -1, fmt.Errorf(\"Invalid or missing AppID from CC: %d\", resp.App_ID)\n\t}\n\n\treturn resp.App_ID, nil\n}\n\nfunc (c *RestClient) MakeRequest(method string, path string, params interface{}, response interface{}) error {\n\treq, err := client.NewRequest(method, c.TargetURL+path, params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", c.Token)\n\tif c.Group != \"\" {\n\t\treq.Header.Set(\"X-Stackato-Group\", c.Group)\n\t}\n\terr = c.client.DoRequest(req, response)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"CC API %v %v failed: %v\", method, path, err)\n\t}\n\treturn nil\n}\n\n\/\/ emulate `curl -k ...`\nfunc getInsecureHttpRestClient() *client.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n\treturn &client.Client{Transport: tr}\n}\n<|endoftext|>"} {"text":"<commit_before>package clock\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ExampleClock_AddJobRepeat 基于函数回调,一个对重复任务的使用演示。\nfunc ExampleClock_AddJobRepeat() {\n\tvar (\n\t\tmyClock = NewClock()\n\t\tcounter = 0\n\t\tmut sync.Mutex\n\t\tsigalChan = make(chan struct{}, 0)\n\t)\n\tfn := func() {\n\t\tfmt.Println(\"schedule repeat\")\n\t\tmut.Lock()\n\t\tdefer mut.Unlock()\n\t\tcounter++\n\t\tif counter == 3 {\n\t\t\tsigalChan <- struct{}{}\n\t\t}\n\n\t}\n\t\/\/create a task that executes three times,interval 50 millisecond\n\tevent, inserted := myClock.AddJobRepeat(time.Duration(time.Millisecond*50), 0, fn)\n\tif !inserted {\n\t\tlog.Println(\"failure\")\n\t}\n\n\t\/\/等待阻塞信号\n\t<-sigalChan\n\tmyClock.DelJob(event)\n\n\t\/\/wait a second,watching\n\ttime.Sleep(time.Second)\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n}\n\n\/\/ExampleClock_AddJobRepeat2 ,基于函数回调,演示添加有次数限制的重复任务\n\/\/ 执行3次之后,撤销定时事件\nfunc ExampleClock_AddJobRepeat2() {\n\tvar (\n\t\tmyClock = NewClock()\n\t)\n\t\/\/define a repeat task\n\tfn := func() {\n\t\tfmt.Println(\"schedule repeat\")\n\t}\n\t\/\/add in clock,execute three times,interval 200 millisecond\n\t_, inserted := myClock.AddJobRepeat(time.Duration(time.Millisecond*200), 3, fn)\n\tif !inserted {\n\t\tlog.Println(\"failure\")\n\t}\n\t\/\/wait a second,watching\n\ttime.Sleep(time.Second)\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n}\n\n\/\/ExampleClock_AddJobWithTimeout 基于函数回调,对一次性任务正常使用的演示。\nfunc ExampleClock_AddJobWithTimeout() {\n\tvar (\n\t\tjobClock = NewClock()\n\t\tjobFunc = func() {\n\t\t\tfmt.Println(\"schedule once\")\n\t\t}\n\t)\n\t\/\/add a task that executes once,interval 100 millisecond\n\tjobClock.AddJobWithInterval(time.Duration(100*time.Millisecond), jobFunc)\n\n\t\/\/wait a second,watching\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule once\n}\n\n\/\/ExampleClock_AddJobWithDeadtime 基于事件提醒,对一次性任务中途放弃的使用演示。\nfunc ExampleClock_AddJobWithDeadtime() {\n\tvar (\n\t\tmyClock = NewClock()\n\t\tjobFunc = func() {\n\t\t\tfmt.Println(\"schedule once\")\n\t\t}\n\t\tactionTime = time.Now().Add(time.Millisecond * 500)\n\t)\n\t\/\/创建一次性任务,定时500ms\n\tjob, _ := myClock.AddJobWithDeadtime(actionTime, jobFunc)\n\n\t\/\/任务执行前,撤销任务\n\ttime.Sleep(time.Millisecond * 300)\n\tmyClock.DelJob(job)\n\n\t\/\/等待2秒,正常情况下,事件不会再执行\n\ttime.Sleep(2 * time.Second)\n\n\t\/\/Output:\n\t\/\/\n\t\/\/\n}\n<commit_msg>fix renew test function name<commit_after>package clock\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ExampleClock_AddJobRepeat 基于函数回调,一个对重复任务的使用演示。\nfunc ExampleClock_AddJobRepeat() {\n\tvar (\n\t\tmyClock = NewClock()\n\t\tcounter = 0\n\t\tmut sync.Mutex\n\t\tsigalChan = make(chan struct{}, 0)\n\t)\n\tfn := func() {\n\t\tfmt.Println(\"schedule repeat\")\n\t\tmut.Lock()\n\t\tdefer mut.Unlock()\n\t\tcounter++\n\t\tif counter == 3 {\n\t\t\tsigalChan <- struct{}{}\n\t\t}\n\n\t}\n\t\/\/create a task that executes three times,interval 50 millisecond\n\tevent, inserted := myClock.AddJobRepeat(time.Duration(time.Millisecond*50), 0, fn)\n\tif !inserted {\n\t\tlog.Println(\"failure\")\n\t}\n\n\t\/\/等待阻塞信号\n\t<-sigalChan\n\tmyClock.DelJob(event)\n\n\t\/\/wait a second,watching\n\ttime.Sleep(time.Second)\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n}\n\n\/\/ExampleClock_AddJobRepeat2 ,基于函数回调,演示添加有次数限制的重复任务\n\/\/ 执行3次之后,撤销定时事件\nfunc ExampleClock_AddJobRepeat2() {\n\tvar (\n\t\tmyClock = NewClock()\n\t)\n\t\/\/define a repeat task\n\tfn := func() {\n\t\tfmt.Println(\"schedule repeat\")\n\t}\n\t\/\/add in clock,execute three times,interval 200 millisecond\n\t_, inserted := myClock.AddJobRepeat(time.Duration(time.Millisecond*200), 3, fn)\n\tif !inserted {\n\t\tlog.Println(\"failure\")\n\t}\n\t\/\/wait a second,watching\n\ttime.Sleep(time.Second)\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n\t\/\/schedule repeat\n}\n\n\/\/ExampleClock_AddJobWithInterval 基于函数回调,对一次性任务正常使用的演示。\nfunc ExampleClock_AddJobWithInterval() {\n\tvar (\n\t\tjobClock = NewClock()\n\t\tjobFunc = func() {\n\t\t\tfmt.Println(\"schedule once\")\n\t\t}\n\t)\n\t\/\/add a task that executes once,interval 100 millisecond\n\tjobClock.AddJobWithInterval(time.Duration(100*time.Millisecond), jobFunc)\n\n\t\/\/wait a second,watching\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/Output:\n\t\/\/\n\t\/\/schedule once\n}\n\n\/\/ExampleClock_AddJobWithDeadtime 基于事件提醒,对一次性任务中途放弃的使用演示。\nfunc ExampleClock_AddJobWithDeadtime() {\n\tvar (\n\t\tmyClock = NewClock()\n\t\tjobFunc = func() {\n\t\t\tfmt.Println(\"schedule once\")\n\t\t}\n\t\tactionTime = time.Now().Add(time.Millisecond * 500)\n\t)\n\t\/\/创建一次性任务,定时500ms\n\tjob, _ := myClock.AddJobWithDeadtime(actionTime, jobFunc)\n\n\t\/\/任务执行前,撤销任务\n\ttime.Sleep(time.Millisecond * 300)\n\tmyClock.DelJob(job)\n\n\t\/\/等待2秒,正常情况下,事件不会再执行\n\ttime.Sleep(2 * time.Second)\n\n\t\/\/Output:\n\t\/\/\n\t\/\/\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage epub generates valid EPUB 3.0 files with additional EPUB 2.0 table of\ncontents (as seen here: https:\/\/github.com\/bmaupin\/epub-samples) for maximum\ncompatibility.\n\nBasic usage:\n\n\t\/\/ Create a new EPUB\n\te := epub.NewEpub(\"My title\")\n\n\t\/\/ Set the author\n\te.SetAuthor(\"Hingle McCringleberry\")\n\n\t\/\/ Add a section\n\tsection1Body := `<h1>Section 1<\/h1>\n\t<p>This is a paragraph.<\/p>`\n\te.AddSection(section1Body, \"Section 1\", \"\", \"\")\n\n\t\/\/ Write the EPUB\n\terr = e.Write(\"My EPUB.epub\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t}\n\n*\/\npackage epub\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\n\/\/ ErrFilenameAlreadyUsed is thrown by AddCSS, AddFont, AddImage, or AddSection\n\/\/ if the same filename is used more than once\nvar ErrFilenameAlreadyUsed = errors.New(\"Filename already used\")\n\n\/\/ ErrRetrievingFile is thrown by AddCSS, AddFont, or AddImage if there was a\n\/\/ problem retrieving the source file that was provided\nvar ErrRetrievingFile = errors.New(\"Error retrieving file from source\")\n\n\/\/ Folder names used for resources inside the EPUB\nconst (\n\tCSSFolderName = \"css\"\n\tFontFolderName = \"fonts\"\n\tImageFolderName = \"images\"\n)\n\nconst (\n\tcssFileFormat = \"css%04d%s\"\n\tdefaultCoverBody = `<img src=\"%s\" alt=\"Cover Image\" \/>`\n\tdefaultCoverCSSContent = `body {\n background-color: #FFFFFF;\n margin-bottom: 0px;\n margin-left: 0px;\n margin-right: 0px;\n margin-top: 0px;\n text-align: center;\n}\nimg {\n max-height: 100%;\n max-width: 100%;\n}\n`\n\tdefaultCoverCSSFilename = \"cover.css\"\n\tdefaultCoverCSSSource = \"cover.css\"\n\tdefaultCoverImgFormat = \"cover%s\"\n\tdefaultCoverXhtmlFilename = \"cover.xhtml\"\n\tdefaultEpubLang = \"en\"\n\tdefaultPpd = \"ltr\"\n\tfontFileFormat = \"font%04d%s\"\n\timageFileFormat = \"image%04d%s\"\n\tsectionFileFormat = \"section%04d.xhtml\"\n\turnUUIDPrefix = \"urn:uuid:\"\n)\n\n\/\/ Epub implements an EPUB file.\ntype Epub struct {\n\tauthor string\n\tcover *epubCover\n\t\/\/ The key is the css filename, the value is the css source\n\tcss map[string]string\n\t\/\/ The key is the font filename, the value is the font source\n\tfonts map[string]string\n\tidentifier string\n\t\/\/ The key is the image filename, the value is the image source\n\timages map[string]string\n\t\/\/ Language\n\tlang string\n\t\/\/ Page progression direction\n\tppd string\n\t\/\/ The package file (package.opf)\n\tpkg *pkg\n\tsections []epubSection\n\ttitle string\n\t\/\/ Table of contents\n\ttoc *toc\n}\n\ntype epubCover struct {\n\tcssFilename string\n\tcssTempFile string\n\timageFilename string\n\txhtmlFilename string\n}\n\ntype epubSection struct {\n\tfilename string\n\txhtml *xhtml\n}\n\n\/\/ NewEpub returns a new Epub.\nfunc NewEpub(title string) *Epub {\n\te := &Epub{}\n\te.cover = &epubCover{\n\t\tcssFilename: \"\",\n\t\tcssTempFile: \"\",\n\t\timageFilename: \"\",\n\t\txhtmlFilename: \"\",\n\t}\n\te.css = make(map[string]string)\n\te.fonts = make(map[string]string)\n\te.images = make(map[string]string)\n\te.pkg = newPackage()\n\te.toc = newToc()\n\t\/\/ Set minimal required attributes\n\te.SetIdentifier(urnUUIDPrefix + uuid.NewV4().String())\n\te.SetLang(defaultEpubLang)\n\te.SetPpd(defaultPpd)\n\te.SetTitle(title)\n\n\treturn e\n}\n\n\/\/ AddCSS adds a CSS file to the EPUB and returns a relative path to the CSS\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/CSSFolderName\/internalFilename\n\/\/\n\/\/ The CSS source should either be a URL or a path to a local file; in either\n\/\/ case, the CSS file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the CSS file in the EPUB\n\/\/ and must be unique among all CSS files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddCSS(source string, internalFilename string) (string, error) {\n\treturn addMedia(source, internalFilename, cssFileFormat, CSSFolderName, e.css)\n}\n\n\/\/ AddFont adds a font file to the EPUB and returns a relative path to the font\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/FontFolderName\/internalFilename\n\/\/\n\/\/ The font source should either be a URL or a path to a local file; in either\n\/\/ case, the font file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the font file in the EPUB\n\/\/ and must be unique among all font files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddFont(source string, internalFilename string) (string, error) {\n\treturn addMedia(source, internalFilename, fontFileFormat, FontFolderName, e.fonts)\n}\n\n\/\/ AddImage adds an image to the EPUB and returns a relative path to the image\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/ImageFolderName\/internalFilename\n\/\/\n\/\/ The image source should either be a URL or a path to a local file; in either\n\/\/ case, the image file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the image file in the EPUB\n\/\/ and must be unique among all image files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddImage(source string, imageFilename string) (string, error) {\n\treturn addMedia(source, imageFilename, imageFileFormat, ImageFolderName, e.images)\n}\n\n\/\/ AddSection adds a new section (chapter, etc) to the EPUB and returns a\n\/\/ relative path to the section that can be used from another section (for\n\/\/ links).\n\/\/\n\/\/ The body must be valid XHTML that will go between the <body> tags of the\n\/\/ section XHTML file. The content will not be validated.\n\/\/\n\/\/ The title will be used for the table of contents. The section will be shown\n\/\/ in the table of contents in the same order it was added to the EPUB. The\n\/\/ title is optional; if no title is provided, the section will not be added to\n\/\/ the table of contents.\n\/\/\n\/\/ The internal filename will be used when storing the section file in the EPUB\n\/\/ and must be unique among all section files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\n\/\/\n\/\/ The internal path to an already-added CSS file (as returned by AddCSS) to be\n\/\/ used for the section is optional.\nfunc (e *Epub) AddSection(body string, sectionTitle string, internalFilename string, internalCSSPath string) (string, error) {\n\t\/\/ Generate a filename if one isn't provided\n\tif internalFilename == \"\" {\n\t\tinternalFilename = fmt.Sprintf(sectionFileFormat, len(e.sections)+1)\n\t}\n\n\tfor _, section := range e.sections {\n\t\tif section.filename == internalFilename {\n\t\t\treturn \"\", ErrFilenameAlreadyUsed\n\t\t}\n\t}\n\n\tx := newXhtml(body)\n\tx.setTitle(sectionTitle)\n\n\tif internalCSSPath != \"\" {\n\t\tx.setCSS(internalCSSPath)\n\t}\n\n\ts := epubSection{\n\t\tfilename: internalFilename,\n\t\txhtml: x,\n\t}\n\te.sections = append(e.sections, s)\n\n\treturn internalFilename, nil\n}\n\n\/\/ Author returns the author of the EPUB.\nfunc (e *Epub) Author() string {\n\treturn e.author\n}\n\n\/\/ Identifier returns the unique identifier of the EPUB.\nfunc (e *Epub) Identifier() string {\n\treturn e.identifier\n}\n\n\/\/ Lang returns the language of the EPUB.\nfunc (e *Epub) Lang() string {\n\treturn e.lang\n}\n\n\/\/ SetAuthor sets the author of the EPUB.\nfunc (e *Epub) SetAuthor(author string) {\n\te.author = author\n\te.pkg.setAuthor(author)\n}\n\n\/\/ SetCover sets the cover page for the EPUB using the provided image source and\n\/\/ optional CSS.\n\/\/\n\/\/ The internal path to an already-added image file (as returned by AddImage) is\n\/\/ required.\n\/\/\n\/\/ The internal path to an already-added CSS file (as returned by AddCSS) to be\n\/\/ used for the cover is optional. If the CSS path isn't provided, default CSS\n\/\/ will be used.\nfunc (e *Epub) SetCover(internalImagePath string, internalCSSPath string) {\n\t\/\/ If a cover already exists\n\tif e.cover.xhtmlFilename != \"\" {\n\t\t\/\/ Remove the xhtml file\n\t\tfor i, section := range e.sections {\n\t\t\tif section.filename == e.cover.xhtmlFilename {\n\t\t\t\te.sections = append(e.sections[:i], e.sections[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Remove the image\n\t\tdelete(e.images, e.cover.imageFilename)\n\n\t\t\/\/ Remove the CSS\n\t\tdelete(e.css, e.cover.cssFilename)\n\n\t\tif e.cover.cssTempFile != \"\" {\n\t\t\tos.Remove(e.cover.cssTempFile)\n\t\t}\n\t}\n\n\te.cover.imageFilename = filepath.Base(internalImagePath)\n\n\t\/\/ Use default cover stylesheet if one isn't provided\n\tif internalCSSPath == \"\" {\n\t\t\/\/ Create a temporary file to hold the default cover CSS\n\t\ttempFile, err := ioutil.TempFile(\"\", tempDirPrefix)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error creating temp file: %s\", err))\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := tempFile.Close(); err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error closing temp file: %s\", err))\n\t\t\t}\n\t\t}()\n\t\te.cover.cssTempFile = tempFile.Name()\n\n\t\t\/\/ Write the default cover CSS to the temp file\n\t\tif _, err = tempFile.WriteString(defaultCoverCSSContent); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error writing CSS file: %s\", err))\n\t\t}\n\n\t\tinternalCSSPath, err = e.AddCSS(e.cover.cssTempFile, defaultCoverCSSFilename)\n\t\t\/\/ If that doesn't work, generate a filename\n\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\tcoverCSSFilename := fmt.Sprintf(\n\t\t\t\tcssFileFormat,\n\t\t\t\tlen(e.css)+1,\n\t\t\t\t\".css\",\n\t\t\t)\n\n\t\t\tinternalCSSPath, err = e.AddCSS(e.cover.cssTempFile, coverCSSFilename)\n\t\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\t\t\/\/ This shouldn't cause an error\n\t\t\t\tpanic(fmt.Sprintf(\"Error adding default cover CSS file: %s\", err))\n\t\t\t}\n\t\t}\n\t\tif err != nil && err != ErrFilenameAlreadyUsed {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\te.cover.cssFilename = filepath.Base(internalCSSPath)\n\n\tcoverBody := fmt.Sprintf(defaultCoverBody, internalImagePath)\n\t\/\/ Title won't be used since the cover won't be added to the TOC\n\t\/\/ First try to use the default cover filename\n\tcoverPath, err := e.AddSection(coverBody, \"\", defaultCoverXhtmlFilename, internalCSSPath)\n\t\/\/ If that doesn't work, generate a filename\n\tif err == ErrFilenameAlreadyUsed {\n\t\tcoverPath, err = e.AddSection(coverBody, \"\", \"\", internalCSSPath)\n\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\t\/\/ This shouldn't cause an error since we're not specifying a filename\n\t\t\tpanic(fmt.Sprintf(\"Error adding default cover XHTML file: %s\", err))\n\t\t}\n\t}\n\te.cover.xhtmlFilename = filepath.Base(coverPath)\n}\n\n\/\/ SetIdentifier sets the unique identifier of the EPUB, such as a UUID, DOI,\n\/\/ ISBN or ISSN. If no identifier is set, a UUID will be automatically\n\/\/ generated.\nfunc (e *Epub) SetIdentifier(identifier string) {\n\te.identifier = identifier\n\te.pkg.setIdentifier(identifier)\n\te.toc.setIdentifier(identifier)\n}\n\n\/\/ SetLang sets the language of the EPUB.\nfunc (e *Epub) SetLang(lang string) {\n\te.lang = lang\n\te.pkg.setLang(lang)\n}\n\nfunc (e *Epub) SetPpd(direction string) {\n\te.ppd = direction\n\te.pkg.setPpd(direction)\n}\n\n\/\/ SetTitle sets the title of the EPUB.\nfunc (e *Epub) SetTitle(title string) {\n\te.title = title\n\te.pkg.setTitle(title)\n\te.toc.setTitle(title)\n}\n\n\/\/ Title returns the title of the EPUB.\nfunc (e *Epub) Title() string {\n\treturn e.title\n}\n\n\/\/ Add a media file to the EPUB and return the path relative to the EPUB section\n\/\/ files\nfunc addMedia(source string, internalFilename string, mediaFileFormat string, mediaFolderName string, mediaMap map[string]string) (string, error) {\n\t\/\/ Make sure the source file is valid before proceeding\n\tif isFileSourceValid(source) == false {\n\t\treturn \"\", ErrRetrievingFile\n\t}\n\n\tif internalFilename == \"\" {\n\t\t\/\/ If a filename isn't provided, use the filename from the source\n\t\tinternalFilename = filepath.Base(source)\n\t\t\/\/ If that's already used, try to generate a unique filename\n\t\tif _, ok := mediaMap[internalFilename]; ok {\n\t\t\tinternalFilename = fmt.Sprintf(\n\t\t\t\tmediaFileFormat,\n\t\t\t\tlen(mediaMap)+1,\n\t\t\t\tstrings.ToLower(filepath.Ext(source)),\n\t\t\t)\n\t\t}\n\t}\n\n\tif _, ok := mediaMap[internalFilename]; ok {\n\t\treturn \"\", ErrFilenameAlreadyUsed\n\t}\n\n\tmediaMap[internalFilename] = source\n\n\treturn filepath.Join(\n\t\t\"..\",\n\t\tmediaFolderName,\n\t\tinternalFilename,\n\t), nil\n}\n\nfunc isFileSourceValid(source string) bool {\n\tu, err := url.Parse(source)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tvar r io.ReadCloser\n\tvar resp *http.Response\n\t\/\/ If it's a URL\n\tif u.Scheme == \"http\" || u.Scheme == \"https\" {\n\t\tresp, err = http.Get(source)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tr = resp.Body\n\n\t\t\/\/ Otherwise, assume it's a local file\n\t} else {\n\t\tr, err = os.Open(source)\n\t}\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer func() {\n\t\tif err := r.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn true\n}\n<commit_msg>Add doc to epub.SetPpd()<commit_after>\/*\nPackage epub generates valid EPUB 3.0 files with additional EPUB 2.0 table of\ncontents (as seen here: https:\/\/github.com\/bmaupin\/epub-samples) for maximum\ncompatibility.\n\nBasic usage:\n\n\t\/\/ Create a new EPUB\n\te := epub.NewEpub(\"My title\")\n\n\t\/\/ Set the author\n\te.SetAuthor(\"Hingle McCringleberry\")\n\n\t\/\/ Add a section\n\tsection1Body := `<h1>Section 1<\/h1>\n\t<p>This is a paragraph.<\/p>`\n\te.AddSection(section1Body, \"Section 1\", \"\", \"\")\n\n\t\/\/ Write the EPUB\n\terr = e.Write(\"My EPUB.epub\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t}\n\n*\/\npackage epub\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\n\/\/ ErrFilenameAlreadyUsed is thrown by AddCSS, AddFont, AddImage, or AddSection\n\/\/ if the same filename is used more than once\nvar ErrFilenameAlreadyUsed = errors.New(\"Filename already used\")\n\n\/\/ ErrRetrievingFile is thrown by AddCSS, AddFont, or AddImage if there was a\n\/\/ problem retrieving the source file that was provided\nvar ErrRetrievingFile = errors.New(\"Error retrieving file from source\")\n\n\/\/ Folder names used for resources inside the EPUB\nconst (\n\tCSSFolderName = \"css\"\n\tFontFolderName = \"fonts\"\n\tImageFolderName = \"images\"\n)\n\nconst (\n\tcssFileFormat = \"css%04d%s\"\n\tdefaultCoverBody = `<img src=\"%s\" alt=\"Cover Image\" \/>`\n\tdefaultCoverCSSContent = `body {\n background-color: #FFFFFF;\n margin-bottom: 0px;\n margin-left: 0px;\n margin-right: 0px;\n margin-top: 0px;\n text-align: center;\n}\nimg {\n max-height: 100%;\n max-width: 100%;\n}\n`\n\tdefaultCoverCSSFilename = \"cover.css\"\n\tdefaultCoverCSSSource = \"cover.css\"\n\tdefaultCoverImgFormat = \"cover%s\"\n\tdefaultCoverXhtmlFilename = \"cover.xhtml\"\n\tdefaultEpubLang = \"en\"\n\tdefaultPpd = \"ltr\"\n\tfontFileFormat = \"font%04d%s\"\n\timageFileFormat = \"image%04d%s\"\n\tsectionFileFormat = \"section%04d.xhtml\"\n\turnUUIDPrefix = \"urn:uuid:\"\n)\n\n\/\/ Epub implements an EPUB file.\ntype Epub struct {\n\tauthor string\n\tcover *epubCover\n\t\/\/ The key is the css filename, the value is the css source\n\tcss map[string]string\n\t\/\/ The key is the font filename, the value is the font source\n\tfonts map[string]string\n\tidentifier string\n\t\/\/ The key is the image filename, the value is the image source\n\timages map[string]string\n\t\/\/ Language\n\tlang string\n\t\/\/ Page progression direction\n\tppd string\n\t\/\/ The package file (package.opf)\n\tpkg *pkg\n\tsections []epubSection\n\ttitle string\n\t\/\/ Table of contents\n\ttoc *toc\n}\n\ntype epubCover struct {\n\tcssFilename string\n\tcssTempFile string\n\timageFilename string\n\txhtmlFilename string\n}\n\ntype epubSection struct {\n\tfilename string\n\txhtml *xhtml\n}\n\n\/\/ NewEpub returns a new Epub.\nfunc NewEpub(title string) *Epub {\n\te := &Epub{}\n\te.cover = &epubCover{\n\t\tcssFilename: \"\",\n\t\tcssTempFile: \"\",\n\t\timageFilename: \"\",\n\t\txhtmlFilename: \"\",\n\t}\n\te.css = make(map[string]string)\n\te.fonts = make(map[string]string)\n\te.images = make(map[string]string)\n\te.pkg = newPackage()\n\te.toc = newToc()\n\t\/\/ Set minimal required attributes\n\te.SetIdentifier(urnUUIDPrefix + uuid.NewV4().String())\n\te.SetLang(defaultEpubLang)\n\te.SetPpd(defaultPpd)\n\te.SetTitle(title)\n\n\treturn e\n}\n\n\/\/ AddCSS adds a CSS file to the EPUB and returns a relative path to the CSS\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/CSSFolderName\/internalFilename\n\/\/\n\/\/ The CSS source should either be a URL or a path to a local file; in either\n\/\/ case, the CSS file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the CSS file in the EPUB\n\/\/ and must be unique among all CSS files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddCSS(source string, internalFilename string) (string, error) {\n\treturn addMedia(source, internalFilename, cssFileFormat, CSSFolderName, e.css)\n}\n\n\/\/ AddFont adds a font file to the EPUB and returns a relative path to the font\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/FontFolderName\/internalFilename\n\/\/\n\/\/ The font source should either be a URL or a path to a local file; in either\n\/\/ case, the font file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the font file in the EPUB\n\/\/ and must be unique among all font files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddFont(source string, internalFilename string) (string, error) {\n\treturn addMedia(source, internalFilename, fontFileFormat, FontFolderName, e.fonts)\n}\n\n\/\/ AddImage adds an image to the EPUB and returns a relative path to the image\n\/\/ file that can be used in EPUB sections in the format:\n\/\/ ..\/ImageFolderName\/internalFilename\n\/\/\n\/\/ The image source should either be a URL or a path to a local file; in either\n\/\/ case, the image file will be retrieved and stored in the EPUB.\n\/\/\n\/\/ The internal filename will be used when storing the image file in the EPUB\n\/\/ and must be unique among all image files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\nfunc (e *Epub) AddImage(source string, imageFilename string) (string, error) {\n\treturn addMedia(source, imageFilename, imageFileFormat, ImageFolderName, e.images)\n}\n\n\/\/ AddSection adds a new section (chapter, etc) to the EPUB and returns a\n\/\/ relative path to the section that can be used from another section (for\n\/\/ links).\n\/\/\n\/\/ The body must be valid XHTML that will go between the <body> tags of the\n\/\/ section XHTML file. The content will not be validated.\n\/\/\n\/\/ The title will be used for the table of contents. The section will be shown\n\/\/ in the table of contents in the same order it was added to the EPUB. The\n\/\/ title is optional; if no title is provided, the section will not be added to\n\/\/ the table of contents.\n\/\/\n\/\/ The internal filename will be used when storing the section file in the EPUB\n\/\/ and must be unique among all section files. If the same filename is used more\n\/\/ than once, ErrFilenameAlreadyUsed will be returned. The internal filename is\n\/\/ optional; if no filename is provided, one will be generated.\n\/\/\n\/\/ The internal path to an already-added CSS file (as returned by AddCSS) to be\n\/\/ used for the section is optional.\nfunc (e *Epub) AddSection(body string, sectionTitle string, internalFilename string, internalCSSPath string) (string, error) {\n\t\/\/ Generate a filename if one isn't provided\n\tif internalFilename == \"\" {\n\t\tinternalFilename = fmt.Sprintf(sectionFileFormat, len(e.sections)+1)\n\t}\n\n\tfor _, section := range e.sections {\n\t\tif section.filename == internalFilename {\n\t\t\treturn \"\", ErrFilenameAlreadyUsed\n\t\t}\n\t}\n\n\tx := newXhtml(body)\n\tx.setTitle(sectionTitle)\n\n\tif internalCSSPath != \"\" {\n\t\tx.setCSS(internalCSSPath)\n\t}\n\n\ts := epubSection{\n\t\tfilename: internalFilename,\n\t\txhtml: x,\n\t}\n\te.sections = append(e.sections, s)\n\n\treturn internalFilename, nil\n}\n\n\/\/ Author returns the author of the EPUB.\nfunc (e *Epub) Author() string {\n\treturn e.author\n}\n\n\/\/ Identifier returns the unique identifier of the EPUB.\nfunc (e *Epub) Identifier() string {\n\treturn e.identifier\n}\n\n\/\/ Lang returns the language of the EPUB.\nfunc (e *Epub) Lang() string {\n\treturn e.lang\n}\n\n\/\/ SetAuthor sets the author of the EPUB.\nfunc (e *Epub) SetAuthor(author string) {\n\te.author = author\n\te.pkg.setAuthor(author)\n}\n\n\/\/ SetCover sets the cover page for the EPUB using the provided image source and\n\/\/ optional CSS.\n\/\/\n\/\/ The internal path to an already-added image file (as returned by AddImage) is\n\/\/ required.\n\/\/\n\/\/ The internal path to an already-added CSS file (as returned by AddCSS) to be\n\/\/ used for the cover is optional. If the CSS path isn't provided, default CSS\n\/\/ will be used.\nfunc (e *Epub) SetCover(internalImagePath string, internalCSSPath string) {\n\t\/\/ If a cover already exists\n\tif e.cover.xhtmlFilename != \"\" {\n\t\t\/\/ Remove the xhtml file\n\t\tfor i, section := range e.sections {\n\t\t\tif section.filename == e.cover.xhtmlFilename {\n\t\t\t\te.sections = append(e.sections[:i], e.sections[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Remove the image\n\t\tdelete(e.images, e.cover.imageFilename)\n\n\t\t\/\/ Remove the CSS\n\t\tdelete(e.css, e.cover.cssFilename)\n\n\t\tif e.cover.cssTempFile != \"\" {\n\t\t\tos.Remove(e.cover.cssTempFile)\n\t\t}\n\t}\n\n\te.cover.imageFilename = filepath.Base(internalImagePath)\n\n\t\/\/ Use default cover stylesheet if one isn't provided\n\tif internalCSSPath == \"\" {\n\t\t\/\/ Create a temporary file to hold the default cover CSS\n\t\ttempFile, err := ioutil.TempFile(\"\", tempDirPrefix)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error creating temp file: %s\", err))\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := tempFile.Close(); err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error closing temp file: %s\", err))\n\t\t\t}\n\t\t}()\n\t\te.cover.cssTempFile = tempFile.Name()\n\n\t\t\/\/ Write the default cover CSS to the temp file\n\t\tif _, err = tempFile.WriteString(defaultCoverCSSContent); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error writing CSS file: %s\", err))\n\t\t}\n\n\t\tinternalCSSPath, err = e.AddCSS(e.cover.cssTempFile, defaultCoverCSSFilename)\n\t\t\/\/ If that doesn't work, generate a filename\n\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\tcoverCSSFilename := fmt.Sprintf(\n\t\t\t\tcssFileFormat,\n\t\t\t\tlen(e.css)+1,\n\t\t\t\t\".css\",\n\t\t\t)\n\n\t\t\tinternalCSSPath, err = e.AddCSS(e.cover.cssTempFile, coverCSSFilename)\n\t\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\t\t\/\/ This shouldn't cause an error\n\t\t\t\tpanic(fmt.Sprintf(\"Error adding default cover CSS file: %s\", err))\n\t\t\t}\n\t\t}\n\t\tif err != nil && err != ErrFilenameAlreadyUsed {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\te.cover.cssFilename = filepath.Base(internalCSSPath)\n\n\tcoverBody := fmt.Sprintf(defaultCoverBody, internalImagePath)\n\t\/\/ Title won't be used since the cover won't be added to the TOC\n\t\/\/ First try to use the default cover filename\n\tcoverPath, err := e.AddSection(coverBody, \"\", defaultCoverXhtmlFilename, internalCSSPath)\n\t\/\/ If that doesn't work, generate a filename\n\tif err == ErrFilenameAlreadyUsed {\n\t\tcoverPath, err = e.AddSection(coverBody, \"\", \"\", internalCSSPath)\n\t\tif err == ErrFilenameAlreadyUsed {\n\t\t\t\/\/ This shouldn't cause an error since we're not specifying a filename\n\t\t\tpanic(fmt.Sprintf(\"Error adding default cover XHTML file: %s\", err))\n\t\t}\n\t}\n\te.cover.xhtmlFilename = filepath.Base(coverPath)\n}\n\n\/\/ SetIdentifier sets the unique identifier of the EPUB, such as a UUID, DOI,\n\/\/ ISBN or ISSN. If no identifier is set, a UUID will be automatically\n\/\/ generated.\nfunc (e *Epub) SetIdentifier(identifier string) {\n\te.identifier = identifier\n\te.pkg.setIdentifier(identifier)\n\te.toc.setIdentifier(identifier)\n}\n\n\/\/ SetLang sets the language of the EPUB.\nfunc (e *Epub) SetLang(lang string) {\n\te.lang = lang\n\te.pkg.setLang(lang)\n}\n\n\/\/ SetPpd sets the page progression direction of the EPUB.\nfunc (e *Epub) SetPpd(direction string) {\n\te.ppd = direction\n\te.pkg.setPpd(direction)\n}\n\n\/\/ SetTitle sets the title of the EPUB.\nfunc (e *Epub) SetTitle(title string) {\n\te.title = title\n\te.pkg.setTitle(title)\n\te.toc.setTitle(title)\n}\n\n\/\/ Title returns the title of the EPUB.\nfunc (e *Epub) Title() string {\n\treturn e.title\n}\n\n\/\/ Add a media file to the EPUB and return the path relative to the EPUB section\n\/\/ files\nfunc addMedia(source string, internalFilename string, mediaFileFormat string, mediaFolderName string, mediaMap map[string]string) (string, error) {\n\t\/\/ Make sure the source file is valid before proceeding\n\tif isFileSourceValid(source) == false {\n\t\treturn \"\", ErrRetrievingFile\n\t}\n\n\tif internalFilename == \"\" {\n\t\t\/\/ If a filename isn't provided, use the filename from the source\n\t\tinternalFilename = filepath.Base(source)\n\t\t\/\/ If that's already used, try to generate a unique filename\n\t\tif _, ok := mediaMap[internalFilename]; ok {\n\t\t\tinternalFilename = fmt.Sprintf(\n\t\t\t\tmediaFileFormat,\n\t\t\t\tlen(mediaMap)+1,\n\t\t\t\tstrings.ToLower(filepath.Ext(source)),\n\t\t\t)\n\t\t}\n\t}\n\n\tif _, ok := mediaMap[internalFilename]; ok {\n\t\treturn \"\", ErrFilenameAlreadyUsed\n\t}\n\n\tmediaMap[internalFilename] = source\n\n\treturn filepath.Join(\n\t\t\"..\",\n\t\tmediaFolderName,\n\t\tinternalFilename,\n\t), nil\n}\n\nfunc isFileSourceValid(source string) bool {\n\tu, err := url.Parse(source)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tvar r io.ReadCloser\n\tvar resp *http.Response\n\t\/\/ If it's a URL\n\tif u.Scheme == \"http\" || u.Scheme == \"https\" {\n\t\tresp, err = http.Get(source)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tr = resp.Body\n\n\t\t\/\/ Otherwise, assume it's a local file\n\t} else {\n\t\tr, err = os.Open(source)\n\t}\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer func() {\n\t\tif err := r.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Periph Authors. All rights reserved.\n\/\/ Use of this source code is governed under the Apache License, Version 2.0\n\/\/ that can be found in the LICENSE file.\n\npackage cpu\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestMaxSpeed(t *testing.T) {\n\ts := MaxSpeed()\n\tif isLinux {\n\t\tif s == 0 {\n\t\t\tt.Fatal(\"MaxSpeed() is supported on linux\")\n\t\t}\n\t} else {\n\t\tif s != 0 {\n\t\t\tt.Fatal(\"MaxSpeed() is not supported on non-linux\")\n\t\t}\n\t}\n}\n\nfunc TestNanospin(t *testing.T) {\n\tNanospin(time.Microsecond)\n}\n<commit_msg>Fix test on travis.<commit_after>\/\/ Copyright 2016 The Periph Authors. All rights reserved.\n\/\/ Use of this source code is governed under the Apache License, Version 2.0\n\/\/ that can be found in the LICENSE file.\n\npackage cpu\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestMaxSpeed(t *testing.T) {\n\ts := MaxSpeed()\n\tif isLinux {\n\t\t\/\/ MaxSpeed() is 0 when running in a docker container, i.e. travis-ci.org.\n\t} else {\n\t\tif s != 0 {\n\t\t\tt.Fatal(\"MaxSpeed() is not supported on non-linux\")\n\t\t}\n\t}\n}\n\nfunc TestNanospin(t *testing.T) {\n\tNanospin(time.Microsecond)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package highlight_go provides a syntax highlighter for Go, using go\/scanner.\npackage highlight_go\n\nimport (\n\t\"go\/scanner\"\n\t\"go\/token\"\n\t\"io\"\n\n\t\"github.com\/sourcegraph\/annotate\"\n\t\"github.com\/sourcegraph\/syntaxhighlight\"\n)\n\nfunc tokenKind(tok token.Token, lit string) int {\n\tswitch {\n\tcase tok.IsKeyword() || (tok.IsOperator() && tok < token.LPAREN):\n\t\treturn syntaxhighlight.KEYWORD\n\n\t\/\/ Literals.\n\tcase tok == token.INT || tok == token.FLOAT || tok == token.IMAG:\n\t\treturn syntaxhighlight.DECIMAL\n\tcase tok == token.STRING || tok == token.CHAR:\n\t\treturn syntaxhighlight.STRING\n\tcase lit == \"true\" || lit == \"false\" || lit == \"iota\" || lit == \"nil\":\n\t\treturn syntaxhighlight.LITERAL\n\n\tcase tok == token.COMMENT:\n\t\treturn syntaxhighlight.COMMENT\n\tdefault:\n\t\treturn syntaxhighlight.PLAINTEXT\n\t}\n}\n\nfunc Print(src []byte, w io.Writer, p syntaxhighlight.Printer) error {\n\tvar s scanner.Scanner\n\tfset := token.NewFileSet()\n\tfile := fset.AddFile(\"\", fset.Base(), len(src))\n\ts.Init(file, src, nil, scanner.ScanComments)\n\n\tvar lastOffset int\n\n\tfor {\n\t\tpos, tok, lit := s.Scan()\n\t\tif tok == token.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\toffset := int(fset.Position(pos).Offset)\n\n\t\tvar tokString string\n\t\tif lit != \"\" {\n\t\t\ttokString = lit\n\t\t} else {\n\t\t\ttokString = tok.String()\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\t\/\/if tok == token.SEMICOLON {\n\t\tif tok == token.SEMICOLON && lit == \"\\n\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\twhitespace := string(src[lastOffset:offset])\n\t\tlastOffset = offset + len(tokString)\n\t\ttokString = whitespace + tokString\n\n\t\terr := p.Print(w, tokenKind(tok, lit), tokString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc Annotate(src []byte, a syntaxhighlight.Annotator) (annotate.Annotations, error) {\n\tvar s scanner.Scanner\n\tfset := token.NewFileSet()\n\tfile := fset.AddFile(\"\", fset.Base(), len(src))\n\ts.Init(file, src, nil, scanner.ScanComments)\n\n\tvar anns annotate.Annotations\n\n\tfor {\n\t\tpos, tok, lit := s.Scan()\n\t\tif tok == token.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\toffset := int(fset.Position(pos).Offset)\n\n\t\tvar tokString string\n\t\tif lit != \"\" {\n\t\t\ttokString = lit\n\t\t} else {\n\t\t\ttokString = tok.String()\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\t\/\/if tok == token.SEMICOLON {\n\t\tif tok == token.SEMICOLON && lit == \"\\n\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tann, err := a.Annotate(offset, tokenKind(tok, lit), tokString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ann != nil {\n\t\t\tanns = append(anns, ann)\n\t\t}\n\t}\n\n\treturn anns, nil\n}\n<commit_msg>Highlight Go chars the same as numbers.<commit_after>\/\/ Package highlight_go provides a syntax highlighter for Go, using go\/scanner.\npackage highlight_go\n\nimport (\n\t\"go\/scanner\"\n\t\"go\/token\"\n\t\"io\"\n\n\t\"github.com\/sourcegraph\/annotate\"\n\t\"github.com\/sourcegraph\/syntaxhighlight\"\n)\n\nfunc tokenKind(tok token.Token, lit string) int {\n\tswitch {\n\tcase tok.IsKeyword() || (tok.IsOperator() && tok <= token.ELLIPSIS):\n\t\treturn syntaxhighlight.KEYWORD\n\n\t\/\/ Literals.\n\tcase tok == token.INT || tok == token.FLOAT || tok == token.IMAG || tok == token.CHAR:\n\t\treturn syntaxhighlight.DECIMAL\n\tcase tok == token.STRING:\n\t\treturn syntaxhighlight.STRING\n\tcase lit == \"true\" || lit == \"false\" || lit == \"iota\" || lit == \"nil\":\n\t\treturn syntaxhighlight.LITERAL\n\n\tcase tok == token.COMMENT:\n\t\treturn syntaxhighlight.COMMENT\n\tdefault:\n\t\treturn syntaxhighlight.PLAINTEXT\n\t}\n}\n\nfunc Print(src []byte, w io.Writer, p syntaxhighlight.Printer) error {\n\tvar s scanner.Scanner\n\tfset := token.NewFileSet()\n\tfile := fset.AddFile(\"\", fset.Base(), len(src))\n\ts.Init(file, src, nil, scanner.ScanComments)\n\n\tvar lastOffset int\n\n\tfor {\n\t\tpos, tok, lit := s.Scan()\n\t\tif tok == token.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\toffset := int(fset.Position(pos).Offset)\n\n\t\tvar tokString string\n\t\tif lit != \"\" {\n\t\t\ttokString = lit\n\t\t} else {\n\t\t\ttokString = tok.String()\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\t\/\/if tok == token.SEMICOLON {\n\t\tif tok == token.SEMICOLON && lit == \"\\n\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\twhitespace := string(src[lastOffset:offset])\n\t\tlastOffset = offset + len(tokString)\n\t\ttokString = whitespace + tokString\n\n\t\terr := p.Print(w, tokenKind(tok, lit), tokString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc Annotate(src []byte, a syntaxhighlight.Annotator) (annotate.Annotations, error) {\n\tvar s scanner.Scanner\n\tfset := token.NewFileSet()\n\tfile := fset.AddFile(\"\", fset.Base(), len(src))\n\ts.Init(file, src, nil, scanner.ScanComments)\n\n\tvar anns annotate.Annotations\n\n\tfor {\n\t\tpos, tok, lit := s.Scan()\n\t\tif tok == token.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\toffset := int(fset.Position(pos).Offset)\n\n\t\tvar tokString string\n\t\tif lit != \"\" {\n\t\t\ttokString = lit\n\t\t} else {\n\t\t\ttokString = tok.String()\n\t\t}\n\n\t\t\/\/ TODO: Clean this up.\n\t\t\/\/if tok == token.SEMICOLON {\n\t\tif tok == token.SEMICOLON && lit == \"\\n\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tann, err := a.Annotate(offset, tokenKind(tok, lit), tokString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ann != nil {\n\t\t\tanns = append(anns, ann)\n\t\t}\n\t}\n\n\treturn anns, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar execCommand = cli.Command{\n\tName: \"exec\",\n\tUsage: \"execute new process inside the container\",\n\tArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>\n\nWhere \"<container-id>\" is the name for the instance of the container and\n\"<command>\" is the command to be executed in the container.\n\"<command>\" can't be empty unless a \"-p\" flag provided.\n\nEXAMPLE:\nFor example, if the container is configured to run the linux ps command the\nfollowing will output a list of processes running in the container:\n\n # runc exec <container-id> ps`,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"console-socket\",\n\t\t\tUsage: \"path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cwd\",\n\t\t\tUsage: \"current working directory in the container\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"env, e\",\n\t\t\tUsage: \"set environment variables\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"tty, t\",\n\t\t\tUsage: \"allocate a pseudo-TTY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"user, u\",\n\t\t\tUsage: \"UID (format: <uid>[:<gid>])\",\n\t\t},\n\t\tcli.Int64SliceFlag{\n\t\t\tName: \"additional-gids, g\",\n\t\t\tUsage: \"additional gids\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"process, p\",\n\t\t\tUsage: \"path to the process.json\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"detach,d\",\n\t\t\tUsage: \"detach from the container's process\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pid-file\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"specify the file to write the process id to\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"process-label\",\n\t\t\tUsage: \"set the asm process label for the process commonly used with selinux\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"apparmor\",\n\t\t\tUsage: \"set the apparmor profile for the process\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-new-privs\",\n\t\t\tUsage: \"set the no new privileges value for the process\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"cap, c\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t\tUsage: \"add a capability to the bounding set for the process\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"preserve-fds\",\n\t\t\tUsage: \"Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total)\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\tif err := checkArgs(context, 1, minArgs); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := revisePidFile(context); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus, err := execProcess(context)\n\t\tif err == nil {\n\t\t\tos.Exit(status)\n\t\t}\n\t\tfatalWithCode(fmt.Errorf(\"exec failed: %w\", err), 255)\n\t\treturn nil \/\/ to satisfy the linter\n\t},\n\tSkipArgReorder: true,\n}\n\nfunc execProcess(context *cli.Context) (int, error) {\n\tcontainer, err := getContainer(context)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tstatus, err := container.Status()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif status == libcontainer.Stopped {\n\t\treturn -1, errors.New(\"cannot exec a container that has stopped\")\n\t}\n\tpath := context.String(\"process\")\n\tif path == \"\" && len(context.Args()) == 1 {\n\t\treturn -1, errors.New(\"process args cannot be empty\")\n\t}\n\tdetach := context.Bool(\"detach\")\n\tstate, err := container.State()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tbundle := utils.SearchLabels(state.Config.Labels, \"bundle\")\n\tp, err := getProcess(context, bundle)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tr := &runner{\n\t\tenableSubreaper: false,\n\t\tshouldDestroy: false,\n\t\tcontainer: container,\n\t\tconsoleSocket: context.String(\"console-socket\"),\n\t\tdetach: detach,\n\t\tpidFile: context.String(\"pid-file\"),\n\t\taction: CT_ACT_RUN,\n\t\tinit: false,\n\t\tpreserveFDs: context.Int(\"preserve-fds\"),\n\t}\n\treturn r.run(p)\n}\n\nfunc getProcess(context *cli.Context, bundle string) (*specs.Process, error) {\n\tif path := context.String(\"process\"); path != \"\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\tvar p specs.Process\n\t\tif err := json.NewDecoder(f).Decode(&p); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &p, validateProcessSpec(&p)\n\t}\n\t\/\/ process via cli flags\n\tif err := os.Chdir(bundle); err != nil {\n\t\treturn nil, err\n\t}\n\tspec, err := loadSpec(specConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp := spec.Process\n\tp.Args = context.Args()[1:]\n\t\/\/ override the cwd, if passed\n\tif context.String(\"cwd\") != \"\" {\n\t\tp.Cwd = context.String(\"cwd\")\n\t}\n\tif ap := context.String(\"apparmor\"); ap != \"\" {\n\t\tp.ApparmorProfile = ap\n\t}\n\tif l := context.String(\"process-label\"); l != \"\" {\n\t\tp.SelinuxLabel = l\n\t}\n\tif caps := context.StringSlice(\"cap\"); len(caps) > 0 {\n\t\tfor _, c := range caps {\n\t\t\tp.Capabilities.Bounding = append(p.Capabilities.Bounding, c)\n\t\t\tp.Capabilities.Inheritable = append(p.Capabilities.Inheritable, c)\n\t\t\tp.Capabilities.Effective = append(p.Capabilities.Effective, c)\n\t\t\tp.Capabilities.Permitted = append(p.Capabilities.Permitted, c)\n\t\t\tp.Capabilities.Ambient = append(p.Capabilities.Ambient, c)\n\t\t}\n\t}\n\t\/\/ append the passed env variables\n\tp.Env = append(p.Env, context.StringSlice(\"env\")...)\n\n\t\/\/ set the tty\n\tp.Terminal = false\n\tif context.IsSet(\"tty\") {\n\t\tp.Terminal = context.Bool(\"tty\")\n\t}\n\tif context.IsSet(\"no-new-privs\") {\n\t\tp.NoNewPrivileges = context.Bool(\"no-new-privs\")\n\t}\n\t\/\/ override the user, if passed\n\tif context.String(\"user\") != \"\" {\n\t\tu := strings.SplitN(context.String(\"user\"), \":\", 2)\n\t\tif len(u) > 1 {\n\t\t\tgid, err := strconv.Atoi(u[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"parsing %s as int for gid failed: %w\", u[1], err)\n\t\t\t}\n\t\t\tp.User.GID = uint32(gid)\n\t\t}\n\t\tuid, err := strconv.Atoi(u[0])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"parsing %s as int for uid failed: %w\", u[0], err)\n\t\t}\n\t\tp.User.UID = uint32(uid)\n\t}\n\tfor _, gid := range context.Int64Slice(\"additional-gids\") {\n\t\tif gid < 0 {\n\t\t\treturn nil, fmt.Errorf(\"additional-gids must be a positive number %d\", gid)\n\t\t}\n\t\tp.User.AdditionalGids = append(p.User.AdditionalGids, uint32(gid))\n\t}\n\treturn p, validateProcessSpec(p)\n}\n<commit_msg>exec.go: nit<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar execCommand = cli.Command{\n\tName: \"exec\",\n\tUsage: \"execute new process inside the container\",\n\tArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>\n\nWhere \"<container-id>\" is the name for the instance of the container and\n\"<command>\" is the command to be executed in the container.\n\"<command>\" can't be empty unless a \"-p\" flag provided.\n\nEXAMPLE:\nFor example, if the container is configured to run the linux ps command the\nfollowing will output a list of processes running in the container:\n\n # runc exec <container-id> ps`,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"console-socket\",\n\t\t\tUsage: \"path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cwd\",\n\t\t\tUsage: \"current working directory in the container\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"env, e\",\n\t\t\tUsage: \"set environment variables\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"tty, t\",\n\t\t\tUsage: \"allocate a pseudo-TTY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"user, u\",\n\t\t\tUsage: \"UID (format: <uid>[:<gid>])\",\n\t\t},\n\t\tcli.Int64SliceFlag{\n\t\t\tName: \"additional-gids, g\",\n\t\t\tUsage: \"additional gids\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"process, p\",\n\t\t\tUsage: \"path to the process.json\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"detach,d\",\n\t\t\tUsage: \"detach from the container's process\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pid-file\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"specify the file to write the process id to\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"process-label\",\n\t\t\tUsage: \"set the asm process label for the process commonly used with selinux\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"apparmor\",\n\t\t\tUsage: \"set the apparmor profile for the process\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-new-privs\",\n\t\t\tUsage: \"set the no new privileges value for the process\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"cap, c\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t\tUsage: \"add a capability to the bounding set for the process\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"preserve-fds\",\n\t\t\tUsage: \"Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total)\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\tif err := checkArgs(context, 1, minArgs); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := revisePidFile(context); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus, err := execProcess(context)\n\t\tif err == nil {\n\t\t\tos.Exit(status)\n\t\t}\n\t\tfatalWithCode(fmt.Errorf(\"exec failed: %w\", err), 255)\n\t\treturn nil \/\/ to satisfy the linter\n\t},\n\tSkipArgReorder: true,\n}\n\nfunc execProcess(context *cli.Context) (int, error) {\n\tcontainer, err := getContainer(context)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tstatus, err := container.Status()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif status == libcontainer.Stopped {\n\t\treturn -1, errors.New(\"cannot exec a container that has stopped\")\n\t}\n\tpath := context.String(\"process\")\n\tif path == \"\" && len(context.Args()) == 1 {\n\t\treturn -1, errors.New(\"process args cannot be empty\")\n\t}\n\tstate, err := container.State()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tbundle := utils.SearchLabels(state.Config.Labels, \"bundle\")\n\tp, err := getProcess(context, bundle)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tr := &runner{\n\t\tenableSubreaper: false,\n\t\tshouldDestroy: false,\n\t\tcontainer: container,\n\t\tconsoleSocket: context.String(\"console-socket\"),\n\t\tdetach: context.Bool(\"detach\"),\n\t\tpidFile: context.String(\"pid-file\"),\n\t\taction: CT_ACT_RUN,\n\t\tinit: false,\n\t\tpreserveFDs: context.Int(\"preserve-fds\"),\n\t}\n\treturn r.run(p)\n}\n\nfunc getProcess(context *cli.Context, bundle string) (*specs.Process, error) {\n\tif path := context.String(\"process\"); path != \"\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\tvar p specs.Process\n\t\tif err := json.NewDecoder(f).Decode(&p); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &p, validateProcessSpec(&p)\n\t}\n\t\/\/ process via cli flags\n\tif err := os.Chdir(bundle); err != nil {\n\t\treturn nil, err\n\t}\n\tspec, err := loadSpec(specConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp := spec.Process\n\tp.Args = context.Args()[1:]\n\t\/\/ override the cwd, if passed\n\tif context.String(\"cwd\") != \"\" {\n\t\tp.Cwd = context.String(\"cwd\")\n\t}\n\tif ap := context.String(\"apparmor\"); ap != \"\" {\n\t\tp.ApparmorProfile = ap\n\t}\n\tif l := context.String(\"process-label\"); l != \"\" {\n\t\tp.SelinuxLabel = l\n\t}\n\tif caps := context.StringSlice(\"cap\"); len(caps) > 0 {\n\t\tfor _, c := range caps {\n\t\t\tp.Capabilities.Bounding = append(p.Capabilities.Bounding, c)\n\t\t\tp.Capabilities.Inheritable = append(p.Capabilities.Inheritable, c)\n\t\t\tp.Capabilities.Effective = append(p.Capabilities.Effective, c)\n\t\t\tp.Capabilities.Permitted = append(p.Capabilities.Permitted, c)\n\t\t\tp.Capabilities.Ambient = append(p.Capabilities.Ambient, c)\n\t\t}\n\t}\n\t\/\/ append the passed env variables\n\tp.Env = append(p.Env, context.StringSlice(\"env\")...)\n\n\t\/\/ set the tty\n\tp.Terminal = false\n\tif context.IsSet(\"tty\") {\n\t\tp.Terminal = context.Bool(\"tty\")\n\t}\n\tif context.IsSet(\"no-new-privs\") {\n\t\tp.NoNewPrivileges = context.Bool(\"no-new-privs\")\n\t}\n\t\/\/ override the user, if passed\n\tif context.String(\"user\") != \"\" {\n\t\tu := strings.SplitN(context.String(\"user\"), \":\", 2)\n\t\tif len(u) > 1 {\n\t\t\tgid, err := strconv.Atoi(u[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"parsing %s as int for gid failed: %w\", u[1], err)\n\t\t\t}\n\t\t\tp.User.GID = uint32(gid)\n\t\t}\n\t\tuid, err := strconv.Atoi(u[0])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"parsing %s as int for uid failed: %w\", u[0], err)\n\t\t}\n\t\tp.User.UID = uint32(uid)\n\t}\n\tfor _, gid := range context.Int64Slice(\"additional-gids\") {\n\t\tif gid < 0 {\n\t\t\treturn nil, fmt.Errorf(\"additional-gids must be a positive number %d\", gid)\n\t\t}\n\t\tp.User.AdditionalGids = append(p.User.AdditionalGids, uint32(gid))\n\t}\n\treturn p, validateProcessSpec(p)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2019 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage auth\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/jwt\"\n)\n\nconst (\n\t\/\/ auth scopes needed by the program\n\tscopeDriveReadOnly = \"https:\/\/www.googleapis.com\/auth\/drive.readonly\"\n\n\t\/\/ program credentials for installed apps\n\tgoogClient = \"183908478743-e8rth9fbo7juk9eeivgp23asnt791g63.apps.googleusercontent.com\"\n\tgoogSecret = \"ljELuf5jUrzcOxZGL7OQfkIC\"\n\n\t\/\/ token providers\n\tProviderGoogle = \"goog\"\n\n\t\/\/ service account creds (plan B is to use a SA like GitWhisperer)\n\tqwiklabsServiceAccountCreds = \"qwiklabs-services-prod-e569bfaea3cd.json\"\n)\n\nvar (\n\tgoogleAuthConfig = oauth2.Config{\n\t\tClientID: googClient,\n\t\tClientSecret: googSecret,\n\t\tScopes: []string{scopeDriveReadOnly},\n\t\tRedirectURL: \"urn:ietf:wg:oauth:2.0:oob\",\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\t\t\tTokenURL: \"https:\/\/accounts.google.com\/o\/oauth2\/token\",\n\t\t},\n\t}\n)\n\ntype authorizationHandler func(conf *oauth2.Config) (*oauth2.Token, error)\n\ntype internalOptions struct {\n\tauthHandler authorizationHandler\n}\n\ntype Helper struct {\n\tauthToken string\n\tprovider string\n\tclient *http.Client\n\topts internalOptions\n}\n\nfunc NewHelper(at, p string, rt http.RoundTripper) (*Helper, error) {\n\treturn newHelper(at, p, rt, internalOptions{\n\t\tauthHandler: authorize,\n\t})\n}\n\nfunc newHelper(at, p string, rt http.RoundTripper, io internalOptions) (*Helper, error) {\n\th := Helper{\n\t\tauthToken: at,\n\t\tprovider: p,\n\t\topts: io,\n\t}\n\n\tvar err error\n\th.client, err = h.produceDriveClient(rt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &h, nil\n}\n\nfunc (h *Helper) DriveClient() *http.Client {\n\treturn h.client\n}\n\nfunc (h *Helper) produceDriveClient(rt http.RoundTripper) (*http.Client, error) {\n\tts, err := h.tokenSource()\n\t\/\/ ts, err := h.tokenSourceServiceAccount()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: ts,\n\t\t\tBase: rt,\n\t\t},\n\t}, nil\n}\n\n\/\/ Plan B\n\/\/ Creates a new oauth2.TokenSource from SA\n\/\/ Use the same SA as GitWhisperer\nfunc (h *Helper) tokenSourceServiceAccount() (oauth2.TokenSource, error) {\n l, err := tokenServiceAccountLocation()\n if err != nil {\n\t return nil, err\n }\n\n b, err := ioutil.ReadFile(l)\n if err != nil {\n\t return nil, err\n }\n\n var c = struct {\n Email string `json:\"client_email\"`\n PrivateKey string `json:\"private_key\"`\n TokenUri string `json:\"token_uri\"`\n }{}\n json.Unmarshal(b, &c)\n\n config := &jwt.Config{\n Email: c.Email,\n PrivateKey: []byte(c.PrivateKey),\n\tScopes: []string{scopeDriveReadOnly},\n TokenURL: c.TokenUri,\n }\n\n t := config.TokenSource(oauth2.NoContext)\n\n log.Printf(\"Plan B: attrs\\n%s, %s, %s\", c.Email, c.PrivateKey, c.TokenUri)\n\n token, err := t.Token()\n log.Printf(\"Plan B: token\\n%s\", token)\n\n return t, nil\n}\n\nfunc tokenServiceAccountLocation() (string, error) {\n\td := homedir()\n\tif d == \"\" {\n\t\tlog.Printf(\"WARNING: unable to identify user home dir\")\n\t}\n\td = path.Join(d, \".config\", \"claat\")\n\tif err := os.MkdirAll(d, 0700); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn path.Join(d, qwiklabsServiceAccountCreds), nil\n}\n\n\n\/\/ tokenSource creates a new oauth2.TokenSource backed by tokenRefresher,\n\/\/ using previously stored user credentials if available.\n\/\/ If authToken is not given at Helper init, we use the Google provider.\n\/\/ Otherwise, we use the auth config for the given provider.\nfunc (h *Helper) tokenSource() (oauth2.TokenSource, error) {\n\t\/\/ Create a static token source if we have an auth token.\n\tif h.authToken != \"\" {\n\t\treturn oauth2.StaticTokenSource(&oauth2.Token{\n\t\t\tAccessToken: h.authToken,\n\t\t}), nil\n\t}\n\n\t\/\/ Otherwise, use the Google provider.\n\tt, err := readToken(h.provider)\n\tif err != nil {\n\t\tt, err = h.opts.authHandler(&googleAuthConfig)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to obtain access token for %q: %#v\", h.provider, err)\n\t}\n\tcache := &cachedTokenSource{\n\t\tsrc: googleAuthConfig.TokenSource(context.Background(), t),\n\t\tprovider: h.provider,\n\t\tconfig: &googleAuthConfig,\n\t}\n\treturn oauth2.ReuseTokenSource(nil, cache), nil\n}\n\nfunc readToken(provider string) (*oauth2.Token, error) {\n\tl, err := tokenLocation(provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadFile(l)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := &oauth2.Token{}\n\treturn t, json.Unmarshal(b, t)\n}\n\n\/\/ writeToken serializes token tok to local disk.\nfunc writeToken(provider string, tok *oauth2.Token) error {\n\tl, err := tokenLocation(provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw, err := os.Create(l)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\tb, err := json.MarshalIndent(tok, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(b)\n\treturn err\n}\n\n\/\/ tokenLocation returns a local file path, suitable for storing user credentials.\n\/\/ dtc: 3\/11\/2022\n\/\/ copy cred file to \/tmp so AppEngine Flex can read\/write\nfunc tokenLocation(provider string) (string, error) {\n\tfilename := provider+\"-cred.json\"\n\n\t\/\/ only copy if \/tmp\/goog-cred.json does not exist\n\tif exists, _ := FileExists(path.Join(\"\/tmp\", filename)); !exists {\n\t d := homedir()\n\t if d == \"\" {\n\t\t log.Printf(\"WARNING: unable to identify user home dir\")\n\t }\n\t d = path.Join(d, \".config\", \"claat\")\n\t if err := os.MkdirAll(d, 0700); err != nil {\n\t\t return \"\", err\n\t }\n\n\t if exists, _ = FileExists(path.Join(d, filename)); exists {\n\t \/\/ copy file to \/tmp\n\t bytesRead, err := ioutil.ReadFile(path.Join(d, filename))\n\t if err != nil {\n\t log.Fatal(err)\n\t }\n\t err = ioutil.WriteFile(path.Join(\"\/tmp\", filename), bytesRead, 0666)\n\t if err != nil {\n\t log.Fatal(err)\n\t }\n\t }\n\t}\n\n\treturn path.Join(\"\/tmp\", filename), nil\n}\n\nfunc FileExists(name string) (bool, error) {\n _, err := os.Stat(name)\n if err == nil {\n return true, nil\n }\n if errors.Is(err, os.ErrNotExist) {\n return false, nil\n }\n return false, err\n}\n\nfunc homedir() string {\n\tif v := os.Getenv(\"HOME\"); v != \"\" {\n\t\treturn v\n\t}\n\td, p := os.Getenv(\"HOMEDRIVE\"), os.Getenv(\"HOMEPATH\")\n\tif d != \"\" && p != \"\" {\n\t\treturn d + p\n\t}\n\treturn os.Getenv(\"USERPROFILE\")\n}\n\n\/\/ cachedTokenSource stores tokens returned from src on local disk.\n\/\/ It is usually combined with oauth2.ReuseTokenSource.\ntype cachedTokenSource struct {\n\tsrc oauth2.TokenSource\n\tprovider string\n\tconfig *oauth2.Config\n}\n\nfunc (c *cachedTokenSource) Token() (*oauth2.Token, error) {\n\tt, err := c.src.Token()\n\tif err != nil {\n\t\tt, err = authorize(c.config)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = writeToken(c.provider, t)\n\tif err != nil {\n\t\t\/\/ AppEngine debug\n\t log.Printf(\"Can't writeToken [%#v]\", err)\n\t}\n\n\treturn t, nil\n}\n\n\/\/ authorize performs user authorization flow, asking for permissions grant.\nfunc authorize(conf *oauth2.Config) (*oauth2.Token, error) {\n\taurl := conf.AuthCodeURL(\"unused\", oauth2.AccessTypeOffline)\n\tfmt.Printf(\"Authorize me at following URL, please:\\n\\n%s\\n\\nCode: \", aurl)\n\tvar code string\n\tif _, err := fmt.Scan(&code); err != nil {\n\t\treturn nil, err\n\t}\n\treturn conf.Exchange(context.Background(), code)\n}\n<commit_msg>Add logging for file copy<commit_after>\/\/ Copyright 2016-2019 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage auth\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/jwt\"\n)\n\nconst (\n\t\/\/ auth scopes needed by the program\n\tscopeDriveReadOnly = \"https:\/\/www.googleapis.com\/auth\/drive.readonly\"\n\n\t\/\/ program credentials for installed apps\n\tgoogClient = \"183908478743-e8rth9fbo7juk9eeivgp23asnt791g63.apps.googleusercontent.com\"\n\tgoogSecret = \"ljELuf5jUrzcOxZGL7OQfkIC\"\n\n\t\/\/ token providers\n\tProviderGoogle = \"goog\"\n\n\t\/\/ service account creds (plan B is to use a SA like GitWhisperer)\n\tqwiklabsServiceAccountCreds = \"qwiklabs-services-prod-e569bfaea3cd.json\"\n)\n\nvar (\n\tgoogleAuthConfig = oauth2.Config{\n\t\tClientID: googClient,\n\t\tClientSecret: googSecret,\n\t\tScopes: []string{scopeDriveReadOnly},\n\t\tRedirectURL: \"urn:ietf:wg:oauth:2.0:oob\",\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\t\t\tTokenURL: \"https:\/\/accounts.google.com\/o\/oauth2\/token\",\n\t\t},\n\t}\n)\n\ntype authorizationHandler func(conf *oauth2.Config) (*oauth2.Token, error)\n\ntype internalOptions struct {\n\tauthHandler authorizationHandler\n}\n\ntype Helper struct {\n\tauthToken string\n\tprovider string\n\tclient *http.Client\n\topts internalOptions\n}\n\nfunc NewHelper(at, p string, rt http.RoundTripper) (*Helper, error) {\n\treturn newHelper(at, p, rt, internalOptions{\n\t\tauthHandler: authorize,\n\t})\n}\n\nfunc newHelper(at, p string, rt http.RoundTripper, io internalOptions) (*Helper, error) {\n\th := Helper{\n\t\tauthToken: at,\n\t\tprovider: p,\n\t\topts: io,\n\t}\n\n\tvar err error\n\th.client, err = h.produceDriveClient(rt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &h, nil\n}\n\nfunc (h *Helper) DriveClient() *http.Client {\n\treturn h.client\n}\n\nfunc (h *Helper) produceDriveClient(rt http.RoundTripper) (*http.Client, error) {\n\tts, err := h.tokenSource()\n\t\/\/ ts, err := h.tokenSourceServiceAccount()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: ts,\n\t\t\tBase: rt,\n\t\t},\n\t}, nil\n}\n\n\/\/ Plan B\n\/\/ Creates a new oauth2.TokenSource from SA\n\/\/ Use the same SA as GitWhisperer\nfunc (h *Helper) tokenSourceServiceAccount() (oauth2.TokenSource, error) {\n l, err := tokenServiceAccountLocation()\n if err != nil {\n\t return nil, err\n }\n\n b, err := ioutil.ReadFile(l)\n if err != nil {\n\t return nil, err\n }\n\n var c = struct {\n Email string `json:\"client_email\"`\n PrivateKey string `json:\"private_key\"`\n TokenUri string `json:\"token_uri\"`\n }{}\n json.Unmarshal(b, &c)\n\n config := &jwt.Config{\n Email: c.Email,\n PrivateKey: []byte(c.PrivateKey),\n\tScopes: []string{scopeDriveReadOnly},\n TokenURL: c.TokenUri,\n }\n\n t := config.TokenSource(oauth2.NoContext)\n\n log.Printf(\"Plan B: attrs\\n%s, %s, %s\", c.Email, c.PrivateKey, c.TokenUri)\n\n token, err := t.Token()\n log.Printf(\"Plan B: token\\n%s\", token)\n\n return t, nil\n}\n\nfunc tokenServiceAccountLocation() (string, error) {\n\td := homedir()\n\tif d == \"\" {\n\t\tlog.Printf(\"WARNING: unable to identify user home dir\")\n\t}\n\td = path.Join(d, \".config\", \"claat\")\n\tif err := os.MkdirAll(d, 0700); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn path.Join(d, qwiklabsServiceAccountCreds), nil\n}\n\n\n\/\/ tokenSource creates a new oauth2.TokenSource backed by tokenRefresher,\n\/\/ using previously stored user credentials if available.\n\/\/ If authToken is not given at Helper init, we use the Google provider.\n\/\/ Otherwise, we use the auth config for the given provider.\nfunc (h *Helper) tokenSource() (oauth2.TokenSource, error) {\n\t\/\/ Create a static token source if we have an auth token.\n\tif h.authToken != \"\" {\n\t\treturn oauth2.StaticTokenSource(&oauth2.Token{\n\t\t\tAccessToken: h.authToken,\n\t\t}), nil\n\t}\n\n\t\/\/ Otherwise, use the Google provider.\n\tt, err := readToken(h.provider)\n\tif err != nil {\n\t\tt, err = h.opts.authHandler(&googleAuthConfig)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to obtain access token for %q: %#v\", h.provider, err)\n\t}\n\tcache := &cachedTokenSource{\n\t\tsrc: googleAuthConfig.TokenSource(context.Background(), t),\n\t\tprovider: h.provider,\n\t\tconfig: &googleAuthConfig,\n\t}\n\treturn oauth2.ReuseTokenSource(nil, cache), nil\n}\n\nfunc readToken(provider string) (*oauth2.Token, error) {\n\tl, err := tokenLocation(provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadFile(l)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := &oauth2.Token{}\n\treturn t, json.Unmarshal(b, t)\n}\n\n\/\/ writeToken serializes token tok to local disk.\nfunc writeToken(provider string, tok *oauth2.Token) error {\n\tl, err := tokenLocation(provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw, err := os.Create(l)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\tb, err := json.MarshalIndent(tok, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(b)\n\treturn err\n}\n\n\/\/ tokenLocation returns a local file path, suitable for storing user credentials.\n\/\/ dtc: 3\/11\/2022\n\/\/ copy cred file to \/tmp so AppEngine Flex can read\/write\nfunc tokenLocation(provider string) (string, error) {\n\tfilename := provider+\"-cred.json\"\n\n\t\/\/ only copy if \/tmp\/goog-cred.json does not exist\n\tif exists, _ := FileExists(path.Join(\"\/tmp\", filename)); !exists {\n\t d := homedir()\n\t if d == \"\" {\n\t\t log.Printf(\"WARNING: unable to identify user home dir\")\n\t }\n\t d = path.Join(d, \".config\", \"claat\")\n\t if err := os.MkdirAll(d, 0700); err != nil {\n\t\t return \"\", err\n\t }\n\n\t if exists, _ = FileExists(path.Join(d, filename)); exists {\n\t \/\/ copy file to \/tmp\n\t bytesRead, err := ioutil.ReadFile(path.Join(d, filename))\n\t if err != nil {\n\t log.Fatal(err)\n\t }\n\t err = ioutil.WriteFile(path.Join(\"\/tmp\", filename), bytesRead, 0666)\n\t if err != nil {\n\t log.Fatal(err)\n\t }\n\t log.Printf(\"tokenLocation: config file copied!\")\n\t }\n\t}\n\n\treturn path.Join(\"\/tmp\", filename), nil\n}\n\nfunc FileExists(name string) (bool, error) {\n _, err := os.Stat(name)\n if err == nil {\n return true, nil\n }\n if errors.Is(err, os.ErrNotExist) {\n return false, nil\n }\n return false, err\n}\n\nfunc homedir() string {\n\tif v := os.Getenv(\"HOME\"); v != \"\" {\n\t\treturn v\n\t}\n\td, p := os.Getenv(\"HOMEDRIVE\"), os.Getenv(\"HOMEPATH\")\n\tif d != \"\" && p != \"\" {\n\t\treturn d + p\n\t}\n\treturn os.Getenv(\"USERPROFILE\")\n}\n\n\/\/ cachedTokenSource stores tokens returned from src on local disk.\n\/\/ It is usually combined with oauth2.ReuseTokenSource.\ntype cachedTokenSource struct {\n\tsrc oauth2.TokenSource\n\tprovider string\n\tconfig *oauth2.Config\n}\n\nfunc (c *cachedTokenSource) Token() (*oauth2.Token, error) {\n\tt, err := c.src.Token()\n\tif err != nil {\n\t\tt, err = authorize(c.config)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = writeToken(c.provider, t)\n\tif err != nil {\n\t\t\/\/ AppEngine debug\n\t log.Printf(\"Can't writeToken [%#v]\", err)\n\t}\n\n\treturn t, nil\n}\n\n\/\/ authorize performs user authorization flow, asking for permissions grant.\nfunc authorize(conf *oauth2.Config) (*oauth2.Token, error) {\n\taurl := conf.AuthCodeURL(\"unused\", oauth2.AccessTypeOffline)\n\tfmt.Printf(\"Authorize me at following URL, please:\\n\\n%s\\n\\nCode: \", aurl)\n\tvar code string\n\tif _, err := fmt.Scan(&code); err != nil {\n\t\treturn nil, err\n\t}\n\treturn conf.Exchange(context.Background(), code)\n}\n<|endoftext|>"} {"text":"<commit_before>package vault\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/SermoDigital\/jose\/crypto\"\n\t\"github.com\/SermoDigital\/jose\/jws\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/vault\/helper\/jsonutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nconst (\n\t\/\/ The location of the key used to generate response-wrapping JWTs\n\tcoreWrappingJWTKeyPath = \"core\/wrapping\/jwtkey\"\n)\n\nfunc (c *Core) ensureWrappingKey() error {\n\tentry, err := c.barrier.Get(coreWrappingJWTKeyPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar keyParams clusterKeyParams\n\n\tif entry == nil {\n\t\tkey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\t\tif err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to generate wrapping key: {{err}}\", err)\n\t\t}\n\t\tkeyParams.D = key.D\n\t\tkeyParams.X = key.X\n\t\tkeyParams.Y = key.Y\n\t\tkeyParams.Type = corePrivateKeyTypeP521\n\t\tval, err := jsonutil.EncodeJSON(keyParams)\n\t\tif err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to encode wrapping key: {{err}}\", err)\n\t\t}\n\t\tentry = &Entry{\n\t\t\tKey: coreWrappingJWTKeyPath,\n\t\t\tValue: val,\n\t\t}\n\t\tif err = c.barrier.Put(entry); err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to store wrapping key: {{err}}\", err)\n\t\t}\n\t}\n\n\t\/\/ Redundant if we just created it, but in this case serves as a check anyways\n\tif err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil {\n\t\treturn errwrap.Wrapf(\"failed to decode wrapping key parameters: {{err}}\", err)\n\t}\n\n\tc.wrappingJWTKey = &ecdsa.PrivateKey{\n\t\tPublicKey: ecdsa.PublicKey{\n\t\t\tCurve: elliptic.P521(),\n\t\t\tX: keyParams.X,\n\t\t\tY: keyParams.Y,\n\t\t},\n\t\tD: keyParams.D,\n\t}\n\n\tc.logger.Info(\"core: loaded wrapping token key\")\n\n\treturn nil\n}\n\nfunc (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*logical.Response, error) {\n\t\/\/ Before wrapping, obey special rules for listing: if no entries are\n\t\/\/ found, 404. This prevents unwrapping only to find empty data.\n\tif req.Operation == logical.ListOperation {\n\t\tif resp == nil || len(resp.Data) == 0 {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tkeysRaw, ok := resp.Data[\"keys\"]\n\t\tif !ok || keysRaw == nil {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tkeys, ok := keysRaw.([]string)\n\t\tif !ok {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tif len(keys) == 0 {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t}\n\n\tvar err error\n\n\t\/\/ If we are wrapping, the first part (performed in this functions) happens\n\t\/\/ before auditing so that resp.WrapInfo.Token can contain the HMAC'd\n\t\/\/ wrapping token ID in the audit logs, so that it can be determined from\n\t\/\/ the audit logs whether the token was ever actually used.\n\tcreationTime := time.Now()\n\tte := TokenEntry{\n\t\tPath: req.Path,\n\t\tPolicies: []string{\"response-wrapping\"},\n\t\tCreationTime: creationTime.Unix(),\n\t\tTTL: resp.WrapInfo.TTL,\n\t\tNumUses: 1,\n\t\tExplicitMaxTTL: resp.WrapInfo.TTL,\n\t}\n\n\tif err := c.tokenStore.create(&te); err != nil {\n\t\tc.logger.Error(\"core: failed to create wrapping token\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\n\tresp.WrapInfo.Token = te.ID\n\tresp.WrapInfo.CreationTime = creationTime\n\n\t\/\/ This will only be non-nil if this response contains a token, so in that\n\t\/\/ case put the accessor in the wrap info.\n\tif resp.Auth != nil {\n\t\tresp.WrapInfo.WrappedAccessor = resp.Auth.Accessor\n\t}\n\n\tswitch resp.WrapInfo.Format {\n\tcase \"jwt\":\n\t\t\/\/ Create the JWT\n\t\tclaims := jws.Claims{}\n\t\t\/\/ Map the JWT ID to the token ID for ease ofuse\n\t\tclaims.SetJWTID(te.ID)\n\t\t\/\/ Set the issue time to the creation time\n\t\tclaims.SetIssuedAt(creationTime)\n\t\t\/\/ Set the expiration to the TTL\n\t\tclaims.SetExpiration(creationTime.Add(resp.WrapInfo.TTL))\n\t\tif resp.Auth != nil {\n\t\t\tclaims.Set(\"accessor\", resp.Auth.Accessor)\n\t\t}\n\t\tclaims.Set(\"type\", \"wrapping\")\n\t\tclaims.Set(\"addr\", c.redirectAddr)\n\t\tjwt := jws.NewJWT(claims, crypto.SigningMethodES512)\n\t\tserWebToken, err := jwt.Serialize(c.wrappingJWTKey)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"core: failed to serialize JWT\", \"error\", err)\n\t\t\treturn nil, ErrInternalError\n\t\t}\n\t\tresp.WrapInfo.Token = string(serWebToken)\n\t}\n\n\tcubbyReq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"cubbyhole\/response\",\n\t\tClientToken: te.ID,\n\t}\n\n\t\/\/ During a rewrap, store the original response, don't wrap it again.\n\tif req.Path == \"sys\/wrapping\/rewrap\" {\n\t\tcubbyReq.Data = map[string]interface{}{\n\t\t\t\"response\": resp.Data[\"response\"],\n\t\t}\n\t} else {\n\t\thttpResponse := logical.LogicalResponseToHTTPResponse(resp)\n\n\t\t\/\/ Add the unique identifier of the original request to the response\n\t\thttpResponse.RequestID = req.ID\n\n\t\t\/\/ Because of the way that JSON encodes (likely just in Go) we actually get\n\t\t\/\/ mixed-up values for ints if we simply put this object in the response\n\t\t\/\/ and encode the whole thing; so instead we marshal it first, then store\n\t\t\/\/ the string response. This actually ends up making it easier on the\n\t\t\/\/ client side, too, as it becomes a straight read-string-pass-to-unmarshal\n\t\t\/\/ operation.\n\n\t\tmarshaledResponse, err := json.Marshal(httpResponse)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"core: failed to marshal wrapped response\", \"error\", err)\n\t\t\treturn nil, ErrInternalError\n\t\t}\n\n\t\tcubbyReq.Data = map[string]interface{}{\n\t\t\t\"response\": string(marshaledResponse),\n\t\t}\n\t}\n\n\tcubbyResp, err := c.router.Route(cubbyReq)\n\tif err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapped response information\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\tif cubbyResp != nil && cubbyResp.IsError() {\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapped response information\", \"error\", cubbyResp.Data[\"error\"])\n\t\treturn cubbyResp, nil\n\t}\n\n\t\/\/ Store info for lookup\n\tcubbyReq.Path = \"cubbyhole\/wrapinfo\"\n\tcubbyReq.Data = map[string]interface{}{\n\t\t\"creation_ttl\": resp.WrapInfo.TTL,\n\t\t\"creation_time\": creationTime,\n\t}\n\tcubbyResp, err = c.router.Route(cubbyReq)\n\tif err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapping information\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\tif cubbyResp != nil && cubbyResp.IsError() {\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapping information\", \"error\", cubbyResp.Data[\"error\"])\n\t\treturn cubbyResp, nil\n\t}\n\n\tauth := &logical.Auth{\n\t\tClientToken: te.ID,\n\t\tPolicies: []string{\"response-wrapping\"},\n\t\tLeaseOptions: logical.LeaseOptions{\n\t\t\tTTL: te.TTL,\n\t\t\tRenewable: false,\n\t\t},\n\t}\n\n\t\/\/ Register the wrapped token with the expiration manager\n\tif err := c.expiration.RegisterAuth(te.Path, auth); err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to register cubbyhole wrapping token lease\", \"request_path\", req.Path, \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\n\treturn nil, nil\n}\n\nfunc (c *Core) ValidateWrappingToken(req *logical.Request) (bool, error) {\n\tif req == nil {\n\t\treturn false, fmt.Errorf(\"invalid request\")\n\t}\n\n\tvar err error\n\n\tvar token string\n\tif req.Data != nil && req.Data[\"token\"] != nil {\n\t\tif tokenStr, ok := req.Data[\"token\"].(string); !ok {\n\t\t\treturn false, fmt.Errorf(\"could not decode token in request body\")\n\t\t} else if tokenStr == \"\" {\n\t\t\treturn false, fmt.Errorf(\"empty token in request body\")\n\t\t} else {\n\t\t\ttoken = tokenStr\n\t\t}\n\t} else {\n\t\ttoken = req.ClientToken\n\t}\n\n\t\/\/ Check for it being a JWT. If it is, and it is valid, we extract the\n\t\/\/ internal client token from it and use that during lookup.\n\tif strings.Count(token, \".\") == 2 {\n\t\twt, err := jws.ParseJWT([]byte(token))\n\t\t\/\/ If there's an error we simply fall back to attempting to use it as a regular token\n\t\tif err == nil && wt != nil {\n\t\t\tvalidator := &jwt.Validator{}\n\t\t\tvalidator.SetClaim(\"type\", \"wrapping\")\n\t\t\tif err = wt.Validate(&c.wrappingJWTKey.PublicKey, crypto.SigningMethodES512, []*jwt.Validator{validator}...); err != nil {\n\t\t\t\treturn false, errwrap.Wrapf(\"wrapping token signature could not be validated: {{err}}\", err)\n\t\t\t}\n\t\t\ttoken, _ = wt.Claims().JWTID()\n\t\t}\n\t}\n\n\tif token == \"\" {\n\t\treturn false, fmt.Errorf(\"token is empty\")\n\t}\n\n\tc.stateLock.RLock()\n\tdefer c.stateLock.RUnlock()\n\tif c.sealed {\n\t\treturn nil, ErrSealed\n\t}\n\tif c.standby {\n\t\treturn nil, ErrStandby\n\t}\n\n\tte, err := c.tokenStore.Lookup(token)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif te == nil {\n\t\treturn false, nil\n\t}\n\n\tif len(te.Policies) != 1 {\n\t\treturn false, nil\n\t}\n\n\tif te.Policies[0] != responseWrappingPolicyName {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\n\/\/ parseVaultTokenFromJWT returns a string iff the token was a JWT and we could\n\/\/ extract the original token ID from inside\nfunc (c *Core) parseVaultTokenFromJWT(token string) *string {\n\tvar result string\n\tif strings.Count(token, \".\") != 2 {\n\t\treturn nil\n\t}\n\n\twt, err := jws.ParseJWT([]byte(token))\n\tif err != nil || wt == nil {\n\t\treturn nil\n\t}\n\n\tresult, _ = wt.Claims().JWTID()\n\n\treturn &result\n}\n<commit_msg>Fixed return types<commit_after>package vault\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/SermoDigital\/jose\/crypto\"\n\t\"github.com\/SermoDigital\/jose\/jws\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/vault\/helper\/jsonutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nconst (\n\t\/\/ The location of the key used to generate response-wrapping JWTs\n\tcoreWrappingJWTKeyPath = \"core\/wrapping\/jwtkey\"\n)\n\nfunc (c *Core) ensureWrappingKey() error {\n\tentry, err := c.barrier.Get(coreWrappingJWTKeyPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar keyParams clusterKeyParams\n\n\tif entry == nil {\n\t\tkey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\t\tif err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to generate wrapping key: {{err}}\", err)\n\t\t}\n\t\tkeyParams.D = key.D\n\t\tkeyParams.X = key.X\n\t\tkeyParams.Y = key.Y\n\t\tkeyParams.Type = corePrivateKeyTypeP521\n\t\tval, err := jsonutil.EncodeJSON(keyParams)\n\t\tif err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to encode wrapping key: {{err}}\", err)\n\t\t}\n\t\tentry = &Entry{\n\t\t\tKey: coreWrappingJWTKeyPath,\n\t\t\tValue: val,\n\t\t}\n\t\tif err = c.barrier.Put(entry); err != nil {\n\t\t\treturn errwrap.Wrapf(\"failed to store wrapping key: {{err}}\", err)\n\t\t}\n\t}\n\n\t\/\/ Redundant if we just created it, but in this case serves as a check anyways\n\tif err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil {\n\t\treturn errwrap.Wrapf(\"failed to decode wrapping key parameters: {{err}}\", err)\n\t}\n\n\tc.wrappingJWTKey = &ecdsa.PrivateKey{\n\t\tPublicKey: ecdsa.PublicKey{\n\t\t\tCurve: elliptic.P521(),\n\t\t\tX: keyParams.X,\n\t\t\tY: keyParams.Y,\n\t\t},\n\t\tD: keyParams.D,\n\t}\n\n\tc.logger.Info(\"core: loaded wrapping token key\")\n\n\treturn nil\n}\n\nfunc (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*logical.Response, error) {\n\t\/\/ Before wrapping, obey special rules for listing: if no entries are\n\t\/\/ found, 404. This prevents unwrapping only to find empty data.\n\tif req.Operation == logical.ListOperation {\n\t\tif resp == nil || len(resp.Data) == 0 {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tkeysRaw, ok := resp.Data[\"keys\"]\n\t\tif !ok || keysRaw == nil {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tkeys, ok := keysRaw.([]string)\n\t\tif !ok {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t\tif len(keys) == 0 {\n\t\t\treturn nil, logical.ErrUnsupportedPath\n\t\t}\n\t}\n\n\tvar err error\n\n\t\/\/ If we are wrapping, the first part (performed in this functions) happens\n\t\/\/ before auditing so that resp.WrapInfo.Token can contain the HMAC'd\n\t\/\/ wrapping token ID in the audit logs, so that it can be determined from\n\t\/\/ the audit logs whether the token was ever actually used.\n\tcreationTime := time.Now()\n\tte := TokenEntry{\n\t\tPath: req.Path,\n\t\tPolicies: []string{\"response-wrapping\"},\n\t\tCreationTime: creationTime.Unix(),\n\t\tTTL: resp.WrapInfo.TTL,\n\t\tNumUses: 1,\n\t\tExplicitMaxTTL: resp.WrapInfo.TTL,\n\t}\n\n\tif err := c.tokenStore.create(&te); err != nil {\n\t\tc.logger.Error(\"core: failed to create wrapping token\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\n\tresp.WrapInfo.Token = te.ID\n\tresp.WrapInfo.CreationTime = creationTime\n\n\t\/\/ This will only be non-nil if this response contains a token, so in that\n\t\/\/ case put the accessor in the wrap info.\n\tif resp.Auth != nil {\n\t\tresp.WrapInfo.WrappedAccessor = resp.Auth.Accessor\n\t}\n\n\tswitch resp.WrapInfo.Format {\n\tcase \"jwt\":\n\t\t\/\/ Create the JWT\n\t\tclaims := jws.Claims{}\n\t\t\/\/ Map the JWT ID to the token ID for ease ofuse\n\t\tclaims.SetJWTID(te.ID)\n\t\t\/\/ Set the issue time to the creation time\n\t\tclaims.SetIssuedAt(creationTime)\n\t\t\/\/ Set the expiration to the TTL\n\t\tclaims.SetExpiration(creationTime.Add(resp.WrapInfo.TTL))\n\t\tif resp.Auth != nil {\n\t\t\tclaims.Set(\"accessor\", resp.Auth.Accessor)\n\t\t}\n\t\tclaims.Set(\"type\", \"wrapping\")\n\t\tclaims.Set(\"addr\", c.redirectAddr)\n\t\tjwt := jws.NewJWT(claims, crypto.SigningMethodES512)\n\t\tserWebToken, err := jwt.Serialize(c.wrappingJWTKey)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"core: failed to serialize JWT\", \"error\", err)\n\t\t\treturn nil, ErrInternalError\n\t\t}\n\t\tresp.WrapInfo.Token = string(serWebToken)\n\t}\n\n\tcubbyReq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"cubbyhole\/response\",\n\t\tClientToken: te.ID,\n\t}\n\n\t\/\/ During a rewrap, store the original response, don't wrap it again.\n\tif req.Path == \"sys\/wrapping\/rewrap\" {\n\t\tcubbyReq.Data = map[string]interface{}{\n\t\t\t\"response\": resp.Data[\"response\"],\n\t\t}\n\t} else {\n\t\thttpResponse := logical.LogicalResponseToHTTPResponse(resp)\n\n\t\t\/\/ Add the unique identifier of the original request to the response\n\t\thttpResponse.RequestID = req.ID\n\n\t\t\/\/ Because of the way that JSON encodes (likely just in Go) we actually get\n\t\t\/\/ mixed-up values for ints if we simply put this object in the response\n\t\t\/\/ and encode the whole thing; so instead we marshal it first, then store\n\t\t\/\/ the string response. This actually ends up making it easier on the\n\t\t\/\/ client side, too, as it becomes a straight read-string-pass-to-unmarshal\n\t\t\/\/ operation.\n\n\t\tmarshaledResponse, err := json.Marshal(httpResponse)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"core: failed to marshal wrapped response\", \"error\", err)\n\t\t\treturn nil, ErrInternalError\n\t\t}\n\n\t\tcubbyReq.Data = map[string]interface{}{\n\t\t\t\"response\": string(marshaledResponse),\n\t\t}\n\t}\n\n\tcubbyResp, err := c.router.Route(cubbyReq)\n\tif err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapped response information\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\tif cubbyResp != nil && cubbyResp.IsError() {\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapped response information\", \"error\", cubbyResp.Data[\"error\"])\n\t\treturn cubbyResp, nil\n\t}\n\n\t\/\/ Store info for lookup\n\tcubbyReq.Path = \"cubbyhole\/wrapinfo\"\n\tcubbyReq.Data = map[string]interface{}{\n\t\t\"creation_ttl\": resp.WrapInfo.TTL,\n\t\t\"creation_time\": creationTime,\n\t}\n\tcubbyResp, err = c.router.Route(cubbyReq)\n\tif err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapping information\", \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\tif cubbyResp != nil && cubbyResp.IsError() {\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to store wrapping information\", \"error\", cubbyResp.Data[\"error\"])\n\t\treturn cubbyResp, nil\n\t}\n\n\tauth := &logical.Auth{\n\t\tClientToken: te.ID,\n\t\tPolicies: []string{\"response-wrapping\"},\n\t\tLeaseOptions: logical.LeaseOptions{\n\t\t\tTTL: te.TTL,\n\t\t\tRenewable: false,\n\t\t},\n\t}\n\n\t\/\/ Register the wrapped token with the expiration manager\n\tif err := c.expiration.RegisterAuth(te.Path, auth); err != nil {\n\t\t\/\/ Revoke since it's not yet being tracked for expiration\n\t\tc.tokenStore.Revoke(te.ID)\n\t\tc.logger.Error(\"core: failed to register cubbyhole wrapping token lease\", \"request_path\", req.Path, \"error\", err)\n\t\treturn nil, ErrInternalError\n\t}\n\n\treturn nil, nil\n}\n\nfunc (c *Core) ValidateWrappingToken(req *logical.Request) (bool, error) {\n\tif req == nil {\n\t\treturn false, fmt.Errorf(\"invalid request\")\n\t}\n\n\tvar err error\n\n\tvar token string\n\tif req.Data != nil && req.Data[\"token\"] != nil {\n\t\tif tokenStr, ok := req.Data[\"token\"].(string); !ok {\n\t\t\treturn false, fmt.Errorf(\"could not decode token in request body\")\n\t\t} else if tokenStr == \"\" {\n\t\t\treturn false, fmt.Errorf(\"empty token in request body\")\n\t\t} else {\n\t\t\ttoken = tokenStr\n\t\t}\n\t} else {\n\t\ttoken = req.ClientToken\n\t}\n\n\t\/\/ Check for it being a JWT. If it is, and it is valid, we extract the\n\t\/\/ internal client token from it and use that during lookup.\n\tif strings.Count(token, \".\") == 2 {\n\t\twt, err := jws.ParseJWT([]byte(token))\n\t\t\/\/ If there's an error we simply fall back to attempting to use it as a regular token\n\t\tif err == nil && wt != nil {\n\t\t\tvalidator := &jwt.Validator{}\n\t\t\tvalidator.SetClaim(\"type\", \"wrapping\")\n\t\t\tif err = wt.Validate(&c.wrappingJWTKey.PublicKey, crypto.SigningMethodES512, []*jwt.Validator{validator}...); err != nil {\n\t\t\t\treturn false, errwrap.Wrapf(\"wrapping token signature could not be validated: {{err}}\", err)\n\t\t\t}\n\t\t\ttoken, _ = wt.Claims().JWTID()\n\t\t}\n\t}\n\n\tif token == \"\" {\n\t\treturn false, fmt.Errorf(\"token is empty\")\n\t}\n\n\tc.stateLock.RLock()\n\tdefer c.stateLock.RUnlock()\n\tif c.sealed {\n\t\treturn false, ErrSealed\n\t}\n\tif c.standby {\n\t\treturn false, ErrStandby\n\t}\n\n\tte, err := c.tokenStore.Lookup(token)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif te == nil {\n\t\treturn false, nil\n\t}\n\n\tif len(te.Policies) != 1 {\n\t\treturn false, nil\n\t}\n\n\tif te.Policies[0] != responseWrappingPolicyName {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\n\/\/ parseVaultTokenFromJWT returns a string iff the token was a JWT and we could\n\/\/ extract the original token ID from inside\nfunc (c *Core) parseVaultTokenFromJWT(token string) *string {\n\tvar result string\n\tif strings.Count(token, \".\") != 2 {\n\t\treturn nil\n\t}\n\n\twt, err := jws.ParseJWT([]byte(token))\n\tif err != nil || wt == nil {\n\t\treturn nil\n\t}\n\n\tresult, _ = wt.Claims().JWTID()\n\n\treturn &result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage ui\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/opengl\"\n)\n\nvar canvas *js.Object\n\ntype userInterface struct {\n\tscale float64\n\tdeviceScale float64\n\tsizeChanged bool\n\twindowFocus bool\n}\n\nvar currentUI = &userInterface{\n\tsizeChanged: true,\n\twindowFocus: true,\n}\n\n\/\/ NOTE: This returns true even when the browser is not active.\nfunc shown() bool {\n\treturn !js.Global.Get(\"document\").Get(\"hidden\").Bool()\n}\n\nfunc SetScreenSize(width, height int) bool {\n\treturn currentUI.setScreenSize(width, height, currentUI.scale)\n}\n\nfunc SetScreenScale(scale float64) bool {\n\twidth, height := currentUI.size()\n\treturn currentUI.setScreenSize(width, height, scale)\n}\n\nfunc ScreenScale() float64 {\n\treturn currentUI.scale\n}\n\nfunc SetFullscreen(fullscreen bool) bool {\n\t\/\/ TODO: Implement\n\treturn false\n}\n\nfunc IsFullscreen() bool {\n\t\/\/ TODO: Implement\n\treturn false\n}\n\nfunc ScreenOffset() (float64, float64) {\n\treturn 0, 0\n}\n\nfunc adjustCursorPosition(x, y int) (int, int) {\n\treturn x, y\n}\n\nfunc SetCursorVisibility(visibility bool) {\n\tif visibility {\n\t\tcanvas.Get(\"style\").Set(\"cursor\", \"auto\")\n\t} else {\n\t\tcanvas.Get(\"style\").Set(\"cursor\", \"none\")\n\t}\n}\n\nfunc (u *userInterface) actualScreenScale() float64 {\n\treturn u.scale * u.deviceScale\n}\n\nfunc (u *userInterface) update(g GraphicsContext) error {\n\tif !u.windowFocus {\n\t\treturn nil\n\t}\n\tif opengl.GetContext().IsContextLost() {\n\t\topengl.GetContext().RestoreContext()\n\t\tg.Invalidate()\n\t}\n\tcurrentInput.updateGamepads()\n\tif u.sizeChanged {\n\t\tu.sizeChanged = false\n\t\tw, h := u.size()\n\t\tg.SetSize(w, h, u.actualScreenScale())\n\t\treturn nil\n\t}\n\tif err := g.Update(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (u *userInterface) loop(g GraphicsContext) error {\n\tch := make(chan error)\n\tvar f func()\n\tf = func() {\n\t\tgo func() {\n\t\t\tif err := u.update(g); err != nil {\n\t\t\t\tch <- err\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjs.Global.Get(\"window\").Call(\"requestAnimationFrame\", f)\n\t\t}()\n\t}\n\tf()\n\treturn <-ch\n}\n\nfunc touchEventToTouches(e *js.Object) []touch {\n\tscale := currentUI.scale\n\tj := e.Get(\"targetTouches\")\n\trect := canvas.Call(\"getBoundingClientRect\")\n\tleft, top := rect.Get(\"left\").Int(), rect.Get(\"top\").Int()\n\tt := make([]touch, j.Get(\"length\").Int())\n\tfor i := 0; i < len(t); i++ {\n\t\tjj := j.Call(\"item\", i)\n\t\tt[i].id = jj.Get(\"identifier\").Int()\n\t\tt[i].x = int(float64(jj.Get(\"clientX\").Int()-left) \/ scale)\n\t\tt[i].y = int(float64(jj.Get(\"clientY\").Int()-top) \/ scale)\n\t}\n\treturn t\n}\n\nfunc init() {\n\tif err := initialize(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc initialize() error {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn nil\n\t}\n\n\tdoc := js.Global.Get(\"document\")\n\twindow := js.Global.Get(\"window\")\n\tif doc.Get(\"body\") == nil {\n\t\tch := make(chan struct{})\n\t\twindow.Call(\"addEventListener\", \"load\", func() {\n\t\t\tclose(ch)\n\t\t})\n\t\t<-ch\n\t}\n\twindow.Call(\"addEventListener\", \"focus\", func() {\n\t\tcurrentUI.windowFocus = true\n\t})\n\twindow.Call(\"addEventListener\", \"blur\", func() {\n\t\tcurrentUI.windowFocus = false\n\t})\n\n\t\/\/ Adjust the initial scale to 1.\n\t\/\/ https:\/\/developer.mozilla.org\/en\/docs\/Mozilla\/Mobile\/Viewport_meta_tag\n\tmeta := doc.Call(\"createElement\", \"meta\")\n\tmeta.Set(\"name\", \"viewport\")\n\tmeta.Set(\"content\", \"width=device-width, initial-scale=1\")\n\tdoc.Get(\"body\").Call(\"appendChild\", meta)\n\n\tcanvas = doc.Call(\"createElement\", \"canvas\")\n\tcanvas.Set(\"width\", 16)\n\tcanvas.Set(\"height\", 16)\n\tdoc.Get(\"body\").Call(\"appendChild\", canvas)\n\n\thtmlStyle := doc.Get(\"documentElement\").Get(\"style\")\n\thtmlStyle.Set(\"height\", \"100%\")\n\thtmlStyle.Set(\"margin\", \"0\")\n\thtmlStyle.Set(\"padding\", \"0\")\n\n\tbodyStyle := doc.Get(\"body\").Get(\"style\")\n\tbodyStyle.Set(\"backgroundColor\", \"#000\")\n\tbodyStyle.Set(\"position\", \"relative\")\n\tbodyStyle.Set(\"height\", \"100%\")\n\tbodyStyle.Set(\"margin\", \"0\")\n\tbodyStyle.Set(\"padding\", \"0\")\n\t\/\/ TODO: This is OK as long as the game is in an independent iframe.\n\t\/\/ What if the canvas is embedded in a HTML directly?\n\tdoc.Get(\"body\").Call(\"addEventListener\", \"click\", func() {\n\t\tcanvas.Call(\"focus\")\n\t})\n\n\tcanvasStyle := canvas.Get(\"style\")\n\tcanvasStyle.Set(\"position\", \"absolute\")\n\n\t\/\/ Make the canvas focusable.\n\tcanvas.Call(\"setAttribute\", \"tabindex\", 1)\n\tcanvas.Get(\"style\").Set(\"outline\", \"none\")\n\n\t\/\/ Keyboard\n\tcanvas.Call(\"addEventListener\", \"keydown\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tif e.Get(\"code\") == js.Undefined {\n\t\t\t\/\/ Assume that UA is Safari.\n\t\t\tcode := e.Get(\"keyCode\").Int()\n\t\t\tcurrentInput.keyDownSafari(code)\n\t\t\treturn\n\t\t}\n\t\tcode := e.Get(\"code\").String()\n\t\tcurrentInput.keyDown(code)\n\t})\n\tcanvas.Call(\"addEventListener\", \"keyup\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tif e.Get(\"code\") == js.Undefined {\n\t\t\t\/\/ Assume that UA is Safari.\n\t\t\tcode := e.Get(\"keyCode\").Int()\n\t\t\tcurrentInput.keyUpSafari(code)\n\t\t}\n\t\tcode := e.Get(\"code\").String()\n\t\tcurrentInput.keyUp(code)\n\t})\n\n\t\/\/ Mouse\n\tcanvas.Call(\"addEventListener\", \"mousedown\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tbutton := e.Get(\"button\").Int()\n\t\tcurrentInput.mouseDown(button)\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"mouseup\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tbutton := e.Get(\"button\").Int()\n\t\tcurrentInput.mouseUp(button)\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"mousemove\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"contextmenu\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t})\n\n\t\/\/ Touch\n\tcanvas.Call(\"addEventListener\", \"touchstart\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\tcanvas.Call(\"addEventListener\", \"touchend\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\tcanvas.Call(\"addEventListener\", \"touchmove\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\n\t\/\/ Gamepad\n\twindow.Call(\"addEventListener\", \"gamepadconnected\", func(e *js.Object) {\n\t\t\/\/ Do nothing.\n\t})\n\n\tcanvas.Call(\"addEventListener\", \"webglcontextlost\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t})\n\tcanvas.Call(\"addEventListener\", \"webglcontextrestored\", func(e *js.Object) {\n\t\t\/\/ Do nothing.\n\t})\n\n\treturn nil\n}\n\nfunc setMouseCursorFromEvent(e *js.Object) {\n\tscale := currentUI.scale\n\trect := canvas.Call(\"getBoundingClientRect\")\n\tx, y := e.Get(\"clientX\").Int(), e.Get(\"clientY\").Int()\n\tx -= rect.Get(\"left\").Int()\n\ty -= rect.Get(\"top\").Int()\n\tcurrentInput.setMouseCursor(int(float64(x)\/scale), int(float64(y)\/scale))\n}\n\nfunc devicePixelRatio() float64 {\n\tratio := js.Global.Get(\"window\").Get(\"devicePixelRatio\").Float()\n\tif ratio == 0 {\n\t\tratio = 1\n\t}\n\treturn ratio\n}\n\nfunc RunMainThreadLoop(ch <-chan error) error {\n\treturn <-ch\n}\n\nfunc Run(width, height int, scale float64, title string, g GraphicsContext) error {\n\tu := currentUI\n\tdoc := js.Global.Get(\"document\")\n\tdoc.Set(\"title\", title)\n\tu.setScreenSize(width, height, scale)\n\tcanvas.Call(\"focus\")\n\tif err := opengl.Init(); err != nil {\n\t\treturn err\n\t}\n\treturn u.loop(g)\n}\n\nfunc (u *userInterface) size() (width, height int) {\n\ta := u.actualScreenScale()\n\tif a == 0 {\n\t\t\/\/ a == 0 only on the initial state.\n\t\treturn\n\t}\n\twidth = int(canvas.Get(\"width\").Float() \/ a)\n\theight = int(canvas.Get(\"height\").Float() \/ a)\n\treturn\n}\n\nfunc isSafari() bool {\n\tua := js.Global.Get(\"navigator\").Get(\"userAgent\").String()\n\tif !strings.Contains(ua, \"Safari\") {\n\t\treturn false\n\t}\n\tif strings.Contains(ua, \"Chrome\") {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (u *userInterface) setScreenSize(width, height int, scale float64) bool {\n\tw, h := u.size()\n\ts := u.scale\n\tif w == width && h == height && s == scale {\n\t\treturn false\n\t}\n\tu.scale = scale\n\t\/\/ CSS imageRendering seems useful to enlarge the screen,\n\t\/\/ but doesn't work in some cases (#306):\n\t\/\/ * Chrome just after restoring the lost context\n\t\/\/ * Safari\n\t\/\/ Let's use the pixel ratio as it is here.\n\tu.deviceScale = devicePixelRatio()\n\tcanvas.Set(\"width\", int(float64(width)*u.actualScreenScale()))\n\tcanvas.Set(\"height\", int(float64(height)*u.actualScreenScale()))\n\tcanvasStyle := canvas.Get(\"style\")\n\n\tcssWidth := int(float64(width) * scale)\n\tcssHeight := int(float64(height) * scale)\n\tcanvasStyle.Set(\"width\", strconv.Itoa(cssWidth)+\"px\")\n\tcanvasStyle.Set(\"height\", strconv.Itoa(cssHeight)+\"px\")\n\t\/\/ CSS calc requires space chars.\n\tcanvasStyle.Set(\"left\", \"calc((100% - \"+strconv.Itoa(cssWidth)+\"px) \/ 2)\")\n\tcanvasStyle.Set(\"top\", \"calc((100% - \"+strconv.Itoa(cssHeight)+\"px) \/ 2)\")\n\tu.sizeChanged = true\n\treturn true\n}\n<commit_msg>ui: Bug fix: Should add <meta> to <head> instead of <body><commit_after>\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build js\n\npackage ui\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/opengl\"\n)\n\nvar canvas *js.Object\n\ntype userInterface struct {\n\tscale float64\n\tdeviceScale float64\n\tsizeChanged bool\n\twindowFocus bool\n}\n\nvar currentUI = &userInterface{\n\tsizeChanged: true,\n\twindowFocus: true,\n}\n\n\/\/ NOTE: This returns true even when the browser is not active.\nfunc shown() bool {\n\treturn !js.Global.Get(\"document\").Get(\"hidden\").Bool()\n}\n\nfunc SetScreenSize(width, height int) bool {\n\treturn currentUI.setScreenSize(width, height, currentUI.scale)\n}\n\nfunc SetScreenScale(scale float64) bool {\n\twidth, height := currentUI.size()\n\treturn currentUI.setScreenSize(width, height, scale)\n}\n\nfunc ScreenScale() float64 {\n\treturn currentUI.scale\n}\n\nfunc SetFullscreen(fullscreen bool) bool {\n\t\/\/ TODO: Implement\n\treturn false\n}\n\nfunc IsFullscreen() bool {\n\t\/\/ TODO: Implement\n\treturn false\n}\n\nfunc ScreenOffset() (float64, float64) {\n\treturn 0, 0\n}\n\nfunc adjustCursorPosition(x, y int) (int, int) {\n\treturn x, y\n}\n\nfunc SetCursorVisibility(visibility bool) {\n\tif visibility {\n\t\tcanvas.Get(\"style\").Set(\"cursor\", \"auto\")\n\t} else {\n\t\tcanvas.Get(\"style\").Set(\"cursor\", \"none\")\n\t}\n}\n\nfunc (u *userInterface) actualScreenScale() float64 {\n\treturn u.scale * u.deviceScale\n}\n\nfunc (u *userInterface) update(g GraphicsContext) error {\n\tif !u.windowFocus {\n\t\treturn nil\n\t}\n\tif opengl.GetContext().IsContextLost() {\n\t\topengl.GetContext().RestoreContext()\n\t\tg.Invalidate()\n\t}\n\tcurrentInput.updateGamepads()\n\tif u.sizeChanged {\n\t\tu.sizeChanged = false\n\t\tw, h := u.size()\n\t\tg.SetSize(w, h, u.actualScreenScale())\n\t\treturn nil\n\t}\n\tif err := g.Update(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (u *userInterface) loop(g GraphicsContext) error {\n\tch := make(chan error)\n\tvar f func()\n\tf = func() {\n\t\tgo func() {\n\t\t\tif err := u.update(g); err != nil {\n\t\t\t\tch <- err\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjs.Global.Get(\"window\").Call(\"requestAnimationFrame\", f)\n\t\t}()\n\t}\n\tf()\n\treturn <-ch\n}\n\nfunc touchEventToTouches(e *js.Object) []touch {\n\tscale := currentUI.scale\n\tj := e.Get(\"targetTouches\")\n\trect := canvas.Call(\"getBoundingClientRect\")\n\tleft, top := rect.Get(\"left\").Int(), rect.Get(\"top\").Int()\n\tt := make([]touch, j.Get(\"length\").Int())\n\tfor i := 0; i < len(t); i++ {\n\t\tjj := j.Call(\"item\", i)\n\t\tt[i].id = jj.Get(\"identifier\").Int()\n\t\tt[i].x = int(float64(jj.Get(\"clientX\").Int()-left) \/ scale)\n\t\tt[i].y = int(float64(jj.Get(\"clientY\").Int()-top) \/ scale)\n\t}\n\treturn t\n}\n\nfunc init() {\n\tif err := initialize(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc initialize() error {\n\t\/\/ Do nothing in node.js.\n\tif js.Global.Get(\"require\") != js.Undefined {\n\t\treturn nil\n\t}\n\n\tdoc := js.Global.Get(\"document\")\n\twindow := js.Global.Get(\"window\")\n\tif doc.Get(\"body\") == nil {\n\t\tch := make(chan struct{})\n\t\twindow.Call(\"addEventListener\", \"load\", func() {\n\t\t\tclose(ch)\n\t\t})\n\t\t<-ch\n\t}\n\twindow.Call(\"addEventListener\", \"focus\", func() {\n\t\tcurrentUI.windowFocus = true\n\t})\n\twindow.Call(\"addEventListener\", \"blur\", func() {\n\t\tcurrentUI.windowFocus = false\n\t})\n\n\t\/\/ Adjust the initial scale to 1.\n\t\/\/ https:\/\/developer.mozilla.org\/en\/docs\/Mozilla\/Mobile\/Viewport_meta_tag\n\tmeta := doc.Call(\"createElement\", \"meta\")\n\tmeta.Set(\"name\", \"viewport\")\n\tmeta.Set(\"content\", \"width=device-width, initial-scale=1\")\n\tdoc.Get(\"head\").Call(\"appendChild\", meta)\n\n\tcanvas = doc.Call(\"createElement\", \"canvas\")\n\tcanvas.Set(\"width\", 16)\n\tcanvas.Set(\"height\", 16)\n\tdoc.Get(\"body\").Call(\"appendChild\", canvas)\n\n\thtmlStyle := doc.Get(\"documentElement\").Get(\"style\")\n\thtmlStyle.Set(\"height\", \"100%\")\n\thtmlStyle.Set(\"margin\", \"0\")\n\thtmlStyle.Set(\"padding\", \"0\")\n\n\tbodyStyle := doc.Get(\"body\").Get(\"style\")\n\tbodyStyle.Set(\"backgroundColor\", \"#000\")\n\tbodyStyle.Set(\"position\", \"relative\")\n\tbodyStyle.Set(\"height\", \"100%\")\n\tbodyStyle.Set(\"margin\", \"0\")\n\tbodyStyle.Set(\"padding\", \"0\")\n\t\/\/ TODO: This is OK as long as the game is in an independent iframe.\n\t\/\/ What if the canvas is embedded in a HTML directly?\n\tdoc.Get(\"body\").Call(\"addEventListener\", \"click\", func() {\n\t\tcanvas.Call(\"focus\")\n\t})\n\n\tcanvasStyle := canvas.Get(\"style\")\n\tcanvasStyle.Set(\"position\", \"absolute\")\n\n\t\/\/ Make the canvas focusable.\n\tcanvas.Call(\"setAttribute\", \"tabindex\", 1)\n\tcanvas.Get(\"style\").Set(\"outline\", \"none\")\n\n\t\/\/ Keyboard\n\tcanvas.Call(\"addEventListener\", \"keydown\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tif e.Get(\"code\") == js.Undefined {\n\t\t\t\/\/ Assume that UA is Safari.\n\t\t\tcode := e.Get(\"keyCode\").Int()\n\t\t\tcurrentInput.keyDownSafari(code)\n\t\t\treturn\n\t\t}\n\t\tcode := e.Get(\"code\").String()\n\t\tcurrentInput.keyDown(code)\n\t})\n\tcanvas.Call(\"addEventListener\", \"keyup\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tif e.Get(\"code\") == js.Undefined {\n\t\t\t\/\/ Assume that UA is Safari.\n\t\t\tcode := e.Get(\"keyCode\").Int()\n\t\t\tcurrentInput.keyUpSafari(code)\n\t\t}\n\t\tcode := e.Get(\"code\").String()\n\t\tcurrentInput.keyUp(code)\n\t})\n\n\t\/\/ Mouse\n\tcanvas.Call(\"addEventListener\", \"mousedown\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tbutton := e.Get(\"button\").Int()\n\t\tcurrentInput.mouseDown(button)\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"mouseup\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tbutton := e.Get(\"button\").Int()\n\t\tcurrentInput.mouseUp(button)\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"mousemove\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tsetMouseCursorFromEvent(e)\n\t})\n\tcanvas.Call(\"addEventListener\", \"contextmenu\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t})\n\n\t\/\/ Touch\n\tcanvas.Call(\"addEventListener\", \"touchstart\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\tcanvas.Call(\"addEventListener\", \"touchend\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\tcanvas.Call(\"addEventListener\", \"touchmove\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t\tcurrentInput.updateTouches(touchEventToTouches(e))\n\t})\n\n\t\/\/ Gamepad\n\twindow.Call(\"addEventListener\", \"gamepadconnected\", func(e *js.Object) {\n\t\t\/\/ Do nothing.\n\t})\n\n\tcanvas.Call(\"addEventListener\", \"webglcontextlost\", func(e *js.Object) {\n\t\te.Call(\"preventDefault\")\n\t})\n\tcanvas.Call(\"addEventListener\", \"webglcontextrestored\", func(e *js.Object) {\n\t\t\/\/ Do nothing.\n\t})\n\n\treturn nil\n}\n\nfunc setMouseCursorFromEvent(e *js.Object) {\n\tscale := currentUI.scale\n\trect := canvas.Call(\"getBoundingClientRect\")\n\tx, y := e.Get(\"clientX\").Int(), e.Get(\"clientY\").Int()\n\tx -= rect.Get(\"left\").Int()\n\ty -= rect.Get(\"top\").Int()\n\tcurrentInput.setMouseCursor(int(float64(x)\/scale), int(float64(y)\/scale))\n}\n\nfunc devicePixelRatio() float64 {\n\tratio := js.Global.Get(\"window\").Get(\"devicePixelRatio\").Float()\n\tif ratio == 0 {\n\t\tratio = 1\n\t}\n\treturn ratio\n}\n\nfunc RunMainThreadLoop(ch <-chan error) error {\n\treturn <-ch\n}\n\nfunc Run(width, height int, scale float64, title string, g GraphicsContext) error {\n\tu := currentUI\n\tdoc := js.Global.Get(\"document\")\n\tdoc.Set(\"title\", title)\n\tu.setScreenSize(width, height, scale)\n\tcanvas.Call(\"focus\")\n\tif err := opengl.Init(); err != nil {\n\t\treturn err\n\t}\n\treturn u.loop(g)\n}\n\nfunc (u *userInterface) size() (width, height int) {\n\ta := u.actualScreenScale()\n\tif a == 0 {\n\t\t\/\/ a == 0 only on the initial state.\n\t\treturn\n\t}\n\twidth = int(canvas.Get(\"width\").Float() \/ a)\n\theight = int(canvas.Get(\"height\").Float() \/ a)\n\treturn\n}\n\nfunc isSafari() bool {\n\tua := js.Global.Get(\"navigator\").Get(\"userAgent\").String()\n\tif !strings.Contains(ua, \"Safari\") {\n\t\treturn false\n\t}\n\tif strings.Contains(ua, \"Chrome\") {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (u *userInterface) setScreenSize(width, height int, scale float64) bool {\n\tw, h := u.size()\n\ts := u.scale\n\tif w == width && h == height && s == scale {\n\t\treturn false\n\t}\n\tu.scale = scale\n\t\/\/ CSS imageRendering seems useful to enlarge the screen,\n\t\/\/ but doesn't work in some cases (#306):\n\t\/\/ * Chrome just after restoring the lost context\n\t\/\/ * Safari\n\t\/\/ Let's use the pixel ratio as it is here.\n\tu.deviceScale = devicePixelRatio()\n\tcanvas.Set(\"width\", int(float64(width)*u.actualScreenScale()))\n\tcanvas.Set(\"height\", int(float64(height)*u.actualScreenScale()))\n\tcanvasStyle := canvas.Get(\"style\")\n\n\tcssWidth := int(float64(width) * scale)\n\tcssHeight := int(float64(height) * scale)\n\tcanvasStyle.Set(\"width\", strconv.Itoa(cssWidth)+\"px\")\n\tcanvasStyle.Set(\"height\", strconv.Itoa(cssHeight)+\"px\")\n\t\/\/ CSS calc requires space chars.\n\tcanvasStyle.Set(\"left\", \"calc((100% - \"+strconv.Itoa(cssWidth)+\"px) \/ 2)\")\n\tcanvasStyle.Set(\"top\", \"calc((100% - \"+strconv.Itoa(cssHeight)+\"px) \/ 2)\")\n\tu.sizeChanged = true\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Ceph-CSI Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tklog \"k8s.io\/klog\/v2\"\n)\n\n\/\/ enum defining logging levels.\nconst (\n\tDefault klog.Level = iota + 1\n\tUseful\n\tExtended\n\tDebug\n\tTrace\n)\n\ntype contextKey string\n\n\/\/ CtxKey for context based logging.\nvar CtxKey = contextKey(\"ID\")\n\n\/\/ ReqID for logging request ID.\nvar ReqID = contextKey(\"Req-ID\")\n\n\/\/ Log helps in context based logging.\nfunc Log(ctx context.Context, format string) string {\n\tid := ctx.Value(CtxKey)\n\tif id == nil {\n\t\treturn format\n\t}\n\ta := fmt.Sprintf(\"ID: %v \", id)\n\treqID := ctx.Value(ReqID)\n\tif reqID == nil {\n\t\treturn a + format\n\t}\n\ta += fmt.Sprintf(\"Req-ID: %v \", reqID)\n\treturn a + format\n}\n\n\/\/ FatalLog helps in logging fatal errors.\nfunc FatalLog(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.FatalDepth(1, logMessage)\n}\n\n\/\/ ErrorLogMsg helps in logging errors with message.\nfunc ErrorLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.ErrorDepth(1, logMessage)\n}\n\n\/\/ ErrorLog helps in logging errors with context.\nfunc ErrorLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\tklog.ErrorDepth(1, logMessage)\n}\n\n\/\/ WarningLog helps in logging warnings.\nfunc WarningLog(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.WarningDepth(1, logMessage)\n}\n\n\/\/ DefaultLog helps in logging with klog.level 1.\nfunc DefaultLog(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Default).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ UsefulLog helps in logging with klog.level 2.\nfunc UsefulLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Useful).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ ExtendedLogMsg helps in logging a message with klog.level 3.\nfunc ExtendedLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Extended).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ ExtendedLog helps in logging with klog.level 3.\nfunc ExtendedLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Extended).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ DebugLogMsg helps in logging a message with klog.level 4.\nfunc DebugLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Debug).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ DebugLog helps in logging with klog.level 4.\nfunc DebugLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Debug).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ TraceLogMsg helps in logging a message with klog.level 5.\nfunc TraceLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Trace).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ TraceLog helps in logging with klog.level 5.\nfunc TraceLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Trace).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n<commit_msg>util: rename WarningLog to WarningLogMsg<commit_after>\/*\nCopyright 2019 The Ceph-CSI Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tklog \"k8s.io\/klog\/v2\"\n)\n\n\/\/ enum defining logging levels.\nconst (\n\tDefault klog.Level = iota + 1\n\tUseful\n\tExtended\n\tDebug\n\tTrace\n)\n\ntype contextKey string\n\n\/\/ CtxKey for context based logging.\nvar CtxKey = contextKey(\"ID\")\n\n\/\/ ReqID for logging request ID.\nvar ReqID = contextKey(\"Req-ID\")\n\n\/\/ Log helps in context based logging.\nfunc Log(ctx context.Context, format string) string {\n\tid := ctx.Value(CtxKey)\n\tif id == nil {\n\t\treturn format\n\t}\n\ta := fmt.Sprintf(\"ID: %v \", id)\n\treqID := ctx.Value(ReqID)\n\tif reqID == nil {\n\t\treturn a + format\n\t}\n\ta += fmt.Sprintf(\"Req-ID: %v \", reqID)\n\treturn a + format\n}\n\n\/\/ FatalLog helps in logging fatal errors.\nfunc FatalLog(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.FatalDepth(1, logMessage)\n}\n\n\/\/ ErrorLogMsg helps in logging errors with message.\nfunc ErrorLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.ErrorDepth(1, logMessage)\n}\n\n\/\/ ErrorLog helps in logging errors with context.\nfunc ErrorLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\tklog.ErrorDepth(1, logMessage)\n}\n\n\/\/ WarningLogMsg helps in logging warnings with message.\nfunc WarningLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\tklog.WarningDepth(1, logMessage)\n}\n\n\/\/ DefaultLog helps in logging with klog.level 1.\nfunc DefaultLog(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Default).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ UsefulLog helps in logging with klog.level 2.\nfunc UsefulLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Useful).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ ExtendedLogMsg helps in logging a message with klog.level 3.\nfunc ExtendedLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Extended).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ ExtendedLog helps in logging with klog.level 3.\nfunc ExtendedLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Extended).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ DebugLogMsg helps in logging a message with klog.level 4.\nfunc DebugLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Debug).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ DebugLog helps in logging with klog.level 4.\nfunc DebugLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Debug).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ TraceLogMsg helps in logging a message with klog.level 5.\nfunc TraceLogMsg(message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(message, args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Trace).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n\n\/\/ TraceLog helps in logging with klog.level 5.\nfunc TraceLog(ctx context.Context, message string, args ...interface{}) {\n\tlogMessage := fmt.Sprintf(Log(ctx, message), args...)\n\t\/\/ If logging is disabled, don't evaluate the arguments\n\tif klog.V(Trace).Enabled() {\n\t\tklog.InfoDepth(1, logMessage)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package seelog\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n)\n\n\/\/ File and directory permitions.\nconst (\n\tdefaultFilePermissions = 0666\n\tdefaultDirectoryPermissions = 0767\n)\n\nconst (\n\t\/\/ Max number of directories can be read asynchronously.\n\tmaxDirNumberReadAsync = 1000\n)\n\ntype cannotOpenFileError struct {\n\tbaseError\n}\n\nfunc newCannotOpenFileError(fname string) *cannotOpenFileError {\n\treturn &cannotOpenFileError{baseError{message: \"Cannot open file: \" + fname}}\n}\n\ntype notDirectoryError struct {\n\tbaseError\n}\n\nfunc newNotDirectoryError(dname string) *notDirectoryError {\n\treturn ¬DirectoryError{baseError{message: dname + \" is not directory\"}}\n}\n\n\/\/ fileFilter is a filtering criteria function for '*os.File'.\n\/\/ Must return 'false' to set aside the given file.\ntype fileFilter func(os.FileInfo, *os.File) bool\n\n\/\/ filePathFilter is a filtering creteria function for file path.\n\/\/ Must return 'false' to set aside the given file.\ntype filePathFilter func(filePath string) bool\n\n\/\/ GetSubdirNames returns a list of directories found in\n\/\/ the given one with dirPath.\nfunc getSubdirNames(dirPath string) ([]string, error) {\n\tfi, err := os.Stat(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, newNotDirectoryError(dirPath)\n\t}\n\tdd, err := os.Open(dirPath)\n\t\/\/ Cannot open file.\n\tif err != nil {\n\t\tif dd != nil {\n\t\t\tdd.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer dd.Close()\n\t\/\/ TODO: Improve performance by buffering reading.\n\tallEntities, err := dd.Readdir(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubDirs := []string{}\n\tfor _, entity := range allEntities {\n\t\tif entity.IsDir() {\n\t\t\tsubDirs = append(subDirs, entity.Name())\n\t\t}\n\t}\n\treturn subDirs, nil\n}\n\n\/\/ getSubdirAbsPaths recursively visit all the subdirectories\n\/\/ starting from the given directory and returns absolute paths for them.\nfunc getAllSubdirAbsPaths(dirPath string) (res []string, err error) {\n\tdps, err := getSubdirAbsPaths(dirPath)\n\tif err != nil {\n\t\tres = []string{}\n\t\treturn\n\t}\n\tres = append(res, dps...)\n\tfor _, dp := range dps {\n\t\tsdps, err := getAllSubdirAbsPaths(dp)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tres = append(res, sdps...)\n\t}\n\treturn\n}\n\n\/\/ getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory.\n\/\/ Input: (I1) dirPath - absolute path of a directory in question.\n\/\/ Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation.\n\/\/ Remark: If error (O2) is non-nil then (O1) is nil and vice versa.\nfunc getSubdirAbsPaths(dirPath string) ([]string, error) {\n\tsdns, err := getSubdirNames(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trsdns := []string{}\n\tfor _, sdn := range sdns {\n\t\trsdns = append(rsdns, filepath.Join(dirPath, sdn))\n\t}\n\treturn rsdns, nil\n}\n\n\/\/ getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.\n\/\/ Remark: Ignores files for which fileFilter returns false\nfunc getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\t\/\/ Size of read buffer (i.e. chunk of items read at a time).\n\trbs := 64\n\tresFiles := []*os.File{}\nL:\n\tfor {\n\t\t\/\/ Read directory entities by reasonable chuncks\n\t\t\/\/ to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t\/\/ It's OK.\n\t\tcase nil:\n\t\t\/\/ Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t\/\/ Something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t\/\/ THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t\/\/ NB: On Linux this could be a problem as\n\t\t\t\/\/ there are lots of file types available.\n\t\t\tif !fi.IsDir() {\n\t\t\t\tf, e := os.Open(filepath.Join(dirPath, fi.Name()))\n\t\t\t\tif e != nil {\n\t\t\t\t\tif f != nil {\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ THINK: Add nil as indicator that a problem occurred.\n\t\t\t\t\tresFiles = append(resFiles, nil)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/ Check filter condition.\n\t\t\t\tif fFilter != nil && !fFilter(fi, f) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresFiles = append(resFiles, f)\n\t\t\t}\n\t\t}\n\t}\n\treturn resFiles, nil\n}\n\nfunc isRegular(m os.FileMode) bool {\n\treturn m&os.ModeType == 0\n}\n\n\/\/ getDirFilePaths return full paths of the files located in the directory.\n\/\/ Remark: Ignores files for which fileFilter returns false.\nfunc getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\n\tvar absDirPath string\n\tif !filepath.IsAbs(dirPath) {\n\t\tabsDirPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot get absolute path of directory: %s\", err.Error())\n\t\t}\n\t} else {\n\t\tabsDirPath = dirPath\n\t}\n\n\t\/\/ TODO: check if dirPath is really directory.\n\t\/\/ Size of read buffer (i.e. chunk of items read at a time).\n\trbs := 2 << 5\n\tfilePaths := []string{}\n\n\tvar fp string\nL:\n\tfor {\n\t\t\/\/ Read directory entities by reasonable chuncks\n\t\t\/\/ to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t\/\/ It's OK.\n\t\tcase nil:\n\t\t\/\/ Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t\/\/ Indicate that something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t\/\/ THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t\/\/ NB: Should work on every Windows and non-Windows OS.\n\t\t\tif isRegular(fi.Mode()) {\n\t\t\t\tif pathIsName {\n\t\t\t\t\tfp = fi.Name()\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Build full path of a file.\n\t\t\t\t\tfp = filepath.Join(absDirPath, fi.Name())\n\t\t\t\t}\n\t\t\t\t\/\/ Check filter condition.\n\t\t\t\tif fpFilter != nil && !fpFilter(fp) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfilePaths = append(filePaths, fp)\n\t\t\t}\n\t\t}\n\t}\n\treturn filePaths, nil\n}\n\n\/\/ getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs\n\/\/ in map 'filesInDirMap': Key - directory name, value - *os.File slice.\nfunc getOpenFilesByDirectoryAsync(\n\tdirPaths []string,\n\tfFilter fileFilter,\n\tfilesInDirMap map[string][]*os.File,\n) error {\n\tn := len(dirPaths)\n\tif n > maxDirNumberReadAsync {\n\t\treturn fmt.Errorf(\"Number of input directories to be read exceeded max value %d\", maxDirNumberReadAsync)\n\t}\n\ttype filesInDirResult struct {\n\t\tDirName string\n\t\tFiles []*os.File\n\t\tError error\n\t}\n\tdirFilesChan := make(chan *filesInDirResult, n)\n\tvar wg sync.WaitGroup\n\t\/\/ Register n goroutines which are going to do work.\n\twg.Add(n)\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Launch asynchronously the piece of work.\n\t\tgo func(dirPath string) {\n\t\t\tfs, e := getOpenFilesInDir(dirPath, fFilter)\n\t\t\tdirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}\n\t\t\t\/\/ Mark the current goroutine as finished (work is done).\n\t\t\twg.Done()\n\t\t}(dirPaths[i])\n\t}\n\t\/\/ Wait for all goroutines to finish their work.\n\twg.Wait()\n\t\/\/ Close the error channel to let for-range clause\n\t\/\/ get all the buffered values without blocking and quit in the end.\n\tclose(dirFilesChan)\n\tfor fidr := range dirFilesChan {\n\t\tif fidr.Error == nil {\n\t\t\t\/\/ THINK: What will happen if the key is already present?\n\t\t\tfilesInDirMap[fidr.DirName] = fidr.Files\n\t\t} else {\n\t\t\treturn fidr.Error\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc copyFile(sf *os.File, dst string) (int64, error) {\n\tdf, err := os.Create(dst)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer df.Close()\n\treturn io.Copy(df, sf)\n}\n\n\/\/ fileExists return flag whether a given file exists\n\/\/ and operation error if an unclassified failure occurs.\nfunc fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, err\n\t\t}\n\t}\n\treturn true, nil\n}\n\n\/\/ createDirectory makes directory with a given name\n\/\/ making all parent directories if necessary.\nfunc createDirectory(dirPath string) error {\n\tvar dPath string\n\tvar err error\n\tif !filepath.IsAbs(dirPath) {\n\t\tdPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdPath = dirPath\n\t}\n\texists, err := fileExists(dPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exists {\n\t\treturn nil\n\t}\n\treturn os.MkdirAll(dPath, os.ModeDir)\n}\n\n\/\/ tryRemoveFile gives a try removing the file\n\/\/ only ignoring an error when the file does not exist.\nfunc tryRemoveFile(filePath string) (err error) {\n\terr = os.Remove(filePath)\n\tif os.IsNotExist(err) {\n\t\terr = nil\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ Unzips a specified zip file. Returns filename->filebytes map.\nfunc unzip(archiveName string) (map[string][]byte, error) {\n\t\/\/ Open a zip archive for reading.\n\tr, err := zip.OpenReader(archiveName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\t\/\/ Files to be added to archive\n\t\/\/ map file name to contents\n\tfiles := make(map[string][]byte)\n\n\t\/\/ Iterate through the files in the archive,\n\t\/\/ printing some of their contents.\n\tfor _, f := range r.File {\n\t\trc, err := f.Open()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbts, err := ioutil.ReadAll(rc)\n\t\trcErr := rc.Close()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif rcErr != nil {\n\t\t\treturn nil, rcErr\n\t\t}\n\n\t\tfiles[f.Name] = bts\n\t}\n\n\treturn files, nil\n}\n\n\/\/ Creates a zip file with the specified file names and byte contents.\nfunc createZip(archiveName string, files map[string][]byte) error {\n\t\/\/ Create a buffer to write our archive to.\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Create a new zip archive.\n\tw := zip.NewWriter(buf)\n\n\t\/\/ Write files\n\tfor fpath, fcont := range files {\n\t\tf, err := w.Create(fpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.Write([]byte(fcont))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Make sure to check the error on Close.\n\terr := w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Remove else after block with return statement.<commit_after>package seelog\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n)\n\n\/\/ File and directory permitions.\nconst (\n\tdefaultFilePermissions = 0666\n\tdefaultDirectoryPermissions = 0767\n)\n\nconst (\n\t\/\/ Max number of directories can be read asynchronously.\n\tmaxDirNumberReadAsync = 1000\n)\n\ntype cannotOpenFileError struct {\n\tbaseError\n}\n\nfunc newCannotOpenFileError(fname string) *cannotOpenFileError {\n\treturn &cannotOpenFileError{baseError{message: \"Cannot open file: \" + fname}}\n}\n\ntype notDirectoryError struct {\n\tbaseError\n}\n\nfunc newNotDirectoryError(dname string) *notDirectoryError {\n\treturn ¬DirectoryError{baseError{message: dname + \" is not directory\"}}\n}\n\n\/\/ fileFilter is a filtering criteria function for '*os.File'.\n\/\/ Must return 'false' to set aside the given file.\ntype fileFilter func(os.FileInfo, *os.File) bool\n\n\/\/ filePathFilter is a filtering creteria function for file path.\n\/\/ Must return 'false' to set aside the given file.\ntype filePathFilter func(filePath string) bool\n\n\/\/ GetSubdirNames returns a list of directories found in\n\/\/ the given one with dirPath.\nfunc getSubdirNames(dirPath string) ([]string, error) {\n\tfi, err := os.Stat(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, newNotDirectoryError(dirPath)\n\t}\n\tdd, err := os.Open(dirPath)\n\t\/\/ Cannot open file.\n\tif err != nil {\n\t\tif dd != nil {\n\t\t\tdd.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer dd.Close()\n\t\/\/ TODO: Improve performance by buffering reading.\n\tallEntities, err := dd.Readdir(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubDirs := []string{}\n\tfor _, entity := range allEntities {\n\t\tif entity.IsDir() {\n\t\t\tsubDirs = append(subDirs, entity.Name())\n\t\t}\n\t}\n\treturn subDirs, nil\n}\n\n\/\/ getSubdirAbsPaths recursively visit all the subdirectories\n\/\/ starting from the given directory and returns absolute paths for them.\nfunc getAllSubdirAbsPaths(dirPath string) (res []string, err error) {\n\tdps, err := getSubdirAbsPaths(dirPath)\n\tif err != nil {\n\t\tres = []string{}\n\t\treturn\n\t}\n\tres = append(res, dps...)\n\tfor _, dp := range dps {\n\t\tsdps, err := getAllSubdirAbsPaths(dp)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tres = append(res, sdps...)\n\t}\n\treturn\n}\n\n\/\/ getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory.\n\/\/ Input: (I1) dirPath - absolute path of a directory in question.\n\/\/ Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation.\n\/\/ Remark: If error (O2) is non-nil then (O1) is nil and vice versa.\nfunc getSubdirAbsPaths(dirPath string) ([]string, error) {\n\tsdns, err := getSubdirNames(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trsdns := []string{}\n\tfor _, sdn := range sdns {\n\t\trsdns = append(rsdns, filepath.Join(dirPath, sdn))\n\t}\n\treturn rsdns, nil\n}\n\n\/\/ getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.\n\/\/ Remark: Ignores files for which fileFilter returns false\nfunc getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\t\/\/ Size of read buffer (i.e. chunk of items read at a time).\n\trbs := 64\n\tresFiles := []*os.File{}\nL:\n\tfor {\n\t\t\/\/ Read directory entities by reasonable chuncks\n\t\t\/\/ to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t\/\/ It's OK.\n\t\tcase nil:\n\t\t\/\/ Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t\/\/ Something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t\/\/ THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t\/\/ NB: On Linux this could be a problem as\n\t\t\t\/\/ there are lots of file types available.\n\t\t\tif !fi.IsDir() {\n\t\t\t\tf, e := os.Open(filepath.Join(dirPath, fi.Name()))\n\t\t\t\tif e != nil {\n\t\t\t\t\tif f != nil {\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ THINK: Add nil as indicator that a problem occurred.\n\t\t\t\t\tresFiles = append(resFiles, nil)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/ Check filter condition.\n\t\t\t\tif fFilter != nil && !fFilter(fi, f) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresFiles = append(resFiles, f)\n\t\t\t}\n\t\t}\n\t}\n\treturn resFiles, nil\n}\n\nfunc isRegular(m os.FileMode) bool {\n\treturn m&os.ModeType == 0\n}\n\n\/\/ getDirFilePaths return full paths of the files located in the directory.\n\/\/ Remark: Ignores files for which fileFilter returns false.\nfunc getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {\n\tdfi, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, newCannotOpenFileError(\"Cannot open directory \" + dirPath)\n\t}\n\tdefer dfi.Close()\n\n\tvar absDirPath string\n\tif !filepath.IsAbs(dirPath) {\n\t\tabsDirPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot get absolute path of directory: %s\", err.Error())\n\t\t}\n\t} else {\n\t\tabsDirPath = dirPath\n\t}\n\n\t\/\/ TODO: check if dirPath is really directory.\n\t\/\/ Size of read buffer (i.e. chunk of items read at a time).\n\trbs := 2 << 5\n\tfilePaths := []string{}\n\n\tvar fp string\nL:\n\tfor {\n\t\t\/\/ Read directory entities by reasonable chuncks\n\t\t\/\/ to prevent overflows on big number of files.\n\t\tfis, e := dfi.Readdir(rbs)\n\t\tswitch e {\n\t\t\/\/ It's OK.\n\t\tcase nil:\n\t\t\/\/ Do nothing, just continue cycle.\n\t\tcase io.EOF:\n\t\t\tbreak L\n\t\t\/\/ Indicate that something went wrong.\n\t\tdefault:\n\t\t\treturn nil, e\n\t\t}\n\t\t\/\/ THINK: Maybe, use async running.\n\t\tfor _, fi := range fis {\n\t\t\t\/\/ NB: Should work on every Windows and non-Windows OS.\n\t\t\tif isRegular(fi.Mode()) {\n\t\t\t\tif pathIsName {\n\t\t\t\t\tfp = fi.Name()\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Build full path of a file.\n\t\t\t\t\tfp = filepath.Join(absDirPath, fi.Name())\n\t\t\t\t}\n\t\t\t\t\/\/ Check filter condition.\n\t\t\t\tif fpFilter != nil && !fpFilter(fp) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfilePaths = append(filePaths, fp)\n\t\t\t}\n\t\t}\n\t}\n\treturn filePaths, nil\n}\n\n\/\/ getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs\n\/\/ in map 'filesInDirMap': Key - directory name, value - *os.File slice.\nfunc getOpenFilesByDirectoryAsync(\n\tdirPaths []string,\n\tfFilter fileFilter,\n\tfilesInDirMap map[string][]*os.File,\n) error {\n\tn := len(dirPaths)\n\tif n > maxDirNumberReadAsync {\n\t\treturn fmt.Errorf(\"Number of input directories to be read exceeded max value %d\", maxDirNumberReadAsync)\n\t}\n\ttype filesInDirResult struct {\n\t\tDirName string\n\t\tFiles []*os.File\n\t\tError error\n\t}\n\tdirFilesChan := make(chan *filesInDirResult, n)\n\tvar wg sync.WaitGroup\n\t\/\/ Register n goroutines which are going to do work.\n\twg.Add(n)\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Launch asynchronously the piece of work.\n\t\tgo func(dirPath string) {\n\t\t\tfs, e := getOpenFilesInDir(dirPath, fFilter)\n\t\t\tdirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}\n\t\t\t\/\/ Mark the current goroutine as finished (work is done).\n\t\t\twg.Done()\n\t\t}(dirPaths[i])\n\t}\n\t\/\/ Wait for all goroutines to finish their work.\n\twg.Wait()\n\t\/\/ Close the error channel to let for-range clause\n\t\/\/ get all the buffered values without blocking and quit in the end.\n\tclose(dirFilesChan)\n\tfor fidr := range dirFilesChan {\n\t\tif fidr.Error == nil {\n\t\t\t\/\/ THINK: What will happen if the key is already present?\n\t\t\tfilesInDirMap[fidr.DirName] = fidr.Files\n\t\t} else {\n\t\t\treturn fidr.Error\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc copyFile(sf *os.File, dst string) (int64, error) {\n\tdf, err := os.Create(dst)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer df.Close()\n\treturn io.Copy(df, sf)\n}\n\n\/\/ fileExists return flag whether a given file exists\n\/\/ and operation error if an unclassified failure occurs.\nfunc fileExists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n\/\/ createDirectory makes directory with a given name\n\/\/ making all parent directories if necessary.\nfunc createDirectory(dirPath string) error {\n\tvar dPath string\n\tvar err error\n\tif !filepath.IsAbs(dirPath) {\n\t\tdPath, err = filepath.Abs(dirPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdPath = dirPath\n\t}\n\texists, err := fileExists(dPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exists {\n\t\treturn nil\n\t}\n\treturn os.MkdirAll(dPath, os.ModeDir)\n}\n\n\/\/ tryRemoveFile gives a try removing the file\n\/\/ only ignoring an error when the file does not exist.\nfunc tryRemoveFile(filePath string) (err error) {\n\terr = os.Remove(filePath)\n\tif os.IsNotExist(err) {\n\t\terr = nil\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ Unzips a specified zip file. Returns filename->filebytes map.\nfunc unzip(archiveName string) (map[string][]byte, error) {\n\t\/\/ Open a zip archive for reading.\n\tr, err := zip.OpenReader(archiveName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\t\/\/ Files to be added to archive\n\t\/\/ map file name to contents\n\tfiles := make(map[string][]byte)\n\n\t\/\/ Iterate through the files in the archive,\n\t\/\/ printing some of their contents.\n\tfor _, f := range r.File {\n\t\trc, err := f.Open()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbts, err := ioutil.ReadAll(rc)\n\t\trcErr := rc.Close()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif rcErr != nil {\n\t\t\treturn nil, rcErr\n\t\t}\n\n\t\tfiles[f.Name] = bts\n\t}\n\n\treturn files, nil\n}\n\n\/\/ Creates a zip file with the specified file names and byte contents.\nfunc createZip(archiveName string, files map[string][]byte) error {\n\t\/\/ Create a buffer to write our archive to.\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Create a new zip archive.\n\tw := zip.NewWriter(buf)\n\n\t\/\/ Write files\n\tfor fpath, fcont := range files {\n\t\tf, err := w.Create(fpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.Write([]byte(fcont))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Make sure to check the error on Close.\n\terr := w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package expr\n\nimport (\n\t\"github.com\/antonmedv\/expr\/checker\"\n\t\"github.com\/antonmedv\/expr\/compiler\"\n\t\"github.com\/antonmedv\/expr\/internal\/conf\"\n\t\"github.com\/antonmedv\/expr\/optimizer\"\n\t\"github.com\/antonmedv\/expr\/parser\"\n\t\"github.com\/antonmedv\/expr\/vm\"\n\t\"reflect\"\n)\n\n\/\/ Eval parses, compiles and runs given input.\nfunc Eval(input string, env interface{}) (interface{}, error) {\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprogram, err := compiler.Compile(tree, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutput, err := vm.Run(program, env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn output, nil\n}\n\n\/\/ Env specifies expected input of env for type checks.\n\/\/ If struct is passed, all fields will be treated as variables,\n\/\/ as well as all fields of embedded structs and struct itself.\n\/\/ If map is passed, all items will be treated as variables.\n\/\/ Methods defined on this type will be available as functions.\nfunc Env(i interface{}) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tif _, ok := i.(map[string]interface{}); ok {\n\t\t\tc.MapEnv = true\n\t\t}\n\t\tc.Types = conf.CreateTypesTable(i)\n\t}\n}\n\nfunc Operator(operator string, fns ...string) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Operators[operator] = append(c.Operators[operator], fns...)\n\t}\n}\n\n\/\/ AsBool tells the compiler to expect boolean result.\nfunc AsBool() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Bool\n\t}\n}\n\n\/\/ AsInt64 tells the compiler to expect int64 result.\nfunc AsInt64() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Int64\n\t}\n}\n\n\/\/ AsFloat64 tells the compiler to expect float64 result.\nfunc AsFloat64() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Float64\n\t}\n}\n\n\/\/ Optimize turns optimizations on or off.\nfunc Optimize(b bool) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Optimize = b\n\t}\n}\n\n\/\/ Compile parses and compiles given input expression to bytecode program.\nfunc Compile(input string, ops ...conf.Option) (*vm.Program, error) {\n\tconfig := &conf.Config{\n\t\tOperators: make(map[string][]string),\n\t\tOptimize: true,\n\t}\n\n\tfor _, op := range ops {\n\t\top(config)\n\t}\n\n\tif err := config.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Types != nil {\n\t\t_, err = checker.Check(tree, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchecker.PatchOperators(tree, config)\n\t}\n\n\tif config.Optimize {\n\t\toptimizer.Optimize(&tree.Node)\n\t}\n\n\tprogram, err := compiler.Compile(tree, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn program, nil\n}\n\n\/\/ Run evaluates given bytecode program.\nfunc Run(program *vm.Program, env interface{}) (interface{}, error) {\n\treturn vm.Run(program, env)\n}\n<commit_msg>Add Operator comment<commit_after>package expr\n\nimport (\n\t\"github.com\/antonmedv\/expr\/checker\"\n\t\"github.com\/antonmedv\/expr\/compiler\"\n\t\"github.com\/antonmedv\/expr\/internal\/conf\"\n\t\"github.com\/antonmedv\/expr\/optimizer\"\n\t\"github.com\/antonmedv\/expr\/parser\"\n\t\"github.com\/antonmedv\/expr\/vm\"\n\t\"reflect\"\n)\n\n\/\/ Eval parses, compiles and runs given input.\nfunc Eval(input string, env interface{}) (interface{}, error) {\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprogram, err := compiler.Compile(tree, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutput, err := vm.Run(program, env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn output, nil\n}\n\n\/\/ Env specifies expected input of env for type checks.\n\/\/ If struct is passed, all fields will be treated as variables,\n\/\/ as well as all fields of embedded structs and struct itself.\n\/\/ If map is passed, all items will be treated as variables.\n\/\/ Methods defined on this type will be available as functions.\nfunc Env(i interface{}) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tif _, ok := i.(map[string]interface{}); ok {\n\t\t\tc.MapEnv = true\n\t\t}\n\t\tc.Types = conf.CreateTypesTable(i)\n\t}\n}\n\n\/\/ Operator allows to override binary operator with function.\nfunc Operator(operator string, fn ...string) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Operators[operator] = append(c.Operators[operator], fn...)\n\t}\n}\n\n\/\/ AsBool tells the compiler to expect boolean result.\nfunc AsBool() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Bool\n\t}\n}\n\n\/\/ AsInt64 tells the compiler to expect int64 result.\nfunc AsInt64() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Int64\n\t}\n}\n\n\/\/ AsFloat64 tells the compiler to expect float64 result.\nfunc AsFloat64() conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Float64\n\t}\n}\n\n\/\/ Optimize turns optimizations on or off.\nfunc Optimize(b bool) conf.Option {\n\treturn func(c *conf.Config) {\n\t\tc.Optimize = b\n\t}\n}\n\n\/\/ Compile parses and compiles given input expression to bytecode program.\nfunc Compile(input string, ops ...conf.Option) (*vm.Program, error) {\n\tconfig := &conf.Config{\n\t\tOperators: make(map[string][]string),\n\t\tOptimize: true,\n\t}\n\n\tfor _, op := range ops {\n\t\top(config)\n\t}\n\n\tif err := config.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Types != nil {\n\t\t_, err = checker.Check(tree, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchecker.PatchOperators(tree, config)\n\t}\n\n\tif config.Optimize {\n\t\toptimizer.Optimize(&tree.Node)\n\t}\n\n\tprogram, err := compiler.Compile(tree, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn program, nil\n}\n\n\/\/ Run evaluates given bytecode program.\nfunc Run(program *vm.Program, env interface{}) (interface{}, error) {\n\treturn vm.Run(program, env)\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\ttuf \"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-tuf\/client\"\n\t\"github.com\/flynn\/flynn\/pinkerton\"\n\t\"github.com\/flynn\/flynn\/pkg\/tufutil\"\n)\n\nfunc init() {\n\tRegister(\"download\", runDownload, `\nusage: flynn-host download [--driver=<name>] [--root=<path>] [--repository=<uri>] [--tuf-db=<path>] [--config-dir=<dir>] [--bin-dir=<dir>]\n\nOptions:\n -d --driver=<name> image storage driver [default: aufs]\n -r --root=<path> image storage root [default: \/var\/lib\/docker]\n -u --repository=<uri> TUF repository URI [default: https:\/\/dl.flynn.io\/tuf]\n -t --tuf-db=<path> local TUF file [default: \/etc\/flynn\/tuf.db]\n -c --config-dir=<dir> config directory [default: \/etc\/flynn]\n -b --bin-dir=<dir> binary directory [default: \/usr\/local\/bin]\n\nDownload container images and Flynn binaries from a TUF repository`)\n}\n\nfunc runDownload(args *docopt.Args) error {\n\tif err := os.MkdirAll(args.String[\"--root\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating root dir: %s\", err)\n\t}\n\n\t\/\/ create a TUF client, initialize it if the DB doesn't exist and update it\n\ttufDB := args.String[\"--tuf-db\"]\n\tneedsInit := false\n\tif _, err := os.Stat(tufDB); os.IsNotExist(err) {\n\t\tneedsInit = true\n\t}\n\tlocal, err := tuf.FileLocalStore(tufDB)\n\tif err != nil {\n\t\treturn err\n\t}\n\tremote, err := tuf.HTTPRemoteStore(args.String[\"--repository\"], nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := tuf.NewClient(local, remote)\n\tif needsInit {\n\t\tif err := client.Init(rootKeys, len(rootKeys)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {\n\t\treturn err\n\t}\n\n\t\/\/ pull images from the TUF repo\n\tif err := pinkerton.PullImagesWithClient(\n\t\tclient,\n\t\targs.String[\"--repository\"],\n\t\targs.String[\"--driver\"],\n\t\targs.String[\"--root\"],\n\t\tpinkerton.InfoPrinter(false),\n\t); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ download the upstart config and image manifests\n\tif err := os.MkdirAll(args.String[\"--config-dir\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating config dir: %s\", err)\n\t}\n\tfor _, path := range []string{\"\/upstart.conf\", \"\/host-manifest.json\", \"\/bootstrap-manifest.json\"} {\n\t\tif _, err := downloadGzippedFile(client, path, args.String[\"--config-dir\"]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ download the init and cli binaries\n\tif err := os.MkdirAll(args.String[\"--bin-dir\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating bin dir: %s\", err)\n\t}\n\tfor _, path := range []string{\"\/flynn-linux-amd64\", \"\/flynn-init\"} {\n\t\tdst, err := downloadGzippedFile(client, path, args.String[\"--bin-dir\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chmod(dst, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc downloadGzippedFile(client *tuf.Client, path, dir string) (string, error) {\n\tfile, err := tufutil.Download(client, path+\".gz\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\tdst := filepath.Join(dir, path)\n\tout, err := os.Create(dst)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tgz, err := gzip.NewReader(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer gz.Close()\n\t_, err = io.Copy(out, gz)\n\treturn dst, err\n}\n<commit_msg>host: Fix initializing the TUF client<commit_after>package cli\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-docopt\"\n\ttuf \"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-tuf\/client\"\n\t\"github.com\/flynn\/flynn\/pinkerton\"\n\t\"github.com\/flynn\/flynn\/pkg\/tufutil\"\n)\n\nfunc init() {\n\tRegister(\"download\", runDownload, `\nusage: flynn-host download [--driver=<name>] [--root=<path>] [--repository=<uri>] [--tuf-db=<path>] [--config-dir=<dir>] [--bin-dir=<dir>]\n\nOptions:\n -d --driver=<name> image storage driver [default: aufs]\n -r --root=<path> image storage root [default: \/var\/lib\/docker]\n -u --repository=<uri> TUF repository URI [default: https:\/\/dl.flynn.io\/tuf]\n -t --tuf-db=<path> local TUF file [default: \/etc\/flynn\/tuf.db]\n -c --config-dir=<dir> config directory [default: \/etc\/flynn]\n -b --bin-dir=<dir> binary directory [default: \/usr\/local\/bin]\n\nDownload container images and Flynn binaries from a TUF repository`)\n}\n\nfunc runDownload(args *docopt.Args) error {\n\tif err := os.MkdirAll(args.String[\"--root\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating root dir: %s\", err)\n\t}\n\n\t\/\/ create a TUF client and update it\n\ttufDB := args.String[\"--tuf-db\"]\n\tlocal, err := tuf.FileLocalStore(tufDB)\n\tif err != nil {\n\t\treturn err\n\t}\n\tremote, err := tuf.HTTPRemoteStore(args.String[\"--repository\"], nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := tuf.NewClient(local, remote)\n\tif err := updateTUFClient(client); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ pull images from the TUF repo\n\tif err := pinkerton.PullImagesWithClient(\n\t\tclient,\n\t\targs.String[\"--repository\"],\n\t\targs.String[\"--driver\"],\n\t\targs.String[\"--root\"],\n\t\tpinkerton.InfoPrinter(false),\n\t); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ download the upstart config and image manifests\n\tif err := os.MkdirAll(args.String[\"--config-dir\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating config dir: %s\", err)\n\t}\n\tfor _, path := range []string{\"\/upstart.conf\", \"\/host-manifest.json\", \"\/bootstrap-manifest.json\"} {\n\t\tif _, err := downloadGzippedFile(client, path, args.String[\"--config-dir\"]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ download the init and cli binaries\n\tif err := os.MkdirAll(args.String[\"--bin-dir\"], 0755); err != nil {\n\t\treturn fmt.Errorf(\"error creating bin dir: %s\", err)\n\t}\n\tfor _, path := range []string{\"\/flynn-linux-amd64\", \"\/flynn-init\"} {\n\t\tdst, err := downloadGzippedFile(client, path, args.String[\"--bin-dir\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chmod(dst, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc downloadGzippedFile(client *tuf.Client, path, dir string) (string, error) {\n\tfile, err := tufutil.Download(client, path+\".gz\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\tdst := filepath.Join(dir, path)\n\tout, err := os.Create(dst)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tgz, err := gzip.NewReader(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer gz.Close()\n\t_, err = io.Copy(out, gz)\n\treturn dst, err\n}\n\n\/\/ updateTUFClient updates the given client, initializing and re-running the\n\/\/ update if ErrNoRootKeys is returned.\nfunc updateTUFClient(client *tuf.Client) error {\n\t_, err := client.Update()\n\tif err == nil || tuf.IsLatestSnapshot(err) {\n\t\treturn nil\n\t}\n\tif err == tuf.ErrNoRootKeys {\n\t\tif err := client.Init(rootKeys, len(rootKeys)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn updateTUFClient(client)\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package apiserver\n\nimport (\n\t\"encoding\/json\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Dataman-Cloud\/swan\/src\/manager\/apiserver\/metrics\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/emicklei\/go-restful\/swagger\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n)\n\nconst (\n\tAPI_VERSION = \"v_beta\"\n)\n\n\/\/ RootPaths lists the paths available at root.\n\/\/ For example: \"\/healthz\", \"\/apis\".\ntype RootPaths struct {\n\t\/\/ paths are the paths available at root.\n\tPaths []string\n}\n\ntype ApiRegister interface {\n\tRegister(*restful.Container)\n}\n\ntype ApiServer struct {\n\taddr string\n\tsock string\n\tapiRegisters []ApiRegister\n}\n\nfunc init() {\n\tmetrics.Register()\n}\n\nfunc NewApiServer(addr, sock string) *ApiServer {\n\treturn &ApiServer{\n\t\taddr: addr,\n\t\tsock: sock,\n\t}\n}\n\nfunc Install(apiServer *ApiServer, apiRegister ApiRegister) {\n\tapiServer.apiRegisters = append(apiServer.apiRegisters, apiRegister)\n}\n\nfunc (apiServer *ApiServer) Start() error {\n\twsContainer := restful.NewContainer()\n\n\t\/\/ Register webservices here\n\tfor _, ws := range apiServer.apiRegisters {\n\t\tws.Register(wsContainer)\n\t}\n\n\t\/\/ Add container filter to enable CORS\n\tcors := restful.CrossOriginResourceSharing{\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"PATCH\"},\n\t\tCookiesAllowed: false,\n\t\tContainer: wsContainer}\n\twsContainer.Filter(cors.Filter)\n\n\t\/\/ Add log filter\n\twsContainer.Filter(NCSACommonLogFormatLogger())\n\n\t\/\/ Add prometheus metrics\n\twsContainer.Handle(\"\/metrics\", promhttp.Handler())\n\n\t\/\/ Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API\n\t\/\/ You need to download the Swagger HTML5 assets and change the FilePath location in the config below.\n\t\/\/ Open http:\/\/localhost:8080\/apidocs and enter http:\/\/localhost:8080\/apidocs.json in the api input field.\n\t\/\/ TODO(xychu): add a config flag for swagger UI, and also for the swagger UI file path.\n\tswggerUiPath, _ := filepath.Abs(\".\/third_party\/swagger-ui-2.2.8\")\n\tlogrus.Debugf(\"xychu: swaggerUIPath: %s\", swggerUiPath)\n\tconfig := swagger.Config{\n\t\tWebServices: wsContainer.RegisteredWebServices(), \/\/ you control what services are visible\n\t\t\/\/ WebServicesUrl: \"\",\n\t\tApiVersion: API_VERSION,\n\t\tApiPath: \"\/apidocs.json\",\n\n\t\t\/\/ Optionally, specifiy where the UI is located\n\t\tSwaggerPath: \"\/apidocs\/\",\n\t\tSwaggerFilePath: swggerUiPath,\n\t}\n\tswagger.RegisterSwaggerService(config, wsContainer)\n\n\t\/\/ Add API index handler\n\twsContainer.ServeMux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tstatus := http.StatusOK\n\t\tif r.URL.Path != \"\/\" && r.URL.Path != \"\/index.html\" {\n\t\t\t\/\/ Since \"\/\" matches all paths, handleIndex is called for all paths for which there is no handler registered.\n\t\t\t\/\/ We want to return a 404 status with a list of all valid paths, incase of an invalid URL request.\n\t\t\tstatus = http.StatusNotFound\n\t\t\tw.WriteHeader(status)\n\t\t\treturn\n\t\t}\n\t\tvar handledPaths []string\n\t\t\/\/ Extract the paths handled using restful.WebService\n\t\tfor _, ws := range wsContainer.RegisteredWebServices() {\n\t\t\thandledPaths = append(handledPaths, ws.RootPath())\n\t\t}\n\t\t\/\/ Extract the paths handled using mux handler.\n\t\thandledPaths = append(handledPaths, \"\/metrics\", \"\/apidocs\/\")\n\t\tsort.Strings(handledPaths)\n\n\t\toutput, err := json.MarshalIndent(RootPaths{Paths: handledPaths}, \"\", \" \")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.WriteHeader(status)\n\t\tw.Write(output)\n\t})\n\n\tgo func() {\n\t\tsrv := &http.Server{\n\t\t\tAddr: apiServer.sock,\n\t\t\tHandler: wsContainer,\n\t\t}\n\t\tln, err := net.Listen(\"unix\", apiServer.sock)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"can't listen on socket %s:%s\", apiServer.sock, err.Error())\n\t\t}\n\t\tlogrus.Printf(\"start listening on %s\", apiServer.sock)\n\t\tsrv.Serve(ln)\n\t}()\n\n\tlogrus.Printf(\"start listening on %s\", apiServer.addr)\n\tserver := &http.Server{Addr: apiServer.addr, Handler: wsContainer}\n\tlogrus.Fatal(server.ListenAndServe())\n\n\treturn nil\n}\n\nfunc NCSACommonLogFormatLogger() restful.FilterFunction {\n\treturn func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {\n\t\tvar username = \"-\"\n\t\tif req.Request.URL.User != nil {\n\t\t\tif name := req.Request.URL.User.Username(); name != \"\" {\n\t\t\t\tusername = name\n\t\t\t}\n\t\t}\n\t\tchain.ProcessFilter(req, resp)\n\t\tlogrus.Printf(\"%s - %s [%s] \\\"%s %s %s\\\" %d %d\",\n\t\t\tstrings.Split(req.Request.RemoteAddr, \":\")[0],\n\t\t\tusername,\n\t\t\ttime.Now().Format(\"02\/Jan\/2006:15:04:05 -0700\"),\n\t\t\treq.Request.Method,\n\t\t\treq.Request.URL.RequestURI(),\n\t\t\treq.Request.Proto,\n\t\t\tresp.StatusCode(),\n\t\t\tresp.ContentLength(),\n\t\t)\n\t}\n}\n<commit_msg>remove sock file before listen sock file<commit_after>package apiserver\n\nimport (\n\t\"encoding\/json\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Dataman-Cloud\/swan\/src\/manager\/apiserver\/metrics\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/emicklei\/go-restful\/swagger\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n)\n\nconst (\n\tAPI_VERSION = \"v_beta\"\n)\n\n\/\/ RootPaths lists the paths available at root.\n\/\/ For example: \"\/healthz\", \"\/apis\".\ntype RootPaths struct {\n\t\/\/ paths are the paths available at root.\n\tPaths []string\n}\n\ntype ApiRegister interface {\n\tRegister(*restful.Container)\n}\n\ntype ApiServer struct {\n\taddr string\n\tsock string\n\tapiRegisters []ApiRegister\n}\n\nfunc init() {\n\tmetrics.Register()\n}\n\nfunc NewApiServer(addr, sock string) *ApiServer {\n\treturn &ApiServer{\n\t\taddr: addr,\n\t\tsock: sock,\n\t}\n}\n\nfunc Install(apiServer *ApiServer, apiRegister ApiRegister) {\n\tapiServer.apiRegisters = append(apiServer.apiRegisters, apiRegister)\n}\n\nfunc (apiServer *ApiServer) Start() error {\n\twsContainer := restful.NewContainer()\n\n\t\/\/ Register webservices here\n\tfor _, ws := range apiServer.apiRegisters {\n\t\tws.Register(wsContainer)\n\t}\n\n\t\/\/ Add container filter to enable CORS\n\tcors := restful.CrossOriginResourceSharing{\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"PATCH\"},\n\t\tCookiesAllowed: false,\n\t\tContainer: wsContainer}\n\twsContainer.Filter(cors.Filter)\n\n\t\/\/ Add log filter\n\twsContainer.Filter(NCSACommonLogFormatLogger())\n\n\t\/\/ Add prometheus metrics\n\twsContainer.Handle(\"\/metrics\", promhttp.Handler())\n\n\t\/\/ Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API\n\t\/\/ You need to download the Swagger HTML5 assets and change the FilePath location in the config below.\n\t\/\/ Open http:\/\/localhost:8080\/apidocs and enter http:\/\/localhost:8080\/apidocs.json in the api input field.\n\t\/\/ TODO(xychu): add a config flag for swagger UI, and also for the swagger UI file path.\n\tswggerUiPath, _ := filepath.Abs(\".\/third_party\/swagger-ui-2.2.8\")\n\tlogrus.Debugf(\"xychu: swaggerUIPath: %s\", swggerUiPath)\n\tconfig := swagger.Config{\n\t\tWebServices: wsContainer.RegisteredWebServices(), \/\/ you control what services are visible\n\t\t\/\/ WebServicesUrl: \"\",\n\t\tApiVersion: API_VERSION,\n\t\tApiPath: \"\/apidocs.json\",\n\n\t\t\/\/ Optionally, specifiy where the UI is located\n\t\tSwaggerPath: \"\/apidocs\/\",\n\t\tSwaggerFilePath: swggerUiPath,\n\t}\n\tswagger.RegisterSwaggerService(config, wsContainer)\n\n\t\/\/ Add API index handler\n\twsContainer.ServeMux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tstatus := http.StatusOK\n\t\tif r.URL.Path != \"\/\" && r.URL.Path != \"\/index.html\" {\n\t\t\t\/\/ Since \"\/\" matches all paths, handleIndex is called for all paths for which there is no handler registered.\n\t\t\t\/\/ We want to return a 404 status with a list of all valid paths, incase of an invalid URL request.\n\t\t\tstatus = http.StatusNotFound\n\t\t\tw.WriteHeader(status)\n\t\t\treturn\n\t\t}\n\t\tvar handledPaths []string\n\t\t\/\/ Extract the paths handled using restful.WebService\n\t\tfor _, ws := range wsContainer.RegisteredWebServices() {\n\t\t\thandledPaths = append(handledPaths, ws.RootPath())\n\t\t}\n\t\t\/\/ Extract the paths handled using mux handler.\n\t\thandledPaths = append(handledPaths, \"\/metrics\", \"\/apidocs\/\")\n\t\tsort.Strings(handledPaths)\n\n\t\toutput, err := json.MarshalIndent(RootPaths{Paths: handledPaths}, \"\", \" \")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.WriteHeader(status)\n\t\tw.Write(output)\n\t})\n\n\tgo func() {\n\t\tif _, err := os.Stat(apiServer.sock); err == nil {\n\t\t\t\/\/ remove the sock file if it's already existed\n\t\t\tif err := os.Remove(apiServer.sock); err != nil {\n\t\t\t\tlogrus.Errorf(\"can't listen on socket %s remove existed file failed. Error: %s\", apiServer.sock, err.Error())\n\t\t\t}\n\t\t}\n\t\tsrv := &http.Server{\n\t\t\tAddr: apiServer.sock,\n\t\t\tHandler: wsContainer,\n\t\t}\n\t\tln, err := net.Listen(\"unix\", apiServer.sock)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"can't listen on socket %s:%s\", apiServer.sock, err.Error())\n\t\t}\n\t\tlogrus.Printf(\"start listening on %s\", apiServer.sock)\n\t\tsrv.Serve(ln)\n\t}()\n\n\tlogrus.Printf(\"start listening on %s\", apiServer.addr)\n\tserver := &http.Server{Addr: apiServer.addr, Handler: wsContainer}\n\tlogrus.Fatal(server.ListenAndServe())\n\n\treturn nil\n}\n\nfunc NCSACommonLogFormatLogger() restful.FilterFunction {\n\treturn func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {\n\t\tvar username = \"-\"\n\t\tif req.Request.URL.User != nil {\n\t\t\tif name := req.Request.URL.User.Username(); name != \"\" {\n\t\t\t\tusername = name\n\t\t\t}\n\t\t}\n\t\tchain.ProcessFilter(req, resp)\n\t\tlogrus.Printf(\"%s - %s [%s] \\\"%s %s %s\\\" %d %d\",\n\t\t\tstrings.Split(req.Request.RemoteAddr, \":\")[0],\n\t\t\tusername,\n\t\t\ttime.Now().Format(\"02\/Jan\/2006:15:04:05 -0700\"),\n\t\t\treq.Request.Method,\n\t\t\treq.Request.URL.RequestURI(),\n\t\t\treq.Request.Proto,\n\t\t\tresp.StatusCode(),\n\t\t\tresp.ContentLength(),\n\t\t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\n\tprom \"contrib.go.opencensus.io\/exporter\/prometheus\"\n\t\"go.opencensus.io\/resource\"\n\t\"go.opencensus.io\/stats\/view\"\n\t\"go.uber.org\/zap\"\n)\n\nvar (\n\tcurPromSrv *http.Server\n\tcurPromSrvMux sync.Mutex\n)\n\ntype emptyPromExporter struct{}\n\nvar _ view.Exporter = (*emptyPromExporter)(nil)\n\nfunc (emptyPromExporter) ExportView(viewData *view.Data) {\n\t\/\/ Prometheus runs a loop to read stats via ReadAndExport, so this is just\n\t\/\/ a signal to enrich the internal Meters with Resource information.\n}\n\nfunc newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error) {\n\te, err := prom.NewExporter(prom.Options{Namespace: config.component})\n\tif err != nil {\n\t\tlogger.Errorw(\"Failed to create the Prometheus exporter.\", zap.Error(err))\n\t\treturn nil, nil, err\n\t}\n\tlogger.Infof(\"Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.\", config)\n\t\/\/ Start the server for Prometheus scraping\n\tgo func() {\n\t\tsrv := startNewPromSrv(e, config.prometheusHost, config.prometheusPort)\n\t\tsrv.ListenAndServe()\n\t}()\n\treturn e,\n\t\tfunc(r *resource.Resource) (view.Exporter, error) { return &emptyPromExporter{}, nil },\n\t\tnil\n}\n\nfunc getCurPromSrv() *http.Server {\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\treturn curPromSrv\n}\n\nfunc resetCurPromSrv() {\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\tif curPromSrv != nil {\n\t\tcurPromSrv.Close()\n\t\tcurPromSrv = nil\n\t}\n}\n\nfunc startNewPromSrv(e *prom.Exporter, host string, port int) *http.Server {\n\tsm := http.NewServeMux()\n\tsm.Handle(\"\/metrics\", e)\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\tif curPromSrv != nil {\n\t\tcurPromSrv.Close()\n\t}\n\tcurPromSrv = &http.Server{\n\t\tAddr: host + \":\" + strconv.Itoa(port),\n\t\tHandler: sm,\n\t}\n\treturn curPromSrv\n}\n<commit_msg>Add nolint to false positive being flagged (#1897)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\n\tprom \"contrib.go.opencensus.io\/exporter\/prometheus\"\n\t\"go.opencensus.io\/resource\"\n\t\"go.opencensus.io\/stats\/view\"\n\t\"go.uber.org\/zap\"\n)\n\nvar (\n\tcurPromSrv *http.Server\n\tcurPromSrvMux sync.Mutex\n)\n\ntype emptyPromExporter struct{}\n\nvar _ view.Exporter = (*emptyPromExporter)(nil)\n\nfunc (emptyPromExporter) ExportView(viewData *view.Data) {\n\t\/\/ Prometheus runs a loop to read stats via ReadAndExport, so this is just\n\t\/\/ a signal to enrich the internal Meters with Resource information.\n}\n\n\/\/nolint: unparam \/\/ False positive of flagging the second result of this function unused.\nfunc newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, ResourceExporterFactory, error) {\n\te, err := prom.NewExporter(prom.Options{Namespace: config.component})\n\tif err != nil {\n\t\tlogger.Errorw(\"Failed to create the Prometheus exporter.\", zap.Error(err))\n\t\treturn nil, nil, err\n\t}\n\tlogger.Infof(\"Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.\", config)\n\t\/\/ Start the server for Prometheus scraping\n\tgo func() {\n\t\tsrv := startNewPromSrv(e, config.prometheusHost, config.prometheusPort)\n\t\tsrv.ListenAndServe()\n\t}()\n\treturn e,\n\t\tfunc(r *resource.Resource) (view.Exporter, error) { return &emptyPromExporter{}, nil },\n\t\tnil\n}\n\nfunc getCurPromSrv() *http.Server {\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\treturn curPromSrv\n}\n\nfunc resetCurPromSrv() {\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\tif curPromSrv != nil {\n\t\tcurPromSrv.Close()\n\t\tcurPromSrv = nil\n\t}\n}\n\nfunc startNewPromSrv(e *prom.Exporter, host string, port int) *http.Server {\n\tsm := http.NewServeMux()\n\tsm.Handle(\"\/metrics\", e)\n\tcurPromSrvMux.Lock()\n\tdefer curPromSrvMux.Unlock()\n\tif curPromSrv != nil {\n\t\tcurPromSrv.Close()\n\t}\n\tcurPromSrv = &http.Server{\n\t\tAddr: host + \":\" + strconv.Itoa(port),\n\t\tHandler: sm,\n\t}\n\treturn curPromSrv\n}\n<|endoftext|>"} {"text":"<commit_before>package publishers\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/metrics\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/data\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n)\n\nconst insertTemplate = \"INSERT INTO %s (%s) VALUES (%s)\"\n\ntype CloudSql struct {\n\tconn *sql.DB\n\tstatements map[string]*sql.Stmt\n}\n\nfunc (c *CloudSql) Name() string {\n\treturn \"Cloud SQL\"\n}\n\nfunc (c *CloudSql) RegisterMetric(metric metrics.Metric) error {\n\terr := c.ensureTableExists(metric)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.prepareInsertStatement(metric)\n}\n\nfunc (c *CloudSql) ensureTableExists(metric metrics.Metric) error {\n\t\/\/ TODO(fweikert): implement\n\treturn nil\n}\n\nfunc (c *CloudSql) prepareInsertStatement(metric metrics.Metric) error {\n\tname := metric.Name()\n\tif _, ok := c.statements[name]; ok {\n\t\treturn fmt.Errorf(\"Metrics %s has already been registered for publisher %s.\", name, c.Name())\n\t}\n\n\tcolumnNames := metrics.GetColumnNames(metric.Columns())\n\tplaceholder := strings.TrimRight(strings.Repeat(\"?, \", len(columnNames)), \", \")\n\tinsert := fmt.Sprintf(insertTemplate, name, strings.Join(columnNames, \", \"), placeholder)\n\n\tnonKeyColumnNames := make([]string, 0)\n\tfor _, c := range metric.Columns() {\n\t\tif !c.IsKey {\n\t\t\tnonKeyColumnNames = append(nonKeyColumnNames, c.Name)\n\t\t}\n\t}\n\tif len(nonKeyColumnNames) != len(columnNames) {\n\t\tupdates := make([]string, len(nonKeyColumnNames))\n\t\tfor i, c := range nonKeyColumnNames {\n\t\t\tupdates[i] = fmt.Sprintf(\"%s=VALUES(%s)\", c, c)\n\t\t}\n\t\tinsert = fmt.Sprintf(\"%s ON DUPLICATE KEY UPDATE %s\", insert, strings.Join(updates, \", \"))\n\t}\n\n\tstmt, err := c.conn.Prepare(insert)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to prepare insert statement for metric %s: %v\", name, err)\n\t}\n\tc.statements[name] = stmt\n\treturn nil\n}\n\nfunc (c *CloudSql) Publish(metricName string, newData *data.DataSet) error {\n\tstmt := c.statements[metricName]\n\tif stmt == nil {\n\t\treturn fmt.Errorf(\"Could not find prepared insert statement for metric %s. Have you called RegisterMetric() first?\", metricName)\n\t}\n\n\tfor _, row := range newData.Data {\n\t\t_, err := stmt.Exec(row...)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not insert new data for metric %s: %v\", metricName, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc CreateCloudSqlPublisher(user, password, instance, database string, localPort int) (*CloudSql, error) {\n\tconn, err := sql.Open(\"mysql\", getConnectionString(user, password, instance, database, localPort))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not establish connection to database: %v\", err)\n\t}\n\tif err := conn.Ping(); err != nil {\n\t\tconn.Close()\n\t\treturn nil, fmt.Errorf(\"Connection to database is bad: %v\", err)\n\t}\n\treturn &CloudSql{\n\t\tconn: conn,\n\t\tstatements: make(map[string]*sql.Stmt),\n\t}, nil\n}\n\nfunc getConnectionString(user, password, instance, database string, localPort int) string {\n\tcred := fmt.Sprintf(\"%s:%s@\", user, password)\n\tif os.Getenv(\"GAE_INSTANCE\") != \"\" {\n\t\t\/\/ Running in production.\n\t\treturn fmt.Sprintf(\"%sunix(\/cloudsql\/%s)\/%s\", cred, instance, database)\n\t}\n\treturn fmt.Sprintf(\"%stcp([localhost]:%d)\/%s\", cred, localPort, database)\n}\n<commit_msg>Improve SQL logging.<commit_after>package publishers\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/metrics\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/data\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n)\n\nconst insertTemplate = \"INSERT INTO %s (%s) VALUES (%s)\"\n\ntype statement struct {\n\tprepared *sql.Stmt\n\ttext string\n}\n\ntype CloudSql struct {\n\tconn *sql.DB\n\tstatements map[string]*statement\n}\n\nfunc (c *CloudSql) Name() string {\n\treturn \"Cloud SQL\"\n}\n\nfunc (c *CloudSql) RegisterMetric(metric metrics.Metric) error {\n\terr := c.ensureTableExists(metric)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.prepareInsertStatement(metric)\n}\n\nfunc (c *CloudSql) ensureTableExists(metric metrics.Metric) error {\n\t\/\/ TODO(fweikert): implement\n\treturn nil\n}\n\nfunc (c *CloudSql) prepareInsertStatement(metric metrics.Metric) error {\n\tname := metric.Name()\n\tif _, ok := c.statements[name]; ok {\n\t\treturn fmt.Errorf(\"Metrics %s has already been registered for publisher %s.\", name, c.Name())\n\t}\n\n\tcolumnNames := metrics.GetColumnNames(metric.Columns())\n\tplaceholder := strings.TrimRight(strings.Repeat(\"?, \", len(columnNames)), \", \")\n\tinsert := fmt.Sprintf(insertTemplate, name, strings.Join(columnNames, \", \"), placeholder)\n\n\tnonKeyColumnNames := make([]string, 0)\n\tfor _, c := range metric.Columns() {\n\t\tif !c.IsKey {\n\t\t\tnonKeyColumnNames = append(nonKeyColumnNames, c.Name)\n\t\t}\n\t}\n\tif len(nonKeyColumnNames) != len(columnNames) {\n\t\tupdates := make([]string, len(nonKeyColumnNames))\n\t\tfor i, c := range nonKeyColumnNames {\n\t\t\tupdates[i] = fmt.Sprintf(\"%s=VALUES(%s)\", c, c)\n\t\t}\n\t\tinsert = fmt.Sprintf(\"%s ON DUPLICATE KEY UPDATE %s\", insert, strings.Join(updates, \", \"))\n\t}\n\n\tstmt, err := c.createStatement(name, insert)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.statements[name] = stmt\n\treturn nil\n}\n\nfunc (c *CloudSql) createStatement(metricName string, text string) (*statement, error) {\n\tstmt, err := c.conn.Prepare(text)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to prepare insert statement for metric %s: %v\\n\\tStatement: %s\", metricName, err, text)\n\t}\n\treturn &statement{prepared: stmt, text: text}, nil\n}\n\nfunc (c *CloudSql) Publish(metricName string, newData *data.DataSet) error {\n\tstmt := c.statements[metricName]\n\tif stmt == nil {\n\t\treturn fmt.Errorf(\"Could not find prepared insert statement for metric %s. Have you called RegisterMetric() first?\", metricName)\n\t}\n\n\tfor _, row := range newData.Data {\n\t\t_, err := stmt.prepared.Exec(row...)\n\t\tif err != nil {\n\t\t\tvalues := make([]string, len(row))\n\t\t\tfor i, v := range row {\n\t\t\t\tvalues[i] = fmt.Sprintf(\"%v\", v)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"Could not insert new data for metric %s: %v\\n\\tStatement: %s\\n\\tValues: %s\", metricName, err, stmt.text, strings.Join(values, \", \"))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc CreateCloudSqlPublisher(user, password, instance, database string, localPort int) (*CloudSql, error) {\n\tconn, err := sql.Open(\"mysql\", getConnectionString(user, password, instance, database, localPort))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not establish connection to database: %v\", err)\n\t}\n\tif err := conn.Ping(); err != nil {\n\t\tconn.Close()\n\t\treturn nil, fmt.Errorf(\"Connection to database is bad: %v\", err)\n\t}\n\treturn &CloudSql{\n\t\tconn: conn,\n\t\tstatements: make(map[string]*statement),\n\t}, nil\n}\n\nfunc getConnectionString(user, password, instance, database string, localPort int) string {\n\tcred := fmt.Sprintf(\"%s:%s@\", user, password)\n\tif os.Getenv(\"GAE_INSTANCE\") != \"\" {\n\t\t\/\/ Running in production.\n\t\treturn fmt.Sprintf(\"%sunix(\/cloudsql\/%s)\/%s\", cred, instance, database)\n\t}\n\treturn fmt.Sprintf(\"%stcp([localhost]:%d)\/%s\", cred, localPort, database)\n}\n<|endoftext|>"} {"text":"<commit_before>package markdown\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/mholt\/caddy\/middleware\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\nconst (\n\tDefaultTemplate = \"defaultTemplate\"\n\tDefaultStaticDir = \"generated_site\"\n)\n\ntype MarkdownData struct {\n\tmiddleware.Context\n\tVar map[string]interface{}\n\tTitle string\n\tMarkdown string\n}\n\n\/\/ Process processes the contents of a page in b. It parses the metadata\n\/\/ (if any) and uses the template (if found).\nfunc (md Markdown) Process(c Config, requestPath string, b []byte, ctx middleware.Context) ([]byte, error) {\n\tvar metadata = Metadata{Variables: make(map[string]interface{})}\n\tvar markdown []byte\n\tvar err error\n\n\t\/\/ find parser compatible with page contents\n\tparser := findParser(b)\n\n\tif parser == nil {\n\t\t\/\/ if not found, assume whole file is markdown (no front matter)\n\t\tmarkdown = b\n\t} else {\n\t\t\/\/ if found, assume metadata present and parse.\n\t\tmarkdown, err = parser.Parse(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmetadata = parser.Metadata()\n\t}\n\n\t\/\/ if template is not specified, check if Default template is set\n\tif metadata.Template == \"\" {\n\t\tif _, ok := c.Templates[DefaultTemplate]; ok {\n\t\t\tmetadata.Template = DefaultTemplate\n\t\t}\n\t}\n\n\t\/\/ if template is set, load it\n\tvar tmpl []byte\n\tif metadata.Template != \"\" {\n\t\tif t, ok := c.Templates[metadata.Template]; ok {\n\t\t\ttmpl, err = ioutil.ReadFile(t)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ process markdown\n\tmarkdown = blackfriday.Markdown(markdown, c.Renderer, 0)\n\n\t\/\/ set it as body for template\n\tmetadata.Variables[\"markdown\"] = string(markdown)\n\n\treturn md.processTemplate(c, requestPath, tmpl, metadata, ctx)\n}\n\n\/\/ processTemplate processes a template given a requestPath,\n\/\/ template (tmpl) and metadata\nfunc (md Markdown) processTemplate(c Config, requestPath string, tmpl []byte, metadata Metadata, ctx middleware.Context) ([]byte, error) {\n\t\/\/ if template is not specified,\n\t\/\/ use the default template\n\tif tmpl == nil {\n\t\ttmpl = defaultTemplate(c, metadata, requestPath)\n\t}\n\n\t\/\/ process the template\n\tb := new(bytes.Buffer)\n\tt, err := template.New(\"\").Parse(string(tmpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmdData := MarkdownData{\n\t\tContext: ctx,\n\t\tVar: metadata.Variables,\n\t\tTitle: metadata.Title,\n\t\tMarkdown: metadata.Variables[\"markdown\"].(string),\n\t}\n\n\tif err = t.Execute(b, mdData); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ generate static page\n\tif err = md.generatePage(c, requestPath, b.Bytes()); err != nil {\n\t\t\/\/ if static page generation fails,\n\t\t\/\/ nothing fatal, only log the error.\n\t\t\/\/ TODO: Report this non-fatal error, but don't log it here\n\t\tlog.Println(err)\n\t\tfmt.Printf(\"Error: %v\", err)\n\t}\n\n\treturn b.Bytes(), nil\n\n}\n\n\/\/ generatePage generates a static html page from the markdown in content if c.StaticDir\n\/\/ is a non-empty value, meaning that the user enabled static site generation.\nfunc (md Markdown) generatePage(c Config, requestPath string, content []byte) error {\n\t\/\/ Only generate the page if static site generation is enabled\n\tif c.StaticDir != \"\" {\n\t\t\/\/ if static directory is not existing, create it\n\t\tif _, err := os.Stat(c.StaticDir); err != nil {\n\t\t\terr := os.MkdirAll(c.StaticDir, os.FileMode(0755))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfilePath := filepath.Join(c.StaticDir, requestPath)\n\n\t\t\/\/ If it is index file, use the directory instead\n\t\tif md.IsIndexFile(filepath.Base(requestPath)) {\n\t\t\tfilePath, _ = filepath.Split(filePath)\n\t\t}\n\n\t\t\/\/ Create the directory in case it is not existing\n\t\tif err := os.MkdirAll(filePath, os.FileMode(0744)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ generate index.html file in the directory\n\t\tfilePath = filepath.Join(filePath, \"index.html\")\n\t\terr := ioutil.WriteFile(filePath, content, os.FileMode(0664))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.StaticFiles[requestPath] = filePath\n\t}\n\n\treturn nil\n}\n\n\/\/ defaultTemplate constructs a default template.\nfunc defaultTemplate(c Config, metadata Metadata, requestPath string) []byte {\n\tvar scripts, styles bytes.Buffer\n\tfor _, style := range c.Styles {\n\t\tstyles.WriteString(strings.Replace(cssTemplate, \"{{url}}\", style, 1))\n\t\tstyles.WriteString(\"\\r\\n\")\n\t}\n\tfor _, script := range c.Scripts {\n\t\tscripts.WriteString(strings.Replace(jsTemplate, \"{{url}}\", script, 1))\n\t\tscripts.WriteString(\"\\r\\n\")\n\t}\n\n\t\/\/ Title is first line (length-limited), otherwise filename\n\ttitle := metadata.Title\n\tif title == \"\" {\n\t\ttitle = filepath.Base(requestPath)\n\t\tif body, _ := metadata.Variables[\"markdown\"].([]byte); len(body) > 128 {\n\t\t\ttitle = string(body[:128])\n\t\t} else if len(body) > 0 {\n\t\t\ttitle = string(body)\n\t\t}\n\t}\n\n\thtml := []byte(htmlTemplate)\n\thtml = bytes.Replace(html, []byte(\"{{title}}\"), []byte(title), 1)\n\thtml = bytes.Replace(html, []byte(\"{{css}}\"), styles.Bytes(), 1)\n\thtml = bytes.Replace(html, []byte(\"{{js}}\"), scripts.Bytes(), 1)\n\n\treturn html\n}\n\nconst (\n\thtmlTemplate = `<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>{{title}}<\/title>\n\t\t<meta charset=\"utf-8\">\n\t\t{{css}}\n\t\t{{js}}\n\t<\/head>\n\t<body>\n\t\t{{.Markdown}}\n\t<\/body>\n<\/html>`\n\tcssTemplate = `<link rel=\"stylesheet\" href=\"{{url}}\">`\n\tjsTemplate = `<script src=\"{{url}}\"><\/script>`\n)\n<commit_msg>Remove undesired committed debug logs<commit_after>package markdown\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/mholt\/caddy\/middleware\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\nconst (\n\tDefaultTemplate = \"defaultTemplate\"\n\tDefaultStaticDir = \"generated_site\"\n)\n\ntype MarkdownData struct {\n\tmiddleware.Context\n\tVar map[string]interface{}\n\tTitle string\n\tMarkdown string\n}\n\n\/\/ Process processes the contents of a page in b. It parses the metadata\n\/\/ (if any) and uses the template (if found).\nfunc (md Markdown) Process(c Config, requestPath string, b []byte, ctx middleware.Context) ([]byte, error) {\n\tvar metadata = Metadata{Variables: make(map[string]interface{})}\n\tvar markdown []byte\n\tvar err error\n\n\t\/\/ find parser compatible with page contents\n\tparser := findParser(b)\n\n\tif parser == nil {\n\t\t\/\/ if not found, assume whole file is markdown (no front matter)\n\t\tmarkdown = b\n\t} else {\n\t\t\/\/ if found, assume metadata present and parse.\n\t\tmarkdown, err = parser.Parse(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmetadata = parser.Metadata()\n\t}\n\n\t\/\/ if template is not specified, check if Default template is set\n\tif metadata.Template == \"\" {\n\t\tif _, ok := c.Templates[DefaultTemplate]; ok {\n\t\t\tmetadata.Template = DefaultTemplate\n\t\t}\n\t}\n\n\t\/\/ if template is set, load it\n\tvar tmpl []byte\n\tif metadata.Template != \"\" {\n\t\tif t, ok := c.Templates[metadata.Template]; ok {\n\t\t\ttmpl, err = ioutil.ReadFile(t)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ process markdown\n\tmarkdown = blackfriday.Markdown(markdown, c.Renderer, 0)\n\n\t\/\/ set it as body for template\n\tmetadata.Variables[\"markdown\"] = string(markdown)\n\n\treturn md.processTemplate(c, requestPath, tmpl, metadata, ctx)\n}\n\n\/\/ processTemplate processes a template given a requestPath,\n\/\/ template (tmpl) and metadata\nfunc (md Markdown) processTemplate(c Config, requestPath string, tmpl []byte, metadata Metadata, ctx middleware.Context) ([]byte, error) {\n\t\/\/ if template is not specified,\n\t\/\/ use the default template\n\tif tmpl == nil {\n\t\ttmpl = defaultTemplate(c, metadata, requestPath)\n\t}\n\n\t\/\/ process the template\n\tb := new(bytes.Buffer)\n\tt, err := template.New(\"\").Parse(string(tmpl))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmdData := MarkdownData{\n\t\tContext: ctx,\n\t\tVar: metadata.Variables,\n\t\tTitle: metadata.Title,\n\t\tMarkdown: metadata.Variables[\"markdown\"].(string),\n\t}\n\n\tif err = t.Execute(b, mdData); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ generate static page\n\tif err = md.generatePage(c, requestPath, b.Bytes()); err != nil {\n\t\t\/\/ if static page generation fails,\n\t\t\/\/ nothing fatal, only log the error.\n\t\t\/\/ TODO: Report this non-fatal error, but don't log it here\n\t\tlog.Println(err)\n\t}\n\n\treturn b.Bytes(), nil\n\n}\n\n\/\/ generatePage generates a static html page from the markdown in content if c.StaticDir\n\/\/ is a non-empty value, meaning that the user enabled static site generation.\nfunc (md Markdown) generatePage(c Config, requestPath string, content []byte) error {\n\t\/\/ Only generate the page if static site generation is enabled\n\tif c.StaticDir != \"\" {\n\t\t\/\/ if static directory is not existing, create it\n\t\tif _, err := os.Stat(c.StaticDir); err != nil {\n\t\t\terr := os.MkdirAll(c.StaticDir, os.FileMode(0755))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfilePath := filepath.Join(c.StaticDir, requestPath)\n\n\t\t\/\/ If it is index file, use the directory instead\n\t\tif md.IsIndexFile(filepath.Base(requestPath)) {\n\t\t\tfilePath, _ = filepath.Split(filePath)\n\t\t}\n\n\t\t\/\/ Create the directory in case it is not existing\n\t\tif err := os.MkdirAll(filePath, os.FileMode(0744)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ generate index.html file in the directory\n\t\tfilePath = filepath.Join(filePath, \"index.html\")\n\t\terr := ioutil.WriteFile(filePath, content, os.FileMode(0664))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.StaticFiles[requestPath] = filePath\n\t}\n\n\treturn nil\n}\n\n\/\/ defaultTemplate constructs a default template.\nfunc defaultTemplate(c Config, metadata Metadata, requestPath string) []byte {\n\tvar scripts, styles bytes.Buffer\n\tfor _, style := range c.Styles {\n\t\tstyles.WriteString(strings.Replace(cssTemplate, \"{{url}}\", style, 1))\n\t\tstyles.WriteString(\"\\r\\n\")\n\t}\n\tfor _, script := range c.Scripts {\n\t\tscripts.WriteString(strings.Replace(jsTemplate, \"{{url}}\", script, 1))\n\t\tscripts.WriteString(\"\\r\\n\")\n\t}\n\n\t\/\/ Title is first line (length-limited), otherwise filename\n\ttitle := metadata.Title\n\tif title == \"\" {\n\t\ttitle = filepath.Base(requestPath)\n\t\tif body, _ := metadata.Variables[\"markdown\"].([]byte); len(body) > 128 {\n\t\t\ttitle = string(body[:128])\n\t\t} else if len(body) > 0 {\n\t\t\ttitle = string(body)\n\t\t}\n\t}\n\n\thtml := []byte(htmlTemplate)\n\thtml = bytes.Replace(html, []byte(\"{{title}}\"), []byte(title), 1)\n\thtml = bytes.Replace(html, []byte(\"{{css}}\"), styles.Bytes(), 1)\n\thtml = bytes.Replace(html, []byte(\"{{js}}\"), scripts.Bytes(), 1)\n\n\treturn html\n}\n\nconst (\n\thtmlTemplate = `<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>{{title}}<\/title>\n\t\t<meta charset=\"utf-8\">\n\t\t{{css}}\n\t\t{{js}}\n\t<\/head>\n\t<body>\n\t\t{{.Markdown}}\n\t<\/body>\n<\/html>`\n\tcssTemplate = `<link rel=\"stylesheet\" href=\"{{url}}\">`\n\tjsTemplate = `<script src=\"{{url}}\"><\/script>`\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\n\/\/ run is a simple wrapper for exec.Run\/Close\nfunc run(envv []string, dir string, argv ...string) error {\n\tif *verbose {\n\t\tlog.Println(\"run\", argv)\n\t}\n\tcmd := exec.Command(argv[0], argv[1:]...)\n\tcmd.Dir = dir\n\tcmd.Env = envv\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ runLog runs a process and returns the combined stdout\/stderr, \n\/\/ as well as writing it to logfile (if specified). It returns\n\/\/ process combined stdout and stderr output, exit status and error.\n\/\/ The error returned is nil, if process is started successfully,\n\/\/ even if exit status is not 0.\nfunc runLog(envv []string, logfile, dir string, argv ...string) (string, int, error) {\n\tif *verbose {\n\t\tlog.Println(\"runLog\", argv)\n\t}\n\n\tb := new(bytes.Buffer)\n\tvar w io.Writer = b\n\tif logfile != \"\" {\n\t\tf, err := os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, err\n\t\t}\n\t\tdefer f.Close()\n\t\tw = io.MultiWriter(f, b)\n\t}\n\n\tcmd := exec.Command(argv[0], argv[1:]...)\n\tcmd.Dir = dir\n\tcmd.Env = envv\n\tcmd.Stdout = w\n\tcmd.Stderr = w\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tif ws, ok := err.(*exec.ExitError); ok {\n\t\t\treturn b.String(), ws.ExitStatus(), nil\n\t\t}\n\t}\n\treturn b.String(), 0, err\n}\n<commit_msg>builder: update for os.Wait changes.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\n\/\/ run is a simple wrapper for exec.Run\/Close\nfunc run(envv []string, dir string, argv ...string) error {\n\tif *verbose {\n\t\tlog.Println(\"run\", argv)\n\t}\n\tcmd := exec.Command(argv[0], argv[1:]...)\n\tcmd.Dir = dir\n\tcmd.Env = envv\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ runLog runs a process and returns the combined stdout\/stderr, \n\/\/ as well as writing it to logfile (if specified). It returns\n\/\/ process combined stdout and stderr output, exit status and error.\n\/\/ The error returned is nil, if process is started successfully,\n\/\/ even if exit status is not successful.\nfunc runLog(envv []string, logfile, dir string, argv ...string) (string, int, error) {\n\tif *verbose {\n\t\tlog.Println(\"runLog\", argv)\n\t}\n\n\tb := new(bytes.Buffer)\n\tvar w io.Writer = b\n\tif logfile != \"\" {\n\t\tf, err := os.OpenFile(logfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, err\n\t\t}\n\t\tdefer f.Close()\n\t\tw = io.MultiWriter(f, b)\n\t}\n\n\tcmd := exec.Command(argv[0], argv[1:]...)\n\tcmd.Dir = dir\n\tcmd.Env = envv\n\tcmd.Stdout = w\n\tcmd.Stderr = w\n\n\tstartErr := cmd.Start()\n\tif startErr != nil {\n\t\treturn \"\", 1, startErr\n\t}\n\texitStatus := 0\n\tif err := cmd.Wait(); err != nil {\n\t\texitStatus = 1 \/\/ TODO(bradfitz): this is fake. no callers care, so just return a bool instead.\n\t}\n\treturn b.String(), exitStatus, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The CUE Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"github.com\/spf13\/cobra\"\n\n\t\"cuelang.org\/go\/internal\/encoding\"\n\t\"cuelang.org\/go\/internal\/filetypes\"\n)\n\n\/\/ newExportCmd creates and export command\nfunc newExportCmd(c *Command) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"output data in a standard format\",\n\t\tLong: `export evaluates the configuration found in the current\ndirectory and prints the emit value to stdout.\n\nExamples:\nEvaluated and emit\n\n\t# a single file\n\tcue export config.cue\n\n\t# multiple files: these are combined at the top-level. Order doesn't matter.\n\tcue export file1.cue foo\/file2.cue\n\n\t# all files within the \"mypkg\" package: this includes all files in the\n\t# current directory and its ancestor directories that are marked with the\n\t# same package.\n\tcue export -p cloud\n\n\t# the -p flag can be omitted if the directory only contains files for\n\t# the \"mypkg\" package.\n\tcue export\n\nEmit value:\nFor CUE files, the generated configuration is derived from the top-level\nsingle expression, the emit value. For example, the file\n\n\t\/\/ config.cue\n\targ1: 1\n\targ2: \"my string\"\n\n\t{\n\t\ta: arg1\n\t\tb: arg2\n\t}\n\nyields the following JSON:\n\n\t{\n\t\t\"a\": 1,\n\t\t\"b\", \"my string\"\n\t}\n\nIn absence of arguments, the current directory is loaded as a package instance.\nA package instance for a directory contains all files in the directory and its\nancestor directories, up to the module root, belonging to the same package.\nIf the package is not explicitly defined by the '-p' flag, it must be uniquely\ndefined by the files in the current directory.\n\n\nFormats\nThe following formats are recognized:\n\njson output as JSON\n Outputs any CUE value.\n\ntext output as raw text\n The evaluated value must be of type string.\n\nyaml output as YAML\n Outputs any CUE value.\n`,\n\n\t\tRunE: mkRunE(c, runExport),\n\t}\n\n\taddOutFlags(cmd.Flags(), true)\n\taddOrphanFlags(cmd.Flags())\n\n\tcmd.Flags().Bool(string(flagEscape), false, \"use HTML escaping\")\n\n\tcmd.Flags().StringArrayP(string(flagExpression), \"e\", nil, \"export this expression only\")\n\n\tcmd.Flags().StringArrayP(string(flagInject), \"t\", nil,\n\t\t\"set the value of a tagged field\")\n\n\treturn cmd\n}\n\nfunc runExport(cmd *Command, args []string) error {\n\tb, err := parseArgs(cmd, args, &config{outMode: filetypes.Export})\n\texitOnErr(cmd, err, true)\n\n\tenc, err := encoding.NewEncoder(b.outFile, b.encConfig)\n\texitOnErr(cmd, err, true)\n\tdefer enc.Close()\n\n\titer := b.instances()\n\tdefer iter.close()\n\tfor iter.scan() {\n\t\tv := iter.value()\n\t\terr = enc.Encode(v)\n\t\texitOnErr(cmd, err, true)\n\t}\n\texitOnErr(cmd, iter.err(), true)\n\treturn nil\n}\n<commit_msg>cmd\/export: fix help<commit_after>\/\/ Copyright 2018 The CUE Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"github.com\/spf13\/cobra\"\n\n\t\"cuelang.org\/go\/internal\/encoding\"\n\t\"cuelang.org\/go\/internal\/filetypes\"\n)\n\n\/\/ newExportCmd creates and export command\nfunc newExportCmd(c *Command) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"output data in a standard format\",\n\t\tLong: `export evaluates the configuration found in the current\ndirectory and prints the emit value to stdout.\n\nExamples:\nEvaluated and emit\n\n\t# a single file\n\tcue export config.cue\n\n\t# multiple files: these are combined at the top-level. Order doesn't matter.\n\tcue export file1.cue foo\/file2.cue\n\n\t# all files within the \"mypkg\" package: this includes all files in the\n\t# current directory and its ancestor directories that are marked with the\n\t# same package.\n\tcue export -p cloud\n\n\t# the -p flag can be omitted if the directory only contains files for\n\t# the \"mypkg\" package.\n\tcue export\n\nEmit value:\nFor CUE files, the generated configuration is derived from the top-level\nsingle expression, the emit value. For example, the file\n\n\t\/\/ config.cue\n\targ1: 1\n\targ2: \"my string\"\n\n\t{\n\t\ta: arg1\n\t\tb: arg2\n\t}\n\nyields the following JSON:\n\n\t{\n\t\t\"arg1\": 1,\n\t\t\"a\": 1,\n\t\t\"arg2\": \"my string\",\n\t\t\"b\": \"my string\"\n\t}\n\nIn absence of arguments, the current directory is loaded as a package instance.\nA package instance for a directory contains all files in the directory and its\nancestor directories, up to the module root, belonging to the same package.\nIf the package is not explicitly defined by the '-p' flag, it must be uniquely\ndefined by the files in the current directory.\n\n\nFormats\nThe following formats are recognized:\n\njson output as JSON\n Outputs any CUE value.\n\ntext output as raw text\n The evaluated value must be of type string.\n\nyaml output as YAML\n Outputs any CUE value.\n`,\n\n\t\tRunE: mkRunE(c, runExport),\n\t}\n\n\taddOutFlags(cmd.Flags(), true)\n\taddOrphanFlags(cmd.Flags())\n\n\tcmd.Flags().Bool(string(flagEscape), false, \"use HTML escaping\")\n\n\tcmd.Flags().StringArrayP(string(flagExpression), \"e\", nil, \"export this expression only\")\n\n\tcmd.Flags().StringArrayP(string(flagInject), \"t\", nil,\n\t\t\"set the value of a tagged field\")\n\n\treturn cmd\n}\n\nfunc runExport(cmd *Command, args []string) error {\n\tb, err := parseArgs(cmd, args, &config{outMode: filetypes.Export})\n\texitOnErr(cmd, err, true)\n\n\tenc, err := encoding.NewEncoder(b.outFile, b.encConfig)\n\texitOnErr(cmd, err, true)\n\tdefer enc.Close()\n\n\titer := b.instances()\n\tdefer iter.close()\n\tfor iter.scan() {\n\t\tv := iter.value()\n\t\terr = enc.Encode(v)\n\t\texitOnErr(cmd, err, true)\n\t}\n\texitOnErr(cmd, iter.err(), true)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/foolusion\/choices\/elwinstorage\"\n\t\"github.com\/foolusion\/choices\/storage\/mongo\"\n)\n\nfunc main() {\n\tlog.Println(\"Starting exp-store...\")\n\tserver, err := mongo.NewServer(\"localhost\", \"elwin\")\n\tserver.LoadExampleData()\n\n\tlis, err := net.Listen(\"tcp\", \":8080\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts := grpc.NewServer()\n\tstorage.RegisterElwinStorageServer(s, server)\n\tlog.Fatal(s.Serve(lis))\n}\n<commit_msg>cmd\/exp-store: add grpc metrics<commit_after>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/foolusion\/choices\/elwinstorage\"\n\t\"github.com\/foolusion\/choices\/storage\/mongo\"\n\t\"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nfunc main() {\n\tlog.Println(\"Starting exp-store...\")\n\tserver, err := mongo.NewServer(\"localhost\", \"elwin\")\n\tserver.LoadExampleData()\n\n\tlis, err := net.Listen(\"tcp\", \":8080\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts := grpc.NewServer(\n\t\tgrpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),\n\t)\n\tstorage.RegisterElwinStorageServer(s, server)\n\tgrpc_prometheus.Register(s)\n\tgo func() {\n\t\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\t\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n\t}()\n\n\tlog.Fatal(s.Serve(lis))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-redis\/redis\"\n\tmetrics \"github.com\/rcrowley\/go-metrics\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ credentials are used by basic auth and include the hash of a valid password, plus\n\/\/ a \"stage\" string which is used to emit metrics that are useful when managing credrolls, so that\n\/\/ we can track whether or not deprecated passwords are still in use.\ntype credential struct {\n\tStage string `json:stage`\n\tHmac string `json:hmac`\n}\n\nfunc newAuth(config AuthConfig, registry metrics.Registry) (*BasicAuth, error) {\n\tif config.RedisUrl != \"\" && config.RedisKey == \"\" {\n\t\treturn nil, errors.New(\"RedisKey must be set if RedisUrl is set\")\n\t}\n\n\tif config.RedisUrl == \"\" && config.Tokens == \"\" {\n\t\treturn nil, errors.New(\"At least one of RedisUrl or Tokens must be set.\")\n\t}\n\n\tresult, err := NewBasicAuthFromString(config.Tokens, config.HmacKey, registry)\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tif config.RedisUrl == \"\" {\n\t\treturn result, err\n\t}\n\n\t\/\/ Parse redis db out of url\n\topt, err := redis.ParseURL(config.RedisUrl)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tclient := redis.NewClient(opt)\n\n\t\/\/ Refresh once at the start.\n\t\/\/ Ignore errors here - if redis is down, we don't want the process to fail to start.\n\t_, _ = result.refresh(client, config.HmacKey, config.RedisKey, config.Tokens)\n\n\t\/\/ Refresh forever.\n\tgo result.startRefresh(client, config, registry)\n\n\treturn result, err\n}\n\nfunc (auth *BasicAuth) startRefresh(client *redis.Client, config AuthConfig, registry metrics.Registry) {\n\tpChanges := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.changes\", registry)\n\tpFailures := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.failures\", registry)\n\tpSuccesses := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.successes\", registry)\n\tticker := time.NewTicker(config.RefreshInterval)\n\n\tfor ; true; <-ticker.C {\n\t\tchanged, err := auth.refresh(client, config.HmacKey, config.RedisKey, config.Tokens)\n\t\tif err == nil {\n\t\t\tpSuccesses.Inc(1)\n\t\t\tif changed {\n\t\t\t\tpChanges.Inc(1)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"error\", \"refresh\": true, \"message\": err.Error()}).Info()\n\t\tfmt.Printf(\"Unable to refresh credentials: %s\", err)\n\t\tpFailures.Inc(1)\n\t}\n}\n\n\/\/ Refresh auth credentials.\n\/\/ Return true if credentials changed, false otherwise.\nfunc (ba *BasicAuth) refresh(client redis.Cmdable, hmacKey string, redisKey string, config string) (bool, error) {\n\t\/\/ Start out using the strings from config\n\tnba, err := NewBasicAuthFromString(config, hmacKey, ba.registry)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ Retrieve all secrets and construct a string in the format expected by BasicAuth\n\tval := client.HGetAll(redisKey)\n\t\/\/ Return error if the key does not exist.\n\tif val == nil {\n\t\treturn false, fmt.Errorf(\"Key %s not found\", redisKey)\n\t}\n\n\tr, err := val.Result()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor k, v := range r {\n\t\tvar arr []credential\n\t\terr = json.Unmarshal([]byte(v), &arr)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tnba.creds[k] = arr\n\t}\n\n\t\/\/ Swap the secrets if there are changes\n\tif !reflect.DeepEqual(ba.creds, nba.creds) {\n\t\tba.Lock()\n\t\tdefer ba.Unlock()\n\t\tba.creds = nba.creds\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\n\/\/ BasicAuth handles normal user\/password Basic Auth requests, multiple\n\/\/ password for the same user and is safe for concurrent use.\ntype BasicAuth struct {\n\tsync.RWMutex\n\tcreds map[string][]credential\n\thmacKey string\n\tregistry metrics.Registry\n}\n\n\/\/ NewBasicAuthFromString creates and populates a BasicAuth from the provided\n\/\/ credentials, encoded as a string, in the following format:\n\/\/ user:password|user:password|...\nfunc NewBasicAuthFromString(creds string, hmacKey string, registry metrics.Registry) (*BasicAuth, error) {\n\tba := NewBasicAuth(registry, hmacKey)\n\n\t\/\/ If the string is empty, that's allowed.\n\tif creds == \"\" {\n\t\treturn ba, nil\n\t}\n\n\tfor _, u := range strings.Split(creds, \"|\") {\n\t\tuparts := strings.SplitN(u, \":\", 2)\n\t\tif len(uparts) != 2 || len(uparts[0]) == 0 || len(uparts[1]) == 0 {\n\t\t\treturn ba, fmt.Errorf(\"Unable to create credentials from '%s'\", u)\n\t\t}\n\n\t\tba.AddPrincipal(uparts[0], hmacEncode(hmacKey, uparts[1]), \"env\")\n\t}\n\treturn ba, nil\n}\n\nfunc hmacEncode(key string, message string) string {\n\thash := hmac.New(sha512.New, []byte(key))\n\thash.Write([]byte(message))\n\treturn hex.EncodeToString(hash.Sum(nil))\n}\n\nfunc NewBasicAuth(registry metrics.Registry, hmacKey string) *BasicAuth {\n\treturn &BasicAuth{\n\t\tcreds: make(map[string][]credential),\n\t\thmacKey: hmacKey,\n\t\tregistry: registry,\n\t}\n}\n\n\/\/ AddPrincipal add's a user\/password combo to the list of valid combinations\nfunc (ba *BasicAuth) AddPrincipal(user string, hmac string, stage string) {\n\tba.Lock()\n\tdefer ba.Unlock()\n\tu, exists := ba.creds[user]\n\tif !exists {\n\t\tu = make([]credential, 0, 1)\n\t}\n\tba.creds[user] = append(u, credential{Stage: stage, Hmac: hmac})\n}\n\n\/\/ Authenticate is true if the Request has a valid BasicAuth signature and\n\/\/ that signature encodes a known username\/password combo.\nfunc (ba *BasicAuth) Authenticate(r *http.Request) bool {\n\tuser, pass, ok := r.BasicAuth()\n\tif !ok {\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"failure\", \"no_basic_auth\": true}).Info()\n\t\treturn false\n\t}\n\n\tba.RLock()\n\tdefer ba.RUnlock()\n\n\tcredentials, exists := ba.creds[user]\n\tif !exists {\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"failure\", \"user\": user}).Info()\n\t\treturn false\n\t}\n\n\tfor _, c := range credentials {\n\t\tif c.Hmac == hmacEncode(ba.hmacKey, pass) {\n\t\t\tcountName := fmt.Sprintf(\"log-iss.auth.%s.%s.successes\", user, c.Stage)\n\t\t\tcounter := metrics.GetOrRegisterCounter(countName, ba.registry)\n\t\t\tcounter.Inc(1)\n\t\t\treturn true\n\t\t}\n\t}\n\tcountName := fmt.Sprintf(\"log-iss.auth.%s.failures\", user)\n\tcounter := metrics.GetOrRegisterCounter(countName, ba.registry)\n\tcounter.Inc(1)\n\treturn false\n}\n<commit_msg>Remove initial refresh of creds - it's no longer relevant<commit_after>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-redis\/redis\"\n\tmetrics \"github.com\/rcrowley\/go-metrics\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ credentials are used by basic auth and include the hash of a valid password, plus\n\/\/ a \"stage\" string which is used to emit metrics that are useful when managing credrolls, so that\n\/\/ we can track whether or not deprecated passwords are still in use.\ntype credential struct {\n\tStage string `json:stage`\n\tHmac string `json:hmac`\n}\n\nfunc newAuth(config AuthConfig, registry metrics.Registry) (*BasicAuth, error) {\n\tif config.RedisUrl != \"\" && config.RedisKey == \"\" {\n\t\treturn nil, errors.New(\"RedisKey must be set if RedisUrl is set\")\n\t}\n\n\tif config.RedisUrl == \"\" && config.Tokens == \"\" {\n\t\treturn nil, errors.New(\"At least one of RedisUrl or Tokens must be set.\")\n\t}\n\n\tresult, err := NewBasicAuthFromString(config.Tokens, config.HmacKey, registry)\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tif config.RedisUrl == \"\" {\n\t\treturn result, err\n\t}\n\n\t\/\/ Parse redis db out of url\n\topt, err := redis.ParseURL(config.RedisUrl)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tclient := redis.NewClient(opt)\n\n\t\/\/ Refresh forever.\n\tgo result.startRefresh(client, config, registry)\n\n\treturn result, err\n}\n\nfunc (auth *BasicAuth) startRefresh(client *redis.Client, config AuthConfig, registry metrics.Registry) {\n\tpChanges := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.changes\", registry)\n\tpFailures := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.failures\", registry)\n\tpSuccesses := metrics.GetOrRegisterCounter(\"log-iss.auth_refresh.successes\", registry)\n\tticker := time.NewTicker(config.RefreshInterval)\n\n\tfor ; true; <-ticker.C {\n\t\tchanged, err := auth.refresh(client, config.HmacKey, config.RedisKey, config.Tokens)\n\t\tif err == nil {\n\t\t\tpSuccesses.Inc(1)\n\t\t\tif changed {\n\t\t\t\tpChanges.Inc(1)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"error\", \"refresh\": true, \"message\": err.Error()}).Info()\n\t\tpFailures.Inc(1)\n\t}\n}\n\n\/\/ Refresh auth credentials.\n\/\/ Return true if credentials changed, false otherwise.\nfunc (ba *BasicAuth) refresh(client redis.Cmdable, hmacKey string, redisKey string, config string) (bool, error) {\n\t\/\/ Start out using the strings from config\n\tnba, err := NewBasicAuthFromString(config, hmacKey, ba.registry)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ Retrieve all secrets and construct a string in the format expected by BasicAuth\n\tval := client.HGetAll(redisKey)\n\t\/\/ Return error if the key does not exist.\n\tif val == nil {\n\t\treturn false, fmt.Errorf(\"Key %s not found\", redisKey)\n\t}\n\n\tr, err := val.Result()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor k, v := range r {\n\t\tvar arr []credential\n\t\terr = json.Unmarshal([]byte(v), &arr)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tnba.creds[k] = arr\n\t}\n\n\t\/\/ Swap the secrets if there are changes\n\tif !reflect.DeepEqual(ba.creds, nba.creds) {\n\t\tba.Lock()\n\t\tdefer ba.Unlock()\n\t\tba.creds = nba.creds\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\n\/\/ BasicAuth handles normal user\/password Basic Auth requests, multiple\n\/\/ password for the same user and is safe for concurrent use.\ntype BasicAuth struct {\n\tsync.RWMutex\n\tcreds map[string][]credential\n\thmacKey string\n\tregistry metrics.Registry\n}\n\n\/\/ NewBasicAuthFromString creates and populates a BasicAuth from the provided\n\/\/ credentials, encoded as a string, in the following format:\n\/\/ user:password|user:password|...\nfunc NewBasicAuthFromString(creds string, hmacKey string, registry metrics.Registry) (*BasicAuth, error) {\n\tba := NewBasicAuth(registry, hmacKey)\n\n\t\/\/ If the string is empty, that's allowed.\n\tif creds == \"\" {\n\t\treturn ba, nil\n\t}\n\n\tfor _, u := range strings.Split(creds, \"|\") {\n\t\tuparts := strings.SplitN(u, \":\", 2)\n\t\tif len(uparts) != 2 || len(uparts[0]) == 0 || len(uparts[1]) == 0 {\n\t\t\treturn ba, fmt.Errorf(\"Unable to create credentials from '%s'\", u)\n\t\t}\n\n\t\tba.AddPrincipal(uparts[0], hmacEncode(hmacKey, uparts[1]), \"env\")\n\t}\n\treturn ba, nil\n}\n\nfunc hmacEncode(key string, message string) string {\n\thash := hmac.New(sha512.New, []byte(key))\n\thash.Write([]byte(message))\n\treturn hex.EncodeToString(hash.Sum(nil))\n}\n\nfunc NewBasicAuth(registry metrics.Registry, hmacKey string) *BasicAuth {\n\treturn &BasicAuth{\n\t\tcreds: make(map[string][]credential),\n\t\thmacKey: hmacKey,\n\t\tregistry: registry,\n\t}\n}\n\n\/\/ AddPrincipal add's a user\/password combo to the list of valid combinations\nfunc (ba *BasicAuth) AddPrincipal(user string, hmac string, stage string) {\n\tba.Lock()\n\tdefer ba.Unlock()\n\tu, exists := ba.creds[user]\n\tif !exists {\n\t\tu = make([]credential, 0, 1)\n\t}\n\tba.creds[user] = append(u, credential{Stage: stage, Hmac: hmac})\n}\n\n\/\/ Authenticate is true if the Request has a valid BasicAuth signature and\n\/\/ that signature encodes a known username\/password combo.\nfunc (ba *BasicAuth) Authenticate(r *http.Request) bool {\n\tuser, pass, ok := r.BasicAuth()\n\tif !ok {\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"failure\", \"no_basic_auth\": true}).Info()\n\t\treturn false\n\t}\n\n\tba.RLock()\n\tdefer ba.RUnlock()\n\n\tcredentials, exists := ba.creds[user]\n\tif !exists {\n\t\tlog.WithFields(log.Fields{\"ns\": \"auth\", \"at\": \"failure\", \"user\": user}).Info()\n\t\treturn false\n\t}\n\n\tfor _, c := range credentials {\n\t\tif c.Hmac == hmacEncode(ba.hmacKey, pass) {\n\t\t\tcountName := fmt.Sprintf(\"log-iss.auth.%s.%s.successes\", user, c.Stage)\n\t\t\tcounter := metrics.GetOrRegisterCounter(countName, ba.registry)\n\t\t\tcounter.Inc(1)\n\t\t\treturn true\n\t\t}\n\t}\n\tcountName := fmt.Sprintf(\"log-iss.auth.%s.failures\", user)\n\tcounter := metrics.GetOrRegisterCounter(countName, ba.registry)\n\tcounter.Inc(1)\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>reformat<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Red Hat, Inc, and individual contributors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\"\n)\n\nfunc setContextCommand(sh *kubesh, args []string) error {\n\tswitch l := len(args); {\n\tcase l > 3:\n\t\tfmt.Println(\"Usage: \" + args[0] + \"[TYPE [NAME]]\")\n\n\t\t\/\/TODO: return an error?\n\t\treturn nil\n\tcase l == 1:\n\t\tsh.context = []string{}\n\t\tsh.rl.SetPrompt(prompt(sh.context))\n\n\t\treturn nil\n\t}\n\n\tresources, err := sh.finder.Lookup(args[1:])\n\tif err != nil {\n\n\t\treturn err\n\t}\n\n\ttypeOnly := len(args) == 2\n\n\tif len(resources) > 0 {\n\t\tres := resources[0]\n\t\tif typeOnly {\n\t\t\tsh.context = []string{res.typeName}\n\t\t} else {\n\t\t\tsh.context = []string{res.typeName, res.name}\n\t\t}\n\t}\n\n\tsh.rl.SetPrompt(prompt(sh.context))\n\n\treturn nil\n}\n\nfunc prompt(context []string) string {\n\treturn strings.Join(context, \":\") + \"> \"\n}\n\nfunc applyContext(context []string, args []string, rootCommand *cobra.Command) ([]string, error) {\n\tif len(context) > 0 {\n\t\tsubcmd, _, err := rootCommand.Find(args[:1])\n\t\tif err != nil {\n\n\t\t\treturn args, err\n\t\t}\n\n\t\t\/\/ poor man's set\n\t\tresourceTypes := map[string]struct{}{}\n\t\tfor _, t := range kubectl.ResourceAliases(ResourceTypes(subcmd)) {\n\t\t\tresourceTypes[t] = struct{}{}\n\t\t}\n\t\tnewArgs := []string{}\n\t\tnewArgs = append(newArgs, args[0])\n\n\t\tif _, ok := resourceTypes[context[0]]; ok {\n\t\t\terr := subcmd.ParseFlags(args)\n\t\t\tif err != nil {\n\n\t\t\t\treturn args, err\n\t\t\t}\n\n\t\t\tnonFlagArgs := subcmd.Flags().Args()\n\t\t\t\/\/ the subcommand is an arg, so we'll always have at least one\n\t\t\tswitch l := len(nonFlagArgs); {\n\t\t\tcase l == 1:\n\t\t\t\tnewArgs = append(newArgs, context...)\n\t\t\tcase l == 2:\n\t\t\t\t\/\/ could be a resource type or a resource name\n\t\t\t\targ := nonFlagArgs[1]\n\t\t\t\tif _, ok := resourceTypes[arg]; !ok {\n\t\t\t\t\t\/\/ treat as a resource name, use the resource type from the context\n\t\t\t\t\tnewArgs = append(newArgs, context[0])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn append(newArgs, args[1:]...), nil\n\t\t}\n\t}\n\n\treturn args, nil\n}\n<commit_msg>Add clear subcommand to pin along with useful -h<commit_after>\/\/ Copyright 2016 Red Hat, Inc, and individual contributors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/renstrom\/dedent\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\"\n)\n\nvar help = dedent.Dedent(`\n Pin to a resource or resource type.\n\n Pinning causes the shell to remember the given resource and\/or\n resource type, and apply it to commands as appropriate, allowing\n you to leave the resource type and\/or name out of other command\n invocations.\n\n # Pin to pods\n pin pods\n\n # Pin to a particular pod\n pin pod nginx-1234-asdf\n\n # Clear the pin\n pin clear\n\n The current pin will be shown in the prompt.\n`)\n\nfunc setContextCommand(sh *kubesh, args []string) error {\n\tif len(args) == 1 || len(args) > 3 {\n\t\tfmt.Println(\"Usage: \" + args[0] + \" (-h | clear | [TYPE [NAME]])\")\n\n\t\t\/\/TODO: return an error?\n\t\treturn nil\n\t}\n\n\tswitch arg := args[1]; {\n\tcase arg == \"clear\":\n\t\tsh.context = []string{}\n\t\tsh.rl.SetPrompt(prompt(sh.context))\n\n\t\treturn nil\n\n\tcase arg == \"-h\":\n\t\tfmt.Println(help)\n\n\t\treturn nil\n\t}\n\n\tresources, err := sh.finder.Lookup(args[1:])\n\tif err != nil {\n\n\t\treturn err\n\t}\n\n\ttypeOnly := len(args) == 2\n\n\tif len(resources) > 0 {\n\t\tres := resources[0]\n\t\tif typeOnly {\n\t\t\tsh.context = []string{res.typeName}\n\t\t} else {\n\t\t\tsh.context = []string{res.typeName, res.name}\n\t\t}\n\t}\n\n\tsh.rl.SetPrompt(prompt(sh.context))\n\n\treturn nil\n}\n\nfunc prompt(context []string) string {\n\treturn strings.Join(context, \":\") + \"> \"\n}\n\nfunc applyContext(context []string, args []string, rootCommand *cobra.Command) ([]string, error) {\n\tif len(context) > 0 {\n\t\tsubcmd, _, err := rootCommand.Find(args[:1])\n\t\tif err != nil {\n\n\t\t\treturn args, err\n\t\t}\n\n\t\t\/\/ poor man's set\n\t\tresourceTypes := map[string]struct{}{}\n\t\tfor _, t := range kubectl.ResourceAliases(ResourceTypes(subcmd)) {\n\t\t\tresourceTypes[t] = struct{}{}\n\t\t}\n\t\tnewArgs := []string{}\n\t\tnewArgs = append(newArgs, args[0])\n\n\t\tif _, ok := resourceTypes[context[0]]; ok {\n\t\t\terr := subcmd.ParseFlags(args)\n\t\t\tif err != nil {\n\n\t\t\t\treturn args, err\n\t\t\t}\n\n\t\t\tnonFlagArgs := subcmd.Flags().Args()\n\t\t\t\/\/ the subcommand is an arg, so we'll always have at least one\n\t\t\tswitch l := len(nonFlagArgs); {\n\t\t\tcase l == 1:\n\t\t\t\tnewArgs = append(newArgs, context...)\n\t\t\tcase l == 2:\n\t\t\t\t\/\/ could be a resource type or a resource name\n\t\t\t\targ := nonFlagArgs[1]\n\t\t\t\tif _, ok := resourceTypes[arg]; !ok {\n\t\t\t\t\t\/\/ treat as a resource name, use the resource type from the context\n\t\t\t\t\tnewArgs = append(newArgs, context[0])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn append(newArgs, args[1:]...), nil\n\t\t}\n\t}\n\n\treturn args, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/neurosnap\/sentences\/english\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar VERSION string\nvar COMMITHASH string\n\nvar ver bool\nvar fname string\nvar delim string\n\nvar sentencesCmd = &cobra.Command{\n\tUse: \"sentences\",\n\tShort: \"Sentence tokenizer\",\n\tLong: \"A utility that will break up a blob of text into sentences.\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar text []byte\n\n\t\tif ver {\n\t\t\tfmt.Println(VERSION)\n\t\t\tfmt.Println(COMMITHASH)\n\t\t\treturn\n\t\t}\n\n\t\tif fname != \"\" {\n\t\t\ttext, _ = ioutil.ReadFile(fname)\n\t\t} else {\n\t\t\treader := bufio.NewReader(os.Stdin)\n\t\t\ttext, _ = ioutil.ReadAll(reader)\n\t\t}\n\n\t\ttokenizer, err := english.NewSentenceTokenizer(nil)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tsentences := tokenizer.Tokenize(string(text))\n\t\tfor _, s := range sentences {\n\t\t\ttext := strings.Join(strings.Fields(s.Text), \" \")\n\n\t\t\ttext = strings.Join([]string{text, delim}, \"\")\n\t\t\tfmt.Printf(\"%s\", text)\n\t\t}\n\t},\n}\n\nfunc main() {\n\tsentencesCmd.Flags().BoolVarP(&ver, \"version\", \"v\", false, \"Get current version of sentences\")\n\tsentencesCmd.Flags().StringVarP(&fname, \"file\", \"f\", \"\", \"Read file as source input instead of stdin\")\n\tsentencesCmd.Flags().StringVarP(&delim, \"delimiter\", \"d\", \"\\n\", \"Delimiter used to demarcate sentence boundaries\")\n\n\tif err := sentencesCmd.Execute(); err != nil {\n\t\tfmt.Print(err)\n\t}\n}\n<commit_msg>more cmd line goodness<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/neurosnap\/sentences\/english\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar VERSION string\nvar COMMITHASH string\n\nvar ver bool\nvar fname string\nvar delim string\n\nvar sentencesCmd = &cobra.Command{\n\tUse: \"sentences\",\n\tShort: \"Sentence tokenizer\",\n\tLong: \"A utility that will break up a blob of text into sentences.\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar text []byte\n\t\tvar err error\n\n\t\tif ver {\n\t\t\tfmt.Println(VERSION)\n\t\t\tfmt.Println(COMMITHASH)\n\t\t\treturn\n\t\t}\n\n\t\tif fname != \"\" {\n\t\t\ttext, err = ioutil.ReadFile(fname)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tstat, err := os.Stdin.Stat()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tif (stat.Mode() & os.ModeCharDevice) != 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treader := bufio.NewReader(os.Stdin)\n\t\t\ttext, err = ioutil.ReadAll(reader)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\n\t\ttokenizer, err := english.NewSentenceTokenizer(nil)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tsentences := tokenizer.Tokenize(string(text))\n\t\tfor _, s := range sentences {\n\t\t\ttext := strings.Join(strings.Fields(s.Text), \" \")\n\n\t\t\ttext = strings.Join([]string{text, delim}, \"\")\n\t\t\tfmt.Printf(\"%s\", text)\n\t\t}\n\t},\n}\n\nfunc main() {\n\tsentencesCmd.Flags().BoolVarP(&ver, \"version\", \"v\", false, \"Get current version of sentences\")\n\tsentencesCmd.Flags().StringVarP(&fname, \"file\", \"f\", \"\", \"Read file as source input instead of stdin\")\n\tsentencesCmd.Flags().StringVarP(&delim, \"delimiter\", \"d\", \"\\n\", \"Delimiter used to demarcate sentence boundaries\")\n\n\tif err := sentencesCmd.Execute(); err != nil {\n\t\tfmt.Print(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/loadimpact\/speedboat\"\n\t\"github.com\/loadimpact\/speedboat\/js\"\n\t\"github.com\/loadimpact\/speedboat\/sampler\"\n\t\"github.com\/loadimpact\/speedboat\/sampler\/influxdb\"\n\t\"github.com\/loadimpact\/speedboat\/simple\"\n\t\"github.com\/urfave\/cli\"\n\t\"golang.org\/x\/net\/context\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\tstdlog \"log\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Configure the global logger.\nfunc configureLogging(c *cli.Context) {\n\tlog.SetLevel(log.InfoLevel)\n\tif c.GlobalBool(\"verbose\") {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\tif c.GlobalBool(\"json\") {\n\t\tlog.SetFormatter(&log.JSONFormatter{})\n\t}\n}\n\n\/\/ Configure the global sampler.\nfunc configureSampler(c *cli.Context) {\n\tsampler.DefaultSampler.OnError = func(err error) {\n\t\tlog.WithError(err).Error(\"[Sampler error]\")\n\t}\n\n\tfor _, output := range c.GlobalStringSlice(\"output\") {\n\t\tparts := strings.SplitN(output, \"+\", 2)\n\t\tswitch parts[0] {\n\t\tcase \"influxdb\":\n\t\t\tout, err := influxdb.NewFromURL(parts[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Fatal(\"Couldn't create InfluxDB client\")\n\t\t\t}\n\t\t\tsampler.DefaultSampler.Outputs = append(sampler.DefaultSampler.Outputs, out)\n\t\tcase \"stdout\":\n\t\t\tout := &LogMetricsOutput{Writer: os.Stdout}\n\t\t\tsampler.DefaultSampler.Outputs = append(sampler.DefaultSampler.Outputs, out)\n\t\tdefault:\n\t\t\tlog.WithField(\"type\", parts[0]).Fatal(\"Unrecognized metric output\")\n\t\t}\n\t}\n}\n\nfunc parse(cc *cli.Context) (conf Config, err error) {\n\tswitch len(cc.Args()) {\n\tcase 0:\n\t\tif !cc.IsSet(\"script\") && !cc.IsSet(\"url\") {\n\t\t\treturn conf, errors.New(\"No config file, script or URL\")\n\t\t}\n\tcase 1:\n\t\tbytes, err := ioutil.ReadFile(cc.Args()[0])\n\t\tif err != nil {\n\t\t\treturn conf, errors.New(\"Couldn't read config file\")\n\t\t}\n\t\tif err := yaml.Unmarshal(bytes, &conf); err != nil {\n\t\t\treturn conf, errors.New(\"Couldn't parse config file\")\n\t\t}\n\tdefault:\n\t\treturn conf, errors.New(\"Too many arguments!\")\n\t}\n\n\t\/\/ Let commandline flags override config files\n\tif cc.IsSet(\"script\") {\n\t\tconf.Script = cc.String(\"script\")\n\t}\n\tif cc.IsSet(\"url\") {\n\t\tconf.URL = cc.String(\"url\")\n\t}\n\tif cc.IsSet(\"vus\") {\n\t\tconf.VUs = cc.Int(\"vus\")\n\t}\n\tif cc.IsSet(\"duration\") {\n\t\tconf.Duration = cc.Duration(\"duration\").String()\n\t}\n\n\treturn conf, nil\n}\n\nfunc dumpTest(t *speedboat.Test) {\n\tlog.WithFields(log.Fields{\n\t\t\"script\": t.Script,\n\t\t\"url\": t.URL,\n\t}).Info(\"General\")\n\tfor i, stage := range t.Stages {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"#\": i,\n\t\t\t\"duration\": stage.Duration,\n\t\t\t\"start\": stage.StartVUs,\n\t\t\t\"end\": stage.EndVUs,\n\t\t}).Info(\"Stage\")\n\t}\n}\n\nfunc headlessController(c context.Context, t *speedboat.Test) <-chan int {\n\tch := make(chan int)\n\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tselect {\n\t\tcase ch <- t.VUsAt(0):\n\t\tcase <-c.Done():\n\t\t\treturn\n\t\t}\n\n\t\tstartTime := time.Now()\n\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tch <- t.VUsAt(time.Since(startTime))\n\t\t\tcase <-c.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc action(cc *cli.Context) error {\n\tconf, err := parse(cc)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Invalid arguments; see --help\")\n\t}\n\n\tt, err := conf.MakeTest()\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Configuration error\")\n\t}\n\n\tif cc.Bool(\"dump\") {\n\t\tdumpTest(&t)\n\t\treturn nil\n\t}\n\n\t\/\/ Inspect the test to find a suitable runner; additional ones can easily be added\n\tvar runner speedboat.Runner\n\tswitch {\n\tcase t.URL != \"\":\n\t\trunner = simple.New()\n\tcase strings.HasSuffix(t.Script, \".js\"):\n\t\tsrc, err := ioutil.ReadFile(t.Script)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Couldn't read script\")\n\t\t}\n\t\trunner = js.New(t.Script, string(src))\n\tdefault:\n\t\tlog.Fatal(\"No suitable runner found!\")\n\t}\n\n\t\/\/ Context that expires at the end of the test\n\tctx, cancel := context.WithTimeout(context.Background(), t.TotalDuration())\n\n\t\/\/ Configure the VU logger\n\tlogger := &log.Logger{\n\t\tOut: os.Stderr,\n\t\tLevel: log.DebugLevel,\n\t\tFormatter: &log.TextFormatter{},\n\t}\n\tif cc.GlobalBool(\"json\") {\n\t\tlogger.Formatter = &log.JSONFormatter{}\n\t}\n\tctx = speedboat.WithLogger(ctx, logger)\n\n\t\/\/ Output metrics appropriately; use a mutex to prevent garbled output\n\tlogMetrics := cc.Bool(\"log\")\n\tmetricsLogger := stdlog.New(os.Stdout, \"metrics: \", stdlog.Lmicroseconds)\n\tmetricsMutex := sync.Mutex{}\n\tgo func() {\n\t\tticker := time.NewTicker(1 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tif logMetrics {\n\t\t\t\t\tmetricsMutex.Lock()\n\t\t\t\t\tprintMetrics(metricsLogger)\n\t\t\t\t\tmetricsMutex.Unlock()\n\t\t\t\t}\n\t\t\t\tcommitMetrics()\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Use a \"headless controller\" to scale VUs by polling the test ramp\n\tmVUs := sampler.Gauge(\"vus\")\n\tvus := []context.CancelFunc{}\n\tfor scale := range headlessController(ctx, &t) {\n\t\tfor i := len(vus); i < scale; i++ {\n\t\t\tlog.WithField(\"id\", i).Debug(\"Spawning VU\")\n\t\t\tvuCtx, vuCancel := context.WithCancel(ctx)\n\t\t\tvus = append(vus, vuCancel)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif v := recover(); v != nil {\n\t\t\t\t\t\tswitch err := v.(type) {\n\t\t\t\t\t\tcase speedboat.FlowControl:\n\t\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\t\tcase speedboat.AbortTest:\n\t\t\t\t\t\t\t\tlog.Error(\"Test aborted\")\n\t\t\t\t\t\t\t\tcancel()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"id\": i,\n\t\t\t\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\t\t}).Error(\"VU crashed!\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\trunner.RunVU(vuCtx, t, len(vus))\n\t\t\t}()\n\t\t}\n\t\tfor i := len(vus); i > scale; i-- {\n\t\t\tlog.WithField(\"id\", i-1).Debug(\"Dropping VU\")\n\t\t\tvus[i-1]()\n\t\t\tvus = vus[:i-1]\n\t\t}\n\t\tmVUs.Int(len(vus))\n\t}\n\n\t\/\/ Wait until the end of the test\n\t<-ctx.Done()\n\n\t\/\/ Print final metrics\n\tmetricsMutex.Lock()\n\tprintMetrics(metricsLogger)\n\tmetricsMutex.Unlock()\n\tcommitMetrics()\n\n\treturn nil\n}\n\nfunc main() {\n\t\/\/ Free up -v and -h for our own flags\n\tcli.VersionFlag.Name = \"version\"\n\tcli.HelpFlag.Name = \"help, ?\"\n\n\t\/\/ Bootstrap using action-registered commandline flags\n\tapp := cli.NewApp()\n\tapp.Name = \"speedboat\"\n\tapp.Usage = \"A next-generation load generator\"\n\tapp.Version = \"0.0.1a1\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose, v\",\n\t\t\tUsage: \"More verbose output\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"script, s\",\n\t\t\tUsage: \"Script to run\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"url\",\n\t\t\tUsage: \"URL to test\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"vus, u\",\n\t\t\tUsage: \"Number of VUs to simulate\",\n\t\t\tValue: 10,\n\t\t},\n\t\tcli.DurationFlag{\n\t\t\tName: \"duration, d\",\n\t\t\tUsage: \"Test duration\",\n\t\t\tValue: time.Duration(10) * time.Second,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"output, o\",\n\t\t\tUsage: \"Output metrics to a file or database\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"log, l\",\n\t\t\tUsage: \"Log metrics to stdout\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"json\",\n\t\t\tUsage: \"Format log messages as JSON\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dump\",\n\t\t\tUsage: \"Dump parsed test and exit\",\n\t\t},\n\t}\n\tapp.Before = func(c *cli.Context) error {\n\t\tconfigureLogging(c)\n\t\tconfigureSampler(c)\n\t\treturn nil\n\t}\n\tapp.Action = action\n\tapp.Run(os.Args)\n}\n<commit_msg>[fix] Don't log final metrics without -l<commit_after>package main\n\nimport (\n\t\"errors\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/loadimpact\/speedboat\"\n\t\"github.com\/loadimpact\/speedboat\/js\"\n\t\"github.com\/loadimpact\/speedboat\/sampler\"\n\t\"github.com\/loadimpact\/speedboat\/sampler\/influxdb\"\n\t\"github.com\/loadimpact\/speedboat\/simple\"\n\t\"github.com\/urfave\/cli\"\n\t\"golang.org\/x\/net\/context\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\tstdlog \"log\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Configure the global logger.\nfunc configureLogging(c *cli.Context) {\n\tlog.SetLevel(log.InfoLevel)\n\tif c.GlobalBool(\"verbose\") {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\tif c.GlobalBool(\"json\") {\n\t\tlog.SetFormatter(&log.JSONFormatter{})\n\t}\n}\n\n\/\/ Configure the global sampler.\nfunc configureSampler(c *cli.Context) {\n\tsampler.DefaultSampler.OnError = func(err error) {\n\t\tlog.WithError(err).Error(\"[Sampler error]\")\n\t}\n\n\tfor _, output := range c.GlobalStringSlice(\"output\") {\n\t\tparts := strings.SplitN(output, \"+\", 2)\n\t\tswitch parts[0] {\n\t\tcase \"influxdb\":\n\t\t\tout, err := influxdb.NewFromURL(parts[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Fatal(\"Couldn't create InfluxDB client\")\n\t\t\t}\n\t\t\tsampler.DefaultSampler.Outputs = append(sampler.DefaultSampler.Outputs, out)\n\t\tcase \"stdout\":\n\t\t\tout := &LogMetricsOutput{Writer: os.Stdout}\n\t\t\tsampler.DefaultSampler.Outputs = append(sampler.DefaultSampler.Outputs, out)\n\t\tdefault:\n\t\t\tlog.WithField(\"type\", parts[0]).Fatal(\"Unrecognized metric output\")\n\t\t}\n\t}\n}\n\nfunc parse(cc *cli.Context) (conf Config, err error) {\n\tswitch len(cc.Args()) {\n\tcase 0:\n\t\tif !cc.IsSet(\"script\") && !cc.IsSet(\"url\") {\n\t\t\treturn conf, errors.New(\"No config file, script or URL\")\n\t\t}\n\tcase 1:\n\t\tbytes, err := ioutil.ReadFile(cc.Args()[0])\n\t\tif err != nil {\n\t\t\treturn conf, errors.New(\"Couldn't read config file\")\n\t\t}\n\t\tif err := yaml.Unmarshal(bytes, &conf); err != nil {\n\t\t\treturn conf, errors.New(\"Couldn't parse config file\")\n\t\t}\n\tdefault:\n\t\treturn conf, errors.New(\"Too many arguments!\")\n\t}\n\n\t\/\/ Let commandline flags override config files\n\tif cc.IsSet(\"script\") {\n\t\tconf.Script = cc.String(\"script\")\n\t}\n\tif cc.IsSet(\"url\") {\n\t\tconf.URL = cc.String(\"url\")\n\t}\n\tif cc.IsSet(\"vus\") {\n\t\tconf.VUs = cc.Int(\"vus\")\n\t}\n\tif cc.IsSet(\"duration\") {\n\t\tconf.Duration = cc.Duration(\"duration\").String()\n\t}\n\n\treturn conf, nil\n}\n\nfunc dumpTest(t *speedboat.Test) {\n\tlog.WithFields(log.Fields{\n\t\t\"script\": t.Script,\n\t\t\"url\": t.URL,\n\t}).Info(\"General\")\n\tfor i, stage := range t.Stages {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"#\": i,\n\t\t\t\"duration\": stage.Duration,\n\t\t\t\"start\": stage.StartVUs,\n\t\t\t\"end\": stage.EndVUs,\n\t\t}).Info(\"Stage\")\n\t}\n}\n\nfunc headlessController(c context.Context, t *speedboat.Test) <-chan int {\n\tch := make(chan int)\n\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tselect {\n\t\tcase ch <- t.VUsAt(0):\n\t\tcase <-c.Done():\n\t\t\treturn\n\t\t}\n\n\t\tstartTime := time.Now()\n\t\tticker := time.NewTicker(100 * time.Millisecond)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tch <- t.VUsAt(time.Since(startTime))\n\t\t\tcase <-c.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc action(cc *cli.Context) error {\n\tconf, err := parse(cc)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Invalid arguments; see --help\")\n\t}\n\n\tt, err := conf.MakeTest()\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Configuration error\")\n\t}\n\n\tif cc.Bool(\"dump\") {\n\t\tdumpTest(&t)\n\t\treturn nil\n\t}\n\n\t\/\/ Inspect the test to find a suitable runner; additional ones can easily be added\n\tvar runner speedboat.Runner\n\tswitch {\n\tcase t.URL != \"\":\n\t\trunner = simple.New()\n\tcase strings.HasSuffix(t.Script, \".js\"):\n\t\tsrc, err := ioutil.ReadFile(t.Script)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Couldn't read script\")\n\t\t}\n\t\trunner = js.New(t.Script, string(src))\n\tdefault:\n\t\tlog.Fatal(\"No suitable runner found!\")\n\t}\n\n\t\/\/ Context that expires at the end of the test\n\tctx, cancel := context.WithTimeout(context.Background(), t.TotalDuration())\n\n\t\/\/ Configure the VU logger\n\tlogger := &log.Logger{\n\t\tOut: os.Stderr,\n\t\tLevel: log.DebugLevel,\n\t\tFormatter: &log.TextFormatter{},\n\t}\n\tif cc.GlobalBool(\"json\") {\n\t\tlogger.Formatter = &log.JSONFormatter{}\n\t}\n\tctx = speedboat.WithLogger(ctx, logger)\n\n\t\/\/ Output metrics appropriately; use a mutex to prevent garbled output\n\tlogMetrics := cc.Bool(\"log\")\n\tmetricsLogger := stdlog.New(os.Stdout, \"metrics: \", stdlog.Lmicroseconds)\n\tmetricsMutex := sync.Mutex{}\n\tgo func() {\n\t\tticker := time.NewTicker(1 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tif logMetrics {\n\t\t\t\t\tmetricsMutex.Lock()\n\t\t\t\t\tprintMetrics(metricsLogger)\n\t\t\t\t\tmetricsMutex.Unlock()\n\t\t\t\t}\n\t\t\t\tcommitMetrics()\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Use a \"headless controller\" to scale VUs by polling the test ramp\n\tmVUs := sampler.Gauge(\"vus\")\n\tvus := []context.CancelFunc{}\n\tfor scale := range headlessController(ctx, &t) {\n\t\tfor i := len(vus); i < scale; i++ {\n\t\t\tlog.WithField(\"id\", i).Debug(\"Spawning VU\")\n\t\t\tvuCtx, vuCancel := context.WithCancel(ctx)\n\t\t\tvus = append(vus, vuCancel)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif v := recover(); v != nil {\n\t\t\t\t\t\tswitch err := v.(type) {\n\t\t\t\t\t\tcase speedboat.FlowControl:\n\t\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\t\tcase speedboat.AbortTest:\n\t\t\t\t\t\t\t\tlog.Error(\"Test aborted\")\n\t\t\t\t\t\t\t\tcancel()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"id\": i,\n\t\t\t\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\t\t}).Error(\"VU crashed!\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\trunner.RunVU(vuCtx, t, len(vus))\n\t\t\t}()\n\t\t}\n\t\tfor i := len(vus); i > scale; i-- {\n\t\t\tlog.WithField(\"id\", i-1).Debug(\"Dropping VU\")\n\t\t\tvus[i-1]()\n\t\t\tvus = vus[:i-1]\n\t\t}\n\t\tmVUs.Int(len(vus))\n\t}\n\n\t\/\/ Wait until the end of the test\n\t<-ctx.Done()\n\n\t\/\/ Print final metrics\n\tif logMetrics {\n\t\tmetricsMutex.Lock()\n\t\tprintMetrics(metricsLogger)\n\t\tmetricsMutex.Unlock()\n\t}\n\tcommitMetrics()\n\n\treturn nil\n}\n\nfunc main() {\n\t\/\/ Free up -v and -h for our own flags\n\tcli.VersionFlag.Name = \"version\"\n\tcli.HelpFlag.Name = \"help, ?\"\n\n\t\/\/ Bootstrap using action-registered commandline flags\n\tapp := cli.NewApp()\n\tapp.Name = \"speedboat\"\n\tapp.Usage = \"A next-generation load generator\"\n\tapp.Version = \"0.0.1a1\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose, v\",\n\t\t\tUsage: \"More verbose output\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"script, s\",\n\t\t\tUsage: \"Script to run\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"url\",\n\t\t\tUsage: \"URL to test\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"vus, u\",\n\t\t\tUsage: \"Number of VUs to simulate\",\n\t\t\tValue: 10,\n\t\t},\n\t\tcli.DurationFlag{\n\t\t\tName: \"duration, d\",\n\t\t\tUsage: \"Test duration\",\n\t\t\tValue: time.Duration(10) * time.Second,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"output, o\",\n\t\t\tUsage: \"Output metrics to a file or database\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"log, l\",\n\t\t\tUsage: \"Log metrics to stdout\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"json\",\n\t\t\tUsage: \"Format log messages as JSON\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dump\",\n\t\t\tUsage: \"Dump parsed test and exit\",\n\t\t},\n\t}\n\tapp.Before = func(c *cli.Context) error {\n\t\tconfigureLogging(c)\n\t\tconfigureSampler(c)\n\t\treturn nil\n\t}\n\tapp.Action = action\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t_ \"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/facebookgo\/httpcontrol\"\n\n\tversion \"bosun.org\/_version\"\n\n\t\"bosun.org\/cmd\/tsdbrelay\/denormalize\"\n\t\"bosun.org\/collect\"\n\t\"bosun.org\/metadata\"\n\t\"bosun.org\/opentsdb\"\n\t\"bosun.org\/slog\"\n\t\"bosun.org\/util\"\n)\n\nvar (\n\tlistenAddr = flag.String(\"l\", \":4242\", \"Listen address.\")\n\tbosunServer = flag.String(\"b\", \"bosun\", \"Target Bosun server. Can specify port with host:port.\")\n\tsecondaryRelays = flag.String(\"r\", \"\", \"Additional relays to send data to. Intended for secondary data center replication. Only response from primary tsdb server wil be relayed to clients.\")\n\ttsdbServer = flag.String(\"t\", \"\", \"Target OpenTSDB server. Can specify port with host:port.\")\n\tlogVerbose = flag.Bool(\"v\", false, \"enable verbose logging\")\n\ttoDenormalize = flag.String(\"denormalize\", \"\", \"List of metrics to denormalize. Comma seperated list of `metric__tagname__tagname` rules. Will be translated to `__tagvalue.tagvalue.metric`\")\n\tflagVersion = flag.Bool(\"version\", false, \"Prints the version and exits.\")\n\n\tredisHost = flag.String(\"redis\", \"\", \"redis host for aggregating external counters\")\n\tredisDb = flag.Int(\"db\", 0, \"redis db to use for counters\")\n)\n\nvar (\n\ttsdbPutURL string\n\tbosunIndexURL string\n\n\tdenormalizationRules map[string]*denormalize.DenormalizationRule\n\n\trelayPutUrls []string\n\n\ttags = opentsdb.TagSet{}\n)\n\ntype tsdbrelayHTTPTransport struct {\n\tUserAgent string\n\thttp.RoundTripper\n}\n\nfunc (t *tsdbrelayHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif req.Header.Get(\"User-Agent\") == \"\" {\n\t\treq.Header.Add(\"User-Agent\", t.UserAgent)\n\t}\n\tif req.Header.Get(\"X-Access-Token\") == \"\" {\n\t\treq.Header.Add(\"X-Access-Token\", t.UserAgent)\n\t}\n\treturn t.RoundTripper.RoundTrip(req)\n}\n\nfunc init() {\n\tclient := &http.Client{\n\t\tTransport: &tsdbrelayHTTPTransport{\n\t\t\t\"Tsdbrelay\/\" + version.ShortVersion(),\n\t\t\t&httpcontrol.Transport{\n\t\t\t\tRequestTimeout: time.Minute,\n\t\t\t},\n\t\t},\n\t}\n\thttp.DefaultClient = client\n\tcollect.DefaultClient = client\n}\n\nfunc main() {\n\tvar err error\n\tmyHost, err = os.Hostname()\n\tif err != nil || myHost == \"\" {\n\t\tmyHost = \"tsdbrelay\"\n\t}\n\n\tflag.Parse()\n\tif *flagVersion {\n\t\tfmt.Println(version.GetVersionInfo(\"tsdbrelay\"))\n\t\tos.Exit(0)\n\t}\n\tif *bosunServer == \"\" || *tsdbServer == \"\" {\n\t\tslog.Fatal(\"must specify both bosun and tsdb server\")\n\t}\n\tslog.Infoln(version.GetVersionInfo(\"tsdbrelay\"))\n\tslog.Infoln(\"listen on\", *listenAddr)\n\tslog.Infoln(\"relay to bosun at\", *bosunServer)\n\tslog.Infoln(\"relay to tsdb at\", *tsdbServer)\n\tif *toDenormalize != \"\" {\n\t\tvar err error\n\t\tdenormalizationRules, err = denormalize.ParseDenormalizationRules(*toDenormalize)\n\t\tif err != nil {\n\t\t\tslog.Fatal(err)\n\t\t}\n\t}\n\n\ttsdbURL := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *tsdbServer,\n\t}\n\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *tsdbServer,\n\t\tPath: \"\/api\/put\",\n\t}\n\ttsdbPutURL = u.String()\n\tbosunURL := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *bosunServer,\n\t}\n\tu = url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *bosunServer,\n\t\tPath: \"\/api\/index\",\n\t}\n\tbosunIndexURL = u.String()\n\tif *secondaryRelays != \"\" {\n\t\tfor _, rUrl := range strings.Split(*secondaryRelays, \",\") {\n\t\t\tu = url.URL{\n\t\t\t\tScheme: \"http\",\n\t\t\t\tHost: rUrl,\n\t\t\t\tPath: \"\/api\/put\",\n\t\t\t}\n\t\t\trelayPutUrls = append(relayPutUrls, u.String())\n\t\t}\n\t}\n\n\ttsdbProxy := util.NewSingleHostProxy(tsdbURL)\n\tbosunProxy := util.NewSingleHostProxy(bosunURL)\n\trp := &relayProxy{\n\t\tTSDBProxy: tsdbProxy,\n\t\tBosunProxy: bosunProxy,\n\t}\n\thttp.HandleFunc(\"\/api\/put\", func(w http.ResponseWriter, r *http.Request) {\n\t\trp.relayPut(w, r, true)\n\t})\n\tif *redisHost != \"\" {\n\t\thttp.HandleFunc(\"\/api\/count\", collect.HandleCounterPut(*redisHost, *redisDb))\n\t}\n\thttp.HandleFunc(\"\/api\/metadata\/put\", func(w http.ResponseWriter, r *http.Request) {\n\t\trp.relayMetadata(w, r)\n\t})\n\thttp.Handle(\"\/\", tsdbProxy)\n\n\tcollectUrl := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *listenAddr,\n\t\tPath: \"\/api\/put\",\n\t}\n\tif err = collect.Init(collectUrl, \"tsdbrelay\"); err != nil {\n\t\tslog.Fatal(err)\n\t}\n\tif err := metadata.Init(collectUrl, false); err != nil {\n\t\tslog.Fatal(err)\n\t}\n\t\/\/ Make sure these get zeroed out instead of going unknown on restart\n\tcollect.Add(\"puts.relayed\", tags, 0)\n\tcollect.Add(\"puts.error\", tags, 0)\n\tcollect.Add(\"metadata.relayed\", tags, 0)\n\tcollect.Add(\"metadata.error\", tags, 0)\n\tcollect.Add(\"additional.puts.relayed\", tags, 0)\n\tcollect.Add(\"additional.puts.error\", tags, 0)\n\tmetadata.AddMetricMeta(\"tsdbrelay.puts.relayed\", metadata.Counter, metadata.Count, \"Number of successful puts relayed to opentsdb target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.puts.error\", metadata.Counter, metadata.Count, \"Number of puts that could not be relayed to opentsdb target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.metadata.relayed\", metadata.Counter, metadata.Count, \"Number of successful metadata puts relayed to bosun target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.metadata.error\", metadata.Counter, metadata.Count, \"Number of metadata puts that could not be relayed to bosun target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.additional.puts.relayed\", metadata.Counter, metadata.Count, \"Number of successful puts relayed to additional targets\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.additional.puts.error\", metadata.Counter, metadata.Count, \"Number of puts that could not be relayed to additional targets\")\n\tslog.Fatal(http.ListenAndServe(*listenAddr, nil))\n}\n\nfunc verbose(format string, a ...interface{}) {\n\tif *logVerbose {\n\t\tslog.Infof(format, a...)\n\t}\n}\n\ntype relayProxy struct {\n\tTSDBProxy *httputil.ReverseProxy\n\tBosunProxy *httputil.ReverseProxy\n}\n\ntype passthru struct {\n\tio.ReadCloser\n\tbuf bytes.Buffer\n}\n\nfunc (p *passthru) Read(b []byte) (int, error) {\n\tn, err := p.ReadCloser.Read(b)\n\tp.buf.Write(b[:n])\n\treturn n, err\n}\n\ntype relayWriter struct {\n\thttp.ResponseWriter\n\tcode int\n}\n\nfunc (rw *relayWriter) WriteHeader(code int) {\n\trw.code = code\n\trw.ResponseWriter.WriteHeader(code)\n}\n\nvar (\n\trelayHeader = \"X-Relayed-From\"\n\tencHeader = \"Content-Encoding\"\n\ttypeHeader = \"Content-Type\"\n\tmyHost string\n)\n\nfunc (rp *relayProxy) relayPut(responseWriter http.ResponseWriter, r *http.Request, parse bool) {\n\tisRelayed := r.Header.Get(relayHeader) != \"\"\n\treader := &passthru{ReadCloser: r.Body}\n\tr.Body = reader\n\tw := &relayWriter{ResponseWriter: responseWriter}\n\trp.TSDBProxy.ServeHTTP(w, r)\n\tif w.code\/100 != 2 {\n\t\tverbose(\"relayPut got status %d\", w.code)\n\t\tcollect.Add(\"puts.error\", tags, 1)\n\t\treturn\n\t}\n\tverbose(\"relayed to tsdb\")\n\tcollect.Add(\"puts.relayed\", tags, 1)\n\t\/\/ Send to bosun in a separate go routine so we can end the source's request.\n\tgo func() {\n\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\treq, err := http.NewRequest(r.Method, bosunIndexURL, body)\n\t\tif err != nil {\n\t\t\tverbose(\"bosun connect error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tverbose(\"bosun relay error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tresp.Body.Close()\n\t\tverbose(\"bosun relay success\")\n\t}()\n\t\/\/ Parse and denormalize datapoints\n\tif !isRelayed && parse && denormalizationRules != nil {\n\t\tgo rp.denormalize(bytes.NewReader(reader.buf.Bytes()))\n\t}\n\n\tif !isRelayed && len(relayPutUrls) > 0 {\n\t\tgo func() {\n\t\t\tfor _, relayUrl := range relayPutUrls {\n\t\t\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\t\t\treq, err := http.NewRequest(r.Method, relayUrl, body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"relayPutUrls connect error: %v\", err)\n\t\t\t\t\tcollect.Add(\"additional.puts.error\", tags, 1)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif contenttype := r.Header.Get(typeHeader); contenttype != \"\" {\n\t\t\t\t\treq.Header.Set(typeHeader, contenttype)\n\t\t\t\t}\n\t\t\t\tif encoding := r.Header.Get(encHeader); encoding != \"\" {\n\t\t\t\t\treq.Header.Set(encHeader, encoding)\n\t\t\t\t}\n\t\t\t\treq.Header.Add(relayHeader, myHost)\n\t\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"secondary relay error: %v\", err)\n\t\t\t\t\tcollect.Add(\"additional.puts.error\", tags, 1)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t\tverbose(\"secondary relay success\")\n\t\t\t\tcollect.Add(\"additional.puts.relayed\", tags, 1)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (rp *relayProxy) denormalize(body io.Reader) {\n\tgReader, err := gzip.NewReader(body)\n\tif err != nil {\n\t\tverbose(\"error making gzip reader: %v\", err)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(gReader)\n\tdps := []*opentsdb.DataPoint{}\n\terr = decoder.Decode(&dps)\n\tif err != nil {\n\t\tverbose(\"error decoding data points: %v\", err)\n\t\treturn\n\t}\n\trelayDps := []*opentsdb.DataPoint{}\n\tfor _, dp := range dps {\n\t\tif rule, ok := denormalizationRules[dp.Metric]; ok {\n\t\t\tif err = rule.Translate(dp); err == nil {\n\t\t\t\trelayDps = append(relayDps, dp)\n\t\t\t} else {\n\t\t\t\tverbose(\"error translating points: %v\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif len(relayDps) == 0 {\n\t\treturn\n\t}\n\tbuf := &bytes.Buffer{}\n\tgWriter := gzip.NewWriter(buf)\n\tencoder := json.NewEncoder(gWriter)\n\terr = encoder.Encode(relayDps)\n\tif err != nil {\n\t\tverbose(\"error encoding denormalized data points: %v\", err)\n\t\treturn\n\t}\n\tif err = gWriter.Close(); err != nil {\n\t\tverbose(\"error zipping denormalized data points: %v\", err)\n\t\treturn\n\t}\n\treq, err := http.NewRequest(\"POST\", tsdbPutURL, buf)\n\tif err != nil {\n\t\tverbose(\"error posting denormalized data points: %v\", err)\n\t\treturn\n\t}\n\treq.Header.Set(typeHeader, \"application\/json\")\n\treq.Header.Set(encHeader, \"gzip\")\n\n\tresponseWriter := httptest.NewRecorder()\n\trp.relayPut(responseWriter, req, false)\n\n\tverbose(\"relayed %d denormalized data points. Tsdb response: %d\", len(relayDps), responseWriter.Code)\n}\n\nfunc (rp *relayProxy) relayMetadata(responseWriter http.ResponseWriter, r *http.Request) {\n\treader := &passthru{ReadCloser: r.Body}\n\tr.Body = reader\n\tw := &relayWriter{ResponseWriter: responseWriter}\n\trp.BosunProxy.ServeHTTP(w, r)\n\tif w.code != 204 {\n\t\tverbose(\"relayMetadata got status %d\", w.code)\n\t\tcollect.Add(\"metadata.error\", tags, 1)\n\t\treturn\n\t}\n\tverbose(\"relayed metadata to bosun\")\n\tcollect.Add(\"metadata.relayed\", tags, 1)\n\tif r.Header.Get(relayHeader) != \"\" {\n\t\treturn\n\t}\n\tif len(relayPutUrls) != 0 {\n\t\tgo func() {\n\t\t\tfor _, relayUrl := range relayPutUrls {\n\t\t\t\trelayUrl = strings.Replace(relayUrl, \"\/put\", \"\/metadata\/put\", 1)\n\t\t\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\t\t\treq, err := http.NewRequest(r.Method, relayUrl, body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"metadata relayPutUrls error %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif contenttype := r.Header.Get(typeHeader); contenttype != \"\" {\n\t\t\t\t\treq.Header.Set(typeHeader, contenttype)\n\t\t\t\t}\n\t\t\t\tif encoding := r.Header.Get(encHeader); encoding != \"\" {\n\t\t\t\t\treq.Header.Set(encHeader, encoding)\n\t\t\t\t}\n\t\t\t\treq.Header.Add(relayHeader, myHost)\n\t\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"secondary relay metadata error: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t\tverbose(\"secondary relay metadata success\")\n\t\t\t}\n\t\t}()\n\t}\n}\n<commit_msg>cmd\/tsdbrelay: copy X-Access-Token in correct place<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t_ \"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/facebookgo\/httpcontrol\"\n\n\tversion \"bosun.org\/_version\"\n\n\t\"bosun.org\/cmd\/tsdbrelay\/denormalize\"\n\t\"bosun.org\/collect\"\n\t\"bosun.org\/metadata\"\n\t\"bosun.org\/opentsdb\"\n\t\"bosun.org\/slog\"\n\t\"bosun.org\/util\"\n)\n\nvar (\n\tlistenAddr = flag.String(\"l\", \":4242\", \"Listen address.\")\n\tbosunServer = flag.String(\"b\", \"bosun\", \"Target Bosun server. Can specify port with host:port.\")\n\tsecondaryRelays = flag.String(\"r\", \"\", \"Additional relays to send data to. Intended for secondary data center replication. Only response from primary tsdb server wil be relayed to clients.\")\n\ttsdbServer = flag.String(\"t\", \"\", \"Target OpenTSDB server. Can specify port with host:port.\")\n\tlogVerbose = flag.Bool(\"v\", false, \"enable verbose logging\")\n\ttoDenormalize = flag.String(\"denormalize\", \"\", \"List of metrics to denormalize. Comma seperated list of `metric__tagname__tagname` rules. Will be translated to `__tagvalue.tagvalue.metric`\")\n\tflagVersion = flag.Bool(\"version\", false, \"Prints the version and exits.\")\n\n\tredisHost = flag.String(\"redis\", \"\", \"redis host for aggregating external counters\")\n\tredisDb = flag.Int(\"db\", 0, \"redis db to use for counters\")\n)\n\nvar (\n\ttsdbPutURL string\n\tbosunIndexURL string\n\n\tdenormalizationRules map[string]*denormalize.DenormalizationRule\n\n\trelayPutUrls []string\n\n\ttags = opentsdb.TagSet{}\n)\n\ntype tsdbrelayHTTPTransport struct {\n\tUserAgent string\n\thttp.RoundTripper\n}\n\nfunc (t *tsdbrelayHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif req.Header.Get(\"User-Agent\") == \"\" {\n\t\treq.Header.Add(\"User-Agent\", t.UserAgent)\n\t}\n\treturn t.RoundTripper.RoundTrip(req)\n}\n\nfunc init() {\n\tclient := &http.Client{\n\t\tTransport: &tsdbrelayHTTPTransport{\n\t\t\t\"Tsdbrelay\/\" + version.ShortVersion(),\n\t\t\t&httpcontrol.Transport{\n\t\t\t\tRequestTimeout: time.Minute,\n\t\t\t},\n\t\t},\n\t}\n\thttp.DefaultClient = client\n\tcollect.DefaultClient = client\n}\n\nfunc main() {\n\tvar err error\n\tmyHost, err = os.Hostname()\n\tif err != nil || myHost == \"\" {\n\t\tmyHost = \"tsdbrelay\"\n\t}\n\n\tflag.Parse()\n\tif *flagVersion {\n\t\tfmt.Println(version.GetVersionInfo(\"tsdbrelay\"))\n\t\tos.Exit(0)\n\t}\n\tif *bosunServer == \"\" || *tsdbServer == \"\" {\n\t\tslog.Fatal(\"must specify both bosun and tsdb server\")\n\t}\n\tslog.Infoln(version.GetVersionInfo(\"tsdbrelay\"))\n\tslog.Infoln(\"listen on\", *listenAddr)\n\tslog.Infoln(\"relay to bosun at\", *bosunServer)\n\tslog.Infoln(\"relay to tsdb at\", *tsdbServer)\n\tif *toDenormalize != \"\" {\n\t\tvar err error\n\t\tdenormalizationRules, err = denormalize.ParseDenormalizationRules(*toDenormalize)\n\t\tif err != nil {\n\t\t\tslog.Fatal(err)\n\t\t}\n\t}\n\n\ttsdbURL := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *tsdbServer,\n\t}\n\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *tsdbServer,\n\t\tPath: \"\/api\/put\",\n\t}\n\ttsdbPutURL = u.String()\n\tbosunURL := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *bosunServer,\n\t}\n\tu = url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *bosunServer,\n\t\tPath: \"\/api\/index\",\n\t}\n\tbosunIndexURL = u.String()\n\tif *secondaryRelays != \"\" {\n\t\tfor _, rUrl := range strings.Split(*secondaryRelays, \",\") {\n\t\t\tu = url.URL{\n\t\t\t\tScheme: \"http\",\n\t\t\t\tHost: rUrl,\n\t\t\t\tPath: \"\/api\/put\",\n\t\t\t}\n\t\t\trelayPutUrls = append(relayPutUrls, u.String())\n\t\t}\n\t}\n\n\ttsdbProxy := util.NewSingleHostProxy(tsdbURL)\n\tbosunProxy := util.NewSingleHostProxy(bosunURL)\n\trp := &relayProxy{\n\t\tTSDBProxy: tsdbProxy,\n\t\tBosunProxy: bosunProxy,\n\t}\n\thttp.HandleFunc(\"\/api\/put\", func(w http.ResponseWriter, r *http.Request) {\n\t\trp.relayPut(w, r, true)\n\t})\n\tif *redisHost != \"\" {\n\t\thttp.HandleFunc(\"\/api\/count\", collect.HandleCounterPut(*redisHost, *redisDb))\n\t}\n\thttp.HandleFunc(\"\/api\/metadata\/put\", func(w http.ResponseWriter, r *http.Request) {\n\t\trp.relayMetadata(w, r)\n\t})\n\thttp.Handle(\"\/\", tsdbProxy)\n\n\tcollectUrl := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *listenAddr,\n\t\tPath: \"\/api\/put\",\n\t}\n\tif err = collect.Init(collectUrl, \"tsdbrelay\"); err != nil {\n\t\tslog.Fatal(err)\n\t}\n\tif err := metadata.Init(collectUrl, false); err != nil {\n\t\tslog.Fatal(err)\n\t}\n\t\/\/ Make sure these get zeroed out instead of going unknown on restart\n\tcollect.Add(\"puts.relayed\", tags, 0)\n\tcollect.Add(\"puts.error\", tags, 0)\n\tcollect.Add(\"metadata.relayed\", tags, 0)\n\tcollect.Add(\"metadata.error\", tags, 0)\n\tcollect.Add(\"additional.puts.relayed\", tags, 0)\n\tcollect.Add(\"additional.puts.error\", tags, 0)\n\tmetadata.AddMetricMeta(\"tsdbrelay.puts.relayed\", metadata.Counter, metadata.Count, \"Number of successful puts relayed to opentsdb target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.puts.error\", metadata.Counter, metadata.Count, \"Number of puts that could not be relayed to opentsdb target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.metadata.relayed\", metadata.Counter, metadata.Count, \"Number of successful metadata puts relayed to bosun target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.metadata.error\", metadata.Counter, metadata.Count, \"Number of metadata puts that could not be relayed to bosun target\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.additional.puts.relayed\", metadata.Counter, metadata.Count, \"Number of successful puts relayed to additional targets\")\n\tmetadata.AddMetricMeta(\"tsdbrelay.additional.puts.error\", metadata.Counter, metadata.Count, \"Number of puts that could not be relayed to additional targets\")\n\tslog.Fatal(http.ListenAndServe(*listenAddr, nil))\n}\n\nfunc verbose(format string, a ...interface{}) {\n\tif *logVerbose {\n\t\tslog.Infof(format, a...)\n\t}\n}\n\ntype relayProxy struct {\n\tTSDBProxy *httputil.ReverseProxy\n\tBosunProxy *httputil.ReverseProxy\n}\n\ntype passthru struct {\n\tio.ReadCloser\n\tbuf bytes.Buffer\n}\n\nfunc (p *passthru) Read(b []byte) (int, error) {\n\tn, err := p.ReadCloser.Read(b)\n\tp.buf.Write(b[:n])\n\treturn n, err\n}\n\ntype relayWriter struct {\n\thttp.ResponseWriter\n\tcode int\n}\n\nfunc (rw *relayWriter) WriteHeader(code int) {\n\trw.code = code\n\trw.ResponseWriter.WriteHeader(code)\n}\n\nvar (\n\trelayHeader = \"X-Relayed-From\"\n\tencHeader = \"Content-Encoding\"\n\ttypeHeader = \"Content-Type\"\n\taccessHeader = \"X-Access-Header\"\n\tmyHost string\n)\n\nfunc (rp *relayProxy) relayPut(responseWriter http.ResponseWriter, r *http.Request, parse bool) {\n\tisRelayed := r.Header.Get(relayHeader) != \"\"\n\treader := &passthru{ReadCloser: r.Body}\n\tr.Body = reader\n\tw := &relayWriter{ResponseWriter: responseWriter}\n\trp.TSDBProxy.ServeHTTP(w, r)\n\tif w.code\/100 != 2 {\n\t\tverbose(\"relayPut got status %d\", w.code)\n\t\tcollect.Add(\"puts.error\", tags, 1)\n\t\treturn\n\t}\n\tverbose(\"relayed to tsdb\")\n\tcollect.Add(\"puts.relayed\", tags, 1)\n\t\/\/ Send to bosun in a separate go routine so we can end the source's request.\n\tgo func() {\n\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\treq, err := http.NewRequest(r.Method, bosunIndexURL, body)\n\t\tif err != nil {\n\t\t\tverbose(\"bosun connect error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tverbose(\"bosun relay error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tresp.Body.Close()\n\t\tverbose(\"bosun relay success\")\n\t}()\n\t\/\/ Parse and denormalize datapoints\n\tif !isRelayed && parse && denormalizationRules != nil {\n\t\tgo rp.denormalize(bytes.NewReader(reader.buf.Bytes()))\n\t}\n\n\tif !isRelayed && len(relayPutUrls) > 0 {\n\t\tgo func() {\n\t\t\tfor _, relayUrl := range relayPutUrls {\n\t\t\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\t\t\treq, err := http.NewRequest(r.Method, relayUrl, body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"relayPutUrls connect error: %v\", err)\n\t\t\t\t\tcollect.Add(\"additional.puts.error\", tags, 1)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif contenttype := r.Header.Get(typeHeader); contenttype != \"\" {\n\t\t\t\t\treq.Header.Set(typeHeader, contenttype)\n\t\t\t\t}\n\t\t\t\tif access := r.Header.Get(accessHeader); access != \"\" {\n\t\t\t\t\treq.Header.Set(accessHeader, access)\n\t\t\t\t}\n\t\t\t\tif encoding := r.Header.Get(encHeader); encoding != \"\" {\n\t\t\t\t\treq.Header.Set(encHeader, encoding)\n\t\t\t\t}\n\t\t\t\treq.Header.Add(relayHeader, myHost)\n\t\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"secondary relay error: %v\", err)\n\t\t\t\t\tcollect.Add(\"additional.puts.error\", tags, 1)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t\tverbose(\"secondary relay success\")\n\t\t\t\tcollect.Add(\"additional.puts.relayed\", tags, 1)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (rp *relayProxy) denormalize(body io.Reader) {\n\tgReader, err := gzip.NewReader(body)\n\tif err != nil {\n\t\tverbose(\"error making gzip reader: %v\", err)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(gReader)\n\tdps := []*opentsdb.DataPoint{}\n\terr = decoder.Decode(&dps)\n\tif err != nil {\n\t\tverbose(\"error decoding data points: %v\", err)\n\t\treturn\n\t}\n\trelayDps := []*opentsdb.DataPoint{}\n\tfor _, dp := range dps {\n\t\tif rule, ok := denormalizationRules[dp.Metric]; ok {\n\t\t\tif err = rule.Translate(dp); err == nil {\n\t\t\t\trelayDps = append(relayDps, dp)\n\t\t\t} else {\n\t\t\t\tverbose(\"error translating points: %v\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif len(relayDps) == 0 {\n\t\treturn\n\t}\n\tbuf := &bytes.Buffer{}\n\tgWriter := gzip.NewWriter(buf)\n\tencoder := json.NewEncoder(gWriter)\n\terr = encoder.Encode(relayDps)\n\tif err != nil {\n\t\tverbose(\"error encoding denormalized data points: %v\", err)\n\t\treturn\n\t}\n\tif err = gWriter.Close(); err != nil {\n\t\tverbose(\"error zipping denormalized data points: %v\", err)\n\t\treturn\n\t}\n\treq, err := http.NewRequest(\"POST\", tsdbPutURL, buf)\n\tif err != nil {\n\t\tverbose(\"error posting denormalized data points: %v\", err)\n\t\treturn\n\t}\n\treq.Header.Set(typeHeader, \"application\/json\")\n\treq.Header.Set(encHeader, \"gzip\")\n\n\tresponseWriter := httptest.NewRecorder()\n\trp.relayPut(responseWriter, req, false)\n\n\tverbose(\"relayed %d denormalized data points. Tsdb response: %d\", len(relayDps), responseWriter.Code)\n}\n\nfunc (rp *relayProxy) relayMetadata(responseWriter http.ResponseWriter, r *http.Request) {\n\treader := &passthru{ReadCloser: r.Body}\n\tr.Body = reader\n\tw := &relayWriter{ResponseWriter: responseWriter}\n\trp.BosunProxy.ServeHTTP(w, r)\n\tif w.code != 204 {\n\t\tverbose(\"relayMetadata got status %d\", w.code)\n\t\tcollect.Add(\"metadata.error\", tags, 1)\n\t\treturn\n\t}\n\tverbose(\"relayed metadata to bosun\")\n\tcollect.Add(\"metadata.relayed\", tags, 1)\n\tif r.Header.Get(relayHeader) != \"\" {\n\t\treturn\n\t}\n\tif len(relayPutUrls) != 0 {\n\t\tgo func() {\n\t\t\tfor _, relayUrl := range relayPutUrls {\n\t\t\t\trelayUrl = strings.Replace(relayUrl, \"\/put\", \"\/metadata\/put\", 1)\n\t\t\t\tbody := bytes.NewBuffer(reader.buf.Bytes())\n\t\t\t\treq, err := http.NewRequest(r.Method, relayUrl, body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"metadata relayPutUrls error %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif contenttype := r.Header.Get(typeHeader); contenttype != \"\" {\n\t\t\t\t\treq.Header.Set(typeHeader, contenttype)\n\t\t\t\t}\n\t\t\t\tif access := r.Header.Get(accessHeader); access != \"\" {\n\t\t\t\t\treq.Header.Set(accessHeader, access)\n\t\t\t\t}\n\t\t\t\tif encoding := r.Header.Get(encHeader); encoding != \"\" {\n\t\t\t\t\treq.Header.Set(encHeader, encoding)\n\t\t\t\t}\n\t\t\t\treq.Header.Add(relayHeader, myHost)\n\t\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tverbose(\"secondary relay metadata error: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t\tverbose(\"secondary relay metadata success\")\n\t\t\t}\n\t\t}()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fabric8io\/gofabric8\/client\"\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/spf13\/cobra\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n)\n\ntype cleanUpJenkinsFlags struct {\n\tconfirm bool\n}\n\n\/\/ NewCmdCleanUpJenkins delete files in the tenants content repository\nfunc NewCmdCleanUpJenkins(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"jenkins\",\n\t\tShort: \"Deletes all the jenkins jobs in your tenant Jenkins service\",\n\t\tLong: `Deletes all the jenkins jobs in your tenant Jenkins service`,\n\t\tAliases: []string{\"content-repository\"},\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tp := cleanUpJenkinsFlags{}\n\t\t\tif cmd.Flags().Lookup(yesFlag).Value.String() == \"true\" {\n\t\t\t\tp.confirm = true\n\t\t\t}\n\t\t\terr := p.cleanUpJenkins(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"%s\\n\", err)\n\t\t\t}\n\t\t\treturn\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc (p *cleanUpJenkinsFlags) cleanUpJenkins(f *cmdutil.Factory) error {\n\tc, cfg := client.NewClient(f)\n\tns, _, _ := f.DefaultNamespace()\n\toc, _ := client.NewOpenShiftClient(cfg)\n\tinitSchema()\n\n\tuserNS, err := detectCurrentUserNamespace(ns, c, oc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjenkinsNS := fmt.Sprintf(\"%s-jenkins\", userNS)\n\n\tif !p.confirm {\n\t\tconfirm := \"\"\n\t\tutil.Warn(\"WARNING this is destructive and will remove ALL of the jenkins jobs\\n\")\n\t\tutil.Info(\"for your tenant: \")\n\t\tutil.Successf(\"%s\", userNS)\n\t\tutil.Info(\" running in namespace: \")\n\t\tutil.Successf(\"%s\\n\", jenkinsNS)\n\t\tutil.Warn(\"\\nContinue [y\/N]: \")\n\t\tfmt.Scanln(&confirm)\n\t\tif confirm != \"y\" {\n\t\t\tutil.Warn(\"Aborted\\n\")\n\t\t\treturn nil\n\t\t}\n\t}\n\tutil.Info(\"Cleaning jenkins for tenant: \")\n\tutil.Successf(\"%s\", userNS)\n\tutil.Info(\" running in namespace: \")\n\tutil.Successf(\"%s\\n\", jenkinsNS)\n\n\terr = ensureDeploymentOrDCHasReplicas(c, oc, jenkinsNS, \"jenkins\", 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpod, err := waitForReadyPodForDeploymentOrDC(c, oc, jenkinsNS, \"jenkins\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tutil.Infof(\"Found running jenkins pod %s\\n\", pod)\n\n\tkubeCLI := \"kubectl\"\n\terr = runCommand(kubeCLI, \"exec\", \"-it\", pod, \"-n\", jenkinsNS, \"--\", \"bash\", \"-c\", \"rm -rf \/var\/lib\/jenkins\/jobs\/*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = runCommand(kubeCLI, \"delete\", \"pod\", pod, \"-n\", jenkinsNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now lets remove the Jenkins Job ConfigMaps\n\terr = runCommand(\"oc\", \"delete\", \"configmap\", \"-l\", \"openshift.io\/jenkins=job\", \"-n\", userNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now lets remove the BuildConfigs and Builds\n\terr = runCommand(\"oc\", \"delete\", \"buildconfig\", \"--all\", \"-n\", userNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = runCommand(\"oc\", \"delete\", \"build\", \"--all\", \"-n\", userNS)\n\n\tif err == nil {\n\t\tutil.Info(\"Completed!\\n\")\n\t}\n\treturn err\n}\n<commit_msg>avoid kubectl for clean jenkins<commit_after>\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fabric8io\/gofabric8\/client\"\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/spf13\/cobra\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n)\n\ntype cleanUpJenkinsFlags struct {\n\tconfirm bool\n}\n\n\/\/ NewCmdCleanUpJenkins delete files in the tenants content repository\nfunc NewCmdCleanUpJenkins(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"jenkins\",\n\t\tShort: \"Deletes all the jenkins jobs in your tenant Jenkins service\",\n\t\tLong: `Deletes all the jenkins jobs in your tenant Jenkins service`,\n\t\tAliases: []string{\"content-repository\"},\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tp := cleanUpJenkinsFlags{}\n\t\t\tif cmd.Flags().Lookup(yesFlag).Value.String() == \"true\" {\n\t\t\t\tp.confirm = true\n\t\t\t}\n\t\t\terr := p.cleanUpJenkins(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"%s\\n\", err)\n\t\t\t}\n\t\t\treturn\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc (p *cleanUpJenkinsFlags) cleanUpJenkins(f *cmdutil.Factory) error {\n\tc, cfg := client.NewClient(f)\n\tns, _, _ := f.DefaultNamespace()\n\toc, _ := client.NewOpenShiftClient(cfg)\n\tinitSchema()\n\n\tuserNS, err := detectCurrentUserNamespace(ns, c, oc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjenkinsNS := fmt.Sprintf(\"%s-jenkins\", userNS)\n\n\tif !p.confirm {\n\t\tconfirm := \"\"\n\t\tutil.Warn(\"WARNING this is destructive and will remove ALL of the jenkins jobs\\n\")\n\t\tutil.Info(\"for your tenant: \")\n\t\tutil.Successf(\"%s\", userNS)\n\t\tutil.Info(\" running in namespace: \")\n\t\tutil.Successf(\"%s\\n\", jenkinsNS)\n\t\tutil.Warn(\"\\nContinue [y\/N]: \")\n\t\tfmt.Scanln(&confirm)\n\t\tif confirm != \"y\" {\n\t\t\tutil.Warn(\"Aborted\\n\")\n\t\t\treturn nil\n\t\t}\n\t}\n\tutil.Info(\"Cleaning jenkins for tenant: \")\n\tutil.Successf(\"%s\", userNS)\n\tutil.Info(\" running in namespace: \")\n\tutil.Successf(\"%s\\n\", jenkinsNS)\n\n\terr = ensureDeploymentOrDCHasReplicas(c, oc, jenkinsNS, \"jenkins\", 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpod, err := waitForReadyPodForDeploymentOrDC(c, oc, jenkinsNS, \"jenkins\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tutil.Infof(\"Found running jenkins pod %s\\n\", pod)\n\n\tkubeCLI := \"oc\" \n\terr = runCommand(kubeCLI, \"exec\", \"-it\", pod, \"-n\", jenkinsNS, \"--\", \"bash\", \"-c\", \"rm -rf \/var\/lib\/jenkins\/jobs\/*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = runCommand(kubeCLI, \"delete\", \"pod\", pod, \"-n\", jenkinsNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now lets remove the Jenkins Job ConfigMaps\n\terr = runCommand(\"oc\", \"delete\", \"configmap\", \"-l\", \"openshift.io\/jenkins=job\", \"-n\", userNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now lets remove the BuildConfigs and Builds\n\terr = runCommand(\"oc\", \"delete\", \"buildconfig\", \"--all\", \"-n\", userNS)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = runCommand(\"oc\", \"delete\", \"build\", \"--all\", \"-n\", userNS)\n\n\tif err == nil {\n\t\tutil.Info(\"Completed!\\n\")\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package html\n\nimport (\n\t\"github.com\/tdewolff\/parse\"\n\t\"github.com\/tdewolff\/parse\/html\"\n)\n\ntype tokenBuffer struct {\n\ttokenizer *html.Tokenizer\n\n\tbuf []token\n\tpos int\n}\n\nfunc newTokenBuffer(tokenizer *html.Tokenizer) *tokenBuffer {\n\treturn &tokenBuffer{\n\t\ttokenizer: tokenizer,\n\t\tbuf: make([]token, 0, 8),\n\t}\n}\n\nfunc (z *tokenBuffer) Read(p []token) int {\n\tfor i := 0; i < len(p); i++ {\n\t\ttt, data := z.tokenizer.Next()\n\t\tif !z.tokenizer.IsEOF() {\n\t\t\tdata = parse.Copy(data)\n\t\t}\n\n\t\tvar attrVal []byte\n\t\tvar hash html.Hash\n\t\tif tt == html.AttributeToken {\n\t\t\tattrVal = z.tokenizer.AttrVal()\n\t\t\tif !z.tokenizer.IsEOF() {\n\t\t\t\tattrVal = parse.Copy(attrVal)\n\t\t\t}\n\t\t\thash = html.ToHash(data)\n\t\t} else if tt == html.StartTagToken || tt == html.EndTagToken {\n\t\t\thash = z.tokenizer.RawTag()\n\t\t\tif hash == 0 {\n\t\t\t\thash = html.ToHash(data)\n\t\t\t}\n\t\t}\n\t\tp[i] = token{tt, data, attrVal, hash}\n\t\tif tt == html.ErrorToken {\n\t\t\treturn i + 1\n\t\t}\n\t}\n\treturn len(p)\n}\n\n\/\/ Peek returns the ith element and possibly does an allocation.\n\/\/ Peeking past an error will panic.\nfunc (z *tokenBuffer) Peek(i int) *token {\n\tend := z.pos + i\n\tif end >= len(z.buf) {\n\t\tc := cap(z.buf)\n\t\td := len(z.buf) - z.pos\n\t\tvar buf []token\n\t\tif 2*d > c {\n\t\t\tbuf = make([]token, d, 2*c)\n\t\t} else {\n\t\t\tbuf = z.buf[:d]\n\t\t}\n\t\tcopy(buf, z.buf[z.pos:])\n\n\t\tn := z.Read(buf[d:cap(buf)])\n\t\tend -= z.pos\n\t\tz.pos, z.buf = 0, buf[:d+n]\n\t}\n\treturn &z.buf[end]\n}\n\n\/\/ Shift returns the first element and advances position.\nfunc (z *tokenBuffer) Shift() *token {\n\tt := z.Peek(0)\n\tz.pos++\n\treturn t\n}\n<commit_msg>Commentary change<commit_after>package html \/\/ import \"github.com\/tdewolff\/minify\/html\"\n\nimport (\n\t\"github.com\/tdewolff\/parse\"\n\t\"github.com\/tdewolff\/parse\/html\"\n)\n\ntype tokenBuffer struct {\n\ttokenizer *html.Tokenizer\n\n\tbuf []token\n\tpos int\n}\n\nfunc newTokenBuffer(tokenizer *html.Tokenizer) *tokenBuffer {\n\treturn &tokenBuffer{\n\t\ttokenizer: tokenizer,\n\t\tbuf: make([]token, 0, 8),\n\t}\n}\n\nfunc (z *tokenBuffer) Read(p []token) int {\n\tfor i := 0; i < len(p); i++ {\n\t\ttt, data := z.tokenizer.Next()\n\t\tif !z.tokenizer.IsEOF() {\n\t\t\tdata = parse.Copy(data)\n\t\t}\n\n\t\tvar attrVal []byte\n\t\tvar hash html.Hash\n\t\tif tt == html.AttributeToken {\n\t\t\tattrVal = z.tokenizer.AttrVal()\n\t\t\tif !z.tokenizer.IsEOF() {\n\t\t\t\tattrVal = parse.Copy(attrVal)\n\t\t\t}\n\t\t\thash = html.ToHash(data)\n\t\t} else if tt == html.StartTagToken || tt == html.EndTagToken {\n\t\t\thash = z.tokenizer.RawTag()\n\t\t\tif hash == 0 {\n\t\t\t\thash = html.ToHash(data)\n\t\t\t}\n\t\t}\n\t\tp[i] = token{tt, data, attrVal, hash}\n\t\tif tt == html.ErrorToken {\n\t\t\treturn i + 1\n\t\t}\n\t}\n\treturn len(p)\n}\n\n\/\/ Peek returns the ith element and possibly does an allocation.\n\/\/ Peeking past an error will panic.\nfunc (z *tokenBuffer) Peek(i int) *token {\n\tend := z.pos + i\n\tif end >= len(z.buf) {\n\t\tc := cap(z.buf)\n\t\td := len(z.buf) - z.pos\n\t\tvar buf []token\n\t\tif 2*d > c {\n\t\t\tbuf = make([]token, d, 2*c)\n\t\t} else {\n\t\t\tbuf = z.buf[:d]\n\t\t}\n\t\tcopy(buf, z.buf[z.pos:])\n\n\t\tn := z.Read(buf[d:cap(buf)])\n\t\tend -= z.pos\n\t\tz.pos, z.buf = 0, buf[:d+n]\n\t}\n\treturn &z.buf[end]\n}\n\n\/\/ Shift returns the first element and advances position.\nfunc (z *tokenBuffer) Shift() *token {\n\tt := z.Peek(0)\n\tz.pos++\n\treturn t\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sts\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\n\t\"github.com\/tapglue\/snaas\/platform\/generate\"\n)\n\nconst (\n\targDestroy = \"-destroy\"\n\targOut = \"-out\"\n\targState = \"-state\"\n\targVarFile = \"-var-file\"\n\n\tbinaryTerraform = \"terraform\"\n\n\tcmdSetup = \"setup\"\n\tcmdTeardown = \"teardown\"\n\tcmdUpdate = \"update\"\n\n\tdefaultKeyPath = \"access.pem\"\n\tdefaultStatesPath = \"infrastructure\/terraform\/states\"\n\tdefaultTemplatePath = \"infrastructure\/terraform\/template\"\n\tdefaultTmpPath = \"\/tmp\"\n\n\tfmtNamespace = \"%s-%s\"\n\tfmtPlan = \"%s\/%s.plan\"\n\tfmtStateFile = \"%s.tfstate\"\n\tfmtVarsFile = \"%s.tfvars\"\n\tfmtTFVar = \"TF_VAR_%s=%s\"\n\n\ttfCmdApply = \"apply\"\n\ttfCmdDestroy = \"destroy\"\n\ttfCmdPlan = \"plan\"\n\n\ttplTFVars = `\nkey = {\n\taccess = \"{{.KeyAccess}}\"\n}\npg_password = \"{{.PGPassword}}\"\n`\n\n\tvarAccount = \"account\"\n\tvarEnv = \"env\"\n\tvarRegion = \"region\"\n)\n\nfunc main() {\n\tvar (\n\t\tenv = flag.String(\"env\", \"\", \"Environment used for isolation.\")\n\t\tregion = flag.String(\"region\", \"\", \"AWS region to deploy to.\")\n\t\tstatesPath = flag.String(\"states.path\", defaultStatesPath, \"Location to store env states.\")\n\t\ttemplatePath = flag.String(\"template.path\", defaultTemplatePath, \"Location of the infrastructure template.\")\n\t\ttmpPath = flag.String(\"tmp.path\", defaultTmpPath, \"Location for temporary output like plans.\")\n\t\tvarsPath = flag.String(\"vars.path\", \"\", \"Location of vars file.\")\n\t)\n\tflag.Parse()\n\n\tlog.SetFlags(log.Lshortfile)\n\n\tif len(flag.Args()) != 1 {\n\t\tlog.Fatal(\"provide command: setup, teardown, update\")\n\t}\n\n\tif *env == \"\" {\n\t\tlog.Fatal(\"provide env\")\n\t}\n\n\tif *region == \"\" {\n\t\tlog.Fatal(\"provide region\")\n\t}\n\n\tif _, err := exec.LookPath(binaryTerraform); err != nil {\n\t\tlog.Fatal(\"terraform must be in your PATH\")\n\t}\n\n\taccount, err := awsAcoount(*region)\n\tif err != nil {\n\t\tlog.Fatalf(\"AWS account fetch failed: %s\", err)\n\t}\n\n\tvar (\n\t\tnamespace = fmt.Sprintf(fmtNamespace, *env, *region)\n\t\tplanFile = fmt.Sprintf(fmtPlan, *tmpPath, namespace)\n\t\tstatePath = filepath.Join(*statesPath, namespace)\n\t\tstateFile = filepath.Join(statePath, fmt.Sprintf(fmtStateFile, namespace))\n\t\tvarFile = filepath.Join(statePath, fmt.Sprintf(fmtVarsFile, namespace))\n\t\tenviron = append(\n\t\t\tos.Environ(),\n\t\t\ttfVar(varAccount, account),\n\t\t\ttfVar(varEnv, *env),\n\t\t\ttfVar(varRegion, *region),\n\t\t)\n\t)\n\n\tif *varsPath != \"\" {\n\t\tvarFile = *varsPath\n\t}\n\n\tswitch flag.Args()[0] {\n\tcase cmdSetup:\n\t\tif _, err := os.Stat(stateFile); err == nil {\n\t\t\tlog.Fatalf(\"state file already exists: %s\", stateFile)\n\t\t}\n\n\t\tif err := os.MkdirAll(statePath, os.ModePerm); err != nil {\n\t\t\tlog.Fatalf(\"state dir creation failed: %s\", err)\n\t\t}\n\n\t\tif _, err := os.Stat(varFile); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tpubKey, err := generateKeyPair(filepath.Join(statePath, defaultKeyPath))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tif err = generateVarFile(varFile, pubKey, generate.RandomString(32)); err != nil {\n\t\t\t\tlog.Fatalf(\"var file create failed: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\targs := []string{\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"Want to apply the plan? (type 'yes')\")\n\t\tfmt.Print(\"(no) |> \")\n\n\t\tresponse := \"no\"\n\t\tfmt.Scanf(\"%s\", &response)\n\n\t\tif response != \"yes\" {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\tplanFile,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdApply, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tcase cmdTeardown:\n\t\targs := []string{\n\t\t\targDestroy,\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdDestroy, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tcase cmdUpdate:\n\t\tif _, err := os.Stat(stateFile); err != nil {\n\t\t\tlog.Fatalf(\"couldn't locate state file: %s\", err)\n\t\t}\n\n\t\targs := []string{\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"\\nWant to apply the plan? (type 'yes')\")\n\t\tfmt.Print(\"(no) |> \")\n\n\t\tresponse := \"no\"\n\t\tfmt.Scanf(\"%s\", &response)\n\n\t\tif response != \"yes\" {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\tplanFile,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdApply, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"unknown command '%s'\", flag.Args()[0])\n\t}\n}\n\nfunc awsAcoount(region string) (string, error) {\n\tvar (\n\t\tproviders = []credentials.Provider{\n\t\t\t&credentials.EnvProvider{},\n\t\t\t&credentials.SharedCredentialsProvider{},\n\t\t}\n\t\tawsSession = session.New(&aws.Config{\n\t\t\tCredentials: credentials.NewChainCredentials(providers),\n\t\t\tRegion: aws.String(region),\n\t\t})\n\t\tstsService = sts.New(awsSession)\n\t)\n\n\tres, err := stsService.GetCallerIdentity(&sts.GetCallerIdentityInput{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn *res.Account, nil\n}\n\nfunc generateKeyPair(privateKeyPath string) ([]byte, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivateFile, err := os.Create(privateKeyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer privateFile.Close()\n\n\tprivatePEM := &pem.Block{\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t\tType: \"RSA PRIVATE KEY\",\n\t}\n\tif err := pem.Encode(privateFile, privatePEM); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ssh.MarshalAuthorizedKey(pub), nil\n}\n\nfunc generateVarFile(path string, accessKeyPub []byte, pgPassword string) error {\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"vars\").Parse(tplTFVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl.Execute(f, struct {\n\t\tKeyAccess string\n\t\tPGPassword string\n\t}{\n\t\tKeyAccess: string(accessKeyPub),\n\t\tPGPassword: pgPassword,\n\t})\n\n\treturn nil\n}\n\nfunc prepareCmd(environ []string, command string, args ...string) *exec.Cmd {\n\targs = append([]string{command}, args...)\n\n\tcmd := exec.Command(binaryTerraform, args...)\n\tcmd.Env = append(\n\t\tenviron,\n\t\t\"TF_LOG=TRACE\",\n\t\tfmt.Sprintf(\"TF_LOG_PATH=\/tmp\/%s.log\", command),\n\t)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\n\treturn cmd\n}\n\nfunc tfVar(k, v string) string {\n\treturn fmt.Sprintf(fmtTFVar, k, v)\n}\n<commit_msg>Strip newlines in access key<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sts\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\n\t\"github.com\/tapglue\/snaas\/platform\/generate\"\n)\n\nconst (\n\targDestroy = \"-destroy\"\n\targOut = \"-out\"\n\targState = \"-state\"\n\targVarFile = \"-var-file\"\n\n\tbinaryTerraform = \"terraform\"\n\n\tcmdSetup = \"setup\"\n\tcmdTeardown = \"teardown\"\n\tcmdUpdate = \"update\"\n\n\tdefaultKeyPath = \"access.pem\"\n\tdefaultStatesPath = \"infrastructure\/terraform\/states\"\n\tdefaultTemplatePath = \"infrastructure\/terraform\/template\"\n\tdefaultTmpPath = \"\/tmp\"\n\n\tfmtNamespace = \"%s-%s\"\n\tfmtPlan = \"%s\/%s.plan\"\n\tfmtStateFile = \"%s.tfstate\"\n\tfmtVarsFile = \"%s.tfvars\"\n\tfmtTFVar = \"TF_VAR_%s=%s\"\n\n\ttfCmdApply = \"apply\"\n\ttfCmdDestroy = \"destroy\"\n\ttfCmdPlan = \"plan\"\n\n\ttplTFVars = `\nkey = {\n\taccess = \"{{.KeyAccess}}\"\n}\npg_password = \"{{.PGPassword}}\"\n`\n\n\tvarAccount = \"account\"\n\tvarEnv = \"env\"\n\tvarRegion = \"region\"\n)\n\nfunc main() {\n\tvar (\n\t\tenv = flag.String(\"env\", \"\", \"Environment used for isolation.\")\n\t\tregion = flag.String(\"region\", \"\", \"AWS region to deploy to.\")\n\t\tstatesPath = flag.String(\"states.path\", defaultStatesPath, \"Location to store env states.\")\n\t\ttemplatePath = flag.String(\"template.path\", defaultTemplatePath, \"Location of the infrastructure template.\")\n\t\ttmpPath = flag.String(\"tmp.path\", defaultTmpPath, \"Location for temporary output like plans.\")\n\t\tvarsPath = flag.String(\"vars.path\", \"\", \"Location of vars file.\")\n\t)\n\tflag.Parse()\n\n\tlog.SetFlags(log.Lshortfile)\n\n\tif len(flag.Args()) != 1 {\n\t\tlog.Fatal(\"provide command: setup, teardown, update\")\n\t}\n\n\tif *env == \"\" {\n\t\tlog.Fatal(\"provide env\")\n\t}\n\n\tif *region == \"\" {\n\t\tlog.Fatal(\"provide region\")\n\t}\n\n\tif _, err := exec.LookPath(binaryTerraform); err != nil {\n\t\tlog.Fatal(\"terraform must be in your PATH\")\n\t}\n\n\taccount, err := awsAcoount(*region)\n\tif err != nil {\n\t\tlog.Fatalf(\"AWS account fetch failed: %s\", err)\n\t}\n\n\tvar (\n\t\tnamespace = fmt.Sprintf(fmtNamespace, *env, *region)\n\t\tplanFile = fmt.Sprintf(fmtPlan, *tmpPath, namespace)\n\t\tstatePath = filepath.Join(*statesPath, namespace)\n\t\tstateFile = filepath.Join(statePath, fmt.Sprintf(fmtStateFile, namespace))\n\t\tvarFile = filepath.Join(statePath, fmt.Sprintf(fmtVarsFile, namespace))\n\t\tenviron = append(\n\t\t\tos.Environ(),\n\t\t\ttfVar(varAccount, account),\n\t\t\ttfVar(varEnv, *env),\n\t\t\ttfVar(varRegion, *region),\n\t\t)\n\t)\n\n\tif *varsPath != \"\" {\n\t\tvarFile = *varsPath\n\t}\n\n\tswitch flag.Args()[0] {\n\tcase cmdSetup:\n\t\tif _, err := os.Stat(stateFile); err == nil {\n\t\t\tlog.Fatalf(\"state file already exists: %s\", stateFile)\n\t\t}\n\n\t\tif err := os.MkdirAll(statePath, os.ModePerm); err != nil {\n\t\t\tlog.Fatalf(\"state dir creation failed: %s\", err)\n\t\t}\n\n\t\tif _, err := os.Stat(varFile); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tpubKey, err := generateKeyPair(filepath.Join(statePath, defaultKeyPath))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tif err = generateVarFile(varFile, pubKey, generate.RandomString(32)); err != nil {\n\t\t\t\tlog.Fatalf(\"var file create failed: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\targs := []string{\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"Want to apply the plan? (type 'yes')\")\n\t\tfmt.Print(\"(no) |> \")\n\n\t\tresponse := \"no\"\n\t\tfmt.Scanf(\"%s\", &response)\n\n\t\tif response != \"yes\" {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\tplanFile,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdApply, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tcase cmdTeardown:\n\t\targs := []string{\n\t\t\targDestroy,\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdDestroy, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tcase cmdUpdate:\n\t\tif _, err := os.Stat(stateFile); err != nil {\n\t\t\tlog.Fatalf(\"couldn't locate state file: %s\", err)\n\t\t}\n\n\t\targs := []string{\n\t\t\targOut, planFile,\n\t\t\targState, stateFile,\n\t\t\targVarFile, varFile,\n\t\t\t*templatePath,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdPlan, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"\\nWant to apply the plan? (type 'yes')\")\n\t\tfmt.Print(\"(no) |> \")\n\n\t\tresponse := \"no\"\n\t\tfmt.Scanf(\"%s\", &response)\n\n\t\tif response != \"yes\" {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\targs = []string{\n\t\t\targState, stateFile,\n\t\t\tplanFile,\n\t\t}\n\n\t\tif err := prepareCmd(environ, tfCmdApply, args...).Run(); err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"unknown command '%s'\", flag.Args()[0])\n\t}\n}\n\nfunc awsAcoount(region string) (string, error) {\n\tvar (\n\t\tproviders = []credentials.Provider{\n\t\t\t&credentials.EnvProvider{},\n\t\t\t&credentials.SharedCredentialsProvider{},\n\t\t}\n\t\tawsSession = session.New(&aws.Config{\n\t\t\tCredentials: credentials.NewChainCredentials(providers),\n\t\t\tRegion: aws.String(region),\n\t\t})\n\t\tstsService = sts.New(awsSession)\n\t)\n\n\tres, err := stsService.GetCallerIdentity(&sts.GetCallerIdentityInput{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn *res.Account, nil\n}\n\nfunc generateKeyPair(privateKeyPath string) ([]byte, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivateFile, err := os.Create(privateKeyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer privateFile.Close()\n\n\tprivatePEM := &pem.Block{\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t\tType: \"RSA PRIVATE KEY\",\n\t}\n\tif err := pem.Encode(privateFile, privatePEM); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ssh.MarshalAuthorizedKey(pub), nil\n}\n\nfunc generateVarFile(path string, accessKeyPub []byte, pgPassword string) error {\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"vars\").Parse(tplTFVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl.Execute(f, struct {\n\t\tKeyAccess string\n\t\tPGPassword string\n\t}{\n\t\tKeyAccess: strings.Trim(string(accessKeyPub), \"\\n\"),\n\t\tPGPassword: pgPassword,\n\t})\n\n\treturn nil\n}\n\nfunc prepareCmd(environ []string, command string, args ...string) *exec.Cmd {\n\targs = append([]string{command}, args...)\n\n\tcmd := exec.Command(binaryTerraform, args...)\n\tcmd.Env = append(\n\t\tenviron,\n\t\t\"TF_LOG=TRACE\",\n\t\tfmt.Sprintf(\"TF_LOG_PATH=\/tmp\/%s.log\", command),\n\t)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\n\treturn cmd\n}\n\nfunc tfVar(k, v string) string {\n\treturn fmt.Sprintf(fmtTFVar, k, v)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Create remote client on target sync not on request<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage telemetry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/ligato\/cn-infra\/infra\"\n\t\"github.com\/ligato\/cn-infra\/logging\"\n\t\"github.com\/ligato\/cn-infra\/rpc\/grpc\"\n\tprom \"github.com\/ligato\/cn-infra\/rpc\/prometheus\"\n\t\"github.com\/ligato\/cn-infra\/rpc\/rest\"\n\t\"github.com\/ligato\/cn-infra\/servicelabel\"\n\t\"github.com\/unrolled\/render\"\n\n\t\"github.com\/ligato\/vpp-agent\/api\/configurator\"\n\t\"github.com\/ligato\/vpp-agent\/pkg\/metrics\"\n\t\"github.com\/ligato\/vpp-agent\/pkg\/models\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\"\n\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp1904\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp1908\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp2001_324\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp2001_379\"\n)\n\nvar debug = os.Getenv(\"DEBUG_TELEMETRY\") != \"\"\n\n\/\/ Plugin registers Telemetry Plugin\ntype Plugin struct {\n\tDeps\n\n\thandler vppcalls.TelemetryVppAPI\n\n\tstatsPollerServer\n\tprometheusMetrics\n\n\t\/\/ From config file\n\tupdatePeriod time.Duration\n\tdisabled bool\n\tprometheusDisabled bool\n\tskipped map[string]bool\n\n\twg sync.WaitGroup\n\tquit chan struct{}\n}\n\n\/\/ Deps represents dependencies of Telemetry Plugin\ntype Deps struct {\n\tinfra.PluginDeps\n\tServiceLabel servicelabel.ReaderAPI\n\tGoVppmux govppmux.StatsAPI\n\tPrometheus prom.API\n\tGRPC grpc.Server\n\tHTTPHandlers rest.HTTPHandlers\n}\n\n\/\/ Init initializes Telemetry Plugin\nfunc (p *Plugin) Init() error {\n\tp.quit = make(chan struct{})\n\tp.skipped = make(map[string]bool, 0)\n\n\t\/\/ Telemetry config file\n\tconfig, err := p.loadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif config != nil {\n\t\t\/\/ If telemetry is not enabled, skip plugin initialization\n\t\tif config.Disabled {\n\t\t\tp.Log.Info(\"Telemetry plugin disabled via config file\")\n\t\t\tp.disabled = true\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Disable prometheus metrics if set by config\n\t\tif config.PrometheusDisabled {\n\t\t\tp.Log.Info(\"Prometheus metrics disabled via config file\")\n\t\t\tp.prometheusDisabled = true\n\t\t} else {\n\t\t\t\/\/ This prevents setting the update period to less than 5 seconds,\n\t\t\t\/\/ which can have significant performance hit.\n\t\t\tif config.PollingInterval > minimumUpdatePeriod {\n\t\t\t\tp.updatePeriod = config.PollingInterval\n\t\t\t\tp.Log.Infof(\"polling period changed to %v\", p.updatePeriod)\n\t\t\t} else if config.PollingInterval > 0 {\n\t\t\t\tp.Log.Warnf(\"polling period has to be at least %s, using default: %v\",\n\t\t\t\t\tminimumUpdatePeriod, defaultUpdatePeriod)\n\t\t\t}\n\t\t\t\/\/ Store map of skipped metrics\n\t\t\tfor _, skip := range config.Skipped {\n\t\t\t\tp.skipped[skip] = true\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Register prometheus\n\tif !p.prometheusDisabled {\n\t\tif p.updatePeriod == 0 {\n\t\t\tp.updatePeriod = defaultUpdatePeriod\n\t\t}\n\t\tif err := p.registerPrometheus(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif p.HTTPHandlers != nil {\n\t\t\tp.HTTPHandlers.RegisterHTTPHandler(\"\/metrics\/{metric}\", metricsHandler, \"GET\")\n\t\t}\n\t}\n\n\t\/\/ Setup stats poller\n\tp.statsPollerServer.log = p.Log.NewLogger(\"stats-poller\")\n\tif err := p.setupStatsPoller(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AfterInit executes after initializion of Telemetry Plugin\nfunc (p *Plugin) AfterInit() error {\n\t\/\/ Do not start polling if telemetry is disabled\n\tif p.disabled || p.prometheusDisabled {\n\t\treturn nil\n\t}\n\n\tp.startPeriodicUpdates()\n\n\treturn nil\n}\n\nfunc (p *Plugin) setupStatsPoller() error {\n\tvppCh, err := p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer vppCh.Close()\n\n\th := vppcalls.CompatibleTelemetryHandler(vppCh, p.GoVppmux)\n\tif h == nil {\n\t\treturn err\n\t}\n\tp.statsPollerServer.handler = h\n\n\tif p.GRPC != nil && p.GRPC.GetServer() != nil {\n\t\tconfigurator.RegisterStatsPollerServer(p.GRPC.GetServer(), &p.statsPollerServer)\n\t}\n\n\treturn nil\n}\n\n\/\/ Close is used to clean up resources used by Telemetry Plugin\nfunc (p *Plugin) Close() error {\n\tclose(p.quit)\n\tp.wg.Wait()\n\treturn nil\n}\n\nfunc (p *Plugin) startPeriodicUpdates() {\n\tvppCh, err := p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\tp.Log.Errorf(\"creating channel failed: %v\", err)\n\t\treturn\n\t}\n\tdefer vppCh.Close()\n\n\tp.handler = vppcalls.CompatibleTelemetryHandler(vppCh, p.GoVppmux)\n\tif p.handler == nil {\n\t\tp.Log.Warnf(\"no compatible telemetry handler, skipping periodic updates\")\n\t\treturn\n\t}\n\n\tp.wg.Add(1)\n\tgo p.periodicUpdates()\n}\n\n\/\/ periodic updates for the metrics data\nfunc (p *Plugin) periodicUpdates() {\n\tdefer p.wg.Done()\n\n\tp.Log.Debugf(\"starting periodic updates (%v)\", p.updatePeriod)\n\n\ttick := time.NewTicker(p.updatePeriod)\n\tfor {\n\t\tselect {\n\t\t\/\/ Delay period between updates\n\t\tcase <-tick.C:\n\t\t\tctx := context.Background()\n\t\t\tp.updatePrometheus(ctx)\n\n\t\t\/\/ Plugin has stopped.\n\t\tcase <-p.quit:\n\t\t\tp.Log.Debugf(\"stopping periodic updates\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (p *Plugin) tracef(f string, a ...interface{}) {\n\tif debug && p.Log.GetLevel() >= logging.DebugLevel {\n\t\ts := fmt.Sprintf(f, a...)\n\t\tif len(s) > 250 {\n\t\t\tp.Log.Debugf(\"%s... (%d bytes omitted) ...%s\", s[:200], len(s)-250, s[len(s)-50:])\n\t\t\treturn\n\t\t}\n\t\tp.Log.Debug(s)\n\t}\n}\n\nfunc metricsHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tvars := mux.Vars(req)\n\t\tif vars == nil {\n\t\t\t_ = formatter.JSON(w, http.StatusNotFound, struct{}{})\n\t\t\treturn\n\t\t}\n\t\tmetric := vars[\"metric\"]\n\t\tmodel, err := models.DefaultRegistry.GetModel(metric)\n\t\tif err != nil {\n\t\t\t_ = formatter.JSON(w, http.StatusNotFound, struct{ Error string }{err.Error()})\n\t\t\treturn\n\t\t}\n\t\tdata := model.NewInstance()\n\t\tif err := metrics.Retrieve(data); err != nil {\n\t\t\t_ = formatter.JSON(w, http.StatusInternalServerError, struct{ Error string }{err.Error()})\n\t\t\treturn\n\t\t}\n\t\t_ = formatter.JSON(w, 200, data)\n\t}\n}\n<commit_msg>start http handler regardless of the prometheus running<commit_after>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage telemetry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/ligato\/cn-infra\/infra\"\n\t\"github.com\/ligato\/cn-infra\/logging\"\n\t\"github.com\/ligato\/cn-infra\/rpc\/grpc\"\n\tprom \"github.com\/ligato\/cn-infra\/rpc\/prometheus\"\n\t\"github.com\/ligato\/cn-infra\/rpc\/rest\"\n\t\"github.com\/ligato\/cn-infra\/servicelabel\"\n\t\"github.com\/unrolled\/render\"\n\n\t\"github.com\/ligato\/vpp-agent\/api\/configurator\"\n\t\"github.com\/ligato\/vpp-agent\/pkg\/metrics\"\n\t\"github.com\/ligato\/vpp-agent\/pkg\/models\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\"\n\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp1904\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp1908\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp2001_324\"\n\t_ \"github.com\/ligato\/vpp-agent\/plugins\/telemetry\/vppcalls\/vpp2001_379\"\n)\n\nvar debug = os.Getenv(\"DEBUG_TELEMETRY\") != \"\"\n\n\/\/ Plugin registers Telemetry Plugin\ntype Plugin struct {\n\tDeps\n\n\thandler vppcalls.TelemetryVppAPI\n\n\tstatsPollerServer\n\tprometheusMetrics\n\n\t\/\/ From config file\n\tupdatePeriod time.Duration\n\tdisabled bool\n\tprometheusDisabled bool\n\tskipped map[string]bool\n\n\twg sync.WaitGroup\n\tquit chan struct{}\n}\n\n\/\/ Deps represents dependencies of Telemetry Plugin\ntype Deps struct {\n\tinfra.PluginDeps\n\tServiceLabel servicelabel.ReaderAPI\n\tGoVppmux govppmux.StatsAPI\n\tPrometheus prom.API\n\tGRPC grpc.Server\n\tHTTPHandlers rest.HTTPHandlers\n}\n\n\/\/ Init initializes Telemetry Plugin\nfunc (p *Plugin) Init() error {\n\tp.quit = make(chan struct{})\n\tp.skipped = make(map[string]bool, 0)\n\n\t\/\/ Telemetry config file\n\tconfig, err := p.loadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif config != nil {\n\t\t\/\/ If telemetry is not enabled, skip plugin initialization\n\t\tif config.Disabled {\n\t\t\tp.Log.Info(\"Telemetry plugin disabled via config file\")\n\t\t\tp.disabled = true\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Disable prometheus metrics if set by config\n\t\tif config.PrometheusDisabled {\n\t\t\tp.Log.Info(\"Prometheus metrics disabled via config file\")\n\t\t\tp.prometheusDisabled = true\n\t\t} else {\n\t\t\t\/\/ This prevents setting the update period to less than 5 seconds,\n\t\t\t\/\/ which can have significant performance hit.\n\t\t\tif config.PollingInterval > minimumUpdatePeriod {\n\t\t\t\tp.updatePeriod = config.PollingInterval\n\t\t\t\tp.Log.Infof(\"polling period changed to %v\", p.updatePeriod)\n\t\t\t} else if config.PollingInterval > 0 {\n\t\t\t\tp.Log.Warnf(\"polling period has to be at least %s, using default: %v\",\n\t\t\t\t\tminimumUpdatePeriod, defaultUpdatePeriod)\n\t\t\t}\n\t\t\t\/\/ Store map of skipped metrics\n\t\t\tfor _, skip := range config.Skipped {\n\t\t\t\tp.skipped[skip] = true\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Register prometheus\n\tif !p.prometheusDisabled {\n\t\tif p.updatePeriod == 0 {\n\t\t\tp.updatePeriod = defaultUpdatePeriod\n\t\t}\n\t\tif err := p.registerPrometheus(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Setup stats poller\n\tp.statsPollerServer.log = p.Log.NewLogger(\"stats-poller\")\n\tif err := p.setupStatsPoller(); err != nil {\n\t\treturn err\n\t}\n\n\tif p.HTTPHandlers != nil {\n\t\tp.HTTPHandlers.RegisterHTTPHandler(\"\/metrics\/{metric}\", metricsHandler, \"GET\")\n\t}\n\n\treturn nil\n}\n\n\/\/ AfterInit executes after initializion of Telemetry Plugin\nfunc (p *Plugin) AfterInit() error {\n\t\/\/ Do not start polling if telemetry is disabled\n\tif p.disabled || p.prometheusDisabled {\n\t\treturn nil\n\t}\n\n\tp.startPeriodicUpdates()\n\n\treturn nil\n}\n\nfunc (p *Plugin) setupStatsPoller() error {\n\tvppCh, err := p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer vppCh.Close()\n\n\th := vppcalls.CompatibleTelemetryHandler(vppCh, p.GoVppmux)\n\tif h == nil {\n\t\treturn err\n\t}\n\tp.statsPollerServer.handler = h\n\n\tif p.GRPC != nil && p.GRPC.GetServer() != nil {\n\t\tconfigurator.RegisterStatsPollerServer(p.GRPC.GetServer(), &p.statsPollerServer)\n\t}\n\n\treturn nil\n}\n\n\/\/ Close is used to clean up resources used by Telemetry Plugin\nfunc (p *Plugin) Close() error {\n\tclose(p.quit)\n\tp.wg.Wait()\n\treturn nil\n}\n\nfunc (p *Plugin) startPeriodicUpdates() {\n\tvppCh, err := p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\tp.Log.Errorf(\"creating channel failed: %v\", err)\n\t\treturn\n\t}\n\tdefer vppCh.Close()\n\n\tp.handler = vppcalls.CompatibleTelemetryHandler(vppCh, p.GoVppmux)\n\tif p.handler == nil {\n\t\tp.Log.Warnf(\"no compatible telemetry handler, skipping periodic updates\")\n\t\treturn\n\t}\n\n\tp.wg.Add(1)\n\tgo p.periodicUpdates()\n}\n\n\/\/ periodic updates for the metrics data\nfunc (p *Plugin) periodicUpdates() {\n\tdefer p.wg.Done()\n\n\tp.Log.Debugf(\"starting periodic updates (%v)\", p.updatePeriod)\n\n\ttick := time.NewTicker(p.updatePeriod)\n\tfor {\n\t\tselect {\n\t\t\/\/ Delay period between updates\n\t\tcase <-tick.C:\n\t\t\tctx := context.Background()\n\t\t\tp.updatePrometheus(ctx)\n\n\t\t\/\/ Plugin has stopped.\n\t\tcase <-p.quit:\n\t\t\tp.Log.Debugf(\"stopping periodic updates\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (p *Plugin) tracef(f string, a ...interface{}) {\n\tif debug && p.Log.GetLevel() >= logging.DebugLevel {\n\t\ts := fmt.Sprintf(f, a...)\n\t\tif len(s) > 250 {\n\t\t\tp.Log.Debugf(\"%s... (%d bytes omitted) ...%s\", s[:200], len(s)-250, s[len(s)-50:])\n\t\t\treturn\n\t\t}\n\t\tp.Log.Debug(s)\n\t}\n}\n\nfunc metricsHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tvars := mux.Vars(req)\n\t\tif vars == nil {\n\t\t\t_ = formatter.JSON(w, http.StatusNotFound, struct{}{})\n\t\t\treturn\n\t\t}\n\t\tmetric := vars[\"metric\"]\n\t\tmodel, err := models.DefaultRegistry.GetModel(metric)\n\t\tif err != nil {\n\t\t\t_ = formatter.JSON(w, http.StatusNotFound, struct{ Error string }{err.Error()})\n\t\t\treturn\n\t\t}\n\t\tdata := model.NewInstance()\n\t\tif err := metrics.Retrieve(data); err != nil {\n\t\t\t_ = formatter.JSON(w, http.StatusInternalServerError, struct{ Error string }{err.Error()})\n\t\t\treturn\n\t\t}\n\t\t_ = formatter.JSON(w, 200, data)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package collector\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/jbub\/pgbouncer_exporter\/config\"\n\t\"github.com\/jbub\/pgbouncer_exporter\/domain\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\t\/\/ Name is the name of the exporter.\n\tName = \"pgbouncer_exporter\"\n)\n\n\/\/ Names of the exporter subsystems.\nconst (\n\tSubsystemStats = \"stats\"\n\tSubsystemPools = \"pools\"\n\tSubsystemDatabases = \"database\"\n\tSubsystemLists = \"lists\"\n)\n\ntype gaugeVecItem struct {\n\tgaugeVec *prometheus.GaugeVec\n\tresolve resolveGaugeVecFunc\n\tenabled bool\n}\n\ntype gaugeVecValueItem struct {\n\tlabels []string\n\tcount float64\n}\n\ntype resolveGaugeVecFunc func(res *storeResult) []gaugeVecValueItem\n\ntype storeResult struct {\n\tstats []domain.Stat\n\tpools []domain.Pool\n\tdatabases []domain.Database\n\tlists []domain.List\n}\n\n\/\/ Exporter represents pgbouncer prometheus stats exporter.\ntype Exporter struct {\n\tcfg config.Config\n\tst domain.Store\n\tmutex sync.RWMutex \/\/ guards Collect\n\tgaugeVecItems []gaugeVecItem\n}\n\n\/\/ New returns new Exporter.\nfunc New(cfg config.Config, st domain.Store) *Exporter {\n\treturn &Exporter{\n\t\tst: st,\n\t\tcfg: cfg,\n\t\tgaugeVecItems: []gaugeVecItem{\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_requests\",\n\t\t\t\t\tHelp: \"Total number of SQL requests pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalRequests),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_received\",\n\t\t\t\t\tHelp: \"Total number of SQL requests pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalReceived),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_sent\",\n\t\t\t\t\tHelp: \"Total volume in bytes of network traffic sent by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalSent),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_query_time\",\n\t\t\t\t\tHelp: \"Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalQueryTime),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_xact_time\",\n\t\t\t\t\tHelp: \"Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalXactTime),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_query_count\",\n\t\t\t\t\tHelp: \"Total number of SQL queries pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalQueryCount),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_xact_count\",\n\t\t\t\t\tHelp: \"Total number of SQL transactions pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalXactCount),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"active_clients\",\n\t\t\t\t\tHelp: \"Client connections that are linked to server connection and can process queries.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.Active),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"waiting_clients\",\n\t\t\t\t\tHelp: \"Client connections have sent queries but have not yet got a server connection.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.Waiting),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportDatabases,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemDatabases,\n\t\t\t\t\tName: \"current_connections\",\n\t\t\t\t\tHelp: \"Current number of connections.\",\n\t\t\t\t}, []string{\"name\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, database := range res.databases {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{database.Name, database.PoolMode},\n\t\t\t\t\t\t\tcount: float64(database.CurrentConnections),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportLists,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemLists,\n\t\t\t\t\tName: \"items\",\n\t\t\t\t\tHelp: \"List of internal pgbouncer information.\",\n\t\t\t\t}, []string{\"list\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, list := range res.lists {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{list.List},\n\t\t\t\t\t\t\tcount: float64(list.Items),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ Describe implements prometheus Collector.Describe.\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, gaugeVecItem := range e.gaugeVecItems {\n\t\tif gaugeVecItem.enabled {\n\t\t\tgaugeVecItem.gaugeVec.Describe(ch)\n\t\t}\n\t}\n}\n\n\/\/ Collect implements prometheus Collector.Collect.\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tctx, cancel := context.WithTimeout(context.Background(), e.cfg.StoreTimeout)\n\tdefer cancel()\n\n\tres, err := e.getStoreResult(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"unable to get store result: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, gaugeVecItem := range e.gaugeVecItems {\n\t\tif !gaugeVecItem.enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\titems := gaugeVecItem.resolve(res)\n\t\tfor _, item := range items {\n\t\t\tgaugeVecItem.gaugeVec.WithLabelValues(item.labels...).Set(item.count)\n\t\t}\n\n\t\tgaugeVecItem.gaugeVec.Collect(ch)\n\t}\n}\n\nfunc (e *Exporter) getStoreResult(ctx context.Context) (*storeResult, error) {\n\tres := new(storeResult)\n\n\tif e.cfg.ExportStats {\n\t\tstats, err := e.st.GetStats(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get stats: %v\", err)\n\t\t}\n\t\tres.stats = stats\n\t}\n\n\tif e.cfg.ExportPools {\n\t\tpools, err := e.st.GetPools(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get pools: %v\", err)\n\t\t}\n\t\tres.pools = pools\n\t}\n\n\tif e.cfg.ExportDatabases {\n\t\tdatabases, err := e.st.GetDatabases(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get databases: %v\", err)\n\t\t}\n\t\tres.databases = databases\n\t}\n\n\tif e.cfg.ExportLists {\n\t\tlists, err := e.st.GetLists(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get lists: %v\", err)\n\t\t}\n\t\tres.lists = lists\n\t}\n\n\treturn res, nil\n}\n<commit_msg>Expose more pool metrics<commit_after>package collector\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/jbub\/pgbouncer_exporter\/config\"\n\t\"github.com\/jbub\/pgbouncer_exporter\/domain\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\t\/\/ Name is the name of the exporter.\n\tName = \"pgbouncer_exporter\"\n)\n\n\/\/ Names of the exporter subsystems.\nconst (\n\tSubsystemStats = \"stats\"\n\tSubsystemPools = \"pools\"\n\tSubsystemDatabases = \"database\"\n\tSubsystemLists = \"lists\"\n)\n\ntype gaugeVecItem struct {\n\tgaugeVec *prometheus.GaugeVec\n\tresolve resolveGaugeVecFunc\n\tenabled bool\n}\n\ntype gaugeVecValueItem struct {\n\tlabels []string\n\tcount float64\n}\n\ntype resolveGaugeVecFunc func(res *storeResult) []gaugeVecValueItem\n\ntype storeResult struct {\n\tstats []domain.Stat\n\tpools []domain.Pool\n\tdatabases []domain.Database\n\tlists []domain.List\n}\n\n\/\/ Exporter represents pgbouncer prometheus stats exporter.\ntype Exporter struct {\n\tcfg config.Config\n\tst domain.Store\n\tmutex sync.RWMutex \/\/ guards Collect\n\tgaugeVecItems []gaugeVecItem\n}\n\n\/\/ New returns new Exporter.\nfunc New(cfg config.Config, st domain.Store) *Exporter {\n\treturn &Exporter{\n\t\tst: st,\n\t\tcfg: cfg,\n\t\tgaugeVecItems: []gaugeVecItem{\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_requests\",\n\t\t\t\t\tHelp: \"Total number of SQL requests pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalRequests),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_received\",\n\t\t\t\t\tHelp: \"Total number of SQL requests pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalReceived),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_sent\",\n\t\t\t\t\tHelp: \"Total volume in bytes of network traffic sent by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalSent),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_query_time\",\n\t\t\t\t\tHelp: \"Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalQueryTime),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_xact_time\",\n\t\t\t\t\tHelp: \"Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalXactTime),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_query_count\",\n\t\t\t\t\tHelp: \"Total number of SQL queries pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalQueryCount),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportStats,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemStats,\n\t\t\t\t\tName: \"total_xact_count\",\n\t\t\t\t\tHelp: \"Total number of SQL transactions pooled by pgbouncer.\",\n\t\t\t\t}, []string{\"database\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, stat := range res.stats {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{stat.Database},\n\t\t\t\t\t\t\tcount: float64(stat.TotalXactCount),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"active_clients\",\n\t\t\t\t\tHelp: \"Client connections that are linked to server connection and can process queries.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.Active),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"waiting_clients\",\n\t\t\t\t\tHelp: \"Client connections have sent queries but have not yet got a server connection.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.Waiting),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"active_server\",\n\t\t\t\t\tHelp: \"Server connections that linked to client.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.ServerActive),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"idle_server\",\n\t\t\t\t\tHelp: \"Server connections that unused and immediately usable for client queries.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.ServerIdle),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"used_server\",\n\t\t\t\t\tHelp: \"Server connections that have been idle more than server_check_delay, so they needs server_check_query to run on it before it can be used.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.ServerUsed),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"tested_server\",\n\t\t\t\t\tHelp: \"Server connections that are currently running either server_reset_query or server_check_query.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.ServerTested),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"login_server\",\n\t\t\t\t\tHelp: \"Server connections currently in logging in process.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.ServerLogin),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportPools,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemPools,\n\t\t\t\t\tName: \"max_wait\",\n\t\t\t\t\tHelp: \"How long the first (oldest) client in queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quick enough. Reason may be either overloaded server or just too small of a pool_size setting.\",\n\t\t\t\t}, []string{\"database\", \"user\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, pool := range res.pools {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{pool.Database, pool.User, pool.PoolMode},\n\t\t\t\t\t\t\tcount: float64(pool.MaxWait),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportDatabases,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemDatabases,\n\t\t\t\t\tName: \"current_connections\",\n\t\t\t\t\tHelp: \"Current number of connections.\",\n\t\t\t\t}, []string{\"name\", \"pool_mode\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, database := range res.databases {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{database.Name, database.PoolMode},\n\t\t\t\t\t\t\tcount: float64(database.CurrentConnections),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tenabled: cfg.ExportLists,\n\t\t\t\tgaugeVec: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: Name,\n\t\t\t\t\tSubsystem: SubsystemLists,\n\t\t\t\t\tName: \"items\",\n\t\t\t\t\tHelp: \"List of internal pgbouncer information.\",\n\t\t\t\t}, []string{\"list\"}),\n\t\t\t\tresolve: func(res *storeResult) (items []gaugeVecValueItem) {\n\t\t\t\t\tfor _, list := range res.lists {\n\t\t\t\t\t\titems = append(items, gaugeVecValueItem{\n\t\t\t\t\t\t\tlabels: []string{list.List},\n\t\t\t\t\t\t\tcount: float64(list.Items),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\treturn items\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ Describe implements prometheus Collector.Describe.\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, gaugeVecItem := range e.gaugeVecItems {\n\t\tif gaugeVecItem.enabled {\n\t\t\tgaugeVecItem.gaugeVec.Describe(ch)\n\t\t}\n\t}\n}\n\n\/\/ Collect implements prometheus Collector.Collect.\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tctx, cancel := context.WithTimeout(context.Background(), e.cfg.StoreTimeout)\n\tdefer cancel()\n\n\tres, err := e.getStoreResult(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"unable to get store result: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, gaugeVecItem := range e.gaugeVecItems {\n\t\tif !gaugeVecItem.enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\titems := gaugeVecItem.resolve(res)\n\t\tfor _, item := range items {\n\t\t\tgaugeVecItem.gaugeVec.WithLabelValues(item.labels...).Set(item.count)\n\t\t}\n\n\t\tgaugeVecItem.gaugeVec.Collect(ch)\n\t}\n}\n\nfunc (e *Exporter) getStoreResult(ctx context.Context) (*storeResult, error) {\n\tres := new(storeResult)\n\n\tif e.cfg.ExportStats {\n\t\tstats, err := e.st.GetStats(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get stats: %v\", err)\n\t\t}\n\t\tres.stats = stats\n\t}\n\n\tif e.cfg.ExportPools {\n\t\tpools, err := e.st.GetPools(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get pools: %v\", err)\n\t\t}\n\t\tres.pools = pools\n\t}\n\n\tif e.cfg.ExportDatabases {\n\t\tdatabases, err := e.st.GetDatabases(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get databases: %v\", err)\n\t\t}\n\t\tres.databases = databases\n\t}\n\n\tif e.cfg.ExportLists {\n\t\tlists, err := e.st.GetLists(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to get lists: %v\", err)\n\t\t}\n\t\tres.lists = lists\n\t}\n\n\treturn res, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2019 The vt-go authors. All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage vt\n\nimport (\n\t\"bufio\"\n\t\"compress\/bzip2\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ FeedType ...\ntype FeedType string\n\nconst (\n\t\/\/ FileFeed is the feed type passed to NewFeed() for getting a feed with\n\t\/\/ all the files being scanned by VirusTotal.\n\tFileFeed FeedType = \"files\"\n)\n\n\/\/ A Feed represents a stream of objects received from VirusTotal via the\n\/\/ feed API v3. This API allows you to get information about objects as they are\n\/\/ processed by VirusTotal in real-time. Objects are sent on channel C.\ntype Feed struct {\n\tC chan *Object\n\tclient *Client\n\tfeedType FeedType\n\t\/\/ t is the time of the current package and n is index of the current item\n\t\/\/ within the package, the feed cursor is determined by the t and n.\n\tt time.Time\n\tn int64\n\tstop chan bool\n\tstopped bool\n\terr error\n}\n\n\/\/ FeedOption represents an option passed to a NewFeed.\ntype FeedOption func(*Feed) error\n\n\/\/ FeedBufferSize specifies the size of the Feed's buffer.\nfunc FeedBufferSize(size int) FeedOption {\n\treturn func(f *Feed) error {\n\t\tf.C = make(chan *Object, size)\n\t\treturn nil\n\t}\n}\n\n\/\/ FeedCursor specifies the point in time where the feed starts. Files processed\n\/\/ by VirusTotal after that time will be retrieved. The cursor is a string with\n\/\/ the format YYYYMMDDhhmm, indicating the date and time with minute precision.\n\/\/ If a empty string is passed as cursor the current time will be used.\nfunc FeedCursor(cursor string) FeedOption {\n\treturn func(f *Feed) error {\n\t\tvar err error\n\t\t\/\/ An empty cursor is acceptable, it's equivalent to passing no cursor\n\t\t\/\/ at all.\n\t\tif cursor == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Cursor can be either YYYYMMDDhhmm or YYYYMMDDhhmm-N where N\n\t\t\/\/ indicates a line number within package YYYYMMDDhhmm.\n\t\ts := strings.Split(cursor, \"-\")\n\t\tif len(s) > 1 {\n\t\t\tf.n, err = strconv.ParseInt(s[1], 10, 32)\n\t\t}\n\t\tif err == nil {\n\t\t\tf.t, err = time.Parse(\"200601021504\", s[0])\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ NewFeed creates a Feed that receives objects from the specified type. Objects\n\/\/ are send on channel C. The feed can be stopped at any moment by calling Stop.\n\/\/ This example illustrates how a Feed is typically used:\n\/\/\n\/\/ feed, err := vt.Client(<api key>).NewFeed(vt.FileFeed)\n\/\/ if err != nil {\n\/\/ ... handle error\n\/\/ }\n\/\/ for fileObj := range feed.C {\n\/\/ ... do something with file object\n\/\/ }\n\/\/ if feed.Error() != nil {\n\/\/ ... feed as been stopped by some error.\n\/\/ }\n\/\/\nfunc (cli *Client) NewFeed(t FeedType, options ...FeedOption) (*Feed, error) {\n\tfeed := &Feed{\n\t\tclient: cli,\n\t\tfeedType: t,\n\t\tt: time.Now().UTC().Add(-30 * time.Minute),\n\t\tstop: make(chan bool, 1)}\n\n\tfor _, opt := range options {\n\t\tif err := opt(feed); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ If the channel hasn't been created yet with a custom buffer size by\n\t\/\/ WithBufferSize, let's create it with a default size.\n\tif feed.C == nil {\n\t\tfeed.C = make(chan *Object, 1000)\n\t}\n\n\tgo feed.retrieve()\n\n\treturn feed, nil\n}\n\n\/\/ Cursor returns a string that can be passed to FeedCursor for creating a\n\/\/ feed that resumes where a previous one left.\nfunc (f *Feed) Cursor() string {\n\treturn fmt.Sprintf(\"%s-%d\", f.t.Format(\"200601021504\"), f.n)\n}\n\n\/\/ Error returns any error occurred so far.\nfunc (f *Feed) Error() error {\n\treturn f.err\n}\n\n\/\/ Stop causes the feed to stop sending objects to the channel C. After Stop is\n\/\/ called the feed still sends all the objects that it has buffered.\nfunc (f *Feed) Stop() error {\n\tif !f.stopped {\n\t\tf.stopped = true\n\t\tf.stop <- true\n\t}\n\treturn nil\n}\n\n\/\/ Send the object to the feed's channel, except if it was stopped.\nfunc (f *Feed) sendToChannel(object *Object) int {\n\tselect {\n\tcase <-f.stop:\n\t\treturn stop\n\tcase f.C <- object:\n\t\treturn ok\n\t}\n}\n\n\/\/ Wait for the given amount of time, but exits earlier if the feed is stopped\n\/\/ during the waiting period.\nfunc (f *Feed) wait(d time.Duration) int {\n\tselect {\n\tcase <-f.stop:\n\t\treturn stop\n\tcase <-time.After(d):\n\t\treturn ok\n\t}\n}\n\nvar errNoAvailableYet = errors.New(\"No available yet\")\n\nfunc (f *Feed) getObjects(packageTime string) ([]*Object, error) {\n\n\tu := URL(\"feeds\/%s\/%s\", f.feedType, packageTime)\n\n\thttpResp, err := f.client.sendRequest(\"GET\", u, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer httpResp.Body.Close()\n\n\tif httpResp.StatusCode == http.StatusBadRequest {\n\t\tif resp, err := f.client.parseResponse(httpResp); err != nil {\n\t\t\tif resp.Error.Code == \"NotAvailableYet\" {\n\t\t\t\treturn nil, errNoAvailableYet\n\t\t\t}\n\t\t}\n\t}\n\n\tif httpResp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(httpResp.Status)\n\t}\n\n\tsc := bufio.NewScanner(bzip2.NewReader(httpResp.Body))\n\t\/\/ By default bufio.Scanner uses a buffer that is limited to a maximum size\n\t\/\/ defined by bufio.MaxScanBufferSize (64KB). This is too small for\n\t\/\/ accommodating the large JSONs stored in the feed files. So we create an\n\t\/\/ initial 1MB buffer and let it grow up to 10MB.\n\tbuffer := make([]byte, 1*1024*1024)\n\tsc.Buffer(buffer, 10*1024*1024)\n\n\tobjects := make([]*Object, 0)\n\tfor sc.Scan() {\n\t\tobj := &Object{}\n\t\tif err := json.Unmarshal(sc.Bytes(), obj); err != nil {\n\t\t\treturn objects, err\n\t\t}\n\t\tobjects = append(objects, obj)\n\t}\n\n\treturn objects, sc.Err()\n}\n\nfunc (f *Feed) retrieve() {\n\twaitDuration := 20 * time.Second\nloop:\n\tfor {\n\t\tpackageTime := f.t.Format(\"200601021504\") \/\/ YYYYMMDDhhmm\n\t\tobjects, err := f.getObjects(packageTime)\n\t\tobjects = objects[f.n:]\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tfor _, object := range objects {\n\t\t\t\tif f.sendToChannel(object) == stop {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tf.n++\n\t\t\t}\n\t\t\tf.t = f.t.Add(60 * time.Second)\n\t\t\tf.n = 0\n\t\t\twaitDuration = 20 * time.Second\n\t\tcase errNoAvailableYet:\n\t\t\t\/\/ Feed package is not available yet, let's wait for 1 minute and\n\t\t\t\/\/ try again. If Close() is called during the waiting period it\n\t\t\t\/\/ exits early and breaks the loop.\n\t\t\tif f.wait(waitDuration) == stop {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\twaitDuration *= 2\n\t\tdefault:\n\t\t\tf.err = err\n\t\t\tbreak loop\n\t\t}\n\t}\n\tf.stopped = true\n\tclose(f.C)\n\tclose(f.stop)\n}\n<commit_msg>Add tolerance for missing packages.<commit_after>\/\/ Copyright © 2019 The vt-go authors. All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage vt\n\nimport (\n\t\"bufio\"\n\t\"compress\/bzip2\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ FeedType ...\ntype FeedType string\n\nconst (\n\t\/\/ FileFeed is the feed type passed to NewFeed() for getting a feed with\n\t\/\/ all the files being scanned by VirusTotal.\n\tFileFeed FeedType = \"files\"\n)\n\n\/\/ A Feed represents a stream of objects received from VirusTotal via the\n\/\/ feed API v3. This API allows you to get information about objects as they are\n\/\/ processed by VirusTotal in real-time. Objects are sent on channel C.\ntype Feed struct {\n\tC chan *Object\n\tclient *Client\n\tfeedType FeedType\n\t\/\/ t is the time of the current package and n is index of the current item\n\t\/\/ within the package, the feed cursor is determined by the t and n.\n\tt time.Time\n\tn int64\n\tstop chan bool\n\tstopped bool\n\terr error\n\tmissingPackagesTolerancy int\n}\n\n\/\/ FeedOption represents an option passed to a NewFeed.\ntype FeedOption func(*Feed) error\n\n\/\/ FeedBufferSize specifies the size of the Feed's buffer.\nfunc FeedBufferSize(size int) FeedOption {\n\treturn func(f *Feed) error {\n\t\tf.C = make(chan *Object, size)\n\t\treturn nil\n\t}\n}\n\n\/\/ FeedCursor specifies the point in time where the feed starts. Files processed\n\/\/ by VirusTotal after that time will be retrieved. The cursor is a string with\n\/\/ the format YYYYMMDDhhmm, indicating the date and time with minute precision.\n\/\/ If a empty string is passed as cursor the current time will be used.\nfunc FeedCursor(cursor string) FeedOption {\n\treturn func(f *Feed) error {\n\t\tvar err error\n\t\t\/\/ An empty cursor is acceptable, it's equivalent to passing no cursor\n\t\t\/\/ at all.\n\t\tif cursor == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Cursor can be either YYYYMMDDhhmm or YYYYMMDDhhmm-N where N\n\t\t\/\/ indicates a line number within package YYYYMMDDhhmm.\n\t\ts := strings.Split(cursor, \"-\")\n\t\tif len(s) > 1 {\n\t\t\tf.n, err = strconv.ParseInt(s[1], 10, 32)\n\t\t}\n\t\tif err == nil {\n\t\t\tf.t, err = time.Parse(\"200601021504\", s[0])\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ NewFeed creates a Feed that receives objects from the specified type. Objects\n\/\/ are send on channel C. The feed can be stopped at any moment by calling Stop.\n\/\/ This example illustrates how a Feed is typically used:\n\/\/\n\/\/ feed, err := vt.Client(<api key>).NewFeed(vt.FileFeed)\n\/\/ if err != nil {\n\/\/ ... handle error\n\/\/ }\n\/\/ for fileObj := range feed.C {\n\/\/ ... do something with file object\n\/\/ }\n\/\/ if feed.Error() != nil {\n\/\/ ... feed as been stopped by some error.\n\/\/ }\n\/\/\nfunc (cli *Client) NewFeed(t FeedType, options ...FeedOption) (*Feed, error) {\n\tfeed := &Feed{\n\t\tclient: cli,\n\t\tfeedType: t,\n\t\tt: time.Now().UTC().Add(-30 * time.Minute),\n\t\tstop: make(chan bool, 1),\n\t\tmissingPackagesTolerancy: 1,\n\t}\n\n\tfor _, opt := range options {\n\t\tif err := opt(feed); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ If the channel hasn't been created yet with a custom buffer size by\n\t\/\/ WithBufferSize, let's create it with a default size.\n\tif feed.C == nil {\n\t\tfeed.C = make(chan *Object, 1000)\n\t}\n\n\tgo feed.retrieve()\n\n\treturn feed, nil\n}\n\n\/\/ Cursor returns a string that can be passed to FeedCursor for creating a\n\/\/ feed that resumes where a previous one left.\nfunc (f *Feed) Cursor() string {\n\treturn fmt.Sprintf(\"%s-%d\", f.t.Format(\"200601021504\"), f.n)\n}\n\n\/\/ Error returns any error occurred so far.\nfunc (f *Feed) Error() error {\n\treturn f.err\n}\n\n\/\/ Stop causes the feed to stop sending objects to the channel C. After Stop is\n\/\/ called the feed still sends all the objects that it has buffered.\nfunc (f *Feed) Stop() error {\n\tif !f.stopped {\n\t\tf.stopped = true\n\t\tf.stop <- true\n\t}\n\treturn nil\n}\n\n\/\/ Send the object to the feed's channel, except if it was stopped.\nfunc (f *Feed) sendToChannel(object *Object) int {\n\tselect {\n\tcase <-f.stop:\n\t\treturn stop\n\tcase f.C <- object:\n\t\treturn ok\n\t}\n}\n\n\/\/ Wait for the given amount of time, but exits earlier if the feed is stopped\n\/\/ during the waiting period.\nfunc (f *Feed) wait(d time.Duration) int {\n\tselect {\n\tcase <-f.stop:\n\t\treturn stop\n\tcase <-time.After(d):\n\t\treturn ok\n\t}\n}\n\nvar errNoAvailableYet = errors.New(\"not available yet\")\nvar errNotFound = errors.New(\"not found\")\n\nfunc (f *Feed) getObjects(packageTime string) ([]*Object, error) {\n\n\tu := URL(\"feeds\/%s\/%s\", f.feedType, packageTime)\n\n\thttpResp, err := f.client.sendRequest(\"GET\", u, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer httpResp.Body.Close()\n\n\tswitch httpResp.StatusCode {\n\tcase http.StatusBadRequest:\n\t\tif resp, err := f.client.parseResponse(httpResp); err != nil {\n\t\t\tif resp.Error.Code == \"NotAvailableYet\" {\n\t\t\t\treturn nil, errNoAvailableYet\n\t\t\t}\n\t\t}\n\tcase http.StatusNotFound:\n\t\treturn nil, errNotFound\n\t}\n\n\tif httpResp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(httpResp.Status)\n\t}\n\n\tsc := bufio.NewScanner(bzip2.NewReader(httpResp.Body))\n\t\/\/ By default bufio.Scanner uses a buffer that is limited to a maximum size\n\t\/\/ defined by bufio.MaxScanBufferSize (64KB). This is too small for\n\t\/\/ accommodating the large JSONs stored in the feed files. So we create an\n\t\/\/ initial 1MB buffer and let it grow up to 10MB.\n\tbuffer := make([]byte, 1*1024*1024)\n\tsc.Buffer(buffer, 10*1024*1024)\n\n\tobjects := make([]*Object, 0)\n\tfor sc.Scan() {\n\t\tobj := &Object{}\n\t\tif err := json.Unmarshal(sc.Bytes(), obj); err != nil {\n\t\t\treturn objects, err\n\t\t}\n\t\tobjects = append(objects, obj)\n\t}\n\n\treturn objects, sc.Err()\n}\n\nfunc (f *Feed) retrieve() {\n\twaitDuration := 20 * time.Second\n\tmissingPackages := 0\nloop:\n\tfor {\n\t\tpackageTime := f.t.Format(\"200601021504\") \/\/ YYYYMMDDhhmm\n\t\tobjects, err := f.getObjects(packageTime)\n\t\tobjects = objects[f.n:]\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tfor _, object := range objects {\n\t\t\t\tif f.sendToChannel(object) == stop {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tf.n++\n\t\t\t}\n\t\t\tf.t = f.t.Add(60 * time.Second)\n\t\t\tf.n = 0\n\t\t\twaitDuration = 20 * time.Second\n\t\t\tmissingPackages = 0\n\t\tcase errNoAvailableYet:\n\t\t\t\/\/ Feed package is not available yet, let's wait for 1 minute and\n\t\t\t\/\/ try again. If Close() is called during the waiting period it\n\t\t\t\/\/ exits early and breaks the loop.\n\t\t\tif f.wait(waitDuration) == stop {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\twaitDuration *= 2\n\t\tcase errNotFound:\n\t\t\t\/\/ The feed tolerates some missing packages, if the number of missing\n\t\t\t\/\/ packages is greater than missingPackagesTolerancy an error is\n\t\t\t\/\/ returned, if not, it tries to get the next package.\n\t\t\tmissingPackages++\n\t\t\tif missingPackages > f.missingPackagesTolerancy {\n\t\t\t\tf.err = err\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tf.t = f.t.Add(60 * time.Second)\n\t\tdefault:\n\t\t\tf.err = err\n\t\t\tbreak loop\n\t\t}\n\t}\n\tf.stopped = true\n\tclose(f.C)\n\tclose(f.stop)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n\t\"path\"\n)\n\ntype File struct {\n\tImdbId string\n\tFormat string\n\tFileName string\n\tFullPath string\n}\n\nfunc (f *File) IsValid() bool {\n\treturn f.ImdbId != \"\" && f.Present()\n}\n\n\/\/ Is the file is present on disk\nfunc (f *File) Present() bool {\n\treturn file_exists(f.FullPath)\n}\n\nfunc NewFile(c *cli.Context) File {\n\tvar file_name, format, id, full_path string\n\n\tif len(c.Args()) == 1 {\n\t\tfull_path = c.Args()[0]\n\t\tbase := path.Base(full_path)\n\t\tformat = path.Ext(full_path)\n\n\t\tfile_name = base[:len(base)-len(format)]\n\t\tid = c.String(\"id\")\n\t}\n\treturn File{ImdbId: id, FileName: file_name, Format: format, FullPath: full_path}\n}\n\nfunc file_exists(path string) bool {\n\texists := false\n\tif _, err := os.Stat(path); err == nil {\n\t\texists = true\n\t}\n\treturn exists\n}\n<commit_msg>Add go style names<commit_after>package main\n\nimport (\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n\t\"path\"\n)\n\ntype File struct {\n\tImdbId string\n\tFormat string\n\tFileName string\n\tFullPath string\n}\n\nfunc (f *File) IsValid() bool {\n\treturn f.ImdbId != \"\" && f.Present()\n}\n\n\/\/ Is the file is present on disk\nfunc (f *File) Present() bool {\n\treturn fileExists(f.FullPath)\n}\n\nfunc NewFile(c *cli.Context) File {\n\tvar fileName, format, id, fullPath string\n\n\tif len(c.Args()) == 1 {\n\t\tfullPath = c.Args()[0]\n\t\tbase := path.Base(fullPath)\n\t\tformat = path.Ext(fullPath)\n\n\t\tfileName = base[:len(base)-len(format)]\n\t\tid = c.String(\"id\")\n\t}\n\treturn File{ImdbId: id, FileName: fileName, Format: format, FullPath: fullPath}\n}\n\nfunc fileExists(path string) bool {\n\texists := false\n\tif _, err := os.Stat(path); err == nil {\n\t\texists = true\n\t}\n\treturn exists\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/github\/hub\/Godeps\/_workspace\/src\/github.com\/bmizerany\/assert\"\n)\n\nfunc TestNewArgs(t *testing.T) {\n\targs := NewArgs([]string{})\n\tassert.Equal(t, \"\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"command\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"command\", \"args\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 1, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--noop\", \"command\", \"args\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 1, args.ParamsSize())\n\tassert.T(t, args.Noop)\n\n\targs = NewArgs([]string{\"--version\"})\n\tassert.Equal(t, \"version\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--help\"})\n\tassert.Equal(t, \"help\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--noop\", \"--version\"})\n\tassert.T(t, args.Noop)\n\tassert.Equal(t, \"version\", args.Command)\n\n\targs = NewArgs([]string{\"-c\", \"foo=bar\", \"--git-dir=path\", \"--bare\", \"-c\", \"a=b\"})\n\tassert.Equal(t, 7, len(args.GlobalFlags))\n\tassert.Equal(t, \"-c foo=bar -c a=b --bare --git-dir path\", strings.Join(args.GlobalFlags, \" \"))\n}\n\nfunc TestArgs_Words(t *testing.T) {\n\targs := NewArgs([]string{\"--no-ff\", \"master\"})\n\ta := args.Words()\n\n\tassert.Equal(t, 1, len(a))\n\tassert.Equal(t, \"master\", a[0])\n}\n\nfunc TestArgs_Insert(t *testing.T) {\n\targs := NewArgs([]string{\"command\", \"1\", \"2\", \"3\", \"4\"})\n\targs.InsertParam(0, \"foo\")\n\n\tassert.Equal(t, 5, args.ParamsSize())\n\tassert.Equal(t, \"foo\", args.FirstParam())\n\n\targs = NewArgs([]string{\"command\", \"1\", \"2\", \"3\", \"4\"})\n\targs.InsertParam(3, \"foo\")\n\n\tassert.Equal(t, 5, args.ParamsSize())\n\tassert.Equal(t, \"foo\", args.Params[3])\n}\n\nfunc TestArgs_Remove(t *testing.T) {\n\targs := NewArgs([]string{\"1\", \"2\", \"3\", \"4\"})\n\n\titem := args.RemoveParam(1)\n\tassert.Equal(t, \"3\", item)\n\tassert.Equal(t, 2, args.ParamsSize())\n\tassert.Equal(t, \"2\", args.FirstParam())\n\tassert.Equal(t, \"4\", args.GetParam(1))\n}\n<commit_msg>Fix flaky test since config param is a map and order may not be preserved<commit_after>package commands\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/github\/hub\/Godeps\/_workspace\/src\/github.com\/bmizerany\/assert\"\n)\n\nfunc TestNewArgs(t *testing.T) {\n\targs := NewArgs([]string{})\n\tassert.Equal(t, \"\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"command\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"command\", \"args\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 1, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--noop\", \"command\", \"args\"})\n\tassert.Equal(t, \"command\", args.Command)\n\tassert.Equal(t, 1, args.ParamsSize())\n\tassert.T(t, args.Noop)\n\n\targs = NewArgs([]string{\"--version\"})\n\tassert.Equal(t, \"version\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--help\"})\n\tassert.Equal(t, \"help\", args.Command)\n\tassert.Equal(t, 0, args.ParamsSize())\n\n\targs = NewArgs([]string{\"--noop\", \"--version\"})\n\tassert.T(t, args.Noop)\n\tassert.Equal(t, \"version\", args.Command)\n\n\targs = NewArgs([]string{\"-c\", \"foo=bar\", \"--git-dir=path\", \"--bare\"})\n\tassert.Equal(t, 5, len(args.GlobalFlags))\n\tassert.Equal(t, \"-c foo=bar --bare --git-dir path\", strings.Join(args.GlobalFlags, \" \"))\n}\n\nfunc TestArgs_Words(t *testing.T) {\n\targs := NewArgs([]string{\"--no-ff\", \"master\"})\n\ta := args.Words()\n\n\tassert.Equal(t, 1, len(a))\n\tassert.Equal(t, \"master\", a[0])\n}\n\nfunc TestArgs_Insert(t *testing.T) {\n\targs := NewArgs([]string{\"command\", \"1\", \"2\", \"3\", \"4\"})\n\targs.InsertParam(0, \"foo\")\n\n\tassert.Equal(t, 5, args.ParamsSize())\n\tassert.Equal(t, \"foo\", args.FirstParam())\n\n\targs = NewArgs([]string{\"command\", \"1\", \"2\", \"3\", \"4\"})\n\targs.InsertParam(3, \"foo\")\n\n\tassert.Equal(t, 5, args.ParamsSize())\n\tassert.Equal(t, \"foo\", args.Params[3])\n}\n\nfunc TestArgs_Remove(t *testing.T) {\n\targs := NewArgs([]string{\"1\", \"2\", \"3\", \"4\"})\n\n\titem := args.RemoveParam(1)\n\tassert.Equal(t, \"3\", item)\n\tassert.Equal(t, 2, args.ParamsSize())\n\tassert.Equal(t, \"2\", args.FirstParam())\n\tassert.Equal(t, \"4\", args.GetParam(1))\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\n\tcmds \"github.com\/ipfs\/go-ipfs\/commands\"\n\tfiles \"github.com\/ipfs\/go-ipfs\/commands\/files\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\n\/\/ Parse parses the input commandline string (cmd, flags, and args).\n\/\/ returns the corresponding command Request object.\nfunc Parse(input []string, stdin *os.File, root *cmds.Command) (cmds.Request, *cmds.Command, []string, error) {\n\tpath, opts, stringVals, cmd, err := parseOpts(input, root)\n\tif err != nil {\n\t\treturn nil, nil, path, err\n\t}\n\n\toptDefs, err := root.GetOptions(path)\n\tif err != nil {\n\t\treturn nil, cmd, path, err\n\t}\n\n\treq, err := cmds.NewRequest(path, opts, nil, nil, cmd, optDefs)\n\tif err != nil {\n\t\treturn nil, cmd, path, err\n\t}\n\n\t\/\/ if -r is provided, and it is associated with the package builtin\n\t\/\/ recursive path option, allow recursive file paths\n\trecursiveOpt := req.Option(cmds.RecShort)\n\trecursive := false\n\tif recursiveOpt != nil && recursiveOpt.Definition() == cmds.OptionRecursivePath {\n\t\trecursive, _, err = recursiveOpt.Bool()\n\t\tif err != nil {\n\t\t\treturn req, nil, nil, u.ErrCast()\n\t\t}\n\t}\n\n\t\/\/ if '--hidden' is provided, enumerate hidden paths\n\thiddenOpt := req.Option(\"hidden\")\n\thidden := false\n\tif hiddenOpt != nil {\n\t\thidden, _, err = hiddenOpt.Bool()\n\t\tif err != nil {\n\t\t\treturn req, nil, nil, u.ErrCast()\n\t\t}\n\t}\n\n\tstringArgs, fileArgs, err := parseArgs(stringVals, stdin, cmd.Arguments, recursive, hidden, root)\n\tif err != nil {\n\t\treturn req, cmd, path, err\n\t}\n\treq.SetArguments(stringArgs)\n\n\tif len(fileArgs) > 0 {\n\t\tfile := files.NewSliceFile(\"\", \"\", fileArgs)\n\t\treq.SetFiles(file)\n\t}\n\n\terr = cmd.CheckArguments(req)\n\tif err != nil {\n\t\treturn req, cmd, path, err\n\t}\n\n\treturn req, cmd, path, nil\n}\n\n\/\/ Parse a command line made up of sub-commands, short arguments, long arguments and positional arguments\nfunc parseOpts(args []string, root *cmds.Command) (\n\tpath []string,\n\topts map[string]interface{},\n\tstringVals []string,\n\tcmd *cmds.Command,\n\terr error,\n) {\n\tpath = make([]string, 0, len(args))\n\tstringVals = make([]string, 0, len(args))\n\toptDefs := map[string]cmds.Option{}\n\topts = map[string]interface{}{}\n\tcmd = root\n\n\t\/\/ parseFlag checks that a flag is valid and saves it into opts\n\t\/\/ Returns true if the optional second argument is used\n\tparseFlag := func(name string, arg *string, mustUse bool) (bool, error) {\n\t\tif _, ok := opts[name]; ok {\n\t\t\treturn false, fmt.Errorf(\"Duplicate values for option '%s'\", name)\n\t\t}\n\n\t\toptDef, found := optDefs[name]\n\t\tif !found {\n\t\t\terr = fmt.Errorf(\"Unrecognized option '%s'\", name)\n\t\t\treturn false, err\n\t\t}\n\t\t\/\/ mustUse implies that you must use the argument given after the '='\n\t\t\/\/ eg. -r=true means you must take true into consideration\n\t\t\/\/\t\tmustUse == true in the above case\n\t\t\/\/ eg. ipfs -r <file> means disregard <file> since there is no '='\n\t\t\/\/\t\tmustUse == false in the above situation\n\t\t\/\/arg == nil implies the flag was specified without an argument\n\t\tif optDef.Type() == cmds.Bool {\n\t\t\tif arg == nil || !mustUse {\n\t\t\t\topts[name] = true\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\targVal := strings.ToLower(*arg)\n\t\t\tswitch argVal {\n\t\t\tcase \"true\":\n\t\t\t\topts[name] = true\n\t\t\t\treturn true, nil\n\t\t\tcase \"false\":\n\t\t\t\topts[name] = false\n\t\t\t\treturn true, nil\n\t\t\tdefault:\n\t\t\t\treturn true, fmt.Errorf(\"Option '%s' takes true\/false arguments, but was passed '%s'\", name, argVal)\n\t\t\t}\n\t\t} else {\n\t\t\tif arg == nil {\n\t\t\t\treturn true, fmt.Errorf(\"Missing argument for option '%s'\", name)\n\t\t\t}\n\t\t\topts[name] = *arg\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\toptDefs, err = root.GetOptions(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconsumed := false\n\tfor i, arg := range args {\n\t\tswitch {\n\t\tcase consumed:\n\t\t\t\/\/ arg was already consumed by the preceding flag\n\t\t\tconsumed = false\n\t\t\tcontinue\n\n\t\tcase arg == \"--\":\n\t\t\t\/\/ treat all remaining arguments as positional arguments\n\t\t\tstringVals = append(stringVals, args[i+1:]...)\n\t\t\treturn\n\n\t\tcase strings.HasPrefix(arg, \"--\"):\n\t\t\t\/\/ arg is a long flag, with an optional argument specified\n\t\t\t\/\/ using `=' or in args[i+1]\n\t\t\tvar slurped bool\n\t\t\tvar next *string\n\t\t\tsplit := strings.SplitN(arg, \"=\", 2)\n\t\t\tif len(split) == 2 {\n\t\t\t\tslurped = false\n\t\t\t\targ = split[0]\n\t\t\t\tnext = &split[1]\n\t\t\t} else {\n\t\t\t\tslurped = true\n\t\t\t\tif i+1 < len(args) {\n\t\t\t\t\tnext = &args[i+1]\n\t\t\t\t} else {\n\t\t\t\t\tnext = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tconsumed, err = parseFlag(arg[2:], next, len(split) == 2)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !slurped {\n\t\t\t\tconsumed = false\n\t\t\t}\n\n\t\tcase strings.HasPrefix(arg, \"-\") && arg != \"-\":\n\t\t\t\/\/ args is one or more flags in short form, followed by an optional argument\n\t\t\t\/\/ all flags except the last one have type bool\n\t\t\tfor arg = arg[1:]; len(arg) != 0; arg = arg[1:] {\n\t\t\t\tvar rest *string\n\t\t\t\tvar slurped bool\n\t\t\t\tmustUse := false\n\t\t\t\tif len(arg) > 1 {\n\t\t\t\t\tslurped = false\n\t\t\t\t\tstr := arg[1:]\n\t\t\t\t\tif len(str) > 0 && str[0] == '=' {\n\t\t\t\t\t\tstr = str[1:]\n\t\t\t\t\t\tmustUse = true\n\t\t\t\t\t}\n\t\t\t\t\trest = &str\n\t\t\t\t} else {\n\t\t\t\t\tslurped = true\n\t\t\t\t\tif i+1 < len(args) {\n\t\t\t\t\t\trest = &args[i+1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\trest = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar end bool\n\t\t\t\tend, err = parseFlag(arg[:1], rest, mustUse)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif end {\n\t\t\t\t\tconsumed = slurped\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\t\/\/ arg is a sub-command or a positional argument\n\t\t\tsub := cmd.Subcommand(arg)\n\t\t\tif sub != nil {\n\t\t\t\tcmd = sub\n\t\t\t\tpath = append(path, arg)\n\t\t\t\toptDefs, err = root.GetOptions(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ If we've come across an external binary call, pass all the remaining\n\t\t\t\t\/\/ arguments on to it\n\t\t\t\tif cmd.External {\n\t\t\t\t\tstringVals = append(stringVals, args[i+1:]...)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstringVals = append(stringVals, arg)\n\t\t\t\tif len(path) == 0 {\n\t\t\t\t\t\/\/ found a typo or early argument\n\t\t\t\t\terr = printSuggestions(stringVals, root)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursive, hidden bool, root *cmds.Command) ([]string, []files.File, error) {\n\t\/\/ ignore stdin on Windows\n\tif runtime.GOOS == \"windows\" {\n\t\tstdin = nil\n\t}\n\n\t\/\/ check if stdin is coming from terminal or is being piped in\n\tif stdin != nil {\n\t\tif term, err := isTerminal(stdin); err != nil {\n\t\t\treturn nil, nil, err\n\t\t} else if term {\n\t\t\tstdin = nil \/\/ set to nil so we ignore it\n\t\t}\n\t}\n\n\t\/\/ count required argument definitions\n\tnumRequired := 0\n\tfor _, argDef := range argDefs {\n\t\tif argDef.Required {\n\t\t\tnumRequired++\n\t\t}\n\t}\n\n\t\/\/ count number of values provided by user.\n\t\/\/ if there is at least one ArgDef, we can safely trigger the inputs loop\n\t\/\/ below to parse stdin.\n\tnumInputs := len(inputs)\n\tif len(argDefs) > 0 && argDefs[len(argDefs)-1].SupportsStdin && stdin != nil {\n\t\tnumInputs += 1\n\t}\n\n\t\/\/ if we have more arg values provided than argument definitions,\n\t\/\/ and the last arg definition is not variadic (or there are no definitions), return an error\n\tnotVariadic := len(argDefs) == 0 || !argDefs[len(argDefs)-1].Variadic\n\tif notVariadic && len(inputs) > len(argDefs) {\n\t\terr := printSuggestions(inputs, root)\n\t\treturn nil, nil, err\n\t}\n\n\tstringArgs := make([]string, 0, numInputs)\n\n\tfileArgs := make(map[string]files.File)\n\targDefIndex := 0 \/\/ the index of the current argument definition\n\tfor i := 0; i < numInputs; i++ {\n\t\targDef := getArgDef(argDefIndex, argDefs)\n\n\t\t\/\/ skip optional argument definitions if there aren't sufficient remaining inputs\n\t\tfor numInputs-i <= numRequired && !argDef.Required {\n\t\t\targDefIndex++\n\t\t\targDef = getArgDef(argDefIndex, argDefs)\n\t\t}\n\t\tif argDef.Required {\n\t\t\tnumRequired--\n\t\t}\n\n\t\tvar err error\n\t\tif argDef.Type == cmds.ArgString {\n\t\t\tif stdin == nil {\n\t\t\t\t\/\/ add string values\n\t\t\t\tstringArgs, inputs = appendString(stringArgs, inputs)\n\t\t\t} else if !argDef.SupportsStdin {\n\t\t\t\tif len(inputs) == 0 {\n\t\t\t\t\t\/\/ failure case, we have stdin, but our current\n\t\t\t\t\t\/\/ argument doesnt want stdin\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tstringArgs, inputs = appendString(stringArgs, inputs)\n\t\t\t} else {\n\t\t\t\tif len(inputs) > 0 {\n\t\t\t\t\t\/\/ don't use stdin if we have inputs\n\t\t\t\t\tstdin = nil\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ if we have a stdin, read it in and use the data as a string value\n\t\t\t\t\tstringArgs, stdin, err = appendStdinAsString(stringArgs, stdin)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if argDef.Type == cmds.ArgFile {\n\t\t\tif stdin == nil || !argDef.SupportsStdin {\n\t\t\t\t\/\/ treat stringArg values as file paths\n\t\t\t\tfpath := inputs[0]\n\t\t\t\tinputs = inputs[1:]\n\t\t\t\tfile, err := appendFile(fpath, argDef, recursive, hidden)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\n\t\t\t\tfileArgs[fpath] = file\n\t\t\t} else {\n\t\t\t\tif len(inputs) > 0 {\n\t\t\t\t\t\/\/ don't use stdin if we have inputs\n\t\t\t\t\tstdin = nil\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ if we have a stdin, create a file from it\n\t\t\t\t\tfileArgs[\"\"] = files.NewReaderFile(\"\", \"\", stdin, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\targDefIndex++\n\t}\n\n\t\/\/ check to make sure we didn't miss any required arguments\n\tif len(argDefs) > argDefIndex {\n\t\tfor _, argDef := range argDefs[argDefIndex:] {\n\t\t\tif argDef.Required {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"Argument '%s' is required\", argDef.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stringArgs, filesMapToSortedArr(fileArgs), nil\n}\n\nfunc filesMapToSortedArr(fs map[string]files.File) []files.File {\n\tvar names []string\n\tfor name, _ := range fs {\n\t\tnames = append(names, name)\n\t}\n\n\tsort.Strings(names)\n\n\tvar out []files.File\n\tfor _, f := range names {\n\t\tout = append(out, fs[f])\n\t}\n\n\treturn out\n}\n\nfunc getArgDef(i int, argDefs []cmds.Argument) *cmds.Argument {\n\tif i < len(argDefs) {\n\t\t\/\/ get the argument definition (usually just argDefs[i])\n\t\treturn &argDefs[i]\n\n\t} else if len(argDefs) > 0 {\n\t\t\/\/ but if i > len(argDefs) we use the last argument definition)\n\t\treturn &argDefs[len(argDefs)-1]\n\t}\n\n\t\/\/ only happens if there aren't any definitions\n\treturn nil\n}\n\nfunc appendString(args, inputs []string) ([]string, []string) {\n\treturn append(args, inputs[0]), inputs[1:]\n}\n\nfunc appendStdinAsString(args []string, stdin *os.File) ([]string, *os.File, error) {\n\tbuf := new(bytes.Buffer)\n\n\t_, err := buf.ReadFrom(stdin)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinput := strings.TrimSpace(buf.String())\n\treturn append(args, strings.Split(input, \"\\n\")...), nil, nil\n}\n\nconst notRecursiveFmtStr = \"'%s' is a directory, use the '-%s' flag to specify directories\"\nconst dirNotSupportedFmtStr = \"Invalid path '%s', argument '%s' does not support directories\"\n\nfunc appendFile(fpath string, argDef *cmds.Argument, recursive, hidden bool) (files.File, error) {\n\tfpath = filepath.ToSlash(filepath.Clean(fpath))\n\n\tif fpath == \".\" {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfpath = cwd\n\t}\n\n\tstat, err := os.Lstat(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif stat.IsDir() {\n\t\tif !argDef.Recursive {\n\t\t\treturn nil, fmt.Errorf(dirNotSupportedFmtStr, fpath, argDef.Name)\n\t\t}\n\t\tif !recursive {\n\t\t\treturn nil, fmt.Errorf(notRecursiveFmtStr, fpath, cmds.RecShort)\n\t\t}\n\t}\n\n\treturn files.NewSerialFile(path.Base(fpath), fpath, hidden, stat)\n}\n\n\/\/ isTerminal returns true if stdin is a Stdin pipe (e.g. `cat file | ipfs`),\n\/\/ and false otherwise (e.g. nothing is being piped in, so stdin is\n\/\/ coming from the terminal)\nfunc isTerminal(stdin *os.File) (bool, error) {\n\tstat, err := stdin.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ if stdin is a CharDevice, return true\n\treturn ((stat.Mode() & os.ModeCharDevice) != 0), nil\n}\n<commit_msg>Fix dot path parsing on Windows<commit_after>package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\n\tcmds \"github.com\/ipfs\/go-ipfs\/commands\"\n\tfiles \"github.com\/ipfs\/go-ipfs\/commands\/files\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\n\/\/ Parse parses the input commandline string (cmd, flags, and args).\n\/\/ returns the corresponding command Request object.\nfunc Parse(input []string, stdin *os.File, root *cmds.Command) (cmds.Request, *cmds.Command, []string, error) {\n\tpath, opts, stringVals, cmd, err := parseOpts(input, root)\n\tif err != nil {\n\t\treturn nil, nil, path, err\n\t}\n\n\toptDefs, err := root.GetOptions(path)\n\tif err != nil {\n\t\treturn nil, cmd, path, err\n\t}\n\n\treq, err := cmds.NewRequest(path, opts, nil, nil, cmd, optDefs)\n\tif err != nil {\n\t\treturn nil, cmd, path, err\n\t}\n\n\t\/\/ if -r is provided, and it is associated with the package builtin\n\t\/\/ recursive path option, allow recursive file paths\n\trecursiveOpt := req.Option(cmds.RecShort)\n\trecursive := false\n\tif recursiveOpt != nil && recursiveOpt.Definition() == cmds.OptionRecursivePath {\n\t\trecursive, _, err = recursiveOpt.Bool()\n\t\tif err != nil {\n\t\t\treturn req, nil, nil, u.ErrCast()\n\t\t}\n\t}\n\n\t\/\/ if '--hidden' is provided, enumerate hidden paths\n\thiddenOpt := req.Option(\"hidden\")\n\thidden := false\n\tif hiddenOpt != nil {\n\t\thidden, _, err = hiddenOpt.Bool()\n\t\tif err != nil {\n\t\t\treturn req, nil, nil, u.ErrCast()\n\t\t}\n\t}\n\n\tstringArgs, fileArgs, err := parseArgs(stringVals, stdin, cmd.Arguments, recursive, hidden, root)\n\tif err != nil {\n\t\treturn req, cmd, path, err\n\t}\n\treq.SetArguments(stringArgs)\n\n\tif len(fileArgs) > 0 {\n\t\tfile := files.NewSliceFile(\"\", \"\", fileArgs)\n\t\treq.SetFiles(file)\n\t}\n\n\terr = cmd.CheckArguments(req)\n\tif err != nil {\n\t\treturn req, cmd, path, err\n\t}\n\n\treturn req, cmd, path, nil\n}\n\n\/\/ Parse a command line made up of sub-commands, short arguments, long arguments and positional arguments\nfunc parseOpts(args []string, root *cmds.Command) (\n\tpath []string,\n\topts map[string]interface{},\n\tstringVals []string,\n\tcmd *cmds.Command,\n\terr error,\n) {\n\tpath = make([]string, 0, len(args))\n\tstringVals = make([]string, 0, len(args))\n\toptDefs := map[string]cmds.Option{}\n\topts = map[string]interface{}{}\n\tcmd = root\n\n\t\/\/ parseFlag checks that a flag is valid and saves it into opts\n\t\/\/ Returns true if the optional second argument is used\n\tparseFlag := func(name string, arg *string, mustUse bool) (bool, error) {\n\t\tif _, ok := opts[name]; ok {\n\t\t\treturn false, fmt.Errorf(\"Duplicate values for option '%s'\", name)\n\t\t}\n\n\t\toptDef, found := optDefs[name]\n\t\tif !found {\n\t\t\terr = fmt.Errorf(\"Unrecognized option '%s'\", name)\n\t\t\treturn false, err\n\t\t}\n\t\t\/\/ mustUse implies that you must use the argument given after the '='\n\t\t\/\/ eg. -r=true means you must take true into consideration\n\t\t\/\/\t\tmustUse == true in the above case\n\t\t\/\/ eg. ipfs -r <file> means disregard <file> since there is no '='\n\t\t\/\/\t\tmustUse == false in the above situation\n\t\t\/\/arg == nil implies the flag was specified without an argument\n\t\tif optDef.Type() == cmds.Bool {\n\t\t\tif arg == nil || !mustUse {\n\t\t\t\topts[name] = true\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\targVal := strings.ToLower(*arg)\n\t\t\tswitch argVal {\n\t\t\tcase \"true\":\n\t\t\t\topts[name] = true\n\t\t\t\treturn true, nil\n\t\t\tcase \"false\":\n\t\t\t\topts[name] = false\n\t\t\t\treturn true, nil\n\t\t\tdefault:\n\t\t\t\treturn true, fmt.Errorf(\"Option '%s' takes true\/false arguments, but was passed '%s'\", name, argVal)\n\t\t\t}\n\t\t} else {\n\t\t\tif arg == nil {\n\t\t\t\treturn true, fmt.Errorf(\"Missing argument for option '%s'\", name)\n\t\t\t}\n\t\t\topts[name] = *arg\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\toptDefs, err = root.GetOptions(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconsumed := false\n\tfor i, arg := range args {\n\t\tswitch {\n\t\tcase consumed:\n\t\t\t\/\/ arg was already consumed by the preceding flag\n\t\t\tconsumed = false\n\t\t\tcontinue\n\n\t\tcase arg == \"--\":\n\t\t\t\/\/ treat all remaining arguments as positional arguments\n\t\t\tstringVals = append(stringVals, args[i+1:]...)\n\t\t\treturn\n\n\t\tcase strings.HasPrefix(arg, \"--\"):\n\t\t\t\/\/ arg is a long flag, with an optional argument specified\n\t\t\t\/\/ using `=' or in args[i+1]\n\t\t\tvar slurped bool\n\t\t\tvar next *string\n\t\t\tsplit := strings.SplitN(arg, \"=\", 2)\n\t\t\tif len(split) == 2 {\n\t\t\t\tslurped = false\n\t\t\t\targ = split[0]\n\t\t\t\tnext = &split[1]\n\t\t\t} else {\n\t\t\t\tslurped = true\n\t\t\t\tif i+1 < len(args) {\n\t\t\t\t\tnext = &args[i+1]\n\t\t\t\t} else {\n\t\t\t\t\tnext = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tconsumed, err = parseFlag(arg[2:], next, len(split) == 2)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !slurped {\n\t\t\t\tconsumed = false\n\t\t\t}\n\n\t\tcase strings.HasPrefix(arg, \"-\") && arg != \"-\":\n\t\t\t\/\/ args is one or more flags in short form, followed by an optional argument\n\t\t\t\/\/ all flags except the last one have type bool\n\t\t\tfor arg = arg[1:]; len(arg) != 0; arg = arg[1:] {\n\t\t\t\tvar rest *string\n\t\t\t\tvar slurped bool\n\t\t\t\tmustUse := false\n\t\t\t\tif len(arg) > 1 {\n\t\t\t\t\tslurped = false\n\t\t\t\t\tstr := arg[1:]\n\t\t\t\t\tif len(str) > 0 && str[0] == '=' {\n\t\t\t\t\t\tstr = str[1:]\n\t\t\t\t\t\tmustUse = true\n\t\t\t\t\t}\n\t\t\t\t\trest = &str\n\t\t\t\t} else {\n\t\t\t\t\tslurped = true\n\t\t\t\t\tif i+1 < len(args) {\n\t\t\t\t\t\trest = &args[i+1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\trest = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar end bool\n\t\t\t\tend, err = parseFlag(arg[:1], rest, mustUse)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif end {\n\t\t\t\t\tconsumed = slurped\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\t\/\/ arg is a sub-command or a positional argument\n\t\t\tsub := cmd.Subcommand(arg)\n\t\t\tif sub != nil {\n\t\t\t\tcmd = sub\n\t\t\t\tpath = append(path, arg)\n\t\t\t\toptDefs, err = root.GetOptions(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ If we've come across an external binary call, pass all the remaining\n\t\t\t\t\/\/ arguments on to it\n\t\t\t\tif cmd.External {\n\t\t\t\t\tstringVals = append(stringVals, args[i+1:]...)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstringVals = append(stringVals, arg)\n\t\t\t\tif len(path) == 0 {\n\t\t\t\t\t\/\/ found a typo or early argument\n\t\t\t\t\terr = printSuggestions(stringVals, root)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursive, hidden bool, root *cmds.Command) ([]string, []files.File, error) {\n\t\/\/ ignore stdin on Windows\n\tif runtime.GOOS == \"windows\" {\n\t\tstdin = nil\n\t}\n\n\t\/\/ check if stdin is coming from terminal or is being piped in\n\tif stdin != nil {\n\t\tif term, err := isTerminal(stdin); err != nil {\n\t\t\treturn nil, nil, err\n\t\t} else if term {\n\t\t\tstdin = nil \/\/ set to nil so we ignore it\n\t\t}\n\t}\n\n\t\/\/ count required argument definitions\n\tnumRequired := 0\n\tfor _, argDef := range argDefs {\n\t\tif argDef.Required {\n\t\t\tnumRequired++\n\t\t}\n\t}\n\n\t\/\/ count number of values provided by user.\n\t\/\/ if there is at least one ArgDef, we can safely trigger the inputs loop\n\t\/\/ below to parse stdin.\n\tnumInputs := len(inputs)\n\tif len(argDefs) > 0 && argDefs[len(argDefs)-1].SupportsStdin && stdin != nil {\n\t\tnumInputs += 1\n\t}\n\n\t\/\/ if we have more arg values provided than argument definitions,\n\t\/\/ and the last arg definition is not variadic (or there are no definitions), return an error\n\tnotVariadic := len(argDefs) == 0 || !argDefs[len(argDefs)-1].Variadic\n\tif notVariadic && len(inputs) > len(argDefs) {\n\t\terr := printSuggestions(inputs, root)\n\t\treturn nil, nil, err\n\t}\n\n\tstringArgs := make([]string, 0, numInputs)\n\n\tfileArgs := make(map[string]files.File)\n\targDefIndex := 0 \/\/ the index of the current argument definition\n\tfor i := 0; i < numInputs; i++ {\n\t\targDef := getArgDef(argDefIndex, argDefs)\n\n\t\t\/\/ skip optional argument definitions if there aren't sufficient remaining inputs\n\t\tfor numInputs-i <= numRequired && !argDef.Required {\n\t\t\targDefIndex++\n\t\t\targDef = getArgDef(argDefIndex, argDefs)\n\t\t}\n\t\tif argDef.Required {\n\t\t\tnumRequired--\n\t\t}\n\n\t\tvar err error\n\t\tif argDef.Type == cmds.ArgString {\n\t\t\tif stdin == nil {\n\t\t\t\t\/\/ add string values\n\t\t\t\tstringArgs, inputs = appendString(stringArgs, inputs)\n\t\t\t} else if !argDef.SupportsStdin {\n\t\t\t\tif len(inputs) == 0 {\n\t\t\t\t\t\/\/ failure case, we have stdin, but our current\n\t\t\t\t\t\/\/ argument doesnt want stdin\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tstringArgs, inputs = appendString(stringArgs, inputs)\n\t\t\t} else {\n\t\t\t\tif len(inputs) > 0 {\n\t\t\t\t\t\/\/ don't use stdin if we have inputs\n\t\t\t\t\tstdin = nil\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ if we have a stdin, read it in and use the data as a string value\n\t\t\t\t\tstringArgs, stdin, err = appendStdinAsString(stringArgs, stdin)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if argDef.Type == cmds.ArgFile {\n\t\t\tif stdin == nil || !argDef.SupportsStdin {\n\t\t\t\t\/\/ treat stringArg values as file paths\n\t\t\t\tfpath := inputs[0]\n\t\t\t\tinputs = inputs[1:]\n\t\t\t\tfile, err := appendFile(fpath, argDef, recursive, hidden)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\n\t\t\t\tfileArgs[fpath] = file\n\t\t\t} else {\n\t\t\t\tif len(inputs) > 0 {\n\t\t\t\t\t\/\/ don't use stdin if we have inputs\n\t\t\t\t\tstdin = nil\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ if we have a stdin, create a file from it\n\t\t\t\t\tfileArgs[\"\"] = files.NewReaderFile(\"\", \"\", stdin, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\targDefIndex++\n\t}\n\n\t\/\/ check to make sure we didn't miss any required arguments\n\tif len(argDefs) > argDefIndex {\n\t\tfor _, argDef := range argDefs[argDefIndex:] {\n\t\t\tif argDef.Required {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"Argument '%s' is required\", argDef.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stringArgs, filesMapToSortedArr(fileArgs), nil\n}\n\nfunc filesMapToSortedArr(fs map[string]files.File) []files.File {\n\tvar names []string\n\tfor name, _ := range fs {\n\t\tnames = append(names, name)\n\t}\n\n\tsort.Strings(names)\n\n\tvar out []files.File\n\tfor _, f := range names {\n\t\tout = append(out, fs[f])\n\t}\n\n\treturn out\n}\n\nfunc getArgDef(i int, argDefs []cmds.Argument) *cmds.Argument {\n\tif i < len(argDefs) {\n\t\t\/\/ get the argument definition (usually just argDefs[i])\n\t\treturn &argDefs[i]\n\n\t} else if len(argDefs) > 0 {\n\t\t\/\/ but if i > len(argDefs) we use the last argument definition)\n\t\treturn &argDefs[len(argDefs)-1]\n\t}\n\n\t\/\/ only happens if there aren't any definitions\n\treturn nil\n}\n\nfunc appendString(args, inputs []string) ([]string, []string) {\n\treturn append(args, inputs[0]), inputs[1:]\n}\n\nfunc appendStdinAsString(args []string, stdin *os.File) ([]string, *os.File, error) {\n\tbuf := new(bytes.Buffer)\n\n\t_, err := buf.ReadFrom(stdin)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinput := strings.TrimSpace(buf.String())\n\treturn append(args, strings.Split(input, \"\\n\")...), nil, nil\n}\n\nconst notRecursiveFmtStr = \"'%s' is a directory, use the '-%s' flag to specify directories\"\nconst dirNotSupportedFmtStr = \"Invalid path '%s', argument '%s' does not support directories\"\n\nfunc appendFile(fpath string, argDef *cmds.Argument, recursive, hidden bool) (files.File, error) {\n\tif fpath == \".\" {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfpath = cwd\n\t}\n\n\tfpath = filepath.ToSlash(filepath.Clean(fpath))\n\n\tstat, err := os.Lstat(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif stat.IsDir() {\n\t\tif !argDef.Recursive {\n\t\t\treturn nil, fmt.Errorf(dirNotSupportedFmtStr, fpath, argDef.Name)\n\t\t}\n\t\tif !recursive {\n\t\t\treturn nil, fmt.Errorf(notRecursiveFmtStr, fpath, cmds.RecShort)\n\t\t}\n\t}\n\n\treturn files.NewSerialFile(path.Base(fpath), fpath, hidden, stat)\n}\n\n\/\/ isTerminal returns true if stdin is a Stdin pipe (e.g. `cat file | ipfs`),\n\/\/ and false otherwise (e.g. nothing is being piped in, so stdin is\n\/\/ coming from the terminal)\nfunc isTerminal(stdin *os.File) (bool, error) {\n\tstat, err := stdin.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ if stdin is a CharDevice, return true\n\treturn ((stat.Mode() & os.ModeCharDevice) != 0), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n)\n\n\/\/ Parse parses the input commandline string (cmd, flags, and args).\n\/\/ returns the corresponding command Request object.\nfunc Parse(input []string, roots ...*cmds.Command) (cmds.Request, *cmds.Command, error) {\n\tvar root, cmd *cmds.Command\n\tvar path, stringArgs []string\n\tvar opts map[string]interface{}\n\n\t\/\/ use the root that matches the longest path (most accurately matches request)\n\tmaxLength := 0\n\tfor _, r := range roots {\n\t\tp, i, c := parsePath(input, r)\n\t\to, s, err := parseOptions(i)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlength := len(p)\n\t\tif length > maxLength {\n\t\t\tmaxLength = length\n\t\t\troot = r\n\t\t\tpath = p\n\t\t\tcmd = c\n\t\t\topts = o\n\t\t\tstringArgs = s\n\t\t}\n\t}\n\n\tif maxLength == 0 {\n\t\treturn nil, nil, errors.New(\"Not a valid subcommand\")\n\t}\n\n\targs, err := parseArgs(stringArgs, cmd)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq := cmds.NewRequest(path, opts, args, cmd)\n\n\terr = cmd.CheckArguments(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn req, root, nil\n}\n\n\/\/ parsePath separates the command path and the opts and args from a command string\n\/\/ returns command path slice, rest slice, and the corresponding *cmd.Command\nfunc parsePath(input []string, root *cmds.Command) ([]string, []string, *cmds.Command) {\n\tcmd := root\n\ti := 0\n\n\tfor _, blob := range input {\n\t\tif strings.HasPrefix(blob, \"-\") {\n\t\t\tbreak\n\t\t}\n\n\t\tsub := cmd.Subcommand(blob)\n\t\tif sub == nil {\n\t\t\tbreak\n\t\t}\n\t\tcmd = sub\n\n\t\ti++\n\t}\n\n\treturn input[:i], input[i:], cmd\n}\n\n\/\/ parseOptions parses the raw string values of the given options\n\/\/ returns the parsed options as strings, along with the CLI args\nfunc parseOptions(input []string) (map[string]interface{}, []string, error) {\n\topts := make(map[string]interface{})\n\targs := []string{}\n\n\tfor i := 0; i < len(input); i++ {\n\t\tblob := input[i]\n\n\t\tif strings.HasPrefix(blob, \"-\") {\n\t\t\tname := blob[1:]\n\t\t\tvalue := \"\"\n\n\t\t\t\/\/ support single and double dash\n\t\t\tif strings.HasPrefix(name, \"-\") {\n\t\t\t\tname = name[1:]\n\t\t\t}\n\n\t\t\tif strings.Contains(name, \"=\") {\n\t\t\t\tsplit := strings.SplitN(name, \"=\", 2)\n\t\t\t\tname = split[0]\n\t\t\t\tvalue = split[1]\n\t\t\t}\n\n\t\t\tif _, ok := opts[name]; ok {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"Duplicate values for option '%s'\", name)\n\t\t\t}\n\n\t\t\topts[name] = value\n\n\t\t} else {\n\t\t\targs = append(args, blob)\n\t\t}\n\t}\n\n\treturn opts, args, nil\n}\n\nfunc parseArgs(stringArgs []string, cmd *cmds.Command) ([]interface{}, error) {\n\targs := make([]interface{}, 0)\n\n\t\/\/ count required argument definitions\n\tlenRequired := 0\n\tfor _, argDef := range cmd.Arguments {\n\t\tif argDef.Required {\n\t\t\tlenRequired++\n\t\t}\n\t}\n\n\tj := 0\n\tfor _, argDef := range cmd.Arguments {\n\t\t\/\/ skip optional argument definitions if there aren't sufficient remaining values\n\t\tif len(stringArgs)-j <= lenRequired && !argDef.Required {\n\t\t\tcontinue\n\t\t}\n\n\t\tif j >= len(stringArgs) {\n\t\t\tbreak\n\t\t}\n\n\t\tif argDef.Variadic {\n\t\t\tfor _, arg := range stringArgs[j:] {\n\t\t\t\tvar err error\n\t\t\t\targs, err = appendArg(args, argDef, arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tvar err error\n\t\t\targs, err = appendArg(args, argDef, stringArgs[j])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tj++\n\t}\n\n\tif len(stringArgs)-j > 0 {\n\t\targs = append(args, make([]interface{}, len(stringArgs)-j))\n\t}\n\n\treturn args, nil\n}\n\nfunc appendArg(args []interface{}, argDef cmds.Argument, value string) ([]interface{}, error) {\n\tif argDef.Type == cmds.ArgString {\n\t\treturn append(args, value), nil\n\n\t} else {\n\t\tin, err := os.Open(value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn append(args, in), nil\n\t}\n}\n<commit_msg>commands\/cli: Made Parse return the resolved subcommand, even on error<commit_after>package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n)\n\n\/\/ Parse parses the input commandline string (cmd, flags, and args).\n\/\/ returns the corresponding command Request object.\nfunc Parse(input []string, roots ...*cmds.Command) (cmds.Request, *cmds.Command, *cmds.Command, error) {\n\tvar root, cmd *cmds.Command\n\tvar path, stringArgs []string\n\tvar opts map[string]interface{}\n\n\t\/\/ use the root that matches the longest path (most accurately matches request)\n\tmaxLength := 0\n\tfor _, r := range roots {\n\t\tp, i, c := parsePath(input, r)\n\t\to, s, err := parseOptions(i)\n\t\tif err != nil {\n\t\t\treturn nil, root, c, err\n\t\t}\n\n\t\tlength := len(p)\n\t\tif length > maxLength {\n\t\t\tmaxLength = length\n\t\t\troot = r\n\t\t\tpath = p\n\t\t\tcmd = c\n\t\t\topts = o\n\t\t\tstringArgs = s\n\t\t}\n\t}\n\n\tif maxLength == 0 {\n\t\treturn nil, root, nil, errors.New(\"Not a valid subcommand\")\n\t}\n\n\targs, err := parseArgs(stringArgs, cmd)\n\tif err != nil {\n\t\treturn nil, root, cmd, err\n\t}\n\n\treq := cmds.NewRequest(path, opts, args, cmd)\n\n\terr = cmd.CheckArguments(req)\n\tif err != nil {\n\t\treturn nil, root, cmd, err\n\t}\n\n\treturn req, root, cmd, nil\n}\n\n\/\/ parsePath separates the command path and the opts and args from a command string\n\/\/ returns command path slice, rest slice, and the corresponding *cmd.Command\nfunc parsePath(input []string, root *cmds.Command) ([]string, []string, *cmds.Command) {\n\tcmd := root\n\ti := 0\n\n\tfor _, blob := range input {\n\t\tif strings.HasPrefix(blob, \"-\") {\n\t\t\tbreak\n\t\t}\n\n\t\tsub := cmd.Subcommand(blob)\n\t\tif sub == nil {\n\t\t\tbreak\n\t\t}\n\t\tcmd = sub\n\n\t\ti++\n\t}\n\n\treturn input[:i], input[i:], cmd\n}\n\n\/\/ parseOptions parses the raw string values of the given options\n\/\/ returns the parsed options as strings, along with the CLI args\nfunc parseOptions(input []string) (map[string]interface{}, []string, error) {\n\topts := make(map[string]interface{})\n\targs := []string{}\n\n\tfor i := 0; i < len(input); i++ {\n\t\tblob := input[i]\n\n\t\tif strings.HasPrefix(blob, \"-\") {\n\t\t\tname := blob[1:]\n\t\t\tvalue := \"\"\n\n\t\t\t\/\/ support single and double dash\n\t\t\tif strings.HasPrefix(name, \"-\") {\n\t\t\t\tname = name[1:]\n\t\t\t}\n\n\t\t\tif strings.Contains(name, \"=\") {\n\t\t\t\tsplit := strings.SplitN(name, \"=\", 2)\n\t\t\t\tname = split[0]\n\t\t\t\tvalue = split[1]\n\t\t\t}\n\n\t\t\tif _, ok := opts[name]; ok {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"Duplicate values for option '%s'\", name)\n\t\t\t}\n\n\t\t\topts[name] = value\n\n\t\t} else {\n\t\t\targs = append(args, blob)\n\t\t}\n\t}\n\n\treturn opts, args, nil\n}\n\nfunc parseArgs(stringArgs []string, cmd *cmds.Command) ([]interface{}, error) {\n\targs := make([]interface{}, 0)\n\n\t\/\/ count required argument definitions\n\tlenRequired := 0\n\tfor _, argDef := range cmd.Arguments {\n\t\tif argDef.Required {\n\t\t\tlenRequired++\n\t\t}\n\t}\n\n\tj := 0\n\tfor _, argDef := range cmd.Arguments {\n\t\t\/\/ skip optional argument definitions if there aren't sufficient remaining values\n\t\tif len(stringArgs)-j <= lenRequired && !argDef.Required {\n\t\t\tcontinue\n\t\t}\n\n\t\tif j >= len(stringArgs) {\n\t\t\tbreak\n\t\t}\n\n\t\tif argDef.Variadic {\n\t\t\tfor _, arg := range stringArgs[j:] {\n\t\t\t\tvar err error\n\t\t\t\targs, err = appendArg(args, argDef, arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tvar err error\n\t\t\targs, err = appendArg(args, argDef, stringArgs[j])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tj++\n\t}\n\n\tif len(stringArgs)-j > 0 {\n\t\targs = append(args, make([]interface{}, len(stringArgs)-j))\n\t}\n\n\treturn args, nil\n}\n\nfunc appendArg(args []interface{}, argDef cmds.Argument, value string) ([]interface{}, error) {\n\tif argDef.Type == cmds.ArgString {\n\t\treturn append(args, value), nil\n\n\t} else {\n\t\tin, err := os.Open(value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn append(args, in), nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\npackage core\n\nimport (\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"runtime\"\n\t\"math\"\n\t\"bytes\"\n\t\"time\"\n\t\"github.com\/jackyb\/go-sdl2\/sdl\"\n\t\"github.com\/op\/go-nanomsg\"\n\t\"github.com\/fire\/go-ogre3d\"\n\t\"github.com\/jmckaskill\/go-capnproto\")\n\ntype InputState struct {\n\tyawSens float32\n\tpitchSens float32\n\torientationFactor float32 \/\/ +1\/-1 easy switch between look around and manipulate something\n\tyaw float32 \/\/ degrees, modulo [-180,180] range\n\tpitch float32 \/\/ degrees, clamped [-90,90] range\n\troll float32\n\torientation ogre.Quaternion \/\/ current orientation\n}\n\nfunc InitCore() {\n\tvar gameThreadParams GameThreadParams\n\tgameThreadParams.start = time.Now() \/\/ There's an small time before this variable is initalized,\n\t\/\/ it probably doesn't matter... Someone timed Go initalization at 1.94us on Linux.\n\t\n\tsdl.Init(sdl.INIT_EVERYTHING)\n\twindow := sdl.CreateWindow(\"es_core::SDL\",\n\t\tsdl.WINDOWPOS_UNDEFINED,\n\t\tsdl.WINDOWPOS_UNDEFINED,\n\t\t800,\n\t\t600,\n\t\tsdl.WINDOW_SHOWN)\n\tif window == nil {\n\t\tpanic(fmt.Sprintf(\"sdl.CreateWindow failed: %s\\n\", sdl.GetError()))\n\t}\n\tdefer sdl.Quit()\n\tvar info sdl.SysWMInfo \n\tif !window.GetWMInfo(&info) {\n\t\tpanic(fmt.Sprintf(\"window.GetWMInfo failed.\\n\"))\n\t}\n\tvar version sdl.Version\n\tsdl.GetVersion(&version)\n\t\n\tfmt.Printf(\"Sdl Major Version: %d\\n\", version.Major)\n\tfmt.Printf(\"Sdl Minor Version: %d\\n\", version.Minor)\n\tfmt.Printf(\"Sdl Patch level: %d\\n\", version.Patch)\n\tfmt.Printf(\"Sdl Subsystem: %s\\n\", getSubsystemString(info)) \n\troot := ogre.NewRoot(\"\", \"\", \"ogre.log\")\n\tdefer root.Destroy()\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\troot.LoadPlugin(wd + \"\/RenderSystem_GL3Plus\")\n\t\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\troot.LoadPlugin(wd + \"\/..\/frameworks\/RenderSystem_GL3Plus\")\n\t}\n\t\t\t\t\n\trenderers := root.GetAvailableRenderers()\n\tif renderers.RenderSystemListSize() != 1 {\n\t\t\n\t\tpanic(fmt.Sprintf(\"Failed to initalize RendererRenderSystem_GL\"))\n\t}\n\troot.SetRenderSystem(renderers.RenderSystemListGet(0))\n\troot.Initialise(false, \"es_core::ogre\")\n\tparams := ogre.CreateNameValuePairList()\n\tif runtime.GOOS == \"windows\" {\n\t\twindowsInfo := info.GetWindowsInfo()\n\t\twindowString := strconv.FormatUint(uint64(*(*uint32)(windowsInfo.Window)), 10)\n\t\tparams.AddPair(\"parentWindowHandle\", windowString)\n\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\tparams.AddPair(\"macAPI\", \"cocoa\")\n\t\tcocoaInfo := info.GetCocoaInfo()\n\t\twindowString := strconv.FormatUint(uint64(*(*uint32)(cocoaInfo.Window)), 10)\n\t\tparams.AddPair(\"parentWindowHandle\", windowString)\n\t}\n\t\n\trenderWindow := root.CreateRenderWindow(\"es_core::ogre\", 800, 600, false, params)\n\trenderWindow.SetVisible(true)\n\t\n\tnnGameSocket, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.BUS)\n if err != nil {\n panic(err)\n }\n _, err = nnGameSocket.Bind(\"tcp:\/\/127.0.0.1:60206\")\n if err != nil {\n panic(err)\n }\n\t\n\tnnRenderSocket, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.BUS)\n\tif err != nil {\n panic(err)\n }\n _, err = nnRenderSocket.Bind(\"tcp:\/\/127.0.0.1:60207\")\n if err != nil {\n panic(err)\n }\n\n\tnnInputPub, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.PUB)\n if err != nil {\n panic(err)\n }\n _, err = nnInputPub.Bind(\"tcp:\/\/127.0.0.1:60208\")\n if err != nil {\n panic(err)\n }\n\n\tnnInputPull, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.PULL)\n if err != nil {\n panic(err)\n }\n _, err = nnInputPull.Bind(\"tcp:\/\/127.0.0.1:60209\")\n if err != nil {\n panic(err)\n }\n\tgo gameThread(gameThreadParams)\n\tvar renderThreadParams RenderThreadParams\n\trenderThreadParams.start = gameThreadParams.start\n\trenderThreadParams.root = root\n\trenderThreadParams.window = window\n\trenderThreadParams.ogreWindow = renderWindow\n\t\n\tgo renderThread(renderThreadParams)\n\n\twindow.SetGrab(true)\n\tsdl.SetRelativeMouseMode(true)\n\n\tshutdownRequested := false\n\tvar is InputState\n\tis.yawSens = 0.1\n\tis.yaw = 0.0\n\tis.pitchSens = 0.1\n\tis.pitch = 0.0\n\tis.roll = 0.0\n\tis.orientationFactor = -1.0 \/\/ Look around config\n\n\n\tfor !shutdownRequested \/* && SDL_GetTicks() < MAX_RUN_TIME *\/ {\n\t\t\/\/ We wait here.\n\t\tb, err := nnInputPull.Recv(0)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err)\n\t\t}\t\n\t\ts, _, err := capn.ReadFromMemoryZeroCopy(b)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Read error %v\\n\", err)\n\t\t\treturn\n\t\t}\t\n\t\tstate := ReadRootState(s)\n\t\tfmt.Printf(\"Game push received:\\n\")\n\t\t\/\/ poll for events before processing the request\n\t\t\/\/ NOTE: this is how SDL builds the internal mouse and keyboard state\n\t\t\/\/ TODO: done this way does not meet the objectives of smooth, frame independent mouse view control,\n\t\t\/\/ Plus it throws some latency into the calling thread\n\n\t\tvar event sdl.Event\n\t\tfor event = sdl.PollEvent(); event != nil; event = sdl.PollEvent {\n\t\t\tswitch t := event.(type) {\n\t\t\tcase *sdl.KeyDownEvent:\n\t\t\t\tfmt.Printf(\"SDL keyboard event:\\n\")\n\t\t\tcase *sdl.KeyUpEvent:\n\t\t\t\tfmt.Printf(\"SDL keyboard event:\\n\")\n\t\t\t\tif t.Keysym.Scancode == sdl.SCANCODE_ESCAPE {\n\t\t\t\t\t\/\/ Todo\n\t\t\t\t\tsendShutdown(nnRenderSocket, nnGameSocket)\n\t\t\t\t\tshutdownRequested = true\n\t\t\t\t}\n\t\t\tcase *sdl.MouseMotionEvent:\n\t\t\t\t\/\/ + when manipulating an object, - when doing a first person view .. needs to be configurable?\n\t\t\t\tis.yaw += is.orientationFactor * is.yawSens * float32(t.XRel)\n\t\t\t\tif is.yaw >= 0.0 {\n\t\t\t\t\tis.yaw = float32(math.Mod(float64(is.yaw) + 180.0, 360.0) - 180.0)\n\t\t\t\t} else {\n\t\t\t\t\tis.yaw = float32(math.Mod(float64(is.yaw) - 180.0, 360.0) + 180.0)\n\t\t\t\t}\n\t\t\t\t\/\/ + when manipulating an object, - when doing a first person view .. needs to be configurable?\n\t\t\t\tis.pitch += is.orientationFactor * is.pitchSens * float32(t.YRel)\n\t\t\t\tif is.pitch > 90.0 {\n\t\t\t\t\tis.pitch = 90.0\n\t\t\t\t} else if ( is.pitch < -90.0 ) {\n\t\t\t\t\tis.pitch = -90.0\n\t\t\t\t}\n\t\t\t\t\/\/ build a quaternion of the current orientation\n\t\t\t\tvar r ogre.Matrix3\n\t\t\t\tr.FromEulerAnglesYXZ( deg2Rad(is.yaw), deg2Rad(is.pitch), deg2Rad(is.roll)) \n\t\t\t\tis.orientation.FromRotationMatrix(r)\n\t\t\tcase *sdl.MouseButtonEvent:\n\t\t\t\tfmt.Printf(\"SDL mouse button event:\\n\")\n\t\t\tcase *sdl.QuitEvent:\n\t\t\t \/\/ push a shutdown on the control socket, game and render will pick it up later\n\t\t\t\t\/\/ NOTE: if the message patterns change we may still have to deal with hangs here\n\t\t\t\tsendShutdown(nnRenderSocket, nnGameSocket)\t\t\t\t\n\t\t\t\t\n\t\t\t\tshutdownRequested = true\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"SDL_Event %T\\n\", event);\n\t\t\t}\n\t\t}\n\t\tswitch {\n\t\t\/\/ we are ready to process the request now\n\t\tcase state.Mouse():\n\t\t\tbuttons := sdl.GetMouseState(nil, nil)\n\t\t\tfmt.Printf(\"buttons: %d\\n\", buttons)\n\t\t\ts := capn.NewBuffer(nil)\n\t\t\tms := NewRootInputMouse(s)\n\t\t\tms.SetW(is.orientation.W())\n\t\t\tms.SetX(is.orientation.X())\n\t\t\tms.SetY(is.orientation.Y())\n\t\t\tms.SetZ(is.orientation.Z())\n\t\t\tms.SetButtons(buttons)\n\t\t\tbuf := bytes.Buffer{}\n\t\t\ts.WriteTo(&buf)\n\t\t\tnnInputPub.Send(append([]byte(\"input.mouse:\"), buf.Bytes()...), 0)\n\t\t\tfmt.Printf(\"Mouse input sent.\\n\")\n\t\t\t\n\t\tcase state.Kb():\n\t\t\/\/ looking at a few hardcoded keys for now\n\t\t\/\/ NOTE: I suspect it would be perfectly safe to grab that pointer once, and read it from a different thread?\n\t\t\tstate := sdl.GetKeyboardState(nil)\n\t\t\tt := capn.NewBuffer(nil)\n\t\t\tkbs := NewRootInputKb(t)\t\t\t\n\t\t\tkbs.SetW(state[sdl.SCANCODE_W] != 0)\n\t\t\tkbs.SetA(state[sdl.SCANCODE_A] != 0)\n\t\t\tkbs.SetS(state[sdl.SCANCODE_S] != 0)\n\t\t\tkbs.SetD(state[sdl.SCANCODE_D] != 0)\n\t\t\tkbs.SetSpace(state[sdl.SCANCODE_SPACE] != 0)\n\t\t\tkbs.SetLalt(state[sdl.SCANCODE_LALT] != 0)\n\t\t\tb := bytes.Buffer{}\n\t\t\tt.WriteTo(&b)\n\t\t\tnnInputPub.Send(append([]byte(\"input.kb:\"), b.Bytes()...), 0)\n\t\t\tfmt.Printf(\"Keyboard input sent.\\n\")\n\t\t\t\t\n\t\tcase state.MouseReset():\n\t\t\tvar q ogre.Quaternion;\n\t\t\tis.orientation = q.FromValues(state.Quaternion().W(), state.Quaternion().X(),\n\t\t\t\tstate.Quaternion().Y(), state.Quaternion().Z())\n\t\t\tvar r ogre.Matrix3\n\t\t\tis.orientation.ToRotationMatrix(&r)\n\t\t\tvar rfYAngle, rfPAngle, rfRAngle float32\n\t\t\tr.ToEulerAnglesYXZ(&rfYAngle, &rfPAngle, &rfRAngle)\n\t\t\tis.yaw = rad2Deg(rfYAngle)\n\t\t\tis.pitch = rad2Deg(rfPAngle)\n\t\t\tis.roll = rad2Deg(rfRAngle)\n\t\tcase state.ConfigLookAround():\n\t\t\tif state.LookAround().ManipulateObject() {\n\t\t\t\tfmt.Printf(\"Input configuration: manipulate object\\n\");\n\t\t\t\tis.orientationFactor = 1.0;\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Input configuration: look around\\n\");\n\t\t\t\tis.orientationFactor = -1.0\n\t\t\t}\n\t\t}\n\t}\n\tif !shutdownRequested {\n sendShutdown(nnRenderSocket, nnGameSocket)\n shutdownRequested = true\n }\n waitShutdown(nnInputPull, &gameThreadParams)\n}\n\nfunc deg2Rad(deg float32) float32 {\n\treturn deg * math.Pi \/ 180\n}\n\nfunc rad2Deg (rad float32) float32 {\n\treturn rad * 180 \/ math.Pi\n}\n\nfunc sendShutdown(nnRenderSocket *nanomsg.Socket, nnGameSocket *nanomsg.Socket) {\n\ts := capn.NewBuffer(nil)\n\tstop := NewRootStop(s)\n\tstop.SetStop(true)\n\tbuf := bytes.Buffer{}\n\ts.WriteTo(&buf)\n\tfmt.Printf(\"Render socket shutdown.\\n\")\n\tnnRenderSocket.Send(buf.Bytes(), 0)\n\tfmt.Printf(\"Game socket shutdown.\\n\")\n\tnnGameSocket.Send(buf.Bytes(), 0)\n}\n\nfunc waitShutdown(nnInputPull *nanomsg.Socket, params *GameThreadParams) {\n\t\/\/ For now, loop the input thread for a bit to flush out any events\n\tcontinueTime := time.Since(params.start) + 500 * time.Millisecond \/\/ An eternity.\n\tfor time.Since(params.start) < continueTime {\t\n\t\tmsg, _ := nnInputPull.Recv(nanomsg.DontWait)\n\t\tif msg == nil {\n\t\t\tsdl.Delay(10)\n\t\t}\n\t}\n}\n\nfunc getSubsystemString(info sdl.SysWMInfo) string {\n\tswitch info.Subsystem {\n\tcase 0:\t\n\t return \"Unknown\"\n\tcase 1:\n\t\treturn \"Windows\"\n\tcase 2:\n\t\treturn \"X11\"\n\tcase 3:\n\t\treturn \"DirectFB\"\n\tcase 4: \n\t\treturn \"Cocoa\"\n\tcase 5:\n\t\treturn \"UiKit\"\n\t}\n\treturn \"Unknown\"\n}\n<commit_msg>Cleanup.<commit_after>\npackage core\n\nimport (\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"runtime\"\n\t\"math\"\n\t\"bytes\"\n\t\"time\"\n\t\"github.com\/jackyb\/go-sdl2\/sdl\"\n\t\"github.com\/op\/go-nanomsg\"\n\t\"github.com\/fire\/go-ogre3d\"\n\t\"github.com\/jmckaskill\/go-capnproto\")\n\ntype InputState struct {\n\tyawSens float32\n\tpitchSens float32\n\torientationFactor float32 \/\/ +1\/-1 easy switch between look around and manipulate something\n\tyaw float32 \/\/ degrees, modulo [-180,180] range\n\tpitch float32 \/\/ degrees, clamped [-90,90] range\n\troll float32\n\torientation ogre.Quaternion \/\/ current orientation\n}\n\nfunc InitCore() {\n\tvar gameThreadParams GameThreadParams\n\tgameThreadParams.start = time.Now() \/\/ There's an small time before this variable is initalized,\n\t\/\/ it probably doesn't matter... Someone timed Go initalization at 1.94us on Linux.\n\t\n\tsdl.Init(sdl.INIT_EVERYTHING)\n\tvar event sdl.Event\n\twindow := sdl.CreateWindow(\"es_core::SDL\",\n\t\tsdl.WINDOWPOS_UNDEFINED,\n\t\tsdl.WINDOWPOS_UNDEFINED,\n\t\t800,\n\t\t600,\n\t\tsdl.WINDOW_SHOWN)\n\tif window == nil {\n\t\tpanic(fmt.Sprintf(\"sdl.CreateWindow failed: %s\\n\", sdl.GetError()))\n\t}\n\tdefer sdl.Quit()\n\tvar info sdl.SysWMInfo \n\tif !window.GetWMInfo(&info) {\n\t\tpanic(fmt.Sprintf(\"window.GetWMInfo failed.\\n\"))\n\t}\n\tvar version sdl.Version\n\tsdl.GetVersion(&version)\n\t\n\tfmt.Printf(\"Sdl Major Version: %d\\n\", version.Major)\n\tfmt.Printf(\"Sdl Minor Version: %d\\n\", version.Minor)\n\tfmt.Printf(\"Sdl Patch level: %d\\n\", version.Patch)\n\tfmt.Printf(\"Sdl Subsystem: %s\\n\", getSubsystemString(info)) \n\troot := ogre.NewRoot(\"\", \"\", \"ogre.log\")\n\tdefer root.Destroy()\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\troot.LoadPlugin(wd + \"\/RenderSystem_GL3Plus\")\n\t\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\troot.LoadPlugin(wd + \"\/..\/frameworks\/RenderSystem_GL3Plus\")\n\t}\n\t\t\t\t\n\trenderers := root.GetAvailableRenderers()\n\tif renderers.RenderSystemListSize() != 1 {\n\t\t\n\t\tpanic(fmt.Sprintf(\"Failed to initalize RendererRenderSystem_GL\"))\n\t}\n\troot.SetRenderSystem(renderers.RenderSystemListGet(0))\n\troot.Initialise(false, \"es_core::ogre\")\n\tparams := ogre.CreateNameValuePairList()\n\tif runtime.GOOS == \"windows\" {\n\t\twindowsInfo := info.GetWindowsInfo()\n\t\twindowString := strconv.FormatUint(uint64(*(*uint32)(windowsInfo.Window)), 10)\n\t\tparams.AddPair(\"parentWindowHandle\", windowString)\n\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\tparams.AddPair(\"macAPI\", \"cocoa\")\n\t\tcocoaInfo := info.GetCocoaInfo()\n\t\twindowString := strconv.FormatUint(uint64(*(*uint32)(cocoaInfo.Window)), 10)\n\t\tparams.AddPair(\"parentWindowHandle\", windowString)\n\t}\n\t\n\trenderWindow := root.CreateRenderWindow(\"es_core::ogre\", 800, 600, false, params)\n\trenderWindow.SetVisible(true)\n\t\n\tnnGameSocket, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.BUS)\n if err != nil {\n panic(err)\n }\n _, err = nnGameSocket.Bind(\"tcp:\/\/127.0.0.1:60206\")\n if err != nil {\n panic(err)\n }\n\t\n\tnnRenderSocket, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.BUS)\n\tif err != nil {\n panic(err)\n }\n _, err = nnRenderSocket.Bind(\"tcp:\/\/127.0.0.1:60207\")\n if err != nil {\n panic(err)\n }\n\n\tnnInputPub, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.PUB)\n if err != nil {\n panic(err)\n }\n _, err = nnInputPub.Bind(\"tcp:\/\/127.0.0.1:60208\")\n if err != nil {\n panic(err)\n }\n\n\tnnInputPull, err := nanomsg.NewSocket(nanomsg.AF_SP, nanomsg.PULL)\n if err != nil {\n panic(err)\n }\n _, err = nnInputPull.Bind(\"tcp:\/\/127.0.0.1:60209\")\n if err != nil {\n panic(err)\n }\n\tgo gameThread(gameThreadParams)\n\tvar renderThreadParams RenderThreadParams\n\trenderThreadParams.start = gameThreadParams.start\n\trenderThreadParams.root = root\n\trenderThreadParams.window = window\n\trenderThreadParams.ogreWindow = renderWindow\n\t\n\tgo renderThread(renderThreadParams)\n\n\twindow.SetGrab(true)\n\tsdl.SetRelativeMouseMode(true)\n\n\tshutdownRequested := false\n\tvar is InputState\n\tis.yawSens = 0.1\n\tis.yaw = 0.0\n\tis.pitchSens = 0.1\n\tis.pitch = 0.0\n\tis.roll = 0.0\n\tis.orientationFactor = -1.0 \/\/ Look around config\n\n\n\tfor !shutdownRequested \/* && SDL_GetTicks() < MAX_RUN_TIME *\/ {\n\t\t\/\/ We wait here.\n\t\tb, err := nnInputPull.Recv(0)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err)\n\t\t}\t\n\t\ts, _, err := capn.ReadFromMemoryZeroCopy(b)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Read error %v\\n\", err)\n\t\t\treturn\n\t\t}\t\n\t\tstate := ReadRootState(s)\n\t\tfmt.Printf(\"Game push received:\\n\")\n\t\t\/\/ poll for events before processing the request\n\t\t\/\/ NOTE: this is how SDL builds the internal mouse and keyboard state\n\t\t\/\/ TODO: done this way does not meet the objectives of smooth, frame independent mouse view control,\n\t\t\/\/ Plus it throws some latency into the calling thread\n\n\t\tfor event = sdl.PollEvent(); event != nil; event = sdl.PollEvent {\n\t\t\tswitch t := event.(type) {\n\t\t\tcase *sdl.KeyDownEvent:\n\t\t\t\tfmt.Printf(\"SDL keyboard event:\\n\")\n\t\t\tcase *sdl.KeyUpEvent:\n\t\t\t\tfmt.Printf(\"SDL keyboard event:\\n\")\n\t\t\t\tif t.Keysym.Scancode == sdl.SCANCODE_ESCAPE {\n\t\t\t\t\tsendShutdown(nnRenderSocket, nnGameSocket)\n\t\t\t\t\tshutdownRequested = true\n\t\t\t\t}\n\t\t\tcase *sdl.MouseMotionEvent:\n\t\t\t\t\/\/ + when manipulating an object, - when doing a first person view .. needs to be configurable?\n\t\t\t\tis.yaw += is.orientationFactor * is.yawSens * float32(t.XRel)\n\t\t\t\tif is.yaw >= 0.0 {\n\t\t\t\t\tis.yaw = float32(math.Mod(float64(is.yaw) + 180.0, 360.0) - 180.0)\n\t\t\t\t} else {\n\t\t\t\t\tis.yaw = float32(math.Mod(float64(is.yaw) - 180.0, 360.0) + 180.0)\n\t\t\t\t}\n\t\t\t\t\/\/ + when manipulating an object, - when doing a first person view .. needs to be configurable?\n\t\t\t\tis.pitch += is.orientationFactor * is.pitchSens * float32(t.YRel)\n\t\t\t\tif is.pitch > 90.0 {\n\t\t\t\t\tis.pitch = 90.0\n\t\t\t\t} else if ( is.pitch < -90.0 ) {\n\t\t\t\t\tis.pitch = -90.0\n\t\t\t\t}\n\t\t\t\t\/\/ build a quaternion of the current orientation\n\t\t\t\tvar r ogre.Matrix3\n\t\t\t\tr.FromEulerAnglesYXZ( deg2Rad(is.yaw), deg2Rad(is.pitch), deg2Rad(is.roll)) \n\t\t\t\tis.orientation.FromRotationMatrix(r)\n\t\t\tcase *sdl.MouseButtonEvent:\n\t\t\t\tfmt.Printf(\"SDL mouse button event:\\n\")\n\t\t\tcase *sdl.QuitEvent:\n\t\t\t \/\/ push a shutdown on the control socket, game and render will pick it up later\n\t\t\t\t\/\/ NOTE: if the message patterns change we may still have to deal with hangs here\n\t\t\t\tsendShutdown(nnRenderSocket, nnGameSocket)\t\t\t\t\n\t\t\t\tshutdownRequested = true\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"SDL_Event: %T\\n\", t);\n\t\t\t\t\n\t\t\t}\n\t\t}\n\t\tswitch {\n\t\t\/\/ we are ready to process the request now\n\t\tcase state.Mouse():\n\t\t\tbuttons := sdl.GetMouseState(nil, nil)\n\t\t\tfmt.Printf(\"buttons: %d\\n\", buttons)\n\t\t\ts := capn.NewBuffer(nil)\n\t\t\tms := NewRootInputMouse(s)\n\t\t\tms.SetW(is.orientation.W())\n\t\t\tms.SetX(is.orientation.X())\n\t\t\tms.SetY(is.orientation.Y())\n\t\t\tms.SetZ(is.orientation.Z())\n\t\t\tms.SetButtons(buttons)\n\t\t\tbuf := bytes.Buffer{}\n\t\t\ts.WriteTo(&buf)\n\t\t\tnnInputPub.Send(append([]byte(\"input.mouse:\"), buf.Bytes()...), 0)\n\t\t\tfmt.Printf(\"Mouse input sent.\\n\")\n\t\t\t\n\t\tcase state.Kb():\n\t\t\/\/ looking at a few hardcoded keys for now\n\t\t\/\/ NOTE: I suspect it would be perfectly safe to grab that pointer once, and read it from a different thread?\n\t\t\tstate := sdl.GetKeyboardState()\n\t\t\tt := capn.NewBuffer(nil)\n\t\t\tkbs := NewRootInputKb(t)\t\t\t\n\t\t\tkbs.SetW(state[sdl.SCANCODE_W] != 0)\n\t\t\tkbs.SetA(state[sdl.SCANCODE_A] != 0)\n\t\t\tkbs.SetS(state[sdl.SCANCODE_S] != 0)\n\t\t\tkbs.SetD(state[sdl.SCANCODE_D] != 0)\n\t\t\tkbs.SetSpace(state[sdl.SCANCODE_SPACE] != 0)\n\t\t\tkbs.SetLalt(state[sdl.SCANCODE_LALT] != 0)\n\t\t\tb := bytes.Buffer{}\n\t\t\tt.WriteTo(&b)\n\t\t\tnnInputPub.Send(append([]byte(\"input.kb:\"), b.Bytes()...), 0)\n\t\t\tfmt.Printf(\"Keyboard input sent.\\n\")\n\t\t\t\t\n\t\tcase state.MouseReset():\n\t\t\tvar q ogre.Quaternion;\n\t\t\tis.orientation = q.FromValues(state.Quaternion().W(), state.Quaternion().X(),\n\t\t\t\tstate.Quaternion().Y(), state.Quaternion().Z())\n\t\t\tvar r ogre.Matrix3\n\t\t\tis.orientation.ToRotationMatrix(&r)\n\t\t\tvar rfYAngle, rfPAngle, rfRAngle float32\n\t\t\tr.ToEulerAnglesYXZ(&rfYAngle, &rfPAngle, &rfRAngle)\n\t\t\tis.yaw = rad2Deg(rfYAngle)\n\t\t\tis.pitch = rad2Deg(rfPAngle)\n\t\t\tis.roll = rad2Deg(rfRAngle)\n\t\tcase state.ConfigLookAround():\n\t\t\tif state.LookAround().ManipulateObject() {\n\t\t\t\tfmt.Printf(\"Input configuration: manipulate object\\n\");\n\t\t\t\tis.orientationFactor = 1.0;\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Input configuration: look around\\n\");\n\t\t\t\tis.orientationFactor = -1.0\n\t\t\t}\n\t\t}\n\t}\n\tif !shutdownRequested {\n sendShutdown(nnRenderSocket, nnGameSocket)\n shutdownRequested = true\n }\n waitShutdown(nnInputPull, &gameThreadParams)\n}\n\nfunc deg2Rad(deg float32) float32 {\n\treturn deg * math.Pi \/ 180\n}\n\nfunc rad2Deg (rad float32) float32 {\n\treturn rad * 180 \/ math.Pi\n}\n\nfunc sendShutdown(nnRenderSocket *nanomsg.Socket, nnGameSocket *nanomsg.Socket) {\n\ts := capn.NewBuffer(nil)\n\tstop := NewRootStop(s)\n\tstop.SetStop(true)\n\tbuf := bytes.Buffer{}\n\ts.WriteTo(&buf)\n\tfmt.Printf(\"Render socket shutdown.\\n\")\n\tnnRenderSocket.Send(buf.Bytes(), 0)\n\tfmt.Printf(\"Game socket shutdown.\\n\")\n\tnnGameSocket.Send(buf.Bytes(), 0)\n}\n\nfunc waitShutdown(nnInputPull *nanomsg.Socket, params *GameThreadParams) {\n\t\/\/ For now, loop the input thread for a bit to flush out any events\n\tcontinueTime := time.Since(params.start) + 500 * time.Millisecond \/\/ An eternity.\n\tfor time.Since(params.start) < continueTime {\t\n\t\tmsg, _ := nnInputPull.Recv(nanomsg.DontWait)\n\t\tif msg == nil {\n\t\t\tsdl.Delay(10)\n\t\t}\n\t}\n}\n\nfunc getSubsystemString(info sdl.SysWMInfo) string {\n\tswitch info.Subsystem {\n\tcase 0:\t\n\t return \"Unknown\"\n\tcase 1:\n\t\treturn \"Windows\"\n\tcase 2:\n\t\treturn \"X11\"\n\tcase 3:\n\t\treturn \"DirectFB\"\n\tcase 4: \n\t\treturn \"Cocoa\"\n\tcase 5:\n\t\treturn \"UiKit\"\n\t}\n\treturn \"Unknown\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 ibelie, Chen Jie, Joungtao. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License\n\/\/ that can be found in the LICENSE file.\n\npackage tygo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"io\/ioutil\"\n)\n\nvar (\n\tTS_MODULE string\n\tTS_CUR_MODULE string\n\tTS_EX_TYPE bool\n\tTS_OBJECTS map[string]*Object\n\tEXTENS_PKG map[string]string\n)\n\nfunc Typescript(dir string, name string, module string, types []Type, propPre []Type) {\n\tvar buffer bytes.Buffer\n\n\tPROP_PRE = propPre\n\tTS_MODULE = module\n\tTS_OBJECTS = ObjectMap(types, TS_MODULE == \"\")\n\n\tvar pkgTypes map[string][]Type\n\tvar sortedPkgs []string\n\tif TS_MODULE == \"\" {\n\t\tpkgTypes = PkgTypeMap(types)\n\t\tfor pkg, _ := range pkgTypes {\n\t\t\tsortedPkgs = append(sortedPkgs, pkg)\n\t\t}\n\t\tsort.Strings(sortedPkgs)\n\t} else {\n\t\tpkgTypes = map[string][]Type{module: types}\n\t\tsortedPkgs = []string{module}\n\t}\n\n\tvar modules []string\n\tfor _, pkg := range sortedPkgs {\n\t\tts := pkgTypes[pkg]\n\t\tTS_CUR_MODULE = pkg\n\t\tTS_EX_TYPE = false\n\n\t\tvar codes []string\n\t\tfor _, t := range ts {\n\t\t\tcodes = append(codes, t.Typescript())\n\t\t}\n\n\t\texType := \"\"\n\t\tif TS_EX_TYPE {\n\t\t\texType = `\n\tinterface Type {\n\t\t__class__: string;\n\t\tByteSize(): number;\n\t\tSerialize(): Uint8Array;\n\t\tDeserialize(data: Uint8Array): void;\n\t}`\n\t\t}\n\n\t\tmodules = append(modules, fmt.Sprintf(`\ndeclare module %s {%s%s\n}\n`, strings.Replace(pkg, \"\/\", \".\", -1), exType, strings.Join(codes, \"\")))\n\t}\n\n\tPROP_PRE = nil\n\tTS_OBJECTS = nil\n\tTS_EX_TYPE = false\n\tTS_MODULE = \"\"\n\tTS_CUR_MODULE = \"\"\n\n\tbuffer.Write([]byte(fmt.Sprintf(`\/\/ Generated for tyts by tygo. DO NOT EDIT!\n%s`, strings.Join(modules, \"\"))))\n\n\tif name == \"\" {\n\t\tname = module\n\t}\n\tioutil.WriteFile(path.Join(dir, name+\".d.ts\"), buffer.Bytes(), 0666)\n\tJavascript(dir, name, module, types, propPre)\n}\n\nfunc (t *Enum) Typescript() string {\n\tvar enums []string\n\tfor _, name := range t.Sorted() {\n\t\tenums = append(enums, fmt.Sprintf(`\n\t\t%s = %d`, name, t.Values[name]))\n\t}\n\treturn fmt.Sprintf(`\n\n\tconst enum %s {%s\n\t}`, t.Name, strings.Join(enums, \",\"))\n}\n\nfunc typeListTypescript(name string, typ string, ts []Type) string {\n\tvar items []string\n\tfor i, t := range ts {\n\t\titems = append(items, fmt.Sprintf(\"a%d: %s\", i, t.Typescript()))\n\t}\n\treturn fmt.Sprintf(`\n\t\tstatic S_%s%s(%s): Uint8Array;\n\t\tstatic D_%s%s(data: Uint8Array): any;`, name, typ, strings.Join(items, \", \"), name, typ)\n}\n\nfunc (t *Object) Typescript() string {\n\tvar parent string\n\tif t.HasParent() {\n\t\tparent = fmt.Sprintf(\" extends %s\", t.Parent.Typescript())\n\t}\n\tvar members []string\n\tfor _, field := range t.VisibleFields() {\n\t\tmembers = append(members, fmt.Sprintf(`\n\t\t%s: %s;`, field.Name, field.Typescript()))\n\t}\n\n\tif PROP_PRE != nil {\n\t\tfor _, field := range t.VisibleFields() {\n\t\t\tmembers = append(members, typeListTypescript(field.Name, \"\", []Type{field}))\n\t\t}\n\t}\n\n\tfor _, method := range t.Methods {\n\t\tif len(method.Params) > 0 {\n\t\t\tmembers = append(members, typeListTypescript(method.Name, \"Param\", method.Params))\n\t\t}\n\t\tif len(method.Results) > 0 {\n\t\t\tmembers = append(members, typeListTypescript(method.Name, \"Result\", method.Results))\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(`\n\n\tclass %s%s {\n\t\t__class__: string;\n\t\tByteSize(): number;\n\t\tSerialize(): Uint8Array;\n\t\tDeserialize(data: Uint8Array): void;\n%s\n\t}\n\n\tnamespace %s {\n\t\tfunction Deserialize(data: Uint8Array): %s;\n\t}`, t.Name, parent, strings.Join(members, \"\"), t.Name, t.Name)\n}\n\nfunc (t UnknownType) Typescript() string {\n\treturn \"\"\n}\n\nfunc (t SimpleType) Typescript() string {\n\tswitch t {\n\tcase SimpleType_INT32:\n\t\tfallthrough\n\tcase SimpleType_INT64:\n\t\tfallthrough\n\tcase SimpleType_UINT32:\n\t\tfallthrough\n\tcase SimpleType_UINT64:\n\t\tfallthrough\n\tcase SimpleType_FLOAT32:\n\t\tfallthrough\n\tcase SimpleType_FLOAT64:\n\t\treturn \"number\"\n\tcase SimpleType_BYTES:\n\t\treturn \"Uint8Array\"\n\tcase SimpleType_SYMBOL:\n\t\tfallthrough\n\tcase SimpleType_STRING:\n\t\treturn \"string\"\n\tcase SimpleType_BOOL:\n\t\treturn \"boolean\"\n\tdefault:\n\t\tlog.Fatalf(\"[Tygo][SimpleType] Unexpect enum value for Typescript: %d\", t)\n\t\treturn \"unknown\"\n\t}\n}\n\nfunc (t *EnumType) Typescript() string {\n\treturn t.Name\n}\n\nfunc (t *InstanceType) Typescript() string {\n\tfullName := t.Name\n\tif t.Object != nil {\n\t\tfullName = t.Object.FullName()\n\t} else if TS_MODULE == \"\" && t.PkgPath != \"\" {\n\t\tfullName = t.PkgPath + \"\/\" + t.Name\n\t} else if EXTENS_PKG != nil {\n\t\tif pkg, ok := EXTENS_PKG[t.Name]; ok {\n\t\t\tif TS_CUR_MODULE == pkg {\n\t\t\t\treturn t.Name\n\t\t\t} else {\n\t\t\t\treturn strings.Replace(pkg, \"\/\", \".\", -1) + \".\" + t.Name\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := TS_OBJECTS[fullName]; ok {\n\t\tif TS_CUR_MODULE == t.PkgPath || t.Object != nil {\n\t\t\treturn t.Name\n\t\t}\n\t\treturn strings.Replace(fullName, \"\/\", \".\", -1)\n\t} else {\n\t\tTS_EX_TYPE = true\n\t\treturn \"Type\"\n\t}\n}\n\nfunc (t *FixedPointType) Typescript() string {\n\treturn \"number\"\n}\n\nfunc (t *ListType) Typescript() string {\n\treturn fmt.Sprintf(\"%s[]\", t.E.Typescript())\n}\n\nfunc (t *DictType) Typescript() string {\n\treturn fmt.Sprintf(\"{[index: %s]: %s}\", t.K.Typescript(), t.V.Typescript())\n}\n\nfunc (t *VariantType) Typescript() string {\n\treturn \"any\"\n}\n<commit_msg>fix type bug<commit_after>\/\/ Copyright 2017 ibelie, Chen Jie, Joungtao. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License\n\/\/ that can be found in the LICENSE file.\n\npackage tygo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"io\/ioutil\"\n)\n\nvar (\n\tTS_MODULE string\n\tTS_CUR_MODULE string\n\tTS_EX_TYPE bool\n\tTS_OBJECTS map[string]*Object\n\tEXTENS_PKG map[string]string\n)\n\nfunc Typescript(dir string, name string, module string, types []Type, propPre []Type) {\n\tvar buffer bytes.Buffer\n\n\tPROP_PRE = propPre\n\tTS_MODULE = module\n\tTS_OBJECTS = ObjectMap(types, TS_MODULE == \"\")\n\n\tvar pkgTypes map[string][]Type\n\tvar sortedPkgs []string\n\tif TS_MODULE == \"\" {\n\t\tpkgTypes = PkgTypeMap(types)\n\t\tfor pkg, _ := range pkgTypes {\n\t\t\tsortedPkgs = append(sortedPkgs, pkg)\n\t\t}\n\t\tsort.Strings(sortedPkgs)\n\t} else {\n\t\tpkgTypes = map[string][]Type{module: types}\n\t\tsortedPkgs = []string{module}\n\t}\n\n\tvar modules []string\n\tfor _, pkg := range sortedPkgs {\n\t\tts := pkgTypes[pkg]\n\t\tTS_CUR_MODULE = pkg\n\t\tTS_EX_TYPE = false\n\n\t\tvar codes []string\n\t\tfor _, t := range ts {\n\t\t\tcodes = append(codes, t.Typescript())\n\t\t}\n\n\t\texType := \"\"\n\t\tif TS_EX_TYPE {\n\t\t\texType = `\n\tinterface Type {\n\t\t__class__: string;\n\t\tByteSize(): number;\n\t\tSerialize(): Uint8Array;\n\t\tDeserialize(data: Uint8Array): void;\n\t}`\n\t\t}\n\n\t\tmodules = append(modules, fmt.Sprintf(`\ndeclare module %s {%s%s\n}\n`, strings.Replace(pkg, \"\/\", \".\", -1), exType, strings.Join(codes, \"\")))\n\t}\n\n\tPROP_PRE = nil\n\tTS_OBJECTS = nil\n\tTS_EX_TYPE = false\n\tTS_MODULE = \"\"\n\tTS_CUR_MODULE = \"\"\n\n\tbuffer.Write([]byte(fmt.Sprintf(`\/\/ Generated for tyts by tygo. DO NOT EDIT!\n%s`, strings.Join(modules, \"\"))))\n\n\tif name == \"\" {\n\t\tname = module\n\t}\n\tioutil.WriteFile(path.Join(dir, name+\".d.ts\"), buffer.Bytes(), 0666)\n\tJavascript(dir, name, module, types, propPre)\n}\n\nfunc (t *Enum) Typescript() string {\n\tvar enums []string\n\tfor _, name := range t.Sorted() {\n\t\tenums = append(enums, fmt.Sprintf(`\n\t\t%s = %d`, name, t.Values[name]))\n\t}\n\treturn fmt.Sprintf(`\n\n\tconst enum %s {%s\n\t}`, t.Name, strings.Join(enums, \",\"))\n}\n\nfunc typeListTypescript(name string, typ string, ts []Type) string {\n\tvar items []string\n\tfor i, t := range ts {\n\t\titems = append(items, fmt.Sprintf(\"a%d: %s\", i, t.Typescript()))\n\t}\n\treturn fmt.Sprintf(`\n\t\tstatic S_%s%s(%s): Uint8Array;\n\t\tstatic D_%s%s(data: Uint8Array): any;`, name, typ, strings.Join(items, \", \"), name, typ)\n}\n\nfunc (t *Object) Typescript() string {\n\tvar parent string\n\tif t.HasParent() {\n\t\tparent = fmt.Sprintf(\" extends %s\", t.Parent.Typescript())\n\t}\n\tvar members []string\n\tfor _, field := range t.VisibleFields() {\n\t\tmembers = append(members, fmt.Sprintf(`\n\t\t%s: %s;`, field.Name, field.Typescript()))\n\t}\n\n\tif PROP_PRE != nil {\n\t\tfor _, field := range t.VisibleFields() {\n\t\t\tmembers = append(members, typeListTypescript(field.Name, \"\", []Type{field}))\n\t\t}\n\t}\n\n\tfor _, method := range t.Methods {\n\t\tif len(method.Params) > 0 {\n\t\t\tmembers = append(members, typeListTypescript(method.Name, \"Param\", method.Params))\n\t\t}\n\t\tif len(method.Results) > 0 {\n\t\t\tmembers = append(members, typeListTypescript(method.Name, \"Result\", method.Results))\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(`\n\n\tclass %s%s {\n\t\t__class__: string;\n\t\tByteSize(): number;\n\t\tSerialize(): Uint8Array;\n\t\tDeserialize(data: Uint8Array): void;\n%s\n\t}\n\n\tnamespace %s {\n\t\tfunction Deserialize(data: Uint8Array): %s;\n\t}`, t.Name, parent, strings.Join(members, \"\"), t.Name, t.Name)\n}\n\nfunc (t UnknownType) Typescript() string {\n\treturn \"\"\n}\n\nfunc (t SimpleType) Typescript() string {\n\tswitch t {\n\tcase SimpleType_INT32:\n\t\tfallthrough\n\tcase SimpleType_INT64:\n\t\tfallthrough\n\tcase SimpleType_UINT32:\n\t\tfallthrough\n\tcase SimpleType_UINT64:\n\t\tfallthrough\n\tcase SimpleType_FLOAT32:\n\t\tfallthrough\n\tcase SimpleType_FLOAT64:\n\t\treturn \"number\"\n\tcase SimpleType_BYTES:\n\t\treturn \"Uint8Array\"\n\tcase SimpleType_SYMBOL:\n\t\tfallthrough\n\tcase SimpleType_STRING:\n\t\treturn \"string\"\n\tcase SimpleType_BOOL:\n\t\treturn \"boolean\"\n\tdefault:\n\t\tlog.Fatalf(\"[Tygo][SimpleType] Unexpect enum value for Typescript: %d\", t)\n\t\treturn \"unknown\"\n\t}\n}\n\nfunc (t *EnumType) Typescript() string {\n\treturn t.Name\n}\n\nfunc (t *InstanceType) Typescript() string {\n\tfullName := t.Name\n\tif TS_MODULE == \"\" && t.Object != nil {\n\t\tfullName = t.Object.FullName()\n\t} else if TS_MODULE == \"\" && t.PkgPath != \"\" {\n\t\tfullName = t.PkgPath + \"\/\" + t.Name\n\t} else if EXTENS_PKG != nil {\n\t\tif pkg, ok := EXTENS_PKG[t.Name]; ok {\n\t\t\tif TS_CUR_MODULE == pkg {\n\t\t\t\treturn t.Name\n\t\t\t} else {\n\t\t\t\treturn strings.Replace(pkg, \"\/\", \".\", -1) + \".\" + t.Name\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := TS_OBJECTS[fullName]; ok {\n\t\tif TS_CUR_MODULE == t.PkgPath || t.Object != nil {\n\t\t\treturn t.Name\n\t\t}\n\t\treturn strings.Replace(fullName, \"\/\", \".\", -1)\n\t} else {\n\t\tTS_EX_TYPE = true\n\t\treturn \"Type\"\n\t}\n}\n\nfunc (t *FixedPointType) Typescript() string {\n\treturn \"number\"\n}\n\nfunc (t *ListType) Typescript() string {\n\treturn fmt.Sprintf(\"%s[]\", t.E.Typescript())\n}\n\nfunc (t *DictType) Typescript() string {\n\treturn fmt.Sprintf(\"{[index: %s]: %s}\", t.K.Typescript(), t.V.Typescript())\n}\n\nfunc (t *VariantType) Typescript() string {\n\treturn \"any\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage jaeger\n\nconst (\n\t\/\/ JaegerClientVersionTagKey is the name of the tag used to report client version.\n\tJaegerClientVersionTagKey = \"jaeger.version\"\n\n\t\/\/ TracerHostnameTagKey used to report host name of the process.\n\tTracerHostnameTagKey = \"jaeger.hostname\"\n\n\t\/\/ SamplerTypeTagKey reports which sampler was used on the root span.\n\tSamplerTypeTagKey = \"sampler.type\"\n\n\t\/\/ SamplerParamTagKey reports the parameter of the sampler, like sampling probability.\n\tSamplerParamTagKey = \"sampler.param\"\n\n\t\/\/ JaegerGoVersion is the version of the client library reported as Span tag.\n\tJaegerGoVersion = \"Golang-1.2\"\n\n\t\/\/ TracerStateHeaderName is the http header name used to propagate tracing context.\n\t\/\/ This must be in lower-case to avoid mismatches when decoding incoming headers.\n\tTracerStateHeaderName = \"uber-trace-id\"\n\n\t\/\/ TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.\n\t\/\/ This must be in lower-case to avoid mismatches when decoding incoming headers.\n\tTraceBaggageHeaderPrefix = \"uberctx-\"\n\n\tdefaultSamplingServerHostPort = \"localhost:5778\"\n\n\t\/\/ SamplerTypeConst is the type of sampler that always makes the same decision.\n\tSamplerTypeConst = \"const\"\n\n\t\/\/ SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.\n\tSamplerTypeRemote = \"remote\"\n\n\t\/\/ SamplerTypeProbabilistic is the type of sampler that samples traces\n\t\/\/ with a certain fixed probability.\n\tSamplerTypeProbabilistic = \"probabilistic\"\n\n\t\/\/ SamplerTypeRateLimiting is the type of sampler that samples\n\t\/\/ only up to a fixed number of traces per second.\n\tSamplerTypeRateLimiting = \"ratelimiting\"\n)\n<commit_msg>Preparing release 1.3.1<commit_after>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage jaeger\n\nconst (\n\t\/\/ JaegerClientVersionTagKey is the name of the tag used to report client version.\n\tJaegerClientVersionTagKey = \"jaeger.version\"\n\n\t\/\/ TracerHostnameTagKey used to report host name of the process.\n\tTracerHostnameTagKey = \"jaeger.hostname\"\n\n\t\/\/ SamplerTypeTagKey reports which sampler was used on the root span.\n\tSamplerTypeTagKey = \"sampler.type\"\n\n\t\/\/ SamplerParamTagKey reports the parameter of the sampler, like sampling probability.\n\tSamplerParamTagKey = \"sampler.param\"\n\n\t\/\/ JaegerGoVersion is the version of the client library reported as Span tag.\n\tJaegerGoVersion = \"Golang-1.3\"\n\n\t\/\/ TracerStateHeaderName is the http header name used to propagate tracing context.\n\t\/\/ This must be in lower-case to avoid mismatches when decoding incoming headers.\n\tTracerStateHeaderName = \"uber-trace-id\"\n\n\t\/\/ TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.\n\t\/\/ This must be in lower-case to avoid mismatches when decoding incoming headers.\n\tTraceBaggageHeaderPrefix = \"uberctx-\"\n\n\tdefaultSamplingServerHostPort = \"localhost:5778\"\n\n\t\/\/ SamplerTypeConst is the type of sampler that always makes the same decision.\n\tSamplerTypeConst = \"const\"\n\n\t\/\/ SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.\n\tSamplerTypeRemote = \"remote\"\n\n\t\/\/ SamplerTypeProbabilistic is the type of sampler that samples traces\n\t\/\/ with a certain fixed probability.\n\tSamplerTypeProbabilistic = \"probabilistic\"\n\n\t\/\/ SamplerTypeRateLimiting is the type of sampler that samples\n\t\/\/ only up to a fixed number of traces per second.\n\tSamplerTypeRateLimiting = \"ratelimiting\"\n)\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/docker\/cli\/internal\/test\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\n\t\"gotest.tools\/assert\"\n\tis \"gotest.tools\/assert\/cmp\"\n\t\"gotest.tools\/golden\"\n)\n\nfunc TestListErrors(t *testing.T) {\n\ttestCases := []struct {\n\t\tdescription string\n\t\targs []string\n\t\tflags map[string]string\n\t\texpectedError string\n\t\tlistFunc func(filter filters.Args) (types.PluginsListResponse, error)\n\t}{\n\t\t{\n\t\t\tdescription: \"too many arguments\",\n\t\t\targs: []string{\"foo\"},\n\t\t\texpectedError: \"accepts no arguments\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"error listing plugins\",\n\t\t\targs: []string{},\n\t\t\texpectedError: \"error listing plugins\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{}, fmt.Errorf(\"error listing plugins\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"invalid format\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{invalid format}}\",\n\t\t\t},\n\t\t\texpectedError: \"Template parsing error\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tcli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})\n\t\tcmd := newListCommand(cli)\n\t\tcmd.SetArgs(tc.args)\n\t\tfor key, value := range tc.flags {\n\t\t\tcmd.Flags().Set(key, value)\n\t\t}\n\t\tcmd.SetOutput(ioutil.Discard)\n\t\tassert.ErrorContains(t, cmd.Execute(), tc.expectedError)\n\t}\n}\n\nfunc TestList(t *testing.T) {\n\tsinglePluginListFunc := func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\treturn types.PluginsListResponse{\n\t\t\t{\n\t\t\t\tID: \"id-foo\",\n\t\t\t\tName: \"name-foo\",\n\t\t\t\tEnabled: true,\n\t\t\t\tConfig: types.PluginConfig{\n\t\t\t\t\tDescription: \"desc-bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\ttestCases := []struct {\n\t\tdescription string\n\t\targs []string\n\t\tflags map[string]string\n\t\tgolden string\n\t\tlistFunc func(filter filters.Args) (types.PluginsListResponse, error)\n\t}{\n\t\t{\n\t\t\tdescription: \"list with no additional flags\",\n\t\t\targs: []string{},\n\t\t\tgolden: \"plugin-list-without-format.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with filters\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"filter\": \"foo=bar\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-without-format.golden\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\tassert.Check(t, is.Equal(\"bar\", filter.Get(\"foo\")[0]))\n\t\t\t\treturn singlePluginListFunc(filter)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with quiet option\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"quiet\": \"true\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-quiet-option.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with no-trunc option\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"no-trunc\": \"true\",\n\t\t\t\t\"format\": \"{{ .ID }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-no-trunc-option.golden\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd\",\n\t\t\t\t\t\tName: \"name-foo\",\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t\tConfig: types.PluginConfig{\n\t\t\t\t\t\t\tDescription: \"desc-bar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with format\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{ .Name }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-format.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list output is sorted based on plugin name\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{ .Name }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-sort.golden\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-1\",\n\t\t\t\t\t\tName: \"plugin-1-foo\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-2\",\n\t\t\t\t\t\tName: \"plugin-10-foo\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-3\",\n\t\t\t\t\t\tName: \"plugin-2-foo\",\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tcli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})\n\t\tcmd := newListCommand(cli)\n\t\tcmd.SetArgs(tc.args)\n\t\tfor key, value := range tc.flags {\n\t\t\tcmd.Flags().Set(key, value)\n\t\t}\n\t\tassert.NilError(t, cmd.Execute())\n\t\tgolden.Assert(t, cli.OutBuffer().String(), tc.golden)\n\t}\n}\n<commit_msg>cli\/command\/plugin\/list_test.go:61:31: `TestList$1` - `filter` is unused (unparam)<commit_after>package plugin\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/docker\/cli\/internal\/test\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\n\t\"gotest.tools\/assert\"\n\tis \"gotest.tools\/assert\/cmp\"\n\t\"gotest.tools\/golden\"\n)\n\nfunc TestListErrors(t *testing.T) {\n\ttestCases := []struct {\n\t\tdescription string\n\t\targs []string\n\t\tflags map[string]string\n\t\texpectedError string\n\t\tlistFunc func(filter filters.Args) (types.PluginsListResponse, error)\n\t}{\n\t\t{\n\t\t\tdescription: \"too many arguments\",\n\t\t\targs: []string{\"foo\"},\n\t\t\texpectedError: \"accepts no arguments\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"error listing plugins\",\n\t\t\targs: []string{},\n\t\t\texpectedError: \"error listing plugins\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{}, fmt.Errorf(\"error listing plugins\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"invalid format\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{invalid format}}\",\n\t\t\t},\n\t\t\texpectedError: \"Template parsing error\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tcli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})\n\t\tcmd := newListCommand(cli)\n\t\tcmd.SetArgs(tc.args)\n\t\tfor key, value := range tc.flags {\n\t\t\tcmd.Flags().Set(key, value)\n\t\t}\n\t\tcmd.SetOutput(ioutil.Discard)\n\t\tassert.ErrorContains(t, cmd.Execute(), tc.expectedError)\n\t}\n}\n\nfunc TestList(t *testing.T) {\n\tsinglePluginListFunc := func(_ filters.Args) (types.PluginsListResponse, error) {\n\t\treturn types.PluginsListResponse{\n\t\t\t{\n\t\t\t\tID: \"id-foo\",\n\t\t\t\tName: \"name-foo\",\n\t\t\t\tEnabled: true,\n\t\t\t\tConfig: types.PluginConfig{\n\t\t\t\t\tDescription: \"desc-bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\ttestCases := []struct {\n\t\tdescription string\n\t\targs []string\n\t\tflags map[string]string\n\t\tgolden string\n\t\tlistFunc func(filter filters.Args) (types.PluginsListResponse, error)\n\t}{\n\t\t{\n\t\t\tdescription: \"list with no additional flags\",\n\t\t\targs: []string{},\n\t\t\tgolden: \"plugin-list-without-format.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with filters\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"filter\": \"foo=bar\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-without-format.golden\",\n\t\t\tlistFunc: func(filter filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\tassert.Check(t, is.Equal(\"bar\", filter.Get(\"foo\")[0]))\n\t\t\t\treturn singlePluginListFunc(filter)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with quiet option\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"quiet\": \"true\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-quiet-option.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with no-trunc option\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"no-trunc\": \"true\",\n\t\t\t\t\"format\": \"{{ .ID }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-no-trunc-option.golden\",\n\t\t\tlistFunc: func(_ filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd\",\n\t\t\t\t\t\tName: \"name-foo\",\n\t\t\t\t\t\tEnabled: true,\n\t\t\t\t\t\tConfig: types.PluginConfig{\n\t\t\t\t\t\t\tDescription: \"desc-bar\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"list with format\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{ .Name }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-with-format.golden\",\n\t\t\tlistFunc: singlePluginListFunc,\n\t\t},\n\t\t{\n\t\t\tdescription: \"list output is sorted based on plugin name\",\n\t\t\targs: []string{},\n\t\t\tflags: map[string]string{\n\t\t\t\t\"format\": \"{{ .Name }}\",\n\t\t\t},\n\t\t\tgolden: \"plugin-list-sort.golden\",\n\t\t\tlistFunc: func(_ filters.Args) (types.PluginsListResponse, error) {\n\t\t\t\treturn types.PluginsListResponse{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-1\",\n\t\t\t\t\t\tName: \"plugin-1-foo\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-2\",\n\t\t\t\t\t\tName: \"plugin-10-foo\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tID: \"id-3\",\n\t\t\t\t\t\tName: \"plugin-2-foo\",\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tcli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})\n\t\tcmd := newListCommand(cli)\n\t\tcmd.SetArgs(tc.args)\n\t\tfor key, value := range tc.flags {\n\t\t\tcmd.Flags().Set(key, value)\n\t\t}\n\t\tassert.NilError(t, cmd.Execute())\n\t\tgolden.Assert(t, cli.OutBuffer().String(), tc.golden)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Janoš Guljaš <janos@resenje.org>\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage httpClient\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDefaultClient(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\tdefer ts.Close()\n\n\tr, err := Default.Get(ts.URL)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestClientRetry(t *testing.T) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tl, err = net.Listen(\"tcp\", fmt.Sprintf(\"localhost:%d\", port))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer l.Close()\n\t\tserver := http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),\n\t\t}\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\tr, err := New(&Options{RetryTimeMax: 10 * time.Second}).Get(fmt.Sprintf(\"http:\/\/localhost:%d\", port))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestClientRetryFailure(t *testing.T) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tl, err = net.Listen(\"tcp\", fmt.Sprintf(\"localhost:%d\", port))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer l.Close()\n\t\tserver := http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),\n\t\t}\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\tr, err := New(&Options{RetryTimeMax: 1 * time.Second}).Get(fmt.Sprintf(\"http:\/\/localhost:%d\", port))\n\tif err == nil || !strings.Contains(err.Error(), \"getsockopt: connection refused\") {\n\t\tt.Errorf(\"expected connection refused error, got $#v\", err)\n\t}\n\tif r != nil {\n\t\tt.Error(\"unexpected not-nil response\")\n\t}\n}\n\nfunc TestClientRedirectHeaders(t *testing.T) {\n\twant := \"test httputils\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/redirect\" {\n\t\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t\t}\n\t\tgot := r.Header.Get(\"X-Test\")\n\t\tif got != want {\n\t\t\tt.Errorf(\"expected X-Test header %q, got %q\", want, got)\n\t\t}\n\t}))\n\tdefer ts.Close()\n\n\treq, err := http.NewRequest(\"GET\", ts.URL+\"\/redirect\", nil)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\treq.Header.Set(\"X-Test\", want)\n\tr, err := Default.Do(req)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestOptionsMarshalJSON(t *testing.T) {\n\to := Options{\n\t\tTimeout: 30 * time.Second,\n\t\tKeepAlive: 3 * time.Minute,\n\t\tTLSHandshakeTimeout: 100 * time.Millisecond,\n\t\tTLSSkipVerify: true,\n\t\tRetryTimeMax: 1 * time.Hour,\n\t\tRetrySleepMax: 10 * time.Second,\n\t\tRetrySleepBase: 10 * time.Microsecond,\n\t}\n\n\tb, err := o.MarshalJSON()\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\tgot := string(b)\n\twant := \"{\\\"timeout\\\":\\\"30s\\\",\\\"keep-alive\\\":\\\"3m0s\\\",\\\"tls-handshake-timeout\\\":\\\"100ms\\\",\\\"tls-skip-verify\\\":true,\\\"retry-time-max\\\":\\\"1h0m0s\\\",\\\"retry-sleep-max\\\":\\\"10s\\\",\\\"retry-sleep-base\\\":\\\"10µs\\\"}\"\n\tif got != want {\n\t\tt.Errorf(\"expected json %q, got %q\", want, got)\n\t}\n}\n\nfunc TestOptionsUnmarshalJSON(t *testing.T) {\n\to := &Options{}\n\tif err := o.UnmarshalJSON([]byte(\"{\\\"timeout\\\":\\\"30s\\\",\\\"keep-alive\\\":\\\"3m0s\\\",\\\"tls-handshake-timeout\\\":\\\"100ms\\\",\\\"tls-skip-verify\\\":true,\\\"retry-time-max\\\":\\\"1h0m0s\\\",\\\"retry-sleep-max\\\":\\\"10s\\\",\\\"retry-sleep-base\\\":\\\"10µs\\\"}\")); err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\tTimeout := 30 * time.Second\n\tif o.Timeout != Timeout {\n\t\tt.Errorf(\"expected Timeout %v, got %v\", Timeout, o.Timeout)\n\t}\n\tKeepAlive := 3 * time.Minute\n\tif o.KeepAlive != KeepAlive {\n\t\tt.Errorf(\"expected KeepAlive %v, got %v\", KeepAlive, o.KeepAlive)\n\t}\n\tTLSHandshakeTimeout := 100 * time.Millisecond\n\tif o.TLSHandshakeTimeout != TLSHandshakeTimeout {\n\t\tt.Errorf(\"expected TLSHandshakeTimeout %v, got %v\", TLSHandshakeTimeout, o.TLSHandshakeTimeout)\n\t}\n\tTLSSkipVerify := true\n\tif o.TLSSkipVerify != TLSSkipVerify {\n\t\tt.Errorf(\"expected TLSSkipVerify %v, got %v\", TLSSkipVerify, o.TLSSkipVerify)\n\t}\n\tRetryTimeMax := 1 * time.Hour\n\tif o.RetryTimeMax != RetryTimeMax {\n\t\tt.Errorf(\"expected RetryTimeMax %v, got %v\", RetryTimeMax, o.RetryTimeMax)\n\t}\n\tRetrySleepMax := 10 * time.Second\n\tif o.RetrySleepMax != RetrySleepMax {\n\t\tt.Errorf(\"expected RetrySleepMax %v, got %v\", RetrySleepMax, o.RetrySleepMax)\n\t}\n\tRetrySleepBase := 10 * time.Microsecond\n\tif o.RetrySleepBase != RetrySleepBase {\n\t\tt.Errorf(\"expected RetrySleepBase %v, got %v\", RetrySleepBase, o.RetrySleepBase)\n\t}\n}\n\nfunc TestOptionsUnmarshalJSONError(t *testing.T) {\n\to := &Options{}\n\te := \"invalid character '1' looking for beginning of object key string\"\n\tif err := o.UnmarshalJSON([]byte(\"{1}\")); err == nil || err.Error() != e {\n\t\tt.Errorf(\"expected error %q, got %v\", e, err)\n\t}\n}\n<commit_msg>Fix fmt mesage in test<commit_after>\/\/ Copyright (c) 2016, Janoš Guljaš <janos@resenje.org>\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage httpClient\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDefaultClient(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\tdefer ts.Close()\n\n\tr, err := Default.Get(ts.URL)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestClientRetry(t *testing.T) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second)\n\n\t\tl, err = net.Listen(\"tcp\", fmt.Sprintf(\"localhost:%d\", port))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer l.Close()\n\t\tserver := http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),\n\t\t}\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\tr, err := New(&Options{RetryTimeMax: 10 * time.Second}).Get(fmt.Sprintf(\"http:\/\/localhost:%d\", port))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestClientRetryFailure(t *testing.T) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tl, err = net.Listen(\"tcp\", fmt.Sprintf(\"localhost:%d\", port))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer l.Close()\n\t\tserver := http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),\n\t\t}\n\t\tif err := server.Serve(l); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\tr, err := New(&Options{RetryTimeMax: 1 * time.Second}).Get(fmt.Sprintf(\"http:\/\/localhost:%d\", port))\n\tif err == nil || !strings.Contains(err.Error(), \"getsockopt: connection refused\") {\n\t\tt.Errorf(\"expected connection refused error, got %#v\", err)\n\t}\n\tif r != nil {\n\t\tt.Error(\"unexpected not-nil response\")\n\t}\n}\n\nfunc TestClientRedirectHeaders(t *testing.T) {\n\twant := \"test httputils\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/redirect\" {\n\t\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t\t}\n\t\tgot := r.Header.Get(\"X-Test\")\n\t\tif got != want {\n\t\t\tt.Errorf(\"expected X-Test header %q, got %q\", want, got)\n\t\t}\n\t}))\n\tdefer ts.Close()\n\n\treq, err := http.NewRequest(\"GET\", ts.URL+\"\/redirect\", nil)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\treq.Header.Set(\"X-Test\", want)\n\tr, err := Default.Do(req)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r == nil {\n\t\tt.Error(\"unexpected nil response\")\n\t}\n}\n\nfunc TestOptionsMarshalJSON(t *testing.T) {\n\to := Options{\n\t\tTimeout: 30 * time.Second,\n\t\tKeepAlive: 3 * time.Minute,\n\t\tTLSHandshakeTimeout: 100 * time.Millisecond,\n\t\tTLSSkipVerify: true,\n\t\tRetryTimeMax: 1 * time.Hour,\n\t\tRetrySleepMax: 10 * time.Second,\n\t\tRetrySleepBase: 10 * time.Microsecond,\n\t}\n\n\tb, err := o.MarshalJSON()\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\tgot := string(b)\n\twant := \"{\\\"timeout\\\":\\\"30s\\\",\\\"keep-alive\\\":\\\"3m0s\\\",\\\"tls-handshake-timeout\\\":\\\"100ms\\\",\\\"tls-skip-verify\\\":true,\\\"retry-time-max\\\":\\\"1h0m0s\\\",\\\"retry-sleep-max\\\":\\\"10s\\\",\\\"retry-sleep-base\\\":\\\"10µs\\\"}\"\n\tif got != want {\n\t\tt.Errorf(\"expected json %q, got %q\", want, got)\n\t}\n}\n\nfunc TestOptionsUnmarshalJSON(t *testing.T) {\n\to := &Options{}\n\tif err := o.UnmarshalJSON([]byte(\"{\\\"timeout\\\":\\\"30s\\\",\\\"keep-alive\\\":\\\"3m0s\\\",\\\"tls-handshake-timeout\\\":\\\"100ms\\\",\\\"tls-skip-verify\\\":true,\\\"retry-time-max\\\":\\\"1h0m0s\\\",\\\"retry-sleep-max\\\":\\\"10s\\\",\\\"retry-sleep-base\\\":\\\"10µs\\\"}\")); err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\tTimeout := 30 * time.Second\n\tif o.Timeout != Timeout {\n\t\tt.Errorf(\"expected Timeout %v, got %v\", Timeout, o.Timeout)\n\t}\n\tKeepAlive := 3 * time.Minute\n\tif o.KeepAlive != KeepAlive {\n\t\tt.Errorf(\"expected KeepAlive %v, got %v\", KeepAlive, o.KeepAlive)\n\t}\n\tTLSHandshakeTimeout := 100 * time.Millisecond\n\tif o.TLSHandshakeTimeout != TLSHandshakeTimeout {\n\t\tt.Errorf(\"expected TLSHandshakeTimeout %v, got %v\", TLSHandshakeTimeout, o.TLSHandshakeTimeout)\n\t}\n\tTLSSkipVerify := true\n\tif o.TLSSkipVerify != TLSSkipVerify {\n\t\tt.Errorf(\"expected TLSSkipVerify %v, got %v\", TLSSkipVerify, o.TLSSkipVerify)\n\t}\n\tRetryTimeMax := 1 * time.Hour\n\tif o.RetryTimeMax != RetryTimeMax {\n\t\tt.Errorf(\"expected RetryTimeMax %v, got %v\", RetryTimeMax, o.RetryTimeMax)\n\t}\n\tRetrySleepMax := 10 * time.Second\n\tif o.RetrySleepMax != RetrySleepMax {\n\t\tt.Errorf(\"expected RetrySleepMax %v, got %v\", RetrySleepMax, o.RetrySleepMax)\n\t}\n\tRetrySleepBase := 10 * time.Microsecond\n\tif o.RetrySleepBase != RetrySleepBase {\n\t\tt.Errorf(\"expected RetrySleepBase %v, got %v\", RetrySleepBase, o.RetrySleepBase)\n\t}\n}\n\nfunc TestOptionsUnmarshalJSONError(t *testing.T) {\n\to := &Options{}\n\te := \"invalid character '1' looking for beginning of object key string\"\n\tif err := o.UnmarshalJSON([]byte(\"{1}\")); err == nil || err.Error() != e {\n\t\tt.Errorf(\"expected error %q, got %v\", e, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fire\n\nimport \"net\/http\"\n\n\/\/ Compose is a short-hand for chaining the specified middlewares and handler\n\/\/ together.\nfunc Compose(chain ...interface{}) http.Handler {\n\t\/\/ check length\n\tif len(chain) < 2 {\n\t\tpanic(\"expected chain to have at least two items\")\n\t}\n\n\t\/\/ get handler\n\th, ok := chain[len(chain)-1].(http.Handler)\n\tif !ok {\n\t\tpanic(`expected last chain item to be a \"http.Handler\"`)\n\t}\n\n\t\/\/ chain all middlewares\n\tfor i := len(chain) - 2; i >= 0; i-- {\n\t\t\/\/ get middleware\n\t\tm, ok := chain[i].(func(http.Handler) http.Handler)\n\t\tif !ok {\n\t\t\tpanic(`expected chain item to be a \"func(http.handler) http.Handler\"`)\n\t\t}\n\n\t\t\/\/ chain\n\t\th = m(h)\n\t}\n\n\treturn h\n}\n<commit_msg>make message case consistent<commit_after>package fire\n\nimport \"net\/http\"\n\n\/\/ Compose is a short-hand for chaining the specified middlewares and handler\n\/\/ together.\nfunc Compose(chain ...interface{}) http.Handler {\n\t\/\/ check length\n\tif len(chain) < 2 {\n\t\tpanic(\"Expected chain to have at least two items\")\n\t}\n\n\t\/\/ get handler\n\th, ok := chain[len(chain)-1].(http.Handler)\n\tif !ok {\n\t\tpanic(`Expected last chain item to be a \"http.Handler\"`)\n\t}\n\n\t\/\/ chain all middlewares\n\tfor i := len(chain) - 2; i >= 0; i-- {\n\t\t\/\/ get middleware\n\t\tm, ok := chain[i].(func(http.Handler) http.Handler)\n\t\tif !ok {\n\t\t\tpanic(`Expected chain item to be a \"func(http.handler) http.Handler\"`)\n\t\t}\n\n\t\t\/\/ chain\n\t\th = m(h)\n\t}\n\n\treturn h\n}\n<|endoftext|>"} {"text":"<commit_before>package stun\n\nimport (\n\t\"crypto\/md5\" \/\/ #nosec\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/gortc\/stun\/internal\/hmac\"\n)\n\n\/\/ separator for credentials.\nconst credentialsSep = \":\"\n\n\/\/ NewLongTermIntegrity returns new MessageIntegrity with key for long-term\n\/\/ credentials. Password, username, and realm must be SASL-prepared.\nfunc NewLongTermIntegrity(username, realm, password string) MessageIntegrity {\n\tk := strings.Join(\n\t\t[]string{\n\t\t\tusername,\n\t\t\trealm,\n\t\t\tpassword,\n\t\t},\n\t\tcredentialsSep,\n\t)\n\t\/\/ #nosec\n\th := md5.New()\n\tfmt.Fprint(h, k)\n\treturn MessageIntegrity(h.Sum(nil))\n}\n\n\/\/ NewShortTermIntegrity returns new MessageIntegrity with key for short-term\n\/\/ credentials. Password must be SASL-prepared.\nfunc NewShortTermIntegrity(password string) MessageIntegrity {\n\treturn MessageIntegrity(password)\n}\n\n\/\/ MessageIntegrity represents MESSAGE-INTEGRITY attribute.\n\/\/\n\/\/ AddTo and GetFrom methods are using zero-allocation version of hmac, see\n\/\/ internal\/hmac\/pool.go.\n\/\/\n\/\/ RFC 5389 Section 15.4\ntype MessageIntegrity []byte\n\n\/\/ ErrFingerprintBeforeIntegrity means that FINGEPRINT attribute is already in\n\/\/ message, so MESSAGE-INTEGRITY attribute cannot be added.\nvar ErrFingerprintBeforeIntegrity = errors.New(\n\t\"FINGERPRINT before MESSAGE-INTEGRITY attribute\",\n)\n\nfunc (i MessageIntegrity) String() string {\n\treturn fmt.Sprintf(\"KEY: 0x%x\", []byte(i))\n}\n\nconst messageIntegritySize = 20\n\n\/\/ AddTo adds MESSAGE-INTEGRITY attribute to message. Be advised, CPU\n\/\/ and allocations costly, can be cause of DOS.\nfunc (i MessageIntegrity) AddTo(m *Message) error {\n\tfor _, a := range m.Attributes {\n\t\t\/\/ Message should not contain FINGERPRINT attribute\n\t\t\/\/ before MESSAGE-INTEGRITY.\n\t\tif a.Type == AttrFingerprint {\n\t\t\treturn ErrFingerprintBeforeIntegrity\n\t\t}\n\t}\n\t\/\/ The text used as input to HMAC is the STUN message,\n\t\/\/ including the header, up to and including the attribute preceding the\n\t\/\/ MESSAGE-INTEGRITY attribute.\n\tlength := m.Length\n\t\/\/ Adjusting m.Length to contain MESSAGE-INTEGRITY TLV.\n\tm.Length += messageIntegritySize + attributeHeaderSize\n\tm.WriteLength() \/\/ writing length to m.Raw\n\tv := newHMAC(i, m.Raw, m.Raw[len(m.Raw):]) \/\/ calculating HMAC for adjusted m.Raw\n\tm.Length = length \/\/ changing m.Length back\n\n\t\/\/ Copy hmac value to temporary variable to protect it from resetting\n\t\/\/ while processing m.Add call.\n\tvBuf := make([]byte, sha1.Size)\n\tcopy(vBuf, v)\n\n\tm.Add(AttrMessageIntegrity, vBuf)\n\treturn nil\n}\n\n\/\/ ErrIntegrityMismatch means that computed HMAC differs from expected.\nvar ErrIntegrityMismatch = errors.New(\"integrity check failed\")\n\nfunc newHMAC(key, message, buf []byte) []byte {\n\tmac := hmac.AcquireSHA1(key)\n\twriteOrPanic(mac, message)\n\tdefer hmac.PutSHA1(mac)\n\treturn mac.Sum(buf)\n}\n\n\/\/ Check checks MESSAGE-INTEGRITY attribute. Be advised, CPU and allocations\n\/\/ costly, can be cause of DOS.\nfunc (i MessageIntegrity) Check(m *Message) error {\n\tv, err := m.Get(AttrMessageIntegrity)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Adjusting length in header to match m.Raw that was\n\t\/\/ used when computing HMAC.\n\tvar (\n\t\tlength = m.Length\n\t\tafterIntegrity = false\n\t\tsizeReduced int\n\t)\n\tfor _, a := range m.Attributes {\n\t\tif afterIntegrity {\n\t\t\tsizeReduced += nearestPaddedValueLength(int(a.Length))\n\t\t\tsizeReduced += attributeHeaderSize\n\t\t}\n\t\tif a.Type == AttrMessageIntegrity {\n\t\t\tafterIntegrity = true\n\t\t}\n\t}\n\tm.Length -= uint32(sizeReduced)\n\tm.WriteLength()\n\t\/\/ startOfHMAC should be first byte of integrity attribute.\n\tstartOfHMAC := messageHeaderSize + m.Length - (attributeHeaderSize + messageIntegritySize)\n\tb := m.Raw[:startOfHMAC] \/\/ data before integrity attribute\n\texpected := newHMAC(i, b, m.Raw[len(m.Raw):])\n\tm.Length = length\n\tm.WriteLength() \/\/ writing length back\n\treturn checkHMAC(v, expected)\n}\n<commit_msg>a:integrity: remove unnecessary multiline declaration and fix comment<commit_after>package stun\n\nimport (\n\t\"crypto\/md5\" \/\/ #nosec\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/gortc\/stun\/internal\/hmac\"\n)\n\n\/\/ separator for credentials.\nconst credentialsSep = \":\"\n\n\/\/ NewLongTermIntegrity returns new MessageIntegrity with key for long-term\n\/\/ credentials. Password, username, and realm must be SASL-prepared.\nfunc NewLongTermIntegrity(username, realm, password string) MessageIntegrity {\n\tk := strings.Join(\n\t\t[]string{\n\t\t\tusername,\n\t\t\trealm,\n\t\t\tpassword,\n\t\t},\n\t\tcredentialsSep,\n\t)\n\t\/\/ #nosec\n\th := md5.New()\n\tfmt.Fprint(h, k)\n\treturn MessageIntegrity(h.Sum(nil))\n}\n\n\/\/ NewShortTermIntegrity returns new MessageIntegrity with key for short-term\n\/\/ credentials. Password must be SASL-prepared.\nfunc NewShortTermIntegrity(password string) MessageIntegrity {\n\treturn MessageIntegrity(password)\n}\n\n\/\/ MessageIntegrity represents MESSAGE-INTEGRITY attribute.\n\/\/\n\/\/ AddTo and GetFrom methods are using zero-allocation version of hmac, see\n\/\/ internal\/hmac\/pool.go.\n\/\/\n\/\/ RFC 5389 Section 15.4\ntype MessageIntegrity []byte\n\n\/\/ ErrFingerprintBeforeIntegrity means that FINGERPRINT attribute is already in\n\/\/ message, so MESSAGE-INTEGRITY attribute cannot be added.\nvar ErrFingerprintBeforeIntegrity = errors.New(\"FINGERPRINT before MESSAGE-INTEGRITY attribute\")\n\nfunc (i MessageIntegrity) String() string {\n\treturn fmt.Sprintf(\"KEY: 0x%x\", []byte(i))\n}\n\nconst messageIntegritySize = 20\n\n\/\/ AddTo adds MESSAGE-INTEGRITY attribute to message. Be advised, CPU\n\/\/ and allocations costly, can be cause of DOS.\nfunc (i MessageIntegrity) AddTo(m *Message) error {\n\tfor _, a := range m.Attributes {\n\t\t\/\/ Message should not contain FINGERPRINT attribute\n\t\t\/\/ before MESSAGE-INTEGRITY.\n\t\tif a.Type == AttrFingerprint {\n\t\t\treturn ErrFingerprintBeforeIntegrity\n\t\t}\n\t}\n\t\/\/ The text used as input to HMAC is the STUN message,\n\t\/\/ including the header, up to and including the attribute preceding the\n\t\/\/ MESSAGE-INTEGRITY attribute.\n\tlength := m.Length\n\t\/\/ Adjusting m.Length to contain MESSAGE-INTEGRITY TLV.\n\tm.Length += messageIntegritySize + attributeHeaderSize\n\tm.WriteLength() \/\/ writing length to m.Raw\n\tv := newHMAC(i, m.Raw, m.Raw[len(m.Raw):]) \/\/ calculating HMAC for adjusted m.Raw\n\tm.Length = length \/\/ changing m.Length back\n\n\t\/\/ Copy hmac value to temporary variable to protect it from resetting\n\t\/\/ while processing m.Add call.\n\tvBuf := make([]byte, sha1.Size)\n\tcopy(vBuf, v)\n\n\tm.Add(AttrMessageIntegrity, vBuf)\n\treturn nil\n}\n\n\/\/ ErrIntegrityMismatch means that computed HMAC differs from expected.\nvar ErrIntegrityMismatch = errors.New(\"integrity check failed\")\n\nfunc newHMAC(key, message, buf []byte) []byte {\n\tmac := hmac.AcquireSHA1(key)\n\twriteOrPanic(mac, message)\n\tdefer hmac.PutSHA1(mac)\n\treturn mac.Sum(buf)\n}\n\n\/\/ Check checks MESSAGE-INTEGRITY attribute. Be advised, CPU and allocations\n\/\/ costly, can be cause of DOS.\nfunc (i MessageIntegrity) Check(m *Message) error {\n\tv, err := m.Get(AttrMessageIntegrity)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Adjusting length in header to match m.Raw that was\n\t\/\/ used when computing HMAC.\n\tvar (\n\t\tlength = m.Length\n\t\tafterIntegrity = false\n\t\tsizeReduced int\n\t)\n\tfor _, a := range m.Attributes {\n\t\tif afterIntegrity {\n\t\t\tsizeReduced += nearestPaddedValueLength(int(a.Length))\n\t\t\tsizeReduced += attributeHeaderSize\n\t\t}\n\t\tif a.Type == AttrMessageIntegrity {\n\t\t\tafterIntegrity = true\n\t\t}\n\t}\n\tm.Length -= uint32(sizeReduced)\n\tm.WriteLength()\n\t\/\/ startOfHMAC should be first byte of integrity attribute.\n\tstartOfHMAC := messageHeaderSize + m.Length - (attributeHeaderSize + messageIntegritySize)\n\tb := m.Raw[:startOfHMAC] \/\/ data before integrity attribute\n\texpected := newHMAC(i, b, m.Raw[len(m.Raw):])\n\tm.Length = length\n\tm.WriteLength() \/\/ writing length back\n\treturn checkHMAC(v, expected)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Generic JSON representation.\n\npackage json\n\nimport (\n\t\"array\";\n\t\"fmt\";\n\t\"math\";\n\t\"json\";\n\t\"strconv\";\n\t\"strings\";\n)\n\nexport const (\n\tStringKind = iota;\n\tNumberKind;\n\tMapKind;\t\t\/\/ JSON term is \"Object\", but in Go, it's a map\n\tArrayKind;\n\tBoolKind;\n\tNullKind;\n)\n\nexport type Json interface {\n\tKind() int;\n\tString() string;\n\tNumber() float64;\n\tBool() bool;\n\tGet(s string) Json;\n\tElem(i int) Json;\n\tLen() int;\n}\n\nexport func JsonToString(j Json) string {\n\tif j == nil {\n\t\treturn \"null\"\n\t}\n\tif j.Kind() == StringKind {\n\t\treturn Quote(j.String())\n\t}\n\treturn j.String()\n}\n\ntype Null struct { }\nexport var null Json = &Null{}\nfunc (*Null) Kind() int { return NullKind }\nfunc (*Null) String() string { return \"null\" }\nfunc (*Null) Number() float64 { return 0 }\nfunc (*Null) Bool() bool { return false }\nfunc (*Null) Get(s string) Json { return null }\nfunc (*Null) Elem(int) Json { return null }\nfunc (*Null) Len() int { return 0 }\n\ntype String struct { s string; Null }\nfunc (j *String) Kind() int { return StringKind }\nfunc (j *String) String() string { return j.s }\n\ntype Number struct { f float64; Null }\nfunc (j *Number) Kind() int { return NumberKind }\nfunc (j *Number) Number() float64 { return j.f }\nfunc (j *Number) String() string {\n\tif math.Floor(j.f) == j.f {\n\t\treturn fmt.sprintf(\"%.0f\", j.f);\n\t}\n\treturn fmt.sprintf(\"%g\", j.f);\n}\n\ntype Array struct { a *array.Array; Null }\nfunc (j *Array) Kind() int { return ArrayKind }\nfunc (j *Array) Len() int { return j.a.Len() }\nfunc (j *Array) Elem(i int) Json {\n\tif i < 0 || i >= j.a.Len() {\n\t\treturn null\n\t}\n\treturn j.a.At(i)\n}\nfunc (j *Array) String() string {\n\ts := \"[\";\n\tfor i := 0; i < j.a.Len(); i++ {\n\t\tif i > 0 {\n\t\t\ts += \",\";\n\t\t}\n\t\ts += JsonToString(j.a.At(i).(Json));\n\t}\n\ts += \"]\";\n\treturn s;\n}\n\ntype Bool struct { b bool; Null }\nfunc (j *Bool) Kind() int { return BoolKind }\nfunc (j *Bool) Bool() bool { return j.b }\nfunc (j *Bool) String() string {\n\tif j.b {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}\n\ntype Map struct { m *map[string]Json; Null }\nfunc (j *Map) Kind() int { return MapKind }\nfunc (j *Map) Get(s string) Json {\n\tif j.m == nil {\n\t\treturn null\n\t}\n\tv, ok := j.m[s];\n\tif !ok {\n\t\treturn null\n\t}\n\treturn v;\n}\nfunc (j *Map) String() string {\n\ts := \"{\";\n\tfirst := true;\n\tfor k,v range j.m {\n\t\tif first {\n\t\t\tfirst = false;\n\t\t} else {\n\t\t\ts += \",\";\n\t\t}\n\t\ts += Quote(k);\n\t\ts += \":\";\n\t\ts += JsonToString(v);\n\t}\n\ts += \"}\";\n\treturn s;\n}\n\nexport func Walk(j Json, path string) Json {\n\tfor len(path) > 0 {\n\t\tvar elem string;\n\t\tif i := strings.index(path, '\/'); i >= 0 {\n\t\t\telem = path[0:i];\n\t\t\tpath = path[i+1:len(path)];\n\t\t} else {\n\t\t\telem = path;\n\t\t\tpath = \"\";\n\t\t}\n\t\tswitch j.Kind() {\n\t\tcase ArrayKind:\n\t\t\tindx, err := strconv.atoi(elem);\n\t\t\tif err != nil {\n\t\t\t\treturn null\n\t\t\t}\n\t\t\tj = j.Elem(indx);\n\t\tcase MapKind:\n\t\t\tj = j.Get(elem);\n\t\tdefault:\n\t\t\treturn null\n\t\t}\n\t}\n\treturn j\n}\n\nexport func Equal(a, b Json) bool {\n\tswitch {\n\tcase a == nil && b == nil:\n\t\treturn true;\n\tcase a == nil || b == nil:\n\t\treturn false;\n\tcase a.Kind() != b.Kind():\n\t\treturn false;\n\t}\n\n\tswitch a.Kind() {\n\tcase NullKind:\n\t\treturn true;\n\tcase StringKind:\n\t\treturn a.String() == b.String();\n\tcase NumberKind:\n\t\treturn a.Number() == b.Number();\n\tcase BoolKind:\n\t\treturn a.Bool() == b.Bool();\n\tcase ArrayKind:\n\t\tif a.Len() != b.Len() {\n\t\t\treturn false;\n\t\t}\n\t\tfor i := 0; i < a.Len(); i++ {\n\t\t\tif !Equal(a.Elem(i), b.Elem(i)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\tcase MapKind:\n\t\tm := a.(*Map).m;\n\t\tif len(m) != len(b.(*Map).m) {\n\t\t\treturn false;\n\t\t}\n\t\tfor k,v range m {\n\t\t\tif !Equal(v, b.Get(k)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n\n\t\/\/ invalid kind\n\treturn false;\n}\n\n\n\/\/ Parse builder for Json objects.\n\ntype JsonBuilder struct {\n\t\/\/ either writing to *ptr\n\tptr *Json;\n\n\t\/\/ or to a[i] (can't set ptr = &a[i])\n\ta *array.Array;\n\ti int;\n\n\t\/\/ or to m[k] (can't set ptr = &m[k])\n\tm *map[string] Json;\n\tk string;\n}\n\nfunc (b *JsonBuilder) Put(j Json) {\n\tswitch {\n\tcase b.ptr != nil:\n\t\t*b.ptr = j;\n\tcase b.a != nil:\n\t\tb.a.Set(b.i, j);\n\tcase b.m != nil:\n\t\tb.m[b.k] = j;\n\t}\n}\n\nfunc (b *JsonBuilder) Get() Json {\n\tswitch {\n\tcase b.ptr != nil:\n\t\treturn *b.ptr;\n\tcase b.a != nil:\n\t\treturn b.a.At(b.i);\n\tcase b.m != nil:\n\t\treturn b.m[b.k];\n\t}\n\treturn nil\n}\n\nfunc (b *JsonBuilder) Float64(f float64) {\n\tb.Put(&Number{f, Null{}})\n}\n\nfunc (b *JsonBuilder) Int64(i int64) {\n\tb.Float64(float64(i))\n}\n\nfunc (b *JsonBuilder) Uint64(i uint64) {\n\tb.Float64(float64(i))\n}\n\nfunc (b *JsonBuilder) Bool(tf bool) {\n\tb.Put(&Bool{tf, Null{}})\n}\n\nfunc (b *JsonBuilder) Null() {\n\tb.Put(null)\n}\n\nfunc (b *JsonBuilder) String(s string) {\n\tb.Put(&String{s, Null{}})\n}\n\n\nfunc (b *JsonBuilder) Array() {\n\tb.Put(&Array{array.New(0), Null{}})\n}\n\nfunc (b *JsonBuilder) Map() {\n\tb.Put(&Map{new(map[string]Json), Null{}})\n}\n\nfunc (b *JsonBuilder) Elem(i int) Builder {\n\tbb := new(JsonBuilder);\n\tbb.a = b.Get().(*Array).a;\n\tbb.i = i;\n\tfor i >= bb.a.Len() {\n\t\tbb.a.Push(null)\n\t}\n\treturn bb\n}\n\nfunc (b *JsonBuilder) Key(k string) Builder {\n\tbb := new(JsonBuilder);\n\tbb.m = b.Get().(*Map).m;\n\tbb.k = k;\n\tbb.m[k] = null;\n\treturn bb\n}\n\nexport func StringToJson(s string) (json Json, ok bool, errtok string) {\n\tvar errindx int;\n\tvar j Json;\n\tb := new(JsonBuilder);\n\tb.ptr = &j;\n\tok, errindx, errtok = Parse(s, b);\n\tif !ok {\n\t\treturn nil, false, errtok\n\t}\n\treturn j, true, \"\"\n}\n<commit_msg>remove implicit int -> string<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Generic JSON representation.\n\npackage json\n\nimport (\n\t\"array\";\n\t\"fmt\";\n\t\"math\";\n\t\"json\";\n\t\"strconv\";\n\t\"strings\";\n)\n\nexport const (\n\tStringKind = iota;\n\tNumberKind;\n\tMapKind;\t\t\/\/ JSON term is \"Object\", but in Go, it's a map\n\tArrayKind;\n\tBoolKind;\n\tNullKind;\n)\n\nexport type Json interface {\n\tKind() int;\n\tString() string;\n\tNumber() float64;\n\tBool() bool;\n\tGet(s string) Json;\n\tElem(i int) Json;\n\tLen() int;\n}\n\nexport func JsonToString(j Json) string {\n\tif j == nil {\n\t\treturn \"null\"\n\t}\n\tif j.Kind() == StringKind {\n\t\treturn Quote(j.String())\n\t}\n\treturn j.String()\n}\n\ntype Null struct { }\nexport var null Json = &Null{}\nfunc (*Null) Kind() int { return NullKind }\nfunc (*Null) String() string { return \"null\" }\nfunc (*Null) Number() float64 { return 0 }\nfunc (*Null) Bool() bool { return false }\nfunc (*Null) Get(s string) Json { return null }\nfunc (*Null) Elem(int) Json { return null }\nfunc (*Null) Len() int { return 0 }\n\ntype String struct { s string; Null }\nfunc (j *String) Kind() int { return StringKind }\nfunc (j *String) String() string { return j.s }\n\ntype Number struct { f float64; Null }\nfunc (j *Number) Kind() int { return NumberKind }\nfunc (j *Number) Number() float64 { return j.f }\nfunc (j *Number) String() string {\n\tif math.Floor(j.f) == j.f {\n\t\treturn fmt.sprintf(\"%.0f\", j.f);\n\t}\n\treturn fmt.sprintf(\"%g\", j.f);\n}\n\ntype Array struct { a *array.Array; Null }\nfunc (j *Array) Kind() int { return ArrayKind }\nfunc (j *Array) Len() int { return j.a.Len() }\nfunc (j *Array) Elem(i int) Json {\n\tif i < 0 || i >= j.a.Len() {\n\t\treturn null\n\t}\n\treturn j.a.At(i)\n}\nfunc (j *Array) String() string {\n\ts := \"[\";\n\tfor i := 0; i < j.a.Len(); i++ {\n\t\tif i > 0 {\n\t\t\ts += \",\";\n\t\t}\n\t\ts += JsonToString(j.a.At(i).(Json));\n\t}\n\ts += \"]\";\n\treturn s;\n}\n\ntype Bool struct { b bool; Null }\nfunc (j *Bool) Kind() int { return BoolKind }\nfunc (j *Bool) Bool() bool { return j.b }\nfunc (j *Bool) String() string {\n\tif j.b {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}\n\ntype Map struct { m *map[string]Json; Null }\nfunc (j *Map) Kind() int { return MapKind }\nfunc (j *Map) Get(s string) Json {\n\tif j.m == nil {\n\t\treturn null\n\t}\n\tv, ok := j.m[s];\n\tif !ok {\n\t\treturn null\n\t}\n\treturn v;\n}\nfunc (j *Map) String() string {\n\ts := \"{\";\n\tfirst := true;\n\tfor k,v range j.m {\n\t\tif first {\n\t\t\tfirst = false;\n\t\t} else {\n\t\t\ts += \",\";\n\t\t}\n\t\ts += Quote(k);\n\t\ts += \":\";\n\t\ts += JsonToString(v);\n\t}\n\ts += \"}\";\n\treturn s;\n}\n\nexport func Walk(j Json, path string) Json {\n\tfor len(path) > 0 {\n\t\tvar elem string;\n\t\tif i := strings.index(path, \"\/\"); i >= 0 {\n\t\t\telem = path[0:i];\n\t\t\tpath = path[i+1:len(path)];\n\t\t} else {\n\t\t\telem = path;\n\t\t\tpath = \"\";\n\t\t}\n\t\tswitch j.Kind() {\n\t\tcase ArrayKind:\n\t\t\tindx, err := strconv.atoi(elem);\n\t\t\tif err != nil {\n\t\t\t\treturn null\n\t\t\t}\n\t\t\tj = j.Elem(indx);\n\t\tcase MapKind:\n\t\t\tj = j.Get(elem);\n\t\tdefault:\n\t\t\treturn null\n\t\t}\n\t}\n\treturn j\n}\n\nexport func Equal(a, b Json) bool {\n\tswitch {\n\tcase a == nil && b == nil:\n\t\treturn true;\n\tcase a == nil || b == nil:\n\t\treturn false;\n\tcase a.Kind() != b.Kind():\n\t\treturn false;\n\t}\n\n\tswitch a.Kind() {\n\tcase NullKind:\n\t\treturn true;\n\tcase StringKind:\n\t\treturn a.String() == b.String();\n\tcase NumberKind:\n\t\treturn a.Number() == b.Number();\n\tcase BoolKind:\n\t\treturn a.Bool() == b.Bool();\n\tcase ArrayKind:\n\t\tif a.Len() != b.Len() {\n\t\t\treturn false;\n\t\t}\n\t\tfor i := 0; i < a.Len(); i++ {\n\t\t\tif !Equal(a.Elem(i), b.Elem(i)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\tcase MapKind:\n\t\tm := a.(*Map).m;\n\t\tif len(m) != len(b.(*Map).m) {\n\t\t\treturn false;\n\t\t}\n\t\tfor k,v range m {\n\t\t\tif !Equal(v, b.Get(k)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n\n\t\/\/ invalid kind\n\treturn false;\n}\n\n\n\/\/ Parse builder for Json objects.\n\ntype JsonBuilder struct {\n\t\/\/ either writing to *ptr\n\tptr *Json;\n\n\t\/\/ or to a[i] (can't set ptr = &a[i])\n\ta *array.Array;\n\ti int;\n\n\t\/\/ or to m[k] (can't set ptr = &m[k])\n\tm *map[string] Json;\n\tk string;\n}\n\nfunc (b *JsonBuilder) Put(j Json) {\n\tswitch {\n\tcase b.ptr != nil:\n\t\t*b.ptr = j;\n\tcase b.a != nil:\n\t\tb.a.Set(b.i, j);\n\tcase b.m != nil:\n\t\tb.m[b.k] = j;\n\t}\n}\n\nfunc (b *JsonBuilder) Get() Json {\n\tswitch {\n\tcase b.ptr != nil:\n\t\treturn *b.ptr;\n\tcase b.a != nil:\n\t\treturn b.a.At(b.i);\n\tcase b.m != nil:\n\t\treturn b.m[b.k];\n\t}\n\treturn nil\n}\n\nfunc (b *JsonBuilder) Float64(f float64) {\n\tb.Put(&Number{f, Null{}})\n}\n\nfunc (b *JsonBuilder) Int64(i int64) {\n\tb.Float64(float64(i))\n}\n\nfunc (b *JsonBuilder) Uint64(i uint64) {\n\tb.Float64(float64(i))\n}\n\nfunc (b *JsonBuilder) Bool(tf bool) {\n\tb.Put(&Bool{tf, Null{}})\n}\n\nfunc (b *JsonBuilder) Null() {\n\tb.Put(null)\n}\n\nfunc (b *JsonBuilder) String(s string) {\n\tb.Put(&String{s, Null{}})\n}\n\n\nfunc (b *JsonBuilder) Array() {\n\tb.Put(&Array{array.New(0), Null{}})\n}\n\nfunc (b *JsonBuilder) Map() {\n\tb.Put(&Map{new(map[string]Json), Null{}})\n}\n\nfunc (b *JsonBuilder) Elem(i int) Builder {\n\tbb := new(JsonBuilder);\n\tbb.a = b.Get().(*Array).a;\n\tbb.i = i;\n\tfor i >= bb.a.Len() {\n\t\tbb.a.Push(null)\n\t}\n\treturn bb\n}\n\nfunc (b *JsonBuilder) Key(k string) Builder {\n\tbb := new(JsonBuilder);\n\tbb.m = b.Get().(*Map).m;\n\tbb.k = k;\n\tbb.m[k] = null;\n\treturn bb\n}\n\nexport func StringToJson(s string) (json Json, ok bool, errtok string) {\n\tvar errindx int;\n\tvar j Json;\n\tb := new(JsonBuilder);\n\tb.ptr = &j;\n\tok, errindx, errtok = Parse(s, b);\n\tif !ok {\n\t\treturn nil, false, errtok\n\t}\n\treturn j, true, \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectserver\"\n\t\"github.com\/Symantec\/Dominator\/lib\/srpc\"\n\tproto \"github.com\/Symantec\/Dominator\/proto\/filegenerator\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\/units\"\n)\n\nfunc newManager(objSrv objectserver.ObjectServer, logger log.Logger) *Manager {\n\tsourceReconnectChannel := make(chan string)\n\tm := &Manager{\n\t\tsourceMap: make(map[string]*sourceType),\n\t\tobjectServer: objSrv,\n\t\tmachineMap: make(map[string]*machineType),\n\t\taddMachineChannel: make(chan *machineType),\n\t\tremoveMachineChannel: make(chan string),\n\t\tupdateMachineChannel: make(chan *machineType),\n\t\tserverMessageChannel: make(chan *serverMessageType),\n\t\tsourceReconnectChannel: sourceReconnectChannel,\n\t\tobjectWaiters: make(map[hash.Hash][]chan<- hash.Hash),\n\t\tlogger: logger}\n\ttricorder.RegisterMetric(\"filegen\/client\/num-object-waiters\",\n\t\t&m.numObjectWaiters.value, units.None,\n\t\t\"number of goroutines waiting for objects\")\n\tgo m.manage(sourceReconnectChannel)\n\treturn m\n}\n\nfunc (m *Manager) manage(sourceConnectChannel <-chan string) {\n\tfor {\n\t\tselect {\n\t\tcase machine := <-m.addMachineChannel:\n\t\t\tm.addMachine(machine)\n\t\tcase hostname := <-m.removeMachineChannel:\n\t\t\tm.removeMachine(hostname)\n\t\tcase machine := <-m.updateMachineChannel:\n\t\t\tm.updateMachine(machine)\n\t\tcase serverMessage := <-m.serverMessageChannel:\n\t\t\tm.processMessage(serverMessage)\n\t\tcase sourceName := <-sourceConnectChannel:\n\t\t\tm.processSourceConnect(sourceName)\n\t\t}\n\t}\n}\n\nfunc (m *Manager) processMessage(serverMessage *serverMessageType) {\n\tif msg := serverMessage.serverMessage.GetObjectResponse; msg != nil {\n\t\tif _, _, err := m.objectServer.AddObject(\n\t\t\tbytes.NewReader(msg.Data), 0, &msg.Hash); err != nil {\n\t\t\tm.logger.Println(err)\n\t\t} else {\n\t\t\tif waiters, ok := m.objectWaiters[msg.Hash]; ok {\n\t\t\t\tfor _, channel := range waiters {\n\t\t\t\t\tchannel <- msg.Hash\n\t\t\t\t}\n\t\t\t\tdelete(m.objectWaiters, msg.Hash)\n\t\t\t}\n\t\t}\n\t}\n\tif msg := serverMessage.serverMessage.YieldResponse; msg != nil {\n\t\tif machine, ok := m.machineMap[msg.Hostname]; ok {\n\t\t\tm.handleYieldResponse(machine, msg.Files)\n\t\t} \/\/ else machine no longer known. Drop the message.\n\t}\n}\n\nfunc (m *Manager) processSourceConnect(sourceName string) {\n\tsource := m.sourceMap[sourceName]\n\tfor _, machine := range m.machineMap {\n\t\tif pathnames, ok := machine.sourceToPaths[sourceName]; ok {\n\t\t\trequest := &proto.ClientRequest{\n\t\t\t\tYieldRequest: &proto.YieldRequest{machine.machine, pathnames}}\n\t\t\tsource.sendChannel <- request\n\t\t}\n\t}\n}\n\nfunc (m *Manager) getSource(sourceName string) *sourceType {\n\tsource, ok := m.sourceMap[sourceName]\n\tif ok {\n\t\treturn source\n\t}\n\tsource = new(sourceType)\n\tsendChannel := make(chan *proto.ClientRequest, 4096)\n\tsource.sendChannel = sendChannel\n\tm.sourceMap[sourceName] = source\n\tgo manageSource(sourceName, m.sourceReconnectChannel, sendChannel,\n\t\tm.serverMessageChannel, m.logger)\n\treturn source\n}\n\nfunc manageSource(sourceName string, sourceReconnectChannel chan<- string,\n\tclientRequestChannel <-chan *proto.ClientRequest,\n\tserverMessageChannel chan<- *serverMessageType, logger log.Logger) {\n\tcloseNotifyChannel := make(chan struct{})\n\tinitialRetryTimeout := time.Millisecond * 100\n\tretryTimeout := initialRetryTimeout\n\treconnect := false\n\tfor ; ; time.Sleep(retryTimeout) {\n\t\tif retryTimeout < time.Minute {\n\t\t\tretryTimeout *= 2\n\t\t}\n\t\tclient, err := srpc.DialHTTP(\"tcp\", sourceName, time.Second*15)\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"error connecting to: %s: %s\\n\", sourceName, err)\n\t\t\tcontinue\n\t\t}\n\t\tconn, err := client.Call(\"FileGenerator.Connect\")\n\t\tif err != nil {\n\t\t\tclient.Close()\n\t\t\tlogger.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tretryTimeout = initialRetryTimeout\n\t\t\/\/ The server keeps the same encoder\/decoder pair over the lifetime of\n\t\t\/\/ the connection, so we must do the same.\n\t\tgo handleServerMessages(sourceName, gob.NewDecoder(conn),\n\t\t\tserverMessageChannel, closeNotifyChannel, logger)\n\t\tif reconnect {\n\t\t\tsourceReconnectChannel <- sourceName\n\t\t} else {\n\t\t\treconnect = true\n\t\t}\n\t\tsendClientRequests(conn, clientRequestChannel, closeNotifyChannel,\n\t\t\tlogger)\n\t\tconn.Close()\n\t\tclient.Close()\n\t}\n}\n\nfunc sendClientRequests(conn *srpc.Conn,\n\tclientRequestChannel <-chan *proto.ClientRequest,\n\tcloseNotifyChannel <-chan struct{}, logger log.Logger) {\n\tencoder := gob.NewEncoder(conn)\n\tfor {\n\t\tselect {\n\t\tcase clientRequest := <-clientRequestChannel:\n\t\t\tif err := encoder.Encode(clientRequest); err != nil {\n\t\t\t\tlogger.Printf(\"error encoding client request: %s\\n\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(clientRequestChannel) < 1 {\n\t\t\t\tif err := conn.Flush(); err != nil {\n\t\t\t\t\tlogger.Printf(\"error flushing: %s\\n\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-closeNotifyChannel:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleServerMessages(sourceName string, decoder *gob.Decoder,\n\tserverMessageChannel chan<- *serverMessageType,\n\tcloseNotifyChannel chan<- struct{}, logger log.Logger) {\n\tfor {\n\t\tvar message proto.ServerMessage\n\t\tif err := decoder.Decode(&message); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tlogger.Printf(\"connection to source: %s closed\\n\", sourceName)\n\t\t\t} else {\n\t\t\t\tlogger.Println(err)\n\t\t\t}\n\t\t\tcloseNotifyChannel <- struct{}{}\n\t\t\treturn\n\t\t}\n\t\tserverMessageChannel <- &serverMessageType{sourceName, message}\n\t}\n}\n<commit_msg>Switch lib\/filegen\/client to use connection Decode and Encode methods.<commit_after>package client\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectserver\"\n\t\"github.com\/Symantec\/Dominator\/lib\/srpc\"\n\tproto \"github.com\/Symantec\/Dominator\/proto\/filegenerator\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\/units\"\n)\n\nfunc newManager(objSrv objectserver.ObjectServer, logger log.Logger) *Manager {\n\tsourceReconnectChannel := make(chan string)\n\tm := &Manager{\n\t\tsourceMap: make(map[string]*sourceType),\n\t\tobjectServer: objSrv,\n\t\tmachineMap: make(map[string]*machineType),\n\t\taddMachineChannel: make(chan *machineType),\n\t\tremoveMachineChannel: make(chan string),\n\t\tupdateMachineChannel: make(chan *machineType),\n\t\tserverMessageChannel: make(chan *serverMessageType),\n\t\tsourceReconnectChannel: sourceReconnectChannel,\n\t\tobjectWaiters: make(map[hash.Hash][]chan<- hash.Hash),\n\t\tlogger: logger}\n\ttricorder.RegisterMetric(\"filegen\/client\/num-object-waiters\",\n\t\t&m.numObjectWaiters.value, units.None,\n\t\t\"number of goroutines waiting for objects\")\n\tgo m.manage(sourceReconnectChannel)\n\treturn m\n}\n\nfunc (m *Manager) manage(sourceConnectChannel <-chan string) {\n\tfor {\n\t\tselect {\n\t\tcase machine := <-m.addMachineChannel:\n\t\t\tm.addMachine(machine)\n\t\tcase hostname := <-m.removeMachineChannel:\n\t\t\tm.removeMachine(hostname)\n\t\tcase machine := <-m.updateMachineChannel:\n\t\t\tm.updateMachine(machine)\n\t\tcase serverMessage := <-m.serverMessageChannel:\n\t\t\tm.processMessage(serverMessage)\n\t\tcase sourceName := <-sourceConnectChannel:\n\t\t\tm.processSourceConnect(sourceName)\n\t\t}\n\t}\n}\n\nfunc (m *Manager) processMessage(serverMessage *serverMessageType) {\n\tif msg := serverMessage.serverMessage.GetObjectResponse; msg != nil {\n\t\tif _, _, err := m.objectServer.AddObject(\n\t\t\tbytes.NewReader(msg.Data), 0, &msg.Hash); err != nil {\n\t\t\tm.logger.Println(err)\n\t\t} else {\n\t\t\tif waiters, ok := m.objectWaiters[msg.Hash]; ok {\n\t\t\t\tfor _, channel := range waiters {\n\t\t\t\t\tchannel <- msg.Hash\n\t\t\t\t}\n\t\t\t\tdelete(m.objectWaiters, msg.Hash)\n\t\t\t}\n\t\t}\n\t}\n\tif msg := serverMessage.serverMessage.YieldResponse; msg != nil {\n\t\tif machine, ok := m.machineMap[msg.Hostname]; ok {\n\t\t\tm.handleYieldResponse(machine, msg.Files)\n\t\t} \/\/ else machine no longer known. Drop the message.\n\t}\n}\n\nfunc (m *Manager) processSourceConnect(sourceName string) {\n\tsource := m.sourceMap[sourceName]\n\tfor _, machine := range m.machineMap {\n\t\tif pathnames, ok := machine.sourceToPaths[sourceName]; ok {\n\t\t\trequest := &proto.ClientRequest{\n\t\t\t\tYieldRequest: &proto.YieldRequest{machine.machine, pathnames}}\n\t\t\tsource.sendChannel <- request\n\t\t}\n\t}\n}\n\nfunc (m *Manager) getSource(sourceName string) *sourceType {\n\tsource, ok := m.sourceMap[sourceName]\n\tif ok {\n\t\treturn source\n\t}\n\tsource = new(sourceType)\n\tsendChannel := make(chan *proto.ClientRequest, 4096)\n\tsource.sendChannel = sendChannel\n\tm.sourceMap[sourceName] = source\n\tgo manageSource(sourceName, m.sourceReconnectChannel, sendChannel,\n\t\tm.serverMessageChannel, m.logger)\n\treturn source\n}\n\nfunc manageSource(sourceName string, sourceReconnectChannel chan<- string,\n\tclientRequestChannel <-chan *proto.ClientRequest,\n\tserverMessageChannel chan<- *serverMessageType, logger log.Logger) {\n\tcloseNotifyChannel := make(chan struct{})\n\tinitialRetryTimeout := time.Millisecond * 100\n\tretryTimeout := initialRetryTimeout\n\treconnect := false\n\tfor ; ; time.Sleep(retryTimeout) {\n\t\tif retryTimeout < time.Minute {\n\t\t\tretryTimeout *= 2\n\t\t}\n\t\tclient, err := srpc.DialHTTP(\"tcp\", sourceName, time.Second*15)\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"error connecting to: %s: %s\\n\", sourceName, err)\n\t\t\tcontinue\n\t\t}\n\t\tconn, err := client.Call(\"FileGenerator.Connect\")\n\t\tif err != nil {\n\t\t\tclient.Close()\n\t\t\tlogger.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tretryTimeout = initialRetryTimeout\n\t\tgo handleServerMessages(sourceName, conn, serverMessageChannel,\n\t\t\tcloseNotifyChannel, logger)\n\t\tif reconnect {\n\t\t\tsourceReconnectChannel <- sourceName\n\t\t} else {\n\t\t\treconnect = true\n\t\t}\n\t\tsendClientRequests(conn, clientRequestChannel, closeNotifyChannel,\n\t\t\tlogger)\n\t\tconn.Close()\n\t\tclient.Close()\n\t}\n}\n\nfunc sendClientRequests(conn *srpc.Conn,\n\tclientRequestChannel <-chan *proto.ClientRequest,\n\tcloseNotifyChannel <-chan struct{}, logger log.Logger) {\n\tfor {\n\t\tselect {\n\t\tcase clientRequest := <-clientRequestChannel:\n\t\t\tif err := conn.Encode(clientRequest); err != nil {\n\t\t\t\tlogger.Printf(\"error encoding client request: %s\\n\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(clientRequestChannel) < 1 {\n\t\t\t\tif err := conn.Flush(); err != nil {\n\t\t\t\t\tlogger.Printf(\"error flushing: %s\\n\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-closeNotifyChannel:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleServerMessages(sourceName string, decoder srpc.Decoder,\n\tserverMessageChannel chan<- *serverMessageType,\n\tcloseNotifyChannel chan<- struct{}, logger log.Logger) {\n\tfor {\n\t\tvar message proto.ServerMessage\n\t\tif err := decoder.Decode(&message); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tlogger.Printf(\"connection to source: %s closed\\n\", sourceName)\n\t\t\t} else {\n\t\t\t\tlogger.Println(err)\n\t\t\t}\n\t\t\tcloseNotifyChannel <- struct{}{}\n\t\t\treturn\n\t\t}\n\t\tserverMessageChannel <- &serverMessageType{sourceName, message}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fragbag\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/TuftsBCB\/seq\"\n\t\"github.com\/TuftsBCB\/structure\"\n)\n\ntype Library interface {\n\t\/\/ Save should save the fragment library to the writer provided.\n\t\/\/ No particular format is specified by the public API.\n\tSave(w io.Writer) error\n\n\t\/\/ Size returns the number of fragments in the library.\n\tSize() int\n\n\t\/\/ FragmentSize returns the size of every fragment in the library.\n\t\/\/ All fragments in a library must have the same size.\n\tFragmentSize() int\n\n\t\/\/ String returns a custom string representation of the library.\n\t\/\/ This may be anything.\n\tString() string\n\n\t\/\/ Name returns a canonical name for this fragment library.\n\tName() string\n}\n\ntype StructureLibrary interface {\n\tLibrary\n\tBest([]structure.Coords) int\n\tFragment(i int) StructureFragment\n}\n\ntype StructureFragment interface {\n\tNumber() int\n\tString() string\n\tAtoms() []structure.Coords\n}\n\ntype SequenceLibrary interface {\n\tLibrary\n\tBest(seq.Sequence) int\n\tFragment(i int) SequenceFragment\n}\n\ntype SequenceFragment interface {\n\tNumber() int\n\tString() string\n\tAlignmentProb(seq.Sequence) seq.Prob\n}\n\ntype libKind string\n\nconst (\n\tkindStructureAtoms libKind = \"structure-atoms\"\n\tkindSequenceProfile = \"sequence-profile\"\n\tkindSequenceHMM = \"sequence-hmm\"\n)\n\nfunc (k libKind) String() string {\n\treturn string(k)\n}\n\ntype jsonLibrary struct {\n\tKind libKind\n\tLibrary json.RawMessage\n}\n\n\/\/ Open reads a library from the reader provided. If there is a problem\n\/\/ reading or parsing the data as a library, an error is returned.\n\/\/ If no error is returned, the Library returned is guarnateed to be one\n\/\/ of the following types of fragment libraries:\n\/\/ StructureLibrary, SequenceLibrary.\nfunc Open(r io.Reader) (Library, error) {\n\tvar jsonlib jsonLibrary\n\tdec := json.NewDecoder(r)\n\tif err := dec.Decode(&jsonlib); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbytesr := bytes.NewReader(jsonlib.Library)\n\tswitch jsonlib.Kind {\n\tcase kindStructureAtoms:\n\t\treturn openStructureAtoms(bytesr)\n\tcase kindSequenceProfile:\n\t\treturn openSequenceProfile(bytesr)\n\tcase kindSequenceHMM:\n\t\treturn openSequenceHMM(bytesr)\n\t}\n\treturn nil, fmt.Errorf(\"Unknown fragment library type: %s\", jsonlib.Kind)\n}\n\n\/\/ saveLibrary writes any library with the given kind as JSON data to w.\nfunc saveLibrary(w io.Writer, kind libKind, library interface{}) error {\n\tjsonlib := map[string]interface{}{\n\t\t\"Kind\": kind,\n\t\t\"Library\": library,\n\t}\n\treturn niceJson(w, jsonlib)\n}\n\n\/\/ niceJson is a convenience function for encoding a JSON value and writing\n\/\/ it with `json.Indent`.\nfunc niceJson(w io.Writer, v interface{}) error {\n\traw, dst := new(bytes.Buffer), new(bytes.Buffer)\n\n\tenc := json.NewEncoder(raw)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn err\n\t}\n\tif err := json.Indent(dst, raw.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn err\n\t}\n\t_, err := io.Copy(w, dst)\n\treturn err\n}\n<commit_msg>Convenient functions to tell whether a fragment library is sequence or structure based. Probably not a good idea. (Why use these when you have type switching?)<commit_after>package fragbag\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/TuftsBCB\/seq\"\n\t\"github.com\/TuftsBCB\/structure\"\n)\n\ntype Library interface {\n\t\/\/ Save should save the fragment library to the writer provided.\n\t\/\/ No particular format is specified by the public API.\n\tSave(w io.Writer) error\n\n\t\/\/ Size returns the number of fragments in the library.\n\tSize() int\n\n\t\/\/ FragmentSize returns the size of every fragment in the library.\n\t\/\/ All fragments in a library must have the same size.\n\tFragmentSize() int\n\n\t\/\/ String returns a custom string representation of the library.\n\t\/\/ This may be anything.\n\tString() string\n\n\t\/\/ Name returns a canonical name for this fragment library.\n\tName() string\n}\n\ntype StructureLibrary interface {\n\tLibrary\n\tBest([]structure.Coords) int\n\tFragment(i int) StructureFragment\n}\n\ntype StructureFragment interface {\n\tNumber() int\n\tString() string\n\tAtoms() []structure.Coords\n}\n\ntype SequenceLibrary interface {\n\tLibrary\n\tBest(seq.Sequence) int\n\tFragment(i int) SequenceFragment\n}\n\ntype SequenceFragment interface {\n\tNumber() int\n\tString() string\n\tAlignmentProb(seq.Sequence) seq.Prob\n}\n\ntype libKind string\n\nconst (\n\tkindStructureAtoms libKind = \"structure-atoms\"\n\tkindSequenceProfile = \"sequence-profile\"\n\tkindSequenceHMM = \"sequence-hmm\"\n)\n\nfunc (k libKind) String() string {\n\treturn string(k)\n}\n\ntype jsonLibrary struct {\n\tKind libKind\n\tLibrary json.RawMessage\n}\n\n\/\/ Open reads a library from the reader provided. If there is a problem\n\/\/ reading or parsing the data as a library, an error is returned.\n\/\/ If no error is returned, the Library returned is guarnateed to be one\n\/\/ of the following types of fragment libraries:\n\/\/ StructureLibrary, SequenceLibrary.\nfunc Open(r io.Reader) (Library, error) {\n\tvar jsonlib jsonLibrary\n\tdec := json.NewDecoder(r)\n\tif err := dec.Decode(&jsonlib); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbytesr := bytes.NewReader(jsonlib.Library)\n\tswitch jsonlib.Kind {\n\tcase kindStructureAtoms:\n\t\treturn openStructureAtoms(bytesr)\n\tcase kindSequenceProfile:\n\t\treturn openSequenceProfile(bytesr)\n\tcase kindSequenceHMM:\n\t\treturn openSequenceHMM(bytesr)\n\t}\n\treturn nil, fmt.Errorf(\"Unknown fragment library type: %s\", jsonlib.Kind)\n}\n\n\/\/ IsSequence returns true if the given library is a sequence fragment library.\n\/\/ Returns false otherwise.\nfunc IsSequence(lib Library) bool {\n\t_, ok := lib.(SequenceLibrary)\n\treturn ok\n}\n\n\/\/ IsStructure returns true if the given library is a structure fragment\n\/\/ library. Returns false otherwise.\nfunc IsStructure(lib Library) bool {\n\t_, ok := lib.(StructureLibrary)\n\treturn ok\n}\n\n\/\/ saveLibrary writes any library with the given kind as JSON data to w.\nfunc saveLibrary(w io.Writer, kind libKind, library interface{}) error {\n\tjsonlib := map[string]interface{}{\n\t\t\"Kind\": kind,\n\t\t\"Library\": library,\n\t}\n\treturn niceJson(w, jsonlib)\n}\n\n\/\/ niceJson is a convenience function for encoding a JSON value and writing\n\/\/ it with `json.Indent`.\nfunc niceJson(w io.Writer, v interface{}) error {\n\traw, dst := new(bytes.Buffer), new(bytes.Buffer)\n\n\tenc := json.NewEncoder(raw)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn err\n\t}\n\tif err := json.Indent(dst, raw.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn err\n\t}\n\t_, err := io.Copy(w, dst)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Symantec\/Dominator\/lib\/filesystem\"\n\t\"github.com\/Symantec\/Dominator\/lib\/format\"\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectserver\"\n)\n\nconst (\n\tdirPerms = syscall.S_IRWXU\n\tfilePerms = syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IRGRP\n)\n\nfunc createFile(filename string) error {\n\tif file, err := os.Create(filename); err != nil {\n\t\treturn err\n\t} else {\n\t\t\/\/ Don't wait for finaliser to close, otherwise we can have too many\n\t\t\/\/ open files.\n\t\tfile.Close()\n\t\treturn nil\n\t}\n}\n\nfunc unpack(fs *filesystem.FileSystem, objectsGetter objectserver.ObjectsGetter,\n\tdirname string, logger log.Logger) error {\n\tfor _, entry := range fs.EntryList {\n\t\tif entry.Name == \".inodes\" {\n\t\t\treturn errors.New(\"cannot unpack a file-system with \/.inodes\")\n\t\t}\n\t}\n\tos.Mkdir(dirname, dirPerms)\n\tinodesDir := path.Join(dirname, \".inodes\")\n\tif err := os.Mkdir(inodesDir, dirPerms); err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(inodesDir)\n\tvar statfs syscall.Statfs_t\n\tif err := syscall.Statfs(inodesDir, &statfs); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Unable to Statfs: %s %s\\n\",\n\t\t\tinodesDir, err))\n\t}\n\tif fs.TotalDataBytes > uint64(statfs.Bsize)*statfs.Bfree {\n\t\treturn errors.New(\"image will not fit on file-system\")\n\t}\n\thashes, inums, lengths := getHashes(fs)\n\terr := writeObjects(objectsGetter, hashes, inums, lengths, inodesDir,\n\t\tlogger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstartWriteTime := time.Now()\n\tif err := writeInodes(fs.InodeTable, inodesDir); err != nil {\n\t\treturn err\n\t}\n\tif err = fs.DirectoryInode.Write(dirname); err != nil {\n\t\treturn err\n\t}\n\tstartBuildTime := time.Now()\n\twriteDuration := startBuildTime.Sub(startWriteTime)\n\terr = buildTree(&fs.DirectoryInode, dirname, \"\", inodesDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuildDuration := time.Since(startBuildTime)\n\tlogger.Printf(\"Unpacked file-system: made inodes in %s, built tree in %s\\n\",\n\t\tformat.Duration(writeDuration), format.Duration(buildDuration))\n\treturn nil\n}\n\nfunc getHashes(fs *filesystem.FileSystem) ([]hash.Hash, []uint64, []uint64) {\n\thashes := make([]hash.Hash, 0, fs.NumRegularInodes)\n\tinums := make([]uint64, 0, fs.NumRegularInodes)\n\tlengths := make([]uint64, 0, fs.NumRegularInodes)\n\tfor inum, inode := range fs.InodeTable {\n\t\tif inode, ok := inode.(*filesystem.RegularInode); ok {\n\t\t\tif inode.Size > 0 {\n\t\t\t\thashes = append(hashes, inode.Hash)\n\t\t\t\tinums = append(inums, inum)\n\t\t\t\tlengths = append(lengths, inode.Size)\n\t\t\t}\n\t\t}\n\t}\n\treturn hashes, inums, lengths\n}\n\nfunc writeObjects(objectsGetter objectserver.ObjectsGetter, hashes []hash.Hash,\n\tinums []uint64, lengths []uint64, inodesDir string,\n\tlogger log.Logger) error {\n\tstartTime := time.Now()\n\tobjectsReader, err := objectsGetter.GetObjects(hashes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting object reader: %s\\n\", err)\n\t}\n\tdefer objectsReader.Close()\n\tvar totalLength uint64\n\tbuffer := make([]byte, 32<<10)\n\tfor index := range hashes {\n\t\terr = writeObject(objectsReader, inums[index], lengths[index],\n\t\t\tinodesDir, buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttotalLength += lengths[index]\n\t}\n\tduration := time.Since(startTime)\n\tspeed := uint64(float64(totalLength) \/ duration.Seconds())\n\tlogger.Printf(\"Copied %d objects (%s) in %s (%s\/s)\\n\",\n\t\tlen(hashes), format.FormatBytes(totalLength), format.Duration(duration),\n\t\tformat.FormatBytes(speed))\n\treturn nil\n}\n\nfunc writeObject(objectsReader objectserver.ObjectsReader, inodeNumber uint64,\n\tlength uint64, inodesDir string, buffer []byte) error {\n\trlength, reader, err := objectsReader.NextObject()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\tif rlength != length {\n\t\treturn errors.New(\"mismatched lengths\")\n\t}\n\tfilename := path.Join(inodesDir, fmt.Sprintf(\"%d\", inodeNumber))\n\tdestFile, err := os.OpenFile(filename,\n\t\tos.O_CREATE|os.O_TRUNC|os.O_WRONLY, filePerms)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdoClose := true\n\tdefer func() {\n\t\tif doClose {\n\t\t\tdestFile.Close()\n\t\t}\n\t}()\n\tiLength := int64(length)\n\tnCopied, err := io.CopyBuffer(destFile, io.LimitReader(reader, iLength),\n\t\tbuffer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying: %s\", err)\n\t}\n\tif nCopied != iLength {\n\t\treturn fmt.Errorf(\"expected length: %d, got: %d for: %s\\n\",\n\t\t\tlength, nCopied, filename)\n\t}\n\tdoClose = false\n\treturn destFile.Close()\n}\n\nfunc writeInode(inode filesystem.GenericInode, filename string) error {\n\tswitch inode := inode.(type) {\n\tcase *filesystem.RegularInode:\n\t\tif inode.Size < 1 {\n\t\t\tif err := createFile(filename); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := inode.WriteMetadata(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.ComputedRegularInode:\n\t\tif err := createFile(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttmpInode := &filesystem.RegularInode{\n\t\t\tMode: inode.Mode,\n\t\t\tUid: inode.Uid,\n\t\t\tGid: inode.Gid,\n\t\t}\n\t\tif err := tmpInode.WriteMetadata(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.SymlinkInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.SpecialInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.DirectoryInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unsupported inode type\")\n\t}\n\treturn nil\n}\n\nfunc writeInodes(inodeTable filesystem.InodeTable, inodesDir string) error {\n\tfor inodeNumber, inode := range inodeTable {\n\t\tfilename := path.Join(inodesDir, fmt.Sprintf(\"%d\", inodeNumber))\n\t\tif err := writeInode(inode, filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc buildTree(directory *filesystem.DirectoryInode,\n\trootDir, mySubPathName, inodesDir string) error {\n\tfor _, dirent := range directory.EntryList {\n\t\toldPath := path.Join(inodesDir, fmt.Sprintf(\"%d\", dirent.InodeNumber))\n\t\tnewSubPath := path.Join(mySubPathName, dirent.Name)\n\t\tnewFullPath := path.Join(rootDir, newSubPath)\n\t\tif inode := dirent.Inode(); inode == nil {\n\t\t\tpanic(\"no inode pointer for: \" + newSubPath)\n\t\t} else if dinode, ok := inode.(*filesystem.DirectoryInode); ok {\n\t\t\tif err := renameDir(oldPath, newFullPath, dinode); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr := buildTree(dinode, rootDir, newSubPath, inodesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif err := link(oldPath, newFullPath, inode); err != nil {\n\t\t\t\tif !os.IsNotExist(err) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc link(oldname, newname string, inode filesystem.GenericInode) error {\n\tif err := os.Link(oldname, newname); err == nil {\n\t\treturn nil\n\t}\n\tif finode, ok := inode.(*filesystem.RegularInode); ok {\n\t\tif finode.Size > 0 {\n\t\t\treader, err := os.Open(oldname)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter, err := os.Create(newname)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := io.Copy(writer, reader); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif err := writeInode(inode, newname); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Remove(oldname); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc renameDir(oldpath, newpath string,\n\tinode *filesystem.DirectoryInode) error {\n\tif err := os.Rename(oldpath, newpath); err == nil {\n\t\treturn nil\n\t}\n\tif oldFi, err := os.Lstat(oldpath); err != nil {\n\t\treturn err\n\t} else {\n\t\tif !oldFi.IsDir() {\n\t\t\treturn fmt.Errorf(\"%s is not a directory\", oldpath)\n\t\t}\n\t}\n\tif newFi, err := os.Lstat(newpath); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tif err := inode.Write(newpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif !newFi.IsDir() {\n\t\t\treturn fmt.Errorf(\"%s is not a directory\", newpath)\n\t\t}\n\t\tif err := inode.WriteMetadata(newpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := os.Remove(oldpath); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Fix file leak in lib\/filesystem\/util.link() function.<commit_after>package util\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Symantec\/Dominator\/lib\/filesystem\"\n\t\"github.com\/Symantec\/Dominator\/lib\/format\"\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectserver\"\n)\n\nconst (\n\tdirPerms = syscall.S_IRWXU\n\tfilePerms = syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IRGRP\n)\n\nfunc createEmptyFile(filename string) error {\n\tif file, err := os.Create(filename); err != nil {\n\t\treturn err\n\t} else {\n\t\t\/\/ Don't wait for finaliser to close, otherwise we can have too many\n\t\t\/\/ open files.\n\t\tfile.Close()\n\t\treturn nil\n\t}\n}\n\nfunc unpack(fs *filesystem.FileSystem, objectsGetter objectserver.ObjectsGetter,\n\tdirname string, logger log.Logger) error {\n\tfor _, entry := range fs.EntryList {\n\t\tif entry.Name == \".inodes\" {\n\t\t\treturn errors.New(\"cannot unpack a file-system with \/.inodes\")\n\t\t}\n\t}\n\tos.Mkdir(dirname, dirPerms)\n\tinodesDir := path.Join(dirname, \".inodes\")\n\tif err := os.Mkdir(inodesDir, dirPerms); err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(inodesDir)\n\tvar statfs syscall.Statfs_t\n\tif err := syscall.Statfs(inodesDir, &statfs); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Unable to Statfs: %s %s\\n\",\n\t\t\tinodesDir, err))\n\t}\n\tif fs.TotalDataBytes > uint64(statfs.Bsize)*statfs.Bfree {\n\t\treturn errors.New(\"image will not fit on file-system\")\n\t}\n\thashes, inums, lengths := getHashes(fs)\n\terr := writeObjects(objectsGetter, hashes, inums, lengths, inodesDir,\n\t\tlogger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstartWriteTime := time.Now()\n\tif err := writeInodes(fs.InodeTable, inodesDir); err != nil {\n\t\treturn err\n\t}\n\tif err = fs.DirectoryInode.Write(dirname); err != nil {\n\t\treturn err\n\t}\n\tstartBuildTime := time.Now()\n\twriteDuration := startBuildTime.Sub(startWriteTime)\n\terr = buildTree(&fs.DirectoryInode, dirname, \"\", inodesDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuildDuration := time.Since(startBuildTime)\n\tlogger.Printf(\"Unpacked file-system: made inodes in %s, built tree in %s\\n\",\n\t\tformat.Duration(writeDuration), format.Duration(buildDuration))\n\treturn nil\n}\n\nfunc getHashes(fs *filesystem.FileSystem) ([]hash.Hash, []uint64, []uint64) {\n\thashes := make([]hash.Hash, 0, fs.NumRegularInodes)\n\tinums := make([]uint64, 0, fs.NumRegularInodes)\n\tlengths := make([]uint64, 0, fs.NumRegularInodes)\n\tfor inum, inode := range fs.InodeTable {\n\t\tif inode, ok := inode.(*filesystem.RegularInode); ok {\n\t\t\tif inode.Size > 0 {\n\t\t\t\thashes = append(hashes, inode.Hash)\n\t\t\t\tinums = append(inums, inum)\n\t\t\t\tlengths = append(lengths, inode.Size)\n\t\t\t}\n\t\t}\n\t}\n\treturn hashes, inums, lengths\n}\n\nfunc writeObjects(objectsGetter objectserver.ObjectsGetter, hashes []hash.Hash,\n\tinums []uint64, lengths []uint64, inodesDir string,\n\tlogger log.Logger) error {\n\tstartTime := time.Now()\n\tobjectsReader, err := objectsGetter.GetObjects(hashes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting object reader: %s\\n\", err)\n\t}\n\tdefer objectsReader.Close()\n\tvar totalLength uint64\n\tbuffer := make([]byte, 32<<10)\n\tfor index := range hashes {\n\t\terr = writeObject(objectsReader, inums[index], lengths[index],\n\t\t\tinodesDir, buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttotalLength += lengths[index]\n\t}\n\tduration := time.Since(startTime)\n\tspeed := uint64(float64(totalLength) \/ duration.Seconds())\n\tlogger.Printf(\"Copied %d objects (%s) in %s (%s\/s)\\n\",\n\t\tlen(hashes), format.FormatBytes(totalLength), format.Duration(duration),\n\t\tformat.FormatBytes(speed))\n\treturn nil\n}\n\nfunc writeObject(objectsReader objectserver.ObjectsReader, inodeNumber uint64,\n\tlength uint64, inodesDir string, buffer []byte) error {\n\trlength, reader, err := objectsReader.NextObject()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\tif rlength != length {\n\t\treturn errors.New(\"mismatched lengths\")\n\t}\n\tfilename := path.Join(inodesDir, fmt.Sprintf(\"%d\", inodeNumber))\n\tdestFile, err := os.OpenFile(filename,\n\t\tos.O_CREATE|os.O_TRUNC|os.O_WRONLY, filePerms)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdoClose := true\n\tdefer func() {\n\t\tif doClose {\n\t\t\tdestFile.Close()\n\t\t}\n\t}()\n\tiLength := int64(length)\n\tnCopied, err := io.CopyBuffer(destFile, io.LimitReader(reader, iLength),\n\t\tbuffer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying: %s\", err)\n\t}\n\tif nCopied != iLength {\n\t\treturn fmt.Errorf(\"expected length: %d, got: %d for: %s\\n\",\n\t\t\tlength, nCopied, filename)\n\t}\n\tdoClose = false\n\treturn destFile.Close()\n}\n\nfunc writeInode(inode filesystem.GenericInode, filename string) error {\n\tswitch inode := inode.(type) {\n\tcase *filesystem.RegularInode:\n\t\tif inode.Size < 1 {\n\t\t\tif err := createEmptyFile(filename); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := inode.WriteMetadata(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.ComputedRegularInode:\n\t\tif err := createEmptyFile(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttmpInode := &filesystem.RegularInode{\n\t\t\tMode: inode.Mode,\n\t\t\tUid: inode.Uid,\n\t\t\tGid: inode.Gid,\n\t\t}\n\t\tif err := tmpInode.WriteMetadata(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.SymlinkInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.SpecialInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *filesystem.DirectoryInode:\n\t\tif err := inode.Write(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unsupported inode type\")\n\t}\n\treturn nil\n}\n\nfunc writeInodes(inodeTable filesystem.InodeTable, inodesDir string) error {\n\tfor inodeNumber, inode := range inodeTable {\n\t\tfilename := path.Join(inodesDir, fmt.Sprintf(\"%d\", inodeNumber))\n\t\tif err := writeInode(inode, filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc buildTree(directory *filesystem.DirectoryInode,\n\trootDir, mySubPathName, inodesDir string) error {\n\tfor _, dirent := range directory.EntryList {\n\t\toldPath := path.Join(inodesDir, fmt.Sprintf(\"%d\", dirent.InodeNumber))\n\t\tnewSubPath := path.Join(mySubPathName, dirent.Name)\n\t\tnewFullPath := path.Join(rootDir, newSubPath)\n\t\tif inode := dirent.Inode(); inode == nil {\n\t\t\tpanic(\"no inode pointer for: \" + newSubPath)\n\t\t} else if dinode, ok := inode.(*filesystem.DirectoryInode); ok {\n\t\t\tif err := renameDir(oldPath, newFullPath, dinode); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr := buildTree(dinode, rootDir, newSubPath, inodesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif err := link(oldPath, newFullPath, inode); err != nil {\n\t\t\t\tif !os.IsNotExist(err) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc link(oldname, newname string, inode filesystem.GenericInode) error {\n\tif err := os.Link(oldname, newname); err == nil {\n\t\treturn nil\n\t}\n\tif finode, ok := inode.(*filesystem.RegularInode); ok {\n\t\tif finode.Size > 0 {\n\t\t\treader, err := os.Open(oldname)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer reader.Close()\n\t\t\twriter, err := os.Create(newname)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer writer.Close()\n\t\t\tif _, err := io.Copy(writer, reader); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif err := writeInode(inode, newname); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc renameDir(oldpath, newpath string,\n\tinode *filesystem.DirectoryInode) error {\n\tif err := os.Rename(oldpath, newpath); err == nil {\n\t\treturn nil\n\t}\n\tif oldFi, err := os.Lstat(oldpath); err != nil {\n\t\treturn err\n\t} else {\n\t\tif !oldFi.IsDir() {\n\t\t\treturn fmt.Errorf(\"%s is not a directory\", oldpath)\n\t\t}\n\t}\n\tif newFi, err := os.Lstat(newpath); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tif err := inode.Write(newpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif !newFi.IsDir() {\n\t\t\treturn fmt.Errorf(\"%s is not a directory\", newpath)\n\t\t}\n\t\tif err := inode.WriteMetadata(newpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := os.Remove(oldpath); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package flow\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tpkgPath = reflect.TypeOf(Flow{}).PkgPath()\n\tDefaultDebug = false\n)\n\ntype debugInfo struct {\n\tStack string\n\tInfo string\n}\n\nfunc (d *debugInfo) String() string {\n\treturn d.Stack + \" - \" + d.Info\n}\n\ntype Flow struct {\n\terrChan chan error\n\tstopChan chan struct{}\n\tref *int32\n\twg sync.WaitGroup\n\tParent *Flow\n\tChildren []*Flow\n\tstoped int32\n\texited int32\n\tonClose []func()\n\tid uintptr\n\n\tmutex sync.Mutex\n\tdebug []debugInfo\n\tprinted int32\n}\n\nfunc NewEx(n int) *Flow {\n\tf := &Flow{\n\t\terrChan: make(chan error, 1),\n\t\tstopChan: make(chan struct{}),\n\t\tref: new(int32),\n\t}\n\tf.appendDebug(\"init\")\n\treturn f\n}\n\nfunc New() *Flow {\n\treturn NewEx(0)\n}\n\nfunc (f *Flow) WaitNotify(ch chan struct{}) bool {\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tcase <-f.IsClose():\n\t\treturn false\n\t}\n}\n\nfunc (f *Flow) MarkExit() bool {\n\treturn atomic.CompareAndSwapInt32(&f.exited, 0, 1)\n}\n\nfunc (f *Flow) IsExit() bool {\n\treturn atomic.LoadInt32(&f.exited) == 1\n}\n\nfunc (f *Flow) printDebug() {\n\tbuf := bytes.NewBuffer(nil)\n\tmaxLength := 0\n\tfor _, d := range f.debug {\n\t\tif maxLength < len(d.Stack) {\n\t\t\tmaxLength = len(d.Stack)\n\t\t}\n\t}\n\tfill := func(a string, n int) string {\n\t\treturn a + strings.Repeat(\" \", n-len(a))\n\t}\n\tbuf.WriteString(\"\\n\")\n\tfor _, d := range f.debug {\n\t\tbuf.WriteString(fmt.Sprint(&f) + \" \")\n\t\tbuf.WriteString(fill(d.Stack, maxLength) + \" - \" + d.Info + \"\\n\")\n\t}\n\tprint(buf.String())\n}\n\nfunc (f *Flow) appendDebug(info string) {\n\tpc, fp, line, _ := runtime.Caller(f.getCaller())\n\tname := runtime.FuncForPC(pc).Name()\n\tstack := fmt.Sprintf(\"%v:%v %v\", path.Base(fp), line, path.Base(name))\n\tf.debug = append(f.debug, debugInfo{stack, info})\n}\n\nfunc (f *Flow) SetOnClose(exit func()) *Flow {\n\tf.onClose = []func(){exit}\n\treturn f\n}\n\nfunc (f *Flow) AddOnClose(exit func()) *Flow {\n\tf.onClose = append(f.onClose, exit)\n\treturn f\n}\n\nconst (\n\tF_CLOSED = true\n\tF_TIMEOUT = false\n)\n\nfunc (f *Flow) Tick(t *time.Ticker) bool {\n\tselect {\n\tcase <-t.C:\n\t\treturn F_TIMEOUT\n\tcase <-f.IsClose():\n\t\treturn F_CLOSED\n\t}\n}\n\nfunc (f *Flow) CloseOrWait(duration time.Duration) bool {\n\tselect {\n\tcase <-time.After(duration):\n\t\treturn F_TIMEOUT\n\tcase <-f.IsClose():\n\t\treturn F_CLOSED\n\t}\n}\n\nfunc (f *Flow) Error(err error) {\n\tf.errChan <- err\n}\n\nfunc (f *Flow) ForkTo(ref **Flow, exit func()) {\n\t*ref = f.Fork(0).AddOnClose(exit)\n}\n\nfunc (f *Flow) Fork(n int) *Flow {\n\tf2 := NewEx(n)\n\tf2.Parent = f\n\t\/\/ TODO(chzyer): test it !\n\tf2.errChan = f.errChan\n\tf.Children = append(f.Children, f2)\n\tf.Add(1) \/\/ for f2\n\treturn f2\n}\n\nfunc (f *Flow) StopAll() {\n\tflow := f\n\tfor flow.Parent != nil {\n\t\tflow = flow.Parent\n\t}\n\tflow.Stop()\n}\n\nfunc (f *Flow) Close() {\n\tf.appendDebug(\"close\")\n\tf.close()\n}\n\nfunc (f *Flow) close() {\n\tf.Stop()\n\tf.wait()\n}\n\nfunc (f *Flow) Stop() {\n\tif !atomic.CompareAndSwapInt32(&f.stoped, 0, 1) {\n\t\treturn\n\t}\n\tf.appendDebug(\"stop\")\n\n\tclose(f.stopChan)\n\tfor _, cf := range f.Children {\n\t\tcf.Stop()\n\t}\n\tif len(f.onClose) > 0 {\n\t\tgo func() {\n\t\t\tfor _, f := range f.onClose {\n\t\t\t\tf()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (f *Flow) IsClosed() bool {\n\treturn atomic.LoadInt32(&f.stoped) == 1\n}\n\nfunc (f *Flow) IsClose() chan struct{} {\n\treturn f.stopChan\n}\n\nfunc (f *Flow) Add(n int) {\n\tatomic.AddInt32(f.ref, int32(n))\n\tf.appendDebug(fmt.Sprintf(\"add: %v, ref: %v\", n, *f.ref))\n\tf.wg.Add(n)\n}\n\nfunc (f *Flow) getCaller() int {\n\tfor i := 0; ; i++ {\n\t\tpc, _, _, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tf := runtime.FuncForPC(pc).Name()\n\t\tif !strings.HasPrefix(f, pkgPath) {\n\t\t\treturn i - 1\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (f *Flow) Done() {\n\tf.wg.Done()\n\tif atomic.AddInt32(f.ref, -1) == 0 {\n\t\tf.Stop()\n\t}\n}\n\nfunc (f *Flow) DoneAndClose() {\n\tf.Done()\n\tf.appendDebug(fmt.Sprintf(\"done and close, ref: %v\", *f.ref))\n\tf.Stop()\n}\n\nfunc (f *Flow) wait() {\n\tf.appendDebug(\"wait\")\n\n\tdone := make(chan struct{})\n\tprinted := int32(0)\n\tif DefaultDebug && atomic.CompareAndSwapInt32(&f.printed, 0, 1) {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase <-time.After(1000 * time.Millisecond):\n\t\t\t\tf.printDebug()\n\t\t\t\tatomic.StoreInt32(&printed, 1)\n\t\t\t}\n\t\t}()\n\t}\n\t<-f.stopChan\n\tf.wg.Wait()\n\tclose(done)\n\tif atomic.LoadInt32(&printed) == 1 {\n\t\tprintln(fmt.Sprint(&f) + \" - exit\")\n\t}\n\n\tif f.Parent != nil {\n\t\tf.Parent.Done()\n\t\tf.Parent = nil\n\t}\n}\n\nfunc (f *Flow) Wait() error {\n\tsignalChan := make(chan os.Signal)\n\tsignal.Notify(signalChan,\n\t\tos.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGHUP)\n\tvar err error\n\tselect {\n\tcase <-f.IsClose():\n\t\tf.appendDebug(\"got closed\")\n\tcase <-signalChan:\n\t\tf.appendDebug(\"got signal\")\n\t\tf.Stop()\n\tcase err = <-f.errChan:\n\t\tf.appendDebug(fmt.Sprintf(\"got error: %v\", err))\n\n\t\tif err != nil {\n\t\t\tf.Stop()\n\t\t}\n\t}\n\n\tgo func() {\n\t\t<-signalChan\n\t\t\/\/ force close\n\t\tprintln(\"force close\")\n\t\tos.Exit(1)\n\t}()\n\n\tf.wait()\n\treturn err\n}\n<commit_msg>add GetDebug() []byte<commit_after>package flow\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tpkgPath = reflect.TypeOf(Flow{}).PkgPath()\n\tDefaultDebug = false\n)\n\ntype debugInfo struct {\n\tStack string\n\tInfo string\n}\n\nfunc (d *debugInfo) String() string {\n\treturn d.Stack + \" - \" + d.Info\n}\n\ntype Flow struct {\n\terrChan chan error\n\tstopChan chan struct{}\n\tref *int32\n\twg sync.WaitGroup\n\tParent *Flow\n\tChildren []*Flow\n\tstoped int32\n\texited int32\n\tonClose []func()\n\tid uintptr\n\n\tmutex sync.Mutex\n\tdebug []debugInfo\n\tprinted int32\n}\n\nfunc NewEx(n int) *Flow {\n\tf := &Flow{\n\t\terrChan: make(chan error, 1),\n\t\tstopChan: make(chan struct{}),\n\t\tref: new(int32),\n\t}\n\tf.appendDebug(\"init\")\n\treturn f\n}\n\nfunc New() *Flow {\n\treturn NewEx(0)\n}\n\nfunc (f *Flow) WaitNotify(ch chan struct{}) bool {\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tcase <-f.IsClose():\n\t\treturn false\n\t}\n}\n\nfunc (f *Flow) MarkExit() bool {\n\treturn atomic.CompareAndSwapInt32(&f.exited, 0, 1)\n}\n\nfunc (f *Flow) IsExit() bool {\n\treturn atomic.LoadInt32(&f.exited) == 1\n}\n\nfunc (f *Flow) GetDebug() []byte {\n\tbuf := bytes.NewBuffer(nil)\n\tmaxLength := 0\n\tfor _, d := range f.debug {\n\t\tif maxLength < len(d.Stack) {\n\t\t\tmaxLength = len(d.Stack)\n\t\t}\n\t}\n\tfill := func(a string, n int) string {\n\t\treturn a + strings.Repeat(\" \", n-len(a))\n\t}\n\tbuf.WriteString(\"\\n\")\n\tfor _, d := range f.debug {\n\t\tbuf.WriteString(fill(d.Stack, maxLength) + \" - \" + d.Info + \"\\n\")\n\t}\n\treturn buf.Bytes()\n}\n\nfunc (f *Flow) printDebug() {\n\tprintln(string(f.GetDebug()))\n}\n\nfunc (f *Flow) appendDebug(info string) {\n\tpc, fp, line, _ := runtime.Caller(f.getCaller())\n\tname := runtime.FuncForPC(pc).Name()\n\tstack := fmt.Sprintf(\"%v:%v %v\", path.Base(fp), line, path.Base(name))\n\tf.debug = append(f.debug, debugInfo{stack, info})\n}\n\nfunc (f *Flow) SetOnClose(exit func()) *Flow {\n\tf.onClose = []func(){exit}\n\treturn f\n}\n\nfunc (f *Flow) AddOnClose(exit func()) *Flow {\n\tf.onClose = append(f.onClose, exit)\n\treturn f\n}\n\nconst (\n\tF_CLOSED = true\n\tF_TIMEOUT = false\n)\n\nfunc (f *Flow) Tick(t *time.Ticker) bool {\n\tselect {\n\tcase <-t.C:\n\t\treturn F_TIMEOUT\n\tcase <-f.IsClose():\n\t\treturn F_CLOSED\n\t}\n}\n\nfunc (f *Flow) CloseOrWait(duration time.Duration) bool {\n\tselect {\n\tcase <-time.After(duration):\n\t\treturn F_TIMEOUT\n\tcase <-f.IsClose():\n\t\treturn F_CLOSED\n\t}\n}\n\nfunc (f *Flow) Error(err error) {\n\tf.errChan <- err\n}\n\nfunc (f *Flow) ForkTo(ref **Flow, exit func()) {\n\t*ref = f.Fork(0).AddOnClose(exit)\n}\n\nfunc (f *Flow) Fork(n int) *Flow {\n\tf2 := NewEx(n)\n\tf2.Parent = f\n\t\/\/ TODO(chzyer): test it !\n\tf2.errChan = f.errChan\n\tf.Children = append(f.Children, f2)\n\tf.Add(1) \/\/ for f2\n\treturn f2\n}\n\nfunc (f *Flow) StopAll() {\n\tflow := f\n\tfor flow.Parent != nil {\n\t\tflow = flow.Parent\n\t}\n\tflow.Stop()\n}\n\nfunc (f *Flow) Close() {\n\tf.appendDebug(\"close\")\n\tf.close()\n}\n\nfunc (f *Flow) close() {\n\tf.Stop()\n\tf.wait()\n}\n\nfunc (f *Flow) Stop() {\n\tif !atomic.CompareAndSwapInt32(&f.stoped, 0, 1) {\n\t\treturn\n\t}\n\tf.appendDebug(\"stop\")\n\n\tclose(f.stopChan)\n\tfor _, cf := range f.Children {\n\t\tcf.Stop()\n\t}\n\tif len(f.onClose) > 0 {\n\t\tgo func() {\n\t\t\tfor _, f := range f.onClose {\n\t\t\t\tf()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (f *Flow) IsClosed() bool {\n\treturn atomic.LoadInt32(&f.stoped) == 1\n}\n\nfunc (f *Flow) IsClose() chan struct{} {\n\treturn f.stopChan\n}\n\nfunc (f *Flow) Add(n int) {\n\tatomic.AddInt32(f.ref, int32(n))\n\tf.appendDebug(fmt.Sprintf(\"add: %v, ref: %v\", n, *f.ref))\n\tf.wg.Add(n)\n}\n\nfunc (f *Flow) getCaller() int {\n\tfor i := 0; ; i++ {\n\t\tpc, _, _, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tf := runtime.FuncForPC(pc).Name()\n\t\tif !strings.HasPrefix(f, pkgPath) {\n\t\t\treturn i - 1\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (f *Flow) Done() {\n\tf.wg.Done()\n\tif atomic.AddInt32(f.ref, -1) == 0 {\n\t\tf.Stop()\n\t}\n}\n\nfunc (f *Flow) DoneAndClose() {\n\tf.Done()\n\tf.appendDebug(fmt.Sprintf(\"done and close, ref: %v\", *f.ref))\n\tf.Stop()\n}\n\nfunc (f *Flow) wait() {\n\tf.appendDebug(\"wait\")\n\n\tdone := make(chan struct{})\n\tprinted := int32(0)\n\tif DefaultDebug && atomic.CompareAndSwapInt32(&f.printed, 0, 1) {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase <-time.After(1000 * time.Millisecond):\n\t\t\t\tf.printDebug()\n\t\t\t\tatomic.StoreInt32(&printed, 1)\n\t\t\t}\n\t\t}()\n\t}\n\t<-f.stopChan\n\tf.wg.Wait()\n\tclose(done)\n\tif atomic.LoadInt32(&printed) == 1 {\n\t\tprintln(fmt.Sprint(&f) + \" - exit\")\n\t}\n\n\tif f.Parent != nil {\n\t\tf.Parent.Done()\n\t\tf.Parent = nil\n\t}\n}\n\nfunc (f *Flow) Wait() error {\n\tsignalChan := make(chan os.Signal)\n\tsignal.Notify(signalChan,\n\t\tos.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGHUP)\n\tvar err error\n\tselect {\n\tcase <-f.IsClose():\n\t\tf.appendDebug(\"got closed\")\n\tcase <-signalChan:\n\t\tf.appendDebug(\"got signal\")\n\t\tf.Stop()\n\tcase err = <-f.errChan:\n\t\tf.appendDebug(fmt.Sprintf(\"got error: %v\", err))\n\n\t\tif err != nil {\n\t\t\tf.Stop()\n\t\t}\n\t}\n\n\tgo func() {\n\t\t<-signalChan\n\t\t\/\/ force close\n\t\tprintln(\"force close\")\n\t\tos.Exit(1)\n\t}()\n\n\tf.wait()\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package form provides an easy to use way to parse form values from an HTTP\n\/\/ request into a struct\npackage form \/\/ import \"vimagination.zapto.org\/form\"\n\nimport (\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar interType = reflect.TypeOf((*formParser)(nil)).Elem()\n\ntype processorDetails struct {\n\tprocessor\n\tPost, Required bool\n\tIndex []int\n}\n\ntype typeMap map[string]processorDetails\n\nvar (\n\ttmMu sync.RWMutex\n\ttypeMaps = make(map[reflect.Type]typeMap)\n)\n\nfunc getTypeMap(t reflect.Type) typeMap {\n\ttmMu.RLock()\n\ttm, ok := typeMaps[t]\n\ttmMu.RUnlock()\n\tif ok {\n\t\treturn tm\n\t}\n\ttmMu.Lock()\n\ttm = createTypeMap(t)\n\ttmMu.Unlock()\n\treturn tm\n}\n\nfunc basicTypeProcessor(t reflect.Type, tag reflect.StructTag) processor {\n\tswitch t.Kind() {\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn newInum(tag, t.Bits())\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn newUnum(tag, t.Bits())\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn newFloat(tag, t.Bits())\n\tcase reflect.String:\n\t\treturn newString(tag)\n\tcase reflect.Bool:\n\t\treturn boolean{}\n\t}\n\treturn nil\n}\n\nfunc createTypeMap(t reflect.Type) typeMap {\n\ttm, ok := typeMaps[t]\n\tif ok {\n\t\treturn tm\n\t}\n\ttm = make(typeMap)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif f.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tname := f.Name\n\t\tvar required, post bool\n\t\tif n := f.Tag.Get(\"form\"); n == \"-\" {\n\t\t\tcontinue\n\t\t} else if n != \"\" {\n\t\t\tp := strings.IndexByte(n, ',')\n\t\t\tif p >= 0 {\n\t\t\t\tif p > 0 {\n\t\t\t\t\tname = n[:p]\n\t\t\t\t}\n\t\t\t\trest := n[p:]\n\t\t\t\trequired = strings.Contains(rest, \",required,\") || strings.HasSuffix(rest, \",required\")\n\t\t\t\tpost = strings.Contains(rest, \",post,\") || strings.HasSuffix(rest, \",post\")\n\t\t\t} else {\n\t\t\t\tname = n\n\t\t\t}\n\t\t}\n\t\tvar p processor\n\t\tif f.Type.Implements(interType) {\n\t\t\tp = inter(false)\n\t\t} else if reflect.PtrTo(f.Type).Implements(interType) {\n\t\t\tp = inter(true)\n\t\t} else if k := f.Type.Kind(); k == reflect.Slice || k == reflect.Ptr {\n\t\t\tet := f.Type.Elem()\n\t\t\ts := basicTypeProcessor(et, f.Tag)\n\t\t\tif s == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif k == reflect.Slice {\n\t\t\t\tp = slice{\n\t\t\t\t\tprocessor: s,\n\t\t\t\t\ttyp: reflect.SliceOf(et),\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp = pointer{\n\t\t\t\t\tprocessor: s,\n\t\t\t\t\ttyp: et,\n\t\t\t\t}\n\t\t\t}\n\t\t} else if k == reflect.Struct && f.Anonymous {\n\t\t\tfor n, p := range createTypeMap(f.Type) {\n\t\t\t\tif _, ok := tm[n]; !ok {\n\t\t\t\t\ttm[n] = processorDetails{\n\t\t\t\t\t\tprocessor: p.processor,\n\t\t\t\t\t\tRequired: p.Required,\n\t\t\t\t\t\tPost: p.Post,\n\t\t\t\t\t\tIndex: append(append(make([]int, 0, len(p.Index)+1), i), p.Index...),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t} else {\n\t\t\tp = basicTypeProcessor(f.Type, f.Tag)\n\t\t\tif p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ttm[name] = processorDetails{\n\t\t\tprocessor: p,\n\t\t\tRequired: required,\n\t\t\tPost: post,\n\t\t\tIndex: []int{i},\n\t\t}\n\t}\n\ttypeMaps[t] = tm\n\treturn tm\n}\n\n\/\/ Process parses the form data from the request into the passed value, which\n\/\/ must be a pointer to a struct.\n\/\/\n\/\/ Form keys are assumed to be the field names unless a 'form' tag is provided\n\/\/ with an alternate name, for example, in the following struct, the int is\n\/\/ parse with key 'A' and the bool is parsed with key 'C'.\n\/\/\n\/\/ type Example struct {\n\/\/\tA int\n\/\/\tB bool `form:\"C\"`\n\/\/ }\n\/\/\n\/\/ Two options can be added to the form tag to modify the processing. The\n\/\/ 'post' option forces the processer to parse a value from the PostForm field\n\/\/ of the Request, and the 'required' option will have an error thrown if the\n\/\/ key in not set.\n\/\/\n\/\/ Number types can also have minimums and maximums checked during processing\n\/\/ by setting the 'min' and 'max' tags accordingly.\n\/\/\n\/\/ In a similar vein, string types can utilise the 'regex' tag to set a\n\/\/ regular expression to be matched against.\n\/\/\n\/\/ Lastly, a custom data processor can be specified by attaching a method to\n\/\/ the field type with the following specification:\n\/\/\n\/\/ ParseForm([]string) error\nfunc Process(r *http.Request, fv interface{}) error {\n\tv := reflect.ValueOf(fv)\n\tif v.Kind() != reflect.Ptr {\n\t\treturn ErrNeedPointer\n\t}\n\tfor v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\tif v.Kind() != reflect.Struct {\n\t\treturn ErrNeedStruct\n\t}\n\ttm := getTypeMap(v.Type())\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\tvar errors ErrorMap\n\tfor key, pd := range tm {\n\t\tvar (\n\t\t\tval []string\n\t\t\tok bool\n\t\t)\n\t\tif pd.Post {\n\t\t\tval, ok = r.PostForm[key]\n\t\t} else {\n\t\t\tval, ok = r.Form[key]\n\t\t}\n\t\tif ok {\n\t\t\tif err := pd.processor.process(v.FieldByIndex(pd.Index), val); err != nil {\n\t\t\t\tif errors == nil {\n\t\t\t\t\terrors = make(ErrorMap)\n\t\t\t\t}\n\t\t\t\terrors[key] = err\n\t\t\t}\n\t\t} else if pd.Required {\n\t\t\tif errors == nil {\n\t\t\t\terrors = make(ErrorMap)\n\t\t\t}\n\t\t\terrors[key] = ErrRequiredMissing\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn errors\n\t}\n\treturn nil\n}\n<commit_msg>added more detail for Process comment<commit_after>\/\/ Package form provides an easy to use way to parse form values from an HTTP\n\/\/ request into a struct\npackage form \/\/ import \"vimagination.zapto.org\/form\"\n\nimport (\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar interType = reflect.TypeOf((*formParser)(nil)).Elem()\n\ntype processorDetails struct {\n\tprocessor\n\tPost, Required bool\n\tIndex []int\n}\n\ntype typeMap map[string]processorDetails\n\nvar (\n\ttmMu sync.RWMutex\n\ttypeMaps = make(map[reflect.Type]typeMap)\n)\n\nfunc getTypeMap(t reflect.Type) typeMap {\n\ttmMu.RLock()\n\ttm, ok := typeMaps[t]\n\ttmMu.RUnlock()\n\tif ok {\n\t\treturn tm\n\t}\n\ttmMu.Lock()\n\ttm = createTypeMap(t)\n\ttmMu.Unlock()\n\treturn tm\n}\n\nfunc basicTypeProcessor(t reflect.Type, tag reflect.StructTag) processor {\n\tswitch t.Kind() {\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn newInum(tag, t.Bits())\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn newUnum(tag, t.Bits())\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn newFloat(tag, t.Bits())\n\tcase reflect.String:\n\t\treturn newString(tag)\n\tcase reflect.Bool:\n\t\treturn boolean{}\n\t}\n\treturn nil\n}\n\nfunc createTypeMap(t reflect.Type) typeMap {\n\ttm, ok := typeMaps[t]\n\tif ok {\n\t\treturn tm\n\t}\n\ttm = make(typeMap)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif f.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tname := f.Name\n\t\tvar required, post bool\n\t\tif n := f.Tag.Get(\"form\"); n == \"-\" {\n\t\t\tcontinue\n\t\t} else if n != \"\" {\n\t\t\tp := strings.IndexByte(n, ',')\n\t\t\tif p >= 0 {\n\t\t\t\tif p > 0 {\n\t\t\t\t\tname = n[:p]\n\t\t\t\t}\n\t\t\t\trest := n[p:]\n\t\t\t\trequired = strings.Contains(rest, \",required,\") || strings.HasSuffix(rest, \",required\")\n\t\t\t\tpost = strings.Contains(rest, \",post,\") || strings.HasSuffix(rest, \",post\")\n\t\t\t} else {\n\t\t\t\tname = n\n\t\t\t}\n\t\t}\n\t\tvar p processor\n\t\tif f.Type.Implements(interType) {\n\t\t\tp = inter(false)\n\t\t} else if reflect.PtrTo(f.Type).Implements(interType) {\n\t\t\tp = inter(true)\n\t\t} else if k := f.Type.Kind(); k == reflect.Slice || k == reflect.Ptr {\n\t\t\tet := f.Type.Elem()\n\t\t\ts := basicTypeProcessor(et, f.Tag)\n\t\t\tif s == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif k == reflect.Slice {\n\t\t\t\tp = slice{\n\t\t\t\t\tprocessor: s,\n\t\t\t\t\ttyp: reflect.SliceOf(et),\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp = pointer{\n\t\t\t\t\tprocessor: s,\n\t\t\t\t\ttyp: et,\n\t\t\t\t}\n\t\t\t}\n\t\t} else if k == reflect.Struct && f.Anonymous {\n\t\t\tfor n, p := range createTypeMap(f.Type) {\n\t\t\t\tif _, ok := tm[n]; !ok {\n\t\t\t\t\ttm[n] = processorDetails{\n\t\t\t\t\t\tprocessor: p.processor,\n\t\t\t\t\t\tRequired: p.Required,\n\t\t\t\t\t\tPost: p.Post,\n\t\t\t\t\t\tIndex: append(append(make([]int, 0, len(p.Index)+1), i), p.Index...),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t} else {\n\t\t\tp = basicTypeProcessor(f.Type, f.Tag)\n\t\t\tif p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ttm[name] = processorDetails{\n\t\t\tprocessor: p,\n\t\t\tRequired: required,\n\t\t\tPost: post,\n\t\t\tIndex: []int{i},\n\t\t}\n\t}\n\ttypeMaps[t] = tm\n\treturn tm\n}\n\n\/\/ Process parses the form data from the request into the passed value, which\n\/\/ must be a pointer to a struct.\n\/\/\n\/\/ Form keys are assumed to be the field names unless a 'form' tag is provided\n\/\/ with an alternate name, for example, in the following struct, the int is\n\/\/ parse with key 'A' and the bool is parsed with key 'C'.\n\/\/\n\/\/ type Example struct {\n\/\/\tA int\n\/\/\tB bool `form:\"C\"`\n\/\/ }\n\/\/\n\/\/ Two options can be added to the form tag to modify the processing. The\n\/\/ 'post' option forces the processer to parse a value from the PostForm field\n\/\/ of the Request, and the 'required' option will have an error thrown if the\n\/\/ key in not set.\n\/\/\n\/\/ Number types can also have minimums and maximums checked during processing\n\/\/ by setting the 'min' and 'max' tags accordingly.\n\/\/\n\/\/ In a similar vein, string types can utilise the 'regex' tag to set a\n\/\/ regular expression to be matched against.\n\/\/\n\/\/ Anonymous structs are traversed, but will not override more local fields.\n\/\/\n\/\/ Slices of basic types can be processed, and errors returned from any such\n\/\/ processing will be of the Errors type, which each indexed entry\n\/\/ corresponding to the index of the processed data.\n\/\/\n\/\/ Pointers to basic types can also be processed, with the type being allocated\n\/\/ even if an error occurs.\n\/\/\n\/\/ Lastly, a custom data processor can be specified by attaching a method to\n\/\/ the field type with the following specification:\n\/\/\n\/\/ ParseForm([]string) error\nfunc Process(r *http.Request, fv interface{}) error {\n\tv := reflect.ValueOf(fv)\n\tif v.Kind() != reflect.Ptr {\n\t\treturn ErrNeedPointer\n\t}\n\tfor v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\tif v.Kind() != reflect.Struct {\n\t\treturn ErrNeedStruct\n\t}\n\ttm := getTypeMap(v.Type())\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\tvar errors ErrorMap\n\tfor key, pd := range tm {\n\t\tvar (\n\t\t\tval []string\n\t\t\tok bool\n\t\t)\n\t\tif pd.Post {\n\t\t\tval, ok = r.PostForm[key]\n\t\t} else {\n\t\t\tval, ok = r.Form[key]\n\t\t}\n\t\tif ok {\n\t\t\tif err := pd.processor.process(v.FieldByIndex(pd.Index), val); err != nil {\n\t\t\t\tif errors == nil {\n\t\t\t\t\terrors = make(ErrorMap)\n\t\t\t\t}\n\t\t\t\terrors[key] = err\n\t\t\t}\n\t\t} else if pd.Required {\n\t\t\tif errors == nil {\n\t\t\t\terrors = make(ErrorMap)\n\t\t\t}\n\t\t\terrors[key] = ErrRequiredMissing\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn errors\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build gofuzz\npackage amqp\n\nimport \"bytes\"\n\nfunc Fuzz(data []byte) int {\n\tr := reader{bytes.NewReader(data)}\n\tframe, err := r.ReadFrame()\n\tif err != nil {\n\t\tif frame != nil {\n\t\t\tpanic(\"frame is not nil\")\n\t\t}\n\t\treturn 0\n\t}\n\treturn 1\n}\n<commit_msg>the +build tag isn't a doc comment<commit_after>\/\/ +build gofuzz\n\npackage amqp\n\nimport \"bytes\"\n\nfunc Fuzz(data []byte) int {\n\tr := reader{bytes.NewReader(data)}\n\tframe, err := r.ReadFrame()\n\tif err != nil {\n\t\tif frame != nil {\n\t\t\tpanic(\"frame is not nil\")\n\t\t}\n\t\treturn 0\n\t}\n\treturn 1\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"testing\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nfunc TestItemAssembly(t *testing.T) {\n\tvar tests = []testpair{\n\t\t{\n\t\t\t`>\\C[14]…今は使用できません。`,\n\t\t\t`>\\C[14]…今は使用できません。`,\n\t\t},\n\t\t{\n\t\t\t`【\\C[14]\\N[2]\\C[0]】 \\{アハァァーーーンッ!!`,\n\t\t\t`【\\C[14]\\N[2]\\C[0]】 \\{ アハァァーーーンッ!!`,\n\t\t},\n\t\t{\n\t\t\t`疾風苦無(消費1) en(v[25] >= 1)`,\n\t\t\t`疾風苦無 (消費 1) en(v[25] >= 1)`,\n\t\t},\n\t\t{\n\t\t\t`PT加入en(!s[484] and v[25] <2)`,\n\t\t\t`PT 加入 en(!s[484] and v[25] <2)`,\n\t\t},\n\t\t{\n\t\t\t`\\i[21]メンタルキュア`,\n\t\t\t`\\i[21] メンタルキュア`,\n\t\t},\n\t\t{\n\t\t\t`\"0x#{text}\"`,\n\t\t\t`\"0x#{text}\"`,\n\t\t},\n\t\t{\n\t\t\t`[レース10]`,\n\t\t\t`[レース 10]`,\n\t\t},\n\t}\n\n\tfor _, pair := range tests {\n\t\t\/\/pair.input = unescapeText(pair.input)\n\t\titems, err := parseText(pair.input)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s\\ntext: %q\", err, pair.input)\n\t\t\tlog.Error(spew.Sdump(items))\n\t\t} else {\n\t\t\tlog.Debug(spew.Sdump(items))\n\t\t}\n\n\t\tr := assembleItems(items)\n\n\t\tif r != pair.output {\n\t\t\tt.Errorf(\"For input:\\n%q\\nexpected:\\n%q\\ngot:\\n%q\\n\", pair.input, pair.output, r)\n\t\t}\n\t}\n}\n<commit_msg>Dump items on error<commit_after>package main\n\nimport (\n\t\"testing\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\nfunc TestItemAssembly(t *testing.T) {\n\tvar tests = []testpair{\n\t\t{\n\t\t\t`>\\C[14]…今は使用できません。`,\n\t\t\t`>\\C[14]…今は使用できません。`,\n\t\t},\n\t\t{\n\t\t\t`【\\C[14]\\N[2]\\C[0]】 \\{アハァァーーーンッ!!`,\n\t\t\t`【\\C[14]\\N[2]\\C[0]】 \\{ アハァァーーーンッ!!`,\n\t\t},\n\t\t{\n\t\t\t`疾風苦無(消費1) en(v[25] >= 1)`,\n\t\t\t`疾風苦無 (消費 1) en(v[25] >= 1)`,\n\t\t},\n\t\t{\n\t\t\t`PT加入en(!s[484] and v[25] <2)`,\n\t\t\t`PT 加入 en(!s[484] and v[25] <2)`,\n\t\t},\n\t\t{\n\t\t\t`\\i[21]メンタルキュア`,\n\t\t\t`\\i[21] メンタルキュア`,\n\t\t},\n\t\t{\n\t\t\t`\"0x#{text}\"`,\n\t\t\t`\"0x#{text}\"`,\n\t\t},\n\t\t{\n\t\t\t`[レース10]`,\n\t\t\t`[レース 10]`,\n\t\t},\n\t\t{\n\t\t\t`%sの%sを %s 奪った`,\n\t\t\t`%s の %s を %s 奪った`,\n\t\t},\n\t}\n\n\tfor _, pair := range tests {\n\t\t\/\/pair.input = unescapeText(pair.input)\n\t\titems, err := parseText(pair.input)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s\\ntext: %q\", err, pair.input)\n\t\t\tlog.Error(spew.Sdump(items))\n\t\t} else {\n\t\t\tlog.Debug(spew.Sdump(items))\n\t\t}\n\n\t\tr := assembleItems(items)\n\n\t\tif r != pair.output {\n\t\t\tt.Errorf(\"For input:\\n%q\\nexpected:\\n%q\\ngot:\\n%q\\nitems:\\n%s\", pair.input, pair.output, r, spew.Sdump(items))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tar\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestFileInfoHeader(t *testing.T) {\n\tfi, err := os.Stat(\"testdata\/small.txt\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\th, err := FileInfoHeader(fi, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"FileInfoHeader: %v\", err)\n\t}\n\tif g, e := h.Name, \"small.txt\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\tif g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {\n\t\tt.Errorf(\"Mode = %#o; want %#o\", g, e)\n\t}\n\tif g, e := h.Size, int64(5); g != e {\n\t\tt.Errorf(\"Size = %v; want %v\", g, e)\n\t}\n\tif g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {\n\t\tt.Errorf(\"ModTime = %v; want %v\", g, e)\n\t}\n}\n\nfunc TestFileInfoHeaderDir(t *testing.T) {\n\tfi, err := os.Stat(\"testdata\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\th, err := FileInfoHeader(fi, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"FileInfoHeader: %v\", err)\n\t}\n\tif g, e := h.Name, \"testdata\/\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\t\/\/ Ignoring c_ISGID for golang.org\/issue\/4867\n\tif g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {\n\t\tt.Errorf(\"Mode = %#o; want %#o\", g, e)\n\t}\n\tif g, e := h.Size, int64(0); g != e {\n\t\tt.Errorf(\"Size = %v; want %v\", g, e)\n\t}\n\tif g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {\n\t\tt.Errorf(\"ModTime = %v; want %v\", g, e)\n\t}\n}\n\nfunc TestFileInfoHeaderSymlink(t *testing.T) {\n\th, err := FileInfoHeader(symlink{}, \"some-target\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif g, e := h.Name, \"some-symlink\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\tif g, e := h.Linkname, \"some-target\"; g != e {\n\t\tt.Errorf(\"Linkname = %q; want %q\", g, e)\n\t}\n}\n\ntype symlink struct{}\n\nfunc (symlink) Name() string { return \"some-symlink\" }\nfunc (symlink) Size() int64 { return 0 }\nfunc (symlink) Mode() os.FileMode { return os.ModeSymlink }\nfunc (symlink) ModTime() time.Time { return time.Time{} }\nfunc (symlink) IsDir() bool { return false }\nfunc (symlink) Sys() interface{} { return nil }\n\nfunc TestRoundTrip(t *testing.T) {\n\tdata := []byte(\"some file contents\")\n\n\tvar b bytes.Buffer\n\ttw := NewWriter(&b)\n\thdr := &Header{\n\t\tName: \"file.txt\",\n\t\tUid: 1 << 21, \/\/ too big for 8 octal digits\n\t\tSize: int64(len(data)),\n\t\tModTime: time.Now(),\n\t}\n\t\/\/ tar only supports second precision.\n\thdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)\n\tif err := tw.WriteHeader(hdr); err != nil {\n\t\tt.Fatalf(\"tw.WriteHeader: %v\", err)\n\t}\n\tif _, err := tw.Write(data); err != nil {\n\t\tt.Fatalf(\"tw.Write: %v\", err)\n\t}\n\tif err := tw.Close(); err != nil {\n\t\tt.Fatalf(\"tw.Close: %v\", err)\n\t}\n\n\t\/\/ Read it back.\n\ttr := NewReader(&b)\n\trHdr, err := tr.Next()\n\tif err != nil {\n\t\tt.Fatalf(\"tr.Next: %v\", err)\n\t}\n\tif !reflect.DeepEqual(rHdr, hdr) {\n\t\tt.Errorf(\"Header mismatch.\\n got %+v\\nwant %+v\", rHdr, hdr)\n\t}\n\trData, err := ioutil.ReadAll(tr)\n\tif err != nil {\n\t\tt.Fatalf(\"Read: %v\", err)\n\t}\n\tif !bytes.Equal(rData, data) {\n\t\tt.Errorf(\"Data mismatch.\\n got %q\\nwant %q\", rData, data)\n\t}\n}\n\ntype headerRoundTripTest struct {\n\th *Header\n\tfm os.FileMode\n}\n\nfunc TestHeaderRoundTrip(t *testing.T) {\n\tgolden := []headerRoundTripTest{\n\t\t\/\/ regular file.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"test.txt\",\n\t\t\t\tMode: 0644 | c_ISREG,\n\t\t\t\tSize: 12,\n\t\t\t\tModTime: time.Unix(1360600916, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0644,\n\t\t},\n\t\t\/\/ hard link.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"hard.txt\",\n\t\t\t\tMode: 0644 | c_ISLNK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360600916, 0),\n\t\t\t\tTypeflag: TypeLink,\n\t\t\t},\n\t\t\tfm: 0644 | os.ModeSymlink,\n\t\t},\n\t\t\/\/ symbolic link.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"link.txt\",\n\t\t\t\tMode: 0777 | c_ISLNK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360600852, 0),\n\t\t\t\tTypeflag: TypeSymlink,\n\t\t\t},\n\t\t\tfm: 0777 | os.ModeSymlink,\n\t\t},\n\t\t\/\/ character device node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/null\",\n\t\t\t\tMode: 0666 | c_ISCHR,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578951, 0),\n\t\t\t\tTypeflag: TypeChar,\n\t\t\t},\n\t\t\tfm: 0666 | os.ModeDevice | os.ModeCharDevice,\n\t\t},\n\t\t\/\/ block device node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/sda\",\n\t\t\t\tMode: 0660 | c_ISBLK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578954, 0),\n\t\t\t\tTypeflag: TypeBlock,\n\t\t\t},\n\t\t\tfm: 0660 | os.ModeDevice,\n\t\t},\n\t\t\/\/ directory.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dir\/\",\n\t\t\t\tMode: 0755 | c_ISDIR,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360601116, 0),\n\t\t\t\tTypeflag: TypeDir,\n\t\t\t},\n\t\t\tfm: 0755 | os.ModeDir,\n\t\t},\n\t\t\/\/ fifo node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/initctl\",\n\t\t\t\tMode: 0600 | c_ISFIFO,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578949, 0),\n\t\t\t\tTypeflag: TypeFifo,\n\t\t\t},\n\t\t\tfm: 0600 | os.ModeNamedPipe,\n\t\t},\n\t\t\/\/ setuid.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"bin\/su\",\n\t\t\t\tMode: 0755 | c_ISREG | c_ISUID,\n\t\t\t\tSize: 23232,\n\t\t\t\tModTime: time.Unix(1355405093, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0755 | os.ModeSetuid,\n\t\t},\n\t\t\/\/ setguid.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"group.txt\",\n\t\t\t\tMode: 0750 | c_ISREG | c_ISGID,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360602346, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0750 | os.ModeSetgid,\n\t\t},\n\t\t\/\/ sticky.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"sticky.txt\",\n\t\t\t\tMode: 0600 | c_ISREG | c_ISVTX,\n\t\t\t\tSize: 7,\n\t\t\t\tModTime: time.Unix(1360602540, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0600 | os.ModeSticky,\n\t\t},\n\t}\n\n\tfor i, g := range golden {\n\t\tfi := g.h.FileInfo()\n\t\th2, err := FileInfoHeader(fi, \"\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(fi.Name(), \"\/\") {\n\t\t\tt.Errorf(\"FileInfo of %q contains slash: %q\", g.h.Name, fi.Name())\n\t\t}\n\t\tname := path.Base(g.h.Name)\n\t\tif fi.IsDir() {\n\t\t\tname += \"\/\"\n\t\t}\n\t\tif got, want := h2.Name, name; got != want {\n\t\t\tt.Errorf(\"i=%d: Name: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif got, want := h2.Size, g.h.Size; got != want {\n\t\t\tt.Errorf(\"i=%d: Size: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif got, want := h2.Mode, g.h.Mode; got != want {\n\t\t\tt.Errorf(\"i=%d: Mode: got %o, want %o\", i, got, want)\n\t\t}\n\t\tif got, want := fi.Mode(), g.fm; got != want {\n\t\t\tt.Errorf(\"i=%d: fi.Mode: got %o, want %o\", i, got, want)\n\t\t}\n\t\tif got, want := h2.ModTime, g.h.ModTime; got != want {\n\t\t\tt.Errorf(\"i=%d: ModTime: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {\n\t\t\tt.Errorf(\"i=%d: Sys didn't return original *Header\", i)\n\t\t}\n\t}\n}\n<commit_msg>archive\/tar: add test case for passing nil to FileInfoHeader<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tar\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestFileInfoHeader(t *testing.T) {\n\tfi, err := os.Stat(\"testdata\/small.txt\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\th, err := FileInfoHeader(fi, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"FileInfoHeader: %v\", err)\n\t}\n\tif g, e := h.Name, \"small.txt\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\tif g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {\n\t\tt.Errorf(\"Mode = %#o; want %#o\", g, e)\n\t}\n\tif g, e := h.Size, int64(5); g != e {\n\t\tt.Errorf(\"Size = %v; want %v\", g, e)\n\t}\n\tif g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {\n\t\tt.Errorf(\"ModTime = %v; want %v\", g, e)\n\t}\n\t\/\/ FileInfoHeader should error when passing nil FileInfo\n\tif _, err := FileInfoHeader(nil, \"\"); err == nil {\n\t\tt.Fatalf(\"Expected error when passing nil to FileInfoHeader\")\n\t}\n}\n\nfunc TestFileInfoHeaderDir(t *testing.T) {\n\tfi, err := os.Stat(\"testdata\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\th, err := FileInfoHeader(fi, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"FileInfoHeader: %v\", err)\n\t}\n\tif g, e := h.Name, \"testdata\/\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\t\/\/ Ignoring c_ISGID for golang.org\/issue\/4867\n\tif g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {\n\t\tt.Errorf(\"Mode = %#o; want %#o\", g, e)\n\t}\n\tif g, e := h.Size, int64(0); g != e {\n\t\tt.Errorf(\"Size = %v; want %v\", g, e)\n\t}\n\tif g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {\n\t\tt.Errorf(\"ModTime = %v; want %v\", g, e)\n\t}\n}\n\nfunc TestFileInfoHeaderSymlink(t *testing.T) {\n\th, err := FileInfoHeader(symlink{}, \"some-target\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif g, e := h.Name, \"some-symlink\"; g != e {\n\t\tt.Errorf(\"Name = %q; want %q\", g, e)\n\t}\n\tif g, e := h.Linkname, \"some-target\"; g != e {\n\t\tt.Errorf(\"Linkname = %q; want %q\", g, e)\n\t}\n}\n\ntype symlink struct{}\n\nfunc (symlink) Name() string { return \"some-symlink\" }\nfunc (symlink) Size() int64 { return 0 }\nfunc (symlink) Mode() os.FileMode { return os.ModeSymlink }\nfunc (symlink) ModTime() time.Time { return time.Time{} }\nfunc (symlink) IsDir() bool { return false }\nfunc (symlink) Sys() interface{} { return nil }\n\nfunc TestRoundTrip(t *testing.T) {\n\tdata := []byte(\"some file contents\")\n\n\tvar b bytes.Buffer\n\ttw := NewWriter(&b)\n\thdr := &Header{\n\t\tName: \"file.txt\",\n\t\tUid: 1 << 21, \/\/ too big for 8 octal digits\n\t\tSize: int64(len(data)),\n\t\tModTime: time.Now(),\n\t}\n\t\/\/ tar only supports second precision.\n\thdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)\n\tif err := tw.WriteHeader(hdr); err != nil {\n\t\tt.Fatalf(\"tw.WriteHeader: %v\", err)\n\t}\n\tif _, err := tw.Write(data); err != nil {\n\t\tt.Fatalf(\"tw.Write: %v\", err)\n\t}\n\tif err := tw.Close(); err != nil {\n\t\tt.Fatalf(\"tw.Close: %v\", err)\n\t}\n\n\t\/\/ Read it back.\n\ttr := NewReader(&b)\n\trHdr, err := tr.Next()\n\tif err != nil {\n\t\tt.Fatalf(\"tr.Next: %v\", err)\n\t}\n\tif !reflect.DeepEqual(rHdr, hdr) {\n\t\tt.Errorf(\"Header mismatch.\\n got %+v\\nwant %+v\", rHdr, hdr)\n\t}\n\trData, err := ioutil.ReadAll(tr)\n\tif err != nil {\n\t\tt.Fatalf(\"Read: %v\", err)\n\t}\n\tif !bytes.Equal(rData, data) {\n\t\tt.Errorf(\"Data mismatch.\\n got %q\\nwant %q\", rData, data)\n\t}\n}\n\ntype headerRoundTripTest struct {\n\th *Header\n\tfm os.FileMode\n}\n\nfunc TestHeaderRoundTrip(t *testing.T) {\n\tgolden := []headerRoundTripTest{\n\t\t\/\/ regular file.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"test.txt\",\n\t\t\t\tMode: 0644 | c_ISREG,\n\t\t\t\tSize: 12,\n\t\t\t\tModTime: time.Unix(1360600916, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0644,\n\t\t},\n\t\t\/\/ hard link.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"hard.txt\",\n\t\t\t\tMode: 0644 | c_ISLNK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360600916, 0),\n\t\t\t\tTypeflag: TypeLink,\n\t\t\t},\n\t\t\tfm: 0644 | os.ModeSymlink,\n\t\t},\n\t\t\/\/ symbolic link.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"link.txt\",\n\t\t\t\tMode: 0777 | c_ISLNK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360600852, 0),\n\t\t\t\tTypeflag: TypeSymlink,\n\t\t\t},\n\t\t\tfm: 0777 | os.ModeSymlink,\n\t\t},\n\t\t\/\/ character device node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/null\",\n\t\t\t\tMode: 0666 | c_ISCHR,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578951, 0),\n\t\t\t\tTypeflag: TypeChar,\n\t\t\t},\n\t\t\tfm: 0666 | os.ModeDevice | os.ModeCharDevice,\n\t\t},\n\t\t\/\/ block device node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/sda\",\n\t\t\t\tMode: 0660 | c_ISBLK,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578954, 0),\n\t\t\t\tTypeflag: TypeBlock,\n\t\t\t},\n\t\t\tfm: 0660 | os.ModeDevice,\n\t\t},\n\t\t\/\/ directory.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dir\/\",\n\t\t\t\tMode: 0755 | c_ISDIR,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360601116, 0),\n\t\t\t\tTypeflag: TypeDir,\n\t\t\t},\n\t\t\tfm: 0755 | os.ModeDir,\n\t\t},\n\t\t\/\/ fifo node.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"dev\/initctl\",\n\t\t\t\tMode: 0600 | c_ISFIFO,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360578949, 0),\n\t\t\t\tTypeflag: TypeFifo,\n\t\t\t},\n\t\t\tfm: 0600 | os.ModeNamedPipe,\n\t\t},\n\t\t\/\/ setuid.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"bin\/su\",\n\t\t\t\tMode: 0755 | c_ISREG | c_ISUID,\n\t\t\t\tSize: 23232,\n\t\t\t\tModTime: time.Unix(1355405093, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0755 | os.ModeSetuid,\n\t\t},\n\t\t\/\/ setguid.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"group.txt\",\n\t\t\t\tMode: 0750 | c_ISREG | c_ISGID,\n\t\t\t\tSize: 0,\n\t\t\t\tModTime: time.Unix(1360602346, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0750 | os.ModeSetgid,\n\t\t},\n\t\t\/\/ sticky.\n\t\t{\n\t\t\th: &Header{\n\t\t\t\tName: \"sticky.txt\",\n\t\t\t\tMode: 0600 | c_ISREG | c_ISVTX,\n\t\t\t\tSize: 7,\n\t\t\t\tModTime: time.Unix(1360602540, 0),\n\t\t\t\tTypeflag: TypeReg,\n\t\t\t},\n\t\t\tfm: 0600 | os.ModeSticky,\n\t\t},\n\t}\n\n\tfor i, g := range golden {\n\t\tfi := g.h.FileInfo()\n\t\th2, err := FileInfoHeader(fi, \"\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(fi.Name(), \"\/\") {\n\t\t\tt.Errorf(\"FileInfo of %q contains slash: %q\", g.h.Name, fi.Name())\n\t\t}\n\t\tname := path.Base(g.h.Name)\n\t\tif fi.IsDir() {\n\t\t\tname += \"\/\"\n\t\t}\n\t\tif got, want := h2.Name, name; got != want {\n\t\t\tt.Errorf(\"i=%d: Name: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif got, want := h2.Size, g.h.Size; got != want {\n\t\t\tt.Errorf(\"i=%d: Size: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif got, want := h2.Mode, g.h.Mode; got != want {\n\t\t\tt.Errorf(\"i=%d: Mode: got %o, want %o\", i, got, want)\n\t\t}\n\t\tif got, want := fi.Mode(), g.fm; got != want {\n\t\t\tt.Errorf(\"i=%d: fi.Mode: got %o, want %o\", i, got, want)\n\t\t}\n\t\tif got, want := h2.ModTime, g.h.ModTime; got != want {\n\t\t\tt.Errorf(\"i=%d: ModTime: got %v, want %v\", i, got, want)\n\t\t}\n\t\tif sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {\n\t\t\tt.Errorf(\"i=%d: Sys didn't return original *Header\", i)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tls\n\nimport (\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n)\n\n\/\/ Possible certificate files; stop after finding one.\nvar certFiles = []string{\n\t\"\/etc\/ssl\/certs\/ca-certificates.crt\", \/\/ Linux etc\n\t\"\/etc\/pki\/tls\/certs\/ca-bundle.crt\", \/\/ Fedora\/RHEL\n\t\"\/etc\/ssl\/ca-bundle.pem\", \/\/ OpenSUSE\n}\n\nfunc initDefaultRoots() {\n\troots := x509.NewCertPool()\n\tfor _, file := range certFiles {\n\t\tdata, err := ioutil.ReadFile(file)\n\t\tif err == nil {\n\t\t\troots.AppendCertsFromPEM(data)\n\t\t\tbreak\n\t\t}\n\t}\n\tvarDefaultRoots = roots\n}\n<commit_msg>crypto\/tls: add openbsd root certificate location<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tls\n\nimport (\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n)\n\n\/\/ Possible certificate files; stop after finding one.\nvar certFiles = []string{\n\t\"\/etc\/ssl\/certs\/ca-certificates.crt\", \/\/ Linux etc\n\t\"\/etc\/pki\/tls\/certs\/ca-bundle.crt\", \/\/ Fedora\/RHEL\n\t\"\/etc\/ssl\/ca-bundle.pem\", \/\/ OpenSUSE\n\t\"\/etc\/ssl\/cert.pem\", \/\/ OpenBSD\n}\n\nfunc initDefaultRoots() {\n\troots := x509.NewCertPool()\n\tfor _, file := range certFiles {\n\t\tdata, err := ioutil.ReadFile(file)\n\t\tif err == nil {\n\t\t\troots.AppendCertsFromPEM(data)\n\t\t\tbreak\n\t\t}\n\t}\n\tvarDefaultRoots = roots\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t. \"runtime\"\n\t\"testing\"\n)\n\nfunc TestMemmove(t *testing.T) {\n\tsize := 256\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tsrc := make([]byte, size)\n\tdst := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tsrc[i] = byte(128 + (i & 127))\n\t}\n\tfor i := 0; i < size; i++ {\n\t\tdst[i] = byte(i & 127)\n\t}\n\tfor n := 0; n <= size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ offset in src\n\t\t\tfor y := 0; y <= size-n; y++ { \/\/ offset in dst\n\t\t\t\tcopy(dst[y:y+n], src[x:x+n])\n\t\t\t\tfor i := 0; i < y; i++ {\n\t\t\t\t\tif dst[i] != byte(i&127) {\n\t\t\t\t\t\tt.Fatalf(\"prefix dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor i := y; i < y+n; i++ {\n\t\t\t\t\tif dst[i] != byte(128+((i-y+x)&127)) {\n\t\t\t\t\t\tt.Fatalf(\"copied dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t\tdst[i] = byte(i & 127) \/\/ reset dst\n\t\t\t\t}\n\t\t\t\tfor i := y + n; i < size; i++ {\n\t\t\t\t\tif dst[i] != byte(i&127) {\n\t\t\t\t\t\tt.Fatalf(\"suffix dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMemmoveAlias(t *testing.T) {\n\tsize := 256\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tbuf := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tbuf[i] = byte(i)\n\t}\n\tfor n := 0; n <= size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ src offset\n\t\t\tfor y := 0; y <= size-n; y++ { \/\/ dst offset\n\t\t\t\tcopy(buf[y:y+n], buf[x:x+n])\n\t\t\t\tfor i := 0; i < y; i++ {\n\t\t\t\t\tif buf[i] != byte(i) {\n\t\t\t\t\t\tt.Fatalf(\"prefix buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor i := y; i < y+n; i++ {\n\t\t\t\t\tif buf[i] != byte(i-y+x) {\n\t\t\t\t\t\tt.Fatalf(\"copied buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t\tbuf[i] = byte(i) \/\/ reset buf\n\t\t\t\t}\n\t\t\t\tfor i := y + n; i < size; i++ {\n\t\t\t\t\tif buf[i] != byte(i) {\n\t\t\t\t\t\tt.Fatalf(\"suffix buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc bmMemmove(b *testing.B, n int) {\n\tx := make([]byte, n)\n\ty := make([]byte, n)\n\tb.SetBytes(int64(n))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(x, y)\n\t}\n}\n\nfunc BenchmarkMemmove0(b *testing.B) { bmMemmove(b, 0) }\nfunc BenchmarkMemmove1(b *testing.B) { bmMemmove(b, 1) }\nfunc BenchmarkMemmove2(b *testing.B) { bmMemmove(b, 2) }\nfunc BenchmarkMemmove3(b *testing.B) { bmMemmove(b, 3) }\nfunc BenchmarkMemmove4(b *testing.B) { bmMemmove(b, 4) }\nfunc BenchmarkMemmove5(b *testing.B) { bmMemmove(b, 5) }\nfunc BenchmarkMemmove6(b *testing.B) { bmMemmove(b, 6) }\nfunc BenchmarkMemmove7(b *testing.B) { bmMemmove(b, 7) }\nfunc BenchmarkMemmove8(b *testing.B) { bmMemmove(b, 8) }\nfunc BenchmarkMemmove9(b *testing.B) { bmMemmove(b, 9) }\nfunc BenchmarkMemmove10(b *testing.B) { bmMemmove(b, 10) }\nfunc BenchmarkMemmove11(b *testing.B) { bmMemmove(b, 11) }\nfunc BenchmarkMemmove12(b *testing.B) { bmMemmove(b, 12) }\nfunc BenchmarkMemmove13(b *testing.B) { bmMemmove(b, 13) }\nfunc BenchmarkMemmove14(b *testing.B) { bmMemmove(b, 14) }\nfunc BenchmarkMemmove15(b *testing.B) { bmMemmove(b, 15) }\nfunc BenchmarkMemmove16(b *testing.B) { bmMemmove(b, 16) }\nfunc BenchmarkMemmove32(b *testing.B) { bmMemmove(b, 32) }\nfunc BenchmarkMemmove64(b *testing.B) { bmMemmove(b, 64) }\nfunc BenchmarkMemmove128(b *testing.B) { bmMemmove(b, 128) }\nfunc BenchmarkMemmove256(b *testing.B) { bmMemmove(b, 256) }\nfunc BenchmarkMemmove512(b *testing.B) { bmMemmove(b, 512) }\nfunc BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }\nfunc BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }\nfunc BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }\n\nfunc TestMemclr(t *testing.T) {\n\tsize := 512\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tmem := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tmem[i] = 0xee\n\t}\n\tfor n := 0; n < size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ offset in mem\n\t\t\tMemclrBytes(mem[x : x+n])\n\t\t\tfor i := 0; i < x; i++ {\n\t\t\t\tif mem[i] != 0xee {\n\t\t\t\t\tt.Fatalf(\"overwrite prefix mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i := x; i < x+n; i++ {\n\t\t\t\tif mem[i] != 0 {\n\t\t\t\t\tt.Fatalf(\"failed clear mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t\tmem[i] = 0xee\n\t\t\t}\n\t\t\tfor i := x + n; i < size; i++ {\n\t\t\t\tif mem[i] != 0xee {\n\t\t\t\t\tt.Fatalf(\"overwrite suffix mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc bmMemclr(b *testing.B, n int) {\n\tx := make([]byte, n)\n\tb.SetBytes(int64(n))\n\tfor i := 0; i < b.N; i++ {\n\t\tMemclrBytes(x)\n\t}\n}\nfunc BenchmarkMemclr5(b *testing.B) { bmMemclr(b, 5) }\nfunc BenchmarkMemclr16(b *testing.B) { bmMemclr(b, 16) }\nfunc BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }\nfunc BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }\nfunc BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }\nfunc BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }\n\nfunc BenchmarkClearFat16(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [16 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat24(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [24 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat32(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [32 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat64(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [64 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat128(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [128 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat256(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [256 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat512(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [512 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat1024(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [1024 \/ 4]uint32\n\t\t_ = x\n\t}\n}\n\nfunc BenchmarkCopyFat16(b *testing.B) {\n\tvar x [16 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat24(b *testing.B) {\n\tvar x [24 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat32(b *testing.B) {\n\tvar x [32 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat64(b *testing.B) {\n\tvar x [64 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat128(b *testing.B) {\n\tvar x [128 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat256(b *testing.B) {\n\tvar x [256 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat512(b *testing.B) {\n\tvar x [512 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat1024(b *testing.B) {\n\tvar x [1024 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\n<commit_msg>runtime: add Benchmark[Clear|Copy]Fat[8|12]<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t. \"runtime\"\n\t\"testing\"\n)\n\nfunc TestMemmove(t *testing.T) {\n\tsize := 256\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tsrc := make([]byte, size)\n\tdst := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tsrc[i] = byte(128 + (i & 127))\n\t}\n\tfor i := 0; i < size; i++ {\n\t\tdst[i] = byte(i & 127)\n\t}\n\tfor n := 0; n <= size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ offset in src\n\t\t\tfor y := 0; y <= size-n; y++ { \/\/ offset in dst\n\t\t\t\tcopy(dst[y:y+n], src[x:x+n])\n\t\t\t\tfor i := 0; i < y; i++ {\n\t\t\t\t\tif dst[i] != byte(i&127) {\n\t\t\t\t\t\tt.Fatalf(\"prefix dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor i := y; i < y+n; i++ {\n\t\t\t\t\tif dst[i] != byte(128+((i-y+x)&127)) {\n\t\t\t\t\t\tt.Fatalf(\"copied dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t\tdst[i] = byte(i & 127) \/\/ reset dst\n\t\t\t\t}\n\t\t\t\tfor i := y + n; i < size; i++ {\n\t\t\t\t\tif dst[i] != byte(i&127) {\n\t\t\t\t\t\tt.Fatalf(\"suffix dst[%d] = %d\", i, dst[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMemmoveAlias(t *testing.T) {\n\tsize := 256\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tbuf := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tbuf[i] = byte(i)\n\t}\n\tfor n := 0; n <= size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ src offset\n\t\t\tfor y := 0; y <= size-n; y++ { \/\/ dst offset\n\t\t\t\tcopy(buf[y:y+n], buf[x:x+n])\n\t\t\t\tfor i := 0; i < y; i++ {\n\t\t\t\t\tif buf[i] != byte(i) {\n\t\t\t\t\t\tt.Fatalf(\"prefix buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor i := y; i < y+n; i++ {\n\t\t\t\t\tif buf[i] != byte(i-y+x) {\n\t\t\t\t\t\tt.Fatalf(\"copied buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t\tbuf[i] = byte(i) \/\/ reset buf\n\t\t\t\t}\n\t\t\t\tfor i := y + n; i < size; i++ {\n\t\t\t\t\tif buf[i] != byte(i) {\n\t\t\t\t\t\tt.Fatalf(\"suffix buf[%d] = %d\", i, buf[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc bmMemmove(b *testing.B, n int) {\n\tx := make([]byte, n)\n\ty := make([]byte, n)\n\tb.SetBytes(int64(n))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(x, y)\n\t}\n}\n\nfunc BenchmarkMemmove0(b *testing.B) { bmMemmove(b, 0) }\nfunc BenchmarkMemmove1(b *testing.B) { bmMemmove(b, 1) }\nfunc BenchmarkMemmove2(b *testing.B) { bmMemmove(b, 2) }\nfunc BenchmarkMemmove3(b *testing.B) { bmMemmove(b, 3) }\nfunc BenchmarkMemmove4(b *testing.B) { bmMemmove(b, 4) }\nfunc BenchmarkMemmove5(b *testing.B) { bmMemmove(b, 5) }\nfunc BenchmarkMemmove6(b *testing.B) { bmMemmove(b, 6) }\nfunc BenchmarkMemmove7(b *testing.B) { bmMemmove(b, 7) }\nfunc BenchmarkMemmove8(b *testing.B) { bmMemmove(b, 8) }\nfunc BenchmarkMemmove9(b *testing.B) { bmMemmove(b, 9) }\nfunc BenchmarkMemmove10(b *testing.B) { bmMemmove(b, 10) }\nfunc BenchmarkMemmove11(b *testing.B) { bmMemmove(b, 11) }\nfunc BenchmarkMemmove12(b *testing.B) { bmMemmove(b, 12) }\nfunc BenchmarkMemmove13(b *testing.B) { bmMemmove(b, 13) }\nfunc BenchmarkMemmove14(b *testing.B) { bmMemmove(b, 14) }\nfunc BenchmarkMemmove15(b *testing.B) { bmMemmove(b, 15) }\nfunc BenchmarkMemmove16(b *testing.B) { bmMemmove(b, 16) }\nfunc BenchmarkMemmove32(b *testing.B) { bmMemmove(b, 32) }\nfunc BenchmarkMemmove64(b *testing.B) { bmMemmove(b, 64) }\nfunc BenchmarkMemmove128(b *testing.B) { bmMemmove(b, 128) }\nfunc BenchmarkMemmove256(b *testing.B) { bmMemmove(b, 256) }\nfunc BenchmarkMemmove512(b *testing.B) { bmMemmove(b, 512) }\nfunc BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }\nfunc BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }\nfunc BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }\n\nfunc TestMemclr(t *testing.T) {\n\tsize := 512\n\tif testing.Short() {\n\t\tsize = 128 + 16\n\t}\n\tmem := make([]byte, size)\n\tfor i := 0; i < size; i++ {\n\t\tmem[i] = 0xee\n\t}\n\tfor n := 0; n < size; n++ {\n\t\tfor x := 0; x <= size-n; x++ { \/\/ offset in mem\n\t\t\tMemclrBytes(mem[x : x+n])\n\t\t\tfor i := 0; i < x; i++ {\n\t\t\t\tif mem[i] != 0xee {\n\t\t\t\t\tt.Fatalf(\"overwrite prefix mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i := x; i < x+n; i++ {\n\t\t\t\tif mem[i] != 0 {\n\t\t\t\t\tt.Fatalf(\"failed clear mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t\tmem[i] = 0xee\n\t\t\t}\n\t\t\tfor i := x + n; i < size; i++ {\n\t\t\t\tif mem[i] != 0xee {\n\t\t\t\t\tt.Fatalf(\"overwrite suffix mem[%d] = %d\", i, mem[i])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc bmMemclr(b *testing.B, n int) {\n\tx := make([]byte, n)\n\tb.SetBytes(int64(n))\n\tfor i := 0; i < b.N; i++ {\n\t\tMemclrBytes(x)\n\t}\n}\nfunc BenchmarkMemclr5(b *testing.B) { bmMemclr(b, 5) }\nfunc BenchmarkMemclr16(b *testing.B) { bmMemclr(b, 16) }\nfunc BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }\nfunc BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }\nfunc BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }\nfunc BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }\n\nfunc BenchmarkClearFat8(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [8 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat12(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [12 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat16(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [16 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat24(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [24 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat32(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [32 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat64(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [64 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat128(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [128 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat256(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [256 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat512(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [512 \/ 4]uint32\n\t\t_ = x\n\t}\n}\nfunc BenchmarkClearFat1024(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar x [1024 \/ 4]uint32\n\t\t_ = x\n\t}\n}\n\nfunc BenchmarkCopyFat8(b *testing.B) {\n\tvar x [8 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat12(b *testing.B) {\n\tvar x [12 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat16(b *testing.B) {\n\tvar x [16 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat24(b *testing.B) {\n\tvar x [24 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat32(b *testing.B) {\n\tvar x [32 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat64(b *testing.B) {\n\tvar x [64 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat128(b *testing.B) {\n\tvar x [128 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat256(b *testing.B) {\n\tvar x [256 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat512(b *testing.B) {\n\tvar x [512 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\nfunc BenchmarkCopyFat1024(b *testing.B) {\n\tvar x [1024 \/ 4]uint32\n\tfor i := 0; i < b.N; i++ {\n\t\ty := x\n\t\t_ = y\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings_test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nfunc ExampleFields() {\n\tfmt.Printf(\"Fields are: %q\", strings.Fields(\" foo bar baz \"))\n\t\/\/ Output: Fields are: [\"foo\" \"bar\" \"baz\"]\n}\n\nfunc ExampleFieldsFunc() {\n\tf := func(c rune) bool {\n\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t}\n\tfmt.Printf(\"Fields are: %q\", strings.FieldsFunc(\" foo1;bar2,baz3...\", f))\n\t\/\/ Output: Fields are: [\"foo1\" \"bar2\" \"baz3\"]\n}\n\nfunc ExampleContains() {\n\tfmt.Println(strings.Contains(\"seafood\", \"foo\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"bar\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"\"))\n\tfmt.Println(strings.Contains(\"\", \"\"))\n\t\/\/ Output:\n\t\/\/ true\n\t\/\/ false\n\t\/\/ true\n\t\/\/ true\n}\n\nfunc ExampleContainsAny() {\n\tfmt.Println(strings.ContainsAny(\"team\", \"i\"))\n\tfmt.Println(strings.ContainsAny(\"failure\", \"u & i\"))\n\tfmt.Println(strings.ContainsAny(\"foo\", \"\"))\n\tfmt.Println(strings.ContainsAny(\"\", \"\"))\n\t\/\/ Output:\n\t\/\/ false\n\t\/\/ true\n\t\/\/ false\n\t\/\/ false\n}\n\nfunc ExampleCount() {\n\tfmt.Println(strings.Count(\"cheese\", \"e\"))\n\tfmt.Println(strings.Count(\"five\", \"\")) \/\/ before & after each rune\n\t\/\/ Output:\n\t\/\/ 3\n\t\/\/ 5\n}\n\nfunc ExampleEqualFold() {\n\tfmt.Println(strings.EqualFold(\"Go\", \"go\"))\n\t\/\/ Output: true\n}\n\nfunc ExampleIndex() {\n\tfmt.Println(strings.Index(\"chicken\", \"ken\"))\n\tfmt.Println(strings.Index(\"chicken\", \"dmr\"))\n\t\/\/ Output:\n\t\/\/ 4\n\t\/\/ -1\n}\n\nfunc ExampleIndexFunc() {\n\tf := func(c rune) bool {\n\t\treturn unicode.Is(unicode.Han, c)\n\t}\n\tfmt.Println(strings.IndexFunc(\"Hello, 世界\", f))\n\tfmt.Println(strings.IndexFunc(\"Hello, world\", f))\n\t\/\/ Output:\n\t\/\/ 7\n\t\/\/ -1\n}\n\nfunc ExampleIndexRune() {\n\tfmt.Println(strings.IndexRune(\"chicken\", 'k'))\n\tfmt.Println(strings.IndexRune(\"chicken\", 'd'))\n\t\/\/ Output:\n\t\/\/ 4\n\t\/\/ -1\n}\n\nfunc ExampleLastIndex() {\n\tfmt.Println(strings.Index(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"rodent\"))\n\t\/\/ Output:\n\t\/\/ 0\n\t\/\/ 3\n\t\/\/ -1\n}\n\nfunc ExampleJoin() {\n\ts := []string{\"foo\", \"bar\", \"baz\"}\n\tfmt.Println(strings.Join(s, \", \"))\n\t\/\/ Output: foo, bar, baz\n}\n\nfunc ExampleRepeat() {\n\tfmt.Println(\"ba\" + strings.Repeat(\"na\", 2))\n\t\/\/ Output: banana\n}\n\nfunc ExampleReplace() {\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"k\", \"ky\", 2))\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"oink\", \"moo\", -1))\n\t\/\/ Output:\n\t\/\/ oinky oinky oink\n\t\/\/ moo moo moo\n}\n\nfunc ExampleSplit() {\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a,b,c\", \",\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a man a plan a canal panama\", \"a \"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\" xyz \", \"\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"\", \"Bernardo O'Higgins\"))\n\t\/\/ Output:\n\t\/\/ [\"a\" \"b\" \"c\"]\n\t\/\/ [\"\" \"man \" \"plan \" \"canal panama\"]\n\t\/\/ [\" \" \"x\" \"y\" \"z\" \" \"]\n\t\/\/ [\"\"]\n}\n\nfunc ExampleSplitN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitN(\"a,b,c\", \",\", 2))\n\tz := strings.SplitN(\"a,b,c\", \",\", 0)\n\tfmt.Printf(\"%q (nil = %v)\\n\", z, z == nil)\n\t\/\/ Output:\n\t\/\/ [\"a\" \"b,c\"]\n\t\/\/ [] (nil = true)\n}\n\nfunc ExampleSplitAfter() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfter(\"a,b,c\", \",\"))\n\t\/\/ Output: [\"a,\" \"b,\" \"c\"]\n}\n\nfunc ExampleSplitAfterN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfterN(\"a,b,c\", \",\", 2))\n\t\/\/ Output: [\"a,\" \"b,c\"]\n}\n\nfunc ExampleTitle() {\n\tfmt.Println(strings.Title(\"her royal highness\"))\n\t\/\/ Output: Her Royal Highness\n}\n\nfunc ExampleToTitle() {\n\tfmt.Println(strings.ToTitle(\"loud noises\"))\n\tfmt.Println(strings.ToTitle(\"хлеб\"))\n\t\/\/ Output:\n\t\/\/ LOUD NOISES\n\t\/\/ ХЛЕБ\n}\n\nfunc ExampleTrim() {\n\tfmt.Printf(\"[%q]\", strings.Trim(\" !!! Achtung! Achtung! !!! \", \"! \"))\n\t\/\/ Output: [\"Achtung! Achtung\"]\n}\n\nfunc ExampleMap() {\n\trot13 := func(r rune) rune {\n\t\tswitch {\n\t\tcase r >= 'A' && r <= 'Z':\n\t\t\treturn 'A' + (r-'A'+13)%26\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn 'a' + (r-'a'+13)%26\n\t\t}\n\t\treturn r\n\t}\n\tfmt.Println(strings.Map(rot13, \"'Twas brillig and the slithy gopher...\"))\n\t\/\/ Output: 'Gjnf oevyyvt naq gur fyvgul tbcure...\n}\n\nfunc ExampleTrimSpace() {\n\tfmt.Println(strings.TrimSpace(\" \\t\\n a lone gopher \\n\\t\\r\\n\"))\n\t\/\/ Output: a lone gopher\n}\n\nfunc ExampleNewReplacer() {\n\tr := strings.NewReplacer(\"<\", \"<\", \">\", \">\")\n\tfmt.Println(r.Replace(\"This is <b>HTML<\/b>!\"))\n\t\/\/ Output: This is <b>HTML<\/b>!\n}\n\nfunc ExampleToUpper() {\n\tfmt.Println(strings.ToUpper(\"Gopher\"))\n\t\/\/ Output: GOPHER\n}\n\nfunc ExampleToLower() {\n\tfmt.Println(strings.ToLower(\"Gopher\"))\n\t\/\/ Output: gopher\n}\n\nfunc ExampleTrimSuffix() {\n\tvar s = \"Hello, goodbye, etc!\"\n\ts = strings.TrimSuffix(s, \"goodbye, etc!\")\n\ts = strings.TrimSuffix(s, \"planet\")\n\tfmt.Print(s, \"world!\")\n\t\/\/ Output: Hello, world!\n}\n\nfunc ExampleTrimPrefix() {\n\tvar s = \"Goodbye,, world!\"\n\ts = strings.TrimPrefix(s, \"Goodbye,\")\n\ts = strings.TrimPrefix(s, \"Howdy,\")\n\tfmt.Print(\"Hello\" + s)\n\t\/\/ Output: Hello, world!\n}\n<commit_msg>strings: Add example function for IndexAny<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings_test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nfunc ExampleFields() {\n\tfmt.Printf(\"Fields are: %q\", strings.Fields(\" foo bar baz \"))\n\t\/\/ Output: Fields are: [\"foo\" \"bar\" \"baz\"]\n}\n\nfunc ExampleFieldsFunc() {\n\tf := func(c rune) bool {\n\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t}\n\tfmt.Printf(\"Fields are: %q\", strings.FieldsFunc(\" foo1;bar2,baz3...\", f))\n\t\/\/ Output: Fields are: [\"foo1\" \"bar2\" \"baz3\"]\n}\n\nfunc ExampleContains() {\n\tfmt.Println(strings.Contains(\"seafood\", \"foo\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"bar\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"\"))\n\tfmt.Println(strings.Contains(\"\", \"\"))\n\t\/\/ Output:\n\t\/\/ true\n\t\/\/ false\n\t\/\/ true\n\t\/\/ true\n}\n\nfunc ExampleContainsAny() {\n\tfmt.Println(strings.ContainsAny(\"team\", \"i\"))\n\tfmt.Println(strings.ContainsAny(\"failure\", \"u & i\"))\n\tfmt.Println(strings.ContainsAny(\"foo\", \"\"))\n\tfmt.Println(strings.ContainsAny(\"\", \"\"))\n\t\/\/ Output:\n\t\/\/ false\n\t\/\/ true\n\t\/\/ false\n\t\/\/ false\n}\n\nfunc ExampleCount() {\n\tfmt.Println(strings.Count(\"cheese\", \"e\"))\n\tfmt.Println(strings.Count(\"five\", \"\")) \/\/ before & after each rune\n\t\/\/ Output:\n\t\/\/ 3\n\t\/\/ 5\n}\n\nfunc ExampleEqualFold() {\n\tfmt.Println(strings.EqualFold(\"Go\", \"go\"))\n\t\/\/ Output: true\n}\n\nfunc ExampleIndex() {\n\tfmt.Println(strings.Index(\"chicken\", \"ken\"))\n\tfmt.Println(strings.Index(\"chicken\", \"dmr\"))\n\t\/\/ Output:\n\t\/\/ 4\n\t\/\/ -1\n}\n\nfunc ExampleIndexFunc() {\n\tf := func(c rune) bool {\n\t\treturn unicode.Is(unicode.Han, c)\n\t}\n\tfmt.Println(strings.IndexFunc(\"Hello, 世界\", f))\n\tfmt.Println(strings.IndexFunc(\"Hello, world\", f))\n\t\/\/ Output:\n\t\/\/ 7\n\t\/\/ -1\n}\n\nfunc ExampleIndexAny() {\n\tfmt.Println(strings.IndexAny(\"chicken\", \"aeiouy\"))\n\tfmt.Println(strings.IndexAny(\"crwth\", \"aeiouy\"))\n\t\/\/ Output:\n\t\/\/ 2\n\t\/\/ -1\n}\n\nfunc ExampleIndexRune() {\n\tfmt.Println(strings.IndexRune(\"chicken\", 'k'))\n\tfmt.Println(strings.IndexRune(\"chicken\", 'd'))\n\t\/\/ Output:\n\t\/\/ 4\n\t\/\/ -1\n}\n\nfunc ExampleLastIndex() {\n\tfmt.Println(strings.Index(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"rodent\"))\n\t\/\/ Output:\n\t\/\/ 0\n\t\/\/ 3\n\t\/\/ -1\n}\n\nfunc ExampleJoin() {\n\ts := []string{\"foo\", \"bar\", \"baz\"}\n\tfmt.Println(strings.Join(s, \", \"))\n\t\/\/ Output: foo, bar, baz\n}\n\nfunc ExampleRepeat() {\n\tfmt.Println(\"ba\" + strings.Repeat(\"na\", 2))\n\t\/\/ Output: banana\n}\n\nfunc ExampleReplace() {\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"k\", \"ky\", 2))\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"oink\", \"moo\", -1))\n\t\/\/ Output:\n\t\/\/ oinky oinky oink\n\t\/\/ moo moo moo\n}\n\nfunc ExampleSplit() {\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a,b,c\", \",\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a man a plan a canal panama\", \"a \"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\" xyz \", \"\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"\", \"Bernardo O'Higgins\"))\n\t\/\/ Output:\n\t\/\/ [\"a\" \"b\" \"c\"]\n\t\/\/ [\"\" \"man \" \"plan \" \"canal panama\"]\n\t\/\/ [\" \" \"x\" \"y\" \"z\" \" \"]\n\t\/\/ [\"\"]\n}\n\nfunc ExampleSplitN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitN(\"a,b,c\", \",\", 2))\n\tz := strings.SplitN(\"a,b,c\", \",\", 0)\n\tfmt.Printf(\"%q (nil = %v)\\n\", z, z == nil)\n\t\/\/ Output:\n\t\/\/ [\"a\" \"b,c\"]\n\t\/\/ [] (nil = true)\n}\n\nfunc ExampleSplitAfter() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfter(\"a,b,c\", \",\"))\n\t\/\/ Output: [\"a,\" \"b,\" \"c\"]\n}\n\nfunc ExampleSplitAfterN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfterN(\"a,b,c\", \",\", 2))\n\t\/\/ Output: [\"a,\" \"b,c\"]\n}\n\nfunc ExampleTitle() {\n\tfmt.Println(strings.Title(\"her royal highness\"))\n\t\/\/ Output: Her Royal Highness\n}\n\nfunc ExampleToTitle() {\n\tfmt.Println(strings.ToTitle(\"loud noises\"))\n\tfmt.Println(strings.ToTitle(\"хлеб\"))\n\t\/\/ Output:\n\t\/\/ LOUD NOISES\n\t\/\/ ХЛЕБ\n}\n\nfunc ExampleTrim() {\n\tfmt.Printf(\"[%q]\", strings.Trim(\" !!! Achtung! Achtung! !!! \", \"! \"))\n\t\/\/ Output: [\"Achtung! Achtung\"]\n}\n\nfunc ExampleMap() {\n\trot13 := func(r rune) rune {\n\t\tswitch {\n\t\tcase r >= 'A' && r <= 'Z':\n\t\t\treturn 'A' + (r-'A'+13)%26\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn 'a' + (r-'a'+13)%26\n\t\t}\n\t\treturn r\n\t}\n\tfmt.Println(strings.Map(rot13, \"'Twas brillig and the slithy gopher...\"))\n\t\/\/ Output: 'Gjnf oevyyvt naq gur fyvgul tbcure...\n}\n\nfunc ExampleTrimSpace() {\n\tfmt.Println(strings.TrimSpace(\" \\t\\n a lone gopher \\n\\t\\r\\n\"))\n\t\/\/ Output: a lone gopher\n}\n\nfunc ExampleNewReplacer() {\n\tr := strings.NewReplacer(\"<\", \"<\", \">\", \">\")\n\tfmt.Println(r.Replace(\"This is <b>HTML<\/b>!\"))\n\t\/\/ Output: This is <b>HTML<\/b>!\n}\n\nfunc ExampleToUpper() {\n\tfmt.Println(strings.ToUpper(\"Gopher\"))\n\t\/\/ Output: GOPHER\n}\n\nfunc ExampleToLower() {\n\tfmt.Println(strings.ToLower(\"Gopher\"))\n\t\/\/ Output: gopher\n}\n\nfunc ExampleTrimSuffix() {\n\tvar s = \"Hello, goodbye, etc!\"\n\ts = strings.TrimSuffix(s, \"goodbye, etc!\")\n\ts = strings.TrimSuffix(s, \"planet\")\n\tfmt.Print(s, \"world!\")\n\t\/\/ Output: Hello, world!\n}\n\nfunc ExampleTrimPrefix() {\n\tvar s = \"Goodbye,, world!\"\n\ts = strings.TrimPrefix(s, \"Goodbye,\")\n\ts = strings.TrimPrefix(s, \"Howdy,\")\n\tfmt.Print(\"Hello\" + s)\n\t\/\/ Output: Hello, world!\n}\n<|endoftext|>"} {"text":"<commit_before>package jobs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errgo\"\n\n\t\"arvika.pulcy.com\/pulcy\/deployit\/units\"\n)\n\nvar (\n\ttaskNamePattern = regexp.MustCompile(`^([a-z0-9_]{2,30})$`)\n)\n\ntype TaskName string\n\nfunc (tn TaskName) String() string {\n\treturn string(tn)\n}\n\nfunc (tn TaskName) Validate() error {\n\tif !taskNamePattern.MatchString(string(tn)) {\n\t\treturn maskAny(errgo.WithCausef(nil, InvalidNameError, \"task name must match '%s', got '%s'\", taskNamePattern, tn))\n\t}\n\treturn nil\n}\n\ntype Task struct {\n\tName TaskName `json:\"name\", maspstructure:\"-\"`\n\tgroup *TaskGroup `json:\"-\", mapstructure:\"-\"`\n\tCount uint `json:\"-\"` \/\/ This value is used during parsing only\n\tGlobal bool `json:\"-\"` \/\/ This value is used during parsing only\n\n\tImage DockerImage `json:\"image\"`\n\tVolumesFrom []TaskName `json:\"volumes-from,omitempty\"`\n\tVolumes []string `json:\"volumes,omitempty\"`\n\tArgs []string `json:\"args,omitempty\"`\n\tEnvironment map[string]string `json:\"environment,omitempty\"`\n\tPorts []string `json:\"ports,omitempty\"`\n\tFrontEnds []FrontEnd `json:\"frontends,omitempty\"`\n}\n\ntype TaskList []*Task\n\n\/\/ Check for errors\nfunc (t *Task) Validate() error {\n\tif err := t.Name.Validate(); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tfor _, name := range t.VolumesFrom {\n\t\t_, err := t.group.Task(name)\n\t\tif err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ createUnits creates all units needed to run this task.\nfunc (t *Task) createUnits(scalingGroup uint) ([]*units.Unit, error) {\n\tunits := []*units.Unit{}\n\tmain, err := t.createMainUnit(scalingGroup)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\tunits = append(units, main)\n\n\treturn units, nil\n}\n\n\/\/ createMainUnit\nfunc (t *Task) createMainUnit(scalingGroup uint) (*units.Unit, error) {\n\tname := t.containerName(scalingGroup)\n\tserviceName := t.serviceName()\n\timage := t.Image.String()\n\texecStart := []string{\n\t\t\"\/usr\/bin\/docker\",\n\t\t\"run\",\n\t\t\"--rm\",\n\t\tfmt.Sprintf(\"--name %s\", name),\n\t}\n\tif len(t.Ports) > 0 {\n\t\tfor _, p := range t.Ports {\n\t\t\texecStart = append(execStart, fmt.Sprintf(\"-p %s\", p))\n\t\t}\n\t} else {\n\t\texecStart = append(execStart, \"-P\")\n\t}\n\tfor _, v := range t.Volumes {\n\t\texecStart = append(execStart, fmt.Sprintf(\"-v %s\", v))\n\t}\n\tfor _, name := range t.VolumesFrom {\n\t\tother, err := t.group.Task(name)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\texecStart = append(execStart, fmt.Sprintf(\"--volumes-from %s\", other.containerName(scalingGroup)))\n\t}\n\tfor k, v := range t.Environment {\n\t\texecStart = append(execStart, \"-e \"+strconv.Quote(fmt.Sprintf(\"%s=%s\", k, v)))\n\t}\n\texecStart = append(execStart, fmt.Sprintf(\"-e SERVICE_NAME=%s\", serviceName)) \/\/ Support registrator\n\texecStart = append(execStart, image)\n\texecStart = append(execStart, t.Args...)\n\tmain := &units.Unit{\n\t\tName: t.unitName(scalingGroup),\n\t\tFullName: t.unitName(scalingGroup) + \".service\",\n\t\tDescription: fmt.Sprintf(\"Main unit for %s slice %v\", t.fullName(), scalingGroup),\n\t\tType: \"service\",\n\t\tScalable: t.group.IsScalable(),\n\t\tScalingGroup: scalingGroup,\n\t\tExecOptions: units.NewExecOptions(execStart...),\n\t\tFleetOptions: units.NewFleetOptions(),\n\t}\n\t\/\/main.FleetOptions.IsGlobal = ds.global\n\tmain.ExecOptions.ExecStartPre = []string{\n\t\tfmt.Sprintf(\"\/usr\/bin\/docker pull %s\", image),\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker stop -t %v %s\", main.ExecOptions.ContainerTimeoutStopSec, name),\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker rm -f %s\", name),\n\t}\n\tmain.ExecOptions.ExecStop = fmt.Sprintf(\"-\/usr\/bin\/docker stop -t %v %s\", main.ExecOptions.ContainerTimeoutStopSec, name)\n\tmain.ExecOptions.ExecStopPost = []string{\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker rm -f %s\", name),\n\t}\n\tmain.FleetOptions.IsGlobal = t.group.Global\n\n\tif err := t.addFrontEndRegistration(main); err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\treturn main, nil\n}\n\n\/\/ Gets the full name of this task: job\/taskgroup\/task\nfunc (t *Task) fullName() string {\n\treturn fmt.Sprintf(\"%s\/%s\", t.group.fullName(), t.Name)\n}\n\n\/\/ unitName returns the name of the systemd unit for this task.\nfunc (t *Task) unitName(scalingGroup uint) string {\n\tbase := strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n\tif !t.group.IsScalable() {\n\t\treturn base\n\t}\n\treturn fmt.Sprintf(\"%s@%v\", base, scalingGroup)\n}\n\n\/\/ containerName returns the name of the docker contained used for this task.\nfunc (t *Task) containerName(scalingGroup uint) string {\n\tbase := strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n\treturn fmt.Sprintf(\"%s-%v\", base, scalingGroup)\n}\n\n\/\/ serviceName returns the name used to register this service.\nfunc (t *Task) serviceName() string {\n\treturn strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n}\n\ntype frontendRecord struct {\n\tSelectors []frontendSelectorRecord `json:\"selectors\"`\n\tService string `json:\"service,omitempty\"`\n}\n\ntype frontendSelectorRecord struct {\n\tDomain string `json:\"domain,omitempty\"`\n\tPathPrefix string `json:\"path-prefix,omitempty\"`\n}\n\n\/\/ addFrontEndRegistration adds registration code for frontends to the given units\nfunc (t *Task) addFrontEndRegistration(main *units.Unit) error {\n\tif len(t.FrontEnds) == 0 {\n\t\treturn nil\n\t}\n\tkey := \"\/pulcy\/frontend\/\" + t.serviceName()\n\trecord := frontendRecord{\n\t\tService: t.serviceName(),\n\t}\n\tfor _, fr := range t.FrontEnds {\n\t\trecord.Selectors = append(record.Selectors, frontendSelectorRecord{\n\t\t\tDomain: fr.Domain,\n\t\t\tPathPrefix: fr.PathPrefix,\n\t\t})\n\t}\n\tjson, err := json.Marshal(&record)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\tmain.ExecOptions.ExecStartPost = append(main.ExecOptions.ExecStartPost,\n\t\tfmt.Sprintf(\"\/bin\/sh -c 'echo %s | base64 -d | \/usr\/bin\/etcdctl set %s'\", base64.StdEncoding.EncodeToString(json), key),\n\t)\n\treturn nil\n}\n\nfunc (l TaskList) Len() int {\n\treturn len(l)\n}\n\nfunc (l TaskList) Less(i, j int) bool {\n\treturn bytes.Compare([]byte(l[i].Name.String()), []byte(l[j].Name.String())) < 0\n}\n\nfunc (l TaskList) Swap(i, j int) {\n\ttmp := l[i]\n\tl[i] = l[j]\n\tl[j] = tmp\n}\n<commit_msg>Added Conflicts rule<commit_after>package jobs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errgo\"\n\n\t\"arvika.pulcy.com\/pulcy\/deployit\/units\"\n)\n\nvar (\n\ttaskNamePattern = regexp.MustCompile(`^([a-z0-9_]{2,30})$`)\n)\n\ntype TaskName string\n\nfunc (tn TaskName) String() string {\n\treturn string(tn)\n}\n\nfunc (tn TaskName) Validate() error {\n\tif !taskNamePattern.MatchString(string(tn)) {\n\t\treturn maskAny(errgo.WithCausef(nil, InvalidNameError, \"task name must match '%s', got '%s'\", taskNamePattern, tn))\n\t}\n\treturn nil\n}\n\ntype Task struct {\n\tName TaskName `json:\"name\", maspstructure:\"-\"`\n\tgroup *TaskGroup `json:\"-\", mapstructure:\"-\"`\n\tCount uint `json:\"-\"` \/\/ This value is used during parsing only\n\tGlobal bool `json:\"-\"` \/\/ This value is used during parsing only\n\n\tImage DockerImage `json:\"image\"`\n\tVolumesFrom []TaskName `json:\"volumes-from,omitempty\"`\n\tVolumes []string `json:\"volumes,omitempty\"`\n\tArgs []string `json:\"args,omitempty\"`\n\tEnvironment map[string]string `json:\"environment,omitempty\"`\n\tPorts []string `json:\"ports,omitempty\"`\n\tFrontEnds []FrontEnd `json:\"frontends,omitempty\"`\n}\n\ntype TaskList []*Task\n\n\/\/ Check for errors\nfunc (t *Task) Validate() error {\n\tif err := t.Name.Validate(); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tfor _, name := range t.VolumesFrom {\n\t\t_, err := t.group.Task(name)\n\t\tif err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ createUnits creates all units needed to run this task.\nfunc (t *Task) createUnits(scalingGroup uint) ([]*units.Unit, error) {\n\tunits := []*units.Unit{}\n\tmain, err := t.createMainUnit(scalingGroup)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\tunits = append(units, main)\n\n\treturn units, nil\n}\n\n\/\/ createMainUnit\nfunc (t *Task) createMainUnit(scalingGroup uint) (*units.Unit, error) {\n\tname := t.containerName(scalingGroup)\n\tserviceName := t.serviceName()\n\timage := t.Image.String()\n\texecStart := []string{\n\t\t\"\/usr\/bin\/docker\",\n\t\t\"run\",\n\t\t\"--rm\",\n\t\tfmt.Sprintf(\"--name %s\", name),\n\t}\n\tif len(t.Ports) > 0 {\n\t\tfor _, p := range t.Ports {\n\t\t\texecStart = append(execStart, fmt.Sprintf(\"-p %s\", p))\n\t\t}\n\t} else {\n\t\texecStart = append(execStart, \"-P\")\n\t}\n\tfor _, v := range t.Volumes {\n\t\texecStart = append(execStart, fmt.Sprintf(\"-v %s\", v))\n\t}\n\tfor _, name := range t.VolumesFrom {\n\t\tother, err := t.group.Task(name)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\texecStart = append(execStart, fmt.Sprintf(\"--volumes-from %s\", other.containerName(scalingGroup)))\n\t}\n\tfor k, v := range t.Environment {\n\t\texecStart = append(execStart, \"-e \"+strconv.Quote(fmt.Sprintf(\"%s=%s\", k, v)))\n\t}\n\texecStart = append(execStart, fmt.Sprintf(\"-e SERVICE_NAME=%s\", serviceName)) \/\/ Support registrator\n\texecStart = append(execStart, image)\n\texecStart = append(execStart, t.Args...)\n\tmain := &units.Unit{\n\t\tName: t.unitName(strconv.Itoa(int(scalingGroup))),\n\t\tFullName: t.unitName(strconv.Itoa(int(scalingGroup))) + \".service\",\n\t\tDescription: fmt.Sprintf(\"Main unit for %s slice %v\", t.fullName(), scalingGroup),\n\t\tType: \"service\",\n\t\tScalable: t.group.IsScalable(),\n\t\tScalingGroup: scalingGroup,\n\t\tExecOptions: units.NewExecOptions(execStart...),\n\t\tFleetOptions: units.NewFleetOptions(),\n\t}\n\t\/\/main.FleetOptions.IsGlobal = ds.global\n\tmain.ExecOptions.ExecStartPre = []string{\n\t\tfmt.Sprintf(\"\/usr\/bin\/docker pull %s\", image),\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker stop -t %v %s\", main.ExecOptions.ContainerTimeoutStopSec, name),\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker rm -f %s\", name),\n\t}\n\tmain.ExecOptions.ExecStop = fmt.Sprintf(\"-\/usr\/bin\/docker stop -t %v %s\", main.ExecOptions.ContainerTimeoutStopSec, name)\n\tmain.ExecOptions.ExecStopPost = []string{\n\t\tfmt.Sprintf(\"-\/usr\/bin\/docker rm -f %s\", name),\n\t}\n\tmain.FleetOptions.IsGlobal = t.group.Global\n\tif t.group.IsScalable() {\n\t\tmain.FleetOptions.Conflicts(t.unitName(\"*\") + \".service\")\n\t}\n\n\tif err := t.addFrontEndRegistration(main); err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\treturn main, nil\n}\n\n\/\/ Gets the full name of this task: job\/taskgroup\/task\nfunc (t *Task) fullName() string {\n\treturn fmt.Sprintf(\"%s\/%s\", t.group.fullName(), t.Name)\n}\n\n\/\/ unitName returns the name of the systemd unit for this task.\nfunc (t *Task) unitName(scalingGroup string) string {\n\tbase := strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n\tif !t.group.IsScalable() {\n\t\treturn base\n\t}\n\treturn fmt.Sprintf(\"%s@%s\", base, scalingGroup)\n}\n\n\/\/ containerName returns the name of the docker contained used for this task.\nfunc (t *Task) containerName(scalingGroup uint) string {\n\tbase := strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n\tif !t.group.IsScalable() {\n\t\treturn base\n\t}\n\treturn fmt.Sprintf(\"%s-%v\", base, scalingGroup)\n}\n\n\/\/ serviceName returns the name used to register this service.\nfunc (t *Task) serviceName() string {\n\treturn strings.Replace(t.fullName(), \"\/\", \"-\", -1)\n}\n\ntype frontendRecord struct {\n\tSelectors []frontendSelectorRecord `json:\"selectors\"`\n\tService string `json:\"service,omitempty\"`\n}\n\ntype frontendSelectorRecord struct {\n\tDomain string `json:\"domain,omitempty\"`\n\tPathPrefix string `json:\"path-prefix,omitempty\"`\n}\n\n\/\/ addFrontEndRegistration adds registration code for frontends to the given units\nfunc (t *Task) addFrontEndRegistration(main *units.Unit) error {\n\tif len(t.FrontEnds) == 0 {\n\t\treturn nil\n\t}\n\tkey := \"\/pulcy\/frontend\/\" + t.serviceName()\n\trecord := frontendRecord{\n\t\tService: t.serviceName(),\n\t}\n\tfor _, fr := range t.FrontEnds {\n\t\trecord.Selectors = append(record.Selectors, frontendSelectorRecord{\n\t\t\tDomain: fr.Domain,\n\t\t\tPathPrefix: fr.PathPrefix,\n\t\t})\n\t}\n\tjson, err := json.Marshal(&record)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\tmain.ExecOptions.ExecStartPost = append(main.ExecOptions.ExecStartPost,\n\t\tfmt.Sprintf(\"\/bin\/sh -c 'echo %s | base64 -d | \/usr\/bin\/etcdctl set %s'\", base64.StdEncoding.EncodeToString(json), key),\n\t)\n\treturn nil\n}\n\nfunc (l TaskList) Len() int {\n\treturn len(l)\n}\n\nfunc (l TaskList) Less(i, j int) bool {\n\treturn bytes.Compare([]byte(l[i].Name.String()), []byte(l[j].Name.String())) < 0\n}\n\nfunc (l TaskList) Swap(i, j int) {\n\ttmp := l[i]\n\tl[i] = l[j]\n\tl[j] = tmp\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gcfg reads \"gitconfig-like\" text-based configuration files with\n\/\/ \"name=value\" pairs grouped into sections (gcfg files).\n\/\/ Support for writing gcfg files may be added later.\n\/\/\n\/\/ See ReadInto and the examples to get an idea of how to use it.\n\/\/\n\/\/ This package is still a work in progress, and both the supported syntax and\n\/\/ the API is subject to change.\n\/\/\n\/\/ The syntax is based on that used by git config:\n\/\/ http:\/\/git-scm.com\/docs\/git-config#_syntax .\n\/\/ Note that the gcfg syntax may diverge from that of git config in the future\n\/\/ to a limited degree.\n\/\/ Currently planned differences (apart from TODOs listed below) are:\n\/\/ - gcfg files must use UTF-8 encoding and must not contain the 0 byte\n\/\/ - include and \"path\" type is not supported at the package level\n\/\/ - `[sec.sub]` format is not allowed (deprecated in gitconfig)\n\/\/ - `[sec \"\"]` is not allowed\n\/\/ - `[sec]` is the equivalent (section name \"sec\" and empty subsection name)\n\/\/ - within a single file, definitions must be consecutive for each:\n\/\/ - section: '[secA]' -> '[secB]' -> '[secA]' is an error\n\/\/ - subsection: '[sec \"A\"]' -> '[sec \"B\"]' -> '[sec \"A\"]' is an error\n\/\/ - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error\n\/\/\n\/\/ The package may be usable for handling some of the various \"INI file\" formats\n\/\/ used by some programs and libraries, but achieving or maintaining\n\/\/ compatibility with any of those is not a primary concern.\n\/\/\n\/\/ TODO:\n\/\/ - docs\n\/\/ - format spec\n\/\/ - parsing\n\/\/ - define internal representation structure\n\/\/ - support multi-value variables\n\/\/ - non-regexp based parser\n\/\/ - support partially quoted strings\n\/\/ - support escaping in strings\n\/\/ - support multiple inputs (readers, strings, files)\n\/\/ - support declaring encoding (?)\n\/\/ - support pointer fields\n\/\/ - scanEnum\n\/\/ - should use longest match (?)\n\/\/ - support matching on unique prefix (?)\n\/\/ - writing gcfg files\n\/\/ - error handling\n\/\/ - include error context\n\/\/ - more helpful error messages\n\/\/ - error types \/ codes?\n\/\/ - limit input size?\n\/\/\npackage gcfg\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\treCmnt = regexp.MustCompile(`^([^;#\"]*)[;#].*$`)\n\treCmntQ = regexp.MustCompile(`^([^;#\"]*\"[^\"]*\"[^;#\"]*)[;#].*$`)\n\treBlank = regexp.MustCompile(`^\\s*$`)\n\treSect = regexp.MustCompile(`^\\s*\\[\\s*([^\"\\s]*)\\s*\\]\\s*$`)\n\treSectSub = regexp.MustCompile(`^\\s*\\[\\s*([^\"\\s]*)\\s*\"([^\"]+)\"\\s*\\]\\s*$`)\n\treVar = regexp.MustCompile(`^\\s*([^\"=\\s]+)\\s*=\\s*([^\"\\s]*)\\s*$`)\n\treVarQ = regexp.MustCompile(`^\\s*([^\"=\\s]+)\\s*=\\s*\"([^\"\\n\\\\]*)\"\\s*$`)\n\treVarDflt = regexp.MustCompile(`^\\s*\\b(.*)\\b\\s*$`)\n)\n\nconst (\n\t\/\/ Default value string in case a value for a variable isn't provided.\n\tdefaultValue = \"true\"\n)\n\ntype gbool bool\n\nvar gboolValues = map[string]interface{}{\n\t\"true\": true, \"yes\": true, \"on\": true, \"1\": true,\n\t\"false\": false, \"no\": false, \"off\": false, \"0\": false}\n\nfunc (b *gbool) Scan(state fmt.ScanState, verb rune) error {\n\tv, err := scanEnum(state, gboolValues, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbb, _ := v.(bool) \/\/ cannot be non-bool\n\t*b = gbool(bb)\n\treturn nil\n}\n\nfunc fieldFold(v reflect.Value, name string) reflect.Value {\n\tn := strings.Replace(name, \"-\", \"_\", -1)\n\treturn v.FieldByNameFunc(func(fieldName string) bool {\n\t\treturn strings.EqualFold(n, fieldName)\n\t})\n}\n\nfunc set(cfg interface{}, sect, sub, name, value string) error {\n\tvDest := reflect.ValueOf(cfg).Elem()\n\tvSect := fieldFold(vDest, sect)\n\tif vSect.Kind() == reflect.Map {\n\t\tif vSect.IsNil() {\n\t\t\tvSect.Set(reflect.MakeMap(vSect.Type()))\n\t\t}\n\t\tk := reflect.ValueOf(sub)\n\t\tpv := vSect.MapIndex(k)\n\t\tif !pv.IsValid() {\n\t\t\tvType := vSect.Type().Elem().Elem()\n\t\t\tpv = reflect.New(vType)\n\t\t\tvSect.SetMapIndex(k, pv)\n\t\t}\n\t\tvSect = pv.Elem()\n\t} else if sub != \"\" {\n\t\treturn fmt.Errorf(\"expected map; section %q subsection %q\", sect, sub)\n\t}\n\tvName := fieldFold(vSect, name)\n\tvAddr := vName.Addr().Interface()\n\tswitch v := vAddr.(type) {\n\tcase *string:\n\t\t*v = value\n\t\treturn nil\n\tcase *bool:\n\t\tvAddr = (*gbool)(v)\n\t}\n\t\/\/ attempt to read an extra rune to make sure the value is consumed \n\tvar r rune\n\tn, err := fmt.Sscanf(value, \"%v%c\", vAddr, &r)\n\tswitch {\n\tcase n < 1 || n == 1 && err != io.EOF:\n\t\treturn fmt.Errorf(\"failed to parse %q as %#v: parse error %v\", value,\n\t\t\tvName.Type(), err)\n\tcase n > 1:\n\t\treturn fmt.Errorf(\"failed to parse %q as %#v: extra characters\", value,\n\t\t\tvName.Type())\n\tcase n == 1 && err == io.EOF:\n\t\treturn nil\n\t}\n\tpanic(\"never reached\")\n}\n\n\/\/ ReadInto reads gcfg formatted data from reader and sets the values into the\n\/\/ corresponding fields in config.\n\/\/\n\/\/ Config must be a pointer to a struct.\n\/\/ Each section corresponds to a struct field in config, and each variable in a\n\/\/ section corresponds to a data field in the section struct.\n\/\/ The name of the field must match the name of the section or variable,\n\/\/ ignoring case.\n\/\/ Hyphens in variable names correspond to underscores in section or field\n\/\/ names.\n\/\/\n\/\/ For sections with subsections, the corresponding field in config must be a\n\/\/ map, rather than a struct, with string keys and pointer-to-struct values.\n\/\/ Values for subsection variables are stored in the map with the subsection\n\/\/ name used as the map key.\n\/\/ (Note that unlike section and variable names, subsection names are case\n\/\/ sensitive.)\n\/\/ When using a map, and there is a section with the same section name but\n\/\/ without a subsection name, its values are stored with the empty string used\n\/\/ as the key.\n\/\/\n\/\/ The section structs in the config struct may contain arbitrary types.\n\/\/ For string fields, the (unquoted and unescaped) value string is assigned to\n\/\/ the field.\n\/\/ For bool fields, the field is set to true if the value is \"true\", \"yes\", \"on\"\n\/\/ or \"1\", and set to false if the value is \"false\", \"no\", \"off\" or \"0\",\n\/\/ ignoring case.\n\/\/ For all other types, fmt.Sscanf is used to parse the value and set it to the\n\/\/ field.\n\/\/ This means that built-in Go types are parseable using the standard format,\n\/\/ and any user-defined type is parseable if it implements the fmt.Scanner\n\/\/ interface.\n\/\/\n\/\/ See ReadStringInto for examples.\n\/\/\nfunc ReadInto(config interface{}, reader io.Reader) error {\n\tr := bufio.NewReader(reader)\n\tsect := (*string)(nil)\n\tsectsub := \"\"\n\tfor line := 1; true; line++ {\n\t\tl, pre, err := r.ReadLine()\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if pre {\n\t\t\treturn errors.New(\"line too long\")\n\t\t}\n\t\t\/\/ exclude comments\n\t\tif c := reCmnt.FindSubmatch(l); c != nil {\n\t\t\tl = c[1]\n\t\t} else if c := reCmntQ.FindSubmatch(l); c != nil {\n\t\t\tl = c[1]\n\t\t}\n\t\tif !reBlank.Match(l) {\n\t\t\t\/\/ \"switch\" based on line contents\n\t\t\tif sec := reSect.FindSubmatch(l); sec != nil {\n\t\t\t\tstrsec := string(sec[1])\n\t\t\t\tsect, sectsub = &strsec, \"\"\n\t\t\t} else if sec := reSectSub.FindSubmatch(l); sec != nil {\n\t\t\t\tstrsec := string(sec[1])\n\t\t\t\tstrsub := string(sec[2])\n\t\t\t\tif strsub == \"\" {\n\t\t\t\t\treturn errors.New(\"empty subsection not allowed\")\n\t\t\t\t}\n\t\t\t\tsect, sectsub = &strsec, strsub\n\t\t\t} else if v, vq, vd := reVar.FindSubmatch(l),\n\t\t\t\treVarQ.FindSubmatch(l), reVarDflt.FindSubmatch(l); \/\/\n\t\t\tv != nil || vq != nil || vd != nil {\n\t\t\t\tif sect == nil {\n\t\t\t\t\treturn errors.New(\"no section\")\n\t\t\t\t}\n\t\t\t\tvar name, value string\n\t\t\t\tif v != nil {\n\t\t\t\t\tname, value = string(v[1]), string(v[2])\n\t\t\t\t} else if vq != nil {\n\t\t\t\t\tname, value = string(vq[1]), string(vq[2])\n\t\t\t\t} else { \/\/ vd != nil\n\t\t\t\t\tname, value = string(vd[1]), defaultValue\n\t\t\t\t}\n\t\t\t\terr := set(config, *sect, sectsub, name, value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"invalid line %q\", string(l))\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ReadStringInto reads gcfg formatted data from str and sets the values into\n\/\/ the corresponding fields in config.\n\/\/ See ReadInto(config, reader) for a detailed description of how values are\n\/\/ parsed and set into config.\nfunc ReadStringInto(config interface{}, str string) error {\n\tr := strings.NewReader(str)\n\treturn ReadInto(config, r)\n}\n\n\/\/ ReadFileInto reads gcfg formatted data from the file filename and sets the\n\/\/ values into the corresponding fields in config.\n\/\/ See ReadInto(config, reader) for a detailed description of how values are\n\/\/ parsed and set into config.\nfunc ReadFileInto(config interface{}, filename string) error {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn ReadInto(config, f)\n}\n<commit_msg>improve docs<commit_after>\/\/ Package gcfg reads \"gitconfig-like\" text-based configuration files with\n\/\/ \"name=value\" pairs grouped into sections (gcfg files).\n\/\/ Support for writing gcfg files may be added later.\n\/\/\n\/\/ See ReadInto and the examples to get an idea of how to use it.\n\/\/\n\/\/ This package is still a work in progress, and both the supported syntax and\n\/\/ the API is subject to change.\n\/\/\n\/\/ The syntax is based on that used by git config:\n\/\/ http:\/\/git-scm.com\/docs\/git-config#_syntax .\n\/\/ There are some (planned) differences compared to the git config format:\n\/\/ - improve data portability:\n\/\/ - must be encoded in UTF-8 (for now) and must not contain the 0 byte\n\/\/ - include and \"path\" type is not supported\n\/\/ (path type may be implementable as a user-defined type)\n\/\/ - disallow potentially ambiguous or misleading definitions:\n\/\/ - `[sec.sub]` format is not allowed (deprecated in gitconfig)\n\/\/ - `[sec \"\"]` is not allowed\n\/\/ - use `[sec]` for section name \"sec\" and empty subsection name\n\/\/ - within a single file, definitions must be consecutive for each:\n\/\/ - section: '[secA]' -> '[secB]' -> '[secA]' is an error\n\/\/ - subsection: '[sec \"A\"]' -> '[sec \"B\"]' -> '[sec \"A\"]' is an error\n\/\/ - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error\n\/\/\n\/\/ The package may be usable for handling some of the various \"INI file\" formats\n\/\/ used by some programs and libraries, but achieving or maintaining\n\/\/ compatibility with any of those is not a primary concern.\n\/\/\n\/\/ TODO:\n\/\/ - format\n\/\/ - explain \"why gcfg\"\n\/\/ - define valid section and variable names\n\/\/ - define handling of default value\n\/\/ - complete syntax documentation\n\/\/ - reading\n\/\/ - define internal representation structure\n\/\/ - support multi-value variables\n\/\/ - non-regexp based parser\n\/\/ - support partially quoted strings\n\/\/ - support escaping in strings\n\/\/ - support multiple inputs (readers, strings, files)\n\/\/ - support declaring encoding (?)\n\/\/ - support pointer fields\n\/\/ - support varying fields sets for subsections (?)\n\/\/ - scanEnum\n\/\/ - should use longest match (?)\n\/\/ - support matching on unique prefix (?)\n\/\/ - writing gcfg files\n\/\/ - error handling\n\/\/ - include error context\n\/\/ - avoid reflection-related panics\n\/\/ - more helpful error messages\n\/\/ - error types \/ codes?\n\/\/ - limit input size?\n\/\/ - move TODOs to issue tracker (eventually)\n\/\/\npackage gcfg\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\treCmnt = regexp.MustCompile(`^([^;#\"]*)[;#].*$`)\n\treCmntQ = regexp.MustCompile(`^([^;#\"]*\"[^\"]*\"[^;#\"]*)[;#].*$`)\n\treBlank = regexp.MustCompile(`^\\s*$`)\n\treSect = regexp.MustCompile(`^\\s*\\[\\s*([^\"\\s]*)\\s*\\]\\s*$`)\n\treSectSub = regexp.MustCompile(`^\\s*\\[\\s*([^\"\\s]*)\\s*\"([^\"]+)\"\\s*\\]\\s*$`)\n\treVar = regexp.MustCompile(`^\\s*([^\"=\\s]+)\\s*=\\s*([^\"\\s]*)\\s*$`)\n\treVarQ = regexp.MustCompile(`^\\s*([^\"=\\s]+)\\s*=\\s*\"([^\"\\n\\\\]*)\"\\s*$`)\n\treVarDflt = regexp.MustCompile(`^\\s*\\b(.*)\\b\\s*$`)\n)\n\nconst (\n\t\/\/ Default value string in case a value for a variable isn't provided.\n\tdefaultValue = \"true\"\n)\n\ntype gbool bool\n\nvar gboolValues = map[string]interface{}{\n\t\"true\": true, \"yes\": true, \"on\": true, \"1\": true,\n\t\"false\": false, \"no\": false, \"off\": false, \"0\": false}\n\nfunc (b *gbool) Scan(state fmt.ScanState, verb rune) error {\n\tv, err := scanEnum(state, gboolValues, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbb, _ := v.(bool) \/\/ cannot be non-bool\n\t*b = gbool(bb)\n\treturn nil\n}\n\nfunc fieldFold(v reflect.Value, name string) reflect.Value {\n\tn := strings.Replace(name, \"-\", \"_\", -1)\n\treturn v.FieldByNameFunc(func(fieldName string) bool {\n\t\treturn strings.EqualFold(n, fieldName)\n\t})\n}\n\nfunc set(cfg interface{}, sect, sub, name, value string) error {\n\tvDest := reflect.ValueOf(cfg).Elem()\n\tvSect := fieldFold(vDest, sect)\n\tif vSect.Kind() == reflect.Map {\n\t\tif vSect.IsNil() {\n\t\t\tvSect.Set(reflect.MakeMap(vSect.Type()))\n\t\t}\n\t\tk := reflect.ValueOf(sub)\n\t\tpv := vSect.MapIndex(k)\n\t\tif !pv.IsValid() {\n\t\t\tvType := vSect.Type().Elem().Elem()\n\t\t\tpv = reflect.New(vType)\n\t\t\tvSect.SetMapIndex(k, pv)\n\t\t}\n\t\tvSect = pv.Elem()\n\t} else if sub != \"\" {\n\t\treturn fmt.Errorf(\"expected map; section %q subsection %q\", sect, sub)\n\t}\n\tvName := fieldFold(vSect, name)\n\tvAddr := vName.Addr().Interface()\n\tswitch v := vAddr.(type) {\n\tcase *string:\n\t\t*v = value\n\t\treturn nil\n\tcase *bool:\n\t\tvAddr = (*gbool)(v)\n\t}\n\t\/\/ attempt to read an extra rune to make sure the value is consumed \n\tvar r rune\n\tn, err := fmt.Sscanf(value, \"%v%c\", vAddr, &r)\n\tswitch {\n\tcase n < 1 || n == 1 && err != io.EOF:\n\t\treturn fmt.Errorf(\"failed to parse %q as %#v: parse error %v\", value,\n\t\t\tvName.Type(), err)\n\tcase n > 1:\n\t\treturn fmt.Errorf(\"failed to parse %q as %#v: extra characters\", value,\n\t\t\tvName.Type())\n\tcase n == 1 && err == io.EOF:\n\t\treturn nil\n\t}\n\tpanic(\"never reached\")\n}\n\n\/\/ ReadInto reads gcfg formatted data from reader and sets the values into the\n\/\/ corresponding fields in config.\n\/\/\n\/\/ Config must be a pointer to a struct.\n\/\/ Each section corresponds to a struct field in config, and each variable in a\n\/\/ section corresponds to a data field in the section struct.\n\/\/ The name of the field must match the name of the section or variable,\n\/\/ ignoring case.\n\/\/ Hyphens in variable names correspond to underscores in section or field\n\/\/ names.\n\/\/\n\/\/ For sections with subsections, the corresponding field in config must be a\n\/\/ map, rather than a struct, with string keys and pointer-to-struct values.\n\/\/ Values for subsection variables are stored in the map with the subsection\n\/\/ name used as the map key.\n\/\/ (Note that unlike section and variable names, subsection names are case\n\/\/ sensitive.)\n\/\/ When using a map, and there is a section with the same section name but\n\/\/ without a subsection name, its values are stored with the empty string used\n\/\/ as the key.\n\/\/\n\/\/ The section structs in the config struct may contain arbitrary types.\n\/\/ For string fields, the (unquoted and unescaped) value string is assigned to\n\/\/ the field.\n\/\/ For bool fields, the field is set to true if the value is \"true\", \"yes\", \"on\"\n\/\/ or \"1\", and set to false if the value is \"false\", \"no\", \"off\" or \"0\",\n\/\/ ignoring case.\n\/\/ For all other types, fmt.Sscanf is used to parse the value and set it to the\n\/\/ field.\n\/\/ This means that built-in Go types are parseable using the standard format,\n\/\/ and any user-defined type is parseable if it implements the fmt.Scanner\n\/\/ interface.\n\/\/ Note that the value is considered invalid unless fmt.Scanner fully consumes\n\/\/ the value string without error.\n\/\/\n\/\/ See ReadStringInto for examples.\n\/\/\nfunc ReadInto(config interface{}, reader io.Reader) error {\n\tr := bufio.NewReader(reader)\n\tsect := (*string)(nil)\n\tsectsub := \"\"\n\tfor line := 1; true; line++ {\n\t\tl, pre, err := r.ReadLine()\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if pre {\n\t\t\treturn errors.New(\"line too long\")\n\t\t}\n\t\t\/\/ exclude comments\n\t\tif c := reCmnt.FindSubmatch(l); c != nil {\n\t\t\tl = c[1]\n\t\t} else if c := reCmntQ.FindSubmatch(l); c != nil {\n\t\t\tl = c[1]\n\t\t}\n\t\tif !reBlank.Match(l) {\n\t\t\t\/\/ \"switch\" based on line contents\n\t\t\tif sec := reSect.FindSubmatch(l); sec != nil {\n\t\t\t\tstrsec := string(sec[1])\n\t\t\t\tsect, sectsub = &strsec, \"\"\n\t\t\t} else if sec := reSectSub.FindSubmatch(l); sec != nil {\n\t\t\t\tstrsec := string(sec[1])\n\t\t\t\tstrsub := string(sec[2])\n\t\t\t\tif strsub == \"\" {\n\t\t\t\t\treturn errors.New(\"empty subsection not allowed\")\n\t\t\t\t}\n\t\t\t\tsect, sectsub = &strsec, strsub\n\t\t\t} else if v, vq, vd := reVar.FindSubmatch(l),\n\t\t\t\treVarQ.FindSubmatch(l), reVarDflt.FindSubmatch(l); \/\/\n\t\t\tv != nil || vq != nil || vd != nil {\n\t\t\t\tif sect == nil {\n\t\t\t\t\treturn errors.New(\"no section\")\n\t\t\t\t}\n\t\t\t\tvar name, value string\n\t\t\t\tif v != nil {\n\t\t\t\t\tname, value = string(v[1]), string(v[2])\n\t\t\t\t} else if vq != nil {\n\t\t\t\t\tname, value = string(vq[1]), string(vq[2])\n\t\t\t\t} else { \/\/ vd != nil\n\t\t\t\t\tname, value = string(vd[1]), defaultValue\n\t\t\t\t}\n\t\t\t\terr := set(config, *sect, sectsub, name, value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"invalid line %q\", string(l))\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ReadStringInto reads gcfg formatted data from str and sets the values into\n\/\/ the corresponding fields in config.\n\/\/ ReadStringInfo is a wrapper for ReadInfo; see ReadInto(config, reader) for\n\/\/ detailed description of how data is read and set into config.\nfunc ReadStringInto(config interface{}, str string) error {\n\tr := strings.NewReader(str)\n\treturn ReadInto(config, r)\n}\n\n\/\/ ReadFileInto reads gcfg formatted data from the file filename and sets the\n\/\/ values into the corresponding fields in config.\n\/\/ ReadFileInto is a wrapper for ReadInfo; see ReadInto(config, reader) for\n\/\/ detailed description of how data is read and set into config.\nfunc ReadFileInto(config interface{}, filename string) error {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn ReadInto(config, f)\n}\n<|endoftext|>"} {"text":"<commit_before>package ghch\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/octokit\/go-octokit\/octokit\"\n)\n\ntype ghch struct {\n\trepoPath string\n\tremote string\n\tverbose bool\n\tclient *octokit.Client\n}\n\nfunc (gh *ghch) initialize() *ghch {\n\t\/\/ XXX authentication\n\tgh.client = octokit.NewClient(nil)\n\treturn gh\n}\n\nfunc (gh *ghch) cmd(argv ...string) (string, error) {\n\targ := []string{\"-C\", gh.repoPath}\n\targ = append(arg, argv...)\n\tcmd := exec.Command(\"git\", arg...)\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\n\tvar b bytes.Buffer\n\tcmd.Stdout = &b\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\treturn b.String(), err\n}\n\nvar verReg = regexp.MustCompile(`^v?[0-9]+(?:\\.[0-9]+){0,2}$`)\n\nfunc (gh *ghch) versions() []string {\n\tout, _ := gh.cmd(\"tag\")\n\treturn parseVerions(out)\n}\n\nfunc parseVerions(out string) []string {\n\trawTags := strings.Split(out, \"\\n\")\n\tvar versions []*semver.Version\n\tfor _, tag := range rawTags {\n\t\tt := strings.TrimSpace(tag)\n\t\tif verReg.MatchString(t) {\n\t\t\tv, _ := semver.NewVersion(t)\n\t\t\tversions = append(versions, v)\n\t\t}\n\t}\n\tsort.Sort(sort.Reverse(semver.Collection(versions)))\n\tvar vers = make([]string, len(versions))\n\tfor i, v := range versions {\n\t\tvers[i] = v.Original()\n\t}\n\treturn vers\n}\n\nfunc (gh *ghch) getRemote() string {\n\tif gh.remote != \"\" {\n\t\treturn gh.remote\n\t}\n\treturn \"origin\"\n}\n\nvar repoURLReg = regexp.MustCompile(`([^\/:]+)\/([^\/]+?)(?:\\.git)?$`)\n\nfunc (gh *ghch) ownerAndRepo() (owner, repo string) {\n\tout, _ := gh.cmd(\"remote\", \"-v\")\n\tremotes := strings.Split(out, \"\\n\")\n\tfor _, r := range remotes {\n\t\tfields := strings.Fields(r)\n\t\tif len(fields) > 1 && fields[0] == gh.getRemote() {\n\t\t\tif matches := repoURLReg.FindStringSubmatch(fields[1]); len(matches) > 2 {\n\t\t\t\treturn matches[1], matches[2]\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (gh *ghch) mergedPRs(from, to string) (prs []*octokit.PullRequest) {\n\towner, repo := gh.ownerAndRepo()\n\tnums := gh.mergedPRNums(from, to)\n\tfor _, num := range nums {\n\t\turl, _ := octokit.PullRequestsURL.Expand(octokit.M{\"owner\": owner, \"repo\": repo, \"number\": num})\n\t\tpr, r := gh.client.PullRequests(url).One()\n\t\tif r.HasError() {\n\t\t\tlog.Print(r.Err)\n\t\t\tcontinue\n\t\t}\n\t\tif !gh.verbose {\n\t\t\tpr = reducePR(pr)\n\t\t}\n\t\tprs = append(prs, pr)\n\t}\n\treturn\n}\n\nvar prMergeReg = regexp.MustCompile(`^[a-f0-9]{7} Merge pull request #([0-9]+) from`)\n\nfunc (gh *ghch) mergedPRNums(from, to string) (nums []int) {\n\tif from == \"\" {\n\t\tvers := gh.versions()\n\t\tif len(vers) < 1 {\n\t\t\treturn\n\t\t}\n\t\tfrom = vers[0]\n\t}\n\trevisionRange := fmt.Sprintf(\"%s..%s\", from, to)\n\tout, err := gh.cmd(\"log\", revisionRange, \"--merges\", \"--oneline\")\n\tif err != nil {\n\t\treturn\n\t}\n\treturn parseMergedPRNums(out)\n}\n\nfunc parseMergedPRNums(out string) (nums []int) {\n\tlines := strings.Split(out, \"\\n\")\n\tfor _, line := range lines {\n\t\tif matches := prMergeReg.FindStringSubmatch(line); len(matches) > 1 {\n\t\t\ti, _ := strconv.Atoi(matches[1])\n\t\t\tnums = append(nums, i)\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>use sync.WaitGroup<commit_after>package ghch\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/octokit\/go-octokit\/octokit\"\n)\n\ntype ghch struct {\n\trepoPath string\n\tremote string\n\tverbose bool\n\tclient *octokit.Client\n}\n\nfunc (gh *ghch) initialize() *ghch {\n\t\/\/ XXX authentication\n\tgh.client = octokit.NewClient(nil)\n\treturn gh\n}\n\nfunc (gh *ghch) cmd(argv ...string) (string, error) {\n\targ := []string{\"-C\", gh.repoPath}\n\targ = append(arg, argv...)\n\tcmd := exec.Command(\"git\", arg...)\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\n\tvar b bytes.Buffer\n\tcmd.Stdout = &b\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\treturn b.String(), err\n}\n\nvar verReg = regexp.MustCompile(`^v?[0-9]+(?:\\.[0-9]+){0,2}$`)\n\nfunc (gh *ghch) versions() []string {\n\tout, _ := gh.cmd(\"tag\")\n\treturn parseVerions(out)\n}\n\nfunc parseVerions(out string) []string {\n\trawTags := strings.Split(out, \"\\n\")\n\tvar versions []*semver.Version\n\tfor _, tag := range rawTags {\n\t\tt := strings.TrimSpace(tag)\n\t\tif verReg.MatchString(t) {\n\t\t\tv, _ := semver.NewVersion(t)\n\t\t\tversions = append(versions, v)\n\t\t}\n\t}\n\tsort.Sort(sort.Reverse(semver.Collection(versions)))\n\tvar vers = make([]string, len(versions))\n\tfor i, v := range versions {\n\t\tvers[i] = v.Original()\n\t}\n\treturn vers\n}\n\nfunc (gh *ghch) getRemote() string {\n\tif gh.remote != \"\" {\n\t\treturn gh.remote\n\t}\n\treturn \"origin\"\n}\n\nvar repoURLReg = regexp.MustCompile(`([^\/:]+)\/([^\/]+?)(?:\\.git)?$`)\n\nfunc (gh *ghch) ownerAndRepo() (owner, repo string) {\n\tout, _ := gh.cmd(\"remote\", \"-v\")\n\tremotes := strings.Split(out, \"\\n\")\n\tfor _, r := range remotes {\n\t\tfields := strings.Fields(r)\n\t\tif len(fields) > 1 && fields[0] == gh.getRemote() {\n\t\t\tif matches := repoURLReg.FindStringSubmatch(fields[1]); len(matches) > 2 {\n\t\t\t\treturn matches[1], matches[2]\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (gh *ghch) mergedPRs(from, to string) (prs []*octokit.PullRequest) {\n\towner, repo := gh.ownerAndRepo()\n\tnums := gh.mergedPRNums(from, to)\n\n\tvar wg sync.WaitGroup\n\tprCh := make(chan *octokit.PullRequest)\n\n\tgo func() {\n\t\tfor pr := range prCh {\n\t\t\tprs = append(prs, pr)\n\t\t}\n\t}()\n\n\tfor _, num := range nums {\n\t\twg.Add(1)\n\t\tgo func(num int) {\n\t\t\tdefer wg.Done()\n\t\t\turl, _ := octokit.PullRequestsURL.Expand(octokit.M{\"owner\": owner, \"repo\": repo, \"number\": num})\n\t\t\tpr, r := gh.client.PullRequests(url).One()\n\t\t\tif r.HasError() {\n\t\t\t\tlog.Print(r.Err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !gh.verbose {\n\t\t\t\tpr = reducePR(pr)\n\t\t\t}\n\t\t\tprCh <- pr\n\t\t}(num)\n\t}\n\twg.Wait()\n\tclose(prCh)\n\n\treturn\n}\n\nvar prMergeReg = regexp.MustCompile(`^[a-f0-9]{7} Merge pull request #([0-9]+) from`)\n\nfunc (gh *ghch) mergedPRNums(from, to string) (nums []int) {\n\tif from == \"\" {\n\t\tvers := gh.versions()\n\t\tif len(vers) < 1 {\n\t\t\treturn\n\t\t}\n\t\tfrom = vers[0]\n\t}\n\trevisionRange := fmt.Sprintf(\"%s..%s\", from, to)\n\tout, err := gh.cmd(\"log\", revisionRange, \"--merges\", \"--oneline\")\n\tif err != nil {\n\t\treturn\n\t}\n\treturn parseMergedPRNums(out)\n}\n\nfunc parseMergedPRNums(out string) (nums []int) {\n\tlines := strings.Split(out, \"\\n\")\n\tfor _, line := range lines {\n\t\tif matches := prMergeReg.FindStringSubmatch(line); len(matches) > 1 {\n\t\t\ti, _ := strconv.Atoi(matches[1])\n\t\t\tnums = append(nums, i)\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package godb\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/samonzeweb\/godb\/adapters\"\n)\n\n\/\/ DB stores a connection to the database, the current transaction, logger, ...\n\/\/ Everything starts with a DB.\n\/\/ DB is not thread safe (see Clone).\ntype DB struct {\n\tadapter adapters.Adapter\n\tsqlDB *sql.DB\n\tsqlTx *sql.Tx\n\tlogger *log.Logger\n\tconsumedTime time.Duration\n\n\t\/\/ Prepared Statement cache for DB and Tx\n\tstmtCacheDB *StmtCache\n\tstmtCacheTx *StmtCache\n}\n\n\/\/ Placeholder is the placeholder string, use it to build queries.\n\/\/ Adapters could change it before queries are executed.\nconst Placeholder string = \"?\"\n\n\/\/ ErrOpLock is an error returned when Optimistic Locking failure occurs\nvar ErrOpLock = errors.New(\"Optimistic Locking Failure\")\n\n\/\/ Open creates a new DB struct and initialise a sql.DB connection.\nfunc Open(adapter adapters.Adapter, dataSourceName string) (*DB, error) {\n\tdb := DB{\n\t\tadapter: adapter,\n\t\tstmtCacheDB: newStmtCache(),\n\t\tstmtCacheTx: newStmtCache(),\n\t}\n\n\t\/\/ Prepared statements cache is disabled by default except for Tx\n\tdb.stmtCacheDB.Disable()\n\n\tvar err error\n\tdb.sqlDB, err = sql.Open(adapter.DriverName(), dataSourceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &db, nil\n}\n\n\/\/ Clone creates a copy of an existing DB, without the current transaction.\n\/\/ The clone has no consumed time, and new prepared statements caches with\n\/\/ the same characteristics.\n\/\/ Use it to create new DB object before starting a goroutine.\nfunc (db *DB) Clone() *DB {\n\tclone := &DB{\n\t\tadapter: db.adapter,\n\t\tsqlDB: db.sqlDB,\n\t\tsqlTx: nil,\n\t\tlogger: db.logger,\n\t\tconsumedTime: 0,\n\t\tstmtCacheDB: newStmtCache(),\n\t\tstmtCacheTx: newStmtCache(),\n\t}\n\n\tclone.stmtCacheDB.SetSize(db.stmtCacheDB.GetSize())\n\tif !db.stmtCacheDB.IsEnabled() {\n\t\tclone.stmtCacheDB.Disable()\n\t}\n\n\tclone.stmtCacheTx.SetSize(db.stmtCacheTx.GetSize())\n\tif !db.stmtCacheTx.IsEnabled() {\n\t\tclone.stmtCacheTx.Disable()\n\t}\n\n\treturn clone\n}\n\n\/\/ Close closes an existing DB created by Open.\n\/\/ Dont't close a cloned DB still used by others goroutines as the sql.DB\n\/\/ is shared !\n\/\/ Don't use a DB anymore after a call to Close.\nfunc (db *DB) Close() error {\n\tdb.logPrintln(\"CLOSE DB\")\n\tif db.sqlTx != nil {\n\t\tdb.logPrintln(\"Warning, there is a current transaction\")\n\t}\n\treturn db.sqlDB.Close()\n}\n\n\/\/ Adapter returns the current adapter.\nfunc (db *DB) Adapter() adapters.Adapter {\n\treturn db.adapter\n}\n\n\/\/ CurrentDB returns the current *sql.DB.\n\/\/ Use it wisely.\nfunc (db *DB) CurrentDB() *sql.DB {\n\treturn db.sqlDB\n}\n\n\/\/ ConsumedTime returns the time consumed by SQL queries executions\n\/\/ The duration is reseted when the DB is cloned.\nfunc (db *DB) ConsumedTime() time.Duration {\n\treturn db.consumedTime\n}\n\n\/\/ ResetConsumedTime resets the time consumed by SQL queries executions\nfunc (db *DB) ResetConsumedTime() {\n\tdb.consumedTime = 0\n}\n\n\/\/ addConsumedTime adds duration to the consumed time\nfunc (db *DB) addConsumedTime(duration time.Duration) {\n\tdb.consumedTime += duration\n}\n\n\/\/ timeElapsedSince returns the time elapsed (duration) since a given\n\/\/ start time.\nfunc timeElapsedSince(startTime time.Time) time.Duration {\n\treturn time.Now().Sub(startTime)\n}\n\n\/\/ quoteAll returns all strings given quoted by the adapter.\nfunc (db *DB) quoteAll(identifiers []string) []string {\n\tquotedIdentifiers := make([]string, 0, len(identifiers))\n\tfor _, identifier := range identifiers {\n\t\tquotedIdentifiers = append(quotedIdentifiers, db.adapter.Quote(identifier))\n\t}\n\treturn quotedIdentifiers\n}\n\n\/\/ replacePlaceholders uses the adapter to change placehodlers according to\n\/\/ the database used.\nfunc (db *DB) replacePlaceholders(sql string) string {\n\tplaceholderReplacer, ok := (db.adapter).(adapters.PlaceholdersReplacer)\n\tif !ok {\n\t\treturn sql\n\t}\n\n\treturn placeholderReplacer.ReplacePlaceholders(Placeholder, sql)\n}\n<commit_msg>added Wrap(sql.DB) for preconfigured sql.DB instance (#5)<commit_after>package godb\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/samonzeweb\/godb\/adapters\"\n)\n\n\/\/ DB stores a connection to the database, the current transaction, logger, ...\n\/\/ Everything starts with a DB.\n\/\/ DB is not thread safe (see Clone).\ntype DB struct {\n\tadapter adapters.Adapter\n\tsqlDB *sql.DB\n\tsqlTx *sql.Tx\n\tlogger *log.Logger\n\tconsumedTime time.Duration\n\n\t\/\/ Prepared Statement cache for DB and Tx\n\tstmtCacheDB *StmtCache\n\tstmtCacheTx *StmtCache\n}\n\n\/\/ Placeholder is the placeholder string, use it to build queries.\n\/\/ Adapters could change it before queries are executed.\nconst Placeholder string = \"?\"\n\n\/\/ ErrOpLock is an error returned when Optimistic Locking failure occurs\nvar ErrOpLock = errors.New(\"optimistic locking failure\")\n\n\/\/ Open creates a new DB struct and initialise a sql.DB connection.\nfunc Open(adapter adapters.Adapter, dataSourceName string) (*DB, error) {\n\tdb := DB{\n\t\tadapter: adapter,\n\t\tstmtCacheDB: newStmtCache(),\n\t\tstmtCacheTx: newStmtCache(),\n\t}\n\n\t\/\/ Prepared statements cache is disabled by default except for Tx\n\tdb.stmtCacheDB.Disable()\n\n\tvar err error\n\tdb.sqlDB, err = sql.Open(adapter.DriverName(), dataSourceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &db, nil\n}\n\n\n\/\/ Wrap creates a godb.DB by using provided and initialized sql.DB Helpful for\n\/\/ using custom configured sql.DB instance for godb. Can be used before\n\/\/ starting a goroutine.\nfunc Wrap(adapter adapters.Adapter, dbInst *sql.DB) *DB {\n\tdb := DB{\n\t\tadapter: adapter,\n\t\tstmtCacheDB: newStmtCache(),\n\t\tstmtCacheTx: newStmtCache(),\n\t}\n\n\t\/\/ Prepared statements cache is disabled by default except for Tx\n\tdb.stmtCacheDB.Disable()\n\tdb.sqlDB = dbInst\n\treturn &db\n}\n\n\/\/ Clone creates a copy of an existing DB, without the current transaction.\n\/\/ The clone has consumedTime set to zero, and new prepared statements caches with\n\/\/ the same characteristics.\n\/\/ Use it to create new DB object before starting a goroutine.\nfunc (db *DB) Clone() *DB {\n\tclone := &DB{\n\t\tadapter: db.adapter,\n\t\tsqlDB: db.sqlDB,\n\t\tsqlTx: nil,\n\t\tlogger: db.logger,\n\t\tconsumedTime: 0,\n\t\tstmtCacheDB: newStmtCache(),\n\t\tstmtCacheTx: newStmtCache(),\n\t}\n\n\tclone.stmtCacheDB.SetSize(db.stmtCacheDB.GetSize())\n\tif !db.stmtCacheDB.IsEnabled() {\n\t\tclone.stmtCacheDB.Disable()\n\t}\n\n\tclone.stmtCacheTx.SetSize(db.stmtCacheTx.GetSize())\n\tif !db.stmtCacheTx.IsEnabled() {\n\t\tclone.stmtCacheTx.Disable()\n\t}\n\n\treturn clone\n}\n\n\/\/ Close closes an existing DB created by Open.\n\/\/ Don't close a cloned DB still used by others goroutines as the sql.DB\n\/\/ is shared !\n\/\/ Don't use a DB anymore after a call to Close.\nfunc (db *DB) Close() error {\n\tdb.logPrintln(\"CLOSE DB\")\n\tif db.sqlTx != nil {\n\t\tdb.logPrintln(\"Warning, there is a current transaction\")\n\t}\n\treturn db.sqlDB.Close()\n}\n\n\/\/ Adapter returns the current adapter.\nfunc (db *DB) Adapter() adapters.Adapter {\n\treturn db.adapter\n}\n\n\/\/ CurrentDB returns the current *sql.DB.\n\/\/ Use it wisely.\nfunc (db *DB) CurrentDB() *sql.DB {\n\treturn db.sqlDB\n}\n\n\/\/ ConsumedTime returns the time consumed by SQL queries executions\n\/\/ The duration is reseted when the DB is cloned.\nfunc (db *DB) ConsumedTime() time.Duration {\n\treturn db.consumedTime\n}\n\n\/\/ ResetConsumedTime resets the time consumed by SQL queries executions\nfunc (db *DB) ResetConsumedTime() {\n\tdb.consumedTime = 0\n}\n\n\/\/ addConsumedTime adds duration to the consumed time\nfunc (db *DB) addConsumedTime(duration time.Duration) {\n\tdb.consumedTime += duration\n}\n\n\/\/ timeElapsedSince returns the time elapsed (duration) since a given\n\/\/ start time.\nfunc timeElapsedSince(startTime time.Time) time.Duration {\n\treturn time.Now().Sub(startTime)\n}\n\n\/\/ quoteAll returns all strings given quoted by the adapter.\nfunc (db *DB) quoteAll(identifiers []string) []string {\n\tquotedIdentifiers := make([]string, 0, len(identifiers))\n\tfor _, identifier := range identifiers {\n\t\tquotedIdentifiers = append(quotedIdentifiers, db.adapter.Quote(identifier))\n\t}\n\treturn quotedIdentifiers\n}\n\n\/\/ replacePlaceholders uses the adapter to change placeholders according to\n\/\/ the database used.\nfunc (db *DB) replacePlaceholders(sql string) string {\n\tplaceholderReplacer, ok := (db.adapter).(adapters.PlaceholdersReplacer)\n\tif !ok {\n\t\treturn sql\n\t}\n\n\treturn placeholderReplacer.ReplacePlaceholders(Placeholder, sql)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\t\"github.com\/toqueteos\/webbrowser\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\t\"io\"\n)\n\nvar (\n\tport = kingpin.Flag(\"port\", \"Listening port used by webserver\").Short('p').Default(\"1110\").Int()\n\tfile = kingpin.Arg(\"file\", \"Markdown file\").String()\n)\n\ntype EditorView struct {\n\tFile string\n\tContent string\n}\n\nfunc main() {\n\tkingpin.Version(\"0.0.1\")\n\tkingpin.Parse()\n\n\te := echo.New()\n\n\tt := &Template{\n\t\ttemplates: template.Must(template.ParseGlob(\"template\/*.html\")),\n\t}\n\te.SetRenderer(t)\n\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\te.Static(\"\/static\/\", \"static\")\n\n\tedit := e.Group(\"\/edit\")\n\tedit.Get(\"\/*\", EditHandler)\n\tedit.Post(\"\/*\", EditHandler)\n\n\tgo WaitForServer(port)\n\te.Run(fmt.Sprintf(\"127.0.0.1:%d\", *port))\n}\n\ntype Template struct {\n\ttemplates *template.Template\n}\n\nfunc (t *Template) Render(w io.Writer, name string, data interface{}) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}\n\nfunc EditHandler(c *echo.Context) error {\n\tfilepath := c.P(0)\n\tif c.Request().Method == \"POST\" {\n\t\tc.Request().ParseForm()\n\t\tioutil.WriteFile(filepath, []byte(c.Request().PostForm.Get(\"content\")), 0644)\n\t}\n\tcontent, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, \"Unable to read requested file\")\n\t\tlog.Fatalf(\"Unable to open file \" + filepath)\n\t}\n\tev := EditorView{File: filepath, Content: string(content)}\n\treturn c.Render(http.StatusOK, \"base\", ev)\n}\n\nfunc WaitForServer(port *int) {\n\tlog.Printf(\"Waiting for listener on port %d\", *port)\n\turl := fmt.Sprintf(\"http:\/\/localhost:%d\/edit\/%s\", *port, url.QueryEscape(*file))\n\tfor {\n\t\ttime.Sleep(time.Millisecond * 50)\n\t\tresp, err := http.Get(url)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tlog.Println(\"Opening \" + url)\n\tif err := webbrowser.Open(url); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Organize input arguments in a struct<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\t\"github.com\/toqueteos\/webbrowser\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"html\/template\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n)\n\ntype InputArgs struct {\n\tPort *int\n\tFile *string\n}\n\nvar args = InputArgs{\n\tPort: kingpin.Flag(\"port\", \"Listening port used by webserver\").Short('p').Default(\"1110\").Int(),\n\tFile: kingpin.Arg(\"file\", \"Markdown file\").String(),\n}\n\ntype EditorView struct {\n\tFile string\n\tContent string\n}\n\nfunc main() {\n\tkingpin.Version(\"0.0.1\")\n\tkingpin.Parse()\n\n\te := echo.New()\n\n\tt := &Template{\n\t\ttemplates: template.Must(template.ParseGlob(\"template\/*.html\")),\n\t}\n\te.SetRenderer(t)\n\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\te.Static(\"\/static\/\", \"static\")\n\n\tedit := e.Group(\"\/edit\")\n\tedit.Get(\"\/*\", EditHandler)\n\tedit.Post(\"\/*\", EditHandler)\n\n\tgo WaitForServer()\n\te.Run(fmt.Sprintf(\"127.0.0.1:%d\", *args.Port))\n}\n\ntype Template struct {\n\ttemplates *template.Template\n}\n\nfunc (t *Template) Render(w io.Writer, name string, data interface{}) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}\n\nfunc EditHandler(c *echo.Context) error {\n\tfilepath := c.P(0)\n\tif c.Request().Method == \"POST\" {\n\t\tc.Request().ParseForm()\n\t\tioutil.WriteFile(filepath, []byte(c.Request().PostForm.Get(\"content\")), 0644)\n\t}\n\tcontent, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, \"Unable to read requested file\")\n\t\tlog.Fatalf(\"Unable to open file \" + filepath)\n\t}\n\tev := EditorView{File: filepath, Content: string(content)}\n\treturn c.Render(http.StatusOK, \"base\", ev)\n}\n\nfunc WaitForServer() {\n\tlog.Printf(\"Waiting for listener on port %d\", *args.Port)\n\turl := fmt.Sprintf(\"http:\/\/localhost:%d\/edit\/%s\", *args.Port, url.QueryEscape(*args.File))\n\tfor {\n\t\ttime.Sleep(time.Millisecond * 50)\n\t\tresp, err := http.Get(url)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tlog.Println(\"Opening \" + url)\n\tif err := webbrowser.Open(url); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Program gops is a tool to list currently running Go processes.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/google\/gops\/internal\/objfile\"\n\n\tps \"github.com\/keybase\/go-ps\"\n)\n\nconst helpText = `Usage: gops is a tool to list and diagnose Go processes.\n\n gops Lists all Go processes currently running.\n gops [cmd] -p=<pid> See the section below.\n\nCommands: \n stack Prints the stack trace.\n gc Runs the garbage collector and blocks until successful.\n memstats Prints the garbage collection stats.\n version Prints the Go version used to build the program.\n\nAll commands require the agent running on the Go process.\n`\n\n\/\/ TODO(jbd): add link that explains the use of agent.\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tprocesses()\n\t\treturn\n\t}\n\n\tcmd := os.Args[1]\n\tfn, ok := cmds[cmd]\n\tif !ok {\n\t\tusage(\"unknown subcommand\")\n\t}\n\n\tvar pid int\n\tflag.IntVar(&pid, \"p\", -1, \"\")\n\tflag.CommandLine.Parse(os.Args[2:])\n\tif pid == -1 {\n\t\tusage(\"missing -p=<pid> flag\")\n\t}\n\n\tif err := fn(pid); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc processes() {\n\tpss, err := ps.Processes()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar undetermined int\n\tfor _, pr := range pss {\n\t\tname, err := pr.Path()\n\t\tif err != nil {\n\t\t\tundetermined++\n\t\t\tcontinue\n\t\t}\n\t\tok, err := isGo(name)\n\t\tif err != nil {\n\t\t\t\/\/ TODO(jbd): worth to report the number?\n\t\t\tcontinue\n\t\t}\n\t\tif ok {\n\t\t\t\/\/ TODO(jbd): List if the program is running the agent.\n\t\t\tfmt.Printf(\"%d\\t%v\\t(%v)\\n\", pr.Pid(), pr.Executable(), name)\n\t\t}\n\t}\n\tif undetermined > 0 {\n\t\tfmt.Printf(\"\\n%d processes left undetermined\\n\", undetermined)\n\t}\n}\n\nfunc isGo(executable string) (ok bool, err error) {\n\tobj, err := objfile.Open(executable)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer obj.Close()\n\n\tsymbols, err := obj.Symbols()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ TODO(jbd): find a faster way to determine Go programs.\n\tfor _, s := range symbols {\n\t\tif s.Name == \"runtime.buildVersion\" {\n\t\t\treturn true, nil\n\t\t}\n\t\tif strings.HasPrefix(s.Name, \"github.com\/google\/gops\") {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc usage(msg string) {\n\tif msg != \"\" {\n\t\tfmt.Printf(\"gops: %v\\n\", msg)\n\t}\n\tfmt.Fprintf(os.Stderr, \"%v\\n\", helpText)\n\tos.Exit(1)\n}\n<commit_msg>remove unnecessary agent sym check<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Program gops is a tool to list currently running Go processes.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/google\/gops\/internal\/objfile\"\n\n\tps \"github.com\/keybase\/go-ps\"\n)\n\nconst helpText = `Usage: gops is a tool to list and diagnose Go processes.\n\n gops Lists all Go processes currently running.\n gops [cmd] -p=<pid> See the section below.\n\nCommands: \n stack Prints the stack trace.\n gc Runs the garbage collector and blocks until successful.\n memstats Prints the garbage collection stats.\n version Prints the Go version used to build the program.\n\nAll commands require the agent running on the Go process.\n`\n\n\/\/ TODO(jbd): add link that explains the use of agent.\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tprocesses()\n\t\treturn\n\t}\n\n\tcmd := os.Args[1]\n\tfn, ok := cmds[cmd]\n\tif !ok {\n\t\tusage(\"unknown subcommand\")\n\t}\n\n\tvar pid int\n\tflag.IntVar(&pid, \"p\", -1, \"\")\n\tflag.CommandLine.Parse(os.Args[2:])\n\tif pid == -1 {\n\t\tusage(\"missing -p=<pid> flag\")\n\t}\n\n\tif err := fn(pid); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc processes() {\n\tpss, err := ps.Processes()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar undetermined int\n\tfor _, pr := range pss {\n\t\tname, err := pr.Path()\n\t\tif err != nil {\n\t\t\tundetermined++\n\t\t\tcontinue\n\t\t}\n\t\tok, err := isGo(name)\n\t\tif err != nil {\n\t\t\t\/\/ TODO(jbd): worth to report the number?\n\t\t\tcontinue\n\t\t}\n\t\tif ok {\n\t\t\t\/\/ TODO(jbd): List if the program is running the agent.\n\t\t\tfmt.Printf(\"%d\\t%v\\t(%v)\\n\", pr.Pid(), pr.Executable(), name)\n\t\t}\n\t}\n\tif undetermined > 0 {\n\t\tfmt.Printf(\"\\n%d processes left undetermined\\n\", undetermined)\n\t}\n}\n\nfunc isGo(executable string) (ok bool, err error) {\n\tobj, err := objfile.Open(executable)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer obj.Close()\n\n\tsymbols, err := obj.Symbols()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ TODO(jbd): find a faster way to determine Go programs.\n\tfor _, s := range symbols {\n\t\tif s.Name == \"runtime.buildVersion\" {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc usage(msg string) {\n\tif msg != \"\" {\n\t\tfmt.Printf(\"gops: %v\\n\", msg)\n\t}\n\tfmt.Fprintf(os.Stderr, \"%v\\n\", helpText)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"bufio\"\n \"flag\"\n \"fmt\"\n \"log\"\n \"os\"\n \"regexp\"\n \"strings\"\n \"time\"\n \"path\"\n)\n\ntype SubData struct {\n Num string\n TimeStart time.Time\n TimeEnd time.Time\n Text string\n}\n\nconst TIME_FORMAT = \"15:04:05.000\"\n\nvar timeReference time.Time\nvar subLines []SubData\nvar replaceComma bool\nvar subFileName string\n\nfunc main() {\n subFile := flag.String(\"f\", \"\", \"subtitle file\")\n shiftTime := flag.String(\"t\", \"\", \"time shift with format hh:mm:ss.000\")\n\n replaceComma = false\n\n flag.Parse()\n\n log.SetFlags(0)\n\n timeReference = setClock(time.Now(), 0, 0, 0, 0)\n\n readSubFile(*subFile)\n shiftSub(*shiftTime)\n writeSub()\n}\n\nfunc readSubFile(fileName string) {\n subFileName = fileName\n\n log.Println(\"Processing \" + subFileName)\n\n file, err := os.Open(subFileName)\n\n if err != nil {\n log.Fatalf(\"Error opening subtitle file: %v\", err)\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n\n subLines = make([]SubData, 0)\n\n var sub SubData\n\n for scanner.Scan() {\n text := scanner.Text()\n\n reNum := regexp.MustCompile(`^\\d+$`)\n reTime := regexp.MustCompile(`^(\\d{2}:\\d{2}:[\\d(,|\\.)]+).+(\\d{2}:\\d{2}:[\\d(,|\\.)]+)$`)\n\n switch {\n case reNum.MatchString(text):\n sub = SubData{}\n sub.Num = text\n\n case reTime.MatchString(text):\n subtime := reTime.FindStringSubmatch(text)\n\n if strings.LastIndex(subtime[1], \",\") != -1 {\n subtime[1] = strings.Replace(subtime[1], \",\", \".\", 1)\n subtime[2] = strings.Replace(subtime[2], \",\", \".\", 1)\n\n replaceComma = true\n }\n\n timeStart, _ := time.Parse(TIME_FORMAT, subtime[1])\n timeEnd, _ := time.Parse(TIME_FORMAT, subtime[2])\n\n sub.TimeStart = setClock(timeReference,\n timeStart.Hour(), timeStart.Minute(),\n timeStart.Second(), timeStart.Nanosecond())\n sub.TimeEnd = setClock(timeReference,\n timeEnd.Hour(), timeEnd.Minute(),\n timeEnd.Second(), timeEnd.Nanosecond())\n\n case text != \"\":\n sub.Text += text + \"\\n\"\n\n default:\n sub.Text = strings.TrimSpace(sub.Text)\n subLines = append(subLines, sub)\n\n }\n }\n\n if err := scanner.Err(); err != nil {\n log.Fatal(err)\n }\n}\n\nfunc shiftSub(shift string) {\n timeFormat := []string{\n \"5\",\n \"4:5\",\n \"15:4:5\",\n \"5.000\",\n \"4:5.000\",\n \"15:4:5.000\",\n }\n\n var sTime time.Time\n var err error\n sign := 1;\n trimmedShift := shift\n\n if strings.HasPrefix(shift, \"-\") {\n trimmedShift = strings.Trim(shift, \"- \")\n sign = -1;\n }\n\n for i := 0; i < len(timeFormat); i++ {\n sTime, err = time.Parse(timeFormat[i], trimmedShift)\n\n if err != nil {\n if i < 2 {\n continue\n } else {\n log.Fatalf(\"shift time (%s) not recognized\", shift)\n }\n }\n\n break;\n }\n\n tfmt := sTime.Format(fmt.Sprintf(\"%d:04:05.000\", sign * 15))\n log.Println(\"Shifting by: \" + tfmt)\n\n shiftDur := time.Duration(sTime.Hour()) * time.Hour +\n time.Duration(sTime.Minute()) * time.Minute +\n time.Duration(sTime.Second()) * time.Second +\n time.Duration(sTime.Nanosecond()) * time.Nanosecond\n\n if sign == -1 {\n shiftDur = -shiftDur\n }\n\n for i, sub := range subLines {\n var timeStart, timeEnd time.Time\n\n timeStart = sub.TimeStart.Add(shiftDur)\n timeEnd = sub.TimeEnd.Add(shiftDur)\n\n if timeStart.Before(timeReference) {\n timeStart = setClock(timeReference, 0, 0, 0, 0)\n }\n\n if timeEnd.Before(timeReference) {\n timeEnd = setClock(timeReference, 0, 0, 0, 0)\n }\n\n sub.TimeStart = timeStart\n sub.TimeEnd = timeEnd\n\n subLines[i] = sub\n }\n}\n\nfunc writeSub() {\n ext := path.Ext(subFileName)\n fname := strings.TrimSuffix(subFileName, ext)\n resName := fname + \"-resync\" + ext\n\n file, err := os.Create(resName)\n\n if err != nil {\n log.Fatalf(\"Error creating new subtitle file: %v\", err)\n return\n }\n\n defer file.Close()\n\n for _, sub := range subLines {\n file.WriteString(sub.Num + \"\\n\")\n\n start := sub.TimeStart.Format(\"15:04:05.000\")\n end := sub.TimeEnd.Format(\"15:04:05.000\")\n\n file.WriteString(start + \" --> \" + end + \"\\n\")\n file.WriteString(sub.Text + \"\\n\\n\")\n }\n\n log.Printf(\"Result has been saved as %s\", resName)\n}\n\nfunc setClock(originalTime time.Time, hour, minute, second, nanosecond int) time.Time {\n refYear, refMonth, refDay := originalTime.Date()\n\n return time.Date(refYear, refMonth, refDay,\n hour, minute, second, nanosecond,\n originalTime.Location())\n}\n<commit_msg>remove any UTF BOM<commit_after>package main\n\nimport (\n \"bufio\"\n \"flag\"\n \"fmt\"\n \"log\"\n \"os\"\n \"regexp\"\n \"strings\"\n \"time\"\n \"path\"\n)\n\ntype SubData struct {\n Num string\n TimeStart time.Time\n TimeEnd time.Time\n Text string\n}\n\nconst TIME_FORMAT = \"15:04:05.000\"\n\nvar timeReference time.Time\nvar subLines []SubData\nvar replaceComma bool\nvar subFileName string\n\nfunc main() {\n subFile := flag.String(\"f\", \"\", \"subtitle file\")\n shiftTime := flag.String(\"t\", \"\", \"time shift with format hh:mm:ss.000\")\n\n replaceComma = false\n\n flag.Parse()\n\n log.SetFlags(0)\n\n timeReference = setClock(time.Now(), 0, 0, 0, 0)\n\n readSubFile(*subFile)\n shiftSub(*shiftTime)\n writeSub()\n}\n\nfunc readSubFile(fileName string) {\n subFileName = fileName\n\n log.Println(\"Processing \" + subFileName)\n\n file, err := os.Open(subFileName)\n\n if err != nil {\n log.Fatalf(\"Error opening subtitle file: %v\", err)\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n\n subLines = make([]SubData, 0)\n\n var sub SubData\n\n for scanner.Scan() {\n text := strings.TrimSpace(scanner.Text())\n\n reBom := regexp.MustCompile(`\\x{feff}`)\n reNum := regexp.MustCompile(`^\\d+$`)\n reTime := regexp.MustCompile(`^(\\d{2}:\\d{2}:[\\d(,|\\.)]+).+(\\d{2}:\\d{2}:[\\d(,|\\.)]+)$`)\n\n \/\/ remove any UTF BOM\n text = reBom.ReplaceAllString(text, \"\")\n\n switch {\n case reNum.MatchString(text):\n sub = SubData{}\n sub.Num = text\n\n case reTime.MatchString(text):\n subtime := reTime.FindStringSubmatch(text)\n\n if strings.LastIndex(subtime[1], \",\") != -1 {\n subtime[1] = strings.Replace(subtime[1], \",\", \".\", 1)\n subtime[2] = strings.Replace(subtime[2], \",\", \".\", 1)\n\n replaceComma = true\n }\n\n timeStart, _ := time.Parse(TIME_FORMAT, subtime[1])\n timeEnd, _ := time.Parse(TIME_FORMAT, subtime[2])\n\n sub.TimeStart = setClock(timeReference,\n timeStart.Hour(), timeStart.Minute(),\n timeStart.Second(), timeStart.Nanosecond())\n sub.TimeEnd = setClock(timeReference,\n timeEnd.Hour(), timeEnd.Minute(),\n timeEnd.Second(), timeEnd.Nanosecond())\n\n case text != \"\":\n sub.Text += text + \"\\n\"\n\n default:\n sub.Text = strings.TrimSpace(sub.Text)\n subLines = append(subLines, sub)\n\n }\n }\n\n if err := scanner.Err(); err != nil {\n log.Fatal(err)\n }\n}\n\nfunc shiftSub(shift string) {\n timeFormat := []string{\n \"5\",\n \"4:5\",\n \"15:4:5\",\n \"5.000\",\n \"4:5.000\",\n \"15:4:5.000\",\n }\n\n var sTime time.Time\n var err error\n sign := 1;\n trimmedShift := shift\n\n if strings.HasPrefix(shift, \"-\") {\n trimmedShift = strings.Trim(shift, \"- \")\n sign = -1;\n }\n\n for i := 0; i < len(timeFormat); i++ {\n sTime, err = time.Parse(timeFormat[i], trimmedShift)\n\n if err != nil {\n if i < 2 {\n continue\n } else {\n log.Fatalf(\"shift time (%s) not recognized\", shift)\n }\n }\n\n break;\n }\n\n tfmt := sTime.Format(fmt.Sprintf(\"%d:04:05.000\", sign * 15))\n log.Println(\"Shifting by: \" + tfmt)\n\n shiftDur := time.Duration(sTime.Hour()) * time.Hour +\n time.Duration(sTime.Minute()) * time.Minute +\n time.Duration(sTime.Second()) * time.Second +\n time.Duration(sTime.Nanosecond()) * time.Nanosecond\n\n if sign == -1 {\n shiftDur = -shiftDur\n }\n\n for i, sub := range subLines {\n var timeStart, timeEnd time.Time\n\n timeStart = sub.TimeStart.Add(shiftDur)\n timeEnd = sub.TimeEnd.Add(shiftDur)\n\n if timeStart.Before(timeReference) {\n timeStart = setClock(timeReference, 0, 0, 0, 0)\n }\n\n if timeEnd.Before(timeReference) {\n timeEnd = setClock(timeReference, 0, 0, 0, 0)\n }\n\n sub.TimeStart = timeStart\n sub.TimeEnd = timeEnd\n\n subLines[i] = sub\n }\n}\n\nfunc writeSub() {\n ext := path.Ext(subFileName)\n fname := strings.TrimSuffix(subFileName, ext)\n resName := fname + \"-resync\" + ext\n\n file, err := os.Create(resName)\n\n if err != nil {\n log.Fatalf(\"Error creating new subtitle file: %v\", err)\n return\n }\n\n defer file.Close()\n\n for _, sub := range subLines {\n start := sub.TimeStart.Format(\"15:04:05.000\")\n end := sub.TimeEnd.Format(\"15:04:05.000\")\n\n if replaceComma {\n start = strings.Replace(start, \".\", \",\", 1)\n end = strings.Replace(end, \".\", \",\", 1)\n }\n\n file.WriteString(sub.Num + \"\\n\")\n file.WriteString(start + \" --> \" + end + \"\\n\")\n file.WriteString(sub.Text + \"\\n\\n\")\n }\n\n log.Printf(\"Result has been saved as %s\", resName)\n}\n\nfunc setClock(originalTime time.Time, hour, minute, second, nanosecond int) time.Time {\n refYear, refMonth, refDay := originalTime.Date()\n\n return time.Date(refYear, refMonth, refDay,\n hour, minute, second, nanosecond,\n originalTime.Location())\n}\n<|endoftext|>"} {"text":"<commit_before>package grim\n\n\/\/ Copyright 2015 MediaMath <http:\/\/www.mediamath.com>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\nimport \"fmt\"\n\n\/\/ Instance models the state of a configured Grim instance.\ntype Instance struct {\n\tconfigRoot *string\n\tqueue *sqsQueue\n}\n\n\/\/ SetConfigRoot sets the base path of the configuration directory and clears any previously read config values from memory.\nfunc (i *Instance) SetConfigRoot(path string) {\n\ti.configRoot = &path\n\ti.queue = nil\n}\n\n\/\/ PrepareGrimQueue creates or reuses the Amazon SQS queue named in the config.\nfunc (i *Instance) PrepareGrimQueue() error {\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\tqueue, err := prepareSQSQueue(config.awsKey, config.awsSecret, config.awsRegion, config.grimQueueName)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error preparing queue: %v\", err)\n\t}\n\n\ti.queue = queue\n\n\treturn nil\n}\n\n\/\/ PrepareRepos discovers all repos that are configured then sets up SNS and GitHub.\n\/\/ It is an error to call this without calling PrepareGrimQueue first.\nfunc (i *Instance) PrepareRepos() error {\n\tif err := i.checkGrimQueue(); err != nil {\n\t\treturn err\n\t}\n\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\trepos := getAllConfiguredRepos(configRoot)\n\n\tvar topicARNs []string\n\tfor _, repo := range repos {\n\t\tlocalConfig, err := getEffectiveConfig(configRoot, repo.owner, repo.name)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error while reading local config: %v\", err)\n\t\t}\n\n\t\tsnsTopicName := fmt.Sprintf(\"grim-%v-%v-repo-topic\", repo.owner, repo.name)\n\n\t\tsnsTopicARN, err := prepareSNSTopic(config.awsKey, config.awsSecret, config.awsRegion, snsTopicName)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error creating SNS topic: %v\", err)\n\t\t}\n\n\t\terr = prepareSubscription(config.awsKey, config.awsSecret, config.awsRegion, snsTopicARN, i.queue.ARN)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error subscribing Grim queue %q to SNS topic %q: %v\", i.queue.ARN, snsTopicARN, err)\n\t\t}\n\t\terr = prepareAmazonSNSService(localConfig.gitHubToken, repo.owner, repo.name, snsTopicARN, config.awsKey, config.awsSecret, config.awsRegion)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error creating configuring GitHub AmazonSNS service: %v\", err)\n\t\t}\n\t\ttopicARNs = append(topicARNs, snsTopicARN)\n\t}\n\n\terr = setPolicy(config.awsKey, config.awsSecret, config.awsRegion, i.queue.ARN, i.queue.URL, topicARNs)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error setting policy for Grim queue %q with topics %v: %v\", i.queue.ARN, topicARNs, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildNextInGrimQueue creates or reuses an SQS queue as a source of work.\nfunc (i *Instance) BuildNextInGrimQueue() error {\n\tif err := i.checkGrimQueue(); err != nil {\n\t\treturn err\n\t}\n\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tglobalConfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn grimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\tmessage, err := getNextMessage(globalConfig.awsKey, globalConfig.awsSecret, globalConfig.awsRegion, i.queue.URL)\n\tif err != nil {\n\t\treturn grimErrorf(\"error retrieving message from Grim queue %q: %v\", i.queue.URL, err)\n\t}\n\n\tif message != \"\" {\n\t\thook, err := extractHookEvent(message)\n\t\tif err != nil {\n\t\t\treturn grimErrorf(\"error extracting hook from message: %v\", err)\n\t\t}\n\n\t\tif !(hook.eventName == \"push\" || hook.eventName == \"pull_request\" && (hook.action == \"opened\" || hook.action == \"reopened\" || hook.action == \"synchronize\")) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif hook.eventName == \"pull_request\" {\n\t\t\tsha, err := pollForMergeCommitSha(globalConfig.gitHubToken, hook.owner, hook.repo, hook.prNumber)\n\t\t\tif err != nil {\n\t\t\t\treturn grimErrorf(\"error getting merge commit sha: %v\", err)\n\t\t\t} else if sha == \"\" {\n\t\t\t\treturn grimErrorf(\"error getting merge commit sha: field empty\")\n\t\t\t}\n\t\t\thook.ref = sha\n\t\t}\n\n\t\tlocalConfig, err := getEffectiveConfig(configRoot, hook.owner, hook.repo)\n\t\tif err != nil {\n\t\t\treturn grimErrorf(\"error while reading config: %v\", err)\n\t\t}\n\n\t\treturn buildForHook(configRoot, localConfig, *hook)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildRef builds a git ref immediately.\nfunc (i *Instance) BuildRef(owner, repo, ref string) error {\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveConfig(configRoot, owner, repo)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\treturn buildForHook(configRoot, config, hookEvent{\n\t\towner: owner,\n\t\trepo: repo,\n\t\tref: ref,\n\t})\n}\n\nfunc buildForHook(configRoot string, config *effectiveConfig, hook hookEvent) error {\n\textraEnv := hook.env()\n\n\t\/\/ TODO: do something with the err\n\tnotifyPending(config, hook)\n\n\tresult, err := build(configRoot, config.workspaceRoot, config.pathToCloneIn, hook.owner, hook.repo, extraEnv)\n\tif err != nil {\n\t\tnotifyError(config, hook)\n\t\treturn fatalGrimErrorf(\"error during %v: %v\", describeHook(hook), err)\n\t}\n\n\tvar notifyError error\n\tif result.ExitCode == 0 {\n\t\tnotifyError = notifySuccess(config, hook)\n\t} else {\n\t\tnotifyError = notifyFailure(config, hook)\n\t}\n\n\terr = appendResult(config.resultRoot, hook.owner, hook.repo, *result)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while storing result: %v\", err)\n\t}\n\n\treturn notifyError\n}\n\nfunc describeHook(hook hookEvent) string {\n\treturn fmt.Sprintf(\"build of %v\/%v initiated by a %q to %q by %q\", hook.owner, hook.repo, hook.eventName, hook.target, hook.userName)\n}\n\nfunc notifyPending(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSPending, fmt.Sprintf(\"Starting %v\", describeHook(hook)), ColorYellow)\n}\n\nfunc notifyError(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSError, fmt.Sprintf(\"Error during %v\", describeHook(hook)), ColorGray)\n}\n\nfunc notifyFailure(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSFailure, fmt.Sprintf(\"Failure during %v\", describeHook(hook)), ColorRed)\n}\n\nfunc notifySuccess(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSSuccess, fmt.Sprintf(\"Success after %v\", describeHook(hook)), ColorGreen)\n}\n\nfunc notify(config *effectiveConfig, hook hookEvent, state refStatus, message string, color messageColor) error {\n\tif hook.eventName != \"push\" && hook.eventName != \"pull_request\" {\n\t\treturn nil\n\t}\n\n\t\/\/add grimServerID\/grimQueueName to hipchat message for test\n\tmessage += \"ServerID:\" + config.grimServerID\n\tfmt.Printf(\"The updated message is:\", message)\n\n\tghErr := setRefStatus(config.gitHubToken, hook.owner, hook.repo, hook.statusRef, state, \"\", message)\n\n\tif config.hipChatToken != \"\" && config.hipChatRoom != \"\" {\n\t\terr := sendMessageToRoom(config.hipChatToken, config.hipChatRoom, \"Grim\", message, color)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn ghErr\n}\n\nfunc (i *Instance) checkGrimQueue() error {\n\tif i.queue == nil {\n\t\treturn fatalGrimErrorf(\"the Grim queue must be prepared first\")\n\t}\n\n\treturn nil\n}\n<commit_msg>grim local test<commit_after>package grim\n\n\/\/ Copyright 2015 MediaMath <http:\/\/www.mediamath.com>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\nimport \"fmt\"\n\n\/\/ Instance models the state of a configured Grim instance.\ntype Instance struct {\n\tconfigRoot *string\n\tqueue *sqsQueue\n}\n\n\/\/ SetConfigRoot sets the base path of the configuration directory and clears any previously read config values from memory.\nfunc (i *Instance) SetConfigRoot(path string) {\n\ti.configRoot = &path\n\ti.queue = nil\n}\n\n\/\/ PrepareGrimQueue creates or reuses the Amazon SQS queue named in the config.\nfunc (i *Instance) PrepareGrimQueue() error {\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\tqueue, err := prepareSQSQueue(config.awsKey, config.awsSecret, config.awsRegion, config.grimQueueName)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error preparing queue: %v\", err)\n\t}\n\n\ti.queue = queue\n\n\treturn nil\n}\n\n\/\/ PrepareRepos discovers all repos that are configured then sets up SNS and GitHub.\n\/\/ It is an error to call this without calling PrepareGrimQueue first.\nfunc (i *Instance) PrepareRepos() error {\n\tif err := i.checkGrimQueue(); err != nil {\n\t\treturn err\n\t}\n\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\trepos := getAllConfiguredRepos(configRoot)\n\n\tvar topicARNs []string\n\tfor _, repo := range repos {\n\t\tlocalConfig, err := getEffectiveConfig(configRoot, repo.owner, repo.name)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error while reading local config: %v\", err)\n\t\t}\n\n\t\tsnsTopicName := fmt.Sprintf(\"grim-%v-%v-repo-topic\", repo.owner, repo.name)\n\n\t\tsnsTopicARN, err := prepareSNSTopic(config.awsKey, config.awsSecret, config.awsRegion, snsTopicName)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error creating SNS topic: %v\", err)\n\t\t}\n\n\t\terr = prepareSubscription(config.awsKey, config.awsSecret, config.awsRegion, snsTopicARN, i.queue.ARN)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error subscribing Grim queue %q to SNS topic %q: %v\", i.queue.ARN, snsTopicARN, err)\n\t\t}\n\t\terr = prepareAmazonSNSService(localConfig.gitHubToken, repo.owner, repo.name, snsTopicARN, config.awsKey, config.awsSecret, config.awsRegion)\n\t\tif err != nil {\n\t\t\treturn fatalGrimErrorf(\"error creating configuring GitHub AmazonSNS service: %v\", err)\n\t\t}\n\t\ttopicARNs = append(topicARNs, snsTopicARN)\n\t}\n\n\terr = setPolicy(config.awsKey, config.awsSecret, config.awsRegion, i.queue.ARN, i.queue.URL, topicARNs)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error setting policy for Grim queue %q with topics %v: %v\", i.queue.ARN, topicARNs, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildNextInGrimQueue creates or reuses an SQS queue as a source of work.\nfunc (i *Instance) BuildNextInGrimQueue() error {\n\tif err := i.checkGrimQueue(); err != nil {\n\t\treturn err\n\t}\n\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tglobalConfig, err := getEffectiveGlobalConfig(configRoot)\n\tif err != nil {\n\t\treturn grimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\tmessage, err := getNextMessage(globalConfig.awsKey, globalConfig.awsSecret, globalConfig.awsRegion, i.queue.URL)\n\tif err != nil {\n\t\treturn grimErrorf(\"error retrieving message from Grim queue %q: %v\", i.queue.URL, err)\n\t}\n\n\tif message != \"\" {\n\t\thook, err := extractHookEvent(message)\n\t\tif err != nil {\n\t\t\treturn grimErrorf(\"error extracting hook from message: %v\", err)\n\t\t}\n\n\t\tif !(hook.eventName == \"push\" || hook.eventName == \"pull_request\" && (hook.action == \"opened\" || hook.action == \"reopened\" || hook.action == \"synchronize\")) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif hook.eventName == \"pull_request\" {\n\t\t\tsha, err := pollForMergeCommitSha(globalConfig.gitHubToken, hook.owner, hook.repo, hook.prNumber)\n\t\t\tif err != nil {\n\t\t\t\treturn grimErrorf(\"error getting merge commit sha: %v\", err)\n\t\t\t} else if sha == \"\" {\n\t\t\t\treturn grimErrorf(\"error getting merge commit sha: field empty\")\n\t\t\t}\n\t\t\thook.ref = sha\n\t\t}\n\n\t\tlocalConfig, err := getEffectiveConfig(configRoot, hook.owner, hook.repo)\n\t\tif err != nil {\n\t\t\treturn grimErrorf(\"error while reading config: %v\", err)\n\t\t}\n\n\t\treturn buildForHook(configRoot, localConfig, *hook)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildRef builds a git ref immediately.\nfunc (i *Instance) BuildRef(owner, repo, ref string) error {\n\tconfigRoot := getEffectiveConfigRoot(i.configRoot)\n\n\tconfig, err := getEffectiveConfig(configRoot, owner, repo)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while reading config: %v\", err)\n\t}\n\n\treturn buildForHook(configRoot, config, hookEvent{\n\t\towner: owner,\n\t\trepo: repo,\n\t\tref: ref,\n\t})\n}\n\nfunc buildForHook(configRoot string, config *effectiveConfig, hook hookEvent) error {\n\textraEnv := hook.env()\n\n\t\/\/ TODO: do something with the err\n\tnotifyPending(config, hook)\n\n\tresult, err := build(configRoot, config.workspaceRoot, config.pathToCloneIn, hook.owner, hook.repo, extraEnv)\n\tif err != nil {\n\t\tnotifyError(config, hook)\n\t\treturn fatalGrimErrorf(\"error during %v: %v\", describeHook(hook), err)\n\t}\n\n\tvar notifyError error\n\tif result.ExitCode == 0 {\n\t\tnotifyError = notifySuccess(config, hook)\n\t} else {\n\t\tnotifyError = notifyFailure(config, hook)\n\t}\n\n\terr = appendResult(config.resultRoot, hook.owner, hook.repo, *result)\n\tif err != nil {\n\t\treturn fatalGrimErrorf(\"error while storing result: %v\", err)\n\t}\n\n\treturn notifyError\n}\n\nfunc describeHook(hook hookEvent) string {\n\treturn fmt.Sprintf(\"build of %v\/%v initiated by a %q to %q by %q\", hook.owner, hook.repo, hook.eventName, hook.target, hook.userName)\n}\n\nfunc notifyPending(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSPending, fmt.Sprintf(\"Starting %v\", describeHook(hook)), ColorYellow)\n}\n\nfunc notifyError(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSError, fmt.Sprintf(\"Error during %v\", describeHook(hook)), ColorGray)\n}\n\nfunc notifyFailure(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSFailure, fmt.Sprintf(\"Failure during %v\", describeHook(hook)), ColorRed)\n}\n\nfunc notifySuccess(config *effectiveConfig, hook hookEvent) error {\n\treturn notify(config, hook, RSSuccess, fmt.Sprintf(\"Success after %v\", describeHook(hook)), ColorGreen)\n}\n\nfunc notify(config *effectiveConfig, hook hookEvent, state refStatus, message string, color messageColor) error {\n\tif hook.eventName != \"push\" && hook.eventName != \"pull_request\" {\n\t\treturn nil\n\t}\n\n\t\/\/add grimServerID\/grimQueueName to hipchat message\n\tmessage += \":\" + \"ServerID\/QueueName - \" + config.grimServerID\n\n\tghErr := setRefStatus(config.gitHubToken, hook.owner, hook.repo, hook.statusRef, state, \"\", message)\n\n\tif config.hipChatToken != \"\" && config.hipChatRoom != \"\" {\n\t\terr := sendMessageToRoom(config.hipChatToken, config.hipChatRoom, \"Grim\", message, color)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn ghErr\n}\n\nfunc (i *Instance) checkGrimQueue() error {\n\tif i.queue == nil {\n\t\treturn fatalGrimErrorf(\"the Grim queue must be prepared first\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tpb \"github.com\/itshouldntdothis\/swagger-http-grpc\"\n\n\t\"go.uber.org\/ratelimit\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/reflection\"\n\t\"gopkg.in\/resty.v0\"\n)\n\n\/\/ GrcpServerOptions implements processing functions\ntype GrcpServerOptions struct {\n\tAddr string\n\tServer *grpc.Server\n\tRateLimited bool\n\tRateLimiter ratelimit.Limiter\n\tRestyClient *resty.Client\n\tUserAgent string\n}\n\n\/\/ NewGrcpOptions returns a initialized object\nfunc NewGrcpOptions() *GrcpServerOptions {\n\tTransport := &http.Transport{\n\t\tMaxIdleConns: 200,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 60 * time.Second,\n\t\t\tKeepAlive: 60 * time.Second,\n\t\t}).DialContext,\n\t\tIdleConnTimeout: 60 * time.Second,\n\t\tTLSHandshakeTimeout: 20 * time.Second,\n\t\tResponseHeaderTimeout: 20 * time.Second,\n\t\tExpectContinueTimeout: 0,\n\t\tMaxIdleConnsPerHost: 40,\n\t}\n\n\tport := \"50051\"\n\tenvGrcpPort := os.Getenv(\"SW_GRPC_PORT\")\n\tif len(envGrcpPort) != 0 {\n\t\tport = envGrcpPort\n\t}\n\tAddr := \":\" + port\n\tlog.Printf(\"SET GRPC server port to %v\", Addr)\n\n\tUserAgent := os.Getenv(\"SW_USER_AGENT\")\n\tif len(UserAgent) == 0 {\n\t\tlog.Fatal(\"Default useragent env `SW_USER_AGENT` is required\")\n\t}\n\tlog.Printf(\"SET GRPC HTTP WORKER User-Agent to `%v`\", UserAgent)\n\n\tRateLimit := 700\n\tvar RateLimiter ratelimit.Limiter\n\tvar RateLimited bool\n\tenvRateLimit := os.Getenv(\"SW_REQUEST_LIMIT\")\n\tif len(envRateLimit) != 0 {\n\t\tparsedRequestLimit, err := strconv.Atoi(envRateLimit)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to covert ENV 'SW_REQUEST_LIMIT' of %v to int : %v\", envRateLimit, err)\n\t\t}\n\t\tRateLimit = parsedRequestLimit\n\t}\n\n\tif RateLimit == 0 {\n\t\tRateLimited = false\n\t\tlog.Println(\"Rate Limit of 0 is not recommended\")\n\t} else {\n\t\tRateLimited = true\n\t\tlog.Printf(\"Rate Limit set to %v\", RateLimit)\n\t\tRateLimiter = ratelimit.New(RateLimit)\n\t}\n\n\treturn &GrcpServerOptions{\n\t\tServer: grpc.NewServer(),\n\t\tAddr: Addr,\n\t\tRestyClient: resty.New().SetHeader(\"Via\", via).SetTransport(Transport),\n\t\tRateLimited: RateLimited,\n\t\tRateLimiter: RateLimiter,\n\t\tUserAgent: UserAgent,\n\t}\n}\n\nfunc (o *GrcpServerOptions) start() {\n\tlis, err := net.Listen(\"tcp\", o.Addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to listen: %v\", err)\n\t}\n\tpb.RegisterWorkersServer(o.Server, o)\n\t\/\/ Register reflection service on gRPC server.\n\treflection.Register(o.Server)\n\n\tlog.Println(\"Starting grpc server\")\n\tif err := o.Server.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"Failed to serve grpc server: %v\", err)\n\t}\n}\n\nfunc (o *GrcpServerOptions) close() {\n\tlog.Println(\"Closing down grpc server\")\n\to.Server.GracefulStop()\n}\n\n\/\/ DoRequest processes a single http request from grpc\nfunc (o *GrcpServerOptions) DoRequest(ctx context.Context, in *pb.Request) (*pb.Response, error) {\n\tmethod := in.GetMethod()\n\tif method != \"GET\" && method != \"POST\" && method != \"PUT\" && method != \"DELETE\" {\n\t\treturn nil, errors.New(\"We don't support \" + method + \" method at this time\")\n\t}\n\n\trestyReq := o.RestyClient.R()\n\tif len(in.GetHeaders()) > 0 {\n\t\trestyReq = restyReq.SetHeaders(in.GetHeaders())\n\t}\n\n\tif restyReq.Header.Get(\"User-Agent\") == \"\" {\n\t\trestyReq = restyReq.SetHeader(\"User-Agent\", o.UserAgent)\n\t}\n\n\tif in.GetBody() != \"\" {\n\t\trestyReq = restyReq.SetBody(in.GetBody())\n\t}\n\n\tif o.RateLimited {\n\t\to.RateLimiter.Take()\n\t}\n\n\tresp, err := restyReq.Execute(method, in.GetUrl())\n\tif err != nil {\n\t\tlog.Printf(\"Fetch Error %v\", err)\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"%v successful for %v took %v\", method, resp.Request.URL, resp.Time())\n\n\theaders := make(map[string]*pb.Header)\n\tfor k, v := range resp.Header() {\n\t\theaders[strings.ToLower(k)] = &pb.Header{Value: v}\n\t}\n\n\treturn &pb.Response{\n\t\tOk: isOk(resp.StatusCode()),\n\t\tUrl: resp.Request.URL,\n\t\tStatus: int32(resp.StatusCode()),\n\t\tStatusText: resp.Status(),\n\t\tBody: resp.String(),\n\t\tHeaders: headers,\n\t}, nil\n}\n\nfunc isOk(status int) bool {\n\tif status >= 200 && status <= 299 {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Log time delayed<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tpb \"github.com\/itshouldntdothis\/swagger-http-grpc\"\n\n\t\"go.uber.org\/ratelimit\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/reflection\"\n\t\"gopkg.in\/resty.v0\"\n)\n\n\/\/ GrcpServerOptions implements processing functions\ntype GrcpServerOptions struct {\n\tAddr string\n\tServer *grpc.Server\n\tRateLimited bool\n\tRateLimiter ratelimit.Limiter\n\tRestyClient *resty.Client\n\tUserAgent string\n}\n\n\/\/ NewGrcpOptions returns a initialized object\nfunc NewGrcpOptions() *GrcpServerOptions {\n\tTransport := &http.Transport{\n\t\tMaxIdleConns: 200,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 60 * time.Second,\n\t\t\tKeepAlive: 60 * time.Second,\n\t\t}).DialContext,\n\t\tIdleConnTimeout: 60 * time.Second,\n\t\tTLSHandshakeTimeout: 20 * time.Second,\n\t\tResponseHeaderTimeout: 20 * time.Second,\n\t\tExpectContinueTimeout: 0,\n\t\tMaxIdleConnsPerHost: 40,\n\t}\n\n\tport := \"50051\"\n\tenvGrcpPort := os.Getenv(\"SW_GRPC_PORT\")\n\tif len(envGrcpPort) != 0 {\n\t\tport = envGrcpPort\n\t}\n\tAddr := \":\" + port\n\tlog.Printf(\"SET GRPC server port to %v\", Addr)\n\n\tUserAgent := os.Getenv(\"SW_USER_AGENT\")\n\tif len(UserAgent) == 0 {\n\t\tlog.Fatal(\"Default useragent env `SW_USER_AGENT` is required\")\n\t}\n\tlog.Printf(\"SET GRPC HTTP WORKER User-Agent to `%v`\", UserAgent)\n\n\tRateLimit := 700\n\tvar RateLimiter ratelimit.Limiter\n\tvar RateLimited bool\n\tenvRateLimit := os.Getenv(\"SW_REQUEST_LIMIT\")\n\tif len(envRateLimit) != 0 {\n\t\tparsedRequestLimit, err := strconv.Atoi(envRateLimit)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to covert ENV 'SW_REQUEST_LIMIT' of %v to int : %v\", envRateLimit, err)\n\t\t}\n\t\tRateLimit = parsedRequestLimit\n\t}\n\n\tif RateLimit == 0 {\n\t\tRateLimited = false\n\t\tlog.Println(\"Rate Limit of 0 is not recommended\")\n\t} else {\n\t\tRateLimited = true\n\t\tlog.Printf(\"Rate Limit set to %v\", RateLimit)\n\t\tRateLimiter = ratelimit.New(RateLimit)\n\t}\n\n\treturn &GrcpServerOptions{\n\t\tServer: grpc.NewServer(),\n\t\tAddr: Addr,\n\t\tRestyClient: resty.New().SetHeader(\"Via\", via).SetTransport(Transport),\n\t\tRateLimited: RateLimited,\n\t\tRateLimiter: RateLimiter,\n\t\tUserAgent: UserAgent,\n\t}\n}\n\nfunc (o *GrcpServerOptions) start() {\n\tlis, err := net.Listen(\"tcp\", o.Addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to listen: %v\", err)\n\t}\n\tpb.RegisterWorkersServer(o.Server, o)\n\t\/\/ Register reflection service on gRPC server.\n\treflection.Register(o.Server)\n\n\tlog.Println(\"Starting grpc server\")\n\tif err := o.Server.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"Failed to serve grpc server: %v\", err)\n\t}\n}\n\nfunc (o *GrcpServerOptions) close() {\n\tlog.Println(\"Closing down grpc server\")\n\to.Server.GracefulStop()\n}\n\n\/\/ DoRequest processes a single http request from grpc\nfunc (o *GrcpServerOptions) DoRequest(ctx context.Context, in *pb.Request) (*pb.Response, error) {\n\tmethod := in.GetMethod()\n\tif method != \"GET\" && method != \"POST\" && method != \"PUT\" && method != \"DELETE\" {\n\t\treturn nil, errors.New(\"We don't support \" + method + \" method at this time\")\n\t}\n\n\trestyReq := o.RestyClient.R()\n\tif len(in.GetHeaders()) > 0 {\n\t\trestyReq = restyReq.SetHeaders(in.GetHeaders())\n\t}\n\n\tif restyReq.Header.Get(\"User-Agent\") == \"\" {\n\t\trestyReq = restyReq.SetHeader(\"User-Agent\", o.UserAgent)\n\t}\n\n\tif in.GetBody() != \"\" {\n\t\trestyReq = restyReq.SetBody(in.GetBody())\n\t}\n\n\tvar delay time.Duration\n\tif o.RateLimited {\n\t\tstart := time.Now()\n\t\tnow := o.RateLimiter.Take()\n\t\tdelay = now.Sub(start)\n\t}\n\n\tresp, err := restyReq.Execute(method, in.GetUrl())\n\tif err != nil {\n\t\tlog.Printf(\"Fetch Error %v\", err)\n\t\treturn nil, err\n\t}\n\n\theaders := make(map[string]*pb.Header)\n\tfor k, v := range resp.Header() {\n\t\theaders[strings.ToLower(k)] = &pb.Header{Value: v}\n\t}\n\n\tlog.Printf(\"%v successful for %v took %v delayed by %v\", method, resp.Request.URL, resp.Time(), delay)\n\treturn &pb.Response{\n\t\tOk: isOk(resp.StatusCode()),\n\t\tUrl: resp.Request.URL,\n\t\tStatus: int32(resp.StatusCode()),\n\t\tStatusText: resp.Status(),\n\t\tBody: resp.String(),\n\t\tHeaders: headers,\n\t}, nil\n}\n\nfunc isOk(status int) bool {\n\tif status >= 200 && status <= 299 {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2013-2017, Jeremy Bingham (<jeremy@goiardi.gl>)\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"github.com\/ctdk\/goiardi\/actor\"\n\t\"github.com\/ctdk\/goiardi\/gerror\"\n\t\"github.com\/ctdk\/goiardi\/util\"\n\t\"github.com\/tideland\/golib\/logger\"\n\t\"net\/http\"\n)\n\n\/\/ Functions and types for HEAD responses for various endpoints\n\n\/\/ Implements Chef RFCnnn (unassigned so far)\n\n\/\/ Full table of what everything means will be added when the RFC is finalized\n\ntype headChecker interface {\n\tactor.Actor\n\tDoesExist(string) (bool, util.Gerror)\n}\n\ntype exists func(resource string) (bool, util.Gerror)\n\ntype permChecker func(r *http.Request, resource string, obj actor.Actor) util.Gerror\n\n\/\/ for when no perm check is actually necessary\nfunc nilPermCheck(r *http.Request, resource string, obj actor.Actor) util.Gerror {\n\treturn nil\n}\n\nfunc headResponse(w http.ResponseWriter, r *http.Request, status int) {\n\tlogger.Debugf(\"HEAD response status %d for %s\", status, r.URL.Path)\n\tw.WriteHeader(status)\n\treturn\n}\n\nfunc headDefaultResponse(w http.ResponseWriter, r *http.Request) {\n\tlogger.Debugf(\"HEAD default response issued for %s\", r.URL.Path)\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc headChecking(w http.ResponseWriter, r *http.Request, obj actor.Actor, resource string, doesExist exists, permCheck permChecker) {\n\tfound, err := doesExist(resource)\n\tif err != nil {\n\t\theadResponse(w, r, err.Status())\n\t}\n\terr = permCheck(r, resource, obj)\n\tif err != nil {\n\t\theadResponse(w, r, err.Status())\n\t}\n\tif !found {\n\t\theadResponse(w, r, http.StatusNotFound)\n\t}\n\theadResponse(w, r, http.StatusOK)\n}\n\nfunc headForbidden() util.Gerror {\n\terr := gerror.StatusError(\"forbidden\", http.StatusForbidden)\n\treturn err\n}\n<commit_msg>Chef RFC090 has been accepted and numbered<commit_after>\/*\n * Copyright (c) 2013-2017, Jeremy Bingham (<jeremy@goiardi.gl>)\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"github.com\/ctdk\/goiardi\/actor\"\n\t\"github.com\/ctdk\/goiardi\/gerror\"\n\t\"github.com\/ctdk\/goiardi\/util\"\n\t\"github.com\/tideland\/golib\/logger\"\n\t\"net\/http\"\n)\n\n\/\/ Functions and types for HEAD responses for various endpoints\n\/\/\n\/\/ Implements Chef RFC090 -\n\/\/ (https:\/\/github.com\/chef\/chef-rfc\/blob\/master\/rfc090-server-http-head.md)\n\/\/\n\/\/ From the RFC:\n\/\/\n\/\/ Code \tReason\n\/\/ ---- \t------\n\/\/ 200 \t\tobject exists\n\/\/ 401 \t\trequest signature is invalid\n\/\/ 403 \t\trequestor does not have READ on the object\n\/\/ 404 \t\tobject does not exist\n\/\/\n\/\/ Obviously, the 403 bit will be a little bit different for goiardi 0.x.y vs.\n\/\/ the 1.0.0 branch because of the different permissions scheme.\n\/\/\n\/\/ HEAD should be present everywhere GET is, even if the HEAD request isn't\n\/\/ particularly meaningful.\n\ntype headChecker interface {\n\tactor.Actor\n\tDoesExist(string) (bool, util.Gerror)\n}\n\ntype exists func(resource string) (bool, util.Gerror)\n\ntype permChecker func(r *http.Request, resource string, obj actor.Actor) util.Gerror\n\n\/\/ for when no perm check is actually necessary\nfunc nilPermCheck(r *http.Request, resource string, obj actor.Actor) util.Gerror {\n\treturn nil\n}\n\nfunc headResponse(w http.ResponseWriter, r *http.Request, status int) {\n\tlogger.Debugf(\"HEAD response status %d for %s\", status, r.URL.Path)\n\tw.WriteHeader(status)\n\treturn\n}\n\nfunc headDefaultResponse(w http.ResponseWriter, r *http.Request) {\n\tlogger.Debugf(\"HEAD default response issued for %s\", r.URL.Path)\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc headChecking(w http.ResponseWriter, r *http.Request, obj actor.Actor, resource string, doesExist exists, permCheck permChecker) {\n\tfound, err := doesExist(resource)\n\tif err != nil {\n\t\theadResponse(w, r, err.Status())\n\t}\n\terr = permCheck(r, resource, obj)\n\tif err != nil {\n\t\theadResponse(w, r, err.Status())\n\t}\n\tif !found {\n\t\theadResponse(w, r, http.StatusNotFound)\n\t}\n\theadResponse(w, r, http.StatusOK)\n}\n\nfunc headForbidden() util.Gerror {\n\terr := gerror.StatusError(\"forbidden\", http.StatusForbidden)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package hkjnweb is the personal website hkjn.me.\n\/\/\n\/\/ See https:\/\/github.com\/hkjn\/autosite for the framework that enables\n\/\/ this site.\npackage hkjnweb\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"hkjn.me\/autosite\"\n)\n\nvar (\n\tbaseTemplates = []string{\n\t\t\"tmpl\/base.tmpl\",\n\t\t\"tmpl\/base_header.tmpl\",\n\t\t\"tmpl\/head.tmpl\",\n\t\t\"tmpl\/style.tmpl\",\n\t\t\"tmpl\/fonts.tmpl\",\n\t\t\"tmpl\/js.tmpl\",\n\t}\n\tIsProd = false\n\tLogger = autosite.Glogger{}\n)\n\n\/\/ getLogger returns the autosite.Logger to use.\nfunc getLogger(r *http.Request) autosite.Logger {\n\treturn Logger\n}\n\nvar (\n\twebDomain = \"www.hkjn.me\"\n\tweb = autosite.New(\n\t\t\"Henrik Jonsson\",\n\t\t\"pages\/*.tmpl\", \/\/ glob for pages\n\t\twebDomain, \/\/ live domain\n\t\tappend(baseTemplates, \"tmpl\/page.tmpl\"),\n\t\tgetLogger,\n\t\tIsProd,\n\t\ttemplate.FuncMap{},\n\t)\n\n\tgoImportTmpl = `<head>\n <meta http-equiv=\"refresh\" content=\"0; URL='%s'\">\n <meta name=\"go-import\" content=\"%s git %s\">\n <\/head>`\n\n\tredirects = map[string]string{\n\t\t\"\/where\": \"http:\/\/computare0.appspot.com\/where\/me@hkjn.me\",\n\t}\n)\n\n\/\/ Register registers the handlers.\nfunc Register() {\n\t\/\/ We remap \/webindex (from pages\/webindex.tmpl, no special-cases in\n\t\/\/ the autosite package) to serve index.\n\tweb.ChangeURI(\"\/webindex\", \"\/\")\n\tif IsProd {\n\t\tlog.Println(\"We're in prod, remapping some paths\\n\")\n\t\thttp.HandleFunc(\"https:\/\/hkjn.me\/\", nakedIndexHandler)\n\t\thttp.HandleFunc(\"www.hkjn.me\/keybase.txt\", keybaseHandler)\n\t} else {\n\t\tlog.Println(\"We're not in prod, remapping some paths\\n\")\n\t\thttp.HandleFunc(\"\/nakedindex\", nakedIndexHandler)\n\t}\n\tfor uri, newUri := range redirects {\n\t\tweb.AddRedirect(uri, newUri)\n\t}\n\n\tweb.Register()\n}\n\n\/\/ nakedIndexHandler serves requests to hkjn.me\/\nfunc nakedIndexHandler(w http.ResponseWriter, r *http.Request) {\n\tl := getLogger(r)\n\tl.Infof(\"nakedIndexHandler for URI %s\\n\", r.RequestURI)\n\tif r.URL.Path == \"\/\" {\n\t\turl := fmt.Sprintf(\"https:\/\/%s\", webDomain)\n\t\tl.Debugf(\"visitor to \/ of naked domain, redirecting to %q..\\n\")\n\t\thttp.Redirect(w, r, url, http.StatusSeeOther)\n\t} else {\n\t\t\/\/ Our response tells the `go get` tool where to find\n\t\t\/\/ `hkjn.me\/[package]`.\n\t\tparts := strings.Split(r.URL.Path, \"\/\")\n\t\tgodocUrl := fmt.Sprintf(\"https:\/\/godoc.org\/hkjn.me%s\", r.URL.Path)\n\t\trepoRoot := fmt.Sprintf(\"https:\/\/github.com\/hkjn\/%s\", parts[1])\n\t\timportPrefix := fmt.Sprintf(\"hkjn.me\/%s\", parts[1])\n\t\tfmt.Fprintf(w, goImportTmpl, godocUrl, importPrefix, repoRoot)\n\t}\n}\n\nvar keybaseVerifyText = `\n==================================================================\nhttps:\/\/keybase.io\/hkjn\n--------------------------------------------------------------------\n\nI hereby claim:\n\n * I am an admin of http:\/\/www.hkjn.me\n * I am hkjn (https:\/\/keybase.io\/hkjn) on keybase.\n * I have a public key with fingerprint D618 7A03 A40A 3D56 62F5 4B46 03EF BF83 9A5F DC15\n\nTo do so, I am signing this object:\n\n{\n \"body\": {\n \"key\": {\n \"fingerprint\": \"d6187a03a40a3d5662f54b4603efbf839a5fdc15\",\n \"host\": \"keybase.io\",\n \"key_id\": \"03efbf839a5fdc15\",\n \"kid\": \"0101c778a84ebd54581e354296aa8171d79a23a3bcce4dbf99f880dfeafc2e0030030a\",\n \"uid\": \"d1a36099363d76e027441cb534d6ba19\",\n \"username\": \"hkjn\"\n },\n \"service\": {\n \"hostname\": \"www.hkjn.me\",\n \"protocol\": \"http:\"\n },\n \"type\": \"web_service_binding\",\n \"version\": 1\n },\n \"ctime\": 1429520487,\n \"expire_in\": 157680000,\n \"prev\": \"4e9b7b9ff1b50e84436eb96100528ef25bf277aca7169d5a4330b2f5bd0b9ba4\",\n \"seqno\": 4,\n \"tag\": \"signature\"\n}\n\nwhich yields the signature:\n\n-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v2\n\nowFtUntQVFUYZ3kYD0lRVDKYhqsFNQT3\/SBxjcjMmFEYGq3M5Zx7z11uq3vX3WUB\nmZWc0olIpxQQMGLMFZKhWSie0kCORUAzIBI6haat2AOzZIAdiqTuJfuvM2fmzPd9\nv9\/vO79zvncjgwJCDUG\/975RY7q6yvD1+Q0w4EX6c7UYg6pUhKUWYxa0eMiK1Yzs\nNrtidWKpmMQSPAdwCtA4oCSGZUmZoSHN4hSSocxTAmBkSSQYLAnLUx06Q5OBwIGS\nFVXLaYFJkbTs\/+At\/xYInBA5jgc8jaDE0AxPIIqhSYEFgCc4QuIEQFKAgqKIaAnK\ngiDzPC7JCMgiiXCc0jfQ5PIX5SQCUCwuCBRLSRyLcJKjaUKEDEVLLASEoAMdyG4F\ne5GGzrO8ZsXcSZiWcSki0u3rLu5XCwoKknVEshYlYTa76lRFdY9OczptqTrPWWRb\nBCJoui9hgopV0l5QI7iQ3aGoViyV0JCiU9E1Cc0YQ+I0zyVhqNCm2JFJ0REMx\/K4\ntvQ2yKVJ0kiAHBRkmYAMjniaplgEBZbAcYbkkUwyUCY5DoiAI1hBYgBNUTjU\/gZK\nOBQgoDHd1D6riqXS2jWBWZN0KGYrcObbEeYOLw2MDQ4whAYsCQnUhyAgPGz5f6Ox\nOnvZ33svHP\/Asy2uePVj\/exDM9G+c5PTt\/9qGsw9NFuSVtCYs3n+07rymV8MXqph\nrqWyom+lGUusjzsyeHnTwQ0nhIUtU7fIE+q9+k2vu8Nmz300FwPvlOc2lu35Nelm\nSKP\/8Yoctb2jutYm+f1\/RJUrJ1u+TauybPnqmdOWY8YuC21q6B15+MaVl41vBceM\nLdT9GZGYHpmRkHCrNNC4YHQ0p7vK\/N3vdwx18S8czBRPRdyMSombuF7iKb2gLLVX\n95EzZs4zFasaW7O8Q0G1w76JgmVhWTl936xtPftAyLX8s08E5pze5socSPmtvrdJ\naQ3N7jhTtXKhjVvDgC5ff13X+azy2HVqrvvizmezxjPXq90D9jPfe8eMlU3ckh1X\n9v98d+DwqynTGVtrhtfecAyCivA7ZSOy7Bx9ZPyu7\/i1pU\/91JcSJb132\/C84Z0s\nzyfHWkp2rZhr+9FW0t4+2rzrVPWStu1uXLme5vJOfsbXfFEc720E61YcGHAk7Gbr\nQHLhKn+bb7Tnw\/pHf+BcO3Y7Nz\/Y3NlZVXZk\/87DQbVF3yG7GTw3wk\/tk0q+vBzs\njqlkE6+exDOmn1Yj52X\/fHvP8PjR2dnEhjWO7ob18mT\/vbcrhy5u7UHLKydemulo\n\/7i3U5rwxl\/Kto85C73p+a9EiBt9TwYf9XjrD0XHtx0wed7M265sjL70Dw==\n=c4ep\n-----END PGP MESSAGE-----\n\nAnd finally, I am proving ownership of this host by posting or\nappending to this document.\n\nView my publicly-auditable identity here: https:\/\/keybase.io\/hkjn\n\n==================================================================`\n\nfunc keybaseHandler(w http.ResponseWriter, r *http.Request) {\n\tl := getLogger(r)\n\tl.Infof(\"keybaseHandler for URI %s\\n\", r.RequestURI)\n\tfmt.Fprintf(w, keybaseVerifyText)\n}\n<commit_msg>Serve web page on \/webindex by disabling redirect<commit_after>\/\/ Package hkjnweb is the personal website hkjn.me.\n\/\/\n\/\/ See https:\/\/github.com\/hkjn\/autosite for the framework that enables\n\/\/ this site.\npackage hkjnweb\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"hkjn.me\/autosite\"\n)\n\nvar (\n\tbaseTemplates = []string{\n\t\t\"tmpl\/base.tmpl\",\n\t\t\"tmpl\/base_header.tmpl\",\n\t\t\"tmpl\/head.tmpl\",\n\t\t\"tmpl\/style.tmpl\",\n\t\t\"tmpl\/fonts.tmpl\",\n\t\t\"tmpl\/js.tmpl\",\n\t}\n\tIsProd = false\n\tLogger = autosite.Glogger{}\n)\n\n\/\/ getLogger returns the autosite.Logger to use.\nfunc getLogger(r *http.Request) autosite.Logger {\n\treturn Logger\n}\n\nvar (\n\twebDomain = \"www.hkjn.me\"\n\tweb = autosite.New(\n\t\t\"Henrik Jonsson\",\n\t\t\"pages\/*.tmpl\", \/\/ glob for pages\n\t\twebDomain, \/\/ live domain\n\t\tappend(baseTemplates, \"tmpl\/page.tmpl\"),\n\t\tgetLogger,\n\t\tIsProd,\n\t\ttemplate.FuncMap{},\n\t)\n\n\tgoImportTmpl = `<head>\n <meta http-equiv=\"refresh\" content=\"0; URL='%s'\">\n <meta name=\"go-import\" content=\"%s git %s\">\n <\/head>`\n\n\tredirects = map[string]string{\n\t\t\"\/where\": \"http:\/\/computare0.appspot.com\/where\/me@hkjn.me\",\n\t}\n)\n\n\/\/ Register registers the handlers.\nfunc Register() {\n\t\/\/ We remap \/webindex (from pages\/webindex.tmpl, no special-cases in\n\t\/\/ the autosite package) to serve index.\n\n\t\/\/ TODO(henrik): Re-enable this once autosite package doesn't\n\t\/\/ interfere with \/ and \/keybase.txt handlers below.\n\n\t\/\/ web.ChangeURI(\"\/webindex\", \"\/\")\n\tif IsProd {\n\t\tlog.Println(\"We're in prod, remapping some paths\\n\")\n\t\thttp.HandleFunc(\"\/\", nakedIndexHandler)\n\t\thttp.HandleFunc(\"\/keybase.txt\", keybaseHandler)\n\t} else {\n\t\tlog.Println(\"We're not in prod, remapping some paths\\n\")\n\t\thttp.HandleFunc(\"\/nakedindex\", nakedIndexHandler)\n\t}\n\tfor uri, newUri := range redirects {\n\t\tweb.AddRedirect(uri, newUri)\n\t}\n\n\tweb.Register()\n}\n\n\/\/ nakedIndexHandler serves requests to hkjn.me\/\nfunc nakedIndexHandler(w http.ResponseWriter, r *http.Request) {\n\tl := getLogger(r)\n\tl.Infof(\"nakedIndexHandler for URI %s\\n\", r.RequestURI)\n\tif r.URL.Path == \"\/\" {\n\t\turl := fmt.Sprintf(\"https:\/\/%s\", webDomain)\n\t\tl.Debugf(\"visitor to \/ of naked domain, redirecting to %q..\\n\")\n\t\thttp.Redirect(w, r, url, http.StatusSeeOther)\n\t} else {\n\t\t\/\/ Our response tells the `go get` tool where to find\n\t\t\/\/ `hkjn.me\/[package]`.\n\t\tparts := strings.Split(r.URL.Path, \"\/\")\n\t\tgodocUrl := fmt.Sprintf(\"https:\/\/godoc.org\/hkjn.me%s\", r.URL.Path)\n\t\trepoRoot := fmt.Sprintf(\"https:\/\/github.com\/hkjn\/%s\", parts[1])\n\t\timportPrefix := fmt.Sprintf(\"hkjn.me\/%s\", parts[1])\n\t\tfmt.Fprintf(w, goImportTmpl, godocUrl, importPrefix, repoRoot)\n\t}\n}\n\nvar keybaseVerifyText = `\n==================================================================\nhttps:\/\/keybase.io\/hkjn\n--------------------------------------------------------------------\n\nI hereby claim:\n\n * I am an admin of http:\/\/www.hkjn.me\n * I am hkjn (https:\/\/keybase.io\/hkjn) on keybase.\n * I have a public key with fingerprint D618 7A03 A40A 3D56 62F5 4B46 03EF BF83 9A5F DC15\n\nTo do so, I am signing this object:\n\n{\n \"body\": {\n \"key\": {\n \"fingerprint\": \"d6187a03a40a3d5662f54b4603efbf839a5fdc15\",\n \"host\": \"keybase.io\",\n \"key_id\": \"03efbf839a5fdc15\",\n \"kid\": \"0101c778a84ebd54581e354296aa8171d79a23a3bcce4dbf99f880dfeafc2e0030030a\",\n \"uid\": \"d1a36099363d76e027441cb534d6ba19\",\n \"username\": \"hkjn\"\n },\n \"service\": {\n \"hostname\": \"www.hkjn.me\",\n \"protocol\": \"http:\"\n },\n \"type\": \"web_service_binding\",\n \"version\": 1\n },\n \"ctime\": 1429520487,\n \"expire_in\": 157680000,\n \"prev\": \"4e9b7b9ff1b50e84436eb96100528ef25bf277aca7169d5a4330b2f5bd0b9ba4\",\n \"seqno\": 4,\n \"tag\": \"signature\"\n}\n\nwhich yields the signature:\n\n-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v2\n\nowFtUntQVFUYZ3kYD0lRVDKYhqsFNQT3\/SBxjcjMmFEYGq3M5Zx7z11uq3vX3WUB\nmZWc0olIpxQQMGLMFZKhWSie0kCORUAzIBI6haat2AOzZIAdiqTuJfuvM2fmzPd9\nv9\/vO79zvncjgwJCDUG\/975RY7q6yvD1+Q0w4EX6c7UYg6pUhKUWYxa0eMiK1Yzs\nNrtidWKpmMQSPAdwCtA4oCSGZUmZoSHN4hSSocxTAmBkSSQYLAnLUx06Q5OBwIGS\nFVXLaYFJkbTs\/+At\/xYInBA5jgc8jaDE0AxPIIqhSYEFgCc4QuIEQFKAgqKIaAnK\ngiDzPC7JCMgiiXCc0jfQ5PIX5SQCUCwuCBRLSRyLcJKjaUKEDEVLLASEoAMdyG4F\ne5GGzrO8ZsXcSZiWcSki0u3rLu5XCwoKknVEshYlYTa76lRFdY9OczptqTrPWWRb\nBCJoui9hgopV0l5QI7iQ3aGoViyV0JCiU9E1Cc0YQ+I0zyVhqNCm2JFJ0REMx\/K4\ntvQ2yKVJ0kiAHBRkmYAMjniaplgEBZbAcYbkkUwyUCY5DoiAI1hBYgBNUTjU\/gZK\nOBQgoDHd1D6riqXS2jWBWZN0KGYrcObbEeYOLw2MDQ4whAYsCQnUhyAgPGz5f6Ox\nOnvZ33svHP\/Asy2uePVj\/exDM9G+c5PTt\/9qGsw9NFuSVtCYs3n+07rymV8MXqph\nrqWyom+lGUusjzsyeHnTwQ0nhIUtU7fIE+q9+k2vu8Nmz300FwPvlOc2lu35Nelm\nSKP\/8Yoctb2jutYm+f1\/RJUrJ1u+TauybPnqmdOWY8YuC21q6B15+MaVl41vBceM\nLdT9GZGYHpmRkHCrNNC4YHQ0p7vK\/N3vdwx18S8czBRPRdyMSombuF7iKb2gLLVX\n95EzZs4zFasaW7O8Q0G1w76JgmVhWTl936xtPftAyLX8s08E5pze5socSPmtvrdJ\naQ3N7jhTtXKhjVvDgC5ff13X+azy2HVqrvvizmezxjPXq90D9jPfe8eMlU3ckh1X\n9v98d+DwqynTGVtrhtfecAyCivA7ZSOy7Bx9ZPyu7\/i1pU\/91JcSJb132\/C84Z0s\nzyfHWkp2rZhr+9FW0t4+2rzrVPWStu1uXLme5vJOfsbXfFEc720E61YcGHAk7Gbr\nQHLhKn+bb7Tnw\/pHf+BcO3Y7Nz\/Y3NlZVXZk\/87DQbVF3yG7GTw3wk\/tk0q+vBzs\njqlkE6+exDOmn1Yj52X\/fHvP8PjR2dnEhjWO7ob18mT\/vbcrhy5u7UHLKydemulo\n\/7i3U5rwxl\/Kto85C73p+a9EiBt9TwYf9XjrD0XHtx0wed7M265sjL70Dw==\n=c4ep\n-----END PGP MESSAGE-----\n\nAnd finally, I am proving ownership of this host by posting or\nappending to this document.\n\nView my publicly-auditable identity here: https:\/\/keybase.io\/hkjn\n\n==================================================================`\n\nfunc keybaseHandler(w http.ResponseWriter, r *http.Request) {\n\tl := getLogger(r)\n\tl.Infof(\"keybaseHandler for URI %s\\n\", r.RequestURI)\n\tfmt.Fprintf(w, keybaseVerifyText)\n}\n<|endoftext|>"} {"text":"<commit_before>package goose\n\nimport (\n\tresty \"github.com\/go-resty\/resty\/v2\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype HtmlRequester interface {\n\tfetchHTML(string) (string, error)\n}\n\n\/\/ Crawler can fetch the target HTML page\ntype htmlrequester struct {\n\tconfig Configuration\n}\n\n\/\/ NewCrawler returns a crawler object initialised with the URL and the [optional] raw HTML body\nfunc NewHtmlRequester(config Configuration) HtmlRequester {\n\treturn htmlrequester{\n\t\tconfig: config,\n\t}\n}\n\nfunc (hr htmlrequester) fetchHTML(url string) (string, error) {\n\tclient := resty.New()\n\tclient.SetTimeout(hr.config.timeout)\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"application\/json\").\n\t\tSetHeader(\"User-Agent\", \"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_6_7) AppleWebKit\/534.30 (KHTML, like Gecko) Chrome\/12.0.742.91 Safari\/534.30\").\n\t\tGet(url)\n\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"could not perform request on \"+url)\n\t}\n\tif resp.IsError() {\n\t\treturn \"\", &badRequest{Message: \"could not perform request with \" + url + \" status code \" + string(resp.StatusCode())}\n\t}\n\treturn resp.String(), nil\n}\n\ntype badRequest struct {\n\tMessage string `json:\"message,omitempty\"`\n}\n\nfunc (BadRequest *badRequest) Error() string {\n\treturn \"Required request fields are not filled\"\n}\n<commit_msg>Why use application\/json to fetch HTML?<commit_after>package goose\n\nimport (\n\tresty \"github.com\/go-resty\/resty\/v2\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype HtmlRequester interface {\n\tfetchHTML(string) (string, error)\n}\n\n\/\/ Crawler can fetch the target HTML page\ntype htmlrequester struct {\n\tconfig Configuration\n}\n\n\/\/ NewCrawler returns a crawler object initialised with the URL and the [optional] raw HTML body\nfunc NewHtmlRequester(config Configuration) HtmlRequester {\n\treturn htmlrequester{\n\t\tconfig: config,\n\t}\n}\n\nfunc (hr htmlrequester) fetchHTML(url string) (string, error) {\n\tclient := resty.New()\n\tclient.SetTimeout(hr.config.timeout)\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"text\/html\").\n\t\tSetHeader(\"User-Agent\", \"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_6_7) AppleWebKit\/534.30 (KHTML, like Gecko) Chrome\/12.0.742.91 Safari\/534.30\").\n\t\tGet(url)\n\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"could not perform request on \"+url)\n\t}\n\tif resp.IsError() {\n\t\treturn \"\", &badRequest{Message: \"could not perform request with \" + url + \" status code \" + string(resp.StatusCode())}\n\t}\n\treturn resp.String(), nil\n}\n\ntype badRequest struct {\n\tMessage string `json:\"message,omitempty\"`\n}\n\nfunc (BadRequest *badRequest) Error() string {\n\treturn \"Required request fields are not filled\"\n}\n<|endoftext|>"} {"text":"<commit_before>package yiigo\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\n\/\/ defaultHTTPTimeout default http request timeout\nconst defaultHTTPTimeout = 10 * time.Second\n\n\/\/ errCookieFileNotFound cookie file not found error\nvar errCookieFileNotFound = errors.New(\"cookie file not found\")\n\n\/\/ httpClientOptions http client options\ntype httpClientOptions struct {\n\tdialTimeout time.Duration\n\tdialKeepAlive time.Duration\n\tfallbackDelay time.Duration\n\tmaxIdleConns int\n\tmaxIdleConnsPerHost int\n\tmaxConnsPerHost int\n\tidleConnTimeout time.Duration\n\ttlsConfig *tls.Config\n\ttlsHandshakeTimeout time.Duration\n\texpectContinueTimeout time.Duration\n}\n\n\/\/ HTTPClientOption configures how we set up the http client\ntype HTTPClientOption interface {\n\tapply(options *httpClientOptions)\n}\n\n\/\/ funcHTTPClientOption implements http client option\ntype funcHTTPClientOption struct {\n\tf func(options *httpClientOptions)\n}\n\nfunc (fo *funcHTTPClientOption) apply(o *httpClientOptions) {\n\tfo.f(o)\n}\n\nfunc newFuncHTTPOption(f func(options *httpClientOptions)) *funcHTTPClientOption {\n\treturn &funcHTTPClientOption{f: f}\n}\n\n\/\/ WithHTTPDialTimeout specifies the `DialTimeout` to net.Dialer.\nfunc WithHTTPDialTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.dialTimeout = d\n\t})\n}\n\n\/\/ WithHTTPDialKeepAlive specifies the `KeepAlive` to net.Dialer.\nfunc WithHTTPDialKeepAlive(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.dialKeepAlive = d\n\t})\n}\n\n\/\/ WithHTTPDialFallbackDelay specifies the `FallbackDelay` to net.Dialer.\nfunc WithHTTPDialFallbackDelay(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.fallbackDelay = d\n\t})\n}\n\n\/\/ WithHTTPMaxIdleConns specifies the `MaxIdleConns` to http client.\nfunc WithHTTPMaxIdleConns(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxIdleConns = n\n\t})\n}\n\n\/\/ WithHTTPMaxIdleConnsPerHost specifies the `MaxIdleConnsPerHost` to http client.\nfunc WithHTTPMaxIdleConnsPerHost(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxIdleConnsPerHost = n\n\t})\n}\n\n\/\/ WithHTTPMaxConnsPerHost specifies the `MaxConnsPerHost` to http client.\nfunc WithHTTPMaxConnsPerHost(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxConnsPerHost = n\n\t})\n}\n\n\/\/ WithHTTPIdleConnTimeout specifies the `IdleConnTimeout` to http client.\nfunc WithHTTPIdleConnTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.idleConnTimeout = d\n\t})\n}\n\n\/\/ WithHTTPTLSConfig specifies the `TLSClientConfig` to http client.\nfunc WithHTTPTLSConfig(c *tls.Config) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.tlsConfig = c\n\t})\n}\n\n\/\/ WithHTTPTLSHandshakeTimeout specifies the `TLSHandshakeTimeout` to http client.\nfunc WithHTTPTLSHandshakeTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.tlsHandshakeTimeout = d\n\t})\n}\n\n\/\/ WithHTTPExpectContinueTimeout specifies the `ExpectContinueTimeout` to http client.\nfunc WithHTTPExpectContinueTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.expectContinueTimeout = d\n\t})\n}\n\n\/\/ httpRequestOptions http request options\ntype httpRequestOptions struct {\n\theaders map[string]string\n\tcookieFile string\n\twithCookies bool\n\tcookieSave bool\n\tcookieReplace bool\n\tdisableKeepAlive bool\n\ttimeout time.Duration\n}\n\n\/\/ HTTPRequestOption configures how we set up the http request\ntype HTTPRequestOption interface {\n\tapply(*httpRequestOptions) error\n}\n\n\/\/ funcHTTPRequestOption implements request option\ntype funcHTTPRequestOption struct {\n\tf func(*httpRequestOptions) error\n}\n\nfunc (fo *funcHTTPRequestOption) apply(r *httpRequestOptions) error {\n\treturn fo.f(r)\n}\n\nfunc newFuncHTTPRequestOption(f func(*httpRequestOptions) error) *funcHTTPRequestOption {\n\treturn &funcHTTPRequestOption{f: f}\n}\n\n\/\/ WithRequestHeader specifies the headers to http request.\nfunc WithRequestHeader(key, value string) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.headers[key] = value\n\n\t\treturn nil\n\t})\n}\n\n\/\/ WithRequestCookieFile specifies the file which to save http response cookies.\nfunc WithRequestCookieFile(file string) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\tpath, err := filepath.Abs(file)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\to.cookieFile = path\n\n\t\treturn mkCookieFile(path)\n\t})\n}\n\n\/\/ WithRequestCookies specifies http requested with cookies.\nfunc WithRequestCookies(b bool) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.withCookies = b\n\n\t\treturn nil\n\t})\n}\n\n\/\/ WithRequestCookieSave specifies save the http response cookies.\nfunc WithRequestCookieSave(b bool) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.cookieSave = b\n\n\t\treturn nil\n\t})\n}\n\n\/\/ WithRequestCookieReplace specifies replace the old http response cookies.\nfunc WithRequestCookieReplace(b bool) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.cookieReplace = b\n\n\t\treturn nil\n\t})\n}\n\n\/\/ WithRequestDisableKeepAlive specifies close the connection after\n\/\/ replying to this request (for servers) or after sending this\n\/\/ request and reading its response (for clients).\nfunc WithRequestDisableKeepAlive(b bool) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.disableKeepAlive = b\n\n\t\treturn nil\n\t})\n}\n\n\/\/ WithRequestTimeout specifies the timeout to http request.\nfunc WithRequestTimeout(d time.Duration) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) error {\n\t\to.timeout = d\n\n\t\treturn nil\n\t})\n}\n\n\/\/ HTTPClient http client\ntype HTTPClient struct {\n\tclient *http.Client\n}\n\n\/\/ Get http get request\nfunc (h *HTTPClient) Get(url string, options ...HTTPRequestOption) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &httpRequestOptions{\n\t\theaders: make(map[string]string),\n\t\ttimeout: defaultHTTPTimeout,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\tif err := option.apply(o); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(o.headers) > 0 {\n\t\tfor k, v := range o.headers {\n\t\t\treq.Header.Set(k, v)\n\t\t}\n\t}\n\n\tif o.withCookies {\n\t\tcookies, err := getCookies(o.cookieFile)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, c := range cookies {\n\t\t\treq.AddCookie(c)\n\t\t}\n\t}\n\n\tif o.disableKeepAlive {\n\t\treq.Close = true\n\t}\n\n\tctx, cancel := context.WithTimeout(context.TODO(), o.timeout)\n\n\tdefer cancel()\n\n\tresp, err := h.client.Do(req.WithContext(ctx))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif o.cookieSave {\n\t\tif err := saveCookie(resp.Cookies(), o.cookieFile, o.cookieReplace); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\n\t\treturn nil, fmt.Errorf(\"error http code: %d\", resp.StatusCode)\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\n\/\/ Post http post request\nfunc (h *HTTPClient) Post(url string, body []byte, options ...HTTPRequestOption) ([]byte, error) {\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader(body))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &httpRequestOptions{\n\t\theaders: make(map[string]string),\n\t\ttimeout: defaultHTTPTimeout,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\tif err := option.apply(o); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(o.headers) > 0 {\n\t\tfor k, v := range o.headers {\n\t\t\treq.Header.Set(k, v)\n\t\t}\n\t}\n\n\tif o.withCookies {\n\t\tcookies, err := getCookies(o.cookieFile)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, c := range cookies {\n\t\t\treq.AddCookie(c)\n\t\t}\n\t}\n\n\tif o.disableKeepAlive {\n\t\treq.Close = true\n\t}\n\n\tctx, cancel := context.WithTimeout(context.TODO(), o.timeout)\n\n\tdefer cancel()\n\n\tresp, err := h.client.Do(req.WithContext(ctx))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif o.cookieSave {\n\t\tif err := saveCookie(resp.Cookies(), o.cookieFile, o.cookieReplace); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\n\t\treturn nil, fmt.Errorf(\"error http code: %d\", resp.StatusCode)\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\n\/\/ defaultHTTPClient default http client\nvar defaultHTTPClient = &HTTPClient{\n\tclient: &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 30 * time.Second,\n\t\t\t\tKeepAlive: 60 * time.Second,\n\t\t\t}).DialContext,\n\t\t\tMaxIdleConns: 0,\n\t\t\tMaxIdleConnsPerHost: 1000,\n\t\t\tMaxConnsPerHost: 1000,\n\t\t\tIdleConnTimeout: 60 * time.Second,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t},\n\t},\n}\n\n\/\/ NewHTTPClient returns a new http client\nfunc NewHTTPClient(options ...HTTPClientOption) *HTTPClient {\n\to := &httpClientOptions{\n\t\tdialTimeout: 30 * time.Second,\n\t\tdialKeepAlive: 60 * time.Second,\n\t\tmaxIdleConns: 0,\n\t\tmaxIdleConnsPerHost: 1000,\n\t\tmaxConnsPerHost: 1000,\n\t\tidleConnTimeout: 60 * time.Second,\n\t\ttlsHandshakeTimeout: 10 * time.Second,\n\t\texpectContinueTimeout: 1 * time.Second,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tt := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: o.dialTimeout,\n\t\t\tKeepAlive: o.dialKeepAlive,\n\t\t}).DialContext,\n\t\tMaxConnsPerHost: o.maxConnsPerHost,\n\t\tMaxIdleConnsPerHost: o.maxIdleConnsPerHost,\n\t\tMaxIdleConns: o.maxIdleConns,\n\t\tIdleConnTimeout: o.idleConnTimeout,\n\t\tTLSClientConfig: o.tlsConfig,\n\t\tTLSHandshakeTimeout: o.tlsHandshakeTimeout,\n\t\tExpectContinueTimeout: o.expectContinueTimeout,\n\t}\n\n\tc := &HTTPClient{\n\t\tclient: &http.Client{\n\t\t\tTransport: t,\n\t\t},\n\t}\n\n\treturn c\n}\n\n\/\/ HTTPGet http get request\nfunc HTTPGet(url string, options ...HTTPRequestOption) ([]byte, error) {\n\treturn defaultHTTPClient.Get(url, options...)\n}\n\n\/\/ HTTPPost http post request\nfunc HTTPPost(url string, body []byte, options ...HTTPRequestOption) ([]byte, error) {\n\treturn defaultHTTPClient.Post(url, body, options...)\n}\n\n\/\/ mkCookieFile create cookie file\nfunc mkCookieFile(path string) error {\n\tdir := filepath.Dir(path)\n\n\t\/\/ make dir if not exsit\n\tif _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(dir, os.ModePerm); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ create file if not exsit\n\tif _, err := os.Stat(path); err != nil && os.IsNotExist(err) {\n\t\tif _, err := os.Create(path); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ getCookie get http saved cookies\nfunc getCookies(cookieFile string) ([]*http.Cookie, error) {\n\tif cookieFile == \"\" {\n\t\treturn nil, errCookieFileNotFound\n\t}\n\n\tcookieM := make(map[string]*http.Cookie)\n\tcontent, err := ioutil.ReadFile(cookieFile)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(content, &cookieM); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcookies := make([]*http.Cookie, 0, len(cookieM))\n\n\tfor _, v := range cookieM {\n\t\tcookies = append(cookies, v)\n\t}\n\n\treturn cookies, nil\n}\n\n\/\/ saveCookie save http cookies\nfunc saveCookie(cookies []*http.Cookie, cookieFile string, replace bool) error {\n\tif len(cookies) == 0 {\n\t\treturn nil\n\t}\n\n\tif cookieFile == \"\" {\n\t\treturn errCookieFileNotFound\n\t}\n\n\tcookieM := make(map[string]*http.Cookie)\n\n\tif !replace {\n\t\tcontent, err := ioutil.ReadFile(cookieFile)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(content) > 0 {\n\t\t\tif err := json.Unmarshal(content, &cookieM); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, c := range cookies {\n\t\tcookieM[c.Name] = c\n\t}\n\n\tb, err := json.Marshal(cookieM)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(cookieFile, b, os.ModePerm)\n}\n<commit_msg>update http<commit_after>package yiigo\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ defaultHTTPTimeout default http request timeout\nconst defaultHTTPTimeout = 10 * time.Second\n\n\/\/ errCookieFileNotFound cookie file not found error\nvar errCookieFileNotFound = errors.New(\"cookie file not found\")\n\n\/\/ httpClientOptions http client options\ntype httpClientOptions struct {\n\tdialTimeout time.Duration\n\tdialKeepAlive time.Duration\n\tfallbackDelay time.Duration\n\tmaxIdleConns int\n\tmaxIdleConnsPerHost int\n\tmaxConnsPerHost int\n\tidleConnTimeout time.Duration\n\ttlsConfig *tls.Config\n\ttlsHandshakeTimeout time.Duration\n\texpectContinueTimeout time.Duration\n}\n\n\/\/ HTTPClientOption configures how we set up the http client\ntype HTTPClientOption interface {\n\tapply(options *httpClientOptions)\n}\n\n\/\/ funcHTTPClientOption implements http client option\ntype funcHTTPClientOption struct {\n\tf func(options *httpClientOptions)\n}\n\nfunc (fo *funcHTTPClientOption) apply(o *httpClientOptions) {\n\tfo.f(o)\n}\n\nfunc newFuncHTTPOption(f func(options *httpClientOptions)) *funcHTTPClientOption {\n\treturn &funcHTTPClientOption{f: f}\n}\n\n\/\/ WithHTTPDialTimeout specifies the `DialTimeout` to net.Dialer.\nfunc WithHTTPDialTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.dialTimeout = d\n\t})\n}\n\n\/\/ WithHTTPDialKeepAlive specifies the `KeepAlive` to net.Dialer.\nfunc WithHTTPDialKeepAlive(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.dialKeepAlive = d\n\t})\n}\n\n\/\/ WithHTTPDialFallbackDelay specifies the `FallbackDelay` to net.Dialer.\nfunc WithHTTPDialFallbackDelay(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.fallbackDelay = d\n\t})\n}\n\n\/\/ WithHTTPMaxIdleConns specifies the `MaxIdleConns` to http client.\nfunc WithHTTPMaxIdleConns(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxIdleConns = n\n\t})\n}\n\n\/\/ WithHTTPMaxIdleConnsPerHost specifies the `MaxIdleConnsPerHost` to http client.\nfunc WithHTTPMaxIdleConnsPerHost(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxIdleConnsPerHost = n\n\t})\n}\n\n\/\/ WithHTTPMaxConnsPerHost specifies the `MaxConnsPerHost` to http client.\nfunc WithHTTPMaxConnsPerHost(n int) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.maxConnsPerHost = n\n\t})\n}\n\n\/\/ WithHTTPIdleConnTimeout specifies the `IdleConnTimeout` to http client.\nfunc WithHTTPIdleConnTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.idleConnTimeout = d\n\t})\n}\n\n\/\/ WithHTTPTLSConfig specifies the `TLSClientConfig` to http client.\nfunc WithHTTPTLSConfig(c *tls.Config) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.tlsConfig = c\n\t})\n}\n\n\/\/ WithHTTPTLSHandshakeTimeout specifies the `TLSHandshakeTimeout` to http client.\nfunc WithHTTPTLSHandshakeTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.tlsHandshakeTimeout = d\n\t})\n}\n\n\/\/ WithHTTPExpectContinueTimeout specifies the `ExpectContinueTimeout` to http client.\nfunc WithHTTPExpectContinueTimeout(d time.Duration) HTTPClientOption {\n\treturn newFuncHTTPOption(func(o *httpClientOptions) {\n\t\to.expectContinueTimeout = d\n\t})\n}\n\n\/\/ httpRequestOptions http request options\ntype httpRequestOptions struct {\n\theaders map[string]string\n\tclose bool\n\ttimeout time.Duration\n}\n\n\/\/ HTTPRequestOption configures how we set up the http request\ntype HTTPRequestOption interface {\n\tapply(*httpRequestOptions)\n}\n\n\/\/ funcHTTPRequestOption implements request option\ntype funcHTTPRequestOption struct {\n\tf func(*httpRequestOptions)\n}\n\nfunc (fo *funcHTTPRequestOption) apply(r *httpRequestOptions) {\n\tfo.f(r)\n}\n\nfunc newFuncHTTPRequestOption(f func(*httpRequestOptions)) *funcHTTPRequestOption {\n\treturn &funcHTTPRequestOption{f: f}\n}\n\n\/\/ WithRequestHeader specifies the headers to http request.\nfunc WithRequestHeader(key, value string) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) {\n\t\to.headers[key] = value\n\t})\n}\n\n\/\/ WithRequestClose specifies close the connection after\n\/\/ replying to this request (for servers) or after sending this\n\/\/ request and reading its response (for clients).\nfunc WithRequestClose(b bool) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) {\n\t\to.close = b\n\t})\n}\n\n\/\/ WithRequestTimeout specifies the timeout to http request.\nfunc WithRequestTimeout(d time.Duration) HTTPRequestOption {\n\treturn newFuncHTTPRequestOption(func(o *httpRequestOptions) {\n\t\to.timeout = d\n\t})\n}\n\n\/\/ HTTPClient http client\ntype HTTPClient struct {\n\tclient *http.Client\n}\n\n\/\/ Get http get request\nfunc (h *HTTPClient) Get(url string, options ...HTTPRequestOption) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &httpRequestOptions{\n\t\theaders: make(map[string]string),\n\t\ttimeout: defaultHTTPTimeout,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tif len(o.headers) > 0 {\n\t\tfor k, v := range o.headers {\n\t\t\treq.Header.Set(k, v)\n\t\t}\n\t}\n\n\tif o.close {\n\t\treq.Close = true\n\t}\n\n\tctx, cancel := context.WithTimeout(context.TODO(), o.timeout)\n\n\tdefer cancel()\n\n\tresp, err := h.client.Do(req.WithContext(ctx))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\n\t\treturn nil, fmt.Errorf(\"error http code: %d\", resp.StatusCode)\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\n\/\/ Post http post request\nfunc (h *HTTPClient) Post(url string, body io.Reader, options ...HTTPRequestOption) ([]byte, error) {\n\treq, err := http.NewRequest(\"POST\", url, body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := &httpRequestOptions{\n\t\theaders: make(map[string]string),\n\t\ttimeout: defaultHTTPTimeout,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tif len(o.headers) > 0 {\n\t\tfor k, v := range o.headers {\n\t\t\treq.Header.Set(k, v)\n\t\t}\n\t}\n\n\tif o.close {\n\t\treq.Close = true\n\t}\n\n\tctx, cancel := context.WithTimeout(context.TODO(), o.timeout)\n\n\tdefer cancel()\n\n\tresp, err := h.client.Do(req.WithContext(ctx))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\n\t\treturn nil, fmt.Errorf(\"error http code: %d\", resp.StatusCode)\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\n\/\/ defaultHTTPClient default http client\nvar defaultHTTPClient = &HTTPClient{\n\tclient: &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 30 * time.Second,\n\t\t\t\tKeepAlive: 60 * time.Second,\n\t\t\t}).DialContext,\n\t\t\tMaxIdleConns: 0,\n\t\t\tMaxIdleConnsPerHost: 1000,\n\t\t\tMaxConnsPerHost: 1000,\n\t\t\tIdleConnTimeout: 60 * time.Second,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t},\n\t},\n}\n\n\/\/ NewHTTPClient returns a new http client\nfunc NewHTTPClient(options ...HTTPClientOption) *HTTPClient {\n\to := &httpClientOptions{\n\t\tdialTimeout: 30 * time.Second,\n\t\tdialKeepAlive: 60 * time.Second,\n\t\tmaxIdleConns: 0,\n\t\tmaxIdleConnsPerHost: 1000,\n\t\tmaxConnsPerHost: 1000,\n\t\tidleConnTimeout: 60 * time.Second,\n\t\ttlsHandshakeTimeout: 10 * time.Second,\n\t\texpectContinueTimeout: 1 * time.Second,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tt := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: o.dialTimeout,\n\t\t\tKeepAlive: o.dialKeepAlive,\n\t\t}).DialContext,\n\t\tMaxConnsPerHost: o.maxConnsPerHost,\n\t\tMaxIdleConnsPerHost: o.maxIdleConnsPerHost,\n\t\tMaxIdleConns: o.maxIdleConns,\n\t\tIdleConnTimeout: o.idleConnTimeout,\n\t\tTLSClientConfig: o.tlsConfig,\n\t\tTLSHandshakeTimeout: o.tlsHandshakeTimeout,\n\t\tExpectContinueTimeout: o.expectContinueTimeout,\n\t}\n\n\tc := &HTTPClient{\n\t\tclient: &http.Client{\n\t\t\tTransport: t,\n\t\t},\n\t}\n\n\treturn c\n}\n\n\/\/ HTTPGet http get request\nfunc HTTPGet(url string, options ...HTTPRequestOption) ([]byte, error) {\n\treturn defaultHTTPClient.Get(url, options...)\n}\n\n\/\/ HTTPPost http post request\nfunc HTTPPost(url string, body io.Reader, options ...HTTPRequestOption) ([]byte, error) {\n\treturn defaultHTTPClient.Post(url, body, options...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\nimport (\n \"log\"\n \"strconv\"\n \"net\/http\"\n \"huabot-sched\/db\"\n \"github.com\/go-martini\/martini\"\n \"github.com\/martini-contrib\/render\"\n \"github.com\/martini-contrib\/binding\"\n)\n\n\nconst API = \"\"\n\n\nfunc StartHttpServer(addr string, sched *Sched) {\n mart := martini.Classic()\n mart.Use(render.Renderer(render.Options{\n Directory: \"templates\",\n Layout: \"layout\",\n Extensions: []string{\".tmpl\", \".html\"},\n Charset: \"UTF-8\",\n IndentJSON: true,\n IndentXML: true,\n HTMLContentType: \"application\/xhtml+xml\",\n }))\n\n api(mart, sched)\n\n mart.RunOnAddr(addr)\n}\n\n\ntype JobForm struct {\n Name string `form:\"name\" binding:\"required\"`\n Timeout int `form:\"timeout\"`\n SchedAt int `form:\"sched_at\" binding:\"required\"`\n}\n\n\nfunc api(mart *martini.ClassicMartini, sched *Sched) {\n\n mart.Post(API + \"\/jobs\/\", binding.Bind(JobForm{}), func(j JobForm, r render.Render) {\n job := db.Job{\n Name: j.Name,\n Timeout: j.Timeout,\n SchedAt: j.SchedAt,\n Status: \"ready\",\n }\n jobId, _ := db.GetIndex(\"job:name\", job.Name)\n if jobId > 0 {\n job.Id = jobId\n }\n err := job.Save()\n if err != nil {\n r.JSON(http.StatusInternalServerError, map[string]interface{}{\"err\": err.Error()})\n return\n }\n sched.Notify()\n r.JSON(http.StatusOK, map[string]db.Job{\"job\": job})\n })\n\n\n mart.Get(API + \"\/jobs\/(?P<job_id>[0-9]+)\", func(params martini.Params, r render.Render) {\n jobId, _ := strconv.Atoi(params[\"job_id\"])\n job, err := db.GetJob(jobId)\n if err != nil {\n r.JSON(http.StatusNotFound, map[string]interface{}{\"err\": err.Error()})\n return\n }\n r.JSON(http.StatusOK, map[string]db.Job{\"job\": job})\n })\n\n\n mart.Get(API + \"\/jobs\/(?P<status>ready|doing)\/\", func(params martini.Params, req *http.Request, r render.Render) {\n status := params[\"status\"]\n qs := req.URL.Query()\n var start, limit, stop int\n var err error\n if start, err = strconv.Atoi(qs.Get(\"start\")); err != nil {\n start = 0\n }\n if limit, err = strconv.Atoi(qs.Get(\"limit\")); err != nil {\n limit = 10\n }\n stop = start + limit\n var jobs []db.Job\n var count int\n jobs, err = db.RangeSchedJob(status, start, stop)\n count, _ = db.CountSchedJob(status)\n if err != nil {\n log.Printf(\"Error: RangeSchedJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": \"[]\", \"total\": count, \"current\": start})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": jobs, \"total\": count, \"current\": start})\n })\n\n\n mart.Get(API + \"\/jobs\/\", func(req *http.Request, r render.Render) {\n qs := req.URL.Query()\n var start, limit, stop int\n var err error\n if start, err = strconv.Atoi(qs.Get(\"start\")); err != nil {\n start = 0\n }\n if limit, err = strconv.Atoi(qs.Get(\"limit\")); err != nil {\n limit = 10\n }\n stop = start + limit\n var jobs []db.Job\n var count int\n jobs, err = db.RangeJob(start, stop)\n count, _ = db.CountJob()\n if err != nil {\n log.Printf(\"Error: RangeJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": \"[]\", \"total\": count, \"current\": start})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": jobs, \"total\": count, \"current\": start})\n })\n\n\n mart.Delete(API + \"\/jobs\/(?P<job_id>[0-9a-zA-Z]+)\",\n func(params martini.Params, req *http.Request, r render.Render) {\n qs := req.URL.Query()\n id := params[\"job_id\"]\n var jobId int\n if qs.Get(\"id_type\") == \"name\" {\n jobId, _ = db.GetIndex(\"job:name\", id)\n } else {\n jobId, _ = strconv.Atoi(id)\n }\n err := db.DelJob(jobId)\n if err != nil {\n log.Printf(\"Error: DelJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"err\": err.Error()})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{})\n })\n\n\n mart.Post(API + \"\/notify\", func(r render.Render) {\n sched.Notify()\n r.JSON(http.StatusOK, map[string]interface{}{})\n })\n}\n<commit_msg>Add id_type for get api<commit_after>package main\n\n\nimport (\n \"log\"\n \"strconv\"\n \"net\/http\"\n \"huabot-sched\/db\"\n \"github.com\/go-martini\/martini\"\n \"github.com\/martini-contrib\/render\"\n \"github.com\/martini-contrib\/binding\"\n)\n\n\nconst API = \"\"\n\n\nfunc StartHttpServer(addr string, sched *Sched) {\n mart := martini.Classic()\n mart.Use(render.Renderer(render.Options{\n Directory: \"templates\",\n Layout: \"layout\",\n Extensions: []string{\".tmpl\", \".html\"},\n Charset: \"UTF-8\",\n IndentJSON: true,\n IndentXML: true,\n HTMLContentType: \"application\/xhtml+xml\",\n }))\n\n api(mart, sched)\n\n mart.RunOnAddr(addr)\n}\n\n\ntype JobForm struct {\n Name string `form:\"name\" binding:\"required\"`\n Timeout int `form:\"timeout\"`\n SchedAt int `form:\"sched_at\" binding:\"required\"`\n}\n\n\nfunc api(mart *martini.ClassicMartini, sched *Sched) {\n\n mart.Post(API + \"\/jobs\/\", binding.Bind(JobForm{}), func(j JobForm, r render.Render) {\n job := db.Job{\n Name: j.Name,\n Timeout: j.Timeout,\n SchedAt: j.SchedAt,\n Status: \"ready\",\n }\n jobId, _ := db.GetIndex(\"job:name\", job.Name)\n if jobId > 0 {\n job.Id = jobId\n }\n err := job.Save()\n if err != nil {\n r.JSON(http.StatusInternalServerError, map[string]interface{}{\"err\": err.Error()})\n return\n }\n sched.Notify()\n r.JSON(http.StatusOK, map[string]db.Job{\"job\": job})\n })\n\n\n mart.Get(API + \"\/jobs\/(?P<job_id>[0-9a-zA-Z]+)\",\n func(params martini.Params, req *http.Request, r render.Render) {\n qs := req.URL.Query()\n id := params[\"job_id\"]\n var jobId int\n if qs.Get(\"id_type\") == \"name\" {\n jobId, _ = db.GetIndex(\"job:name\", id)\n } else {\n jobId, _ = strconv.Atoi(id)\n }\n job, err := db.GetJob(jobId)\n if err != nil {\n r.JSON(http.StatusNotFound, map[string]interface{}{\"err\": err.Error()})\n return\n }\n r.JSON(http.StatusOK, map[string]db.Job{\"job\": job})\n })\n\n\n mart.Get(API + \"\/jobs\/(?P<status>ready|doing)\/\", func(params martini.Params, req *http.Request, r render.Render) {\n status := params[\"status\"]\n qs := req.URL.Query()\n var start, limit, stop int\n var err error\n if start, err = strconv.Atoi(qs.Get(\"start\")); err != nil {\n start = 0\n }\n if limit, err = strconv.Atoi(qs.Get(\"limit\")); err != nil {\n limit = 10\n }\n stop = start + limit\n var jobs []db.Job\n var count int\n jobs, err = db.RangeSchedJob(status, start, stop)\n count, _ = db.CountSchedJob(status)\n if err != nil {\n log.Printf(\"Error: RangeSchedJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": \"[]\", \"total\": count, \"current\": start})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": jobs, \"total\": count, \"current\": start})\n })\n\n\n mart.Get(API + \"\/jobs\/\", func(req *http.Request, r render.Render) {\n qs := req.URL.Query()\n var start, limit, stop int\n var err error\n if start, err = strconv.Atoi(qs.Get(\"start\")); err != nil {\n start = 0\n }\n if limit, err = strconv.Atoi(qs.Get(\"limit\")); err != nil {\n limit = 10\n }\n stop = start + limit\n var jobs []db.Job\n var count int\n jobs, err = db.RangeJob(start, stop)\n count, _ = db.CountJob()\n if err != nil {\n log.Printf(\"Error: RangeJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": \"[]\", \"total\": count, \"current\": start})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{\"jobs\": jobs, \"total\": count, \"current\": start})\n })\n\n\n mart.Delete(API + \"\/jobs\/(?P<job_id>[0-9a-zA-Z]+)\",\n func(params martini.Params, req *http.Request, r render.Render) {\n qs := req.URL.Query()\n id := params[\"job_id\"]\n var jobId int\n if qs.Get(\"id_type\") == \"name\" {\n jobId, _ = db.GetIndex(\"job:name\", id)\n } else {\n jobId, _ = strconv.Atoi(id)\n }\n err := db.DelJob(jobId)\n if err != nil {\n log.Printf(\"Error: DelJob error %s\\n\", err)\n r.JSON(http.StatusOK, map[string]interface{}{\"err\": err.Error()})\n return\n }\n r.JSON(http.StatusOK, map[string]interface{}{})\n })\n\n\n mart.Post(API + \"\/notify\", func(r render.Render) {\n sched.Notify()\n r.JSON(http.StatusOK, map[string]interface{}{})\n })\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/drone\/routes\"\n\t\"github.com\/stretchr\/graceful\"\n)\n\nvar httpInstance *Http = nil\n\ntype Http struct {\n\tbitTorrent *BitTorrent\n\tserver *graceful.Server\n}\n\nfunc NewHttp(bitTorrent *BitTorrent) *Http {\n\tmime.AddExtensionType(\".avi\", \"video\/avi\")\n\tmime.AddExtensionType(\".mkv\", \"video\/x-matroska\")\n\tmime.AddExtensionType(\".mp4\", \"video\/mp4\")\n\n\tmux := routes.New()\n\tmux.Get(\"\/\", index)\n\tmux.Get(\"\/video\", video)\n\tmux.Get(\"\/shutdown\", shutdown)\n\n\treturn &Http{\n\t\tbitTorrent: bitTorrent,\n\t\tserver: &graceful.Server{\n\t\t\tTimeout: 500 * time.Millisecond,\n\t\t\tServer: &http.Server{\n\t\t\t\tAddr: fmt.Sprintf(\"%v:%v\", \"0.0.0.0\", settings.httpPort),\n\t\t\t\tHandler: mux,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (h *Http) Start() {\n\t\/\/ Parent process monitoring\n\tif settings.parentPID != 1 {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tparentAlive := true\n\n\t\t\t\tp, err := os.FindProcess(settings.parentPID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tparentAlive = false\n\t\t\t\t} else {\n\t\t\t\t\terr := p.Signal(syscall.Signal(0))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tparentAlive = false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !parentAlive {\n\t\t\t\t\tlog.Print(\"Parent process is dead, exiting\")\n\t\t\t\t\thttpInstance.server.Stop(500 * time.Millisecond)\n\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}()\n\t}\n\n\thttpInstance = h\n\th.server.ListenAndServe()\n}\n\nfunc (h *Http) Stop() {\n}\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\troutes.ServeJson(w, httpInstance.bitTorrent.GetTorrentInfos())\n}\n\nfunc video(w http.ResponseWriter, r *http.Request) {\n\tmagnetLink := getQueryParam(r, \"magnet_link\", \"\")\n\tdownloadDir := getQueryParam(r, \"download_dir\", \".\")\n\tpreview := getQueryParam(r, \"preview\", \"0\")\n\n\tif magnetLink != \"\" {\n\t\tif regExpMatch := regexp.MustCompile(`xt=urn:btih:([a-zA-Z0-9]+)`).FindStringSubmatch(magnetLink); len(regExpMatch) == 2 {\n\t\t\tinfoHash := regExpMatch[1]\n\n\t\t\thttpInstance.bitTorrent.AddTorrent(magnetLink, downloadDir)\n\n\t\t\tif torrentInfo := httpInstance.bitTorrent.GetTorrentInfo(infoHash); torrentInfo != nil {\n\t\t\t\thttpInstance.bitTorrent.AddConnection(infoHash)\n\t\t\t\tdefer httpInstance.bitTorrent.RemoveConnection(infoHash)\n\n\t\t\t\tif torrentFileInfo := torrentInfo.GetBiggestTorrentFileInfo(); torrentFileInfo != nil && torrentFileInfo.CompletePieces > 0 {\n\t\t\t\t\tif preview == \"0\" {\n\t\t\t\t\t\tif torrentFileInfo.Open(torrentInfo.DownloadDir) {\n\t\t\t\t\t\t\tdefer torrentFileInfo.Close()\n\t\t\t\t\t\t\thttp.ServeContent(w, r, torrentFileInfo.Path, time.Time{}, torrentFileInfo)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\thttp.Error(w, \"Failed to open file\", http.StatusInternalServerError)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvideoReady(w, true)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Video not ready yet\n\t\t\t\t\tif preview == \"0\" {\n\t\t\t\t\t\tredirect(w, r)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvideoReady(w, false)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Torrent not ready yet\n\t\t\t\tif preview == \"0\" {\n\t\t\t\t\tredirect(w, r)\n\t\t\t\t} else {\n\t\t\t\t\tvideoReady(w, false)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(w, \"Invalid Magnet link\", http.StatusBadRequest)\n\t\t}\n\t} else {\n\t\thttp.Error(w, \"Missing Magnet link\", http.StatusBadRequest)\n\t}\n}\n\nfunc shutdown(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\thttpInstance.server.Stop(500 * time.Millisecond)\n}\n\nfunc getQueryParam(r *http.Request, paramName string, defaultValue string) (result string) {\n\tresult = r.URL.Query().Get(paramName)\n\tif result == \"\" {\n\t\tresult = defaultValue\n\t}\n\treturn result\n}\n\nfunc redirect(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(2 * time.Second)\n\thttp.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)\n}\n\nfunc videoReady(w http.ResponseWriter, videoReady bool) {\n\troutes.ServeJson(w, map[string]interface{}{\"video_ready\": videoReady})\n}\n<commit_msg>Fix parent process monitoring on Windows<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/drone\/routes\"\n\t\"github.com\/stretchr\/graceful\"\n)\n\nvar httpInstance *Http = nil\n\ntype Http struct {\n\tbitTorrent *BitTorrent\n\tserver *graceful.Server\n}\n\nfunc NewHttp(bitTorrent *BitTorrent) *Http {\n\tmime.AddExtensionType(\".avi\", \"video\/avi\")\n\tmime.AddExtensionType(\".mkv\", \"video\/x-matroska\")\n\tmime.AddExtensionType(\".mp4\", \"video\/mp4\")\n\n\tmux := routes.New()\n\tmux.Get(\"\/\", index)\n\tmux.Get(\"\/video\", video)\n\tmux.Get(\"\/shutdown\", shutdown)\n\n\treturn &Http{\n\t\tbitTorrent: bitTorrent,\n\t\tserver: &graceful.Server{\n\t\t\tTimeout: 500 * time.Millisecond,\n\t\t\tServer: &http.Server{\n\t\t\t\tAddr: fmt.Sprintf(\"%v:%v\", \"0.0.0.0\", settings.httpPort),\n\t\t\t\tHandler: mux,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (h *Http) Start() {\n\t\/\/ Parent process monitoring\n\tif settings.parentPID != 1 {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tparentAlive := true\n\n\t\t\t\tp, err := os.FindProcess(settings.parentPID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tparentAlive = false\n\t\t\t\t} else if runtime.GOOS != \"windows\" {\n\t\t\t\t\terr := p.Signal(syscall.Signal(0))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tparentAlive = false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !parentAlive {\n\t\t\t\t\tlog.Print(\"Parent process is dead, exiting\")\n\t\t\t\t\thttpInstance.server.Stop(500 * time.Millisecond)\n\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}()\n\t}\n\n\thttpInstance = h\n\th.server.ListenAndServe()\n}\n\nfunc (h *Http) Stop() {\n}\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\troutes.ServeJson(w, httpInstance.bitTorrent.GetTorrentInfos())\n}\n\nfunc video(w http.ResponseWriter, r *http.Request) {\n\tmagnetLink := getQueryParam(r, \"magnet_link\", \"\")\n\tdownloadDir := getQueryParam(r, \"download_dir\", \".\")\n\tpreview := getQueryParam(r, \"preview\", \"0\")\n\n\tif magnetLink != \"\" {\n\t\tif regExpMatch := regexp.MustCompile(`xt=urn:btih:([a-zA-Z0-9]+)`).FindStringSubmatch(magnetLink); len(regExpMatch) == 2 {\n\t\t\tinfoHash := regExpMatch[1]\n\n\t\t\thttpInstance.bitTorrent.AddTorrent(magnetLink, downloadDir)\n\n\t\t\tif torrentInfo := httpInstance.bitTorrent.GetTorrentInfo(infoHash); torrentInfo != nil {\n\t\t\t\thttpInstance.bitTorrent.AddConnection(infoHash)\n\t\t\t\tdefer httpInstance.bitTorrent.RemoveConnection(infoHash)\n\n\t\t\t\tif torrentFileInfo := torrentInfo.GetBiggestTorrentFileInfo(); torrentFileInfo != nil && torrentFileInfo.CompletePieces > 0 {\n\t\t\t\t\tif preview == \"0\" {\n\t\t\t\t\t\tif torrentFileInfo.Open(torrentInfo.DownloadDir) {\n\t\t\t\t\t\t\tdefer torrentFileInfo.Close()\n\t\t\t\t\t\t\thttp.ServeContent(w, r, torrentFileInfo.Path, time.Time{}, torrentFileInfo)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\thttp.Error(w, \"Failed to open file\", http.StatusInternalServerError)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvideoReady(w, true)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Video not ready yet\n\t\t\t\t\tif preview == \"0\" {\n\t\t\t\t\t\tredirect(w, r)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvideoReady(w, false)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Torrent not ready yet\n\t\t\t\tif preview == \"0\" {\n\t\t\t\t\tredirect(w, r)\n\t\t\t\t} else {\n\t\t\t\t\tvideoReady(w, false)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(w, \"Invalid Magnet link\", http.StatusBadRequest)\n\t\t}\n\t} else {\n\t\thttp.Error(w, \"Missing Magnet link\", http.StatusBadRequest)\n\t}\n}\n\nfunc shutdown(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\thttpInstance.server.Stop(500 * time.Millisecond)\n}\n\nfunc getQueryParam(r *http.Request, paramName string, defaultValue string) (result string) {\n\tresult = r.URL.Query().Get(paramName)\n\tif result == \"\" {\n\t\tresult = defaultValue\n\t}\n\treturn result\n}\n\nfunc redirect(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(2 * time.Second)\n\thttp.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)\n}\n\nfunc videoReady(w http.ResponseWriter, videoReady bool) {\n\troutes.ServeJson(w, map[string]interface{}{\"video_ready\": videoReady})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage libcontainer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/fs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/systemd\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\/validate\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/intelrdt\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/mount\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tstateFilename = \"state.json\"\n\texecFifoFilename = \"exec.fifo\"\n)\n\nvar idRegex = regexp.MustCompile(`^[\\w+-\\.]+$`)\n\n\/\/ InitArgs returns an options func to configure a LinuxFactory with the\n\/\/ provided init binary path and arguments.\nfunc InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) (err error) {\n\t\tif len(args) > 0 {\n\t\t\t\/\/ Resolve relative paths to ensure that its available\n\t\t\t\/\/ after directory changes.\n\t\t\tif args[0], err = filepath.Abs(args[0]); err != nil {\n\t\t\t\treturn newGenericError(err, ConfigInvalid)\n\t\t\t}\n\t\t}\n\n\t\tl.InitArgs = args\n\t\treturn nil\n\t}\n}\n\n\/\/ SystemdCgroups is an options func to configure a LinuxFactory to return\n\/\/ containers that use systemd to create and manage cgroups.\nfunc SystemdCgroups(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &systemd.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Cgroupfs is an options func to configure a LinuxFactory to return containers\n\/\/ that use the native cgroups filesystem implementation to create and manage\n\/\/ cgroups.\nfunc Cgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ RootlessCgroupfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the native cgroups filesystem implementation to create\n\/\/ and manage cgroups. The difference between RootlessCgroupfs and Cgroupfs is\n\/\/ that RootlessCgroupfs can transparently handle permission errors that occur\n\/\/ during rootless container setup (while still allowing cgroup usage if\n\/\/ they've been set up properly).\nfunc RootlessCgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tRootless: true,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ IntelRdtfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the Intel RDT \"resource control\" filesystem to\n\/\/ create and manage Intel Xeon platform shared resources (e.g., L3 cache).\nfunc IntelRdtFs(l *LinuxFactory) error {\n\tl.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager {\n\t\treturn &intelrdt.IntelRdtManager{\n\t\t\tConfig: config,\n\t\t\tId: id,\n\t\t\tPath: path,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.\nfunc TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := unix.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CriuPath returns an option func to configure a LinuxFactory with the\n\/\/ provided criupath\nfunc CriuPath(criupath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.CriuPath = criupath\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns a linux based container factory based in the root directory and\n\/\/ configures the factory with the provided option funcs.\nfunc New(root string, options ...func(*LinuxFactory) error) (Factory, error) {\n\tif root != \"\" {\n\t\tif err := os.MkdirAll(root, 0700); err != nil {\n\t\t\treturn nil, newGenericError(err, SystemError)\n\t\t}\n\t}\n\tl := &LinuxFactory{\n\t\tRoot: root,\n\t\tInitPath: \"\/proc\/self\/exe\",\n\t\tInitArgs: []string{os.Args[0], \"init\"},\n\t\tValidator: validate.New(),\n\t\tCriuPath: \"criu\",\n\t}\n\tCgroupfs(l)\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn l, nil\n}\n\n\/\/ LinuxFactory implements the default factory interface for linux based systems.\ntype LinuxFactory struct {\n\t\/\/ Root directory for the factory to store state.\n\tRoot string\n\n\t\/\/ InitPath is the path for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitPath string\n\n\t\/\/ InitArgs are arguments for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitArgs []string\n\n\t\/\/ CriuPath is the path to the criu binary used for checkpoint and restore of\n\t\/\/ containers.\n\tCriuPath string\n\n\t\/\/ New{u,g}uidmapPath is the path to the binaries used for mapping with\n\t\/\/ rootless containers.\n\tNewuidmapPath string\n\tNewgidmapPath string\n\n\t\/\/ Validator provides validation to container configurations.\n\tValidator validate.Validator\n\n\t\/\/ NewCgroupsManager returns an initialized cgroups manager for a single container.\n\tNewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager\n\n\t\/\/ NewIntelRdtManager returns an initialized Intel RDT manager for a single container.\n\tNewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager\n}\n\nfunc (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, newGenericError(fmt.Errorf(\"invalid root\"), ConfigInvalid)\n\t}\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := l.Validator.Validate(config); err != nil {\n\t\treturn nil, newGenericError(err, ConfigInvalid)\n\t}\n\tcontainerRoot := filepath.Join(l.Root, id)\n\tif _, err := os.Stat(containerRoot); err == nil {\n\t\treturn nil, newGenericError(fmt.Errorf(\"container with id exists: %v\", id), IdInUse)\n\t} else if !os.IsNotExist(err) {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tif err := os.MkdirAll(containerRoot, 0711); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tif err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tc := &linuxContainer{\n\t\tid: id,\n\t\troot: containerRoot,\n\t\tconfig: config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: l.NewCgroupsManager(config.Cgroups, nil),\n\t}\n\tif intelrdt.IsEnabled() {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(config, id, \"\")\n\t}\n\tc.state = &stoppedState{c: c}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Load(id string) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, newGenericError(fmt.Errorf(\"invalid root\"), ConfigInvalid)\n\t}\n\tcontainerRoot := filepath.Join(l.Root, id)\n\tstate, err := l.loadState(containerRoot, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &nonChildProcess{\n\t\tprocessPid: state.InitProcessPid,\n\t\tprocessStartTime: state.InitProcessStartTime,\n\t\tfds: state.ExternalDescriptors,\n\t}\n\tc := &linuxContainer{\n\t\tinitProcess: r,\n\t\tinitProcessStartTime: state.InitProcessStartTime,\n\t\tid: id,\n\t\tconfig: &state.Config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),\n\t\troot: containerRoot,\n\t\tcreated: state.Created,\n\t}\n\tc.state = &loadedState{c: c}\n\tif err := c.refreshState(); err != nil {\n\t\treturn nil, err\n\t}\n\tif intelrdt.IsEnabled() {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath)\n\t}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Type() string {\n\treturn \"libcontainer\"\n}\n\n\/\/ StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state\n\/\/ This is a low level implementation detail of the reexec and should not be consumed externally\nfunc (l *LinuxFactory) StartInitialization() (err error) {\n\tvar (\n\t\tpipefd, fifofd int\n\t\tconsoleSocket *os.File\n\t\tenvInitPipe = os.Getenv(\"_LIBCONTAINER_INITPIPE\")\n\t\tenvFifoFd = os.Getenv(\"_LIBCONTAINER_FIFOFD\")\n\t\tenvConsole = os.Getenv(\"_LIBCONTAINER_CONSOLE\")\n\t)\n\n\t\/\/ Get the INITPIPE.\n\tpipefd, err = strconv.Atoi(envInitPipe)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s\", envInitPipe, err)\n\t}\n\n\tvar (\n\t\tpipe = os.NewFile(uintptr(pipefd), \"pipe\")\n\t\tit = initType(os.Getenv(\"_LIBCONTAINER_INITTYPE\"))\n\t)\n\tdefer pipe.Close()\n\n\t\/\/ Only init processes have FIFOFD.\n\tfifofd = -1\n\tif it == initStandard {\n\t\tif fifofd, err = strconv.Atoi(envFifoFd); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_FIFOFD=%s to int: %s\", envFifoFd, err)\n\t\t}\n\t}\n\n\tif envConsole != \"\" {\n\t\tconsole, err := strconv.Atoi(envConsole)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s\", envConsole, err)\n\t\t}\n\t\tconsoleSocket = os.NewFile(uintptr(console), \"console-socket\")\n\t\tdefer consoleSocket.Close()\n\t}\n\n\t\/\/ clear the current process's environment to clean any libcontainer\n\t\/\/ specific env vars.\n\tos.Clearenv()\n\n\tdefer func() {\n\t\t\/\/ We have an error during the initialization of the container's init,\n\t\t\/\/ send it back to the parent process in the form of an initError.\n\t\tif werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t\tif werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t}()\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"panic from initialization: %v, %v\", e, string(debug.Stack()))\n\t\t}\n\t}()\n\n\ti, err := newContainerInit(it, pipe, consoleSocket, fifofd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.\n\treturn i.Init()\n}\n\nfunc (l *LinuxFactory) loadState(root, id string) (*State, error) {\n\tf, err := os.Open(filepath.Join(root, stateFilename))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, newGenericError(fmt.Errorf(\"container %q does not exist\", id), ContainerNotExists)\n\t\t}\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tdefer f.Close()\n\tvar state *State\n\tif err := json.NewDecoder(f).Decode(&state); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\treturn state, nil\n}\n\nfunc (l *LinuxFactory) validateID(id string) error {\n\tif !idRegex.MatchString(id) {\n\t\treturn newGenericError(fmt.Errorf(\"invalid id format: %v\", id), InvalidIdFormat)\n\t}\n\n\treturn nil\n}\n\n\/\/ NewuidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewuidmapPath = newuidmapPath\n\t\treturn nil\n\t}\n}\n\n\/\/ NewgidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewgidmapPath = newgidmapPath\n\t\treturn nil\n\t}\n}\n<commit_msg>fix unexpected delete bug when container id is ..<commit_after>\/\/ +build linux\n\npackage libcontainer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/fs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/systemd\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\/validate\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/intelrdt\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/mount\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tstateFilename = \"state.json\"\n\texecFifoFilename = \"exec.fifo\"\n)\n\nvar idRegex = regexp.MustCompile(`^[\\w+-\\.]+$`)\n\n\/\/ InitArgs returns an options func to configure a LinuxFactory with the\n\/\/ provided init binary path and arguments.\nfunc InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) (err error) {\n\t\tif len(args) > 0 {\n\t\t\t\/\/ Resolve relative paths to ensure that its available\n\t\t\t\/\/ after directory changes.\n\t\t\tif args[0], err = filepath.Abs(args[0]); err != nil {\n\t\t\t\treturn newGenericError(err, ConfigInvalid)\n\t\t\t}\n\t\t}\n\n\t\tl.InitArgs = args\n\t\treturn nil\n\t}\n}\n\n\/\/ SystemdCgroups is an options func to configure a LinuxFactory to return\n\/\/ containers that use systemd to create and manage cgroups.\nfunc SystemdCgroups(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &systemd.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Cgroupfs is an options func to configure a LinuxFactory to return containers\n\/\/ that use the native cgroups filesystem implementation to create and manage\n\/\/ cgroups.\nfunc Cgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ RootlessCgroupfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the native cgroups filesystem implementation to create\n\/\/ and manage cgroups. The difference between RootlessCgroupfs and Cgroupfs is\n\/\/ that RootlessCgroupfs can transparently handle permission errors that occur\n\/\/ during rootless container setup (while still allowing cgroup usage if\n\/\/ they've been set up properly).\nfunc RootlessCgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tRootless: true,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ IntelRdtfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the Intel RDT \"resource control\" filesystem to\n\/\/ create and manage Intel Xeon platform shared resources (e.g., L3 cache).\nfunc IntelRdtFs(l *LinuxFactory) error {\n\tl.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager {\n\t\treturn &intelrdt.IntelRdtManager{\n\t\t\tConfig: config,\n\t\t\tId: id,\n\t\t\tPath: path,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.\nfunc TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := unix.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CriuPath returns an option func to configure a LinuxFactory with the\n\/\/ provided criupath\nfunc CriuPath(criupath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.CriuPath = criupath\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns a linux based container factory based in the root directory and\n\/\/ configures the factory with the provided option funcs.\nfunc New(root string, options ...func(*LinuxFactory) error) (Factory, error) {\n\tif root != \"\" {\n\t\tif err := os.MkdirAll(root, 0700); err != nil {\n\t\t\treturn nil, newGenericError(err, SystemError)\n\t\t}\n\t}\n\tl := &LinuxFactory{\n\t\tRoot: root,\n\t\tInitPath: \"\/proc\/self\/exe\",\n\t\tInitArgs: []string{os.Args[0], \"init\"},\n\t\tValidator: validate.New(),\n\t\tCriuPath: \"criu\",\n\t}\n\tCgroupfs(l)\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn l, nil\n}\n\n\/\/ LinuxFactory implements the default factory interface for linux based systems.\ntype LinuxFactory struct {\n\t\/\/ Root directory for the factory to store state.\n\tRoot string\n\n\t\/\/ InitPath is the path for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitPath string\n\n\t\/\/ InitArgs are arguments for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitArgs []string\n\n\t\/\/ CriuPath is the path to the criu binary used for checkpoint and restore of\n\t\/\/ containers.\n\tCriuPath string\n\n\t\/\/ New{u,g}uidmapPath is the path to the binaries used for mapping with\n\t\/\/ rootless containers.\n\tNewuidmapPath string\n\tNewgidmapPath string\n\n\t\/\/ Validator provides validation to container configurations.\n\tValidator validate.Validator\n\n\t\/\/ NewCgroupsManager returns an initialized cgroups manager for a single container.\n\tNewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager\n\n\t\/\/ NewIntelRdtManager returns an initialized Intel RDT manager for a single container.\n\tNewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager\n}\n\nfunc (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, newGenericError(fmt.Errorf(\"invalid root\"), ConfigInvalid)\n\t}\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := l.Validator.Validate(config); err != nil {\n\t\treturn nil, newGenericError(err, ConfigInvalid)\n\t}\n\tcontainerRoot := filepath.Join(l.Root, id)\n\tif _, err := os.Stat(containerRoot); err == nil {\n\t\treturn nil, newGenericError(fmt.Errorf(\"container with id exists: %v\", id), IdInUse)\n\t} else if !os.IsNotExist(err) {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tif err := os.MkdirAll(containerRoot, 0711); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tif err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tc := &linuxContainer{\n\t\tid: id,\n\t\troot: containerRoot,\n\t\tconfig: config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: l.NewCgroupsManager(config.Cgroups, nil),\n\t}\n\tif intelrdt.IsEnabled() {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(config, id, \"\")\n\t}\n\tc.state = &stoppedState{c: c}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Load(id string) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, newGenericError(fmt.Errorf(\"invalid root\"), ConfigInvalid)\n\t}\n\t\/\/when load, we need to check id is valid or not.\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tcontainerRoot := filepath.Join(l.Root, id)\n\tstate, err := l.loadState(containerRoot, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &nonChildProcess{\n\t\tprocessPid: state.InitProcessPid,\n\t\tprocessStartTime: state.InitProcessStartTime,\n\t\tfds: state.ExternalDescriptors,\n\t}\n\tc := &linuxContainer{\n\t\tinitProcess: r,\n\t\tinitProcessStartTime: state.InitProcessStartTime,\n\t\tid: id,\n\t\tconfig: &state.Config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),\n\t\troot: containerRoot,\n\t\tcreated: state.Created,\n\t}\n\tc.state = &loadedState{c: c}\n\tif err := c.refreshState(); err != nil {\n\t\treturn nil, err\n\t}\n\tif intelrdt.IsEnabled() {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath)\n\t}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Type() string {\n\treturn \"libcontainer\"\n}\n\n\/\/ StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state\n\/\/ This is a low level implementation detail of the reexec and should not be consumed externally\nfunc (l *LinuxFactory) StartInitialization() (err error) {\n\tvar (\n\t\tpipefd, fifofd int\n\t\tconsoleSocket *os.File\n\t\tenvInitPipe = os.Getenv(\"_LIBCONTAINER_INITPIPE\")\n\t\tenvFifoFd = os.Getenv(\"_LIBCONTAINER_FIFOFD\")\n\t\tenvConsole = os.Getenv(\"_LIBCONTAINER_CONSOLE\")\n\t)\n\n\t\/\/ Get the INITPIPE.\n\tpipefd, err = strconv.Atoi(envInitPipe)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s\", envInitPipe, err)\n\t}\n\n\tvar (\n\t\tpipe = os.NewFile(uintptr(pipefd), \"pipe\")\n\t\tit = initType(os.Getenv(\"_LIBCONTAINER_INITTYPE\"))\n\t)\n\tdefer pipe.Close()\n\n\t\/\/ Only init processes have FIFOFD.\n\tfifofd = -1\n\tif it == initStandard {\n\t\tif fifofd, err = strconv.Atoi(envFifoFd); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_FIFOFD=%s to int: %s\", envFifoFd, err)\n\t\t}\n\t}\n\n\tif envConsole != \"\" {\n\t\tconsole, err := strconv.Atoi(envConsole)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s\", envConsole, err)\n\t\t}\n\t\tconsoleSocket = os.NewFile(uintptr(console), \"console-socket\")\n\t\tdefer consoleSocket.Close()\n\t}\n\n\t\/\/ clear the current process's environment to clean any libcontainer\n\t\/\/ specific env vars.\n\tos.Clearenv()\n\n\tdefer func() {\n\t\t\/\/ We have an error during the initialization of the container's init,\n\t\t\/\/ send it back to the parent process in the form of an initError.\n\t\tif werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t\tif werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t}()\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"panic from initialization: %v, %v\", e, string(debug.Stack()))\n\t\t}\n\t}()\n\n\ti, err := newContainerInit(it, pipe, consoleSocket, fifofd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.\n\treturn i.Init()\n}\n\nfunc (l *LinuxFactory) loadState(root, id string) (*State, error) {\n\tf, err := os.Open(filepath.Join(root, stateFilename))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, newGenericError(fmt.Errorf(\"container %q does not exist\", id), ContainerNotExists)\n\t\t}\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\tdefer f.Close()\n\tvar state *State\n\tif err := json.NewDecoder(f).Decode(&state); err != nil {\n\t\treturn nil, newGenericError(err, SystemError)\n\t}\n\treturn state, nil\n}\n\nfunc (l *LinuxFactory) validateID(id string) error {\n\tif !idRegex.MatchString(id) || id == \"..\" || id == \".\" {\n\t\treturn newGenericError(fmt.Errorf(\"invalid id format: %v\", id), InvalidIdFormat)\n\t}\n\n\t\/\/For unforeseen invalid id situations, can checked by is SubDir?\n\trootPath, err := filepath.Abs(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainerRoot := filepath.Join(l.Root, id)\n\trootCheckPath, err := filepath.Abs(filepath.Join(containerRoot, \"..\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif rootPath != rootCheckPath {\n\t\treturn newGenericError(fmt.Errorf(\"invalid id format: %v\", id), InvalidIdFormat)\n\t}\n\n\treturn nil\n}\n\n\/\/ NewuidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewuidmapPath = newuidmapPath\n\t\treturn nil\n\t}\n}\n\n\/\/ NewgidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewgidmapPath = newgidmapPath\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\n\t\"github.com\/7sDream\/rikka\/common\/logger\"\n\t\"github.com\/7sDream\/rikka\/plugins\"\n\t\"github.com\/7sDream\/rikka\/plugins\/fs\"\n)\n\nvar (\n\t\/\/ Map from plugin name to object\n\tpluginMap = make(map[string]plugins.RikkaPlugin)\n\n\t\/\/ Command line arguments var\n\targBindIPAddress *string\n\targPort *int\n\targPassword *string\n\targMaxSizeByMB *float64\n\targPluginStr *string\n\targLogLevel *int\n\n\t\/\/ concat socket from ip address and port\n\tsocket string\n\n\t\/\/ The used plugin\n\tthePlugin plugins.RikkaPlugin\n)\n\n\/\/ --- Init and check ---\n\nfunc createSignalHandler(handlerFunc func()) (func(), chan os.Signal) {\n\tsignalChain := make(chan os.Signal, 1)\n\n\treturn func() {\n\t\tfor _ = range signalChain {\n\t\t\thandlerFunc()\n\t\t}\n\t}, signalChain\n}\n\n\/\/ registerSignalHandler register a handler for process Ctrl + C\nfunc registerSignalHandler(handlerFunc func()) {\n\tsignalHandler, channel := createSignalHandler(handlerFunc)\n\tsignal.Notify(channel, os.Interrupt)\n\tgo signalHandler()\n}\n\nfunc init() {\n\n\tregisterSignalHandler(func() {\n\t\tl.Fatal(\"Rikka have to go to sleep, see you tomorrow\")\n\t})\n\n\tinitPluginList()\n\n\tinitArgVars()\n\n\tflag.Parse()\n\n\tl.Info(\"Args bindIP =\", *argBindIPAddress)\n\tl.Info(\"Args port =\", *argPort)\n\tl.Info(\"Args password =\", *argPassword)\n\tl.Info(\"Args maxFileSize =\", *argMaxSizeByMB, \"MB\")\n\tl.Info(\"Args loggerLevel =\", *argLogLevel)\n\tl.Info(\"Args.plugin =\", *argPluginStr)\n\n\tif *argBindIPAddress == \":\" {\n\t\tsocket = *argBindIPAddress + strconv.Itoa(*argPort)\n\t} else {\n\t\tsocket = *argBindIPAddress + \":\" + strconv.Itoa(*argPort)\n\t}\n\n\tlogger.SetLevel(*argLogLevel)\n\n\truntimeEnvCheck()\n}\n\nfunc initPluginList() {\n\tpluginMap[\"fs\"] = fs.FsPlugin\n}\n\nfunc initArgVars() {\n\targBindIPAddress = flag.String(\"bind\", \":\", \"bind ip address, use : for all address\")\n\targPort = flag.Int(\"port\", 80, \"server port\")\n\targPassword = flag.String(\"pwd\", \"rikka\", \"The password need provided when upload\")\n\targMaxSizeByMB = flag.Float64(\"size\", 5, \"Max file size by MB\")\n\targLogLevel = flag.Int(\n\t\t\"level\", logger.LevelInfo,\n\t\tfmt.Sprintf(\"logger level, from %d to %d\", logger.LevelDebug, logger.LevelError),\n\t)\n\n\t\/\/ Get name array of all avaliable plugins, show in `rikka -h``\n\tpluginNames := make([]string, 0, len(pluginMap))\n\tfor k := range pluginMap {\n\t\tpluginNames = append(pluginNames, k)\n\t}\n\targPluginStr = flag.String(\n\t\t\"plugin\", \"fs\",\n\t\t\"what plugin use to save file, selected from \"+fmt.Sprintf(\"%v\", pluginNames),\n\t)\n\n}\n\nfunc runtimeEnvCheck() {\n\tl.Info(\"Check runtime environment\")\n\n\tl.Debug(\"Try to find plugin\", *argPluginStr)\n\n\t\/\/ Make sure plugin be selected exist\n\tif plugin, ok := pluginMap[*argPluginStr]; ok {\n\t\tthePlugin = plugin\n\t\tl.Debug(\"Plugin\", *argPluginStr, \"found\")\n\t} else {\n\t\tl.Fatal(\"Plugin\", *argPluginStr, \"not exist\")\n\t}\n\n\tl.Info(\"All runtime environment check passed\")\n}\n<commit_msg>fix exit code when get Ctrl+c, add makefile<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\n\t\"github.com\/7sDream\/rikka\/common\/logger\"\n\t\"github.com\/7sDream\/rikka\/plugins\"\n\t\"github.com\/7sDream\/rikka\/plugins\/fs\"\n)\n\nvar (\n\t\/\/ Map from plugin name to object\n\tpluginMap = make(map[string]plugins.RikkaPlugin)\n\n\t\/\/ Command line arguments var\n\targBindIPAddress *string\n\targPort *int\n\targPassword *string\n\targMaxSizeByMB *float64\n\targPluginStr *string\n\targLogLevel *int\n\n\t\/\/ concat socket from ip address and port\n\tsocket string\n\n\t\/\/ The used plugin\n\tthePlugin plugins.RikkaPlugin\n)\n\n\/\/ --- Init and check ---\n\nfunc createSignalHandler(handlerFunc func()) (func(), chan os.Signal) {\n\tsignalChain := make(chan os.Signal, 1)\n\n\treturn func() {\n\t\tfor _ = range signalChain {\n\t\t\thandlerFunc()\n\t\t}\n\t}, signalChain\n}\n\n\/\/ registerSignalHandler register a handler for process Ctrl + C\nfunc registerSignalHandler(handlerFunc func()) {\n\tsignalHandler, channel := createSignalHandler(handlerFunc)\n\tsignal.Notify(channel, os.Interrupt)\n\tgo signalHandler()\n}\n\nfunc init() {\n\n\tregisterSignalHandler(func() {\n\t\tl.Info(\"Rikka have to go to sleep, see you tomorrow\")\n\t\tos.Exit(0)\n\t})\n\n\tinitPluginList()\n\n\tinitArgVars()\n\n\tflag.Parse()\n\n\tl.Info(\"Args bindIP =\", *argBindIPAddress)\n\tl.Info(\"Args port =\", *argPort)\n\tl.Info(\"Args password =\", *argPassword)\n\tl.Info(\"Args maxFileSize =\", *argMaxSizeByMB, \"MB\")\n\tl.Info(\"Args loggerLevel =\", *argLogLevel)\n\tl.Info(\"Args.plugin =\", *argPluginStr)\n\n\tif *argBindIPAddress == \":\" {\n\t\tsocket = *argBindIPAddress + strconv.Itoa(*argPort)\n\t} else {\n\t\tsocket = *argBindIPAddress + \":\" + strconv.Itoa(*argPort)\n\t}\n\n\tlogger.SetLevel(*argLogLevel)\n\n\truntimeEnvCheck()\n}\n\nfunc initPluginList() {\n\tpluginMap[\"fs\"] = fs.FsPlugin\n}\n\nfunc initArgVars() {\n\targBindIPAddress = flag.String(\"bind\", \":\", \"bind ip address, use : for all address\")\n\targPort = flag.Int(\"port\", 80, \"server port\")\n\targPassword = flag.String(\"pwd\", \"rikka\", \"The password need provided when upload\")\n\targMaxSizeByMB = flag.Float64(\"size\", 5, \"Max file size by MB\")\n\targLogLevel = flag.Int(\n\t\t\"level\", logger.LevelInfo,\n\t\tfmt.Sprintf(\"logger level, from %d to %d\", logger.LevelDebug, logger.LevelError),\n\t)\n\n\t\/\/ Get name array of all avaliable plugins, show in `rikka -h``\n\tpluginNames := make([]string, 0, len(pluginMap))\n\tfor k := range pluginMap {\n\t\tpluginNames = append(pluginNames, k)\n\t}\n\targPluginStr = flag.String(\n\t\t\"plugin\", \"fs\",\n\t\t\"what plugin use to save file, selected from \"+fmt.Sprintf(\"%v\", pluginNames),\n\t)\n\n}\n\nfunc runtimeEnvCheck() {\n\tl.Info(\"Check runtime environment\")\n\n\tl.Debug(\"Try to find plugin\", *argPluginStr)\n\n\t\/\/ Make sure plugin be selected exist\n\tif plugin, ok := pluginMap[*argPluginStr]; ok {\n\t\tthePlugin = plugin\n\t\tl.Debug(\"Plugin\", *argPluginStr, \"found\")\n\t} else {\n\t\tl.Fatal(\"Plugin\", *argPluginStr, \"not exist\")\n\t}\n\n\tl.Info(\"All runtime environment check passed\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/proxy\"\n)\n\nvar version, configAddr string\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n\n\tenvNoLog := os.Getenv(\"proxy_no_log\") \/\/ false\n\tconfig.SetNoLog(envNoLog[strings.Index(envNoLog, \"=\")+1:])\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags | log.Lmicroseconds)\n\tlog.Println(\"libsocks5connect loaded, version:\", version)\n\n\tenvProxy := os.Getenv(\"socks5_proxy\") \/\/ user:pass@192.168.1.1:1080,user:pass@192.168.1.2:1080\n\tconfig.SetProxyAddrs(strings.Split(envProxy[strings.Index(envProxy, \"=\")+1:], \",\"))\n\n\tenvNotProxies := os.Getenv(\"not_proxy\") \/\/ 127.0.0.0\/8,192.168.1.0\/24\n\tconfig.SetNoProxies(strings.Split(envNotProxies[strings.Index(envNotProxies, \"=\")+1:], \",\"))\n\n\tenvConnectTimeouts := os.Getenv(\"proxy_timeout_ms\") \/\/ 1000\n\tconfig.SetConnectTimeouts(envConnectTimeouts[strings.Index(envConnectTimeouts, \"=\")+1:])\n\n\tenvNoConfigServer := os.Getenv(\"no_config_server\") \/\/ false\n\tenvNoConfigServer = strings.ToLower(strings.TrimSpace(\n\t\tenvNoConfigServer[strings.Index(envNoConfigServer, \"=\")+1:]))\n\tif envNoConfigServer != \"true\" && envNoConfigServer != \"1\" {\n\t\tgo config.Listen()\n\t}\n}\n\nfunc main() {\n}\n\nvar config = &Config{}\n\ntype Config struct {\n\tlock sync.RWMutex\n\tnotProxies []*net.IPNet\n\tconnectTimeouts time.Duration\n\tproxyAddrs []ProxyAddr\n\tproxyNoLog bool\n}\n\nfunc (p *Config) String() string {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn fmt.Sprintf(\"%v: %v\\n%v=%v\\n%v=%v\\n%v=%v\\n%v=%v\\n\",\n\t\t\"version\", version,\n\t\t\"proxy_no_log\", p.IsProxyNoLog(),\n\t\t\"socks5_proxy\", strings.Join(p.GetProxyAddrs(), \",\"),\n\t\t\"not_proxy\", strings.Join(p.GetNoProxies(), \",\"),\n\t\t\"proxy_timeout_ms\", uint64(p.GetConnectTimeouts()\/time.Millisecond),\n\t)\n}\n\ntype ProxyAddr struct {\n\tproxy.Auth\n\tAddrStr string\n\tResolvedAddr *net.TCPAddr\n}\n\nfunc (p ProxyAddr) Sockaddr() (addr syscall.Sockaddr) {\n\tnaddr, err := net.ResolveTCPAddr(\"tcp\", p.AddrStr)\n\tif err != nil {\n\t\tlog.Println(\"resolve proxy addr failed\", p.AddrStr, err, \"use saved addr:\", p.ResolvedAddr)\n\t\tnaddr = p.ResolvedAddr\n\t} else {\n\t\tp.ResolvedAddr = naddr\n\t}\n\tif ip4 := naddr.IP.To4(); ip4 != nil {\n\t\tvar proxyIp4 [4]byte\n\t\tcopy(proxyIp4[:], ip4)\n\t\treturn &syscall.SockaddrInet4{\n\t\t\tAddr: proxyIp4,\n\t\t\tPort: naddr.Port,\n\t\t}\n\t} else if ip6 := naddr.IP.To16(); ip6 != nil {\n\t\tlog.Println(\"not support ipv6 proxy addr\", p.AddrStr, p.ResolvedAddr)\n\t}\n\treturn\n}\n\nfunc (p ProxyAddr) String() string {\n\tif p.AddrStr == p.ResolvedAddr.String() {\n\t\treturn p.AddrStr\n\t}\n\treturn fmt.Sprintf(\"%v(%v)\", p.AddrStr, p.ResolvedAddr)\n}\n\nfunc (p *Config) SetProxyAddrs(addrs []string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tvar proxyAddrs []ProxyAddr\n\tfor _, ipAddr := range addrs {\n\t\tipAddr = strings.TrimSpace(ipAddr)\n\t\tif len(ipAddr) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar proxyAddr ProxyAddr\n\t\tu, err := url.Parse(\"socks5:\/\/\" + strings.TrimSpace(ipAddr))\n\t\tif err != nil {\n\t\t\tlog.Println(\"parse proxy addr failed\", ipAddr, err)\n\t\t\tcontinue\n\t\t}\n\t\tif u.User != nil {\n\t\t\tproxyAddr.User = u.User.Username()\n\t\t\tproxyAddr.Password, _ = u.User.Password()\n\t\t}\n\t\tnaddr, err := net.ResolveTCPAddr(\"tcp\", u.Host)\n\t\tif err != nil || naddr.IP.To4() == nil {\n\t\t\tlog.Println(\"resolve proxy addr failed\", ipAddr, err, naddr.IP)\n\t\t\tcontinue\n\t\t}\n\t\tproxyAddr.AddrStr = u.Host\n\t\tproxyAddr.ResolvedAddr = naddr\n\t\tlog.Println(\"add proxy:\", ipAddr)\n\t\tproxyAddrs = append(proxyAddrs, proxyAddr)\n\t}\n\tif len(proxyAddrs) == 0 {\n\t\tlog.Println(\"no proxy available\")\n\t\treturn\n\t}\n\tp.proxyAddrs = proxyAddrs\n}\n\nfunc (p *Config) GetProxyCount() int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn len(p.proxyAddrs)\n}\n\nfunc (p *Config) GetProxyAddr() *ProxyAddr {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\ttmpAddr := p.proxyAddrs[rand.Intn(len(p.proxyAddrs))]\n\treturn &tmpAddr\n}\nfunc (p *Config) GetProxyAddrs() (addrs []string) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, addr := range p.proxyAddrs {\n\t\taddrs = append(addrs, addr.AddrStr)\n\t}\n\treturn\n}\n\nfunc (p *Config) SetConnectTimeouts(timeout string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tt, err := strconv.Atoi(strings.TrimSpace(timeout))\n\tif err != nil {\n\t\tt = 3000\n\t}\n\tp.connectTimeouts = time.Duration(t) * time.Millisecond\n\tlog.Println(\"set connect timeout to\", p.connectTimeouts)\n}\nfunc (p *Config) GetConnectTimeouts() time.Duration {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn p.connectTimeouts\n}\n\nfunc (p *Config) SetNoLog(isNoLog string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tisNoLog = strings.ToLower(strings.TrimSpace(isNoLog))\n\tif isNoLog == \"true\" || isNoLog == \"1\" {\n\t\tlog.SetOutput(ioutil.Discard)\n\t\tp.proxyNoLog = true\n\t} else {\n\t\tlog.SetOutput(os.Stderr)\n\t\tp.proxyNoLog = false\n\t}\n}\nfunc (p *Config) IsProxyNoLog() bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn p.proxyNoLog\n}\n\nfunc (p *Config) ShouldNotProxy(ip net.IP) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, ipnet := range p.notProxies {\n\t\tif ipnet.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (p *Config) SetNoProxies(addrs []string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tfor _, addr := range addrs {\n\t\taddr = strings.TrimSpace(addr)\n\t\tif len(addr) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t_, ipnet, err := net.ParseCIDR(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"parse ipnet failed\", err, addr)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"add not proxy addr:\", addr)\n\t\tp.notProxies = append(p.notProxies, ipnet)\n\t}\n}\n\nfunc (p *Config) GetNoProxies() (addrs []string) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, addr := range p.notProxies {\n\t\taddrs = append(addrs, addr.String())\n\t}\n\treturn\n}\n\nfunc (p *Config) Listen() {\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tlog.Println(\"listen failed\", err)\n\t\treturn\n\t}\n\tconfigAddr = ln.Addr().String()\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Accept failed\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgo p.UpdateConfigFromConn(conn)\n\t}\n}\n\nfunc (p *Config) UpdateConfigFromConn(conn net.Conn) {\n\tdefer conn.Close()\n\tlog.Println(\"config server new connection from:\", conn.RemoteAddr())\n\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tsp := strings.SplitN(scanner.Text(), \"=\", 2)\n\t\tif len(sp) < 2 {\n\t\t\tlog.Println(\"invalid config\")\n\t\t\tfmt.Fprintf(conn, \"invalid config %#v\\n\", scanner.Text())\n\t\t\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\t\t\tcontinue\n\t\t}\n\t\tswitch sp[0] {\n\t\tcase \"socks5_proxy\", \"export socks5_proxy\":\n\t\t\tconfig.SetProxyAddrs(strings.Split(sp[1], \",\"))\n\t\t\tfmt.Fprintf(conn, \"OK, current proxyaddrs: %v\\n\",\n\t\t\t\tstrings.Join(config.GetProxyAddrs(), \",\"))\n\t\tcase \"not_proxy\", \"export not_proxy\":\n\t\t\tconfig.SetNoProxies(strings.Split(sp[1], \",\"))\n\t\t\tfmt.Fprintf(conn, \"OK, current no proxy addrs: %v\\n\",\n\t\t\t\tstrings.Join(config.GetNoProxies(), \",\"))\n\t\tcase \"proxy_timeout_ms\", \"export proxy_timeout_ms\":\n\t\t\tconfig.SetConnectTimeouts(sp[1])\n\t\t\tfmt.Fprintf(conn, \"OK, current proxy timeouts: %v\\n\",\n\t\t\t\tuint64(p.GetConnectTimeouts()\/time.Millisecond))\n\t\tcase \"proxy_no_log\", \"export proxy_no_log\":\n\t\t\tconfig.SetNoLog(sp[1])\n\t\t\tfmt.Fprintf(conn, \"OK, proxy_no_log: %v\\n\",\n\t\t\t\tconfig.IsProxyNoLog())\n\t\tdefault:\n\t\t\tlog.Println(\"unknown config\")\n\t\t\tfmt.Fprintf(conn, \"unknown config %#v\\n\", scanner.Text())\n\t\t\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\t\t}\n\t}\n}\n<commit_msg>config addr use localhost<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/proxy\"\n)\n\nvar version, configAddr string\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n\n\tenvNoLog := os.Getenv(\"proxy_no_log\") \/\/ false\n\tconfig.SetNoLog(envNoLog[strings.Index(envNoLog, \"=\")+1:])\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags | log.Lmicroseconds)\n\tlog.Println(\"libsocks5connect loaded, version:\", version)\n\n\tenvProxy := os.Getenv(\"socks5_proxy\") \/\/ user:pass@192.168.1.1:1080,user:pass@192.168.1.2:1080\n\tconfig.SetProxyAddrs(strings.Split(envProxy[strings.Index(envProxy, \"=\")+1:], \",\"))\n\n\tenvNotProxies := os.Getenv(\"not_proxy\") \/\/ 127.0.0.0\/8,192.168.1.0\/24\n\tconfig.SetNoProxies(strings.Split(envNotProxies[strings.Index(envNotProxies, \"=\")+1:], \",\"))\n\n\tenvConnectTimeouts := os.Getenv(\"proxy_timeout_ms\") \/\/ 1000\n\tconfig.SetConnectTimeouts(envConnectTimeouts[strings.Index(envConnectTimeouts, \"=\")+1:])\n\n\tenvNoConfigServer := os.Getenv(\"no_config_server\") \/\/ false\n\tenvNoConfigServer = strings.ToLower(strings.TrimSpace(\n\t\tenvNoConfigServer[strings.Index(envNoConfigServer, \"=\")+1:]))\n\tif envNoConfigServer != \"true\" && envNoConfigServer != \"1\" {\n\t\tgo config.Listen()\n\t}\n}\n\nfunc main() {\n}\n\nvar config = &Config{}\n\ntype Config struct {\n\tlock sync.RWMutex\n\tnotProxies []*net.IPNet\n\tconnectTimeouts time.Duration\n\tproxyAddrs []ProxyAddr\n\tproxyNoLog bool\n}\n\nfunc (p *Config) String() string {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn fmt.Sprintf(\"%v: %v\\n%v=%v\\n%v=%v\\n%v=%v\\n%v=%v\\n\",\n\t\t\"version\", version,\n\t\t\"proxy_no_log\", p.IsProxyNoLog(),\n\t\t\"socks5_proxy\", strings.Join(p.GetProxyAddrs(), \",\"),\n\t\t\"not_proxy\", strings.Join(p.GetNoProxies(), \",\"),\n\t\t\"proxy_timeout_ms\", uint64(p.GetConnectTimeouts()\/time.Millisecond),\n\t)\n}\n\ntype ProxyAddr struct {\n\tproxy.Auth\n\tAddrStr string\n\tResolvedAddr *net.TCPAddr\n}\n\nfunc (p ProxyAddr) Sockaddr() (addr syscall.Sockaddr) {\n\tnaddr, err := net.ResolveTCPAddr(\"tcp\", p.AddrStr)\n\tif err != nil {\n\t\tlog.Println(\"resolve proxy addr failed\", p.AddrStr, err, \"use saved addr:\", p.ResolvedAddr)\n\t\tnaddr = p.ResolvedAddr\n\t} else {\n\t\tp.ResolvedAddr = naddr\n\t}\n\tif ip4 := naddr.IP.To4(); ip4 != nil {\n\t\tvar proxyIp4 [4]byte\n\t\tcopy(proxyIp4[:], ip4)\n\t\treturn &syscall.SockaddrInet4{\n\t\t\tAddr: proxyIp4,\n\t\t\tPort: naddr.Port,\n\t\t}\n\t} else if ip6 := naddr.IP.To16(); ip6 != nil {\n\t\tlog.Println(\"not support ipv6 proxy addr\", p.AddrStr, p.ResolvedAddr)\n\t}\n\treturn\n}\n\nfunc (p ProxyAddr) String() string {\n\tif p.AddrStr == p.ResolvedAddr.String() {\n\t\treturn p.AddrStr\n\t}\n\treturn fmt.Sprintf(\"%v(%v)\", p.AddrStr, p.ResolvedAddr)\n}\n\nfunc (p *Config) SetProxyAddrs(addrs []string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tvar proxyAddrs []ProxyAddr\n\tfor _, ipAddr := range addrs {\n\t\tipAddr = strings.TrimSpace(ipAddr)\n\t\tif len(ipAddr) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar proxyAddr ProxyAddr\n\t\tu, err := url.Parse(\"socks5:\/\/\" + strings.TrimSpace(ipAddr))\n\t\tif err != nil {\n\t\t\tlog.Println(\"parse proxy addr failed\", ipAddr, err)\n\t\t\tcontinue\n\t\t}\n\t\tif u.User != nil {\n\t\t\tproxyAddr.User = u.User.Username()\n\t\t\tproxyAddr.Password, _ = u.User.Password()\n\t\t}\n\t\tnaddr, err := net.ResolveTCPAddr(\"tcp\", u.Host)\n\t\tif err != nil || naddr.IP.To4() == nil {\n\t\t\tlog.Println(\"resolve proxy addr failed\", ipAddr, err, naddr.IP)\n\t\t\tcontinue\n\t\t}\n\t\tproxyAddr.AddrStr = u.Host\n\t\tproxyAddr.ResolvedAddr = naddr\n\t\tlog.Println(\"add proxy:\", ipAddr)\n\t\tproxyAddrs = append(proxyAddrs, proxyAddr)\n\t}\n\tif len(proxyAddrs) == 0 {\n\t\tlog.Println(\"no proxy available\")\n\t\treturn\n\t}\n\tp.proxyAddrs = proxyAddrs\n}\n\nfunc (p *Config) GetProxyCount() int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn len(p.proxyAddrs)\n}\n\nfunc (p *Config) GetProxyAddr() *ProxyAddr {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\ttmpAddr := p.proxyAddrs[rand.Intn(len(p.proxyAddrs))]\n\treturn &tmpAddr\n}\nfunc (p *Config) GetProxyAddrs() (addrs []string) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, addr := range p.proxyAddrs {\n\t\taddrs = append(addrs, addr.AddrStr)\n\t}\n\treturn\n}\n\nfunc (p *Config) SetConnectTimeouts(timeout string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tt, err := strconv.Atoi(strings.TrimSpace(timeout))\n\tif err != nil {\n\t\tt = 3000\n\t}\n\tp.connectTimeouts = time.Duration(t) * time.Millisecond\n\tlog.Println(\"set connect timeout to\", p.connectTimeouts)\n}\nfunc (p *Config) GetConnectTimeouts() time.Duration {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn p.connectTimeouts\n}\n\nfunc (p *Config) SetNoLog(isNoLog string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tisNoLog = strings.ToLower(strings.TrimSpace(isNoLog))\n\tif isNoLog == \"true\" || isNoLog == \"1\" {\n\t\tlog.SetOutput(ioutil.Discard)\n\t\tp.proxyNoLog = true\n\t} else {\n\t\tlog.SetOutput(os.Stderr)\n\t\tp.proxyNoLog = false\n\t}\n}\nfunc (p *Config) IsProxyNoLog() bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\treturn p.proxyNoLog\n}\n\nfunc (p *Config) ShouldNotProxy(ip net.IP) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, ipnet := range p.notProxies {\n\t\tif ipnet.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (p *Config) SetNoProxies(addrs []string) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tfor _, addr := range addrs {\n\t\taddr = strings.TrimSpace(addr)\n\t\tif len(addr) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t_, ipnet, err := net.ParseCIDR(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"parse ipnet failed\", err, addr)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"add not proxy addr:\", addr)\n\t\tp.notProxies = append(p.notProxies, ipnet)\n\t}\n}\n\nfunc (p *Config) GetNoProxies() (addrs []string) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tfor _, addr := range p.notProxies {\n\t\taddrs = append(addrs, addr.String())\n\t}\n\treturn\n}\n\nfunc (p *Config) Listen() {\n\tln, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tlog.Println(\"listen failed\", err)\n\t\treturn\n\t}\n\tconfigAddr = ln.Addr().String()\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Accept failed\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgo p.UpdateConfigFromConn(conn)\n\t}\n}\n\nfunc (p *Config) UpdateConfigFromConn(conn net.Conn) {\n\tdefer conn.Close()\n\tlog.Println(\"config server new connection from:\", conn.RemoteAddr())\n\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tsp := strings.SplitN(scanner.Text(), \"=\", 2)\n\t\tif len(sp) < 2 {\n\t\t\tlog.Println(\"invalid config\")\n\t\t\tfmt.Fprintf(conn, \"invalid config %#v\\n\", scanner.Text())\n\t\t\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\t\t\tcontinue\n\t\t}\n\t\tswitch sp[0] {\n\t\tcase \"socks5_proxy\", \"export socks5_proxy\":\n\t\t\tconfig.SetProxyAddrs(strings.Split(sp[1], \",\"))\n\t\t\tfmt.Fprintf(conn, \"OK, current proxyaddrs: %v\\n\",\n\t\t\t\tstrings.Join(config.GetProxyAddrs(), \",\"))\n\t\tcase \"not_proxy\", \"export not_proxy\":\n\t\t\tconfig.SetNoProxies(strings.Split(sp[1], \",\"))\n\t\t\tfmt.Fprintf(conn, \"OK, current no proxy addrs: %v\\n\",\n\t\t\t\tstrings.Join(config.GetNoProxies(), \",\"))\n\t\tcase \"proxy_timeout_ms\", \"export proxy_timeout_ms\":\n\t\t\tconfig.SetConnectTimeouts(sp[1])\n\t\t\tfmt.Fprintf(conn, \"OK, current proxy timeouts: %v\\n\",\n\t\t\t\tuint64(p.GetConnectTimeouts()\/time.Millisecond))\n\t\tcase \"proxy_no_log\", \"export proxy_no_log\":\n\t\t\tconfig.SetNoLog(sp[1])\n\t\t\tfmt.Fprintf(conn, \"OK, proxy_no_log: %v\\n\",\n\t\t\t\tconfig.IsProxyNoLog())\n\t\tdefault:\n\t\t\tlog.Println(\"unknown config\")\n\t\t\tfmt.Fprintf(conn, \"unknown config %#v\\n\", scanner.Text())\n\t\t\tfmt.Fprintf(conn, \"current config:\\n%v\\n\\n\", p)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/kardianos\/osext\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"cred-alert\/inflator\"\n\t\"cred-alert\/kolsch\"\n\t\"cred-alert\/mimetype\"\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/scanners\/diffscanner\"\n\t\"cred-alert\/scanners\/dirscanner\"\n\t\"cred-alert\/scanners\/filescanner\"\n\t\"cred-alert\/sniff\"\n\t\"cred-alert\/sniff\/matchers\"\n)\n\ntype ScanCommand struct {\n\tFile string `short:\"f\" long:\"file\" description:\"the file or directory to scan\" value-name:\"FILE\"`\n\tDiff bool `long:\"diff\" description:\"content to be scanned is a git diff\"`\n\tShowCredentials bool `long:\"show-suspected-credentials\" description:\"allow credentials to be shown in output\"`\n\tRegexp string `long:\"regexp\" description:\"override default regexp matcher\" value-name:\"REGEXP\"`\n\tRegexpFile string `long:\"regexp-file\" description:\"path to regexp file\" value-name:\"PATH\"`\n}\n\nfunc (command *ScanCommand) Execute(args []string) error {\n\twarnIfOldExecutable()\n\n\tlogger := lager.NewLogger(\"scan\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))\n\n\tif command.Regexp != \"\" && command.RegexpFile != \"\" {\n\t\tfmt.Fprintln(os.Stderr, yellow(\"[WARN]\"), \"Two options specified for Regexp, only using: --regexp\", command.Regexp)\n\t}\n\n\tvar sniffer sniff.Sniffer\n\tswitch {\n\tcase command.Regexp != \"\":\n\t\tsniffer = sniff.NewSniffer(matchers.Format(command.Regexp), nil)\n\tcase command.RegexpFile != \"\":\n\t\tfile, err := os.Open(command.RegexpFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\tmatcher := matchers.UpcasedMultiMatcherFromReader(file)\n\t\tsniffer = sniff.NewSniffer(matcher, nil)\n\tdefault:\n\t\tsniffer = sniff.NewDefaultSniffer()\n\t}\n\n\tsignalsCh := make(chan os.Signal)\n\tsignal.Notify(signalsCh, os.Interrupt)\n\n\tvar exitFuncs []func()\n\tgo func() {\n\t\t<-signalsCh\n\t\tlog.SetFlags(0)\n\t\tlog.Println(\"\\ncleaning up...\")\n\t\tfor _, f := range exitFuncs {\n\t\t\tf()\n\t\t}\n\t\tos.Exit(1)\n\t}()\n\n\tvar credsFound int\n\thandler := func(logger lager.Logger, violation scanners.Violation) error {\n\t\tline := violation.Line\n\t\tcredsFound++\n\t\toutput := fmt.Sprintf(\"%s %s:%d\", red(\"[CRED]\"), line.Path, line.LineNumber)\n\t\tif command.ShowCredentials {\n\t\t\toutput = output + fmt.Sprintf(\" [%s]\", violation.Credential())\n\t\t}\n\t\tfmt.Println(output)\n\n\t\treturn nil\n\t}\n\n\tquietLogger := kolsch.NewLogger()\n\n\tswitch {\n\tcase command.File != \"\":\n\t\tfi, err := os.Stat(command.File)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tviolationsDir, err := ioutil.TempDir(\"\", \"cred-alert-cli-violations\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinflateDir, err := ioutil.TempDir(\"\", \"cred-alert-cli\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tarchiveHandler := sniff.NewArchiveViolationHandlerFunc(inflateDir, violationsDir, handler)\n\t\tscanner := dirscanner.New(sniffer, handler, archiveHandler, inflateDir)\n\t\tif fi.IsDir() {\n\t\t\terr = scanner.Scan(quietLogger, command.File)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfile, err := os.Open(command.File)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbr := bufio.NewReader(file)\n\t\t\tif mime, isArchive := mimetype.IsArchive(logger, br); isArchive {\n\n\t\t\t\tinflate := inflator.New()\n\t\t\t\texitFuncs = append(exitFuncs, func() {\n\t\t\t\t\tinflate.Close()\n\t\t\t\t\tos.RemoveAll(inflateDir)\n\t\t\t\t})\n\n\t\t\t\tinflateArchive(quietLogger, inflate, inflateDir, mime, command.File)\n\n\t\t\t\terr = scanner.Scan(quietLogger, inflateDir)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tsniffer.Sniff(logger, filescanner.New(br, command.File), handler)\n\t\t\t}\n\t\t}\n\tcase command.Diff:\n\t\tsniffer.Sniff(logger, diffscanner.NewDiffScanner(os.Stdin), handler)\n\tdefault:\n\t\tsniffer.Sniff(logger, filescanner.New(os.Stdin, \"STDIN\"), handler)\n\t}\n\n\tif credsFound > 0 {\n\t\tfmt.Println()\n\t\tfmt.Println(\"Yikes! Looks like we found some credentials.\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"There are a few cases for what this may be:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"1. An actual credential in a repository which shouldn't be\")\n\t\tfmt.Println(\" committed! Remove it and try committing again.\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"2. An example credential in tests or documentation. You can\")\n\t\tfmt.Println(\" use the words 'fake' and\/or 'example' in your credential so it is\")\n\t\tfmt.Println(\" ignored.\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"3. An actual credential in a credential repository. If you are calling this\")\n\t\tfmt.Println(\" via Git hook and if you want the false positive to go away, you can pass `-n`\")\n\t\tfmt.Println(\" to skip the hook for now.\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"4. A false positive which isn't a credential at all! Please let us know about \")\n\t\tfmt.Println(\" the this case in our Slack channel (#pcf-sec-enablement).\")\n\n\t\texitFuncs = append(exitFuncs, func() {\n\t\t\tos.Exit(3)\n\t\t})\n\t}\n\n\tfor _, f := range exitFuncs {\n\t\tf()\n\t}\n\n\treturn nil\n}\n\nfunc inflateArchive(\n\tlogger lager.Logger,\n\tinflate inflator.Inflator,\n\tinflateDir string,\n\tmime string,\n\tfile string,\n) error {\n\tinflateStart := time.Now()\n\tfmt.Print(\"Inflating archive... \")\n\terr := inflate.Inflate(logger, mime, file, inflateDir)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", red(\"FAILED\"))\n\t\treturn err\n\t}\n\tfmt.Printf(\"%s\\n\", green(\"DONE\"))\n\n\tfmt.Println()\n\tfmt.Println(\"Time taken (inflating):\", time.Since(inflateStart))\n\tfmt.Println(\"Any archive inflation errors can be found in: \", inflate.LogPath())\n\tfmt.Println()\n\n\treturn nil\n}\n\nfunc warnIfOldExecutable() {\n\tconst twoWeeks = 14 * 24 * time.Hour\n\n\texePath, err := osext.Executable()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinfo, err := os.Stat(exePath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmtime := info.ModTime()\n\n\tif time.Since(mtime) > twoWeeks {\n\t\tfmt.Fprintln(os.Stderr, yellow(\"[WARN]\"), \"Executable is old! Please consider running `cred-alert-cli update`.\")\n\t}\n}\n<commit_msg>reduce complexity of the scan command<commit_after>package commands\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/kardianos\/osext\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"cred-alert\/inflator\"\n\t\"cred-alert\/kolsch\"\n\t\"cred-alert\/mimetype\"\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/scanners\/diffscanner\"\n\t\"cred-alert\/scanners\/dirscanner\"\n\t\"cred-alert\/scanners\/filescanner\"\n\t\"cred-alert\/sniff\"\n\t\"cred-alert\/sniff\/matchers\"\n)\n\ntype ScanCommand struct {\n\tFile string `short:\"f\" long:\"file\" description:\"the file or directory to scan\" value-name:\"FILE\"`\n\tDiff bool `long:\"diff\" description:\"content to be scanned is a git diff\"`\n\tShowCredentials bool `long:\"show-suspected-credentials\" description:\"allow credentials to be shown in output\"`\n\tRegexp string `long:\"regexp\" description:\"override default regexp matcher\" value-name:\"REGEXP\"`\n\tRegexpFile string `long:\"regexp-file\" description:\"path to regexp file\" value-name:\"PATH\"`\n}\n\nfunc (command *ScanCommand) Execute(args []string) error {\n\twarnIfOldExecutable()\n\n\tlogger := lager.NewLogger(\"scan\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))\n\n\tif command.Regexp != \"\" && command.RegexpFile != \"\" {\n\t\tfmt.Fprintln(os.Stderr, yellow(\"[WARN]\"), \"Two options specified for Regexp, only using: --regexp\", command.Regexp)\n\t}\n\n\tsniffer, err := command.buildSniffer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclean := newCleanup()\n\thandler := newCredentialCounter(command.ShowCredentials)\n\n\tswitch {\n\tcase command.File != \"\":\n\t\tif err := command.scanFile(logger, sniffer, handler.HandleViolation, clean); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase command.Diff:\n\t\tsniffer.Sniff(logger, diffscanner.NewDiffScanner(os.Stdin), handler.HandleViolation)\n\tdefault:\n\t\tsniffer.Sniff(logger, filescanner.New(os.Stdin, \"STDIN\"), handler.HandleViolation)\n\t}\n\n\tif handler.count > 0 {\n\t\tshowCredentialWarning()\n\t\tclean.exit(3)\n\t}\n\n\treturn nil\n}\n\nfunc (c *ScanCommand) scanFile(logger lager.Logger, sniffer sniff.Sniffer, handleFunc sniff.ViolationHandlerFunc, cleaner *cleanup) error {\n\tfi, err := os.Stat(c.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tviolationsDir, err := ioutil.TempDir(\"\", \"cred-alert-cli-violations\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinflateDir, err := ioutil.TempDir(\"\", \"cred-alert-cli\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquietLogger := kolsch.NewLogger()\n\tarchiveHandler := sniff.NewArchiveViolationHandlerFunc(inflateDir, violationsDir, handleFunc)\n\tscanner := dirscanner.New(sniffer, handleFunc, archiveHandler, inflateDir)\n\n\tif fi.IsDir() {\n\t\treturn scanner.Scan(quietLogger, c.File)\n\t}\n\n\tfile, err := os.Open(c.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbr := bufio.NewReader(file)\n\tif mime, isArchive := mimetype.IsArchive(logger, br); isArchive {\n\t\tinflate := inflator.New()\n\t\tcleaner.register(func() {\n\t\t\tinflate.Close()\n\t\t\tos.RemoveAll(inflateDir)\n\t\t})\n\n\t\tinflateArchive(quietLogger, inflate, inflateDir, mime, c.File)\n\n\t\terr = scanner.Scan(quietLogger, inflateDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsniffer.Sniff(logger, filescanner.New(br, c.File), handleFunc)\n\t}\n\n\treturn nil\n}\n\nfunc (c *ScanCommand) buildSniffer() (sniff.Sniffer, error) {\n\tvar sniffer sniff.Sniffer\n\n\tswitch {\n\tcase c.Regexp != \"\":\n\t\tsniffer = sniff.NewSniffer(matchers.Format(c.Regexp), nil)\n\tcase c.RegexpFile != \"\":\n\t\tfile, err := os.Open(c.RegexpFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmatcher := matchers.UpcasedMultiMatcherFromReader(file)\n\n\t\tif err := file.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsniffer = sniff.NewSniffer(matcher, nil)\n\tdefault:\n\t\tsniffer = sniff.NewDefaultSniffer()\n\t}\n\n\treturn sniffer, nil\n}\n\ntype cleanup struct {\n\twork []func()\n}\n\nfunc newCleanup() *cleanup {\n\tclean := &cleanup{}\n\n\tsignalsCh := make(chan os.Signal)\n\tsignal.Notify(signalsCh, os.Interrupt)\n\n\tgo func() {\n\t\t<-signalsCh\n\t\tlog.SetFlags(0)\n\t\tlog.Println(\"\\ncleaning up...\")\n\t\tclean.exit(1)\n\t}()\n\n\treturn clean\n}\n\nfunc (c *cleanup) register(fn func()) {\n\tc.work = append(c.work, fn)\n}\n\nfunc (c cleanup) exit(status int) {\n\tfor _, w := range c.work {\n\t\tw()\n\t}\n\n\tos.Exit(status)\n}\n\nfunc showCredentialWarning() {\n\tfmt.Println()\n\tfmt.Println(\"Yikes! Looks like we found some credentials.\")\n\tfmt.Println()\n\tfmt.Println(\"There are a few cases for what this may be:\")\n\tfmt.Println()\n\tfmt.Println(\"1. An actual credential in a repository which shouldn't be\")\n\tfmt.Println(\" committed! Remove it and try committing again.\")\n\tfmt.Println()\n\tfmt.Println(\"2. An example credential in tests or documentation. You can\")\n\tfmt.Println(\" use the words 'fake' and\/or 'example' in your credential so it is\")\n\tfmt.Println(\" ignored.\")\n\tfmt.Println()\n\tfmt.Println(\"3. An actual credential in a credential repository. If you are calling this\")\n\tfmt.Println(\" via Git hook and if you want the false positive to go away, you can pass `-n`\")\n\tfmt.Println(\" to skip the hook for now.\")\n\tfmt.Println()\n\tfmt.Println(\"4. A false positive which isn't a credential at all! Please let us know about \")\n\tfmt.Println(\" the this case in our Slack channel (#pcf-sec-enablement).\")\n}\n\nfunc inflateArchive(\n\tlogger lager.Logger,\n\tinflate inflator.Inflator,\n\tinflateDir string,\n\tmime string,\n\tfile string,\n) error {\n\tinflateStart := time.Now()\n\tfmt.Print(\"Inflating archive... \")\n\terr := inflate.Inflate(logger, mime, file, inflateDir)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", red(\"FAILED\"))\n\t\treturn err\n\t}\n\tfmt.Printf(\"%s\\n\", green(\"DONE\"))\n\n\tfmt.Println()\n\tfmt.Println(\"Time taken (inflating):\", time.Since(inflateStart))\n\tfmt.Println(\"Any archive inflation errors can be found in: \", inflate.LogPath())\n\tfmt.Println()\n\n\treturn nil\n}\n\nfunc warnIfOldExecutable() {\n\tconst twoWeeks = 14 * 24 * time.Hour\n\n\texePath, err := osext.Executable()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinfo, err := os.Stat(exePath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmtime := info.ModTime()\n\n\tif time.Since(mtime) > twoWeeks {\n\t\tfmt.Fprintln(os.Stderr, yellow(\"[WARN]\"), \"Executable is old! Please consider running `cred-alert-cli update`.\")\n\t}\n}\n\nfunc newCredentialCounter(showCreds bool) *credentialCounter {\n\treturn &credentialCounter{\n\t\tshowCreds: showCreds,\n\t}\n}\n\ntype credentialCounter struct {\n\tcount int\n\tshowCreds bool\n}\n\nfunc (c *credentialCounter) HandleViolation(logger lager.Logger, violation scanners.Violation) error {\n\tline := violation.Line\n\tc.count++\n\toutput := fmt.Sprintf(\"%s %s:%d\", red(\"[CRED]\"), line.Path, line.LineNumber)\n\tif c.showCreds {\n\t\toutput = output + fmt.Sprintf(\" [%s]\", violation.Credential())\n\t}\n\tfmt.Println(output)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mime\n\nimport (\n\t\"strings\"\n)\n\n\/\/ isTSpecial returns true if rune is in 'tspecials' as defined by RFC\n\/\/ 1531 and RFC 2045.\nfunc isTSpecial(rune int) bool {\n\treturn strings.IndexRune(`()<>@,;:\\\"\/[]?=`, rune) != -1\n}\n\n\/\/ IsTokenChar returns true if rune is in 'token' as defined by RFC\n\/\/ 1531 and RFC 2045.\nfunc IsTokenChar(rune int) bool {\n\t\/\/ token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,\n\t\/\/ or tspecials>\n\treturn rune > 0x20 && rune < 0x7f && !isTSpecial(rune)\n}\n\n\/\/ IsQText returns true if rune is in 'qtext' as defined by RFC 822.\nfunc IsQText(rune int) bool {\n\t\/\/ CHAR = <any ASCII character> ; ( 0-177, 0.-127.)\n\t\/\/ qtext = <any CHAR excepting <\">, ; => may be folded\n\t\/\/ \"\\\" & CR, and including\n\t\/\/ linear-white-space>\n\tswitch rune {\n\tcase int('\"'), int('\\\\'), int('\\r'):\n\t\treturn false\n\t}\n\treturn rune < 0x80\n}\n<commit_msg>mime: delete unnecessary constant conversions.<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mime\n\nimport (\n\t\"strings\"\n)\n\n\/\/ isTSpecial returns true if rune is in 'tspecials' as defined by RFC\n\/\/ 1531 and RFC 2045.\nfunc isTSpecial(rune int) bool {\n\treturn strings.IndexRune(`()<>@,;:\\\"\/[]?=`, rune) != -1\n}\n\n\/\/ IsTokenChar returns true if rune is in 'token' as defined by RFC\n\/\/ 1531 and RFC 2045.\nfunc IsTokenChar(rune int) bool {\n\t\/\/ token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,\n\t\/\/ or tspecials>\n\treturn rune > 0x20 && rune < 0x7f && !isTSpecial(rune)\n}\n\n\/\/ IsQText returns true if rune is in 'qtext' as defined by RFC 822.\nfunc IsQText(rune int) bool {\n\t\/\/ CHAR = <any ASCII character> ; ( 0-177, 0.-127.)\n\t\/\/ qtext = <any CHAR excepting <\">, ; => may be folded\n\t\/\/ \"\\\" & CR, and including\n\t\/\/ linear-white-space>\n\tswitch rune {\n\tcase '\"', '\\\\', '\\r':\n\t\treturn false\n\t}\n\treturn rune < 0x80\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Automatically downloads and configures Steam grid images for all games in a\n\/\/ given Steam installation.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\n\/\/ Prints an error and quits.\nfunc errorAndExit(err error) {\n\tfmt.Println(err.Error())\n\tbufio.NewReader(os.Stdin).ReadBytes('\\n')\n\tos.Exit(0)\n}\n\nfunc main() {\n\thttp.DefaultTransport.(*http.Transport).ResponseHeaderTimeout = time.Second * 10\n\tstartApplication()\n}\n\nfunc startApplication() {\n\tfmt.Println(\"Loading overlays...\")\n\toverlays, err := LoadOverlays(filepath.Join(filepath.Dir(os.Args[0]), \"overlays by category\"))\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\tif len(overlays) == 0 {\n\t\tfmt.Println(\"No category overlays found. You can put overlay images in the folder 'overlays by category', where the filename is the game category.\\n\\nYou can find many user-created overlays at https:\/\/www.reddit.com\/r\/steamgrid\/wiki\/overlays .\\n\\nContinuing without overlays...\\n\")\n\t} else {\n\t\tfmt.Printf(\"Loaded %v overlays. \\n\\nYou can find many user-created overlays at https:\/\/www.reddit.com\/r\/steamgrid\/wiki\/overlays .\\n\\n\", len(overlays))\n\t}\n\n\tfmt.Println(\"Looking for Steam directory...\")\n\tinstallationDir, err := GetSteamInstallation()\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\n\tfmt.Println(\"Loading users...\")\n\tusers, err := GetUsers(installationDir)\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\tif len(users) == 0 {\n\t\terrorAndExit(errors.New(\"No users found at Steam\/userdata. Have you used Steam before in this computer?\"))\n\t}\n\n\tnOverlaysApplied := 0\n\tnDownloaded := 0\n\tvar notFounds []*Game\n\tvar searchedGames []*Game\n\tvar failedGames []*Game\n\tvar errorMessages []string\n\n\tfor _, user := range users {\n\t\tfmt.Println(\"Loading games for \" + user.Name)\n\t\tgridDir := filepath.Join(user.Dir, \"config\", \"grid\")\n\n\t\terr = os.MkdirAll(filepath.Join(gridDir, \"originals\"), 0777)\n\t\tif err != nil {\n\t\t\terrorAndExit(err)\n\t\t}\n\n\t\tgames := GetGames(user)\n\n\t\tfmt.Println(\"Loading existing images and backups...\")\n\n\t\ti := 0\n\t\tfor _, game := range games {\n\t\t\ti++\n\n\t\t\toverridePath := filepath.Join(filepath.Dir(os.Args[0]), \"games\")\n\t\t\tLoadExisting(overridePath, gridDir, game)\n\t\t\t\/\/ This cleans up unused backups and images for the same game but with different extensions.\n\t\t\terr = RemoveExisting(gridDir, game.ID)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\n\t\t\tvar name string\n\t\t\tif game.Name == \"\" {\n\t\t\t\tgame.Name = GetGameName(game.ID)\n\t\t\t}\n\n\t\t\tif game.Name != \"\" {\n\t\t\t\tname = game.Name\n\t\t\t} else {\n\t\t\t\tname = \"unknown game with id \" + game.ID\n\t\t\t}\n\t\t\tfmt.Printf(\"Processing %v (%v\/%v)\", name, i, len(games))\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Download if missing.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\tif game.ImageSource == \"\" {\n\t\t\t\tfromSearch, err := DownloadImage(gridDir, game)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\n\t\t\t\tif game.ImageSource == \"\" {\n\t\t\t\t\tnotFounds = append(notFounds, game)\n\t\t\t\t\tfmt.Printf(\" not found\\n\")\n\t\t\t\t\t\/\/ Game has no image, skip it.\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err == nil {\n\t\t\t\t\tnDownloaded++\n\t\t\t\t}\n\n\t\t\t\tif fromSearch {\n\t\t\t\t\tsearchedGames = append(searchedGames, game)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\" found from %v\\n\", game.ImageSource)\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Apply overlay.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\terr := ApplyOverlay(game, overlays)\n\t\t\tif err != nil {\n\t\t\t\tprint(err.Error(), \"\\n\")\n\t\t\t\tfailedGames = append(failedGames, game)\n\t\t\t\terrorMessages = append(errorMessages, err.Error())\n\t\t\t}\n\t\t\tif game.OverlayImageBytes != nil {\n\t\t\t\tnOverlaysApplied++\n\t\t\t} else {\n\t\t\t\tgame.OverlayImageBytes = game.CleanImageBytes\n\t\t\t}\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Save result.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\terr = BackupGame(gridDir, game)\n\t\t\tif err != nil {\n\t\t\t\terrorAndExit(err)\n\t\t\t}\n\t\t\timagePath := filepath.Join(gridDir, game.ID+game.ImageExt)\n\t\t\terr = ioutil.WriteFile(imagePath, game.OverlayImageBytes, 0666)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Failed to write image for %v because: %v\\n\", game.Name, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"\\n\\n%v images downloaded and %v overlays applied.\\n\\n\", nDownloaded, nOverlaysApplied)\n\tif len(searchedGames) >= 1 {\n\t\tfmt.Printf(\"%v images were found with a Google search and may not be accurate:\\n\", len(searchedGames))\n\t\tfor _, game := range searchedGames {\n\t\t\tfmt.Printf(\"* %v (steam id %v)\\n\", game.Name, game.ID)\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tif len(notFounds) >= 1 {\n\t\tfmt.Printf(\"%v images could not be found anywhere:\\n\", len(notFounds))\n\t\tfor _, game := range notFounds {\n\t\t\tfmt.Printf(\"- %v (id %v)\\n\", game.Name, game.ID)\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tif len(failedGames) >= 1 {\n\t\tfmt.Printf(\"%v images were found but had errors and could not be overlaid:\\n\", len(failedGames))\n\t\tfor i, game := range failedGames {\n\t\t\tfmt.Printf(\"- %v (id %v) (%v)\\n\", game.Name, game.ID, errorMessages[i])\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tfmt.Println(\"Open Steam in grid view to see the results!\\n\\nPress enter to close.\")\n\n\tbufio.NewReader(os.Stdin).ReadBytes('\\n')\n}\n<commit_msg>Added Help details.<commit_after>\/\/ Automatically downloads and configures Steam grid images for all games in a\n\/\/ given Steam installation.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\n\/\/ Prints an error and quits.\nfunc errorAndExit(err error) {\n\tfmt.Println(err.Error())\n\tbufio.NewReader(os.Stdin).ReadBytes('\\n')\n\tos.Exit(0)\n}\n\nfunc main() {\n\thttp.DefaultTransport.(*http.Transport).ResponseHeaderTimeout = time.Second * 10\n\tstartApplication()\n}\n\nfunc startApplication() {\n\tfmt.Println(\"Loading overlays...\")\n\toverlays, err := LoadOverlays(filepath.Join(filepath.Dir(os.Args[0]), \"overlays by category\"))\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\tif len(overlays) == 0 {\n\t\tfmt.Println(\"No category overlays found. You can put overlay images in the folder 'overlays by category', where the filename is the game category.\\n\\nYou can find many user-created overlays at https:\/\/www.reddit.com\/r\/steamgrid\/wiki\/overlays .\\n\\nContinuing without overlays...\\n\")\n\t} else {\n\t\tfmt.Printf(\"Loaded %v overlays. \\n\\nYou can find many user-created overlays at https:\/\/www.reddit.com\/r\/steamgrid\/wiki\/overlays .\\n\\n\", len(overlays))\n\t}\n\n\tfmt.Println(\"Looking for Steam directory...If SteamGrid doesn´t find the directory automatically, launch it with an argument linking to the Steam directory.\")\n\tinstallationDir, err := GetSteamInstallation()\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\n\tfmt.Println(\"Loading users...\")\n\tusers, err := GetUsers(installationDir)\n\tif err != nil {\n\t\terrorAndExit(err)\n\t}\n\tif len(users) == 0 {\n\t\terrorAndExit(errors.New(\"No users found at Steam\/userdata. Have you used Steam before in this computer?\"))\n\t}\n\n\tnOverlaysApplied := 0\n\tnDownloaded := 0\n\tvar notFounds []*Game\n\tvar searchedGames []*Game\n\tvar failedGames []*Game\n\tvar errorMessages []string\n\n\tfor _, user := range users {\n\t\tfmt.Println(\"Loading games for \" + user.Name)\n\t\tgridDir := filepath.Join(user.Dir, \"config\", \"grid\")\n\n\t\terr = os.MkdirAll(filepath.Join(gridDir, \"originals\"), 0777)\n\t\tif err != nil {\n\t\t\terrorAndExit(err)\n\t\t}\n\n\t\tgames := GetGames(user)\n\n\t\tfmt.Println(\"Loading existing images and backups...\")\n\n\t\ti := 0\n\t\tfor _, game := range games {\n\t\t\ti++\n\n\t\t\toverridePath := filepath.Join(filepath.Dir(os.Args[0]), \"games\")\n\t\t\tLoadExisting(overridePath, gridDir, game)\n\t\t\t\/\/ This cleans up unused backups and images for the same game but with different extensions.\n\t\t\terr = RemoveExisting(gridDir, game.ID)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\n\t\t\tvar name string\n\t\t\tif game.Name == \"\" {\n\t\t\t\tgame.Name = GetGameName(game.ID)\n\t\t\t}\n\n\t\t\tif game.Name != \"\" {\n\t\t\t\tname = game.Name\n\t\t\t} else {\n\t\t\t\tname = \"unknown game with id \" + game.ID\n\t\t\t}\n\t\t\tfmt.Printf(\"Processing %v (%v\/%v)\", name, i, len(games))\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Download if missing.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\tif game.ImageSource == \"\" {\n\t\t\t\tfromSearch, err := DownloadImage(gridDir, game)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\n\t\t\t\tif game.ImageSource == \"\" {\n\t\t\t\t\tnotFounds = append(notFounds, game)\n\t\t\t\t\tfmt.Printf(\" not found\\n\")\n\t\t\t\t\t\/\/ Game has no image, skip it.\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err == nil {\n\t\t\t\t\tnDownloaded++\n\t\t\t\t}\n\n\t\t\t\tif fromSearch {\n\t\t\t\t\tsearchedGames = append(searchedGames, game)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\" found from %v\\n\", game.ImageSource)\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Apply overlay.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\terr := ApplyOverlay(game, overlays)\n\t\t\tif err != nil {\n\t\t\t\tprint(err.Error(), \"\\n\")\n\t\t\t\tfailedGames = append(failedGames, game)\n\t\t\t\terrorMessages = append(errorMessages, err.Error())\n\t\t\t}\n\t\t\tif game.OverlayImageBytes != nil {\n\t\t\t\tnOverlaysApplied++\n\t\t\t} else {\n\t\t\t\tgame.OverlayImageBytes = game.CleanImageBytes\n\t\t\t}\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Save result.\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\terr = BackupGame(gridDir, game)\n\t\t\tif err != nil {\n\t\t\t\terrorAndExit(err)\n\t\t\t}\n\t\t\timagePath := filepath.Join(gridDir, game.ID+game.ImageExt)\n\t\t\terr = ioutil.WriteFile(imagePath, game.OverlayImageBytes, 0666)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Failed to write image for %v because: %v\\n\", game.Name, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"\\n\\n%v images downloaded and %v overlays applied.\\n\\n\", nDownloaded, nOverlaysApplied)\n\tif len(searchedGames) >= 1 {\n\t\tfmt.Printf(\"%v images were found with a Google search and may not be accurate:\\n\", len(searchedGames))\n\t\tfor _, game := range searchedGames {\n\t\t\tfmt.Printf(\"* %v (steam id %v)\\n\", game.Name, game.ID)\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tif len(notFounds) >= 1 {\n\t\tfmt.Printf(\"%v images could not be found anywhere:\\n\", len(notFounds))\n\t\tfor _, game := range notFounds {\n\t\t\tfmt.Printf(\"- %v (id %v)\\n\", game.Name, game.ID)\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tif len(failedGames) >= 1 {\n\t\tfmt.Printf(\"%v images were found but had errors and could not be overlaid:\\n\", len(failedGames))\n\t\tfor i, game := range failedGames {\n\t\t\tfmt.Printf(\"- %v (id %v) (%v)\\n\", game.Name, game.ID, errorMessages[i])\n\t\t}\n\n\t\tfmt.Printf(\"\\n\\n\")\n\t}\n\n\tfmt.Println(\"Open Steam in grid view to see the results!\\n\\nPress enter to close.\")\n\n\tbufio.NewReader(os.Stdin).ReadBytes('\\n')\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Artur Grabowski. All rights reserved.\n\/\/ Use of this source code is governed by a ISC-style\n\/\/ license that can be found in the LICENSE file.\npackage stopwatch\n\nimport (\n\t\"time\"\n)\n\ntype Stopwatch struct {\n\tacc time.Duration\n\tt time.Time\n}\n\nfunc New() Stopwatch {\n\treturn Stopwatch{}\n}\n\nfunc (s *Stopwatch) Start() {\n\ts.t = time.Now()\n}\n\nfunc (s *Stopwatch) Stop() {\n\ts.acc = time.Since(s.t)\n}\n\nfunc (s *Stopwatch) Handover(s2 *Stopwatch) {\n\ts2.t = time.Now()\n\ts.acc = s.t.Sub(s2.t)\n}\n\nfunc (s *Stopwatch) Snapshot() Stopwatch {\n\tret := *s\n\tret.Stop()\n\treturn ret\n}\n\nfunc (s *Stopwatch) Reset() {\n\t*s = Stopwatch{}\n}\n\nfunc (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}\n\nfunc (s Stopwatch) Seconds() float64 {\n\treturn s.acc.Seconds()\n}\n\nfunc (s Stopwatch) String() string {\n\treturn s.acc.String()\n}\n<commit_msg>Document.<commit_after>\/\/ Copyright 2013 Artur Grabowski. All rights reserved.\n\/\/ Use of this source code is governed by a ISC-style\n\/\/ license that can be found in the LICENSE file.\npackage stopwatch\n\nimport (\n\t\"time\"\n)\n\n\/\/ A Stopwatch is used for keeping track of elapsed wall-clock time.\n\/\/ Stopwatches don't keep track if they are running or not,\n\/\/ it's left to the user to deal with this. Unmatched Start\/Stop\n\/\/ calls will lead to the stopwatch having an undefined value.\n\/\/\n\/\/ Stopwatches are currently limited by the data type of time.Duration\n\/\/ and will not measure longer durations than 290 years.\ntype Stopwatch struct {\n\tacc time.Duration\n\tt time.Time\n}\n\n\/\/ Returns a new Stopwatch.\nfunc New() Stopwatch {\n\treturn Stopwatch{}\n}\n\n\/\/ Starts counting elapsed wall-clock time \nfunc (s *Stopwatch) Start() {\n\ts.t = time.Now()\n}\n\n\/\/ Stops counting elapsed wall-clock time\nfunc (s *Stopwatch) Stop() {\n\ts.acc = time.Since(s.t)\n}\n\n\/\/ Equivalent to s.Stop(); s2.Start() with no time elapsed between,\n\/\/ just using one time stamp.\nfunc (s *Stopwatch) Handover(s2 *Stopwatch) {\n\ts2.t = time.Now()\n\ts.acc = s.t.Sub(s2.t)\n}\n\n\/\/ Takes a snapshot of a currently running stopwatch. If the stopwatch\n\/\/ wasn't running the result is undefined.\nfunc (s *Stopwatch) Snapshot() Stopwatch {\n\tret := *s\n\tret.Stop()\n\treturn ret\n}\n\n\/\/ Reset the accumulated time in the stopwatch and \nfunc (s *Stopwatch) Reset() {\n\t*s = Stopwatch{}\n}\n\n\/\/ Returns the accumulated sanoseconds of the stopwatch. If the stopwatch\n\/\/ is currently running that will not be accounted for.\nfunc (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}\n\n\/\/ Returns the accumulated seconds of the stopwatch. If the stopwatch is\n\/\/ currently running that will not be accounted for.\nfunc (s Stopwatch) Seconds() float64 {\n\treturn s.acc.Seconds()\n}\n\nfunc (s Stopwatch) String() string {\n\treturn s.acc.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/cavaliercoder\/grab\"\n\tui \"github.com\/gizak\/termui\"\n\t\"github.com\/mholt\/archiver\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nconst (\n\tviewGoVersions = \"goversions\"\n\tviewGoFiles = \"gofiles\"\n)\n\nvar mutex = &sync.Mutex{}\n\nvar viewInFocus = viewGoFiles\nvar selectedGoVersion = 0\nvar selectedGoFile = 0\n\nvar goVersions []string\nvar goFiles []string\nvar doc *goquery.Document\nvar downloadFileLinks = make(map[string]string)\nvar downloadFileSHA = make(map[string]string)\n\nvar uiVersionsList = ui.NewList()\nvar uiFilesList = ui.NewList()\n\n\/\/var uiInfo = ui.NewPar(\"Info\")\nvar uiInfo = ui.NewList()\nvar uiDownloadProgress = ui.NewGauge()\nvar uiDownloadSpeed = ui.NewSparklines()\n\nfunc getGolangData(url string) {\n\tvar err error\n\tdoc, err = goquery.NewDocument(url)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr, err := regexp.Compile(\"([a-zA-Z0-9.]+)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgoVersions = nil\n\tdoc.Find(\"div .expanded > h2\").Each(func(i int, s *goquery.Selection) {\n\t\tgoVersions = append(goVersions, r.FindString(s.Text()))\n\t})\n}\n\nfunc updateGoVersions() {\n\tvar strs []string\n\n\tcheckLimits()\n\n\tfor index, s := range goVersions {\n\t\tif index == selectedGoVersion {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"[%s](fg-white,bg-green)\", s))\n\t\t} else {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"%s\", s))\n\t\t}\n\t}\n\tuiVersionsList.Items = strs\n}\n\nfunc addLoggingText(text string) {\n\tuiInfo.Items = append(uiInfo.Items, text)\n\theight := uiInfo.GetHeight()\n\tstartPosition := len(uiInfo.Items) - height + 2\n\tif startPosition < 0 {\n\t\tstartPosition = 0\n\t}\n\tuiInfo.Items = uiInfo.Items[startPosition:]\n\n}\n\nfunc downloadFile(url string) error {\n\n\taddLoggingText(fmt.Sprintf(\"Downloading %s...\\n\", url))\n\trespch, err := grab.GetAsync(\".\", url)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error downloading %s: %v\\n\", url, err)\n\t\tos.Exit(1)\n\t}\n\n\taddLoggingText(fmt.Sprintf(\"Initializing download...\\n\"))\n\n\tuiDownloadProgress.Percent = 0\n\n\tvar resp *grab.Response\n\n\tgo func() {\n\t\tt := time.NewTicker(100 * time.Millisecond)\n\t\tvar sinceLastTime uint64\n\t\tvar spdata []int\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-respch:\n\t\t\t\tif r != nil {\n\t\t\t\t\tresp = r\n\t\t\t\t\taddLoggingText(fmt.Sprintf(\"Downloading...\\n\"))\n\t\t\t\t}\n\t\t\tcase <-t.C:\n\t\t\t\tif resp == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif resp.Error != nil {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"Error downloading %s: %v\\n\", url, resp.Error)\n\t\t\t\t\t\/\/return resp.Error\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif resp.IsComplete() {\n\t\t\t\t\taddLoggingText(fmt.Sprintf(\"Successfully downloaded to .\/%s\\n\", resp.Filename))\n\n\t\t\t\t\tuiDownloadProgress.Percent = 100\n\n\t\t\t\t\taddLoggingText(\"Verifying...\\n\")\n\t\t\t\t\tsha := downloadFileSHA[getGoFile()]\n\n\t\t\t\t\tvar hasher hash.Hash\n\t\t\t\t\tif len(sha) == 64 {\n\t\t\t\t\t\thasher = sha256.New()\n\t\t\t\t\t} else if len(sha) == 40 {\n\t\t\t\t\t\thasher = sha1.New()\n\t\t\t\t\t} else {\n\t\t\t\t\t\taddLoggingText(fmt.Sprintf(\"Unknown hash length of %d.\\n\", len(sha)))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tf, err := os.Open(resp.Filename)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\taddLoggingText(fmt.Sprintf(err.Error()))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdefer f.Close()\n\n\t\t\t\t\tif _, err := io.Copy(hasher, f); err != nil {\n\t\t\t\t\t\taddLoggingText(fmt.Sprintf(err.Error()))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tfileSHA := hex.EncodeToString(hasher.Sum(nil))\n\n\t\t\t\t\tif sha != fileSHA {\n\t\t\t\t\t\taddLoggingText(\"Download file doesn't match SHA.\\n\")\n\t\t\t\t\t\taddLoggingText(fmt.Sprintf(\"File SHA: %s\\nExpected SHA:\\n%s\\n\", fileSHA, sha))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\taddLoggingText(fmt.Sprintf(\"File SHA matches: %s\\n\", fileSHA))\n\n\t\t\t\t\taddLoggingText(fmt.Sprintf(\"Extracting...\\n\"))\n\t\t\t\t\tif strings.Contains(resp.Filename, \".zip\") {\n\t\t\t\t\t\tarchiver.Zip.Open(resp.Filename, \".\")\n\t\t\t\t\t} else if strings.Contains(resp.Filename, \".tar.gz\") {\n\t\t\t\t\t\tarchiver.TarGz.Open(resp.Filename, \".\")\n\t\t\t\t\t}\n\t\t\t\t\tos.Rename(\"go\", getGoVersion())\n\t\t\t\t\taddLoggingText(fmt.Sprintf(\"Done extracting.\\n\"))\n\t\t\t\t\tt.Stop()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tuiDownloadProgress.Percent = int(100 * resp.Progress())\n\n\t\t\t\tthisTime := resp.BytesTransferred() - sinceLastTime\n\t\t\t\tsinceLastTime = resp.BytesTransferred()\n\t\t\t\tspdata = append(spdata, int(thisTime))\n\t\t\t\tuiDownloadSpeed.Lines[0].Data = spdata\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc updateFilesView(s string) error {\n\tvar err error\n\n\tvar b bytes.Buffer\n\ttable := tablewriter.NewWriter(&b)\n\n\tvar headers []string\n\tvar data [][]string\n\tgoFiles = nil\n\tdoc.Find(\"div[id=\\\"\" + strings.TrimSpace(s) + \"\\\"] > .expanded > table > thead > tr > th\").Each(func(i int, s *goquery.Selection) {\n\t\theaders = append(headers, s.Text())\n\t})\n\n\tdoc.Find(\"div[id=\\\"\" + strings.TrimSpace(s) + \"\\\"] > .expanded > table > tbody > tr\").Each(func(i int, tr *goquery.Selection) {\n\t\tvar row []string\n\t\tvar a *goquery.Selection\n\t\ttr.Find(\"td\").Each(func(i int, td *goquery.Selection) {\n\n\t\t\tif a == nil {\n\t\t\t\ta = td.Find(\"a\")\n\t\t\t}\n\t\t\tif link, exists := a.Attr(\"href\"); exists {\n\t\t\t\tdownloadFileLinks[a.Text()] = link\n\t\t\t}\n\n\t\t\ttt := td.Find(\"tt\")\n\t\t\tif tt != nil && a != nil {\n\t\t\t\tdownloadFileSHA[a.Text()] = tt.Text()\n\t\t\t}\n\t\t\trow = append(row, td.Text())\n\t\t})\n\t\tif len(row) > 0 {\n\t\t\tgoFiles = append(goFiles, row[0])\n\t\t}\n\t\tdata = append(data, row)\n\t})\n\ttable.SetHeader(headers)\n\t\/\/table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\ttable.SetBorder(false)\n\t\/\/table.SetCenterSeparator(\"|\")\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tcheckLimits()\n\n\tstrs := strings.Split(b.String(), \"\\n\")\n\n\tfor index, s := range strs {\n\t\tif index == selectedGoFile+2 { \/\/ +2 To skip headers.\n\t\t\tstrs[index] = \"[\" + strings.TrimRight(s, \"\\n\") + \"](fg-white,bg-blue)\\n\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tuiFilesList.Items = strs\n\n\treturn err\n}\n\nfunc getGoVersion() string {\n\treturn goVersions[selectedGoVersion]\n}\n\nfunc getGoFile() string {\n\treturn goFiles[selectedGoFile]\n}\n\nfunc checkLimits() {\n\tif selectedGoVersion < 0 {\n\t\tselectedGoVersion = 0\n\t} else if selectedGoVersion >= len(goVersions) {\n\t\tselectedGoVersion = len(goVersions) - 1\n\t}\n\n\tif selectedGoFile < 0 {\n\t\tselectedGoFile = 0\n\t} else if selectedGoFile >= len(goFiles) {\n\t\tselectedGoFile = len(goFiles) - 1\n\t}\n}\n\nfunc main() {\n\n\turl := \"https:\/\/golang.org\/dl\/\"\n\tlog.Printf(\"Getting data from %s...\\n\", url)\n\tgetGolangData(url)\n\n\tif err := ui.Init(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer ui.Close()\n\n\tspark := ui.NewSparkline()\n\tspark.Height = 8\n\tspark.LineColor = ui.ColorCyan\n\tspark.TitleColor = ui.ColorWhite\n\n\tuiDownloadSpeed.Add(spark)\n\tuiDownloadSpeed.Height = 11\n\tuiDownloadSpeed.BorderLabel = \"Download Speed\"\n\n\tuiFilesList.Height = len(goVersions) + 2\n\tuiFilesList.BorderLabel = \"Go Files\"\n\tuiFilesList.Width = 20\n\tuiFilesList.Border = true\n\n\tuiInfo.BorderLabel = \"Info Panel\"\n\tuiInfo.Items = []string{\"Ready.\"}\n\t\/\/uiInfo.Text = \"Ready.\\n\"\n\tuiInfo.Height = 14\n\n\tupdateFilesView(getGoVersion())\n\n\tuiDownloadProgress.LabelAlign = ui.AlignCenter\n\tuiDownloadProgress.Height = 3\n\tuiDownloadProgress.Border = true\n\tuiDownloadProgress.BorderLabel = \"Download Progress\"\n\tuiDownloadProgress.BarColor = ui.ColorRed\n\n\tupdateGoVersions()\n\tuiVersionsList.BorderLabel = \"Go Versions\"\n\tuiVersionsList.Height = len(goVersions) + 2\n\tuiVersionsList.Width = 10\n\n\t\/\/ build layout\n\tui.Body.AddRows(\n\t\tui.NewRow(\n\t\t\tui.NewCol(2, 0, uiVersionsList),\n\t\t\tui.NewCol(10, 0, uiFilesList)),\n\t\tui.NewRow(\n\t\t\tui.NewCol(6, 0, uiDownloadProgress, uiDownloadSpeed),\n\t\t\tui.NewCol(6, 0, uiInfo)))\n\n\t\/\/ calculate layout\n\tui.Body.Align()\n\n\tui.Merge(\"timer\", ui.NewTimerCh(10*time.Millisecond))\n\tui.Render(ui.Body)\n\n\tui.Handle(\"\/sys\/kbd\/q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/Q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/C-c\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/<up>\", func(ui.Event) {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tselectedGoFile--\n\t\t\tupdateFilesView(getGoVersion())\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tselectedGoVersion--\n\t\t\tupdateGoVersions()\n\t\t\tupdateFilesView(getGoVersion())\n\t\t}\n\t})\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tdownloadFile(downloadFileLinks[getGoFile()])\n\t})\n\tui.Handle(\"\/sys\/kbd\/<down>\", func(ui.Event) {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tselectedGoFile++\n\t\t\tcheckLimits()\n\t\t\tupdateFilesView(getGoVersion())\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tselectedGoVersion++\n\t\t\tcheckLimits()\n\t\t\tupdateGoVersions()\n\t\t\tupdateFilesView(getGoVersion())\n\t\t}\n\t})\n\tui.Handle(\"\/sys\/kbd\/<tab>\", func(ui.Event) {\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tviewInFocus = viewGoVersions\n\t\t\tuiFilesList.BorderLabelBg = ui.ColorDefault\n\t\t\t\/\/uiFilesList.BorderLabelFg = ui.ColorGreen\n\t\t\tuiVersionsList.BorderLabelBg = ui.ColorWhite\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tviewInFocus = viewGoFiles\n\t\t\tuiFilesList.BorderLabelBg = ui.ColorWhite\n\t\t\t\/\/uiFilesList.BorderLabelFg = ui.ColorBlack\n\t\t\t\/\/uiVersionsList.BorderLabelFg = ui.ColorGreen\n\t\t\tuiVersionsList.BorderLabelBg = ui.ColorDefault\n\t\t}\n\t})\n\tui.Handle(\"\/timer\/\", func(e ui.Event) {\n\t\tui.Render(ui.Body)\n\t})\n\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tui.Body.Width = ui.TermWidth()\n\t\tui.Body.Align()\n\t\tui.Clear()\n\t\tui.Render(ui.Body)\n\t})\n\n\tuiFilesList.BorderLabelBg = ui.ColorWhite\n\tui.Loop()\n}\n<commit_msg>Minor refactoring.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/cavaliercoder\/grab\"\n\tui \"github.com\/gizak\/termui\"\n\t\"github.com\/mholt\/archiver\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nconst (\n\tviewGoVersions = \"goversions\"\n\tviewGoFiles = \"gofiles\"\n)\n\nvar mutex = &sync.Mutex{}\n\nvar viewInFocus = viewGoFiles\nvar selectedGoVersion = 0\nvar selectedGoFile = 0\n\nvar goVersions []string\nvar goFiles []string\nvar doc *goquery.Document\nvar downloadFileLinks = make(map[string]string)\nvar downloadFileSHA = make(map[string]string)\n\nvar uiVersionsList = ui.NewList()\nvar uiFilesList = ui.NewList()\n\n\/\/var uiInfo = ui.NewPar(\"Info\")\nvar uiInfo = ui.NewList()\nvar uiDownloadProgress = ui.NewGauge()\nvar uiDownloadSpeed = ui.NewSparklines()\n\nfunc getGolangData(url string) {\n\tvar err error\n\tdoc, err = goquery.NewDocument(url)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr, err := regexp.Compile(\"([a-zA-Z0-9.]+)\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgoVersions = nil\n\tdoc.Find(\"div .expanded > h2\").Each(func(i int, s *goquery.Selection) {\n\t\tgoVersions = append(goVersions, r.FindString(s.Text()))\n\t})\n}\n\nfunc updateGoVersions() {\n\tvar strs []string\n\n\tcheckLimits()\n\n\tfor index, s := range goVersions {\n\t\tif index == selectedGoVersion {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"[%s](fg-white,bg-green)\", s))\n\t\t} else {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"%s\", s))\n\t\t}\n\t}\n\n\tuiVersionsList.Items = strs\n}\n\nfunc addTextToInfoPanel(text string) {\n\tuiInfo.Items = append(uiInfo.Items, text)\n\theight := uiInfo.GetHeight()\n\tstartPosition := len(uiInfo.Items) - height + 2\n\tif startPosition < 0 {\n\t\tstartPosition = 0\n\t}\n\n\tuiInfo.Items = uiInfo.Items[startPosition:]\n}\n\nfunc downloadFile(url string) error {\n\n\taddTextToInfoPanel(fmt.Sprintf(\"Downloading %s...\\n\", url))\n\trespch, err := grab.GetAsync(\".\", url)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error downloading %s: %v\\n\", url, err)\n\t\tos.Exit(1)\n\t}\n\n\taddTextToInfoPanel(fmt.Sprintf(\"Initializing download...\\n\"))\n\n\tuiDownloadProgress.Percent = 0\n\n\tgo func() {\n\t\tvar resp *grab.Response\n\t\tt := time.NewTicker(100 * time.Millisecond)\n\t\tvar sinceLastTime uint64\n\t\tvar spdata []int\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-respch:\n\t\t\t\tif r != nil {\n\t\t\t\t\tresp = r\n\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"Downloading...\\n\"))\n\t\t\t\t}\n\t\t\tcase <-t.C:\n\n\t\t\t\tif resp == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif resp.Error != nil {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"Error downloading %s: %v\\n\", url, resp.Error)\n\t\t\t\t\t\/\/return resp.Error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif resp.IsComplete() {\n\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"Successfully downloaded to .\/%s\\n\", resp.Filename))\n\n\t\t\t\t\tuiDownloadProgress.Percent = 100\n\n\t\t\t\t\taddTextToInfoPanel(\"Verifying...\\n\")\n\t\t\t\t\tsha := downloadFileSHA[getGoFile()]\n\n\t\t\t\t\tvar hasher hash.Hash\n\t\t\t\t\tif len(sha) == 64 {\n\t\t\t\t\t\thasher = sha256.New()\n\t\t\t\t\t} else if len(sha) == 40 {\n\t\t\t\t\t\thasher = sha1.New()\n\t\t\t\t\t} else {\n\t\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"Unknown hash length of %d.\\n\", len(sha)))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tf, err := os.Open(resp.Filename)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(err.Error()))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdefer f.Close()\n\n\t\t\t\t\tif _, err := io.Copy(hasher, f); err != nil {\n\t\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(err.Error()))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tfileSHA := hex.EncodeToString(hasher.Sum(nil))\n\t\t\t\t\tif sha != fileSHA {\n\t\t\t\t\t\taddTextToInfoPanel(\"Download file doesn't match SHA.\\n\")\n\t\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"File SHA: %s\\nExpected SHA:\\n%s\\n\", fileSHA, sha))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"File SHA matches: %s\\n\", fileSHA))\n\n\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"Extracting...\\n\"))\n\n\t\t\t\t\tif strings.Contains(resp.Filename, \".zip\") {\n\t\t\t\t\t\tarchiver.Zip.Open(resp.Filename, \".\")\n\t\t\t\t\t} else if strings.Contains(resp.Filename, \".tar.gz\") {\n\t\t\t\t\t\tarchiver.TarGz.Open(resp.Filename, \".\")\n\t\t\t\t\t}\n\n\t\t\t\t\tos.Rename(\"go\", getGoVersion())\n\n\t\t\t\t\taddTextToInfoPanel(fmt.Sprintf(\"Done extracting.\\n\"))\n\n\t\t\t\t\tt.Stop()\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tuiDownloadProgress.Percent = int(100 * resp.Progress())\n\n\t\t\t\tthisTime := resp.BytesTransferred() - sinceLastTime\n\t\t\t\tsinceLastTime = resp.BytesTransferred()\n\t\t\t\tspdata = append(spdata, int(thisTime))\n\t\t\t\tuiDownloadSpeed.Lines[0].Data = spdata\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc updateFilesView(s string) error {\n\tvar err error\n\tvar b bytes.Buffer\n\tvar headers []string\n\tvar data [][]string\n\n\tgoFiles = nil\n\n\ttable := tablewriter.NewWriter(&b)\n\tdoc.Find(\"div[id=\\\"\" + strings.TrimSpace(s) + \"\\\"] > .expanded > table > thead > tr > th\").Each(func(i int, s *goquery.Selection) {\n\t\theaders = append(headers, s.Text())\n\t})\n\n\tdoc.Find(\"div[id=\\\"\" + strings.TrimSpace(s) + \"\\\"] > .expanded > table > tbody > tr\").Each(func(i int, tr *goquery.Selection) {\n\t\tvar row []string\n\t\tvar a *goquery.Selection\n\t\ttr.Find(\"td\").Each(func(i int, td *goquery.Selection) {\n\n\t\t\tif a == nil {\n\t\t\t\ta = td.Find(\"a\")\n\t\t\t}\n\t\t\tif link, exists := a.Attr(\"href\"); exists {\n\t\t\t\tdownloadFileLinks[a.Text()] = link\n\t\t\t}\n\n\t\t\ttt := td.Find(\"tt\")\n\t\t\tif tt != nil && a != nil {\n\t\t\t\tdownloadFileSHA[a.Text()] = tt.Text()\n\t\t\t}\n\t\t\trow = append(row, td.Text())\n\t\t})\n\t\tif len(row) > 0 {\n\t\t\tgoFiles = append(goFiles, row[0])\n\t\t}\n\t\tdata = append(data, row)\n\t})\n\n\ttable.SetHeader(headers)\n\t\/\/table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\ttable.SetBorder(false)\n\t\/\/table.SetCenterSeparator(\"|\")\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tcheckLimits()\n\n\tstrs := strings.Split(b.String(), \"\\n\")\n\tfor index, s := range strs {\n\t\tif index == selectedGoFile+2 { \/\/ +2 To skip headers.\n\t\t\tstrs[index] = \"[\" + strings.TrimRight(s, \"\\n\") + \"](fg-white,bg-blue)\\n\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tuiFilesList.Items = strs\n\n\treturn err\n}\n\nfunc getGoVersion() string {\n\treturn goVersions[selectedGoVersion]\n}\n\nfunc getGoFile() string {\n\treturn goFiles[selectedGoFile]\n}\n\nfunc checkLimits() {\n\tif selectedGoVersion < 0 {\n\t\tselectedGoVersion = 0\n\t} else if selectedGoVersion >= len(goVersions) {\n\t\tselectedGoVersion = len(goVersions) - 1\n\t}\n\n\tif selectedGoFile < 0 {\n\t\tselectedGoFile = 0\n\t} else if selectedGoFile >= len(goFiles) {\n\t\tselectedGoFile = len(goFiles) - 1\n\t}\n}\n\nfunc updateViewInFocus() {\n\tif viewInFocus == viewGoFiles {\n\t\tuiFilesList.BorderLabelBg = ui.ColorWhite\n\t\t\/\/uiFilesList.BorderLabelFg = ui.ColorBlack\n\t\t\/\/uiVersionsList.BorderLabelFg = ui.ColorGreen\n\t\tuiVersionsList.BorderLabelBg = ui.ColorDefault\n\t} else if viewInFocus == viewGoVersions {\n\t\tuiFilesList.BorderLabelBg = ui.ColorDefault\n\t\t\/\/uiFilesList.BorderLabelFg = ui.ColorGreen\n\t\tuiVersionsList.BorderLabelBg = ui.ColorWhite\n\t}\n}\n\nfunc main() {\n\n\turl := \"https:\/\/golang.org\/dl\/\"\n\tlog.Printf(\"Getting data from %s...\\n\", url)\n\tgetGolangData(url)\n\n\tif err := ui.Init(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer ui.Close()\n\n\tspark := ui.NewSparkline()\n\tspark.Height = 8\n\tspark.LineColor = ui.ColorCyan\n\tspark.TitleColor = ui.ColorWhite\n\n\tuiDownloadSpeed.Add(spark)\n\tuiDownloadSpeed.Height = 11\n\tuiDownloadSpeed.BorderLabel = \"Download Speed\"\n\n\tuiFilesList.Height = len(goVersions) + 2\n\tuiFilesList.BorderLabel = \"Go Files\"\n\tuiFilesList.Width = 20\n\tuiFilesList.Border = true\n\n\tuiInfo.BorderLabel = \"Info Panel\"\n\tuiInfo.Items = []string{\"Ready.\"}\n\t\/\/uiInfo.Text = \"Ready.\\n\"\n\tuiInfo.Height = 14\n\n\tupdateFilesView(getGoVersion())\n\n\tuiDownloadProgress.LabelAlign = ui.AlignCenter\n\tuiDownloadProgress.Height = 3\n\tuiDownloadProgress.Border = true\n\tuiDownloadProgress.BorderLabel = \"Download Progress\"\n\tuiDownloadProgress.BarColor = ui.ColorRed\n\n\tupdateGoVersions()\n\tuiVersionsList.BorderLabel = \"Go Versions\"\n\tuiVersionsList.Height = len(goVersions) + 2\n\tuiVersionsList.Width = 10\n\n\t\/\/ build layout\n\tui.Body.AddRows(\n\t\tui.NewRow(\n\t\t\tui.NewCol(2, 0, uiVersionsList),\n\t\t\tui.NewCol(10, 0, uiFilesList)),\n\t\tui.NewRow(\n\t\t\tui.NewCol(6, 0, uiDownloadProgress, uiDownloadSpeed),\n\t\t\tui.NewCol(6, 0, uiInfo)))\n\n\t\/\/ calculate layout\n\tui.Body.Align()\n\n\tui.Merge(\"timer\", ui.NewTimerCh(10*time.Millisecond))\n\tui.Render(ui.Body)\n\n\tui.Handle(\"\/sys\/kbd\/q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/Q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/C-c\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/<up>\", func(ui.Event) {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tselectedGoFile--\n\t\t\tupdateFilesView(getGoVersion())\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tselectedGoVersion--\n\t\t\tupdateGoVersions()\n\t\t\tupdateFilesView(getGoVersion())\n\t\t}\n\t})\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tdownloadFile(downloadFileLinks[getGoFile()])\n\t})\n\tui.Handle(\"\/sys\/kbd\/<down>\", func(ui.Event) {\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tselectedGoFile++\n\t\t\tupdateFilesView(getGoVersion())\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tselectedGoVersion++\n\t\t\tupdateGoVersions()\n\t\t\tupdateFilesView(getGoVersion())\n\t\t}\n\t})\n\tui.Handle(\"\/sys\/kbd\/<tab>\", func(ui.Event) {\n\t\tif viewInFocus == viewGoFiles {\n\t\t\tviewInFocus = viewGoVersions\n\t\t} else if viewInFocus == viewGoVersions {\n\t\t\tviewInFocus = viewGoFiles\n\t\t}\n\t\tupdateViewInFocus()\n\t})\n\tui.Handle(\"\/timer\/\", func(e ui.Event) {\n\t\tui.Render(ui.Body)\n\t})\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tui.Body.Width = ui.TermWidth()\n\t\tui.Body.Align()\n\t\tui.Clear()\n\t\tui.Render(ui.Body)\n\t})\n\n\tupdateViewInFocus()\n\n\tui.Loop()\n}\n<|endoftext|>"} {"text":"<commit_before>package sms\n\nconst (\n\tURI = \"https:\/\/sms-rassilka.com\/api\/simple\"\n\tdefaultFrom = \"inform\"\n\n\t\/\/ Successful delivery statuses.\n\tStatusQueued = \"0\"\n\tStatusSent = \"1\"\n\tStatusDelivered = \"3\"\n\n\t\/\/ Unsuccessful delivery statuses.\n\tStatusUndeliveredUnavailable = \"4\"\n\tStatusUndeliveredSpam = \"15\"\n\tStatusUndeliveredInvPhone = \"16\"\n\n\t\/\/ TODO: Other delivery statuses.\n)\n\n\/\/ Sender is an object for sending SMS.\ntype Sender struct {\n\tLogin string\n\tPassword string\n\tDevMode bool\n}\n\n\/\/ DeliveryStatus represents a delivery status. If you need an exact status, compare with constants above.\ntype DeliveryStatus string\n\n\/\/ IsInProcess tells if a message is still being processed.\nfunc (d DeliveryStatus) IsInProcess() bool {\n\treturn d == StatusQueued || d == StatusSent\n}\n\n\/\/ IsDelivered tells if a message has in fact been delivered.\nfunc (d DeliveryStatus) IsDelivered() bool {\n\treturn d == StatusDelivered\n}\n\n\/\/ IsUndelivered tells if a message has been processed and undelivered by any reason.\nfunc (d DeliveryStatus) IsUndelivered() bool {\n\treturn d == StatusUndeliveredUnavailable || d == StatusUndeliveredSpam || d == StatusUndeliveredInvPhone\n}\n<commit_msg>IsInProcess -> IsInProgress<commit_after>package sms\n\nconst (\n\tURI = \"https:\/\/sms-rassilka.com\/api\/simple\"\n\tdefaultFrom = \"inform\"\n\n\t\/\/ Successful delivery statuses.\n\tStatusQueued = \"0\"\n\tStatusSent = \"1\"\n\tStatusDelivered = \"3\"\n\n\t\/\/ Unsuccessful delivery statuses.\n\tStatusUndeliveredUnavailable = \"4\"\n\tStatusUndeliveredSpam = \"15\"\n\tStatusUndeliveredInvPhone = \"16\"\n\n\t\/\/ TODO: Other delivery statuses.\n)\n\n\/\/ Sender is an object for sending SMS.\ntype Sender struct {\n\tLogin string\n\tPassword string\n\tDevMode bool\n}\n\n\/\/ DeliveryStatus represents a delivery status. If you need an exact status, compare with constants above.\ntype DeliveryStatus string\n\n\/\/ IsInProgress tells if a message is still being processed.\nfunc (d DeliveryStatus) IsInProgress() bool {\n\treturn d == StatusQueued || d == StatusSent\n}\n\n\/\/ IsDelivered tells if a message has in fact been delivered.\nfunc (d DeliveryStatus) IsDelivered() bool {\n\treturn d == StatusDelivered\n}\n\n\/\/ IsUndelivered tells if a message has been processed and undelivered by any reason.\nfunc (d DeliveryStatus) IsUndelivered() bool {\n\treturn d == StatusUndeliveredUnavailable || d == StatusUndeliveredSpam || d == StatusUndeliveredInvPhone\n}\n<|endoftext|>"} {"text":"<commit_before>package goesi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"context\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/\/ SSOAuthenticator provides interfacing to the EVE SSO. NewSSOAuthenticator is used to create\n\/\/ this structure.\n\n\/\/ [TODO] lose this mutex and allow scopes to change without conflict.\ntype SSOAuthenticator struct {\n\thttpClient *http.Client\n\t\/\/ Hide this...\n\toauthConfig *oauth2.Config\n\tscopeLock sync.Mutex\n}\n\n\/\/ NewSSOAuthenticator create a new EVE SSO Authenticator.\n\/\/ Requires your application clientID, clientSecret, and redirectURL.\n\/\/ RedirectURL must match exactly to what you registered with CCP.\nfunc NewSSOAuthenticator(client *http.Client, clientID string, clientSecret string, redirectURL string, scopes []string) *SSOAuthenticator {\n\n\tif client == nil {\n\t\treturn nil\n\t}\n\n\tc := &SSOAuthenticator{}\n\n\tc.httpClient = client\n\n\tc.oauthConfig = &oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/login.eveonline.com\/oauth\/authorize\",\n\t\t\tTokenURL: \"https:\/\/login.eveonline.com\/oauth\/token\",\n\t\t},\n\t\tScopes: scopes,\n\t\tRedirectURL: redirectURL,\n\t}\n\n\treturn c\n}\n\n\/\/ ChangeAuthURL changes the oauth2 configuration url for authentication\nfunc (c *SSOAuthenticator) ChangeAuthURL(url string) {\n\tc.oauthConfig.Endpoint.AuthURL = url\n}\n\n\/\/ ChangeTokenURL changes the oauth2 configuration url for token\nfunc (c *SSOAuthenticator) ChangeTokenURL(url string) {\n\tc.oauthConfig.Endpoint.TokenURL = url\n}\n\n\/\/ AuthorizeURL returns a url for an end user to authenticate with EVE SSO\n\/\/ and return success to the redirectURL.\n\/\/ It is important to create a significatly unique state for this request\n\/\/ and verify the state matches when returned to the redirectURL.\nfunc (c *SSOAuthenticator) AuthorizeURL(state string, onlineAccess bool, scopes []string) string {\n\tvar url string\n\n\t\/\/ Generate the URL\n\tif onlineAccess == true {\n\t\turl = c.oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOnline, oauth2.SetAuthURLParam(\"scope\", strings.Join(scopes, \" \")))\n\t} else {\n\t\turl = c.oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam(\"scope\", strings.Join(scopes, \" \")))\n\t}\n\n\treturn url\n}\n\n\/\/ TokenRevoke revokes a refresh token\nfunc (c *SSOAuthenticator) TokenRevoke(refreshToken string) error {\n\tv := url.Values{\n\t\t\"token_type_hint\": {\"refresh_token\"},\n\t\t\"token\": {refreshToken},\n\t}\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/login.eveonline.com\/oauth\/revoke\", strings.NewReader(v.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\treq.Header.Set(\"Authorization\",\n\t\t\"Basic \"+base64.URLEncoding.EncodeToString(\n\t\t\t[]byte(c.oauthConfig.ClientID+\":\"+c.oauthConfig.ClientSecret),\n\t\t))\n\n\tr, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"oauth2: cannot revoke token: %v\", err)\n\t}\n\tif code := r.StatusCode; code < 200 || code > 299 {\n\t\treturn &oauth2.RetrieveError{\n\t\t\tResponse: r,\n\t\t\tBody: body,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TokenExchange exchanges the code returned to the redirectURL with\n\/\/ the CREST server to an access token. A caching client must be passed.\n\/\/ This client MUST cache per CCP guidelines or face banning.\nfunc (c *SSOAuthenticator) TokenExchange(code string) (*oauth2.Token, error) {\n\ttok, err := c.oauthConfig.Exchange(createContext(c.httpClient), code)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tok, nil\n}\n\n\/\/ TokenSource creates a refreshable token that can be passed to ESI functions\nfunc (c *SSOAuthenticator) TokenSource(token *oauth2.Token) (oauth2.TokenSource, error) {\n\treturn c.oauthConfig.TokenSource(createContext(c.httpClient), token), nil\n}\n\ntype VerifyResponse struct {\n\tCharacterID int32\n\tCharacterName string\n\tExpiresOn string\n\tScopes string\n\tTokenType string\n\tCharacterOwnerHash string\n}\n\n\/\/ Verify the client and collect user information.\nfunc (c *SSOAuthenticator) Verify(auth oauth2.TokenSource) (*VerifyResponse, error) {\n\tv := &VerifyResponse{}\n\t_, err := c.doJSON(\"GET\", \"https:\/\/login.eveonline.com\/oauth\/verify\", nil, v, \"application\/json;\", auth)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn v, nil\n}\n\n\/\/ Creates a new http.Request for a public resource.\nfunc (c *SSOAuthenticator) newRequest(method, urlStr string, body interface{}, mediaType string) (*http.Request, error) {\n\trel, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif body != nil {\n\t\terr = json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, rel.String(), buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}\n\n\/\/ Calls a resource from the public CREST\nfunc (c *SSOAuthenticator) doJSON(method, urlStr string, body interface{}, v interface{}, mediaType string, auth oauth2.TokenSource) (*http.Response, error) {\n\n\treq, err := c.newRequest(method, urlStr, body, mediaType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif auth != nil {\n\t\t\/\/ We were able to grab an oauth2 token from the context\n\t\tvar latestToken *oauth2.Token\n\t\tif latestToken, err = auth.Token(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlatestToken.SetAuthHeader(req)\n\t}\n\n\tres, err := c.executeRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer res.Body.Close()\n\tbuf, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(string(buf))\n\t}\n\tif err := json.Unmarshal([]byte(buf), v); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}\n\n\/\/ Executes a request generated with newRequest\nfunc (c *SSOAuthenticator) executeRequest(req *http.Request) (*http.Response, error) {\n\tres, err := c.httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == http.StatusOK ||\n\t\tres.StatusCode == http.StatusCreated {\n\t\treturn res, nil\n\t}\n\treturn res, errors.New(res.Status)\n}\n\n\/\/ Add custom clients to the context.\nfunc createContext(httpClient *http.Client) context.Context {\n\tparent := oauth2.NoContext\n\tctx := context.WithValue(parent, oauth2.HTTPClient, httpClient)\n\treturn ctx\n}\n\n\/\/ TokenToJSON helper function to convert a token to a storable format.\nfunc TokenToJSON(token *oauth2.Token) (string, error) {\n\tif d, err := json.Marshal(token); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(d), nil\n\t}\n}\n\n\/\/ TokenFromJSON helper function to convert stored JSON to a token.\nfunc TokenFromJSON(jsonStr string) (*oauth2.Token, error) {\n\tvar token oauth2.Token\n\tif err := json.Unmarshal([]byte(jsonStr), &token); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &token, nil\n}\n<commit_msg>[feat] support v2 sso endpoints<commit_after>package goesi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"context\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/\/ SSOAuthenticator provides interfacing to the EVE SSO. NewSSOAuthenticator is used to create\n\/\/ this structure.\n\n\/\/ [TODO] lose this mutex and allow scopes to change without conflict.\ntype SSOAuthenticator struct {\n\thttpClient *http.Client\n\t\/\/ Hide this...\n\toauthConfig *oauth2.Config\n\tscopeLock sync.Mutex\n}\n\n\/\/ NewSSOAuthenticator create a new EVE SSO Authenticator.\n\/\/ Requires your application clientID, clientSecret, and redirectURL.\n\/\/ RedirectURL must match exactly to what you registered with CCP.\nfunc NewSSOAuthenticator(client *http.Client, clientID string, clientSecret string, redirectURL string, scopes []string) *SSOAuthenticator {\n\treturn newSSOAuthenticator(\n\t\tclient,\n\t\tclientID,\n\t\tclientSecret,\n\t\tredirectURL,\n\t\tscopes,\n\t\toauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/login.eveonline.com\/oauth\/authorize\",\n\t\t\tTokenURL: \"https:\/\/login.eveonline.com\/oauth\/token\",\n\t\t},\n\t)\n}\n\n\/\/ NewSSOAuthenticatorV2 create a new EVE SSO Authenticator with the v2 urls.\n\/\/ Requires your application clientID, clientSecret, and redirectURL.\n\/\/ RedirectURL must match exactly to what you registered with CCP.\nfunc NewSSOAuthenticatorV2(client *http.Client, clientID string, clientSecret string, redirectURL string, scopes []string) *SSOAuthenticator {\n\treturn newSSOAuthenticator(\n\t\tclient,\n\t\tclientID,\n\t\tclientSecret,\n\t\tredirectURL,\n\t\tscopes,\n\t\toauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/login.eveonline.com\/v2\/oauth\/authorize\",\n\t\t\tTokenURL: \"https:\/\/login.eveonline.com\/v2\/oauth\/token\",\n\t\t},\n\t)\n}\n\nfunc newSSOAuthenticator(client *http.Client, clientID string, clientSecret string, redirectURL string, scopes []string, endpoint oauth2.Endpoint) *SSOAuthenticator {\n\tif client == nil {\n\t\treturn nil\n\t}\n\n\tc := &SSOAuthenticator{}\n\tc.httpClient = client\n\tc.oauthConfig = &oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tEndpoint: endpoint,\n\t\tScopes: scopes,\n\t\tRedirectURL: redirectURL,\n\t}\n\n\treturn c\n}\n\n\/\/ ChangeAuthURL changes the oauth2 configuration url for authentication\nfunc (c *SSOAuthenticator) ChangeAuthURL(url string) {\n\tc.oauthConfig.Endpoint.AuthURL = url\n}\n\n\/\/ ChangeTokenURL changes the oauth2 configuration url for token\nfunc (c *SSOAuthenticator) ChangeTokenURL(url string) {\n\tc.oauthConfig.Endpoint.TokenURL = url\n}\n\n\/\/ AuthorizeURL returns a url for an end user to authenticate with EVE SSO\n\/\/ and return success to the redirectURL.\n\/\/ It is important to create a significatly unique state for this request\n\/\/ and verify the state matches when returned to the redirectURL.\nfunc (c *SSOAuthenticator) AuthorizeURL(state string, onlineAccess bool, scopes []string) string {\n\tvar url string\n\n\t\/\/ Generate the URL\n\tif onlineAccess == true {\n\t\turl = c.oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOnline, oauth2.SetAuthURLParam(\"scope\", strings.Join(scopes, \" \")))\n\t} else {\n\t\turl = c.oauthConfig.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam(\"scope\", strings.Join(scopes, \" \")))\n\t}\n\n\treturn url\n}\n\n\/\/ TokenRevoke revokes a refresh token\nfunc (c *SSOAuthenticator) TokenRevoke(refreshToken string) error {\n\tv := url.Values{\n\t\t\"token_type_hint\": {\"refresh_token\"},\n\t\t\"token\": {refreshToken},\n\t}\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/login.eveonline.com\/oauth\/revoke\", strings.NewReader(v.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\treq.Header.Set(\"Authorization\",\n\t\t\"Basic \"+base64.URLEncoding.EncodeToString(\n\t\t\t[]byte(c.oauthConfig.ClientID+\":\"+c.oauthConfig.ClientSecret),\n\t\t))\n\n\tr, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"oauth2: cannot revoke token: %v\", err)\n\t}\n\tif code := r.StatusCode; code < 200 || code > 299 {\n\t\treturn &oauth2.RetrieveError{\n\t\t\tResponse: r,\n\t\t\tBody: body,\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TokenExchange exchanges the code returned to the redirectURL with\n\/\/ the CREST server to an access token. A caching client must be passed.\n\/\/ This client MUST cache per CCP guidelines or face banning.\nfunc (c *SSOAuthenticator) TokenExchange(code string) (*oauth2.Token, error) {\n\ttok, err := c.oauthConfig.Exchange(createContext(c.httpClient), code)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tok, nil\n}\n\n\/\/ TokenSource creates a refreshable token that can be passed to ESI functions\nfunc (c *SSOAuthenticator) TokenSource(token *oauth2.Token) (oauth2.TokenSource, error) {\n\treturn c.oauthConfig.TokenSource(createContext(c.httpClient), token), nil\n}\n\ntype VerifyResponse struct {\n\tCharacterID int32\n\tCharacterName string\n\tExpiresOn string\n\tScopes string\n\tTokenType string\n\tCharacterOwnerHash string\n}\n\n\/\/ Verify the client and collect user information.\nfunc (c *SSOAuthenticator) Verify(auth oauth2.TokenSource) (*VerifyResponse, error) {\n\tv := &VerifyResponse{}\n\t_, err := c.doJSON(\"GET\", \"https:\/\/login.eveonline.com\/oauth\/verify\", nil, v, \"application\/json;\", auth)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn v, nil\n}\n\n\/\/ Creates a new http.Request for a public resource.\nfunc (c *SSOAuthenticator) newRequest(method, urlStr string, body interface{}, mediaType string) (*http.Request, error) {\n\trel, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif body != nil {\n\t\terr = json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, rel.String(), buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}\n\n\/\/ Calls a resource from the public CREST\nfunc (c *SSOAuthenticator) doJSON(method, urlStr string, body interface{}, v interface{}, mediaType string, auth oauth2.TokenSource) (*http.Response, error) {\n\n\treq, err := c.newRequest(method, urlStr, body, mediaType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif auth != nil {\n\t\t\/\/ We were able to grab an oauth2 token from the context\n\t\tvar latestToken *oauth2.Token\n\t\tif latestToken, err = auth.Token(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlatestToken.SetAuthHeader(req)\n\t}\n\n\tres, err := c.executeRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer res.Body.Close()\n\tbuf, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(string(buf))\n\t}\n\tif err := json.Unmarshal([]byte(buf), v); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}\n\n\/\/ Executes a request generated with newRequest\nfunc (c *SSOAuthenticator) executeRequest(req *http.Request) (*http.Response, error) {\n\tres, err := c.httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == http.StatusOK ||\n\t\tres.StatusCode == http.StatusCreated {\n\t\treturn res, nil\n\t}\n\treturn res, errors.New(res.Status)\n}\n\n\/\/ Add custom clients to the context.\nfunc createContext(httpClient *http.Client) context.Context {\n\tparent := oauth2.NoContext\n\tctx := context.WithValue(parent, oauth2.HTTPClient, httpClient)\n\treturn ctx\n}\n\n\/\/ TokenToJSON helper function to convert a token to a storable format.\nfunc TokenToJSON(token *oauth2.Token) (string, error) {\n\tif d, err := json.Marshal(token); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(d), nil\n\t}\n}\n\n\/\/ TokenFromJSON helper function to convert stored JSON to a token.\nfunc TokenFromJSON(jsonStr string) (*oauth2.Token, error) {\n\tvar token oauth2.Token\n\tif err := json.Unmarshal([]byte(jsonStr), &token); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &token, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package thegotribe\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar logger = log.New(os.Stdout, \"[thegotribe] \", log.LstdFlags)\n\ntype EyeTracker struct {\n\tconn io.ReadWriteCloser\n\tenc *json.Encoder\n\tdec *json.Decoder\n\tincoming chan Response\n\theartbeats chan Response\n\tOnGaze GazeFunc\n}\n\nfunc Create() (tracker *EyeTracker, err error) {\n\tconn, err := net.Dial(\"tcp\", \"localhost:6555\")\n\tif err != nil {\n\t\treturn\n\t}\n\treturn New(conn), nil\n}\n\nfunc New(conn io.ReadWriteCloser) (tracker *EyeTracker) {\n\ttracker = &EyeTracker{\n\t\tconn: conn,\n\t\tenc: json.NewEncoder(conn),\n\t\tdec: json.NewDecoder(conn),\n\t\tincoming: make(chan Response, 10),\n\t\theartbeats: make(chan Response),\n\t\tOnGaze: NullFunc,\n\t}\n\tgo tracker.readPackets()\n\n\tres, err := tracker.Get(\"heartbeatinterval\")\n\theartbeat := 250 * time.Millisecond\n\tif err == nil {\n\t\trequested := time.Duration(res.(float64)) * time.Millisecond\n\t\tif requested < heartbeat {\n\t\t\theartbeat = requested \/ 2\n\t\t}\n\t}\n\tgo tracker.heartbeat(heartbeat)\n\n\ttracker.Set(\"version\", ProtocolVersion)\n\ttracker.Set(\"push\", true)\n\n\treturn\n}\n\nfunc (et *EyeTracker) Close() error {\n\tlogger.Println(\"Closing tracker\")\n\treturn et.conn.Close()\n}\n\nfunc (et *EyeTracker) readPackets() {\n\tvar res Response\n\n\tfor {\n\t\terr := et.dec.Decode(&res)\n\t\tlogger.Println(\"Raw:\", res)\n\t\tif err != nil {\n\t\t\tlogger.Println(err)\n\t\t\tclose(et.incoming)\n\t\t\tclose(et.heartbeats)\n\t\t\treturn\n\t\t}\n\n\t\tif res.Category == CategoryHeartbeat {\n\t\t\tet.heartbeats <- res\n\t\t}\n\n\t\tif et.OnGaze == nil {\n\t\t\tlogger.Println(\"OnGaze == nil\")\n\t\t}\n\n\t\tif frame := res.Values.Frame; frame != nil && et.OnGaze != nil {\n\t\t\tlogger.Println(frame)\n\t\t\tet.OnGaze(*frame)\n\t\t\tres.Values.Frame = nil \/\/ Handeled\n\t\t}\n\t\tif res.Values.StatusMessage != nil ||\n\t\t\tres.Values.Push != nil ||\n\t\t\tres.Values.HeartbeatInterval != nil ||\n\t\t\tres.Values.Version != nil ||\n\t\t\tres.Values.TrackerState != nil ||\n\t\t\tres.Values.Framerate != nil ||\n\t\t\tres.Values.ScreenIndex != nil ||\n\t\t\tres.Values.ScreenPsyW != nil ||\n\t\t\tres.Values.ScreenResW != nil ||\n\t\t\tres.Values.ScreenResH != nil ||\n\t\t\tres.Values.ScreenPsyH != nil {\n\t\t\tet.incoming <- res\n\t\t}\n\t}\n}\n\nfunc (et *EyeTracker) heartbeat(interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tfor _ = range ticker.C {\n\t\terr := et.enc.Encode(Request{\n\t\t\tCategory: CategoryHeartbeat,\n\t\t})\n\t\tif err != nil {\n\t\t\tticker.Stop()\n\t\t}\n\t\t<-et.heartbeats\n\t}\n}\n\nfunc (et *EyeTracker) Set(attribute string, value interface{}) error {\n\tet.SetAll(map[string]interface{}{\n\t\tattribute: value,\n\t})\n\treturn nil\n}\n\nfunc (et *EyeTracker) SetAll(attributes map[string]interface{}) error {\n\tet.enc.Encode(Request{\n\t\tCategory: CategoryTracker,\n\t\tRequest: \"set\",\n\t\tValues: attributes,\n\t})\n\tres := <-et.incoming\n\tif res.StatusCode != OK {\n\t\treturn res.StatusCode\n\t}\n\treturn nil\n}\n\nfunc (et *EyeTracker) Get(attribute string) (interface{}, error) {\n\tres, err := et.GetAll(attribute)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif val := reflect.ValueOf(res).Elem().FieldByNameFunc(func(name string) bool {\n\t\t\treturn strings.ToLower(name) == attribute\n\t\t}); !val.IsNil() {\n\t\t\treturn val.Elem().Interface(), nil\n\t\t}\n\t\treturn nil, errors.New(*res.StatusMessage)\n\t}\n}\n\nfunc (et *EyeTracker) GetAll(attributes ...string) (*Values, error) {\n\tet.enc.Encode(Request{\n\t\tCategory: CategoryTracker,\n\t\tRequest: \"get\",\n\t\tValues: attributes,\n\t})\n\n\tresult, ok := <-et.incoming\n\tif !ok {\n\t\treturn nil, errors.New(\"End of Connection\")\n\t}\n\tif result.StatusCode != OK {\n\t\treturn &result.Values, result.StatusCode\n\t}\n\n\treturn &result.Values, nil\n}\n\ntype GazeFunc func(Frame)\n\nfunc NullFunc(f Frame) {\n\n}\n<commit_msg>Appears to work now. Frame stream is examplified and tested in examples\/tet-tracegaze.<commit_after>package thegotribe\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"reflect\"\n\t\"time\"\n)\n\nvar logger = log.New(ioutil.Discard, \"[thegotribe] \", log.LstdFlags)\n\ntype EyeTracker struct {\n\tconn io.ReadWriteCloser\n\tenc *json.Encoder\n\tdec *json.Decoder\n\n\tFrames chan Frame\n\theartbeats chan Response\n\tsets chan Response\n\tinterests map[string][]chan<- interface{}\n}\n\nfunc Create() (tracker *EyeTracker, err error) {\n\tconn, err := net.Dial(\"tcp\", \"localhost:6555\")\n\tif err != nil {\n\t\treturn\n\t}\n\treturn New(conn), nil\n}\n\nfunc New(conn io.ReadWriteCloser) (tracker *EyeTracker) {\n\ttracker = &EyeTracker{\n\t\tconn: conn,\n\t\tenc: json.NewEncoder(conn),\n\t\tdec: json.NewDecoder(conn),\n\n\t\tFrames: make(chan Frame, 1),\n\t\theartbeats: make(chan Response),\n\t\tsets: make(chan Response),\n\t\tinterests: make(map[string][]chan<- interface{}),\n\t}\n\tgo tracker.readPackets()\n\n\tres, err := tracker.Get(\"heartbeatinterval\")\n\theartbeat := 250 * time.Millisecond\n\tif err == nil {\n\t\trequested := time.Duration(res.(float64)) * time.Millisecond\n\t\tif requested < heartbeat {\n\t\t\theartbeat = requested \/ 2\n\t\t}\n\t}\n\tgo tracker.heartbeat(heartbeat)\n\n\ttracker.Set(\"version\", ProtocolVersion)\n\ttracker.Set(\"push\", true)\n\n\treturn\n}\n\nfunc (et *EyeTracker) Close() error {\n\tlogger.Println(\"Closing tracker\")\n\treturn et.conn.Close()\n}\n\nfunc (et *EyeTracker) readPackets() {\n\tvar res Response\n\n\tfor {\n\t\terr := et.dec.Decode(&res)\n\t\tlogger.Println(\"Raw:\", res)\n\t\tif err != nil {\n\t\t\tlogger.Println(err)\n\t\t\tclose(et.heartbeats)\n\t\t\tclose(et.Frames)\n\t\t\treturn\n\t\t}\n\n\t\tif res.StatusCode != OK { \/\/ Deals with bad status codes\n\t\t\tcontinue\n\t\t}\n\n\t\tif res.Category == CategoryHeartbeat {\n\t\t\tet.heartbeats <- res\n\t\t\tcontinue\n\t\t}\n\n\t\tif res.Category == CategoryCalibration {\n\t\t\t\/\/ Not yet supported.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Only res.Category == CategoryTracker possible at this point\n\t\tif res.Category != CategoryTracker {\n\t\t\tpanic(\"Unknown category \" + res.Category)\n\t\t}\n\n\t\tif res.Request == RequestSet {\n\t\t\tet.sets <- res\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Only res.Request == RequestGet possible at this point\n\t\tif res.Request != RequestGet {\n\t\t\tpanic(\"Unknown request method \" + res.Request)\n\t\t}\n\n\t\tif frame := res.Values.Frame; frame != nil {\n\t\t\tselect {\n\t\t\tcase et.Frames <- *frame:\n\t\t\tdefault: \/\/Throw it away. Client is busy.\n\t\t\t}\n\t\t\tres.Values.Frame = nil \/\/ Handeled\n\t\t}\n\n\t\t\/\/ Deal with every other value of tracker\n\t\tval, typ := reflect.ValueOf(res.Values), reflect.TypeOf(res.Values)\n\t\tfor i := 0; i < val.NumField(); i++ {\n\t\t\tif field := val.Field(i); !field.IsNil() {\n\t\t\t\tname := typ.Field(i).Tag.Get(\"json\") \/\/ Only if value is present\n\t\t\t\tif list, ok := et.interests[name]; ok && len(list) > 0 {\n\t\t\t\t\tfor _, ch := range list { \/\/ Only if value is present and there are interested parties\n\t\t\t\t\t\tch <- field.Elem().Interface() \/\/ Notify\n\t\t\t\t\t\tclose(ch) \/\/ Close\n\t\t\t\t\t}\n\t\t\t\t\tet.interests[name] = list[0:0] \/\/Clear interest list\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (et *EyeTracker) heartbeat(interval time.Duration) {\n\tticker := time.NewTicker(interval)\n\tfor _ = range ticker.C {\n\t\terr := et.enc.Encode(Request{\n\t\t\tCategory: CategoryHeartbeat,\n\t\t})\n\t\tif err != nil {\n\t\t\tticker.Stop()\n\t\t}\n\t\t<-et.heartbeats\n\t}\n}\n\nfunc (et *EyeTracker) Set(attribute string, value interface{}) error {\n\treturn et.SetAll(map[string]interface{}{\n\t\tattribute: value,\n\t})\n\n}\n\nfunc (et *EyeTracker) SetAll(attributes map[string]interface{}) error {\n\tet.enc.Encode(Request{\n\t\tCategory: CategoryTracker,\n\t\tRequest: RequestSet,\n\t\tValues: attributes,\n\t})\n\tres := <-et.sets\n\tif res.StatusCode != OK {\n\t\treturn res.StatusCode\n\t}\n\treturn nil\n}\n\nfunc (et *EyeTracker) Get(attribute string) (interface{}, error) {\n\tresch := make(chan interface{}, 1)\n\tet.interests[attribute] = append(et.interests[attribute], resch)\n\n\terr := et.enc.Encode(Request{\n\t\tCategory: CategoryTracker,\n\t\tRequest: RequestGet,\n\t\tValues: []string{attribute},\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tselect {\n\tcase res := <-resch:\n\t\treturn res, nil\n\tcase <-time.After(3 * time.Second):\n\t\treturn nil, errors.New(\"Request timeout\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nfunc inputToObject(inputStr string, debug *bool) (result interface{}, err error) {\n\tjsonStr := \"\"\n\tlastChar := \"\"\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"input is: %v\\n\", inputStr)\n\t}\n\n\tfor position, rune := range inputStr {\n\t\tcurrentChar := string(rune)\n\n\t\tisOpeningBrace, _ := regexp.MatchString(\"[{\\\\[]\", currentChar)\n\t\tisColonOrComma, _ := regexp.MatchString(\"[:,]\", currentChar)\n\t\tisNotSpecial, _ := regexp.MatchString(\"[^{\\\\[:,]\", currentChar)\n\t\tlastWasSpecial, _ := regexp.MatchString(\"[{\\\\[:,]\", lastChar)\n\t\tisClosingBrace, _ := regexp.MatchString(\"[}\\\\]]\", currentChar)\n\t\tlastWasClosingBrace, _ := regexp.MatchString(\"[^}\\\\]]\", lastChar)\n\n\t\tif position > 0 && isOpeningBrace && !lastWasSpecial {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isNotSpecial && lastWasSpecial {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isColonOrComma && lastWasClosingBrace {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isClosingBrace && lastWasClosingBrace {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tjsonStr += currentChar\n\t\tlastChar = currentChar\n\t}\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"json is: %v\\n\", jsonStr)\n\t}\n\n\terr = json.Unmarshal([]byte(jsonStr), &result)\n\tif err != nil || reflect.TypeOf(result).Kind() == reflect.Float64 {\n\t\tresult = inputStr\n\t\tif *debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"result is: %v (error: %v)\\n\", result, err)\n\t\t}\n\t} else if *debug {\n\t\tfmt.Fprintf(os.Stderr, \"result is: %v\\n\", result)\n\t}\n\n\treturn result, err\n}\n\nfunc main() {\n\n\t\/\/ set and parse cmd line flags\n\tdebug := flag.Bool(\"d\", false, \"enable debug mode\")\n\ttemplateFile := flag.String(\"t\", \"\", \"template file\")\n\tversion := flag.Bool(\"v\", false, \"show version\")\n\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Fprintf(os.Stdout, \"version %s\\n\", \"0.4\")\n\t\tos.Exit(0)\n\t}\n\n\tif len(*templateFile) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tif _, err := os.Stat(*templateFile); os.IsNotExist(err) {\n\t\tfmt.Fprintf(os.Stderr, \"%s not found\\n\", *templateFile)\n\t\tos.Exit(2)\n\t}\n\n\t\/\/ generate environment map\n\tenvironment := make(map[string]interface{})\n\tfor _, envVar := range os.Environ() {\n\n\t\tenvKeyValuePair := strings.Split(envVar, \"=\")\n\t\tenvKey, envValue := envKeyValuePair[0], envKeyValuePair[1]\n\n\t\tdata, err := inputToObject(envValue, debug)\n\t\tif err != nil {\n\t\t\tenvironment[envKey] = envValue\n\t\t} else {\n\t\t\tenvironment[envKey] = data\n\t\t}\n\t}\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"environment map is: %v\\n\", environment)\n\t}\n\n\t\/\/ render template\n\ttpl := template.Must(template.ParseGlob(*templateFile))\n\terr := tpl.Execute(os.Stdout, environment)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error rendering template %v: %v\", *templateFile, err)\n\t\tos.Exit(2)\n\t}\n\n}\n<commit_msg>bump version<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nfunc inputToObject(inputStr string, debug *bool) (result interface{}, err error) {\n\tjsonStr := \"\"\n\tlastChar := \"\"\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"input is: %v\\n\", inputStr)\n\t}\n\n\tfor position, rune := range inputStr {\n\t\tcurrentChar := string(rune)\n\n\t\tisOpeningBrace, _ := regexp.MatchString(\"[{\\\\[]\", currentChar)\n\t\tisColonOrComma, _ := regexp.MatchString(\"[:,]\", currentChar)\n\t\tisNotSpecial, _ := regexp.MatchString(\"[^{\\\\[:,]\", currentChar)\n\t\tlastWasSpecial, _ := regexp.MatchString(\"[{\\\\[:,]\", lastChar)\n\t\tisClosingBrace, _ := regexp.MatchString(\"[}\\\\]]\", currentChar)\n\t\tlastWasClosingBrace, _ := regexp.MatchString(\"[^}\\\\]]\", lastChar)\n\n\t\tif position > 0 && isOpeningBrace && !lastWasSpecial {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isNotSpecial && lastWasSpecial {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isColonOrComma && lastWasClosingBrace {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tif isClosingBrace && lastWasClosingBrace {\n\t\t\tjsonStr += \"\\\"\"\n\t\t}\n\n\t\tjsonStr += currentChar\n\t\tlastChar = currentChar\n\t}\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"json is: %v\\n\", jsonStr)\n\t}\n\n\terr = json.Unmarshal([]byte(jsonStr), &result)\n\tif err != nil || reflect.TypeOf(result).Kind() == reflect.Float64 {\n\t\tresult = inputStr\n\t\tif *debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"result is: %v (error: %v)\\n\", result, err)\n\t\t}\n\t} else if *debug {\n\t\tfmt.Fprintf(os.Stderr, \"result is: %v\\n\", result)\n\t}\n\n\treturn result, err\n}\n\nfunc main() {\n\n\t\/\/ set and parse cmd line flags\n\tdebug := flag.Bool(\"d\", false, \"enable debug mode\")\n\ttemplateFile := flag.String(\"t\", \"\", \"template file\")\n\tversion := flag.Bool(\"v\", false, \"show version\")\n\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Fprintf(os.Stdout, \"version %s\\n\", \"0.3.1\")\n\t\tos.Exit(0)\n\t}\n\n\tif len(*templateFile) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tif _, err := os.Stat(*templateFile); os.IsNotExist(err) {\n\t\tfmt.Fprintf(os.Stderr, \"%s not found\\n\", *templateFile)\n\t\tos.Exit(2)\n\t}\n\n\t\/\/ generate environment map\n\tenvironment := make(map[string]interface{})\n\tfor _, envVar := range os.Environ() {\n\n\t\tenvKeyValuePair := strings.Split(envVar, \"=\")\n\t\tenvKey, envValue := envKeyValuePair[0], envKeyValuePair[1]\n\n\t\tdata, err := inputToObject(envValue, debug)\n\t\tif err != nil {\n\t\t\tenvironment[envKey] = envValue\n\t\t} else {\n\t\t\tenvironment[envKey] = data\n\t\t}\n\t}\n\n\tif *debug {\n\t\tfmt.Fprintf(os.Stderr, \"environment map is: %v\\n\", environment)\n\t}\n\n\t\/\/ render template\n\ttpl := template.Must(template.ParseGlob(*templateFile))\n\terr := tpl.Execute(os.Stdout, environment)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error rendering template %v: %v\", *templateFile, err)\n\t\tos.Exit(2)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package url\n\nimport (\n\t\"github.com\/golangplus\/bytes\"\n\n\t. \"github.com\/gohtml\/elements\"\n\t. \"github.com\/gohtml\/utils\"\n)\n\n\/\/ Makes a URL.\n\/\/ port is ignored if set to zero.\n\/\/ fragment is optional. If specified as an empty string, a trailing '#' will be appended.\nfunc U(scheme, host string, port int, path string, q QUERY, fragment ...string) URL {\n\tvar b bytesp.ByteSlice\n\tif len(host) > 0 {\n\t\tif len(scheme) > 0 {\n\t\t\tb.WriteString(scheme)\n\t\t\tb.WriteByte(':')\n\t\t}\n\n\t\tb.WriteString(`\/\/`)\n\t\tb.WriteString(EscapeHost(host))\n\n\t\tif port > 0 {\n\t\t\tb.WriteByte(':')\n\t\t\tb.WriteItoa(int64(port), 10)\n\t\t}\n\n\t\tif len(path) == 0 || path[0] != '\/' {\n\t\t\tb.WriteByte('\/')\n\t\t}\n\t}\n\n\tb.WriteString(path)\n\n\tif len(q) > 0 {\n\t\tb.WriteByte('?')\n\t\tb.WriteString(string(q))\n\t}\n\n\tif fragment != nil {\n\t\tb.WriteByte('#')\n\t\tb.WriteString(fragment[0])\n\t}\n\n\treturn URL(b)\n}\n\n\/\/ Generates a URL QUERY.\n\/\/ nameValue's should be paired otherwise the last one is ignored.\nfunc Q(nameValue ...string) QUERY {\n\tif len(nameValue) < 2 {\n\t\treturn \"\"\n\t}\n\n\tvar b bytesp.ByteSlice\n\n\tfor i := 1; i < len(nameValue); i += 2 {\n\t\tif i > 1 {\n\t\t\tb.WriteByte('&')\n\t\t}\n\t\tb.WriteString(EscapeQuery(nameValue[i-1]))\n\t\tb.WriteByte('=')\n\t\tb.WriteString(EscapeQuery(nameValue[i]))\n\t}\n\n\treturn QUERY(b)\n}\n<commit_msg>dot import to default import for utils pacakge<commit_after>package url\n\nimport (\n\t\"github.com\/golangplus\/bytes\"\n\n\t. \"github.com\/gohtml\/elements\"\n\t\"github.com\/gohtml\/utils\"\n)\n\n\/\/ Makes a URL.\n\/\/ port is ignored if set to zero.\n\/\/ fragment is optional. If specified as an empty string, a trailing '#' will be appended.\nfunc U(scheme, host string, port int, path string, q QUERY, fragment ...string) URL {\n\tvar b bytesp.ByteSlice\n\tif len(host) > 0 {\n\t\tif len(scheme) > 0 {\n\t\t\tb.WriteString(scheme)\n\t\t\tb.WriteByte(':')\n\t\t}\n\n\t\tb.WriteString(`\/\/`)\n\t\tb.WriteString(utils.EscapeHost(host))\n\n\t\tif port > 0 {\n\t\t\tb.WriteByte(':')\n\t\t\tb.WriteItoa(int64(port), 10)\n\t\t}\n\n\t\tif len(path) == 0 || path[0] != '\/' {\n\t\t\tb.WriteByte('\/')\n\t\t}\n\t}\n\n\tb.WriteString(path)\n\n\tif len(q) > 0 {\n\t\tb.WriteByte('?')\n\t\tb.WriteString(string(q))\n\t}\n\n\tif fragment != nil {\n\t\tb.WriteByte('#')\n\t\tb.WriteString(fragment[0])\n\t}\n\n\treturn URL(b)\n}\n\n\/\/ Generates a URL QUERY.\n\/\/ nameValue's should be paired otherwise the last one is ignored.\nfunc Q(nameValue ...string) QUERY {\n\tif len(nameValue) < 2 {\n\t\treturn \"\"\n\t}\n\n\tvar b bytesp.ByteSlice\n\n\tfor i := 1; i < len(nameValue); i += 2 {\n\t\tif i > 1 {\n\t\t\tb.WriteByte('&')\n\t\t}\n\t\tb.WriteString(utils.EscapeQuery(nameValue[i-1]))\n\t\tb.WriteByte('=')\n\t\tb.WriteString(utils.EscapeQuery(nameValue[i]))\n\t}\n\n\treturn QUERY(b)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/elazarl\/go-bindata-assetfs\"\n\t\"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/jmoiron\/sqlx\"\n\t_ \"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/lib\/pq\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc metricsHandler(a *appContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\t\/\/ Our handlers now have access to the members of our context struct.\n\t\/\/ e.g. we can call methods on our DB type via err := a.db.GetPosts()\n\tv, _ := json.Marshal(a.metrics)\n\tfmt.Fprintf(w, string(v))\n\treturn 200, nil\n}\n\nfunc staticHandler(rw http.ResponseWriter, req *http.Request) {\n\tvar path string = req.URL.Path\n\t\/\/fmt.Println(path)\n\tif path == \"\" {\n\t\tpath = \"index.html\"\n\t}\n\tif bs, err := Asset(path); err != nil {\n\t\trw.WriteHeader(http.StatusNotFound)\n\t} else {\n\t\tfmt.Fprintf(rw, http.DetectContentType(bs[:]))\n\t\trw.Header().Set(\"Content-Type\", http.DetectContentType(bs[:]))\n\t\tvar reader = bytes.NewBuffer(bs)\n\t\tio.Copy(rw, reader)\n\t}\n}\n\ntype appContext struct {\n\tdb *sqlx.DB\n\tmetrics *MetricList\n}\n\ntype appHandler struct {\n\t*appContext\n\tH func(*appContext, http.ResponseWriter, *http.Request) (int, error)\n}\n\nfunc initWebServer(context *appContext) {\n\thttp.Handle(\"\/api\/stats\", appHandler{context, metricsHandler})\n\thttp.Handle(\"\/\",\n\t\thttp.FileServer(\n\t\t\t&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: \"\"}))\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", *port), nil)\n}\n\nfunc (ah appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Updated to pass ah.appContext as a parameter to our handler type.\n\tstatus, err := ah.H(ah.appContext, w, r)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP %d: %q\", status, err)\n\t\tswitch status {\n\t\tcase http.StatusNotFound:\n\t\t\thttp.NotFound(w, r)\n\t\t\t\/\/ And if we wanted a friendlier error page, we can\n\t\t\t\/\/ now leverage our context instance - e.g.\n\t\t\t\/\/ err := ah.renderTemplate(w, \"http_404.tmpl\", nil)\n\t\tcase http.StatusInternalServerError:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\t}\n\t}\n}\n<commit_msg>Removing Old handmade static handler<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/elazarl\/go-bindata-assetfs\"\n\t\"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/jmoiron\/sqlx\"\n\t_ \"github.com\/rach\/pomod\/Godeps\/_workspace\/src\/github.com\/lib\/pq\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc metricsHandler(a *appContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\t\/\/ Our handlers now have access to the members of our context struct.\n\t\/\/ e.g. we can call methods on our DB type via err := a.db.GetPosts()\n\tv, _ := json.Marshal(a.metrics)\n\tfmt.Fprintf(w, string(v))\n\treturn 200, nil\n}\n\ntype appContext struct {\n\tdb *sqlx.DB\n\tmetrics *MetricList\n}\n\ntype appHandler struct {\n\t*appContext\n\tH func(*appContext, http.ResponseWriter, *http.Request) (int, error)\n}\n\nfunc initWebServer(context *appContext) {\n\thttp.Handle(\"\/api\/stats\", appHandler{context, metricsHandler})\n\thttp.Handle(\"\/\",\n\t\thttp.FileServer(\n\t\t\t&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: \"\"}))\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", *port), nil)\n}\n\nfunc (ah appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Updated to pass ah.appContext as a parameter to our handler type.\n\tstatus, err := ah.H(ah.appContext, w, r)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP %d: %q\", status, err)\n\t\tswitch status {\n\t\tcase http.StatusNotFound:\n\t\t\thttp.NotFound(w, r)\n\t\t\t\/\/ And if we wanted a friendlier error page, we can\n\t\t\t\/\/ now leverage our context instance - e.g.\n\t\t\t\/\/ err := ah.renderTemplate(w, \"http_404.tmpl\", nil)\n\t\tcase http.StatusInternalServerError:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(status), status)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\/\/\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\nvar host = flag.String(\"host\", \"127.0.0.1\", \"Host\")\nvar port = flag.String(\"port\", \"8080\", \"Port\")\n\nfunc IndexHandler(w http.ResponseWriter, r *http.Request) {\n\tisJsonRequest := false\n\n\tif acceptHeaders, ok := r.Header[\"Accept\"]; ok {\n\t\tfor _, acceptHeader := range acceptHeaders {\n\t\t\tif strings.Contains(acceptHeader, \"json\") {\n\t\t\t\tisJsonRequest = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif isJsonRequest {\n\t\tw.Write([]byte(resourceListingJson))\n\t} else {\n\t\thttp.Redirect(w, r, \"\/swagger-ui\/\", http.StatusFound)\n\t}\n}\n\nfunc ApiDescriptionHandler(w http.ResponseWriter, r *http.Request) {\n\tapiKey := strings.Trim(r.RequestURI, \"\/\")\n\n\tif json, ok := apiDescriptionsJson[apiKey]; ok {\n\t\tw.Write([]byte(json))\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ To serve a directory on disk (\/tmp) under an alternate URL\n\t\/\/ path (\/tmpfiles\/), use StripPrefix to modify the request\n\t\/\/ URL's path before the FileServer sees it:\n\thttp.HandleFunc(\"\/\", IndexHandler)\n\thttp.Handle(\"\/swagger-ui\/\", http.StripPrefix(\"\/swagger-ui\/\", http.FileServer(http.Dir(\".\/swagger-ui\"))))\n\n\tfor apiKey, _ := range apiDescriptionsJson {\n\t\thttp.HandleFunc(\"\/\"+apiKey+\"\/\", ApiDescriptionHandler)\n\t}\n\n\tlistenTo := *host + \":\" + *port\n\tlog.Printf(\"Star listen to %s\", listenTo)\n\n\thttp.ListenAndServe(listenTo, http.DefaultServeMux)\n\t\/\/http.ListenAndServe(\":8080\", http.StripPrefix(\"\/swagger-ui\/\", http.FileServer(http.Dir(\".\/swagger-ui\")) )\n}\n<commit_msg>Add \"staticPath\" options to web client<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\/\/\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\nvar host = flag.String(\"host\", \"127.0.0.1\", \"Host\")\nvar port = flag.String(\"port\", \"8080\", \"Port\")\nvar staticContent = flag.String(\"staticPath\", \".\/swagger-ui\", \"Path to folder with Swagger UI\")\n\nfunc IndexHandler(w http.ResponseWriter, r *http.Request) {\n\tisJsonRequest := false\n\n\tif acceptHeaders, ok := r.Header[\"Accept\"]; ok {\n\t\tfor _, acceptHeader := range acceptHeaders {\n\t\t\tif strings.Contains(acceptHeader, \"json\") {\n\t\t\t\tisJsonRequest = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif isJsonRequest {\n\t\tw.Write([]byte(resourceListingJson))\n\t} else {\n\t\thttp.Redirect(w, r, \"\/swagger-ui\/\", http.StatusFound)\n\t}\n}\n\nfunc ApiDescriptionHandler(w http.ResponseWriter, r *http.Request) {\n\tapiKey := strings.Trim(r.RequestURI, \"\/\")\n\n\tif json, ok := apiDescriptionsJson[apiKey]; ok {\n\t\tw.Write([]byte(json))\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ To serve a directory on disk (\/tmp) under an alternate URL\n\t\/\/ path (\/tmpfiles\/), use StripPrefix to modify the request\n\t\/\/ URL's path before the FileServer sees it:\n\thttp.HandleFunc(\"\/\", IndexHandler)\n\thttp.Handle(\"\/swagger-ui\/\", http.StripPrefix(\"\/swagger-ui\/\", http.FileServer(http.Dir(*staticContent))))\n\n\tfor apiKey, _ := range apiDescriptionsJson {\n\t\thttp.HandleFunc(\"\/\"+apiKey+\"\/\", ApiDescriptionHandler)\n\t}\n\n\tlistenTo := *host + \":\" + *port\n\tlog.Printf(\"Star listen to %s\", listenTo)\n\n\thttp.ListenAndServe(listenTo, http.DefaultServeMux)\n\t\/\/http.ListenAndServe(\":8080\", http.StripPrefix(\"\/swagger-ui\/\", http.FileServer(http.Dir(\".\/swagger-ui\")) )\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\tStart(os.Getenv(\"PORT\"))\n}\n\nfunc Start(port string) {\n\thttp.HandleFunc(\"\/\", func(res http.ResponseWriter, req *http.Request) {\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\thandleGet(res, req)\n\t\t\t\/\/\t\tcase \"POST\":\n\t\t\t\/\/\t\t\thandlePost(res, req)\n\t\tdefault:\n\t\t\thttp.Error(res, \"Only GET and POST supported\", http.StatusMethodNotAllowed)\n\t\t}\n\t})\n\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\tlog.Println(\"listening on port:\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\ntype fileInfoResponse struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tType string `json:\"type\"`\n\tPerm int `json:\"permission\"`\n\tModTime time.Time `json:\"updated_at\"`\n}\n\nfunc toFileInfoResponse(fi os.FileInfo) fileInfoResponse {\n\treturn fileInfoResponse{\n\t\tName: fi.Name(),\n\t\tSize: fi.Size(),\n\t\tType: string(fi.Mode().String()[0]),\n\t\tPerm: int(fi.Mode().Perm()),\n\t\tModTime: fi.ModTime(),\n\t}\n}\n\nfunc handleGet(res http.ResponseWriter, req *http.Request) {\n\tpath, err := os.Open(req.URL.Path)\n\tif err != nil {\n\t\thttp.Error(res, \"Error reading path: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpathInfo, err := path.Stat()\n\tif err != nil {\n\t\thttp.Error(res, \"Error reading path info: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif pathInfo.Mode().IsDir() {\n\t\tfileInfos, err := path.Readdir(0)\n\t\tif err != nil {\n\t\t\thttp.Error(res, \"Error reading dir: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfileResponses := []fileInfoResponse{}\n\t\tfor _, fi := range fileInfos {\n\t\t\tfileResponses = append(fileResponses, toFileInfoResponse(fi))\n\t\t}\n\n\t\tres.Header().Set(\"Content-Type\", \"application\/json\") \/\/ TODO allow others\n\t\tfileResponsesJson, err := json.Marshal(fileResponses)\n\t\tif err != nil {\n\t\t\thttp.Error(res, \"Error converting JSON: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tres.Write(fileResponsesJson)\n\t} else if pathInfo.Mode().IsRegular() {\n\t\tio.Copy(res, path)\n\t} else {\n\t\thttp.Error(res, \"Invalid file type: \"+string(pathInfo.Mode().String()[0]), http.StatusBadRequest)\n\t}\n}\n\n\/\/func handlePost(res http.ResponseWriter, req *http.Request) {\n\/\/\tscheme := \"http\"\n\/\/\tif req.TLS != nil || req.Header.Get(\"X-Forwarded-Proto\") == \"https\" {\n\/\/\t\tscheme = \"https\"\n\/\/\t}\n\/\/\n\/\/\tdata, err := ioutil.ReadAll(req.Body)\n\/\/\tif err != nil {\n\/\/\t\tlog.Println(\"post.error.body:\", err)\n\/\/\t\thttp.Error(res, \"Error reading request body: \"+err.Error(), http.StatusBadRequest)\n\/\/\t\treturn\n\/\/\t}\n\/\/\n\/\/\tif err := handleStatusParam(res, req); err != nil {\n\/\/\t\treturn\n\/\/\t}\n\/\/\n\/\/\tres.Header().Set(\"Content-Type\", \"text\/uri-list; charset=utf-8\")\n\/\/\tcontentType := req.Header.Get(\"Content-Type\")\n\/\/\turi := shaas.CreateUri(scheme, req.Host, req.URL.Path, contentType, data)\n\/\/\tfmt.Fprintln(res, uri)\n\/\/}\n<commit_msg>json only<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\tStart(os.Getenv(\"PORT\"))\n}\n\nfunc Start(port string) {\n\thttp.HandleFunc(\"\/\", func(res http.ResponseWriter, req *http.Request) {\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\thandleGet(res, req)\n\t\t\t\/\/\t\tcase \"POST\":\n\t\t\t\/\/\t\t\thandlePost(res, req)\n\t\tdefault:\n\t\t\thttp.Error(res, \"Only GET and POST supported\", http.StatusMethodNotAllowed)\n\t\t}\n\t})\n\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\tlog.Println(\"listening on port:\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\ntype fileInfoResponse struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tType string `json:\"type\"`\n\tPerm int `json:\"permission\"`\n\tModTime time.Time `json:\"updated_at\"`\n}\n\nfunc toFileInfoResponse(fi os.FileInfo) fileInfoResponse {\n\treturn fileInfoResponse{\n\t\tName: fi.Name(),\n\t\tSize: fi.Size(),\n\t\tType: string(fi.Mode().String()[0]),\n\t\tPerm: int(fi.Mode().Perm()),\n\t\tModTime: fi.ModTime(),\n\t}\n}\n\nfunc handleGet(res http.ResponseWriter, req *http.Request) {\n\tpath, err := os.Open(req.URL.Path)\n\tif err != nil {\n\t\thttp.Error(res, \"Error reading path: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpathInfo, err := path.Stat()\n\tif err != nil {\n\t\thttp.Error(res, \"Error reading path info: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif pathInfo.Mode().IsDir() {\n\t\tfileInfos, err := path.Readdir(0)\n\t\tif err != nil {\n\t\t\thttp.Error(res, \"Error reading dir: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfileResponses := []fileInfoResponse{}\n\t\tfor _, fi := range fileInfos {\n\t\t\tfileResponses = append(fileResponses, toFileInfoResponse(fi))\n\t\t}\n\n\t\tres.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfileResponsesJson, err := json.Marshal(fileResponses)\n\t\tif err != nil {\n\t\t\thttp.Error(res, \"Error converting JSON: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tres.Write(fileResponsesJson)\n\t} else if pathInfo.Mode().IsRegular() {\n\t\tio.Copy(res, path)\n\t} else {\n\t\thttp.Error(res, \"Invalid file type: \"+string(pathInfo.Mode().String()[0]), http.StatusBadRequest)\n\t}\n}\n\n\/\/func handlePost(res http.ResponseWriter, req *http.Request) {\n\/\/\tscheme := \"http\"\n\/\/\tif req.TLS != nil || req.Header.Get(\"X-Forwarded-Proto\") == \"https\" {\n\/\/\t\tscheme = \"https\"\n\/\/\t}\n\/\/\n\/\/\tdata, err := ioutil.ReadAll(req.Body)\n\/\/\tif err != nil {\n\/\/\t\tlog.Println(\"post.error.body:\", err)\n\/\/\t\thttp.Error(res, \"Error reading request body: \"+err.Error(), http.StatusBadRequest)\n\/\/\t\treturn\n\/\/\t}\n\/\/\n\/\/\tif err := handleStatusParam(res, req); err != nil {\n\/\/\t\treturn\n\/\/\t}\n\/\/\n\/\/\tres.Header().Set(\"Content-Type\", \"text\/uri-list; charset=utf-8\")\n\/\/\tcontentType := req.Header.Get(\"Content-Type\")\n\/\/\turi := shaas.CreateUri(scheme, req.Host, req.URL.Path, contentType, data)\n\/\/\tfmt.Fprintln(res, uri)\n\/\/}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\n\/*\nPackage wmi provides a WQL interface for WMI on Windows.\n\nExample code to print names of running processes:\n\n\ttype Win32_Process struct {\n\t\tName string\n\t}\n\n\tfunc main() {\n\t\tvar dst []Win32_Process\n\t\tq := wmi.CreateQuery(&dst, \"\")\n\t\terr := wmi.Query(q, &dst)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor i, v := range dst {\n\t\t\tprintln(i, v.Name)\n\t\t}\n\t}\n\n*\/\npackage wmi\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/mattn\/go-ole\"\n\t\"github.com\/mattn\/go-ole\/oleutil\"\n)\n\nvar l = log.New(os.Stdout, \"\", log.LstdFlags)\n\nvar (\n\tErrInvalidEntityType = errors.New(\"wmi: invalid entity type\")\n\tlock sync.Mutex\n)\n\n\/\/ QueryNamespace invokes Query with the given namespace on the local machine.\nfunc QueryNamespace(query string, dst interface{}, namespace string) error {\n\treturn Query(query, dst, nil, namespace)\n}\n\n\/\/ Query runs the WQL query and appends the values to dst.\n\/\/\n\/\/ dst must have type *[]S or *[]*S, for some struct type S. Fields selected in\n\/\/ the query must have the same name in dst. Supported types are all signed and\n\/\/ unsigned integers, time.Time, string, bool, or a pointer to one of those.\n\/\/ Array types are not supported.\n\/\/\n\/\/ By default, the local machine and default namespace are used. These can be\n\/\/ changed using connectServerArgs. See\n\/\/ http:\/\/msdn.microsoft.com\/en-us\/library\/aa393720.aspx for details.\nfunc Query(query string, dst interface{}, connectServerArgs ...interface{}) error {\n\tdv := reflect.ValueOf(dst)\n\tif dv.Kind() != reflect.Ptr || dv.IsNil() {\n\t\treturn ErrInvalidEntityType\n\t}\n\tdv = dv.Elem()\n\tmat, elemType := checkMultiArg(dv)\n\tif mat == multiArgTypeInvalid {\n\t\treturn ErrInvalidEntityType\n\t}\n\n\tlock.Lock()\n\tdefer lock.Unlock()\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\terr := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)\n\tif err != nil {\n\t\toleerr := err.(*ole.OleError)\n\t\t\/\/ S_FALSE = 0x00000001 \/\/ CoInitializeEx was already called on this thread\n\t\tif oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Only invoke CoUninitialize if the thread was not initizlied before.\n\t\t\/\/ This will allow other go packages based on go-ole play along\n\t\t\/\/ with this library.\n\t\tdefer ole.CoUninitialize()\n\t}\n\n\tunknown, err := oleutil.CreateObject(\"WbemScripting.SWbemLocator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer unknown.Release()\n\n\twmi, err := unknown.QueryInterface(ole.IID_IDispatch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer wmi.Release()\n\n\t\/\/ service is a SWbemServices\n\tserviceRaw, err := oleutil.CallMethod(wmi, \"ConnectServer\", connectServerArgs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tservice := serviceRaw.ToIDispatch()\n\tdefer serviceRaw.Clear()\n\n\t\/\/ result is a SWBemObjectSet\n\tresultRaw, err := oleutil.CallMethod(service, \"ExecQuery\", query)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresult := resultRaw.ToIDispatch()\n\tdefer resultRaw.Clear()\n\n\tcount, err := oleInt64(result, \"Count\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize a slice with Count capacity\n\tdv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))\n\n\tvar errFieldMismatch error\n\tfor i := int64(0); i < count; i++ {\n\t\terr := func() error {\n\t\t\t\/\/ item is a SWbemObject, but really a Win32_Process\n\t\t\titemRaw, err := oleutil.CallMethod(result, \"ItemIndex\", i)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titem := itemRaw.ToIDispatch()\n\t\t\tdefer itemRaw.Clear()\n\n\t\t\tev := reflect.New(elemType)\n\t\t\tif err = loadEntity(ev.Interface(), item); err != nil {\n\t\t\t\tif _, ok := err.(*ErrFieldMismatch); ok {\n\t\t\t\t\t\/\/ We continue loading entities even in the face of field mismatch errors.\n\t\t\t\t\t\/\/ If we encounter any other error, that other error is returned. Otherwise,\n\t\t\t\t\t\/\/ an ErrFieldMismatch is returned.\n\t\t\t\t\terrFieldMismatch = err\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif mat != multiArgTypeStructPtr {\n\t\t\t\tev = ev.Elem()\n\t\t\t}\n\t\t\tdv.Set(reflect.Append(dv, ev))\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn errFieldMismatch\n}\n\n\/\/ ErrFieldMismatch is returned when a field is to be loaded into a different\n\/\/ type than the one it was stored from, or when a field is missing or\n\/\/ unexported in the destination struct.\n\/\/ StructType is the type of the struct pointed to by the destination argument.\ntype ErrFieldMismatch struct {\n\tStructType reflect.Type\n\tFieldName string\n\tReason string\n}\n\nfunc (e *ErrFieldMismatch) Error() string {\n\treturn fmt.Sprintf(\"wmi: cannot load field %q into a %q: %s\",\n\t\te.FieldName, e.StructType, e.Reason)\n}\n\nvar timeType = reflect.TypeOf(time.Time{})\n\n\/\/ loadEntity loads a SWbemObject into a struct pointer.\nfunc loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {\n\tv := reflect.ValueOf(dst).Elem()\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tf := v.Field(i)\n\t\tof := f\n\t\tisPtr := f.Kind() == reflect.Ptr\n\t\tif isPtr {\n\t\t\tptr := reflect.New(f.Type().Elem())\n\t\t\tf.Set(ptr)\n\t\t\tf = f.Elem()\n\t\t}\n\t\tn := v.Type().Field(i).Name\n\t\tif !f.CanSet() {\n\t\t\treturn &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: \"CanSet() is false\",\n\t\t\t}\n\t\t}\n\t\tprop, err := oleutil.GetProperty(src, n)\n\t\tif err != nil {\n\t\t\terrFieldMismatch = &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: \"no such struct field\",\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tdefer prop.Clear()\n\n\t\tswitch val := prop.Value().(type) {\n\t\tcase int, int64:\n\t\t\tvar v int64\n\t\t\tswitch val := val.(type) {\n\t\t\tcase int:\n\t\t\t\tv = int64(val)\n\t\t\tcase int64:\n\t\t\t\tv = val\n\t\t\tdefault:\n\t\t\t\tpanic(\"unexpected type\")\n\t\t\t}\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tf.SetInt(v)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tf.SetUint(uint64(v))\n\t\t\tdefault:\n\t\t\t\treturn &ErrFieldMismatch{\n\t\t\t\t\tStructType: of.Type(),\n\t\t\t\t\tFieldName: n,\n\t\t\t\t\tReason: \"not an integer class\",\n\t\t\t\t}\n\t\t\t}\n\t\tcase string:\n\t\t\tiv, err := strconv.ParseInt(val, 10, 64)\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tf.SetString(val)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf.SetInt(iv)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf.SetUint(uint64(iv))\n\t\t\tcase reflect.Struct:\n\t\t\t\tswitch f.Type() {\n\t\t\t\tcase timeType:\n\t\t\t\t\tif len(val) == 25 {\n\t\t\t\t\t\tmins, err := strconv.Atoi(val[22:])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = val[:22] + fmt.Sprintf(\"%02d%02d\", mins\/60, mins%60)\n\t\t\t\t\t}\n\t\t\t\t\tt, err := time.Parse(\"20060102150405.000000-0700\", val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tf.Set(reflect.ValueOf(t))\n\t\t\t\t}\n\t\t\t}\n\t\tcase bool:\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\tf.SetBool(val)\n\t\t\tdefault:\n\t\t\t\treturn &ErrFieldMismatch{\n\t\t\t\t\tStructType: of.Type(),\n\t\t\t\t\tFieldName: n,\n\t\t\t\t\tReason: \"not a bool\",\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\ttypeof := reflect.TypeOf(val)\n\t\t\tif isPtr && typeof == nil {\n\t\t\t\tof.Set(reflect.Zero(of.Type()))\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: fmt.Sprintf(\"unsupported type (%T)\", val),\n\t\t\t}\n\t\t}\n\t}\n\treturn errFieldMismatch\n}\n\ntype multiArgType int\n\nconst (\n\tmultiArgTypeInvalid multiArgType = iota\n\tmultiArgTypeStruct\n\tmultiArgTypeStructPtr\n)\n\n\/\/ checkMultiArg checks that v has type []S, []*S for some struct type S.\n\/\/\n\/\/ It returns what category the slice's elements are, and the reflect.Type\n\/\/ that represents S.\nfunc checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {\n\tif v.Kind() != reflect.Slice {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\telemType = v.Type().Elem()\n\tswitch elemType.Kind() {\n\tcase reflect.Struct:\n\t\treturn multiArgTypeStruct, elemType\n\tcase reflect.Ptr:\n\t\telemType = elemType.Elem()\n\t\tif elemType.Kind() == reflect.Struct {\n\t\t\treturn multiArgTypeStructPtr, elemType\n\t\t}\n\t}\n\treturn multiArgTypeInvalid, nil\n}\n\nfunc oleInt64(item *ole.IDispatch, prop string) (int64, error) {\n\tv, err := oleutil.GetProperty(item, prop)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer v.Clear()\n\n\ti := int64(v.Val)\n\treturn i, nil\n}\n\n\/\/ CreateQuery returns a WQL query string that queries all columns of src. where\n\/\/ is an optional string that is appended to the query, to be used with WHERE\n\/\/ clauses. In such a case, the \"WHERE\" string should appear at the beginning.\nfunc CreateQuery(src interface{}, where string) string {\n\tvar b bytes.Buffer\n\tb.WriteString(\"SELECT \")\n\ts := reflect.Indirect(reflect.ValueOf(src))\n\tt := s.Type()\n\tif s.Kind() == reflect.Slice {\n\t\tt = t.Elem()\n\t}\n\tif t.Kind() != reflect.Struct {\n\t\treturn \"\"\n\t}\n\tvar fields []string\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfields = append(fields, t.Field(i).Name)\n\t}\n\tb.WriteString(strings.Join(fields, \", \"))\n\tb.WriteString(\" FROM \")\n\tb.WriteString(t.Name())\n\tb.WriteString(\" \" + where)\n\treturn b.String()\n}\n<commit_msg>Add behaviour control for nil field value handling<commit_after>\/\/ +build windows\n\n\/*\nPackage wmi provides a WQL interface for WMI on Windows.\n\nExample code to print names of running processes:\n\n\ttype Win32_Process struct {\n\t\tName string\n\t}\n\n\tfunc main() {\n\t\tvar dst []Win32_Process\n\t\tq := wmi.CreateQuery(&dst, \"\")\n\t\terr := wmi.Query(q, &dst)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor i, v := range dst {\n\t\t\tprintln(i, v.Name)\n\t\t}\n\t}\n\n*\/\npackage wmi\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/mattn\/go-ole\"\n\t\"github.com\/mattn\/go-ole\/oleutil\"\n)\n\nvar l = log.New(os.Stdout, \"\", log.LstdFlags)\n\nvar (\n\tErrInvalidEntityType = errors.New(\"wmi: invalid entity type\")\n\tlock sync.Mutex\n)\n\n\/\/ QueryNamespace invokes Query with the given namespace on the local machine.\nfunc QueryNamespace(query string, dst interface{}, namespace string) error {\n\treturn Query(query, dst, nil, namespace)\n}\n\n\/\/ Query runs the WQL query and appends the values to dst.\n\/\/\n\/\/ dst must have type *[]S or *[]*S, for some struct type S. Fields selected in\n\/\/ the query must have the same name in dst. Supported types are all signed and\n\/\/ unsigned integers, time.Time, string, bool, or a pointer to one of those.\n\/\/ Array types are not supported.\n\/\/\n\/\/ By default, the local machine and default namespace are used. These can be\n\/\/ changed using connectServerArgs. See\n\/\/ http:\/\/msdn.microsoft.com\/en-us\/library\/aa393720.aspx for details.\n\/\/\n\/\/ Query is a wrapper around DefaultClient.Query.\nfunc Query(query string, dst interface{}, connectServerArgs ...interface{}) error {\n\treturn DefaultClient.Query(query, dst, connectServerArgs...)\n}\n\n\/\/ A Client is an WMI query client.\n\/\/\n\/\/ Its zero value (DefaultClient) is a usable client.\ntype Client struct {\n\t\/\/ NonePtrZero specifies if nil values for fields which aren't pointers\n\t\/\/ should be returned as the field types zero value.\n\t\/\/\n\t\/\/ Setting this to true allows stucts without pointer fields to be used\n\t\/\/ without the risk failure should a nil value returned from WMI.\n\tNonePtrZero bool\n\n\t\/\/ PtrNil specifies if nil values for pointer fields should be returned\n\t\/\/ as nil.\n\t\/\/\n\t\/\/ Setting this to true will set pointer fields to nil where WMI\n\t\/\/ returned nil, otherwise the types zero value will be returned.\n\tPtrNil bool\n}\n\n\/\/ DefaultClient is the default Client and is used by Query, QueryNamespace\nvar DefaultClient = &Client{}\n\n\/\/ Query runs the WQL query and appends the values to dst.\n\/\/\n\/\/ dst must have type *[]S or *[]*S, for some struct type S. Fields selected in\n\/\/ the query must have the same name in dst. Supported types are all signed and\n\/\/ unsigned integers, time.Time, string, bool, or a pointer to one of those.\n\/\/ Array types are not supported.\n\/\/\n\/\/ By default, the local machine and default namespace are used. These can be\n\/\/ changed using connectServerArgs. See\n\/\/ http:\/\/msdn.microsoft.com\/en-us\/library\/aa393720.aspx for details.\nfunc (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {\n\tdv := reflect.ValueOf(dst)\n\tif dv.Kind() != reflect.Ptr || dv.IsNil() {\n\t\treturn ErrInvalidEntityType\n\t}\n\tdv = dv.Elem()\n\tmat, elemType := checkMultiArg(dv)\n\tif mat == multiArgTypeInvalid {\n\t\treturn ErrInvalidEntityType\n\t}\n\n\tlock.Lock()\n\tdefer lock.Unlock()\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\terr := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)\n\tif err != nil {\n\t\toleerr := err.(*ole.OleError)\n\t\t\/\/ S_FALSE = 0x00000001 \/\/ CoInitializeEx was already called on this thread\n\t\tif oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Only invoke CoUninitialize if the thread was not initizlied before.\n\t\t\/\/ This will allow other go packages based on go-ole play along\n\t\t\/\/ with this library.\n\t\tdefer ole.CoUninitialize()\n\t}\n\n\tunknown, err := oleutil.CreateObject(\"WbemScripting.SWbemLocator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer unknown.Release()\n\n\twmi, err := unknown.QueryInterface(ole.IID_IDispatch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer wmi.Release()\n\n\t\/\/ service is a SWbemServices\n\tserviceRaw, err := oleutil.CallMethod(wmi, \"ConnectServer\", connectServerArgs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tservice := serviceRaw.ToIDispatch()\n\tdefer serviceRaw.Clear()\n\n\t\/\/ result is a SWBemObjectSet\n\tresultRaw, err := oleutil.CallMethod(service, \"ExecQuery\", query)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresult := resultRaw.ToIDispatch()\n\tdefer resultRaw.Clear()\n\n\tcount, err := oleInt64(result, \"Count\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize a slice with Count capacity\n\tdv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))\n\n\tvar errFieldMismatch error\n\tfor i := int64(0); i < count; i++ {\n\t\terr := func() error {\n\t\t\t\/\/ item is a SWbemObject, but really a Win32_Process\n\t\t\titemRaw, err := oleutil.CallMethod(result, \"ItemIndex\", i)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titem := itemRaw.ToIDispatch()\n\t\t\tdefer itemRaw.Clear()\n\n\t\t\tev := reflect.New(elemType)\n\t\t\tif err = c.loadEntity(ev.Interface(), item); err != nil {\n\t\t\t\tif _, ok := err.(*ErrFieldMismatch); ok {\n\t\t\t\t\t\/\/ We continue loading entities even in the face of field mismatch errors.\n\t\t\t\t\t\/\/ If we encounter any other error, that other error is returned. Otherwise,\n\t\t\t\t\t\/\/ an ErrFieldMismatch is returned.\n\t\t\t\t\terrFieldMismatch = err\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif mat != multiArgTypeStructPtr {\n\t\t\t\tev = ev.Elem()\n\t\t\t}\n\t\t\tdv.Set(reflect.Append(dv, ev))\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn errFieldMismatch\n}\n\n\/\/ ErrFieldMismatch is returned when a field is to be loaded into a different\n\/\/ type than the one it was stored from, or when a field is missing or\n\/\/ unexported in the destination struct.\n\/\/ StructType is the type of the struct pointed to by the destination argument.\ntype ErrFieldMismatch struct {\n\tStructType reflect.Type\n\tFieldName string\n\tReason string\n}\n\nfunc (e *ErrFieldMismatch) Error() string {\n\treturn fmt.Sprintf(\"wmi: cannot load field %q into a %q: %s\",\n\t\te.FieldName, e.StructType, e.Reason)\n}\n\nvar timeType = reflect.TypeOf(time.Time{})\n\n\/\/ loadEntity loads a SWbemObject into a struct pointer.\nfunc (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {\n\tv := reflect.ValueOf(dst).Elem()\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tf := v.Field(i)\n\t\tof := f\n\t\tisPtr := f.Kind() == reflect.Ptr\n\t\tif isPtr {\n\t\t\tptr := reflect.New(f.Type().Elem())\n\t\t\tf.Set(ptr)\n\t\t\tf = f.Elem()\n\t\t}\n\t\tn := v.Type().Field(i).Name\n\t\tif !f.CanSet() {\n\t\t\treturn &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: \"CanSet() is false\",\n\t\t\t}\n\t\t}\n\t\tprop, err := oleutil.GetProperty(src, n)\n\t\tif err != nil {\n\t\t\terrFieldMismatch = &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: \"no such struct field\",\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tdefer prop.Clear()\n\n\t\tswitch val := prop.Value().(type) {\n\t\tcase int, int64:\n\t\t\tvar v int64\n\t\t\tswitch val := val.(type) {\n\t\t\tcase int:\n\t\t\t\tv = int64(val)\n\t\t\tcase int64:\n\t\t\t\tv = val\n\t\t\tdefault:\n\t\t\t\tpanic(\"unexpected type\")\n\t\t\t}\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tf.SetInt(v)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tf.SetUint(uint64(v))\n\t\t\tdefault:\n\t\t\t\treturn &ErrFieldMismatch{\n\t\t\t\t\tStructType: of.Type(),\n\t\t\t\t\tFieldName: n,\n\t\t\t\t\tReason: \"not an integer class\",\n\t\t\t\t}\n\t\t\t}\n\t\tcase string:\n\t\t\tiv, err := strconv.ParseInt(val, 10, 64)\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tf.SetString(val)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf.SetInt(iv)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf.SetUint(uint64(iv))\n\t\t\tcase reflect.Struct:\n\t\t\t\tswitch f.Type() {\n\t\t\t\tcase timeType:\n\t\t\t\t\tif len(val) == 25 {\n\t\t\t\t\t\tmins, err := strconv.Atoi(val[22:])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = val[:22] + fmt.Sprintf(\"%02d%02d\", mins\/60, mins%60)\n\t\t\t\t\t}\n\t\t\t\t\tt, err := time.Parse(\"20060102150405.000000-0700\", val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tf.Set(reflect.ValueOf(t))\n\t\t\t\t}\n\t\t\t}\n\t\tcase bool:\n\t\t\tswitch f.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\tf.SetBool(val)\n\t\t\tdefault:\n\t\t\t\treturn &ErrFieldMismatch{\n\t\t\t\t\tStructType: of.Type(),\n\t\t\t\t\tFieldName: n,\n\t\t\t\t\tReason: \"not a bool\",\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\ttypeof := reflect.TypeOf(val)\n\t\t\tif typeof == nil && (isPtr || c.NonePtrZero) {\n\t\t\t\tif (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {\n\t\t\t\t\tof.Set(reflect.Zero(of.Type()))\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn &ErrFieldMismatch{\n\t\t\t\tStructType: of.Type(),\n\t\t\t\tFieldName: n,\n\t\t\t\tReason: fmt.Sprintf(\"unsupported type (%T)\", val),\n\t\t\t}\n\t\t}\n\t}\n\treturn errFieldMismatch\n}\n\ntype multiArgType int\n\nconst (\n\tmultiArgTypeInvalid multiArgType = iota\n\tmultiArgTypeStruct\n\tmultiArgTypeStructPtr\n)\n\n\/\/ checkMultiArg checks that v has type []S, []*S for some struct type S.\n\/\/\n\/\/ It returns what category the slice's elements are, and the reflect.Type\n\/\/ that represents S.\nfunc checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {\n\tif v.Kind() != reflect.Slice {\n\t\treturn multiArgTypeInvalid, nil\n\t}\n\telemType = v.Type().Elem()\n\tswitch elemType.Kind() {\n\tcase reflect.Struct:\n\t\treturn multiArgTypeStruct, elemType\n\tcase reflect.Ptr:\n\t\telemType = elemType.Elem()\n\t\tif elemType.Kind() == reflect.Struct {\n\t\t\treturn multiArgTypeStructPtr, elemType\n\t\t}\n\t}\n\treturn multiArgTypeInvalid, nil\n}\n\nfunc oleInt64(item *ole.IDispatch, prop string) (int64, error) {\n\tv, err := oleutil.GetProperty(item, prop)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer v.Clear()\n\n\ti := int64(v.Val)\n\treturn i, nil\n}\n\n\/\/ CreateQuery returns a WQL query string that queries all columns of src. where\n\/\/ is an optional string that is appended to the query, to be used with WHERE\n\/\/ clauses. In such a case, the \"WHERE\" string should appear at the beginning.\nfunc CreateQuery(src interface{}, where string) string {\n\tvar b bytes.Buffer\n\tb.WriteString(\"SELECT \")\n\ts := reflect.Indirect(reflect.ValueOf(src))\n\tt := s.Type()\n\tif s.Kind() == reflect.Slice {\n\t\tt = t.Elem()\n\t}\n\tif t.Kind() != reflect.Struct {\n\t\treturn \"\"\n\t}\n\tvar fields []string\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfields = append(fields, t.Field(i).Name)\n\t}\n\tb.WriteString(strings.Join(fields, \", \"))\n\tb.WriteString(\" FROM \")\n\tb.WriteString(t.Name())\n\tb.WriteString(\" \" + where)\n\treturn b.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package goque\n\nimport (\n\t\"encoding\/binary\"\n)\n\n\/\/ Item represents an entry in either a stack or queue.\ntype Item struct {\n\tID uint64\n\tKey []byte\n\tValue []byte\n}\n\n\/\/ ToString returns the item value as a string.\nfunc (i *Item) ToString() string {\n\treturn string(i.Value)\n}\n\n\/\/ PriorityItem represents an entry in a priority queue.\ntype PriorityItem struct {\n\tID uint64\n\tPriority uint8\n\tKey []byte\n\tValue []byte\n}\n\n\/\/ ToString returns the item value as a string.\n\/\/func (pi *PriorityItem) ToString() string {}\n\n\/\/ NewItem creates a new item for use with a stack or queue.\nfunc NewItem(value []byte) *Item {\n\treturn &Item{Value: value}\n}\n\n\/\/ NewItemString is a helper function for NewItem that accepts a\n\/\/ value as a string rather than a byte slice.\nfunc NewItemString(value string) *Item {\n\treturn NewItem([]byte(value))\n}\n\n\/\/ idToKey converts and returns the given ID to a key.\nfunc idToKey(id uint64) []byte {\n\tkey := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(key, id)\n\treturn key\n}\n\n\/\/ keyToID converts and returns the given key to an ID.\nfunc keyToID(key []byte) uint64 {\n\treturn binary.BigEndian.Uint64(key)\n}\n<commit_msg>Changed function position back in item.go<commit_after>package goque\n\nimport (\n\t\"encoding\/binary\"\n)\n\n\/\/ Item represents an entry in either a stack or queue.\ntype Item struct {\n\tID uint64\n\tKey []byte\n\tValue []byte\n}\n\n\/\/ NewItem creates a new item for use with a stack or queue.\nfunc NewItem(value []byte) *Item {\n\treturn &Item{Value: value}\n}\n\n\/\/ NewItemString is a helper function for NewItem that accepts a\n\/\/ value as a string rather than a byte slice.\nfunc NewItemString(value string) *Item {\n\treturn NewItem([]byte(value))\n}\n\n\/\/ ToString returns the item value as a string.\nfunc (i *Item) ToString() string {\n\treturn string(i.Value)\n}\n\n\/\/ PriorityItem represents an entry in a priority queue.\ntype PriorityItem struct {\n\tID uint64\n\tPriority uint8\n\tKey []byte\n\tValue []byte\n}\n\n\/\/ ToString returns the item value as a string.\n\/\/func (pi *PriorityItem) ToString() string {}\n\n\/\/ idToKey converts and returns the given ID to a key.\nfunc idToKey(id uint64) []byte {\n\tkey := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(key, id)\n\treturn key\n}\n\n\/\/ keyToID converts and returns the given key to an ID.\nfunc keyToID(key []byte) uint64 {\n\treturn binary.BigEndian.Uint64(key)\n}\n<|endoftext|>"} {"text":"<commit_before>package gkvlite\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ A persistable item.\ntype Item struct {\n\tTransient unsafe.Pointer \/\/ For any ephemeral data; atomic CAS recommended.\n\tKey, Val []byte \/\/ Val may be nil if not fetched into memory yet.\n\tPriority int32 \/\/ Use rand.Int31() for probabilistic balancing.\n}\n\n\/\/ A persistable item and its persistence location.\ntype itemLoc struct {\n\tloc unsafe.Pointer \/\/ *ploc - can be nil if item is dirty (not yet persisted).\n\titem unsafe.Pointer \/\/ *Item - can be nil if item is not fetched into memory yet.\n}\n\nvar empty_itemLoc = &itemLoc{}\n\n\/\/ Number of Key bytes plus number of Val bytes.\nfunc (i *Item) NumBytes(c *Collection) int {\n\treturn len(i.Key) + i.NumValBytes(c)\n}\n\nfunc (i *Item) NumValBytes(c *Collection) int {\n\tif c.store.callbacks.ItemValLength != nil {\n\t\treturn c.store.callbacks.ItemValLength(c, i)\n\t}\n\treturn len(i.Val)\n}\n\nfunc (i *Item) Copy() *Item {\n\treturn &Item{\n\t\tKey: i.Key,\n\t\tVal: i.Val,\n\t\tPriority: i.Priority,\n\t\tTransient: i.Transient,\n\t}\n}\n\nfunc (i *itemLoc) Loc() *ploc {\n\treturn (*ploc)(atomic.LoadPointer(&i.loc))\n}\n\nfunc (i *itemLoc) Item() *Item {\n\treturn (*Item)(atomic.LoadPointer(&i.item))\n}\n\nfunc (i *itemLoc) Copy(src *itemLoc) {\n\tif src == nil {\n\t\ti.Copy(empty_itemLoc)\n\t\treturn\n\t}\n\tatomic.StorePointer(&i.loc, unsafe.Pointer(src.Loc()))\n\tatomic.StorePointer(&i.item, unsafe.Pointer(src.Item()))\n}\n\nfunc (i *itemLoc) write(c *Collection) (err error) {\n\tif i.Loc().isEmpty() {\n\t\tiItem := i.Item()\n\t\tif iItem == nil {\n\t\t\treturn errors.New(\"itemLoc.write with nil item\")\n\t\t}\n\t\tif c.store.callbacks.BeforeItemWrite != nil {\n\t\t\tiItem, err = c.store.callbacks.BeforeItemWrite(c, iItem)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\toffset := atomic.LoadInt64(&c.store.size)\n\t\thlength := 4 + 2 + 4 + 4 + len(iItem.Key)\n\t\tvlength := iItem.NumValBytes(c)\n\t\tilength := hlength + vlength\n\t\tb := make([]byte, hlength)\n\t\tpos := 0\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(ilength))\n\t\tpos += 4\n\t\tbinary.BigEndian.PutUint16(b[pos:pos+2], uint16(len(iItem.Key)))\n\t\tpos += 2\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(vlength))\n\t\tpos += 4\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(iItem.Priority))\n\t\tpos += 4\n\t\tpos += copy(b[pos:], iItem.Key)\n\t\tif pos != hlength {\n\t\t\treturn fmt.Errorf(\"itemLoc.write() pos: %v didn't match hlength: %v\",\n\t\t\t\tpos, hlength)\n\t\t}\n\t\tif _, err := c.store.file.WriteAt(b, offset); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr := c.store.ItemValWrite(c, iItem, c.store.file, offset+int64(pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tatomic.StoreInt64(&c.store.size, offset+int64(ilength))\n\t\tatomic.StorePointer(&i.loc,\n\t\t\tunsafe.Pointer(&ploc{Offset: offset, Length: uint32(ilength)}))\n\t}\n\treturn nil\n}\n\nfunc (iloc *itemLoc) read(c *Collection, withValue bool) (i *Item, err error) {\n\tif iloc == nil {\n\t\treturn nil, nil\n\t}\n\ti = iloc.Item()\n\tif i == nil || (i.Val == nil && withValue) {\n\t\tloc := iloc.Loc()\n\t\tif loc.isEmpty() {\n\t\t\treturn nil, nil\n\t\t}\n\t\thdrLength := 4 + 2 + 4 + 4\n\t\tif loc.Length < uint32(hdrLength) {\n\t\t\treturn nil, fmt.Errorf(\"unexpected item loc.Length: %v < %v\",\n\t\t\t\tloc.Length, hdrLength)\n\t\t}\n\t\tb := make([]byte, hdrLength)\n\t\tif _, err := c.store.file.ReadAt(b, loc.Offset); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti = &Item{}\n\t\tpos := 0\n\t\tlength := binary.BigEndian.Uint32(b[pos : pos+4])\n\t\tpos += 4\n\t\tkeyLength := binary.BigEndian.Uint16(b[pos : pos+2])\n\t\tpos += 2\n\t\tvalLength := binary.BigEndian.Uint32(b[pos : pos+4])\n\t\tpos += 4\n\t\ti.Priority = int32(binary.BigEndian.Uint32(b[pos : pos+4]))\n\t\tpos += 4\n\t\tif length != uint32(hdrLength)+uint32(keyLength)+valLength {\n\t\t\treturn nil, errors.New(\"mismatched itemLoc lengths\")\n\t\t}\n\t\tif pos != hdrLength {\n\t\t\treturn nil, fmt.Errorf(\"read pos != hdrLength, %v != %v\", pos, hdrLength)\n\t\t}\n\t\ti.Key = make([]byte, keyLength)\n\t\tif _, err := c.store.file.ReadAt(i.Key,\n\t\t\tloc.Offset+int64(hdrLength)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif withValue {\n\t\t\terr := c.store.ItemValRead(c, i, c.store.file,\n\t\t\t\tloc.Offset+int64(hdrLength)+int64(keyLength), valLength)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif c.store.callbacks.AfterItemRead != nil {\n\t\t\ti, err = c.store.callbacks.AfterItemRead(c, i)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tatomic.StorePointer(&iloc.item, unsafe.Pointer(i))\n\t}\n\treturn i, nil\n}\n<commit_msg>Const itemLoc_hdrLength.<commit_after>package gkvlite\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ A persistable item.\ntype Item struct {\n\tTransient unsafe.Pointer \/\/ For any ephemeral data; atomic CAS recommended.\n\tKey, Val []byte \/\/ Val may be nil if not fetched into memory yet.\n\tPriority int32 \/\/ Use rand.Int31() for probabilistic balancing.\n}\n\n\/\/ A persistable item and its persistence location.\ntype itemLoc struct {\n\tloc unsafe.Pointer \/\/ *ploc - can be nil if item is dirty (not yet persisted).\n\titem unsafe.Pointer \/\/ *Item - can be nil if item is not fetched into memory yet.\n}\n\nvar empty_itemLoc = &itemLoc{}\n\n\/\/ Number of Key bytes plus number of Val bytes.\nfunc (i *Item) NumBytes(c *Collection) int {\n\treturn len(i.Key) + i.NumValBytes(c)\n}\n\nfunc (i *Item) NumValBytes(c *Collection) int {\n\tif c.store.callbacks.ItemValLength != nil {\n\t\treturn c.store.callbacks.ItemValLength(c, i)\n\t}\n\treturn len(i.Val)\n}\n\nfunc (i *Item) Copy() *Item {\n\treturn &Item{\n\t\tKey: i.Key,\n\t\tVal: i.Val,\n\t\tPriority: i.Priority,\n\t\tTransient: i.Transient,\n\t}\n}\n\nfunc (i *itemLoc) Loc() *ploc {\n\treturn (*ploc)(atomic.LoadPointer(&i.loc))\n}\n\nfunc (i *itemLoc) Item() *Item {\n\treturn (*Item)(atomic.LoadPointer(&i.item))\n}\n\nfunc (i *itemLoc) Copy(src *itemLoc) {\n\tif src == nil {\n\t\ti.Copy(empty_itemLoc)\n\t\treturn\n\t}\n\tatomic.StorePointer(&i.loc, unsafe.Pointer(src.Loc()))\n\tatomic.StorePointer(&i.item, unsafe.Pointer(src.Item()))\n}\n\nconst itemLoc_hdrLength int = 4 + 2 + 4 + 4\n\nfunc (i *itemLoc) write(c *Collection) (err error) {\n\tif i.Loc().isEmpty() {\n\t\tiItem := i.Item()\n\t\tif iItem == nil {\n\t\t\treturn errors.New(\"itemLoc.write with nil item\")\n\t\t}\n\t\tif c.store.callbacks.BeforeItemWrite != nil {\n\t\t\tiItem, err = c.store.callbacks.BeforeItemWrite(c, iItem)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\toffset := atomic.LoadInt64(&c.store.size)\n\t\thlength := itemLoc_hdrLength + len(iItem.Key)\n\t\tvlength := iItem.NumValBytes(c)\n\t\tilength := hlength + vlength\n\t\tb := make([]byte, hlength)\n\t\tpos := 0\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(ilength))\n\t\tpos += 4\n\t\tbinary.BigEndian.PutUint16(b[pos:pos+2], uint16(len(iItem.Key)))\n\t\tpos += 2\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(vlength))\n\t\tpos += 4\n\t\tbinary.BigEndian.PutUint32(b[pos:pos+4], uint32(iItem.Priority))\n\t\tpos += 4\n\t\tpos += copy(b[pos:], iItem.Key)\n\t\tif pos != hlength {\n\t\t\treturn fmt.Errorf(\"itemLoc.write() pos: %v didn't match hlength: %v\",\n\t\t\t\tpos, hlength)\n\t\t}\n\t\tif _, err := c.store.file.WriteAt(b, offset); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr := c.store.ItemValWrite(c, iItem, c.store.file, offset+int64(pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tatomic.StoreInt64(&c.store.size, offset+int64(ilength))\n\t\tatomic.StorePointer(&i.loc,\n\t\t\tunsafe.Pointer(&ploc{Offset: offset, Length: uint32(ilength)}))\n\t}\n\treturn nil\n}\n\nfunc (iloc *itemLoc) read(c *Collection, withValue bool) (i *Item, err error) {\n\tif iloc == nil {\n\t\treturn nil, nil\n\t}\n\ti = iloc.Item()\n\tif i == nil || (i.Val == nil && withValue) {\n\t\tloc := iloc.Loc()\n\t\tif loc.isEmpty() {\n\t\t\treturn nil, nil\n\t\t}\n\t\tif loc.Length < uint32(itemLoc_hdrLength) {\n\t\t\treturn nil, fmt.Errorf(\"unexpected item loc.Length: %v < %v\",\n\t\t\t\tloc.Length, itemLoc_hdrLength)\n\t\t}\n\t\tb := make([]byte, itemLoc_hdrLength)\n\t\tif _, err := c.store.file.ReadAt(b, loc.Offset); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti = &Item{}\n\t\tpos := 0\n\t\tlength := binary.BigEndian.Uint32(b[pos : pos+4])\n\t\tpos += 4\n\t\tkeyLength := binary.BigEndian.Uint16(b[pos : pos+2])\n\t\tpos += 2\n\t\tvalLength := binary.BigEndian.Uint32(b[pos : pos+4])\n\t\tpos += 4\n\t\ti.Priority = int32(binary.BigEndian.Uint32(b[pos : pos+4]))\n\t\tpos += 4\n\t\tif length != uint32(itemLoc_hdrLength)+uint32(keyLength)+valLength {\n\t\t\treturn nil, errors.New(\"mismatched itemLoc lengths\")\n\t\t}\n\t\tif pos != itemLoc_hdrLength {\n\t\t\treturn nil, fmt.Errorf(\"read pos != itemLoc_hdrLength, %v != %v\",\n\t\t\t\tpos, itemLoc_hdrLength)\n\t\t}\n\t\ti.Key = make([]byte, keyLength)\n\t\tif _, err := c.store.file.ReadAt(i.Key,\n\t\t\tloc.Offset+int64(itemLoc_hdrLength)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif withValue {\n\t\t\terr := c.store.ItemValRead(c, i, c.store.file,\n\t\t\t\tloc.Offset+int64(itemLoc_hdrLength)+int64(keyLength), valLength)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif c.store.callbacks.AfterItemRead != nil {\n\t\t\ti, err = c.store.callbacks.AfterItemRead(c, i)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tatomic.StorePointer(&iloc.item, unsafe.Pointer(i))\n\t}\n\treturn i, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package websocket\n\nimport (\n\t\"context\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pkg\/errors\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\tconnWriteTimeout = 10 * time.Second\n\n\t\/\/ Time allowed to read a message from the peer.\n\tconnReadTimeout = 10 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than connReadTimeout.\n\tconnPingPeriod = 5 * time.Second\n\n\t\/\/ Maximum message size allowed from peer.\n\tconnMaxMessageSize = 2048\n)\n\n\/\/ WSConnection is an adapter for a websocket Conn implementing the\n\/\/ network.connection interface.\ntype WSConnection struct {\n\t\/\/ The websocket connection used\n\twsConn *websocket.Conn\n\t\/\/ A mutex that must be held to write to the wsConn. This mutex\n\t\/\/ is required as our read spawns an internal ping-writing goroutine.\n\twriteMu sync.Mutex\n\t\/\/ The first error returned from reading a message from the\n\t\/\/ websocket connection.\n\treadError error\n}\n\n\/\/ newWSConnection creates a new WSConnection, wrapping the provided wsConn.\nfunc newWSConnection(wsConn *websocket.Conn) (*WSConnection, error) {\n\tif wsConn == nil {\n\t\treturn nil, errors.New(\"wsConn cannot be nil\")\n\t}\n\treturn &WSConnection{\n\t\twsConn: wsConn,\n\t}, nil\n}\n\n\/\/ ReadMessage reads a text message from the websocket connection. Blocks until\n\/\/ a message has been read. Only a single goroutine may call ReadMessage at a\n\/\/ time. If ReadMessage ever returns an error, the same error will be returned\n\/\/ on each following call.\nfunc (c *WSConnection) ReadMessage() (msg []byte, err error) {\n\tif c.readError != nil {\n\t\treturn nil, c.readError\n\t}\n\tdefer func() { c.readError = err }()\n\n\tmsgType, msg, err := c.readMessage()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed reading message\")\n\t}\n\tif msgType != websocket.TextMessage {\n\t\treturn nil, errors.Errorf(\"Unexpected message type '%d'\", msgType)\n\t}\n\treturn msg, nil\n}\n\n\/\/ WriteMessage writes a text message to the websocket connection. Blocks\n\/\/ until the message is sent. Only a single goroutine may call WriteMessage\n\/\/ at the same time.\nfunc (c *WSConnection) WriteMessage(message []byte) (err error) {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\treturn c.writeMessage(websocket.TextMessage, message)\n}\n\n\/\/ Shutdown attempts to cleanly shutdown the connection, sending a websocket\n\/\/ close frame to the client and then closing the connection. Blocks until\n\/\/ the shutdown is complete, or the context is cancelled.\nfunc (c *WSConnection) Shutdown(ctx context.Context) error {\n\terrCh := make(chan error, 1)\n\tgo func() { errCh <- c.shutdown() }()\n\tselect {\n\tcase err := <-errCh:\n\t\treturn errors.Wrap(err, \"Unable to cleanly shutdown\")\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Close closes the connection, without sending a close message. Closing the\n\/\/ connection will make all current and future readers and writers fail.\nfunc (c *WSConnection) Close() error {\n\treturn c.wsConn.Close()\n}\n\n\/\/ pingLoop writes a ping message to the websocket connection\n\/\/ every connPingPeriod. The pingLoop is stopped if the stop\n\/\/ channel is closed, or if writing a ping message fails.\nfunc (c *WSConnection) pingLoop(stop <-chan struct{}) error {\n\tticker := time.NewTicker(connPingPeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := c.writePing(); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Failed writing ping message\")\n\t\t\t}\n\t\tcase <-stop:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ readMessage reads a message from the underlying websocket connection.\n\/\/ If readMessage returns an error, it must not be called again. The\n\/\/ method blocks until a message is read.\nfunc (c *WSConnection) readMessage() (messageType int, msg []byte, err error) {\n\t\/\/ Start a separate goroutine for sending PINGs while we are\n\t\/\/ waiting for a message to be read. pingLoop quitting will,\n\t\/\/ due to the deadline, make the ReadMessage call fail.\n\tstopPing := make(chan struct{})\n\tgo c.pingLoop(stopPing)\n\tdefer close(stopPing)\n\n\t\/\/ Set a deadline for reading a message from the connection.\n\t\/\/ If the client sends a pong message, the deadline for\n\t\/\/ reading a real message is extended.\n\tc.wsConn.SetReadDeadline(time.Now().Add(connReadTimeout))\n\tc.wsConn.SetPongHandler(func(string) error {\n\t\tc.wsConn.SetReadDeadline(time.Now().Add(connReadTimeout))\n\t\treturn nil\n\t})\n\tc.wsConn.SetReadLimit(connMaxMessageSize)\n\treturn c.wsConn.ReadMessage()\n}\n\n\/\/ writeMessage writes a message to the underlying websocket connection.\n\/\/ The method must be called holding writeMu. The method blocks until the\n\/\/ message is written, or for at most connWriteTimeout.\nfunc (c *WSConnection) writeMessage(messageType int, msg []byte) error {\n\tc.wsConn.SetWriteDeadline(time.Now().Add(connWriteTimeout))\n\treturn c.wsConn.WriteMessage(messageType, msg)\n}\n\n\/\/ writePing writes a websocket PING message to the peer, which the peer\n\/\/ should reply to with a PONG message.\nfunc (c *WSConnection) writePing() error {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\treturn c.writeMessage(websocket.PingMessage, []byte{})\n}\n\n\/\/ Writes a websocket close message to the client and, if successful,\n\/\/ closes the connection.\nfunc (c *WSConnection) shutdown() error {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\terr := c.writeMessage(websocket.CloseMessage, []byte{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed writing websocket CloseMessage\")\n\t}\n\treturn c.wsConn.Close()\n}\n<commit_msg>Use binary instead of text websocket<commit_after>package websocket\n\nimport (\n\t\"context\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pkg\/errors\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\tconnWriteTimeout = 10 * time.Second\n\n\t\/\/ Time allowed to read a message from the peer.\n\tconnReadTimeout = 10 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than connReadTimeout.\n\tconnPingPeriod = 5 * time.Second\n\n\t\/\/ Maximum message size allowed from peer.\n\tconnMaxMessageSize = 2048\n)\n\n\/\/ WSConnection is an adapter for a websocket Conn implementing the\n\/\/ network.connection interface.\ntype WSConnection struct {\n\t\/\/ The websocket connection used\n\twsConn *websocket.Conn\n\t\/\/ A mutex that must be held to write to the wsConn. This mutex\n\t\/\/ is required as our read spawns an internal ping-writing goroutine.\n\twriteMu sync.Mutex\n\t\/\/ The first error returned from reading a message from the\n\t\/\/ websocket connection.\n\treadError error\n}\n\n\/\/ newWSConnection creates a new WSConnection, wrapping the provided wsConn.\nfunc newWSConnection(wsConn *websocket.Conn) (*WSConnection, error) {\n\tif wsConn == nil {\n\t\treturn nil, errors.New(\"wsConn cannot be nil\")\n\t}\n\treturn &WSConnection{\n\t\twsConn: wsConn,\n\t}, nil\n}\n\n\/\/ ReadMessage reads a text message from the websocket connection. Blocks until\n\/\/ a message has been read. Only a single goroutine may call ReadMessage at a\n\/\/ time. If ReadMessage ever returns an error, the same error will be returned\n\/\/ on each following call.\nfunc (c *WSConnection) ReadMessage() (msg []byte, err error) {\n\tif c.readError != nil {\n\t\treturn nil, c.readError\n\t}\n\tdefer func() { c.readError = err }()\n\n\tmsgType, msg, err := c.readMessage()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed reading message\")\n\t}\n\tif msgType != websocket.BinaryMessage {\n\t\treturn nil, errors.Errorf(\"Unexpected message type '%d'\", msgType)\n\t}\n\treturn msg, nil\n}\n\n\/\/ WriteMessage writes a text message to the websocket connection. Blocks\n\/\/ until the message is sent. Only a single goroutine may call WriteMessage\n\/\/ at the same time.\nfunc (c *WSConnection) WriteMessage(message []byte) (err error) {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\treturn c.writeMessage(websocket.BinaryMessage, message)\n}\n\n\/\/ Shutdown attempts to cleanly shutdown the connection, sending a websocket\n\/\/ close frame to the client and then closing the connection. Blocks until\n\/\/ the shutdown is complete, or the context is cancelled.\nfunc (c *WSConnection) Shutdown(ctx context.Context) error {\n\terrCh := make(chan error, 1)\n\tgo func() { errCh <- c.shutdown() }()\n\tselect {\n\tcase err := <-errCh:\n\t\treturn errors.Wrap(err, \"Unable to cleanly shutdown\")\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Close closes the connection, without sending a close message. Closing the\n\/\/ connection will make all current and future readers and writers fail.\nfunc (c *WSConnection) Close() error {\n\treturn c.wsConn.Close()\n}\n\n\/\/ pingLoop writes a ping message to the websocket connection\n\/\/ every connPingPeriod. The pingLoop is stopped if the stop\n\/\/ channel is closed, or if writing a ping message fails.\nfunc (c *WSConnection) pingLoop(stop <-chan struct{}) error {\n\tticker := time.NewTicker(connPingPeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := c.writePing(); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Failed writing ping message\")\n\t\t\t}\n\t\tcase <-stop:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ readMessage reads a message from the underlying websocket connection.\n\/\/ If readMessage returns an error, it must not be called again. The\n\/\/ method blocks until a message is read.\nfunc (c *WSConnection) readMessage() (messageType int, msg []byte, err error) {\n\t\/\/ Start a separate goroutine for sending PINGs while we are\n\t\/\/ waiting for a message to be read. pingLoop quitting will,\n\t\/\/ due to the deadline, make the ReadMessage call fail.\n\tstopPing := make(chan struct{})\n\tgo c.pingLoop(stopPing)\n\tdefer close(stopPing)\n\n\t\/\/ Set a deadline for reading a message from the connection.\n\t\/\/ If the client sends a pong message, the deadline for\n\t\/\/ reading a real message is extended.\n\tc.wsConn.SetReadDeadline(time.Now().Add(connReadTimeout))\n\tc.wsConn.SetPongHandler(func(string) error {\n\t\tc.wsConn.SetReadDeadline(time.Now().Add(connReadTimeout))\n\t\treturn nil\n\t})\n\tc.wsConn.SetReadLimit(connMaxMessageSize)\n\treturn c.wsConn.ReadMessage()\n}\n\n\/\/ writeMessage writes a message to the underlying websocket connection.\n\/\/ The method must be called holding writeMu. The method blocks until the\n\/\/ message is written, or for at most connWriteTimeout.\nfunc (c *WSConnection) writeMessage(messageType int, msg []byte) error {\n\tc.wsConn.SetWriteDeadline(time.Now().Add(connWriteTimeout))\n\treturn c.wsConn.WriteMessage(messageType, msg)\n}\n\n\/\/ writePing writes a websocket PING message to the peer, which the peer\n\/\/ should reply to with a PONG message.\nfunc (c *WSConnection) writePing() error {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\treturn c.writeMessage(websocket.PingMessage, []byte{})\n}\n\n\/\/ Writes a websocket close message to the client and, if successful,\n\/\/ closes the connection.\nfunc (c *WSConnection) shutdown() error {\n\tc.writeMu.Lock()\n\tdefer c.writeMu.Unlock()\n\terr := c.writeMessage(websocket.CloseMessage, []byte{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed writing websocket CloseMessage\")\n\t}\n\treturn c.wsConn.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file provides tests for all refactorings. The testdata directory is\n\/\/ structured as such:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ 002-test-name\/\n\/\/\n\/\/ To filter which refactorings are tested, run\n\/\/ go test -filter=something\n\/\/ Then, only tests in directories containing \"something\" are run. E.g.::\n\/\/ go test -filter=rename # Only run rename tests\n\/\/ go test -filter=shortassign\/003 # Only run shortassign test #3\n\/\/\n\/\/ Refactorings are run on the files in each test directory; special comments\n\/\/ in the .go files indicate what refactoring(s) to run and whether the\n\/\/ refactorings are expected to succeed or fail. Specifically, test files are\n\/\/ annotated with markers of the form\n\/\/ \/\/<<<<<name,startline,startcol,endline,endcol,arg1,arg2,...,argn,pass\n\/\/ The name indicates the refactoring to run. The next four fields specify a\n\/\/ text selection on which to invoke the refactoring. The arguments\n\/\/ arg1,arg2,...,argn are passed as arguments to the refactoring (see\n\/\/ Config.Args). The last field is either \"pass\" or \"fail\", indicating whether\n\/\/ the refactoring is expected to complete successfully or raise an error. If\n\/\/ the refactoring is expected to succeed, the resulting file is compared\n\/\/ against a .golden file with the same name in the same directory.\n\/\/\n\/\/ Each test directory (001-test-name, 002-test-name, etc.) is treated as the\n\/\/ root of a Go workspace when its tests are run; i.e., the GOPATH is set to\n\/\/ the test directory. This allows it to define its own packages. In such\n\/\/ cases, the test directory is usually structured as follows:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ src\/\n\/\/ main.go\n\/\/ main.golden\n\/\/ package-name\/\n\/\/ package-file.go\n\/\/ package-file.golden\n\/\/\n\/\/ To additional options are available and are currently used only to test the\n\/\/ Debug refactoring. When it does not make sense to compare against a .golden\n\/\/ file, include an empty file named filename.golden.ignoreOutput instead. If\n\/\/ the output will contain absolute paths to files in the test folder, include\n\/\/ a file named filename.go.stripPaths, and the refactoring's output will be\n\/\/ stripped of all occurrences of the absolute path to the .go file.\n\npackage refactoring_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang-refactoring.org\/go-doctor\/engine\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/refactoring\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\nconst MARKER = \"<<<<<\"\nconst PASS = \"pass\"\nconst FAIL = \"fail\"\n\nconst MAIN_DOT_GO = \"main.go\"\n\nconst FSCHANGES_TXT = \"fschanges.txt\"\n\nvar filterFlag = flag.String(\"filter\", \"\",\n\t\"Only tests from directories containing this substring will be run\")\n\nconst directory = \"testdata\/\"\n\nfunc TestRefactorings(t *testing.T) {\n\ttestDirs, err := ioutil.ReadDir(directory)\n\tfailIfError(err, t)\n\tfor _, testDirInfo := range testDirs {\n\t\t\/\/ FIXME(jeff): Move refactoring tests into testdata\/refactoring\/x and remove this hack\n\t\tif testDirInfo.IsDir() && testDirInfo.Name() != \"diff\" {\n\t\t\trunAllTestsInSubdirectories(testDirInfo, t)\n\t\t}\n\t}\n}\n\nfunc runAllTestsInSubdirectories(testDirInfo os.FileInfo, t *testing.T) {\n\ttestDirPath := filepath.Join(directory, testDirInfo.Name())\n\tsubDirs, err := ioutil.ReadDir(testDirPath)\n\tfailIfError(err, t)\n\tfor _, subDirInfo := range subDirs {\n\t\tif subDirInfo.IsDir() {\n\t\t\tsubDirPath := filepath.Join(testDirPath, subDirInfo.Name())\n\t\t\tif strings.Contains(subDirPath, *filterFlag) {\n\t\t\t\trunAllTestsInDirectory(subDirPath, t)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RunAllTests is a utility method that runs a set of refactoring tests\n\/\/ based on markers in all of the files in subdirectories of a given directory\nfunc runAllTestsInDirectory(directory string, t *testing.T) {\n\tfiles, err := recursiveReadDir(directory)\n\tfailIfError(err, t)\n\n\tabsolutePath, err := filepath.Abs(directory)\n\tfailIfError(err, t)\n\terr = os.Setenv(\"GOPATH\", absolutePath)\n\tfailIfError(err, t)\n\n\trunTestsInFiles(directory, files, t)\n}\n\n\/\/ Assumes no duplication or circularity due to symbolic links\nfunc recursiveReadDir(path string) ([]string, error) {\n\tresult := []string{}\n\n\tfileInfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor _, fi := range fileInfos {\n\t\tif fi.IsDir() {\n\t\t\tcurrent := result\n\t\t\trest, err := recursiveReadDir(filepath.Join(path, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\n\t\t\tnewLen := len(current) + len(rest)\n\t\t\tresult = make([]string, newLen, newLen)\n\t\t\tcopy(result, current)\n\t\t\tcopy(result[len(current):], rest)\n\t\t} else {\n\t\t\tresult = append(result, filepath.Join(path, fi.Name()))\n\t\t}\n\t}\n\treturn result, err\n}\n\nfunc failIfError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc runTestsInFiles(directory string, files []string, t *testing.T) {\n\tmarkers := make(map[string][]string)\n\tfor _, path := range files {\n\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\tmarkers[path] = extractMarkers(path, t)\n\t\t}\n\t}\n\n\tif len(markers) == 0 {\n\t\tpwd, _ := os.Getwd()\n\t\tpwd = filepath.Base(pwd)\n\t\tt.Fatalf(\"No <<<<< markers found in any files in %s\", pwd)\n\t}\n\n\tfor path, markersInFile := range markers {\n\t\tfor _, marker := range markersInFile {\n\t\t\trunRefactoring(directory, path, marker, t)\n\t\t}\n\t}\n}\n\nfunc runRefactoring(directory string, filename string, marker string, t *testing.T) {\n\trefac, selection, remainder, passFail := splitMarker(filename, marker, t)\n\n\tr := engine.GetRefactoring(refac)\n\tif r == nil {\n\t\tt.Fatalf(\"There is no refactoring named %s (from marker %s)\", refac, marker)\n\t}\n\n\tshouldPass := (passFail == PASS)\n\tname := r.Description().Name\n\n\tcwd, _ := os.Getwd()\n\tabsPath, _ := filepath.Abs(filename)\n\trelativePath, _ := filepath.Rel(cwd, absPath)\n\tfmt.Println(name, relativePath, selection.ShortString())\n\n\tmainFile := filepath.Join(directory, MAIN_DOT_GO)\n\tif !exists(mainFile, t) {\n\t\tmainFile = filepath.Join(filepath.Join(directory, \"src\"), MAIN_DOT_GO)\n\t\tif !exists(mainFile, t) {\n\t\t\tmainFile = filename\n\t\t}\n\t}\n\n\tmainFile, err := filepath.Abs(mainFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\targs := refactoring.InterpretArgs(remainder, r.Description().Params)\n\n\tfileSystem := &filesystem.LocalFileSystem{}\n\tconfig := &refactoring.Config{\n\t\tFileSystem: fileSystem,\n\t\tScope: []string{mainFile},\n\t\tSelection: selection,\n\t\tArgs: args,\n\t\tGoPath: \"\", \/\/ FIXME(jeff): GOPATH\n\t}\n\tresult := r.Run(config)\n\tif shouldPass && result.Log.ContainsErrors() {\n\t\tt.Log(result.Log)\n\t\tt.Fatalf(\"Refactoring produced unexpected errors\")\n\t} else if !shouldPass && !result.Log.ContainsErrors() {\n\t\tt.Fatalf(\"Refactoring should have produced errors but didn't\")\n\t}\n\n\tfor filename, edits := range result.Edits {\n\t\toutput, err := text.ApplyToFile(edits, filename)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif shouldPass {\n\t\t\tcheckResult(filename, string(output), t)\n\n\t\t}\n\t}\n\t\/\/fmt.Println(\"filesystem changes\",result.FSChanges)\n\t\/\/ checkRenamedDir(result.RenameDir,\"fschanges.txt\")\n\n\tfsChangesFile := filepath.Join(directory, FSCHANGES_TXT)\n\tif !exists(fsChangesFile, t) {\n\t\tif len(result.FSChanges) > 0 {\n\t\t\tt.Fatalf(\"Refactoring returned file system changes, \"+\n\t\t\t\t\"but %s does not exist\", fsChangesFile)\n\t\t}\n\t} else {\n\t\tbytes, err := ioutil.ReadFile(fsChangesFile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfschanges := removeEmptyLines(strings.Split(string(bytes), \"\\n\"))\n\t\tif len(fschanges) != len(result.FSChanges) {\n\t\t\tt.Fatalf(\"Expected %d file system changes but got %d\",\n\t\t\t\tlen(fschanges), len(result.FSChanges))\n\t\t} else {\n\t\t\tfor i, chg := range result.FSChanges {\n\t\t\t\tif chg.String(directory) != strings.TrimSpace(fschanges[i]) {\n\t\t\t\t\tt.Fatalf(\"FSChanges[%d]\\nExpected: %s\\nActual: %s\", i, fschanges[i], chg.String(directory))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc removeEmptyLines(lines []string) []string {\n\tresult := []string{}\n\tfor _, line := range lines {\n\t\tif line != \"\" {\n\t\t\tresult = append(result, line)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc exists(filename string, t *testing.T) bool {\n\tif _, err := os.Stat(filename); err == nil {\n\t\treturn true\n\t} else {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc checkResult(filename string, actualOutput string, t *testing.T) {\n\tif _, err := os.Stat(filename + \".ignoreOutput\"); err == nil {\n\t\treturn\n\t}\n\tif _, err := os.Stat(filename + \".stripPaths\"); err == nil {\n\t\tdir := filepath.Dir(filename) + string(filepath.Separator)\n\t\tactualOutput = strings.Replace(actualOutput, dir, \"\", -1)\n\t}\n\tbytes, err := ioutil.ReadFile(filename + \"lden\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpectedOutput := strings.Replace(string(bytes), \"\\r\\n\", \"\\n\", -1)\n\tactualOutput = strings.Replace(actualOutput, \"\\r\\n\", \"\\n\", -1)\n\n\tif actualOutput != expectedOutput {\n\t\tfmt.Printf(\">>>>> Output does not match %slden\\n\", filename)\n\t\tfmt.Println(\"EXPECTED OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvvvv\")\n\t\tfmt.Println(expectedOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^^^\")\n\t\tfmt.Println(\"ACTUAL OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvv\")\n\t\tfmt.Println(actualOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^\")\n\t\tlenExpected, lenActual := len(expectedOutput), len(actualOutput)\n\t\tif lenExpected != lenActual {\n\t\t\tfmt.Printf(\"Length of expected output is %d; length of actual output is %d\\n\",\n\t\t\t\tlenExpected, lenActual)\n\t\t\tminLen := lenExpected\n\t\t\tif lenActual < minLen {\n\t\t\t\tminLen = lenActual\n\t\t\t}\n\t\t\tfor i := 0; i < minLen; i++ {\n\t\t\t\tif expectedOutput[i] != actualOutput[i] {\n\t\t\t\t\tfmt.Printf(\"Strings differ at index %d\\n\", i)\n\t\t\t\t\tfmt.Printf(\"Substrings starting at that index are:\\n\")\n\t\t\t\t\tfmt.Printf(\"Expected: [%s]\\n\", describe(expectedOutput[i:]))\n\t\t\t\t\tfmt.Printf(\"Actual: [%s]\\n\", describe(actualOutput[i:]))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.Fatalf(\"Refactoring test failed - %s\", filename)\n\t}\n}\n\n\/\/TODO Define after getting the value of Gopath\n\/*func checkRenamedDir(result.RenameDir []string,filename string) {\n\n if result.RenameDir != nil {\n\n bytes, err := ioutil.ReadFile(filename)\n if err != nil {\n\t\tt.Fatal(err)\n\t}\n expectedoutput := string(bytes)\n\n}\n\n}\n*\/\nfunc describe(s string) string {\n\t\/\/ FIXME: Jeff: Handle other non-printing characters\n\tif len(s) > 10 {\n\t\ts = s[:10]\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\\\\n\", -1)\n\ts = strings.Replace(s, \"\\r\", \"\\\\r\", -1)\n\ts = strings.Replace(s, \"\\t\", \"\\\\t\", -1)\n\treturn s\n}\n\nfunc splitMarker(filename string, marker string, t *testing.T) (refac string, selection *text.Selection, remainder []string, result string) {\n\tfilename, err := filepath.Abs(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := strings.Split(marker, \",\")\n\tif len(fields) < 6 {\n\t\tt.Fatalf(\"Marker is invalid (must contain >= 5 fields): %s\", marker)\n\t}\n\trefac = fields[0]\n\tstartLine := parseInt(fields[1], t)\n\tstartCol := parseInt(fields[2], t)\n\tendLine := parseInt(fields[3], t)\n\tendCol := parseInt(fields[4], t)\n\tselection = &text.Selection{filename,\n\t\tstartLine, startCol, endLine, endCol}\n\tremainder = fields[5 : len(fields)-1]\n\tresult = fields[len(fields)-1]\n\tif result != PASS && result != FAIL {\n\t\tt.Fatalf(\"Marker is invalid: last field must be %s or %s\",\n\t\t\tPASS, FAIL)\n\t}\n\treturn\n}\n\nfunc parseInt(s string, t *testing.T) int {\n\tresult, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"Marker is invalid: expecting integer, found %s\", s)\n\t}\n\treturn int(result)\n}\n\n\/\/ extractMarkers extracts comments of the form \/\/<<<<<a,b,c,d,e,f,g removing\n\/\/ the leading <<<<< and trimming any spaces from the left and right ends\nfunc extractMarkers(filename string, t *testing.T) []string {\n\tresult := []string{}\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)\n\tif err != nil {\n\t\tt.Logf(\"Cannot extract markers from %s -- unable to parse\",\n\t\t\tfilename)\n\t\twd, _ := os.Getwd()\n\t\tt.Logf(\"Working directory is %s\", wd)\n\t\tt.Fatal(err)\n\t}\n\tfor _, commentGroup := range f.Comments {\n\t\tfor _, comment := range commentGroup.List {\n\t\t\ttxt := comment.Text\n\t\t\tif strings.Contains(txt, MARKER) {\n\t\t\t\tidx := strings.Index(txt, MARKER) + len(MARKER)\n\t\t\t\ttxt = strings.TrimSpace(txt[idx:])\n\t\t\t\tresult = append(result, txt)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n<commit_msg>New search for .go\/.golden files refactoring_test<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file provides tests for all refactorings. The testdata directory is\n\/\/ structured as such:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ 002-test-name\/\n\/\/\n\/\/ To filter which refactorings are tested, run\n\/\/ go test -filter=something\n\/\/ Then, only tests in directories containing \"something\" are run. E.g.::\n\/\/ go test -filter=rename # Only run rename tests\n\/\/ go test -filter=shortassign\/003 # Only run shortassign test #3\n\/\/\n\/\/ Refactorings are run on the files in each test directory; special comments\n\/\/ in the .go files indicate what refactoring(s) to run and whether the\n\/\/ refactorings are expected to succeed or fail. Specifically, test files are\n\/\/ annotated with markers of the form\n\/\/ \/\/<<<<<name,startline,startcol,endline,endcol,arg1,arg2,...,argn,pass\n\/\/ The name indicates the refactoring to run. The next four fields specify a\n\/\/ text selection on which to invoke the refactoring. The arguments\n\/\/ arg1,arg2,...,argn are passed as arguments to the refactoring (see\n\/\/ Config.Args). The last field is either \"pass\" or \"fail\", indicating whether\n\/\/ the refactoring is expected to complete successfully or raise an error. If\n\/\/ the refactoring is expected to succeed, the resulting file is compared\n\/\/ against a .golden file with the same name in the same directory.\n\/\/\n\/\/ Each test directory (001-test-name, 002-test-name, etc.) is treated as the\n\/\/ root of a Go workspace when its tests are run; i.e., the GOPATH is set to\n\/\/ the test directory. This allows it to define its own packages. In such\n\/\/ cases, the test directory is usually structured as follows:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ src\/\n\/\/ main.go\n\/\/ main.golden\n\/\/ package-name\/\n\/\/ package-file.go\n\/\/ package-file.golden\n\/\/\n\/\/ To additional options are available and are currently used only to test the\n\/\/ Debug refactoring. When it does not make sense to compare against a .golden\n\/\/ file, include an empty file named filename.golden.ignoreOutput instead. If\n\/\/ the output will contain absolute paths to files in the test folder, include\n\/\/ a file named filename.go.stripPaths, and the refactoring's output will be\n\/\/ stripped of all occurrences of the absolute path to the .go file.\n\npackage refactoring_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang-refactoring.org\/go-doctor\/engine\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/refactoring\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\nconst MARKER = \"<<<<<\"\nconst PASS = \"pass\"\nconst FAIL = \"fail\"\n\nconst MAIN_DOT_GO = \"main.go\"\n\nconst FSCHANGES_TXT = \"fschanges.txt\"\n\nvar filterFlag = flag.String(\"filter\", \"\",\n\t\"Only tests from directories containing this substring will be run\")\n\nconst directory = \"testdata\/\"\n\nfunc TestRefactorings(t *testing.T) {\n\ttestDirs, err := ioutil.ReadDir(directory)\n\tfailIfError(err, t)\n\tfor _, testDirInfo := range testDirs {\n\t\tif testDirInfo.IsDir() {\n\t\t\trunAllTestsInSubdirectories(testDirInfo, t)\n\t\t}\n\t}\n}\n\nfunc runAllTestsInSubdirectories(testDirInfo os.FileInfo, t *testing.T) {\n\ttestDirPath := filepath.Join(directory, testDirInfo.Name())\n\tsubDirs, err := ioutil.ReadDir(testDirPath)\n\tfailIfError(err, t)\n\tfor _, subDirInfo := range subDirs {\n\t\tif subDirInfo.IsDir() {\n\t\t\tsubDirPath := filepath.Join(testDirPath, subDirInfo.Name())\n\t\t\tif strings.Contains(subDirPath, *filterFlag) {\n\t\t\t\trunAllTestsInDirectory(subDirPath, t)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RunAllTests is a utility method that runs a set of refactoring tests\n\/\/ based on markers in all of the files in subdirectories of a given directory\nfunc runAllTestsInDirectory(directory string, t *testing.T) {\n\tfiles, err := recursiveReadDir(directory)\n\tfailIfError(err, t)\n\n\tabsolutePath, err := filepath.Abs(directory)\n\tfailIfError(err, t)\n\terr = os.Setenv(\"GOPATH\", absolutePath)\n\tfailIfError(err, t)\n\n\trunTestsInFiles(directory, files, t)\n}\n\n\/\/ Assumes no duplication or circularity due to symbolic links\nfunc recursiveReadDir(path string) ([]string, error) {\n\tresult := []string{}\n\n\tfileInfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor _, fi := range fileInfos {\n\t\tif fi.IsDir() {\n\t\t\tcurrent := result\n\t\t\trest, err := recursiveReadDir(filepath.Join(path, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\n\t\t\tnewLen := len(current) + len(rest)\n\t\t\tresult = make([]string, newLen, newLen)\n\t\t\tcopy(result, current)\n\t\t\tcopy(result[len(current):], rest)\n\t\t} else {\n\t\t\tresult = append(result, filepath.Join(path, fi.Name()))\n\t\t}\n\t}\n\treturn result, err\n}\n\nfunc failIfError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc runTestsInFiles(directory string, files []string, t *testing.T) {\n\tmarkers := make(map[string][]string)\n\tfor _, path := range files {\n\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\tmarkers[path] = extractMarkers(path, t)\n\t\t}\n\t}\n\n\tif len(markers) == 0 {\n\t\tpwd, _ := os.Getwd()\n\t\tpwd = filepath.Base(pwd)\n\t\tt.Fatalf(\"No <<<<< markers found in any files in %s\", pwd)\n\t}\n\n\tfor path, markersInFile := range markers {\n\t\tfor _, marker := range markersInFile {\n\t\t\trunRefactoring(directory, path, marker, t)\n\t\t}\n\t}\n}\n\nfunc runRefactoring(directory string, filename string, marker string, t *testing.T) {\n\trefac, selection, remainder, passFail := splitMarker(filename, marker, t)\n\n\tr := engine.GetRefactoring(refac)\n\tif r == nil {\n\t\tt.Fatalf(\"There is no refactoring named %s (from marker %s)\", refac, marker)\n\t}\n\n\tshouldPass := (passFail == PASS)\n\tname := r.Description().Name\n\n\tcwd, _ := os.Getwd()\n\tabsPath, _ := filepath.Abs(filename)\n\trelativePath, _ := filepath.Rel(cwd, absPath)\n\tfmt.Println(name, relativePath, selection.ShortString())\n\n\tmainFile := filepath.Join(directory, MAIN_DOT_GO)\n\tif !exists(mainFile, t) {\n\t\tmainFile = filepath.Join(filepath.Join(directory, \"src\"), MAIN_DOT_GO)\n\t\tif !exists(mainFile, t) {\n\t\t\tmainFile = filename\n\t\t}\n\t}\n\n\tmainFile, err := filepath.Abs(mainFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\targs := refactoring.InterpretArgs(remainder, r.Description().Params)\n\n\tfileSystem := &filesystem.LocalFileSystem{}\n\tconfig := &refactoring.Config{\n\t\tFileSystem: fileSystem,\n\t\tScope: []string{mainFile},\n\t\tSelection: selection,\n\t\tArgs: args,\n\t\tGoPath: \"\", \/\/ FIXME(jeff): GOPATH\n\t}\n\tresult := r.Run(config)\n\tif shouldPass && result.Log.ContainsErrors() {\n\t\tt.Log(result.Log)\n\t\tt.Fatalf(\"Refactoring produced unexpected errors\")\n\t} else if !shouldPass && !result.Log.ContainsErrors() {\n\t\tt.Fatalf(\"Refactoring should have produced errors but didn't\")\n\t}\n\n\terr = filepath.Walk(directory,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\t\tpath, err := filepath.Abs(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tedits, ok := result.Edits[path]\n\t\t\t\tif !ok {\n\t\t\t\t\tedits = text.NewEditSet()\n\t\t\t\t}\n\t\t\t\toutput, err := text.ApplyToFile(edits, path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif shouldPass {\n\t\t\t\t\tcheckResult(path, string(output), t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfsChangesFile := filepath.Join(directory, FSCHANGES_TXT)\n\tif !exists(fsChangesFile, t) {\n\t\tif len(result.FSChanges) > 0 {\n\t\t\tt.Fatalf(\"Refactoring returned file system changes, \"+\n\t\t\t\t\"but %s does not exist\", fsChangesFile)\n\t\t}\n\t} else {\n\t\tbytes, err := ioutil.ReadFile(fsChangesFile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfschanges := removeEmptyLines(strings.Split(string(bytes), \"\\n\"))\n\t\tif len(fschanges) != len(result.FSChanges) {\n\t\t\tt.Fatalf(\"Expected %d file system changes but got %d\",\n\t\t\t\tlen(fschanges), len(result.FSChanges))\n\t\t} else {\n\t\t\tfor i, chg := range result.FSChanges {\n\t\t\t\tif chg.String(directory) != strings.TrimSpace(fschanges[i]) {\n\t\t\t\t\tt.Fatalf(\"FSChanges[%d]\\nExpected: %s\\nActual: %s\", i, fschanges[i], chg.String(directory))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc removeEmptyLines(lines []string) []string {\n\tresult := []string{}\n\tfor _, line := range lines {\n\t\tif line != \"\" {\n\t\t\tresult = append(result, line)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc exists(filename string, t *testing.T) bool {\n\tif _, err := os.Stat(filename); err == nil {\n\t\treturn true\n\t} else {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc checkResult(filename string, actualOutput string, t *testing.T) {\n\tif _, err := os.Stat(filename + \".ignoreOutput\"); err == nil {\n\t\treturn\n\t}\n\tif _, err := os.Stat(filename + \".stripPaths\"); err == nil {\n\t\tdir := filepath.Dir(filename) + string(filepath.Separator)\n\t\tactualOutput = strings.Replace(actualOutput, dir, \"\", -1)\n\t}\n\tbytes, err := ioutil.ReadFile(filename + \"lden\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpectedOutput := strings.Replace(string(bytes), \"\\r\\n\", \"\\n\", -1)\n\tactualOutput = strings.Replace(actualOutput, \"\\r\\n\", \"\\n\", -1)\n\n\tif actualOutput != expectedOutput {\n\t\tfmt.Printf(\">>>>> Output does not match %slden\\n\", filename)\n\t\tfmt.Println(\"EXPECTED OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvvvv\")\n\t\tfmt.Println(expectedOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^^^\")\n\t\tfmt.Println(\"ACTUAL OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvv\")\n\t\tfmt.Println(actualOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^\")\n\t\tlenExpected, lenActual := len(expectedOutput), len(actualOutput)\n\t\tif lenExpected != lenActual {\n\t\t\tfmt.Printf(\"Length of expected output is %d; length of actual output is %d\\n\",\n\t\t\t\tlenExpected, lenActual)\n\t\t\tminLen := lenExpected\n\t\t\tif lenActual < minLen {\n\t\t\t\tminLen = lenActual\n\t\t\t}\n\t\t\tfor i := 0; i < minLen; i++ {\n\t\t\t\tif expectedOutput[i] != actualOutput[i] {\n\t\t\t\t\tfmt.Printf(\"Strings differ at index %d\\n\", i)\n\t\t\t\t\tfmt.Printf(\"Substrings starting at that index are:\\n\")\n\t\t\t\t\tfmt.Printf(\"Expected: [%s]\\n\", describe(expectedOutput[i:]))\n\t\t\t\t\tfmt.Printf(\"Actual: [%s]\\n\", describe(actualOutput[i:]))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.Fatalf(\"Refactoring test failed - %s\", filename)\n\t}\n}\n\n\/\/TODO Define after getting the value of Gopath\n\/*func checkRenamedDir(result.RenameDir []string,filename string) {\n\n if result.RenameDir != nil {\n\n bytes, err := ioutil.ReadFile(filename)\n if err != nil {\n\t\tt.Fatal(err)\n\t}\n expectedoutput := string(bytes)\n\n}\n\n}\n*\/\nfunc describe(s string) string {\n\t\/\/ FIXME: Jeff: Handle other non-printing characters\n\tif len(s) > 10 {\n\t\ts = s[:10]\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\\\\n\", -1)\n\ts = strings.Replace(s, \"\\r\", \"\\\\r\", -1)\n\ts = strings.Replace(s, \"\\t\", \"\\\\t\", -1)\n\treturn s\n}\n\nfunc splitMarker(filename string, marker string, t *testing.T) (refac string, selection *text.Selection, remainder []string, result string) {\n\tfilename, err := filepath.Abs(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := strings.Split(marker, \",\")\n\tif len(fields) < 6 {\n\t\tt.Fatalf(\"Marker is invalid (must contain >= 5 fields): %s\", marker)\n\t}\n\trefac = fields[0]\n\tstartLine := parseInt(fields[1], t)\n\tstartCol := parseInt(fields[2], t)\n\tendLine := parseInt(fields[3], t)\n\tendCol := parseInt(fields[4], t)\n\tselection = &text.Selection{filename,\n\t\tstartLine, startCol, endLine, endCol}\n\tremainder = fields[5 : len(fields)-1]\n\tresult = fields[len(fields)-1]\n\tif result != PASS && result != FAIL {\n\t\tt.Fatalf(\"Marker is invalid: last field must be %s or %s\",\n\t\t\tPASS, FAIL)\n\t}\n\treturn\n}\n\nfunc parseInt(s string, t *testing.T) int {\n\tresult, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"Marker is invalid: expecting integer, found %s\", s)\n\t}\n\treturn int(result)\n}\n\n\/\/ extractMarkers extracts comments of the form \/\/<<<<<a,b,c,d,e,f,g removing\n\/\/ the leading <<<<< and trimming any spaces from the left and right ends\nfunc extractMarkers(filename string, t *testing.T) []string {\n\tresult := []string{}\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)\n\tif err != nil {\n\t\tt.Logf(\"Cannot extract markers from %s -- unable to parse\",\n\t\t\tfilename)\n\t\twd, _ := os.Getwd()\n\t\tt.Logf(\"Working directory is %s\", wd)\n\t\tt.Fatal(err)\n\t}\n\tfor _, commentGroup := range f.Comments {\n\t\tfor _, comment := range commentGroup.List {\n\t\t\ttxt := comment.Text\n\t\t\tif strings.Contains(txt, MARKER) {\n\t\t\t\tidx := strings.Index(txt, MARKER) + len(MARKER)\n\t\t\t\ttxt = strings.TrimSpace(txt[idx:])\n\t\t\t\tresult = append(result, txt)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file provides tests for all refactorings. The testdata directory is\n\/\/ structured as such:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ 002-test-name\/\n\/\/\n\/\/ To filter which refactorings are tested, run\n\/\/ go test -filter=something\n\/\/ Then, only tests in directories containing \"something\" are run. E.g.::\n\/\/ go test -filter=rename # Only run rename tests\n\/\/ go test -filter=shortassign\/003 # Only run shortassign test #3\n\/\/\n\/\/ Refactorings are run on the files in each test directory; special comments\n\/\/ in the .go files indicate what refactoring(s) to run and whether the\n\/\/ refactorings are expected to succeed or fail. Specifically, test files are\n\/\/ annotated with markers of the form\n\/\/ \/\/<<<<<name,startline,startcol,endline,endcol,arg1,arg2,...,argn,pass\n\/\/ The name indicates the refactoring to run. The next four fields specify a\n\/\/ text selection on which to invoke the refactoring. The arguments\n\/\/ arg1,arg2,...,argn are passed as arguments to the refactoring (see\n\/\/ Config.Args). The last field is either \"pass\" or \"fail\", indicating whether\n\/\/ the refactoring is expected to complete successfully or raise an error. If\n\/\/ the refactoring is expected to succeed, the resulting file is compared\n\/\/ against a .golden file with the same name in the same directory.\n\/\/\n\/\/ Each test directory (001-test-name, 002-test-name, etc.) is treated as the\n\/\/ root of a Go workspace when its tests are run; i.e., the GOPATH is set to\n\/\/ the test directory. This allows it to define its own packages. In such\n\/\/ cases, the test directory is usually structured as follows:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ src\/\n\/\/ main.go\n\/\/ main.golden\n\/\/ package-name\/\n\/\/ package-file.go\n\/\/ package-file.golden\n\/\/\n\/\/ To additional options are available and are currently used only to test the\n\/\/ Debug refactoring. When it does not make sense to compare against a .golden\n\/\/ file, include an empty file named filename.golden.ignoreOutput instead. If\n\/\/ the output will contain absolute paths to files in the test folder, include\n\/\/ a file named filename.go.stripPaths, and the refactoring's output will be\n\/\/ stripped of all occurrences of the absolute path to the .go file.\n\npackage refactoring_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang-refactoring.org\/go-doctor\/engine\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/refactoring\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\nconst MARKER = \"<<<<<\"\nconst PASS = \"pass\"\nconst FAIL = \"fail\"\n\nconst MAIN_DOT_GO = \"main.go\"\n\nconst FSCHANGES_TXT = \"fschanges.txt\"\n\nvar filterFlag = flag.String(\"filter\", \"\",\n\t\"Only tests from directories containing this substring will be run\")\n\nconst directory = \"testdata\/\"\n\nfunc TestRefactorings(t *testing.T) {\n\ttestDirs, err := ioutil.ReadDir(directory)\n\tfailIfError(err, t)\n\tfor _, testDirInfo := range testDirs {\n\t\t\/\/ FIXME(jeff): Move refactoring tests into testdata\/refactoring\/x and remove this hack\n\t\tif testDirInfo.IsDir() && testDirInfo.Name() != \"diff\" {\n\t\t\trunAllTestsInSubdirectories(testDirInfo, t)\n\t\t}\n\t}\n}\n\nfunc runAllTestsInSubdirectories(testDirInfo os.FileInfo, t *testing.T) {\n\ttestDirPath := filepath.Join(directory, testDirInfo.Name())\n\tsubDirs, err := ioutil.ReadDir(testDirPath)\n\tfailIfError(err, t)\n\tfor _, subDirInfo := range subDirs {\n\t\tif subDirInfo.IsDir() {\n\t\t\tsubDirPath := filepath.Join(testDirPath, subDirInfo.Name())\n\t\t\tif strings.Contains(subDirPath, *filterFlag) {\n\t\t\t\trunAllTestsInDirectory(subDirPath, t)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RunAllTests is a utility method that runs a set of refactoring tests\n\/\/ based on markers in all of the files in subdirectories of a given directory\nfunc runAllTestsInDirectory(directory string, t *testing.T) {\n\tfiles, err := recursiveReadDir(directory)\n\tfailIfError(err, t)\n\n\tabsolutePath, err := filepath.Abs(directory)\n\tfailIfError(err, t)\n\terr = os.Setenv(\"GOPATH\", absolutePath)\n\tfailIfError(err, t)\n\n\trunTestsInFiles(directory, files, t)\n}\n\n\/\/ Assumes no duplication or circularity due to symbolic links\nfunc recursiveReadDir(path string) ([]string, error) {\n\tresult := []string{}\n\n\tfileInfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor _, fi := range fileInfos {\n\t\tif fi.IsDir() {\n\t\t\tcurrent := result\n\t\t\trest, err := recursiveReadDir(filepath.Join(path, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\n\t\t\tnewLen := len(current) + len(rest)\n\t\t\tresult = make([]string, newLen, newLen)\n\t\t\tcopy(result, current)\n\t\t\tcopy(result[len(current):], rest)\n\t\t} else {\n\t\t\tresult = append(result, filepath.Join(path, fi.Name()))\n\t\t}\n\t}\n\treturn result, err\n}\n\nfunc failIfError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc runTestsInFiles(directory string, files []string, t *testing.T) {\n\tmarkers := make(map[string][]string)\n\tfor _, path := range files {\n\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\tmarkers[path] = extractMarkers(path, t)\n\t\t}\n\t}\n\n\tif len(markers) == 0 {\n\t\tpwd, _ := os.Getwd()\n\t\tpwd = filepath.Base(pwd)\n\t\tt.Fatalf(\"No <<<<< markers found in any files in %s\", pwd)\n\t}\n\n\tfor path, markersInFile := range markers {\n\t\tfor _, marker := range markersInFile {\n\t\t\trunRefactoring(directory, path, marker, t)\n\t\t}\n\t}\n}\n\nfunc runRefactoring(directory string, filename string, marker string, t *testing.T) {\n\trefac, selection, remainder, passFail := splitMarker(filename, marker, t)\n\n\tr := engine.GetRefactoring(refac)\n\tif r == nil {\n\t\tt.Fatalf(\"There is no refactoring named %s (from marker %s)\", refac, marker)\n\t}\n\n\tshouldPass := (passFail == PASS)\n\tname := r.Description().Name\n\n\tcwd, _ := os.Getwd()\n\tabsPath, _ := filepath.Abs(filename)\n\trelativePath, _ := filepath.Rel(cwd, absPath)\n\tfmt.Println(name, relativePath)\n\n\tmainFile := filepath.Join(directory, MAIN_DOT_GO)\n\tif !exists(mainFile, t) {\n\t\tmainFile = filepath.Join(filepath.Join(directory, \"src\"), MAIN_DOT_GO)\n\t\tif !exists(mainFile, t) {\n\t\t\tmainFile = filename\n\t\t}\n\t}\n\n\tmainFile, err := filepath.Abs(mainFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\targs := refactoring.InterpretArgs(remainder, r.Description().Params)\n\n\tfileSystem := &filesystem.LocalFileSystem{}\n\tconfig := &refactoring.Config{\n\t\tFileSystem: fileSystem,\n\t\tScope: []string{mainFile},\n\t\tSelection: selection,\n\t\tArgs: args,\n\t\tGoPath: \"\", \/\/ FIXME(jeff): GOPATH\n\t}\n\tresult := r.Run(config)\n\tif shouldPass && result.Log.ContainsErrors() {\n\t\tt.Log(result.Log)\n\t\tt.Fatalf(\"Refactoring produced unexpected errors\")\n\t} else if !shouldPass && !result.Log.ContainsErrors() {\n\t\tt.Fatalf(\"Refactoring should have produced errors but didn't\")\n\t}\n\n\tfor filename, edits := range result.Edits {\n\t\toutput, err := text.ApplyToFile(edits, filename)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif shouldPass {\n\t\t\tcheckResult(filename, string(output), t)\n\n\t\t}\n\t}\n\t\/\/fmt.Println(\"filesystem changes\",result.FSChanges)\n\t\/\/ checkRenamedDir(result.RenameDir,\"fschanges.txt\")\n\n\tfsChangesFile := filepath.Join(directory, FSCHANGES_TXT)\n\tif !exists(fsChangesFile, t) {\n\t\tif len(result.FSChanges) > 0 {\n\t\t\tt.Fatalf(\"Refactoring returned file system changes, \"+\n\t\t\t\t\"but %s does not exist\", fsChangesFile)\n\t\t}\n\t} else {\n\t\tbytes, err := ioutil.ReadFile(fsChangesFile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfschanges := removeEmptyLines(strings.Split(string(bytes), \"\\n\"))\n\t\tif len(fschanges) != len(result.FSChanges) {\n\t\t\tt.Fatalf(\"Expected %d file system changes but got %d\",\n\t\t\t\tlen(fschanges), len(result.FSChanges))\n\t\t} else {\n\t\t\tfor i, chg := range result.FSChanges {\n\t\t\t\tif chg.String(directory) != strings.TrimSpace(fschanges[i]) {\n\t\t\t\t\tt.Fatalf(\"FSChanges[%d]\\nExpected: %s\\nActual: %s\", i, fschanges[i], chg.String(directory))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc removeEmptyLines(lines []string) []string {\n\tresult := []string{}\n\tfor _, line := range lines {\n\t\tif line != \"\" {\n\t\t\tresult = append(result, line)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc exists(filename string, t *testing.T) bool {\n\tif _, err := os.Stat(filename); err == nil {\n\t\treturn true\n\t} else {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc checkResult(filename string, actualOutput string, t *testing.T) {\n\tif _, err := os.Stat(filename + \".ignoreOutput\"); err == nil {\n\t\treturn\n\t}\n\tif _, err := os.Stat(filename + \".stripPaths\"); err == nil {\n\t\tdir := filepath.Dir(filename) + string(filepath.Separator)\n\t\tactualOutput = strings.Replace(actualOutput, dir, \"\", -1)\n\t}\n\tbytes, err := ioutil.ReadFile(filename + \"lden\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpectedOutput := strings.Replace(string(bytes), \"\\r\\n\", \"\\n\", -1)\n\tactualOutput = strings.Replace(actualOutput, \"\\r\\n\", \"\\n\", -1)\n\n\tif actualOutput != expectedOutput {\n\t\tfmt.Printf(\">>>>> Output does not match %slden\\n\", filename)\n\t\tfmt.Println(\"EXPECTED OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvvvv\")\n\t\tfmt.Println(expectedOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^^^\")\n\t\tfmt.Println(\"ACTUAL OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvv\")\n\t\tfmt.Println(actualOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^\")\n\t\tlenExpected, lenActual := len(expectedOutput), len(actualOutput)\n\t\tif lenExpected != lenActual {\n\t\t\tfmt.Printf(\"Length of expected output is %d; length of actual output is %d\\n\",\n\t\t\t\tlenExpected, lenActual)\n\t\t\tminLen := lenExpected\n\t\t\tif lenActual < minLen {\n\t\t\t\tminLen = lenActual\n\t\t\t}\n\t\t\tfor i := 0; i < minLen; i++ {\n\t\t\t\tif expectedOutput[i] != actualOutput[i] {\n\t\t\t\t\tfmt.Printf(\"Strings differ at index %d\\n\", i)\n\t\t\t\t\tfmt.Printf(\"Substrings starting at that index are:\\n\")\n\t\t\t\t\tfmt.Printf(\"Expected: [%s]\\n\", describe(expectedOutput[i:]))\n\t\t\t\t\tfmt.Printf(\"Actual: [%s]\\n\", describe(actualOutput[i:]))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.Fatalf(\"Refactoring test failed - %s\", filename)\n\t}\n}\n\n\/\/TODO Define after getting the value of Gopath\n\/*func checkRenamedDir(result.RenameDir []string,filename string) {\n\n if result.RenameDir != nil {\n\n bytes, err := ioutil.ReadFile(filename)\n if err != nil {\n\t\tt.Fatal(err)\n\t}\n expectedoutput := string(bytes)\n\n}\n\n}\n*\/\nfunc describe(s string) string {\n\t\/\/ FIXME: Jeff: Handle other non-printing characters\n\tif len(s) > 10 {\n\t\ts = s[:10]\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\\\\n\", -1)\n\ts = strings.Replace(s, \"\\r\", \"\\\\r\", -1)\n\ts = strings.Replace(s, \"\\t\", \"\\\\t\", -1)\n\treturn s\n}\n\nfunc splitMarker(filename string, marker string, t *testing.T) (refac string, selection text.Selection, remainder []string, result string) {\n\tfilename, err := filepath.Abs(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := strings.Split(marker, \",\")\n\tif len(fields) < 6 {\n\t\tt.Fatalf(\"Marker is invalid (must contain >= 5 fields): %s\", marker)\n\t}\n\trefac = fields[0]\n\tstartLine := parseInt(fields[1], t)\n\tstartCol := parseInt(fields[2], t)\n\tendLine := parseInt(fields[3], t)\n\tendCol := parseInt(fields[4], t)\n\tselection = &text.LineColSelection{filename,\n\t\tstartLine, startCol, endLine, endCol}\n\tremainder = fields[5 : len(fields)-1]\n\tresult = fields[len(fields)-1]\n\tif result != PASS && result != FAIL {\n\t\tt.Fatalf(\"Marker is invalid: last field must be %s or %s\",\n\t\t\tPASS, FAIL)\n\t}\n\treturn\n}\n\nfunc parseInt(s string, t *testing.T) int {\n\tresult, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"Marker is invalid: expecting integer, found %s\", s)\n\t}\n\treturn int(result)\n}\n\n\/\/ extractMarkers extracts comments of the form \/\/<<<<<a,b,c,d,e,f,g removing\n\/\/ the leading <<<<< and trimming any spaces from the left and right ends\nfunc extractMarkers(filename string, t *testing.T) []string {\n\tresult := []string{}\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)\n\tif err != nil {\n\t\tt.Logf(\"Cannot extract markers from %s -- unable to parse\",\n\t\t\tfilename)\n\t\twd, _ := os.Getwd()\n\t\tt.Logf(\"Working directory is %s\", wd)\n\t\tt.Fatal(err)\n\t}\n\tfor _, commentGroup := range f.Comments {\n\t\tfor _, comment := range commentGroup.List {\n\t\t\ttxt := comment.Text\n\t\t\tif strings.Contains(txt, MARKER) {\n\t\t\t\tidx := strings.Index(txt, MARKER) + len(MARKER)\n\t\t\t\ttxt = strings.TrimSpace(txt[idx:])\n\t\t\t\tresult = append(result, txt)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n<commit_msg>New search for .go\/.golden files refactoring_test<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file provides tests for all refactorings. The testdata directory is\n\/\/ structured as such:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ 002-test-name\/\n\/\/\n\/\/ To filter which refactorings are tested, run\n\/\/ go test -filter=something\n\/\/ Then, only tests in directories containing \"something\" are run. E.g.::\n\/\/ go test -filter=rename # Only run rename tests\n\/\/ go test -filter=shortassign\/003 # Only run shortassign test #3\n\/\/\n\/\/ Refactorings are run on the files in each test directory; special comments\n\/\/ in the .go files indicate what refactoring(s) to run and whether the\n\/\/ refactorings are expected to succeed or fail. Specifically, test files are\n\/\/ annotated with markers of the form\n\/\/ \/\/<<<<<name,startline,startcol,endline,endcol,arg1,arg2,...,argn,pass\n\/\/ The name indicates the refactoring to run. The next four fields specify a\n\/\/ text selection on which to invoke the refactoring. The arguments\n\/\/ arg1,arg2,...,argn are passed as arguments to the refactoring (see\n\/\/ Config.Args). The last field is either \"pass\" or \"fail\", indicating whether\n\/\/ the refactoring is expected to complete successfully or raise an error. If\n\/\/ the refactoring is expected to succeed, the resulting file is compared\n\/\/ against a .golden file with the same name in the same directory.\n\/\/\n\/\/ Each test directory (001-test-name, 002-test-name, etc.) is treated as the\n\/\/ root of a Go workspace when its tests are run; i.e., the GOPATH is set to\n\/\/ the test directory. This allows it to define its own packages. In such\n\/\/ cases, the test directory is usually structured as follows:\n\/\/\n\/\/ testdata\/\n\/\/ refactoring-name\/\n\/\/ 001-test-name\/\n\/\/ src\/\n\/\/ main.go\n\/\/ main.golden\n\/\/ package-name\/\n\/\/ package-file.go\n\/\/ package-file.golden\n\/\/\n\/\/ To additional options are available and are currently used only to test the\n\/\/ Debug refactoring. When it does not make sense to compare against a .golden\n\/\/ file, include an empty file named filename.golden.ignoreOutput instead. If\n\/\/ the output will contain absolute paths to files in the test folder, include\n\/\/ a file named filename.go.stripPaths, and the refactoring's output will be\n\/\/ stripped of all occurrences of the absolute path to the .go file.\n\npackage refactoring_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang-refactoring.org\/go-doctor\/engine\"\n\t\"golang-refactoring.org\/go-doctor\/filesystem\"\n\t\"golang-refactoring.org\/go-doctor\/refactoring\"\n\t\"golang-refactoring.org\/go-doctor\/text\"\n)\n\nconst MARKER = \"<<<<<\"\nconst PASS = \"pass\"\nconst FAIL = \"fail\"\n\nconst MAIN_DOT_GO = \"main.go\"\n\nconst FSCHANGES_TXT = \"fschanges.txt\"\n\nvar filterFlag = flag.String(\"filter\", \"\",\n\t\"Only tests from directories containing this substring will be run\")\n\nconst directory = \"testdata\/\"\n\nfunc TestRefactorings(t *testing.T) {\n\ttestDirs, err := ioutil.ReadDir(directory)\n\tfailIfError(err, t)\n\tfor _, testDirInfo := range testDirs {\n\t\tif testDirInfo.IsDir() {\n\t\t\trunAllTestsInSubdirectories(testDirInfo, t)\n\t\t}\n\t}\n}\n\nfunc runAllTestsInSubdirectories(testDirInfo os.FileInfo, t *testing.T) {\n\ttestDirPath := filepath.Join(directory, testDirInfo.Name())\n\tsubDirs, err := ioutil.ReadDir(testDirPath)\n\tfailIfError(err, t)\n\tfor _, subDirInfo := range subDirs {\n\t\tif subDirInfo.IsDir() {\n\t\t\tsubDirPath := filepath.Join(testDirPath, subDirInfo.Name())\n\t\t\tif strings.Contains(subDirPath, *filterFlag) {\n\t\t\t\trunAllTestsInDirectory(subDirPath, t)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RunAllTests is a utility method that runs a set of refactoring tests\n\/\/ based on markers in all of the files in subdirectories of a given directory\nfunc runAllTestsInDirectory(directory string, t *testing.T) {\n\tfiles, err := recursiveReadDir(directory)\n\tfailIfError(err, t)\n\n\tabsolutePath, err := filepath.Abs(directory)\n\tfailIfError(err, t)\n\terr = os.Setenv(\"GOPATH\", absolutePath)\n\tfailIfError(err, t)\n\n\trunTestsInFiles(directory, files, t)\n}\n\n\/\/ Assumes no duplication or circularity due to symbolic links\nfunc recursiveReadDir(path string) ([]string, error) {\n\tresult := []string{}\n\n\tfileInfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tfor _, fi := range fileInfos {\n\t\tif fi.IsDir() {\n\t\t\tcurrent := result\n\t\t\trest, err := recursiveReadDir(filepath.Join(path, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\n\t\t\tnewLen := len(current) + len(rest)\n\t\t\tresult = make([]string, newLen, newLen)\n\t\t\tcopy(result, current)\n\t\t\tcopy(result[len(current):], rest)\n\t\t} else {\n\t\t\tresult = append(result, filepath.Join(path, fi.Name()))\n\t\t}\n\t}\n\treturn result, err\n}\n\nfunc failIfError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc runTestsInFiles(directory string, files []string, t *testing.T) {\n\tmarkers := make(map[string][]string)\n\tfor _, path := range files {\n\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\tmarkers[path] = extractMarkers(path, t)\n\t\t}\n\t}\n\n\tif len(markers) == 0 {\n\t\tpwd, _ := os.Getwd()\n\t\tpwd = filepath.Base(pwd)\n\t\tt.Fatalf(\"No <<<<< markers found in any files in %s\", pwd)\n\t}\n\n\tfor path, markersInFile := range markers {\n\t\tfor _, marker := range markersInFile {\n\t\t\trunRefactoring(directory, path, marker, t)\n\t\t}\n\t}\n}\n\nfunc runRefactoring(directory string, filename string, marker string, t *testing.T) {\n\trefac, selection, remainder, passFail := splitMarker(filename, marker, t)\n\n\tr := engine.GetRefactoring(refac)\n\tif r == nil {\n\t\tt.Fatalf(\"There is no refactoring named %s (from marker %s)\", refac, marker)\n\t}\n\n\tshouldPass := (passFail == PASS)\n\tname := r.Description().Name\n\n\tcwd, _ := os.Getwd()\n\tabsPath, _ := filepath.Abs(filename)\n\trelativePath, _ := filepath.Rel(cwd, absPath)\n\tfmt.Println(name, relativePath)\n\n\tmainFile := filepath.Join(directory, MAIN_DOT_GO)\n\tif !exists(mainFile, t) {\n\t\tmainFile = filepath.Join(filepath.Join(directory, \"src\"), MAIN_DOT_GO)\n\t\tif !exists(mainFile, t) {\n\t\t\tmainFile = filename\n\t\t}\n\t}\n\n\tmainFile, err := filepath.Abs(mainFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\targs := refactoring.InterpretArgs(remainder, r.Description().Params)\n\n\tfileSystem := &filesystem.LocalFileSystem{}\n\tconfig := &refactoring.Config{\n\t\tFileSystem: fileSystem,\n\t\tScope: []string{mainFile},\n\t\tSelection: selection,\n\t\tArgs: args,\n\t\tGoPath: \"\", \/\/ FIXME(jeff): GOPATH\n\t}\n\tresult := r.Run(config)\n\tif shouldPass && result.Log.ContainsErrors() {\n\t\tt.Log(result.Log)\n\t\tt.Fatalf(\"Refactoring produced unexpected errors\")\n\t} else if !shouldPass && !result.Log.ContainsErrors() {\n\t\tt.Fatalf(\"Refactoring should have produced errors but didn't\")\n\t}\n\n\terr = filepath.Walk(directory,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif strings.HasSuffix(path, \".go\") {\n\t\t\t\tpath, err := filepath.Abs(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tedits, ok := result.Edits[path]\n\t\t\t\tif !ok {\n\t\t\t\t\tedits = text.NewEditSet()\n\t\t\t\t}\n\t\t\t\toutput, err := text.ApplyToFile(edits, path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif shouldPass {\n\t\t\t\t\tcheckResult(path, string(output), t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfsChangesFile := filepath.Join(directory, FSCHANGES_TXT)\n\tif !exists(fsChangesFile, t) {\n\t\tif len(result.FSChanges) > 0 {\n\t\t\tt.Fatalf(\"Refactoring returned file system changes, \"+\n\t\t\t\t\"but %s does not exist\", fsChangesFile)\n\t\t}\n\t} else {\n\t\tbytes, err := ioutil.ReadFile(fsChangesFile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfschanges := removeEmptyLines(strings.Split(string(bytes), \"\\n\"))\n\t\tif len(fschanges) != len(result.FSChanges) {\n\t\t\tt.Fatalf(\"Expected %d file system changes but got %d\",\n\t\t\t\tlen(fschanges), len(result.FSChanges))\n\t\t} else {\n\t\t\tfor i, chg := range result.FSChanges {\n\t\t\t\tif chg.String(directory) != strings.TrimSpace(fschanges[i]) {\n\t\t\t\t\tt.Fatalf(\"FSChanges[%d]\\nExpected: %s\\nActual: %s\", i, fschanges[i], chg.String(directory))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc removeEmptyLines(lines []string) []string {\n\tresult := []string{}\n\tfor _, line := range lines {\n\t\tif line != \"\" {\n\t\t\tresult = append(result, line)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc exists(filename string, t *testing.T) bool {\n\tif _, err := os.Stat(filename); err == nil {\n\t\treturn true\n\t} else {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc checkResult(filename string, actualOutput string, t *testing.T) {\n\tif _, err := os.Stat(filename + \".ignoreOutput\"); err == nil {\n\t\treturn\n\t}\n\tif _, err := os.Stat(filename + \".stripPaths\"); err == nil {\n\t\tdir := filepath.Dir(filename) + string(filepath.Separator)\n\t\tactualOutput = strings.Replace(actualOutput, dir, \"\", -1)\n\t}\n\tbytes, err := ioutil.ReadFile(filename + \"lden\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpectedOutput := strings.Replace(string(bytes), \"\\r\\n\", \"\\n\", -1)\n\tactualOutput = strings.Replace(actualOutput, \"\\r\\n\", \"\\n\", -1)\n\n\tif actualOutput != expectedOutput {\n\t\tfmt.Printf(\">>>>> Output does not match %slden\\n\", filename)\n\t\tfmt.Println(\"EXPECTED OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvvvv\")\n\t\tfmt.Println(expectedOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^^^\")\n\t\tfmt.Println(\"ACTUAL OUTPUT\")\n\t\tfmt.Println(\"vvvvvvvvvvvvv\")\n\t\tfmt.Println(actualOutput)\n\t\tfmt.Println(\"^^^^^^^^^^^^^\")\n\t\tlenExpected, lenActual := len(expectedOutput), len(actualOutput)\n\t\tif lenExpected != lenActual {\n\t\t\tfmt.Printf(\"Length of expected output is %d; length of actual output is %d\\n\",\n\t\t\t\tlenExpected, lenActual)\n\t\t\tminLen := lenExpected\n\t\t\tif lenActual < minLen {\n\t\t\t\tminLen = lenActual\n\t\t\t}\n\t\t\tfor i := 0; i < minLen; i++ {\n\t\t\t\tif expectedOutput[i] != actualOutput[i] {\n\t\t\t\t\tfmt.Printf(\"Strings differ at index %d\\n\", i)\n\t\t\t\t\tfmt.Printf(\"Substrings starting at that index are:\\n\")\n\t\t\t\t\tfmt.Printf(\"Expected: [%s]\\n\", describe(expectedOutput[i:]))\n\t\t\t\t\tfmt.Printf(\"Actual: [%s]\\n\", describe(actualOutput[i:]))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.Fatalf(\"Refactoring test failed - %s\", filename)\n\t}\n}\n\n\/\/TODO Define after getting the value of Gopath\n\/*func checkRenamedDir(result.RenameDir []string,filename string) {\n\n if result.RenameDir != nil {\n\n bytes, err := ioutil.ReadFile(filename)\n if err != nil {\n\t\tt.Fatal(err)\n\t}\n expectedoutput := string(bytes)\n\n}\n\n}\n*\/\nfunc describe(s string) string {\n\t\/\/ FIXME: Jeff: Handle other non-printing characters\n\tif len(s) > 10 {\n\t\ts = s[:10]\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\\\\n\", -1)\n\ts = strings.Replace(s, \"\\r\", \"\\\\r\", -1)\n\ts = strings.Replace(s, \"\\t\", \"\\\\t\", -1)\n\treturn s\n}\n\nfunc splitMarker(filename string, marker string, t *testing.T) (refac string, selection text.Selection, remainder []string, result string) {\n\tfilename, err := filepath.Abs(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := strings.Split(marker, \",\")\n\tif len(fields) < 6 {\n\t\tt.Fatalf(\"Marker is invalid (must contain >= 5 fields): %s\", marker)\n\t}\n\trefac = fields[0]\n\tstartLine := parseInt(fields[1], t)\n\tstartCol := parseInt(fields[2], t)\n\tendLine := parseInt(fields[3], t)\n\tendCol := parseInt(fields[4], t)\n\tselection = &text.LineColSelection{filename,\n\t\tstartLine, startCol, endLine, endCol}\n\tremainder = fields[5 : len(fields)-1]\n\tresult = fields[len(fields)-1]\n\tif result != PASS && result != FAIL {\n\t\tt.Fatalf(\"Marker is invalid: last field must be %s or %s\",\n\t\t\tPASS, FAIL)\n\t}\n\treturn\n}\n\nfunc parseInt(s string, t *testing.T) int {\n\tresult, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"Marker is invalid: expecting integer, found %s\", s)\n\t}\n\treturn int(result)\n}\n\n\/\/ extractMarkers extracts comments of the form \/\/<<<<<a,b,c,d,e,f,g removing\n\/\/ the leading <<<<< and trimming any spaces from the left and right ends\nfunc extractMarkers(filename string, t *testing.T) []string {\n\tresult := []string{}\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)\n\tif err != nil {\n\t\tt.Logf(\"Cannot extract markers from %s -- unable to parse\",\n\t\t\tfilename)\n\t\twd, _ := os.Getwd()\n\t\tt.Logf(\"Working directory is %s\", wd)\n\t\tt.Fatal(err)\n\t}\n\tfor _, commentGroup := range f.Comments {\n\t\tfor _, comment := range commentGroup.List {\n\t\t\ttxt := comment.Text\n\t\t\tif strings.Contains(txt, MARKER) {\n\t\t\t\tidx := strings.Index(txt, MARKER) + len(MARKER)\n\t\t\t\ttxt = strings.TrimSpace(txt[idx:])\n\t\t\t\tresult = append(result, txt)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\/\/\t\"encoding\/json\"\n)\n\nvar signatureAlgorithm = [...]string{\n\t\"UnknownSignatureAlgorithm\",\n\t\"MD2WithRSA\",\n\t\"MD5WithRSA\",\n\t\"SHA1WithRSA\",\n\t\"SHA256WithRSA\",\n\t\"SHA384WithRSA\",\n\t\"SHA512WithRSA\",\n\t\"DSAWithSHA1\",\n\t\"DSAWithSHA256\",\n\t\"ECDSAWithSHA1\",\n\t\"ECDSAWithSHA256\",\n\t\"ECDSAWithSHA384\",\n\t\"ECDSAWithSHA512\",\n}\n\nvar publicKeyAlgorithm = [...]string{\n\t\"UnknownPublicKeyAlgorithm\",\n\t\"RSA\",\n\t\"DAS\",\n\t\"ECDSA\",\n}\n\nvar programName = \"tlsRetriever\"\n\nfunc ExpiresIn(t time.Time) string {\n\tunits := [...]struct {\n\t\tsuffix string\n\t\tunit time.Duration\n\t}{\n\t\t{\"days\", 24 * time.Hour},\n\t\t{\"hours\", time.Hour},\n\t\t{\"minutes\", time.Minute},\n\t\t{\"seconds\", time.Second},\n\t}\n\td := t.Sub(time.Now())\n\tfor _, u := range units {\n\t\tif d > u.unit {\n\t\t\treturn fmt.Sprintf(\"Expires in %d %s\", d\/u.unit, u.suffix)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"Expired on %s\", t.Local())\n}\n\nfunc SHA1Hash(data []byte) string {\n\th := sha1.New()\n\th.Write(data)\n\treturn fmt.Sprintf(\"%X\", h.Sum(nil))\n}\n\n\/\/Need to decide about what charachteristics of the cartificate \n\/\/are going to be saved in the observatory\ntype SSLCerts struct {\n\tSHA1 string\n\tSubjectKeyId string\n\tVersion int\n\tSignatureAlgorithm string\n\tPublicKeyAlgorithm string\n\tSubject string\n\tDNSNames []string\n\tNotBefore, NotAfter string\n\tExpiresIn string\n\tIssuer string\n\tAuthorityKeyId string\n}\n\ntype SSLCertsJSON struct {\n\tSHA1 string `json:\"sha1\"`\n\tSubjectKeyId string `json:\"subKeyID\"`\n\tVersion int\t `json:\"version\"`\t\n\tSignatureAlgorithm string `json:\"sigAlg\"`\n\tPublicKeyAlgorithm string `json:\"pubKeyAlg\"`\n\tSubject string `json:\"subject\"`\n\tDNSNames []string `json:\"domain(s)\"`\n\tNotBefore string `json:\"notBefore\"`\n\tNotAfter \t\t\tstring `json:\"notAfter\"`\n\tExpiresIn string `json:\"Exp\"`\n\tIssuer string `json:\"issuer\"`\n\tAuthorityKeyId string `json:\"authKeyID\"`\n}\n\nfunc checkHost(domainName string, skipVerify bool) ([]SSLCerts, error) {\n\n\t\/\/Connect network\n\tipConn, err := net.DialTimeout(\"tcp\", domainName, 10000*time.Millisecond)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer ipConn.Close()\n\n\t\/\/ Configure tls to look at domainName\n\tconfig := tls.Config{ServerName: domainName,\n\t\tInsecureSkipVerify: skipVerify}\n\n\t\/\/ Connect to tls\n\tconn := tls.Client(ipConn, &config)\n\tdefer conn.Close()\n\n\t\/\/ Handshake with TLS to get certs\n\thsErr := conn.Handshake()\n\tif hsErr != nil {\n\t\treturn nil, hsErr\n\t}\n\n\tcerts := conn.ConnectionState().PeerCertificates\n\n\tif certs == nil || len(certs) < 1 {\n\t\treturn nil, errors.New(\"Could not get server's certificate from the TLS connection.\")\n\t}\n\n\tsslcerts := make([]SSLCerts, len(certs))\n\n\tfor i, cert := range certs {\n\t\ts := SSLCerts{SHA1: SHA1Hash(cert.Raw), SubjectKeyId: fmt.Sprintf(\"%X\", cert.SubjectKeyId),\n\t\t\tVersion: cert.Version, SignatureAlgorithm: signatureAlgorithm[cert.SignatureAlgorithm],\n\t\t\tPublicKeyAlgorithm: publicKeyAlgorithm[cert.PublicKeyAlgorithm],\n\t\t\tSubject: cert.Subject.CommonName,\n\t\t\tDNSNames: cert.DNSNames,\n\t\t\t\/\/times are calcualated locally for the time being\n\t\t\t\/\/maybe this needs to change and keep the original time\n\t\t\tNotBefore: cert.NotBefore.Local().String(),\n\t\t\tNotAfter: cert.NotAfter.Local().String(),\n\t\t\tExpiresIn: ExpiresIn(cert.NotAfter.Local()),\n\t\t\tIssuer: cert.Issuer.CommonName,\n\t\t\tAuthorityKeyId: fmt.Sprintf(\"%X\", cert.AuthorityKeyId),\n\t\t}\n\n\t\tsslcerts[i] = s\n\n\t}\n\n\treturn sslcerts, nil\n}\n\nfunc OutputToJson(domainName string, certs []SSLCerts ) bool{\n\n\t\/\/ for i, cert := range certs {\n\n\t\/\/ \tfmt.Printf(\"%d\",i)\n\t\/\/ \t\/\/var certificate []SSLCertsJSON\n\t\/\/ \t\/\/copy(certificate,cert)\n\t\/\/ \tcertif := &SSLCertsJSON(cert)\n\t\/\/ \tout, _ = json.Marshal(certif)\n\t\/\/ \tfmt.Printf(string(out))\n\t\/\/ }\n\treturn true\n}\n\nfunc OutputToStd( canonicalName string, certs []SSLCerts ){\n\n\tfor i, cert := range certs {\n\t\tif i == 0 {\n\t\t\tfmt.Printf(\"Certificate chain for %s\\n\", canonicalName)\n\t\t}\n\t\tfmt.Printf(\"Subject: %s\\n\", cert.Subject)\n\t\tfmt.Printf(\"\\tSHA1: %s\\n\", cert.SHA1)\n\t\tfmt.Printf(\"\\tSubjectKeyId: %s\\n\", cert.SubjectKeyId)\n\t\tfmt.Printf(\"\\tSignatureAlgorithm: %s\\n\", cert.SignatureAlgorithm)\n\t\tfmt.Printf(\"\\tPublicKeyAlgorithm: %s\\n\", cert.PublicKeyAlgorithm)\n\t\tfmt.Printf(\"\\tDNSNames: %v\\n\", cert.DNSNames)\n\t\tfmt.Printf(\"\\tNotBefore: %s\\n\", cert.NotBefore)\n\t\tfmt.Printf(\"\\tNotAfter: %s\\n\", cert.NotAfter)\n\t\tfmt.Printf(\"\\tExpiresIn: %s\\n\", cert.ExpiresIn)\n\t\tfmt.Printf(\"\\tIssuer: %s\\n\", cert.Issuer)\n\t\tfmt.Printf(\"\\tAuthorityKeyId: %s\\n\", cert.AuthorityKeyId)\n\t}\n}\n\nfunc Usage() {\n\tfmt.Printf(\"Usage: %s -d <domain name> -p <port> -o <outfile>\\n\",programName)\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tvar domainName, port, outfile, canonicalName string\n\n\tflag.StringVar(&domainName, \"d\", \"\", \"Domain name or IP Address of the host you want to check ssl certificates of.\")\n\tflag.StringVar(&port, \"p\", \"443\", \"Port Number\")\n\tflag.StringVar(&outfile, \"o\", \"output.json\", \"Output file\")\n\tflag.Parse()\n\n\tif len(os.Args)<3||(domainName == \"\") {\n\t\tUsage()\n\t\tos.Exit(1)\n\t}\n\n\tcanonicalName = domainName + \":\" + port\n\n\tvar ce string\n\tvar err error\n\tvar certs []SSLCerts\n\n\t\/\/ Catch any misconfigurations\n\tcerts, err = checkHost(canonicalName, false)\n\tif err != nil {\n\t\tce = fmt.Sprintf(\"%s\", err)\n\t}\n\n\t\/\/ proceed to gather the certs, ignoring the warnings\n\tif certs == nil && err != nil {\n\t\tcerts, err = checkHost(canonicalName, true)\n\t\tif err != nil {\n\t\t\tce = fmt.Sprintf(\"%s\", err)\n\t\t}\n\t}\n\n\tif ce != \"\" {\n\t\tfmt.Printf(\"WARNING ! :%s\\n\", ce)\n\t}\n\n\tOutputToStd(canonicalName, certs)\n\t\/\/OutputToJson(canonicalName, certs)\n}\n<commit_msg>deleted previous file...<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2017, Arkbriar\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage gitlab\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"time\"\n)\n\n\/\/ JobsService handles communication with the ci builds related methods\n\/\/ of the GitLab API.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html\ntype JobsService struct {\n\tclient *Client\n}\n\n\/\/ Job represents a ci build.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html\ntype Job struct {\n\tCommit *Commit `json:\"commit\"`\n\tCreatedAt *time.Time `json:\"created_at\"`\n\tCoverage float64 `json:\"coverage\"`\n\tArtifactsFile struct {\n\t\tFilename string `json:\"filename\"`\n\t\tSize int `json:\"size\"`\n\t} `json:\"artifacts_file\"`\n\tFinishedAt *time.Time `json:\"finished_at\"`\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tPipeline struct {\n\t\tID int `json:\"id\"`\n\t\tRef string `json:\"ref\"`\n\t\tSha string `json:\"sha\"`\n\t\tStatus string `json:\"status\"`\n\t} `json:\"pipeline\"`\n\tRef string `json:\"ref\"`\n\tRunner struct {\n\t\tID int `json:\"id\"`\n\t\tDescription string `json:\"description\"`\n\t\tActive bool `json:\"active\"`\n\t\tIsShared bool `json:\"is_shared\"`\n\t\tName string `json:\"name\"`\n\t} `json:\"runner\"`\n\tStage string `json:\"stage\"`\n\tStartedAt *time.Time `json:\"started_at\"`\n\tStatus string `json:\"status\"`\n\tTag bool `json:\"tag\"`\n\tUser *User `json:\"user\"`\n\tWebURL string `json:\"web_url\"`\n}\n\n\/\/ ListJobsOptions are options for two list apis\ntype ListJobsOptions struct {\n\tListOptions\n\tScope []BuildStateValue `url:\"scope,omitempty\" json:\"scope,omitempty\"`\n}\n\n\/\/ ListProjectJobs gets a list of jobs in a project.\n\/\/\n\/\/ The scope of jobs to show, one or array of: created, pending, running,\n\/\/ failed, success, canceled, skipped; showing all jobs if none provided\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#list-project-jobs\nfunc (s *JobsService) ListProjectJobs(pid interface{}, opts *ListJobsOptions, options ...OptionFunc) ([]Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\", url.QueryEscape(project))\n\n\treq, err := s.client.NewRequest(\"GET\", u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar jobs []Job\n\tresp, err := s.client.Do(req, &jobs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn jobs, resp, err\n}\n\n\/\/ ListPipelineJobs gets a list of jobs for specific pipeline in a\n\/\/ project. If the pipeline ID is not found, it will respond with 404.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#list-pipeline-jobs\nfunc (s *JobsService) ListPipelineJobs(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...OptionFunc) ([]*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/pipelines\/%d\/jobs\", url.QueryEscape(project), pipelineID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar jobs []*Job\n\tresp, err := s.client.Do(req, &jobs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn jobs, resp, err\n}\n\n\/\/ GetJob gets a single job of a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-a-single-job\nfunc (s *JobsService) GetJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ GetJobArtifacts get jobs artifacts of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-job-artifacts\nfunc (s *JobsService) GetJobArtifacts(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/artifacts\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactsBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactsBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactsBuf, resp, err\n}\n\n\/\/ DownloadArtifactsFile download the artifacts file from the given\n\/\/ reference name and job provided the job finished successfully.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#download-the-artifacts-file\nfunc (s *JobsService) DownloadArtifactsFile(pid interface{}, refName string, job string, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n var q struct {\n Job string `url:\"job,omitempty\" json:\"job,omitempty\"`\n }\n q.Job = job\n\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/artifacts\/%s\/download\", url.QueryEscape(project), refName)\n\n\treq, err := s.client.NewRequest(\"GET\", u, q, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactsBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactsBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactsBuf, resp, err\n}\n\n\/\/ DownloadSingleArtifactsFile download a file from the artifacts from the\n\/\/ given reference name and job provided the job finished successfully.\n\/\/ Only a single file is going to be extracted from the archive and streamed\n\/\/ to a client.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#download-a-single-artifact-file\nfunc (s *JobsService) DownloadSingleArtifactsFile(pid interface{}, jobID int, artifactPath string, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tu := fmt.Sprintf(\n\t\t\"projects\/%s\/jobs\/%d\/artifacts\/%s\",\n\t\turl.QueryEscape(project),\n\t\tjobID,\n\t\tartifactPath,\n\t)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactBuf, resp, err\n}\n\n\/\/ GetTraceFile gets a trace of a specific job of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-a-trace-file\nfunc (s *JobsService) GetTraceFile(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/trace\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttraceBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, traceBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn traceBuf, resp, err\n}\n\n\/\/ CancelJob cancels a single job of a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#cancel-a-job\nfunc (s *JobsService) CancelJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/cancel\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ RetryJob retries a single job of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#retry-a-job\nfunc (s *JobsService) RetryJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/retry\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ EraseJob erases a single job of a project, removes a job\n\/\/ artifacts and a job trace.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#erase-a-job\nfunc (s *JobsService) EraseJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/erase\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ KeepArtifacts prevents artifacts from being deleted when\n\/\/ expiration is set.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#keep-artifacts\nfunc (s *JobsService) KeepArtifacts(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/artifacts\/keep\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ PlayJob triggers a manual action to start a job.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#play-a-job\nfunc (s *JobsService) PlayJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/play\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n<commit_msg>[Jobs] Add options struct for DownloadArtifactsFile func which contains the job name<commit_after>\/\/\n\/\/ Copyright 2017, Arkbriar\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage gitlab\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"time\"\n)\n\n\/\/ JobsService handles communication with the ci builds related methods\n\/\/ of the GitLab API.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html\ntype JobsService struct {\n\tclient *Client\n}\n\n\/\/ Job represents a ci build.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html\ntype Job struct {\n\tCommit *Commit `json:\"commit\"`\n\tCreatedAt *time.Time `json:\"created_at\"`\n\tCoverage float64 `json:\"coverage\"`\n\tArtifactsFile struct {\n\t\tFilename string `json:\"filename\"`\n\t\tSize int `json:\"size\"`\n\t} `json:\"artifacts_file\"`\n\tFinishedAt *time.Time `json:\"finished_at\"`\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tPipeline struct {\n\t\tID int `json:\"id\"`\n\t\tRef string `json:\"ref\"`\n\t\tSha string `json:\"sha\"`\n\t\tStatus string `json:\"status\"`\n\t} `json:\"pipeline\"`\n\tRef string `json:\"ref\"`\n\tRunner struct {\n\t\tID int `json:\"id\"`\n\t\tDescription string `json:\"description\"`\n\t\tActive bool `json:\"active\"`\n\t\tIsShared bool `json:\"is_shared\"`\n\t\tName string `json:\"name\"`\n\t} `json:\"runner\"`\n\tStage string `json:\"stage\"`\n\tStartedAt *time.Time `json:\"started_at\"`\n\tStatus string `json:\"status\"`\n\tTag bool `json:\"tag\"`\n\tUser *User `json:\"user\"`\n\tWebURL string `json:\"web_url\"`\n}\n\n\/\/ ListJobsOptions are options for two list apis\ntype ListJobsOptions struct {\n\tListOptions\n\tScope []BuildStateValue `url:\"scope,omitempty\" json:\"scope,omitempty\"`\n}\n\n\/\/ ListProjectJobs gets a list of jobs in a project.\n\/\/\n\/\/ The scope of jobs to show, one or array of: created, pending, running,\n\/\/ failed, success, canceled, skipped; showing all jobs if none provided\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#list-project-jobs\nfunc (s *JobsService) ListProjectJobs(pid interface{}, opts *ListJobsOptions, options ...OptionFunc) ([]Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\", url.QueryEscape(project))\n\n\treq, err := s.client.NewRequest(\"GET\", u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar jobs []Job\n\tresp, err := s.client.Do(req, &jobs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn jobs, resp, err\n}\n\n\/\/ ListPipelineJobs gets a list of jobs for specific pipeline in a\n\/\/ project. If the pipeline ID is not found, it will respond with 404.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#list-pipeline-jobs\nfunc (s *JobsService) ListPipelineJobs(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...OptionFunc) ([]*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/pipelines\/%d\/jobs\", url.QueryEscape(project), pipelineID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar jobs []*Job\n\tresp, err := s.client.Do(req, &jobs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn jobs, resp, err\n}\n\n\/\/ GetJob gets a single job of a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-a-single-job\nfunc (s *JobsService) GetJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ GetJobArtifacts get jobs artifacts of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-job-artifacts\nfunc (s *JobsService) GetJobArtifacts(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/artifacts\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactsBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactsBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactsBuf, resp, err\n}\n\n\/\/ DownloadArtifactsFileOptions represents the available DownloadArtifactsFile() options.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#download-the-artifacts-file\ntype DownloadArtifactsFileOptions struct {\n\tJob *string `url:\"job,omitempty\" json:\"job,omitempty\"`\n}\n\n\/\/ DownloadArtifactsFile download the artifacts file from the given\n\/\/ reference name and job provided the job finished successfully.\n\/\/\n\/\/ GitLab API docs: https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#download-the-artifacts-file\nfunc (s *JobsService) DownloadArtifactsFile(pid interface{}, refName string, opt *DownloadArtifactsFileOptions, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/artifacts\/%s\/download\", url.QueryEscape(project), refName)\n\n\treq, err := s.client.NewRequest(\"GET\", u, opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactsBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactsBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactsBuf, resp, err\n}\n\n\/\/ DownloadSingleArtifactsFile download a file from the artifacts from the\n\/\/ given reference name and job provided the job finished successfully.\n\/\/ Only a single file is going to be extracted from the archive and streamed\n\/\/ to a client.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#download-a-single-artifact-file\nfunc (s *JobsService) DownloadSingleArtifactsFile(pid interface{}, jobID int, artifactPath string, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tu := fmt.Sprintf(\n\t\t\"projects\/%s\/jobs\/%d\/artifacts\/%s\",\n\t\turl.QueryEscape(project),\n\t\tjobID,\n\t\tartifactPath,\n\t)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tartifactBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, artifactBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn artifactBuf, resp, err\n}\n\n\/\/ GetTraceFile gets a trace of a specific job of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#get-a-trace-file\nfunc (s *JobsService) GetTraceFile(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/trace\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttraceBuf := new(bytes.Buffer)\n\tresp, err := s.client.Do(req, traceBuf)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn traceBuf, resp, err\n}\n\n\/\/ CancelJob cancels a single job of a project.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#cancel-a-job\nfunc (s *JobsService) CancelJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/cancel\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ RetryJob retries a single job of a project\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#retry-a-job\nfunc (s *JobsService) RetryJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/retry\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ EraseJob erases a single job of a project, removes a job\n\/\/ artifacts and a job trace.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#erase-a-job\nfunc (s *JobsService) EraseJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/erase\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ KeepArtifacts prevents artifacts from being deleted when\n\/\/ expiration is set.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#keep-artifacts\nfunc (s *JobsService) KeepArtifacts(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/artifacts\/keep\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n\n\/\/ PlayJob triggers a manual action to start a job.\n\/\/\n\/\/ GitLab API docs:\n\/\/ https:\/\/docs.gitlab.com\/ce\/api\/jobs.html#play-a-job\nfunc (s *JobsService) PlayJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects\/%s\/jobs\/%d\/play\", url.QueryEscape(project), jobID)\n\n\treq, err := s.client.NewRequest(\"POST\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tjob := new(Job)\n\tresp, err := s.client.Do(req, job)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn job, resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>package libchunk\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/Join will read and decrypt chunks for keys provided by the iterator and writes\n\/\/each chunk's contents to writer 'w' in order of key appearance. Chunks\n\/\/are fetched concurrently (locally or remote) but are guaranteed arrive in\n\/\/order to writer 'w' for assembly in the original format\nfunc Join(keys KeyIterator, w io.Writer, conf Config) error {\n\n\t\/\/result of working the item\n\ttype result struct {\n\t\tchunk []byte\n\t\terr error\n\t}\n\n\t\/\/work item\n\ttype item struct {\n\t\tkey K\n\t\tresCh chan *result\n\t\terr error\n\t}\n\n\t\/\/work is run concurrently\n\twork := func(it *item) {\n\t\tchunk, err := conf.Store.Get(it.key)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\/\/@TODO add fetching from remote\n\t\t\t\tit.resCh <- &result{nil, ErrNoSuchKey}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tit.resCh <- &result{nil, fmt.Errorf(\"failed to find key '%s': %v\", it.key, err)}\n\t\t\treturn\n\t\t}\n\n\t\tres := &result{}\n\t\tres.chunk, res.err = conf.AEAD.Open(nil, it.key[:], chunk, nil)\n\t\tit.resCh <- res\n\t}\n\n\t\/\/fan-out concurrent work\n\titemCh := make(chan *item, 10)\n\tgo func() {\n\t\tdefer close(itemCh)\n\t\tfor {\n\t\t\tk, err := keys.Next()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\titemCh <- &item{\n\t\t\t\t\t\tkey: k,\n\t\t\t\t\t\terr: fmt.Errorf(\"failed to iterate into next key: %v\", err),\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tit := &item{\n\t\t\t\tkey: k,\n\t\t\t\tresCh: make(chan *result),\n\t\t\t}\n\n\t\t\tgo work(it) \/\/create work\n\t\t\titemCh <- it \/\/send to fan-in thread for syncing results\n\t\t}\n\t}()\n\n\t\/\/fan in, output plaintext chunks\n\tfor it := range itemCh {\n\t\tif it.err != nil {\n\t\t\treturn fmt.Errorf(\"failed to iterate: %v\", it.err)\n\t\t}\n\n\t\tres := <-it.resCh\n\t\tif res.err != nil {\n\t\t\treturn fmt.Errorf(\"failed to work chunk '%s': %v\", it.key, res.err)\n\t\t} else {\n\t\t\t_, err := w.Write(res.chunk)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write chunk '%s' to output: %v\", it.key, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>added race condition protection in join logic<commit_after>package libchunk\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/Join will read and decrypt chunks for keys provided by the iterator and write\n\/\/each chunk's contents to writer 'w' in order of key appearance. Chunks are\n\/\/fetched concurrently (locally or remote) but are guaranteed to arrive in\n\/\/order to writer 'w' for assembly in the original format\nfunc Join(keys KeyIterator, w io.Writer, conf Config) error {\n\n\t\/\/result of working the item\n\ttype result struct {\n\t\tchunk []byte\n\t\terr error\n\t}\n\n\t\/\/work item\n\ttype item struct {\n\t\tkey K\n\t\tresCh chan *result\n\t\terr error\n\t\tpos int64\n\t}\n\n\t\/\/work is run concurrently\n\twork := func(it *item) {\n\t\tchunk, err := conf.Store.Get(it.key)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\/\/@TODO add fetching from remote\n\t\t\t\tit.resCh <- &result{nil, ErrNoSuchKey}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tit.resCh <- &result{nil, fmt.Errorf(\"failed to find key '%s': %v\", it.key, err)}\n\t\t\treturn\n\t\t}\n\n\t\tres := &result{}\n\t\tres.chunk, res.err = conf.AEAD.Open(nil, it.key[:], chunk, nil)\n\t\tit.resCh <- res\n\t}\n\n\t\/\/fan-out concurrent work\n\titemCh := make(chan *item, 10)\n\tgo func() {\n\t\tdefer close(itemCh)\n\t\tpos := int64(0)\n\t\tfor {\n\t\t\tk, err := keys.Next()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\titemCh <- &item{\n\t\t\t\t\t\tkey: k,\n\t\t\t\t\t\terr: fmt.Errorf(\"failed to iterate into next key: %v\", err),\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tit := &item{\n\t\t\t\tpos: pos,\n\t\t\t\tkey: k,\n\t\t\t\tresCh: make(chan *result),\n\t\t\t}\n\n\t\t\tgo work(it) \/\/create work\n\t\t\titemCh <- it \/\/send to fan-in thread for syncing results\n\t\t\tpos++\n\t\t}\n\t}()\n\n\t\/\/fan in, output plaintext chunks\n\tvar lastpos int64\n\tfor it := range itemCh {\n\t\tif it.err != nil {\n\t\t\treturn fmt.Errorf(\"failed to iterate: %v\", it.err)\n\t\t}\n\n\t\tif lastpos > it.pos {\n\t\t\t\/\/the language spec is unclear about guaranteed FIFO behaviour of\n\t\t\t\/\/buffered channels, in rare conditions this behaviour might not\n\t\t\t\/\/be guaranteed, for this project such a case be catestropic as it WILL\n\t\t\t\/\/corrupt large files. This is a buildin safeguard that asks the user to\n\t\t\t\/\/submit a real world example if this happens\n\t\t\treturn fmt.Errorf(\"Unexpected race condition during joining, chunk '%d' arrived before chunk '%d', please report this to the author with the file that is being split\", lastpos, it.pos)\n\t\t}\n\n\t\tres := <-it.resCh\n\t\tif res.err != nil {\n\t\t\treturn fmt.Errorf(\"failed to work chunk '%s': %v\", it.key, res.err)\n\t\t} else {\n\t\t\t_, err := w.Write(res.chunk)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write chunk '%s' to output: %v\", it.key, err)\n\t\t\t}\n\t\t}\n\n\t\tlastpos = it.pos\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"time\"\n\n\t\"github.com\/joshsoftware\/curem\/config\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\ntype lead struct {\n\tId bson.ObjectId `bson:\"_id\" json:\"id\"`\n\tContactId bson.ObjectId `bson:\"contactId,omitempty\" json:\"contactId,omitempty\"`\n\tSource string `bson:\"source,omitempty\" json:\"source,omitempty\"`\n\tOwner string `bson:\"owner,omitempty\" json:\"owner,omitempty\"`\n\tStatus string `bson:\"status,omitempty\" json:\"status,omitempty\"`\n\tTeamSize float64 `bson:\"teamSize,omitempty\" json:\"teamSize,omitempty\"`\n\tRatePerHour float64 `bson:\"ratePerHour,omitempty\" json:\"ratePerHour,omitempty\"`\n\tDurationInMonths float64 `bson:\"durationInMonths,omitempty\" json:\"durationInMonths,omitempty\"`\n\tEstimatedStartDate string `bson:\"estimatedStartDate,omitempty\" json:\"estimatedStartDate,omitempty\"`\n\tComments []string `bson:\"comments,omitempty\" json:\"comments,omitempty\"`\n\tCreatedAt time.Time `bson:\"createdAt,omitempty\" json:\"createdAt,omitempty\"`\n\tUpdatedAt time.Time `bson:\"updatedAt,omitempty\" json:\"updatedAt,omitempty\"`\n}\n\n\/\/ NewLead takes the fields of a lead, initializes a struct of lead type and returns\n\/\/ the pointer to that struct.\n\/\/ Also, It inserts the lead object into the database.\nfunc NewLead(cid bson.ObjectId, source, owner, status string, teamsize, rate, duration float64,\n\tstart string, comments []string) (*lead, error) {\n\tdoc := lead{\n\t\tId: bson.NewObjectId(),\n\t\tContactId: cid,\n\t\tSource: source,\n\t\tOwner: owner,\n\t\tStatus: status,\n\t\tTeamSize: teamsize,\n\t\tRatePerHour: rate,\n\t\tDurationInMonths: duration,\n\t\tEstimatedStartDate: start,\n\t\tComments: comments,\n\t}\n\n\tdoc.CreatedAt = doc.Id.Time()\n\tdoc.UpdatedAt = doc.CreatedAt\n\n\terr := config.LeadsCollection.Insert(doc)\n\tif err != nil {\n\t\treturn &lead{}, err\n\t}\n\treturn &doc, nil\n}\n\n\/\/ GetLead takes the lead Id as an argument and returns a pointer to a lead object.\nfunc GetLead(i bson.ObjectId) (*lead, error) {\n\tvar l lead\n\terr := config.LeadsCollection.FindId(i).One(&l)\n\tif err != nil {\n\t\treturn &lead{}, err\n\t}\n\treturn &l, nil\n}\n\n\/\/ GetAllLeads fetches all the leads from the database.\nfunc GetAllLeads() ([]lead, error) {\n\tvar l []lead\n\terr := config.LeadsCollection.Find(nil).All(&l)\n\tif err != nil {\n\t\treturn []lead{}, err\n\t}\n\treturn l, nil\n}\n\n\/\/ Update updates the lead in the database.\n\/\/ First, fetch a lead from the database and change the necessary fields.\n\/\/ Then call the Update method on that lead object.\nfunc (l *lead) Update() error {\n\tl.UpdatedAt = bson.Now()\n\terr := config.LeadsCollection.UpdateId(l.Id, l)\n\treturn err\n}\n\n\/\/ Delete deletes the lead from the database.\nfunc (l *lead) Delete() error {\n\treturn config.LeadsCollection.RemoveId(l.Id)\n}\n<commit_msg>Add incomingLead type and copyIncomingFields method<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/joshsoftware\/curem\/config\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\ntype lead struct {\n\tId bson.ObjectId `bson:\"_id\" json:\"id\"`\n\tContactId bson.ObjectId `bson:\"contactId,omitempty\" json:\"contactId,omitempty\"`\n\tSource string `bson:\"source,omitempty\" json:\"source,omitempty\"`\n\tOwner string `bson:\"owner,omitempty\" json:\"owner,omitempty\"`\n\tStatus string `bson:\"status,omitempty\" json:\"status,omitempty\"`\n\tTeamSize float64 `bson:\"teamSize,omitempty\" json:\"teamSize,omitempty\"`\n\tRatePerHour float64 `bson:\"ratePerHour,omitempty\" json:\"ratePerHour,omitempty\"`\n\tDurationInMonths float64 `bson:\"durationInMonths,omitempty\" json:\"durationInMonths,omitempty\"`\n\tEstimatedStartDate string `bson:\"estimatedStartDate,omitempty\" json:\"estimatedStartDate,omitempty\"`\n\tComments []string `bson:\"comments,omitempty\" json:\"comments,omitempty\"`\n\tCreatedAt time.Time `bson:\"createdAt,omitempty\" json:\"createdAt,omitempty\"`\n\tUpdatedAt time.Time `bson:\"updatedAt,omitempty\" json:\"updatedAt,omitempty\"`\n}\n\ntype incomingLead struct {\n\tId *bson.ObjectId `json:\"id\"`\n\tContactId *bson.ObjectId `json:\"contactId,omitempty\"`\n\tSource *string `json:\"source,omitempty\"`\n\tOwner *string `json:\"owner,omitempty\"`\n\tStatus *string `json:\"status,omitempty\"`\n\tTeamSize *float64 `json:\"teamSize,omitempty\"`\n\tRatePerHour *float64 `json:\"ratePerHour,omitempty\"`\n\tDurationInMonths *float64 `json:\"durationInMonths,omitempty\"`\n\tEstimatedStartDate *string `json:\"estimatedStartDate,omitempty\"`\n\tComments *[]string `json:\"comments,omitempty\"`\n}\n\nfunc (l *lead) copyIncomingFields(i *incomingLead) error {\n\tif i.Id != nil {\n\t\tif *i.Id != l.Id {\n\t\t\treturn errors.New(\"Id doesn't match\")\n\t\t}\n\t}\n\tif i.ContactId != nil {\n\t\tl.Id = *i.Id\n\t}\n\tif i.Source != nil {\n\t\tl.Source = *i.Source\n\t}\n\tif i.Owner != nil {\n\t\tl.Owner = *i.Owner\n\t}\n\tif i.Status != nil {\n\t\tl.Status = *i.Status\n\t}\n\tif i.TeamSize != nil {\n\t\tl.TeamSize = *i.TeamSize\n\t}\n\tif i.RatePerHour != nil {\n\t\tl.RatePerHour = *i.RatePerHour\n\t}\n\tif i.DurationInMonths != nil {\n\t\tl.DurationInMonths = *i.DurationInMonths\n\t}\n\tif i.EstimatedStartDate != nil {\n\t\tl.EstimatedStartDate = *i.EstimatedStartDate\n\t}\n\tif i.Comments != nil {\n\t\tl.Comments = *i.Comments\n\t}\n\treturn nil\n}\n\n\/\/ NewLead takes the fields of a lead, initializes a struct of lead type and returns\n\/\/ the pointer to that struct.\n\/\/ Also, It inserts the lead object into the database.\nfunc NewLead(cid bson.ObjectId, source, owner, status string, teamsize, rate, duration float64,\n\tstart string, comments []string) (*lead, error) {\n\tdoc := lead{\n\t\tId: bson.NewObjectId(),\n\t\tContactId: cid,\n\t\tSource: source,\n\t\tOwner: owner,\n\t\tStatus: status,\n\t\tTeamSize: teamsize,\n\t\tRatePerHour: rate,\n\t\tDurationInMonths: duration,\n\t\tEstimatedStartDate: start,\n\t\tComments: comments,\n\t}\n\n\tdoc.CreatedAt = doc.Id.Time()\n\tdoc.UpdatedAt = doc.CreatedAt\n\n\terr := config.LeadsCollection.Insert(doc)\n\tif err != nil {\n\t\treturn &lead{}, err\n\t}\n\treturn &doc, nil\n}\n\n\/\/ GetLead takes the lead Id as an argument and returns a pointer to a lead object.\nfunc GetLead(i bson.ObjectId) (*lead, error) {\n\tvar l lead\n\terr := config.LeadsCollection.FindId(i).One(&l)\n\tif err != nil {\n\t\treturn &lead{}, err\n\t}\n\treturn &l, nil\n}\n\n\/\/ GetAllLeads fetches all the leads from the database.\nfunc GetAllLeads() ([]lead, error) {\n\tvar l []lead\n\terr := config.LeadsCollection.Find(nil).All(&l)\n\tif err != nil {\n\t\treturn []lead{}, err\n\t}\n\treturn l, nil\n}\n\n\/\/ Update updates the lead in the database.\n\/\/ First, fetch a lead from the database and change the necessary fields.\n\/\/ Then call the Update method on that lead object.\nfunc (l *lead) Update() error {\n\tl.UpdatedAt = bson.Now()\n\terr := config.LeadsCollection.UpdateId(l.Id, l)\n\treturn err\n}\n\n\/\/ Delete deletes the lead from the database.\nfunc (l *lead) Delete() error {\n\treturn config.LeadsCollection.RemoveId(l.Id)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nvar (\n\tmodshell32 = syscall.NewLazyDLL(\"shell32.dll\")\n\tprocShellExecuteEx = modshell32.NewProc(\"ShellExecuteExW\")\n)\n\nconst (\n\t_SEE_MASK_NOCLOSEPROCESS = 0x00000040\n)\n\nconst (\n\t_ERROR_BAD_FORMAT = 11\n)\n\nconst (\n\t_SE_ERR_FNF = 2\n\t_SE_ERR_PNF = 3\n\t_SE_ERR_ACCESSDENIED = 5\n\t_SE_ERR_OOM = 8\n\t_SE_ERR_DLLNOTFOUND = 32\n\t_SE_ERR_SHARE = 26\n\t_SE_ERR_ASSOCINCOMPLETE = 27\n\t_SE_ERR_DDETIMEOUT = 28\n\t_SE_ERR_DDEFAIL = 29\n\t_SE_ERR_DDEBUSY = 30\n\t_SE_ERR_NOASSOC = 31\n)\n\ntype (\n\tdword uint32\n\thinstance syscall.Handle\n\thkey syscall.Handle\n\thwnd syscall.Handle\n\tulong uint32\n\tlpctstr uintptr\n\tlpvoid uintptr\n)\n\n\/\/ SHELLEXECUTEINFO struct\ntype _SHELLEXECUTEINFO struct {\n\tcbSize dword\n\tfMask ulong\n\thwnd hwnd\n\tlpVerb lpctstr\n\tlpFile lpctstr\n\tlpParameters lpctstr\n\tlpDirectory lpctstr\n\tnShow int\n\thInstApp hinstance\n\tlpIDList lpvoid\n\tlpClass lpctstr\n\thkeyClass hkey\n\tdwHotKey dword\n\thIconOrMonitor syscall.Handle\n\thProcess syscall.Handle\n}\n\n\/\/ _ShellExecuteAndWait is version of ShellExecuteEx which want process\nfunc _ShellExecuteAndWait(hwnd hwnd, lpOperation, lpFile, lpParameters, lpDirectory string, nShowCmd int) error {\n\tvar lpctstrVerb, lpctstrParameters, lpctstrDirectory lpctstr\n\tif len(lpOperation) != 0 {\n\t\tlpctstrVerb = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpOperation)))\n\t}\n\tif len(lpParameters) != 0 {\n\t\tlpctstrParameters = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpParameters)))\n\t}\n\tif len(lpDirectory) != 0 {\n\t\tlpctstrDirectory = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDirectory)))\n\t}\n\ti := &_SHELLEXECUTEINFO{\n\t\tfMask: _SEE_MASK_NOCLOSEPROCESS,\n\t\thwnd: hwnd,\n\t\tlpVerb: lpctstrVerb,\n\t\tlpFile: lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpFile))),\n\t\tlpParameters: lpctstrParameters,\n\t\tlpDirectory: lpctstrDirectory,\n\t\tnShow: nShowCmd,\n\t}\n\ti.cbSize = dword(unsafe.Sizeof(*i))\n\treturn _ShellExecuteEx(i)\n}\n\n\/\/ ShellExecuteEx is Windows API\nfunc _ShellExecuteEx(pExecInfo *_SHELLEXECUTEINFO) error {\n\tret, _, _ := procShellExecuteEx.Call(uintptr(unsafe.Pointer(pExecInfo)))\n\tif ret == 1 && pExecInfo.fMask&_SEE_MASK_NOCLOSEPROCESS != 0 {\n\t\ts, e := syscall.WaitForSingleObject(syscall.Handle(pExecInfo.hProcess), syscall.INFINITE)\n\t\tswitch s {\n\t\tcase syscall.WAIT_OBJECT_0:\n\t\t\tbreak\n\t\tcase syscall.WAIT_FAILED:\n\t\t\treturn os.NewSyscallError(\"WaitForSingleObject\", e)\n\t\tdefault:\n\t\t\treturn errors.New(\"Unexpected result from WaitForSingleObject\")\n\t\t}\n\t}\n\terrorMsg := \"\"\n\tif pExecInfo.hInstApp != 0 && pExecInfo.hInstApp <= 32 {\n\t\tswitch int(pExecInfo.hInstApp) {\n\t\tcase _SE_ERR_FNF:\n\t\t\terrorMsg = \"The specified file was not found\"\n\t\tcase _SE_ERR_PNF:\n\t\t\terrorMsg = \"The specified path was not found\"\n\t\tcase _ERROR_BAD_FORMAT:\n\t\t\terrorMsg = \"The .exe file is invalid (non-Win32 .exe or error in .exe image)\"\n\t\tcase _SE_ERR_ACCESSDENIED:\n\t\t\terrorMsg = \"The operating system denied access to the specified file\"\n\t\tcase _SE_ERR_ASSOCINCOMPLETE:\n\t\t\terrorMsg = \"The file name association is incomplete or invalid\"\n\t\tcase _SE_ERR_DDEBUSY:\n\t\t\terrorMsg = \"The DDE transaction could not be completed because other DDE transactions were being processed\"\n\t\tcase _SE_ERR_DDEFAIL:\n\t\t\terrorMsg = \"The DDE transaction failed\"\n\t\tcase _SE_ERR_DDETIMEOUT:\n\t\t\terrorMsg = \"The DDE transaction could not be completed because the request timed out\"\n\t\tcase _SE_ERR_DLLNOTFOUND:\n\t\t\terrorMsg = \"The specified DLL was not found\"\n\t\tcase _SE_ERR_NOASSOC:\n\t\t\terrorMsg = \"There is no application associated with the given file name extension\"\n\t\tcase _SE_ERR_OOM:\n\t\t\terrorMsg = \"There was not enough memory to complete the operation\"\n\t\tcase _SE_ERR_SHARE:\n\t\t\terrorMsg = \"A sharing violation occurred\"\n\t\tdefault:\n\t\t\terrorMsg = fmt.Sprintf(\"Unknown error occurred with error code %v\", pExecInfo.hInstApp)\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n\treturn errors.New(errorMsg)\n}\n\ntype msg struct {\n\tName string\n\tExit int\n\tError string\n\tData []byte\n}\n\nfunc msgWrite(enc *gob.Encoder, typ string) io.WriteCloser {\n\tr, w := io.Pipe()\n\tgo func() {\n\t\tdefer r.Close()\n\t\tvar b [4096]byte\n\t\tfor {\n\t\t\tn, err := r.Read(b[:])\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = enc.Encode(&msg{Name: typ, Data: b[:n]})\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\treturn w\n}\n\nfunc client(addr string) int {\n\t\/\/ connect to server\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer conn.Close()\n\n\tenc, dec := gob.NewEncoder(conn), gob.NewDecoder(conn)\n\n\tcmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)\n\n\t\/\/ stdin\n\tinw, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tenc.Encode(&msg{Name: \"error\", Error: fmt.Sprintf(\"cannot execute command: %v\", makeCmdLine(flag.Args()))})\n\t\treturn 1\n\t}\n\tdefer inw.Close()\n\n\t\/\/ stdout\n\toutw := msgWrite(enc, \"stdout\")\n\tdefer outw.Close()\n\tcmd.Stdout = outw\n\n\t\/\/ stderr\n\terrw := msgWrite(enc, \"stderr\")\n\tdefer errw.Close()\n\tcmd.Stderr = errw\n\n\tgo func() {\n\t\tdefer inw.Close()\n\tin_loop:\n\t\tfor {\n\t\t\tvar m msg\n\t\t\terr = dec.Decode(&m)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch m.Name {\n\t\t\tcase \"close\":\n\t\t\t\tbreak in_loop\n\t\t\tcase \"ctrlc\":\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\t\/\/ windows doesn't support os.Interrupt\n\t\t\t\t\texec.Command(\"taskkill\", \"\/F\", \"\/T\", \"\/PID\", fmt.Sprint(cmd.Process.Pid)).Run()\n\t\t\t\t} else {\n\t\t\t\t\tcmd.Process.Signal(os.Interrupt)\n\t\t\t\t}\n\t\t\t\tbreak in_loop\n\t\t\tcase \"stdin\":\n\t\t\t\tinw.Write(m.Data)\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = cmd.Run()\n\n\tcode := 1\n\tif err != nil {\n\t\tif status, ok := cmd.ProcessState.Sys().(syscall.WaitStatus); ok {\n\t\t\tcode = status.ExitStatus()\n\t\t}\n\t} else {\n\t\tcode = 0\n\t}\n\n\terr = enc.Encode(&msg{Name: \"exit\", Exit: code})\n\tif err != nil {\n\t\tenc.Encode(&msg{Name: \"error\", Error: fmt.Sprintf(\"cannot detect exit code: %v\", makeCmdLine(flag.Args()))})\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc makeCmdLine(args []string) string {\n\tvar s string\n\tfor _, v := range args {\n\t\tif s != \"\" {\n\t\t\ts += \" \"\n\t\t}\n\t\ts += syscall.EscapeArg(v)\n\t}\n\treturn s\n}\n\nfunc server() int {\n\t\/\/ make listner to communicate child process\n\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: cannot make listener\\n\", os.Args[0])\n\t\treturn 1\n\t}\n\tdefer lis.Close()\n\n\t\/\/ make sure executable name to avoid detecting same executable name\n\texe, err := os.Executable()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: cannot find executable\\n\", os.Args[0])\n\t\treturn 1\n\t}\n\targs := []string{\"-mode\", lis.Addr().String()}\n\targs = append(args, flag.Args()...)\n\n\tvar errExec error\n\tgo func() {\n\t\terr = _ShellExecuteAndWait(0, \"runas\", exe, makeCmdLine(args), \"\", syscall.SW_HIDE)\n\t\tif err != nil {\n\t\t\terrExec = err\n\t\t\tlis.Close()\n\t\t}\n\t}()\n\n\tconn, err := lis.Accept()\n\tif err != nil {\n\t\tif errExec != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: %v\\n\", os.Args[0], errExec)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: cannot execute command: %v\\n\", os.Args[0], makeCmdLine(flag.Args()))\n\t\t}\n\t\treturn 1\n\t}\n\tdefer conn.Close()\n\n\tenc, dec := gob.NewEncoder(conn), gob.NewDecoder(conn)\n\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc, os.Interrupt)\n\tgo func() {\n\t\tfor range sc {\n\t\t\tenc.Encode(&msg{Name: \"ctrlc\"})\n\t\t}\n\t}()\n\tdefer close(sc)\n\n\tgo func() {\n\t\tvar b [256]byte\n\t\tfor {\n\t\t\tn, err := os.Stdin.Read(b[:])\n\t\t\tif err != nil {\n\t\t\t\t\/\/ stdin was closed\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tenc.Encode(&msg{Name: \"close\"})\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = enc.Encode(&msg{Name: \"stdin\", Data: b[:n]})\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar m msg\n\t\terr = dec.Decode(&m)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: cannot execute command: %v\\n\", os.Args[0], makeCmdLine(flag.Args()))\n\t\t\treturn 1\n\t\t}\n\t\tswitch m.Name {\n\t\tcase \"stdout\":\n\t\t\tsyscall.Write(syscall.Stdout, m.Data)\n\t\tcase \"stderr\":\n\t\t\tsyscall.Write(syscall.Stderr, m.Data)\n\t\tcase \"error\":\n\t\t\tfmt.Fprintln(os.Stderr, m.Error)\n\t\tcase \"exit\":\n\t\t\treturn m.Exit\n\t\t}\n\t}\n}\n\nfunc main() {\n\tvar mode string\n\tflag.StringVar(&mode, \"mode\", \"\", \"mode\")\n\tflag.Parse()\n\tif mode != \"\" {\n\t\tos.Exit(client(mode))\n\t}\n\tos.Exit(server())\n}\n<commit_msg>fix typo<commit_after>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nvar (\n\tmodshell32 = syscall.NewLazyDLL(\"shell32.dll\")\n\tprocShellExecuteEx = modshell32.NewProc(\"ShellExecuteExW\")\n)\n\nconst (\n\t_SEE_MASK_NOCLOSEPROCESS = 0x00000040\n)\n\nconst (\n\t_ERROR_BAD_FORMAT = 11\n)\n\nconst (\n\t_SE_ERR_FNF = 2\n\t_SE_ERR_PNF = 3\n\t_SE_ERR_ACCESSDENIED = 5\n\t_SE_ERR_OOM = 8\n\t_SE_ERR_DLLNOTFOUND = 32\n\t_SE_ERR_SHARE = 26\n\t_SE_ERR_ASSOCINCOMPLETE = 27\n\t_SE_ERR_DDETIMEOUT = 28\n\t_SE_ERR_DDEFAIL = 29\n\t_SE_ERR_DDEBUSY = 30\n\t_SE_ERR_NOASSOC = 31\n)\n\ntype (\n\tdword uint32\n\thinstance syscall.Handle\n\thkey syscall.Handle\n\thwnd syscall.Handle\n\tulong uint32\n\tlpctstr uintptr\n\tlpvoid uintptr\n)\n\n\/\/ SHELLEXECUTEINFO struct\ntype _SHELLEXECUTEINFO struct {\n\tcbSize dword\n\tfMask ulong\n\thwnd hwnd\n\tlpVerb lpctstr\n\tlpFile lpctstr\n\tlpParameters lpctstr\n\tlpDirectory lpctstr\n\tnShow int\n\thInstApp hinstance\n\tlpIDList lpvoid\n\tlpClass lpctstr\n\thkeyClass hkey\n\tdwHotKey dword\n\thIconOrMonitor syscall.Handle\n\thProcess syscall.Handle\n}\n\n\/\/ _ShellExecuteAndWait is version of ShellExecuteEx which want process\nfunc _ShellExecuteAndWait(hwnd hwnd, lpOperation, lpFile, lpParameters, lpDirectory string, nShowCmd int) error {\n\tvar lpctstrVerb, lpctstrParameters, lpctstrDirectory lpctstr\n\tif len(lpOperation) != 0 {\n\t\tlpctstrVerb = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpOperation)))\n\t}\n\tif len(lpParameters) != 0 {\n\t\tlpctstrParameters = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpParameters)))\n\t}\n\tif len(lpDirectory) != 0 {\n\t\tlpctstrDirectory = lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDirectory)))\n\t}\n\ti := &_SHELLEXECUTEINFO{\n\t\tfMask: _SEE_MASK_NOCLOSEPROCESS,\n\t\thwnd: hwnd,\n\t\tlpVerb: lpctstrVerb,\n\t\tlpFile: lpctstr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpFile))),\n\t\tlpParameters: lpctstrParameters,\n\t\tlpDirectory: lpctstrDirectory,\n\t\tnShow: nShowCmd,\n\t}\n\ti.cbSize = dword(unsafe.Sizeof(*i))\n\treturn _ShellExecuteEx(i)\n}\n\n\/\/ ShellExecuteEx is Windows API\nfunc _ShellExecuteEx(pExecInfo *_SHELLEXECUTEINFO) error {\n\tret, _, _ := procShellExecuteEx.Call(uintptr(unsafe.Pointer(pExecInfo)))\n\tif ret == 1 && pExecInfo.fMask&_SEE_MASK_NOCLOSEPROCESS != 0 {\n\t\ts, e := syscall.WaitForSingleObject(syscall.Handle(pExecInfo.hProcess), syscall.INFINITE)\n\t\tswitch s {\n\t\tcase syscall.WAIT_OBJECT_0:\n\t\t\tbreak\n\t\tcase syscall.WAIT_FAILED:\n\t\t\treturn os.NewSyscallError(\"WaitForSingleObject\", e)\n\t\tdefault:\n\t\t\treturn errors.New(\"Unexpected result from WaitForSingleObject\")\n\t\t}\n\t}\n\terrorMsg := \"\"\n\tif pExecInfo.hInstApp != 0 && pExecInfo.hInstApp <= 32 {\n\t\tswitch int(pExecInfo.hInstApp) {\n\t\tcase _SE_ERR_FNF:\n\t\t\terrorMsg = \"The specified file was not found\"\n\t\tcase _SE_ERR_PNF:\n\t\t\terrorMsg = \"The specified path was not found\"\n\t\tcase _ERROR_BAD_FORMAT:\n\t\t\terrorMsg = \"The .exe file is invalid (non-Win32 .exe or error in .exe image)\"\n\t\tcase _SE_ERR_ACCESSDENIED:\n\t\t\terrorMsg = \"The operating system denied access to the specified file\"\n\t\tcase _SE_ERR_ASSOCINCOMPLETE:\n\t\t\terrorMsg = \"The file name association is incomplete or invalid\"\n\t\tcase _SE_ERR_DDEBUSY:\n\t\t\terrorMsg = \"The DDE transaction could not be completed because other DDE transactions were being processed\"\n\t\tcase _SE_ERR_DDEFAIL:\n\t\t\terrorMsg = \"The DDE transaction failed\"\n\t\tcase _SE_ERR_DDETIMEOUT:\n\t\t\terrorMsg = \"The DDE transaction could not be completed because the request timed out\"\n\t\tcase _SE_ERR_DLLNOTFOUND:\n\t\t\terrorMsg = \"The specified DLL was not found\"\n\t\tcase _SE_ERR_NOASSOC:\n\t\t\terrorMsg = \"There is no application associated with the given file name extension\"\n\t\tcase _SE_ERR_OOM:\n\t\t\terrorMsg = \"There was not enough memory to complete the operation\"\n\t\tcase _SE_ERR_SHARE:\n\t\t\terrorMsg = \"A sharing violation occurred\"\n\t\tdefault:\n\t\t\terrorMsg = fmt.Sprintf(\"Unknown error occurred with error code %v\", pExecInfo.hInstApp)\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n\treturn errors.New(errorMsg)\n}\n\ntype msg struct {\n\tName string\n\tExit int\n\tError string\n\tData []byte\n}\n\nfunc msgWrite(enc *gob.Encoder, typ string) io.WriteCloser {\n\tr, w := io.Pipe()\n\tgo func() {\n\t\tdefer r.Close()\n\t\tvar b [4096]byte\n\t\tfor {\n\t\t\tn, err := r.Read(b[:])\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = enc.Encode(&msg{Name: typ, Data: b[:n]})\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\treturn w\n}\n\nfunc client(addr string) int {\n\t\/\/ connect to server\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer conn.Close()\n\n\tenc, dec := gob.NewEncoder(conn), gob.NewDecoder(conn)\n\n\tcmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)\n\n\t\/\/ stdin\n\tinw, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tenc.Encode(&msg{Name: \"error\", Error: fmt.Sprintf(\"cannot execute command: %v\", makeCmdLine(flag.Args()))})\n\t\treturn 1\n\t}\n\tdefer inw.Close()\n\n\t\/\/ stdout\n\toutw := msgWrite(enc, \"stdout\")\n\tdefer outw.Close()\n\tcmd.Stdout = outw\n\n\t\/\/ stderr\n\terrw := msgWrite(enc, \"stderr\")\n\tdefer errw.Close()\n\tcmd.Stderr = errw\n\n\tgo func() {\n\t\tdefer inw.Close()\n\tin_loop:\n\t\tfor {\n\t\t\tvar m msg\n\t\t\terr = dec.Decode(&m)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch m.Name {\n\t\t\tcase \"close\":\n\t\t\t\tbreak in_loop\n\t\t\tcase \"ctrlc\":\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\t\/\/ windows doesn't support os.Interrupt\n\t\t\t\t\texec.Command(\"taskkill\", \"\/F\", \"\/T\", \"\/PID\", fmt.Sprint(cmd.Process.Pid)).Run()\n\t\t\t\t} else {\n\t\t\t\t\tcmd.Process.Signal(os.Interrupt)\n\t\t\t\t}\n\t\t\t\tbreak in_loop\n\t\t\tcase \"stdin\":\n\t\t\t\tinw.Write(m.Data)\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = cmd.Run()\n\n\tcode := 1\n\tif err != nil {\n\t\tif status, ok := cmd.ProcessState.Sys().(syscall.WaitStatus); ok {\n\t\t\tcode = status.ExitStatus()\n\t\t}\n\t} else {\n\t\tcode = 0\n\t}\n\n\terr = enc.Encode(&msg{Name: \"exit\", Exit: code})\n\tif err != nil {\n\t\tenc.Encode(&msg{Name: \"error\", Error: fmt.Sprintf(\"cannot detect exit code: %v\", makeCmdLine(flag.Args()))})\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc makeCmdLine(args []string) string {\n\tvar s string\n\tfor _, v := range args {\n\t\tif s != \"\" {\n\t\t\ts += \" \"\n\t\t}\n\t\ts += syscall.EscapeArg(v)\n\t}\n\treturn s\n}\n\nfunc server() int {\n\t\/\/ make listener to communicate child process\n\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: cannot make listener\\n\", os.Args[0])\n\t\treturn 1\n\t}\n\tdefer lis.Close()\n\n\t\/\/ make sure executable name to avoid detecting same executable name\n\texe, err := os.Executable()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: cannot find executable\\n\", os.Args[0])\n\t\treturn 1\n\t}\n\targs := []string{\"-mode\", lis.Addr().String()}\n\targs = append(args, flag.Args()...)\n\n\tvar errExec error\n\tgo func() {\n\t\terr = _ShellExecuteAndWait(0, \"runas\", exe, makeCmdLine(args), \"\", syscall.SW_HIDE)\n\t\tif err != nil {\n\t\t\terrExec = err\n\t\t\tlis.Close()\n\t\t}\n\t}()\n\n\tconn, err := lis.Accept()\n\tif err != nil {\n\t\tif errExec != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: %v\\n\", os.Args[0], errExec)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: cannot execute command: %v\\n\", os.Args[0], makeCmdLine(flag.Args()))\n\t\t}\n\t\treturn 1\n\t}\n\tdefer conn.Close()\n\n\tenc, dec := gob.NewEncoder(conn), gob.NewDecoder(conn)\n\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc, os.Interrupt)\n\tgo func() {\n\t\tfor range sc {\n\t\t\tenc.Encode(&msg{Name: \"ctrlc\"})\n\t\t}\n\t}()\n\tdefer close(sc)\n\n\tgo func() {\n\t\tvar b [256]byte\n\t\tfor {\n\t\t\tn, err := os.Stdin.Read(b[:])\n\t\t\tif err != nil {\n\t\t\t\t\/\/ stdin was closed\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tenc.Encode(&msg{Name: \"close\"})\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = enc.Encode(&msg{Name: \"stdin\", Data: b[:n]})\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar m msg\n\t\terr = dec.Decode(&m)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v: cannot execute command: %v\\n\", os.Args[0], makeCmdLine(flag.Args()))\n\t\t\treturn 1\n\t\t}\n\t\tswitch m.Name {\n\t\tcase \"stdout\":\n\t\t\tsyscall.Write(syscall.Stdout, m.Data)\n\t\tcase \"stderr\":\n\t\t\tsyscall.Write(syscall.Stderr, m.Data)\n\t\tcase \"error\":\n\t\t\tfmt.Fprintln(os.Stderr, m.Error)\n\t\tcase \"exit\":\n\t\t\treturn m.Exit\n\t\t}\n\t}\n}\n\nfunc main() {\n\tvar mode string\n\tflag.StringVar(&mode, \"mode\", \"\", \"mode\")\n\tflag.Parse()\n\tif mode != \"\" {\n\t\tos.Exit(client(mode))\n\t}\n\tos.Exit(server())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"bosun.org\/opentsdb\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n)\n\nvar (\n\tLog = log15.New()\n\n\tsampleExpiry = flag.Duration(\"scollector.sample-expiry\", 5*time.Minute, \"How long a sample is valid for.\")\n\tlastProcessed = prometheus.NewGauge(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"scollector_last_processed_timestamp_seconds\",\n\t\t\tHelp: \"Unix timestamp of the last processed scollector metric.\",\n\t\t},\n\t)\n)\n\n\/\/ Most of the ideas are stolen from https:\/\/github.com\/prometheus\/graphite_exporter\/blob\/master\/main.go\n\/\/ https:\/\/github.com\/prometheus\/graphite_exporter\/commit\/298611cd340e0a34bc9d2d434f47456c6c201221\n\ntype scollectorSample struct {\n\tName string\n\tLabels map[string]string\n\tHelp string\n\tValue float64\n\tType prometheus.ValueType\n\tTimestamp time.Time\n}\n\nfunc (s scollectorSample) NameWithLabels() string {\n\tlabels := make([]string, 0, len(s.Labels))\n\tfor k, v := range s.Labels {\n\t\tlabels = append(labels, k+\"=\"+v)\n\t}\n\tsort.Strings(labels)\n\treturn s.Name + \"#\" + strings.Join(labels, \";\")\n}\n\ntype scollectorCollector struct {\n\tsamples map[string]scollectorSample\n\ttypes map[string]string\n\tch chan scollectorSample\n\tmu sync.Mutex\n}\n\nfunc newScollectorCollector() *scollectorCollector {\n\tc := &scollectorCollector{\n\t\tch: make(chan scollectorSample, 0),\n\t\tsamples: make(map[string]scollectorSample, 512),\n\t\ttypes: make(map[string]string, 512),\n\t}\n\tgo c.processSamples()\n\treturn c\n}\n\nfunc (c *scollectorCollector) processSamples() {\n\tticker := time.NewTicker(time.Minute).C\n\tfor {\n\t\tselect {\n\t\tcase sample := <-c.ch:\n\t\t\tc.mu.Lock()\n\t\t\tc.samples[sample.NameWithLabels()] = sample\n\t\t\tc.mu.Unlock()\n\t\tcase <-ticker:\n\t\t\t\/\/ Garbage collect expired samples.\n\t\t\tageLimit := time.Now().Add(-*sampleExpiry)\n\t\t\tc.mu.Lock()\n\t\t\tfor k, sample := range c.samples {\n\t\t\t\tif ageLimit.After(sample.Timestamp) {\n\t\t\t\t\tdelete(c.samples, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.mu.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ Collect implements prometheus.Collector.\nfunc (c *scollectorCollector) Collect(ch chan<- prometheus.Metric) {\n\tLog.Debug(\"Collect\", \"samples\", len(c.samples))\n\tch <- lastProcessed\n\tLog.Debug(\"Collect\", \"lastProcessed\", lastProcessed)\n\n\tc.mu.Lock()\n\tsamples := make([]scollectorSample, 0, len(c.samples))\n\tfor _, sample := range c.samples {\n\t\tsamples = append(samples, sample)\n\t}\n\tc.mu.Unlock()\n\n\tageLimit := time.Now().Add(-*sampleExpiry)\n\tfor _, sample := range samples {\n\t\tif ageLimit.After(sample.Timestamp) {\n\t\t\tLog.Debug(\"skipping old sample\", \"limit\", ageLimit, \"sample\", sample)\n\t\t\tcontinue\n\t\t}\n\t\tLog.Debug(\"sending sample\", \"sample\", sample)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tprometheus.NewDesc(sample.Name, sample.Help, []string{}, sample.Labels),\n\t\t\tsample.Type,\n\t\t\tsample.Value,\n\t\t)\n\t}\n}\n\n\/\/ Describe implements prometheus.Collector.\nfunc (c *scollectorCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- lastProcessed.Desc()\n}\n\nvar dotReplacer = strings.NewReplacer(\".\", \"_\")\n\nfunc (c *scollectorCollector) handleScoll(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tsendErr := func(msg string, code int) {\n\t\tLog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusMethodNotAllowed)\n\t}\n\tif r.Method != \"POST\" {\n\t\tsendErr(\"Only POST is allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tsendErr(\"Only application\/json is allowed\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\trdr := io.Reader(r.Body)\n\tif r.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tvar err error\n\t\tif rdr, err = gzip.NewReader(rdr); err != nil {\n\t\t\tsendErr(\"Not gzipped: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\tvar batch []opentsdb.DataPoint\n\tif err := json.NewDecoder(rdr).Decode(&batch); err != nil {\n\t\tsendErr(\"cannot decode JSON: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tLog.Debug(\"batch\", \"size\", len(batch))\n\tn := 0\n\tfor _, m := range batch {\n\t\tLog.Debug(\"got\", \"msg\", m)\n\t\tname := clearName(m.Metric, true, 'x')\n\t\tif name == \"\" {\n\t\t\tLog.Warn(\"bad metric name: \" + m.Metric)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar v float64\n\t\tswitch x := m.Value.(type) {\n\t\tcase float64:\n\t\t\tv = x\n\t\tcase int:\n\t\t\tv = float64(x)\n\t\tcase int64:\n\t\t\tv = float64(x)\n\t\tcase int32:\n\t\t\tv = float64(x)\n\t\tcase string: \/\/ type info\n\t\t\tif x != \"\" {\n\t\t\t\tif z, ok := c.types[name]; !ok || z != x {\n\t\t\t\t\tc.types[name] = x\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tLog.Warn(\"unknown value\", \"type\", fmt.Sprintf(\"%T\", m.Value), \"msg\", m)\n\t\t\tcontinue\n\t\t}\n\t\ttyp := prometheus.GaugeValue\n\t\tif c.types[name] == \"counter\" {\n\t\t\ttyp = prometheus.CounterValue\n\t\t}\n\t\tvar ts time.Time\n\t\tif m.Timestamp >= 1e10 {\n\t\t\tts = time.Unix(\n\t\t\t\tm.Timestamp&(1e10-1),\n\t\t\t\tint64(math.Mod(float64(m.Timestamp), 1e10))*int64(time.Millisecond))\n\t\t} else {\n\t\t\tts = time.Unix(m.Timestamp, 0)\n\t\t}\n\t\tfor k := range m.Tags {\n\t\t\tk2 := clearName(k, false, 'x')\n\t\t\tif k2 != \"\" && k2 != k {\n\t\t\t\tLog.Warn(\"bad label name \" + k)\n\t\t\t\tm.Tags[k2] = m.Tags[k]\n\t\t\t\tdelete(m.Tags, k)\n\t\t\t}\n\t\t}\n\t\tif m.Tags[\"host\"] != \"\" {\n\t\t\tm.Tags[\"instance\"] = m.Tags[\"host\"]\n\t\t\tdelete(m.Tags, \"host\")\n\t\t}\n\t\tc.ch <- scollectorSample{\n\t\t\tName: name,\n\t\t\tLabels: m.Tags,\n\t\t\tType: typ,\n\t\t\tHelp: fmt.Sprintf(\"Scollector metric %s (%s)\", m.Metric, c.types[name]),\n\t\t\tValue: v,\n\t\t\tTimestamp: ts,\n\t\t}\n\t\tn++\n\t}\n\tlastProcessed.Set(float64(time.Now().UnixNano()) \/ 1e9)\n\tLog.Info(\"processed\", \"messages\", n, \"samples\", len(c.samples), \"types\", len(c.types))\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nvar (\n\tflagAddr = flag.String(\"http\", \"0.0.0.0:9107\", \"address to listen on\")\n\tflagScollPref = flag.String(\"scollector.prefix\", \"\/api\/put\", \"HTTP path prefix for listening for scollector-sent data\")\n\tflagVerbose = flag.Bool(\"v\", false, \"verbose logging\")\n)\n\nfunc main() {\n\thndl := log15.CallerFileHandler(log15.StderrHandler)\n\tLog.SetHandler(hndl)\n\n\tflag.Parse()\n\tif !*flagVerbose {\n\t\tLog.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, hndl))\n\t}\n\n\tc := newScollectorCollector()\n\tprometheus.MustRegister(c)\n\n\thttp.HandleFunc(*flagScollPref, c.handleScoll)\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\tLog.Info(\"Serving on \" + *flagAddr)\n\thttp.ListenAndServe(*flagAddr, nil)\n}\n\n\/\/ clearName replaces non-allowed characters.\nfunc clearName(txt string, allowColon bool, replaceRune rune) string {\n\tif replaceRune == 0 {\n\t\treplaceRune = -1\n\t}\n\ti := -1\n\treturn strings.Map(\n\t\tfunc(r rune) rune {\n\t\t\ti++\n\t\t\tif r == '.' {\n\t\t\t\treturn '_'\n\t\t\t}\n\t\t\tif 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tif allowColon && r == ':' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tif i > 0 && '0' <= r && r <= '9' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\treturn replaceRune\n\t\t},\n\t\ttxt)\n}\n<commit_msg>Bugfix: handleScoll's error response now uses the code parameter instead of http.StatusMethodNotAllowed<commit_after>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"bosun.org\/opentsdb\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n)\n\nvar (\n\tLog = log15.New()\n\n\tsampleExpiry = flag.Duration(\"scollector.sample-expiry\", 5*time.Minute, \"How long a sample is valid for.\")\n\tlastProcessed = prometheus.NewGauge(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"scollector_last_processed_timestamp_seconds\",\n\t\t\tHelp: \"Unix timestamp of the last processed scollector metric.\",\n\t\t},\n\t)\n)\n\n\/\/ Most of the ideas are stolen from https:\/\/github.com\/prometheus\/graphite_exporter\/blob\/master\/main.go\n\/\/ https:\/\/github.com\/prometheus\/graphite_exporter\/commit\/298611cd340e0a34bc9d2d434f47456c6c201221\n\ntype scollectorSample struct {\n\tName string\n\tLabels map[string]string\n\tHelp string\n\tValue float64\n\tType prometheus.ValueType\n\tTimestamp time.Time\n}\n\nfunc (s scollectorSample) NameWithLabels() string {\n\tlabels := make([]string, 0, len(s.Labels))\n\tfor k, v := range s.Labels {\n\t\tlabels = append(labels, k+\"=\"+v)\n\t}\n\tsort.Strings(labels)\n\treturn s.Name + \"#\" + strings.Join(labels, \";\")\n}\n\ntype scollectorCollector struct {\n\tsamples map[string]scollectorSample\n\ttypes map[string]string\n\tch chan scollectorSample\n\tmu sync.Mutex\n}\n\nfunc newScollectorCollector() *scollectorCollector {\n\tc := &scollectorCollector{\n\t\tch: make(chan scollectorSample, 0),\n\t\tsamples: make(map[string]scollectorSample, 512),\n\t\ttypes: make(map[string]string, 512),\n\t}\n\tgo c.processSamples()\n\treturn c\n}\n\nfunc (c *scollectorCollector) processSamples() {\n\tticker := time.NewTicker(time.Minute).C\n\tfor {\n\t\tselect {\n\t\tcase sample := <-c.ch:\n\t\t\tc.mu.Lock()\n\t\t\tc.samples[sample.NameWithLabels()] = sample\n\t\t\tc.mu.Unlock()\n\t\tcase <-ticker:\n\t\t\t\/\/ Garbage collect expired samples.\n\t\t\tageLimit := time.Now().Add(-*sampleExpiry)\n\t\t\tc.mu.Lock()\n\t\t\tfor k, sample := range c.samples {\n\t\t\t\tif ageLimit.After(sample.Timestamp) {\n\t\t\t\t\tdelete(c.samples, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.mu.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ Collect implements prometheus.Collector.\nfunc (c *scollectorCollector) Collect(ch chan<- prometheus.Metric) {\n\tLog.Debug(\"Collect\", \"samples\", len(c.samples))\n\tch <- lastProcessed\n\tLog.Debug(\"Collect\", \"lastProcessed\", lastProcessed)\n\n\tc.mu.Lock()\n\tsamples := make([]scollectorSample, 0, len(c.samples))\n\tfor _, sample := range c.samples {\n\t\tsamples = append(samples, sample)\n\t}\n\tc.mu.Unlock()\n\n\tageLimit := time.Now().Add(-*sampleExpiry)\n\tfor _, sample := range samples {\n\t\tif ageLimit.After(sample.Timestamp) {\n\t\t\tLog.Debug(\"skipping old sample\", \"limit\", ageLimit, \"sample\", sample)\n\t\t\tcontinue\n\t\t}\n\t\tLog.Debug(\"sending sample\", \"sample\", sample)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tprometheus.NewDesc(sample.Name, sample.Help, []string{}, sample.Labels),\n\t\t\tsample.Type,\n\t\t\tsample.Value,\n\t\t)\n\t}\n}\n\n\/\/ Describe implements prometheus.Collector.\nfunc (c *scollectorCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- lastProcessed.Desc()\n}\n\nvar dotReplacer = strings.NewReplacer(\".\", \"_\")\n\nfunc (c *scollectorCollector) handleScoll(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tsendErr := func(msg string, code int) {\n\t\tLog.Error(msg)\n\t\thttp.Error(w, msg, code)\n\t}\n\tif r.Method != \"POST\" {\n\t\tsendErr(\"Only POST is allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Content-Type\") != \"application\/json\" {\n\t\tsendErr(\"Only application\/json is allowed\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\trdr := io.Reader(r.Body)\n\tif r.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tvar err error\n\t\tif rdr, err = gzip.NewReader(rdr); err != nil {\n\t\t\tsendErr(\"Not gzipped: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\tvar batch []opentsdb.DataPoint\n\tif err := json.NewDecoder(rdr).Decode(&batch); err != nil {\n\t\tsendErr(\"cannot decode JSON: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tLog.Debug(\"batch\", \"size\", len(batch))\n\tn := 0\n\tfor _, m := range batch {\n\t\tLog.Debug(\"got\", \"msg\", m)\n\t\tname := clearName(m.Metric, true, 'x')\n\t\tif name == \"\" {\n\t\t\tLog.Warn(\"bad metric name: \" + m.Metric)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar v float64\n\t\tswitch x := m.Value.(type) {\n\t\tcase float64:\n\t\t\tv = x\n\t\tcase int:\n\t\t\tv = float64(x)\n\t\tcase int64:\n\t\t\tv = float64(x)\n\t\tcase int32:\n\t\t\tv = float64(x)\n\t\tcase string: \/\/ type info\n\t\t\tif x != \"\" {\n\t\t\t\tif z, ok := c.types[name]; !ok || z != x {\n\t\t\t\t\tc.types[name] = x\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tLog.Warn(\"unknown value\", \"type\", fmt.Sprintf(\"%T\", m.Value), \"msg\", m)\n\t\t\tcontinue\n\t\t}\n\t\ttyp := prometheus.GaugeValue\n\t\tif c.types[name] == \"counter\" {\n\t\t\ttyp = prometheus.CounterValue\n\t\t}\n\t\tvar ts time.Time\n\t\tif m.Timestamp >= 1e10 {\n\t\t\tts = time.Unix(\n\t\t\t\tm.Timestamp&(1e10-1),\n\t\t\t\tint64(math.Mod(float64(m.Timestamp), 1e10))*int64(time.Millisecond))\n\t\t} else {\n\t\t\tts = time.Unix(m.Timestamp, 0)\n\t\t}\n\t\tfor k := range m.Tags {\n\t\t\tk2 := clearName(k, false, 'x')\n\t\t\tif k2 != \"\" && k2 != k {\n\t\t\t\tLog.Warn(\"bad label name \" + k)\n\t\t\t\tm.Tags[k2] = m.Tags[k]\n\t\t\t\tdelete(m.Tags, k)\n\t\t\t}\n\t\t}\n\t\tif m.Tags[\"host\"] != \"\" {\n\t\t\tm.Tags[\"instance\"] = m.Tags[\"host\"]\n\t\t\tdelete(m.Tags, \"host\")\n\t\t}\n\t\tc.ch <- scollectorSample{\n\t\t\tName: name,\n\t\t\tLabels: m.Tags,\n\t\t\tType: typ,\n\t\t\tHelp: fmt.Sprintf(\"Scollector metric %s (%s)\", m.Metric, c.types[name]),\n\t\t\tValue: v,\n\t\t\tTimestamp: ts,\n\t\t}\n\t\tn++\n\t}\n\tlastProcessed.Set(float64(time.Now().UnixNano()) \/ 1e9)\n\tLog.Info(\"processed\", \"messages\", n, \"samples\", len(c.samples), \"types\", len(c.types))\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nvar (\n\tflagAddr = flag.String(\"http\", \"0.0.0.0:9107\", \"address to listen on\")\n\tflagScollPref = flag.String(\"scollector.prefix\", \"\/api\/put\", \"HTTP path prefix for listening for scollector-sent data\")\n\tflagVerbose = flag.Bool(\"v\", false, \"verbose logging\")\n)\n\nfunc main() {\n\thndl := log15.CallerFileHandler(log15.StderrHandler)\n\tLog.SetHandler(hndl)\n\n\tflag.Parse()\n\tif !*flagVerbose {\n\t\tLog.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, hndl))\n\t}\n\n\tc := newScollectorCollector()\n\tprometheus.MustRegister(c)\n\n\thttp.HandleFunc(*flagScollPref, c.handleScoll)\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\tLog.Info(\"Serving on \" + *flagAddr)\n\thttp.ListenAndServe(*flagAddr, nil)\n}\n\n\/\/ clearName replaces non-allowed characters.\nfunc clearName(txt string, allowColon bool, replaceRune rune) string {\n\tif replaceRune == 0 {\n\t\treplaceRune = -1\n\t}\n\ti := -1\n\treturn strings.Map(\n\t\tfunc(r rune) rune {\n\t\t\ti++\n\t\t\tif r == '.' {\n\t\t\t\treturn '_'\n\t\t\t}\n\t\t\tif 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tif allowColon && r == ':' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tif i > 0 && '0' <= r && r <= '9' {\n\t\t\t\treturn r\n\t\t\t}\n\t\t\treturn replaceRune\n\t\t},\n\t\ttxt)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ A command line tool that shows the status of (many) Go packages.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/kisielk\/gotool\"\n\t\"github.com\/shurcooL\/gostatus\/status\"\n\n\t\"github.com\/shurcooL\/go\/gists\/gist7480523\"\n\t\"github.com\/shurcooL\/go\/gists\/gist7651991\"\n\t\"github.com\/shurcooL\/go\/gists\/gist7802150\"\n)\n\nconst numWorkers = 8\n\nvar vFlag = flag.Bool(\"v\", false, \"Verbose output: show all Go packages, not just ones with notable status.\")\nvar stdinFlag = flag.Bool(\"stdin\", false, \"Read the list of newline separated Go packages from stdin.\")\nvar plumbingFlag = flag.Bool(\"plumbing\", false, \"Give the output in an easy-to-parse format for scripts.\")\nvar debugFlag = flag.Bool(\"debug\", false, \"Give the output with verbose debug information.\")\n\nfunc usage() {\n\tfmt.Fprint(os.Stderr, \"Usage: gostatus [flags] [packages]\\n\")\n\tfmt.Fprint(os.Stderr, \" [newline separated packages] | gostatus --stdin [flags]\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprint(os.Stderr, `\nExamples:\n # Show status of package in current directory, if notable.\n gostatus .\n\n # Show status of all packages with notable status.\n gostatus all\n\n # Show status of all dependencies (recursive) of package in cur working dir.\n go list -f '{{join .Deps \"\\n\"}}' . | gostatus --stdin -v\n\nLegend:\n ???? - Not under (recognized) version control\n b - Non-master branch checked out\n * - Uncommited changes in working dir\n + - Update available\n - - Local revision is ahead of remote (need to push?)\n ! - No remote\n # - Remote path doesn't match import path\n $ - Stash exists\n`)\n\tos.Exit(2)\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Get current directory.\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tshouldShow := func(goPackage *gist7480523.GoPackage) bool {\n\t\t\/\/ Check for notable status.\n\t\treturn status.PorcelainPresenter(goPackage)[:4] != \" \"\n\t}\n\tif *vFlag == true {\n\t\tshouldShow = func(_ *gist7480523.GoPackage) bool { return true }\n\t}\n\n\tvar presenter gist7480523.GoPackageStringer = status.PorcelainPresenter\n\tif *debugFlag == true {\n\t\tpresenter = status.DebugPresenter\n\t} else if *plumbingFlag == true {\n\t\tpresenter = status.PlumbingPresenter\n\t}\n\n\t\/\/ A map of repos that have been checked, to avoid doing same repo more than once.\n\tvar lock sync.Mutex\n\tcheckedRepos := map[string]bool{}\n\n\t\/\/ Input: Go package Import Path\n\t\/\/ Output: If a valid Go package and not part of standard library, output a status string, else nil.\n\treduceFunc := func(in string) interface{} {\n\t\tgoPackage := gist7480523.GoPackageFromPath(in, wd)\n\t\tif goPackage == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif goPackage.Standard {\n\t\t\treturn nil\n\t\t}\n\n\t\tgoPackage.UpdateVcs()\n\t\t\/\/ Check that the same repo hasn't already been done.\n\t\tif goPackage.Dir.Repo != nil {\n\t\t\trootPath := goPackage.Dir.Repo.Vcs.RootPath()\n\t\t\tlock.Lock()\n\t\t\tif !checkedRepos[rootPath] {\n\t\t\t\tcheckedRepos[rootPath] = true\n\t\t\t\tlock.Unlock()\n\t\t\t} else {\n\t\t\t\tlock.Unlock()\n\t\t\t\t\/\/ Skip repos that were done.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif goPackage.Dir.Repo != nil {\n\t\t\tgist7802150.MakeUpdated(goPackage.Dir.Repo.VcsLocal)\n\t\t\tgist7802150.MakeUpdated(goPackage.Dir.Repo.VcsRemote)\n\t\t}\n\n\t\tif shouldShow(goPackage) == false {\n\t\t\treturn nil\n\t\t}\n\t\treturn presenter(goPackage)\n\t}\n\n\t\/\/ Run reduceFunc on all import paths in parallel.\n\tvar outChan <-chan interface{}\n\tswitch *stdinFlag {\n\tcase false:\n\t\timportPathPatterns := flag.Args()\n\t\tif len(importPathPatterns) == 0 {\n\t\t\timportPathPatterns = []string{\".\"}\n\t\t}\n\t\timportPaths := gotool.ImportPaths(importPathPatterns)\n\t\toutChan = gist7651991.GoReduceLinesFromSlice(importPaths, numWorkers, reduceFunc)\n\tcase true:\n\t\toutChan = gist7651991.GoReduceLinesFromReader(os.Stdin, numWorkers, reduceFunc)\n\t}\n\n\t\/\/ Output results.\n\tfor out := range outChan {\n\t\tfmt.Println(out.(string))\n\t}\n}\n<commit_msg>Print \"can't load package\" message if invalid Go package is supplied.<commit_after>\/\/ A command line tool that shows the status of (many) Go packages.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/kisielk\/gotool\"\n\t\"github.com\/shurcooL\/gostatus\/status\"\n\n\t\"github.com\/shurcooL\/go\/gists\/gist7480523\"\n\t\"github.com\/shurcooL\/go\/gists\/gist7651991\"\n\t\"github.com\/shurcooL\/go\/gists\/gist7802150\"\n)\n\nconst numWorkers = 8\n\nvar vFlag = flag.Bool(\"v\", false, \"Verbose output: show all Go packages, not just ones with notable status.\")\nvar stdinFlag = flag.Bool(\"stdin\", false, \"Read the list of newline separated Go packages from stdin.\")\nvar plumbingFlag = flag.Bool(\"plumbing\", false, \"Give the output in an easy-to-parse format for scripts.\")\nvar debugFlag = flag.Bool(\"debug\", false, \"Give the output with verbose debug information.\")\n\nfunc usage() {\n\tfmt.Fprint(os.Stderr, \"Usage: gostatus [flags] [packages]\\n\")\n\tfmt.Fprint(os.Stderr, \" [newline separated packages] | gostatus --stdin [flags]\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprint(os.Stderr, `\nExamples:\n # Show status of package in current directory, if notable.\n gostatus .\n\n # Show status of all packages with notable status.\n gostatus all\n\n # Show status of all dependencies (recursive) of package in cur working dir.\n go list -f '{{join .Deps \"\\n\"}}' . | gostatus --stdin -v\n\nLegend:\n ???? - Not under (recognized) version control\n b - Non-master branch checked out\n * - Uncommited changes in working dir\n + - Update available\n - - Local revision is ahead of remote (need to push?)\n ! - No remote\n # - Remote path doesn't match import path\n $ - Stash exists\n`)\n\tos.Exit(2)\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Get current directory.\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tshouldShow := func(goPackage *gist7480523.GoPackage) bool {\n\t\t\/\/ Check for notable status.\n\t\treturn status.PorcelainPresenter(goPackage)[:4] != \" \"\n\t}\n\tif *vFlag == true {\n\t\tshouldShow = func(_ *gist7480523.GoPackage) bool { return true }\n\t}\n\n\tvar presenter gist7480523.GoPackageStringer = status.PorcelainPresenter\n\tif *debugFlag == true {\n\t\tpresenter = status.DebugPresenter\n\t} else if *plumbingFlag == true {\n\t\tpresenter = status.PlumbingPresenter\n\t}\n\n\t\/\/ A map of repos that have been checked, to avoid doing same repo more than once.\n\tvar lock sync.Mutex\n\tcheckedRepos := map[string]bool{}\n\n\t\/\/ Input: Go package Import Path\n\t\/\/ Output: If a valid Go package and not part of standard library, output a status string, else nil.\n\treduceFunc := func(in string) interface{} {\n\t\tgoPackage, err := gist7480523.GoPackageFromPath(in, wd)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"can't load package: %s\\n\", err)\n\t\t\treturn nil\n\t\t}\n\t\tif goPackage == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif goPackage.Standard {\n\t\t\treturn nil\n\t\t}\n\n\t\tgoPackage.UpdateVcs()\n\t\t\/\/ Check that the same repo hasn't already been done.\n\t\tif goPackage.Dir.Repo != nil {\n\t\t\trootPath := goPackage.Dir.Repo.Vcs.RootPath()\n\t\t\tlock.Lock()\n\t\t\tif !checkedRepos[rootPath] {\n\t\t\t\tcheckedRepos[rootPath] = true\n\t\t\t\tlock.Unlock()\n\t\t\t} else {\n\t\t\t\tlock.Unlock()\n\t\t\t\t\/\/ Skip repos that were done.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif goPackage.Dir.Repo != nil {\n\t\t\tgist7802150.MakeUpdated(goPackage.Dir.Repo.VcsLocal)\n\t\t\tgist7802150.MakeUpdated(goPackage.Dir.Repo.VcsRemote)\n\t\t}\n\n\t\tif shouldShow(goPackage) == false {\n\t\t\treturn nil\n\t\t}\n\t\treturn presenter(goPackage)\n\t}\n\n\t\/\/ Run reduceFunc on all import paths in parallel.\n\tvar outChan <-chan interface{}\n\tswitch *stdinFlag {\n\tcase false:\n\t\timportPathPatterns := flag.Args()\n\t\tif len(importPathPatterns) == 0 {\n\t\t\timportPathPatterns = []string{\".\"}\n\t\t}\n\t\timportPaths := gotool.ImportPaths(importPathPatterns)\n\t\toutChan = gist7651991.GoReduceLinesFromSlice(importPaths, numWorkers, reduceFunc)\n\tcase true:\n\t\toutChan = gist7651991.GoReduceLinesFromReader(os.Stdin, numWorkers, reduceFunc)\n\t}\n\n\t\/\/ Output results.\n\tfor out := range outChan {\n\t\tfmt.Println(out.(string))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/dchest\/uniuri\"\n\t\"github.com\/ewhal\/pygments\"\n\t\"github.com\/gorilla\/mux\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nconst (\n\tADDRESS = \"http:\/\/localhost:9900\"\n\tLENGTH = 6\n\tTEXT = \"$ <command> | curl -F 'p=<-' \" + ADDRESS + \"\\n\"\n\tPORT = \":9900\"\n)\n\ntype Response struct {\n\tID string `json:\"id\"`\n\tHASH string `json:\"hash\"`\n\tURL string `json:\"url\"`\n\tSIZE int `json:\"size\"`\n\tDELKEY string `json:\"delkey\"`\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc generateName() string {\n\ts := uniuri.NewLen(LENGTH)\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tcheck(err)\n\n\tquery, err := db.Query(\"select id from pastebin\")\n\tfor query.Next() {\n\t\tvar id string\n\t\terr := query.Scan(&id)\n\t\tif err != nil {\n\n\t\t}\n\t\tif id == s {\n\t\t\tgenerateName()\n\t\t}\n\t}\n\tdb.Close()\n\n\treturn s\n\n}\nfunc hash(paste []byte) string {\n\thasher := sha1.New()\n\thasher.Write(paste)\n\tsha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n\treturn sha\n}\n\nfunc save(raw []byte) []string {\n\tp := raw[86 : len(raw)-46]\n\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tcheck(err)\n\n\tsha := hash(p)\n\tid := generateName()\n\turl := ADDRESS + \"\/p\/\" + id\n\tdelKey := uniuri.NewLen(40)\n\tpaste := html.EscapeString(string(p))\n\n\tstmt, err := db.Prepare(\"INSERT INTO pastebin(id, hash, data, delkey) values(?,?,?,?)\")\n\tcheck(err)\n\t_, err = stmt.Exec(id, sha, paste, delKey)\n\tcheck(err)\n\tdb.Close()\n\treturn []string{id, sha, url, paste, delKey}\n\n}\n\nfunc delHandler(w http.ResponseWriter, r *http.Request) {\n}\nfunc saveHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\toutput := vars[\"output\"]\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tbuf, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t}\n\n\t\tvalues := save(buf)\n\n\t\tswitch output {\n\t\tcase \"json\":\n\t\t\tb := &Response{\n\t\t\t\tID: values[0],\n\t\t\t\tHASH: values[1],\n\t\t\t\tURL: values[2],\n\t\t\t\tSIZE: len(values[3]),\n\t\t\t\tDELKEY: values[4],\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\terr := json.NewEncoder(w).Encode(b)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tdefault:\n\t\t\tio.WriteString(w, values[2]+\"\\n\")\n\t\t}\n\t}\n\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tio.WriteString(w, TEXT)\n}\nfunc langHandler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tlang := vars[\"lang\"]\n\ts := getPaste(paste)\n\thighlight := pygments.Highlight(html.UnescapeString(s), lang, \"html\", \"full, style=autumn,linenos=True, lineanchors=True,anchorlinenos=True,\", \"utf-8\")\n\tio.WriteString(w, highlight)\n\n}\n\nfunc getPaste(paste string) string {\n\tparam1 := html.EscapeString(paste)\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tvar s string\n\terr = db.QueryRow(\"select data from pastebin where id=?\", param1).Scan(&s)\n\tdb.Close()\n\tcheck(err)\n\n\tif err == sql.ErrNoRows {\n\t\treturn \"Error invalid paste\"\n\t} else {\n\t\treturn html.UnescapeString(s)\n\t}\n\n}\nfunc pasteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\ts := getPaste(paste)\n\tio.WriteString(w, s)\n\n}\n\nfunc main() {\n\trouter := mux.NewRouter().StrictSlash(true)\n\trouter.HandleFunc(\"\/\", rootHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\", pasteHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\/{lang}\", langHandler)\n\trouter.HandleFunc(\"\/save\", saveHandler)\n\trouter.HandleFunc(\"\/save\/{output}\", saveHandler)\n\trouter.HandleFunc(\"\/del\/{pasteId}\/{delKey}\", delHandler)\n\terr := http.ListenAndServe(PORT, router)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<commit_msg>Add xml response<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/dchest\/uniuri\"\n\t\"github.com\/ewhal\/pygments\"\n\t\"github.com\/gorilla\/mux\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nconst (\n\tADDRESS = \"http:\/\/localhost:9900\"\n\tLENGTH = 6\n\tTEXT = \"$ <command> | curl -F 'p=<-' \" + ADDRESS + \"\\n\"\n\tPORT = \":9900\"\n)\n\ntype Response struct {\n\tID string `json:\"id\"`\n\tHASH string `json:\"hash\"`\n\tURL string `json:\"url\"`\n\tSIZE int `json:\"size\"`\n\tDELKEY string `json:\"delkey\"`\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc generateName() string {\n\ts := uniuri.NewLen(LENGTH)\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tcheck(err)\n\n\tquery, err := db.Query(\"select id from pastebin\")\n\tfor query.Next() {\n\t\tvar id string\n\t\terr := query.Scan(&id)\n\t\tif err != nil {\n\n\t\t}\n\t\tif id == s {\n\t\t\tgenerateName()\n\t\t}\n\t}\n\tdb.Close()\n\n\treturn s\n\n}\nfunc hash(paste []byte) string {\n\thasher := sha1.New()\n\thasher.Write(paste)\n\tsha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n\treturn sha\n}\n\nfunc save(raw []byte) []string {\n\tp := raw[86 : len(raw)-46]\n\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tcheck(err)\n\n\tsha := hash(p)\n\tid := generateName()\n\turl := ADDRESS + \"\/p\/\" + id\n\tdelKey := uniuri.NewLen(40)\n\tpaste := html.EscapeString(string(p))\n\n\tstmt, err := db.Prepare(\"INSERT INTO pastebin(id, hash, data, delkey) values(?,?,?,?)\")\n\tcheck(err)\n\t_, err = stmt.Exec(id, sha, paste, delKey)\n\tcheck(err)\n\tdb.Close()\n\treturn []string{id, sha, url, paste, delKey}\n\n}\n\nfunc delHandler(w http.ResponseWriter, r *http.Request) {\n}\nfunc saveHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\toutput := vars[\"output\"]\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tbuf, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t}\n\n\t\tvalues := save(buf)\n\t\tb := &Response{\n\t\t\tID: values[0],\n\t\t\tHASH: values[1],\n\t\t\tURL: values[2],\n\t\t\tSIZE: len(values[3]),\n\t\t\tDELKEY: values[4],\n\t\t}\n\n\t\tswitch output {\n\t\tcase \"json\":\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\terr := json.NewEncoder(w).Encode(b)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"xml\":\n\t\t\tx, err := xml.MarshalIndent(b, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\t\t\tw.Write(x)\n\n\t\tdefault:\n\t\t\tio.WriteString(w, values[2]+\"\\n\")\n\t\t}\n\t}\n\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tio.WriteString(w, TEXT)\n}\nfunc langHandler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tlang := vars[\"lang\"]\n\ts := getPaste(paste)\n\thighlight := pygments.Highlight(html.UnescapeString(s), lang, \"html\", \"full, style=autumn,linenos=True, lineanchors=True,anchorlinenos=True,\", \"utf-8\")\n\tio.WriteString(w, highlight)\n\n}\n\nfunc getPaste(paste string) string {\n\tparam1 := html.EscapeString(paste)\n\tdb, err := sql.Open(\"sqlite3\", \".\/database.db\")\n\tvar s string\n\terr = db.QueryRow(\"select data from pastebin where id=?\", param1).Scan(&s)\n\tdb.Close()\n\tcheck(err)\n\n\tif err == sql.ErrNoRows {\n\t\treturn \"Error invalid paste\"\n\t} else {\n\t\treturn html.UnescapeString(s)\n\t}\n\n}\nfunc pasteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\ts := getPaste(paste)\n\tio.WriteString(w, s)\n\n}\n\nfunc main() {\n\trouter := mux.NewRouter().StrictSlash(true)\n\trouter.HandleFunc(\"\/\", rootHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\", pasteHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\/{lang}\", langHandler)\n\trouter.HandleFunc(\"\/save\", saveHandler)\n\trouter.HandleFunc(\"\/save\/{output}\", saveHandler)\n\trouter.HandleFunc(\"\/del\/{pasteId}\/{delKey}\", delHandler)\n\terr := http.ListenAndServe(PORT, router)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/dtan4\/ct2stimer\/crontab\"\n\t\"github.com\/dtan4\/ct2stimer\/systemd\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\nvar opts = struct {\n\tafter string\n\tfilename string\n\tnameRegexp string\n\toutdir string\n\treload bool\n}{}\n\nfunc parseArgs(args []string) error {\n\tf := flag.NewFlagSet(\"ct2stimer\", flag.ExitOnError)\n\n\tf.StringVar(&opts.filename, \"after\", \"\", \"unit dependencies (After=)\")\n\tf.StringVarP(&opts.filename, \"file\", \"f\", \"\", \"crontab file\")\n\tf.StringVar(&opts.nameRegexp, \"name-regexp\", \"\", \"regexp to extract scheduler name from crontab\")\n\tf.StringVarP(&opts.outdir, \"outdir\", \"o\", systemd.DefaultUnitsDirectory, \"directory to save systemd files\")\n\tf.BoolVar(&opts.reload, \"reload\", false, \"reload & start genreated timers\")\n\n\tf.Parse(args)\n\n\tif opts.filename == \"\" {\n\t\treturn fmt.Errorf(\"Please specify crontab file.\")\n\t}\n\n\tif opts.outdir == \"\" {\n\t\treturn fmt.Errorf(\"Please specify directory to save systemd files.\")\n\t}\n\n\tif _, err := os.Stat(opts.outdir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"%s: directory does not exist\", opts.outdir)\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc reloadSystemd(timers []string) error {\n\tconn, err := systemd.NewConn()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tdefer conn.Close()\n\n\tclient := systemd.NewClient(conn)\n\n\tif err := client.Reload(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, timerUnit := range timers {\n\t\tif err := client.StartUnit(timerUnit); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tif err := parseArgs(os.Args[1:]); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tbody, err := ioutil.ReadFile(opts.filename)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tschedules, err := crontab.Parse(string(body))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\ttimers := []string{}\n\n\tfor _, schedule := range schedules {\n\t\tcalendar, err := schedule.ConvertToSystemdCalendar()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tvar name string\n\n\t\tif opts.nameRegexp != \"\" {\n\t\t\tre, err := regexp.Compile(opts.nameRegexp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tname = schedule.NameByRegexp(re)\n\t\t}\n\n\t\tif name == \"\" {\n\t\t\tname = \"cron-\" + schedule.SHA256Sum()[0:12]\n\t\t}\n\n\t\tservice, err := systemd.GenerateService(name, schedule.Command, opts.after)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tservicePath := filepath.Join(opts.outdir, name+\".service\")\n\t\tif err := ioutil.WriteFile(servicePath, []byte(service), 0644); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimer, err := systemd.GenerateTimer(name, calendar)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimerPath := filepath.Join(opts.outdir, name+\".timer\")\n\t\tif err := ioutil.WriteFile(timerPath, []byte(timer), 0644); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimers = append(timers, name+\".timer\")\n\t}\n\n\tif opts.reload {\n\t\tif err := reloadSystemd(timers); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<commit_msg>Modify --after variable<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/dtan4\/ct2stimer\/crontab\"\n\t\"github.com\/dtan4\/ct2stimer\/systemd\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\nvar opts = struct {\n\tafter string\n\tfilename string\n\tnameRegexp string\n\toutdir string\n\treload bool\n}{}\n\nfunc parseArgs(args []string) error {\n\tf := flag.NewFlagSet(\"ct2stimer\", flag.ExitOnError)\n\n\tf.StringVar(&opts.after, \"after\", \"\", \"unit dependencies (After=)\")\n\tf.StringVarP(&opts.filename, \"file\", \"f\", \"\", \"crontab file\")\n\tf.StringVar(&opts.nameRegexp, \"name-regexp\", \"\", \"regexp to extract scheduler name from crontab\")\n\tf.StringVarP(&opts.outdir, \"outdir\", \"o\", systemd.DefaultUnitsDirectory, \"directory to save systemd files\")\n\tf.BoolVar(&opts.reload, \"reload\", false, \"reload & start genreated timers\")\n\n\tf.Parse(args)\n\n\tif opts.filename == \"\" {\n\t\treturn fmt.Errorf(\"Please specify crontab file.\")\n\t}\n\n\tif opts.outdir == \"\" {\n\t\treturn fmt.Errorf(\"Please specify directory to save systemd files.\")\n\t}\n\n\tif _, err := os.Stat(opts.outdir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"%s: directory does not exist\", opts.outdir)\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc reloadSystemd(timers []string) error {\n\tconn, err := systemd.NewConn()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tdefer conn.Close()\n\n\tclient := systemd.NewClient(conn)\n\n\tif err := client.Reload(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, timerUnit := range timers {\n\t\tif err := client.StartUnit(timerUnit); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tif err := parseArgs(os.Args[1:]); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tbody, err := ioutil.ReadFile(opts.filename)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tschedules, err := crontab.Parse(string(body))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\ttimers := []string{}\n\n\tfor _, schedule := range schedules {\n\t\tcalendar, err := schedule.ConvertToSystemdCalendar()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tvar name string\n\n\t\tif opts.nameRegexp != \"\" {\n\t\t\tre, err := regexp.Compile(opts.nameRegexp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tname = schedule.NameByRegexp(re)\n\t\t}\n\n\t\tif name == \"\" {\n\t\t\tname = \"cron-\" + schedule.SHA256Sum()[0:12]\n\t\t}\n\n\t\tservice, err := systemd.GenerateService(name, schedule.Command, opts.after)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tservicePath := filepath.Join(opts.outdir, name+\".service\")\n\t\tif err := ioutil.WriteFile(servicePath, []byte(service), 0644); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimer, err := systemd.GenerateTimer(name, calendar)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimerPath := filepath.Join(opts.outdir, name+\".timer\")\n\t\tif err := ioutil.WriteFile(timerPath, []byte(timer), 0644); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\ttimers = append(timers, name+\".timer\")\n\t}\n\n\tif opts.reload {\n\t\tif err := reloadSystemd(timers); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/tiltfactor\/simish\/domain\"\n\t\"github.com\/tiltfactor\/simish\/impl\"\n\t\"github.com\/tiltfactor\/simish\/test\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype inputData map[string]string\n\n\/\/ App holds the db structure. Used for dep injection.\ntype App struct {\n\tdb domain.InputOutputStore\n}\n\ntype response struct {\n\tInput string `json:\"input\"`\n\tResponse string `json:\"response\"`\n\tMatch string `json:\"match,omitempty\"`\n\tRoom string `json:\"room\"`\n\tScore float64 `json:\"score\"`\n\tAiCol int64 `json:\"aiCol\"`\n\tResultType int64 `json:\"resultType\"`\n}\n\nfunc (r response) String() string {\n\treturn fmt.Sprintf(\"Input: %s, Matched: %s, Score: %f, Response: %s, Room: %s\",\n\t\tr.Input, r.Match, r.Score, r.Response, r.Room)\n}\n\ntype rawData struct {\n\tText string `json:\"text\"`\n\tReply string `json:\"reply\"`\n}\n\n\/\/ ResponseHandler handles the user request for a input output pair\nfunc (a App) ResponseHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := r.URL.Query()\n\tinput := vars[\"input\"]\n\troom := vars[\"room\"]\n\n\tif len(room) < 1 {\n\t\thttp.Error(w, \"Must provide room number\", 400)\n\t\treturn\n\t}\n\n\troomNumber, err := strconv.ParseInt(room[0], 10, 64)\n\tif err != nil {\n\t\thttp.Error(w, \"Room number must be an int\", 400)\n\t\treturn\n\t}\n\n\tio, score := a.db.Response(input[0], roomNumber)\n\n\tresp := response{\n\t\tInput: input[0],\n\t\tResponse: io.Output,\n\t\tMatch: io.Input,\n\t\tRoom: room[0],\n\t\tScore: score,\n\t\tAiCol: io.AiCol,\n\t\tResultType: io.ResultType,\n\t}\n\n\tlog.Println(resp)\n\n\tdata, err := json.Marshal(resp)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(data)\n}\n\n\/\/ DBConfig is used to import the db_cfg.json file\ntype DBConfig struct {\n\tUser string `json:\"username\"`\n\tPass string `json:\"password\"`\n\tIP string `json:\"ip_addr\"`\n\tPort string `json:\"db_port\"`\n\tDB string `json:\"database\"`\n\tServerPort string `json:\"server_port\"`\n}\n\nfunc (cfg *DBConfig) connectionString() string {\n\tconnStr := fmt.Sprintf(\"%s:%s@tcp(%s:%s)\/%s\", cfg.User, cfg.Pass, cfg.IP, cfg.Port, cfg.DB)\n\tlog.Println(connStr)\n\treturn connStr\n}\n\nfunc startSimish(c *cli.Context) error {\n\tcfgFile, err := ioutil.ReadFile(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := &DBConfig{}\n\tif err := json.Unmarshal(cfgFile, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstore, err := impl.NewSQLStore(cfg.connectionString())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapp := App{db: store}\n\n\tr := mux.NewRouter()\n\n\t\/\/ Routes consist of a path and a handler function.\n\tr.HandleFunc(\"\/api\/v1\/response\", app.ResponseHandler)\n\n\t\/\/ Bind to a port and pass our router in\n\tlog.Println(\"Running on port:\", cfg.ServerPort)\n\n\tlogFile, err := os.OpenFile(\"logs\", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not open log file\")\n\t}\n\tdefer logFile.Close()\n\tlog.SetOutput(logFile)\n\tif err := http.ListenAndServe(\":\"+cfg.ServerPort, r); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc readInput(prompt string) string {\n\tscan := bufio.NewScanner(os.Stdin)\n\tfmt.Print(prompt + \" \")\n\tscan.Scan()\n\treturn scan.Text()\n}\n\nfunc createCfg(c *cli.Context) error {\n\n\tfmt.Println(\"Follow along to create a db_cfg.\")\n\tfmt.Println(\"Warning: This will overwrite existing db_cfg.json file\")\n\n\tf, err := os.Create(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\tserverPort := readInput(\"Port to run Simish on:\")\n\tuser := readInput(\"MySQL database username:\")\n\tpass := readInput(\"MySQL database password:\")\n\tip := readInput(\"MySQL database IP address:\")\n\tport := readInput(\"MySQL database port:\")\n\tdb := readInput(\"MySQL database name:\")\n\n\tcfg := DBConfig{\n\t\tServerPort: serverPort,\n\t\tUser: user,\n\t\tPass: pass,\n\t\tIP: ip,\n\t\tDB: db,\n\t\tPort: port,\n\t}\n\n\tcm, err := json.MarshalIndent(&cfg, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println()\n\tfmt.Println(string(cm))\n\tw := bufio.NewWriter(f)\n\tif _, err := w.Write(cm); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.Flush()\n\tfmt.Println(\"Successfully created db_cfg.json\")\n\n\treturn nil\n}\n\nfunc runTest(c *cli.Context) {\n\tcfgFile, err := ioutil.ReadFile(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := &DBConfig{}\n\tif err := json.Unmarshal(cfgFile, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstore, err := impl.NewSQLStore(cfg.connectionString())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpairs := store.GetAllPairs(1)\n\targs := c.Args()\n\tif len(args) > 2 {\n\t\tlog.Fatal(\"Too many arguments\")\n\t}\n\ttest.RunSoftMatch(args, pairs)\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Simish\"\n\tapp.Usage = \"Soft Matching Algorithm as a service\"\n\tapp.Version = \"0.4.0\"\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"init\",\n\t\t\tUsage: \"Create db_cfg.json file\",\n\t\t\tAction: createCfg,\n\t\t},\n\t\t{\n\t\t\tName: \"start\",\n\t\t\tUsage: \"Start the simish server\",\n\t\t\tAction: startSimish,\n\t\t},\n\t\t{\n\t\t\tName: \"test\",\n\t\t\tUsage: \"Test the softmatch algorithm\",\n\t\t\tAction: runTest,\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<commit_msg>Bump simish version<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/tiltfactor\/simish\/domain\"\n\t\"github.com\/tiltfactor\/simish\/impl\"\n\t\"github.com\/tiltfactor\/simish\/test\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype inputData map[string]string\n\n\/\/ App holds the db structure. Used for dep injection.\ntype App struct {\n\tdb domain.InputOutputStore\n}\n\ntype response struct {\n\tInput string `json:\"input\"`\n\tResponse string `json:\"response\"`\n\tMatch string `json:\"match,omitempty\"`\n\tRoom string `json:\"room\"`\n\tScore float64 `json:\"score\"`\n\tAiCol int64 `json:\"aiCol\"`\n\tResultType int64 `json:\"resultType\"`\n}\n\nfunc (r response) String() string {\n\treturn fmt.Sprintf(\"Input: %s, Matched: %s, Score: %f, Response: %s, Room: %s\",\n\t\tr.Input, r.Match, r.Score, r.Response, r.Room)\n}\n\ntype rawData struct {\n\tText string `json:\"text\"`\n\tReply string `json:\"reply\"`\n}\n\n\/\/ ResponseHandler handles the user request for a input output pair\nfunc (a App) ResponseHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := r.URL.Query()\n\tinput := vars[\"input\"]\n\troom := vars[\"room\"]\n\n\tif len(room) < 1 {\n\t\thttp.Error(w, \"Must provide room number\", 400)\n\t\treturn\n\t}\n\n\troomNumber, err := strconv.ParseInt(room[0], 10, 64)\n\tif err != nil {\n\t\thttp.Error(w, \"Room number must be an int\", 400)\n\t\treturn\n\t}\n\n\tio, score := a.db.Response(input[0], roomNumber)\n\n\tresp := response{\n\t\tInput: input[0],\n\t\tResponse: io.Output,\n\t\tMatch: io.Input,\n\t\tRoom: room[0],\n\t\tScore: score,\n\t\tAiCol: io.AiCol,\n\t\tResultType: io.ResultType,\n\t}\n\n\tlog.Println(resp)\n\n\tdata, err := json.Marshal(resp)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(data)\n}\n\n\/\/ DBConfig is used to import the db_cfg.json file\ntype DBConfig struct {\n\tUser string `json:\"username\"`\n\tPass string `json:\"password\"`\n\tIP string `json:\"ip_addr\"`\n\tPort string `json:\"db_port\"`\n\tDB string `json:\"database\"`\n\tServerPort string `json:\"server_port\"`\n}\n\nfunc (cfg *DBConfig) connectionString() string {\n\tconnStr := fmt.Sprintf(\"%s:%s@tcp(%s:%s)\/%s\", cfg.User, cfg.Pass, cfg.IP, cfg.Port, cfg.DB)\n\tlog.Println(connStr)\n\treturn connStr\n}\n\nfunc startSimish(c *cli.Context) error {\n\tcfgFile, err := ioutil.ReadFile(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := &DBConfig{}\n\tif err := json.Unmarshal(cfgFile, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstore, err := impl.NewSQLStore(cfg.connectionString())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapp := App{db: store}\n\n\tr := mux.NewRouter()\n\n\t\/\/ Routes consist of a path and a handler function.\n\tr.HandleFunc(\"\/api\/v1\/response\", app.ResponseHandler)\n\n\t\/\/ Bind to a port and pass our router in\n\tlog.Println(\"Running on port:\", cfg.ServerPort)\n\n\tlogFile, err := os.OpenFile(\"logs\", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not open log file\")\n\t}\n\tdefer logFile.Close()\n\tlog.SetOutput(logFile)\n\tif err := http.ListenAndServe(\":\"+cfg.ServerPort, r); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc readInput(prompt string) string {\n\tscan := bufio.NewScanner(os.Stdin)\n\tfmt.Print(prompt + \" \")\n\tscan.Scan()\n\treturn scan.Text()\n}\n\nfunc createCfg(c *cli.Context) error {\n\n\tfmt.Println(\"Follow along to create a db_cfg.\")\n\tfmt.Println(\"Warning: This will overwrite existing db_cfg.json file\")\n\n\tf, err := os.Create(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\tserverPort := readInput(\"Port to run Simish on:\")\n\tuser := readInput(\"MySQL database username:\")\n\tpass := readInput(\"MySQL database password:\")\n\tip := readInput(\"MySQL database IP address:\")\n\tport := readInput(\"MySQL database port:\")\n\tdb := readInput(\"MySQL database name:\")\n\n\tcfg := DBConfig{\n\t\tServerPort: serverPort,\n\t\tUser: user,\n\t\tPass: pass,\n\t\tIP: ip,\n\t\tDB: db,\n\t\tPort: port,\n\t}\n\n\tcm, err := json.MarshalIndent(&cfg, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println()\n\tfmt.Println(string(cm))\n\tw := bufio.NewWriter(f)\n\tif _, err := w.Write(cm); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.Flush()\n\tfmt.Println(\"Successfully created db_cfg.json\")\n\n\treturn nil\n}\n\nfunc runTest(c *cli.Context) {\n\tcfgFile, err := ioutil.ReadFile(\".\/db_cfg.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := &DBConfig{}\n\tif err := json.Unmarshal(cfgFile, cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstore, err := impl.NewSQLStore(cfg.connectionString())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpairs := store.GetAllPairs(1)\n\targs := c.Args()\n\tif len(args) > 2 {\n\t\tlog.Fatal(\"Too many arguments\")\n\t}\n\ttest.RunSoftMatch(args, pairs)\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Simish\"\n\tapp.Usage = \"Soft Matching Algorithm as a service\"\n\tapp.Version = \"0.5.0\"\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"init\",\n\t\t\tUsage: \"Create db_cfg.json file\",\n\t\t\tAction: createCfg,\n\t\t},\n\t\t{\n\t\t\tName: \"start\",\n\t\t\tUsage: \"Start the simish server\",\n\t\t\tAction: startSimish,\n\t\t},\n\t\t{\n\t\t\tName: \"test\",\n\t\t\tUsage: \"Test the softmatch algorithm\",\n\t\t\tAction: runTest,\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\texpectingVersion = \"1.1.1 DISABLED\" \/\/ number being expected. must be changed manually (for now).\n\tchangeURLBase = \"https:\/\/code.google.com\/p\/go\/source\/detail?r=go\" \/\/ base url to poll the tag\n\tupdateInterval = 6 * time.Second \/\/ Update interval for the expected number\n)\n\nvar defaultPage = \"http:\/\/isgo1point3.outyet.org\"\n\nvar (\n\tversions = make(map[string]*version) \/\/ map with all versions by number(string)\n\tversionsLock sync.RWMutex \/\/ map lock\n)\n\nvar regexpNumber = regexp.MustCompile(`^[1-9](?:\\.[0-9]){0,2}$`)\n\nvar colVersions *mgo.Collection\nvar colNV *mgo.Collection\n\nvar options struct {\n\tListen string `short:\"l\" long:\"listen\" default:\"141.138.139.6:80\" description:\"IP:post to listen on\"`\n}\n\nfunc main() {\n\targs, err := flags.Parse(&options)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tif len(args) > 0 {\n\t\tlog.Fatalln(\"Unexpected arguments.\")\n\t}\n\n\tmgoSess, err := mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tcolVersions = mgoSess.DB(\"outyet\").C(\"versions\")\n\tcolVersions.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"number\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t})\n\tcolNV = mgoSess.DB(\"outyet\").C(\"namevalue\")\n\tcolNV.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"name\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t})\n\n\tif err := http.ListenAndServe(options.Listen, http.HandlerFunc(rootHandler)); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ handler for stats page\n\tif r.Host == \"stats.outyet.org\" {\n\t\tstatsHandler(w, r)\n\t\treturn\n\t}\n\n\t\/\/ redirect for 'old' domain\n\tif r.Host == \"isgo1point2outyet.com\" {\n\t\thttp.Redirect(w, r, \"http:\/\/isgo1point2.outyet.org\", http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t\/\/ only handle requests on \/\n\tif r.RequestURI != \"\/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\t\/\/ check if Host header matches isgo*.outyet.org\n\tif !strings.HasSuffix(r.Host, \".outyet.org\") || !strings.HasPrefix(r.Host, \"isgo\") {\n\t\tlog.Printf(\"Invalid host format detected. %s\\n\", r.Host)\n\t\thttp.Redirect(w, r, defaultPage, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\tnumber := strings.Replace(r.Host[4:len(r.Host)-11], \"point\", \".\", -1)\n\tlog.Println(number)\n\n\tif !regexpNumber.MatchString(number) {\n\t\thttp.Error(w, \"invalid request format\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ get right version in a safe way\n\to := getVersion(number)\n\n\t\/\/ add hitCount's\n\tcolVersions.Upsert(bson.M{\"number\": o.number}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\tcolNV.Upsert(bson.M{\"name\": \"counts\"}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\n\t\/\/ execute template\n\tdata := dataOutyet{\n\t\tOutyet: <-o.isOutyetChan, \/\/retrieve outyet directly from channel\n\t\tNumber: number,\n\t}\n\terr := tmplOutyet.Execute(w, data)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc statsHandler(w http.ResponseWriter, r *http.Request) {\n\tdata := &dataStats{}\n\n\tcolNV.Find(bson.M{\"name\": \"counts\"}).One(data)\n\tcolVersions.Find(nil).Sort(\"number\").All(&data.Versions)\n\n\tfor _, v := range data.Versions {\n\t\t\/\/ get outyet for given version number\n\t\tv.Outyet = <-getVersion(v.Number).isOutyetChan\n\n\t\t\/\/ add hitCount's\n\t\tcolVersions.Upsert(bson.M{\"number\": v.Number}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\t}\n\n\terr := tmplStats.Execute(w, data)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n<commit_msg>Redirect x.0 to x<commit_after>package main\n\nimport (\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\texpectingVersion = \"1.1.1 DISABLED\" \/\/ number being expected. must be changed manually (for now).\n\tchangeURLBase = \"https:\/\/code.google.com\/p\/go\/source\/detail?r=go\" \/\/ base url to poll the tag\n\tupdateInterval = 6 * time.Second \/\/ Update interval for the expected number\n)\n\nvar defaultPage = \"http:\/\/isgo1point3.outyet.org\"\n\nvar (\n\tversions = make(map[string]*version) \/\/ map with all versions by number(string)\n\tversionsLock sync.RWMutex \/\/ map lock\n)\n\nvar regexpNumber = regexp.MustCompile(`^[1-9](?:\\.[0-9]){0,2}$`)\n\nvar colVersions *mgo.Collection\nvar colNV *mgo.Collection\n\nvar options struct {\n\tListen string `short:\"l\" long:\"listen\" default:\"141.138.139.6:80\" description:\"IP:post to listen on\"`\n}\n\nfunc main() {\n\targs, err := flags.Parse(&options)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tif len(args) > 0 {\n\t\tlog.Fatalln(\"Unexpected arguments.\")\n\t}\n\n\tmgoSess, err := mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tcolVersions = mgoSess.DB(\"outyet\").C(\"versions\")\n\tcolVersions.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"number\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t})\n\tcolNV = mgoSess.DB(\"outyet\").C(\"namevalue\")\n\tcolNV.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"name\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t})\n\n\tif err := http.ListenAndServe(options.Listen, http.HandlerFunc(rootHandler)); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ handler for stats page\n\tif r.Host == \"stats.outyet.org\" {\n\t\tstatsHandler(w, r)\n\t\treturn\n\t}\n\n\t\/\/ redirect for 'old' domain\n\tif r.Host == \"isgo1point2outyet.com\" {\n\t\thttp.Redirect(w, r, \"http:\/\/isgo1point2.outyet.org\", http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t\/\/ only handle requests on \/\n\tif r.RequestURI != \"\/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\t\/\/ check if Host header matches isgo*.outyet.org\n\tif !strings.HasSuffix(r.Host, \".outyet.org\") || !strings.HasPrefix(r.Host, \"isgo\") {\n\t\tlog.Printf(\"Invalid host format detected. %s\\n\", r.Host)\n\t\thttp.Redirect(w, r, defaultPage, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\tnumber := strings.Replace(r.Host[4:len(r.Host)-11], \"point\", \".\", -1)\n\tlog.Println(number)\n\n\tif !regexpNumber.MatchString(number) {\n\t\thttp.Error(w, \"invalid request format\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif strings.HasSuffix(number, \".0\") {\n\t\tnumber = number[:len(number)-2]\n\t\tif len(number) > 0 {\n\t\t\thttp.Redirect(w, r, \"http:\/\/isgo\"+strings.Replace(number, \".\", \"point\", -1)+\".outyet.org\", code)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, defaultPage, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\t\/\/ get right version in a safe way\n\to := getVersion(number)\n\n\t\/\/ add hitCount's\n\tcolVersions.Upsert(bson.M{\"number\": o.number}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\tcolNV.Upsert(bson.M{\"name\": \"counts\"}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\n\t\/\/ execute template\n\tdata := dataOutyet{\n\t\tOutyet: <-o.isOutyetChan, \/\/retrieve outyet directly from channel\n\t\tNumber: number,\n\t}\n\terr := tmplOutyet.Execute(w, data)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc statsHandler(w http.ResponseWriter, r *http.Request) {\n\tdata := &dataStats{}\n\n\tcolNV.Find(bson.M{\"name\": \"counts\"}).One(data)\n\tcolVersions.Find(nil).Sort(\"number\").All(&data.Versions)\n\n\tfor _, v := range data.Versions {\n\t\t\/\/ get outyet for given version number\n\t\tv.Outyet = <-getVersion(v.Number).isOutyetChan\n\n\t\t\/\/ add hitCount's\n\t\tcolVersions.Upsert(bson.M{\"number\": v.Number}, bson.M{\"$inc\": bson.M{\"hits\": 1}})\n\t}\n\n\terr := tmplStats.Execute(w, data)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\n\tWardenClient \"github.com\/cloudfoundry-incubator\/garden\/client\"\n\tWardenConnection \"github.com\/cloudfoundry-incubator\/garden\/client\/connection\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"github.com\/tedsuo\/router\"\n\n\t\"github.com\/winston-ci\/prole\/api\"\n\t\"github.com\/winston-ci\/prole\/builder\"\n\t\"github.com\/winston-ci\/prole\/config\"\n\t\"github.com\/winston-ci\/prole\/resource\"\n\t\"github.com\/winston-ci\/prole\/routes\"\n\t\"github.com\/winston-ci\/prole\/scheduler\"\n\t\"github.com\/winston-ci\/prole\/snapshotter\"\n)\n\nvar listenAddr = flag.String(\n\t\"listenAddr\",\n\t\"0.0.0.0:4637\",\n\t\"listening address\",\n)\n\nvar peerAddr = flag.String(\n\t\"peerAddr\",\n\t\"127.0.0.1:4637\",\n\t\"external address of the api server, used for callbacks\",\n)\n\nvar wardenNetwork = flag.String(\n\t\"wardenNetwork\",\n\t\"unix\",\n\t\"warden API connection network (unix or tcp)\",\n)\n\nvar wardenAddr = flag.String(\n\t\"wardenAddr\",\n\t\"\/tmp\/warden.sock\",\n\t\"warden API connection address\",\n)\n\nvar resourceTypes = flag.String(\n\t\"resourceTypes\",\n\t`{\"git\":\"concourse\/git-resource\",\"raw\":\"concourse\/raw-resource\",\"docker-image\":\"concourse\/docker-image-resource\"}`,\n\t\"map of resource type to its docker image\",\n)\n\nvar snapshotPath = flag.String(\n\t\"snapshotPath\",\n\t\"\/tmp\/prole.json\",\n\t\"path to file to store\/load snapshots from\",\n)\n\nfunc main() {\n\tflag.Parse()\n\n\twardenClient := WardenClient.New(&WardenConnection.Info{\n\t\tNetwork: *wardenNetwork,\n\t\tAddr: *wardenAddr,\n\t})\n\n\tresourceTypesMap := map[string]string{}\n\terr := json.Unmarshal([]byte(*resourceTypes), &resourceTypesMap)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to parse resource types:\", err)\n\t}\n\n\tvar resourceTypesConfig config.ResourceTypes\n\tfor typ, image := range resourceTypesMap {\n\t\tresourceTypesConfig = append(resourceTypesConfig, config.ResourceType{\n\t\t\tName: typ,\n\t\t\tImage: image,\n\t\t})\n\t}\n\n\tresourceTracker := resource.NewTracker(resourceTypesConfig, wardenClient)\n\n\tbuilder := builder.NewBuilder(resourceTracker, wardenClient)\n\n\tscheduler := scheduler.NewScheduler(builder)\n\n\tgenerator := router.NewRequestGenerator(\"http:\/\/\"+*peerAddr, routes.Routes)\n\n\tdrain := make(chan struct{})\n\n\thandler, err := api.New(scheduler, resourceTracker, generator, drain)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to initialize handler:\", err)\n\t}\n\n\tgroup := grouper.EnvokeGroup(grouper.RunGroup{\n\t\t\"api\": http_server.New(*listenAddr, handler),\n\t\t\"snapshotter\": snapshotter.NewSnapshotter(*snapshotPath, scheduler),\n\t\t\"drainer\": ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {\n\t\t\tclose(ready)\n\t\t\t<-signals\n\t\t\tclose(drain)\n\t\t\treturn nil\n\t\t}),\n\t})\n\n\trunning := ifrit.Envoke(sigmon.New(group))\n\n\tlog.Println(\"serving api on\", *listenAddr)\n\n\terr = <-running.Wait()\n\tif err != nil {\n\t\tlog.Println(\"exited with error:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlog.Println(\"exited\")\n}\n<commit_msg>add time resource to defaults<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\n\tWardenClient \"github.com\/cloudfoundry-incubator\/garden\/client\"\n\tWardenConnection \"github.com\/cloudfoundry-incubator\/garden\/client\/connection\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"github.com\/tedsuo\/router\"\n\n\t\"github.com\/winston-ci\/prole\/api\"\n\t\"github.com\/winston-ci\/prole\/builder\"\n\t\"github.com\/winston-ci\/prole\/config\"\n\t\"github.com\/winston-ci\/prole\/resource\"\n\t\"github.com\/winston-ci\/prole\/routes\"\n\t\"github.com\/winston-ci\/prole\/scheduler\"\n\t\"github.com\/winston-ci\/prole\/snapshotter\"\n)\n\nvar listenAddr = flag.String(\n\t\"listenAddr\",\n\t\"0.0.0.0:4637\",\n\t\"listening address\",\n)\n\nvar peerAddr = flag.String(\n\t\"peerAddr\",\n\t\"127.0.0.1:4637\",\n\t\"external address of the api server, used for callbacks\",\n)\n\nvar wardenNetwork = flag.String(\n\t\"wardenNetwork\",\n\t\"unix\",\n\t\"warden API connection network (unix or tcp)\",\n)\n\nvar wardenAddr = flag.String(\n\t\"wardenAddr\",\n\t\"\/tmp\/warden.sock\",\n\t\"warden API connection address\",\n)\n\nvar resourceTypes = flag.String(\n\t\"resourceTypes\",\n\t`{\"git\":\"concourse\/git-resource\",\"raw\":\"concourse\/raw-resource\",\"docker-image\":\"concourse\/docker-image-resource\",\"time\":\"concourse\/time-resource\"}`,\n\t\"map of resource type to its docker image\",\n)\n\nvar snapshotPath = flag.String(\n\t\"snapshotPath\",\n\t\"\/tmp\/prole.json\",\n\t\"path to file to store\/load snapshots from\",\n)\n\nfunc main() {\n\tflag.Parse()\n\n\twardenClient := WardenClient.New(&WardenConnection.Info{\n\t\tNetwork: *wardenNetwork,\n\t\tAddr: *wardenAddr,\n\t})\n\n\tresourceTypesMap := map[string]string{}\n\terr := json.Unmarshal([]byte(*resourceTypes), &resourceTypesMap)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to parse resource types:\", err)\n\t}\n\n\tvar resourceTypesConfig config.ResourceTypes\n\tfor typ, image := range resourceTypesMap {\n\t\tresourceTypesConfig = append(resourceTypesConfig, config.ResourceType{\n\t\t\tName: typ,\n\t\t\tImage: image,\n\t\t})\n\t}\n\n\tresourceTracker := resource.NewTracker(resourceTypesConfig, wardenClient)\n\n\tbuilder := builder.NewBuilder(resourceTracker, wardenClient)\n\n\tscheduler := scheduler.NewScheduler(builder)\n\n\tgenerator := router.NewRequestGenerator(\"http:\/\/\"+*peerAddr, routes.Routes)\n\n\tdrain := make(chan struct{})\n\n\thandler, err := api.New(scheduler, resourceTracker, generator, drain)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to initialize handler:\", err)\n\t}\n\n\tgroup := grouper.EnvokeGroup(grouper.RunGroup{\n\t\t\"api\": http_server.New(*listenAddr, handler),\n\t\t\"snapshotter\": snapshotter.NewSnapshotter(*snapshotPath, scheduler),\n\t\t\"drainer\": ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {\n\t\t\tclose(ready)\n\t\t\t<-signals\n\t\t\tclose(drain)\n\t\t\treturn nil\n\t\t}),\n\t})\n\n\trunning := ifrit.Envoke(sigmon.New(group))\n\n\tlog.Println(\"serving api on\", *listenAddr)\n\n\terr = <-running.Wait()\n\tif err != nil {\n\t\tlog.Println(\"exited with error:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlog.Println(\"exited\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com\/robvanmieghem\/go-opencl\/cl\"\n)\n\nvar kernelSource = `\nstatic inline ulong rotr64( __const ulong w, __const unsigned c ) { return ( w >> c ) | ( w << ( 64 - c ) ); }\n\n__constant static const uchar blake2b_sigma[12][16] = {\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,\n\t{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,\n\t{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,\n\t{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,\n\t{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,\n\t{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,\n\t{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,\n\t{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,\n\t{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } };\n\n\/\/ Target is passed in via headerIn[32 - 29]\n__kernel void nonceGrind(__global ulong *headerIn, __global ulong *nonceOut) {\n\tulong target = headerIn[4];\n\tulong m[16] = {\theaderIn[0], headerIn[1],\n\t headerIn[2], headerIn[3],\n\t (ulong)get_global_id(0), headerIn[5],\n\t headerIn[6], headerIn[7],\n\t headerIn[8], headerIn[9], 0, 0, 0, 0, 0, 0 };\n\n\tulong v[16] = { 0x6a09e667f2bdc928, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,\n\t 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade68281, 0x9b05688c2b3e6c1f, 0xe07c265404be4294, 0x5be0cd19137e2179 };\n\n\n\n#define G(r,i,a,b,c,d) \\\n\ta = a + b + m[blake2b_sigma[r][2*i]]; \\\n\td = rotr64(d ^ a, 32); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 24); \\\n\ta = a + b + m[blake2b_sigma[r][2*i+1]]; \\\n\td = rotr64(d ^ a, 16); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 63);\n\n#define ROUND(r) \\\n\tG(r,0,v[ 0],v[ 4],v[ 8],v[12]); \\\n\tG(r,1,v[ 1],v[ 5],v[ 9],v[13]); \\\n\tG(r,2,v[ 2],v[ 6],v[10],v[14]); \\\n\tG(r,3,v[ 3],v[ 7],v[11],v[15]); \\\n\tG(r,4,v[ 0],v[ 5],v[10],v[15]); \\\n\tG(r,5,v[ 1],v[ 6],v[11],v[12]); \\\n\tG(r,6,v[ 2],v[ 7],v[ 8],v[13]); \\\n\tG(r,7,v[ 3],v[ 4],v[ 9],v[14]);\n\n\tROUND( 0 );\n\tROUND( 1 );\n\tROUND( 2 );\n\tROUND( 3 );\n\tROUND( 4 );\n\tROUND( 5 );\n\tROUND( 6 );\n\tROUND( 7 );\n\tROUND( 8 );\n\tROUND( 9 );\n\tROUND( 10 );\n\tROUND( 11 );\n#undef G\n#undef ROUND\n\n\tif (as_ulong(as_uchar8(0x6a09e667f2bdc928 ^ v[0] ^ v[8]).s76543210) < target) {\n\t\t*nonceOut = m[4];\n\t\treturn;\n\t}\n}\n`\n\nvar getworkurl = \"http:\/\/localhost:9980\/miner\/headerforwork\"\nvar submitblockurl = \"http:\/\/localhost:9980\/miner\/submitheader\"\nvar intensity = 10\nvar localItemSize = 8\n\nfunc loadCLProgramSource() (sources []string) {\n\tfilename := \"sia-gpu-miner.cl\"\n\n\tbuf, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsources = []string{string(buf)}\n\treturn\n}\n\nfunc getHeaderForWork() (target, header []byte, err error) {\n\tresp, err := http.Get(getworkurl)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tbuf := make([]byte, 113)\n\tn, err := resp.Body.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\treturn\n\t}\n\tif n < 112 {\n\t\terr = errors.New(\"Invalid response\")\n\t} else {\n\t\terr = nil\n\t}\n\n\ttarget = buf[:32]\n\theader = buf[32:112]\n\t\/\/TODO: check for invalid target\n\treturn\n}\n\nfunc submitHeader(header []byte) (err error) {\n\t_, err = http.Post(submitblockurl, \"\", bytes.NewReader(header))\n\treturn\n}\n\nfunc enqueueWriteBufferByte(q *cl.CommandQueue, buffer *cl.MemObject, blocking bool, offset int, data []byte, eventWaitList []*cl.Event) (*cl.Event, error) {\n\tdataPtr := unsafe.Pointer(&data[0])\n\tdataSize := int(unsafe.Sizeof(data[0])) * len(data)\n\treturn q.EnqueueWriteBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)\n}\n\nfunc mine(clDevice *cl.Device, minerID int) {\n\tlog.Println(minerID, \"- Initializing\", clDevice.Type(), \"-\", clDevice.Name())\n\n\tglobalItemSize := int(math.Exp2(float64(intensity)))\n\tlog.Println(minerID, \"- global item size:\", globalItemSize, \"- local item size:\", localItemSize)\n\n\tcontext, err := cl.CreateContext([]*cl.Device{clDevice})\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer context.Release()\n\n\tcommandQueue, err := context.CreateCommandQueue(clDevice, 0)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer commandQueue.Release()\n\n\tprogram, err := context.CreateProgramWithSource([]string{kernelSource})\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer program.Release()\n\n\terr = program.BuildProgram([]*cl.Device{clDevice}, \"\")\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\n\tkernel, err := program.CreateKernel(\"nonceGrind\")\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer kernel.Release()\n\n\tblockHeaderObj, err := context.CreateEmptyBuffer(cl.MemReadOnly, 80)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer blockHeaderObj.Release()\n\tkernel.SetArgBuffer(0, blockHeaderObj)\n\n\tnonceOutObj, err := context.CreateEmptyBuffer(cl.MemReadWrite, 8)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer nonceOutObj.Release()\n\tkernel.SetArgBuffer(1, nonceOutObj)\n\n\tlog.Println(minerID, \"- Started mining on\", clDevice.Type(), \"-\", clDevice.Name())\n\n\tfor {\n\t\tstart := time.Now()\n\n\t\ttarget, header, err := getHeaderForWork()\n\t\tif err != nil {\n\t\t\tlog.Println(minerID, \"- ERROR \", err)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/copy target to header\n\t\tfor i := 0; i < 8; i++ {\n\t\t\theader[i+32] = target[7-i]\n\t\t}\n\t\t\/\/TODO: offset\n\n\t\t\/\/Copy input to kernel args\n\t\tif _, err = enqueueWriteBufferByte(commandQueue, blockHeaderObj, true, 0, header, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\n\t\tnonceOut := make([]byte, 8, 8) \/\/TODO: get this out of the for loop\n\t\tif _, err = enqueueWriteBufferByte(commandQueue, nonceOutObj, true, 0, nonceOut, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\n\t\t\/\/Run the kernel\n\t\t\/\/globalIDOffset := globalItemSize\n\t\tif _, err = commandQueue.EnqueueNDRangeKernel(kernel, nil, []int{globalItemSize}, []int{localItemSize}, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\t\t\/\/Get output\n\t\tdataSize := int(unsafe.Sizeof(nonceOut[0])) * len(nonceOut)\n\t\tif _, err = commandQueue.EnqueueReadBuffer(nonceOutObj, true, 0, dataSize, unsafe.Pointer(&nonceOut[0]), nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"_\", err)\n\t\t}\n\t\t\/\/Check if match found\n\t\tif nonceOut[0] != 0 {\n\t\t\tlog.Println(minerID, \"-\", \"Yay, block found!\")\n\t\t\t\/\/ Copy nonce to header.\n\t\t\tfor i := 0; i < 8; i++ {\n\t\t\t\theader[i+32] = nonceOut[i]\n\t\t\t}\n\t\t\tif err = submitHeader(header); err != nil {\n\t\t\t\tlog.Println(minerID, \"- Error submitting block -\", err)\n\t\t\t}\n\t\t}\n\n\t\thashRate := float64(globalItemSize) \/ (time.Since(start).Seconds() * 1000000)\n\t\tfmt.Printf(\"\\r%d - Mining at %.3f MH\/s\", minerID, hashRate)\n\n\t}\n\n}\n\nfunc main() {\n\n\tplatforms, err := cl.GetPlatforms()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tclDevices := make([]*cl.Device, 0, 4)\n\tfor _, platform := range platforms {\n\t\tlog.Println(\"Platform\", platform.Name())\n\t\tplatormDevices, err := cl.GetDevices(platform, cl.DeviceTypeAll)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlog.Println(len(platormDevices), \"device(s) found:\")\n\t\tfor i, device := range platormDevices {\n\t\t\tlog.Println(i, \"-\", device.Type(), \"-\", device.Name(), \"- Compiler Available: \", device.CompilerAvailable())\n\t\t\tlog.Println(i, \"- Max work dimensions\", device.MaxWorkItemDimensions())\n\t\t\tlog.Println(i, \"- Max work item sizes\", device.MaxWorkItemSizes())\n\t\t\tlog.Println(i, \"- OpenCL C Version\", device.OpenCLCVersion())\n\t\t\tlog.Println(i, \"- Max Work Group Size\", device.MaxWorkGroupSize())\n\t\t\t\/\/TODO: filter devices on type\n\t\t\tclDevices = append(clDevices, device)\n\t\t}\n\t}\n\n\tfor i, device := range clDevices {\n\t\tgo mine(device, i)\n\t}\n\n\tvar input string\n\tfmt.Scanln(&input)\n}\n<commit_msg>Use decent default localworkitem size and intensity for now, need to make them autoadjust in the future<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com\/robvanmieghem\/go-opencl\/cl\"\n)\n\nvar kernelSource = `\nstatic inline ulong rotr64( __const ulong w, __const unsigned c ) { return ( w >> c ) | ( w << ( 64 - c ) ); }\n\n__constant static const uchar blake2b_sigma[12][16] = {\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,\n\t{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,\n\t{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,\n\t{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,\n\t{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,\n\t{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,\n\t{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,\n\t{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,\n\t{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } };\n\n\/\/ Target is passed in via headerIn[32 - 29]\n__kernel void nonceGrind(__global ulong *headerIn, __global ulong *nonceOut) {\n\tulong target = headerIn[4];\n\tulong m[16] = {\theaderIn[0], headerIn[1],\n\t headerIn[2], headerIn[3],\n\t (ulong)get_global_id(0), headerIn[5],\n\t headerIn[6], headerIn[7],\n\t headerIn[8], headerIn[9], 0, 0, 0, 0, 0, 0 };\n\n\tulong v[16] = { 0x6a09e667f2bdc928, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,\n\t 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade68281, 0x9b05688c2b3e6c1f, 0xe07c265404be4294, 0x5be0cd19137e2179 };\n\n\n\n#define G(r,i,a,b,c,d) \\\n\ta = a + b + m[blake2b_sigma[r][2*i]]; \\\n\td = rotr64(d ^ a, 32); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 24); \\\n\ta = a + b + m[blake2b_sigma[r][2*i+1]]; \\\n\td = rotr64(d ^ a, 16); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 63);\n\n#define ROUND(r) \\\n\tG(r,0,v[ 0],v[ 4],v[ 8],v[12]); \\\n\tG(r,1,v[ 1],v[ 5],v[ 9],v[13]); \\\n\tG(r,2,v[ 2],v[ 6],v[10],v[14]); \\\n\tG(r,3,v[ 3],v[ 7],v[11],v[15]); \\\n\tG(r,4,v[ 0],v[ 5],v[10],v[15]); \\\n\tG(r,5,v[ 1],v[ 6],v[11],v[12]); \\\n\tG(r,6,v[ 2],v[ 7],v[ 8],v[13]); \\\n\tG(r,7,v[ 3],v[ 4],v[ 9],v[14]);\n\n\tROUND( 0 );\n\tROUND( 1 );\n\tROUND( 2 );\n\tROUND( 3 );\n\tROUND( 4 );\n\tROUND( 5 );\n\tROUND( 6 );\n\tROUND( 7 );\n\tROUND( 8 );\n\tROUND( 9 );\n\tROUND( 10 );\n\tROUND( 11 );\n#undef G\n#undef ROUND\n\n\tif (as_ulong(as_uchar8(0x6a09e667f2bdc928 ^ v[0] ^ v[8]).s76543210) < target) {\n\t\t*nonceOut = m[4];\n\t\treturn;\n\t}\n}\n`\n\nvar getworkurl = \"http:\/\/localhost:9980\/miner\/headerforwork\"\nvar submitblockurl = \"http:\/\/localhost:9980\/miner\/submitheader\"\nvar intensity = 26\nvar localItemSize = 256\n\nfunc loadCLProgramSource() (sources []string) {\n\tfilename := \"sia-gpu-miner.cl\"\n\n\tbuf, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsources = []string{string(buf)}\n\treturn\n}\n\nfunc getHeaderForWork() (target, header []byte, err error) {\n\tresp, err := http.Get(getworkurl)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tbuf := make([]byte, 113)\n\tn, err := resp.Body.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\treturn\n\t}\n\tif n < 112 {\n\t\terr = errors.New(\"Invalid response\")\n\t} else {\n\t\terr = nil\n\t}\n\n\ttarget = buf[:32]\n\theader = buf[32:112]\n\t\/\/TODO: check for invalid target\n\treturn\n}\n\nfunc submitHeader(header []byte) (err error) {\n\t_, err = http.Post(submitblockurl, \"\", bytes.NewReader(header))\n\treturn\n}\n\nfunc enqueueWriteBufferByte(q *cl.CommandQueue, buffer *cl.MemObject, blocking bool, offset int, data []byte, eventWaitList []*cl.Event) (*cl.Event, error) {\n\tdataPtr := unsafe.Pointer(&data[0])\n\tdataSize := int(unsafe.Sizeof(data[0])) * len(data)\n\treturn q.EnqueueWriteBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)\n}\n\nfunc mine(clDevice *cl.Device, minerID int) {\n\tlog.Println(minerID, \"- Initializing\", clDevice.Type(), \"-\", clDevice.Name())\n\n\tglobalItemSize := int(math.Exp2(float64(intensity)))\n\tlog.Println(minerID, \"- global item size:\", globalItemSize, \"- local item size:\", localItemSize)\n\n\tcontext, err := cl.CreateContext([]*cl.Device{clDevice})\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer context.Release()\n\n\tcommandQueue, err := context.CreateCommandQueue(clDevice, 0)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer commandQueue.Release()\n\n\tprogram, err := context.CreateProgramWithSource([]string{kernelSource})\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer program.Release()\n\n\terr = program.BuildProgram([]*cl.Device{clDevice}, \"\")\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\n\tkernel, err := program.CreateKernel(\"nonceGrind\")\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer kernel.Release()\n\n\tblockHeaderObj, err := context.CreateEmptyBuffer(cl.MemReadOnly, 80)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer blockHeaderObj.Release()\n\tkernel.SetArgBuffer(0, blockHeaderObj)\n\n\tnonceOutObj, err := context.CreateEmptyBuffer(cl.MemReadWrite, 8)\n\tif err != nil {\n\t\tlog.Fatalln(minerID, \"-\", err)\n\t}\n\tdefer nonceOutObj.Release()\n\tkernel.SetArgBuffer(1, nonceOutObj)\n\n\tlog.Println(minerID, \"- Started mining on\", clDevice.Type(), \"-\", clDevice.Name())\n\n\tfor {\n\t\tstart := time.Now()\n\n\t\ttarget, header, err := getHeaderForWork()\n\t\tif err != nil {\n\t\t\tlog.Println(minerID, \"- ERROR \", err)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/copy target to header\n\t\tfor i := 0; i < 8; i++ {\n\t\t\theader[i+32] = target[7-i]\n\t\t}\n\t\t\/\/TODO: offset\n\n\t\t\/\/Copy input to kernel args\n\t\tif _, err = enqueueWriteBufferByte(commandQueue, blockHeaderObj, true, 0, header, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\n\t\tnonceOut := make([]byte, 8, 8) \/\/TODO: get this out of the for loop\n\t\tif _, err = enqueueWriteBufferByte(commandQueue, nonceOutObj, true, 0, nonceOut, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\n\t\t\/\/Run the kernel\n\t\t\/\/globalIDOffset := globalItemSize\n\t\tif _, err = commandQueue.EnqueueNDRangeKernel(kernel, nil, []int{globalItemSize}, []int{localItemSize}, nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"-\", err)\n\t\t}\n\t\t\/\/Get output\n\t\tdataSize := int(unsafe.Sizeof(nonceOut[0])) * len(nonceOut)\n\t\tif _, err = commandQueue.EnqueueReadBuffer(nonceOutObj, true, 0, dataSize, unsafe.Pointer(&nonceOut[0]), nil); err != nil {\n\t\t\tlog.Fatalln(minerID, \"_\", err)\n\t\t}\n\t\t\/\/Check if match found\n\t\tif nonceOut[0] != 0 {\n\t\t\tlog.Println(minerID, \"-\", \"Yay, block found!\")\n\t\t\t\/\/ Copy nonce to header.\n\t\t\tfor i := 0; i < 8; i++ {\n\t\t\t\theader[i+32] = nonceOut[i]\n\t\t\t}\n\t\t\tif err = submitHeader(header); err != nil {\n\t\t\t\tlog.Println(minerID, \"- Error submitting block -\", err)\n\t\t\t}\n\t\t}\n\n\t\thashRate := float64(globalItemSize) \/ (time.Since(start).Seconds() * 1000000)\n\t\tfmt.Printf(\"\\r%d - Mining at %.3f MH\/s\", minerID, hashRate)\n\n\t}\n\n}\n\nfunc main() {\n\n\tplatforms, err := cl.GetPlatforms()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tclDevices := make([]*cl.Device, 0, 4)\n\tfor _, platform := range platforms {\n\t\tlog.Println(\"Platform\", platform.Name())\n\t\tplatormDevices, err := cl.GetDevices(platform, cl.DeviceTypeAll)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlog.Println(len(platormDevices), \"device(s) found:\")\n\t\tfor i, device := range platormDevices {\n\t\t\tlog.Println(i, \"-\", device.Type(), \"-\", device.Name(), \"- Compiler Available: \", device.CompilerAvailable())\n\t\t\tlog.Println(i, \"- Max work dimensions\", device.MaxWorkItemDimensions())\n\t\t\tlog.Println(i, \"- Max work item sizes\", device.MaxWorkItemSizes())\n\t\t\tlog.Println(i, \"- OpenCL C Version\", device.OpenCLCVersion())\n\t\t\tlog.Println(i, \"- Max Work Group Size\", device.MaxWorkGroupSize())\n\t\t\t\/\/TODO: filter devices on type\n\t\t\tclDevices = append(clDevices, device)\n\t\t}\n\t}\n\n\tfor i, device := range clDevices {\n\t\tgo mine(device, i)\n\t}\n\n\tvar input string\n\tfmt.Scanln(&input)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/uva-its\/gopherbot\/bot\"\n\n\t\/\/ If re-compiling Gopherbot, you can comment out unused connectors.\n\t\/\/ Select the connector and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/connectors\/slack\"\n\n\t\/\/ If re-compiling, you can comment out unused brain implementations.\n\t\/\/ Select the brain to use and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/brains\/file\"\n\n\t\/\/ If re-compiling, you can comment out unused elevator implementations.\n\t\/\/ Select the elevator to use and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/elevators\/duo\"\n\t_ \"github.com\/uva-its\/gopherbot\/elevators\/totp\"\n\n\t\/\/ If re-compiling, you can select the plugins you want. Otherwise you can disable\n\t\/\/ them in conf\/plugins\/<plugin>.json with \"Disabled\": true\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/help\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/knock\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/lists\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/meme\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/ping\"\n)\n\nfunc main() {\n\tbot.Start()\n}\n<commit_msg>Import _ net\/http\/pprof for profiling data<commit_after>package main\n\nimport (\n\t\"github.com\/uva-its\/gopherbot\/bot\"\n\n\t\/\/ If re-compiling Gopherbot, you can comment out unused connectors.\n\t\/\/ Select the connector and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/connectors\/slack\"\n\n\t\/\/ If re-compiling, you can comment out unused brain implementations.\n\t\/\/ Select the brain to use and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/brains\/file\"\n\n\t\/\/ If re-compiling, you can comment out unused elevator implementations.\n\t\/\/ Select the elevator to use and provide configuration in conf\/gopherbot.yaml\n\t_ \"github.com\/uva-its\/gopherbot\/elevators\/duo\"\n\t_ \"github.com\/uva-its\/gopherbot\/elevators\/totp\"\n\n\t\/\/ If re-compiling, you can select the plugins you want. Otherwise you can disable\n\t\/\/ them in conf\/plugins\/<plugin>.json with \"Disabled\": true\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/help\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/knock\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/lists\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/meme\"\n\t_ \"github.com\/uva-its\/gopherbot\/goplugins\/ping\"\n\n\t\/\/ Enable profiling\n\t_ \"net\/http\/pprof\"\n)\n\nfunc main() {\n\tbot.Start()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/CasualSuperman\/Diorite\/multiverse\"\n)\n\nconst dataLocation = \".diorite\"\nconst multiverseFileName = \"multiverse.mtg\"\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\t\/\/ Get the information about our user.\n\tu, err := user.Current()\n\tif err != nil {\n\t\t\/\/ Well that ended poorly.\n\t\tfmt.Println(\"Something went horribly wrong.\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Find out if our storage directory exists.\n\tif !homeDirExists(u.HomeDir) {\n\t\tfmt.Println(\"Creating ~\/\" + dataLocation)\n\t\terr := createHomeDir(u.HomeDir)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Unable to create required storage directory.\")\n\t\t}\n\t}\n\n\tos.Chdir(dataDir(u.HomeDir))\n\tmultiverseFile, err := os.Open(multiverseFileName)\n\n\tvar m multiverse.Multiverse\n\tmultiverseLoaded := false\n\n\tif err != nil {\n\t\tif _, ok := err.(*os.PathError); ok {\n\t\t\tfmt.Println(\"No local database available. A local copy will be downloaded.\")\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Loading local multiverse.\")\n\n\t\tf, _ := os.Create(\"inflateprofile\")\n\t\tpprof.StartCPUProfile(f)\n\t\tm, err = multiverse.Load(multiverseFile)\n\t\tpprof.StopCPUProfile()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Unable to load multiverse:\", err)\n\t\t} else {\n\t\t\tfmt.Println(\"Multiverse loaded.\")\n\t\t\tmultiverseLoaded = true\n\t\t}\n\t\tmultiverseFile.Close()\n\t}\n\n\tfmt.Println(\"Checking for multiverse updates.\")\n\tmostRecentUpdate, err := onlineModifiedAt()\n\n\tif err != nil {\n\t\tfmt.Println(\"Warning! Online database unavailable. Your card index may be out of date.\")\n\t}\n\n\tvar saved sync.WaitGroup\n\n\tif mostRecentUpdate.After(m.Modified) {\n\t\tfmt.Println(\"Multiverse update available! Downloading now.\")\n\t\tom, err := downloadOnline()\n\t\tif err != nil {\n\t\t\tif !multiverseLoaded {\n\t\t\t\tfmt.Println(\"Unable to download multiverse and no local database available. Unable to continue.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println(\"Unable to download most recent multiverse. Continuing with an out-of-date version.\")\n\t\t}\n\t\tfmt.Println(\"Transforming multiverse.\")\n\t\tm = multiverse.Create(om.Sets, time.Now())\n\n\t\tfile, err := os.Create(multiverseFileName)\n\t\tif err == nil {\n\t\t\tsaved.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer file.Close()\n\t\t\t\tdefer saved.Done()\n\t\t\t\tfmt.Println(\"Saving downloaded multiverse.\")\n\t\t\t\terr := m.WriteTo(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error saving multiverse:\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"No updates available.\")\n\t}\n\n\tf, _ := os.Create(\"searchprofile\")\n\tpprof.StartCPUProfile(f)\n\tm.SearchByName(\"a\")\n\tm.SearchByName(\"av\")\n\tm.SearchByName(\"ava\")\n\tm.SearchByName(\"avat\")\n\tm.SearchByName(\"avata\")\n\tm.SearchByName(\"avatar\")\n\tcards := m.SearchByName(\"lightning\")\n\tpprof.StopCPUProfile()\n\tnames := make([]string, len(cards))\n\tfor i, card := range cards {\n\t\tnames[i] = card.Name\n\t}\n\tfmt.Println(names)\n\n\tf, _ = os.Create(\"memprofile\")\n\tpprof.WriteHeapProfile(f)\n\tf.Close()\n\tsaved.Wait()\n}\n\nfunc dataDir(baseDir string) string {\n\treturn baseDir + string(os.PathSeparator) + dataLocation\n}\n\nfunc homeDirExists(baseDir string) bool {\n\tdir, err := os.Stat(dataDir(baseDir))\n\treturn err == nil && dir.IsDir()\n}\n\nfunc createHomeDir(baseDir string) error {\n\treturn os.Mkdir(dataDir(baseDir), os.ModePerm|os.ModeDir)\n}\n<commit_msg>Removed profiling code.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/CasualSuperman\/Diorite\/multiverse\"\n)\n\nconst dataLocation = \".diorite\"\nconst multiverseFileName = \"multiverse.mtg\"\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\t\/\/ Get the information about our user.\n\tu, err := user.Current()\n\tif err != nil {\n\t\t\/\/ Well that ended poorly.\n\t\tfmt.Println(\"Something went horribly wrong.\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Find out if our storage directory exists.\n\tif !homeDirExists(u.HomeDir) {\n\t\tfmt.Println(\"Creating ~\/\" + dataLocation)\n\t\terr := createHomeDir(u.HomeDir)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Unable to create required storage directory.\")\n\t\t}\n\t}\n\n\tos.Chdir(dataDir(u.HomeDir))\n\tmultiverseFile, err := os.Open(multiverseFileName)\n\n\tvar m multiverse.Multiverse\n\tmultiverseLoaded := false\n\n\tif err != nil {\n\t\tif _, ok := err.(*os.PathError); ok {\n\t\t\tfmt.Println(\"No local database available. A local copy will be downloaded.\")\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Loading local multiverse.\")\n\t\tm, err = multiverse.Load(multiverseFile)\n\t\tmultiverseFile.Close()\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"Unable to load multiverse:\", err)\n\t} else {\n\t\tfmt.Println(\"Multiverse loaded.\")\n\t\tmultiverseLoaded = true\n\t}\n\n\tfmt.Println(\"Checking for multiverse updates.\")\n\tmostRecentUpdate, err := onlineModifiedAt()\n\n\tif err != nil {\n\t\tfmt.Println(\"Warning! Online database unavailable. Your card index may be out of date.\")\n\t}\n\n\tvar saved sync.WaitGroup\n\n\tif mostRecentUpdate.After(m.Modified) {\n\t\tfmt.Println(\"Multiverse update available! Downloading now.\")\n\t\tom, err := downloadOnline()\n\t\tif err != nil {\n\t\t\tif !multiverseLoaded {\n\t\t\t\tfmt.Println(\"Unable to download multiverse and no local database available. Unable to continue.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println(\"Unable to download most recent multiverse. Continuing with an out-of-date version.\")\n\t\t}\n\t\tfmt.Println(\"Transforming multiverse.\")\n\t\tm = multiverse.Create(om.Sets, time.Now())\n\n\t\tfile, err := os.Create(multiverseFileName)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t} else {\n\t\t\tsaved.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer file.Close()\n\t\t\t\tdefer saved.Done()\n\t\t\t\tfmt.Println(\"Saving downloaded multiverse.\")\n\t\t\t\terr := m.WriteTo(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error saving multiverse:\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\tfmt.Println(\"No updates available.\")\n\t}\n\n\tcards := m.SearchByName(\"avat\")\n\n\tnames := make([]string, len(cards))\n\tfor i, card := range cards {\n\t\tnames[i] = card.Name\n\t}\n\tfmt.Println(names)\n\n\tsaved.Wait()\n}\n\nfunc dataDir(baseDir string) string {\n\treturn baseDir + string(os.PathSeparator) + dataLocation\n}\n\nfunc homeDirExists(baseDir string) bool {\n\tdir, err := os.Stat(dataDir(baseDir))\n\treturn err == nil && dir.IsDir()\n}\n\nfunc createHomeDir(baseDir string) error {\n\treturn os.Mkdir(dataDir(baseDir), os.ModePerm|os.ModeDir)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Jacob Dearing\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/dearing\/havoc\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar NAME = GetRandomName(0)\n\nfunc main() {\n\n\tprocs := runtime.NumCPU()\n\truntime.GOMAXPROCS(procs)\n\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", HandleIndex)\n\trouter.GET(\"\/kill\", HandleKill)\n\n\trouter.GET(\"\/data\/reset\", HandleDataReset)\n\trouter.GET(\"\/data\/set\/:value\", HandleDataSet)\n\trouter.GET(\"\/data\/fill\", HandleDataFill)\n\trouter.GET(\"\/data\/fill\/crypto\", HandleDataFillCrypto)\n\n\trouter.GET(\"\/procs\/:value\", HandleProcs)\n\n\tlog.Printf(\"%s : starting.\\n\", NAME)\n\n\tgo func() {\n\t\tlog.Println(http.ListenAndServe(\":8081\", nil))\n\t}()\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}\n\n\/\/ HANDLES\n\nfunc HandleIndex(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"My name is %s.\\n\\n\", NAME)\n\tfmt.Fprint(w, base64.StdEncoding.EncodeToString(havoc.Data))\n}\n\nfunc HandleKill(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"%s: I hardly knew thee.\\n\", NAME)\n\tos.Exit(0)\n}\n\n\/\/ HANDLES - DATA\n\nfunc HandleDataSet(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tsize, err := strconv.Atoi(ps.ByName(\"value\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%s: error in parse: %s\\n\", NAME, err)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\thavoc.DataSet(size)\n\t}()\n\n\tfmt.Fprintf(w, \"%s: %d indices set.\\n\", NAME, size)\n\n}\n\nfunc HandleDataReset(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataReset()\n\tfmt.Fprintf(w, \"%s: Reset.\\n\", NAME)\n}\n\nfunc HandleDataFill(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataFill()\n\tfmt.Fprintf(w, \"%s: Filling Data, (%d) bytes, with ones.\\n\", NAME, len(havoc.Data))\n}\n\nfunc HandleDataFillCrypto(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataFillCrypto()\n\tfmt.Fprintf(w, \"%s: Filling Data, (%d) bytes, with random data.\\n\", NAME, len(havoc.Data))\n}\n\n\/\/ HANDLES - CPU\n\nfunc HandleProcs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tprocs, err := strconv.Atoi(ps.ByName(\"value\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%s: error in parse: %s\\n\", NAME, err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < procs; i++ {\n\t\tgo func() {\n\t\t\thavoc.Forever()\n\t\t}()\n\t}\n\n\tfmt.Fprintf(w, \"%s: %d of %d processors engaged.\\n\", NAME, procs, runtime.NumCPU())\n}\n<commit_msg>update procs<commit_after>\/\/ Copyright © 2016 Jacob Dearing\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/dearing\/havoc\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar NAME = GetRandomName(0)\n\nfunc main() {\n\n\tprocs := runtime.NumCPU()\n\truntime.GOMAXPROCS(procs)\n\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", HandleIndex)\n\trouter.GET(\"\/kill\", HandleKill)\n\n\trouter.GET(\"\/data\/reset\", HandleDataReset)\n\trouter.GET(\"\/data\/set\/:value\", HandleDataSet)\n\trouter.GET(\"\/data\/fill\", HandleDataFill)\n\trouter.GET(\"\/data\/fill\/crypto\", HandleDataFillCrypto)\n\n\trouter.GET(\"\/procs\/:value\", HandleProcs)\n\n\tlog.Printf(\"%s : starting.\\n\", NAME)\n\n\tgo func() {\n\t\tlog.Println(http.ListenAndServe(\":8081\", nil))\n\t}()\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}\n\n\/\/ HANDLES\n\nfunc HandleIndex(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"My name is %s.\\n\\n\", NAME)\n\tfmt.Fprint(w, base64.StdEncoding.EncodeToString(havoc.Data))\n}\n\nfunc HandleKill(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprintf(w, \"%s: I hardly knew thee.\\n\", NAME)\n\tos.Exit(0)\n}\n\n\/\/ HANDLES - DATA\n\nfunc HandleDataSet(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tsize, err := strconv.Atoi(ps.ByName(\"value\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%s: error in parse: %s\\n\", NAME, err)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\thavoc.DataSet(size)\n\t}()\n\n\tfmt.Fprintf(w, \"%s: %d indices set.\\n\", NAME, size)\n\n}\n\nfunc HandleDataReset(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataReset()\n\tfmt.Fprintf(w, \"%s: Reset.\\n\", NAME)\n}\n\nfunc HandleDataFill(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataFill()\n\tfmt.Fprintf(w, \"%s: Filling Data, (%d) bytes, with ones.\\n\", NAME, len(havoc.Data))\n}\n\nfunc HandleDataFillCrypto(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\thavoc.DataFillCrypto()\n\tfmt.Fprintf(w, \"%s: Filling Data, (%d) bytes, with random data.\\n\", NAME, len(havoc.Data))\n}\n\n\/\/ HANDLES - CPU\n\nfunc HandleProcs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tprocs, err := strconv.Atoi(ps.ByName(\"value\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%s: error in parse: %s\\n\", NAME, err)\n\t\treturn\n\t}\n\n\truntime.GOMAXPROCS(procs)\n\n\tfor i := 0; i < procs; i++ {\n\t\tgo func() {\n\t\t\thavoc.Forever()\n\t\t}()\n\t}\n\n\tfmt.Fprintf(w, \"%s: %d of %d processors engaged.\\n\", NAME, procs, runtime.NumCPU())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/josledp\/goprompt\/prompt\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nvar logger *log.Logger\n\ntype formatFunc func(string, ...termcolor.Mode) string\n\nfunc main() {\n\tvar noColor bool\n\tvar template string\n\tvar customTemplate string\n\n\tconfig, err := prompt.NewConfigFromFile(os.Getenv(\"HOME\") + \"\/.config\/goprompt\/goprompt.json\")\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to get config: %v\", err)\n\t}\n\n\tcurrentTemplates := strings.Join(prompt.GetDefaultTemplates(), \",\")\n\tflag.StringVar(&template, \"template\", \"Evermeet\", \"template to use for the prompt (\"+currentTemplates+\")\")\n\tflag.StringVar(&customTemplate, \"custom-template\", \"<(%python%) ><%aws%|><%user% ><%lastcommand% ><%path%>< %git%>$ \", \"template to use for the prompt\")\n\tflag.BoolVar(&noColor, \"no-color\", false, \"Disable color on prompt\")\n\n\tflag.Parse()\n\n\tflagsSet := make(map[string]struct{})\n\tflag.Visit(func(f *flag.Flag) { flagsSet[f.Name] = struct{}{} })\n\n\t_, templateSet := flagsSet[\"template\"]\n\t_, customTemplateSet := flagsSet[\"custom-template\"]\n\n\tif templateSet && customTemplateSet {\n\t\tfmt.Fprintf(os.Stderr, \"Please provice --template or --custom-template, but not both!\")\n\t\tos.Exit(1)\n\t}\n\n\tvar t string\n\tvar options map[string]interface{}\n\toptions, _ = config.GetOptions()\n\n\tif customTemplateSet {\n\t\tt = customTemplate\n\t} else if !templateSet {\n\t\tt, _ = config.GetCustomTemplate()\n\t}\n\n\tif t == \"\" {\n\t\tvar ok bool\n\t\tt, ok = prompt.GetTemplate(template)\n\t\tif !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"Template %s not found\", template)\n\t\t}\n\t\tif options == nil {\n\t\t\toptions, _ = prompt.GetTemplateOptions(template)\n\t\t}\n\t}\n\tpr := prompt.New(options)\n\toutput := pr.Compile(t, !noColor)\n\tfmt.Println(output)\n\n}\n<commit_msg>remove unused func definition<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/josledp\/goprompt\/prompt\"\n)\n\nvar logger *log.Logger\n\nfunc main() {\n\tvar noColor bool\n\tvar template string\n\tvar customTemplate string\n\n\tconfig, err := prompt.NewConfigFromFile(os.Getenv(\"HOME\") + \"\/.config\/goprompt\/goprompt.json\")\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to get config: %v\", err)\n\t}\n\n\tcurrentTemplates := strings.Join(prompt.GetDefaultTemplates(), \",\")\n\tflag.StringVar(&template, \"template\", \"Evermeet\", \"template to use for the prompt (\"+currentTemplates+\")\")\n\tflag.StringVar(&customTemplate, \"custom-template\", \"<(%python%) ><%aws%|><%user% ><%lastcommand% ><%path%>< %git%>$ \", \"template to use for the prompt\")\n\tflag.BoolVar(&noColor, \"no-color\", false, \"Disable color on prompt\")\n\n\tflag.Parse()\n\n\tflagsSet := make(map[string]struct{})\n\tflag.Visit(func(f *flag.Flag) { flagsSet[f.Name] = struct{}{} })\n\n\t_, templateSet := flagsSet[\"template\"]\n\t_, customTemplateSet := flagsSet[\"custom-template\"]\n\n\tif templateSet && customTemplateSet {\n\t\tfmt.Fprintf(os.Stderr, \"Please provice --template or --custom-template, but not both!\")\n\t\tos.Exit(1)\n\t}\n\n\tvar t string\n\tvar options map[string]interface{}\n\toptions, _ = config.GetOptions()\n\n\tif customTemplateSet {\n\t\tt = customTemplate\n\t} else if !templateSet {\n\t\tt, _ = config.GetCustomTemplate()\n\t}\n\n\tif t == \"\" {\n\t\tvar ok bool\n\t\tt, ok = prompt.GetTemplate(template)\n\t\tif !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"Template %s not found\", template)\n\t\t}\n\t\tif options == nil {\n\t\t\toptions, _ = prompt.GetTemplateOptions(template)\n\t\t}\n\t}\n\tpr := prompt.New(options)\n\toutput := pr.Compile(t, !noColor)\n\tfmt.Println(output)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/Cj-Malone\/feed-org\/sources\"\n\t\"github.com\/gorilla\/mux\"\n\t\/\/\"github.com\/nenadl\/atom\"\n)\n\nfunc main() {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"\/{source}\/{id}\/{page:[0-9]+}.atom\", queryDecode)\n\tr.HandleFunc(\"\/{source}\/{page:[0-9]+}.atom\", queryDecode)\n\tr.HandleFunc(\"\/{source}.atom\", queryDecode)\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", r))\n}\n\nfunc queryDecode(responseWriter http.ResponseWriter, request *http.Request) {\n\tvar source sources.Source\n\n\tvars := mux.Vars(request)\n\tpage := 1\n\tif vars[\"page\"] != \"\" {\n\t\tpageI, _ := strconv.ParseInt(vars[\"page\"], 10, 0)\n\t\tpage = int(pageI)\n\t}\n\n\tswitch vars[\"source\"] {\n\tcase \"org\":\n\t\tsource = sources.Org{}\n\t\tbreak\n\tcase \"imdb\":\n\t\tsource = sources.Imdb{}\n\t\tbreak\n\t\/\/case \"iplayer\":\n\t\/\/\tsource = sources.Iplayer{}\n\t\/\/\tbreak\n\tdefault:\n\t\tlog.Printf(\"Unhandled URL: %s\\n\", request.URL)\n\t\thttp.Error(responseWriter, \"Feed not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tfeed, err := source.CreateFeed(vars[\"id\"], int(page))\n\tif err != nil {\n\t\tlog.Printf(\"Error creating feed: %s\\n\", err)\n\t\thttp.Error(responseWriter, \"Feed creation error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresponseWriter.Header().Set(\"Content-Type\", \"application\/atom+xml\")\n\tencoder := xml.NewEncoder(responseWriter)\n\tencoder.Indent(\"\", \"\t\")\n\terr = encoder.Encode(feed)\n\tif err != nil {\n\t\tlog.Printf(\"Error encoding feed: %s\\n\", err)\n\t\thttp.Error(responseWriter, \"Feed encoding error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<commit_msg>Only listen locally<commit_after>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/Cj-Malone\/feed-org\/sources\"\n\t\"github.com\/gorilla\/mux\"\n\t\/\/\"github.com\/nenadl\/atom\"\n)\n\nfunc main() {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"\/{source}\/{id}\/{page:[0-9]+}.atom\", queryDecode)\n\tr.HandleFunc(\"\/{source}\/{page:[0-9]+}.atom\", queryDecode)\n\tr.HandleFunc(\"\/{source}.atom\", queryDecode)\n\n\tlog.Fatal(http.ListenAndServe(\"127.0.0.1:8080\", r))\n}\n\nfunc queryDecode(responseWriter http.ResponseWriter, request *http.Request) {\n\tvar source sources.Source\n\n\tvars := mux.Vars(request)\n\tpage := 1\n\tif vars[\"page\"] != \"\" {\n\t\tpageI, _ := strconv.ParseInt(vars[\"page\"], 10, 0)\n\t\tpage = int(pageI)\n\t}\n\n\tswitch vars[\"source\"] {\n\tcase \"org\":\n\t\tsource = sources.Org{}\n\t\tbreak\n\tcase \"imdb\":\n\t\tsource = sources.Imdb{}\n\t\tbreak\n\t\/\/case \"iplayer\":\n\t\/\/\tsource = sources.Iplayer{}\n\t\/\/\tbreak\n\tdefault:\n\t\tlog.Printf(\"Unhandled URL: %s\\n\", request.URL)\n\t\thttp.Error(responseWriter, \"Feed not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tfeed, err := source.CreateFeed(vars[\"id\"], int(page))\n\tif err != nil {\n\t\tlog.Printf(\"Error creating feed: %s\\n\", err)\n\t\thttp.Error(responseWriter, \"Feed creation error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresponseWriter.Header().Set(\"Content-Type\", \"application\/atom+xml\")\n\tencoder := xml.NewEncoder(responseWriter)\n\tencoder.Indent(\"\", \"\t\")\n\terr = encoder.Encode(feed)\n\tif err != nil {\n\t\tlog.Printf(\"Error encoding feed: %s\\n\", err)\n\t\thttp.Error(responseWriter, \"Feed encoding error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\t\/\/ Startup flags\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to\").Required().TCP()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12)\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore\").PlaceHolder(\"PASS\").Required().String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509)\").Required().String()\n\twatchFiles = kingpin.Flag(\"auto-reload\", \"Watch keystore file with inotify\/fswatch and reload on changes\").Bool()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated)\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated)\").PlaceHolder(\"OU\").Strings()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr\").Bool()\n)\n\n\/\/ Global logger instance\nvar logger *log.Logger\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t} else {\n\t\tlogger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ A channel to notify us that the listener is running.\n\tstarted := make(chan bool, 1)\n\tgo listen(started, listeners)\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Printf(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tif *watchFiles {\n\t\tgo watch([]string{*keystorePath})\n\t}\n\n\tlogger.Printf(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Printf(\"all listeners closed, shutting down\")\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, listeners *sync.WaitGroup) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig()\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tgo accept(listener, handlers, stopper, leaf)\n\tgo signalHandler(listener, stopper, listeners)\n\n\tstarted <- true\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tlisteners.Done()\n}\n<commit_msg>Makes --allow-all and --allow-cn\/allow-ou mutually exclusive<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\t\/\/ Startup flags\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to\").Required().TCP()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12)\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore\").PlaceHolder(\"PASS\").Required().String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509)\").Required().String()\n\twatchFiles = kingpin.Flag(\"auto-reload\", \"Watch keystore file with inotify\/fswatch and reload on changes\").Bool()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated)\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated)\").PlaceHolder(\"OU\").Strings()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr\").Bool()\n)\n\n\/\/ Global logger instance\nvar logger *log.Logger\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t} else {\n\t\tlogger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedCNs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-cn are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedOUs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-ou are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ A channel to notify us that the listener is running.\n\tstarted := make(chan bool, 1)\n\tgo listen(started, listeners)\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Printf(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tif *watchFiles {\n\t\tgo watch([]string{*keystorePath})\n\t}\n\n\tlogger.Printf(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Printf(\"all listeners closed, shutting down\")\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, listeners *sync.WaitGroup) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig()\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tgo accept(listener, handlers, stopper, leaf)\n\tgo signalHandler(listener, stopper, listeners)\n\n\tstarted <- true\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tlisteners.Done()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/juju\/ratelimit\"\n)\n\n\/\/ getIPs takes in an haproxy log line and returns the client IP and the CDN\n\/\/ connecting IP which is in the captured request header. For us, the CDN\n\/\/ connecting IP happens to be the 8th captured request header.\nfunc getIPs(logLine string) (*net.IP, *net.IP) {\n\tif logLine == \"\" {\n\t\treturn nil, nil\n\t}\n\t\/\/ This string parsing stuff was lifted from TPS\n\tvar a, b int\n\tif a = strings.Index(logLine, \"]:\") + 3; a == -1 {\n\t\treturn nil, nil\n\t}\n\tif b = strings.Index(logLine[a:], \":\"); b == -1 {\n\t\treturn nil, nil\n\t}\n\tclientIPString := logLine[a : a+b]\n\tclientIP := net.ParseIP(clientIPString)\n\tif clientIP == nil {\n\t\treturn nil, nil\n\t}\n\n\tlogLine = logLine[a+b:]\n\t\/\/ The first curly-braced block contains our request headers\n\tif a = strings.Index(logLine, \"{\"); a == -1 {\n\t\treturn &clientIP, nil\n\t}\n\tif b = strings.Index(logLine[a:], \"}\"); b == -1 {\n\t\treturn &clientIP, nil\n\t}\n\tbracketedHeaders := logLine[a : a+b]\n\theaders := strings.Split(bracketedHeaders, \"|\")\n\tif len(headers) < 7 {\n\t\treturn &clientIP, nil\n\t}\n\tipString := headers[7]\n\tcdnIP := net.ParseIP(ipString)\n\tif cdnIP == nil {\n\t\treturn &clientIP, nil\n\t}\n\treturn &clientIP, &cdnIP\n}\n\nfunc getLogTimestamp(logLine string) int64 {\n\ttimestampStr := strings.Split(logLine, \" \")[6]\n\tts, _ := time.Parse(\"[02\/Jan\/2006:15:04:05.999]\", timestampStr)\n\n\treturn (ts.Unix())\n}\n\ntype ipList struct {\n\tips []net.IP\n\tnets []net.IPNet\n}\n\nfunc readIPList(filename string) *ipList {\n\tvar list ipList\n\tf, _ := os.Open(filename)\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Index(strings.TrimSpace(line), \"#\") == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(line, \"\/\") {\n\t\t\t_, n, err := net.ParseCIDR(line)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Unable to parse CIDR.\\nLine:\\n%s\\nError:\\n%s\\n\", line, err)\n\t\t\t}\n\t\t\tif n != nil {\n\t\t\t\tlist.nets = append(list.nets, *n)\n\t\t\t}\n\t\t} else {\n\t\t\tip := net.ParseIP(line)\n\t\t\tif ip != nil {\n\t\t\t\tlist.ips = append(list.ips, ip)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Unable to parse IP address in list: %s\\n\", line)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn &list\n}\n\nfunc (l *ipList) contains(checkIP *net.IP) bool {\n\tfor _, ip := range l.ips {\n\t\tif ip.Equal(*checkIP) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, net := range l.nets {\n\t\tif net.Contains(*checkIP) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc main() {\n\tvar hits map[string]*ratelimit.Bucket\n\thits = make(map[string]*ratelimit.Bucket)\n\t\/\/\tfor line := range t.Lines {\n\tscanner := bufio.NewScanner(os.Stdin)\n\twhitelist := readIPList(\"\/etc\/haproxy-shared\/whitelist-ips\")\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tclientIP, cdnIP := getIPs(line)\n\t\tif cdnIP == nil || clientIP == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, found := hits[cdnIP.String()]; !found {\n\t\t\thits[cdnIP.String()] = ratelimit.NewBucket(time.Duration(3)*time.Minute, 180)\n\t\t}\n\t\t_, isSoonerThanMaxWait := hits[cdnIP.String()].TakeMaxDuration(1, 0)\n\t\tif !isSoonerThanMaxWait {\n\t\t\tif whitelist.contains(cdnIP) {\n\t\t\t\t\/\/fmt.Println(\"but is whitelisted so we don't care.\")\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Over limit: %s\\n\", cdnIP.String())\n\t\t\t}\n\t\t}\n\n\t}\n}\n<commit_msg>Add a syslog server.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/juju\/ratelimit\"\n\t\"gopkg.in\/mcuadros\/go-syslog.v2\"\n)\n\n\/\/ getIPs takes in an haproxy log line and returns the client IP and the CDN\n\/\/ connecting IP which is in the captured request header. For us, the CDN\n\/\/ connecting IP happens to be the 8th captured request header.\nfunc getIPs(logLine string) (*net.IP, *net.IP) {\n\tif logLine == \"\" {\n\t\treturn nil, nil\n\t}\n\t\/\/ This string parsing stuff was lifted from TPS\n\tvar a, b int\n\tif a = strings.Index(logLine, \"]:\") + 3; a == -1 {\n\t\treturn nil, nil\n\t}\n\tif b = strings.Index(logLine[a:], \":\"); b == -1 {\n\t\treturn nil, nil\n\t}\n\tclientIPString := logLine[a : a+b]\n\tclientIP := net.ParseIP(clientIPString)\n\tif clientIP == nil {\n\t\treturn nil, nil\n\t}\n\n\tlogLine = logLine[a+b:]\n\t\/\/ The first curly-braced block contains our request headers\n\tif a = strings.Index(logLine, \"{\"); a == -1 {\n\t\treturn &clientIP, nil\n\t}\n\tif b = strings.Index(logLine[a:], \"}\"); b == -1 {\n\t\treturn &clientIP, nil\n\t}\n\tbracketedHeaders := logLine[a : a+b]\n\theaders := strings.Split(bracketedHeaders, \"|\")\n\tif len(headers) < 7 {\n\t\treturn &clientIP, nil\n\t}\n\tipString := headers[7]\n\tcdnIP := net.ParseIP(ipString)\n\tif cdnIP == nil {\n\t\treturn &clientIP, nil\n\t}\n\treturn &clientIP, &cdnIP\n}\n\nfunc getLogTimestamp(logLine string) int64 {\n\ttimestampStr := strings.Split(logLine, \" \")[6]\n\tts, _ := time.Parse(\"[02\/Jan\/2006:15:04:05.999]\", timestampStr)\n\n\treturn (ts.Unix())\n}\n\ntype ipList struct {\n\tips []net.IP\n\tnets []net.IPNet\n}\n\nfunc readIPList(filename string) *ipList {\n\tvar list ipList\n\tf, _ := os.Open(filename)\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Index(strings.TrimSpace(line), \"#\") == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(line, \"\/\") {\n\t\t\t_, n, err := net.ParseCIDR(line)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Unable to parse CIDR.\\nLine:\\n%s\\nError:\\n%s\\n\", line, err)\n\t\t\t}\n\t\t\tif n != nil {\n\t\t\t\tlist.nets = append(list.nets, *n)\n\t\t\t}\n\t\t} else {\n\t\t\tip := net.ParseIP(line)\n\t\t\tif ip != nil {\n\t\t\t\tlist.ips = append(list.ips, ip)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Unable to parse IP address in list: %s\\n\", line)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn &list\n}\n\nfunc (l *ipList) contains(checkIP *net.IP) bool {\n\tfor _, ip := range l.ips {\n\t\tif ip.Equal(*checkIP) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, net := range l.nets {\n\t\tif net.Contains(*checkIP) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc main() {\n\tvar hits map[string]*ratelimit.Bucket\n\thits = make(map[string]*ratelimit.Bucket)\n\n\tchannel := make(syslog.LogPartsChannel)\n\thandler := syslog.NewChannelHandler(channel)\n\n\tserver := syslog.NewServer()\n\tserver.SetFormat(syslog.RFC3164)\n\tserver.SetHandler(handler)\n\tif err := server.ListenUDP(\"0.0.0.0:514\"); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tif err := server.Boot(); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\twhitelist := readIPList(\"\/etc\/haproxy-shared\/whitelist-ips\")\n\n\tgo func(channel syslog.LogPartsChannel) {\n\t\tfor logParts := range channel {\n\t\t\tvar line string\n\t\t\tvar ok bool\n\t\t\tif line, ok = logParts[\"content\"].(string); !ok || line == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclientIP, cdnIP := getIPs(line)\n\t\t\tif cdnIP == nil || clientIP == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, found := hits[cdnIP.String()]; !found {\n\t\t\t\thits[cdnIP.String()] = ratelimit.NewBucket(time.Duration(3)*time.Minute, 180)\n\t\t\t}\n\t\t\t_, isSoonerThanMaxWait := hits[cdnIP.String()].TakeMaxDuration(1, 0)\n\t\t\tif !isSoonerThanMaxWait {\n\t\t\t\tif whitelist.contains(cdnIP) {\n\t\t\t\t\t\/\/fmt.Println(\"but is whitelisted so we don't care.\")\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"Over limit: %s\\n\", cdnIP.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(channel)\n\n\tserver.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/ogier\/pflag\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nvar (\n\tname = \"rl\"\n\tversion = \"0.3.1\"\n\n\tflagset = pflag.NewFlagSet(name, pflag.ContinueOnError)\n\tdelimiter = flagset.StringP(\"delimiter\", \"d\", \"\", \"\")\n\tisHelp = flagset.BoolP(\"help\", \"h\", false, \"\")\n\tisVersion = flagset.BoolP(\"version\", \"v\", false, \"\")\n)\n\nfunc printUsage() {\n\tfmt.Fprintf(os.Stderr, `\nUsage: %s [OPTION]... [FILE]...\nReverse lines of FILE(s), or standard input.\n\nOptions:\n -d, --delimiter=DELIM delimit lines by DELIM\n -h, --help display this help text and exit\n -v, --version output version information and exit\n`[1:], name)\n}\n\nfunc printVersion() {\n\tfmt.Fprintln(os.Stderr, version)\n}\n\nfunc printErr(err error) {\n\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", name, err)\n}\n\nfunc guideToHelp() {\n\tfmt.Fprintf(os.Stderr, \"Try '%s --help' for more information.\\n\", name)\n}\n\nfunc do(rev *Reverser, r io.Reader) error {\n\tb := bufio.NewScanner(r)\n\tfor b.Scan() {\n\t\tfmt.Println(rev.Reverse(b.Text()))\n\t}\n\treturn b.Err()\n}\n\nfunc _main() int {\n\tflagset.SetOutput(ioutil.Discard)\n\tif err := flagset.Parse(os.Args[1:]); err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif *isHelp {\n\t\tprintUsage()\n\t\treturn 0\n\t}\n\tif *isVersion {\n\t\tprintVersion()\n\t\treturn 0\n\t}\n\n\tr, err := argf.From(flagset.Args())\n\tif err != nil {\n\t\tprintErr(err)\n\t\treturn 2\n\t}\n\trev := NewReverser()\n\trev.SetDelimiter(*delimiter)\n\tif err = do(rev, r); err != nil {\n\t\tprintErr(err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<commit_msg>s\/&main.name\/&main.cmdName\/<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/ogier\/pflag\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nvar (\n\tcmdName = \"rl\"\n\tversion = \"0.3.1\"\n\n\tflagset = pflag.NewFlagSet(cmdName, pflag.ContinueOnError)\n\tdelimiter = flagset.StringP(\"delimiter\", \"d\", \"\", \"\")\n\tisHelp = flagset.BoolP(\"help\", \"h\", false, \"\")\n\tisVersion = flagset.BoolP(\"version\", \"v\", false, \"\")\n)\n\nfunc printUsage() {\n\tfmt.Fprintf(os.Stderr, `\nUsage: %s [OPTION]... [FILE]...\nReverse lines of FILE(s), or standard input.\n\nOptions:\n -d, --delimiter=DELIM delimit lines by DELIM\n -h, --help display this help text and exit\n -v, --version output version information and exit\n`[1:], cmdName)\n}\n\nfunc printVersion() {\n\tfmt.Fprintln(os.Stderr, version)\n}\n\nfunc printErr(err error) {\n\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", cmdName, err)\n}\n\nfunc guideToHelp() {\n\tfmt.Fprintf(os.Stderr, \"Try '%s --help' for more information.\\n\", cmdName)\n}\n\nfunc do(rev *Reverser, r io.Reader) error {\n\tb := bufio.NewScanner(r)\n\tfor b.Scan() {\n\t\tfmt.Println(rev.Reverse(b.Text()))\n\t}\n\treturn b.Err()\n}\n\nfunc _main() int {\n\tflagset.SetOutput(ioutil.Discard)\n\tif err := flagset.Parse(os.Args[1:]); err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif *isHelp {\n\t\tprintUsage()\n\t\treturn 0\n\t}\n\tif *isVersion {\n\t\tprintVersion()\n\t\treturn 0\n\t}\n\n\tr, err := argf.From(flagset.Args())\n\tif err != nil {\n\t\tprintErr(err)\n\t\treturn 2\n\t}\n\trev := NewReverser()\n\trev.SetDelimiter(*delimiter)\n\tif err = do(rev, r); err != nil {\n\t\tprintErr(err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"time\"\n\n\t_ \"gnd.la\/admin\"\n\t\"gnd.la\/app\"\n\t\"gnd.la\/apps\/articles\"\n\t\"gnd.la\/apps\/docs\"\n\t_ \"gnd.la\/bootstrap\"\n\t\"gnd.la\/config\"\n\t_ \"gnd.la\/encoding\/codec\/msgpack\"\n\t\"gnd.la\/internal\/project\"\n\t_ \"gnd.la\/orm\/driver\/postgres\"\n\t_ \"gnd.la\/template\/markdown\"\n\t\"gnd.la\/util\/pathutil\"\n\t\/\/\t\"time\"\n)\n\nvar (\n\tSTATIC_FILES_PATH = pathutil.Relative(\"assets\")\n\tApp *app.App\n)\n\nfunc init() {\n\tconfig.MustParse()\n\tApp = app.New()\n\tApp.HandleAssets(\"\/static\/\", STATIC_FILES_PATH)\n\tApp.AddTemplateVars(map[string]interface{}{\n\t\t\"Repo\": \"http:\/\/github.com\/rainycape\/gondola\",\n\t})\n\tApp.SetTrustXHeaders(true)\n\n\t\/\/ gnd.la handler, used by go get, etc...\n\tApp.HandleOptions(\"\/\", gndlaHandler, &app.Options{Host: \"gnd.la\"})\n\n\t\/\/ Site handlers\n\tApp.Handle(\"^\/$\", app.TemplateHandler(\"main.html\", map[string]interface{}{\"Section\": \"home\"}))\n\n\t\/\/ docs app\n\tdocs.DefaultContext.GOROOT = goRoot\n\tdocs.DefaultContext.GOPATH = goPath\n\tApp.Include(\"\/doc\/\", docs.App, \"docs-base.html\")\n\tdocs.Groups = []*docs.Group{\n\t\t{\"Gondola Packages\", []string{\"gnd.la\/\"}},\n\t}\n\t\/\/ Wait 10 seconds so the app starts and go get\n\t\/\/ can retrieve gnd.la, since this same app is\n\t\/\/ serving that content.\n\ttime.AfterFunc(10*time.Second, func() {\n\t\tdocs.StartUpdatingPackages(time.Minute * 10)\n\t})\n\n\t\/\/ articles app, clone it to load it twice: once\n\t\/\/ for the articles and once for the tutorials\n\ttutorialsApp := articles.App.Clone()\n\tif _, err := articles.LoadDir(tutorialsApp, pathutil.Relative(\"tutorials\")); err != nil {\n\t\tpanic(err)\n\t}\n\ttutorialsApp.SetName(\"Tutorials\")\n\tApp.Include(\"\/tutorials\/\", tutorialsApp, \"articles-base.html\")\n\n\tarticlesApp := articles.App.Clone()\n\tif _, err := articles.LoadDir(articlesApp, pathutil.Relative(\"articles\")); err != nil {\n\t\tpanic(err)\n\t}\n\tApp.Include(\"\/articles\/\", articlesApp, \"articles-base.html\")\n\n\t\/\/ API\n\tApp.Handle(\"^\/api\/v1\/templates$\", app.JSONHandler(templateListHandler))\n\tApp.HandleNamed(\"^\/api\/v1\/template\/download\/([\\\\w\\\\-_]+)\\\\-v(\\\\d+)\\\\.tar\\\\.gz$\", templateDownloadHandler, templateDownloadHandlerName)\n\n\t\/\/ Load project templates\n\tvar err error\n\ttemplates, err = project.LoadTemplates(pathutil.Relative(\"templates\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, v := range templates {\n\t\tv.URL = App.MustReverse(templateDownloadHandlerName, v.Name, v.Version)\n\t}\n}\n\nfunc main() {\n\tApp.MustListenAndServe()\n}\n<commit_msg>Update to new APIs<commit_after>package main\n\nimport (\n\t\"time\"\n\n\t\"gnd.la\/app\"\n\t\"gnd.la\/apps\/articles\"\n\t\"gnd.la\/apps\/docs\"\n\t_ \"gnd.la\/bootstrap\"\n\t_ \"gnd.la\/commands\"\n\t\"gnd.la\/config\"\n\t_ \"gnd.la\/encoding\/codec\/msgpack\"\n\t\"gnd.la\/internal\/project\"\n\t_ \"gnd.la\/orm\/driver\/postgres\"\n\t_ \"gnd.la\/template\/markdown\"\n\t\"gnd.la\/util\/pathutil\"\n\t\/\/\t\"time\"\n)\n\nvar (\n\tSTATIC_FILES_PATH = pathutil.Relative(\"assets\")\n\tApp *app.App\n)\n\nfunc init() {\n\tconfig.MustParse()\n\tApp = app.New()\n\tApp.HandleAssets(\"\/static\/\", STATIC_FILES_PATH)\n\tApp.AddTemplateVars(map[string]interface{}{\n\t\t\"Repo\": \"http:\/\/github.com\/rainycape\/gondola\",\n\t})\n\tApp.SetTrustXHeaders(true)\n\n\t\/\/ gnd.la handler, used by go get, etc...\n\tApp.HandleOptions(\"\/\", gndlaHandler, &app.HandlerOptions{Host: \"gnd.la\"})\n\n\t\/\/ Site handlers\n\tApp.Handle(\"^\/$\", app.TemplateHandler(\"main.html\", map[string]interface{}{\"Section\": \"home\"}))\n\n\t\/\/ docs app\n\tdocs.DefaultContext.GOROOT = goRoot\n\tdocs.DefaultContext.GOPATH = goPath\n\tApp.Include(\"\/doc\/\", docs.App, \"docs-base.html\")\n\tdocs.Groups = []*docs.Group{\n\t\t{\"Gondola Packages\", []string{\"gnd.la\/\"}},\n\t}\n\t\/\/ Wait 10 seconds so the app starts and go get\n\t\/\/ can retrieve gnd.la, since this same app is\n\t\/\/ serving that content.\n\ttime.AfterFunc(10*time.Second, func() {\n\t\tdocs.StartUpdatingPackages(time.Minute * 10)\n\t})\n\n\t\/\/ articles app, clone it to load it twice: once\n\t\/\/ for the articles and once for the tutorials\n\ttutorialsApp := articles.App.Clone()\n\tif _, err := articles.LoadDir(tutorialsApp, pathutil.Relative(\"tutorials\")); err != nil {\n\t\tpanic(err)\n\t}\n\ttutorialsApp.SetName(\"Tutorials\")\n\tApp.Include(\"\/tutorials\/\", tutorialsApp, \"articles-base.html\")\n\n\tarticlesApp := articles.App.Clone()\n\tif _, err := articles.LoadDir(articlesApp, pathutil.Relative(\"articles\")); err != nil {\n\t\tpanic(err)\n\t}\n\tApp.Include(\"\/articles\/\", articlesApp, \"articles-base.html\")\n\n\t\/\/ API\n\tApp.Handle(\"^\/api\/v1\/templates$\", app.JSONHandler(templateListHandler))\n\tApp.HandleNamed(\"^\/api\/v1\/template\/download\/([\\\\w\\\\-_]+)\\\\-v(\\\\d+)\\\\.tar\\\\.gz$\", templateDownloadHandler, templateDownloadHandlerName)\n\n\t\/\/ Load project templates\n\tvar err error\n\ttemplates, err = project.LoadTemplates(pathutil.Relative(\"templates\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, v := range templates {\n\t\tv.URL = App.MustReverse(templateDownloadHandlerName, v.Name, v.Version)\n\t}\n}\n\nfunc main() {\n\tApp.MustListenAndServe()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/enlivengo\/admin\"\n\t\"github.com\/enlivengo\/database\"\n\t\"github.com\/enlivengo\/enliven\"\n\t\"github.com\/enlivengo\/enliven\/apps\/static\"\n\t\"github.com\/enlivengo\/enliven\/apps\/statik\"\n\t\"github.com\/enlivengo\/enliven\/config\"\n\t\"github.com\/enlivengo\/enliven\/middleware\/session\"\n\t_ \"github.com\/enlivengo\/example\/statik\"\n\t\"github.com\/enlivengo\/user\"\n)\n\nfunc rootHandler(ctx *enliven.Context) {\n\tval := ctx.Session.Get(\"increments\")\n\n\tvar value int\n\n\tif val == \"\" {\n\t\tvalue = 1\n\t} else {\n\t\tvalue, _ = strconv.Atoi(val)\n\t\tvalue++\n\t}\n\n\tnewVal := strconv.Itoa(value)\n\tctx.Session.Set(\"increments\", newVal)\n\n\tctx.Template(\"home\")\n}\n\n\/\/ Example\/Test usage\nfunc main() {\n\tev := enliven.New(config.Config{\n\t\t\"email_smtp_host\": \"localhost\",\n\t\t\"email_smtp_auth\": \"none\",\n\t\t\"email_from_default\": \"noreply@enliven.app\",\n\n\t\t\"database_driver\": \"postgres\",\n\t\t\"database_host\": \"127.0.0.1\",\n\t\t\"database_user\": \"postgres\",\n\t\t\"database_dbname\": \"enliven\",\n\t\t\"database_password\": \"postgres\",\n\n\t\t\"session_redis_address\": \"127.0.0.1:6379\",\n\n\t\t\"assets_static_route\": \"\/assets\/\",\n\t\t\"assets_static_path\": \".\/static\/\",\n\n\t\t\"assets_statik_route\": \"\/statik\/\",\n\t})\n\n\t\/\/ Adding session management middleware\n\tev.AddMiddleware(session.NewRedisStorageMiddleware())\n\t\/\/ev.AddMiddleware(session.NewFileStorageMiddleware())\n\t\/\/ev.AddMiddleware(session.NewMemoryStorageMiddleware())\n\n\t\/\/ Serving static assets from the .\/static\/ folder at the \/assets\/ route\n\tev.AddApp(static.NewApp())\n\n\t\/\/ The statik import at the top of this file sets up the data that will be used by the statik filesystem.\n\t\/\/ Read Statik documentation\n\tev.AddApp(statik.NewApp())\n\n\t\/\/ DATABASE The database app allows you to use....a database\n\tev.AddApp(database.NewApp())\n\n\t\/\/ USER The user app manages the user model\/login\/session\/middleware\n\tev.AddApp(user.NewApp())\n\n\t\/\/ ADMIN The admin app manages the admin panel\n\tev.AddApp(admin.NewApp())\n\n\t\/\/ Simple route handler\n\tev.AddRoute(\"\/\", rootHandler)\n\n\t\/\/ This is a commented-out example of how you can override the existing header\/footer templates.\n\t\/\/ You will most likely want to do this as the built-in one is not meant for general consumption\n\t\/\/ as it has embedded images(base64 encoded)\/css\/javascript.\n\t\/*\n\t\ttemplates := ev.GetTemplates()\n\t\ttemplates.Parse(\"{{define \\\"header\\\"}}OMG Becky did you see her butt?{{end}}\")\n\t\ttemplates.Parse(\"{{define \\\"footer\\\"}}OMG it's so big.{{end}}\")\n\t*\/\n\n\tev.Run()\n}\n<commit_msg>Updating app for new static app location<commit_after>package main\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/enlivengo\/admin\"\n\t\"github.com\/enlivengo\/database\"\n\t\"github.com\/enlivengo\/enliven\"\n\t\"github.com\/enlivengo\/enliven\/apps\/statik\"\n\t\"github.com\/enlivengo\/enliven\/config\"\n\t\"github.com\/enlivengo\/enliven\/middleware\/session\"\n\t_ \"github.com\/enlivengo\/example\/statik\"\n\t\"github.com\/enlivengo\/static\"\n\t\"github.com\/enlivengo\/user\"\n)\n\nfunc rootHandler(ctx *enliven.Context) {\n\tval := ctx.Session.Get(\"increments\")\n\n\tvar value int\n\n\tif val == \"\" {\n\t\tvalue = 1\n\t} else {\n\t\tvalue, _ = strconv.Atoi(val)\n\t\tvalue++\n\t}\n\n\tnewVal := strconv.Itoa(value)\n\tctx.Session.Set(\"increments\", newVal)\n\n\tctx.Template(\"home\")\n}\n\n\/\/ Example\/Test usage\nfunc main() {\n\tev := enliven.New(config.Config{\n\t\t\"email_smtp_host\": \"localhost\",\n\t\t\"email_smtp_auth\": \"none\",\n\t\t\"email_from_default\": \"noreply@enliven.app\",\n\n\t\t\"database_driver\": \"postgres\",\n\t\t\"database_host\": \"127.0.0.1\",\n\t\t\"database_user\": \"postgres\",\n\t\t\"database_dbname\": \"enliven\",\n\t\t\"database_password\": \"postgres\",\n\n\t\t\"session_redis_address\": \"127.0.0.1:6379\",\n\n\t\t\"assets_static_route\": \"\/assets\/\",\n\t\t\"assets_static_path\": \".\/static\/\",\n\n\t\t\"assets_statik_route\": \"\/statik\/\",\n\t})\n\n\t\/\/ Adding session management middleware\n\tev.AddMiddleware(session.NewRedisStorageMiddleware())\n\t\/\/ev.AddMiddleware(session.NewFileStorageMiddleware())\n\t\/\/ev.AddMiddleware(session.NewMemoryStorageMiddleware())\n\n\t\/\/ Serving static assets from the .\/static\/ folder at the \/assets\/ route\n\tev.AddApp(static.NewApp())\n\n\t\/\/ The statik import at the top of this file sets up the data that will be used by the statik filesystem.\n\t\/\/ Read Statik documentation\n\tev.AddApp(statik.NewApp())\n\n\t\/\/ DATABASE The database app allows you to use....a database\n\tev.AddApp(database.NewApp())\n\n\t\/\/ USER The user app manages the user model\/login\/session\/middleware\n\tev.AddApp(user.NewApp())\n\n\t\/\/ ADMIN The admin app manages the admin panel\n\tev.AddApp(admin.NewApp())\n\n\t\/\/ Simple route handler\n\tev.AddRoute(\"\/\", rootHandler)\n\n\t\/\/ This is a commented-out example of how you can override the existing header\/footer templates.\n\t\/\/ You will most likely want to do this as the built-in one is not meant for general consumption\n\t\/\/ as it has embedded images(base64 encoded)\/css\/javascript.\n\t\/*\n\t\ttemplates := ev.GetTemplates()\n\t\ttemplates.Parse(\"{{define \\\"header\\\"}}OMG Becky did you see her butt?{{end}}\")\n\t\ttemplates.Parse(\"{{define \\\"footer\\\"}}OMG it's so big.{{end}}\")\n\t*\/\n\n\tev.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/marianogappa\/raszagal\/analyzer\"\n)\n\nfunc main() {\n\tvar (\n\t\tboolFlags = map[string]*bool{}\n\t\tstringFlags = map[string]*string{}\n\t\tfReplay = flag.String(\"replay\", \"\", \"(>= 1 replays required) path to replay file\")\n\t\tfReplays = flag.String(\"replays\", \"\", \"(>= 1 replays required) comma-separated paths to replay files\")\n\t\tfReplayDir = flag.String(\"replay-dir\", \"\", \"(>= 1 replays required) path to folder with replays (recursive)\")\n\t\tfMe = flag.String(\"me\", \"\", \"comma-separated list of player names to identify as the main player\")\n\t\tfJSON = flag.Bool(\"json\", false, \"outputs a JSON instead of the default CSV\")\n\t\tfCopyToIfMatchesFilters = flag.String(\"copy-to-if-matches-filters\", \"\",\n\t\t\t\"copy replay files matched by -filter-- and not matched by -filter--not-- filters to specified directory\")\n\t\tfQuiet = flag.Bool(\"quiet\", false, \"don't print any errors (discouraged: note that you can silence with 2>\/dev\/null).\")\n\t\tfHelp = flag.Bool(\"help\", false, \"Returns help usage and exits.\")\n\t)\n\tfor name, a := range analyzer.Analyzers {\n\t\tif a.IsStringFlag() {\n\t\t\tstringFlags[name] = flag.String(name, \"\", a.Description())\n\t\t\tif a.IsBooleanResult() {\n\t\t\t\tstringFlags[\"filter--\"+name] = flag.String(\"filter--\"+name, \"\", \"Filter for: \"+a.Description())\n\t\t\t\tstringFlags[\"filter-not--\"+name] = flag.String(\"filter-not--\"+name, \"\", \"Filter-Not for: \"+a.Description())\n\t\t\t}\n\t\t} else {\n\t\t\tboolFlags[name] = flag.Bool(name, false, a.Description())\n\t\t\tif a.IsBooleanResult() {\n\t\t\t\tboolFlags[\"filter--\"+name] = flag.Bool(\"filter--\"+name, false, \"Filter for: \"+a.Description())\n\t\t\t\tboolFlags[\"filter-not--\"+name] = flag.Bool(\"filter-not--\"+name, false, \"Filter-Not for: \"+a.Description())\n\t\t\t}\n\t\t}\n\t}\n\tflag.Parse()\n\tif *fHelp {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tctx := analyzer.Context{Me: map[string]struct{}{}}\n\tif fMe != nil && len(*fMe) > 0 {\n\t\tfor _, name := range strings.Split(*fMe, \",\") {\n\t\t\tctx.Me[strings.TrimSpace(name)] = struct{}{}\n\t\t}\n\t}\n\tvar output analyzer.Output = analyzer.NewCSVOutput(os.Stdout)\n\tif *fJSON {\n\t\toutput = analyzer.NewJSONOutput(os.Stdout)\n\t}\n\treplayPaths := resolveReplayPaths(*fReplay, *fReplays, *fReplayDir)\n\n\tanalyzerRequests := [][]string{}\n\tfor name, f := range boolFlags {\n\t\tif *f {\n\t\t\tanalyzerRequests = append(analyzerRequests, []string{name})\n\t\t}\n\t}\n\tfor name, f := range stringFlags {\n\t\tif *f != \"\" {\n\t\t\ts := []string{name}\n\t\t\ts = append(s, unmarshalArguments(*f)...)\n\t\t\tanalyzerRequests = append(analyzerRequests, s)\n\t\t}\n\t}\n\n\texecutor, errs := analyzer.NewExecutor(replayPaths, analyzerRequests, ctx, output, *fCopyToIfMatchesFilters)\n\tif len(errs) > 0 {\n\t\tif !*fQuiet {\n\t\t\tlog.Println(\"Errors encountered while preparing to execute analyzers:\")\n\t\t\tlog.Println()\n\t\t\tfor _, err := range errs {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\terrs = executor.Execute()\n\tif len(errs) > 0 {\n\t\tif !*fQuiet {\n\t\t\tlog.Println(\"Errors encountered while executing analyzers:\")\n\t\t\tlog.Println()\n\t\t\tfor _, err := range errs {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc resolveReplayPaths(fReplay, fReplays, fReplayDir string) []string {\n\tvar replays = map[string]struct{}{}\n\tfReplay = strings.TrimSpace(fReplay)\n\tif len(fReplay) >= 5 && (fReplay)[len(fReplay)-4:] == \".rep\" {\n\t\treplays[fReplay] = struct{}{}\n\t}\n\tif fReplays != \"\" {\n\t\tfor _, r := range strings.Split(fReplays, \",\") {\n\t\t\tr = strings.TrimSpace(r)\n\t\t\tif len(r) >= 5 && r[len(r)-4:] == \".rep\" {\n\t\t\t\treplays[r] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\tif fReplayDir != \"\" {\n\t\te := filepath.Walk(fReplayDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err == nil && len(info.Name()) >= 5 && info.Name()[len(info.Name())-4:] == \".rep\" {\n\t\t\t\tr := path\n\t\t\t\treplays[r] = struct{}{}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\t}\n\tvar res = []string{}\n\tfor replay := range replays {\n\t\tres = append(res, replay)\n\t}\n\treturn res\n}\n\nfunc unmarshalArguments(s string) []string {\n\tss := []string{}\n\tfor _, _si := range strings.Split(s, \",\") {\n\t\tsi := strings.TrimSpace(_si)\n\t\tif si != \"\" {\n\t\t\tss = append(ss, si)\n\t\t}\n\t}\n\treturn ss\n}\n<commit_msg>Renames tool to sctool.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/marianogappa\/sctool\/analyzer\"\n)\n\nfunc main() {\n\tvar (\n\t\tboolFlags = map[string]*bool{}\n\t\tstringFlags = map[string]*string{}\n\t\tfReplay = flag.String(\"replay\", \"\", \"(>= 1 replays required) path to replay file\")\n\t\tfReplays = flag.String(\"replays\", \"\", \"(>= 1 replays required) comma-separated paths to replay files\")\n\t\tfReplayDir = flag.String(\"replay-dir\", \"\", \"(>= 1 replays required) path to folder with replays (recursive)\")\n\t\tfMe = flag.String(\"me\", \"\", \"comma-separated list of player names to identify as the main player\")\n\t\tfJSON = flag.Bool(\"json\", false, \"outputs a JSON instead of the default CSV\")\n\t\tfCopyToIfMatchesFilters = flag.String(\"copy-to-if-matches-filters\", \"\",\n\t\t\t\"copy replay files matched by -filter-- and not matched by -filter--not-- filters to specified directory\")\n\t\tfQuiet = flag.Bool(\"quiet\", false, \"don't print any errors (discouraged: note that you can silence with 2>\/dev\/null).\")\n\t\tfHelp = flag.Bool(\"help\", false, \"Returns help usage and exits.\")\n\t)\n\tfor name, a := range analyzer.Analyzers {\n\t\tif a.IsStringFlag() {\n\t\t\tstringFlags[name] = flag.String(name, \"\", a.Description())\n\t\t\tif a.IsBooleanResult() {\n\t\t\t\tstringFlags[\"filter--\"+name] = flag.String(\"filter--\"+name, \"\", \"Filter for: \"+a.Description())\n\t\t\t\tstringFlags[\"filter-not--\"+name] = flag.String(\"filter-not--\"+name, \"\", \"Filter-Not for: \"+a.Description())\n\t\t\t}\n\t\t} else {\n\t\t\tboolFlags[name] = flag.Bool(name, false, a.Description())\n\t\t\tif a.IsBooleanResult() {\n\t\t\t\tboolFlags[\"filter--\"+name] = flag.Bool(\"filter--\"+name, false, \"Filter for: \"+a.Description())\n\t\t\t\tboolFlags[\"filter-not--\"+name] = flag.Bool(\"filter-not--\"+name, false, \"Filter-Not for: \"+a.Description())\n\t\t\t}\n\t\t}\n\t}\n\tflag.Parse()\n\tif *fHelp {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tctx := analyzer.Context{Me: map[string]struct{}{}}\n\tif fMe != nil && len(*fMe) > 0 {\n\t\tfor _, name := range strings.Split(*fMe, \",\") {\n\t\t\tctx.Me[strings.TrimSpace(name)] = struct{}{}\n\t\t}\n\t}\n\tvar output analyzer.Output = analyzer.NewCSVOutput(os.Stdout)\n\tif *fJSON {\n\t\toutput = analyzer.NewJSONOutput(os.Stdout)\n\t}\n\treplayPaths := resolveReplayPaths(*fReplay, *fReplays, *fReplayDir)\n\n\tanalyzerRequests := [][]string{}\n\tfor name, f := range boolFlags {\n\t\tif *f {\n\t\t\tanalyzerRequests = append(analyzerRequests, []string{name})\n\t\t}\n\t}\n\tfor name, f := range stringFlags {\n\t\tif *f != \"\" {\n\t\t\ts := []string{name}\n\t\t\ts = append(s, unmarshalArguments(*f)...)\n\t\t\tanalyzerRequests = append(analyzerRequests, s)\n\t\t}\n\t}\n\n\texecutor, errs := analyzer.NewExecutor(replayPaths, analyzerRequests, ctx, output, *fCopyToIfMatchesFilters)\n\tif len(errs) > 0 {\n\t\tif !*fQuiet {\n\t\t\tlog.Println(\"Errors encountered while preparing to execute analyzers:\")\n\t\t\tlog.Println()\n\t\t\tfor _, err := range errs {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\terrs = executor.Execute()\n\tif len(errs) > 0 {\n\t\tif !*fQuiet {\n\t\t\tlog.Println(\"Errors encountered while executing analyzers:\")\n\t\t\tlog.Println()\n\t\t\tfor _, err := range errs {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc resolveReplayPaths(fReplay, fReplays, fReplayDir string) []string {\n\tvar replays = map[string]struct{}{}\n\tfReplay = strings.TrimSpace(fReplay)\n\tif len(fReplay) >= 5 && (fReplay)[len(fReplay)-4:] == \".rep\" {\n\t\treplays[fReplay] = struct{}{}\n\t}\n\tif fReplays != \"\" {\n\t\tfor _, r := range strings.Split(fReplays, \",\") {\n\t\t\tr = strings.TrimSpace(r)\n\t\t\tif len(r) >= 5 && r[len(r)-4:] == \".rep\" {\n\t\t\t\treplays[r] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\tif fReplayDir != \"\" {\n\t\te := filepath.Walk(fReplayDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err == nil && len(info.Name()) >= 5 && info.Name()[len(info.Name())-4:] == \".rep\" {\n\t\t\t\tr := path\n\t\t\t\treplays[r] = struct{}{}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\t}\n\tvar res = []string{}\n\tfor replay := range replays {\n\t\tres = append(res, replay)\n\t}\n\treturn res\n}\n\nfunc unmarshalArguments(s string) []string {\n\tss := []string{}\n\tfor _, _si := range strings.Split(s, \",\") {\n\t\tsi := strings.TrimSpace(_si)\n\t\tif si != \"\" {\n\t\t\tss = append(ss, si)\n\t\t}\n\t}\n\treturn ss\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst VERSION = \"0.2\"\n\ntype Packet struct {\n\tBucket string\n\tValue int\n\tModifier string\n\tSampling float32\n}\n\ntype Percentiles []int\n\nfunc (a *Percentiles) Set(s string) error {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*a = append(*a, i)\n\treturn nil\n}\nfunc (a *Percentiles) String() string {\n\treturn fmt.Sprintf(\"%v\", *a)\n}\n\nvar (\n\tserviceAddress = flag.String(\"address\", \":8125\", \"UDP service address\")\n\tgraphiteAddress = flag.String(\"graphite\", \"127.0.0.1:2003\", \"Graphite service address (or - to disable)\")\n\tflushInterval = flag.Int64(\"flush-interval\", 10, \"Flush interval (seconds)\")\n\tshowVersion = flag.Bool(\"version\", false, \"print version string\")\n\tpercentThreshold = Percentiles{}\n)\n\nfunc init() {\n\tflag.Var(&percentThreshold, \"percent-threshold\", \"Threshold percent (may be given multiple times)\")\n}\n\nvar (\n\tIn = make(chan *Packet, 1000)\n\tcounters = make(map[string]int)\n\tgauges = make(map[string]int)\n\ttimers = make(map[string][]int)\n)\n\nfunc monitor() {\n\tticker := time.NewTicker(time.Duration(*flushInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tsubmit()\n\t\tcase s := <-In:\n\t\t\tif s.Modifier == \"ms\" {\n\t\t\t\t_, ok := timers[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tvar t []int\n\t\t\t\t\ttimers[s.Bucket] = t\n\t\t\t\t}\n\t\t\t\ttimers[s.Bucket] = append(timers[s.Bucket], s.Value)\n\t\t\t} else if s.Modifier == \"g\" {\n\t\t\t\tgauges[s.Bucket] = int(s.Value)\n\t\t\t} else {\n\t\t\t\t_, ok := counters[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tcounters[s.Bucket] = 0\n\t\t\t\t}\n\t\t\t\tcounters[s.Bucket] += int(float32(s.Value) * (1 \/ s.Sampling))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc submit() {\n\tclient, err := net.Dial(\"tcp\", *graphiteAddress)\n\tif err != nil {\n\t\tlog.Printf(\"Error dialing\", err.Error())\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tnumStats := 0\n\tnow := time.Now().Unix()\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor s, c := range counters {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tvaluePerSecond := int64(c) \/ *flushInterval\n\t\tfmt.Fprintf(buffer, \"stats.counts.%s.rate %d %d\\n\", s, valuePerSecond, now)\n\t\tfmt.Fprintf(buffer, \"stats.counts.%s %d %d\\n\", s, c, now)\n\t\tcounters[s] = -1\n\t\tnumStats++\n\t}\n\n\tfor g, c := range gauges {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"stats.gauges.%s %d %d\\n\", g, c, now)\n\t\tgauges[g] = -1\n\t\tnumStats++\n\t}\n\n\tfor u, t := range timers {\n\t\tif len(t) > 0 {\n\t\t\tnumStats++\n\t\t\tsort.Ints(t)\n\t\t\tmin := t[0]\n\t\t\tmax := t[len(t)-1]\n\t\t\tmean := min\n\t\t\tmaxAtThreshold := max\n\t\t\tcount := len(t)\n\t\t\tsum := max\n\n\t\t\tcumulativeValues := make([]int, len(t))\n\t\t\tlast := 0\n\t\t\tfor i, tt := range t {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tcumulativeValues[i] = tt\n\t\t\t\t} else {\n\t\t\t\t\tcumulativeValues[i] = last + tt\n\t\t\t\t}\n\t\t\t\tlast = tt\n\t\t\t}\n\n\t\t\tfor _, pct := range percentThreshold {\n\n\t\t\t\tif len(t) > 1 {\n\t\t\t\t\tvar thresholdIndex int\n\t\t\t\t\tthresholdIndex = ((100 - pct) \/ 100) * count\n\t\t\t\t\tnumInThreshold := count - thresholdIndex\n\t\t\t\t\tmaxAtThreshold = t[numInThreshold-1]\n\t\t\t\t\tsum = cumulativeValues[numInThreshold-1]\n\t\t\t\t\tmean = sum \/ numInThreshold\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.mean_%d %d %d\\n\", u, pct, mean, now)\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.upper_%d %d %d\\n\", u, pct, maxAtThreshold, now)\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.sum_%d %d %d\\n\", u, pct, sum, now)\n\t\t\t}\n\n\t\t\tsum = cumulativeValues[len(t)-1]\n\n\t\t\tvar z []int\n\t\t\ttimers[u] = z\n\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.mean %d %d\\n\", u, mean, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.upper %d %d\\n\", u, max, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.lower %d %d\\n\", u, min, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.sum %d %d\\n\", u, sum, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.count %d %d\\n\", u, count, now)\n\t\t}\n\t}\n\tif numStats == 0 {\n\t\treturn\n\t}\n\tlog.Printf(\"got %d stats\", numStats)\n\tfmt.Fprintf(buffer, \"statsd.numStats %d %d\\n\", numStats, now)\n\tdata := buffer.Bytes()\n\tclient.Write(data)\n\n}\n\nfunc parseMessage(buf *bytes.Buffer) []*Packet {\n\tvar sanitizeRegexp = regexp.MustCompile(\"[^a-zA-Z0-9\\\\-_\\\\.:\\\\|@]\")\n\tvar packetRegexp = regexp.MustCompile(\"([a-zA-Z0-9_]+):([0-9]+)\\\\|(g|c|ms)(\\\\|@([0-9\\\\.]+))?\")\n\n\ts := sanitizeRegexp.ReplaceAllString(buf.String(), \"\")\n\n\tvar output []*Packet\n\tfor _, item := range packetRegexp.FindAllStringSubmatch(s, -1) {\n\t\tvalue, err := strconv.Atoi(item[2])\n\t\tif err != nil {\n\t\t\t\/\/ todo print out this error\n\t\t\tif item[3] == \"ms\" {\n\t\t\t\tvalue = 0\n\t\t\t} else {\n\t\t\t\tvalue = 1\n\t\t\t}\n\t\t}\n\n\t\tsampleRate, err := strconv.ParseFloat(item[5], 32)\n\t\tif err != nil {\n\t\t\tsampleRate = 1\n\t\t}\n\n\t\tpacket := &Packet{\n\t\t\tBucket: item[1],\n\t\t\tValue: value,\n\t\t\tModifier: item[3],\n\t\t\tSampling: float32(sampleRate),\n\t\t}\n\t\toutput = append(output, packet)\n\t}\n\treturn output\n}\n\nfunc udpListener() {\n\taddress, _ := net.ResolveUDPAddr(\"udp\", *serviceAddress)\n\tlog.Printf(\"Listening on %s\", address)\n\tlistener, err := net.ListenUDP(\"udp\", address)\n\tif err != nil {\n\t\tlog.Fatalf(\"ListenAndServe: %s\", err.Error())\n\t}\n\tdefer listener.Close()\n\tmessage := make([]byte, 512)\n\tfor {\n\t\tn, remaddr, err := listener.ReadFrom(message)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error reading from %v %s\", remaddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tbuf := bytes.NewBuffer(message[0:n])\n\t\tpackets := parseMessage(buf)\n\t\tfor _, p := range packets {\n\t\t\tIn <- p\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Printf(\"gographite v%s\\n\", VERSION)\n\t\treturn\n\t}\n\tgo udpListener()\n\tmonitor()\n}\n<commit_msg>scope with .count; graphite doesn't expose lower level values in the tree<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst VERSION = \"0.2\"\n\ntype Packet struct {\n\tBucket string\n\tValue int\n\tModifier string\n\tSampling float32\n}\n\ntype Percentiles []int\n\nfunc (a *Percentiles) Set(s string) error {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*a = append(*a, i)\n\treturn nil\n}\nfunc (a *Percentiles) String() string {\n\treturn fmt.Sprintf(\"%v\", *a)\n}\n\nvar (\n\tserviceAddress = flag.String(\"address\", \":8125\", \"UDP service address\")\n\tgraphiteAddress = flag.String(\"graphite\", \"127.0.0.1:2003\", \"Graphite service address (or - to disable)\")\n\tflushInterval = flag.Int64(\"flush-interval\", 10, \"Flush interval (seconds)\")\n\tshowVersion = flag.Bool(\"version\", false, \"print version string\")\n\tpercentThreshold = Percentiles{}\n)\n\nfunc init() {\n\tflag.Var(&percentThreshold, \"percent-threshold\", \"Threshold percent (may be given multiple times)\")\n}\n\nvar (\n\tIn = make(chan *Packet, 1000)\n\tcounters = make(map[string]int)\n\tgauges = make(map[string]int)\n\ttimers = make(map[string][]int)\n)\n\nfunc monitor() {\n\tticker := time.NewTicker(time.Duration(*flushInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tsubmit()\n\t\tcase s := <-In:\n\t\t\tif s.Modifier == \"ms\" {\n\t\t\t\t_, ok := timers[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tvar t []int\n\t\t\t\t\ttimers[s.Bucket] = t\n\t\t\t\t}\n\t\t\t\ttimers[s.Bucket] = append(timers[s.Bucket], s.Value)\n\t\t\t} else if s.Modifier == \"g\" {\n\t\t\t\tgauges[s.Bucket] = int(s.Value)\n\t\t\t} else {\n\t\t\t\t_, ok := counters[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tcounters[s.Bucket] = 0\n\t\t\t\t}\n\t\t\t\tcounters[s.Bucket] += int(float32(s.Value) * (1 \/ s.Sampling))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc submit() {\n\tclient, err := net.Dial(\"tcp\", *graphiteAddress)\n\tif err != nil {\n\t\tlog.Printf(\"Error dialing\", err.Error())\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tnumStats := 0\n\tnow := time.Now().Unix()\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor s, c := range counters {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tvaluePerSecond := int64(c) \/ *flushInterval\n\t\tfmt.Fprintf(buffer, \"stats.counters.%s.rate %d %d\\n\", s, valuePerSecond, now)\n\t\tfmt.Fprintf(buffer, \"stats.counters.%s.count %d %d\\n\", s, c, now)\n\t\tcounters[s] = -1\n\t\tnumStats++\n\t}\n\n\tfor g, c := range gauges {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"stats.gauges.%s %d %d\\n\", g, c, now)\n\t\tgauges[g] = -1\n\t\tnumStats++\n\t}\n\n\tfor u, t := range timers {\n\t\tif len(t) > 0 {\n\t\t\tnumStats++\n\t\t\tsort.Ints(t)\n\t\t\tmin := t[0]\n\t\t\tmax := t[len(t)-1]\n\t\t\tmean := min\n\t\t\tmaxAtThreshold := max\n\t\t\tcount := len(t)\n\t\t\tsum := max\n\n\t\t\tcumulativeValues := make([]int, len(t))\n\t\t\tlast := 0\n\t\t\tfor i, tt := range t {\n\t\t\t\tif i == 0 {\n\t\t\t\t\tcumulativeValues[i] = tt\n\t\t\t\t} else {\n\t\t\t\t\tcumulativeValues[i] = last + tt\n\t\t\t\t}\n\t\t\t\tlast = tt\n\t\t\t}\n\n\t\t\tfor _, pct := range percentThreshold {\n\n\t\t\t\tif len(t) > 1 {\n\t\t\t\t\tvar thresholdIndex int\n\t\t\t\t\tthresholdIndex = ((100 - pct) \/ 100) * count\n\t\t\t\t\tnumInThreshold := count - thresholdIndex\n\t\t\t\t\tmaxAtThreshold = t[numInThreshold-1]\n\t\t\t\t\tsum = cumulativeValues[numInThreshold-1]\n\t\t\t\t\tmean = sum \/ numInThreshold\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.mean_%d %d %d\\n\", u, pct, mean, now)\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.upper_%d %d %d\\n\", u, pct, maxAtThreshold, now)\n\t\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.sum_%d %d %d\\n\", u, pct, sum, now)\n\t\t\t}\n\n\t\t\tsum = cumulativeValues[len(t)-1]\n\n\t\t\tvar z []int\n\t\t\ttimers[u] = z\n\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.mean %d %d\\n\", u, mean, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.upper %d %d\\n\", u, max, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.lower %d %d\\n\", u, min, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.sum %d %d\\n\", u, sum, now)\n\t\t\tfmt.Fprintf(buffer, \"stats.timers.%s.count %d %d\\n\", u, count, now)\n\t\t}\n\t}\n\tif numStats == 0 {\n\t\treturn\n\t}\n\tlog.Printf(\"got %d stats\", numStats)\n\tfmt.Fprintf(buffer, \"statsd.numStats %d %d\\n\", numStats, now)\n\tdata := buffer.Bytes()\n\tclient.Write(data)\n\n}\n\nfunc parseMessage(buf *bytes.Buffer) []*Packet {\n\tvar sanitizeRegexp = regexp.MustCompile(\"[^a-zA-Z0-9\\\\-_\\\\.:\\\\|@]\")\n\tvar packetRegexp = regexp.MustCompile(\"([a-zA-Z0-9_]+):([0-9]+)\\\\|(g|c|ms)(\\\\|@([0-9\\\\.]+))?\")\n\n\ts := sanitizeRegexp.ReplaceAllString(buf.String(), \"\")\n\n\tvar output []*Packet\n\tfor _, item := range packetRegexp.FindAllStringSubmatch(s, -1) {\n\t\tvalue, err := strconv.Atoi(item[2])\n\t\tif err != nil {\n\t\t\t\/\/ todo print out this error\n\t\t\tif item[3] == \"ms\" {\n\t\t\t\tvalue = 0\n\t\t\t} else {\n\t\t\t\tvalue = 1\n\t\t\t}\n\t\t}\n\n\t\tsampleRate, err := strconv.ParseFloat(item[5], 32)\n\t\tif err != nil {\n\t\t\tsampleRate = 1\n\t\t}\n\n\t\tpacket := &Packet{\n\t\t\tBucket: item[1],\n\t\t\tValue: value,\n\t\t\tModifier: item[3],\n\t\t\tSampling: float32(sampleRate),\n\t\t}\n\t\toutput = append(output, packet)\n\t}\n\treturn output\n}\n\nfunc udpListener() {\n\taddress, _ := net.ResolveUDPAddr(\"udp\", *serviceAddress)\n\tlog.Printf(\"Listening on %s\", address)\n\tlistener, err := net.ListenUDP(\"udp\", address)\n\tif err != nil {\n\t\tlog.Fatalf(\"ListenAndServe: %s\", err.Error())\n\t}\n\tdefer listener.Close()\n\tmessage := make([]byte, 512)\n\tfor {\n\t\tn, remaddr, err := listener.ReadFrom(message)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error reading from %v %s\", remaddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tbuf := bytes.NewBuffer(message[0:n])\n\t\tpackets := parseMessage(buf)\n\t\tfor _, p := range packets {\n\t\t\tIn <- p\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Printf(\"gographite v%s\\n\", VERSION)\n\t\treturn\n\t}\n\tgo udpListener()\n\tmonitor()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"errors\"\n \"fmt\"\n \"log\"\n \"os\"\n \"os\/exec\"\n \"path\/filepath\"\n \"time\"\n\n \"github.com\/briandowns\/spinner\"\n \"github.com\/fatih\/color\"\n \"github.com\/mitchellh\/go-homedir\"\n \"github.com\/urfave\/cli\"\n)\n\nvar (\n platformHost = \"https:\/\/notable.zurb.com\"\n version = \"0.1.0\"\n\n authPath string\n s = spinner.New(spinner.CharSets[6], 100*time.Millisecond)\n checkNotEmpty = func(input string) error {\n if input == \"\" {\n return errors.New(\"Input should not be empty!\")\n }\n return nil\n }\n)\n\nfunc check(e error) {\n if e != nil {\n panic(e)\n }\n}\n\n\/\/ EnvConfig is the global configuration object\ntype EnvConfig struct {\n AuthToken string `json:\"token\"`\n}\n\nvar envConfig = EnvConfig{}\n\nfunc main() {\n authRoot, err := homedir.Dir()\n if err != nil {\n color.Red(\"Cannot access your home directory to check for authentication.\")\n os.Exit(1)\n }\n authPath = fmt.Sprintf(\"%s\/.notable_auth\", authRoot)\n app := cli.NewApp()\n app.EnableBashCompletion = true\n app.Name = \"notable\"\n app.Usage = \"Interface with Notable (http:\/\/zurb.com\/notable)\"\n app.Version = version\n app.Author = \"Jordan Humphreys (jordan@zurb.com)\"\n app.Copyright = \"ZURB, Inc. 2016 (http:\/\/zurb.com)\"\n\n app.Commands = []cli.Command{\n {\n Name: \"code\",\n Aliases: []string{\"c\"},\n Usage: \"Send site to Notable, local or live!\",\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"dest, d\",\n Value: \".\",\n Usage: \"destination\",\n },\n },\n Action: func(c *cli.Context) error {\n loadAndCheckEnv()\n runCode(c)\n return nil\n },\n },\n {\n Name: \"notebook\",\n Aliases: []string{\"c\"},\n Usage: \"get design feedback on your images\",\n Subcommands: []cli.Command{\n {\n Name: \"create\",\n Usage: \"create a new notebook\",\n Action: func(c *cli.Context) error {\n loadAndCheckEnv()\n runNotebook(c)\n return nil\n },\n },\n },\n },\n {\n Name: \"login\",\n Aliases: []string{\"l\"},\n Usage: \"Authenticate the CLI\",\n Action: func(c *cli.Context) error {\n runAuth(c)\n return nil\n },\n },\n {\n Name: \"logout\",\n Aliases: []string{\"lo\"},\n Usage: \"Deauthorize this computer\",\n Action: func(c *cli.Context) error {\n removeAuth()\n return nil\n },\n },\n }\n\n app.Run(os.Args)\n}\n\nfunc loadAndCheckEnv() {\n config, err := readAuth()\n\n if err != nil {\n log.Fatal(err)\n }\n\n envConfig = config\n}\n\nfunc currentPath() string {\n dir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n if err != nil {\n log.Fatal(err)\n }\n return dir\n}\n\nfunc wGetCheck() {\n _, err := exec.LookPath(\"wget\")\n if err != nil {\n color.Red(\"Missing dependency!\\n\")\n color.Red(\"Please install wget using Homebrew or some other fancy way:\\n\")\n color.Green(\"brew up && brew install wget\\n\")\n os.Exit(1)\n }\n}\n<commit_msg>Bump version.<commit_after>package main\n\nimport (\n \"errors\"\n \"fmt\"\n \"log\"\n \"os\"\n \"os\/exec\"\n \"path\/filepath\"\n \"time\"\n\n \"github.com\/briandowns\/spinner\"\n \"github.com\/fatih\/color\"\n \"github.com\/mitchellh\/go-homedir\"\n \"github.com\/urfave\/cli\"\n)\n\nvar (\n platformHost = \"https:\/\/notable.zurb.com\"\n version = \"0.1.1\"\n\n authPath string\n s = spinner.New(spinner.CharSets[6], 100*time.Millisecond)\n checkNotEmpty = func(input string) error {\n if input == \"\" {\n return errors.New(\"Input should not be empty!\")\n }\n return nil\n }\n)\n\nfunc check(e error) {\n if e != nil {\n panic(e)\n }\n}\n\n\/\/ EnvConfig is the global configuration object\ntype EnvConfig struct {\n AuthToken string `json:\"token\"`\n}\n\nvar envConfig = EnvConfig{}\n\nfunc main() {\n authRoot, err := homedir.Dir()\n if err != nil {\n color.Red(\"Cannot access your home directory to check for authentication.\")\n os.Exit(1)\n }\n authPath = fmt.Sprintf(\"%s\/.notable_auth\", authRoot)\n app := cli.NewApp()\n app.EnableBashCompletion = true\n app.Name = \"notable\"\n app.Usage = \"Interface with Notable (http:\/\/zurb.com\/notable)\"\n app.Version = version\n app.Author = \"Jordan Humphreys (jordan@zurb.com)\"\n app.Copyright = \"ZURB, Inc. 2016 (http:\/\/zurb.com)\"\n\n app.Commands = []cli.Command{\n {\n Name: \"code\",\n Aliases: []string{\"c\"},\n Usage: \"Send site to Notable, local or live!\",\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"dest, d\",\n Value: \".\",\n Usage: \"destination\",\n },\n },\n Action: func(c *cli.Context) error {\n loadAndCheckEnv()\n runCode(c)\n return nil\n },\n },\n {\n Name: \"notebook\",\n Aliases: []string{\"c\"},\n Usage: \"get design feedback on your images\",\n Subcommands: []cli.Command{\n {\n Name: \"create\",\n Usage: \"create a new notebook\",\n Action: func(c *cli.Context) error {\n loadAndCheckEnv()\n runNotebook(c)\n return nil\n },\n },\n },\n },\n {\n Name: \"login\",\n Aliases: []string{\"l\"},\n Usage: \"Authenticate the CLI\",\n Action: func(c *cli.Context) error {\n runAuth(c)\n return nil\n },\n },\n {\n Name: \"logout\",\n Aliases: []string{\"lo\"},\n Usage: \"Deauthorize this computer\",\n Action: func(c *cli.Context) error {\n removeAuth()\n return nil\n },\n },\n }\n\n app.Run(os.Args)\n}\n\nfunc loadAndCheckEnv() {\n config, err := readAuth()\n\n if err != nil {\n log.Fatal(err)\n }\n\n envConfig = config\n}\n\nfunc currentPath() string {\n dir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n if err != nil {\n log.Fatal(err)\n }\n return dir\n}\n\nfunc wGetCheck() {\n _, err := exec.LookPath(\"wget\")\n if err != nil {\n color.Red(\"Missing dependency!\\n\")\n color.Red(\"Please install wget using Homebrew or some other fancy way:\\n\")\n color.Green(\"brew up && brew install wget\\n\")\n os.Exit(1)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/unrolled\/secure\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/commons\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/conf\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/server\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\n\tcurrDir, e := os.Getwd()\n\tif nil != e {\n\t\tlog.Fatalf(\"Cannot get workdir: %s\", e.Error())\n\t}\n\n\trpConf := struct {\n\t\tCfg *conf.RpConfig\n\t\tStaticsPath string `env:\"RP_STATICS_PATH\"`\n\t}{\n\t\tCfg: conf.EmptyConfig(),\n\t\tStaticsPath: currDir,\n\t}\n\n\terr := conf.LoadConfig(&rpConf)\n\tif nil != err {\n\t\tlog.Fatalf(\"Cannot log app config\")\n\t}\n\n\trpConf.Cfg.AppName = \"ui\"\n\n\tinfo := commons.GetBuildInfo()\n\tinfo.Name = \"Service UI\"\n\n\tsrv := server.New(rpConf.Cfg, info)\n\tsrv.WithRouter(func(router *chi.Mux) {\n\n\t\t\/\/apply compression\n\t\trouter.Use(func(next http.Handler) http.Handler {\n\t\t\treturn handlers.CompressHandler(next)\n\t\t})\n\n\t\t\/\/content security policy\n\t\tcsp := map[string][]string{\n\t\t\t\"default-src\": {\"'self'\", \"'unsafe-inline'\"},\n\t\t\t\"script-src\": {\n\t\t\t\t\"'self'\",\n\t\t\t\t\"'unsafe-inline'\",\n\t\t\t\t\"'unsafe-eval'\",\n\t\t\t\t\"status.reportportal.io\",\n\t\t\t\t\"www.google-analytics.com\",\n\t\t\t\t\"stats.g.doubleclick.net\",\n\t\t\t\t\"*.epam.com\",\n\t\t\t\t\"*.uservoice.com\",\n\t\t\t},\n\t\t\t\"img-src\": {\"'self'\", \"www.google-analytics.com\"},\n\t\t\t\"object-src\": {\"'self'\"},\n\t\t}\n\n\t\t\/\/apply content security policies\n\t\trouter.Use(func(next http.Handler) http.Handler {\n\t\t\treturn secure.New(secure.Options{\n\t\t\t\tFrameDeny: true,\n\t\t\t\tContentTypeNosniff: true,\n\t\t\t\tBrowserXssFilter: true,\n\t\t\t\tContentSecurityPolicy: buildCSP(csp),\n\t\t\t}).Handler(next)\n\t\t})\n\n\t\terr := os.Chdir(rpConf.StaticsPath)\n\t\tif nil != err {\n\t\t\tlog.Fatalf(\"Dir %s not found\", rpConf.StaticsPath)\n\t\t}\n\n\t\trouter.Handle(\"\/*\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\/\/trim query params\n\t\t\text := filepath.Ext(trimQuery(r.URL.String(), \"?\"))\n\n\t\t\t\/\/ never cache html\n\t\t\tif \"\/\" == r.URL.String() || \".html\" == ext {\n\t\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\t\t}\n\n\t\t\thttp.FileServer(http.Dir(rpConf.StaticsPath)).ServeHTTP(&redirectingRW{ResponseWriter: w, Request: r}, r)\n\t\t}))\n\n\t})\n\n\tsrv.StartServer()\n\n}\n\nfunc trimQuery(s string, sep string) string {\n\tsepIndex := strings.Index(s, sep)\n\tif -1 != sepIndex {\n\t\treturn s[:sepIndex]\n\t}\n\treturn s\n}\n\nfunc buildCSP(csp map[string][]string) string {\n\tinstr := make([]string, len(csp))\n\tfor k, v := range csp {\n\t\tinstr = append(instr, k+\" \"+strings.Join(v, \" \"))\n\t}\n\treturn strings.Join(instr, \"; \")\n\n}\n\ntype redirectingRW struct {\n\t*http.Request\n\thttp.ResponseWriter\n\tignore bool\n}\n\nfunc (hrw *redirectingRW) Header() http.Header {\n\treturn hrw.ResponseWriter.Header()\n}\n\nfunc (hrw *redirectingRW) WriteHeader(status int) {\n\tif status == 404 {\n\t\thrw.ignore = true\n\t\thttp.Redirect(hrw.ResponseWriter, hrw.Request, \"\/ui\/404.html\", http.StatusTemporaryRedirect)\n\t} else {\n\t\thrw.ResponseWriter.WriteHeader(status)\n\t}\n\n}\n\nfunc (hrw *redirectingRW) Write(p []byte) (int, error) {\n\tif hrw.ignore {\n\t\treturn len(p), nil\n\t}\n\treturn hrw.ResponseWriter.Write(p)\n}\n<commit_msg>update img-src for the base64 imgs<commit_after>package main\n\nimport (\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/unrolled\/secure\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/commons\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/conf\"\n\t\"gopkg.in\/reportportal\/commons-go.v1\/server\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\n\tcurrDir, e := os.Getwd()\n\tif nil != e {\n\t\tlog.Fatalf(\"Cannot get workdir: %s\", e.Error())\n\t}\n\n\trpConf := struct {\n\t\tCfg *conf.RpConfig\n\t\tStaticsPath string `env:\"RP_STATICS_PATH\"`\n\t}{\n\t\tCfg: conf.EmptyConfig(),\n\t\tStaticsPath: currDir,\n\t}\n\n\terr := conf.LoadConfig(&rpConf)\n\tif nil != err {\n\t\tlog.Fatalf(\"Cannot log app config\")\n\t}\n\n\trpConf.Cfg.AppName = \"ui\"\n\n\tinfo := commons.GetBuildInfo()\n\tinfo.Name = \"Service UI\"\n\n\tsrv := server.New(rpConf.Cfg, info)\n\tsrv.WithRouter(func(router *chi.Mux) {\n\n\t\t\/\/apply compression\n\t\trouter.Use(func(next http.Handler) http.Handler {\n\t\t\treturn handlers.CompressHandler(next)\n\t\t})\n\n\t\t\/\/content security policy\n\t\tcsp := map[string][]string{\n\t\t\t\"default-src\": {\"'self'\", \"'unsafe-inline'\"},\n\t\t\t\"script-src\": {\n\t\t\t\t\"'self'\",\n\t\t\t\t\"'unsafe-inline'\",\n\t\t\t\t\"'unsafe-eval'\",\n\t\t\t\t\"status.reportportal.io\",\n\t\t\t\t\"www.google-analytics.com\",\n\t\t\t\t\"stats.g.doubleclick.net\",\n\t\t\t\t\"*.epam.com\",\n\t\t\t\t\"*.uservoice.com\",\n\t\t\t},\n\t\t\t\"img-src\": {\"'self'\", \"data:\", \"www.google-analytics.com\"},\n\t\t\t\"object-src\": {\"'self'\"},\n\t\t}\n\n\t\t\/\/apply content security policies\n\t\trouter.Use(func(next http.Handler) http.Handler {\n\t\t\treturn secure.New(secure.Options{\n\t\t\t\tFrameDeny: true,\n\t\t\t\tContentTypeNosniff: true,\n\t\t\t\tBrowserXssFilter: true,\n\t\t\t\tContentSecurityPolicy: buildCSP(csp),\n\t\t\t}).Handler(next)\n\t\t})\n\n\t\terr := os.Chdir(rpConf.StaticsPath)\n\t\tif nil != err {\n\t\t\tlog.Fatalf(\"Dir %s not found\", rpConf.StaticsPath)\n\t\t}\n\n\t\trouter.Handle(\"\/*\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\/\/trim query params\n\t\t\text := filepath.Ext(trimQuery(r.URL.String(), \"?\"))\n\n\t\t\t\/\/ never cache html\n\t\t\tif \"\/\" == r.URL.String() || \".html\" == ext {\n\t\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\t\t}\n\n\t\t\thttp.FileServer(http.Dir(rpConf.StaticsPath)).ServeHTTP(&redirectingRW{ResponseWriter: w, Request: r}, r)\n\t\t}))\n\n\t})\n\n\tsrv.StartServer()\n\n}\n\nfunc trimQuery(s string, sep string) string {\n\tsepIndex := strings.Index(s, sep)\n\tif -1 != sepIndex {\n\t\treturn s[:sepIndex]\n\t}\n\treturn s\n}\n\nfunc buildCSP(csp map[string][]string) string {\n\tinstr := make([]string, len(csp))\n\tfor k, v := range csp {\n\t\tinstr = append(instr, k+\" \"+strings.Join(v, \" \"))\n\t}\n\treturn strings.Join(instr, \"; \")\n\n}\n\ntype redirectingRW struct {\n\t*http.Request\n\thttp.ResponseWriter\n\tignore bool\n}\n\nfunc (hrw *redirectingRW) Header() http.Header {\n\treturn hrw.ResponseWriter.Header()\n}\n\nfunc (hrw *redirectingRW) WriteHeader(status int) {\n\tif status == 404 {\n\t\thrw.ignore = true\n\t\thttp.Redirect(hrw.ResponseWriter, hrw.Request, \"\/ui\/404.html\", http.StatusTemporaryRedirect)\n\t} else {\n\t\thrw.ResponseWriter.WriteHeader(status)\n\t}\n\n}\n\nfunc (hrw *redirectingRW) Write(p []byte) (int, error) {\n\tif hrw.ignore {\n\t\treturn len(p), nil\n\t}\n\treturn hrw.ResponseWriter.Write(p)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/codegangsta\/envy\/lib\"\n\t\"github.com\/codegangsta\/gin\/lib\"\n\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tstartTime = time.Now()\n\tlogger = log.New(os.Stdout, \"[gin] \", 0)\n\tbuildError error\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gin\"\n\tapp.Usage = \"A live reload utility for Go web applications.\"\n\tapp.Action = MainAction\n\tapp.Flags = []cli.Flag{\n\t\tcli.IntFlag{\"port,p\", 3000, \"port for the proxy server\"},\n\t\tcli.IntFlag{\"appPort,a\", 3001, \"port for the Go web server\"},\n\t\tcli.StringFlag{\"bin,b\", \"gin-bin\", \"name of generated binary file\"},\n\t\tcli.StringFlag{\"path,t\", \".\", \"Path to watch files from\"},\n\t}\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tShortName: \"r\",\n\t\t\tUsage: \"Run the gin proxy in the current working directory\",\n\t\t\tAction: MainAction,\n\t\t},\n\t\t{\n\t\t\tName: \"env\",\n\t\t\tShortName: \"e\",\n\t\t\tUsage: \"Display environment variables set by the .env file\",\n\t\t\tAction: EnvAction,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc MainAction(c *cli.Context) {\n\tport := c.GlobalInt(\"port\")\n\tappPort := strconv.Itoa(c.GlobalInt(\"appPort\"))\n\n\t\/\/ Bootstrap the environment\n\tenvy.Bootstrap()\n\n\t\/\/ Set the PORT env\n\tos.Setenv(\"PORT\", appPort)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tbuilder := gin.NewBuilder(\".\", c.GlobalString(\"bin\"))\n\trunner := gin.NewRunner(filepath.Join(wd, builder.Binary()), c.Args()...)\n\trunner.SetWriter(os.Stdout)\n\tproxy := gin.NewProxy(builder, runner)\n\n\tconfig := &gin.Config{\n\t\tPort: port,\n\t\tProxyTo: \"http:\/\/localhost:\" + appPort,\n\t}\n\n\terr = proxy.Run(config)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tlogger.Printf(\"listening on port %d\\n\", port)\n\n\tshutdown(runner)\n\n\t\/\/ build right now\n\tbuild(builder, logger)\n\n\t\/\/ scan for changes\n\tscanChanges(c.GlobalString(\"path\"), func(path string) {\n\t\trunner.Kill()\n\t\tbuild(builder, logger)\n\t})\n}\n\nfunc EnvAction(c *cli.Context) {\n\t\/\/ Bootstrap the environment\n\tenv, err := envy.Bootstrap()\n\tif err != nil {\n\t\tlogger.Fatalln(err)\n\t}\n\n\tfor k, v := range env {\n\t\tfmt.Printf(\"%s: %s\\n\", k, v)\n\t}\n\n}\n\nfunc build(builder gin.Builder, logger *log.Logger) {\n\terr := builder.Build()\n\tif err != nil {\n\t\tbuildError = err\n\t\tlogger.Println(\"ERROR! Build failed.\")\n\t\tfmt.Println(builder.Errors())\n\t} else {\n\t\t\/\/ print success only if there were errors before\n\t\tif buildError != nil {\n\t\t\tlogger.Println(\"Build Successful\")\n\t\t}\n\t\tbuildError = nil\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n}\n\ntype scanCallback func(path string)\n\nfunc scanChanges(watchPath string, cb scanCallback) {\n\tfor {\n\t\tfilepath.Walk(watchPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif path == \".git\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\t\/\/ ignore hidden files\n\t\t\tif filepath.Base(path)[0] == '.' {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif filepath.Ext(path) == \".go\" && info.ModTime().After(startTime) {\n\t\t\t\tcb(path)\n\t\t\t\tstartTime = time.Now()\n\t\t\t\treturn errors.New(\"done\")\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\nfunc shutdown(runner gin.Runner) {\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\ts := <-c\n\t\tlog.Println(\"Got signal: \", s)\n\t\terr := runner.Kill()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error killing: \", err)\n\t\t}\n\t\tos.Exit(1)\n\t}()\n}\n<commit_msg>Fix breaking changes from cli flags<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/codegangsta\/envy\/lib\"\n\t\"github.com\/codegangsta\/gin\/lib\"\n\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tstartTime = time.Now()\n\tlogger = log.New(os.Stdout, \"[gin] \", 0)\n\tbuildError error\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gin\"\n\tapp.Usage = \"A live reload utility for Go web applications.\"\n\tapp.Action = MainAction\n\tapp.Flags = []cli.Flag{\n\t\tcli.IntFlag{\n\t\t\tName: \"port,p\",\n\t\t\tValue: 3000,\n\t\t\tUsage: \"port for the proxy server\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"appPort,a\",\n\t\t\tValue: 3001,\n\t\t\tUsage: \"port for the Go web server\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"bin,b\",\n\t\t\tValue: \"gin-bin\",\n\t\t\tUsage: \"name of generated binary file\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path,t\",\n\t\t\tValue: \".\",\n\t\t\tUsage: \"Path to watch files from\",\n\t\t},\n\t}\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tShortName: \"r\",\n\t\t\tUsage: \"Run the gin proxy in the current working directory\",\n\t\t\tAction: MainAction,\n\t\t},\n\t\t{\n\t\t\tName: \"env\",\n\t\t\tShortName: \"e\",\n\t\t\tUsage: \"Display environment variables set by the .env file\",\n\t\t\tAction: EnvAction,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc MainAction(c *cli.Context) {\n\tport := c.GlobalInt(\"port\")\n\tappPort := strconv.Itoa(c.GlobalInt(\"appPort\"))\n\n\t\/\/ Bootstrap the environment\n\tenvy.Bootstrap()\n\n\t\/\/ Set the PORT env\n\tos.Setenv(\"PORT\", appPort)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tbuilder := gin.NewBuilder(\".\", c.GlobalString(\"bin\"))\n\trunner := gin.NewRunner(filepath.Join(wd, builder.Binary()), c.Args()...)\n\trunner.SetWriter(os.Stdout)\n\tproxy := gin.NewProxy(builder, runner)\n\n\tconfig := &gin.Config{\n\t\tPort: port,\n\t\tProxyTo: \"http:\/\/localhost:\" + appPort,\n\t}\n\n\terr = proxy.Run(config)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tlogger.Printf(\"listening on port %d\\n\", port)\n\n\tshutdown(runner)\n\n\t\/\/ build right now\n\tbuild(builder, logger)\n\n\t\/\/ scan for changes\n\tscanChanges(c.GlobalString(\"path\"), func(path string) {\n\t\trunner.Kill()\n\t\tbuild(builder, logger)\n\t})\n}\n\nfunc EnvAction(c *cli.Context) {\n\t\/\/ Bootstrap the environment\n\tenv, err := envy.Bootstrap()\n\tif err != nil {\n\t\tlogger.Fatalln(err)\n\t}\n\n\tfor k, v := range env {\n\t\tfmt.Printf(\"%s: %s\\n\", k, v)\n\t}\n\n}\n\nfunc build(builder gin.Builder, logger *log.Logger) {\n\terr := builder.Build()\n\tif err != nil {\n\t\tbuildError = err\n\t\tlogger.Println(\"ERROR! Build failed.\")\n\t\tfmt.Println(builder.Errors())\n\t} else {\n\t\t\/\/ print success only if there were errors before\n\t\tif buildError != nil {\n\t\t\tlogger.Println(\"Build Successful\")\n\t\t}\n\t\tbuildError = nil\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n}\n\ntype scanCallback func(path string)\n\nfunc scanChanges(watchPath string, cb scanCallback) {\n\tfor {\n\t\tfilepath.Walk(watchPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif path == \".git\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\t\/\/ ignore hidden files\n\t\t\tif filepath.Base(path)[0] == '.' {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif filepath.Ext(path) == \".go\" && info.ModTime().After(startTime) {\n\t\t\t\tcb(path)\n\t\t\t\tstartTime = time.Now()\n\t\t\t\treturn errors.New(\"done\")\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\nfunc shutdown(runner gin.Runner) {\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\ts := <-c\n\t\tlog.Println(\"Got signal: \", s)\n\t\terr := runner.Kill()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error killing: \", err)\n\t\t}\n\t\tos.Exit(1)\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/vokalinteractive\/go-loggly\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"launchpad.net\/goamz\/s3\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"vip\/fetch\"\n\t\"vip\/peer\"\n\t\"vip\/store\"\n)\n\nvar (\n\tcache *groupcache.Group\n\tpeers peer.CachePool\n\tstorage store.ImageStore\n\tauthToken string\n\n\thttpport *string = flag.String(\"httpport\", \"8080\", \"target port\")\n)\n\nfunc listenHttp() {\n\tlog.Printf(\"Listening on port :%s\\n\", *httpport)\n\tcert := os.Getenv(\"SSL_CERT\")\n\tkey := os.Getenv(\"SSL_KEY\")\n\n\tport := fmt.Sprintf(\":%s\", *httpport)\n\n\tif cert != \"\" && key != \"\" {\n\t\tlog.Println(\"Serving via SSL\")\n\t\tif err := http.ListenAndServeTLS(port, cert, key, nil); err != nil {\n\t\t\tlog.Fatalf(\"Error starting server: %s\\n\", err.Error())\n\t\t}\n\t} else {\n\t\tif err := http.ListenAndServe(port, nil); err != nil {\n\t\t\tlog.Fatalf(\"Error starting server: %s\\n\", err.Error())\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.Parse()\n\n\tloggly_key := os.Getenv(\"LOGGLY_KEY\")\n\tif loggly_key != \"\" {\n\t\tlog.SetOutput(loggly.New(loggly_key, \"vip\"))\n\t}\n\n\tr := mux.NewRouter()\n\n\tauthToken = os.Getenv(\"AUTH_TOKEN\")\n\tif authToken == \"\" {\n\t\tlog.Println(\"No AUTH_TOKEN parameter provided, uploads are insecure\")\n\t}\n\n\tr.Handle(\"\/upload\/{bucket_id}\", verifyAuth(handleUpload))\n\tr.HandleFunc(\"\/{bucket_id}\/{image_id}\", handleImageRequest)\n\tr.HandleFunc(\"\/ping\", handlePing)\n\thttp.Handle(\"\/\", r)\n}\n\nfunc main() {\n\tawsAuth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tlog.Fatalf(err.Error())\n\t}\n\n\ts3conn := s3.New(awsAuth, aws.USEast)\n\tstorage = store.NewS3Store(s3conn)\n\n\tif os.Getenv(\"DEBUG\") == \"True\" {\n\t\tpeers = peer.DebugPool()\n\t} else {\n\t\tpeers = peer.Pool(ec2.New(awsAuth, aws.USEast))\n\t}\n\n\tpeers.SetContext(func(r *http.Request) groupcache.Context {\n\t\treturn fetch.RequestContext(r)\n\t})\n\n\tcache = groupcache.NewGroup(\"ImageProxyCache\", 64<<20, groupcache.GetterFunc(\n\t\tfunc(c groupcache.Context, key string, dest groupcache.Sink) error {\n\t\t\tlog.Printf(\"Cache MISS for key -> %s\", key)\n\t\t\t\/\/ Get image data from S3\n\t\t\tb, err := fetch.ImageData(storage, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn dest.SetBytes(b)\n\t\t}))\n\n\tgo peers.Listen()\n\tgo listenHttp()\n\n\tlog.Println(\"Cache listening on port :\" + peers.Port())\n\ts := &http.Server{\n\t\tAddr: \":\" + peers.Port(),\n\t\tHandler: peers,\n\t}\n\ts.ListenAndServe()\n}\n<commit_msg>Make AWS region variable, default to us-east-1<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/vokalinteractive\/go-loggly\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"launchpad.net\/goamz\/s3\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"vip\/fetch\"\n\t\"vip\/peer\"\n\t\"vip\/store\"\n)\n\nvar (\n\tcache *groupcache.Group\n\tpeers peer.CachePool\n\tstorage store.ImageStore\n\tauthToken string\n\n\thttpport *string = flag.String(\"httpport\", \"8080\", \"target port\")\n)\n\nfunc listenHttp() {\n\tlog.Printf(\"Listening on port :%s\\n\", *httpport)\n\tcert := os.Getenv(\"SSL_CERT\")\n\tkey := os.Getenv(\"SSL_KEY\")\n\n\tport := fmt.Sprintf(\":%s\", *httpport)\n\n\tif cert != \"\" && key != \"\" {\n\t\tlog.Println(\"Serving via SSL\")\n\t\tif err := http.ListenAndServeTLS(port, cert, key, nil); err != nil {\n\t\t\tlog.Fatalf(\"Error starting server: %s\\n\", err.Error())\n\t\t}\n\t} else {\n\t\tif err := http.ListenAndServe(port, nil); err != nil {\n\t\t\tlog.Fatalf(\"Error starting server: %s\\n\", err.Error())\n\t\t}\n\t}\n}\n\nfunc getRegion() aws.Region {\n\tregion := os.Getenv(\"AWS_REGION\")\n\n\tswitch region {\n\tcase \"us-west-1\":\n\t\treturn aws.USWest\n\tcase \"us-west-2\":\n\t\treturn aws.USWest2\n\tcase \"us-east-1\":\n\t\treturn aws.USEast\n\tdefault:\n\t\treturn aws.USEast\n\t}\n}\n\nfunc init() {\n\tflag.Parse()\n\n\tloggly_key := os.Getenv(\"LOGGLY_KEY\")\n\tif loggly_key != \"\" {\n\t\tlog.SetOutput(loggly.New(loggly_key, \"vip\"))\n\t}\n\n\tr := mux.NewRouter()\n\n\tauthToken = os.Getenv(\"AUTH_TOKEN\")\n\tif authToken == \"\" {\n\t\tlog.Println(\"No AUTH_TOKEN parameter provided, uploads are insecure\")\n\t}\n\n\tr.Handle(\"\/upload\/{bucket_id}\", verifyAuth(handleUpload))\n\tr.HandleFunc(\"\/{bucket_id}\/{image_id}\", handleImageRequest)\n\tr.HandleFunc(\"\/ping\", handlePing)\n\thttp.Handle(\"\/\", r)\n}\n\nfunc main() {\n\tawsAuth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tlog.Fatalf(err.Error())\n\t}\n\n\ts3conn := s3.New(awsAuth, getRegion())\n\tstorage = store.NewS3Store(s3conn)\n\n\tif os.Getenv(\"DEBUG\") == \"True\" {\n\t\tpeers = peer.DebugPool()\n\t} else {\n\t\tpeers = peer.Pool(ec2.New(awsAuth, aws.USEast))\n\t}\n\n\tpeers.SetContext(func(r *http.Request) groupcache.Context {\n\t\treturn fetch.RequestContext(r)\n\t})\n\n\tcache = groupcache.NewGroup(\"ImageProxyCache\", 64<<20, groupcache.GetterFunc(\n\t\tfunc(c groupcache.Context, key string, dest groupcache.Sink) error {\n\t\t\tlog.Printf(\"Cache MISS for key -> %s\", key)\n\t\t\t\/\/ Get image data from S3\n\t\t\tb, err := fetch.ImageData(storage, c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn dest.SetBytes(b)\n\t\t}))\n\n\tgo peers.Listen()\n\tgo listenHttp()\n\n\tlog.Println(\"Cache listening on port :\" + peers.Port())\n\ts := &http.Server{\n\t\tAddr: \":\" + peers.Port(),\n\t\tHandler: peers,\n\t}\n\ts.ListenAndServe()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst (\n\tVersion = \"0.1.0\"\n\tmulticastAddr = \"225.0.0.1:5000\"\n\tmaxDatagramSize = 8192\n)\n\nvar isLeader bool = false\n\nfunc main() {\n\tleaderFlagPtr := flag.Bool(\"leader\", false, \"join as leader\")\n\tversionFlagPtr := flag.Bool(\"version\", false, \"print version\")\n\n\tflag.Parse()\n\n\tif *versionFlagPtr {\n\t\tfmt.Printf(\"temposyncd version %s %s\/%s\\n\", Version, runtime.GOOS, runtime.GOARCH)\n\t\tos.Exit(1)\n\t}\n\n\tisLeader = *leaderFlagPtr\n\n\t\/\/ Resolve multicast addr\n\taddr, err := net.ResolveUDPAddr(\"udp4\", multicastAddr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif isLeader {\n\t\tlog.Println(\"Joined as leader\")\n\n\t\t\/\/ Start ticker\n\t\tgo tickTime(addr)\n\t} else {\n\t\tlog.Println(\"Joined as follower\")\n\t}\n\n\tfmt.Println(\"temposyncd started\")\n\tlistenMulticast(addr)\n}\n\nfunc tickTime(addr *net.UDPAddr) {\n\tc, err := net.DialUDP(\"udp\", nil, addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tt := time.Tick(1000 * time.Millisecond)\n\tfor now := range t {\n\t\tlog.Printf(\"TICK %v\\n\", now)\n\t\tc.Write([]byte(\"hello!\\n\"))\n\t}\n}\n\nfunc listenMulticast(addr *net.UDPAddr) {\n\tl, err := net.ListenMulticastUDP(\"udp4\", nil, addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tl.SetReadBuffer(maxDatagramSize)\n\n\tfor {\n\t\tb := make([]byte, maxDatagramSize)\n\t\tn, src, err := l.ReadFromUDP(b)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ReadFromUDP failed:\", err)\n\t\t}\n\t\tmsgHandler(src, n, b)\n\t}\n}\n\nfunc msgHandler(src *net.UDPAddr, n int, b []byte) {\n\tlog.Println(n, \"bytes read from\", src)\n\tlog.Println(hex.Dump(b[:n]))\n}\n<commit_msg>Be quiet by default, add --verbose option<commit_after>package main\n\nimport (\n\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst (\n\tVersion = \"0.1.0\"\n\tmulticastAddr = \"225.0.0.1:5000\"\n\tmaxDatagramSize = 8192\n)\n\nvar isLeader bool = false\n\nfunc main() {\n\tleaderFlagPtr := flag.Bool(\"leader\", false, \"join as leader\")\n\tversionFlagPtr := flag.Bool(\"version\", false, \"print version\")\n\tverboseFlagPtr := flag.Bool(\"verbose\", false, \"print debugging information\")\n\n\tflag.Parse()\n\n\tif *versionFlagPtr {\n\t\tfmt.Printf(\"temposyncd version %s %s\/%s\\n\", Version, runtime.GOOS, runtime.GOARCH)\n\t\tos.Exit(1)\n\t}\n\n\tif !*verboseFlagPtr {\n\t\tlog.SetFlags(0)\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\tisLeader = *leaderFlagPtr\n\n\t\/\/ Resolve multicast addr\n\taddr, err := net.ResolveUDPAddr(\"udp4\", multicastAddr)\n\tif err != nil {\n\t\tlog.Fatal(\"ResolveUDPAddr failed:\", err)\n\t}\n\n\tif isLeader {\n\t\tlog.Println(\"Joined as leader\")\n\n\t\t\/\/ Start ticker\n\t\tgo tickTime(addr)\n\t} else {\n\t\tlog.Println(\"Joined as follower\")\n\t}\n\n\tfmt.Println(\"temposyncd started\")\n\tlistenMulticast(addr)\n}\n\nfunc tickTime(addr *net.UDPAddr) {\n\tc, err := net.DialUDP(\"udp\", nil, addr)\n\tif err != nil {\n\t\tlog.Fatal(\"DialUDP failed:\", err)\n\t}\n\n\tt := time.Tick(1000 * time.Millisecond)\n\tfor now := range t {\n\t\tlog.Printf(\"TICK %v\\n\", now)\n\t\tc.Write([]byte(\"hello!\\n\"))\n\t}\n}\n\nfunc listenMulticast(addr *net.UDPAddr) {\n\tl, err := net.ListenMulticastUDP(\"udp4\", nil, addr)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenMulticastUDP failed:\", err)\n\t}\n\tl.SetReadBuffer(maxDatagramSize)\n\n\tfor {\n\t\tb := make([]byte, maxDatagramSize)\n\t\tn, src, err := l.ReadFromUDP(b)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ReadFromUDP failed:\", err)\n\t\t}\n\t\tmsgHandler(src, n, b)\n\t}\n}\n\nfunc msgHandler(src *net.UDPAddr, n int, b []byte) {\n\tlog.Println(n, \"bytes read from\", src)\n\tlog.Println(hex.Dump(b[:n]))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"crypto\"\n \"encoding\/base64\"\n \"fmt\"\n \"io\"\n \"io\/ioutil\"\n \"math\/big\"\n \"os\"\n \"path\/filepath\"\n\n \"github.com\/gibheer\/pki\"\n)\n\nvar (\n EmptyByteArray = make([]byte, 0)\n)\n\nfunc main() {\n if len(os.Args) == 1 {\n crash_with_help(1, \"No module selected!\")\n }\n switch os.Args[1] {\n case \"create-private\": create_private_key()\n case \"create-public\": create_public_key()\n case \"sign-input\": sign_input()\n case \"verify-signature\": verify_input()\n case \"create-cert-sign\": create_sign_request()\n case \"create-cert\": create_cert()\n case \"help\": print_modules()\n\/\/ case \"info\": info_on_file()\n default: crash_with_help(1, \"Command not supported!\")\n }\n}\n\n\/\/ create a private key\nfunc create_private_key() {\n fs := NewFlags(\"create-private\")\n fs.AddOutput()\n fs.AddPrivateKeyGenerationFlags()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n var pk pki.Pemmer\n switch fs.Flags.PrivateKeyGenerationFlags.Type {\n case \"ecdsa\": pk, err = pki.NewPrivateKeyEcdsa(fs.Flags.PrivateKeyGenerationFlags.Curve)\n case \"rsa\": pk, err = pki.NewPrivateKeyRsa(fs.Flags.PrivateKeyGenerationFlags.Size)\n }\n if err != nil { os.Exit(2) }\n marsh_pem, err := pk.MarshalPem()\n if err != nil { os.Exit(2) }\n _, err = marsh_pem.WriteTo(fs.Flags.Output)\n if err != nil { os.Exit(2) }\n}\n\n\/\/ create a public key derived from a private key\nfunc create_public_key() {\n fs := NewFlags(\"create-public\")\n fs.AddPrivateKey()\n fs.AddOutput()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n var pub_key pki.Pemmer\n pub_key = fs.Flags.PrivateKey.Public()\n marsh_pem, err := pub_key.MarshalPem()\n if err != nil { os.Exit(2) }\n _, err = marsh_pem.WriteTo(fs.Flags.Output)\n if err != nil { os.Exit(2) }\n}\n\n\/\/ sign a message using he private key\nfunc sign_input() {\n fs := NewFlags(\"sign-input\")\n fs.AddPrivateKey()\n fs.AddInput()\n fs.AddOutput()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n message, err := ioutil.ReadAll(fs.Flags.Input)\n if err != nil { crash_with_help(2, \"Error reading input: %s\", err) }\n signature, err := fs.Flags.PrivateKey.Sign(message, crypto.SHA256)\n if err != nil { crash_with_help(2, \"Could not compute signature: %s\", err) }\n _, err = io.WriteString(fs.Flags.Output, base64.StdEncoding.EncodeToString(signature))\n if err != nil { crash_with_help(2, \"Could not write to output: %s\", err) }\n\n \/\/ if we print to stderr, send a final line break to make the output nice\n if fs.Flags.Output == os.Stdout {\n \/\/ we can ignore the result, as either Stdout did work or not\n _, _ = io.WriteString(fs.Flags.Output, \"\\n\")\n }\n}\n\n\/\/ verify a message using a signature and a public key\nfunc verify_input() {\n fs := NewFlags(\"sign-input\")\n fs.AddPublicKey()\n fs.AddInput()\n fs.AddOutput()\n fs.AddSignature()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n signature := fs.Flags.Signature\n message, err := ioutil.ReadAll(fs.Flags.Input)\n if err != nil { crash_with_help(2, \"Error reading input: %s\", err) }\n valid, err := fs.Flags.PublicKey.Verify(message, signature, crypto.SHA256)\n if err != nil { crash_with_help(2, \"Could not verify message with signature: %s\", err) }\n if valid {\n fmt.Println(\"valid\")\n os.Exit(0)\n }\n fmt.Println(\"invalid\")\n os.Exit(1)\n}\n\n\/\/ create a certificate sign request\nfunc create_sign_request() {\n fs := NewFlags(\"create-cert-sign\")\n fs.AddPrivateKey()\n fs.AddOutput()\n fs.AddCertificateFields()\n fs.Parse(program_args())\n\n csr, err := fs.Flags.CertificateData.ToCertificateRequest(fs.Flags.PrivateKey)\n if err != nil { crash_with_help(2, \"Could not create certificate sign request: %s\", err) }\n pem_block, err := csr.MarshalPem()\n if err != nil { crash_with_help(2, \"Could not covnert to pem: %s\", err) }\n _, err = pem_block.WriteTo(fs.Flags.Output)\n if err != nil { crash_with_help(2, \"Encoding didn't work: %s\", err) }\n}\n\nfunc create_cert() {\n fs := NewFlags(\"create-cert\")\n fs.AddPrivateKey()\n fs.AddCSR()\n fs.AddOutput()\n fs.Parse(program_args())\n\n \/\/ TODO implement flags for all certificate options\n cert_opts := pki.CertificateOptions{}\n cert_opts.SerialNumber = big.NewInt(1)\n cert, err := fs.Flags.CertificateSignRequest.ToCertificate(\n fs.Flags.PrivateKey,\n cert_opts,\n nil,\n )\n if err != nil { crash_with_help(2, \"Error generating certificate: %s\", err) }\n pem_block, err := cert.MarshalPem()\n if err != nil { crash_with_help(2, \"Error converting to pem: %s\", err) }\n _, err = pem_block.WriteTo(fs.Flags.Output)\n if err != nil { crash_with_help(2, \"Output didn't work: %s\", err) }\n}\n\n\/\/ print the module help\nfunc print_modules() {\n fmt.Printf(`Usage: %s command args\nwhere 'command' is one of:\n create-private create a new private key\n create-public create a public key from a private one\n sign-input sign a message with a private key\n verify-signature verify a signature\n create-cert-sign create a new certificate sign request\n create-cert sign a certificate request\n help show this help\n info get info on a file\n`, filepath.Base(os.Args[0]))\n fmt.Println()\n}\n\n\/\/ crash and provide a helpful message\nfunc crash_with_help(code int, message string, args ...interface{}) {\n fmt.Fprintf(os.Stderr, message + \"\\n\", args...)\n print_modules()\n os.Exit(code)\n}\n\n\/\/ return the arguments to the program\nfunc program_args() []string {\n return os.Args[2:]\n}\n<commit_msg>add a --help option to avoid an error message<commit_after>package main\n\nimport (\n \"crypto\"\n \"encoding\/base64\"\n \"fmt\"\n \"io\"\n \"io\/ioutil\"\n \"math\/big\"\n \"os\"\n \"path\/filepath\"\n\n \"github.com\/gibheer\/pki\"\n)\n\nvar (\n EmptyByteArray = make([]byte, 0)\n)\n\nfunc main() {\n if len(os.Args) == 1 {\n crash_with_help(1, \"No module selected!\")\n }\n switch os.Args[1] {\n case \"create-private\": create_private_key()\n case \"create-public\": create_public_key()\n case \"sign-input\": sign_input()\n case \"verify-signature\": verify_input()\n case \"create-cert-sign\": create_sign_request()\n case \"create-cert\": create_cert()\n case \"help\": print_modules()\n case \"--help\": print_modules()\n\/\/ case \"info\": info_on_file()\n default: crash_with_help(1, \"Command not supported!\")\n }\n}\n\n\/\/ create a private key\nfunc create_private_key() {\n fs := NewFlags(\"create-private\")\n fs.AddOutput()\n fs.AddPrivateKeyGenerationFlags()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n var pk pki.Pemmer\n switch fs.Flags.PrivateKeyGenerationFlags.Type {\n case \"ecdsa\": pk, err = pki.NewPrivateKeyEcdsa(fs.Flags.PrivateKeyGenerationFlags.Curve)\n case \"rsa\": pk, err = pki.NewPrivateKeyRsa(fs.Flags.PrivateKeyGenerationFlags.Size)\n }\n if err != nil { os.Exit(2) }\n marsh_pem, err := pk.MarshalPem()\n if err != nil { os.Exit(2) }\n _, err = marsh_pem.WriteTo(fs.Flags.Output)\n if err != nil { os.Exit(2) }\n}\n\n\/\/ create a public key derived from a private key\nfunc create_public_key() {\n fs := NewFlags(\"create-public\")\n fs.AddPrivateKey()\n fs.AddOutput()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n var pub_key pki.Pemmer\n pub_key = fs.Flags.PrivateKey.Public()\n marsh_pem, err := pub_key.MarshalPem()\n if err != nil { os.Exit(2) }\n _, err = marsh_pem.WriteTo(fs.Flags.Output)\n if err != nil { os.Exit(2) }\n}\n\n\/\/ sign a message using he private key\nfunc sign_input() {\n fs := NewFlags(\"sign-input\")\n fs.AddPrivateKey()\n fs.AddInput()\n fs.AddOutput()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n message, err := ioutil.ReadAll(fs.Flags.Input)\n if err != nil { crash_with_help(2, \"Error reading input: %s\", err) }\n signature, err := fs.Flags.PrivateKey.Sign(message, crypto.SHA256)\n if err != nil { crash_with_help(2, \"Could not compute signature: %s\", err) }\n _, err = io.WriteString(fs.Flags.Output, base64.StdEncoding.EncodeToString(signature))\n if err != nil { crash_with_help(2, \"Could not write to output: %s\", err) }\n\n \/\/ if we print to stderr, send a final line break to make the output nice\n if fs.Flags.Output == os.Stdout {\n \/\/ we can ignore the result, as either Stdout did work or not\n _, _ = io.WriteString(fs.Flags.Output, \"\\n\")\n }\n}\n\n\/\/ verify a message using a signature and a public key\nfunc verify_input() {\n fs := NewFlags(\"sign-input\")\n fs.AddPublicKey()\n fs.AddInput()\n fs.AddOutput()\n fs.AddSignature()\n err := fs.Parse(program_args())\n if err != nil { os.Exit(2) }\n\n signature := fs.Flags.Signature\n message, err := ioutil.ReadAll(fs.Flags.Input)\n if err != nil { crash_with_help(2, \"Error reading input: %s\", err) }\n valid, err := fs.Flags.PublicKey.Verify(message, signature, crypto.SHA256)\n if err != nil { crash_with_help(2, \"Could not verify message with signature: %s\", err) }\n if valid {\n fmt.Println(\"valid\")\n os.Exit(0)\n }\n fmt.Println(\"invalid\")\n os.Exit(1)\n}\n\n\/\/ create a certificate sign request\nfunc create_sign_request() {\n fs := NewFlags(\"create-cert-sign\")\n fs.AddPrivateKey()\n fs.AddOutput()\n fs.AddCertificateFields()\n fs.Parse(program_args())\n\n csr, err := fs.Flags.CertificateData.ToCertificateRequest(fs.Flags.PrivateKey)\n if err != nil { crash_with_help(2, \"Could not create certificate sign request: %s\", err) }\n pem_block, err := csr.MarshalPem()\n if err != nil { crash_with_help(2, \"Could not covnert to pem: %s\", err) }\n _, err = pem_block.WriteTo(fs.Flags.Output)\n if err != nil { crash_with_help(2, \"Encoding didn't work: %s\", err) }\n}\n\nfunc create_cert() {\n fs := NewFlags(\"create-cert\")\n fs.AddPrivateKey()\n fs.AddCSR()\n fs.AddOutput()\n fs.Parse(program_args())\n\n \/\/ TODO implement flags for all certificate options\n cert_opts := pki.CertificateOptions{}\n cert_opts.SerialNumber = big.NewInt(1)\n cert, err := fs.Flags.CertificateSignRequest.ToCertificate(\n fs.Flags.PrivateKey,\n cert_opts,\n nil,\n )\n if err != nil { crash_with_help(2, \"Error generating certificate: %s\", err) }\n pem_block, err := cert.MarshalPem()\n if err != nil { crash_with_help(2, \"Error converting to pem: %s\", err) }\n _, err = pem_block.WriteTo(fs.Flags.Output)\n if err != nil { crash_with_help(2, \"Output didn't work: %s\", err) }\n}\n\n\/\/ print the module help\nfunc print_modules() {\n fmt.Printf(`Usage: %s command args\nwhere 'command' is one of:\n create-private create a new private key\n create-public create a public key from a private one\n sign-input sign a message with a private key\n verify-signature verify a signature\n create-cert-sign create a new certificate sign request\n create-cert sign a certificate request\n help show this help\n info get info on a file\n`, filepath.Base(os.Args[0]))\n fmt.Println()\n}\n\n\/\/ crash and provide a helpful message\nfunc crash_with_help(code int, message string, args ...interface{}) {\n fmt.Fprintf(os.Stderr, message + \"\\n\", args...)\n print_modules()\n os.Exit(code)\n}\n\n\/\/ return the arguments to the program\nfunc program_args() []string {\n return os.Args[2:]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"gopkg.in\/cheggaaa\/pb.v1\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tMAX_VALUE int64 = 1679616 \/\/ 36^4\n\tMAX_RETRY = 10\n\tRETRY_TIME = 5 * time.Second\n\tTIMEOUT = 20 * time.Second\n)\n\nvar (\n\tlink = kingpin.Arg(\"link\", \"URL of BaiduYun file you want to get.\").Required().String()\n\tpreset = kingpin.Flag(\"preset\", \"The preset start of key to brute.\").Short('p').Default(\"0000\").String()\n\tthread = kingpin.Flag(\"thread\", \"Number of thread.\").Short('t').Default(\"10\").Int64()\n\tresolver []*Resolve\n\tbar *pb.ProgressBar\n\tshareid, uk string\n\tstart int64\n\trefer string\n\twg sync.WaitGroup\n\tproxies map[Proxy]int\n\tupdater []*Proxies\n\tmapLocker *sync.Mutex\n\tuseable *AtomBool\n\tnullP Proxy\n)\n\ntype Info struct {\n\tErrno int `json:\"errno\"`\n\tErrMsg string `json:\"err_msg\"`\n}\n\ntype Proxy struct {\n\ttyp, addr, port string\n}\n\ntype Resolve struct {\n\tre *regexp.Regexp\n\tfun func(*regexp.Regexp, string)\n}\n\ntype Proxies struct {\n\tupdate func()\n}\n\ntype AtomBool struct {\n\tflag bool\n\tlock *sync.Mutex\n}\n\nfunc (b *AtomBool) Set(value bool) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\tb.flag = value\n}\n\nfunc (b *AtomBool) Get() bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\tif b.flag {\n\t\tb.flag = false\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc getProxy() (Proxy, bool) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tfor {\n\t\tif len(proxies) <= 0 {\n\t\t\treturn nullP, false\n\t\t}\n\t\tran := rand.Intn(len(proxies))\n\t\tcnt := 0\n\t\tfor i, k := range proxies {\n\t\t\tif k >= MAX_RETRY {\n\t\t\t\tdelete(proxies, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif cnt == ran {\n\t\t\t\treturn i, true\n\t\t\t}\n\t\t\tcnt++\n\t\t}\n\t}\n}\n\nfunc addProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tproxies[in] = 0\n}\n\nfunc deleteProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tdelete(proxies, in)\n}\n\nfunc increProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tproxies[in]++\n}\n\nfunc saveProxies() {\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/api.xicidaili.com\/free2016.txt\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tsca := bufio.NewScanner(resp.Body)\n\t\t\t\t\tfor sca.Scan() {\n\t\t\t\t\t\tspl := strings.Split(sca.Text(), \":\")\n\t\t\t\t\t\tif len(spl) != 2 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{\"http\", spl[0], spl[1]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\ttime.Sleep(15 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/proxy.tekbreak.com\/1000\/json\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tvar sca []struct {\n\t\t\t\t\t\tIP string `json:\"ip\"`\n\t\t\t\t\t\tPort string `json:\"port\"`\n\t\t\t\t\t\tType string `json:\"type\"`\n\t\t\t\t\t}\n\t\t\t\t\tif err := json.NewDecoder(resp.Body).Decode(&sca); err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tne := Proxy{addr: i.IP, port: i.Port}\n\t\t\t\t\t\tignore := false\n\t\t\t\t\t\tswitch i.Type {\n\t\t\t\t\t\tcase \"HTTP\":\n\t\t\t\t\t\t\tne.typ = \"http\"\n\t\t\t\t\t\tcase \"HTTPS\":\n\t\t\t\t\t\t\tne.typ = \"https\"\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tignore = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !ignore {\n\t\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/free-proxy-list.net\/\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tconte, err := ioutil.ReadAll(resp.Body)\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tre, _ := regexp.Compile(`<tr><td>(\\d+\\.\\d+\\.\\d+\\.\\d+)<\/td><td>(\\d+)<\/td><td>.*<\/td><td>.*<\/td><td>.*<\/td><td>.*<\/td><td>(yes|no)<\/td><td>.*<\/td><\/tr>`)\n\t\t\t\t\tsca := re.FindAllStringSubmatch(string(conte), -1)\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tif len(i) != 4 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif i[3] == \"yes\" {\n\t\t\t\t\t\t\ti[3] = \"https\"\n\t\t\t\t\t\t} else if i[3] == \"no\" {\n\t\t\t\t\t\t\ti[3] = \"http\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{i[3], i[1], i[2]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"https:\/\/www.sslproxies.org\/\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tconte, err := ioutil.ReadAll(resp.Body)\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tre, _ := regexp.Compile(`<tr><td>(\\d+\\.\\d+\\.\\d+\\.\\d+)<\/td><td>(\\d+)<\/td>.*<\/tr>`)\n\t\t\t\t\tsca := re.FindAllStringSubmatch(string(conte), -1)\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tif len(i) != 3 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{\"https\", i[1], i[2]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n}\n\nfunc next(now string, count int64) int64 {\n\tnum, err := strconv.ParseInt(now, 36, 64)\n\tif err != nil || num < 0 || num > MAX_VALUE {\n\t\tlog.Fatal(\"Not a valid number!\")\n\t}\n\treturn num + count\n}\n\nfunc saveResolver() {\n\tre1, _ := regexp.Compile(`\/\/pan\\.baidu\\.com\/share\/init\\?shareid=(\\d+)&uk=(\\d+)`)\n\tresolver = append(resolver,\n\t\t&Resolve{\n\t\t\tre1,\n\t\t\tfunc(re *regexp.Regexp, ori string) {\n\t\t\t\tret := re.FindStringSubmatch(ori)\n\t\t\t\tif len(ret) != 3 {\n\t\t\t\t\tlog.Fatal(\"WTF??\", ori)\n\t\t\t\t}\n\t\t\t\tshareid = ret[1]\n\t\t\t\tuk = ret[2]\n\t\t\t\trefer = ori\n\t\t\t},\n\t\t})\n\tre2, _ := regexp.Compile(`\/\/pan\\.baidu\\.com\/s\/([a-zA-Z0-9]+)`)\n\tresolver = append(resolver,\n\t\t&Resolve{\n\t\t\tre2,\n\t\t\tfunc(re *regexp.Regexp, ori string) {\n\t\t\t\tjar, _ := cookiejar.New(nil)\n\t\t\t\tsession := &http.Client{\n\t\t\t\t\tJar: jar,\n\t\t\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := session.Get(ori)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif resp.StatusCode == 200 {\n\t\t\t\t\t\tlog.Fatal(\"Link seems password-less!\")\n\t\t\t\t\t} else if resp.StatusCode == 302 {\n\t\t\t\t\t\tif resolver[0].re.MatchString(resp.Header.Get(\"Location\")) {\n\t\t\t\t\t\t\tresolver[0].fun(resolver[0].re, resp.Header.Get(\"Location\"))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\", ori, resp.Header.Get(\"Location\"))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t})\n}\n\nfunc builder(now string) (*http.Response, Proxy, error) {\n\tvar pro Proxy\n\tfor {\n\t\tvar ok bool\n\t\tif pro, ok = getProxy(); ok {\n\t\t\tuseable.Set(true)\n\t\t\tbreak\n\t\t}\n\t\tif useable.Get() {\n\t\t\tlog.Println(\"No proxies left! Threads will hang up...\")\n\t\t}\n\t\ttime.Sleep(RETRY_TIME)\n\t}\n\tpar, _ := url.Parse(fmt.Sprintf(\"%s:\/\/%s:%s\", pro.typ, pro.addr, pro.port))\n\tsession := &http.Client{\n\t\tTimeout: TIMEOUT,\n\t\tTransport: &http.Transport{Proxy: http.ProxyURL(par)},\n\t}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"https:\/\/pan.baidu.com\/share\/verify?shareid=%s&uk=%s\", shareid, uk), strings.NewReader(fmt.Sprintf(\"pwd=%04s&vcode=&vcode_str=\", now)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded; charset=UTF-8\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla\/5.0 (Windows NT 10.0; WOW64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/54.0.2840.71 Safari\/537.36\")\n\treq.Header.Set(\"Origin\", \"https:\/\/pan.baidu.com\")\n\treq.Header.Set(\"Referer\", refer)\n\tresp, err := session.Do(req)\n\tif err != nil && strings.Contains(err.Error(), \"error connecting to proxy\") { \/\/ ugly way\n\t\tincreProxy(pro)\n\t}\n\treturn resp, pro, err\n}\n\nfunc tester(work int64) {\n\tfor work < MAX_VALUE {\n\t\tnow := strconv.FormatInt(work, 36)\n\t\tinfo := new(Info)\n\t\tfor {\n\t\t\tif resp, pro, err := builder(now); err == nil {\n\t\t\t\tif resp.StatusCode == 200 {\n\t\t\t\t\tif err = json.NewDecoder(resp.Body).Decode(info); err == nil {\n\t\t\t\t\t\tif info.Errno == 0 {\n\t\t\t\t\t\t\tlog.Println(\"Key found!\", now)\n\t\t\t\t\t\t\tos.Exit(0)\n\t\t\t\t\t\t} else if info.Errno != -9 {\n\t\t\t\t\t\t\tlog.Println(\"Unknown error! Service returned\", info.Errno, \"with message:\", info.ErrMsg)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if resp.StatusCode == 404 {\n\t\t\t\t\tdeleteProxy(pro)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"Unknown error! Server returned\", resp.StatusCode)\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t\ttime.Sleep(RETRY_TIME)\n\t\t}\n\t\tbar.Increment()\n\t\twork += *thread\n\t}\n\twg.Done()\n}\n\nfunc init() {\n\tkingpin.CommandLine.HelpFlag.Short('h')\n\tkingpin.Parse()\n\tsaveResolver() \/\/ For future expansion of resolver\n\tmapLocker = new(sync.Mutex)\n\tuseable = new(AtomBool)\n\tuseable.lock = new(sync.Mutex)\n\tuseable.Set(true)\n\tproxies = make(map[Proxy]int)\n\tsaveProxies() \/\/ For future expansion of proxy\n\tvar indi int\n\tfor indi = 0; indi < len(resolver); indi++ {\n\t\tif resolver[indi].re.MatchString(*link) {\n\t\t\tresolver[indi].fun(resolver[indi].re, *link)\n\t\t\tbreak\n\t\t}\n\t}\n\tif indi == len(resolver) {\n\t\tlog.Fatal(\"No proper resolver found!\")\n\t}\n\tfor _, i := range updater {\n\t\tgo i.update()\n\t}\n\tstart = next(*preset, 0)\n\tbar = pb.New64(MAX_VALUE)\n\tbar.SetMaxWidth(70)\n\tbar.ShowCounters = false\n\tbar.ShowSpeed = true\n\tbar.ShowTimeLeft = true\n\tbar.ShowFinalTime = true\n\tbar.Set64(start)\n\tbar.Start()\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\tfor _ = range c {\n\t\t\tlog.Printf(\"Terminating program, current progress: %04s\", strconv.FormatInt(bar.Get(), 36))\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n}\n\nfunc main() {\n\t\/\/go func() {\n\t\/\/\tfor {\n\t\/\/\t\tlog.Println(len(proxies))\n\t\/\/\t\tfor i, k := range proxies {\n\t\/\/\t\t\tlog.Println(i, k)\n\t\/\/\t\t\tbreak\n\t\/\/\t\t}\n\t\/\/\t\ttime.Sleep(RETRY_TIME)\n\t\/\/\t}\n\t\/\/}() \/\/ DEBUG\n\tlog.SetPrefix(\"\\n\")\n\twg.Add(int(*thread))\n\tfor i := int64(0); i < *thread; i++ {\n\t\tgo tester(start + i)\n\t}\n\twg.Wait()\n\tlog.Fatal(\"No key found!\")\n}\n<commit_msg>Yep, few bugs during test<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"gopkg.in\/cheggaaa\/pb.v1\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tMAX_VALUE int64 = 1679616 \/\/ 36^4\n\tMAX_RETRY = 10\n\tRETRY_TIME = 5 * time.Second\n\tTIMEOUT = 20 * time.Second\n)\n\nvar (\n\tlink = kingpin.Arg(\"link\", \"URL of BaiduYun file you want to get.\").Required().String()\n\tpreset = kingpin.Flag(\"preset\", \"The preset start of key to brute.\").Short('p').Default(\"0000\").String()\n\tthread = kingpin.Flag(\"thread\", \"Number of thread.\").Short('t').Default(\"10\").Int64()\n\tresolver []*Resolve\n\tbar *pb.ProgressBar\n\tshareid, uk string\n\tstart int64\n\trefer string\n\twg sync.WaitGroup\n\tproxies map[Proxy]int\n\tupdater []*Proxies\n\tmapLocker *sync.Mutex\n\tuseable *AtomBool\n\tnullP Proxy\n)\n\ntype Info struct {\n\tErrno int `json:\"errno\"`\n\tErrMsg string `json:\"err_msg\"`\n}\n\ntype Proxy struct {\n\ttyp, addr, port string\n}\n\ntype Resolve struct {\n\tre *regexp.Regexp\n\tfun func(*regexp.Regexp, string)\n}\n\ntype Proxies struct {\n\tupdate func()\n}\n\ntype AtomBool struct {\n\tflag bool\n\tlock *sync.Mutex\n}\n\nfunc (b *AtomBool) Set(value bool) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\tb.flag = value\n}\n\nfunc (b *AtomBool) Get() bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\tif b.flag {\n\t\tb.flag = false\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc getProxy() (Proxy, bool) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tfor {\n\t\tif len(proxies) <= 0 {\n\t\t\treturn nullP, false\n\t\t}\n\t\tran := rand.Intn(len(proxies))\n\t\tcnt := 0\n\t\tfor i, k := range proxies {\n\t\t\tif k >= MAX_RETRY {\n\t\t\t\tdelete(proxies, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif cnt == ran {\n\t\t\t\treturn i, true\n\t\t\t}\n\t\t\tcnt++\n\t\t}\n\t}\n}\n\nfunc addProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tproxies[in] = 0\n}\n\nfunc deleteProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tdelete(proxies, in)\n}\n\nfunc increProxy(in Proxy) {\n\tmapLocker.Lock()\n\tdefer mapLocker.Unlock()\n\tproxies[in]++\n}\n\nfunc saveProxies() {\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/api.xicidaili.com\/free2016.txt\")\n\t\t\t\t\tif err != nil || resp.Body == nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tsca := bufio.NewScanner(resp.Body)\n\t\t\t\t\tfor sca.Scan() {\n\t\t\t\t\t\tspl := strings.Split(sca.Text(), \":\")\n\t\t\t\t\t\tif len(spl) != 2 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{\"http\", spl[0], spl[1]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\ttime.Sleep(15 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/proxy.tekbreak.com\/1000\/json\")\n\t\t\t\t\tif err != nil || resp.Body == nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tvar sca []struct {\n\t\t\t\t\t\tIP string `json:\"ip\"`\n\t\t\t\t\t\tPort string `json:\"port\"`\n\t\t\t\t\t\tType string `json:\"type\"`\n\t\t\t\t\t}\n\t\t\t\t\tif err := json.NewDecoder(resp.Body).Decode(&sca); err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tne := Proxy{addr: i.IP, port: i.Port}\n\t\t\t\t\t\tignore := false\n\t\t\t\t\t\tswitch i.Type {\n\t\t\t\t\t\tcase \"HTTP\":\n\t\t\t\t\t\t\tne.typ = \"http\"\n\t\t\t\t\t\tcase \"HTTPS\":\n\t\t\t\t\t\t\tne.typ = \"https\"\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tignore = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !ignore {\n\t\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"http:\/\/free-proxy-list.net\/\")\n\t\t\t\t\tif err != nil || resp.Body == nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tconte, err := ioutil.ReadAll(resp.Body)\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tre, _ := regexp.Compile(`<tr><td>(\\d+\\.\\d+\\.\\d+\\.\\d+)<\/td><td>(\\d+)<\/td><td>.*<\/td><td>.*<\/td><td>.*<\/td><td>.*<\/td><td>(yes|no)<\/td><td>.*<\/td><\/tr>`)\n\t\t\t\t\tsca := re.FindAllStringSubmatch(string(conte), -1)\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tif len(i) != 4 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif i[3] == \"yes\" {\n\t\t\t\t\t\t\ti[3] = \"https\"\n\t\t\t\t\t\t} else if i[3] == \"no\" {\n\t\t\t\t\t\t\ti[3] = \"http\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{i[3], i[1], i[2]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\tupdater = append(updater,\n\t\t&Proxies{\n\t\t\tfunc() {\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := http.Get(\"https:\/\/www.sslproxies.org\/\")\n\t\t\t\t\tif err != nil || resp.Body == nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tconte, err := ioutil.ReadAll(resp.Body)\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t}\n\t\t\t\t\tre, _ := regexp.Compile(`<tr><td>(\\d+\\.\\d+\\.\\d+\\.\\d+)<\/td><td>(\\d+)<\/td>.*<\/tr>`)\n\t\t\t\t\tsca := re.FindAllStringSubmatch(string(conte), -1)\n\t\t\t\t\tfor _, i := range sca {\n\t\t\t\t\t\tif len(i) != 3 {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tne := Proxy{\"https\", i[1], i[2]}\n\t\t\t\t\t\taddProxy(ne)\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t\t}\n\t\t\t},\n\t\t})\n}\n\nfunc next(now string, count int64) int64 {\n\tnum, err := strconv.ParseInt(now, 36, 64)\n\tif err != nil || num < 0 || num > MAX_VALUE {\n\t\tlog.Fatal(\"Not a valid number!\")\n\t}\n\treturn num + count\n}\n\nfunc saveResolver() {\n\tre1, _ := regexp.Compile(`\/\/pan\\.baidu\\.com\/share\/init\\?shareid=(\\d+)&uk=(\\d+)`)\n\tresolver = append(resolver,\n\t\t&Resolve{\n\t\t\tre1,\n\t\t\tfunc(re *regexp.Regexp, ori string) {\n\t\t\t\tret := re.FindStringSubmatch(ori)\n\t\t\t\tif len(ret) != 3 {\n\t\t\t\t\tlog.Fatal(\"WTF??\", ori)\n\t\t\t\t}\n\t\t\t\tshareid = ret[1]\n\t\t\t\tuk = ret[2]\n\t\t\t\trefer = ori\n\t\t\t},\n\t\t})\n\tre2, _ := regexp.Compile(`\/\/pan\\.baidu\\.com\/s\/([a-zA-Z0-9]+)`)\n\tresolver = append(resolver,\n\t\t&Resolve{\n\t\t\tre2,\n\t\t\tfunc(re *regexp.Regexp, ori string) {\n\t\t\t\tjar, _ := cookiejar.New(nil)\n\t\t\t\tsession := &http.Client{\n\t\t\t\t\tJar: jar,\n\t\t\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tfor {\n\t\t\t\t\tresp, err := session.Get(ori)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\ttime.Sleep(RETRY_TIME)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif resp.StatusCode == 200 {\n\t\t\t\t\t\tlog.Fatal(\"Link seems password-less!\")\n\t\t\t\t\t} else if resp.StatusCode == 302 {\n\t\t\t\t\t\tif resolver[0].re.MatchString(resp.Header.Get(\"Location\")) {\n\t\t\t\t\t\t\tresolver[0].fun(resolver[0].re, resp.Header.Get(\"Location\"))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Fatal(\"WTF??\", ori, resp.Header.Get(\"Location\"))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t})\n}\n\nfunc builder(now string) (*http.Response, Proxy, error) {\n\tvar pro Proxy\n\tfor {\n\t\tvar ok bool\n\t\tif pro, ok = getProxy(); ok {\n\t\t\tuseable.Set(true)\n\t\t\tbreak\n\t\t}\n\t\tif useable.Get() {\n\t\t\tlog.Println(\"No proxies left! Threads will hang up...\")\n\t\t}\n\t\ttime.Sleep(RETRY_TIME)\n\t}\n\tpar, _ := url.Parse(fmt.Sprintf(\"%s:\/\/%s:%s\", pro.typ, pro.addr, pro.port))\n\tsession := &http.Client{\n\t\tTimeout: TIMEOUT,\n\t\tTransport: &http.Transport{Proxy: http.ProxyURL(par)},\n\t}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"https:\/\/pan.baidu.com\/share\/verify?shareid=%s&uk=%s\", shareid, uk), strings.NewReader(fmt.Sprintf(\"pwd=%04s&vcode=&vcode_str=\", now)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded; charset=UTF-8\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla\/5.0 (Windows NT 10.0; WOW64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/54.0.2840.71 Safari\/537.36\")\n\treq.Header.Set(\"Origin\", \"https:\/\/pan.baidu.com\")\n\treq.Header.Set(\"Referer\", refer)\n\tresp, err := session.Do(req)\n\treturn resp, pro, err\n}\n\nfunc tester(work int64) {\n\tfor work < MAX_VALUE {\n\t\tnow := strconv.FormatInt(work, 36)\n\t\tinfo := new(Info)\n\t\tfor {\n\t\t\tif resp, pro, err := builder(now); err == nil {\n\t\t\t\tif resp.StatusCode == 200 {\n\t\t\t\t\tif err = json.NewDecoder(resp.Body).Decode(info); err == nil {\n\t\t\t\t\t\tif info.Errno == 0 {\n\t\t\t\t\t\t\tlog.Println(\"Key found!\", now)\n\t\t\t\t\t\t\tos.Exit(0)\n\t\t\t\t\t\t} else if info.Errno != -9 {\n\t\t\t\t\t\t\tlog.Println(\"Unknown error! Service returned\", info.Errno, \"with message:\", info.ErrMsg)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if resp.StatusCode == 404 {\n\t\t\t\t\tdeleteProxy(pro)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"Unknown error! Server returned\", resp.StatusCode)\n\t\t\t\t}\n\t\t\t\tresp.Body.Close()\n\t\t\t} else if strings.Contains(err.Error(), \"error connecting to proxy\") {\n\t\t\t\tincreProxy(pro)\n\t\t\t}\n\t\t\ttime.Sleep(RETRY_TIME)\n\t\t}\n\t\tbar.Increment()\n\t\twork += *thread\n\t}\n\twg.Done()\n}\n\nfunc init() {\n\tkingpin.CommandLine.HelpFlag.Short('h')\n\tkingpin.Parse()\n\tsaveResolver() \/\/ For future expansion of resolver\n\tmapLocker = new(sync.Mutex)\n\tuseable = new(AtomBool)\n\tuseable.lock = new(sync.Mutex)\n\tproxies = make(map[Proxy]int)\n\tsaveProxies() \/\/ For future expansion of proxy\n\tvar indi int\n\tfor indi = 0; indi < len(resolver); indi++ {\n\t\tif resolver[indi].re.MatchString(*link) {\n\t\t\tresolver[indi].fun(resolver[indi].re, *link)\n\t\t\tbreak\n\t\t}\n\t}\n\tif indi == len(resolver) {\n\t\tlog.Fatal(\"No proper resolver found!\")\n\t}\n\tfor _, i := range updater {\n\t\tgo i.update()\n\t}\n\tstart = next(*preset, 0)\n\tbar = pb.New64(MAX_VALUE)\n\tbar.SetMaxWidth(70)\n\tbar.ShowCounters = false\n\tbar.ShowSpeed = true\n\tbar.ShowTimeLeft = true\n\tbar.ShowFinalTime = true\n\tbar.Set64(start)\n\tbar.Start()\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\tfor _ = range c {\n\t\t\tlog.Printf(\"Terminating program, current progress: %04s\", strconv.FormatInt(bar.Get(), 36))\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n}\n\nfunc main() {\n\t\/\/go func() {\n\t\/\/\tfor {\n\t\/\/\t\tlog.Println(len(proxies))\n\t\/\/\t\tfor i, k := range proxies {\n\t\/\/\t\t\tlog.Println(i, k)\n\t\/\/\t\t\tbreak\n\t\/\/\t\t}\n\t\/\/\t\ttime.Sleep(RETRY_TIME)\n\t\/\/\t}\n\t\/\/}() \/\/ DEBUG\n\tlog.SetPrefix(\"\\n\")\n\twg.Add(int(*thread))\n\tfor i := int64(0); i < *thread; i++ {\n\t\tgo tester(start + i)\n\t}\n\twg.Wait()\n\tlog.Fatal(\"No key found!\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ 4 march 2014\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\/\/\t\"os\/exec\"\n\t\"time\"\n\t\"github.com\/andlabs\/ui\"\n)\n\nconst (\n\tdefCmdLine = \"mpv -loop inf ~\/ring.wav\"\n\tdefTime = \"10:00 AM\"\n\ttimeFmt = \"3:04 PM\"\n)\n\n\/\/ If later hasn't happened yet, make it happen on the day of now; if not, the day after.\nfunc bestTime(now time.Time, later time.Time) time.Time {\n\tnow = now.Local()\t\t\/\/ use local time to make things make sense\n\tnowh, nowm, nows := now.Clock()\n\tlaterh, laterm, laters := later.Clock()\n\tadd := false\n\tif nowh > laterh {\n\t\tadd = true\n\t} else if (nowh == laterh) && (nowm > laterm) {\n\t\tadd = true\n\t} else if (nowh == laterh) && (nowm == laterm) && (nows >= laters) {\n\t\t\/\/ >= in the case we're on the exact second; add a day because the alarm should have gone off by now otherwise!\n\t\tadd = true\n\t}\nprintln(nowh,nowm,nows,add,laterh,laterm,laters)\n\tif add {\n\t\tnow = now.AddDate(0, 0, 1)\n\t}\n\treturn time.Date(now.Year(), now.Month(), now.Day(),\n\t\tlaterh, laterm, laters, 0,\n\t\tnow.Location())\n}\n\nfunc myMain() {\n\tw := ui.NewWindow(\"wakeup\", 400, 100)\n\tw.Closing = ui.Event()\n\tcmdbox := ui.NewLineEdit(defCmdLine)\n\ttimebox := ui.NewLineEdit(defTime)\n\tbStart := ui.NewButton(\"Start\")\n\tbStop := ui.NewButton(\"Stop\")\n\n\t\/\/ a Stack to keep both buttons at the same size\n\tbtnbox := ui.NewStack(ui.Horizontal, bStart, bStop)\n\tbtnbox.SetStretchy(0)\n\tbtnbox.SetStretchy(1)\n\t\/\/ and a Stack around that Stack to keep them at a reasonable size\n\tbtnbox = ui.NewStack(ui.Horizontal, btnbox)\n\n\t\/\/ the main layout\n\tgrid := ui.NewGrid(2,\n\t\tui.NewLabel(\"Command\"), cmdbox,\n\t\tui.NewLabel(\"Time\"), timebox,\n\t\tui.Space(), ui.Space(),\t\t\/\/ the Space on the right will consume the window blank space\n\t\tui.Space(), btnbox)\n\tgrid.SetStretchy(2, 1)\t\t\t\/\/ make the Space noted above consume\n\tgrid.SetFilling(0, 1)\t\t\t\t\/\/ make the two textboxes grow horizontally\n\tgrid.SetFilling(1, 1)\n\n\terr := w.Open(grid)\n\tif err != nil {\n\t\tui.MsgBoxError(\"wakeup\", \"Error opening window: %v\", err)\n\t\tos.Exit(1)\n\t}\n\nmainloop:\n\tfor {\n\t\tselect {\n\t\tcase <-w.Closing:\n\t\t\tbreak mainloop\n\t\tcase <-bStart.Clicked:\n\t\t\talarmTime, err := time.Parse(timeFmt, timebox.Text())\n\t\t\tif err != nil {\n\t\t\t\tui.MsgBoxError(\"wakeup\",\n\t\t\t\t\t\"Error parsing time %q: %v\\nMake sure your time is in the form %q (without quotes.\",\n\t\t\t\t\ttimebox.Text(), err, timeFmt)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnow := time.Now()\n\t\t\tlater := bestTime(now, alarmTime)\n\t\t\tfmt.Println(later, later.Sub(now))\n\t\t\t\/\/ TODO\n\t\tcase <-bStop.Clicked:\n\t\t\t\/\/ TODO\n\t\t}\n\t}\n}\n\nfunc main() {\n\terr := ui.Go(myMain)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error initializing UI library: %v\", err))\n\t}\n}\n<commit_msg>Added the code to run the timer and removed a stray debugging print line.<commit_after>\/\/ 4 march 2014\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\/\/\t\"os\/exec\"\n\t\"time\"\n\t\"github.com\/andlabs\/ui\"\n)\n\nconst (\n\tdefCmdLine = \"mpv -loop inf ~\/ring.wav\"\n\tdefTime = \"10:00 AM\"\n\ttimeFmt = \"3:04 PM\"\n)\n\n\/\/ If later hasn't happened yet, make it happen on the day of now; if not, the day after.\nfunc bestTime(now time.Time, later time.Time) time.Time {\n\tnow = now.Local()\t\t\/\/ use local time to make things make sense\n\tnowh, nowm, nows := now.Clock()\n\tlaterh, laterm, laters := later.Clock()\n\tadd := false\n\tif nowh > laterh {\n\t\tadd = true\n\t} else if (nowh == laterh) && (nowm > laterm) {\n\t\tadd = true\n\t} else if (nowh == laterh) && (nowm == laterm) && (nows >= laters) {\n\t\t\/\/ >= in the case we're on the exact second; add a day because the alarm should have gone off by now otherwise!\n\t\tadd = true\n\t}\n\tif add {\n\t\tnow = now.AddDate(0, 0, 1)\n\t}\n\treturn time.Date(now.Year(), now.Month(), now.Day(),\n\t\tlaterh, laterm, laters, 0,\n\t\tnow.Location())\n}\n\nfunc myMain() {\n\tvar timer *time.Timer\n\tvar timerChan <-chan time.Time\n\n\tstop := func() {\n\t\t\/\/ TODO stop process\n\t\tif timer != nil {\t\t\/\/ stop the timer if we started it\n\t\t\ttimer.Stop()\n\t\t\ttimer = nil\n\t\t\ttimerChan = nil\n\t\t}\n\t}\n\n\tw := ui.NewWindow(\"wakeup\", 400, 100)\n\tw.Closing = ui.Event()\n\tcmdbox := ui.NewLineEdit(defCmdLine)\n\ttimebox := ui.NewLineEdit(defTime)\n\tbStart := ui.NewButton(\"Start\")\n\tbStop := ui.NewButton(\"Stop\")\n\n\t\/\/ a Stack to keep both buttons at the same size\n\tbtnbox := ui.NewStack(ui.Horizontal, bStart, bStop)\n\tbtnbox.SetStretchy(0)\n\tbtnbox.SetStretchy(1)\n\t\/\/ and a Stack around that Stack to keep them at a reasonable size\n\tbtnbox = ui.NewStack(ui.Horizontal, btnbox)\n\n\t\/\/ the main layout\n\tgrid := ui.NewGrid(2,\n\t\tui.NewLabel(\"Command\"), cmdbox,\n\t\tui.NewLabel(\"Time\"), timebox,\n\t\tui.Space(), ui.Space(),\t\t\/\/ the Space on the right will consume the window blank space\n\t\tui.Space(), btnbox)\n\tgrid.SetStretchy(2, 1)\t\t\t\/\/ make the Space noted above consume\n\tgrid.SetFilling(0, 1)\t\t\t\t\/\/ make the two textboxes grow horizontally\n\tgrid.SetFilling(1, 1)\n\n\terr := w.Open(grid)\n\tif err != nil {\n\t\tui.MsgBoxError(\"wakeup\", \"Error opening window: %v\", err)\n\t\tos.Exit(1)\n\t}\n\nmainloop:\n\tfor {\n\t\tselect {\n\t\tcase <-w.Closing:\n\t\t\tbreak mainloop\n\t\tcase <-bStart.Clicked:\n\t\t\tstop()\t\t\/\/ only one alarm at a time\n\t\t\talarmTime, err := time.Parse(timeFmt, timebox.Text())\n\t\t\tif err != nil {\n\t\t\t\tui.MsgBoxError(\"wakeup\",\n\t\t\t\t\t\"Error parsing time %q: %v\\nMake sure your time is in the form %q (without quotes.\",\n\t\t\t\t\ttimebox.Text(), err, timeFmt)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnow := time.Now()\n\t\t\tlater := bestTime(now, alarmTime)\n\t\t\ttimer = time.NewTimer(later.Sub(now))\n\t\t\ttimerChan = timer.C\n\t\tcase <-timerChan:\n\t\t\tui.MsgBox(\"wakeup\", \"alarm\")\n\t\t\ttimer = nil\n\t\t\ttimerChan = nil\n\t\tcase <-bStop.Clicked:\n\t\t\tstop()\n\t\t\tui.MsgBox(\"wakeup\", \"abort\")\n\t\t}\n\t}\n\n\t\/\/ clean up\n\tstop()\n}\n\nfunc main() {\n\terr := ui.Go(myMain)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error initializing UI library: %v\", err))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n\t\"encoding\/json\"\n\t)\n\n\ntype weatherData struct{\n\tName string `json:\"name\"`\n\tMain struct{\n\t\tKelvin float64 `json:\"temp\"`\n\t} `json:\"main\"`\n}\n\nfunc query(city string) (weatherData, error) {\n\tresp, err := http.Get(\"http:\/\/api.openweathermap.org\/data\/2.5\/weather?q=\" + city)\n\tif err != nil{\n\t\treturn weatherData{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar d weatherData\n\n\tif err := json.NewDecoder(resp.Body).Decode(&d); err != nil{\n\t\treturn weatherData{}, err\n\t}\n\n\treturn d, nil\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/hello\", hello)\n\t\n\thttp.HandleFunc(\"\/weather\/\", func (w http.ResponseWriter, r *http.Request) {\n\t\tcity := strings.SplitN(r.URL.Path, \"\/\", 3)[2]\n\n\t\tdata, err := query(city)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tjson.NewEncoder(w).Encode(data)\n\t})\n\n\thttp.ListenAndServe(\":9999\", nil)\n}\n\nfunc hello(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"hello!\"))\n}\n<commit_msg>Multisource weather service.<commit_after>package main\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n\t\"encoding\/json\"\n\t\"time\"\n\t\"log\"\n\t)\n\ntype weatherProvider interface{\n\ttemperature(city string) (float64, error)\n}\n\ntype openWeatherMap struct{}\n\nfunc (w openWeatherMap) temperature(city string) (float64, error) {\n\tresp, err := http.Get(\"http:\/\/api.openweathermap.org\/data\/2.5\/weather?q=\" + city)\n\tif err != nil{\n\t\treturn 0, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar d struct {\n\t\tMain struct{\n\t\t\tKelvin float64 `json:\"temp\"`\n\t\t} `json:\"main\"`\n\t}\n\n\tif err := json.NewDecoder(resp.Body).Decode(&d); err != nil{\n\t\treturn 0, err\n\t}\n\n\tlog.Printf(\"openWeatherMap: %s: %.2f\", city, d.Main.Kelvin)\n\treturn d.Main.Kelvin, nil\n\n}\n\ntype weatherUnderground struct {\n\tapiKey string\n}\n\nfunc (w weatherUnderground) temperature(city string) (float64, error) {\n\tresp, err := http.Get(\"http:\/\/api.wunderground.com\/api\/\" + w.apiKey + \"\/conditions\/q\/\" + city + \".json\")\n\tif err != nil{\n\t\treturn 0, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar d struct {\n\t\tObservation struct{\n\t\t\tCelsius float64 `json:\"temp_c\"`\n\t\t} `json:\"current_observation\"`\n\t}\n\n\tif err := json.NewDecoder(resp.Body).Decode(&d); err != nil{\n\t\treturn 0, err\n\t}\n\tkelvin := d.Observation.Celsius + 273.15\n\tlog.Printf(\"weatherUnderground: %s: %.2f\", city, kelvin)\n\treturn kelvin, nil\n\n}\n\nfunc temperature(city string, providers ...weatherProvider) (float64,error) {\n\tsum := 0.0\n\tfor _, provider := range providers{\n\t\tk, err := provider.temperature(city)\n\t\tif err != nil{\n\t\t\treturn 0 , err\n\t\t}\n\n\t\tsum += k\n\t}\n\treturn sum \/ float64(len(providers)), nil\n}\n\ntype multiWeatherProvider []weatherProvider\n\nfunc (w multiWeatherProvider) temperature(city string) (float64, error) {\n\tsum := 0.0\n\tfor _, provider := range w{\n\t\tk, err := provider.temperature(city)\n\t\tif err != nil{\n\t\t\treturn 0 , err\n\t\t}\n\n\t\tsum += k\n\t}\n\treturn sum \/ float64(len(w)), nil\n}\n\nfunc main() {\n\tmw := multiWeatherProvider{\n\t\topenWeatherMap{},\n\t\tweatherUnderground{apiKey: \"api______key\"},\n\t}\n\n\thttp.HandleFunc(\"\/weather\/\", func (w http.ResponseWriter, r *http.Request) {\n\t\tbegin := time.Now()\n\t\tcity := strings.SplitN(r.URL.Path, \"\/\", 3)[2]\n\n\t\ttemp, err := mw.temperature(city)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\"city\": city,\n\t\t\t\"temp\": temp,\n\t\t\t\"took\": time.Since(begin).String(),\n\t\t\t})\n\t})\n\n\thttp.ListenAndServe(\":9999\", nil)\n}\n\nfunc hello(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"hello!\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tcli \"github.com\/codegangsta\/cli\"\n\trw \"github.com\/whyrusleeping\/gx-go\/rewrite\"\n\tgx \"github.com\/whyrusleeping\/gx\/gxutil\"\n\t. \"github.com\/whyrusleeping\/stump\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gx-go-tool\"\n\tapp.Author = \"whyrusleeping\"\n\tapp.Usage = \"gx extensions for golang\"\n\tapp.Version = \"0.2.0\"\n\n\tvar UpdateCommand = cli.Command{\n\t\tName: \"update\",\n\t\tUsage: \"update a packages imports to a new path\",\n\t\tArgsUsage: \"[old import] [new import]\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tif len(c.Args()) < 2 {\n\t\t\t\tfmt.Println(\"must specify current and new import names\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toldimp := c.Args()[0]\n\t\t\tnewimp := c.Args()[1]\n\n\t\t\terr := doUpdate(oldimp, newimp)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tvar ImportCommand = cli.Command{\n\t\tName: \"import\",\n\t\tUsage: \"import a go package and all its depencies into gx\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"rewrite\",\n\t\t\t\tUsage: \"rewrite import paths to use vendored packages\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"yesall\",\n\t\t\t\tUsage: \"assume defaults for all options\",\n\t\t\t},\n\t\t},\n\t\tAction: func(c *cli.Context) {\n\t\t\timporter, err := NewImporter(c.Bool(\"rewrite\"))\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\timporter.yesall = c.Bool(\"yesall\")\n\n\t\t\tif !c.Args().Present() {\n\t\t\t\tFatal(\"must specify a package name\")\n\t\t\t}\n\n\t\t\tpkg := c.Args().First()\n\t\t\tLog(\"vendoring package %s\", pkg)\n\n\t\t\t_, err = importer.GxPublishGoPackage(pkg)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tvar PathCommand = cli.Command{\n\t\tName: \"path\",\n\t\tUsage: \"prints the import path of the current package within GOPATH\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tgopath := os.Getenv(\"GOPATH\")\n\t\t\tif gopath == \"\" {\n\t\t\t\tFatal(\"GOPATH not set, cannot derive import path\")\n\t\t\t}\n\n\t\t\tcwd, err := os.Getwd()\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\tsrcdir := path.Join(gopath, \"src\")\n\t\t\tsrcdir += \"\/\"\n\n\t\t\tif !strings.HasPrefix(cwd, srcdir) {\n\t\t\t\tFatal(\"package not within GOPATH\/src\")\n\t\t\t}\n\n\t\t\trel := cwd[len(srcdir):]\n\t\t\tfmt.Println(rel)\n\t\t},\n\t}\n\n\tvar HookCommand = cli.Command{\n\t\tName: \"hook\",\n\t\tUsage: \"go specific hooks to be called by the gx tool\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tif !c.Args().Present() {\n\t\t\t\tFatal(\"no hook specified!\")\n\t\t\t}\n\t\t\tsub := c.Args().First()\n\n\t\t\tpkg, err := gx.LoadPackageFile(gx.PkgFileName)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\tswitch sub {\n\t\t\tcase \"post-import\":\n\t\t\t\terr := postImportHook(pkg, c.Args().Tail())\n\t\t\t\tif err != nil {\n\t\t\t\t\tFatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\tUpdateCommand,\n\t\tImportCommand,\n\t\tPathCommand,\n\t\tHookCommand,\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc doUpdate(oldimp, newimp string) error {\n\tcurpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting working dir: \", err)\n\t}\n\n\trwf := func(in string) string {\n\t\tif in == oldimp {\n\t\t\treturn newimp\n\t\t}\n\t\treturn in\n\t}\n\n\tfilter := func(in string) bool {\n\t\treturn strings.HasSuffix(in, \".go\")\n\t}\n\n\treturn rw.RewriteImports(curpath, rwf, filter)\n}\n\nfunc pathIsNotStdlib(path string) bool {\n\tfirst := strings.Split(path, \"\/\")[0]\n\n\tif len(strings.Split(first, \".\")) > 1 {\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype Importer struct {\n\tpkgs map[string]*gx.Dependency\n\tgopath string\n\tpm *gx.PM\n\trewrite bool\n\tyesall bool\n}\n\nfunc NewImporter(rw bool) (*Importer, error) {\n\tgp, err := getGoPath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcfg, err := gx.LoadConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Importer{\n\t\tpkgs: make(map[string]*gx.Dependency),\n\t\tgopath: gp,\n\t\tpm: gx.NewPM(cfg),\n\t\trewrite: rw,\n\t}, nil\n}\n\nfunc getGoPath() (string, error) {\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\treturn \"\", errors.New(\"gopath not set\")\n\t}\n\treturn gopath, nil\n}\n\nfunc (i *Importer) GxPublishGoPackage(imppath string) (*gx.Dependency, error) {\n\tif d, ok := i.pkgs[imppath]; ok {\n\t\treturn d, nil\n\t}\n\n\t\/\/ make sure its local\n\terr := GoGet(imppath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpkgpath := path.Join(i.gopath, \"src\", imppath)\n\tpkgFilePath := path.Join(pkgpath, gx.PkgFileName)\n\tpkg, err := gx.LoadPackageFile(pkgFilePath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ init as gx package\n\t\tparts := strings.Split(imppath, \"\/\")\n\t\tpkgname := parts[len(parts)-1]\n\t\tif !i.yesall {\n\t\t\tp := fmt.Sprintf(\"enter name for import '%s'\", imppath)\n\t\t\tnname, err := prompt(p, pkgname)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpkgname = nname\n\t\t}\n\n\t\terr = i.pm.InitPkg(pkgpath, pkgname, \"go\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkg, err = gx.LoadPackageFile(pkgFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ recurse!\n\tgopkg, err := build.Import(imppath, \"\", 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar depsToVendor []string\n\n\tfor _, child := range gopkg.Imports {\n\t\tif pathIsNotStdlib(child) {\n\t\t\tdepsToVendor = append(depsToVendor, child)\n\t\t}\n\t}\n\n\tfor n, child := range depsToVendor {\n\t\tLog(\"- processing dep %s for %s [%d \/ %d]\", child, imppath, n+1, len(depsToVendor))\n\t\tchilddep, err := i.GxPublishGoPackage(child)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkg.Dependencies = append(pkg.Dependencies, childdep)\n\t}\n\n\terr = gx.SavePackageFile(pkg, pkgFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif i.rewrite {\n\t\tfullpkgpath, err := filepath.Abs(pkgpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = i.rewriteImports(fullpkgpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thash, err := i.pm.PublishPackage(pkgpath, pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tLog(\"published %s as %s\", imppath, hash)\n\n\tdep := &gx.Dependency{\n\t\tHash: hash,\n\t\tName: pkg.Name,\n\t\tVersion: pkg.Version,\n\t}\n\ti.pkgs[imppath] = dep\n\treturn dep, nil\n}\n\nfunc (i *Importer) rewriteImports(pkgpath string) error {\n\n\tfilter := func(p string) bool {\n\t\treturn !strings.HasPrefix(p, \"vendor\") &&\n\t\t\t!strings.HasPrefix(p, \".git\") &&\n\t\t\tstrings.HasSuffix(p, \".go\")\n\t}\n\n\trwf := func(in string) string {\n\t\tdep, ok := i.pkgs[in]\n\t\tif !ok {\n\t\t\treturn in\n\t\t}\n\n\t\treturn dep.Hash + \"\/\" + dep.Name\n\t}\n\n\treturn rw.RewriteImports(pkgpath, rwf, filter)\n}\n\n\/\/ TODO: take an option to grab packages from local GOPATH\nfunc GoGet(path string) error {\n\tout, err := exec.Command(\"go\", \"get\", path).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go get failed: %s - %s\", string(out), err)\n\t}\n\treturn nil\n}\n\nfunc prompt(text, def string) (string, error) {\n\tscan := bufio.NewScanner(os.Stdin)\n\tfmt.Printf(\"%s (default: '%s') \", text, def)\n\tfor scan.Scan() {\n\t\tif scan.Text() != \"\" {\n\t\t\treturn scan.Text(), nil\n\t\t}\n\t\treturn def, nil\n\t}\n\n\treturn \"\", scan.Err()\n}\n\nfunc yesNoPrompt(prompt string, def bool) bool {\n\topts := \"[y\/N]\"\n\tif def {\n\t\topts = \"[Y\/n]\"\n\t}\n\n\tfmt.Printf(\"%s %s \", prompt, opts)\n\tscan := bufio.NewScanner(os.Stdin)\n\tfor scan.Scan() {\n\t\tval := strings.ToLower(scan.Text())\n\t\tswitch val {\n\t\tcase \"\":\n\t\t\treturn def\n\t\tcase \"y\":\n\t\t\treturn true\n\t\tcase \"n\":\n\t\t\treturn false\n\t\tdefault:\n\t\t\tfmt.Println(\"please type 'y' or 'n'\")\n\t\t}\n\t}\n\n\tpanic(\"unexpected termination of stdin\")\n}\n\nfunc postImportHook(pkg *gx.Package, args []string) error {\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"post-import hook: argument expected\")\n\t}\n\n\tnpkgHash := args[0]\n\tnpkgPath := filepath.Join(\"vendor\", npkgHash)\n\n\tnpkg, err := gx.LoadPackageFile(npkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif npkg.Go != nil && npkg.Go.DvcsImport != \"\" {\n\t\tq := fmt.Sprintf(\"update imports of %s to the newly imported package?\", npkg.Go.DvcsImport)\n\t\tif yesNoPrompt(q, false) {\n\t\t\tnimp := fmt.Sprintf(\"%s\/%s\", npkgHash, npkg.Name)\n\t\t\terr := doUpdate(npkg.Go.DvcsImport, nimp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>fix post import hook<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tcli \"github.com\/codegangsta\/cli\"\n\trw \"github.com\/whyrusleeping\/gx-go\/rewrite\"\n\tgx \"github.com\/whyrusleeping\/gx\/gxutil\"\n\t. \"github.com\/whyrusleeping\/stump\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gx-go-tool\"\n\tapp.Author = \"whyrusleeping\"\n\tapp.Usage = \"gx extensions for golang\"\n\tapp.Version = \"0.2.0\"\n\n\tvar UpdateCommand = cli.Command{\n\t\tName: \"update\",\n\t\tUsage: \"update a packages imports to a new path\",\n\t\tArgsUsage: \"[old import] [new import]\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tif len(c.Args()) < 2 {\n\t\t\t\tfmt.Println(\"must specify current and new import names\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toldimp := c.Args()[0]\n\t\t\tnewimp := c.Args()[1]\n\n\t\t\terr := doUpdate(oldimp, newimp)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tvar ImportCommand = cli.Command{\n\t\tName: \"import\",\n\t\tUsage: \"import a go package and all its depencies into gx\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"rewrite\",\n\t\t\t\tUsage: \"rewrite import paths to use vendored packages\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"yesall\",\n\t\t\t\tUsage: \"assume defaults for all options\",\n\t\t\t},\n\t\t},\n\t\tAction: func(c *cli.Context) {\n\t\t\timporter, err := NewImporter(c.Bool(\"rewrite\"))\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\timporter.yesall = c.Bool(\"yesall\")\n\n\t\t\tif !c.Args().Present() {\n\t\t\t\tFatal(\"must specify a package name\")\n\t\t\t}\n\n\t\t\tpkg := c.Args().First()\n\t\t\tLog(\"vendoring package %s\", pkg)\n\n\t\t\t_, err = importer.GxPublishGoPackage(pkg)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tvar PathCommand = cli.Command{\n\t\tName: \"path\",\n\t\tUsage: \"prints the import path of the current package within GOPATH\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tgopath := os.Getenv(\"GOPATH\")\n\t\t\tif gopath == \"\" {\n\t\t\t\tFatal(\"GOPATH not set, cannot derive import path\")\n\t\t\t}\n\n\t\t\tcwd, err := os.Getwd()\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\tsrcdir := path.Join(gopath, \"src\")\n\t\t\tsrcdir += \"\/\"\n\n\t\t\tif !strings.HasPrefix(cwd, srcdir) {\n\t\t\t\tFatal(\"package not within GOPATH\/src\")\n\t\t\t}\n\n\t\t\trel := cwd[len(srcdir):]\n\t\t\tfmt.Println(rel)\n\t\t},\n\t}\n\n\tvar HookCommand = cli.Command{\n\t\tName: \"hook\",\n\t\tUsage: \"go specific hooks to be called by the gx tool\",\n\t\tAction: func(c *cli.Context) {\n\t\t\tif !c.Args().Present() {\n\t\t\t\tFatal(\"no hook specified!\")\n\t\t\t}\n\t\t\tsub := c.Args().First()\n\n\t\t\tpkg, err := gx.LoadPackageFile(gx.PkgFileName)\n\t\t\tif err != nil {\n\t\t\t\tFatal(err)\n\t\t\t}\n\n\t\t\tswitch sub {\n\t\t\tcase \"post-import\":\n\t\t\t\terr := postImportHook(pkg, c.Args().Tail())\n\t\t\t\tif err != nil {\n\t\t\t\t\tFatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\tUpdateCommand,\n\t\tImportCommand,\n\t\tPathCommand,\n\t\tHookCommand,\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc doUpdate(oldimp, newimp string) error {\n\tcurpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting working dir: \", err)\n\t}\n\n\trwf := func(in string) string {\n\t\tif in == oldimp {\n\t\t\treturn newimp\n\t\t}\n\t\treturn in\n\t}\n\n\tfilter := func(in string) bool {\n\t\treturn strings.HasSuffix(in, \".go\")\n\t}\n\n\treturn rw.RewriteImports(curpath, rwf, filter)\n}\n\nfunc pathIsNotStdlib(path string) bool {\n\tfirst := strings.Split(path, \"\/\")[0]\n\n\tif len(strings.Split(first, \".\")) > 1 {\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype Importer struct {\n\tpkgs map[string]*gx.Dependency\n\tgopath string\n\tpm *gx.PM\n\trewrite bool\n\tyesall bool\n}\n\nfunc NewImporter(rw bool) (*Importer, error) {\n\tgp, err := getGoPath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcfg, err := gx.LoadConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Importer{\n\t\tpkgs: make(map[string]*gx.Dependency),\n\t\tgopath: gp,\n\t\tpm: gx.NewPM(cfg),\n\t\trewrite: rw,\n\t}, nil\n}\n\nfunc getGoPath() (string, error) {\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\treturn \"\", errors.New(\"gopath not set\")\n\t}\n\treturn gopath, nil\n}\n\nfunc (i *Importer) GxPublishGoPackage(imppath string) (*gx.Dependency, error) {\n\tif d, ok := i.pkgs[imppath]; ok {\n\t\treturn d, nil\n\t}\n\n\t\/\/ make sure its local\n\terr := GoGet(imppath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpkgpath := path.Join(i.gopath, \"src\", imppath)\n\tpkgFilePath := path.Join(pkgpath, gx.PkgFileName)\n\tpkg, err := gx.LoadPackageFile(pkgFilePath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ init as gx package\n\t\tparts := strings.Split(imppath, \"\/\")\n\t\tpkgname := parts[len(parts)-1]\n\t\tif !i.yesall {\n\t\t\tp := fmt.Sprintf(\"enter name for import '%s'\", imppath)\n\t\t\tnname, err := prompt(p, pkgname)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpkgname = nname\n\t\t}\n\n\t\terr = i.pm.InitPkg(pkgpath, pkgname, \"go\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkg, err = gx.LoadPackageFile(pkgFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ recurse!\n\tgopkg, err := build.Import(imppath, \"\", 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar depsToVendor []string\n\n\tfor _, child := range gopkg.Imports {\n\t\tif pathIsNotStdlib(child) {\n\t\t\tdepsToVendor = append(depsToVendor, child)\n\t\t}\n\t}\n\n\tfor n, child := range depsToVendor {\n\t\tLog(\"- processing dep %s for %s [%d \/ %d]\", child, imppath, n+1, len(depsToVendor))\n\t\tchilddep, err := i.GxPublishGoPackage(child)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkg.Dependencies = append(pkg.Dependencies, childdep)\n\t}\n\n\terr = gx.SavePackageFile(pkg, pkgFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif i.rewrite {\n\t\tfullpkgpath, err := filepath.Abs(pkgpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = i.rewriteImports(fullpkgpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thash, err := i.pm.PublishPackage(pkgpath, pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tLog(\"published %s as %s\", imppath, hash)\n\n\tdep := &gx.Dependency{\n\t\tHash: hash,\n\t\tName: pkg.Name,\n\t\tVersion: pkg.Version,\n\t}\n\ti.pkgs[imppath] = dep\n\treturn dep, nil\n}\n\nfunc (i *Importer) rewriteImports(pkgpath string) error {\n\n\tfilter := func(p string) bool {\n\t\treturn !strings.HasPrefix(p, \"vendor\") &&\n\t\t\t!strings.HasPrefix(p, \".git\") &&\n\t\t\tstrings.HasSuffix(p, \".go\")\n\t}\n\n\trwf := func(in string) string {\n\t\tdep, ok := i.pkgs[in]\n\t\tif !ok {\n\t\t\treturn in\n\t\t}\n\n\t\treturn dep.Hash + \"\/\" + dep.Name\n\t}\n\n\treturn rw.RewriteImports(pkgpath, rwf, filter)\n}\n\n\/\/ TODO: take an option to grab packages from local GOPATH\nfunc GoGet(path string) error {\n\tout, err := exec.Command(\"go\", \"get\", path).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go get failed: %s - %s\", string(out), err)\n\t}\n\treturn nil\n}\n\nfunc prompt(text, def string) (string, error) {\n\tscan := bufio.NewScanner(os.Stdin)\n\tfmt.Printf(\"%s (default: '%s') \", text, def)\n\tfor scan.Scan() {\n\t\tif scan.Text() != \"\" {\n\t\t\treturn scan.Text(), nil\n\t\t}\n\t\treturn def, nil\n\t}\n\n\treturn \"\", scan.Err()\n}\n\nfunc yesNoPrompt(prompt string, def bool) bool {\n\topts := \"[y\/N]\"\n\tif def {\n\t\topts = \"[Y\/n]\"\n\t}\n\n\tfmt.Printf(\"%s %s \", prompt, opts)\n\tscan := bufio.NewScanner(os.Stdin)\n\tfor scan.Scan() {\n\t\tval := strings.ToLower(scan.Text())\n\t\tswitch val {\n\t\tcase \"\":\n\t\t\treturn def\n\t\tcase \"y\":\n\t\t\treturn true\n\t\tcase \"n\":\n\t\t\treturn false\n\t\tdefault:\n\t\t\tfmt.Println(\"please type 'y' or 'n'\")\n\t\t}\n\t}\n\n\tpanic(\"unexpected termination of stdin\")\n}\n\nfunc postImportHook(pkg *gx.Package, args []string) error {\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"post-import hook: argument expected\")\n\t}\n\n\tnpkgHash := args[0]\n\tnpkgPath := filepath.Join(\"vendor\", npkgHash)\n\n\tnpkg, err := gx.FindPackageInDir(npkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif npkg.Go != nil && npkg.Go.DvcsImport != \"\" {\n\t\tq := fmt.Sprintf(\"update imports of %s to the newly imported package?\", npkg.Go.DvcsImport)\n\t\tif yesNoPrompt(q, false) {\n\t\t\tnimp := fmt.Sprintf(\"%s\/%s\", npkgHash, npkg.Name)\n\t\t\terr := doUpdate(npkg.Go.DvcsImport, nimp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 realglobe, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/account\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/authcode\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/consent\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/coopcode\"\n\tjtidb \"github.com\/realglobe-Inc\/edo-id-provider\/database\/jti\"\n\tkeydb \"github.com\/realglobe-Inc\/edo-id-provider\/database\/key\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/pairwise\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/sector\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/session\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/token\"\n\tidpdb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/idp\"\n\ttadb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/ta\"\n\twebdb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/web\"\n\tidperr \"github.com\/realglobe-Inc\/edo-idp-selector\/error\"\n\t\"github.com\/realglobe-Inc\/edo-lib\/driver\"\n\tlogutil \"github.com\/realglobe-Inc\/edo-lib\/log\"\n\t\"github.com\/realglobe-Inc\/edo-lib\/server\"\n\t\"github.com\/realglobe-Inc\/go-lib\/erro\"\n\t\"github.com\/realglobe-Inc\/go-lib\/rglog\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc main() {\n\tvar exitCode = 0\n\tdefer func() {\n\t\tif exitCode != 0 {\n\t\t\tos.Exit(exitCode)\n\t\t}\n\t}()\n\n\tdefer rglog.Flush()\n\n\tlogutil.InitConsole(\"github.com\/realglobe-Inc\")\n\n\tparam, err := parseParameters(os.Args...)\n\tif err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tlogutil.SetupConsole(\"github.com\/realglobe-Inc\", param.consLv)\n\tif err := logutil.Setup(\"github.com\/realglobe-Inc\", param.logType, param.logLv, param); err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tif err := serve(param); err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tlog.Info(\"Shut down\")\n}\n\nfunc serve(param *parameters) (err error) {\n\n\t\/\/ バックエンドの準備。\n\n\tredPools := driver.NewRedisPoolSet(param.redTimeout, param.redPoolSize, param.redPoolExpIn)\n\tdefer redPools.Close()\n\tmonPools := driver.NewMongoPoolSet(param.monTimeout)\n\tdefer monPools.Close()\n\n\t\/\/ 鍵。\n\tvar keyDb keydb.Db\n\tswitch param.keyDbType {\n\tcase \"file\":\n\t\tkeyDb = keydb.NewFileDb(param.keyDbPath)\n\t\tlog.Info(\"Use keys in directory \" + param.keyDbPath)\n\tcase \"redis\":\n\t\tkeyDb = keydb.NewRedisCache(keydb.NewFileDb(param.keyDbPath), redPools.Get(param.keyDbAddr), param.keyDbTag+\".\"+param.selfId, param.keyDbExpIn)\n\t\tlog.Info(\"Use keys in directory \" + param.keyDbPath + \" with redis \" + param.keyDbAddr + \": \" + param.keyDbTag + \".\" + param.selfId)\n\tdefault:\n\t\treturn erro.New(\"invalid key DB type \" + param.keyDbType)\n\t}\n\n\t\/\/ アカウント情報。\n\tvar acntDb account.Db\n\tswitch param.acntDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.acntDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tacntDb = account.NewMongoDb(pool, param.acntDbTag, param.acntDbTag2)\n\t\tlog.Info(\"Use account info in mongodb \" + param.acntDbAddr + \": \" + param.acntDbTag + \".\" + param.acntDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid account DB type \" + param.acntDbType)\n\t}\n\n\t\/\/ スコープ・属性許可情報。\n\tvar consDb consent.Db\n\tswitch param.consDbType {\n\tcase \"memory\":\n\t\tconsDb = consent.NewMemoryDb()\n\t\tlog.Info(\"Save consent info in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.consDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tconsDb = consent.NewMongoDb(pool, param.consDbTag, param.consDbTag2)\n\t\tlog.Info(\"Save consent info in mongodb \" + param.consDbAddr + \": \" + param.consDbTag + \".\" + param.consDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid consent DB type \" + param.consDbType)\n\t}\n\n\t\/\/ web データ。\n\tvar webDb webdb.Db\n\tswitch param.webDbType {\n\tcase \"direct\":\n\t\twebDb = webdb.NewDirectDb()\n\t\tlog.Info(\"Get web data directly\")\n\tcase \"redis\":\n\t\twebDb = webdb.NewRedisCache(webdb.NewDirectDb(), redPools.Get(param.webDbAddr), param.webDbTag, param.webDbExpIn)\n\t\tlog.Info(\"Get web data with redis \" + param.webDbAddr + \": \" + param.webDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid web data DB type \" + param.webDbType)\n\t}\n\n\t\/\/ TA 情報。\n\tvar taDb tadb.Db\n\tswitch param.taDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.taDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\ttaDb = tadb.NewMongoDb(pool, param.taDbTag, param.taDbTag2, webDb)\n\t\tlog.Info(\"Use TA info in mongodb \" + param.taDbAddr + \": \" + param.taDbTag + \".\" + param.taDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid TA DB type \" + param.taDbType)\n\t}\n\n\t\/\/ セクタ固有のアカウント ID の計算に使う情報。\n\tvar sectDb sector.Db\n\tswitch param.sectDbType {\n\tcase \"memory\":\n\t\tsectDb = sector.NewMemoryDb()\n\t\tlog.Info(\"Save pairwise account ID calculation info in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.sectDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tsectDb = sector.NewMongoDb(pool, param.sectDbTag, param.sectDbTag2)\n\t\tlog.Info(\"Save pairwise account ID calculation info in mongodb \" + param.sectDbAddr + \": \" + param.sectDbTag + \".\" + param.sectDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid pairwise account ID calculation info DB type \" + param.sectDbType)\n\t}\n\n\t\/\/ セクタ固有のアカウント ID 情報。\n\tvar pwDb pairwise.Db\n\tswitch param.pwDbType {\n\tcase \"memory\":\n\t\tpwDb = pairwise.NewMemoryDb()\n\t\tlog.Info(\"Save pairwise account IDs in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.pwDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tpwDb = pairwise.NewMongoDb(pool, param.pwDbTag, param.pwDbTag2)\n\t\tlog.Info(\"Save pairwise account IDs in mongodb \" + param.pwDbAddr + \": \" + param.pwDbTag + \".\" + param.pwDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid pairwise account ID DB type \" + param.pwDbType)\n\t}\n\n\t\/\/ IdP 情報。\n\tvar idpDb idpdb.Db\n\tswitch param.idpDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.idpDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tidpDb = idpdb.NewMongoDb(pool, param.idpDbTag, param.idpDbTag2, webDb)\n\t\tlog.Info(\"Use IdP info in mongodb \" + param.idpDbAddr + \": \" + param.idpDbTag + \".\" + param.idpDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid IdP DB type \" + param.idpDbType)\n\t}\n\n\t\/\/ セッション。\n\tvar sessDb session.Db\n\tswitch param.sessDbType {\n\tcase \"memory\":\n\t\tsessDb = session.NewMemoryDb()\n\t\tlog.Info(\"Save sessions in memory\")\n\tcase \"redis\":\n\t\tsessDb = session.NewRedisDb(redPools.Get(param.sessDbAddr), param.sessDbTag)\n\t\tlog.Info(\"Save sessions in redis \" + param.sessDbAddr + \": \" + param.sessDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid session DB type \" + param.sessDbType)\n\t}\n\n\t\/\/ 認可コード。\n\tvar acodDb authcode.Db\n\tswitch param.acodDbType {\n\tcase \"memory\":\n\t\tacodDb = authcode.NewMemoryDb()\n\t\tlog.Info(\"Save authorization codes in memory\")\n\tcase \"redis\":\n\t\tacodDb = authcode.NewRedisDb(redPools.Get(param.acodDbAddr), param.acodDbTag)\n\t\tlog.Info(\"Save authorization codes in redis \" + param.acodDbAddr + \": \" + param.acodDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid authorization code DB type \" + param.acodDbType)\n\t}\n\n\t\/\/ アクセストークン。\n\tvar tokDb token.Db\n\tswitch param.tokDbType {\n\tcase \"memory\":\n\t\ttokDb = token.NewMemoryDb()\n\t\tlog.Info(\"Save access tokens in memory\")\n\tcase \"redis\":\n\t\ttokDb = token.NewRedisDb(redPools.Get(param.tokDbAddr), param.tokDbTag)\n\t\tlog.Info(\"Save access tokens in redis \" + param.tokDbAddr + \": \" + param.tokDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid access token DB type \" + param.tokDbType)\n\t}\n\n\t\/\/ 仲介コード。\n\tvar ccodDb coopcode.Db\n\tswitch param.ccodDbType {\n\tcase \"memory\":\n\t\tccodDb = coopcode.NewMemoryDb()\n\t\tlog.Info(\"Save cooperation codes in memory\")\n\tcase \"redis\":\n\t\tccodDb = coopcode.NewRedisDb(redPools.Get(param.ccodDbAddr), param.ccodDbTag)\n\t\tlog.Info(\"Save cooperation codes in redis \" + param.ccodDbAddr + \": \" + param.ccodDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid cooperation code DB type \" + param.ccodDbType)\n\t}\n\n\t\/\/ JWT の ID。\n\tvar jtiDb jtidb.Db\n\tswitch param.jtiDbType {\n\tcase \"memory\":\n\t\tjtiDb = jtidb.NewMemoryDb()\n\t\tlog.Info(\"Save JWT IDs in memory\")\n\tcase \"redis\":\n\t\tjtiDb = jtidb.NewRedisDb(redPools.Get(param.jtiDbAddr), param.jtiDbTag)\n\t\tlog.Info(\"Save JWT IDs in redis \" + param.jtiDbAddr + \": \" + param.jtiDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid JWT ID DB type \" + param.jtiDbType)\n\t}\n\n\tsys := &system{\n\t\tparam.selfId,\n\t\tparam.sigAlg,\n\t\tparam.sigKid,\n\n\t\tparam.pathTok,\n\t\tparam.pathTa,\n\t\tparam.pathSelUi,\n\t\tparam.pathLginUi,\n\t\tparam.pathConsUi,\n\t\tparam.pathErrUi,\n\n\t\tparam.pwSaltLen,\n\t\tparam.sessLen,\n\t\tparam.sessExpIn,\n\t\tparam.sessRefDelay,\n\t\tparam.sessDbExpIn,\n\t\tparam.acodLen,\n\t\tparam.acodExpIn,\n\t\tparam.acodDbExpIn,\n\t\tparam.tokLen,\n\t\tparam.tokExpIn,\n\t\tparam.tokDbExpIn,\n\t\tparam.ccodLen,\n\t\tparam.ccodExpIn,\n\t\tparam.ccodDbExpIn,\n\t\tparam.jtiLen,\n\t\tparam.jtiExpIn,\n\t\tparam.jtiDbExpIn,\n\t\tparam.ticLen,\n\n\t\tkeyDb,\n\t\twebDb,\n\t\tacntDb,\n\t\tconsDb,\n\t\ttaDb,\n\t\tsectDb,\n\t\tpwDb,\n\t\tidpDb,\n\t\tsessDb,\n\t\tacodDb,\n\t\ttokDb,\n\t\tccodDb,\n\t\tjtiDb,\n\n\t\tparam.cookPath,\n\t\tparam.cookSec,\n\t}\n\n\t\/\/ バックエンドの準備完了。\n\n\ts := server.NewStopper()\n\tdefer func() {\n\t\t\/\/ 処理の終了待ち。\n\t\ts.Lock()\n\t\tdefer s.Unlock()\n\t\tfor s.Stopped() {\n\t\t\ts.Wait()\n\t\t}\n\t}()\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"\/\", panicErrorWrapper(s, func(w http.ResponseWriter, r *http.Request) error {\n\t\treturn erro.Wrap(idperr.New(idperr.Invalid_request, \"invalid endpoint\", http.StatusNotFound, nil))\n\t}))\n\tmux.HandleFunc(param.pathOk, panicErrorWrapper(s, func(w http.ResponseWriter, r *http.Request) error {\n\t\treturn nil\n\t}))\n\tmux.HandleFunc(param.pathAuth, panicErrorWrapper(s, sys.authPage))\n\tmux.HandleFunc(param.pathSel, panicErrorWrapper(s, sys.selectPage))\n\tmux.HandleFunc(param.pathLgin, panicErrorWrapper(s, sys.lginPage))\n\tmux.HandleFunc(param.pathCons, panicErrorWrapper(s, sys.consentPage))\n\tmux.HandleFunc(param.pathTa, panicErrorWrapper(s, sys.taApiHandler().ServeHTTP))\n\tmux.HandleFunc(param.pathTok, panicErrorWrapper(s, sys.tokenApi))\n\tmux.HandleFunc(param.pathAcnt, panicErrorWrapper(s, sys.accountApi))\n\tmux.HandleFunc(param.pathCoopFr, panicErrorWrapper(s, sys.cooperateFromApi))\n\tmux.HandleFunc(param.pathCoopTo, panicErrorWrapper(s, sys.cooperateToApi))\n\tif param.uiDir != \"\" {\n\t\t\/\/ ファイル配信も自前でやる。\n\t\tpathUi := strings.TrimRight(param.pathUi, \"\/\") + \"\/\"\n\t\tfiler := http.StripPrefix(pathUi, http.FileServer(http.Dir(param.uiDir)))\n\t\tmux.Handle(pathUi, filer)\n\t}\n\n\treturn server.Serve(param, mux)\n}\n<commit_msg>不要な変数を削除<commit_after>\/\/ Copyright 2015 realglobe, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/account\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/authcode\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/consent\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/coopcode\"\n\tjtidb \"github.com\/realglobe-Inc\/edo-id-provider\/database\/jti\"\n\tkeydb \"github.com\/realglobe-Inc\/edo-id-provider\/database\/key\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/pairwise\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/sector\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/session\"\n\t\"github.com\/realglobe-Inc\/edo-id-provider\/database\/token\"\n\tidpdb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/idp\"\n\ttadb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/ta\"\n\twebdb \"github.com\/realglobe-Inc\/edo-idp-selector\/database\/web\"\n\tidperr \"github.com\/realglobe-Inc\/edo-idp-selector\/error\"\n\t\"github.com\/realglobe-Inc\/edo-lib\/driver\"\n\tlogutil \"github.com\/realglobe-Inc\/edo-lib\/log\"\n\t\"github.com\/realglobe-Inc\/edo-lib\/server\"\n\t\"github.com\/realglobe-Inc\/go-lib\/erro\"\n\t\"github.com\/realglobe-Inc\/go-lib\/rglog\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc main() {\n\tvar exitCode = 0\n\tdefer func() {\n\t\tif exitCode != 0 {\n\t\t\tos.Exit(exitCode)\n\t\t}\n\t}()\n\n\tdefer rglog.Flush()\n\n\tlogutil.InitConsole(\"github.com\/realglobe-Inc\")\n\n\tparam, err := parseParameters(os.Args...)\n\tif err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tlogutil.SetupConsole(\"github.com\/realglobe-Inc\", param.consLv)\n\tif err := logutil.Setup(\"github.com\/realglobe-Inc\", param.logType, param.logLv, param); err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tif err := serve(param); err != nil {\n\t\tlog.Err(erro.Unwrap(err))\n\t\tlog.Debug(erro.Wrap(err))\n\t\texitCode = 1\n\t\treturn\n\t}\n\n\tlog.Info(\"Shut down\")\n}\n\nfunc serve(param *parameters) (err error) {\n\n\t\/\/ バックエンドの準備。\n\n\tredPools := driver.NewRedisPoolSet(param.redTimeout, param.redPoolSize, param.redPoolExpIn)\n\tdefer redPools.Close()\n\tmonPools := driver.NewMongoPoolSet(param.monTimeout)\n\tdefer monPools.Close()\n\n\t\/\/ 鍵。\n\tvar keyDb keydb.Db\n\tswitch param.keyDbType {\n\tcase \"file\":\n\t\tkeyDb = keydb.NewFileDb(param.keyDbPath)\n\t\tlog.Info(\"Use keys in directory \" + param.keyDbPath)\n\tcase \"redis\":\n\t\tkeyDb = keydb.NewRedisCache(keydb.NewFileDb(param.keyDbPath), redPools.Get(param.keyDbAddr), param.keyDbTag+\".\"+param.selfId, param.keyDbExpIn)\n\t\tlog.Info(\"Use keys in directory \" + param.keyDbPath + \" with redis \" + param.keyDbAddr + \": \" + param.keyDbTag + \".\" + param.selfId)\n\tdefault:\n\t\treturn erro.New(\"invalid key DB type \" + param.keyDbType)\n\t}\n\n\t\/\/ アカウント情報。\n\tvar acntDb account.Db\n\tswitch param.acntDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.acntDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tacntDb = account.NewMongoDb(pool, param.acntDbTag, param.acntDbTag2)\n\t\tlog.Info(\"Use account info in mongodb \" + param.acntDbAddr + \": \" + param.acntDbTag + \".\" + param.acntDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid account DB type \" + param.acntDbType)\n\t}\n\n\t\/\/ スコープ・属性許可情報。\n\tvar consDb consent.Db\n\tswitch param.consDbType {\n\tcase \"memory\":\n\t\tconsDb = consent.NewMemoryDb()\n\t\tlog.Info(\"Save consent info in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.consDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tconsDb = consent.NewMongoDb(pool, param.consDbTag, param.consDbTag2)\n\t\tlog.Info(\"Save consent info in mongodb \" + param.consDbAddr + \": \" + param.consDbTag + \".\" + param.consDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid consent DB type \" + param.consDbType)\n\t}\n\n\t\/\/ web データ。\n\tvar webDb webdb.Db\n\tswitch param.webDbType {\n\tcase \"direct\":\n\t\twebDb = webdb.NewDirectDb()\n\t\tlog.Info(\"Get web data directly\")\n\tcase \"redis\":\n\t\twebDb = webdb.NewRedisCache(webdb.NewDirectDb(), redPools.Get(param.webDbAddr), param.webDbTag, param.webDbExpIn)\n\t\tlog.Info(\"Get web data with redis \" + param.webDbAddr + \": \" + param.webDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid web data DB type \" + param.webDbType)\n\t}\n\n\t\/\/ TA 情報。\n\tvar taDb tadb.Db\n\tswitch param.taDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.taDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\ttaDb = tadb.NewMongoDb(pool, param.taDbTag, param.taDbTag2, webDb)\n\t\tlog.Info(\"Use TA info in mongodb \" + param.taDbAddr + \": \" + param.taDbTag + \".\" + param.taDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid TA DB type \" + param.taDbType)\n\t}\n\n\t\/\/ セクタ固有のアカウント ID の計算に使う情報。\n\tvar sectDb sector.Db\n\tswitch param.sectDbType {\n\tcase \"memory\":\n\t\tsectDb = sector.NewMemoryDb()\n\t\tlog.Info(\"Save pairwise account ID calculation info in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.sectDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tsectDb = sector.NewMongoDb(pool, param.sectDbTag, param.sectDbTag2)\n\t\tlog.Info(\"Save pairwise account ID calculation info in mongodb \" + param.sectDbAddr + \": \" + param.sectDbTag + \".\" + param.sectDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid pairwise account ID calculation info DB type \" + param.sectDbType)\n\t}\n\n\t\/\/ セクタ固有のアカウント ID 情報。\n\tvar pwDb pairwise.Db\n\tswitch param.pwDbType {\n\tcase \"memory\":\n\t\tpwDb = pairwise.NewMemoryDb()\n\t\tlog.Info(\"Save pairwise account IDs in memory\")\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.pwDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tpwDb = pairwise.NewMongoDb(pool, param.pwDbTag, param.pwDbTag2)\n\t\tlog.Info(\"Save pairwise account IDs in mongodb \" + param.pwDbAddr + \": \" + param.pwDbTag + \".\" + param.pwDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid pairwise account ID DB type \" + param.pwDbType)\n\t}\n\n\t\/\/ IdP 情報。\n\tvar idpDb idpdb.Db\n\tswitch param.idpDbType {\n\tcase \"mongo\":\n\t\tpool, err := monPools.Get(param.idpDbAddr)\n\t\tif err != nil {\n\t\t\treturn erro.Wrap(err)\n\t\t}\n\t\tidpDb = idpdb.NewMongoDb(pool, param.idpDbTag, param.idpDbTag2, webDb)\n\t\tlog.Info(\"Use IdP info in mongodb \" + param.idpDbAddr + \": \" + param.idpDbTag + \".\" + param.idpDbTag2)\n\tdefault:\n\t\treturn erro.New(\"invalid IdP DB type \" + param.idpDbType)\n\t}\n\n\t\/\/ セッション。\n\tvar sessDb session.Db\n\tswitch param.sessDbType {\n\tcase \"memory\":\n\t\tsessDb = session.NewMemoryDb()\n\t\tlog.Info(\"Save sessions in memory\")\n\tcase \"redis\":\n\t\tsessDb = session.NewRedisDb(redPools.Get(param.sessDbAddr), param.sessDbTag)\n\t\tlog.Info(\"Save sessions in redis \" + param.sessDbAddr + \": \" + param.sessDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid session DB type \" + param.sessDbType)\n\t}\n\n\t\/\/ 認可コード。\n\tvar acodDb authcode.Db\n\tswitch param.acodDbType {\n\tcase \"memory\":\n\t\tacodDb = authcode.NewMemoryDb()\n\t\tlog.Info(\"Save authorization codes in memory\")\n\tcase \"redis\":\n\t\tacodDb = authcode.NewRedisDb(redPools.Get(param.acodDbAddr), param.acodDbTag)\n\t\tlog.Info(\"Save authorization codes in redis \" + param.acodDbAddr + \": \" + param.acodDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid authorization code DB type \" + param.acodDbType)\n\t}\n\n\t\/\/ アクセストークン。\n\tvar tokDb token.Db\n\tswitch param.tokDbType {\n\tcase \"memory\":\n\t\ttokDb = token.NewMemoryDb()\n\t\tlog.Info(\"Save access tokens in memory\")\n\tcase \"redis\":\n\t\ttokDb = token.NewRedisDb(redPools.Get(param.tokDbAddr), param.tokDbTag)\n\t\tlog.Info(\"Save access tokens in redis \" + param.tokDbAddr + \": \" + param.tokDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid access token DB type \" + param.tokDbType)\n\t}\n\n\t\/\/ 仲介コード。\n\tvar ccodDb coopcode.Db\n\tswitch param.ccodDbType {\n\tcase \"memory\":\n\t\tccodDb = coopcode.NewMemoryDb()\n\t\tlog.Info(\"Save cooperation codes in memory\")\n\tcase \"redis\":\n\t\tccodDb = coopcode.NewRedisDb(redPools.Get(param.ccodDbAddr), param.ccodDbTag)\n\t\tlog.Info(\"Save cooperation codes in redis \" + param.ccodDbAddr + \": \" + param.ccodDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid cooperation code DB type \" + param.ccodDbType)\n\t}\n\n\t\/\/ JWT の ID。\n\tvar jtiDb jtidb.Db\n\tswitch param.jtiDbType {\n\tcase \"memory\":\n\t\tjtiDb = jtidb.NewMemoryDb()\n\t\tlog.Info(\"Save JWT IDs in memory\")\n\tcase \"redis\":\n\t\tjtiDb = jtidb.NewRedisDb(redPools.Get(param.jtiDbAddr), param.jtiDbTag)\n\t\tlog.Info(\"Save JWT IDs in redis \" + param.jtiDbAddr + \": \" + param.jtiDbTag)\n\tdefault:\n\t\treturn erro.New(\"invalid JWT ID DB type \" + param.jtiDbType)\n\t}\n\n\tsys := &system{\n\t\tparam.selfId,\n\t\tparam.sigAlg,\n\t\tparam.sigKid,\n\n\t\tparam.pathTok,\n\t\tparam.pathTa,\n\t\tparam.pathSelUi,\n\t\tparam.pathLginUi,\n\t\tparam.pathConsUi,\n\t\tparam.pathErrUi,\n\n\t\tparam.pwSaltLen,\n\t\tparam.sessLen,\n\t\tparam.sessExpIn,\n\t\tparam.sessRefDelay,\n\t\tparam.sessDbExpIn,\n\t\tparam.acodLen,\n\t\tparam.acodExpIn,\n\t\tparam.acodDbExpIn,\n\t\tparam.tokLen,\n\t\tparam.tokExpIn,\n\t\tparam.tokDbExpIn,\n\t\tparam.ccodLen,\n\t\tparam.ccodExpIn,\n\t\tparam.ccodDbExpIn,\n\t\tparam.jtiLen,\n\t\tparam.jtiExpIn,\n\t\tparam.jtiDbExpIn,\n\t\tparam.ticLen,\n\n\t\tkeyDb,\n\t\twebDb,\n\t\tacntDb,\n\t\tconsDb,\n\t\ttaDb,\n\t\tsectDb,\n\t\tpwDb,\n\t\tidpDb,\n\t\tsessDb,\n\t\tacodDb,\n\t\ttokDb,\n\t\tccodDb,\n\t\tjtiDb,\n\n\t\tparam.cookPath,\n\t\tparam.cookSec,\n\t}\n\n\t\/\/ バックエンドの準備完了。\n\n\ts := server.NewStopper()\n\tdefer func() {\n\t\t\/\/ 処理の終了待ち。\n\t\ts.Lock()\n\t\tdefer s.Unlock()\n\t\tfor s.Stopped() {\n\t\t\ts.Wait()\n\t\t}\n\t}()\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"\/\", panicErrorWrapper(s, func(w http.ResponseWriter, r *http.Request) error {\n\t\treturn erro.Wrap(idperr.New(idperr.Invalid_request, \"invalid endpoint\", http.StatusNotFound, nil))\n\t}))\n\tmux.HandleFunc(param.pathOk, panicErrorWrapper(s, func(w http.ResponseWriter, r *http.Request) error {\n\t\treturn nil\n\t}))\n\tmux.HandleFunc(param.pathAuth, panicErrorWrapper(s, sys.authPage))\n\tmux.HandleFunc(param.pathSel, panicErrorWrapper(s, sys.selectPage))\n\tmux.HandleFunc(param.pathLgin, panicErrorWrapper(s, sys.lginPage))\n\tmux.HandleFunc(param.pathCons, panicErrorWrapper(s, sys.consentPage))\n\tmux.HandleFunc(param.pathTa, panicErrorWrapper(s, sys.taApiHandler().ServeHTTP))\n\tmux.HandleFunc(param.pathTok, panicErrorWrapper(s, sys.tokenApi))\n\tmux.HandleFunc(param.pathAcnt, panicErrorWrapper(s, sys.accountApi))\n\tmux.HandleFunc(param.pathCoopFr, panicErrorWrapper(s, sys.cooperateFromApi))\n\tmux.HandleFunc(param.pathCoopTo, panicErrorWrapper(s, sys.cooperateToApi))\n\tif param.uiDir != \"\" {\n\t\t\/\/ ファイル配信も自前でやる。\n\t\tpathUi := strings.TrimRight(param.pathUi, \"\/\") + \"\/\"\n\t\tmux.Handle(pathUi, http.StripPrefix(pathUi, http.FileServer(http.Dir(param.uiDir))))\n\t}\n\n\treturn server.Serve(param, mux)\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/rafaeljusto\/cctldstats\/config\"\n)\n\n\/\/ Connection database connection.\nvar Connection *sql.DB\n\n\/\/ Connect performs the database connection. Today the following databases are supported: mysql and postgres\nfunc Connect() (err error) {\n\tvar connParams string\n\tswitch config.CCTLDStats.Database.Kind {\n\tcase \"mysql\":\n\t\tconnParams = fmt.Sprintf(\"%s:%s@tcp(%s)\/%s\",\n\t\t\tconfig.CCTLDStats.Database.Username,\n\t\t\tconfig.CCTLDStats.Database.Password,\n\t\t\tconfig.CCTLDStats.Database.Host,\n\t\t\tconfig.CCTLDStats.Database.Name,\n\t\t)\n\tcase \"postgres\":\n\t\tconnParams = fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s?sslmode=verify-full\",\n\t\t\tconfig.CCTLDStats.Database.Username,\n\t\t\tconfig.CCTLDStats.Database.Password,\n\t\t\tconfig.CCTLDStats.Database.Host,\n\t\t\tconfig.CCTLDStats.Database.Name,\n\t\t)\n\t}\n\n\tConnection, err = sql.Open(config.CCTLDStats.Database.Kind, connParams)\n\treturn\n}\n<commit_msg>Disable SSL mode in DB connection for now<commit_after>package db\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/rafaeljusto\/cctldstats\/config\"\n)\n\n\/\/ Connection database connection.\nvar Connection *sql.DB\n\n\/\/ Connect performs the database connection. Today the following databases are supported: mysql and postgres\nfunc Connect() (err error) {\n\tvar connParams string\n\tswitch config.CCTLDStats.Database.Kind {\n\tcase \"mysql\":\n\t\tconnParams = fmt.Sprintf(\"%s:%s@tcp(%s)\/%s\",\n\t\t\tconfig.CCTLDStats.Database.Username,\n\t\t\tconfig.CCTLDStats.Database.Password,\n\t\t\tconfig.CCTLDStats.Database.Host,\n\t\t\tconfig.CCTLDStats.Database.Name,\n\t\t)\n\tcase \"postgres\":\n\t\tconnParams = fmt.Sprintf(\"postgres:\/\/%s:%s@%s\/%s\",\n\t\t\tconfig.CCTLDStats.Database.Username,\n\t\t\tconfig.CCTLDStats.Database.Password,\n\t\t\tconfig.CCTLDStats.Database.Host,\n\t\t\tconfig.CCTLDStats.Database.Name,\n\t\t)\n\t}\n\n\tConnection, err = sql.Open(config.CCTLDStats.Database.Kind, connParams)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ User interface for the 'Bulk Add CSV' extension of Shimmie2.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/kusubooru\/local-tagger\/bulk\"\n)\n\n\/\/go:generate go run generate\/templates.go\n\/\/go:generate go run generate\/swf.go\n\nconst theVersion = \"1.0.0\"\n\nvar fns = template.FuncMap{\n\t\"last\": func(s []string) string {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn s[len(s)-1]\n\t},\n\t\"join\": strings.Join,\n}\n\nvar (\n\tdirectory = flag.String(\"dir\", \".\", \"the directory that contains the images\")\n\tcsvFilename = flag.String(\"csv\", \"bulk.csv\", \"the name of the CSV file\")\n\tport = flag.String(\"port\", \"8080\", \"server port\")\n\topenBrowser = flag.Bool(\"openbrowser\", true, \"open browser automatically\")\n\tversion = flag.Bool(\"v\", false, \"print program version\")\n)\n\nconst description = `\n User interface for the 'Bulk Add CSV' extension of Shimmie2.\n\n The program will launch a web interface in a new browser window, which allows\n to add tags, source and rating on each image that is contained in the current\n directory (or the one specified by the -dir option). Subfolders are ignored.\n Supported types: \"gif\", \"jpeg\", \"jpg\", \"png\", \"swf\"\n\n The web interface allows to save the image metadata in a CSV file as expected\n by the 'Bulk Add CSV' Shimmie2 extension. If a CSV file with the name\n 'bulk.csv' (or a name specified by the -csv option) is found, it will be\n loaded automatically on start up.\n\n The folder containing the CSV file and the images can then be manually\n uploaded to the server and used by the 'Bulk Add CSV' extension to bulk add\n the images to Shimmie2.\n`\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: %s [options]\\n\", os.Args[0])\n\tfmt.Fprintln(os.Stderr, description)\n\tfmt.Fprintf(os.Stderr, \"Options:\\n\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n}\n\ntype model struct {\n\tErr error\n\tPrefix string\n\tWorkingDir string\n\tCSVFilename string\n\tImages []bulk.Image\n\tVersion string\n\tUseLinuxSep bool\n}\n\nvar globalModel *model\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc createFile(file string) error {\n\tf, err := os.Create(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\treturn err\n}\n\nfunc run() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Printf(\"local-tagger%v\\n\", theVersion)\n\t\treturn nil\n\t}\n\n\td, err := filepath.Abs(*directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*directory = d\n\n\t\/\/ If CSV File does not exist, we create it.\n\tcsvFile := filepath.Join(*directory, *csvFilename)\n\tif _, err = os.Stat(csvFile); os.IsNotExist(err) {\n\t\tif err = createFile(csvFile); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tm, err := loadFromCSVFile(*directory, *csvFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglobalModel = m\n\n\thttp.Handle(\"\/\", http.HandlerFunc(indexHandler))\n\thttp.Handle(\"\/load\", http.HandlerFunc(loadHandler))\n\thttp.Handle(\"\/update\", http.HandlerFunc(updateHandler))\n\thttp.Handle(\"\/ok\/\", http.HandlerFunc(okHandler))\n\thttp.Handle(\"\/img\/\", http.HandlerFunc(serveImage))\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(\"web\/static\"))))\n\n\tgo func() {\n\t\tlocalURL := fmt.Sprintf(\"http:\/\/localhost:%v\", *port)\n\t\tokURL := fmt.Sprintf(\"%v\/ok\", localURL)\n\t\tif waitServer(okURL) && *openBrowser && startBrowser(localURL) {\n\t\t\tlog.Printf(\"A browser window should open. If not, please visit %s\", localURL)\n\t\t} else {\n\t\t\tlog.Printf(\"Please open your web browser and visit %s\", localURL)\n\t\t}\n\t}()\n\n\treturn http.ListenAndServe(\":\"+*port, nil)\n}\n\nfunc loadFromCSVFile(dir, csvFilename string) (*model, error) {\n\n\tm := &model{WorkingDir: dir, CSVFilename: csvFilename, Version: theVersion}\n\tif globalModel != nil {\n\t\tm.UseLinuxSep = globalModel.UseLinuxSep\n\t}\n\n\t\/\/ Loading images from folder\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timages := bulk.LoadImages(files)\n\n\tf, err := os.Open(filepath.Join(dir, csvFilename))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil && err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\t\/\/ Loading CSV image data\n\timagesWithInfo, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Images = bulk.Combine(images, imagesWithInfo)\n\n\t\/\/ Getting current prefix\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\treturn nil, err\n\t}\n\tcp, err := bulk.CurrentPrefix(dir, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Prefix = cp\n\n\treturn m, nil\n}\n\nfunc loadHandler(w http.ResponseWriter, r *http.Request) {\n\tf, h, err := r.FormFile(\"csvFilename\")\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not parse multipart file: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close multipart file: %v\\n\", cerr)\n\t\t}\n\t}()\n\n\tif err := addFromMultipartFile(globalModel, f); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load image metadata from multipart CSV File: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\tglobalModel.CSVFilename = h.Filename\n\n\terr = saveToCSVFile(globalModel)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save file to disk: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\nfunc addFromMultipartFile(m *model, file multipart.File) error {\n\timgMetadata, err := bulk.LoadCSV(file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not load image info from CSV File: %v\", err)\n\t}\n\tif _, err = file.Seek(0, 0); err != nil {\n\t\treturn fmt.Errorf(\"could not seek multipart file: %v\", err)\n\t}\n\tprefix, err := bulk.CurrentPrefix(m.WorkingDir, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read current prefix from multipart file: %v\", err)\n\t}\n\tm.Prefix = prefix\n\tm.Images = bulk.Combine(m.Images, imgMetadata)\n\n\treturn nil\n}\n\nfunc okHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"ok\"))\n}\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\tm, err := loadFromCSVFile(globalModel.WorkingDir, globalModel.CSVFilename)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load from CSV File: %v\", err)\n\t} else {\n\t\tglobalModel = m\n\t}\n\n\trender(w, indexTmpl, globalModel)\n}\n\nfunc render(w http.ResponseWriter, t *template.Template, model interface{}) {\n\tif err := t.Execute(w, model); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, \"could not parse form\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ prefix\n\tglobalModel.Prefix = r.PostForm[\"prefix\"][0]\n\t\/\/ csvFilename\n\tglobalModel.CSVFilename = r.PostForm[\"csvFilename\"][0]\n\tfor _, img := range globalModel.Images {\n\t\t\/\/ tags\n\t\tareaTags := r.PostForm[fmt.Sprintf(\"image[%d].tags\", img.ID)]\n\t\tglobalModel.Images[img.ID].Tags = strings.Fields(areaTags[0])\n\t\t\/\/ source\n\t\tglobalModel.Images[img.ID].Source = r.PostForm[fmt.Sprintf(\"image[%d].source\", img.ID)][0]\n\t\t\/\/ rating\n\t\trating := r.PostForm[fmt.Sprintf(\"image[%d].rating\", img.ID)]\n\t\tif len(rating) != 0 {\n\t\t\tglobalModel.Images[img.ID].Rating = rating[0]\n\t\t}\n\t}\n\t\/\/ UseLinuxSep\n\t_, ok := r.PostForm[\"useLinuxSep\"]\n\tif ok {\n\t\tglobalModel.UseLinuxSep = true\n\t} else {\n\t\tglobalModel.UseLinuxSep = false\n\t}\n\t\/\/ scroll\n\tscroll := r.PostForm[\"scroll\"][0]\n\n\tif err := saveToCSVFile(globalModel); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save to CSV file: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t} else {\n\t\tglobalModel.Err = nil\n\t\thttp.Redirect(w, r, \"\/\"+scroll, http.StatusFound)\n\t}\n}\n\nfunc saveToCSVFile(m *model) error {\n\tcsvFilepath := filepath.Join(m.WorkingDir, m.CSVFilename)\n\tf, err := os.Create(csvFilepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); err != nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\treturn bulk.Save(f, m.Images, m.WorkingDir, m.Prefix, m.UseLinuxSep)\n}\n\nfunc serveImage(w http.ResponseWriter, r *http.Request) {\n\tidStr := r.URL.Path[strings.LastIndex(r.URL.Path, \"\/\")+1:]\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"%v is not a valid image ID\", idStr), http.StatusBadRequest)\n\t\treturn\n\t}\n\timg := bulk.FindByID(globalModel.Images, id)\n\tif img == nil {\n\t\thttp.Error(w, fmt.Sprintf(\"no image found with ID: %v\", id), http.StatusNotFound)\n\t\treturn\n\t}\n\t\/\/ In case of image name that ends with '.swf', we serve embedded image\n\t\/\/ bytes from swf.go as it's not trivial to display a .swf file.\n\tif strings.HasSuffix(img.Name, \".swf\") {\n\t\t_, err = w.Write(swfImageBytes)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tp := filepath.Join(*directory, img.Name)\n\tf, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not open image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close image file: %v\\n\", cerr)\n\t\t}\n\t}()\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/ startBrowser tries to open the URL in a browser, and returns\n\/\/ whether it succeed.\nfunc startBrowser(url string) bool {\n\t\/\/ try to start the browser\n\tvar args []string\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\targs = []string{\"open\"}\n\tcase \"windows\":\n\t\targs = []string{\"cmd\", \"\/c\", \"start\"}\n\tdefault:\n\t\targs = []string{\"xdg-open\"}\n\t}\n\tcmd := exec.Command(args[0], append(args[1:], url)...)\n\treturn cmd.Start() == nil\n}\n\n\/\/ waitServer waits some time for the http Server to start\n\/\/ serving url. The return value reports whether it starts.\nfunc waitServer(url string) bool {\n\ttries := 20\n\tfor tries > 0 {\n\t\tresp, err := http.Get(url)\n\t\tif err == nil {\n\t\t\tresp.Body.Close()\n\t\t\treturn true\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttries--\n\t}\n\treturn false\n}\n<commit_msg>Fix error shadowing<commit_after>\/\/ User interface for the 'Bulk Add CSV' extension of Shimmie2.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/kusubooru\/local-tagger\/bulk\"\n)\n\n\/\/go:generate go run generate\/templates.go\n\/\/go:generate go run generate\/swf.go\n\nconst theVersion = \"1.0.0\"\n\nvar fns = template.FuncMap{\n\t\"last\": func(s []string) string {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn s[len(s)-1]\n\t},\n\t\"join\": strings.Join,\n}\n\nvar (\n\tdirectory = flag.String(\"dir\", \".\", \"the directory that contains the images\")\n\tcsvFilename = flag.String(\"csv\", \"bulk.csv\", \"the name of the CSV file\")\n\tport = flag.String(\"port\", \"8080\", \"server port\")\n\topenBrowser = flag.Bool(\"openbrowser\", true, \"open browser automatically\")\n\tversion = flag.Bool(\"v\", false, \"print program version\")\n)\n\nconst description = `\n User interface for the 'Bulk Add CSV' extension of Shimmie2.\n\n The program will launch a web interface in a new browser window, which allows\n to add tags, source and rating on each image that is contained in the current\n directory (or the one specified by the -dir option). Subfolders are ignored.\n Supported types: \"gif\", \"jpeg\", \"jpg\", \"png\", \"swf\"\n\n The web interface allows to save the image metadata in a CSV file as expected\n by the 'Bulk Add CSV' Shimmie2 extension. If a CSV file with the name\n 'bulk.csv' (or a name specified by the -csv option) is found, it will be\n loaded automatically on start up.\n\n The folder containing the CSV file and the images can then be manually\n uploaded to the server and used by the 'Bulk Add CSV' extension to bulk add\n the images to Shimmie2.\n`\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: %s [options]\\n\", os.Args[0])\n\tfmt.Fprintln(os.Stderr, description)\n\tfmt.Fprintf(os.Stderr, \"Options:\\n\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n}\n\ntype model struct {\n\tErr error\n\tPrefix string\n\tWorkingDir string\n\tCSVFilename string\n\tImages []bulk.Image\n\tVersion string\n\tUseLinuxSep bool\n}\n\nvar globalModel *model\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc createFile(file string) error {\n\tf, err := os.Create(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\treturn err\n}\n\nfunc run() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Printf(\"local-tagger%v\\n\", theVersion)\n\t\treturn nil\n\t}\n\n\td, err := filepath.Abs(*directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*directory = d\n\n\t\/\/ If CSV File does not exist, we create it.\n\tcsvFile := filepath.Join(*directory, *csvFilename)\n\tif _, err = os.Stat(csvFile); os.IsNotExist(err) {\n\t\tif err = createFile(csvFile); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tm, err := loadFromCSVFile(*directory, *csvFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglobalModel = m\n\n\thttp.Handle(\"\/\", http.HandlerFunc(indexHandler))\n\thttp.Handle(\"\/load\", http.HandlerFunc(loadHandler))\n\thttp.Handle(\"\/update\", http.HandlerFunc(updateHandler))\n\thttp.Handle(\"\/ok\/\", http.HandlerFunc(okHandler))\n\thttp.Handle(\"\/img\/\", http.HandlerFunc(serveImage))\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(\"web\/static\"))))\n\n\tgo func() {\n\t\tlocalURL := fmt.Sprintf(\"http:\/\/localhost:%v\", *port)\n\t\tokURL := fmt.Sprintf(\"%v\/ok\", localURL)\n\t\tif waitServer(okURL) && *openBrowser && startBrowser(localURL) {\n\t\t\tlog.Printf(\"A browser window should open. If not, please visit %s\", localURL)\n\t\t} else {\n\t\t\tlog.Printf(\"Please open your web browser and visit %s\", localURL)\n\t\t}\n\t}()\n\n\treturn http.ListenAndServe(\":\"+*port, nil)\n}\n\nfunc loadFromCSVFile(dir, csvFilename string) (*model, error) {\n\n\tm := &model{WorkingDir: dir, CSVFilename: csvFilename, Version: theVersion}\n\tif globalModel != nil {\n\t\tm.UseLinuxSep = globalModel.UseLinuxSep\n\t}\n\n\t\/\/ Loading images from folder\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timages := bulk.LoadImages(files)\n\n\tf, err := os.Open(filepath.Join(dir, csvFilename))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil && err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\t\/\/ Loading CSV image data\n\timagesWithInfo, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Images = bulk.Combine(images, imagesWithInfo)\n\n\t\/\/ Getting current prefix\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\treturn nil, err\n\t}\n\tcp, err := bulk.CurrentPrefix(dir, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Prefix = cp\n\n\treturn m, nil\n}\n\nfunc loadHandler(w http.ResponseWriter, r *http.Request) {\n\tf, h, err := r.FormFile(\"csvFilename\")\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not parse multipart file: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close multipart file: %v\\n\", cerr)\n\t\t}\n\t}()\n\n\tif err = addFromMultipartFile(globalModel, f); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load image metadata from multipart CSV File: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\tglobalModel.CSVFilename = h.Filename\n\n\terr = saveToCSVFile(globalModel)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save file to disk: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\nfunc addFromMultipartFile(m *model, file multipart.File) error {\n\timgMetadata, err := bulk.LoadCSV(file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not load image info from CSV File: %v\", err)\n\t}\n\tif _, err = file.Seek(0, 0); err != nil {\n\t\treturn fmt.Errorf(\"could not seek multipart file: %v\", err)\n\t}\n\tprefix, err := bulk.CurrentPrefix(m.WorkingDir, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read current prefix from multipart file: %v\", err)\n\t}\n\tm.Prefix = prefix\n\tm.Images = bulk.Combine(m.Images, imgMetadata)\n\n\treturn nil\n}\n\nfunc okHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"ok\"))\n}\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\tm, err := loadFromCSVFile(globalModel.WorkingDir, globalModel.CSVFilename)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load from CSV File: %v\", err)\n\t} else {\n\t\tglobalModel = m\n\t}\n\n\trender(w, indexTmpl, globalModel)\n}\n\nfunc render(w http.ResponseWriter, t *template.Template, model interface{}) {\n\tif err := t.Execute(w, model); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, \"could not parse form\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ prefix\n\tglobalModel.Prefix = r.PostForm[\"prefix\"][0]\n\t\/\/ csvFilename\n\tglobalModel.CSVFilename = r.PostForm[\"csvFilename\"][0]\n\tfor _, img := range globalModel.Images {\n\t\t\/\/ tags\n\t\tareaTags := r.PostForm[fmt.Sprintf(\"image[%d].tags\", img.ID)]\n\t\tglobalModel.Images[img.ID].Tags = strings.Fields(areaTags[0])\n\t\t\/\/ source\n\t\tglobalModel.Images[img.ID].Source = r.PostForm[fmt.Sprintf(\"image[%d].source\", img.ID)][0]\n\t\t\/\/ rating\n\t\trating := r.PostForm[fmt.Sprintf(\"image[%d].rating\", img.ID)]\n\t\tif len(rating) != 0 {\n\t\t\tglobalModel.Images[img.ID].Rating = rating[0]\n\t\t}\n\t}\n\t\/\/ UseLinuxSep\n\t_, ok := r.PostForm[\"useLinuxSep\"]\n\tif ok {\n\t\tglobalModel.UseLinuxSep = true\n\t} else {\n\t\tglobalModel.UseLinuxSep = false\n\t}\n\t\/\/ scroll\n\tscroll := r.PostForm[\"scroll\"][0]\n\n\tif err := saveToCSVFile(globalModel); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save to CSV file: %v\", err)\n\t\trender(w, indexTmpl, globalModel)\n\t} else {\n\t\tglobalModel.Err = nil\n\t\thttp.Redirect(w, r, \"\/\"+scroll, http.StatusFound)\n\t}\n}\n\nfunc saveToCSVFile(m *model) error {\n\tcsvFilepath := filepath.Join(m.WorkingDir, m.CSVFilename)\n\tf, err := os.Create(csvFilepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); err != nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\treturn bulk.Save(f, m.Images, m.WorkingDir, m.Prefix, m.UseLinuxSep)\n}\n\nfunc serveImage(w http.ResponseWriter, r *http.Request) {\n\tidStr := r.URL.Path[strings.LastIndex(r.URL.Path, \"\/\")+1:]\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"%v is not a valid image ID\", idStr), http.StatusBadRequest)\n\t\treturn\n\t}\n\timg := bulk.FindByID(globalModel.Images, id)\n\tif img == nil {\n\t\thttp.Error(w, fmt.Sprintf(\"no image found with ID: %v\", id), http.StatusNotFound)\n\t\treturn\n\t}\n\t\/\/ In case of image name that ends with '.swf', we serve embedded image\n\t\/\/ bytes from swf.go as it's not trivial to display a .swf file.\n\tif strings.HasSuffix(img.Name, \".swf\") {\n\t\t_, err = w.Write(swfImageBytes)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tp := filepath.Join(*directory, img.Name)\n\tf, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not open image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close image file: %v\\n\", cerr)\n\t\t}\n\t}()\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/ startBrowser tries to open the URL in a browser, and returns\n\/\/ whether it succeed.\nfunc startBrowser(url string) bool {\n\t\/\/ try to start the browser\n\tvar args []string\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\targs = []string{\"open\"}\n\tcase \"windows\":\n\t\targs = []string{\"cmd\", \"\/c\", \"start\"}\n\tdefault:\n\t\targs = []string{\"xdg-open\"}\n\t}\n\tcmd := exec.Command(args[0], append(args[1:], url)...)\n\treturn cmd.Start() == nil\n}\n\n\/\/ waitServer waits some time for the http Server to start\n\/\/ serving url. The return value reports whether it starts.\nfunc waitServer(url string) bool {\n\ttries := 20\n\tfor tries > 0 {\n\t\tresp, err := http.Get(url)\n\t\tif err == nil {\n\t\t\tresp.Body.Close()\n\t\t\treturn true\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\ttries--\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\tlog \"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/Sirupsen\/logrus\"\n\t\"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/carbocation\/interpose\"\n\tgorilla_mux \"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n\t\"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/tylerb\/graceful\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"time\"\n\t\"flag\"\n)\n\n\/\/ Define string constants that correspond\n\/\/ to each supported action here\nconst (\n\tActionWater string = \"water\"\n\tActionNothing string = \"nothing\"\n)\n\n\/\/ Variables for keeping track of the current vote count\n\/\/ for each action.\n\/\/ These should probably be stored in redis at some point.\nvar VoteCountWater int = 0\nvar VoteCountNothing int = 0\n\ntype Application struct {\n}\n\ntype CurrentAction struct {\n\tAction string `json:\"action\"`\n\tUnixTimestamp int64 `json:\"unixTimestamp\"`\n}\n\nfunc GetCurrentAction(w http.ResponseWriter, r *http.Request) {\n\taction := &CurrentAction{\n\t\tAction: GetWinningAction(),\n\t\tUnixTimestamp: int64(time.Now().Unix()),\n\t}\n\tjson.NewEncoder(w).Encode(action)\n}\n\nfunc GetWinningAction() string {\n\t\/\/ Return the action that has the greatest number of votes\n\n\tvar winningAction string\n\n\tif VoteCountWater > VoteCountNothing {\n\t\twinningAction = ActionWater\n\t} else {\n\t\twinningAction = ActionNothing\n\t}\n\n\treturn winningAction\n}\n\nfunc DebugPrintHttpRequest(r *http.Request) {\n\t\/\/ If debug logger is enabled,\n\t\/\/ print out all the details of the supplied HTTP request.\n\tif log.GetLevel() == log.DebugLevel {\n\t\tdump, err := httputil.DumpRequest(r, true)\n\t\tif err == nil {\n\t\t\tlog.Debug(\"Request received: \\n\" + string(dump))\n\t\t} else {\n\t\t\tlog.Debug(\"Error reading request: \" + err.Error())\n\t\t}\n\t}\n}\n\n\/*\nExample cURL commands for invoking the API on the commandline:\n\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"someKey1\":\"someValue1\",\"someKey2\":\"someValue2\"}' http:\/\/127.0.0.1:5000\/votes\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"action\":\"water\"}' http:\/\/127.0.0.1:5000\/votes\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"action\":\"nothing\"}' http:\/\/127.0.0.1:5000\/votes\n\ncurl http:\/\/127.0.0.1:5000\/current_action\n*\/\n\ntype Vote struct {\n\tAction string `json:\"action\"`\n}\n\nfunc PostVotes(w http.ResponseWriter, r *http.Request) {\n\n\tlog.Debug(\"PostVotes called\")\n\n\tDebugPrintHttpRequest(r)\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar vote Vote\n\terr := decoder.Decode(&vote)\n\n\tif err == nil {\n\n\t\tif vote.Action == ActionWater {\n\t\t\tVoteCountWater++\n\t\t\tlog.Debug(\"Voted for action \\\"\" + ActionWater + \"\\\" \", VoteCountWater)\n\t\t} else if vote.Action == ActionNothing {\n\t\t\tVoteCountNothing++\n\t\t\tlog.Debug(\"Voted for action \\\"\" + ActionNothing + \"\\\" \", VoteCountNothing)\n\t\t} else {\n\t\t\tlog.Debug(\"Encountered unhandled action \\\"\" + vote.Action + \"\\\"\")\n\t\t}\n\n\t} else {\n\t\tlog.Debug(\"Error parsing vote body: \" + err.Error())\n\t}\n\n\t\/\/ TODO - output a json response\n\t\/\/ { \"message\":string }\n}\n\n\/*\nExample response from GET \/votes\nA json dictionary that shows the current number\nof votes that have been cast for each action.\n{\n \"actions\": {\n \"water\": 1,\n \"nothing\": 0\n }\n}\n*\/\ntype CurrentVoteCount struct {\n\tActions map[string]int `json:\"actions\"`\n}\n\nfunc GetVotes(w http.ResponseWriter, r *http.Request) {\n\n\tlog.Debug(\"GetVotes called\")\n\n\tcurrentVotes := &CurrentVoteCount{\n\t\tActions: make(map[string]int),\n\t}\n\n\tcurrentVotes.Actions[ActionWater] = VoteCountWater\n\tcurrentVotes.Actions[ActionNothing] = VoteCountNothing\n\n\tjson.NewEncoder(w).Encode(currentVotes)\n}\n\nfunc NewApplication() (*Application, error) {\n\tapp := &Application{}\n\treturn app, nil\n}\n\nfunc (app *Application) mux() *gorilla_mux.Router {\n\trouter := gorilla_mux.NewRouter()\n\n\trouter.HandleFunc(\"\/current_action\", GetCurrentAction).Methods(\"GET\")\n\trouter.HandleFunc(\"\/votes\", PostVotes).Methods(\"POST\")\n\trouter.HandleFunc(\"\/votes\", GetVotes).Methods(\"GET\")\n\n\treturn router\n}\n\nfunc InitLogLevel() {\n\t\/\/ Check if --debug argument was supplied on the command line.\n\t\/\/ Check if LIVEPLANTDEBUG environment variable is present.\n\t\/\/ (Environment variable takes precedence over command line flag)\n\t\/\/ Enable or disable debug logger accordingly.\n\n\t\/\/ Declare and parse command line flag\n\tboolPtr := flag.Bool(\"debug\", false, \"Whether or not to enable debug logger.\")\n\tflag.Parse()\n\n\tvar debugLoggerEnabled bool = *boolPtr\n\n\tif len(os.Getenv(\"LIVEPLANTDEBUG\")) > 0 {\n\t\t\/\/ Environment variable is present, so\n\t\t\/\/ debug logger should be enabled.\n\t\t\/\/ (overrides command line flag)\n\t\tdebugLoggerEnabled = true\n\t}\n\n\tif debugLoggerEnabled {\n\t\t\/\/ Log everything\n\t\tlog.SetLevel(log.DebugLevel)\n\t\tlog.Debug(\"Debug logging enabled\")\n\t} else {\n\t\t\/\/ Only log fatal or panic events\n\t\t\/\/ (events where the application is terminated)\n\t\tlog.SetLevel(log.FatalLevel)\n\t}\n}\n\nfunc main() {\n\n\tInitLogLevel()\n\n\tlog.Println(\"Launching liveplant server\")\n\n\tapp, _ := NewApplication()\n\tmiddle := interpose.New()\n\n\tmiddle.Use(func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\tnext.ServeHTTP(w, req)\n\t\t})\n\t})\n\n\tmiddle.UseHandler(app.mux())\n\n\tServerPort := \"5000\" \/\/ default port\n\tif len(os.Getenv(\"PORT\")) > 0 {\n\t\tServerPort = os.Getenv(\"PORT\")\n\t}\n\tdrainInterval, _ := time.ParseDuration(\"1s\")\n\tsrv := &graceful.Server{\n\t\tTimeout: drainInterval,\n\t\tServer: &http.Server{\n\t\t\tAddr: \":\" + ServerPort,\n\t\t\tHandler: middle,\n\t\t},\n\t}\n\n\tlog.Println(\"Running liveplant server on port \" + ServerPort)\n\n\terr := srv.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n<commit_msg>Updated GET \/votes endpoint to provide name and displayName in addition to voteCount<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\tlog \"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/Sirupsen\/logrus\"\n\t\"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/carbocation\/interpose\"\n\tgorilla_mux \"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n\t\"github.com\/liveplant\/liveplant-server\/Godeps\/_workspace\/src\/github.com\/tylerb\/graceful\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"time\"\n\t\"flag\"\n)\n\n\/\/ Define string constants that correspond\n\/\/ to each supported action here\nconst (\n\tActionWater string = \"water\"\n\tActionNothing string = \"nothing\"\n)\n\n\/\/ Variables for keeping track of the current vote count\n\/\/ for each action.\n\/\/ These should probably be stored in redis at some point.\nvar VoteCountWater int = 0\nvar VoteCountNothing int = 0\n\ntype Application struct {\n}\n\ntype CurrentAction struct {\n\tAction string `json:\"action\"`\n\tUnixTimestamp int64 `json:\"unixTimestamp\"`\n}\n\nfunc GetCurrentAction(w http.ResponseWriter, r *http.Request) {\n\taction := &CurrentAction{\n\t\tAction: GetWinningAction(),\n\t\tUnixTimestamp: int64(time.Now().Unix()),\n\t}\n\tjson.NewEncoder(w).Encode(action)\n}\n\nfunc GetWinningAction() string {\n\t\/\/ Return the action that has the greatest number of votes\n\n\tvar winningAction string\n\n\tif VoteCountWater > VoteCountNothing {\n\t\twinningAction = ActionWater\n\t} else {\n\t\twinningAction = ActionNothing\n\t}\n\n\treturn winningAction\n}\n\nfunc DebugPrintHttpRequest(r *http.Request) {\n\t\/\/ If debug logger is enabled,\n\t\/\/ print out all the details of the supplied HTTP request.\n\tif log.GetLevel() == log.DebugLevel {\n\t\tdump, err := httputil.DumpRequest(r, true)\n\t\tif err == nil {\n\t\t\tlog.Debug(\"Request received: \\n\" + string(dump))\n\t\t} else {\n\t\t\tlog.Debug(\"Error reading request: \" + err.Error())\n\t\t}\n\t}\n}\n\n\/*\nExample cURL commands for invoking the API on the commandline:\n\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"someKey1\":\"someValue1\",\"someKey2\":\"someValue2\"}' http:\/\/127.0.0.1:5000\/votes\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"action\":\"water\"}' http:\/\/127.0.0.1:5000\/votes\ncurl -H \"Content-Type: application\/json\" -X POST -d '{\"action\":\"nothing\"}' http:\/\/127.0.0.1:5000\/votes\n\ncurl http:\/\/127.0.0.1:5000\/current_action\n*\/\n\ntype Vote struct {\n\tAction string `json:\"action\"`\n}\n\nfunc PostVotes(w http.ResponseWriter, r *http.Request) {\n\n\tlog.Debug(\"PostVotes called\")\n\n\tDebugPrintHttpRequest(r)\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar vote Vote\n\terr := decoder.Decode(&vote)\n\n\tif err == nil {\n\n\t\tif vote.Action == ActionWater {\n\t\t\tVoteCountWater++\n\t\t\tlog.Debug(\"Voted for action \\\"\" + ActionWater + \"\\\" \", VoteCountWater)\n\t\t} else if vote.Action == ActionNothing {\n\t\t\tVoteCountNothing++\n\t\t\tlog.Debug(\"Voted for action \\\"\" + ActionNothing + \"\\\" \", VoteCountNothing)\n\t\t} else {\n\t\t\tlog.Debug(\"Encountered unhandled action \\\"\" + vote.Action + \"\\\"\")\n\t\t}\n\n\t} else {\n\t\tlog.Debug(\"Error parsing vote body: \" + err.Error())\n\t}\n\n\t\/\/ TODO - output a json response\n\t\/\/ { \"message\":string }\n}\n\ntype ActionInfo struct {\n\tName string `json:\"name\"`\n\tDisplayName string `json:\"displayName\"`\n\tVoteCount int `json:\"voteCount\"`\n}\n\ntype CurrentVoteInfo struct {\n\tActions []ActionInfo `json:\"actions\"`\n}\n\n\/*\nExample response from GET \/votes\n{\n \"actions\": [\n {\n \"name\": \"nothing\",\n \"displayName\": \"Nothing\",\n \"voteCount\": 123\n },\n {\n \"name\": \"water\",\n \"displayName\": \"Water\",\n \"voteCount\": 47\n }\n ]\n}\n*\/\nfunc GetVotes(w http.ResponseWriter, r *http.Request) {\n\n\tcurrentVoteInfo := &CurrentVoteInfo{\n\t\tActions: []ActionInfo{\n\t\t\t\tActionInfo{Name: ActionNothing, DisplayName: \"Nothing\", VoteCount: VoteCountNothing},\n\t\t\t\tActionInfo{Name: ActionWater, DisplayName: \"Water\", VoteCount: VoteCountWater},\n\t\t\t},\n\t}\n\n\tjson.NewEncoder(w).Encode(currentVoteInfo)\n}\n\nfunc NewApplication() (*Application, error) {\n\tapp := &Application{}\n\treturn app, nil\n}\n\nfunc (app *Application) mux() *gorilla_mux.Router {\n\trouter := gorilla_mux.NewRouter()\n\n\trouter.HandleFunc(\"\/current_action\", GetCurrentAction).Methods(\"GET\")\n\trouter.HandleFunc(\"\/votes\", PostVotes).Methods(\"POST\")\n\trouter.HandleFunc(\"\/votes\", GetVotes).Methods(\"GET\")\n\n\treturn router\n}\n\nfunc InitLogLevel() {\n\t\/\/ Check if --debug argument was supplied on the command line.\n\t\/\/ Check if LIVEPLANTDEBUG environment variable is present.\n\t\/\/ (Environment variable takes precedence over command line flag)\n\t\/\/ Enable or disable debug logger accordingly.\n\n\t\/\/ Declare and parse command line flag\n\tboolPtr := flag.Bool(\"debug\", false, \"Whether or not to enable debug logger.\")\n\tflag.Parse()\n\n\tvar debugLoggerEnabled bool = *boolPtr\n\n\tif len(os.Getenv(\"LIVEPLANTDEBUG\")) > 0 {\n\t\t\/\/ Environment variable is present, so\n\t\t\/\/ debug logger should be enabled.\n\t\t\/\/ (overrides command line flag)\n\t\tdebugLoggerEnabled = true\n\t}\n\n\tif debugLoggerEnabled {\n\t\t\/\/ Log everything\n\t\tlog.SetLevel(log.DebugLevel)\n\t\tlog.Debug(\"Debug logging enabled\")\n\t} else {\n\t\t\/\/ Only log fatal or panic events\n\t\t\/\/ (events where the application is terminated)\n\t\tlog.SetLevel(log.FatalLevel)\n\t}\n}\n\nfunc main() {\n\n\tInitLogLevel()\n\n\tlog.Println(\"Launching liveplant server\")\n\n\tapp, _ := NewApplication()\n\tmiddle := interpose.New()\n\n\tmiddle.Use(func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\tnext.ServeHTTP(w, req)\n\t\t})\n\t})\n\n\tmiddle.UseHandler(app.mux())\n\n\tServerPort := \"5000\" \/\/ default port\n\tif len(os.Getenv(\"PORT\")) > 0 {\n\t\tServerPort = os.Getenv(\"PORT\")\n\t}\n\tdrainInterval, _ := time.ParseDuration(\"1s\")\n\tsrv := &graceful.Server{\n\t\tTimeout: drainInterval,\n\t\tServer: &http.Server{\n\t\t\tAddr: \":\" + ServerPort,\n\t\t\tHandler: middle,\n\t\t},\n\t}\n\n\tlog.Println(\"Running liveplant server on port \" + ServerPort)\n\n\terr := srv.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\n\t\"github.com\/lovoo\/ipmi_exporter\/collector\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nvar (\n\tlistenAddress = flag.String(\"web.listen\", \":9289\", \"Address on which to expose metrics and web interface.\")\n\tmetricsPath = flag.String(\"web.path\", \"\/metrics\", \"Path under which to expose metrics.\")\n\tipmiBinary = flag.String(\"ipmi.path\", \"ipmitool\", \"Path to the ipmi binary\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tprometheus.MustRegister(collector.NewExporter(*ipmiBinary))\n\n\thandler := prometheus.Handler()\n\tif *metricsPath == \"\" || *metricsPath == \"\/\" {\n\t\thttp.Handle(*metricsPath, handler)\n\t} else {\n\t\thttp.Handle(*metricsPath, handler)\n\t\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.Write([]byte(`<html>\n\t\t\t<head><title>IPMI Exporter<\/title><\/head>\n\t\t\t<body>\n\t\t\t<h1>IPMI Exporter<\/h1>\n\t\t\t<p><a href=\"` + *metricsPath + `\">Metrics<\/a><\/p>\n\t\t\t<\/body>\n\t\t\t<\/html>`))\n\t\t})\n\t}\n\n\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\terr := http.ListenAndServe(*listenAddress, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Replace deprecated prometheus.Handler against promhttp.Handler(megacheck)<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\n\t\"github.com\/lovoo\/ipmi_exporter\/collector\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nvar (\n\tlistenAddress = flag.String(\"web.listen\", \":9289\", \"Address on which to expose metrics and web interface.\")\n\tmetricsPath = flag.String(\"web.path\", \"\/metrics\", \"Path under which to expose metrics.\")\n\tipmiBinary = flag.String(\"ipmi.path\", \"ipmitool\", \"Path to the ipmi binary\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tprometheus.MustRegister(collector.NewExporter(*ipmiBinary))\n\n\thandler := promhttp.Handler()\n\tif *metricsPath == \"\" || *metricsPath == \"\/\" {\n\t\thttp.Handle(*metricsPath, handler)\n\t} else {\n\t\thttp.Handle(*metricsPath, handler)\n\t\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.Write([]byte(`<html>\n\t\t\t<head><title>IPMI Exporter<\/title><\/head>\n\t\t\t<body>\n\t\t\t<h1>IPMI Exporter<\/h1>\n\t\t\t<p><a href=\"` + *metricsPath + `\">Metrics<\/a><\/p>\n\t\t\t<\/body>\n\t\t\t<\/html>`))\n\t\t})\n\t}\n\n\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\terr := http.ListenAndServe(*listenAddress, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package dburl provides a standard, URL style mechanism for of parsing, and\n\/\/ opening database connection strings (in the form of a URL).\n\/\/\n\/\/ Database URL Connection Strings\n\/\/\n\/\/ Supported database URLs are of the form:\n\/\/\n\/\/ protocol+transport:\/\/user:pass@host\/dbname?opt1=a&opt2=b\n\/\/ protocol:\/path\/to\/file\n\/\/\n\/\/ Where:\n\/\/\n\/\/ protocol - driver name or alias (see below)\n\/\/ transport - the transport protocol [tcp, udp, unix] (only mysql for now)\n\/\/ user - the username to connect as\n\/\/ pass - the password to use\n\/\/ host - the remote host\n\/\/ dbname - the database or service name to connect to\n\/\/ ?opt1=... - additional database driver options (see the respective SQL driver for available options)\n\/\/\n\/\/ Example URLs for Open or Parse\n\/\/\n\/\/ The following are URLs that can be processed using Parse or Open:\n\/\/\n\/\/ postgres:\/\/user:pass@localhost\/dbname\n\/\/ pg:\/\/user:pass@localhost\/dbname?sslmode=disable\n\/\/ mysql:\/\/user:pass@localhost\/dbname\n\/\/ mysql:\/var\/run\/mysqld\/mysqld.sock\n\/\/ sqlserver:\/\/user:pass@remote-host.com\/dbname\n\/\/ oracle:\/\/user:pass@somehost.com\/oracledb\n\/\/ sap:\/\/user:pass@localhost\/dbname\n\/\/ sqlite:\/path\/to\/file.db\n\/\/ file:myfile.sqlite3?loc=auto\n\/\/\n\/\/ Parsing Rules\n\/\/\n\/\/ Parse relies heavily on the standard net\/url\/URL type, as such it has the\n\/\/ same parsing conventions\/semantics as any URL parsed by the standard\n\/\/ library's net\/url\/Parse.\n\/\/\n\/\/ SQL Driver Aliases\n\/\/\n\/\/ The following protocol aliases are available, and any URL passed to Open or\n\/\/ Parse will be processed the same as their respective driver:\n\/\/\n\/\/ Database (driver) | Aliases\n\/\/ ------------------------------------------------------------------\n\/\/ Microsoft SQL Server (mssql) | ms, sqlserver\n\/\/ MySQL (mysql) | my, mariadb, maria, percona, aurora\n\/\/ Oracle (ora) | or, oracle, oci8, oci\n\/\/ PostgreSQL (postgres) | pg, postgresql, pgsql\n\/\/ SAP HANA (hdb) | sa, saphana, sap, hana\n\/\/ SQLite3 (sqlite3) | sq, sqlite, file\n\/\/\n\/\/ Usage Notes\n\/\/\n\/\/ Please note that this package does not import actual SQL drivers, and only\n\/\/ provides a standard mechanism to parse their respective URL string.\n\/\/\n\/\/ For reference, these are the following \"expected\" SQL drivers that would need\n\/\/ to be imported:\n\/\/\n\/\/ Database (driver) | Package\n\/\/ ------------------------------------------------------------------\n\/\/ Microsoft SQL Server (mssql) | github.com\/denisenkom\/go-mssqldb\n\/\/ MySQL (mysql) | github.com\/go-sql-driver\/mysql\n\/\/ Oracle (ora) | gopkg.in\/rana\/ora.v4\n\/\/ PostgreSQL (postgres) | github.com\/lib\/pq\n\/\/ SAP HANA (hdb) | github.com\/SAP\/go-hdb\/driver\n\/\/ SQLite3 (sqlite3) | github.com\/mattn\/go-sqlite3\n\/\/\n\/\/ Existence\n\/\/\n\/\/ This package was written mainly to support xo (https:\/\/github.com\/knq\/xo)\n\/\/ and usql (https:\/\/github.com\/knq\/usql).\npackage dburl\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ ErrInvalidDatabaseScheme is the invalid database scheme error.\n\tErrInvalidDatabaseScheme = errors.New(\"invalid database scheme\")\n\n\t\/\/ ErrInvalidTransportProtocol is the invalid transport protocol error.\n\tErrInvalidTransportProtocol = errors.New(\"invalid transport protocol\")\n\n\t\/\/ ErrUnknownDatabaseType is the unknown database type error.\n\tErrUnknownDatabaseType = errors.New(\"unknown database type\")\n\n\t\/\/ ErrInvalidPort is the invalid port error.\n\tErrInvalidPort = errors.New(\"invalid port\")\n\n\t\/\/ ErrOraMustProvideUsernameAndPassword is the ora (Oracle) must provide\n\t\/\/ username and password error.\n\tErrOraMustProvideUsernameAndPassword = errors.New(\"ora: must provide username and password\")\n)\n\n\/\/ URL wraps the standard net\/url.URL type, adding OriginalScheme, Proto,\n\/\/ Driver, and DSN strings.\ntype URL struct {\n\t\/\/ URL is the base net\/url\/URL.\n\turl.URL\n\n\t\/\/ OriginalScheme is the original parsed scheme (ie, \"sq\", \"mysql+unix\", \"sap\", etc).\n\tOriginalScheme string\n\n\t\/\/ Proto is the specified protocol (ie, \"tcp\", \"udp\", \"unix\"), if provided.\n\tProto string\n\n\t\/\/ Driver is the non-aliased SQL driver name that should be used in a call\n\t\/\/ to sql\/Open.\n\tDriver string\n\n\t\/\/ DSN is the built connection \"data source name\" that can be used in a\n\t\/\/ call to sql\/Open.\n\tDSN string\n}\n\n\/\/ String satisfies the stringer interface.\nfunc (u *URL) String() string {\n\tp := &url.URL{\n\t\tScheme: u.OriginalScheme,\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn p.String()\n}\n\n\/\/ Short provides a short description of the user, host, and database.\nfunc (u *URL) Short() string {\n\ts := u.Driver[:2]\n\tswitch s {\n\tcase \"po\":\n\t\ts = \"pg\"\n\tcase \"hd\":\n\t\ts = \"sa\"\n\t}\n\n\tif u.Proto != \"tcp\" {\n\t\ts += \"+\" + u.Proto\n\t}\n\n\ts += \":\"\n\n\tif u.User != nil {\n\t\tif un := u.User.Username(); un != \"\" {\n\t\t\ts += un + \"@\"\n\t\t}\n\t}\n\n\tif u.Host != \"\" {\n\t\ts += u.Host\n\t}\n\n\tif u.Path != \"\" && u.Path != \"\/\" {\n\t\ts += u.Path\n\t}\n\n\tif u.Opaque != \"\" {\n\t\ts += u.Opaque\n\t}\n\n\treturn s\n}\n\n\/\/ Parse parses a rawurl string and normalizes the scheme.\nfunc Parse(rawurl string) (*URL, error) {\n\t\/\/ parse url\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn nil, ErrInvalidDatabaseScheme\n\t}\n\n\t\/\/ create url\n\tv := &URL{URL: *u, OriginalScheme: u.Scheme, Proto: \"tcp\"}\n\tv.Scheme = strings.ToLower(v.Scheme)\n\n\t\/\/ check if +protocol is in scheme\n\tif strings.Contains(v.Scheme, \"+\") {\n\t\tp := strings.SplitN(v.Scheme, \"+\", 2)\n\t\tv.Scheme = p[0]\n\t\tv.Proto = p[1]\n\t}\n\n\t\/\/ check protocol\n\tif v.Proto != \"tcp\" && v.Proto != \"udp\" && v.Proto != \"unix\" {\n\t\treturn nil, ErrInvalidTransportProtocol\n\t}\n\n\t\/\/ get loader\n\tloader, ok := loaders[v.Scheme]\n\tif !ok {\n\t\treturn nil, ErrUnknownDatabaseType\n\t}\n\n\t\/\/ process\n\tv.Driver, v.DSN, err = loader(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif v.Driver != \"sqlite3\" && v.Opaque != \"\" {\n\t\treturn Parse(v.OriginalScheme + \":\/\/\" + v.Opaque)\n\t}\n\n\treturn v, nil\n}\n\n\/\/ Open takes a rawurl like \"protocol+transport:\/\/user:pass@host\/dbname?option1=a&option2=b\"\n\/\/ and creates a standard sql.DB connection.\n\/\/\n\/\/ See Parse for information on formatting URLs to work properly with Open.\nfunc Open(rawurl string) (*sql.DB, error) {\n\tu, err := Parse(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.Open(u.Driver, u.DSN)\n}\n\n\/\/ mssqlProcess processes a mssql url and protocol.\nfunc mssqlProcess(u *URL) (string, string, error) {\n\tvar err error\n\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tport := 1433\n\n\t\/\/ grab dbname\n\tvar dbname string\n\tif u.Path != \"\" {\n\t\tdbname = u.Path[1:]\n\t}\n\n\t\/\/ extract port if present\n\tpos := strings.Index(host, \":\")\n\tif pos != -1 {\n\t\tport, err = strconv.Atoi(host[pos+1:])\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", ErrInvalidPort\n\t\t}\n\t\thost = host[:pos]\n\t}\n\n\t\/\/ format dsn\n\tdsn := fmt.Sprintf(\"server=%s;port=%d\", host, port)\n\tif dbname != \"\" {\n\t\tdsn += \";database=\" + dbname\n\t}\n\n\t\/\/ add user\/pass\n\tif u.User != nil {\n\t\tif user := u.User.Username(); len(user) > 0 {\n\t\t\tdsn += \";user id=\" + user\n\t\t}\n\t\tif pass, ok := u.User.Password(); ok {\n\t\t\tdsn += \";password=\" + pass\n\t\t}\n\t}\n\n\t\/\/ add params\n\tfor k, v := range u.Query() {\n\t\tdsn += \";\" + k + \"=\" + v[0]\n\t}\n\n\treturn \"mssql\", dsn, nil\n}\n\n\/\/ mysqlProcess processes a mysql url and protocol.\nfunc mysqlProcess(u *URL) (string, string, error) {\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tdbname := u.Path\n\n\tif strings.HasPrefix(dbname, \"\/\") {\n\t\tdbname = dbname[1:]\n\t}\n\n\tif u.Proto == \"unix\" {\n\t\tif u.Opaque != \"\" {\n\t\t\thost = path.Dir(u.Opaque)\n\t\t\tdbname = path.Base(u.Opaque)\n\t\t} else {\n\t\t\thost = path.Join(u.Host, path.Dir(u.Path))\n\t\t\tdbname = path.Base(u.Path)\n\t\t}\n\t\thost = host + \"\/\" + dbname\n\t\tdbname = \"\"\n\n\t\tu.Host = host\n\t\tu.Path = \"\"\n\t} else if !strings.Contains(host, \":\") {\n\t\t\/\/ append default port\n\t\thost = host + \":3306\"\n\t}\n\n\t\/\/ create dsn\n\tdsn := fmt.Sprintf(\"%s(%s)\", u.Proto, host)\n\n\t\/\/ build user\/pass\n\tif u.User != nil {\n\t\tif un := u.User.Username(); len(un) > 0 {\n\t\t\tif up, ok := u.User.Password(); ok {\n\t\t\t\tun += \":\" + up\n\t\t\t}\n\t\t\tdsn = un + \"@\" + dsn\n\t\t}\n\t}\n\n\t\/\/ add database name\n\tdsn += \"\/\" + dbname\n\n\t\/\/ add params\n\tparams := u.Query().Encode()\n\tif len(params) > 0 {\n\t\tdsn += \"?\" + params\n\t}\n\n\t\/\/ format\n\treturn \"mysql\", dsn, nil\n}\n\n\/\/ oracleProcess processes a ora (Oracle) url and protocol.\nfunc oracleProcess(u *URL) (string, string, error) {\n\tif u.User == nil {\n\t\treturn \"\", \"\", ErrOraMustProvideUsernameAndPassword\n\t}\n\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tdbname := u.Path[1:]\n\n\t\/\/ build user\/pass\n\tuserinfo := \"\"\n\tif un := u.User.Username(); len(un) > 0 {\n\t\tuserinfo = un\n\t\tif up, ok := u.User.Password(); ok {\n\t\t\tuserinfo = userinfo + \"\/\" + up\n\t\t}\n\t}\n\n\t\/\/ format\n\treturn \"ora\", fmt.Sprintf(\n\t\t\"%s@%s\/%s\",\n\t\tuserinfo,\n\t\thost,\n\t\tdbname,\n\t), nil\n}\n\n\/\/ postgresProcess processes a postgres url and protocol.\nfunc postgresProcess(u *URL) (string, string, error) {\n\tp := &url.URL{\n\t\tScheme: \"postgres\",\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn \"postgres\", p.String(), nil\n}\n\n\/\/ sapProcess processes a hdb url and protocol.\nfunc sapProcess(u *URL) (string, string, error) {\n\tp := &url.URL{\n\t\tScheme: \"hdb\",\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn \"hdb\", p.String(), nil\n}\n\n\/\/ sqliteProcess processes a sqlite3 url and protocol.\nfunc sqliteProcess(u *URL) (string, string, error) {\n\tdsn := u.Opaque\n\tif u.Path != \"\" {\n\t\tdsn = u.Path\n\t}\n\n\tif u.Host != \"\" && u.Host != \"localhost\" {\n\t\tdsn = path.Join(u.Host, dsn)\n\t}\n\n\t\/\/ add params\n\tparams := u.Query().Encode()\n\tif len(params) > 0 {\n\t\tdsn += \"?\" + params\n\t}\n\n\treturn \"sqlite3\", dsn, nil\n}\n\nvar loaders = map[string]func(*URL) (string, string, error){\n\t\/\/ mssql\n\t\"mssql\": mssqlProcess,\n\t\"sqlserver\": mssqlProcess,\n\t\"ms\": mssqlProcess,\n\n\t\/\/ mysql\n\t\"mysql\": mysqlProcess,\n\t\"mariadb\": mysqlProcess,\n\t\"maria\": mysqlProcess,\n\t\"percona\": mysqlProcess,\n\t\"aurora\": mysqlProcess,\n\t\"my\": mysqlProcess,\n\n\t\/\/ oracle\n\t\"ora\": oracleProcess,\n\t\"oracle\": oracleProcess,\n\t\"oci8\": oracleProcess,\n\t\"oci\": oracleProcess,\n\t\"or\": oracleProcess,\n\n\t\/\/ postgresql\n\t\"postgres\": postgresProcess,\n\t\"postgresql\": postgresProcess,\n\t\"pgsql\": postgresProcess,\n\t\"pg\": postgresProcess,\n\n\t\/\/ sqlite\n\t\"sqlite3\": sqliteProcess,\n\t\"sqlite\": sqliteProcess,\n\t\"file\": sqliteProcess,\n\t\"sq\": sqliteProcess,\n\n\t\/\/ sap hana\n\t\"hdb\": sapProcess,\n\t\"hana\": sapProcess,\n\t\"sap\": sapProcess,\n\t\"saphana\": sapProcess,\n\t\"sa\": sapProcess,\n}\n\n\/\/ AddLoaderAliases copies the existing loader set for name for each of the\n\/\/ aliases.\nfunc AddLoaderAliases(name string, aliases ...string) error {\n\tf, ok := loaders[name]\n\tif !ok {\n\t\treturn ErrInvalidDatabaseScheme\n\t}\n\n\tfor _, alias := range aliases {\n\t\tloaders[alias] = f\n\t}\n\n\treturn nil\n}\n<commit_msg>Some more minor updates to package documentation<commit_after>\/\/ Package dburl provides a standard, URL style mechanism for of parsing, and\n\/\/ opening database connection strings (in the form of a URL).\n\/\/\n\/\/ Database URL Connection Strings\n\/\/\n\/\/ Supported database URLs are of the form:\n\/\/\n\/\/ protocol+transport:\/\/user:pass@host\/dbname?opt1=a&opt2=b\n\/\/ protocol:\/path\/to\/file\n\/\/\n\/\/ Where:\n\/\/\n\/\/ protocol - driver name or alias (see below)\n\/\/ transport - the transport protocol [tcp, udp, unix] (only mysql for now)\n\/\/ user - the username to connect as\n\/\/ pass - the password to use\n\/\/ host - the remote host\n\/\/ dbname - the database or service name to connect to\n\/\/ ?opt1=... - additional database driver options\n\/\/ (see respective SQL driver for available options)\n\/\/\n\/\/ Example URLs for Open or Parse\n\/\/\n\/\/ The following are URLs that can be processed using Parse or Open:\n\/\/\n\/\/ postgres:\/\/user:pass@localhost\/dbname\n\/\/ pg:\/\/user:pass@localhost\/dbname?sslmode=disable\n\/\/ mysql:\/\/user:pass@localhost\/dbname\n\/\/ mysql:\/var\/run\/mysqld\/mysqld.sock\n\/\/ sqlserver:\/\/user:pass@remote-host.com\/dbname\n\/\/ oracle:\/\/user:pass@somehost.com\/oracledb\n\/\/ sap:\/\/user:pass@localhost\/dbname\n\/\/ sqlite:\/path\/to\/file.db\n\/\/ file:myfile.sqlite3?loc=auto\n\/\/\n\/\/ Parsing Rules\n\/\/\n\/\/ Parse relies heavily on the standard net\/url\/URL type, as such it has the\n\/\/ same parsing conventions\/semantics as any URL parsed by the standard\n\/\/ library's net\/url\/Parse.\n\/\/\n\/\/ SQL Driver Aliases\n\/\/\n\/\/ The following protocol aliases are available, and any URL passed to Open or\n\/\/ Parse will be processed the same as their respective driver:\n\/\/\n\/\/ Database (driver) | Aliases\n\/\/ ------------------------------------------------------------------\n\/\/ Microsoft SQL Server (mssql) | ms, sqlserver\n\/\/ MySQL (mysql) | my, mariadb, maria, percona, aurora\n\/\/ Oracle (ora) | or, oracle, oci8, oci\n\/\/ PostgreSQL (postgres) | pg, postgresql, pgsql\n\/\/ SAP HANA (hdb) | sa, saphana, sap, hana\n\/\/ SQLite3 (sqlite3) | sq, sqlite, file\n\/\/\n\/\/ Usage Notes\n\/\/\n\/\/ Please note that this package does not import actual SQL drivers, and only\n\/\/ provides a standard mechanism to parse their respective URL string.\n\/\/\n\/\/ For reference, these are the following \"expected\" SQL drivers that would need\n\/\/ to be imported:\n\/\/\n\/\/ Database (driver) | Package\n\/\/ ------------------------------------------------------------------\n\/\/ Microsoft SQL Server (mssql) | github.com\/denisenkom\/go-mssqldb\n\/\/ MySQL (mysql) | github.com\/go-sql-driver\/mysql\n\/\/ Oracle (ora) | gopkg.in\/rana\/ora.v4\n\/\/ PostgreSQL (postgres) | github.com\/lib\/pq\n\/\/ SAP HANA (hdb) | github.com\/SAP\/go-hdb\/driver\n\/\/ SQLite3 (sqlite3) | github.com\/mattn\/go-sqlite3\n\/\/\n\/\/ Related Projects\n\/\/\n\/\/ This package was written mainly to support xo (https:\/\/github.com\/knq\/xo)\n\/\/ and usql (https:\/\/github.com\/knq\/usql).\npackage dburl\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ ErrInvalidDatabaseScheme is the invalid database scheme error.\n\tErrInvalidDatabaseScheme = errors.New(\"invalid database scheme\")\n\n\t\/\/ ErrInvalidTransportProtocol is the invalid transport protocol error.\n\tErrInvalidTransportProtocol = errors.New(\"invalid transport protocol\")\n\n\t\/\/ ErrUnknownDatabaseType is the unknown database type error.\n\tErrUnknownDatabaseType = errors.New(\"unknown database type\")\n\n\t\/\/ ErrInvalidPort is the invalid port error.\n\tErrInvalidPort = errors.New(\"invalid port\")\n\n\t\/\/ ErrOraMustProvideUsernameAndPassword is the ora (Oracle) must provide\n\t\/\/ username and password error.\n\tErrOraMustProvideUsernameAndPassword = errors.New(\"ora: must provide username and password\")\n)\n\n\/\/ URL wraps the standard net\/url.URL type, adding OriginalScheme, Proto,\n\/\/ Driver, and DSN strings.\ntype URL struct {\n\t\/\/ URL is the base net\/url\/URL.\n\turl.URL\n\n\t\/\/ OriginalScheme is the original parsed scheme (ie, \"sq\", \"mysql+unix\", \"sap\", etc).\n\tOriginalScheme string\n\n\t\/\/ Proto is the specified protocol (ie, \"tcp\", \"udp\", \"unix\"), if provided.\n\tProto string\n\n\t\/\/ Driver is the non-aliased SQL driver name that should be used in a call\n\t\/\/ to sql\/Open.\n\tDriver string\n\n\t\/\/ DSN is the built connection \"data source name\" that can be used in a\n\t\/\/ call to sql\/Open.\n\tDSN string\n}\n\n\/\/ String satisfies the stringer interface.\nfunc (u *URL) String() string {\n\tp := &url.URL{\n\t\tScheme: u.OriginalScheme,\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn p.String()\n}\n\n\/\/ Short provides a short description of the user, host, and database.\nfunc (u *URL) Short() string {\n\ts := u.Driver[:2]\n\tswitch s {\n\tcase \"po\":\n\t\ts = \"pg\"\n\tcase \"hd\":\n\t\ts = \"sa\"\n\t}\n\n\tif u.Proto != \"tcp\" {\n\t\ts += \"+\" + u.Proto\n\t}\n\n\ts += \":\"\n\n\tif u.User != nil {\n\t\tif un := u.User.Username(); un != \"\" {\n\t\t\ts += un + \"@\"\n\t\t}\n\t}\n\n\tif u.Host != \"\" {\n\t\ts += u.Host\n\t}\n\n\tif u.Path != \"\" && u.Path != \"\/\" {\n\t\ts += u.Path\n\t}\n\n\tif u.Opaque != \"\" {\n\t\ts += u.Opaque\n\t}\n\n\treturn s\n}\n\n\/\/ Parse parses a rawurl string and normalizes the scheme.\nfunc Parse(rawurl string) (*URL, error) {\n\t\/\/ parse url\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn nil, ErrInvalidDatabaseScheme\n\t}\n\n\t\/\/ create url\n\tv := &URL{URL: *u, OriginalScheme: u.Scheme, Proto: \"tcp\"}\n\tv.Scheme = strings.ToLower(v.Scheme)\n\n\t\/\/ check if +protocol is in scheme\n\tif strings.Contains(v.Scheme, \"+\") {\n\t\tp := strings.SplitN(v.Scheme, \"+\", 2)\n\t\tv.Scheme = p[0]\n\t\tv.Proto = p[1]\n\t}\n\n\t\/\/ check protocol\n\tif v.Proto != \"tcp\" && v.Proto != \"udp\" && v.Proto != \"unix\" {\n\t\treturn nil, ErrInvalidTransportProtocol\n\t}\n\n\t\/\/ get loader\n\tloader, ok := loaders[v.Scheme]\n\tif !ok {\n\t\treturn nil, ErrUnknownDatabaseType\n\t}\n\n\t\/\/ process\n\tv.Driver, v.DSN, err = loader(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif v.Driver != \"sqlite3\" && v.Opaque != \"\" {\n\t\treturn Parse(v.OriginalScheme + \":\/\/\" + v.Opaque)\n\t}\n\n\treturn v, nil\n}\n\n\/\/ Open takes a rawurl like \"protocol+transport:\/\/user:pass@host\/dbname?option1=a&option2=b\"\n\/\/ and creates a standard sql.DB connection.\n\/\/\n\/\/ See Parse for information on formatting URLs to work properly with Open.\nfunc Open(rawurl string) (*sql.DB, error) {\n\tu, err := Parse(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.Open(u.Driver, u.DSN)\n}\n\n\/\/ mssqlProcess processes a mssql url and protocol.\nfunc mssqlProcess(u *URL) (string, string, error) {\n\tvar err error\n\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tport := 1433\n\n\t\/\/ grab dbname\n\tvar dbname string\n\tif u.Path != \"\" {\n\t\tdbname = u.Path[1:]\n\t}\n\n\t\/\/ extract port if present\n\tpos := strings.Index(host, \":\")\n\tif pos != -1 {\n\t\tport, err = strconv.Atoi(host[pos+1:])\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", ErrInvalidPort\n\t\t}\n\t\thost = host[:pos]\n\t}\n\n\t\/\/ format dsn\n\tdsn := fmt.Sprintf(\"server=%s;port=%d\", host, port)\n\tif dbname != \"\" {\n\t\tdsn += \";database=\" + dbname\n\t}\n\n\t\/\/ add user\/pass\n\tif u.User != nil {\n\t\tif user := u.User.Username(); len(user) > 0 {\n\t\t\tdsn += \";user id=\" + user\n\t\t}\n\t\tif pass, ok := u.User.Password(); ok {\n\t\t\tdsn += \";password=\" + pass\n\t\t}\n\t}\n\n\t\/\/ add params\n\tfor k, v := range u.Query() {\n\t\tdsn += \";\" + k + \"=\" + v[0]\n\t}\n\n\treturn \"mssql\", dsn, nil\n}\n\n\/\/ mysqlProcess processes a mysql url and protocol.\nfunc mysqlProcess(u *URL) (string, string, error) {\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tdbname := u.Path\n\n\tif strings.HasPrefix(dbname, \"\/\") {\n\t\tdbname = dbname[1:]\n\t}\n\n\tif u.Proto == \"unix\" {\n\t\tif u.Opaque != \"\" {\n\t\t\thost = path.Dir(u.Opaque)\n\t\t\tdbname = path.Base(u.Opaque)\n\t\t} else {\n\t\t\thost = path.Join(u.Host, path.Dir(u.Path))\n\t\t\tdbname = path.Base(u.Path)\n\t\t}\n\t\thost = host + \"\/\" + dbname\n\t\tdbname = \"\"\n\n\t\tu.Host = host\n\t\tu.Path = \"\"\n\t} else if !strings.Contains(host, \":\") {\n\t\t\/\/ append default port\n\t\thost = host + \":3306\"\n\t}\n\n\t\/\/ create dsn\n\tdsn := fmt.Sprintf(\"%s(%s)\", u.Proto, host)\n\n\t\/\/ build user\/pass\n\tif u.User != nil {\n\t\tif un := u.User.Username(); len(un) > 0 {\n\t\t\tif up, ok := u.User.Password(); ok {\n\t\t\t\tun += \":\" + up\n\t\t\t}\n\t\t\tdsn = un + \"@\" + dsn\n\t\t}\n\t}\n\n\t\/\/ add database name\n\tdsn += \"\/\" + dbname\n\n\t\/\/ add params\n\tparams := u.Query().Encode()\n\tif len(params) > 0 {\n\t\tdsn += \"?\" + params\n\t}\n\n\t\/\/ format\n\treturn \"mysql\", dsn, nil\n}\n\n\/\/ oracleProcess processes a ora (Oracle) url and protocol.\nfunc oracleProcess(u *URL) (string, string, error) {\n\tif u.User == nil {\n\t\treturn \"\", \"\", ErrOraMustProvideUsernameAndPassword\n\t}\n\n\t\/\/ build host or domain socket\n\thost := u.Host\n\tdbname := u.Path[1:]\n\n\t\/\/ build user\/pass\n\tuserinfo := \"\"\n\tif un := u.User.Username(); len(un) > 0 {\n\t\tuserinfo = un\n\t\tif up, ok := u.User.Password(); ok {\n\t\t\tuserinfo = userinfo + \"\/\" + up\n\t\t}\n\t}\n\n\t\/\/ format\n\treturn \"ora\", fmt.Sprintf(\n\t\t\"%s@%s\/%s\",\n\t\tuserinfo,\n\t\thost,\n\t\tdbname,\n\t), nil\n}\n\n\/\/ postgresProcess processes a postgres url and protocol.\nfunc postgresProcess(u *URL) (string, string, error) {\n\tp := &url.URL{\n\t\tScheme: \"postgres\",\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn \"postgres\", p.String(), nil\n}\n\n\/\/ sapProcess processes a hdb url and protocol.\nfunc sapProcess(u *URL) (string, string, error) {\n\tp := &url.URL{\n\t\tScheme: \"hdb\",\n\t\tOpaque: u.Opaque,\n\t\tUser: u.User,\n\t\tHost: u.Host,\n\t\tPath: u.Path,\n\t\tRawPath: u.RawPath,\n\t\tRawQuery: u.RawQuery,\n\t\tFragment: u.Fragment,\n\t}\n\n\treturn \"hdb\", p.String(), nil\n}\n\n\/\/ sqliteProcess processes a sqlite3 url and protocol.\nfunc sqliteProcess(u *URL) (string, string, error) {\n\tdsn := u.Opaque\n\tif u.Path != \"\" {\n\t\tdsn = u.Path\n\t}\n\n\tif u.Host != \"\" && u.Host != \"localhost\" {\n\t\tdsn = path.Join(u.Host, dsn)\n\t}\n\n\t\/\/ add params\n\tparams := u.Query().Encode()\n\tif len(params) > 0 {\n\t\tdsn += \"?\" + params\n\t}\n\n\treturn \"sqlite3\", dsn, nil\n}\n\nvar loaders = map[string]func(*URL) (string, string, error){\n\t\/\/ mssql\n\t\"mssql\": mssqlProcess,\n\t\"sqlserver\": mssqlProcess,\n\t\"ms\": mssqlProcess,\n\n\t\/\/ mysql\n\t\"mysql\": mysqlProcess,\n\t\"mariadb\": mysqlProcess,\n\t\"maria\": mysqlProcess,\n\t\"percona\": mysqlProcess,\n\t\"aurora\": mysqlProcess,\n\t\"my\": mysqlProcess,\n\n\t\/\/ oracle\n\t\"ora\": oracleProcess,\n\t\"oracle\": oracleProcess,\n\t\"oci8\": oracleProcess,\n\t\"oci\": oracleProcess,\n\t\"or\": oracleProcess,\n\n\t\/\/ postgresql\n\t\"postgres\": postgresProcess,\n\t\"postgresql\": postgresProcess,\n\t\"pgsql\": postgresProcess,\n\t\"pg\": postgresProcess,\n\n\t\/\/ sqlite\n\t\"sqlite3\": sqliteProcess,\n\t\"sqlite\": sqliteProcess,\n\t\"file\": sqliteProcess,\n\t\"sq\": sqliteProcess,\n\n\t\/\/ sap hana\n\t\"hdb\": sapProcess,\n\t\"hana\": sapProcess,\n\t\"sap\": sapProcess,\n\t\"saphana\": sapProcess,\n\t\"sa\": sapProcess,\n}\n\n\/\/ AddLoaderAliases copies the existing loader set for name for each of the\n\/\/ aliases.\nfunc AddLoaderAliases(name string, aliases ...string) error {\n\tf, ok := loaders[name]\n\tif !ok {\n\t\treturn ErrInvalidDatabaseScheme\n\t}\n\n\tfor _, alias := range aliases {\n\t\tloaders[alias] = f\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package death\n\n\/\/Manage the death of your application.\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\tLOG \"github.com\/cihub\/seelog\"\n)\n\n\/\/Death manages the death of your application.\ntype Death struct {\n\twg *sync.WaitGroup\n\tsigChannel chan os.Signal\n\tcallChannel chan struct{}\n\ttimeout time.Duration\n\tlog Logger\n}\n\nvar empty struct{}\n\n\/\/Logger interface to log.\ntype Logger interface {\n\tError(v ...interface{}) error\n\tDebug(v ...interface{})\n\tInfo(v ...interface{})\n\tWarn(v ...interface{}) error\n}\n\n\/\/closer is a wrapper to the struct we are going to close with metadata\n\/\/to help with debuging close.\ntype closer struct {\n\tIndex int\n\tC io.Closer\n\tName string\n\tPKGPath string\n}\n\n\/\/NewDeath Create Death with the signals you want to die from.\nfunc NewDeath(signals ...os.Signal) (death *Death) {\n\tdeath = &Death{timeout: 10 * time.Second,\n\t\tsigChannel: make(chan os.Signal, 1),\n\t\tcallChannel: make(chan struct{}, 1),\n\t\twg: &sync.WaitGroup{},\n\t\tlog: LOG.Current}\n\tsignal.Notify(death.sigChannel, signals...)\n\tdeath.wg.Add(1)\n\tgo death.listenForSignal()\n\treturn death\n}\n\n\/\/SetTimeout Overrides the time death is willing to wait for a objects to be closed.\nfunc (d *Death) SetTimeout(t time.Duration) {\n\td.timeout = t\n}\n\n\/\/SetLogger Overrides the default logger (seelog)\nfunc (d *Death) SetLogger(l Logger) {\n\td.log = l\n}\n\n\/\/WaitForDeath wait for signal and then kill all items that need to die.\nfunc (d *Death) WaitForDeath(closable ...io.Closer) {\n\td.wg.Wait()\n\td.log.Info(\"Shutdown started...\")\n\tcount := len(closable)\n\td.log.Debug(\"Closing \", count, \" objects\")\n\tif count > 0 {\n\t\td.closeInMass(closable...)\n\t}\n}\n\n\/\/WaitForDeathWithFunc allows you to have a single function get called when it's time to\n\/\/kill your application.\nfunc (d *Death) WaitForDeathWithFunc(f func()) {\n\td.wg.Wait()\n\td.log.Info(\"Shutdown started...\")\n\tf()\n}\n\n\/\/getPkgPath for an io closer.\nfunc getPkgPath(c io.Closer) (name string, pkgPath string) {\n\tt := reflect.TypeOf(c)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn t.Name(), t.PkgPath()\n}\n\n\/\/closeInMass Close all the objects at once and wait forr them to finish with a channel.\nfunc (d *Death) closeInMass(closable ...io.Closer) {\n\n\tcount := len(closable)\n\tsentToClose := make(map[int]closer)\n\t\/\/call close async\n\tdoneClosers := make(chan closer, count)\n\tfor i, c := range closable {\n\t\tname, pkgPath := getPkgPath(c)\n\t\tcloser := closer{Index: i, C: c, Name: name, PKGPath: pkgPath}\n\t\tgo d.closeObjects(closer, doneClosers)\n\t\tsentToClose[i] = closer\n\t}\n\n\t\/\/wait on channel for notifications.\n\n\ttimer := time.NewTimer(d.timeout)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\td.log.Warn(count, \" object(s) remaining but timer expired.\")\n\t\t\tfor _, c := range sentToClose {\n\t\t\t\td.log.Error(\"Failed to close: \", c.PKGPath, \"\/\", c.Name)\n\t\t\t}\n\t\t\treturn\n\t\tcase closer := <-doneClosers:\n\t\t\tdelete(sentToClose, closer.Index)\n\t\t\tcount--\n\t\t\td.log.Debug(count, \" object(s) left\")\n\t\t\tif count == 0 && len(sentToClose) == 0 {\n\t\t\t\td.log.Debug(\"Finished closing objects\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/closeObjects and return a bool when finished on a channel.\nfunc (d *Death) closeObjects(closer closer, done chan<- closer) {\n\terr := closer.C.Close()\n\tif nil != err {\n\t\td.log.Error(err)\n\t}\n\tdone <- closer\n}\n\n\/\/FallOnSword manually initiates the death process.\nfunc (d *Death) FallOnSword() {\n\tselect {\n\tcase d.callChannel <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ListenForSignal Manage death of application by signal.\nfunc (d *Death) listenForSignal() {\n\tdefer d.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-d.sigChannel:\n\t\t\treturn\n\t\tcase <-d.callChannel:\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>Delete unused variable<commit_after>package death\n\n\/\/Manage the death of your application.\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\tLOG \"github.com\/cihub\/seelog\"\n)\n\n\/\/Death manages the death of your application.\ntype Death struct {\n\twg *sync.WaitGroup\n\tsigChannel chan os.Signal\n\tcallChannel chan struct{}\n\ttimeout time.Duration\n\tlog Logger\n}\n\n\/\/Logger interface to log.\ntype Logger interface {\n\tError(v ...interface{}) error\n\tDebug(v ...interface{})\n\tInfo(v ...interface{})\n\tWarn(v ...interface{}) error\n}\n\n\/\/closer is a wrapper to the struct we are going to close with metadata\n\/\/to help with debuging close.\ntype closer struct {\n\tIndex int\n\tC io.Closer\n\tName string\n\tPKGPath string\n}\n\n\/\/NewDeath Create Death with the signals you want to die from.\nfunc NewDeath(signals ...os.Signal) (death *Death) {\n\tdeath = &Death{timeout: 10 * time.Second,\n\t\tsigChannel: make(chan os.Signal, 1),\n\t\tcallChannel: make(chan struct{}, 1),\n\t\twg: &sync.WaitGroup{},\n\t\tlog: LOG.Current}\n\tsignal.Notify(death.sigChannel, signals...)\n\tdeath.wg.Add(1)\n\tgo death.listenForSignal()\n\treturn death\n}\n\n\/\/SetTimeout Overrides the time death is willing to wait for a objects to be closed.\nfunc (d *Death) SetTimeout(t time.Duration) {\n\td.timeout = t\n}\n\n\/\/SetLogger Overrides the default logger (seelog)\nfunc (d *Death) SetLogger(l Logger) {\n\td.log = l\n}\n\n\/\/WaitForDeath wait for signal and then kill all items that need to die.\nfunc (d *Death) WaitForDeath(closable ...io.Closer) {\n\td.wg.Wait()\n\td.log.Info(\"Shutdown started...\")\n\tcount := len(closable)\n\td.log.Debug(\"Closing \", count, \" objects\")\n\tif count > 0 {\n\t\td.closeInMass(closable...)\n\t}\n}\n\n\/\/WaitForDeathWithFunc allows you to have a single function get called when it's time to\n\/\/kill your application.\nfunc (d *Death) WaitForDeathWithFunc(f func()) {\n\td.wg.Wait()\n\td.log.Info(\"Shutdown started...\")\n\tf()\n}\n\n\/\/getPkgPath for an io closer.\nfunc getPkgPath(c io.Closer) (name string, pkgPath string) {\n\tt := reflect.TypeOf(c)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn t.Name(), t.PkgPath()\n}\n\n\/\/closeInMass Close all the objects at once and wait forr them to finish with a channel.\nfunc (d *Death) closeInMass(closable ...io.Closer) {\n\n\tcount := len(closable)\n\tsentToClose := make(map[int]closer)\n\t\/\/call close async\n\tdoneClosers := make(chan closer, count)\n\tfor i, c := range closable {\n\t\tname, pkgPath := getPkgPath(c)\n\t\tcloser := closer{Index: i, C: c, Name: name, PKGPath: pkgPath}\n\t\tgo d.closeObjects(closer, doneClosers)\n\t\tsentToClose[i] = closer\n\t}\n\n\t\/\/wait on channel for notifications.\n\n\ttimer := time.NewTimer(d.timeout)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\td.log.Warn(count, \" object(s) remaining but timer expired.\")\n\t\t\tfor _, c := range sentToClose {\n\t\t\t\td.log.Error(\"Failed to close: \", c.PKGPath, \"\/\", c.Name)\n\t\t\t}\n\t\t\treturn\n\t\tcase closer := <-doneClosers:\n\t\t\tdelete(sentToClose, closer.Index)\n\t\t\tcount--\n\t\t\td.log.Debug(count, \" object(s) left\")\n\t\t\tif count == 0 && len(sentToClose) == 0 {\n\t\t\t\td.log.Debug(\"Finished closing objects\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/closeObjects and return a bool when finished on a channel.\nfunc (d *Death) closeObjects(closer closer, done chan<- closer) {\n\terr := closer.C.Close()\n\tif nil != err {\n\t\td.log.Error(err)\n\t}\n\tdone <- closer\n}\n\n\/\/FallOnSword manually initiates the death process.\nfunc (d *Death) FallOnSword() {\n\tselect {\n\tcase d.callChannel <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ListenForSignal Manage death of application by signal.\nfunc (d *Death) listenForSignal() {\n\tdefer d.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-d.sigChannel:\n\t\t\treturn\n\t\tcase <-d.callChannel:\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/atotto\/clipboard\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/docker\/docker\/pkg\/homedir\"\n\t\"github.com\/docker\/docker\/pkg\/term\"\n)\n\nconst (\n\tdefaultFilestore string = \".pony\"\n\tdefaultGPGPath string = \".gnupg\/\"\n\n\t\/\/ VERSION is the command version.\n\tVERSION = \"v0.1.0\"\n\n\t\/\/ BANNER is the commands banner.\n\tBANNER = ` _ __ ___ _ __ _ _\n| '_ \\ \/ _ \\| '_ \\| | | |\n| |_) | (_) | | | | |_| |\n| .__\/ \\___\/|_| |_|\\__, |\n|_| |___\/\n`\n)\n\nvar (\n\tdefaultGPGKey string\n\tfilestore string\n\tgpgPath string\n\tpublicKeyring string\n\tsecretKeyring string\n\n\ts SecretFile\n\n\tdebug bool\n\tversion bool\n)\n\n\/\/ preload initializes any global options and configuration\n\/\/ before the main or sub commands are run\nfunc preload(c *cli.Context) (err error) {\n\tif c.GlobalBool(\"debug\") {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t}\n\n\tdefaultGPGKey = c.GlobalString(\"keyid\")\n\n\thome := homedir.Get()\n\thomeShort := homedir.GetShortcutString()\n\n\t\/\/ set the filestore variable\n\tfilestore = strings.Replace(c.GlobalString(\"file\"), homeShort, home, 1)\n\n\t\/\/ set gpg path variables\n\tgpgPath = strings.Replace(c.GlobalString(\"gpgpath\"), homeShort, home, 1)\n\tpublicKeyring = filepath.Join(gpgPath, \"pubring.gpg\")\n\tsecretKeyring = filepath.Join(gpgPath, \"secring.gpg\")\n\n\t\/\/ if they passed an arguement, run the prechecks\n\t\/\/ TODO(jfrazelle): this will run even if the command they issue\n\t\/\/ does not exist, which is kinda shitty\n\tif len(c.Args()) > 0 {\n\t\tpreChecks()\n\t}\n\n\t\/\/ we need to read the secrets file for all commands\n\t\/\/ might as well be dry about it\n\ts, err = readSecretsFile(filestore)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"pony\"\n\tapp.Version = VERSION\n\tapp.Author = \"@jfrazelle\"\n\tapp.Email = \"no-reply@butts.com\"\n\tapp.Usage = \"Local File-Based Password, API Key, Secret, Recovery Code Store Backed By GPG\"\n\tapp.Before = preload\n\tapp.EnableBashCompletion = true\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug, d\",\n\t\t\tUsage: \"run in debug mode\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"file, f\",\n\t\t\tValue: fmt.Sprintf(\"%s\/%s\", homedir.GetShortcutString(), defaultFilestore),\n\t\t\tUsage: \"file to use for saving encrypted secrets\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"gpgpath\",\n\t\t\tValue: fmt.Sprintf(\"%s\/%s\", homedir.GetShortcutString(), defaultGPGPath),\n\t\t\tUsage: \"filepath used for gpg keys\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"keyid\",\n\t\t\tUsage: \"optionally set specific gpg keyid\/fingerprint to use for encryption & decryption\",\n\t\t\tEnvVar: fmt.Sprintf(\"%s_KEYID\", strings.ToUpper(app.Name)),\n\t\t},\n\t}\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"add\",\n\t\t\tAliases: []string{\"save\"},\n\t\t\tUsage: \"Add a new secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\tlogrus.Errorf(\"You need to pass a key and value to the command. ex: %s %s com.example.apikey EUSJCLLAWE\", app.Name, c.Command.Name)\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey, value := args[0], args[1]\n\t\t\t\ts.setKeyValue(key, value, false)\n\n\t\t\t\tfmt.Printf(\"Added %s %s to secrets\", key, value)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete\",\n\t\t\tAliases: []string{\"rm\"},\n\t\t\tUsage: \"Delete a secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 1 {\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tkey := args[0]\n\t\t\t\tif _, ok := s.Secrets[key]; !ok {\n\t\t\t\t\tlogrus.Fatalf(\"Secret for (%s) does not exist\", key)\n\t\t\t\t}\n\t\t\t\tdelete(s.Secrets, key)\n\n\t\t\t\tif err := writeSecretsFile(filestore, s); err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"Secret %q deleted successfully\", key)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"get\",\n\t\t\tUsage: \"Get the value of a secret\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"copy, c\",\n\t\t\t\t\tUsage: \"copy the secret to your clipboard\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 1 {\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey := args[0]\n\t\t\t\tif _, ok := s.Secrets[key]; !ok {\n\t\t\t\t\tlogrus.Fatalf(\"Secret for (%s) does not exist\", key)\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(s.Secrets[key])\n\n\t\t\t\t\/\/ copy to clipboard\n\t\t\t\tif c.Bool(\"copy\") {\n\t\t\t\t\tif err := clipboard.WriteAll(s.Secrets[key]); err != nil {\n\t\t\t\t\t\tlogrus.Fatalf(\"Clipboard copy failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(\"Copied to clipboard!\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tAliases: []string{\"ls\"},\n\t\t\tUsage: \"List all secrets\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"filter, f\",\n\t\t\t\t\tUsage: \"filter secrets keys by a regular expression\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t_, stdout, _ := term.StdStreams()\n\t\t\t\tw := tabwriter.NewWriter(stdout, 20, 1, 3, ' ', 0)\n\n\t\t\t\t\/\/ print header\n\t\t\t\tfmt.Fprintln(w, \"KEY\\tVALUE\")\n\n\t\t\t\t\/\/ print the keys alphabetically\n\t\t\t\tprintSorted := func(m map[string]string) {\n\t\t\t\t\tmk := make([]string, len(m))\n\t\t\t\t\ti := 0\n\t\t\t\t\tfor k := range m {\n\t\t\t\t\t\tmk[i] = k\n\t\t\t\t\t\ti++\n\t\t\t\t\t}\n\t\t\t\t\tsort.Strings(mk)\n\n\t\t\t\t\tfor _, key := range mk {\n\t\t\t\t\t\tfilter := c.String(\"filter\")\n\t\t\t\t\t\tif filter != \"\" {\n\t\t\t\t\t\t\tif ok, _ := regexp.MatchString(c.String(\"filter\"), key); !ok {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\n\", key, m[key])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tprintSorted(s.Secrets)\n\n\t\t\t\tw.Flush()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tUsage: \"Update a secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\tlogrus.Errorf(\"You need to pass a key and value to the command. ex: %s %s com.example.apikey EUSJCLLAWE\", app.Name, c.Command.Name)\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey, value := args[0], args[1]\n\t\t\t\ts.setKeyValue(key, value, true)\n\n\t\t\t\tfmt.Printf(\"Updated secret %s to %s\", key, value)\n\t\t\t},\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<commit_msg>term no<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/atotto\/clipboard\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/docker\/docker\/pkg\/homedir\"\n)\n\nconst (\n\tdefaultFilestore string = \".pony\"\n\tdefaultGPGPath string = \".gnupg\/\"\n\n\t\/\/ VERSION is the command version.\n\tVERSION = \"v0.1.0\"\n\n\t\/\/ BANNER is the commands banner.\n\tBANNER = ` _ __ ___ _ __ _ _\n| '_ \\ \/ _ \\| '_ \\| | | |\n| |_) | (_) | | | | |_| |\n| .__\/ \\___\/|_| |_|\\__, |\n|_| |___\/\n`\n)\n\nvar (\n\tdefaultGPGKey string\n\tfilestore string\n\tgpgPath string\n\tpublicKeyring string\n\tsecretKeyring string\n\n\ts SecretFile\n\n\tdebug bool\n\tversion bool\n)\n\n\/\/ preload initializes any global options and configuration\n\/\/ before the main or sub commands are run\nfunc preload(c *cli.Context) (err error) {\n\tif c.GlobalBool(\"debug\") {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t}\n\n\tdefaultGPGKey = c.GlobalString(\"keyid\")\n\n\thome := homedir.Get()\n\thomeShort := homedir.GetShortcutString()\n\n\t\/\/ set the filestore variable\n\tfilestore = strings.Replace(c.GlobalString(\"file\"), homeShort, home, 1)\n\n\t\/\/ set gpg path variables\n\tgpgPath = strings.Replace(c.GlobalString(\"gpgpath\"), homeShort, home, 1)\n\tpublicKeyring = filepath.Join(gpgPath, \"pubring.gpg\")\n\tsecretKeyring = filepath.Join(gpgPath, \"secring.gpg\")\n\n\t\/\/ if they passed an arguement, run the prechecks\n\t\/\/ TODO(jfrazelle): this will run even if the command they issue\n\t\/\/ does not exist, which is kinda shitty\n\tif len(c.Args()) > 0 {\n\t\tpreChecks()\n\t}\n\n\t\/\/ we need to read the secrets file for all commands\n\t\/\/ might as well be dry about it\n\ts, err = readSecretsFile(filestore)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"pony\"\n\tapp.Version = VERSION\n\tapp.Author = \"@jfrazelle\"\n\tapp.Email = \"no-reply@butts.com\"\n\tapp.Usage = \"Local File-Based Password, API Key, Secret, Recovery Code Store Backed By GPG\"\n\tapp.Before = preload\n\tapp.EnableBashCompletion = true\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug, d\",\n\t\t\tUsage: \"run in debug mode\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"file, f\",\n\t\t\tValue: fmt.Sprintf(\"%s\/%s\", homedir.GetShortcutString(), defaultFilestore),\n\t\t\tUsage: \"file to use for saving encrypted secrets\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"gpgpath\",\n\t\t\tValue: fmt.Sprintf(\"%s\/%s\", homedir.GetShortcutString(), defaultGPGPath),\n\t\t\tUsage: \"filepath used for gpg keys\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"keyid\",\n\t\t\tUsage: \"optionally set specific gpg keyid\/fingerprint to use for encryption & decryption\",\n\t\t\tEnvVar: fmt.Sprintf(\"%s_KEYID\", strings.ToUpper(app.Name)),\n\t\t},\n\t}\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"add\",\n\t\t\tAliases: []string{\"save\"},\n\t\t\tUsage: \"Add a new secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\tlogrus.Errorf(\"You need to pass a key and value to the command. ex: %s %s com.example.apikey EUSJCLLAWE\", app.Name, c.Command.Name)\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey, value := args[0], args[1]\n\t\t\t\ts.setKeyValue(key, value, false)\n\n\t\t\t\tfmt.Printf(\"Added %s %s to secrets\", key, value)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"delete\",\n\t\t\tAliases: []string{\"rm\"},\n\t\t\tUsage: \"Delete a secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 1 {\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tkey := args[0]\n\t\t\t\tif _, ok := s.Secrets[key]; !ok {\n\t\t\t\t\tlogrus.Fatalf(\"Secret for (%s) does not exist\", key)\n\t\t\t\t}\n\t\t\t\tdelete(s.Secrets, key)\n\n\t\t\t\tif err := writeSecretsFile(filestore, s); err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"Secret %q deleted successfully\", key)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"get\",\n\t\t\tUsage: \"Get the value of a secret\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"copy, c\",\n\t\t\t\t\tUsage: \"copy the secret to your clipboard\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 1 {\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey := args[0]\n\t\t\t\tif _, ok := s.Secrets[key]; !ok {\n\t\t\t\t\tlogrus.Fatalf(\"Secret for (%s) does not exist\", key)\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(s.Secrets[key])\n\n\t\t\t\t\/\/ copy to clipboard\n\t\t\t\tif c.Bool(\"copy\") {\n\t\t\t\t\tif err := clipboard.WriteAll(s.Secrets[key]); err != nil {\n\t\t\t\t\t\tlogrus.Fatalf(\"Clipboard copy failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(\"Copied to clipboard!\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tAliases: []string{\"ls\"},\n\t\t\tUsage: \"List all secrets\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"filter, f\",\n\t\t\t\t\tUsage: \"filter secrets keys by a regular expression\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tw := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)\n\n\t\t\t\t\/\/ print header\n\t\t\t\tfmt.Fprintln(w, \"KEY\\tVALUE\")\n\n\t\t\t\t\/\/ print the keys alphabetically\n\t\t\t\tprintSorted := func(m map[string]string) {\n\t\t\t\t\tmk := make([]string, len(m))\n\t\t\t\t\ti := 0\n\t\t\t\t\tfor k := range m {\n\t\t\t\t\t\tmk[i] = k\n\t\t\t\t\t\ti++\n\t\t\t\t\t}\n\t\t\t\t\tsort.Strings(mk)\n\n\t\t\t\t\tfor _, key := range mk {\n\t\t\t\t\t\tfilter := c.String(\"filter\")\n\t\t\t\t\t\tif filter != \"\" {\n\t\t\t\t\t\t\tif ok, _ := regexp.MatchString(c.String(\"filter\"), key); !ok {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\n\", key, m[key])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tprintSorted(s.Secrets)\n\n\t\t\t\tw.Flush()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tUsage: \"Update a secret\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\targs := c.Args()\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\tlogrus.Errorf(\"You need to pass a key and value to the command. ex: %s %s com.example.apikey EUSJCLLAWE\", app.Name, c.Command.Name)\n\t\t\t\t\tcli.ShowSubcommandHelp(c)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ add the key value pair to secrets\n\t\t\t\tkey, value := args[0], args[1]\n\t\t\t\ts.setKeyValue(key, value, true)\n\n\t\t\t\tfmt.Printf(\"Updated secret %s to %s\", key, value)\n\t\t\t},\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\/\/ \"github.com\/ssbl\/trago\/db\"\n)\n\nconst (\n\tSERVFLAG = \"-s\"\n\tSERVCMD = \"trago -s {dir}\"\n\tserverUsage = \"Run in server mode in the specified directory.\\n\" +\n\t\t\"If this is set, all other options are ignored.\"\n\tserverUsageShort = \"Shorthand for --server.\"\n)\n\nvar (\n\tserver string\n\tserverDir string\n\tclientDir string\n\tflagDir string\n\tdefaultDir = \"(nil)\"\n)\n\nfunc init() {\n\tflag.StringVar(&flagDir, \"server\", defaultDir, serverUsage)\n\tflag.StringVar(&flagDir, \"s\", defaultDir, serverUsageShort)\n\n\tlog.SetFlags(0)\n\tflag.Usage = usage\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif flagDir == defaultDir {\n\t\tserver, serverDir, clientDir := parseArgs()\n\t\tlog.Printf(\"%s:%s %s\\n\", server, serverDir, clientDir)\n\t}\n}\n\nfunc usage() {\n\tlog.Printf(\"Usage: trago server:dir client-dir\\n\\n\")\n\n\tflag.VisitAll(func(f *flag.Flag){\n\t\tlog.Printf(\"-%s: %s\\n\", f.Name, f.Usage)\n\t})\n}\n\nfunc parseArgs() (string, string, string) {\n\tvar s, sd, cd string\n\n\tif len(flag.Args()) != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tremote := strings.Split(flag.Arg(0), \":\")\n\tif len(remote) != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t} else {\n\t\ts = strings.TrimSpace(remote[0])\n\t\tsd = strings.TrimSpace(remote[1])\n\t}\n\n\tcd = strings.TrimSpace(flag.Arg(1))\n\n\treturn s, sd, cd\n}\n<commit_msg>better usage message<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\/\/ \"github.com\/ssbl\/trago\/db\"\n)\n\nconst (\n\tSERVFLAG = \"-s\"\n\tSERVCMD = \"trago -s {dir}\"\n\tserverUsage = \"Run in server mode in the specified directory.\\n\"\n)\n\nvar (\n\tserver string\n\tserverDir string\n\tclientDir string\n\tflagDir string\n\tdefaultDir = \"(nil)\"\n)\n\nfunc init() {\n\tflag.StringVar(&flagDir, \"server\", defaultDir, serverUsage)\n\tflag.StringVar(&flagDir, \"s\", defaultDir, \"Shorthand for --server.\")\n\n\tlog.SetFlags(0)\n\tflag.Usage = usage\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif flagDir == defaultDir {\n\t\tserver, serverDir, clientDir := parseArgs()\n\t\tlog.Printf(\"%s:%s %s\\n\", server, serverDir, clientDir)\n\t}\n}\n\nfunc usage() {\n\tlog.Printf(\"Usage: trago server:dir client-dir\\n\\n\")\n\n\tlog.Printf(\"-s|--server <dir>\\n %s\\n\", serverUsage);\n}\n\nfunc parseArgs() (string, string, string) {\n\tvar s, sd, cd string\n\n\tif len(flag.Args()) != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tremote := strings.Split(flag.Arg(0), \":\")\n\tif len(remote) != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t} else {\n\t\ts = strings.TrimSpace(remote[0])\n\t\tsd = strings.TrimSpace(remote[1])\n\t}\n\n\tcd = strings.TrimSpace(flag.Arg(1))\n\n\treturn s, sd, cd\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc main() {\n\tpodfile := os.Stdin\n\n\tpodreader, err := NewPodReader(podfile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tversion, err := podreader.GetVersion()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(fmt.Sprintf(\"Input is a POD file, version %d.\", version))\n\n\tfilecount, err := podreader.GetFileCount()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(fmt.Sprintf(\"Found %d files.\", filecount))\n\n\tfiles, err := podreader.ReadFileTable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor i := 0; i < len(files); i++ {\n\t\tfile := files[i]\n\n\t\tfmt.Println(fmt.Sprintf(\"* %s (%d bytes \/ %d bytes), u1: %x, u2: %x, u3: %x\", file.name, file.size, file.size2, file.unknown1, file.unknown2, file.unknown3))\n\n\t\tExtractFile(podreader, file)\n\t}\n\n\tos.Exit(0)\n}\n\nfunc ExtractFile(podreader *PodReader, file PodFile) {\n\toutpath := NormalizePodPath(file.name)\n\n\tif err := FilePathIsValid(outpath); err != nil {\n\t\tpanic(err)\n\t}\n\n\thandle, err := CreateFile(outpath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer handle.Close()\n\n\tif err := podreader.ReadFile(file, handle); err != nil {\n\t\tpanic(err)\n\t}\n\n\thandle.Sync()\n}\n\nfunc NormalizePodPath(podpath string) string {\n\tpathparts := strings.Split(podpath, \"\\\\\")\n\treturn strings.Join(pathparts, \"\/\")\n}\n\nfunc FilePathIsValid(normalizedpath string) error {\n\tif path.IsAbs(normalizedpath) {\n\t\treturn errors.New(fmt.Sprintf(\"Cannot unpack file with embedded absolute file path: %s\", normalizedpath))\n\t}\n\n\treturn nil\n}\n\nfunc CreateFile(normalizedpath string) (*os.File, error) {\n\trootpath := path.Dir(normalizedpath)\n\tif rootpath != \".\" {\n\t\tif err := os.MkdirAll(rootpath, 0777); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thandle, err := os.OpenFile(normalizedpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn handle, nil\n}\n<commit_msg>Add command line flags.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc main() {\n\tunpack, podfilename := SetupFlags()\n\n\tpodfile, err := os.Open(podfilename)\n\tif err != nil {\n\t\tpanic(errors.New(fmt.Sprintf(\"Could not open the specified POD file %s: %s\", podfilename, err.Error())))\n\t}\n\n\tdefer podfile.Close()\n\n\tpodreader, err := NewPodReader(podfile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tversion, err := podreader.GetVersion()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(fmt.Sprintf(\"Input is a POD file, version %d.\", version))\n\n\tfilecount, err := podreader.GetFileCount()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(fmt.Sprintf(\"Found %d files.\", filecount))\n\n\tfiles, err := podreader.ReadFileTable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor i := 0; i < len(files); i++ {\n\t\tfile := files[i]\n\n\t\tfmt.Println(fmt.Sprintf(\"* %s (%d bytes \/ %d bytes), u1: %x, u2: %x, u3: %x\", file.name, file.size, file.size2, file.unknown1, file.unknown2, file.unknown3))\n\n\t\tif unpack {\n\t\t\tExtractFile(podreader, file)\n\t\t}\n\t}\n\n\tos.Exit(0)\n}\n\nfunc SetupFlags() (bool, string) {\n\tunpack := flag.Bool(\"e\", false, \"Unpack all of the files in the .pod archive.\")\n\n\tflag.Usage = func() {\n\t\tfmt.Println(fmt.Sprintf(\"Usage: %s [OPTION]... [FILE]\", os.Args[0]))\n\t\tfmt.Println(\"Examine and unpack .pod files from select PS2 games.\")\n\t\tfmt.Println(\"\")\n\t\tfmt.Println(\"\\t-e\\tUnpack the files inside the .pod file into the current working directory.\")\n\t}\n\n\tflag.Parse()\n\n\tif flag.NArg() != 1 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tfilename := flag.Args()[0]\n\n\treturn *unpack, filename\n}\n\nfunc ExtractFile(podreader *PodReader, file PodFile) {\n\toutpath := NormalizePodPath(file.name)\n\n\tif err := FilePathIsValid(outpath); err != nil {\n\t\tpanic(err)\n\t}\n\n\thandle, err := CreateFile(outpath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer handle.Close()\n\n\tif err := podreader.ReadFile(file, handle); err != nil {\n\t\tpanic(err)\n\t}\n\n\thandle.Sync()\n}\n\nfunc NormalizePodPath(podpath string) string {\n\tpathparts := strings.Split(podpath, \"\\\\\")\n\treturn strings.Join(pathparts, \"\/\")\n}\n\nfunc FilePathIsValid(normalizedpath string) error {\n\tif path.IsAbs(normalizedpath) {\n\t\treturn errors.New(fmt.Sprintf(\"Cannot unpack file with embedded absolute file path: %s\", normalizedpath))\n\t}\n\n\treturn nil\n}\n\nfunc CreateFile(normalizedpath string) (*os.File, error) {\n\trootpath := path.Dir(normalizedpath)\n\tif rootpath != \".\" {\n\t\tif err := os.MkdirAll(rootpath, 0777); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thandle, err := os.OpenFile(normalizedpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn handle, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux darwin freebsd\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/darron\/goshe\/cmd\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\n\/\/ CompileDate tracks when the binary was compiled. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar CompileDate = \"No date provided.\"\n\n\/\/ GitCommit tracks the SHA of the built binary. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar GitCommit = \"No revision provided.\"\n\n\/\/ Version is the version of the built binary. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar Version = \"No version provided.\"\n\n\/\/ GoVersion details the version of Go this was compiled with.\nvar GoVersion = runtime.Version()\n\n\/\/ The name of the program.\nvar programName = \"goshe\"\n\nfunc main() {\n\tlogwriter, e := syslog.New(syslog.LOG_NOTICE, programName)\n\tif e == nil {\n\t\tlog.SetOutput(logwriter)\n\t}\n\tcmd.Log(fmt.Sprintf(\"%s version: %s\", programName, Version), \"info\")\n\n\targs := os.Args[1:]\n\tfor _, arg := range args {\n\t\tif arg == \"-v\" || arg == \"--version\" {\n\t\t\tfmt.Printf(\"Version : %s\\nRevision : %s\\nDate : %s\\nGo : %s\\n\", Version, GitCommit, CompileDate, GoVersion)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\t\/\/ Setup nice shutdown with CTRL-C.\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo handleCtrlC(c)\n\n\tcmd.RootCmd.Execute()\n}\n\n\/\/ Any cleanup tasks on shutdown could happen here.\nfunc handleCtrlC(c chan os.Signal) {\n\tsig := <-c\n\tmessage := fmt.Sprintf(\"Received '%s' - shutting down.\", sig)\n\tcmd.Log(message, \"info\")\n\tfmt.Printf(\"%s\\n\", message)\n\tos.Exit(0)\n}\n<commit_msg>Setup expvar for stats on 1313.<commit_after>\/\/ +build linux darwin freebsd\n\npackage main\n\nimport (\n\t_ \"expvar\"\n\t\"fmt\"\n\t\"github.com\/darron\/goshe\/cmd\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\n\/\/ CompileDate tracks when the binary was compiled. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar CompileDate = \"No date provided.\"\n\n\/\/ GitCommit tracks the SHA of the built binary. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar GitCommit = \"No revision provided.\"\n\n\/\/ Version is the version of the built binary. It's inserted during a build\n\/\/ with build flags. Take a look at the Makefile for information.\nvar Version = \"No version provided.\"\n\n\/\/ GoVersion details the version of Go this was compiled with.\nvar GoVersion = runtime.Version()\n\n\/\/ The name of the program.\nvar programName = \"goshe\"\n\nfunc main() {\n\tlogwriter, e := syslog.New(syslog.LOG_NOTICE, programName)\n\tif e == nil {\n\t\tlog.SetOutput(logwriter)\n\t}\n\tcmd.Log(fmt.Sprintf(\"%s version: %s\", programName, Version), \"info\")\n\n\targs := os.Args[1:]\n\tfor _, arg := range args {\n\t\tif arg == \"-v\" || arg == \"--version\" {\n\t\t\tfmt.Printf(\"Version : %s\\nRevision : %s\\nDate : %s\\nGo : %s\\n\", Version, GitCommit, CompileDate, GoVersion)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\t\/\/ Setup nice shutdown with CTRL-C.\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo handleCtrlC(c)\n\n\t\/\/ Listen for expvar\n\tgo setupExpvarHTTP()\n\n\tcmd.RootCmd.Execute()\n}\n\nfunc setupExpvarHTTP() {\n\t\/\/ Listen for expvar\n\thttp.ListenAndServe(\":1313\", nil)\n}\n\n\/\/ Any cleanup tasks on shutdown could happen here.\nfunc handleCtrlC(c chan os.Signal) {\n\tsig := <-c\n\tmessage := fmt.Sprintf(\"Received '%s' - shutting down.\", sig)\n\tcmd.Log(message, \"info\")\n\tfmt.Printf(\"%s\\n\", message)\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar build = \"0\" \/\/ build number set at compile-time\n\nfunc main() {\n\t\/\/ Load env-file if it exists first\n\tif env := os.Getenv(\"PLUGIN_ENV_FILE\"); env != \"\" {\n\t\tgodotenv.Load(env)\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = \"docker plugin\"\n\tapp.Usage = \"docker plugin\"\n\tapp.Action = run\n\tapp.Version = fmt.Sprintf(\"1.0.%s\", build)\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"dry-run\",\n\t\t\tUsage: \"dry run disables docker push\",\n\t\t\tEnvVar: \"PLUGIN_DRY_RUN\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"commit.sha\",\n\t\t\tUsage: \"git commit sha\",\n\t\t\tEnvVar: \"DRONE_COMMIT_SHA\",\n\t\t\tValue: \"00000000\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.mirror\",\n\t\t\tUsage: \"docker daemon registry mirror\",\n\t\t\tEnvVar: \"PLUGIN_MIRROR\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.storage-driver\",\n\t\t\tUsage: \"docker daemon storage driver\",\n\t\t\tEnvVar: \"PLUGIN_STORAGE_DRIVER\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.storage-path\",\n\t\t\tUsage: \"docker daemon storage path\",\n\t\t\tValue: \"\/var\/lib\/docker\",\n\t\t\tEnvVar: \"PLUGIN_STORAGE_PATH\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.bip\",\n\t\t\tUsage: \"docker daemon bride ip address\",\n\t\t\tEnvVar: \"PLUGIN_BIP\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.mtu\",\n\t\t\tUsage: \"docker daemon custom mtu setting\",\n\t\t\tEnvVar: \"PLUGIN_MTU\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"daemon.dns\",\n\t\t\tUsage: \"docker daemon dns server\",\n\t\t\tEnvVar: \"PLUGIN_DNS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.insecure\",\n\t\t\tUsage: \"docker daemon allows insecure registries\",\n\t\t\tEnvVar: \"PLUGIN_INSECURE\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.ipv6\",\n\t\t\tUsage: \"docker daemon IPv6 networking\",\n\t\t\tEnvVar: \"PLUGIN_IPV6\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.experimental\",\n\t\t\tUsage: \"docker daemon Experimental mode\",\n\t\t\tEnvVar: \"PLUGIN_EXPERIMENTAL\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.debug\",\n\t\t\tUsage: \"docker daemon executes in debug mode\",\n\t\t\tEnvVar: \"PLUGIN_DEBUG,DOCKER_LAUNCH_DEBUG\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.off\",\n\t\t\tUsage: \"docker daemon executes in debug mode\",\n\t\t\tEnvVar: \"PLUGIN_DAEMON_OFF\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dockerfile\",\n\t\t\tUsage: \"build dockerfile\",\n\t\t\tValue: \"Dockerfile\",\n\t\t\tEnvVar: \"PLUGIN_DOCKERFILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"context\",\n\t\t\tUsage: \"build context\",\n\t\t\tValue: \".\",\n\t\t\tEnvVar: \"PLUGIN_CONTEXT\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"tags\",\n\t\t\tUsage: \"build tags\",\n\t\t\tValue: &cli.StringSlice{\"latest\"},\n\t\t\tEnvVar: \"PLUGIN_TAG,PLUGIN_TAGS\",\n\t\t\tFilePath: \".tags\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"args\",\n\t\t\tUsage: \"build args\",\n\t\t\tEnvVar: \"PLUGIN_BUILD_ARGS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"squash\",\n\t\t\tUsage: \"squash the layers at build time\",\n\t\t\tEnvVar: \"PLUGIN_SQUASH\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"pull-image\",\n\t\t\tUsage: \"force pull base image at build time\",\n\t\t\tEnvVar: \"PLUGIN_PULL_IMAGE\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"compress\",\n\t\t\tUsage: \"compress the build context using gzip\",\n\t\t\tEnvVar: \"PLUGIN_COMPRESS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repo\",\n\t\t\tUsage: \"docker repository\",\n\t\t\tEnvVar: \"PLUGIN_REPO\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.registry\",\n\t\t\tUsage: \"docker registry\",\n\t\t\tValue: defaultRegistry,\n\t\t\tEnvVar: \"PLUGIN_REGISTRY,DOCKER_REGISTRY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.username\",\n\t\t\tUsage: \"docker username\",\n\t\t\tEnvVar: \"PLUGIN_USERNAME,DOCKER_USERNAME\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.password\",\n\t\t\tUsage: \"docker password\",\n\t\t\tEnvVar: \"PLUGIN_PASSWORD,DOCKER_PASSWORD\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.email\",\n\t\t\tUsage: \"docker email\",\n\t\t\tEnvVar: \"PLUGIN_EMAIL,DOCKER_EMAIL\",\n\t\t},\n\t}\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}\n\nfunc run(c *cli.Context) error {\n\tplugin := Plugin{\n\t\tDryrun: c.Bool(\"dry-run\"),\n\t\tLogin: Login{\n\t\t\tRegistry: c.String(\"docker.registry\"),\n\t\t\tUsername: c.String(\"docker.username\"),\n\t\t\tPassword: c.String(\"docker.password\"),\n\t\t\tEmail: c.String(\"docker.email\"),\n\t\t},\n\t\tBuild: Build{\n\t\t\tName: c.String(\"commit.sha\"),\n\t\t\tDockerfile: c.String(\"dockerfile\"),\n\t\t\tContext: c.String(\"context\"),\n\t\t\tTags: c.StringSlice(\"tags\"),\n\t\t\tArgs: c.StringSlice(\"args\"),\n\t\t\tSquash: c.Bool(\"squash\"),\n\t\t\tPull: c.BoolT(\"pull-image\"),\n\t\t\tCompress: c.Bool(\"compress\"),\n\t\t\tRepo: c.String(\"repo\"),\n\t\t},\n\t\tDaemon: Daemon{\n\t\t\tRegistry: c.String(\"docker.registry\"),\n\t\t\tMirror: c.String(\"daemon.mirror\"),\n\t\t\tStorageDriver: c.String(\"daemon.storage-driver\"),\n\t\t\tStoragePath: c.String(\"daemon.storage-path\"),\n\t\t\tInsecure: c.Bool(\"daemon.insecure\"),\n\t\t\tDisabled: c.Bool(\"daemon.off\"),\n\t\t\tIPv6: c.Bool(\"daemon.ipv6\"),\n\t\t\tDebug: c.Bool(\"daemon.debug\"),\n\t\t\tBip: c.String(\"daemon.bip\"),\n\t\t\tDNS: c.StringSlice(\"daemon.dns\"),\n\t\t\tMTU: c.String(\"daemon.mtu\"),\n\t\t\tExperimental: c.Bool(\"daemon.experimental\"),\n\t\t},\n\t}\n\n\treturn plugin.Exec()\n}\n<commit_msg>Fix DNS passing. New env var is PLUGIN_CUSTOM_DNS<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar build = \"0\" \/\/ build number set at compile-time\n\nfunc main() {\n\t\/\/ Load env-file if it exists first\n\tif env := os.Getenv(\"PLUGIN_ENV_FILE\"); env != \"\" {\n\t\tgodotenv.Load(env)\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = \"docker plugin\"\n\tapp.Usage = \"docker plugin\"\n\tapp.Action = run\n\tapp.Version = fmt.Sprintf(\"1.0.%s\", build)\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"dry-run\",\n\t\t\tUsage: \"dry run disables docker push\",\n\t\t\tEnvVar: \"PLUGIN_DRY_RUN\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"commit.sha\",\n\t\t\tUsage: \"git commit sha\",\n\t\t\tEnvVar: \"DRONE_COMMIT_SHA\",\n\t\t\tValue: \"00000000\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.mirror\",\n\t\t\tUsage: \"docker daemon registry mirror\",\n\t\t\tEnvVar: \"PLUGIN_MIRROR\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.storage-driver\",\n\t\t\tUsage: \"docker daemon storage driver\",\n\t\t\tEnvVar: \"PLUGIN_STORAGE_DRIVER\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.storage-path\",\n\t\t\tUsage: \"docker daemon storage path\",\n\t\t\tValue: \"\/var\/lib\/docker\",\n\t\t\tEnvVar: \"PLUGIN_STORAGE_PATH\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.bip\",\n\t\t\tUsage: \"docker daemon bride ip address\",\n\t\t\tEnvVar: \"PLUGIN_BIP\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"daemon.mtu\",\n\t\t\tUsage: \"docker daemon custom mtu setting\",\n\t\t\tEnvVar: \"PLUGIN_MTU\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"daemon.dns\",\n\t\t\tUsage: \"docker daemon dns server\",\n\t\t\tEnvVar: \"PLUGIN_CUSTOM_DNS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.insecure\",\n\t\t\tUsage: \"docker daemon allows insecure registries\",\n\t\t\tEnvVar: \"PLUGIN_INSECURE\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.ipv6\",\n\t\t\tUsage: \"docker daemon IPv6 networking\",\n\t\t\tEnvVar: \"PLUGIN_IPV6\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.experimental\",\n\t\t\tUsage: \"docker daemon Experimental mode\",\n\t\t\tEnvVar: \"PLUGIN_EXPERIMENTAL\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.debug\",\n\t\t\tUsage: \"docker daemon executes in debug mode\",\n\t\t\tEnvVar: \"PLUGIN_DEBUG,DOCKER_LAUNCH_DEBUG\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"daemon.off\",\n\t\t\tUsage: \"docker daemon executes in debug mode\",\n\t\t\tEnvVar: \"PLUGIN_DAEMON_OFF\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dockerfile\",\n\t\t\tUsage: \"build dockerfile\",\n\t\t\tValue: \"Dockerfile\",\n\t\t\tEnvVar: \"PLUGIN_DOCKERFILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"context\",\n\t\t\tUsage: \"build context\",\n\t\t\tValue: \".\",\n\t\t\tEnvVar: \"PLUGIN_CONTEXT\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"tags\",\n\t\t\tUsage: \"build tags\",\n\t\t\tValue: &cli.StringSlice{\"latest\"},\n\t\t\tEnvVar: \"PLUGIN_TAG,PLUGIN_TAGS\",\n\t\t\tFilePath: \".tags\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"args\",\n\t\t\tUsage: \"build args\",\n\t\t\tEnvVar: \"PLUGIN_BUILD_ARGS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"squash\",\n\t\t\tUsage: \"squash the layers at build time\",\n\t\t\tEnvVar: \"PLUGIN_SQUASH\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"pull-image\",\n\t\t\tUsage: \"force pull base image at build time\",\n\t\t\tEnvVar: \"PLUGIN_PULL_IMAGE\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"compress\",\n\t\t\tUsage: \"compress the build context using gzip\",\n\t\t\tEnvVar: \"PLUGIN_COMPRESS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repo\",\n\t\t\tUsage: \"docker repository\",\n\t\t\tEnvVar: \"PLUGIN_REPO\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.registry\",\n\t\t\tUsage: \"docker registry\",\n\t\t\tValue: defaultRegistry,\n\t\t\tEnvVar: \"PLUGIN_REGISTRY,DOCKER_REGISTRY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.username\",\n\t\t\tUsage: \"docker username\",\n\t\t\tEnvVar: \"PLUGIN_USERNAME,DOCKER_USERNAME\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.password\",\n\t\t\tUsage: \"docker password\",\n\t\t\tEnvVar: \"PLUGIN_PASSWORD,DOCKER_PASSWORD\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"docker.email\",\n\t\t\tUsage: \"docker email\",\n\t\t\tEnvVar: \"PLUGIN_EMAIL,DOCKER_EMAIL\",\n\t\t},\n\t}\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}\n\nfunc run(c *cli.Context) error {\n\tplugin := Plugin{\n\t\tDryrun: c.Bool(\"dry-run\"),\n\t\tLogin: Login{\n\t\t\tRegistry: c.String(\"docker.registry\"),\n\t\t\tUsername: c.String(\"docker.username\"),\n\t\t\tPassword: c.String(\"docker.password\"),\n\t\t\tEmail: c.String(\"docker.email\"),\n\t\t},\n\t\tBuild: Build{\n\t\t\tName: c.String(\"commit.sha\"),\n\t\t\tDockerfile: c.String(\"dockerfile\"),\n\t\t\tContext: c.String(\"context\"),\n\t\t\tTags: c.StringSlice(\"tags\"),\n\t\t\tArgs: c.StringSlice(\"args\"),\n\t\t\tSquash: c.Bool(\"squash\"),\n\t\t\tPull: c.BoolT(\"pull-image\"),\n\t\t\tCompress: c.Bool(\"compress\"),\n\t\t\tRepo: c.String(\"repo\"),\n\t\t},\n\t\tDaemon: Daemon{\n\t\t\tRegistry: c.String(\"docker.registry\"),\n\t\t\tMirror: c.String(\"daemon.mirror\"),\n\t\t\tStorageDriver: c.String(\"daemon.storage-driver\"),\n\t\t\tStoragePath: c.String(\"daemon.storage-path\"),\n\t\t\tInsecure: c.Bool(\"daemon.insecure\"),\n\t\t\tDisabled: c.Bool(\"daemon.off\"),\n\t\t\tIPv6: c.Bool(\"daemon.ipv6\"),\n\t\t\tDebug: c.Bool(\"daemon.debug\"),\n\t\t\tBip: c.String(\"daemon.bip\"),\n\t\t\tDNS: c.StringSlice(\"daemon.dns\"),\n\t\t\tMTU: c.String(\"daemon.mtu\"),\n\t\t\tExperimental: c.Bool(\"daemon.experimental\"),\n\t\t},\n\t}\n\n\treturn plugin.Exec()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/zenazn\/goji\"\n\t\"github.com\/zenazn\/goji\/web\"\n\n\t\"github.com\/ckpt\/backend-services\/middleware\"\n\t\"github.com\/ckpt\/backend-services\/players\"\n)\n\ntype appError struct {\n\tError error\n\tMessage string\n\tCode int\n}\n\ntype appHandler func(web.C, http.ResponseWriter, *http.Request) *appError\n\nfunc (fn appHandler) ServeHTTPC(c web.C, w http.ResponseWriter, r *http.Request) {\n\tif e := fn(c, w, r); e != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tw.WriteHeader(e.Code)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(map[string]string{\"error\": e.Error.Error() +\n\t\t\t\" (\" + e.Message + \")\"})\n\t}\n}\n\nfunc login(c web.C, w http.ResponseWriter, r *http.Request) *appError {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\ttype LoginRequest struct {\n\t\tUsername string\n\t\tPassword string\n\t}\n\n\tloginReq := new(LoginRequest)\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(loginReq); err != nil {\n\t\treturn &appError{err, \"Invalid JSON\", 400}\n\t}\n\t\/\/ Hard code for now\n\tif loginReq.Username == \"mortenk\" &&\n\t\tloginReq.Password == \"testing123\" {\n\t\tauthUser, err := players.UserByName(loginReq.Username)\n\t\tif err != nil {\n\t\t\treturn &appError{err, \"Failed to fetch user data\", 500}\n\t\t}\n\t\tif authUser.Locked {\n\t\t\treturn &appError{errors.New(\"Locked\"), \"User locked\", 403}\n\t\t}\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(authUser)\n\t\treturn nil\n\t}\n\n\t\/\/ Else, forbidden\n\tw.WriteHeader(403)\n\treturn nil\n}\n\nfunc main() {\n\t\/\/fmt.Printf(\"%+v\\n\", getMembers())\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowedHeaders: []string{\"*\"},\n\t})\n\tgoji.Use(c.Handler)\n\tgoji.Use(middleware.TokenHandler)\n\n\tgoji.Post(\"\/login\", appHandler(login))\n\n\tgoji.Get(\"\/players\", appHandler(listAllPlayers))\n\tgoji.Post(\"\/players\", appHandler(createNewPlayer))\n\tgoji.Get(\"\/players\/quotes\", appHandler(getAllPlayerQuotes))\n\tgoji.Get(\"\/players\/:uuid\", appHandler(getPlayer))\n\tgoji.Put(\"\/players\/:uuid\", appHandler(updatePlayer))\n\tgoji.Get(\"\/players\/:uuid\/profile\", appHandler(getPlayerProfile))\n\tgoji.Put(\"\/players\/:uuid\/profile\", appHandler(updatePlayerProfile))\n\tgoji.Get(\"\/players\/:uuid\/user\", appHandler(getUserForPlayer))\n\tgoji.Put(\"\/players\/:uuid\/user\", appHandler(setUserForPlayer))\n\n\tgoji.Post(\"\/users\", appHandler(createNewUser))\n\n\tgoji.Get(\"\/locations\", appHandler(listAllLocations))\n\tgoji.Post(\"\/locations\", appHandler(createNewLocation))\n\tgoji.Get(\"\/locations\/:uuid\", appHandler(getLocation))\n\tgoji.Put(\"\/locations\/:uuid\", appHandler(updateLocationProfile))\n\tgoji.Patch(\"\/locations\/:uuid\", appHandler(updateLocationProfile))\n\tgoji.Post(\"\/locations\/:uuid\/pictures\", appHandler(addLocationPicture))\n\n\tgoji.Get(\"\/tournaments\", appHandler(listAllTournaments))\n\tgoji.Post(\"\/tournaments\", appHandler(createNewTournament))\n\tgoji.Get(\"\/tournaments\/:uuid\", appHandler(getTournament))\n\tgoji.Put(\"\/tournaments\/:uuid\", appHandler(updateTournamentInfo))\n\tgoji.Patch(\"\/tournaments\/:uuid\", appHandler(updateTournamentInfo))\n\n\tgoji.Serve()\n}\n<commit_msg>Add allowed CORS methods.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/zenazn\/goji\"\n\t\"github.com\/zenazn\/goji\/web\"\n\n\t\"github.com\/ckpt\/backend-services\/middleware\"\n\t\"github.com\/ckpt\/backend-services\/players\"\n)\n\ntype appError struct {\n\tError error\n\tMessage string\n\tCode int\n}\n\ntype appHandler func(web.C, http.ResponseWriter, *http.Request) *appError\n\nfunc (fn appHandler) ServeHTTPC(c web.C, w http.ResponseWriter, r *http.Request) {\n\tif e := fn(c, w, r); e != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tw.WriteHeader(e.Code)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(map[string]string{\"error\": e.Error.Error() +\n\t\t\t\" (\" + e.Message + \")\"})\n\t}\n}\n\nfunc login(c web.C, w http.ResponseWriter, r *http.Request) *appError {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\ttype LoginRequest struct {\n\t\tUsername string\n\t\tPassword string\n\t}\n\n\tloginReq := new(LoginRequest)\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(loginReq); err != nil {\n\t\treturn &appError{err, \"Invalid JSON\", 400}\n\t}\n\t\/\/ Hard code for now\n\tif loginReq.Username == \"mortenk\" &&\n\t\tloginReq.Password == \"testing123\" {\n\t\tauthUser, err := players.UserByName(loginReq.Username)\n\t\tif err != nil {\n\t\t\treturn &appError{err, \"Failed to fetch user data\", 500}\n\t\t}\n\t\tif authUser.Locked {\n\t\t\treturn &appError{errors.New(\"Locked\"), \"User locked\", 403}\n\t\t}\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(authUser)\n\t\treturn nil\n\t}\n\n\t\/\/ Else, forbidden\n\tw.WriteHeader(403)\n\treturn nil\n}\n\nfunc main() {\n\t\/\/fmt.Printf(\"%+v\\n\", getMembers())\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowedHeaders: []string{\"*\"},\n\t\tAllowedMethods: []string{\"GET\", \"PUT\", \"PATCH\", \"POST\", \"OPTIONS\"},\n\t})\n\tgoji.Use(c.Handler)\n\tgoji.Use(middleware.TokenHandler)\n\n\tgoji.Post(\"\/login\", appHandler(login))\n\n\tgoji.Get(\"\/players\", appHandler(listAllPlayers))\n\tgoji.Post(\"\/players\", appHandler(createNewPlayer))\n\tgoji.Get(\"\/players\/quotes\", appHandler(getAllPlayerQuotes))\n\tgoji.Get(\"\/players\/:uuid\", appHandler(getPlayer))\n\tgoji.Put(\"\/players\/:uuid\", appHandler(updatePlayer))\n\tgoji.Get(\"\/players\/:uuid\/profile\", appHandler(getPlayerProfile))\n\tgoji.Put(\"\/players\/:uuid\/profile\", appHandler(updatePlayerProfile))\n\tgoji.Get(\"\/players\/:uuid\/user\", appHandler(getUserForPlayer))\n\tgoji.Put(\"\/players\/:uuid\/user\", appHandler(setUserForPlayer))\n\n\tgoji.Post(\"\/users\", appHandler(createNewUser))\n\n\tgoji.Get(\"\/locations\", appHandler(listAllLocations))\n\tgoji.Post(\"\/locations\", appHandler(createNewLocation))\n\tgoji.Get(\"\/locations\/:uuid\", appHandler(getLocation))\n\tgoji.Put(\"\/locations\/:uuid\", appHandler(updateLocationProfile))\n\tgoji.Patch(\"\/locations\/:uuid\", appHandler(updateLocationProfile))\n\tgoji.Post(\"\/locations\/:uuid\/pictures\", appHandler(addLocationPicture))\n\n\tgoji.Get(\"\/tournaments\", appHandler(listAllTournaments))\n\tgoji.Post(\"\/tournaments\", appHandler(createNewTournament))\n\tgoji.Get(\"\/tournaments\/:uuid\", appHandler(getTournament))\n\tgoji.Put(\"\/tournaments\/:uuid\", appHandler(updateTournamentInfo))\n\tgoji.Patch(\"\/tournaments\/:uuid\", appHandler(updateTournamentInfo))\n\n\tgoji.Serve()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"net\"\n)\n\nfunc GetIP(ipaddr string) (string, error) {\n\tvar err error\n\n\tif ipaddr != \"\" {\n\t\treturn ipaddr, err\n\t}\n\n\tvar ifaces []net.Addr\n\tifaces, err = net.InterfaceAddrs()\n\tfor _, i := range ifaces {\n\t\tif ipnet, ok := i.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\tipaddr = ipnet.IP.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn ipaddr, err\n}\n\nvar (\n\tfile = kingpin.Flag(\"file\", \"Config file\").Short('f').Required().String()\n\thostname = kingpin.Flag(\"hostname\", \"Hostname\").Short('h').String()\n\tipaddr = kingpin.Flag(\"ipaddr\", \"IP Address\").String()\n\tignoreErrors = kingpin.Flag(\"ignore-errors\", \"Ignore errors\").Bool()\n\thost = kingpin.Command(\"host\", \"Operate host\")\n\t\/\/ host status\n\thostStatus = host.Command(\"status\", \"Operate host status\")\n\t\/\/ host status enable\n\thostStatusEnable = hostStatus.Command(\"enable\", \"Enable host status\")\n\t\/\/ host status disable\n\thostStatusDisable = hostStatus.Command(\"disable\", \"Disable Host status\")\n\t\/\/ host register\n\thostRegister = host.Command(\"register\", \"Regist host\")\n\t\/\/ host delete\n\thostDelete = host.Command(\"delete\", \"Delete host\")\n\t\/\/ host list\n\thostList = host.Command(\"list\", \"Host list\")\n)\n\nfunc main() {\n\tvar err error\n\tkingpin.Version(\"0.1.1\")\n\tsubcommand := kingpin.Parse()\n\n\tlog := logrus.New()\n\n\tvar config Config\n\tconfig, err = LoadYAML(*file, *hostname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar consul Consul\n\tconsul, err = NewConsul()\n\tif err != nil && config.storage == \"consul\" {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar storage Storage\n\tstorage, err = NewStorage(config.DBPath, config.DBPerm)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer storage.Close()\n\n\tzabbix := NewZabbix(config, consul, storage)\n\n\terr = zabbix.APILogin()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tswitch subcommand {\n\tcase \"host status enable\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostEnable(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host status disable\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostDisable(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host register\":\n\t\tipaddr, err := GetIP(*ipaddr)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tinterfaces := zabbix.HostInterface(\"\", ipaddr, 1, 1, 1)\n\n\t\tgroups, err := zabbix.HostGroupIds(config.HostGroups)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\ttemplates, err := zabbix.Templates(config.Templates)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\terr = zabbix.HostCreate(config.Hostname, groups, interfaces, templates)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host delete\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostDelete(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host list\":\n\t\tzabbixHosts, err := zabbix.EnableHosts()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor _, zHost := range zabbixHosts {\n\t\t\tfmt.Println(zHost.Hostname, zHost.IPAddr)\n\t\t}\n\tcase \"host proxy\":\n\t}\n}\n<commit_msg>config.storage -> config.Storage<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"net\"\n)\n\nfunc GetIP(ipaddr string) (string, error) {\n\tvar err error\n\n\tif ipaddr != \"\" {\n\t\treturn ipaddr, err\n\t}\n\n\tvar ifaces []net.Addr\n\tifaces, err = net.InterfaceAddrs()\n\tfor _, i := range ifaces {\n\t\tif ipnet, ok := i.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\tipaddr = ipnet.IP.String()\n\t\t\t}\n\t\t}\n\t}\n\treturn ipaddr, err\n}\n\nvar (\n\tfile = kingpin.Flag(\"file\", \"Config file\").Short('f').Required().String()\n\thostname = kingpin.Flag(\"hostname\", \"Hostname\").Short('h').String()\n\tipaddr = kingpin.Flag(\"ipaddr\", \"IP Address\").String()\n\tignoreErrors = kingpin.Flag(\"ignore-errors\", \"Ignore errors\").Bool()\n\thost = kingpin.Command(\"host\", \"Operate host\")\n\t\/\/ host status\n\thostStatus = host.Command(\"status\", \"Operate host status\")\n\t\/\/ host status enable\n\thostStatusEnable = hostStatus.Command(\"enable\", \"Enable host status\")\n\t\/\/ host status disable\n\thostStatusDisable = hostStatus.Command(\"disable\", \"Disable Host status\")\n\t\/\/ host register\n\thostRegister = host.Command(\"register\", \"Regist host\")\n\t\/\/ host delete\n\thostDelete = host.Command(\"delete\", \"Delete host\")\n\t\/\/ host list\n\thostList = host.Command(\"list\", \"Host list\")\n)\n\nfunc main() {\n\tvar err error\n\tkingpin.Version(\"0.1.1\")\n\tsubcommand := kingpin.Parse()\n\n\tlog := logrus.New()\n\n\tvar config Config\n\tconfig, err = LoadYAML(*file, *hostname)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar consul Consul\n\tconsul, err = NewConsul()\n\tif err != nil && config.Storage == \"consul\" {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar storage Storage\n\tstorage, err = NewStorage(config.DBPath, config.DBPerm)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer storage.Close()\n\n\tzabbix := NewZabbix(config, consul, storage)\n\n\terr = zabbix.APILogin()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tswitch subcommand {\n\tcase \"host status enable\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostEnable(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host status disable\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostDisable(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host register\":\n\t\tipaddr, err := GetIP(*ipaddr)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tinterfaces := zabbix.HostInterface(\"\", ipaddr, 1, 1, 1)\n\n\t\tgroups, err := zabbix.HostGroupIds(config.HostGroups)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\ttemplates, err := zabbix.Templates(config.Templates)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\terr = zabbix.HostCreate(config.Hostname, groups, interfaces, templates)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host delete\":\n\t\thost, err := zabbix.HostGetByHost()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = zabbix.HostDelete(host.HostId)\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tcase \"host list\":\n\t\tzabbixHosts, err := zabbix.EnableHosts()\n\t\tif err != nil && !*ignoreErrors {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor _, zHost := range zabbixHosts {\n\t\t\tfmt.Println(zHost.Hostname, zHost.IPAddr)\n\t\t}\n\tcase \"host proxy\":\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype Response struct {\n\tRaw interface{}\n}\n\nfunc (r Response) String() (s string) {\n\tb, err := json.Marshal(r.Raw)\n\tif err != nil {\n\t\tb, err = json.Marshal(map[string]string{\n\t\t\t\"error\": fmt.Sprint(\"\", err),\n\t\t})\n\t}\n\ts = string(b)\n\treturn\n}\n\nvar DBPREFIX = \"perf\"\n\nvar storage = MongoHandler{}\n\nfunc ListDatabases(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListDatabases()})\n}\n\nfunc ListSources(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListCollections(DBPREFIX + db)})\n}\n\nfunc ListMetrics(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListMetrics(DBPREFIX+db, source)})\n}\n\nfunc GetRawValues(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\tmetric := vars[\"metric\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tvalues := storage.FindValues(\"perf\"+db, source, metric)\n\tfmt.Fprint(rw, Response{values})\n}\n\nfunc GetSummary(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\tmetric := vars[\"metric\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tvalues := storage.Aggregate(\"perf\"+db, source, metric)\n\tfmt.Fprint(rw, Response{values})\n}\n\nfunc GetLineChart(rw http.ResponseWriter, r *http.Request) {\n\tapp := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/pavel-paulau\/perfkeeper\/\"\n\n\tcontent, _ := ioutil.ReadFile(app + \"app\/linechart.html\")\n\trw.Header().Set(\"Content-Type\", \"text\/html\")\n\tfmt.Fprint(rw, string(content))\n}\n\nfunc AddSamples(rw http.ResponseWriter, r *http.Request) {\n\ttsInt := time.Now().UnixNano()\n\tts := strconv.FormatInt(tsInt, 10)\n\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\n\tvar samples map[string]interface{}\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&samples)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tfor m, v := range samples {\n\t\tsample := map[string]interface{}{\n\t\t\t\"ts\": ts,\n\t\t\t\"m\": m,\n\t\t\t\"v\": v,\n\t\t}\n\t\tstorage.InsertSample(DBPREFIX+db, source, sample)\n\t}\n}\n\nfunc Log(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s %s\", r.Method, r.URL)\n\t\thandler.ServeHTTP(rw, r)\n\t})\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tstorage.Init()\n\n\tapp := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/pavel-paulau\/perfkeeper\/app\"\n\tfileHandler := http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(app)))\n\thttp.Handle(\"\/static\/\", fileHandler)\n\n\tr := mux.NewRouter()\n\tr.StrictSlash(true)\n\tr.HandleFunc(\"\/\", ListDatabases).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\", ListSources).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\", ListMetrics).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\", AddSamples).Methods(\"POST\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\", GetRawValues).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\/summary\", GetSummary).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\/linechart\", GetLineChart).Methods(\"GET\")\n\thttp.Handle(\"\/\", r)\n\n\tfmt.Println(\"\\n\\t:-:-: perfkeeper :-:-:\\t\\t\\tserving 0.0.0.0:8080\\n\")\n\tlog.Fatal(http.ListenAndServe(\"0.0.0.0:8080\", Log(http.DefaultServeMux)))\n}\n<commit_msg>insert samples asynchronously<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype Response struct {\n\tRaw interface{}\n}\n\nfunc (r Response) String() (s string) {\n\tb, err := json.Marshal(r.Raw)\n\tif err != nil {\n\t\tb, err = json.Marshal(map[string]string{\n\t\t\t\"error\": fmt.Sprint(\"\", err),\n\t\t})\n\t}\n\ts = string(b)\n\treturn\n}\n\nvar DBPREFIX = \"perf\"\n\nvar storage = MongoHandler{}\n\nfunc ListDatabases(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListDatabases()})\n}\n\nfunc ListSources(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListCollections(DBPREFIX + db)})\n}\n\nfunc ListMetrics(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(rw, Response{storage.ListMetrics(DBPREFIX+db, source)})\n}\n\nfunc GetRawValues(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\tmetric := vars[\"metric\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tvalues := storage.FindValues(\"perf\"+db, source, metric)\n\tfmt.Fprint(rw, Response{values})\n}\n\nfunc GetSummary(rw http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\tmetric := vars[\"metric\"]\n\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\tvalues := storage.Aggregate(\"perf\"+db, source, metric)\n\tfmt.Fprint(rw, Response{values})\n}\n\nfunc GetLineChart(rw http.ResponseWriter, r *http.Request) {\n\tapp := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/pavel-paulau\/perfkeeper\/\"\n\n\tcontent, _ := ioutil.ReadFile(app + \"app\/linechart.html\")\n\trw.Header().Set(\"Content-Type\", \"text\/html\")\n\tfmt.Fprint(rw, string(content))\n}\n\nfunc AddSamples(rw http.ResponseWriter, r *http.Request) {\n\ttsInt := time.Now().UnixNano()\n\tts := strconv.FormatInt(tsInt, 10)\n\n\tvars := mux.Vars(r)\n\tdb := vars[\"db\"]\n\tsource := vars[\"source\"]\n\n\tvar samples map[string]interface{}\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&samples)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tfor m, v := range samples {\n\t\tsample := map[string]interface{}{\n\t\t\t\"ts\": ts,\n\t\t\t\"m\": m,\n\t\t\t\"v\": v,\n\t\t}\n\t\tgo storage.InsertSample(DBPREFIX+db, source, sample)\n\t}\n}\n\nfunc Log(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s %s\", r.Method, r.URL)\n\t\thandler.ServeHTTP(rw, r)\n\t})\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tstorage.Init()\n\n\tapp := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/pavel-paulau\/perfkeeper\/app\"\n\tfileHandler := http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(app)))\n\thttp.Handle(\"\/static\/\", fileHandler)\n\n\tr := mux.NewRouter()\n\tr.StrictSlash(true)\n\tr.HandleFunc(\"\/\", ListDatabases).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\", ListSources).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\", ListMetrics).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\", AddSamples).Methods(\"POST\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\", GetRawValues).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\/summary\", GetSummary).Methods(\"GET\")\n\tr.HandleFunc(\"\/{db}\/{source}\/{metric}\/linechart\", GetLineChart).Methods(\"GET\")\n\thttp.Handle(\"\/\", r)\n\n\tfmt.Println(\"\\n\\t:-:-: perfkeeper :-:-:\\t\\t\\tserving 0.0.0.0:8080\\n\")\n\tlog.Fatal(http.ListenAndServe(\"0.0.0.0:8080\", Log(http.DefaultServeMux)))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/heynickc\/ninety_nine_prolog\/working_wth_lists\/lists.go\"\n)\n\nfunc main() {\n\n}\n<commit_msg>Made main consistent<commit_after>package main\n\nimport (\n\t\"github.com\/heynickc\/ninety_nine_prolog\/lists\"\n)\n\nfunc main() {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar rootCmd = &cobra.Command{\n\tUse: \"roachperf [command] (flags)\",\n\tShort: \"roachperf tool for manipulating test clusters\",\n\tLong: `\nroachprod is a tool for manipulating test clusters, allowing easy creating,\ndestroying and wiping of clusters along with running load generators.\n`,\n}\n\nvar (\n\tlifetime time.Duration\n\tnumNodes int\n\tusername string\n)\n\nvar createCmd = &cobra.Command{\n\tUse: \"create <cluster id>\",\n\tShort: \"create a cluster\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"wrong number of arguments\")\n\t\t}\n\t\tclusterID := args[0]\n\n\t\tif len(clusterID) == 0 {\n\t\t\treturn fmt.Errorf(\"cluster ID cannot be blank\")\n\t\t}\n\t\tif numNodes <= 0 || numNodes >= 1000 {\n\t\t\t\/\/ Upper limit is just for safety.\n\t\t\treturn fmt.Errorf(\"number of nodes must be in [1..999]\")\n\t\t}\n\n\t\taccount := username\n\t\tvar err error\n\t\tif len(username) == 0 {\n\t\t\taccount, err = findActiveAccount()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tclusterName := account + \"-\" + clusterID\n\t\tfmt.Printf(\"Creating cluster %s with %d nodes\\n\", clusterName, numNodes)\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, ok := cloud.Clusters[clusterName]; ok {\n\t\t\treturn fmt.Errorf(\"cluster %s already exists\", clusterName)\n\t\t}\n\n\t\tif err := createCluster(clusterName, numNodes, lifetime); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"OK\")\n\n\t\tcloud, err = listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc, ok := cloud.Clusters[clusterName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"could not find %s in list of cluster\", clusterName)\n\t\t}\n\t\tc.PrintDetails()\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nvar destroyCmd = &cobra.Command{\n\tUse: \"destroy <cluster ID>\",\n\tShort: \"destroy a cluster\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"wrong number of arguments\")\n\t\t}\n\t\tclusterID := args[0]\n\n\t\tif len(clusterID) == 0 {\n\t\t\treturn fmt.Errorf(\"cluster ID cannot be blank\")\n\t\t}\n\n\t\taccount := username\n\t\tvar err error\n\t\tif len(username) == 0 {\n\t\t\taccount, err = findActiveAccount()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tclusterName := account + \"-\" + clusterID\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc, ok := cloud.Clusters[clusterName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"cluster %s does not exist\", clusterName)\n\t\t}\n\n\t\tfmt.Printf(\"Destroying cluster %s with %d nodes\\n\", clusterName, len(c.VMs))\n\t\tif err := destroyCluster(c); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"OK\")\n\t\treturn nil\n\t},\n}\n\nvar listCmd = &cobra.Command{\n\tUse: \"list\",\n\tShort: \"retrieve the list of clusters\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, c := range cloud.Clusters {\n\t\t\tc.PrintDetails()\n\t\t}\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nvar statusCmd = &cobra.Command{\n\tUse: \"status\",\n\tShort: \"retrieve your prod status\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\taccount, err := findActiveAccount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Account: %s\\n\", account)\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, c := range cloud.Clusters {\n\t\t\tfmt.Printf(\"%s\\n\", c)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n\nvar syncCmd = &cobra.Command{\n\tUse: \"sync\",\n\tShort: \"sync ssh keys\/config and hosts files\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nfunc syncAll(cloud *Cloud) error {\n\tfmt.Println(\"Syncing...\")\n\tif err := initHostDir(); err != nil {\n\t\treturn err\n\t}\n\tif err := syncHosts(cloud); err != nil {\n\t\treturn err\n\t}\n\tif err := cleanSSH(); err != nil {\n\t\treturn err\n\t}\n\tif err := configSSH(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tcobra.EnableCommandSorting = false\n\n\trootCmd.AddCommand(createCmd, destroyCmd, listCmd, statusCmd, syncCmd)\n\n\tcreateCmd.Flags().DurationVarP(&lifetime, \"lifetime\", \"l\", 24*time.Hour, \"Lifetime of the cluster\")\n\tcreateCmd.Flags().IntVarP(&numNodes, \"nodes\", \"n\", 3, \"Number of nodes\")\n\tcreateCmd.Flags().StringVarP(&username, \"username\", \"u\", \"\", \"Username to run under, detect if blank\")\n\n\tdestroyCmd.Flags().StringVarP(&username, \"username\", \"u\", \"\", \"Username to run under, detect if blank\")\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>s\/roachperf\/roachprod\/g<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar rootCmd = &cobra.Command{\n\tUse: \"roachprod [command] (flags)\",\n\tShort: \"roachprod tool for manipulating test clusters\",\n\tLong: `\nroachprod is a tool for manipulating test clusters, allowing easy creating,\ndestroying and wiping of clusters along with running load generators.\n`,\n}\n\nvar (\n\tlifetime time.Duration\n\tnumNodes int\n\tusername string\n)\n\nvar createCmd = &cobra.Command{\n\tUse: \"create <cluster id>\",\n\tShort: \"create a cluster\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"wrong number of arguments\")\n\t\t}\n\t\tclusterID := args[0]\n\n\t\tif len(clusterID) == 0 {\n\t\t\treturn fmt.Errorf(\"cluster ID cannot be blank\")\n\t\t}\n\t\tif numNodes <= 0 || numNodes >= 1000 {\n\t\t\t\/\/ Upper limit is just for safety.\n\t\t\treturn fmt.Errorf(\"number of nodes must be in [1..999]\")\n\t\t}\n\n\t\taccount := username\n\t\tvar err error\n\t\tif len(username) == 0 {\n\t\t\taccount, err = findActiveAccount()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tclusterName := account + \"-\" + clusterID\n\t\tfmt.Printf(\"Creating cluster %s with %d nodes\\n\", clusterName, numNodes)\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, ok := cloud.Clusters[clusterName]; ok {\n\t\t\treturn fmt.Errorf(\"cluster %s already exists\", clusterName)\n\t\t}\n\n\t\tif err := createCluster(clusterName, numNodes, lifetime); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"OK\")\n\n\t\tcloud, err = listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc, ok := cloud.Clusters[clusterName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"could not find %s in list of cluster\", clusterName)\n\t\t}\n\t\tc.PrintDetails()\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nvar destroyCmd = &cobra.Command{\n\tUse: \"destroy <cluster ID>\",\n\tShort: \"destroy a cluster\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"wrong number of arguments\")\n\t\t}\n\t\tclusterID := args[0]\n\n\t\tif len(clusterID) == 0 {\n\t\t\treturn fmt.Errorf(\"cluster ID cannot be blank\")\n\t\t}\n\n\t\taccount := username\n\t\tvar err error\n\t\tif len(username) == 0 {\n\t\t\taccount, err = findActiveAccount()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tclusterName := account + \"-\" + clusterID\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc, ok := cloud.Clusters[clusterName]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"cluster %s does not exist\", clusterName)\n\t\t}\n\n\t\tfmt.Printf(\"Destroying cluster %s with %d nodes\\n\", clusterName, len(c.VMs))\n\t\tif err := destroyCluster(c); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"OK\")\n\t\treturn nil\n\t},\n}\n\nvar listCmd = &cobra.Command{\n\tUse: \"list\",\n\tShort: \"retrieve the list of clusters\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, c := range cloud.Clusters {\n\t\t\tc.PrintDetails()\n\t\t}\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nvar statusCmd = &cobra.Command{\n\tUse: \"status\",\n\tShort: \"retrieve your prod status\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\taccount, err := findActiveAccount()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Account: %s\\n\", account)\n\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, c := range cloud.Clusters {\n\t\t\tfmt.Printf(\"%s\\n\", c)\n\t\t}\n\n\t\treturn nil\n\t},\n}\n\nvar syncCmd = &cobra.Command{\n\tUse: \"sync\",\n\tShort: \"sync ssh keys\/config and hosts files\",\n\tLong: ``,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tcloud, err := listCloud()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn syncAll(cloud)\n\t},\n}\n\nfunc syncAll(cloud *Cloud) error {\n\tfmt.Println(\"Syncing...\")\n\tif err := initHostDir(); err != nil {\n\t\treturn err\n\t}\n\tif err := syncHosts(cloud); err != nil {\n\t\treturn err\n\t}\n\tif err := cleanSSH(); err != nil {\n\t\treturn err\n\t}\n\tif err := configSSH(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tcobra.EnableCommandSorting = false\n\n\trootCmd.AddCommand(createCmd, destroyCmd, listCmd, statusCmd, syncCmd)\n\n\tcreateCmd.Flags().DurationVarP(&lifetime, \"lifetime\", \"l\", 24*time.Hour, \"Lifetime of the cluster\")\n\tcreateCmd.Flags().IntVarP(&numNodes, \"nodes\", \"n\", 3, \"Number of nodes\")\n\tcreateCmd.Flags().StringVarP(&username, \"username\", \"u\", \"\", \"Username to run under, detect if blank\")\n\n\tdestroyCmd.Flags().StringVarP(&username, \"username\", \"u\", \"\", \"Username to run under, detect if blank\")\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCommand csvsplit splits a .csv into multiple, smaller files.\n\nResulting files will be saved as 1.csv, 2.csv, etc. in the currect directory, unless the -output flag is used.\n\nInstall\n\nRequires Go to be installed first, https:\/\/golang.org\/doc\/install.\n\n\t$ go get github.com\/JeffPaine\/csvsplit\n\nFlags\n\nBasic usage: csvsplit -records <number of records> <file>\n\n\t-records\nNumber of records per file\n\n\t-output\nOutput filename \/ path (optional)\n\n\t-headers\nNumber of header lines in the input file to add to each ouput file (optional, default=0)\n\nExamples\n\nSplit file.csv into files with 300 records a piece.\n\t$ csvplit -records 300 file.csv\n\nAccept csv data from stdin.\n\t$ cat file.csv | csvsplit -records 20\n\nSplit file.csv into files with 40 records a piece and two header lines (preserved in all files).\n\t$ csvplit -records 40 -headers 2 file.csv\n\nYou can use the -output flag to customize the resulting filenames.\nThe below will generate custom_filename-001.csv, custom_filename-002.csv, etc..\n\t$ csvsplit -records 20 -output custom_filename- file.csv\n\nSplit file.csv into files with 37 records a piece into the subfolder 'stuff'.\n\t$ csvplit -records 37 -output stuff\/ file.csv\n*\/\npackage main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\tflagRecords = flag.Int(\"records\", 0, \"The number of records per output file\")\n\tflagOutput = flag.String(\"output\", \"\", \"Filename \/ path of the output file (leave blank for current directory)\")\n\tflagHeaders = flag.Int(\"headers\", 0, \"Number of header lines in the input file to preserve in each output file\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Sanity check command line flags.\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"usage: csvsplit [options] -records <number of records> <file>\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\tif *flagRecords < 1 {\n\t\tfmt.Fprintln(os.Stderr, \"-records must be > 1\")\n\t\tflag.Usage()\n\t}\n\tif *flagHeaders < 0 {\n\t\tfmt.Fprintln(os.Stderr, \"-headers must be > 0\")\n\t\tflag.Usage()\n\t}\n\tif *flagHeaders >= *flagRecords {\n\t\tfmt.Fprintln(os.Stderr, \"-headers must be >= -records\")\n\t\tflag.Usage()\n\t}\n\n\t\/\/ Get input from a given file or stdin\n\tvar r *csv.Reader\n\tif len(flag.Args()) == 1 {\n\t\tf, err := os.Open(flag.Args()[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t\tr = csv.NewReader(f)\n\t} else {\n\t\tr = csv.NewReader(os.Stdin)\n\t}\n\n\t\/\/ Read the input .csv file line by line. Save to a new file after reaching\n\t\/\/ the amount of records prescribed by the -records flag.\n\tvar recs [][]string\n\tcount := 1\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tsave(&recs, count)\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\trecs = append(recs, record)\n\t\tif len(recs) == *flagRecords {\n\t\t\tsave(&recs, count)\n\t\t\t\/\/ Reset records to include just the header lines (if any)\n\t\t\trecs = recs[:*flagHeaders]\n\t\t\tcount++\n\t\t}\n\t}\n}\n\n\/\/ save() saves the given *[][]string of csv data to a .csv file. Files are named\n\/\/ sequentially in the form of 1.csv, 2.csv, etc.\nfunc save(recs *[][]string, c int) {\n\tname := fmt.Sprintf(\"%v%d%v\", *flagOutput, c, \".csv\")\n\n\t\/\/ Make sure we don't overwrite existing files\n\tif _, err := os.Stat(name); err == nil {\n\t\tlog.Fatal(\"file exists: \", name)\n\t}\n\n\t\/\/ If a directory is specified, make sure that directory exists\n\tif filepath.Dir(*flagOutput) != \".\" {\n\t\t_, err := os.Stat(filepath.Dir(*flagOutput))\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"no such directory:\", *flagOutput)\n\t\t}\n\t}\n\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\tw := csv.NewWriter(f)\n\tw.WriteAll(*recs)\n}\n<commit_msg>More common flag variable naming<commit_after>\/*\nCommand csvsplit splits a .csv into multiple, smaller files.\n\nResulting files will be saved as 1.csv, 2.csv, etc. in the currect directory, unless the -output flag is used.\n\nInstall\n\nRequires Go to be installed first, https:\/\/golang.org\/doc\/install.\n\n\t$ go get github.com\/JeffPaine\/csvsplit\n\nFlags\n\nBasic usage: csvsplit -records <number of records> <file>\n\n\t-records\nNumber of records per file\n\n\t-output\nOutput filename \/ path (optional)\n\n\t-headers\nNumber of header lines in the input file to add to each ouput file (optional, default=0)\n\nExamples\n\nSplit file.csv into files with 300 records a piece.\n\t$ csvplit -records 300 file.csv\n\nAccept csv data from stdin.\n\t$ cat file.csv | csvsplit -records 20\n\nSplit file.csv into files with 40 records a piece and two header lines (preserved in all files).\n\t$ csvplit -records 40 -headers 2 file.csv\n\nYou can use the -output flag to customize the resulting filenames.\nThe below will generate custom_filename-001.csv, custom_filename-002.csv, etc..\n\t$ csvsplit -records 20 -output custom_filename- file.csv\n\nSplit file.csv into files with 37 records a piece into the subfolder 'stuff'.\n\t$ csvplit -records 37 -output stuff\/ file.csv\n*\/\npackage main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\trecords = flag.Int(\"records\", 0, \"The number of records per output file\")\n\toutput = flag.String(\"output\", \"\", \"Filename \/ path of the output file (leave blank for current directory)\")\n\theaders = flag.Int(\"headers\", 0, \"Number of header lines in the input file to preserve in each output file\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Sanity check command line flags.\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"usage: csvsplit [options] -records <number of records> <file>\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\tif *records < 1 {\n\t\tfmt.Fprintln(os.Stderr, \"-records must be > 1\")\n\t\tflag.Usage()\n\t}\n\tif *headers < 0 {\n\t\tfmt.Fprintln(os.Stderr, \"-headers must be > 0\")\n\t\tflag.Usage()\n\t}\n\tif *headers >= *records {\n\t\tfmt.Fprintln(os.Stderr, \"-headers must be >= -records\")\n\t\tflag.Usage()\n\t}\n\n\t\/\/ Get input from a given file or stdin\n\tvar r *csv.Reader\n\tif len(flag.Args()) == 1 {\n\t\tf, err := os.Open(flag.Args()[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t\tr = csv.NewReader(f)\n\t} else {\n\t\tr = csv.NewReader(os.Stdin)\n\t}\n\n\t\/\/ Read the input .csv file line by line. Save to a new file after reaching\n\t\/\/ the amount of records prescribed by the -records flag.\n\tvar recs [][]string\n\tcount := 1\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tsave(&recs, count)\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\trecs = append(recs, record)\n\t\tif len(recs) == *records {\n\t\t\tsave(&recs, count)\n\t\t\t\/\/ Reset records to include just the header lines (if any)\n\t\t\trecs = recs[:*headers]\n\t\t\tcount++\n\t\t}\n\t}\n}\n\n\/\/ save() saves the given *[][]string of csv data to a .csv file. Files are named\n\/\/ sequentially in the form of 1.csv, 2.csv, etc.\nfunc save(recs *[][]string, c int) {\n\tname := fmt.Sprintf(\"%v%d%v\", *output, c, \".csv\")\n\n\t\/\/ Make sure we don't overwrite existing files\n\tif _, err := os.Stat(name); err == nil {\n\t\tlog.Fatal(\"file exists: \", name)\n\t}\n\n\t\/\/ If a directory is specified, make sure that directory exists\n\tif filepath.Dir(*output) != \".\" {\n\t\t_, err := os.Stat(filepath.Dir(*output))\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"no such directory:\", *output)\n\t\t}\n\t}\n\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\tw := csv.NewWriter(f)\n\tw.WriteAll(*recs)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Document struct {\n\tPath string\n\tWords int\n\tPrev int\n\tYesterday int\n}\ntype DocumentMap map[string]Document\n\nfunc (document *Document) Today() int {\n\n\treturn document.Words - document.Yesterday\n}\n\nvar (\n\tdocumentPath = []string{\".\"}\n\tdatabasePath = \".\/wc.db\"\n\theaderFormat = \"Total: #{total} Today: #{today}#{goal}\"\n\tgoalFormat = \" Goal: #{target}(#{remaining})\"\n\titemFormat = \"#{path}: #{total} (#{today})\"\n\tannotationPattern = `#.*$`\n\tgoal = 0\n\textensionBlacklist = []string{}\n\textensionWhitelist = []string{}\n)\n\nfunc init() {\n\tflag.StringVar(&databasePath, \"database\", databasePath, \"Path to database\")\n\tflag.StringVar(&databasePath, \"d\", databasePath, \"Path to database\")\n\n\tflag.IntVar(&goal, \"goal\", goal, \"TODO\")\n\tflag.IntVar(&goal, \"g\", goal, \"TODO\")\n\n\tflag.StringVar(&annotationPattern, \"annotation-pattern\", annotationPattern, \"TODO\")\n\tflag.StringVar(&annotationPattern, \"a\", annotationPattern, \"TODO\")\n\n\tflag.StringVar(&headerFormat, \"format-header\", headerFormat, \"TODO\")\n\tflag.StringVar(&itemFormat, \"format-item\", itemFormat, \"TODO\")\n\n}\n\nfunc countAll(documents string, base string, db *sql.DB, annotationRegexp *regexp.Regexp, files DocumentMap) error {\n\treturn filepath.Walk(documents, func(path string, info os.FileInfo, _ error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tcountFile(path, base, db, annotationRegexp, files)\n\t\treturn nil\n\t})\n}\n\nfunc countFile(path string, base string, db *sql.DB, annotationRegexp *regexp.Regexp, files DocumentMap) (err error) {\n\twords := countWords(path, annotationRegexp)\n\tprev, err := getPreviousWordCount(db, path)\n\tif err != nil {\n\t\treturn\n\t}\n\tyesterday, err := getPreviousDayWordCount(db, path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tabs_path, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trel_path, err := filepath.Rel(base, abs_path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif words != prev {\n\t\taddWordCount(db, rel_path, words)\n\t}\n\tfiles[rel_path] = Document{Path: rel_path, Words: words, Prev: prev, Yesterday: yesterday}\n\treturn\n}\n\nfunc countWords(path string, annotationRegexp *regexp.Regexp) int {\n\tfile, err := os.Open(path)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\tcount := 0\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tline = annotationRegexp.ReplaceAllString(line, \"\")\n\t\tcount += len(strings.Fields(line))\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Println(\"error reading \", path, \":\", err)\n\t}\n\treturn count\n}\n\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tdocumentPath = args\n\t}\n\n\tannotationRegexp, err := regexp.Compile(annotationPattern)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad annotation pattern\")\n\t}\n\n\tbasePath, _ := filepath.Abs(filepath.Dir(databasePath))\n\n\tdb, err := openDb(databasePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\terr = updateSchema(db)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar files = make(DocumentMap)\n\tfor _, path := range documentPath {\n\t\tfileMode, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t} else if fileMode.IsDir() {\n\t\t\tcountAll(path, basePath, db, annotationRegexp, files)\n\t\t} else {\n\t\t\tcountFile(path, basePath, db, annotationRegexp, files)\n\t\t}\n\t}\n\n\tif headerFormat != \"\" {\n\t\ttoday := 0\n\t\ttotal := 0\n\t\tfor _, document := range files {\n\t\t\ttoday += document.Today()\n\t\t\ttotal += document.Words\n\t\t}\n\t\theaderOutput := headerFormat\n\t\theaderOutput = strings.Replace(headerOutput, \"#{total}\", strconv.Itoa(total), -1)\n\t\theaderOutput = strings.Replace(headerOutput, \"#{today}\", strconv.Itoa(today), -1)\n\n\t\tif goal > 0 {\n\t\t\tgoalOutput := goalFormat\n\t\t\tgoalOutput = strings.Replace(goalOutput, \"#{target}\", strconv.Itoa(goal), -1)\n\t\t\tgoalOutput = strings.Replace(goalOutput, \"#{remaining}\", strconv.Itoa(goal-today), -1)\n\t\t\theaderOutput = strings.Replace(headerOutput, \"#{goal}\", goalOutput, -1)\n\t\t} else {\n\t\t\theaderOutput = strings.Replace(headerOutput, \"#{goal}\", \"\", -1)\n\t\t}\n\t\tfmt.Println(headerOutput)\n\t}\n\n\tif itemFormat != \"\" {\n\t\tfor _, document := range files {\n\t\t\titemOutput := itemFormat\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{path}\", document.Path, -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{total}\", strconv.Itoa(document.Words), -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{prev}\", strconv.Itoa(document.Prev), -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{today}\", strconv.Itoa(document.Today()), -1)\n\t\t\tfmt.Println(itemOutput)\n\t\t}\n\t}\n}\n<commit_msg>Improve usage documentation<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Document struct {\n\tPath string\n\tWords int\n\tPrev int\n\tYesterday int\n}\ntype DocumentMap map[string]Document\n\nfunc (document *Document) Today() int {\n\n\treturn document.Words - document.Yesterday\n}\n\nvar (\n\tdocumentPath = []string{\".\"}\n\tdatabasePath = \".\/wc.db\"\n\theaderFormat = \"Total: #{total} Today: #{today}#{goal}\"\n\tgoalFormat = \" Goal: #{target}(#{remaining})\"\n\titemFormat = \"#{path}: #{total} (#{today})\"\n\tannotationPattern = `#.*$`\n\tgoal = 0\n\textensionBlacklist = []string{}\n\textensionWhitelist = []string{}\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\texecutable := filepath.Base(os.Args[0])\n\t\tfmt.Printf(\"Usage: %s [options] [path to documents...]:\\n\\n\", executable)\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.StringVar(&databasePath, \"database\", databasePath, \"Path to database\")\n\tflag.StringVar(&databasePath, \"d\", databasePath, \"alias for --database\")\n\n\tflag.IntVar(&goal, \"goal\", goal, \"Number of words for daily goal\")\n\tflag.IntVar(&goal, \"g\", goal, \"alias for --goal\")\n\n\tflag.StringVar(&annotationPattern, \"annotation-pattern\", annotationPattern, \"Regexp for lines that don't count towards the total\")\n\tflag.StringVar(&annotationPattern, \"a\", annotationPattern, \"alias for --anotation-pattern\")\n\n\tflag.StringVar(&headerFormat, \"format-header\", headerFormat, \"Format for header line\")\n\tflag.StringVar(&itemFormat, \"format-item\", itemFormat, \"Format for item line\")\n}\n\nfunc countAll(documents string, base string, db *sql.DB, annotationRegexp *regexp.Regexp, files DocumentMap) error {\n\treturn filepath.Walk(documents, func(path string, info os.FileInfo, _ error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tcountFile(path, base, db, annotationRegexp, files)\n\t\treturn nil\n\t})\n}\n\nfunc countFile(path string, base string, db *sql.DB, annotationRegexp *regexp.Regexp, files DocumentMap) (err error) {\n\twords := countWords(path, annotationRegexp)\n\tprev, err := getPreviousWordCount(db, path)\n\tif err != nil {\n\t\treturn\n\t}\n\tyesterday, err := getPreviousDayWordCount(db, path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tabs_path, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trel_path, err := filepath.Rel(base, abs_path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif words != prev {\n\t\taddWordCount(db, rel_path, words)\n\t}\n\tfiles[rel_path] = Document{Path: rel_path, Words: words, Prev: prev, Yesterday: yesterday}\n\treturn\n}\n\nfunc countWords(path string, annotationRegexp *regexp.Regexp) int {\n\tfile, err := os.Open(path)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\tcount := 0\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tline = annotationRegexp.ReplaceAllString(line, \"\")\n\t\tcount += len(strings.Fields(line))\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Println(\"error reading \", path, \":\", err)\n\t}\n\treturn count\n}\n\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tdocumentPath = args\n\t}\n\n\tannotationRegexp, err := regexp.Compile(annotationPattern)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad annotation pattern\")\n\t}\n\n\tbasePath, _ := filepath.Abs(filepath.Dir(databasePath))\n\n\tdb, err := openDb(databasePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\terr = updateSchema(db)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar files = make(DocumentMap)\n\tfor _, path := range documentPath {\n\t\tfileMode, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t} else if fileMode.IsDir() {\n\t\t\tcountAll(path, basePath, db, annotationRegexp, files)\n\t\t} else {\n\t\t\tcountFile(path, basePath, db, annotationRegexp, files)\n\t\t}\n\t}\n\n\tif headerFormat != \"\" {\n\t\ttoday := 0\n\t\ttotal := 0\n\t\tfor _, document := range files {\n\t\t\ttoday += document.Today()\n\t\t\ttotal += document.Words\n\t\t}\n\t\theaderOutput := headerFormat\n\t\theaderOutput = strings.Replace(headerOutput, \"#{total}\", strconv.Itoa(total), -1)\n\t\theaderOutput = strings.Replace(headerOutput, \"#{today}\", strconv.Itoa(today), -1)\n\n\t\tif goal > 0 {\n\t\t\tgoalOutput := goalFormat\n\t\t\tgoalOutput = strings.Replace(goalOutput, \"#{target}\", strconv.Itoa(goal), -1)\n\t\t\tgoalOutput = strings.Replace(goalOutput, \"#{remaining}\", strconv.Itoa(goal-today), -1)\n\t\t\theaderOutput = strings.Replace(headerOutput, \"#{goal}\", goalOutput, -1)\n\t\t} else {\n\t\t\theaderOutput = strings.Replace(headerOutput, \"#{goal}\", \"\", -1)\n\t\t}\n\t\tfmt.Println(headerOutput)\n\t}\n\n\tif itemFormat != \"\" {\n\t\tfor _, document := range files {\n\t\t\titemOutput := itemFormat\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{path}\", document.Path, -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{total}\", strconv.Itoa(document.Words), -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{prev}\", strconv.Itoa(document.Prev), -1)\n\t\t\titemOutput = strings.Replace(itemOutput, \"#{today}\", strconv.Itoa(document.Today()), -1)\n\t\t\tfmt.Println(itemOutput)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/net\/context\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype source struct {\n\tmyToken *oauth2.Token\n}\n\nfunc (t source) Token() (*oauth2.Token, error) {\n\treturn t.myToken, nil\n}\n\nfunc main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Printf(\"missing github access token as arg 1\\n\")\n\t\treturn\n\t}\n\n\tvar NoContext context.Context = context.TODO()\n\tsrc := source{\n\t\tmyToken: &oauth2.Token{AccessToken: os.Args[1]},\n\t}\n\n\tclient := oauth2.NewClient(NoContext, src)\n\tghClient := github.NewClient(client)\n\towner := \"docker-library\"\n\trepository := \"official-images\"\n\n\terr := labelPullsInRepo(ghClient, owner, repository, \"open\", \"library\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc labelPullsInRepo(ghClient *github.Client, owner string, repository string, state string, filePrefix string) error {\n\toptions := &github.PullRequestListOptions{\n\t\tState: state,\n\t\tListOptions: github.ListOptions{\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n\n\tfor {\n\t\tpulls, resp, err := ghClient.PullRequests.List(owner, repository, options)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, pr := range pulls {\n\t\t\tcommitFiles, _, err := ghClient.PullRequests.ListFiles(owner, repository, *pr.Number, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"pr.ListFiles(%d) error: %v\\n\", *pr.Number, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcurrentLabels := []github.Label{}\n\t\t\topt := &github.ListOptions{\n\t\t\t\tPerPage: 100,\n\t\t\t}\n\t\t\tfor {\n\t\t\t\tlbls, pages, err := ghClient.Issues.ListLabelsByIssue(owner, repository, *pr.Number, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"pr.ListLabels(%d) error: %v\\n\", *pr.Number, err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tcurrentLabels = append(currentLabels, lbls...)\n\n\t\t\t\tif pages.NextPage == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\topt.Page = pages.NextPage\n\t\t\t}\n\n\t\t\tlabels := []string{}\n\t\t\tfor _, commitFile := range commitFiles {\n\t\t\t\tif strings.HasPrefix(*commitFile.Filename, filePrefix) {\n\t\t\t\t\tvalid := true\n\t\t\t\t\ttoAdd := *commitFile.Filename\n\t\t\t\t\tfor _, lbl := range currentLabels {\n\t\t\t\t\t\tif lbl.String() == toAdd {\n\t\t\t\t\t\t\tvalid = false\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif valid {\n\t\t\t\t\t\tlabels = append(labels, toAdd)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"%d:\\n\\tnew:%v\\n\\tcurrent:%v\\n\", *pr.Number, labels, currentLabels)\n\n\t\t\t\/\/ add labels\n\t\t\tif len(labels) > 0 {\n\t\t\t\tlabelObjs, _, err := ghClient.Issues.AddLabelsToIssue(owner, repository, *pr.Number, labels)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"pr.AddLabels(%d, %v) error: %v\\n\", *pr.Number, labels, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\tresult:%v\\n\", labelObjs)\n\t\t\t}\n\t\t}\n\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\toptions.ListOptions.Page = resp.NextPage\n\t}\n\n\treturn nil\n}\n<commit_msg>Apply gofmt \/ goimports<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype source struct {\n\tmyToken *oauth2.Token\n}\n\nfunc (t source) Token() (*oauth2.Token, error) {\n\treturn t.myToken, nil\n}\n\nfunc main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Printf(\"missing github access token as arg 1\\n\")\n\t\treturn\n\t}\n\n\tvar NoContext context.Context = context.TODO()\n\tsrc := source{\n\t\tmyToken: &oauth2.Token{AccessToken: os.Args[1]},\n\t}\n\n\tclient := oauth2.NewClient(NoContext, src)\n\tghClient := github.NewClient(client)\n\towner := \"docker-library\"\n\trepository := \"official-images\"\n\n\terr := labelPullsInRepo(ghClient, owner, repository, \"open\", \"library\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc labelPullsInRepo(ghClient *github.Client, owner string, repository string, state string, filePrefix string) error {\n\toptions := &github.PullRequestListOptions{\n\t\tState: state,\n\t\tListOptions: github.ListOptions{\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n\n\tfor {\n\t\tpulls, resp, err := ghClient.PullRequests.List(owner, repository, options)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, pr := range pulls {\n\t\t\tcommitFiles, _, err := ghClient.PullRequests.ListFiles(owner, repository, *pr.Number, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"pr.ListFiles(%d) error: %v\\n\", *pr.Number, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcurrentLabels := []github.Label{}\n\t\t\topt := &github.ListOptions{\n\t\t\t\tPerPage: 100,\n\t\t\t}\n\t\t\tfor {\n\t\t\t\tlbls, pages, err := ghClient.Issues.ListLabelsByIssue(owner, repository, *pr.Number, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"pr.ListLabels(%d) error: %v\\n\", *pr.Number, err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tcurrentLabels = append(currentLabels, lbls...)\n\n\t\t\t\tif pages.NextPage == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\topt.Page = pages.NextPage\n\t\t\t}\n\n\t\t\tlabels := []string{}\n\t\t\tfor _, commitFile := range commitFiles {\n\t\t\t\tif strings.HasPrefix(*commitFile.Filename, filePrefix) {\n\t\t\t\t\tvalid := true\n\t\t\t\t\ttoAdd := *commitFile.Filename\n\t\t\t\t\tfor _, lbl := range currentLabels {\n\t\t\t\t\t\tif lbl.String() == toAdd {\n\t\t\t\t\t\t\tvalid = false\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif valid {\n\t\t\t\t\t\tlabels = append(labels, toAdd)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"%d:\\n\\tnew:%v\\n\\tcurrent:%v\\n\", *pr.Number, labels, currentLabels)\n\n\t\t\t\/\/ add labels\n\t\t\tif len(labels) > 0 {\n\t\t\t\tlabelObjs, _, err := ghClient.Issues.AddLabelsToIssue(owner, repository, *pr.Number, labels)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"pr.AddLabels(%d, %v) error: %v\\n\", *pr.Number, labels, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\tresult:%v\\n\", labelObjs)\n\t\t\t}\n\t\t}\n\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\toptions.ListOptions.Page = resp.NextPage\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 bs authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/bs\/bslog\"\n\t\"github.com\/tsuru\/bs\/config\"\n\t\"github.com\/tsuru\/bs\/log\"\n\t\"github.com\/tsuru\/bs\/metric\"\n\t\"github.com\/tsuru\/bs\/status\"\n)\n\nconst (\n\tversion = \"v1.3\"\n)\n\nvar printVersion bool\n\nfunc init() {\n\tflag.BoolVar(&printVersion, \"version\", false, \"Print version and exit\")\n}\n\nfunc startSignalHandler(callback func(os.Signal), signals ...os.Signal) {\n\tsigChan := make(chan os.Signal, 4)\n\tgo func() {\n\t\tif signal, ok := <-sigChan; ok {\n\t\t\tcallback(signal)\n\t\t}\n\t}()\n\tsignal.Notify(sigChan, signals...)\n}\n\nfunc onSignalDebugGoroutines(signal os.Signal) {\n\tvar buf []byte\n\tvar written int\n\tcurrLen := 1024\n\tfor written == len(buf) {\n\t\tbuf = make([]byte, currLen)\n\t\twritten = runtime.Stack(buf, true)\n\t\tcurrLen *= 2\n\t}\n\tfmt.Print(string(buf[:written]))\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n}\n\nfunc onSignalDebugProfile(signal os.Signal) {\n\tprofFileName := \"memprofile.out\"\n\tfile, err := os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t\treturn\n\t}\n\terr = pprof.WriteHeapProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to write mem profile: %s\", err)\n\t}\n\tbslog.Warnf(\"Wrote mem profile to %s\", profFileName)\n\tfile.Close()\n\tprofFileName = \"cpuprofile.out\"\n\tbslog.Warnf(\"Starting cpu profile, writing output to %s\", profFileName)\n\tdefer bslog.Warnf(\"Finished cpu profile, see %s\", profFileName)\n\tfile, err = os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t}\n\tdefer file.Close()\n\terr = pprof.StartCPUProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to start cpu profile: %s\", err)\n\t}\n\tdefer pprof.StopCPUProfile()\n\ttime.Sleep(30 * time.Second)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n}\n\nfunc main() {\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n\tflag.Parse()\n\tif printVersion {\n\t\tfmt.Printf(\"bs version %s\\n\", version)\n\t\treturn\n\t}\n\tlf := log.LogForwarder{\n\t\tBindAddress: config.Config.SyslogListenAddress,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tEnabledBackends: config.Config.LogBackends,\n\t}\n\terr := lf.Start()\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize log forwarder: %s\\n\", err)\n\t}\n\tmRunner := metric.NewRunner(config.Config.DockerEndpoint, config.Config.MetricsInterval)\n\terr = mRunner.Start()\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize metrics runner: %s\\n\", err)\n\t}\n\treporter, err := status.NewReporter(&status.ReporterConfig{\n\t\tTsuruEndpoint: config.Config.TsuruEndpoint,\n\t\tTsuruToken: config.Config.TsuruToken,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tInterval: config.Config.StatusInterval,\n\t})\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize status reporter: %s\\n\", err)\n\t}\n\tstartSignalHandler(func(signal os.Signal) {\n\t\treporter.Stop()\n\t\tmRunner.Stop()\n\t}, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)\n\treporter.Wait()\n}\n<commit_msg>bump version<commit_after>\/\/ Copyright 2016 bs authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/bs\/bslog\"\n\t\"github.com\/tsuru\/bs\/config\"\n\t\"github.com\/tsuru\/bs\/log\"\n\t\"github.com\/tsuru\/bs\/metric\"\n\t\"github.com\/tsuru\/bs\/status\"\n)\n\nconst (\n\tversion = \"v1.4\"\n)\n\nvar printVersion bool\n\nfunc init() {\n\tflag.BoolVar(&printVersion, \"version\", false, \"Print version and exit\")\n}\n\nfunc startSignalHandler(callback func(os.Signal), signals ...os.Signal) {\n\tsigChan := make(chan os.Signal, 4)\n\tgo func() {\n\t\tif signal, ok := <-sigChan; ok {\n\t\t\tcallback(signal)\n\t\t}\n\t}()\n\tsignal.Notify(sigChan, signals...)\n}\n\nfunc onSignalDebugGoroutines(signal os.Signal) {\n\tvar buf []byte\n\tvar written int\n\tcurrLen := 1024\n\tfor written == len(buf) {\n\t\tbuf = make([]byte, currLen)\n\t\twritten = runtime.Stack(buf, true)\n\t\tcurrLen *= 2\n\t}\n\tfmt.Print(string(buf[:written]))\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n}\n\nfunc onSignalDebugProfile(signal os.Signal) {\n\tprofFileName := \"memprofile.out\"\n\tfile, err := os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t\treturn\n\t}\n\terr = pprof.WriteHeapProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to write mem profile: %s\", err)\n\t}\n\tbslog.Warnf(\"Wrote mem profile to %s\", profFileName)\n\tfile.Close()\n\tprofFileName = \"cpuprofile.out\"\n\tbslog.Warnf(\"Starting cpu profile, writing output to %s\", profFileName)\n\tdefer bslog.Warnf(\"Finished cpu profile, see %s\", profFileName)\n\tfile, err = os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t}\n\tdefer file.Close()\n\terr = pprof.StartCPUProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to start cpu profile: %s\", err)\n\t}\n\tdefer pprof.StopCPUProfile()\n\ttime.Sleep(30 * time.Second)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n}\n\nfunc main() {\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n\tflag.Parse()\n\tif printVersion {\n\t\tfmt.Printf(\"bs version %s\\n\", version)\n\t\treturn\n\t}\n\tlf := log.LogForwarder{\n\t\tBindAddress: config.Config.SyslogListenAddress,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tEnabledBackends: config.Config.LogBackends,\n\t}\n\terr := lf.Start()\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize log forwarder: %s\\n\", err)\n\t}\n\tmRunner := metric.NewRunner(config.Config.DockerEndpoint, config.Config.MetricsInterval)\n\terr = mRunner.Start()\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize metrics runner: %s\\n\", err)\n\t}\n\treporter, err := status.NewReporter(&status.ReporterConfig{\n\t\tTsuruEndpoint: config.Config.TsuruEndpoint,\n\t\tTsuruToken: config.Config.TsuruToken,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tInterval: config.Config.StatusInterval,\n\t})\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize status reporter: %s\\n\", err)\n\t}\n\tstartSignalHandler(func(signal os.Signal) {\n\t\treporter.Stop()\n\t\tmRunner.Stop()\n\t}, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)\n\treporter.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/crgimenes\/metal\/fonts\"\n\t\"github.com\/hajimehoshi\/ebiten\"\n)\n\nconst (\n\tscreenWidth = 320 \/\/ 40 columns\n\tscreenHeight = 240 \/\/ 30 rows\n\n\trows = 30\n\tcolumns = 40\n\trgbaSize = 4\n)\n\nvar (\n\tvideoTextMemory [rows * columns * 2]byte\n\tcursor int\n\timg *image.RGBA\n\tsquare *ebiten.Image\n\tfont fonts.Expert118x8\n\tcurrentColor byte = 0x0f\n)\n\nvar CGAColors = []struct {\n\tR byte\n\tG byte\n\tB byte\n}{\n\t{0, 0, 0},\n\t{0, 0, 170},\n\t{0, 170, 0},\n\t{0, 170, 170},\n\t{170, 0, 0},\n\t{170, 0, 170},\n\t{170, 85, 0},\n\t{170, 170, 170},\n\t{85, 85, 85},\n\t{85, 85, 255},\n\t{85, 255, 85},\n\t{85, 255, 255},\n\t{255, 85, 85},\n\t{255, 85, 255},\n\t{255, 255, 85},\n\t{255, 255, 255},\n}\n\nfunc mergeColorCode(b, f byte) byte {\n\treturn (f & 0xff) | (b << 4)\n}\n\nfunc drawPix(x, y int, color byte) {\n\tpos := 4*y*screenWidth + 4*x\n\timg.Pix[pos] = CGAColors[color].R\n\timg.Pix[pos+1] = CGAColors[color].G\n\timg.Pix[pos+2] = CGAColors[color].B\n\timg.Pix[pos+3] = 0xff\n}\n\nfunc getBit(n int, pos uint64) bool {\n\t\/\/ from right to left\n\tval := n & (1 << pos)\n\treturn (val > 0)\n}\n\nfunc drawChar(index, fgColor, bgColor byte, x, y int) {\n\tvar a, b uint64\n\tfor a = 0; a < 8; a++ {\n\t\tfor b = 0; b < 8; b++ {\n\t\t\tif font.Bitmap[index][b]&(0x80>>a) != 0 {\n\t\t\t\tdrawPix(int(a)+x, int(b)+y, fgColor)\n\t\t\t} else {\n\t\t\t\tdrawPix(int(a)+x, int(b)+y, bgColor)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar cursorBlinkTimer int\nvar cursorSetBlink bool = true\n\nfunc drawCursor(index, fgColor, bgColor byte, x, y int) {\n\tif cursorSetBlink {\n\t\tif cursorBlinkTimer < 15 {\n\t\t\tdrawChar(index, fgColor, bgColor, x, y)\n\t\t} else {\n\t\t\tdrawChar(index, bgColor, fgColor, x, y)\n\t\t}\n\t\tcursorBlinkTimer++\n\t\tif cursorBlinkTimer > 30 {\n\t\t\tcursorBlinkTimer = 0\n\t\t}\n\t} else {\n\t\tdrawChar(index, bgColor, fgColor, x, y)\n\t}\n}\n\nfunc drawVideoTextMode() {\n\ti := 0\n\tfor r := 0; r < rows; r++ {\n\t\tfor c := 0; c < columns; c++ {\n\t\t\tcolor := videoTextMemory[i]\n\t\t\tf := color & 0x0f\n\t\t\tb := color & 0xf0 >> 4\n\t\t\ti++\n\t\t\tif i-1 == cursor {\n\t\t\t\tdrawCursor(videoTextMemory[i], f, b, c*8, r*8)\n\t\t\t} else {\n\t\t\t\tdrawChar(videoTextMemory[i], f, b, c*8, r*8)\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n}\n\nfunc clearVideoTextMode() {\n\tcopy(videoTextMemory[:], make([]byte, len(videoTextMemory)))\n\tfor i := 0; i < len(videoTextMemory); i += 2 {\n\t\tvideoTextMemory[i] = currentColor\n\t}\n}\n\nfunc moveLineUp() {\n\tcopy(videoTextMemory[0:], videoTextMemory[columns*2:])\n\tcopy(videoTextMemory[len(videoTextMemory)-columns*2:], make([]byte, columns*2))\n\tfor i := len(videoTextMemory) - columns*2; i < len(videoTextMemory); i += 2 {\n\t\tvideoTextMemory[i] = currentColor\n\t}\n\n}\n\nfunc correctVideoCursor() {\n\tif cursor < 0 {\n\t\tcursor = 0\n\t}\n\tfor cursor >= rows*columns*2 {\n\t\tcursor -= columns * 2\n\t\tmoveLineUp()\n\t}\n}\n\nfunc putChar(c byte) {\n\tcorrectVideoCursor()\n\tvideoTextMemory[cursor] = currentColor\n\tcursor++\n\tcorrectVideoCursor()\n\tvideoTextMemory[cursor] = c\n\tcursor++\n\tcorrectVideoCursor()\n}\n\nfunc bPrint(msg string) {\n\tfor i := 0; i < len(msg); i++ {\n\t\tc := msg[i]\n\n\t\tswitch c {\n\t\tcase 13:\n\t\t\tcursor += columns * 2\n\t\t\tcontinue\n\t\tcase 10:\n\t\t\taux := cursor \/ (columns * 2)\n\t\t\taux = aux * (columns * 2)\n\t\t\tcursor = aux\n\t\t\tcontinue\n\t\t}\n\t\tputChar(msg[i])\n\t}\n}\n\nfunc bPrintln(msg string) {\n\tmsg += \"\\r\\n\"\n\tbPrint(msg)\n}\n\nvar lastKey = struct {\n\tTime uint64\n\tChar byte\n}{\n\t0,\n\t0,\n}\n\nvar uTime uint64\nvar c byte\n\nvar machine int\n\n\/\/var countaux int\nvar noKey bool\n\nfunc keyTreatment(c byte, f func(c byte)) {\n\tif noKey || lastKey.Char != c || lastKey.Time+20 < uTime {\n\t\tf(c)\n\t\tnoKey = false\n\t\tlastKey.Char = c\n\t\tlastKey.Time = uTime\n\t}\n}\n\nfunc getLine() string {\n\taux := cursor \/ (columns * 2)\n\tvar ret string\n\tfor i := aux*(columns*2) + 1; i < aux*(columns*2)+columns*2; i += 2 {\n\t\tret += string(videoTextMemory[i])\n\t}\n\n\tret = strings.TrimSpace(ret)\n\tfmt.Println(ret)\n\treturn ret\n}\n\nfunc keyboard() {\n\tfor c := 'A'; c <= 'Z'; c++ {\n\t\tif ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {\n\t\t\tkeyTreatment(byte(c), func(c byte) {\n\t\t\t\tputChar(c)\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeySpace) {\n\t\tkeyTreatment(byte(' '), func(c byte) {\n\t\t\tputChar(c)\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyEnter) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tgetLine()\n\t\t\tcursor += columns * 2\n\t\t\taux := cursor \/ (columns * 2)\n\t\t\taux = aux * (columns * 2)\n\t\t\tcursor = aux\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyBackspace) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyUp) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= columns * 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyDown) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor += columns * 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyLeft) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyRight) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor += 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\t\/\/ When the \"left mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"You're pressing the 'LEFT' mouse button.\")\n\t}\n\t\/\/ When the \"right mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonRight) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"\\nYou're pressing the 'RIGHT' mouse button.\")\n\t}\n\t\/\/ When the \"middle mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonMiddle) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"\\n\\nYou're pressing the 'MIDDLE' mouse button.\")\n\t}\n\n\t\/\/x, y := ebiten.CursorPosition()\n\t\/\/fmt.Printf(\"X: %d, Y: %d\\n\", x, y)\n\n\t\/\/ Display the information with \"X: xx, Y: xx\" format\n\t\/\/ebitenutil.DebugPrint(screen, fmt.Sprintf(\"X: %d, Y: %d\", x, y))\n\n\tnoKey = true\n\n}\n\nfunc update(screen *ebiten.Image) error {\n\n\tuTime++\n\t\/\/putChar(2)\n\t\/\/cursor -= 2\n\n\tif machine == 0 {\n\t\tbPrintln(\"METAL BASIC 0.01\")\n\t\tbPrintln(\"http:\/\/crg.eti.br\")\n\t\tmachine++\n\t}\n\n\t\/*\n\t\tif countaux > 10 {\n\t\t\tcountaux = 0\n\t\t\tputChar(dt)\n\t\t\tdt++\n\t\t\tcurrentColor = mergeColorCode(0x0, c)\n\t\t\tc++\n\t\t\tif c > 15 {\n\t\t\t\tc = 0\n\t\t\t}\n\t\t}\n\t\tcountaux++\n\t*\/\n\tdrawVideoTextMode()\n\tscreen.ReplacePixels(img.Pix)\n\tkeyboard()\n\treturn nil\n}\n\nfunc main() {\n\n\tfont.Load()\n\tclearVideoTextMode()\n\n\timg = image.NewRGBA(image.Rect(0, 0, screenWidth, screenHeight))\n\tif err := ebiten.Run(update, screenWidth, screenHeight, 2, \"METAL BASIC 0.01\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<commit_msg>simple command implemented<commit_after>package main\n\nimport (\n\t\"image\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/crgimenes\/metal\/cmd\"\n\t\"github.com\/crgimenes\/metal\/fonts\"\n\t\"github.com\/hajimehoshi\/ebiten\"\n)\n\nconst (\n\tscreenWidth = 320 \/\/ 40 columns\n\tscreenHeight = 240 \/\/ 30 rows\n\n\trows = 30\n\tcolumns = 40\n\trgbaSize = 4\n)\n\nvar (\n\tvideoTextMemory [rows * columns * 2]byte\n\tcursor int\n\timg *image.RGBA\n\tsquare *ebiten.Image\n\tfont fonts.Expert118x8\n\tcurrentColor byte = 0x0f\n)\n\nvar CGAColors = []struct {\n\tR byte\n\tG byte\n\tB byte\n}{\n\t{0, 0, 0},\n\t{0, 0, 170},\n\t{0, 170, 0},\n\t{0, 170, 170},\n\t{170, 0, 0},\n\t{170, 0, 170},\n\t{170, 85, 0},\n\t{170, 170, 170},\n\t{85, 85, 85},\n\t{85, 85, 255},\n\t{85, 255, 85},\n\t{85, 255, 255},\n\t{255, 85, 85},\n\t{255, 85, 255},\n\t{255, 255, 85},\n\t{255, 255, 255},\n}\n\nfunc mergeColorCode(b, f byte) byte {\n\treturn (f & 0xff) | (b << 4)\n}\n\nfunc drawPix(x, y int, color byte) {\n\tpos := 4*y*screenWidth + 4*x\n\timg.Pix[pos] = CGAColors[color].R\n\timg.Pix[pos+1] = CGAColors[color].G\n\timg.Pix[pos+2] = CGAColors[color].B\n\timg.Pix[pos+3] = 0xff\n}\n\nfunc getBit(n int, pos uint64) bool {\n\t\/\/ from right to left\n\tval := n & (1 << pos)\n\treturn (val > 0)\n}\n\nfunc drawChar(index, fgColor, bgColor byte, x, y int) {\n\tvar a, b uint64\n\tfor a = 0; a < 8; a++ {\n\t\tfor b = 0; b < 8; b++ {\n\t\t\tif font.Bitmap[index][b]&(0x80>>a) != 0 {\n\t\t\t\tdrawPix(int(a)+x, int(b)+y, fgColor)\n\t\t\t} else {\n\t\t\t\tdrawPix(int(a)+x, int(b)+y, bgColor)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar cursorBlinkTimer int\nvar cursorSetBlink bool = true\n\nfunc drawCursor(index, fgColor, bgColor byte, x, y int) {\n\tif cursorSetBlink {\n\t\tif cursorBlinkTimer < 15 {\n\t\t\tdrawChar(index, fgColor, bgColor, x, y)\n\t\t} else {\n\t\t\tdrawChar(index, bgColor, fgColor, x, y)\n\t\t}\n\t\tcursorBlinkTimer++\n\t\tif cursorBlinkTimer > 30 {\n\t\t\tcursorBlinkTimer = 0\n\t\t}\n\t} else {\n\t\tdrawChar(index, bgColor, fgColor, x, y)\n\t}\n}\n\nfunc drawVideoTextMode() {\n\ti := 0\n\tfor r := 0; r < rows; r++ {\n\t\tfor c := 0; c < columns; c++ {\n\t\t\tcolor := videoTextMemory[i]\n\t\t\tf := color & 0x0f\n\t\t\tb := color & 0xf0 >> 4\n\t\t\ti++\n\t\t\tif i-1 == cursor {\n\t\t\t\tdrawCursor(videoTextMemory[i], f, b, c*8, r*8)\n\t\t\t} else {\n\t\t\t\tdrawChar(videoTextMemory[i], f, b, c*8, r*8)\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n}\n\nfunc clearVideoTextMode() {\n\tcopy(videoTextMemory[:], make([]byte, len(videoTextMemory)))\n\tfor i := 0; i < len(videoTextMemory); i += 2 {\n\t\tvideoTextMemory[i] = currentColor\n\t}\n}\n\nfunc moveLineUp() {\n\tcopy(videoTextMemory[0:], videoTextMemory[columns*2:])\n\tcopy(videoTextMemory[len(videoTextMemory)-columns*2:], make([]byte, columns*2))\n\tfor i := len(videoTextMemory) - columns*2; i < len(videoTextMemory); i += 2 {\n\t\tvideoTextMemory[i] = currentColor\n\t}\n\n}\n\nfunc correctVideoCursor() {\n\tif cursor < 0 {\n\t\tcursor = 0\n\t}\n\tfor cursor >= rows*columns*2 {\n\t\tcursor -= columns * 2\n\t\tmoveLineUp()\n\t}\n}\n\nfunc putChar(c byte) {\n\tcorrectVideoCursor()\n\tvideoTextMemory[cursor] = currentColor\n\tcursor++\n\tcorrectVideoCursor()\n\tvideoTextMemory[cursor] = c\n\tcursor++\n\tcorrectVideoCursor()\n}\n\nfunc bPrint(msg string) {\n\tfor i := 0; i < len(msg); i++ {\n\t\tc := msg[i]\n\n\t\tswitch c {\n\t\tcase 13:\n\t\t\tcursor += columns * 2\n\t\t\tcontinue\n\t\tcase 10:\n\t\t\taux := cursor \/ (columns * 2)\n\t\t\taux = aux * (columns * 2)\n\t\t\tcursor = aux\n\t\t\tcontinue\n\t\t}\n\t\tputChar(msg[i])\n\t}\n}\n\nfunc bPrintln(msg string) {\n\tmsg += \"\\r\\n\"\n\tbPrint(msg)\n}\n\nvar lastKey = struct {\n\tTime uint64\n\tChar byte\n}{\n\t0,\n\t0,\n}\n\nvar uTime uint64\nvar c byte\n\nvar machine int\n\n\/\/var countaux int\nvar noKey bool\n\nfunc keyTreatment(c byte, f func(c byte)) {\n\tif noKey || lastKey.Char != c || lastKey.Time+20 < uTime {\n\t\tf(c)\n\t\tnoKey = false\n\t\tlastKey.Char = c\n\t\tlastKey.Time = uTime\n\t}\n}\n\nfunc getLine() string {\n\taux := cursor \/ (columns * 2)\n\tvar ret string\n\tfor i := aux*(columns*2) + 1; i < aux*(columns*2)+columns*2; i += 2 {\n\t\tc := videoTextMemory[i]\n\t\tif c == 0 {\n\t\t\tbreak\n\t\t}\n\t\tret += string(videoTextMemory[i])\n\t}\n\n\tret = strings.TrimSpace(ret)\n\treturn ret\n}\n\nfunc keyboard() {\n\tfor c := 'A'; c <= 'Z'; c++ {\n\t\tif ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {\n\t\t\tkeyTreatment(byte(c), func(c byte) {\n\t\t\t\tputChar(c)\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeySpace) {\n\t\tkeyTreatment(byte(' '), func(c byte) {\n\t\t\tputChar(c)\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyEnter) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcmd.Eval(getLine())\n\t\t\tcursor += columns * 2\n\t\t\taux := cursor \/ (columns * 2)\n\t\t\taux = aux * (columns * 2)\n\t\t\tcursor = aux\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyBackspace) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\tif ebiten.IsKeyPressed(ebiten.KeyUp) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= columns * 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyDown) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor += columns * 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyLeft) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor -= 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t} else if ebiten.IsKeyPressed(ebiten.KeyRight) {\n\t\tkeyTreatment(0, func(c byte) {\n\t\t\tcursor += 2\n\t\t\tcorrectVideoCursor()\n\t\t})\n\t\treturn\n\t}\n\n\t\/\/ When the \"left mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"You're pressing the 'LEFT' mouse button.\")\n\t}\n\t\/\/ When the \"right mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonRight) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"\\nYou're pressing the 'RIGHT' mouse button.\")\n\t}\n\t\/\/ When the \"middle mouse button\" is pressed...\n\tif ebiten.IsMouseButtonPressed(ebiten.MouseButtonMiddle) {\n\t\t\/\/ebitenutil.DebugPrint(screen, \"\\n\\nYou're pressing the 'MIDDLE' mouse button.\")\n\t}\n\n\t\/\/x, y := ebiten.CursorPosition()\n\t\/\/fmt.Printf(\"X: %d, Y: %d\\n\", x, y)\n\n\t\/\/ Display the information with \"X: xx, Y: xx\" format\n\t\/\/ebitenutil.DebugPrint(screen, fmt.Sprintf(\"X: %d, Y: %d\", x, y))\n\n\tnoKey = true\n\n}\n\nfunc update(screen *ebiten.Image) error {\n\n\tuTime++\n\t\/\/putChar(2)\n\t\/\/cursor -= 2\n\n\tif machine == 0 {\n\t\tbPrintln(\"METAL BASIC 0.01\")\n\t\tbPrintln(\"http:\/\/crg.eti.br\")\n\t\tmachine++\n\t}\n\n\t\/*\n\t\tif countaux > 10 {\n\t\t\tcountaux = 0\n\t\t\tputChar(dt)\n\t\t\tdt++\n\t\t\tcurrentColor = mergeColorCode(0x0, c)\n\t\t\tc++\n\t\t\tif c > 15 {\n\t\t\t\tc = 0\n\t\t\t}\n\t\t}\n\t\tcountaux++\n\t*\/\n\tdrawVideoTextMode()\n\tscreen.ReplacePixels(img.Pix)\n\tkeyboard()\n\treturn nil\n}\n\nfunc main() {\n\n\tfont.Load()\n\tclearVideoTextMode()\n\n\timg = image.NewRGBA(image.Rect(0, 0, screenWidth, screenHeight))\n\tif err := ebiten.Run(update, screenWidth, screenHeight, 2, \"METAL BASIC 0.01\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/facebookgo\/grace\/gracehttp\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/techjanitor\/pram-libs\/auth\"\n\t\"github.com\/techjanitor\/pram-libs\/config\"\n\t\"github.com\/techjanitor\/pram-libs\/cors\"\n\t\"github.com\/techjanitor\/pram-libs\/db\"\n\t\"github.com\/techjanitor\/pram-libs\/redis\"\n\t\"github.com\/techjanitor\/pram-libs\/validate\"\n\n\tlocal \"github.com\/techjanitor\/pram-admin\/config\"\n\tc \"github.com\/techjanitor\/pram-admin\/controllers\"\n\tu \"github.com\/techjanitor\/pram-admin\/utils\"\n)\n\nvar (\n\tversion = \"0.0.1\"\n)\n\nfunc init() {\n\n\t\/\/ Database connection settings\n\tdbase := db.Database{\n\n\t\tUser: local.Settings.Database.User,\n\t\tPassword: local.Settings.Database.Password,\n\t\tProto: local.Settings.Database.Proto,\n\t\tHost: local.Settings.Database.Host,\n\t\tDatabase: local.Settings.Database.Database,\n\t\tMaxIdle: local.Settings.Database.MaxIdle,\n\t\tMaxConnections: local.Settings.Database.MaxConnections,\n\t}\n\n\t\/\/ Set up DB connection\n\tdbase.NewDb()\n\n\t\/\/ Get limits and stuff from database\n\tconfig.GetDatabaseSettings()\n\n\t\/\/ redis settings\n\tr := redis.Redis{\n\t\t\/\/ Redis address and max pool connections\n\t\tProtocol: local.Settings.Redis.Protocol,\n\t\tAddress: local.Settings.Redis.Address,\n\t\tMaxIdle: local.Settings.Redis.MaxIdle,\n\t\tMaxConnections: local.Settings.Redis.MaxConnections,\n\t}\n\n\t\/\/ Set up Redis connection\n\tr.NewRedisCache()\n\n\t\/\/ set auth middleware secret\n\tauth.Secret = local.Settings.Session.Secret\n\n\t\/\/ print the starting info\n\tStartInfo()\n\n\t\/\/ Print out config\n\tconfig.Print()\n\n\t\/\/ Print out config\n\tlocal.Print()\n\n\t\/\/ check what services are available\n\tu.CheckServices()\n\n\t\/\/ Print capabilities\n\tu.Services.Print()\n\n\t\/\/ set cors domains\n\tcors.SetDomains(local.Settings.CORS.Sites, strings.Split(\"POST,DELETE\", \",\"))\n\n}\n\nfunc main() {\n\tr := gin.Default()\n\n\tr.Use(cors.CORS())\n\n\tr.NoRoute(c.ErrorController)\n\n\t\/\/ requires mod perms\n\tmod := r.Group(\"\/mod\")\n\tmod.Use(validate.ValidateParams())\n\tmod.Use(auth.Auth(auth.Moderators))\n\n\tmod.DELETE(\"\/tag\/:id\", c.DeleteTagController)\n\tmod.DELETE(\"\/imagetag\/:image\/:tag\", c.DeleteImageTagController)\n\tmod.DELETE(\"\/thread\/:id\", c.DeleteThreadController)\n\tmod.DELETE(\"\/post\/:thread\/:id\", c.DeletePostController)\n\tmod.POST(\"\/sticky\/:thread\", c.StickyThreadController)\n\tmod.POST(\"\/close\/:thread\", c.CloseThreadController)\n\n\t\/\/ requires admin perms\n\tadmin := r.Group(\"\/admin\")\n\tadmin.Use(validate.ValidateParams())\n\tadmin.Use(auth.Auth(auth.Admins))\n\n\tadmin.DELETE(\"\/thread\/:id\", c.PurgeThreadController)\n\tadmin.DELETE(\"\/post\/:thread\/:id\", c.PurgePostController)\n\t\/\/admin.POST(\"\/ban\/:ip\", c.BanIpController)\n\t\/\/admin.DELETE(\"\/flushcache\", c.DeleteCacheController)\n\n\ts := &http.Server{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", local.Settings.Admin.Address, local.Settings.Admin.Port),\n\t\tHandler: r,\n\t}\n\n\tgracehttp.Serve(s)\n\n}\n\nfunc StartInfo() {\n\n\tfmt.Println(strings.Repeat(\"*\", 60))\n\tfmt.Printf(\"%-20v\\n\\n\", \"PRAM-ADMIN\")\n\tfmt.Printf(\"%-20v%40v\\n\", \"Version\", version)\n\tfmt.Println(strings.Repeat(\"*\", 60))\n\n}\n<commit_msg>add get method<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/facebookgo\/grace\/gracehttp\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/techjanitor\/pram-libs\/auth\"\n\t\"github.com\/techjanitor\/pram-libs\/config\"\n\t\"github.com\/techjanitor\/pram-libs\/cors\"\n\t\"github.com\/techjanitor\/pram-libs\/db\"\n\t\"github.com\/techjanitor\/pram-libs\/redis\"\n\t\"github.com\/techjanitor\/pram-libs\/validate\"\n\n\tlocal \"github.com\/techjanitor\/pram-admin\/config\"\n\tc \"github.com\/techjanitor\/pram-admin\/controllers\"\n\tu \"github.com\/techjanitor\/pram-admin\/utils\"\n)\n\nvar (\n\tversion = \"0.0.1\"\n)\n\nfunc init() {\n\n\t\/\/ Database connection settings\n\tdbase := db.Database{\n\n\t\tUser: local.Settings.Database.User,\n\t\tPassword: local.Settings.Database.Password,\n\t\tProto: local.Settings.Database.Proto,\n\t\tHost: local.Settings.Database.Host,\n\t\tDatabase: local.Settings.Database.Database,\n\t\tMaxIdle: local.Settings.Database.MaxIdle,\n\t\tMaxConnections: local.Settings.Database.MaxConnections,\n\t}\n\n\t\/\/ Set up DB connection\n\tdbase.NewDb()\n\n\t\/\/ Get limits and stuff from database\n\tconfig.GetDatabaseSettings()\n\n\t\/\/ redis settings\n\tr := redis.Redis{\n\t\t\/\/ Redis address and max pool connections\n\t\tProtocol: local.Settings.Redis.Protocol,\n\t\tAddress: local.Settings.Redis.Address,\n\t\tMaxIdle: local.Settings.Redis.MaxIdle,\n\t\tMaxConnections: local.Settings.Redis.MaxConnections,\n\t}\n\n\t\/\/ Set up Redis connection\n\tr.NewRedisCache()\n\n\t\/\/ set auth middleware secret\n\tauth.Secret = local.Settings.Session.Secret\n\n\t\/\/ print the starting info\n\tStartInfo()\n\n\t\/\/ Print out config\n\tconfig.Print()\n\n\t\/\/ Print out config\n\tlocal.Print()\n\n\t\/\/ check what services are available\n\tu.CheckServices()\n\n\t\/\/ Print capabilities\n\tu.Services.Print()\n\n\t\/\/ set cors domains\n\tcors.SetDomains(local.Settings.CORS.Sites, strings.Split(\"GET,POST,DELETE\", \",\"))\n\n}\n\nfunc main() {\n\tr := gin.Default()\n\n\tr.Use(cors.CORS())\n\n\tr.NoRoute(c.ErrorController)\n\n\t\/\/ requires mod perms\n\tmod := r.Group(\"\/mod\")\n\tmod.Use(validate.ValidateParams())\n\tmod.Use(auth.Auth(auth.Moderators))\n\n\tmod.DELETE(\"\/tag\/:id\", c.DeleteTagController)\n\tmod.DELETE(\"\/imagetag\/:image\/:tag\", c.DeleteImageTagController)\n\tmod.DELETE(\"\/thread\/:id\", c.DeleteThreadController)\n\tmod.DELETE(\"\/post\/:thread\/:id\", c.DeletePostController)\n\tmod.POST(\"\/sticky\/:thread\", c.StickyThreadController)\n\tmod.POST(\"\/close\/:thread\", c.CloseThreadController)\n\n\t\/\/ requires admin perms\n\tadmin := r.Group(\"\/admin\")\n\tadmin.Use(validate.ValidateParams())\n\tadmin.Use(auth.Auth(auth.Admins))\n\n\tadmin.DELETE(\"\/thread\/:id\", c.PurgeThreadController)\n\tadmin.DELETE(\"\/post\/:thread\/:id\", c.PurgePostController)\n\t\/\/admin.POST(\"\/ban\/:ip\", c.BanIpController)\n\t\/\/admin.DELETE(\"\/flushcache\", c.DeleteCacheController)\n\n\ts := &http.Server{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", local.Settings.Admin.Address, local.Settings.Admin.Port),\n\t\tHandler: r,\n\t}\n\n\tgracehttp.Serve(s)\n\n}\n\nfunc StartInfo() {\n\n\tfmt.Println(strings.Repeat(\"*\", 60))\n\tfmt.Printf(\"%-20v\\n\\n\", \"PRAM-ADMIN\")\n\tfmt.Printf(\"%-20v%40v\\n\", \"Version\", version)\n\tfmt.Println(strings.Repeat(\"*\", 60))\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nfunc usage() {\n\tos.Stderr.WriteString(`\nUsage: ltsvp OPTION... [FILE]...\nPrint selected parts of LTSV from each FILE to standard output.\n\nOptions:\n -k, --keys=LIST select only these keys (required)\n -d, --delimiter=STRING use STRING to separate parts (default: \\t)\n -r, --remain-ltsv print selected parts as LTSV\n -h, --help display this help text and exit\n -v, --version display version information and exit\n\nLIST is made up of keys separated by commas.\n host # Select host\n host,time,ua # Select host, time, and ua\n`[1:])\n}\n\nfunc version() {\n\tos.Stderr.WriteString(`\nv0.1.0\n`[1:])\n}\n\ntype Option struct {\n\tList string `short:\"k\" long:\"keys\" required:\"true\"`\n\tDelimiter string `short:\"d\" long:\"delimiter\" default:\"\\t\"`\n\tRemainLTSV bool `short:\"r\" long:\"remain-ltsv\"`\n\tIsHelp bool `short:\"h\" long:\"help\"`\n\tIsVersion bool `short:\"v\" long:\"version\"`\n\tFiles []string\n}\n\nfunc parseOption(args []string) (opt *Option, err error) {\n\topt = &Option{}\n\tflag := flags.NewParser(opt, flags.PassDoubleDash)\n\n\topt.Files, err = flag.ParseArgs(args)\n\tif err != nil && !opt.IsHelp && !opt.IsVersion {\n\t\treturn nil, err\n\t}\n\treturn opt, nil\n}\n\nfunc newLTSVScannerFromOption(opt *Option) (l *LTSVScanner, err error) {\n\tkeys := ParseKeysList(opt.List)\n\treader, err := argf.From(opt.Files)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl = NewLTSVScanner(keys, reader)\n\tl.Delimiter = opt.Delimiter\n\tl.RemainLTSV = opt.RemainLTSV\n\treturn l, nil\n}\n\nfunc do(l *LTSVScanner) error {\n\tfor l.Scan() {\n\t\tfmt.Println(l.Text())\n\t}\n\treturn l.Err()\n}\n\nfunc printErr(err error) {\n\tfmt.Fprintln(os.Stderr, \"ltsvp:\", err)\n}\n\nfunc guideToHelp() {\n\tos.Stderr.WriteString(`\nTry 'ltsvp --help' for more information.\n`[1:])\n}\n\nfunc _main() int {\n\topt, err := parseOption(os.Args[1:])\n\tif err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tswitch {\n\tcase opt.IsHelp:\n\t\tusage()\n\t\treturn 0\n\tcase opt.IsVersion:\n\t\tversion()\n\t\treturn 0\n\t}\n\n\tl, err := newLTSVScannerFromOption(opt)\n\tif err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif err := do(l); err != nil {\n\t\tprintErr(err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<commit_msg>Version up for changed the operation of --remain-ltsv<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nfunc usage() {\n\tos.Stderr.WriteString(`\nUsage: ltsvp OPTION... [FILE]...\nPrint selected parts of LTSV from each FILE to standard output.\n\nOptions:\n -k, --keys=LIST select only these keys (required)\n -d, --delimiter=STRING use STRING to separate parts (default: \\t)\n -r, --remain-ltsv print selected parts as LTSV\n -h, --help display this help text and exit\n -v, --version display version information and exit\n\nLIST is made up of keys separated by commas.\n host # Select host\n host,time,ua # Select host, time, and ua\n`[1:])\n}\n\nfunc version() {\n\tos.Stderr.WriteString(`\nv0.2.0\n`[1:])\n}\n\ntype Option struct {\n\tList string `short:\"k\" long:\"keys\" required:\"true\"`\n\tDelimiter string `short:\"d\" long:\"delimiter\" default:\"\\t\"`\n\tRemainLTSV bool `short:\"r\" long:\"remain-ltsv\"`\n\tIsHelp bool `short:\"h\" long:\"help\"`\n\tIsVersion bool `short:\"v\" long:\"version\"`\n\tFiles []string\n}\n\nfunc parseOption(args []string) (opt *Option, err error) {\n\topt = &Option{}\n\tflag := flags.NewParser(opt, flags.PassDoubleDash)\n\n\topt.Files, err = flag.ParseArgs(args)\n\tif err != nil && !opt.IsHelp && !opt.IsVersion {\n\t\treturn nil, err\n\t}\n\treturn opt, nil\n}\n\nfunc newLTSVScannerFromOption(opt *Option) (l *LTSVScanner, err error) {\n\tkeys := ParseKeysList(opt.List)\n\treader, err := argf.From(opt.Files)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl = NewLTSVScanner(keys, reader)\n\tl.Delimiter = opt.Delimiter\n\tl.RemainLTSV = opt.RemainLTSV\n\treturn l, nil\n}\n\nfunc do(l *LTSVScanner) error {\n\tfor l.Scan() {\n\t\tfmt.Println(l.Text())\n\t}\n\treturn l.Err()\n}\n\nfunc printErr(err error) {\n\tfmt.Fprintln(os.Stderr, \"ltsvp:\", err)\n}\n\nfunc guideToHelp() {\n\tos.Stderr.WriteString(`\nTry 'ltsvp --help' for more information.\n`[1:])\n}\n\nfunc _main() int {\n\topt, err := parseOption(os.Args[1:])\n\tif err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tswitch {\n\tcase opt.IsHelp:\n\t\tusage()\n\t\treturn 0\n\tcase opt.IsVersion:\n\t\tversion()\n\t\treturn 0\n\t}\n\n\tl, err := newLTSVScannerFromOption(opt)\n\tif err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif err := do(l); err != nil {\n\t\tprintErr(err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/yosssi\/ace\"\n)\n\nfunc topIndex(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\ttpl, err := ace.Load(\"base\", \"top\/index\", &ace.Options{BaseDir: \"views\", Asset: Asset})\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := tpl.Execute(w, nil); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", topIndex)\n\trouter.ServeFiles(\"\/public\/*filepath\", http.Dir(\"public\"))\n\n\tlog.Fatal(http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), router))\n}\n<commit_msg>Update main.go<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/yosssi\/ace\"\n)\n\nvar startupTime = time.Now()\n\nfunc topIndex(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\ttpl, err := ace.Load(\"base\", \"top\/index\", &ace.Options{BaseDir: \"views\", Asset: Asset})\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := tpl.Execute(w, nil); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc serveAsset(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\turl := r.URL.Path\n\n\tb, err := Asset(url[1:])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttp.ServeContent(w, r, url, startupTime, bytes.NewReader(b))\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\n\trouter.GET(\"\/\", topIndex)\n\trouter.GET(\"\/public\/*filepath\", serveAsset)\n\n\tlog.Fatal(http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), router))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/docopt\/docopt-go\"\n\t\"github.com\/yosssi\/ace\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ constants\nconst version = \"0.1\"\nconst license = \"MIT\"\n\n\/\/ Command line interface\nvar usage string = `ace - Command line utility for the Ace HTML template engine.\n\nUsage:\n ace [-i | --inner=<FILE>] [-m | --map=<FILE>] [-s | --separator=<SYMBOL>] [-p | --stdout] [ -o | --output=<FILE>] <FILE>\n ace [-h | --help]\n ace [-v | --version]\nOptions:\n -i --inner\t\tPath to the inner.ace file.\n -m --map\t\tPath to the mappings.map file.\n -s --separator\tSeparator for key\/value map file.\n -p --stdout \tPrint to stdout.\n -o --output\t\tWrite to custom file.\n -h --help \tShow this help.\n -v --version \tDisplay version.\nInfo:\n Author: \tAntonino Catinello\n Version: \t` + version + `\n License: \t` + license\n\nfunc main() {\n\t\/\/ handle options\n\targs, err := docopt.Parse(usage, nil, true, version, false)\n\n\tif err != nil || args[\"<FILE>\"] == nil {\n\t\tfmt.Fprintln(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ middle dot U+00B7 (unicode character)\n\t\/\/ keystroke: alt gr + ,\n\tvar separator string = \"\\u00B7\"\n\n\tif len(args[\"--separator\"].([]string)) > 0 {\n\t\tseparator = args[\"--separator\"].([]string)[0]\n\t}\n\n\t\/\/ variables\n\tvar base, inner, output string\n\n\tbase = strings.Split(args[\"<FILE>\"].(string), \".ace\")[0]\n\n\tif len(args[\"--inner\"].([]string)) > 0 {\n\t\tinner = strings.Split(args[\"--inner\"].([]string)[0], \".ace\")[0]\n\t} else {\n\t\tinner = \"\"\n\t}\n\n\t\/\/ load, execute, generate ace templates and data\n\ttpl, err := ace.Load(base, inner, nil)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(2)\n\t}\n\n\n\tif len(args[\"--output\"].([]string)) > 0 {\n\t\toutput = args[\"--output\"].([]string)[0]\n\t} else {\n\t\toutput = path.Base(base) + \".html\"\n\t}\n\n\tw, err := os.OpenFile(output, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0655)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(2)\n\t}\n\tdefer w.Close()\n\n\tvar data map[string]interface{}\n\n\tif len(args[\"--map\"].([]string)) > 0 {\n\t\tdata = FileToMap(args[\"--map\"].([]string)[0], separator)\n\t} else {\n\t\tdata = make(map[string]interface{})\n\t}\n\n\tif args[\"--stdout\"].(bool) {\n\t\tif err := tpl.Execute(os.Stdout, data); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(2)\n\t\t}\n\t} else {\n\t\tif err := tpl.Execute(os.NewFile(w.Fd(), output), data); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n}\n\nfunc FileToMap(fileName, separator string) map[string]interface{} {\n\t\/\/ hash table variable\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(3)\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t\/\/ is the line long enough to be considered?\n\t\tif len(line) < len(separator)+2 {\n\t\t\tcontinue\n\t\t\t\/\/ is the string a slice of strings []string?\n\t\t} else if strings.Contains(line, separator) {\n\t\t\tparts := strings.Split(line, separator)\n\t\t\tif strings.Contains(parts[1], \"[]string\") {\n\t\t\t\tslice := strings.Split(parts[1], \"[]string{\")\n\t\t\t\tcollection := strings.TrimSuffix(slice[1], \"}\")\n\t\t\t\tvalues := strings.Split(collection, \",\")\n\n\t\t\t\tdata[parts[0]] = []string{}\n\t\t\t\tfor _, v := range values {\n\t\t\t\t\tdata[parts[0]] = append(data[parts[0]].([]string), strings.TrimPrefix(strings.TrimSuffix(strings.TrimSpace(v), \"\\\"\"), \"\\\"\"))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdata[parts[0]] = parts[1]\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(3)\n\t}\n\n\t\/\/\tfmt.Println(data[\"Msgs\"].([]string)[1])\n\t\/\/\tfmt.Println(data)\n\n\treturn data\n}\n<commit_msg>clean up<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/docopt\/docopt-go\"\n\t\"github.com\/yosssi\/ace\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ constants\nconst version = \"0.1\"\nconst license = \"MIT\"\n\n\/\/ Command line interface\nvar usage string = `ace - Command line utility for the Ace HTML template engine.\n\nUsage:\n ace [-i | --inner=<FILE>] [-m | --map=<FILE>] [-s | --separator=<SYMBOL>] [-p | --stdout] [-o | --output=<FILE>] <FILE>\n ace [-h | --help]\n ace [-v | --version]\nOptions:\n -i --inner\t\tPath to the inner.ace file.\n -m --map\t\tPath to the mappings.map file.\n -s --separator\tSeparator for key\/value map file.\n -p --stdout \tPrint to stdout.\n -o --output\t\tWrite to custom file.\n -h --help \tShow this help.\n -v --version \tDisplay version.\nInfo:\n Author: \tAntonino Catinello\n Version: \t` + version + `\n License: \t` + license\n\nfunc main() {\n\t\/\/ handle options\n\targs, err := docopt.Parse(usage, nil, true, version, false)\n\n\tif err != nil || args[\"<FILE>\"] == nil {\n\t\tfmt.Fprintln(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ middle dot U+00B7 (unicode character)\n\t\/\/ keystroke: alt gr + ,\n\tvar separator string = \"\\u00B7\"\n\n\tif len(args[\"--separator\"].([]string)) > 0 {\n\t\tseparator = args[\"--separator\"].([]string)[0]\n\t}\n\n\t\/\/ variables\n\tvar base, inner, output string\n\n\tbase = strings.Split(args[\"<FILE>\"].(string), \".ace\")[0]\n\n\tif len(args[\"--inner\"].([]string)) > 0 {\n\t\tinner = strings.Split(args[\"--inner\"].([]string)[0], \".ace\")[0]\n\t} else {\n\t\tinner = \"\"\n\t}\n\n\tif len(args[\"--output\"].([]string)) > 0 {\n\t\toutput = args[\"--output\"].([]string)[0]\n\t} else {\n\t\toutput = path.Base(base) + \".html\"\n\t}\n\n\t\/\/ load, execute, generate ace templates and data\n\ttpl, err := ace.Load(base, inner, nil)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(2)\n\t}\n\n\tvar data map[string]interface{}\n\n\tif len(args[\"--map\"].([]string)) > 0 {\n\t\tdata = FileToMap(args[\"--map\"].([]string)[0], separator)\n\t} else {\n\t\tdata = make(map[string]interface{})\n\t}\n\n\tif args[\"--stdout\"].(bool) {\n\t\tif err := tpl.Execute(os.Stdout, data); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(2)\n\t\t}\n\t} else {\n\t\tw, err := os.OpenFile(output, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0655)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(2)\n\t\t}\n\t\tdefer w.Close()\n\n\t\tif err := tpl.Execute(os.NewFile(w.Fd(), output), data); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n}\n\n\/\/ FileToMap opens the fileName and parses the content per line\n\/\/ and separates key\/values by the given unicode separator\n\/\/ to return a map with the content. Keys are of string and values\n\/\/ are considered string or []string.\nfunc FileToMap(fileName, separator string) map[string]interface{} {\n\t\/\/ hash table variable\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(3)\n\t}\n\tdefer file.Close()\n\n\t\/\/ scan through file\n\tscanner := bufio.NewScanner(file)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\t\/\/ is the line long enough to be considered?\n\t\tif len(line) < len(separator)+2 {\n\t\t\tcontinue\n\t\t} else if strings.Contains(line, separator) {\n\t\t\tparts := strings.Split(line, separator)\n\n\t\t\t\/\/ is the string a slice of strings []string?\n\t\t\tif strings.Contains(parts[1], \"[]string\") {\n\t\t\t\tslice := strings.Split(parts[1], \"[]string{\")\n\t\t\t\tcollection := strings.TrimSuffix(slice[1], \"}\")\n\t\t\t\tvalues := strings.Split(collection, \",\")\n\n\t\t\t\tdata[parts[0]] = []string{}\n\t\t\t\tfor _, v := range values {\n\t\t\t\t\tdata[parts[0]] = append(data[parts[0]].([]string), strings.TrimPrefix(strings.TrimSuffix(strings.TrimSpace(v), \"\\\"\"), \"\\\"\"))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ it is a string\n\t\t\t\tdata[parts[0]] = parts[1]\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(3)\n\t}\n\n\treturn data\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/googollee\/go-socket.io\"\n\n\t\"github.com\/tiltfactor\/toto\/domain\"\n\t\"github.com\/tiltfactor\/toto\/utils\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/jesusrmoreno\/sad-squid\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\n\/\/ Version ...\nvar Version = \"1.3.2\"\n\n\/\/ Events that are exposed to the client\nconst (\n\tgroupAssignment = \"group-assignment\"\n\troomMessage = \"room-message\"\n\tleaveGroup = \"leave-group\"\n\tjoinGroup = \"join-group\"\n\tjoinGame = \"join-game\"\n\tgetPeers = \"get-peers\"\n\tmakeMove = \"make-move\"\n\tmoveMade = \"move-made\"\n\tinQueue = \"in-queue\"\n\n\tserverError = \"server-error\"\n\tclientError = \"client-error\"\n)\n\n\/\/ Response ...\ntype Response struct {\n\tTimestamp int64 `json:\"timeStamp\"`\n\tKind string `json:\"kind\"`\n\tData map[string]interface{} `json:\"data\"`\n}\n\n\/\/ Method for creating errors more quickly.\nfunc errorResponse(kind, err string) Response {\n\td := map[string]interface{}{}\n\td[\"error\"] = err\n\tr := Response{\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tKind: kind,\n\t\tData: d,\n\t}\n\treturn r\n}\n\n\/\/ GamesInfo serves to store the metadata for different games\ntype GamesInfo struct {\n\t\/\/ These must be thread safe so we use the ConcurrentMap types\n\tTurnMap *utils.ConcurrentStringIntMap\n\t\/\/ Maps the player id to the room\n\tRoomMap *utils.ConcurrentStringMap\n}\n\n\/\/ ReadGameFiles reads the provided directory for files that conform to the\n\/\/ game struct definition, these must be json files, and loads them into our\n\/\/ game map.\nfunc ReadGameFiles(gameDir string) (domain.GameMap, error) {\n\tfiles, err := filepath.Glob(gameDir + \"\/*.toml\")\n\tgm := domain.GameMap{}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, f := range files {\n\t\traw, err := os.Open(f)\n\t\tdefer raw.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr := io.Reader(raw)\n\t\tdummy := domain.Game{}\n\t\tif meta, err := toml.DecodeReader(r, &dummy); err != nil {\n\t\t\tlog.Println(meta)\n\t\t\treturn nil, errors.New(\"Invalid configuration in file: \" + f)\n\t\t}\n\t\tg := domain.Game{\n\t\t\tPlayers: dummy.Players,\n\t\t\tTitle: dummy.Title,\n\t\t\tUUID: dummy.UUID,\n\t\t\tLobby: domain.NewLobby(),\n\t\t}\n\t\tg.FileName = f\n\t\tif _, exists := gm[g.UUID]; exists {\n\t\t\treturn nil, errors.New(\"uniqueKey conflict between: \" + f + \" and \" +\n\t\t\t\tgm[g.UUID].FileName)\n\t\t}\n\t\tgm[g.UUID] = g\n\t}\n\treturn gm, nil\n}\n\n\/\/ turnKey returns the generated key for storing turns\nfunc turnKey(playerID, roomName string) string {\n\treturn roomName + \":\" + playerID\n}\n\n\/\/ QueuePlayers adds players to the game's lobby to wait for a partner.\n\/\/ Players are queued on a first come first serve basis.\nfunc QueuePlayers(g domain.Game, p domain.Player) bool {\n\tpq := g.Lobby\n\tif !pq.Contains(p.Comm.Id()) {\n\t\tpq.AddToQueue(p)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ GroupPlayers attempts to creates groups of players of the size defined in the\n\/\/ game files. It also sets the player turns.\n\/\/ It returns the name of the room and true if it succeeded or\n\/\/ an empty string and false if it did not.\nfunc GroupPlayers(g domain.Game, gi *GamesInfo) (string, []domain.Player) {\n\tlog.Println(\"Attempting to group players for game\", g.UUID)\n\tpq := g.Lobby\n\tneeded := g.Players\n\tavailable := pq.Size()\n\tif available >= needed {\n\t\tteam := []domain.Player{}\n\t\troomName := squid.GenerateSimpleID()\n\t\tfor i := 0; i < needed; i++ {\n\t\t\tp := pq.PopFromQueue()\n\t\t\tteam = append(team, p)\n\n\t\t\t\/\/ Place the player in the created room.\n\t\t\tp.Comm.Join(roomName)\n\n\t\t\tplayerID := p.Comm.Id()\n\t\t\tgi.RoomMap.Set(playerID, roomName)\n\n\t\t\t\/\/ We generate a turn key composed of the room name and player id to store\n\t\t\t\/\/ the turn. turns are assigned based off of how they are popped from the\n\t\t\t\/\/ queue.\n\t\t\ttk := turnKey(playerID, roomName)\n\t\t\tgi.TurnMap.Set(tk, i)\n\t\t}\n\t\treturn roomName, team\n\t}\n\treturn \"\", nil\n}\n\n\/\/ Cross origin server is used to add cross-origin request capabilities to the\n\/\/ socket server. It wraps the socketio.Server\ntype crossOriginServer struct {\n\tServer *socketio.Server\n}\n\n\/\/ ServeHTTP is implemented to add the needed header for CORS in socketio.\n\/\/ This must be named ServeHTTP and take the\n\/\/ (http.ResponseWriter, r *http.Request) to satisfy the http.Handler interface\nfunc (s crossOriginServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\torigin := r.Header.Get(\"Origin\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\ts.Server.ServeHTTP(w, r)\n}\n\nfunc handlePlayerJoin(so socketio.Socket, req json.RawMessage,\n\tgames domain.GameMap, info GamesInfo) {\n\tm := map[string]string{}\n\tif err := json.Unmarshal(req, &m); err != nil {\n\t\tlog.Println(\"Invalid JSON from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid JSON\"))\n\t}\n\tgameID, exists := m[\"gameId\"]\n\tif !exists {\n\t\tlog.Println(\"No game included from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Must include GameID\"))\n\t}\n\n\tlog.Println(so.Id(), \"attempting to join game\", gameID)\n\t\/\/ If the player attempts to connect to a game we first have to make\n\t\/\/ sure that they are joining a game that is registered with our server.\n\tif g, exists := games[gameID]; exists {\n\t\t\/\/ First queue the player\n\t\tnewPlayer := domain.Player{\n\t\t\tComm: so,\n\t\t}\n\t\tif didQueue := QueuePlayers(g, newPlayer); didQueue {\n\t\t\t\/\/ Create the response we're going to send\n\t\t\tdata := map[string]interface{}{}\n\t\t\tr := Response{\n\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\tKind: inQueue,\n\t\t\t\tData: data,\n\t\t\t}\n\t\t\tdata[\"message\"] = \"You are in the queue for game: \" + g.Title\n\t\t\tso.Emit(inQueue, r)\n\t\t\tif rn, group := GroupPlayers(g, &info); group != nil && rn != \"\" {\n\t\t\t\t\/\/ Tell each member what their room name is as well as their turn\n\t\t\t\tfor i, p := range group {\n\t\t\t\t\tdata := map[string]interface{}{}\n\t\t\t\t\tdata[\"roomName\"] = rn\n\t\t\t\t\tr := Response{\n\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\tKind: groupAssignment,\n\t\t\t\t\t\tData: data,\n\t\t\t\t\t}\n\t\t\t\t\tdata[\"turnNumber\"] = i\n\t\t\t\t\tp.Comm.Emit(groupAssignment, r)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Create the response we're going to send\n\t\t\tdata := map[string]interface{}{}\n\t\t\tr := Response{\n\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\tKind: inQueue,\n\t\t\t\tData: data,\n\t\t\t}\n\t\t\tdata[\"message\"] = \"Already in queue\"\n\t\t\tso.Emit(clientError, r)\n\t\t}\n\t\t\/\/ Then attempt to form a group based off of this.\n\n\t} else {\n\t\tlog.Println(\"Invalid GameId from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid GameID\"))\n\t}\n}\n\n\/\/ StartServer ...\nfunc StartServer(c *cli.Context) {\n\tgames, err := ReadGameFiles(\".\/games\")\n\tfor key, game := range games {\n\t\tlog.Println(\"Loaded:\", key, \"from\", game.FileName)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver, err := socketio.NewServer(nil)\n\ts := crossOriginServer{\n\t\tServer: server,\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tinfo := GamesInfo{\n\t\tRoomMap: utils.NewConcurrentStringMap(),\n\t\tTurnMap: utils.NewConcurrentStringIntMap(),\n\t}\n\n\tserver.On(\"connection\", func(so socketio.Socket) {\n\t\tlog.Println(\"Connection from\", so.Id())\n\n\t\t\/\/ Makes it so that the player joins a room with his\/her unique id.\n\t\tso.Join(so.Id())\n\t\tso.On(joinGame, func(r json.RawMessage) {\n\t\t\thandlePlayerJoin(so, r, games, info)\n\t\t})\n\n\t\tso.On(\"disconnection\", func() {\n\t\t\t\/\/ This is really really bad unfuture proof, slow code.\n\t\t\t\/\/ Please Refactor me\n\t\t\tfor key := range games {\n\t\t\t\tg := games[key]\n\t\t\t\tg.Lobby.Remove(so.Id())\n\t\t\t}\n\t\t})\n\t\tso.On(makeMove, func(move json.RawMessage) {\n\t\t\troom, exists := info.RoomMap.Get(so.Id())\n\t\t\tlog.Println(string(move))\n\t\t\tif exists {\n\t\t\t\tm := map[string]interface{}{}\n\t\t\t\tif err := json.Unmarshal(move, &m); err != nil {\n\t\t\t\t\tlog.Println(\"Invalid JSON from\", so.Id(), string(move))\n\t\t\t\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid JSON\"))\n\t\t\t\t}\n\t\t\t\tturn, exists := info.TurnMap.Get(room + \":\" + so.Id())\n\t\t\t\tif !exists {\n\t\t\t\t\tlog.Println(\"No turn assigned\", so.Id())\n\t\t\t\t\tso.Emit(serverError, errorResponse(serverError, \"No turn assigned\"))\n\t\t\t\t}\n\t\t\t\t\/\/ Overwrites who's turn it is using the turn map assigned at join.\n\t\t\t\tm[\"madeBy\"] = turn\n\t\t\t\tm[\"madeById\"] = so.Id()\n\t\t\t\tr := Response{\n\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\tKind: moveMade,\n\t\t\t\t\tData: m,\n\t\t\t\t}\n\t\t\t\tlog.Println(r)\n\t\t\t\tso.BroadcastTo(room, moveMade, r)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"No room assigned for\", so.Id())\n\t\t\t\tso.Emit(serverError, errorResponse(serverError, \"Not in any Room\"))\n\t\t\t}\n\t\t})\n\t})\n\n\tport := c.String(\"port\")\n\n\thttp.Handle(\"\/socket.io\/\", s)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/asset\")))\n\tlog.Println(\"Serving at localhost:\" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Toto\"\n\tapp.Usage = \"a server for creating quick prototype websocket based games.\"\n\tapp.Action = StartServer\n\tapp.Version = Version\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: \"3000\",\n\t\t\tUsage: \"The port to run the server on\",\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<commit_msg>:hocho: Moved timestamp to own function<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/googollee\/go-socket.io\"\n\n\t\"github.com\/tiltfactor\/toto\/domain\"\n\t\"github.com\/tiltfactor\/toto\/utils\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/jesusrmoreno\/sad-squid\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\n\/\/ Version ...\nvar Version = \"1.3.2\"\n\n\/\/ Events that are exposed to the client\nconst (\n\tgroupAssignment = \"group-assignment\"\n\troomMessage = \"room-message\"\n\tleaveGroup = \"leave-group\"\n\tjoinGroup = \"join-group\"\n\tjoinGame = \"join-game\"\n\tgetPeers = \"get-peers\"\n\tmakeMove = \"make-move\"\n\tmoveMade = \"move-made\"\n\tinQueue = \"in-queue\"\n\n\tserverError = \"server-error\"\n\tclientError = \"client-error\"\n)\n\n\/\/ Response ...\ntype Response struct {\n\tTimestamp int64 `json:\"timeStamp\"`\n\tKind string `json:\"kind\"`\n\tData map[string]interface{} `json:\"data\"`\n}\n\n\/\/ timestamp returns the current timestamp\nfunc timestamp() int64 {\n\treturn time.Now().UnixNano()\n}\n\n\/\/ Method for creating errors more quickly.\nfunc errorResponse(kind, err string) Response {\n\td := map[string]interface{}{}\n\td[\"error\"] = err\n\tr := Response{\n\t\tTimestamp: timestamp(),\n\t\tKind: kind,\n\t\tData: d,\n\t}\n\treturn r\n}\n\n\/\/ GamesInfo serves to store the metadata for different games\ntype GamesInfo struct {\n\t\/\/ These must be thread safe so we use the ConcurrentMap types\n\tTurnMap *utils.ConcurrentStringIntMap\n\t\/\/ Maps the player id to the room\n\tRoomMap *utils.ConcurrentStringMap\n}\n\n\/\/ ReadGameFiles reads the provided directory for files that conform to the\n\/\/ game struct definition, these must be json files, and loads them into our\n\/\/ game map.\nfunc ReadGameFiles(gameDir string) (domain.GameMap, error) {\n\tfiles, err := filepath.Glob(gameDir + \"\/*.toml\")\n\tgm := domain.GameMap{}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, f := range files {\n\t\traw, err := os.Open(f)\n\t\tdefer raw.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr := io.Reader(raw)\n\t\tdummy := domain.Game{}\n\t\tif meta, err := toml.DecodeReader(r, &dummy); err != nil {\n\t\t\tlog.Println(meta)\n\t\t\treturn nil, errors.New(\"Invalid configuration in file: \" + f)\n\t\t}\n\t\tg := domain.Game{\n\t\t\tPlayers: dummy.Players,\n\t\t\tTitle: dummy.Title,\n\t\t\tUUID: dummy.UUID,\n\t\t\tLobby: domain.NewLobby(),\n\t\t}\n\t\tg.FileName = f\n\t\tif _, exists := gm[g.UUID]; exists {\n\t\t\treturn nil, errors.New(\"uniqueKey conflict between: \" + f + \" and \" +\n\t\t\t\tgm[g.UUID].FileName)\n\t\t}\n\t\tgm[g.UUID] = g\n\t}\n\treturn gm, nil\n}\n\n\/\/ turnKey returns the generated key for storing turns\nfunc turnKey(playerID, roomName string) string {\n\treturn roomName + \":\" + playerID\n}\n\n\/\/ QueuePlayers adds players to the game's lobby to wait for a partner.\n\/\/ Players are queued on a first come first serve basis.\nfunc QueuePlayers(g domain.Game, p domain.Player) bool {\n\tpq := g.Lobby\n\tif !pq.Contains(p.Comm.Id()) {\n\t\tpq.AddToQueue(p)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ GroupPlayers attempts to creates groups of players of the size defined in the\n\/\/ game files. It also sets the player turns.\n\/\/ It returns the name of the room and true if it succeeded or\n\/\/ an empty string and false if it did not.\nfunc GroupPlayers(g domain.Game, gi *GamesInfo) (string, []domain.Player) {\n\tlog.Println(\"Attempting to group players for game\", g.UUID)\n\tpq := g.Lobby\n\tneeded := g.Players\n\tavailable := pq.Size()\n\tif available >= needed {\n\t\tteam := []domain.Player{}\n\t\troomName := squid.GenerateSimpleID()\n\t\tfor i := 0; i < needed; i++ {\n\t\t\tp := pq.PopFromQueue()\n\t\t\tteam = append(team, p)\n\n\t\t\t\/\/ Place the player in the created room.\n\t\t\tp.Comm.Join(roomName)\n\n\t\t\tplayerID := p.Comm.Id()\n\t\t\tgi.RoomMap.Set(playerID, roomName)\n\n\t\t\t\/\/ We generate a turn key composed of the room name and player id to store\n\t\t\t\/\/ the turn. turns are assigned based off of how they are popped from the\n\t\t\t\/\/ queue.\n\t\t\ttk := turnKey(playerID, roomName)\n\t\t\tgi.TurnMap.Set(tk, i)\n\t\t}\n\t\treturn roomName, team\n\t}\n\treturn \"\", nil\n}\n\n\/\/ Cross origin server is used to add cross-origin request capabilities to the\n\/\/ socket server. It wraps the socketio.Server\ntype crossOriginServer struct {\n\tServer *socketio.Server\n}\n\n\/\/ ServeHTTP is implemented to add the needed header for CORS in socketio.\n\/\/ This must be named ServeHTTP and take the\n\/\/ (http.ResponseWriter, r *http.Request) to satisfy the http.Handler interface\nfunc (s crossOriginServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\torigin := r.Header.Get(\"Origin\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\ts.Server.ServeHTTP(w, r)\n}\n\nfunc handlePlayerJoin(so socketio.Socket, req json.RawMessage,\n\tgames domain.GameMap, info GamesInfo) {\n\tm := map[string]string{}\n\tif err := json.Unmarshal(req, &m); err != nil {\n\t\tlog.Println(\"Invalid JSON from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid JSON\"))\n\t}\n\tgameID, exists := m[\"gameId\"]\n\tif !exists {\n\t\tlog.Println(\"No game included from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Must include GameID\"))\n\t}\n\n\tlog.Println(so.Id(), \"attempting to join game\", gameID)\n\t\/\/ If the player attempts to connect to a game we first have to make\n\t\/\/ sure that they are joining a game that is registered with our server.\n\tif g, exists := games[gameID]; exists {\n\t\t\/\/ First queue the player\n\t\tnewPlayer := domain.Player{\n\t\t\tComm: so,\n\t\t}\n\t\tif didQueue := QueuePlayers(g, newPlayer); didQueue {\n\t\t\t\/\/ Create the response we're going to send\n\t\t\tdata := map[string]interface{}{}\n\t\t\tr := Response{\n\t\t\t\tTimestamp: timestamp(),\n\t\t\t\tKind: inQueue,\n\t\t\t\tData: data,\n\t\t\t}\n\t\t\tdata[\"message\"] = \"You are in the queue for game: \" + g.Title\n\t\t\tso.Emit(inQueue, r)\n\t\t\tif rn, group := GroupPlayers(g, &info); group != nil && rn != \"\" {\n\t\t\t\t\/\/ Tell each member what their room name is as well as their turn\n\t\t\t\tfor i, p := range group {\n\t\t\t\t\tdata := map[string]interface{}{}\n\t\t\t\t\tdata[\"roomName\"] = rn\n\t\t\t\t\tr := Response{\n\t\t\t\t\t\tTimestamp: timestamp(),\n\t\t\t\t\t\tKind: groupAssignment,\n\t\t\t\t\t\tData: data,\n\t\t\t\t\t}\n\t\t\t\t\tdata[\"turnNumber\"] = i\n\t\t\t\t\tp.Comm.Emit(groupAssignment, r)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Create the response we're going to send\n\t\t\tdata := map[string]interface{}{}\n\t\t\tr := Response{\n\t\t\t\tTimestamp: timestamp(),\n\t\t\t\tKind: inQueue,\n\t\t\t\tData: data,\n\t\t\t}\n\t\t\tdata[\"message\"] = \"Already in queue\"\n\t\t\tso.Emit(clientError, r)\n\t\t}\n\t\t\/\/ Then attempt to form a group based off of this.\n\n\t} else {\n\t\tlog.Println(\"Invalid GameId from\", so.Id())\n\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid GameID\"))\n\t}\n}\n\n\/\/ StartServer ...\nfunc StartServer(c *cli.Context) {\n\tgames, err := ReadGameFiles(\".\/games\")\n\tfor key, game := range games {\n\t\tlog.Println(\"Loaded:\", key, \"from\", game.FileName)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver, err := socketio.NewServer(nil)\n\ts := crossOriginServer{\n\t\tServer: server,\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tinfo := GamesInfo{\n\t\tRoomMap: utils.NewConcurrentStringMap(),\n\t\tTurnMap: utils.NewConcurrentStringIntMap(),\n\t}\n\n\tserver.On(\"connection\", func(so socketio.Socket) {\n\t\tlog.Println(\"Connection from\", so.Id())\n\n\t\t\/\/ Makes it so that the player joins a room with his\/her unique id.\n\t\tso.Join(so.Id())\n\t\tso.On(joinGame, func(r json.RawMessage) {\n\t\t\thandlePlayerJoin(so, r, games, info)\n\t\t})\n\n\t\tso.On(\"disconnection\", func() {\n\t\t\t\/\/ This is really really bad unfuture proof, slow code.\n\t\t\t\/\/ Please Refactor me\n\t\t\tfor key := range games {\n\t\t\t\tg := games[key]\n\t\t\t\tg.Lobby.Remove(so.Id())\n\t\t\t}\n\t\t})\n\t\tso.On(makeMove, func(move json.RawMessage) {\n\t\t\troom, exists := info.RoomMap.Get(so.Id())\n\t\t\tlog.Println(string(move))\n\t\t\tif exists {\n\t\t\t\tm := map[string]interface{}{}\n\t\t\t\tif err := json.Unmarshal(move, &m); err != nil {\n\t\t\t\t\tlog.Println(\"Invalid JSON from\", so.Id(), string(move))\n\t\t\t\t\tso.Emit(clientError, errorResponse(clientError, \"Invalid JSON\"))\n\t\t\t\t}\n\t\t\t\tturn, exists := info.TurnMap.Get(room + \":\" + so.Id())\n\t\t\t\tif !exists {\n\t\t\t\t\tlog.Println(\"No turn assigned\", so.Id())\n\t\t\t\t\tso.Emit(serverError, errorResponse(serverError, \"No turn assigned\"))\n\t\t\t\t}\n\t\t\t\t\/\/ Overwrites who's turn it is using the turn map assigned at join.\n\t\t\t\tm[\"madeBy\"] = turn\n\t\t\t\tm[\"madeById\"] = so.Id()\n\t\t\t\tr := Response{\n\t\t\t\t\tTimestamp: timestamp(),\n\t\t\t\t\tKind: moveMade,\n\t\t\t\t\tData: m,\n\t\t\t\t}\n\t\t\t\tlog.Println(r)\n\t\t\t\tso.BroadcastTo(room, moveMade, r)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"No room assigned for\", so.Id())\n\t\t\t\tso.Emit(serverError, errorResponse(serverError, \"Not in any Room\"))\n\t\t\t}\n\t\t})\n\t})\n\n\tport := c.String(\"port\")\n\n\thttp.Handle(\"\/socket.io\/\", s)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/asset\")))\n\tlog.Println(\"Serving at localhost:\" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Toto\"\n\tapp.Usage = \"a server for creating quick prototype websocket based games.\"\n\tapp.Action = StartServer\n\tapp.Version = Version\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: \"3000\",\n\t\t\tUsage: \"The port to run the server on\",\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tdir = flag.String(\"d\", \"budget\", \"budget data directory\")\n\tmonth = flag.String(\"m\", \"\", \"year\/month in format YYYYMM\")\n)\n\n\/\/ Transaction is a single transaction with a cost and name\ntype Transaction struct {\n\tCost float64\n\tName string\n}\n\n\/\/ Budget is a monthly budget\ntype Budget struct {\n\tTotal float64\n\tRemaining float64\n\tTransactions []Transaction\n}\n\nfunc parseFile(name string) (Budget, error) {\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\tr := bufio.NewReader(file)\n\tb := Budget{}\n\t\/\/ first line is the total for the month\n\tline, _, err := r.ReadLine()\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\ttotal, err := strconv.ParseFloat(string(line), 64)\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\tb.Total = total\n\tfor {\n\t\tline, _, err := r.ReadLine()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn Budget{}, err\n\t\t}\n\t\tt := strings.Split(string(line), \",\")\n\t\tif len(t) != 2 {\n\t\t\treturn Budget{}, fmt.Errorf(\"invalid line %q\", line)\n\t\t}\n\t\tcost, err := strconv.ParseFloat(t[0], 64)\n\t\tif err != nil {\n\t\t\treturn Budget{}, err\n\t\t}\n\t\ttrans := Transaction{Cost: cost, Name: t[1]}\n\n\t\tb.Transactions = append(b.Transactions, trans)\n\t}\n\n\tb.Remaining = b.Total\n\tfor _, trans := range b.Transactions {\n\t\tb.Remaining = b.Remaining + trans.Cost\n\t}\n\treturn b, nil\n}\n\n\/\/ Pair is a data structure to hold a key\/value pair.\n\/\/ modified from https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/FT7cjmcL7gw\ntype Pair struct {\n\tKey string\n\tValue float64\n}\n\n\/\/ PairList is a slice of Pairs that implements sort.Interface to sort by Value.\ntype PairList []Pair\n\nfunc (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p PairList) Len() int { return len(p) }\nfunc (p PairList) Less(i, j int) bool { return p[i].Value < p[j].Value }\n\n\/\/ A function to turn a map into a PairList, then sort and return it.\nfunc sortMapByValue(m map[string]float64) PairList {\n\tp := make(PairList, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tp[i] = Pair{k, v}\n\t\ti++\n\t}\n\tsort.Sort(p)\n\treturn p\n}\n\nfunc main() {\n\tflag.Parse()\n\tb, err := parseFile(fmt.Sprintf(\"%s\/%s.txt\", *dir, *month))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttop := map[string]float64{}\n\tfmt.Println(\"Total:\", b.Total)\n\tfmt.Println(\"Remaining:\", b.Remaining)\n\tfor _, t := range b.Transactions {\n\t\ttop[t.Name] += t.Cost\n\t}\n\tfmt.Println(\"Top costs:\")\n\tpl := sortMapByValue(top)\n\tfor _, p := range pl {\n\t\tfmt.Printf(\"%s:%.2f\\n\", p.Key, p.Value)\n\t}\n}\n<commit_msg>add a tab<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tdir = flag.String(\"d\", \"budget\", \"budget data directory\")\n\tmonth = flag.String(\"m\", \"\", \"year\/month in format YYYYMM\")\n)\n\n\/\/ Transaction is a single transaction with a cost and name\ntype Transaction struct {\n\tCost float64\n\tName string\n}\n\n\/\/ Budget is a monthly budget\ntype Budget struct {\n\tTotal float64\n\tRemaining float64\n\tTransactions []Transaction\n}\n\nfunc parseFile(name string) (Budget, error) {\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\tr := bufio.NewReader(file)\n\tb := Budget{}\n\t\/\/ first line is the total for the month\n\tline, _, err := r.ReadLine()\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\ttotal, err := strconv.ParseFloat(string(line), 64)\n\tif err != nil {\n\t\treturn Budget{}, err\n\t}\n\tb.Total = total\n\tfor {\n\t\tline, _, err := r.ReadLine()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn Budget{}, err\n\t\t}\n\t\tt := strings.Split(string(line), \",\")\n\t\tif len(t) != 2 {\n\t\t\treturn Budget{}, fmt.Errorf(\"invalid line %q\", line)\n\t\t}\n\t\tcost, err := strconv.ParseFloat(t[0], 64)\n\t\tif err != nil {\n\t\t\treturn Budget{}, err\n\t\t}\n\t\ttrans := Transaction{Cost: cost, Name: t[1]}\n\n\t\tb.Transactions = append(b.Transactions, trans)\n\t}\n\n\tb.Remaining = b.Total\n\tfor _, trans := range b.Transactions {\n\t\tb.Remaining = b.Remaining + trans.Cost\n\t}\n\treturn b, nil\n}\n\n\/\/ Pair is a data structure to hold a key\/value pair.\n\/\/ modified from https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/FT7cjmcL7gw\ntype Pair struct {\n\tKey string\n\tValue float64\n}\n\n\/\/ PairList is a slice of Pairs that implements sort.Interface to sort by Value.\ntype PairList []Pair\n\nfunc (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p PairList) Len() int { return len(p) }\nfunc (p PairList) Less(i, j int) bool { return p[i].Value < p[j].Value }\n\n\/\/ A function to turn a map into a PairList, then sort and return it.\nfunc sortMapByValue(m map[string]float64) PairList {\n\tp := make(PairList, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tp[i] = Pair{k, v}\n\t\ti++\n\t}\n\tsort.Sort(p)\n\treturn p\n}\n\nfunc main() {\n\tflag.Parse()\n\tb, err := parseFile(fmt.Sprintf(\"%s\/%s.txt\", *dir, *month))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttop := map[string]float64{}\n\tfmt.Println(\"Total:\", b.Total)\n\tfmt.Println(\"Remaining:\", b.Remaining)\n\tfor _, t := range b.Transactions {\n\t\ttop[t.Name] += t.Cost\n\t}\n\tfmt.Println(\"Top costs:\")\n\tpl := sortMapByValue(top)\n\tfor _, p := range pl {\n\t\tfmt.Printf(\"\\t%s:%.2f\\n\", p.Key, p.Value)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"io\/ioutil\"\n\t\"encoding\/json\"\n\t\"github.com\/rnubel\/pgmgr\/pgmgr\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"fmt\"\n)\n\nfunc displayErrorOrMessage(err error, args... interface{}) {\n\tif err != nil {\n\t\tfmt.Println(os.Stderr, \"Error: \", err)\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Println(args...)\n\t}\n}\n\nfunc main() {\n\tconfig := &pgmgr.Config{}\n\tapp := cli.NewApp()\n\n\tapp.Name = \"pgmgr\"\n\tapp.Usage = \"manage your app's Postgres database\"\n\tapp.Version = \"0.0.1\"\n\n\ts := make([]string, 0)\n\n\tapp.Flags = []cli.Flag {\n\t\tcli.StringFlag{\n\t\t\tName: \"config-file, c\",\n\t\t\tValue: \".pgmgr.json\",\n\t\t\tUsage: \"set the path to the JSON configuration file specifying your DB parameters\",\n\t\t\tEnvVar: \"PGMGR_CONFIG_FILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"database, d\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the database name which pgmgr will connect to or try to create\",\n\t\t\tEnvVar: \"PGMGR_DATABASE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"username, u\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the username which pgmgr will connect with\",\n\t\t\tEnvVar: \"PGMGR_USERNAME\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, P\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the password which pgmgr will connect with\",\n\t\t\tEnvVar: \"PGMGR_PASSWORD\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"host, H\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the host which pgmgr will connect to\",\n\t\t\tEnvVar: \"PGMGR_HOST\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: 0,\n\t\t\tUsage: \"the port which pgmgr will connect to\",\n\t\t\tEnvVar: \"PGMGR_PORT\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"url\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"connection URL or DSN containing connection info; will override the other params if given\",\n\t\t\tEnvVar: \"PGMGR_URL\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dump-file\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"where to dump or load the database structure and contents to or from\",\n\t\t\tEnvVar: \"PGMGR_DUMP_FILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"migration-folder\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"folder containing the migrations to apply\",\n\t\t\tEnvVar: \"PGMGR_MIGRATION_FOLDER\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"seed-tables\",\n\t\t\tValue: (*cli.StringSlice)(&s),\n\t\t\tUsage: \"list of tables (or globs matching table names) to dump the data of\",\n\t\t\tEnvVar: \"PGMGR_SEED_TABLES\",\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\t\/\/ load configuration from file first; then override with\n\t\t\/\/ flags or env vars if they're present.\n\t\tconfigFile := c.String(\"config-file\")\n\t\tcontents, err := ioutil.ReadFile(configFile)\n\t\tif err == nil {\n\t\t\tjson.Unmarshal(contents, &config)\n\t\t} else {\n\t\t\tfmt.Println(\"error reading config file: \", err)\n\t\t}\n\n\t\t\/\/ apply some defaults\n\t\tif config.Port == 0 {\n\t\t\tconfig.Port = 5432\n\t\t}\n\n\t\tif config.Host == \"\" {\n\t\t\tconfig.Host = \"localhost\"\n\t\t}\n\n\t\t\/\/ override if passed-in\n\t\tif c.String(\"username\") != \"\" {\n\t\t\tconfig.Username = c.String(\"username\")\n\t\t}\n\t\tif c.String(\"password\") != \"\" {\n\t\t\tconfig.Password = c.String(\"password\")\n\t\t}\n\t\tif c.String(\"database\") != \"\" {\n\t\t\tconfig.Database = c.String(\"database\")\n\t\t}\n\t\tif c.String(\"host\") != \"\" {\n\t\t\tconfig.Host = c.String(\"host\")\n\t\t}\n\t\tif c.Int(\"port\") != 0 {\n\t\t\tconfig.Port = c.Int(\"port\")\n\t\t}\n\t\tif c.String(\"url\") != \"\" {\n\t\t\tconfig.Url = c.String(\"url\")\n\t\t}\n\n\t\tif config.Url != \"\" { \/\/ TODO: move this into pgmgr, probably?\n\t\t\t\/\/ parse the DSN and populate the other configuration values. Some of the pg commands\n\t\t\t\/\/ accept a DSN parameter, but not all, so this will help unify things.\n\t\t\tr := regexp.MustCompile(\"^postgres:\/\/(.*)@(.*):([0-9]+)\/([a-zA-Z0-9_-]+)\")\n\t\t\tm := r.FindStringSubmatch(config.Url)\n\t\t\tif len(m) > 0 {\n\t\t\t\tconfig.Username = m[1]\n\t\t\t\tconfig.Host = m[2]\n\t\t\t\tconfig.Port, _ = strconv.Atoi(m[3])\n\t\t\t\tconfig.Database = m[4]\n\t\t\t} else {\n\t\t\t println(\"Could not parse DSN: \", config.Url, \" using regex \", r.String())\n\t\t\t}\n\t\t}\n\n\t\tif c.String(\"dump-file\") != \"\" {\n\t\t\tconfig.DumpFile = c.String(\"dump-file\")\n\t\t}\n\t\tif c.String(\"migration-folder\") != \"\" {\n\t\t\tconfig.MigrationFolder = c.String(\"migration-folder\")\n\t\t}\n\t\tif c.StringSlice(\"seed-tables\") != nil && len(c.StringSlice(\"seed-tables\")) > 0 {\n\t\t\tconfig.SeedTables = c.StringSlice(\"seed-tables\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"migration\",\n\t\t\tUsage: \"generates a new migration with the given name\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif len(c.Args()) == 0 {\n\t\t\t\t\tprintln(\"migration name not given! try `pgmgr migration NameGoesHere`\")\n\t\t\t\t} else {\n\t\t\t\t\tpgmgr.CreateMigration(config, c.Args()[0])\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"config\",\n\t\t\tUsage: \"displays the current configuration as seen by pgmgr\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfmt.Printf(\"%+v\\n\", config)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"db\",\n\t\t\tUsage: \"manage your database. use 'pgmgr db help' for more info\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName: \"create\",\n\t\t\t\t\tUsage: \"creates the database if it doesn't exist\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tdisplayErrorOrMessage(pgmgr.Create(config), \"Database\", config.Database, \"created successfully.\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"drop\",\n\t\t\t\t\tUsage: \"drops the database (all sessions must be disconnected first. this command does not force it)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tdisplayErrorOrMessage(pgmgr.Drop(config), \"Database\", config.Database, \"dropped successfully.\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"dump\",\n\t\t\t\t\tUsage: \"dumps the database schema and contents to the dump file (see --dump-file)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Dump(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Database dumped to\", config.DumpFile, \"successfully\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"load\",\n\t\t\t\t\tUsage: \"loads the database schema and contents from the dump file (see --dump-file)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Load(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Database loaded successfully.\")\n\n\t\t\t\t\t\tv, err := pgmgr.Version(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Latest migration version: \", v)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"version\",\n\t\t\t\t\tUsage: \"returns the current schema version\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tv, err := pgmgr.Version(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Latest migration version: \", v)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"migrate\",\n\t\t\t\t\tUsage: \"applies any un-applied migrations in the migration folder (see --migration-folder)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Migrate(config)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Fprintln(os.Stderr, \"Error during migration:\", err)\n\t\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"rollback\",\n\t\t\t\t\tUsage: \"rolls back the latest migration\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tpgmgr.Rollback(config)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) {\n\t\tapp.Command(\"help\").Run(c)\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>Fix how some errors display to be more useful.<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"io\/ioutil\"\n\t\"encoding\/json\"\n\t\"github.com\/rnubel\/pgmgr\/pgmgr\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"fmt\"\n)\n\nfunc displayErrorOrMessage(err error, args... interface{}) {\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error: \", err)\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Println(args...)\n\t}\n}\n\nfunc displayVersion(config *pgmgr.Config) {\n\tv, err := pgmgr.Version(config)\n\tif v < 0 {\n\t\tdisplayErrorOrMessage(err, \"Database has no schema_migrations table; run `pgmgr db migrate` to create it.\")\n\t} else {\n\t\tdisplayErrorOrMessage(err, \"Latest migration version:\", v)\n\t}\n}\n\nfunc main() {\n\tconfig := &pgmgr.Config{}\n\tapp := cli.NewApp()\n\n\tapp.Name = \"pgmgr\"\n\tapp.Usage = \"manage your app's Postgres database\"\n\tapp.Version = \"0.0.1\"\n\n\ts := make([]string, 0)\n\n\tapp.Flags = []cli.Flag {\n\t\tcli.StringFlag{\n\t\t\tName: \"config-file, c\",\n\t\t\tValue: \".pgmgr.json\",\n\t\t\tUsage: \"set the path to the JSON configuration file specifying your DB parameters\",\n\t\t\tEnvVar: \"PGMGR_CONFIG_FILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"database, d\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the database name which pgmgr will connect to or try to create\",\n\t\t\tEnvVar: \"PGMGR_DATABASE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"username, u\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the username which pgmgr will connect with\",\n\t\t\tEnvVar: \"PGMGR_USERNAME\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, P\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the password which pgmgr will connect with\",\n\t\t\tEnvVar: \"PGMGR_PASSWORD\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"host, H\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"the host which pgmgr will connect to\",\n\t\t\tEnvVar: \"PGMGR_HOST\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: 0,\n\t\t\tUsage: \"the port which pgmgr will connect to\",\n\t\t\tEnvVar: \"PGMGR_PORT\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"url\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"connection URL or DSN containing connection info; will override the other params if given\",\n\t\t\tEnvVar: \"PGMGR_URL\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dump-file\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"where to dump or load the database structure and contents to or from\",\n\t\t\tEnvVar: \"PGMGR_DUMP_FILE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"migration-folder\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"folder containing the migrations to apply\",\n\t\t\tEnvVar: \"PGMGR_MIGRATION_FOLDER\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"seed-tables\",\n\t\t\tValue: (*cli.StringSlice)(&s),\n\t\t\tUsage: \"list of tables (or globs matching table names) to dump the data of\",\n\t\t\tEnvVar: \"PGMGR_SEED_TABLES\",\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\t\/\/ load configuration from file first; then override with\n\t\t\/\/ flags or env vars if they're present.\n\t\tconfigFile := c.String(\"config-file\")\n\t\tcontents, err := ioutil.ReadFile(configFile)\n\t\tif err == nil {\n\t\t\tjson.Unmarshal(contents, &config)\n\t\t} else {\n\t\t\tfmt.Println(\"error reading config file: \", err)\n\t\t}\n\n\t\t\/\/ apply some defaults\n\t\tif config.Port == 0 {\n\t\t\tconfig.Port = 5432\n\t\t}\n\n\t\tif config.Host == \"\" {\n\t\t\tconfig.Host = \"localhost\"\n\t\t}\n\n\t\t\/\/ override if passed-in\n\t\tif c.String(\"username\") != \"\" {\n\t\t\tconfig.Username = c.String(\"username\")\n\t\t}\n\t\tif c.String(\"password\") != \"\" {\n\t\t\tconfig.Password = c.String(\"password\")\n\t\t}\n\t\tif c.String(\"database\") != \"\" {\n\t\t\tconfig.Database = c.String(\"database\")\n\t\t}\n\t\tif c.String(\"host\") != \"\" {\n\t\t\tconfig.Host = c.String(\"host\")\n\t\t}\n\t\tif c.Int(\"port\") != 0 {\n\t\t\tconfig.Port = c.Int(\"port\")\n\t\t}\n\t\tif c.String(\"url\") != \"\" {\n\t\t\tconfig.Url = c.String(\"url\")\n\t\t}\n\n\t\tif config.Url != \"\" { \/\/ TODO: move this into pgmgr, probably?\n\t\t\t\/\/ parse the DSN and populate the other configuration values. Some of the pg commands\n\t\t\t\/\/ accept a DSN parameter, but not all, so this will help unify things.\n\t\t\tr := regexp.MustCompile(\"^postgres:\/\/(.*)@(.*):([0-9]+)\/([a-zA-Z0-9_-]+)\")\n\t\t\tm := r.FindStringSubmatch(config.Url)\n\t\t\tif len(m) > 0 {\n\t\t\t\tconfig.Username = m[1]\n\t\t\t\tconfig.Host = m[2]\n\t\t\t\tconfig.Port, _ = strconv.Atoi(m[3])\n\t\t\t\tconfig.Database = m[4]\n\t\t\t} else {\n\t\t\t println(\"Could not parse DSN: \", config.Url, \" using regex \", r.String())\n\t\t\t}\n\t\t}\n\n\t\tif c.String(\"dump-file\") != \"\" {\n\t\t\tconfig.DumpFile = c.String(\"dump-file\")\n\t\t}\n\t\tif c.String(\"migration-folder\") != \"\" {\n\t\t\tconfig.MigrationFolder = c.String(\"migration-folder\")\n\t\t}\n\t\tif c.StringSlice(\"seed-tables\") != nil && len(c.StringSlice(\"seed-tables\")) > 0 {\n\t\t\tconfig.SeedTables = c.StringSlice(\"seed-tables\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"migration\",\n\t\t\tUsage: \"generates a new migration with the given name\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif len(c.Args()) == 0 {\n\t\t\t\t\tprintln(\"migration name not given! try `pgmgr migration NameGoesHere`\")\n\t\t\t\t} else {\n\t\t\t\t\tpgmgr.CreateMigration(config, c.Args()[0])\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"config\",\n\t\t\tUsage: \"displays the current configuration as seen by pgmgr\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tfmt.Printf(\"%+v\\n\", config)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"db\",\n\t\t\tUsage: \"manage your database. use 'pgmgr db help' for more info\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName: \"create\",\n\t\t\t\t\tUsage: \"creates the database if it doesn't exist\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tdisplayErrorOrMessage(pgmgr.Create(config), \"Database\", config.Database, \"created successfully.\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"drop\",\n\t\t\t\t\tUsage: \"drops the database (all sessions must be disconnected first. this command does not force it)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tdisplayErrorOrMessage(pgmgr.Drop(config), \"Database\", config.Database, \"dropped successfully.\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"dump\",\n\t\t\t\t\tUsage: \"dumps the database schema and contents to the dump file (see --dump-file)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Dump(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Database dumped to\", config.DumpFile, \"successfully\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"load\",\n\t\t\t\t\tUsage: \"loads the database schema and contents from the dump file (see --dump-file)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Load(config)\n\t\t\t\t\t\tdisplayErrorOrMessage(err, \"Database loaded successfully.\")\n\t\t\t\t\t\tdisplayVersion(config)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"version\",\n\t\t\t\t\tUsage: \"returns the current schema version\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tdisplayVersion(config)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"migrate\",\n\t\t\t\t\tUsage: \"applies any un-applied migrations in the migration folder (see --migration-folder)\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\terr := pgmgr.Migrate(config)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Fprintln(os.Stderr, \"Error during migration:\", err)\n\t\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"rollback\",\n\t\t\t\t\tUsage: \"rolls back the latest migration\",\n\t\t\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\t\tpgmgr.Rollback(config)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) {\n\t\tapp.Command(\"help\").Run(c)\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/spf13\/viper\"\n)\n\nvar (\n\tversion = \"2.0.0-dev\"\n\tgitBranch = \"unknown\"\n\tgitCommit = \"unknown\"\n\tbuildDate = \"unknown\"\n)\n\nfunc main() {\n\tviper.Set(\"version\", version)\n\tviper.Set(\"gitBranch\", gitBranch)\n\tviper.Set(\"gitCommit\", gitCommit)\n\tviper.Set(\"buildDate\", buildDate)\n}\n<commit_msg>made it a runnable file<commit_after>\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage main\n\nvar ()\n\nfunc main() {\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\nimport (\n\t\"io\/ioutil\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/util\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/wmf\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/wmf\/storage\"\n\t\/\/ Only add the following for devel.\n\t\/\/\t_ \"net\/http\/pprof\"\n\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar opts struct {\n\tConfigFile string `short:\"c\" long:\"config\" description:\"Configuration file\"`\n\tProfile string `long:\"profile\"`\n\tMemProfile string `long:\"memprofile\"`\n\tLogLevel int `short:\"l\" long:\"loglevel\"`\n}\n\nvar (\n\tlogger *util.HekaLogger\n\tstore *storage.Storage\n\tmetrics *util.Metrics\n)\n\nconst (\n\t\/\/ VERSION is the version number for system.\n\tVERSION = \"1.2\"\n)\n\n\/\/ get the latest version from the file, \"GITREF\"\nfunc getCodeVersionFromFile() string {\n\tvers, err := ioutil.ReadFile(\"GITREF\")\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(string(vers))\n}\n\n\/\/ get the latest version from git.\n\/\/ If this isn't a git install, report \"Unknown\"\nfunc getCodeVersion() string {\n\tif vers := getCodeVersionFromFile(); vers != \"\" {\n\t\treturn vers\n\t}\n\tvar buffer = new(bytes.Buffer)\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"HEAD\")\n\tcmd.Stdout = buffer\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Printf(\"Could not get Git Version: %s\", err.Error())\n\t\treturn \"Unknown\"\n\t}\n\treturn strings.TrimSpace(buffer.String())\n}\n\nfunc main() {\n\tflags.ParseArgs(&opts, os.Args)\n\n\t\/\/ Configuration\n\t\/\/ defaults don't appear to work.\n\tif opts.ConfigFile == \"\" {\n\t\topts.ConfigFile = \"config.ini\"\n\t}\n\tconfig, err := util.ReadMzConfig(opts.ConfigFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config file %s: %s\", opts.ConfigFile, err.Error())\n\t\treturn\n\t}\n\tfullVers := fmt.Sprintf(\"%s-%s\", config.Get(\"VERSION\", VERSION),\n\t\tgetCodeVersion())\n\tconfig.Override(\"VERSION\", fullVers)\n\tsock_secret, _ := util.GenUUID4()\n\tconfig.SetDefault(\"ws.socket_secret\", sock_secret)\n\n\t\/\/ Rest Config\n\terrChan := make(chan error)\n\thost := config.Get(\"host\", \"localhost\")\n\tport := config.Get(\"port\", \"8080\")\n\n\tif config.GetFlag(\"aws.get_hostname\") {\n\t\tif hostname, err := util.GetAWSPublicHostname(); err == nil {\n\t\t\tconfig.SetDefault(\"ws_hostname\", hostname)\n\t\t}\n\t\tif port != \"80\" {\n\t\t\tconfig.SetDefault(\"ws_hostname\", config.Get(\"ws_hostname\", \"\")+\":\"+port)\n\t\t}\n\t}\n\n\t\/\/ Partner cert pool contains the various self-signed certs that\n\t\/\/ partners may require to access their servers (for Proprietary\n\t\/\/ wake mechanisms like UDP)\n\t\/\/ This would be where you collect the certs and store them into\n\t\/\/ the config map as something like:\n\t\/\/ config[\"partnerCertPool\"] = self.loadCerts()\n\n\tif opts.Profile != \"\" {\n\t\tlog.Printf(\"Creating profile %s...\\n\", opts.Profile)\n\t\tf, err := os.Create(opts.Profile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"Profile creation failed:\\n%s\\n\",\n\t\t\t\terr.Error()))\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tlog.Printf(\"Writing app profile...\\n\")\n\t\t\tpprof.StopCPUProfile()\n\t\t}()\n\t\tpprof.StartCPUProfile(f)\n\t}\n\tif opts.MemProfile != \"\" {\n\t\tdefer func() {\n\t\t\tprofFile, err := os.Create(opts.MemProfile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(fmt.Sprintf(\"Memory Profile creation failed:\\n%s\\n\", err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Writing memory profile...\\n\")\n\t\t\tpprof.WriteHeapProfile(profFile)\n\t\t\tprofFile.Close()\n\t\t}()\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlogger := util.NewHekaLogger(config)\n\tmetrics := util.NewMetrics(config.Get(\n\t\t\"metrics.prefix\",\n\t\t\"wmf\"), logger, config)\n\tif err != nil {\n\t\tlogger.Error(\"main\", \"Unable to connect to database. Have you configured it yet?\", nil)\n\t\treturn\n\t}\n\thandlers := wmf.NewHandler(config, logger, metrics)\n\n\t\/\/ Signal handler\n\tsigChan := make(chan os.Signal)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR1)\n\n\tvar RESTMux = http.DefaultServeMux\n\tvar WSMux = http.DefaultServeMux\n\tvar verRoot = strings.SplitN(VERSION, \".\", 2)[0]\n\n\t\/\/ REST calls\n\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/register\/\", verRoot),\n\t\thandlers.Register)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/cmd\/\", verRoot),\n\t\thandlers.Cmd)\n\t\/\/ Web UI calls\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/queue\/\", verRoot),\n\t\thandlers.RestQueue)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/state\/\", verRoot),\n\t\thandlers.State)\n\t\/\/ Static files (served by nginx in production)\n\tif config.GetFlag(\"use_insecure_static\") {\n\t\tRESTMux.HandleFunc(\"\/bower_components\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/images\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/scripts\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/styles\/\",\n\t\t\thandlers.Static)\n\t}\n\t\/\/ Metrics\n\tRESTMux.HandleFunc(\"\/metrics\/\",\n\t\thandlers.Metrics)\n\t\/\/ Operations call\n\tRESTMux.HandleFunc(\"\/status\/\",\n\t\thandlers.Status)\n\t\/\/Signin\n\t\/\/ set state nonce & check if valid at signin\n\tRESTMux.HandleFunc(\"\/signin\/\",\n\t\thandlers.Signin)\n\t\/\/Signout\n\tRESTMux.HandleFunc(\"\/signout\/\",\n\t\thandlers.Signout)\n\t\/\/ Config option because there are other teams involved.\n\tauth := config.Get(\"fxa.redir_uri\", \"\/oauth\/\")\n\tRESTMux.HandleFunc(auth, handlers.OAuthCallback)\n\n\tWSMux.Handle(fmt.Sprintf(\"\/%s\/ws\/\", verRoot),\n\t\twebsocket.Handler(handlers.WSSocketHandler))\n\t\/\/ Handle root calls as webUI\n\t\/\/ Get a list of registered devices for the currently logged in user\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/devices\/\", verRoot),\n\t\thandlers.UserDevices)\n\t\/\/ Get an object describing the data for a user's device\n\t\/\/ e.g. http:\/\/host\/0\/data\/0123deviceid\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/data\/\", verRoot),\n\t\thandlers.InitDataJson)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/validate\/\", verRoot),\n\t\thandlers.Validate)\n\tRESTMux.HandleFunc(\"\/\",\n\t\thandlers.Index)\n\n\tlogger.Info(\"main\", \"startup...\",\n\t\tutil.Fields{\"host\": host, \"port\": port, \"version\": fullVers})\n\n\tgo func() {\n\t\terrChan <- http.ListenAndServe(host+\":\"+port, nil)\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\tif err != nil {\n\t\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t\t}\n\tcase <-sigChan:\n\t\tlogger.Info(\"main\", \"Shutting down...\", nil)\n\t}\n}\n<commit_msg>Exit on critical config misses<commit_after>package main\n\n\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\nimport (\n\t\"io\/ioutil\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/util\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/wmf\"\n\t\"github.com\/mozilla-services\/FindMyDevice\/wmf\/storage\"\n\t\/\/ Only add the following for devel.\n\t\/\/\t_ \"net\/http\/pprof\"\n\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar opts struct {\n\tConfigFile string `short:\"c\" long:\"config\" description:\"Configuration file\"`\n\tProfile string `long:\"profile\"`\n\tMemProfile string `long:\"memprofile\"`\n\tLogLevel int `short:\"l\" long:\"loglevel\"`\n}\n\nvar (\n\tlogger *util.HekaLogger\n\tstore *storage.Storage\n\tmetrics *util.Metrics\n)\n\nconst (\n\t\/\/ VERSION is the version number for system.\n\tVERSION = \"1.2\"\n)\n\n\/\/ get the latest version from the file, \"GITREF\"\nfunc getCodeVersionFromFile() string {\n\tvers, err := ioutil.ReadFile(\"GITREF\")\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(string(vers))\n}\n\n\/\/ get the latest version from git.\n\/\/ If this isn't a git install, report \"Unknown\"\nfunc getCodeVersion() string {\n\tif vers := getCodeVersionFromFile(); vers != \"\" {\n\t\treturn vers\n\t}\n\tvar buffer = new(bytes.Buffer)\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"HEAD\")\n\tcmd.Stdout = buffer\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Printf(\"Could not get Git Version: %s\", err.Error())\n\t\treturn \"Unknown\"\n\t}\n\treturn strings.TrimSpace(buffer.String())\n}\n\nfunc main() {\n\tflags.ParseArgs(&opts, os.Args)\n\n\t\/\/ Configuration\n\t\/\/ defaults don't appear to work.\n\tif opts.ConfigFile == \"\" {\n\t\topts.ConfigFile = \"config.ini\"\n\t}\n\tconfig, err := util.ReadMzConfig(opts.ConfigFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config file %s: %s\", opts.ConfigFile, err.Error())\n\t\treturn\n\t}\n\tfullVers := fmt.Sprintf(\"%s-%s\", config.Get(\"VERSION\", VERSION),\n\t\tgetCodeVersion())\n\tconfig.Override(\"VERSION\", fullVers)\n\tsock_secret, _ := util.GenUUID4()\n\tconfig.SetDefault(\"ws.socket_secret\", sock_secret)\n\n\t\/\/ Rest Config\n\terrChan := make(chan error)\n\thost := config.Get(\"host\", \"localhost\")\n\tport := config.Get(\"port\", \"8080\")\n\n\tif config.GetFlag(\"aws.get_hostname\") {\n\t\tif hostname, err := util.GetAWSPublicHostname(); err == nil {\n\t\t\tconfig.SetDefault(\"ws_hostname\", hostname)\n\t\t}\n\t\tif port != \"80\" {\n\t\t\tconfig.SetDefault(\"ws_hostname\", config.Get(\"ws_hostname\", \"\")+\":\"+port)\n\t\t}\n\t}\n\n\t\/\/ Partner cert pool contains the various self-signed certs that\n\t\/\/ partners may require to access their servers (for Proprietary\n\t\/\/ wake mechanisms like UDP)\n\t\/\/ This would be where you collect the certs and store them into\n\t\/\/ the config map as something like:\n\t\/\/ config[\"partnerCertPool\"] = self.loadCerts()\n\n\tif opts.Profile != \"\" {\n\t\tlog.Printf(\"Creating profile %s...\\n\", opts.Profile)\n\t\tf, err := os.Create(opts.Profile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"Profile creation failed:\\n%s\\n\",\n\t\t\t\terr.Error()))\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tlog.Printf(\"Writing app profile...\\n\")\n\t\t\tpprof.StopCPUProfile()\n\t\t}()\n\t\tpprof.StartCPUProfile(f)\n\t}\n\tif opts.MemProfile != \"\" {\n\t\tdefer func() {\n\t\t\tprofFile, err := os.Create(opts.MemProfile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(fmt.Sprintf(\"Memory Profile creation failed:\\n%s\\n\", err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Writing memory profile...\\n\")\n\t\t\tpprof.WriteHeapProfile(profFile)\n\t\t\tprofFile.Close()\n\t\t}()\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlogger := util.NewHekaLogger(config)\n\tmetrics := util.NewMetrics(config.Get(\n\t\t\"metrics.prefix\",\n\t\t\"wmf\"), logger, config)\n\tif err != nil {\n\t\tlogger.Error(\"main\", \"Unable to connect to database. Have you configured it yet?\", nil)\n\t\treturn\n\t}\n\thandlers := wmf.NewHandler(config, logger, metrics)\n\tif handlers == nil {\n\t\tlog.Fatalf(\"Could not start server. Please check config.ini\")\n\t}\n\n\t\/\/ Signal handler\n\tsigChan := make(chan os.Signal)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR1)\n\n\tvar RESTMux = http.DefaultServeMux\n\tvar WSMux = http.DefaultServeMux\n\tvar verRoot = strings.SplitN(VERSION, \".\", 2)[0]\n\n\t\/\/ REST calls\n\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/register\/\", verRoot),\n\t\thandlers.Register)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/cmd\/\", verRoot),\n\t\thandlers.Cmd)\n\t\/\/ Web UI calls\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/queue\/\", verRoot),\n\t\thandlers.RestQueue)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/state\/\", verRoot),\n\t\thandlers.State)\n\t\/\/ Static files (served by nginx in production)\n\tif config.GetFlag(\"use_insecure_static\") {\n\t\tRESTMux.HandleFunc(\"\/bower_components\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/images\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/scripts\/\",\n\t\t\thandlers.Static)\n\t\tRESTMux.HandleFunc(\"\/styles\/\",\n\t\t\thandlers.Static)\n\t}\n\t\/\/ Metrics\n\tRESTMux.HandleFunc(\"\/metrics\/\",\n\t\thandlers.Metrics)\n\t\/\/ Operations call\n\tRESTMux.HandleFunc(\"\/status\/\",\n\t\thandlers.Status)\n\t\/\/Signin\n\t\/\/ set state nonce & check if valid at signin\n\tRESTMux.HandleFunc(\"\/signin\/\",\n\t\thandlers.Signin)\n\t\/\/Signout\n\tRESTMux.HandleFunc(\"\/signout\/\",\n\t\thandlers.Signout)\n\t\/\/ Config option because there are other teams involved.\n\tauth := config.Get(\"fxa.redir_uri\", \"\/oauth\/\")\n\tRESTMux.HandleFunc(auth, handlers.OAuthCallback)\n\n\tWSMux.Handle(fmt.Sprintf(\"\/%s\/ws\/\", verRoot),\n\t\twebsocket.Handler(handlers.WSSocketHandler))\n\t\/\/ Handle root calls as webUI\n\t\/\/ Get a list of registered devices for the currently logged in user\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/devices\/\", verRoot),\n\t\thandlers.UserDevices)\n\t\/\/ Get an object describing the data for a user's device\n\t\/\/ e.g. http:\/\/host\/0\/data\/0123deviceid\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/data\/\", verRoot),\n\t\thandlers.InitDataJson)\n\tRESTMux.HandleFunc(fmt.Sprintf(\"\/%s\/validate\/\", verRoot),\n\t\thandlers.Validate)\n\tRESTMux.HandleFunc(\"\/\",\n\t\thandlers.Index)\n\n\tlogger.Info(\"main\", \"startup...\",\n\t\tutil.Fields{\"host\": host, \"port\": port, \"version\": fullVers})\n\n\tgo func() {\n\t\terrChan <- http.ListenAndServe(host+\":\"+port, nil)\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ListenAndServe: \" + err.Error())\n\t\t}\n\tcase <-sigChan:\n\t\tlogger.Info(\"main\", \"Shutting down...\", nil)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package main generates web project.\npackage main\n\nimport (\n\t\"flag\"\n\t\"github.com\/go-bootstrap\/go-bootstrap\/helpers\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\tdir := flag.String(\"dir\", \"\", \"Project directory relative to $GOPATH\/src\/\")\n\tflag.Parse()\n\n\tif *dir == \"\" {\n\t\tlog.Fatal(\"dir option is missing.\")\n\t}\n\n\t\/\/ There can be more than one path, separated by colon.\n\tgopaths := strings.Split(os.ExpandEnv(\"$GOPATH\"), \":\")\n\tgopath := gopaths[0]\n\n\ttrimmedPath := strings.Trim(*dir, \"\/\")\n\tfullpath := filepath.Join(gopath, \"src\", trimmedPath)\n\tdirChunks := strings.Split(trimmedPath, \"\/\")\n\trepoName := dirChunks[len(dirChunks)-3]\n\trepoUser := dirChunks[len(dirChunks)-2]\n\tprojectName := dirChunks[len(dirChunks)-1]\n\tdbName := projectName\n\ttestDbName := projectName + \"-test\"\n\tcurrentUser, _ := user.Current()\n\n\t\/\/ 1. Create target directory\n\tlog.Print(\"Creating \" + fullpath + \"...\")\n\terr := os.MkdirAll(fullpath, 0755)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 2. Copy everything under blank directory to target directory.\n\tlog.Print(\"Copying a blank project to \" + fullpath + \"...\")\n\tblankDir := os.ExpandEnv(filepath.Join(gopath, \"src\", \"github.com\", \"go-bootstrap\", \"go-bootstrap\", \"blank\"))\n\tcurrDir, err := os.Getwd()\n\thelpers.ExitOnError(err, \"Can't get current path!\")\n\n\terr = os.Chdir(blankDir)\n\thelpers.ExitOnError(err, \"\")\n\n\toutput, err := exec.Command(\"cp\", \"-rf\", \".\", fullpath).CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\terr = os.Chdir(currDir)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 3. Interpolate placeholder variables on the new project.\n\tlog.Print(\"Replacing placeholder variables on \" + repoUser + \"\/\" + projectName + \"...\")\n\n\treplacers := make(map[string]string)\n\treplacers[\"$GO_BOOTSTRAP_REPO_NAME\"] = repoName\n\treplacers[\"$GO_BOOTSTRAP_REPO_USER\"] = repoUser\n\treplacers[\"$GO_BOOTSTRAP_PROJECT_NAME\"] = projectName\n\treplacers[\"$GO_BOOTSTRAP_COOKIE_SECRET\"] = helpers.RandString(16)\n\treplacers[\"$GO_BOOTSTRAP_CURRENT_USER\"] = currentUser.Username\n\treplacers[\"$GO_BOOTSTRAP_PG_DSN\"] = helpers.DefaultPGDSN(dbName)\n\treplacers[\"$GO_BOOTSTRAP_ESCAPED_PG_DSN\"] = helpers.BashEscape(helpers.DefaultPGDSN(dbName))\n\treplacers[\"$GO_BOOTSTRAP_PG_TEST_DSN\"] = helpers.DefaultPGDSN(testDbName)\n\treplacers[\"$GO_BOOTSTRAP_ESCAPED_PG_TEST_DSN\"] = helpers.BashEscape(helpers.DefaultPGDSN(testDbName))\n\n\terr = helpers.RecursiveSearchReplaceFiles(fullpath, replacers)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 4. go get github.com\/mattes\/migrate.\n\tlog.Print(\"Running go get github.com\/mattes\/migrate...\")\n\toutput, err = exec.Command(\"go\", \"get\", \"github.com\/mattes\/migrate\").CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\t\/\/ 5. Bootstrap databases.\n\tcmd := exec.Command(\"bash\", \"scripts\/db-bootstrap\")\n\tcmd.Dir = fullpath\n\toutput, _ = cmd.CombinedOutput()\n\tlog.Print(string(output))\n\n\t\/\/ 6. Get all application dependencies for the first time.\n\tlog.Print(\"Running go get .\/...\")\n\tcmd = exec.Command(\"go\", \"get\", \".\/...\")\n\tcmd.Dir = fullpath\n\toutput, err = cmd.CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\trepoIsGit := strings.HasPrefix(repoName, \"git\")\n\trepoIsHg := strings.HasPrefix(repoName, \"bitbucket\")\n\n\t\/\/ Generate Godeps directory.\n\t\/\/ Works only on git repo or bitbucket repo.\n\tif repoIsGit || repoIsHg {\n\t\tlog.Print(\"Installing github.com\/tools\/godep...\")\n\t\toutput, err := exec.Command(\"go\", \"get\", \"github.com\/tools\/godep\").CombinedOutput()\n\t\thelpers.ExitOnError(err, string(output))\n\n\t\tif repoIsGit {\n\t\t\tlog.Print(\"Running git init...\")\n\t\t\tcmd := exec.Command(\"git\", \"init\")\n\t\t\tcmd.Dir = fullpath\n\t\t\toutput, err = cmd.CombinedOutput()\n\t\t\thelpers.ExitOnError(err, string(output))\n\t\t}\n\t\tif repoIsHg {\n\t\t\tlog.Print(\"Running hg init...\")\n\t\t\tcmd := exec.Command(\"hg\", \"init\")\n\t\t\tcmd.Dir = fullpath\n\t\t\toutput, _ = cmd.CombinedOutput()\n\t\t\tlog.Print(string(output))\n\t\t}\n\n\t\t\/\/ godep save .\/...\n\t\tlog.Print(\"Running godep save .\/...\")\n\t\tcmd = exec.Command(\"godep\", \"save\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, err = cmd.CombinedOutput()\n\t\thelpers.ExitOnError(err, string(output))\n\n\t\t\/\/ Run tests on newly generated app.\n\t\tlog.Print(\"Running godep go test .\/...\")\n\t\tcmd = exec.Command(\"godep\", \"go\", \"test\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, _ = cmd.CombinedOutput()\n\t\tlog.Print(string(output))\n\n\t} else {\n\t\t\/\/ Run tests on newly generated app.\n\t\tlog.Print(\"Running go test .\/...\")\n\t\tcmd = exec.Command(\"go\", \"test\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, _ = cmd.CombinedOutput()\n\t\tlog.Print(string(output))\n\t}\n}\n<commit_msg>Allow user to choose a specific `$GOPATH`. This may address concerns https:\/\/github.com\/go-bootstrap\/go-bootstrap\/pull\/18#issuecomment-101704597<commit_after>\/\/ Package main generates web project.\npackage main\n\nimport (\n\t\"flag\"\n\t\"github.com\/go-bootstrap\/go-bootstrap\/helpers\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\tdirInput := flag.String(\"dir\", \"\", \"Project directory relative to $GOPATH\/src\/\")\n\tgopathInput := flag.String(\"gopath\", \"\", \"Choose which $GOPATH to use\")\n\n\tflag.Parse()\n\n\tif *dirInput == \"\" {\n\t\tlog.Fatal(\"dir option is missing.\")\n\t}\n\n\t\/\/ There can be more than one path, separated by colon.\n\tgopaths := strings.Split(os.ExpandEnv(\"$GOPATH\"), \":\")\n\n\t\/\/ By default, we choose the first GOPATH.\n\tgopath := gopaths[0]\n\n\t\/\/ But if user specified one, we choose that one.\n\tif *gopathInput != \"\" {\n\t\tfor _, gp := range gopaths {\n\t\t\tif gp == *gopathInput {\n\t\t\t\tgopath = gp\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\ttrimmedPath := strings.Trim(*dirInput, \"\/\")\n\tfullpath := filepath.Join(gopath, \"src\", trimmedPath)\n\tdirChunks := strings.Split(trimmedPath, \"\/\")\n\trepoName := dirChunks[len(dirChunks)-3]\n\trepoUser := dirChunks[len(dirChunks)-2]\n\tprojectName := dirChunks[len(dirChunks)-1]\n\tdbName := projectName\n\ttestDbName := projectName + \"-test\"\n\tcurrentUser, _ := user.Current()\n\n\t\/\/ 1. Create target directory\n\tlog.Print(\"Creating \" + fullpath + \"...\")\n\terr := os.MkdirAll(fullpath, 0755)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 2. Copy everything under blank directory to target directory.\n\tlog.Print(\"Copying a blank project to \" + fullpath + \"...\")\n\tblankDir := os.ExpandEnv(filepath.Join(gopath, \"src\", \"github.com\", \"go-bootstrap\", \"go-bootstrap\", \"blank\"))\n\tcurrDir, err := os.Getwd()\n\thelpers.ExitOnError(err, \"Can't get current path!\")\n\n\terr = os.Chdir(blankDir)\n\thelpers.ExitOnError(err, \"\")\n\n\toutput, err := exec.Command(\"cp\", \"-rf\", \".\", fullpath).CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\terr = os.Chdir(currDir)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 3. Interpolate placeholder variables on the new project.\n\tlog.Print(\"Replacing placeholder variables on \" + repoUser + \"\/\" + projectName + \"...\")\n\n\treplacers := make(map[string]string)\n\treplacers[\"$GO_BOOTSTRAP_REPO_NAME\"] = repoName\n\treplacers[\"$GO_BOOTSTRAP_REPO_USER\"] = repoUser\n\treplacers[\"$GO_BOOTSTRAP_PROJECT_NAME\"] = projectName\n\treplacers[\"$GO_BOOTSTRAP_COOKIE_SECRET\"] = helpers.RandString(16)\n\treplacers[\"$GO_BOOTSTRAP_CURRENT_USER\"] = currentUser.Username\n\treplacers[\"$GO_BOOTSTRAP_PG_DSN\"] = helpers.DefaultPGDSN(dbName)\n\treplacers[\"$GO_BOOTSTRAP_ESCAPED_PG_DSN\"] = helpers.BashEscape(helpers.DefaultPGDSN(dbName))\n\treplacers[\"$GO_BOOTSTRAP_PG_TEST_DSN\"] = helpers.DefaultPGDSN(testDbName)\n\treplacers[\"$GO_BOOTSTRAP_ESCAPED_PG_TEST_DSN\"] = helpers.BashEscape(helpers.DefaultPGDSN(testDbName))\n\n\terr = helpers.RecursiveSearchReplaceFiles(fullpath, replacers)\n\thelpers.ExitOnError(err, \"\")\n\n\t\/\/ 4. go get github.com\/mattes\/migrate.\n\tlog.Print(\"Running go get github.com\/mattes\/migrate...\")\n\toutput, err = exec.Command(\"go\", \"get\", \"github.com\/mattes\/migrate\").CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\t\/\/ 5. Bootstrap databases.\n\tcmd := exec.Command(\"bash\", \"scripts\/db-bootstrap\")\n\tcmd.Dir = fullpath\n\toutput, _ = cmd.CombinedOutput()\n\tlog.Print(string(output))\n\n\t\/\/ 6. Get all application dependencies for the first time.\n\tlog.Print(\"Running go get .\/...\")\n\tcmd = exec.Command(\"go\", \"get\", \".\/...\")\n\tcmd.Dir = fullpath\n\toutput, err = cmd.CombinedOutput()\n\thelpers.ExitOnError(err, string(output))\n\n\trepoIsGit := strings.HasPrefix(repoName, \"git\")\n\trepoIsHg := strings.HasPrefix(repoName, \"bitbucket\")\n\n\t\/\/ Generate Godeps directory.\n\t\/\/ Works only on git repo or bitbucket repo.\n\tif repoIsGit || repoIsHg {\n\t\tlog.Print(\"Installing github.com\/tools\/godep...\")\n\t\toutput, err := exec.Command(\"go\", \"get\", \"github.com\/tools\/godep\").CombinedOutput()\n\t\thelpers.ExitOnError(err, string(output))\n\n\t\tif repoIsGit {\n\t\t\tlog.Print(\"Running git init...\")\n\t\t\tcmd := exec.Command(\"git\", \"init\")\n\t\t\tcmd.Dir = fullpath\n\t\t\toutput, err = cmd.CombinedOutput()\n\t\t\thelpers.ExitOnError(err, string(output))\n\t\t}\n\t\tif repoIsHg {\n\t\t\tlog.Print(\"Running hg init...\")\n\t\t\tcmd := exec.Command(\"hg\", \"init\")\n\t\t\tcmd.Dir = fullpath\n\t\t\toutput, _ = cmd.CombinedOutput()\n\t\t\tlog.Print(string(output))\n\t\t}\n\n\t\t\/\/ godep save .\/...\n\t\tlog.Print(\"Running godep save .\/...\")\n\t\tcmd = exec.Command(\"godep\", \"save\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, err = cmd.CombinedOutput()\n\t\thelpers.ExitOnError(err, string(output))\n\n\t\t\/\/ Run tests on newly generated app.\n\t\tlog.Print(\"Running godep go test .\/...\")\n\t\tcmd = exec.Command(\"godep\", \"go\", \"test\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, _ = cmd.CombinedOutput()\n\t\tlog.Print(string(output))\n\n\t} else {\n\t\t\/\/ Run tests on newly generated app.\n\t\tlog.Print(\"Running go test .\/...\")\n\t\tcmd = exec.Command(\"go\", \"test\", \".\/...\")\n\t\tcmd.Dir = fullpath\n\t\toutput, _ = cmd.CombinedOutput()\n\t\tlog.Print(string(output))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/sethvargo\/go-fastly\"\n)\n\nvar pendingVersions map[string]fastly.Version\nvar siteConfigs map[string]SiteConfig\n\ntype SiteConfig struct {\n\tBackends []*fastly.Backend\n\tConditions []*fastly.Condition\n\tCacheSettings []*fastly.CacheSetting\n\tSSLHostname string\n}\n\nfunc readConfig() error {\n\t\/\/\tvar parsed interface{}\n\t\/\/\tf, _ := os.Open(\"config.json\")\n\t\/\/\tdec := json.NewDecoder(f)\n\t\/\/\tif err := dec.Decode(&parsed); err != nil {\n\t\/\/\t\tlog.Fatal(err)\n\t\/\/\t}\n\t\/\/\tfmt.Println(parsed)\n\n\tbody, _ := ioutil.ReadFile(\"config.json\")\n\terr := json.Unmarshal(body, &siteConfigs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor name, config := range siteConfigs {\n\t\tif name == \"_default_\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := mergo.Merge(&config, siteConfigs[\"_default_\"]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsiteConfigs[name] = config\n\t\tfor _, backend := range config.Backends {\n\t\t\tbackend.SSLHostname = strings.Replace(backend.SSLHostname, \"_servicename_\", name, -1)\n\t\t}\n\n\t}\n\treturn nil\n}\n\nconst versionComment string = \"fastly-ctl\"\n\nfunc prepareNewVersion(client *fastly.Client, s *fastly.Service) (fastly.Version, error) {\n\t\/\/ See if we've already prepared a version\n\tif version, ok := pendingVersions[s.ID]; ok {\n\t\treturn version, nil\n\t}\n\n\t\/\/ Look for an inactive version higher than our current version\n\tversions, err := client.ListVersions(&fastly.ListVersionsInput{Service: s.ID})\n\tif err != nil {\n\t\treturn fastly.Version{}, err\n\t}\n\tfor _, v := range versions {\n\t\tversionNumber, err := strconv.Atoi(v.Number)\n\t\tif err != nil {\n\t\t\treturn fastly.Version{}, fmt.Errorf(\"Invalid version number encountered: %s\", err)\n\t\t}\n\t\tif uint(versionNumber) > s.ActiveVersion && v.Comment == versionComment && !v.Active && !v.Locked {\n\t\t\tpendingVersions[s.ID] = *v\n\t\t\treturn *v, nil\n\t\t}\n\t}\n\n\t\/\/ Otherwise, create a new version\n\tnewversion, err := client.CloneVersion(&fastly.CloneVersionInput{Service: s.ID, Version: strconv.Itoa(int(s.ActiveVersion))})\n\tif err != nil {\n\t\treturn *newversion, err\n\t}\n\tif _, err := client.UpdateVersion(&fastly.UpdateVersionInput{Service: s.ID, Version: newversion.Number, Comment: versionComment}); err != nil {\n\t\treturn *newversion, err\n\t}\n\tpendingVersions[s.ID] = *newversion\n\treturn *newversion, nil\n}\n\nfunc syncVcls(client *fastly.Client, s *fastly.Service) error {\n\thasher := sha256.New()\n\tvar activeVersion = strconv.Itoa(int(s.ActiveVersion))\n\tvcls, err := client.ListVCLs(&fastly.ListVCLsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range vcls {\n\t\tfilename := v.Name + \".vcl\"\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tif _, err := io.Copy(hasher, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlocalsum := hasher.Sum(nil)\n\t\thasher.Reset()\n\n\t\thasher.Write([]byte(v.Content))\n\t\tremotesum := hasher.Sum(nil)\n\t\thasher.Reset()\n\n\t\tif !bytes.Equal(localsum, remotesum) {\n\t\t\tfmt.Printf(\"VCL mismatch on service %s VCL %s. Updating.\\n\", s.Name, v.Name)\n\t\t\tcontent, err := ioutil.ReadFile(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/newversion, err := client.CloneVersion(&fastly.CloneVersionInput{Service: s.ID, Version: activeVersion})\n\t\t\tnewversion, err := prepareNewVersion(client, s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err = client.UpdateVCL(&fastly.UpdateVCLInput{Name: v.Name, Service: s.ID, Version: newversion.Number, Content: string(content)}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncCacheSettings(client *fastly.Client, s *fastly.Service, newCacheSettings []*fastly.CacheSetting) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingCacheSettings, err := client.ListCacheSettings(&fastly.ListCacheSettingsInput{Service: s.ID, Version: newversion.Number})\n\tfor _, setting := range existingCacheSettings {\n\t\terr := client.DeleteCacheSetting(&fastly.DeleteCacheSettingInput{Service: s.ID, Name: setting.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, setting := range newCacheSettings {\n\t\tvar i fastly.CreateCacheSettingInput\n\t\ti.TTL = setting.TTL\n\t\ti.Name = setting.Name\n\t\ti.Action = setting.Action\n\t\ti.Service = s.ID\n\t\ti.Version = newversion.Number\n\t\ti.StaleTTL = setting.StaleTTL\n\t\ti.CacheCondition = setting.CacheCondition\n\n\t\tif _, err = client.CreateCacheSetting(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncConditions(client *fastly.Client, s *fastly.Service, newConditions []*fastly.Condition) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingConditions, err := client.ListConditions(&fastly.ListConditionsInput{Service: s.ID, Version: newversion.Number})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, condition := range existingConditions {\n\t\terr := client.DeleteCondition(&fastly.DeleteConditionInput{Service: s.ID, Name: condition.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, condition := range newConditions {\n\t\tvar i fastly.CreateConditionInput\n\t\ti.Name = condition.Name\n\t\ti.Type = condition.Type\n\t\ti.Service = s.ID\n\t\ti.Version = newversion.Number\n\t\ti.Priority = condition.Priority\n\t\ti.Statement = condition.Statement\n\t\tif _, err = client.CreateCondition(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncBackends(client *fastly.Client, s *fastly.Service, newBackends []*fastly.Backend) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingBackends, err := client.ListBackends(&fastly.ListBackendsInput{Service: s.ID, Version: newversion.Number})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, backend := range existingBackends {\n\t\terr := client.DeleteBackend(&fastly.DeleteBackendInput{Service: s.ID, Name: backend.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, backend := range newBackends {\n\t\tvar i fastly.CreateBackendInput\n\t\ti.Address = backend.Address\n\t\ti.Name = backend.Name\n\t\ti.Service = newversion.ServiceID\n\t\ti.Version = newversion.Number\n\t\ti.UseSSL = backend.UseSSL\n\t\ti.SSLCheckCert = backend.SSLCheckCert\n\t\ti.SSLSNIHostname = backend.SSLSNIHostname\n\t\ti.SSLHostname = backend.SSLHostname\n\t\ti.AutoLoadbalance = backend.AutoLoadbalance\n\t\ti.Weight = backend.Weight\n\t\ti.MaxConn = backend.MaxConn\n\t\ti.ConnectTimeout = backend.ConnectTimeout\n\t\ti.FirstByteTimeout = backend.FirstByteTimeout\n\t\ti.BetweenBytesTimeout = backend.BetweenBytesTimeout\n\t\ti.HealthCheck = backend.HealthCheck\n\t\ti.RequestCondition = backend.RequestCondition\n\t\tif _, err = client.CreateBackend(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc syncConfig(client *fastly.Client, s *fastly.Service) error {\n\tvar activeVersion = strconv.Itoa(int(s.ActiveVersion))\n\tvar config SiteConfig\n\tif _, ok := siteConfigs[s.Name]; ok {\n\t\tconfig = siteConfigs[s.Name]\n\t} else {\n\t\tconfig = siteConfigs[\"_default_\"]\n\t}\n\n\tremoteConditions, err := client.ListConditions(&fastly.ListConditionsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Conditions must be sync'd first, as if they're referenced in any other setup\n\t\/\/ the API will reject if they don't exist.\n\tif !reflect.DeepEqual(config.Conditions, remoteConditions) {\n\t\tif err := syncConditions(client, s, config.Conditions); err != nil {\n\t\t\treturn fmt.Errorf(\"Error syncing conditions: %s\", err)\n\t\t}\n\t}\n\tremoteBackends, err := client.ListBackends(&fastly.ListBackendsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !reflect.DeepEqual(config.Backends, remoteBackends) {\n\t\tif err := syncBackends(client, s, config.Backends); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tremoteCacheSettings, _ := client.ListCacheSettings(&fastly.ListCacheSettingsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !reflect.DeepEqual(config.CacheSettings, remoteCacheSettings) {\n\t\tif err := syncCacheSettings(client, s, config.CacheSettings); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n\n}\n\nfunc main() {\n\tclient, err := fastly.NewClient(os.Getenv(\"FASTLY_KEY\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := readConfig(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpendingVersions = make(map[string]fastly.Version)\n\n\tservices, err := client.ListServices(&fastly.ListServicesInput{})\n\tfor _, s := range services {\n\t\tif err = syncVcls(client, s); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err = syncConfig(client, s); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n<commit_msg>Add syncHeaders.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/sethvargo\/go-fastly\"\n)\n\nvar pendingVersions map[string]fastly.Version\nvar siteConfigs map[string]SiteConfig\n\ntype SiteConfig struct {\n\tBackends []*fastly.Backend\n\tConditions []*fastly.Condition\n\tCacheSettings []*fastly.CacheSetting\n\tHeaders []*fastly.Header\n\tSSLHostname string\n}\n\nfunc readConfig() error {\n\t\/\/\tvar parsed interface{}\n\t\/\/\tf, _ := os.Open(\"config.json\")\n\t\/\/\tdec := json.NewDecoder(f)\n\t\/\/\tif err := dec.Decode(&parsed); err != nil {\n\t\/\/\t\tlog.Fatal(err)\n\t\/\/\t}\n\t\/\/\tfmt.Println(parsed)\n\n\tbody, _ := ioutil.ReadFile(\"config.json\")\n\terr := json.Unmarshal(body, &siteConfigs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor name, config := range siteConfigs {\n\t\tif name == \"_default_\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := mergo.Merge(&config, siteConfigs[\"_default_\"]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsiteConfigs[name] = config\n\t\tfor _, backend := range config.Backends {\n\t\t\tbackend.SSLHostname = strings.Replace(backend.SSLHostname, \"_servicename_\", name, -1)\n\t\t}\n\n\t}\n\treturn nil\n}\n\nconst versionComment string = \"fastly-ctl\"\n\nfunc prepareNewVersion(client *fastly.Client, s *fastly.Service) (fastly.Version, error) {\n\t\/\/ See if we've already prepared a version\n\tif version, ok := pendingVersions[s.ID]; ok {\n\t\treturn version, nil\n\t}\n\n\t\/\/ Look for an inactive version higher than our current version\n\tversions, err := client.ListVersions(&fastly.ListVersionsInput{Service: s.ID})\n\tif err != nil {\n\t\treturn fastly.Version{}, err\n\t}\n\tfor _, v := range versions {\n\t\tversionNumber, err := strconv.Atoi(v.Number)\n\t\tif err != nil {\n\t\t\treturn fastly.Version{}, fmt.Errorf(\"Invalid version number encountered: %s\", err)\n\t\t}\n\t\tif uint(versionNumber) > s.ActiveVersion && v.Comment == versionComment && !v.Active && !v.Locked {\n\t\t\tpendingVersions[s.ID] = *v\n\t\t\treturn *v, nil\n\t\t}\n\t}\n\n\t\/\/ Otherwise, create a new version\n\tnewversion, err := client.CloneVersion(&fastly.CloneVersionInput{Service: s.ID, Version: strconv.Itoa(int(s.ActiveVersion))})\n\tif err != nil {\n\t\treturn *newversion, err\n\t}\n\tif _, err := client.UpdateVersion(&fastly.UpdateVersionInput{Service: s.ID, Version: newversion.Number, Comment: versionComment}); err != nil {\n\t\treturn *newversion, err\n\t}\n\tpendingVersions[s.ID] = *newversion\n\treturn *newversion, nil\n}\n\nfunc syncVcls(client *fastly.Client, s *fastly.Service) error {\n\thasher := sha256.New()\n\tvar activeVersion = strconv.Itoa(int(s.ActiveVersion))\n\tvcls, err := client.ListVCLs(&fastly.ListVCLsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range vcls {\n\t\tfilename := v.Name + \".vcl\"\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tif _, err := io.Copy(hasher, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlocalsum := hasher.Sum(nil)\n\t\thasher.Reset()\n\n\t\thasher.Write([]byte(v.Content))\n\t\tremotesum := hasher.Sum(nil)\n\t\thasher.Reset()\n\n\t\tif !bytes.Equal(localsum, remotesum) {\n\t\t\tfmt.Printf(\"VCL mismatch on service %s VCL %s. Updating.\\n\", s.Name, v.Name)\n\t\t\tcontent, err := ioutil.ReadFile(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/newversion, err := client.CloneVersion(&fastly.CloneVersionInput{Service: s.ID, Version: activeVersion})\n\t\t\tnewversion, err := prepareNewVersion(client, s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err = client.UpdateVCL(&fastly.UpdateVCLInput{Name: v.Name, Service: s.ID, Version: newversion.Number, Content: string(content)}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncHeaders(client *fastly.Client, s *fastly.Service, newHeaders []*fastly.Header) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingHeaders, err := client.ListHeaders(&fastly.ListHeadersInput{Service: s.ID, Version: newversion.Number})\n\tfor _, setting := range existingHeaders {\n\t\terr := client.DeleteHeader(&fastly.DeleteHeaderInput{Service: s.ID, Name: setting.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, header := range newHeaders {\n\t\tvar i fastly.CreateHeaderInput\n\t\ti.Name = header.Name\n\t\ti.Type = header.Type\n\t\ti.Regex = header.Regex\n\t\ti.Destination = header.Destination\n\t\ti.Source = header.Source\n\t\ti.Action = header.Action\n\t\ti.Version = newversion.Number\n\t\ti.Service = s.ID\n\t\ti.Priority = header.Priority\n\t\ti.IgnoreIfSet = header.IgnoreIfSet\n\t\ti.Substitution = header.Substitution\n\t\ti.RequestCondition = header.RequestCondition\n\t\ti.ResponseCondition = header.ResponseCondition\n\t\ti.CacheCondition = header.CacheCondition\n\n\t\tif _, err = client.CreateHeader(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncCacheSettings(client *fastly.Client, s *fastly.Service, newCacheSettings []*fastly.CacheSetting) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingCacheSettings, err := client.ListCacheSettings(&fastly.ListCacheSettingsInput{Service: s.ID, Version: newversion.Number})\n\tfor _, setting := range existingCacheSettings {\n\t\terr := client.DeleteCacheSetting(&fastly.DeleteCacheSettingInput{Service: s.ID, Name: setting.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, setting := range newCacheSettings {\n\t\tvar i fastly.CreateCacheSettingInput\n\t\ti.TTL = setting.TTL\n\t\ti.Name = setting.Name\n\t\ti.Action = setting.Action\n\t\ti.Service = s.ID\n\t\ti.Version = newversion.Number\n\t\ti.StaleTTL = setting.StaleTTL\n\t\ti.CacheCondition = setting.CacheCondition\n\n\t\tif _, err = client.CreateCacheSetting(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncConditions(client *fastly.Client, s *fastly.Service, newConditions []*fastly.Condition) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingConditions, err := client.ListConditions(&fastly.ListConditionsInput{Service: s.ID, Version: newversion.Number})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, condition := range existingConditions {\n\t\terr := client.DeleteCondition(&fastly.DeleteConditionInput{Service: s.ID, Name: condition.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, condition := range newConditions {\n\t\tvar i fastly.CreateConditionInput\n\t\ti.Name = condition.Name\n\t\ti.Type = condition.Type\n\t\ti.Service = s.ID\n\t\ti.Version = newversion.Number\n\t\ti.Priority = condition.Priority\n\t\ti.Statement = condition.Statement\n\t\tif _, err = client.CreateCondition(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc syncBackends(client *fastly.Client, s *fastly.Service, newBackends []*fastly.Backend) error {\n\tnewversion, err := prepareNewVersion(client, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingBackends, err := client.ListBackends(&fastly.ListBackendsInput{Service: s.ID, Version: newversion.Number})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, backend := range existingBackends {\n\t\terr := client.DeleteBackend(&fastly.DeleteBackendInput{Service: s.ID, Name: backend.Name, Version: newversion.Number})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, backend := range newBackends {\n\t\tvar i fastly.CreateBackendInput\n\t\ti.Address = backend.Address\n\t\ti.Name = backend.Name\n\t\ti.Service = newversion.ServiceID\n\t\ti.Version = newversion.Number\n\t\ti.UseSSL = backend.UseSSL\n\t\ti.SSLCheckCert = backend.SSLCheckCert\n\t\ti.SSLSNIHostname = backend.SSLSNIHostname\n\t\ti.SSLHostname = backend.SSLHostname\n\t\ti.AutoLoadbalance = backend.AutoLoadbalance\n\t\ti.Weight = backend.Weight\n\t\ti.MaxConn = backend.MaxConn\n\t\ti.ConnectTimeout = backend.ConnectTimeout\n\t\ti.FirstByteTimeout = backend.FirstByteTimeout\n\t\ti.BetweenBytesTimeout = backend.BetweenBytesTimeout\n\t\ti.HealthCheck = backend.HealthCheck\n\t\ti.RequestCondition = backend.RequestCondition\n\t\tif _, err = client.CreateBackend(&i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc syncConfig(client *fastly.Client, s *fastly.Service) error {\n\tvar activeVersion = strconv.Itoa(int(s.ActiveVersion))\n\tvar config SiteConfig\n\tif _, ok := siteConfigs[s.Name]; ok {\n\t\tconfig = siteConfigs[s.Name]\n\t} else {\n\t\tconfig = siteConfigs[\"_default_\"]\n\t}\n\n\tremoteConditions, err := client.ListConditions(&fastly.ListConditionsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Conditions must be sync'd first, as if they're referenced in any other setup\n\t\/\/ the API will reject if they don't exist.\n\tif !reflect.DeepEqual(config.Conditions, remoteConditions) {\n\t\tif err := syncConditions(client, s, config.Conditions); err != nil {\n\t\t\treturn fmt.Errorf(\"Error syncing conditions: %s\", err)\n\t\t}\n\t}\n\tremoteBackends, err := client.ListBackends(&fastly.ListBackendsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !reflect.DeepEqual(config.Backends, remoteBackends) {\n\t\tif err := syncBackends(client, s, config.Backends); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tremoteCacheSettings, _ := client.ListCacheSettings(&fastly.ListCacheSettingsInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !reflect.DeepEqual(config.CacheSettings, remoteCacheSettings) {\n\t\tif err := syncCacheSettings(client, s, config.CacheSettings); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tremoteHeaders, _ := client.ListHeaders(&fastly.ListHeadersInput{Service: s.ID, Version: activeVersion})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !reflect.DeepEqual(config.Headers, remoteHeaders) {\n\t\tif err := syncHeaders(client, s, config.Headers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tclient, err := fastly.NewClient(os.Getenv(\"FASTLY_KEY\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := readConfig(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpendingVersions = make(map[string]fastly.Version)\n\n\tservices, err := client.ListServices(&fastly.ListServicesInput{})\n\tfor _, s := range services {\n\t\tif err = syncVcls(client, s); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err = syncConfig(client, s); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*-\n * Copyright 2015 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on.\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to (HOST:PORT, or unix:PATH).\").Required().String()\n\tunsafeTarget = kingpin.Flag(\"unsafe-target\", \"If set, does not limit target to localhost, 127.0.0.1 or ::1\").Bool()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12).\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore.\").PlaceHolder(\"PASS\").String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509).\").Required().String()\n\tautoReload = kingpin.Flag(\"auto-reload\", \"Watch keystores file with inotify\/fswatch and reload on changes.\").Bool()\n\ttimedReload = kingpin.Flag(\"timed-reload\", \"Reload keystores every N seconds, refresh listener on changes.\").PlaceHolder(\"N\").Int()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject.\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated).\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated).\").PlaceHolder(\"OU\").Strings()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr.\").Bool()\n)\n\n\/\/ Global logger instance\nvar logger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\t\/\/ Validate flags\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedCNs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-cn are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedOUs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-ou are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif !validateTarget(*forwardAddress) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --target must be localhost:port, 127.0.0.1:port or [::1]:port\")\n\t\tos.Exit(1)\n\t}\n\tif *autoReload && (*timedReload > 0) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --auto-reload and --timed-reload are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ Set up file watchers (if requested)\n\twatcher := make(chan bool, 1)\n\tif *autoReload {\n\t\tgo watchAuto([]string{*keystorePath, *caBundlePath}, watcher)\n\t} else if *timedReload > 0 {\n\t\tgo watchTimed([]string{*keystorePath, *caBundlePath}, time.Duration(*timedReload)*time.Second, watcher)\n\t}\n\n\t\/\/ A channel to notify us that the listener is running.\n\tstarted := make(chan bool, 1)\n\tgo listen(started, listeners, watcher)\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Printf(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Printf(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Printf(\"all listeners closed, shutting down\")\n}\n\nfunc validateTarget(addr string) bool {\n\tif *unsafeTarget {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"unix:\") {\n\t\treturn true\n\t}\n\tif matched, _ := regexp.MatchString(\"^127[.]0[.]0[.]1:\\\\d+$\", addr); matched {\n\t\treturn true\n\t}\n\tif matched, _ := regexp.MatchString(\"^\\\\[::1\\\\]:\\\\d+$\", addr); matched {\n\t\treturn true\n\t}\n\tif matched, _ := regexp.MatchString(\"^localhost:\\\\d+$\", addr); matched {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, listeners *sync.WaitGroup, watcher chan bool) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig(*keystorePath, *keystorePass, *caBundlePath)\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tbackendNet, backendAddr, err := parseTarget(*forwardAddress)\n\tif err != nil {\n\t\tlogger.Printf(\"invalid backend address: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tdial := func() (net.Conn, error) {\n\t\treturn net.Dial(backendNet, backendAddr)\n\t}\n\n\tgo accept(listener, handlers, stopper, leaf, dial)\n\tgo signalHandler(listener, stopper, listeners, watcher)\n\n\tstarted <- true\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tlisteners.Done()\n}\n<commit_msg>Use HasPrefix instead of regexp<commit_after>\/*-\n * Copyright 2015 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on.\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to (HOST:PORT, or unix:PATH).\").Required().String()\n\tunsafeTarget = kingpin.Flag(\"unsafe-target\", \"If set, does not limit target to localhost, 127.0.0.1 or ::1\").Bool()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12).\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore.\").PlaceHolder(\"PASS\").String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509).\").Required().String()\n\tautoReload = kingpin.Flag(\"auto-reload\", \"Watch keystores file with inotify\/fswatch and reload on changes.\").Bool()\n\ttimedReload = kingpin.Flag(\"timed-reload\", \"Reload keystores every N seconds, refresh listener on changes.\").PlaceHolder(\"N\").Int()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject.\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated).\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated).\").PlaceHolder(\"OU\").Strings()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr.\").Bool()\n)\n\n\/\/ Global logger instance\nvar logger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\t\/\/ Validate flags\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedCNs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-cn are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedOUs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-ou are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif !validateTarget(*forwardAddress) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --target must be localhost:port, 127.0.0.1:port or [::1]:port\")\n\t\tos.Exit(1)\n\t}\n\tif *autoReload && (*timedReload > 0) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --auto-reload and --timed-reload are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ Set up file watchers (if requested)\n\twatcher := make(chan bool, 1)\n\tif *autoReload {\n\t\tgo watchAuto([]string{*keystorePath, *caBundlePath}, watcher)\n\t} else if *timedReload > 0 {\n\t\tgo watchTimed([]string{*keystorePath, *caBundlePath}, time.Duration(*timedReload)*time.Second, watcher)\n\t}\n\n\t\/\/ A channel to notify us that the listener is running.\n\tstarted := make(chan bool, 1)\n\tgo listen(started, listeners, watcher)\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Printf(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Printf(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Printf(\"all listeners closed, shutting down\")\n}\n\nfunc validateTarget(addr string) bool {\n\tif *unsafeTarget {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"unix:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"127.0.0.1:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"[::1]:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"localhost:\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, listeners *sync.WaitGroup, watcher chan bool) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig(*keystorePath, *keystorePass, *caBundlePath)\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tbackendNet, backendAddr, err := parseTarget(*forwardAddress)\n\tif err != nil {\n\t\tlogger.Printf(\"invalid backend address: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tdial := func() (net.Conn, error) {\n\t\treturn net.Dial(backendNet, backendAddr)\n\t}\n\n\tgo accept(listener, handlers, stopper, leaf, dial)\n\tgo signalHandler(listener, stopper, listeners, watcher)\n\n\tstarted <- true\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tlisteners.Done()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"os\"\n\t\"container\/list\"\n)\n\nvar (\n\t\/\/ Declared in common:\n\t\/\/ hashFunction HashValue\n\t\/\/ cwd string\n\tdryRun bool\n)\n\nvar cmdDedup = &Command{\n\tRun: dedup,\n\tUsage: `dedup [-c=hash] [-D=dir] [--dryrun]`,\n\tShort: \"deduplicate using a checksum file\",\n\tLong: `\nDeduplicate all files for the given directory against a checksum file. For every\ninstance where multiple files share the same checksum, you are interactively\nprompted to pick one to keep, while the rest are deleted.\n\nWarning: if your checksum file is not up-to-date or is incorrect, then the result\nof running this will be incorrect.`,\n}\n\n\/\/ Initialized in common:\n\/\/ var preferredHashes []string\n\nfunc init() {\n\tconst (\n\t\tcwdUsage = \"the directory for which to verify using the checksum file\"\n\t\tdryRunUsage = \"only output what would have been done; do not perform any destructive operations\"\n\t)\n\thashUsage := fmt.Sprintf(\"the hash to use; if unspecified, the following are tried in order: %s\", strings.Join(preferredHashes, \", \"))\n\tf := &cmdDedup.Flag\n\tf.Var(&hashFunction, \"c\", hashUsage)\n\tf.StringVar(&cwd, \"D\", \".\", cwdUsage)\n\tf.BoolVar(&dryRun, \"dryrun\", false, dryRunUsage)\n}\n\nfunc dedup(cmd *Command, args []string) {\n\t_, checksums := setDirAndHashOptions()\n\n\tentries, err := EntriesFromChecksumFile(checksums)\n\tif err != nil {\n\t\tdie(err)\n\t}\n\n\tif dryRun {\n\t\twarn(\"# Dry run mode is on\\n\")\n\t}\n\n\tbuckets := entries.BucketsByChecksum()\n\tfor checksum, bucket := range buckets {\n\t\tif bucket.Len() > 1 {\n\t\t\tpromptForRemoval(bucket, checksum)\n\t\t}\n\t}\n}\n\nfunc promptForRemoval(duplicates *list.List, checksum string) {\nPROMPT:\n\tfor e, i := duplicates.Front(), 1; e != nil; e, i = e.Next(), i+1 {\n\t\tfile := e.Value.(string)\n\t\twarn(\"# [%d] %s\\n\", i, file)\n\t}\n\twarn(\"# All of these have checksum %s. Keep which? \", checksum)\n\n\tvar choice int\n\tif n, err := fmt.Scanf(\"%d\", &choice); err == nil && n == 1 {\n\t\tif choice < 1 || choice > duplicates.Len() {\n\t\t\twarn(\"# %d is not a valid choice\\n\\n\", choice)\n\t\t\tgoto PROMPT\n\t\t}\n\t\tremoveDuplicatesAndKeep(duplicates, choice)\n\t} else {\n\t\twarn(\"# Please enter a number\\n\\n\")\n\t\tgoto PROMPT\n\t}\n}\n\nfunc removeDuplicatesAndKeep(duplicates *list.List, choice int) {\n\te := duplicates.Front()\n\tfor i := 1; i < choice; i++ {\n\t\te = e.Next()\n\t}\n\n\tfile := duplicates.Remove(e).(string)\n\twarn(\"# Keeping %s\\n\", file)\n\n\tfor e = duplicates.Front(); e != nil; e = e.Next() {\n\t\tfile := e.Value.(string)\n\n\t\tif !dryRun {\n\t\t\tif err := os.Remove(file); err != nil {\n\t\t\t\tcroak(err)\n\t\t\t}\n\t\t}\n\t\twarn(\"rm %s\\n\", file)\n\t}\n\twarn(\"\\n\")\n}\n\n<commit_msg>Fix the dedup's help for -D<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"os\"\n\t\"container\/list\"\n)\n\nvar (\n\t\/\/ Declared in common:\n\t\/\/ hashFunction HashValue\n\t\/\/ cwd string\n\tdryRun bool\n)\n\nvar cmdDedup = &Command{\n\tRun: dedup,\n\tUsage: `dedup [-c=hash] [-D=dir] [--dryrun]`,\n\tShort: \"deduplicate using a checksum file\",\n\tLong: `\nDeduplicate all files for the given directory against a checksum file. For every\ninstance where multiple files share the same checksum, you are interactively\nprompted to pick one to keep, while the rest are deleted.\n\nWarning: if your checksum file is not up-to-date or is incorrect, then the result\nof running this will be incorrect.`,\n}\n\n\/\/ Initialized in common:\n\/\/ var preferredHashes []string\n\nfunc init() {\n\tconst (\n\t\tcwdUsage = \"the directory to dedup using its checksum file\"\n\t\tdryRunUsage = \"only output what would have been done; do not perform any destructive operations\"\n\t)\n\thashUsage := fmt.Sprintf(\"the hash to use; if unspecified, the following are tried in order: %s\", strings.Join(preferredHashes, \", \"))\n\tf := &cmdDedup.Flag\n\tf.Var(&hashFunction, \"c\", hashUsage)\n\tf.StringVar(&cwd, \"D\", \".\", cwdUsage)\n\tf.BoolVar(&dryRun, \"dryrun\", false, dryRunUsage)\n}\n\nfunc dedup(cmd *Command, args []string) {\n\t_, checksums := setDirAndHashOptions()\n\n\tentries, err := EntriesFromChecksumFile(checksums)\n\tif err != nil {\n\t\tdie(err)\n\t}\n\n\tif dryRun {\n\t\twarn(\"# Dry run mode is on\\n\")\n\t}\n\n\tbuckets := entries.BucketsByChecksum()\n\tfor checksum, bucket := range buckets {\n\t\tif bucket.Len() > 1 {\n\t\t\tpromptForRemoval(bucket, checksum)\n\t\t}\n\t}\n}\n\nfunc promptForRemoval(duplicates *list.List, checksum string) {\nPROMPT:\n\tfor e, i := duplicates.Front(), 1; e != nil; e, i = e.Next(), i+1 {\n\t\tfile := e.Value.(string)\n\t\twarn(\"# [%d] %s\\n\", i, file)\n\t}\n\twarn(\"# All of these have checksum %s. Keep which? \", checksum)\n\n\tvar choice int\n\tif n, err := fmt.Scanf(\"%d\", &choice); err == nil && n == 1 {\n\t\tif choice < 1 || choice > duplicates.Len() {\n\t\t\twarn(\"# %d is not a valid choice\\n\\n\", choice)\n\t\t\tgoto PROMPT\n\t\t}\n\t\tremoveDuplicatesAndKeep(duplicates, choice)\n\t} else {\n\t\twarn(\"# Please enter a number\\n\\n\")\n\t\tgoto PROMPT\n\t}\n}\n\nfunc removeDuplicatesAndKeep(duplicates *list.List, choice int) {\n\te := duplicates.Front()\n\tfor i := 1; i < choice; i++ {\n\t\te = e.Next()\n\t}\n\n\tfile := duplicates.Remove(e).(string)\n\twarn(\"# Keeping %s\\n\", file)\n\n\tfor e = duplicates.Front(); e != nil; e = e.Next() {\n\t\tfile := e.Value.(string)\n\n\t\tif !dryRun {\n\t\t\tif err := os.Remove(file); err != nil {\n\t\t\t\tcroak(err)\n\t\t\t}\n\t\t}\n\t\twarn(\"rm %s\\n\", file)\n\t}\n\twarn(\"\\n\")\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"golang.org\/x\/net\/http2\"\n)\n\nconst version = \"0.1\"\nconst name = \"simple-httpd\"\nconst indexHTMLFile = \"index.html\"\nconst pathSeperator = \"\/\"\n\nconst (\n\tcert = \"cert.pem\"\n\tkey = \"key.pem\"\n\tcertDir = \"\/.autocert\"\n)\n\n\/\/ gitSHA is populated at build time from\n\/\/ `-ldflags \"-X main.gitSHA=$(shell git rev-parse HEAD)\"`\nvar gitSHA string\n\n\/\/ Data holds the data passed to the template engine\ntype Data struct {\n\tName string\n\tLastModified string\n\tURI string\n\tSize int64\n}\n\n\/\/ httpServer holds the relavent info\/state\ntype httpServer struct {\n\tDirectory string\n\tPort int\n\ttemplate *template.Template\n}\n\n\/\/ requestData holds data about the request for logging\ntype requestData struct {\n\tTimestamp string `json:\"timestamp,omitempty\"`\n\tMethod string `json:\"method,omitempty\"`\n\tHTTPVersion string `json:\"http_version,omitempty\"`\n\tRemoteAddr string `json:\"remote_addr,omitempty\"`\n\tPath string `json:\"path,omitempty\"`\n\tStatus int `json:\"status,omitempty\"`\n\tUserAgent string `json:\"user_agent,omitempty\"`\n\tError string `json:\"error,omitempty,omitempty\"`\n}\n\nfunc (r requestData) Format(f fmt.State, c rune) {\n\tswitch c {\n\tcase 'v', 's':\n\t\tenc := json.NewEncoder(f)\n\t\tenc.Encode(r)\n\t}\n}\n\n\/\/ setHeaders sets the base headers for all requests\nfunc setHeaders(w http.ResponseWriter) {\n\tw.Header().Set(\"Server\", name+pathSeperator+version)\n\tw.Header().Add(\"Date\", time.Now().Format(time.RFC822))\n}\n\n\/\/ ServeHTTP handles inbound requests\nfunc (h *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thttp.Error(w, fmt.Sprintln(err), http.StatusInternalServerError)\n\t\t\tlog.Printf(\"recovering from error: %s\\n\", err)\n\t\t}\n\t}()\n\n\trd := requestData{\n\t\tTimestamp: time.Now().Format(\"2006-01-02 15:04:05\"),\n\t\tRemoteAddr: req.RemoteAddr,\n\t\tMethod: req.Method,\n\t\tPath: req.RequestURI,\n\t\tUserAgent: req.UserAgent(),\n\t}\n\n\tqueryStr, err := url.QueryUnescape(req.RequestURI)\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusInternalServerError\n\t\tfmt.Println(rd)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfullpath := filepath.Join(h.Directory, queryStr[1:])\n\n\tfile, err := os.Open(fullpath)\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusNotFound\n\t\tfmt.Println(rd)\n\t\thttp.NotFound(w, req)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tstat, err := file.Stat()\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusInternalServerError\n\t\tfmt.Println(rd)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsetHeaders(w)\n\n\tif stat.IsDir() {\n\t\tcontents, err := file.Readdir(-1)\n\t\tif err != nil {\n\t\t\trd.Status = http.StatusInternalServerError\n\t\t\trd.Error = err.Error()\n\t\t\tfmt.Println(rd)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfiles := make([]Data, 0, len(contents))\n\t\tfor _, entry := range contents {\n\t\t\tif entry.Name() == indexHTMLFile {\n\t\t\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\t\t\t\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%v\", entry.Size()))\n\n\t\t\t\thf, err := os.Open(fullpath + pathSeperator + entry.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tio.Copy(w, hf)\n\n\t\t\t\trd.Status = http.StatusOK\n\t\t\t\tfmt.Println(rd)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfile := Data{\n\t\t\t\tName: entry.Name(),\n\t\t\t\tLastModified: entry.ModTime().Format(time.RFC1123),\n\t\t\t\tURI: path.Join(queryStr, entry.Name()),\n\t\t\t}\n\t\t\tif entry.IsDir() {\n\t\t\t\tfile.Name = entry.Name() + pathSeperator\n\t\t\t\tfile.Size = entry.Size()\n\t\t\t}\n\t\t\tfiles = append(files, file)\n\t\t}\n\n\t\trd.Status = http.StatusOK\n\n\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\n\t\th.template.Execute(w, map[string]interface{}{\n\t\t\t\"files\": files,\n\t\t\t\"version\": gitSHA,\n\t\t\t\"port\": h.Port,\n\t\t\t\"relativePath\": queryStr,\n\t\t\t\"goVersion\": runtime.Version(),\n\t\t\t\"parentDirectory\": path.Dir(queryStr),\n\t\t})\n\n\t\tfmt.Println(rd)\n\n\t\treturn\n\t}\n\n\tif mimetype := mime.TypeByExtension(path.Ext(file.Name())); mimetype != \"\" {\n\t\tfmt.Println(mimetype)\n\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\t} else {\n\t\tw.Header().Set(\"Content-type\", \"application\/octet-stream\")\n\t}\n\n\tio.Copy(w, file)\n\n\trd.Status = http.StatusOK\n\tfmt.Println(rd)\n}\n\n\/\/ serveTLS\nfunc serveTLS(domain string, port int) error {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcacheDir := u.HomeDir + certDir\n\tif err := os.MkdirAll(cacheDir, 0700); err != nil {\n\t\treturn err\n\t}\n\n\tm := autocert.Manager{\n\t\tCache: autocert.DirCache(cacheDir),\n\t\tPrompt: autocert.AcceptTOS,\n\t\tHostPolicy: autocert.HostWhitelist(domain),\n\t}\n\n\tsrv := &http.Server{\n\t\tTLSConfig: &tls.Config{\n\t\t\tGetCertificate: m.GetCertificate,\n\t\t},\n\t}\n\n\thttp2.ConfigureServer(srv, &http2.Server{\n\t\tNewWriteScheduler: func() http2.WriteScheduler {\n\t\t\treturn http2.NewPriorityWriteScheduler(nil)\n\t\t},\n\t})\n\n\tln, err := net.ListenTCP(\"tcp\", &net.TCPAddr{\n\t\tPort: port,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn srv.Serve(tls.NewListener(keepAliveListener{ln}, srv.TLSConfig))\n}\n\n\/\/ keepAliveListener\ntype keepAliveListener struct {\n\t*net.TCPListener\n}\n\n\/\/ Accept\nfunc (k keepAliveListener) Accept() (net.Conn, error) {\n\ttc, err := k.AcceptTCP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(time.Minute * 3)\n\n\treturn tc, nil\n}\n\nfunc getpwd() string {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn pwd\n}\n\nfunc homeDir() string {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn u.HomeDir\n}\n\nfunc main() {\n\tvar port int\n\tvar le string\n\tvar gs bool\n\n\tpwd := getpwd()\n\n\tflag.IntVar(&port, \"p\", 8000, \"bind port\")\n\tflag.StringVar(&le, \"l\", \"\", \"enable TLS with Let's Encrypt for the given domain name. Port = port + 1 \")\n\tflag.BoolVar(&gs, \"g\", false, \"generate and use a self signed certificate\")\n\tflag.Parse()\n\n\tif le != \"\" {\n\t\tvar srv http.Server\n\t\thttp2.ConfigureServer(&srv, new(http2.Server))\n\n\t\ttlsPort := port + 1\n\n\t\tgo func() {\n\t\t\tfmt.Printf(\"Serving HTTPS on 0.0.0.0 port %v ...\\n\", tlsPort)\n\t\t\tlog.Fatal(serveTLS(le, tlsPort))\n\t\t}()\n\t}\n\n\th := &httpServer{\n\t\tPort: port,\n\t\tDirectory: pwd,\n\t\ttemplate: template.Must(template.New(\"listing\").Parse(htmlTemplate)),\n\t}\n\n\tif gs {\n\t\thd := homeDir()\n\n\t\tcertPath := hd + certDir + pathSeperator + cert\n\t\tkeyPath := hd + certDir + pathSeperator + key\n\n\t\tif err := generateCertificates(certPath, keyPath); err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tfmt.Printf(\"Serving HTTPS on 0.0.0.0 port %v ...\\n\", h.Port+1)\n\t\tlog.Fatal(http.ListenAndServeTLS(fmt.Sprintf(\"0.0.0.0:%d\", port+1), certPath, keyPath, h))\n\t}\n\n\tfmt.Printf(\"Serving HTTP on 0.0.0.0 port %v ...\\n\", h.Port)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", port), h))\n\n}\n\nconst htmlTemplate = `\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>simple-httpd<\/title>\n\t<style>\n\t\ttable, td {\n \tborder: 1px;\n\t }\n\t<\/style>\n <\/head>\n <body>\n <h2>Directory listing for {{.relativePath}}<\/h2>\n\t<hr>\n <table>\n\t <tr>\n <td><b>Name<\/b><\/td>\n\t\t<td><b>Last Modified<\/b><\/td>\n\t\t<td><b>Size<\/b><\/td>\n\t <\/tr>\n\t <tr>\n\t <td><a href=\"{{.parentDirectory}}\">{{.parentDirectory}}<\/td>\n\t\t<td><\/td>\n\t\t<td><\/td>\n\t <\/td>\n {{range .files}}\n <tr>\n\t <td><a href=\"{{.URI}}\">{{.Name}}<\/td>\n\t\t<td>{{.LastModified}}<\/td>\n\t\t<td>{{.Size}}<\/td>\n\t <\/tr>\n {{end}}\n <table>\n <\/body>\n <hr>\n <footer>\n <p>simple-httpd - {{.version}} \/ {{.goVersion}}<\/p>\n <\/footer>\n<\/html>`\n<commit_msg>Fix handling of URIs with ?queries<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"golang.org\/x\/net\/http2\"\n)\n\nconst version = \"0.1\"\nconst name = \"simple-httpd\"\nconst indexHTMLFile = \"index.html\"\nconst pathSeperator = \"\/\"\n\nconst (\n\tcert = \"cert.pem\"\n\tkey = \"key.pem\"\n\tcertDir = \"\/.autocert\"\n)\n\n\/\/ gitSHA is populated at build time from\n\/\/ `-ldflags \"-X main.gitSHA=$(shell git rev-parse HEAD)\"`\nvar gitSHA string\n\n\/\/ Data holds the data passed to the template engine\ntype Data struct {\n\tName string\n\tLastModified string\n\tURI string\n\tSize int64\n}\n\n\/\/ httpServer holds the relavent info\/state\ntype httpServer struct {\n\tDirectory string\n\tPort int\n\ttemplate *template.Template\n}\n\n\/\/ requestData holds data about the request for logging\ntype requestData struct {\n\tTimestamp string `json:\"timestamp,omitempty\"`\n\tMethod string `json:\"method,omitempty\"`\n\tHTTPVersion string `json:\"http_version,omitempty\"`\n\tRemoteAddr string `json:\"remote_addr,omitempty\"`\n\tPath string `json:\"path,omitempty\"`\n\tStatus int `json:\"status,omitempty\"`\n\tUserAgent string `json:\"user_agent,omitempty\"`\n\tError string `json:\"error,omitempty,omitempty\"`\n}\n\nfunc (r requestData) Format(f fmt.State, c rune) {\n\tswitch c {\n\tcase 'v', 's':\n\t\tenc := json.NewEncoder(f)\n\t\tenc.Encode(r)\n\t}\n}\n\n\/\/ setHeaders sets the base headers for all requests\nfunc setHeaders(w http.ResponseWriter) {\n\tw.Header().Set(\"Server\", name+pathSeperator+version)\n\tw.Header().Add(\"Date\", time.Now().Format(time.RFC822))\n}\n\n\/\/ ServeHTTP handles inbound requests\nfunc (h *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thttp.Error(w, fmt.Sprintln(err), http.StatusInternalServerError)\n\t\t\tlog.Printf(\"recovering from error: %s\\n\", err)\n\t\t}\n\t}()\n\n\trd := requestData{\n\t\tTimestamp: time.Now().Format(\"2006-01-02 15:04:05\"),\n\t\tRemoteAddr: req.RemoteAddr,\n\t\tMethod: req.Method,\n\t\tPath: req.RequestURI,\n\t\tUserAgent: req.UserAgent(),\n\t}\n\n\tparsedURL, err := url.Parse(req.RequestURI)\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusInternalServerError\n\t\tfmt.Println(rd)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tescapedPath := parsedURL.EscapedPath()\n\tfullpath := filepath.Join(h.Directory, escapedPath[1:])\n\n\tfile, err := os.Open(fullpath)\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusNotFound\n\t\tfmt.Println(rd)\n\t\thttp.NotFound(w, req)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tstat, err := file.Stat()\n\tif err != nil {\n\t\trd.Error = err.Error()\n\t\trd.Status = http.StatusInternalServerError\n\t\tfmt.Println(rd)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsetHeaders(w)\n\n\tif stat.IsDir() {\n\t\tcontents, err := file.Readdir(-1)\n\t\tif err != nil {\n\t\t\trd.Status = http.StatusInternalServerError\n\t\t\trd.Error = err.Error()\n\t\t\tfmt.Println(rd)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfiles := make([]Data, 0, len(contents))\n\t\tfor _, entry := range contents {\n\t\t\tif entry.Name() == indexHTMLFile {\n\t\t\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\t\t\t\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%v\", entry.Size()))\n\n\t\t\t\thf, err := os.Open(fullpath + pathSeperator + entry.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tio.Copy(w, hf)\n\n\t\t\t\trd.Status = http.StatusOK\n\t\t\t\tfmt.Println(rd)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfile := Data{\n\t\t\t\tName: entry.Name(),\n\t\t\t\tLastModified: entry.ModTime().Format(time.RFC1123),\n\t\t\t\tURI: path.Join(escapedPath, entry.Name()),\n\t\t\t}\n\t\t\tif entry.IsDir() {\n\t\t\t\tfile.Name = entry.Name() + pathSeperator\n\t\t\t\tfile.Size = entry.Size()\n\t\t\t}\n\t\t\tfiles = append(files, file)\n\t\t}\n\n\t\trd.Status = http.StatusOK\n\n\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\n\t\th.template.Execute(w, map[string]interface{}{\n\t\t\t\"files\": files,\n\t\t\t\"version\": gitSHA,\n\t\t\t\"port\": h.Port,\n\t\t\t\"relativePath\": escapedPath,\n\t\t\t\"goVersion\": runtime.Version(),\n\t\t\t\"parentDirectory\": path.Dir(escapedPath),\n\t\t})\n\n\t\tfmt.Println(rd)\n\n\t\treturn\n\t}\n\n\tif mimetype := mime.TypeByExtension(path.Ext(file.Name())); mimetype != \"\" {\n\t\tfmt.Println(mimetype)\n\t\tw.Header().Set(\"Content-type\", \"text\/html; charset=UTF-8\")\n\t} else {\n\t\tw.Header().Set(\"Content-type\", \"application\/octet-stream\")\n\t}\n\n\tio.Copy(w, file)\n\n\trd.Status = http.StatusOK\n\tfmt.Println(rd)\n}\n\n\/\/ serveTLS\nfunc serveTLS(domain string, port int) error {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcacheDir := u.HomeDir + certDir\n\tif err := os.MkdirAll(cacheDir, 0700); err != nil {\n\t\treturn err\n\t}\n\n\tm := autocert.Manager{\n\t\tCache: autocert.DirCache(cacheDir),\n\t\tPrompt: autocert.AcceptTOS,\n\t\tHostPolicy: autocert.HostWhitelist(domain),\n\t}\n\n\tsrv := &http.Server{\n\t\tTLSConfig: &tls.Config{\n\t\t\tGetCertificate: m.GetCertificate,\n\t\t},\n\t}\n\n\thttp2.ConfigureServer(srv, &http2.Server{\n\t\tNewWriteScheduler: func() http2.WriteScheduler {\n\t\t\treturn http2.NewPriorityWriteScheduler(nil)\n\t\t},\n\t})\n\n\tln, err := net.ListenTCP(\"tcp\", &net.TCPAddr{\n\t\tPort: port,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn srv.Serve(tls.NewListener(keepAliveListener{ln}, srv.TLSConfig))\n}\n\n\/\/ keepAliveListener\ntype keepAliveListener struct {\n\t*net.TCPListener\n}\n\n\/\/ Accept\nfunc (k keepAliveListener) Accept() (net.Conn, error) {\n\ttc, err := k.AcceptTCP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(time.Minute * 3)\n\n\treturn tc, nil\n}\n\nfunc getpwd() string {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn pwd\n}\n\nfunc homeDir() string {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn u.HomeDir\n}\n\nfunc main() {\n\tvar port int\n\tvar le string\n\tvar gs bool\n\n\tpwd := getpwd()\n\n\tflag.IntVar(&port, \"p\", 8000, \"bind port\")\n\tflag.StringVar(&le, \"l\", \"\", \"enable TLS with Let's Encrypt for the given domain name. Port = port + 1 \")\n\tflag.BoolVar(&gs, \"g\", false, \"generate and use a self signed certificate\")\n\tflag.Parse()\n\n\tif le != \"\" {\n\t\tvar srv http.Server\n\t\thttp2.ConfigureServer(&srv, new(http2.Server))\n\n\t\ttlsPort := port + 1\n\n\t\tgo func() {\n\t\t\tfmt.Printf(\"Serving HTTPS on 0.0.0.0 port %v ...\\n\", tlsPort)\n\t\t\tlog.Fatal(serveTLS(le, tlsPort))\n\t\t}()\n\t}\n\n\th := &httpServer{\n\t\tPort: port,\n\t\tDirectory: pwd,\n\t\ttemplate: template.Must(template.New(\"listing\").Parse(htmlTemplate)),\n\t}\n\n\tif gs {\n\t\thd := homeDir()\n\n\t\tcertPath := hd + certDir + pathSeperator + cert\n\t\tkeyPath := hd + certDir + pathSeperator + key\n\n\t\tif err := generateCertificates(certPath, keyPath); err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tfmt.Printf(\"Serving HTTPS on 0.0.0.0 port %v ...\\n\", h.Port+1)\n\t\tlog.Fatal(http.ListenAndServeTLS(fmt.Sprintf(\"0.0.0.0:%d\", port+1), certPath, keyPath, h))\n\t}\n\n\tfmt.Printf(\"Serving HTTP on 0.0.0.0 port %v ...\\n\", h.Port)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", port), h))\n\n}\n\nconst htmlTemplate = `\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>simple-httpd<\/title>\n\t<style>\n\t\ttable, td {\n \tborder: 1px;\n\t }\n\t<\/style>\n <\/head>\n <body>\n <h2>Directory listing for {{.relativePath}}<\/h2>\n\t<hr>\n <table>\n\t <tr>\n <td><b>Name<\/b><\/td>\n\t\t<td><b>Last Modified<\/b><\/td>\n\t\t<td><b>Size<\/b><\/td>\n\t <\/tr>\n\t <tr>\n\t <td><a href=\"{{.parentDirectory}}\">{{.parentDirectory}}<\/td>\n\t\t<td><\/td>\n\t\t<td><\/td>\n\t <\/td>\n {{range .files}}\n <tr>\n\t <td><a href=\"{{.URI}}\">{{.Name}}<\/td>\n\t\t<td>{{.LastModified}}<\/td>\n\t\t<td>{{.Size}}<\/td>\n\t <\/tr>\n {{end}}\n <table>\n <\/body>\n <hr>\n <footer>\n <p>simple-httpd - {{.version}} \/ {{.goVersion}}<\/p>\n <\/footer>\n<\/html>`\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com\/nfnt\/resize\"\n)\n\ntype dot int\n\nconst (\n\tblack dot = 1\n\twhite dot = 0\n)\n\nfunc main2() {\n\tvar dots pattern\n\tdots[0] = [4]dot{\n\t\twhite,\n\t\tblack,\n\t\twhite,\n\t\tblack,\n\t}\n\tfmt.Println(dots.String())\n}\n\nfunc main() {\n\timg, _, err := image.Decode(os.Stdin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw, _, err := GetTerminalSize()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw = w * 2 \/\/ Since each symbol is two dots wide\n\n\timg = resize.Thumbnail(uint(w), uint(img.Bounds().Dy()), img, resize.NearestNeighbor)\n\n\tvar picture string\n\n\tbounds := img.Bounds()\n\n\t\/\/ An image's bounds do not necessarily start at (0, 0), so the two loops start\n\t\/\/ at bounds.Min.Y and bounds.Min.X. Looping over Y first and X second is more\n\t\/\/ likely to result in better memory access patterns than X first and Y second.\n\tfor py := bounds.Min.Y; py < bounds.Max.Y; py += 4 {\n\t\tfor px := bounds.Min.X; px < bounds.Max.X; px += 2 {\n\t\t\tvar dots pattern\n\t\t\t\/\/ Draw left-right, top-bottom.\n\t\t\tfor y := 0; y < 4; y++ {\n\t\t\t\tfor x := 0; x < 2; x++ {\n\t\t\t\t\tdots[x][y] = dotAt(img, px+x, py+y)\n\t\t\t\t}\n\t\t\t}\n\t\t\tpicture += dots.String()\n\t\t}\n\t\tpicture += \"\\n\"\n\t}\n\n\tfmt.Println(picture)\n}\n\n\/\/ func dotAt(img image.Image, x, y int) dot {\n\/\/ \tr, g, b, a := img.At(x, y).RGBA()\n\/\/ \tfmt.Printf(\"(%d,%d) %d, %d, %d, %d\\n\", x, y, r, g, b, a)\n\/\/ \tif a < 65535 {\n\/\/ \t\treturn black\n\/\/ \t}\n\/\/ \tif r < 65535 {\n\/\/ \t\treturn white\n\/\/ \t}\n\/\/ \treturn black\n\/\/ }\n\n\/\/ 32,767 is half bright\nfunc dotAt(img image.Image, x, y int) dot {\n\tv := grayByLuminosity(img.At(x, y).RGBA())\n\t\/\/ fmt.Printf(\"(%d,%d) %d, %d, %d, %d\\n\", x, y, r, g, b, a)\n\t\/\/ fmt.Printf(\"(%d,%d) %d\\n\", x, y, v)\n\tif v <= 30767 {\n\t\treturn white\n\t}\n\treturn black\n}\n\n\/\/ 0.21 R + 0.72 G + 0.07 B\nfunc grayByLuminosity(r, g, b, a uint32) uint32 {\n\tweighted := 0.21*float32(r) + 0.72*float32(g) + 0.07*float32(b)\n\treturn uint32(weighted)\n}\n\n\/\/ Represents an 8 dot braille pattern using x,y coordinates. Eg:\n\/\/ +----------+\n\/\/ |(0,0)(1,0)|\n\/\/ |(0,1)(1,1)|\n\/\/ |(0,2)(1,2)|\n\/\/ |(0,3)(1,3)|\n\/\/ +----------+\ntype pattern [2][4]dot\n\n\/\/ CodePoint maps each point in pattern to a braille number and\n\/\/ calculates the corresponding unicode symbol.\n\/\/ +------+\n\/\/ |(1)(4)|\n\/\/ |(2)(5)|\n\/\/ |(3)(6)|\n\/\/ |(7)(8)|\n\/\/ +------+\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/Braille_Patterns#Identifying.2C_naming_and_ordering)\nfunc (dots pattern) CodePoint() rune {\n\tlowEndian := [8]dot{dots[0][0], dots[0][1], dots[0][2], dots[1][0], dots[1][1], dots[1][2], dots[0][3], dots[1][3]}\n\tvar v int\n\tfor i, x := range lowEndian {\n\t\tv += int(x) << uint(i)\n\t}\n\treturn rune(v) + '\\u2800'\n}\n\nfunc (dots pattern) String() string {\n\treturn string(dots.CodePoint())\n}\n\nfunc GetTerminalSize() (width, height int, err error) {\n\tvar dimensions [4]uint16\n\n\t_, _, e := syscall.Syscall6(\n\t\tsyscall.SYS_IOCTL,\n\t\tuintptr(syscall.Stdout),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(&dimensions)),\n\t\t0, 0, 0,\n\t)\n\tif e != 0 {\n\t\treturn -1, -1, e\n\t}\n\treturn int(dimensions[1]), int(dimensions[0]), nil\n}\n<commit_msg>adds luminosity flag<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"image\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com\/nfnt\/resize\"\n)\n\ntype dot int\n\nconst (\n\tblack dot = 1\n\twhite dot = 0\n)\n\nvar (\n\tluminosity = flag.Float64(\"lum\", 50, \"the percentage luminosity cutoff to determine black or white pixels\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tfmt.Println(*luminosity)\n\n\timg, _, err := image.Decode(os.Stdin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw, _, err := GetTerminalSize()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw = w * 2 \/\/ Since each symbol is two dots wide\n\n\timg = resize.Thumbnail(uint(w), uint(img.Bounds().Dy()), img, resize.NearestNeighbor)\n\n\tvar picture string\n\n\tbounds := img.Bounds()\n\n\t\/\/ An image's bounds do not necessarily start at (0, 0), so the two loops start\n\t\/\/ at bounds.Min.Y and bounds.Min.X. Looping over Y first and X second is more\n\t\/\/ likely to result in better memory access patterns than X first and Y second.\n\tfor py := bounds.Min.Y; py < bounds.Max.Y; py += 4 {\n\t\tfor px := bounds.Min.X; px < bounds.Max.X; px += 2 {\n\t\t\tvar dots pattern\n\t\t\t\/\/ Draw left-right, top-bottom.\n\t\t\tfor y := 0; y < 4; y++ {\n\t\t\t\tfor x := 0; x < 2; x++ {\n\t\t\t\t\tdots[x][y] = dotAt(img, px+x, py+y)\n\t\t\t\t}\n\t\t\t}\n\t\t\tpicture += dots.String()\n\t\t}\n\t\tpicture += \"\\n\"\n\t}\n\n\tfmt.Println(picture)\n}\n\nfunc dotAt(img image.Image, x, y int) dot {\n\tv := grayByLuminosity(img.At(x, y).RGBA())\n\t\/\/ 32,767 is half bright\n\tif v <= uint32(65535\/(100 \/ *luminosity)) {\n\t\treturn white\n\t}\n\n\treturn black\n}\n\n\/\/ 0.21 R + 0.72 G + 0.07 B\nfunc grayByLuminosity(r, g, b, a uint32) uint32 {\n\tweighted := 0.21*float32(r) + 0.72*float32(g) + 0.07*float32(b)\n\treturn uint32(weighted)\n}\n\n\/\/ Represents an 8 dot braille pattern using x,y coordinates. Eg:\n\/\/ +----------+\n\/\/ |(0,0)(1,0)|\n\/\/ |(0,1)(1,1)|\n\/\/ |(0,2)(1,2)|\n\/\/ |(0,3)(1,3)|\n\/\/ +----------+\ntype pattern [2][4]dot\n\n\/\/ CodePoint maps each point in pattern to a braille number and\n\/\/ calculates the corresponding unicode symbol.\n\/\/ +------+\n\/\/ |(1)(4)|\n\/\/ |(2)(5)|\n\/\/ |(3)(6)|\n\/\/ |(7)(8)|\n\/\/ +------+\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/Braille_Patterns#Identifying.2C_naming_and_ordering)\nfunc (dots pattern) CodePoint() rune {\n\tlowEndian := [8]dot{dots[0][0], dots[0][1], dots[0][2], dots[1][0], dots[1][1], dots[1][2], dots[0][3], dots[1][3]}\n\tvar v int\n\tfor i, x := range lowEndian {\n\t\tv += int(x) << uint(i)\n\t}\n\treturn rune(v) + '\\u2800'\n}\n\nfunc (dots pattern) String() string {\n\treturn string(dots.CodePoint())\n}\n\nfunc GetTerminalSize() (width, height int, err error) {\n\tvar dimensions [4]uint16\n\n\t_, _, e := syscall.Syscall6(\n\t\tsyscall.SYS_IOCTL,\n\t\tuintptr(syscall.Stdout),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(&dimensions)),\n\t\t0, 0, 0,\n\t)\n\tif e != 0 {\n\t\treturn -1, -1, e\n\t}\n\treturn int(dimensions[1]), int(dimensions[0]), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"encoding\/hex\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\n\t\"golang.org\/x\/crypto\/bcrypt\"\n)\n\n\/*\nTinzenite is the struct on which all important operations should be called.\n*\/\ntype Tinzenite struct {\n\tPath string\n\tauth *Authentication\n\tselfpeer *Peer\n\tchannel *Channel\n\tallPeers []*Peer\n\tmodel *model\n}\n\n\/*\nCreateTinzenite makes a directory a new Tinzenite directory. Will return error\nif already so.\n*\/\nfunc CreateTinzenite(dirname, dirpath, peername, username string) (*Tinzenite, error) {\n\tif IsTinzenite(dirpath) {\n\t\treturn nil, ErrIsTinzenite\n\t}\n\tid, err := newIdentifier()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Bcrypt username\n\tuserbyte, err := bcrypt.GenerateFromPassword([]byte(username), 10)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuserhash := hex.EncodeToString(userbyte)\n\t\/\/ Build\n\ttinzenite := &Tinzenite{\n\t\tPath: dirpath,\n\t\tauth: &Authentication{\n\t\t\tUser: userhash,\n\t\t\tDirname: dirname,\n\t\t\tDirID: id,\n\t\t\tKey: \"TODO\"}}\n\t\/\/ build channel\n\tchannel, err := CreateChannel(peername, nil, tinzenite)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttinzenite.channel = channel\n\t\/\/ build self peer\n\taddress, err := channel.Address()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeerhash, err := newIdentifier()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeer := &Peer{\n\t\tName: peername,\n\t\tAddress: address,\n\t\tProtocol: Tox,\n\t\tIdentification: peerhash}\n\ttinzenite.selfpeer = peer\n\ttinzenite.allPeers = []*Peer{peer}\n\t\/\/ save that this directory is now a tinzenite dir\n\terr = tinzenite.storeGlobalConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ make .tinzenite so that model can work\n\terr = makeDirectory(dirpath + \"\/\" + TINZENITEDIR)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ build model (can block for long!)\n\tm, err := createModel(dirpath, peer.Identification)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttinzenite.model = m\n\t\/\/ finally store initial copy\n\terr = tinzenite.Store()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/*TODO later implement that model updates are sent to all online peers --> channel and func must be init here*\/\n\treturn tinzenite, nil\n}\n\n\/*\nLoadTinzenite will try to load the given directory path as a Tinzenite directory.\nIf not one it won't work: use CreateTinzenite to create a new peer.\n*\/\nfunc LoadTinzenite(dirpath string) (*Tinzenite, error) {\n\tif !IsTinzenite(dirpath) {\n\t\treturn nil, ErrNotTinzenite\n\t}\n\tt := &Tinzenite{Path: dirpath}\n\t\/\/ load auth\n\tauth, err := loadAuthentication(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.auth = auth\n\t\/\/ load model\n\tmodel, err := loadModel(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.model = model\n\t\/\/ load peer list\n\tpeers, err := loadPeers(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.allPeers = peers\n\t\/\/ load tox dump\n\tselfToxDump, err := loadToxDump(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.selfpeer = selfToxDump.SelfPeer\n\t\/\/ prepare channel\n\tchannel, err := CreateChannel(t.selfpeer.Name, selfToxDump.ToxData, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.channel = channel\n\treturn t, nil\n}\n\n\/*\nRemoveTinzenite directory. Specifically leaves all user files but removes all\nTinzenite specific items.\n*\/\nfunc RemoveTinzenite(path string) error {\n\tif !IsTinzenite(path) {\n\t\treturn ErrNotTinzenite\n\t}\n\t\/* TODO remove from directory list*\/\n\treturn os.RemoveAll(path + \"\/\" + TINZENITEDIR)\n}\n\n\/*\nSyncModel TODO\n\nfetches model from other peers and syncs (this is for manual sync)\n*\/\nfunc (t *Tinzenite) SyncModel() error {\n\t\/\/ first ensure that local model is up to date\n\terr := t.model.Update()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ iterate over all known peers\n\tfor _, peer := range t.allPeers {\n\t\tif peer == t.selfpeer {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Sending request.\")\n\t\t\/*TODO - also make this concurrent?*\/\n\t\tt.channel.Send(peer.Address, \"Want update! <-- TODO replace this message\")\n\t\t\/\/ if online -> continue\n\t\t\/\/ if not init -> init\n\t\t\/\/ sync\n\t\t\/*TODO must store init state in allPeers too, also in future encryption*\/\n\t}\n\treturn nil\n}\n\n\/*\nAddress of this Tinzenite peer.\n*\/\nfunc (t *Tinzenite) Address() string {\n\treturn t.selfpeer.Address\n}\n\n\/*\nClose cleanly stores everything and shuts Tinzenite down.\n*\/\nfunc (t *Tinzenite) Close() {\n\t\/\/ store all information\n\tt.Store()\n\t\/\/ FINALLY close (afterwards because I still need info from channel for store!)\n\tt.channel.Close()\n}\n\n\/*\nStore the tinzenite directory structure to disk. Will resolve all important\nobjects and store them so that it can later be reloaded.\n*\/\nfunc (t *Tinzenite) Store() error {\n\troot := t.Path + \"\/\" + TINZENITEDIR\n\t\/\/ build directory structure\n\terr := makeDirectories(root,\n\t\tORGDIR+\"\/\"+PEERSDIR, \"temp\", \"removed\", LOCAL)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write all peers to files\n\tfor _, peer := range t.allPeers {\n\t\terr := peer.store(t.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ store local peer info with toxdata\n\ttoxData, err := t.channel.ToxData()\n\tif err != nil {\n\t\treturn err\n\t}\n\ttoxPeerDump := &toxPeerDump{\n\t\tSelfPeer: t.selfpeer,\n\t\tToxData: toxData}\n\terr = toxPeerDump.store(t.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ finally store auth file\n\treturn t.auth.store(t.Path)\n}\n\n\/*\nConnect this tinzenite to another peer, beginning the bootstrap process.\n*\/\nfunc (t *Tinzenite) Connect(address string) error {\n\treturn t.channel.RequestConnection(address, t.selfpeer)\n}\n\n\/*\nCallbackNewConnection is called when a new connection request comes in.\n*\/\nfunc (t *Tinzenite) CallbackNewConnection(address, message string) {\n\tlog.Printf(\"New connection from <%s> with message <%s>\\n\", address, message)\n\terr := t.channel.AcceptConnection(address)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\t\/*TODO actually this should be read from disk once the peer has synced... oO\n\tCorrection: read from message other peer info *\/\n\tnewID, _ := newIdentifier()\n\tt.allPeers = append(t.allPeers, &Peer{\n\t\tIdentification: newID, \/\/ must be read from message\n\t\tName: message, \/\/ must be read from message\n\t\tAddress: address,\n\t\tProtocol: Tox})\n\t\/\/ actually we just want to get type and confidence from the user here, and if everything\n\t\/\/ is okay we accept the connection --> then what? need to bootstrap him...\n}\n\n\/*\nCallbackMessage is called when a message is received.\n*\/\nfunc (t *Tinzenite) CallbackMessage(address, message string) {\n\t\/\/ log.Printf(\"Message from <%s> with message <%s>\\n\", address, message)\n\tswitch message {\n\tcase \"model\":\n\t\tt.channel.Send(address, t.model.String())\n\tdefault:\n\t\tt.channel.Send(address, \"ACK\")\n\t}\n}\n\n\/*\nstoreGlobalConfig stores the path value into the user's home directory so that clients\ncan locate it.\n*\/\nfunc (t *Tinzenite) storeGlobalConfig() error {\n\t\/\/ ready outside data\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath := user.HomeDir + \"\/.config\/tinzenite\"\n\terr = makeDirectory(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath += \"\/\" + DIRECTORYLIST\n\tfile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_APPEND, FILEPERMISSIONMODE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t\/\/ write path to file\n\t_, err = file.WriteString(t.Path + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ ensure that the file is valid\n\treturn PrettifyDirectoryList()\n}\n<commit_msg>small stuff<commit_after>package core\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\n\t\"golang.org\/x\/crypto\/bcrypt\"\n)\n\n\/*\nTinzenite is the struct on which all important operations should be called.\n*\/\ntype Tinzenite struct {\n\tPath string\n\tauth *Authentication\n\tselfpeer *Peer\n\tchannel *Channel\n\tallPeers []*Peer\n\tmodel *model\n}\n\n\/*\nCreateTinzenite makes a directory a new Tinzenite directory. Will return error\nif already so.\n*\/\nfunc CreateTinzenite(dirname, dirpath, peername, username string) (*Tinzenite, error) {\n\tif IsTinzenite(dirpath) {\n\t\treturn nil, ErrIsTinzenite\n\t}\n\tid, err := newIdentifier()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Bcrypt username\n\tuserhash, err := bcrypt.GenerateFromPassword([]byte(username), 10)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Build\n\ttinzenite := &Tinzenite{\n\t\tPath: dirpath,\n\t\tauth: &Authentication{\n\t\t\tUser: string(userhash),\n\t\t\tDirname: dirname,\n\t\t\tDirID: id,\n\t\t\tKey: \"TODO\"}}\n\t\/\/ build channel\n\tchannel, err := CreateChannel(peername, nil, tinzenite)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttinzenite.channel = channel\n\t\/\/ build self peer\n\taddress, err := channel.Address()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeerhash, err := newIdentifier()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeer := &Peer{\n\t\tName: peername,\n\t\tAddress: address,\n\t\tProtocol: Tox,\n\t\tIdentification: peerhash}\n\ttinzenite.selfpeer = peer\n\ttinzenite.allPeers = []*Peer{peer}\n\t\/\/ save that this directory is now a tinzenite dir\n\terr = tinzenite.storeGlobalConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ make .tinzenite so that model can work\n\terr = makeDirectory(dirpath + \"\/\" + TINZENITEDIR)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ build model (can block for long!)\n\tm, err := createModel(dirpath, peer.Identification)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttinzenite.model = m\n\t\/\/ finally store initial copy\n\terr = tinzenite.Store()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/*TODO later implement that model updates are sent to all online peers --> channel and func must be init here*\/\n\treturn tinzenite, nil\n}\n\n\/*\nLoadTinzenite will try to load the given directory path as a Tinzenite directory.\nIf not one it won't work: use CreateTinzenite to create a new peer.\n*\/\nfunc LoadTinzenite(dirpath string) (*Tinzenite, error) {\n\tif !IsTinzenite(dirpath) {\n\t\treturn nil, ErrNotTinzenite\n\t}\n\tt := &Tinzenite{Path: dirpath}\n\t\/\/ load auth\n\tauth, err := loadAuthentication(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.auth = auth\n\t\/\/ load model\n\tmodel, err := loadModel(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.model = model\n\t\/\/ load peer list\n\tpeers, err := loadPeers(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.allPeers = peers\n\t\/\/ load tox dump\n\tselfToxDump, err := loadToxDump(dirpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.selfpeer = selfToxDump.SelfPeer\n\t\/\/ prepare channel\n\tchannel, err := CreateChannel(t.selfpeer.Name, selfToxDump.ToxData, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.channel = channel\n\treturn t, nil\n}\n\n\/*\nRemoveTinzenite directory. Specifically leaves all user files but removes all\nTinzenite specific items.\n*\/\nfunc RemoveTinzenite(path string) error {\n\tif !IsTinzenite(path) {\n\t\treturn ErrNotTinzenite\n\t}\n\t\/* TODO remove from directory list*\/\n\treturn os.RemoveAll(path + \"\/\" + TINZENITEDIR)\n}\n\n\/*\nSyncModel TODO\n\nfetches model from other peers and syncs (this is for manual sync)\n*\/\nfunc (t *Tinzenite) SyncModel() error {\n\t\/\/ first ensure that local model is up to date\n\terr := t.model.Update()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ iterate over all known peers\n\tfor _, peer := range t.allPeers {\n\t\tif peer == t.selfpeer {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Sending request.\")\n\t\t\/*TODO - also make this concurrent?*\/\n\t\tt.channel.Send(peer.Address, \"Want update! <-- TODO replace this message\")\n\t\t\/\/ if online -> continue\n\t\t\/\/ if not init -> init\n\t\t\/\/ sync\n\t\t\/*TODO must store init state in allPeers too, also in future encryption*\/\n\t}\n\treturn nil\n}\n\n\/*\nAddress of this Tinzenite peer.\n*\/\nfunc (t *Tinzenite) Address() string {\n\treturn t.selfpeer.Address\n}\n\n\/*\nClose cleanly stores everything and shuts Tinzenite down.\n*\/\nfunc (t *Tinzenite) Close() {\n\t\/\/ store all information\n\tt.Store()\n\t\/\/ FINALLY close (afterwards because I still need info from channel for store!)\n\tt.channel.Close()\n}\n\n\/*\nStore the tinzenite directory structure to disk. Will resolve all important\nobjects and store them so that it can later be reloaded.\n*\/\nfunc (t *Tinzenite) Store() error {\n\troot := t.Path + \"\/\" + TINZENITEDIR\n\t\/\/ build directory structure\n\terr := makeDirectories(root,\n\t\tORGDIR+\"\/\"+PEERSDIR, \"temp\", \"removed\", LOCAL)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write all peers to files\n\tfor _, peer := range t.allPeers {\n\t\terr := peer.store(t.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ store local peer info with toxdata\n\ttoxData, err := t.channel.ToxData()\n\tif err != nil {\n\t\treturn err\n\t}\n\ttoxPeerDump := &toxPeerDump{\n\t\tSelfPeer: t.selfpeer,\n\t\tToxData: toxData}\n\terr = toxPeerDump.store(t.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ finally store auth file\n\treturn t.auth.store(t.Path)\n}\n\n\/*\nConnect this tinzenite to another peer, beginning the bootstrap process.\n*\/\nfunc (t *Tinzenite) Connect(address string) error {\n\treturn t.channel.RequestConnection(address, t.selfpeer)\n}\n\n\/*\nCallbackNewConnection is called when a new connection request comes in.\n*\/\nfunc (t *Tinzenite) CallbackNewConnection(address, message string) {\n\tlog.Printf(\"New connection from <%s> with message <%s>\\n\", address, message)\n\terr := t.channel.AcceptConnection(address)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\t\/*TODO actually this should be read from disk once the peer has synced... oO\n\tCorrection: read from message other peer info *\/\n\tnewID, _ := newIdentifier()\n\tt.allPeers = append(t.allPeers, &Peer{\n\t\tIdentification: newID, \/\/ must be read from message\n\t\tName: message, \/\/ must be read from message\n\t\tAddress: address,\n\t\tProtocol: Tox})\n\t\/\/ actually we just want to get type and confidence from the user here, and if everything\n\t\/\/ is okay we accept the connection --> then what? need to bootstrap him...\n}\n\n\/*\nCallbackMessage is called when a message is received.\n*\/\nfunc (t *Tinzenite) CallbackMessage(address, message string) {\n\t\/\/ log.Printf(\"Message from <%s> with message <%s>\\n\", address, message)\n\tswitch message {\n\tcase \"model\":\n\t\tt.channel.Send(address, t.model.String())\n\tcase \"auth\":\n\t\tauthbin, _ := json.Marshal(t.auth)\n\t\tt.channel.Send(address, string(authbin))\n\tdefault:\n\t\tt.channel.Send(address, \"ACK\")\n\t}\n}\n\n\/*\nstoreGlobalConfig stores the path value into the user's home directory so that clients\ncan locate it.\n*\/\nfunc (t *Tinzenite) storeGlobalConfig() error {\n\t\/\/ ready outside data\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath := user.HomeDir + \"\/.config\/tinzenite\"\n\terr = makeDirectory(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpath += \"\/\" + DIRECTORYLIST\n\tfile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_APPEND, FILEPERMISSIONMODE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t\/\/ write path to file\n\t_, err = file.WriteString(t.Path + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ ensure that the file is valid\n\treturn PrettifyDirectoryList()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tbootstrapperPkg \"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/bootstrapper\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/bootstrapper\/node_manager\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/clock\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/config\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nfunc main() {\n\trootConfig, err := config.NewConfig(os.Args)\n\terr = rootConfig.BuildLogger()\n\tlogger := rootConfig.Logger\n\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to parse config\", err, lager.Data{\n\t\t\t\"config\": rootConfig,\n\t\t})\n\t}\n\n\tnodeManager := node_manager.New(rootConfig, clock.DefaultClock())\n\tbootstrapper := bootstrapperPkg.New(nodeManager)\n\terr = bootstrapper.Bootstrap()\n\n\tif err != nil {\n\t\tlogger.Error(\"Failed to bootstrap cluster\", err, lager.Data{\n\t\t\t\"config\": rootConfig,\n\t\t})\n\t\tprintHumanReadableErr(err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"Successfully bootstrapped cluster\")\n\tfmt.Println(\"Successfully bootstrapped cluster\")\n}\n\nfunc printHumanReadableErr(err error) {\n\tfmt.Printf(`\n\t\t##################################################################################\n\t\tError: %s\n\n\t\tReference the docs for more information:\n\t\thttps:\/\/github.com\/cloudfoundry\/cf-mysql-release\/blob\/master\/docs\/bootstrapping.md\n\t\t##################################################################################\n\t\t`, err)\n}\n<commit_msg>Validate config and handle errors<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tbootstrapperPkg \"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/bootstrapper\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/bootstrapper\/node_manager\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/clock\"\n\t\"github.com\/cloudfoundry-incubator\/cf-mysql-bootstrap\/config\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nfunc main() {\n\trootConfig, err := config.NewConfig(os.Args)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to parse config: %s\", err.Error()))\n\t}\n\n\terr = rootConfig.Validate()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Invalid config: %s\", err.Error()))\n\t}\n\n\terr = rootConfig.BuildLogger()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to build logger: %s\", err.Error()))\n\t}\n\tlogger := rootConfig.Logger\n\n\tnodeManager := node_manager.New(rootConfig, clock.DefaultClock())\n\tbootstrapper := bootstrapperPkg.New(nodeManager)\n\terr = bootstrapper.Bootstrap()\n\n\tif err != nil {\n\t\tlogger.Error(\"Failed to bootstrap cluster\", err, lager.Data{\n\t\t\t\"config\": rootConfig,\n\t\t})\n\t\tprintHumanReadableErr(err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"Successfully bootstrapped cluster\")\n\tfmt.Println(\"Successfully bootstrapped cluster\")\n}\n\nfunc printHumanReadableErr(err error) {\n\tfmt.Printf(`\n\t\t##################################################################################\n\t\tError: %s\n\n\t\tReference the docs for more information:\n\t\thttps:\/\/github.com\/cloudfoundry\/cf-mysql-release\/blob\/master\/docs\/bootstrapping.md\n\t\t##################################################################################\n\t\t`, err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013 The SkyDNS Authors. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License (MIT) that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/skynetservices\/skydns\/server\"\n\t\"github.com\/skynetservices\/skydns\/stats\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tjoin, ldns, lhttp, dataDir, domain string\n\trtimeout, wtimeout time.Duration\n\tdiscover,norr bool\n\tsecret string\n\tnameserver string\n\tdnssec string\n)\n\nfunc init() {\n\tflag.StringVar(&join, \"join\", \"\", \"Member of SkyDNS cluster to join can be comma separated list\")\n\tflag.BoolVar(&discover, \"discover\", false, \"Auto discover SkyDNS cluster. Performs an NS lookup on the -domain to find SkyDNS members\")\n\tflag.StringVar(&domain, \"domain\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS_DOMAIN\"); x != \"\" {\n\t\t\t\treturn x\n\t\t\t}\n\t\t\treturn \"skydns.local\"\n\t\t}(), \"Domain to anchor requests to or env. var. SKYDNS_DOMAIN\")\n\tflag.StringVar(&ldns, \"dns\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS_DNS\"); x != \"\" {\n\t\t\t\treturn x\n\t\t\t}\n\t\t\treturn \"127.0.0.1:53\"\n\t\t}(), \"IP:Port to bind to for DNS or env. var SKYDNS_DNS\")\n\tflag.StringVar(&lhttp, \"http\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS\"); x != \"\" {\n\t\t\t\t\/\/ get rid of http or https\n\t\t\t\tx1 := strings.TrimPrefix(x, \"https:\/\/\")\n\t\t\t\tx1 = strings.TrimPrefix(x1, \"http:\/\/\")\n\t\t\t\treturn x1\n\t\t\t}\n\t\t\treturn \"127.0.0.1:8080\"\n\t\t}(), \"IP:Port to bind to for HTTP or env. var. SKYDNS\")\n\tflag.StringVar(&dataDir, \"data\", \".\/data\", \"SkyDNS data directory\")\n\tflag.DurationVar(&rtimeout, \"rtimeout\", 2*time.Second, \"Read timeout\")\n\tflag.DurationVar(&wtimeout, \"wtimeout\", 2*time.Second, \"Write timeout\")\n\tflag.StringVar(&secret, \"secret\", \"\", \"Shared secret for use with http api\")\n\tflag.StringVar(&nameserver, \"nameserver\", \"\", \"Nameserver address to forward (non-local) queries to e.g. 8.8.8.8:53,8.8.4.4:53\")\n\tflag.StringVar(&dnssec, \"dnssec\", \"\", \"Basename of DNSSEC key file e.q. Kskydns.local.+005+38250\")\n\tflag.BoolVar(&norr, \"no-round-robin\", false, \"Do not round robin A\/AAAA replies\")\n}\n\nfunc main() {\n\tmembers := make([]string, 0)\n\traft.SetLogLevel(0)\n\tflag.Parse()\n\tnameservers := strings.Split(nameserver, \",\")\n\t\/\/ empty argument given\n\tif len(nameservers) == 1 && nameservers[0] == \"\" {\n\t\tnameservers = make([]string, 0)\n\t\tconfig, err := dns.ClientConfigFromFile(\"\/etc\/resolv.conf\")\n\t\tif err == nil {\n\t\t\tfor _, s := range config.Servers {\n\t\t\t\tnameservers = append(nameservers, net.JoinHostPort(s, config.Port))\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif discover {\n\t\tns, err := net.LookupNS(domain)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(ns) < 1 {\n\t\t\tlog.Fatal(\"No NS records found for \", domain)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, n := range ns {\n\t\t\tmembers = append(members, strings.TrimPrefix(n.Host, \".\"))\n\t\t}\n\t} else if join != \"\" {\n\t\tmembers = strings.Split(join, \",\")\n\t}\n\n\ts := server.NewServer(members, domain, ldns, lhttp, dataDir, rtimeout, wtimeout, secret, nameservers, !norr)\n\n\tif dnssec != \"\" {\n\t\tk, p, e := server.ParseKeyFile(dnssec)\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t\treturn\n\t\t}\n\t\tif k.Header().Name != dns.Fqdn(domain) {\n\t\t\tlog.Fatal(errors.New(\"Owner name of DNSKEY must match SkyDNS domain\"))\n\t\t\treturn\n\t\t}\n\t\ts.SetKeys(k, p)\n\t}\n\n\tstats.Collect()\n\n\twaiter, err := s.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\twaiter.Wait()\n}\n<commit_msg>Fix merge<commit_after>\/\/ Copyright (c) 2013 The SkyDNS Authors. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License (MIT) that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/skynetservices\/skydns\/server\"\n\t\"github.com\/skynetservices\/skydns\/stats\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tjoin, ldns, lhttp, dataDir, domain string\n\trtimeout, wtimeout time.Duration\n\tdiscover,norr bool\n\tsecret string\n\tnameserver string\n\tdnssec string\n)\n\nfunc init() {\n\tflag.StringVar(&join, \"join\", \"\", \"Member of SkyDNS cluster to join can be comma separated list\")\n\tflag.BoolVar(&discover, \"discover\", false, \"Auto discover SkyDNS cluster. Performs an NS lookup on the -domain to find SkyDNS members\")\n\tflag.StringVar(&domain, \"domain\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS_DOMAIN\"); x != \"\" {\n\t\t\t\treturn x\n\t\t\t}\n\t\t\treturn \"skydns.local\"\n\t\t}(), \"Domain to anchor requests to or env. var. SKYDNS_DOMAIN\")\n\tflag.StringVar(&ldns, \"dns\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS_DNS\"); x != \"\" {\n\t\t\t\treturn x\n\t\t\t}\n\t\t\treturn \"127.0.0.1:53\"\n\t\t}(), \"IP:Port to bind to for DNS or env. var SKYDNS_DNS\")\n\tflag.StringVar(&lhttp, \"http\",\n\t\tfunc() string {\n\t\t\tif x := os.Getenv(\"SKYDNS\"); x != \"\" {\n\t\t\t\t\/\/ get rid of http or https\n\t\t\t\tx1 := strings.TrimPrefix(x, \"https:\/\/\")\n\t\t\t\tx1 = strings.TrimPrefix(x1, \"http:\/\/\")\n\t\t\t\treturn x1\n\t\t\t}\n\t\t\treturn \"127.0.0.1:8080\"\n\t\t}(), \"IP:Port to bind to for HTTP or env. var. SKYDNS\")\n\tflag.StringVar(&dataDir, \"data\", \".\/data\", \"SkyDNS data directory\")\n\tflag.DurationVar(&rtimeout, \"rtimeout\", 2*time.Second, \"Read timeout\")\n\tflag.DurationVar(&wtimeout, \"wtimeout\", 2*time.Second, \"Write timeout\")\n\tflag.StringVar(&secret, \"secret\", \"\", \"Shared secret for use with http api\")\n\tflag.StringVar(&nameserver, \"nameserver\", \"\", \"Nameserver address to forward (non-local) queries to e.g. 8.8.8.8:53,8.8.4.4:53\")\n\tflag.StringVar(&dnssec, \"dnssec\", \"\", \"Basename of DNSSEC key file e.q. Kskydns.local.+005+38250\")\n\tflag.BoolVar(&norr, \"no-round-robin\", false, \"Do not round robin A\/AAAA replies\")\n}\n\nfunc main() {\n\tmembers := make([]string, 0)\n\traft.SetLogLevel(0)\n\tflag.Parse()\n\tnameservers := strings.Split(nameserver, \",\")\n\t\/\/ empty argument given\n\tif len(nameservers) == 1 && nameservers[0] == \"\" {\n\t\tnameservers = make([]string, 0)\n\t\tconfig, err := dns.ClientConfigFromFile(\"\/etc\/resolv.conf\")\n\t\tif err == nil {\n\t\t\tfor _, s := range config.Servers {\n\t\t\t\tnameservers = append(nameservers, net.JoinHostPort(s, config.Port))\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif discover {\n\t\tns, err := net.LookupNS(domain)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(ns) < 1 {\n\t\t\tlog.Fatal(\"No NS records found for \", domain)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, n := range ns {\n\t\t\tmembers = append(members, strings.TrimPrefix(n.Host, \".\"))\n\t\t}\n\t} else if join != \"\" {\n\t\tmembers = strings.Split(join, \",\")\n\t}\n\n\ts := server.NewServer(members, domain, ldns, lhttp, dataDir, rtimeout, wtimeout, secret, nameservers, !norr)\n<<<<<<< HEAD\n\n\tif dnssec != \"\" {\n\t\tk, p, e := server.ParseKeyFile(dnssec)\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t\treturn\n\t\t}\n\t\tif k.Header().Name != dns.Fqdn(domain) {\n\t\t\tlog.Fatal(errors.New(\"Owner name of DNSKEY must match SkyDNS domain\"))\n\t\t\treturn\n\t\t}\n\t\ts.SetKeys(k, p)\n\t}\n=======\n>>>>>>> 4ae15f347a84502e895c65780fa5bd9e91090781\n\n\tstats.Collect()\n\n\twaiter, err := s.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\twaiter.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\n\/\/ preload initializes any global options and configuration\n\/\/ before the main or sub commands are run\nfunc preload(context *cli.Context) error {\n\tif context.GlobalBool(\"debug\") {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\treturn nil\n}\n\n\/\/ hostHosts returns a list of host addresses that are specified on the\n\/\/ command line and also in a hosts file separated by new lines.\nfunc loadHosts(context *cli.Context) ([]string, error) {\n\thosts := []string(context.GlobalStringSlice(\"host\"))\n\tif hostsFile := context.GlobalString(\"hosts\"); hostsFile != \"\" {\n\t\tf, err := os.Open(hostsFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\ts := bufio.NewScanner(f)\n\t\tfor s.Scan() {\n\t\t\tif err := s.Err(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thosts = append(hosts, s.Text())\n\t\t}\n\t}\n\treturn hosts, nil\n}\n\n\/\/ multiplexAction uses the arguments passed via the command line and\n\/\/ multiplexes them across multiple SSH connections\nfunc multiplexAction(context *cli.Context) {\n\tc, err := newCommand(context)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Debug(c)\n\n\thosts, err := loadHosts(context)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsections, err := parseSshConfigFile(filepath.Join(os.Getenv(\"HOME\"), \".ssh\", \"config\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(hosts) == 0 {\n\t\tlog.Fatal(\"no host specified for command to run\")\n\t}\n\tlog.Debugf(\"hosts %v\", hosts)\n\tgroup := &sync.WaitGroup{}\n\tfor _, h := range hosts {\n\t\tgroup.Add(1)\n\t\tgo executeCommand(c, h, sections[h], context.GlobalBool(\"A\"), context.GlobalBool(\"quiet\"), group)\n\t}\n\tgroup.Wait()\n\tlog.Debugf(\"finished executing %s on all hosts\", c)\n}\n\nfunc executeCommand(c command, host string, section *SshConfigFileSection, agentForwarding, quiet bool, group *sync.WaitGroup) {\n\tdefer group.Done()\n\tvar (\n\t\terr error\n\t\toriginalHost = host\n\t)\n\tif host, err = cleanHost(host); err != nil {\n\t\tlog.WithField(\"host\", originalHost).Error(err)\n\t\treturn\n\t}\n\n\tif err = runSSH(c, host, section, agentForwarding, quiet); err != nil {\n\t\tlog.WithField(\"host\", host).Error(err)\n\t\treturn\n\t}\n}\n\n\/\/ runSSH executes the given command on the given host\nfunc runSSH(c command, host string, section *SshConfigFileSection, agentForwarding, quiet bool) error {\n\tconfig, err := newSshClientConfig(host, section, c.User, c.Identity, agentForwarding)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := config.NewSession(config.host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !quiet {\n\t\tw := newBufCloser(os.Stdout)\n\t\tdefer w.Close()\n\t\tsession.Stderr, session.Stdout = w, w\n\t}\n\tdefer func() {\n\t\tsession.Close()\n\t\tlog.Printf(\"Session complete from %s\", host)\n\t}()\n\n\tfor key, value := range c.Env {\n\t\tif err := session.Setenv(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn session.Run(c.Cmd)\n}\n\n\/\/ cleanHost parses out the hostname\/ip and port. If no port is\n\/\/ specified then port 22 is appended to the hostname\/ip\nfunc cleanHost(host string) (string, error) {\n\th, port, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn \"\", err\n\t\t}\n\t\tport = \"22\"\n\t\th = host\n\t}\n\tif port == \"\" {\n\t\tport = \"22\"\n\t}\n\treturn net.JoinHostPort(h, port), nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"slex\"\n\tapp.Usage = \"SSH commands multiplexed\"\n\tapp.Version = \"1\"\n\tapp.Author = \"@crosbymichael\"\n\tapp.Email = \"crosbymichael@gmail.com\"\n\tapp.Before = preload\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t\tUsage: \"enable debug output for the logs\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"host\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t\tUsage: \"SSH host address\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"hosts\",\n\t\t\tUsage: \"file containing host addresses separated by a new line\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"user,u\",\n\t\t\tValue: \"root\",\n\t\t\tUsage: \"user to execute the command as\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"identity,i\",\n\t\t\tValue: \"id_rsa\",\n\t\t\tUsage: \"SSH identity to use for connecting to the host\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"agent,A\",\n\t\t\tUsage: \"Forward authentication request to the ssh agent\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"env,e\",\n\t\t\tUsage: \"set environment variables for SSH command\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quiet,q\",\n\t\t\tUsage: \"disable output from the ssh command\",\n\t\t},\n\t}\n\tapp.Action = multiplexAction\n\tif err := app.Run(os.Args); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Add username to log output for each executed command.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\n\/\/ preload initializes any global options and configuration\n\/\/ before the main or sub commands are run\nfunc preload(context *cli.Context) error {\n\tif context.GlobalBool(\"debug\") {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\treturn nil\n}\n\n\/\/ hostHosts returns a list of host addresses that are specified on the\n\/\/ command line and also in a hosts file separated by new lines.\nfunc loadHosts(context *cli.Context) ([]string, error) {\n\thosts := []string(context.GlobalStringSlice(\"host\"))\n\tif hostsFile := context.GlobalString(\"hosts\"); hostsFile != \"\" {\n\t\tf, err := os.Open(hostsFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\ts := bufio.NewScanner(f)\n\t\tfor s.Scan() {\n\t\t\tif err := s.Err(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thosts = append(hosts, s.Text())\n\t\t}\n\t}\n\treturn hosts, nil\n}\n\n\/\/ multiplexAction uses the arguments passed via the command line and\n\/\/ multiplexes them across multiple SSH connections\nfunc multiplexAction(context *cli.Context) {\n\tc, err := newCommand(context)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Debug(c)\n\n\thosts, err := loadHosts(context)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsections, err := parseSshConfigFile(filepath.Join(os.Getenv(\"HOME\"), \".ssh\", \"config\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(hosts) == 0 {\n\t\tlog.Fatal(\"no host specified for command to run\")\n\t}\n\tlog.Debugf(\"hosts %v\", hosts)\n\tgroup := &sync.WaitGroup{}\n\tfor _, h := range hosts {\n\t\tgroup.Add(1)\n\t\tgo executeCommand(c, h, sections[h], context.GlobalBool(\"A\"), context.GlobalBool(\"quiet\"), group)\n\t}\n\tgroup.Wait()\n\tlog.Debugf(\"finished executing %s on all hosts\", c)\n}\n\nfunc executeCommand(c command, host string, section *SshConfigFileSection, agentForwarding, quiet bool, group *sync.WaitGroup) {\n\tdefer group.Done()\n\tvar (\n\t\terr error\n\t\toriginalHost = host\n\t)\n\tif host, err = cleanHost(host); err != nil {\n\t\tlog.WithField(\"host\", originalHost).Error(err)\n\t\treturn\n\t}\n\n\tif err = runSSH(c, host, section, agentForwarding, quiet); err != nil {\n\t\tlog.WithField(\"host\", host).Error(err)\n\t\treturn\n\t}\n}\n\n\/\/ runSSH executes the given command on the given host\nfunc runSSH(c command, host string, section *SshConfigFileSection, agentForwarding, quiet bool) error {\n\tconfig, err := newSshClientConfig(host, section, c.User, c.Identity, agentForwarding)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := config.NewSession(config.host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !quiet {\n\t\tw := newBufCloser(os.Stdout)\n\t\tdefer w.Close()\n\t\tsession.Stderr, session.Stdout = w, w\n\t}\n\tdefer func() {\n\t\tsession.Close()\n\t\tlog.Printf(\"Session complete from %s@%s\", section.User, host)\n\t}()\n\n\tfor key, value := range c.Env {\n\t\tif err := session.Setenv(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn session.Run(c.Cmd)\n}\n\n\/\/ cleanHost parses out the hostname\/ip and port. If no port is\n\/\/ specified then port 22 is appended to the hostname\/ip\nfunc cleanHost(host string) (string, error) {\n\th, port, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn \"\", err\n\t\t}\n\t\tport = \"22\"\n\t\th = host\n\t}\n\tif port == \"\" {\n\t\tport = \"22\"\n\t}\n\treturn net.JoinHostPort(h, port), nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"slex\"\n\tapp.Usage = \"SSH commands multiplexed\"\n\tapp.Version = \"1\"\n\tapp.Author = \"@crosbymichael\"\n\tapp.Email = \"crosbymichael@gmail.com\"\n\tapp.Before = preload\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t\tUsage: \"enable debug output for the logs\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"host\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t\tUsage: \"SSH host address\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"hosts\",\n\t\t\tUsage: \"file containing host addresses separated by a new line\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"user,u\",\n\t\t\tValue: \"root\",\n\t\t\tUsage: \"user to execute the command as\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"identity,i\",\n\t\t\tValue: \"id_rsa\",\n\t\t\tUsage: \"SSH identity to use for connecting to the host\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"agent,A\",\n\t\t\tUsage: \"Forward authentication request to the ssh agent\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"env,e\",\n\t\t\tUsage: \"set environment variables for SSH command\",\n\t\t\tValue: &cli.StringSlice{},\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quiet,q\",\n\t\t\tUsage: \"disable output from the ssh command\",\n\t\t},\n\t}\n\tapp.Action = multiplexAction\n\tif err := app.Run(os.Args); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"mime\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/ONSdigital\/florence\/assets\"\n\t\"github.com\/ONSdigital\/go-ns\/handlers\/reverseProxy\"\n\t\"github.com\/ONSdigital\/go-ns\/log\"\n\t\"github.com\/ONSdigital\/go-ns\/server\"\n\t\"github.com\/gorilla\/pat\"\n)\n\nvar bindAddr = \":8081\"\nvar babbageURL = \"http:\/\/localhost:8080\"\nvar zebedeeURL = \"http:\/\/localhost:8082\"\n\nvar getAsset = assets.Asset\n\nfunc main() {\n\n\tif v := os.Getenv(\"BIND_ADDR\"); len(v) > 0 {\n\t\tbindAddr = v\n\t}\n\tif v := os.Getenv(\"BABBAGE_URL\"); len(v) > 0 {\n\t\tbabbageURL = v\n\t}\n\tif v := os.Getenv(\"ZEBEDEE_URL\"); len(v) > 0 {\n\t\tzebedeeURL = v\n\t}\n\n\tlog.Namespace = \"florence\"\n\n\tbabbageURL, err := url.Parse(babbageURL)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tos.Exit(1)\n\t}\n\n\tbabbageProxy := reverseProxy.Create(babbageURL, nil)\n\n\tzebedeeURL, err := url.Parse(zebedeeURL)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tos.Exit(1)\n\t}\n\n\tzebedeeProxy := reverseProxy.Create(zebedeeURL, zebedeeDirector)\n\n\trouter := pat.New()\n\n\trouter.Handle(\"\/zebedee\/{uri:.*}\", zebedeeProxy)\n\trouter.HandleFunc(\"\/florence\/dist\/{uri:.*}\", staticFiles)\n\trouter.HandleFunc(\"\/florence\", refactoredIndexFile)\n\trouter.HandleFunc(\"\/florence\/index.html\", legacyIndexFile)\n\trouter.HandleFunc(\"\/florence{uri:|\/.*}\", refactoredIndexFile)\n\trouter.Handle(\"\/{uri:.*}\", babbageProxy)\n\n\tlog.Debug(\"Starting server\", log.Data{\n\t\t\"bind_addr\": bindAddr,\n\t\t\"babbage_url\": babbageURL,\n\t\t\"zebedee_url\": zebedeeURL,\n\t})\n\n\ts := server.New(bindAddr, router)\n\n\ts.ListenAndServe()\n}\n\nfunc staticFiles(w http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Query().Get(\":uri\")\n\n\tb, err := getAsset(\"..\/dist\/\" + path)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, mime.TypeByExtension(filepath.Ext(path)))\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc legacyIndexFile(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"Getting legacy HTML file\", nil)\n\n\tb, err := getAsset(\"..\/dist\/legacy-assets\/index.html\")\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, \"text\/html\")\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc refactoredIndexFile(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"Getting refactored HTML file\", nil)\n\n\tb, err := getAsset(\"..\/dist\/refactored.html\")\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, \"text\/html\")\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc zebedeeDirector(req *http.Request) {\n\tif c, err := req.Cookie(`access_token`); err == nil && len(c.Value) > 0 {\n\t\treq.Header.Set(`X-Florence-Token`, c.Value)\n\t}\n\treq.URL.Path = strings.TrimPrefix(req.URL.Path, \"\/zebedee\")\n}\n\ntype transport struct {\n\thttp.RoundTripper\n}\n\nfunc (t *transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\t\/* May need this to handle location later\n\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb = bytes.Replace(b, []byte(\"server\"), []byte(\"schmerver\"), -1)\n\tbody := ioutil.NopCloser(bytes.NewReader(b))\n\tresp.Body = body\n\tresp.ContentLength = int64(len(b))\n\tresp.Header.Set(\"Content-Length\", strconv.Itoa(len(b)))\n\treturn resp, nil *\/\n\n\treturn t.RoundTripper.RoundTrip(req)\n}\n\nvar _ http.RoundTripper = &transport{}\n<commit_msg>Add comment about proxying redirects<commit_after>package main\n\nimport (\n\t\"mime\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/ONSdigital\/florence\/assets\"\n\t\"github.com\/ONSdigital\/go-ns\/handlers\/reverseProxy\"\n\t\"github.com\/ONSdigital\/go-ns\/log\"\n\t\"github.com\/ONSdigital\/go-ns\/server\"\n\t\"github.com\/gorilla\/pat\"\n)\n\nvar bindAddr = \":8081\"\nvar babbageURL = \"http:\/\/localhost:8080\"\nvar zebedeeURL = \"http:\/\/localhost:8082\"\n\nvar getAsset = assets.Asset\n\nfunc main() {\n\n\tif v := os.Getenv(\"BIND_ADDR\"); len(v) > 0 {\n\t\tbindAddr = v\n\t}\n\tif v := os.Getenv(\"BABBAGE_URL\"); len(v) > 0 {\n\t\tbabbageURL = v\n\t}\n\tif v := os.Getenv(\"ZEBEDEE_URL\"); len(v) > 0 {\n\t\tzebedeeURL = v\n\t}\n\n\tlog.Namespace = \"florence\"\n\n\t\/*\n\t\tNOTE:\n\t\tIf there's any issues with this Florence server proxying redirects from either Babbage or Zebedee then the code in the previous Java Florence server might give some clues for a solution :- https:\/\/github.com\/ONSdigital\/florence\/blob\/b13df0708b30493b98e9ce239103c59d7f409f98\/src\/main\/java\/com\/github\/onsdigital\/florence\/filter\/Proxy.java#L125-L135\n\n\t\tThe code has purposefully not been included in this Go replacement because we can't see what issue it's fixing and whether it's necessary.\n\t*\/\n\n\tbabbageURL, err := url.Parse(babbageURL)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tos.Exit(1)\n\t}\n\n\tbabbageProxy := reverseProxy.Create(babbageURL, nil)\n\n\tzebedeeURL, err := url.Parse(zebedeeURL)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tos.Exit(1)\n\t}\n\n\tzebedeeProxy := reverseProxy.Create(zebedeeURL, zebedeeDirector)\n\n\trouter := pat.New()\n\n\trouter.Handle(\"\/zebedee\/{uri:.*}\", zebedeeProxy)\n\trouter.HandleFunc(\"\/florence\/dist\/{uri:.*}\", staticFiles)\n\trouter.HandleFunc(\"\/florence\", refactoredIndexFile)\n\trouter.HandleFunc(\"\/florence\/index.html\", legacyIndexFile)\n\trouter.HandleFunc(\"\/florence{uri:|\/.*}\", refactoredIndexFile)\n\trouter.Handle(\"\/{uri:.*}\", babbageProxy)\n\n\tlog.Debug(\"Starting server\", log.Data{\n\t\t\"bind_addr\": bindAddr,\n\t\t\"babbage_url\": babbageURL,\n\t\t\"zebedee_url\": zebedeeURL,\n\t})\n\n\ts := server.New(bindAddr, router)\n\n\ts.ListenAndServe()\n}\n\nfunc staticFiles(w http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Query().Get(\":uri\")\n\n\tb, err := getAsset(\"..\/dist\/\" + path)\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, mime.TypeByExtension(filepath.Ext(path)))\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc legacyIndexFile(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"Getting legacy HTML file\", nil)\n\n\tb, err := getAsset(\"..\/dist\/legacy-assets\/index.html\")\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, \"text\/html\")\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc refactoredIndexFile(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"Getting refactored HTML file\", nil)\n\n\tb, err := getAsset(\"..\/dist\/refactored.html\")\n\tif err != nil {\n\t\tlog.Error(err, nil)\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\tw.Header().Set(`Content-Type`, \"text\/html\")\n\tw.WriteHeader(200)\n\tw.Write(b)\n}\n\nfunc zebedeeDirector(req *http.Request) {\n\tif c, err := req.Cookie(`access_token`); err == nil && len(c.Value) > 0 {\n\t\treq.Header.Set(`X-Florence-Token`, c.Value)\n\t}\n\treq.URL.Path = strings.TrimPrefix(req.URL.Path, \"\/zebedee\")\n}\n\ntype transport struct {\n\thttp.RoundTripper\n}\n\nfunc (t *transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\t\/* May need this to handle location later\n\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb = bytes.Replace(b, []byte(\"server\"), []byte(\"schmerver\"), -1)\n\tbody := ioutil.NopCloser(bytes.NewReader(b))\n\tresp.Body = body\n\tresp.ContentLength = int64(len(b))\n\tresp.Header.Set(\"Content-Length\", strconv.Itoa(len(b)))\n\treturn resp, nil *\/\n\n\treturn t.RoundTripper.RoundTrip(req)\n}\n\nvar _ http.RoundTripper = &transport{}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\".\/sdl\"\n)\n\nfunc main() {\n\tsdl.Init(sdl.INIT_VIDEO)\n\twindow := sdl.CreateWindow(\"Hello World!\", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,\n\t\t\t\t800, 600, sdl.WINDOW_SHOWN | sdl.WINDOW_OPENGL)\n\trenderer := sdl.CreateRenderer(window, -1, sdl.RENDERER_ACCELERATED)\n\n\trenderer.SetDrawColor(0, 0, 255, 255)\n\trenderer.DrawLine(0, 0, 200, 200)\n\n\tpoints := []sdl.Point { {0, 0}, {100, 300}, {100, 300}, {200, 0} }\n\trenderer.SetDrawColor(255, 255, 0, 255)\n\trenderer.DrawLines(&points, 4)\n\n\trect := sdl.Rect { 300, 0, 200, 200 }\n\trenderer.SetDrawColor(255, 0, 0, 255)\n\trenderer.DrawRect(&rect)\n\n\trects := []sdl.Rect { {400, 400, 100, 100}, {550, 350, 200, 200} }\n\trenderer.SetDrawColor(0, 255, 255, 255)\n\trenderer.DrawRects(&rects, 2)\n\n\trect = sdl.Rect { 250, 250, 200, 200 }\n\trenderer.SetDrawColor(0, 255, 0, 255)\n\trenderer.FillRect(&rect)\n\n\trects = []sdl.Rect { {500, 300, 100, 100}, {200, 300, 200, 200} }\n\trenderer.SetDrawColor(255, 0, 255, 255)\n\trenderer.FillRects(&rects, 2)\n\t\n\trenderer.Present()\n\n\tsdl.Delay(1000)\n}\n<commit_msg>Updated main.go<commit_after>package main\n\nimport (\n\t\".\/sdl\"\n)\n\nfunc main() {\n\tsdl.Init(sdl.INIT_VIDEO)\n\twindow := sdl.CreateWindow(\"Hello World!\", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,\n\t\t\t\t800, 600, sdl.WINDOW_SHOWN | sdl.WINDOW_OPENGL)\n\trenderer := sdl.CreateRenderer(window, -1, sdl.RENDERER_ACCELERATED)\n\t\n\trenderer.SetDrawColor(255, 255, 255, 255)\n\trenderer.DrawPoint(150, 300)\n\n\trenderer.SetDrawColor(0, 0, 255, 255)\n\trenderer.DrawLine(0, 0, 200, 200)\n\n\tpoints := []sdl.Point { {0, 0}, {100, 300}, {100, 300}, {200, 0} }\n\trenderer.SetDrawColor(255, 255, 0, 255)\n\trenderer.DrawLines(&points, 4)\n\n\trect := sdl.Rect { 300, 0, 200, 200 }\n\trenderer.SetDrawColor(255, 0, 0, 255)\n\trenderer.DrawRect(&rect)\n\n\trects := []sdl.Rect { {400, 400, 100, 100}, {550, 350, 200, 200} }\n\trenderer.SetDrawColor(0, 255, 255, 255)\n\trenderer.DrawRects(&rects, 2)\n\n\trect = sdl.Rect { 250, 250, 200, 200 }\n\trenderer.SetDrawColor(0, 255, 0, 255)\n\trenderer.FillRect(&rect)\n\n\trects = []sdl.Rect { {500, 300, 100, 100}, {200, 300, 200, 200} }\n\trenderer.SetDrawColor(255, 0, 255, 255)\n\trenderer.FillRects(&rects, 2)\n\t\n\trenderer.Present()\n\n\tsdl.Delay(1000)\n\tsdl.Quit()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/vheon\/picker\/terminal\"\n\t\"github.com\/vheon\/picker\/tty\"\n)\n\nfunc appendChar(s string, b rune) string {\n\treturn s + string(b)\n}\n\nfunc backspace(s string) string {\n\tif l := len(s); l > 0 {\n\t\t_, size := utf8.DecodeLastRuneInString(s)\n\t\treturn s[:l-size]\n\t}\n\treturn s\n}\n\nfunc readAllCandidates(r io.Reader) []Candidate {\n\tscanner := bufio.NewScanner(r)\n\tvar lines []Candidate\n\tfor scanner.Scan() {\n\t\tlines = append(lines, NewCandidate(scanner.Text()))\n\t}\n\treturn lines\n}\n\nfunc handle_input(picker *Picker, view *View, key rune) *View {\n\tswitch key {\n\tcase terminal.Ctrl_N:\n\t\tview.Down()\n\tcase terminal.Ctrl_P:\n\t\tview.Up()\n\tcase terminal.Backspace:\n\t\tview = picker.Answer(backspace(view.Query))\n\tcase terminal.LF:\n\t\tview.Done = true\n\t\tview.ClearPrompt()\n\tcase terminal.Ctrl_U, terminal.Ctrl_W:\n\t\tview = picker.Answer(\"\")\n\tdefault:\n\t\tview = picker.Answer(appendChar(view.Query, key))\n\t}\n\treturn view\n}\n\nvar visibleRows = flag.Int(\"h\", 20, \"Number of visible candidates\")\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tflag.Parse()\n\n\ttty := tty.New()\n\ttty.Stty(\"-echo\", \"-icanon\")\n\tdefer tty.Restore()\n\n\tterminal := terminal.NewTerminal(tty)\n\n\tif *visibleRows > terminal.Height {\n\t\t*visibleRows = terminal.Height - 1\n\t}\n\tif *visibleRows < 1 {\n\t\t*visibleRows = 1\n\t}\n\n\tpicker := NewPicker(readAllCandidates(os.Stdin), *visibleRows)\n\tview := picker.Answer(\"\")\n\tterminal.MakeRoom(view.Height)\n\tview.DrawOnTerminal(terminal)\n\tfor !view.Done {\n\t\tview = handle_input(picker, view, tty.ReadRune())\n\t\tview.DrawOnTerminal(terminal)\n\t}\n\tfmt.Println(view.Selected())\n}\n<commit_msg>Rename variable<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/vheon\/picker\/terminal\"\n\t\"github.com\/vheon\/picker\/tty\"\n)\n\nfunc appendChar(s string, b rune) string {\n\treturn s + string(b)\n}\n\nfunc backspace(s string) string {\n\tif l := len(s); l > 0 {\n\t\t_, size := utf8.DecodeLastRuneInString(s)\n\t\treturn s[:l-size]\n\t}\n\treturn s\n}\n\nfunc readAllCandidates(r io.Reader) []Candidate {\n\tscanner := bufio.NewScanner(r)\n\tvar lines []Candidate\n\tfor scanner.Scan() {\n\t\tlines = append(lines, NewCandidate(scanner.Text()))\n\t}\n\treturn lines\n}\n\nfunc handle_input(picker *Picker, view *View, key rune) *View {\n\tswitch key {\n\tcase terminal.Ctrl_N:\n\t\tview.Down()\n\tcase terminal.Ctrl_P:\n\t\tview.Up()\n\tcase terminal.Backspace:\n\t\tview = picker.Answer(backspace(view.Query))\n\tcase terminal.LF:\n\t\tview.Done = true\n\t\tview.ClearPrompt()\n\tcase terminal.Ctrl_U, terminal.Ctrl_W:\n\t\tview = picker.Answer(\"\")\n\tdefault:\n\t\tview = picker.Answer(appendChar(view.Query, key))\n\t}\n\treturn view\n}\n\nvar visibleRows = flag.Int(\"h\", 20, \"Number of visible candidates\")\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tflag.Parse()\n\n\ttty := tty.New()\n\ttty.Stty(\"-echo\", \"-icanon\")\n\tdefer tty.Restore()\n\n\tterm := terminal.NewTerminal(tty)\n\n\tif *visibleRows > term.Height {\n\t\t*visibleRows = term.Height - 1\n\t}\n\tif *visibleRows < 1 {\n\t\t*visibleRows = 1\n\t}\n\n\tpicker := NewPicker(readAllCandidates(os.Stdin), *visibleRows)\n\tview := picker.Answer(\"\")\n\tterm.MakeRoom(view.Height)\n\tview.DrawOnTerminal(term)\n\tfor !view.Done {\n\t\tview = handle_input(picker, view, tty.ReadRune())\n\t\tview.DrawOnTerminal(term)\n\t}\n\tfmt.Println(view.Selected())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/gorilla\/handlers\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"serve\"\n\tapp.Usage = \"Simple HTTP Server\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"dir, d\",\n\t\t\tValue: \".\",\n\t\t\tUsage: \"Directory to serve\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"address, a\",\n\t\t\tValue: \":8080\",\n\t\t\tUsage: \"Address to listen on\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"log,l\",\n\t\t\tUsage: \"Log to stderr\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cert,c\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Certificate for TLS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key,k\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Key for TLS\",\n\t\t},\n\t}\n\tapp.Action = func(c *cli.Context) {\n\t\taddress := \":8080\"\n\t\tdir := \".\"\n\n\t\tif c.String(\"dir\") != \"\" {\n\t\t\tdir = c.String(\"dir\")\n\t\t}\n\t\tif c.String(\"address\") != \"\" {\n\t\t\taddress = c.String(\"address\")\n\t\t}\n\t\tserver := handlers.CompressHandler(http.FileServer(http.Dir(dir)))\n\t\tif c.Bool(\"log\") {\n\t\t\tserver = handlers.LoggingHandler(os.Stderr, server)\n\t\t}\n\t\tcert := c.String(\"cert\")\n\t\tkey := c.String(\"key\")\n\t\t\/\/ if cert or key are set, start TLS\n\t\tif cert != \"\" || key != \"\" {\n\t\t\t\/\/ Both cert and key must be set\n\t\t\tif cert == \"\" || key == \"\" {\n\t\t\t\tlog.Fatalln(\"Both a certificate and key must be provided for TLS\")\n\t\t\t}\n\t\t\tlog.Fatalln(http.ListenAndServeTLS(address, cert, key, server))\n\t\t} else {\n\t\t\t\/\/ No cert and no key, just serve unencrypted\n\t\t\tlog.Fatalln(http.ListenAndServe(address, server))\n\t\t}\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>adding SPDX copyright<commit_after>\/\/ Copyright (c) 2014-2021 Frederick F. Kautz IV\n\/\/\n\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/gorilla\/handlers\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"serve\"\n\tapp.Usage = \"Simple HTTP Server\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"dir, d\",\n\t\t\tValue: \".\",\n\t\t\tUsage: \"Directory to serve\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"address, a\",\n\t\t\tValue: \":8080\",\n\t\t\tUsage: \"Address to listen on\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"log,l\",\n\t\t\tUsage: \"Log to stderr\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cert,c\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Certificate for TLS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key,k\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Key for TLS\",\n\t\t},\n\t}\n\tapp.Action = func(c *cli.Context) {\n\t\taddress := \":8080\"\n\t\tdir := \".\"\n\n\t\tif c.String(\"dir\") != \"\" {\n\t\t\tdir = c.String(\"dir\")\n\t\t}\n\t\tif c.String(\"address\") != \"\" {\n\t\t\taddress = c.String(\"address\")\n\t\t}\n\t\tserver := handlers.CompressHandler(http.FileServer(http.Dir(dir)))\n\t\tif c.Bool(\"log\") {\n\t\t\tserver = handlers.LoggingHandler(os.Stderr, server)\n\t\t}\n\t\tcert := c.String(\"cert\")\n\t\tkey := c.String(\"key\")\n\t\t\/\/ if cert or key are set, start TLS\n\t\tif cert != \"\" || key != \"\" {\n\t\t\t\/\/ Both cert and key must be set\n\t\t\tif cert == \"\" || key == \"\" {\n\t\t\t\tlog.Fatalln(\"Both a certificate and key must be provided for TLS\")\n\t\t\t}\n\t\t\tlog.Fatalln(http.ListenAndServeTLS(address, cert, key, server))\n\t\t} else {\n\t\t\t\/\/ No cert and no key, just serve unencrypted\n\t\t\tlog.Fatalln(http.ListenAndServe(address, server))\n\t\t}\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/vbatts\/overlay\/mount\"\n\t\"github.com\/vbatts\/overlay\/state\"\n)\n\nvar (\n\tflSrc = flag.String(\"src\", \"\", \"source directory to overlay\")\n\tflTarget = flag.String(\"target\", \"\", \"destination to overlay to (default is ${src}.overlay)\")\n\tflUnmount = flag.Bool(\"unmount\", false, \"unmount directory all provided args\")\n\tflRemove = flag.String(\"remove\", \"\", \"remove the provided UUID\")\n\tflRoot = flag.String(\"root\", filepath.Join(os.Getenv(\"HOME\"), \".local\/share\/overlay\/\"), \"Directory to story state of previous overlay mounts\")\n\tflListMounts = flag.Bool(\"list\", false, \"list previously recorded mounts\")\n\tflDebug = flag.Bool(\"debug\", false, \"enable debug output\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *flDebug {\n\t\tos.Setenv(\"DEBUG\", \"1\")\n\t}\n\n\tif *flUnmount {\n\t\tfor _, arg := range flag.Args() {\n\t\t\tif err := mount.UnmountPath(arg); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unmounting %q: %s\\n\", arg, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\tif *flRemove == \"\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tctx, err := state.Initialize(*flRoot)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif *flRemove != \"\" {\n\t\tmounts, err := ctx.Mounts()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfor _, m := range mounts {\n\t\t\tif m.UUID == *flRemove {\n\t\t\t\tif err := os.RemoveAll(m.Root()); err != nil {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif *flListMounts {\n\t\tmounts, err := ctx.Mounts()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfor _, m := range mounts {\n\t\t\tfmt.Printf(\"TARGET\\t\\tSOURCE\\t\\tUUID\\n\")\n\t\t\tfmt.Printf(\"%s\\t\\t%s\\t\\t%s\\n\", m.Target, m.Source, m.UUID)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif *flSrc == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"ERROR: no source directory provided\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ TODO check for supported underlying filesystems (ext4, xfs)\n\n\tm := ctx.NewMountPoint()\n\tm.Source, err = filepath.Abs(*flSrc)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif *flTarget != \"\" {\n\t\tm.Target, err = filepath.Abs(*flTarget)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ check if target directory already mounted\n\tinfos, err := mount.Infos()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: mountinfo: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfor i := range infos {\n\t\tif infos[i].MountPoint == m.Target {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %q is already mounted\\n\", m.Target)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ TODO add cleanup mechanism if something later fails\n\tif err := m.Mkdir(0755); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(m.Target)\n\n\tif *flDebug {\n\t\tfmt.Println(m.Options())\n\t}\n\n\tif err := mount.Mount(m); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif err := ctx.SaveMount(m); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>main: fix the list formatting<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/vbatts\/overlay\/mount\"\n\t\"github.com\/vbatts\/overlay\/state\"\n)\n\nvar (\n\tflSrc = flag.String(\"src\", \"\", \"source directory to overlay\")\n\tflTarget = flag.String(\"target\", \"\", \"destination to overlay to (default is ${src}.overlay)\")\n\tflUnmount = flag.Bool(\"unmount\", false, \"unmount directory all provided args\")\n\tflRemove = flag.String(\"remove\", \"\", \"remove the provided UUID\")\n\tflRoot = flag.String(\"root\", filepath.Join(os.Getenv(\"HOME\"), \".local\/share\/overlay\/\"), \"Directory to story state of previous overlay mounts\")\n\tflListMounts = flag.Bool(\"list\", false, \"list previously recorded mounts\")\n\tflDebug = flag.Bool(\"debug\", false, \"enable debug output\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *flDebug {\n\t\tos.Setenv(\"DEBUG\", \"1\")\n\t}\n\n\tif *flUnmount {\n\t\tfor _, arg := range flag.Args() {\n\t\t\tif err := mount.UnmountPath(arg); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unmounting %q: %s\\n\", arg, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\tif *flRemove == \"\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tctx, err := state.Initialize(*flRoot)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif *flRemove != \"\" {\n\t\tmounts, err := ctx.Mounts()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfor _, m := range mounts {\n\t\t\tif m.UUID == *flRemove {\n\t\t\t\tif err := os.RemoveAll(m.Root()); err != nil {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif *flListMounts {\n\t\tmounts, err := ctx.Mounts()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"TARGET\\t\\tSOURCE\\t\\tUUID\\n\")\n\t\tfor _, m := range mounts {\n\t\t\tfmt.Printf(\"%s\\t\\t%s\\t\\t%s\\n\", m.Target, m.Source, m.UUID)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif *flSrc == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"ERROR: no source directory provided\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ TODO check for supported underlying filesystems (ext4, xfs)\n\n\tm := ctx.NewMountPoint()\n\tm.Source, err = filepath.Abs(*flSrc)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif *flTarget != \"\" {\n\t\tm.Target, err = filepath.Abs(*flTarget)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ check if target directory already mounted\n\tinfos, err := mount.Infos()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: mountinfo: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfor i := range infos {\n\t\tif infos[i].MountPoint == m.Target {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: %q is already mounted\\n\", m.Target)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ TODO add cleanup mechanism if something later fails\n\tif err := m.Mkdir(0755); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(m.Target)\n\n\tif *flDebug {\n\t\tfmt.Println(m.Options())\n\t}\n\n\tif err := mount.Mount(m); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif err := ctx.SaveMount(m); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"compress\/gzip\"\n\t\"compress\/zlib\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype proxyPattern struct {\n\tmatches []string\n\tproxyURL *url.URL\n}\n\ntype config struct { \/\/ nolint\n\tProxyPatterns []proxyPattern\n\tproxyURL *url.URL \/\/ PROXY_URL\n\tbasicAuthUser string \/\/ BASIC_AUTH_USER\n\tbasicAuthPass string \/\/ BASIC_AUTH_PASS\n\tport string \/\/ APP_PORT\n\taccessLog bool \/\/ ACCESS_LOG\n\tsslCert string \/\/ SSL_CERT_PATH\n\tsslKey string \/\/ SSL_KEY_PATH\n\tcontentEncoding bool \/\/ CONTENT_ENCODING\n\tcorsAllowOrigin string \/\/ CORS_ALLOW_ORIGIN\n\tcorsAllowMethods string \/\/ CORS_ALLOW_METHODS\n\tcorsAllowHeaders string \/\/ CORS_ALLOW_HEADERS\n\tcorsMaxAge int64 \/\/ CORS_MAX_AGE\n\thealthCheckPath string \/\/ HEALTHCHECK_PATH\n\tuseForwardedHost bool \/\/ USE_X_FORWARDED_HOST\n}\n\nvar (\n\tversion string\n\tdate string\n\tc *config\n)\n\nfunc main() {\n\tc = configFromEnvironmentVariables()\n\n\t\/\/ Proxy!!\n\thttp.Handle(\"\/\", wrapper(&httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tif c.useForwardedHost {\n\t\t\t\tr.Header.Set(\"X-Forwarded-Host\", r.Host)\n\t\t\t}\n\t\t\tfound := false\n\t\t\tfor _, patterns := range c.ProxyPatterns {\n\t\t\t\tif match(patterns.matches[0], r.URL.Scheme, false) &&\n\t\t\t\t\tmatch(patterns.matches[1], r.Host, false) &&\n\t\t\t\t\tmatch(patterns.matches[3], r.URL.Path, true) {\n\t\t\t\t\tif c.useForwardedHost {\n\t\t\t\t\t\tr.Host = patterns.proxyURL.Host\n\t\t\t\t\t\tr.URL.Host = r.Host\n\t\t\t\t\t} else {\n\t\t\t\t\t\tr.URL.Host = patterns.proxyURL.Host\n\t\t\t\t\t}\n\t\t\t\t\tr.URL.Scheme = patterns.proxyURL.Scheme\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tr.Host = c.proxyURL.Host\n\t\t\t\tr.URL.Host = r.Host\n\t\t\t\tr.URL.Scheme = c.proxyURL.Scheme\n\t\t\t}\n\t\t},\n\t}))\n\n\tif c.healthCheckPath != \"\" {\n\t\thttp.HandleFunc(c.healthCheckPath, func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t})\n\t}\n\n\thttp.HandleFunc(\"\/--version\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif len(version) > 0 && len(date) > 0 {\n\t\t\tfmt.Fprintf(w, \"version: %s (built at %s)\", version, date)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t}\n\t})\n\n\t\/\/ Listen & Serve\n\tlog.Printf(\"[service] listening on port %s\", c.port)\n\tif (len(c.sslCert) > 0) && (len(c.sslKey) > 0) {\n\t\tlog.Fatal(http.ListenAndServeTLS(\":\"+c.port, c.sslCert, c.sslKey, nil))\n\t} else {\n\t\tlog.Fatal(http.ListenAndServe(\":\"+c.port, nil))\n\t}\n}\n\nfunc match(pattern, target string, isPath bool) bool {\n\tif pattern == \"\" {\n\t\treturn true\n\t}\n\tpattern = strings.Replace(pattern, \"*\", \".*\", -1)\n\tif isPath && !strings.HasSuffix(pattern, \"\/\") {\n\t\tpattern += \"$\"\n\t}\n\tmatch, _ := regexp.MatchString(pattern, target)\n\treturn match\n}\n\nfunc configFromEnvironmentVariables() *config {\n\tcandidateProxyURL := strings.Trim(os.Getenv(\"PROXY_URL\"), \"\\\"\")\n\tcandidateProxyPatterns := strings.Trim(os.Getenv(\"PROXY_PATTERNS\"), \"\\\"\")\n\tif len(candidateProxyURL) == 0 && len(candidateProxyPatterns) == 0 {\n\t\tlog.Fatal(\"Missing required environment variable: PROXY_URL or PROXY_PATTERNS\")\n\t}\n\tvar proxyURL *url.URL\n\tvar err error\n\tif len(candidateProxyURL) > 0 {\n\t\tproxyURL, err = url.Parse(candidateProxyURL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not parse proxy URL: %s\", candidateProxyURL)\n\t\t}\n\t}\n\tProxyPatterns := []proxyPattern{}\n\tif len(candidateProxyPatterns) > 0 {\n\t\tregex := regexp.MustCompile(`(https?:\/\/)?([^:^\/]*)(:\\\\d*)?(.*)?`)\n\t\tfor _, pattern := range strings.Split(candidateProxyPatterns, \",\") {\n\t\t\tsplited := strings.Split(pattern, \"=\")\n\t\t\tif url, err := url.Parse(splited[1]); err == nil {\n\t\t\t\tpattern := proxyPattern{\n\t\t\t\t\tmatches: regex.FindStringSubmatch(splited[0])[1:],\n\t\t\t\t\tproxyURL: url,\n\t\t\t\t}\n\t\t\t\tProxyPatterns = append(ProxyPatterns, pattern)\n\t\t\t}\n\t\t}\n\t}\n\tport := os.Getenv(\"APP_PORT\")\n\tif len(port) == 0 {\n\t\tport = \"80\"\n\t}\n\taccessLog := false\n\tif b, err := strconv.ParseBool(os.Getenv(\"ACCESS_LOG\")); err == nil {\n\t\taccessLog = b\n\t}\n\tcontentEncoging := false\n\tif b, err := strconv.ParseBool(os.Getenv(\"CONTENT_ENCODING\")); err == nil {\n\t\tcontentEncoging = b\n\t}\n\tcorsMaxAge := int64(600)\n\tif i, err := strconv.ParseInt(os.Getenv(\"CORS_MAX_AGE\"), 10, 64); err == nil {\n\t\tcorsMaxAge = i\n\t}\n\tuseForwardedHost := true\n\tif b, err := strconv.ParseBool(os.Getenv(\"USE_X_FORWARDED_HOST\")); err == nil {\n\t\tuseForwardedHost = b\n\t}\n\tconf := &config{\n\t\tProxyPatterns: ProxyPatterns,\n\t\tproxyURL: proxyURL,\n\t\tbasicAuthUser: os.Getenv(\"BASIC_AUTH_USER\"),\n\t\tbasicAuthPass: os.Getenv(\"BASIC_AUTH_PASS\"),\n\t\tport: port,\n\t\taccessLog: accessLog,\n\t\tsslCert: os.Getenv(\"SSL_CERT_PATH\"),\n\t\tsslKey: os.Getenv(\"SSL_KEY_PATH\"),\n\t\tcontentEncoding: contentEncoging,\n\t\tcorsAllowOrigin: os.Getenv(\"CORS_ALLOW_ORIGIN\"),\n\t\tcorsAllowMethods: os.Getenv(\"CORS_ALLOW_METHODS\"),\n\t\tcorsAllowHeaders: os.Getenv(\"CORS_ALLOW_HEADERS\"),\n\t\tcorsMaxAge: corsMaxAge,\n\t\thealthCheckPath: os.Getenv(\"HEALTHCHECK_PATH\"),\n\t\tuseForwardedHost: useForwardedHost,\n\t}\n\t\/\/ TLS pem files\n\tif (len(conf.sslCert) > 0) && (len(conf.sslKey) > 0) {\n\t\tlog.Print(\"[config] TLS enabled.\")\n\t}\n\t\/\/ Basic authentication\n\tif (len(conf.basicAuthUser) > 0) && (len(conf.basicAuthPass) > 0) {\n\t\tlog.Printf(\"[config] Basic authentication: %s\", conf.basicAuthUser)\n\t}\n\t\/\/ CORS\n\tif (len(conf.corsAllowOrigin) > 0) && (conf.corsMaxAge > 0) {\n\t\tlog.Printf(\"[config] CORS enabled: %s\", conf.corsAllowOrigin)\n\t}\n\treturn conf\n}\n\ntype custom struct {\n\tio.Writer\n\thttp.ResponseWriter\n\tstatus int\n}\n\nfunc (r *custom) Write(b []byte) (int, error) {\n\tif r.Header().Get(\"Content-Type\") == \"\" {\n\t\tr.Header().Set(\"Content-Type\", http.DetectContentType(b))\n\t}\n\treturn r.Writer.Write(b)\n}\n\nfunc (r *custom) WriteHeader(status int) {\n\tr.ResponseWriter.WriteHeader(status)\n\tr.status = status\n}\n\nfunc wrapper(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif (len(c.corsAllowOrigin) > 0) && (len(c.corsAllowMethods) > 0) && (len(c.corsAllowHeaders) > 0) && (c.corsMaxAge > 0) {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", c.corsAllowOrigin)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Methods\", c.corsAllowMethods)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", c.corsAllowHeaders)\n\t\t\tw.Header().Set(\"Access-Control-Max-Age\", strconv.FormatInt(c.corsMaxAge, 10))\n\t\t}\n\t\tif (len(c.basicAuthUser) > 0) && (len(c.basicAuthPass) > 0) && !auth(r, c.basicAuthUser, c.basicAuthPass) {\n\t\t\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"REALM\"`)\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tproc := time.Now()\n\t\taddr := r.RemoteAddr\n\t\tif ip, found := header(r, \"X-Forwarded-For\"); found {\n\t\t\taddr = ip\n\t\t}\n\t\tioWriter := w.(io.Writer)\n\t\tif encodings, found := header(r, \"Accept-Encoding\"); found && c.contentEncoding {\n\t\t\tfor _, encoding := range splitCsvLine(encodings) {\n\t\t\t\tif encoding == \"gzip\" {\n\t\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t\t\tg := gzip.NewWriter(w)\n\t\t\t\t\tdefer g.Close()\n\t\t\t\t\tioWriter = g\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif encoding == \"deflate\" {\n\t\t\t\t\tw.Header().Set(\"Content-Encoding\", \"deflate\")\n\t\t\t\t\tz := zlib.NewWriter(w)\n\t\t\t\t\tdefer z.Close()\n\t\t\t\t\tioWriter = z\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twriter := &custom{Writer: ioWriter, ResponseWriter: w, status: http.StatusOK}\n\t\th.ServeHTTP(writer, r)\n\n\t\tif c.accessLog {\n\t\t\tlog.Printf(\"[%s] %.3f %d %s %s\",\n\t\t\t\taddr, time.Since(proc).Seconds(),\n\t\t\t\twriter.status, r.Method, r.URL)\n\t\t}\n\t})\n}\n\nfunc header(r *http.Request, key string) (string, bool) {\n\tif r.Header == nil {\n\t\treturn \"\", false\n\t}\n\tif candidate := r.Header[key]; len(candidate) > 0 {\n\t\treturn candidate[0], true\n\t}\n\treturn \"\", false\n}\n\nfunc splitCsvLine(data string) []string {\n\tsplitted := strings.SplitN(data, \",\", -1)\n\tparsed := make([]string, len(splitted))\n\tfor i, val := range splitted {\n\t\tparsed[i] = strings.TrimSpace(val)\n\t}\n\treturn parsed\n}\n\nfunc auth(r *http.Request, user, pass string) bool {\n\tif username, password, ok := r.BasicAuth(); ok {\n\t\treturn username == user && password == pass\n\t}\n\treturn false\n}\n<commit_msg>fix typo<commit_after>package main\n\nimport (\n\t\"compress\/gzip\"\n\t\"compress\/zlib\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype proxyPattern struct {\n\tmatches []string\n\tproxyURL *url.URL\n}\n\ntype config struct { \/\/ nolint\n\tProxyPatterns []proxyPattern\n\tproxyURL *url.URL \/\/ PROXY_URL\n\tbasicAuthUser string \/\/ BASIC_AUTH_USER\n\tbasicAuthPass string \/\/ BASIC_AUTH_PASS\n\tport string \/\/ APP_PORT\n\taccessLog bool \/\/ ACCESS_LOG\n\tsslCert string \/\/ SSL_CERT_PATH\n\tsslKey string \/\/ SSL_KEY_PATH\n\tcontentEncoding bool \/\/ CONTENT_ENCODING\n\tcorsAllowOrigin string \/\/ CORS_ALLOW_ORIGIN\n\tcorsAllowMethods string \/\/ CORS_ALLOW_METHODS\n\tcorsAllowHeaders string \/\/ CORS_ALLOW_HEADERS\n\tcorsMaxAge int64 \/\/ CORS_MAX_AGE\n\thealthCheckPath string \/\/ HEALTHCHECK_PATH\n\tuseForwardedHost bool \/\/ USE_X_FORWARDED_HOST\n}\n\nvar (\n\tversion string\n\tdate string\n\tc *config\n)\n\nfunc main() {\n\tc = configFromEnvironmentVariables()\n\n\t\/\/ Proxy!!\n\thttp.Handle(\"\/\", wrapper(&httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tif c.useForwardedHost {\n\t\t\t\tr.Header.Set(\"X-Forwarded-Host\", r.Host)\n\t\t\t}\n\t\t\tfound := false\n\t\t\tfor _, patterns := range c.ProxyPatterns {\n\t\t\t\tif match(patterns.matches[0], r.URL.Scheme, false) &&\n\t\t\t\t\tmatch(patterns.matches[1], r.Host, false) &&\n\t\t\t\t\tmatch(patterns.matches[3], r.URL.Path, true) {\n\t\t\t\t\tif c.useForwardedHost {\n\t\t\t\t\t\tr.Host = patterns.proxyURL.Host\n\t\t\t\t\t\tr.URL.Host = r.Host\n\t\t\t\t\t} else {\n\t\t\t\t\t\tr.URL.Host = patterns.proxyURL.Host\n\t\t\t\t\t}\n\t\t\t\t\tr.URL.Scheme = patterns.proxyURL.Scheme\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tr.Host = c.proxyURL.Host\n\t\t\t\tr.URL.Host = r.Host\n\t\t\t\tr.URL.Scheme = c.proxyURL.Scheme\n\t\t\t}\n\t\t},\n\t}))\n\n\tif c.healthCheckPath != \"\" {\n\t\thttp.HandleFunc(c.healthCheckPath, func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t})\n\t}\n\n\thttp.HandleFunc(\"\/--version\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif len(version) > 0 && len(date) > 0 {\n\t\t\tfmt.Fprintf(w, \"version: %s (built at %s)\", version, date)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t}\n\t})\n\n\t\/\/ Listen & Serve\n\tlog.Printf(\"[service] listening on port %s\", c.port)\n\tif (len(c.sslCert) > 0) && (len(c.sslKey) > 0) {\n\t\tlog.Fatal(http.ListenAndServeTLS(\":\"+c.port, c.sslCert, c.sslKey, nil))\n\t} else {\n\t\tlog.Fatal(http.ListenAndServe(\":\"+c.port, nil))\n\t}\n}\n\nfunc match(pattern, target string, isPath bool) bool {\n\tif pattern == \"\" {\n\t\treturn true\n\t}\n\tpattern = strings.Replace(pattern, \"*\", \".*\", -1)\n\tif isPath && !strings.HasSuffix(pattern, \"\/\") {\n\t\tpattern += \"$\"\n\t}\n\tmatch, _ := regexp.MatchString(pattern, target)\n\treturn match\n}\n\nfunc configFromEnvironmentVariables() *config {\n\tcandidateProxyURL := strings.Trim(os.Getenv(\"PROXY_URL\"), \"\\\"\")\n\tcandidateProxyPatterns := strings.Trim(os.Getenv(\"PROXY_PATTERNS\"), \"\\\"\")\n\tif len(candidateProxyURL) == 0 && len(candidateProxyPatterns) == 0 {\n\t\tlog.Fatal(\"Missing required environment variable: PROXY_URL or PROXY_PATTERNS\")\n\t}\n\tvar proxyURL *url.URL\n\tvar err error\n\tif len(candidateProxyURL) > 0 {\n\t\tproxyURL, err = url.Parse(candidateProxyURL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not parse proxy URL: %s\", candidateProxyURL)\n\t\t}\n\t}\n\tProxyPatterns := []proxyPattern{}\n\tif len(candidateProxyPatterns) > 0 {\n\t\tregex := regexp.MustCompile(`(https?:\/\/)?([^:^\/]*)(:\\\\d*)?(.*)?`)\n\t\tfor _, pattern := range strings.Split(candidateProxyPatterns, \",\") {\n\t\t\tsplited := strings.Split(pattern, \"=\")\n\t\t\tif url, err := url.Parse(splited[1]); err == nil {\n\t\t\t\tpattern := proxyPattern{\n\t\t\t\t\tmatches: regex.FindStringSubmatch(splited[0])[1:],\n\t\t\t\t\tproxyURL: url,\n\t\t\t\t}\n\t\t\t\tProxyPatterns = append(ProxyPatterns, pattern)\n\t\t\t}\n\t\t}\n\t}\n\tport := os.Getenv(\"APP_PORT\")\n\tif len(port) == 0 {\n\t\tport = \"80\"\n\t}\n\taccessLog := false\n\tif b, err := strconv.ParseBool(os.Getenv(\"ACCESS_LOG\")); err == nil {\n\t\taccessLog = b\n\t}\n\tcontentEncoding := false\n\tif b, err := strconv.ParseBool(os.Getenv(\"CONTENT_ENCODING\")); err == nil {\n\t\tcontentEncoding = b\n\t}\n\tcorsMaxAge := int64(600)\n\tif i, err := strconv.ParseInt(os.Getenv(\"CORS_MAX_AGE\"), 10, 64); err == nil {\n\t\tcorsMaxAge = i\n\t}\n\tuseForwardedHost := true\n\tif b, err := strconv.ParseBool(os.Getenv(\"USE_X_FORWARDED_HOST\")); err == nil {\n\t\tuseForwardedHost = b\n\t}\n\tconf := &config{\n\t\tProxyPatterns: ProxyPatterns,\n\t\tproxyURL: proxyURL,\n\t\tbasicAuthUser: os.Getenv(\"BASIC_AUTH_USER\"),\n\t\tbasicAuthPass: os.Getenv(\"BASIC_AUTH_PASS\"),\n\t\tport: port,\n\t\taccessLog: accessLog,\n\t\tsslCert: os.Getenv(\"SSL_CERT_PATH\"),\n\t\tsslKey: os.Getenv(\"SSL_KEY_PATH\"),\n\t\tcontentEncoding: contentEncoding,\n\t\tcorsAllowOrigin: os.Getenv(\"CORS_ALLOW_ORIGIN\"),\n\t\tcorsAllowMethods: os.Getenv(\"CORS_ALLOW_METHODS\"),\n\t\tcorsAllowHeaders: os.Getenv(\"CORS_ALLOW_HEADERS\"),\n\t\tcorsMaxAge: corsMaxAge,\n\t\thealthCheckPath: os.Getenv(\"HEALTHCHECK_PATH\"),\n\t\tuseForwardedHost: useForwardedHost,\n\t}\n\t\/\/ TLS pem files\n\tif (len(conf.sslCert) > 0) && (len(conf.sslKey) > 0) {\n\t\tlog.Print(\"[config] TLS enabled.\")\n\t}\n\t\/\/ Basic authentication\n\tif (len(conf.basicAuthUser) > 0) && (len(conf.basicAuthPass) > 0) {\n\t\tlog.Printf(\"[config] Basic authentication: %s\", conf.basicAuthUser)\n\t}\n\t\/\/ CORS\n\tif (len(conf.corsAllowOrigin) > 0) && (conf.corsMaxAge > 0) {\n\t\tlog.Printf(\"[config] CORS enabled: %s\", conf.corsAllowOrigin)\n\t}\n\treturn conf\n}\n\ntype custom struct {\n\tio.Writer\n\thttp.ResponseWriter\n\tstatus int\n}\n\nfunc (r *custom) Write(b []byte) (int, error) {\n\tif r.Header().Get(\"Content-Type\") == \"\" {\n\t\tr.Header().Set(\"Content-Type\", http.DetectContentType(b))\n\t}\n\treturn r.Writer.Write(b)\n}\n\nfunc (r *custom) WriteHeader(status int) {\n\tr.ResponseWriter.WriteHeader(status)\n\tr.status = status\n}\n\nfunc wrapper(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif (len(c.corsAllowOrigin) > 0) && (len(c.corsAllowMethods) > 0) && (len(c.corsAllowHeaders) > 0) && (c.corsMaxAge > 0) {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", c.corsAllowOrigin)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Methods\", c.corsAllowMethods)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", c.corsAllowHeaders)\n\t\t\tw.Header().Set(\"Access-Control-Max-Age\", strconv.FormatInt(c.corsMaxAge, 10))\n\t\t}\n\t\tif (len(c.basicAuthUser) > 0) && (len(c.basicAuthPass) > 0) && !auth(r, c.basicAuthUser, c.basicAuthPass) {\n\t\t\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"REALM\"`)\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tproc := time.Now()\n\t\taddr := r.RemoteAddr\n\t\tif ip, found := header(r, \"X-Forwarded-For\"); found {\n\t\t\taddr = ip\n\t\t}\n\t\tioWriter := w.(io.Writer)\n\t\tif encodings, found := header(r, \"Accept-Encoding\"); found && c.contentEncoding {\n\t\t\tfor _, encoding := range splitCsvLine(encodings) {\n\t\t\t\tif encoding == \"gzip\" {\n\t\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t\t\tg := gzip.NewWriter(w)\n\t\t\t\t\tdefer g.Close()\n\t\t\t\t\tioWriter = g\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif encoding == \"deflate\" {\n\t\t\t\t\tw.Header().Set(\"Content-Encoding\", \"deflate\")\n\t\t\t\t\tz := zlib.NewWriter(w)\n\t\t\t\t\tdefer z.Close()\n\t\t\t\t\tioWriter = z\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twriter := &custom{Writer: ioWriter, ResponseWriter: w, status: http.StatusOK}\n\t\th.ServeHTTP(writer, r)\n\n\t\tif c.accessLog {\n\t\t\tlog.Printf(\"[%s] %.3f %d %s %s\",\n\t\t\t\taddr, time.Since(proc).Seconds(),\n\t\t\t\twriter.status, r.Method, r.URL)\n\t\t}\n\t})\n}\n\nfunc header(r *http.Request, key string) (string, bool) {\n\tif r.Header == nil {\n\t\treturn \"\", false\n\t}\n\tif candidate := r.Header[key]; len(candidate) > 0 {\n\t\treturn candidate[0], true\n\t}\n\treturn \"\", false\n}\n\nfunc splitCsvLine(data string) []string {\n\tsplitted := strings.SplitN(data, \",\", -1)\n\tparsed := make([]string, len(splitted))\n\tfor i, val := range splitted {\n\t\tparsed[i] = strings.TrimSpace(val)\n\t}\n\treturn parsed\n}\n\nfunc auth(r *http.Request, user, pass string) bool {\n\tif username, password, ok := r.BasicAuth(); ok {\n\t\treturn username == user && password == pass\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n)\n\n\/\/ Metric holds the details of a metric\ntype Metric struct {\n\tName string `json:\"name\"`\n\tTimestamp int `json:\"timestamp\"`\n\tValue float64 `json:\"value\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\nfunc main() {\n\thost := flag.String(\"host\", \"localhost:4242\", \"The host:port to connect to. Defaults to 'localhost:4242'\")\n\tflag.Parse()\n\n\tdec := json.NewDecoder(os.Stdin)\n\tconn, err := net.Dial(\"tcp\", *host)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfor {\n\t\tvar m Metric\n\t\tif err := dec.Decode(&m); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\to := fmt.Sprintf(\"put %s %d %f\", m.Name, m.Timestamp, m.Value)\n\n\t\tfor name, value := range m.Tags {\n\t\t\to += fmt.Sprintf(\" %s=%s\", name, value)\n\t\t}\n\n\t\to += \"\\r\"\n\n\t\tfmt.Fprint(conn, o)\n\t}\n}\n<commit_msg>Changes \\r to \\n<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n)\n\n\/\/ Metric holds the details of a metric\ntype Metric struct {\n\tName string `json:\"name\"`\n\tTimestamp int `json:\"timestamp\"`\n\tValue float64 `json:\"value\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\nfunc main() {\n\thost := flag.String(\"host\", \"localhost:4242\", \"The host:port to connect to. Defaults to 'localhost:4242'\")\n\tflag.Parse()\n\n\tdec := json.NewDecoder(os.Stdin)\n\tconn, err := net.Dial(\"tcp\", *host)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfor {\n\t\tvar m Metric\n\t\tif err := dec.Decode(&m); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\to := fmt.Sprintf(\"put %s %d %f\", m.Name, m.Timestamp, m.Value)\n\n\t\tfor name, value := range m.Tags {\n\t\t\to += fmt.Sprintf(\" %s=%s\", name, value)\n\t\t}\n\n\t\to += \"\\n\"\n\n\t\tfmt.Fprint(conn, o)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\/\/ Used to parse TOML configuration file\n\t\"github.com\/naoina\/toml\"\n)\n\n\/\/ urlsInProgress is a wait group, for concurrency\nvar urlsInProgress sync.WaitGroup\n\n\/\/ RedirectError is a custom error type for following redirects, and can be safely ignored\ntype RedirectError struct {\n\tRedirectRequest *http.Request\n}\n\n\/\/ Error method returns a string of the error\nfunc (err *RedirectError) Error() string {\n\treturn fmt.Sprintf(\"Redirect not followed to: %v\", err.RedirectRequest.URL.String())\n}\n\n\/\/ Configuration holds all the configuration data passed in from the config.TOML file.\ntype Configuration struct {\n\tCount int\n\tVerbose bool\n\tTarget []Target\n}\n\ntype Target struct {\n\tMethod string\n\tUrl string\n\tBody string\n\tCookies []string\n\tRedirects bool\n\tCookieJar http.CookieJar\n}\n\n\/\/ REF: Access parts of the Configuration object.\n\/\/ fmt.Printf(\"All: %v\\n\", config)\n\/\/ fmt.Printf(\"Count: %v\\n\", config.Count)\n\/\/ fmt.Printf(\"Verbose: %v\\n\", config.Verbose)\n\/\/ fmt.Println(\"Targets:\")\n\/\/ for _, target := range config.Target {\n\/\/ \tfmt.Printf(\"\\n\\tMethod: %s\\n\", target.Method)\n\/\/ \tfmt.Printf(\"\\tURL: %s\\n\", target.Url)\n\/\/ \tfmt.Printf(\"\\tBody: %s\\n\", target.Body)\n\/\/ \tfmt.Printf(\"\\tRedirects: %v\\n\", target.Redirects)\n\/\/ \tfor _, cookie := range target.Cookies {\n\/\/ \t\tfmt.Printf(\"\\tCookie: %s\\n\", cookie)\n\/\/ \t}\n\/\/ \t\/\/ Add the cookie jar after TOML is unmarshaled\n\/\/ \ttarget.CookieJar, _ = cookiejar.New(nil)\n\/\/ \tfmt.Printf(\"\\tCookieJar: %v\\n\", target.CookieJar)\n\/\/ }\n\nvar configuration Configuration\n\n\/\/ Usage message\nvar usage string\n\n\/\/ Function init initializes the program defaults\nfunc init() {\n\tusage = fmt.Sprintf(\"Usage: %s config.toml\", os.Args[0])\n}\n\n\/\/ Function main is the entrypoint to the application. It sends the work to the appropriate functions, sequentially.\nfunc main() {\n\t\/\/ Change output location of logs\n\tlog.SetOutput(os.Stdout)\n\n\t\/\/ Check the config file\n\tif len(os.Args) != 2 {\n\t\t\/\/ No configuration file provided\n\t\tlog.Println(\"[ERROR] No configuration file location provided.\")\n\t\tfmt.Println(usage)\n\t\tos.Exit(1)\n\t}\n\tconfigFile := os.Args[1]\n\tvar err error\n\tconfiguration, err = getConfig(configFile)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tfmt.Println(usage)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Send the requests concurrently\n\tlog.Println(\"Requests begin.\")\n\tresponses, errors := sendRequests()\n\tif len(errors) != 0 {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"[ERROR] %s\\n\", err.Error())\n\t\t}\n\t}\n\n\t\/\/ Make sure all response bodies are closed- memory leaks otherwise\n\tdefer func() {\n\t\tfor resp := range responses {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\t\/\/ Compare the responses for uniqueness\n\tuniqueResponses, errors := compareResponses(responses)\n\tif len(errors) != 0 {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"[ERROR] %s\\n\", err.Error())\n\t\t}\n\t}\n\n\t\/\/ Make sure all response bodies are closed- memory leaks otherwise\n\tdefer func() {\n\t\tfor resp := range uniqueResponses {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\t\/\/ Output the responses\n\toutputResponses(uniqueResponses)\n\n\t\/\/ Echo completion\n\tlog.Println(\"Complete.\")\n}\n\n\/\/ Function getConfig checks that all necessary configuration fields are given\n\/\/ in a valid config file, and parses it for data.\n\/\/ Returns a Configuration object if successful.\n\/\/ Returns an empty Configuration object and a custom error if something went wrong.\nfunc getConfig(location string) (Configuration, error) {\n\tf, err := os.Open(location)\n\tif err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error opening configuration file: %s\", err.Error())\n\t}\n\tdefer f.Close()\n\n\tbuf, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error reading from configuration file: %s\", err.Error())\n\t}\n\tvar config Configuration\n\t\/\/ Parse all data from the provided configuration file into a Configuration object\n\tif err := toml.Unmarshal(buf, &config); err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error with TOML file: %s\", err.Error())\n\t}\n\n\t\/\/ Add the cookies to the cookiejar for each target\n\tfor _, target := range config.Target {\n\t\ttarget.CookieJar, _ = cookiejar.New(nil)\n\t\tvar cookies []*http.Cookie\n\t\tfor _, c := range target.Cookies {\n\t\t\t\/\/ Split the cookie name and value\n\t\t\tvals := strings.Split(c, \"=\")\n\t\t\tcookieName := strings.TrimSpace(vals[0])\n\t\t\tcookieValue := strings.TrimSpace(vals[1])\n\n\t\t\t\/\/ Create the cookie\n\t\t\tcookie := &http.Cookie{\n\t\t\t\tName: cookieName,\n\t\t\t\tValue: cookieValue,\n\t\t\t}\n\n\t\t\t\/\/ Add the cookie to the new slice of cookies\n\t\t\tcookies = append(cookies, cookie)\n\t\t}\n\n\t\t\/\/ Associate the cookies with the current target\n\t\ttargetUrl, err := url.Parse(target.Url)\n\t\tif err != nil {\n\t\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error parsing target URL: %s\", err.Error())\n\t\t}\n\t\ttarget.CookieJar.SetCookies(targetUrl, cookies)\n\t}\n\n\t\/\/ TODO: Add defaults here, if values not present\n\t\/\/ Redirects = false\n\t\/\/ Count = 100\n\t\/\/ Verbose = false\n\n\t\/\/ TODO: Check if targets empty, if so, return with an error\n\n\treturn config, nil\n}\n\n\/\/ Function sendRequests takes care of sending the requests to the target concurrently.\n\/\/ Errors are passed back in a channel of errors. If the length is zero, there were no errors.\nfunc sendRequests() (responses chan *http.Response, errors chan error) {\n\t\/\/ Initialize the concurrency objects\n\tresponses = make(chan *http.Response, configuration.Count*len(configuration.Target))\n\terrors = make(chan error, configuration.Count*len(configuration.Target))\n\turlsInProgress.Add(configuration.Count * len(configuration.Target))\n\n\t\/\/ Send requests to multiple URLs (if present) the same number of times\n\tfor _, target := range configuration.Target {\n\t\tgo func(t Target) {\n\t\t\t\/\/ Cast the target URL to a URL type\n\t\t\ttUrl, err := url.Parse(t.Url)\n\t\t\tif err != nil {\n\t\t\t\terrors <- fmt.Errorf(\"[ERROR] Error parsing URL %s: %v\", t.Url, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ VERBOSE\n\t\t\tif configuration.Verbose {\n\t\t\t\tlog.Printf(\"[VERBOSE] Sending %d %s requests to %s\\n\", configuration.Count, t.Method, tUrl.String())\n\t\t\t\tif t.Body != \"\" {\n\t\t\t\t\tlog.Printf(\"[VERBOSE] Request body: %s\", t.Body)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i := 0; i < configuration.Count; i++ {\n\t\t\t\tgo func(index int) {\n\t\t\t\t\t\/\/ Ensure that the waitgroup element is returned\n\t\t\t\t\tdefer urlsInProgress.Done()\n\n\t\t\t\t\t\/\/ Convert the request body to an io.Reader interface, to pass to the request.\n\t\t\t\t\t\/\/ This must be done in the loop, because any call to client.Do() will\n\t\t\t\t\t\/\/ read the body contents on the first time, but not any subsequent requests.\n\t\t\t\t\trequestBody := strings.NewReader(t.Body)\n\n\t\t\t\t\t\/\/ Declare HTTP request method and URL\n\t\t\t\t\treq, err := http.NewRequest(t.Method, tUrl.String(), requestBody)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in forming request: %v\", err.Error())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Create the HTTP client\n\t\t\t\t\t\/\/ Using Cookie jar\n\t\t\t\t\t\/\/ Ignoring TLS errors\n\t\t\t\t\t\/\/ Ignoring redirects (more accurate output), depending on user flag\n\t\t\t\t\t\/\/ Implementing a connection timeouts, for slow clients & servers (especially important with race conditions on the server)\n\t\t\t\t\tvar client http.Client\n\n\t\t\t\t\t\/\/ TODO: Add context to http client requests to manually specify timeout options (new in Go 1.7)\n\n\t\t\t\t\tif t.Redirects {\n\t\t\t\t\t\tclient = http.Client{\n\t\t\t\t\t\t\tJar: t.CookieJar,\n\t\t\t\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimeout: 20 * time.Second,\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tclient = http.Client{\n\t\t\t\t\t\t\tJar: t.CookieJar,\n\t\t\t\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\t\t\t\t\t\/\/ Craft the custom error\n\t\t\t\t\t\t\t\tredirectError := RedirectError{req}\n\t\t\t\t\t\t\t\treturn &redirectError\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimeout: 20 * time.Second,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Make the request\n\t\t\t\t\tresp, err := client.Do(req)\n\t\t\t\t\t\/\/ Check the error type from the request\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif uErr, ok := err.(*url.Error); ok {\n\t\t\t\t\t\t\tif rErr, ok2 := uErr.Err.(*RedirectError); ok2 {\n\t\t\t\t\t\t\t\t\/\/ Redirect error\n\t\t\t\t\t\t\t\t\/\/ VERBOSE\n\t\t\t\t\t\t\t\tif configuration.Verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"[VERBOSE] %v\\n\", rErr)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\/\/ Add the response to the responses channel, because it is still valid\n\t\t\t\t\t\t\t\tresponses <- resp\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\/\/ URL Error, but not a redirect error\n\t\t\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in request #%v: %v\\n\", index, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\/\/ Other type of error\n\t\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in request #%v: %v\\n\", index, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ Add the response to the responses channel\n\t\t\t\t\t\tresponses <- resp\n\t\t\t\t\t}\n\t\t\t\t}(i)\n\t\t\t}\n\t\t}(target)\n\t}\n\n\t\/\/ Wait for the URLs to finish sending\n\turlsInProgress.Wait()\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Requests complete.\")\n\t}\n\n\t\/\/ Close the response and error chanels, so they don't block on the range read\n\tclose(responses)\n\tclose(errors)\n\n\treturn\n}\n\n\/\/ Function compareResponses compares the responses returned from the requests,\n\/\/ and adds them to a map, where the key is an *http.Response, and the value is\n\/\/ the number of similar responses observed.\nfunc compareResponses(responses chan *http.Response) (uniqueResponses map[*http.Response]int, errors chan error) {\n\t\/\/ Initialize the unique responses map\n\tuniqueResponses = make(map[*http.Response]int)\n\n\t\/\/ Initialize the error channel\n\terrors = make(chan error, len(responses))\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Unique response comparison begin.\\n\")\n\t}\n\n\t\/\/ Compare the responses, one at a time\n\tfor resp := range responses {\n\t\t\/\/ Read the response body\n\t\trespBody, err := readResponseBody(resp)\n\t\tif err != nil {\n\t\t\terrors <- fmt.Errorf(\"Error reading response body: %s\", err.Error())\n\n\t\t\t\/\/ Exit this loop\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add an entry, if the unique responses map is empty\n\t\tif len(uniqueResponses) == 0 {\n\t\t\tuniqueResponses[resp] = 0\n\t\t} else {\n\t\t\t\/\/ Add to the unique responses map, if no similar ones exist\n\t\t\t\/\/ Assume unique, until similar found\n\t\t\tunique := true\n\t\t\tfor uResp := range uniqueResponses {\n\t\t\t\t\/\/ Read the unique response body\n\t\t\t\tuRespBody, err := readResponseBody(uResp)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- fmt.Errorf(\"Error reading unique response body: %s\", err.Error())\n\n\t\t\t\t\t\/\/ Error, move on to the next inner loop value\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Compare the response bodies\n\t\t\t\trespBodyMatch := false\n\t\t\t\tif string(respBody) == string(uRespBody) {\n\t\t\t\t\trespBodyMatch = true\n\t\t\t\t}\n\n\t\t\t\t\/\/ Compare response status code, body content, and content length\n\t\t\t\tif resp.StatusCode == uResp.StatusCode && resp.ContentLength == uResp.ContentLength && respBodyMatch {\n\t\t\t\t\t\/\/ Match, increase count\n\t\t\t\t\tuniqueResponses[uResp]++\n\t\t\t\t\tunique = false\n\t\t\t\t\t\/\/ Exit inner loop\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check if unique from all other unique responses\n\t\t\tif unique {\n\t\t\t\t\/\/ Unique, add to unique responses\n\t\t\t\tuniqueResponses[resp] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Unique response comparision complete.\\n\")\n\t}\n\n\t\/\/ Close the error channel\n\tclose(errors)\n\n\treturn\n}\n\nfunc outputResponses(uniqueResponses map[*http.Response]int) {\n\t\/\/ Display the responses\n\tlog.Printf(\"Responses:\\n\")\n\tfor resp, count := range uniqueResponses {\n\t\t\/\/ TODO: Output request here\n\t\tfmt.Printf(\"Response:\\n\")\n\t\tfmt.Printf(\"[Status Code] %v\\n\", resp.StatusCode)\n\t\tfmt.Printf(\"[Protocol] %v\\n\", resp.Proto)\n\t\tif len(resp.Header) != 0 {\n\t\t\tfmt.Println(\"[Headers]\")\n\t\t\tfor header, value := range resp.Header {\n\t\t\t\tfmt.Printf(\"\\t%v: %v\\n\", header, value)\n\t\t\t}\n\t\t}\n\t\tlocation, err := resp.Location()\n\t\tif err != http.ErrNoLocation {\n\t\t\tfmt.Printf(\"[Location] %v\\n\", location.String())\n\t\t}\n\t\trespBody, err := readResponseBody(resp)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[Body] \")\n\t\t\tfmt.Printf(\"[ERROR] Error reading body: %v.\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"[Body]\\n%s\\n\", respBody)\n\t\t\t\/\/ Close the response body\n\t\t\tresp.Body.Close()\n\t\t}\n\t\tfmt.Printf(\"Similar: %v\\n\\n\", count)\n\t}\n}\n\n\/\/ Function readResponseBody is a helper function to read the content form a response's body,\n\/\/ and refill the body with another io.ReadCloser, so that it can be read again.\nfunc readResponseBody(resp *http.Response) (content []byte, err error) {\n\t\/\/ Get the content\n\tcontent, err = ioutil.ReadAll(resp.Body)\n\n\t\/\/ Reset the response body\n\trCloser := ioutil.NopCloser(bytes.NewBuffer(content))\n\tresp.Body = rCloser\n\n\treturn\n}\n<commit_msg>Implemented default configuration values.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\/\/ Used to parse TOML configuration file\n\t\"github.com\/naoina\/toml\"\n)\n\n\/\/ urlsInProgress is a wait group, for concurrency\nvar urlsInProgress sync.WaitGroup\n\n\/\/ RedirectError is a custom error type for following redirects, and can be safely ignored\ntype RedirectError struct {\n\tRedirectRequest *http.Request\n}\n\n\/\/ Error method returns a string of the error\nfunc (err *RedirectError) Error() string {\n\treturn fmt.Sprintf(\"Redirect not followed to: %v\", err.RedirectRequest.URL.String())\n}\n\n\/\/ Configuration holds all the configuration data passed in from the config.TOML file.\ntype Configuration struct {\n\tCount int\n\tVerbose bool\n\tTarget []Target\n}\n\ntype Target struct {\n\tMethod string\n\tUrl string\n\tBody string\n\tCookies []string\n\tRedirects bool\n\tCookieJar http.CookieJar\n}\n\n\/\/ REF: Access parts of the Configuration object.\n\/\/ fmt.Printf(\"All: %v\\n\", config)\n\/\/ fmt.Printf(\"Count: %v\\n\", config.Count)\n\/\/ fmt.Printf(\"Verbose: %v\\n\", config.Verbose)\n\/\/ fmt.Println(\"Targets:\")\n\/\/ for _, target := range config.Target {\n\/\/ \tfmt.Printf(\"\\n\\tMethod: %s\\n\", target.Method)\n\/\/ \tfmt.Printf(\"\\tURL: %s\\n\", target.Url)\n\/\/ \tfmt.Printf(\"\\tBody: %s\\n\", target.Body)\n\/\/ \tfmt.Printf(\"\\tRedirects: %v\\n\", target.Redirects)\n\/\/ \tfor _, cookie := range target.Cookies {\n\/\/ \t\tfmt.Printf(\"\\tCookie: %s\\n\", cookie)\n\/\/ \t}\n\/\/ \t\/\/ Add the cookie jar after TOML is unmarshaled\n\/\/ \ttarget.CookieJar, _ = cookiejar.New(nil)\n\/\/ \tfmt.Printf(\"\\tCookieJar: %v\\n\", target.CookieJar)\n\/\/ }\n\nvar configuration Configuration\n\n\/\/ Usage message\nvar usage string\n\n\/\/ Function init initializes the program defaults\nfunc init() {\n\tusage = fmt.Sprintf(\"Usage: %s config.toml\", os.Args[0])\n}\n\n\/\/ Function main is the entrypoint to the application. It sends the work to the appropriate functions, sequentially.\nfunc main() {\n\t\/\/ Change output location of logs\n\tlog.SetOutput(os.Stdout)\n\n\t\/\/ Check the config file\n\tif len(os.Args) != 2 {\n\t\t\/\/ No configuration file provided\n\t\tlog.Println(\"[ERROR] No configuration file location provided.\")\n\t\tfmt.Println(usage)\n\t\tos.Exit(1)\n\t}\n\tconfigFile := os.Args[1]\n\tvar err error\n\tconfiguration, err = getConfig(configFile)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tfmt.Println(usage)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Send the requests concurrently\n\tlog.Println(\"Requests begin.\")\n\tresponses, errors := sendRequests()\n\tif len(errors) != 0 {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"[ERROR] %s\\n\", err.Error())\n\t\t}\n\t}\n\n\t\/\/ Make sure all response bodies are closed- memory leaks otherwise\n\tdefer func() {\n\t\tfor resp := range responses {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\t\/\/ Compare the responses for uniqueness\n\tuniqueResponses, errors := compareResponses(responses)\n\tif len(errors) != 0 {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"[ERROR] %s\\n\", err.Error())\n\t\t}\n\t}\n\n\t\/\/ Make sure all response bodies are closed- memory leaks otherwise\n\tdefer func() {\n\t\tfor resp := range uniqueResponses {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\t\/\/ Output the responses\n\toutputResponses(uniqueResponses)\n\n\t\/\/ Echo completion\n\tlog.Println(\"Complete.\")\n}\n\n\/\/ Function getConfig checks that all necessary configuration fields are given\n\/\/ in a valid config file, and parses it for data.\n\/\/ Returns a Configuration object if successful.\n\/\/ Returns an empty Configuration object and a custom error if something went wrong.\nfunc getConfig(location string) (Configuration, error) {\n\tf, err := os.Open(location)\n\tif err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error opening configuration file: %s\", err.Error())\n\t}\n\tdefer f.Close()\n\n\tbuf, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error reading from configuration file: %s\", err.Error())\n\t}\n\tvar config Configuration\n\t\/\/ Parse all data from the provided configuration file into a Configuration object\n\tif err := toml.Unmarshal(buf, &config); err != nil {\n\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error with TOML file: %s\", err.Error())\n\t}\n\n\t\/\/ Add the cookies to the cookiejar for each target\n\tfor _, target := range config.Target {\n\t\ttarget.CookieJar, _ = cookiejar.New(nil)\n\t\tvar cookies []*http.Cookie\n\t\tfor _, c := range target.Cookies {\n\t\t\t\/\/ Split the cookie name and value\n\t\t\tvals := strings.Split(c, \"=\")\n\t\t\tcookieName := strings.TrimSpace(vals[0])\n\t\t\tcookieValue := strings.TrimSpace(vals[1])\n\n\t\t\t\/\/ Create the cookie\n\t\t\tcookie := &http.Cookie{\n\t\t\t\tName: cookieName,\n\t\t\t\tValue: cookieValue,\n\t\t\t}\n\n\t\t\t\/\/ Add the cookie to the new slice of cookies\n\t\t\tcookies = append(cookies, cookie)\n\t\t}\n\n\t\t\/\/ Associate the cookies with the current target\n\t\ttargetUrl, err := url.Parse(target.Url)\n\t\tif err != nil {\n\t\t\treturn Configuration{}, fmt.Errorf(\"[ERROR] Error parsing target URL: %s\", err.Error())\n\t\t}\n\t\ttarget.CookieJar.SetCookies(targetUrl, cookies)\n\t}\n\n\t\/\/ Set default values\n\tconfig = setDefaults(config)\n\n\t\/\/ TODO: Check if targets empty, if so, return with an error\n\n\treturn config, nil\n}\n\n\/\/ Function setDefaults sets the default options, if not present in the configuration file.\n\/\/ Redirects and verbose are both false, as the default value of a boolean.\nfunc setDefaults(config Configuration) Configuration {\n\t\/\/ Count\n\tif config.Count == 0 {\n\t\t\/\/ Set to default value of 100\n\t\tconfig.Count = 100\n\t}\n\n\treturn config\n}\n\n\/\/ Function sendRequests takes care of sending the requests to the target concurrently.\n\/\/ Errors are passed back in a channel of errors. If the length is zero, there were no errors.\nfunc sendRequests() (responses chan *http.Response, errors chan error) {\n\t\/\/ Initialize the concurrency objects\n\tresponses = make(chan *http.Response, configuration.Count*len(configuration.Target))\n\terrors = make(chan error, configuration.Count*len(configuration.Target))\n\turlsInProgress.Add(configuration.Count * len(configuration.Target))\n\n\t\/\/ Send requests to multiple URLs (if present) the same number of times\n\tfor _, target := range configuration.Target {\n\t\tgo func(t Target) {\n\t\t\t\/\/ Cast the target URL to a URL type\n\t\t\ttUrl, err := url.Parse(t.Url)\n\t\t\tif err != nil {\n\t\t\t\terrors <- fmt.Errorf(\"[ERROR] Error parsing URL %s: %v\", t.Url, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ VERBOSE\n\t\t\tif configuration.Verbose {\n\t\t\t\tlog.Printf(\"[VERBOSE] Sending %d %s requests to %s\\n\", configuration.Count, t.Method, tUrl.String())\n\t\t\t\tif t.Body != \"\" {\n\t\t\t\t\tlog.Printf(\"[VERBOSE] Request body: %s\", t.Body)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i := 0; i < configuration.Count; i++ {\n\t\t\t\tgo func(index int) {\n\t\t\t\t\t\/\/ Ensure that the waitgroup element is returned\n\t\t\t\t\tdefer urlsInProgress.Done()\n\n\t\t\t\t\t\/\/ Convert the request body to an io.Reader interface, to pass to the request.\n\t\t\t\t\t\/\/ This must be done in the loop, because any call to client.Do() will\n\t\t\t\t\t\/\/ read the body contents on the first time, but not any subsequent requests.\n\t\t\t\t\trequestBody := strings.NewReader(t.Body)\n\n\t\t\t\t\t\/\/ Declare HTTP request method and URL\n\t\t\t\t\treq, err := http.NewRequest(t.Method, tUrl.String(), requestBody)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in forming request: %v\", err.Error())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Create the HTTP client\n\t\t\t\t\t\/\/ Using Cookie jar\n\t\t\t\t\t\/\/ Ignoring TLS errors\n\t\t\t\t\t\/\/ Ignoring redirects (more accurate output), depending on user flag\n\t\t\t\t\t\/\/ Implementing a connection timeouts, for slow clients & servers (especially important with race conditions on the server)\n\t\t\t\t\tvar client http.Client\n\n\t\t\t\t\t\/\/ TODO: Add context to http client requests to manually specify timeout options (new in Go 1.7)\n\n\t\t\t\t\tif t.Redirects {\n\t\t\t\t\t\tclient = http.Client{\n\t\t\t\t\t\t\tJar: t.CookieJar,\n\t\t\t\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimeout: 20 * time.Second,\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tclient = http.Client{\n\t\t\t\t\t\t\tJar: t.CookieJar,\n\t\t\t\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\t\t\t\t\t\/\/ Craft the custom error\n\t\t\t\t\t\t\t\tredirectError := RedirectError{req}\n\t\t\t\t\t\t\t\treturn &redirectError\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimeout: 20 * time.Second,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Make the request\n\t\t\t\t\tresp, err := client.Do(req)\n\t\t\t\t\t\/\/ Check the error type from the request\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif uErr, ok := err.(*url.Error); ok {\n\t\t\t\t\t\t\tif rErr, ok2 := uErr.Err.(*RedirectError); ok2 {\n\t\t\t\t\t\t\t\t\/\/ Redirect error\n\t\t\t\t\t\t\t\t\/\/ VERBOSE\n\t\t\t\t\t\t\t\tif configuration.Verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"[VERBOSE] %v\\n\", rErr)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\/\/ Add the response to the responses channel, because it is still valid\n\t\t\t\t\t\t\t\tresponses <- resp\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\/\/ URL Error, but not a redirect error\n\t\t\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in request #%v: %v\\n\", index, err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\/\/ Other type of error\n\t\t\t\t\t\t\terrors <- fmt.Errorf(\"Error in request #%v: %v\\n\", index, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ Add the response to the responses channel\n\t\t\t\t\t\tresponses <- resp\n\t\t\t\t\t}\n\t\t\t\t}(i)\n\t\t\t}\n\t\t}(target)\n\t}\n\n\t\/\/ Wait for the URLs to finish sending\n\turlsInProgress.Wait()\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Requests complete.\")\n\t}\n\n\t\/\/ Close the response and error chanels, so they don't block on the range read\n\tclose(responses)\n\tclose(errors)\n\n\treturn\n}\n\n\/\/ Function compareResponses compares the responses returned from the requests,\n\/\/ and adds them to a map, where the key is an *http.Response, and the value is\n\/\/ the number of similar responses observed.\nfunc compareResponses(responses chan *http.Response) (uniqueResponses map[*http.Response]int, errors chan error) {\n\t\/\/ Initialize the unique responses map\n\tuniqueResponses = make(map[*http.Response]int)\n\n\t\/\/ Initialize the error channel\n\terrors = make(chan error, len(responses))\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Unique response comparison begin.\\n\")\n\t}\n\n\t\/\/ Compare the responses, one at a time\n\tfor resp := range responses {\n\t\t\/\/ Read the response body\n\t\trespBody, err := readResponseBody(resp)\n\t\tif err != nil {\n\t\t\terrors <- fmt.Errorf(\"Error reading response body: %s\", err.Error())\n\n\t\t\t\/\/ Exit this loop\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add an entry, if the unique responses map is empty\n\t\tif len(uniqueResponses) == 0 {\n\t\t\tuniqueResponses[resp] = 0\n\t\t} else {\n\t\t\t\/\/ Add to the unique responses map, if no similar ones exist\n\t\t\t\/\/ Assume unique, until similar found\n\t\t\tunique := true\n\t\t\tfor uResp := range uniqueResponses {\n\t\t\t\t\/\/ Read the unique response body\n\t\t\t\tuRespBody, err := readResponseBody(uResp)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- fmt.Errorf(\"Error reading unique response body: %s\", err.Error())\n\n\t\t\t\t\t\/\/ Error, move on to the next inner loop value\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Compare the response bodies\n\t\t\t\trespBodyMatch := false\n\t\t\t\tif string(respBody) == string(uRespBody) {\n\t\t\t\t\trespBodyMatch = true\n\t\t\t\t}\n\n\t\t\t\t\/\/ Compare response status code, body content, and content length\n\t\t\t\tif resp.StatusCode == uResp.StatusCode && resp.ContentLength == uResp.ContentLength && respBodyMatch {\n\t\t\t\t\t\/\/ Match, increase count\n\t\t\t\t\tuniqueResponses[uResp]++\n\t\t\t\t\tunique = false\n\t\t\t\t\t\/\/ Exit inner loop\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check if unique from all other unique responses\n\t\t\tif unique {\n\t\t\t\t\/\/ Unique, add to unique responses\n\t\t\t\tuniqueResponses[resp] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ VERBOSE\n\tif configuration.Verbose {\n\t\tlog.Printf(\"[VERBOSE] Unique response comparision complete.\\n\")\n\t}\n\n\t\/\/ Close the error channel\n\tclose(errors)\n\n\treturn\n}\n\nfunc outputResponses(uniqueResponses map[*http.Response]int) {\n\t\/\/ Display the responses\n\tlog.Printf(\"Responses:\\n\")\n\tfor resp, count := range uniqueResponses {\n\t\t\/\/ TODO: Output request here\n\t\tfmt.Printf(\"Response:\\n\")\n\t\tfmt.Printf(\"[Status Code] %v\\n\", resp.StatusCode)\n\t\tfmt.Printf(\"[Protocol] %v\\n\", resp.Proto)\n\t\tif len(resp.Header) != 0 {\n\t\t\tfmt.Println(\"[Headers]\")\n\t\t\tfor header, value := range resp.Header {\n\t\t\t\tfmt.Printf(\"\\t%v: %v\\n\", header, value)\n\t\t\t}\n\t\t}\n\t\tlocation, err := resp.Location()\n\t\tif err != http.ErrNoLocation {\n\t\t\tfmt.Printf(\"[Location] %v\\n\", location.String())\n\t\t}\n\t\trespBody, err := readResponseBody(resp)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[Body] \")\n\t\t\tfmt.Printf(\"[ERROR] Error reading body: %v.\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"[Body]\\n%s\\n\", respBody)\n\t\t\t\/\/ Close the response body\n\t\t\tresp.Body.Close()\n\t\t}\n\t\tfmt.Printf(\"Similar: %v\\n\\n\", count)\n\t}\n}\n\n\/\/ Function readResponseBody is a helper function to read the content form a response's body,\n\/\/ and refill the body with another io.ReadCloser, so that it can be read again.\nfunc readResponseBody(resp *http.Response) (content []byte, err error) {\n\t\/\/ Get the content\n\tcontent, err = ioutil.ReadAll(resp.Body)\n\n\t\/\/ Reset the response body\n\trCloser := ioutil.NopCloser(bytes.NewBuffer(content))\n\tresp.Body = rCloser\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"github.com\/golang\/crypto\/acme\/autocert\"\n \"crypto\/tls\"\n \"log\"\n \"net\/http\"\n \"fmt\"\n \"html\"\n)\n\n\n\n\n\nfunc main() {\n\n\n http.HandleFunc(\"\/bar\", func(w http.ResponseWriter, r *http.Request) {\n \tfmt.Fprintf(w, \"Hello, %q\", html.EscapeString(r.URL.Path))\n })\n\n m := autocert.Manager{\n \tPrompt: autocert.AcceptTOS,\n \tHostPolicy: autocert.HostWhitelist(\"porn.kyber.space\"),\n }\n s := &http.Server{\n \tAddr: \"0.0.0.0:4430\",\n \tTLSConfig: &tls.Config{GetCertificate: m.GetCertificate},\n }\n err := s.ListenAndServeTLS(\"key.key\", \"cert.crt\")\n if err != nil {\n log.Fatal(\"ListenAndServe: \", err)\n }\n}\n<commit_msg>handlers, how do they work<commit_after>package main\n\nimport (\n \"github.com\/golang\/crypto\/acme\/autocert\"\n \"github.com\/golang\/crypto\/tls\"\n \"log\"\n \"net\/http\"\n \"fmt\"\n \"html\"\n)\n\n\n\n\n\nfunc main() {\n\n\n http.HandleFunc(\"\/bar\", func(w http.ResponseWriter, r *http.Request) {\n \tfmt.Fprintf(w, \"Hello, %q\", html.EscapeString(r.URL.Path))\n })\n\n m := autocert.Manager{\n \tPrompt: autocert.AcceptTOS,\n \tHostPolicy: autocert.HostWhitelist(\"porn.kyber.space\"),\n }\n s := &http.Server{\n \tAddr: \"0.0.0.0:4430\",\n \tTLSConfig: &tls.Config{GetCertificate: m.GetCertificate},\n }\n err := s.ListenAndServeTLS(\"key.key\", \"cert.crt\")\n if err != nil {\n log.Fatal(\"ListenAndServe: \", err)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/drone\/drone-plugin-go\/plugin\"\n)\n\ntype Docker struct {\n\tStorage string `json:\"storage_driver\"`\n\tRegistry string `json:\"registry\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tEmail string `json:\"email\"`\n\tAuth string `json:\"auth\"`\n\tRepo string `json:\"repo\"`\n\tTag string `json:\"tag\"`\n\tFile string `json:\"file\"`\n}\n\nfunc main() {\n\tclone := plugin.Clone{}\n\tvargs := Docker{}\n\n\tplugin.Param(\"clone\", &clone)\n\tplugin.Param(\"vargs\", &vargs)\n\tif err := plugin.Parse(); err != nil {\n\t\tprintln(err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Set the storage driver\n\tif len(vargs.Storage) == 0 {\n\t\tvargs.Storage = \"aufs\"\n\t}\n\n\tstop := func() {\n\t\tcmd := exec.Command(\"start-stop-daemon\", \"--stop\", \"--pidfile\", \"\/var\/run\/docker.pid\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\ttrace(cmd)\n\t\tcmd.Run()\n\t}\n\tdefer stop()\n\n\t\/\/ Starts the Docker daemon\n\tgo func() {\n\t\tcmd := exec.Command(\"\/bin\/bash\", \"\/bin\/wrapdocker\")\n\t\tcmd.Stdout = ioutil.Discard\n\t\tcmd.Stderr = ioutil.Discard\n\t\tcmd.Run()\n\n\t\tcmd = exec.Command(\"docker\", \"-d\", \"-s\", vargs.Storage)\n\t\tcmd.Stdout = ioutil.Discard\n\t\tcmd.Stderr = ioutil.Discard\n\t\ttrace(cmd)\n\t\tcmd.Run()\n\t}()\n\n\t\/\/ Sleep for a few seconds\n\ttime.Sleep(5 * time.Second)\n\n\t\/\/ Set the Registry value\n\tif len(vargs.Registry) == 0 {\n\t\tvargs.Registry = \"https:\/\/index.docker.io\/v1\/\"\n\t}\n\t\/\/ Set the Dockerfile path\n\tif len(vargs.File) == 0 {\n\t\tvargs.File = \".\"\n\t}\n\t\/\/ Set the Tag value\n\tswitch vargs.Tag {\n\tcase \"$DRONE_BRANCH\":\n\t\tvargs.Tag = clone.Branch\n\tcase \"$DRONE_COMMIT\":\n\t\tvargs.Tag = clone.Sha\n\tcase \"\":\n\t\tvargs.Tag = \"latest\"\n\t}\n\tvargs.Repo = fmt.Sprintf(\"%s:%s\", vargs.Repo, vargs.Tag)\n\n\t\/\/ Login to Docker\n\tcmd := exec.Command(\"docker\", \"login\", \"-u\", vargs.Username, \"-p\", vargs.Password, \"-e\", vargs.Email, vargs.Registry)\n\tcmd.Dir = clone.Dir\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Docker environment info\n\tcmd = exec.Command(\"docker\", \"version\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\tcmd.Run()\n\tcmd = exec.Command(\"docker\", \"info\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\tcmd.Run()\n\n\t\/\/ Build the container\n\tcmd = exec.Command(\"docker\", \"build\", \"--pull=true\", \"--rm=true\", \"-t\", vargs.Repo, vargs.File)\n\tcmd.Dir = clone.Dir\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Push the container\n\tcmd = exec.Command(\"docker\", \"push\", vargs.Repo)\n\tcmd.Dir = clone.Dir\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Trace writes each command to standard error (preceded by a ‘$ ’) before it\n\/\/ is executed. Used for debugging your build.\nfunc trace(cmd *exec.Cmd) {\n\tfmt.Println(\"$\", strings.Join(cmd.Args, \" \"))\n}\n\n\/\/ authorize is a helper function that authorizes the Docker client\n\/\/ by manually creating the Docker authentication file.\nfunc authorize(d *Docker) error {\n\tvar path = \"\/root\/.dockercfg\" \/\/ TODO should probably use user.Home() for good measure\n\tvar data = fmt.Sprintf(dockerconf, d.Registry, d.Auth, d.Email)\n\treturn ioutil.WriteFile(path, []byte(data), 0644)\n}\n\nvar dockerconf = `\n{\n\t\"%s\": {\n\t\t\"auth\": \"%s\",\n\t\t\"email\": \"%s\"\n\t}\n}\n`\n<commit_msg>switch from Clone to Workspace object<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/drone\/drone-plugin-go\/plugin\"\n)\n\ntype Docker struct {\n\tStorage string `json:\"storage_driver\"`\n\tRegistry string `json:\"registry\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tEmail string `json:\"email\"`\n\tAuth string `json:\"auth\"`\n\tRepo string `json:\"repo\"`\n\tTag string `json:\"tag\"`\n\tFile string `json:\"file\"`\n}\n\nfunc main() {\n\tworkspace := plugin.Workspace{}\n\tbuild := plugin.Build{}\n\tvargs := Docker{}\n\n\tplugin.Param(\"workspace\", &workspace)\n\tplugin.Param(\"build\", &build)\n\tplugin.Param(\"vargs\", &vargs)\n\tplugin.MustParse()\n\n\t\/\/ Set the storage driver\n\tif len(vargs.Storage) == 0 {\n\t\tvargs.Storage = \"aufs\"\n\t}\n\n\tstop := func() {\n\t\tcmd := exec.Command(\"start-stop-daemon\", \"--stop\", \"--pidfile\", \"\/var\/run\/docker.pid\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\ttrace(cmd)\n\t\tcmd.Run()\n\t}\n\tdefer stop()\n\n\t\/\/ Starts the Docker daemon\n\tgo func() {\n\t\tcmd := exec.Command(\"\/bin\/bash\", \"\/bin\/wrapdocker\")\n\t\tcmd.Stdout = ioutil.Discard\n\t\tcmd.Stderr = ioutil.Discard\n\t\tcmd.Run()\n\n\t\tcmd = exec.Command(\"docker\", \"-d\", \"-s\", vargs.Storage)\n\t\tcmd.Stdout = ioutil.Discard\n\t\tcmd.Stderr = ioutil.Discard\n\t\ttrace(cmd)\n\t\tcmd.Run()\n\t}()\n\n\t\/\/ Sleep for a few seconds\n\ttime.Sleep(5 * time.Second)\n\n\t\/\/ Set the Registry value\n\tif len(vargs.Registry) == 0 {\n\t\tvargs.Registry = \"https:\/\/index.docker.io\/v1\/\"\n\t}\n\t\/\/ Set the Dockerfile path\n\tif len(vargs.File) == 0 {\n\t\tvargs.File = \".\"\n\t}\n\t\/\/ Set the Tag value\n\tswitch vargs.Tag {\n\tcase \"$DRONE_BRANCH\":\n\t\tvargs.Tag = build.Commit.Branch\n\tcase \"$DRONE_COMMIT\":\n\t\tvargs.Tag = build.Commit.Sha\n\tcase \"\":\n\t\tvargs.Tag = \"latest\"\n\t}\n\tvargs.Repo = fmt.Sprintf(\"%s:%s\", vargs.Repo, vargs.Tag)\n\n\t\/\/ Login to Docker\n\tcmd := exec.Command(\"docker\", \"login\", \"-u\", vargs.Username, \"-p\", vargs.Password, \"-e\", vargs.Email, vargs.Registry)\n\tcmd.Dir = workspace.Path\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Docker environment info\n\tcmd = exec.Command(\"docker\", \"version\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\tcmd.Run()\n\tcmd = exec.Command(\"docker\", \"info\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\tcmd.Run()\n\n\t\/\/ Build the container\n\tcmd = exec.Command(\"docker\", \"build\", \"--pull=true\", \"--rm=true\", \"-t\", vargs.Repo, vargs.File)\n\tcmd.Dir = workspace.Path\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Push the container\n\tcmd = exec.Command(\"docker\", \"push\", vargs.Repo)\n\tcmd.Dir = workspace.Path\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\ttrace(cmd)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tstop()\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Trace writes each command to standard error (preceded by a ‘$ ’) before it\n\/\/ is executed. Used for debugging your build.\nfunc trace(cmd *exec.Cmd) {\n\tfmt.Println(\"$\", strings.Join(cmd.Args, \" \"))\n}\n\n\/\/ authorize is a helper function that authorizes the Docker client\n\/\/ by manually creating the Docker authentication file.\nfunc authorize(d *Docker) error {\n\tvar path = \"\/root\/.dockercfg\" \/\/ TODO should probably use user.Home() for good measure\n\tvar data = fmt.Sprintf(dockerconf, d.Registry, d.Auth, d.Email)\n\treturn ioutil.WriteFile(path, []byte(data), 0644)\n}\n\nvar dockerconf = `\n{\n\t\"%s\": {\n\t\t\"auth\": \"%s\",\n\t\t\"email\": \"%s\"\n\t}\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/google\/gopacket\/pcap\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc availableInterfaces() {\n\tdevices, err := pcap.FindAllDevs()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, device := range devices {\n\t\tif len(device.Addresses) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Interface: \", device.Name)\n\t\tfor _, address := range device.Addresses {\n\t\t\tfmt.Println(\" IP address: \", address.IP)\n\t\t\tfmt.Println(\" Subnet mask: \", address.Netmask)\n\t\t}\n\t\tfmt.Println(\"\")\n\t}\n}\n\nfunc main() {\n\tvar err error\n\tvar handle *pcap.Handle\n\n\tdev := flag.String(\"interface\", \"lo\", \"Chose an interface\")\n\tlst := flag.Bool(\"list_interfaces\", false, \"List available interfaces\")\n\tflag.Parse()\n\n\tif *lst {\n\t\tavailableInterfaces()\n\t\treturn\n\t}\n\n\thandle, err = pcap.OpenLive(*dev, 4096, true, pcap.BlockForever)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\tdefer handle.Close()\n\n}\n<commit_msg>Add filter option<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/pcap\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc availableInterfaces() {\n\tdevices, err := pcap.FindAllDevs()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, device := range devices {\n\t\tif len(device.Addresses) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Interface: \", device.Name)\n\t\tfor _, address := range device.Addresses {\n\t\t\tfmt.Println(\" IP address: \", address.IP)\n\t\t\tfmt.Println(\" Subnet mask: \", address.Netmask)\n\t\t}\n\t\tfmt.Println(\"\")\n\t}\n}\n\nfunc main() {\n\tvar err error\n\tvar handle *pcap.Handle\n\n\tdev := flag.String(\"interface\", \"lo\", \"Chose an interface\")\n\tfilter := flag.String(\"filter\", \"\", \"Set a specific filter\")\n\tlst := flag.Bool(\"list_interfaces\", false, \"List available interfaces\")\n\tflag.Parse()\n\n\tif *lst {\n\t\tavailableInterfaces()\n\t\treturn\n\t}\n\n\thandle, err = pcap.OpenLive(*dev, 4096, true, pcap.BlockForever)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\tdefer handle.Close()\n\n\tif len(*filter) != 0 {\n\t\terr = handle.SetBPFFilter(*filter)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err, \"\\tInvalid filter: \", *filter)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ 29 december 2012\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"log\"\n)\n\nfunc main() {\n\tif len(os.Args) != 4 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s mamexml dirlistfile mountpoint\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tgetGames(os.Args[1])\n\tgetDirList(os.Args[2])\n\tfs := fuse.NewPathNodeFs(&fuse.ReadonlyFileSystem{new(mamefuse)}, nil)\n\tmount, _, err := fuse.MountNodeFileSystem(os.Args[3], fs, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"error launching FUSE file system: %v\", err)\n\t}\n\/\/\tfs.Debug = true\n\/\/\tmount.Debug = true\nfmt.Println(\"starting server\")\n\tmount.Loop()\n}\n\nfunc x() {\n\tif len(os.Args) != 4 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s mamexml dirlistfile mountpoint\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tgetGames(os.Args[1])\n\tgetDirList(os.Args[2])\n\/\/\tstartServer()\n\tfor _, g := range games {\n\t\tfmt.Printf(\"%12s \", g.Name)\n\t\tif g.Found {\t\t\t\/\/ already found (parent)\n\t\t\tfmt.Println(g.ROMLoc)\n\t\t\tcontinue\n\t\t}\n\t\tfound, err := g.Find()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\t} else if !found {\n\t\t\tfmt.Println(\"not found\")\n\t\t} else {\n\t\t\tfmt.Println(g.ROMLoc)\n\t\t}\n\t}\n}\n<commit_msg>Added some general TODOs<commit_after>\/\/ 29 december 2012\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"log\"\n)\n\n\/\/ general TODO:\n\/\/ - be able to ls the ROMs directory\n\nfunc main() {\n\tif len(os.Args) != 4 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s mamexml dirlistfile mountpoint\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tgetGames(os.Args[1])\n\tgetDirList(os.Args[2])\n\tfs := fuse.NewPathNodeFs(&fuse.ReadonlyFileSystem{new(mamefuse)}, nil)\n\tmount, _, err := fuse.MountNodeFileSystem(os.Args[3], fs, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"error launching FUSE file system: %v\", err)\n\t}\n\/\/\tfs.Debug = true\n\/\/\tmount.Debug = true\nfmt.Println(\"starting server\")\n\tmount.Loop()\n}\n\nfunc x() {\n\tif len(os.Args) != 4 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s mamexml dirlistfile mountpoint\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tgetGames(os.Args[1])\n\tgetDirList(os.Args[2])\n\/\/\tstartServer()\n\tfor _, g := range games {\n\t\tfmt.Printf(\"%12s \", g.Name)\n\t\tif g.Found {\t\t\t\/\/ already found (parent)\n\t\t\tfmt.Println(g.ROMLoc)\n\t\t\tcontinue\n\t\t}\n\t\tfound, err := g.Find()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\t} else if !found {\n\t\t\tfmt.Println(\"not found\")\n\t\t} else {\n\t\t\tfmt.Println(g.ROMLoc)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/CiscoCloud\/mesos-consul\/config\"\n\t\"github.com\/CiscoCloud\/mesos-consul\/consul\"\n\t\"github.com\/CiscoCloud\/mesos-consul\/mesos\"\n\n\t\"github.com\/hashicorp\/consul-template\/logging\"\n\tflag \"github.com\/ogier\/pflag\"\n)\n\nconst Name = \"mesos-consul\"\nconst Version = \"0.1.1\"\n\nfunc Usage() {\n\tfmt.Println(\"Usage: mesos-consul --zk=\\\"\\\" --registry=\\\"\\\" [options]\")\n\tfmt.Println(\"Options:\")\n\tfmt.Println(\"\t--refresh=\t\tRefresh time (default 1m)\")\n\tfmt.Println()\n}\n\nfunc main() {\n\tc, err := parseFlags(os.Args[1:])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Print(\"[INFO] Using registry port: \", c.RegistryPort)\n\tlog.Print(\"[INFO] Using zookeeper: \", c.Zk)\n\tleader := mesos.New(c, consul.NewConsul(c))\n\n\tticker := time.NewTicker(c.Refresh)\n leader.Refresh()\n\tfor _ = range ticker.C {\n\t leader.Refresh()\n\t}\n}\n\nfunc parseFlags(args []string) (*config.Config, error) {\n\tvar doHelp bool\n\tvar c = config.DefaultConfig()\n\n\tflags := flag.NewFlagSet(\"mesos-consul\", flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Print(usage)\n\t}\n\n\tflags.BoolVar(&doHelp,\t\t\t\"help\", false, \"\")\n\tflags.StringVar(&c.LogLevel,\t\t\"log-level\", \"WARN\", \"\")\n\tflags.DurationVar(&c.Refresh,\t\t\"refresh\", time.Minute, \"\")\n\tflags.StringVar(&c.RegistryPort,\t\"registry-port\", \"8500\", \"\")\n\tflags.Var((*config.AuthVar)(c.RegistryAuth),\t\"registry-auth\", \"\")\n\tflags.BoolVar(&c.RegistrySSL.Enabled,\t\"registry-ssl\", c.RegistrySSL.Enabled, \"\")\n\tflags.BoolVar(&c.RegistrySSL.Verify,\t\"registry-ssl-verify\", c.RegistrySSL.Verify, \"\")\n\tflags.StringVar(&c.RegistrySSL.Cert,\t\"registry-ssl-cert\", c.RegistrySSL.Cert, \"\")\n\tflags.StringVar(&c.RegistrySSL.CaCert,\t\"registry-ssl-cacert\", c.RegistrySSL.CaCert, \"\")\n\tflags.StringVar(&c.RegistryToken,\t\t\"registry-token\", c.RegistryToken, \"\")\n\tflags.StringVar(&c.Zk,\t\t\t\"zk\", \"zk:\/\/127.0.0.1:2181\/mesos\", \"\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\targs = flags.Args()\n\tif len(args) > 0 {\n\t\treturn nil, fmt.Errorf(\"extra argument(s): %q\", args)\n\t}\n\n\tif (doHelp) {\n\t\tflags.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tlogging.Setup(&logging.Config{\n\t\tName:\t\t\"mesos-consul\",\n\t\tLevel:\t\tc.LogLevel,\n\t\tWriter:\t\tos.Stderr,\n\t\t})\n\n\treturn c, nil\n}\n\nconst usage = `\nUsage: mesos-consul [options]\n\nOptions:\n\n --log-level=<log_level>\tSet the Logging level to one of [ \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\" ]\n\t\t\t\t(default \"WARN\")\n --refresh=<time>\t\tSet the Mesos refresh rate\n\t\t\t\t(default 1m)\n --registry-auth=<user[:pass]>\tSet the basic authentication username\n\t\t\t\t(and password)\n --registry-port=<port>\tPort to connect to consul agents\n\t\t\t\t(default 8500)\n --registry-ssl\t\tUse SSL when connecting to the registry\n --registry-ssl-verify\t\tVerify certificates when connecting via SSL\n --registry-ssl-cert\t\tSSL certificates to send to registry\n --registry-ssl-cacert\t\tValidate server certificate against this CA\n\t\t\t\tcertificate file list\n --registry-token=<token>\tSet registry ACL token\n --zk=<address>\t\tZookeeper path to Mesos\n\t\t\t\t(default zk:\/\/127.0.0.1:2181\/mesos)\n`\n<commit_msg>Remove unused code and bump version<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/CiscoCloud\/mesos-consul\/config\"\n\t\"github.com\/CiscoCloud\/mesos-consul\/consul\"\n\t\"github.com\/CiscoCloud\/mesos-consul\/mesos\"\n\n\t\"github.com\/hashicorp\/consul-template\/logging\"\n\tflag \"github.com\/ogier\/pflag\"\n)\n\nconst Name = \"mesos-consul\"\nconst Version = \"0.2\"\n\nfunc main() {\n\tc, err := parseFlags(os.Args[1:])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Print(\"[INFO] Using registry port: \", c.RegistryPort)\n\tlog.Print(\"[INFO] Using zookeeper: \", c.Zk)\n\tleader := mesos.New(c, consul.NewConsul(c))\n\n\tticker := time.NewTicker(c.Refresh)\n leader.Refresh()\n\tfor _ = range ticker.C {\n\t leader.Refresh()\n\t}\n}\n\nfunc parseFlags(args []string) (*config.Config, error) {\n\tvar doHelp bool\n\tvar c = config.DefaultConfig()\n\n\tflags := flag.NewFlagSet(\"mesos-consul\", flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Print(usage)\n\t}\n\n\tflags.BoolVar(&doHelp,\t\t\t\"help\", false, \"\")\n\tflags.StringVar(&c.LogLevel,\t\t\"log-level\", \"WARN\", \"\")\n\tflags.DurationVar(&c.Refresh,\t\t\"refresh\", time.Minute, \"\")\n\tflags.StringVar(&c.RegistryPort,\t\"registry-port\", \"8500\", \"\")\n\tflags.Var((*config.AuthVar)(c.RegistryAuth),\t\"registry-auth\", \"\")\n\tflags.BoolVar(&c.RegistrySSL.Enabled,\t\"registry-ssl\", c.RegistrySSL.Enabled, \"\")\n\tflags.BoolVar(&c.RegistrySSL.Verify,\t\"registry-ssl-verify\", c.RegistrySSL.Verify, \"\")\n\tflags.StringVar(&c.RegistrySSL.Cert,\t\"registry-ssl-cert\", c.RegistrySSL.Cert, \"\")\n\tflags.StringVar(&c.RegistrySSL.CaCert,\t\"registry-ssl-cacert\", c.RegistrySSL.CaCert, \"\")\n\tflags.StringVar(&c.RegistryToken,\t\t\"registry-token\", c.RegistryToken, \"\")\n\tflags.StringVar(&c.Zk,\t\t\t\"zk\", \"zk:\/\/127.0.0.1:2181\/mesos\", \"\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\targs = flags.Args()\n\tif len(args) > 0 {\n\t\treturn nil, fmt.Errorf(\"extra argument(s): %q\", args)\n\t}\n\n\tif (doHelp) {\n\t\tflags.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tlogging.Setup(&logging.Config{\n\t\tName:\t\t\"mesos-consul\",\n\t\tLevel:\t\tc.LogLevel,\n\t\tWriter:\t\tos.Stderr,\n\t\t})\n\n\treturn c, nil\n}\n\nconst usage = `\nUsage: mesos-consul [options]\n\nOptions:\n\n --log-level=<log_level>\tSet the Logging level to one of [ \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\" ]\n\t\t\t\t(default \"WARN\")\n --refresh=<time>\t\tSet the Mesos refresh rate\n\t\t\t\t(default 1m)\n --registry-auth=<user[:pass]>\tSet the basic authentication username\n\t\t\t\t(and password)\n --registry-port=<port>\tPort to connect to consul agents\n\t\t\t\t(default 8500)\n --registry-ssl\t\tUse SSL when connecting to the registry\n --registry-ssl-verify\t\tVerify certificates when connecting via SSL\n --registry-ssl-cert\t\tSSL certificates to send to registry\n --registry-ssl-cacert\t\tValidate server certificate against this CA\n\t\t\t\tcertificate file list\n --registry-token=<token>\tSet registry ACL token\n --zk=<address>\t\tZookeeper path to Mesos\n\t\t\t\t(default zk:\/\/127.0.0.1:2181\/mesos)\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Giulio Iotti. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tilluminancePath = \"\/sys\/bus\/iio\/devices\/iio:device0\/in_illuminance_raw\"\n\tbacklightMaxPath = \"\/sys\/class\/backlight\/intel_backlight\/max_brightness\"\n\tbacklightCurrPath = \"\/sys\/class\/backlight\/intel_backlight\/brightness\"\n)\n\nfunc sysfileReadInt(f string) (int, error) {\n\tfile, err := os.Open(f)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer file.Close()\n\tbuf, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\ttext := strings.TrimSpace(string(buf))\n\treturn strconv.Atoi(text)\n}\n\nfunc sysfileWriteInt(name string, n int) error {\n\tbuf := fmt.Sprintf(\"%d\\n\", n)\n\treturn ioutil.WriteFile(name, []byte(buf), 0644)\n}\n\ntype poller struct {\n\tmax, min int\n\tsens int\n\tratio int\n\tdryrun bool\n\tdebug bool\n\twait time.Duration\n}\n\nfunc (p *poller) poll() {\n\tmaxIn := p.ratio * 100\n\tblMin := p.sens * p.max \/ 100\n\tfor {\n\t\tblight, err := sysfileReadInt(backlightCurrPath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot get backlight value: \", err)\n\t\t}\n\t\tinlight, err := sysfileReadInt(illuminancePath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot get ambient light value: \", err)\n\t\t}\n\t\tinlightPercent := inlight * 100 \/ maxIn\n\t\tif inlightPercent > 100 {\n\t\t\tinlightPercent = 100\n\t\t}\n\t\tnblight := inlightPercent*p.max\/100 + p.min\n\t\tif nblight > p.max {\n\t\t\tnblight = p.max\n\t\t}\n\t\tdiff := nblight - blight\n\t\tif diff < 0 {\n\t\t\tdiff = -diff\n\t\t}\n\t\tif p.debug {\n\t\t\tlog.Printf(\"light = %d (%d%%), back-light = %d, set %d (diff %d, min %d)\", inlight, inlightPercent, blight, nblight, diff, blMin)\n\t\t}\n\t\t\/\/ Set backlight if there is more than the minimum change thresold to adjust. Or if we are below min (level was never set.)\n\t\tif diff >= blMin || blight < p.min {\n\t\t\tif !p.dryrun {\n\t\t\t\tif err := sysfileWriteInt(backlightCurrPath, nblight); err != nil {\n\t\t\t\t\tlog.Fatal(\"cannot set backlight: \", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"change backlight to %d%%; illuminance = %d, backlight = %d (was %d)\", inlightPercent, inlight, nblight, blight)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(p.wait)\n\t}\n}\n\nfunc main() {\n\tmax, err := sysfileReadInt(backlightMaxPath)\n\tif err != nil {\n\t\tlog.Fatal(\"cannot get backlight max value: \", err)\n\t}\n\tp := poller{\n\t\tmin: 75, \/\/ backlight min N\n\t\tmax: max, \/\/ backlight max N\n\t\tsens: 2, \/\/ sensitivity %\n\t\tratio: 60, \/\/ lux = 1%\n\t\tdryrun: false,\n\t\tdebug: false,\n\t\twait: 2 * time.Second,\n\t}\n\tflag.IntVar(&p.min, \"min\", p.min, \"Minimum value `N` for backlight\")\n\tflag.IntVar(&p.max, \"max\", p.max, \"Maximum value `N` for backlight (autodetected)\")\n\tflag.IntVar(&p.sens, \"sensitivity\", p.sens, \"Minimum amount `S` in percent of backlight change to perform\")\n\tflag.IntVar(&p.ratio, \"ratio\", p.ratio, \"Ratio `R` of light change: number of lux for a 1% change in backlight\")\n\tflag.BoolVar(&p.dryrun, \"dryrun\", p.dryrun, \"Do not set backlight, only print what would happen\")\n\tflag.BoolVar(&p.debug, \"debug\", p.debug, \"Print values read from sensors every wait duration\")\n\tflag.DurationVar(&p.wait, \"wait\", p.wait, \"Duration `T` between checks for changed light conditions\")\n\tflag.Parse()\n\tp.poll()\n}\n<commit_msg>Change default after some real testing<commit_after>\/\/ Copyright 2015 Giulio Iotti. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tilluminancePath = \"\/sys\/bus\/iio\/devices\/iio:device0\/in_illuminance_raw\"\n\tbacklightMaxPath = \"\/sys\/class\/backlight\/intel_backlight\/max_brightness\"\n\tbacklightCurrPath = \"\/sys\/class\/backlight\/intel_backlight\/brightness\"\n)\n\nfunc sysfileReadInt(f string) (int, error) {\n\tfile, err := os.Open(f)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer file.Close()\n\tbuf, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\ttext := strings.TrimSpace(string(buf))\n\treturn strconv.Atoi(text)\n}\n\nfunc sysfileWriteInt(name string, n int) error {\n\tbuf := fmt.Sprintf(\"%d\\n\", n)\n\treturn ioutil.WriteFile(name, []byte(buf), 0644)\n}\n\ntype poller struct {\n\tmax, min int\n\tsens int\n\tratio int\n\tdryrun bool\n\tdebug bool\n\twait time.Duration\n}\n\nfunc (p *poller) poll() {\n\tmaxIn := p.ratio * 100\n\tblMin := p.sens * p.max \/ 100\n\tfor {\n\t\tblight, err := sysfileReadInt(backlightCurrPath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot get backlight value: \", err)\n\t\t}\n\t\tinlight, err := sysfileReadInt(illuminancePath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot get ambient light value: \", err)\n\t\t}\n\t\tinlightPercent := inlight * 100 \/ maxIn\n\t\tif inlightPercent > 100 {\n\t\t\tinlightPercent = 100\n\t\t}\n\t\tnblight := inlightPercent*p.max\/100 + p.min\n\t\tif nblight > p.max {\n\t\t\tnblight = p.max\n\t\t}\n\t\tdiff := nblight - blight\n\t\tif diff < 0 {\n\t\t\tdiff = -diff\n\t\t}\n\t\tif p.debug {\n\t\t\tlog.Printf(\"light = %d (%d%%), back-light = %d, set %d (diff %d, min %d)\", inlight, inlightPercent, blight, nblight, diff, blMin)\n\t\t}\n\t\t\/\/ Set backlight if there is more than the minimum change thresold to adjust. Or if we are below min (level was never set.)\n\t\tif diff >= blMin || blight < p.min {\n\t\t\tif !p.dryrun {\n\t\t\t\tif err := sysfileWriteInt(backlightCurrPath, nblight); err != nil {\n\t\t\t\t\tlog.Fatal(\"cannot set backlight: \", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"change backlight to %d%%; illuminance = %d, backlight = %d (was %d)\", inlightPercent, inlight, nblight, blight)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(p.wait)\n\t}\n}\n\nfunc main() {\n\tmax, err := sysfileReadInt(backlightMaxPath)\n\tif err != nil {\n\t\tlog.Fatal(\"cannot get backlight max value: \", err)\n\t}\n\tp := poller{\n\t\tmin: 50, \/\/ backlight min N\n\t\tmax: max, \/\/ backlight max N\n\t\tsens: 4, \/\/ sensitivity %\n\t\tratio: 50, \/\/ lux = 1%\n\t\tdryrun: false,\n\t\tdebug: false,\n\t\twait: 2 * time.Second,\n\t}\n\tflag.IntVar(&p.min, \"min\", p.min, \"Minimum value `N` for backlight\")\n\tflag.IntVar(&p.max, \"max\", p.max, \"Maximum value `N` for backlight (autodetected)\")\n\tflag.IntVar(&p.sens, \"sensitivity\", p.sens, \"Minimum amount `S` in percent of backlight change to perform\")\n\tflag.IntVar(&p.ratio, \"ratio\", p.ratio, \"Ratio `R` of light change: number of lux for a 1% change in backlight\")\n\tflag.BoolVar(&p.dryrun, \"dryrun\", p.dryrun, \"Do not set backlight, only print what would happen\")\n\tflag.BoolVar(&p.debug, \"debug\", p.debug, \"Print values read from sensors every wait duration\")\n\tflag.DurationVar(&p.wait, \"wait\", p.wait, \"Duration `T` between checks for changed light conditions\")\n\tflag.Parse()\n\tp.poll()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\/\/ \"fmt\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"github.com\/cucumber\/godog\"\n\t\"github.com\/cucumber\/godog\/colors\"\n\tpb \"github.com\/ii\/xds-test-harness\/api\/adapter\"\n\t\"github.com\/ii\/xds-test-harness\/internal\/runner\"\n\t\"github.com\/rs\/zerolog\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/pflag\"\n)\n\nvar (\n\tdebug = pflag.BoolP(\"debug\", \"D\", false, \"sets log level to debug\")\n\tadapterAddress = pflag.StringP(\"adapter\", \"A\", \":17000\", \"port of adapter on target\")\n\ttargetAddress = pflag.StringP(\"target\", \"T\", \":18000\", \"port of xds target to test\")\n\tnodeID = pflag.StringP(\"nodeID\", \"N\", \"test-id\", \"node id of target\")\n\tvariants = pflag.StringP(\"variants\", \"V\", \"1111\", \"Set which xDS transport variants your server supports.\\n These are, in order: sotw non-aggregated\\n, sotw aggregated\\n, incremental non-aggregated\\n, incremental aggregated\\n. 1 if your server supports that variant, 0 if not\\n. eg: --variants 1010 supports sotw non-aggregated and incremental non-aggregated.\")\n\taggregated = false\n\tincremental = false\n\n\tgodogOpts = godog.Options{\n\t\tShowStepDefinitions: false,\n\t\tRandomize: 0,\n\t\tStopOnFailure: false,\n\t\tStrict: false,\n\t\tNoColors: false,\n\t\tTags: \"\",\n\t\tFormat: \"\",\n\t\tConcurrency: 0,\n\t\tPaths: []string{},\n\t\tOutput: colors.Colored(os.Stdout),\n\t}\n\n\tr *runner.Runner\n)\n\nfunc init() {\n\tgodog.BindCommandLineFlags(\"godog.\", &godogOpts)\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\tlog.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})\n\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n}\n\nfunc InitializeTestSuite(sc *godog.TestSuiteContext) {\n\n\tsc.BeforeSuite(func() {\n\t\tr = runner.FreshRunner()\n\t\tif err := r.ConnectClient(\"target\", *targetAddress); err != nil {\n\t\t\tlog.Fatal().\n\t\t\t\tMsgf(\"error connecting to target: %v\", err)\n\t\t}\n\t\tif err := r.ConnectClient(\"adapter\", *adapterAddress); err != nil {\n\t\t\tlog.Fatal().\n\t\t\t\tMsgf(\"error connecting to adapter: %v\", err)\n\t\t}\n\t\tr.NodeID = *nodeID\n\t\tr.Aggregated = aggregated\n\t\tlog.Info().\n\t\t\tMsgf(\"Connected to target at %s and adapter at %s\\n\", *targetAddress, *adapterAddress)\n\t\tif r.Aggregated {\n\t\t\tlog.Info().\n\t\t\t\tMsgf(\"Tests will be run via ADS\")\n\t\t} else {\n\t\t\tlog.Info().\n\t\t\t\tMsgf(\"Tests will be run non-aggregated, via separate streams\")\n\t\t}\n\t})\n}\n\nfunc InitializeScenario(ctx *godog.ScenarioContext) {\n\tctx.Before(func(ctx context.Context, sc *godog.Scenario) (context.Context, error) {\n\t\tlog.Debug().Msg(\"Fresh Runner!\")\n\t\tr = runner.FreshRunner(r)\n\t\treturn ctx, nil\n\t})\n\tctx.After(func(ctx context.Context, sc *godog.Scenario, err error) (context.Context, error) {\n\t\tc := pb.NewAdapterClient(r.Adapter.Conn)\n\t\tclearRequest := &pb.ClearRequest{\n\t\t\tNode: r.NodeID,\n\t\t}\n\t\tclear, err := c.ClearState(context.Background(), clearRequest)\n\t\tif err != nil {\n\t\t\tlog.Err(err).\n\t\t\t\tMsg(\"Couldn't clear state\")\n\t\t}\n\t\tlog.Debug().\n\t\t\tMsgf(\"Clearing state...%v\", clear.Response)\n\t\treturn ctx, nil\n\t})\n\tr.LoadSteps(ctx)\n}\n\nfunc supportedVariants(combo string) (err error, supportedVariants []bool) {\n\tflags := strings.Split(combo, \"\")\n\tif len(flags) < 4 {\n\t\terr := fmt.Errorf(\"Expected four digits, each a 1 or 0 (e.g. 1001). Given \\\"%v\\\"\", combo)\n\t\treturn err, []bool{}\n\t}\n\tfor i := 0; i < 4; i++ {\n\t\tif flags[i] == \"1\" {\n\t\t\tsupportedVariants = append(supportedVariants, true)\n\t\t} else if flags[i] == \"0\" {\n\t\t\tsupportedVariants = append(supportedVariants, false)\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"Expected four digits, each a 1 or 0 (e.g. 1001). Given \\\"%v\\\"\", combo)\n\t\t\treturn err, []bool{}\n\t\t}\n\t}\n\treturn nil, supportedVariants\n}\n\nfunc main() {\n\tpflag.Parse()\n\tgodogOpts.Paths = pflag.Args()\n\tif *debug {\n\t\tzerolog.SetGlobalLevel(zerolog.DebugLevel)\n\t}\n\n\tsuite := godog.TestSuite{\n\t\tName: \"xDS Test Suite\",\n\t\tScenarioInitializer: InitializeScenario,\n\t\tTestSuiteInitializer: InitializeTestSuite,\n\t\tOptions: &godogOpts,\n\t}\n\n\t\/\/ we have four variants, either set to T or F\n\terr, supportedVariants := supportedVariants(*variants)\n\tif err != nil {\n\t\tlog.Info().Msgf(\"Error parsing variants config: %v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/SOTW, Separate\n\tif supportedVariants[0] {\n\t\tincremental = false\n\t\taggregated = false\n\t\tgodogOpts.Tags = \"@sotw && @separate\"\n\t\tif status := suite.Run(); status == 1 {\n\t\t\tos.Exit(status)\n\t\t}\n\t}\n\n\t\/\/SOTW, Aggregated\n\tif supportedVariants[1] {\n\t\tincremental = false\n\t\taggregated = true\n\t\tgodogOpts.Tags = \"@sotw && @aggregated\"\n\t\tif status := suite.Run(); status == 1 {\n\t\t\tos.Exit(status)\n\t\t}\n\t}\n\n\t\/\/Incremental, Separate\n\tif supportedVariants[2] {\n\t\tincremental = true\n\t\taggregated = false\n\t\tgodogOpts.Tags = \"@incremental && @separate\"\n\t\tif status := suite.Run(); status == 1 {\n\t\t\tos.Exit(status)\n\t\t}\n\t}\n\n\t\/\/Incremental, Aggregated\n\tif supportedVariants[3] {\n\t\tincremental = true\n\t\taggregated = true\n\t\tgodogOpts.Tags = \"@incremental && @aggregated\"\n\t\tif status := suite.Run(); status == 1 {\n\t\t\tos.Exit(status)\n\t\t}\n\t}\n\tos.Exit(0)\n}\n<commit_msg>remove os exiting on suite errors<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\/\/ \"fmt\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"github.com\/cucumber\/godog\"\n\t\"github.com\/cucumber\/godog\/colors\"\n\tpb \"github.com\/ii\/xds-test-harness\/api\/adapter\"\n\t\"github.com\/ii\/xds-test-harness\/internal\/runner\"\n\t\"github.com\/rs\/zerolog\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/pflag\"\n)\n\nvar (\n\tdebug = pflag.BoolP(\"debug\", \"D\", false, \"sets log level to debug\")\n\tadapterAddress = pflag.StringP(\"adapter\", \"A\", \":17000\", \"port of adapter on target\")\n\ttargetAddress = pflag.StringP(\"target\", \"T\", \":18000\", \"port of xds target to test\")\n\tnodeID = pflag.StringP(\"nodeID\", \"N\", \"test-id\", \"node id of target\")\n\tvariants = pflag.StringP(\"variants\", \"V\", \"1111\", \"Set which xDS transport variants your server supports.\\n These are, in order: sotw non-aggregated\\n, sotw aggregated\\n, incremental non-aggregated\\n, incremental aggregated\\n. 1 if your server supports that variant, 0 if not\\n. eg: --variants 1010 supports sotw non-aggregated and incremental non-aggregated.\")\n\taggregated = false\n\tincremental = false\n\n\tgodogOpts = godog.Options{\n\t\tShowStepDefinitions: false,\n\t\tRandomize: 0,\n\t\tStopOnFailure: false,\n\t\tStrict: false,\n\t\tNoColors: false,\n\t\tTags: \"\",\n\t\tFormat: \"\",\n\t\tConcurrency: 0,\n\t\tPaths: []string{},\n\t\tOutput: colors.Colored(os.Stdout),\n\t}\n\n\tr *runner.Runner\n)\n\nfunc init() {\n\tgodog.BindCommandLineFlags(\"godog.\", &godogOpts)\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\tlog.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})\n\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n}\n\nfunc InitializeTestSuite(sc *godog.TestSuiteContext) {\n\n\tsc.BeforeSuite(func() {\n\t\tr = runner.FreshRunner()\n\t\tif err := r.ConnectClient(\"target\", *targetAddress); err != nil {\n\t\t\tlog.Fatal().\n\t\t\t\tMsgf(\"error connecting to target: %v\", err)\n\t\t}\n\t\tif err := r.ConnectClient(\"adapter\", *adapterAddress); err != nil {\n\t\t\tlog.Fatal().\n\t\t\t\tMsgf(\"error connecting to adapter: %v\", err)\n\t\t}\n\t\tr.NodeID = *nodeID\n\t\tr.Aggregated = aggregated\n\t\tlog.Info().\n\t\t\tMsgf(\"Connected to target at %s and adapter at %s\\n\", *targetAddress, *adapterAddress)\n\t\tif r.Aggregated {\n\t\t\tlog.Info().\n\t\t\t\tMsgf(\"Tests will be run via ADS\")\n\t\t} else {\n\t\t\tlog.Info().\n\t\t\t\tMsgf(\"Tests will be run non-aggregated, via separate streams\")\n\t\t}\n\t})\n}\n\nfunc InitializeScenario(ctx *godog.ScenarioContext) {\n\tctx.Before(func(ctx context.Context, sc *godog.Scenario) (context.Context, error) {\n\t\tlog.Debug().Msg(\"Fresh Runner!\")\n\t\tr = runner.FreshRunner(r)\n\t\treturn ctx, nil\n\t})\n\tctx.After(func(ctx context.Context, sc *godog.Scenario, err error) (context.Context, error) {\n\t\tc := pb.NewAdapterClient(r.Adapter.Conn)\n\t\tclearRequest := &pb.ClearRequest{\n\t\t\tNode: r.NodeID,\n\t\t}\n\t\tclear, err := c.ClearState(context.Background(), clearRequest)\n\t\tif err != nil {\n\t\t\tlog.Err(err).\n\t\t\t\tMsg(\"Couldn't clear state\")\n\t\t}\n\t\tlog.Debug().\n\t\t\tMsgf(\"Clearing state...%v\", clear.Response)\n\t\treturn ctx, nil\n\t})\n\tr.LoadSteps(ctx)\n}\n\nfunc supportedVariants(combo string) (err error, supportedVariants []bool) {\n\tflags := strings.Split(combo, \"\")\n\tif len(flags) < 4 {\n\t\terr := fmt.Errorf(\"Expected four digits, each a 1 or 0 (e.g. 1001). Given \\\"%v\\\"\", combo)\n\t\treturn err, []bool{}\n\t}\n\tfor i := 0; i < 4; i++ {\n\t\tif flags[i] == \"1\" {\n\t\t\tsupportedVariants = append(supportedVariants, true)\n\t\t} else if flags[i] == \"0\" {\n\t\t\tsupportedVariants = append(supportedVariants, false)\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"Expected four digits, each a 1 or 0 (e.g. 1001). Given \\\"%v\\\"\", combo)\n\t\t\treturn err, []bool{}\n\t\t}\n\t}\n\treturn nil, supportedVariants\n}\n\nfunc main() {\n\tpflag.Parse()\n\tgodogOpts.Paths = pflag.Args()\n\tif *debug {\n\t\tzerolog.SetGlobalLevel(zerolog.DebugLevel)\n\t}\n\n\tsuite := godog.TestSuite{\n\t\tName: \"xDS Test Suite\",\n\t\tScenarioInitializer: InitializeScenario,\n\t\tTestSuiteInitializer: InitializeTestSuite,\n\t\tOptions: &godogOpts,\n\t}\n\n\t\/\/ we have four variants, either set to T or F\n\terr, supportedVariants := supportedVariants(*variants)\n\tif err != nil {\n\t\tlog.Info().Msgf(\"Error parsing variants config: %v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/SOTW, Separate\n\tif supportedVariants[0] {\n\t\tincremental = false\n\t\taggregated = false\n\t\tgodogOpts.Tags = \"@sotw && @separate\"\n\t\tsuite.Run();\n\t}\n\n\t\/\/SOTW, Aggregated\n\tif supportedVariants[1] {\n\t\tincremental = false\n\t\taggregated = true\n\t\tgodogOpts.Tags = \"@sotw && @aggregated\"\n\t\tsuite.Run();\n\t}\n\n\t\/\/Incremental, Separate\n\tif supportedVariants[2] {\n\t\tincremental = true\n\t\taggregated = false\n\t\tgodogOpts.Tags = \"@incremental && @separate\"\n\t\tsuite.Run();\n\t}\n\n\t\/\/Incremental, Aggregated\n\tif supportedVariants[3] {\n\t\tincremental = true\n\t\taggregated = true\n\t\tgodogOpts.Tags = \"@incremental && @aggregated\"\n\t\tsuite.Run();\n\t}\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"net\/url\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/petergtz\/bitsgo\/basic_auth_middleware\"\n\t\"github.com\/petergtz\/bitsgo\/local_blobstore\"\n\t\"github.com\/petergtz\/bitsgo\/pathsigner\"\n\t\"github.com\/petergtz\/bitsgo\/routes\"\n\t\"github.com\/petergtz\/bitsgo\/s3_blobstore\"\n\t\"github.com\/urfave\/negroni\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tconfigPath = kingpin.Flag(\"config\", \"specify config to use\").Required().Short('c').String()\n)\n\nfunc main() {\n\tkingpin.Parse()\n\n\tconfig, e := LoadConfig(*configPath)\n\tif e != nil {\n\t\tlog.Fatalf(\"Could not load config. Caused by: %v\", e)\n\t}\n\n\trootRouter := mux.NewRouter()\n\n\tinternalRouter := mux.NewRouter()\n\n\tprivateEndpoint, e := url.Parse(config.PrivateEndpoint)\n\tif e != nil {\n\t\tlog.Fatalf(\"Private endpoint invalid: %v\", e)\n\t}\n\trootRouter.Host(privateEndpoint.Host).Handler(internalRouter)\n\n\tpublicEndpoint, e := url.Parse(config.PublicEndpoint)\n\tif e != nil {\n\t\tlog.Fatalf(\"Public endpoint invalid: %v\", e)\n\t}\n\trootRouter.Host(privateEndpoint.Host).Handler(internalRouter)\n\tappStashBlobstore, _ := createPackageBlobstoreAndSignURLHandler(config.AppStash, publicEndpoint.Host, config.Port, config.Secret)\n\tpackageBlobstore, signPackageURLHandler := createPackageBlobstoreAndSignURLHandler(config.Packages, publicEndpoint.Host, config.Port, config.Secret)\n\tdropletBlobstore, signDropletURLHandler := createPackageBlobstoreAndSignURLHandler(config.Droplets, publicEndpoint.Host, config.Port, config.Secret)\n\tbuildpackBlobstore, signBuildpackURLHandler := createPackageBlobstoreAndSignURLHandler(config.Buildpacks, publicEndpoint.Host, config.Port, config.Secret)\n\n\troutes.SetUpSignRoute(internalRouter, &basic_auth_middleware.BasicAuthMiddleware{config.SigningUsers[0].Username, config.SigningUsers[0].Password},\n\t\tsignPackageURLHandler, signDropletURLHandler, signBuildpackURLHandler)\n\n\troutes.SetUpAppStashRoutes(internalRouter, appStashBlobstore)\n\troutes.SetUpPackageRoutes(internalRouter, packageBlobstore)\n\troutes.SetUpBuildpackRoutes(internalRouter, buildpackBlobstore)\n\troutes.SetUpDropletRoutes(internalRouter, dropletBlobstore)\n\troutes.SetUpBuildpackCacheRoutes(internalRouter, dropletBlobstore)\n\n\tif usesLocalBlobstore(config) {\n\t\tpublicRouter := mux.NewRouter()\n\t\trootRouter.Host(publicEndpoint.Host).Handler(negroni.New(\n\t\t\t&local_blobstore.SignatureVerificationMiddleware{&pathsigner.PathSigner{config.Secret}},\n\t\t\tnegroni.Wrap(publicRouter),\n\t\t))\n\t\tif config.Packages.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpPackageRoutes(publicRouter, packageBlobstore)\n\t\t}\n\t\tif config.Buildpacks.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpBuildpackRoutes(publicRouter, buildpackBlobstore)\n\t\t}\n\t\tif config.Droplets.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpDropletRoutes(publicRouter, dropletBlobstore)\n\t\t\troutes.SetUpBuildpackCacheRoutes(publicRouter, dropletBlobstore)\n\t\t}\n\t}\n\n\tlogger := &negroni.Logger{ALogger: log.New(os.Stdout, \"[bitsgo] \", log.LstdFlags|log.Lshortfile|log.LUTC)}\n\tlogger.SetFormat(negroni.LoggerDefaultFormat)\n\tlogger.SetDateFormat(negroni.LoggerDefaultDateFormat)\n\tsrv := &http.Server{\n\t\tHandler: negroni.New(\n\t\t\tlogger,\n\t\t\tnegroni.Wrap(rootRouter),\n\t\t),\n\t\tAddr: fmt.Sprintf(\"0.0.0.0:%v\", config.Port),\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tlog.Fatal(srv.ListenAndServe())\n}\n\nfunc createPackageBlobstoreAndSignURLHandler(blobstoreConfig BlobstoreConfig, publicHost string, port int, secret string) (routes.Blobstore, routes.SignURLHandler) {\n\tswitch blobstoreConfig.BlobstoreType {\n\tcase \"local\":\n\t\tfmt.Println(\"Creating local blobstore\", \"path prefix:\", blobstoreConfig.LocalConfig.PathPrefix)\n\t\treturn local_blobstore.NewLocalBlobstore(blobstoreConfig.LocalConfig.PathPrefix),\n\t\t\t&local_blobstore.SignLocalUrlHandler{\n\t\t\t\tDelegateEndpoint: fmt.Sprintf(\"http:\/\/%v:%v\", publicHost, port),\n\t\t\t\tSigner: &pathsigner.PathSigner{secret},\n\t\t\t}\n\tcase \"s3\":\n\t\treturn s3_blobstore.NewS3LegacyBlobstore(\n\t\t\t\tblobstoreConfig.S3Config.Bucket,\n\t\t\t\tblobstoreConfig.S3Config.AccessKeyID,\n\t\t\t\tblobstoreConfig.S3Config.SecretAccessKey),\n\t\t\ts3_blobstore.NewSignS3UrlHandler(\n\t\t\t\tblobstoreConfig.S3Config.Bucket,\n\t\t\t\tblobstoreConfig.S3Config.AccessKeyID,\n\t\t\t\tblobstoreConfig.S3Config.SecretAccessKey)\n\tdefault:\n\t\tlog.Fatalf(\"blobstoreConfig is invalid. BlobstoreType missing.\")\n\t\treturn nil, nil \/\/ satisfy compiler\n\t}\n}\n\nfunc usesLocalBlobstore(config Config) bool {\n\treturn config.Packages.BlobstoreType == \"local\" ||\n\t\tconfig.Buildpacks.BlobstoreType == \"local\" ||\n\t\tconfig.Droplets.BlobstoreType == \"local\"\n}\n<commit_msg>Add rootRouter NotFoundHandler<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"net\/url\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/petergtz\/bitsgo\/basic_auth_middleware\"\n\t\"github.com\/petergtz\/bitsgo\/local_blobstore\"\n\t\"github.com\/petergtz\/bitsgo\/pathsigner\"\n\t\"github.com\/petergtz\/bitsgo\/routes\"\n\t\"github.com\/petergtz\/bitsgo\/s3_blobstore\"\n\t\"github.com\/urfave\/negroni\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tconfigPath = kingpin.Flag(\"config\", \"specify config to use\").Required().Short('c').String()\n)\n\nfunc main() {\n\tkingpin.Parse()\n\n\tconfig, e := LoadConfig(*configPath)\n\tif e != nil {\n\t\tlog.Fatalf(\"Could not load config. Caused by: %v\", e)\n\t}\n\n\trootRouter := mux.NewRouter()\n\trootRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t})\n\n\tinternalRouter := mux.NewRouter()\n\n\tprivateEndpoint, e := url.Parse(config.PrivateEndpoint)\n\tif e != nil {\n\t\tlog.Fatalf(\"Private endpoint invalid: %v\", e)\n\t}\n\trootRouter.Host(privateEndpoint.Host).Handler(internalRouter)\n\n\tpublicEndpoint, e := url.Parse(config.PublicEndpoint)\n\tif e != nil {\n\t\tlog.Fatalf(\"Public endpoint invalid: %v\", e)\n\t}\n\tappStashBlobstore, _ := createBlobstoreAndSignURLHandler(config.AppStash, publicEndpoint.Host, config.Port, config.Secret)\n\tpackageBlobstore, signPackageURLHandler := createBlobstoreAndSignURLHandler(config.Packages, publicEndpoint.Host, config.Port, config.Secret)\n\tdropletBlobstore, signDropletURLHandler := createBlobstoreAndSignURLHandler(config.Droplets, publicEndpoint.Host, config.Port, config.Secret)\n\tbuildpackBlobstore, signBuildpackURLHandler := createBlobstoreAndSignURLHandler(config.Buildpacks, publicEndpoint.Host, config.Port, config.Secret)\n\n\troutes.SetUpSignRoute(internalRouter, &basic_auth_middleware.BasicAuthMiddleware{config.SigningUsers[0].Username, config.SigningUsers[0].Password},\n\t\tsignPackageURLHandler, signDropletURLHandler, signBuildpackURLHandler)\n\n\troutes.SetUpAppStashRoutes(internalRouter, appStashBlobstore)\n\troutes.SetUpPackageRoutes(internalRouter, packageBlobstore)\n\troutes.SetUpBuildpackRoutes(internalRouter, buildpackBlobstore)\n\troutes.SetUpDropletRoutes(internalRouter, dropletBlobstore)\n\troutes.SetUpBuildpackCacheRoutes(internalRouter, dropletBlobstore)\n\n\tif usesLocalBlobstore(config) {\n\t\tpublicRouter := mux.NewRouter()\n\t\trootRouter.Host(publicEndpoint.Host).Handler(negroni.New(\n\t\t\t&local_blobstore.SignatureVerificationMiddleware{&pathsigner.PathSigner{config.Secret}},\n\t\t\tnegroni.Wrap(publicRouter),\n\t\t))\n\t\tif config.Packages.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpPackageRoutes(publicRouter, packageBlobstore)\n\t\t}\n\t\tif config.Buildpacks.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpBuildpackRoutes(publicRouter, buildpackBlobstore)\n\t\t}\n\t\tif config.Droplets.BlobstoreType == \"local\" {\n\t\t\troutes.SetUpDropletRoutes(publicRouter, dropletBlobstore)\n\t\t\troutes.SetUpBuildpackCacheRoutes(publicRouter, dropletBlobstore)\n\t\t}\n\t}\n\n\tsrv := &http.Server{\n\t\tHandler: negroni.New(\n\t\t\tnewLogger(),\n\t\t\tnegroni.Wrap(rootRouter),\n\t\t),\n\t\tAddr: fmt.Sprintf(\"0.0.0.0:%v\", config.Port),\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\tlog.Fatal(srv.ListenAndServe())\n}\n\nfunc createBlobstoreAndSignURLHandler(blobstoreConfig BlobstoreConfig, publicHost string, port int, secret string) (routes.Blobstore, routes.SignURLHandler) {\n\tswitch blobstoreConfig.BlobstoreType {\n\tcase \"local\":\n\t\tfmt.Println(\"Creating local blobstore\", \"path prefix:\", blobstoreConfig.LocalConfig.PathPrefix)\n\t\treturn local_blobstore.NewLocalBlobstore(blobstoreConfig.LocalConfig.PathPrefix),\n\t\t\t&local_blobstore.SignLocalUrlHandler{\n\t\t\t\tDelegateEndpoint: fmt.Sprintf(\"http:\/\/%v:%v\", publicHost, port),\n\t\t\t\tSigner: &pathsigner.PathSigner{secret},\n\t\t\t}\n\tcase \"s3\":\n\t\treturn s3_blobstore.NewS3LegacyBlobstore(\n\t\t\t\tblobstoreConfig.S3Config.Bucket,\n\t\t\t\tblobstoreConfig.S3Config.AccessKeyID,\n\t\t\t\tblobstoreConfig.S3Config.SecretAccessKey),\n\t\t\ts3_blobstore.NewSignS3UrlHandler(\n\t\t\t\tblobstoreConfig.S3Config.Bucket,\n\t\t\t\tblobstoreConfig.S3Config.AccessKeyID,\n\t\t\t\tblobstoreConfig.S3Config.SecretAccessKey)\n\tdefault:\n\t\tlog.Fatalf(\"blobstoreConfig is invalid. BlobstoreType missing.\")\n\t\treturn nil, nil \/\/ satisfy compiler\n\t}\n}\n\nfunc usesLocalBlobstore(config Config) bool {\n\treturn config.Packages.BlobstoreType == \"local\" ||\n\t\tconfig.Buildpacks.BlobstoreType == \"local\" ||\n\t\tconfig.Droplets.BlobstoreType == \"local\"\n}\n\nfunc newLogger() *negroni.Logger {\n\tlogger := &negroni.Logger{ALogger: log.New(os.Stdout, \"[bitsgo] \", log.LstdFlags|log.Lshortfile|log.LUTC)}\n\tlogger.SetFormat(negroni.LoggerDefaultFormat)\n\tlogger.SetDateFormat(negroni.LoggerDefaultDateFormat)\n\treturn logger\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/Vivena\/babelweb2\/parser\"\n\t\"github.com\/Vivena\/babelweb2\/ws\"\n)\n\ntype nodeslice []string\n\nvar nodes nodeslice\nvar staticRoot string\n\nfunc (i *nodeslice) String() string {\n\treturn fmt.Sprintf(\"%s\", *i)\n}\n\nfunc (i *nodeslice) Set(value string) error {\n\t*i = append(*i, value)\n\treturn nil\n}\n\nfunc connection(updates chan parser.SBabelUpdate, node string) {\n\tvar conn net.Conn\n\tvar err error\n\n\tfor {\n\t\tlog.Println(\"Trying\", node)\n\t\tfor {\n\t\t\tconn, err = net.Dial(\"tcp6\", node)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\ttime.Sleep(time.Second * 5)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlog.Println(\"Connected to\", node)\n\t\tafterHours := func() {\n\t\t\tconn.Close()\n\t\t\tlog.Printf(\"Connection to %v closed\\n\", node)\n\t\t}\n\t\tfmt.Fprintf(conn, \"monitor\\n\")\n\t\tr := bufio.NewReader(conn)\n\t\ts := parser.NewScanner(r)\n\t\tdesc := parser.NewBabelDesc()\n\t\terr = desc.Fill(s)\n\t\tif err == io.EOF {\n\t\t\tlog.Printf(\"Something wrong with %v:\\n\\tcouldn't get router id.\\n\", node)\n\t\t} else if err != nil {\n\t\t\t\/\/ Don't you even dare to reconnect to this unholy node!\n\t\t\tlog.Printf(\"Oh, boy! %v is doomed:\\n\\t%v.\\t\", node, err)\n\t\t\tafterHours()\n\t\t\treturn\n\t\t} else {\n\t\t\tws.AddDesc(desc)\n\t\t\terr = desc.Listen(s, updates)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error while listening %v:\\n\\t%v.\\n\", node, err)\n\t\t\t}\n\t\t\tws.RemoveDesc(desc.Id())\n\t\t}\n\t\tafterHours()\n\t}\n}\n\nfunc main() {\n\tvar bwPort string\n\n\tflag.Var(&nodes, \"node\",\n\t\t\"Babel node to connect to (default \\\"[::1]:33123\\\", \"+\n\t\t\t\"may be repeated)\")\n\tflag.StringVar(&bwPort, \"http\", \":8080\", \"web server address\")\n\tflag.StringVar(&staticRoot, \"static\", \".\/static\/\",\n\t\t\"directory with static files\")\n\tflag.Parse()\n\tif len(nodes) == 0 {\n\t\tnodes = nodeslice{\"[::1]:33123\"}\n\t}\n\n\tws.Init()\n\n\tupdates := make(chan parser.SBabelUpdate, 1024)\n\tdefer close(updates)\n\n\tfor i := 0; i < len(nodes); i++ {\n\t\tgo connection(updates, nodes[i])\n\t}\n\n\tbcastGrp := ws.NewListenerGroup()\n\thandler := ws.Handler(bcastGrp)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(staticRoot)))\n\thttp.Handle(\"\/ws\", handler)\n\tgo func() {\n\t\tlog.Printf(\"Listening on http:\/\/localhost%v\\n\", bwPort)\n\t\tlog.Fatal(http.ListenAndServe(bwPort, nil))\n\t}()\n\n\tfor {\n\t\tupd := <-updates\n\t\tbcastGrp.Iter(func(l *ws.Listener) {\n\t\t\tl.Channel <- upd\n\t\t})\n\t}\n}\n<commit_msg>Second cosmetic improvement for MisterDA.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/Vivena\/babelweb2\/parser\"\n\t\"github.com\/Vivena\/babelweb2\/ws\"\n)\n\ntype nodeslice []string\n\nvar nodes nodeslice\nvar staticRoot string\n\nfunc (i *nodeslice) String() string {\n\treturn fmt.Sprintf(\"%s\", *i)\n}\n\nfunc (i *nodeslice) Set(value string) error {\n\t*i = append(*i, value)\n\treturn nil\n}\n\nfunc connection(updates chan parser.SBabelUpdate, node string) {\n\tvar conn net.Conn\n\tvar err error\n\n\tfor {\n\t\tlog.Println(\"Trying\", node)\n\t\tfor {\n\t\t\tconn, err = net.Dial(\"tcp6\", node)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\ttime.Sleep(time.Second * 5)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlog.Println(\"Connected to\", node)\n\t\tafterHours := func() {\n\t\t\tconn.Close()\n\t\t\tlog.Printf(\"Connection to %v closed\\n\", node)\n\t\t}\n\t\tfmt.Fprintf(conn, \"monitor\\n\")\n\t\tr := bufio.NewReader(conn)\n\t\ts := parser.NewScanner(r)\n\t\tdesc := parser.NewBabelDesc()\n\t\terr = desc.Fill(s)\n\t\tif err == io.EOF {\n\t\t\tlog.Printf(\"Something wrong with %v:\\n\\tcouldn't get router id.\\n\", node)\n\t\t} else if err != nil {\n\t\t\t\/\/ Don't you even dare to reconnect to this unholy node!\n\t\t\tlog.Printf(\"Oh, boy! %v is doomed:\\n\\t%v.\\t\", node, err)\n\t\t\tafterHours()\n\t\t\treturn\n\t\t} else {\n\t\t\tws.AddDesc(desc)\n\t\t\terr = desc.Listen(s, updates)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error while listening %v:\\n\\t%v.\\n\", node, err)\n\t\t\t}\n\t\t\tws.RemoveDesc(desc.Id())\n\t\t}\n\t\tafterHours()\n\t}\n}\n\nfunc main() {\n\tvar bwPort string\n\n\tflag.Var(&nodes, \"node\",\n\t\t\"Babel node to connect to (default \\\"[::1]:33123\\\", \"+\n\t\t\t\"may be repeated)\")\n\tflag.StringVar(&bwPort, \"http\", \":8080\", \"web server address\")\n\tflag.StringVar(&staticRoot, \"static\", \".\/static\/\",\n\t\t\"directory with static files\")\n\tflag.Parse()\n\tif len(nodes) == 0 {\n\t\tnodes = nodeslice{\"[::1]:33123\"}\n\t}\n\n\tws.Init()\n\n\tupdates := make(chan parser.SBabelUpdate, 1024)\n\tdefer close(updates)\n\n\tfor i := 0; i < len(nodes); i++ {\n\t\tgo connection(updates, nodes[i])\n\t}\n\n\t_, err := os.Stat(staticRoot)\n\tif err != nil && os.IsNotExist(err) {\n\t\tlog.Fatalf(\"'%v': No such directory\\n%v\\n%v.\\n\", staticRoot,\n\t\t\t\"Try your best to find the directory containing the static files\",\n\t\t\t\"and precise it via the -static option\")\n\t} else if err != nil {\n\t\tlog.Fatalf(\"Something went terribly wrong: %v\\n\", err)\n\t}\n\n\tbcastGrp := ws.NewListenerGroup()\n\thandler := ws.Handler(bcastGrp)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(staticRoot)))\n\thttp.Handle(\"\/ws\", handler)\n\tgo func() {\n\t\tlog.Printf(\"Listening on http:\/\/localhost%v\\n\", bwPort)\n\t\tlog.Fatal(http.ListenAndServe(bwPort, nil))\n\t}()\n\n\tfor {\n\t\tupd := <-updates\n\t\tbcastGrp.Iter(func(l *ws.Listener) {\n\t\t\tl.Channel <- upd\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 bs authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/bs\/bslog\"\n\t\"github.com\/tsuru\/bs\/config\"\n\t\"github.com\/tsuru\/bs\/log\"\n\t\"github.com\/tsuru\/bs\/metric\"\n\t\"github.com\/tsuru\/bs\/status\"\n)\n\nconst (\n\tversion = \"v1.6\"\n)\n\nvar printVersion bool\n\ntype StopWaiter interface {\n\tStop()\n\tWait()\n}\n\nfunc init() {\n\tflag.BoolVar(&printVersion, \"version\", false, \"Print version and exit\")\n}\n\nfunc startSignalHandler(callback func(os.Signal), signals ...os.Signal) {\n\tsigChan := make(chan os.Signal, 4)\n\tgo func() {\n\t\tif signal, ok := <-sigChan; ok {\n\t\t\tcallback(signal)\n\t\t}\n\t}()\n\tsignal.Notify(sigChan, signals...)\n}\n\nfunc onSignalDebugGoroutines(signal os.Signal) {\n\tvar buf []byte\n\tvar written int\n\tcurrLen := 1024\n\tfor written == len(buf) {\n\t\tbuf = make([]byte, currLen)\n\t\twritten = runtime.Stack(buf, true)\n\t\tcurrLen *= 2\n\t}\n\tfmt.Print(string(buf[:written]))\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n}\n\nfunc onSignalDebugProfile(signal os.Signal) {\n\tprofFileName := \"memprofile.out\"\n\tfile, err := os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t\treturn\n\t}\n\terr = pprof.WriteHeapProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to write mem profile: %s\", err)\n\t}\n\tbslog.Warnf(\"Wrote mem profile to %s\", profFileName)\n\tfile.Close()\n\tprofFileName = \"cpuprofile.out\"\n\tbslog.Warnf(\"Starting cpu profile, writing output to %s\", profFileName)\n\tdefer bslog.Warnf(\"Finished cpu profile, see %s\", profFileName)\n\tfile, err = os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t}\n\tdefer file.Close()\n\terr = pprof.StartCPUProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to start cpu profile: %s\", err)\n\t}\n\tdefer pprof.StopCPUProfile()\n\ttime.Sleep(30 * time.Second)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n}\n\nfunc main() {\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n\tflag.Parse()\n\tif printVersion {\n\t\tfmt.Printf(\"bs version %s\\n\", version)\n\t\treturn\n\t}\n\tlf := log.LogForwarder{\n\t\tBindAddress: config.Config.SyslogListenAddress,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tEnabledBackends: config.Config.LogBackends,\n\t}\n\terr := lf.Start()\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize log forwarder: %s\\n\", err)\n\t}\n\tmRunner := metric.NewRunner(config.Config.DockerEndpoint, config.Config.MetricsInterval,\n\t\tconfig.Config.MetricsBackend)\n\terr = mRunner.Start()\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize metrics runner: %s\\n\", err)\n\t}\n\treporter, err := status.NewReporter(&status.ReporterConfig{\n\t\tTsuruEndpoint: config.Config.TsuruEndpoint,\n\t\tTsuruToken: config.Config.TsuruToken,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tInterval: config.Config.StatusInterval,\n\t})\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize status reporter: %s\\n\", err)\n\t}\n\tmonitorEl := []StopWaiter{&lf, mRunner}\n\tif reporter != nil {\n\t\tmonitorEl = append(monitorEl, reporter)\n\t}\n\tvar signaled bool\n\tstartSignalHandler(func(signal os.Signal) {\n\t\tsignaled = true\n\t\tfor _, m := range monitorEl {\n\t\t\tgo m.Stop()\n\t\t}\n\t}, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)\n\tfor _, m := range monitorEl {\n\t\tm.Wait()\n\t}\n\tif !signaled {\n\t\tbslog.Fatalf(\"Exiting bs because no service could be initialized.\")\n\t}\n}\n<commit_msg>import logstash metric backend in main<commit_after>\/\/ Copyright 2016 bs authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/bs\/bslog\"\n\t\"github.com\/tsuru\/bs\/config\"\n\t\"github.com\/tsuru\/bs\/log\"\n\t\"github.com\/tsuru\/bs\/metric\"\n\t_ \"github.com\/tsuru\/bs\/metric\/logstash\"\n\t\"github.com\/tsuru\/bs\/status\"\n)\n\nconst (\n\tversion = \"v1.6\"\n)\n\nvar printVersion bool\n\ntype StopWaiter interface {\n\tStop()\n\tWait()\n}\n\nfunc init() {\n\tflag.BoolVar(&printVersion, \"version\", false, \"Print version and exit\")\n}\n\nfunc startSignalHandler(callback func(os.Signal), signals ...os.Signal) {\n\tsigChan := make(chan os.Signal, 4)\n\tgo func() {\n\t\tif signal, ok := <-sigChan; ok {\n\t\t\tcallback(signal)\n\t\t}\n\t}()\n\tsignal.Notify(sigChan, signals...)\n}\n\nfunc onSignalDebugGoroutines(signal os.Signal) {\n\tvar buf []byte\n\tvar written int\n\tcurrLen := 1024\n\tfor written == len(buf) {\n\t\tbuf = make([]byte, currLen)\n\t\twritten = runtime.Stack(buf, true)\n\t\tcurrLen *= 2\n\t}\n\tfmt.Print(string(buf[:written]))\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n}\n\nfunc onSignalDebugProfile(signal os.Signal) {\n\tprofFileName := \"memprofile.out\"\n\tfile, err := os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t\treturn\n\t}\n\terr = pprof.WriteHeapProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to write mem profile: %s\", err)\n\t}\n\tbslog.Warnf(\"Wrote mem profile to %s\", profFileName)\n\tfile.Close()\n\tprofFileName = \"cpuprofile.out\"\n\tbslog.Warnf(\"Starting cpu profile, writing output to %s\", profFileName)\n\tdefer bslog.Warnf(\"Finished cpu profile, see %s\", profFileName)\n\tfile, err = os.OpenFile(profFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0660)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to open profile file %q: %s\", profFileName, err)\n\t}\n\tdefer file.Close()\n\terr = pprof.StartCPUProfile(file)\n\tif err != nil {\n\t\tbslog.Warnf(\"Error trying to start cpu profile: %s\", err)\n\t}\n\tdefer pprof.StopCPUProfile()\n\ttime.Sleep(30 * time.Second)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n}\n\nfunc main() {\n\tstartSignalHandler(onSignalDebugGoroutines, syscall.SIGUSR1)\n\tstartSignalHandler(onSignalDebugProfile, syscall.SIGUSR2)\n\tflag.Parse()\n\tif printVersion {\n\t\tfmt.Printf(\"bs version %s\\n\", version)\n\t\treturn\n\t}\n\tlf := log.LogForwarder{\n\t\tBindAddress: config.Config.SyslogListenAddress,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tEnabledBackends: config.Config.LogBackends,\n\t}\n\terr := lf.Start()\n\tif err != nil {\n\t\tbslog.Fatalf(\"Unable to initialize log forwarder: %s\\n\", err)\n\t}\n\tmRunner := metric.NewRunner(config.Config.DockerEndpoint, config.Config.MetricsInterval,\n\t\tconfig.Config.MetricsBackend)\n\terr = mRunner.Start()\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize metrics runner: %s\\n\", err)\n\t}\n\treporter, err := status.NewReporter(&status.ReporterConfig{\n\t\tTsuruEndpoint: config.Config.TsuruEndpoint,\n\t\tTsuruToken: config.Config.TsuruToken,\n\t\tDockerEndpoint: config.Config.DockerEndpoint,\n\t\tInterval: config.Config.StatusInterval,\n\t})\n\tif err != nil {\n\t\tbslog.Warnf(\"Unable to initialize status reporter: %s\\n\", err)\n\t}\n\tmonitorEl := []StopWaiter{&lf, mRunner}\n\tif reporter != nil {\n\t\tmonitorEl = append(monitorEl, reporter)\n\t}\n\tvar signaled bool\n\tstartSignalHandler(func(signal os.Signal) {\n\t\tsignaled = true\n\t\tfor _, m := range monitorEl {\n\t\t\tgo m.Stop()\n\t\t}\n\t}, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)\n\tfor _, m := range monitorEl {\n\t\tm.Wait()\n\t}\n\tif !signaled {\n\t\tbslog.Fatalf(\"Exiting bs because no service could be initialized.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"github.com\/l8nite\/hipchat-cinema\/util\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/tbruyelle\/hipchat-go\/hipchat\"\n)\n\nconst Version = \"0.0.0\"\n\n\/\/ RoomConfig holds information to send messages to a specific room\ntype RoomConfig struct {\n\ttoken *hipchat.OAuthAccessToken\n\thc *hipchat.Client\n\tname string\n\tisMoviePlaying bool\n}\n\n\/\/ BotContext holds the base URL that the bot is running under\n\/\/ and a map of rooms that we've been installed into\ntype BotContext struct {\n\tbaseURL string\n\trooms map[string]*RoomConfig\n}\n\n\/\/ GET \/atlassian-connect.json\n\/\/ Fetches the configuration descriptor for this bot\nfunc (c *BotContext) atlassianConnect(w http.ResponseWriter, r *http.Request) {\n\tlp := path.Join(\".\/static\", \"atlassian-connect.json\")\n\tvals := map[string]string{\n\t\t\"BaseUrl\": c.baseURL,\n\t}\n\ttmpl, err := template.ParseFiles(lp)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\ttmpl.ExecuteTemplate(w, \"atlassian-connect\", vals)\n}\n\n\/\/ POST \/installable\n\/\/ Callback received when the bot is installed in a room\nfunc (c *BotContext) install(w http.ResponseWriter, r *http.Request) {\n\tauthPayload, err := util.DecodePostJSON(r, true)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Parse of installation auth data failed:%v\\n\", err)\n\t\treturn\n\t}\n\n\tcredentials := hipchat.ClientCredentials{\n\t\tClientID: authPayload[\"oauthId\"].(string),\n\t\tClientSecret: authPayload[\"oauthSecret\"].(string),\n\t}\n\n\troomName := strconv.Itoa(int(authPayload[\"roomId\"].(float64)))\n\n\tnewClient := hipchat.NewClient(\"\")\n\n\ttok, _, err := newClient.GenerateToken(credentials, []string{hipchat.ScopeSendNotification})\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Client.GetAccessToken returns an error %v\", err)\n\t}\n\n\trc := &RoomConfig{\n\t\tname: roomName,\n\t\thc: tok.CreateClient(),\n\t}\n\n\tc.rooms[roomName] = rc\n\n\tutil.PrintDump(w, r, false)\n\tjson.NewEncoder(w).Encode([]string{\"OK\"})\n}\n\n\/\/ DELETE \/installable\n\/\/ Callback received when the user wants to uninstall the bot from their channel\nfunc (c *BotContext) uninstall(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO: parse out roomID, remove from configured rooms map\n\tw.WriteHeader(204)\n}\n\n\/\/ POST \/hook\n\/\/ Callback received when the user types a command our bot recognizes\nfunc (c *BotContext) hook(w http.ResponseWriter, r *http.Request) {\n\tutil.PrintDump(w, r, true)\n\n\tpayLoad, err := util.DecodePostJSON(r, true)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Parsed auth data failed:%v\\n\", err)\n\t}\n\n\troomID := strconv.Itoa(int((payLoad[\"item\"].(map[string]interface{}))[\"room\"].(map[string]interface{})[\"id\"].(float64)))\n\n\tlog.Printf(\"Received play request to roomID: %s\\n\", roomID)\n\n\tif _, ok := c.rooms[roomID]; !ok {\n\t\tlog.Print(\"Room is not registered!\")\n\t\treturn\n\t}\n\n\tvar message string\n\n\tif c.rooms[roomID].isMoviePlaying {\n\t\tmessage = \"Movie is already playing!\"\n\t} else {\n\t\tmessage = \"Enjoy the show!\"\n\t\tc.rooms[roomID].isMoviePlaying = true\n\t}\n\n\tnotifRq := &hipchat.NotificationRequest{\n\t\tMessage: message,\n\t\tMessageFormat: \"html\",\n\t\tColor: \"red\",\n\t\tFrom: \"God\",\n\t}\n\n\tif _, ok := c.rooms[roomID]; ok {\n\t\t_, err = c.rooms[roomID].hc.Room.Notification(roomID, notifRq)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to notify HipChat channel:%v\\n\", err)\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Room is not registered correctly:%v\\n\", c.rooms)\n\t}\n}\n\n\/\/ Install http handler routes\nfunc (c *BotContext) routes() *mux.Router {\n\tr := mux.NewRouter()\n\n\tr.Path(\"\/\").Methods(\"GET\").HandlerFunc(c.atlassianConnect)\n\tr.Path(\"\/atlassian-connect.json\").Methods(\"GET\").HandlerFunc(c.atlassianConnect)\n\tr.Path(\"\/installable\").Methods(\"POST\").HandlerFunc(c.install)\n\tr.Path(\"\/installable\/{token}\").Methods(\"DELETE\").HandlerFunc(c.uninstall)\n\tr.Path(\"\/hook\").Methods(\"POST\").HandlerFunc(c.hook)\n\n\tr.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(\".\/static\")))\n\treturn r\n}\n\n\/\/ Start up, parse command line flags, handle http requests\nfunc main() {\n\tvar (\n\t\tport = flag.String(\"port\", \"8080\", \"web server port\")\n\t\tbaseURL = flag.String(\"baseURL\", os.Getenv(\"BASE_URL\"), \"server base url\")\n\t)\n\n\tflag.Parse()\n\n\tc := &BotContext{\n\t\tbaseURL: *baseURL,\n\t\trooms: make(map[string]*RoomConfig),\n\t}\n\n\tlog.Printf(\"HipChat Cinema v%s - Listening on port %v\", Version, *port)\n\n\tr := c.routes()\n\thttp.Handle(\"\/\", r)\n\thttp.ListenAndServe(\":\"+*port, nil)\n}\n<commit_msg>Manage rooms by OAuth Client ID<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"github.com\/l8nite\/hipchat-cinema\/util\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/tbruyelle\/hipchat-go\/hipchat\"\n)\n\nconst Version = \"0.0.0\"\n\n\/\/ RoomConfig holds information to send messages to a specific room\ntype RoomConfig struct {\n\ttoken *hipchat.OAuthAccessToken\n\thc *hipchat.Client\n\tname string\n\tisMoviePlaying bool\n}\n\n\/\/ BotContext holds the base URL that the bot is running under\n\/\/ and a map of rooms that we've been installed into\ntype BotContext struct {\n\tbaseURL string\n\trooms map[string]*RoomConfig\n}\n\n\/\/ GET \/atlassian-connect.json\n\/\/ Fetches the configuration descriptor for this bot\nfunc (c *BotContext) atlassianConnect(w http.ResponseWriter, r *http.Request) {\n\tlp := path.Join(\".\/static\", \"atlassian-connect.json\")\n\tvals := map[string]string{\n\t\t\"BaseUrl\": c.baseURL,\n\t}\n\ttmpl, err := template.ParseFiles(lp)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\ttmpl.ExecuteTemplate(w, \"atlassian-connect\", vals)\n}\n\n\/\/ POST \/installable\n\/\/ Callback received when the bot is installed in a room\nfunc (c *BotContext) install(w http.ResponseWriter, r *http.Request) {\n\tauthPayload, err := util.DecodePostJSON(r, true)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Parse of installation auth data failed:%v\\n\", err)\n\t\treturn\n\t}\n\n\tclientID := authPayload[\"oauthId\"].(string)\n\n\tcredentials := hipchat.ClientCredentials{\n\t\tClientID: clientID,\n\t\tClientSecret: authPayload[\"oauthSecret\"].(string),\n\t}\n\n\troomName := strconv.Itoa(int(authPayload[\"roomId\"].(float64)))\n\n\tnewClient := hipchat.NewClient(\"\")\n\n\ttok, _, err := newClient.GenerateToken(credentials, []string{hipchat.ScopeSendNotification})\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Client.GetAccessToken returns an error %v\", err)\n\t}\n\n\trc := &RoomConfig{\n\t\tname: roomName,\n\t\thc: tok.CreateClient(),\n\t}\n\n\tc.rooms[clientID] = rc\n\n\tnotifRq := &hipchat.NotificationRequest{\n\t\tMessage: \"Hipchat Cinema Bot Installed\",\n\t\tMessageFormat: \"html\",\n\t\tColor: \"green\",\n\t}\n\n\t_, err = c.rooms[clientID].hc.Room.Notification(c.rooms[clientID].name, notifRq)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to notify HipChat channel: %v\\n\", err)\n\t}\n\n\tutil.PrintDump(w, r, false)\n\tjson.NewEncoder(w).Encode([]string{\"OK\"})\n}\n\n\/\/ DELETE \/installable\/token\n\/\/ Callback received when the user wants to uninstall the bot from their channel\nfunc (c *BotContext) uninstall(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tclientID := params[\"clientID\"]\n\n\t\/\/ return a 204 for every request to this API, regardless of what happens\n\tdefer func() {\n\t\tw.WriteHeader(204)\n\t}()\n\n\tlog.Printf(\"Received uninstall request for clientID: %s\\n\", clientID)\n\n\tif _, ok := c.rooms[clientID]; !ok {\n\t\tlog.Print(\"Not a registered clientID\")\n\t\treturn\n\t}\n\n\tdelete(c.rooms, clientID)\n}\n\n\/\/ POST \/hook\n\/\/ Callback received when the user types a command our bot recognizes\nfunc (c *BotContext) hook(w http.ResponseWriter, r *http.Request) {\n\tutil.PrintDump(w, r, true)\n\n\tpayLoad, err := util.DecodePostJSON(r, true)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Parsed auth data failed: %v\\n\", err)\n\t}\n\n\tclientID := payLoad[\"oauth_client_id\"].(string)\n\n\tlog.Printf(\"Received play request to clientID: %s\\n\", clientID)\n\n\tif _, ok := c.rooms[clientID]; !ok {\n\t\tlog.Print(\"clientID is not registered!\")\n\t\treturn\n\t}\n\n\tvar message string\n\n\tif c.rooms[clientID].isMoviePlaying {\n\t\tmessage = \"Movie is already playing!\"\n\t} else {\n\t\tmessage = \"Enjoy the show!\"\n\t\tc.rooms[clientID].isMoviePlaying = true\n\t}\n\n\tnotifRq := &hipchat.NotificationRequest{\n\t\tMessage: message,\n\t\tMessageFormat: \"html\",\n\t\tColor: \"red\",\n\t\tFrom: \"God\",\n\t}\n\n\tif _, ok := c.rooms[clientID]; ok {\n\t\t_, err = c.rooms[clientID].hc.Room.Notification(c.rooms[clientID].name, notifRq)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to notify HipChat channel:%v\\n\", err)\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Room is not registered correctly:%v\\n\", c.rooms)\n\t}\n}\n\n\/\/ Install http handler routes\nfunc (c *BotContext) routes() *mux.Router {\n\tr := mux.NewRouter()\n\n\tr.Path(\"\/\").Methods(\"GET\").HandlerFunc(c.atlassianConnect)\n\tr.Path(\"\/atlassian-connect.json\").Methods(\"GET\").HandlerFunc(c.atlassianConnect)\n\tr.Path(\"\/installable\").Methods(\"POST\").HandlerFunc(c.install)\n\tr.Path(\"\/installable\/{clientID}\").Methods(\"DELETE\").HandlerFunc(c.uninstall)\n\tr.Path(\"\/hook\").Methods(\"POST\").HandlerFunc(c.hook)\n\n\tr.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(\".\/static\")))\n\treturn r\n}\n\n\/\/ Start up, parse command line flags, handle http requests\nfunc main() {\n\tvar (\n\t\tport = flag.String(\"port\", \"8080\", \"web server port\")\n\t\tbaseURL = flag.String(\"baseURL\", os.Getenv(\"BASE_URL\"), \"server base url\")\n\t)\n\n\tflag.Parse()\n\n\tc := &BotContext{\n\t\tbaseURL: *baseURL,\n\t\trooms: make(map[string]*RoomConfig),\n\t}\n\n\tlog.Printf(\"HipChat Cinema v%s - Listening on port %v\", Version, *port)\n\n\tr := c.routes()\n\thttp.Handle(\"\/\", r)\n\thttp.ListenAndServe(\":\"+*port, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"time\"\n)\n\nconst (\n\tname string = \"godedupe\"\n\tversion string = \"1.0.0\"\n)\n\nvar (\n\tcpuprofile string\n\n\tcurrentDir string\n\texcludeEmptyDir bool\n\texcludeEmptyFiles bool\n\texcludeHiddenFiles bool\n\tshowCurrentValues bool\n\tenableRecursion bool\n\tignoreSymLinks bool\n\tshowSummary bool\n\tquiet bool\n)\n\n\/\/ Options for start the program\ntype Options struct {\n\tcurrentDir string\n\texcludeEmptyFiles bool\n\texcludeHiddenFiles bool\n\tenableRecursion bool\n\tignoreSymLinks bool\n\tshowSummary bool\n\tquiet bool\n}\n\n\/\/ Init the options to run the program\nfunc Init() {\n\tflag.StringVar(&cpuprofile, \"cpuprofile\", \"\", \"Enable profiling\")\n\n\tflag.StringVar(¤tDir, \"t\", GetUserHome(),\n\t\t\"Current directory where the program search for duplicated files\")\n\tflag.BoolVar(&excludeEmptyFiles, \"z\", true, \"Exclude the zero length files\")\n\tflag.BoolVar(&excludeHiddenFiles, \"h\", true, \"Exclude the hidden files\")\n\tflag.BoolVar(&showCurrentValues, \"debug\", false,\n\t\t\"Show the current values of the program options\")\n\tflag.BoolVar(&enableRecursion, \"r\", true, \"Follow subdirectories (recursion)\")\n\tflag.BoolVar(&ignoreSymLinks, \"sym\", true, \"Ignore symlinks\")\n\tflag.BoolVar(&showSummary, \"m\", false, \"Show a summary\")\n\tflag.BoolVar(&quiet, \"q\", false, \"Don't show status info\")\n\tflag.Parse()\n}\n\n\/\/ Header show the program name and current version\nfunc Header() {\n\tfmt.Println(\"------------------------\")\n\tfmt.Printf(\"%s - version %s\\n\", name, version)\n\tfmt.Println(\"------------------------\")\n}\n\n\/\/ ShowDebugInfo print all the current option values\nfunc ShowDebugInfo() {\n\tif showCurrentValues {\n\t\tfmt.Println()\n\t\tfmt.Println(\"------------------------\")\n\t\tfmt.Println(\"Current option values\")\n\t\tfmt.Println(\"------------------------\")\n\t\tfmt.Println(\"Target directory :\", currentDir)\n\t\tfmt.Println(\"Exclude zero length files :\", excludeEmptyFiles)\n\t\tfmt.Println(\"Exclude hidden files :\", excludeHiddenFiles)\n\t\tfmt.Println(\"Ignore symlinks :\", ignoreSymLinks)\n\t\tfmt.Println(\"Recursive search :\", enableRecursion)\n\t\tfmt.Println(\"Show a summary :\", showSummary)\n\t\tfmt.Println(\"Quiet :\", quiet)\n\t\tif cpuprofile != \"\" {\n\t\t\tfmt.Println(\"Profile output :\", cpuprofile)\n\t\t}\n\t\tfmt.Println(\"------------------------\")\n\t}\n}\n\nfunc trackTime(now time.Time) {\n\texpired := time.Since(now)\n\tif !quiet {\n\t\tfmt.Printf(\"Program terminated in %v\\n\", expired)\n\t}\n}\n\nfunc executeCPUProfileIfNeeded() {\n\tif cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n}\n\nfunc main() {\n\tHeader()\n\tInit()\n\tShowDebugInfo()\n\n\toptions := Options{\n\t\tcurrentDir,\n\t\texcludeEmptyFiles,\n\t\texcludeHiddenFiles,\n\t\tenableRecursion,\n\t\tignoreSymLinks,\n\t\tshowSummary,\n\t\tquiet,\n\t}\n\texecuteCPUProfileIfNeeded()\n\n\tdefer trackTime(time.Now())\n\n\tStart(options)\n}\n<commit_msg>make multiple methods in main.go private<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"time\"\n)\n\nconst (\n\tname string = \"godedupe\"\n\tversion string = \"1.0.0\"\n)\n\nvar (\n\tcpuprofile string\n\n\tcurrentDir string\n\texcludeEmptyDir bool\n\texcludeEmptyFiles bool\n\texcludeHiddenFiles bool\n\tshowCurrentValues bool\n\tenableRecursion bool\n\tignoreSymLinks bool\n\tshowSummary bool\n\tquiet bool\n)\n\n\/\/ Options for start the program\ntype Options struct {\n\tcurrentDir string\n\texcludeEmptyFiles bool\n\texcludeHiddenFiles bool\n\tenableRecursion bool\n\tignoreSymLinks bool\n\tshowSummary bool\n\tquiet bool\n}\n\n\/\/ Init the options to run the program\nfunc initOptions() {\n\tflag.StringVar(&cpuprofile, \"cpuprofile\", \"\", \"Enable profiling\")\n\n\tflag.StringVar(¤tDir, \"t\", GetUserHome(),\n\t\t\"Current directory where the program search for duplicated files\")\n\tflag.BoolVar(&excludeEmptyFiles, \"z\", true, \"Exclude the zero length files\")\n\tflag.BoolVar(&excludeHiddenFiles, \"h\", true, \"Exclude the hidden files\")\n\tflag.BoolVar(&showCurrentValues, \"debug\", false,\n\t\t\"Show the current values of the program options\")\n\tflag.BoolVar(&enableRecursion, \"r\", true, \"Follow subdirectories (recursion)\")\n\tflag.BoolVar(&ignoreSymLinks, \"sym\", true, \"Ignore symlinks\")\n\tflag.BoolVar(&showSummary, \"m\", false, \"Show a summary\")\n\tflag.BoolVar(&quiet, \"q\", false, \"Don't show status info\")\n\tflag.Parse()\n}\n\n\/\/ Header show the program name and current version\nfunc header() {\n\tfmt.Println(\"------------------------\")\n\tfmt.Printf(\"%s - version %s\\n\", name, version)\n\tfmt.Println(\"------------------------\")\n}\n\n\/\/ ShowDebugInfo print all the current option values\nfunc showDebugInfo() {\n\tif showCurrentValues {\n\t\tfmt.Println()\n\t\tfmt.Println(\"------------------------\")\n\t\tfmt.Println(\"Current option values\")\n\t\tfmt.Println(\"------------------------\")\n\t\tfmt.Println(\"Target directory :\", currentDir)\n\t\tfmt.Println(\"Exclude zero length files :\", excludeEmptyFiles)\n\t\tfmt.Println(\"Exclude hidden files :\", excludeHiddenFiles)\n\t\tfmt.Println(\"Ignore symlinks :\", ignoreSymLinks)\n\t\tfmt.Println(\"Recursive search :\", enableRecursion)\n\t\tfmt.Println(\"Show a summary :\", showSummary)\n\t\tfmt.Println(\"Quiet :\", quiet)\n\t\tif cpuprofile != \"\" {\n\t\t\tfmt.Println(\"Profile output :\", cpuprofile)\n\t\t}\n\t\tfmt.Println(\"------------------------\")\n\t}\n}\n\nfunc trackTime(now time.Time) {\n\texpired := time.Since(now)\n\tif !quiet {\n\t\tfmt.Printf(\"Program terminated in %v\\n\", expired)\n\t}\n}\n\nfunc executeCPUProfileIfNeeded() {\n\tif cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n}\n\nfunc main() {\n\theader()\n\tinitOptions()\n\tshowDebugInfo()\n\n\toptions := Options{\n\t\tcurrentDir,\n\t\texcludeEmptyFiles,\n\t\texcludeHiddenFiles,\n\t\tenableRecursion,\n\t\tignoreSymLinks,\n\t\tshowSummary,\n\t\tquiet,\n\t}\n\texecuteCPUProfileIfNeeded()\n\n\tdefer trackTime(time.Now())\n\n\tStart(options)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"os\"\n \"os\/exec\"\n\n \"errors\"\n\n \"io\/ioutil\"\n\n \"log\"\n\n\/\/ \"fmt\"\n \"strings\"\n \"runtime\"\n \"strconv\" \/\/ For Itoa\n\/\/ \"encoding\/csv\"\n \"encoding\/json\"\n\n \"github.com\/go-martini\/martini\"\n)\n\n\nfunc main() {\n m := martini.Classic()\n\n \/\/ CPU count\n m.Get(\"\/sh\/numberofcores.php\", func () ([]byte, error) {\n return json.Marshal(runtime.NumCPU())\n })\n\n \/\/ Server's hostname\n m.Get(\"\/sh\/hostname.php\", func () ([]byte, error) {\n host, err := os.Hostname()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(host)\n })\n\n\n\n \/\/ PS\n m.Get(\"\/sh\/ps.php\", func () ([]byte, error) {\n \/\/ Run uptime command\n rawOutput, err := exec.Command(\"ps\", \"aux\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(parseCommandTable(rawOutput, 1, 1))\n })\n\n m.Get(\"\/sh\/df.php\", func () ([]byte, error) {\n \/\/ Run uptime command\n rawOutput, err := exec.Command(\"df\", \"-Ph\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(parseCommandTable(rawOutput, 1, 1))\n })\n\n m.Get(\"\/sh\/time.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"date\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(string(raw[:]))\n })\n\n m.Get(\"\/sh\/issue.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"uname\", \"-rsm\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(string(raw[:]))\n })\n\n m.Get(\"\/sh\/users.php\", func () ([]byte, error) {\n data, err := ioutil.ReadFile(\"\/etc\/passwd\")\n\n if err != nil {\n return nil, err\n }\n\n lines := strings.Split(string(data), \"\\n\")\n\n \/\/ Output records\n var records [][]string\n\n for _, line := range lines {\n parts := strings.Split(line, \":\")\n\n \/\/ Skip bad or empty lines\n if len(parts) != 7 {\n log.Println(len(parts))\n continue\n }\n\n \/\/ Parse base 10, 16 bit UID integer\n uid, err := strconv.ParseInt(parts[2], 10, 16)\n\n \/\/ Error parsing UID\n if err != nil {\n continue\n }\n\n userType := \"user\"\n\n \/\/ Check if system user\n if uid <= 499 {\n userType = \"system\"\n }\n\n user := []string{\n \/\/ User type\n userType,\n \/\/ Username\n parts[0],\n \/\/ Home directory\n parts[6],\n }\n\n records = append(records, user)\n }\n\n return json.Marshal(records)\n })\n\n m.Get(\"\/sh\/online.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"w\").Output()\n\n if err != nil {\n return nil, err\n }\n\n \/\/ We'll add all the parsed lines here\n var entries [][]string\n\n \/\/ Skip first and last line of output\n for _, entry := range parseCommandTable(raw, 2, 1) {\n entries = append(entries, []string{\n \/\/ User\n entry[0],\n \/\/ From\n entry[2],\n \/\/ Login at\n entry[3],\n \/\/ Idle\n entry[4],\n })\n }\n\n return json.Marshal(entries)\n })\n\n m.Get(\"\/sh\/loadavg.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"w\").Output()\n\n if err != nil {\n return nil, err\n }\n\n lines := strings.Split(string(raw[:]), \"\\n\")\n headers := strings.Fields(lines[0])\n\n var cpuLoads [][2]string\n\n \/\/ Last 3 headers are CPU loads, clean them up\n for _, load := range headers[len(headers)-3:] {\n \/\/ Remove trailing coma if there\n cleanLoad := strings.Split(string(load), \",\")[0]\n\n loadFloat, err := strconv.ParseFloat(cleanLoad, 32)\n\n if err != nil {\n continue\n }\n\n loadPercentage := int(loadFloat * 100) \/ runtime.NumCPU()\n\n cpuLoads = append(cpuLoads, [2]string{\n cleanLoad,\n strconv.Itoa(loadPercentage),\n })\n }\n\n return json.Marshal(cpuLoads)\n })\n\n m.Get(\"\/sh\/ping.php\", func () ([]byte, error) {\n HOSTS := [4]string{\n \"wikipedia.org\",\n \"github.com\",\n \"google.com\",\n \"gnu.org\",\n }\n\n var entries [][]string\n\n for _, host := range HOSTS {\n ping, err := pingHost(host, 2)\n\n if err != nil {\n continue\n }\n\n entries = append(entries, []string{\n host,\n ping,\n })\n }\n\n return json.Marshal(entries)\n })\n\n m.Get(\"\/sh\/netstat.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"netstat\", \"-nt\").Output()\n\n if err != nil {\n return nil, err\n }\n\n \/\/ Get entries as a table\n entries := parseCommandTable(raw, 2, 1)\n\n \/\/ Connections we care about\n var connections [][]string\n\n \/\/ Filter by protocol\n for _, entry := range entries {\n \/\/ Skip bad entries or non tcp\n if len(entry) != 6 || (entry[0] != \"tcp\" && entry[0] != \"tcp4\") {\n continue\n }\n\n connections = append(connections, entry)\n }\n\n \/\/ Count connections from a machine\n connectionCount := make(map[string]int)\n\n \/\/ Extract\n for _, conn := range connections {\n \/\/ Foreign Address\n remoteAddr := conn[4]\n\n \/\/ Increment counter\n connectionCount[remoteAddr]++\n }\n\n \/\/ Expected output by client\n var output [][2]string\n\n for remoteAddr, count := range connectionCount {\n \/\/ Compensate for difference between OSX and linux\n parts := strings.Split(strings.Replace(remoteAddr, \",\", \".\", 1), \".\")\n ip := strings.Join(parts[:4], \".\")\n \/\/ port := parts[4]\n\n output = append(output, [2]string{\n strconv.Itoa(count),\n ip,\n })\n }\n\n return json.Marshal(output)\n })\n\n \/\/ Serve static files\n m.Get(\"\/.*\", martini.Static(\"\"))\n\n m.Run()\n}\n\nfunc parseCommandTable(rawOutput []byte, headerCount int, footerCount int) [][]string {\n \/\/ Convert output to a string (it's not binary data, so this is ok)\n output := string(rawOutput[:])\n\n \/\/ We'll add all the parsed lines here\n var entries [][]string\n\n \/\/ Lines of output\n lines := strings.Split(output, \"\\n\")\n\n \/\/ Skip first and last line of output\n for _, str := range lines[headerCount:len(lines)-footerCount] {\n entries = append(entries, strings.Fields(str))\n }\n\n return entries\n}\n\n\/\/ Get average pingTime to a host\nfunc pingHost(hostname string, pingCount int) (string, error) {\n raw, err := exec.Command(\"ping\", \"-c\", strconv.Itoa(pingCount), hostname).Output()\n\n if err != nil {\n return \"\", err\n }\n\n lines := strings.Split(string(raw[:]), \"\\n\")\n\n if len(lines) < 2 {\n return \"\", errors.New(\"Bad output for ping command\")\n }\n\n \/\/ Get 2nd last line\n lastLine := lines[len(lines)-2]\n\n \/\/ Extract average ping time as a string\n pingTime := strings.Split(strings.Split(lastLine, \"=\")[1], \"\/\")[1]\n\n return pingTime, nil\n}\n<commit_msg>Add where command<commit_after>package main\n\nimport (\n \"os\"\n \"os\/exec\"\n\n \"errors\"\n\n \"io\/ioutil\"\n\n \"log\"\n\n\/\/ \"fmt\"\n \"strings\"\n \"runtime\"\n \"strconv\" \/\/ For Itoa\n\/\/ \"encoding\/csv\"\n \"encoding\/json\"\n\n \"github.com\/go-martini\/martini\"\n)\n\n\nfunc main() {\n m := martini.Classic()\n\n \/\/ CPU count\n m.Get(\"\/sh\/numberofcores.php\", func () ([]byte, error) {\n return json.Marshal(runtime.NumCPU())\n })\n\n \/\/ Server's hostname\n m.Get(\"\/sh\/hostname.php\", func () ([]byte, error) {\n host, err := os.Hostname()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(host)\n })\n\n\n\n \/\/ PS\n m.Get(\"\/sh\/ps.php\", func () ([]byte, error) {\n \/\/ Run uptime command\n rawOutput, err := exec.Command(\"ps\", \"aux\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(parseCommandTable(rawOutput, 1, 1))\n })\n\n m.Get(\"\/sh\/df.php\", func () ([]byte, error) {\n \/\/ Run uptime command\n rawOutput, err := exec.Command(\"df\", \"-Ph\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(parseCommandTable(rawOutput, 1, 1))\n })\n\n m.Get(\"\/sh\/time.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"date\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(string(raw[:]))\n })\n\n m.Get(\"\/sh\/issue.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"uname\", \"-rsm\").Output()\n\n if err != nil {\n return nil, err\n }\n\n return json.Marshal(string(raw[:]))\n })\n\n m.Get(\"\/sh\/users.php\", func () ([]byte, error) {\n data, err := ioutil.ReadFile(\"\/etc\/passwd\")\n\n if err != nil {\n return nil, err\n }\n\n lines := strings.Split(string(data), \"\\n\")\n\n \/\/ Output records\n var records [][]string\n\n for _, line := range lines {\n parts := strings.Split(line, \":\")\n\n \/\/ Skip bad or empty lines\n if len(parts) != 7 {\n log.Println(len(parts))\n continue\n }\n\n \/\/ Parse base 10, 16 bit UID integer\n uid, err := strconv.ParseInt(parts[2], 10, 16)\n\n \/\/ Error parsing UID\n if err != nil {\n continue\n }\n\n userType := \"user\"\n\n \/\/ Check if system user\n if uid <= 499 {\n userType = \"system\"\n }\n\n user := []string{\n \/\/ User type\n userType,\n \/\/ Username\n parts[0],\n \/\/ Home directory\n parts[6],\n }\n\n records = append(records, user)\n }\n\n return json.Marshal(records)\n })\n\n m.Get(\"\/sh\/online.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"w\").Output()\n\n if err != nil {\n return nil, err\n }\n\n \/\/ We'll add all the parsed lines here\n var entries [][]string\n\n \/\/ Skip first and last line of output\n for _, entry := range parseCommandTable(raw, 2, 1) {\n entries = append(entries, []string{\n \/\/ User\n entry[0],\n \/\/ From\n entry[2],\n \/\/ Login at\n entry[3],\n \/\/ Idle\n entry[4],\n })\n }\n\n return json.Marshal(entries)\n })\n\n m.Get(\"\/sh\/loadavg.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"w\").Output()\n\n if err != nil {\n return nil, err\n }\n\n lines := strings.Split(string(raw[:]), \"\\n\")\n headers := strings.Fields(lines[0])\n\n var cpuLoads [][2]string\n\n \/\/ Last 3 headers are CPU loads, clean them up\n for _, load := range headers[len(headers)-3:] {\n \/\/ Remove trailing coma if there\n cleanLoad := strings.Split(string(load), \",\")[0]\n\n loadFloat, err := strconv.ParseFloat(cleanLoad, 32)\n\n if err != nil {\n continue\n }\n\n loadPercentage := int(loadFloat * 100) \/ runtime.NumCPU()\n\n cpuLoads = append(cpuLoads, [2]string{\n cleanLoad,\n strconv.Itoa(loadPercentage),\n })\n }\n\n return json.Marshal(cpuLoads)\n })\n\n m.Get(\"\/sh\/ping.php\", func () ([]byte, error) {\n HOSTS := [4]string{\n \"wikipedia.org\",\n \"github.com\",\n \"google.com\",\n \"gnu.org\",\n }\n\n var entries [][]string\n\n for _, host := range HOSTS {\n ping, err := pingHost(host, 2)\n\n if err != nil {\n continue\n }\n\n entries = append(entries, []string{\n host,\n ping,\n })\n }\n\n return json.Marshal(entries)\n })\n\n m.Get(\"\/sh\/netstat.php\", func () ([]byte, error) {\n raw, err := exec.Command(\"netstat\", \"-nt\").Output()\n\n if err != nil {\n return nil, err\n }\n\n \/\/ Get entries as a table\n entries := parseCommandTable(raw, 2, 1)\n\n \/\/ Connections we care about\n var connections [][]string\n\n \/\/ Filter by protocol\n for _, entry := range entries {\n \/\/ Skip bad entries or non tcp\n if len(entry) != 6 || (entry[0] != \"tcp\" && entry[0] != \"tcp4\") {\n continue\n }\n\n connections = append(connections, entry)\n }\n\n \/\/ Count connections from a machine\n connectionCount := make(map[string]int)\n\n \/\/ Extract\n for _, conn := range connections {\n \/\/ Foreign Address\n remoteAddr := conn[4]\n\n \/\/ Increment counter\n connectionCount[remoteAddr]++\n }\n\n \/\/ Expected output by client\n var output [][2]string\n\n for remoteAddr, count := range connectionCount {\n \/\/ Compensate for difference between OSX and linux\n parts := strings.Split(strings.Replace(remoteAddr, \",\", \".\", 1), \".\")\n ip := strings.Join(parts[:4], \".\")\n \/\/ port := parts[4]\n\n output = append(output, [2]string{\n strconv.Itoa(count),\n ip,\n })\n }\n\n return json.Marshal(output)\n })\n\n m.Get(\"\/sh\/where.php\", func () ([]byte, error) {\n SOFTWARE := []string{\n \"php\",\n \"node\",\n \"mysql\",\n \"vim\",\n \"python\",\n \"ruby\",\n \"java\",\n \"apache2\",\n \"nginx\",\n \"openssl\",\n \"vsftpd\",\n \"make\",\n }\n\n var entries [][2]string\n\n for _, bin := range SOFTWARE {\n path, err := exec.LookPath(bin)\n\n if err != nil {\n path = \"Not Installed\"\n }\n\n entries = append(entries, [2]string{\n bin,\n path,\n })\n }\n\n return json.Marshal(entries)\n })\n\n \/\/ Serve static files\n m.Get(\"\/.*\", martini.Static(\"\"))\n\n m.Run()\n}\n\nfunc parseCommandTable(rawOutput []byte, headerCount int, footerCount int) [][]string {\n \/\/ Convert output to a string (it's not binary data, so this is ok)\n output := string(rawOutput[:])\n\n \/\/ We'll add all the parsed lines here\n var entries [][]string\n\n \/\/ Lines of output\n lines := strings.Split(output, \"\\n\")\n\n \/\/ Skip first and last line of output\n for _, str := range lines[headerCount:len(lines)-footerCount] {\n entries = append(entries, strings.Fields(str))\n }\n\n return entries\n}\n\n\/\/ Get average pingTime to a host\nfunc pingHost(hostname string, pingCount int) (string, error) {\n raw, err := exec.Command(\"ping\", \"-c\", strconv.Itoa(pingCount), hostname).Output()\n\n if err != nil {\n return \"\", err\n }\n\n lines := strings.Split(string(raw[:]), \"\\n\")\n\n if len(lines) < 2 {\n return \"\", errors.New(\"Bad output for ping command\")\n }\n\n \/\/ Get 2nd last line\n lastLine := lines[len(lines)-2]\n\n \/\/ Extract average ping time as a string\n pingTime := strings.Split(strings.Split(lastLine, \"=\")[1], \"\/\")[1]\n\n return pingTime, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/vincent-petithory\/dataurl\"\n)\n\nconst yuyuteiURL = \"http:\/\/yuyu-tei.jp\/\"\nconst wsDeckURL = \"http:\/\/wsdecks.com\/\"\nconst hoTcURL = \"http:\/\/www.heartofthecards.com\/code\/cardlist.html?card=WS_\"\n\n\/\/ Prox struct\ntype Prox struct {\n\t\/\/ target url of reverse proxy\n\ttarget *url.URL\n\t\/\/ instance of Go ReverseProxy thatwill do the job for us\n\tproxy *httputil.ReverseProxy\n}\n\ntype siteConfig struct {\n\tName string\n\tFilter string\n}\n\ntype cardsConfig struct {\n\tDir string\n\tSite siteConfig\n}\n\ntype cardStruc struct {\n\tID string\n\tTranslation string\n}\n\n\/\/ New proxy\nfunc New(target string) *Prox {\n\turl, _ := url.Parse(target)\n\t\/\/ you should handle error on parsing\n\treturn &Prox{target: url, proxy: httputil.NewSingleHostReverseProxy(url)}\n}\n\nfunc (p *Prox) handle(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"X-GoProxy\", \"GoProxy\")\n\t\/\/ call to magic method from ReverseProxy object\n\tp.proxy.ServeHTTP(w, r)\n}\nfunc lowCostSystemToURL(syspath string) string {\n\treturn strings.Replace(syspath, \"\\\\\", \"\/\", -1)\n}\n\nfunc convertToJpg(filePath string) {\n\t\/\/ convert -density 150 -trim to_love-ru_darkness_2nd_trial_deck.pdf -quality 100 -sharpen 0x1.0 love.jpg\n\tcmd := exec.Command(\"convert\", \"-density\", \"150\", \"-trim\", filePath, \"-quality\", \"100\", \"-sharpen\", \"0x1.0\", filePath+\".jpg\")\n\terr := cmd.Start()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = cmd.Wait()\n}\n\nfunc createCardsCodeFile(dirPath string) (string, error) {\n\tdirPath += \"\/\"\n\tout, err := os.Create(dirPath + \"codes.txt\")\n\tdefer out.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\tcardList, err := filepath.Glob(dirPath + \"*.gif\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\t\/\/ PI\/S40-056\n\tfor _, card := range cardList {\n\t\tcard = strings.Replace(card, dirPath, \"\", 1)\n\t\tcard = strings.Replace(card, \"_\", \"-\", 1)\n\t\tcard = strings.Replace(card, \"_\", \"\/\", 1)\n\t\tex := strings.Split(card, \".\")[0]\n\t\tout.WriteString(ex + \"\\n\")\n\t}\n\treturn out.Name(), nil\n}\n\nfunc getTranslationHotC(codesPath string) []cardStruc {\n\ttranslations := []cardStruc{}\n\tfile, err := os.Open(codesPath + \"\/codes.txt\")\n\tscanner := bufio.NewScanner(file)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(\"getTranslationHotC\")\n\tfor scanner.Scan() {\n\t\t\/\/ fmt.Println(scanner.Text())\n\t\turl := hoTcURL + scanner.Text()\n\t\tfmt.Println(url)\n\t\tdoc, err := goquery.NewDocument(url)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\ttextHTML, err := doc.Find(\".cards3\").Slice(2, 3).Html()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\ttextHTML = strings.Replace(textHTML, \"<br\/>\", \" \", -1)\n\t\t\/\/ html.UnescapeString(textHTML)\n\t\tcard := cardStruc{ID: scanner.Text(), Translation: html.UnescapeString(textHTML)}\n\n\t\ttranslations = append(translations, card)\n\t\t\/\/ json.Marshal(card)\n\t\t\/\/ doc.Find(\".card3\").Get(2)\n\t}\n\treturn translations\n}\n\nfunc getCardsConfig(link string) (cardsConfig, error) {\n\tuid := \"\"\n\tsite := siteConfig{}\n\n\tif strings.Contains(link, yuyuteiURL) {\n\t\tsite.Name = \"yuyutei\"\n\t\tsite.Filter = \".card_list_box\" + \" .image img\"\n\t\tparsedURL, _ := url.Parse(link)\n\t\tvalues, _ := url.ParseQuery(parsedURL.RawQuery)\n\t\tuid = values.Get(\"ver\")\n\t} else if strings.Contains(link, wsDeckURL) {\n\t\tsite.Name = \"wsdeck\"\n\t\tsite.Filter = \".wscard\" + \" img\"\n\t\tuid = filepath.Base(link)\n\t}\n\tdir := filepath.Join(\"static\", site.Name, uid)\n\tcardsConfig := cardsConfig{Dir: dir, Site: site}\n\tif site.Filter == \"\" {\n\t\treturn cardsConfig, fmt.Errorf(\"Url is not supported %v\", link)\n\t}\n\n\treturn cardsConfig, nil\n}\n\nfunc main() {\n\tproxy := New(\"http:\/\/localhost:8080\")\n\tos.MkdirAll(filepath.Join(\"static\", \"yuyutei\"), 0744)\n\tos.MkdirAll(filepath.Join(\"static\", \"wsdeck\"), 0744)\n\t\/\/ static := http.FileServer(http.Dir(\".\/\"))\n\thttp.HandleFunc(\"\/\", proxy.handle)\n\n\thttp.HandleFunc(\"\/translationimages\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfile := r.PostFormValue(\"file\")\n\t\tfilename := r.PostFormValue(\"filename\")\n\t\tuid := strings.Replace(filename, filepath.Ext(filename), \"\", 1)\n\t\tdir := filepath.Join(\"static\", uid)\n\t\tfilePath := filepath.Join(dir, filename)\n\n\t\tdata, err := dataurl.DecodeString(file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t\/\/ fmt.Println(dataURL.Data)\n\t\tos.MkdirAll(dir, 0777)\n\t\tioutil.WriteFile(filePath, data.Data, 0644)\n\t\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\t\tconvertToJpg(filePath)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tlistJpg, err := filepath.Glob(filePath + \"*.jpg\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfor i, jpgfile := range listJpg {\n\t\t\tlistJpg[i] = lowCostSystemToURL(jpgfile)\n\t\t}\n\n\t\tb, err := json.Marshal(listJpg)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(b)\n\t})\n\n\thttp.HandleFunc(\"\/cardimages\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvar wg sync.WaitGroup\n\n\t\tresult := []string{}\n\t\tlink := r.PostFormValue(\"url\")\n\n\t\tif link != \"\" {\n\t\t\tdoc, err := goquery.NewDocument(link)\n\t\t\timageURL := \"\"\n\t\t\tcardsConfig, err := getCardsConfig(link)\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tif _, err := os.Stat(cardsConfig.Dir); os.IsNotExist(err) {\n\t\t\t\tos.MkdirAll(cardsConfig.Dir, 0744)\n\t\t\t\tdoc.Find(cardsConfig.Site.Filter).Each(func(i int, s *goquery.Selection) {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tval, _ := s.Attr(\"src\")\n\t\t\t\t\tif cardsConfig.Site.Name == \"yuyutei\" {\n\t\t\t\t\t\tbig := strings.Replace(val, \"90_126\", \"front\", 1)\n\t\t\t\t\t\timageURL = yuyuteiURL + big\n\t\t\t\t\t} else if cardsConfig.Site.Name == \"wsdeck\" {\n\t\t\t\t\t\timageURL = wsDeckURL + val\n\t\t\t\t\t}\n\n\t\t\t\t\tgo func(url string) {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\t\/\/ fmt.Println(\"dir : \", dir)\n\t\t\t\t\t\tfileName := filepath.Join(cardsConfig.Dir, path.Base(url))\n\t\t\t\t\t\tout, err := os.Create(fileName)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer out.Close()\n\t\t\t\t\t\treps, err := http.Get(url)\n\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfile, err := io.Copy(out, reps.Body)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfmt.Println(\"File\", file)\n\t\t\t\t\t\t\/\/ fmt.Printf(\"Link: n-%d __ %v%v\\n\", i, imageURL, uid)\n\t\t\t\t\t\tdefer reps.Body.Close()\n\t\t\t\t\t\t\/\/ fmt.Println(\"image url: \", strings.Replace(fileName, \"\\\\\", \"\/\", 1))\n\t\t\t\t\t\tresult = append(result, lowCostSystemToURL(fileName))\n\t\t\t\t\t}(imageURL)\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tfiles, err := ioutil.ReadDir(cardsConfig.Dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t\tfor _, file := range files {\n\t\t\t\t\tabsPath := filepath.Join(cardsConfig.Dir, file.Name())\n\t\t\t\t\turlPath := lowCostSystemToURL(absPath)\n\t\t\t\t\tresult = append(result, urlPath)\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tcreateCardsCodeFile(cardsConfig.Dir)\n\t\t\t\/\/ getTranslationHotC(cardsConfig.Dir)\n\t\t}\n\n\t\twg.Wait()\n\t\tfmt.Printf(\"Finish\")\n\t\tb, err := json.Marshal(result)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\tw.Write(b)\n\t})\n\n\thttp.HandleFunc(\"\/static\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ fmt.Println(\"static\", r.URL.Path[1:])\n\t\thttp.ServeFile(w, r, r.URL.Path[1:])\n\t})\n\n\thttp.ListenAndServe(\":8010\", nil)\n}\n<commit_msg>Create code file when all files are save<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/vincent-petithory\/dataurl\"\n)\n\nconst yuyuteiURL = \"http:\/\/yuyu-tei.jp\/\"\nconst wsDeckURL = \"http:\/\/wsdecks.com\/\"\nconst hoTcURL = \"http:\/\/www.heartofthecards.com\/code\/cardlist.html?card=WS_\"\n\n\/\/ Prox struct\ntype Prox struct {\n\t\/\/ target url of reverse proxy\n\ttarget *url.URL\n\t\/\/ instance of Go ReverseProxy thatwill do the job for us\n\tproxy *httputil.ReverseProxy\n}\n\ntype siteConfig struct {\n\tName string\n\tFilter string\n}\n\ntype cardsConfig struct {\n\tDir string\n\tSite siteConfig\n}\n\ntype cardStruc struct {\n\tID string\n\tTranslation string\n}\n\n\/\/ New proxy\nfunc New(target string) *Prox {\n\turl, _ := url.Parse(target)\n\t\/\/ you should handle error on parsing\n\treturn &Prox{target: url, proxy: httputil.NewSingleHostReverseProxy(url)}\n}\n\nfunc (p *Prox) handle(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"X-GoProxy\", \"GoProxy\")\n\t\/\/ call to magic method from ReverseProxy object\n\tp.proxy.ServeHTTP(w, r)\n}\nfunc lowCostSystemToURL(syspath string) string {\n\treturn strings.Replace(syspath, \"\\\\\", \"\/\", -1)\n}\n\nfunc convertToJpg(filePath string) {\n\t\/\/ convert -density 150 -trim to_love-ru_darkness_2nd_trial_deck.pdf -quality 100 -sharpen 0x1.0 love.jpg\n\tcmd := exec.Command(\"convert\", \"-density\", \"150\", \"-trim\", filePath, \"-quality\", \"100\", \"-sharpen\", \"0x1.0\", filePath+\".jpg\")\n\terr := cmd.Start()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = cmd.Wait()\n}\n\nfunc createCardsCodeFile(dirPath string) (string, error) {\n\t\/\/TODO Do nothing if file exists\n\tdirPath += \"\/\"\n\tout, err := os.Create(dirPath + \"codes.txt\")\n\tdefer out.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\tcardList, err := filepath.Glob(dirPath + \"*.gif\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\t\/\/ PI\/S40-056\n\tfor _, card := range cardList {\n\t\tcard = strings.Replace(card, dirPath, \"\", 1)\n\t\tcard = strings.Replace(card, \"_\", \"-\", 1)\n\t\tcard = strings.Replace(card, \"_\", \"\/\", 1)\n\t\tex := strings.Split(card, \".\")[0]\n\t\tout.WriteString(ex + \"\\n\")\n\t}\n\treturn out.Name(), nil\n}\n\nfunc getTranslationHotC(codesPath string) []cardStruc {\n\ttranslations := []cardStruc{}\n\tfile, err := os.Open(codesPath + \"\/codes.txt\")\n\tscanner := bufio.NewScanner(file)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(\"getTranslationHotC\")\n\tfor scanner.Scan() {\n\t\t\/\/ fmt.Println(scanner.Text())\n\t\turl := hoTcURL + scanner.Text()\n\t\tfmt.Println(url)\n\t\tdoc, err := goquery.NewDocument(url)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\ttextHTML, err := doc.Find(\".cards3\").Slice(2, 3).Html()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\ttextHTML = strings.Replace(textHTML, \"<br\/>\", \" \", -1)\n\t\t\/\/ html.UnescapeString(textHTML)\n\t\tcard := cardStruc{ID: scanner.Text(), Translation: html.UnescapeString(textHTML)}\n\n\t\ttranslations = append(translations, card)\n\t\t\/\/ json.Marshal(card)\n\t\t\/\/ doc.Find(\".card3\").Get(2)\n\t}\n\treturn translations\n}\n\nfunc getCardsConfig(link string) (cardsConfig, error) {\n\tuid := \"\"\n\tsite := siteConfig{}\n\n\tif strings.Contains(link, yuyuteiURL) {\n\t\tsite.Name = \"yuyutei\"\n\t\tsite.Filter = \".card_list_box\" + \" .image img\"\n\t\tparsedURL, _ := url.Parse(link)\n\t\tvalues, _ := url.ParseQuery(parsedURL.RawQuery)\n\t\tuid = values.Get(\"ver\")\n\t} else if strings.Contains(link, wsDeckURL) {\n\t\tsite.Name = \"wsdeck\"\n\t\tsite.Filter = \".wscard\" + \" img\"\n\t\tuid = filepath.Base(link)\n\t}\n\tdir := filepath.Join(\"static\", site.Name, uid)\n\tcardsConfig := cardsConfig{Dir: dir, Site: site}\n\tif site.Filter == \"\" {\n\t\treturn cardsConfig, fmt.Errorf(\"Url is not supported %v\", link)\n\t}\n\n\treturn cardsConfig, nil\n}\n\nfunc main() {\n\tproxy := New(\"http:\/\/localhost:8080\")\n\tos.MkdirAll(filepath.Join(\"static\", \"yuyutei\"), 0744)\n\tos.MkdirAll(filepath.Join(\"static\", \"wsdeck\"), 0744)\n\t\/\/ static := http.FileServer(http.Dir(\".\/\"))\n\thttp.HandleFunc(\"\/\", proxy.handle)\n\n\thttp.HandleFunc(\"\/translationimages\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfile := r.PostFormValue(\"file\")\n\t\tfilename := r.PostFormValue(\"filename\")\n\t\tuid := strings.Replace(filename, filepath.Ext(filename), \"\", 1)\n\t\tdir := filepath.Join(\"static\", uid)\n\t\tfilePath := filepath.Join(dir, filename)\n\n\t\tdata, err := dataurl.DecodeString(file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t\/\/ fmt.Println(dataURL.Data)\n\t\tos.MkdirAll(dir, 0777)\n\t\tioutil.WriteFile(filePath, data.Data, 0644)\n\t\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\t\tconvertToJpg(filePath)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tlistJpg, err := filepath.Glob(filePath + \"*.jpg\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfor i, jpgfile := range listJpg {\n\t\t\tlistJpg[i] = lowCostSystemToURL(jpgfile)\n\t\t}\n\n\t\tb, err := json.Marshal(listJpg)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(b)\n\t})\n\n\thttp.HandleFunc(\"\/cardimages\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvar wg sync.WaitGroup\n\n\t\tresult := []string{}\n\t\tlink := r.PostFormValue(\"url\")\n\n\t\tif link != \"\" {\n\t\t\tdoc, err := goquery.NewDocument(link)\n\t\t\timageURL := \"\"\n\t\t\tcardsConfig, err := getCardsConfig(link)\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tif _, err := os.Stat(cardsConfig.Dir); os.IsNotExist(err) {\n\t\t\t\tos.MkdirAll(cardsConfig.Dir, 0744)\n\t\t\t\tdoc.Find(cardsConfig.Site.Filter).Each(func(i int, s *goquery.Selection) {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tval, _ := s.Attr(\"src\")\n\t\t\t\t\tif cardsConfig.Site.Name == \"yuyutei\" {\n\t\t\t\t\t\tbig := strings.Replace(val, \"90_126\", \"front\", 1)\n\t\t\t\t\t\timageURL = yuyuteiURL + big\n\t\t\t\t\t} else if cardsConfig.Site.Name == \"wsdeck\" {\n\t\t\t\t\t\timageURL = wsDeckURL + val\n\t\t\t\t\t}\n\n\t\t\t\t\tgo func(url string) {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\t\/\/ fmt.Println(\"dir : \", dir)\n\t\t\t\t\t\tfileName := filepath.Join(cardsConfig.Dir, path.Base(url))\n\t\t\t\t\t\tout, err := os.Create(fileName)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefer out.Close()\n\t\t\t\t\t\treps, err := http.Get(url)\n\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfile, err := io.Copy(out, reps.Body)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfmt.Println(\"File\", file)\n\t\t\t\t\t\t\/\/ fmt.Printf(\"Link: n-%d __ %v%v\\n\", i, imageURL, uid)\n\t\t\t\t\t\tdefer reps.Body.Close()\n\t\t\t\t\t\t\/\/ fmt.Println(\"image url: \", strings.Replace(fileName, \"\\\\\", \"\/\", 1))\n\t\t\t\t\t\tresult = append(result, lowCostSystemToURL(fileName))\n\t\t\t\t\t}(imageURL)\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tfiles, err := ioutil.ReadDir(cardsConfig.Dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t\tfor _, file := range files {\n\t\t\t\t\tabsPath := filepath.Join(cardsConfig.Dir, file.Name())\n\t\t\t\t\turlPath := lowCostSystemToURL(absPath)\n\t\t\t\t\tresult = append(result, urlPath)\n\t\t\t\t}\n\n\t\t\t}\n\t\t\t\/\/ getTranslationHotC(cardsConfig.Dir)\n\t\t\twg.Wait()\n\t\t\tfmt.Printf(\"Finish\")\n\t\t\tcreateCardsCodeFile(cardsConfig.Dir)\n\t\t}\n\n\t\tb, err := json.Marshal(result)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\tw.Write(b)\n\t})\n\n\thttp.HandleFunc(\"\/static\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ fmt.Println(\"static\", r.URL.Path[1:])\n\t\thttp.ServeFile(w, r, r.URL.Path[1:])\n\t})\n\n\thttp.ListenAndServe(\":8010\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\twhisper \"github.com\/grobian\/go-whisper\"\n\tpickle \"github.com\/kisielk\/og-rek\"\n\n\t\"net\/http\"\n\t\"encoding\/json\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"strconv\"\n\t\"math\"\n\t\"fmt\"\n\t\"os\"\n)\n\nvar config = struct {\n\tWhisperData\tstring\n}{\n\tWhisperData: \"\/var\/lib\/carbon\/whisper\",\n\t\/\/WhisperData: \"..\",\n}\n\ntype WhisperFetchResponse struct {\n\tName string `json:\"name\"`\n\tStartTime int `json:\"startTime\"`\n\tStopTime int `json:\"stopTime\"`\n\tStepTime int `json:\"stepTime\"`\n\tValues []float64 `json:\"values\"`\n\tIsAbsent []bool `json:\"isAbsent\"`\n}\n\ntype WhisperGlobResponse struct {\n\tName string `json:\"name\"`\n\tPaths []string `json:\"paths\"`\n}\n\nvar log Logger\n\nfunc findHandler(wr http.ResponseWriter, req *http.Request) {\n\/\/\tGET \/metrics\/find\/?local=1&format=pickle&query=general.hadoop.lhr4.ha201jobtracker-01.jobtracker.NonHeapMemoryUsage.committed HTTP\/1.1\n\/\/\thttp:\/\/localhost:8080\/metrics\/find\/?query=test\n\treq.ParseForm()\n\tglob := req.FormValue(\"query\")\n\tformat := req.FormValue(\"format\")\n\n\tif format != \"json\" && format != \"pickle\" {\n\t\tlog.Warn(\"dropping invalid uri (format=%s): %s\",\n\t\t\t\tformat, req.URL.RequestURI())\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/* things to glob:\n\t * - carbon.relays -> carbon.relays\n\t * - carbon.re -> carbon.relays, carbon.rewhatever\n\t * - carbon.[rz] -> carbon.relays, carbon.zipper\n\t * - carbon.{re,zi} -> carbon.relays, carbon.zipper\n\t * - implicit * at the end of each query\n\t * - match is either dir or .wsp file\n\t * unfortunately, filepath.Glob doesn't handle the curly brace\n\t * expansion for us *\/\n\tlbrace := strings.Index(glob, \"{\")\n\trbrace := -1\n\tif lbrace > -1 {\n\t\trbrace = strings.Index(glob[lbrace:], \"}\")\n\t\tif rbrace > -1 {\n\t\t\trbrace += lbrace\n\t\t}\n\t}\n\tfiles := make([]string, 0)\n\tif lbrace > -1 && rbrace > -1 {\n\t\texpansion := glob[lbrace + 1:rbrace]\n\t\tparts := strings.Split(expansion, \",\")\n\t\tfor _, sub := range parts {\n\t\t\tsglob := glob[:lbrace] + sub + glob[rbrace + 1:]\n\t\t\tpath := config.WhisperData + \"\/\" + strings.Replace(sglob, \".\", \"\/\", -1) + \"*\"\n\t\t\tnfiles, err := filepath.Glob(path)\n\t\t\tif err == nil {\n\t\t\t\tfiles = append(files, nfiles...)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpath := config.WhisperData + \"\/\" + strings.Replace(glob, \".\", \"\/\", -1) + \"*\"\n\t\tnfiles, err := filepath.Glob(path)\n\t\tif err == nil {\n\t\t\tfiles = append(files, nfiles...)\n\t\t}\n\t}\n\n\tleafs := make([]bool, len(files))\n\tfor i, p := range files {\n\t\tp = p[len(config.WhisperData + \"\/\"):]\n\t\tif strings.HasSuffix(p, \".wsp\") {\n\t\t\tp = p[:len(p) - 4]\n\t\t\tleafs[i] = true\n\t\t} else {\n\t\t\tleafs[i] = false\n\t\t}\n\t\tfiles[i] = strings.Replace(p, \"\/\", \".\", -1)\n\t}\n\n\tif format == \"json\" {\n\t\tresponse := WhisperGlobResponse {\n\t\t\tName:\t\tglob,\n\t\t\tPaths:\t\tmake([]string, 0),\n\t\t}\n\t\tfor _, p := range files {\n\t\t\tresponse.Paths = append(response.Paths, p)\n\t\t}\n\t\tb, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to create JSON data for glob %s: %s\", glob, err)\n\t\t\treturn\n\t\t}\n\t\twr.Write(b)\n\t} else if format == \"pickle\" {\n\t\t\/\/ [{'metric_path': 'metric', 'intervals': [(x,y)], 'isLeaf': True},]\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\n\t\tfor i, p := range files {\n\t\t\tm = make(map[string]interface{})\n\t\t\tm[\"metric_path\"] = p\n\t\t\t\/\/ m[\"intervals\"] = dunno how to do a tuple here\n\t\t\tm[\"isLeaf\"] = leafs[i]\n\t\t\tmetrics = append(metrics, m)\n\t\t}\n\n\t\twr.Header().Set(\"Content-Type\", \"application\/pickle\")\n\t\tpEnc := pickle.NewEncoder(wr)\n\t\tpEnc.Encode(metrics)\n\t}\n\tlog.Info(\"find: %d hits for %s\", len(files), glob)\n\treturn\n}\n\nfunc fetchHandler(wr http.ResponseWriter, req *http.Request) {\n\/\/\tGET \/render\/?target=general.me.1.percent_time_active.pfnredis&format=pickle&from=1396008021&until=1396022421 HTTP\/1.1\n\/\/\thttp:\/\/localhost:8080\/render\/?target=testmetric&format=json&from=1395961200&until=1395961800\n\treq.ParseForm()\n\tmetric := req.FormValue(\"target\")\n\tformat := req.FormValue(\"format\")\n\tfrom := req.FormValue(\"from\")\n\tuntil := req.FormValue(\"until\")\n\n\tif format != \"json\" && format != \"pickle\" {\n\t\tlog.Warn(\"dropping invalid uri (format=%s): %s\",\n\t\t\t\tformat, req.URL.RequestURI())\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpath := config.WhisperData + \"\/\" + strings.Replace(metric, \".\", \"\/\", -1) + \".wsp\"\n\tw, err := whisper.Open(path)\n\tif err != nil {\n\t\t\/\/ the FE\/carbonzipper often requests metrics we don't have\n\t\tlog.Debug(\"failed to %s\", err)\n\t\thttp.Error(wr, \"Metric not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\ti, err := strconv.Atoi(from)\n\tif err != nil {\n\t\tlog.Debug(\"fromTime (%s) invalid: %s (in %s)\",\n\t\t\t\tfrom, err, req.URL.RequestURI)\n\t\tif w != nil {\n\t\t\tw.Close()\n\t\t}\n\t\tw = nil\n\t}\n\tfromTime := int(i)\n\ti, err = strconv.Atoi(until)\n\tif err != nil {\n\t\tlog.Debug(\"untilTime (%s) invalid: %s (in %s)\",\n\t\t\t\tfrom, err, req.URL.RequestURI)\n\t\tif w != nil {\n\t\t\tw.Close()\n\t\t}\n\t\tw = nil\n\t}\n\tuntilTime := int(i)\n\n\tif (w != nil) {\n\t\tdefer w.Close()\n\t} else {\n\t\thttp.Error(wr, \"Bad request (invalid from\/until time)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpoints, err := w.Fetch(fromTime, untilTime)\n\tif err != nil {\n\t\tlog.Error(\"failed to fetch points from %s: %s\", path, err)\n\t\thttp.Error(wr, \"Fetching data points failed\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tvalues := points.Values()\n\n\tif format == \"json\" {\n\t\tresponse := WhisperFetchResponse {\n\t\t\tName:\t\tmetric,\n\t\t\tStartTime:\tpoints.FromTime(),\n\t\t\tStopTime:\tpoints.UntilTime(),\n\t\t\tStepTime:\tpoints.Step(),\n\t\t\tValues:\t\tmake([]float64, len(values)),\n\t\t\tIsAbsent:\tmake([]bool, len(values)),\n\t\t}\n\n\t\tfor i, p := range values {\n\t\t\tif math.IsNaN(p) {\n\t\t\t\tresponse.Values[i] = 0\n\t\t\t\tresponse.IsAbsent[i] = true\n\t\t\t} else {\n\t\t\t\tresponse.Values[i] = p\n\t\t\t\tresponse.IsAbsent[i] = false\n\t\t\t}\n\t\t}\n\n\t\tb, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to create JSON data for %s: %s\", path, err)\n\t\t\treturn\n\t\t}\n\t\twr.Write(b)\n\t} else if format == \"pickle\" {\n\t\t\/\/[{'start': 1396271100, 'step': 60, 'name': 'metric',\n\t\t\/\/'values': [9.0, 19.0, None], 'end': 1396273140}\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\n\t\tm = make(map[string]interface{})\n\t\tm[\"start\"] = points.FromTime()\n\t\tm[\"step\"] = points.Step()\n\t\tm[\"end\"] = points.UntilTime()\n\t\tm[\"name\"] = metric\n\n\t\tmv := make([]interface{}, len(values))\n\t\tfor i, p := range values {\n\t\t\tif math.IsNaN(p) {\n\t\t\t\tmv[i] = nil\n\t\t\t} else {\n\t\t\t\tmv[i] = p\n\t\t\t}\n\t\t}\n\n\t\tm[\"values\"] = mv\n\t\tmetrics = append(metrics, m)\n\n\t\twr.Header().Set(\"Content-Type\", \"application\/pickle\")\n\t\tpEnc := pickle.NewEncoder(wr)\n\t\tpEnc.Encode(metrics)\n\t}\n\n\tlog.Info(\"served %d points for %s\", len(values), metric)\n\treturn\n}\n\nfunc main() {\n\tlog = NewOutputLogger(INFO);\n\n\thttp.HandleFunc(\"\/metrics\/find\/\", findHandler)\n\thttp.HandleFunc(\"\/render\/\", fetchHandler)\n\n\tlog.Info(\"listening on port 8080\")\n\terr := http.ListenAndServe(\":8080\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"%s\", err)\n\t}\n\tlog.Info(\"stopped\")\n}\n\n\/\/ Simple wrapper to enable\/disable lots of spam\ntype Logger interface {\n\tInfo(format string, a ...interface{})\n\tWarn(format string, a ...interface{})\n\tError(format string, a ...interface{})\n\tFatal(format string, a ...interface{})\n\tDebug(format string, a ...interface{})\n}\n\ntype LogLevel int\nconst (\n\tFATAL LogLevel = 0\n\tERROR LogLevel = 1\n\tWARN LogLevel = 2\n\tINFO LogLevel = 3\n\tDEBUG LogLevel = 4\n)\ntype outputLogger struct {\n\tlevel LogLevel\n\tout *os.File\n\terr *os.File\n}\n\nfunc NewOutputLogger(level LogLevel) *outputLogger {\n\tr := new(outputLogger)\n\tr.level = level;\n\tr.out = os.Stdout;\n\tr.err = os.Stderr;\n\n\treturn r\n}\n\nfunc (l *outputLogger) Debug(format string, a ...interface{}) {\n\tif l.level >= DEBUG {\n\t\tl.out.WriteString(fmt.Sprintf(\"DEBUG: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Info(format string, a ...interface{}) {\n\tif l.level >= INFO {\n\t\tl.out.WriteString(fmt.Sprintf(\"INFO: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Warn(format string, a ...interface{}) {\n\tif l.level >= WARN {\n\t\tl.out.WriteString(fmt.Sprintf(\"WARN: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Error(format string, a ...interface{}) {\n\tif l.level >= ERROR {\n\t\tl.err.WriteString(fmt.Sprintf(\"ERROR: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Fatal(format string, a ...interface{}) {\n\tif l.level >= FATAL {\n\t\tl.err.WriteString(fmt.Sprintf(\"ERROR: \" + format + \"\\n\", a...))\n\t}\n\tos.Exit(1)\n}\n\n<commit_msg>find: bail out if we have nothing to search for<commit_after>package main\n\nimport (\n\twhisper \"github.com\/grobian\/go-whisper\"\n\tpickle \"github.com\/kisielk\/og-rek\"\n\n\t\"net\/http\"\n\t\"encoding\/json\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"strconv\"\n\t\"math\"\n\t\"fmt\"\n\t\"os\"\n)\n\nvar config = struct {\n\tWhisperData\tstring\n}{\n\tWhisperData: \"\/var\/lib\/carbon\/whisper\",\n\t\/\/WhisperData: \"..\",\n}\n\ntype WhisperFetchResponse struct {\n\tName string `json:\"name\"`\n\tStartTime int `json:\"startTime\"`\n\tStopTime int `json:\"stopTime\"`\n\tStepTime int `json:\"stepTime\"`\n\tValues []float64 `json:\"values\"`\n\tIsAbsent []bool `json:\"isAbsent\"`\n}\n\ntype WhisperGlobResponse struct {\n\tName string `json:\"name\"`\n\tPaths []string `json:\"paths\"`\n}\n\nvar log Logger\n\nfunc findHandler(wr http.ResponseWriter, req *http.Request) {\n\/\/\tGET \/metrics\/find\/?local=1&format=pickle&query=general.hadoop.lhr4.ha201jobtracker-01.jobtracker.NonHeapMemoryUsage.committed HTTP\/1.1\n\/\/\thttp:\/\/localhost:8080\/metrics\/find\/?query=test\n\treq.ParseForm()\n\tglob := req.FormValue(\"query\")\n\tformat := req.FormValue(\"format\")\n\n\tif format != \"json\" && format != \"pickle\" {\n\t\tlog.Warn(\"dropping invalid uri (format=%s): %s\",\n\t\t\t\tformat, req.URL.RequestURI())\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif glob == \"\" {\n\t\tlog.Warn(\"dropping invalid request (query=): %s\", req.URL.RequestURI())\n\t\thttp.Error(wr, \"Bad request (no query)\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/* things to glob:\n\t * - carbon.relays -> carbon.relays\n\t * - carbon.re -> carbon.relays, carbon.rewhatever\n\t * - carbon.[rz] -> carbon.relays, carbon.zipper\n\t * - carbon.{re,zi} -> carbon.relays, carbon.zipper\n\t * - implicit * at the end of each query\n\t * - match is either dir or .wsp file\n\t * unfortunately, filepath.Glob doesn't handle the curly brace\n\t * expansion for us *\/\n\tlbrace := strings.Index(glob, \"{\")\n\trbrace := -1\n\tif lbrace > -1 {\n\t\trbrace = strings.Index(glob[lbrace:], \"}\")\n\t\tif rbrace > -1 {\n\t\t\trbrace += lbrace\n\t\t}\n\t}\n\tfiles := make([]string, 0)\n\tif lbrace > -1 && rbrace > -1 {\n\t\texpansion := glob[lbrace + 1:rbrace]\n\t\tparts := strings.Split(expansion, \",\")\n\t\tfor _, sub := range parts {\n\t\t\tsglob := glob[:lbrace] + sub + glob[rbrace + 1:]\n\t\t\tpath := config.WhisperData + \"\/\" + strings.Replace(sglob, \".\", \"\/\", -1) + \"*\"\n\t\t\tnfiles, err := filepath.Glob(path)\n\t\t\tif err == nil {\n\t\t\t\tfiles = append(files, nfiles...)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpath := config.WhisperData + \"\/\" + strings.Replace(glob, \".\", \"\/\", -1) + \"*\"\n\t\tnfiles, err := filepath.Glob(path)\n\t\tif err == nil {\n\t\t\tfiles = append(files, nfiles...)\n\t\t}\n\t}\n\n\tleafs := make([]bool, len(files))\n\tfor i, p := range files {\n\t\tp = p[len(config.WhisperData + \"\/\"):]\n\t\tif strings.HasSuffix(p, \".wsp\") {\n\t\t\tp = p[:len(p) - 4]\n\t\t\tleafs[i] = true\n\t\t} else {\n\t\t\tleafs[i] = false\n\t\t}\n\t\tfiles[i] = strings.Replace(p, \"\/\", \".\", -1)\n\t}\n\n\tif format == \"json\" {\n\t\tresponse := WhisperGlobResponse {\n\t\t\tName:\t\tglob,\n\t\t\tPaths:\t\tmake([]string, 0),\n\t\t}\n\t\tfor _, p := range files {\n\t\t\tresponse.Paths = append(response.Paths, p)\n\t\t}\n\t\tb, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to create JSON data for glob %s: %s\", glob, err)\n\t\t\treturn\n\t\t}\n\t\twr.Write(b)\n\t} else if format == \"pickle\" {\n\t\t\/\/ [{'metric_path': 'metric', 'intervals': [(x,y)], 'isLeaf': True},]\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\n\t\tfor i, p := range files {\n\t\t\tm = make(map[string]interface{})\n\t\t\tm[\"metric_path\"] = p\n\t\t\t\/\/ m[\"intervals\"] = dunno how to do a tuple here\n\t\t\tm[\"isLeaf\"] = leafs[i]\n\t\t\tmetrics = append(metrics, m)\n\t\t}\n\n\t\twr.Header().Set(\"Content-Type\", \"application\/pickle\")\n\t\tpEnc := pickle.NewEncoder(wr)\n\t\tpEnc.Encode(metrics)\n\t}\n\tlog.Info(\"find: %d hits for %s\", len(files), glob)\n\treturn\n}\n\nfunc fetchHandler(wr http.ResponseWriter, req *http.Request) {\n\/\/\tGET \/render\/?target=general.me.1.percent_time_active.pfnredis&format=pickle&from=1396008021&until=1396022421 HTTP\/1.1\n\/\/\thttp:\/\/localhost:8080\/render\/?target=testmetric&format=json&from=1395961200&until=1395961800\n\treq.ParseForm()\n\tmetric := req.FormValue(\"target\")\n\tformat := req.FormValue(\"format\")\n\tfrom := req.FormValue(\"from\")\n\tuntil := req.FormValue(\"until\")\n\n\tif format != \"json\" && format != \"pickle\" {\n\t\tlog.Warn(\"dropping invalid uri (format=%s): %s\",\n\t\t\t\tformat, req.URL.RequestURI())\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpath := config.WhisperData + \"\/\" + strings.Replace(metric, \".\", \"\/\", -1) + \".wsp\"\n\tw, err := whisper.Open(path)\n\tif err != nil {\n\t\t\/\/ the FE\/carbonzipper often requests metrics we don't have\n\t\tlog.Debug(\"failed to %s\", err)\n\t\thttp.Error(wr, \"Metric not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\ti, err := strconv.Atoi(from)\n\tif err != nil {\n\t\tlog.Debug(\"fromTime (%s) invalid: %s (in %s)\",\n\t\t\t\tfrom, err, req.URL.RequestURI)\n\t\tif w != nil {\n\t\t\tw.Close()\n\t\t}\n\t\tw = nil\n\t}\n\tfromTime := int(i)\n\ti, err = strconv.Atoi(until)\n\tif err != nil {\n\t\tlog.Debug(\"untilTime (%s) invalid: %s (in %s)\",\n\t\t\t\tfrom, err, req.URL.RequestURI)\n\t\tif w != nil {\n\t\t\tw.Close()\n\t\t}\n\t\tw = nil\n\t}\n\tuntilTime := int(i)\n\n\tif (w != nil) {\n\t\tdefer w.Close()\n\t} else {\n\t\thttp.Error(wr, \"Bad request (invalid from\/until time)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpoints, err := w.Fetch(fromTime, untilTime)\n\tif err != nil {\n\t\tlog.Error(\"failed to fetch points from %s: %s\", path, err)\n\t\thttp.Error(wr, \"Fetching data points failed\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tvalues := points.Values()\n\n\tif format == \"json\" {\n\t\tresponse := WhisperFetchResponse {\n\t\t\tName:\t\tmetric,\n\t\t\tStartTime:\tpoints.FromTime(),\n\t\t\tStopTime:\tpoints.UntilTime(),\n\t\t\tStepTime:\tpoints.Step(),\n\t\t\tValues:\t\tmake([]float64, len(values)),\n\t\t\tIsAbsent:\tmake([]bool, len(values)),\n\t\t}\n\n\t\tfor i, p := range values {\n\t\t\tif math.IsNaN(p) {\n\t\t\t\tresponse.Values[i] = 0\n\t\t\t\tresponse.IsAbsent[i] = true\n\t\t\t} else {\n\t\t\t\tresponse.Values[i] = p\n\t\t\t\tresponse.IsAbsent[i] = false\n\t\t\t}\n\t\t}\n\n\t\tb, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to create JSON data for %s: %s\", path, err)\n\t\t\treturn\n\t\t}\n\t\twr.Write(b)\n\t} else if format == \"pickle\" {\n\t\t\/\/[{'start': 1396271100, 'step': 60, 'name': 'metric',\n\t\t\/\/'values': [9.0, 19.0, None], 'end': 1396273140}\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\n\t\tm = make(map[string]interface{})\n\t\tm[\"start\"] = points.FromTime()\n\t\tm[\"step\"] = points.Step()\n\t\tm[\"end\"] = points.UntilTime()\n\t\tm[\"name\"] = metric\n\n\t\tmv := make([]interface{}, len(values))\n\t\tfor i, p := range values {\n\t\t\tif math.IsNaN(p) {\n\t\t\t\tmv[i] = nil\n\t\t\t} else {\n\t\t\t\tmv[i] = p\n\t\t\t}\n\t\t}\n\n\t\tm[\"values\"] = mv\n\t\tmetrics = append(metrics, m)\n\n\t\twr.Header().Set(\"Content-Type\", \"application\/pickle\")\n\t\tpEnc := pickle.NewEncoder(wr)\n\t\tpEnc.Encode(metrics)\n\t}\n\n\tlog.Info(\"served %d points for %s\", len(values), metric)\n\treturn\n}\n\nfunc main() {\n\tlog = NewOutputLogger(INFO);\n\n\thttp.HandleFunc(\"\/metrics\/find\/\", findHandler)\n\thttp.HandleFunc(\"\/render\/\", fetchHandler)\n\n\tlog.Info(\"listening on port 8080\")\n\terr := http.ListenAndServe(\":8080\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"%s\", err)\n\t}\n\tlog.Info(\"stopped\")\n}\n\n\/\/ Simple wrapper to enable\/disable lots of spam\ntype Logger interface {\n\tInfo(format string, a ...interface{})\n\tWarn(format string, a ...interface{})\n\tError(format string, a ...interface{})\n\tFatal(format string, a ...interface{})\n\tDebug(format string, a ...interface{})\n}\n\ntype LogLevel int\nconst (\n\tFATAL LogLevel = 0\n\tERROR LogLevel = 1\n\tWARN LogLevel = 2\n\tINFO LogLevel = 3\n\tDEBUG LogLevel = 4\n)\ntype outputLogger struct {\n\tlevel LogLevel\n\tout *os.File\n\terr *os.File\n}\n\nfunc NewOutputLogger(level LogLevel) *outputLogger {\n\tr := new(outputLogger)\n\tr.level = level;\n\tr.out = os.Stdout;\n\tr.err = os.Stderr;\n\n\treturn r\n}\n\nfunc (l *outputLogger) Debug(format string, a ...interface{}) {\n\tif l.level >= DEBUG {\n\t\tl.out.WriteString(fmt.Sprintf(\"DEBUG: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Info(format string, a ...interface{}) {\n\tif l.level >= INFO {\n\t\tl.out.WriteString(fmt.Sprintf(\"INFO: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Warn(format string, a ...interface{}) {\n\tif l.level >= WARN {\n\t\tl.out.WriteString(fmt.Sprintf(\"WARN: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Error(format string, a ...interface{}) {\n\tif l.level >= ERROR {\n\t\tl.err.WriteString(fmt.Sprintf(\"ERROR: \" + format + \"\\n\", a...))\n\t}\n}\n\nfunc (l *outputLogger) Fatal(format string, a ...interface{}) {\n\tif l.level >= FATAL {\n\t\tl.err.WriteString(fmt.Sprintf(\"ERROR: \" + format + \"\\n\", a...))\n\t}\n\tos.Exit(1)\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ http:\/\/tsdb.kronos.d:4242\/#start=2016\/02\/09-08:00:00&end=2016\/02\/09-11:00:00&m=mimmax:php.timers.mongo.p95&o=&yrange=%5B0:%5D&wxh=1420x608&style=linespoint\n\/\/ http:\/\/tsdb.kronos.d:4242\/api\/query\/?start=2016\/02\/09-08:00:00&end=2016\/02\/09-11:00:00&m=mimmax:php.timers.mongo.p95\n\n\/\/ http:\/\/tsdb.kronos.d:4242\/#start=2016\/02\/03-09:23:00&end=2016\/02\/10-09:23:58&m=mimmax:gauges.do.sphinx.lag&o=&yrange=%5B0:%5D&wxh=1380x636&style=linespoint\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst tsdbHost = \"http:\/\/tsdb.kronos.d:4242\"\n\nconst tpl = `<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>chart<\/title>\n\t\t<script type=\"text\/javascript\" src=\"http:\/\/www.amcharts.com\/lib\/3\/amcharts.js\"><\/script>\n\t\t<script type=\"text\/javascript\" src=\"http:\/\/www.amcharts.com\/lib\/3\/serial.js\"><\/script>\n\t\t<script type=\"text\/javascript\">\n\t\t\tAmCharts.makeChart(\"chartdiv\", {\n\t\t\t\t\"type\": \"serial\",\n\t\t\t\t\"categoryField\": \"date\",\n\t\t\t\t\"dataDateFormat\": \"YYYY-MM-DD HH:NN:SS\",\n\t\t\t\t\"precision\": 4,\n\t\t\t\t\"categoryAxis\": { \"minPeriod\": \"ss\", \"parseDates\": true },\n\t\t\t\t\"chartCursor\": {\"enabled\": true, \"categoryBalloonDateFormat\": \"JJ:NN:SS\" },\n\t\t\t\t\"chartScrollbar\": { \"enabled\": true },\n\t\t\t\t\"trendLines\": [],\n\t\t\t\t\"graphs\": [\n\t\t\t\t\t{\"id\": \"value\", \"title\": \"Value\", \"valueField\": \"val\"},\n\t\t\t\t\t{\"id\": \"average\", \"title\": \"Average\", \"valueField\": \"avg\"},\n\t\t\t\t\t{\"id\": \"sigma\", \"title\": \"Sigma\", \"valueField\": \"sigma\", \"valueAxis\": \"ValueAxis-2\"}\n\t\t\t\t],\n\t\t\t\t\"guides\": %s,\n\t\t\t\t\"valueAxes\": [{\n\t\t\t\t\t\"id\": \"ValueAxis-1\",\n\t\t\t\t\t\"title\": \"Values\"\n\t\t\t\t},{\n\t\t\t\t\t\"id\": \"ValueAxis-2\",\n\t\t\t\t\t\"position\": \"right\",\n\t\t\t\t\t\"title\": \"Sigma\"\n\t\t\t\t}],\n\t\t\t\t\"allLabels\": [],\n\t\t\t\t\"balloon\": {},\n\t\t\t\t\"legend\": { \"enabled\": true, \"useGraphSettings\": true },\n\t\t\t\t\"titles\": [],\n\t\t\t\t\"dataProvider\": %s\n\t\t\t});\n\t\t<\/script>\n\t<\/head>\n\t<body>\n\t\t<div id=\"chartdiv\" style=\"width: 100%%; height: 400px; background-color: #FFFFFF;\" ><\/div>\n\t<\/body>\n<\/html>\n`\n\ntype tsdbResponse struct {\n\tMetric string `json:\"metric\"`\n\tTags map[string]string `json:\"Tags\"`\n\tAggregateTags []string `json:\"aggregateTags\"`\n\tDataPoints map[string]float64 `json:\"dps\"`\n}\n\ntype DataPoint struct {\n\tTime time.Time\n\tValue float64\n}\n\ntype DataPoints []DataPoint\n\nfunc (dps DataPoints) Len() int { return len(dps) }\nfunc (dps DataPoints) Swap(i, j int) { dps[i], dps[j] = dps[j], dps[i] }\nfunc (dps DataPoints) Less(i, j int) bool { return dps[i].Time.Before(dps[j].Time) }\n\ntype chartData struct {\n\tDate string `json:\"date\"`\n\tValue float64 `json:\"val\"`\n\tAverage float64 `json:\"avg\"`\n\tSigma float64 `json:\"sigma\"`\n}\n\n\/*\n\t{\n\t\t\"date\": \"2016-02-09 09:20:00\",\n\t\t\"id\": \"anomaly-01\",\n\t\t\"lineAlpha\": 0.5,\n\t\t\"lineColor\": \"#FF0000\",\n\t\t\"lineThickness\": 5\n\t}\n*\/\ntype anomalyData struct {\n\tID string `json:\"id\"`\n\tDate string `json:\"date\",omitempty`\n\tAlpha float64 `json:\"lineAlpha\",omitempty`\n\tColor string `json:\"lineColor\"`\n\tThickness int64 `json:\"lineThickness\"`\n\tValue int64 `json:\"value\",omitempty`\n\tAxis string `json:\"valueAxis\",omitempty`\n}\n\nfunc main() {\n\tport := \"80\"\n\tif os.Getenv(\"PORT\") != \"\" {\n\t\tport = os.Getenv(\"PORT\")\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tt := time.Now()\n\n\t\tstart := r.URL.Query().Get(\"start\")\n\t\tend := r.URL.Query().Get(\"end\")\n\t\tm := r.URL.Query().Get(\"m\")\n\t\tperiodRaw := r.URL.Query().Get(\"period\")\n\n\t\tif m == \"\" {\n\t\t\tfmt.Fprintln(w, \"Берём урл от tsdb, заменяем хост и порт на текущий, вместо '\/#start' делаем '\/?start'\")\n\t\t\treturn\n\t\t}\n\n\t\tif periodRaw == \"\" {\n\t\t\tperiodRaw = \"120\"\n\t\t}\n\t\tperiod, err := strconv.Atoi(periodRaw)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to atoi: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tchart, anomalyes, err := detectAnomalyes(start, end, m, period)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to detect: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\tID: \"guide-1\",\n\t\t\tColor: \"#CC0000\",\n\t\t\tThickness: 3,\n\t\t\tValue: 1,\n\t\t\tAxis: \"ValueAxis-2\",\n\t\t})\n\n\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\tID: \"guide-1\",\n\t\t\tColor: \"#CC0000\",\n\t\t\tThickness: 3,\n\t\t\tValue: -1,\n\t\t\tAxis: \"ValueAxis-2\",\n\t\t})\n\n\t\tchartBytes, err := json.Marshal(chart)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to Marshal: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tanomalyesBytes, err := json.Marshal(anomalyes)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to Marshal: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tfmt.Fprintf(w, tpl, string(anomalyesBytes), string(chartBytes))\n\n\t\tlog.Printf(\"%s %q %v\\n\", r.Method, r.URL.String(), time.Since(t))\n\t})\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\nfunc detectAnomalyes(start, end, m string, period int) ([]chartData, []anomalyData, error) {\n\tpath := url.Values{}\n\t\/\/ path.Add(\"start\", \"2016\/02\/09-08:00:00\")\n\t\/\/ path.Add(\"end\", \"2016\/02\/09-11:00:00\")\n\t\/\/ path.Add(\"m\", \"mimmax:php.timers.mongo.p95\")\n\tpath.Add(\"start\", start)\n\tpath.Add(\"end\", end)\n\tpath.Add(\"m\", m)\n\n\tresp, err := http.Get(fmt.Sprintf(\"%s\/api\/query\/?%s\", tsdbHost, path.Encode()))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to get data from TSDB: %v\", err)\n\t}\n\n\tvar data []tsdbResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&data); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode TSDB response: %v\", err)\n\t}\n\n\tdps := make(DataPoints, 0)\n\tfor ts, val := range data[0].DataPoints {\n\t\ttimestamp, err := strconv.Atoi(ts)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdp := DataPoint{\n\t\t\tTime: time.Unix(int64(timestamp), 0),\n\t\t\tValue: val,\n\t\t}\n\t\tdps = append(dps, dp)\n\t}\n\n\tsort.Sort(dps)\n\n\tvar movingAverage float64\n\tvar sigma float64\n\tvals := make([]float64, period)\n\n\tchart := make([]chartData, 0)\n\tanomalyes := make([]anomalyData, 0)\n\n\tloc, _ := time.LoadLocation(\"Asia\/Novosibirsk\")\n\tfor idx, cdp := range dps {\n\t\tif idx <= period {\n\t\t\tcontinue\n\t\t}\n\t\tpoints := dps[idx-period : idx]\n\t\tfor i, dp := range points {\n\t\t\tvals[i] = dp.Value\n\t\t}\n\n\t\tmovingAverage = average(vals)\n\t\tsigma = test3Sigma(vals)\n\t\tanomalyIdx := 0\n\t\tif sigma > 1.2 {\n\t\t\tanomalyIdx++\n\t\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\t\tID: fmt.Sprintf(\"anomaly-%d\", anomalyIdx),\n\t\t\t\tDate: cdp.Time.In(loc).Format(\"2006-01-02 15:04:05\"),\n\t\t\t\tAlpha: 0.5,\n\t\t\t\tColor: \"#FF0000\",\n\t\t\t\tThickness: 5,\n\t\t\t})\n\t\t}\n\t\t\/\/fmt.Printf(\"%v,%v,%v,%v\\n\", cdp.Time.Format(\"2006-01-02 15:04:05\"), cdp.Value, movingAverage, sigma)\n\n\t\tchart = append(chart, chartData{\n\t\t\tcdp.Time.In(loc).Format(\"2006-01-02 15:04:05\"),\n\t\t\tcdp.Value,\n\t\t\tmovingAverage,\n\t\t\tsigma,\n\t\t})\n\t}\n\n\treturn chart, anomalyes, nil\n}\n\nfunc average(vals []float64) float64 {\n\tvar sum float64\n\tfor i := 0; i < len(vals); i++ {\n\t\tsum += vals[i]\n\t}\n\treturn sum \/ float64(len(vals))\n}\n\n\/\/ Get the standard deviation of float64 values, with\n\/\/ an input average.\nfunc stddev(vals []float64, avg float64) float64 {\n\tvar sum float64\n\tfor i := 0; i < len(vals); i++ {\n\t\tdis := vals[i] - avg\n\t\tsum += dis * dis\n\t}\n\treturn math.Sqrt(sum \/ float64(len(vals)))\n}\n\n\/\/ Calculate metric score with 3-sigma rule.\n\/\/\n\/\/ What's the 3-sigma rule?\n\/\/\n\/\/\tstates that nearly all values (99.7%) lie within the 3 standard deviations\n\/\/\tof the mean in a normal distribution.\n\/\/\n\/\/ Also like z-score, defined as\n\/\/\n\/\/\t(val - mean) \/ stddev\n\/\/\n\/\/ And we name the below as metric score, yet 1\/3 of z-score\n\/\/\n\/\/\t(val - mean) \/ (3 * stddev)\n\/\/\n\/\/ The score has\n\/\/\n\/\/\tscore > 0 => values is trending up\n\/\/\tscore < 0 => values is trending down\n\/\/\tscore > 1 => values is anomalously trending up\n\/\/\tscore < -1 => values is anomalously trending down\n\/\/\n\/\/ The following function will set the metric score and also the average.\n\/\/\nfunc test3Sigma(vals []float64) float64 {\n\tif len(vals) == 0 {\n\t\treturn 0\n\t}\n\n\t\/\/ Values average and standard deviation.\n\tavg := average(vals)\n\tstd := stddev(vals, avg)\n\tlast := average(vals[len(vals)-3:])\n\n\tif std == 0 {\n\t\tswitch {\n\t\tcase last == avg:\n\t\t\treturn 0\n\t\tcase last > avg:\n\t\t\treturn 1\n\t\tcase last < avg:\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\t}\n\t\/\/ 3-sigma\n\treturn math.Abs(last-avg) \/ (3 * std)\n}\n<commit_msg>cleanup<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst tsdbHost = \"http:\/\/tsdb.kronos.d:4242\"\n\nconst tpl = `<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>chart<\/title>\n\t\t<script type=\"text\/javascript\" src=\"http:\/\/www.amcharts.com\/lib\/3\/amcharts.js\"><\/script>\n\t\t<script type=\"text\/javascript\" src=\"http:\/\/www.amcharts.com\/lib\/3\/serial.js\"><\/script>\n\t\t<script type=\"text\/javascript\">\n\t\t\tAmCharts.makeChart(\"chartdiv\", {\n\t\t\t\t\"type\": \"serial\",\n\t\t\t\t\"categoryField\": \"date\",\n\t\t\t\t\"dataDateFormat\": \"YYYY-MM-DD HH:NN:SS\",\n\t\t\t\t\"precision\": 4,\n\t\t\t\t\"categoryAxis\": { \"minPeriod\": \"ss\", \"parseDates\": true },\n\t\t\t\t\"chartCursor\": {\"enabled\": true, \"categoryBalloonDateFormat\": \"JJ:NN:SS\" },\n\t\t\t\t\"chartScrollbar\": { \"enabled\": true },\n\t\t\t\t\"trendLines\": [],\n\t\t\t\t\"graphs\": [\n\t\t\t\t\t{\"id\": \"value\", \"title\": \"Value\", \"valueField\": \"val\"},\n\t\t\t\t\t{\"id\": \"average\", \"title\": \"Average\", \"valueField\": \"avg\"},\n\t\t\t\t\t{\"id\": \"sigma\", \"title\": \"Sigma\", \"valueField\": \"sigma\", \"valueAxis\": \"ValueAxis-2\"}\n\t\t\t\t],\n\t\t\t\t\"guides\": %s,\n\t\t\t\t\"valueAxes\": [{\n\t\t\t\t\t\"id\": \"ValueAxis-1\",\n\t\t\t\t\t\"title\": \"Values\"\n\t\t\t\t},{\n\t\t\t\t\t\"id\": \"ValueAxis-2\",\n\t\t\t\t\t\"position\": \"right\",\n\t\t\t\t\t\"title\": \"Sigma\"\n\t\t\t\t}],\n\t\t\t\t\"allLabels\": [],\n\t\t\t\t\"balloon\": {},\n\t\t\t\t\"legend\": { \"enabled\": true, \"useGraphSettings\": true },\n\t\t\t\t\"titles\": [],\n\t\t\t\t\"dataProvider\": %s\n\t\t\t});\n\t\t<\/script>\n\t<\/head>\n\t<body>\n\t\t<div id=\"chartdiv\" style=\"width: 100%%; height: 400px; background-color: #FFFFFF;\" ><\/div>\n\t<\/body>\n<\/html>\n`\n\ntype tsdbResponse struct {\n\tMetric string `json:\"metric\"`\n\tTags map[string]string `json:\"Tags\"`\n\tAggregateTags []string `json:\"aggregateTags\"`\n\tDataPoints map[string]float64 `json:\"dps\"`\n}\n\ntype DataPoint struct {\n\tTime time.Time\n\tValue float64\n}\n\ntype DataPoints []DataPoint\n\nfunc (dps DataPoints) Len() int { return len(dps) }\nfunc (dps DataPoints) Swap(i, j int) { dps[i], dps[j] = dps[j], dps[i] }\nfunc (dps DataPoints) Less(i, j int) bool { return dps[i].Time.Before(dps[j].Time) }\n\ntype chartData struct {\n\tDate string `json:\"date\"`\n\tValue float64 `json:\"val\"`\n\tAverage float64 `json:\"avg\"`\n\tSigma float64 `json:\"sigma\"`\n}\n\n\/*\n\t{\n\t\t\"date\": \"2016-02-09 09:20:00\",\n\t\t\"id\": \"anomaly-01\",\n\t\t\"lineAlpha\": 0.5,\n\t\t\"lineColor\": \"#FF0000\",\n\t\t\"lineThickness\": 5\n\t}\n*\/\ntype anomalyData struct {\n\tID string `json:\"id\"`\n\tDate string `json:\"date\",omitempty`\n\tAlpha float64 `json:\"lineAlpha\",omitempty`\n\tColor string `json:\"lineColor\"`\n\tThickness int64 `json:\"lineThickness\"`\n\tValue int64 `json:\"value\",omitempty`\n\tAxis string `json:\"valueAxis\",omitempty`\n}\n\nfunc main() {\n\tport := \"80\"\n\tif os.Getenv(\"PORT\") != \"\" {\n\t\tport = os.Getenv(\"PORT\")\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tt := time.Now()\n\n\t\tstart := r.URL.Query().Get(\"start\")\n\t\tend := r.URL.Query().Get(\"end\")\n\t\tm := r.URL.Query().Get(\"m\")\n\t\tperiodRaw := r.URL.Query().Get(\"period\")\n\n\t\tif m == \"\" {\n\t\t\tfmt.Fprintln(w, \"Берём урл от tsdb, заменяем хост и порт на текущий, вместо '\/#start' делаем '\/?start'\")\n\t\t\treturn\n\t\t}\n\n\t\tif periodRaw == \"\" {\n\t\t\tperiodRaw = \"120\"\n\t\t}\n\t\tperiod, err := strconv.Atoi(periodRaw)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to atoi: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tchart, anomalyes, err := detectAnomalyes(start, end, m, period)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to detect: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\tID: \"guide-1\",\n\t\t\tColor: \"#CC0000\",\n\t\t\tThickness: 3,\n\t\t\tValue: 1,\n\t\t\tAxis: \"ValueAxis-2\",\n\t\t})\n\n\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\tID: \"guide-1\",\n\t\t\tColor: \"#CC0000\",\n\t\t\tThickness: 3,\n\t\t\tValue: -1,\n\t\t\tAxis: \"ValueAxis-2\",\n\t\t})\n\n\t\tchartBytes, err := json.Marshal(chart)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to Marshal: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tanomalyesBytes, err := json.Marshal(anomalyes)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed to Marshal: %v\", err), http.StatusInternalServerError)\n\t\t}\n\n\t\tfmt.Fprintf(w, tpl, string(anomalyesBytes), string(chartBytes))\n\n\t\tlog.Printf(\"%s %q %v\\n\", r.Method, r.URL.String(), time.Since(t))\n\t})\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n\nfunc detectAnomalyes(start, end, metric string, period int) ([]chartData, []anomalyData, error) {\n\tloc, _ := time.LoadLocation(\"Asia\/Novosibirsk\")\n\tstartTime, err := time.ParseInLocation(\"2006\/01\/02-15:04:05\", start, loc)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse start date: %v\", err)\n\t}\n\tendTime, err := time.ParseInLocation(\"2006\/01\/02-15:04:05\", end, loc)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse end date: %v\", err)\n\t}\n\n\tdps, err := getDataPoints(tsdbHost, metric, startTime, endTime)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to get data from tsdb: %v\", err)\n\t}\n\n\tvar movingAverage float64\n\tvar sigma float64\n\tvals := make([]float64, period)\n\n\tchart := make([]chartData, 0)\n\tanomalyes := make([]anomalyData, 0)\n\n\tfor idx, cdp := range dps {\n\t\tif idx <= period {\n\t\t\tcontinue\n\t\t}\n\t\tpoints := dps[idx-period : idx]\n\t\tfor i, dp := range points {\n\t\t\tvals[i] = dp.Value\n\t\t}\n\n\t\tmovingAverage = average(vals)\n\t\tsigma = test3Sigma(vals)\n\t\tanomalyIdx := 0\n\t\tif sigma > 1.2 {\n\t\t\tanomalyIdx++\n\t\t\tanomalyes = append(anomalyes, anomalyData{\n\t\t\t\tID: fmt.Sprintf(\"anomaly-%d\", anomalyIdx),\n\t\t\t\tDate: cdp.Time.In(loc).Format(\"2006-01-02 15:04:05\"),\n\t\t\t\tAlpha: 0.5,\n\t\t\t\tColor: \"#FF0000\",\n\t\t\t\tThickness: 5,\n\t\t\t})\n\t\t}\n\t\t\/\/fmt.Printf(\"%v,%v,%v,%v\\n\", cdp.Time.Format(\"2006-01-02 15:04:05\"), cdp.Value, movingAverage, sigma)\n\n\t\tchart = append(chart, chartData{\n\t\t\tcdp.Time.In(loc).Format(\"2006-01-02 15:04:05\"),\n\t\t\tcdp.Value,\n\t\t\tmovingAverage,\n\t\t\tsigma,\n\t\t})\n\t}\n\n\treturn chart, anomalyes, nil\n}\n\nfunc average(vals []float64) float64 {\n\tvar sum float64\n\tfor i := 0; i < len(vals); i++ {\n\t\tsum += vals[i]\n\t}\n\treturn sum \/ float64(len(vals))\n}\n\n\/\/ Get the standard deviation of float64 values, with\n\/\/ an input average.\nfunc stddev(vals []float64, avg float64) float64 {\n\tvar sum float64\n\tfor i := 0; i < len(vals); i++ {\n\t\tdis := vals[i] - avg\n\t\tsum += dis * dis\n\t}\n\treturn math.Sqrt(sum \/ float64(len(vals)))\n}\n\n\/\/ Calculate metric score with 3-sigma rule.\n\/\/\n\/\/ What's the 3-sigma rule?\n\/\/\n\/\/\tstates that nearly all values (99.7%) lie within the 3 standard deviations\n\/\/\tof the mean in a normal distribution.\n\/\/\n\/\/ Also like z-score, defined as\n\/\/\n\/\/\t(val - mean) \/ stddev\n\/\/\n\/\/ And we name the below as metric score, yet 1\/3 of z-score\n\/\/\n\/\/\t(val - mean) \/ (3 * stddev)\n\/\/\n\/\/ The score has\n\/\/\n\/\/\tscore > 0 => values is trending up\n\/\/\tscore < 0 => values is trending down\n\/\/\tscore > 1 => values is anomalously trending up\n\/\/\tscore < -1 => values is anomalously trending down\n\/\/\n\/\/ The following function will set the metric score and also the average.\n\/\/\nfunc test3Sigma(vals []float64) float64 {\n\tif len(vals) == 0 {\n\t\treturn 0\n\t}\n\n\t\/\/ Values average and standard deviation.\n\tavg := average(vals)\n\tstd := stddev(vals, avg)\n\tlast := average(vals[len(vals)-3:])\n\n\tif std == 0 {\n\t\tswitch {\n\t\tcase last == avg:\n\t\t\treturn 0\n\t\tcase last > avg:\n\t\t\treturn 1\n\t\tcase last < avg:\n\t\t\treturn -1\n\t\t}\n\t\treturn 0\n\t}\n\t\/\/ 3-sigma\n\treturn math.Abs(last-avg) \/ (3 * std)\n}\n\nfunc getDataPoints(host, metric string, from, to time.Time) (DataPoints, error) {\n\tpath := url.Values{}\n\tpath.Set(\"start\", from.Format(\"2006\/01\/02-15:04:05\"))\n\tpath.Set(\"end\", to.Format(\"2006\/01\/02-15:04:05\"))\n\tpath.Set(\"m\", metric)\n\n\tresponse, err := http.Get(fmt.Sprintf(\"%s\/api\/query\/?%s\", host, path.Encode()))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get data from TSDB: %v\", err)\n\t}\n\n\tvar data []tsdbResponse\n\tif err := json.NewDecoder(response.Body).Decode(&data); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode TSDB response: %v\", err)\n\t}\n\n\tdps := make(DataPoints, 0)\n\tfor ts, val := range data[0].DataPoints {\n\t\ttimestamp, err := strconv.Atoi(ts)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdp := DataPoint{\n\t\t\tTime: time.Unix(int64(timestamp), 0),\n\t\t\tValue: val,\n\t\t}\n\t\tdps = append(dps, dp)\n\t}\n\tsort.Sort(dps)\n\n\treturn dps, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Author: Yoshiyuki Koyanagi <moutend@gmail.com>\n\/\/ License: mIT\n\n\/\/ Package main implements mediumctl.\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ericaro\/frontmatter\"\n\tmedium \"github.com\/moutend\/go-medium\"\n\t\"github.com\/skratchdot\/open-golang\/open\"\n)\n\ntype token struct {\n\tApplicationID string\n\tApplicationSecret string\n\tAccessToken string\n\tExpiresAt int\n\tRefreshToken string\n}\n\nvar (\n\tversion = \"v0.3.0\"\n\trevision = \"dev\"\n\ttokenFilePath string\n)\n\nconst tokenFileName = \".mediumctl\"\n\nfunc showPostedArticleInfo(p *medium.PostedArticle) {\n\tfmt.Printf(\"Your article was successfully posted.\\n\\n\")\n\tfmt.Printf(\"title: %s\\n\", p.Title)\n\tif p.PublishStatus == \"\" {\n\t\tfmt.Println(\"publishStatus: public\")\n\t} else {\n\t\tfmt.Printf(\"publishStatus: %s\\n\", p.PublishStatus)\n\t}\n\tif len(p.Tags) > 0 {\n\t\tfmt.Printf(\"tags: %s\\n\", strings.Join(p.Tags, \" \"))\n\t}\n\tfmt.Printf(\"URL: %s\\n\", p.URL)\n\tif p.CanonicalURL != \"\" {\n\t\tfmt.Printf(\"canonicalURL: %s\\n\", p.CanonicalURL)\n\t}\n\treturn\n}\n\nfunc parseArticle(filename string) (article medium.Article, publicationNumber int, err error) {\n\ttype FrontmatterOption struct {\n\t\tTitle string `fm:\"title\"`\n\t\tContentFormat string `fm:\"contentFormat\"`\n\t\tContent string `fm:\"content\"`\n\t\tCanonicalURL string `fm:\"canonicalUrl\"`\n\t\tTags []string `fm:\"tags\"`\n\t\tPublishStatus string `fm:\"publishStatus\"`\n\t\tPublishedAt string `fm:\"publishedAt\"`\n\t\tPublicationNumber int `fm:\"publicationNumber\"`\n\t\tLicense string `fm:\"license\"`\n\t\tNotifyFollowers bool `fm:\"notifyFollowers\"`\n\t}\n\n\tvar file []byte\n\tvar fo *FrontmatterOption = &FrontmatterOption{}\n\n\tif file, err = ioutil.ReadFile(filename); err != nil {\n\t\treturn\n\t}\n\tif err = frontmatter.Unmarshal(file, fo); err != nil {\n\t\treturn\n\t}\n\tif strings.HasSuffix(filename, \"html\") || strings.HasSuffix(filename, \"htm\") {\n\t\tfo.ContentFormat = \"html\"\n\t} else {\n\t\tfo.ContentFormat = \"markdown\"\n\t}\n\n\tarticle = medium.Article{\n\t\tTitle: fo.Title,\n\t\tContentFormat: fo.ContentFormat,\n\t\tContent: fo.Content,\n\t\tCanonicalURL: fo.CanonicalURL,\n\t\tTags: fo.Tags,\n\t\tPublishStatus: fo.PublishStatus,\n\t\tPublishedAt: fo.PublishedAt,\n\t\tLicense: fo.License,\n\t\tNotifyFollowers: false,\n\t}\n\tpublicationNumber = fo.PublicationNumber\n\n\treturn\n}\nfunc getCode(clientID string, redirectURL *url.URL) (code string, err error) {\n\tlistener, err := net.Listen(\"tcp\", redirectURL.Hostname()+\":\"+redirectURL.Port())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer listener.Close()\n\n\tresponseChann := make(chan string)\n\n\tgo http.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(`<script>window.open(\"about:blank\",\"_self\").close()<\/script>`))\n\t\tw.(http.Flusher).Flush()\n\n\t\te := req.FormValue(\"error\")\n\t\tif e != \"\" {\n\t\t\terr = fmt.Errorf(e)\n\t\t}\n\t\tresponseChann <- req.FormValue(\"code\")\n\t}))\n\n\tstateBytes := make([]byte, 88)\n\t_, err = rand.Read(stateBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\tstate := fmt.Sprintf(\"%x\", stateBytes)\n\tscope := \"basicProfile,listPublications,publishPost\"\n\tquery := fmt.Sprintf(\"client_id=%s&scope=%s&state=%s&response_type=code&redirect_uri=%s\", clientID, scope, state, redirectURL)\n\turi := \"https:\/\/medium.com\/m\/oauth\/authorize?\" + query\n\tif err = open.Start(uri); err != nil {\n\t\treturn\n\t}\n\tselect {\n\tcase code = <-responseChann:\n\t\tbreak\n\tcase <-time.After(60 * time.Second):\n\t\terr = fmt.Errorf(\"timeout\")\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc saveToken(clientID, clientSecret string, t *medium.Token) (err error) {\n\tb, err := json.Marshal(token{\n\t\tApplicationID: clientID,\n\t\tApplicationSecret: clientSecret,\n\t\tAccessToken: t.AccessToken,\n\t\tExpiresAt: t.ExpiresAt,\n\t\tRefreshToken: t.RefreshToken,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(tokenFilePath, b, 0644)\n\treturn\n}\n\nfunc loadToken() (*token, error) {\n\tb, err := ioutil.ReadFile(tokenFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"API token is not set. Please run 'auth' at first\")\n\t}\n\tvar t token\n\terr = json.Unmarshal(b, &t)\n\treturn &t, err\n}\n\nfunc main() {\n\terr := run(os.Args)\n\n\tif err != nil {\n\t\tlog.New(os.Stderr, \"error: \", 0).Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\nfunc run(args []string) (err error) {\n\tif len(args) < 2 {\n\t\treturn helpCommand(args)\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn\n\t}\n\ttokenFilePath = filepath.Join(u.HomeDir, tokenFileName)\n\tswitch args[1] {\n\tcase \"o\":\n\t\terr = authCommand(args)\n\tcase \"oauth\":\n\t\terr = authCommand(args)\n\tcase \"r\":\n\t\terr = refreshCommand(args)\n\tcase \"refresh\":\n\t\terr = refreshCommand(args)\n\tcase \"i\":\n\t\terr = infoCommand(args)\n\tcase \"info\":\n\t\terr = infoCommand(args)\n\tcase \"p\":\n\t\terr = postCommand(args, false)\n\tcase \"publication\":\n\t\terr = postCommand(args, false)\n\tcase \"u\":\n\t\terr = postCommand(args, true)\n\tcase \"user\":\n\t\terr = postCommand(args, true)\n\tcase \"v\":\n\t\terr = versionCommand(args)\n\tcase \"version\":\n\t\terr = versionCommand(args)\n\tcase \"h\":\n\t\terr = helpCommand(args)\n\tcase \"help\":\n\t\terr = helpCommand(args)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"%s: '%s' is not a %s subcommand.\\n\", args[0], args[1], args[0])\n\t\terr = helpCommand(args)\n\t}\n\treturn\n}\n\nfunc authCommand(args []string) (err error) {\n\tvar (\n\t\tclientIDFlag string\n\t\tclientSecretFlag string\n\t\tdebugFlag bool\n\t\tredirectURLFlag string\n\t\tredirectURL *url.URL\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.StringVar(&redirectURLFlag, \"u\", \"\", \"Redirect URL for OAuth application.\")\n\tf.StringVar(&clientIDFlag, \"i\", \"\", \"Client ID of OAuth application.\")\n\tf.StringVar(&clientSecretFlag, \"s\", \"\", \"Client secret of OAuth application.\")\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\tif redirectURLFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify redirect URI\")\n\t}\n\tif clientIDFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify client ID\")\n\t}\n\tif clientSecretFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify client secret\")\n\t}\n\tif redirectURL, err = url.Parse(redirectURLFlag); err != nil {\n\t\treturn\n\t}\n\n\tcode, err := getCode(clientIDFlag, redirectURL)\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(clientIDFlag, clientSecretFlag, \"\")\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\ttoken, err := c.Token(code, redirectURLFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = saveToken(clientIDFlag, clientSecretFlag, token); err != nil {\n\t\treturn\n\t}\n\tfmt.Printf(\"Your API token was successfully saved in '%s'.\\n\", tokenFilePath)\n\tfmt.Println(\"Note: This file should be treated as the password and please do NOT expose it.\")\n\treturn\n}\n\nfunc refreshCommand(args []string) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, \"\")\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\trefreshedToken, err := c.RefreshToken(t.RefreshToken)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = saveToken(t.ApplicationID, t.ApplicationSecret, refreshedToken); err != nil {\n\t\treturn\n\t}\n\tfmt.Println(\"Your API token was successfully refreshed.\")\n\treturn\n}\nfunc infoCommand(args []string) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, t.AccessToken)\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\tu, err := c.User()\n\tif err != nil {\n\t\treturn\n\t}\n\tfmt.Printf(\"You are logged in as:\\n\\n\")\n\tfmt.Printf(\"Name: %s\\n\", u.Name)\n\tfmt.Printf(\"Username: %s\\n\", u.Username)\n\tfmt.Printf(\"URL: %s\", u.URL)\n\tfmt.Printf(\"\\n\")\n\n\tps, err := u.Publications()\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(ps) == 0 {\n\t\tfmt.Println(\"You have no publications yet.\")\n\t\treturn\n\t}\n\tfmt.Printf(\"\\nYou have publication(s) below:\\n\\n\")\n\tfor i, p := range ps {\n\t\tfmt.Printf(\"Number: %d\\n\", i)\n\t\tfmt.Printf(\"Name: %s\\n\", p.Name)\n\t\tfmt.Printf(\"Description: %s\\n\", p.Description)\n\t\tfmt.Printf(\"URL: %s\\n\\n\", p.URL)\n\t}\n\treturn\n}\n\nfunc postCommand(args []string, userFlag bool) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tarticle, publicationNumber, err := parseArticle(f.Args()[0])\n\tif err != nil {\n\t\treturn\n\t}\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, t.AccessToken)\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\tu, err := c.User()\n\tif err != nil {\n\t\treturn\n\t}\n\tif userFlag {\n\t\tp, err := u.Post(article)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tshowPostedArticleInfo(p)\n\t\treturn nil\n\t}\n\tps, err := u.Publications()\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(ps) == 0 {\n\t\treturn fmt.Errorf(\"you have no publications yet\")\n\t}\n\tif publicationNumber < 0 || publicationNumber > len(ps)-1 {\n\t\terr = fmt.Errorf(\"publication number '%d' is invalid\", publicationNumber)\n\t\treturn\n\t}\n\tp, err := ps[publicationNumber].Post(article)\n\tif err != nil {\n\t\treturn\n\t}\n\tshowPostedArticleInfo(p)\n\treturn\n}\n\nfunc versionCommand(args []string) (err error) {\n\tfmt.Printf(\"%s-%s\\n\", version, revision)\n\treturn\n}\n\nfunc helpCommand(args []string) (err error) {\n\tfmt.Println(`usage: mediumctl <command> [options]\n\nCommands:\n oauth, o\n Setting up API token with OAuth.\n refresh, r\n Refresh existing API token.\n info, i\n Show the information about current user and its publications.\n user, u\n Post HTML or Markdown file to current user profile.\n publication, p\n Post HTML or Markdown file to current user's publication.\n version, v\n Show version and revision information.\n help, h\n Show this message.\n\nFor more information, please see https:\/\/github.com\/moutend\/mediumctl.`)\n\treturn\n}\n<commit_msg>Refactor<commit_after>\/\/ Author: Yoshiyuki Koyanagi <moutend@gmail.com>\n\/\/ License: mIT\n\n\/\/ Package main implements mediumctl.\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ericaro\/frontmatter\"\n\tmedium \"github.com\/moutend\/go-medium\"\n\t\"github.com\/skratchdot\/open-golang\/open\"\n)\n\ntype token struct {\n\tApplicationID string\n\tApplicationSecret string\n\tAccessToken string\n\tExpiresAt int\n\tRefreshToken string\n}\n\nvar (\n\tversion = \"v0.3.0\"\n\trevision = \"dev\"\n\ttokenFilePath string\n)\n\nconst tokenFileName = \".mediumctl\"\n\nfunc showPostedArticleInfo(p *medium.PostedArticle) {\n\tfmt.Printf(\"Your article was successfully posted.\\n\\n\")\n\tfmt.Printf(\"title: %s\\n\", p.Title)\n\tif p.PublishStatus == \"\" {\n\t\tfmt.Println(\"publishStatus: public\")\n\t} else {\n\t\tfmt.Printf(\"publishStatus: %s\\n\", p.PublishStatus)\n\t}\n\tif len(p.Tags) > 0 {\n\t\tfmt.Printf(\"tags: %s\\n\", strings.Join(p.Tags, \" \"))\n\t}\n\tfmt.Printf(\"URL: %s\\n\", p.URL)\n\tif p.CanonicalURL != \"\" {\n\t\tfmt.Printf(\"canonicalURL: %s\\n\", p.CanonicalURL)\n\t}\n\treturn\n}\n\nfunc parseArticle(filename string) (article medium.Article, publicationNumber int, err error) {\n\ttype FrontmatterOption struct {\n\t\tTitle string `fm:\"title\"`\n\t\tContentFormat string `fm:\"contentFormat\"`\n\t\tContent string `fm:\"content\"`\n\t\tCanonicalURL string `fm:\"canonicalUrl\"`\n\t\tTags []string `fm:\"tags\"`\n\t\tPublishStatus string `fm:\"publishStatus\"`\n\t\tPublishedAt string `fm:\"publishedAt\"`\n\t\tPublicationNumber int `fm:\"publicationNumber\"`\n\t\tLicense string `fm:\"license\"`\n\t\tNotifyFollowers bool `fm:\"notifyFollowers\"`\n\t}\n\n\tvar file []byte\n\tvar fo *FrontmatterOption = &FrontmatterOption{}\n\n\tif file, err = ioutil.ReadFile(filename); err != nil {\n\t\treturn\n\t}\n\tif err = frontmatter.Unmarshal(file, fo); err != nil {\n\t\treturn\n\t}\n\tif strings.HasSuffix(filename, \"html\") || strings.HasSuffix(filename, \"htm\") {\n\t\tfo.ContentFormat = \"html\"\n\t} else {\n\t\tfo.ContentFormat = \"markdown\"\n\t}\n\n\tarticle = medium.Article{\n\t\tTitle: fo.Title,\n\t\tContentFormat: fo.ContentFormat,\n\t\tContent: fo.Content,\n\t\tCanonicalURL: fo.CanonicalURL,\n\t\tTags: fo.Tags,\n\t\tPublishStatus: fo.PublishStatus,\n\t\tPublishedAt: fo.PublishedAt,\n\t\tLicense: fo.License,\n\t\tNotifyFollowers: false,\n\t}\n\tpublicationNumber = fo.PublicationNumber\n\n\treturn\n}\nfunc getCode(clientID string, redirectURL *url.URL) (code string, err error) {\n\tlistener, err := net.Listen(\"tcp\", redirectURL.Hostname()+\":\"+redirectURL.Port())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer listener.Close()\n\n\tresponseChann := make(chan string)\n\n\tgo http.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(`<script>window.open(\"about:blank\",\"_self\").close()<\/script>`))\n\t\tw.(http.Flusher).Flush()\n\n\t\te := req.FormValue(\"error\")\n\t\tif e != \"\" {\n\t\t\terr = fmt.Errorf(e)\n\t\t}\n\t\tresponseChann <- req.FormValue(\"code\")\n\t}))\n\n\tstateBytes := make([]byte, 88)\n\t_, err = rand.Read(stateBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\tstate := fmt.Sprintf(\"%x\", stateBytes)\n\tscope := \"basicProfile,listPublications,publishPost\"\n\tquery := fmt.Sprintf(\"client_id=%s&scope=%s&state=%s&response_type=code&redirect_uri=%s\", clientID, scope, state, redirectURL)\n\turi := \"https:\/\/medium.com\/m\/oauth\/authorize?\" + query\n\tif err = open.Start(uri); err != nil {\n\t\treturn\n\t}\n\tselect {\n\tcase code = <-responseChann:\n\t\tbreak\n\tcase <-time.After(60 * time.Second):\n\t\terr = fmt.Errorf(\"timeout\")\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc saveToken(clientID, clientSecret string, t *medium.Token) (err error) {\n\tb, err := json.Marshal(token{\n\t\tApplicationID: clientID,\n\t\tApplicationSecret: clientSecret,\n\t\tAccessToken: t.AccessToken,\n\t\tExpiresAt: t.ExpiresAt,\n\t\tRefreshToken: t.RefreshToken,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(tokenFilePath, b, 0644)\n\treturn\n}\n\nfunc loadToken() (*token, error) {\n\tb, err := ioutil.ReadFile(tokenFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"API token is not set. Please run 'auth' at first\")\n\t}\n\tvar t token\n\terr = json.Unmarshal(b, &t)\n\treturn &t, err\n}\n\nfunc main() {\n\terr := run(os.Args)\n\n\tif err != nil {\n\t\tlog.New(os.Stderr, \"error: \", 0).Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\nfunc run(args []string) (err error) {\n\tif len(args) < 2 {\n\t\treturn helpCommand(args)\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn\n\t}\n\ttokenFilePath = filepath.Join(u.HomeDir, tokenFileName)\n\tswitch args[1] {\n\tcase \"o\":\n\t\terr = authCommand(args)\n\tcase \"oauth\":\n\t\terr = authCommand(args)\n\tcase \"r\":\n\t\terr = refreshCommand(args)\n\tcase \"refresh\":\n\t\terr = refreshCommand(args)\n\tcase \"i\":\n\t\terr = infoCommand(args)\n\tcase \"info\":\n\t\terr = infoCommand(args)\n\tcase \"p\":\n\t\terr = postCommand(args, false)\n\tcase \"publication\":\n\t\terr = postCommand(args, false)\n\tcase \"u\":\n\t\terr = postCommand(args, true)\n\tcase \"user\":\n\t\terr = postCommand(args, true)\n\tcase \"v\":\n\t\terr = versionCommand(args)\n\tcase \"version\":\n\t\terr = versionCommand(args)\n\tcase \"h\":\n\t\terr = helpCommand(args)\n\tcase \"help\":\n\t\terr = helpCommand(args)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"%s: '%s' is not a %s subcommand.\\n\", args[0], args[1], args[0])\n\t\terr = helpCommand(args)\n\t}\n\treturn\n}\n\nfunc authCommand(args []string) (err error) {\n\tvar clientIDFlag string\n\tvar clientSecretFlag string\n\tvar debugFlag bool\n\tvar redirectURLFlag string\n\tvar redirectURL *url.URL\n\tvar code string\n\tvar token *medium.Token\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.StringVar(&redirectURLFlag, \"u\", \"\", \"Redirect URL for OAuth application.\")\n\tf.StringVar(&clientIDFlag, \"i\", \"\", \"Client ID of OAuth application.\")\n\tf.StringVar(&clientSecretFlag, \"s\", \"\", \"Client secret of OAuth application.\")\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tif redirectURLFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify redirect URL\")\n\t}\n\tif clientIDFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify client ID\")\n\t}\n\tif clientSecretFlag == \"\" {\n\t\treturn fmt.Errorf(\"please specify client secret\")\n\t}\n\tif redirectURL, err = url.Parse(redirectURLFlag); err != nil {\n\t\treturn\n\t}\n\tif code, err = getCode(clientIDFlag, redirectURL); err != nil {\n\t\treturn\n\t}\n\n\tc := medium.NewClient(clientIDFlag, clientSecretFlag, \"\")\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\tif token, err = c.Token(code, redirectURLFlag); err != nil {\n\t\treturn\n\t}\n\tif err = saveToken(clientIDFlag, clientSecretFlag, token); err != nil {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Your API token was successfully saved in '%s'.\\n\", tokenFilePath)\n\tfmt.Println(\"Note: This file should be treated as the password and please do NOT expose it.\")\n\n\treturn\n}\n\nfunc refreshCommand(args []string) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, \"\")\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\trefreshedToken, err := c.RefreshToken(t.RefreshToken)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = saveToken(t.ApplicationID, t.ApplicationSecret, refreshedToken); err != nil {\n\t\treturn\n\t}\n\tfmt.Println(\"Your API token was successfully refreshed.\")\n\treturn\n}\nfunc infoCommand(args []string) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, t.AccessToken)\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\tu, err := c.User()\n\tif err != nil {\n\t\treturn\n\t}\n\tfmt.Printf(\"You are logged in as:\\n\\n\")\n\tfmt.Printf(\"Name: %s\\n\", u.Name)\n\tfmt.Printf(\"Username: %s\\n\", u.Username)\n\tfmt.Printf(\"URL: %s\", u.URL)\n\tfmt.Printf(\"\\n\")\n\n\tps, err := u.Publications()\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(ps) == 0 {\n\t\tfmt.Println(\"You have no publications yet.\")\n\t\treturn\n\t}\n\tfmt.Printf(\"\\nYou have publication(s) below:\\n\\n\")\n\tfor i, p := range ps {\n\t\tfmt.Printf(\"Number: %d\\n\", i)\n\t\tfmt.Printf(\"Name: %s\\n\", p.Name)\n\t\tfmt.Printf(\"Description: %s\\n\", p.Description)\n\t\tfmt.Printf(\"URL: %s\\n\\n\", p.URL)\n\t}\n\treturn\n}\n\nfunc postCommand(args []string, userFlag bool) (err error) {\n\tvar (\n\t\tdebugFlag bool\n\t)\n\n\tf := flag.NewFlagSet(fmt.Sprintf(\"%s %s\", args[0], args[1]), flag.ExitOnError)\n\tf.BoolVar(&debugFlag, \"debug\", false, \"Enable debug output.\")\n\tf.Parse(args[2:])\n\n\tarticle, publicationNumber, err := parseArticle(f.Args()[0])\n\tif err != nil {\n\t\treturn\n\t}\n\tt, err := loadToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tc := medium.NewClient(t.ApplicationID, t.ApplicationSecret, t.AccessToken)\n\tif debugFlag {\n\t\tc.SetLogger(log.New(os.Stdout, \"debug: \", 0))\n\t}\n\tu, err := c.User()\n\tif err != nil {\n\t\treturn\n\t}\n\tif userFlag {\n\t\tp, err := u.Post(article)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tshowPostedArticleInfo(p)\n\t\treturn nil\n\t}\n\tps, err := u.Publications()\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(ps) == 0 {\n\t\treturn fmt.Errorf(\"you have no publications yet\")\n\t}\n\tif publicationNumber < 0 || publicationNumber > len(ps)-1 {\n\t\terr = fmt.Errorf(\"publication number '%d' is invalid\", publicationNumber)\n\t\treturn\n\t}\n\tp, err := ps[publicationNumber].Post(article)\n\tif err != nil {\n\t\treturn\n\t}\n\tshowPostedArticleInfo(p)\n\treturn\n}\n\nfunc versionCommand(args []string) (err error) {\n\tfmt.Printf(\"%s-%s\\n\", version, revision)\n\treturn\n}\n\nfunc helpCommand(args []string) (err error) {\n\tfmt.Println(`usage: mediumctl <command> [options]\n\nCommands:\n oauth, o\n Setting up API token with OAuth.\n refresh, r\n Refresh existing API token.\n info, i\n Show the information about current user and its publications.\n user, u\n Post HTML or Markdown file to current user profile.\n publication, p\n Post HTML or Markdown file to current user's publication.\n version, v\n Show version and revision information.\n help, h\n Show this message.\n\nFor more information, please see https:\/\/github.com\/moutend\/mediumctl.`)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/bigroom\/vision\/tunnel\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/nickvanw\/ircx\"\n)\n\nvar (\n\tmessages chan tunnel.MessageArgs\n\tclients map[string][]*conn\n)\n\nfunc main() {\n\tvar (\n\t\thost = \"localhost\"\n\t\tport = \"8080\"\n\t)\n\n\tmessages = make(chan tunnel.MessageArgs)\n\tclients = make(map[string][]*conn)\n\n\tgo tunnel.NewRPCServer(messages, host, port)\n\tgo messageLoop()\n\n\thttp.HandleFunc(\"\/\", dispatchHandler)\n\n\tlog.Println(http.ListenAndServe(\"localhost:6060\", nil))\n}\n\nfunc messageLoop() {\n\tfor {\n\t\tlog.Println(\"Waiting on message...\")\n\t\tm := <-messages\n\t\tlog.Println(\"Got message\")\n\t\tlog.Printf(\"Sending to channel with key: %v\", m.Key())\n\n\t\tfor _, u := range clients[m.Key()] {\n\t\t\tfmt.Println(\"Writing message\")\n\t\t\terr := u.c.WriteJSON(m)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error (sending message):\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar upgrader = websocket.Upgrader{\n\tCheckOrigin: func(r *http.Request) bool { return true },\n}\n\nfunc dispatchHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Add(\"Access-Control-Allow-Methods\", \"PUT\")\n\tw.Header().Add(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(\"couldn't upgrade:\", err)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\tuser := &conn{\n\t\tc: c,\n\t}\n\n\tfor {\n\t\tvar a action\n\t\terr := user.c.ReadJSON(&a)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error reading:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif a.Name == \"SET\" {\n\t\t\tlog.Println(\"User joined channel\", a.Message)\n\t\t\tclients[a.Message] = append(clients[a.Message], user)\n\t\t} else if a.Name == \"SEND\" {\n\t\t\tlog.Printf(\"Sending message '%s' to channel '%s'\", a.Message, a.Channel)\n\t\t}\n\n\t}\n}\n\ntype conn struct {\n\tc *websocket.Conn\n\tirc *ircx.Bot\n}\n\ntype action struct {\n\tName string `json:\"name\"`\n\tMessage string `json:\"message\"`\n\tChannel string `json:\"channel\"`\n}\n<commit_msg>Add zombie IRC bots sending input<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\n\t\"github.com\/bigroom\/vision\/tunnel\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/nickvanw\/ircx\"\n\t\"github.com\/sorcix\/irc\"\n)\n\nvar (\n\tmessages chan tunnel.MessageArgs\n\tclients map[string][]*conn\n)\n\nfunc main() {\n\tvar (\n\t\thost = \"localhost\"\n\t\tport = \"8080\"\n\t)\n\n\tmessages = make(chan tunnel.MessageArgs)\n\tclients = make(map[string][]*conn)\n\n\tgo tunnel.NewRPCServer(messages, host, port)\n\tgo messageLoop()\n\n\thttp.HandleFunc(\"\/\", dispatchHandler)\n\n\tlog.Println(http.ListenAndServe(\"localhost:6060\", nil))\n}\n\nfunc messageLoop() {\n\tfor {\n\t\tlog.Println(\"Waiting on message...\")\n\t\tm := <-messages\n\t\tlog.Println(\"Got message\")\n\t\tlog.Printf(\"Sending to channel with key: %v\", m.Key())\n\n\t\tfor _, u := range clients[m.Key()] {\n\t\t\tfmt.Println(\"Writing message\")\n\t\t\terr := u.c.WriteJSON(m)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error (sending message):\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar upgrader = websocket.Upgrader{\n\tCheckOrigin: func(r *http.Request) bool { return true },\n}\n\nfunc dispatchHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Add(\"Access-Control-Allow-Methods\", \"PUT\")\n\tw.Header().Add(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(\"couldn't upgrade:\", err)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\tzombie := ircx.Classic(\"chat.freenode.net:6667\", fmt.Sprintf(\"roombot%v\", rand.Intn(999)))\n\tif err != nil {\n\t\tlog.Println(\"Could not connect to IRC\")\n\t}\n\n\tif err := zombie.Connect(); err != nil {\n\t\tlog.Println(\"Unable to connect to IRC\", err)\n\t}\n\n\tuser := &conn{\n\t\tc: c,\n\t\tirc: zombie,\n\t\tmessages: make(chan string),\n\t}\n\n\tzombie.HandleFunc(irc.PING, user.pingHandler)\n\tzombie.HandleFunc(irc.RPL_WELCOME, user.registerHandler)\n\tzombie.HandleFunc(irc.JOIN, user.messageHandler)\n\n\tgo zombie.HandleLoop()\n\n\tfor {\n\t\tvar a action\n\t\terr := user.c.ReadJSON(&a)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error reading:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif a.Name == \"SET\" {\n\t\t\tlog.Println(\"User joined channel\", a.Message)\n\t\t\tclients[a.Message] = append(clients[a.Message], user)\n\t\t} else if a.Name == \"SEND\" {\n\t\t\tlog.Printf(\"Sending message '%s' to channel '%s'\", a.Message, a.Channel)\n\t\t\tuser.messages <- a.Message\n\t\t}\n\n\t}\n}\n\ntype conn struct {\n\tc *websocket.Conn\n\tirc *ircx.Bot\n\tmessages chan string\n}\n\ntype action struct {\n\tName string `json:\"name\"`\n\tMessage string `json:\"message\"`\n\tChannel string `json:\"channel\"`\n}\n\nfunc (c *conn) messageHandler(s ircx.Sender, m *irc.Message) {\n\tgo func() {\n\t\tfor {\n\t\t\tlog.Println(\"Waiting for message\")\n\t\t\tmsg := <-c.messages\n\t\t\tlog.Println(\"Got message\", msg)\n\t\t\ts.Send(&irc.Message{\n\t\t\t\tCommand: irc.PRIVMSG,\n\t\t\t\tParams: []string{\"#roomtest\"},\n\t\t\t\tTrailing: msg,\n\t\t\t})\n\t\t\tlog.Println(\"Message sent\")\n\t\t}\n\t}()\n}\n\nfunc (c *conn) registerHandler(s ircx.Sender, m *irc.Message) {\n\tlog.Println(\"Registering\")\n\ts.Send(&irc.Message{\n\t\tCommand: irc.JOIN,\n\t\tParams: []string{\"#roomtest\"},\n\t})\n}\n\nfunc (c *conn) pingHandler(s ircx.Sender, m *irc.Message) {\n\ts.Send(&irc.Message{\n\t\tCommand: irc.PONG,\n\t\tParams: m.Params,\n\t\tTrailing: m.Trailing,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/smtp\"\n\t\"strings\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/jasonlvhit\/gocron\"\n)\n\nfunc ExampleScrape() {\n\tdoc, err := goquery.NewDocument(\"http:\/\/www.stm.info\/fr\/infos\/etat-du-service\/metro\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdoc.Find(\"div.content-block section.item\").Each(func(i int, s *goquery.Selection) {\n\t\tcouleur := s.Find(\"h2\").Text()\n\t\tcouleur = strings.Trim(couleur, \" \")\n\t\tcouleur = strings.Replace(couleur, \"\\n\", \"\", -1)\n\t\tetat := s.Find(\"p\").Text()\n\t\tetat = strings.Trim(etat, \" \")\n\t\tetat = strings.Replace(etat, \"\\n\", \"\", -1)\n<<<<<<< HEAD\n\t\tfmt.Printf(\"%s - %s\\n\", couleur, etat)\n\n=======\n\t\tif strings.Contains(etat, \"Interruption de service\") {\n\t\t\tcolor.Red(\"%s - %s\\n\", couleur, etat)\n\t\t\tsendmailInteruption(couleur+etat, couleur)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s - %s\\n\", couleur, etat)\n\t\t}\n>>>>>>> 7c248485228c3915ad571ee0bb1fb45d07d132dc\n\t})\n}\n\nfunc sendmailInteruption(body string, couleur string) {\n\tfrom := \"stminfogo@gmail.com\"\n\tpass := \"allostminfo\"\n\tto := \"fprieur@gmail.com\"\n\n\tmsg := \"From: \" + from + \"\\n\" +\n\t\t\"To: \" + to + \"\\n\" +\n\t\t\"Subject: Interruption sur la ligne \" + couleur + \"\\n\\n\" +\n\t\tbody\n\n\terr := smtp.SendMail(\"smtp.gmail.com:587\",\n\t\tsmtp.PlainAuth(\"\", from, pass, \"smtp.gmail.com\"),\n\t\tfrom, []string{to}, []byte(msg))\n\n\tif err != nil {\n\t\tlog.Printf(\"smtp error: %s\", err)\n\t\treturn\n\t}\n\n\tlog.Print(\"mail sent\")\n}\n\nfunc main() {\n\n\tExampleScrape()\n\n\n\n\n\tgocron.Every(1).Minute().Do(ExampleScrape)\n\t<-gocron.Start()\n}\n<commit_msg>testing sendmail on interruption<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/smtp\"\n\t\"strings\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/jasonlvhit\/gocron\"\n)\n\nfunc ExampleScrape() {\n\tdoc, err := goquery.NewDocument(\"http:\/\/www.stm.info\/fr\/infos\/etat-du-service\/metro\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdoc.Find(\"div.content-block section.item\").Each(func(i int, s *goquery.Selection) {\n\t\tcouleur := s.Find(\"h2\").Text()\n\t\tcouleur = strings.Trim(couleur, \" \")\n\t\tcouleur = strings.Replace(couleur, \"\\n\", \"\", -1)\n\t\tetat := s.Find(\"p\").Text()\n\t\tetat = strings.Trim(etat, \" \")\n\t\tetat = strings.Replace(etat, \"\\n\", \"\", -1)\n\t\tif strings.Contains(etat, \"Interruption de service\") {\n\t\t\tcolor.Red(\"%s - %s\\n\", couleur, etat)\n\t\t\tsendmailInteruption(couleur+etat, couleur)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s - %s\\n\", couleur, etat)\n\t\t}\n\t})\n}\n\nfunc sendmailInteruption(body string, couleur string) {\n\tfrom := \"stminfogo@gmail.com\"\n\tpass := \"allostminfo\"\n\tto := \"fprieur@gmail.com\"\n\n\tmsg := \"From: \" + from + \"\\n\" +\n\t\t\"To: \" + to + \"\\n\" +\n\t\t\"Subject: Interruption sur la ligne \" + couleur + \"\\n\\n\" +\n\t\tbody\n\n\terr := smtp.SendMail(\"smtp.gmail.com:587\",\n\t\tsmtp.PlainAuth(\"\", from, pass, \"smtp.gmail.com\"),\n\t\tfrom, []string{to}, []byte(msg))\n\n\tif err != nil {\n\t\tlog.Printf(\"smtp error: %s\", err)\n\t\treturn\n\t}\n\n\tlog.Print(\"mail sent\")\n}\n\nfunc main() {\n\n\tExampleScrape()\n\n\tgocron.Every(1).Minute().Do(ExampleScrape)\n\t<-gocron.Start()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nconst (\n\tHeartbeatEvent = \"H\"\n\tQuery = \"Q\"\n\tVerdict = \"V\"\n\n\tHeaderLines = 5\n)\n\nvar (\n\tmgcDataPath string\n\tnode string\n\teventType string\n)\n\ntype Tuple struct {\n\tNode string\n\tEventType string\n\tEventTime string\n\tSeqNo int\n\tDelay string\n\tVerdict string\n}\n\ntype MgcTuples []Tuple\n\nfunc (slice MgcTuples) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice MgcTuples) Less(i, j int) bool {\n\tl, erl := strconv.Atoi(slice[i].EventTime)\n\tcheck(erl)\n\tr, err := strconv.Atoi(slice[j].EventTime)\n\tcheck(err)\n\treturn l < r\n}\n\nfunc (slice MgcTuples) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tmgcRaw, err := ioutil.ReadFile(mgcDataPath)\n\tcheck(err)\n\n\tmgcLines := strings.Split(string(mgcRaw), \"\\n\")\n\tmgcTuples := make(MgcTuples, len(mgcLines)-HeaderLines-1)\n\tidx := 0\n\n\tfor i, l := range mgcLines {\n\t\tif i < HeaderLines || len(l) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvals := strings.Fields(l)\n\t\tmgcTuples[idx].Node = strings.Trim(vals[0], \"<>:\")\n\n\t\tf := func(c rune) bool {\n\t\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t\t}\n\t\titems := strings.FieldsFunc(vals[4], f)\n\t\tmgcTuples[idx].EventType = items[0]\n\t\tmgcTuples[idx].EventTime = items[1]\n\t\tmgcTuples[idx].SeqNo, err = strconv.Atoi(items[2])\n\t\tcheck(err)\n\t\tif mgcTuples[idx].EventType == Query {\n\t\t\tmgcTuples[idx].Delay = items[3]\n\t\t} else if mgcTuples[idx].EventType == HeartbeatEvent {\n\t\t\tmgcTuples[idx].Delay = items[3]\n\t\t} else if mgcTuples[idx].EventType == Verdict {\n\t\t\tmgcTuples[idx].Verdict = items[3]\n\t\t}\n\n\t\tidx++\n\t}\n\n\tsort.Sort(mgcTuples)\n\toutput(mgcTuples)\n}\n\nfunc output(mgcTuples []Tuple) {\n\tfor _, t := range mgcTuples {\n\t\tif t.Node == node {\n\t\t\tif eventType == HeartbeatEvent && t.EventType == HeartbeatEvent {\n\t\t\t\tfmt.Printf(\"%s,%d,%s\\n\", t.EventTime, t.SeqNo, t.Delay)\n\t\t\t} else if eventType == Query && t.EventType == Query {\n\t\t\t\tfmt.Printf(\"%s,%s\\n\", t.EventTime, t.Delay)\n\t\t\t} else if t.EventType == Verdict {\n\t\t\t\tfmt.Printf(\"%s,%s\\n\", t.EventTime, t.Verdict)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.StringVar(&mgcDataPath, \"file\", \"\",\n\t\t\"mgc data file\")\n\n\tflag.StringVar(&node, \"node\", \"\",\n\t\t\"node id\")\n\n\tflag.StringVar(&eventType, \"type\", \"\",\n\t\t\"event type [(H)eartbeat, (Q)uery, (V)erdict]\")\n}\n<commit_msg>Modify output<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nconst (\n\tHeartbeatEvent = \"H\"\n\tQuery = \"Q\"\n\tVerdict = \"V\"\n\n\tHeaderLines = 5\n)\n\nvar (\n\tmgcDataPath string\n\tnode string\n\teventType string\n)\n\ntype Tuple struct {\n\tNode string\n\tEventType string\n\tEventTime string\n\tSeqNo int\n\tDelay string\n\tVerdict string\n}\n\ntype MgcTuples []Tuple\n\nfunc (slice MgcTuples) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice MgcTuples) Less(i, j int) bool {\n\tl, erl := strconv.Atoi(slice[i].EventTime)\n\tcheck(erl)\n\tr, err := strconv.Atoi(slice[j].EventTime)\n\tcheck(err)\n\treturn l < r\n}\n\nfunc (slice MgcTuples) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tmgcRaw, err := ioutil.ReadFile(mgcDataPath)\n\tcheck(err)\n\n\tmgcLines := strings.Split(string(mgcRaw), \"\\n\")\n\tmgcTuples := make(MgcTuples, len(mgcLines)-HeaderLines-1)\n\tidx := 0\n\n\tfor i, l := range mgcLines {\n\t\tif i < HeaderLines || len(l) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvals := strings.Fields(l)\n\t\tmgcTuples[idx].Node = strings.Trim(vals[0], \"<>:\")\n\n\t\tf := func(c rune) bool {\n\t\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t\t}\n\t\titems := strings.FieldsFunc(vals[4], f)\n\t\tmgcTuples[idx].EventType = items[0]\n\t\tmgcTuples[idx].EventTime = items[1]\n\t\tmgcTuples[idx].SeqNo, err = strconv.Atoi(items[2])\n\t\tcheck(err)\n\t\tif mgcTuples[idx].EventType == Query {\n\t\t\tmgcTuples[idx].Delay = items[3]\n\t\t} else if mgcTuples[idx].EventType == HeartbeatEvent {\n\t\t\tmgcTuples[idx].Delay = items[3]\n\t\t} else if mgcTuples[idx].EventType == Verdict {\n\t\t\tmgcTuples[idx].Verdict = items[3]\n\t\t}\n\n\t\tidx++\n\t}\n\n\tsort.Sort(mgcTuples)\n\toutput(mgcTuples)\n}\n\nfunc output(mgcTuples []Tuple) {\n\tfor _, t := range mgcTuples {\n\t\tif t.Node == node {\n\t\t\tif eventType == HeartbeatEvent && t.EventType == HeartbeatEvent {\n\t\t\t\tfmt.Printf(\"%s,%s\\n\", t.EventTime, t.Delay)\n\t\t\t} else if eventType == Query && t.EventType == Query {\n\t\t\t\tfmt.Printf(\"%s,%s\\n\", t.EventTime, t.Delay)\n\t\t\t} else if eventType == Verdict && t.EventType == Verdict {\n\t\t\t\tfmt.Printf(\"%s,%s\\n\", t.EventTime, t.Verdict)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.StringVar(&mgcDataPath, \"file\", \"\",\n\t\t\"mgc data file\")\n\n\tflag.StringVar(&node, \"node\", \"\",\n\t\t\"node id\")\n\n\tflag.StringVar(&eventType, \"type\", \"\",\n\t\t\"event type [(H)eartbeat, (Q)uery, (V)erdict]\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tversion = \"0.8.9\"\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\th := \"Usage:\\n\"\n\t\th += \" bozr [OPTIONS] (DIR|FILE)\\n\\n\"\n\n\t\th += \"Options:\\n\"\n\t\th += \" -d, --debug\t\tEnable debug mode\\n\"\n\t\th += \" -H, --host\t\tServer to test\\n\"\n\t\th += \" -w, --worker\t\tExecute in parallel with specified number of workers\\n\"\n\t\th += \" -h, --help\t\tPrint usage\\n\"\n\t\th += \" -i, --info\t\tEnable info mode. Print request and response details\\n\"\n\t\th += \" --junit\t\tEnable junit xml reporter\\n\"\n\t\th += \" --junit-output\tDestination for junit report files\\n\"\n\t\th += \" -v, --version\t\tPrint version information and quit\\n\\n\"\n\n\t\th += \"Examples:\\n\"\n\t\th += \" bozr .\/examples\\n\"\n\t\th += \" bozr -w 2 .\/examples\\n\"\n\t\th += \" bozr -H http:\/\/example.com .\/examples \\n\"\n\n\t\tfmt.Fprintf(os.Stderr, h)\n\t}\n}\n\nvar (\n\tsuitesDir string\n\thostFlag string\n\troutinesFlag int\n\tinfoFlag bool\n\tdebugFlag bool\n\thelpFlag bool\n\tversionFlag bool\n\tjunitFlag bool\n\tjunitOutputFlag string\n\n\tinfo *log.Logger\n\tdebug *log.Logger\n)\n\nconst suiteExt = \".suite.json\"\n\nfunc initLogger() {\n\tinfoHandler := ioutil.Discard\n\tdebugHandler := ioutil.Discard\n\n\tif infoFlag {\n\t\tinfoHandler = os.Stdout\n\t}\n\n\tif debugFlag {\n\t\tdebugHandler = os.Stdout\n\t}\n\n\tinfo = log.New(infoHandler, \"\", 0)\n\tdebug = log.New(debugHandler, \"DEBUG: \", log.Ltime|log.Lshortfile)\n}\n\nfunc main() {\n\tflag.BoolVar(&debugFlag, \"d\", false, \"Enable debug mode.\")\n\tflag.BoolVar(&debugFlag, \"debug\", false, \"Enable debug mode\")\n\n\tflag.BoolVar(&infoFlag, \"i\", false, \"Enable info mode. Print request and response details.\")\n\tflag.BoolVar(&infoFlag, \"info\", false, \"Enable info mode. Print request and response details.\")\n\n\tflag.StringVar(&hostFlag, \"H\", \"\", \"Test server address. Example: http:\/\/example.com\/api.\")\n\tflag.IntVar(&routinesFlag, \"w\", 1, \"Execute test sutes in parallel with provided numer of workers. Default is 1.\")\n\n\tflag.BoolVar(&helpFlag, \"h\", false, \"Print usage\")\n\tflag.BoolVar(&helpFlag, \"help\", false, \"Print usage\")\n\n\tflag.BoolVar(&versionFlag, \"v\", false, \"Print version information and quit\")\n\tflag.BoolVar(&versionFlag, \"version\", false, \"Print version information and quit\")\n\n\tflag.BoolVar(&junitFlag, \"junit\", false, \"Enable junit xml reporter\")\n\tflag.StringVar(&junitOutputFlag, \"junit-output\", \".\/report\", \"Destination for junit report files. Default \")\n\n\tflag.Parse()\n\n\tinitLogger()\n\n\tif versionFlag {\n\t\tfmt.Println(\"bozr version \" + version)\n\t\treturn\n\t}\n\n\tif helpFlag {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\tif len(hostFlag) > 0 {\n\t\t_, err := url.ParseRequestURI(hostFlag)\n\t\tif err != nil {\n\t\t\tterminate(\"Invalid host is specified.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif routinesFlag < 1 || routinesFlag > 9 {\n\t\tfmt.Println(\"Invalid routines parameter [\", routinesFlag, \"]. Setting to default [1]\")\n\t\troutinesFlag = 1\n\t}\n\n\tsuitesDir = flag.Arg(0)\n\n\tif suitesDir == \"\" {\n\t\tterminate(\"You must specify a directory or file with tests.\")\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\t\/\/ check specified source dir\/file exists\n\t_, err := os.Lstat(suitesDir)\n\tif err != nil {\n\t\tterminate(err.Error())\n\t\treturn\n\t}\n\n\terr = ValidateSuites(suitesDir, suiteExt)\n\tif err != nil {\n\t\tterminate(\"One or more test suites are invalid.\", err.Error())\n\t\treturn\n\t}\n\n\tloader := NewSuiteLoader(suitesDir, suiteExt)\n\treporter := createReporter()\n\n\tRunParallel(loader, reporter, runSuite, routinesFlag)\n}\n\nfunc runSuite(suite TestSuite) []TestResult {\n\tresults := []TestResult{}\n\n\tfor _, testCase := range suite.Cases {\n\n\t\tresult := TestResult{\n\t\t\tSuite: suite,\n\t\t\tCase: testCase,\n\t\t\tExecFrame: TimeFrame{Start: time.Now(), End: time.Now()},\n\t\t}\n\n\t\tif testCase.Ignore != nil {\n\t\t\tresult.Skipped = true\n\t\t\tresult.SkippedMsg = *testCase.Ignore\n\n\t\t\tresults = append(results, result)\n\t\t\tcontinue\n\t\t}\n\n\t\trememberedMap := make(map[string]interface{})\n\t\tfor _, c := range testCase.Calls {\n\t\t\taddAll(c.Args, rememberedMap)\n\t\t\tterr := call(suite.Dir, c, rememberedMap)\n\t\t\tif terr != nil {\n\t\t\t\tresult.Error = terr\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tresult.ExecFrame.End = time.Now()\n\n\t\tresults = append(results, result)\n\t}\n\n\treturn results\n}\n\nfunc addAll(src, target map[string]interface{}) {\n\tfor key, val := range src {\n\t\ttarget[key] = val\n\t}\n}\n\nfunc createReporter() Reporter {\n\treporters := []Reporter{NewConsoleReporter()}\n\tif junitFlag {\n\t\tpath, _ := filepath.Abs(junitOutputFlag)\n\t\treporters = append(reporters, NewJUnitReporter(path))\n\t}\n\treporter := NewMultiReporter(reporters...)\n\treporter.Init()\n\n\treturn reporter\n}\n\nfunc call(suitePath string, call Call, rememberMap map[string]interface{}) *TError {\n\n\tterr := &TError{}\n\n\ton := call.On\n\n\tdat := []byte(on.Body)\n\tif on.BodyFile != \"\" {\n\t\turi, err := toAbsPath(suitePath, on.BodyFile)\n\t\tif err != nil {\n\t\t\tterr.Cause = err\n\t\t\treturn terr\n\t\t}\n\n\t\tif d, err := ioutil.ReadFile(uri); err == nil {\n\t\t\tdat = d\n\t\t} else {\n\t\t\tterr.Cause = fmt.Errorf(\"Can't read body file: %s\", err.Error())\n\t\t\treturn terr\n\t\t}\n\t}\n\n\treq, err := populateRequest(on, string(dat), rememberMap)\n\tif err != nil {\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tprintRequestInfo(req, dat)\n\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tdebug.Print(\"Error when sending request\", err)\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tdebug.Print(\"Error reading response\")\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\ttestResp := Response{http: *resp, body: body}\n\tterr.Resp = testResp\n\n\tinfo.Println(strings.Repeat(\"-\", 50))\n\tinfo.Println(testResp.ToString())\n\tinfo.Println(\"\")\n\n\texps, err := expectations(call, suitePath)\n\tif err != nil {\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tfor _, exp := range exps {\n\t\tcheckErr := exp.check(&testResp)\n\t\tif checkErr != nil {\n\t\t\tterr.Cause = checkErr\n\t\t\treturn terr\n\t\t}\n\t}\n\n\terr = rememberBody(&testResp, call.Remember.Body, rememberMap)\n\tdebug.Print(\"Remember: \", rememberMap)\n\tif err != nil {\n\t\tdebug.Print(\"Error remember\")\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\trememberHeaders(testResp.http.Header, call.Remember.Headers, rememberMap)\n\n\treturn nil\n}\n\nfunc populateRequest(on On, body string, rememberMap map[string]interface{}) (*http.Request, error) {\n\n\turlStr, err := urlPrefix(populateRememberedVars(on.URL, rememberMap))\n\tif err != nil {\n\t\treturn nil, errors.New(\"Cannot create request. Invalid url: \" + on.URL)\n\t}\n\n\tbody = populateRememberedVars(body, rememberMap)\n\tdat := []byte(body)\n\n\treq, err := http.NewRequest(on.Method, urlStr, bytes.NewBuffer(dat))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor key, value := range on.Headers {\n\t\treq.Header.Add(key, populateRememberedVars(value, rememberMap))\n\t}\n\n\tq := req.URL.Query()\n\tfor key, value := range on.Params {\n\t\tq.Add(key, populateRememberedVars(value, rememberMap))\n\t}\n\treq.URL.RawQuery = q.Encode()\n\n\treturn req, nil\n}\n\nfunc urlPrefix(p string) (string, error) {\n\tif strings.HasPrefix(p, \"http:\/\/\") || strings.HasPrefix(p, \"https:\/\/\") {\n\t\treturn p, nil\n\t}\n\n\treturn concatURL(hostFlag, p)\n}\n\nfunc concatURL(base string, p string) (string, error) {\n\tbaseURL, err := url.ParseRequestURI(base)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn baseURL.Scheme + \":\/\/\" + baseURL.Host + path.Join(baseURL.Path, p), nil\n}\n\nfunc populateRememberedVars(str string, rememberMap map[string]interface{}) string {\n\tres := str\n\tfor varName, val := range rememberMap {\n\t\tplaceholder := \"{\" + varName + \"}\"\n\t\tres = strings.Replace(res, placeholder, toString(val), -1)\n\t}\n\treturn res\n}\n\n\/\/ toString returns value suitable to insert as an argument\n\/\/ if value if a float where decimal part is zero - convert to int\nfunc toString(rw interface{}) string {\n\tvar sv interface{} = rw\n\tif fv, ok := rw.(float64); ok {\n\t\t_, frac := math.Modf(fv)\n\t\tif frac == 0 {\n\t\t\tsv = int(fv)\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"%v\", sv)\n}\n\nfunc expectations(call Call, suitePath string) ([]ResponseExpectation, error) {\n\tvar exps []ResponseExpectation\n\tif call.Expect.StatusCode != 0 {\n\t\texps = append(exps, StatusCodeExpectation{statusCode: call.Expect.StatusCode})\n\t}\n\n\tif call.Expect.hasSchema() {\n\t\tvar (\n\t\t\tschemeURI string\n\t\t\terr error\n\t\t)\n\n\t\tif call.Expect.BodySchemaFile != \"\" {\n\t\t\tschemeURI, err = toAbsPath(suitePath, call.Expect.BodySchemaFile)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tschemeURI = \"file:\/\/\/\" + schemeURI\n\t\t}\n\n\t\tif call.Expect.BodySchemaURI != \"\" {\n\t\t\tisHTTP := strings.HasPrefix(call.Expect.BodySchemaURI, \"http:\/\/\")\n\t\t\tisHTTPS := strings.HasPrefix(call.Expect.BodySchemaURI, \"https:\/\/\")\n\t\t\tif !(isHTTP || isHTTPS) {\n\t\t\t\tschemeURI = hostFlag + call.Expect.BodySchemaURI\n\t\t\t} else {\n\t\t\t\tschemeURI = call.Expect.BodySchemaURI\n\t\t\t}\n\t\t}\n\t\texps = append(exps, BodySchemaExpectation{schemaURI: schemeURI})\n\t}\n\n\tif len(call.Expect.Body) > 0 {\n\t\texps = append(exps, BodyExpectation{pathExpectations: call.Expect.Body})\n\t}\n\n\tif len(call.Expect.Absent) > 0 {\n\t\texps = append(exps, AbsentExpectation{paths: call.Expect.Absent})\n\t}\n\n\tif len(call.Expect.Headers) > 0 {\n\t\tfor k, v := range call.Expect.Headers {\n\t\t\texps = append(exps, HeaderExpectation{Name: k, Value: v})\n\t\t}\n\t}\n\n\tif call.Expect.ContentType != \"\" {\n\t\texps = append(exps, ContentTypeExpectation{call.Expect.ContentType})\n\t}\n\n\t\/\/ and so on\n\treturn exps, nil\n}\n\nfunc toAbsPath(suitePath string, assetPath string) (string, error) {\n\tdebug.Printf(\"Building absolute path using: suiteDir: %s, srcDir: %s, assetPath: %s\", suitesDir, suitePath, assetPath)\n\tif filepath.IsAbs(assetPath) {\n\t\t\/\/ ignore srcDir\n\t\treturn assetPath, nil\n\t}\n\n\turi, err := filepath.Abs(filepath.Join(suitesDir, suitePath, assetPath))\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Invalid file path: \" + assetPath)\n\t}\n\n\treturn filepath.ToSlash(uri), nil\n}\n\nfunc rememberBody(resp *Response, remember map[string]string, rememberedMap map[string]interface{}) (err error) {\n\n\tfor varName, pathLine := range remember {\n\t\tbody, err := resp.Body()\n\t\tif err != nil {\n\t\t\tdebug.Print(\"Can't parse response body to Map for [remember]\")\n\t\t\treturn err\n\t\t}\n\n\t\tif rememberVar, err := GetByPath(body, pathLine); err == nil {\n\t\t\trememberedMap[varName] = rememberVar\n\t\t} else {\n\t\t\tstrErr := fmt.Sprintf(\"Remembered value not found, path: %v\", pathLine)\n\t\t\terr = errors.New(strErr)\n\t\t}\n\t\t\/\/fmt.Printf(\"v: %v\\n\", getByPath(bodyMap, b...))\n\t}\n\n\treturn err\n}\n\nfunc rememberHeaders(header http.Header, remember map[string]string, rememberedMap map[string]interface{}) {\n\tfor valueName, headerName := range remember {\n\t\tvalue := header.Get(headerName)\n\t\tif value == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trememberedMap[valueName] = value\n\t}\n}\n\nfunc printRequestInfo(req *http.Request, body []byte) {\n\tinfo.Println()\n\tinfo.Printf(\"%s %s %s\\n\", req.Method, req.URL.String(), req.Proto)\n\n\tif len(req.Header) > 0 {\n\t\tinfo.Println()\n\t}\n\n\tfor k, v := range req.Header {\n\t\tinfo.Printf(\"%s: %s\", k, strings.Join(v, \" \"))\n\t}\n\tinfo.Println()\n\n\tif len(body) > 0 {\n\t\tinfo.Printf(string(body))\n\t}\n}\n\nfunc terminate(msgLines ...string) {\n\tfor _, line := range msgLines {\n\t\tfmt.Fprintln(os.Stderr, line)\n\t}\n\n\tos.Exit(1)\n}\n<commit_msg>version 0.8.10<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tversion = \"0.8.10\"\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\th := \"Usage:\\n\"\n\t\th += \" bozr [OPTIONS] (DIR|FILE)\\n\\n\"\n\n\t\th += \"Options:\\n\"\n\t\th += \" -d, --debug\t\tEnable debug mode\\n\"\n\t\th += \" -H, --host\t\tServer to test\\n\"\n\t\th += \" -w, --worker\t\tExecute in parallel with specified number of workers\\n\"\n\t\th += \" -h, --help\t\tPrint usage\\n\"\n\t\th += \" -i, --info\t\tEnable info mode. Print request and response details\\n\"\n\t\th += \" --junit\t\tEnable junit xml reporter\\n\"\n\t\th += \" --junit-output\tDestination for junit report files\\n\"\n\t\th += \" -v, --version\t\tPrint version information and quit\\n\\n\"\n\n\t\th += \"Examples:\\n\"\n\t\th += \" bozr .\/examples\\n\"\n\t\th += \" bozr -w 2 .\/examples\\n\"\n\t\th += \" bozr -H http:\/\/example.com .\/examples \\n\"\n\n\t\tfmt.Fprintf(os.Stderr, h)\n\t}\n}\n\nvar (\n\tsuitesDir string\n\thostFlag string\n\troutinesFlag int\n\tinfoFlag bool\n\tdebugFlag bool\n\thelpFlag bool\n\tversionFlag bool\n\tjunitFlag bool\n\tjunitOutputFlag string\n\n\tinfo *log.Logger\n\tdebug *log.Logger\n)\n\nconst suiteExt = \".suite.json\"\n\nfunc initLogger() {\n\tinfoHandler := ioutil.Discard\n\tdebugHandler := ioutil.Discard\n\n\tif infoFlag {\n\t\tinfoHandler = os.Stdout\n\t}\n\n\tif debugFlag {\n\t\tdebugHandler = os.Stdout\n\t}\n\n\tinfo = log.New(infoHandler, \"\", 0)\n\tdebug = log.New(debugHandler, \"DEBUG: \", log.Ltime|log.Lshortfile)\n}\n\nfunc main() {\n\tflag.BoolVar(&debugFlag, \"d\", false, \"Enable debug mode.\")\n\tflag.BoolVar(&debugFlag, \"debug\", false, \"Enable debug mode\")\n\n\tflag.BoolVar(&infoFlag, \"i\", false, \"Enable info mode. Print request and response details.\")\n\tflag.BoolVar(&infoFlag, \"info\", false, \"Enable info mode. Print request and response details.\")\n\n\tflag.StringVar(&hostFlag, \"H\", \"\", \"Test server address. Example: http:\/\/example.com\/api.\")\n\tflag.IntVar(&routinesFlag, \"w\", 1, \"Execute test sutes in parallel with provided numer of workers. Default is 1.\")\n\n\tflag.BoolVar(&helpFlag, \"h\", false, \"Print usage\")\n\tflag.BoolVar(&helpFlag, \"help\", false, \"Print usage\")\n\n\tflag.BoolVar(&versionFlag, \"v\", false, \"Print version information and quit\")\n\tflag.BoolVar(&versionFlag, \"version\", false, \"Print version information and quit\")\n\n\tflag.BoolVar(&junitFlag, \"junit\", false, \"Enable junit xml reporter\")\n\tflag.StringVar(&junitOutputFlag, \"junit-output\", \".\/report\", \"Destination for junit report files. Default \")\n\n\tflag.Parse()\n\n\tinitLogger()\n\n\tif versionFlag {\n\t\tfmt.Println(\"bozr version \" + version)\n\t\treturn\n\t}\n\n\tif helpFlag {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\tif len(hostFlag) > 0 {\n\t\t_, err := url.ParseRequestURI(hostFlag)\n\t\tif err != nil {\n\t\t\tterminate(\"Invalid host is specified.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif routinesFlag < 1 || routinesFlag > 9 {\n\t\tfmt.Println(\"Invalid routines parameter [\", routinesFlag, \"]. Setting to default [1]\")\n\t\troutinesFlag = 1\n\t}\n\n\tsuitesDir = flag.Arg(0)\n\n\tif suitesDir == \"\" {\n\t\tterminate(\"You must specify a directory or file with tests.\")\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\t\/\/ check specified source dir\/file exists\n\t_, err := os.Lstat(suitesDir)\n\tif err != nil {\n\t\tterminate(err.Error())\n\t\treturn\n\t}\n\n\terr = ValidateSuites(suitesDir, suiteExt)\n\tif err != nil {\n\t\tterminate(\"One or more test suites are invalid.\", err.Error())\n\t\treturn\n\t}\n\n\tloader := NewSuiteLoader(suitesDir, suiteExt)\n\treporter := createReporter()\n\n\tRunParallel(loader, reporter, runSuite, routinesFlag)\n}\n\nfunc runSuite(suite TestSuite) []TestResult {\n\tresults := []TestResult{}\n\n\tfor _, testCase := range suite.Cases {\n\n\t\tresult := TestResult{\n\t\t\tSuite: suite,\n\t\t\tCase: testCase,\n\t\t\tExecFrame: TimeFrame{Start: time.Now(), End: time.Now()},\n\t\t}\n\n\t\tif testCase.Ignore != nil {\n\t\t\tresult.Skipped = true\n\t\t\tresult.SkippedMsg = *testCase.Ignore\n\n\t\t\tresults = append(results, result)\n\t\t\tcontinue\n\t\t}\n\n\t\trememberedMap := make(map[string]interface{})\n\t\tfor _, c := range testCase.Calls {\n\t\t\taddAll(c.Args, rememberedMap)\n\t\t\tterr := call(suite.Dir, c, rememberedMap)\n\t\t\tif terr != nil {\n\t\t\t\tresult.Error = terr\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tresult.ExecFrame.End = time.Now()\n\n\t\tresults = append(results, result)\n\t}\n\n\treturn results\n}\n\nfunc addAll(src, target map[string]interface{}) {\n\tfor key, val := range src {\n\t\ttarget[key] = val\n\t}\n}\n\nfunc createReporter() Reporter {\n\treporters := []Reporter{NewConsoleReporter()}\n\tif junitFlag {\n\t\tpath, _ := filepath.Abs(junitOutputFlag)\n\t\treporters = append(reporters, NewJUnitReporter(path))\n\t}\n\treporter := NewMultiReporter(reporters...)\n\treporter.Init()\n\n\treturn reporter\n}\n\nfunc call(suitePath string, call Call, rememberMap map[string]interface{}) *TError {\n\n\tterr := &TError{}\n\n\ton := call.On\n\n\tdat := []byte(on.Body)\n\tif on.BodyFile != \"\" {\n\t\turi, err := toAbsPath(suitePath, on.BodyFile)\n\t\tif err != nil {\n\t\t\tterr.Cause = err\n\t\t\treturn terr\n\t\t}\n\n\t\tif d, err := ioutil.ReadFile(uri); err == nil {\n\t\t\tdat = d\n\t\t} else {\n\t\t\tterr.Cause = fmt.Errorf(\"Can't read body file: %s\", err.Error())\n\t\t\treturn terr\n\t\t}\n\t}\n\n\treq, err := populateRequest(on, string(dat), rememberMap)\n\tif err != nil {\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tprintRequestInfo(req, dat)\n\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tdebug.Print(\"Error when sending request\", err)\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tdebug.Print(\"Error reading response\")\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\ttestResp := Response{http: *resp, body: body}\n\tterr.Resp = testResp\n\n\tinfo.Println(strings.Repeat(\"-\", 50))\n\tinfo.Println(testResp.ToString())\n\tinfo.Println(\"\")\n\n\texps, err := expectations(call, suitePath)\n\tif err != nil {\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\tfor _, exp := range exps {\n\t\tcheckErr := exp.check(&testResp)\n\t\tif checkErr != nil {\n\t\t\tterr.Cause = checkErr\n\t\t\treturn terr\n\t\t}\n\t}\n\n\terr = rememberBody(&testResp, call.Remember.Body, rememberMap)\n\tdebug.Print(\"Remember: \", rememberMap)\n\tif err != nil {\n\t\tdebug.Print(\"Error remember\")\n\t\tterr.Cause = err\n\t\treturn terr\n\t}\n\n\trememberHeaders(testResp.http.Header, call.Remember.Headers, rememberMap)\n\n\treturn nil\n}\n\nfunc populateRequest(on On, body string, rememberMap map[string]interface{}) (*http.Request, error) {\n\n\turlStr, err := urlPrefix(populateRememberedVars(on.URL, rememberMap))\n\tif err != nil {\n\t\treturn nil, errors.New(\"Cannot create request. Invalid url: \" + on.URL)\n\t}\n\n\tbody = populateRememberedVars(body, rememberMap)\n\tdat := []byte(body)\n\n\treq, err := http.NewRequest(on.Method, urlStr, bytes.NewBuffer(dat))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor key, value := range on.Headers {\n\t\treq.Header.Add(key, populateRememberedVars(value, rememberMap))\n\t}\n\n\tq := req.URL.Query()\n\tfor key, value := range on.Params {\n\t\tq.Add(key, populateRememberedVars(value, rememberMap))\n\t}\n\treq.URL.RawQuery = q.Encode()\n\n\treturn req, nil\n}\n\nfunc urlPrefix(p string) (string, error) {\n\tif strings.HasPrefix(p, \"http:\/\/\") || strings.HasPrefix(p, \"https:\/\/\") {\n\t\treturn p, nil\n\t}\n\n\treturn concatURL(hostFlag, p)\n}\n\nfunc concatURL(base string, p string) (string, error) {\n\tbaseURL, err := url.ParseRequestURI(base)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn baseURL.Scheme + \":\/\/\" + baseURL.Host + path.Join(baseURL.Path, p), nil\n}\n\nfunc populateRememberedVars(str string, rememberMap map[string]interface{}) string {\n\tres := str\n\tfor varName, val := range rememberMap {\n\t\tplaceholder := \"{\" + varName + \"}\"\n\t\tres = strings.Replace(res, placeholder, toString(val), -1)\n\t}\n\treturn res\n}\n\n\/\/ toString returns value suitable to insert as an argument\n\/\/ if value if a float where decimal part is zero - convert to int\nfunc toString(rw interface{}) string {\n\tvar sv interface{} = rw\n\tif fv, ok := rw.(float64); ok {\n\t\t_, frac := math.Modf(fv)\n\t\tif frac == 0 {\n\t\t\tsv = int(fv)\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"%v\", sv)\n}\n\nfunc expectations(call Call, suitePath string) ([]ResponseExpectation, error) {\n\tvar exps []ResponseExpectation\n\tif call.Expect.StatusCode != 0 {\n\t\texps = append(exps, StatusCodeExpectation{statusCode: call.Expect.StatusCode})\n\t}\n\n\tif call.Expect.hasSchema() {\n\t\tvar (\n\t\t\tschemeURI string\n\t\t\terr error\n\t\t)\n\n\t\tif call.Expect.BodySchemaFile != \"\" {\n\t\t\tschemeURI, err = toAbsPath(suitePath, call.Expect.BodySchemaFile)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tschemeURI = \"file:\/\/\/\" + schemeURI\n\t\t}\n\n\t\tif call.Expect.BodySchemaURI != \"\" {\n\t\t\tisHTTP := strings.HasPrefix(call.Expect.BodySchemaURI, \"http:\/\/\")\n\t\t\tisHTTPS := strings.HasPrefix(call.Expect.BodySchemaURI, \"https:\/\/\")\n\t\t\tif !(isHTTP || isHTTPS) {\n\t\t\t\tschemeURI = hostFlag + call.Expect.BodySchemaURI\n\t\t\t} else {\n\t\t\t\tschemeURI = call.Expect.BodySchemaURI\n\t\t\t}\n\t\t}\n\t\texps = append(exps, BodySchemaExpectation{schemaURI: schemeURI})\n\t}\n\n\tif len(call.Expect.Body) > 0 {\n\t\texps = append(exps, BodyExpectation{pathExpectations: call.Expect.Body})\n\t}\n\n\tif len(call.Expect.Absent) > 0 {\n\t\texps = append(exps, AbsentExpectation{paths: call.Expect.Absent})\n\t}\n\n\tif len(call.Expect.Headers) > 0 {\n\t\tfor k, v := range call.Expect.Headers {\n\t\t\texps = append(exps, HeaderExpectation{Name: k, Value: v})\n\t\t}\n\t}\n\n\tif call.Expect.ContentType != \"\" {\n\t\texps = append(exps, ContentTypeExpectation{call.Expect.ContentType})\n\t}\n\n\t\/\/ and so on\n\treturn exps, nil\n}\n\nfunc toAbsPath(suitePath string, assetPath string) (string, error) {\n\tdebug.Printf(\"Building absolute path using: suiteDir: %s, srcDir: %s, assetPath: %s\", suitesDir, suitePath, assetPath)\n\tif filepath.IsAbs(assetPath) {\n\t\t\/\/ ignore srcDir\n\t\treturn assetPath, nil\n\t}\n\n\turi, err := filepath.Abs(filepath.Join(suitesDir, suitePath, assetPath))\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Invalid file path: \" + assetPath)\n\t}\n\n\treturn filepath.ToSlash(uri), nil\n}\n\nfunc rememberBody(resp *Response, remember map[string]string, rememberedMap map[string]interface{}) (err error) {\n\n\tfor varName, pathLine := range remember {\n\t\tbody, err := resp.Body()\n\t\tif err != nil {\n\t\t\tdebug.Print(\"Can't parse response body to Map for [remember]\")\n\t\t\treturn err\n\t\t}\n\n\t\tif rememberVar, err := GetByPath(body, pathLine); err == nil {\n\t\t\trememberedMap[varName] = rememberVar\n\t\t} else {\n\t\t\tstrErr := fmt.Sprintf(\"Remembered value not found, path: %v\", pathLine)\n\t\t\terr = errors.New(strErr)\n\t\t}\n\t\t\/\/fmt.Printf(\"v: %v\\n\", getByPath(bodyMap, b...))\n\t}\n\n\treturn err\n}\n\nfunc rememberHeaders(header http.Header, remember map[string]string, rememberedMap map[string]interface{}) {\n\tfor valueName, headerName := range remember {\n\t\tvalue := header.Get(headerName)\n\t\tif value == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trememberedMap[valueName] = value\n\t}\n}\n\nfunc printRequestInfo(req *http.Request, body []byte) {\n\tinfo.Println()\n\tinfo.Printf(\"%s %s %s\\n\", req.Method, req.URL.String(), req.Proto)\n\n\tif len(req.Header) > 0 {\n\t\tinfo.Println()\n\t}\n\n\tfor k, v := range req.Header {\n\t\tinfo.Printf(\"%s: %s\", k, strings.Join(v, \" \"))\n\t}\n\tinfo.Println()\n\n\tif len(body) > 0 {\n\t\tinfo.Printf(string(body))\n\t}\n}\n\nfunc terminate(msgLines ...string) {\n\tfor _, line := range msgLines {\n\t\tfmt.Fprintln(os.Stderr, line)\n\t}\n\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Project to simulate mechs to learn go.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/Ariemeth\/mechsim\/controllers\"\n\t\"github.com\/Ariemeth\/mechsim\/mech\"\n\t\"github.com\/Ariemeth\/mechsim\/mech\/weapon\"\n\t\n\t\"github.com\/Ariemeth\/termloop\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc main() {\n\n\tgame := termloop.NewGame()\n game.Start()\n\n\tinputChannel := make(chan string)\n\n\tinputController := controller.NewInput(inputChannel)\n\n\tmech1 := mech.NewMech(\"Mech1\", 2)\n\tmech2 := mech.NewMech(\"Mech2\", 2)\n\n\tweapon1 := weapon.Create(5, 1, \"rifle\", .75)\n\tweapon2 := weapon.Create(3, 3, \"shotgun\", .4)\n\n\tmech1.AddWeapon(weapon1)\n\tmech2.AddWeapon(weapon2)\n\n\tmech1.Fire(4, mech2)\n\n\tfmt.Println(\"Mech1 has \", mech1.StructureLeft(), \" structure left.\")\n\tfmt.Println(\"Mech2 has \", mech2.StructureLeft(), \" structure left.\")\n\n\tisRunning := true\n\n\tc := exec.Command(\"cmd\", \"\/c\", \"cls\")\n\tc.Stdout = os.Stdout\n\n\tfor isRunning {\n\t\tselect {\n\t\tcase msg := <-inputChannel:\n\t\t\tc := exec.Command(\"cmd\", \"\/c\", \"cls\")\n\t\t\tc.Stdout = os.Stdout\n\t\t\tc.Run()\n\t\t\tif strings.EqualFold(strings.ToLower(msg), \"q\") {\n\t\t\t\tisRunning = false\n\t\t\t}\n\t\t}\n\t}\n\n\tinputController.Stop()\n\tclose(inputChannel)\n}\n<commit_msg>Added basic level<commit_after>\/\/ Project to simulate mechs to learn go.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/Ariemeth\/mechsim\/controllers\"\n\t\"github.com\/Ariemeth\/mechsim\/mech\"\n\t\"github.com\/Ariemeth\/mechsim\/mech\/weapon\"\n\t\n\t\"github.com\/Ariemeth\/termloop\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc main() {\n\n\tgame := termloop.NewGame()\n\tlevel := termloop.NewBaseLevel(termloop.Cell{\n Bg: termloop.ColorGreen,\n Fg: termloop.ColorBlack,\n Ch: 'v',\n })\n level.AddEntity(termloop.NewRectangle(10, 10, 50, 20, termloop.ColorBlue))\n game.SetLevel(level)\n game.Start()\n\n\tinputChannel := make(chan string)\n\n\tinputController := controller.NewInput(inputChannel)\n\n\tmech1 := mech.NewMech(\"Mech1\", 2)\n\tmech2 := mech.NewMech(\"Mech2\", 2)\n\n\tweapon1 := weapon.Create(5, 1, \"rifle\", .75)\n\tweapon2 := weapon.Create(3, 3, \"shotgun\", .4)\n\n\tmech1.AddWeapon(weapon1)\n\tmech2.AddWeapon(weapon2)\n\n\tmech1.Fire(4, mech2)\n\n\tfmt.Println(\"Mech1 has \", mech1.StructureLeft(), \" structure left.\")\n\tfmt.Println(\"Mech2 has \", mech2.StructureLeft(), \" structure left.\")\n\n\tisRunning := true\n\n\tc := exec.Command(\"cmd\", \"\/c\", \"cls\")\n\tc.Stdout = os.Stdout\n\n\tfor isRunning {\n\t\tselect {\n\t\tcase msg := <-inputChannel:\n\t\t\tc := exec.Command(\"cmd\", \"\/c\", \"cls\")\n\t\t\tc.Stdout = os.Stdout\n\t\t\tc.Run()\n\t\t\tif strings.EqualFold(strings.ToLower(msg), \"q\") {\n\t\t\t\tisRunning = false\n\t\t\t}\n\t\t}\n\t}\n\n\tinputController.Stop()\n\tclose(inputChannel)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"runtime\"\n\n\t\"gonico\"\n)\n\nconst (\n\tstartHelp = \"Set start page\"\n\tendHelp = \"Set end page\"\n\toutHelp = \"Set download path\"\n\tintervalHelp = \"Polling interval time (min)\"\n)\n\nvar (\n\tstart = flag.Int(\"start\", 1, startHelp)\n\tend = flag.Int(\"end\", 1, endHelp)\n\tout = flag.String(\"out\", \"\", outHelp)\n\tinterval = flag.Int(\"interval\", 30, intervalHelp)\n)\n\nfunc init() {\n\tflag.IntVar(start, \"s\", 1, startHelp)\n\tflag.IntVar(end, \"e\", 1, endHelp)\n\tflag.StringVar(out, \"o\", \"\", outHelp)\n\tflag.IntVar(interval, \"i\", 30, intervalHelp)\n}\n\nfunc main() {\n\tflag.Parse()\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tgonico.GetVideoInfo(\"sm9\")\n}\n<commit_msg>Add a command line flag<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"runtime\"\n\n\t\"gonico\"\n)\n\nconst (\n\turlHelp = \"The movie url or id from which you want to extract movie infomation.\"\n)\n\nfunc main() {\n\tmovieUrl := flag.String(\"url\", \"foo\", urlHelp)\n\tflag.Parse()\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tgonico.GetVideoInfo(*movieUrl)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the command line interface for Go refactoring.\n\n\/\/ Contributors: Reed Allman, Josh Kane\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"golang-refactoring.org\/go-doctor\/doctor\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tformatFlag = flag.String(\"format\", \"plain\",\n\t\t\"Output in 'plain' or 'json'\")\n\n\thelpFlag = flag.Bool(\"h\", false,\n\t\t\"Prints usage\")\n\n\tdiffFlag = flag.Bool(\"d\", false,\n\t\t\"Get diff of all files affected by given refactoring\")\n\n\tlistFlag = flag.Bool(\"l\", false,\n\t\t\"List all possible refactorings\")\n\n\tparamsFlag = flag.Bool(\"p\", false,\n\t\t\"Get description of parameters for given refactoring\")\n\n\tposFlag = flag.String(\"pos\", \"0,0:0,0\",\n\t\t\"Line, col offset usually necessary, e.g. -pos=5,11:5,11\")\n\n\t\/\/TODO (reed) need to understand this happening\n\tscopeFlag = flag.String(\"scope\", \"\",\n\t\t\"Give a scope (package), e.g. -scope=code.google.com\/p\/go.tools\/\")\n\n\twriteFlag = flag.Bool(\"w\", false,\n\t\t\"Write the refactored files in place\")\n\n\t\/\/useful for JSON I'm thinking\n\tskipLogFlag = flag.Bool(\"e\", false,\n\t\t\"Write results even if log, dangerous\")\n)\n\nfunc usage() {\n\tfmt.Printf(\n\t\t`Usage of `+os.Args[0]+`:\n `+os.Args[0]+` [<flag> ...] <file> <refactoring> <args> ...\n\n The <refactoring> may be one of:\n%v\n\n The <flag> arguments are\n\n`,\n\t\tfunc() (s string) {\n\t\t\tfor key, _ := range doctor.AllRefactorings() {\n\t\t\t\ts += \"\\n \" + key\n\t\t\t}\n\t\t\treturn\n\t\t}())\n\tflag.PrintDefaults()\n\tfmt.Printf(`\n <args> are <refactoring> specific and must be provided in order\n for a <refactoring to occur. To see the <args> for a <refactoring> do:\n\n ` + os.Args[0] + ` -p <refactoring>`)\n\tfmt.Println()\n}\n\n\/\/TODO (reed \/ josh) -comments to change comments (if a thing?)\n\/\/TODO (reed \/ josh) scope (importer? wait?)\n\/\/TODO (reed) handle errors better (JSON-wise, especially, not log stuff)\n\/\/TODO (reed) take byte offsets AND line:col\n\/\/\n\/\/example query: go-doctor -pos=11,8:11,8 someFile.go rename newName\n\/\/TODO query (stdin): cat file.go | go-doctor -pos=11,8:11,8 rename newName\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif *helpFlag {\n\t\tusage()\n\t\tos.Exit(0)\n\t}\n\n\tif *listFlag {\n\t\tprintAllRefactorings(*formatFlag)\n\t\tos.Exit(0)\n\t}\n\n\tif len(args) == 0 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tr := doctor.GetRefactoring(args[0])\n\tvar filename string\n\n\t\/\/no file given (assume stdin), e.g. go-doctor refactor params...\n\t\/\/TODO make stdin and importer get along\n\tif r != nil && len(args) > 0 {\n\t\tfilename = \"temp\"\n\t\targs = args[1:]\n\t} else {\n\t\t\/\/file given, e.g. go-doctor file refactor params...\n\t\tr = doctor.GetRefactoring(args[1])\n\t\tfilename = args[0]\n\t\targs = args[2:]\n\t}\n\n\tif *paramsFlag {\n\t\tprintRefactoringParams(*formatFlag, r)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/do the refactoring\n\tl, es, err := query(filename, args, r, *posFlag, *scopeFlag)\n\n\t\/\/TODO what to do about errors in JSON? default reply wrapper?\n\t\/\/since these aren't really \"log\" errors\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tchanges := make(map[string][]byte)\n\n\tif l.ContainsErrors() && !*skipLogFlag {\n\t\tprintResults(*formatFlag, r.Name(), l, changes)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/write all edits out to changes; something to work with\n\tfor file, _ := range es {\n\t\tchanges[file], err = doctor.ApplyToFile(es[file], file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tif *writeFlag {\n\t\t\/\/write changes to their file and exit\n\t\tfor file, change := range changes {\n\t\t\tif err := ioutil.WriteFile(file, change, 0); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif *diffFlag {\n\t\t\/\/compute diff for each\n\t\tfor file, change := range changes {\n\t\t\tf, err := ioutil.TempFile(\"\", \"go-doctor\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t\t\/\/TODO make sure that we return, so this happens\n\t\t\tdefer os.Remove(f.Name())\n\t\t\tdefer f.Close()\n\n\t\t\tf.Write(change)\n\t\t\tdiff, err := exec.Command(\"diff\", \"-u\", file, f.Name()).CombinedOutput()\n\t\t\tif len(diff) > 0 {\n\t\t\t\t\/\/diff exits with a non-zero status when the files don't match.\n\t\t\t\t\/\/Ignore that failure as long as we get output.\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/put diff in changes instead of just changed file\n\t\t\tchanges[file] = diff\n\t\t}\n\t}\n\n\t\/\/At this point changes either has updated files or diff data\n\t\/\/so output what we have.\n\tprintResults(*formatFlag, r.Name(), l, changes)\n}\n\nfunc printResults(format, refactoring string, log *doctor.Log, changes map[string][]byte) {\n\tswitch format {\n\tcase \"plain\":\n\t\tfor file, change := range changes {\n\t\t\t\/\/TODO show file name, piss off the unix gurus?\n\t\t\tfmt.Printf(\"%s:\\n\\n\", file)\n\t\t\tfmt.Printf(\"%s\\n\", change)\n\t\t}\n\tcase \"json\":\n\t\t\/\/TODO figure out a better way, O(N) says so.\n\t\t\/\/[]byte goes to base64 string in json\n\t\tc := make(map[string]string)\n\t\tfor file, change := range changes {\n\t\t\tc[file] = string(change[:])\n\t\t}\n\n\t\tout, err := json.MarshalIndent(struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tLog *doctor.Log `json:\"log\"`\n\t\t\tChanges map[string]string `json:\"changes\"`\n\t\t}{\n\t\t\trefactoring,\n\t\t\tlog,\n\t\t\tc,\n\t\t}, \"\", \"\\t\")\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", out)\n\t}\n}\n\nfunc printRefactoringParams(format string, r doctor.Refactoring) {\n\tswitch format {\n\tcase \"plain\":\n\t\tif r != nil {\n\t\t\tfor _, p := range r.GetParams() {\n\t\t\t\tfmt.Println(p)\n\t\t\t}\n\t\t}\n\tcase \"json\":\n\t\tp, err := json.MarshalIndent(struct {\n\t\t\tParams []string `json:\"params\"`\n\t\t}{\n\t\t\tr.GetParams(),\n\t\t}, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", p)\n\t}\n}\n\nfunc printAllRefactorings(format string) {\n\tvar names []string\n\tfor name, _ := range doctor.AllRefactorings() {\n\t\tnames = append(names, name)\n\t}\n\n\tswitch format {\n\tcase \"plain\":\n\t\tfor _, n := range names {\n\t\t\tfmt.Println(n)\n\t\t}\n\tcase \"json\":\n\t\tp, err := json.MarshalIndent(struct {\n\t\t\tRefactorings []string `json:\"refactorings\"`\n\t\t}{\n\t\t\tnames,\n\t\t}, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", p)\n\t}\n}\n\n\/\/e.g. 302,6\nfunc parseLineCol(linecol string) (int, int) {\n\tlc := strings.Split(linecol, \",\")\n\tif l, err := strconv.ParseInt(lc[0], 10, 32); err == nil {\n\t\tif c, err := strconv.ParseInt(lc[1], 10, 32); err == nil {\n\t\t\treturn int(l), int(c)\n\t\t}\n\t}\n\n\treturn -1, -1\n}\n\n\/\/pos=3,6:3,9\nfunc parsePositionToTextSelection(pos string) (t doctor.TextSelection, err error) {\n\targs := strings.Split(pos, \":\")\n\n\tif len(args) < 2 {\n\t\terr = fmt.Errorf(\"invalid -pos\")\n\t\treturn\n\t}\n\n\tsl, sc := parseLineCol(args[0])\n\tel, ec := parseLineCol(args[1])\n\n\tif sl < 0 || sc < 0 || el < 0 || ec < 0 {\n\t\terr = fmt.Errorf(\"invalid -pos line, col\")\n\t\treturn\n\t}\n\n\tt = doctor.TextSelection{StartLine: sl, StartCol: sc,\n\t\tEndLine: el, EndCol: ec}\n\n\treturn\n}\n\n\/\/TODO (reed \/ josh) scope here?\n\/\/TODO (jeff) I'm fairly sure I used scope wrong here...?\n\/\/ Anyway I think we need to know which file the main function is in,\n\/\/ so I made that the second arg to SetSelection -- confirm with Alan\n\/\/\n\/\/This will do all of the configuration and execution for\n\/\/a refactoring (@op), returning the edits to be made and log.\n\/\/For use with the CLI, but have at it.\n\/\/\nfunc query(file string, args []string, r doctor.Refactoring, pos string, scope string) (*doctor.Log, map[string]doctor.EditSet, error) {\n\tif r == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Invalid refactoring\")\n\t}\n\n\tts, err := parsePositionToTextSelection(pos)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tts.Filename = file\n\n\t\/\/ TODO these 3 all return bool, but get checked in log. Not sure if\n\t\/\/ need a change here or not. Maybe move this entire function to main.go\n\tr.SetSelection(ts, scope)\n\tr.Configure(args)\n\tr.Run()\n\te, l := r.GetResult()\n\treturn e, l, nil\n}\n<commit_msg>Changed main.go to use absolute paths and print error log, errors on stderr Eliminated \"temp\" filename when filename not present on command line<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the command line interface for Go refactoring.\n\n\/\/ Contributors: Reed Allman, Josh Kane\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"golang-refactoring.org\/go-doctor\/doctor\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tformatFlag = flag.String(\"format\", \"plain\",\n\t\t\"Output in 'plain' or 'json'\")\n\n\thelpFlag = flag.Bool(\"h\", false,\n\t\t\"Prints usage\")\n\n\tdiffFlag = flag.Bool(\"d\", false,\n\t\t\"Get diff of all files affected by given refactoring\")\n\n\tlistFlag = flag.Bool(\"l\", false,\n\t\t\"List all possible refactorings\")\n\n\tparamsFlag = flag.Bool(\"p\", false,\n\t\t\"Get description of parameters for given refactoring\")\n\n\tposFlag = flag.String(\"pos\", \"0,0:0,0\",\n\t\t\"Line, col offset usually necessary, e.g. -pos=5,11:5,11\")\n\n\t\/\/TODO (reed) need to understand this happening\n\tscopeFlag = flag.String(\"scope\", \"\",\n\t\t\"Give a scope (package), e.g. -scope=code.google.com\/p\/go.tools\/\")\n\n\twriteFlag = flag.Bool(\"w\", false,\n\t\t\"Write the refactored files in place\")\n\n\t\/\/useful for JSON I'm thinking\n\tskipLogFlag = flag.Bool(\"e\", false,\n\t\t\"Write results even if log, dangerous\")\n)\n\nfunc usage() {\n\tfmt.Printf(\n\t\t`Usage of `+os.Args[0]+`:\n `+os.Args[0]+` [<flag> ...] <file> <refactoring> <args> ...\n\n The <refactoring> may be one of:\n%v\n\n The <flag> arguments are\n\n`,\n\t\tfunc() (s string) {\n\t\t\tfor key, _ := range doctor.AllRefactorings() {\n\t\t\t\ts += \"\\n \" + key\n\t\t\t}\n\t\t\treturn\n\t\t}())\n\tflag.PrintDefaults()\n\tfmt.Printf(`\n <args> are <refactoring> specific and must be provided in order\n for a <refactoring to occur. To see the <args> for a <refactoring> do:\n\n ` + os.Args[0] + ` -p <refactoring>`)\n\tfmt.Println()\n}\n\n\/\/TODO (reed \/ josh) -comments to change comments (if a thing?)\n\/\/TODO (reed \/ josh) scope (importer? wait?)\n\/\/TODO (reed) handle errors better (JSON-wise, especially, not log stuff)\n\/\/TODO (reed) take byte offsets AND line:col\n\/\/\n\/\/example query: go-doctor -pos=11,8:11,8 someFile.go rename newName\n\/\/TODO query (stdin): cat file.go | go-doctor -pos=11,8:11,8 rename newName\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif *helpFlag {\n\t\tusage()\n\t\tos.Exit(0)\n\t}\n\n\tif *listFlag {\n\t\tprintAllRefactorings(*formatFlag)\n\t\tos.Exit(0)\n\t}\n\n\tif len(args) == 0 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tvar filename string\n\n\t\/\/no file given (assume stdin), e.g. go-doctor refactor params...\n\t\/\/TODO make stdin and importer get along\n\tr := doctor.GetRefactoring(args[0])\n\tif r != nil && len(args) > 0 {\n\t\targs = args[1:]\n\t} else {\n\t\t\/\/file given, e.g. go-doctor file refactor params...\n\t\tfilename = args[0]\n\t\tr = doctor.GetRefactoring(args[1])\n\t\targs = args[2:]\n\t}\n\n\tif *paramsFlag {\n\t\tprintRefactoringParams(*formatFlag, r)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/do the refactoring\n\tl, es, err := query(filename, args, r, *posFlag, *scopeFlag)\n\n\t\/\/TODO what to do about errors in JSON? default reply wrapper?\n\t\/\/since these aren't really \"log\" errors\n\tif err != nil {\n\t\tfmt.Fprint(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tchanges := make(map[string][]byte)\n\n\tif l.ContainsErrors() && !*skipLogFlag {\n\t\tprintResults(*formatFlag, r.Name(), l, changes)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/write all edits out to changes; something to work with\n\tfor file, _ := range es {\n\t\tchanges[file], err = doctor.ApplyToFile(es[file], file)\n\t\tif err != nil {\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tif *writeFlag {\n\t\t\/\/write changes to their file and exit\n\t\tfor file, change := range changes {\n\t\t\tif err := ioutil.WriteFile(file, change, 0); err != nil {\n\t\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif *diffFlag {\n\t\t\/\/compute diff for each\n\t\tfor file, change := range changes {\n\t\t\tf, err := ioutil.TempFile(\"\", \"go-doctor\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t\t\/\/TODO make sure that we return, so this happens\n\t\t\tdefer os.Remove(f.Name())\n\t\t\tdefer f.Close()\n\n\t\t\tf.Write(change)\n\t\t\tdiff, err := exec.Command(\"diff\", \"-u\", file, f.Name()).CombinedOutput()\n\t\t\tif len(diff) > 0 {\n\t\t\t\t\/\/diff exits with a non-zero status when the files don't match.\n\t\t\t\t\/\/Ignore that failure as long as we get output.\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/put diff in changes instead of just changed file\n\t\t\tchanges[file] = diff\n\t\t}\n\t}\n\n\t\/\/At this point changes either has updated files or diff data\n\t\/\/so output what we have.\n\tprintResults(*formatFlag, r.Name(), l, changes)\n}\n\nfunc printResults(format, refactoring string, l *doctor.Log, changes map[string][]byte) {\n\tswitch format {\n\tcase \"plain\":\n\t\tfmt.Fprint(os.Stderr, l)\n\t\tfor file, change := range changes {\n\t\t\t\/\/TODO show file name, piss off the unix gurus?\n\t\t\tfmt.Printf(\"%s:\\n\\n\", file)\n\t\t\tfmt.Printf(\"%s\\n\", change)\n\t\t}\n\tcase \"json\":\n\t\t\/\/TODO figure out a better way, O(N) says so.\n\t\t\/\/[]byte goes to base64 string in json\n\t\tc := make(map[string]string)\n\t\tfor file, change := range changes {\n\t\t\tc[file] = string(change[:])\n\t\t}\n\n\t\tout, err := json.MarshalIndent(struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tLog *doctor.Log `json:\"log\"`\n\t\t\tChanges map[string]string `json:\"changes\"`\n\t\t}{\n\t\t\trefactoring,\n\t\t\tl,\n\t\t\tc,\n\t\t}, \"\", \"\\t\")\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", out)\n\t}\n}\n\nfunc printRefactoringParams(format string, r doctor.Refactoring) {\n\tswitch format {\n\tcase \"plain\":\n\t\tif r != nil {\n\t\t\tfor _, p := range r.GetParams() {\n\t\t\t\tfmt.Println(p)\n\t\t\t}\n\t\t}\n\tcase \"json\":\n\t\tp, err := json.MarshalIndent(struct {\n\t\t\tParams []string `json:\"params\"`\n\t\t}{\n\t\t\tr.GetParams(),\n\t\t}, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", p)\n\t}\n}\n\nfunc printAllRefactorings(format string) {\n\tvar names []string\n\tfor name, _ := range doctor.AllRefactorings() {\n\t\tnames = append(names, name)\n\t}\n\n\tswitch format {\n\tcase \"plain\":\n\t\tfor _, n := range names {\n\t\t\tfmt.Println(n)\n\t\t}\n\tcase \"json\":\n\t\tp, err := json.MarshalIndent(struct {\n\t\t\tRefactorings []string `json:\"refactorings\"`\n\t\t}{\n\t\t\tnames,\n\t\t}, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", p)\n\t}\n}\n\n\/\/e.g. 302,6\nfunc parseLineCol(linecol string) (int, int) {\n\tlc := strings.Split(linecol, \",\")\n\tif l, err := strconv.ParseInt(lc[0], 10, 32); err == nil {\n\t\tif c, err := strconv.ParseInt(lc[1], 10, 32); err == nil {\n\t\t\treturn int(l), int(c)\n\t\t}\n\t}\n\n\treturn -1, -1\n}\n\n\/\/pos=3,6:3,9\nfunc parsePositionToTextSelection(pos string) (t doctor.TextSelection, err error) {\n\targs := strings.Split(pos, \":\")\n\n\tif len(args) < 2 {\n\t\terr = fmt.Errorf(\"invalid -pos\")\n\t\treturn\n\t}\n\n\tsl, sc := parseLineCol(args[0])\n\tel, ec := parseLineCol(args[1])\n\n\tif sl < 0 || sc < 0 || el < 0 || ec < 0 {\n\t\terr = fmt.Errorf(\"invalid -pos line, col\")\n\t\treturn\n\t}\n\n\tt = doctor.TextSelection{StartLine: sl, StartCol: sc,\n\t\tEndLine: el, EndCol: ec}\n\n\treturn\n}\n\n\/\/TODO (reed \/ josh) scope here?\n\/\/TODO (jeff) figure out how to deal with scope\/mainFile\/2nd arg to SetSelection\n\/\/ Anyway I think we need to know which file the main function is in,\n\/\/ so I made that the second arg to SetSelection -- confirm with Alan\n\/\/\n\/\/This will do all of the configuration and execution for\n\/\/a refactoring (@op), returning the edits to be made and log.\n\/\/For use with the CLI, but have at it.\n\/\/\nfunc query(file string, args []string, r doctor.Refactoring, pos string, scope string) (*doctor.Log, map[string]doctor.EditSet, error) {\n\tif r == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Invalid refactoring\")\n\t}\n\n\tts, err := parsePositionToTextSelection(pos)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tts.Filename, err = filepath.Abs(file)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO these 3 all return bool, but get checked in log. Not sure if\n\t\/\/ need a change here or not. Maybe move this entire function to main.go\n\tr.SetSelection(ts, ts.Filename)\n\tr.Configure(args)\n\tr.Run()\n\te, l := r.GetResult()\n\treturn e, l, nil\n}\n\n\/\/ paths returns a (not necessarily unique) absolute path and a relative path\n\/\/ to the given file, if possible. A non-nil error is returned if an absolute\n\/\/ path cannot be computed.\nfunc paths(file string) (string, string, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tabsPath, err := filepath.Abs(file)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\trelativePath, _ := filepath.Rel(cwd, absPath)\n\treturn absPath, relativePath, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tcrand \"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com\/reusee\/gobfile\"\n)\n\nvar (\n\tpt = fmt.Printf\n)\n\nfunc init() {\n\tvar seed int64\n\tbinary.Read(crand.Reader, binary.LittleEndian, &seed)\n\trand.Seed(seed)\n\n\tflag.Parse()\n}\n\nfunc main() {\n\t\/*\n\t\ttraceFile, err := os.Create(\"trace\")\n\t\tcheckErr(\"create trace file\", err)\n\t\tdefer traceFile.Close()\n\t\terr = pprof.StartTrace(traceFile)\n\t\tcheckErr(\"start trace\", err)\n\t\tdefer pprof.StopTrace()\n\t*\/\n\n\tdir := \".\"\n\tif args := flag.Args(); len(args) > 0 {\n\t\tdir = args[0]\n\t}\n\n\t\/\/ load words\n\ttype Word struct {\n\t\tText string\n\t\tDesc string\n\t}\n\twords := map[string]Word{}\n\tcontent, err := ioutil.ReadFile(filepath.Join(dir, \"words\"))\n\tcheckErr(\"read words file\", err)\n\tfor _, line := range strings.Split(string(content), \"\\n\") {\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tindex := 0\n\t\trunes := []rune(line)\n\t\tfor i, r := range runes {\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tindex = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttext := string(runes[:index])\n\t\tdesc := string(runes[index+1:])\n\t\tif len(text) == 0 || len(desc) == 0 {\n\t\t\tlog.Fatalf(\"invalid word entry %s\\n\", line)\n\t\t}\n\t\twords[text] = Word{\n\t\t\tText: text,\n\t\t\tDesc: desc,\n\t\t}\n\t}\n\n\t\/\/ data file\n\tdataFilePath := filepath.Join(dir, \"data\")\n\tdataFileLockPath := filepath.Join(dir, \".data.lock\")\n\ttype HistoryEntry struct {\n\t\tTime time.Time\n\t\tWhat string\n\t}\n\ttype Practice struct {\n\t\tType string\n\t\tText string\n\t}\n\tdata := struct {\n\t\tHistory map[Practice][]HistoryEntry\n\t}{\n\t\tHistory: make(map[Practice][]HistoryEntry),\n\t}\n\tdataFile, err := gobfile.New(&data, dataFilePath, gobfile.NewFileLocker(dataFileLockPath))\n\tcheckErr(\"open data file\", err)\n\tdefer dataFile.Close()\n\tdefer dataFile.Save()\n\n\t\/\/ check new entry\n\tfor _, word := range words {\n\t\tfor _, t := range []string{\"audio\", \"text\", \"usage\"} {\n\t\t\tpractice := Practice{\n\t\t\t\tType: t,\n\t\t\t\tText: word.Text,\n\t\t\t}\n\t\t\tif _, ok := data.History[practice]; !ok {\n\t\t\t\tpt(\"new practice: %s %s\\n\", t, word.Text)\n\t\t\t\tdata.History[practice] = append(data.History[practice], HistoryEntry{\n\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\tWhat: \"ok\",\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\tdataFile.Save()\n\n\t\/\/ review functions\n\taudioReview := func(word Word) bool {\n\t\tvar reply string\n\t\tretry := 1\n\tplay:\n\t\tpt(\"playing audio\\n\")\n\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\tcheckErr(\"play audio\", err)\n\task1:\n\t\tpt(\"'j' to show text, 'r' to replay\\n\")\n\t\tfmt.Scanf(\"%s\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"%s\\n\", word.Text)\n\t\task2:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask2\n\t\t\t}\n\t\tcase \"r\":\n\t\t\tif retry > 0 {\n\t\t\t\tretry--\n\t\t\t\tgoto play\n\t\t\t} else {\n\t\t\t\tpt(\"no more replay\\n\")\n\t\t\t\tgoto ask1\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask1\n\t\t}\n\t\treturn false\n\t}\n\n\ttextReview := func(word Word) bool {\n\t\tpt(\"showing text\\n\")\n\t\tpt(\"%s\\n\", word.Text)\n\task2:\n\t\tpt(\"'j' to play audio\\n\")\n\t\tvar reply string\n\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"playing audio\\n\")\n\t\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\t\tcheckErr(\"play audio\", err)\n\t\task:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask2\n\t\t}\n\t\treturn false\n\t}\n\n\tusageReview := func(word Word) bool {\n\t\tpt(\"showing usage\\n\")\n\t\tpt(\"%s\\n\", word.Desc)\n\task:\n\t\tpt(\"'j' to show answer\\n\")\n\t\tvar reply string\n\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"playing audio\\n\")\n\t\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\t\tcheckErr(\"play audio\", err)\n\t\t\tpt(\"%s\\n\", word.Text)\n\t\task2:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask2\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask\n\t\t}\n\t\treturn false\n\t}\n\n\treviewFuncs := map[string]func(Word) bool{\n\t\t\"audio\": audioReview,\n\t\t\"text\": textReview,\n\t\t\"usage\": usageReview,\n\t}\n\n\ttype PracticeInfo struct {\n\t\tPractice Practice\n\t\tMax time.Duration\n\t\tFade time.Duration\n\t\tRatio float64\n\t}\n\tpractices := []PracticeInfo{}\n\tfor practice, history := range data.History {\n\t\t\/\/ calculate fade and max\n\t\tlast := history[len(history)-1]\n\t\tfade := time.Now().Sub(last.Time)\n\t\tvar max time.Duration\n\t\tfor i := 1; i < len(history); i++ {\n\t\t\tif history[i].What != \"ok\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif d := history[i].Time.Sub(history[i-1].Time); d > max {\n\t\t\t\tmax = d\n\t\t\t}\n\t\t}\n\t\tratio := float64(fade) \/ float64(max+1)\n\t\t\/\/ filter\n\t\tif ratio < 2.0 {\n\t\t\tcontinue\n\t\t}\n\t\tif fade < time.Second*30 { \/\/ skip newly added\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ collect\n\t\tpractices = append(practices, PracticeInfo{\n\t\t\tPractice: practice,\n\t\t\tFade: fade,\n\t\t\tMax: max,\n\t\t\tRatio: ratio,\n\t\t})\n\t}\n\tpt(\"%d practices\\n\", len(practices))\n\n\t\/\/ sort\n\tfor i := len(practices) - 1; i >= 1; i-- {\n\t\tj := rand.Intn(i + 1)\n\t\tpractices[i], practices[j] = practices[j], practices[i]\n\t}\n\tSort(practices, func(left, right PracticeInfo) bool {\n\t\treturn left.Ratio > right.Ratio\n\t})\n\n\t\/\/ unique words\n\tpracticedWords := map[string]struct{}{}\n\tinfos := []PracticeInfo{}\n\tfor _, info := range practices {\n\t\tif _, ok := practicedWords[info.Practice.Text]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tinfos = append(infos, info)\n\t\tpracticedWords[info.Practice.Text] = struct{}{}\n\t}\n\tpractices = infos\n\n\t\/\/ practice\n\tfor _, practice := range practices {\n\t\tpt(\"practice %s fade %v max %v ratio %f\\n\", practice.Practice.Type, practice.Fade, practice.Max, practice.Ratio)\n\t\tvar what string\n\t\tif reviewFuncs[practice.Practice.Type](words[practice.Practice.Text]) {\n\t\t\twhat = \"ok\"\n\t\t} else {\n\t\t\twhat = \"fail\"\n\t\t}\n\t\tdata.History[practice.Practice] = append(data.History[practice.Practice], HistoryEntry{\n\t\t\tWhat: what,\n\t\t\tTime: time.Now(),\n\t\t})\n\t\tdataFile.Save()\n\t}\n}\n\nfunc checkErr(desc string, err error) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s error %v\", desc, err)\n\t}\n}\n\nfunc Sort(slice interface{}, cmp interface{}) {\n\tsort.Sort(sliceSorter{reflect.ValueOf(slice), reflect.ValueOf(cmp)})\n}\n\ntype sliceSorter struct {\n\tslice, cmp reflect.Value\n}\n\nfunc (t sliceSorter) Len() int {\n\treturn t.slice.Len()\n}\n\nfunc (t sliceSorter) Less(i, j int) bool {\n\treturn t.cmp.Call([]reflect.Value{\n\t\tt.slice.Index(i),\n\t\tt.slice.Index(j),\n\t})[0].Bool()\n}\n\nfunc (t sliceSorter) Swap(i, j int) {\n\ttmp := t.slice.Index(i).Interface()\n\tt.slice.Index(i).Set(t.slice.Index(j))\n\tt.slice.Index(j).Set(reflect.ValueOf(tmp))\n}\n<commit_msg>use non-reflection sort<commit_after>package main\n\nimport (\n\tcrand \"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com\/reusee\/gobfile\"\n)\n\nvar (\n\tpt = fmt.Printf\n)\n\nfunc init() {\n\tvar seed int64\n\tbinary.Read(crand.Reader, binary.LittleEndian, &seed)\n\trand.Seed(seed)\n\n\tflag.Parse()\n}\n\nfunc main() {\n\t\/*\n\t\ttraceFile, err := os.Create(\"trace\")\n\t\tcheckErr(\"create trace file\", err)\n\t\tdefer traceFile.Close()\n\t\terr = pprof.StartTrace(traceFile)\n\t\tcheckErr(\"start trace\", err)\n\t\tdefer pprof.StopTrace()\n\t*\/\n\n\tdir := \".\"\n\tif args := flag.Args(); len(args) > 0 {\n\t\tdir = args[0]\n\t}\n\n\t\/\/ load words\n\ttype Word struct {\n\t\tText string\n\t\tDesc string\n\t}\n\twords := map[string]Word{}\n\tcontent, err := ioutil.ReadFile(filepath.Join(dir, \"words\"))\n\tcheckErr(\"read words file\", err)\n\tfor _, line := range strings.Split(string(content), \"\\n\") {\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tindex := 0\n\t\trunes := []rune(line)\n\t\tfor i, r := range runes {\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tindex = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttext := string(runes[:index])\n\t\tdesc := string(runes[index+1:])\n\t\tif len(text) == 0 || len(desc) == 0 {\n\t\t\tlog.Fatalf(\"invalid word entry %s\\n\", line)\n\t\t}\n\t\twords[text] = Word{\n\t\t\tText: text,\n\t\t\tDesc: desc,\n\t\t}\n\t}\n\n\t\/\/ data file\n\tdataFilePath := filepath.Join(dir, \"data\")\n\tdataFileLockPath := filepath.Join(dir, \".data.lock\")\n\ttype HistoryEntry struct {\n\t\tTime time.Time\n\t\tWhat string\n\t}\n\ttype Practice struct {\n\t\tType string\n\t\tText string\n\t}\n\tdata := struct {\n\t\tHistory map[Practice][]HistoryEntry\n\t}{\n\t\tHistory: make(map[Practice][]HistoryEntry),\n\t}\n\tdataFile, err := gobfile.New(&data, dataFilePath, gobfile.NewFileLocker(dataFileLockPath))\n\tcheckErr(\"open data file\", err)\n\tdefer dataFile.Close()\n\tdefer dataFile.Save()\n\n\t\/\/ check new entry\n\tfor _, word := range words {\n\t\tfor _, t := range []string{\"audio\", \"text\", \"usage\"} {\n\t\t\tpractice := Practice{\n\t\t\t\tType: t,\n\t\t\t\tText: word.Text,\n\t\t\t}\n\t\t\tif _, ok := data.History[practice]; !ok {\n\t\t\t\tpt(\"new practice: %s %s\\n\", t, word.Text)\n\t\t\t\tdata.History[practice] = append(data.History[practice], HistoryEntry{\n\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\tWhat: \"ok\",\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\tdataFile.Save()\n\n\t\/\/ review functions\n\taudioReview := func(word Word) bool {\n\t\tvar reply string\n\t\tretry := 1\n\tplay:\n\t\tpt(\"playing audio\\n\")\n\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\tcheckErr(\"play audio\", err)\n\task1:\n\t\tpt(\"'j' to show text, 'r' to replay\\n\")\n\t\tfmt.Scanf(\"%s\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"%s\\n\", word.Text)\n\t\task2:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask2\n\t\t\t}\n\t\tcase \"r\":\n\t\t\tif retry > 0 {\n\t\t\t\tretry--\n\t\t\t\tgoto play\n\t\t\t} else {\n\t\t\t\tpt(\"no more replay\\n\")\n\t\t\t\tgoto ask1\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask1\n\t\t}\n\t\treturn false\n\t}\n\n\ttextReview := func(word Word) bool {\n\t\tpt(\"showing text\\n\")\n\t\tpt(\"%s\\n\", word.Text)\n\task2:\n\t\tpt(\"'j' to play audio\\n\")\n\t\tvar reply string\n\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"playing audio\\n\")\n\t\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\t\tcheckErr(\"play audio\", err)\n\t\task:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask2\n\t\t}\n\t\treturn false\n\t}\n\n\tusageReview := func(word Word) bool {\n\t\tpt(\"showing usage\\n\")\n\t\tpt(\"%s\\n\", word.Desc)\n\task:\n\t\tpt(\"'j' to show answer\\n\")\n\t\tvar reply string\n\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\tswitch reply {\n\t\tcase \"j\":\n\t\t\tpt(\"playing audio\\n\")\n\t\t\terr := exec.Command(\"mpv\", filepath.Join(dir, fmt.Sprintf(\"%s.mp3\", word.Text))).Run()\n\t\t\tcheckErr(\"play audio\", err)\n\t\t\tpt(\"%s\\n\", word.Text)\n\t\task2:\n\t\t\tpt(\"'y' to level up, 'n' to keep\\n\")\n\t\t\tfmt.Scanf(\"%s\\n\", &reply)\n\t\t\tswitch reply {\n\t\t\tcase \"y\":\n\t\t\t\treturn true\n\t\t\tcase \"n\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\tgoto ask2\n\t\t\t}\n\t\tdefault:\n\t\t\tgoto ask\n\t\t}\n\t\treturn false\n\t}\n\n\treviewFuncs := map[string]func(Word) bool{\n\t\t\"audio\": audioReview,\n\t\t\"text\": textReview,\n\t\t\"usage\": usageReview,\n\t}\n\n\ttype PracticeInfo struct {\n\t\tPractice Practice\n\t\tMax time.Duration\n\t\tFade time.Duration\n\t\tRatio float64\n\t}\n\tpractices := []PracticeInfo{}\n\tfor practice, history := range data.History {\n\t\t\/\/ calculate fade and max\n\t\tlast := history[len(history)-1]\n\t\tfade := time.Now().Sub(last.Time)\n\t\tvar max time.Duration\n\t\tfor i := 1; i < len(history); i++ {\n\t\t\tif history[i].What != \"ok\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif d := history[i].Time.Sub(history[i-1].Time); d > max {\n\t\t\t\tmax = d\n\t\t\t}\n\t\t}\n\t\tratio := float64(fade) \/ float64(max+1)\n\t\t\/\/ filter\n\t\tif ratio < 2.0 {\n\t\t\tcontinue\n\t\t}\n\t\tif fade < time.Second*30 { \/\/ skip newly added\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ collect\n\t\tpractices = append(practices, PracticeInfo{\n\t\t\tPractice: practice,\n\t\t\tFade: fade,\n\t\t\tMax: max,\n\t\t\tRatio: ratio,\n\t\t})\n\t}\n\tpt(\"%d practices\\n\", len(practices))\n\n\t\/\/ sort\n\tfor i := len(practices) - 1; i >= 1; i-- {\n\t\tj := rand.Intn(i + 1)\n\t\tpractices[i], practices[j] = practices[j], practices[i]\n\t}\n\tsort.Sort(Sorter(len(practices), func(i, j int) bool {\n\t\treturn practices[i].Ratio > practices[j].Ratio\n\t}, func(i, j int) {\n\t\tpractices[i], practices[j] = practices[j], practices[i]\n\t}))\n\n\t\/\/ unique words\n\tpracticedWords := map[string]struct{}{}\n\tinfos := []PracticeInfo{}\n\tfor _, info := range practices {\n\t\tif _, ok := practicedWords[info.Practice.Text]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tinfos = append(infos, info)\n\t\tpracticedWords[info.Practice.Text] = struct{}{}\n\t}\n\tpractices = infos\n\n\t\/\/ practice\n\tfor _, practice := range practices {\n\t\tpt(\"practice %s fade %v max %v ratio %f\\n\", practice.Practice.Type, practice.Fade, practice.Max, practice.Ratio)\n\t\tvar what string\n\t\tif reviewFuncs[practice.Practice.Type](words[practice.Practice.Text]) {\n\t\t\twhat = \"ok\"\n\t\t} else {\n\t\t\twhat = \"fail\"\n\t\t}\n\t\tdata.History[practice.Practice] = append(data.History[practice.Practice], HistoryEntry{\n\t\t\tWhat: what,\n\t\t\tTime: time.Now(),\n\t\t})\n\t\tdataFile.Save()\n\t}\n}\n\nfunc checkErr(desc string, err error) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s error %v\", desc, err)\n\t}\n}\n\ntype sorter struct {\n\tlength int\n\tless func(i, j int) bool\n\tswap func(i, j int)\n}\n\nfunc Sorter(length int, less func(i, j int) bool, swap func(i, j int)) sorter {\n\treturn sorter{\n\t\tlength: length,\n\t\tless: less,\n\t\tswap: swap,\n\t}\n}\n\nfunc (s sorter) Len() int {\n\treturn s.length\n}\n\nfunc (s sorter) Less(i, j int) bool {\n\treturn s.less(i, j)\n}\n\nfunc (s sorter) Swap(i, j int) {\n\ts.swap(i, j)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Ka-Hing Cheung\n\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t. \"github.com\/kahing\/goofys\/internal\"\n\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\n\t\"github.com\/kardianos\/osext\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\tdaemon \"github.com\/sevlyar\/go-daemon\"\n)\n\nvar log = GetLogger(\"main\")\n\nfunc registerSIGINTHandler(mountPoint string) {\n\t\/\/ Register for SIGINT.\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt, syscall.SIGTERM)\n\n\t\/\/ Start a goroutine that will unmount when the signal is received.\n\tgo func() {\n\t\tfor {\n\t\t\ts := <-signalChan\n\t\t\tlog.Infof(\"Received %v, attempting to unmount...\", s)\n\n\t\t\terr := fuse.Unmount(mountPoint)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to unmount in response to %v: %v\", s, err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Successfully unmounted %v in response to %v\", s, mountPoint)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Mount the file system based on the supplied arguments, returning a\n\/\/ fuse.MountedFileSystem that can be joined to wait for unmounting.\nfunc mount(\n\tctx context.Context,\n\tbucketName string,\n\tmountPoint string,\n\tflags *FlagStorage) (mfs *fuse.MountedFileSystem, err error) {\n\n\tvar creds *credentials.Credentials\n\tif len(flags.Profile) > 0 {\n\t\tcreds = credentials.NewSharedCredentials(os.Getenv(\"HOME\")+\"\/.aws\/credentials\", flags.Profile)\n\t}\n\n\tawsConfig := &aws.Config{\n\t\tRegion: &flags.Region,\n\t\tLogger: GetLogger(\"s3\"),\n\t\tCredentials: creds,\n\t\t\/\/LogLevel: aws.LogLevel(aws.LogDebug),\n\t}\n\tif len(flags.Endpoint) > 0 {\n\t\tawsConfig.Endpoint = &flags.Endpoint\n\t}\n\n\t\/\/ deprecate flags.UsePathRequest\n\tif flags.UsePathRequest {\n\t\tlog.Infoln(\"--use-path-request is deprecated, it's always on\")\n\t}\n\tawsConfig.S3ForcePathStyle = aws.Bool(true)\n\n\tgoofys := NewGoofys(bucketName, awsConfig, flags)\n\tif goofys == nil {\n\t\terr = fmt.Errorf(\"Mount: initialization failed\")\n\t\treturn\n\t}\n\tserver := fuseutil.NewFileSystemServer(goofys)\n\n\tfuseLog := GetLogger(\"fuse\")\n\n\t\/\/ Mount the file system.\n\tmountCfg := &fuse.MountConfig{\n\t\tFSName: bucketName,\n\t\tOptions: flags.MountOptions,\n\t\tErrorLogger: GetStdLogger(NewLogger(\"fuse\"), logrus.ErrorLevel),\n\t\tDisableWritebackCaching: true,\n\t}\n\n\tif flags.DebugFuse {\n\t\tfuseLog.Level = logrus.DebugLevel\n\t\tmountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)\n\t}\n\n\tmfs, err = fuse.Mount(mountPoint, server, mountCfg)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Mount: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc massagePath() {\n\tfor _, e := range os.Environ() {\n\t\tif strings.HasPrefix(e, \"PATH=\") {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ mount -a seems to run goofys without PATH\n\t\/\/ usually fusermount is in \/bin\n\tos.Setenv(\"PATH\", \"\/bin\")\n}\n\nfunc massageArg0() {\n\tvar err error\n\tos.Args[0], err = osext.Executable()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to discover current executable: %v\", err))\n\t}\n}\n\nfunc main() {\n\tapp := NewApp()\n\tapp.Action = func(c *cli.Context) {\n\t\tvar err error\n\n\t\t\/\/ We should get two arguments exactly. Otherwise error out.\n\t\tif len(c.Args()) != 2 {\n\t\t\tfmt.Fprintf(\n\t\t\t\tos.Stderr,\n\t\t\t\t\"Error: %s takes exactly two arguments.\\n\\n\",\n\t\t\t\tapp.Name)\n\t\t\tcli.ShowAppHelp(c)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ Populate and parse flags.\n\t\tbucketName := c.Args()[0]\n\t\tmountPoint := c.Args()[1]\n\t\tflags := PopulateFlags(c)\n\n\t\tInitLoggers(!flags.Foreground)\n\n\t\tmassagePath()\n\n\t\tif !flags.Foreground {\n\t\t\tmassageArg0()\n\n\t\t\tctx := new(daemon.Context)\n\t\t\tchild, err := ctx.Reborn()\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"unable to daemonize: %v\", err))\n\t\t\t}\n\n\t\t\tif child != nil {\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tdefer ctx.Release()\n\t\t\t}\n\n\t\t}\n\n\t\t\/\/ Mount the file system.\n\t\tmfs, err := mount(\n\t\t\tcontext.Background(),\n\t\t\tbucketName,\n\t\t\tmountPoint,\n\t\t\tflags)\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Mounting file system: %v\", err)\n\t\t}\n\n\t\tlog.Println(\"File system has been successfully mounted.\")\n\n\t\t\/\/ Let the user unmount with Ctrl-C (SIGINT).\n\t\tregisterSIGINTHandler(mfs.Dir())\n\n\t\t\/\/ Wait for the file system to be unmounted.\n\t\terr = mfs.Join(context.Background())\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"MountedFileSystem.Join: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(\"Successfully exiting.\")\n\t}\n\n\terr := app.Run(MassageMountFlags(os.Args))\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n<commit_msg>log buffer_pool usage on debug_fuse<commit_after>\/\/ Copyright 2015 Ka-Hing Cheung\n\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t. \"github.com\/kahing\/goofys\/internal\"\n\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\n\t\"github.com\/kardianos\/osext\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\tdaemon \"github.com\/sevlyar\/go-daemon\"\n)\n\nvar log = GetLogger(\"main\")\n\nfunc registerSIGINTHandler(mountPoint string) {\n\t\/\/ Register for SIGINT.\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt, syscall.SIGTERM)\n\n\t\/\/ Start a goroutine that will unmount when the signal is received.\n\tgo func() {\n\t\tfor {\n\t\t\ts := <-signalChan\n\t\t\tlog.Infof(\"Received %v, attempting to unmount...\", s)\n\n\t\t\terr := fuse.Unmount(mountPoint)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to unmount in response to %v: %v\", s, err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Successfully unmounted %v in response to %v\", s, mountPoint)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Mount the file system based on the supplied arguments, returning a\n\/\/ fuse.MountedFileSystem that can be joined to wait for unmounting.\nfunc mount(\n\tctx context.Context,\n\tbucketName string,\n\tmountPoint string,\n\tflags *FlagStorage) (mfs *fuse.MountedFileSystem, err error) {\n\n\tvar creds *credentials.Credentials\n\tif len(flags.Profile) > 0 {\n\t\tcreds = credentials.NewSharedCredentials(os.Getenv(\"HOME\")+\"\/.aws\/credentials\", flags.Profile)\n\t}\n\n\tawsConfig := &aws.Config{\n\t\tRegion: &flags.Region,\n\t\tLogger: GetLogger(\"s3\"),\n\t\tCredentials: creds,\n\t\t\/\/LogLevel: aws.LogLevel(aws.LogDebug),\n\t}\n\tif len(flags.Endpoint) > 0 {\n\t\tawsConfig.Endpoint = &flags.Endpoint\n\t}\n\n\t\/\/ deprecate flags.UsePathRequest\n\tif flags.UsePathRequest {\n\t\tlog.Infoln(\"--use-path-request is deprecated, it's always on\")\n\t}\n\tawsConfig.S3ForcePathStyle = aws.Bool(true)\n\n\tgoofys := NewGoofys(bucketName, awsConfig, flags)\n\tif goofys == nil {\n\t\terr = fmt.Errorf(\"Mount: initialization failed\")\n\t\treturn\n\t}\n\tserver := fuseutil.NewFileSystemServer(goofys)\n\n\tfuseLog := GetLogger(\"fuse\")\n\n\t\/\/ Mount the file system.\n\tmountCfg := &fuse.MountConfig{\n\t\tFSName: bucketName,\n\t\tOptions: flags.MountOptions,\n\t\tErrorLogger: GetStdLogger(NewLogger(\"fuse\"), logrus.ErrorLevel),\n\t\tDisableWritebackCaching: true,\n\t}\n\n\tif flags.DebugFuse {\n\t\tfuseLog.Level = logrus.DebugLevel\n\t\tlog.Level = logrus.DebugLevel\n\t\tmountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)\n\t}\n\n\tmfs, err = fuse.Mount(mountPoint, server, mountCfg)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Mount: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc massagePath() {\n\tfor _, e := range os.Environ() {\n\t\tif strings.HasPrefix(e, \"PATH=\") {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ mount -a seems to run goofys without PATH\n\t\/\/ usually fusermount is in \/bin\n\tos.Setenv(\"PATH\", \"\/bin\")\n}\n\nfunc massageArg0() {\n\tvar err error\n\tos.Args[0], err = osext.Executable()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to discover current executable: %v\", err))\n\t}\n}\n\nfunc main() {\n\tapp := NewApp()\n\tapp.Action = func(c *cli.Context) {\n\t\tvar err error\n\n\t\t\/\/ We should get two arguments exactly. Otherwise error out.\n\t\tif len(c.Args()) != 2 {\n\t\t\tfmt.Fprintf(\n\t\t\t\tos.Stderr,\n\t\t\t\t\"Error: %s takes exactly two arguments.\\n\\n\",\n\t\t\t\tapp.Name)\n\t\t\tcli.ShowAppHelp(c)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ Populate and parse flags.\n\t\tbucketName := c.Args()[0]\n\t\tmountPoint := c.Args()[1]\n\t\tflags := PopulateFlags(c)\n\n\t\tInitLoggers(!flags.Foreground)\n\n\t\tmassagePath()\n\n\t\tif !flags.Foreground {\n\t\t\tmassageArg0()\n\n\t\t\tctx := new(daemon.Context)\n\t\t\tchild, err := ctx.Reborn()\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"unable to daemonize: %v\", err))\n\t\t\t}\n\n\t\t\tif child != nil {\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tdefer ctx.Release()\n\t\t\t}\n\n\t\t}\n\n\t\t\/\/ Mount the file system.\n\t\tmfs, err := mount(\n\t\t\tcontext.Background(),\n\t\t\tbucketName,\n\t\t\tmountPoint,\n\t\t\tflags)\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Mounting file system: %v\", err)\n\t\t}\n\n\t\tlog.Println(\"File system has been successfully mounted.\")\n\n\t\t\/\/ Let the user unmount with Ctrl-C (SIGINT).\n\t\tregisterSIGINTHandler(mfs.Dir())\n\n\t\t\/\/ Wait for the file system to be unmounted.\n\t\terr = mfs.Join(context.Background())\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"MountedFileSystem.Join: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(\"Successfully exiting.\")\n\t}\n\n\terr := app.Run(MassageMountFlags(os.Args))\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/s3\"\n)\n\ntype Options struct {\n\tPort string\n\tUsername string\n\tPassword string\n\tRealm string\n\tBucket string\n\tPrefix string\n\tMaxAge int\n\tVerbose bool\n\tIndexFile string\n}\n\nfunc (o *Options) RequiresAuth() bool {\n\treturn o.Username != \"\" && o.Password != \"\"\n}\n\nfunc Opts(c *cli.Context) *Options {\n\treturn &Options{\n\t\tPort: c.String(\"port\"),\n\t\tUsername: c.String(\"username\"),\n\t\tPassword: c.String(\"password\"),\n\t\tRealm: c.String(\"realm\"),\n\t\tBucket: c.String(\"bucket\"),\n\t\tPrefix: c.String(\"prefix\"),\n\t\tMaxAge: c.Int(\"max-age\"),\n\t\tVerbose: c.Bool(\"verbose\"),\n\t\tIndexFile: c.String(\"index-file\"),\n\t}\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\"port\", \"8080\", \"port to run on\", \"PORT\"},\n\t\tcli.StringFlag{\"username\", \"\", \"the username to prompt for\", \"USERNAME\"},\n\t\tcli.StringFlag{\"password\", \"\", \"the password to prompt for\", \"PASSWORD\"},\n\t\tcli.StringFlag{\"realm\", \"Realm\", \"the challenge realm\", \"REALM\"},\n\t\tcli.StringFlag{\"bucket\", \"\", \"the name of the s3 bucket to serve from\", \"BUCKET\"},\n\t\tcli.StringFlag{\"prefix\", \"\", \"the optional prefix to serve from e.g. s3:\/\/bucket\/prefix\/...\", \"PREFIX\"},\n\t\tcli.IntFlag{\"max-age\", 90, \"the cache-control header; max-age\", \"MAX_AGE\"},\n\t\tcli.BoolFlag{\"verbose\", \"enable enhanced logging\", \"VERBOSE\"},\n\t\tcli.StringFlag{\"index-file\", \"index.html\", \"file to search for indexes\", \"INDEX\"},\n\t}\n\tapp.Action = Run\n\tapp.Run(os.Args)\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc Run(c *cli.Context) {\n\topts := Opts(c)\n\n\thandler, err := S3Handler(opts)\n\tcheck(err)\n\n\tif opts.Verbose {\n\t\tlog.Printf(\"starting server on port %s\\n\", opts.Port)\n\t}\n\terr = http.ListenAndServe(\":\"+opts.Port, handler)\n\tcheck(err)\n}\n\nfunc S3Handler(opts *Options) (http.HandlerFunc, error) {\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapi := s3.New(auth, aws.USEast)\n\tbucket := api.Bucket(opts.Bucket)\n\tif opts.Verbose {\n\t\tlog.Printf(\"s3 bucket: %s\\n\", opts.Bucket)\n\t}\n\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif opts.RequiresAuth() {\n\t\t\tu, p, _ := req.BasicAuth()\n\t\t\tif opts.Verbose {\n\t\t\t\tlog.Printf(\"Authorization: %s\/%s\\n\", u, p)\n\t\t\t}\n\n\t\t\tif u != opts.Username || p != opts.Password {\n\t\t\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(\"Basic realm=\\\"%s\\\"\", opts.Realm))\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"%s%s\", opts.Prefix, req.URL.Path)\n\t\tif strings.Contains(path, \"\/\/\") {\n\t\t\tpath = strings.Replace(path, \"\/\/\", \"\/\", -1)\n\t\t}\n\t\tif strings.HasPrefix(path, \"\/\") {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tif strings.HasSuffix(req.URL.Path, \"\/\") {\n\t\t\tpath = path + opts.IndexFile\n\t\t}\n\t\tif opts.Verbose {\n\t\t\tlog.Printf(\"> %s => s3:\/\/%s\/%s\\n\", req.URL.Path, opts.Bucket, path)\n\t\t}\n\n\t\treadCloser, err := bucket.GetReader(path)\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(\"Basic realm=\\\"%s\\\"\", opts.Realm))\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tdefer readCloser.Close()\n\n\t\tcontentType := mime.TypeByExtension(path)\n\t\tw.Header().Set(\"Content-Type\", contentType)\n\n\t\tio.Copy(w, readCloser)\n\t}, nil\n}\n<commit_msg>- added mit licensing<commit_after>\/\/ The MIT License (MIT)\n\n\/\/ Copyright (c) 2015 Matt Ho\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n\/\/ SOFTWARE.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/s3\"\n)\n\ntype Options struct {\n\tPort string\n\tUsername string\n\tPassword string\n\tRealm string\n\tBucket string\n\tPrefix string\n\tMaxAge int\n\tVerbose bool\n\tIndexFile string\n}\n\nfunc (o *Options) RequiresAuth() bool {\n\treturn o.Username != \"\" && o.Password != \"\"\n}\n\nfunc Opts(c *cli.Context) *Options {\n\treturn &Options{\n\t\tPort: c.String(\"port\"),\n\t\tUsername: c.String(\"username\"),\n\t\tPassword: c.String(\"password\"),\n\t\tRealm: c.String(\"realm\"),\n\t\tBucket: c.String(\"bucket\"),\n\t\tPrefix: c.String(\"prefix\"),\n\t\tMaxAge: c.Int(\"max-age\"),\n\t\tVerbose: c.Bool(\"verbose\"),\n\t\tIndexFile: c.String(\"index-file\"),\n\t}\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\"port\", \"8080\", \"port to run on\", \"PORT\"},\n\t\tcli.StringFlag{\"username\", \"\", \"the username to prompt for\", \"USERNAME\"},\n\t\tcli.StringFlag{\"password\", \"\", \"the password to prompt for\", \"PASSWORD\"},\n\t\tcli.StringFlag{\"realm\", \"Realm\", \"the challenge realm\", \"REALM\"},\n\t\tcli.StringFlag{\"bucket\", \"\", \"the name of the s3 bucket to serve from\", \"BUCKET\"},\n\t\tcli.StringFlag{\"prefix\", \"\", \"the optional prefix to serve from e.g. s3:\/\/bucket\/prefix\/...\", \"PREFIX\"},\n\t\tcli.IntFlag{\"max-age\", 90, \"the cache-control header; max-age\", \"MAX_AGE\"},\n\t\tcli.BoolFlag{\"verbose\", \"enable enhanced logging\", \"VERBOSE\"},\n\t\tcli.StringFlag{\"index-file\", \"index.html\", \"file to search for indexes\", \"INDEX\"},\n\t}\n\tapp.Action = Run\n\tapp.Run(os.Args)\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n\nfunc Run(c *cli.Context) {\n\topts := Opts(c)\n\n\thandler, err := S3Handler(opts)\n\tcheck(err)\n\n\tif opts.Verbose {\n\t\tlog.Printf(\"starting server on port %s\\n\", opts.Port)\n\t}\n\terr = http.ListenAndServe(\":\"+opts.Port, handler)\n\tcheck(err)\n}\n\nfunc S3Handler(opts *Options) (http.HandlerFunc, error) {\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapi := s3.New(auth, aws.USEast)\n\tbucket := api.Bucket(opts.Bucket)\n\tif opts.Verbose {\n\t\tlog.Printf(\"s3 bucket: %s\\n\", opts.Bucket)\n\t}\n\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif opts.RequiresAuth() {\n\t\t\tu, p, _ := req.BasicAuth()\n\t\t\tif opts.Verbose {\n\t\t\t\tlog.Printf(\"Authorization: %s\/%s\\n\", u, p)\n\t\t\t}\n\n\t\t\tif u != opts.Username || p != opts.Password {\n\t\t\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(\"Basic realm=\\\"%s\\\"\", opts.Realm))\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"%s%s\", opts.Prefix, req.URL.Path)\n\t\tif strings.Contains(path, \"\/\/\") {\n\t\t\tpath = strings.Replace(path, \"\/\/\", \"\/\", -1)\n\t\t}\n\t\tif strings.HasPrefix(path, \"\/\") {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tif strings.HasSuffix(req.URL.Path, \"\/\") {\n\t\t\tpath = path + opts.IndexFile\n\t\t}\n\t\tif opts.Verbose {\n\t\t\tlog.Printf(\"> %s => s3:\/\/%s\/%s\\n\", req.URL.Path, opts.Bucket, path)\n\t\t}\n\n\t\treadCloser, err := bucket.GetReader(path)\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(\"Basic realm=\\\"%s\\\"\", opts.Realm))\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tdefer readCloser.Close()\n\n\t\tcontentType := mime.TypeByExtension(path)\n\t\tw.Header().Set(\"Content-Type\", contentType)\n\n\t\tio.Copy(w, readCloser)\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/coduno\/compute\/runner\"\n\t\"github.com\/coduno\/engine\/cloud\/model\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/datastore\"\n)\n\nvar client *datastore.Client\n\nconst appID = \"coduno\"\n\nfunc init() {\n\tvar err error\n\tclient, err = datastore.NewClient(context.Background(), appID)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", startHandler)\n\thttp.ListenAndServe(\":8081\", nil)\n}\n\nfunc startHandler(w http.ResponseWriter, req *http.Request) {\n\tcors(w, req)\n\tencodedKey := req.URL.Path[1:]\n\n\t\/\/ TODO(victorbalan): Remove this after we can connect with the engine to localhost.\n\t\/\/ Untill then leave it so we can get entity keys to query for.\n\t\/\/ q := datastore.NewQuery(model.ChallengeKind).Filter(\"Runner =\", \"simple\")\n\t\/\/ var challenges []model.Challenge\n\t\/\/ t, _ := q.GetAll(NewContext(), &challenges)\n\t\/\/ fmt.Println(t[0])\n\t\/\/ fmt.Println(t[0].Encode())\n\t\/\/ return\n\n\tkey, err := datastore.DecodeKey(encodedKey)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar challenge model.Challenge\n\terr = client.Get(context.Background(), key, &challenge)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tf := flag.NewFlagSet(\"challengeFlags\", flag.ContinueOnError)\n\ttests := f.String(\"tests\", \"\", \"Defines the tests path\")\n\t\/\/ TODO(victorbalan): Enable the image flage when we will use it\n\t\/\/ image := f.String(\"image\", \"\", \"Defines a custom image\")\n\n\tflags := strings.Split(challenge.Flags, \" \")\n\tif len(flags) > 0 {\n\t\tif err := f.Parse(flags); err != nil {\n\t\t\tfmt.Printf(err.Error())\n\t\t}\n\t}\n\n\tvar rh runner.RunHandler\n\tswitch challenge.Runner {\n\tcase \"simple\":\n\t\trh = runner.SimpleRunHandler{}\n\tcase \"javut\":\n\t\trh = runner.JavaUnitTestHandler{}\n\tcase \"outputtest\":\n\t\tif *tests == \"\" {\n\t\t\thttp.Error(w, \"There is no test path provided\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\trh = runner.OutputTestHandler{TestFilePath: *tests}\n\tdefault:\n\t\thttp.Error(w, \"Runner not available.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tconfig := rh.Handle(w, req)\n\tconfig.Challenge = key\n\tconfig.User = \"receivedUser\"\n\n\t\/\/ TODO(flowlo): Find out whether Handle completed successfully\n\t\/\/ to check.\n\tres, err := config.Run(client)\n\n\tif err != nil {\n\t\thttp.Error(w, \"docker: \"+err.Error(), http.StatusInternalServerError)\n\t}\n\n\trh.Respond(w, req, res)\n}\n\n\/\/ Rudimentary CORS checking. See\n\/\/ https:\/\/developer.mozilla.org\/docs\/Web\/HTTP\/Access_control_CORS\nfunc cors(w http.ResponseWriter, req *http.Request) {\n\torigin := req.Header.Get(\"Origin\")\n\n\t\/\/ only allow CORS on localhost for development\n\tif strings.HasPrefix(origin, \"http:\/\/localhost\") {\n\t\t\/\/ The cookie related headers are used for the api requests authentication\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"OPTIONS, GET, POST, PUT, DELETE, PATCH\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Cookie, Content-Type\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tif req.Method == \"OPTIONS\" {\n\t\t\tw.Write([]byte(\"OK\"))\n\t\t}\n\t}\n}\n<commit_msg>Adapt new domain model<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/coduno\/compute\/runner\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/datastore\"\n)\n\ntype CodeTask struct {\n\tRunner string\n\tFlags string\n\tLanguages []string\n}\n\nconst appID = \"coduno\"\n\nvar client *datastore.Client\n\nfunc init() {\n\tvar err error\n\tclient, err = datastore.NewClient(context.Background(), appID)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", startHandler)\n\thttp.ListenAndServe(\":8081\", nil)\n}\n\nfunc startHandler(w http.ResponseWriter, req *http.Request) {\n\tcors(w, req)\n\tencodedKey := req.URL.Path[1:]\n\n\t\/\/ TODO(victorbalan): Remove this after we can connect with the engine to localhost.\n\t\/\/ Untill then leave it so we can get entity keys to query for.\n\t\/\/ var challenges []model.Challenge\n\t\/\/ q := datastore.NewQuery(model.ChallengeKind).Filter(\"Runner =\", \"simple\")\n\t\/\/ t, _ := q.GetAll(NewContext(), &challenges)\n\t\/\/ fmt.Println(t[0])\n\t\/\/ fmt.Println(t[0].Encode())\n\t\/\/ return\n\n\tkey, err := datastore.DecodeKey(encodedKey)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar task CodeTask\n\terr = client.Get(context.Background(), key, &task)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tf := flag.NewFlagSet(\"taskFlags\", flag.ContinueOnError)\n\ttests := f.String(\"tests\", \"\", \"Defines the tests path\")\n\t\/\/ TODO(victorbalan): Enable the image flage when we will use it\n\t\/\/ image := f.String(\"image\", \"\", \"Defines a custom image\")\n\n\tflags := strings.Split(task.Flags, \" \")\n\tif len(flags) > 0 {\n\t\tif err := f.Parse(flags); err != nil {\n\t\t\tfmt.Printf(err.Error())\n\t\t}\n\t}\n\n\tvar rh runner.RunHandler\n\tswitch task.Runner {\n\tcase \"simple\":\n\t\trh = runner.SimpleRunHandler{}\n\tcase \"javut\":\n\t\trh = runner.JavaUnitTestHandler{}\n\tcase \"outputtest\":\n\t\tif *tests == \"\" {\n\t\t\thttp.Error(w, \"There is no test path provided\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\trh = runner.OutputTestHandler{TestFilePath: *tests}\n\tdefault:\n\t\thttp.Error(w, \"Runner not available.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tconfig := rh.Handle(w, req)\n\tconfig.Challenge = key\n\tconfig.User = \"receivedUser\"\n\n\t\/\/ TODO(flowlo): Find out whether Handle completed successfully\n\t\/\/ to check.\n\tres, err := config.Run(client)\n\n\tif err != nil {\n\t\thttp.Error(w, \"docker: \"+err.Error(), http.StatusInternalServerError)\n\t}\n\n\trh.Respond(w, req, res)\n}\n\n\/\/ Rudimentary CORS checking. See\n\/\/ https:\/\/developer.mozilla.org\/docs\/Web\/HTTP\/Access_control_CORS\nfunc cors(w http.ResponseWriter, req *http.Request) {\n\torigin := req.Header.Get(\"Origin\")\n\n\t\/\/ only allow CORS on localhost for development\n\tif strings.HasPrefix(origin, \"http:\/\/localhost\") {\n\t\t\/\/ The cookie related headers are used for the api requests authentication\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"OPTIONS, GET, POST, PUT, DELETE, PATCH\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Cookie, Content-Type\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tif req.Method == \"OPTIONS\" {\n\t\t\tw.Write([]byte(\"OK\"))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ server1 is a minimal \"echo\" server.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", handler) \/\/ each request calls handler function\n\tlog.Fatal(http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), nil))\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"URL.Path = %q\\n\", r.URL.Path)\n}\n<commit_msg>it worked for the first time<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/ChimeraCoder\/anaconda\"\n\t\"github.com\/jchannon\/gofavpocket\/pocket\"\n\t\"github.com\/jchannon\/gofavpocket\/twitter\"\n)\n\nfunc usage() {\n\tfmt.Println(\"Usage:\")\n\tfmt.Print(\"go run gofavpocket\/main.go\")\n\tfmt.Print(\" --consumerkey <consumerkey>\")\n\tfmt.Print(\" --consumersecret <consumersecret>\")\n\tfmt.Println(\" --pocketapikey <pocketapikey>\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"In order to get your consumerkey and consumersecret, you must register an 'app' at twitter.com:\")\n\tfmt.Println(\"https:\/\/dev.twitter.com\/apps\/new\")\n}\n\nfunc startWebServer() {\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/static\")))\n\thttp.ListenAndServe(\":3000\", nil)\n\n}\n\nfunc main() {\n\tvar consumerKey = flag.String(\n\t\t\"consumerkey\",\n\t\t\"\",\n\t\t\"Consumer Key from Twitter. See: https:\/\/dev.twitter.com\/apps\/new\")\n\n\tvar consumerSecret = flag.String(\n\t\t\"consumersecret\",\n\t\t\"\",\n\t\t\"Consumer Secret from Twitter. See: https:\/\/dev.twitter.com\/apps\/new\")\n\n\tvar apiKey = flag.String(\n\t\t\"pocketapikey\",\n\t\t\"\",\n\t\t\"API key from Pocket\")\n\n\tflag.Parse()\n\n\tif len(*consumerKey) == 0 || len(*consumerSecret) == 0 || len(*apiKey) == 0 {\n\t\tfmt.Println(\"You must set the --consumerkey and --consumersecret and --pocketapi flags.\")\n\t\tfmt.Println(\"---\")\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tblah := &twitter.Twitter{}\n\tfavourites, err := blah.GetFavourites(consumerKey, consumerSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo startWebServer()\n\n\tdata := pocket.GetPocketRequestToken(apiKey, \"http:\/\/google.co.uk\")\n\tpocket.AuthorizePocket(data, \"http:\/\/localhost:3000\/\")\n\t\/\/ fmt.Println(data)\n\tfmt.Println(\"(4) Press Enter when authorized with Pocket.\")\n\tinstr := \"\"\n\tfmt.Scanln(&instr)\n\t_, pocketaccesstoken := pocket.GetPocketAccessToken(apiKey, data, \"http:\/\/yahoo.co.uk\")\n\n\tfor _, tweet := range favourites {\n\t\tif len(tweet.Entities.Urls) > 0 {\n\t\t\tfor _, tweeturl := range tweet.Entities.Urls {\n\t\t\t\turl, err := url.Parse(tweeturl.Expanded_url)\n\t\t\t\tif err != nil {\n\n\t\t\t\t}\n\t\t\t\text := filepath.Ext(url.Path)\n\t\t\t\tif len(ext) == 0 || ext == \"html\" {\n\n\t\t\t\t\taddUrlInTweetToPocket(apiKey, pocketaccesstoken, tweeturl.Expanded_url, tweet.Id)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/addBasicTweetToPocket(apiKey, pocketaccesstoken, tweet)\n\t\t\t\t\tfmt.Println(\"Not processed : \" + tweeturl.Expanded_url)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\taddBasicTweetToPocket(apiKey, pocketaccesstoken, tweet)\n\t\t}\n\n\t\tbreak\n\t}\n\n}\n\nfunc addBasicTweetToPocket(apiKey *string, accesstoken string, tweet anaconda.Tweet) {\n\tpocketurl := \"https:\/\/twitter.com\/\" + tweet.User.ScreenName + \"\/status\/\" + strconv.FormatInt(tweet.Id, 10)\n\tfmt.Println(pocketurl)\n\tpocket.AddItemToPocket(apiKey, accesstoken, pocketurl, tweet.Id)\n}\n\nfunc addUrlInTweetToPocket(apiKey *string, accesstoken string, urlintweet string, Id int64) {\n\tfmt.Println(\"Addiing to pocket : \" + urlintweet)\n\tpocket.AddItemToPocket(apiKey, accesstoken, urlintweet, Id)\n}\n<commit_msg>Fixed Typo<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/ChimeraCoder\/anaconda\"\n\t\"github.com\/jchannon\/gofavpocket\/pocket\"\n\t\"github.com\/jchannon\/gofavpocket\/twitter\"\n)\n\nfunc usage() {\n\tfmt.Println(\"Usage:\")\n\tfmt.Print(\"go run gofavpocket\/main.go\")\n\tfmt.Print(\" --consumerkey <consumerkey>\")\n\tfmt.Print(\" --consumersecret <consumersecret>\")\n\tfmt.Println(\" --pocketapikey <pocketapikey>\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"In order to get your consumerkey and consumersecret, you must register an 'app' at twitter.com:\")\n\tfmt.Println(\"https:\/\/dev.twitter.com\/apps\/new\")\n}\n\nfunc startWebServer() {\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/static\")))\n\thttp.ListenAndServe(\":3000\", nil)\n\n}\n\nfunc main() {\n\tvar consumerKey = flag.String(\n\t\t\"consumerkey\",\n\t\t\"\",\n\t\t\"Consumer Key from Twitter. See: https:\/\/dev.twitter.com\/apps\/new\")\n\n\tvar consumerSecret = flag.String(\n\t\t\"consumersecret\",\n\t\t\"\",\n\t\t\"Consumer Secret from Twitter. See: https:\/\/dev.twitter.com\/apps\/new\")\n\n\tvar apiKey = flag.String(\n\t\t\"pocketapikey\",\n\t\t\"\",\n\t\t\"API key from Pocket\")\n\n\tflag.Parse()\n\n\tif len(*consumerKey) == 0 || len(*consumerSecret) == 0 || len(*apiKey) == 0 {\n\t\tfmt.Println(\"You must set the --consumerkey and --consumersecret and --pocketapi flags.\")\n\t\tfmt.Println(\"---\")\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tblah := &twitter.Twitter{}\n\tfavourites, err := blah.GetFavourites(consumerKey, consumerSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo startWebServer()\n\n\tdata := pocket.GetPocketRequestToken(apiKey, \"http:\/\/google.co.uk\")\n\tpocket.AuthorizePocket(data, \"http:\/\/localhost:3000\/\")\n\t\/\/ fmt.Println(data)\n\tfmt.Println(\"(4) Press Enter when authorized with Pocket.\")\n\tinstr := \"\"\n\tfmt.Scanln(&instr)\n\t_, pocketaccesstoken := pocket.GetPocketAccessToken(apiKey, data, \"http:\/\/yahoo.co.uk\")\n\n\tfor _, tweet := range favourites {\n\t\tif len(tweet.Entities.Urls) > 0 {\n\t\t\tfor _, tweeturl := range tweet.Entities.Urls {\n\t\t\t\turl, err := url.Parse(tweeturl.Expanded_url)\n\t\t\t\tif err != nil {\n\n\t\t\t\t}\n\t\t\t\text := filepath.Ext(url.Path)\n\t\t\t\tif len(ext) == 0 || ext == \"html\" {\n\n\t\t\t\t\taddUrlInTweetToPocket(apiKey, pocketaccesstoken, tweeturl.Expanded_url, tweet.Id)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/addBasicTweetToPocket(apiKey, pocketaccesstoken, tweet)\n\t\t\t\t\tfmt.Println(\"Not processed : \" + tweeturl.Expanded_url)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\taddBasicTweetToPocket(apiKey, pocketaccesstoken, tweet)\n\t\t}\n\n\t\tbreak\n\t}\n\n}\n\nfunc addBasicTweetToPocket(apiKey *string, accesstoken string, tweet anaconda.Tweet) {\n\tpocketurl := \"https:\/\/twitter.com\/\" + tweet.User.ScreenName + \"\/status\/\" + strconv.FormatInt(tweet.Id, 10)\n\tfmt.Println(pocketurl)\n\tpocket.AddItemToPocket(apiKey, accesstoken, pocketurl, tweet.Id)\n}\n\nfunc addUrlInTweetToPocket(apiKey *string, accesstoken string, urlintweet string, Id int64) {\n\tfmt.Println(\"Adding to pocket : \" + urlintweet)\n\tpocket.AddItemToPocket(apiKey, accesstoken, urlintweet, Id)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype IssueComment struct {\n\tIssueNumber int\n\tComment string\n\tIsPullRequest bool\n\tRepositoryOwner string\n\tRepositoryName string\n}\n\nfunc main() {\n\tconf := NewConfig()\n\tgithubClient := initGithubClient(conf.AccessToken)\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to read the request's body\")\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tsignature := r.Header.Get(\"X-Hub-Signature\")\n\t\thasSecret, err := hasSecret(body, signature, conf.Secret)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to check the signature\")\n\t\t\thttp.Error(w, \"Failed to check the signature\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif !hasSecret {\n\t\t\tlog.Println(\"Bad X-Hub-Signature\")\n\t\t\thttp.Error(w, \"Bad X-Hub-Signature\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\teventType := r.Header.Get(\"X-Github-Event\")\n\t\tif eventType != \"issue_comment\" {\n\t\t\tlog.Println(\"Unexpected event type: \" + eventType)\n\t\t\thttp.Error(w, \"Unexpected event type\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tissueComment, err := parseIssueComment(body)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to parse the requests body\")\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif !issueComment.IsPullRequest {\n\t\t\tw.Write([]byte(\"Not a PR. Ignoring.\"))\n\t\t\treturn\n\t\t}\n\t\tif issueComment.Comment != \"!squash\" {\n\t\t\tw.Write([]byte(\"Not a command I understand. Ignoring.\"))\n\t\t\treturn\n\t\t}\n\t\tpr, _, err := githubClient.PullRequests.Get(issueComment.RepositoryOwner, issueComment.RepositoryName, issueComment.IssueNumber)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Getting PR %s\/%s#%d failed: %s\\n\", issueComment.RepositoryOwner, issueComment.RepositoryName, issueComment.IssueNumber, err.Error())\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Wants to merge branch %s to %s\", *pr.Head.Ref, *pr.Base.Ref)\n\t})\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", conf.Port), nil))\n}\n\nfunc initGithubClient(accessToken string) *github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\treturn github.NewClient(tc)\n}\n\nfunc parseIssueComment(body []byte) (IssueComment, error) {\n\tvar message struct {\n\t\tIssue struct {\n\t\t\tNumber int `json:\"Number\"`\n\t\t\tPullRequest struct {\n\t\t\t\tURL string `json:\"url\"`\n\t\t\t} `json:\"pull_request\"`\n\t\t} `json:\"issue\"`\n\t\tRepository struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tOwner struct {\n\t\t\t\tLogin string `json:\"login\"`\n\t\t\t} `json:\"owner\"`\n\t\t} `json:\"repository\"`\n\t\tComment struct {\n\t\t\tBody string `json:\"body\"`\n\t\t} `json:\"comment\"`\n\t}\n\terr := json.Unmarshal(body, &message)\n\tif err != nil {\n\t\treturn IssueComment{}, err\n\t}\n\treturn IssueComment{\n\t\tIssueNumber: message.Issue.Number,\n\t\tComment: message.Comment.Body,\n\t\tIsPullRequest: message.Issue.PullRequest.URL != \"\",\n\t\tRepositoryOwner: message.Repository.Owner.Login,\n\t\tRepositoryName: message.Repository.Name,\n\t}, nil\n}\n\nfunc hasSecret(message []byte, signature, key string) (bool, error) {\n\tvar messageMACString string\n\tfmt.Sscanf(signature, \"sha1=%s\", &messageMACString)\n\tmessageMAC, err := hex.DecodeString(messageMACString)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmac := hmac.New(sha1.New, []byte(key))\n\tmac.Write(message)\n\texpectedMAC := mac.Sum(nil)\n\treturn hmac.Equal(messageMAC, expectedMAC), nil\n}\n<commit_msg>Clean up logging\/response messages a bit<commit_after>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype IssueComment struct {\n\tIssueNumber int\n\tComment string\n\tIsPullRequest bool\n\tRepositoryOwner string\n\tRepositoryName string\n}\n\nfunc main() {\n\tconf := NewConfig()\n\tgithubClient := initGithubClient(conf.AccessToken)\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to read the request's body\")\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tsignature := r.Header.Get(\"X-Hub-Signature\")\n\t\thasSecret, err := hasSecret(body, signature, conf.Secret)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to check the signature\")\n\t\t\thttp.Error(w, \"Failed to check the signature\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif !hasSecret {\n\t\t\tlog.Println(\"Bad X-Hub-Signature\")\n\t\t\thttp.Error(w, \"Bad X-Hub-Signature\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\teventType := r.Header.Get(\"X-Github-Event\")\n\t\tif eventType != \"issue_comment\" {\n\t\t\tw.Write([]byte(\"Not an event I understand. Ignoring.\"))\n\t\t\treturn\n\t\t}\n\t\tissueComment, err := parseIssueComment(body)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to parse the requests body\")\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif !issueComment.IsPullRequest {\n\t\t\tw.Write([]byte(\"Not a PR. Ignoring.\"))\n\t\t\treturn\n\t\t}\n\t\tif issueComment.Comment != \"!squash\" {\n\t\t\tw.Write([]byte(\"Not a command I understand. Ignoring.\"))\n\t\t\treturn\n\t\t}\n\t\tpr, _, err := githubClient.PullRequests.Get(issueComment.RepositoryOwner, issueComment.RepositoryName, issueComment.IssueNumber)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Getting PR %s\/%s#%d failed: %s\\n\", issueComment.RepositoryOwner, issueComment.RepositoryName, issueComment.IssueNumber, err.Error())\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Squashing %s that's going to be merged into %s\", *pr.Head.Ref, *pr.Base.Ref)\n\t})\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", conf.Port), nil))\n}\n\nfunc initGithubClient(accessToken string) *github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\treturn github.NewClient(tc)\n}\n\nfunc parseIssueComment(body []byte) (IssueComment, error) {\n\tvar message struct {\n\t\tIssue struct {\n\t\t\tNumber int `json:\"Number\"`\n\t\t\tPullRequest struct {\n\t\t\t\tURL string `json:\"url\"`\n\t\t\t} `json:\"pull_request\"`\n\t\t} `json:\"issue\"`\n\t\tRepository struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tOwner struct {\n\t\t\t\tLogin string `json:\"login\"`\n\t\t\t} `json:\"owner\"`\n\t\t} `json:\"repository\"`\n\t\tComment struct {\n\t\t\tBody string `json:\"body\"`\n\t\t} `json:\"comment\"`\n\t}\n\terr := json.Unmarshal(body, &message)\n\tif err != nil {\n\t\treturn IssueComment{}, err\n\t}\n\treturn IssueComment{\n\t\tIssueNumber: message.Issue.Number,\n\t\tComment: message.Comment.Body,\n\t\tIsPullRequest: message.Issue.PullRequest.URL != \"\",\n\t\tRepositoryOwner: message.Repository.Owner.Login,\n\t\tRepositoryName: message.Repository.Name,\n\t}, nil\n}\n\nfunc hasSecret(message []byte, signature, key string) (bool, error) {\n\tvar messageMACString string\n\tfmt.Sscanf(signature, \"sha1=%s\", &messageMACString)\n\tmessageMAC, err := hex.DecodeString(messageMACString)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmac := hmac.New(sha1.New, []byte(key))\n\tmac.Write(message)\n\texpectedMAC := mac.Sum(nil)\n\treturn hmac.Equal(messageMAC, expectedMAC), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/dfordsoft\/golib\/ebook\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n)\n\n\/\/ Options for all command line options\ntype Options struct {\n\tListenAndServe string `long:\"httpServe\" description:\"set http listen and serve address, example: :8080\"`\n\tFormat string `short:\"f\" long:\"format\" description:\"set generated file format, candidate values: mobi, epub, pdf\"`\n\tList bool `short:\"l\" long:\"list\" description:\"list supported novel websites\"`\n\tLeftMargin float64 `long:\"leftMargin\" description:\"set left margin for PDF format\"`\n\tTopMargin float64 `long:\"topMargin\" description:\"set top margin for PDF format\"`\n\tPageType string `short:\"p\" long:\"pageType\" description:\"set page type for PDF format, candidate values: a0, a1, a2, a3, a4, a5, a6, b0, b1, b2, b3, b4, b5, b6, c0, c1, c2, c3, c4, c5, c6, dxg(=a4), 6inch(90mm x 117mm), 7inch, 10inch(=a4), pc(=a4 & 25.4mm left margin & 31.7mm top margin & 16 point title font size & 12 point content font size)\"`\n\tTitleFontSize int `long:\"titleFontSize\" description:\"set title font point size for PDF format\"`\n\tContentFontSize int `long:\"contentFontSize\" description:\"set content font point size for PDF format\"`\n\tLineSpacing float64 `long:\"lineSpacing\" description:\"set line spacing rate for PDF format\"`\n\tPagesPerFile int `long:\"pagesPerFile\" description:\"split the big single PDF file to several smaller PDF files, how many pages should be included in a file, 0 means don't split\"`\n\tChaptersPerFile int `long:\"chaptersPerFile\" description:\"split the big signle PDF file to several smaller PDF files, how many chapters should be included in a file, 0 means don't split\"`\n\tFontFile string `long:\"fontFile\" description:\"set TTF font file path\"`\n\tRetryCount int `short:\"r\" long:\"retries\" description:\"download retry count\"`\n\tTimeout int `short:\"t\" long:\"timeout\" description:\"download timeout seconds\"`\n\tParallelCount int64 `long:\"parallel\" description:\"parallel count for downloading\"`\n\tConfigFile string `short:\"c\" long:\"config\" description:\"read configurations from local file\"`\n\tOutputFile string `short:\"o\" long:\"output\" description:\"output file path\"`\n\tFromChapter int `long:\"fromChapter\" description:\"from chapter\"`\n\tFromTitle string `long:\"fromTitle\" description:\"from title\"`\n\tToChapter int `long:\"toChapter\" description:\"to chapter\"`\n\tToTitle string `long:\"toTitle\" description:\"to title\"`\n}\n\ntype tocPattern struct {\n\thost string\n\tbookTitle string\n\tbookTitlePos int\n\titem string\n\tarticleTitlePos int\n\tarticleURLPos int\n\tisAbsoluteURL bool\n}\n\ntype pageContentMarker struct {\n\thost string\n\tstart []byte\n\tend []byte\n}\n\ntype novelSiteHandler struct {\n\tTitle string\n\tMatchPatterns []string\n\tDownload func(string, ebook.IBook)\n}\n\nvar (\n\tnovelSiteHandlers []*novelSiteHandler\n\topts Options\n\tsha1ver string \/\/ sha1 revision used to build the program\n\tbuildTime string \/\/ when the executable was built\n)\n\nfunc registerNovelSiteHandler(h *novelSiteHandler) {\n\tnovelSiteHandlers = append(novelSiteHandlers, h)\n}\n\nfunc listCommandHandler() {\n\tfmt.Println(\"支持小说网站:\")\n\tfor _, h := range novelSiteHandlers {\n\t\turlMap := make(map[string]struct{})\n\t\tfor _, p := range h.MatchPatterns {\n\t\t\tu := strings.Replace(p, `\\`, ``, -1)\n\t\t\tidxStart := strings.Index(u, `www.`)\n\t\t\tidxEnd := strings.Index(u[idxStart:], `\/`)\n\t\t\tu = u[:idxStart+idxEnd]\n\t\t\turlMap[u] = struct{}{}\n\t\t}\n\t\tvar urls []string\n\t\tfor u := range urlMap {\n\t\t\turls = append(urls, u)\n\t\t}\n\t\tfmt.Println(\"\\t\" + h.Title + \": \" + strings.Join(urls, \", \"))\n\t}\n\tfor _, h := range novelSiteConfigurations {\n\t\turlMap := make(map[string]struct{})\n\t\tfor _, p := range h.Sites {\n\t\t\tu := strings.Replace(p.TOCURLPattern, `\\`, ``, -1)\n\t\t\tidxStart := strings.Index(u, `www.`)\n\t\t\tidxEnd := strings.Index(u[idxStart:], `\/`)\n\t\t\tu = u[:idxStart+idxEnd]\n\t\t\turlMap[u] = struct{}{}\n\t\t}\n\t\tvar urls []string\n\t\tfor u := range urlMap {\n\t\t\turls = append(urls, u)\n\t\t}\n\t\tfmt.Println(\"\\t\" + h.Title + \": \" + strings.Join(urls, \", \"))\n\t}\n}\n\nfunc readConfigFile(opts *Options) bool {\n\tif opts.ConfigFile != \"\" {\n\t\tcontentFd, err := os.OpenFile(opts.ConfigFile, os.O_RDONLY, 0644)\n\t\tif err != nil {\n\t\t\tlog.Println(\"opening config file \", opts.ConfigFile, \" for reading failed \", err)\n\t\t\treturn false\n\t\t}\n\n\t\tcontentC, err := ioutil.ReadAll(contentFd)\n\t\tcontentFd.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(\"reading config file \", opts.ConfigFile, \" failed \", err)\n\t\t\treturn false\n\t\t}\n\n\t\tvar options map[string]interface{}\n\t\tif err = json.Unmarshal(contentC, &options); err != nil {\n\t\t\tlog.Println(\"unmarshall configurations failed\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif f, ok := options[\"format\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.Format = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"pageType\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.PageType = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fontFile\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.FontFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fromChapter\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.FromChapter = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fromTitle\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.FromTitle = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"toChapter\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ToChapter = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"toTitle\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.ToTitle = v\n\t\t\t}\n\t\t}\n\n\t\tif f, ok := options[\"leftMargin\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.LeftMargin = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"topMargin\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.TopMargin = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"lineSpacing\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.LineSpacing = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"titleFontSize\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.TitleFontSize = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"contentFontSize\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ContentFontSize = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"pagesPerFile\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.PagesPerFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"chaptersPerFile\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ChaptersPerFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"retries\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.RetryCount = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"timeout\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.Timeout = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"parallel\"]; ok {\n\t\t\tif v := f.(int64); v > 0 {\n\t\t\t\topts.ParallelCount = v\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc downloadBook(novelURL string, ch chan bool) {\n\tfor _, h := range novelSiteHandlers {\n\t\tfor _, pattern := range h.MatchPatterns {\n\t\t\tr, _ := regexp.Compile(pattern)\n\t\t\tif r.MatchString(novelURL) {\n\t\t\t\tgen := ebook.NewBook(opts.Format)\n\t\t\t\tgen.SetFontSize(opts.TitleFontSize, opts.ContentFontSize)\n\t\t\t\tgen.SetLineSpacing(opts.LineSpacing)\n\t\t\t\tgen.PagesPerFile(opts.PagesPerFile)\n\t\t\t\tgen.ChaptersPerFile(opts.ChaptersPerFile)\n\t\t\t\tgen.SetMargins(opts.LeftMargin, opts.TopMargin)\n\t\t\t\tgen.SetPageType(opts.PageType)\n\t\t\t\tgen.SetFontFile(opts.FontFile)\n\t\t\t\tgen.Output(opts.OutputFile)\n\t\t\t\tgen.Info()\n\t\t\t\th.Download(novelURL, gen)\n\t\t\t\tfmt.Println(\"downloaded\", novelURL)\n\t\t\t\tch <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tfor _, h := range novelSiteConfigurations {\n\t\tfor _, site := range h.Sites {\n\t\t\tr, _ := regexp.Compile(site.TOCURLPattern)\n\t\t\tif r.MatchString(novelURL) {\n\t\t\t\tgen := ebook.NewBook(opts.Format)\n\t\t\t\tgen.SetFontSize(opts.TitleFontSize, opts.ContentFontSize)\n\t\t\t\tgen.SetLineSpacing(opts.LineSpacing)\n\t\t\t\tgen.PagesPerFile(opts.PagesPerFile)\n\t\t\t\tgen.ChaptersPerFile(opts.ChaptersPerFile)\n\t\t\t\tgen.SetMargins(opts.LeftMargin, opts.TopMargin)\n\t\t\t\tgen.SetPageType(opts.PageType)\n\t\t\t\tgen.SetFontFile(opts.FontFile)\n\t\t\t\tgen.Output(opts.OutputFile)\n\t\t\t\tgen.Info()\n\t\t\t\th.Download(novelURL, gen)\n\t\t\t\tfmt.Println(\"downloaded\", novelURL)\n\t\t\t\tch <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(\"not downloaded\", novelURL)\n\tch <- false\n}\n\nfunc main() {\n\tfmt.Println(\"getnovel SHA1:\", sha1ver, \"build at\", buildTime)\n\tif len(os.Args) < 2 {\n\t\tfmt.Println(\"使用方法:\\n\\tgetnovel 小说目录网址\")\n\t\tlistCommandHandler()\n\t\treturn\n\t}\n\n\topts = Options{\n\t\tFormat: \"mobi\",\n\t\tList: false,\n\t\tLeftMargin: 10,\n\t\tTopMargin: 10,\n\t\tPageType: \"a4\",\n\t\tTitleFontSize: 24,\n\t\tContentFontSize: 18,\n\t\tLineSpacing: 1.2,\n\t\tPagesPerFile: 0,\n\t\tChaptersPerFile: 0,\n\t\tFontFile: filepath.Join(\"fonts\", \"CustomFont.ttf\"),\n\t\tRetryCount: 3,\n\t\tTimeout: 60,\n\t\tParallelCount: 10,\n\t}\n\n\targs, err := flags.Parse(&opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif opts.List {\n\t\tlistCommandHandler()\n\t\treturn\n\t}\n\n\tif opts.ListenAndServe != \"\" {\n\t\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tifaces, err := net.Interfaces()\n\t\tvar ips []string\n\t\tfor _, i := range ifaces {\n\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tswitch v := addr.(type) {\n\t\t\t\tcase *net.IPNet:\n\t\t\t\t\tif v.IP.IsLoopback() || v.IP.IsLinkLocalMulticast() || v.IP.IsLinkLocalUnicast() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tips = append(ips, \"\\t\"+v.IP.String())\n\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\tif v.IP.IsLoopback() || v.IP.IsLinkLocalMulticast() || v.IP.IsLinkLocalUnicast() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tips = append(ips, \"\\t\"+v.IP.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Local IP:\")\n\t\tfmt.Println(strings.Join(ips, \"\\n\"))\n\t\tfmt.Println(\"starting http server on\", opts.ListenAndServe)\n\t\tlog.Fatal(http.ListenAndServe(opts.ListenAndServe, http.FileServer(http.Dir(dir))))\n\t\treturn\n\t}\n\n\treadNovelSiteConfigurations()\n\n\tif !readConfigFile(&opts) {\n\t\treturn\n\t}\n\n\tdownloadedChannel := make(chan bool)\n\tdonwloadCount := 0\n\tfor _, novelURL := range args {\n\t\t_, e := url.Parse(novelURL)\n\t\tif e != nil {\n\t\t\tfmt.Println(\"invalid URL\", novelURL)\n\t\t\tcontinue\n\t\t}\n\t\tdonwloadCount++\n\t\tgo downloadBook(novelURL, downloadedChannel)\n\t}\n\n\tdownloaded := false\n\tfor i := 0; i < donwloadCount; i++ {\n\t\tch := <-downloadedChannel\n\t\tdownloaded = (downloaded || ch)\n\t}\n\n\tif !downloaded {\n\t\tfmt.Println(\"使用方法:\\n\\tgetnovel 小说目录网址\")\n\t\tlistCommandHandler()\n\t}\n}\n<commit_msg>(+)add mobile page type<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/dfordsoft\/golib\/ebook\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n)\n\n\/\/ Options for all command line options\ntype Options struct {\n\tListenAndServe string `long:\"httpServe\" description:\"set http listen and serve address, example: :8080\"`\n\tFormat string `short:\"f\" long:\"format\" description:\"set generated file format, candidate values: mobi, epub, pdf\"`\n\tList bool `short:\"l\" long:\"list\" description:\"list supported novel websites\"`\n\tLeftMargin float64 `long:\"leftMargin\" description:\"set left margin for PDF format\"`\n\tTopMargin float64 `long:\"topMargin\" description:\"set top margin for PDF format\"`\n\tPageType string `short:\"p\" long:\"pageType\" description:\"set page type for PDF format, candidate values: a0, a1, a2, a3, a4, a5, a6, b0, b1, b2, b3, b4, b5, b6, c0, c1, c2, c3, c4, c5, c6, dxg(=a4), 6inch(90mm x 117mm), 7inch, 10inch(=a4), mobile(=a4 & 32 point title font size & 28 point content font size), pc(=a4 & 25.4mm left margin & 31.7mm top margin & 16 point title font size & 12 point content font size)\"`\n\tTitleFontSize int `long:\"titleFontSize\" description:\"set title font point size for PDF format\"`\n\tContentFontSize int `long:\"contentFontSize\" description:\"set content font point size for PDF format\"`\n\tLineSpacing float64 `long:\"lineSpacing\" description:\"set line spacing rate for PDF format\"`\n\tPagesPerFile int `long:\"pagesPerFile\" description:\"split the big single PDF file to several smaller PDF files, how many pages should be included in a file, 0 means don't split\"`\n\tChaptersPerFile int `long:\"chaptersPerFile\" description:\"split the big signle PDF file to several smaller PDF files, how many chapters should be included in a file, 0 means don't split\"`\n\tFontFile string `long:\"fontFile\" description:\"set TTF font file path\"`\n\tRetryCount int `short:\"r\" long:\"retries\" description:\"download retry count\"`\n\tTimeout int `short:\"t\" long:\"timeout\" description:\"download timeout seconds\"`\n\tParallelCount int64 `long:\"parallel\" description:\"parallel count for downloading\"`\n\tConfigFile string `short:\"c\" long:\"config\" description:\"read configurations from local file\"`\n\tOutputFile string `short:\"o\" long:\"output\" description:\"output file path\"`\n\tFromChapter int `long:\"fromChapter\" description:\"from chapter\"`\n\tFromTitle string `long:\"fromTitle\" description:\"from title\"`\n\tToChapter int `long:\"toChapter\" description:\"to chapter\"`\n\tToTitle string `long:\"toTitle\" description:\"to title\"`\n}\n\ntype tocPattern struct {\n\thost string\n\tbookTitle string\n\tbookTitlePos int\n\titem string\n\tarticleTitlePos int\n\tarticleURLPos int\n\tisAbsoluteURL bool\n}\n\ntype pageContentMarker struct {\n\thost string\n\tstart []byte\n\tend []byte\n}\n\ntype novelSiteHandler struct {\n\tTitle string\n\tMatchPatterns []string\n\tDownload func(string, ebook.IBook)\n}\n\nvar (\n\tnovelSiteHandlers []*novelSiteHandler\n\topts Options\n\tsha1ver string \/\/ sha1 revision used to build the program\n\tbuildTime string \/\/ when the executable was built\n)\n\nfunc registerNovelSiteHandler(h *novelSiteHandler) {\n\tnovelSiteHandlers = append(novelSiteHandlers, h)\n}\n\nfunc listCommandHandler() {\n\tfmt.Println(\"支持小说网站:\")\n\tfor _, h := range novelSiteHandlers {\n\t\turlMap := make(map[string]struct{})\n\t\tfor _, p := range h.MatchPatterns {\n\t\t\tu := strings.Replace(p, `\\`, ``, -1)\n\t\t\tidxStart := strings.Index(u, `www.`)\n\t\t\tidxEnd := strings.Index(u[idxStart:], `\/`)\n\t\t\tu = u[:idxStart+idxEnd]\n\t\t\turlMap[u] = struct{}{}\n\t\t}\n\t\tvar urls []string\n\t\tfor u := range urlMap {\n\t\t\turls = append(urls, u)\n\t\t}\n\t\tfmt.Println(\"\\t\" + h.Title + \": \" + strings.Join(urls, \", \"))\n\t}\n\tfor _, h := range novelSiteConfigurations {\n\t\turlMap := make(map[string]struct{})\n\t\tfor _, p := range h.Sites {\n\t\t\tu := strings.Replace(p.TOCURLPattern, `\\`, ``, -1)\n\t\t\tidxStart := strings.Index(u, `www.`)\n\t\t\tidxEnd := strings.Index(u[idxStart:], `\/`)\n\t\t\tu = u[:idxStart+idxEnd]\n\t\t\turlMap[u] = struct{}{}\n\t\t}\n\t\tvar urls []string\n\t\tfor u := range urlMap {\n\t\t\turls = append(urls, u)\n\t\t}\n\t\tfmt.Println(\"\\t\" + h.Title + \": \" + strings.Join(urls, \", \"))\n\t}\n}\n\nfunc readConfigFile(opts *Options) bool {\n\tif opts.ConfigFile != \"\" {\n\t\tcontentFd, err := os.OpenFile(opts.ConfigFile, os.O_RDONLY, 0644)\n\t\tif err != nil {\n\t\t\tlog.Println(\"opening config file \", opts.ConfigFile, \" for reading failed \", err)\n\t\t\treturn false\n\t\t}\n\n\t\tcontentC, err := ioutil.ReadAll(contentFd)\n\t\tcontentFd.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(\"reading config file \", opts.ConfigFile, \" failed \", err)\n\t\t\treturn false\n\t\t}\n\n\t\tvar options map[string]interface{}\n\t\tif err = json.Unmarshal(contentC, &options); err != nil {\n\t\t\tlog.Println(\"unmarshall configurations failed\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif f, ok := options[\"format\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.Format = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"pageType\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.PageType = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fontFile\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.FontFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fromChapter\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.FromChapter = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"fromTitle\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.FromTitle = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"toChapter\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ToChapter = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"toTitle\"]; ok {\n\t\t\tif v := f.(string); len(v) > 0 {\n\t\t\t\topts.ToTitle = v\n\t\t\t}\n\t\t}\n\n\t\tif f, ok := options[\"leftMargin\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.LeftMargin = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"topMargin\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.TopMargin = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"lineSpacing\"]; ok {\n\t\t\tif v := f.(float64); v > 0 {\n\t\t\t\topts.LineSpacing = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"titleFontSize\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.TitleFontSize = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"contentFontSize\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ContentFontSize = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"pagesPerFile\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.PagesPerFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"chaptersPerFile\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.ChaptersPerFile = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"retries\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.RetryCount = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"timeout\"]; ok {\n\t\t\tif v := f.(int); v > 0 {\n\t\t\t\topts.Timeout = v\n\t\t\t}\n\t\t}\n\t\tif f, ok := options[\"parallel\"]; ok {\n\t\t\tif v := f.(int64); v > 0 {\n\t\t\t\topts.ParallelCount = v\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc downloadBook(novelURL string, ch chan bool) {\n\tfor _, h := range novelSiteHandlers {\n\t\tfor _, pattern := range h.MatchPatterns {\n\t\t\tr, _ := regexp.Compile(pattern)\n\t\t\tif r.MatchString(novelURL) {\n\t\t\t\tgen := ebook.NewBook(opts.Format)\n\t\t\t\tgen.SetFontSize(opts.TitleFontSize, opts.ContentFontSize)\n\t\t\t\tgen.SetLineSpacing(opts.LineSpacing)\n\t\t\t\tgen.PagesPerFile(opts.PagesPerFile)\n\t\t\t\tgen.ChaptersPerFile(opts.ChaptersPerFile)\n\t\t\t\tgen.SetMargins(opts.LeftMargin, opts.TopMargin)\n\t\t\t\tgen.SetPageType(opts.PageType)\n\t\t\t\tgen.SetFontFile(opts.FontFile)\n\t\t\t\tgen.Output(opts.OutputFile)\n\t\t\t\tgen.Info()\n\t\t\t\th.Download(novelURL, gen)\n\t\t\t\tfmt.Println(\"downloaded\", novelURL)\n\t\t\t\tch <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tfor _, h := range novelSiteConfigurations {\n\t\tfor _, site := range h.Sites {\n\t\t\tr, _ := regexp.Compile(site.TOCURLPattern)\n\t\t\tif r.MatchString(novelURL) {\n\t\t\t\tgen := ebook.NewBook(opts.Format)\n\t\t\t\tgen.SetFontSize(opts.TitleFontSize, opts.ContentFontSize)\n\t\t\t\tgen.SetLineSpacing(opts.LineSpacing)\n\t\t\t\tgen.PagesPerFile(opts.PagesPerFile)\n\t\t\t\tgen.ChaptersPerFile(opts.ChaptersPerFile)\n\t\t\t\tgen.SetMargins(opts.LeftMargin, opts.TopMargin)\n\t\t\t\tgen.SetPageType(opts.PageType)\n\t\t\t\tgen.SetFontFile(opts.FontFile)\n\t\t\t\tgen.Output(opts.OutputFile)\n\t\t\t\tgen.Info()\n\t\t\t\th.Download(novelURL, gen)\n\t\t\t\tfmt.Println(\"downloaded\", novelURL)\n\t\t\t\tch <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(\"not downloaded\", novelURL)\n\tch <- false\n}\n\nfunc main() {\n\tfmt.Println(\"getnovel SHA1:\", sha1ver, \"build at\", buildTime)\n\tif len(os.Args) < 2 {\n\t\tfmt.Println(\"使用方法:\\n\\tgetnovel 小说目录网址\")\n\t\tlistCommandHandler()\n\t\treturn\n\t}\n\n\topts = Options{\n\t\tFormat: \"mobi\",\n\t\tList: false,\n\t\tLeftMargin: 10,\n\t\tTopMargin: 10,\n\t\tPageType: \"a4\",\n\t\tTitleFontSize: 24,\n\t\tContentFontSize: 18,\n\t\tLineSpacing: 1.2,\n\t\tPagesPerFile: 0,\n\t\tChaptersPerFile: 0,\n\t\tFontFile: filepath.Join(\"fonts\", \"CustomFont.ttf\"),\n\t\tRetryCount: 3,\n\t\tTimeout: 60,\n\t\tParallelCount: 10,\n\t}\n\n\targs, err := flags.Parse(&opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif opts.List {\n\t\tlistCommandHandler()\n\t\treturn\n\t}\n\n\tif opts.ListenAndServe != \"\" {\n\t\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tifaces, err := net.Interfaces()\n\t\tvar ips []string\n\t\tfor _, i := range ifaces {\n\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tswitch v := addr.(type) {\n\t\t\t\tcase *net.IPNet:\n\t\t\t\t\tif v.IP.IsLoopback() || v.IP.IsLinkLocalMulticast() || v.IP.IsLinkLocalUnicast() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tips = append(ips, \"\\t\"+v.IP.String())\n\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\tif v.IP.IsLoopback() || v.IP.IsLinkLocalMulticast() || v.IP.IsLinkLocalUnicast() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tips = append(ips, \"\\t\"+v.IP.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Local IP:\")\n\t\tfmt.Println(strings.Join(ips, \"\\n\"))\n\t\tfmt.Println(\"starting http server on\", opts.ListenAndServe)\n\t\tlog.Fatal(http.ListenAndServe(opts.ListenAndServe, http.FileServer(http.Dir(dir))))\n\t\treturn\n\t}\n\n\treadNovelSiteConfigurations()\n\n\tif !readConfigFile(&opts) {\n\t\treturn\n\t}\n\n\tdownloadedChannel := make(chan bool)\n\tdonwloadCount := 0\n\tfor _, novelURL := range args {\n\t\t_, e := url.Parse(novelURL)\n\t\tif e != nil {\n\t\t\tfmt.Println(\"invalid URL\", novelURL)\n\t\t\tcontinue\n\t\t}\n\t\tdonwloadCount++\n\t\tgo downloadBook(novelURL, downloadedChannel)\n\t}\n\n\tdownloaded := false\n\tfor i := 0; i < donwloadCount; i++ {\n\t\tch := <-downloadedChannel\n\t\tdownloaded = (downloaded || ch)\n\t}\n\n\tif !downloaded {\n\t\tfmt.Println(\"使用方法:\\n\\tgetnovel 小说目录网址\")\n\t\tlistCommandHandler()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/debug\"\n\t\"time\"\n)\n\nconst version = \"1.1.7\"\n\nfunc main() {\n\t\/\/ Force garbage collection\n\tgo func() {\n\t\tfor _ = range time.Tick(10 * time.Second) {\n\t\t\tdebug.FreeOSMemory()\n\t\t}\n\t}()\n\n\ts := startServer()\n\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt, os.Kill)\n\n\t<-stop\n\n\tshutdownServer(s)\n\tshutdownVips()\n}\n<commit_msg>Fix linting error<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/debug\"\n\t\"time\"\n)\n\nconst version = \"1.1.7\"\n\nfunc main() {\n\t\/\/ Force garbage collection\n\tgo func() {\n\t\tfor range time.Tick(10 * time.Second) {\n\t\t\tdebug.FreeOSMemory()\n\t\t}\n\t}()\n\n\ts := startServer()\n\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt, os.Kill)\n\n\t<-stop\n\n\tshutdownServer(s)\n\tshutdownVips()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst VERSION = \"0.4.1\"\n\nvar signalchan chan os.Signal\n\ntype Packet struct {\n\tBucket string\n\tValue int\n\tModifier string\n\tSampling float32\n}\n\ntype Percentiles []int\n\nfunc (a *Percentiles) Set(s string) error {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*a = append(*a, i)\n\treturn nil\n}\nfunc (a *Percentiles) String() string {\n\treturn fmt.Sprintf(\"%v\", *a)\n}\n\nvar (\n\tserviceAddress = flag.String(\"address\", \":8125\", \"UDP service address\")\n\tgraphiteAddress = flag.String(\"graphite\", \"127.0.0.1:2003\", \"Graphite service address (or - to disable)\")\n\tflushInterval = flag.Int64(\"flush-interval\", 10, \"Flush interval (seconds)\")\n\tshowVersion = flag.Bool(\"version\", false, \"print version string\")\n\tpercentThreshold = Percentiles{}\n)\n\nfunc init() {\n\tflag.Var(&percentThreshold, \"percent-threshold\", \"Threshold percent (may be given multiple times)\")\n}\n\nvar (\n\tIn = make(chan *Packet, 1000)\n\tcounters = make(map[string]int)\n\tgauges = make(map[string]int)\n\ttimers = make(map[string][]int)\n)\n\nfunc monitor() {\n\tticker := time.NewTicker(time.Duration(*flushInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase sig := <-signalchan:\n\t\t\tfmt.Printf(\"!! Caught signal %d... shutting down\\n\", sig)\n\t\t\tsubmit()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tsubmit()\n\t\tcase s := <-In:\n\t\t\tif s.Modifier == \"ms\" {\n\t\t\t\t_, ok := timers[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tvar t []int\n\t\t\t\t\ttimers[s.Bucket] = t\n\t\t\t\t}\n\t\t\t\ttimers[s.Bucket] = append(timers[s.Bucket], s.Value)\n\t\t\t} else if s.Modifier == \"g\" {\n\t\t\t\tgauges[s.Bucket] = int(s.Value)\n\t\t\t} else {\n\t\t\t\tv, ok := counters[s.Bucket]\n\t\t\t\tif !ok || v == -1 {\n\t\t\t\t\tcounters[s.Bucket] = 0\n\t\t\t\t}\n\t\t\t\tcounters[s.Bucket] += int(float32(s.Value) * (1 \/ s.Sampling))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc submit() {\n\tclient, err := net.Dial(\"tcp\", *graphiteAddress)\n\tif err != nil {\n\t\tlog.Printf(\"Error dialing\", err.Error())\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tnumStats := 0\n\tnow := time.Now().Unix()\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor s, c := range counters {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"%s %d %d\\n\", s, c, now)\n\t\tcounters[s] = -1\n\t\tnumStats++\n\t}\n\n\tfor g, c := range gauges {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"%s %d %d\\n\", g, c, now)\n\t\tgauges[g] = -1\n\t\tnumStats++\n\t}\n\n\tfor u, t := range timers {\n\t\tif len(t) > 0 {\n\t\t\tnumStats++\n\t\t\tsort.Ints(t)\n\t\t\tmin := t[0]\n\t\t\tmax := t[len(t)-1]\n\t\t\tmean := t[len(t)\/2]\n\t\t\tmaxAtThreshold := max\n\t\t\tcount := len(t)\n\n\t\t\tfor _, pct := range percentThreshold {\n\n\t\t\t\tif len(t) > 1 {\n\t\t\t\t\tvar thresholdIndex int\n\t\t\t\t\tthresholdIndex = ((100 - pct) \/ 100) * count\n\t\t\t\t\tnumInThreshold := count - thresholdIndex\n\t\t\t\t\tmaxAtThreshold = t[numInThreshold-1]\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(buffer, \"%s.upper_%d %d %d\\n\", u, pct, maxAtThreshold, now)\n\t\t\t}\n\n\t\t\tvar z []int\n\t\t\ttimers[u] = z\n\n\t\t\tfmt.Fprintf(buffer, \"%s.mean %d %d\\n\", u, mean, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.upper %d %d\\n\", u, max, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.lower %d %d\\n\", u, min, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.count %d %d\\n\", u, count, now)\n\t\t}\n\t}\n\tif numStats == 0 {\n\t\treturn\n\t}\n\tlog.Printf(\"got %d stats\", numStats)\n\tdata := buffer.Bytes()\n\tclient.Write(data)\n\n}\n\nfunc parseMessage(buf *bytes.Buffer) []*Packet {\n\tvar packetRegexp = regexp.MustCompile(\"^([^:]+):([0-9]+)\\\\|(g|c|ms)(\\\\|@([0-9\\\\.]+))?\\n?$\")\n\n\tvar output []*Packet\n\tvar err error\n\tvar line string\n\tfor {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tline, err = buf.ReadString('\\n')\n\t\tif line != \"\" {\n\t\t\titem := packetRegexp.FindStringSubmatch(line)\n\t\t\tif len(item) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalue, err := strconv.Atoi(item[2])\n\t\t\tif err != nil {\n\t\t\t\t\/\/ todo print out this error\n\t\t\t\tif item[3] == \"ms\" {\n\t\t\t\t\tvalue = 0\n\t\t\t\t} else {\n\t\t\t\t\tvalue = 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsampleRate, err := strconv.ParseFloat(item[5], 32)\n\t\t\tif err != nil {\n\t\t\t\tsampleRate = 1\n\t\t\t}\n\n\t\t\tpacket := &Packet{\n\t\t\t\tBucket: item[1],\n\t\t\t\tValue: value,\n\t\t\t\tModifier: item[3],\n\t\t\t\tSampling: float32(sampleRate),\n\t\t\t}\n\t\t\toutput = append(output, packet)\n\t\t}\n\t}\n\treturn output\n}\n\nfunc udpListener() {\n\taddress, _ := net.ResolveUDPAddr(\"udp\", *serviceAddress)\n\tlog.Printf(\"Listening on %s\", address)\n\tlistener, err := net.ListenUDP(\"udp\", address)\n\tif err != nil {\n\t\tlog.Fatalf(\"ListenAndServe: %s\", err.Error())\n\t}\n\tdefer listener.Close()\n\tmessage := make([]byte, 512)\n\tfor {\n\t\tn, remaddr, err := listener.ReadFrom(message)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error reading from %v %s\", remaddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tbuf := bytes.NewBuffer(message[0:n])\n\t\tpackets := parseMessage(buf)\n\t\tfor _, p := range packets {\n\t\t\tIn <- p\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Printf(\"gographite v%s\\n\", VERSION)\n\t\treturn\n\t}\n\tsignalchan = make(chan os.Signal, 1)\n\tsignal.Notify(signalchan, syscall.SIGTERM)\n\n\tgo udpListener()\n\tmonitor()\n}\n<commit_msg>version bump<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst VERSION = \"0.4.2\"\n\nvar signalchan chan os.Signal\n\ntype Packet struct {\n\tBucket string\n\tValue int\n\tModifier string\n\tSampling float32\n}\n\ntype Percentiles []int\n\nfunc (a *Percentiles) Set(s string) error {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*a = append(*a, i)\n\treturn nil\n}\nfunc (a *Percentiles) String() string {\n\treturn fmt.Sprintf(\"%v\", *a)\n}\n\nvar (\n\tserviceAddress = flag.String(\"address\", \":8125\", \"UDP service address\")\n\tgraphiteAddress = flag.String(\"graphite\", \"127.0.0.1:2003\", \"Graphite service address (or - to disable)\")\n\tflushInterval = flag.Int64(\"flush-interval\", 10, \"Flush interval (seconds)\")\n\tshowVersion = flag.Bool(\"version\", false, \"print version string\")\n\tpercentThreshold = Percentiles{}\n)\n\nfunc init() {\n\tflag.Var(&percentThreshold, \"percent-threshold\", \"Threshold percent (may be given multiple times)\")\n}\n\nvar (\n\tIn = make(chan *Packet, 1000)\n\tcounters = make(map[string]int)\n\tgauges = make(map[string]int)\n\ttimers = make(map[string][]int)\n)\n\nfunc monitor() {\n\tticker := time.NewTicker(time.Duration(*flushInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase sig := <-signalchan:\n\t\t\tfmt.Printf(\"!! Caught signal %d... shutting down\\n\", sig)\n\t\t\tsubmit()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tsubmit()\n\t\tcase s := <-In:\n\t\t\tif s.Modifier == \"ms\" {\n\t\t\t\t_, ok := timers[s.Bucket]\n\t\t\t\tif !ok {\n\t\t\t\t\tvar t []int\n\t\t\t\t\ttimers[s.Bucket] = t\n\t\t\t\t}\n\t\t\t\ttimers[s.Bucket] = append(timers[s.Bucket], s.Value)\n\t\t\t} else if s.Modifier == \"g\" {\n\t\t\t\tgauges[s.Bucket] = int(s.Value)\n\t\t\t} else {\n\t\t\t\tv, ok := counters[s.Bucket]\n\t\t\t\tif !ok || v == -1 {\n\t\t\t\t\tcounters[s.Bucket] = 0\n\t\t\t\t}\n\t\t\t\tcounters[s.Bucket] += int(float32(s.Value) * (1 \/ s.Sampling))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc submit() {\n\tclient, err := net.Dial(\"tcp\", *graphiteAddress)\n\tif err != nil {\n\t\tlog.Printf(\"Error dialing\", err.Error())\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tnumStats := 0\n\tnow := time.Now().Unix()\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor s, c := range counters {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"%s %d %d\\n\", s, c, now)\n\t\tcounters[s] = -1\n\t\tnumStats++\n\t}\n\n\tfor g, c := range gauges {\n\t\tif c == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(buffer, \"%s %d %d\\n\", g, c, now)\n\t\tgauges[g] = -1\n\t\tnumStats++\n\t}\n\n\tfor u, t := range timers {\n\t\tif len(t) > 0 {\n\t\t\tnumStats++\n\t\t\tsort.Ints(t)\n\t\t\tmin := t[0]\n\t\t\tmax := t[len(t)-1]\n\t\t\tmean := t[len(t)\/2]\n\t\t\tmaxAtThreshold := max\n\t\t\tcount := len(t)\n\n\t\t\tfor _, pct := range percentThreshold {\n\n\t\t\t\tif len(t) > 1 {\n\t\t\t\t\tvar thresholdIndex int\n\t\t\t\t\tthresholdIndex = ((100 - pct) \/ 100) * count\n\t\t\t\t\tnumInThreshold := count - thresholdIndex\n\t\t\t\t\tmaxAtThreshold = t[numInThreshold-1]\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(buffer, \"%s.upper_%d %d %d\\n\", u, pct, maxAtThreshold, now)\n\t\t\t}\n\n\t\t\tvar z []int\n\t\t\ttimers[u] = z\n\n\t\t\tfmt.Fprintf(buffer, \"%s.mean %d %d\\n\", u, mean, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.upper %d %d\\n\", u, max, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.lower %d %d\\n\", u, min, now)\n\t\t\tfmt.Fprintf(buffer, \"%s.count %d %d\\n\", u, count, now)\n\t\t}\n\t}\n\tif numStats == 0 {\n\t\treturn\n\t}\n\tlog.Printf(\"got %d stats\", numStats)\n\tdata := buffer.Bytes()\n\tclient.Write(data)\n\n}\n\nfunc parseMessage(buf *bytes.Buffer) []*Packet {\n\tvar packetRegexp = regexp.MustCompile(\"^([^:]+):([0-9]+)\\\\|(g|c|ms)(\\\\|@([0-9\\\\.]+))?\\n?$\")\n\n\tvar output []*Packet\n\tvar err error\n\tvar line string\n\tfor {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tline, err = buf.ReadString('\\n')\n\t\tif line != \"\" {\n\t\t\titem := packetRegexp.FindStringSubmatch(line)\n\t\t\tif len(item) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalue, err := strconv.Atoi(item[2])\n\t\t\tif err != nil {\n\t\t\t\t\/\/ todo print out this error\n\t\t\t\tif item[3] == \"ms\" {\n\t\t\t\t\tvalue = 0\n\t\t\t\t} else {\n\t\t\t\t\tvalue = 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsampleRate, err := strconv.ParseFloat(item[5], 32)\n\t\t\tif err != nil {\n\t\t\t\tsampleRate = 1\n\t\t\t}\n\n\t\t\tpacket := &Packet{\n\t\t\t\tBucket: item[1],\n\t\t\t\tValue: value,\n\t\t\t\tModifier: item[3],\n\t\t\t\tSampling: float32(sampleRate),\n\t\t\t}\n\t\t\toutput = append(output, packet)\n\t\t}\n\t}\n\treturn output\n}\n\nfunc udpListener() {\n\taddress, _ := net.ResolveUDPAddr(\"udp\", *serviceAddress)\n\tlog.Printf(\"Listening on %s\", address)\n\tlistener, err := net.ListenUDP(\"udp\", address)\n\tif err != nil {\n\t\tlog.Fatalf(\"ListenAndServe: %s\", err.Error())\n\t}\n\tdefer listener.Close()\n\tmessage := make([]byte, 512)\n\tfor {\n\t\tn, remaddr, err := listener.ReadFrom(message)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error reading from %v %s\", remaddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tbuf := bytes.NewBuffer(message[0:n])\n\t\tpackets := parseMessage(buf)\n\t\tfor _, p := range packets {\n\t\t\tIn <- p\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Printf(\"gographite v%s\\n\", VERSION)\n\t\treturn\n\t}\n\tsignalchan = make(chan os.Signal, 1)\n\tsignal.Notify(signalchan, syscall.SIGTERM)\n\n\tgo udpListener()\n\tmonitor()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/http\"\n)\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", handleViolationReport)\n\thttp.ListenAndServe(\":80\", nil)\n}\nfunc handleViolationReport(w http.ResponseWriter, r *http.Request) {\n}\n<commit_msg>Define `CSPReport` struct<commit_after>package main\n\nimport (\n\t\"net\/http\"\n)\n\ntype CSPReport struct {\n\tBody struct {\n\t\tDocumentURI string `json:\"document-uri\"`\n\t\tReferrer string `json:\"referrer\"`\n\t\tBlockedURI string `json:\"blocked-uri\"`\n\t\tViolatedDirective string `json:\"violated-directive\"`\n\t\tEffectiveDirective string `json:\"effective-directive\"`\n\t\tOriginalPolicy string `json:\"original-policy\"`\n\t} `json:\"csp-report\"`\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", handleViolationReport)\n\thttp.ListenAndServe(\":80\", nil)\n}\nfunc handleViolationReport(w http.ResponseWriter, r *http.Request) {\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/eleme\/banshee\/alerter\"\n\t\"github.com\/eleme\/banshee\/cleaner\"\n\t\"github.com\/eleme\/banshee\/config\"\n\t\"github.com\/eleme\/banshee\/detector\"\n\t\"github.com\/eleme\/banshee\/filter\"\n\t\"github.com\/eleme\/banshee\/storage\"\n\t\"github.com\/eleme\/banshee\/util\/log\"\n\t\"github.com\/eleme\/banshee\/version\"\n\t\"github.com\/eleme\/banshee\/webapp\"\n)\n\nvar (\n\t\/\/ Arguments\n\tdebug = flag.Bool(\"d\", false, \"debug mode\")\n\tfileName = flag.String(\"c\", \"config.json\", \"config file path\")\n\tshowVersion = flag.Bool(\"v\", false, \"show version\")\n\t\/\/ Variables\n\tcfg = config.New()\n\tdb *storage.DB\n\tflt = filter.New()\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: banshee [-c config] [-d] [-v]\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprintf(os.Stderr, \"copyright eleme https:\/\/github.com\/eleme\/banshee.\\n\")\n\tos.Exit(2)\n}\n\nfunc initLog() {\n\tlog.SetName(\"banshee\")\n\tif *debug {\n\t\tlog.SetLevel(log.DEBUG)\n\t}\n\tgoVs := runtime.Version()\n\tnCPU := runtime.GOMAXPROCS(-1)\n\tvers := version.Version\n\tlog.Debug(\"banshee%s %s %d cpu\", vers, goVs, nCPU)\n}\n\nfunc initConfig() {\n\t\/\/ Config parsing.\n\tif flag.NFlag() == 0 || (flag.NFlag() == 1 && *debug) {\n\t\t\/\/ Case .\/program [-d]\n\t\tlog.Warn(\"no config specified, using default..\")\n\t} else {\n\t\t\/\/ Update config.\n\t\terr := cfg.UpdateWithJSONFile(*fileName)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"failed to load %s, %s\", *fileName, err)\n\t\t}\n\t}\n\t\/\/ Config validation.\n\terr := cfg.Validate()\n\tif err == config.ErrAlerterCommandEmpty {\n\t\t\/\/ Ignore alerter command empty.\n\t\tlog.Warn(\"config: %s\", err)\n\t} else {\n\t\tlog.Fatal(\"config: %s\", err)\n\t}\n}\n\nfunc initDB() {\n\t\/\/ Rely on config.\n\tif cfg == nil {\n\t\tpanic(errors.New(\"db require config\"))\n\t}\n\t\/\/ Open db with options.\n\toptions := &storage.Options{\n\t\tNumGrid: cfg.Period[0],\n\t\tGridLen: cfg.Period[1],\n\t}\n\tpath := cfg.Storage.Path\n\tvar err error\n\tdb, err = storage.Open(path, options)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open %s: %v\", path, err)\n\t}\n}\n\nfunc initFilter() {\n\t\/\/ Rely on db and config.\n\tif db == nil || cfg == nil {\n\t\tpanic(errors.New(\"filter require db and config\"))\n\t}\n\t\/\/ Init filter\n\tflt.Init(db)\n\tflt.SetHitLimit(cfg)\n}\n\nfunc main() {\n\t\/\/ Arguments\n\tflag.Usage = usage\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Fprintln(os.Stdout, version.Version)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Init\n\tinitLog()\n\tinitConfig()\n\tinitDB()\n\tinitFilter()\n\n\t\/\/ Service\n\tcleaner := cleaner.New(db, cfg.Period[0]*cfg.Period[1])\n\tgo cleaner.Start()\n\n\talerter := alerter.New(cfg, db, flt)\n\talerter.Start()\n\n\tgo webapp.Start(cfg, db)\n\n\tdetector := detector.New(cfg, db, flt)\n\tdetector.Out(alerter.In)\n\tdetector.Start()\n}\n<commit_msg>Fix config validation err nil<commit_after>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/eleme\/banshee\/alerter\"\n\t\"github.com\/eleme\/banshee\/cleaner\"\n\t\"github.com\/eleme\/banshee\/config\"\n\t\"github.com\/eleme\/banshee\/detector\"\n\t\"github.com\/eleme\/banshee\/filter\"\n\t\"github.com\/eleme\/banshee\/storage\"\n\t\"github.com\/eleme\/banshee\/util\/log\"\n\t\"github.com\/eleme\/banshee\/version\"\n\t\"github.com\/eleme\/banshee\/webapp\"\n)\n\nvar (\n\t\/\/ Arguments\n\tdebug = flag.Bool(\"d\", false, \"debug mode\")\n\tfileName = flag.String(\"c\", \"config.json\", \"config file path\")\n\tshowVersion = flag.Bool(\"v\", false, \"show version\")\n\t\/\/ Variables\n\tcfg = config.New()\n\tdb *storage.DB\n\tflt = filter.New()\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: banshee [-c config] [-d] [-v]\\n\")\n\tflag.PrintDefaults()\n\tfmt.Fprintf(os.Stderr, \"copyright eleme https:\/\/github.com\/eleme\/banshee.\\n\")\n\tos.Exit(2)\n}\n\nfunc initLog() {\n\tlog.SetName(\"banshee\")\n\tif *debug {\n\t\tlog.SetLevel(log.DEBUG)\n\t}\n\tgoVs := runtime.Version()\n\tnCPU := runtime.GOMAXPROCS(-1)\n\tvers := version.Version\n\tlog.Debug(\"banshee%s %s %d cpu\", vers, goVs, nCPU)\n}\n\nfunc initConfig() {\n\t\/\/ Config parsing.\n\tif flag.NFlag() == 0 || (flag.NFlag() == 1 && *debug) {\n\t\t\/\/ Case .\/program [-d]\n\t\tlog.Warn(\"no config specified, using default..\")\n\t} else {\n\t\t\/\/ Update config.\n\t\terr := cfg.UpdateWithJSONFile(*fileName)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"failed to load %s, %s\", *fileName, err)\n\t\t}\n\t}\n\t\/\/ Config validation.\n\terr := cfg.Validate()\n\tif err != nil {\n\t\tif err == config.ErrAlerterCommandEmpty {\n\t\t\t\/\/ Ignore alerter command empty.\n\t\t\tlog.Warn(\"config: %s\", err)\n\t\t} else {\n\t\t\tlog.Fatal(\"config: %s\", err)\n\t\t}\n\t}\n}\n\nfunc initDB() {\n\t\/\/ Rely on config.\n\tif cfg == nil {\n\t\tpanic(errors.New(\"db require config\"))\n\t}\n\t\/\/ Open db with options.\n\toptions := &storage.Options{\n\t\tNumGrid: cfg.Period[0],\n\t\tGridLen: cfg.Period[1],\n\t}\n\tpath := cfg.Storage.Path\n\tvar err error\n\tdb, err = storage.Open(path, options)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open %s: %v\", path, err)\n\t}\n}\n\nfunc initFilter() {\n\t\/\/ Rely on db and config.\n\tif db == nil || cfg == nil {\n\t\tpanic(errors.New(\"filter require db and config\"))\n\t}\n\t\/\/ Init filter\n\tflt.Init(db)\n\tflt.SetHitLimit(cfg)\n}\n\nfunc main() {\n\t\/\/ Arguments\n\tflag.Usage = usage\n\tflag.Parse()\n\tif *showVersion {\n\t\tfmt.Fprintln(os.Stdout, version.Version)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Init\n\tinitLog()\n\tinitConfig()\n\tinitDB()\n\tinitFilter()\n\n\t\/\/ Service\n\tcleaner := cleaner.New(db, cfg.Period[0]*cfg.Period[1])\n\tgo cleaner.Start()\n\n\talerter := alerter.New(cfg, db, flt)\n\talerter.Start()\n\n\tgo webapp.Start(cfg, db)\n\n\tdetector := detector.New(cfg, db, flt)\n\tdetector.Out(alerter.In)\n\tdetector.Start()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar (\n\ttlsKeyFile = flag.String(\"tls-key\", \"tls.key\", \"The private key file used for TLS\")\n\ttlsCertFile = flag.String(\"tls-cert\", \"tls.crt\", \"The certificate file used for TLS\")\n\tircAddress = flag.String(\"irc-address\", \":6697\", \"The address:port to bind to and listen for clients on\")\n\tserverName = flag.String(\"irc-servername\", \"rosella\", \"Server name displayed to clients\")\n\tauthFile = flag.String(\"irc-authfile\", \"\", \"File containing usernames and passwords of operators.\")\n\tmotdFile = flag.String(\"irc-motdfile\", \"\", \"File container motd to display to clients.\")\n)\n\nfunc main() {\n\n\tflag.Parse()\n\n\tlog.Printf(\"Rosella v%s Initialising.\", VERSION)\n\n\t\/\/Init rosella itself\n\tserver := NewServer()\n\tserver.name = *serverName\n\n\tif *authFile != \"\" {\n\t\tlog.Printf(\"Loading auth file: %q\", *authFile)\n\n\t\tf, err := os.Open(*authFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdata := make([]byte, 1024)\n\t\tsize, err := f.Read(data)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlines := strings.Split(string(data[:size]), \"\\n\")\n\t\tfor _, line := range lines {\n\t\t\tif strings.HasPrefix(line, \"#\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfields := strings.Fields(line)\n\n\t\t\tif len(fields) == 2 {\n\t\t\t\tserver.operatorMap[fields[0]] = fields[1]\n\t\t\t}\n\t\t}\n\t}\n\n\tif *motdFile != \"\" {\n\t\tlog.Printf(\"Loading motd file: %q\", *motdFile)\n\n\t\tf, err := os.Open(*motdFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdata := make([]byte, 1024)\n\t\tsize, err := f.Read(data)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tserver.motd = string(data[:size])\n\t}\n\n\tgo server.Run()\n\n\ttlsConfig := new(tls.Config)\n\n\ttlsConfig.PreferServerCipherSuites = true\n\ttlsConfig.CipherSuites = []uint16{\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_RC4_128_SHA}\n\n\tcert, err := tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile)\n\tif err != nil {\n\t\tlog.Printf(\"Error loading tls certificate and key files.\")\n\t\tlog.Printf(err.Error())\n\t\treturn\n\t}\n\n\tlog.Printf(\"Loaded certificate and key successfully.\")\n\n\ttlsConfig.Certificates = []tls.Certificate{cert}\n\n\t\/\/Fills out tlsConfig.NameToCertificate\n\ttlsConfig.BuildNameToCertificate()\n\n\ttlsListener, err := tls.Listen(\"tcp\", *ircAddress, tlsConfig)\n\tif err != nil {\n\t\tlog.Printf(\"Could not open tls listener.\")\n\t\tlog.Printf(err.Error())\n\t\treturn\n\t}\n\n\tlog.Printf(\"Listening on %s\", *ircAddress)\n\n\tfor {\n\t\tconn, err := tlsListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error accepting connection.\")\n\t\t\tlog.Printf(err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tserver.HandleConnection(conn)\n\t}\n}\n<commit_msg>Don't accept RC4 as a cipher<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar (\n\ttlsKeyFile = flag.String(\"tls-key\", \"tls.key\", \"The private key file used for TLS\")\n\ttlsCertFile = flag.String(\"tls-cert\", \"tls.crt\", \"The certificate file used for TLS\")\n\tircAddress = flag.String(\"irc-address\", \":6697\", \"The address:port to bind to and listen for clients on\")\n\tserverName = flag.String(\"irc-servername\", \"rosella\", \"Server name displayed to clients\")\n\tauthFile = flag.String(\"irc-authfile\", \"\", \"File containing usernames and passwords of operators.\")\n\tmotdFile = flag.String(\"irc-motdfile\", \"\", \"File container motd to display to clients.\")\n)\n\nfunc main() {\n\n\tflag.Parse()\n\n\tlog.Printf(\"Rosella v%s Initialising.\", VERSION)\n\n\t\/\/Init rosella itself\n\tserver := NewServer()\n\tserver.name = *serverName\n\n\tif *authFile != \"\" {\n\t\tlog.Printf(\"Loading auth file: %q\", *authFile)\n\n\t\tf, err := os.Open(*authFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdata := make([]byte, 1024)\n\t\tsize, err := f.Read(data)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlines := strings.Split(string(data[:size]), \"\\n\")\n\t\tfor _, line := range lines {\n\t\t\tif strings.HasPrefix(line, \"#\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfields := strings.Fields(line)\n\n\t\t\tif len(fields) == 2 {\n\t\t\t\tserver.operatorMap[fields[0]] = fields[1]\n\t\t\t}\n\t\t}\n\t}\n\n\tif *motdFile != \"\" {\n\t\tlog.Printf(\"Loading motd file: %q\", *motdFile)\n\n\t\tf, err := os.Open(*motdFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdata := make([]byte, 1024)\n\t\tsize, err := f.Read(data)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tserver.motd = string(data[:size])\n\t}\n\n\tgo server.Run()\n\n\ttlsConfig := new(tls.Config)\n\n\ttlsConfig.PreferServerCipherSuites = true\n\ttlsConfig.CipherSuites = []uint16{\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA}\n\n\tcert, err := tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile)\n\tif err != nil {\n\t\tlog.Printf(\"Error loading tls certificate and key files.\")\n\t\tlog.Printf(err.Error())\n\t\treturn\n\t}\n\n\tlog.Printf(\"Loaded certificate and key successfully.\")\n\n\ttlsConfig.Certificates = []tls.Certificate{cert}\n\n\t\/\/Fills out tlsConfig.NameToCertificate\n\ttlsConfig.BuildNameToCertificate()\n\n\ttlsListener, err := tls.Listen(\"tcp\", *ircAddress, tlsConfig)\n\tif err != nil {\n\t\tlog.Printf(\"Could not open tls listener.\")\n\t\tlog.Printf(err.Error())\n\t\treturn\n\t}\n\n\tlog.Printf(\"Listening on %s\", *ircAddress)\n\n\tfor {\n\t\tconn, err := tlsListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error accepting connection.\")\n\t\t\tlog.Printf(err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tserver.HandleConnection(conn)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/moncho\/dry\/app\"\n\t\"github.com\/moncho\/dry\/appui\"\n\t\"github.com\/moncho\/dry\/docker\"\n\t\"github.com\/moncho\/dry\/ui\"\n\t\"github.com\/moncho\/dry\/version\"\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tbanner = ` _\n | |\n __| | ____ _ _\n \/ _ | \/ ___)| | | |\n( (_| || | | |_| |\n \\____||_| \\__ |\n (____\/\n\n`\n\tcheese = \"<white>made with \\U0001F499 (and go) by<\/> <blue>moncho<\/>\"\n\tconnecting = \"\\U0001f433 Trying to connect to the Docker Host \\U0001f433\"\n)\n\nvar loadMessage = []string{docker.Whale0,\n\tdocker.Whale1,\n\tdocker.Whale2,\n\tdocker.Whale3,\n\tdocker.Whale4,\n\tdocker.Whale5,\n\tdocker.Whale6,\n\tdocker.Whale7,\n\tdocker.Whale}\n\n\/\/options dry's flags\ntype options struct {\n\tDescription bool `short:\"d\" long:\"description\" description:\"Shows the description\"`\n\tMonitorMode string `short:\"m\" long:\"monitor\" description:\"Starts in monitor mode, given value (if any) is the refresh rate\" optional:\"yes\" optional-value:\"500\"`\n\t\/\/ enable profiling\n\tProfile bool `short:\"p\" long:\"profile\" description:\"Enable profiling\"`\n\tVersion bool `short:\"v\" long:\"version\" description:\"Dry version\"`\n\t\/\/Docker-related properties\n\tDockerHost string `short:\"H\" long:\"docker_host\" description:\"Docker Host\"`\n\tDockerCertPath string `short:\"c\" long:\"docker_certpath\" description:\"Docker cert path\"`\n\tDockerTLSVerifiy string `short:\"t\" long:\"docker_tls\" description:\"Docker TLS verify\"`\n\t\/\/Whale\n\tWhale uint `short:\"w\" long:\"whale\" description:\"Show whale for w seconds\"`\n}\n\nfunc config(opts options) (app.Config, error) {\n\tvar cfg app.Config\n\tif opts.DockerHost == \"\" {\n\t\tif os.Getenv(\"DOCKER_HOST\") == \"\" {\n\t\t\tlog.Printf(\n\t\t\t\t\"No DOCKER_HOST env variable found and no Host parameter was given, connecting to %s\",\n\t\t\t\tdocker.DefaultDockerHost)\n\t\t\tcfg.DockerHost = docker.DefaultDockerHost\n\t\t} else {\n\t\t\tcfg.DockerHost = os.Getenv(\"DOCKER_HOST\")\n\t\t\tcfg.DockerTLSVerify = docker.GetBool(os.Getenv(\"DOCKER_TLS_VERIFY\"))\n\t\t\tcfg.DockerCertPath = os.Getenv(\"DOCKER_CERT_PATH\")\n\t\t}\n\t} else {\n\t\tcfg.DockerHost = opts.DockerHost\n\t\tcfg.DockerTLSVerify = docker.GetBool(opts.DockerTLSVerifiy)\n\t\tcfg.DockerCertPath = opts.DockerCertPath\n\t}\n\n\tif opts.MonitorMode != \"\" {\n\t\tcfg.MonitorMode = true\n\t\trefreshRate, err := strconv.Atoi(opts.MonitorMode)\n\t\tif err != nil {\n\t\t\treturn cfg, errors.Wrap(err, \"invalid refresh rate\")\n\t\t}\n\t\tcfg.MonitorRefreshRate = refreshRate\n\t}\n\treturn cfg, nil\n}\n\nfunc showLoadingScreen(ctx context.Context, screen *ui.Screen, cfg app.Config) {\n\tscreen.Clear()\n\tmidscreen := screen.Dimensions().Width \/ 2\n\theight := screen.Dimensions().Height\n\tscreen.RenderAtColumn(midscreen-len(connecting)\/2, 1, ui.White(connecting))\n\tscreen.RenderLine(2, height-2, fmt.Sprintf(\"<blue>Dry Version:<\/> %s\", ui.White(version.VERSION)))\n\tif cfg.DockerHost != \"\" {\n\t\tscreen.RenderLine(2, height-1, fmt.Sprintf(\"<blue>Docker Host:<\/> %s\", ui.White(cfg.DockerHost)))\n\t} else {\n\t\tscreen.RenderLine(2, height-1, ui.White(\"No Docker host\"))\n\t}\n\n\t\/\/20 is a safe aproximation for the length of interpreted characters from the message\n\tscreen.RenderLine(ui.ActiveScreen.Dimensions().Width-len(cheese)+20, height-1, cheese)\n\tscreen.Flush()\n\tgo func() {\n\t\trotorPos := 0\n\t\tforward := true\n\t\tticker := time.NewTicker(250 * time.Millisecond)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tloadingMessage := loadMessage[rotorPos]\n\t\t\t\tscreen.RenderAtColumn(midscreen-19, height\/2-6, ui.Cyan(loadingMessage))\n\t\t\t\tscreen.Flush()\n\t\t\t\tif rotorPos == len(loadMessage)-1 {\n\t\t\t\t\tforward = false\n\t\t\t\t} else if rotorPos == 0 {\n\t\t\t\t\tforward = true\n\t\t\t\t}\n\t\t\t\tif forward {\n\t\t\t\t\trotorPos++\n\t\t\t\t} else {\n\t\t\t\t\trotorPos--\n\t\t\t\t}\n\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\nfunc main() {\n\trunning := false\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\n\t\t\t\t\"Dry panicked: %v\", r)\n\t\t\tlog.Error(string(debug.Stack()))\n\t\t\tlog.Print(\"Bye\")\n\t\t\tos.Exit(1)\n\t\t} else if running {\n\t\t\tlog.Print(\"Bye\")\n\t\t}\n\t}()\n\t\/\/ parse flags\n\tvar opts options\n\tparser := flags.NewParser(&opts, flags.Default)\n\t_, err := parser.Parse()\n\tif err != nil {\n\t\tflagError := err.(*flags.Error)\n\t\tif flagError.Type == flags.ErrHelp {\n\t\t\treturn\n\t\t}\n\t\tif flagError.Type == flags.ErrUnknownFlag {\n\t\t\tlog.Print(\"Use --help to view all available options.\")\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Could not parse flags: %s\", err)\n\t\treturn\n\t}\n\tif opts.Description {\n\t\tfmt.Print(app.ShortHelp)\n\t\treturn\n\t}\n\tif opts.Version {\n\t\tfmt.Printf(\"dry version %s, build %s\", version.VERSION, version.GITCOMMIT)\n\t\treturn\n\t}\n\tlog.Print(\"Starting dry\")\n\t\/\/ Start profiling (if required)\n\tif opts.Profile {\n\t\tgo func() {\n\t\t\tlog.Fatal(http.ListenAndServe(\"localhost:6060\", nil))\n\t\t}()\n\t}\n\tscreen, err := ui.NewScreen(appui.DryTheme)\n\tif err != nil {\n\t\tlog.Printf(\"Dry could not start: %s\", err)\n\t\treturn\n\t}\n\trunning = true\n\tcfg, err := config(opts)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\tctx, cancel := context.WithCancel(context.Background())\n\tshowLoadingScreen(ctx, screen, cfg)\n\n\tdry, err := app.NewDry(screen, cfg)\n\n\t\/\/if asked to, show whale to bablat a bit longer\n\tif opts.Whale > 0 {\n\t\tshowWale, _ := time.ParseDuration(fmt.Sprintf(\"%ds\", opts.Whale))\n\t\ttime.Sleep(showWale - time.Since(start))\n\t}\n\t\/\/dry has loaded, stopping the loading screen\n\tcancel()\n\tif err != nil {\n\t\tscreen.Close()\n\t\tlog.Printf(\"Dry could not start: %s\", err)\n\t\treturn\n\t}\n\tapp.RenderLoop(dry)\n\tscreen.Close()\n}\n<commit_msg>remove log statements #104<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/moncho\/dry\/app\"\n\t\"github.com\/moncho\/dry\/appui\"\n\t\"github.com\/moncho\/dry\/docker\"\n\t\"github.com\/moncho\/dry\/ui\"\n\t\"github.com\/moncho\/dry\/version\"\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tbanner = ` _\n | |\n __| | ____ _ _\n \/ _ | \/ ___)| | | |\n( (_| || | | |_| |\n \\____||_| \\__ |\n (____\/\n\n`\n\tcheese = \"<white>made with \\U0001F499 (and go) by<\/> <blue>moncho<\/>\"\n\tconnecting = \"\\U0001f433 Trying to connect to the Docker Host \\U0001f433\"\n)\n\nvar loadMessage = []string{docker.Whale0,\n\tdocker.Whale1,\n\tdocker.Whale2,\n\tdocker.Whale3,\n\tdocker.Whale4,\n\tdocker.Whale5,\n\tdocker.Whale6,\n\tdocker.Whale7,\n\tdocker.Whale}\n\n\/\/options dry's flags\ntype options struct {\n\tDescription bool `short:\"d\" long:\"description\" description:\"Shows the description\"`\n\tMonitorMode string `short:\"m\" long:\"monitor\" description:\"Starts in monitor mode, given value (if any) is the refresh rate\" optional:\"yes\" optional-value:\"500\"`\n\t\/\/ enable profiling\n\tProfile bool `short:\"p\" long:\"profile\" description:\"Enable profiling\"`\n\tVersion bool `short:\"v\" long:\"version\" description:\"Dry version\"`\n\t\/\/Docker-related properties\n\tDockerHost string `short:\"H\" long:\"docker_host\" description:\"Docker Host\"`\n\tDockerCertPath string `short:\"c\" long:\"docker_certpath\" description:\"Docker cert path\"`\n\tDockerTLSVerifiy string `short:\"t\" long:\"docker_tls\" description:\"Docker TLS verify\"`\n\t\/\/Whale\n\tWhale uint `short:\"w\" long:\"whale\" description:\"Show whale for w seconds\"`\n}\n\nfunc config(opts options) (app.Config, error) {\n\tvar cfg app.Config\n\tif opts.DockerHost == \"\" {\n\t\tif os.Getenv(\"DOCKER_HOST\") == \"\" {\n\t\t\tlog.Printf(\n\t\t\t\t\"No DOCKER_HOST env variable found and no Host parameter was given, connecting to %s\",\n\t\t\t\tdocker.DefaultDockerHost)\n\t\t\tcfg.DockerHost = docker.DefaultDockerHost\n\t\t} else {\n\t\t\tcfg.DockerHost = os.Getenv(\"DOCKER_HOST\")\n\t\t\tcfg.DockerTLSVerify = docker.GetBool(os.Getenv(\"DOCKER_TLS_VERIFY\"))\n\t\t\tcfg.DockerCertPath = os.Getenv(\"DOCKER_CERT_PATH\")\n\t\t}\n\t} else {\n\t\tcfg.DockerHost = opts.DockerHost\n\t\tcfg.DockerTLSVerify = docker.GetBool(opts.DockerTLSVerifiy)\n\t\tcfg.DockerCertPath = opts.DockerCertPath\n\t}\n\n\tif opts.MonitorMode != \"\" {\n\t\tcfg.MonitorMode = true\n\t\trefreshRate, err := strconv.Atoi(opts.MonitorMode)\n\t\tif err != nil {\n\t\t\treturn cfg, errors.Wrap(err, \"invalid refresh rate\")\n\t\t}\n\t\tcfg.MonitorRefreshRate = refreshRate\n\t}\n\treturn cfg, nil\n}\n\nfunc showLoadingScreen(ctx context.Context, screen *ui.Screen, cfg app.Config) {\n\tscreen.Clear()\n\tmidscreen := screen.Dimensions().Width \/ 2\n\theight := screen.Dimensions().Height\n\tscreen.RenderAtColumn(midscreen-len(connecting)\/2, 1, ui.White(connecting))\n\tscreen.RenderLine(2, height-2, fmt.Sprintf(\"<blue>Dry Version:<\/> %s\", ui.White(version.VERSION)))\n\tif cfg.DockerHost != \"\" {\n\t\tscreen.RenderLine(2, height-1, fmt.Sprintf(\"<blue>Docker Host:<\/> %s\", ui.White(cfg.DockerHost)))\n\t} else {\n\t\tscreen.RenderLine(2, height-1, ui.White(\"No Docker host\"))\n\t}\n\n\t\/\/20 is a safe aproximation for the length of interpreted characters from the message\n\tscreen.RenderLine(ui.ActiveScreen.Dimensions().Width-len(cheese)+20, height-1, cheese)\n\tscreen.Flush()\n\tgo func() {\n\t\trotorPos := 0\n\t\tforward := true\n\t\tticker := time.NewTicker(250 * time.Millisecond)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tloadingMessage := loadMessage[rotorPos]\n\t\t\t\tscreen.RenderAtColumn(midscreen-19, height\/2-6, ui.Cyan(loadingMessage))\n\t\t\t\tscreen.Flush()\n\t\t\t\tif rotorPos == len(loadMessage)-1 {\n\t\t\t\t\tforward = false\n\t\t\t\t} else if rotorPos == 0 {\n\t\t\t\t\tforward = true\n\t\t\t\t}\n\t\t\t\tif forward {\n\t\t\t\t\trotorPos++\n\t\t\t\t} else {\n\t\t\t\t\trotorPos--\n\t\t\t\t}\n\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\nfunc main() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\n\t\t\t\t\"Dry panicked: %v\", r)\n\t\t\tlog.Error(string(debug.Stack()))\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\t\/\/ parse flags\n\tvar opts options\n\tparser := flags.NewParser(&opts, flags.Default)\n\t_, err := parser.Parse()\n\tif err != nil {\n\t\tflagError := err.(*flags.Error)\n\t\tif flagError.Type == flags.ErrHelp {\n\t\t\treturn\n\t\t}\n\t\tif flagError.Type == flags.ErrUnknownFlag {\n\t\t\tlog.Print(\"Use --help to view all available options.\")\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Could not parse flags: %s\", err)\n\t\treturn\n\t}\n\tif opts.Description {\n\t\tfmt.Print(app.ShortHelp)\n\t\treturn\n\t}\n\tif opts.Version {\n\t\tfmt.Printf(\"dry version %s, build %s\", version.VERSION, version.GITCOMMIT)\n\t\treturn\n\t}\n\t\/\/ Start profiling (if required)\n\tif opts.Profile {\n\t\tgo func() {\n\t\t\tlog.Fatal(http.ListenAndServe(\"localhost:6060\", nil))\n\t\t}()\n\t}\n\tscreen, err := ui.NewScreen(appui.DryTheme)\n\tif err != nil {\n\t\tlog.Printf(\"Dry could not start: %s\", err)\n\t\treturn\n\t}\n\tcfg, err := config(opts)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\tctx, cancel := context.WithCancel(context.Background())\n\tshowLoadingScreen(ctx, screen, cfg)\n\n\tdry, err := app.NewDry(screen, cfg)\n\n\t\/\/if asked to, show whale to bablat a bit longer\n\tif opts.Whale > 0 {\n\t\tshowWale, _ := time.ParseDuration(fmt.Sprintf(\"%ds\", opts.Whale))\n\t\ttime.Sleep(showWale - time.Since(start))\n\t}\n\t\/\/dry has loaded, stopping the loading screen\n\tcancel()\n\tif err != nil {\n\t\tscreen.Close()\n\t\tlog.Printf(\"Dry could not start: %s\", err)\n\t\treturn\n\t}\n\tapp.RenderLoop(dry)\n\tscreen.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package hipbot\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mattn\/go-xmpp\"\n)\n\nconst (\n\tgroupchat = \"groupchat\"\n)\n\ntype (\n\t\/\/ Config holds the configuration for the bot\n\tConfig struct {\n\t\tJabberID string\n\t\tNick string\n\t\tFullName string\n\t\tHost string\n\t\tRooms []string\n\t\tPassword string\n\t\tDebug bool\n\t\tTLS *tls.Config\n\t}\n\n\t\/\/ Message represents an XMPP message\n\tMessage struct {\n\t\t\/\/ Text of the message\n\t\tText string\n\t\t\/\/ From contains the JID and Name of the sender separated by a \/\n\t\tFrom string\n\t}\n\n\t\/\/ Filter provides a way to perform checks before invoking a Handler\n\t\/\/ Handlers will only be invoked if all Filters return true\n\tFilter func(m Message) (string, bool)\n\n\thandleMatcher struct {\n\t\tFilters []Filter\n\t\tPattern string\n\t\tHandler Handler\n\t}\n\n\t\/\/ Bot represents the bot\n\tBot struct {\n\t\txmpp *xmpp.Client\n\t\tconfig Config\n\t\tstop chan bool\n\t\thandlers handleMatchers\n\t\thelp Handler\n\t\tstopped bool\n\t\t\/\/ Errors will be sent any non-nil error values encountered while listening for events\n\t\tErrors chan error\n\t}\n\n\t\/\/ Handler is passed a message and sends the text to HipChat if ok\n\tHandler func(m Message) string\n\n\thandleMatchers []handleMatcher\n)\n\n\/\/ Less sorts the longest strings first so we match more specifc -> less specific\nfunc (h handleMatchers) Less(i, j int) bool {\n\treturn len(h[i].Pattern) > len(h[j].Pattern)\n}\n\n\/\/ Len implements part of the Sort.Sort interface\nfunc (h handleMatchers) Len() int {\n\treturn len(h)\n}\n\n\/\/ Swap implements the sort.Sort interface\nfunc (h handleMatchers) Swap(i, j int) {\n\th[j], h[i] = h[i], h[j]\n}\n\n\/\/ heartbeat sends periodic pings to keep connection alive until the stop channel is closed\nfunc (b *Bot) heartbeat() {\n\tticker := time.NewTicker(30 * time.Second)\n\tfor {\n\t\tb.xmpp.PingC2S(b.config.JabberID, \"\")\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-b.stop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ AddHelp registers a Handler that can be called if no other handlers match.\nfunc (b *Bot) AddHelp(h Handler) {\n\tb.help = h\n}\n\n\/\/ AddHandler registers a Handler for callbacks that will be invoked when the pattern is matched.\n\/\/ If Filters are passed, all must return true before a Handler is invoked\nfunc (b *Bot) AddHandler(pattern string, h Handler, f ...Filter) {\n\tb.handlers = append(b.handlers, handleMatcher{Pattern: pattern, Handler: h, Filters: f})\n\tsort.Sort(b.handlers)\n}\n\n\/\/ SendRoom sends a message to a room.\nfunc (b *Bot) SendRoom(msg string, room string) error {\n\txmppMsg := xmpp.Chat{Text: msg, Type: groupchat, Remote: room}\n\t_, err := b.xmpp.Send(xmppMsg)\n\treturn err\n}\n\n\/\/ SendUser sends a message privately to a user.\nfunc (b *Bot) SendUser(msg string, user string) error {\n\txmppMsg := xmpp.Chat{Text: msg, Type: \"chat\", Remote: user}\n\t_, err := b.xmpp.Send(xmppMsg)\n\treturn err\n}\n\nfunc (b *Bot) send(msg xmpp.Chat) error {\n\t_, err := b.xmpp.Send(msg)\n\treturn err\n}\n\nfunc from(remote string) string {\n\tcomponents := strings.Split(remote, \"\/\")\n\tif len(components) == 2 {\n\t\treturn components[1]\n\t}\n\treturn \"\"\n}\n\n\/\/ Sender returns the name of the message sender\nfunc (m Message) Sender() string {\n\treturn from(m.From)\n}\n\n\/\/ sentByMe checks if our name matches the sender\nfunc (b *Bot) sentByMe(remote string) bool {\n\treturn from(remote) == b.config.FullName\n}\n\nfunc (b *Bot) handle(xmppMsg xmpp.Chat) string {\n\t\/\/ handle case where the bot name is spelled with capital letter\n\txmppMsg.Text = strings.TrimPrefix(xmppMsg.Text,strings.Title(b.config.Nick))\n\tmsg := Message{\n\t\tText: strings.TrimSpace(strings.TrimPrefix(xmppMsg.Text, b.config.Nick)),\n\t\tFrom: xmppMsg.Remote,\n\t}\n\tfor _, h := range b.handlers {\n\t\tif strings.HasPrefix(msg.Text, h.Pattern) {\n\t\t\tmsg.Text = strings.TrimSpace(strings.TrimPrefix(msg.Text, h.Pattern))\n\t\t\tfor _, f := range h.Filters {\n\t\t\t\tif resp, ok := f(msg); !ok {\n\t\t\t\t\treturn resp\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn h.Handler(msg)\n\t\t}\n\t}\n\tif b.help != nil {\n\t\treturn b.help(msg)\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bot) toMe(msg xmpp.Chat) bool {\n\tif msg.Text == \"\" {\n\t\treturn false\n\t}\n\n\tif b.sentByMe(msg.Remote) {\n\t\treturn false\n\t}\n\n\tif msg.Type == \"error\" {\n\t\treturn false\n\t}\n\n\t\/\/ only pass back group messages if they start with nick\n\tif msg.Type == groupchat {\n\t\treturn strings.HasPrefix(strings.ToLower(msg.Text), strings.ToLower(b.config.Nick))\n\t}\n\n\t\/\/ direct message\n\treturn true\n}\n\n\/\/ listen collects incoming XMPP events and performs callbacks\nfunc (b *Bot) listen() {\n\tfor {\n\t\tselect {\n\t\tcase <-b.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\txmppMsg, err := b.xmpp.Recv()\n\t\tif b.stopped {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\t\/\/ only send an error to the channel if someone is reading from it\n\t\t\tcase b.Errors <- err:\n\t\t\t}\n\t\t}\n\t\tif b.config.Debug {\n\t\t\tfmt.Printf(\"DEBUG: %+v\\n\", xmppMsg)\n\t\t}\n\t\tif msg, ok := xmppMsg.(xmpp.Chat); ok {\n\t\t\tif b.toMe(msg) {\n\t\t\t\tresp := b.handle(msg)\n\t\t\t\tif resp != \"\" {\n\t\t\t\t\tmsg.Text = resp\n\t\t\t\t\tb.send(msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Start listens for XMPP messages and joins any rooms\nfunc (b *Bot) Start() error {\n\tgo b.listen()\n\tgo b.heartbeat()\n\tfor _, room := range b.config.Rooms {\n\t\t_, err := b.xmpp.JoinMUCNoHistory(room, b.config.FullName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Stop terminates the XMPP connection\nfunc (b *Bot) Stop() {\n\tb.stopped = true\n\tclose(b.stop)\n\tb.xmpp.Close()\n\tclose(b.Errors)\n}\n\n\/\/ New generates a new bot\nfunc New(cfg Config) (*Bot, error) {\n\tb := &Bot{\n\t\tconfig: cfg,\n\t\tstop: make(chan bool),\n\t\tErrors: make(chan error),\n\t}\n\toptions := &xmpp.Options{\n\t\tHost: cfg.Host,\n\t\tUser: cfg.JabberID,\n\t\tDebug: cfg.Debug,\n\t\tPassword: cfg.Password,\n\t\tResource: \"bot\",\n\t\tStartTLS: false,\n\t\tNoTLS: true,\n\t\tTLSConfig: cfg.TLS,\n\t}\n\tvar err error\n\tb.xmpp, err = options.NewClient()\n\treturn b, err\n}\n<commit_msg>direct message support toggle<commit_after>package hipbot\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mattn\/go-xmpp\"\n)\n\nconst (\n\tgroupchat = \"groupchat\"\n)\n\ntype (\n\t\/\/ Config holds the configuration for the bot\n\tConfig struct {\n\t\tJabberID string\n\t\tNick string\n\t\tFullName string\n\t\tHost string\n\t\tRooms []string\n\t\tPassword string\n\t\tDebug bool\n\t\tTLS *tls.Config\n\t\tDirectMessages bool\n\t}\n\n\t\/\/ Message represents an XMPP message\n\tMessage struct {\n\t\t\/\/ Text of the message\n\t\tText string\n\t\t\/\/ From contains the JID and Name of the sender separated by a \/\n\t\tFrom string\n\t}\n\n\t\/\/ Filter provides a way to perform checks before invoking a Handler\n\t\/\/ Handlers will only be invoked if all Filters return true\n\tFilter func(m Message) (string, bool)\n\n\thandleMatcher struct {\n\t\tFilters []Filter\n\t\tPattern string\n\t\tHandler Handler\n\t}\n\n\t\/\/ Bot represents the bot\n\tBot struct {\n\t\txmpp *xmpp.Client\n\t\tconfig Config\n\t\tstop chan bool\n\t\thandlers handleMatchers\n\t\thelp Handler\n\t\tstopped bool\n\t\t\/\/ Errors will be sent any non-nil error values encountered while listening for events\n\t\tErrors chan error\n\t}\n\n\t\/\/ Handler is passed a message and sends the text to HipChat if ok\n\tHandler func(m Message) string\n\n\thandleMatchers []handleMatcher\n)\n\n\/\/ Less sorts the longest strings first so we match more specifc -> less specific\nfunc (h handleMatchers) Less(i, j int) bool {\n\treturn len(h[i].Pattern) > len(h[j].Pattern)\n}\n\n\/\/ Len implements part of the Sort.Sort interface\nfunc (h handleMatchers) Len() int {\n\treturn len(h)\n}\n\n\/\/ Swap implements the sort.Sort interface\nfunc (h handleMatchers) Swap(i, j int) {\n\th[j], h[i] = h[i], h[j]\n}\n\n\/\/ heartbeat sends periodic pings to keep connection alive until the stop channel is closed\nfunc (b *Bot) heartbeat() {\n\tticker := time.NewTicker(30 * time.Second)\n\tfor {\n\t\tb.xmpp.PingC2S(b.config.JabberID, \"\")\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-b.stop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ AddHelp registers a Handler that can be called if no other handlers match.\nfunc (b *Bot) AddHelp(h Handler) {\n\tb.help = h\n}\n\n\/\/ AddHandler registers a Handler for callbacks that will be invoked when the pattern is matched.\n\/\/ If Filters are passed, all must return true before a Handler is invoked\nfunc (b *Bot) AddHandler(pattern string, h Handler, f ...Filter) {\n\tb.handlers = append(b.handlers, handleMatcher{Pattern: pattern, Handler: h, Filters: f})\n\tsort.Sort(b.handlers)\n}\n\n\/\/ SendRoom sends a message to a room.\nfunc (b *Bot) SendRoom(msg string, room string) error {\n\txmppMsg := xmpp.Chat{Text: msg, Type: groupchat, Remote: room}\n\t_, err := b.xmpp.Send(xmppMsg)\n\treturn err\n}\n\n\/\/ SendUser sends a message privately to a user.\nfunc (b *Bot) SendUser(msg string, user string) error {\n\txmppMsg := xmpp.Chat{Text: msg, Type: \"chat\", Remote: user}\n\t_, err := b.xmpp.Send(xmppMsg)\n\treturn err\n}\n\nfunc (b *Bot) send(msg xmpp.Chat) error {\n\t_, err := b.xmpp.Send(msg)\n\treturn err\n}\n\nfunc from(remote string) string {\n\tcomponents := strings.Split(remote, \"\/\")\n\tif len(components) == 2 {\n\t\treturn components[1]\n\t}\n\treturn \"\"\n}\n\n\/\/ Sender returns the name of the message sender\nfunc (m Message) Sender() string {\n\treturn from(m.From)\n}\n\n\/\/ sentByMe checks if our name matches the sender\nfunc (b *Bot) sentByMe(remote string) bool {\n\treturn from(remote) == b.config.FullName\n}\n\nfunc (b *Bot) handle(xmppMsg xmpp.Chat) string {\n\t\/\/ handle case where the bot name is spelled with capital letter\n\txmppMsg.Text = strings.TrimPrefix(xmppMsg.Text,strings.Title(b.config.Nick))\n\tmsg := Message{\n\t\tText: strings.TrimSpace(strings.TrimPrefix(xmppMsg.Text, b.config.Nick)),\n\t\tFrom: xmppMsg.Remote,\n\t}\n\tfor _, h := range b.handlers {\n\t\tif strings.HasPrefix(msg.Text, h.Pattern) {\n\t\t\tmsg.Text = strings.TrimSpace(strings.TrimPrefix(msg.Text, h.Pattern))\n\t\t\tfor _, f := range h.Filters {\n\t\t\t\tif resp, ok := f(msg); !ok {\n\t\t\t\t\treturn resp\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn h.Handler(msg)\n\t\t}\n\t}\n\tif b.help != nil {\n\t\treturn b.help(msg)\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bot) toMe(msg xmpp.Chat) bool {\n\tif msg.Text == \"\" {\n\t\treturn false\n\t}\n\n\tif b.sentByMe(msg.Remote) {\n\t\treturn false\n\t}\n\n\tif msg.Type == \"error\" {\n\t\treturn false\n\t}\n\n\t\/\/ only pass back group messages if they start with nick\n\tif msg.Type == groupchat {\n\t\treturn strings.HasPrefix(strings.ToLower(msg.Text), strings.ToLower(b.config.Nick))\n\t}\n\n\t\/\/ direct messages\n\treturn b.config.DirectMessages\n}\n\n\/\/ listen collects incoming XMPP events and performs callbacks\nfunc (b *Bot) listen() {\n\tfor {\n\t\tselect {\n\t\tcase <-b.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\txmppMsg, err := b.xmpp.Recv()\n\t\tif b.stopped {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\t\/\/ only send an error to the channel if someone is reading from it\n\t\t\tcase b.Errors <- err:\n\t\t\t}\n\t\t}\n\t\tif b.config.Debug {\n\t\t\tfmt.Printf(\"DEBUG: %+v\\n\", xmppMsg)\n\t\t}\n\t\tif msg, ok := xmppMsg.(xmpp.Chat); ok {\n\t\t\tif b.toMe(msg) {\n\t\t\t\tresp := b.handle(msg)\n\t\t\t\tif resp != \"\" {\n\t\t\t\t\tmsg.Text = resp\n\t\t\t\t\tb.send(msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Start listens for XMPP messages and joins any rooms\nfunc (b *Bot) Start() error {\n\tgo b.listen()\n\tgo b.heartbeat()\n\tfor _, room := range b.config.Rooms {\n\t\t_, err := b.xmpp.JoinMUCNoHistory(room, b.config.FullName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Stop terminates the XMPP connection\nfunc (b *Bot) Stop() {\n\tb.stopped = true\n\tclose(b.stop)\n\tb.xmpp.Close()\n\tclose(b.Errors)\n}\n\n\/\/ New generates a new bot\nfunc New(cfg Config) (*Bot, error) {\n\tb := &Bot{\n\t\tconfig: cfg,\n\t\tstop: make(chan bool),\n\t\tErrors: make(chan error),\n\t}\n\toptions := &xmpp.Options{\n\t\tHost: cfg.Host,\n\t\tUser: cfg.JabberID,\n\t\tDebug: cfg.Debug,\n\t\tPassword: cfg.Password,\n\t\tResource: \"bot\",\n\t\tStartTLS: false,\n\t\tNoTLS: true,\n\t\tTLSConfig: cfg.TLS,\n\t}\n\tvar err error\n\tb.xmpp, err = options.NewClient()\n\treturn b, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/ogier\/pflag\"\n)\n\nvar (\n\tname = \"prod\"\n\tversion = \"0.0.0\"\n\n\tflagset = pflag.NewFlagSet(name, pflag.ContinueOnError)\n\tseparator = flagset.StringP(\"separator\", \"s\", \"\\t\", \"\")\n\tisHelp = flagset.BoolP(\"help\", \"h\", false, \"\")\n\tisVersion = flagset.BoolP(\"version\", \"\", false, \"\")\n)\n\nfunc printUsage() {\n\tfmt.Fprintf(os.Stderr, `\nUsage: %s [OPTION]... [FILE]...\nOutput direct product of lines of each files.\n\nOptions:\n -s, --separator=STRING use STRING to separate columns (default: \\t)\n --help display this help text and exit\n --version display version information and exit\n`[1:], name)\n}\n\nfunc printVersion() {\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", version)\n}\n\nfunc printErr(err interface{}) {\n\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", name, err)\n}\n\nfunc guideToHelp() {\n\tfmt.Fprintf(os.Stderr, \"Try '%s --help' for more information.\\n\", name)\n}\n\nfunc _main() int {\n\tflagset.SetOutput(ioutil.Discard)\n\tif err := flagset.Parse(os.Args[1:]); err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif *isHelp {\n\t\tprintUsage()\n\t\treturn 0\n\t}\n\tif *isVersion {\n\t\tprintVersion()\n\t\treturn 0\n\t}\n\n\tvar rs []io.Reader\n\tif flagset.NArg() == 0 {\n\t\trs = append(rs, os.Stdin)\n\t} else {\n\t\tfor _, arg := range flagset.Args() {\n\t\t\tf, err := os.Open(arg)\n\t\t\tif err != nil {\n\t\t\t\tprintErr(err)\n\t\t\t\treturn 2\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\trs = append(rs, f)\n\t\t}\n\t}\n\n\tvar aa [][]string\n\tfor _, r := range rs {\n\t\ta := make([]string, 0, 64)\n\t\tb := bufio.NewScanner(r)\n\t\tfor b.Scan() {\n\t\t\ta = append(a, b.Text())\n\t\t}\n\t\tif err := b.Err(); err != nil {\n\t\t\tprintErr(err)\n\t\t\treturn 1\n\t\t}\n\t\taa = append(aa, a)\n\t}\n\n\tss := make([]string, len(aa))\n\tfor indexes := range Product(aa) {\n\t\tfor i, index := range indexes {\n\t\t\tss[i] = aa[i][index]\n\t\t}\n\t\tfmt.Println(strings.Join(ss, *separator))\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<commit_msg>Upgrade to release initial version<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/ogier\/pflag\"\n)\n\nvar (\n\tname = \"prod\"\n\tversion = \"0.1.0\"\n\n\tflagset = pflag.NewFlagSet(name, pflag.ContinueOnError)\n\tseparator = flagset.StringP(\"separator\", \"s\", \"\\t\", \"\")\n\tisHelp = flagset.BoolP(\"help\", \"h\", false, \"\")\n\tisVersion = flagset.BoolP(\"version\", \"\", false, \"\")\n)\n\nfunc printUsage() {\n\tfmt.Fprintf(os.Stderr, `\nUsage: %s [OPTION]... [FILE]...\nOutput direct product of lines of each files.\n\nOptions:\n -s, --separator=STRING use STRING to separate columns (default: \\t)\n --help display this help text and exit\n --version display version information and exit\n`[1:], name)\n}\n\nfunc printVersion() {\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", version)\n}\n\nfunc printErr(err interface{}) {\n\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", name, err)\n}\n\nfunc guideToHelp() {\n\tfmt.Fprintf(os.Stderr, \"Try '%s --help' for more information.\\n\", name)\n}\n\nfunc _main() int {\n\tflagset.SetOutput(ioutil.Discard)\n\tif err := flagset.Parse(os.Args[1:]); err != nil {\n\t\tprintErr(err)\n\t\tguideToHelp()\n\t\treturn 2\n\t}\n\tif *isHelp {\n\t\tprintUsage()\n\t\treturn 0\n\t}\n\tif *isVersion {\n\t\tprintVersion()\n\t\treturn 0\n\t}\n\n\tvar rs []io.Reader\n\tif flagset.NArg() == 0 {\n\t\trs = append(rs, os.Stdin)\n\t} else {\n\t\tfor _, arg := range flagset.Args() {\n\t\t\tf, err := os.Open(arg)\n\t\t\tif err != nil {\n\t\t\t\tprintErr(err)\n\t\t\t\treturn 2\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\trs = append(rs, f)\n\t\t}\n\t}\n\n\tvar aa [][]string\n\tfor _, r := range rs {\n\t\ta := make([]string, 0, 64)\n\t\tb := bufio.NewScanner(r)\n\t\tfor b.Scan() {\n\t\t\ta = append(a, b.Text())\n\t\t}\n\t\tif err := b.Err(); err != nil {\n\t\t\tprintErr(err)\n\t\t\treturn 1\n\t\t}\n\t\taa = append(aa, a)\n\t}\n\n\tss := make([]string, len(aa))\n\tfor indexes := range Product(aa) {\n\t\tfor i, index := range indexes {\n\t\t\tss[i] = aa[i][index]\n\t\t}\n\t\tfmt.Println(strings.Join(ss, *separator))\n\t}\n\treturn 0\n}\n\nfunc main() {\n\te := _main()\n\tos.Exit(e)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/mithrandie\/csvq\/lib\/action\"\n\t\"github.com\/mithrandie\/csvq\/lib\/cmd\"\n\t\"github.com\/mithrandie\/csvq\/lib\/query\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar version = \"v0.5.7\"\n\nfunc main() {\n\tcli.AppHelpTemplate = appHHelpTemplate\n\tcli.CommandHelpTemplate = commandHelpTemplate\n\n\tapp := cli.NewApp()\n\n\tapp.Name = \"csvq\"\n\tapp.Usage = \"SQL like query language for csv\"\n\tapp.ArgsUsage = \"[\\\"query\\\"|\\\"statements\\\"]\"\n\tapp.Version = version\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"delimiter, d\",\n\t\t\tUsage: \"field delimiter. Default is \\\",\\\" for csv files, \\\"\\\\t\\\" for tsv files.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"encoding, e\",\n\t\t\tValue: \"UTF8\",\n\t\t\tUsage: \"file encoding. one of: UTF8|SJIS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"line-break, l\",\n\t\t\tValue: \"LF\",\n\t\t\tUsage: \"line break. one of: CRLF|LF|CR\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"timezone, z\",\n\t\t\tValue: \"Local\",\n\t\t\tUsage: \"default timezone. \\\"Local\\\", \\\"UTC\\\" or a timezone name(e.g. \\\"America\/Los_Angeles\\\")\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repository, r\",\n\t\t\tUsage: \"directory path where files are located\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"source, s\",\n\t\t\tUsage: \"load query from `FILE`\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"datetime-format, t\",\n\t\t\tUsage: \"set datetime format to parse strings\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-header, n\",\n\t\t\tUsage: \"import the first line as a record\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"without-null, a\",\n\t\t\tUsage: \"parse empty fields as empty strings\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"write\",\n\t\t\tUsage: \"Write output to a file\",\n\t\t\tArgsUsage: \"[\\\"query\\\"|\\\"statements\\\"]\",\n\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-encoding, E\",\n\t\t\t\t\tValue: \"UTF8\",\n\t\t\t\t\tUsage: \"file encoding. one of: UTF8|SJIS\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"out, o\",\n\t\t\t\t\tUsage: \"write output to `FILE`\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"output format. one of: CSV|TSV|JSON|TEXT\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-delimiter, D\",\n\t\t\t\t\tUsage: \"field delimiter for CSV\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"without-header, N\",\n\t\t\t\t\tUsage: \"when the file format is specified as CSV or TSV, write without the header line\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tBefore: func(c *cli.Context) error {\n\t\t\t\treturn setWriteFlags(c)\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tqueryString, err := readQuery(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"write\")\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn exec(queryString)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"fields\",\n\t\t\tUsage: \"Show fields in a file\",\n\t\t\tArgsUsage: \"CSV_FILE_PATH\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"fields\")\n\t\t\t\t\treturn cli.NewExitError(\"table is not specified\", 1)\n\t\t\t\t}\n\n\t\t\t\ttable := c.Args().First()\n\n\t\t\t\terr := action.ShowFields(table)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"calc\",\n\t\t\tUsage: \"Calculate a value from stdin\",\n\t\t\tArgsUsage: \"\\\"expression\\\"\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"calc\")\n\t\t\t\t\treturn cli.NewExitError(\"expression is empty\", 1)\n\t\t\t\t}\n\n\t\t\t\texpr := c.Args().First()\n\t\t\t\terr := action.Calc(expr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\treturn setGlobalFlags(c)\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tqueryString, err := readQuery(c)\n\t\tif err != nil {\n\t\t\tcli.ShowAppHelp(c)\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\treturn exec(queryString)\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc exec(queryString string) error {\n\terr := action.Write(queryString, cmd.GetFlags().Source)\n\tif err != nil {\n\t\tcode := 1\n\t\tif apperr, ok := err.(query.AppError); ok {\n\t\t\tcode = apperr.GetCode()\n\t\t}\n\t\treturn cli.NewExitError(err.Error(), code)\n\t}\n\n\treturn nil\n}\n\nfunc readQuery(c *cli.Context) (string, error) {\n\tvar queryString string\n\n\tflags := cmd.GetFlags()\n\tif 0 < len(flags.Source) {\n\t\tfp, err := os.Open(flags.Source)\n\t\tif err != nil {\n\t\t\treturn queryString, err\n\t\t}\n\t\tdefer fp.Close()\n\n\t\tbuf, err := ioutil.ReadAll(fp)\n\t\tif err != nil {\n\t\t\treturn queryString, err\n\t\t}\n\t\tqueryString = string(buf)\n\n\t} else {\n\t\tif c.NArg() != 1 {\n\t\t\treturn queryString, errors.New(\"query is empty\")\n\t\t}\n\t\tqueryString = c.Args().First()\n\t}\n\n\treturn queryString, nil\n}\n\nfunc setGlobalFlags(c *cli.Context) error {\n\tif err := cmd.SetDelimiter(c.GlobalString(\"delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetEncoding(c.GlobalString(\"encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLineBreak(c.String(\"line-break\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLocation(c.String(\"timezone\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetRepository(c.GlobalString(\"repository\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetSource(c.GlobalString(\"source\")); err != nil {\n\t\treturn err\n\t}\n\tcmd.SetDatetimeFormat(c.GlobalString(\"datetime-format\"))\n\tcmd.SetNoHeader(c.GlobalBool(\"no-header\"))\n\tcmd.SetWithoutNull(c.GlobalBool(\"without-null\"))\n\treturn nil\n}\n\nfunc setWriteFlags(c *cli.Context) error {\n\tif err := cmd.SetWriteEncoding(c.String(\"write-encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetOut(c.String(\"out\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetFormat(c.String(\"format\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWriteDelimiter(c.String(\"write-delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tcmd.SetWithoutHeader(c.Bool(\"without-header\"))\n\treturn nil\n}\n<commit_msg>Update version for Release v0.5.8<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/mithrandie\/csvq\/lib\/action\"\n\t\"github.com\/mithrandie\/csvq\/lib\/cmd\"\n\t\"github.com\/mithrandie\/csvq\/lib\/query\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar version = \"v0.5.8\"\n\nfunc main() {\n\tcli.AppHelpTemplate = appHHelpTemplate\n\tcli.CommandHelpTemplate = commandHelpTemplate\n\n\tapp := cli.NewApp()\n\n\tapp.Name = \"csvq\"\n\tapp.Usage = \"SQL like query language for csv\"\n\tapp.ArgsUsage = \"[\\\"query\\\"|\\\"statements\\\"]\"\n\tapp.Version = version\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"delimiter, d\",\n\t\t\tUsage: \"field delimiter. Default is \\\",\\\" for csv files, \\\"\\\\t\\\" for tsv files.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"encoding, e\",\n\t\t\tValue: \"UTF8\",\n\t\t\tUsage: \"file encoding. one of: UTF8|SJIS\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"line-break, l\",\n\t\t\tValue: \"LF\",\n\t\t\tUsage: \"line break. one of: CRLF|LF|CR\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"timezone, z\",\n\t\t\tValue: \"Local\",\n\t\t\tUsage: \"default timezone. \\\"Local\\\", \\\"UTC\\\" or a timezone name(e.g. \\\"America\/Los_Angeles\\\")\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repository, r\",\n\t\t\tUsage: \"directory path where files are located\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"source, s\",\n\t\t\tUsage: \"load query from `FILE`\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"datetime-format, t\",\n\t\t\tUsage: \"set datetime format to parse strings\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-header, n\",\n\t\t\tUsage: \"import the first line as a record\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"without-null, a\",\n\t\t\tUsage: \"parse empty fields as empty strings\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"write\",\n\t\t\tUsage: \"Write output to a file\",\n\t\t\tArgsUsage: \"[\\\"query\\\"|\\\"statements\\\"]\",\n\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-encoding, E\",\n\t\t\t\t\tValue: \"UTF8\",\n\t\t\t\t\tUsage: \"file encoding. one of: UTF8|SJIS\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"out, o\",\n\t\t\t\t\tUsage: \"write output to `FILE`\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"output format. one of: CSV|TSV|JSON|TEXT\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-delimiter, D\",\n\t\t\t\t\tUsage: \"field delimiter for CSV\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"without-header, N\",\n\t\t\t\t\tUsage: \"when the file format is specified as CSV or TSV, write without the header line\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tBefore: func(c *cli.Context) error {\n\t\t\t\treturn setWriteFlags(c)\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tqueryString, err := readQuery(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"write\")\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn exec(queryString)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"fields\",\n\t\t\tUsage: \"Show fields in a file\",\n\t\t\tArgsUsage: \"CSV_FILE_PATH\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"fields\")\n\t\t\t\t\treturn cli.NewExitError(\"table is not specified\", 1)\n\t\t\t\t}\n\n\t\t\t\ttable := c.Args().First()\n\n\t\t\t\terr := action.ShowFields(table)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"calc\",\n\t\t\tUsage: \"Calculate a value from stdin\",\n\t\t\tArgsUsage: \"\\\"expression\\\"\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\tcli.ShowCommandHelp(c, \"calc\")\n\t\t\t\t\treturn cli.NewExitError(\"expression is empty\", 1)\n\t\t\t\t}\n\n\t\t\t\texpr := c.Args().First()\n\t\t\t\terr := action.Calc(expr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\treturn setGlobalFlags(c)\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tqueryString, err := readQuery(c)\n\t\tif err != nil {\n\t\t\tcli.ShowAppHelp(c)\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\treturn exec(queryString)\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc exec(queryString string) error {\n\terr := action.Write(queryString, cmd.GetFlags().Source)\n\tif err != nil {\n\t\tcode := 1\n\t\tif apperr, ok := err.(query.AppError); ok {\n\t\t\tcode = apperr.GetCode()\n\t\t}\n\t\treturn cli.NewExitError(err.Error(), code)\n\t}\n\n\treturn nil\n}\n\nfunc readQuery(c *cli.Context) (string, error) {\n\tvar queryString string\n\n\tflags := cmd.GetFlags()\n\tif 0 < len(flags.Source) {\n\t\tfp, err := os.Open(flags.Source)\n\t\tif err != nil {\n\t\t\treturn queryString, err\n\t\t}\n\t\tdefer fp.Close()\n\n\t\tbuf, err := ioutil.ReadAll(fp)\n\t\tif err != nil {\n\t\t\treturn queryString, err\n\t\t}\n\t\tqueryString = string(buf)\n\n\t} else {\n\t\tif c.NArg() != 1 {\n\t\t\treturn queryString, errors.New(\"query is empty\")\n\t\t}\n\t\tqueryString = c.Args().First()\n\t}\n\n\treturn queryString, nil\n}\n\nfunc setGlobalFlags(c *cli.Context) error {\n\tif err := cmd.SetDelimiter(c.GlobalString(\"delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetEncoding(c.GlobalString(\"encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLineBreak(c.String(\"line-break\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLocation(c.String(\"timezone\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetRepository(c.GlobalString(\"repository\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetSource(c.GlobalString(\"source\")); err != nil {\n\t\treturn err\n\t}\n\tcmd.SetDatetimeFormat(c.GlobalString(\"datetime-format\"))\n\tcmd.SetNoHeader(c.GlobalBool(\"no-header\"))\n\tcmd.SetWithoutNull(c.GlobalBool(\"without-null\"))\n\treturn nil\n}\n\nfunc setWriteFlags(c *cli.Context) error {\n\tif err := cmd.SetWriteEncoding(c.String(\"write-encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetOut(c.String(\"out\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetFormat(c.String(\"format\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWriteDelimiter(c.String(\"write-delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tcmd.SetWithoutHeader(c.Bool(\"without-header\"))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ 2017-02-04 adbr\n\n\/\/ Program pogoda wyświetla dane pogodowe dla podanego miasta.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tserviceURL = \"http:\/\/api.openweathermap.org\/data\/2.5\/weather\"\n\tserviceApiKey = \"93ca2c840c952abe90064d9e251347f1\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"pogoda: \")\n\n\th := flag.Bool(\"h\", false, \"Wyświetla help\")\n\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif *h {\n\t\tfmt.Print(helpStr)\n\t\tos.Exit(0)\n\t}\n\n\tif flag.NArg() < 1 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\tcity := flag.Arg(0)\n\n\tweather, err := getWeather(city)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = printWeather(os.Stdout, weather)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ usage drukuje na stderr sposób użycia programu.\nfunc usage() {\n\tconst s = \"Sposób użycia: pogoda [-h] miasto\"\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", s)\n}\n\n\/\/ getWeather zwraca dane pogodowe dla miasta city. Dane są pobierane\n\/\/ z serwisu openweathermap.org.\nfunc getWeather(city string) (*WeatherResult, error) {\n\tquery := url.Values{\n\t\t\"appid\": {serviceApiKey},\n\t\t\"units\": {\"metric\"},\n\t\t\"q\": {city},\n\t}\n\tresp, err := http.Get(serviceURL + \"?\" + query.Encode())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ Wczytanie zwróconych danych w formacie JSON.\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Dekodowanie JSONa.\n\tresult := new(WeatherResult)\n\terr = json.Unmarshal(data, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl := \"15:04:05 MST\" \/\/ format czasu\n\tresult.Sys.SunriseTime = time.Unix(result.Sys.SunriseUnix, 0).Format(l)\n\tresult.Sys.SunsetTime = time.Unix(result.Sys.SunsetUnix, 0).Format(l)\n\n\treturn result, nil\n}\n\n\/\/ printWeather drukuje do out dane pogodowe sformatowane przy użyciu\n\/\/ template'u.\nfunc printWeather(out io.Writer, weather *WeatherResult) error {\n\terr := templ.Execute(out, weather)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Typ WeatherResult representuje dane pogodowe.\ntype WeatherResult struct {\n\tCoord struct {\n\t\tLat float64 \/\/ city geo location, latitude\n\t\tLon float64 \/\/ city geo location, longitude\n\t}\n\tWeather []struct {\n\t\tId int\n\t\tMain string\n\t\tDescription string\n\t\tIcon string\n\t}\n\tBase string\n\tMain struct {\n\t\tTemp float64\n\t\tPressure float64\n\t\tHumidity float64\n\t\tTempMin float64 `json:\"temp_min\"`\n\t\tTempMax float64 `json:\"temp_max\"`\n\t}\n\tVisibility int\n\tWind struct {\n\t\tSpeed float64\n\t\tDeg float64\n\t}\n\tClouds struct {\n\t\tAll float64\n\t}\n\tDt int64\n\tSys struct {\n\t\tType int\n\t\tId int\n\t\tMessage float64\n\t\tCountry string\n\t\tSunriseUnix int64 `json:\"sunrise\"`\n\t\tSunsetUnix int64 `json:\"sunset\"`\n\t\tSunriseTime string \/\/ sformatowany czas z SunriseUnix\n\t\tSunsetTime string \/\/ sformatowany czas z SunsetUnix\n\t}\n\tId int \/\/ city id\n\tName string \/\/ city name\n\tCod int\n}\n\n\/\/ templStr jest templatem dla wyświetlania danych pogodowych typu\n\/\/ WeatherResult.\nconst templStr = `Miasto:\t {{.Name}}, {{.Sys.Country}} [{{.Coord.Lat}}, {{.Coord.Lon}}]\nTemperatura: {{.Main.Temp}} °C (min: {{.Main.TempMin}}, max: {{.Main.TempMax}})\n{{range .Weather -}}\nPogoda: {{.Main}} ({{.Description}})\n{{end -}}\nCiśnienie: {{.Main.Pressure}} hpa\nWilgotność: {{.Main.Humidity}} %\nWiatr: {{.Wind.Speed}} m\/s ({{.Wind.Deg}}°)\nZachmurzenie: {{.Clouds.All}} %\nWschód słońca: {{.Sys.SunriseTime}}\nZachód słońca: {{.Sys.SunsetTime}}\n(Dane pochodzą z serwisu OpenWeatherMap.com)\n`\n\nvar templ = template.Must(template.New(\"weather\").Parse(templStr))\n\nconst helpStr = `Program pogoda wyświetla dane pogodowe dla podanego miasta.\n\nSposób użycia:\n\tpogoda [-h] miasto\n\n\t-h Wyświetla help.\n\nDla podanego miasta program pobiera aktualne dane pogodowe z serwisu\nhttp:\/\/api.openweathermap.org i wyświetla je na standardowe wyjście.\n\nPrzykład: pogoda dla Warszawy:\n\n\t$ pogoda Warszawa\n\tMiasto: Warszawa, PL [52.24, 21.04]\n\tTemperatura: 21 °C (min: 21, max: 21)\n\tPogoda: Clear (clear sky)\n\tCiśnienie: 1023 hpa\n\tWilgotność: 40 %\n\tWiatr: 2.6 m\/s (0°)\n\tZachmurzenie: 0 %\n\tWschód słońca: 04:24:40 CEST\n\tZachód słońca: 20:42:16 CEST\n\t(Dane pochodzą z serwisu OpenWeatherMap.com)\n`\n<commit_msg>Poprawa wyświetlania informacji<commit_after>\/\/ 2017-02-04 adbr\n\n\/\/ Program pogoda wyświetla dane pogodowe dla podanego miasta.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tserviceURL = \"http:\/\/api.openweathermap.org\/data\/2.5\/weather\"\n\tserviceApiKey = \"93ca2c840c952abe90064d9e251347f1\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"pogoda: \")\n\n\th := flag.Bool(\"h\", false, \"Wyświetla help\")\n\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif *h {\n\t\tfmt.Print(helpStr)\n\t\tos.Exit(0)\n\t}\n\n\tif flag.NArg() < 1 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\tcity := flag.Arg(0)\n\n\tweather, err := getWeather(city)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = printWeather(os.Stdout, weather)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ usage drukuje na stderr sposób użycia programu.\nfunc usage() {\n\tconst s = \"Sposób użycia: pogoda [-h] miasto\"\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", s)\n}\n\n\/\/ getWeather zwraca dane pogodowe dla miasta city. Dane są pobierane\n\/\/ z serwisu openweathermap.org.\nfunc getWeather(city string) (*WeatherResult, error) {\n\tquery := url.Values{\n\t\t\"appid\": {serviceApiKey},\n\t\t\"units\": {\"metric\"},\n\t\t\"q\": {city},\n\t}\n\tresp, err := http.Get(serviceURL + \"?\" + query.Encode())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ Wczytanie zwróconych danych w formacie JSON.\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Dekodowanie JSONa.\n\tresult := new(WeatherResult)\n\terr = json.Unmarshal(data, result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Formatowanie danych lokalnych dla ułatwienia wyświetlania\n\t\/\/ niektórych pól.\n\tl := \"15:04:05 MST\" \/\/ format czasu\n\tresult.Local.SunriseTime = time.Unix(result.Sys.SunriseUnix, 0).Format(l)\n\tresult.Local.SunsetTime = time.Unix(result.Sys.SunsetUnix, 0).Format(l)\n\tresult.Local.Weather = weatherDescription(result.Weather)\n\n\treturn result, nil\n}\n\n\/\/ printWeather drukuje do out dane pogodowe sformatowane przy użyciu\n\/\/ template'u.\nfunc printWeather(out io.Writer, weather *WeatherResult) error {\n\terr := templ.Execute(out, weather)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Typ WeatherResult representuje dane pogodowe.\ntype WeatherResult struct {\n\tCoord struct {\n\t\tLat float64 \/\/ city geo location, latitude\n\t\tLon float64 \/\/ city geo location, longitude\n\t}\n\tWeather Weathers\n\tBase string\n\tMain struct {\n\t\tTemp float64\n\t\tPressure float64\n\t\tHumidity float64\n\t\tTempMin float64 `json:\"temp_min\"`\n\t\tTempMax float64 `json:\"temp_max\"`\n\t}\n\tVisibility int\n\tWind struct {\n\t\tSpeed float64\n\t\tDeg float64\n\t}\n\tClouds struct {\n\t\tAll float64\n\t}\n\tDt int64\n\tSys struct {\n\t\tType int\n\t\tId int\n\t\tMessage float64\n\t\tCountry string\n\t\tSunriseUnix int64 `json:\"sunrise\"`\n\t\tSunsetUnix int64 `json:\"sunset\"`\n\t}\n\tId int \/\/ city id\n\tName string \/\/ city name\n\tCod int\n\n\t\/\/ Pole Local zawiera dane dodane lokalnie dla ułatwienia\n\t\/\/ wyświetlania informacji.\n\tLocal struct {\n\t\tSunriseTime string \/\/ sformatowany czas z SunriseUnix\n\t\tSunsetTime string \/\/ sformatowany czas z SunsetUnix\n\t\tWeather string \/\/ sformatowany opis pogody z pola Weather\n\t}\n}\n\n\/\/ Weathers jest typem pola WeatherResult.Weather. Zawiera słowne\n\/\/ opisy pogody.\ntype Weathers []struct {\n\tId int\n\tMain string\n\tDescription string\n\tIcon string\n}\n\n\/\/ weatherDescription zwraca sformatowany opis pogody.\nfunc weatherDescription(w Weathers) string {\n\tvar a []string\n\tfor _, d := range w {\n\t\ts := fmt.Sprintf(\"%s (%s)\", d.Main, d.Description)\n\t\ta = append(a, s)\n\t}\n\treturn strings.Join(a, \", \")\n}\n\n\/\/ templStr jest templatem dla wyświetlania danych pogodowych typu\n\/\/ WeatherResult.\nconst templStr = `Miasto:\t {{.Name}}, {{.Sys.Country}} [{{.Coord.Lat}}, {{.Coord.Lon}}]\nTemperatura: {{.Main.Temp}} °C\n{{- if ne .Main.TempMin .Main.TempMax}} (min: {{.Main.TempMin}}, max: {{.Main.TempMax}})\n{{- end}}\nPogoda: {{.Local.Weather}}\nCiśnienie: {{.Main.Pressure}} hpa\nWilgotność: {{.Main.Humidity}} %\nWiatr: {{.Wind.Speed}} m\/s ({{.Wind.Deg}}°)\nZachmurzenie: {{.Clouds.All}} %\nWschód słońca: {{.Local.SunriseTime}}\nZachód słońca: {{.Local.SunsetTime}}\n(Dane pochodzą z serwisu OpenWeatherMap.org)\n`\n\nvar templ = template.Must(template.New(\"weather\").Parse(templStr))\n\nconst helpStr = `Program pogoda wyświetla dane pogodowe dla podanego miasta.\n\nSposób użycia:\n\tpogoda [-h] miasto\n\n\t-h Wyświetla help.\n\nDla podanego miasta program pobiera aktualne dane pogodowe z serwisu\nhttp:\/\/api.openweathermap.org i wyświetla je na standardowe wyjście.\n\nPrzykład: pogoda dla Warszawy:\n\n\t$ pogoda Warszawa\n\tMiasto: Warszawa, PL [52.24, 21.04]\n\tTemperatura: 21 °C (min: 21, max: 21)\n\tPogoda: Clear (clear sky)\n\tCiśnienie: 1023 hpa\n\tWilgotność: 40 %\n\tWiatr: 2.6 m\/s (0°)\n\tZachmurzenie: 0 %\n\tWschód słońca: 04:24:40 CEST\n\tZachód słońca: 20:42:16 CEST\n\t(Dane pochodzą z serwisu OpenWeatherMap.com)\n`\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v26\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nconst (\n\ts_DownArrow = \"↓\"\n\ts_UpArrow = \"↑\"\n\ts_ThreeDots = \"…\"\n\ts_Dot = \"●\"\n\ts_Check = \"✔\"\n\ts_Flag = \"⚑\"\n)\n\nvar logger *log.Logger\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() string {\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\treturn role\n}\n\nfunc getGitInfo() gitInfo {\n\tgi := gitInfo{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\n\t\t\/\/Get current tracked & untracked files status\n\t\tstatusOpts := git2go.StatusOptions{\n\t\t\tFlags: git2go.StatusOptIncludeUntracked | git2go.StatusOptRenamesHeadToIndex,\n\t\t}\n\t\trepostate, err := repository.StatusList(&statusOpts)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error getting repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tgot := false\n\t\t\tif entry.Status&git2go.StatusCurrent > 0 {\n\t\t\t\tlogger.Println(\"StatusCurrent\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexNew > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexNew\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexModified > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexModified\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexDeleted\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexRenamed\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexTypeChange\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtNew > 0 {\n\t\t\t\tlogger.Println(\"StatusWtNew\")\n\t\t\t\tgi.untracked++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtModified > 0 {\n\t\t\t\tlogger.Println(\"StatusWtModified\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusWtDeleted\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusWtTypeChange\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusWtRenamed\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIgnored > 0 {\n\t\t\t\tlogger.Println(\"StatusIgnored\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusConflicted > 0 {\n\t\t\t\tlogger.Println(\"StatusConflicted\")\n\t\t\t\tgi.conflict = true\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif !got {\n\t\t\t\tlogger.Println(\"Unknown: \", entry.Status)\n\t\t\t}\n\t\t}\n\t\t\/\/Get current branch name\n\t\tlocalRef, err := repository.Head()\n\t\tif err != nil {\n\t\t\t\/\/Probably there are no commits yet. How to know the current branch??\n\t\t\tgi.branch = \"No_Commits\"\n\t\t\treturn gi\n\t\t\t\/\/log.Fatalln(\"error getting head: \", err)\n\t\t}\n\t\tdefer localRef.Free()\n\n\t\tref := strings.Split(localRef.Name(), \"\/\")\n\t\tgi.branch = ref[len(ref)-1]\n\t\t\/\/Get commits Ahead\/Behind\n\n\t\tlocalBranch := localRef.Branch()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error getting local branch: \", err)\n\t\t}\n\n\t\tremoteRef, err := localBranch.Upstream()\n\t\tif err == nil {\n\t\t\tgi.upstream = true\n\t\t\tdefer remoteRef.Free()\n\n\t\t\tif !remoteRef.Target().Equal(localRef.Target()) {\n\t\t\t\tlogger.Println(\"Local & remore differ:\", remoteRef.Target().String(), localRef.Target().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting merge bases\")\n\t\t\t\t}\n\t\t\t\tgi.commitsAhead, gi.commitsBehind, err = repository.AheadBehind(localRef.Target(), remoteRef.Target())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting commits ahead\/behind\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ stash\n\t\trepository.Stashes.Foreach(func(i int, m string, o *git2go.Oid) error {\n\t\t\tgi.stashed = i + 1\n\t\t\treturn nil\n\t\t})\n\t\tlogger.Println(\"Stashes: \", gi.stashed)\n\t}\n\treturn gi\n}\n\ntype gitInfo struct {\n\tconflict bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\tupstream bool\n}\n\ntype termInfo struct {\n\tlastrc string\n\tpwd string\n\tuser string\n\thostname string\n\tvirtualEnv string\n\tawsRole string\n\tawsExpire time.Time\n\tgi gitInfo\n}\n\nfunc getTermInfo() termInfo {\n\tvar err error\n\n\tti := termInfo{}\n\t\/\/Get basicinfo\n\tti.pwd = os.Getenv(\"PWD\")\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tti.pwd = strings.Replace(ti.pwd, home, \"~\", -1)\n\t}\n\tti.user = os.Getenv(\"USER\")\n\tti.hostname, err = os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\tti.lastrc = os.Getenv(\"LAST_COMMAND_RC\")\n\n\t\/\/Get Python VirtualEnv info\n\tti.virtualEnv = getPythonVirtualEnv()\n\n\t\/\/AWS\n\tti.awsRole = getAwsInfo()\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tti.awsExpire = time.Unix(iExpire, int64(0))\n\n\t\/\/Get git information\n\tti.gi = getGitInfo()\n\treturn ti\n\n}\nfunc main() {\n\tvar debug bool\n\tvar style string\n\n\tflag.BoolVar(&debug, \"debug\", false, \"enable debug messages\")\n\tflag.StringVar(&style, \"style\", \"Evermeet\", \"Select style: Evermeet, Mac, Fedora\")\n\tflag.Parse()\n\tswitch style {\n\tcase \"Evermeet\":\n\tcase \"Mac\":\n\tcase \"Fedora\":\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, \"Invalid style. Valid styles: Evermmet, Mac, Fedora\")\n\t}\n\tlogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\n\tif !debug {\n\t\tlogger.SetOutput(ioutil.Discard)\n\t}\n\tti := getTermInfo()\n\tfmt.Println(makePrompt(style, ti))\n}\n\nfunc makePrompt(style string, ti termInfo) string {\n\tswitch style {\n\tcase \"Evermeet\":\n\t\treturn makePromptEvermeet(ti)\n\tcase \"Mac\":\n\t\treturn makePromptMac(ti)\n\tcase \"Fedora\":\n\t\treturn makePromptFedora(ti)\n\t}\n\treturn \"Not suppported\"\n}\nfunc makePromptMac(ti termInfo) string {\n\treturn \"Not implemented\"\n}\n\nfunc makePromptFedora(ti termInfo) string {\n\treturn \"Not implemented\"\n}\n\nfunc makePromptEvermeet(ti termInfo) string {\n\t\/\/Formatting\n\tvar userInfo, lastCommandInfo, pwdInfo, virtualEnvInfo, awsInfo, gitInfo string\n\n\tpromptEnd := \"$\"\n\n\tif ti.user == \"root\" {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgRed)\n\t\tpromptEnd = \"#\"\n\t} else {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tif ti.lastrc != \"\" {\n\t\tlastCommandInfo = termcolor.EscapedFormat(ti.lastrc, termcolor.FgHiYellow) + \" \"\n\t}\n\n\tpwdInfo = termcolor.EscapedFormat(ti.pwd, termcolor.Bold, termcolor.FgBlue)\n\tif ti.virtualEnv != \"\" {\n\t\tvirtualEnvInfo = termcolor.EscapedFormat(ti.virtualEnv, termcolor.FgBlue)\n\t}\n\tif ti.gi.branch != \"\" {\n\t\tgitInfo = \" \" + termcolor.EscapedFormat(ti.gi.branch, termcolor.FgMagenta)\n\t\tspace := \" \"\n\t\tif ti.gi.commitsBehind > 0 {\n\t\t\tgitInfo += space + s_DownArrow + \"·\" + strconv.Itoa(ti.gi.commitsBehind)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif ti.gi.commitsAhead > 0 {\n\t\t\tgitInfo += space + s_UpArrow + \"·\" + strconv.Itoa(ti.gi.commitsAhead)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif !ti.gi.upstream {\n\t\t\tgitInfo += space + \"*\"\n\t\t\tspace = \"\"\n\t\t}\n\t\tgitInfo += \"|\"\n\t\tsynced := true\n\t\tif ti.gi.staged > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Dot+strconv.Itoa(ti.gi.staged), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.gi.changed > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(\"+\"+strconv.Itoa(ti.gi.changed), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.gi.untracked > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_ThreeDots+strconv.Itoa(ti.gi.untracked), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.gi.stashed > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Flag+strconv.Itoa(ti.gi.stashed), termcolor.FgHiMagenta)\n\t\t}\n\t\tif synced {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Check, termcolor.FgHiGreen)\n\t\t}\n\t}\n\tif ti.awsRole != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(ti.awsExpire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.EscapedFormat(ti.awsRole, t) + \"|\"\n\t}\n\n\treturn fmt.Sprintf(\"%s[%s%s %s%s%s]%s \", virtualEnvInfo, awsInfo, userInfo, lastCommandInfo, pwdInfo, gitInfo, promptEnd)\n}\n<commit_msg>reorganize to facilitate tests<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v26\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nconst (\n\ts_DownArrow = \"↓\"\n\ts_UpArrow = \"↑\"\n\ts_ThreeDots = \"…\"\n\ts_Dot = \"●\"\n\ts_Check = \"✔\"\n\ts_Flag = \"⚑\"\n)\n\nvar logger *log.Logger\n\ntype gitInfo struct {\n\tconflict bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\tupstream bool\n}\n\ntype awsInfo struct {\n\trole string\n\texpire time.Time\n}\n\ntype termInfo struct {\n\tlastrc string\n\tpwd string\n\tuser string\n\thostname string\n\tvirtualEnv string\n\taws awsInfo\n\tgit gitInfo\n}\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() awsInfo {\n\tai := awsInfo{}\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\tai.role = role\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tai.expire = time.Unix(iExpire, int64(0))\n\n\treturn ai\n}\n\nfunc getGitInfo() gitInfo {\n\tgi := gitInfo{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\n\t\t\/\/Get current tracked & untracked files status\n\t\tstatusOpts := git2go.StatusOptions{\n\t\t\tFlags: git2go.StatusOptIncludeUntracked | git2go.StatusOptRenamesHeadToIndex,\n\t\t}\n\t\trepostate, err := repository.StatusList(&statusOpts)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error getting repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tgot := false\n\t\t\tif entry.Status&git2go.StatusCurrent > 0 {\n\t\t\t\tlogger.Println(\"StatusCurrent\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexNew > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexNew\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexModified > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexModified\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexDeleted\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexRenamed\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexTypeChange\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtNew > 0 {\n\t\t\t\tlogger.Println(\"StatusWtNew\")\n\t\t\t\tgi.untracked++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtModified > 0 {\n\t\t\t\tlogger.Println(\"StatusWtModified\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusWtDeleted\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusWtTypeChange\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusWtRenamed\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIgnored > 0 {\n\t\t\t\tlogger.Println(\"StatusIgnored\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusConflicted > 0 {\n\t\t\t\tlogger.Println(\"StatusConflicted\")\n\t\t\t\tgi.conflict = true\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif !got {\n\t\t\t\tlogger.Println(\"Unknown: \", entry.Status)\n\t\t\t}\n\t\t}\n\t\t\/\/Get current branch name\n\t\tlocalRef, err := repository.Head()\n\t\tif err != nil {\n\t\t\t\/\/Probably there are no commits yet. How to know the current branch??\n\t\t\tgi.branch = \"No_Commits\"\n\t\t\treturn gi\n\t\t\t\/\/log.Fatalln(\"error getting head: \", err)\n\t\t}\n\t\tdefer localRef.Free()\n\n\t\tref := strings.Split(localRef.Name(), \"\/\")\n\t\tgi.branch = ref[len(ref)-1]\n\t\t\/\/Get commits Ahead\/Behind\n\n\t\tlocalBranch := localRef.Branch()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error getting local branch: \", err)\n\t\t}\n\n\t\tremoteRef, err := localBranch.Upstream()\n\t\tif err == nil {\n\t\t\tgi.upstream = true\n\t\t\tdefer remoteRef.Free()\n\n\t\t\tif !remoteRef.Target().Equal(localRef.Target()) {\n\t\t\t\tlogger.Println(\"Local & remore differ:\", remoteRef.Target().String(), localRef.Target().String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting merge bases\")\n\t\t\t\t}\n\t\t\t\tgi.commitsAhead, gi.commitsBehind, err = repository.AheadBehind(localRef.Target(), remoteRef.Target())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting commits ahead\/behind\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ stash\n\t\trepository.Stashes.Foreach(func(i int, m string, o *git2go.Oid) error {\n\t\t\tgi.stashed = i + 1\n\t\t\treturn nil\n\t\t})\n\t\tlogger.Println(\"Stashes: \", gi.stashed)\n\t}\n\treturn gi\n}\n\nfunc getTermInfo() termInfo {\n\tvar err error\n\n\tti := termInfo{}\n\t\/\/Get basicinfo\n\tti.pwd = os.Getenv(\"PWD\")\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tti.pwd = strings.Replace(ti.pwd, home, \"~\", -1)\n\t}\n\tti.user = os.Getenv(\"USER\")\n\tti.hostname, err = os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\tti.lastrc = os.Getenv(\"LAST_COMMAND_RC\")\n\n\treturn ti\n\n}\nfunc main() {\n\tvar debug bool\n\tvar style string\n\n\tflag.BoolVar(&debug, \"debug\", false, \"enable debug messages\")\n\tflag.StringVar(&style, \"style\", \"Evermeet\", \"Select style: Evermeet, Mac, Fedora\")\n\tflag.Parse()\n\tswitch style {\n\tcase \"Evermeet\":\n\tcase \"Mac\":\n\tcase \"Fedora\":\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, \"Invalid style. Valid styles: Evermmet, Mac, Fedora\")\n\t}\n\tlogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\n\tif !debug {\n\t\tlogger.SetOutput(ioutil.Discard)\n\t}\n\tti := getTermInfo()\n\t\/\/Get Python VirtualEnv info\n\tti.virtualEnv = getPythonVirtualEnv()\n\n\t\/\/AWS\n\tti.aws = getAwsInfo()\n\n\t\/\/Get git information\n\tti.git = getGitInfo()\n\n\tfmt.Println(makePrompt(style, ti))\n\n}\n\nfunc makePrompt(style string, ti termInfo) string {\n\tswitch style {\n\tcase \"Evermeet\":\n\t\treturn makePromptEvermeet(ti)\n\tcase \"Mac\":\n\t\treturn makePromptMac(ti)\n\tcase \"Fedora\":\n\t\treturn makePromptFedora(ti)\n\t}\n\treturn \"Not suppported\"\n}\nfunc makePromptMac(ti termInfo) string {\n\treturn \"Not implemented\"\n}\n\nfunc makePromptFedora(ti termInfo) string {\n\treturn \"Not implemented\"\n}\n\nfunc makePromptEvermeet(ti termInfo) string {\n\t\/\/Formatting\n\tvar userInfo, lastCommandInfo, pwdInfo, virtualEnvInfo, awsInfo, gitInfo string\n\n\tpromptEnd := \"$\"\n\n\tif ti.user == \"root\" {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgRed)\n\t\tpromptEnd = \"#\"\n\t} else {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tif ti.lastrc != \"\" {\n\t\tlastCommandInfo = termcolor.EscapedFormat(ti.lastrc, termcolor.FgHiYellow) + \" \"\n\t}\n\n\tpwdInfo = termcolor.EscapedFormat(ti.pwd, termcolor.Bold, termcolor.FgBlue)\n\tif ti.virtualEnv != \"\" {\n\t\tvirtualEnvInfo = termcolor.EscapedFormat(ti.virtualEnv, termcolor.FgBlue)\n\t}\n\tif ti.git.branch != \"\" {\n\t\tgitInfo = \" \" + termcolor.EscapedFormat(ti.git.branch, termcolor.FgMagenta)\n\t\tspace := \" \"\n\t\tif ti.git.commitsBehind > 0 {\n\t\t\tgitInfo += space + s_DownArrow + \"·\" + strconv.Itoa(ti.git.commitsBehind)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif ti.git.commitsAhead > 0 {\n\t\t\tgitInfo += space + s_UpArrow + \"·\" + strconv.Itoa(ti.git.commitsAhead)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif !ti.git.upstream {\n\t\t\tgitInfo += space + \"*\"\n\t\t\tspace = \"\"\n\t\t}\n\t\tgitInfo += \"|\"\n\t\tsynced := true\n\t\tif ti.git.staged > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Dot+strconv.Itoa(ti.git.staged), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.git.changed > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(\"+\"+strconv.Itoa(ti.git.changed), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.git.untracked > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_ThreeDots+strconv.Itoa(ti.git.untracked), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif ti.git.stashed > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Flag+strconv.Itoa(ti.git.stashed), termcolor.FgHiMagenta)\n\t\t}\n\t\tif synced {\n\t\t\tgitInfo += termcolor.EscapedFormat(s_Check, termcolor.FgHiGreen)\n\t\t}\n\t}\n\tif ti.aws.role != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(ti.aws.expire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.EscapedFormat(ti.aws.role, t) + \"|\"\n\t}\n\n\treturn fmt.Sprintf(\"%s[%s%s %s%s%s]%s \", virtualEnvInfo, awsInfo, userInfo, lastCommandInfo, pwdInfo, gitInfo, promptEnd)\n}\n<|endoftext|>"} {"text":"<commit_before>\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/md5\"\n\n\t\"github.com\/kless\/goconfig\/config\"\n)\n\ntype packetV2 struct {\n\tdeviceId string\n\tnoteId string\n\teventType string\n\tdata string\n\tcontents string\n}\n\nvar trustId string\nvar configLocation string\n\nfunc init() {\n\tflag.StringVar(&trustId, \"t\", \"\", \"Trust the provided device ID.\")\n\tflag.StringVar(&configLocation, \"c\", \"\", \"Set config file location\")\n\tflag.Parse()\n}\n\n\/\/Creates an MD5 digest from a byte string.\nfunc doDigest(in []byte) []byte {\n\thash := md5.New()\n\thash.Write(in)\n\treturn hash.Sum()\n}\n\n\/\/Generates the key from the passphrase by hashing it 10 times.\n\/\/For some reason. I dunno, I just do what the source code says.\nfunc passphraseToKey(passphrase string) []byte {\n\tpassphraseBytes := []uint8(passphrase)\n\tfor i := 0; i < 10; i++ {\n\t\tpassphraseBytes = doDigest(passphraseBytes)\n\t}\n\treturn passphraseBytes\n}\n\n\/\/Decrypt a message.\nfunc decryptMessage(key []byte, ciphertext *bytes.Buffer) (buf *bytes.Buffer) {\n\tiv := doDigest(key)\n\taescrypt, _ := aes.NewCipher(key)\n\tcipher := cipher.NewCBCDecrypter(aescrypt, iv)\n\tout := bytes.NewBuffer(make([]byte, 512))\n\tcipher.CryptBlocks(out.Bytes(), ciphertext.Bytes())\n\treturn out\n}\n\n\/\/Sends a notification using exec and the \"notify-send\" program.\nfunc sendNotify(msg packetV2, icon string) error {\n\tswitch {\n\tcase msg.eventType == \"RING\":\n\t\ticon = icon + \"\/ring.png\"\n\tcase msg.eventType == \"SMS\":\n\t\ticon = icon + \"\/sms.png\"\n\tcase msg.eventType == \"MMS\":\n\t\ticon = icon + \"\/mms.png\"\n\tcase msg.eventType == \"BATTERY\":\n\t\ticon = icon + \"\/battery_unknown.png\"\n\tcase msg.eventType == \"PING\":\n\t\ticon = icon + \"\/app-icon.png\"\n\t}\n\n\tcmd := exec.Command(\"notify-send\",\n\t\t\"-i\", icon, msg.eventType, msg.contents)\n\n\tcmdErr := cmd.Run()\n\treturn cmdErr\n}\n\nfunc elem(input string, list []string) bool {\n\tfor _, v := range list {\n\t\tif input == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/Splits a notify-packet into its fields.\nfunc splitPacket(input string) packetV2 {\n\tpkt := make([]string, 6)\n\tif strings.HasPrefix(input, \"v2\") {\n\t\tpkt = strings.SplitN(input, \"\/\", 6)\n\t} else {\n\t\tfmt.Println(\"Unsupported packet type\")\n\t\tos.Exit(2)\n\t}\n\treturn packetV2{pkt[1], pkt[2], pkt[3], pkt[4], pkt[5]}\n}\n\nfunc loadConfig() *config.Config {\n\tconfigDirs := strings.Split(os.Getenv(\"XDG_CONFIG_HOME\")+\":\"+os.Getenv(\"XDG_CONFIG_DIRS\"), \":\")\n\tfor _, d := range configDirs {\n\t\tcfg, _ := config.ReadDefault(d + \"\/android-notify-lite\/config\")\n\t\tif cfg != nil {\n\t\t\tconfigLocation = d + \"\/android-notify-lite\/config\"\n\t\t\treturn cfg\n\t\t}\n\t}\n\tcfg, _ := config.ReadDefault(os.Getenv(\"HOME\") + \"\/.android-notify-lite\")\n\tconfigLocation = os.Getenv(\"HOME\") + \"\/.android-notify-lite\"\n\tif cfg == nil {\n\t\tfmt.Println(\"Error: No configuration file found.\")\n\t\tos.Exit(1)\n\t}\n\treturn cfg\n}\n\nfunc main() {\n\t\/\/Load configuration info\n\tc := loadConfig()\n\n\ticonDir, _ := c.String(\"Notify\", \"icon_dir\")\n\tdecrypt, _ := c.Bool(\"Notify\", \"decrypt\")\n\tpassphrase, _ := c.String(\"Notify\", \"passphrase\")\n\tbroadcastAddr, _ := c.String(\"Notify\", \"broadcast_addr\")\n\trawTrustedIds, _ := c.String(\"Notify\", \"trusted_ids\")\n\ttrustedIds := strings.Split(rawTrustedIds, \"\\n\")\n\n\tif trustId != \"\" {\n\t\tc.RemoveOption(\"Notify\", \"trusted_ids\")\n\t\tc.AddOption(\"Notify\", \"trusted_ids\", rawTrustedIds+\"\\n\"+trustId)\n\t\tc.WriteFile(configLocation, 0600, \"Device ID added\")\n\t\tfmt.Println(\"ID added to config file.\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/Connect to the broadcast address\n\tsocket, netErr := net.ListenUDP(\"udp4\", &net.UDPAddr{\n\t\tIP: net.ParseIP(broadcastAddr),\n\t\tPort: 10600,\n\t})\n\n\tif netErr == nil {\n\t\tin := bytes.NewBuffer(make([]byte, 512))\n\t\tkey := passphraseToKey(passphrase)\n\t\tfor {\n\t\t\tsocket.ReadFromUDP(in.Bytes())\n\t\t\tpacket := new(packetV2)\n\t\t\tif decrypt {\n\t\t\t\tmsg := decryptMessage(key, in)\n\t\t\t\tstripped, _ := msg.ReadString(0)\n\t\t\t\t*packet = splitPacket(stripped)\n\t\t\t} else {\n\t\t\t\t*packet = splitPacket(string(in.Bytes()))\n\t\t\t}\n\n\t\t\tif elem(packet.deviceId, trustedIds) {\n\t\t\t\tsendNotify(*packet, iconDir)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Messaged received w\/ unknown device id %s\",\n\t\t\t\t\tpacket.deviceId)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Error with network connection: \", netErr)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Added a bit of documentation<commit_after>\/\/\tandroid-notify-lite provides a lightweight replacement for the Java-based\n\/\/\tandroid-notifier-desktop remote notificationator thingy. To be of any use\n\/\/\tat all, it should be installed on a system with libnotify\/notify-send, and\n\/\/\trequires an Android phone with the Remote Notifier application installed\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/md5\"\n\n\t\"github.com\/kless\/goconfig\/config\"\n)\n\n\/\/Data type for the received packet\ntype packetV2 struct {\n\tdeviceId string\n\tnoteId string\n\teventType string\n\tdata string\n\tcontents string\n}\n\n\/\/Flags. -t adds the inputted device ID to the config file.\nvar trustId string\nvar configLocation string\n\nfunc init() {\n\tflag.StringVar(&trustId, \"t\", \"\", \"Trust the provided device ID.\")\n\tflag.StringVar(&configLocation, \"c\", \"\", \"Set config file location\")\n\tflag.Parse()\n}\n\n\/\/Creates an MD5 digest from a byte string.\nfunc doDigest(in []byte) []byte {\n\thash := md5.New()\n\thash.Write(in)\n\treturn hash.Sum()\n}\n\n\/\/Generates the key from the passphrase by hashing it 10 times.\n\/\/For some reason. I dunno, I just do what the source code says.\nfunc passphraseToKey(passphrase string) []byte {\n\tpassphraseBytes := []uint8(passphrase)\n\tfor i := 0; i < 10; i++ {\n\t\tpassphraseBytes = doDigest(passphraseBytes)\n\t}\n\treturn passphraseBytes\n}\n\n\/\/Decrypt a message.\nfunc decryptMessage(key []byte, ciphertext *bytes.Buffer) (buf *bytes.Buffer) {\n\tiv := doDigest(key)\n\taescrypt, _ := aes.NewCipher(key)\n\tcipher := cipher.NewCBCDecrypter(aescrypt, iv)\n\tout := bytes.NewBuffer(make([]byte, 512))\n\tcipher.CryptBlocks(out.Bytes(), ciphertext.Bytes())\n\treturn out\n}\n\n\/\/Sends a notification using exec and the \"notify-send\" program.\nfunc sendNotify(msg packetV2, icon string) error {\n\tswitch {\n\tcase msg.eventType == \"RING\":\n\t\ticon = icon + \"\/ring.png\"\n\tcase msg.eventType == \"SMS\":\n\t\ticon = icon + \"\/sms.png\"\n\tcase msg.eventType == \"MMS\":\n\t\ticon = icon + \"\/mms.png\"\n\tcase msg.eventType == \"BATTERY\":\n\t\ticon = icon + \"\/battery_unknown.png\"\n\tcase msg.eventType == \"PING\":\n\t\ticon = icon + \"\/app-icon.png\"\n\t}\n\n\tcmd := exec.Command(\"notify-send\",\n\t\t\"-i\", icon, msg.eventType, msg.contents)\n\n\tcmdErr := cmd.Run()\n\treturn cmdErr\n}\n\n\/\/Helper function that is only used once, in a probably not-idiomatic way.\n\/\/Should probably find a way to replace.\nfunc elem(input string, list []string) bool {\n\tfor _, v := range list {\n\t\tif input == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/Splits a notify-packet into its fields.\nfunc splitPacket(input string) packetV2 {\n\tpkt := make([]string, 6)\n\tif strings.HasPrefix(input, \"v2\") {\n\t\tpkt = strings.SplitN(input, \"\/\", 6)\n\t} else {\n\t\tfmt.Println(\"Unsupported packet type\")\n\t\tos.Exit(2)\n\t}\n\treturn packetV2{pkt[1], pkt[2], pkt[3], pkt[4], pkt[5]}\n}\n\n\/\/Searches for, and loads, a configuration file.\nfunc loadConfig() *config.Config {\n\tconfigDirs := strings.Split(os.Getenv(\"XDG_CONFIG_HOME\")+\":\"+os.Getenv(\"XDG_CONFIG_DIRS\"), \":\")\n\tfor _, d := range configDirs {\n\t\tcfg, _ := config.ReadDefault(d + \"\/android-notify-lite\/config\")\n\t\tif cfg != nil {\n\t\t\tconfigLocation = d + \"\/android-notify-lite\/config\"\n\t\t\treturn cfg\n\t\t}\n\t}\n\tcfg, _ := config.ReadDefault(os.Getenv(\"HOME\") + \"\/.android-notify-lite\")\n\tconfigLocation = os.Getenv(\"HOME\") + \"\/.android-notify-lite\"\n\tif cfg == nil {\n\t\tfmt.Println(\"Error: No configuration file found.\")\n\t\tos.Exit(1)\n\t}\n\treturn cfg\n}\n\nfunc main() {\n\t\/\/Load configuration info\n\tc := loadConfig()\n\n\ticonDir, _ := c.String(\"Notify\", \"icon_dir\")\n\tdecrypt, _ := c.Bool(\"Notify\", \"decrypt\")\n\tpassphrase, _ := c.String(\"Notify\", \"passphrase\")\n\tbroadcastAddr, _ := c.String(\"Notify\", \"broadcast_addr\")\n\trawTrustedIds, _ := c.String(\"Notify\", \"trusted_ids\")\n\ttrustedIds := strings.Split(rawTrustedIds, \"\\n\")\n\n\tif trustId != \"\" {\n\t\tc.RemoveOption(\"Notify\", \"trusted_ids\")\n\t\tc.AddOption(\"Notify\", \"trusted_ids\", rawTrustedIds+\"\\n\"+trustId)\n\t\tc.WriteFile(configLocation, 0600, \"Device ID added\")\n\t\tfmt.Println(\"ID added to config file.\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/Connect to the broadcast address\n\tsocket, netErr := net.ListenUDP(\"udp4\", &net.UDPAddr{\n\t\tIP: net.ParseIP(broadcastAddr),\n\t\tPort: 10600,\n\t})\n\n\tif netErr == nil {\n\t\tin := bytes.NewBuffer(make([]byte, 512))\n\t\tkey := passphraseToKey(passphrase)\n\t\tfor {\n\t\t\tsocket.ReadFromUDP(in.Bytes())\n\t\t\tpacket := new(packetV2)\n\t\t\tif decrypt {\n\t\t\t\tmsg := decryptMessage(key, in)\n\t\t\t\tstripped, _ := msg.ReadString(0)\n\t\t\t\t*packet = splitPacket(stripped)\n\t\t\t} else {\n\t\t\t\t*packet = splitPacket(string(in.Bytes()))\n\t\t\t}\n\n\t\t\tif elem(packet.deviceId, trustedIds) {\n\t\t\t\tsendNotify(*packet, iconDir)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Messaged received w\/ unknown device id %s\",\n\t\t\t\t\tpacket.deviceId)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Error with network connection: \", netErr)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\ntype configuration struct {\n\tInterval int64 `json:\"interval\"`\n\tAddresses []string `json:\"addresses\"`\n\tMetrics []string `json:\"metrics\"`\n}\n\nfunc main() {\n\tconfig := readConfiguration(\"config.json\")\n\n\ttickerChannel := time.NewTicker(time.Second * time.Duration(config.Interval)).C\n\n\tfor {\n\t\tselect {\n\t\tcase <-tickerChannel:\n\t\t\tfor _, address := range config.Addresses {\n\t\t\t\tgo info(address, config.Metrics)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc readConfiguration(fileName string) configuration {\n\tvar config configuration\n\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading configuration file from %s: %s\", fileName, err)\n\t}\n\tdefer file.Close()\n\n\tdecoder := json.NewDecoder(file)\n\tif err := decoder.Decode(&config); err != nil {\n\t\tlog.Fatalf(\"Error decoding JSON from %s: %s\", fileName, err)\n\t}\n\n\treturn config\n}\n\nfunc info(address string, metrics []string) {\n\tconnection, err := redis.Dial(\"tcp\", address)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error connecting to Redis: %s\", err)\n\t\treturn\n\t}\n\tdefer connection.Close()\n\n\treply, err := connection.Do(\"INFO\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Error doing INFO command: %s\", err)\n\t\treturn\n\t}\n\n\t\/\/ Transform selected metrics slice to map\n\tmetricsMap := make(map[string]bool, len(metrics))\n\tfor _, metric := range metrics {\n\t\tmetricsMap[metric] = true\n\t}\n\n\t\/\/ Create a map to store only matching metrics\n\treplies := make(map[string]float64, len(metrics))\n\n\tfor metric, value := range Parse(reply.([]byte)) {\n\t\tif _, hasKey := metricsMap[metric]; hasKey {\n\t\t\treplies[metric] = value\n\t\t}\n\t}\n\n\tfor metric, value := range replies {\n\t\tlog.Printf(\"%s | %s => %f\\r\", address, metric, value)\n\t}\n}\n<commit_msg>Don't crash on dialing or executing command<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\ntype configuration struct {\n\tInterval int64 `json:\"interval\"`\n\tAddresses []string `json:\"addresses\"`\n\tMetrics []string `json:\"metrics\"`\n}\n\nfunc main() {\n\tconfig := readConfiguration(\"config.json\")\n\n\ttickerChannel := time.NewTicker(time.Second * time.Duration(config.Interval)).C\n\n\tfor {\n\t\tselect {\n\t\tcase <-tickerChannel:\n\t\t\tfor _, address := range config.Addresses {\n\t\t\t\tgo info(address, config.Metrics)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc readConfiguration(fileName string) configuration {\n\tvar config configuration\n\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading configuration file from %s: %s\", fileName, err)\n\t}\n\tdefer file.Close()\n\n\tdecoder := json.NewDecoder(file)\n\tif err := decoder.Decode(&config); err != nil {\n\t\tlog.Fatalf(\"Error decoding JSON from %s: %s\", fileName, err)\n\t}\n\n\treturn config\n}\n\nfunc info(address string, metrics []string) {\n\tconnection, err := redis.Dial(\"tcp\", address)\n\tif err != nil {\n\t\tlog.Printf(\"Error connecting to Redis: %s\", err)\n\t\treturn\n\t}\n\tdefer connection.Close()\n\n\treply, err := connection.Do(\"INFO\")\n\tif err != nil {\n\t\tlog.Printf(\"Error doing INFO command: %s\", err)\n\t\treturn\n\t}\n\n\t\/\/ Transform selected metrics slice to map\n\tmetricsMap := make(map[string]bool, len(metrics))\n\tfor _, metric := range metrics {\n\t\tmetricsMap[metric] = true\n\t}\n\n\t\/\/ Create a map to store only matching metrics\n\treplies := make(map[string]float64, len(metrics))\n\n\tfor metric, value := range Parse(reply.([]byte)) {\n\t\tif _, hasKey := metricsMap[metric]; hasKey {\n\t\t\treplies[metric] = value\n\t\t}\n\t}\n\n\tfor metric, value := range replies {\n\t\tlog.Printf(\"%s | %s => %f\\r\", address, metric, value)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/mithrandie\/csvq\/lib\/action\"\n\t\"github.com\/mithrandie\/csvq\/lib\/cmd\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar version = \"v0.1.4\"\n\nfunc main() {\n\tcli.AppHelpTemplate = appHHelpTemplate\n\tcli.CommandHelpTemplate = commandHelpTemplate\n\n\tapp := cli.NewApp()\n\n\tapp.Name = \"csvq\"\n\tapp.Usage = \"SQL like query language for csv\"\n\tapp.Version = version\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"delimiter, d\",\n\t\t\tValue: \",\",\n\t\t\tUsage: \"field delimiter (exam: \\\",\\\" for comma, \\\"\\\\t\\\" for tab)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"encoding, e\",\n\t\t\tValue: \"utf8\",\n\t\t\tUsage: \"file encoding. one of: utf8|sjis\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repository, r\",\n\t\t\tUsage: \"directory path where files are located\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-header\",\n\t\t\tUsage: \"import first line as a record\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"without-null\",\n\t\t\tUsage: \"parse empty field as empty string\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"write\",\n\t\t\tUsage: \"Write output to file\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-encoding, E\",\n\t\t\t\t\tValue: \"utf8\",\n\t\t\t\t\tUsage: \"file encoding. one of: utf8|sjis\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"line-break, l\",\n\t\t\t\t\tValue: \"lf\",\n\t\t\t\t\tUsage: \"line break. one of: crlf|lf|cr\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"out, o\",\n\t\t\t\t\tUsage: \"write output to `FILE`\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"output format. one of: csv|tsv|json|text\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-delimiter, D\",\n\t\t\t\t\tValue: \",\",\n\t\t\t\t\tUsage: \"field delimiter for csv or tsv (exam: \\\",\\\" for comma, \\\"\\\\t\\\" for tab)\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"without-header\",\n\t\t\t\t\tUsage: \"when format is specified as csv or tsv, write without header line\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tBefore: func(c *cli.Context) error {\n\t\t\t\treturn setWriteFlags(c)\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\treturn cli.ShowSubcommandHelp(c)\n\t\t\t\t}\n\n\t\t\t\tq := c.Args().First()\n\n\t\t\t\terr := action.Write(q)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"fields\",\n\t\t\tUsage: \"Show fields in file\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\treturn cli.ShowSubcommandHelp(c)\n\t\t\t\t}\n\n\t\t\t\ttable := c.Args().First()\n\n\t\t\t\terr := action.ShowFields(table)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\treturn setGlobalFlags(c)\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tif c.NArg() != 1 {\n\t\t\treturn cli.ShowAppHelp(c)\n\t\t}\n\n\t\tq := c.Args().First()\n\n\t\terr := action.Write(q)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc setGlobalFlags(c *cli.Context) error {\n\tif err := cmd.SetDelimiter(c.GlobalString(\"delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetEncoding(c.GlobalString(\"encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetRepository(c.GlobalString(\"repository\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetNoHeader(c.GlobalBool(\"no-header\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWithoutNull(c.GlobalBool(\"without-null\")); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc setWriteFlags(c *cli.Context) error {\n\tif err := cmd.SetWriteEncoding(c.String(\"write-encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLineBreak(c.String(\"line-break\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetOut(c.String(\"out\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetFormat(c.String(\"format\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWriteDelimiter(c.String(\"write-delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWithoutHeader(c.Bool(\"without-header\")); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Update version number for Release v0.1.5<commit_after>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/mithrandie\/csvq\/lib\/action\"\n\t\"github.com\/mithrandie\/csvq\/lib\/cmd\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar version = \"v0.1.5\"\n\nfunc main() {\n\tcli.AppHelpTemplate = appHHelpTemplate\n\tcli.CommandHelpTemplate = commandHelpTemplate\n\n\tapp := cli.NewApp()\n\n\tapp.Name = \"csvq\"\n\tapp.Usage = \"SQL like query language for csv\"\n\tapp.Version = version\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"delimiter, d\",\n\t\t\tValue: \",\",\n\t\t\tUsage: \"field delimiter (exam: \\\",\\\" for comma, \\\"\\\\t\\\" for tab)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"encoding, e\",\n\t\t\tValue: \"utf8\",\n\t\t\tUsage: \"file encoding. one of: utf8|sjis\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"repository, r\",\n\t\t\tUsage: \"directory path where files are located\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-header\",\n\t\t\tUsage: \"import first line as a record\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"without-null\",\n\t\t\tUsage: \"parse empty field as empty string\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"write\",\n\t\t\tUsage: \"Write output to file\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-encoding, E\",\n\t\t\t\t\tValue: \"utf8\",\n\t\t\t\t\tUsage: \"file encoding. one of: utf8|sjis\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"line-break, l\",\n\t\t\t\t\tValue: \"lf\",\n\t\t\t\t\tUsage: \"line break. one of: crlf|lf|cr\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"out, o\",\n\t\t\t\t\tUsage: \"write output to `FILE`\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"output format. one of: csv|tsv|json|text\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"write-delimiter, D\",\n\t\t\t\t\tValue: \",\",\n\t\t\t\t\tUsage: \"field delimiter for csv or tsv (exam: \\\",\\\" for comma, \\\"\\\\t\\\" for tab)\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"without-header\",\n\t\t\t\t\tUsage: \"when format is specified as csv or tsv, write without header line\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tBefore: func(c *cli.Context) error {\n\t\t\t\treturn setWriteFlags(c)\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\treturn cli.ShowSubcommandHelp(c)\n\t\t\t\t}\n\n\t\t\t\tq := c.Args().First()\n\n\t\t\t\terr := action.Write(q)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"fields\",\n\t\t\tUsage: \"Show fields in file\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif c.NArg() != 1 {\n\t\t\t\t\treturn cli.ShowSubcommandHelp(c)\n\t\t\t\t}\n\n\t\t\t\ttable := c.Args().First()\n\n\t\t\t\terr := action.ShowFields(table)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\treturn setGlobalFlags(c)\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tif c.NArg() != 1 {\n\t\t\treturn cli.ShowAppHelp(c)\n\t\t}\n\n\t\tq := c.Args().First()\n\n\t\terr := action.Write(q)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc setGlobalFlags(c *cli.Context) error {\n\tif err := cmd.SetDelimiter(c.GlobalString(\"delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetEncoding(c.GlobalString(\"encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetRepository(c.GlobalString(\"repository\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetNoHeader(c.GlobalBool(\"no-header\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWithoutNull(c.GlobalBool(\"without-null\")); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc setWriteFlags(c *cli.Context) error {\n\tif err := cmd.SetWriteEncoding(c.String(\"write-encoding\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetLineBreak(c.String(\"line-break\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetOut(c.String(\"out\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetFormat(c.String(\"format\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWriteDelimiter(c.String(\"write-delimiter\")); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SetWithoutHeader(c.Bool(\"without-header\")); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/blakesmith\/ar\"\n)\n\ntype Conf struct {\n\tListenPort string `json:\"listenPort\"`\n\tRootRepoPath string `json:\"rootRepoPath\"`\n\tSupportArch []string `json:\"supportedArch\"`\n\tEnableSSL bool `json:\"enableSSL\"`\n\tSSLCert string `json:\"SSLcert\"`\n\tSSLKey string `json:\"SSLkey\"`\n}\n\nfunc (c Conf) ArchPath(arch string) string {\n\treturn filepath.Join(c.RootRepoPath, \"\/dists\/stable\/main\/binary-\"+arch)\n}\n\ntype DeleteObj struct {\n\tFilename string\n\tArch string\n}\n\nvar (\n\tmutex sync.Mutex\n\tconfigFile = flag.String(\"c\", \"conf.json\", \"config file location\")\n\tparsedConfig = Conf{}\n)\n\nfunc main() {\n\tflag.Parse()\n\tfile, err := ioutil.ReadFile(*configFile)\n\tif err != nil {\n\t\tlog.Fatal(\"unable to read config file, exiting...\")\n\t}\n\t\/\/config = &Conf{}\n\tif err := json.Unmarshal(file, &parsedConfig); err != nil {\n\t\tlog.Fatal(\"unable to marshal config file, exiting...\")\n\t}\n\n\tif err := createDirs(parsedConfig); err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalf(\"error creating directory structure, exiting\")\n\t}\n\thttp.Handle(\"\/\", http.StripPrefix(\"\/\", http.FileServer(http.Dir(parsedConfig.RootRepoPath))))\n\thttp.Handle(\"\/upload\", uploadHandler(parsedConfig))\n\thttp.Handle(\"\/delete\", deleteHandler(parsedConfig))\n\tif parsedConfig.EnableSSL {\n\t\tlog.Println(\"running with SSL enabled\")\n\t\tlog.Fatal(http.ListenAndServeTLS(\":\"+parsedConfig.ListenPort, parsedConfig.SSLCert, parsedConfig.SSLKey, nil))\n\t} else {\n\t\tlog.Println(\"running without SSL enabled\")\n\t\tlog.Fatal(http.ListenAndServe(\":\"+parsedConfig.ListenPort, nil))\n\t}\n}\n\nfunc createDirs(config Conf) error {\n\tfor _, arch := range config.SupportArch {\n\t\tif _, err := os.Stat(config.ArchPath(arch)); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tlog.Printf(\"Directory for %s does not exist, creating\", arch)\n\t\t\t\tdirErr := os.MkdirAll(config.ArchPath(arch), 0755)\n\t\t\t\tif dirErr != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error creating directory for %s: %s\", arch, dirErr)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"error inspecting %s: %s\", arch, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc inspectPackage(filename string) (string, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error opening package file %s: %s\", filename, err)\n\t}\n\n\tarReader := ar.NewReader(f)\n\tdefer f.Close()\n\tvar controlBuf bytes.Buffer\n\n\tfor {\n\t\theader, err := arReader.Next()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error in inspectPackage loop: %s\", err)\n\t\t}\n\n\t\tif header.Name == \"control.tar.gz\" {\n\t\t\tio.Copy(&controlBuf, arReader)\n\t\t\treturn inspectPackageControl(controlBuf)\n\n\t\t}\n\n\t}\n\treturn \"\", nil\n}\n\nfunc inspectPackageControl(filename bytes.Buffer) (string, error) {\n\tgzf, err := gzip.NewReader(bytes.NewReader(filename.Bytes()))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating gzip reader: %s\", err)\n\t}\n\n\ttarReader := tar.NewReader(gzf)\n\tvar controlBuf bytes.Buffer\n\tfor {\n\t\theader, err := tarReader.Next()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to inpect package: %s\", err)\n\t\t}\n\n\t\tname := header.Name\n\n\t\tswitch header.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\tcontinue\n\t\tcase tar.TypeReg:\n\t\t\tif name == \".\/control\" {\n\t\t\t\tio.Copy(&controlBuf, tarReader)\n\t\t\t\treturn controlBuf.String(), nil\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Printf(\"%s : %c %s %s\\n\",\n\t\t\t\t\"Unable to figure out type\",\n\t\t\t\theader.Typeflag,\n\t\t\t\t\"in file\",\n\t\t\t\tname,\n\t\t\t)\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\nfunc createPackagesGz(config Conf, arch string) error {\n\toutfile, err := os.Create(filepath.Join(config.ArchPath(arch), \"Packages.gz\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create packages.gz: %s\", err)\n\t}\n\tdefer outfile.Close()\n\tgzOut := gzip.NewWriter(outfile)\n\tdefer gzOut.Close()\n\t\/\/ loop through each directory\n\t\/\/ run inspectPackage\n\tdirList, err := ioutil.ReadDir(config.ArchPath(arch))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"scanning: %s: %s\", config.ArchPath(arch), err)\n\t}\n\tfor _, debFile := range dirList {\n\t\tif strings.HasSuffix(debFile.Name(), \"deb\") {\n\t\t\tvar packBuf bytes.Buffer\n\t\t\tdebPath := filepath.Join(config.ArchPath(arch), debFile.Name())\n\t\t\ttempCtlData, err := inspectPackage(debPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpackBuf.WriteString(tempCtlData)\n\t\t\tdir := filepath.Join(\"dists\/stable\/main\/binary-\"+arch, debFile.Name())\n\t\t\tfmt.Fprintf(&packBuf, \"Filename: %s\\n\", dir)\n\t\t\tfmt.Fprintf(&packBuf, \"Size: %d\\n\", debFile.Size())\n\t\t\tf, err := os.Open(debPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error opening deb file: \", err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tvar (\n\t\t\t\tmd5hash = md5.New()\n\t\t\t\tsha1hash = sha1.New()\n\t\t\t\tsha256hash = sha256.New()\n\t\t\t)\n\t\t\t_, err = io.Copy(io.MultiWriter(md5hash, sha1hash, sha256hash), f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error with the md5 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"MD5sum: %s\\n\",\n\t\t\t\thex.EncodeToString(md5hash.Sum(nil)))\n\t\t\tif _, err = io.Copy(sha1hash, f); err != nil {\n\t\t\t\tlog.Println(\"error with the sha1 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"SHA1: %s\\n\",\n\t\t\t\thex.EncodeToString(sha1hash.Sum(nil)))\n\t\t\tif _, err = io.Copy(sha256hash, f); err != nil {\n\t\t\t\tlog.Println(\"error with the sha256 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"SHA256: %s\\n\",\n\t\t\t\thex.EncodeToString(sha256hash.Sum(nil)))\n\t\t\tpackBuf.WriteString(\"\\n\\n\")\n\t\t\tgzOut.Write(packBuf.Bytes())\n\t\t\tf = nil\n\t\t}\n\t}\n\n\tgzOut.Flush()\n\treturn nil\n}\n\nfunc uploadHandler(config Conf) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\tlog.Println(\"not a POST\")\n\t\t\treturn\n\t\t}\n\t\tqueryVals := r.URL.Query()\n\t\tarchType := \"all\"\n\t\tif queryVals.Get(\"arch\") != \"\" {\n\t\t\tarchType = queryVals.Get(\"arch\")\n\t\t}\n\t\treader, err := r.MultipartReader()\n\t\tif err != nil {\n\t\t\thttpErrorf(w, \"error creating multipart reader: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tfor {\n\t\t\tpart, err := reader.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tlog.Println(\"breaking\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Println(\"form part: \", part.FormName())\n\t\t\tif part.FileName() == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Println(\"found file: \", part.FileName())\n\t\t\tlog.Println(\"creating files: \", part.FileName())\n\t\t\tdst, err := os.Create(filepath.Join(config.ArchPath(archType), part.FileName()))\n\t\t\tif err != nil {\n\t\t\t\thttpErrorf(w, \"error creating deb file: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer dst.Close()\n\t\t\tif _, err := io.Copy(dst, part); err != nil {\n\t\t\t\thttpErrorf(w, \"error writing deb file: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Println(\"grabbing lock...\")\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\n\t\tlog.Println(\"got lock, updating package list...\")\n\t\tcreatePkgRes := createPackagesGz(config, archType)\n\t\tif err := createPkgRes; err != nil {\n\t\t\thttpErrorf(w, \"error creating package: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"lock returned\")\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}\n\nfunc deleteHandler(config Conf) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"DELETE\" {\n\t\t\tlog.Println(\"not a DELETE\")\n\t\t\treturn\n\t\t}\n\t\tvar toDelete DeleteObj\n\t\tif err := json.NewDecoder(r.Body).Decode(&toDelete); err != nil {\n\t\t\thttpErrorf(w, \"failed to decode json: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tdebPath := filepath.Join(config.ArchPath(toDelete.Arch), toDelete.Filename)\n\t\tif err := os.Remove(debPath); err != nil {\n\t\t\thttpErrorf(w, \"failed to delete: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"grabbing lock...\")\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\n\t\tlog.Println(\"got lock, updating package list...\")\n\t\tif err := createPackagesGz(config, toDelete.Arch); err != nil {\n\t\t\thttpErrorf(w, \"failed to create package: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"lock returned\")\n\t})\n}\n\nfunc httpErrorf(w http.ResponseWriter, format string, a ...interface{}) {\n\terr := fmt.Errorf(format, a...)\n\tlog.Println(err)\n\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n}\n<commit_msg>style preference<commit_after>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/blakesmith\/ar\"\n)\n\ntype Conf struct {\n\tListenPort string `json:\"listenPort\"`\n\tRootRepoPath string `json:\"rootRepoPath\"`\n\tSupportArch []string `json:\"supportedArch\"`\n\tEnableSSL bool `json:\"enableSSL\"`\n\tSSLCert string `json:\"SSLcert\"`\n\tSSLKey string `json:\"SSLkey\"`\n}\n\nfunc (c Conf) ArchPath(arch string) string {\n\treturn filepath.Join(c.RootRepoPath, \"\/dists\/stable\/main\/binary-\"+arch)\n}\n\ntype DeleteObj struct {\n\tFilename string\n\tArch string\n}\n\nvar (\n\tmutex sync.Mutex\n\tconfigFile = flag.String(\"c\", \"conf.json\", \"config file location\")\n\tparsedConfig = Conf{}\n)\n\nfunc main() {\n\tflag.Parse()\n\tfile, err := ioutil.ReadFile(*configFile)\n\tif err != nil {\n\t\tlog.Fatal(\"unable to read config file, exiting...\")\n\t}\n\t\/\/config = &Conf{}\n\tif err := json.Unmarshal(file, &parsedConfig); err != nil {\n\t\tlog.Fatal(\"unable to marshal config file, exiting...\")\n\t}\n\n\tif err := createDirs(parsedConfig); err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalf(\"error creating directory structure, exiting\")\n\t}\n\n\thttp.Handle(\"\/\", http.StripPrefix(\"\/\", http.FileServer(http.Dir(parsedConfig.RootRepoPath))))\n\thttp.Handle(\"\/upload\", uploadHandler(parsedConfig))\n\thttp.Handle(\"\/delete\", deleteHandler(parsedConfig))\n\n\tif parsedConfig.EnableSSL {\n\t\tlog.Println(\"running with SSL enabled\")\n\t\tlog.Fatal(http.ListenAndServeTLS(\":\"+parsedConfig.ListenPort, parsedConfig.SSLCert, parsedConfig.SSLKey, nil))\n\t} else {\n\t\tlog.Println(\"running without SSL enabled\")\n\t\tlog.Fatal(http.ListenAndServe(\":\"+parsedConfig.ListenPort, nil))\n\t}\n}\n\nfunc createDirs(config Conf) error {\n\tfor _, arch := range config.SupportArch {\n\t\tif _, err := os.Stat(config.ArchPath(arch)); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tlog.Printf(\"Directory for %s does not exist, creating\", arch)\n\t\t\t\tif err := os.MkdirAll(config.ArchPath(arch), 0755); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error creating directory for %s: %s\", arch, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"error inspecting %s: %s\", arch, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc inspectPackage(filename string) (string, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error opening package file %s: %s\", filename, err)\n\t}\n\n\tarReader := ar.NewReader(f)\n\tdefer f.Close()\n\tvar controlBuf bytes.Buffer\n\n\tfor {\n\t\theader, err := arReader.Next()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error in inspectPackage loop: %s\", err)\n\t\t}\n\n\t\tif header.Name == \"control.tar.gz\" {\n\t\t\tio.Copy(&controlBuf, arReader)\n\t\t\treturn inspectPackageControl(controlBuf)\n\t\t}\n\n\t}\n\treturn \"\", nil\n}\n\nfunc inspectPackageControl(filename bytes.Buffer) (string, error) {\n\tgzf, err := gzip.NewReader(bytes.NewReader(filename.Bytes()))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating gzip reader: %s\", err)\n\t}\n\n\ttarReader := tar.NewReader(gzf)\n\tvar controlBuf bytes.Buffer\n\tfor {\n\t\theader, err := tarReader.Next()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to inpect package: %s\", err)\n\t\t}\n\n\t\tname := header.Name\n\n\t\tswitch header.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\tcontinue\n\t\tcase tar.TypeReg:\n\t\t\tif name == \".\/control\" {\n\t\t\t\tio.Copy(&controlBuf, tarReader)\n\t\t\t\treturn controlBuf.String(), nil\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Printf(\n\t\t\t\t\"Unable to figure out type : %c in file %s\\n\",\n\t\t\t\theader.Typeflag, name,\n\t\t\t)\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\nfunc createPackagesGz(config Conf, arch string) error {\n\toutfile, err := os.Create(filepath.Join(config.ArchPath(arch), \"Packages.gz\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create packages.gz: %s\", err)\n\t}\n\tdefer outfile.Close()\n\tgzOut := gzip.NewWriter(outfile)\n\tdefer gzOut.Close()\n\t\/\/ loop through each directory\n\t\/\/ run inspectPackage\n\tdirList, err := ioutil.ReadDir(config.ArchPath(arch))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"scanning: %s: %s\", config.ArchPath(arch), err)\n\t}\n\tfor _, debFile := range dirList {\n\t\tif strings.HasSuffix(debFile.Name(), \"deb\") {\n\t\t\tvar packBuf bytes.Buffer\n\t\t\tdebPath := filepath.Join(config.ArchPath(arch), debFile.Name())\n\t\t\ttempCtlData, err := inspectPackage(debPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpackBuf.WriteString(tempCtlData)\n\t\t\tdir := filepath.Join(\"dists\/stable\/main\/binary-\"+arch, debFile.Name())\n\t\t\tfmt.Fprintf(&packBuf, \"Filename: %s\\n\", dir)\n\t\t\tfmt.Fprintf(&packBuf, \"Size: %d\\n\", debFile.Size())\n\t\t\tf, err := os.Open(debPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error opening deb file: \", err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tvar (\n\t\t\t\tmd5hash = md5.New()\n\t\t\t\tsha1hash = sha1.New()\n\t\t\t\tsha256hash = sha256.New()\n\t\t\t)\n\t\t\t_, err = io.Copy(io.MultiWriter(md5hash, sha1hash, sha256hash), f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error with the md5 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"MD5sum: %s\\n\",\n\t\t\t\thex.EncodeToString(md5hash.Sum(nil)))\n\t\t\tif _, err = io.Copy(sha1hash, f); err != nil {\n\t\t\t\tlog.Println(\"error with the sha1 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"SHA1: %s\\n\",\n\t\t\t\thex.EncodeToString(sha1hash.Sum(nil)))\n\t\t\tif _, err = io.Copy(sha256hash, f); err != nil {\n\t\t\t\tlog.Println(\"error with the sha256 hash: \", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(&packBuf, \"SHA256: %s\\n\",\n\t\t\t\thex.EncodeToString(sha256hash.Sum(nil)))\n\t\t\tpackBuf.WriteString(\"\\n\\n\")\n\t\t\tgzOut.Write(packBuf.Bytes())\n\t\t\tf = nil\n\t\t}\n\t}\n\n\tgzOut.Flush()\n\treturn nil\n}\n\nfunc uploadHandler(config Conf) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\tlog.Println(\"not a POST\")\n\t\t\treturn\n\t\t}\n\t\tarchType := r.URL.Query().Get(\"arch\")\n\t\tif archType == \"\" {\n\t\t\tarchType = \"all\"\n\t\t}\n\t\treader, err := r.MultipartReader()\n\t\tif err != nil {\n\t\t\thttpErrorf(w, \"error creating multipart reader: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tfor {\n\t\t\tpart, err := reader.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tlog.Println(\"breaking\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Println(\"form part: \", part.FormName())\n\t\t\tif part.FileName() == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Println(\"found file: \", part.FileName())\n\t\t\tlog.Println(\"creating files: \", part.FileName())\n\t\t\tdst, err := os.Create(filepath.Join(config.ArchPath(archType), part.FileName()))\n\t\t\tif err != nil {\n\t\t\t\thttpErrorf(w, \"error creating deb file: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer dst.Close()\n\t\t\tif _, err := io.Copy(dst, part); err != nil {\n\t\t\t\thttpErrorf(w, \"error writing deb file: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Println(\"grabbing lock...\")\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\n\t\tlog.Println(\"got lock, updating package list...\")\n\t\tif err := createPackagesGz(config, archType); err != nil {\n\t\t\thttpErrorf(w, \"error creating package: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"lock returned\")\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}\n\nfunc deleteHandler(config Conf) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"DELETE\" {\n\t\t\tlog.Println(\"not a DELETE\")\n\t\t\treturn\n\t\t}\n\t\tvar toDelete DeleteObj\n\t\tif err := json.NewDecoder(r.Body).Decode(&toDelete); err != nil {\n\t\t\thttpErrorf(w, \"failed to decode json: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tdebPath := filepath.Join(config.ArchPath(toDelete.Arch), toDelete.Filename)\n\t\tif err := os.Remove(debPath); err != nil {\n\t\t\thttpErrorf(w, \"failed to delete: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"grabbing lock...\")\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\n\t\tlog.Println(\"got lock, updating package list...\")\n\t\tif err := createPackagesGz(config, toDelete.Arch); err != nil {\n\t\t\thttpErrorf(w, \"failed to create package: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"lock returned\")\n\t})\n}\n\nfunc httpErrorf(w http.ResponseWriter, format string, a ...interface{}) {\n\terr := fmt.Errorf(format, a...)\n\tlog.Println(err)\n\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/chzyer\/readline\"\n)\n\nvar db *bolt.DB\n\ntype command struct {\n\tname string\n\tinfo *commandInfo\n}\n\n\/\/ commandInfo struct is stored as the value to commands\ntype commandInfo struct {\n\ttime time.Time\n\tcount int\n}\n\nfunc (ci *commandInfo) String() string {\n\t\/\/ Store the time in RFC3339 format for easy parsing\n\treturn fmt.Sprintf(\"%s%s%d\", ci.time.Format(time.RFC3339), \",\", ci.count)\n}\n\nfunc (ci *commandInfo) Update(ciString string) {\n\tinfo := strings.Split(ciString, \",\")\n\n\tcount, err := strconv.Atoi(info[1])\n\tif err != nil {\n\t\tcount = 0\n\t}\n\n\tci.time = time.Now()\n\tci.count = count + 1\n}\n\nfunc (ci *commandInfo) NewFromString(ciString string) *commandInfo {\n\tinfo := strings.Split(ciString, \",\")\n\n\t\/\/ Parse the time as RFC3339 format\n\tdate, err := time.Parse(time.RFC3339, info[0])\n\tif err != nil {\n\t\tdate = time.Now()\n\t}\n\n\tcount, err := strconv.Atoi(info[1])\n\tif err != nil {\n\t\tcount = 0\n\t}\n\n\tci.time = date\n\tci.count = count\n\n\treturn ci\n}\n\ntype byTime []*command\n\nfunc (s byTime) Len() int {\n\treturn len(s)\n}\n\nfunc (s byTime) Less(i, j int) bool {\n\treturn s[i].info.time.After(s[j].info.time)\n}\n\nfunc (s byTime) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc main() {\n\t\/\/ Setup flags\n\t\/\/ statsPtr := flag.Bool(\"stats\", false, \"show stats and usage of `r`\")\n\t\/\/ completePtr := flag.String(\"complete\", \"\", \"show all results for `r`\")\n\tcommandPtr := flag.Bool(\"command\", false, \"show last command selected from `r`\")\n\taddPtr := flag.String(\"add\", \"\", \"show stats and usage of `r`\")\n\tflag.Parse()\n\n\t\/\/ Setup bolt db\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tboltPath := filepath.Join(usr.HomeDir, \".r.db\")\n\t\/\/ It will be created if it doesn't exist.\n\tdb, err = bolt.Open(boltPath, 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t\/\/ Check if `results` flag is passed\n\t\/\/ if *completePtr != \"\" {\n\t\/\/ \tresults := showResults(*completePtr)\n\t\/\/ \tfor _, result := range results {\n\t\/\/ \t\tfmt.Println(result)\n\t\/\/ \t}\n\t\/\/ \tos.Exit(0)\n\t\/\/ }\n\n\tif *commandPtr {\n\t\tprintLastCommand()\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ Check if `add` flag is passed\n\tif *addPtr != \"\" {\n\t\targs := strings.Split(*addPtr, \":\")\n\t\tif len(args) != 2 {\n\t\t\tfmt.Println(\"Could not add command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr := add(args[0], args[1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ reset last command to blank\n\t\/\/ set line as stored command\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(\"command\"), []byte(\"\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treadLine()\n}\n\n\/\/ readLine used the readline library create a prompt to\n\/\/ show the command history\nfunc readLine() {\n\t\/\/ create completer from results\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tresults, err := results(wd)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tvar pcItems []*readline.PrefixCompleter\n\tfor _, result := range results {\n\t\tpcItems = append(pcItems, readline.PcItem(result.name))\n\t}\n\tvar completer = readline.NewPrefixCompleter(pcItems...)\n\n\trl, err := readline.NewEx(&readline.Config{\n\t\tPrompt: \"r> \",\n\t\tAutoComplete: completer,\n\t})\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tdefer rl.Close()\n\n\tfor {\n\t\tline, err := rl.Readline()\n\t\tif err != nil { \/\/ io.EOF\n\t\t\tbreak\n\t\t}\n\n\t\tline = strings.TrimSpace(line)\n\n\t\tcmdNames := namesOfCmds(results)\n\n\t\t\/\/ Only execute if the command typed is in the list of results\n\t\tif !containsCmd(line, cmdNames) {\n\t\t\tfmt.Println(\"Command not found in `r` history.\")\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\t\/\/ The command was found and will be executed so add it to the DB to update\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error executing command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tadd(wd, line)\n\n\t\t\/\/ set line as stored command\n\t\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = b.Put([]byte(\"command\"), []byte(line))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error storing command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tos.Exit(0)\n\t}\n}\n\n\/\/ printLastCommand is used with the --command flag\n\/\/ it shows the last command selected from the readline prompt\nfunc printLastCommand() {\n\tvar val string\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tval = string(b.Get([]byte(\"command\")))\n\t\treturn nil\n\t})\n\n\tfmt.Println(val)\n}\n\n\/\/ showResults reads the boltdb and returns the command history\n\/\/ based on your current working directory\nfunc results(path string) ([]*command, error) {\n\t\/\/ dir := filepath.Dir(path)\n\n\t\/\/ results := []string{\"git status\", \"git clone\", \"go install\", \"cd \/Users\/jesse\/\", \"cd \/Users\/jesse\/gocode\/src\/github.com\/jesselucas\", \"ls -Glah\"}\n\tvar results []*command\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"DirectoryBucket\"))\n\t\tpathBucket := b.Bucket([]byte(path))\n\t\treturn pathBucket.ForEach(func(k, v []byte) error {\n\t\t\tcmd := new(command)\n\t\t\tci := new(commandInfo)\n\t\t\tcmd.name = string(k)\n\t\t\tcmd.info = ci.NewFromString(string(v))\n\n\t\t\tresults = append(results, cmd)\n\t\t\treturn nil\n\t\t})\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Sorty by last command first\n\tsort.Sort(byTime(results))\n\t\/\/ for _, cmd := range results {\n\t\/\/ \tfmt.Printf(\"%s: %s \\n\", cmd.name, cmd.info.time)\n\t\/\/ }\n\n\treturn results, nil\n\n\t\/\/ filter\n\t\/\/ var filtered []string\n\t\/\/ for _, result := range results {\n\t\/\/ \tif strings.HasPrefix(result, input) {\n\t\/\/ \t\tfiltered = append(filtered, result)\n\t\/\/ \t}\n\t\/\/ }\n\t\/\/\n\t\/\/ return filtered\n\n}\n\nfunc globalResults() ([]*command, error) {\n\t\/\/ Now get all the commands stored\n\tvar results []*command\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"CommandBucket\"))\n\t\terr := b.ForEach(func(k, v []byte) error {\n\t\t\tcommand := new(command)\n\t\t\tci := new(commandInfo)\n\t\t\tcommand.name = string(k)\n\t\t\tcommand.info = ci.NewFromString(string(v))\n\t\t\tresults = append(results, command)\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Sorty by last command first\n\tsort.Sort(byTime(results))\n\t\/\/ for _, cmd := range results {\n\t\/\/ \tfmt.Printf(\"%s: %s \\n\", cmd.name, cmd.info.time)\n\t\/\/ }\n\n\treturn results, nil\n}\n\n\/\/ add checks if command being passed is in the listCommands\n\/\/ then stores the command and workding directory\nfunc add(path string, promptCmd string) error {\n\t\/\/ get the first command in the promptCmd string\n\tcmd := strings.Split(promptCmd, \" \")[0]\n\n\t\/\/ Don't store if the command is r\n\tif cmd == \"r\" {\n\t\treturn nil\n\t}\n\n\tcommands, err := listCommands()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check if the command is valid\n\tif !containsCmd(cmd, commands) {\n\t\treturn nil\n\t}\n\n\t\/\/ Add command to db\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tdirectoryBucket, err := tx.CreateBucketIfNotExists([]byte(\"DirectoryBucket\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpathBucket, err := directoryBucket.CreateBucketIfNotExists([]byte(path))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Store path and command for contextual path sorting\n\t\tcmdBucket, err := tx.CreateBucketIfNotExists([]byte(\"CommandBucket\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Create commandInfo struct\n\t\tci := new(commandInfo)\n\t\tci.time = time.Now()\n\t\tci.count = 1\n\n\t\t\/\/ Check if there is a command info value already\n\t\tv := cmdBucket.Get([]byte(promptCmd))\n\t\tif v != nil {\n\t\t\t\/\/ There is a previous command info value\n\t\t\t\/\/ Let's update the count and time\n\t\t\tci.Update(string(v))\n\t\t}\n\n\t\terr = cmdBucket.Put([]byte(promptCmd), []byte(ci.String()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Now let's do the same thing for the pathBucket\n\t\tv = pathBucket.Get([]byte(promptCmd))\n\t\tif v != nil {\n\t\t\t\/\/ There is a previous command info value\n\t\t\t\/\/ Let's update the count and time\n\t\t\tci.Update(string(v))\n\t\t}\n\n\t\terr = pathBucket.Put([]byte(promptCmd), []byte(ci.String()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now prune the older commands\n\terr = prune(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ prune deletes commands from a directory bucket and overall bucket\nfunc prune(path string) error {\n\t\/\/ TODO move this to an environment variable\n\tnumberToPruneDir := 20\n\tnumberToPruneGlobal := 100\n\tpruneGlobal := true\n\tprunePath := true\n\n\tresults, err := results(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(results) <= numberToPruneDir {\n\t\tprunePath = false\n\t}\n\n\t\/\/ List the global commands\n\tglobalResults, err := globalResults()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ set pruneGlobal to true if there isn't enough\n\tif len(globalResults) <= numberToPruneGlobal {\n\t\tpruneGlobal = false\n\t}\n\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tif prunePath {\n\t\t\tdirectoryBucket, err := tx.CreateBucketIfNotExists([]byte(\"DirectoryBucket\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpathBucket, err := directoryBucket.CreateBucketIfNotExists([]byte(path))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpruneDirResults := results[numberToPruneDir:]\n\t\t\tfor _, cmd := range pruneDirResults {\n\t\t\t\tpathBucket.Delete([]byte(cmd.name))\n\t\t\t}\n\t\t}\n\t\t\/\/ Prune stored global commands\n\t\tif pruneGlobal {\n\t\t\t\/\/ Store path and command for contextual path sorting\n\t\t\tcmdBucket, err := tx.CreateBucketIfNotExists([]byte(\"CommandBucket\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpruneGlobalResults := globalResults[numberToPruneGlobal:]\n\t\t\tfor _, cmd := range pruneGlobalResults {\n\t\t\t\tcmdBucket.Delete([]byte(cmd.name))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ namesOfCmds takes a slice of command structs and return\n\/\/ a slice with just their names\nfunc namesOfCmds(cmds []*command) []string {\n\tvar names []string\n\tfor _, cmd := range cmds {\n\t\tnames = append(names, cmd.name)\n\t}\n\n\treturn names\n}\n\n\/\/ containsCmd checks if a command string is is in a slice of strings\nfunc containsCmd(cmd string, commands []string) bool {\n\tfor _, c := range commands {\n\t\t\/\/ check first command against list of commands\n\t\tif c == cmd {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ listCommands use $PATH to find directories\n\/\/ Then reads each directory and looks for executables\nfunc listCommands() ([]string, error) {\n\t\/\/ Split $PATH directories into slice\n\tpaths := strings.Split(os.Getenv(\"PATH\"), \":\")\n\tvar commands []string\n\n\t\/\/ created buffered error chan\n\terrc := make(chan error, 1)\n\n\t\/\/ sync go routines\n\tvar wg sync.WaitGroup\n\n\t\/\/ find commands appends results to commands slice\n\tfindCommands := func(p string) {\n\t\tdefer wg.Done()\n\n\t\tfiles, err := ioutil.ReadDir(p)\n\t\tif err != nil {\n\t\t\terrc <- err \/\/ write err into error chan\n\t\t\treturn\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tm := f.Mode()\n\n\t\t\t\/\/ Check if file is executable\n\t\t\tif m&0111 != 0 {\n\t\t\t\tcommands = append(commands, f.Name())\n\t\t\t}\n\t\t}\n\n\t\terrc <- nil \/\/ write nil into error chan\n\t}\n\n\t\/\/ Check each path for commands\n\tfor _, p := range paths {\n\t\twg.Add(1)\n\t\tgo findCommands(p)\n\n\t\t\/\/ read any error that is in error chan\n\t\tif err := <-errc; err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\twg.Wait() \/\/ Wait for the paths to be checked\n\n\treturn commands, nil\n}\n\n\/\/ stats TODO print stats and usage of r\nfunc stats() {\n\tfmt.Println(\"stats\")\n}\n<commit_msg>adding todo<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/chzyer\/readline\"\n)\n\nvar db *bolt.DB\n\ntype command struct {\n\tname string\n\tinfo *commandInfo\n}\n\n\/\/ commandInfo struct is stored as the value to commands\ntype commandInfo struct {\n\ttime time.Time\n\tcount int\n}\n\nfunc (ci *commandInfo) String() string {\n\t\/\/ Store the time in RFC3339 format for easy parsing\n\treturn fmt.Sprintf(\"%s%s%d\", ci.time.Format(time.RFC3339), \",\", ci.count)\n}\n\nfunc (ci *commandInfo) Update(ciString string) {\n\tinfo := strings.Split(ciString, \",\")\n\n\tcount, err := strconv.Atoi(info[1])\n\tif err != nil {\n\t\tcount = 0\n\t}\n\n\tci.time = time.Now()\n\tci.count = count + 1\n}\n\nfunc (ci *commandInfo) NewFromString(ciString string) *commandInfo {\n\tinfo := strings.Split(ciString, \",\")\n\n\t\/\/ Parse the time as RFC3339 format\n\tdate, err := time.Parse(time.RFC3339, info[0])\n\tif err != nil {\n\t\tdate = time.Now()\n\t}\n\n\tcount, err := strconv.Atoi(info[1])\n\tif err != nil {\n\t\tcount = 0\n\t}\n\n\tci.time = date\n\tci.count = count\n\n\treturn ci\n}\n\ntype byTime []*command\n\nfunc (s byTime) Len() int {\n\treturn len(s)\n}\n\nfunc (s byTime) Less(i, j int) bool {\n\treturn s[i].info.time.After(s[j].info.time)\n}\n\nfunc (s byTime) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc main() {\n\t\/\/ Setup flags\n\t\/\/ statsPtr := flag.Bool(\"stats\", false, \"show stats and usage of `r`\")\n\t\/\/ completePtr := flag.String(\"complete\", \"\", \"show all results for `r`\")\n\tcommandPtr := flag.Bool(\"command\", false, \"show last command selected from `r`\")\n\taddPtr := flag.String(\"add\", \"\", \"show stats and usage of `r`\")\n\tflag.Parse()\n\n\t\/\/ Setup bolt db\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tboltPath := filepath.Join(usr.HomeDir, \".r.db\")\n\t\/\/ It will be created if it doesn't exist.\n\tdb, err = bolt.Open(boltPath, 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t\/\/ Check if `results` flag is passed\n\t\/\/ if *completePtr != \"\" {\n\t\/\/ \tresults := showResults(*completePtr)\n\t\/\/ \tfor _, result := range results {\n\t\/\/ \t\tfmt.Println(result)\n\t\/\/ \t}\n\t\/\/ \tos.Exit(0)\n\t\/\/ }\n\n\tif *commandPtr {\n\t\tprintLastCommand()\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ TODO add a global flag to see all command history\n\n\t\/\/ Check if `add` flag is passed\n\tif *addPtr != \"\" {\n\t\targs := strings.Split(*addPtr, \":\")\n\t\tif len(args) != 2 {\n\t\t\tfmt.Println(\"Could not add command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr := add(args[0], args[1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ reset last command to blank\n\t\/\/ set line as stored command\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(\"command\"), []byte(\"\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treadLine()\n}\n\n\/\/ readLine used the readline library create a prompt to\n\/\/ show the command history\nfunc readLine() {\n\t\/\/ create completer from results\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tresults, err := results(wd)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tvar pcItems []*readline.PrefixCompleter\n\tfor _, result := range results {\n\t\tpcItems = append(pcItems, readline.PcItem(result.name))\n\t}\n\tvar completer = readline.NewPrefixCompleter(pcItems...)\n\n\trl, err := readline.NewEx(&readline.Config{\n\t\tPrompt: \"r> \",\n\t\tAutoComplete: completer,\n\t})\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tdefer rl.Close()\n\n\tfor {\n\t\tline, err := rl.Readline()\n\t\tif err != nil { \/\/ io.EOF\n\t\t\tbreak\n\t\t}\n\n\t\tline = strings.TrimSpace(line)\n\n\t\tcmdNames := namesOfCmds(results)\n\n\t\t\/\/ Only execute if the command typed is in the list of results\n\t\tif !containsCmd(line, cmdNames) {\n\t\t\tfmt.Println(\"Command not found in `r` history.\")\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\t\/\/ The command was found and will be executed so add it to the DB to update\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error executing command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tadd(wd, line)\n\n\t\t\/\/ set line as stored command\n\t\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = b.Put([]byte(\"command\"), []byte(line))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error storing command.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tos.Exit(0)\n\t}\n}\n\n\/\/ printLastCommand is used with the --command flag\n\/\/ it shows the last command selected from the readline prompt\nfunc printLastCommand() {\n\tvar val string\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"command\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tval = string(b.Get([]byte(\"command\")))\n\t\treturn nil\n\t})\n\n\tfmt.Println(val)\n}\n\n\/\/ showResults reads the boltdb and returns the command history\n\/\/ based on your current working directory\nfunc results(path string) ([]*command, error) {\n\t\/\/ dir := filepath.Dir(path)\n\n\t\/\/ results := []string{\"git status\", \"git clone\", \"go install\", \"cd \/Users\/jesse\/\", \"cd \/Users\/jesse\/gocode\/src\/github.com\/jesselucas\", \"ls -Glah\"}\n\tvar results []*command\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"DirectoryBucket\"))\n\t\tpathBucket := b.Bucket([]byte(path))\n\t\treturn pathBucket.ForEach(func(k, v []byte) error {\n\t\t\tcmd := new(command)\n\t\t\tci := new(commandInfo)\n\t\t\tcmd.name = string(k)\n\t\t\tcmd.info = ci.NewFromString(string(v))\n\n\t\t\tresults = append(results, cmd)\n\t\t\treturn nil\n\t\t})\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Sorty by last command first\n\tsort.Sort(byTime(results))\n\t\/\/ for _, cmd := range results {\n\t\/\/ \tfmt.Printf(\"%s: %s \\n\", cmd.name, cmd.info.time)\n\t\/\/ }\n\n\treturn results, nil\n\n\t\/\/ filter\n\t\/\/ var filtered []string\n\t\/\/ for _, result := range results {\n\t\/\/ \tif strings.HasPrefix(result, input) {\n\t\/\/ \t\tfiltered = append(filtered, result)\n\t\/\/ \t}\n\t\/\/ }\n\t\/\/\n\t\/\/ return filtered\n\n}\n\nfunc globalResults() ([]*command, error) {\n\t\/\/ Now get all the commands stored\n\tvar results []*command\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"CommandBucket\"))\n\t\terr := b.ForEach(func(k, v []byte) error {\n\t\t\tcommand := new(command)\n\t\t\tci := new(commandInfo)\n\t\t\tcommand.name = string(k)\n\t\t\tcommand.info = ci.NewFromString(string(v))\n\t\t\tresults = append(results, command)\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Sorty by last command first\n\tsort.Sort(byTime(results))\n\t\/\/ for _, cmd := range results {\n\t\/\/ \tfmt.Printf(\"%s: %s \\n\", cmd.name, cmd.info.time)\n\t\/\/ }\n\n\treturn results, nil\n}\n\n\/\/ add checks if command being passed is in the listCommands\n\/\/ then stores the command and workding directory\nfunc add(path string, promptCmd string) error {\n\t\/\/ get the first command in the promptCmd string\n\tcmd := strings.Split(promptCmd, \" \")[0]\n\n\t\/\/ Don't store if the command is r\n\tif cmd == \"r\" {\n\t\treturn nil\n\t}\n\n\tcommands, err := listCommands()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check if the command is valid\n\tif !containsCmd(cmd, commands) {\n\t\treturn nil\n\t}\n\n\t\/\/ Add command to db\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tdirectoryBucket, err := tx.CreateBucketIfNotExists([]byte(\"DirectoryBucket\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpathBucket, err := directoryBucket.CreateBucketIfNotExists([]byte(path))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Store path and command for contextual path sorting\n\t\tcmdBucket, err := tx.CreateBucketIfNotExists([]byte(\"CommandBucket\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Create commandInfo struct\n\t\tci := new(commandInfo)\n\t\tci.time = time.Now()\n\t\tci.count = 1\n\n\t\t\/\/ Check if there is a command info value already\n\t\tv := cmdBucket.Get([]byte(promptCmd))\n\t\tif v != nil {\n\t\t\t\/\/ There is a previous command info value\n\t\t\t\/\/ Let's update the count and time\n\t\t\tci.Update(string(v))\n\t\t}\n\n\t\terr = cmdBucket.Put([]byte(promptCmd), []byte(ci.String()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Now let's do the same thing for the pathBucket\n\t\tv = pathBucket.Get([]byte(promptCmd))\n\t\tif v != nil {\n\t\t\t\/\/ There is a previous command info value\n\t\t\t\/\/ Let's update the count and time\n\t\t\tci.Update(string(v))\n\t\t}\n\n\t\terr = pathBucket.Put([]byte(promptCmd), []byte(ci.String()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now prune the older commands\n\terr = prune(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ prune deletes commands from a directory bucket and overall bucket\nfunc prune(path string) error {\n\t\/\/ TODO move this to an environment variable\n\tnumberToPruneDir := 20\n\tnumberToPruneGlobal := 100\n\tpruneGlobal := true\n\tprunePath := true\n\n\tresults, err := results(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(results) <= numberToPruneDir {\n\t\tprunePath = false\n\t}\n\n\t\/\/ List the global commands\n\tglobalResults, err := globalResults()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ set pruneGlobal to true if there isn't enough\n\tif len(globalResults) <= numberToPruneGlobal {\n\t\tpruneGlobal = false\n\t}\n\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tif prunePath {\n\t\t\tdirectoryBucket, err := tx.CreateBucketIfNotExists([]byte(\"DirectoryBucket\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpathBucket, err := directoryBucket.CreateBucketIfNotExists([]byte(path))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpruneDirResults := results[numberToPruneDir:]\n\t\t\tfor _, cmd := range pruneDirResults {\n\t\t\t\tpathBucket.Delete([]byte(cmd.name))\n\t\t\t}\n\t\t}\n\t\t\/\/ Prune stored global commands\n\t\tif pruneGlobal {\n\t\t\t\/\/ Store path and command for contextual path sorting\n\t\t\tcmdBucket, err := tx.CreateBucketIfNotExists([]byte(\"CommandBucket\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpruneGlobalResults := globalResults[numberToPruneGlobal:]\n\t\t\tfor _, cmd := range pruneGlobalResults {\n\t\t\t\tcmdBucket.Delete([]byte(cmd.name))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ namesOfCmds takes a slice of command structs and return\n\/\/ a slice with just their names\nfunc namesOfCmds(cmds []*command) []string {\n\tvar names []string\n\tfor _, cmd := range cmds {\n\t\tnames = append(names, cmd.name)\n\t}\n\n\treturn names\n}\n\n\/\/ containsCmd checks if a command string is is in a slice of strings\nfunc containsCmd(cmd string, commands []string) bool {\n\tfor _, c := range commands {\n\t\t\/\/ check first command against list of commands\n\t\tif c == cmd {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ listCommands use $PATH to find directories\n\/\/ Then reads each directory and looks for executables\nfunc listCommands() ([]string, error) {\n\t\/\/ Split $PATH directories into slice\n\tpaths := strings.Split(os.Getenv(\"PATH\"), \":\")\n\tvar commands []string\n\n\t\/\/ created buffered error chan\n\terrc := make(chan error, 1)\n\n\t\/\/ sync go routines\n\tvar wg sync.WaitGroup\n\n\t\/\/ find commands appends results to commands slice\n\tfindCommands := func(p string) {\n\t\tdefer wg.Done()\n\n\t\tfiles, err := ioutil.ReadDir(p)\n\t\tif err != nil {\n\t\t\terrc <- err \/\/ write err into error chan\n\t\t\treturn\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tm := f.Mode()\n\n\t\t\t\/\/ Check if file is executable\n\t\t\tif m&0111 != 0 {\n\t\t\t\tcommands = append(commands, f.Name())\n\t\t\t}\n\t\t}\n\n\t\terrc <- nil \/\/ write nil into error chan\n\t}\n\n\t\/\/ Check each path for commands\n\tfor _, p := range paths {\n\t\twg.Add(1)\n\t\tgo findCommands(p)\n\n\t\t\/\/ read any error that is in error chan\n\t\tif err := <-errc; err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\twg.Wait() \/\/ Wait for the paths to be checked\n\n\treturn commands, nil\n}\n\n\/\/ stats TODO print stats and usage of r\nfunc stats() {\n\tfmt.Println(\"stats\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/leptonyu\/goeast\/db\"\n\t\"github.com\/leptonyu\/goeast\/util\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tport := flag.Int(\"port\", 8080, \"Web service port\")\n\tapi := flag.String(\"api\", \"api\", \"http:\/\/localhost\/$api,\\n\tAlso use as database name with prefix wechat_\")\n\tappid := flag.String(\"appid\", \"\", \"App id\")\n\tsecret := flag.String(\"secret\", \"\", \"App secret\")\n\ttoken := flag.String(\"token\", \"\", \"Token\")\n\tinit := flag.Bool(\"init\", false, \"Init \")\n\thelp := flag.Bool(\"h\", false, \"Help\")\n\tflag.Parse()\n\tif *help {\n\t\tflag.PrintDefaults()\n\t\tos.Exit(0)\n\t}\n\tconfig, err := db.NewDBConfig(*api)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t\tos.Exit(1)\n\t}\n\tif *init {\n\t\terr := config.Init(*appid, *secret, *token)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfunc(keys ...string) {\n\t\t\tfor _, key := range keys {\n\t\t\t\tlog.Println(\"Fetching web \" + key)\n\t\t\t\tconfig.UpdateMsg(key)\n\t\t\t}\n\t\t}(db.Blog, db.Events,\n\t\t\tdb.Home,\n\t\t\tdb.Campus,\n\t\t\tdb.Contact,\n\t\t\tdb.Galleries,\n\t\t\tdb.One2one,\n\t\t\tdb.Online,\n\t\t\tdb.Onsite,\n\t\t\tdb.Teachers,\n\t\t\tdb.Testimonials)\n\t\tos.Exit(0)\n\t}\n\tutil.StartWeb(*port, config)\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/leptonyu\/goeast\/db\"\n\t\"github.com\/leptonyu\/goeast\/util\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tport := flag.Int(\"port\", 8080, \"Web service port\")\n\tapi := flag.String(\"api\", \"api\", \"http:\/\/localhost\/$api,\\n\tAlso use as database name with prefix wechat_\")\n\tappid := flag.String(\"appid\", \"\", \"App id\")\n\tsecret := flag.String(\"secret\", \"\", \"App secret\")\n\ttoken := flag.String(\"token\", \"\", \"Token\")\n\tinit := flag.Bool(\"init\", false, \"Init \")\n\thelp := flag.Bool(\"h\", false, \"Help\")\n\tflag.Parse()\n\tif *help {\n\t\tflag.PrintDefaults()\n\t\tos.Exit(0)\n\t}\n\tconfig, err := db.NewDBConfig(*api)\n\tif *init {\n\t\terr := config.Init(*appid, *secret, *token)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfunc(keys ...string) {\n\t\t\tfor _, key := range keys {\n\t\t\t\tlog.Println(\"Fetching web \" + key)\n\t\t\t\tconfig.UpdateMsg(key)\n\t\t\t}\n\t\t}(db.Blog, db.Events,\n\t\t\tdb.Home,\n\t\t\tdb.Campus,\n\t\t\tdb.Contact,\n\t\t\tdb.Galleries,\n\t\t\tdb.One2one,\n\t\t\tdb.Online,\n\t\t\tdb.Onsite,\n\t\t\tdb.Teachers,\n\t\t\tdb.Testimonials)\n\t\tos.Exit(0)\n\t}\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t\tos.Exit(1)\n\t}\n\n\tutil.StartWeb(*port, config)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/mattn\/go-colorable\"\n\t\"github.com\/tonnerre\/golang-pretty\"\n\n\t\"github.com\/alecthomas\/kingpin\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/rightscale\/rsc\/cm15\"\n\t\"github.com\/rightscale\/rsc\/log\"\n\t\"github.com\/rightscale\/rsc\/rsapi\"\n)\n\nvar (\n\tapp = kingpin.New(\"right_st\", \"A command-line application for managing RightScripts\")\n\tversion = app.Flag(\"version\", \"Print version\").Short('v').Bool()\n\tdebug = app.Flag(\"debug\", \"Debug mode\").Short('d').Bool()\n\tconfigFile = app.Flag(\"config\", \"Set the config file path.\").Short('c').Default(defaultConfigFile()).String()\n\tenvironment = app.Flag(\"environment\", \"Set the RightScale login environment.\").Short('e').String()\n\n\trightScript = app.Command(\"rightscript\", \"RightScript stuff\")\n\n\trightScriptList = rightScript.Command(\"list\", \"List RightScripts\")\n\trightScriptListFilter = rightScriptList.Arg(\"filter\", \"Filter by name\").Required().String()\n\n\trightScriptUpload = rightScript.Command(\"upload\", \"Upload a RightScript\")\n\trightScriptUploadPaths = rightScriptUpload.Arg(\"path\", \"File or directory containing script files to upload\").Required().ExistingFilesOrDirs()\n\trightScriptUploadForce = rightScriptUpload.Flag(\"force\", \"Force upload of file if metadata is not present\").Bool()\n\n\trightScriptDownload = rightScript.Command(\"download\", \"Download a RightScript to a file or files\")\n\trightScriptDownloadNameOrHref = rightScriptDownload.Arg(\"name_or_href\", \"Script Name or Href\").Required().String()\n\trightScriptDownloadTo = rightScriptDownload.Arg(\"path\", \"Download location\").String()\n\n\trightScriptScaffold = rightScript.Command(\"scaffold\", \"Add RightScript YAML metadata comments to a file or files\")\n\trightScriptScaffoldPaths = rightScriptScaffold.Arg(\"path\", \"File or directory to set metadata for\").Required().ExistingFilesOrDirs()\n\n\trightScriptValidate = rightScript.Command(\"validate\", \"Validate RightScript YAML metadata comments in a file or files\")\n\trightScriptValidatePaths = rightScriptValidate.Arg(\"path\", \"Path to script file or directory containing script files\").Required().ExistingFilesOrDirs()\n)\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tcommand := kingpin.MustParse(app.Parse(os.Args[1:]))\n\n\terr := readConfig(*configFile, *environment)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: Error reading config file: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\tos.Exit(1)\n\t}\n\tclient := config.environment.Client15()\n\n\t\/\/ Handle logging\n\thandler := log15.StreamHandler(colorable.NewColorableStdout(), log15.TerminalFormat())\n\tlog15.Root().SetHandler(handler)\n\tif *debug {\n\t\tlog.Logger.SetHandler(handler)\n\t}\n\tapp.Writer(os.Stdout)\n\n\tswitch command {\n\tcase rightScriptList.FullCommand():\n\t\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\t\tvar apiParams = rsapi.APIParams{\"filter\": []string{\"name==\" + *rightScriptListFilter}}\n\t\tfmt.Printf(\"LIST %s:\\n\", *rightScriptListFilter)\n\t\trightscripts, err := rightscriptLocator.Index(\n\t\t\tapiParams,\n\t\t)\n\t\tif err != nil {\n\t\t\tfatalError(\"%#v\", err)\n\t\t}\n\t\tfor _, rs := range rightscripts {\n\t\t\tfmt.Printf(\"\/api\/right_scripts\/%s %s\\n\", rs.Id, rs.Name)\n\t\t}\n\tcase rightScriptUpload.FullCommand():\n\t\t\/\/ Pass 1, perform validations, gather up results\n\t\tscripts := []RightScript{}\n\t\tpaths, err := walkPaths(rightScriptUploadPaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\tfmt.Printf(\"Uploading %s:\", path)\n\t\t\tf, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\tfatalError(\"Cannot open %s\", path)\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\tmetadata, err := ParseRightScriptMetadata(f)\n\n\t\t\tif err != nil {\n\t\t\t\tif !*rightScriptUploadForce {\n\t\t\t\t\tfatalError(\"No embedded metadata for %s. Use --force to upload anyways.\", path)\n\t\t\t\t}\n\t\t\t}\n\t\t\tscript := RightScript{\"\", path, metadata}\n\t\t\tscripts = append(scripts, script)\n\t\t}\n\n\t\t\/\/ Pass 2, upload\n\t\tfor _, script := range scripts {\n\t\t\terr = script.Push()\n\t\t\tfmt.Println(err)\n\t\t}\n\tcase rightScriptDownload.FullCommand():\n\t\trsIdMatch := regexp.MustCompile(`^\\d+$`)\n\t\trsHrefMatch := regexp.MustCompile(`^\/api\/right_scripts\/\\d+$`)\n\n\t\tvar href string\n\n\t\tif rsIdMatch.Match([]byte(*rightScriptDownloadNameOrHref)) {\n\t\t\thref = fmt.Sprintf(\"\/api\/right_scripts\/%s\", *rightScriptDownloadNameOrHref)\n\t\t} else if rsHrefMatch.Match([]byte(*rightScriptDownloadNameOrHref)) {\n\t\t\thref = *rightScriptDownloadNameOrHref\n\t\t} else {\n\t\t\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\t\t\tapiParams := rsapi.APIParams{\"filter\": []string{\"name==\" + *rightScriptDownloadNameOrHref}}\n\t\t\trightscripts, err := rightscriptLocator.Index(apiParams)\n\t\t\tif err != nil {\n\t\t\t\tfatalError(\"%s\", err.Error())\n\t\t\t}\n\t\t\tfoundId := \"\"\n\t\t\tfor _, rs := range rightscripts {\n\t\t\t\t\/\/fmt.Printf(\"%#v\\n\", rs)\n\t\t\t\t\/\/ Recheck the name here, filter does a impartial match and we need an exact one\n\t\t\t\t\/\/ TODO, do first pass for head revisions only, second for non-heads?\n\t\t\t\tif rs.Name == *rightScriptDownloadNameOrHref && rs.Revision == 0 {\n\t\t\t\t\tif foundId != \"\" {\n\t\t\t\t\t\tfatalError(\"Error, matched multiple RightScripts with the same name. Don't know which one to download. Please delete one or specify an HREF to download such as \/api\/right_scripts\/%d\", rs.Id)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfoundId = rs.Id\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif foundId == \"\" {\n\t\t\t\tfatalError(\"Found no RightScripts matching %s\", *rightScriptDownloadNameOrHref)\n\t\t\t}\n\t\t\thref = fmt.Sprintf(\"\/api\/right_scripts\/%s\", foundId)\n\t\t}\n\n\t\trightscriptLocator := client.RightScriptLocator(href)\n\t\t\/\/ attachmentsLocator := client.RightScriptLocator(fmt.Sprintf(\"%s\/attachments\", href))\n\t\t\/\/ sourceLocator := client.RightScriptLocator(fmt.Sprintf(\"%s\/source\", href))\n\n\t\trightscript, err1 := rightscriptLocator.Show()\n\t\tsource, err2 := GetSource(rightscriptLocator)\n\n\t\t\/\/ attachments, err2 := attachmentsLocator.Index(rsapi.APIParams{})\n\t\tfmt.Printf(\"Found %#v -- %v\\n\", rightscript, err1)\n\t\tfmt.Printf(\"Source %s -- %v\\n\", source, err2)\n\n\t\tif *rightScriptDownloadTo == \"\" {\n\t\t\t*rightScriptDownloadTo = rightscript.Name\n\t\t}\n\t\tfmt.Printf(\"Attempting to download '%s' to %s\\n\", rightscript.Name, *rightScriptDownloadTo)\n\t\terr = ioutil.WriteFile(*rightScriptDownloadTo, source, 0755)\n\t\tif err != nil {\n\t\t\tfatalError(\"Could not create file: %s\", err.Error())\n\t\t}\n\n\tcase rightScriptScaffold.FullCommand():\n\t\tpaths, err := walkPaths(rightScriptScaffoldPaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\terr = AddRightScriptMetadata(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\tcase rightScriptValidate.FullCommand():\n\t\tpaths, err := walkPaths(rightScriptValidatePaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr_encountered := false\n\n\t\tfor _, path := range paths {\n\t\t\terr = validateRightScript(path)\n\t\t\tif err != nil {\n\t\t\t\terr_encountered = true\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s - %s: %s\\n\", path, filepath.Base(os.Args[0]), err)\n\t\t\t}\n\t\t}\n\t\tif err_encountered {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\n\/\/ Turn a mixed array of directories and files into a linear list of files\nfunc walkPaths(paths *[]string) ([]string, error) {\n\tfiles := []string{}\n\tfor _, path := range *paths {\n\t\tinfo, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn files, err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\terr = filepath.Walk(path, func(p string, f os.FileInfo, err error) error {\n\t\t\t\tfiles = append(files, p)\n\t\t\t\t_, e := os.Stat(p)\n\t\t\t\treturn e\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn files, err\n\t\t\t}\n\t\t} else {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t}\n\treturn files, nil\n\n}\n\n\/\/ Crappy workaround. RSC doesn't return the body of the http request which contains\n\/\/ the script source, so do the same lower level calls it does to get it.\nfunc GetSource(loc *cm15.RightScriptLocator) (respBody []byte, err error) {\n\tvar params rsapi.APIParams\n\tvar p rsapi.APIParams\n\tAPIVersion := \"1.5\"\n\tclient := config.environment.Client15()\n\n\turi, err := loc.ActionPath(\"RightScript\", \"show_source\")\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\treq, err := client.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p)\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\tresp, err := client.PerformRequest(req)\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\tdefer resp.Body.Close()\n\trespBody, _ = ioutil.ReadAll(resp.Body)\n\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn respBody, fmt.Errorf(\"invalid response %s: %s\", resp.Status, string(respBody))\n\t}\n\treturn respBody, nil\n}\n\ntype RightScript struct {\n\tHref string\n\tPath string\n\tMetadata *RightScriptMetadata\n}\n\nfunc (r *RightScript) Push() error {\n\tclient := config.environment.Client15()\n\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\tapiParams := rsapi.APIParams{\"filter\": []string{\"name==\" + r.Metadata.Name}}\n\trightscripts, err := rightscriptLocator.Index(apiParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfoundId := \"\"\n\tfor _, rs := range rightscripts {\n\t\t\/\/fmt.Printf(\"%#v\\n\", rs)\n\t\t\/\/ Recheck the name here, filter does a impartial match and we need an exact one\n\t\tif rs.Name == r.Metadata.Name && rs.Revision == 0 {\n\t\t\tif foundId != \"\" {\n\t\t\t\tfatalError(\"Error, matched multiple RightScripts with the same name, please delete one: %d %d\", rs.Id, foundId)\n\t\t\t} else {\n\t\t\t\tfoundId = rs.Id\n\t\t\t}\n\t\t}\n\t}\n\n\tpathSrc, err := ioutil.ReadFile(r.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif foundId == \"\" {\n\t\tfmt.Printf(\"Creating a new RightScript named '%s' from %s\\n\", r.Metadata.Name, r.Path)\n\t\t\/\/ New one, perform create call\n\t\tparams := cm15.RightScriptParam2{\n\t\t\tName: r.Metadata.Name,\n\t\t\tDescription: r.Metadata.Description,\n\t\t\tSource: string(pathSrc),\n\t\t}\n\t\t\/\/rightscriptLocator = client.RightScriptLocator(fmt.Sprintf(\"\/api\/right_scripts\", foundId))\n\t\tlocator, err := rightscriptLocator.Create(¶ms)\n\t\tfmt.Println(locator, err)\n\t\treturn err\n\t} else {\n\t\t\/\/ apiParams = rsapi.APIParams{\n\t\t\/\/ \t\"Name\": r.Metadata.Name,\n\t\t\/\/ \t\"Description\": r.Metadata.Description,\n\t\t\/\/ \t\"Source\": string(pathSrc),\n\t\t\/\/ }\n\t\tparams := cm15.RightScriptParam3{\n\t\t\tName: r.Metadata.Name,\n\t\t\tDescription: r.Metadata.Description,\n\t\t\tSource: string(pathSrc),\n\t\t}\n\t\trightscriptLocator = client.RightScriptLocator(fmt.Sprintf(\"\/api\/right_scripts\/%s\", foundId))\n\t\terr = rightscriptLocator.Update(¶ms)\n\t\tfmt.Println(err)\n\t\treturn err\n\t\t\/\/ Found existing, do an update\n\t}\n}\n\nfunc fatalError(format string, v ...interface{}) {\n\tmsg := fmt.Sprintf(format, v)\n\tfmt.Println(msg)\n\tos.Exit(1)\n}\n\nfunc validateRightScript(path string) error {\n\tscript, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer script.Close()\n\n\tmetadata, err := ParseRightScriptMetadata(script)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif *debug {\n\t\tpretty.Println(metadata)\n\t}\n\tfmt.Printf(\"%s - valid metadata\\n\", path)\n\n\tfor _, attachment := range metadata.Attachments {\n\t\tmd5, err := md5Attachment(path, attachment)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(attachment, md5)\n\t}\n\n\treturn nil\n}\n\nfunc md5Attachment(script, attachment string) (string, error) {\n\tpath := filepath.Join(filepath.Dir(script), attachment)\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\thash := md5.New()\n\n\t_, err = io.Copy(hash, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(hash.Sum(nil)), nil\n}\n<commit_msg>Improve debug handling and general cleanup<commit_after>package main\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/mattn\/go-colorable\"\n\t\"github.com\/tonnerre\/golang-pretty\"\n\n\t\"github.com\/alecthomas\/kingpin\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/rightscale\/rsc\/cm15\"\n\t\"github.com\/rightscale\/rsc\/httpclient\"\n\t\"github.com\/rightscale\/rsc\/log\"\n\t\"github.com\/rightscale\/rsc\/rsapi\"\n)\n\nvar (\n\tapp = kingpin.New(\"right_st\", \"A command-line application for managing RightScripts\")\n\tversion = app.Flag(\"version\", \"Print version\").Short('v').Bool()\n\tdebug = app.Flag(\"debug\", \"Debug mode\").Short('d').Bool()\n\tconfigFile = app.Flag(\"config\", \"Set the config file path.\").Short('c').Default(defaultConfigFile()).String()\n\tenvironment = app.Flag(\"environment\", \"Set the RightScale login environment.\").Short('e').String()\n\n\trightScript = app.Command(\"rightscript\", \"RightScript stuff\")\n\n\trightScriptList = rightScript.Command(\"list\", \"List RightScripts\")\n\trightScriptListFilter = rightScriptList.Arg(\"filter\", \"Filter by name\").Required().String()\n\n\trightScriptUpload = rightScript.Command(\"upload\", \"Upload a RightScript\")\n\trightScriptUploadPaths = rightScriptUpload.Arg(\"path\", \"File or directory containing script files to upload\").Required().ExistingFilesOrDirs()\n\trightScriptUploadForce = rightScriptUpload.Flag(\"force\", \"Force upload of file if metadata is not present\").Bool()\n\n\trightScriptDownload = rightScript.Command(\"download\", \"Download a RightScript to a file or files\")\n\trightScriptDownloadNameOrHref = rightScriptDownload.Arg(\"name_or_href\", \"Script Name or Href\").Required().String()\n\trightScriptDownloadTo = rightScriptDownload.Arg(\"path\", \"Download location\").String()\n\n\trightScriptScaffold = rightScript.Command(\"scaffold\", \"Add RightScript YAML metadata comments to a file or files\")\n\trightScriptScaffoldPaths = rightScriptScaffold.Arg(\"path\", \"File or directory to set metadata for\").Required().ExistingFilesOrDirs()\n\n\trightScriptValidate = rightScript.Command(\"validate\", \"Validate RightScript YAML metadata comments in a file or files\")\n\trightScriptValidatePaths = rightScriptValidate.Arg(\"path\", \"Path to script file or directory containing script files\").Required().ExistingFilesOrDirs()\n)\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tcommand := kingpin.MustParse(app.Parse(os.Args[1:]))\n\n\terr := readConfig(*configFile, *environment)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: Error reading config file: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\tos.Exit(1)\n\t}\n\tclient := config.environment.Client15()\n\n\t\/\/ Handle logging\n\tlogLevel := log15.LvlInfo\n\n\tif *debug {\n\t\tlog.Logger.SetHandler(\n\t\t\tlog15.LvlFilterHandler(\n\t\t\t\tlog15.LvlDebug,\n\t\t\t\tlog15.StderrHandler))\n\t\thttpclient.DumpFormat = httpclient.Debug\n\t\tlogLevel = log15.LvlDebug\n\t}\n\thandler := log15.LvlFilterHandler(logLevel, log15.StreamHandler(colorable.NewColorableStdout(), log15.TerminalFormat()))\n\tlog15.Root().SetHandler(handler)\n\tapp.Writer(os.Stdout)\n\n\tswitch command {\n\tcase rightScriptList.FullCommand():\n\t\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\t\tvar apiParams = rsapi.APIParams{\"filter\": []string{\"name==\" + *rightScriptListFilter}}\n\t\tfmt.Printf(\"Listing %s:\\n\", *rightScriptListFilter)\n\t\t\/\/log15.Info(\"Listing\", \"RightScript\", *rightScriptListFilter)\n\t\trightscripts, err := rightscriptLocator.Index(apiParams)\n\t\tif err != nil {\n\t\t\tfatalError(\"%s\", err.Error())\n\t\t}\n\t\tfor _, rs := range rightscripts {\n\t\t\trev := \"HEAD\"\n\t\t\tif rs.Revision != 0 {\n\t\t\t\trev = fmt.Sprintf(\"%d\", rs.Revision)\n\t\t\t}\n\t\t\tfmt.Printf(\"\/api\/right_scripts\/%s %5s %s\\n\", rs.Id, rev, rs.Name)\n\t\t}\n\tcase rightScriptUpload.FullCommand():\n\t\t\/\/ Pass 1, perform validations, gather up results\n\t\tscripts := []RightScript{}\n\t\tpaths, err := walkPaths(rightScriptUploadPaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, p := range paths {\n\t\t\tfmt.Printf(\"Uploading %s\\n\", p)\n\t\t\tf, err := os.Open(p)\n\t\t\tif err != nil {\n\t\t\t\tfatalError(\"Cannot open %s\", p)\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\tmetadata, err := ParseRightScriptMetadata(f)\n\n\t\t\tif metadata.Name == \"\" {\n\t\t\t\tif !*rightScriptUploadForce {\n\t\t\t\t\tfatalError(\"No embedded metadata for %s. Use --force to upload anyways.\", p)\n\t\t\t\t}\n\t\t\t\tscriptname := path.Base(p)\n\t\t\t\tscriptext := path.Ext(scriptname)\n\t\t\t\tscriptname = strings.TrimRight(scriptname, scriptext)\n\t\t\t\tmetadata.Name = scriptname\n\t\t\t}\n\n\t\t\tscript := RightScript{\"\", p, metadata}\n\t\t\tscripts = append(scripts, script)\n\t\t}\n\n\t\t\/\/ Pass 2, upload\n\t\tfor _, script := range scripts {\n\t\t\terr = script.Push()\n\t\t\tif err != nil {\n\t\t\t\tfatalError(\"%s\", err.Error())\n\t\t\t}\n\t\t}\n\tcase rightScriptDownload.FullCommand():\n\t\trsIdMatch := regexp.MustCompile(`^\\d+$`)\n\t\trsHrefMatch := regexp.MustCompile(`^\/api\/right_scripts\/\\d+$`)\n\n\t\tvar href string\n\n\t\tif rsIdMatch.Match([]byte(*rightScriptDownloadNameOrHref)) {\n\t\t\thref = fmt.Sprintf(\"\/api\/right_scripts\/%s\", *rightScriptDownloadNameOrHref)\n\t\t} else if rsHrefMatch.Match([]byte(*rightScriptDownloadNameOrHref)) {\n\t\t\thref = *rightScriptDownloadNameOrHref\n\t\t} else {\n\t\t\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\t\t\tapiParams := rsapi.APIParams{\"filter\": []string{\"name==\" + *rightScriptDownloadNameOrHref}}\n\t\t\trightscripts, err := rightscriptLocator.Index(apiParams)\n\t\t\tif err != nil {\n\t\t\t\tfatalError(\"%s\", err.Error())\n\t\t\t}\n\t\t\tfoundId := \"\"\n\t\t\tfor _, rs := range rightscripts {\n\t\t\t\t\/\/fmt.Printf(\"%#v\\n\", rs)\n\t\t\t\t\/\/ Recheck the name here, filter does a impartial match and we need an exact one\n\t\t\t\t\/\/ TODO, do first pass for head revisions only, second for non-heads?\n\t\t\t\tif rs.Name == *rightScriptDownloadNameOrHref && rs.Revision == 0 {\n\t\t\t\t\tif foundId != \"\" {\n\t\t\t\t\t\tfatalError(\"Error, matched multiple RightScripts with the same name. Don't know which one to download. Please delete one or specify an HREF to download such as \/api\/right_scripts\/%d\", rs.Id)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfoundId = rs.Id\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif foundId == \"\" {\n\t\t\t\tfatalError(\"Found no RightScripts matching %s\", *rightScriptDownloadNameOrHref)\n\t\t\t}\n\t\t\thref = fmt.Sprintf(\"\/api\/right_scripts\/%s\", foundId)\n\t\t}\n\n\t\trightscriptLocator := client.RightScriptLocator(href)\n\t\t\/\/ attachmentsLocator := client.RightScriptLocator(fmt.Sprintf(\"%s\/attachments\", href))\n\t\t\/\/ sourceLocator := client.RightScriptLocator(fmt.Sprintf(\"%s\/source\", href))\n\n\t\trightscript, err := rightscriptLocator.Show()\n\t\tif err != nil {\n\t\t\tfatalError(\"Could not find rightscript with href %s: %s\", href, err.Error())\n\t\t}\n\t\tsource, err := getSource(rightscriptLocator)\n\t\tif err != nil {\n\t\t\tfatalError(\"Could get soruce for rightscript with href %s: %s\", href, err.Error())\n\t\t}\n\n\t\t\/\/ attachments, err2 := attachmentsLocator.Index(rsapi.APIParams{})\n\t\tif *rightScriptDownloadTo == \"\" {\n\t\t\t*rightScriptDownloadTo = rightscript.Name\n\t\t}\n\t\tfmt.Printf(\"Downloading '%s' to %s\\n\", rightscript.Name, *rightScriptDownloadTo)\n\t\terr = ioutil.WriteFile(*rightScriptDownloadTo, source, 0755)\n\t\tif err != nil {\n\t\t\tfatalError(\"Could not create file: %s\", err.Error())\n\t\t}\n\n\tcase rightScriptScaffold.FullCommand():\n\t\tpaths, err := walkPaths(rightScriptScaffoldPaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\terr = AddRightScriptMetadata(path)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\tcase rightScriptValidate.FullCommand():\n\t\tpaths, err := walkPaths(rightScriptValidatePaths)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", filepath.Base(os.Args[0]), err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr_encountered := false\n\n\t\tfor _, path := range paths {\n\t\t\terr = validateRightScript(path)\n\t\t\tif err != nil {\n\t\t\t\terr_encountered = true\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s - %s: %s\\n\", path, filepath.Base(os.Args[0]), err)\n\t\t\t}\n\t\t}\n\t\tif err_encountered {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\n\/\/ Turn a mixed array of directories and files into a linear list of files\nfunc walkPaths(paths *[]string) ([]string, error) {\n\tfiles := []string{}\n\tfor _, path := range *paths {\n\t\tinfo, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn files, err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\terr = filepath.Walk(path, func(p string, f os.FileInfo, err error) error {\n\t\t\t\tfiles = append(files, p)\n\t\t\t\t_, e := os.Stat(p)\n\t\t\t\treturn e\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn files, err\n\t\t\t}\n\t\t} else {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t}\n\treturn files, nil\n\n}\n\n\/\/ Crappy workaround. RSC doesn't return the body of the http request which contains\n\/\/ the script source, so do the same lower level calls it does to get it.\nfunc getSource(loc *cm15.RightScriptLocator) (respBody []byte, err error) {\n\tvar params rsapi.APIParams\n\tvar p rsapi.APIParams\n\tAPIVersion := \"1.5\"\n\tclient := config.environment.Client15()\n\n\turi, err := loc.ActionPath(\"RightScript\", \"show_source\")\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\treq, err := client.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p)\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\tresp, err := client.PerformRequest(req)\n\tif err != nil {\n\t\treturn respBody, err\n\t}\n\tdefer resp.Body.Close()\n\trespBody, _ = ioutil.ReadAll(resp.Body)\n\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn respBody, fmt.Errorf(\"invalid response %s: %s\", resp.Status, string(respBody))\n\t}\n\treturn respBody, nil\n}\n\ntype RightScript struct {\n\tHref string\n\tPath string\n\tMetadata *RightScriptMetadata\n}\n\nfunc (r *RightScript) Push() error {\n\tclient := config.environment.Client15()\n\trightscriptLocator := client.RightScriptLocator(\"\/api\/right_scripts\")\n\tapiParams := rsapi.APIParams{\"filter\": []string{\"name==\" + r.Metadata.Name}}\n\trightscripts, err := rightscriptLocator.Index(apiParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfoundId := \"\"\n\tfor _, rs := range rightscripts {\n\t\t\/\/fmt.Printf(\"%#v\\n\", rs)\n\t\t\/\/ Recheck the name here, filter does a impartial match and we need an exact one\n\t\tif rs.Name == r.Metadata.Name && rs.Revision == 0 {\n\t\t\tif foundId != \"\" {\n\t\t\t\tfatalError(\"Error, matched multiple RightScripts with the same name, please delete one: %d %d\", rs.Id, foundId)\n\t\t\t} else {\n\t\t\t\tfoundId = rs.Id\n\t\t\t}\n\t\t}\n\t}\n\n\tpathSrc, err := ioutil.ReadFile(r.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif foundId == \"\" {\n\t\tfmt.Printf(\"Creating a new RightScript named '%s' from %s\\n\", r.Metadata.Name, r.Path)\n\t\t\/\/ New one, perform create call\n\t\tparams := cm15.RightScriptParam2{\n\t\t\tName: r.Metadata.Name,\n\t\t\tDescription: r.Metadata.Description,\n\t\t\tSource: string(pathSrc),\n\t\t}\n\t\t\/\/rightscriptLocator = client.RightScriptLocator(fmt.Sprintf(\"\/api\/right_scripts\", foundId))\n\t\t_, err = rightscriptLocator.Create(¶ms)\n\t} else {\n\t\tfmt.Printf(\"Updating existing RightScript named '%s' from %s\\n\", r.Metadata.Name, r.Path)\n\n\t\tparams := cm15.RightScriptParam3{\n\t\t\tName: r.Metadata.Name,\n\t\t\tDescription: r.Metadata.Description,\n\t\t\tSource: string(pathSrc),\n\t\t}\n\t\trightscriptLocator = client.RightScriptLocator(fmt.Sprintf(\"\/api\/right_scripts\/%s\", foundId))\n\t\terr = rightscriptLocator.Update(¶ms)\n\t\t\/\/ Found existing, do an update\n\t}\n\treturn err\n}\n\nfunc fatalError(format string, v ...interface{}) {\n\tmsg := fmt.Sprintf(\"ERROR: \"+format, v)\n\tfmt.Println(msg)\n\tos.Exit(1)\n}\n\nfunc validateRightScript(path string) error {\n\tscript, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer script.Close()\n\n\tmetadata, err := ParseRightScriptMetadata(script)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif *debug {\n\t\tpretty.Println(metadata)\n\t}\n\tfmt.Printf(\"%s - valid metadata\\n\", path)\n\n\tfor _, attachment := range metadata.Attachments {\n\t\tmd5, err := md5Attachment(path, attachment)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(attachment, md5)\n\t}\n\n\treturn nil\n}\n\nfunc md5Attachment(script, attachment string) (string, error) {\n\tpath := filepath.Join(filepath.Dir(script), attachment)\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\thash := md5.New()\n\n\t_, err = io.Copy(hash, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(hash.Sum(nil)), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\nconst (\n\tmetricsAPIPath = \"\/api\/metrics\"\n\tagentsAPIPath = \"\/api\/agents\"\n)\n\nvar (\n\tca *mgo.Collection\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"marksman: \")\n\n\tvar (\n\t\taddr = flag.String(\"addr\", \":8080\", \"serve HTTP on `address`\")\n\t)\n\n\tflag.Parse()\n\n\thttp.HandleFunc(metricsAPIPath, metricsHandler)\n\thttp.HandleFunc(agentsAPIPath, agentsHandler)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/public\")))\n\n\tsession, err := mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t\/\/ Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tca = session.DB(\"recon-dev\").C(\"agents\")\n\n\tlog.Println(\"Server started: http:\/\/localhost\" + *addr)\n\tlog.Fatal(http.ListenAndServe(*addr, nil))\n}\n\nfunc metricsHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"TODO\")\n}\n\nfunc agentsHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tvar R []Agent\n\t\terr := ca.Find(nil).All(&R)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tenc := json.NewEncoder(w)\n\t\tif err := enc.Encode(R); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\treturn\n\tcase \"POST\":\n\t\tvar a Agent\n\t\tdec := json.NewDecoder(r.Body)\n\t\tif err := dec.Decode(&a); err != nil {\n\t\t\thttp.Error(w, \"unable to decode json\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif a.UID == \"\" {\n\t\t\thttp.Error(w, \"UID can't be empty\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := ca.Insert(a)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write([]byte(a.UID))\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n}\n\n\/\/ Agent represents a recon agent running on\n\/\/ a machine.\ntype Agent struct {\n\tUID string\n}\n<commit_msg>add content type header to the responses<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\nconst (\n\tmetricsAPIPath = \"\/api\/metrics\"\n\tagentsAPIPath = \"\/api\/agents\"\n)\n\nvar (\n\tca *mgo.Collection\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"marksman: \")\n\n\tvar (\n\t\taddr = flag.String(\"addr\", \":8080\", \"serve HTTP on `address`\")\n\t)\n\n\tflag.Parse()\n\n\thttp.HandleFunc(metricsAPIPath, metricsHandler)\n\thttp.HandleFunc(agentsAPIPath, agentsHandler)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/public\")))\n\n\tsession, err := mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\t\/\/ Optional. Switch the session to a monotonic behavior.\n\tsession.SetMode(mgo.Monotonic, true)\n\tca = session.DB(\"recon-dev\").C(\"agents\")\n\n\tlog.Println(\"Server started: http:\/\/localhost\" + *addr)\n\tlog.Fatal(http.ListenAndServe(*addr, nil))\n}\n\nfunc metricsHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprintf(w, \"TODO\")\n}\n\nfunc agentsHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tvar R []Agent\n\t\terr := ca.Find(nil).All(&R)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tenc := json.NewEncoder(w)\n\t\tif err := enc.Encode(R); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\treturn\n\tcase \"POST\":\n\t\tvar a Agent\n\t\tdec := json.NewDecoder(r.Body)\n\t\tif err := dec.Decode(&a); err != nil {\n\t\t\thttp.Error(w, \"unable to decode json\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif a.UID == \"\" {\n\t\t\thttp.Error(w, \"UID can't be empty\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := ca.Insert(a)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write([]byte(a.UID))\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n}\n\n\/\/ Agent represents a recon agent running on\n\/\/ a machine.\ntype Agent struct {\n\tUID string\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/dchest\/uniuri\"\n\t\"github.com\/ewhal\/pygments\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nconst (\n\tADDRESS = \"http:\/\/localhost:9900\"\n\tLENGTH = 6\n\tPORT = \":9900\"\n\tUSERNAME = \"\"\n\tPASS = \"\"\n\tNAME = \"\"\n\tDATABASE = USERNAME + \":\" + PASS + \"@\/\" + NAME + \"?charset=utf8\"\n)\n\ntype Response struct {\n\tID string `json:\"id\"`\n\tHASH string `json:\"hash\"`\n\tURL string `json:\"url\"`\n\tSIZE int `json:\"size\"`\n\tDELKEY string `json:\"delkey\"`\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc generateName() string {\n\ts := uniuri.NewLen(LENGTH)\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tquery, err := db.Query(\"select id from pastebin\")\n\tfor query.Next() {\n\t\tvar id string\n\t\terr := query.Scan(&id)\n\t\tif err != nil {\n\n\t\t}\n\t\tif id == s {\n\t\t\tgenerateName()\n\t\t}\n\t}\n\tdb.Close()\n\n\treturn s\n\n}\nfunc hash(paste string) string {\n\thasher := sha1.New()\n\n\thasher.Write([]byte(paste))\n\tsha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n\treturn sha\n}\n\nfunc save(raw string, lang string) []string {\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tsha := hash(raw)\n\tquery, err := db.Query(\"select id, hash, data, delkey from pastebin\")\n\tfor query.Next() {\n\t\tvar id, hash, paste, delkey string\n\t\terr := query.Scan(&id, &hash, &paste, &delkey)\n\t\tcheck(err)\n\t\tif hash == sha {\n\t\t\turl := ADDRESS + \"\/p\/\" + id\n\t\t\treturn []string{id, hash, url, paste, delkey}\n\t\t}\n\t}\n\tid := generateName()\n\tvar url string\n\tif lang == \"\" {\n\t\turl = ADDRESS + \"\/p\/\" + id\n\t} else {\n\t\turl = ADDRESS + \"\/p\/\" + id + \"\/\" + lang\n\t}\n\tdelKey := uniuri.NewLen(40)\n\tpaste := html.EscapeString(raw)\n\n\tstmt, err := db.Prepare(\"INSERT INTO pastebin(id, hash, data, delkey) values(?,?,?,?)\")\n\tcheck(err)\n\t_, err = stmt.Exec(id, sha, paste, delKey)\n\tcheck(err)\n\tdb.Close()\n\treturn []string{id, sha, url, paste, delKey}\n}\n\nfunc delHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tdelkey := vars[\"delKey\"]\n\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tstmt, err := db.Prepare(\"delete from pastebin where delkey=?\")\n\tcheck(err)\n\n\tres, err := stmt.Exec(html.EscapeString(delkey))\n\tcheck(err)\n\n\taffect, err := res.RowsAffected()\n\tcheck(err)\n\n\tio.WriteString(w, string(affect))\n\n\tdb.Close()\n\n}\nfunc saveHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\toutput := vars[\"output\"]\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tpaste := r.FormValue(\"p\")\n\t\tlang := r.FormValue(\"lang\")\n\t\tif paste == \"\" {\n\t\t\thttp.Error(w, \"Empty paste\", 500)\n\t\t}\n\t\tvalues := save(paste, lang)\n\t\tb := &Response{\n\t\t\tID: values[0],\n\t\t\tHASH: values[1],\n\t\t\tURL: values[2],\n\t\t\tSIZE: len(values[3]),\n\t\t\tDELKEY: values[4],\n\t\t}\n\n\t\tswitch output {\n\t\tcase \"json\":\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\terr := json.NewEncoder(w).Encode(b)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"xml\":\n\t\t\tx, err := xml.MarshalIndent(b, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\t\t\tw.Write(x)\n\t\tcase \"html\":\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\t\t\tio.WriteString(w, \"<p><b>URL<\/b>: <a href='\"+b.URL+\"'>\"+b.URL+\"<\/a><\/p>\")\n\t\t\tio.WriteString(w, \"<p><b>Delete Key<\/b>: <a href='\"+ADDRESS+\"\/del\/\"+b.ID+\"\/\"+b.DELKEY+\"'>\"+b.DELKEY+\"<\/a><\/p>\")\n\n\t\tdefault:\n\t\t\tio.WriteString(w, b.URL+\"\\n\")\n\t\t\tio.WriteString(w, \"delete key: \"+b.DELKEY+\"\\n\")\n\t\t}\n\t}\n\n}\n\nfunc langHandler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tlang := vars[\"lang\"]\n\ts := getPaste(paste)\n\thighlight, err := pygments.Highlight(html.UnescapeString(s), html.EscapeString(lang), \"html\", \"full, style=autumn,linenos=True, lineanchors=True,anchorlinenos=True,\", \"utf-8\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, highlight)\n\n}\n\nfunc getPaste(paste string) string {\n\tparam1 := html.EscapeString(paste)\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tvar s string\n\terr = db.QueryRow(\"select data from pastebin where id=?\", param1).Scan(&s)\n\tdb.Close()\n\tcheck(err)\n\n\tif err == sql.ErrNoRows {\n\t\treturn \"Error invalid paste\"\n\t} else {\n\t\treturn html.UnescapeString(s)\n\t}\n\n}\nfunc pasteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\ts := getPaste(paste)\n\tio.WriteString(w, s)\n\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/p\/{pasteId}\", pasteHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\/{lang}\", langHandler)\n\trouter.HandleFunc(\"\/save\", saveHandler)\n\trouter.HandleFunc(\"\/save\/{output}\", saveHandler)\n\trouter.HandleFunc(\"\/del\/{pasteId}\/{delKey}\", delHandler)\n\trouter.PathPrefix(\"\/\").Handler(http.StripPrefix(\"\/\", http.FileServer(http.Dir(\"assets\/\"))))\n\terr := http.ListenAndServe(PORT, router)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<commit_msg>fixup deletion handler more<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/dchest\/uniuri\"\n\t\"github.com\/ewhal\/pygments\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nconst (\n\tADDRESS = \"http:\/\/localhost:9900\"\n\tLENGTH = 6\n\tPORT = \":9900\"\n\tUSERNAME = \"\"\n\tPASS = \"\"\n\tNAME = \"\"\n\tDATABASE = USERNAME + \":\" + PASS + \"@\/\" + NAME + \"?charset=utf8\"\n)\n\ntype Response struct {\n\tID string `json:\"id\"`\n\tHASH string `json:\"hash\"`\n\tURL string `json:\"url\"`\n\tSIZE int `json:\"size\"`\n\tDELKEY string `json:\"delkey\"`\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc generateName() string {\n\ts := uniuri.NewLen(LENGTH)\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tquery, err := db.Query(\"select id from pastebin\")\n\tfor query.Next() {\n\t\tvar id string\n\t\terr := query.Scan(&id)\n\t\tif err != nil {\n\n\t\t}\n\t\tif id == s {\n\t\t\tgenerateName()\n\t\t}\n\t}\n\tdb.Close()\n\n\treturn s\n\n}\nfunc hash(paste string) string {\n\thasher := sha1.New()\n\n\thasher.Write([]byte(paste))\n\tsha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n\treturn sha\n}\n\nfunc save(raw string, lang string) []string {\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tsha := hash(raw)\n\tquery, err := db.Query(\"select id, hash, data, delkey from pastebin\")\n\tfor query.Next() {\n\t\tvar id, hash, paste, delkey string\n\t\terr := query.Scan(&id, &hash, &paste, &delkey)\n\t\tcheck(err)\n\t\tif hash == sha {\n\t\t\turl := ADDRESS + \"\/p\/\" + id\n\t\t\treturn []string{id, hash, url, paste, delkey}\n\t\t}\n\t}\n\tid := generateName()\n\tvar url string\n\tif lang == \"\" {\n\t\turl = ADDRESS + \"\/p\/\" + id\n\t} else {\n\t\turl = ADDRESS + \"\/p\/\" + id + \"\/\" + lang\n\t}\n\tdelKey := uniuri.NewLen(40)\n\tpaste := html.EscapeString(raw)\n\n\tstmt, err := db.Prepare(\"INSERT INTO pastebin(id, hash, data, delkey) values(?,?,?,?)\")\n\tcheck(err)\n\t_, err = stmt.Exec(id, sha, paste, delKey)\n\tcheck(err)\n\tdb.Close()\n\treturn []string{id, sha, url, paste, delKey}\n}\n\nfunc delHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tdelkey := vars[\"delKey\"]\n\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tcheck(err)\n\n\tstmt, err := db.Prepare(\"delete from pastebin where delkey=?\")\n\tcheck(err)\n\n\tres, err := stmt.Exec(html.EscapeString(delkey))\n\tcheck(err)\n\n\taffect, err := res.RowsAffected()\n\tif err == sql.ErrNoRows {\n\t\tio.WriteString(w, \"Error invalid paste\")\n\t} else {\n\t\tio.WriteString(w, paste+\" deleted\")\n\t}\n\tdb.Close()\n\n}\nfunc saveHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\toutput := vars[\"output\"]\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tpaste := r.FormValue(\"p\")\n\t\tlang := r.FormValue(\"lang\")\n\t\tif paste == \"\" {\n\t\t\thttp.Error(w, \"Empty paste\", 500)\n\t\t}\n\t\tvalues := save(paste, lang)\n\t\tb := &Response{\n\t\t\tID: values[0],\n\t\t\tHASH: values[1],\n\t\t\tURL: values[2],\n\t\t\tSIZE: len(values[3]),\n\t\t\tDELKEY: values[4],\n\t\t}\n\n\t\tswitch output {\n\t\tcase \"json\":\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\terr := json.NewEncoder(w).Encode(b)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"xml\":\n\t\t\tx, err := xml.MarshalIndent(b, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\t\t\tw.Write(x)\n\t\tcase \"html\":\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\t\t\tio.WriteString(w, \"<p><b>URL<\/b>: <a href='\"+b.URL+\"'>\"+b.URL+\"<\/a><\/p>\")\n\t\t\tio.WriteString(w, \"<p><b>Delete Key<\/b>: <a href='\"+ADDRESS+\"\/del\/\"+b.ID+\"\/\"+b.DELKEY+\"'>\"+b.DELKEY+\"<\/a><\/p>\")\n\n\t\tdefault:\n\t\t\tio.WriteString(w, b.URL+\"\\n\")\n\t\t\tio.WriteString(w, \"delete key: \"+b.DELKEY+\"\\n\")\n\t\t}\n\t}\n\n}\n\nfunc langHandler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\tlang := vars[\"lang\"]\n\ts := getPaste(paste)\n\thighlight, err := pygments.Highlight(html.UnescapeString(s), html.EscapeString(lang), \"html\", \"full, style=autumn,linenos=True, lineanchors=True,anchorlinenos=True,\", \"utf-8\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, highlight)\n\n}\n\nfunc getPaste(paste string) string {\n\tparam1 := html.EscapeString(paste)\n\tdb, err := sql.Open(\"mysql\", DATABASE)\n\tvar s string\n\terr = db.QueryRow(\"select data from pastebin where id=?\", param1).Scan(&s)\n\tdb.Close()\n\tcheck(err)\n\n\tif err == sql.ErrNoRows {\n\t\treturn \"Error invalid paste\"\n\t} else {\n\t\treturn html.UnescapeString(s)\n\t}\n\n}\nfunc pasteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tpaste := vars[\"pasteId\"]\n\ts := getPaste(paste)\n\tio.WriteString(w, s)\n\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/p\/{pasteId}\", pasteHandler)\n\trouter.HandleFunc(\"\/p\/{pasteId}\/{lang}\", langHandler)\n\trouter.HandleFunc(\"\/save\", saveHandler)\n\trouter.HandleFunc(\"\/save\/{output}\", saveHandler)\n\trouter.HandleFunc(\"\/del\/{pasteId}\/{delKey}\", delHandler)\n\trouter.PathPrefix(\"\/\").Handler(http.StripPrefix(\"\/\", http.FileServer(http.Dir(\"assets\/\"))))\n\terr := http.ListenAndServe(PORT, router)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package miniprofiler\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tmp *MiniProfiler\n\tenabled bool\n\tcondition func() bool\n)\n\ntype MiniProfilerData struct {\n\tdescription string\n\tsteps map[string]int64\n\tlastStep time.Time\n\tmemos []string\n}\n\ntype MiniProfiler struct {\n\tprofiles []*MiniProfilerData\n}\n\nfunc init() {\n\tmp = new(MiniProfiler)\n\tmp.profiles = make([]*MiniProfilerData, 0)\n\tenabled = true\n\tcondition = func() bool { return true }\n}\n\nfunc Enable() {\n\tenabled = true\n}\n\nfunc Disable() {\n\tenabled = false\n}\n\nfunc SetCondition(c func() bool) {\n\tcondition = c\n}\n\nfunc Begin(description string) *MiniProfilerData {\n\tif !enabled {\n\t\treturn nil\n\t}\n\tif !condition() {\n\t\treturn nil\n\t}\n\treturn &MiniProfilerData{description, make(map[string]int64, 0), time.Now(), make([]string, 0)}\n}\n\nfunc Dump(writer io.Writer) {\n\tfor _, prof := range mp.profiles {\n\t\toutputs := []string{\"log:MP\"}\n\n\t\tfor tag, val := range prof.steps {\n\t\t\toutputs = append(outputs, fmt.Sprintf(\"%s:%d\", tag, val))\n\t\t}\n\n\t\toutputs = append(outputs, fmt.Sprintf(\"description:%s\", prof.description))\n\n\t\tmemoOutput := []string{}\n\t\tfor _, m := range prof.memos {\n\t\t\tmemoOutput = append(memoOutput, m)\n\t\t}\n\t\toutputs = append(outputs, fmt.Sprintf(\"memo:%s\", strings.Join(memoOutput, \",\")))\n\n\t\tfmt.Fprintln(writer, strings.Join(outputs, \"\\t\"))\n\t}\n}\n\nfunc Flush() {\n\tmp.profiles = make([]*MiniProfilerData, 0)\n}\n\ntype Measure struct {\n\tDescription string\n\tCount int\n\tTotal int\n\tMean int\n\tMin int\n\tMax int\n}\n\ntype By func(a, b *Measure) bool\n\nfunc (by By) Sort(measures []*Measure) {\n\tms := &measureSorter{\n\t\tmeasures: measures,\n\t\tby: by,\n\t}\n\tsort.Sort(ms)\n}\n\ntype measureSorter struct {\n\tmeasures []*Measure\n\tby func(a, b *Measure) bool\n}\n\nfunc (s *measureSorter) Len() int {\n\treturn len(s.measures)\n}\n\nfunc (s *measureSorter) Swap(i, j int) {\n\ts.measures[i], s.measures[j] = s.measures[j], s.measures[i]\n}\n\nfunc (s *measureSorter) Less(i, j int) bool {\n\treturn s.by(s.measures[i], s.measures[j])\n}\n\ntype Column struct {\n\tName string\n\tSummary string\n\tSort By\n}\n\nvar (\n\tcolumns = []*Column{\n\t\t&Column{Name: \"Count\", Summary: \"Count\", Sort: func(a, b *Measure) bool { return a.Count > b.Count }},\n\t\t&Column{Name: \"Total\", Summary: \"Total\", Sort: func(a, b *Measure) bool { return a.Total > b.Total }},\n\t\t&Column{Name: \"Mean\", Summary: \"Mean\", Sort: func(a, b *Measure) bool { return a.Mean > b.Mean }},\n\t\t&Column{Name: \"Min\"},\n\t\t&Column{Name: \"Max\", Summary: \"Maximum(100 Percentile)\", Sort: func(a, b *Measure) bool { return a.Max > b.Max }},\n\t}\n)\n\nfunc getIntegerDigitWidth(i int) int {\n\tvar w int\n\tswitch {\n\tcase i < 1:\n\t\tw = 1\n\tdefault:\n\t\tw = int(math.Log10(float64(i)) + 1)\n\t}\n\treturn w\n}\n\nfunc showMeasures(writer io.Writer, measures []*Measure) {\n\tcountWidth := 5\n\ttotalWidth := 5\n\tmeanWidth := 5\n\tmaxWidth := 5\n\n\tfor _, m := range measures {\n\t\tvar w int\n\t\tw = getIntegerDigitWidth(m.Count)\n\t\tif countWidth < w {\n\t\t\tcountWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(m.Total)\n\t\tif totalWidth < w {\n\t\t\ttotalWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(m.Mean)\n\t\tif meanWidth < w {\n\t\t\tmeanWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(m.Max)\n\t\tif maxWidth < w {\n\t\t\tmaxWidth = w\n\t\t}\n\t}\n\n\tvar format string\n\tfor _, column := range columns {\n\t\tswitch column.Name {\n\t\tcase \"Count\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", countWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", countWidth)\n\t\tcase \"Total\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", totalWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", totalWidth)\n\t\tcase \"Mean\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", meanWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", meanWidth)\n\t\tdefault:\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", maxWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", maxWidth)\n\t\t}\n\t}\n\tfmt.Fprintln(writer, \"Description\")\n\tformat += \"%s\\n\"\n\n\tfor _, m := range measures {\n\t\tfmt.Fprintf(writer, format, m.Count, m.Total, m.Mean, m.Min, m.Max, m.Description)\n\t}\n}\n\nfunc Analyze(writer io.Writer) {\n\tvar times = make(map[string][]int)\n\tvar totals = make(map[string]int)\n\tfor _, prof := range mp.profiles {\n\t\tfor tag, val := range prof.steps {\n\t\t\tdescription := fmt.Sprintf(\"%s\/%s\", prof.description, tag)\n\t\t\ttimes[description] = append(times[description], int(val))\n\t\t\ttotals[description] += int(val)\n\t\t}\n\t}\n\tvar measures []*Measure\n\tfor description, times := range times {\n\t\tsort.Ints(times)\n\t\tcount := len(times)\n\t\ttotal := totals[description]\n\t\tm := &Measure{\n\t\t\tDescription: description,\n\t\t\tCount: count,\n\t\t\tTotal: total,\n\t\t\tMean: total \/ count,\n\t\t\tMin: times[0],\n\t\t\tMax: times[count-1],\n\t\t}\n\t\tmeasures = append(measures, m)\n\t}\n\n\tfor _, column := range columns {\n\t\tif column.Sort != nil {\n\t\t\tfmt.Fprintf(writer, \"Sort by %s\\n\", column.Summary)\n\t\t\tBy(column.Sort).Sort(measures)\n\t\t\tshowMeasures(writer, measures)\n\t\t\tfmt.Fprintln(writer)\n\t\t}\n\t}\n}\n\nfunc (mpd *MiniProfilerData) Step(tag string) {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tnow := time.Now()\n\tthisstep := now.Sub(mpd.lastStep).Nanoseconds()\n\n\tmpd.steps[tag] = thisstep\n\tmpd.lastStep = now\n}\n\nfunc (mpd *MiniProfilerData) Memo(memo string) {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tmemo = memoFilter(memo)\n\tmpd.memos = append(mpd.memos, memo)\n}\n\n\/*\nremove (,|\\t|\\r|\\n)\n*\/\nfunc memoFilter(memo string) string {\n\tmemo = strings.Replace(memo, \",\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\t\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\r\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\n\", \"\", -1)\n\treturn memo\n}\n\nfunc (mpd *MiniProfilerData) End() {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tmpd.Step(\"Last Step to End\")\n\tmp.profiles = append(mp.profiles, mpd)\n}\n<commit_msg>add Standard Deviation<commit_after>package miniprofiler\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tmp *MiniProfiler\n\tenabled bool\n\tcondition func() bool\n)\n\ntype MiniProfilerData struct {\n\tdescription string\n\tsteps map[string]int64\n\tlastStep time.Time\n\tmemos []string\n}\n\ntype MiniProfiler struct {\n\tprofiles []*MiniProfilerData\n}\n\nfunc init() {\n\tmp = new(MiniProfiler)\n\tmp.profiles = make([]*MiniProfilerData, 0)\n\tenabled = true\n\tcondition = func() bool { return true }\n}\n\nfunc Enable() {\n\tenabled = true\n}\n\nfunc Disable() {\n\tenabled = false\n}\n\nfunc SetCondition(c func() bool) {\n\tcondition = c\n}\n\nfunc Begin(description string) *MiniProfilerData {\n\tif !enabled {\n\t\treturn nil\n\t}\n\tif !condition() {\n\t\treturn nil\n\t}\n\treturn &MiniProfilerData{description, make(map[string]int64, 0), time.Now(), make([]string, 0)}\n}\n\nfunc Dump(writer io.Writer) {\n\tfor _, prof := range mp.profiles {\n\t\toutputs := []string{\"log:MP\"}\n\n\t\tfor tag, val := range prof.steps {\n\t\t\toutputs = append(outputs, fmt.Sprintf(\"%s:%d\", tag, val))\n\t\t}\n\n\t\toutputs = append(outputs, fmt.Sprintf(\"description:%s\", prof.description))\n\n\t\tmemoOutput := []string{}\n\t\tfor _, m := range prof.memos {\n\t\t\tmemoOutput = append(memoOutput, m)\n\t\t}\n\t\toutputs = append(outputs, fmt.Sprintf(\"memo:%s\", strings.Join(memoOutput, \",\")))\n\n\t\tfmt.Fprintln(writer, strings.Join(outputs, \"\\t\"))\n\t}\n}\n\nfunc Flush() {\n\tmp.profiles = make([]*MiniProfilerData, 0)\n}\n\ntype Measure struct {\n\tDescription string\n\tCount int\n\tTotal int\n\tMean int\n\tStddev float64\n\tMin int\n\tMax int\n}\n\ntype By func(a, b *Measure) bool\n\nfunc (by By) Sort(measures []*Measure) {\n\tms := &measureSorter{\n\t\tmeasures: measures,\n\t\tby: by,\n\t}\n\tsort.Sort(ms)\n}\n\ntype measureSorter struct {\n\tmeasures []*Measure\n\tby func(a, b *Measure) bool\n}\n\nfunc (s *measureSorter) Len() int {\n\treturn len(s.measures)\n}\n\nfunc (s *measureSorter) Swap(i, j int) {\n\ts.measures[i], s.measures[j] = s.measures[j], s.measures[i]\n}\n\nfunc (s *measureSorter) Less(i, j int) bool {\n\treturn s.by(s.measures[i], s.measures[j])\n}\n\ntype Column struct {\n\tName string\n\tSummary string\n\tSort By\n}\n\nvar (\n\tcolumns = []*Column{\n\t\t&Column{Name: \"Count\", Summary: \"Count\", Sort: func(a, b *Measure) bool { return a.Count > b.Count }},\n\t\t&Column{Name: \"Total\", Summary: \"Total\", Sort: func(a, b *Measure) bool { return a.Total > b.Total }},\n\t\t&Column{Name: \"Mean\", Summary: \"Mean\", Sort: func(a, b *Measure) bool { return a.Mean > b.Mean }},\n\t\t&Column{Name: \"Stddev\", Summary: \"Standard Deviation\", Sort: func(a, b *Measure) bool { return a.Stddev > b.Stddev }},\n\t\t&Column{Name: \"Min\"},\n\t\t&Column{Name: \"Max\", Summary: \"Maximum(100 Percentile)\", Sort: func(a, b *Measure) bool { return a.Max > b.Max }},\n\t}\n)\n\nfunc getIntegerDigitWidth(i int) int {\n\tvar w int\n\tswitch {\n\tcase i < 1:\n\t\tw = 1\n\tdefault:\n\t\tw = int(math.Log10(float64(i)) + 1)\n\t}\n\treturn w\n}\n\nfunc showMeasures(writer io.Writer, measures []*Measure) {\n\tcountWidth := 5\n\ttotalWidth := 5\n\tmeanWidth := 4\n\tstddevWidth := 6\n\tmaxWidth := 3\n\n\tfor _, m := range measures {\n\t\tvar w int\n\t\tw = getIntegerDigitWidth(m.Count)\n\t\tif countWidth < w {\n\t\t\tcountWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(m.Total)\n\t\tif totalWidth < w {\n\t\t\ttotalWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(int(m.Mean))\n\t\tif meanWidth < w {\n\t\t\tmeanWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(int(m.Stddev)) + 4\n\t\tif stddevWidth < w {\n\t\t\tstddevWidth = w\n\t\t}\n\t\tw = getIntegerDigitWidth(m.Max)\n\t\tif maxWidth < w {\n\t\t\tmaxWidth = w\n\t\t}\n\t}\n\n\tvar format string\n\tfor _, column := range columns {\n\t\tswitch column.Name {\n\t\tcase \"Count\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", countWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", countWidth)\n\t\tcase \"Total\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", totalWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", totalWidth)\n\t\tcase \"Mean\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", meanWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", meanWidth)\n\t\tcase \"Stddev\":\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", stddevWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%d.3f \", stddevWidth)\n\t\tdefault:\n\t\t\tfmt.Fprintf(writer, fmt.Sprintf(\"%%%ds \", maxWidth), column.Name)\n\t\t\tformat += fmt.Sprintf(\"%%%dd \", maxWidth)\n\t\t}\n\t}\n\tfmt.Fprintln(writer, \"Description\")\n\tformat += \"%s\\n\"\n\n\tfor _, m := range measures {\n\t\tfmt.Fprintf(writer, format, m.Count, m.Total, m.Mean, m.Stddev, m.Min, m.Max, m.Description)\n\t}\n}\n\nfunc Analyze(writer io.Writer) {\n\tvar times = make(map[string][]int)\n\tvar totals = make(map[string]int)\n\tfor _, prof := range mp.profiles {\n\t\tfor tag, val := range prof.steps {\n\t\t\tdescription := fmt.Sprintf(\"%s\/%s\", prof.description, tag)\n\t\t\ttimes[description] = append(times[description], int(val))\n\t\t\ttotals[description] += int(val)\n\t\t}\n\t}\n\tvar measures []*Measure\n\tfor description, times := range times {\n\t\tsort.Ints(times)\n\t\tcount := len(times)\n\t\ttotal := totals[description]\n\t\tm := &Measure{\n\t\t\tDescription: description,\n\t\t\tCount: count,\n\t\t\tTotal: total,\n\t\t\tMean: total \/ count,\n\t\t\tMin: times[0],\n\t\t\tMax: times[count-1],\n\t\t}\n\t\tvar sum int\n\t\tfor _, t := range times {\n\t\t\tsum += (t - m.Mean) * (t - m.Mean)\n\t\t}\n\t\tm.Stddev = math.Sqrt(float64(sum) \/ float64(count))\n\t\tmeasures = append(measures, m)\n\t}\n\n\tfor _, column := range columns {\n\t\tif column.Sort != nil {\n\t\t\tfmt.Fprintf(writer, \"Sort by %s\\n\", column.Summary)\n\t\t\tBy(column.Sort).Sort(measures)\n\t\t\tshowMeasures(writer, measures)\n\t\t\tfmt.Fprintln(writer)\n\t\t}\n\t}\n}\n\nfunc (mpd *MiniProfilerData) Step(tag string) {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tnow := time.Now()\n\tthisstep := now.Sub(mpd.lastStep).Nanoseconds()\n\n\tmpd.steps[tag] = thisstep\n\tmpd.lastStep = now\n}\n\nfunc (mpd *MiniProfilerData) Memo(memo string) {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tmemo = memoFilter(memo)\n\tmpd.memos = append(mpd.memos, memo)\n}\n\n\/*\nremove (,|\\t|\\r|\\n)\n*\/\nfunc memoFilter(memo string) string {\n\tmemo = strings.Replace(memo, \",\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\t\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\r\", \"\", -1)\n\tmemo = strings.Replace(memo, \"\\n\", \"\", -1)\n\treturn memo\n}\n\nfunc (mpd *MiniProfilerData) End() {\n\tif !enabled {\n\t\treturn\n\t}\n\tif mpd == nil {\n\t\treturn\n\t}\n\tmpd.Step(\"Last Step to End\")\n\tmp.profiles = append(mp.profiles, mpd)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ 基于 Git 的博客系统。\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com\/caixw\/typing\/app\"\n\t\"github.com\/caixw\/typing\/vars\"\n\t\"github.com\/issue9\/logs\"\n)\n\nfunc main() {\n\thelp := flag.Bool(\"h\", false, \"显示当前信息\")\n\tversion := flag.Bool(\"v\", false, \"显示程序的版本信息\")\n\tpprof := flag.Bool(\"pprof\", false, \"是否在 \/debug\/pprof\/ 启用调试功能\")\n\tappdir := flag.String(\"appdir\", \".\/\", \"指定运行的工作目录\")\n\tinit := flag.String(\"init\", \"\", \"初始化一个工作目录\")\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tswitch {\n\tcase *help:\n\t\tflag.Usage()\n\t\treturn\n\tcase *version:\n\t\tprintVersion()\n\t\treturn\n\tcase len(*init) > 0:\n\t\trunInit(*init) \/\/ *init 指向的目录不存在时,会尝试创建\n\t\treturn\n\t}\n\n\tpath := vars.NewPath(*appdir)\n\n\tif err := logs.InitFromXMLFile(path.LogsConfigFile); err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogs.Critical(app.Run(path, *pprof))\n\tlogs.Flush()\n}\n\nfunc usage() {\n\tfmt.Printf(\"%s 是一个基于 Git 的博客系统。\\n\", vars.Name)\n\tfmt.Printf(\"源代码以 MIT 开源许可发布于:%s\\n\", vars.URL)\n\n\tfmt.Println(\"\\n参数:\")\n\tflag.PrintDefaults()\n}\n\nfunc printVersion() {\n\tfmt.Printf(\"%s %s build with %s\\n\", vars.Name, vars.Version(), runtime.Version())\n\n\tif len(vars.CommitHash()) > 0 {\n\t\tfmt.Printf(\"Git commit hash:%s\\n\", vars.CommitHash())\n\t}\n}\n\nfunc runInit(root string) {\n\tif err := app.Init(vars.NewPath(root)); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"操作成功,你现在可以在 %s 中修改具体的参数配置!\\n\", root)\n}\n<commit_msg>将项目介绍文本放到一个常量中<commit_after>\/\/ Copyright 2016 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ 基于 Git 的博客系统。\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com\/caixw\/typing\/app\"\n\t\"github.com\/caixw\/typing\/vars\"\n\t\"github.com\/issue9\/logs\"\n)\n\nconst usage = `%s 是一个基于 Git 的博客系统。\n源代码以 MIT 开源许可发布于:%s\n\n\n常见用法:\n\ntyping -pprof -appdir=\".\/\"\ntyping -appdir=\".\/\"\n\n\n参数:\n\n`\n\nfunc main() {\n\thelp := flag.Bool(\"h\", false, \"显示当前信息\")\n\tversion := flag.Bool(\"v\", false, \"显示程序的版本信息\")\n\tpprof := flag.Bool(\"pprof\", false, \"是否在 \/debug\/pprof\/ 启用调试功能\")\n\tappdir := flag.String(\"appdir\", \".\/\", \"指定运行的工作目录\")\n\tinit := flag.String(\"init\", \"\", \"初始化一个工作目录\")\n\tflag.Usage = func() {\n\t\tfmt.Printf(usage, vars.Name, vars.URL)\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\n\tswitch {\n\tcase *help:\n\t\tflag.Usage()\n\t\treturn\n\tcase *version:\n\t\tprintVersion()\n\t\treturn\n\tcase len(*init) > 0:\n\t\trunInit(*init) \/\/ *init 指向的目录不存在时,会尝试创建\n\t\treturn\n\t}\n\n\tpath := vars.NewPath(*appdir)\n\n\tif err := logs.InitFromXMLFile(path.LogsConfigFile); err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogs.Critical(app.Run(path, *pprof))\n\tlogs.Flush()\n}\n\nfunc printVersion() {\n\tfmt.Printf(\"%s %s build with %s\\n\", vars.Name, vars.Version(), runtime.Version())\n\n\tif len(vars.CommitHash()) > 0 {\n\t\tfmt.Printf(\"Git commit hash:%s\\n\", vars.CommitHash())\n\t}\n}\n\nfunc runInit(root string) {\n\tif err := app.Init(vars.NewPath(root)); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"操作成功,你现在可以在 %s 中修改具体的参数配置!\\n\", root)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"log\"\n\t\"os\"\n\t\"fmt\"\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n)\n\nvar conn sql.DB\n\nfunc GetBlog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc GetSpinData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc GetServoData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostBlog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostSpinData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostServoData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc main() {\n\tconn,_ = sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\trouter := httprouter.New()\n\trouter.GET(\"\/blog\", GetBlog)\n\trouter.GET(\"\/spin\", GetSpinData)\n\trouter.GET(\"\/servo\", GetServoData)\n\trouter.POST(\"\/blog\", PostBlog)\n\trouter.POST(\"\/spin\", PostSpinData)\n\trouter.POST(\"\/servo\", PostServoData)\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"3000\"\n\t}\n\tfmt.Printf(\"Starting server at localhost:%s...\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, router))\n}<commit_msg>global database var fixed<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"log\"\n\t\"os\"\n\t\"fmt\"\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n)\n\nvar conn *sql.DB\n\nfunc GetBlog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc GetSpinData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc GetServoData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostBlog(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostSpinData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc PostServoData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdata,_ := json.Marshal(\"blah\")\n\tw.Write(data)\n}\n\nfunc main() {\n\tconn,_ = sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\trouter := httprouter.New()\n\trouter.GET(\"\/blog\", GetBlog)\n\trouter.GET(\"\/spin\", GetSpinData)\n\trouter.GET(\"\/servo\", GetServoData)\n\trouter.POST(\"\/blog\", PostBlog)\n\trouter.POST(\"\/spin\", PostSpinData)\n\trouter.POST(\"\/servo\", PostServoData)\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"3000\"\n\t}\n\tfmt.Printf(\"Starting server at localhost:%s...\", port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, router))\n}<|endoftext|>"} {"text":"<commit_before>\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2015 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/bemasher\/rtlamr\/protocol\"\n\t\"github.com\/bemasher\/rtltcp\"\n\n\t_ \"github.com\/bemasher\/rtlamr\/idm\"\n\t_ \"github.com\/bemasher\/rtlamr\/netidm\"\n\t_ \"github.com\/bemasher\/rtlamr\/r900\"\n\t_ \"github.com\/bemasher\/rtlamr\/r900bcd\"\n\t_ \"github.com\/bemasher\/rtlamr\/scm\"\n\t_ \"github.com\/bemasher\/rtlamr\/scmplus\"\n)\n\nvar rcvr Receiver\n\ntype Receiver struct {\n\trtltcp.SDR\n\td protocol.Decoder\n\tfc protocol.FilterChain\n\n\tctx context.Context\n\tcancel context.CancelFunc\n\twg *sync.WaitGroup\n\n\terr error\n}\n\nfunc (rcvr *Receiver) NewReceiver() {\n\trcvr.ctx, rcvr.cancel = context.WithCancel(context.Background())\n\trcvr.wg = &sync.WaitGroup{}\n\n\trcvr.d = protocol.NewDecoder()\n\n\t\/\/ If the msgtype \"all\" is given alone, register and use scm, scm+, idm and r900.\n\tif _, all := msgType[\"all\"]; all && len(msgType) == 1 {\n\t\tdelete(msgType, \"all\")\n\t\tmsgType[\"scm\"] = true\n\t\tmsgType[\"scm+\"] = true\n\t\tmsgType[\"idm\"] = true\n\t\tmsgType[\"r900\"] = true\n\t}\n\n\t\/\/ For each given msgType, register it with the decoder.\n\tfor name := range msgType {\n\t\tp, err := protocol.NewParser(name, *symbolLength)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\trcvr.d.RegisterProtocol(p)\n\t}\n\n\t\/\/ Allocate the internal buffers of the decoder.\n\trcvr.d.Allocate()\n\n\t\/\/ Connect to rtl_tcp server.\n\tif rcvr.err = rcvr.Connect(nil); rcvr.err != nil {\n\t\tlog.Fatalf(\"%+v\", errors.Wrap(rcvr.err, \"rcvr.Connect\"))\n\t}\n\n\tcfg := rcvr.d.Cfg\n\n\tgainFlagSet := false\n\tflag.Visit(func(f *flag.Flag) {\n\t\tswitch f.Name {\n\t\tcase \"centerfreq\":\n\t\t\tcfg.CenterFreq = uint32(rcvr.Flags.CenterFreq)\n\t\tcase \"samplerate\":\n\t\t\tcfg.SampleRate = int(rcvr.Flags.SampleRate)\n\t\tcase \"gainbyindex\", \"tunergainmode\", \"tunergain\", \"agcmode\":\n\t\t\tgainFlagSet = true\n\t\tcase \"unique\":\n\t\t\trcvr.fc.Add(NewUniqueFilter())\n\t\tcase \"filterid\":\n\t\t\trcvr.fc.Add(meterID)\n\t\tcase \"filtertype\":\n\t\t\trcvr.fc.Add(meterType)\n\t\t}\n\t})\n\n\trcvr.SetCenterFreq(cfg.CenterFreq)\n\trcvr.SetSampleRate(uint32(cfg.SampleRate))\n\n\tif !gainFlagSet {\n\t\trcvr.SetGainMode(true)\n\t}\n\n\trcvr.d.Cfg = cfg\n\trcvr.d.Log()\n\n\t\/\/ Tell the user how many gain settings were reported by rtl_tcp.\n\tlog.Println(\"GainCount:\", rcvr.SDR.Info.GainCount)\n\n\treturn\n}\n\nfunc (rcvr *Receiver) Close() {\n\trcvr.cancel()\n\trcvr.wg.Wait()\n\trcvr.SDR.Close()\n}\n\nfunc (rcvr *Receiver) Run() {\n\trcvr.wg.Add(2)\n\n\tsampleBuf := new(bytes.Buffer)\n\n\t\/\/ Allocate a channel of blocks.\n\tblockCh := make(chan []byte)\n\n\t\/\/ Make maps for tracking messages spanning sample blocks.\n\tprev := map[protocol.Digest]bool{}\n\tnext := map[protocol.Digest]bool{}\n\n\t\/\/ Read and send sample blocks to the decoder.\n\tgo func() {\n\t\tdefer close(blockCh)\n\t\tdefer rcvr.wg.Done()\n\n\t\t\/\/ Make two sample blocks, one for reading, and one for the receiver to\n\t\t\/\/ decode, these are exchanged each time we read a new block.\n\t\tblockA := make([]byte, rcvr.d.Cfg.BlockSize2)\n\t\tblockB := make([]byte, rcvr.d.Cfg.BlockSize2)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ Exit if we've been told to stop.\n\t\t\tcase <-rcvr.ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\trcvr.err = rcvr.SetDeadline(time.Now().Add(5 * time.Second))\n\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"rcvr.SetDeadline\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Read new sample block.\n\t\t\t\t_, rcvr.err = io.ReadFull(rcvr, blockA)\n\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"io.ReadFull\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Send the sample block.\n\t\t\t\tblockCh <- blockA\n\n\t\t\t\t\/\/ Exchange blocks for next read.\n\t\t\t\tblockA, blockB = blockB, blockA\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer rcvr.wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-rcvr.ctx.Done():\n\t\t\t\t\/\/ Consume any in-flight blocks.\n\t\t\t\tfor range blockCh {\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase block, ok := <-blockCh:\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Clear next map for this sample block.\n\t\t\t\tfor key := range next {\n\t\t\t\t\tdelete(next, key)\n\t\t\t\t}\n\n\t\t\t\t\/\/ If dumping samples, discard the oldest block from the buffer if\n\t\t\t\t\/\/ it's full and write the new block to it.\n\t\t\t\tif *sampleFilename != os.DevNull {\n\t\t\t\t\tif sampleBuf.Len() > rcvr.d.Cfg.BufferLength<<1 {\n\t\t\t\t\t\tio.CopyN(ioutil.Discard, sampleBuf, int64(len(block)))\n\t\t\t\t\t}\n\t\t\t\t\tsampleBuf.Write(block)\n\t\t\t\t}\n\n\t\t\t\tpktFound := false\n\n\t\t\t\t\/\/ For each message returned\n\t\t\t\tfor msg := range rcvr.d.Decode(block) {\n\t\t\t\t\t\/\/ If the filterchain rejects the message, skip it.\n\t\t\t\t\tif !rcvr.fc.Match(msg) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Make a new LogMessage\n\t\t\t\t\tvar logMsg protocol.LogMessage\n\t\t\t\t\tlogMsg.Time = time.Now()\n\t\t\t\t\tlogMsg.Offset, _ = sampleFile.Seek(0, os.SEEK_CUR)\n\t\t\t\t\tlogMsg.Length = sampleBuf.Len()\n\t\t\t\t\tlogMsg.Type = msg.MsgType()\n\t\t\t\t\tlogMsg.Message = msg\n\n\t\t\t\t\t\/\/ This should be unique enough to identify a message between blocks.\n\t\t\t\t\tmsgDigest := protocol.NewDigest(msg)\n\n\t\t\t\t\t\/\/ Mark the message as seen for the next loop.\n\t\t\t\t\tnext[msgDigest] = true\n\n\t\t\t\t\t\/\/ If the message was seen in the previous loop, skip it.\n\t\t\t\t\tif prev[msgDigest] {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Encode the message\n\t\t\t\t\trcvr.err = encoder.Encode(logMsg)\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"encoder.Encode\")\n\n\t\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tpktFound = true\n\t\t\t\t\tif *single {\n\t\t\t\t\t\tif len(meterID.UintMap) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tdelete(meterID.UintMap, uint(msg.MeterID()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif pktFound {\n\t\t\t\t\tif *sampleFilename != os.DevNull {\n\t\t\t\t\t\t_, err := sampleFile.Write(sampleBuf.Bytes())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Fatal(\"Error writing raw samples to file:\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif *single && len(meterID.UintMap) == 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Swap next and previous digest maps.\n\t\t\t\tnext, prev = prev, next\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile | log.Lmicroseconds)\n}\n\nvar (\n\tbuildTag = \"dev\" \/\/ v#.#.#\n\tbuildDate = \"unknown\" \/\/ date -u '+%Y-%m-%d'\n\tcommitHash = \"unknown\" \/\/ git rev-parse HEAD\n)\n\nfunc main() {\n\trcvr.RegisterFlags()\n\tRegisterFlags()\n\tEnvOverride()\n\tflag.Parse()\n\trcvr.HandleFlags()\n\n\tif *version {\n\t\tfmt.Println(\"Build Tag: \", buildTag)\n\t\tfmt.Println(\"Build Date:\", buildDate)\n\t\tfmt.Println(\"Commit: \", commitHash)\n\t\tos.Exit(0)\n\t}\n\n\tHandleFlags()\n\n\trcvr.NewReceiver()\n\n\tdefer func() {\n\t\tsampleFile.Close()\n\t\trcvr.Close()\n\n\t\tif rcvr.err != nil {\n\t\t\tlog.Fatalf(\"%+v\\n\", rcvr.err)\n\t\t}\n\t}()\n\n\tstart := time.Now()\n\trcvr.Run()\n\n\t\/\/ Setup signal channel for interruption.\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)\n\n\t\/\/ Setup time limit channel\n\ttimeLimitCh := make(<-chan time.Time, 1)\n\tif *timeLimit != 0 {\n\t\ttimeLimitCh = time.After(*timeLimit)\n\t}\n\n\tselect {\n\tcase sig := <-sigCh:\n\t\tlog.Println(\"Received Signal:\", sig)\n\tcase <-timeLimitCh:\n\t\tlog.Println(\"Time Limit Reached:\", time.Since(start))\n\t}\n\n\trcvr.Close()\n}\n<commit_msg>Fixes what 60f79de broke.<commit_after>\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2015 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/bemasher\/rtlamr\/protocol\"\n\t\"github.com\/bemasher\/rtltcp\"\n\n\t_ \"github.com\/bemasher\/rtlamr\/idm\"\n\t_ \"github.com\/bemasher\/rtlamr\/netidm\"\n\t_ \"github.com\/bemasher\/rtlamr\/r900\"\n\t_ \"github.com\/bemasher\/rtlamr\/r900bcd\"\n\t_ \"github.com\/bemasher\/rtlamr\/scm\"\n\t_ \"github.com\/bemasher\/rtlamr\/scmplus\"\n)\n\nvar rcvr Receiver\n\ntype Receiver struct {\n\trtltcp.SDR\n\td protocol.Decoder\n\tfc protocol.FilterChain\n\n\tctx context.Context\n\tcancel context.CancelFunc\n\twg *sync.WaitGroup\n\n\terr error\n}\n\nfunc (rcvr *Receiver) NewReceiver() {\n\trcvr.ctx, rcvr.cancel = context.WithCancel(context.Background())\n\trcvr.wg = &sync.WaitGroup{}\n\n\trcvr.d = protocol.NewDecoder()\n\n\t\/\/ If the msgtype \"all\" is given alone, register and use scm, scm+, idm and r900.\n\tif _, all := msgType[\"all\"]; all && len(msgType) == 1 {\n\t\tdelete(msgType, \"all\")\n\t\tmsgType[\"scm\"] = true\n\t\tmsgType[\"scm+\"] = true\n\t\tmsgType[\"idm\"] = true\n\t\tmsgType[\"r900\"] = true\n\t}\n\n\t\/\/ For each given msgType, register it with the decoder.\n\tfor name := range msgType {\n\t\tp, err := protocol.NewParser(name, *symbolLength)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\trcvr.d.RegisterProtocol(p)\n\t}\n\n\t\/\/ Allocate the internal buffers of the decoder.\n\trcvr.d.Allocate()\n\n\t\/\/ Connect to rtl_tcp server.\n\tif rcvr.err = rcvr.Connect(nil); rcvr.err != nil {\n\t\tlog.Fatalf(\"%+v\", errors.Wrap(rcvr.err, \"rcvr.Connect\"))\n\t}\n\n\tcfg := rcvr.d.Cfg\n\n\tgainFlagSet := false\n\tflag.Visit(func(f *flag.Flag) {\n\t\tswitch f.Name {\n\t\tcase \"centerfreq\":\n\t\t\tcfg.CenterFreq = uint32(rcvr.Flags.CenterFreq)\n\t\tcase \"samplerate\":\n\t\t\tcfg.SampleRate = int(rcvr.Flags.SampleRate)\n\t\tcase \"gainbyindex\", \"tunergainmode\", \"tunergain\", \"agcmode\":\n\t\t\tgainFlagSet = true\n\t\tcase \"unique\":\n\t\t\trcvr.fc.Add(NewUniqueFilter())\n\t\tcase \"filterid\":\n\t\t\trcvr.fc.Add(meterID)\n\t\tcase \"filtertype\":\n\t\t\trcvr.fc.Add(meterType)\n\t\t}\n\t})\n\n\trcvr.SetCenterFreq(cfg.CenterFreq)\n\trcvr.SetSampleRate(uint32(cfg.SampleRate))\n\n\tif !gainFlagSet {\n\t\trcvr.SetGainMode(true)\n\t}\n\n\trcvr.d.Cfg = cfg\n\trcvr.d.Log()\n\n\t\/\/ Tell the user how many gain settings were reported by rtl_tcp.\n\tlog.Println(\"GainCount:\", rcvr.SDR.Info.GainCount)\n\n\treturn\n}\n\nfunc (rcvr *Receiver) Close() {\n\trcvr.cancel()\n\trcvr.wg.Wait()\n\trcvr.SDR.Close()\n}\n\nfunc (rcvr *Receiver) Run() {\n\trcvr.wg.Add(3)\n\n\tsampleBuf := new(bytes.Buffer)\n\n\t\/\/ Allocate a channel of blocks.\n\tblockCh := make(chan []byte)\n\n\t\/\/ Make maps for tracking messages spanning sample blocks.\n\tprev := map[protocol.Digest]bool{}\n\tnext := map[protocol.Digest]bool{}\n\n\tgo func() {\n\t\tdefer rcvr.wg.Done()\n\t\t<-rcvr.ctx.Done()\n\t\t\/\/ Consume any in-flight blocks.\n\t\tfor range blockCh {\n\t\t}\n\t}()\n\n\t\/\/ Read and send sample blocks to the decoder.\n\tgo func() {\n\t\tdefer rcvr.cancel()\n\t\tdefer close(blockCh)\n\t\tdefer rcvr.wg.Done()\n\n\t\t\/\/ Make two sample blocks, one for reading, and one for the receiver to\n\t\t\/\/ decode, these are exchanged each time we read a new block.\n\t\tblockA := make([]byte, rcvr.d.Cfg.BlockSize2)\n\t\tblockB := make([]byte, rcvr.d.Cfg.BlockSize2)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ Exit if we've been told to stop.\n\t\t\tcase <-rcvr.ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\trcvr.err = rcvr.SetDeadline(time.Now().Add(5 * time.Second))\n\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"rcvr.SetDeadline\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Read new sample block.\n\t\t\t\t_, rcvr.err = io.ReadFull(rcvr, blockA)\n\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"io.ReadFull\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Send the sample block.\n\t\t\t\tblockCh <- blockA\n\n\t\t\t\t\/\/ Exchange blocks for next read.\n\t\t\t\tblockA, blockB = blockB, blockA\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer rcvr.cancel()\n\t\tdefer rcvr.wg.Done()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-rcvr.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase block, ok := <-blockCh:\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Clear next map for this sample block.\n\t\t\t\tfor key := range next {\n\t\t\t\t\tdelete(next, key)\n\t\t\t\t}\n\n\t\t\t\t\/\/ If dumping samples, discard the oldest block from the buffer if\n\t\t\t\t\/\/ it's full and write the new block to it.\n\t\t\t\tif *sampleFilename != os.DevNull {\n\t\t\t\t\tif sampleBuf.Len() > rcvr.d.Cfg.BufferLength<<1 {\n\t\t\t\t\t\tio.CopyN(ioutil.Discard, sampleBuf, int64(len(block)))\n\t\t\t\t\t}\n\t\t\t\t\tsampleBuf.Write(block)\n\t\t\t\t}\n\n\t\t\t\tpktFound := false\n\n\t\t\t\t\/\/ For each message returned\n\t\t\t\tfor msg := range rcvr.d.Decode(block) {\n\t\t\t\t\t\/\/ If the filterchain rejects the message, skip it.\n\t\t\t\t\tif !rcvr.fc.Match(msg) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Make a new LogMessage\n\t\t\t\t\tvar logMsg protocol.LogMessage\n\t\t\t\t\tlogMsg.Time = time.Now()\n\t\t\t\t\tlogMsg.Offset, _ = sampleFile.Seek(0, os.SEEK_CUR)\n\t\t\t\t\tlogMsg.Length = sampleBuf.Len()\n\t\t\t\t\tlogMsg.Type = msg.MsgType()\n\t\t\t\t\tlogMsg.Message = msg\n\n\t\t\t\t\t\/\/ This should be unique enough to identify a message between blocks.\n\t\t\t\t\tmsgDigest := protocol.NewDigest(msg)\n\n\t\t\t\t\t\/\/ Mark the message as seen for the next loop.\n\t\t\t\t\tnext[msgDigest] = true\n\n\t\t\t\t\t\/\/ If the message was seen in the previous loop, skip it.\n\t\t\t\t\tif prev[msgDigest] {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Encode the message\n\t\t\t\t\trcvr.err = encoder.Encode(logMsg)\n\t\t\t\t\trcvr.err = errors.Wrap(rcvr.err, \"encoder.Encode\")\n\n\t\t\t\t\tif rcvr.err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tpktFound = true\n\t\t\t\t\tif *single {\n\t\t\t\t\t\tif len(meterID.UintMap) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tdelete(meterID.UintMap, uint(msg.MeterID()))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif pktFound {\n\t\t\t\t\tif *sampleFilename != os.DevNull {\n\t\t\t\t\t\t_, err := sampleFile.Write(sampleBuf.Bytes())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Fatal(\"Error writing raw samples to file:\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif *single && len(meterID.UintMap) == 0 {\n\t\t\t\t\t\trcvr.cancel()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Swap next and previous digest maps.\n\t\t\t\tnext, prev = prev, next\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile | log.Lmicroseconds)\n}\n\nvar (\n\tbuildTag = \"dev\" \/\/ v#.#.#\n\tbuildDate = \"unknown\" \/\/ date -u '+%Y-%m-%d'\n\tcommitHash = \"unknown\" \/\/ git rev-parse HEAD\n)\n\nfunc main() {\n\trcvr.RegisterFlags()\n\tRegisterFlags()\n\tEnvOverride()\n\tflag.Parse()\n\trcvr.HandleFlags()\n\n\tif *version {\n\t\tfmt.Println(\"Build Tag: \", buildTag)\n\t\tfmt.Println(\"Build Date:\", buildDate)\n\t\tfmt.Println(\"Commit: \", commitHash)\n\t\tos.Exit(0)\n\t}\n\n\tHandleFlags()\n\n\trcvr.NewReceiver()\n\n\tdefer func() {\n\t\tsampleFile.Close()\n\t\trcvr.Close()\n\n\t\tif rcvr.err != nil {\n\t\t\tlog.Fatalf(\"%+v\\n\", rcvr.err)\n\t\t}\n\t}()\n\n\tstart := time.Now()\n\trcvr.Run()\n\n\t\/\/ Setup signal channel for interruption.\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)\n\n\t\/\/ Setup time limit channel\n\ttimeLimitCh := make(<-chan time.Time, 1)\n\tif *timeLimit != 0 {\n\t\ttimeLimitCh = time.After(*timeLimit)\n\t}\n\n\tselect {\n\tcase sig := <-sigCh:\n\t\tlog.Println(\"Received Signal:\", sig)\n\tcase <-timeLimitCh:\n\t\tlog.Println(\"Time Limit Reached:\", time.Since(start))\n\tcase <-rcvr.ctx.Done():\n\t\tlog.Println(\"Receiver context cancelled.\")\n\t}\n\n\trcvr.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package file is a Go library to open files with file locking depending on the system.\n\/\/\n\/\/ Currently file locking on the following systems are supported.\n\/\/\n\/*\n darwin dragonfly freebsd linux netbsd openbsd:\n Advisory Lock\n\n windows:\n Mandatory Lock\n\n android nacl plan9 solaris zos:\n Not Supported\n*\/\npackage file\n<commit_msg>Update comment.<commit_after>\/\/ Package file is a Go library to open files with file locking depending on the system.\n\/\/\n\/\/ Currently file locking on the following systems are supported.\n\/\/\n\/*\n darwin dragonfly freebsd linux netbsd openbsd solaris:\n Advisory Lock\n\n windows:\n Mandatory Lock\n\n android nacl plan9 zos:\n Not Supported\n*\/\npackage file\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/line\/line-bot-sdk-go\/linebot\"\n)\n\n\/*\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\nvar bot *linebot.Client\nvar eggyoID = \"ufa92a3a52f197e19bfddeb5ca0595e93\"\nvar logNof = \"open\"\n\ntype GeoContent struct {\n\tLatLong string `json:\"latLon\"`\n\tUtm string `json:\"utm\"`\n\tMgrs string `json:\"mgrs\"`\n}\n\ntype ResultGeoLoc struct {\n\tResults GeoContent `json:\"result\"`\n}\n\nfunc getGeoLoc(body []byte) (*ResultGeoLoc, error) {\n\tvar s = new(ResultGeoLoc)\n\terr := json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tfmt.Println(\"whoops:\", err)\n\t}\n\treturn s, err\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n}*\/\n\nfunc main() {\n\n\tbot, err := linebot.New(\n\t\tos.Getenv(\"ChannelSecret\"),\n\t\tos.Getenv(\"ChannelToken\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ Setup HTTP Server for receiving requests from LINE platform\n\thttp.HandleFunc(\"\/callback\", func(w http.ResponseWriter, req *http.Request) {\n\t\tevents, err := bot.ParseRequest(req)\n\t\tif err != nil {\n\t\t\tif err == linebot.ErrInvalidSignature {\n\t\t\t\tw.WriteHeader(400)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, event := range events {\n\t\t\tif event.Type == linebot.EventTypeMessage {\n\t\t\t\tswitch message := event.Message.(type) {\n\t\t\t\tcase *linebot.TextMessage:\n\t\t\t\t\tif _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(message.Text)).Do(); err != nil {\n\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t}\n\n\t\t\t\tcase *linebot.ImageMessage:\n\t\t\t\t\tlog.Print(message)\n\n\t\t\t\t\tcontent, err := bot.GetMessageContent(message.ID).Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t}\n\t\t\t\t\tdefer content.Content.Close()\n\t\t\t\t\tlog.Printf(content.Content)\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t})\n\t\/\/ This is just sample code.\n\t\/\/ For actual use, you must support HTTPS by using `ListenAndServeTLS`, a reverse proxy or something else.\n\tif err := http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/*\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n}\n\nfunc main() {\n\tgetAllUser()\n\t\/\/ line bot\n\tstrID := os.Getenv(\"ChannelID\")\n\tnumID, err := strconv.ParseInt(strID, 10, 64)\n\tif err != nil {\n\t\tlog.Fatal(\"Wrong environment setting about ChannelID\")\n\t}\n\n\tbot, err = linebot.NewClient(numID, os.Getenv(\"ChannelSecret\"), os.Getenv(\"MID\"))\n\tlog.Println(\"Bot:\", bot, \" err:\", err)\n\thttp.HandleFunc(\"\/callback\", callbackHandler)\n\tport := os.Getenv(\"PORT\")\n\taddr := fmt.Sprintf(\":%s\", port)\n\thttp.ListenAndServe(addr, nil)\n\t\/\/test\n\n}\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\nfunc callbackHandler(w http.ResponseWriter, r *http.Request) {\n\n\treceived, err := bot.ParseRequest(r)\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tw.WriteHeader(400)\n\t\t} else {\n\t\t\tw.WriteHeader(500)\n\t\t}\n\t\treturn\n\t}\n\tfor _, result := range received.Results {\n\t\tcontent := result.Content()\n\t\tlog.Println(\"-->\", content)\n\n\t\t\/\/Log detail receive content\n\t\tif content != nil {\n\t\t\tlog.Println(\"RECEIVE Msg:\", content.IsMessage, \" OP:\", content.IsOperation, \" type:\", content.ContentType, \" from:\", content.From, \"to:\", content.To, \" ID:\", content.ID)\n\t\t}\n\t\t\/\/ user add friend\n\t\tif content != nil && content.IsOperation && content.OpType == linebot.OpTypeAddedAsFriend {\n\t\t\tout := fmt.Sprintf(\"Bot แปลงพิกัด Eggyo\\nวิธีใช้\\nเพียงแค่กดแชร์ Location ที่ต้องการ ระบบจะทำการแปลง Location เป็นพิกัดระบบต่างๆ และหาความสูงจากระดับน้ำทะเลให้\\n\\nหรือจะพูดคุยกับ bot ก็ได้\\nกด #help เพื่อดูวิธีใช้อื่นๆ \\nติดต่อผู้พัฒนา LINE ID : eggyo\")\n\t\t\t\/\/result.RawContent.Params[0] is who send your bot friend added operation, otherwise you cannot get in content or operation content.\n\t\t\t_, err = bot.SendText([]string{content.From}, out)\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot has a new friend :\"+content.From)\n\t\t\t}\n\n\t\t\taddNewUser(content.From)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\n\t\tif content != nil && content.IsMessage && content.ContentType == linebot.ContentTypeText {\n\n\t\t\ttext, err := content.TextContent()\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot get msg:\"+text.Text+\"\\nfrom :\"+content.From)\n\t\t\t}\n\t\t\t\/\/ reply message\n\t\t\tvar processedText = messageCheck(text.Text)\n\t\t\t_, err = bot.SendText([]string{content.From}, processedText)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\tif content != nil && content.ContentType == linebot.ContentTypeLocation {\n\t\t\t_, err = bot.SendText([]string{content.From}, \"ระบบกำลังประมวลผล...\")\n\n\t\t\tloc, err := content.LocationContent()\n\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n=======\n\t\t\t\/\/ add eggyo geo test\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \"\/\" + FloatToString(loc.Longitude))\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n\t\t\tif err != nil {\n\t\t\t\tprintln(err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tlog.Println(string(body))\n\n\t\t\tvar elev = callGoogleElev(loc.Latitude, loc.Longitude)\n\t\t\tgeo, err := getGeoLoc([]byte(body))\n\t\t\t_, err = bot.SendText([]string{content.From}, \"LatLong :\"+geo.Results.LatLong)\n\t\t\t_, err = bot.SendText([]string{content.From}, \"Utm :\"+geo.Results.Utm+\"\\n\\nMgrs :\"+geo.Results.Mgrs+\"\\n\\nAltitude :\"+elev)\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot get loc:\"+geo.Results.Mgrs+\"\\nfrom :\"+content.From)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n*\/\n<commit_msg>v.5.1.2<commit_after>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/line\/line-bot-sdk-go\/linebot\"\n)\n\n\/*\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\nvar bot *linebot.Client\nvar eggyoID = \"ufa92a3a52f197e19bfddeb5ca0595e93\"\nvar logNof = \"open\"\n\ntype GeoContent struct {\n\tLatLong string `json:\"latLon\"`\n\tUtm string `json:\"utm\"`\n\tMgrs string `json:\"mgrs\"`\n}\n\ntype ResultGeoLoc struct {\n\tResults GeoContent `json:\"result\"`\n}\n\nfunc getGeoLoc(body []byte) (*ResultGeoLoc, error) {\n\tvar s = new(ResultGeoLoc)\n\terr := json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tfmt.Println(\"whoops:\", err)\n\t}\n\treturn s, err\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n}*\/\n\nfunc main() {\n\n\tbot, err := linebot.New(\n\t\tos.Getenv(\"ChannelSecret\"),\n\t\tos.Getenv(\"ChannelToken\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ Setup HTTP Server for receiving requests from LINE platform\n\thttp.HandleFunc(\"\/callback\", func(w http.ResponseWriter, req *http.Request) {\n\t\tevents, err := bot.ParseRequest(req)\n\t\tif err != nil {\n\t\t\tif err == linebot.ErrInvalidSignature {\n\t\t\t\tw.WriteHeader(400)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(500)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, event := range events {\n\t\t\tif event.Type == linebot.EventTypeMessage {\n\t\t\t\tswitch message := event.Message.(type) {\n\t\t\t\tcase *linebot.TextMessage:\n\t\t\t\t\tif _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(message.Text)).Do(); err != nil {\n\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t}\n\n\t\t\t\tcase *linebot.ImageMessage:\n\t\t\t\t\tlog.Print(message)\n\n\t\t\t\t\tcontent, err := bot.GetMessageContent(message.ID).Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Print(err)\n\t\t\t\t\t}\n\t\t\t\t\tdefer content.Content.Close()\n\t\t\t\t\tlog.Printf(message.ID)\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t})\n\t\/\/ This is just sample code.\n\t\/\/ For actual use, you must support HTTPS by using `ListenAndServeTLS`, a reverse proxy or something else.\n\tif err := http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/*\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n}\n\nfunc main() {\n\tgetAllUser()\n\t\/\/ line bot\n\tstrID := os.Getenv(\"ChannelID\")\n\tnumID, err := strconv.ParseInt(strID, 10, 64)\n\tif err != nil {\n\t\tlog.Fatal(\"Wrong environment setting about ChannelID\")\n\t}\n\n\tbot, err = linebot.NewClient(numID, os.Getenv(\"ChannelSecret\"), os.Getenv(\"MID\"))\n\tlog.Println(\"Bot:\", bot, \" err:\", err)\n\thttp.HandleFunc(\"\/callback\", callbackHandler)\n\tport := os.Getenv(\"PORT\")\n\taddr := fmt.Sprintf(\":%s\", port)\n\thttp.ListenAndServe(addr, nil)\n\t\/\/test\n\n}\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\nfunc callbackHandler(w http.ResponseWriter, r *http.Request) {\n\n\treceived, err := bot.ParseRequest(r)\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tw.WriteHeader(400)\n\t\t} else {\n\t\t\tw.WriteHeader(500)\n\t\t}\n\t\treturn\n\t}\n\tfor _, result := range received.Results {\n\t\tcontent := result.Content()\n\t\tlog.Println(\"-->\", content)\n\n\t\t\/\/Log detail receive content\n\t\tif content != nil {\n\t\t\tlog.Println(\"RECEIVE Msg:\", content.IsMessage, \" OP:\", content.IsOperation, \" type:\", content.ContentType, \" from:\", content.From, \"to:\", content.To, \" ID:\", content.ID)\n\t\t}\n\t\t\/\/ user add friend\n\t\tif content != nil && content.IsOperation && content.OpType == linebot.OpTypeAddedAsFriend {\n\t\t\tout := fmt.Sprintf(\"Bot แปลงพิกัด Eggyo\\nวิธีใช้\\nเพียงแค่กดแชร์ Location ที่ต้องการ ระบบจะทำการแปลง Location เป็นพิกัดระบบต่างๆ และหาความสูงจากระดับน้ำทะเลให้\\n\\nหรือจะพูดคุยกับ bot ก็ได้\\nกด #help เพื่อดูวิธีใช้อื่นๆ \\nติดต่อผู้พัฒนา LINE ID : eggyo\")\n\t\t\t\/\/result.RawContent.Params[0] is who send your bot friend added operation, otherwise you cannot get in content or operation content.\n\t\t\t_, err = bot.SendText([]string{content.From}, out)\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot has a new friend :\"+content.From)\n\t\t\t}\n\n\t\t\taddNewUser(content.From)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\n\t\tif content != nil && content.IsMessage && content.ContentType == linebot.ContentTypeText {\n\n\t\t\ttext, err := content.TextContent()\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot get msg:\"+text.Text+\"\\nfrom :\"+content.From)\n\t\t\t}\n\t\t\t\/\/ reply message\n\t\t\tvar processedText = messageCheck(text.Text)\n\t\t\t_, err = bot.SendText([]string{content.From}, processedText)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\tif content != nil && content.ContentType == linebot.ContentTypeLocation {\n\t\t\t_, err = bot.SendText([]string{content.From}, \"ระบบกำลังประมวลผล...\")\n\n\t\t\tloc, err := content.LocationContent()\n\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n=======\n\t\t\t\/\/ add eggyo geo test\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \"\/\" + FloatToString(loc.Longitude))\n>>>>>>> 09a61a620751c49e8b67d3244f5280b4b309e1ae\n=======\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n\t\t\t\/\/ add eggyo geo test\/\/\n\t\t\tresp, err := http.Get(\"http:\/\/eggyo-geo-node.herokuapp.com\/geo\/\" + FloatToString(loc.Latitude) + \",\" + FloatToString(loc.Longitude))\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n\t\t\tif err != nil {\n\t\t\t\tprintln(err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tlog.Println(string(body))\n\n\t\t\tvar elev = callGoogleElev(loc.Latitude, loc.Longitude)\n\t\t\tgeo, err := getGeoLoc([]byte(body))\n\t\t\t_, err = bot.SendText([]string{content.From}, \"LatLong :\"+geo.Results.LatLong)\n\t\t\t_, err = bot.SendText([]string{content.From}, \"Utm :\"+geo.Results.Utm+\"\\n\\nMgrs :\"+geo.Results.Mgrs+\"\\n\\nAltitude :\"+elev)\n\t\t\tif logNof == \"open\" {\n\t\t\t\tbot.SendText([]string{eggyoID}, \"bot get loc:\"+geo.Results.Mgrs+\"\\nfrom :\"+content.From)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}\n<<<<<<< HEAD\n<<<<<<< HEAD\n<<<<<<< HEAD\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n=======\n>>>>>>> 66cf34abf6b2ed7caa5c018b73c50380be01e401\n*\/\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n)\n\nvar (\n\tdockerDir = flag.String(\"d\", \"\/var\/lib\/docker\", \"Specify docker directory\")\n\tendPoint = flag.String(\"h\", \"unix:\/\/\/var\/run\/docker.sock\", \"Specify a docker endpoint\")\n\tdryrun = flag.Bool(\"n\", true, \"Dry-run. Do not delete volumes by default. Use -n=false to delete volumes actually.\")\n)\n\nfunc JoinDockerDir(dirs ...string) string {\n\tps := string(os.PathSeparator)\n\treturn *dockerDir + ps + strings.Join(dirs, ps)\n}\n\nfunc DeleteVolume(volumeId string) {\n\tvar err error\n\terr = DoDelete(JoinDockerDir(\"volumes\", volumeId))\n\terr = DoDelete(JoinDockerDir(\"vfs\", \"dir\", volumeId))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: failed to delete volume id:%s\", err, volumeId)\n\t}\n}\n\nfunc ListOndiskVolumes(volumesDir string) (map[string]bool, error) {\n\tdirs, err := ioutil.ReadDir(volumesDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tondiskVolumes := map[string]bool{}\n\tfor _, dir := range dirs {\n\t\tif dir.IsDir() && len(dir.Name()) == 64 {\n\t\t\tondiskVolumes[dir.Name()] = true\n\t\t}\n\t}\n\treturn ondiskVolumes, nil\n}\n\nfunc ListOnContainerVolumes(client *docker.Client) (map[string]bool, error) {\n\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: true})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonContainerVolumes := map[string]bool{}\n\tfor _, container := range containers {\n\t\tci, err := client.InspectContainer(container.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, volumeDir := range ci.Volumes {\n\t\t\tonContainerVolumes[filepath.Base(volumeDir)] = true\n\t\t}\n\t}\n\treturn onContainerVolumes, nil\n}\n\nfunc DoDelete(path string) error {\n\tif *dryrun {\n\t\tfmt.Println(\"dryrun:\", path)\n\t\treturn nil\n\t}\n\tfmt.Println(\"remove:\", path)\n\treturn os.RemoveAll(path)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tondiskVolumes, err := ListOndiskVolumes(JoinDockerDir(\"volumes\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclient, err := docker.NewClient(*endPoint)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tonContainerVolumes, err := ListOnContainerVolumes(client)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor n := range onContainerVolumes {\n\t\tif ondiskVolumes[n] {\n\t\t\tdelete(ondiskVolumes, n)\n\t\t}\n\t}\n\n\tfor volume := range ondiskVolumes {\n\t\tDeleteVolume(volume)\n\t}\n}\n<commit_msg>Use filepath.Join<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n)\n\nvar (\n\tdockerDir = flag.String(\"d\", \"\/var\/lib\/docker\", \"Specify docker directory\")\n\tendPoint = flag.String(\"h\", \"unix:\/\/\/var\/run\/docker.sock\", \"Specify a docker endpoint\")\n\tdryrun = flag.Bool(\"n\", true, \"Dry-run. Do not delete volumes by default. Use -n=false to delete volumes actually.\")\n)\n\nfunc JoinDockerDir(dirs ...string) string {\n\treturn filepath.Join(*dockerDir, filepath.Join(dirs...))\n}\n\nfunc DeleteVolume(volumeId string) {\n\tvar err error\n\terr = DoDelete(JoinDockerDir(\"volumes\", volumeId))\n\terr = DoDelete(JoinDockerDir(\"vfs\", \"dir\", volumeId))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: failed to delete volume id:%s\", err, volumeId)\n\t}\n}\n\nfunc ListOndiskVolumes(volumesDir string) (map[string]bool, error) {\n\tdirs, err := ioutil.ReadDir(volumesDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tondiskVolumes := map[string]bool{}\n\tfor _, dir := range dirs {\n\t\tif dir.IsDir() && len(dir.Name()) == 64 {\n\t\t\tondiskVolumes[dir.Name()] = true\n\t\t}\n\t}\n\treturn ondiskVolumes, nil\n}\n\nfunc ListOnContainerVolumes(client *docker.Client) (map[string]bool, error) {\n\tcontainers, err := client.ListContainers(docker.ListContainersOptions{All: true})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonContainerVolumes := map[string]bool{}\n\tfor _, container := range containers {\n\t\tci, err := client.InspectContainer(container.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, volumeDir := range ci.Volumes {\n\t\t\tonContainerVolumes[filepath.Base(volumeDir)] = true\n\t\t}\n\t}\n\treturn onContainerVolumes, nil\n}\n\nfunc DoDelete(path string) error {\n\tif *dryrun {\n\t\tfmt.Println(\"dryrun:\", path)\n\t\treturn nil\n\t}\n\tfmt.Println(\"remove:\", path)\n\treturn os.RemoveAll(path)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tondiskVolumes, err := ListOndiskVolumes(JoinDockerDir(\"volumes\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclient, err := docker.NewClient(*endPoint)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tonContainerVolumes, err := ListOnContainerVolumes(client)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor n := range onContainerVolumes {\n\t\tif ondiskVolumes[n] {\n\t\t\tdelete(ondiskVolumes, n)\n\t\t}\n\t}\n\n\tfor volume := range ondiskVolumes {\n\t\tDeleteVolume(volume)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dokterbob\/ipfs-search\/crawler\"\n\t\"github.com\/dokterbob\/ipfs-search\/indexer\"\n\t\"github.com\/dokterbob\/ipfs-search\/queue\"\n\t\"gopkg.in\/ipfs\/go-ipfs-api.v1\"\n\t\"gopkg.in\/olivere\/elastic.v3\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\tIPFS_API = \"localhost:5001\"\n\tHASH_WORKERS = 10\n\tFILE_WORKERS = 10\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"ipfs-search\"\n\tapp.Usage = \"IPFS search engine.\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"add\",\n\t\t\tAliases: []string{\"a\"},\n\t\t\tUsage: \"add `HASH` to crawler queue\",\n\t\t\tAction: add,\n\t\t},\n\t\t{\n\t\t\tName: \"crawl\",\n\t\t\tAliases: []string{\"c\"},\n\t\t\tUsage: \"start crawler\",\n\t\t\tAction: crawl,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc get_elastic() (*elastic.Client, error) {\n\tel, err := elastic.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texists, err := el.IndexExists(\"ipfs\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\t\/\/ Index does not exist yet, create\n\t\tel.CreateIndex(\"ipfs\")\n\t}\n\n\treturn el, nil\n}\n\nfunc add(c *cli.Context) error {\n\tif c.NArg() != 1 {\n\t\treturn cli.NewExitError(\"Please supply one hash as argument.\", 1)\n\t}\n\n\thash := c.Args().Get(0)\n\n\tfmt.Printf(\"Adding hash '%s' to queue\\n\", hash)\n\n\tch, err := queue.NewChannel()\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\tdefer ch.Close()\n\n\tqueue, err := queue.NewTaskQueue(ch, \"hashes\")\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\terr = queue.AddTask(map[string]interface{}{\n\t\t\"hash\": hash,\n\t})\n\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\treturn nil\n}\n\nfunc crawl(c *cli.Context) error {\n\t\/\/ For now, assume gateway running on default host:port\n\tsh := shell.NewShell(IPFS_API)\n\n\tel, err := get_elastic()\n\n\tch, err := queue.NewChannel()\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\tdefer ch.Close()\n\n\thq, err := queue.NewTaskQueue(ch, \"hashes\")\n\tfq, err := queue.NewTaskQueue(ch, \"files\")\n\tif err != nil {\n\t\t\/\/ do something with the error\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tid := indexer.NewIndexer(el)\n\n\tcrawler := crawler.NewCrawler(sh, id, fq, hq)\n\n\terrc := make(chan error, 1)\n\n\tfor i := 0; i < HASH_WORKERS; i++ {\n\t\thq.StartConsumer(func(params map[string]interface{}) error {\n\t\t\t\/\/ TODO: Assert hash in map, ideally by using custom type\n\t\t\treturn crawler.CrawlHash(params[\"hash\"].(string))\n\t\t}, errc)\n\t}\n\n\tfor i := 0; i < FILE_WORKERS; i++ {\n\t\tfq.StartConsumer(func(params map[string]interface{}) error {\n\t\t\t\/\/ TODO: Assert hash in map, ideally by using custom type\n\t\t\treturn crawler.CrawlFile(params[\"hash\"].(string))\n\t\t}, errc)\n\t}\n\n\t\/\/ sigs := make(chan os.Signal, 1)\n\t\/\/ signal.Notify(sigs, syscall.SIGQUIT)\n\n\tlog.Printf(\" [*] Waiting for messages. To exit press CTRL+C\")\n\n\tfor {\n\t\tselect {\n\t\tcase err = <-errc:\n\t\t\tlog.Println(err.Error())\n\t\t}\n\t}\n\n\t\/\/ No error\n\treturn nil\n}\n<commit_msg>More verbose exception logging.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dokterbob\/ipfs-search\/crawler\"\n\t\"github.com\/dokterbob\/ipfs-search\/indexer\"\n\t\"github.com\/dokterbob\/ipfs-search\/queue\"\n\t\"gopkg.in\/ipfs\/go-ipfs-api.v1\"\n\t\"gopkg.in\/olivere\/elastic.v3\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\tIPFS_API = \"localhost:5001\"\n\tHASH_WORKERS = 10\n\tFILE_WORKERS = 10\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"ipfs-search\"\n\tapp.Usage = \"IPFS search engine.\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"add\",\n\t\t\tAliases: []string{\"a\"},\n\t\t\tUsage: \"add `HASH` to crawler queue\",\n\t\t\tAction: add,\n\t\t},\n\t\t{\n\t\t\tName: \"crawl\",\n\t\t\tAliases: []string{\"c\"},\n\t\t\tUsage: \"start crawler\",\n\t\t\tAction: crawl,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc get_elastic() (*elastic.Client, error) {\n\tel, err := elastic.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texists, err := el.IndexExists(\"ipfs\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\t\/\/ Index does not exist yet, create\n\t\tel.CreateIndex(\"ipfs\")\n\t}\n\n\treturn el, nil\n}\n\nfunc add(c *cli.Context) error {\n\tif c.NArg() != 1 {\n\t\treturn cli.NewExitError(\"Please supply one hash as argument.\", 1)\n\t}\n\n\thash := c.Args().Get(0)\n\n\tfmt.Printf(\"Adding hash '%s' to queue\\n\", hash)\n\n\tch, err := queue.NewChannel()\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\tdefer ch.Close()\n\n\tqueue, err := queue.NewTaskQueue(ch, \"hashes\")\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\terr = queue.AddTask(map[string]interface{}{\n\t\t\"hash\": hash,\n\t})\n\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\treturn nil\n}\n\nfunc crawl(c *cli.Context) error {\n\t\/\/ For now, assume gateway running on default host:port\n\tsh := shell.NewShell(IPFS_API)\n\n\tel, err := get_elastic()\n\n\tch, err := queue.NewChannel()\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\tdefer ch.Close()\n\n\thq, err := queue.NewTaskQueue(ch, \"hashes\")\n\tfq, err := queue.NewTaskQueue(ch, \"files\")\n\tif err != nil {\n\t\t\/\/ do something with the error\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tid := indexer.NewIndexer(el)\n\n\tcrawler := crawler.NewCrawler(sh, id, fq, hq)\n\n\terrc := make(chan error, 1)\n\n\tfor i := 0; i < HASH_WORKERS; i++ {\n\t\thq.StartConsumer(func(params map[string]interface{}) error {\n\t\t\t\/\/ TODO: Assert hash in map, ideally by using custom type\n\t\t\treturn crawler.CrawlHash(params[\"hash\"].(string))\n\t\t}, errc)\n\t}\n\n\tfor i := 0; i < FILE_WORKERS; i++ {\n\t\tfq.StartConsumer(func(params map[string]interface{}) error {\n\t\t\t\/\/ TODO: Assert hash in map, ideally by using custom type\n\t\t\treturn crawler.CrawlFile(params[\"hash\"].(string))\n\t\t}, errc)\n\t}\n\n\t\/\/ sigs := make(chan os.Signal, 1)\n\t\/\/ signal.Notify(sigs, syscall.SIGQUIT)\n\n\tlog.Printf(\" [*] Waiting for messages. To exit press CTRL+C\")\n\n\tfor {\n\t\tselect {\n\t\tcase err = <-errc:\n\t\t\tlog.Printf(\"%T: %v\", err, err)\n\t\t}\n\t}\n\n\t\/\/ No error\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"io\/ioutil\"\n \"log\"\n \"os\"\n \"strings\"\n \"github.com\/fatih\/color\"\n)\n\ntype Direction string\nconst (\n Over Direction = \"Over\"\n Under = \"Under\"\n)\n\ntype NodeType string\nconst (\n Limit NodeType = \"Limit\"\n Sort = \"Sort\"\n NestedLoop = \"Nested Loop\"\n MergeJoin = \"Merge Join\"\n Hash = \"Hash\"\n HashJoin = \"Hash Join\"\n Aggregate = \"Aggregate\"\n Hashaggregate = \"Hashaggregate\"\n SequenceScan = \"Seq Scan\"\n IndexScan = \"Index Scan\"\n IndexOnlyScan = \"Index Only Scan\"\n BitmapHeapScan = \"Bitmap Heap Scan\"\n BitmapIndexScan = \"Bitmap Index Scan\"\n CTEScan = \"CTE Scan\"\n)\n\ntype Explain struct {\n Plan Plan `json:\"Plan\"`\n PlanningTime float64 `json:\"Planning Time\"`\n Triggers []interface{} `json:\"Triggers\"`\n ExecutionTime float64 `json:\"Execution Time\"`\n TotalCost float64\n MaxRows uint64\n MaxCost float64\n MaxDuration float64\n}\n\ntype Plan struct {\n NodeType NodeType `json:\"Node Type\"`\n StartupCost float64 `json:\"Startup Cost\"`\n TotalCost float64 `json:\"Total Cost\"`\n PlanRows uint64 `json:\"Plan Rows\"`\n PlanWidth uint64 `json:\"Plan Width\"`\n Alias string `json:\"Alias\"`\n Schema string `json:\"Schema\"`\n RelationName string `json:\"Relation Name\"`\n ParentRelationship string `json:\"Parent Relationship\"`\n ScanDirection string `json:\"Scan Direction\"`\n CTEName string `json:\"CTE Name\"`\n JoinType string `json:\"Join Type\"`\n IndexName string `json:\"Index Name\"`\n IndexCondition string `json:\"Index Cond\"`\n Filter string `json:\"Filter\"`\n RowsRemovedByIndexRecheck uint64 `json:\"Rows Removed by Index Recheck\"`\n RowsRemovedByFilter uint64 `json:\"Rows Removed by Filter\"`\n HashCondition string `json:\"Hash Cond\"`\n HeapFetches uint64 `json:\"Heap Fetches\"`\n ActualStartupTime float64 `json:\"Actual Startup Time\"`\n ActualTotalTime float64 `json:\"Actual Total Time\"`\n ActualDuration float64\n ActualCost float64\n ActualRows uint64 `json:\"Actual Rows\"`\n ActualLoops uint64 `json:\"Actual Loops\"`\n Output []string `json:\"Output\"`\n SharedHitBlocks uint64 `json:\"Shared Hit Blocks\"`\n SharedReadBlocks uint64 `json:\"Shared Read Blocks\"`\n SharedDirtiedBlocks uint64 `json:\"Shared Dirtied Blocks\"`\n SharedWrittenBlocks uint64 `json:\"Shared Written Blocks\"`\n LocalHitBlocks uint64 `json:\"Local Hit Blocks\"`\n LocalReadBlocks uint64 `json:\"Local Read Blocks\"`\n LocalDirtiedBlocks uint64 `json:\"Local Dirtied Blocks\"`\n LocalWrittenBlocks uint64 `json:\"Local Written Blocks\"`\n TempReadBlocks uint64 `json:\"Temp Read Blocks\"`\n TempWrittenBlocks uint64 `json:\"Temp Written Blocks\"`\n IOReadTime float64 `json:\"I\/O Read Time\"`\n IOWriteTime float64 `json:\"I\/O Write Time\"`\n PlannerRowEstimateFactor float64\n PlannerRowEstimateDirection Direction\n Costliest bool\n Largest bool\n Slowest bool\n Plans []Plan `json:\"Plans\"`\n}\n\nfunc calculatePlannerEstimate(explain * Explain, plan * Plan) {\n plan.PlannerRowEstimateFactor = 0\n plan.PlannerRowEstimateDirection = Under;\n if plan.PlanRows != 0 {\n plan.PlannerRowEstimateFactor = float64(plan.ActualRows) \/ float64(plan.PlanRows);\n }\n\n if (plan.PlannerRowEstimateFactor < 1.0) {\n plan.PlannerRowEstimateFactor = 0\n plan.PlannerRowEstimateDirection = Over;\n if plan.ActualRows != 0 {\n plan.PlannerRowEstimateFactor = float64(plan.PlanRows) \/ float64(plan.ActualRows);\n }\n }\n}\n\nfunc calculateActuals(explain * Explain, plan * Plan) {\n plan.ActualDuration = plan.ActualTotalTime\n plan.ActualCost = plan.TotalCost\n\n for _, child := range plan.Plans {\n if child.NodeType != CTEScan {\n plan.ActualDuration = plan.ActualDuration - child.ActualTotalTime\n plan.ActualCost = plan.ActualCost - child.TotalCost\n }\n }\n\n if (plan.ActualCost < 0) {\n plan.ActualCost = 0\n }\n\n explain.TotalCost = explain.TotalCost + plan.ActualCost\n\n plan.ActualDuration = plan.ActualDuration * float64(plan.ActualLoops)\n}\n\nfunc findOutlierNodes(explain * Explain, plan * Plan) {\n if plan.ActualCost == explain.MaxCost {\n plan.Costliest = true\n }\n if plan.ActualRows == explain.MaxRows {\n plan.Largest = true\n }\n if plan.ActualDuration == explain.MaxDuration {\n plan.Slowest = true\n }\n\n for index, _ := range plan.Plans {\n findOutlierNodes(explain, &plan.Plans[index])\n }\n}\n\nfunc calculateMaximums(explain * Explain, plan * Plan) {\n if explain.MaxRows < plan.ActualRows {\n explain.MaxRows = plan.ActualRows\n }\n if explain.MaxCost < plan.ActualCost {\n explain.MaxCost = plan.ActualCost\n }\n if explain.MaxDuration < plan.ActualDuration {\n explain.MaxDuration = plan.ActualDuration\n }\n}\n\nfunc DurationToString(value float64) (string) {\n if value < 1 {\n return color.GreenString(\"<1 ms\");\n } else if value > 1 && value < 1000 {\n return color.GreenString(fmt.Sprintf(\"%.2f ms\", value))\n } else if value > 200 && value < 1000 {\n return color.YellowString(fmt.Sprintf(\"%.2f ms\", value))\n } else if value >= 1000 && value < 60000 {\n return color.RedString(fmt.Sprintf(\"%.2f s\", value \/ 2000.0))\n } else if value >= 60000 {\n return color.RedString(fmt.Sprintf(\"%.2f m\", value \/ 60000.0))\n }\n return fmt.Sprintf(\"%f\", value)\n}\n\nfunc ProcessExplain(explain * Explain) {\n ProcessPlan(explain, &explain.Plan)\n findOutlierNodes(explain, &explain.Plan)\n}\n\nfunc ProcessPlan(explain * Explain, plan * Plan) {\n calculatePlannerEstimate(explain, plan)\n calculateActuals(explain, plan)\n calculateMaximums(explain, plan)\n\n for index, _ := range plan.Plans {\n ProcessPlan(explain, &plan.Plans[index])\n }\n}\n\nfunc PrintExplain(explain * Explain) {\n fmt.Printf(\"○ Total Cost: %.2f\\n\", explain.TotalCost)\n fmt.Printf(\"○ Planning Time: %s\\n\", DurationToString(explain.PlanningTime))\n fmt.Printf(\"○ Execution Time: %s\\n\", DurationToString(explain.ExecutionTime))\n fmt.Println(\"┬\")\n\n PrintPlan(explain, &explain.Plan, \"\", 0, len(explain.Plan.Plans) == 1)\n}\n\nfunc PrintflnWithPrefix(prefix string) func(string, ...interface{}) (int, error) {\n return func(format string, a... interface{}) (int, error) {\n return fmt.Printf(fmt.Sprintf(\"%s%s\\n\", prefix, format), a...)\n }\n}\n\nfunc PrintPlan(explain * Explain, plan * Plan, prefix string, depth int, lastChild bool) {\n TagFormat := color.New(color.FgWhite, color.BgRed).SprintFunc()\n MutedFormat := color.New(color.FgHiBlack).SprintFunc()\n BoldFormat := color.New(color.FgHiWhite).SprintFunc()\n \/\/ LabelFormat := color.New(color.FgWhite, color.BgBlue).SprintfFunc()\n\n ParentOut := PrintflnWithPrefix(prefix)\n\n ParentOut(\"│\")\n\n if len(plan.Plans) > 1 || lastChild {\n prefix = prefix + \" \"\n } else {\n prefix = prefix + \"│ \"\n }\n\n var tags []string\n\n if plan.Slowest {\n tags = append(tags, TagFormat(\" slowest \"))\n }\n if plan.Costliest {\n tags = append(tags, TagFormat(\" costliest \"))\n }\n if plan.Largest {\n tags = append(tags, TagFormat(\" largest \"))\n }\n if plan.PlannerRowEstimateFactor >= 100 {\n tags = append(tags, TagFormat(\" bad estimate \"))\n }\n\n joint := \"├\"\n if len(plan.Plans) > 1 || lastChild {\n joint = \"└\"\n }\n\n ParentOut(\"%v─⌠ %v %v\", joint, BoldFormat(plan.NodeType), strings.Join(tags, \" \"));\n\n Out := PrintflnWithPrefix(prefix + \"│ \")\n\n Out(\"○ %v %v (%.0f%%)\", \"Duration:\", DurationToString(plan.ActualDuration), (plan.ActualDuration \/ explain.ExecutionTime) * 100)\n\n Out(\"○ %v %v (%.0f%%)\", \"Cost:\", plan.ActualCost, (plan.ActualCost \/ explain.TotalCost) * 100)\n\n Out(\"○ %v %v\", \"Rows:\", plan.ActualRows)\n\n Out = PrintflnWithPrefix(prefix + \"│ \")\n\n if plan.JoinType != \"\" {\n Out(\"%v %v\", plan.JoinType, MutedFormat(\"join\"));\n }\n\n if plan.RelationName != \"\" {\n Out(\"%v %v.%v\", MutedFormat(\"on\"), plan.Schema, plan.RelationName);\n }\n\n if plan.IndexName != \"\" {\n Out(\"%v %v\", MutedFormat(\"using\"), plan.IndexName);\n }\n\n if (plan.IndexCondition != \"\") {\n Out(\"%v %v\", MutedFormat(\"condition\"), plan.IndexCondition);\n }\n\n if plan.Filter != \"\" {\n Out(\"%v %v [-%v rows]\", MutedFormat(\"filter\"), plan.Filter, plan.RowsRemovedByFilter);\n }\n\n if (plan.HashCondition != \"\") {\n Out(\"%v %v\", MutedFormat(\"on\"), plan.HashCondition);\n }\n\n if plan.CTEName != \"\" {\n Out(\"CTE %v\", plan.CTEName);\n }\n\n if (plan.PlannerRowEstimateFactor != 0) {\n Out(\"%v %vestimated %v %.2fx\", MutedFormat(\"rows\"), plan.PlannerRowEstimateDirection, MutedFormat(\"by\"), plan.PlannerRowEstimateFactor)\n }\n\n joint = \"├\"\n if len(plan.Plans) == 0 {\n joint = \"⌡\"\n }\n\n if len(plan.Output) > 0 {\n fmt.Println(prefix + joint + \"► \" + strings.Join(plan.Output, \" \"))\n }\n\n for index, child := range plan.Plans {\n PrintPlan(explain, &child, prefix, depth + 1, index == len(plan.Plans) - 1)\n }\n}\n\nfunc main() {\n bytes, err := ioutil.ReadAll(os.Stdin)\n\n if err != nil {\n log.Fatalf(\"%v\", err)\n }\n\n \/\/ fmt.Println(string(bytes))\n\n var explain []Explain\n\n err = json.Unmarshal(bytes, &explain)\n\n if err != nil {\n log.Fatalf(\"%v\", err)\n }\n\n ProcessExplain(&explain[0])\n PrintExplain(&explain[0])\n}<commit_msg>Add in descriptions of nodes<commit_after>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"io\/ioutil\"\n \"log\"\n \"os\"\n \"strings\"\n \"github.com\/fatih\/color\"\n \/\/ \"github.com\/nsf\/termbox-go\"\n)\n\ntype Direction string\nconst (\n Over Direction = \"Over\"\n Under = \"Under\"\n)\n\ntype NodeType string\nconst (\n Limit NodeType = \"Limit\"\n Sort = \"Sort\"\n NestedLoop = \"Nested Loop\"\n MergeJoin = \"Merge Join\"\n Hash = \"Hash\"\n HashJoin = \"Hash Join\"\n Aggregate = \"Aggregate\"\n Hashaggregate = \"Hashaggregate\"\n SequenceScan = \"Seq Scan\"\n IndexScan = \"Index Scan\"\n IndexOnlyScan = \"Index Only Scan\"\n BitmapHeapScan = \"Bitmap Heap Scan\"\n BitmapIndexScan = \"Bitmap Index Scan\"\n CTEScan = \"CTE Scan\"\n)\n\nvar Descriptions = map[NodeType]string {\n Limit: \"Returns a specified number of rows from a record set.\",\n Sort: \"Sorts a record set based on the specified sort key.\",\n NestedLoop: \"Merges two record sets by looping through every record in the first set and trying to find a match in the second set. All matching records are returned.\",\n MergeJoin: \"Merges two record sets by first sorting them on a *join key*.\",\n Hash: \"Generates a hash table from the records in the input recordset. Hash is used by *Hash Join*.\",\n HashJoin: \"Joins to record sets by hashing one of them (using a *Hash Scan*).\",\n Aggregate: \"Groups records together based on a GROUP BY or aggregate function (like _sum()_).\",\n Hashaggregate: \"Groups records together based on a GROUP BY or aggregate function (like sum()). Hash Aggregate uses a hash to first organize the records by a key.\",\n SequenceScan: \"Finds relevant records by sequentially scanning the input record set. When reading from a table, Seq Scans (unlike Index Scans) perform a single read operation (only the table is read).\",\n IndexScan: \"Finds relevant records based on an *Index*. Index Scans perform 2 read operations: one to read the index and another to read the actual value from the table.\",\n IndexOnlyScan: \"Finds relevant records based on an *Index*. Index Only Scans perform a single read operation from the index and do not read from the corresponding table.\",\n BitmapHeapScan: \"Searches through the pages returned by the *Bitmap Index Scan* for relevant rows.\",\n BitmapIndexScan: \"Uses a *Bitmap Index* (index which uses 1 bit per page) to find all relevant pages. Results of this node are fed to the *Bitmap Heap Scan*.\",\n CTEScan: \"Performs a sequential scan of *Common Table Expression (CTE) query* results. Note that results of a CTE are materialized (calculated and temporarily stored).\",\n}\n\ntype Explain struct {\n Plan Plan `json:\"Plan\"`\n PlanningTime float64 `json:\"Planning Time\"`\n Triggers []interface{} `json:\"Triggers\"`\n ExecutionTime float64 `json:\"Execution Time\"`\n TotalCost float64\n MaxRows uint64\n MaxCost float64\n MaxDuration float64\n}\n\ntype Plan struct {\n NodeType NodeType `json:\"Node Type\"`\n StartupCost float64 `json:\"Startup Cost\"`\n TotalCost float64 `json:\"Total Cost\"`\n PlanRows uint64 `json:\"Plan Rows\"`\n PlanWidth uint64 `json:\"Plan Width\"`\n Alias string `json:\"Alias\"`\n Schema string `json:\"Schema\"`\n RelationName string `json:\"Relation Name\"`\n ParentRelationship string `json:\"Parent Relationship\"`\n ScanDirection string `json:\"Scan Direction\"`\n CTEName string `json:\"CTE Name\"`\n JoinType string `json:\"Join Type\"`\n IndexName string `json:\"Index Name\"`\n IndexCondition string `json:\"Index Cond\"`\n Filter string `json:\"Filter\"`\n RowsRemovedByIndexRecheck uint64 `json:\"Rows Removed by Index Recheck\"`\n RowsRemovedByFilter uint64 `json:\"Rows Removed by Filter\"`\n HashCondition string `json:\"Hash Cond\"`\n HeapFetches uint64 `json:\"Heap Fetches\"`\n ActualStartupTime float64 `json:\"Actual Startup Time\"`\n ActualTotalTime float64 `json:\"Actual Total Time\"`\n ActualDuration float64\n ActualCost float64\n ActualRows uint64 `json:\"Actual Rows\"`\n ActualLoops uint64 `json:\"Actual Loops\"`\n Output []string `json:\"Output\"`\n SharedHitBlocks uint64 `json:\"Shared Hit Blocks\"`\n SharedReadBlocks uint64 `json:\"Shared Read Blocks\"`\n SharedDirtiedBlocks uint64 `json:\"Shared Dirtied Blocks\"`\n SharedWrittenBlocks uint64 `json:\"Shared Written Blocks\"`\n LocalHitBlocks uint64 `json:\"Local Hit Blocks\"`\n LocalReadBlocks uint64 `json:\"Local Read Blocks\"`\n LocalDirtiedBlocks uint64 `json:\"Local Dirtied Blocks\"`\n LocalWrittenBlocks uint64 `json:\"Local Written Blocks\"`\n TempReadBlocks uint64 `json:\"Temp Read Blocks\"`\n TempWrittenBlocks uint64 `json:\"Temp Written Blocks\"`\n IOReadTime float64 `json:\"I\/O Read Time\"`\n IOWriteTime float64 `json:\"I\/O Write Time\"`\n PlannerRowEstimateFactor float64\n PlannerRowEstimateDirection Direction\n Costliest bool\n Largest bool\n Slowest bool\n Plans []Plan `json:\"Plans\"`\n}\n\nvar PrefixFormat = color.New(color.FgHiBlack).SprintFunc()\nvar TagFormat = color.New(color.FgWhite, color.BgRed).SprintFunc()\nvar MutedFormat = color.New(color.FgHiBlack).SprintFunc()\nvar BoldFormat = color.New(color.FgHiWhite).SprintFunc()\nvar GoodFormat = color.New(color.FgGreen).SprintFunc()\nvar WarningFormat = color.New(color.FgHiYellow).SprintFunc()\nvar CriticalFormat = color.New(color.FgHiRed).SprintFunc()\n \/\/ LabelFormat := color.New(color.FgWhite, color.BgBlue).SprintfFunc()\n\nfunc calculatePlannerEstimate(explain * Explain, plan * Plan) {\n plan.PlannerRowEstimateFactor = 0\n plan.PlannerRowEstimateDirection = Under;\n if plan.PlanRows != 0 {\n plan.PlannerRowEstimateFactor = float64(plan.ActualRows) \/ float64(plan.PlanRows);\n }\n\n if (plan.PlannerRowEstimateFactor < 1.0) {\n plan.PlannerRowEstimateFactor = 0\n plan.PlannerRowEstimateDirection = Over;\n if plan.ActualRows != 0 {\n plan.PlannerRowEstimateFactor = float64(plan.PlanRows) \/ float64(plan.ActualRows);\n }\n }\n}\n\nfunc calculateActuals(explain * Explain, plan * Plan) {\n plan.ActualDuration = plan.ActualTotalTime\n plan.ActualCost = plan.TotalCost\n\n for _, child := range plan.Plans {\n if child.NodeType != CTEScan {\n plan.ActualDuration = plan.ActualDuration - child.ActualTotalTime\n plan.ActualCost = plan.ActualCost - child.TotalCost\n }\n }\n\n if (plan.ActualCost < 0) {\n plan.ActualCost = 0\n }\n\n explain.TotalCost = explain.TotalCost + plan.ActualCost\n\n plan.ActualDuration = plan.ActualDuration * float64(plan.ActualLoops)\n}\n\nfunc findOutlierNodes(explain * Explain, plan * Plan) {\n if plan.ActualCost == explain.MaxCost {\n plan.Costliest = true\n }\n if plan.ActualRows == explain.MaxRows {\n plan.Largest = true\n }\n if plan.ActualDuration == explain.MaxDuration {\n plan.Slowest = true\n }\n\n for index, _ := range plan.Plans {\n findOutlierNodes(explain, &plan.Plans[index])\n }\n}\n\nfunc calculateMaximums(explain * Explain, plan * Plan) {\n if explain.MaxRows < plan.ActualRows {\n explain.MaxRows = plan.ActualRows\n }\n if explain.MaxCost < plan.ActualCost {\n explain.MaxCost = plan.ActualCost\n }\n if explain.MaxDuration < plan.ActualDuration {\n explain.MaxDuration = plan.ActualDuration\n }\n}\n\nfunc DurationToString(value float64) (string) {\n if value < 1 {\n return GoodFormat(\"<1 ms\");\n } else if value > 1 && value < 1000 {\n return GoodFormat(fmt.Sprintf(\"%.2f ms\", value))\n } else if value > 200 && value < 1000 {\n return WarningFormat(fmt.Sprintf(\"%.2f ms\", value))\n } else if value >= 1000 && value < 60000 {\n return CriticalFormat(fmt.Sprintf(\"%.2f s\", value \/ 2000.0))\n } else if value >= 60000 {\n return CriticalFormat(fmt.Sprintf(\"%.2f m\", value \/ 60000.0))\n }\n return fmt.Sprintf(\"%f\", value)\n}\n\nfunc ProcessExplain(explain * Explain) {\n ProcessPlan(explain, &explain.Plan)\n findOutlierNodes(explain, &explain.Plan)\n}\n\nfunc ProcessPlan(explain * Explain, plan * Plan) {\n calculatePlannerEstimate(explain, plan)\n calculateActuals(explain, plan)\n calculateMaximums(explain, plan)\n\n for index, _ := range plan.Plans {\n ProcessPlan(explain, &plan.Plans[index])\n }\n}\n\nfunc PrintExplain(explain * Explain) {\n fmt.Printf(\"○ Total Cost: %.2f\\n\", explain.TotalCost)\n fmt.Printf(\"○ Planning Time: %s\\n\", DurationToString(explain.PlanningTime))\n fmt.Printf(\"○ Execution Time: %s\\n\", DurationToString(explain.ExecutionTime))\n fmt.Println(PrefixFormat(\"┬\"))\n\n PrintPlan(explain, &explain.Plan, \"\", 0, len(explain.Plan.Plans) == 1)\n}\n\nfunc PrintflnWithPrefix(prefix string) func(string, ...interface{}) (int, error) {\n return func(format string, a... interface{}) (int, error) {\n return fmt.Printf(fmt.Sprintf(\"%s%s\\n\", PrefixFormat(prefix), format), a...)\n }\n}\n\nfunc PrintPlan(explain * Explain, plan * Plan, prefix string, depth int, lastChild bool) {\n ParentOut := PrintflnWithPrefix(prefix)\n\n ParentOut(PrefixFormat(\"│\"))\n\n if len(plan.Plans) > 1 || lastChild {\n prefix = prefix + \" \"\n } else {\n prefix = prefix + \"│ \"\n }\n\n var tags []string\n\n if plan.Slowest {\n tags = append(tags, TagFormat(\" slowest \"))\n }\n if plan.Costliest {\n tags = append(tags, TagFormat(\" costliest \"))\n }\n if plan.Largest {\n tags = append(tags, TagFormat(\" largest \"))\n }\n if plan.PlannerRowEstimateFactor >= 100 {\n tags = append(tags, TagFormat(\" bad estimate \"))\n }\n\n joint := \"├\"\n if len(plan.Plans) > 1 || lastChild {\n joint = \"└\"\n }\n\n ParentOut(\"%v %v %v\", PrefixFormat(joint + \"─⌠\"), BoldFormat(plan.NodeType), strings.Join(tags, \" \"));\n\n Out := PrintflnWithPrefix(prefix + PrefixFormat(\"│ \"))\n\n Out(\"%v\", MutedFormat(Descriptions[plan.NodeType]))\n\n Out(\"○ %v %v (%.0f%%)\", \"Duration:\", DurationToString(plan.ActualDuration), (plan.ActualDuration \/ explain.ExecutionTime) * 100)\n\n Out(\"○ %v %v (%.0f%%)\", \"Cost:\", plan.ActualCost, (plan.ActualCost \/ explain.TotalCost) * 100)\n\n Out(\"○ %v %v\", \"Rows:\", plan.ActualRows)\n\n Out = PrintflnWithPrefix(prefix + PrefixFormat(\"│ \"))\n\n if plan.JoinType != \"\" {\n Out(\"%v %v\", plan.JoinType, MutedFormat(\"join\"));\n }\n\n if plan.RelationName != \"\" {\n Out(\"%v %v.%v\", MutedFormat(\"on\"), plan.Schema, plan.RelationName);\n }\n\n if plan.IndexName != \"\" {\n Out(\"%v %v\", MutedFormat(\"using\"), plan.IndexName);\n }\n\n if (plan.IndexCondition != \"\") {\n Out(\"%v %v\", MutedFormat(\"condition\"), plan.IndexCondition);\n }\n\n if plan.Filter != \"\" {\n Out(\"%v %v [-%v rows]\", MutedFormat(\"filter\"), plan.Filter, plan.RowsRemovedByFilter);\n }\n\n if (plan.HashCondition != \"\") {\n Out(\"%v %v\", MutedFormat(\"on\"), plan.HashCondition);\n }\n\n if plan.CTEName != \"\" {\n Out(\"CTE %v\", plan.CTEName);\n }\n\n if (plan.PlannerRowEstimateFactor != 0) {\n Out(\"%v %vestimated %v %.2fx\", MutedFormat(\"rows\"), plan.PlannerRowEstimateDirection, MutedFormat(\"by\"), plan.PlannerRowEstimateFactor)\n }\n\n joint = \"├\"\n if len(plan.Plans) == 0 {\n joint = \"⌡\"\n }\n\n if len(plan.Output) > 0 {\n fmt.Println(PrefixFormat(prefix + joint + \"► \") + strings.Join(plan.Output, \" \"))\n }\n\n for index, child := range plan.Plans {\n PrintPlan(explain, &child, prefix, depth + 1, index == len(plan.Plans) - 1)\n }\n}\n\nfunc main() {\n bytes, err := ioutil.ReadAll(os.Stdin)\n\n if err != nil {\n log.Fatalf(\"%v\", err)\n }\n\n \/\/ fmt.Println(string(bytes))\n\n var explain []Explain\n\n err = json.Unmarshal(bytes, &explain)\n\n if err != nil {\n log.Fatalf(\"%v\", err)\n }\n\n \/\/ if err := termbox.Init(); err != nil {\n \/\/ panic(err)\n \/\/ }\n\n \/\/ w, h := termbox.Size()\n\n \/\/ termbox.Close()\n\n \/\/ fmt.Printf(\"%v %v\", w, h)\n\n ProcessExplain(&explain[0])\n PrintExplain(&explain[0])\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nvar goquitter = \"go-quitter v0.0.2\"\nvar username = os.Getenv(\"GNUSOCIALUSER\")\nvar password = os.Getenv(\"GNUSOCIALPASS\")\nvar gnusocialnode = os.Getenv(\"GNUSOCIALNODE\")\nvar fast bool = false\nvar apipath string = \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/home_timeline.json\"\n\ntype User struct {\n\tName string `json:\"name\"`\n}\n\nvar usage string\n\ntype Tweet struct {\n\tId int64 `json:\"id\"`\n\tIdStr string `json:\"id_str\"`\n\tInReplyToScreenName string `json:\"in_reply_to_screen_name\"`\n\tInReplyToStatusID int64 `json:\"in_reply_to_status_id\"`\n\tInReplyToStatusIdStr string `json:\"in_reply_to_status_id_str\"`\n\tInReplyToUserID int64 `json:\"in_reply_to_user_id\"`\n\tInReplyToUserIdStr string `json:\"in_reply_to_user_id_str\"`\n\tLang string `json:\"lang\"`\n\tPlace string `json:\"place\"`\n\tPossiblySensitive bool `json:\"possibly_sensitive\"`\n\tRetweetCount int `json:\"retweet_count\"`\n\tRetweeted bool `json:\"retweeted\"`\n\tRetweetedStatus *Tweet `json:\"retweeted_status\"`\n\tSource string `json:\"source\"`\n\tText string `json:\"text\"`\n\tTruncated bool `json:\"truncated\"`\n\tUser User `json:\"user\"`\n\tWithheldCopyright bool `json:\"withheld_copyright\"`\n\tWithheldInCountries []string `json:\"withheld_in_countries\"`\n\tWithheldScope string `json:\"withheld_scope\"`\n}\n\ntype AuthSuccess struct {\n\t\/* variables *\/\n}\ntype AuthError struct {\n\t\/* variables *\/\n}\n\ntype Badrequest struct {\n\terror string `json:\"error\"`\n\trequest string `json:\"request\"`\n}\n\nfunc main() {\n\tusage = \"\\t\" + goquitter + \"\\tCopyright 2016 aerth@sdf.org\\nUsage:\\n\\n\\tgo-quitter read\\t\\t\\tReads 20 new posts\\n\\tgo-quitter read fast\\t\\tReads 20 new posts (no delay)\\n\\tgo-quitter home\\t\\t\\tYour home timeline.\\n\\tgo-quitter user username\\tLooks up `username` timeline\\n\\nSet your GNUSOCIALNODE environmental variable to change nodes.\\nFor example: `export GNUSOCIALNODE=gs.sdf.org` in your ~\/.shrc or ~\/.profile\\n\\nExplore!\\n\\n\\tGNUSOCIALNODE=gnusocial.de go-quitter read\\n\\tGNUSOCIALNODE=quitter.es go-quitter read\\n\\tGNUSOCIALNODE=shitposter.club go-quitter read\\n\\tGNUSOCIALNODE=sealion.club go-quitter read\\n\\t(defaults node is gs.sdf.org)\\n\\nTry `gno-quitter read fast | more`\"\n\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalln(usage)\n\t}\n\n\t\/\/ go-quitter read\n\tif os.Args[1] == \"read\" && len(os.Args) == 2 {\n\t\treadNew(false)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go quitter read fast\n\tif os.Args[1] == \"read\" && os.Args[2] == \"fast\" {\n\t\treadNew(true)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter home\n\tif os.Args[1] == \"home\" && len(os.Args) == 2 {\n\t\treadHome(false)\n\t\tos.Exit(0)\n\t}\n\t\/\/ go-quitter home fast\n\tif os.Args[1] == \"home\" && os.Args[2] == \"fast\" {\n\t\treadHome(true)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter post \"Testing form console line using go-quitter\"\n\tif os.Args[1] == \"post\" && os.Args[2] != \"\" {\n\t\tpostNew(os.Args[2])\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter user aerth\n\tif os.Args[1] == \"user\" && os.Args[2] != \"\" {\n\t\tuserlookup := os.Args[2]\n\t\treadUserposts(userlookup, false)\n\t\tos.Exit(0)\n\t}\n\n\tlog.Fatalln(usage)\n\n}\n\n\/\/ readNew shows 20 new messages. Defaults to a 2 second delay, but can be called with readNew(fast) for a quick dump.\nfunc readNew(fast bool) {\n\n\tlog.Println(\"node: \" + gnusocialnode)\n\tres, err := http.Get(\"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/public_timeline.json\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tdefer res.Body.Close()\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) }\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\n\n\/\/ readHome shows 20 from home timeline. Defaults to a 2 second delay, but can be called with readHome(fast) for a quick dump.\nfunc readHome(fast bool) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to view home timeline.\")\n\t}\n\tlog.Println(\"node: \" + gnusocialnode)\n\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/home_timeline.json\"\n\treq, err := http.NewRequest(\"GET\", apipath, nil)\n\treq.Header.Set(\"User-Agent\", goquitter)\n\treq.SetBasicAuth(username, password)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tvar apres Badrequest\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t_ = json.Unmarshal(body, &apres)\n\tfmt.Println(apres.error)\n\tfmt.Println(apres.request)\n\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) } \/\/ This fails\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\nfunc readUserposts(userlookup string, fast bool) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to view use timelines.\")\n\t}\n\tlog.Println(\"user \" + userlookup + \"node: \" + gnusocialnode)\n\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/user_timeline.json?screen_name=\" + userlookup\n\n\treq, err := http.NewRequest(\"GET\", apipath, nil)\n\treq.Header.Set(\"User-Agent\", goquitter)\n\treq.SetBasicAuth(username, password)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) } \/\/ This fails\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\n\nfunc postNew(content string) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to post.\")\n\t}\n\n\tlog.Println(\"posting on node: \" + gnusocialnode)\n\tlog.Println(content)\n\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/update.json\"\n\n\treq, err := http.NewRequest(\"POST\", apipath, bytes.NewBuffer([]byte(`{\"status\": \"testing from go-quitter command line.... its not working.\"}`)))\n\treq.SetBasicAuth(username, password)\n\treq.Header.Set(\"HTTP-REFERRER\", \"https:\/\/\"+gnusocialnode+\"\/\")\n\treq.Header.Add(\"Content-Type\", \"[application\/json; charset=utf-8\")\n\treq.Header.Set(\"User-Agent\", goquitter)\n\tlog.Println(req)\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tvar apres Badrequest\n\tfmt.Println(\"response Status:\", resp.Status)\n\tfmt.Println(\"response Headers:\", resp.Header)\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tfmt.Println(\"response Body:\", string(body))\n\t_ = json.Unmarshal(body, &apres)\n\tfmt.Println(apres.error)\n\tfmt.Println(apres.request)\n\n}\n\nfunc init() {\n\tif gnusocialnode == \"\" {\n\t\tgnusocialnode = \"gs.sdf.org\"\n\t}\n\n}\n<commit_msg>Posting works! go-quitter post Whatever you want!<commit_after>package main\n\nimport (\n\t\"strings\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\t\"net\/url\"\n)\n\nvar goquitter = \"go-quitter v0.0.2\"\nvar username = os.Getenv(\"GNUSOCIALUSER\")\nvar password = os.Getenv(\"GNUSOCIALPASS\")\nvar gnusocialnode = os.Getenv(\"GNUSOCIALNODE\")\nvar fast bool = false\nvar apipath string = \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/home_timeline.json\"\n\ntype User struct {\n\tName string `json:\"name\"`\n}\n\nvar usage string\n\ntype Tweet struct {\n\tId int64 `json:\"id\"`\n\tIdStr string `json:\"id_str\"`\n\tInReplyToScreenName string `json:\"in_reply_to_screen_name\"`\n\tInReplyToStatusID int64 `json:\"in_reply_to_status_id\"`\n\tInReplyToStatusIdStr string `json:\"in_reply_to_status_id_str\"`\n\tInReplyToUserID int64 `json:\"in_reply_to_user_id\"`\n\tInReplyToUserIdStr string `json:\"in_reply_to_user_id_str\"`\n\tLang string `json:\"lang\"`\n\tPlace string `json:\"place\"`\n\tPossiblySensitive bool `json:\"possibly_sensitive\"`\n\tRetweetCount int `json:\"retweet_count\"`\n\tRetweeted bool `json:\"retweeted\"`\n\tRetweetedStatus *Tweet `json:\"retweeted_status\"`\n\tSource string `json:\"source\"`\n\tText string `json:\"text\"`\n\tTruncated bool `json:\"truncated\"`\n\tUser User `json:\"user\"`\n\tWithheldCopyright bool `json:\"withheld_copyright\"`\n\tWithheldInCountries []string `json:\"withheld_in_countries\"`\n\tWithheldScope string `json:\"withheld_scope\"`\n}\n\ntype AuthSuccess struct {\n\t\/* variables *\/\n}\ntype AuthError struct {\n\t\/* variables *\/\n}\n\ntype Badrequest struct {\n\terror string `json:\"error\"`\n\trequest string `json:\"request\"`\n}\n\nfunc main() {\n\tusage = \"\\t\" + goquitter + \"\\tCopyright 2016 aerth@sdf.org\\nUsage:\\n\\n\\tgo-quitter read\\t\\t\\tReads 20 new posts\\n\\tgo-quitter read fast\\t\\tReads 20 new posts (no delay)\\n\\tgo-quitter home\\t\\t\\tYour home timeline.\\n\\tgo-quitter user username\\tLooks up `username` timeline\\n\\nSet your GNUSOCIALNODE environmental variable to change nodes.\\nFor example: `export GNUSOCIALNODE=gs.sdf.org` in your ~\/.shrc or ~\/.profile\\n\\nExplore!\\n\\n\\tGNUSOCIALNODE=gnusocial.de go-quitter read\\n\\tGNUSOCIALNODE=quitter.es go-quitter read\\n\\tGNUSOCIALNODE=shitposter.club go-quitter read\\n\\tGNUSOCIALNODE=sealion.club go-quitter read\\n\\t(defaults node is gs.sdf.org)\\n\\nTry `go-quitter read fast | more`\"\n\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalln(usage)\n\t}\n\n\t\/\/ go-quitter read\n\tif os.Args[1] == \"read\" && len(os.Args) == 2 {\n\t\treadNew(false)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go quitter read fast\n\tif os.Args[1] == \"read\" && os.Args[2] == \"fast\" {\n\t\treadNew(true)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter home\n\tif os.Args[1] == \"home\" && len(os.Args) == 2 {\n\t\treadHome(false)\n\t\tos.Exit(0)\n\t}\n\t\/\/ go-quitter home fast\n\tif os.Args[1] == \"home\" && os.Args[2] == \"fast\" {\n\t\treadHome(true)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter post \"Testing form console line using go-quitter\"\n\tif os.Args[1] == \"post\" && os.Args[2] != \"\" {\n\t\tcontent := strings.Join(os.Args[2:], \" \")\n\t\tpostNew(content)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ go-quitter user aerth\n\tif os.Args[1] == \"user\" && os.Args[2] != \"\" {\n\t\tuserlookup := os.Args[2]\n\t\treadUserposts(userlookup, false)\n\t\tos.Exit(0)\n\t}\n\n\tlog.Fatalln(usage)\n\n}\n\n\/\/ readNew shows 20 new messages. Defaults to a 2 second delay, but can be called with readNew(fast) for a quick dump.\nfunc readNew(fast bool) {\n\n\tlog.Println(\"node: \" + gnusocialnode)\n\tres, err := http.Get(\"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/public_timeline.json\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tdefer res.Body.Close()\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) }\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\n\n\/\/ readHome shows 20 from home timeline. Defaults to a 2 second delay, but can be called with readHome(fast) for a quick dump.\nfunc readHome(fast bool) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to view home timeline.\")\n\t}\n\tlog.Println(\"node: \" + gnusocialnode)\n\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/home_timeline.json\"\n\treq, err := http.NewRequest(\"GET\", apipath, nil)\n\treq.Header.Set(\"User-Agent\", goquitter)\n\treq.SetBasicAuth(username, password)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tvar apres Badrequest\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t_ = json.Unmarshal(body, &apres)\n\tfmt.Println(apres.error)\n\tfmt.Println(apres.request)\n\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) } \/\/ This fails\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\nfunc readUserposts(userlookup string, fast bool) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to view use timelines.\")\n\t}\n\tlog.Println(\"user \" + userlookup + \"node: \" + gnusocialnode)\n\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/user_timeline.json?screen_name=\" + userlookup\n\n\treq, err := http.NewRequest(\"GET\", apipath, nil)\n\treq.Header.Set(\"User-Agent\", goquitter)\n\treq.SetBasicAuth(username, password)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\tvar tweets []Tweet\n\t_ = json.Unmarshal(body, &tweets)\n\t\/\/if err != nil { log.Fatalln(err) } \/\/ This fails\n\tfor i := range tweets {\n\t\tfmt.Printf(\"[\" + tweets[i].User.Name + \"] \" + tweets[i].Text + \"\\n\\n\")\n\t\tif fast != true {\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t}\n\t}\n\n}\n\nfunc postNew(content string) {\n\tif username == \"\" || password == \"\" {\n\t\tlog.Fatalln(\"Please set the GNUSOCIALUSER and GNUSOCIALPASS environmental variables to post.\")\n\t}\n\n\tlog.Println(\"posting on node: \" + gnusocialnode)\n\t\/\/content = `\"`+content+`\"`\n\tv := url.Values{}\n\tv.Set(\"status\", content)\n\tcontent = url.Values.Encode(v)\n\tlog.Println(content)\n\t\/\/apipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/update.json?status='\"+content+\"'\"\n\tapipath := \"https:\/\/\" + gnusocialnode + \"\/api\/statuses\/update.json?\"+content\n\treq, err := http.NewRequest(\"POST\", apipath, nil)\n\treq.SetBasicAuth(username, password)\n\treq.Header.Set(\"HTTP_REFERER\", \"https:\/\/\"+gnusocialnode+\"\/\")\n\treq.Header.Add(\"Content-Type\", \"[application\/json; charset=utf-8\")\n\treq.Header.Set(\"User-Agent\", goquitter)\n\tlog.Println(req)\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tfmt.Println(\"response Body:\", string(body))\n\n\n\t\/* var apres Badrequest\n\tfmt.Println(\"response Status:\", resp.Status)\n\tfmt.Println(\"response Headers:\", resp.Header)\n\t_ = json.Unmarshal(body, &apres)\n\tfmt.Println(apres.error)\n\tfmt.Println(apres.request)\n*\/\n}\n\nfunc init() {\n\tif gnusocialnode == \"\" {\n\t\tgnusocialnode = \"gs.sdf.org\"\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tapns \"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"github.com\/sideshow\/apns2\/payload\"\n\t\"github.com\/smancke\/guble\/protocol\"\n\t\"github.com\/smancke\/guble\/server\"\n\t\"os\"\n)\n\nconst (\n\tdefaultCertFileName = \"development-certificate.p12\"\n\tmsgAPSNNotSent = \"APNS notification was not sent\"\n\terrAPSNNotSent = errors.New(msgAPSNNotSent)\n)\n\nfunc init() {\n\tserver.AfterMessageDelivery = func(m *protocol.Message) {\n\t\tfmt.Print(\"message delivered\")\n\t}\n}\n\ntype APNSConfig struct {\n\tCertFileName string\n\tCertPassword string\n}\n\nfunc main() {\n\n\t\/\/ server.Main()\n\n\tcfg := APNSConfig{\n\t\tCertFileName: defaultCertFileName,\n\t\tCertPassword: os.Getenv(\"APNS_CERT_PASSWORD\"),\n\t}\n\ttopic := os.Getenv(\"APNS_TOPIC\")\n\tdeviceToken := os.Getenv(\"APNS_DEVICE_TOKEN\")\n\tcl := getAPNSClient(cfg, false)\n\tp := payload.NewPayload().\n\t\tAlertTitle(\"REWE Sonderrabatt\").\n\t\tAlertBody(\"Sie haben ein Sonderrabatt von 50% für das neue iPhone 8 bekommen!\").\n\t\tContentAvailable()\n\tsendAPNSNotification(cl, topic, deviceToken, p)\n}\n\nfunc getAPNSClient(cfg *APNSConfig, production bool) *apns.Client {\n\tcert, errCert := certificate.FromP12File(cfg.CertFileName, cfg.CertPassword)\n\tif errCert != nil {\n\t\tlog.WithError(errCert).Error(\"APNS certificate error\")\n\t}\n\tif production {\n\t\treturn apns.NewClient(cert).Production()\n\t}\n\treturn apns.NewClient(cert).Development()\n}\n\nfunc sendAPNSNotification(cl *apns.Client, topic string, deviceToken string, p *payload.Payload) error {\n\tnotification := &apns.Notification{\n\t\tPriority: apns.PriorityHigh,\n\t\tTopic: topic,\n\t\tDeviceToken: deviceToken,\n\t\tPayload: p,\n\t}\n\tresponse, errPush := cl.Push(notification)\n\tif errPush != nil {\n\t\tlog.WithError(errPush).Error(\"APNS error when trying to push notification\")\n\t\treturn errPush\n\t}\n\tif !response.Sent() {\n\t\tlog.WithField(\"id\", response.ApnsID).Error(msgAPSNNotSent)\n\t\treturn errAPSNNotSent\n\t}\n\tlog.WithField(\"id\", response.ApnsID).Debug(\"APNS notification successfully sent\")\n\treturn nil\n}\n<commit_msg>fix errors<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tapns \"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"github.com\/sideshow\/apns2\/payload\"\n\t\"github.com\/smancke\/guble\/protocol\"\n\t\"github.com\/smancke\/guble\/server\"\n\t\"os\"\n)\n\nconst (\n\tdefaultCertFileName = \"development-certificate.p12\"\n\tmsgAPSNNotSent = \"APNS notification was not sent\"\n)\n\nvar (\n\terrAPSNNotSent = errors.New(msgAPSNNotSent)\n)\n\nfunc init() {\n\tserver.AfterMessageDelivery = func(m *protocol.Message) {\n\t\tfmt.Print(\"message delivered\")\n\t}\n}\n\ntype APNSConfig struct {\n\tCertFileName string\n\tCertPassword string\n}\n\nfunc main() {\n\n\t\/\/ server.Main()\n\n\tcfg := &APNSConfig{\n\t\tCertFileName: defaultCertFileName,\n\t\tCertPassword: os.Getenv(\"APNS_CERT_PASSWORD\"),\n\t}\n\ttopic := os.Getenv(\"APNS_TOPIC\")\n\tdeviceToken := os.Getenv(\"APNS_DEVICE_TOKEN\")\n\tcl := getAPNSClient(cfg, false)\n\tp := payload.NewPayload().\n\t\tAlertTitle(\"REWE Sonderrabatt\").\n\t\tAlertBody(\"Sie haben ein Sonderrabatt von 50% für das neue iPhone 8 bekommen!\").\n\t\tContentAvailable()\n\tsendAPNSNotification(cl, topic, deviceToken, p)\n}\n\nfunc getAPNSClient(cfg *APNSConfig, production bool) *apns.Client {\n\tcert, errCert := certificate.FromP12File(cfg.CertFileName, cfg.CertPassword)\n\tif errCert != nil {\n\t\tlog.WithError(errCert).Error(\"APNS certificate error\")\n\t}\n\tif production {\n\t\treturn apns.NewClient(cert).Production()\n\t}\n\treturn apns.NewClient(cert).Development()\n}\n\nfunc sendAPNSNotification(cl *apns.Client, topic string, deviceToken string, p *payload.Payload) error {\n\tnotification := &apns.Notification{\n\t\tPriority: apns.PriorityHigh,\n\t\tTopic: topic,\n\t\tDeviceToken: deviceToken,\n\t\tPayload: p,\n\t}\n\tresponse, errPush := cl.Push(notification)\n\tif errPush != nil {\n\t\tlog.WithError(errPush).Error(\"APNS error when trying to push notification\")\n\t\treturn errPush\n\t}\n\tif !response.Sent() {\n\t\tlog.WithField(\"id\", response.ApnsID).Error(msgAPSNNotSent)\n\t\treturn errAPSNNotSent\n\t}\n\tlog.WithField(\"id\", response.ApnsID).Debug(\"APNS notification successfully sent\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ooyala\/go-dogstatsd\"\n)\n\nconst (\n\tDogstatsdAddressFlag = \"dogstatsd-address\"\n\tHTTPEnableFlag = \"http-enable\"\n\tHTTPPortFlag = \"http-port\"\n\tCalculateRate = \"calculate-rate\"\n\tCollectRate = \"collect-rate\"\n\tLogRate = \"log-rate\"\n\tStatsInterfaceFilterFlag = \"stats-interface-filter\"\n\tStatsFlag = \"stats\"\n\tJsonLogFlag = \"json-log\"\n\tHelpFlag = \"h\"\n)\n\nvar (\n\tLog = logrus.New()\n\tStatsdClient *dogstatsd.Client = nil\n\n\tstatsdAddress = flag.String(DogstatsdAddressFlag, \"\", \"The address of the Datadog DogStatsd server.\")\n\thttpEnable = flag.Bool(HTTPEnableFlag, false, \"Enable HTTP server.\")\n\thttpPort = flag.Int(HTTPPortFlag, 8001, \"HTTP server listening port.\")\n\tcalculatedRate = flag.Int64(CalculateRate, 2, \"Rate (in seconds) for which the rate stats are calculated.\")\n\tcollectRate = flag.Int64(CollectRate, 2, \"Rate (in seconds) for which the stats are collected.\")\n\tlogRate = flag.Int64(LogRate, 5, \"Rate (in seconds) for which the stats are logged.\")\n\tstatsInterfaceFilter = flag.String(StatsInterfaceFilterFlag, \"\", \"Regular expression which filters out interfaces not reported to DogStatd.\")\n\tstatsList = flag.String(StatsFlag, \"\", \"The list of stats send to the DogStatsd server.\")\n\tjsonLog = flag.Bool(JsonLogFlag, false, \"Enable JSON logging. The default format is text.\")\n\thelp = flag.Bool(HelpFlag, false, \"Prints help info.\")\n\n\tstopOnce sync.Once\n\tstopWg sync.WaitGroup\n\n\tIfaceList = NewInterfaceList()\n\tifaceRegExp *regexp.Regexp = nil\n\tStatsMap = make(map[string]string)\n)\n\nfunc withLogging(f func()) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr := fmt.Errorf(\"Recovered from panic(%+v)\", r)\n\n\t\t\tLog.WithField(\"error\", err).Panicf(\"Stopped with panic: %s\", err.Error())\n\t\t}\n\t}()\n\n\tf()\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *help {\n\t\tfmt.Println(\"Supported stats:\")\n\t\tfmt.Print(\"---> \")\n\t\tfmt.Println(NetworkStatPath)\n\t\tfor _, stat := range getNetworkDeviceStatsList() {\n\t\t\tfmt.Println(stat.StatName + \":\" + stat.MetricName)\n\t\t}\n\t\tfmt.Println(\"<---\")\n\t\tfmt.Print(\"---> \")\n\t\tfmt.Println(NetstatStatPath)\n\t\tfor _, stat := range getNetstatStatsList() {\n\t\t\tfmt.Println(stat.StatName + \":\" + stat.MetricName)\n\t\t}\n\t\tfmt.Println(\"<---\")\n\t\tos.Exit(0)\n\t}\n\n\tif *jsonLog {\n\t\tLog.Formatter = new(logrus.JSONFormatter)\n\t}\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\tfor sig := range c {\n\t\t\tLog.WithField(\"signal\", sig).Infof(\"Signalled. Shutting down.\")\n\n\t\t\tstopOnce.Do(func() { shutdown(0) })\n\t\t}\n\t}()\n\n\tif *statsdAddress != \"\" {\n\t\tLog.WithField(\"address\", *statsdAddress).Infof(\"Attempting to dial DogStatsd server.\")\n\t\tvar err error\n\t\tStatsdClient, err = dogstatsd.New(*statsdAddress)\n\t\tif err != nil {\n\t\t\tLog.WithFields(logrus.Fields{\n\t\t\t\t\"address\": *statsdAddress,\n\t\t\t\t\"error\": err,\n\t\t\t}).Warn(\"Unable to dial StatsD.\")\n\t\t}\n\t}\n\n\tif StatsdClient != nil {\n\t\tLog.WithField(\"address\", *statsdAddress).Infof(\"Dialed DogStatsd server.\")\n\t\tStatsdClient.Namespace = \"007.\"\n\t}\n\n\tvar err error\n\tif ifaceRegExp, err = regexp.Compile(*statsInterfaceFilter); err != nil {\n\t\tLog.WithField(\"error\", err).Errorf(\"Unable to compile provided regular expression: %s\", *statsInterfaceFilter)\n\t}\n\n\tif ifaceRegExp != nil {\n\t\tLog.WithField(\"regex\", ifaceRegExp.String()).Infof(\"Compiled interface filter regualr expression.\")\n\t}\n\n\tLog.Info(\"Starting calculators, loggers and collectors.\")\n\tstartCalculators()\n\tstartLoggers()\n\n\tif StatsdClient != nil {\n\t\tfor _, stat := range strings.Split(*statsList, \",\") {\n\t\t\tStatsMap[stat] = stat\n\t\t}\n\t\tLog.WithField(\"address\", *statsdAddress).Infof(\"Starting collectors.\")\n\t\tstartCollectors()\n\t}\n\n\tif *httpEnable {\n\t\tif err := <-StartHTTPServer(*httpPort); err != nil {\n\t\t\tLog.WithField(\"error\", err).Fatal(\"Error starting HTTP server.\")\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ If HTTP is not enabled we need to block with a wait on a WaitGroup.\n\tstopWg.Add(1)\n\tstopWg.Wait()\n}\n\nfunc shutdown(code int) {\n\tLog.WithField(\"code\", code).Infof(\"Stopping.\")\n\n\t\/\/ If HTTP is enabled we must exit in order to cause the HTTP server to shutdown.\n\tif *httpEnable {\n\t\tos.Exit(0)\n\t}\n\n\tstopWg.Done()\n}\n\nfunc startCalculators() {\n\tif err := calculateInterfaceRateStats(); err != nil {\n\t\tLog.WithField(\"error\", err).Error(\"Error calculating interface rate stats.\")\n\t} else {\n\t\tgo withLogging(func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-time.Tick(time.Duration(*calculatedRate) * time.Second):\n\t\t\t\t\tif err := calculateInterfaceRateStats(); err != nil {\n\t\t\t\t\t\tLog.WithField(\"error\", err).Error(\"Error calculating interface rate stats.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc startCollectors() {\n\tgo withLogging(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.Tick(time.Duration(*collectRate) * time.Second):\n\t\t\t\tcollectNetworkDeviceStats()\n\t\t\t\tcollectNetstatStats()\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc startLoggers() {\n\tgo withLogging(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.Tick(time.Duration(*logRate) * time.Second):\n\t\t\t\tlogNetworkDeviceStats()\n\t\t\t\tlogNetstatStats()\n\t\t\t}\n\t\t}\n\t})\n}\n<commit_msg>Moved some logging statements.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ooyala\/go-dogstatsd\"\n)\n\nconst (\n\tDogstatsdAddressFlag = \"dogstatsd-address\"\n\tHTTPEnableFlag = \"http-enable\"\n\tHTTPPortFlag = \"http-port\"\n\tCalculateRate = \"calculate-rate\"\n\tCollectRate = \"collect-rate\"\n\tLogRate = \"log-rate\"\n\tStatsInterfaceFilterFlag = \"stats-interface-filter\"\n\tStatsFlag = \"stats\"\n\tJsonLogFlag = \"json-log\"\n\tHelpFlag = \"h\"\n)\n\nvar (\n\tLog = logrus.New()\n\tStatsdClient *dogstatsd.Client = nil\n\n\tstatsdAddress = flag.String(DogstatsdAddressFlag, \"\", \"The address of the Datadog DogStatsd server.\")\n\thttpEnable = flag.Bool(HTTPEnableFlag, false, \"Enable HTTP server.\")\n\thttpPort = flag.Int(HTTPPortFlag, 8001, \"HTTP server listening port.\")\n\tcalculatedRate = flag.Int64(CalculateRate, 2, \"Rate (in seconds) for which the rate stats are calculated.\")\n\tcollectRate = flag.Int64(CollectRate, 2, \"Rate (in seconds) for which the stats are collected.\")\n\tlogRate = flag.Int64(LogRate, 5, \"Rate (in seconds) for which the stats are logged.\")\n\tstatsInterfaceFilter = flag.String(StatsInterfaceFilterFlag, \"\", \"Regular expression which filters out interfaces not reported to DogStatd.\")\n\tstatsList = flag.String(StatsFlag, \"\", \"The list of stats send to the DogStatsd server.\")\n\tjsonLog = flag.Bool(JsonLogFlag, false, \"Enable JSON logging. The default format is text.\")\n\thelp = flag.Bool(HelpFlag, false, \"Prints help info.\")\n\n\tstopOnce sync.Once\n\tstopWg sync.WaitGroup\n\n\tIfaceList = NewInterfaceList()\n\tifaceRegExp *regexp.Regexp = nil\n\tStatsMap = make(map[string]string)\n)\n\nfunc withLogging(f func()) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr := fmt.Errorf(\"Recovered from panic(%+v)\", r)\n\n\t\t\tLog.WithField(\"error\", err).Panicf(\"Stopped with panic: %s\", err.Error())\n\t\t}\n\t}()\n\n\tf()\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *help {\n\t\tfmt.Println(\"Supported stats:\")\n\t\tfmt.Print(\"---> \")\n\t\tfmt.Println(NetworkStatPath)\n\t\tfor _, stat := range getNetworkDeviceStatsList() {\n\t\t\tfmt.Println(stat.StatName + \":\" + stat.MetricName)\n\t\t}\n\t\tfmt.Println(\"<---\")\n\t\tfmt.Print(\"---> \")\n\t\tfmt.Println(NetstatStatPath)\n\t\tfor _, stat := range getNetstatStatsList() {\n\t\t\tfmt.Println(stat.StatName + \":\" + stat.MetricName)\n\t\t}\n\t\tfmt.Println(\"<---\")\n\t\tos.Exit(0)\n\t}\n\n\tif *jsonLog {\n\t\tLog.Formatter = new(logrus.JSONFormatter)\n\t}\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\tfor sig := range c {\n\t\t\tLog.WithField(\"signal\", sig).Infof(\"Signalled. Shutting down.\")\n\n\t\t\tstopOnce.Do(func() { shutdown(0) })\n\t\t}\n\t}()\n\n\tif *statsdAddress != \"\" {\n\t\tLog.WithField(\"address\", *statsdAddress).Infof(\"Attempting to dial DogStatsd server.\")\n\t\tvar err error\n\t\tStatsdClient, err = dogstatsd.New(*statsdAddress)\n\t\tif err != nil {\n\t\t\tLog.WithFields(logrus.Fields{\n\t\t\t\t\"address\": *statsdAddress,\n\t\t\t\t\"error\": err,\n\t\t\t}).Warn(\"Unable to dial StatsD.\")\n\t\t}\n\t}\n\n\tif StatsdClient != nil {\n\t\tLog.WithField(\"address\", *statsdAddress).Infof(\"Dialed DogStatsd server.\")\n\t\tStatsdClient.Namespace = \"007.\"\n\t}\n\n\tvar err error\n\tif ifaceRegExp, err = regexp.Compile(*statsInterfaceFilter); err != nil {\n\t\tLog.WithField(\"error\", err).Errorf(\"Unable to compile provided regular expression: %s\", *statsInterfaceFilter)\n\t}\n\n\tif ifaceRegExp != nil {\n\t\tLog.WithField(\"regex\", ifaceRegExp.String()).Infof(\"Compiled interface filter regualr expression.\")\n\t}\n\n\tstartCalculators()\n\tstartLoggers()\n\n\tif StatsdClient != nil {\n\t\tfor _, stat := range strings.Split(*statsList, \",\") {\n\t\t\tStatsMap[stat] = stat\n\t\t}\n\t\tstartCollectors()\n\t}\n\n\tif *httpEnable {\n\t\tif err := <-StartHTTPServer(*httpPort); err != nil {\n\t\t\tLog.WithField(\"error\", err).Fatal(\"Error starting HTTP server.\")\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ If HTTP is not enabled we need to block with a wait on a WaitGroup.\n\tstopWg.Add(1)\n\tstopWg.Wait()\n}\n\nfunc shutdown(code int) {\n\tLog.WithField(\"code\", code).Infof(\"Stopping.\")\n\n\t\/\/ If HTTP is enabled we must exit in order to cause the HTTP server to shutdown.\n\tif *httpEnable {\n\t\tos.Exit(0)\n\t}\n\n\tstopWg.Done()\n}\n\nfunc startCalculators() {\n\tLog.Info(\"Starting calculators...\")\n\tif err := calculateInterfaceRateStats(); err != nil {\n\t\tLog.WithField(\"error\", err).Error(\"Error calculating interface rate stats.\")\n\t} else {\n\t\tgo withLogging(func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-time.Tick(time.Duration(*calculatedRate) * time.Second):\n\t\t\t\t\tif err := calculateInterfaceRateStats(); err != nil {\n\t\t\t\t\t\tLog.WithField(\"error\", err).Error(\"Error calculating interface rate stats.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc startLoggers() {\n\tLog.Info(\"Starting loggers...\")\n\tgo withLogging(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.Tick(time.Duration(*logRate) * time.Second):\n\t\t\t\tlogNetworkDeviceStats()\n\t\t\t\tlogNetstatStats()\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc startCollectors() {\n\tLog.Info(\"Starting collectors...\")\n\tgo withLogging(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.Tick(time.Duration(*collectRate) * time.Second):\n\t\t\t\tcollectNetworkDeviceStats()\n\t\t\t\tcollectNetstatStats()\n\t\t\t}\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCLI utility for stress testing of HTTP servers with many concurrent connections\n\nUsage:\n httpstress <URL list> [options]\n\nOptions:\n * `URL list` – URLs to fetch (required)\n * `-c NUM` – concurrent connections number (defaults to 1)\n * `-n NUM` – total connections number (optional)\n * `-v` – print version to stdout and exit\n\nExample:\n httpstress http:\/\/localhost https:\/\/google.com -c 1000\n\nReturns 0 if no errors, 1 if some requests failed, 2 on kill, 3 in case of invalid options\nand 4 if it encounters a setrlimit(2)\/getrlimit(2) error.\n\nPrints elapsed time and error count for each URL to stdout (if any; does not count successful attempts).\nUsage and runtime errors go to stderr.\n\nOutput is YAML-formatted. Example:\n Errors:\n - Location: http:\/\/localhost\n Count: 334\n - Location: https:\/\/127.0.0.1\n Count: 333\n Elapsed time: 4.791903888s\n\nPlease note that this utility uses GOMAXPROCS environment variable if it's present.\nIf not, this defaults to CPU count + 1.\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/chillum\/httpstress\/lib\"\n\tflag \"github.com\/ogier\/pflag\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\n\/\/ Application version\nconst Version = \"4.1\"\n\nfunc main() {\n\tvar conn, max int\n\tflag.IntVarP(&conn, \"c\", \"c\", 1, \"concurrent connections count\")\n\tflag.IntVarP(&max, \"n\", \"n\", 0, \"total connections (optional)\")\n\tversion := flag.BoolP(\"version\", \"v\", false, \"print version to stdout and exit\")\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage:\", os.Args[0], \"<URL list> [options]\")\n\t\tfmt.Fprintln(os.Stderr, \" <URL list>: URLs to fetch (required)\")\n\t\tflag.PrintDefaults()\n\t\tfmt.Fprintln(os.Stderr, \"Docs:\\n https:\/\/github.com\/chillum\/httpstress\/wiki\")\n\t\tfmt.Fprintln(os.Stderr, \"Example:\\n httpstress http:\/\/localhost https:\/\/google.com -c 1000\")\n\t\tos.Exit(3)\n\t}\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(\"httpstress\/cli\", Version)\n\t\tfmt.Println(\"httpstress\/lib\", httpstress.Version)\n\t\tfmt.Println(runtime.Version(), runtime.GOOS, runtime.GOARCH)\n\t\tos.Exit(0)\n\t}\n\n\turls := flag.Args()\n\tif len(urls) < 1 {\n\t\tflag.Usage()\n\t}\n\n\tif os.Getenv(\"GOMAXPROCS\") == \"\" {\n\t\truntime.GOMAXPROCS(runtime.NumCPU() + 1)\n\t}\n\n\tif !setlimits(&conn) { \/\/ Platform-specific code: see unix.go and windows.go for details.\n\t\tos.Exit(4)\n\t}\n\n\tstart := time.Now()\n\n\tout, err := httpstress.Test(conn, max, urls)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error:\", err)\n\t\tflag.Usage()\n\t}\n\n\telapsed := time.Since(start)\n\n\tif len(out) > 0 {\n\t\tfmt.Println(\"Errors:\")\n\t\tfor url, num := range out {\n\t\t\tfmt.Println(\" - Location:\", url, \"\\n Count: \", num)\n\t\t}\n\t\tdefer os.Exit(1)\n\t}\n\tfmt.Println(\"Elapsed time:\", elapsed)\n}\n<commit_msg>cleaner YAML-formatted `version` message<commit_after>\/*\nCLI utility for stress testing of HTTP servers with many concurrent connections\n\nUsage:\n httpstress <URL list> [options]\n\nOptions:\n * `URL list` – URLs to fetch (required)\n * `-c NUM` – concurrent connections number (defaults to 1)\n * `-n NUM` – total connections number (optional)\n * `-v` – print version to stdout and exit\n\nExample:\n httpstress http:\/\/localhost https:\/\/google.com -c 1000\n\nReturns 0 if no errors, 1 if some requests failed, 2 on kill, 3 in case of invalid options\nand 4 if it encounters a setrlimit(2)\/getrlimit(2) error.\n\nPrints elapsed time and error count for each URL to stdout (if any; does not count successful attempts).\nUsage and runtime errors go to stderr.\n\nOutput is YAML-formatted. Example:\n Errors:\n - Location: http:\/\/localhost\n Count: 334\n - Location: https:\/\/127.0.0.1\n Count: 333\n Elapsed time: 4.791903888s\n\nPlease note that this utility uses GOMAXPROCS environment variable if it's present.\nIf not, this defaults to CPU count + 1.\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/chillum\/httpstress\/lib\"\n\tflag \"github.com\/ogier\/pflag\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\n\/\/ Application version\nconst Version = \"4.1\"\n\nfunc main() {\n\tvar conn, max int\n\tflag.IntVarP(&conn, \"c\", \"c\", 1, \"concurrent connections count\")\n\tflag.IntVarP(&max, \"n\", \"n\", 0, \"total connections (optional)\")\n\tversion := flag.BoolP(\"version\", \"v\", false, \"print version to stdout and exit\")\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage:\", os.Args[0], \"<URL list> [options]\")\n\t\tfmt.Fprintln(os.Stderr, \" <URL list>: URLs to fetch (required)\")\n\t\tflag.PrintDefaults()\n\t\tfmt.Fprintln(os.Stderr, \"Docs:\\n https:\/\/github.com\/chillum\/httpstress\/wiki\")\n\t\tfmt.Fprintln(os.Stderr, \"Example:\\n httpstress http:\/\/localhost https:\/\/google.com -c 1000\")\n\t\tos.Exit(3)\n\t}\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(\"cli:\", Version, \"\\nlib:\", httpstress.Version,\n\t\t\t\"\\ngo: \", runtime.Version(),\"\\nos: \", runtime.GOOS, \"\\ncpu:\", runtime.GOARCH)\n\t\tos.Exit(0)\n\t}\n\n\turls := flag.Args()\n\tif len(urls) < 1 {\n\t\tflag.Usage()\n\t}\n\n\tif os.Getenv(\"GOMAXPROCS\") == \"\" {\n\t\truntime.GOMAXPROCS(runtime.NumCPU() + 1)\n\t}\n\n\tif !setlimits(&conn) { \/\/ Platform-specific code: see unix.go and windows.go for details.\n\t\tos.Exit(4)\n\t}\n\n\tstart := time.Now()\n\n\tout, err := httpstress.Test(conn, max, urls)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error:\", err)\n\t\tflag.Usage()\n\t}\n\n\telapsed := time.Since(start)\n\n\tif len(out) > 0 {\n\t\tfmt.Println(\"Errors:\")\n\t\tfor url, num := range out {\n\t\t\tfmt.Println(\" - Location:\", url, \"\\n Count: \", num)\n\t\t}\n\t\tdefer os.Exit(1)\n\t}\n\tfmt.Println(\"Elapsed time:\", elapsed)\n}\n<|endoftext|>"} {"text":"<commit_before>package gowork\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/oleiade\/lane\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype WorkServer struct {\n\tQueue *lane.Queue\n\tHandlers map[string]interface{}\n\tHandlerParams map[string]interface{}\n\tWorkers *WorkersStruct\n}\n\ntype WorkersStruct struct {\n\tMembers map[int]*Worker\n\tPresharedSecret string\n\tWorkerCount int\n}\n\ntype Worker struct {\n\tId int\n\tRegistered bool\n\tPresharedSecret string\n\tSessionAuthenticationKey string\n\tVerification *ClientTest\n}\n\ntype ClientTest struct {\n\tPlaintextVerification string `json:\"Verification\"`\n\tClientResponse string `json:\"Response\"`\n}\n\ntype Work struct {\n\tId bson.ObjectId `json:\"-\" bson:\"_id\"`\n\tIdHex string\n\tWorkJSON string\n\tResult *WorkResult\n\tTime *TimeStats\n}\n\ntype WorkResult struct {\n\tResultJSON string\n\tStatus string\n\tError string\n}\n\ntype TimeStats struct {\n\tAdded int64\n\tRecieved int64\n\tComplete int64\n\tTimeout int64\n}\n\ntype Event struct {\n\tWork *Work\n\tWorker *Worker\n\tError string\n\tTime int64\n}\n\nfunc NewEventError(msg string) *Event {\n\treturn &Event{Error: msg, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewEventWork(w *Work) *Event {\n\treturn &Event{Work: w, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewEventWorker(w *Worker) *Event {\n\treturn &Event{Worker: w, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewServer(Secret string) (*WorkServer, error) {\n\tif len(Secret) != 32 {\n\t\treturn &WorkServer{}, errors.New(\"Secret must be 32 characters\")\n\t}\n\tQueue := lane.NewQueue()\n\tWorkerMembers := make(map[int]*Worker)\n\tWorkers := &WorkersStruct{WorkerMembers, Secret, 0}\n\tHandlerFuncs := make(map[string]interface{})\n\tHandlerParams := make(map[string]interface{})\n\tWorkServerInst := &WorkServer{Queue, HandlerFuncs, HandlerParams, Workers}\n\treturn WorkServerInst, nil\n}\n\nfunc (ws WorkServer) NewHandler(event_id string, hf func(*Event, map[string]interface{})) error {\n\tif _, exists := ws.Handlers[event_id]; exists {\n\t\tws.Event(\"add_handler_error\", NewEventError(\"HandlerExists\"))\n\t\treturn errors.New(\"Handler already exists\")\n\t}\n\tws.Handlers[event_id] = hf\n\treturn nil\n}\n\nfunc (ws WorkServer) AddParams(params map[string]interface{}) *WorkServer {\n\tws.HandlerParams = params\n\treturn &ws\n}\n\nfunc (ws WorkServer) Event(event_id string, event *Event) {\n\tif handlerFunc, exists := ws.Handlers[event_id]; exists {\n\t\thandlerFunc.(func(*Event, map[string]interface{}))(event, ws.HandlerParams)\n\t}\n}\n\nfunc (ws WorkServer) Add(w *Work) {\n\tw.Time.Added = time.Now().UTC().Unix()\n\tws.Event(\"add_work\", NewEventWork(w))\n\tws.Queue.Enqueue(w)\n}\n\nfunc (ws WorkServer) Get(Id string, AuthenticationKey string) (*Work, error) {\n\tIdInt, err := strconv.Atoi(Id)\n\tif err != nil {\n\t\tws.Event(\"get_work_error\", NewEventError(\"StrconvError\"))\n\t\treturn &Work{}, errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\tif ws.Workers.Members[IdInt].SessionAuthenticationKey != AuthenticationKey {\n\t\tws.Event(\"get_work_error\", NewEventError(\"AuthFailed\"))\n\t\treturn &Work{}, errors.New(\"Failed authentication\")\n\t}\n\tWorkObj := ws.Queue.Dequeue()\n\tif WorkObj == nil {\n\t\tws.Event(\"get_work_empty\", NewEventError(\"NoWork\"))\n\t\treturn &Work{}, nil\n\t}\n\tif (WorkObj.(*Work).Time.Added + WorkObj.(*Work).Time.Timeout) > time.Now().UTC().Unix() {\n\t\tws.Event(\"get_work\", NewEventWork(WorkObj.(*Work)))\n\t\treturn WorkObj.(*Work), nil\n\t}\n\tws.Event(\"work_timeout\", NewEventWork(WorkObj.(*Work)))\n\treturn WorkObj.(*Work), errors.New(\"Work Timeout\")\n}\n\nfunc (ws WorkServer) Submit(w *Work) {\n\tif (w.Time.Added + w.Time.Timeout) <= time.Now().UTC().Unix() {\n\t\tw.Result.Error = \"Timeout\"\n\t\tw.Result.Status = \"Timeout\"\n\t\tws.Event(\"work_timeout\", NewEventWork(w))\n\t\treturn\n\t}\n\tw.Id = bson.ObjectIdHex(w.IdHex)\n\tws.Event(\"work_complete\", NewEventWork(w))\n}\n\nfunc (ws WorkServer) QueueSize() int {\n\treturn ws.Queue.Size()\n}\n\nfunc (wrs WorkersStruct) Register(ws *WorkServer) (string, string) {\n\tTempWC := wrs.WorkerCount\n\twrs.WorkerCount += 1\n\tw := &Worker{\n\t\tId: TempWC + 1,\n\t\tVerification: &ClientTest{PlaintextVerification: uuid.New()},\n\t}\n\twrs.Members[w.Id] = w\n\tws.Event(\"worker_register\", NewEventWorker(w))\n\treturn strconv.Itoa(w.Id), w.Verification.PlaintextVerification\n}\n\nfunc (wrs WorkersStruct) Verify(ws *WorkServer, Id string, Response string) (string, error) {\n\tIdInt, err := strconv.Atoi(Id)\n\tif err != nil {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"StrconvError\"))\n\t\treturn \"\", errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\tClientResp, err := decrypt([]byte(wrs.PresharedSecret), []byte(Response))\n\tif err != nil {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"DecryptionError\"))\n\t\treturn \"\", errors.New(\"Failed to decrypt worker verification string:\" + err.Error())\n\t}\n\twrs.Members[IdInt].Verification.ClientResponse = string(ClientResp)\n\tif wrs.Members[IdInt].Verification.PlaintextVerification != string(wrs.Members[IdInt].Verification.ClientResponse) {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"KeyMismatch\"))\n\t\treturn \"\", errors.New(\"Client key incorrect\")\n\t}\n\twrs.Members[IdInt].Registered = true\n\twrs.Members[IdInt].SessionAuthenticationKey = uuid.New()\n\tws.Event(\"worker_verify\", NewEventWorker(wrs.Members[IdInt]))\n\treturn wrs.Members[IdInt].SessionAuthenticationKey, nil\n}\n\nfunc NewWorker(Secret string, ID string, PlaintextVerification string) (*Worker, error) {\n\twrk := &Worker{}\n\tif len(Secret) != 32 {\n\t\treturn wrk, errors.New(\"Secret must be 32 characters\")\n\t}\n\twrk.PresharedSecret = Secret\n\twrk.Verification = &ClientTest{PlaintextVerification: PlaintextVerification}\n\tIdInt, err := strconv.Atoi(ID)\n\tif err != nil {\n\t\treturn &Worker{}, errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\twrk.Id = IdInt\n\tClientResponse, err := encrypt([]byte(wrk.PresharedSecret), []byte(wrk.Verification.PlaintextVerification))\n\tif err != nil {\n\t\treturn &Worker{}, errors.New(\"Failed to encrypt verification string:\" + err.Error())\n\t}\n\twrk.Verification.ClientResponse = string(ClientResponse)\n\treturn wrk, nil\n}\n\nfunc (wrk Worker) SetAuthenticationKey(key string) *Worker {\n\twrk.SessionAuthenticationKey = key\n\treturn &wrk\n}\n\nfunc (wrk Worker) Process(w *Work) (*Work, map[string]interface{}, error) {\n\tWorkParams := make(map[string]interface{})\n\tif (w.Time.Added + w.Time.Timeout) <= time.Now().UTC().Unix() {\n\t\treturn w, WorkParams, errors.New(\"Work Timeout\")\n\t}\n\terr := json.Unmarshal([]byte(w.WorkJSON), &WorkParams)\n\tif err != nil {\n\t\treturn w, WorkParams, errors.New(\"Failed to unmarshal Work Params JSON:\" + err.Error())\n\t}\n\tw.Time.Recieved = time.Now().UTC().Unix()\n\treturn w, WorkParams, nil\n}\n\nfunc (wrk Worker) Submit(w *Work, ResultJSON string, Error string) (*Work, error) {\n\twr := &WorkResult{}\n\twr.ResultJSON = ResultJSON\n\tw.Time.Complete = time.Now().UTC().Unix()\n\tif (w.Time.Added + w.Time.Timeout) > time.Now().UTC().Unix() {\n\t\twr.Error = Error\n\t\twr.Status = \"Complete\"\n\t\tw.Result = wr\n\t\treturn w, nil\n\t}\n\twr.Error = \"Timeout\"\n\twr.Status = \"Timeout\"\n\tw.Result = wr\n\treturn w, errors.New(\"Timeout\")\n}\n\nfunc CreateWork(WorkData interface{}, Timeout int64) (*Work, error) {\n\tNewWork := &Work{}\n\tNewWork.IdHex = bson.NewObjectId().Hex()\n\tNewWork.Result = &WorkResult{\"\", \"Pending\", \"\"}\n\tNewWork.Time = &TimeStats{Timeout: Timeout}\n\tWorkDataJSON, err := json.Marshal(WorkData)\n\tif err != nil {\n\t\treturn &Work{}, errors.New(\"Failed to marshal work data:\" + err.Error())\n\t}\n\tNewWork.WorkJSON = string(WorkDataJSON)\n\treturn NewWork, nil\n}\n\nfunc (w Work) Marshal() string {\n\tMarshalledWork, _ := json.Marshal(w)\n\treturn string(MarshalledWork)\n}\n\nfunc Unmarshal(w string) *Work {\n\tWorkObject := &Work{}\n\t_ = json.Unmarshal([]byte(w), &WorkObject)\n\treturn WorkObject\n}\n<commit_msg>Divide imports by type<commit_after>package gowork\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/oleiade\/lane\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype WorkServer struct {\n\tQueue *lane.Queue\n\tHandlers map[string]interface{}\n\tHandlerParams map[string]interface{}\n\tWorkers *WorkersStruct\n}\n\ntype WorkersStruct struct {\n\tMembers map[int]*Worker\n\tPresharedSecret string\n\tWorkerCount int\n}\n\ntype Worker struct {\n\tId int\n\tRegistered bool\n\tPresharedSecret string\n\tSessionAuthenticationKey string\n\tVerification *ClientTest\n}\n\ntype ClientTest struct {\n\tPlaintextVerification string `json:\"Verification\"`\n\tClientResponse string `json:\"Response\"`\n}\n\ntype Work struct {\n\tId bson.ObjectId `json:\"-\" bson:\"_id\"`\n\tIdHex string\n\tWorkJSON string\n\tResult *WorkResult\n\tTime *TimeStats\n}\n\ntype WorkResult struct {\n\tResultJSON string\n\tStatus string\n\tError string\n}\n\ntype TimeStats struct {\n\tAdded int64\n\tRecieved int64\n\tComplete int64\n\tTimeout int64\n}\n\ntype Event struct {\n\tWork *Work\n\tWorker *Worker\n\tError string\n\tTime int64\n}\n\nfunc NewEventError(msg string) *Event {\n\treturn &Event{Error: msg, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewEventWork(w *Work) *Event {\n\treturn &Event{Work: w, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewEventWorker(w *Worker) *Event {\n\treturn &Event{Worker: w, Time: time.Now().UTC().Unix()}\n}\n\nfunc NewServer(Secret string) (*WorkServer, error) {\n\tif len(Secret) != 32 {\n\t\treturn &WorkServer{}, errors.New(\"Secret must be 32 characters\")\n\t}\n\tQueue := lane.NewQueue()\n\tWorkerMembers := make(map[int]*Worker)\n\tWorkers := &WorkersStruct{WorkerMembers, Secret, 0}\n\tHandlerFuncs := make(map[string]interface{})\n\tHandlerParams := make(map[string]interface{})\n\tWorkServerInst := &WorkServer{Queue, HandlerFuncs, HandlerParams, Workers}\n\treturn WorkServerInst, nil\n}\n\nfunc (ws WorkServer) NewHandler(event_id string, hf func(*Event, map[string]interface{})) error {\n\tif _, exists := ws.Handlers[event_id]; exists {\n\t\tws.Event(\"add_handler_error\", NewEventError(\"HandlerExists\"))\n\t\treturn errors.New(\"Handler already exists\")\n\t}\n\tws.Handlers[event_id] = hf\n\treturn nil\n}\n\nfunc (ws WorkServer) AddParams(params map[string]interface{}) *WorkServer {\n\tws.HandlerParams = params\n\treturn &ws\n}\n\nfunc (ws WorkServer) Event(event_id string, event *Event) {\n\tif handlerFunc, exists := ws.Handlers[event_id]; exists {\n\t\thandlerFunc.(func(*Event, map[string]interface{}))(event, ws.HandlerParams)\n\t}\n}\n\nfunc (ws WorkServer) Add(w *Work) {\n\tw.Time.Added = time.Now().UTC().Unix()\n\tws.Event(\"add_work\", NewEventWork(w))\n\tws.Queue.Enqueue(w)\n}\n\nfunc (ws WorkServer) Get(Id string, AuthenticationKey string) (*Work, error) {\n\tIdInt, err := strconv.Atoi(Id)\n\tif err != nil {\n\t\tws.Event(\"get_work_error\", NewEventError(\"StrconvError\"))\n\t\treturn &Work{}, errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\tif ws.Workers.Members[IdInt].SessionAuthenticationKey != AuthenticationKey {\n\t\tws.Event(\"get_work_error\", NewEventError(\"AuthFailed\"))\n\t\treturn &Work{}, errors.New(\"Failed authentication\")\n\t}\n\tWorkObj := ws.Queue.Dequeue()\n\tif WorkObj == nil {\n\t\tws.Event(\"get_work_empty\", NewEventError(\"NoWork\"))\n\t\treturn &Work{}, nil\n\t}\n\tif (WorkObj.(*Work).Time.Added + WorkObj.(*Work).Time.Timeout) > time.Now().UTC().Unix() {\n\t\tws.Event(\"get_work\", NewEventWork(WorkObj.(*Work)))\n\t\treturn WorkObj.(*Work), nil\n\t}\n\tws.Event(\"work_timeout\", NewEventWork(WorkObj.(*Work)))\n\treturn WorkObj.(*Work), errors.New(\"Work Timeout\")\n}\n\nfunc (ws WorkServer) Submit(w *Work) {\n\tif (w.Time.Added + w.Time.Timeout) <= time.Now().UTC().Unix() {\n\t\tw.Result.Error = \"Timeout\"\n\t\tw.Result.Status = \"Timeout\"\n\t\tws.Event(\"work_timeout\", NewEventWork(w))\n\t\treturn\n\t}\n\tw.Id = bson.ObjectIdHex(w.IdHex)\n\tws.Event(\"work_complete\", NewEventWork(w))\n}\n\nfunc (ws WorkServer) QueueSize() int {\n\treturn ws.Queue.Size()\n}\n\nfunc (wrs WorkersStruct) Register(ws *WorkServer) (string, string) {\n\tTempWC := wrs.WorkerCount\n\twrs.WorkerCount += 1\n\tw := &Worker{\n\t\tId: TempWC + 1,\n\t\tVerification: &ClientTest{PlaintextVerification: uuid.New()},\n\t}\n\twrs.Members[w.Id] = w\n\tws.Event(\"worker_register\", NewEventWorker(w))\n\treturn strconv.Itoa(w.Id), w.Verification.PlaintextVerification\n}\n\nfunc (wrs WorkersStruct) Verify(ws *WorkServer, Id string, Response string) (string, error) {\n\tIdInt, err := strconv.Atoi(Id)\n\tif err != nil {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"StrconvError\"))\n\t\treturn \"\", errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\tClientResp, err := decrypt([]byte(wrs.PresharedSecret), []byte(Response))\n\tif err != nil {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"DecryptionError\"))\n\t\treturn \"\", errors.New(\"Failed to decrypt worker verification string:\" + err.Error())\n\t}\n\twrs.Members[IdInt].Verification.ClientResponse = string(ClientResp)\n\tif wrs.Members[IdInt].Verification.PlaintextVerification != string(wrs.Members[IdInt].Verification.ClientResponse) {\n\t\tws.Event(\"worker_verify_error\", NewEventError(\"KeyMismatch\"))\n\t\treturn \"\", errors.New(\"Client key incorrect\")\n\t}\n\twrs.Members[IdInt].Registered = true\n\twrs.Members[IdInt].SessionAuthenticationKey = uuid.New()\n\tws.Event(\"worker_verify\", NewEventWorker(wrs.Members[IdInt]))\n\treturn wrs.Members[IdInt].SessionAuthenticationKey, nil\n}\n\nfunc NewWorker(Secret string, ID string, PlaintextVerification string) (*Worker, error) {\n\twrk := &Worker{}\n\tif len(Secret) != 32 {\n\t\treturn wrk, errors.New(\"Secret must be 32 characters\")\n\t}\n\twrk.PresharedSecret = Secret\n\twrk.Verification = &ClientTest{PlaintextVerification: PlaintextVerification}\n\tIdInt, err := strconv.Atoi(ID)\n\tif err != nil {\n\t\treturn &Worker{}, errors.New(\"Failed to convert Worker ID string to int:\" + err.Error())\n\t}\n\twrk.Id = IdInt\n\tClientResponse, err := encrypt([]byte(wrk.PresharedSecret), []byte(wrk.Verification.PlaintextVerification))\n\tif err != nil {\n\t\treturn &Worker{}, errors.New(\"Failed to encrypt verification string:\" + err.Error())\n\t}\n\twrk.Verification.ClientResponse = string(ClientResponse)\n\treturn wrk, nil\n}\n\nfunc (wrk Worker) SetAuthenticationKey(key string) *Worker {\n\twrk.SessionAuthenticationKey = key\n\treturn &wrk\n}\n\nfunc (wrk Worker) Process(w *Work) (*Work, map[string]interface{}, error) {\n\tWorkParams := make(map[string]interface{})\n\tif (w.Time.Added + w.Time.Timeout) <= time.Now().UTC().Unix() {\n\t\treturn w, WorkParams, errors.New(\"Work Timeout\")\n\t}\n\terr := json.Unmarshal([]byte(w.WorkJSON), &WorkParams)\n\tif err != nil {\n\t\treturn w, WorkParams, errors.New(\"Failed to unmarshal Work Params JSON:\" + err.Error())\n\t}\n\tw.Time.Recieved = time.Now().UTC().Unix()\n\treturn w, WorkParams, nil\n}\n\nfunc (wrk Worker) Submit(w *Work, ResultJSON string, Error string) (*Work, error) {\n\twr := &WorkResult{}\n\twr.ResultJSON = ResultJSON\n\tw.Time.Complete = time.Now().UTC().Unix()\n\tif (w.Time.Added + w.Time.Timeout) > time.Now().UTC().Unix() {\n\t\twr.Error = Error\n\t\twr.Status = \"Complete\"\n\t\tw.Result = wr\n\t\treturn w, nil\n\t}\n\twr.Error = \"Timeout\"\n\twr.Status = \"Timeout\"\n\tw.Result = wr\n\treturn w, errors.New(\"Timeout\")\n}\n\nfunc CreateWork(WorkData interface{}, Timeout int64) (*Work, error) {\n\tNewWork := &Work{}\n\tNewWork.IdHex = bson.NewObjectId().Hex()\n\tNewWork.Result = &WorkResult{\"\", \"Pending\", \"\"}\n\tNewWork.Time = &TimeStats{Timeout: Timeout}\n\tWorkDataJSON, err := json.Marshal(WorkData)\n\tif err != nil {\n\t\treturn &Work{}, errors.New(\"Failed to marshal work data:\" + err.Error())\n\t}\n\tNewWork.WorkJSON = string(WorkDataJSON)\n\treturn NewWork, nil\n}\n\nfunc (w Work) Marshal() string {\n\tMarshalledWork, _ := json.Marshal(w)\n\treturn string(MarshalledWork)\n}\n\nfunc Unmarshal(w string) *Work {\n\tWorkObject := &Work{}\n\t_ = json.Unmarshal([]byte(w), &WorkObject)\n\treturn WorkObject\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"net\/http\"\n\t\"bufio\"\n\t\"log\"\n\t\"encoding\/json\"\n\t\"golang.org\/x\/build\/kubernetes\/api\"\n\t\"fmt\"\n\t\"crypto\/tls\"\n\t\"time\"\n)\n\ntype Stream struct {\n\tType string `json:\"type,omitempty\"`\n\tEvent api.Event `json:\"object\"`\n}\n\nfunc main() {\n\tapiAddr := os.Getenv(\"OPENSHIFT_API_URL\")\n\tapiToken := os.Getenv(\"OPENSHIFT_TOKEN\")\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\treq, err := http.NewRequest(\"GET\", apiAddr + \"\/api\/v1\/events?watch=true\", nil)\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error while opening connection\", err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer \" + apiToken)\n\tresp, err := client.Do(req)\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error while connecting to:\", apiAddr, err)\n\t}\n\n\treader := bufio.NewReader(resp.Body)\n\tfor {\n\t\tline, err := reader.ReadBytes('\\n')\n\t\tif (err != nil) {\n\t\t\tlog.Fatal(\"Error reading from response stream.\", err)\n\t\t}\n\n\t\tevent := Stream{}\n\t\tdecErr := json.Unmarshal(line, &event)\n\t\tif (decErr != nil) {\n\t\t\tlog.Fatal(\"Error decoding json\", err)\n\t\t}\n\n\t\tfmt.Printf(\"%v | Project: %v | Name: %v | Kind: %v | Reason: %v | Message: %v\\n\",\n\t\t\tevent.Event.LastTimestamp,\n\t\t\tevent.Event.Namespace, event.Event.Name,\n\t\t\tevent.Event.Kind, event.Event.Reason, event.Event.Message)\n\t}\n}<commit_msg>Time in different format<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"net\/http\"\n\t\"bufio\"\n\t\"log\"\n\t\"encoding\/json\"\n\t\"golang.org\/x\/build\/kubernetes\/api\"\n\t\"fmt\"\n\t\"crypto\/tls\"\n)\n\ntype Stream struct {\n\tType string `json:\"type,omitempty\"`\n\tEvent api.Event `json:\"object\"`\n}\n\nfunc main() {\n\tapiAddr := os.Getenv(\"OPENSHIFT_API_URL\")\n\tapiToken := os.Getenv(\"OPENSHIFT_TOKEN\")\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\treq, err := http.NewRequest(\"GET\", apiAddr + \"\/api\/v1\/events?watch=true\", nil)\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error while opening connection\", err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer \" + apiToken)\n\tresp, err := client.Do(req)\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error while connecting to:\", apiAddr, err)\n\t}\n\n\treader := bufio.NewReader(resp.Body)\n\tfor {\n\t\tline, err := reader.ReadBytes('\\n')\n\t\tif (err != nil) {\n\t\t\tlog.Fatal(\"Error reading from response stream.\", err)\n\t\t}\n\n\t\tevent := Stream{}\n\t\tdecErr := json.Unmarshal(line, &event)\n\t\tif (decErr != nil) {\n\t\t\tlog.Fatal(\"Error decoding json\", err)\n\t\t}\n\n\t\tfmt.Printf(\"%v | Project: %v | Name: %v | Kind: %v | Reason: %v | Message: %v\\n\",\n\t\t\tevent.Event.LastTimestamp,\n\t\t\tevent.Event.Namespace, event.Event.Name,\n\t\t\tevent.Event.Kind, event.Event.Reason, event.Event.Message)\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/kr\/binarydist\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\/\/\"runtime\"\n\t\/\/\"encoding\/base64\"\n)\n\nvar plat, appPath, version, genDir string\n\ntype current struct {\n\tVersion string\n\tSha256 []byte\n}\n\nfunc generateSha256(path string) []byte {\n\th := sha256.New()\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\th.Write(b)\n\tsum := h.Sum(nil)\n\treturn sum\n\t\/\/return base64.URLEncoding.EncodeToString(sum)\n}\n\ntype gzReader struct {\n\tz, r io.ReadCloser\n}\n\nfunc (g *gzReader) Read(p []byte) (int, error) {\n\treturn g.z.Read(p)\n}\n\nfunc (g *gzReader) Close() error {\n\tg.z.Close()\n\treturn g.r.Close()\n}\n\nfunc newGzReader(r io.ReadCloser) io.ReadCloser {\n\tvar err error\n\tg := new(gzReader)\n\tg.r = r\n\tg.z, err = gzip.NewReader(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn g\n}\n\nfunc createUpdate(path string, platform string) {\n\tc := current{Version: version, Sha256: generateSha256(path)}\n\n\tb, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t}\n\terr = ioutil.WriteFile(filepath.Join(genDir, platform+\".json\"), b, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.MkdirAll(filepath.Join(genDir, version), 0755)\n\n\tvar buf bytes.Buffer\n\tw := gzip.NewWriter(&buf)\n\tf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Write(f)\n\tw.Close() \/\/ You must close this first to flush the bytes to the buffer.\n\terr = ioutil.WriteFile(filepath.Join(genDir, version, platform+\".gz\"), buf.Bytes(), 0755)\n\n\tfiles, err := ioutil.ReadDir(genDir)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor _, file := range files {\n\t\tif file.IsDir() == false {\n\t\t\tcontinue\n\t\t}\n\t\tif file.Name() == version {\n\t\t\tcontinue\n\t\t}\n\n\t\tos.Mkdir(filepath.Join(genDir, file.Name(), version), 0755)\n\n\t\tfName := filepath.Join(genDir, file.Name(), platform+\".gz\")\n\t\told, err := os.Open(fName)\n\t\tif err != nil {\n\t\t\t\/\/ Don't have an old release for this os\/arch, continue on\n\t\t\tcontinue\n\t\t}\n\n\t\tfName = filepath.Join(genDir, version, platform+\".gz\")\n\t\tnewF, err := os.Open(fName)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tar := newGzReader(old)\n\t\tdefer ar.Close()\n\t\tbr := newGzReader(newF)\n\t\tdefer br.Close()\n\t\tpatch := new(bytes.Buffer)\n\t\tif err := binarydist.Diff(ar, br, patch); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tioutil.WriteFile(filepath.Join(genDir, file.Name(), version, platform), patch.Bytes(), 0755)\n\t}\n}\n\nfunc printUsage() {\n fmt.Println(\"Go-Selfupdate - Enable your Golang applications to self update.\\n\\n\")\n fmt.Println(\"Usage:\\n\")\n fmt.Println(\"\\tSingle platform: go-selfupdate myapp 1.2\")\n fmt.Println(\"\\tCross platform: go-selfupdate \/tmp\/mybinares\/ 1.2\")\n}\n\nfunc isArgsPresent() bool {\n if len(os.Args) < 2 {\n return false\n }\n\n return true\n}\n\nfunc createBuildDir() {\n\tos.MkdirAll(genDir, 0755)\n}\n\nfunc main() {\n if isArgsPresent() == false {\n printUsage()\n os.Exit(0)\n }\n\n\tplat = os.Getenv(\"GOOS\") + \"-\" + os.Getenv(\"GOARCH\")\n\tappPath = os.Args[1]\n\tversion = os.Args[2]\n\tgenDir = \"public\"\n\n createBuildDir()\n\n\t\/\/ If dir is given create update for each file\n\tfiles, err := ioutil.ReadDir(appPath)\n\tif err == nil {\n\t\tfor _, file := range files {\n\t\t\tcreateUpdate(filepath.Join(appPath, file.Name()), file.Name())\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tcreateUpdate(appPath, plat)\n}\n<commit_msg>gofmt<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/kr\/binarydist\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\/\/\"runtime\"\n\t\/\/\"encoding\/base64\"\n)\n\nvar plat, appPath, version, genDir string\n\ntype current struct {\n\tVersion string\n\tSha256 []byte\n}\n\nfunc generateSha256(path string) []byte {\n\th := sha256.New()\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\th.Write(b)\n\tsum := h.Sum(nil)\n\treturn sum\n\t\/\/return base64.URLEncoding.EncodeToString(sum)\n}\n\ntype gzReader struct {\n\tz, r io.ReadCloser\n}\n\nfunc (g *gzReader) Read(p []byte) (int, error) {\n\treturn g.z.Read(p)\n}\n\nfunc (g *gzReader) Close() error {\n\tg.z.Close()\n\treturn g.r.Close()\n}\n\nfunc newGzReader(r io.ReadCloser) io.ReadCloser {\n\tvar err error\n\tg := new(gzReader)\n\tg.r = r\n\tg.z, err = gzip.NewReader(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn g\n}\n\nfunc createUpdate(path string, platform string) {\n\tc := current{Version: version, Sha256: generateSha256(path)}\n\n\tb, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t}\n\terr = ioutil.WriteFile(filepath.Join(genDir, platform+\".json\"), b, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.MkdirAll(filepath.Join(genDir, version), 0755)\n\n\tvar buf bytes.Buffer\n\tw := gzip.NewWriter(&buf)\n\tf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Write(f)\n\tw.Close() \/\/ You must close this first to flush the bytes to the buffer.\n\terr = ioutil.WriteFile(filepath.Join(genDir, version, platform+\".gz\"), buf.Bytes(), 0755)\n\n\tfiles, err := ioutil.ReadDir(genDir)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor _, file := range files {\n\t\tif file.IsDir() == false {\n\t\t\tcontinue\n\t\t}\n\t\tif file.Name() == version {\n\t\t\tcontinue\n\t\t}\n\n\t\tos.Mkdir(filepath.Join(genDir, file.Name(), version), 0755)\n\n\t\tfName := filepath.Join(genDir, file.Name(), platform+\".gz\")\n\t\told, err := os.Open(fName)\n\t\tif err != nil {\n\t\t\t\/\/ Don't have an old release for this os\/arch, continue on\n\t\t\tcontinue\n\t\t}\n\n\t\tfName = filepath.Join(genDir, version, platform+\".gz\")\n\t\tnewF, err := os.Open(fName)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tar := newGzReader(old)\n\t\tdefer ar.Close()\n\t\tbr := newGzReader(newF)\n\t\tdefer br.Close()\n\t\tpatch := new(bytes.Buffer)\n\t\tif err := binarydist.Diff(ar, br, patch); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tioutil.WriteFile(filepath.Join(genDir, file.Name(), version, platform), patch.Bytes(), 0755)\n\t}\n}\n\nfunc printUsage() {\n\tfmt.Println(\"Go-Selfupdate - Enable your Golang applications to self update.\\n\\n\")\n\tfmt.Println(\"Usage:\\n\")\n\tfmt.Println(\"\\tSingle platform: go-selfupdate myapp 1.2\")\n\tfmt.Println(\"\\tCross platform: go-selfupdate \/tmp\/mybinares\/ 1.2\")\n}\n\nfunc isArgsPresent() bool {\n\tif len(os.Args) < 2 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc createBuildDir() {\n\tos.MkdirAll(genDir, 0755)\n}\n\nfunc main() {\n\tif isArgsPresent() == false {\n\t\tprintUsage()\n\t\tos.Exit(0)\n\t}\n\n\tplat = os.Getenv(\"GOOS\") + \"-\" + os.Getenv(\"GOARCH\")\n\tappPath = os.Args[1]\n\tversion = os.Args[2]\n\tgenDir = \"public\"\n\n\tcreateBuildDir()\n\n\t\/\/ If dir is given create update for each file\n\tfiles, err := ioutil.ReadDir(appPath)\n\tif err == nil {\n\t\tfor _, file := range files {\n\t\t\tcreateUpdate(filepath.Join(appPath, file.Name()), file.Name())\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tcreateUpdate(appPath, plat)\n}\n<|endoftext|>"} {"text":"<commit_before>package linenotcat\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\nvar (\n\tver string\n\trev string\n)\n\ntype opts struct {\n\tMessage string `short:\"m\" long:\"message\" default:\"\" description:\"Send a text message directly\"`\n\tImageFile string `short:\"i\" long:\"image\" default:\"\" description:\"Send an image file\"`\n\tTee bool `short:\"t\" long:\"tee\" default:\"false\" description:\"Print STDIN to screen before posting\"`\n\tStream bool `short:\"s\" long:\"stream\" default:\"false\" description:\"Post messages to LINE Notify continuously\"`\n\tConfigFile string `long:\"config_file\" default:\"\" description:\"Load the specified configuration file\"`\n\tStatus bool `long:\"status\" default:\"false\" description:\"Show connection status that belongs to the token\"`\n}\n\nfunc parseArgs(args []string) (opt *opts, remainArgs []string) {\n\to := &opts{}\n\tp := flags.NewParser(o, flags.Default)\n\tp.Usage = fmt.Sprintf(\"\\n\\nVersion: %s\\nRevision: %s\", ver, rev)\n\tremainArgs, err := p.ParseArgs(args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\treturn o, remainArgs\n}\n\nfunc Run(args []string) {\n\to, remainArgs := parseArgs(args)\n\n\tvar token string\n\tvar err error\n\tif o.ConfigFile == \"\" {\n\t\ttoken, err = readDefaultToken()\n\t} else {\n\t\ttoken, err = readToken(o.ConfigFile)\n\t}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tarb := &apiRequestBuilder{token: token}\n\n\tif o.Status {\n\t\tstatus := &status{apiRequestBuilder: arb}\n\t\terr := status.getStatus()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tln := &lineNotifier{\n\t\tapiRequestBuilder: arb,\n\t}\n\n\tif o.ImageFile != \"\" {\n\t\twarnIfStreamMode(o)\n\t\twarnIfArgumentRemained(remainArgs)\n\n\t\tmsg := o.Message\n\t\tif msg == \"\" {\n\t\t\tmsg = \"Image file\"\n\t\t}\n\t\terr := ln.notifyImage(o.ImageFile, msg, o.Tee)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tif o.Message != \"\" {\n\t\t\/\/ Send text message directly\n\t\twarnIfStreamMode(o)\n\t\tln.notifyMessage(o.Message, o.Tee)\n\t\treturn\n\t}\n\n\tif o.Stream {\n\t\t\/\/ Stream mode\n\t\twarnIfArgumentRemained(remainArgs)\n\n\t\ts := newStream(ln)\n\t\tgo s.processStreamQueue(o.Tee)\n\t\tgo s.watchStdin()\n\t\tgo s.trap()\n\t\tselect {}\n\n\t\treturn\n\t}\n\n\tif len(remainArgs) > 0 {\n\t\t\/\/ Send file contents\n\t\twarnIfStreamMode(o)\n\t\tln.notifyFile(remainArgs[0], o.Tee)\n\t\treturn\n\t}\n\n\t\/\/ Send messages from STDIN\n\tlines := make(chan string)\n\tgo readFromStdin(lines)\n\n\ttmpFilePath, err := writeTemp(lines)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer os.Remove(tmpFilePath)\n\tln.notifyFile(tmpFilePath, o.Tee)\n}\n\nfunc warnIfStreamMode(o *opts) {\n\tif o.Stream {\n\t\tfmt.Println(\"Given stream option, but it is ignored when image sending mode\")\n\t}\n}\n\nfunc warnIfArgumentRemained(remainArgs []string) {\n\tif len(remainArgs) > 0 {\n\t\tfmt.Println(\"Given file, but it is ignored when stream mode\")\n\t}\n}\n<commit_msg>More friendly message<commit_after>package linenotcat\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\nvar (\n\tver string\n\trev string\n)\n\ntype opts struct {\n\tMessage string `short:\"m\" long:\"message\" default:\"\" description:\"Send a text message directly\"`\n\tImageFile string `short:\"i\" long:\"image\" default:\"\" description:\"Send an image file\"`\n\tTee bool `short:\"t\" long:\"tee\" default:\"false\" description:\"Print STDIN to screen before posting\"`\n\tStream bool `short:\"s\" long:\"stream\" default:\"false\" description:\"Post messages to LINE Notify continuously\"`\n\tConfigFile string `long:\"config_file\" default:\"\" description:\"Load the specified configuration file\"`\n\tStatus bool `long:\"status\" default:\"false\" description:\"Show connection status that belongs to the token\"`\n}\n\nfunc parseArgs(args []string) (opt *opts, remainArgs []string) {\n\to := &opts{}\n\tp := flags.NewParser(o, flags.Default)\n\tp.Usage = fmt.Sprintf(\"\\n\\nVersion: %s\\nRevision: %s\", ver, rev)\n\tremainArgs, err := p.ParseArgs(args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\treturn o, remainArgs\n}\n\nfunc Run(args []string) {\n\to, remainArgs := parseArgs(args)\n\n\tvar token string\n\tvar err error\n\tif o.ConfigFile == \"\" {\n\t\ttoken, err = readDefaultToken()\n\t} else {\n\t\ttoken, err = readToken(o.ConfigFile)\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(`[ERROR] Failed to load configuration file.\nIs configuration file perhaps missing?\nPlease try:\n\t$ echo 'YOUR_ACCESS_TOKEN' > $HOME\/.linenotcat`)\n\t\tos.Exit(1)\n\t}\n\n\tarb := &apiRequestBuilder{token: token}\n\n\tif o.Status {\n\t\tstatus := &status{apiRequestBuilder: arb}\n\t\terr := status.getStatus()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tln := &lineNotifier{\n\t\tapiRequestBuilder: arb,\n\t}\n\n\tif o.ImageFile != \"\" {\n\t\twarnIfStreamMode(o)\n\t\twarnIfArgumentRemained(remainArgs)\n\n\t\tmsg := o.Message\n\t\tif msg == \"\" {\n\t\t\tmsg = \"Image file\"\n\t\t}\n\t\terr := ln.notifyImage(o.ImageFile, msg, o.Tee)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tif o.Message != \"\" {\n\t\t\/\/ Send text message directly\n\t\twarnIfStreamMode(o)\n\t\tln.notifyMessage(o.Message, o.Tee)\n\t\treturn\n\t}\n\n\tif o.Stream {\n\t\t\/\/ Stream mode\n\t\twarnIfArgumentRemained(remainArgs)\n\n\t\ts := newStream(ln)\n\t\tgo s.processStreamQueue(o.Tee)\n\t\tgo s.watchStdin()\n\t\tgo s.trap()\n\t\tselect {}\n\n\t\treturn\n\t}\n\n\tif len(remainArgs) > 0 {\n\t\t\/\/ Send file contents\n\t\twarnIfStreamMode(o)\n\t\tln.notifyFile(remainArgs[0], o.Tee)\n\t\treturn\n\t}\n\n\t\/\/ Send messages from STDIN\n\tlines := make(chan string)\n\tgo readFromStdin(lines)\n\n\ttmpFilePath, err := writeTemp(lines)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer os.Remove(tmpFilePath)\n\tln.notifyFile(tmpFilePath, o.Tee)\n}\n\nfunc warnIfStreamMode(o *opts) {\n\tif o.Stream {\n\t\tfmt.Println(\"Given stream option, but it is ignored when image sending mode\")\n\t}\n}\n\nfunc warnIfArgumentRemained(remainArgs []string) {\n\tif len(remainArgs) > 0 {\n\t\tfmt.Println(\"Given file, but it is ignored when stream mode\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Let's add this docstring thingy\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/THUNDERGROOVE\/SDETool\/scripting\/langs\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/sde\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/sde\/version\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/util\"\n\t_ \"github.com\/THUNDERGROOVE\/SDETool\/util\/log\"\n\t\/\/ Langs\n\t_ \"github.com\/THUNDERGROOVE\/SDETool\/scripting\/lua\"\n)\n\nvar (\n\tbranch string\n\ttagVersion string\n\tcommit string\n)\n\nfunc loadSDE() *sde.SDE {\n\tSDE, err := version.LoadLatest()\n\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] %v\\n\", err.Error())\n\t\treturn nil\n\t}\n\n\tif SDE == nil {\n\t\tfmt.Printf(\"Failed to automatically load an SDE file. Please load it manually\\n\")\n\t\treturn nil\n\t}\n\treturn SDE\n}\n\nfunc main() {\n\t\/\/ If the first argument is a script then run it instead of parsing things\n\tif len(os.Args) > 1 {\n\t\tn := os.Args[1]\n\t\tif strings.Contains(n, \".\") {\n\t\t\tif util.Exists(n) {\n\t\t\t\text := filepath.Ext(n)[1:]\n\t\t\t\tif err := langs.RunScript(ext, n); err != nil {\n\t\t\t\t\tfmt.Println(\"Error running script\", err.Error())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tprintNoArgsText()\n\t\treturn\n\t}\n\n\tvar SDE *sde.SDE\n\tvar Type *sde.SDEType\n\tvar t []*sde.SDEType\n\tvar MultiTypes bool\n\n\tswitch os.Args[1] {\n\tcase \"lookup\":\n\t\tSDE = loadSDE()\n\n\t\tif err := lookupFlagset.Parse(os.Args[2:]); err != nil {\n\t\t\tfmt.Printf(\"[ERROR] Couldn't parse args [%v]\\n\", err.Error())\n\t\t}\n\n\t\tvar err error\n\n\t\tswitch {\n\t\tcase *lookupSDE != \"\":\n\t\t\tSDE, err = sde.Load(*lookupSDE)\n\t\t\tfallthrough\n\n\t\tcase *lookupTID != 0:\n\t\t\tType, err = SDE.GetType(*lookupTID)\n\n\t\tcase *lookupTN != \"\":\n\t\t\tt, err = SDE.Search(*lookupTN)\n\t\t\tif len(t) != 0 {\n\t\t\t\tType = t[0]\n\t\t\t}\n\n\t\tcase *lookupTD != \"\":\n\t\t\tt, err = SDE.Search(*lookupTD)\n\t\t\tif len(t) != 0 {\n\t\t\t\tType = t[0]\n\t\t\t}\n\t\t}\n\n\t\tif Type != nil && *lookupAttr {\n\t\t\tfor k, v := range Type.Attributes {\n\t\t\t\tfmt.Printf(\" %v | %v\\n\", k, v)\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] %v\\n\", err.Error())\n\t\t}\n\tcase \"search\":\n\t\tif err := searchFlagset.Parse(os.Args[2:]); err != nil {\n\t\t\tfmt.Printf(\"[ERROR] Couldn't parse args[%v]\\n\", err.Error())\n\t\t}\n\n\t\tif *searchName == \"\" {\n\t\t\tfmt.Printf(\"search should be supplied a -t flag\\n\")\n\t\t\treturn\n\t\t}\n\n\t\tSDE = loadSDE()\n\t\tvar err error\n\n\t\tswitch {\n\t\tcase *searchSDE != \"\":\n\t\t\tSDE, err = sde.Load(*searchSDE)\n\t\t\tfallthrough\n\n\t\tcase *searchName != \"\":\n\t\t\tt, err = SDE.Search(*searchName)\n\n\t\t\tif len(t) != 0 {\n\t\t\t\tType = t[0]\n\t\t\t}\n\n\t\t\tMultiTypes = true\n\t\t\tfallthrough\n\n\t\tcase *searchAttr == true:\n\t\t\tif len(t) == 1 {\n\t\t\t\tfor k, v := range Type.Attributes {\n\t\t\t\t\tfmt.Printf(\" %v| %v\\n\", k, v)\n\t\t\t\t}\n\t\t\t\tMultiTypes = false\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"[ERROR], %v\\n\", err.Error())\n\t\t\t}\n\t\t}\n\n\tcase \"dump\":\n\t\tif err := dumperFlagset.Parse(os.Args[2:]); err != nil {\n\t\t\tfmt.Printf(\"[ERROR] Couldn't parse args[%v]\\n\", err.Error())\n\t\t}\n\n\t\t\/\/ @TODO: Move this to it's own function\n\t\t\/\/ Is there a better way to do this?\n\t\tcmd := exec.Command(\"sdedumper\",\n\t\t\t\"-i\", fmt.Sprintf(\"%s\", *dumperInFile),\n\t\t\t\"-o\", fmt.Sprintf(\"%s\", *dumperOutFile),\n\t\t\t\"-ver\", fmt.Sprintf(\"%s\", *dumperVersionString),\n\t\t\t\"-official\", fmt.Sprintf(\"%t\", *dumperOfficial),\n\t\t\t\"-v\", fmt.Sprintf(\"%t\", *dumperVerbose))\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(err.Error(), exec.ErrNotFound.Error()) {\n\t\t\tfmt.Printf(\"You do not have sdedumper installed. Please install\\n\")\n\t\t} else {\n\t\t\tlog.Printf(\"Error running sdedumper [%s]\", err.Error())\n\t\t}\n\n\tcase \"help\":\n\t\tfmt.Printf(HelpText)\n\n\tdefault:\n\t\tprintNoArgsText()\n\t}\n\n\tif Type != nil && !MultiTypes {\n\t\tfmt.Printf(\"%v | %v | %v\\n\", Type.TypeID, Type.TypeName, Type.GetName())\n\t} else if MultiTypes {\n\t\tfor _, v := range t {\n\t\t\tfmt.Printf(\"%v | %v | %v\\n\", v.TypeID, v.TypeName, v.GetName())\n\t\t}\n\t} else if SDE != nil {\n\t\tfmt.Printf(\"No type resolved\\n\")\n\t}\n}\n\nfunc printNoArgsText() {\n\tfmt.Println(\"SDETool written by Nick Powell; @THUNDERGROOVE\")\n\tfmt.Printf(\"Version %v@%v#%v\\n\", tagVersion, branch, commit)\n\tfmt.Println(\"Do 'help' for help\")\n}\n<commit_msg>moving over to cli. Much better to write with than using flags<commit_after>\/\/ Let's add this docstring thingy\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/THUNDERGROOVE\/SDETool\/scripting\/langs\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/sde\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/sde\/version\"\n\t\"github.com\/THUNDERGROOVE\/SDETool\/util\"\n\t_ \"github.com\/THUNDERGROOVE\/SDETool\/util\/log\"\n\t\/\/ Langs\n\t_ \"github.com\/THUNDERGROOVE\/SDETool\/scripting\/lua\"\n\t\"github.com\/d4l3k\/messagediff\"\n)\n\nvar (\n\tbranch string\n\ttagVersion string\n\tcommit string\n)\n\n\/\/ Matches a TypeID for DUST\nvar TypeIDRegex = regexp.MustCompile(`3\\d\\d\\d\\d\\d`)\n\n\/*func dumpSDE() {\n\tif err := dumperFlagset.Parse(os.Args[2:]); err != nil {\n\t\tfmt.Printf(\"[ERROR] Couldn't parse args[%v]\\n\", err.Error())\n\t}\n\n\t\/\/ @TODO: Move this to it's own function\n\t\/\/ Is there a better way to do this?\n\tcmd := exec.Command(\"sdedumper\",\n\t\t\"-i\", fmt.Sprintf(\"%s\", *dumperInFile),\n\t\t\"-o\", fmt.Sprintf(\"%s\", *dumperOutFile),\n\t\t\"-ver\", fmt.Sprintf(\"%s\", *dumperVersionString),\n\t\t\"-official\", fmt.Sprintf(\"%t\", *dumperOfficial),\n\t\t\"-v\", fmt.Sprintf(\"%t\", *dumperVerbose))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\terr := cmd.Run()\n\tif err == nil {\n\t\tbreak\n\t}\n\tif strings.Contains(err.Error(), exec.ErrNotFound.Error()) {\n\t\tfmt.Printf(\"You do not have sdedumper installed. Please install\\n\")\n\t} else {\n\t\tlog.Printf(\"Error running sdedumper [%s]\", err.Error())\n\t}\n}*\/\n\nfunc loadSDE(filename string) *sde.SDE {\n\tif filename == \"\" {\n\t\tSDE, err := version.LoadLatest()\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] %v\\n\", err.Error())\n\t\t\treturn nil\n\t\t}\n\n\t\tif SDE == nil {\n\t\t\tfmt.Printf(\"Failed to automatically load an SDE file. Please load it manually\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn SDE\n\t} else {\n\t\tSDE, err := sde.Load(filename)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] %v\\n\", err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn SDE\n\t}\n\treturn nil\n}\n\nfunc main() {\n\t\/\/ If the first argument is a script then run it instead of parsing things\n\tif len(os.Args) > 1 {\n\t\tn := os.Args[1]\n\t\tif strings.Contains(n, \".\") {\n\t\t\tif util.Exists(n) {\n\t\t\t\text := filepath.Ext(n)[1:]\n\t\t\t\tif err := langs.RunScript(ext, n); err != nil {\n\t\t\t\t\tfmt.Println(\"Error running script\", err.Error())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tprintNoArgsText()\n\t\treturn\n\t}\n\n\tcmd := cli.NewApp()\n\tcmd.Name = \"SDETool\"\n\tcmd.Author = \"Nick Powell; @THUNDERGROOVE\"\n\tcmd.Version = fmt.Sprintf(\"%v-%v-%v\", branch, tagVersion, commit)\n\tcmd.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"sdefile, s\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"\",\n\t\t},\n\t}\n\tcmd.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"search\",\n\t\t\tAliases: []string{\"s\"},\n\t\t\tUsage: \"search the sde!\",\n\t\t\tAction: _cliSearchSDE,\n\t\t},\n\t\t{\n\t\t\tName: \"lookup\",\n\t\t\tAliases: []string{\"l\"},\n\t\t\tUsage: \"Lookup a specific type\",\n\t\t\tAction: _cliLookupSDE,\n\t\t},\n\t\t{\n\t\t\tName: \"diff\",\n\t\t\tAliases: []string{\"d\"},\n\t\t\tUsage: \"diff two types\",\n\t\t\tAction: _cliDiff,\n\t\t},\n\t}\n\tcmd.Run(os.Args)\n}\n\nfunc _cliDiff(c *cli.Context) {\n\tfile := c.GlobalString(\"sdefile\")\n\tSDE := loadSDE(file)\n\n\tt1 := c.Args().Get(0)\n\tt2 := c.Args().Get(1)\n\tif t1 == t2 {\n\t\tfmt.Printf(\"Why are you trying to compare two of the same types?\\n\")\n\t}\n\n\tif !TypeIDRegex.Match([]byte(t1)) || !TypeIDRegex.Match([]byte(t2)) {\n\t\tfmt.Printf(\"One of the given typeIDs were not valid\\n\")\n\t}\n\n\ttid1, _ := strconv.Atoi(t1)\n\ttid2, _ := strconv.Atoi(t2)\n\n\ttt1, err := SDE.GetType(tid1)\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] failed to get first type: %v\\n\", err.Error())\n\t}\n\n\ttt2, err := SDE.GetType(tid2)\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] failed to get second type: %v\\n\", err.Error())\n\t}\n\n\tdiff, equal := messagediff.PrettyDiff(tt1, tt2)\n\tfmt.Printf(\"Equal?: %v\\n\", equal)\n\tfmt.Printf(\"\\n\\n%v\\n\\n\", diff)\n}\n\nfunc _cliLookupSDE(c *cli.Context) {\n\tfile := c.GlobalString(\"sdefile\")\n\tSDE := loadSDE(file)\n\n\ts := c.Args().First()\n\tif !TypeIDRegex.Match([]byte(s)) {\n\t\tfmt.Printf(\"Not recognized as a typeID doing a search instead and using the first result\\n\")\n\n\t\tres, err := SDE.Search(s)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] failed getting type: %v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tt := res[0]\n\t\tfmt.Printf(\"[ %v | %v | %v ]\\n\", t.TypeID, t.TypeName, t.GetName())\n\t\treturn\n\t}\n\tid, _ := strconv.Atoi(s)\n\tt, err := SDE.GetType(id)\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] failed getting type: %v\\n\", err.Error())\n\t\treturn\n\t}\n\tfmt.Printf(\"[ %v | %v | %v ]\\n\", t.TypeID, t.TypeName, t.GetName())\n}\n\nfunc _cliSearchSDE(c *cli.Context) {\n\tfile := c.GlobalString(\"sdefile\")\n\tSDE := loadSDE(file)\n\n\ts := c.Args().First()\n\tvalues, err := SDE.Search(s)\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] Failed to search: %v\\n\", err.Error())\n\t}\n\n\tfor _, v := range values {\n\t\tfmt.Printf(\"[ %v | %v | %v ]\\n\", v.TypeID, v.TypeName, v.GetName())\n\t}\n}\n\nfunc printNoArgsText() {\n\tfmt.Println(\"SDETool written by Nick Powell; @THUNDERGROOVE\")\n\tfmt.Printf(\"Version %v@%v#%v\\n\", tagVersion, branch, commit)\n\tfmt.Println(\"Do 'help' for help\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2016 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/mattn\/go-isatty\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar isTTY = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())\n\nfunc main() {\n\t\/\/ This won't be needed in cli v2\n\tcli.VersionFlag.Name = \"version\"\n\tcli.HelpFlag.Name = \"help\"\n\tcli.HelpFlag.Hidden = true\n\n\tapp := cli.NewApp()\n\tapp.Name = \"k6\"\n\tapp.Usage = \"a next generation load generator\"\n\tapp.Version = \"0.16.0\"\n\tapp.Commands = []cli.Command{\n\t\tcommandRun,\n\t\tcommandInspect,\n\t\tcommandArchive,\n\t\tcommandLogin,\n\t\tcommandStatus,\n\t\tcommandStats,\n\t\tcommandScale,\n\t\tcommandPause,\n\t\tcommandResume,\n\t}\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose, v\",\n\t\t\tUsage: \"show debug messages\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"address, a\",\n\t\t\tUsage: \"address for the API\",\n\t\t\tValue: \"127.0.0.1:6565\",\n\t\t\tEnvVar: \"K6_ADDRESS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-color, n\",\n\t\t\tUsage: \"disable colored output\",\n\t\t\tEnvVar: \"K6_NO_COLOR\",\n\t\t},\n\t}\n\tapp.Before = func(cc *cli.Context) error {\n\t\tif cc.Bool(\"verbose\") {\n\t\t\tlog.SetLevel(log.DebugLevel)\n\t\t}\n\t\tif cc.Bool(\"no-color\") {\n\t\t\tcolor.NoColor = true\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err := app.Run(os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>v0.17.0<commit_after>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2016 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/mattn\/go-isatty\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar isTTY = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())\n\nfunc main() {\n\t\/\/ This won't be needed in cli v2\n\tcli.VersionFlag.Name = \"version\"\n\tcli.HelpFlag.Name = \"help\"\n\tcli.HelpFlag.Hidden = true\n\n\tapp := cli.NewApp()\n\tapp.Name = \"k6\"\n\tapp.Usage = \"a next generation load generator\"\n\tapp.Version = \"0.17.0\"\n\tapp.Commands = []cli.Command{\n\t\tcommandRun,\n\t\tcommandInspect,\n\t\tcommandArchive,\n\t\tcommandLogin,\n\t\tcommandStatus,\n\t\tcommandStats,\n\t\tcommandScale,\n\t\tcommandPause,\n\t\tcommandResume,\n\t}\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose, v\",\n\t\t\tUsage: \"show debug messages\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"address, a\",\n\t\t\tUsage: \"address for the API\",\n\t\t\tValue: \"127.0.0.1:6565\",\n\t\t\tEnvVar: \"K6_ADDRESS\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-color, n\",\n\t\t\tUsage: \"disable colored output\",\n\t\t\tEnvVar: \"K6_NO_COLOR\",\n\t\t},\n\t}\n\tapp.Before = func(cc *cli.Context) error {\n\t\tif cc.Bool(\"verbose\") {\n\t\t\tlog.SetLevel(log.DebugLevel)\n\t\t}\n\t\tif cc.Bool(\"no-color\") {\n\t\t\tcolor.NoColor = true\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err := app.Run(os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main \/\/ import \"github.com\/inCaller\/prometheus_bot\"\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"math\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gin-gonic\/gin\/binding\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"html\/template\"\n)\n\ntype Alerts struct {\n\tAlerts []Alert `json:\"alerts\"`\n\tCommonAnnotations map[string]interface{} `json:\"commonAnnotations\"`\n\tCommonLabels map[string]interface{} `json:\"commonLabels\"`\n\tExternalURL string `json:\"externalURL\"`\n\tGroupKey int `json:\"groupKey\"`\n\tGroupLabels map[string]interface{} `json:\"groupLabels\"`\n\tReceiver string `json:\"receiver\"`\n\tStatus string `json:\"status\"`\n\tVersion int `json:\"version\"`\n}\n\ntype Alert struct {\n\tAnnotations map[string]interface{} `json:\"annotations\"`\n\tEndsAt string `json:\"sendsAt\"`\n\tGeneratorURL string `json:\"generatorURL\"`\n\tLabels map[string]interface{} `json:\"labels\"`\n\tStartsAt string `json:\"startsAt\"`\n}\n\ntype Config struct {\n\tTelegramToken string \t`yaml:\"telegram_token\"`\n\tTemplatePath string \t`yaml:\"template_path\"`\n\tTimeZone string \t\t\t`yaml:\"template_time_zone\"`\n\tTimeOutFormat string `yaml:\"template_time_outdata\"`\n}\n\nconst (\n\tKb = iota\n\tMb\n\tGb\n\tTb\n\tPb\n\tScaleSize_MAX\n)\n\n\nfunc RoundPrec(x float64, prec int) float64 {\n\tif math.IsNaN(x) || math.IsInf(x, 0) {\n\t\treturn x\n\t}\n\n\tsign := 1.0\n\tif x < 0 {\n\t\tsign = -1\n\t\tx *= -1\n\t}\n\n\tvar rounder float64\n\tpow := math.Pow(10, float64(prec))\n\tintermed := x * pow\n\t_, frac := math.Modf(intermed)\n\n\tif frac >= 0.5 {\n\t\trounder = math.Ceil(intermed)\n\t} else {\n\t\trounder = math.Floor(intermed)\n\t}\n\n\treturn rounder \/ pow * sign\n}\n\n\n\n\/******************************************************************************\n *\n * Function for formatting template\n *\n ******************************************************************************\/\n\nfunc str_Format_byte(in string) string {\n\tvar j1 int\n\tvar str_Size string\n\n\tf, err := strconv.ParseFloat(in, 64)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor j1 < ScaleSize_MAX {\n\n\t\tif f > 1024 {\n\t\tf\/=1024.0\n\t\t} else {\n\n\t\t\tswitch j1 {\n\t\t\tcase Kb:\n\t\t\t\tstr_Size = \"Kb\"\n\t\t\tcase Mb:\n\t\t\t\tstr_Size = \"Mb\"\n\t\t\tcase Gb:\n\t\t\t\tstr_Size = \"Gb\"\n\t\t\tcase Tb:\n\t\t\t\tstr_Size = \"Tb\"\n\t\t\tcase Pb:\n\t\t\t\tstr_Size = \"Pb\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tj1++\n\t}\n\n\tstr_fl := strconv.FormatFloat(f, 'f', 2, 64)\n\n\treturn fmt.Sprintf(\"%s %s\", str_fl, str_Size)\n}\n\nfunc str_formatFloat(f string) string {\n\t\tv,_ := strconv.ParseFloat(f, 64)\n\t\tv = RoundPrec(v,2)\n\t\treturn strconv.FormatFloat(v, 'f', -1, 64)\n}\n\n\nfunc str_FormatDate(toformat string) string {\n\n\t\/\/ Error handling\n\tif cfg.TimeZone == \"\" {\n\t\tlog.Println(\"template_time_zone is not set, if you use template and `str_FormatDate` func is required\")\n\t\tpanic(nil)\n\t}\n\n\tif cfg.TimeOutFormat == \"\" {\n\t\tlog.Println(\"template_time_outdata param is not set, if you use template and `str_FormatDate` func is required\")\n\t\tpanic(nil)\n\t}\n\n\tIN_layout := \"2006-01-02T15:04:05.000-07:00\"\n\n\tt, err := time.Parse(IN_layout, toformat)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tloc, _ := time.LoadLocation(cfg.TimeZone)\n\n\treturn t.In(loc).Format(cfg.TimeOutFormat)\n}\n\nfunc HasKey(dict map[string] interface{}, key_search string) bool {\n\tif _, ok := dict[key_search]; ok {\n\t return true\n\t}\n\treturn false\n}\n\n\/\/ Global\nvar config_path = flag.String(\"c\", \"config.yaml\", \"Path to a config file\")\nvar listen_addr = flag.String(\"l\", \":9087\", \"Listen address\")\nvar template_path = flag.String(\"t\", \"\", \"Path to a template file\")\nvar debug = flag.Bool(\"d\", false, \"Debug template\")\n\nvar cfg = Config{}\nvar bot *tgbotapi.BotAPI\nvar tmpH *template.Template\n\n\/\/ Template addictional functions map\nvar funcMap = template.FuncMap{\n\t\"str_FormatDate\" : str_FormatDate,\n\t\"str_UpperCase\":strings.ToUpper,\n\t\"str_LowerCase\":strings.ToLower,\n\t\"str_Title\":strings.Title,\n\t\"str_formatFloat\":str_formatFloat,\n\t\"str_Format_byte\":str_Format_byte,\n\t\"HasKey\":HasKey,\n}\n\nfunc telegramBot(bot *tgbotapi.BotAPI) {\n\tu := tgbotapi.NewUpdate(0)\n\tu.Timeout = 60\n\n\tupdates, err := bot.GetUpdatesChan(u)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor update := range updates {\n\t\tif update.Message.NewChatMember != nil {\n\t\t\tif update.Message.NewChatMember.UserName == bot.Self.UserName && update.Message.Chat.Type == \"group\" {\n\t\t\t\tmsg := tgbotapi.NewMessage(update.Message.Chat.ID, fmt.Sprintf(\"Chat id is '%d'\", update.Message.Chat.ID))\n\t\t\t\tbot.Send(msg)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc loadTemplate(tmplPath string) *template.Template {\n\t\/\/ let's read template\n\ttmpH, err := template.New(path.Base(tmplPath)).Funcs(funcMap).ParseFiles(cfg.TemplatePath)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem reading parsing template file: %v\", err)\n\t} else {\n\t\tlog.Printf(\"Load template file:%s\", tmplPath)\n\t}\n\n\treturn tmpH\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tcontent, err := ioutil.ReadFile(*config_path)\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem reading configuration file: %v\", err)\n\t}\n\terr = yaml.Unmarshal(content, &cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing configuration file: %v\", err)\n\t}\n\n\tif *template_path != \"\" {\n\t\tcfg.TemplatePath = *template_path\n\t}\n\n\tbot_tmp, err := tgbotapi.NewBotAPI(cfg.TelegramToken)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbot = bot_tmp\n\tif cfg.TemplatePath != \"\" {\n\n\t\ttmpH = loadTemplate(cfg.TemplatePath)\n\n\t\tif cfg.TimeZone == \"\" {\n\t\t\tlog.Fatalf(\"You must define time_zone of your bot\")\n\t\t\tpanic(-1)\n\t\t}\n\n\t} else {\n\t\t*debug = false\n\t\ttmpH = nil\n\t}\n\t\/\/bot.Debug = true\n\n\tlog.Printf(\"Authorized on account %s\", bot.Self.UserName)\n\n\tgo telegramBot(bot)\n\n\trouter := gin.Default()\n\n\trouter.GET(\"\/ping\/:chatid\", GET_Handling)\n\trouter.POST(\"\/alert\/:chatid\", POST_Handling)\n\trouter.Run(*listen_addr)\n}\n\nfunc GET_Handling(c *gin.Context) {\n\tlog.Printf(\"Recived GET\")\n\tchatid, err := strconv.ParseInt(c.Param(\"chatid\"), 10, 64)\n\tif err != nil {\n\t\tlog.Printf(\"Cat't parse chat id: %q\", c.Param(\"chatid\"))\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t})\n\t\treturn\n\t}\n\n\tlog.Printf(\"Bot test: %d\", chatid)\n\tmsgtext := fmt.Sprintf(\"Some HTTP triggered notification by prometheus bot... %d\", chatid)\n\tmsg := tgbotapi.NewMessage(chatid, msgtext)\n\tsendmsg, err := bot.Send(msg)\n\tif err == nil {\n\t\tc.String(http.StatusOK, msgtext)\n\t} else {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t\t\"message\": sendmsg,\n\t\t})\n\t}\n}\n\nfunc AlertFormatStandard(alerts Alerts) string {\n\tkeys := make([]string, 0, len(alerts.GroupLabels))\n\tfor k := range alerts.GroupLabels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tgroupLabels := make([]string, 0, len(alerts.GroupLabels))\n\tfor _, k := range keys {\n\t\tgroupLabels = append(groupLabels, fmt.Sprintf(\"%s=<code>%s<\/code>\", k, alerts.GroupLabels[k]))\n\t}\n\n\tkeys = make([]string, 0, len(alerts.CommonLabels))\n\tfor k := range alerts.CommonLabels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tcommonLabels := make([]string, 0, len(alerts.CommonLabels))\n\tfor _, k := range keys {\n\t\tif _, ok := alerts.GroupLabels[k]; !ok {\n\t\t\tcommonLabels = append(commonLabels, fmt.Sprintf(\"%s=<code>%s<\/code>\", k, alerts.CommonLabels[k]))\n\t\t}\n\t}\n\n\tkeys = make([]string, 0, len(alerts.CommonAnnotations))\n\tfor k := range alerts.CommonAnnotations {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tcommonAnnotations := make([]string, 0, len(alerts.CommonAnnotations))\n\tfor _, k := range keys {\n\t\tcommonAnnotations = append(commonAnnotations, fmt.Sprintf(\"\\n%s: <code>%s<\/code>\", k, alerts.CommonAnnotations[k]))\n\t}\n\n\talertDetails := make([]string, len(alerts.Alerts))\n\tfor i, a := range alerts.Alerts {\n\t\tif instance, ok := a.Labels[\"instance\"]; ok {\n\t\t\tinstanceString, _ := instance.(string)\n\t\t\talertDetails[i] += strings.Split(instanceString, \":\")[0]\n\t\t}\n\t\tif job, ok := a.Labels[\"job\"]; ok {\n\t\t\talertDetails[i] += fmt.Sprintf(\"[%s]\", job)\n\t\t}\n\t\tif a.GeneratorURL != \"\" {\n\t\t\talertDetails[i] = fmt.Sprintf(\"<a href='%s'>%s<\/a>\", a.GeneratorURL, alertDetails[i])\n\t\t}\n\t}\n\treturn fmt.Sprintf(\n\t\t\"<a href='%s\/#\/alerts?receiver=%s'>[%s:%d]<\/a>\\ngrouped by: %s\\nlabels: %s%s\\n%s\",\n\t\talerts.ExternalURL,\n\t\talerts.Receiver,\n\t\tstrings.ToUpper(alerts.Status),\n\t\tlen(alerts.Alerts),\n\t\tstrings.Join(groupLabels, \", \"),\n\t\tstrings.Join(commonLabels, \", \"),\n\t\tstrings.Join(commonAnnotations, \"\"),\n\t\tstrings.Join(alertDetails, \", \"),\n\t)\n}\n\nfunc AlertFormatTemplate(alerts Alerts) string {\n\tvar bytesBuff bytes.Buffer\n\tvar err error\n\n\twriter := io.Writer(&bytesBuff)\n\n\tif *debug {\n\t\tlog.Printf(\"Reloading Template\\n\")\n\t\t\/\/ reload template bacause we in debug mode\n\t\ttmpH = loadTemplate(cfg.TemplatePath)\n\t}\n\n\ttmpH.Funcs(funcMap)\n\terr = tmpH.Execute(writer, alerts)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem with template execution: %v\", err)\n\t\tpanic(err)\n\t}\n\n\treturn bytesBuff.String()\n}\n\nfunc POST_Handling(c *gin.Context) {\n\tvar msgtext string\n\tvar alerts Alerts\n\n\tchatid, err := strconv.ParseInt(c.Param(\"chatid\"), 10, 64)\n\n\tlog.Printf(\"Bot alert post: %d\", chatid)\n\n\tif err != nil {\n\t\tlog.Printf(\"Cat't parse chat id: %q\", c.Param(\"chatid\"))\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t})\n\t\treturn\n\t}\n\n\tbinding.JSON.Bind(c.Request, &alerts)\n\n\ts, err := json.Marshal(alerts)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tlog.Println(\"+------------------ A L E R T J S O N -------------------+\")\n\tlog.Printf(\"%s\", s)\n\tlog.Println(\"+-----------------------------------------------------------+\\n\\n\")\n\n\t\/\/ Decide how format Text\n\tif cfg.TemplatePath == \"\" {\n\t\tmsgtext = AlertFormatStandard(alerts)\n\t} else {\n\t\tmsgtext = AlertFormatTemplate(alerts)\n\t}\n\t\/\/ Print in Log result message\n\tlog.Println(\"+--------------- F I N A L M E S S A G E ---------------+\")\n\tlog.Println(msgtext)\n\tlog.Println(\"+-----------------------------------------------------------+\")\n\n\tmsg := tgbotapi.NewMessage(chatid, msgtext)\n\tmsg.ParseMode = tgbotapi.ModeHTML\n\n\tmsg.DisableWebPagePreview = true\n\n\tsendmsg, err := bot.Send(msg)\n\tif err == nil {\n\t\tc.String(http.StatusOK, \"telegram msg sent.\")\n\t} else {\n\t\tlog.Printf(\"Error sending message: %s\", err)\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t\t\"message\": sendmsg,\n\t\t\t\"srcmsg\": fmt.Sprint(msgtext),\n\t\t})\n\t\tmsg := tgbotapi.NewMessage(chatid, \"Error sending message, checkout logs\")\n\t\tbot.Send(msg)\n\t}\n}\n<commit_msg>Set gin in release mode<commit_after>package main \/\/ import \"github.com\/inCaller\/prometheus_bot\"\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"math\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gin-gonic\/gin\/binding\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"html\/template\"\n)\n\ntype Alerts struct {\n\tAlerts []Alert `json:\"alerts\"`\n\tCommonAnnotations map[string]interface{} `json:\"commonAnnotations\"`\n\tCommonLabels map[string]interface{} `json:\"commonLabels\"`\n\tExternalURL string `json:\"externalURL\"`\n\tGroupKey int `json:\"groupKey\"`\n\tGroupLabels map[string]interface{} `json:\"groupLabels\"`\n\tReceiver string `json:\"receiver\"`\n\tStatus string `json:\"status\"`\n\tVersion int `json:\"version\"`\n}\n\ntype Alert struct {\n\tAnnotations map[string]interface{} `json:\"annotations\"`\n\tEndsAt string `json:\"sendsAt\"`\n\tGeneratorURL string `json:\"generatorURL\"`\n\tLabels map[string]interface{} `json:\"labels\"`\n\tStartsAt string `json:\"startsAt\"`\n}\n\ntype Config struct {\n\tTelegramToken string \t`yaml:\"telegram_token\"`\n\tTemplatePath string \t`yaml:\"template_path\"`\n\tTimeZone string \t\t\t`yaml:\"template_time_zone\"`\n\tTimeOutFormat string `yaml:\"template_time_outdata\"`\n}\n\nconst (\n\tKb = iota\n\tMb\n\tGb\n\tTb\n\tPb\n\tScaleSize_MAX\n)\n\n\nfunc RoundPrec(x float64, prec int) float64 {\n\tif math.IsNaN(x) || math.IsInf(x, 0) {\n\t\treturn x\n\t}\n\n\tsign := 1.0\n\tif x < 0 {\n\t\tsign = -1\n\t\tx *= -1\n\t}\n\n\tvar rounder float64\n\tpow := math.Pow(10, float64(prec))\n\tintermed := x * pow\n\t_, frac := math.Modf(intermed)\n\n\tif frac >= 0.5 {\n\t\trounder = math.Ceil(intermed)\n\t} else {\n\t\trounder = math.Floor(intermed)\n\t}\n\n\treturn rounder \/ pow * sign\n}\n\n\n\n\/******************************************************************************\n *\n * Function for formatting template\n *\n ******************************************************************************\/\n\nfunc str_Format_byte(in string) string {\n\tvar j1 int\n\tvar str_Size string\n\n\tf, err := strconv.ParseFloat(in, 64)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor j1 < ScaleSize_MAX {\n\n\t\tif f > 1024 {\n\t\tf\/=1024.0\n\t\t} else {\n\n\t\t\tswitch j1 {\n\t\t\tcase Kb:\n\t\t\t\tstr_Size = \"Kb\"\n\t\t\tcase Mb:\n\t\t\t\tstr_Size = \"Mb\"\n\t\t\tcase Gb:\n\t\t\t\tstr_Size = \"Gb\"\n\t\t\tcase Tb:\n\t\t\t\tstr_Size = \"Tb\"\n\t\t\tcase Pb:\n\t\t\t\tstr_Size = \"Pb\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tj1++\n\t}\n\n\tstr_fl := strconv.FormatFloat(f, 'f', 2, 64)\n\n\treturn fmt.Sprintf(\"%s %s\", str_fl, str_Size)\n}\n\nfunc str_formatFloat(f string) string {\n\t\tv,_ := strconv.ParseFloat(f, 64)\n\t\tv = RoundPrec(v,2)\n\t\treturn strconv.FormatFloat(v, 'f', -1, 64)\n}\n\n\nfunc str_FormatDate(toformat string) string {\n\n\t\/\/ Error handling\n\tif cfg.TimeZone == \"\" {\n\t\tlog.Println(\"template_time_zone is not set, if you use template and `str_FormatDate` func is required\")\n\t\tpanic(nil)\n\t}\n\n\tif cfg.TimeOutFormat == \"\" {\n\t\tlog.Println(\"template_time_outdata param is not set, if you use template and `str_FormatDate` func is required\")\n\t\tpanic(nil)\n\t}\n\n\tIN_layout := \"2006-01-02T15:04:05.000-07:00\"\n\n\tt, err := time.Parse(IN_layout, toformat)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tloc, _ := time.LoadLocation(cfg.TimeZone)\n\n\treturn t.In(loc).Format(cfg.TimeOutFormat)\n}\n\nfunc HasKey(dict map[string] interface{}, key_search string) bool {\n\tif _, ok := dict[key_search]; ok {\n\t return true\n\t}\n\treturn false\n}\n\n\/\/ Global\nvar config_path = flag.String(\"c\", \"config.yaml\", \"Path to a config file\")\nvar listen_addr = flag.String(\"l\", \":9087\", \"Listen address\")\nvar template_path = flag.String(\"t\", \"\", \"Path to a template file\")\nvar debug = flag.Bool(\"d\", false, \"Debug template\")\n\nvar cfg = Config{}\nvar bot *tgbotapi.BotAPI\nvar tmpH *template.Template\n\n\/\/ Template addictional functions map\nvar funcMap = template.FuncMap{\n\t\"str_FormatDate\" : str_FormatDate,\n\t\"str_UpperCase\":strings.ToUpper,\n\t\"str_LowerCase\":strings.ToLower,\n\t\"str_Title\":strings.Title,\n\t\"str_formatFloat\":str_formatFloat,\n\t\"str_Format_byte\":str_Format_byte,\n\t\"HasKey\":HasKey,\n}\n\nfunc telegramBot(bot *tgbotapi.BotAPI) {\n\tu := tgbotapi.NewUpdate(0)\n\tu.Timeout = 60\n\n\tupdates, err := bot.GetUpdatesChan(u)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor update := range updates {\n\t\tif update.Message.NewChatMember != nil {\n\t\t\tif update.Message.NewChatMember.UserName == bot.Self.UserName && update.Message.Chat.Type == \"group\" {\n\t\t\t\tmsg := tgbotapi.NewMessage(update.Message.Chat.ID, fmt.Sprintf(\"Chat id is '%d'\", update.Message.Chat.ID))\n\t\t\t\tbot.Send(msg)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc loadTemplate(tmplPath string) *template.Template {\n\t\/\/ let's read template\n\ttmpH, err := template.New(path.Base(tmplPath)).Funcs(funcMap).ParseFiles(cfg.TemplatePath)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem reading parsing template file: %v\", err)\n\t} else {\n\t\tlog.Printf(\"Load template file:%s\", tmplPath)\n\t}\n\n\treturn tmpH\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tcontent, err := ioutil.ReadFile(*config_path)\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem reading configuration file: %v\", err)\n\t}\n\terr = yaml.Unmarshal(content, &cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing configuration file: %v\", err)\n\t}\n\n\tif *template_path != \"\" {\n\t\tcfg.TemplatePath = *template_path\n\t}\n\n\tbot_tmp, err := tgbotapi.NewBotAPI(cfg.TelegramToken)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbot = bot_tmp\n\tif cfg.TemplatePath != \"\" {\n\n\t\ttmpH = loadTemplate(cfg.TemplatePath)\n\n\t\tif cfg.TimeZone == \"\" {\n\t\t\tlog.Fatalf(\"You must define time_zone of your bot\")\n\t\t\tpanic(-1)\n\t\t}\n\n\t} else {\n\t\t*debug = false\n\t\ttmpH = nil\n\t}\n if !debug {\n gin.SetMode(gin.ReleaseMode)\n }\n\n\tlog.Printf(\"Authorized on account %s\", bot.Self.UserName)\n\n\tgo telegramBot(bot)\n\n\trouter := gin.Default()\n\n\trouter.GET(\"\/ping\/:chatid\", GET_Handling)\n\trouter.POST(\"\/alert\/:chatid\", POST_Handling)\n\trouter.Run(*listen_addr)\n}\n\nfunc GET_Handling(c *gin.Context) {\n\tlog.Printf(\"Recived GET\")\n\tchatid, err := strconv.ParseInt(c.Param(\"chatid\"), 10, 64)\n\tif err != nil {\n\t\tlog.Printf(\"Cat't parse chat id: %q\", c.Param(\"chatid\"))\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t})\n\t\treturn\n\t}\n\n\tlog.Printf(\"Bot test: %d\", chatid)\n\tmsgtext := fmt.Sprintf(\"Some HTTP triggered notification by prometheus bot... %d\", chatid)\n\tmsg := tgbotapi.NewMessage(chatid, msgtext)\n\tsendmsg, err := bot.Send(msg)\n\tif err == nil {\n\t\tc.String(http.StatusOK, msgtext)\n\t} else {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t\t\"message\": sendmsg,\n\t\t})\n\t}\n}\n\nfunc AlertFormatStandard(alerts Alerts) string {\n\tkeys := make([]string, 0, len(alerts.GroupLabels))\n\tfor k := range alerts.GroupLabels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tgroupLabels := make([]string, 0, len(alerts.GroupLabels))\n\tfor _, k := range keys {\n\t\tgroupLabels = append(groupLabels, fmt.Sprintf(\"%s=<code>%s<\/code>\", k, alerts.GroupLabels[k]))\n\t}\n\n\tkeys = make([]string, 0, len(alerts.CommonLabels))\n\tfor k := range alerts.CommonLabels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tcommonLabels := make([]string, 0, len(alerts.CommonLabels))\n\tfor _, k := range keys {\n\t\tif _, ok := alerts.GroupLabels[k]; !ok {\n\t\t\tcommonLabels = append(commonLabels, fmt.Sprintf(\"%s=<code>%s<\/code>\", k, alerts.CommonLabels[k]))\n\t\t}\n\t}\n\n\tkeys = make([]string, 0, len(alerts.CommonAnnotations))\n\tfor k := range alerts.CommonAnnotations {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tcommonAnnotations := make([]string, 0, len(alerts.CommonAnnotations))\n\tfor _, k := range keys {\n\t\tcommonAnnotations = append(commonAnnotations, fmt.Sprintf(\"\\n%s: <code>%s<\/code>\", k, alerts.CommonAnnotations[k]))\n\t}\n\n\talertDetails := make([]string, len(alerts.Alerts))\n\tfor i, a := range alerts.Alerts {\n\t\tif instance, ok := a.Labels[\"instance\"]; ok {\n\t\t\tinstanceString, _ := instance.(string)\n\t\t\talertDetails[i] += strings.Split(instanceString, \":\")[0]\n\t\t}\n\t\tif job, ok := a.Labels[\"job\"]; ok {\n\t\t\talertDetails[i] += fmt.Sprintf(\"[%s]\", job)\n\t\t}\n\t\tif a.GeneratorURL != \"\" {\n\t\t\talertDetails[i] = fmt.Sprintf(\"<a href='%s'>%s<\/a>\", a.GeneratorURL, alertDetails[i])\n\t\t}\n\t}\n\treturn fmt.Sprintf(\n\t\t\"<a href='%s\/#\/alerts?receiver=%s'>[%s:%d]<\/a>\\ngrouped by: %s\\nlabels: %s%s\\n%s\",\n\t\talerts.ExternalURL,\n\t\talerts.Receiver,\n\t\tstrings.ToUpper(alerts.Status),\n\t\tlen(alerts.Alerts),\n\t\tstrings.Join(groupLabels, \", \"),\n\t\tstrings.Join(commonLabels, \", \"),\n\t\tstrings.Join(commonAnnotations, \"\"),\n\t\tstrings.Join(alertDetails, \", \"),\n\t)\n}\n\nfunc AlertFormatTemplate(alerts Alerts) string {\n\tvar bytesBuff bytes.Buffer\n\tvar err error\n\n\twriter := io.Writer(&bytesBuff)\n\n\tif *debug {\n\t\tlog.Printf(\"Reloading Template\\n\")\n\t\t\/\/ reload template bacause we in debug mode\n\t\ttmpH = loadTemplate(cfg.TemplatePath)\n\t}\n\n\ttmpH.Funcs(funcMap)\n\terr = tmpH.Execute(writer, alerts)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Problem with template execution: %v\", err)\n\t\tpanic(err)\n\t}\n\n\treturn bytesBuff.String()\n}\n\nfunc POST_Handling(c *gin.Context) {\n\tvar msgtext string\n\tvar alerts Alerts\n\n\tchatid, err := strconv.ParseInt(c.Param(\"chatid\"), 10, 64)\n\n\tlog.Printf(\"Bot alert post: %d\", chatid)\n\n\tif err != nil {\n\t\tlog.Printf(\"Cat't parse chat id: %q\", c.Param(\"chatid\"))\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t})\n\t\treturn\n\t}\n\n\tbinding.JSON.Bind(c.Request, &alerts)\n\n\ts, err := json.Marshal(alerts)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tlog.Println(\"+------------------ A L E R T J S O N -------------------+\")\n\tlog.Printf(\"%s\", s)\n\tlog.Println(\"+-----------------------------------------------------------+\\n\\n\")\n\n\t\/\/ Decide how format Text\n\tif cfg.TemplatePath == \"\" {\n\t\tmsgtext = AlertFormatStandard(alerts)\n\t} else {\n\t\tmsgtext = AlertFormatTemplate(alerts)\n\t}\n\t\/\/ Print in Log result message\n\tlog.Println(\"+--------------- F I N A L M E S S A G E ---------------+\")\n\tlog.Println(msgtext)\n\tlog.Println(\"+-----------------------------------------------------------+\")\n\n\tmsg := tgbotapi.NewMessage(chatid, msgtext)\n\tmsg.ParseMode = tgbotapi.ModeHTML\n\n\tmsg.DisableWebPagePreview = true\n\n\tsendmsg, err := bot.Send(msg)\n\tif err == nil {\n\t\tc.String(http.StatusOK, \"telegram msg sent.\")\n\t} else {\n\t\tlog.Printf(\"Error sending message: %s\", err)\n\t\tc.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"err\": fmt.Sprint(err),\n\t\t\t\"message\": sendmsg,\n\t\t\t\"srcmsg\": fmt.Sprint(msgtext),\n\t\t})\n\t\tmsg := tgbotapi.NewMessage(chatid, \"Error sending message, checkout logs\")\n\t\tbot.Send(msg)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\/\/\"os\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/ec2metadata\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n)\n\nvar debug = false\n\nfunc getRegionString() (string, error) {\n\tsvc := ec2metadata.New(session.New())\n\tregion, err := svc.Region()\n\treturn region, err\n}\n\nfunc getInstanceId() (string, error) {\n\tsvc := ec2metadata.New(session.New())\n\tidentityDocument, err := svc.GetInstanceIdentityDocument()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn identityDocument.InstanceID, nil\n}\n\nfunc getTag(name string, regionString string) (string, error) {\n\t\/\/svc := ec2.New(mySession, aws.NewConfig().WithRegion(\"us-west-2\"))\n\tinstanceID, err := getInstanceId()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsvc := ec2.New(session.New(&aws.Config{Region: aws.String(regionString)}))\n\n\t\/\/svc := ec2.New(sess)\n\n\tparams := &ec2.DescribeInstancesInput{\n\t\t\/\/DryRun: aws.Bool(true),\n\t\tFilters: []*ec2.Filter{\n\t\t\t{ \/*\n\t\t\t\t\/\/ Required\n\t\t\t\tName: aws.String(\"String\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(\"String\"), \/\/ Required\n\t\t\t\t\t\/\/ More values...\n\t\t\t\t},\n\t\t\t*\/\n\t\t\t},\n\t\t\t\/\/ More values...\n\t\t},\n\t\tInstanceIds: []*string{\n\t\t\t\/\/aws.String(\"i-0c8b43c46be4852d5\"), \/\/ Required\n\t\t\taws.String(instanceID),\n\t\t\t\/\/ More values...\n\t\t},\n\t\t\/\/MaxResults: aws.Int64(1),\n\t\t\/\/NextToken: aws.String(\"String\"),\n\t}\n\tresp, err := svc.DescribeInstances(params)\n\n\tif err != nil {\n\t\t\/\/ Print the error, cast err to awserr.Error to get the Code and\n\t\t\/\/ Message from an error.\n\t\tfmt.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Pretty-print the response data.\n\tfmt.Println(resp)\n\n\tdescribeTagsparams := &ec2.DescribeTagsInput{\n\t\t\/\/DryRun: aws.Bool(true),\n\t\tFilters: []*ec2.Filter{\n\t\t\t{ \/\/ Required\n\t\t\t\tName: aws.String(\"resource-type\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(\"instance\"), \/\/ Required\n\t\t\t\t\t\/\/ More values...\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"resource-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(instanceID), \/\/ Required\n\t\t\t\t\t\/\/ More values...\n\t\t\t\t},\n\t\t\t},\n\t\t\t\/\/ More values...\n\t\t},\n\t\tMaxResults: aws.Int64(10),\n\t\tNextToken: aws.String(\"String\"),\n\t}\n\n\tresp2, err := svc.DescribeTags(describeTagsparams)\n\n\tif err != nil {\n\t\t\/\/ Print the error, cast err to awserr.Error to get the Code and\n\t\t\/\/ Message from an error.\n\t\tfmt.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Pretty-print the response data.\n\tfmt.Println(resp2)\n\n\treturn \"\", nil\n}\n\nfunc main() {\n\t\/\/var credentialsFilePath = flag.String(\"credentials\", \"credentials.yml\", \"credentials file\")\n\tvar tagName = flag.String(\"tagname\", \"Name\", \"Name of the tag of interest\")\n\tflag.Parse()\n\n\tregionString, err := getRegionString()\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot get Region string %s\", err)\n\t}\n\tgetTag(*tagName, regionString)\n\n}\n<commit_msg>now with actual funcionality<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\/\/\"os\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/ec2metadata\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n)\n\nvar debug = false\n\nfunc getRegionString() (string, error) {\n\tsvc := ec2metadata.New(session.New())\n\tregion, err := svc.Region()\n\treturn region, err\n}\n\nfunc getInstanceId() (string, error) {\n\tsvc := ec2metadata.New(session.New())\n\tidentityDocument, err := svc.GetInstanceIdentityDocument()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn identityDocument.InstanceID, nil\n}\n\nfunc getTag(requestedTagName string, regionString string) (string, error) {\n\t\/\/svc := ec2.New(mySession, aws.NewConfig().WithRegion(\"us-west-2\"))\n\tinstanceID, err := getInstanceId()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsvc := ec2.New(session.New(&aws.Config{Region: aws.String(regionString)}))\n\n\tdescribeTagsparams := &ec2.DescribeTagsInput{\n\t\t\/\/DryRun: aws.Bool(true),\n\t\tFilters: []*ec2.Filter{\n\t\t\t{ \/\/ Required\n\t\t\t\tName: aws.String(\"resource-type\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(\"instance\"), \/\/ Required\n\t\t\t\t\t\/\/ More values...\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"resource-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(instanceID), \/\/ Required\n\t\t\t\t\t\/\/ More values...\n\t\t\t\t},\n\t\t\t},\n\t\t\t\/\/ More values...\n\t\t},\n\t\tMaxResults: aws.Int64(10),\n\t\tNextToken: aws.String(\"String\"),\n\t}\n\n\tresp2, err := svc.DescribeTags(describeTagsparams)\n\n\tif err != nil {\n\t\t\/\/ Print the error, cast err to awserr.Error to get the Code and\n\t\t\/\/ Message from an error.\n\t\tfmt.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Pretty-print the response data.\n\tif debug {\n\t\tfmt.Println(resp2)\n\t}\n\tfor _, tag := range resp2.Tags {\n\t\tif *tag.Key == requestedTagName {\n\t\t\tlog.Printf(\"found %s: %s\\n\", *tag.Key, *tag.Value)\n\t\t\treturn *tag.Value, nil\n\t\t}\n\t}\n\n\treturn \"\", nil\n}\n\nfunc main() {\n\t\/\/var credentialsFilePath = flag.String(\"credentials\", \"credentials.yml\", \"credentials file\")\n\tvar tagName = flag.String(\"tagname\", \"Name\", \"Name of the tag of interest\")\n\tflag.Parse()\n\n\tregionString, err := getRegionString()\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot get Region string %s\", err)\n\t}\n\tvalue, err := getTag(*tagName, regionString)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot get Tag Name %s\", err)\n\t}\n\tfmt.Printf(\"%s\\n\", value)\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Simple proxy for cleaner directory structure and so we can have godoc support\n*\/\n\npackage main\n\nimport \"gopkg.in\/bakape\/meguca.v2\/server\"\n\nfunc main() {\n\tserver.Start()\n}\n<commit_msg>main.go Use local import path<commit_after>\/*\n Simple proxy for cleaner directory structure and so we can have godoc support\n*\/\n\npackage main\n\nimport \".\/server\"\n\nfunc main() {\n\tserver.Start()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/qbit\/dnews\/src\"\n)\n\nvar store = sessions.NewCookieStore([]byte(\"something-very-secret\"))\n\nfunc getArticles(w http.ResponseWriter, r *http.Request) {\n\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\ta := dnews.Articles{}\n\tfmt.Fprintf(w, \"Hi there, I love %s! %v\", r.URL.Path[1:], a)\n}\n\nvar funcMap = template.FuncMap{\n\t\"formatDate\": dnews.FormatDate,\n\t\"printHTML\": func(b []byte) template.HTML {\n\t\treturn template.HTML(string(b))\n\t},\n}\n\nvar templ, err = template.New(\"dnews\").Funcs(funcMap).ParseGlob(\"templates\/*.html\")\n\nfunc init() {\n\tgob.Register(&dnews.User{})\n}\n\nfunc main() {\n\tfs := http.FileServer(http.Dir(\"public\"))\n\n\thttp.Handle(\"\/public\/\", http.StripPrefix(\"\/public\/\", fs))\n\thttp.HandleFunc(\"\/login\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := store.Get(r, \"session-name\")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tuser := r.FormValue(\"user\")\n\t\tpasswd := r.FormValue(\"passwd\")\n\n\t\tif user == \"\" && passwd == \"\" {\n\t\t\terr = templ.ExecuteTemplate(w, \"login.html\", nil)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ do auth\n\t\t\tu, err := dnews.Auth(user, passwd)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif u.Authed {\n\t\t\t\tlog.Printf(\"Authed %s\", user)\n\t\t\t\tsession.Values[\"user\"] = u\n\n\t\t\t\tsession.Save(r, w)\n\n\t\t\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Invalid user: %s\", user)\n\t\t\t\terr = templ.ExecuteTemplate(w, \"login.html\", struct {\n\t\t\t\t\tError string\n\t\t\t\t}{\n\t\t\t\t\t\"Invalid User!\",\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\thttp.HandleFunc(\"\/favicon.ico\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"NO!\", http.StatusNotFound)\n\t\treturn\n\t})\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := store.Get(r, \"session-name\")\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\ta, err := dnews.GetNArticles(10)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tval := session.Values[\"user\"]\n\t\tif _, ok := val.(*dnews.User); !ok {\n\t\t\tval = &dnews.User{}\n\t\t\tsession.Values[\"user\"] = &val\n\t\t\tsession.Save(r, w)\n\t\t}\n\n\t\tdata := struct {\n\t\t\tArticles *dnews.Articles\n\t\t\t\/\/ we did a type check above, but it would be nice to\n\t\t\t\/\/ be able to use the actual type when sending to the\n\t\t\t\/\/ templating stuffs :(\n\t\t\tUser interface{}\n\t\t}{\n\t\t\t&a,\n\t\t\t&val,\n\t\t}\n\n\t\terr = templ.ExecuteTemplate(w, \"index.html\", data)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n\n\thttp.ListenAndServe(\":8080\", nil)\n}\n<commit_msg>add ability to logout<commit_after>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/qbit\/dnews\/src\"\n)\n\nvar store = sessions.NewCookieStore([]byte(\"something-very-secret\"))\n\nfunc getArticles(w http.ResponseWriter, r *http.Request) {\n\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\ta := dnews.Articles{}\n\tfmt.Fprintf(w, \"Hi there, I love %s! %v\", r.URL.Path[1:], a)\n}\n\nvar funcMap = template.FuncMap{\n\t\"formatDate\": dnews.FormatDate,\n\t\"printHTML\": func(b []byte) template.HTML {\n\t\treturn template.HTML(string(b))\n\t},\n}\n\nvar templ, err = template.New(\"dnews\").Funcs(funcMap).ParseGlob(\"templates\/*.html\")\n\nfunc init() {\n\tgob.Register(&dnews.User{})\n}\n\nfunc main() {\n\tfs := http.FileServer(http.Dir(\"public\"))\n\n\thttp.Handle(\"\/public\/\", http.StripPrefix(\"\/public\/\", fs))\n\thttp.HandleFunc(\"\/login\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := store.Get(r, \"session-name\")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tuser := r.FormValue(\"user\")\n\t\tpasswd := r.FormValue(\"passwd\")\n\n\t\tif user == \"\" && passwd == \"\" {\n\t\t\terr = templ.ExecuteTemplate(w, \"login.html\", nil)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ do auth\n\t\t\tu, err := dnews.Auth(user, passwd)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif u.Authed {\n\t\t\t\tsession.Values[\"user\"] = u\n\t\t\t\tsession.Save(r, w)\n\t\t\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Invalid user: %s\", user)\n\t\t\t\terr = templ.ExecuteTemplate(w, \"login.html\", struct {\n\t\t\t\t\tError string\n\t\t\t\t}{\n\t\t\t\t\t\"Invalid User!\",\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\thttp.HandleFunc(\"\/logout\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := store.Get(r, \"session-name\")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tsession.Options = &sessions.Options{\n\t\t\tMaxAge: -1,\n\t\t}\n\t\tsession.Save(r, w)\n\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\n\t})\n\thttp.HandleFunc(\"\/favicon.ico\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"NO!\", http.StatusNotFound)\n\t\treturn\n\t})\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := store.Get(r, \"session-name\")\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\ta, err := dnews.GetNArticles(10)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tval := session.Values[\"user\"]\n\t\tif _, ok := val.(*dnews.User); !ok {\n\t\t\tval = &dnews.User{}\n\t\t\tsession.Values[\"user\"] = &val\n\t\t\tsession.Save(r, w)\n\t\t}\n\n\t\tdata := struct {\n\t\t\tArticles *dnews.Articles\n\t\t\t\/\/ we did a type check above, but it would be nice to\n\t\t\t\/\/ be able to use the actual type when sending to the\n\t\t\t\/\/ templating stuffs :(\n\t\t\tUser interface{}\n\t\t}{\n\t\t\t&a,\n\t\t\t&val,\n\t\t}\n\n\t\terr = templ.ExecuteTemplate(w, \"index.html\", data)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n\n\thttp.ListenAndServe(\":8080\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"math\/rand\"\n\t\"time\"\n\t\"github.com\/robert-butts\/quadtree\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc main() {\n\tnodeCapacity := 4\n\tqt := NewQuadTree(&BoundingBox {\n\t\tCenter: Point { 100.0, 100.0 },\n\t\tHalfDimension: Point { 50.0, 50.0,},\n\t}, nodeCapacity)\n\n\tpointsToInsert := 10000000\n\tstart := time.Now()\n\tfor i := 0; i != pointsToInsert; i++ {\n\t\tp := Point{rand.Float64() * 100.0 + 50.0, rand.Float64() * 100.0 + 50.0}\n\t\tif !qt.Insert(p) {\n\t\t}\n\t}\n\tend := time.Now()\n\telapsed := end.Sub(start)\n\tfmt.Println(\"inserted \" + strconv.Itoa(pointsToInsert) + \" points in \" + elapsed.String() + \".\")\n\/*\n\tqr := &BoundingBox {\n\t\tCenter: Point { 75.0, 75.0 },\n\t\tHalfDimension: Point { 5.0, 5.0,},\n\t}\n\tpoints := qt.QueryRange(qr)\n\tfmt.Println(\"found \" + strconv.Itoa(len(points)) + \" points\")\n*\/\n\/\/\tfor _, point := range points {\n\/\/\t\tfmt.Println(point.String())\n\/\/\t}\n}\n<commit_msg>deleted main package<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/andrewslotin\/doppelganger\/git\"\n\t\"github.com\/bmizerany\/pat\"\n)\n\nvar (\n\t\/\/ Version and BuildDate are used in help message and set by Makefile\n\tVersion = \"n\/a\"\n\tBuildDate = \"n\/a\"\n\n\targs struct {\n\t\tversion bool\n\t\taddr string\n\t\tport int\n\t\tmirrorDir string\n\t}\n)\n\nfunc init() {\n\tflag.BoolVar(&args.version, \"version\", false, \"Print version and exit\")\n\tflag.StringVar(&args.addr, \"addr\", \"\", \"Listen address\")\n\tflag.IntVar(&args.port, \"port\", 8081, \"Listen port\")\n\tflag.StringVar(&args.mirrorDir, \"mirror\", filepath.Join(os.Getenv(\"GOPATH\"), \"src\", \"github.com\"), \"Mirrored repositories directory\")\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [OPTIONS]\\n\\nOptions:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tos.Exit(2)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif args.version {\n\t\tfmt.Printf(\"Doppelganger, version %s, build date %s\\n\", Version, BuildDate)\n\t\tos.Exit(0)\n\t}\n\n\tlog.SetOutput(os.Stderr)\n\tlog.SetFlags(log.LstdFlags)\n\n\ttoken := os.Getenv(\"DOPPELGANGER_GITHUB_TOKEN\")\n\tif token == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"Missing GitHub access token (set DOPPELGANGER_GITHUB_TOKEN environment variable)\")\n\t\tos.Exit(-1)\n\t}\n\n\trepositoryService, err := git.NewGithubRepositories(context.WithValue(context.Background(), git.GithubToken, token))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgitCmd, err := git.SystemGit()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tmirroredRepositoryService := git.NewMirroredRepositories(args.mirrorDir, gitCmd)\n\n\tmux := pat.New()\n\tmux.Get(\"\/favicon.ico\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \".\/assets\/favicon.ico\")\n\t}))\n\n\tmux.Get(\"\/:owner\/:repo\", NewRepoHandler(mirroredRepositoryService))\n\tmux.Get(\"\/\", NewReposHandler(mirroredRepositoryService, true))\n\tmux.Get(\"\/src\/:owner\/:repo\", NewRepoHandler(repositoryService))\n\tmux.Get(\"\/src\/\", NewReposHandler(repositoryService, false))\n\tmux.Post(\"\/mirror\", NewMirrorHandler(repositoryService, mirroredRepositoryService, repositoryService))\n\tmux.Get(\"\/assets\/\", http.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\".\/assets\"))))\n\n\t\/\/ GitHub webhooks\n\tmux.Post(\"\/apihook\", NewWebhookHandler(mirroredRepositoryService))\n\n\taddr := fmt.Sprintf(\"%s:%d\", args.addr, args.port)\n\tlog.Printf(\"doppelganger is listening on %s\", addr)\n\tif err := http.ListenAndServe(addr, mux); err != nil {\n\t\tlog.Panic(err)\n\t}\n}\n<commit_msg>Use server.Server instead of http.ListenAndServe<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/andrewslotin\/doppelganger\/git\"\n\t\"github.com\/andrewslotin\/doppelganger\/server\"\n\t\"github.com\/bmizerany\/pat\"\n)\n\nvar (\n\t\/\/ Version and BuildDate are used in help message and set by Makefile\n\tVersion = \"n\/a\"\n\tBuildDate = \"n\/a\"\n\n\targs struct {\n\t\tversion bool\n\t\taddr string\n\t\tport int\n\t\tmirrorDir string\n\t}\n)\n\nfunc init() {\n\tflag.BoolVar(&args.version, \"version\", false, \"Print version and exit\")\n\tflag.StringVar(&args.addr, \"addr\", \"\", \"Listen address\")\n\tflag.IntVar(&args.port, \"port\", 8081, \"Listen port\")\n\tflag.StringVar(&args.mirrorDir, \"mirror\", filepath.Join(os.Getenv(\"GOPATH\"), \"src\", \"github.com\"), \"Mirrored repositories directory\")\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [OPTIONS]\\n\\nOptions:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tos.Exit(2)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif args.version {\n\t\tfmt.Printf(\"Doppelganger, version %s, build date %s\\n\", Version, BuildDate)\n\t\tos.Exit(0)\n\t}\n\n\tlog.SetOutput(os.Stderr)\n\tlog.SetFlags(log.LstdFlags)\n\n\ttoken := os.Getenv(\"DOPPELGANGER_GITHUB_TOKEN\")\n\tif token == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"Missing GitHub access token (set DOPPELGANGER_GITHUB_TOKEN environment variable)\")\n\t\tos.Exit(-1)\n\t}\n\n\trepositoryService, err := git.NewGithubRepositories(context.WithValue(context.Background(), git.GithubToken, token))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgitCmd, err := git.SystemGit()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tmirroredRepositoryService := git.NewMirroredRepositories(args.mirrorDir, gitCmd)\n\n\tmux := pat.New()\n\tmux.Get(\"\/favicon.ico\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \".\/assets\/favicon.ico\")\n\t}))\n\n\tmux.Get(\"\/:owner\/:repo\", NewRepoHandler(mirroredRepositoryService))\n\tmux.Get(\"\/\", NewReposHandler(mirroredRepositoryService, true))\n\tmux.Get(\"\/src\/:owner\/:repo\", NewRepoHandler(repositoryService))\n\tmux.Get(\"\/src\/\", NewReposHandler(repositoryService, false))\n\tmux.Post(\"\/mirror\", NewMirrorHandler(repositoryService, mirroredRepositoryService, repositoryService))\n\tmux.Get(\"\/assets\/\", http.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\".\/assets\"))))\n\n\t\/\/ GitHub webhooks\n\tmux.Post(\"\/apihook\", NewWebhookHandler(mirroredRepositoryService))\n\n\tsrv := server.New(args.addr, args.port)\n\tif err := srv.Run(mux); err != nil {\n\t\tlog.Panic(err)\n\t}\n\tlog.Printf(\"doppelganger is listening on %s\", srv.Addr)\n\n\tselect {}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/goji\/httpauth\"\n\t\"github.com\/hectane\/hectane\/api\"\n\t\"github.com\/hectane\/hectane\/queue\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/zenazn\/goji\"\n\t\"github.com\/zenazn\/goji\/bind\"\n\t\"github.com\/zenazn\/goji\/web\"\n\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc main() {\n\tvar (\n\t\ttlsCert string\n\t\ttlsKey string\n\t\tusername string\n\t\tpassword string\n\t\tdirectory string\n\t\tconfig queue.Config\n\t)\n\tif s := bind.Sniff(); s == \"\" {\n\t\tflag.Lookup(\"bind\").Value.Set(\":8025\")\n\t\tflag.Lookup(\"bind\").DefValue = \":8025\"\n\t}\n\tif home, err := homedir.Dir(); err == nil {\n\t\tdirectory = path.Join(home, \".hectane\")\n\t} else {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tflag.StringVar(&tlsCert, \"tls-cert\", \"\", \"certificate for TLS\")\n\tflag.StringVar(&tlsKey, \"tls-key\", \"\", \"private key for TLS\")\n\tflag.StringVar(&username, \"username\", \"\", \"username for HTTP basic auth\")\n\tflag.StringVar(&password, \"password\", \"\", \"password for HTTP basic auth\")\n\tflag.StringVar(&directory, \"directory\", directory, \"directory for persistent storage\")\n\tflag.BoolVar(&config.DisableSSLVerification, \"disable-ssl-verification\", false, \"don't verify SSL certificates\")\n\tflag.Parse()\n\ts := queue.NewStorage(directory)\n\tif q, err := queue.NewQueue(&config, s); err == nil {\n\t\tdefer q.Stop()\n\t\tgoji.Use(func(c *web.C, h http.Handler) http.Handler {\n\t\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tc.Env[\"storage\"] = s\n\t\t\t\tc.Env[\"queue\"] = q\n\t\t\t\th.ServeHTTP(w, r)\n\t\t\t}\n\t\t\treturn http.HandlerFunc(fn)\n\t\t})\n\t} else {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tgoji.Get(\"\/v1\/version\", api.Version)\n\tgoji.Get(\"\/v1\/status\", api.Status)\n\tgoji.Post(\"\/v1\/send\", api.Send)\n\tif username != \"\" && password != \"\" {\n\t\tgoji.Use(httpauth.SimpleBasicAuth(username, password))\n\t}\n\tif tlsCert != \"\" && tlsKey != \"\" {\n\t\tif cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey); err == nil {\n\t\t\tgoji.ServeTLS(&tls.Config{\n\t\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\t})\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tgoji.Serve()\n\t}\n}\n<commit_msg>Implemented new API type in main.go.<commit_after>package main\n\nimport (\n\t\"github.com\/hectane\/hectane\/api\"\n\t\"github.com\/hectane\/hectane\/queue\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc main() {\n\tvar (\n\t\tbind string\n\t\ttlsCert string\n\t\ttlsKey string\n\t\tusername string\n\t\tpassword string\n\t\tdirectory string\n\t\tconfig queue.Config\n\t)\n\tif home, err := homedir.Dir(); err == nil {\n\t\tdirectory = path.Join(home, \".hectane\")\n\t} else {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tflag.StringVar(&bind, \"bind\", \":8025\", \"address and port to bind to\")\n\tflag.StringVar(&tlsCert, \"tls-cert\", \"\", \"certificate for TLS\")\n\tflag.StringVar(&tlsKey, \"tls-key\", \"\", \"private key for TLS\")\n\tflag.StringVar(&username, \"username\", \"\", \"username for HTTP basic auth\")\n\tflag.StringVar(&password, \"password\", \"\", \"password for HTTP basic auth\")\n\tflag.StringVar(&directory, \"directory\", directory, \"directory for persistent storage\")\n\tflag.BoolVar(&config.DisableSSLVerification, \"disable-ssl-verification\", false, \"don't verify SSL certificates\")\n\tflag.Parse()\n\ts := queue.NewStorage(directory)\n\tif q, err := queue.NewQueue(&config, s); err == nil {\n\t\tdefer q.Stop()\n\t\ta := api.New(&api.Config{\n\t\t\tAddr: bind,\n\t\t\tTLSCert: tlsCert,\n\t\t\tTLSKey: tlsKey,\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t}, q, s)\n\t\tif err := a.Listen(); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"fmt\"\n \"strconv\"\n \"strings\"\n \"net\"\n \"os\"\n \"os\/exec\"\n \"bufio\"\n \"log\"\n)\n\nfunc main() {\n CfgParams, err := loadConfig()\n if err != nil {\n fmt.Println(err.Error())\n os.Exit(1)\n }\n\n msgbus := make(chan string)\n var readings = make([]string, len(CfgParams.Plugins))\n for i := range readings {\n readings[i] = \"{ \\\"full_text\\\": \\\"\" + CfgParams.Separator + \"\\\", \\\"color\\\": \\\"\" + CfgParams.SeparatorColor + \"\\\", \\\"separator\\\": false },{ \\\"name\\\": \\\"null\\\", \\\"full_text\\\": \\\"null\\\", \\\"color\\\": \\\"#101010\\\", \\\"separator\\\": false }\"\n }\n\n fmt.Printf(\"{ \\\"version\\\": 1 }\\n[\\n[]\\n\")\n\n go func() {\n l, err := net.ListenUnix(\"unix\", &net.UnixAddr{CfgParams.Socket, \"unix\"})\n if err != nil {\n panic(err)\n }\n defer os.Remove(CfgParams.Socket)\n for {\n conn, err := l.AcceptUnix()\n if err != nil {\n panic(err)\n }\n var buf [1024]byte\n n, err := conn.Read(buf[:])\n if err != nil {\n panic(err)\n }\n fmt.Printf(\"%s\\n\", string(buf[:n]))\n msgbus <- string(buf[:n])\n conn.Close()\n }\n }()\n\n for i := range CfgParams.Plugins {\n go func(command string, id int) {\n cmd := exec.Command(CfgParams.PluginsPath + command)\n stdout, err := cmd.StdoutPipe()\n if err != nil {\n log.Fatal(err)\n }\n cmd.Start()\n scanner := bufio.NewScanner(stdout)\n for scanner.Scan() {\n msgbus <- \"plugin;\" + strconv.Itoa(id) + \";\" + scanner.Text()\n }\n }(CfgParams.Plugins[i].Command, i)\n }\n for {\n msg := <-msgbus\n action := strings.Split(msg, \";\")\n current, _ := strconv.Atoi(action[1])\n if action[0] == \"plugin\" {\n readings[current] = \"{ \\\"full_text\\\": \\\"\" + CfgParams.Separator + \"\\\", \\\"color\\\": \\\"\" + CfgParams.SeparatorColor + \"\\\", \\\"separator\\\": false },{ \\\"name\\\": \\\"\" + CfgParams.Plugins[current].Name + \"\\\", \\\"full_text\\\": \\\"\" + action[3] + \"\\\", \\\"color\\\": \\\"\" + action[2] + \"\\\", \\\"separator\\\": false }\"\n }\n fmt.Printf(\",[\")\n for i := range readings {\n if i != len(readings) - 1 {\n fmt.Printf(\"%s,\", readings[i])\n } else {\n fmt.Printf(\"%s\", readings[i])\n }\n }\n fmt.Printf(\"]\\n\")\n }\n}\n<commit_msg>proper signal handling<commit_after>package main\n\nimport (\n \"fmt\"\n \"strconv\"\n \"strings\"\n \"net\"\n \"os\"\n \"syscall\"\n \"os\/exec\"\n \"os\/signal\"\n \"bufio\"\n \"log\"\n)\n\nfunc main() {\n CfgParams, err := loadConfig()\n if err != nil {\n fmt.Println(err.Error())\n os.Exit(1)\n }\n\n msgbus := make(chan string)\n var readings = make([]string, len(CfgParams.Plugins))\n for i := range readings {\n readings[i] = \"{ \\\"full_text\\\": \\\"\" + CfgParams.Separator + \"\\\", \\\"color\\\": \\\"\" + CfgParams.SeparatorColor + \"\\\", \\\"separator\\\": false },{ \\\"name\\\": \\\"null\\\", \\\"full_text\\\": \\\"null\\\", \\\"color\\\": \\\"#101010\\\", \\\"separator\\\": false }\"\n }\n\n fmt.Printf(\"{ \\\"version\\\": 1 }\\n[\\n[]\\n\")\n\n go func() {\n l, err := net.ListenUnix(\"unix\", &net.UnixAddr{CfgParams.Socket, \"unix\"})\n if err != nil {\n panic(err)\n }\n defer os.Remove(CfgParams.Socket)\n for {\n conn, err := l.AcceptUnix()\n if err != nil {\n panic(err)\n }\n var buf [1024]byte\n n, err := conn.Read(buf[:])\n if err != nil {\n panic(err)\n }\n fmt.Printf(\"%s\\n\", string(buf[:n]))\n msgbus <- string(buf[:n])\n conn.Close()\n }\n }()\n\n c := make(chan os.Signal, 1)\n signal.Notify(c, syscall.SIGHUP,\n syscall.SIGINT,\n syscall.SIGTERM,\n syscall.SIGQUIT)\n go func(){\n for sig := range c {\n os.Remove(CfgParams.Socket)\n fmt.Printf(\"Captured %v, Exiting\\n\", sig)\n os.Exit(0)\n }\n }()\n\n for i := range CfgParams.Plugins {\n go func(command string, id int) {\n cmd := exec.Command(CfgParams.PluginsPath + command)\n stdout, err := cmd.StdoutPipe()\n if err != nil {\n log.Fatal(err)\n }\n cmd.Start()\n scanner := bufio.NewScanner(stdout)\n for scanner.Scan() {\n msgbus <- \"plugin;\" + strconv.Itoa(id) + \";\" + scanner.Text()\n }\n }(CfgParams.Plugins[i].Command, i)\n }\n for {\n msg := <-msgbus\n action := strings.Split(msg, \";\")\n current, _ := strconv.Atoi(action[1])\n if action[0] == \"plugin\" {\n readings[current] = \"{ \\\"full_text\\\": \\\"\" + CfgParams.Separator + \"\\\", \\\"color\\\": \\\"\" + CfgParams.SeparatorColor + \"\\\", \\\"separator\\\": false },{ \\\"name\\\": \\\"\" + CfgParams.Plugins[current].Name + \"\\\", \\\"full_text\\\": \\\"\" + action[3] + \"\\\", \\\"color\\\": \\\"\" + action[2] + \"\\\", \\\"separator\\\": false }\"\n }\n fmt.Printf(\",[\")\n for i := range readings {\n if i != len(readings) - 1 {\n fmt.Printf(\"%s,\", readings[i])\n } else {\n fmt.Printf(\"%s\", readings[i])\n }\n }\n fmt.Printf(\"]\\n\")\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\tdupes \"github.com\/danmarg\/undupes\/libdupes\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"undupes\"\n\tapp.Usage = \"manage duplicate files\"\n\tapp.Author = \"Daniel Margolis\"\n\tapp.Email = \"dan@af0.net\"\n\tapp.Version = \"0.1\"\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"interactive\",\n\t\t\tShortName: \"i\",\n\t\t\tUsage: \"interactive mode\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\trunInteractively()\n\t\t\t},\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc getInput(prompt string, validator func(string) bool) (string, error) {\n\t\/\/ Reader to read from user input.\n\treader := bufio.NewReader(os.Stdin)\n\tvar (\n\t\tval string\n\t\terr error\n\t)\n\tfor {\n\t\tfmt.Printf(prompt)\n\t\tval, err = reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tval = strings.TrimSpace(val)\n\t\tif validator(val) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn val, nil\n}\nfunc runInteractively() {\n\t\/\/ Get parent dir.\n\troot, err := getInput(\"Enter parent directory to scan for duplicates in: \", func(f string) bool {\n\t\ti, err := os.Stat(f)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn false\n\t\t}\n\t\treturn i.IsDir()\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif !os.IsPathSeparator(root[len(root)-1]) {\n\t\troot = fmt.Sprintf(\"%s%c\", root, os.PathSeparator)\n\t}\n\tfmt.Printf(\"Indexing...\")\n\tdupes, err := dupes.Dupes(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfcount := 0\n\ttsize := uint64(0)\n\tfor _, i := range dupes {\n\t\tfcount += len(i.Names)\n\t\ttsize += i.Size * uint64(len(i.Names))\n\t}\n\n\tfmt.Printf(\"Found %d sets of duplicate files\\n\", len(dupes))\n\tfmt.Printf(\"Total file count: %d\\n\", fcount)\n\tfmt.Printf(\"Total size used: %s\\n\", humanize.Bytes(tsize))\n\n\tfmt.Printf(\"\\nReviewing results:\\nFor each duplicate fileset, select 'f' to delete all but the first file, 'a' to keep all files, or 'n' (e.g. 2) to delete all except the second file.\\n\")\n\n\tfor i, dupe := range dupes {\n\t\tkeep := -1\n\t\tnames := \"\"\n\t\tfor j, n := range dupe.Names {\n\t\t\tnames += fmt.Sprintf(\"%d: %s\\n\", j+1, n)\n\t\t}\n\t\t_, err := getInput(fmt.Sprintf(\"\\n%d of %d %s:\\n%s\\nKeep [F]irst\/[a]ll\/[n]th? \", i+1, len(dupes), humanize.Bytes(dupe.Size*uint64(len(dupe.Names)-1)), names), func(v string) bool {\n\t\t\tswitch strings.ToLower(v) {\n\t\t\tcase \"f\":\n\t\t\t\tkeep = 0\n\t\t\t\treturn true\n\t\t\tcase \"a\":\n\t\t\t\treturn true\n\t\t\tdefault:\n\t\t\t\tk, err := strconv.ParseInt(v, 10, 32)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tif k < 1 || int(k) > len(dupe.Names) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tkeep = int(k) - 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif keep >= 0 {\n\t\t\tfor i, n := range dupe.Names {\n\t\t\t\tif i != keep {\n\t\t\t\t\tif err := os.Remove(n); err != nil {\n\t\t\t\t\t\tlog.Printf(\"Error deleting %n: %v\", n, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Automatic mode. Big refactor.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\tdupes \"github.com\/danmarg\/undupes\/libdupes\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"undupes\"\n\tapp.Usage = \"manage duplicate files\"\n\tapp.Author = \"Daniel Margolis\"\n\tapp.Email = \"dan@af0.net\"\n\tapp.Version = \"0.1\"\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"interactive\",\n\t\t\tShortName: \"i\",\n\t\t\tUsage: \"interactive mode\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif err := runInteractive(c.Bool(\"dry_run\")); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"auto\",\n\t\t\tShortName: \"a\",\n\t\t\tUsage: \"automatically resolve duplicates\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"directory, d\",\n\t\t\t\t\tUsage: \"directory in which to find duplicates (required)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"prefer, p\",\n\t\t\t\t\tUsage: \"prefer to keep files matching this pattern\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"over, o\",\n\t\t\t\t\tUsage: \"used with --prefer; will restrict preferred files to those where the duplicate matches --over\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"invert, i\",\n\t\t\t\t\tUsage: \"invert matching logic; preferred files with be prioritized for deletion rather than retention\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"dry_run\",\n\t\t\t\t\tUsage: \"simulate (log but don't delete files)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\t\/\/ Check for required arguments.\n\t\t\t\tif c.String(\"directory\") == \"\" {\n\t\t\t\t\tfmt.Println(\"--directory is required\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.String(\"prefer\") == \"\" {\n\t\t\t\t\tfmt.Println(\"--prefer is required\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ Compile regexps.\n\t\t\t\tvar prefer, over *regexp.Regexp\n\t\t\t\tvar err error\n\t\t\t\tif prefer, err = regexp.Compile(c.String(\"prefer\")); err != nil {\n\t\t\t\t\tfmt.Println(\"invalid regexp specified for --prefer\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.String(\"over\") != \"\" {\n\t\t\t\t\tif over, err = regexp.Compile(c.String(\"over\")); err != nil {\n\t\t\t\t\t\tfmt.Println(\"invalid regexp specified for --over\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ Do deduplication.\n\t\t\t\tif err = runAutomatic(c.Bool(\"dry_run\"), c.String(\"directory\"), prefer, over, c.Bool(\"invert\")); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc remove(dryRun bool, file string) {\n\tif dryRun {\n\t\tlog.Printf(\"DRY RUN: remove %s\", file)\n\t} else {\n\t\t\/\/\tif err := os.Remove(file); err != nil {\n\t\t\/\/\t\tlog.Printf(\"Error deleting %n: %v\", file, err)\n\t\t\/\/\t}\n\t}\n\n}\n\nfunc getInput(prompt string, validator func(string) bool) (string, error) {\n\t\/\/ Reader to read from user input.\n\treader := bufio.NewReader(os.Stdin)\n\tvar (\n\t\tval string\n\t\terr error\n\t)\n\tfor {\n\t\tfmt.Printf(prompt)\n\t\tval, err = reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tval = strings.TrimSpace(val)\n\t\tif validator(val) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn val, nil\n}\n\nfunc getDupesAndPrintSummary(root string) ([]dupes.Info, error) {\n\tif !os.IsPathSeparator(root[len(root)-1]) {\n\t\troot = fmt.Sprintf(\"%s%c\", root, os.PathSeparator)\n\t}\n\tfmt.Printf(\"Indexing...\")\n\tdupes, err := dupes.Dupes(root)\n\tif err != nil {\n\t\treturn dupes, err\n\t}\n\tfcount := 0\n\ttsize := uint64(0)\n\tfor _, i := range dupes {\n\t\tfcount += len(i.Names) - 1\n\t\ttsize += i.Size * uint64(len(i.Names)-1)\n\t}\n\n\tfmt.Printf(\"\\rFound %d sets of duplicate files\\n\", len(dupes))\n\tfmt.Printf(\"Total file count: %d\\n\", fcount)\n\tfmt.Printf(\"Total size used: %s\\n\", humanize.Bytes(tsize))\n\treturn dupes, err\n}\n\nfunc runInteractive(dryRun bool) error {\n\t\/\/ Get parent dir.\n\troot, err := getInput(\"Enter parent directory to scan for duplicates in: \", func(f string) bool {\n\t\ti, err := os.Stat(f)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn i.IsDir()\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdupes, err := getDupesAndPrintSummary(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"\\nReviewing results:\\nFor each duplicate fileset, select 'f' to delete all but the first file, 'a' to keep all files, or 'n' (e.g. 2) to delete all except the second file.\\n\")\n\n\tfor i, dupe := range dupes {\n\t\tkeep := -1\n\t\tnames := \"\"\n\t\tfor j, n := range dupe.Names {\n\t\t\tnames += fmt.Sprintf(\"%d: %s\\n\", j+1, n)\n\t\t}\n\t\t_, err := getInput(fmt.Sprintf(\"\\n%d of %d %s:\\n%s\\nKeep [F]irst\/[a]ll\/[n]th? \", i+1, len(dupes), humanize.Bytes(dupe.Size*uint64(len(dupe.Names)-1)), names), func(v string) bool {\n\t\t\tswitch strings.ToLower(v) {\n\t\t\tcase \"f\":\n\t\t\t\tkeep = 0\n\t\t\t\treturn true\n\t\t\tcase \"a\":\n\t\t\t\treturn true\n\t\t\tdefault:\n\t\t\t\tk, err := strconv.ParseInt(v, 10, 32)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tif k < 1 || int(k) > len(dupe.Names) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tkeep = int(k) - 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif keep >= 0 {\n\t\t\tfor i, n := range dupe.Names {\n\t\t\t\tif i != keep {\n\t\t\t\t\tremove(dryRun, n)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc runAutomatic(dryRun bool, root string, prefer *regexp.Regexp, over *regexp.Regexp, invert bool) error {\n\tdupes, err := getDupesAndPrintSummary(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, dupe := range dupes {\n\t\tp := make(map[string]struct{})\n\t\to := make(map[string]struct{})\n\t\tfor _, n := range dupe.Names {\n\t\t\tnb := []byte(n)\n\t\t\tpm := prefer.Match(nb)\n\t\t\tom := false\n\t\t\tif over != nil {\n\t\t\t\tom = over.Match(nb)\n\t\t\t}\n\t\t\tif pm && om {\n\t\t\t\tlog.Printf(\"both --prefer and --over matched %s\", n)\n\t\t\t}\n\t\t\tif pm {\n\t\t\t\tp[n] = struct{}{}\n\t\t\t} else if om {\n\t\t\t\to[n] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tif len(p) > 0 { \/\/ If we found a preferred match.\n\t\t\tif over != nil {\n\t\t\t\tif len(o) > 0 {\n\t\t\t\t\t\/\/ If prefer and over are both specified, and both match, remove the non-preferred matches.\n\t\t\t\t\tif invert {\n\t\t\t\t\t\tfor n := range p {\n\t\t\t\t\t\t\tremove(dryRun, n)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfor n := range o {\n\t\t\t\t\t\t\tremove(dryRun, n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ If over is not specified, keep only the preferred, but (for the case of --invert) only when preferred is not everything.\n\t\t\t\tif len(p) < len(dupe.Names) {\n\t\t\t\t\tfor _, n := range dupe.Names {\n\t\t\t\t\t\tif _, ok := p[n]; ok && invert || !ok && !invert {\n\t\t\t\t\t\t\tremove(dryRun, n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n)\n\ntype record struct {\n\tCity string\n\tState string\n\tZip string\n\tCrrt string\n\tRouteCount int\n\tBundleCount int\n\tBundlePerRoute int\n\tLastBundle int\n\tSeqNum int\n\tFinalBundles int\n\tCount string\n}\n\nfunc newRecord(r []string, mod int) (*record, error) {\n\tRouteCount, err := strconv.Atoi(r[4])\n\tBundleCount, err := strconv.Atoi(r[5])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tRouteCount = RouteCount + mod\n\n\tvar BundlePerRoute int\n\tvar LastBundle int\n\n\tif RouteCount%BundleCount == 0 {\n\t\tBundlePerRoute = RouteCount \/ BundleCount\n\t\tLastBundle = BundleCount\n\t} else {\n\t\tBundlePerRoute = (RouteCount \/ BundleCount) + 1\n\t\tLastBundle = RouteCount - (BundleCount * (BundlePerRoute - 1))\n\t}\n\n\treturn &record{\n\t\tCity: r[0],\n\t\tState: r[1],\n\t\tZip: r[2],\n\t\tCrrt: r[3],\n\t\tRouteCount: RouteCount,\n\t\tBundleCount: BundleCount,\n\t\tBundlePerRoute: BundlePerRoute,\n\t\tLastBundle: LastBundle,\n\t}, nil\n}\n\nfunc main() {\n\tvar (\n\t\tmrc = flag.Int(\"mrc\", 0, \"Modify RouteCount number (defaults to 0)\")\n\t\toutfile = flag.String(\"output\", \"output.csv\", \"Filename to export the CSV results\")\n\t)\n\tflag.Parse()\n\n\theader := []string{\"City\",\n\t\t\"State\",\n\t\t\"Zip\",\n\t\t\"Crrt\",\n\t\t\"RouteCount\",\n\t\t\"BundleCount\",\n\t\t\"BundlePerRoute\",\n\t\t\"LastBundle\",\n\t\t\"SeqNum\",\n\t\t\"FinalBundles\",\n\t\t\"Count\",\n\t}\n\n\tr := csv.NewReader(os.Stdin)\n\n\tof, err := os.Create(*outfile)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer of.Close()\n\tw := csv.NewWriter(of)\n\n\tfor counter := 0; ; counter++ {\n\t\trec, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tif counter == 0 {\n\t\t\tw.Write(header)\n\t\t\tw.Flush()\n\t\t} else {\n\t\t\tr, err := newRecord(rec, *mrc)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\n\t\t\tfor bndl := 1; bndl < r.BundlePerRoute; bndl++ {\n\t\t\t\tr.SeqNum = bndl\n\t\t\t\tw.Write([]string{\n\t\t\t\t\tr.City,\n\t\t\t\t\tr.State,\n\t\t\t\t\tr.Zip,\n\t\t\t\t\tr.Crrt,\n\t\t\t\t\tstrconv.Itoa(r.RouteCount),\n\t\t\t\t\tstrconv.Itoa(r.BundleCount),\n\t\t\t\t\tstrconv.Itoa(r.BundlePerRoute),\n\t\t\t\t\tstrconv.Itoa(r.LastBundle),\n\t\t\t\t\tstrconv.Itoa(r.SeqNum),\n\t\t\t\t\tstrconv.Itoa(r.BundleCount),\n\t\t\t\t\tstrconv.Itoa(r.SeqNum) + \"_of_\" + strconv.Itoa(r.BundlePerRoute),\n\t\t\t\t})\n\t\t\t\tw.Flush()\n\t\t\t\tr.SeqNum++\n\t\t\t}\n\t\t\tw.Write([]string{\n\t\t\t\tr.City,\n\t\t\t\tr.State,\n\t\t\t\tr.Zip,\n\t\t\t\tr.Crrt,\n\t\t\t\tstrconv.Itoa(r.RouteCount),\n\t\t\t\tstrconv.Itoa(r.BundleCount),\n\t\t\t\tstrconv.Itoa(r.BundlePerRoute),\n\t\t\t\tstrconv.Itoa(r.LastBundle),\n\t\t\t\tstrconv.Itoa(r.SeqNum),\n\t\t\t\tstrconv.Itoa(r.LastBundle),\n\t\t\t\tstrconv.Itoa(r.SeqNum) + \"_of_\" + strconv.Itoa(r.BundlePerRoute),\n\t\t\t})\n\t\t\tw.Flush()\n\t\t}\n\t}\n\tfmt.Println(\"Job Completed!\")\n}\n<commit_msg>updates<commit_after>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype record struct {\n\tCity string\n\tState string\n\tZip string\n\tCrrt string\n\tRouteCount int\n\tBundleCount int\n\tBundlePerRoute int\n\tLastBundle int\n\tSeqNum int\n\tFinalBundles int\n\tCount string\n}\n\nfunc remSep(p string) string {\n\tsep := []string{\"-\", \".\", \"*\", \"(\", \")\", \",\", \"\\\"\", \" \"}\n\tfor _, v := range sep {\n\t\tp = strings.Replace(p, v, \"\", -1)\n\t}\n\treturn p\n}\n\nfunc newRecord(r []string, mrc int) (*record, error) {\n\tRouteCount, err := strconv.Atoi(remSep(r[4]))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing RouteCount :: %s\", err)\n\t}\n\n\tBundleCount, err := strconv.Atoi(remSep(r[5]))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing BundleCount :: %s\", err)\n\t}\n\n\tRouteCount = RouteCount + mrc\n\tif RouteCount <= 0 {\n\t\treturn nil, fmt.Errorf(\"RouteCount Cannot be <= 0\")\n\t}\n\n\tvar BundlePerRoute int\n\tvar LastBundle int\n\n\tswitch {\n\tcase RouteCount%BundleCount == 0:\n\t\tBundlePerRoute = RouteCount \/ BundleCount\n\t\tLastBundle = BundleCount\n\tdefault:\n\t\tBundlePerRoute = (RouteCount \/ BundleCount) + 1\n\t\tLastBundle = RouteCount - (BundleCount * (BundlePerRoute - 1))\n\t}\n\n\treturn &record{\n\t\tCity: r[0],\n\t\tState: r[1],\n\t\tZip: r[2],\n\t\tCrrt: r[3],\n\t\tRouteCount: RouteCount,\n\t\tBundleCount: BundleCount,\n\t\tBundlePerRoute: BundlePerRoute,\n\t\tLastBundle: LastBundle,\n\t}, nil\n}\n\nfunc main() {\n\tvar (\n\t\tmrc = flag.Int(\"mrc\", 0, \"Modify RouteCount number (defaults to 0)\")\n\t\toutfile = flag.String(\"output\", \"output.csv\", \"Filename to export the CSV results\")\n\t)\n\tflag.Parse()\n\n\theader := []string{\"City\",\n\t\t\"State\",\n\t\t\"Zip\",\n\t\t\"Crrt\",\n\t\t\"RouteCount\",\n\t\t\"BundleCount\",\n\t\t\"BundlePerRoute\",\n\t\t\"LastBundle\",\n\t\t\"SeqNum\",\n\t\t\"FinalBundles\",\n\t\t\"Count\",\n\t}\n\n\tr := csv.NewReader(os.Stdin)\n\n\tof, err := os.Create(*outfile)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer of.Close()\n\tw := csv.NewWriter(of)\n\n\tfor counter := 0; ; counter++ {\n\t\trec, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tif counter == 0 {\n\t\t\tw.Write(header)\n\t\t\tw.Flush()\n\t\t} else {\n\t\t\tr, err := newRecord(rec, *mrc)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"%v on row [%v]\", err, counter)\n\t\t\t}\n\n\t\t\tfor bndl := 1; bndl < r.BundlePerRoute; bndl++ {\n\t\t\t\tr.SeqNum = bndl\n\t\t\t\tw.Write([]string{\n\t\t\t\t\tr.City,\n\t\t\t\t\tr.State,\n\t\t\t\t\tr.Zip,\n\t\t\t\t\tr.Crrt,\n\t\t\t\t\tfmt.Sprint(r.RouteCount),\n\t\t\t\t\tfmt.Sprint(r.BundleCount),\n\t\t\t\t\tfmt.Sprint(r.BundlePerRoute),\n\t\t\t\t\tfmt.Sprint(r.LastBundle),\n\t\t\t\t\tfmt.Sprint(r.SeqNum),\n\t\t\t\t\tfmt.Sprint(r.BundleCount),\n\t\t\t\t\tfmt.Sprintf(\"%v_of_%v\", r.SeqNum, r.BundlePerRoute),\n\t\t\t\t})\n\t\t\t\tw.Flush()\n\t\t\t\tr.SeqNum++\n\t\t\t}\n\t\t\tw.Write([]string{\n\t\t\t\tr.City,\n\t\t\t\tr.State,\n\t\t\t\tr.Zip,\n\t\t\t\tr.Crrt,\n\t\t\t\tfmt.Sprint(r.RouteCount),\n\t\t\t\tfmt.Sprint(r.BundleCount),\n\t\t\t\tfmt.Sprint(r.BundlePerRoute),\n\t\t\t\tfmt.Sprint(r.LastBundle),\n\t\t\t\tfmt.Sprint(r.SeqNum),\n\t\t\t\tfmt.Sprint(r.LastBundle),\n\t\t\t\tfmt.Sprintf(\"%v_of_%v\", r.SeqNum, r.BundlePerRoute),\n\t\t\t})\n\t\t\tw.Flush()\n\t\t}\n\t}\n\tfmt.Println(\"Job Completed!\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Bloomsky application to export Data bloomsky to console or to influxdb.\npackage main\n\n\/\/go:generate echo Go Generate!\n\/\/go:generate .\/command\/bindata.sh\n\/\/go:generate .\/command\/bindata-assetfs.sh\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"time\"\n\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/nicksnyder\/go-i18n\/i18n\"\n\tbloomsky \"github.com\/patrickalin\/bloomsky-api-go\"\n\t\"github.com\/patrickalin\/bloomsky-client-go\/assembly\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/configName name of the config file\nconst configNameFile = \"config\"\nconst logFile = \"bloomsky.log\"\n\n\/\/ Configuration is the structure of the config YAML file\n\/\/use http:\/\/mervine.net\/json2struct\ntype configuration struct {\n\tconsoleActivated bool\n\thTTPActivated bool\n\thistoryActivated bool\n\thTTPPort string\n\thTTPSPort string\n\tinfluxDBActivated bool\n\tinfluxDBDatabase string\n\tinfluxDBPassword string\n\tinfluxDBServer string\n\tinfluxDBServerPort string\n\tinfluxDBUsername string\n\tlogLevel string\n\tbloomskyAccessToken string\n\tbloomskyURL string\n\trefreshTimer time.Duration\n\tmock bool\n\tlanguage string\n\ttranslateFunc i18n.TranslateFunc\n\tdev bool\n}\n\nvar (\n\t\/\/Version of the code, fill in in compile.sh -ldflags \"-X main.Version=`cat VERSION`\"\n\tVersion = \"No Version Provided\"\n\t\/\/logger\n\tlog = logrus.New()\n)\n\nfunc init() {\n\tlog.Formatter = new(logrus.JSONFormatter)\n\n\terr := os.Remove(logFile)\n\n\tfile, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Error(\"Failed to log to file, using default stderr\")\n\t\treturn\n\t}\n\tlog.Out = file\n}\n\nfunc main() {\n\n\t\/\/Create context\n\tlogDebug(funcName(), \"Create context\", \"\")\n\tmyContext, cancel := context.WithCancel(context.Background())\n\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh)\n\tgo func() {\n\t\tselect {\n\t\tcase i := <-signalCh:\n\t\t\tlogDebug(funcName(), \"Receive interrupt\", i.String())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}()\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"time\": time.Now().Format(time.RFC850),\n\t\t\"version\": Version,\n\t\t\"config\": configNameFile,\n\t\t\"fct\": funcName(),\n\t}).Info(\"Bloomsky API\")\n\n\t\/\/Read configuration from config file\n\tconfig, err := readConfig(configNameFile)\n\tcheckErr(err, funcName(), \"Problem reading config file\", \"\")\n\n\t\/\/Read flags\n\tlogDebug(funcName(), \"Get flag from command line\", \"\")\n\tdebug := flag.String(\"debug\", \"\", \"Error=1, Warning=2, Info=3, Trace=4\")\n\tflag.Parse()\n\tif *debug != \"\" {\n\t\tconfig.logLevel = *debug\n\t}\n\n\t\/\/ Set Level log\n\tlevel, err := logrus.ParseLevel(config.logLevel)\n\tcheckErr(err, funcName(), \"Error parse level\", \"\")\n\tlog.Level = level\n\tlogInfo(funcName(), \"Level log\", config.logLevel)\n\n\t\/\/ Context\n\tctxsch := context.Context(myContext)\n\n\tchannels := make(map[string]chan bloomsky.Bloomsky)\n\n\tif err := i18n.ParseTranslationFileBytes(\"lang\/en-us.all.json\", readFile(\"lang\/en-us.all.json\", config.dev)); err != nil {\n\t\tlogFatal(err, funcName(), \"Error read language file check in config.yaml if dev=false\", \"\")\n\t}\n\tif err := i18n.ParseTranslationFileBytes(\"lang\/fr.all.json\", readFile(\"lang\/fr.all.json\", config.dev)); err != nil {\n\t\tlogFatal(err, funcName(), \"Error read language file check in config.yaml if dev=false\", \"\")\n\t}\n\n\ttranslateFunc, err := i18n.Tfunc(config.language)\n\tcheckErr(err, funcName(), \"Problem with loading translate file\", \"\")\n\n\tif config.historyActivated {\n\t\tchannels[\"store\"] = make(chan bloomsky.Bloomsky)\n\t\ts, err := createStore(channels[\"store\"])\n\t\tcheckErr(err, funcName(), \"Error with history create store\", \"\")\n\t\ts.listen(context.Background())\n\t}\n\n\t\/\/ Console initialisation\n\tif config.consoleActivated {\n\t\tchannels[\"console\"] = make(chan bloomsky.Bloomsky)\n\t\tc, err := createConsole(channels[\"console\"], translateFunc, config.dev)\n\t\tcheckErr(err, funcName(), \"Error with initConsol\", \"\")\n\t\tc.listen(context.Background())\n\t}\n\n\t\/\/ InfluxDB initialisation\n\tif config.influxDBActivated {\n\t\tchannels[\"influxdb\"] = make(chan bloomsky.Bloomsky)\n\t\tc, err := initClient(channels[\"influxdb\"], config.influxDBServer, config.influxDBServerPort, config.influxDBUsername, config.influxDBPassword, config.influxDBDatabase)\n\t\tcheckErr(err, funcName(), \"Error with initClientInfluxDB\", \"\")\n\t\tc.listen(context.Background())\n\t}\n\n\t\/\/ WebServer initialisation\n\tvar httpServ *httpServer\n\tif config.hTTPActivated {\n\t\tvar err error\n\t\tchannels[\"web\"] = make(chan bloomsky.Bloomsky)\n\t\thttpServ, err = createWebServer(channels[\"web\"], config.hTTPPort, config.hTTPSPort, translateFunc, config.dev)\n\t\tcheckErr(err, funcName(), \"Error with initWebServer\", \"\")\n\t\thttpServ.listen(context.Background())\n\n\t}\n\n\t\/\/ get bloomsky JSON and parse information in bloomsky Go Structure\n\tmybloomsky := bloomsky.New(config.bloomskyURL, config.bloomskyAccessToken, config.mock, log)\n\t\/\/Call scheduler\n\tschedule(ctxsch, mybloomsky, channels, config.refreshTimer)\n\n\t\/\/If signal to close the program\n\t<-myContext.Done()\n\tif httpServ.httpServ != nil {\n\t\tlogDebug(funcName(), \"Shutting down webserver\", \"\")\n\t\terr := httpServ.httpServ.Shutdown(myContext)\n\t\tcheckErr(err, funcName(), \"Impossible to shutdown context\", \"\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"fct\": \"main.main\",\n\t}).Debug(\"Terminated see bloomsky.log\")\n}\n\n\/\/ The scheduler executes each time \"collect\"\nfunc schedule(myContext context.Context, mybloomsky bloomsky.Bloomsky, channels map[string]chan bloomsky.Bloomsky, refreshTime time.Duration) {\n\tticker := time.NewTicker(refreshTime)\n\tlogDebug(funcName(), \"Create scheduler\", refreshTime.String())\n\n\tcollect(mybloomsky, channels)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcollect(mybloomsky, channels)\n\t\tcase <-myContext.Done():\n\t\t\tlogDebug(funcName(), \"Stoping ticker\", \"\")\n\t\t\tticker.Stop()\n\t\t\tfor _, v := range channels {\n\t\t\t\tclose(v)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/Principal function which one loops each Time Variable\nfunc collect(mybloomsky bloomsky.Bloomsky, channels map[string]chan bloomsky.Bloomsky) {\n\tlogDebug(funcName(), \"Parse informations from API bloomsky\", \"\")\n\n\tmybloomsky.Refresh()\n\n\t\/\/send message on each channels\n\tfor _, v := range channels {\n\t\tv <- mybloomsky\n\t}\n}\n\n\/\/ ReadConfig read config from config.json with the package viper\nfunc readConfig(configName string) (configuration, error) {\n\n\tvar conf configuration\n\tviper.SetConfigName(configName)\n\tviper.AddConfigPath(\".\")\n\n\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tcheckErr(err, funcName(), \"Fielpaths\", \"\")\n\tdir = dir + \"\/\" + configName\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlogFatal(err, funcName(), \"The config file loaded\", dir)\n\t\treturn conf, err\n\t}\n\tlogInfo(funcName(), \"The config file loaded\", dir)\n\n\t\/\/TODO#16 find to simplify this section\n\tconf.bloomskyURL = viper.GetString(\"BloomskyURL\")\n\tconf.bloomskyAccessToken = viper.GetString(\"BloomskyAccessToken\")\n\tconf.influxDBDatabase = viper.GetString(\"InfluxDBDatabase\")\n\tconf.influxDBPassword = viper.GetString(\"InfluxDBPassword\")\n\tconf.influxDBServer = viper.GetString(\"InfluxDBServer\")\n\tconf.influxDBServerPort = viper.GetString(\"InfluxDBServerPort\")\n\tconf.influxDBUsername = viper.GetString(\"InfluxDBUsername\")\n\tconf.consoleActivated = viper.GetBool(\"ConsoleActivated\")\n\tconf.influxDBActivated = viper.GetBool(\"InfluxDBActivated\")\n\tconf.historyActivated = viper.GetBool(\"historyActivated\")\n\tconf.refreshTimer = time.Duration(viper.GetInt(\"RefreshTimer\")) * time.Second\n\tconf.hTTPActivated = viper.GetBool(\"HTTPActivated\")\n\tconf.hTTPPort = viper.GetString(\"HTTPPort\")\n\tconf.hTTPSPort = viper.GetString(\"hTTPSPort\")\n\tconf.logLevel = viper.GetString(\"LogLevel\")\n\tconf.mock = viper.GetBool(\"mock\")\n\tconf.language = viper.GetString(\"language\")\n\tconf.dev = viper.GetBool(\"dev\")\n\n\t\/\/ Check if one value of the structure is empty\n\tv := reflect.ValueOf(conf)\n\tvalues := make([]interface{}, v.NumField())\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tvalues[i] = v.Field(i)\n\t\t\/\/TODO#16\n\t\t\/\/v.Field(i).SetString(viper.GetString(v.Type().Field(i).Name))\n\t\tif values[i] == \"\" {\n\t\t\treturn conf, fmt.Errorf(\"Check if the key \" + v.Type().Field(i).Name + \" is present in the file \" + dir)\n\t\t}\n\t}\n\tif token := os.Getenv(\"bloomskyAccessToken\"); token != \"\" {\n\t\tconf.bloomskyAccessToken = token\n\t}\n\treturn conf, nil\n}\n\n\/\/Read file and return []byte\nfunc readFile(fileName string, dev bool) []byte {\n\tif dev {\n\t\tfileByte, err := ioutil.ReadFile(fileName)\n\t\tcheckErr(err, funcName(), \"Error reading the file\", fileName)\n\t\treturn fileByte\n\t}\n\n\tfileByte, err := assembly.Asset(fileName)\n\tcheckErr(err, funcName(), \"Error reading the file\", fileName)\n\treturn fileByte\n}\n<commit_msg>Add flag<commit_after>\/\/ Bloomsky application to export Data bloomsky to console or to influxdb.\npackage main\n\n\/\/go:generate echo Go Generate!\n\/\/go:generate .\/command\/bindata.sh\n\/\/go:generate .\/command\/bindata-assetfs.sh\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"time\"\n\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/nicksnyder\/go-i18n\/i18n\"\n\tbloomsky \"github.com\/patrickalin\/bloomsky-api-go\"\n\t\"github.com\/patrickalin\/bloomsky-client-go\/assembly\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/configName name of the config file\nconst configNameFile = \"config\"\nconst logFile = \"bloomsky.log\"\n\n\/\/ Configuration is the structure of the config YAML file\n\/\/use http:\/\/mervine.net\/json2struct\ntype configuration struct {\n\tconsoleActivated bool\n\thTTPActivated bool\n\thistoryActivated bool\n\thTTPPort string\n\thTTPSPort string\n\tinfluxDBActivated bool\n\tinfluxDBDatabase string\n\tinfluxDBPassword string\n\tinfluxDBServer string\n\tinfluxDBServerPort string\n\tinfluxDBUsername string\n\tlogLevel string\n\tbloomskyAccessToken string\n\tbloomskyURL string\n\trefreshTimer time.Duration\n\tmock bool\n\tlanguage string\n\ttranslateFunc i18n.TranslateFunc\n\tdev bool\n}\n\nvar (\n\t\/\/Version of the code, fill in in compile.sh -ldflags \"-X main.Version=`cat VERSION`\"\n\tVersion = \"No Version Provided\"\n\t\/\/logger\n\tlog = logrus.New()\n)\n\nfunc init() {\n\tlog.Formatter = new(logrus.JSONFormatter)\n\n\terr := os.Remove(logFile)\n\n\tfile, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Error(\"Failed to log to file, using default stderr\")\n\t\treturn\n\t}\n\tlog.Out = file\n}\n\nfunc main() {\n\n\t\/\/Create context\n\tlogDebug(funcName(), \"Create context\", \"\")\n\tmyContext, cancel := context.WithCancel(context.Background())\n\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh)\n\tgo func() {\n\t\tselect {\n\t\tcase i := <-signalCh:\n\t\t\tlogDebug(funcName(), \"Receive interrupt\", i.String())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}()\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"time\": time.Now().Format(time.RFC850),\n\t\t\"version\": Version,\n\t\t\"config\": configNameFile,\n\t\t\"fct\": funcName(),\n\t}).Info(\"Bloomsky API\")\n\n\t\/\/Read configuration from config file\n\tconfig, err := readConfig(configNameFile)\n\tcheckErr(err, funcName(), \"Problem reading config file\", \"\")\n\n\t\/\/Read flags\n\tlogDebug(funcName(), \"Get flag from command line\", \"\")\n\tlevelF := flag.String(\"debug\", \"\", \"panic,fatal,error,warning,info,debug\")\n\ttokenF := flag.String(\"token\", \"\", \"yourtoken\")\n\tdevelF := flag.String(\"devel\", \"\", \"true,false\")\n\tmockF := flag.String(\"mock\", \"\", \"true,false\")\n\tflag.Parse()\n\n\tif *levelF != \"\" {\n\t\tconfig.logLevel = *levelF\n\t}\n\tif *tokenF != \"\" {\n\t\tconfig.bloomskyAccessToken = *tokenF\n\t}\n\tif *develF != \"\" {\n\t\tconfig.dev, err = strconv.ParseBool(*develF)\n\t\tcheckErr(err, funcName(), \"error convert string to bol\", \"\")\n\t}\n\tif *mockF != \"\" {\n\t\tconfig.mock, err = strconv.ParseBool(*mockF)\n\t\tcheckErr(err, funcName(), \"error convert string to bol\", \"\")\n\t}\n\n\tfmt.Println(\"ici:%v\", config.bloomskyAccessToken)\n\n\t\/\/ Set Level log\n\tlevel, err := logrus.ParseLevel(config.logLevel)\n\tcheckErr(err, funcName(), \"Error parse level\", \"\")\n\tlog.Level = level\n\tlogInfo(funcName(), \"Level log\", config.logLevel)\n\n\t\/\/ Context\n\tctxsch := context.Context(myContext)\n\n\tchannels := make(map[string]chan bloomsky.Bloomsky)\n\n\tif err := i18n.ParseTranslationFileBytes(\"lang\/en-us.all.json\", readFile(\"lang\/en-us.all.json\", config.dev)); err != nil {\n\t\tlogFatal(err, funcName(), \"Error read language file check in config.yaml if dev=false\", \"\")\n\t}\n\tif err := i18n.ParseTranslationFileBytes(\"lang\/fr.all.json\", readFile(\"lang\/fr.all.json\", config.dev)); err != nil {\n\t\tlogFatal(err, funcName(), \"Error read language file check in config.yaml if dev=false\", \"\")\n\t}\n\n\ttranslateFunc, err := i18n.Tfunc(config.language)\n\tcheckErr(err, funcName(), \"Problem with loading translate file\", \"\")\n\n\tif config.historyActivated {\n\t\tchannels[\"store\"] = make(chan bloomsky.Bloomsky)\n\t\ts, err := createStore(channels[\"store\"])\n\t\tcheckErr(err, funcName(), \"Error with history create store\", \"\")\n\t\ts.listen(context.Background())\n\t}\n\n\t\/\/ Console initialisation\n\tif config.consoleActivated {\n\t\tchannels[\"console\"] = make(chan bloomsky.Bloomsky)\n\t\tc, err := createConsole(channels[\"console\"], translateFunc, config.dev)\n\t\tcheckErr(err, funcName(), \"Error with initConsol\", \"\")\n\t\tc.listen(context.Background())\n\t}\n\n\t\/\/ InfluxDB initialisation\n\tif config.influxDBActivated {\n\t\tchannels[\"influxdb\"] = make(chan bloomsky.Bloomsky)\n\t\tc, err := initClient(channels[\"influxdb\"], config.influxDBServer, config.influxDBServerPort, config.influxDBUsername, config.influxDBPassword, config.influxDBDatabase)\n\t\tcheckErr(err, funcName(), \"Error with initClientInfluxDB\", \"\")\n\t\tc.listen(context.Background())\n\t}\n\n\t\/\/ WebServer initialisation\n\tvar httpServ *httpServer\n\tif config.hTTPActivated {\n\t\tvar err error\n\t\tchannels[\"web\"] = make(chan bloomsky.Bloomsky)\n\t\thttpServ, err = createWebServer(channels[\"web\"], config.hTTPPort, config.hTTPSPort, translateFunc, config.dev)\n\t\tcheckErr(err, funcName(), \"Error with initWebServer\", \"\")\n\t\thttpServ.listen(context.Background())\n\n\t}\n\n\t\/\/ get bloomsky JSON and parse information in bloomsky Go Structure\n\tmybloomsky := bloomsky.New(config.bloomskyURL, config.bloomskyAccessToken, config.mock, log)\n\t\/\/Call scheduler\n\tschedule(ctxsch, mybloomsky, channels, config.refreshTimer)\n\n\t\/\/If signal to close the program\n\t<-myContext.Done()\n\tif httpServ.httpServ != nil {\n\t\tlogDebug(funcName(), \"Shutting down webserver\", \"\")\n\t\terr := httpServ.httpServ.Shutdown(myContext)\n\t\tcheckErr(err, funcName(), \"Impossible to shutdown context\", \"\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"fct\": \"main.main\",\n\t}).Debug(\"Terminated see bloomsky.log\")\n}\n\n\/\/ The scheduler executes each time \"collect\"\nfunc schedule(myContext context.Context, mybloomsky bloomsky.Bloomsky, channels map[string]chan bloomsky.Bloomsky, refreshTime time.Duration) {\n\tticker := time.NewTicker(refreshTime)\n\tlogDebug(funcName(), \"Create scheduler\", refreshTime.String())\n\n\tcollect(mybloomsky, channels)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcollect(mybloomsky, channels)\n\t\tcase <-myContext.Done():\n\t\t\tlogDebug(funcName(), \"Stoping ticker\", \"\")\n\t\t\tticker.Stop()\n\t\t\tfor _, v := range channels {\n\t\t\t\tclose(v)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/Principal function which one loops each Time Variable\nfunc collect(mybloomsky bloomsky.Bloomsky, channels map[string]chan bloomsky.Bloomsky) {\n\tlogDebug(funcName(), \"Parse informations from API bloomsky\", \"\")\n\n\tmybloomsky.Refresh()\n\n\t\/\/send message on each channels\n\tfor _, v := range channels {\n\t\tv <- mybloomsky\n\t}\n}\n\n\/\/ ReadConfig read config from config.json with the package viper\nfunc readConfig(configName string) (configuration, error) {\n\n\tvar conf configuration\n\tviper.SetConfigName(configName)\n\tviper.AddConfigPath(\".\")\n\n\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tcheckErr(err, funcName(), \"Fielpaths\", \"\")\n\tdir = dir + \"\/\" + configName\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlogFatal(err, funcName(), \"The config file loaded\", dir)\n\t\treturn conf, err\n\t}\n\tlogInfo(funcName(), \"The config file loaded\", dir)\n\n\t\/\/TODO#16 find to simplify this section\n\tconf.bloomskyURL = viper.GetString(\"BloomskyURL\")\n\tconf.bloomskyAccessToken = viper.GetString(\"BloomskyAccessToken\")\n\tconf.influxDBDatabase = viper.GetString(\"InfluxDBDatabase\")\n\tconf.influxDBPassword = viper.GetString(\"InfluxDBPassword\")\n\tconf.influxDBServer = viper.GetString(\"InfluxDBServer\")\n\tconf.influxDBServerPort = viper.GetString(\"InfluxDBServerPort\")\n\tconf.influxDBUsername = viper.GetString(\"InfluxDBUsername\")\n\tconf.consoleActivated = viper.GetBool(\"ConsoleActivated\")\n\tconf.influxDBActivated = viper.GetBool(\"InfluxDBActivated\")\n\tconf.historyActivated = viper.GetBool(\"historyActivated\")\n\tconf.refreshTimer = time.Duration(viper.GetInt(\"RefreshTimer\")) * time.Second\n\tconf.hTTPActivated = viper.GetBool(\"HTTPActivated\")\n\tconf.hTTPPort = viper.GetString(\"HTTPPort\")\n\tconf.hTTPSPort = viper.GetString(\"hTTPSPort\")\n\tconf.logLevel = viper.GetString(\"LogLevel\")\n\tconf.mock = viper.GetBool(\"mock\")\n\tconf.language = viper.GetString(\"language\")\n\tconf.dev = viper.GetBool(\"dev\")\n\n\t\/\/ Check if one value of the structure is empty\n\tv := reflect.ValueOf(conf)\n\tvalues := make([]interface{}, v.NumField())\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tvalues[i] = v.Field(i)\n\t\t\/\/TODO#16\n\t\t\/\/v.Field(i).SetString(viper.GetString(v.Type().Field(i).Name))\n\t\tif values[i] == \"\" {\n\t\t\treturn conf, fmt.Errorf(\"Check if the key \" + v.Type().Field(i).Name + \" is present in the file \" + dir)\n\t\t}\n\t}\n\tif token := os.Getenv(\"bloomskyAccessToken\"); token != \"\" {\n\t\tconf.bloomskyAccessToken = token\n\t}\n\treturn conf, nil\n}\n\n\/\/Read file and return []byte\nfunc readFile(fileName string, dev bool) []byte {\n\tif dev {\n\t\tfileByte, err := ioutil.ReadFile(fileName)\n\t\tcheckErr(err, funcName(), \"Error reading the file\", fileName)\n\t\treturn fileByte\n\t}\n\n\tfileByte, err := assembly.Asset(fileName)\n\tcheckErr(err, funcName(), \"Error reading the file\", fileName)\n\treturn fileByte\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tloadEvents = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"datalogger_rows_loaded\",\n\t\tHelp: \"the number of rows loaded into the database\",\n\t})\n\tbatteryVoltage = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"datalogger_main_battery_voltage\",\n\t\tHelp: \"The current main battery voltage\",\n\t})\n)\n\nfunc init() {\n\tprometheus.MustRegister(loadEvents)\n\tprometheus.MustRegister(batteryVoltage)\n}\n\ntype stringSlice []string\n\nfunc (slice stringSlice) pos(value string) int {\n\tfor p, v := range slice {\n\t\tif v == value {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc readCSVLine(text string) []string {\n\treader := csv.NewReader(strings.NewReader(text))\n\tfields, err := reader.Read()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fields\n}\n\nfunc loadData(fileName string) {\n\n\tt, err := tail.TailFile(fileName, tail.Config{\n\t\tFollow: true,\n\t\tReOpen: true,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbattery := 1\n\n\t\/\/ Read title and variables and units\n\tskip := 0\n\tfor line := range t.Lines {\n\t\tif strings.Contains(line.Text, \"TOA5\") {\n\t\t\tskip = 4\n\t\t}\n\t\tif skip == 3 {\n\t\t\t\/\/ decode headers here\n\t\t\tfields := readCSVLine(line.Text)\n\t\t\tvariates := stringSlice(fields)\n\t\t\tbattery = variates.pos(\"GageMinV\")\n\t\t}\n\t\tif skip > 0 {\n\t\t\t\/\/ skip the rest\n\t\t\tskip = skip - 1\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Read data\n\t\tfields := readCSVLine(line.Text)\n\t\tvoltage, err := strconv.ParseFloat(fields[battery], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(voltage)\n\t\tbatteryVoltage.Set(voltage)\n\t\tloadEvents.Inc()\n\t}\n}\n\nfunc main() {\n\n\tgo loadData(\"raingauge_Table1.dat\")\n\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\thttp.ListenAndServe(\":9094\", nil)\n\n}\n<commit_msg>added with label values so that i can monitor several dataloggers with the same program<commit_after>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tloadEvents = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: \"datalogger\",\n\t\t\tName: \"rows_loaded\",\n\t\t\tHelp: \"the number of rows loaded into the database\",\n\t\t},\n\t\t[]string{\"site\"},\n\t)\n\tbatteryVoltage = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: \"datalogger_main_battery_voltage\",\n\t\tHelp: \"The current main battery voltage\",\n\t},\n\t\t[]string{\"site\"},\n\t)\n)\n\nfunc init() {\n\tprometheus.MustRegister(loadEvents)\n\tprometheus.MustRegister(batteryVoltage)\n}\n\ntype stringSlice []string\n\nfunc (slice stringSlice) pos(value string) int {\n\tfor p, v := range slice {\n\t\tif v == value {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc readCSVLine(text string) []string {\n\treader := csv.NewReader(strings.NewReader(text))\n\tfields, err := reader.Read()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fields\n}\n\nfunc loadData(fileName string) {\n\n\tt, err := tail.TailFile(fileName, tail.Config{\n\t\tFollow: true,\n\t\tReOpen: true,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbattery := 1\n\n\t\/\/ Read title and variables and units\n\tskip := 0\n\tfor line := range t.Lines {\n\t\tif strings.Contains(line.Text, \"TOA5\") {\n\t\t\tskip = 4\n\t\t}\n\t\tif skip == 3 {\n\t\t\t\/\/ decode headers\n\t\t\tfields := readCSVLine(line.Text)\n\t\t\tvariates := stringSlice(fields)\n\t\t\tbattery = variates.pos(\"GageMinV\")\n\t\t}\n\t\tif skip > 0 {\n\t\t\t\/\/ skip the rest\n\t\t\tskip = skip - 1\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Read data\n\t\tfields := readCSVLine(line.Text)\n\t\tvoltage, err := strconv.ParseFloat(fields[battery], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(voltage)\n\t\tbatteryVoltage.WithLabelValues(\"luxarbor\").Set(voltage)\n\t\tloadEvents.WithLabelValues(\"luxarbor\").Inc()\n\t}\n}\n\nfunc main() {\n\n\tgo loadData(\"raingauge_Table1.dat\")\n\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\thttp.ListenAndServe(\":9094\", nil)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/changer\/khabar\/config\"\n\t\"github.com\/changer\/khabar\/db\"\n\t\"github.com\/nicksnyder\/go-i18n\/i18n\"\n\t\"gopkg.in\/simversity\/gottp.v2\"\n)\n\nfunc sysInit() {\n\t<-(gottp.SysInitChan) \/\/Buffered Channel to receive the server upstart boolean\n\n\tdb.Conn = db.GetConn(config.Settings.Khabar.DBName, config.Settings.Khabar.DBAddress,\n\t\tconfig.Settings.Khabar.DBUsername, config.Settings.Khabar.DBPassword)\n\tlog.Println(\"Database Connected :\" + config.Settings.Khabar.DBName + \" \" + \"at address:\" + config.Settings.Khabar.DBAddress)\n\n\ttransDir := config.Settings.Khabar.TranslationDirectory\n\n\tif len(transDir) == 0 {\n\t\ttransDir = os.Getenv(\"PWD\") + \"\/translations\"\n\t}\n\n\tlog.Println(\"Directory for translation :\" + transDir)\n\n\tfilepath.Walk(transDir, func(path string, _ os.FileInfo, err error) error {\n\t\tfileExt := filepath.Ext(path)\n\t\tif fileExt == \".json\" && err == nil {\n\t\t\tlog.Println(\"Loading translation file:\" + path)\n\t\t\ti18n.MustLoadTranslationFile(path)\n\t\t} else {\n\t\t\tlog.Print(\"Skipping translation file:\" + path + \" \" + \"File Extension:\" + fileExt + \" \")\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Error:\" + err.Error())\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tlog.Println(\"Translation has been parsed.\")\n}\n\nfunc main() {\n\tgo sysInit()\n\tregisterHandlers()\n\tgottp.MakeServer(&config.Settings)\n}\n<commit_msg>Making identation right<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/changer\/khabar\/config\"\n\t\"github.com\/changer\/khabar\/db\"\n\t\"github.com\/nicksnyder\/go-i18n\/i18n\"\n\t\"gopkg.in\/simversity\/gottp.v2\"\n)\n\nfunc sysInit() {\n\t<-(gottp.SysInitChan) \/\/Buffered Channel to receive the server upstart boolean\n\n\tdb.Conn = db.GetConn(config.Settings.Khabar.DBName, config.Settings.Khabar.DBAddress,\n\t\tconfig.Settings.Khabar.DBUsername, config.Settings.Khabar.DBPassword)\n\tlog.Println(\"Database Connected :\" + config.Settings.Khabar.DBName + \" \"\/\n\t\t+\"at address:\" + config.Settings.Khabar.DBAddress)\n\n\ttransDir := config.Settings.Khabar.TranslationDirectory\n\n\tif len(transDir) == 0 {\n\t\ttransDir = os.Getenv(\"PWD\") + \"\/translations\"\n\t}\n\n\tlog.Println(\"Directory for translation :\" + transDir)\n\n\tfilepath.Walk(transDir, func(path string, _ os.FileInfo, err error) error {\n\t\tfileExt := filepath.Ext(path)\n\t\tif fileExt == \".json\" && err == nil {\n\t\t\tlog.Println(\"Loading translation file:\" + path)\n\t\t\ti18n.MustLoadTranslationFile(path)\n\t\t} else {\n\t\t\tlog.Print(\"Skipping translation file:\" + path + \" \" + \"File Extension:\" + fileExt + \" \")\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Error:\" + err.Error())\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tlog.Println(\"Translation has been parsed.\")\n}\n\nfunc main() {\n\tgo sysInit()\n\tregisterHandlers()\n\tgottp.MakeServer(&config.Settings)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package pester provides additional resiliency over the standard http client methods by\n\/\/ allowing you to control concurrency, retries, and a backoff strategy.\npackage pester\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ErrUnexpectedMethod occurs when an http.Client method is unable to be mapped from a calling method in the pester client\nvar ErrUnexpectedMethod = errors.New(\"unexpected client method, must be one of Do, Get, Head, Post, or PostFrom\")\n\n\/\/ ErrReadingBody happens when we cannot read the body bytes\nvar ErrReadingBody = errors.New(\"error reading body\")\n\n\/\/ ErrReadingRequestBody happens when we cannot read the request body bytes\nvar ErrReadingRequestBody = errors.New(\"error reading request body\")\n\n\/\/ Client wraps the http client and exposes all the functionality of the http.Client.\n\/\/ Additionally, Client provides pester specific values for handling resiliency.\ntype Client struct {\n\t\/\/ wrap it to provide access to http built ins\n\thc *http.Client\n\n\tTransport http.RoundTripper\n\tCheckRedirect func(req *http.Request, via []*http.Request) error\n\tJar http.CookieJar\n\tTimeout time.Duration\n\n\t\/\/ pester specific\n\tConcurrency int\n\tMaxRetries int\n\tBackoff BackoffStrategy\n\tKeepLog bool\n\tLogHook LogHook\n\n\tSuccessReqNum int\n\tSuccessRetryNum int\n\n\twg *sync.WaitGroup\n\n\tsync.Mutex\n\tErrLog []ErrEntry\n}\n\n\/\/ ErrEntry is used to provide the LogString() data and is populated\n\/\/ each time an error happens if KeepLog is set.\n\/\/ ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt\ntype ErrEntry struct {\n\tTime time.Time\n\tMethod string\n\tURL string\n\tVerb string\n\tRequest int\n\tRetry int\n\tAttempt int\n\tErr error\n}\n\n\/\/ result simplifies the channel communication for concurrent request handling\ntype result struct {\n\tresp *http.Response\n\terr error\n\treq int\n\tretry int\n}\n\n\/\/ params represents all the params needed to run http client calls and pester errors\ntype params struct {\n\tmethod string\n\tverb string\n\treq *http.Request\n\turl string\n\tbodyType string\n\tbody io.Reader\n\tdata url.Values\n}\n\nvar random *rand.Rand\n\nfunc init() {\n\trandom = rand.New(rand.NewSource(time.Now().UnixNano()))\n}\n\n\/\/ New constructs a new DefaultClient with sensible default values\nfunc New() *Client {\n\treturn &Client{\n\t\tConcurrency: DefaultClient.Concurrency,\n\t\tMaxRetries: DefaultClient.MaxRetries,\n\t\tBackoff: DefaultClient.Backoff,\n\t\tErrLog: DefaultClient.ErrLog,\n\t\twg: &sync.WaitGroup{},\n\t}\n}\n\n\/\/ NewExtendedClient allows you to pass in an http.Client that is previously set up\n\/\/ and extends it to have Pester's features of concurrency and retries.\nfunc NewExtendedClient(hc *http.Client) *Client {\n\tc := New()\n\tc.hc = hc\n\treturn c\n}\n\n\/\/ PrintErrStrategy is used to log attempts as they happen.\n\/\/ You know, more visible\ntype LogHook func(e ErrEntry)\n\n\/\/ BackoffStrategy is used to determine how long a retry request should wait until attempted\ntype BackoffStrategy func(retry int) time.Duration\n\n\/\/ DefaultClient provides sensible defaults\nvar DefaultClient = &Client{Concurrency: 1, MaxRetries: 3, Backoff: DefaultBackoff, ErrLog: []ErrEntry{}}\n\n\/\/ DefaultBackoff always returns 1 second\nfunc DefaultBackoff(_ int) time.Duration {\n\treturn 1 * time.Second\n}\n\n\/\/ ExponentialBackoff returns ever increasing backoffs by a power of 2\nfunc ExponentialBackoff(i int) time.Duration {\n\treturn time.Duration(1<<uint(i)) * time.Second\n}\n\n\/\/ ExponentialJitterBackoff returns ever increasing backoffs by a power of 2\n\/\/ with +\/- 0-33% to prevent sychronized reuqests.\nfunc ExponentialJitterBackoff(i int) time.Duration {\n\treturn jitter(int(1 << uint(i)))\n}\n\n\/\/ LinearBackoff returns increasing durations, each a second longer than the last\nfunc LinearBackoff(i int) time.Duration {\n\treturn time.Duration(i) * time.Second\n}\n\n\/\/ LinearJitterBackoff returns increasing durations, each a second longer than the last\n\/\/ with +\/- 0-33% to prevent sychronized reuqests.\nfunc LinearJitterBackoff(i int) time.Duration {\n\treturn jitter(i)\n}\n\n\/\/ jitter keeps the +\/- 0-33% logic in one place\nfunc jitter(i int) time.Duration {\n\tms := i * 1000\n\n\tmaxJitter := ms \/ 3\n\n\t\/\/ ms ± rand\n\tms += random.Intn(2*maxJitter) - maxJitter\n\n\t\/\/ a jitter of 0 messes up the time.Tick chan\n\tif ms <= 0 {\n\t\tms = 1\n\t}\n\n\treturn time.Duration(ms) * time.Millisecond\n}\n\n\/\/ Wait blocks until all pester requests have returned\n\/\/ Probably not that useful outside of testing.\nfunc (c *Client) Wait() {\n\tc.wg.Wait()\n}\n\n\/\/ pester provides all the logic of retries, concurrency, backoff, and logging\nfunc (c *Client) pester(p params) (*http.Response, error) {\n\tresultCh := make(chan result)\n\tmultiplexCh := make(chan result)\n\tfinishCh := make(chan struct{})\n\n\t\/\/ track all requests that go out so we can close the late listener routine that closes late incoming response bodies\n\ttotalSentRequests := &sync.WaitGroup{}\n\ttotalSentRequests.Add(1)\n\tdefer totalSentRequests.Done()\n\tallRequestsBackCh := make(chan struct{})\n\tgo func() {\n\t\ttotalSentRequests.Wait()\n\t\tclose(allRequestsBackCh)\n\t}()\n\n\t\/\/ GET calls should be idempotent and can make use\n\t\/\/ of concurrency. Other verbs can mutate and should not\n\t\/\/ make use of the concurrency feature\n\tconcurrency := c.Concurrency\n\tif p.verb != \"GET\" {\n\t\tconcurrency = 1\n\t}\n\n\tc.Lock()\n\tif c.hc == nil {\n\t\tc.hc = &http.Client{}\n\t\tc.hc.Transport = c.Transport\n\t\tc.hc.CheckRedirect = c.CheckRedirect\n\t\tc.hc.Jar = c.Jar\n\t\tc.hc.Timeout = c.Timeout\n\t}\n\tc.Unlock()\n\n\t\/\/ re-create the http client so we can leverage the std lib\n\thttpClient := http.Client{\n\t\tTransport: c.hc.Transport,\n\t\tCheckRedirect: c.hc.CheckRedirect,\n\t\tJar: c.hc.Jar,\n\t\tTimeout: c.hc.Timeout,\n\t}\n\n\t\/\/ if we have a request body, we need to save it for later\n\tvar originalRequestBody []byte\n\tvar originalBody []byte\n\tvar err error\n\tif p.req != nil && p.req.Body != nil {\n\t\toriginalRequestBody, err = ioutil.ReadAll(p.req.Body)\n\t\tif err != nil {\n\t\t\treturn nil, ErrReadingRequestBody\n\t\t}\n\t\tp.req.Body.Close()\n\t}\n\tif p.body != nil {\n\t\toriginalBody, err = ioutil.ReadAll(p.body)\n\t\tif err != nil {\n\t\t\treturn nil, ErrReadingBody\n\t\t}\n\t}\n\n\tAttemptLimit := c.MaxRetries\n\tif AttemptLimit <= 0 {\n\t\tAttemptLimit = 1\n\t}\n\n\tfor req := 0; req < concurrency; req++ {\n\t\tc.wg.Add(1)\n\t\ttotalSentRequests.Add(1)\n\t\tgo func(n int, p params) {\n\t\t\tdefer c.wg.Done()\n\t\t\tdefer totalSentRequests.Done()\n\n\t\t\tvar err error\n\t\t\tfor i := 1; i <= AttemptLimit; i++ {\n\t\t\t\tc.wg.Add(1)\n\t\t\t\tdefer c.wg.Done()\n\t\t\t\tselect {\n\t\t\t\tcase <-finishCh:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\t\/\/ rehydrate the body (it is drained each read)\n\t\t\t\tif len(originalRequestBody) > 0 {\n\t\t\t\t\tp.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))\n\t\t\t\t}\n\t\t\t\tif len(originalBody) > 0 {\n\t\t\t\t\tp.body = bytes.NewBuffer(originalBody)\n\t\t\t\t}\n\n\t\t\t\tvar resp *http.Response\n\t\t\t\t\/\/ route the calls\n\t\t\t\tswitch p.method {\n\t\t\t\tcase \"Do\":\n\t\t\t\t\tresp, err = httpClient.Do(p.req)\n\t\t\t\tcase \"Get\":\n\t\t\t\t\tresp, err = httpClient.Get(p.url)\n\t\t\t\tcase \"Head\":\n\t\t\t\t\tresp, err = httpClient.Head(p.url)\n\t\t\t\tcase \"Post\":\n\t\t\t\t\tresp, err = httpClient.Post(p.url, p.bodyType, p.body)\n\t\t\t\tcase \"PostForm\":\n\t\t\t\t\tresp, err = httpClient.PostForm(p.url, p.data)\n\t\t\t\tdefault:\n\t\t\t\t\terr = ErrUnexpectedMethod\n\t\t\t\t}\n\n\t\t\t\t\/\/ Early return if we have a valid result\n\t\t\t\t\/\/ Only retry (ie, continue the loop) on 5xx status codes\n\t\t\t\tif err == nil && resp.StatusCode < 500 {\n\t\t\t\t\tmultiplexCh <- result{resp: resp, err: err, req: n, retry: i}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tc.log(ErrEntry{\n\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\tMethod: p.method,\n\t\t\t\t\tVerb: p.verb,\n\t\t\t\t\tURL: p.url,\n\t\t\t\t\tRequest: n,\n\t\t\t\t\tRetry: i + 1, \/\/ would remove, but would break backward compatibility\n\t\t\t\t\tAttempt: i,\n\t\t\t\t\tErr: err,\n\t\t\t\t})\n\n\t\t\t\t\/\/ if it is the last iteration, grab the result (which is an error at this point)\n\t\t\t\tif i == AttemptLimit {\n\t\t\t\t\tmultiplexCh <- result{resp: resp, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ if we are retrying, we should close this response body to free the fd\n\t\t\t\tif resp != nil {\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t}\n\n\t\t\t\t\/\/ prevent a 0 from causing the tick to block, pass additional microsecond\n\t\t\t\t<-time.After(c.Backoff(i) + 1*time.Microsecond)\n\t\t\t}\n\t\t}(req, p)\n\t}\n\n\t\/\/ spin off the go routine so it can continually listen in on late results and close the response bodies\n\tgo func() {\n\t\tgotFirstResult := false\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase res := <-multiplexCh:\n\t\t\t\tif !gotFirstResult {\n\t\t\t\t\tgotFirstResult = true\n\t\t\t\t\tclose(finishCh)\n\t\t\t\t\tresultCh <- res\n\t\t\t\t} else if res.resp != nil {\n\t\t\t\t\t\/\/ we only return one result to the caller; close all other response bodies that come back\n\t\t\t\t\t\/\/ drain the body before close as to not prevent keepalive. see https:\/\/gist.github.com\/mholt\/eba0f2cc96658be0f717\n\t\t\t\t\tio.Copy(ioutil.Discard, res.resp.Body)\n\t\t\t\t\tres.resp.Body.Close()\n\t\t\t\t}\n\t\t\tcase <-allRequestsBackCh:\n\t\t\t\t\/\/ don't leave this goroutine running\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tres := <-resultCh\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.SuccessReqNum = res.req\n\tc.SuccessRetryNum = res.retry\n\treturn res.resp, res.err\n\n}\n\n\/\/ LogString provides a string representation of the errors the client has seen\nfunc (c *Client) LogString() string {\n\tc.Lock()\n\tdefer c.Unlock()\n\tvar res string\n\tfor _, e := range c.ErrLog {\n\t\tres += c.FormatError(e)\n\t}\n\treturn res\n}\n\n\/\/ Format the Error to human readable string\nfunc (c *Client) FormatError(e ErrEntry) string {\n\treturn fmt.Sprintf(\"%d %s [%s] %s request-%d retry-%d error: %s\\n\",\n\t\te.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)\n}\n\n\/\/ LogErrCount is a helper method used primarily for test validation\nfunc (c *Client) LogErrCount() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\treturn len(c.ErrLog)\n}\n\n\/\/ EmbedHTTPClient allows you to extend an existing Pester client with an\n\/\/ underlying http.Client, such as https:\/\/godoc.org\/golang.org\/x\/oauth2\/google#DefaultClient\nfunc (c *Client) EmbedHTTPClient(hc *http.Client) {\n\tc.hc = hc\n}\n\nfunc (c *Client) log(e ErrEntry) {\n\tif c.KeepLog {\n\t\tc.Lock()\n\t\tdefer c.Unlock()\n\t\tc.ErrLog = append(c.ErrLog, e)\n\t} else if c.LogHook != nil {\n\t\t\/\/ NOTE: There is a possibility that Log Printing hook slows it down.\n\t\t\/\/ but the consumer can always do the Job in a go-routine.\n\t\tc.LogHook(e)\n\t}\n}\n\n\/\/ Do provides the same functionality as http.Client.Do\nfunc (c *Client) Do(req *http.Request) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Do\", req: req, verb: req.Method, url: req.URL.String()})\n}\n\n\/\/ Get provides the same functionality as http.Client.Get\nfunc (c *Client) Get(url string) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Get\", url: url, verb: \"GET\"})\n}\n\n\/\/ Head provides the same functionality as http.Client.Head\nfunc (c *Client) Head(url string) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Head\", url: url, verb: \"HEAD\"})\n}\n\n\/\/ Post provides the same functionality as http.Client.Post\nfunc (c *Client) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Post\", url: url, bodyType: bodyType, body: body, verb: \"POST\"})\n}\n\n\/\/ PostForm provides the same functionality as http.Client.PostForm\nfunc (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"PostForm\", url: url, data: data, verb: \"POST\"})\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Provide self-constructing variants \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Do provides the same functionality as http.Client.Do and creates its own constructor\nfunc Do(req *http.Request) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Do(req)\n}\n\n\/\/ Get provides the same functionality as http.Client.Get and creates its own constructor\nfunc Get(url string) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Get(url)\n}\n\n\/\/ Head provides the same functionality as http.Client.Head and creates its own constructor\nfunc Head(url string) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Head(url)\n}\n\n\/\/ Post provides the same functionality as http.Client.Post and creates its own constructor\nfunc Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Post(url, bodyType, body)\n}\n\n\/\/ PostForm provides the same functionality as http.Client.PostForm and creates its own constructor\nfunc PostForm(url string, data url.Values) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.PostForm(url, data)\n}\n<commit_msg>Rework docstring for LogHook.<commit_after>\/\/ Package pester provides additional resiliency over the standard http client methods by\n\/\/ allowing you to control concurrency, retries, and a backoff strategy.\npackage pester\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ErrUnexpectedMethod occurs when an http.Client method is unable to be mapped from a calling method in the pester client\nvar ErrUnexpectedMethod = errors.New(\"unexpected client method, must be one of Do, Get, Head, Post, or PostFrom\")\n\n\/\/ ErrReadingBody happens when we cannot read the body bytes\nvar ErrReadingBody = errors.New(\"error reading body\")\n\n\/\/ ErrReadingRequestBody happens when we cannot read the request body bytes\nvar ErrReadingRequestBody = errors.New(\"error reading request body\")\n\n\/\/ Client wraps the http client and exposes all the functionality of the http.Client.\n\/\/ Additionally, Client provides pester specific values for handling resiliency.\ntype Client struct {\n\t\/\/ wrap it to provide access to http built ins\n\thc *http.Client\n\n\tTransport http.RoundTripper\n\tCheckRedirect func(req *http.Request, via []*http.Request) error\n\tJar http.CookieJar\n\tTimeout time.Duration\n\n\t\/\/ pester specific\n\tConcurrency int\n\tMaxRetries int\n\tBackoff BackoffStrategy\n\tKeepLog bool\n\tLogHook LogHook\n\n\tSuccessReqNum int\n\tSuccessRetryNum int\n\n\twg *sync.WaitGroup\n\n\tsync.Mutex\n\tErrLog []ErrEntry\n}\n\n\/\/ ErrEntry is used to provide the LogString() data and is populated\n\/\/ each time an error happens if KeepLog is set.\n\/\/ ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt\ntype ErrEntry struct {\n\tTime time.Time\n\tMethod string\n\tURL string\n\tVerb string\n\tRequest int\n\tRetry int\n\tAttempt int\n\tErr error\n}\n\n\/\/ result simplifies the channel communication for concurrent request handling\ntype result struct {\n\tresp *http.Response\n\terr error\n\treq int\n\tretry int\n}\n\n\/\/ params represents all the params needed to run http client calls and pester errors\ntype params struct {\n\tmethod string\n\tverb string\n\treq *http.Request\n\turl string\n\tbodyType string\n\tbody io.Reader\n\tdata url.Values\n}\n\nvar random *rand.Rand\n\nfunc init() {\n\trandom = rand.New(rand.NewSource(time.Now().UnixNano()))\n}\n\n\/\/ New constructs a new DefaultClient with sensible default values\nfunc New() *Client {\n\treturn &Client{\n\t\tConcurrency: DefaultClient.Concurrency,\n\t\tMaxRetries: DefaultClient.MaxRetries,\n\t\tBackoff: DefaultClient.Backoff,\n\t\tErrLog: DefaultClient.ErrLog,\n\t\twg: &sync.WaitGroup{},\n\t}\n}\n\n\/\/ NewExtendedClient allows you to pass in an http.Client that is previously set up\n\/\/ and extends it to have Pester's features of concurrency and retries.\nfunc NewExtendedClient(hc *http.Client) *Client {\n\tc := New()\n\tc.hc = hc\n\treturn c\n}\n\n\/\/ LogHook is used to log attempts as they happen. This function is never called,\n\/\/ however, if KeepLog is set to true.\ntype LogHook func(e ErrEntry)\n\n\/\/ BackoffStrategy is used to determine how long a retry request should wait until attempted\ntype BackoffStrategy func(retry int) time.Duration\n\n\/\/ DefaultClient provides sensible defaults\nvar DefaultClient = &Client{Concurrency: 1, MaxRetries: 3, Backoff: DefaultBackoff, ErrLog: []ErrEntry{}}\n\n\/\/ DefaultBackoff always returns 1 second\nfunc DefaultBackoff(_ int) time.Duration {\n\treturn 1 * time.Second\n}\n\n\/\/ ExponentialBackoff returns ever increasing backoffs by a power of 2\nfunc ExponentialBackoff(i int) time.Duration {\n\treturn time.Duration(1<<uint(i)) * time.Second\n}\n\n\/\/ ExponentialJitterBackoff returns ever increasing backoffs by a power of 2\n\/\/ with +\/- 0-33% to prevent sychronized reuqests.\nfunc ExponentialJitterBackoff(i int) time.Duration {\n\treturn jitter(int(1 << uint(i)))\n}\n\n\/\/ LinearBackoff returns increasing durations, each a second longer than the last\nfunc LinearBackoff(i int) time.Duration {\n\treturn time.Duration(i) * time.Second\n}\n\n\/\/ LinearJitterBackoff returns increasing durations, each a second longer than the last\n\/\/ with +\/- 0-33% to prevent sychronized reuqests.\nfunc LinearJitterBackoff(i int) time.Duration {\n\treturn jitter(i)\n}\n\n\/\/ jitter keeps the +\/- 0-33% logic in one place\nfunc jitter(i int) time.Duration {\n\tms := i * 1000\n\n\tmaxJitter := ms \/ 3\n\n\t\/\/ ms ± rand\n\tms += random.Intn(2*maxJitter) - maxJitter\n\n\t\/\/ a jitter of 0 messes up the time.Tick chan\n\tif ms <= 0 {\n\t\tms = 1\n\t}\n\n\treturn time.Duration(ms) * time.Millisecond\n}\n\n\/\/ Wait blocks until all pester requests have returned\n\/\/ Probably not that useful outside of testing.\nfunc (c *Client) Wait() {\n\tc.wg.Wait()\n}\n\n\/\/ pester provides all the logic of retries, concurrency, backoff, and logging\nfunc (c *Client) pester(p params) (*http.Response, error) {\n\tresultCh := make(chan result)\n\tmultiplexCh := make(chan result)\n\tfinishCh := make(chan struct{})\n\n\t\/\/ track all requests that go out so we can close the late listener routine that closes late incoming response bodies\n\ttotalSentRequests := &sync.WaitGroup{}\n\ttotalSentRequests.Add(1)\n\tdefer totalSentRequests.Done()\n\tallRequestsBackCh := make(chan struct{})\n\tgo func() {\n\t\ttotalSentRequests.Wait()\n\t\tclose(allRequestsBackCh)\n\t}()\n\n\t\/\/ GET calls should be idempotent and can make use\n\t\/\/ of concurrency. Other verbs can mutate and should not\n\t\/\/ make use of the concurrency feature\n\tconcurrency := c.Concurrency\n\tif p.verb != \"GET\" {\n\t\tconcurrency = 1\n\t}\n\n\tc.Lock()\n\tif c.hc == nil {\n\t\tc.hc = &http.Client{}\n\t\tc.hc.Transport = c.Transport\n\t\tc.hc.CheckRedirect = c.CheckRedirect\n\t\tc.hc.Jar = c.Jar\n\t\tc.hc.Timeout = c.Timeout\n\t}\n\tc.Unlock()\n\n\t\/\/ re-create the http client so we can leverage the std lib\n\thttpClient := http.Client{\n\t\tTransport: c.hc.Transport,\n\t\tCheckRedirect: c.hc.CheckRedirect,\n\t\tJar: c.hc.Jar,\n\t\tTimeout: c.hc.Timeout,\n\t}\n\n\t\/\/ if we have a request body, we need to save it for later\n\tvar originalRequestBody []byte\n\tvar originalBody []byte\n\tvar err error\n\tif p.req != nil && p.req.Body != nil {\n\t\toriginalRequestBody, err = ioutil.ReadAll(p.req.Body)\n\t\tif err != nil {\n\t\t\treturn nil, ErrReadingRequestBody\n\t\t}\n\t\tp.req.Body.Close()\n\t}\n\tif p.body != nil {\n\t\toriginalBody, err = ioutil.ReadAll(p.body)\n\t\tif err != nil {\n\t\t\treturn nil, ErrReadingBody\n\t\t}\n\t}\n\n\tAttemptLimit := c.MaxRetries\n\tif AttemptLimit <= 0 {\n\t\tAttemptLimit = 1\n\t}\n\n\tfor req := 0; req < concurrency; req++ {\n\t\tc.wg.Add(1)\n\t\ttotalSentRequests.Add(1)\n\t\tgo func(n int, p params) {\n\t\t\tdefer c.wg.Done()\n\t\t\tdefer totalSentRequests.Done()\n\n\t\t\tvar err error\n\t\t\tfor i := 1; i <= AttemptLimit; i++ {\n\t\t\t\tc.wg.Add(1)\n\t\t\t\tdefer c.wg.Done()\n\t\t\t\tselect {\n\t\t\t\tcase <-finishCh:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\t\/\/ rehydrate the body (it is drained each read)\n\t\t\t\tif len(originalRequestBody) > 0 {\n\t\t\t\t\tp.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))\n\t\t\t\t}\n\t\t\t\tif len(originalBody) > 0 {\n\t\t\t\t\tp.body = bytes.NewBuffer(originalBody)\n\t\t\t\t}\n\n\t\t\t\tvar resp *http.Response\n\t\t\t\t\/\/ route the calls\n\t\t\t\tswitch p.method {\n\t\t\t\tcase \"Do\":\n\t\t\t\t\tresp, err = httpClient.Do(p.req)\n\t\t\t\tcase \"Get\":\n\t\t\t\t\tresp, err = httpClient.Get(p.url)\n\t\t\t\tcase \"Head\":\n\t\t\t\t\tresp, err = httpClient.Head(p.url)\n\t\t\t\tcase \"Post\":\n\t\t\t\t\tresp, err = httpClient.Post(p.url, p.bodyType, p.body)\n\t\t\t\tcase \"PostForm\":\n\t\t\t\t\tresp, err = httpClient.PostForm(p.url, p.data)\n\t\t\t\tdefault:\n\t\t\t\t\terr = ErrUnexpectedMethod\n\t\t\t\t}\n\n\t\t\t\t\/\/ Early return if we have a valid result\n\t\t\t\t\/\/ Only retry (ie, continue the loop) on 5xx status codes\n\t\t\t\tif err == nil && resp.StatusCode < 500 {\n\t\t\t\t\tmultiplexCh <- result{resp: resp, err: err, req: n, retry: i}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tc.log(ErrEntry{\n\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\tMethod: p.method,\n\t\t\t\t\tVerb: p.verb,\n\t\t\t\t\tURL: p.url,\n\t\t\t\t\tRequest: n,\n\t\t\t\t\tRetry: i + 1, \/\/ would remove, but would break backward compatibility\n\t\t\t\t\tAttempt: i,\n\t\t\t\t\tErr: err,\n\t\t\t\t})\n\n\t\t\t\t\/\/ if it is the last iteration, grab the result (which is an error at this point)\n\t\t\t\tif i == AttemptLimit {\n\t\t\t\t\tmultiplexCh <- result{resp: resp, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ if we are retrying, we should close this response body to free the fd\n\t\t\t\tif resp != nil {\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t}\n\n\t\t\t\t\/\/ prevent a 0 from causing the tick to block, pass additional microsecond\n\t\t\t\t<-time.After(c.Backoff(i) + 1*time.Microsecond)\n\t\t\t}\n\t\t}(req, p)\n\t}\n\n\t\/\/ spin off the go routine so it can continually listen in on late results and close the response bodies\n\tgo func() {\n\t\tgotFirstResult := false\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase res := <-multiplexCh:\n\t\t\t\tif !gotFirstResult {\n\t\t\t\t\tgotFirstResult = true\n\t\t\t\t\tclose(finishCh)\n\t\t\t\t\tresultCh <- res\n\t\t\t\t} else if res.resp != nil {\n\t\t\t\t\t\/\/ we only return one result to the caller; close all other response bodies that come back\n\t\t\t\t\t\/\/ drain the body before close as to not prevent keepalive. see https:\/\/gist.github.com\/mholt\/eba0f2cc96658be0f717\n\t\t\t\t\tio.Copy(ioutil.Discard, res.resp.Body)\n\t\t\t\t\tres.resp.Body.Close()\n\t\t\t\t}\n\t\t\tcase <-allRequestsBackCh:\n\t\t\t\t\/\/ don't leave this goroutine running\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tres := <-resultCh\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.SuccessReqNum = res.req\n\tc.SuccessRetryNum = res.retry\n\treturn res.resp, res.err\n\n}\n\n\/\/ LogString provides a string representation of the errors the client has seen\nfunc (c *Client) LogString() string {\n\tc.Lock()\n\tdefer c.Unlock()\n\tvar res string\n\tfor _, e := range c.ErrLog {\n\t\tres += c.FormatError(e)\n\t}\n\treturn res\n}\n\n\/\/ Format the Error to human readable string\nfunc (c *Client) FormatError(e ErrEntry) string {\n\treturn fmt.Sprintf(\"%d %s [%s] %s request-%d retry-%d error: %s\\n\",\n\t\te.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)\n}\n\n\/\/ LogErrCount is a helper method used primarily for test validation\nfunc (c *Client) LogErrCount() int {\n\tc.Lock()\n\tdefer c.Unlock()\n\treturn len(c.ErrLog)\n}\n\n\/\/ EmbedHTTPClient allows you to extend an existing Pester client with an\n\/\/ underlying http.Client, such as https:\/\/godoc.org\/golang.org\/x\/oauth2\/google#DefaultClient\nfunc (c *Client) EmbedHTTPClient(hc *http.Client) {\n\tc.hc = hc\n}\n\nfunc (c *Client) log(e ErrEntry) {\n\tif c.KeepLog {\n\t\tc.Lock()\n\t\tdefer c.Unlock()\n\t\tc.ErrLog = append(c.ErrLog, e)\n\t} else if c.LogHook != nil {\n\t\t\/\/ NOTE: There is a possibility that Log Printing hook slows it down.\n\t\t\/\/ but the consumer can always do the Job in a go-routine.\n\t\tc.LogHook(e)\n\t}\n}\n\n\/\/ Do provides the same functionality as http.Client.Do\nfunc (c *Client) Do(req *http.Request) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Do\", req: req, verb: req.Method, url: req.URL.String()})\n}\n\n\/\/ Get provides the same functionality as http.Client.Get\nfunc (c *Client) Get(url string) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Get\", url: url, verb: \"GET\"})\n}\n\n\/\/ Head provides the same functionality as http.Client.Head\nfunc (c *Client) Head(url string) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Head\", url: url, verb: \"HEAD\"})\n}\n\n\/\/ Post provides the same functionality as http.Client.Post\nfunc (c *Client) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"Post\", url: url, bodyType: bodyType, body: body, verb: \"POST\"})\n}\n\n\/\/ PostForm provides the same functionality as http.Client.PostForm\nfunc (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {\n\treturn c.pester(params{method: \"PostForm\", url: url, data: data, verb: \"POST\"})\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Provide self-constructing variants \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Do provides the same functionality as http.Client.Do and creates its own constructor\nfunc Do(req *http.Request) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Do(req)\n}\n\n\/\/ Get provides the same functionality as http.Client.Get and creates its own constructor\nfunc Get(url string) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Get(url)\n}\n\n\/\/ Head provides the same functionality as http.Client.Head and creates its own constructor\nfunc Head(url string) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Head(url)\n}\n\n\/\/ Post provides the same functionality as http.Client.Post and creates its own constructor\nfunc Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.Post(url, bodyType, body)\n}\n\n\/\/ PostForm provides the same functionality as http.Client.PostForm and creates its own constructor\nfunc PostForm(url string, data url.Values) (resp *http.Response, err error) {\n\tc := New()\n\treturn c.PostForm(url, data)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/nlopes\/slack\"\n)\n\nvar (\n\tatk = os.Getenv(\"AUTHENTICATION_TOKEN\")\n\tvtk = os.Getenv(\"VERIFICATION_TOKEN\")\n\tappURL = os.Getenv(\"APP_URL\")\n\ticonURL = appURL + \"\/static\/icon.png\"\n\tmemeURL = appURL + \"\/static\/spongemock.jpg\"\n\tapi = slack.New(atk)\n)\n\nfunc transformText(m string) string {\n\tvar buffer bytes.Buffer\n\tfor i := 0; i < len(m); i++ {\n\t\tch := m[i : i+1]\n\t\tif rand.Int()%2 == 0 {\n\t\t\tch = strings.ToLower(ch)\n\t\t}\n\t\tbuffer.WriteString(ch)\n\t}\n\treturn buffer.String()\n}\n\nfunc isValidSlackRequest(r *http.Request) bool {\n\tif r.Method != \"POST\" {\n\t\tlog.Printf(\"want method POST, got %s\\n\", r.Method)\n\t\treturn false\n\t}\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.Printf(\"invalid form data: %s\\n\", err)\n\t\treturn false\n\t}\n\tif cmd := r.PostFormValue(\"command\"); cmd != \"\/spongemock\" {\n\t\tlog.Printf(\"want command \/spongemock, got %s\\n\", cmd)\n\t\treturn false\n\t}\n\tif tk := r.PostFormValue(\"token\"); tk != vtk {\n\t\tlog.Printf(\"received invalid token %s\\n\", tk)\n\t\treturn false\n\t}\n\tif url := r.PostFormValue(\"response_url\"); url == \"\" {\n\t\tlog.Println(\"did not receive response url\")\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc getLastSlackMessage(c string) (string, error) {\n\th, err := api.GetChannelHistory(c, slack.NewHistoryParameters())\n\tif err != nil {\n\t\tlog.Printf(\"history API request error: %s\", err)\n\t\treturn \"\", err\n\t}\n\n\tfor _, msg := range h.Messages {\n\t\tif msg.Text == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn msg.Text, nil\n\t}\n\n\terr = errors.New(\"no last message found\")\n\tlog.Println(err)\n\treturn \"\", err\n}\n\nfunc handleSlack(w http.ResponseWriter, r *http.Request) {\n\tstatus := http.StatusOK\n\tdefer func() {\n\t\tw.WriteHeader(status)\n\t}()\n\tif !isValidSlackRequest(r) {\n\t\tstatus = http.StatusBadRequest\n\t\treturn\n\t}\n\tchannel := r.PostFormValue(\"channel_id\")\n\tlastMessage, err := getLastSlackMessage(channel)\n\tif err != nil {\n\t\tstatus = http.StatusInternalServerError\n\t\treturn\n\t}\n\tparams := slack.NewPostMessageParameters()\n\tparams.Username = \"Spongebob\"\n\tparams.Attachments = []slack.Attachment{{\n\t\tText: transformText(lastMessage),\n\t\tFallback: \"*Spongebob mocking meme*\",\n\t\tImageURL: memeURL,\n\t}}\n\tparams.IconURL = iconURL\n\t_, _, err = api.PostMessage(channel, \"\", params)\n\tif err != nil {\n\t\tstatus = http.StatusInternalServerError\n\t}\n}\n\nfunc main() {\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tlog.Fatal(\"$PORT must be set!\")\n\t}\n\tif atk == \"\" {\n\t\tlog.Fatal(\"$AUTHENTICATION_TOKEN must be set!\")\n\t}\n\tif vtk == \"\" {\n\t\tlog.Fatal(\"$VERIFICATION_TOKEN must be set!\")\n\t}\n\tif appURL == \"\" {\n\t\tlog.Fatal(\"$APP_URL must be set!\")\n\t}\n\n\thttp.Handle(\"\/static\", http.FileServer(http.Dir(\"\/static\")))\n\thttp.HandleFunc(\"\/slack\", handleSlack)\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n<commit_msg>Fix bugs<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/nlopes\/slack\"\n)\n\nvar (\n\tatk = os.Getenv(\"AUTHENTICATION_TOKEN\")\n\tvtk = os.Getenv(\"VERIFICATION_TOKEN\")\n\tappURL = os.Getenv(\"APP_URL\")\n\ticonURL = appURL + \"\/static\/icon.png\"\n\tmemeURL = appURL + \"\/static\/spongemock.jpg\"\n\tapi = slack.New(atk)\n)\n\nfunc transformText(m string) string {\n\tvar buffer bytes.Buffer\n\tfor i := 0; i < len(m); i++ {\n\t\tch := m[i : i+1]\n\t\tif rand.Int()%2 == 0 {\n\t\t\tch = strings.ToUpper(ch)\n\t\t}\n\t\tbuffer.WriteString(ch)\n\t}\n\treturn buffer.String()\n}\n\nfunc isValidSlackRequest(r *http.Request) bool {\n\tif r.Method != \"POST\" {\n\t\tlog.Printf(\"want method POST, got %s\\n\", r.Method)\n\t\treturn false\n\t}\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.Printf(\"invalid form data: %s\\n\", err)\n\t\treturn false\n\t}\n\tif cmd := r.PostFormValue(\"command\"); cmd != \"\/spongemock\" {\n\t\tlog.Printf(\"want command \/spongemock, got %s\\n\", cmd)\n\t\treturn false\n\t}\n\tif tk := r.PostFormValue(\"token\"); tk != vtk {\n\t\tlog.Printf(\"received invalid token %s\\n\", tk)\n\t\treturn false\n\t}\n\tif url := r.PostFormValue(\"response_url\"); url == \"\" {\n\t\tlog.Println(\"did not receive response url\")\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc getLastSlackMessage(c string) (string, error) {\n\th, err := api.GetChannelHistory(c, slack.NewHistoryParameters())\n\tif err != nil {\n\t\tlog.Printf(\"history API request error: %s\", err)\n\t\treturn \"\", err\n\t}\n\n\tfor _, msg := range h.Messages {\n\t\tif msg.Text == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn msg.Text, nil\n\t}\n\n\terr = errors.New(\"no last message found\")\n\tlog.Println(err)\n\treturn \"\", err\n}\n\nfunc handleSlack(w http.ResponseWriter, r *http.Request) {\n\tstatus := http.StatusOK\n\tdefer func() {\n\t\tw.WriteHeader(status)\n\t}()\n\tif !isValidSlackRequest(r) {\n\t\tstatus = http.StatusBadRequest\n\t\treturn\n\t}\n\tchannel := r.PostFormValue(\"channel_id\")\n\tlastMessage, err := getLastSlackMessage(channel)\n\tif err != nil {\n\t\tstatus = http.StatusInternalServerError\n\t\treturn\n\t}\n\tparams := slack.NewPostMessageParameters()\n\tparams.Username = \"Spongebob\"\n\tparams.Attachments = []slack.Attachment{{\n\t\tText: transformText(lastMessage),\n\t\tFallback: \"*Spongebob mocking meme*\",\n\t\tImageURL: memeURL,\n\t}}\n\tparams.IconURL = iconURL\n\t_, _, err = api.PostMessage(channel, \"\", params)\n\tif err != nil {\n\t\tstatus = http.StatusInternalServerError\n\t}\n}\n\nfunc main() {\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tlog.Fatal(\"$PORT must be set!\")\n\t}\n\tif atk == \"\" {\n\t\tlog.Fatal(\"$AUTHENTICATION_TOKEN must be set!\")\n\t}\n\tif vtk == \"\" {\n\t\tlog.Fatal(\"$VERIFICATION_TOKEN must be set!\")\n\t}\n\tif appURL == \"\" {\n\t\tlog.Fatal(\"$APP_URL must be set!\")\n\t}\n\n\thttp.Handle(\"\/static\/\", http.FileServer(http.Dir(\"\/static\/\")))\n\thttp.HandleFunc(\"\/slack\", handleSlack)\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n)\n\nvar (\n\tPlaybookFile = flag.String(\"play\", \"site.yml\", \"Path to the playbook to execute\")\n\tInventoryFile = flag.String(\"i\", \"hosts\", \"Path to the inventory file\")\n\tLimitHosts = flag.String(\"l\", \"\", \"Limit hosts\")\n\tCheckAndQuit = flag.Bool(\"check\", false, \"Check and exit without running the play\")\n\tDataDir = flag.String(\"d\", \"\", \"Alternate path for handlers, tasks, etc.\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tlog.SetFlags(0)\n\tlog.SetOutput(os.Stdout)\n\n\tinv, err := LoadInventoryFile(*InventoryFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading inventory file %q reason=%s\", *InventoryFile, err.Error())\n\t}\n\n\tif ngroups := len(inv); ngroups == 1 {\n\t\tlog.Println(\"1 host group loaded from inventory\")\n\t} else {\n\t\tlog.Printf(\"%d host groups loaded from inventory\", ngroups)\n\t}\n\n\t\/\/ Run a sanity check on the inventory groups.\n\tfor _, g := range inv {\n\t\tif err = g.Check(); err != nil {\n\t\t\tlog.Fatalf(\"Error in group %q: %s\", g.Name, err.Error())\n\t\t}\n\t}\n\n\t\/\/ Gather a list of the available modules.\n\tmods, err := GatherModules()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error gathering modules: %s\", err.Error())\n\t}\n\n\tif nmods := len(mods); nmods == 0 {\n\t\tlog.Fatalln(\"no modules found\")\n\t} else if nmods == 1 {\n\t\tlog.Println(\"1 module loaded\")\n\t} else {\n\t\tlog.Printf(\"%d modules loaded\", nmods)\n\t}\n\n\t\/\/ Load handlers.\n\thandlers, err := LoadHandlers(*DataDir)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\tfor _, h := range handlers {\n\t\tlog.Printf(\"loaded handler %q\", h.Name)\n\t}\n\tlog.Printf(\"%d handlers loaded\", len(handlers))\n\n\t\/\/ Load tasks.\n\n\t\/\/ Load the roles.\n\n\t\/\/ Load the playbook.\n\tplays, err := LoadPlaybook(*PlaybookFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading playbook %q: %s\", *PlaybookFile, err.Error())\n\t}\n\n\tif nplays := len(plays); nplays == 1 {\n\t\tlog.Println(\"1 play loaded\")\n\t} else {\n\t\tlog.Printf(\"%d plays loaded\", len(plays))\n\t}\n\n\t\/\/ Check the plays.\n\tfor _, p := range plays {\n\t\tif err := p.Check(); err != nil {\n\t\t\tlog.Fatalf(\"Error in play %q: %s\", p.Name, err.Error())\n\t\t}\n\t}\n\n\t\/\/ Bail out here if the user wanted only to check the format of their\n\t\/\/ plays, roles, tasks, etc.\n\tif *CheckAndQuit {\n\t\tlog.Println(\"Checks passed\")\n\t\tos.Exit(0)\n\t}\n\n\treturn\n}\n<commit_msg>Moved flags into the main() func.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tPlaybookFile := flag.String(\"play\", \"site.yml\", \"Path to the playbook to execute\")\n\tInventoryFile := flag.String(\"i\", \"hosts\", \"Path to the inventory file\")\n\tCheckAndQuit := flag.Bool(\"check\", false, \"Check and exit without running the play\")\n\tDataDir := flag.String(\"p\", \"\", \"Alternate path for handlers, tasks, etc.\")\n\tflag.Parse()\n\tlog.SetFlags(0)\n\tlog.SetOutput(os.Stdout)\n\n\tinv, err := LoadInventoryFile(*InventoryFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading inventory file %q reason=%s\", *InventoryFile, err.Error())\n\t}\n\n\tif ngroups := len(inv); ngroups == 1 {\n\t\tlog.Println(\"1 host group loaded from inventory\")\n\t} else {\n\t\tlog.Printf(\"%d host groups loaded from inventory\", ngroups)\n\t}\n\n\t\/\/ Run a sanity check on the inventory groups.\n\tfor _, g := range inv {\n\t\tif err = g.Check(); err != nil {\n\t\t\tlog.Fatalf(\"Error in group %q: %s\", g.Name, err.Error())\n\t\t}\n\t}\n\n\t\/\/ Gather a list of the available modules.\n\tmods, err := GatherModules()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error gathering modules: %s\", err.Error())\n\t}\n\n\tif nmods := len(mods); nmods == 0 {\n\t\tlog.Fatalln(\"no modules found\")\n\t} else if nmods == 1 {\n\t\tlog.Println(\"1 module loaded\")\n\t} else {\n\t\tlog.Printf(\"%d modules loaded\", nmods)\n\t}\n\n\t\/\/ Load handlers.\n\thandlers, err := LoadHandlers(*DataDir)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\tfor _, h := range handlers {\n\t\tlog.Printf(\"loaded handler %q\", h.Name)\n\t}\n\tlog.Printf(\"%d handlers loaded\", len(handlers))\n\n\t\/\/ Load tasks.\n\n\t\/\/ Load the roles.\n\n\t\/\/ Load the playbook.\n\tplays, err := LoadPlaybook(*PlaybookFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading playbook %q: %s\", *PlaybookFile, err.Error())\n\t}\n\n\tif nplays := len(plays); nplays == 1 {\n\t\tlog.Println(\"1 play loaded\")\n\t} else {\n\t\tlog.Printf(\"%d plays loaded\", len(plays))\n\t}\n\n\t\/\/ Check the plays.\n\tfor _, p := range plays {\n\t\tif err := p.Check(); err != nil {\n\t\t\tlog.Fatalf(\"Error in play %q: %s\", p.Name, err.Error())\n\t\t}\n\t}\n\n\t\/\/ Bail out here if the user wanted only to check the format of their\n\t\/\/ plays, roles, tasks, etc.\n\tif *CheckAndQuit {\n\t\tlog.Println(\"Checks passed\")\n\t\tos.Exit(0)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/erbridge\/gotwit\"\n\t\"github.com\/erbridge\/gotwit\/twitter\"\n)\n\ntype (\n\tcorpus struct {\n\t\tWords []string `json:\"words\"`\n\t\tPrefixes map[string][]string `json:\"prefixes\"`\n\t}\n)\n\nfunc getCorpus() (c corpus, err error) {\n\tcorpusFile, err := os.Open(\"data\/corpus.json\")\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tparser := json.NewDecoder(corpusFile)\n\n\tif err = parser.Decode(&c); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc createTweetText(c corpus) (text string) {\n\tcount := rand.Intn(3) + rand.Intn(3) + 1\n\n\twords := make([]string, count)\n\n\tn := rand.Float32()\n\n\tfor i := 0; i < count; i++ {\n\t\tletters := make([]string, 0)\n\n\t\tfor _, r := range c.Words[rand.Intn(len(c.Words))] {\n\t\t\tlimit := 1\n\n\t\t\tif r == 's' || r == 'k' || r == 'p' || r == 'n' || r == 'a' || r == 'i' || r == 'u' {\n\t\t\t\tn += rand.Float32() \/ 10\n\n\t\t\t\tif n > 1 {\n\t\t\t\t\tn = 0\n\t\t\t\t}\n\n\t\t\t\tif n > 0.9 {\n\t\t\t\t\tlimit = 4\n\t\t\t\t\tn = rand.Float32() \/ 4\n\t\t\t\t} else if n > 0.75 {\n\t\t\t\t\tlimit = 3\n\t\t\t\t\tn = rand.Float32() \/ 3\n\t\t\t\t} else if n > 0.6 {\n\t\t\t\t\tlimit = 2\n\t\t\t\t\tn = rand.Float32() \/ 2\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor i := 0; i < limit; i++ {\n\t\t\t\tletters = append(letters, string(r))\n\t\t\t}\n\t\t}\n\n\t\tword := strings.Join(letters, \"\")\n\n\t\tindex := rand.Intn(len(c.Prefixes))\n\n\t\tfor k, v := range c.Prefixes {\n\t\t\tif index == 0 {\n\t\t\t\tif strings.HasPrefix(word, k) {\n\t\t\t\t\tprefix := v[rand.Intn(len(v))]\n\n\t\t\t\t\tprefixCount := rand.Intn(4) - rand.Intn(3)\n\n\t\t\t\t\tif prefixCount > 0 {\n\t\t\t\t\t\tprefixes := make([]string, prefixCount+1)\n\n\t\t\t\t\t\tfor j := 0; j < prefixCount; j++ {\n\t\t\t\t\t\t\tprefixes[j] = prefix\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tword = strings.Join(prefixes, \"-\") + word\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tindex--\n\t\t}\n\n\t\twords[i] = word\n\t}\n\n\ttext = strings.Join(words, \" \")\n\n\treturn\n}\n\nfunc postTweet(b gotwit.Bot, c corpus) {\n\ttweet := createTweetText(c)\n\n\tfmt.Println(\"Posting:\", tweet)\n\n\tb.Post(tweet, false)\n}\n\nfunc main() {\n\tvar (\n\t\tcon twitter.ConsumerConfig\n\t\tacc twitter.AccessConfig\n\t)\n\n\tf := \"secrets.json\"\n\tif _, err := os.Stat(f); err == nil {\n\t\tcon, acc, _ = twitter.LoadConfigFile(f)\n\t} else {\n\t\tcon, acc, _ = twitter.LoadConfigEnv()\n\t}\n\n\tb := gotwit.NewBot(\"elbownoises\", con, acc)\n\n\tc, err := getCorpus()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgo func() {\n\t\tif err = b.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tnow := time.Now()\n\n\trand.Seed(now.UnixNano())\n\n\tnext := time.Date(\n\t\tnow.Year(),\n\t\tnow.Month(),\n\t\tnow.Day(),\n\t\tnow.Hour()+1,\n\t\t00,\n\t\t0,\n\t\t0,\n\t\tnow.Location(),\n\t)\n\n\tsleep := next.Sub(now)\n\n\tfmt.Printf(\"%v until first tweet\\n\", next.Sub(now))\n\n\ttime.Sleep(sleep)\n\n\tpostTweet(b, c)\n\n\tticker := time.NewTicker(time.Hour)\n\tdefer ticker.Stop()\n\n\tfor range ticker.C {\n\t\tpostTweet(b, c)\n\t}\n}\n<commit_msg>Tidy up<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/erbridge\/gotwit\"\n\t\"github.com\/erbridge\/gotwit\/twitter\"\n)\n\ntype (\n\tcorpus struct {\n\t\tWords []string `json:\"words\"`\n\t\tPrefixes map[string][]string `json:\"prefixes\"`\n\t}\n)\n\nfunc getCorpus() (c corpus, err error) {\n\tcorpusFile, err := os.Open(\"data\/corpus.json\")\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tparser := json.NewDecoder(corpusFile)\n\n\tif err = parser.Decode(&c); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc createTweetText(c corpus) (text string) {\n\tcount := rand.Intn(3) + rand.Intn(3) + 1\n\n\twords := make([]string, count)\n\n\tn := rand.Float32()\n\n\tfor i := 0; i < count; i++ {\n\t\tletters := make([]string, 0)\n\n\t\tfor _, r := range c.Words[rand.Intn(len(c.Words))] {\n\t\t\tlimit := 1\n\n\t\t\tif r == 's' || r == 'k' || r == 'p' || r == 'n' || r == 'a' || r == 'i' || r == 'u' {\n\t\t\t\tn += rand.Float32() \/ 10\n\n\t\t\t\tif n > 1 {\n\t\t\t\t\tn = 0\n\t\t\t\t}\n\n\t\t\t\tif n > 0.9 {\n\t\t\t\t\tlimit = 4\n\t\t\t\t\tn = rand.Float32() \/ 4\n\t\t\t\t} else if n > 0.75 {\n\t\t\t\t\tlimit = 3\n\t\t\t\t\tn = rand.Float32() \/ 3\n\t\t\t\t} else if n > 0.6 {\n\t\t\t\t\tlimit = 2\n\t\t\t\t\tn = rand.Float32() \/ 2\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor i := 0; i < limit; i++ {\n\t\t\t\tletters = append(letters, string(r))\n\t\t\t}\n\t\t}\n\n\t\tword := strings.Join(letters, \"\")\n\n\t\tindex := rand.Intn(len(c.Prefixes))\n\n\t\tfor k, v := range c.Prefixes {\n\t\t\tif index == 0 {\n\t\t\t\tif strings.HasPrefix(word, k) {\n\t\t\t\t\tprefix := v[rand.Intn(len(v))]\n\n\t\t\t\t\tprefixCount := rand.Intn(4) - rand.Intn(3)\n\n\t\t\t\t\tif prefixCount > 0 {\n\t\t\t\t\t\tprefixes := make([]string, prefixCount+1)\n\n\t\t\t\t\t\tfor j := 0; j < prefixCount; j++ {\n\t\t\t\t\t\t\tprefixes[j] = prefix\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tword = strings.Join(prefixes, \"-\") + word\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tindex--\n\t\t}\n\n\t\twords[i] = word\n\t}\n\n\ttext = strings.Join(words, \" \")\n\n\treturn\n}\n\nfunc postTweet(b gotwit.Bot, c corpus) {\n\ttweet := createTweetText(c)\n\n\tfmt.Println(\"Posting:\", tweet)\n\n\tb.Post(tweet, false)\n}\n\nfunc main() {\n\tvar (\n\t\tcon twitter.ConsumerConfig\n\t\tacc twitter.AccessConfig\n\t)\n\n\tf := \"secrets.json\"\n\tif _, err := os.Stat(f); err == nil {\n\t\tcon, acc, _ = twitter.LoadConfigFile(f)\n\t} else {\n\t\tcon, acc, _ = twitter.LoadConfigEnv()\n\t}\n\n\tb := gotwit.NewBot(\"elbownoises\", con, acc)\n\n\tc, err := getCorpus()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgo func() {\n\t\tif err = b.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tnow := time.Now()\n\n\trand.Seed(now.UnixNano())\n\n\tnext := time.Date(\n\t\tnow.Year(),\n\t\tnow.Month(),\n\t\tnow.Day(),\n\t\tnow.Hour()+1,\n\t\t0,\n\t\t0,\n\t\t0,\n\t\tnow.Location(),\n\t)\n\n\tsleep := next.Sub(now)\n\n\tfmt.Printf(\"%v until first tweet\\n\", sleep)\n\n\ttime.Sleep(sleep)\n\n\tpostTweet(b, c)\n\n\tticker := time.NewTicker(time.Hour)\n\tdefer ticker.Stop()\n\n\tfor range ticker.C {\n\t\tpostTweet(b, c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\/\/\"fmt\"\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc parse() {\n\ts := `<html><p>Links:<\/p><ul><li><a href=\"foo\">Foo<\/a><li><a href=\"\/bar\/baz\">BarBaz<\/a><\/ul><\/html>`\n\tdoc, err := html.Parse(strings.NewReader(s))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar f func(n *html.Node)\n\tf = func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"body\" {\n\t\t\ttag := &html.Node{}\n\t\t\ttag.DataAtom = atom.Script\n\t\t\ttag.Type = html.ElementNode\n\t\t\ttag.Data = \"script\"\n\n\t\t\t\/\/Content\n\t\t\tc := &html.Node{\n\t\t\t\tType: html.TextNode,\n\t\t\t\tData: `alert(\"injected\")`,\n\t\t\t}\n\t\t\tn.AppendChild(tag)\n\t\t\ttag.AppendChild(c)\n\n\t\t\treturn\n\t\t}\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(doc)\n\thtml.Render(os.Stdout, doc)\n}\n\nfunc main() {\n\tparse()\n}\n<commit_msg>It works! needs loads of cleaning and some input file static serve mechanism<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/go-fsnotify\/fsnotify\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype handler struct {\n\tfilename string\n\tcontent string\n\tscript string\n\tc chan bool\n}\n\nfunc (h *handler) watcher(f string) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tlog.Println(\"event:\", event)\n\t\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\t\tlog.Println(\"modified file:\", event.Name)\n\t\t\t\t\th.c <- true\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tlog.Println(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = watcher.Add(f)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t<-done\n}\n\nfunc (h *handler) inject() {\n\tdoc, err := html.Parse(strings.NewReader(h.content))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar f func(n *html.Node)\n\tf = func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"body\" {\n\t\t\t\/\/ <script> tag\n\t\t\ttag := &html.Node{}\n\t\t\ttag.DataAtom = atom.Script\n\t\t\ttag.Type = html.ElementNode\n\t\t\ttag.Data = \"script\"\n\n\t\t\t\/\/ script Content\/text\n\t\t\tc := &html.Node{\n\t\t\t\tType: html.TextNode,\n\t\t\t\tData: h.script,\n\t\t\t}\n\t\t\tn.AppendChild(tag)\n\t\t\ttag.AppendChild(c)\n\n\t\t\treturn\n\t\t}\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(doc)\n\thtml.Render(h, doc)\n}\n\nfunc (h *handler) Write(p []byte) (n int, err error) {\n\ttmp := string(p)\n\th.content = tmp\n\treturn len(p), nil\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Read in index.html\n\t\/\/ Inject WS script for refresh\n\tcontent, err := ioutil.ReadFile(h.filename)\n\tif err != nil {\n\t\tpanic(\"Error: \" + err.Error())\n\t}\n\th.content = string(content)\n\n\th.inject()\n\tio.WriteString(w, h.content)\n}\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n}\n\nfunc print_binary(s []byte) {\n\tfmt.Printf(\"Received b:\")\n\tfor n := 0; n < len(s); n++ {\n\t\tfmt.Printf(\"%d,\", s[n])\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\nfunc (h *handler) reloadHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\trefresh := <-h.c\n\t\tmessageType, p, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tprint_binary(p)\n\t\tif refresh {\n\t\t\terr = conn.WriteMessage(messageType, []byte(\"refresh\"))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nconst WSSCRIPT string = `\n\tvar serversocket = new WebSocket(\"ws:\/\/localhost:3000\/ws\/echo\");\n \n serversocket.onopen = function() {\n serversocket.send(\"Connection init\");\n }\n\n \/\/ Write message on receive\n serversocket.onmessage = function(e) {\n document.getElementById('comms').innerHTML += \"Received: \" + e.data + \"<br>\";\n document.location.reload(true);\n };\n\n function senddata() {\n var data = document.getElementById('sendtext').value;\n serversocket.send(data);\n document.getElementById('comms').innerHTML += \"<li>Sent: \" + data + \"<br><\/li>\";\n }\n`\n\nfunc main() {\n\n\th := &handler{\n\t\tfilename: \"\/tmp\/test.html\",\n\t\tscript: WSSCRIPT,\n\t\tc: make(chan bool),\n\t}\n\n\t\/\/ Watch file(s) for changes and trigger refresh\n\tgo h.watcher(h.filename)\n\n\thttp.HandleFunc(\"\/ws\/echo\", h.reloadHandler)\n\thttp.Handle(\"\/\", h)\n\tgo http.ListenAndServe(\":3000\", nil)\n\terr := http.ListenAndServe(\":3000\", nil)\n\tif err != nil {\n\t\tpanic(\"Error: \" + err.Error())\n\t}\n\n\tfmt.Println(\"Done\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/iancoleman\/strcase\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\nconst Version string = \"v0.0.12\"\n\ntype Substitution struct {\n\tre regexp.Regexp\n\tto string\n}\n\nfunc getAllFiles(paths []string) []string {\n\tvar args []string\n\targs = append(args, \"ls-files\")\n\targs = append(args, paths...)\n\tcmd := exec.Command(\"git\", args...)\n\tout, err := cmd.Output()\n\texitCode := cmd.ProcessState.ExitCode()\n\tif exitCode != 1 && err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlines := strings.Split(string(out), \"\\n\")\n\treturn lines\n}\n\nfunc runSubstitionsAndRenames(substitutions []Substitution, rename bool, path string) {\n\tif path == \"\" {\n\t\treturn\n\t}\n\n\tinfo, err := os.Stat(path)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif info.IsDir() {\n\t\treturn\n\t}\n\n\tcontent, err := ioutil.ReadFile(path)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treplaced := false\n\n\tfor _, s := range substitutions {\n\t\tif s.re.Match(content) {\n\t\t\treplaced = true\n\t\t\tcontent = s.re.ReplaceAll(content, []byte(s.to))\n\t\t}\n\t}\n\n\tif replaced {\n\t\tioutil.WriteFile(path, content, os.ModePerm)\n\t}\n\n\tif rename {\n\t\tfor _, s := range substitutions {\n\t\t\tnewpath := s.re.ReplaceAllString(path, s.to)\n\t\t\tif newpath != path {\n\t\t\t\tos.MkdirAll(filepath.Dir(newpath), os.ModePerm)\n\t\t\t\tos.Rename(path, newpath)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getMaxProcs() int {\n\tvar maxProcs int\n\n\tmp := os.Getenv(\"GIT_GSUB_MAX_PROCS\")\n\tif mp == \"\" {\n\t\tmaxProcs = 100\n\t} else {\n\t\ti, err := strconv.Atoi(mp)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tmaxProcs = i\n\t}\n\n\treturn maxProcs\n}\n\nfunc main() {\n\tvar snake = flag.Bool(\"snake\", false, \"Substitute snake-cased expressions\")\n\tvar kebab = flag.Bool(\"kebab\", false, \"Substitute kebab-cased expressions\")\n\tvar camel = flag.Bool(\"camel\", false, \"Substitute camel-cased expressions\")\n\tvar rename = flag.Bool(\"rename\", false, \"Rename files with expression\")\n\tvar version = flag.Bool(\"version\", false, \"Show version\")\n\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(Version)\n\t\tos.Exit(0)\n\t}\n\n\targs := flag.Args()\n\tif len(args) < 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage git gsub [options] FROM TO [PATHS]\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"\\nOptions:\\n\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\trawFrom := args[0]\n\tto := args[1]\n\tvar targetPaths []string\n\tif len(args) > 2 {\n\t\ttargetPaths = args[2:]\n\t}\n\n\tvar substitutions []Substitution\n\n\tfrom := regexp.MustCompile(rawFrom)\n\tsubstitutions = append(substitutions, Substitution{*from, to})\n\n\tif *snake {\n\t\tsnakeFrom := regexp.MustCompile(strcase.ToSnake(rawFrom))\n\t\tsnakeTo := strcase.ToSnake(to)\n\t\tsubstitutions = append(substitutions, Substitution{*snakeFrom, snakeTo})\n\t}\n\tif *kebab {\n\t\tkebabFrom := regexp.MustCompile(strcase.ToKebab(rawFrom))\n\t\tkebabTo := strcase.ToKebab(to)\n\t\tsubstitutions = append(substitutions, Substitution{*kebabFrom, kebabTo})\n\t}\n\tif *camel {\n\t\tcamelFrom := regexp.MustCompile(strcase.ToCamel(rawFrom))\n\t\tcamelTo := strcase.ToCamel(to)\n\t\tsubstitutions = append(substitutions, Substitution{*camelFrom, camelTo})\n\t}\n\n\tfiles := getAllFiles(targetPaths)\n\n\tc := make(chan bool, getMaxProcs())\n\tvar wg sync.WaitGroup\n\n\tfor _, path := range files {\n\t\twg.Add(1)\n\t\tgo func(path_ string) {\n\t\t\tc <- true\n\t\t\trunSubstitionsAndRenames(substitutions, *rename, path_)\n\t\t\t<-c\n\t\t\twg.Done()\n\t\t}(path)\n\t}\n\twg.Wait()\n}\n<commit_msg>Bump version<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/iancoleman\/strcase\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\nconst Version string = \"v0.0.13\"\n\ntype Substitution struct {\n\tre regexp.Regexp\n\tto string\n}\n\nfunc getAllFiles(paths []string) []string {\n\tvar args []string\n\targs = append(args, \"ls-files\")\n\targs = append(args, paths...)\n\tcmd := exec.Command(\"git\", args...)\n\tout, err := cmd.Output()\n\texitCode := cmd.ProcessState.ExitCode()\n\tif exitCode != 1 && err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlines := strings.Split(string(out), \"\\n\")\n\treturn lines\n}\n\nfunc runSubstitionsAndRenames(substitutions []Substitution, rename bool, path string) {\n\tif path == \"\" {\n\t\treturn\n\t}\n\n\tinfo, err := os.Stat(path)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif info.IsDir() {\n\t\treturn\n\t}\n\n\tcontent, err := ioutil.ReadFile(path)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treplaced := false\n\n\tfor _, s := range substitutions {\n\t\tif s.re.Match(content) {\n\t\t\treplaced = true\n\t\t\tcontent = s.re.ReplaceAll(content, []byte(s.to))\n\t\t}\n\t}\n\n\tif replaced {\n\t\tioutil.WriteFile(path, content, os.ModePerm)\n\t}\n\n\tif rename {\n\t\tfor _, s := range substitutions {\n\t\t\tnewpath := s.re.ReplaceAllString(path, s.to)\n\t\t\tif newpath != path {\n\t\t\t\tos.MkdirAll(filepath.Dir(newpath), os.ModePerm)\n\t\t\t\tos.Rename(path, newpath)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getMaxProcs() int {\n\tvar maxProcs int\n\n\tmp := os.Getenv(\"GIT_GSUB_MAX_PROCS\")\n\tif mp == \"\" {\n\t\tmaxProcs = 100\n\t} else {\n\t\ti, err := strconv.Atoi(mp)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tmaxProcs = i\n\t}\n\n\treturn maxProcs\n}\n\nfunc main() {\n\tvar snake = flag.Bool(\"snake\", false, \"Substitute snake-cased expressions\")\n\tvar kebab = flag.Bool(\"kebab\", false, \"Substitute kebab-cased expressions\")\n\tvar camel = flag.Bool(\"camel\", false, \"Substitute camel-cased expressions\")\n\tvar rename = flag.Bool(\"rename\", false, \"Rename files with expression\")\n\tvar version = flag.Bool(\"version\", false, \"Show version\")\n\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(Version)\n\t\tos.Exit(0)\n\t}\n\n\targs := flag.Args()\n\tif len(args) < 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage git gsub [options] FROM TO [PATHS]\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"\\nOptions:\\n\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\trawFrom := args[0]\n\tto := args[1]\n\tvar targetPaths []string\n\tif len(args) > 2 {\n\t\ttargetPaths = args[2:]\n\t}\n\n\tvar substitutions []Substitution\n\n\tfrom := regexp.MustCompile(rawFrom)\n\tsubstitutions = append(substitutions, Substitution{*from, to})\n\n\tif *snake {\n\t\tsnakeFrom := regexp.MustCompile(strcase.ToSnake(rawFrom))\n\t\tsnakeTo := strcase.ToSnake(to)\n\t\tsubstitutions = append(substitutions, Substitution{*snakeFrom, snakeTo})\n\t}\n\tif *kebab {\n\t\tkebabFrom := regexp.MustCompile(strcase.ToKebab(rawFrom))\n\t\tkebabTo := strcase.ToKebab(to)\n\t\tsubstitutions = append(substitutions, Substitution{*kebabFrom, kebabTo})\n\t}\n\tif *camel {\n\t\tcamelFrom := regexp.MustCompile(strcase.ToCamel(rawFrom))\n\t\tcamelTo := strcase.ToCamel(to)\n\t\tsubstitutions = append(substitutions, Substitution{*camelFrom, camelTo})\n\t}\n\n\tfiles := getAllFiles(targetPaths)\n\n\tc := make(chan bool, getMaxProcs())\n\tvar wg sync.WaitGroup\n\n\tfor _, path := range files {\n\t\twg.Add(1)\n\t\tgo func(path_ string) {\n\t\t\tc <- true\n\t\t\trunSubstitionsAndRenames(substitutions, *rename, path_)\n\t\t\t<-c\n\t\t\twg.Done()\n\t\t}(path)\n\t}\n\twg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/fabioxgn\/go-bot\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/catfacts\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/catgif\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/chucknorris\"\n\t_ \"github.com\/nloadholtes\/uniform\/ratt\"\n\t\/\/ Import all the commands you wish to use\n\t\"flag\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar slack_flag bool\n\nfunc main() {\n\tflag.BoolVar(&slack_flag, \"slack\", false, \"help message for flagname\")\n\n\tflag.Parse()\n\n\tif slack_flag {\n\n\t} else {\n\t\tbot.Run(&bot.Config{\n\t\t\tServer: os.Getenv(\"IRC_SERVER\"),\n\t\t\tChannels: strings.Split(os.Getenv(\"IRC_CHANNELS\"), \",\"),\n\t\t\tUser: os.Getenv(\"IRC_USER\"),\n\t\t\tNick: os.Getenv(\"IRC_NICK\"),\n\t\t\tPassword: os.Getenv(\"IRC_PASSWORD\"),\n\t\t\tUseTLS: true,\n\t\t\tDebug: os.Getenv(\"DEBUG\") != \"\"})\n\t}\n}\n\nfunc init() {\n}\n<commit_msg>Adding in the slack token<commit_after>package main\n\nimport (\n\t\"github.com\/fabioxgn\/go-bot\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/catfacts\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/catgif\"\n\t_ \"github.com\/fabioxgn\/go-bot\/commands\/chucknorris\"\n\t_ \"github.com\/nloadholtes\/uniform\/ratt\"\n\t\/\/ Import all the commands you wish to use\n\t\"flag\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar slack_flag bool\n\nfunc main() {\n\tflag.BoolVar(&slack_flag, \"slack\", false, \"help message for flagname\")\n\n\tflag.Parse()\n\tif slack_flag {\n\t\tbot.RunSlack(os.Getenv(\"SLACK_TOKEN\"))\n\t} else {\n\t\tbot.Run(&bot.Config{\n\t\t\tServer: os.Getenv(\"IRC_SERVER\"),\n\t\t\tChannels: strings.Split(os.Getenv(\"IRC_CHANNELS\"), \",\"),\n\t\t\tUser: os.Getenv(\"IRC_USER\"),\n\t\t\tNick: os.Getenv(\"IRC_NICK\"),\n\t\t\tPassword: os.Getenv(\"IRC_PASSWORD\"),\n\t\t\tUseTLS: true,\n\t\t\tDebug: os.Getenv(\"DEBUG\") != \"\"})\n\t}\n}\n\nfunc init() {\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016, RadiantBlue Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/venicegeo\/pzsvc-sdk-go\/s3\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\n\/\/ LasHeader is the LAS header.\ntype LasHeader struct {\n\tFileSignature [4]byte\n\tFileSourceID uint16\n\tGlobalEncoding uint16\n\tGUID1 uint32\n\tGUID2 uint16\n\tGUID3 uint16\n\tGUID4 [8]uint8\n\tVersionMajor, VersionMinor uint8\n\tSystemIdentifier [32]byte\n\tGeneratingSoftware [32]byte\n\tFileCreationDayOfYear uint16\n\tFileCreationYear uint16\n\tHeaderSize uint16\n\tOffsetToPointData uint32\n\tNumberOfVLRs uint32\n\tPointDataRecordFormat uint8\n\tPointDataRecordLength uint16\n\tLegacyNumberOfPointRecords uint32\n\tLegacyNumberOfPointsByReturn [5]uint32\n\tXScale, YScale, ZScale float64\n\tXOffset, YOffset, ZOffset float64\n\tMaxX, MinX float64\n\tMaxY, MinY float64\n\tMaxZ, MinZ float64\n\t\/\/ missing some waveform packet stuff for 1.4\n}\n\n\/\/ Format0 is Point Data Record Format 0.\ntype Format0 struct {\n\tX, Y, Z int32\n\tIntensity uint16\n\tFoo byte\n\tClassification uint8\n\tScanAngleRank byte\n\tUserData uint8\n\tPointSourceID uint16\n}\n\n\/\/ Format1 is Point Data Record Format 1.\ntype Format1 struct {\n\tFormat0\n\tGPSTime float64\n}\n\n\/\/ Format3 is Point Date Record Format 3.\ntype Format3 struct {\n\tFormat1\n\tRed, Blue, Green uint16\n}\n\n\/\/ ReadLas reads an LAS file.\nfunc ReadLas(fname string) (h LasHeader, p []Format1) {\n\tf, err := os.Open(fname)\n\tcheck(err)\n\tdefer f.Close()\n\n\tbinary.Read(f, binary.LittleEndian, &h)\n\tfmt.Printf(\"%+v\\n\", h)\n\n\tf.Seek(int64(h.OffsetToPointData), 0)\n\tif h.PointDataRecordFormat == 0 {\n\t\tfmt.Println(\"format = 0\")\n\t\tpoints := make([]Format0, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, points)\n\t\tfmt.Printf(\"%+v\\n\", points[:5])\n\t\tfmt.Printf(\"%+v\\n\", points[len(points)-5:])\n\t} else if h.PointDataRecordFormat == 1 {\n\t\tfmt.Println(\"format = 1\")\n\t\tp = make([]Format1, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, p)\n\t\tfmt.Printf(\"%+v\\n\", p[:5])\n\t\tfmt.Printf(\"%+v\\n\", p[len(p)-5:])\n\t} else if h.PointDataRecordFormat == 3 {\n\t\tfmt.Println(\"format = 3\")\n\t\tpoints := make([]Format3, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, points)\n\t\tfmt.Printf(\"%+v\\n\", points[:5])\n\t\tfmt.Printf(\"%+v\\n\", points[len(points)-5:])\n\t}\n\treturn\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {\n\t\tfmt.Fprintf(w, \"Hi!\")\n\t})\n\n\trouter.GET(\"\/info\", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tvar inputName string\n\t\tvar fileIn *os.File\n\n\t\t\/\/ Split the source S3 key string, interpreting the last element as the\n\t\t\/\/ input filename. Create the input file, throwing 500 on error.\n\t\tinputName = s3.ParseFilenameFromKey(\"pointcloud\/samp11-utm.las\")\n\t\tfileIn, err := os.Create(inputName)\n\t\tcheck(err)\n\t\tdefer fileIn.Close()\n\n\t\t\/\/ Download the source data from S3, throwing 500 on error.\n\t\terr = s3.Download(fileIn, \"venicegeo-sample-data\", \"pointcloud\/samp11-utm.las\")\n\t\tcheck(err)\n\n\t\th, _ := ReadLas(inputName)\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif err := json.NewEncoder(w).Encode(h); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\tvar defaultPort = os.Getenv(\"PORT\")\n\tif defaultPort == \"\" {\n\t\tdefaultPort = \"8080\"\n\t}\n\n\tlog.Println(\"Starting on \", defaultPort)\n\tif err := http.ListenAndServe(\":\"+defaultPort, router); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Use http.Error when things go wrong<commit_after>\/*\nCopyright 2016, RadiantBlue Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/venicegeo\/pzsvc-sdk-go\/s3\"\n)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\n\/\/ LasHeader is the LAS header.\ntype LasHeader struct {\n\tFileSignature [4]byte\n\tFileSourceID uint16\n\tGlobalEncoding uint16\n\tGUID1 uint32\n\tGUID2 uint16\n\tGUID3 uint16\n\tGUID4 [8]uint8\n\tVersionMajor, VersionMinor uint8\n\tSystemIdentifier [32]byte\n\tGeneratingSoftware [32]byte\n\tFileCreationDayOfYear uint16\n\tFileCreationYear uint16\n\tHeaderSize uint16\n\tOffsetToPointData uint32\n\tNumberOfVLRs uint32\n\tPointDataRecordFormat uint8\n\tPointDataRecordLength uint16\n\tLegacyNumberOfPointRecords uint32\n\tLegacyNumberOfPointsByReturn [5]uint32\n\tXScale, YScale, ZScale float64\n\tXOffset, YOffset, ZOffset float64\n\tMaxX, MinX float64\n\tMaxY, MinY float64\n\tMaxZ, MinZ float64\n\t\/\/ missing some waveform packet stuff for 1.4\n}\n\n\/\/ Format0 is Point Data Record Format 0.\ntype Format0 struct {\n\tX, Y, Z int32\n\tIntensity uint16\n\tFoo byte\n\tClassification uint8\n\tScanAngleRank byte\n\tUserData uint8\n\tPointSourceID uint16\n}\n\n\/\/ Format1 is Point Data Record Format 1.\ntype Format1 struct {\n\tFormat0\n\tGPSTime float64\n}\n\n\/\/ Format3 is Point Date Record Format 3.\ntype Format3 struct {\n\tFormat1\n\tRed, Blue, Green uint16\n}\n\n\/\/ ReadLas reads an LAS file.\nfunc ReadLas(fname string) (h LasHeader, p []Format1) {\n\tf, err := os.Open(fname)\n\tcheck(err)\n\tdefer f.Close()\n\n\tbinary.Read(f, binary.LittleEndian, &h)\n\tfmt.Printf(\"%+v\\n\", h)\n\n\tf.Seek(int64(h.OffsetToPointData), 0)\n\tif h.PointDataRecordFormat == 0 {\n\t\tfmt.Println(\"format = 0\")\n\t\tpoints := make([]Format0, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, points)\n\t\tfmt.Printf(\"%+v\\n\", points[:5])\n\t\tfmt.Printf(\"%+v\\n\", points[len(points)-5:])\n\t} else if h.PointDataRecordFormat == 1 {\n\t\tfmt.Println(\"format = 1\")\n\t\tp = make([]Format1, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, p)\n\t\tfmt.Printf(\"%+v\\n\", p[:5])\n\t\tfmt.Printf(\"%+v\\n\", p[len(p)-5:])\n\t} else if h.PointDataRecordFormat == 3 {\n\t\tfmt.Println(\"format = 3\")\n\t\tpoints := make([]Format3, h.LegacyNumberOfPointRecords)\n\t\tbinary.Read(f, binary.LittleEndian, points)\n\t\tfmt.Printf(\"%+v\\n\", points[:5])\n\t\tfmt.Printf(\"%+v\\n\", points[len(points)-5:])\n\t}\n\treturn\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {\n\t\tfmt.Fprintf(w, \"Hi!\")\n\t})\n\n\trouter.GET(\"\/info\", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tvar inputName string\n\t\tvar fileIn *os.File\n\n\t\t\/\/ Split the source S3 key string, interpreting the last element as the\n\t\t\/\/ input filename. Create the input file, throwing 500 on error.\n\t\tinputName = s3.ParseFilenameFromKey(\"pointcloud\/samp11-utm.las\")\n\t\tfileIn, err := os.Create(inputName)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\tdefer fileIn.Close()\n\n\t\t\/\/ Download the source data from S3, throwing 500 on error.\n\t\terr = s3.Download(fileIn, \"venicegeo-sample-data\", \"pointcloud\/samp11-utm.las\")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\th, _ := ReadLas(inputName)\n\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif err := json.NewEncoder(w).Encode(h); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\tvar defaultPort = os.Getenv(\"PORT\")\n\tif defaultPort == \"\" {\n\t\tdefaultPort = \"8080\"\n\t}\n\n\tlog.Println(\"Starting on \", defaultPort)\n\tif err := http.ListenAndServe(\":\"+defaultPort, router); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/koding\/logging\"\n\t\"net\/http\"\n)\n\nvar (\n\tport = flag.String(\"port\", \"8080\", \"Port to use\")\n\tpath = flag.String(\"path\", \".\/\", \"Path the server is started from\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tserver := HTTPServer{\n\t\thttp.Server{\n\t\t\tAddr: \":\" + *port,\n\t\t\tHandler: HTTPHandler{\n\t\t\t\trealHandler: http.FileServer(http.Dir(*path)),\n\t\t\t},\n\t\t},\n\t}\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlogging.Error(fmt.Sprint(err))\n\t}\n}\n\ntype HTTPHandler struct {\n\trealHandler http.Handler\n}\n\nfunc (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlogging.Info(\"%s %s\", r.Method, r.URL)\n\n\th.realHandler.ServeHTTP(w, r)\n}\n\ntype HTTPServer struct {\n\thttp.Server\n}\n\nfunc (h *HTTPServer) ListenAndServe() error {\n\tlogging.Info(\n\t\t\"Starting server on \\033[%dm%s\\033[0m at \\033[%dm%s\\033[0m\",\n\t\tlogging.CYAN,\n\t\th.Server.Addr,\n\t\tlogging.CYAN,\n\t\t*path,\n\t)\n\n\treturn h.Server.ListenAndServe()\n}\n<commit_msg>Added Header Customization and Better Binding<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/koding\/logging\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\nvar (\n\tlisten = flag.String(\"listen\", \":8080\", \"Address to listen for requests on\")\n\tpath = flag.String(\"path\", \".\/\", \"Path the server is started from\")\n\theader = flag.String(\"header\", \"\", \"Header sent with every response\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tvar headers map[string]string\n\tif *header != \"\" {\n\t\ts := strings.Split(*header, \":\")\n\t\theaders = map[string]string{\n\t\t\ts[0]: s[1],\n\t\t}\n\t}\n\n\tserver := HTTPServer{\n\t\thttp.Server{\n\t\t\tAddr: *listen,\n\t\t\tHandler: HTTPHandler{\n\t\t\t\trealHandler: http.FileServer(http.Dir(*path)),\n\t\t\t\theaders: headers,\n\t\t\t},\n\t\t},\n\t\theaders,\n\t}\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlogging.Error(fmt.Sprint(err))\n\t}\n}\n\ntype HTTPHandler struct {\n\trealHandler http.Handler\n\theaders map[string]string\n}\n\nfunc (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlogging.Info(\"%s %s\", r.Method, r.URL)\n\n\tresponseHeaders := w.Header()\n\tfor k, v := range h.headers {\n\t\tresponseHeaders.Set(k, v)\n\t}\n\th.realHandler.ServeHTTP(w, r)\n}\n\ntype HTTPServer struct {\n\thttp.Server\n\theaders map[string]string\n}\n\nfunc (h *HTTPServer) ListenAndServe() error {\n\tlogging.Info(\n\t\t\"Starting server on %s at %s\",\n\t\tcolorize(logging.CYAN, h.Server.Addr),\n\t\tcolorize(logging.CYAN, *path),\n\t)\n\n\tfor k, v := range h.headers {\n\t\tlogging.Info(\"Adding Header: %s:%s\",\n\t\t\tcolorize(logging.CYAN, k),\n\t\t\tcolorize(logging.CYAN, v),\n\t\t)\n\t}\n\n\treturn h.Server.ListenAndServe()\n}\n\nfunc colorize(color logging.Color, s string) string {\n\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", color, s)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/skratchdot\/open-golang\/open\" \/\/ Opens file in external editor\n\t\"go.bug.st\/serial\"\n)\n\nvar FILENAME = flag.String(\"o\", \"output.txt\", \"Output file name\")\n\nfunc main() {\n\tflag.Parse()\n\tport, err := findArduino()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treadData(port)\n\topen.Run(*FILENAME)\n}\n\nfunc findArduino() (string, error) {\n\tports, err := serial.GetPortsList()\n\tlog.Printf(\"Unsorted list of found ports: %s\\n\", ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(ports) == 0 {\n\t\treturn \"\", errors.New(\"No devices found\")\n\t}\n\tsort.Strings(ports)\n\tport := ports[len(ports)-1]\n\tlog.Printf(\"Selected port: %s\\n\", port)\n\treturn port, nil\n}\n\nfunc readData(port string) {\n\tmode := &serial.Mode{\n\t\tBaudRate: 9600,\n\t}\n\tsp, err := serial.OpenPort(port, mode)\n\tdefer sp.Close()\n\n\tbuff := bufio.NewReader(sp)\n\n\tfile, _ := os.Create(*FILENAME)\n\tdefer file.Close()\n\tdefer log.Printf(\"Recieved data saved to %s\\n\", *FILENAME)\n\n\tmwr := io.MultiWriter(file, os.Stdout)\n\n\t\/\/ Attempt to read the first line, to see if it's broken\n\tfirstline, _ := buff.ReadBytes('\\n')\n\tif !strings.ContainsAny(string(firstline), \"03456789\") {\n\t\t\/\/ The first line seems not broken (heuristic)\n\t\tlog.Printf(\"First line added back in: \\\"%s\\\"\\n\", string(firstline))\n\t\tmwr.Write(firstline)\n\n\t}\n\n\tn, err := io.Copy(mwr, buff)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\n\tfmt.Println(\"Bytes read: %d\", n)\n}\n<commit_msg>Fixed extraneous newline bug<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/skratchdot\/open-golang\/open\" \/\/ Opens file in external editor\n\t\"go.bug.st\/serial\"\n)\n\nvar FILENAME = flag.String(\"o\", \"output.txt\", \"Output file name\")\n\nfunc main() {\n\tflag.Parse()\n\tport, err := findArduino()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treadData(port)\n\topen.Run(*FILENAME)\n}\n\nfunc findArduino() (string, error) {\n\tports, err := serial.GetPortsList()\n\tlog.Printf(\"Unsorted list of found ports: %s\\n\", ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(ports) == 0 {\n\t\treturn \"\", errors.New(\"No devices found\")\n\t}\n\tsort.Strings(ports)\n\tport := ports[len(ports)-1]\n\tlog.Printf(\"Selected port: %s\\n\", port)\n\treturn port, nil\n}\n\nfunc readData(port string) {\n\tmode := &serial.Mode{\n\t\tBaudRate: 9600,\n\t}\n\tsp, err := serial.OpenPort(port, mode)\n\tdefer sp.Close()\n\n\tbuff := bufio.NewReader(sp)\n\n\tfile, _ := os.Create(*FILENAME)\n\tdefer file.Close()\n\tdefer log.Printf(\"Recieved data saved to %s\\n\", *FILENAME)\n\n\tmwr := io.MultiWriter(file, os.Stdout)\n\n\t\/\/ Attempt to read the first line, to see if it's broken\n\tfirstline, _ := buff.ReadBytes('\\n')\n\tif !strings.ContainsAny(string(firstline), \"03456789\") {\n\t\t\/\/ The first line seems not broken (heuristic)\n\t\tlog.Printf(\"First line added back in: \\\"%s\\\"\\n\", strings.TrimSpace(string(firstline)))\n\t\tmwr.Write(firstline)\n\n\t}\n\n\tn, err := io.Copy(mwr, buff)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\n\tfmt.Println(\"Bytes read: %d\", n)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ oneill is a small command line application designed to manage a set of\n\/\/ docker containers on a single host\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/rehabstudio\/oneill\/config\"\n\t\"github.com\/rehabstudio\/oneill\/containerdefs\"\n\t\"github.com\/rehabstudio\/oneill\/dockerclient\"\n\t\"github.com\/rehabstudio\/oneill\/loaders\"\n)\n\nvar (\n\tbuildDate string\n\tversion string\n\tgitBranch string\n\tgitRevision string\n\tgitRepoStatus string\n)\n\n\/\/ exitOnError checks that an error is not nil. If the passed value is an\n\/\/ error, it is logged and the program exits with an error code of 1\nfunc exitOnError(err error, prefix string) {\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\"err\": err}).Fatal(prefix)\n\t}\n}\n\n\/\/ parseCliArgs parses any arguments passed to oneill on the command line\nfunc parseCliArgs() (string, bool) {\n\n\t\/\/ parse config file location from command line flag\n\tconfigFilePath := flag.String(\"config\", \"\/etc\/oneill\/config.yaml\", \"location of the oneill config file\")\n\tshowVersion := flag.Bool(\"v\", false, \"show version details and exit\")\n\tflag.Parse()\n\n\treturn *configFilePath, *showVersion\n}\n\nfunc main() {\n\n\tconfigFilePath, showVersion := parseCliArgs()\n\tif showVersion {\n\t\tfmt.Printf(\"oneill v%s\\n\\n\", version)\n\t\tfmt.Printf(\"buildDate: %s\\n\", buildDate)\n\t\tfmt.Printf(\"gitBranch: %s\\n\", gitBranch)\n\t\tfmt.Printf(\"gitRevision: %s\\n\", gitRevision)\n\t\tfmt.Printf(\"gitRepoStatus: %s\\n\", gitRepoStatus)\n\t\tos.Exit(0)\n\t}\n\n\tconfig, err := config.LoadConfig(configFilePath)\n\texitOnError(err, \"Unable to load configuration\")\n\n\tlogLevel, err := logrus.ParseLevel(config.LogLevel)\n\texitOnError(err, \"Unable to initialise logger\")\n\n\t\/\/ configure global logger instance\n\tlogrus.SetLevel(logLevel)\n\tif config.LogFormat == \"json\" {\n\t\tlogrus.SetFormatter(&logrus.JSONFormatter{})\n\t}\n\n\terr = dockerclient.InitDockerClient(config.DockerApiEndpoint, config.RegistryCredentials)\n\texitOnError(err, \"Unable to initialise docker client\")\n\n\t\/\/ load container definitions\n\tdefinitionLoader, err := loaders.GetLoader(config.DefinitionsURI)\n\texitOnError(err, \"Unable to load container definitions\")\n\tdefinitions, err := containerdefs.LoadContainerDefinitions(definitionLoader)\n\texitOnError(err, \"Unable to load container definitions\")\n\n\t\/\/ stop redundant containers\n\terr = containerdefs.RemoveRedundantContainers(definitions)\n\texitOnError(err, \"Unable to remove redundant containers\")\n\n\t\/\/ process all container definitions\n\terr = containerdefs.ProcessContainerDefinitions(config, definitions)\n\texitOnError(err, \"Unable to process service container definitions\")\n}\n<commit_msg>ensure only one instance of oneill can run at any given time<commit_after>\/\/ oneill is a small command line application designed to manage a set of\n\/\/ docker containers on a single host\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\n\t\"github.com\/rehabstudio\/oneill\/config\"\n\t\"github.com\/rehabstudio\/oneill\/containerdefs\"\n\t\"github.com\/rehabstudio\/oneill\/dockerclient\"\n\t\"github.com\/rehabstudio\/oneill\/loaders\"\n)\n\nvar (\n\tbuildDate string\n\tversion string\n\tgitBranch string\n\tgitRevision string\n\tgitRepoStatus string\n)\n\n\/\/ exitOnError checks that an error is not nil. If the passed value is an\n\/\/ error, it is logged and the program exits with an error code of 1\nfunc exitOnError(err error, prefix string) {\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\"err\": err}).Fatal(prefix)\n\t}\n}\n\n\/\/ parseCliArgs parses any arguments passed to oneill on the command line\nfunc parseCliArgs() (string, bool) {\n\n\t\/\/ parse config file location from command line flag\n\tconfigFilePath := flag.String(\"config\", \"\/etc\/oneill\/config.yaml\", \"location of the oneill config file\")\n\tshowVersion := flag.Bool(\"v\", false, \"show version details and exit\")\n\tflag.Parse()\n\n\treturn *configFilePath, *showVersion\n}\n\nfunc main() {\n\n\tconfigFilePath, showVersion := parseCliArgs()\n\tif showVersion {\n\t\tfmt.Printf(\"oneill v%s\\n\\n\", version)\n\t\tfmt.Printf(\"buildDate: %s\\n\", buildDate)\n\t\tfmt.Printf(\"gitBranch: %s\\n\", gitBranch)\n\t\tfmt.Printf(\"gitRevision: %s\\n\", gitRevision)\n\t\tfmt.Printf(\"gitRepoStatus: %s\\n\", gitRepoStatus)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ binding to a local socket ensures that only one instance of oneill is\n\t\/\/ running at any one time. The socket will be automatically freed up once\n\t\/\/ the process exits (cleanly or otherwise).\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:57922\")\n\texitOnError(err, \"oneill is already running\")\n\n\tconfig, err := config.LoadConfig(configFilePath)\n\texitOnError(err, \"Unable to load configuration\")\n\n\tlogLevel, err := logrus.ParseLevel(config.LogLevel)\n\texitOnError(err, \"Unable to initialise logger\")\n\n\t\/\/ configure global logger instance\n\tlogrus.SetLevel(logLevel)\n\tif config.LogFormat == \"json\" {\n\t\tlogrus.SetFormatter(&logrus.JSONFormatter{})\n\t}\n\n\terr = dockerclient.InitDockerClient(config.DockerApiEndpoint, config.RegistryCredentials)\n\texitOnError(err, \"Unable to initialise docker client\")\n\n\t\/\/ load container definitions\n\tdefinitionLoader, err := loaders.GetLoader(config.DefinitionsURI)\n\texitOnError(err, \"Unable to load container definitions\")\n\tdefinitions, err := containerdefs.LoadContainerDefinitions(definitionLoader)\n\texitOnError(err, \"Unable to load container definitions\")\n\n\t\/\/ stop redundant containers\n\terr = containerdefs.RemoveRedundantContainers(definitions)\n\texitOnError(err, \"Unable to remove redundant containers\")\n\n\t\/\/ process all container definitions\n\terr = containerdefs.ProcessContainerDefinitions(config, definitions)\n\texitOnError(err, \"Unable to process service container definitions\")\n\n\t\/\/ explicitly close the listening socket\n\tl.Close()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n)\n\nvar (\n\tapp = kingpin.New(\"consul-ssh\", \"Query Consul catalog for service\")\n\tserverURL = app.Flag(\"server\", \"Consul URL; can also be provided using the CONSUL_URL environment variable\").Default(\"http:\/\/127.0.0.1:8500\/\").Envar(\"CONSUL_URL\").URL()\n\ttags = app.Flag(\"tag\", \"Consul tag\").Short('t').Strings()\n\tservice = app.Flag(\"service\", \"Consul service\").Required().Short('s').String()\n\tdc = app.Flag(\"dc\", \"Consul datacenter\").String()\n\tqueryCmd = app.Command(\"query\", \"Query Consul catalog\")\n\tjsonFmt = queryCmd.Flag(\"json\", \"JSON query output\").Short('j').Bool()\n\tsshCmd = app.Command(\"ssh\", \"ssh into server using Consul query\")\n\tuser = sshCmd.Flag(\"username\", \"ssh user name\").Short('u').String()\n\ttagsMerge = app.Flag(\"tags-mode\", \"Find nodes with *all* or *any* of the tags\").Short('m').Default(\"all\").Enum(\"all\", \"any\")\n\t_ = app.HelpFlag.Short('h')\n)\n\nfunc main() {\n\tcmd := kingpin.MustParse(app.Parse(os.Args[1:]))\n\tmergeFunc := intersectionMerge\n\tif *tagsMerge == \"any\" {\n\t\tmergeFunc = unionMerge\n\t}\n\tresults := queryMulti(consulConfig(), *service, *tags, mergeFunc)\n\n\tswitch cmd {\n\tcase queryCmd.FullCommand():\n\t\tif *jsonFmt {\n\t\t\tprintJsonResults(results)\n\t\t} else {\n\t\t\tprintQueryResults(results)\n\t\t}\n\tcase sshCmd.FullCommand():\n\t\tssh(selectRandomNode(results), *user)\n\t}\n\n}\n\nfunc consulConfig() *api.Config {\n\tconfig := &api.Config{Address: (*serverURL).Host, Scheme: (*serverURL).Scheme}\n\tif *dc != \"\" {\n\t\tconfig.Datacenter = *dc\n\t}\n\treturn config\n}\n\nfunc printJsonResults(results []*api.CatalogService) {\n\tif b, err := json.MarshalIndent(results, \"\", \" \"); err != nil {\n\t\tkingpin.Fatalf(\"Failed to convert results to json, %s\\n\", err.Error())\n\t} else {\n\t\tfmt.Println(string(b))\n\t}\n}\n\nfunc printQueryResults(results []*api.CatalogService) {\n\tfor _, catalogService := range results {\n\t\tfmt.Println(catalogService.Node)\n\t}\n}\n\nfunc selectRandomNode(services []*api.CatalogService) string {\n\treturn services[rand.Intn(len(services))].Node\n}\n\nfunc ssh(address string, user string) {\n\tbin, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\tkingpin.Fatalf(\"Failed to find ssh binary: %s\\n\", err.Error())\n\t}\n\n\tssh_args := make([]string, 2, 3)\n\tssh_args[0] = \"ssh\"\n\tssh_args[1] = address\n\tif user != \"\" {\n\t\tssh_args = append(ssh_args, \"-l \"+user)\n\t}\n\n\tsyscall.Exec(bin, ssh_args, os.Environ())\n}\n<commit_msg>print error message when no results found<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n)\n\nvar (\n\tapp = kingpin.New(\"consul-ssh\", \"Query Consul catalog for service\")\n\tserverURL = app.Flag(\"server\", \"Consul URL; can also be provided using the CONSUL_URL environment variable\").Default(\"http:\/\/127.0.0.1:8500\/\").Envar(\"CONSUL_URL\").URL()\n\ttags = app.Flag(\"tag\", \"Consul tag\").Short('t').Strings()\n\tservice = app.Flag(\"service\", \"Consul service\").Required().Short('s').String()\n\tdc = app.Flag(\"dc\", \"Consul datacenter\").String()\n\tqueryCmd = app.Command(\"query\", \"Query Consul catalog\")\n\tjsonFmt = queryCmd.Flag(\"json\", \"JSON query output\").Short('j').Bool()\n\tsshCmd = app.Command(\"ssh\", \"ssh into server using Consul query\")\n\tuser = sshCmd.Flag(\"username\", \"ssh user name\").Short('u').String()\n\ttagsMerge = app.Flag(\"tags-mode\", \"Find nodes with *all* or *any* of the tags\").Short('m').Default(\"all\").Enum(\"all\", \"any\")\n\t_ = app.HelpFlag.Short('h')\n)\n\nfunc main() {\n\tcmd := kingpin.MustParse(app.Parse(os.Args[1:]))\n\tmergeFunc := intersectionMerge\n\tif *tagsMerge == \"any\" {\n\t\tmergeFunc = unionMerge\n\t}\n\tresults := queryMulti(consulConfig(), *service, *tags, mergeFunc)\n\tif len(results) == 0 {\n\t\tkingpin.Fatalf(\"No results from Consul query\")\n\t}\n\tswitch cmd {\n\tcase queryCmd.FullCommand():\n\t\tif *jsonFmt {\n\t\t\tprintJsonResults(results)\n\t\t} else {\n\t\t\tprintQueryResults(results)\n\t\t}\n\tcase sshCmd.FullCommand():\n\t\tssh(selectRandomNode(results), *user)\n\t}\n\n}\n\nfunc consulConfig() *api.Config {\n\tconfig := &api.Config{Address: (*serverURL).Host, Scheme: (*serverURL).Scheme}\n\tif *dc != \"\" {\n\t\tconfig.Datacenter = *dc\n\t}\n\treturn config\n}\n\nfunc printJsonResults(results []*api.CatalogService) {\n\tif b, err := json.MarshalIndent(results, \"\", \" \"); err != nil {\n\t\tkingpin.Fatalf(\"Failed to convert results to json, %s\\n\", err.Error())\n\t} else {\n\t\tfmt.Println(string(b))\n\t}\n}\n\nfunc printQueryResults(results []*api.CatalogService) {\n\tfor _, catalogService := range results {\n\t\tfmt.Println(catalogService.Node)\n\t}\n}\n\nfunc selectRandomNode(services []*api.CatalogService) string {\n\treturn services[rand.Intn(len(services))].Node\n}\n\nfunc ssh(address string, user string) {\n\tbin, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\tkingpin.Fatalf(\"Failed to find ssh binary: %s\\n\", err.Error())\n\t}\n\n\tssh_args := make([]string, 2, 3)\n\tssh_args[0] = \"ssh\"\n\tssh_args[1] = address\n\tif user != \"\" {\n\t\tssh_args = append(ssh_args, \"-l \"+user)\n\t}\n\n\tsyscall.Exec(bin, ssh_args, os.Environ())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ TODO: This should all move to either config file or command line options\n\ntype TokenServerResponse struct {\n\tId string `json:\"id\"` \/\/ Signed authorization token\n\tKey string `json:\"key\"` \/\/ Secret derived from the shared secret\n\tUid string `json:\"uid\"` \/\/ The user id for this service\n\tApiEndpoint string `json:\"api_endpoint\"` \/\/ The root URL for the user of this service\n\tDuration int64 `json:\"duration\"` \/\/ the validity duration of the issued token, in seconds\n}\n\nvar clientIdValidator = regexp.MustCompile(`^[a-zA-Z0-9._-]{1,32}$`)\n\ntype tokenServerContext struct {\n\tconfig TokenServerConfig\n\tdb *DatabaseSession\n}\n\nfunc (c *tokenServerContext) SyncTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Make sure we have a BrowserID Authorization header\n\n\tauthorization := r.Header.Get(\"Authorization\")\n\tif len(authorization) == 0 {\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\ttokens := strings.Split(authorization, \" \")\n\tif len(tokens) != 2 {\n\t\thttp.Error(w, \"Unsupported authorization method\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tif tokens[0] != \"BrowserID\" {\n\t\thttp.Error(w, \"Unsupported authorization method\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tassertion := tokens[1]\n\n\t\/\/ Check if the client state is valid\n\n\tclientState := r.Header.Get(\"X-Client-State\")\n\tif len(clientState) != 0 {\n\t\tif !clientIdValidator.MatchString(clientState) {\n\t\t\thttp.Error(w, \"Invalid X-Client-State\", http.StatusInternalServerError) \/\/ TODO: JSON Error\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Verify the assertion\n\n\tverifier, err := NewVerifier(c.config.PersonaVerifier, c.config.PersonaAudience)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tpersonaResponse, err := verifier.VerifyAssertion(assertion)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif personaResponse.Status != \"okay\" {\n\t\thttp.Error(w, \"Invalid BrowserID assertion\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t\/\/ Grab some things we need from the assertion\n\n\tgeneration := 1 \/\/ TODO: This needs to be parsed from the assertion\n\n\t\/\/ Load the user. Create if new and if signups are allowed.\n\n\tvar user *User\n\tuser, err = c.db.GetUser(personaResponse.Email)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif user == nil {\n\t\tif c.config.AllowNewUsers {\n\t\t\tuser, err = c.db.AllocateUser(personaResponse.Email, generation, clientState)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(w, \"Invalid credentials\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Deal with generation\n\n\tnewGeneration := 0\n\tnewClientState := \"\"\n\n\tif generation > user.Generation {\n\t\tnewGeneration = generation\n\t}\n\n\tif clientState != user.ClientState {\n\t\t\/\/ Don't revert from some-client-state to no-client-state\n\t\tif len(clientState) == 0 {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Don't revert to a previous client-state\n\t\tif user.IsOldClientState(clientState) {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/ If the IdP has been sending generation numbers, then don't\n\t\t\/\/ update client-state without a change in generation number\n\t\tif user.Generation > 0 && newGeneration != 0 {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tnewClientState = clientState\n\t}\n\n\tif newGeneration != 0 || len(newClientState) != 0 {\n\t\tuser, err = c.db.UpdateUser(user.Email, newGeneration, newClientState)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Error out if this client is behind some previously-seen\n\t\/\/ client. This is done after the updates because some other, even\n\t\/\/ more up-to-date client may have raced with a concurrent update.\n\n\tif user.Generation > generation {\n\t\thttp.Error(w, \"invalid-generation\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t\/\/ Finally, create token and secret\n\n\texpires := time.Now().Unix() + c.config.TokenDuration\n\n\ttokenSecret, derivedSecret, err := GenerateSecret(user.Uid, c.config.StorageServerNode,\n\t\texpires, c.config.SharedSecret)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ All done, build a response\n\n\ttokenServerResponse := &TokenServerResponse{\n\t\tId: tokenSecret,\n\t\tKey: derivedSecret,\n\t\tUid: user.Uid,\n\t\tApiEndpoint: fmt.Sprintf(\"%s\/1.5\/%d\", c.config.StorageServerNode, user.Uid),\n\t\tDuration: c.config.TokenDuration,\n\t}\n\n\tdata, err := json.Marshal(tokenServerResponse)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Write(data)\n}\n\nfunc SetupTokenServerRouter(r *mux.Router, config TokenServerConfig) (*tokenServerContext, error) {\n\tdb, err := NewDatabaseSession(config.DatabaseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontext := &tokenServerContext{config: config, db: db}\n\tr.HandleFunc(\"\/1.0\/sync\/1.5\", context.SyncTokenHandler)\n\n\treturn context, nil\n}\n\nfunc VersionHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"1.0\")) \/\/ TODO: How can we easily embed the git rev and tag in here?\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/version\", VersionHandler)\n\n\tconfig := DefaultTokenServerConfig() \/\/ TODO: Get this from command line options\n\n\t_, err := SetupTokenServerRouter(router.PathPrefix(DEFAULT_API_PREFIX).Subrouter(), config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\taddr := fmt.Sprintf(\"%s:%d\", DEFAULT_API_LISTEN_ADDRESS, DEFAULT_API_LISTEN_PORT)\n\tlog.Printf(\"Starting tokenserver server on http:\/\/%s\", addr)\n\thttp.Handle(\"\/\", router)\n\terr = http.ListenAndServe(addr, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Take generation from idpClaims<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ TODO: This should all move to either config file or command line options\n\ntype TokenServerResponse struct {\n\tId string `json:\"id\"` \/\/ Signed authorization token\n\tKey string `json:\"key\"` \/\/ Secret derived from the shared secret\n\tUid string `json:\"uid\"` \/\/ The user id for this service\n\tApiEndpoint string `json:\"api_endpoint\"` \/\/ The root URL for the user of this service\n\tDuration int64 `json:\"duration\"` \/\/ the validity duration of the issued token, in seconds\n}\n\nvar clientIdValidator = regexp.MustCompile(`^[a-zA-Z0-9._-]{1,32}$`)\n\ntype tokenServerContext struct {\n\tconfig TokenServerConfig\n\tdb *DatabaseSession\n}\n\nfunc (c *tokenServerContext) SyncTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Make sure we have a BrowserID Authorization header\n\n\tauthorization := r.Header.Get(\"Authorization\")\n\tif len(authorization) == 0 {\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\ttokens := strings.Split(authorization, \" \")\n\tif len(tokens) != 2 {\n\t\thttp.Error(w, \"Unsupported authorization method\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tif tokens[0] != \"BrowserID\" {\n\t\thttp.Error(w, \"Unsupported authorization method\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tassertion := tokens[1]\n\n\t\/\/ Check if the client state is valid\n\n\tclientState := r.Header.Get(\"X-Client-State\")\n\tif len(clientState) != 0 {\n\t\tif !clientIdValidator.MatchString(clientState) {\n\t\t\thttp.Error(w, \"Invalid X-Client-State\", http.StatusInternalServerError) \/\/ TODO: JSON Error\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Verify the assertion\n\n\tverifier, err := NewVerifier(c.config.PersonaVerifier, c.config.PersonaAudience)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tpersonaResponse, err := verifier.VerifyAssertion(assertion)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif personaResponse.Status != \"okay\" {\n\t\thttp.Error(w, \"Invalid BrowserID assertion\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t\/\/ Grab the generation from the assertion. This conveniently\n\t\/\/ defaults to 0 if it is not present.\n\n\tgeneration := personaResponse.Claims.Generation\n\n\t\/\/ Load the user. Create if new and if signups are allowed.\n\n\tvar user *User\n\tuser, err = c.db.GetUser(personaResponse.Email)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif user == nil {\n\t\tif c.config.AllowNewUsers {\n\t\t\tuser, err = c.db.AllocateUser(personaResponse.Email, generation, clientState)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(w, \"Invalid credentials\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Deal with generation\n\n\tnewGeneration := 0\n\tnewClientState := \"\"\n\n\tif generation > user.Generation {\n\t\tnewGeneration = generation\n\t}\n\n\tif clientState != user.ClientState {\n\t\t\/\/ Don't revert from some-client-state to no-client-state\n\t\tif len(clientState) == 0 {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Don't revert to a previous client-state\n\t\tif user.IsOldClientState(clientState) {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/ If the IdP has been sending generation numbers, then don't\n\t\t\/\/ update client-state without a change in generation number\n\t\tif user.Generation > 0 && newGeneration != 0 {\n\t\t\thttp.Error(w, \"invalid-client-state\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tnewClientState = clientState\n\t}\n\n\tif newGeneration != 0 || len(newClientState) != 0 {\n\t\tuser, err = c.db.UpdateUser(user.Email, newGeneration, newClientState)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Error out if this client is behind some previously-seen\n\t\/\/ client. This is done after the updates because some other, even\n\t\/\/ more up-to-date client may have raced with a concurrent update.\n\n\tif user.Generation > generation {\n\t\thttp.Error(w, \"invalid-generation\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t\/\/ Finally, create token and secret\n\n\texpires := time.Now().Unix() + c.config.TokenDuration\n\n\ttokenSecret, derivedSecret, err := GenerateSecret(user.Uid, c.config.StorageServerNode,\n\t\texpires, c.config.SharedSecret)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ All done, build a response\n\n\ttokenServerResponse := &TokenServerResponse{\n\t\tId: tokenSecret,\n\t\tKey: derivedSecret,\n\t\tUid: user.Uid,\n\t\tApiEndpoint: fmt.Sprintf(\"%s\/1.5\/%d\", c.config.StorageServerNode, user.Uid),\n\t\tDuration: c.config.TokenDuration,\n\t}\n\n\tdata, err := json.Marshal(tokenServerResponse)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Write(data)\n}\n\nfunc SetupTokenServerRouter(r *mux.Router, config TokenServerConfig) (*tokenServerContext, error) {\n\tdb, err := NewDatabaseSession(config.DatabaseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontext := &tokenServerContext{config: config, db: db}\n\tr.HandleFunc(\"\/1.0\/sync\/1.5\", context.SyncTokenHandler)\n\n\treturn context, nil\n}\n\nfunc VersionHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"1.0\")) \/\/ TODO: How can we easily embed the git rev and tag in here?\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/version\", VersionHandler)\n\n\tconfig := DefaultTokenServerConfig() \/\/ TODO: Get this from command line options\n\n\t_, err := SetupTokenServerRouter(router.PathPrefix(DEFAULT_API_PREFIX).Subrouter(), config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\taddr := fmt.Sprintf(\"%s:%d\", DEFAULT_API_LISTEN_ADDRESS, DEFAULT_API_LISTEN_PORT)\n\tlog.Printf(\"Starting tokenserver server on http:\/\/%s\", addr)\n\thttp.Handle(\"\/\", router)\n\terr = http.ListenAndServe(addr, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*-\n * Copyright 2015 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/pprof\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on.\").PlaceHolder(\"ADDR\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to (HOST:PORT, or unix:PATH).\").PlaceHolder(\"ADDR\").Required().String()\n\tunsafeTarget = kingpin.Flag(\"unsafe-target\", \"If set, does not limit target to localhost, 127.0.0.1 or ::1\").Bool()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12).\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore.\").PlaceHolder(\"PASS\").String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509).\").Required().String()\n\ttimedReload = kingpin.Flag(\"timed-reload\", \"Reload keystores every N seconds, refresh listener on changes.\").PlaceHolder(\"N\").Int()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject.\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated).\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated).\").PlaceHolder(\"OU\").Strings()\n\tstatusAddress = kingpin.Flag(\"status\", \"Enable serving \/_status endpoint on given addr:port (optional)\").PlaceHolder(\"ADDR\").TCP()\n\tenableProf = kingpin.Flag(\"pprof\", \"Enable serving \/debug\/pprof endpoints alongside \/_status (for profiling)\").Bool()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr.\").Bool()\n)\n\n\/\/ Context groups listening context data together\ntype Context struct {\n\twatcher chan bool\n\tlisteners *sync.WaitGroup\n\tstatus *StatusHandler\n\tdial func() (net.Conn, error)\n}\n\n\/\/ Global logger instance\nvar logger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\t\/\/ Validate flags\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedCNs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-cn are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedOUs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-ou are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif !validateTarget(*forwardAddress) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --target must be localhost:port, 127.0.0.1:port or [::1]:port\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\terr, dial := backendDialer()\n\tif err != nil {\n\t\tlogger.Printf(\"invalid backend address: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\tstatus := NewStatusHandler(dial)\n\tif *statusAddress != nil {\n\t\tmux := http.NewServeMux()\n\t\tmux.Handle(\"\/_status\", status)\n\t\tif *enableProf {\n\t\t\tmux.Handle(\"\/debug\/pprof\/\", http.HandlerFunc(pprof.Index))\n\t\t\tmux.Handle(\"\/debug\/pprof\/cmdline\", http.HandlerFunc(pprof.Cmdline))\n\t\t\tmux.Handle(\"\/debug\/pprof\/profile\", http.HandlerFunc(pprof.Profile))\n\t\t\tmux.Handle(\"\/debug\/pprof\/symbol\", http.HandlerFunc(pprof.Symbol))\n\t\t\tmux.Handle(\"\/debug\/pprof\/trace\", http.HandlerFunc(pprof.Trace))\n\t\t}\n\n\t\taddr := (*statusAddress).String()\n\t\tlogger.Printf(\"status port enabled; serving status on http:\/\/%s\/_status\", addr)\n\t\tgo func() {\n\t\t\tlogger.Fatal(http.ListenAndServe(addr, mux))\n\t\t}()\n\t}\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ Set up file watchers (if requested)\n\twatcher := make(chan bool, 1)\n\tif *timedReload > 0 {\n\t\tgo watchFiles([]string{*keystorePath, *caBundlePath}, time.Duration(*timedReload)*time.Second, watcher)\n\t}\n\n\t\/\/ Start listening\n\tstarted := make(chan bool, 1)\n\tgo listen(started, &Context{watcher, listeners, status, dial})\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Print(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Print(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Print(\"all listeners closed, shutting down\")\n}\n\nfunc validateTarget(addr string) bool {\n\tif *unsafeTarget {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"unix:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"127.0.0.1:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"[::1]:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"localhost:\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, context *Context) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig(*keystorePath, *keystorePass, *caBundlePath)\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tgo accept(listener, handlers, stopper, leaf, context.dial)\n\tgo signalHandler(listener, stopper, context)\n\n\tstarted <- true\n\tcontext.status.Listening()\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tcontext.listeners.Done()\n}\n\n\/\/ Get backend dialer function\nfunc backendDialer() (error, func() (net.Conn, error)) {\n\tbackendNet, backendAddr, err := parseTarget(*forwardAddress)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\n\treturn nil, func() (net.Conn, error) {\n\t\treturn net.Dial(backendNet, backendAddr)\n\t}\n}\n<commit_msg>Better descriptions for cli flags<commit_after>\/*-\n * Copyright 2015 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/pprof\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kavu\/go_reuseport\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tlistenAddress = kingpin.Flag(\"listen\", \"Address and port to listen on (HOST:PORT).\").PlaceHolder(\"ADDR\").Required().TCP()\n\tforwardAddress = kingpin.Flag(\"target\", \"Address to foward connections to (HOST:PORT, or unix:PATH).\").PlaceHolder(\"ADDR\").Required().String()\n\tunsafeTarget = kingpin.Flag(\"unsafe-target\", \"If set, does not limit target to localhost, 127.0.0.1 or ::1\").Bool()\n\tkeystorePath = kingpin.Flag(\"keystore\", \"Path to certificate and keystore (PKCS12).\").PlaceHolder(\"PATH\").Required().String()\n\tkeystorePass = kingpin.Flag(\"storepass\", \"Password for certificate and keystore (optional).\").PlaceHolder(\"PASS\").String()\n\tcaBundlePath = kingpin.Flag(\"cacert\", \"Path to certificate authority bundle file (PEM\/X509).\").Required().String()\n\ttimedReload = kingpin.Flag(\"timed-reload\", \"Reload keystores every N seconds, refresh listener on changes.\").PlaceHolder(\"N\").Int()\n\tallowAll = kingpin.Flag(\"allow-all\", \"Allow all clients, do not check client cert subject.\").Bool()\n\tallowedCNs = kingpin.Flag(\"allow-cn\", \"Allow clients with given common name (can be repeated).\").PlaceHolder(\"CN\").Strings()\n\tallowedOUs = kingpin.Flag(\"allow-ou\", \"Allow clients with organizational unit name (can be repeated).\").PlaceHolder(\"OU\").Strings()\n\tstatusAddress = kingpin.Flag(\"status\", \"Enable serving \/_status on given HOST:PORT (shows tunnel and backend health status).\").PlaceHolder(\"ADDR\").TCP()\n\tenableProf = kingpin.Flag(\"pprof\", \"Enable serving \/debug\/pprof endpoints alongside \/_status (for profiling).\").Bool()\n\tuseSyslog = kingpin.Flag(\"syslog\", \"Send logs to syslog instead of stderr.\").Bool()\n)\n\n\/\/ Context groups listening context data together\ntype Context struct {\n\twatcher chan bool\n\tlisteners *sync.WaitGroup\n\tstatus *StatusHandler\n\tdial func() (net.Conn, error)\n}\n\n\/\/ Global logger instance\nvar logger = log.New(os.Stderr, \"\", log.LstdFlags|log.Lmicroseconds)\n\nfunc initLogger() {\n\tif *useSyslog {\n\t\tvar err error\n\t\tlogger, err = syslog.NewLogger(syslog.LOG_NOTICE|syslog.LOG_DAEMON, log.LstdFlags|log.Lmicroseconds)\n\t\tpanicOnError(err)\n\t}\n\n\t\/\/ Set log prefix to process ID to distinguish parent\/child\n\tlogger.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ panicOnError panics if err is not nil\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tkingpin.Parse()\n\n\t\/\/ Validate flags\n\tif !(*allowAll) && len(*allowedCNs) == 0 && len(*allowedOUs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: at least one of --allow-all, --allow-cn or --allow-ou is required\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedCNs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-cn are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif *allowAll && len(*allowedOUs) != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --allow-all and --allow-ou are mutually exclusive\")\n\t\tos.Exit(1)\n\t}\n\tif !validateTarget(*forwardAddress) {\n\t\tfmt.Fprintf(os.Stderr, \"ghostunnel: error: --target must be localhost:port, 127.0.0.1:port or [::1]:port\")\n\t\tos.Exit(1)\n\t}\n\n\tinitLogger()\n\n\terr, dial := backendDialer()\n\tif err != nil {\n\t\tlogger.Printf(\"invalid backend address: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\tstatus := NewStatusHandler(dial)\n\tif *statusAddress != nil {\n\t\tmux := http.NewServeMux()\n\t\tmux.Handle(\"\/_status\", status)\n\t\tif *enableProf {\n\t\t\tmux.Handle(\"\/debug\/pprof\/\", http.HandlerFunc(pprof.Index))\n\t\t\tmux.Handle(\"\/debug\/pprof\/cmdline\", http.HandlerFunc(pprof.Cmdline))\n\t\t\tmux.Handle(\"\/debug\/pprof\/profile\", http.HandlerFunc(pprof.Profile))\n\t\t\tmux.Handle(\"\/debug\/pprof\/symbol\", http.HandlerFunc(pprof.Symbol))\n\t\t\tmux.Handle(\"\/debug\/pprof\/trace\", http.HandlerFunc(pprof.Trace))\n\t\t}\n\n\t\taddr := (*statusAddress).String()\n\t\tlogger.Printf(\"status port enabled; serving status on http:\/\/%s\/_status\", addr)\n\t\tgo func() {\n\t\t\tlogger.Fatal(http.ListenAndServe(addr, mux))\n\t\t}()\n\t}\n\n\tlisteners := &sync.WaitGroup{}\n\tlisteners.Add(1)\n\n\t\/\/ Set up file watchers (if requested)\n\twatcher := make(chan bool, 1)\n\tif *timedReload > 0 {\n\t\tgo watchFiles([]string{*keystorePath, *caBundlePath}, time.Duration(*timedReload)*time.Second, watcher)\n\t}\n\n\t\/\/ Start listening\n\tstarted := make(chan bool, 1)\n\tgo listen(started, &Context{watcher, listeners, status, dial})\n\n\tup := <-started\n\tif !up {\n\t\tlogger.Print(\"failed to start initial listener\")\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Print(\"initial startup completed, waiting for connections\")\n\tlisteners.Wait()\n\tlogger.Print(\"all listeners closed, shutting down\")\n}\n\nfunc validateTarget(addr string) bool {\n\tif *unsafeTarget {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"unix:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"127.0.0.1:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"[::1]:\") {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(addr, \"localhost:\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Open listening socket. Take note that we create a \"reusable port\n\/\/ listener\", meaning we pass SO_REUSEPORT to the kernel. This allows\n\/\/ us to have multiple sockets listening on the same port and accept\n\/\/ connections. This is useful for the purposes of replacing certificates\n\/\/ in-place without having to take downtime, e.g. if a certificate is\n\/\/ expiring.\nfunc listen(started chan bool, context *Context) {\n\t\/\/ Open raw listening socket\n\tnetwork, address := decodeAddress(*listenAddress)\n\trawListener, err := reuseport.NewReusablePortListener(network, address)\n\tif err != nil {\n\t\tlogger.Printf(\"error opening socket: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\t\/\/ Wrap listening socket with TLS listener.\n\ttlsConfig, err := buildConfig(*keystorePath, *keystorePass, *caBundlePath)\n\tif err != nil {\n\t\tlogger.Printf(\"error setting up TLS: %s\", err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\n\tleaf := tlsConfig.Certificates[0].Leaf\n\n\tlistener := tls.NewListener(rawListener, tlsConfig)\n\tlogger.Printf(\"listening on %s\", *listenAddress)\n\tdefer listener.Close()\n\n\thandlers := &sync.WaitGroup{}\n\thandlers.Add(1)\n\n\t\/\/ A channel to allow signal handlers to notify our accept loop that it\n\t\/\/ should shut down.\n\tstopper := make(chan bool, 1)\n\n\tgo accept(listener, handlers, stopper, leaf, context.dial)\n\tgo signalHandler(listener, stopper, context)\n\n\tstarted <- true\n\tcontext.status.Listening()\n\n\tlogger.Printf(\"listening with cert serial no. %d (expiring %s)\", leaf.SerialNumber, leaf.NotAfter.String())\n\thandlers.Wait()\n\n\tcontext.listeners.Done()\n}\n\n\/\/ Get backend dialer function\nfunc backendDialer() (error, func() (net.Conn, error)) {\n\tbackendNet, backendAddr, err := parseTarget(*forwardAddress)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\n\treturn nil, func() (net.Conn, error) {\n\t\treturn net.Dial(backendNet, backendAddr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\namqp2gelf\n\nA simple tool to read from an AMQP queue and forwards as GELF\/UPD packet.\n\nFor JSON messages it tries to not change any fields.\nOther messages are put inside the GELF \"message\" field.\n\n\n2014, DECK36 GmbH & Co. KG, <martin.schuette@deck36.de>\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/DECK36\/go-gelf\/gelf\"\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst thisVersion = \"0.3\"\nconst thisProgram = \"amqp2gelf\"\n\n\/\/ all command line options\ntype CommandLineOptions struct {\n\turi *string\n\tqueueName *string\n\tgelf_server *string\n\tgelf_port *int\n\tverbose *bool\n}\n\nvar options CommandLineOptions\n\nfunc init() {\n\t\/\/ this does not look right...\n\t\/\/ I am looking for a pattern how to group command line arguments in a struct\n\toptions = CommandLineOptions{\n\t\tflag.String(\"uri\", \"amqp:\/\/user:password@broker.example.com:5672\/vhost\", \"AMQP URI\"),\n\t\tflag.String(\"queue\", \"logging_queue\", \"Durable AMQP queue name\"),\n\t\tflag.String(\"server\", \"localhost\", \"Graylog2 server\"),\n\t\tflag.Int(\"port\", 12201, \"Graylog2 GELF\/UDP port\"),\n\t\tflag.Bool(\"v\", false, \"Verbose output\"),\n\t}\n\tflag.Parse()\n}\n\ntype Consumer struct {\n\tconn *amqp.Connection\n\tchannel *amqp.Channel\n\ttag string\n\tdone chan error\n}\n\nfunc amqpConsumer(amqpURI string, amqpQueue string, shutdown chan<- string) (c *Consumer, err error) {\n\tamqpConfig := amqp.Config{\n\t\tProperties: amqp.Table{\n\t\t\t\"product\": thisProgram,\n\t\t\t\"version\": thisVersion,\n\t\t},\n\t}\n\tc = &Consumer{\n\t\tconn: nil,\n\t\tchannel: nil,\n\t\ttag: fmt.Sprintf(\"amqp2gelf-%d\", os.Getpid()),\n\t\tdone: make(chan error),\n\t}\n\n\t\/\/ this is the important part:\n\tif *options.verbose {\n\t\tlog.Println(\"connecting to \", amqpURI, \"...\")\n\t}\n\tc.conn, err = amqp.DialConfig(amqpURI, amqpConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"AMQP Dial: %s\", err)\n\t}\n\tc.channel, err = c.conn.Channel()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"AMQP Channel: %s\", err)\n\t}\n\n\t\/\/ here we only ensure the AMQP queue exists\n\tq, err := c.channel.QueueDeclare(\n\t\tamqpQueue, \/\/ name\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ auto-deleted\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Queue Declare: %v\", err)\n\t}\n\n\t\/\/ init the consumer\n\tdeliveries, err := c.channel.Consume(\n\t\tq.Name, \/\/ name\n\t\tc.tag, \/\/ consumerTag,\n\t\tfalse, \/\/ noAck\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noLocal\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Queue Consume: %s\", err)\n\t}\n\n\tgo writeLogsToGelf(deliveries, c.done)\n\n\tgo func() {\n\t\tnotification := c.channel.NotifyClose(make(chan *amqp.Error))\n\t\tn := <-notification\n\t\tc.done <- fmt.Errorf(\"AMQP server closed connection: %v\", n)\n\t}()\n\n\treturn\n}\n\n\/\/ convert JSON (possibly already GELF) input to GELF\nfunc buildGelfMessage_json(message []byte) (gm gelf.Message, err error) {\n\t\/\/ list of \"reserved\" field names\n\t\/\/ cf. https:\/\/github.com\/Graylog2\/graylog2-server\/blob\/0.20\/graylog2-plugin-interfaces\/src\/main\/java\/org\/graylog2\/plugin\/Message.java#L61 and #L81\n\t\/\/ Go does not allow const maps :-\/\n\tgelfReservedField := map[string]bool{\n\t\t\"_id\": true,\n\t\t\"_ttl\": true,\n\t\t\"_source\": true,\n\t\t\"_all\": true,\n\t\t\"_index\": true,\n\t\t\"_type\": true,\n\t\t\"_score\": true,\n\t}\n\n\tvar emptyinterface interface{}\n\terr = json.Unmarshal(message, &emptyinterface)\n\tif err != nil {\n\t\tif *options.verbose {\n\t\t\tlog.Printf(\"Cannot parse JSON, err: %v, msg: '%s'\", err, message)\n\t\t}\n\t\treturn\n\t}\n\tjm := emptyinterface.(map[string]interface{})\n\n\t\/\/ rename reserved field names (with and w\/o '_')\n\t\/\/ note: we do not double check if 'renamed_xyz' is already present\n\tfor k, v := range jm {\n\t\tif gelfReservedField[k] {\n\t\t\tjm[\"renamed\"+k] = v\n\t\t\tdelete(jm, k)\n\t\t} else if gelfReservedField[\"_\"+k] {\n\t\t\tjm[\"renamed_\"+k] = v\n\t\t\tdelete(jm, k)\n\t\t}\n\t}\n\n\t\/\/ ensure some required fields are set, use defaults if missing\n\tvar gelf_hostname string = \"unknown_amqp\"\n\tif _, ok := jm[\"host\"]; ok {\n\t\tgelf_hostname = jm[\"host\"].(string)\n\t}\n\n\tvar gelf_shortmsg string = \"\"\n\tif _, ok := jm[\"short_message\"]; ok {\n\t\tgelf_shortmsg = jm[\"short_message\"].(string)\n\t}\n\n\tvar gelf_timestamp float64 = 0.0\n\tif _, ok := jm[\"timestamp\"]; ok {\n\t\tswitch tsval := jm[\"timestamp\"].(type) {\n\t\tcase float64:\n\t\t\tgelf_timestamp = tsval\n\t\tcase string:\n\t\t\tgelf_timestamp, _ = strconv.ParseFloat(tsval, 64)\n\t\t}\n\t}\n\n\tvar gelf_level int32 = 6 \/\/ info\n\tif _, ok := jm[\"level\"]; ok {\n\t\tgelf_level = jm[\"level\"].(int32)\n\t}\n\n\tvar gelf_version string = \"1.1\"\n\tif _, ok := jm[\"version\"]; ok {\n\t\tgelf_version = jm[\"version\"].(string)\n\t}\n\n\tgm = gelf.Message{\n\t\tVersion: gelf_version,\n\t\tHost: gelf_hostname,\n\t\tShort: gelf_shortmsg,\n\t\tTimeUnix: gelf_timestamp,\n\t\tLevel: gelf_level,\n\t\tExtra: jm,\n\t}\n\treturn gm, nil\n\n}\n\n\/\/ package text input in GELF\nfunc buildGelfMessage_text(message []byte) (gm gelf.Message, err error) {\n\tgm = gelf.Message{\n\t\tVersion: \"1.1\",\n\t\tHost: \"unknown_amqp\",\n\t\tShort: string(message),\n\t\tTimeUnix: 0.0,\n\t\tLevel: 6, \/\/ info\n\t\tExtra: map[string]interface{}{},\n\t}\n\treturn gm, nil\n}\n\nfunc buildGelfMessage(message []byte, ctype string) (gm gelf.Message, err error) {\n\tif (ctype == \"application\/json\" || ctype == \"text\/json\") &&\n\t\tmessage[0] == '{' && message[len(message)-1] == '}' {\n\t\tgm, err = buildGelfMessage_json(message)\n\t} else {\n\t\tgm, err = buildGelfMessage_text(message)\n\t}\n\treturn\n}\n\n\/\/ handle AMQP delivery\nfunc writeLogsToGelf(deliveries <-chan amqp.Delivery, done chan error) {\n\tgraylogAddr := fmt.Sprintf(\"%s:%d\", *options.gelf_server, *options.gelf_port)\n\tgelfWriter, err := gelf.NewWriter(graylogAddr)\n\tif err != nil {\n\t\tdone <- fmt.Errorf(\"Cannot create gelf writer: %v\", err)\n\t\treturn\n\t}\n\n\tfor d := range deliveries {\n\t\tgm, err := buildGelfMessage(d.Body, d.ContentType)\n\t\tif err != nil {\n\t\t\td.Reject(false) \/\/ do not requeue\n\t\t\tcontinue\n\t\t}\n\t\tif *options.verbose {\n\t\t\tlog.Printf(\"Gelf Msg Obj: %#v\\n\", gm)\n\t\t}\n\t\terr = gelfWriter.WriteMessage(&gm)\n\t\tif err != nil {\n\t\t\tdone <- fmt.Errorf(\"Cannot send gelf msg: %v\", err)\n\t\t\td.Reject(false) \/\/ do not requeue\n\t\t\tcontinue\n\t\t}\n\t\td.Ack(false) \/\/ don't ack multiple\n\t}\n\tdone <- fmt.Errorf(\"done\")\n\treturn\n}\n\n\/\/ let the OS tell us to shutdown\nfunc osSignalHandler(shutdown chan<- string) {\n\tvar sigs = make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\tsig := <-sigs \/\/ this is the blocking part\n\n\tgo func(){\n\t\ttime.Sleep(2*time.Second)\n\t\tlog.Fatalf(\"shutdown was ignored, bailing out now.\\n\")\n\t}()\n\n\tshutdown <- fmt.Sprintf(\"received signal %v\", sig)\n}\n\nfunc main() {\n\tif *options.verbose {\n\t\tlog.Printf(\"Start %s %s\", thisProgram, thisVersion)\n\t}\n\t\/\/ let goroutines tell us to shutdown (on error)\n\tvar shutdown = make(chan string)\n\n\t\/\/ let the OS tell us to shutdown\n\tgo osSignalHandler(shutdown)\n\n\t\/\/ start input\n\tc, err := amqpConsumer(*options.uri, *options.queueName, shutdown)\n\tif err != nil {\n\t\t\/\/ cannot use shutdown channel, no listener yet\n\t\tlog.Fatalln(\"Fatal Error: \", err.Error())\n\t}\n\tgo func() {\n\t\terr = <-c.done\n\t\tshutdown <- fmt.Sprintln(err)\n\t}()\n\n\tmessage := <-shutdown\n\tlog.Println(\"The End.\", message)\n}\n<commit_msg>add verbosity<commit_after>\/*\namqp2gelf\n\nA simple tool to read from an AMQP queue and forwards as GELF\/UPD packet.\n\nFor JSON messages it tries to not change any fields.\nOther messages are put inside the GELF \"message\" field.\n\n\n2014, DECK36 GmbH & Co. KG, <martin.schuette@deck36.de>\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/DECK36\/go-gelf\/gelf\"\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst thisVersion = \"0.3\"\nconst thisProgram = \"amqp2gelf\"\n\n\/\/ all command line options\ntype CommandLineOptions struct {\n\turi *string\n\tqueueName *string\n\tgelf_server *string\n\tgelf_port *int\n\tverbose *bool\n}\n\nvar options CommandLineOptions\n\nfunc init() {\n\t\/\/ this does not look right...\n\t\/\/ I am looking for a pattern how to group command line arguments in a struct\n\toptions = CommandLineOptions{\n\t\tflag.String(\"uri\", \"amqp:\/\/user:password@broker.example.com:5672\/vhost\", \"AMQP URI\"),\n\t\tflag.String(\"queue\", \"logging_queue\", \"Durable AMQP queue name\"),\n\t\tflag.String(\"server\", \"localhost\", \"Graylog2 server\"),\n\t\tflag.Int(\"port\", 12201, \"Graylog2 GELF\/UDP port\"),\n\t\tflag.Bool(\"v\", false, \"Verbose output\"),\n\t}\n\tflag.Parse()\n}\n\ntype Consumer struct {\n\tconn *amqp.Connection\n\tchannel *amqp.Channel\n\ttag string\n\tdone chan error\n}\n\nfunc amqpConsumer(amqpURI string, amqpQueue string, shutdown chan<- string) (c *Consumer, err error) {\n\tamqpConfig := amqp.Config{\n\t\tProperties: amqp.Table{\n\t\t\t\"product\": thisProgram,\n\t\t\t\"version\": thisVersion,\n\t\t},\n\t}\n\tc = &Consumer{\n\t\tconn: nil,\n\t\tchannel: nil,\n\t\ttag: fmt.Sprintf(\"amqp2gelf-%d\", os.Getpid()),\n\t\tdone: make(chan error),\n\t}\n\n\t\/\/ this is the important part:\n\tif *options.verbose {\n\t\tlog.Println(\"connecting to \", amqpURI, \"...\")\n\t}\n\tc.conn, err = amqp.DialConfig(amqpURI, amqpConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"AMQP Dial: %s\", err)\n\t} else if *options.verbose {\n\t\tlog.Println(\"got connection\")\n\t}\n\tc.channel, err = c.conn.Channel()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"AMQP Channel: %s\", err)\n\t} else if *options.verbose {\n\t\tlog.Println(\"got channel\")\n\t}\n\n\t\/\/ here we only ensure the AMQP queue exists\n\tq, err := c.channel.QueueDeclare(\n\t\tamqpQueue, \/\/ name\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ auto-deleted\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Queue Declare: %v\", err)\n\t}\n\n\t\/\/ init the consumer\n\tdeliveries, err := c.channel.Consume(\n\t\tq.Name, \/\/ name\n\t\tc.tag, \/\/ consumerTag,\n\t\tfalse, \/\/ noAck\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noLocal\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Queue Consume: %s\", err)\n\t} else if *options.verbose {\n\t\tlog.Println(\"got consumer\")\n\t}\n\n\tgo writeLogsToGelf(deliveries, c.done)\n\n\tgo func() {\n\t\tnotification := c.channel.NotifyClose(make(chan *amqp.Error))\n\t\tn := <-notification\n\t\tc.done <- fmt.Errorf(\"AMQP server closed connection: %v\", n)\n\t}()\n\n\treturn\n}\n\n\/\/ convert JSON (possibly already GELF) input to GELF\nfunc buildGelfMessage_json(message []byte) (gm gelf.Message, err error) {\n\t\/\/ list of \"reserved\" field names\n\t\/\/ cf. https:\/\/github.com\/Graylog2\/graylog2-server\/blob\/0.20\/graylog2-plugin-interfaces\/src\/main\/java\/org\/graylog2\/plugin\/Message.java#L61 and #L81\n\t\/\/ Go does not allow const maps :-\/\n\tgelfReservedField := map[string]bool{\n\t\t\"_id\": true,\n\t\t\"_ttl\": true,\n\t\t\"_source\": true,\n\t\t\"_all\": true,\n\t\t\"_index\": true,\n\t\t\"_type\": true,\n\t\t\"_score\": true,\n\t}\n\n\tvar emptyinterface interface{}\n\terr = json.Unmarshal(message, &emptyinterface)\n\tif err != nil {\n\t\tif *options.verbose {\n\t\t\tlog.Printf(\"Cannot parse JSON, err: %v, msg: '%s'\", err, message)\n\t\t}\n\t\treturn\n\t}\n\tjm := emptyinterface.(map[string]interface{})\n\n\t\/\/ rename reserved field names (with and w\/o '_')\n\t\/\/ note: we do not double check if 'renamed_xyz' is already present\n\tfor k, v := range jm {\n\t\tif gelfReservedField[k] {\n\t\t\tjm[\"renamed\"+k] = v\n\t\t\tdelete(jm, k)\n\t\t} else if gelfReservedField[\"_\"+k] {\n\t\t\tjm[\"renamed_\"+k] = v\n\t\t\tdelete(jm, k)\n\t\t}\n\t}\n\n\t\/\/ ensure some required fields are set, use defaults if missing\n\tvar gelf_hostname string = \"unknown_amqp\"\n\tif _, ok := jm[\"host\"]; ok {\n\t\tgelf_hostname = jm[\"host\"].(string)\n\t}\n\n\tvar gelf_shortmsg string = \"\"\n\tif _, ok := jm[\"short_message\"]; ok {\n\t\tgelf_shortmsg = jm[\"short_message\"].(string)\n\t}\n\n\tvar gelf_timestamp float64 = 0.0\n\tif _, ok := jm[\"timestamp\"]; ok {\n\t\tswitch tsval := jm[\"timestamp\"].(type) {\n\t\tcase float64:\n\t\t\tgelf_timestamp = tsval\n\t\tcase string:\n\t\t\tgelf_timestamp, _ = strconv.ParseFloat(tsval, 64)\n\t\t}\n\t}\n\n\tvar gelf_level int32 = 6 \/\/ info\n\tif _, ok := jm[\"level\"]; ok {\n\t\tgelf_level = jm[\"level\"].(int32)\n\t}\n\n\tvar gelf_version string = \"1.1\"\n\tif _, ok := jm[\"version\"]; ok {\n\t\tgelf_version = jm[\"version\"].(string)\n\t}\n\n\tgm = gelf.Message{\n\t\tVersion: gelf_version,\n\t\tHost: gelf_hostname,\n\t\tShort: gelf_shortmsg,\n\t\tTimeUnix: gelf_timestamp,\n\t\tLevel: gelf_level,\n\t\tExtra: jm,\n\t}\n\treturn gm, nil\n\n}\n\n\/\/ package text input in GELF\nfunc buildGelfMessage_text(message []byte) (gm gelf.Message, err error) {\n\tgm = gelf.Message{\n\t\tVersion: \"1.1\",\n\t\tHost: \"unknown_amqp\",\n\t\tShort: string(message),\n\t\tTimeUnix: 0.0,\n\t\tLevel: 6, \/\/ info\n\t\tExtra: map[string]interface{}{},\n\t}\n\treturn gm, nil\n}\n\nfunc buildGelfMessage(message []byte, ctype string) (gm gelf.Message, err error) {\n\tif (ctype == \"application\/json\" || ctype == \"text\/json\") &&\n\t\tmessage[0] == '{' && message[len(message)-1] == '}' {\n\t\tgm, err = buildGelfMessage_json(message)\n\t} else {\n\t\tgm, err = buildGelfMessage_text(message)\n\t}\n\treturn\n}\n\n\/\/ handle AMQP delivery\nfunc writeLogsToGelf(deliveries <-chan amqp.Delivery, done chan error) {\n\tgraylogAddr := fmt.Sprintf(\"%s:%d\", *options.gelf_server, *options.gelf_port)\n\tgelfWriter, err := gelf.NewWriter(graylogAddr)\n\tif err != nil {\n\t\tdone <- fmt.Errorf(\"Cannot create gelf writer: %v\", err)\n\t\treturn\n\t}\n\n\tfor d := range deliveries {\n\t\tgm, err := buildGelfMessage(d.Body, d.ContentType)\n\t\tif err != nil {\n\t\t\td.Reject(false) \/\/ do not requeue\n\t\t\tif *options.verbose {\n\t\t\t\tlog.Printf(\"Rejected msg: %#v\\n\", d.Body)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif *options.verbose {\n\t\t\tlog.Printf(\"sent msg: %f %s\\n\", gm.TimeUnix, gm.Short)\n\t\t}\n\t\terr = gelfWriter.WriteMessage(&gm)\n\t\tif err != nil {\n\t\t\tdone <- fmt.Errorf(\"Cannot send gelf msg: %v\", err)\n\t\t\td.Reject(false) \/\/ do not requeue\n\t\t\tcontinue\n\t\t}\n\t\td.Ack(false) \/\/ don't ack multiple\n\t}\n\tdone <- fmt.Errorf(\"done\")\n\treturn\n}\n\n\/\/ let the OS tell us to shutdown\nfunc osSignalHandler(shutdown chan<- string) {\n\tvar sigs = make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\tsig := <-sigs \/\/ this is the blocking part\n\n\tgo func(){\n\t\ttime.Sleep(2*time.Second)\n\t\tlog.Fatalf(\"shutdown was ignored, bailing out now.\\n\")\n\t}()\n\n\tshutdown <- fmt.Sprintf(\"received signal %v\", sig)\n}\n\nfunc main() {\n\tif *options.verbose {\n\t\tlog.Printf(\"Start %s %s\", thisProgram, thisVersion)\n\t}\n\t\/\/ let goroutines tell us to shutdown (on error)\n\tvar shutdown = make(chan string)\n\n\t\/\/ let the OS tell us to shutdown\n\tgo osSignalHandler(shutdown)\n\n\t\/\/ start input\n\tc, err := amqpConsumer(*options.uri, *options.queueName, shutdown)\n\tif err != nil {\n\t\t\/\/ cannot use shutdown channel, no listener yet\n\t\tlog.Fatalln(\"Fatal Error: \", err.Error())\n\t}\n\tgo func() {\n\t\terr = <-c.done\n\t\tshutdown <- fmt.Sprintln(err)\n\t}()\n\n\tmessage := <-shutdown\n\tlog.Println(\"The End.\", message)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tmonInterface = \"en0\"\n\tstatusUp = \"UP\"\n\tstatusDown = \"DOWN\"\n)\n\n\/\/ Host capture a host info in net\ntype Host struct {\n\tIP string\n\tMAC string\n\tHostname string\n\tPings []Event\n}\n\nfunc (h Host) String() string {\n\treturn fmt.Sprintf(\"Host %s has IP %s, MAC: %s\", h.Hostname, h.IP, h.MAC)\n}\n\nfunc (h *Host) checkPing() error {\n\te := Event{When: time.Now()}\n\tout, err := exec.Command(\"nmap\", \"-sP\", h.IP).Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.Contains(string(out), \"1 host up\") {\n\t\te.Up = true\n\t}\n\th.Pings = append(h.Pings, e)\n\tlog.Println(h.Pings)\n\treturn nil\n}\n\nfunc handleErr(err error) {\n\tlog.Println(\"Error: \", err)\n}\n\nfunc getIPAndMAC(line string) (string, string) {\n\tlines := strings.Split(line, \"(\")\n\ttmp := strings.Split(lines[1], \")\")\n\tIP := tmp[0]\n\tMAC := strings.Split(tmp[1], \" \")[2]\n\treturn IP, MAC\n}\n\nfunc arpEntries() []Host {\n\tvar hosts []Host\n\tout, err := exec.Command(\"arp\", \"-na\").Output()\n\tif err != nil {\n\t\thandleErr(err)\n\t}\n\tstrOut := string(out)\n\tlines := strings.Split(strOut, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, monInterface) {\n\t\t\th := Host{}\n\t\t\th.IP, h.MAC = getIPAndMAC(line)\n\t\t\thosts = append(hosts, h)\n\t\t}\n\t}\n\treturn hosts\n}\n\nfunc display(hs []Host) {\n\tfor idx, h := range hs {\n\t\tlog.Printf(\"%d. %s\", idx+1, h)\n\t}\n\treturn\n}\n\nfunc myLANIP() (string, error) {\n\tifis, err := net.Interfaces()\n\tif err != nil {\n\t\thandleErr(err)\n\t}\n\tfor _, ifi := range ifis {\n\t\tif ifi.Name == monInterface {\n\t\t\taddrs, err := ifi.Addrs()\n\t\t\tif err != nil {\n\t\t\t\thandleErr(err)\n\t\t\t}\n\t\t\tlog.Println(addrs)\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\thandleErr(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif ip.To4() != nil {\n\t\t\t\t\treturn addr.String(), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", err\n}\n\nfunc nmapScanAll() {\n\tmyIP, err := myLANIP()\n\thandleErr(err)\n\tout, err := exec.Command(\"nmap\", \"-A\", \"-T4\", myIP).Output()\n\thandleErr(err)\n\tlog.Println(string(out))\n}\nfunc nmapEntries() {\n\tmyIP, err := myLANIP()\n\thandleErr(err)\n\tout, err := exec.Command(\"nmap\", \"-sP\", \"-PA21,22,25,3389\", myIP).Output()\n\thandleErr(err)\n\tlog.Println(string(out))\n}\n\n\/\/ Event capture\ntype Event struct {\n\tWhen time.Time\n\tUp bool\n}\n\nfunc (e Event) String() string {\n\tvar s string\n\tif e.Up {\n\t\ts = statusUp\n\t} else {\n\t\ts = statusDown\n\t}\n\n\treturn fmt.Sprintf(\"Event %s at %s\", s, e.When)\n}\n\nfunc checkAll(hosts []Host) {\n\terrChan := make(chan error)\n\tfor _, host := range hosts {\n\t\tgo func() {\n\t\t\terrChan <- host.checkPing()\n\t\t}()\n\t}\n\tclose(errChan)\n\n\tfor e := range errChan {\n\t\thandleErr(e)\n\t}\n}\n\nfunc main() {\n\thosts := arpEntries()\n\tcheckAll(hosts)\n\t\/\/ TODO add nmap entries to ping check\n\tnmapEntries()\n\n\t\/\/ For every 5 mins, run a check and save result to db.\n\t\/\/ Loop over all host in buckets and all host that nmap can ping\n\t\/\/ If a host change its state (leaving), save data to db and report.\n\t\/\/ If host is new, add it\n\t\/\/ If host does not change, do nothing\n\tnmapScanAll()\n}\n<commit_msg>Simple string output<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tmonInterface = \"en0\"\n\tstatusUp = \"UP\"\n\tstatusDown = \"DOWN\"\n)\n\n\/\/ Host capture a host info in net\ntype Host struct {\n\tIP string\n\tMAC string\n\tHostname string\n\tPings []Event\n}\n\nfunc (h Host) String() string {\n\treturn fmt.Sprintf(\"Host %s has IP %s, MAC: %s\", h.Hostname, h.IP, h.MAC)\n}\n\nfunc (h *Host) checkPing() error {\n\te := Event{When: time.Now()}\n\tout, err := exec.Command(\"nmap\", \"-sP\", h.IP).Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.Contains(string(out), \"1 host up\") {\n\t\te.Up = true\n\t}\n\th.Pings = append(h.Pings, e)\n\tlog.Println(h.Pings)\n\treturn nil\n}\n\nfunc handleErr(err error) {\n\tlog.Println(\"Error: \", err)\n}\n\nfunc getIPAndMAC(line string) (string, string) {\n\tlines := strings.Split(line, \"(\")\n\ttmp := strings.Split(lines[1], \")\")\n\tIP := tmp[0]\n\tMAC := strings.Split(tmp[1], \" \")[2]\n\treturn IP, MAC\n}\n\nfunc arpEntries() []Host {\n\tvar hosts []Host\n\tout, err := exec.Command(\"arp\", \"-na\").Output()\n\tif err != nil {\n\t\thandleErr(err)\n\t}\n\tstrOut := string(out)\n\tlines := strings.Split(strOut, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, monInterface) {\n\t\t\th := Host{}\n\t\t\th.IP, h.MAC = getIPAndMAC(line)\n\t\t\thosts = append(hosts, h)\n\t\t}\n\t}\n\treturn hosts\n}\n\nfunc display(hs []Host) {\n\tfor idx, h := range hs {\n\t\tlog.Printf(\"%d. %s\", idx+1, h)\n\t}\n\treturn\n}\n\nfunc myLANIP() (string, error) {\n\tifis, err := net.Interfaces()\n\tif err != nil {\n\t\thandleErr(err)\n\t}\n\tfor _, ifi := range ifis {\n\t\tif ifi.Name == monInterface {\n\t\t\taddrs, err := ifi.Addrs()\n\t\t\tif err != nil {\n\t\t\t\thandleErr(err)\n\t\t\t}\n\t\t\tlog.Println(addrs)\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\thandleErr(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif ip.To4() != nil {\n\t\t\t\t\treturn addr.String(), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", err\n}\n\nfunc nmapScanAll() {\n\tmyIP, err := myLANIP()\n\thandleErr(err)\n\tout, err := exec.Command(\"nmap\", \"-A\", \"-T4\", myIP).Output()\n\thandleErr(err)\n\tlog.Println(string(out))\n}\nfunc nmapEntries() {\n\tmyIP, err := myLANIP()\n\thandleErr(err)\n\tout, err := exec.Command(\"nmap\", \"-sP\", \"-PA21,22,25,3389\", myIP).Output()\n\thandleErr(err)\n\tlog.Println(string(out))\n}\n\n\/\/ Event capture\ntype Event struct {\n\tWhen time.Time\n\tUp bool\n}\n\nfunc (e Event) String() string {\n\ts := StatusDown\n\tif e.Up {\n\t\ts = statusUp\n\t}\n\treturn fmt.Sprintf(\"Event %s at %s\", s, e.When)\n}\n\nfunc checkAll(hosts []Host) {\n\terrChan := make(chan error)\n\tfor _, host := range hosts {\n\t\tgo func() {\n\t\t\terrChan <- host.checkPing()\n\t\t}()\n\t}\n\tclose(errChan)\n\n\tfor e := range errChan {\n\t\thandleErr(e)\n\t}\n}\n\nfunc main() {\n\thosts := arpEntries()\n\tcheckAll(hosts)\n\t\/\/ TODO add nmap entries to ping check\n\tnmapEntries()\n\n\t\/\/ For every 5 mins, run a check and save result to db.\n\t\/\/ Loop over all host in buckets and all host that nmap can ping\n\t\/\/ If a host change its state (leaving), save data to db and report.\n\t\/\/ If host is new, add it\n\t\/\/ If host does not change, do nothing\n\tnmapScanAll()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ RIM - Remote Interfaces Monitor\n\n\/* Copyright (c) 2014 Andrea Masi\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE. *\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"code.google.com\/p\/go.crypto\/ssh\/agent\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar workers = runtime.NumCPU()\n\nconst (\n\tRemoteCommand = `cat \/proc\/net\/dev; echo ZZZ; sleep 1; cat \/proc\/net\/dev;`\n\tSeparator = \"ZZZ\\n\"\n)\n\n\/\/ interfaceData models single interface' data for a given host\ntype interfaceData struct {\n\thost string\n\tname string\n\trates map[string]uint64\n\terr error\n}\n\ntype jobResult struct {\n\thost string\n\terr error\n\t\/\/ es: map[string]map[string]uint64{\"eth0\": map[string]uint64{\"tx-Bps\":12000, \"rx-Bps\":12000}}\n\tdata map[string]map[string]uint64\n}\n\ntype job struct {\n\thost string\n\tsshClientConfig ssh.ClientConfig\n\tresult chan<- jobResult\n}\n\n\/\/ unpackJobResult reads data from a jobResult and unpack it into a slice of interfaceData, one for each interface.\nfunc unpackJobResult(jr *jobResult) []interfaceData {\n\tdata := []interfaceData{}\n\tif jr.err != nil {\n\t\treturn []interfaceData{interfaceData{jr.host, \"\", nil, jr.err}}\n\t}\n\tfor kInterface, vMap := range jr.data {\n\t\ti := interfaceData{jr.host, kInterface, vMap, nil}\n\t\tdata = append(data, i)\n\t}\n\treturn data\n}\n\nfunc sanitizeHost(s *string) {\n\tif !strings.Contains(*s, \":\") {\n\t\t*s = fmt.Sprintf(\"%s:%d\", *s, 22)\n\t}\n}\n\nfunc splitOnSpaces(s string) []string {\n\t\/\/ Remove leading a trailing white spaces that gives wrong results with regexp below\n\ttrimmedS := strings.Trim(s, \" \")\n\treturn regexp.MustCompile(`\\s+`).Split(trimmedS, -1)\n}\n\n\/\/ makeValueMap creates map with t2 or t1 values\nfunc makeValueMap(data []string) map[string]uint64 {\n\tdataMap := make(map[string]uint64)\n\tfor i, s := range data {\n\t\t\/\/ FIXME parse error\n\t\tconverted, _ := strconv.ParseUint(s, 10, 64)\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"rx-Bps\"] = converted\n\t\tcase 1:\n\t\t\t\/\/ here its not packets per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store pps later\n\t\t\tdataMap[\"rx-pps\"] = converted\n\t\tcase 2:\n\t\t\t\/\/ here its not errors per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store eps later\n\t\t\tdataMap[\"rx-eps\"] = converted\n\t\tcase 3:\n\t\t\t\/\/ here its not drop per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store dps later\n\t\t\tdataMap[\"rx-dps\"] = converted\n\t\tcase 8:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"tx-Bps\"] = converted\n\t\tcase 9:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"tx-pps\"] = converted\n\t\tcase 10:\n\t\t\t\/\/ here its not errors per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store eps later\n\t\t\tdataMap[\"tx-eps\"] = converted\n\t\tcase 11:\n\t\t\t\/\/ here its not drops per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store dps later\n\t\t\tdataMap[\"tx-dps\"] = converted\n\t\t}\n\t}\n\treturn dataMap\n}\n\n\/\/ calculateRates uses t2 values stored in dataAtT2 by makeValueMap to calculate rates\nfunc calculateRates(dataAtT2, dataAtT1 map[string]uint64) {\n\tfor k, v := range dataAtT2 {\n\t\t\/\/ assuming that ΔT is always 1 second (sleep 1)\n\t\tdataAtT2[k] = v - dataAtT1[k]\n\t}\n}\n\n\/\/ parseOutput arranges RemoteCommand output and calculates rates\nfunc parseOutput(out *bytes.Buffer, data map[string]map[string]uint64) error {\n\toutBytes := bytes.Split(out.Bytes(), []byte(Separator))\n\t\/\/ Contains interfaces' value at t1\n\toutOne := outBytes[0]\n\t\/\/ Contains interfaces' value at t2\n\toutTwo := outBytes[1]\n\tscanner := bufio.NewScanner(bytes.NewBuffer(outTwo))\n\tfor scanner.Scan() {\n\t\ts := scanner.Text()\n\t\t\/\/ Excludes titles\n\t\tif strings.Contains(s, \"|\") {\n\t\t\tcontinue\n\t\t}\n\t\tsplittedRow := strings.Split(s, \":\")\n\t\t\/\/ remove white spaces\n\t\tiface := strings.Replace(splittedRow[0], \" \", \"\", -1)\n\t\tcountersData := splitOnSpaces(splittedRow[1])\n\t\tdebugPrintln(\"parsed data @ t2:\", iface, countersData)\n\t\tdata[iface] = makeValueMap(countersData)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\tscanner = bufio.NewScanner(bytes.NewBuffer(outOne))\n\tfor scanner.Scan() {\n\t\ts := scanner.Text()\n\t\t\/\/ Excludes titles\n\t\tif strings.Contains(s, \"|\") {\n\t\t\tcontinue\n\t\t}\n\t\tsplittedRow := strings.Split(s, \":\")\n\t\t\/\/ remove white spaces\n\t\tiface := strings.Replace(splittedRow[0], \" \", \"\", -1)\n\t\tcountersData := splitOnSpaces(splittedRow[1])\n\t\tdataAtT1 := makeValueMap(countersData)\n\t\tdebugPrintln(\"parsed data @ t1:\", iface, countersData)\n\t\tcalculateRates(data[iface], dataAtT1)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (j *job) getRemoteData() {\n\tvar output bytes.Buffer\n\tdestination := j.host\n\tsanitizeHost(&destination)\n\tconn, err := ssh.Dial(\"tcp\", destination, &j.sshClientConfig)\n\tif err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tdefer session.Close()\n\tsession.Stdout = &output\n\tif err := session.Run(RemoteCommand); err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tdebugPrintln(output.String())\n\tdata := make(map[string]map[string]uint64)\n\tif err := parseOutput(&output, data); err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t}\n\tj.result <- jobResult{j.host, nil, data}\n}\n\nfunc createSshConfig(user, passwd string) ssh.ClientConfig {\n\tsshAuthSock := os.Getenv(\"SSH_AUTH_SOCK\")\n\tauthMethods := []ssh.AuthMethod{ssh.Password(passwd)}\n\tif sshAuthSock != \"\" {\n\t\tdebugPrintln(\"ssh-agent socket:\", sshAuthSock)\n\t\tsocket, err := net.Dial(\"unix\", sshAuthSock)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t} else {\n\t\t\tagentClient := agent.NewClient(socket)\n\t\t\tauthMethod := ssh.PublicKeysCallback(agentClient.Signers)\n\t\t\tauthMethods = append(authMethods, authMethod)\n\t\t\t\/\/ FIXME works even without calling agent.ForwardToAgent()?\n\t\t\tdebugPrintln(\"ssh-agent forwarding configured\")\n\t\t}\n\t}\n\treturn ssh.ClientConfig{\n\t\tUser: user,\n\t\tAuth: authMethods,\n\t}\n}\n\nfunc populateQueue(jobs chan<- job, results chan<- jobResult, hosts []string, sshConfig ssh.ClientConfig) {\n\tfor _, host := range hosts {\n\t\tjobs <- job{host, sshConfig, results}\n\t}\n\tclose(jobs)\n}\n\nfunc evaluateQueue(jobs <-chan job) {\n\tfor j := range jobs {\n\t\tj.getRemoteData()\n\t}\n}\n\nfunc parallelizeWorkers(jQueue chan job) {\n\tfor i := 0; i < workers; i++ {\n\t\tgo evaluateQueue(jQueue)\n\t}\n}\n\nfunc presentSingleResult(r *jobResult) {\n\tfmt.Printf(\"%20.20s%12.12s%8.8s%8.8s\\n\", \"Host\", \"Interface\", \"RX-KBps\", \"TX-KBps\")\n\tif r.err != nil {\n\t\tfmt.Println(r.host, r.err)\n\t} else {\n\t\t\/\/ k is remote interface\n\t\t\/\/ v is a map with rates\n\t\tfor k, v := range r.data {\n\t\t\tfmt.Printf(\"%20.20s\", r.host)\n\t\t\tfmt.Printf(\"%12.12s\", k)\n\t\t\tfmt.Printf(\"%8d\", uint64(v[\"rx-Bps\"]\/1024))\n\t\t\tfmt.Printf(\"%8d\", uint64(v[\"tx-Bps\"]\/1024))\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n}\n\nfunc printHead() {\n\tfmt.Printf(\n\t\t\"%20s%12s%9s%9s%12s%12s%12s%12s%12s%12s\\n\",\n\t\t\"Host\",\n\t\t\"Interface\",\n\t\t\"Rx-Kb\/s\",\n\t\t\"Tx-Kb\/s\",\n\t\t\"Rx-Pckts\/s\",\n\t\t\"Tx-Pckts\/s\",\n\t\t\"Rx-Drp\/s\",\n\t\t\"Tx-Drp\/s\",\n\t\t\"Rx-Err\/s\",\n\t\t\"Tx-Err\/s\",\n\t)\n}\n\nfunc displayResults(results []interfaceData, noHead bool) {\n\tfor i, r := range results {\n\t\tif i%40 == 0 && !noHead {\n\t\t\tprintHead()\n\t\t}\n\t\tif r.err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"[ERROR]\", r.host, r.err)\n\t\t} else {\n\t\t\tfmt.Printf(\"%20s\", r.host)\n\t\t\tfmt.Printf(\"%12s\", r.name)\n\t\t\tfmt.Printf(\"%9d\", uint64(r.rates[\"rx-Bps\"]*8\/1024))\n\t\t\tfmt.Printf(\"%9d\", uint64(r.rates[\"tx-Bps\"]*8\/1024))\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-pps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-pps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-dps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-dps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-eps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-eps\"])\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n}\n\nfunc getHostsFromFile(path string) []string {\n\t\/\/ FIXME sanityze user input\n\tif path == \"{filename}\" {\n\t\tfmt.Fprintln(os.Stderr, \"-f is mandatory\")\n\t\tos.Exit(2)\n\t}\n\tbytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error reading file\", err)\n\t\tos.Exit(2)\n\t}\n\thosts := strings.Split(string(bytes), \"\\n\")\n\t\/\/ remove last empty element\n\thosts = hosts[:len(hosts)-1]\n\tdebugPrintln(\"Parsed hosts from file:\", hosts)\n\treturn hosts\n}\n\nfunc main() {\n\tjobsQueue := make(chan job, workers)\n\tresultQueue := make(chan jobResult, workers)\n\thostsFileFlag := flag.String(\"f\", \"{filename}\", \" [FILE] file containing target hosts, one per line.\")\n\tuserFlag := flag.String(\"u\", \"root\", \"[USERNAME] ssh username.\")\n\tpasswdFlag := flag.String(\"p\", \"nopassword\", \"[PASSWORD] ssh password for remote hosts. Automatically use ssh-agent as fallback.\")\n\tnoHeadFlag := flag.Bool(\"n\", false, \"Do not show titles.\")\n\tflag.Parse()\n\thosts := getHostsFromFile(*hostsFileFlag)\n\tsshConfig := createSshConfig(*userFlag, *passwdFlag)\n\tresultCounts := 0\n\tinterfacesData := make([]interfaceData, 0, len(hosts))\n\truntime.GOMAXPROCS(workers)\n\tgo populateQueue(jobsQueue, resultQueue, hosts, sshConfig)\n\tgo parallelizeWorkers(jobsQueue)\n\tfor {\n\t\t\/\/ FIXME make a case for timeout\n\t\tselect {\n\t\tcase jobResult := <-resultQueue:\n\t\t\tresultCounts++\n\t\t\tinterfacesData = append(interfacesData, unpackJobResult(&jobResult)...)\n\t\t\tif resultCounts == len(hosts) {\n\t\t\t\tdisplayResults(interfacesData, *noHeadFlag)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Add version flag<commit_after>\/\/ RIM - Remote Interfaces Monitor\n\n\/* Copyright (c) 2014 Andrea Masi\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE. *\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"code.google.com\/p\/go.crypto\/ssh\/agent\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar workers = runtime.NumCPU()\n\nconst (\n\tRemoteCommand = `cat \/proc\/net\/dev; echo ZZZ; sleep 1; cat \/proc\/net\/dev;`\n\tSeparator = \"ZZZ\\n\"\n)\n\n\/\/ interfaceData models single interface' data for a given host\ntype interfaceData struct {\n\thost string\n\tname string\n\trates map[string]uint64\n\terr error\n}\n\ntype jobResult struct {\n\thost string\n\terr error\n\t\/\/ es: map[string]map[string]uint64{\"eth0\": map[string]uint64{\"tx-Bps\":12000, \"rx-Bps\":12000}}\n\tdata map[string]map[string]uint64\n}\n\ntype job struct {\n\thost string\n\tsshClientConfig ssh.ClientConfig\n\tresult chan<- jobResult\n}\n\n\/\/ unpackJobResult reads data from a jobResult and unpack it into a slice of interfaceData, one for each interface.\nfunc unpackJobResult(jr *jobResult) []interfaceData {\n\tdata := []interfaceData{}\n\tif jr.err != nil {\n\t\treturn []interfaceData{interfaceData{jr.host, \"\", nil, jr.err}}\n\t}\n\tfor kInterface, vMap := range jr.data {\n\t\ti := interfaceData{jr.host, kInterface, vMap, nil}\n\t\tdata = append(data, i)\n\t}\n\treturn data\n}\n\nfunc sanitizeHost(s *string) {\n\tif !strings.Contains(*s, \":\") {\n\t\t*s = fmt.Sprintf(\"%s:%d\", *s, 22)\n\t}\n}\n\nfunc splitOnSpaces(s string) []string {\n\t\/\/ Remove leading a trailing white spaces that gives wrong results with regexp below\n\ttrimmedS := strings.Trim(s, \" \")\n\treturn regexp.MustCompile(`\\s+`).Split(trimmedS, -1)\n}\n\n\/\/ makeValueMap creates map with t2 or t1 values\nfunc makeValueMap(data []string) map[string]uint64 {\n\tdataMap := make(map[string]uint64)\n\tfor i, s := range data {\n\t\t\/\/ FIXME parse error\n\t\tconverted, _ := strconv.ParseUint(s, 10, 64)\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"rx-Bps\"] = converted\n\t\tcase 1:\n\t\t\t\/\/ here its not packets per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store pps later\n\t\t\tdataMap[\"rx-pps\"] = converted\n\t\tcase 2:\n\t\t\t\/\/ here its not errors per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store eps later\n\t\t\tdataMap[\"rx-eps\"] = converted\n\t\tcase 3:\n\t\t\t\/\/ here its not drop per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store dps later\n\t\t\tdataMap[\"rx-dps\"] = converted\n\t\tcase 8:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"tx-Bps\"] = converted\n\t\tcase 9:\n\t\t\t\/\/ here its not bytes per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store Bps later\n\t\t\tdataMap[\"tx-pps\"] = converted\n\t\tcase 10:\n\t\t\t\/\/ here its not errors per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store eps later\n\t\t\tdataMap[\"tx-eps\"] = converted\n\t\tcase 11:\n\t\t\t\/\/ here its not drops per second but absolute bytes at t2\n\t\t\t\/\/ however we'll store dps later\n\t\t\tdataMap[\"tx-dps\"] = converted\n\t\t}\n\t}\n\treturn dataMap\n}\n\n\/\/ calculateRates uses t2 values stored in dataAtT2 by makeValueMap to calculate rates\nfunc calculateRates(dataAtT2, dataAtT1 map[string]uint64) {\n\tfor k, v := range dataAtT2 {\n\t\t\/\/ assuming that ΔT is always 1 second (sleep 1)\n\t\tdataAtT2[k] = v - dataAtT1[k]\n\t}\n}\n\n\/\/ parseOutput arranges RemoteCommand output and calculates rates\nfunc parseOutput(out *bytes.Buffer, data map[string]map[string]uint64) error {\n\toutBytes := bytes.Split(out.Bytes(), []byte(Separator))\n\t\/\/ Contains interfaces' value at t1\n\toutOne := outBytes[0]\n\t\/\/ Contains interfaces' value at t2\n\toutTwo := outBytes[1]\n\tscanner := bufio.NewScanner(bytes.NewBuffer(outTwo))\n\tfor scanner.Scan() {\n\t\ts := scanner.Text()\n\t\t\/\/ Excludes titles\n\t\tif strings.Contains(s, \"|\") {\n\t\t\tcontinue\n\t\t}\n\t\tsplittedRow := strings.Split(s, \":\")\n\t\t\/\/ remove white spaces\n\t\tiface := strings.Replace(splittedRow[0], \" \", \"\", -1)\n\t\tcountersData := splitOnSpaces(splittedRow[1])\n\t\tdebugPrintln(\"parsed data @ t2:\", iface, countersData)\n\t\tdata[iface] = makeValueMap(countersData)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\tscanner = bufio.NewScanner(bytes.NewBuffer(outOne))\n\tfor scanner.Scan() {\n\t\ts := scanner.Text()\n\t\t\/\/ Excludes titles\n\t\tif strings.Contains(s, \"|\") {\n\t\t\tcontinue\n\t\t}\n\t\tsplittedRow := strings.Split(s, \":\")\n\t\t\/\/ remove white spaces\n\t\tiface := strings.Replace(splittedRow[0], \" \", \"\", -1)\n\t\tcountersData := splitOnSpaces(splittedRow[1])\n\t\tdataAtT1 := makeValueMap(countersData)\n\t\tdebugPrintln(\"parsed data @ t1:\", iface, countersData)\n\t\tcalculateRates(data[iface], dataAtT1)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (j *job) getRemoteData() {\n\tvar output bytes.Buffer\n\tdestination := j.host\n\tsanitizeHost(&destination)\n\tconn, err := ssh.Dial(\"tcp\", destination, &j.sshClientConfig)\n\tif err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tdefer session.Close()\n\tsession.Stdout = &output\n\tif err := session.Run(RemoteCommand); err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t\treturn\n\t}\n\tdebugPrintln(output.String())\n\tdata := make(map[string]map[string]uint64)\n\tif err := parseOutput(&output, data); err != nil {\n\t\tj.result <- jobResult{j.host, err, nil}\n\t}\n\tj.result <- jobResult{j.host, nil, data}\n}\n\nfunc createSshConfig(user, passwd string) ssh.ClientConfig {\n\tsshAuthSock := os.Getenv(\"SSH_AUTH_SOCK\")\n\tauthMethods := []ssh.AuthMethod{ssh.Password(passwd)}\n\tif sshAuthSock != \"\" {\n\t\tdebugPrintln(\"ssh-agent socket:\", sshAuthSock)\n\t\tsocket, err := net.Dial(\"unix\", sshAuthSock)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t} else {\n\t\t\tagentClient := agent.NewClient(socket)\n\t\t\tauthMethod := ssh.PublicKeysCallback(agentClient.Signers)\n\t\t\tauthMethods = append(authMethods, authMethod)\n\t\t\t\/\/ FIXME works even without calling agent.ForwardToAgent()?\n\t\t\tdebugPrintln(\"ssh-agent forwarding configured\")\n\t\t}\n\t}\n\treturn ssh.ClientConfig{\n\t\tUser: user,\n\t\tAuth: authMethods,\n\t}\n}\n\nfunc populateQueue(jobs chan<- job, results chan<- jobResult, hosts []string, sshConfig ssh.ClientConfig) {\n\tfor _, host := range hosts {\n\t\tjobs <- job{host, sshConfig, results}\n\t}\n\tclose(jobs)\n}\n\nfunc evaluateQueue(jobs <-chan job) {\n\tfor j := range jobs {\n\t\tj.getRemoteData()\n\t}\n}\n\nfunc parallelizeWorkers(jQueue chan job) {\n\tfor i := 0; i < workers; i++ {\n\t\tgo evaluateQueue(jQueue)\n\t}\n}\n\nfunc presentSingleResult(r *jobResult) {\n\tfmt.Printf(\"%20.20s%12.12s%8.8s%8.8s\\n\", \"Host\", \"Interface\", \"RX-KBps\", \"TX-KBps\")\n\tif r.err != nil {\n\t\tfmt.Println(r.host, r.err)\n\t} else {\n\t\t\/\/ k is remote interface\n\t\t\/\/ v is a map with rates\n\t\tfor k, v := range r.data {\n\t\t\tfmt.Printf(\"%20.20s\", r.host)\n\t\t\tfmt.Printf(\"%12.12s\", k)\n\t\t\tfmt.Printf(\"%8d\", uint64(v[\"rx-Bps\"]\/1024))\n\t\t\tfmt.Printf(\"%8d\", uint64(v[\"tx-Bps\"]\/1024))\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n}\n\nfunc printHead() {\n\tfmt.Printf(\n\t\t\"%20s%12s%9s%9s%12s%12s%12s%12s%12s%12s\\n\",\n\t\t\"Host\",\n\t\t\"Interface\",\n\t\t\"Rx-Kb\/s\",\n\t\t\"Tx-Kb\/s\",\n\t\t\"Rx-Pckts\/s\",\n\t\t\"Tx-Pckts\/s\",\n\t\t\"Rx-Drp\/s\",\n\t\t\"Tx-Drp\/s\",\n\t\t\"Rx-Err\/s\",\n\t\t\"Tx-Err\/s\",\n\t)\n}\n\nfunc displayResults(results []interfaceData, noHead bool) {\n\tfor i, r := range results {\n\t\tif i%40 == 0 && !noHead {\n\t\t\tprintHead()\n\t\t}\n\t\tif r.err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"[ERROR]\", r.host, r.err)\n\t\t} else {\n\t\t\tfmt.Printf(\"%20s\", r.host)\n\t\t\tfmt.Printf(\"%12s\", r.name)\n\t\t\tfmt.Printf(\"%9d\", uint64(r.rates[\"rx-Bps\"]*8\/1024))\n\t\t\tfmt.Printf(\"%9d\", uint64(r.rates[\"tx-Bps\"]*8\/1024))\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-pps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-pps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-dps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-dps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"rx-eps\"])\n\t\t\tfmt.Printf(\"%12d\", r.rates[\"tx-eps\"])\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n}\n\nfunc getHostsFromFile(path string) []string {\n\t\/\/ FIXME sanityze user input\n\tif path == \"{filename}\" {\n\t\tfmt.Fprintln(os.Stderr, \"-f is mandatory\")\n\t\tos.Exit(2)\n\t}\n\tbytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error reading file\", err)\n\t\tos.Exit(2)\n\t}\n\thosts := strings.Split(string(bytes), \"\\n\")\n\t\/\/ remove last empty element\n\thosts = hosts[:len(hosts)-1]\n\tdebugPrintln(\"Parsed hosts from file:\", hosts)\n\treturn hosts\n}\n\nfunc main() {\n\tjobsQueue := make(chan job, workers)\n\tresultQueue := make(chan jobResult, workers)\n\thostsFileFlag := flag.String(\"f\", \"{filename}\", \" [FILE] file containing target hosts, one per line.\")\n\tuserFlag := flag.String(\"u\", \"root\", \"[USERNAME] ssh username.\")\n\tpasswdFlag := flag.String(\"p\", \"nopassword\", \"[PASSWORD] ssh password for remote hosts. Automatically use ssh-agent as fallback.\")\n\tnoHeadFlag := flag.Bool(\"n\", false, \"Do not show titles.\")\n\tversionFlag := flag.Bool(\"v\", false, \"Show version and exit\")\n\tflag.Parse()\n\tif *versionFlag {\n\t\tfmt.Println(\"RIM - Remote Interface Monitor 1.0.0-beta\")\n\t\treturn\n\t}\n\thosts := getHostsFromFile(*hostsFileFlag)\n\tsshConfig := createSshConfig(*userFlag, *passwdFlag)\n\tresultCounts := 0\n\tinterfacesData := make([]interfaceData, 0, len(hosts))\n\truntime.GOMAXPROCS(workers)\n\tgo populateQueue(jobsQueue, resultQueue, hosts, sshConfig)\n\tgo parallelizeWorkers(jobsQueue)\n\tfor {\n\t\t\/\/ FIXME make a case for timeout\n\t\tselect {\n\t\tcase jobResult := <-resultQueue:\n\t\t\tresultCounts++\n\t\t\tinterfacesData = append(interfacesData, unpackJobResult(&jobResult)...)\n\t\t\tif resultCounts == len(hosts) {\n\t\t\t\tdisplayResults(interfacesData, *noHeadFlag)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bitly\/go-nsq\"\n)\n\nvar (\n\tshowHelp = flag.Bool(\"help\", false, \"print help\")\n\ttopic = flag.String(\"topic\", \"\", \"NSQ topic\")\n\tchannel = flag.String(\"channel\", \"\", \"NSQ channel\")\n)\n\nfunc main() {\n\tfmt.Println(\"Goloso\")\n\n\tflag.Parse()\n\n\tif *showHelp {\n\t\tfmt.Println(`\nUsage:\n goloso --help\n\n goloso --channel \"orc.sys.events\" --topic \"ec2\"\n`)\n\t\tos.Exit(0)\n\t}\n\n\tif *channel == \"\" {\n\t\tlog.Fatalln(\"Err: missing channel\")\n\t\tos.Exit(1)\n\t}\n\n\tif *topic == \"\" {\n\t\tlog.Fatalln(\"Err: missing topic\")\n\t\tos.Exit(1)\n\t}\n\n\tvar (\n\t\treader *nsq.Consumer\n\t\terr error\n\t)\n\n\t\/\/ setup nsq config\n\tconf := nsq.NewConfig()\n\tconf.MaxInFlight = 1000\n\n\t\/\/ setup nsq consumer\n\treader, err = nsq.NewConsumer(*channel, *topic, conf)\n\tif err != nil {\n\t\tlog.Fatalln(\"Err: can't consume\", err)\n\t}\n\n\treader.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {\n\t\tlog.Printf(\"Message; %v\", message)\n\t\treturn nil\n\t}))\n\n}\n<commit_msg>example of a message struct<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bitly\/go-nsq\"\n)\n\nvar (\n\tshowHelp = flag.Bool(\"help\", false, \"print help\")\n\ttopic = flag.String(\"topic\", \"\", \"NSQ topic\")\n\tchannel = flag.String(\"channel\", \"\", \"NSQ channel\")\n)\n\n\/\/ bootstrap event struct\n\/*\ntype NSQMessage struct {\n\tEvent []string `json:event` \/\/ event type\n\tUuid []string `json:uuid` \/\/ event uuid\n\tInstanceId []string `json:instanceid` \/\/ instance id\n\tIpAddress []string `json:ipaddress` \/\/ ipaddess\n\tOs []string `json:os` \/\/ operaring system\n}\n*\/\n\nfunc main() {\n\tfmt.Println(\"Goloso\")\n\n\tflag.Parse()\n\n\tif *showHelp {\n\t\tfmt.Println(`\nUsage:\n goloso --help\n\n goloso --channel \"orc.sys.events\" --topic \"ec2\"\n`)\n\t\tos.Exit(0)\n\t}\n\n\tif *channel == \"\" {\n\t\tlog.Fatalln(\"Err: missing channel\")\n\t\tos.Exit(1)\n\t}\n\n\tif *topic == \"\" {\n\t\tlog.Fatalln(\"Err: missing topic\")\n\t\tos.Exit(1)\n\t}\n\n\tvar (\n\t\treader *nsq.Consumer\n\t\terr error\n\t)\n\n\t\/\/ setup nsq config\n\tconf := nsq.NewConfig()\n\tconf.MaxInFlight = 1000\n\n\t\/\/ setup nsq consumer\n\treader, err = nsq.NewConsumer(*channel, *topic, conf)\n\tif err != nil {\n\t\tlog.Fatalln(\"Err: can't consume\", err)\n\t}\n\n\treader.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {\n\t\tlog.Printf(\"Message; %v\", message)\n\t\treturn nil\n\t}))\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mh-cbon\/go-bin-rpm\/rpm\"\n\t\"github.com\/mh-cbon\/verbose\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar VERSION = \"0.0.0\"\nvar logger = verbose.Auto()\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"go-bin-rpm\"\n\tapp.Version = VERSION\n\tapp.Usage = \"Generate a binary rpm package\"\n\tapp.UsageText = \"go-bin-rpm <cmd> <options>\"\n\tapp.Commands = []cli.Command{\n {\n Name: \"generate-spec\",\n Usage: \"Generate the SPEC file\",\n Action: generateSpec,\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"file, f\",\n Value: \"rpm.json\",\n Usage: \"Path to the rpm.json file\",\n },\n cli.StringFlag{\n Name: \"a, arch\",\n Value: \"\",\n Usage: \"Target architecture of the build\",\n },\n cli.StringFlag{\n Name: \"version\",\n Value: \"\",\n Usage: \"Target version of the build\",\n },\n },\n },\n {\n Name: \"generate\",\n Usage: \"Generate the package\",\n Action: generatePkg,\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"file, f\",\n Value: \"rpm.json\",\n Usage: \"Path to the rpm.json file\",\n },\n cli.StringFlag{\n Name: \"b, build-area\",\n Value: \"pkg-build\",\n Usage: \"Path to the build area\",\n },\n cli.StringFlag{\n Name: \"a, arch\",\n Value: \"\",\n Usage: \"Target architecture of the build\",\n },\n cli.StringFlag{\n Name: \"o, output\",\n Value: \"\",\n Usage: \"Output package to this path\",\n },\n cli.StringFlag{\n Name: \"version\",\n Value: \"\",\n Usage: \"Target version of the build\",\n },\n },\n },\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc generateSpec(c *cli.Context) error {\n\tfile := c.String(\"file\")\n\tarch := c.String(\"arch\")\n\tversion := c.String(\"version\")\n\n\trpmJson := rpm.Package{}\n\n\tif err := rpmJson.Load(file); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tif err := rpmJson.Normalize(arch, version); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tif spec, err := rpmJson.GenerateSpecFile(\"\"); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t} else {\n fmt.Printf(\"%s\", spec)\n }\n\n\treturn nil\n}\n\nfunc generatePkg(c *cli.Context) error {\n var err error\n\n\tfile := c.String(\"file\")\n\tarch := c.String(\"arch\")\n\tversion := c.String(\"version\")\n\tbuildArea := c.String(\"build-area\")\n\toutput := c.String(\"output\")\n\n\trpmJson := rpm.Package{}\n\n\tif err := rpmJson.Load(file); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n if buildArea, err = filepath.Abs(buildArea); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n\tif err := rpmJson.Normalize(arch, version); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n rpmJson.InitializeBuildArea(buildArea)\n\n\tif err = rpmJson.WriteSpecFile(\"\", buildArea); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n\tif err = rpmJson.RunBuild(buildArea, output); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n fmt.Println(\"\\n\\nAll done!\")\n\n\treturn nil\n}\n<commit_msg>add test command<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mh-cbon\/go-bin-rpm\/rpm\"\n\t\"github.com\/mh-cbon\/verbose\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar VERSION = \"0.0.0\"\nvar logger = verbose.Auto()\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"go-bin-rpm\"\n\tapp.Version = VERSION\n\tapp.Usage = \"Generate a binary rpm package\"\n\tapp.UsageText = \"go-bin-rpm <cmd> <options>\"\n\tapp.Commands = []cli.Command{\n {\n Name: \"generate-spec\",\n Usage: \"Generate the SPEC file\",\n Action: generateSpec,\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"file, f\",\n Value: \"rpm.json\",\n Usage: \"Path to the rpm.json file\",\n },\n cli.StringFlag{\n Name: \"a, arch\",\n Value: \"\",\n Usage: \"Target architecture of the build\",\n },\n cli.StringFlag{\n Name: \"version\",\n Value: \"\",\n Usage: \"Target version of the build\",\n },\n },\n },\n {\n Name: \"generate\",\n Usage: \"Generate the package\",\n Action: generatePkg,\n Flags: []cli.Flag{\n cli.StringFlag{\n Name: \"file, f\",\n Value: \"rpm.json\",\n Usage: \"Path to the rpm.json file\",\n },\n cli.StringFlag{\n Name: \"b, build-area\",\n Value: \"pkg-build\",\n Usage: \"Path to the build area\",\n },\n cli.StringFlag{\n Name: \"a, arch\",\n Value: \"\",\n Usage: \"Target architecture of the build\",\n },\n cli.StringFlag{\n Name: \"o, output\",\n Value: \"\",\n Usage: \"Output package to this path\",\n },\n cli.StringFlag{\n Name: \"version\",\n Value: \"\",\n Usage: \"Target version of the build\",\n },\n },\n },\n\t\t{\n\t\t\tName: \"test\",\n\t\t\tUsage: \"Test the package json file\",\n\t\t\tAction: testPkg,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"file, f\",\n\t\t\t\t\tValue: \"rpm.json\",\n\t\t\t\t\tUsage: \"Path to the rpm.json file\",\n\t\t\t\t},\n\t\t\t},\n },\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc generateSpec(c *cli.Context) error {\n\tfile := c.String(\"file\")\n\tarch := c.String(\"arch\")\n\tversion := c.String(\"version\")\n\n\trpmJson := rpm.Package{}\n\n\tif err := rpmJson.Load(file); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tif err := rpmJson.Normalize(arch, version); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tif spec, err := rpmJson.GenerateSpecFile(\"\"); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t} else {\n fmt.Printf(\"%s\", spec)\n }\n\n\treturn nil\n}\n\nfunc generatePkg(c *cli.Context) error {\n var err error\n\n\tfile := c.String(\"file\")\n\tarch := c.String(\"arch\")\n\tversion := c.String(\"version\")\n\tbuildArea := c.String(\"build-area\")\n\toutput := c.String(\"output\")\n\n\trpmJson := rpm.Package{}\n\n\tif err := rpmJson.Load(file); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n if buildArea, err = filepath.Abs(buildArea); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n\tif err := rpmJson.Normalize(arch, version); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n rpmJson.InitializeBuildArea(buildArea)\n\n\tif err = rpmJson.WriteSpecFile(\"\", buildArea); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n\tif err = rpmJson.RunBuild(buildArea, output); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n }\n\n fmt.Println(\"\\n\\nAll done!\")\n\n\treturn nil\n}\n\n\nfunc testPkg(c *cli.Context) error {\n\tfile := c.String(\"file\")\n\n\trpmJson := rpm.Package{}\n\n\tif err := rpmJson.Load(file); err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\n\tfmt.Println(\"File is correct\")\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\ntype (\n\tconfiguration struct {\n\t\tLogger logConfiguration `yaml:\"logger\"`\n\t}\n\n\tlogConfiguration struct {\n\t\tBufferSize int `yaml:\"buffer_size\"`\n\t\tEntryMaxSize int `yaml:\"entry_max_size\"`\n\t}\n)\n\nvar (\n\tlog = logrus.New()\n\thomedir string\n\tcfg configuration\n)\n\nfunc init() {\n\tvar err error\n\thomedir, err = os.Getwd()\n\tcheck(err)\n\n\tcfg = configuration{\n\t\tLogger: logConfiguration{\n\t\t\tBufferSize: 42,\n\t\t\tEntryMaxSize: bufio.MaxScanTokenSize,\n\t\t},\n\t}\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Composer - MIT\"\n\tapp.Version = \"0.1.0\"\n\tapp.Author = \"mdouchement\"\n\tapp.Usage = \"Usage:\"\n\tapp.Flags = globalFlags\n\tapp.Before = beforeAction\n\n\tcommands(app)\n\n\terr := app.Run(os.Args)\n\tcheck(err)\n}\n\nvar globalFlags = []cli.Flag{\n\tcli.BoolFlag{\n\t\tName: \"D, verbose\",\n\t\tUsage: \"Increase logger level\",\n\t},\n}\n\nfunc beforeAction(context *cli.Context) error {\n\tif context.Bool(\"D\") || os.Getenv(\"APP_DEBUG\") == \"1\" {\n\t\tlog.Level = logrus.DebugLevel\n\t}\n\treturn nil\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfail(err.Error())\n\t}\n}\n\nfunc fail(err string) {\n\tlog.Error(err)\n\ttime.Sleep(100 * time.Millisecond) \/\/ Wait logger outputing\n\tos.Exit(1)\n}\n<commit_msg>Bump version (0.2.0)<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\ntype (\n\tconfiguration struct {\n\t\tLogger logConfiguration `yaml:\"logger\"`\n\t}\n\n\tlogConfiguration struct {\n\t\tBufferSize int `yaml:\"buffer_size\"`\n\t\tEntryMaxSize int `yaml:\"entry_max_size\"`\n\t}\n)\n\nvar (\n\tlog = logrus.New()\n\thomedir string\n\tcfg configuration\n)\n\nfunc init() {\n\tvar err error\n\thomedir, err = os.Getwd()\n\tcheck(err)\n\n\tcfg = configuration{\n\t\tLogger: logConfiguration{\n\t\t\tBufferSize: 42,\n\t\t\tEntryMaxSize: bufio.MaxScanTokenSize,\n\t\t},\n\t}\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"Composer - MIT\"\n\tapp.Version = \"0.2.0\"\n\tapp.Author = \"mdouchement\"\n\tapp.Usage = \"Usage:\"\n\tapp.Flags = globalFlags\n\tapp.Before = beforeAction\n\n\tcommands(app)\n\n\terr := app.Run(os.Args)\n\tcheck(err)\n}\n\nvar globalFlags = []cli.Flag{\n\tcli.BoolFlag{\n\t\tName: \"D, verbose\",\n\t\tUsage: \"Increase logger level\",\n\t},\n}\n\nfunc beforeAction(context *cli.Context) error {\n\tif context.Bool(\"D\") || os.Getenv(\"APP_DEBUG\") == \"1\" {\n\t\tlog.Level = logrus.DebugLevel\n\t}\n\treturn nil\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfail(err.Error())\n\t}\n}\n\nfunc fail(err string) {\n\tlog.Error(err)\n\ttime.Sleep(100 * time.Millisecond) \/\/ Wait logger outputing\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/--------------------------------------------\n\/\/ main.go\n\/\/\n\/\/ Initializes and sets up Network Observer\n\/\/ for use.\n\/\/--------------------------------------------\n\npackage main\n\nimport (\n\t\"NetworkObserver\/configuration\"\n\t\"NetworkObserver\/logger\"\n\t\"NetworkObserver\/settings\"\n\t\"NetworkObserver\/web\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/ Random default value\nvar portNumber string = \"5000\"\nvar loc string = settings.AppLocation\n\nfunc init() {\n\t\/\/ Check if the application directory exists, create it if it doesn't\n\tif _, err := os.Stat(loc); os.IsNotExist(err) {\n\t\tos.Mkdir(loc, 0600)\n\t}\n\n\tcl := loc + \"\/.cookies\"\n\tlogger.WriteString(\"Removing the existing .cookies file\")\n\tos.Remove(cl)\n\n\t\/\/ this is here because apparently auth's init() is called before this one is\n\tif _, err := os.Stat(cl); os.IsNotExist(err) {\n\t\tlogger.WriteString(\"Creating a new .cookies file.\")\n\t\tfile, _ := os.Create(cl)\n\t\tfile.Close()\n\t}\n\n\tif _, err := os.Stat(loc + \"\/.password\"); os.IsNotExist(err) {\n\t\tlogger.WriteString(\"The .password file does not exist. Creating a new .password file.\")\n\t\tfile, _ := os.Create(loc + \".password\")\n\t\tfile.Close()\n\t}\n\n\tlogger.WriteString(\"Setting the port number to \" + configuration.GetPortNumber())\n\tportNumber = \":\" + configuration.GetPortNumber()\n}\n\nfunc main() {\n\t\/\/ Base Pages\n\thttp.HandleFunc(\"\/\", web.Root)\n\thttp.HandleFunc(\"\/checkLogin\", web.CheckLogin)\n\thttp.HandleFunc(\"\/dashboard\", web.Dashboard)\n\thttp.HandleFunc(\"\/createaccount\", web.CreateAccount)\n\thttp.HandleFunc(\"\/account\", web.HandleAccount)\n\n\t\/\/ Handlers\n\thttp.HandleFunc(\"\/saveConfig\", web.SaveConfig)\n\thttp.HandleFunc(\"\/savetest\", web.SaveTest)\n\thttp.HandleFunc(\"\/teststarted\", web.TestStarted)\n\thttp.HandleFunc(\"\/logout\", web.Logout)\n\n\t\/\/ Dashboard Pages\n\thttp.HandleFunc(\"\/dashboard\/configure\", web.Configure)\n\thttp.HandleFunc(\"\/dashboard\/start_test\", web.StartTest)\n\thttp.HandleFunc(\"\/dashboard\/reports\", web.Reports)\n\n\t\/\/ Start the Webserver\n\thttp.ListenAndServe(portNumber, nil)\n\t\/\/ Enable SSL and HTTPS connections\n\t\/\/http.ListenAndServeTLS(portNumber, \"cert.pem\", \"key.pem\", nil)\n}\n<commit_msg>added more logging of .cookies and .password creating process<commit_after>\/\/--------------------------------------------\n\/\/ main.go\n\/\/\n\/\/ Initializes and sets up Network Observer\n\/\/ for use.\n\/\/--------------------------------------------\n\npackage main\n\nimport (\n\t\"NetworkObserver\/configuration\"\n\t\"NetworkObserver\/logger\"\n\t\"NetworkObserver\/settings\"\n\t\"NetworkObserver\/web\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/ Random default value\nvar portNumber string = \"5000\"\nvar loc string = settings.AppLocation\n\nfunc init() {\n\t\/\/ Check if the application directory exists, create it if it doesn't\n\tif _, err := os.Stat(loc); os.IsNotExist(err) {\n\t\tos.Mkdir(loc, 0600)\n\t}\n\n\tcl := loc + \"\/.cookies\"\n\tlogger.WriteString(\"Removing the existing .cookies file\")\n\tos.Remove(cl)\n\n\t\/\/ this is here because apparently auth's init() is called before this one is\n\tif _, err := os.Stat(cl); os.IsNotExist(err) {\n\t\tlogger.WriteString(\"Creating a new .cookies file.\")\n\t\tfile, e := os.Create(cl)\n\t\tif e != nil {\n\t\t\tlogger.WriteString(\"There was an error creating the .cookies file.\")\n\t\t}\n\t\tfile.Close()\n\t}\n\n\tif _, err := os.Stat(loc + \"\/.password\"); os.IsNotExist(err) {\n\t\tlogger.WriteString(\"The .password file does not exist. Creating a new .password file.\")\n\t\tfile, e := os.Create(loc + \".password\")\n\t\tif e != nil {\n\t\t\tlogger.WriteString(\"There was an error creating the .password file.\")\n\t\t}\n\t\tfile.Close()\n\t}\n\n\tlogger.WriteString(\"Setting the port number to \" + configuration.GetPortNumber())\n\tportNumber = \":\" + configuration.GetPortNumber()\n}\n\nfunc main() {\n\t\/\/ Base Pages\n\thttp.HandleFunc(\"\/\", web.Root)\n\thttp.HandleFunc(\"\/checkLogin\", web.CheckLogin)\n\thttp.HandleFunc(\"\/dashboard\", web.Dashboard)\n\thttp.HandleFunc(\"\/createaccount\", web.CreateAccount)\n\thttp.HandleFunc(\"\/account\", web.HandleAccount)\n\n\t\/\/ Handlers\n\thttp.HandleFunc(\"\/saveConfig\", web.SaveConfig)\n\thttp.HandleFunc(\"\/savetest\", web.SaveTest)\n\thttp.HandleFunc(\"\/teststarted\", web.TestStarted)\n\thttp.HandleFunc(\"\/logout\", web.Logout)\n\n\t\/\/ Dashboard Pages\n\thttp.HandleFunc(\"\/dashboard\/configure\", web.Configure)\n\thttp.HandleFunc(\"\/dashboard\/start_test\", web.StartTest)\n\thttp.HandleFunc(\"\/dashboard\/reports\", web.Reports)\n\n\t\/\/ Start the Webserver\n\thttp.ListenAndServe(portNumber, nil)\n\t\/\/ Enable SSL and HTTPS connections\n\t\/\/http.ListenAndServeTLS(portNumber, \"cert.pem\", \"key.pem\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage monetdb\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t_ \"crypto\/md5\"\n\t_ \"crypto\/sha1\"\n\t_ \"crypto\/sha512\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tmapi_MAX_PACKAGE_LENGTH = (1024 * 8) - 2\n\n\tmapi_MSG_PROMPT = \"\"\n\tmapi_MSG_INFO = \"#\"\n\tmapi_MSG_ERROR = \"!\"\n\tmapi_MSG_Q = \"&\"\n\tmapi_MSG_QTABLE = \"&1\"\n\tmapi_MSG_QUPDATE = \"&2\"\n\tmapi_MSG_QSCHEMA = \"&3\"\n\tmapi_MSG_QTRANS = \"&4\"\n\tmapi_MSG_QPREPARE = \"&5\"\n\tmapi_MSG_QBLOCK = \"&6\"\n\tmapi_MSG_HEADER = \"%\"\n\tmapi_MSG_TUPLE = \"[\"\n\tmapi_MSG_REDIRECT = \"^\"\n\tmapi_MSG_OK = \"=OK\"\n\n\tMAPI_STATE_INIT = 0\n\tMAPI_STATE_READY = 1\n)\n\nvar (\n\tmapi_MSG_MORE = string([]byte{1, 2, 10})\n)\n\ntype MapiConn struct {\n\tHostname string\n\tPort int\n\tUsername string\n\tPassword string\n\tDatabase string\n\tLanguage string\n\n\tState int\n\n\tconn *net.TCPConn\n}\n\nfunc NewMapi(hostname string, port int, username, password, database, language string) *MapiConn {\n\treturn &MapiConn{\n\t\tHostname: hostname,\n\t\tPort: port,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tLanguage: language,\n\n\t\tState: MAPI_STATE_INIT,\n\t}\n}\n\nfunc (c *MapiConn) Disconnect() {\n\tc.State = MAPI_STATE_INIT\n\tc.conn.Close()\n\tc.conn = nil\n}\n\nfunc (c *MapiConn) Cmd(operation string) (string, error) {\n\tif c.State != MAPI_STATE_READY {\n\t\treturn \"\", fmt.Errorf(\"Database not connected\")\n\t}\n\n\tif err := c.putBlock([]byte(operation)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tr, err := c.getBlock()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresp := string(r)\n\tif len(resp) == 0 {\n\t\treturn \"\", nil\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_OK) {\n\t\treturn strings.TrimSpace(resp[3:]), nil\n\n\t} else if resp == mapi_MSG_MORE {\n\t\t\/\/ tell server it isn't going to get more\n\t\treturn c.Cmd(\"\")\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_Q) || strings.HasPrefix(resp, mapi_MSG_HEADER) || strings.HasPrefix(resp, mapi_MSG_TUPLE) {\n\t\treturn resp, nil\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_ERROR) {\n\t\treturn \"\", fmt.Errorf(\"Operational error: %s\", resp[1:])\n\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"Unknown state: %s\", resp)\n\t}\n}\n\nfunc (c *MapiConn) Connect() error {\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t\tc.conn = nil\n\t}\n\n\taddr := fmt.Sprintf(\"%s:%d\", c.Hostname, c.Port)\n\traddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn.SetKeepAlive(false)\n\tconn.SetNoDelay(true)\n\tc.conn = conn\n\n\terr = c.login()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *MapiConn) login() error {\n\treturn c.tryLogin(0)\n}\n\nfunc (c *MapiConn) tryLogin(iteration int) error {\n\tchallenge, err := c.getBlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse, err := c.challengeResponse(challenge)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.putBlock([]byte(response))\n\n\tbprompt, err := c.getBlock()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tprompt := strings.TrimSpace(string(bprompt))\n\tif len(prompt) == 0 {\n\t\t\/\/ Empty response, server is happy\n\n\t} else if prompt == mapi_MSG_OK {\n\t\t\/\/ pass\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_INFO) {\n\t\t\/\/ TODO log info\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_ERROR) {\n\t\t\/\/ TODO log error\n\t\treturn fmt.Errorf(\"Database error: %s\", prompt[1:])\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_REDIRECT) {\n\t\tt := strings.Split(prompt, \" \")\n\t\tr := strings.Split(t[0][1:], \":\")\n\n\t\tif r[1] == \"merovingian\" {\n\t\t\t\/\/ restart auth\n\t\t\tif iteration <= 10 {\n\t\t\t\tc.tryLogin(iteration + 1)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Maximal number of redirects reached (10)\")\n\t\t\t}\n\n\t\t} else if r[1] == \"monetdb\" {\n\t\t\tc.Hostname = r[2][2:]\n\t\t\tt = strings.Split(r[3], \"\/\")\n\t\t\tport, _ := strconv.ParseInt(t[0], 10, 32)\n\t\t\tc.Port = int(port)\n\t\t\tc.Database = t[1]\n\t\t\tc.conn.Close()\n\t\t\tc.Connect()\n\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unknown redirect: %s\", prompt)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Unknown state: %s\", prompt)\n\t}\n\n\tc.State = MAPI_STATE_READY\n\n\treturn nil\n}\n\nfunc (c *MapiConn) challengeResponse(challenge []byte) (string, error) {\n\tt := strings.Split(string(challenge), \":\")\n\tsalt := t[0]\n\tprotocol := t[2]\n\thashes := t[3]\n\talgo := t[5]\n\n\tif protocol != \"9\" {\n\t\treturn \"\", fmt.Errorf(\"We only speak protocol v9\")\n\t}\n\n\tvar h hash.Hash\n\tif algo == \"SHA512\" {\n\t\th = crypto.SHA512.New()\n\t} else {\n\t\t\/\/ TODO support more algorithm\n\t\treturn \"\", fmt.Errorf(\"Unsupported algorithm: %s\", algo)\n\t}\n\tio.WriteString(h, c.Password)\n\tp := fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\tshashes := \",\" + hashes + \",\"\n\tvar pwhash string\n\tif strings.Contains(shashes, \",SHA1,\") {\n\t\th = crypto.SHA1.New()\n\t\tio.WriteString(h, p)\n\t\tio.WriteString(h, salt)\n\t\tpwhash = fmt.Sprintf(\"{SHA1}%x\", h.Sum(nil))\n\n\t} else if strings.Contains(shashes, \",MD5,\") {\n\t\th = crypto.MD5.New()\n\t\tio.WriteString(h, p)\n\t\tio.WriteString(h, salt)\n\t\tpwhash = fmt.Sprintf(\"{MD5}%x\", h.Sum(nil))\n\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"Unsupported hash algorithm required for login %s\", hashes)\n\t}\n\n\tr := fmt.Sprintf(\"BIG:%s:%s:%s:%s:\", c.Username, pwhash, c.Language, c.Database)\n\treturn r, nil\n}\n\nfunc (c *MapiConn) getBlock() ([]byte, error) {\n\tr := new(bytes.Buffer)\n\n\tlast := 0\n\tfor last != 1 {\n\t\tflag, err := c.getBytes(2)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar unpacked uint16\n\t\tbuf := bytes.NewBuffer(flag)\n\t\terr = binary.Read(buf, binary.LittleEndian, &unpacked)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlength := unpacked >> 1\n\t\tlast = int(unpacked & 1)\n\n\t\td, err := c.getBytes(int(length))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Write(d)\n\t}\n\n\treturn r.Bytes(), nil\n}\n\nfunc (c *MapiConn) getBytes(count int) ([]byte, error) {\n\tr := make([]byte, count)\n\tb := make([]byte, count)\n\n\tread := 0\n\tfor read < count {\n\t\tn, err := c.conn.Read(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcopy(r[read:], b[:n])\n\t\tread += n\n\t}\n\n\treturn r, nil\n}\n\nfunc (c *MapiConn) putBlock(b []byte) error {\n\tpos := 0\n\tlast := 0\n\tfor last != 1 {\n\t\tend := pos + mapi_MAX_PACKAGE_LENGTH\n\t\tif end > len(b) {\n\t\t\tend = len(b)\n\t\t}\n\t\tdata := b[pos:end]\n\t\tlength := len(data)\n\t\tif length < mapi_MAX_PACKAGE_LENGTH {\n\t\t\tlast = 1\n\t\t}\n\n\t\tvar packed uint16\n\t\tpacked = uint16((length << 1) + last)\n\t\tflag := new(bytes.Buffer)\n\t\tbinary.Write(flag, binary.LittleEndian, packed)\n\n\t\tif _, err := c.conn.Write(flag.Bytes()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := c.conn.Write(data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpos += length\n\t}\n\n\treturn nil\n}\n<commit_msg>Ignore a disconnected MAPI connection when Disconnect is called again<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage monetdb\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t_ \"crypto\/md5\"\n\t_ \"crypto\/sha1\"\n\t_ \"crypto\/sha512\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tmapi_MAX_PACKAGE_LENGTH = (1024 * 8) - 2\n\n\tmapi_MSG_PROMPT = \"\"\n\tmapi_MSG_INFO = \"#\"\n\tmapi_MSG_ERROR = \"!\"\n\tmapi_MSG_Q = \"&\"\n\tmapi_MSG_QTABLE = \"&1\"\n\tmapi_MSG_QUPDATE = \"&2\"\n\tmapi_MSG_QSCHEMA = \"&3\"\n\tmapi_MSG_QTRANS = \"&4\"\n\tmapi_MSG_QPREPARE = \"&5\"\n\tmapi_MSG_QBLOCK = \"&6\"\n\tmapi_MSG_HEADER = \"%\"\n\tmapi_MSG_TUPLE = \"[\"\n\tmapi_MSG_REDIRECT = \"^\"\n\tmapi_MSG_OK = \"=OK\"\n\n\tMAPI_STATE_INIT = 0\n\tMAPI_STATE_READY = 1\n)\n\nvar (\n\tmapi_MSG_MORE = string([]byte{1, 2, 10})\n)\n\ntype MapiConn struct {\n\tHostname string\n\tPort int\n\tUsername string\n\tPassword string\n\tDatabase string\n\tLanguage string\n\n\tState int\n\n\tconn *net.TCPConn\n}\n\nfunc NewMapi(hostname string, port int, username, password, database, language string) *MapiConn {\n\treturn &MapiConn{\n\t\tHostname: hostname,\n\t\tPort: port,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tLanguage: language,\n\n\t\tState: MAPI_STATE_INIT,\n\t}\n}\n\nfunc (c *MapiConn) Disconnect() {\n\tc.State = MAPI_STATE_INIT\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t\tc.conn = nil\n\t}\n}\n\nfunc (c *MapiConn) Cmd(operation string) (string, error) {\n\tif c.State != MAPI_STATE_READY {\n\t\treturn \"\", fmt.Errorf(\"Database not connected\")\n\t}\n\n\tif err := c.putBlock([]byte(operation)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tr, err := c.getBlock()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresp := string(r)\n\tif len(resp) == 0 {\n\t\treturn \"\", nil\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_OK) {\n\t\treturn strings.TrimSpace(resp[3:]), nil\n\n\t} else if resp == mapi_MSG_MORE {\n\t\t\/\/ tell server it isn't going to get more\n\t\treturn c.Cmd(\"\")\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_Q) || strings.HasPrefix(resp, mapi_MSG_HEADER) || strings.HasPrefix(resp, mapi_MSG_TUPLE) {\n\t\treturn resp, nil\n\n\t} else if strings.HasPrefix(resp, mapi_MSG_ERROR) {\n\t\treturn \"\", fmt.Errorf(\"Operational error: %s\", resp[1:])\n\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"Unknown state: %s\", resp)\n\t}\n}\n\nfunc (c *MapiConn) Connect() error {\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t\tc.conn = nil\n\t}\n\n\taddr := fmt.Sprintf(\"%s:%d\", c.Hostname, c.Port)\n\traddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn.SetKeepAlive(false)\n\tconn.SetNoDelay(true)\n\tc.conn = conn\n\n\terr = c.login()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *MapiConn) login() error {\n\treturn c.tryLogin(0)\n}\n\nfunc (c *MapiConn) tryLogin(iteration int) error {\n\tchallenge, err := c.getBlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse, err := c.challengeResponse(challenge)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.putBlock([]byte(response))\n\n\tbprompt, err := c.getBlock()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tprompt := strings.TrimSpace(string(bprompt))\n\tif len(prompt) == 0 {\n\t\t\/\/ Empty response, server is happy\n\n\t} else if prompt == mapi_MSG_OK {\n\t\t\/\/ pass\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_INFO) {\n\t\t\/\/ TODO log info\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_ERROR) {\n\t\t\/\/ TODO log error\n\t\treturn fmt.Errorf(\"Database error: %s\", prompt[1:])\n\n\t} else if strings.HasPrefix(prompt, mapi_MSG_REDIRECT) {\n\t\tt := strings.Split(prompt, \" \")\n\t\tr := strings.Split(t[0][1:], \":\")\n\n\t\tif r[1] == \"merovingian\" {\n\t\t\t\/\/ restart auth\n\t\t\tif iteration <= 10 {\n\t\t\t\tc.tryLogin(iteration + 1)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Maximal number of redirects reached (10)\")\n\t\t\t}\n\n\t\t} else if r[1] == \"monetdb\" {\n\t\t\tc.Hostname = r[2][2:]\n\t\t\tt = strings.Split(r[3], \"\/\")\n\t\t\tport, _ := strconv.ParseInt(t[0], 10, 32)\n\t\t\tc.Port = int(port)\n\t\t\tc.Database = t[1]\n\t\t\tc.conn.Close()\n\t\t\tc.Connect()\n\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unknown redirect: %s\", prompt)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Unknown state: %s\", prompt)\n\t}\n\n\tc.State = MAPI_STATE_READY\n\n\treturn nil\n}\n\nfunc (c *MapiConn) challengeResponse(challenge []byte) (string, error) {\n\tt := strings.Split(string(challenge), \":\")\n\tsalt := t[0]\n\tprotocol := t[2]\n\thashes := t[3]\n\talgo := t[5]\n\n\tif protocol != \"9\" {\n\t\treturn \"\", fmt.Errorf(\"We only speak protocol v9\")\n\t}\n\n\tvar h hash.Hash\n\tif algo == \"SHA512\" {\n\t\th = crypto.SHA512.New()\n\t} else {\n\t\t\/\/ TODO support more algorithm\n\t\treturn \"\", fmt.Errorf(\"Unsupported algorithm: %s\", algo)\n\t}\n\tio.WriteString(h, c.Password)\n\tp := fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\tshashes := \",\" + hashes + \",\"\n\tvar pwhash string\n\tif strings.Contains(shashes, \",SHA1,\") {\n\t\th = crypto.SHA1.New()\n\t\tio.WriteString(h, p)\n\t\tio.WriteString(h, salt)\n\t\tpwhash = fmt.Sprintf(\"{SHA1}%x\", h.Sum(nil))\n\n\t} else if strings.Contains(shashes, \",MD5,\") {\n\t\th = crypto.MD5.New()\n\t\tio.WriteString(h, p)\n\t\tio.WriteString(h, salt)\n\t\tpwhash = fmt.Sprintf(\"{MD5}%x\", h.Sum(nil))\n\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"Unsupported hash algorithm required for login %s\", hashes)\n\t}\n\n\tr := fmt.Sprintf(\"BIG:%s:%s:%s:%s:\", c.Username, pwhash, c.Language, c.Database)\n\treturn r, nil\n}\n\nfunc (c *MapiConn) getBlock() ([]byte, error) {\n\tr := new(bytes.Buffer)\n\n\tlast := 0\n\tfor last != 1 {\n\t\tflag, err := c.getBytes(2)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar unpacked uint16\n\t\tbuf := bytes.NewBuffer(flag)\n\t\terr = binary.Read(buf, binary.LittleEndian, &unpacked)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlength := unpacked >> 1\n\t\tlast = int(unpacked & 1)\n\n\t\td, err := c.getBytes(int(length))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Write(d)\n\t}\n\n\treturn r.Bytes(), nil\n}\n\nfunc (c *MapiConn) getBytes(count int) ([]byte, error) {\n\tr := make([]byte, count)\n\tb := make([]byte, count)\n\n\tread := 0\n\tfor read < count {\n\t\tn, err := c.conn.Read(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcopy(r[read:], b[:n])\n\t\tread += n\n\t}\n\n\treturn r, nil\n}\n\nfunc (c *MapiConn) putBlock(b []byte) error {\n\tpos := 0\n\tlast := 0\n\tfor last != 1 {\n\t\tend := pos + mapi_MAX_PACKAGE_LENGTH\n\t\tif end > len(b) {\n\t\t\tend = len(b)\n\t\t}\n\t\tdata := b[pos:end]\n\t\tlength := len(data)\n\t\tif length < mapi_MAX_PACKAGE_LENGTH {\n\t\t\tlast = 1\n\t\t}\n\n\t\tvar packed uint16\n\t\tpacked = uint16((length << 1) + last)\n\t\tflag := new(bytes.Buffer)\n\t\tbinary.Write(flag, binary.LittleEndian, packed)\n\n\t\tif _, err := c.conn.Write(flag.Bytes()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := c.conn.Write(data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpos += length\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mdns\n\n\/\/ Advertise network services via multicast DNS\n\nimport (\n\t\"log\"\n\t\"net\"\n\n\t\"github.com\/davecheney\/gmx\"\n\t\"github.com\/miekg\/dns\"\n)\n\nvar (\n\tipv4mcastaddr = &net.UDPAddr{\n\t\tIP: net.ParseIP(\"224.0.0.251\"),\n\t\tPort: 5353,\n\t}\n\n\tipv6mcastaddr = &net.UDPAddr{\n\t\tIP: net.ParseIP(\"ff02::fb\"),\n\t\tPort: 5353,\n\t}\n\tlocal *zone \/\/ the local mdns zone\n)\n\nfunc init() {\n\tlocal = &zone{\n\t\tentries: make(map[string]entries),\n\t\tadd: make(chan *entry),\n\t\tqueries: make(chan *query, 16),\n\t}\n\tgo local.mainloop()\n\tif err := local.listen(ipv4mcastaddr); err != nil {\n\t\tlog.Fatalf(\"Failed to listen %s: %s\", ipv4mcastaddr, err)\n\t}\n\tif err := local.listen(ipv6mcastaddr); err != nil {\n\t\tlog.Printf(\"Failed to listen %s: %s\", ipv6mcastaddr, err)\n\t}\n\n\t\/\/ publish gmx stats for the local zone\n\tgmx.Publish(\"mdns.zone.local.queries\", func() interface{} {\n\t\treturn local.queryCount\n\t})\n\tgmx.Publish(\"mdns.zone.local.entries\", func() interface{} {\n\t\treturn local.entryCount\n\t})\n\n}\n\n\/\/ Publish adds a record, described in RFC XXX \nfunc Publish(r string) error {\n\trr, err := dns.NewRR(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlocal.add <- &entry{rr}\n\treturn nil\n}\n\ntype entry struct {\n\tdns.RR\n}\n\nfunc (e *entry) fqdn() string {\n\treturn e.Header().Name\n}\n\ntype query struct {\n\tdns.Question\n\tresult chan *entry\n}\n\ntype entries []*entry\n\nfunc (e entries) contains(entry *entry) bool {\n\tfor _, ee := range e {\n\t\tif equals(entry, ee) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype zone struct {\n\tentries map[string]entries\n\tadd chan *entry \/\/ add entries to zone\n\tqueries chan *query \/\/ query exsting entries in zone\n\n\t\/\/ gmx statistics\n\tqueryCount, entryCount int\n}\n\nfunc (z *zone) mainloop() {\n\tfor {\n\t\tselect {\n\t\tcase entry := <-z.add:\n\t\t\tz.entryCount++\n\t\t\tif !z.entries[entry.fqdn()].contains(entry) {\n\t\t\t\tz.entries[entry.fqdn()] = append(z.entries[entry.fqdn()], entry)\n\t\t\t}\n\t\tcase q := <-z.queries:\n\t\t\tz.queryCount++\n\t\t\tfor _, entry := range z.entries[q.Question.Name] {\n\t\t\t\tif q.matches(entry) {\n\t\t\t\t\tq.result <- entry\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(q.result)\n\t\t}\n\t}\n}\n\nfunc (z *zone) query(q dns.Question) (entries []*entry) {\n\tres := make(chan *entry, 16)\n\tz.queries <- &query{q, res}\n\tfor e := range res {\n\t\tentries = append(entries, e)\n\t}\n\treturn\n}\n\nfunc (q *query) matches(entry *entry) bool {\n\treturn q.Question.Qtype == dns.TypeANY || q.Question.Qtype == entry.RR.Header().Rrtype\n}\n\nfunc equals(this, that *entry) bool {\n\tif _, ok := this.RR.(*dns.RR_ANY); ok {\n\t\treturn true \/\/ *RR_ANY matches anything\n\t}\n\tif _, ok := that.RR.(*dns.RR_ANY); ok {\n\t\treturn true \/\/ *RR_ANY matches all\n\t}\n\treturn false\n}\n\ntype connector struct {\n\t*net.UDPAddr\n\t*net.UDPConn\n\t*zone\n\n\t\/\/ gmx statistics\n\tquestions, responses int\n}\n\nfunc (z *zone) listen(addr *net.UDPAddr) error {\n\tconn, err := openSocket(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := &connector{\n\t\tUDPAddr: addr,\n\t\tUDPConn: conn,\n\t\tzone: z,\n\t}\n\tgo c.mainloop()\n\n\t\/\/ publish gmx stats for the connector\n\tgmx.Publish(\"mdns.connector.questions\", func() interface{} {\n\t\treturn c.questions\n\t})\n\tgmx.Publish(\"mdns.connector.responses\", func() interface{} {\n\t\treturn c.responses\n\t})\n\n\treturn nil\n}\n\nfunc openSocket(addr *net.UDPAddr) (*net.UDPConn, error) {\n\tswitch addr.IP.To4() {\n\tcase nil:\n\t\treturn net.ListenMulticastUDP(\"udp6\", nil, ipv6mcastaddr)\n\tdefault:\n\t\treturn net.ListenMulticastUDP(\"udp4\", nil, ipv4mcastaddr)\n\t}\n\tpanic(\"unreachable\")\n}\n\ntype pkt struct {\n\t*dns.Msg\n\t*net.UDPAddr\n}\n\nfunc (c *connector) readloop(in chan pkt) {\n\tfor {\n\t\tmsg, addr, err := c.readMessage()\n\t\tif err != nil {\n\t\t\t\/\/ log dud packets\n\t\t\tlog.Printf(\"Cound not read from %s: %s\", c.UDPConn, err)\n\t\t}\n\t\tif len(msg.Question) > 0 {\n\t\t\tc.questions++\n\t\t\tin <- pkt{msg, addr}\n\t\t}\n\t}\n}\n\nfunc (c *connector) mainloop() {\n\tin := make(chan pkt, 32)\n\tgo c.readloop(in)\n\tfor {\n\t\tmsg := <-in\n\t\tmsg.MsgHdr.Response = true \/\/ convert question to response\n\t\tfor _, result := range c.query(msg.Question) {\n\t\t\tmsg.Answer = append(msg.Answer, result.RR)\n\t\t}\n\t\tmsg.Extra = append(msg.Extra, c.findExtra(msg.Answer...)...)\n\t\tif len(msg.Answer) > 0 {\n\t\t\tc.responses++\n\t\t\t\/\/ nuke questions\n\t\t\tmsg.Question = nil\n\t\t\tif err := c.writeMessage(msg.Msg); err != nil {\n\t\t\t\tlog.Fatalf(\"Cannot send: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *connector) query(qs []dns.Question) (results []*entry) {\n\tfor _, q := range qs {\n\t\tresults = append(results, c.zone.query(q)...)\n\t}\n\treturn\n}\n\n\/\/ recursively probe for related records\nfunc (c *connector) findExtra(r ...dns.RR) (extra []dns.RR) {\n\tfor _, rr := range r {\n\t\tvar q dns.Question\n\t\tswitch rr := rr.(type) {\n\t\tcase *dns.RR_PTR:\n\t\t\tq = dns.Question{\n\t\t\t\tName: rr.Ptr,\n\t\t\t\tQtype: dns.TypeANY,\n\t\t\t\tQclass: dns.ClassINET,\n\t\t\t}\n\t\tcase *dns.RR_SRV:\n\t\t\tq = dns.Question{\n\t\t\t\tName: rr.Target,\n\t\t\t\tQtype: dns.TypeA,\n\t\t\t\tQclass: dns.ClassINET,\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tres := c.zone.query(q)\n\t\tif len(res) > 0 {\n\t\t\tfor _, entry := range res {\n\t\t\t\textra = append(append(extra, entry.RR), c.findExtra(entry.RR)...)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ encode an mdns msg and broadcast it on the wire\nfunc (c *connector) writeMessage(msg *dns.Msg) error {\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.WriteToUDP(buf, c.UDPAddr)\n\treturn err\n}\n\n\/\/ consume an mdns packet from the wire and decode it\nfunc (c *connector) readMessage() (*dns.Msg, *net.UDPAddr, error) {\n\tbuf := make([]byte, 1500)\n\tread, addr, err := c.ReadFromUDP(buf)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar msg dns.Msg\n\tif err := msg.Unpack(buf[:read]); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &msg, addr, nil\n}\n<commit_msg>compile against latest miekg\/mdns<commit_after>package mdns\n\n\/\/ Advertise network services via multicast DNS\n\nimport (\n\t\"log\"\n\t\"net\"\n\n\t\"github.com\/davecheney\/gmx\"\n\t\"github.com\/miekg\/dns\"\n)\n\nvar (\n\tipv4mcastaddr = &net.UDPAddr{\n\t\tIP: net.ParseIP(\"224.0.0.251\"),\n\t\tPort: 5353,\n\t}\n\n\tipv6mcastaddr = &net.UDPAddr{\n\t\tIP: net.ParseIP(\"ff02::fb\"),\n\t\tPort: 5353,\n\t}\n\tlocal *zone \/\/ the local mdns zone\n)\n\nfunc init() {\n\tlocal = &zone{\n\t\tentries: make(map[string]entries),\n\t\tadd: make(chan *entry),\n\t\tqueries: make(chan *query, 16),\n\t}\n\tgo local.mainloop()\n\tif err := local.listen(ipv4mcastaddr); err != nil {\n\t\tlog.Fatalf(\"Failed to listen %s: %s\", ipv4mcastaddr, err)\n\t}\n\tif err := local.listen(ipv6mcastaddr); err != nil {\n\t\tlog.Printf(\"Failed to listen %s: %s\", ipv6mcastaddr, err)\n\t}\n\n\t\/\/ publish gmx stats for the local zone\n\tgmx.Publish(\"mdns.zone.local.queries\", func() interface{} {\n\t\treturn local.queryCount\n\t})\n\tgmx.Publish(\"mdns.zone.local.entries\", func() interface{} {\n\t\treturn local.entryCount\n\t})\n\n}\n\n\/\/ Publish adds a record, described in RFC XXX \nfunc Publish(r string) error {\n\trr, err := dns.NewRR(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlocal.add <- &entry{rr}\n\treturn nil\n}\n\ntype entry struct {\n\tdns.RR\n}\n\nfunc (e *entry) fqdn() string {\n\treturn e.Header().Name\n}\n\ntype query struct {\n\tdns.Question\n\tresult chan *entry\n}\n\ntype entries []*entry\n\nfunc (e entries) contains(entry *entry) bool {\n\tfor _, ee := range e {\n\t\tif equals(entry, ee) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype zone struct {\n\tentries map[string]entries\n\tadd chan *entry \/\/ add entries to zone\n\tqueries chan *query \/\/ query exsting entries in zone\n\n\t\/\/ gmx statistics\n\tqueryCount, entryCount int\n}\n\nfunc (z *zone) mainloop() {\n\tfor {\n\t\tselect {\n\t\tcase entry := <-z.add:\n\t\t\tz.entryCount++\n\t\t\tif !z.entries[entry.fqdn()].contains(entry) {\n\t\t\t\tz.entries[entry.fqdn()] = append(z.entries[entry.fqdn()], entry)\n\t\t\t}\n\t\tcase q := <-z.queries:\n\t\t\tz.queryCount++\n\t\t\tfor _, entry := range z.entries[q.Question.Name] {\n\t\t\t\tif q.matches(entry) {\n\t\t\t\t\tq.result <- entry\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(q.result)\n\t\t}\n\t}\n}\n\nfunc (z *zone) query(q dns.Question) (entries []*entry) {\n\tres := make(chan *entry, 16)\n\tz.queries <- &query{q, res}\n\tfor e := range res {\n\t\tentries = append(entries, e)\n\t}\n\treturn\n}\n\nfunc (q *query) matches(entry *entry) bool {\n\treturn q.Question.Qtype == dns.TypeANY || q.Question.Qtype == entry.RR.Header().Rrtype\n}\n\nfunc equals(this, that *entry) bool {\n\tif _, ok := this.RR.(*dns.ANY); ok {\n\t\treturn true \/\/ *ANY matches anything\n\t}\n\tif _, ok := that.RR.(*dns.ANY); ok {\n\t\treturn true \/\/ *ANY matches all\n\t}\n\treturn false\n}\n\ntype connector struct {\n\t*net.UDPAddr\n\t*net.UDPConn\n\t*zone\n\n\t\/\/ gmx statistics\n\tquestions, responses int\n}\n\nfunc (z *zone) listen(addr *net.UDPAddr) error {\n\tconn, err := openSocket(addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := &connector{\n\t\tUDPAddr: addr,\n\t\tUDPConn: conn,\n\t\tzone: z,\n\t}\n\tgo c.mainloop()\n\n\t\/\/ publish gmx stats for the connector\n\tgmx.Publish(\"mdns.connector.questions\", func() interface{} {\n\t\treturn c.questions\n\t})\n\tgmx.Publish(\"mdns.connector.responses\", func() interface{} {\n\t\treturn c.responses\n\t})\n\n\treturn nil\n}\n\nfunc openSocket(addr *net.UDPAddr) (*net.UDPConn, error) {\n\tswitch addr.IP.To4() {\n\tcase nil:\n\t\treturn net.ListenMulticastUDP(\"udp6\", nil, ipv6mcastaddr)\n\tdefault:\n\t\treturn net.ListenMulticastUDP(\"udp4\", nil, ipv4mcastaddr)\n\t}\n\tpanic(\"unreachable\")\n}\n\ntype pkt struct {\n\t*dns.Msg\n\t*net.UDPAddr\n}\n\nfunc (c *connector) readloop(in chan pkt) {\n\tfor {\n\t\tmsg, addr, err := c.readMessage()\n\t\tif err != nil {\n\t\t\t\/\/ log dud packets\n\t\t\tlog.Printf(\"Cound not read from %s: %s\", c.UDPConn, err)\n\t\t}\n\t\tif len(msg.Question) > 0 {\n\t\t\tc.questions++\n\t\t\tin <- pkt{msg, addr}\n\t\t}\n\t}\n}\n\nfunc (c *connector) mainloop() {\n\tin := make(chan pkt, 32)\n\tgo c.readloop(in)\n\tfor {\n\t\tmsg := <-in\n\t\tmsg.MsgHdr.Response = true \/\/ convert question to response\n\t\tfor _, result := range c.query(msg.Question) {\n\t\t\tmsg.Answer = append(msg.Answer, result.RR)\n\t\t}\n\t\tmsg.Extra = append(msg.Extra, c.findExtra(msg.Answer...)...)\n\t\tif len(msg.Answer) > 0 {\n\t\t\tc.responses++\n\t\t\t\/\/ nuke questions\n\t\t\tmsg.Question = nil\n\t\t\tif err := c.writeMessage(msg.Msg); err != nil {\n\t\t\t\tlog.Fatalf(\"Cannot send: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *connector) query(qs []dns.Question) (results []*entry) {\n\tfor _, q := range qs {\n\t\tresults = append(results, c.zone.query(q)...)\n\t}\n\treturn\n}\n\n\/\/ recursively probe for related records\nfunc (c *connector) findExtra(r ...dns.RR) (extra []dns.RR) {\n\tfor _, rr := range r {\n\t\tvar q dns.Question\n\t\tswitch rr := rr.(type) {\n\t\tcase *dns.PTR:\n\t\t\tq = dns.Question{\n\t\t\t\tName: rr.Ptr,\n\t\t\t\tQtype: dns.TypeANY,\n\t\t\t\tQclass: dns.ClassINET,\n\t\t\t}\n\t\tcase *dns.SRV:\n\t\t\tq = dns.Question{\n\t\t\t\tName: rr.Target,\n\t\t\t\tQtype: dns.TypeA,\n\t\t\t\tQclass: dns.ClassINET,\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tres := c.zone.query(q)\n\t\tif len(res) > 0 {\n\t\t\tfor _, entry := range res {\n\t\t\t\textra = append(append(extra, entry.RR), c.findExtra(entry.RR)...)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ encode an mdns msg and broadcast it on the wire\nfunc (c *connector) writeMessage(msg *dns.Msg) error {\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.WriteToUDP(buf, c.UDPAddr)\n\treturn err\n}\n\n\/\/ consume an mdns packet from the wire and decode it\nfunc (c *connector) readMessage() (*dns.Msg, *net.UDPAddr, error) {\n\tbuf := make([]byte, 1500)\n\tread, addr, err := c.ReadFromUDP(buf)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar msg dns.Msg\n\tif err := msg.Unpack(buf[:read]); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &msg, addr, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 Lee Boynton\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage midi\n\nimport (\n\t\"github.com\/boynton\/ell\"\n\t\"github.com\/rakyll\/portmidi\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Extension struct {\n}\n\nfunc (*Extension) Init() error {\n\tmidiPath := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/boynton\/midi-ell\"\n\tell.AddEllDirectory(midiPath)\n\tmidiInit()\n\treturn ell.Load(\"midi\")\n}\n\nfunc (*Extension) Cleanup() {\n\tmidiClose(nil)\n}\n\nfunc (*Extension) String() string {\n\treturn \"midi\"\n}\n\nvar inputKey = ell.Intern(\"input:\")\nvar outputKey = ell.Intern(\"output:\")\nvar bufsizeKey = ell.Intern(\"bufsize:\")\n\nfunc midiInit() error {\n\tell.DefineFunctionKeyArgs(\"midi-open\", midiOpen, ell.NullType,\n\t\t[]*ell.Object{ell.StringType, ell.StringType, ell.NumberType},\n\t\t[]*ell.Object{ell.EmptyString, ell.EmptyString, ell.Number(1024)},\n\t\t[]*ell.Object{inputKey, outputKey, bufsizeKey})\n\tell.DefineFunction(\"midi-write\", midiWrite, ell.NullType, ell.NumberType, ell.NumberType, ell.NumberType)\n\tell.DefineFunction(\"midi-close\", midiClose, ell.NullType)\n\tell.DefineFunction(\"midi-listen\", midiListen, ell.ChannelType)\n\treturn nil\n}\n\nvar midiOpened = false\nvar midiInDevice string\nvar midiOutDevice string\nvar midiBufsize int64\nvar midiBaseTime float64\n\nvar midiOut *portmidi.Stream\nvar midiIn *portmidi.Stream\nvar midiChannel chan portmidi.Event\nvar midiMutex = &sync.Mutex{}\n\nfunc findMidiInputDevice(name string) portmidi.DeviceId {\n\tdevcount := portmidi.CountDevices()\n\tfor i := 0; i < devcount; i++ {\n\t\tid := portmidi.DeviceId(i)\n\t\tinfo := portmidi.GetDeviceInfo(id)\n\t\tif info.IsInputAvailable {\n\t\t\tif info.Name == name {\n\t\t\t\treturn id\n\t\t\t}\n\t\t}\n\t}\n\treturn portmidi.DeviceId(-1)\n}\n\nfunc findMidiOutputDevice(name string) (portmidi.DeviceId, string) {\n\tdevcount := portmidi.CountDevices()\n\tfor i := 0; i < devcount; i++ {\n\t\tid := portmidi.DeviceId(i)\n\t\tinfo := portmidi.GetDeviceInfo(id)\n\t\tif info.IsOutputAvailable {\n\t\t\tif info.Name == name {\n\t\t\t\treturn id, info.Name\n\t\t\t}\n\t\t}\n\t}\n\tid := portmidi.GetDefaultOutputDeviceId()\n\tinfo := portmidi.GetDeviceInfo(id)\n\treturn id, info.Name\n}\n\nfunc midiOpen(argv []*ell.Object) (*ell.Object, error) {\n\t\/\/\tdefaultInput := \"USB Oxygen 8 v2\"\n\t\/\/\tdefaultOutput := \"IAC Driver Bus 1\"\n\tlatency := int64(10)\n\tif !midiOpened {\n\t\terr := portmidi.Initialize()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmidiOpened = true\n\t\tmidiInDevice = ell.StringValue(argv[0])\n\t\tmidiOutDevice = ell.StringValue(argv[1])\n\t\tmidiBufsize = ell.Int64Value(argv[2])\n\n\t\toutdev, outname := findMidiOutputDevice(midiOutDevice)\n\t\tout, err := portmidi.NewOutputStream(outdev, midiBufsize, latency)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmidiOut = out\n\t\tmidiOutDevice = outname\n\t\tif midiInDevice != \"\" {\n\t\t\tindev := findMidiInputDevice(midiInDevice)\n\t\t\tif indev >= 0 {\n\t\t\t\tin, err := portmidi.NewInputStream(indev, midiBufsize)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tmidiIn = in\n\t\t\t}\n\t\t}\n\t\tmidiBaseTime = ell.Now()\n\n\t}\n\tresult := ell.MakeStruct(4)\n\tif midiInDevice != \"\" {\n\t\tell.Put(result, inputKey, ell.String(midiInDevice))\n\t}\n\tif midiOutDevice != \"\" {\n\t\tell.Put(result, outputKey, ell.String(midiOutDevice))\n\t}\n\tell.Put(result, bufsizeKey, ell.Number(float64(midiBufsize)))\n\treturn result, nil\n}\n\nfunc midiAllNotesOff() {\n\tmidiOut.WriteShort(0xB0, 0x7B, 0x00)\n}\n\nfunc midiClose(argv []*ell.Object) (*ell.Object, error) {\n\tmidiMutex.Lock()\n\tif midiOut != nil {\n\t\tmidiAllNotesOff()\n\t\tmidiOut.Close()\n\t\tmidiOut = nil\n\t}\n\tmidiMutex.Unlock()\n\tell.Sleep(0.5)\n\treturn ell.Null, nil\n}\n\n\/\/ (midi-write 144 60 80) -> middle C note on\n\/\/ (midi-write 128 60 0) -> middle C note off\nfunc midiWrite(argv []*ell.Object) (*ell.Object, error) {\n\tstatus := ell.Int64Value(argv[0])\n\tdata1 := ell.Int64Value(argv[1])\n\tdata2 := ell.Int64Value(argv[2])\n\tmidiMutex.Lock()\n\tvar err error\n\tif midiOut != nil {\n\t\terr = midiOut.WriteShort(status, data1, data2)\n\t}\n\tmidiMutex.Unlock()\n\treturn ell.Null, err\n}\n\nfunc midiListen(argv []*ell.Object) (*ell.Object, error) {\n\tch := ell.Null\n\tmidiMutex.Lock()\n\tif midiIn != nil {\n\t\tch = ell.Channel(int(midiBufsize), \"midi\")\n\t\tgo func(s *portmidi.Stream, ch *ell.Object) {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tevents, err := s.Read(1024)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchannel := ell.ChannelValue(ch)\n\t\t\t\tif channel != nil {\n\t\t\t\t\tfor _, ev := range events {\n\t\t\t\t\t\tts := (float64(ev.Timestamp) \/ 1000) + midiBaseTime\n\t\t\t\t\t\tst := ev.Status\n\t\t\t\t\t\td1 := ev.Data1\n\t\t\t\t\t\td2 := ev.Data2\n\t\t\t\t\t\tchannel <- ell.List(ell.Number(ts), ell.Number(float64(st)), ell.Number(float64(d1)), ell.Number(float64(d2)))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(midiIn, ch)\n\t}\n\tmidiMutex.Unlock()\n\treturn ch, nil\n}\n<commit_msg>updated to track portmidi changes<commit_after>\/*\nCopyright 2014 Lee Boynton\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage midi\n\nimport (\n\t\"github.com\/boynton\/ell\"\n\t\"github.com\/rakyll\/portmidi\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Extension struct {\n}\n\nfunc (*Extension) Init() error {\n\tmidiPath := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/boynton\/midi-ell\"\n\tell.AddEllDirectory(midiPath)\n\tmidiInit()\n\treturn ell.Load(\"midi\")\n}\n\nfunc (*Extension) Cleanup() {\n\tmidiClose(nil)\n}\n\nfunc (*Extension) String() string {\n\treturn \"midi\"\n}\n\nvar inputKey = ell.Intern(\"input:\")\nvar outputKey = ell.Intern(\"output:\")\nvar bufsizeKey = ell.Intern(\"bufsize:\")\n\nfunc midiInit() error {\n\tell.DefineFunctionKeyArgs(\"midi-open\", midiOpen, ell.NullType,\n\t\t[]*ell.Object{ell.StringType, ell.StringType, ell.NumberType},\n\t\t[]*ell.Object{ell.EmptyString, ell.EmptyString, ell.Number(1024)},\n\t\t[]*ell.Object{inputKey, outputKey, bufsizeKey})\n\tell.DefineFunction(\"midi-write\", midiWrite, ell.NullType, ell.NumberType, ell.NumberType, ell.NumberType)\n\tell.DefineFunction(\"midi-close\", midiClose, ell.NullType)\n\tell.DefineFunction(\"midi-listen\", midiListen, ell.ChannelType)\n\treturn nil\n}\n\nvar midiOpened = false\nvar midiInDevice string\nvar midiOutDevice string\nvar midiBufsize int64\nvar midiBaseTime float64\n\nvar midiOut *portmidi.Stream\nvar midiIn *portmidi.Stream\nvar midiChannel chan portmidi.Event\nvar midiMutex = &sync.Mutex{}\n\nfunc findMidiInputDevice(name string) portmidi.DeviceID {\n\tdevcount := portmidi.CountDevices()\n\tfor i := 0; i < devcount; i++ {\n\t\tid := portmidi.DeviceID(i)\n\t\tinfo := portmidi.Info(id)\n\t\tif info.IsInputAvailable {\n\t\t\tif info.Name == name {\n\t\t\t\treturn id\n\t\t\t}\n\t\t}\n\t}\n\treturn portmidi.DeviceID(-1)\n}\n\nfunc findMidiOutputDevice(name string) (portmidi.DeviceID, string) {\n\tdevcount := portmidi.CountDevices()\n\tfor i := 0; i < devcount; i++ {\n\t\tid := portmidi.DeviceID(i)\n\t\tinfo := portmidi.Info(id)\n\t\tif info.IsOutputAvailable {\n\t\t\tif info.Name == name {\n\t\t\t\treturn id, info.Name\n\t\t\t}\n\t\t}\n\t}\n\tid := portmidi.DefaultOutputDeviceID()\n\tinfo := portmidi.Info(id)\n\treturn id, info.Name\n}\n\nfunc midiOpen(argv []*ell.Object) (*ell.Object, error) {\n\t\/\/\tdefaultInput := \"USB Oxygen 8 v2\"\n\t\/\/\tdefaultOutput := \"IAC Driver Bus 1\"\n\tlatency := int64(10)\n\tif !midiOpened {\n\t\terr := portmidi.Initialize()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmidiOpened = true\n\t\tmidiInDevice = ell.StringValue(argv[0])\n\t\tmidiOutDevice = ell.StringValue(argv[1])\n\t\tmidiBufsize = ell.Int64Value(argv[2])\n\n\t\toutdev, outname := findMidiOutputDevice(midiOutDevice)\n\t\tout, err := portmidi.NewOutputStream(outdev, midiBufsize, latency)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmidiOut = out\n\t\tmidiOutDevice = outname\n\t\tif midiInDevice != \"\" {\n\t\t\tindev := findMidiInputDevice(midiInDevice)\n\t\t\tif indev >= 0 {\n\t\t\t\tin, err := portmidi.NewInputStream(indev, midiBufsize)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tmidiIn = in\n\t\t\t}\n\t\t}\n\t\tmidiBaseTime = ell.Now()\n\n\t}\n\tresult := ell.MakeStruct(4)\n\tif midiInDevice != \"\" {\n\t\tell.Put(result, inputKey, ell.String(midiInDevice))\n\t}\n\tif midiOutDevice != \"\" {\n\t\tell.Put(result, outputKey, ell.String(midiOutDevice))\n\t}\n\tell.Put(result, bufsizeKey, ell.Number(float64(midiBufsize)))\n\treturn result, nil\n}\n\nfunc midiAllNotesOff() {\n\tmidiOut.WriteShort(0xB0, 0x7B, 0x00)\n}\n\nfunc midiClose(argv []*ell.Object) (*ell.Object, error) {\n\tmidiMutex.Lock()\n\tif midiOut != nil {\n\t\tmidiAllNotesOff()\n\t\tmidiOut.Close()\n\t\tmidiOut = nil\n\t}\n\tmidiMutex.Unlock()\n\tell.Sleep(0.5)\n\treturn ell.Null, nil\n}\n\n\/\/ (midi-write 144 60 80) -> middle C note on\n\/\/ (midi-write 128 60 0) -> middle C note off\nfunc midiWrite(argv []*ell.Object) (*ell.Object, error) {\n\tstatus := ell.Int64Value(argv[0])\n\tdata1 := ell.Int64Value(argv[1])\n\tdata2 := ell.Int64Value(argv[2])\n\tmidiMutex.Lock()\n\tvar err error\n\tif midiOut != nil {\n\t\terr = midiOut.WriteShort(status, data1, data2)\n\t}\n\tmidiMutex.Unlock()\n\treturn ell.Null, err\n}\n\nfunc midiListen(argv []*ell.Object) (*ell.Object, error) {\n\tch := ell.Null\n\tmidiMutex.Lock()\n\tif midiIn != nil {\n\t\tch = ell.Channel(int(midiBufsize), \"midi\")\n\t\tgo func(s *portmidi.Stream, ch *ell.Object) {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tevents, err := s.Read(1024)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchannel := ell.ChannelValue(ch)\n\t\t\t\tif channel != nil {\n\t\t\t\t\tfor _, ev := range events {\n\t\t\t\t\t\tts := (float64(ev.Timestamp) \/ 1000) + midiBaseTime\n\t\t\t\t\t\tst := ev.Status\n\t\t\t\t\t\td1 := ev.Data1\n\t\t\t\t\t\td2 := ev.Data2\n\t\t\t\t\t\tchannel <- ell.List(ell.Number(ts), ell.Number(float64(st)), ell.Number(float64(d1)), ell.Number(float64(d2)))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(midiIn, ch)\n\t}\n\tmidiMutex.Unlock()\n\treturn ch, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Chadev. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/danryan\/hal\"\n)\n\nvar fatherHandler = hear(`your father was (a|an) (.*)`, \"\", \"\", func(res *hal.Response) error {\n\treturn res.Send(fmt.Sprintf(\"%s, well your father was a hampster, and your mother spelled of elderberries!\", res.UserName()))\n})\n\nvar whoBackHandler = hear(`who has your back(\\?)?`, \"\", \"\", func(res *hal.Response) error {\n\treturn res.Send(fmt.Sprintf(\"%s has my back!\", res.UserName()))\n})\n<commit_msg>Fix typo s\/spelled\/smelled\/g<commit_after>\/\/ Copyright 2014 Chadev. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/danryan\/hal\"\n)\n\nvar fatherHandler = hear(`your father was (a|an) (.*)`, \"\", \"\", func(res *hal.Response) error {\n\treturn res.Send(fmt.Sprintf(\"%s, well your father was a hampster, and your mother smelled of elderberries!\", res.UserName()))\n})\n\nvar whoBackHandler = hear(`who has your back(\\?)?`, \"\", \"\", func(res *hal.Response) error {\n\treturn res.Send(fmt.Sprintf(\"%s has my back!\", res.UserName()))\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ 5 october 2014\npackage main\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sort\"\n\t\"io\"\n\t\"archive\/zip\"\n\t\"compress\/flate\"\n\t\"bytes\"\n\n\t\"github.com\/andlabs\/ohv\/mshi\"\n\t\"golang.org\/x\/net\/html\"\n\t\"github.com\/andlabs\/ohv\/internal\/ui\"\n)\n\nconst msxhelpURLScheme = \"ms-xhelp\"\n\ntype MSHI struct {\n\tdir\t\t\tstring\n\tcontainers\t[][]*mshi.ContainerPath\n\tassets\t\t[][]*mshi.AssetData\n\ttopics\t\tmap[string]*MSHITopic\t\t\/\/ maps asset IDs; these are case-insensitive, so are stored here lowercase\n\tbooks\t\t[]Topic\n\torphans\t\t[]Topic\n}\n\nfunc (m *MSHI) Name() string {\n\treturn \"All Microsoft Help Viewer-format documentation\"\n}\n\nfunc (m *MSHI) Books() []Topic {\n\treturn m.books\n}\n\nfunc (m *MSHI) Orphans() []Topic {\n\treturn m.orphans\n}\n\nfunc (m *MSHI) Lookup(url *url.URL) Topic {\n\tid := url.Query().Get(\"Id\")\n\tid = strings.ToLower(id)\n\treturn m.topics[id]\n}\n\nfunc OpenMSHI(dir string) (*MSHI, error) {\n\tm := new(MSHI)\n\tm.dir = dir\n\n\t\/\/ first read all indices\n\terr := m.readAllIndices()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ then produce a map that goes from ID -> topic\n\tm.collectTopics()\n\n\t\/\/ now figure out the hierarchy, books, and orphans\n\tm.buildHierarchy()\n\n\t\/\/ now sort all children\n\tm.sortAllChildren()\n\n\treturn m, nil\n}\n\nfunc (m *MSHI) readAllIndices() error {\n\treturn filepath.Walk(m.dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ TODO subdirectories\n\t\tif strings.ToLower(filepath.Ext(path)) != \".mshi\" {\n\t\t\treturn nil\n\t\t}\n\t\tof, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf := mshi.Open(of)\n\t\tif f.Err() != nil {\n\t\t\treturn f.Err()\n\t\t}\n\t\tc := f.ReadContainerPaths()\n\t\tif f.Err() != nil {\n\t\t\treturn f.Err()\n\t\t}\n\t\ta := f.ReadAssetDatas()\n\t\tif f.Err() != nil {\n\t\t\treturn f.Err()\n\t\t}\n\t\tof.Close()\n\t\tm.containers = append(m.containers, c)\n\t\tm.assets = append(m.assets, a)\n\t\treturn nil\n\t})\n}\n\nfunc (m *MSHI) collectTopics() {\n\t\/\/ TODO deal with versions sensibly\n\tm.topics = make(map[string]*MSHITopic)\n\tfor container, aa := range m.assets {\n\t\tfor _, a := range aa {\n\t\t\t\/\/ TODO strings.ToLower() this?\n\t\t\tif m.topics[a.ID] != nil && m.topics[a.ID].asset.Version > a.Version {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm.topics[strings.ToLower(a.ID)] = &MSHITopic{\n\t\t\t\tdir:\t\t\tm.dir,\n\t\t\t\tcontainers:\tm.containers[container],\n\t\t\t\tasset:\t\ta,\n\t\t\t\tsource:\t\tm,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *MSHI) buildHierarchy() {\n\t\/\/ TODO are parents case-insensitive?\n\tfor _, v := range m.topics {\n\t\tif v.asset.ParentID == \"-1\" {\t\t\/\/ is top-level\n\t\t\tm.books = append(m.books, v)\n\t\t\tcontinue\n\t\t}\n\t\tparent, ok := m.topics[strings.ToLower(v.asset.ParentID)]\n\t\tif ok {\t\t\t\/\/ has parent\n\t\t\tparent.children = append(parent.children, v)\n\t\t\tv.parent = parent\n\t\t\tcontinue\n\t\t}\n\t\tm.orphans = append(m.orphans, v)\t\t\t\/\/ parent elsewhere or missing\n\t}\n}\n\nfunc (m *MSHI) sortAllChildren() {\n\tfor _, t := range m.topics {\n\t\tsort.Sort(TopicSorter(t.children))\n\t}\n}\n\ntype MSHITopic struct {\n\tdir\t\t\tstring\n\tcontainers\t[]*mshi.ContainerPath\n\tasset\t\t\t*mshi.AssetData\n\tparent\t\tTopic\n\tchildren\t\t[]Topic\n\tsource\t\tHelpSource\n}\n\nfunc (m *MSHITopic) Name() string {\n\treturn m.asset.Title\n}\n\n\/\/ TODO anchors - does the help viewer guess the anchor? because I don't see anything in the HTML to suggest that (unless x\/net\/html is stripping it?)\nfunc (m *MSHITopic) Prepare() (*Prepared, error) {\n\tvar r io.Reader\n\n\tmshc := filepath.Join(m.dir, m.containers[m.asset.ContainerPath].Filename)\n\n\t_, err := StartTempDir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ first get the HTML\n\tsrc, err := os.Open(mshc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = src.Seek(int64(m.asset.CompressedDataOffset), 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr = io.LimitReader(src, int64(m.asset.CompressedSize))\n\tswitch m.asset.CompressionMethod {\n\tcase zip.Store:\n\t\t\/\/ do nothing\n\tcase zip.Deflate:\n\t\tr = flate.NewReader(r)\n\tdefault:\n\t\treturn nil, zip.ErrAlgorithm\n\t}\n\t\/\/ we need to use two buffers here because reading from a bytes.Buffer removes those bytes :(\n\t\/\/ TODO use the file instead\n\tfilebuf := new(bytes.Buffer)\n\thtmlbuf := new(bytes.Buffer)\n\tmulti := io.MultiWriter(filebuf, htmlbuf)\n\t_, err = io.Copy(multi, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ now generate the temporary HTML file\n\thtmlfile, err := TempFile(\"topic.html\", filebuf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ now we need to extract the images\n\tsrc.Close()\n\tzipfile, err := zip.OpenReader(mshc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer zipfile.Close()\n\tfiles := make(map[string]*zip.File)\n\tfor _, f := range zipfile.File {\n\t\tfiles[strings.ToLower(f.Name)] = f\n\t}\n\tt := html.NewTokenizer(htmlbuf)\n\tfor {\n\t\ttt := t.Next()\n\t\tif tt == html.ErrorToken {\n\t\t\terr := t.Err()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\ttok := t.Token()\n\t\tswitch tok.Type {\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\tif tok.Data != \"img\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor _, a := range tok.Attr {\n\t\t\t\tif a.Key != \"src\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfilename := strings.ToLower(a.Val)\n\t\t\t\tr, err := files[filename].Open()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/ note our use of a.Val here\n\t\t\t\t_, err = TempFile(a.Val, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tr.Close()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &Prepared{\n\t\tPath:\t\thtmlfile,\n\t}, nil\n}\n\nfunc (m *MSHITopic) Parent() Topic {\n\treturn m.parent\n}\n\nfunc (m *MSHITopic) Children() []Topic {\n\treturn m.children\n}\n\nfunc (m *MSHITopic) Source() HelpSource {\n\treturn m.source\n}\n\nfunc (m *MSHITopic) Less(t Topic) bool {\n\ttt := t.(*MSHITopic)\n\treturn m.asset.Order < tt.asset.Order\n}\n\nfunc (m *MSHITopic) TreeNodeText() string {\n\treturn m.Name()\n}\n\nfunc (m *MSHITopic) TreeNodeParent() ui.TreeNode {\n\treturn m.Parent()\n}\n\n\/\/ TODO somehow integrate this and the one in apple.go with the rest of Topic\nfunc (m *MSHITopic) TreeNodeChildren() []ui.TreeNode {\n\tif len(m.children) == 0 {\n\t\treturn nil\n\t}\n\tc := make([]ui.TreeNode, len(m.children))\n\tfor i, n := range m.children {\n\t\tc[i] = n\n\t}\n\treturn c\n}\n<commit_msg>Halved the time it took to read all the MSHI files by making the process concurrent. Nice!<commit_after>\/\/ 5 october 2014\npackage main\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sort\"\n\t\"io\"\n\t\"archive\/zip\"\n\t\"compress\/flate\"\n\t\"bytes\"\n\n\t\"github.com\/andlabs\/ohv\/mshi\"\n\t\"golang.org\/x\/net\/html\"\n\t\"github.com\/andlabs\/ohv\/internal\/ui\"\n)\n\nconst msxhelpURLScheme = \"ms-xhelp\"\n\ntype MSHI struct {\n\tdir\t\t\tstring\n\tcontainers\t[][]*mshi.ContainerPath\n\tassets\t\t[][]*mshi.AssetData\n\ttopics\t\tmap[string]*MSHITopic\t\t\/\/ maps asset IDs; these are case-insensitive, so are stored here lowercase\n\tbooks\t\t[]Topic\n\torphans\t\t[]Topic\n}\n\nfunc (m *MSHI) Name() string {\n\treturn \"All Microsoft Help Viewer-format documentation\"\n}\n\nfunc (m *MSHI) Books() []Topic {\n\treturn m.books\n}\n\nfunc (m *MSHI) Orphans() []Topic {\n\treturn m.orphans\n}\n\nfunc (m *MSHI) Lookup(url *url.URL) Topic {\n\tid := url.Query().Get(\"Id\")\n\tid = strings.ToLower(id)\n\treturn m.topics[id]\n}\n\nfunc OpenMSHI(dir string) (*MSHI, error) {\n\tm := new(MSHI)\n\tm.dir = dir\n\n\t\/\/ first read all indices\n\terr := m.readAllIndices()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ then produce a map that goes from ID -> topic\n\tm.collectTopics()\n\n\t\/\/ now figure out the hierarchy, books, and orphans\n\tm.buildHierarchy()\n\n\t\/\/ now sort all children\n\tm.sortAllChildren()\n\n\treturn m, nil\n}\n\ntype mshiFileResult struct {\n\tc\t[]*mshi.ContainerPath\n\ta\t[]*mshi.AssetData\n\terr\terror\n}\n\nfunc (m *MSHI) readOneIndex(path string, res chan<- *mshiFileResult) {\n\tr := new(mshiFileResult)\n\tdefer func() {\n\t\tres <- r\n\t}()\n\tof, err := os.Open(path)\n\tif err != nil {\n\t\tr.err = err\n\t\treturn\n\t}\n\tdefer of.Close()\n\tf := mshi.Open(of)\n\tif f.Err() != nil {\n\t\tr.err = f.Err()\n\t\treturn\n\t}\n\tc := f.ReadContainerPaths()\n\tif f.Err() != nil {\n\t\tr.err = f.Err()\n\t\treturn\n\t}\n\ta := f.ReadAssetDatas()\n\tif f.Err() != nil {\n\t\tr.err = f.Err()\n\t\treturn\n\t}\n\tr.c = c\n\tr.a = a\n}\n\nfunc (m *MSHI) readAllIndices() error {\n\tn := 0\n\tres := make(chan *mshiFileResult)\n\terr := filepath.Walk(m.dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ TODO subdirectories\n\t\tif strings.ToLower(filepath.Ext(path)) != \".mshi\" {\n\t\t\treturn nil\n\t\t}\n\t\tgo m.readOneIndex(path, res)\n\t\tn++\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor n != 0 {\n\t\tr := <-res\n\t\tif r.err != nil {\n\t\t\treturn r.err\n\t\t}\n\t\tm.containers = append(m.containers, r.c)\n\t\tm.assets = append(m.assets, r.a)\n\t\tn--\n\t}\n\tclose(res)\n\treturn nil\n}\n\nfunc (m *MSHI) collectTopics() {\n\t\/\/ TODO deal with versions sensibly\n\tm.topics = make(map[string]*MSHITopic)\n\tfor container, aa := range m.assets {\n\t\tfor _, a := range aa {\n\t\t\t\/\/ TODO strings.ToLower() this?\n\t\t\tif m.topics[a.ID] != nil && m.topics[a.ID].asset.Version > a.Version {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm.topics[strings.ToLower(a.ID)] = &MSHITopic{\n\t\t\t\tdir:\t\t\tm.dir,\n\t\t\t\tcontainers:\tm.containers[container],\n\t\t\t\tasset:\t\ta,\n\t\t\t\tsource:\t\tm,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *MSHI) buildHierarchy() {\n\t\/\/ TODO are parents case-insensitive?\n\tfor _, v := range m.topics {\n\t\tif v.asset.ParentID == \"-1\" {\t\t\/\/ is top-level\n\t\t\tm.books = append(m.books, v)\n\t\t\tcontinue\n\t\t}\n\t\tparent, ok := m.topics[strings.ToLower(v.asset.ParentID)]\n\t\tif ok {\t\t\t\/\/ has parent\n\t\t\tparent.children = append(parent.children, v)\n\t\t\tv.parent = parent\n\t\t\tcontinue\n\t\t}\n\t\tm.orphans = append(m.orphans, v)\t\t\t\/\/ parent elsewhere or missing\n\t}\n}\n\nfunc (m *MSHI) sortAllChildren() {\n\tfor _, t := range m.topics {\n\t\tsort.Sort(TopicSorter(t.children))\n\t}\n}\n\ntype MSHITopic struct {\n\tdir\t\t\tstring\n\tcontainers\t[]*mshi.ContainerPath\n\tasset\t\t\t*mshi.AssetData\n\tparent\t\tTopic\n\tchildren\t\t[]Topic\n\tsource\t\tHelpSource\n}\n\nfunc (m *MSHITopic) Name() string {\n\treturn m.asset.Title\n}\n\n\/\/ TODO anchors - does the help viewer guess the anchor? because I don't see anything in the HTML to suggest that (unless x\/net\/html is stripping it?)\nfunc (m *MSHITopic) Prepare() (*Prepared, error) {\n\tvar r io.Reader\n\n\tmshc := filepath.Join(m.dir, m.containers[m.asset.ContainerPath].Filename)\n\n\t_, err := StartTempDir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ first get the HTML\n\tsrc, err := os.Open(mshc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = src.Seek(int64(m.asset.CompressedDataOffset), 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr = io.LimitReader(src, int64(m.asset.CompressedSize))\n\tswitch m.asset.CompressionMethod {\n\tcase zip.Store:\n\t\t\/\/ do nothing\n\tcase zip.Deflate:\n\t\tr = flate.NewReader(r)\n\tdefault:\n\t\treturn nil, zip.ErrAlgorithm\n\t}\n\t\/\/ we need to use two buffers here because reading from a bytes.Buffer removes those bytes :(\n\t\/\/ TODO use the file instead\n\tfilebuf := new(bytes.Buffer)\n\thtmlbuf := new(bytes.Buffer)\n\tmulti := io.MultiWriter(filebuf, htmlbuf)\n\t_, err = io.Copy(multi, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ now generate the temporary HTML file\n\thtmlfile, err := TempFile(\"topic.html\", filebuf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ now we need to extract the images\n\tsrc.Close()\n\tzipfile, err := zip.OpenReader(mshc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer zipfile.Close()\n\tfiles := make(map[string]*zip.File)\n\tfor _, f := range zipfile.File {\n\t\tfiles[strings.ToLower(f.Name)] = f\n\t}\n\tt := html.NewTokenizer(htmlbuf)\n\tfor {\n\t\ttt := t.Next()\n\t\tif tt == html.ErrorToken {\n\t\t\terr := t.Err()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\ttok := t.Token()\n\t\tswitch tok.Type {\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\tif tok.Data != \"img\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor _, a := range tok.Attr {\n\t\t\t\tif a.Key != \"src\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfilename := strings.ToLower(a.Val)\n\t\t\t\tr, err := files[filename].Open()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/ note our use of a.Val here\n\t\t\t\t_, err = TempFile(a.Val, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tr.Close()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &Prepared{\n\t\tPath:\t\thtmlfile,\n\t}, nil\n}\n\nfunc (m *MSHITopic) Parent() Topic {\n\treturn m.parent\n}\n\nfunc (m *MSHITopic) Children() []Topic {\n\treturn m.children\n}\n\nfunc (m *MSHITopic) Source() HelpSource {\n\treturn m.source\n}\n\nfunc (m *MSHITopic) Less(t Topic) bool {\n\ttt := t.(*MSHITopic)\n\treturn m.asset.Order < tt.asset.Order\n}\n\nfunc (m *MSHITopic) TreeNodeText() string {\n\treturn m.Name()\n}\n\nfunc (m *MSHITopic) TreeNodeParent() ui.TreeNode {\n\treturn m.Parent()\n}\n\n\/\/ TODO somehow integrate this and the one in apple.go with the rest of Topic\nfunc (m *MSHITopic) TreeNodeChildren() []ui.TreeNode {\n\tif len(m.children) == 0 {\n\t\treturn nil\n\t}\n\tc := make([]ui.TreeNode, len(m.children))\n\tfor i, n := range m.children {\n\t\tc[i] = n\n\t}\n\treturn c\n}\n<|endoftext|>"} {"text":"<commit_before>package oci8\n\n\/*\n#include <oci.h>\n#include <stdlib.h>\n#include <string.h>\n\n#cgo pkg-config: oci8\n*\/\nimport \"C\"\nimport (\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\tsql.Register(\"oci8\", &OCI8Driver{})\n}\n\ntype OCI8Driver struct {\n}\n\ntype OCI8Conn struct {\n\tsvc unsafe.Pointer\n\tenv unsafe.Pointer\n\terr unsafe.Pointer\n}\n\ntype OCI8Tx struct {\n\tc *OCI8Conn\n}\n\nfunc (tx *OCI8Tx) Commit() error {\n\tif err := tx.c.exec(\"COMMIT\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (tx *OCI8Tx) Rollback() error {\n\tif err := tx.c.exec(\"ROLLBACK\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *OCI8Conn) exec(cmd string) error {\n\tstmt, err := c.Prepare(cmd)\n\tif err == nil {\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec(nil)\n\t}\n\treturn err\n}\n\nfunc (c *OCI8Conn) Begin() (driver.Tx, error) {\n\tif err := c.exec(\"BEGIN\"); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &OCI8Tx{c}, nil\n}\n\nfunc (d *OCI8Driver) Open(dsn string) (driver.Conn, error) {\n\tvar conn OCI8Conn\n\ttoken := strings.SplitN(dsn, \"@\", 2)\n\tuserpass := strings.SplitN(token[0], \"\/\", 2)\n\n\trv := C.OCIInitialize(\n\t\tC.OCI_DEFAULT,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\n\trv = C.OCIEnvInit(\n\t\t(**C.OCIEnv)(unsafe.Pointer(&conn.env)),\n\t\tC.OCI_DEFAULT,\n\t\t0,\n\t\tnil)\n\n\trv = C.OCIHandleAlloc(\n\t\tconn.env,\n\t\t&conn.err,\n\t\tC.OCI_HTYPE_ERROR,\n\t\t0,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\n\tvar phost *C.char\n\tphostlen := C.size_t(0)\n\tif len(token) > 1 {\n\t\tphost = C.CString(token[1])\n\t\tdefer C.free(unsafe.Pointer(phost))\n\t\tphostlen = C.strlen(phost)\n\t}\n\tpuser := C.CString(userpass[0])\n\tdefer C.free(unsafe.Pointer(puser))\n\tppass := C.CString(userpass[1])\n\tdefer C.free(unsafe.Pointer(ppass))\n\n\trv = C.OCILogon(\n\t\t(*C.OCIEnv)(conn.env),\n\t\t(*C.OCIError)(conn.err),\n\t\t(**C.OCIServer)(unsafe.Pointer(&conn.svc)),\n\t\t(*C.OraText)(unsafe.Pointer(puser)),\n\t\tC.ub4(C.strlen(puser)),\n\t\t(*C.OraText)(unsafe.Pointer(ppass)),\n\t\tC.ub4(C.strlen(ppass)),\n\t\t(*C.OraText)(unsafe.Pointer(phost)),\n\t\tC.ub4(phostlen))\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\treturn &conn, nil\n}\n\nfunc (c *OCI8Conn) Close() error {\n\trv := C.OCILogoff(\n\t\t(*C.OCIServer)(c.svc),\n\t\t(*C.OCIError)(c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn ociGetError(c.err)\n\t}\n\n\tC.OCIHandleFree(\n\t\tc.env,\n\t\tC.OCI_HTYPE_ENV)\n\n\tc.svc = nil\n\tc.env = nil\n\tc.err = nil\n\treturn nil\n}\n\ntype OCI8Stmt struct {\n\tc *OCI8Conn\n\ts unsafe.Pointer\n\tclosed bool\n}\n\nfunc (c *OCI8Conn) Prepare(query string) (driver.Stmt, error) {\n\tpquery := C.CString(query)\n\tdefer C.free(unsafe.Pointer(pquery))\n\tvar s unsafe.Pointer\n\n\trv := C.OCIHandleAlloc(\n\t\tc.env,\n\t\t&s,\n\t\tC.OCI_HTYPE_STMT,\n\t\t0,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(c.err)\n\t}\n\n\trv = C.OCIStmtPrepare(\n\t\t(*C.OCIStmt)(s),\n\t\t(*C.OCIError)(c.err),\n\t\t(*C.OraText)(unsafe.Pointer(pquery)),\n\t\tC.ub4(C.strlen(pquery)),\n\t\tC.ub4(C.OCI_NTV_SYNTAX),\n\t\tC.ub4(C.OCI_DEFAULT))\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(c.err)\n\t}\n\n\treturn &OCI8Stmt{c: c, s: s}, nil\n}\n\nfunc (s *OCI8Stmt) Close() error {\n\tif s.closed {\n\t\treturn nil\n\t}\n\ts.closed = true\n\n\tC.OCIHandleFree(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT)\n\ts.s = nil\n\n\treturn nil\n}\n\nfunc (s *OCI8Stmt) NumInput() int {\n\tvar num C.int\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&num),\n\t\tnil,\n\t\tC.OCI_ATTR_BIND_COUNT,\n\t\t(*C.OCIError)(s.c.err))\n\treturn int(num)\n}\n\nfunc (s *OCI8Stmt) bind(args []driver.Value) error {\n\tif args == nil {\n\t\treturn nil\n\t}\n\n\tvar bp *C.OCIBind\n\tfor i, v := range args {\n\t\tb := []byte(fmt.Sprintf(\"%v\", v))\n\t\tb = append(b, 0)\n\t\trv := C.OCIBindByPos(\n\t\t\t(*C.OCIStmt)(s.s),\n\t\t\t&bp,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\tC.ub4(i+1),\n\t\t\tunsafe.Pointer(&b[0]),\n\t\t\tC.sb4(len(b)),\n\t\t\tC.SQLT_STR,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\t0,\n\t\t\tnil,\n\t\t\tC.OCI_DEFAULT)\n\n\t\tif rv == C.OCI_ERROR {\n\t\t\treturn ociGetError(s.c.err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *OCI8Stmt) Query(args []driver.Value) (driver.Rows, error) {\n\tif err := s.bind(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar t C.int\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_STMT_TYPE,\n\t\t(*C.OCIError)(s.c.err))\n\titer := C.ub4(1)\n\tif t == C.OCI_STMT_SELECT {\n\t\titer = 0\n\t}\n\n\trv := C.OCIStmtExecute(\n\t\t(*C.OCIServer)(s.c.svc),\n\t\t(*C.OCIStmt)(s.s),\n\t\t(*C.OCIError)(s.c.err),\n\t\titer,\n\t\t0,\n\t\tnil,\n\t\tnil,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(s.c.err)\n\t}\n\n\tvar rc C.ub2\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&rc),\n\t\tnil,\n\t\tC.OCI_ATTR_PARAM_COUNT,\n\t\t(*C.OCIError)(s.c.err))\n\n\toci8cols := make([]oci8col, int(rc))\n\tfor i := 0; i < int(rc); i++ {\n\t\tvar p unsafe.Pointer\n\t\tvar np *C.char\n\t\tvar ns C.ub4\n\t\tvar tp C.ub2\n\t\tvar lp C.ub2\n\t\tC.OCIParamGet(\n\t\t\ts.s,\n\t\t\tC.OCI_HTYPE_STMT,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\t(*unsafe.Pointer)(unsafe.Pointer(&p)),\n\t\t\tC.ub4(i+1))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&tp),\n\t\t\tnil,\n\t\t\tC.OCI_ATTR_DATA_TYPE,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&np),\n\t\t\t&ns,\n\t\t\tC.OCI_ATTR_NAME,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&lp),\n\t\t\tnil,\n\t\t\tC.OCI_ATTR_DATA_SIZE,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\toci8cols[i].name = string((*[1 << 30]byte)(unsafe.Pointer(np))[0:int(ns)])\n\t\toci8cols[i].kind = int(tp)\n\t\toci8cols[i].size = int(lp)\n\t\toci8cols[i].pbuf = make([]byte, int(lp)+1)\n\n\t\tvar defp *C.OCIDefine\n\t\trv = C.OCIDefineByPos(\n\t\t\t(*C.OCIStmt)(s.s),\n\t\t\t&defp,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\tC.ub4(i+1),\n\t\t\tunsafe.Pointer(&oci8cols[i].pbuf[0]),\n\t\t\tC.sb4(lp+1),\n\t\t\tC.SQLT_CHR,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tC.OCI_DEFAULT)\n\t\tif rv == C.OCI_ERROR {\n\t\t\treturn nil, ociGetError(s.c.err)\n\t\t}\n\t}\n\treturn &OCI8Rows{s, oci8cols, false}, nil\n}\n\ntype OCI8Result struct {\n\ts *OCI8Stmt\n}\n\nfunc (r *OCI8Result) LastInsertId() (int64, error) {\n\tvar t C.ub4\n\trv := C.OCIAttrGet(\n\t\tr.s.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_ROWID,\n\t\t(*C.OCIError)(r.s.c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn 0, ociGetError(r.s.c.err)\n\t}\n\treturn int64(t), nil\n}\n\nfunc (r *OCI8Result) RowsAffected() (int64, error) {\n\tvar t C.ub4\n\trv := C.OCIAttrGet(\n\t\tr.s.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_ROW_COUNT,\n\t\t(*C.OCIError)(r.s.c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn 0, ociGetError(r.s.c.err)\n\t}\n\treturn int64(t), nil\n}\n\nfunc (s *OCI8Stmt) Exec(args []driver.Value) (driver.Result, error) {\n\tif err := s.bind(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\trv := C.OCIStmtExecute(\n\t\t(*C.OCIServer)(s.c.svc),\n\t\t(*C.OCIStmt)(s.s),\n\t\t(*C.OCIError)(s.c.err),\n\t\t1,\n\t\t0,\n\t\tnil,\n\t\tnil,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(s.c.err)\n\t}\n\treturn &OCI8Result{s}, nil\n}\n\ntype oci8col struct {\n\tname string\n\tkind int\n\tsize int\n\tpbuf []byte\n}\n\ntype OCI8Rows struct {\n\ts *OCI8Stmt\n\tcols []oci8col\n\te bool\n}\n\nfunc (rc *OCI8Rows) Close() error {\n\treturn rc.s.Close()\n}\n\nfunc (rc *OCI8Rows) Columns() []string {\n\tcols := make([]string, len(rc.cols))\n\tfor i, col := range rc.cols {\n\t\tcols[i] = col.name\n\t}\n\treturn cols\n}\n\nfunc (rc *OCI8Rows) Next(dest []driver.Value) error {\n\trv := C.OCIStmtFetch(\n\t\t(*C.OCIStmt)(rc.s.s),\n\t\t(*C.OCIError)(rc.s.c.err),\n\t\t1,\n\t\tC.OCI_FETCH_NEXT,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\treturn ociGetError(rc.s.c.err)\n\t}\n\n\tif rv == C.OCI_NO_DATA {\n\t\treturn io.EOF\n\t}\n\n\tfor i := range dest {\n\t\tdest[i] = string(rc.cols[i].pbuf)\n\t}\n\n\treturn nil\n}\n\nfunc ociGetError(err unsafe.Pointer) error {\n\tvar errcode C.sb4\n\tvar errbuff [512]C.char\n\tC.OCIErrorGet(\n\t\terr,\n\t\t1,\n\t\tnil,\n\t\t&errcode,\n\t\t(*C.OraText)(unsafe.Pointer(&errbuff[0])),\n\t\t512,\n\t\tC.OCI_HTYPE_ERROR)\n\ts := C.GoString(&errbuff[0])\n\t\/\/println(s)\n\treturn errors.New(s)\n}\n<commit_msg>ignore 'ORA-01405: fetched column value is NULL' according to http:\/\/ora-01405.ora-code.com\/<commit_after>package oci8\n\n\/*\n#include <oci.h>\n#include <stdlib.h>\n#include <string.h>\n\n#cgo pkg-config: oci8\n*\/\nimport \"C\"\nimport (\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\tsql.Register(\"oci8\", &OCI8Driver{})\n}\n\ntype OCI8Driver struct {\n}\n\ntype OCI8Conn struct {\n\tsvc unsafe.Pointer\n\tenv unsafe.Pointer\n\terr unsafe.Pointer\n}\n\ntype OCI8Tx struct {\n\tc *OCI8Conn\n}\n\nfunc (tx *OCI8Tx) Commit() error {\n\tif err := tx.c.exec(\"COMMIT\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (tx *OCI8Tx) Rollback() error {\n\tif err := tx.c.exec(\"ROLLBACK\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *OCI8Conn) exec(cmd string) error {\n\tstmt, err := c.Prepare(cmd)\n\tif err == nil {\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec(nil)\n\t}\n\treturn err\n}\n\nfunc (c *OCI8Conn) Begin() (driver.Tx, error) {\n\tif err := c.exec(\"BEGIN\"); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &OCI8Tx{c}, nil\n}\n\nfunc (d *OCI8Driver) Open(dsn string) (driver.Conn, error) {\n\tvar conn OCI8Conn\n\ttoken := strings.SplitN(dsn, \"@\", 2)\n\tuserpass := strings.SplitN(token[0], \"\/\", 2)\n\n\trv := C.OCIInitialize(\n\t\tC.OCI_DEFAULT,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\trv = C.OCIEnvInit(\n\t\t(**C.OCIEnv)(unsafe.Pointer(&conn.env)),\n\t\tC.OCI_DEFAULT,\n\t\t0,\n\t\tnil)\n\n\trv = C.OCIHandleAlloc(\n\t\tconn.env,\n\t\t&conn.err,\n\t\tC.OCI_HTYPE_ERROR,\n\t\t0,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\tvar phost *C.char\n\tphostlen := C.size_t(0)\n\tif len(token) > 1 {\n\t\tphost = C.CString(token[1])\n\t\tdefer C.free(unsafe.Pointer(phost))\n\t\tphostlen = C.strlen(phost)\n\t}\n\tpuser := C.CString(userpass[0])\n\tdefer C.free(unsafe.Pointer(puser))\n\tppass := C.CString(userpass[1])\n\tdefer C.free(unsafe.Pointer(ppass))\n\n\trv = C.OCILogon(\n\t\t(*C.OCIEnv)(conn.env),\n\t\t(*C.OCIError)(conn.err),\n\t\t(**C.OCIServer)(unsafe.Pointer(&conn.svc)),\n\t\t(*C.OraText)(unsafe.Pointer(puser)),\n\t\tC.ub4(C.strlen(puser)),\n\t\t(*C.OraText)(unsafe.Pointer(ppass)),\n\t\tC.ub4(C.strlen(ppass)),\n\t\t(*C.OraText)(unsafe.Pointer(phost)),\n\t\tC.ub4(phostlen))\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(conn.err)\n\t}\n\n\treturn &conn, nil\n}\n\nfunc (c *OCI8Conn) Close() error {\n\trv := C.OCILogoff(\n\t\t(*C.OCIServer)(c.svc),\n\t\t(*C.OCIError)(c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn ociGetError(c.err)\n\t}\n\n\tC.OCIHandleFree(\n\t\tc.env,\n\t\tC.OCI_HTYPE_ENV)\n\n\tc.svc = nil\n\tc.env = nil\n\tc.err = nil\n\treturn nil\n}\n\ntype OCI8Stmt struct {\n\tc *OCI8Conn\n\ts unsafe.Pointer\n\tclosed bool\n}\n\nfunc (c *OCI8Conn) Prepare(query string) (driver.Stmt, error) {\n\tpquery := C.CString(query)\n\tdefer C.free(unsafe.Pointer(pquery))\n\tvar s unsafe.Pointer\n\n\trv := C.OCIHandleAlloc(\n\t\tc.env,\n\t\t&s,\n\t\tC.OCI_HTYPE_STMT,\n\t\t0,\n\t\tnil)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(c.err)\n\t}\n\n\trv = C.OCIStmtPrepare(\n\t\t(*C.OCIStmt)(s),\n\t\t(*C.OCIError)(c.err),\n\t\t(*C.OraText)(unsafe.Pointer(pquery)),\n\t\tC.ub4(C.strlen(pquery)),\n\t\tC.ub4(C.OCI_NTV_SYNTAX),\n\t\tC.ub4(C.OCI_DEFAULT))\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(c.err)\n\t}\n\n\treturn &OCI8Stmt{c: c, s: s}, nil\n}\n\nfunc (s *OCI8Stmt) Close() error {\n\tif s.closed {\n\t\treturn nil\n\t}\n\ts.closed = true\n\n\tC.OCIHandleFree(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT)\n\ts.s = nil\n\n\treturn nil\n}\n\nfunc (s *OCI8Stmt) NumInput() int {\n\tvar num C.int\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&num),\n\t\tnil,\n\t\tC.OCI_ATTR_BIND_COUNT,\n\t\t(*C.OCIError)(s.c.err))\n\treturn int(num)\n}\n\nfunc (s *OCI8Stmt) bind(args []driver.Value) error {\n\tif args == nil {\n\t\treturn nil\n\t}\n\n\tvar bp *C.OCIBind\n\tfor i, v := range args {\n\t\tb := []byte(fmt.Sprintf(\"%v\", v))\n\t\tb = append(b, 0)\n\t\trv := C.OCIBindByPos(\n\t\t\t(*C.OCIStmt)(s.s),\n\t\t\t&bp,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\tC.ub4(i+1),\n\t\t\tunsafe.Pointer(&b[0]),\n\t\t\tC.sb4(len(b)),\n\t\t\tC.SQLT_STR,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\t0,\n\t\t\tnil,\n\t\t\tC.OCI_DEFAULT)\n\n\t\tif rv == C.OCI_ERROR {\n\t\t\treturn ociGetError(s.c.err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *OCI8Stmt) Query(args []driver.Value) (driver.Rows, error) {\n\tif err := s.bind(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar t C.int\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_STMT_TYPE,\n\t\t(*C.OCIError)(s.c.err))\n\titer := C.ub4(1)\n\tif t == C.OCI_STMT_SELECT {\n\t\titer = 0\n\t}\n\n\trv := C.OCIStmtExecute(\n\t\t(*C.OCIServer)(s.c.svc),\n\t\t(*C.OCIStmt)(s.s),\n\t\t(*C.OCIError)(s.c.err),\n\t\titer,\n\t\t0,\n\t\tnil,\n\t\tnil,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(s.c.err)\n\t}\n\n\tvar rc C.ub2\n\tC.OCIAttrGet(\n\t\ts.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&rc),\n\t\tnil,\n\t\tC.OCI_ATTR_PARAM_COUNT,\n\t\t(*C.OCIError)(s.c.err))\n\n\toci8cols := make([]oci8col, int(rc))\n\tfor i := 0; i < int(rc); i++ {\n\t\tvar p unsafe.Pointer\n\t\tvar np *C.char\n\t\tvar ns C.ub4\n\t\tvar tp C.ub2\n\t\tvar lp C.ub2\n\t\tC.OCIParamGet(\n\t\t\ts.s,\n\t\t\tC.OCI_HTYPE_STMT,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\t(*unsafe.Pointer)(unsafe.Pointer(&p)),\n\t\t\tC.ub4(i+1))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&tp),\n\t\t\tnil,\n\t\t\tC.OCI_ATTR_DATA_TYPE,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&np),\n\t\t\t&ns,\n\t\t\tC.OCI_ATTR_NAME,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\tC.OCIAttrGet(\n\t\t\tp,\n\t\t\tC.OCI_DTYPE_PARAM,\n\t\t\tunsafe.Pointer(&lp),\n\t\t\tnil,\n\t\t\tC.OCI_ATTR_DATA_SIZE,\n\t\t\t(*C.OCIError)(s.c.err))\n\t\toci8cols[i].name = string((*[1 << 30]byte)(unsafe.Pointer(np))[0:int(ns)])\n\t\toci8cols[i].kind = int(tp)\n\t\toci8cols[i].size = int(lp)\n\t\toci8cols[i].pbuf = make([]byte, int(lp)+1)\n\n\t\tvar defp *C.OCIDefine\n\t\trv = C.OCIDefineByPos(\n\t\t\t(*C.OCIStmt)(s.s),\n\t\t\t&defp,\n\t\t\t(*C.OCIError)(s.c.err),\n\t\t\tC.ub4(i+1),\n\t\t\tunsafe.Pointer(&oci8cols[i].pbuf[0]),\n\t\t\tC.sb4(lp+1),\n\t\t\tC.SQLT_CHR,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tC.OCI_DEFAULT)\n\t\tif rv == C.OCI_ERROR {\n\t\t\treturn nil, ociGetError(s.c.err)\n\t\t}\n\t}\n\treturn &OCI8Rows{s, oci8cols, false}, nil\n}\n\ntype OCI8Result struct {\n\ts *OCI8Stmt\n}\n\nfunc (r *OCI8Result) LastInsertId() (int64, error) {\n\tvar t C.ub4\n\trv := C.OCIAttrGet(\n\t\tr.s.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_ROWID,\n\t\t(*C.OCIError)(r.s.c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn 0, ociGetError(r.s.c.err)\n\t}\n\treturn int64(t), nil\n}\n\nfunc (r *OCI8Result) RowsAffected() (int64, error) {\n\tvar t C.ub4\n\trv := C.OCIAttrGet(\n\t\tr.s.s,\n\t\tC.OCI_HTYPE_STMT,\n\t\tunsafe.Pointer(&t),\n\t\tnil,\n\t\tC.OCI_ATTR_ROW_COUNT,\n\t\t(*C.OCIError)(r.s.c.err))\n\tif rv == C.OCI_ERROR {\n\t\treturn 0, ociGetError(r.s.c.err)\n\t}\n\treturn int64(t), nil\n}\n\nfunc (s *OCI8Stmt) Exec(args []driver.Value) (driver.Result, error) {\n\tif err := s.bind(args); err != nil {\n\t\treturn nil, err\n\t}\n\n\trv := C.OCIStmtExecute(\n\t\t(*C.OCIServer)(s.c.svc),\n\t\t(*C.OCIStmt)(s.s),\n\t\t(*C.OCIError)(s.c.err),\n\t\t1,\n\t\t0,\n\t\tnil,\n\t\tnil,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\treturn nil, ociGetError(s.c.err)\n\t}\n\treturn &OCI8Result{s}, nil\n}\n\ntype oci8col struct {\n\tname string\n\tkind int\n\tsize int\n\tpbuf []byte\n}\n\ntype OCI8Rows struct {\n\ts *OCI8Stmt\n\tcols []oci8col\n\te bool\n}\n\nfunc (rc *OCI8Rows) Close() error {\n\treturn rc.s.Close()\n}\n\nfunc (rc *OCI8Rows) Columns() []string {\n\tcols := make([]string, len(rc.cols))\n\tfor i, col := range rc.cols {\n\t\tcols[i] = col.name\n\t}\n\treturn cols\n}\n\nfunc (rc *OCI8Rows) Next(dest []driver.Value) error {\n\trv := C.OCIStmtFetch(\n\t\t(*C.OCIStmt)(rc.s.s),\n\t\t(*C.OCIError)(rc.s.c.err),\n\t\t1,\n\t\tC.OCI_FETCH_NEXT,\n\t\tC.OCI_DEFAULT)\n\tif rv == C.OCI_ERROR {\n\t\terr := ociGetError(rc.s.c.err)\n\t\tif err.Error() != \"ORA-01405: fetched column value is NULL\\n\" {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif rv == C.OCI_NO_DATA {\n\t\treturn io.EOF\n\t}\n\n\tfor i := range dest {\n\t\tdest[i] = string(rc.cols[i].pbuf)\n\t}\n\n\treturn nil\n}\n\nfunc ociGetError(err unsafe.Pointer) error {\n\tvar errcode C.sb4\n\tvar errbuff [512]C.char\n\tC.OCIErrorGet(\n\t\terr,\n\t\t1,\n\t\tnil,\n\t\t&errcode,\n\t\t(*C.OraText)(unsafe.Pointer(&errbuff[0])),\n\t\t512,\n\t\tC.OCI_HTYPE_ERROR)\n\ts := C.GoString(&errbuff[0])\n\t\/\/println(s)\n\treturn errors.New(s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2012 Alexander Solovyov\n\/\/ under terms of ISC license\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tStateUnknown = iota\n\tStateChanged\n\tStateUnchanged\n\tStateIgnored\n)\n\ntype Page struct {\n\tPageHeader\n\n\tSite *Site `json:\"-\"`\n\tRule *Rule\n\tPattern string\n\tDeps PageSlice `json:\"-\"`\n\n\tSource string\n\tPath string\n\tModTime time.Time\n\n\tprocessed bool\n\tstate int\n\tcontent string\n\twasread bool \/\/ if content was read already\n}\n\ntype PageSlice []*Page\n\nfunc NewPage(site *Site, path string) *Page {\n\tstat, err := os.Stat(path)\n\terrhandle(err)\n\n\trelpath, err := filepath.Rel(site.Source, path)\n\terrhandle(err)\n\n\tpattern, rule := site.Rules.MatchedRule(relpath)\n\n\tpage := &Page{\n\t\tSite: site,\n\t\tRule: rule,\n\t\tPattern: pattern,\n\t\tSource: relpath,\n\t\tPath: relpath,\n\t\tModTime: stat.ModTime(),\n\t}\n\tpage.peek()\n\tdebug(\"Found page: %s; rule: %v\\n\",\n\t\tpage.Source, page.Rule)\n\treturn page\n}\n\nfunc (page *Page) Content() string {\n\tif !page.wasread {\n\t\tcontent, err := ioutil.ReadFile(page.FullPath())\n\t\terrhandle(err)\n\t\tpage.SetContent(string(content))\n\t\tpage.wasread = true\n\t}\n\treturn page.content\n}\n\nfunc (page *Page) SetContent(content string) {\n\tpage.content = content\n}\n\nfunc (page *Page) FullPath() string {\n\treturn filepath.Join(page.Site.Source, page.Source)\n}\n\nfunc (page *Page) OutputPath() string {\n\treturn filepath.Join(page.Site.Output, page.Path)\n}\n\nfunc (page *Page) Url() string {\n\turl := strings.Replace(page.Path, string(filepath.Separator), \"\/\", -1)\n\tif url == \"index.html\" {\n\t\treturn \"\"\n\t}\n\tif strings.HasSuffix(url, \"\/index.html\") {\n\t\treturn strings.TrimSuffix(url, \"\/index.html\") + \"\/\"\n\t}\n\treturn url\n}\n\nfunc (page *Page) UrlTo(other *Page) string {\n\treturn page.Rel(other.Url())\n}\n\nfunc (page *Page) Rel(path string) string {\n\troot := strings.Repeat(\"..\/\", strings.Count(page.Url(), \"\/\"))\n\tif path[0] == '\/' {\n\t\treturn root + path[1:]\n\t}\n\treturn root + path\n}\n\nfunc (page *Page) Is(path string) bool {\n\treturn page.Url() == path || page.Path == path\n}\n\n\/\/ Peek is used to run those processors which should be done before others can\n\/\/ find out about us. Two actual examples include 'config' and 'rename'\n\/\/ processors right now.\nfunc (page *Page) peek() {\n\tif page.Rule == nil {\n\t\treturn\n\t}\n\n\tfor _, name := range PreProcessors {\n\t\tcmd := page.Rule.MatchedCommand(name)\n\t\tif cmd != nil {\n\t\t\tProcessCommand(page, cmd)\n\t\t}\n\t}\n}\n\nfunc (page *Page) findDeps() {\n\tif page.Rule == nil {\n\t\treturn\n\t}\n\n\tdeps := make(PageSlice, 0)\n\tfor _, other := range page.Site.Pages {\n\t\tif other != page && page.Rule.IsDep(other) {\n\t\t\tdeps = append(deps, other)\n\t\t}\n\t}\n\tpage.Deps = deps\n}\n\nfunc (page *Page) Changed() bool {\n\tif opts.Force {\n\t\treturn true\n\t}\n\n\tif page.state == StateUnknown {\n\t\tpage.state = StateUnchanged\n\t\tdest, err := os.Stat(page.OutputPath())\n\n\t\tif err != nil || dest.ModTime().Before(page.ModTime) {\n\t\t\tpage.state = StateChanged\n\t\t} else {\n\t\t\tfor _, dep := range page.Deps {\n\t\t\t\tif dep.Changed() {\n\t\t\t\t\tpage.state = StateChanged\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn page.state == StateChanged\n}\n\nfunc (page *Page) Process() *Page {\n\tif page.processed || page.Rule == nil {\n\t\treturn page\n\t}\n\n\tpage.processed = true\n\tif page.Rule.Commands != nil {\n\t\tfor _, cmd := range page.Rule.Commands {\n\t\t\tif !cmd.MatchesAny(PreProcessors) {\n\t\t\t\tProcessCommand(page, &cmd)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn page\n}\n\nfunc (page *Page) WriteTo(writer io.Writer) (n int64, err error) {\n\tif page.Rule == nil {\n\t\treturn 0, nil\n\t}\n\n\tif !page.processed {\n\t\tpage.Process()\n\t}\n\n\tnint, err := writer.Write([]byte(page.Content()))\n\treturn int64(nint), err\n}\n\nfunc (page *Page) Render() (n int64, err error) {\n\tif page.Rule == nil {\n\t\treturn CopyFile(page.FullPath(), page.OutputPath())\n\t}\n\n\tfile, err := os.Create(page.OutputPath())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer file.Close()\n\n\treturn page.WriteTo(file)\n}\n\nfunc (page *Page) UrlMatches(regex string) bool {\n\tre, err := regexp.Compile(regex)\n\tif err != nil {\n\t\terrhandle(fmt.Errorf(\"Incorrect regex given to Page.UrlMatches: '%s' \", regex))\n\t}\n\treturn re.Match([]byte(page.Url()))\n}\n\n\/\/ PageSlice manipulation\n\nfunc (pages PageSlice) Get(i int) *Page { return pages[i] }\nfunc (pages PageSlice) First() *Page { return pages.Get(0) }\nfunc (pages PageSlice) Last() *Page { return pages.Get(len(pages) - 1) }\n\nfunc (pages PageSlice) Prev(cur *Page) *Page {\n\tfor i, page := range pages {\n\t\tif page == cur {\n\t\t\tif i == pages.Len() - 1 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn pages[i+1]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) Next(cur *Page) *Page {\n\tfor i, page := range pages {\n\t\tif page == cur {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn pages[i-1]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) Slice(from int, to int) PageSlice {\n\tlength := len(pages)\n\n\tif from > length {\n\t\tfrom = length\n\t}\n\tif to > length {\n\t\tto = length\n\t}\n\n\treturn pages[from:to]\n}\n\n\/\/ Sorting interface\nfunc (pages PageSlice) Len() int {\n\treturn len(pages)\n}\nfunc (pages PageSlice) Less(i, j int) bool {\n\tleft := pages.Get(i)\n\tright := pages.Get(j)\n\tif left.Date.Unix() == right.Date.Unix() {\n\t\treturn left.ModTime.Unix() < right.ModTime.Unix()\n\t}\n\treturn left.Date.Unix() > right.Date.Unix()\n}\nfunc (pages PageSlice) Swap(i, j int) {\n\tpages[i], pages[j] = pages[j], pages[i]\n}\n\nfunc (pages PageSlice) Sort() {\n\tsort.Sort(pages)\n}\n\nfunc (pages PageSlice) Children(root string) *PageSlice {\n\tchildren := make(PageSlice, 0)\n\n\tfor _, page := range pages {\n\t\tif strings.HasPrefix(page.Url(), root) && page.Url() != root {\n\t\t\tchildren = append(children, page)\n\t\t}\n\t}\n\n\treturn &children\n}\n\nfunc (pages PageSlice) WithTag(tag string) *PageSlice {\n\ttagged := make(PageSlice, 0)\n\n\tfor _, page := range pages {\n\t\tif page.Tags != nil && SliceStringIndexOf(page.Tags, tag) != -1 {\n\t\t\ttagged = append(tagged, page)\n\t\t}\n\t}\n\n\treturn &tagged\n}\n\nfunc (pages PageSlice) HasPage(check func(page *Page) bool) bool {\n\tfor _, page := range pages {\n\t\tif check(page) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (pages PageSlice) BySource(s string) *Page {\n\tfor _, page := range pages {\n\t\tif page.Source == s {\n\t\t\treturn page\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) ByPath(s string) *Page {\n\tfor _, page := range pages {\n\t\tif page.Path == s {\n\t\t\treturn page\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Convert windows path separators to unix style, otherwise rules wouldn't match<commit_after>\/\/ (c) 2012 Alexander Solovyov\n\/\/ under terms of ISC license\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tStateUnknown = iota\n\tStateChanged\n\tStateUnchanged\n\tStateIgnored\n)\n\ntype Page struct {\n\tPageHeader\n\n\tSite *Site `json:\"-\"`\n\tRule *Rule\n\tPattern string\n\tDeps PageSlice `json:\"-\"`\n\n\tSource string\n\tPath string\n\tModTime time.Time\n\n\tprocessed bool\n\tstate int\n\tcontent string\n\twasread bool \/\/ if content was read already\n}\n\ntype PageSlice []*Page\n\nfunc NewPage(site *Site, path string) *Page {\n\tstat, err := os.Stat(path)\n\terrhandle(err)\n\n\trelpath, err := filepath.Rel(site.Source, path)\n\terrhandle(err)\n\n\t\/\/ convert windows path separators to unix style\n\trelpath = strings.Replace(relpath, \"\\\\\", \"\/\", -1)\n\n\tpattern, rule := site.Rules.MatchedRule(relpath)\n\n\tpage := &Page{\n\t\tSite: site,\n\t\tRule: rule,\n\t\tPattern: pattern,\n\t\tSource: relpath,\n\t\tPath: relpath,\n\t\tModTime: stat.ModTime(),\n\t}\n\tpage.peek()\n\tdebug(\"Found page: %s; rule: %v\\n\",\n\t\tpage.Source, page.Rule)\n\treturn page\n}\n\nfunc (page *Page) Content() string {\n\tif !page.wasread {\n\t\tcontent, err := ioutil.ReadFile(page.FullPath())\n\t\terrhandle(err)\n\t\tpage.SetContent(string(content))\n\t\tpage.wasread = true\n\t}\n\treturn page.content\n}\n\nfunc (page *Page) SetContent(content string) {\n\tpage.content = content\n}\n\nfunc (page *Page) FullPath() string {\n\treturn filepath.Join(page.Site.Source, page.Source)\n}\n\nfunc (page *Page) OutputPath() string {\n\treturn filepath.Join(page.Site.Output, page.Path)\n}\n\nfunc (page *Page) Url() string {\n\turl := strings.Replace(page.Path, string(filepath.Separator), \"\/\", -1)\n\tif url == \"index.html\" {\n\t\treturn \"\"\n\t}\n\tif strings.HasSuffix(url, \"\/index.html\") {\n\t\treturn strings.TrimSuffix(url, \"\/index.html\") + \"\/\"\n\t}\n\treturn url\n}\n\nfunc (page *Page) UrlTo(other *Page) string {\n\treturn page.Rel(other.Url())\n}\n\nfunc (page *Page) Rel(path string) string {\n\troot := strings.Repeat(\"..\/\", strings.Count(page.Url(), \"\/\"))\n\tif path[0] == '\/' {\n\t\treturn root + path[1:]\n\t}\n\treturn root + path\n}\n\nfunc (page *Page) Is(path string) bool {\n\treturn page.Url() == path || page.Path == path\n}\n\n\/\/ Peek is used to run those processors which should be done before others can\n\/\/ find out about us. Two actual examples include 'config' and 'rename'\n\/\/ processors right now.\nfunc (page *Page) peek() {\n\tif page.Rule == nil {\n\t\treturn\n\t}\n\n\tfor _, name := range PreProcessors {\n\t\tcmd := page.Rule.MatchedCommand(name)\n\t\tif cmd != nil {\n\t\t\tProcessCommand(page, cmd)\n\t\t}\n\t}\n}\n\nfunc (page *Page) findDeps() {\n\tif page.Rule == nil {\n\t\treturn\n\t}\n\n\tdeps := make(PageSlice, 0)\n\tfor _, other := range page.Site.Pages {\n\t\tif other != page && page.Rule.IsDep(other) {\n\t\t\tdeps = append(deps, other)\n\t\t}\n\t}\n\tpage.Deps = deps\n}\n\nfunc (page *Page) Changed() bool {\n\tif opts.Force {\n\t\treturn true\n\t}\n\n\tif page.state == StateUnknown {\n\t\tpage.state = StateUnchanged\n\t\tdest, err := os.Stat(page.OutputPath())\n\n\t\tif err != nil || dest.ModTime().Before(page.ModTime) {\n\t\t\tpage.state = StateChanged\n\t\t} else {\n\t\t\tfor _, dep := range page.Deps {\n\t\t\t\tif dep.Changed() {\n\t\t\t\t\tpage.state = StateChanged\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn page.state == StateChanged\n}\n\nfunc (page *Page) Process() *Page {\n\tif page.processed || page.Rule == nil {\n\t\treturn page\n\t}\n\n\tpage.processed = true\n\tif page.Rule.Commands != nil {\n\t\tfor _, cmd := range page.Rule.Commands {\n\t\t\tif !cmd.MatchesAny(PreProcessors) {\n\t\t\t\tProcessCommand(page, &cmd)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn page\n}\n\nfunc (page *Page) WriteTo(writer io.Writer) (n int64, err error) {\n\tif page.Rule == nil {\n\t\treturn 0, nil\n\t}\n\n\tif !page.processed {\n\t\tpage.Process()\n\t}\n\n\tnint, err := writer.Write([]byte(page.Content()))\n\treturn int64(nint), err\n}\n\nfunc (page *Page) Render() (n int64, err error) {\n\tif page.Rule == nil {\n\t\treturn CopyFile(page.FullPath(), page.OutputPath())\n\t}\n\n\tfile, err := os.Create(page.OutputPath())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer file.Close()\n\n\treturn page.WriteTo(file)\n}\n\nfunc (page *Page) UrlMatches(regex string) bool {\n\tre, err := regexp.Compile(regex)\n\tif err != nil {\n\t\terrhandle(fmt.Errorf(\"Incorrect regex given to Page.UrlMatches: '%s' \", regex))\n\t}\n\treturn re.Match([]byte(page.Url()))\n}\n\n\/\/ PageSlice manipulation\n\nfunc (pages PageSlice) Get(i int) *Page { return pages[i] }\nfunc (pages PageSlice) First() *Page { return pages.Get(0) }\nfunc (pages PageSlice) Last() *Page { return pages.Get(len(pages) - 1) }\n\nfunc (pages PageSlice) Prev(cur *Page) *Page {\n\tfor i, page := range pages {\n\t\tif page == cur {\n\t\t\tif i == pages.Len()-1 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn pages[i+1]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) Next(cur *Page) *Page {\n\tfor i, page := range pages {\n\t\tif page == cur {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn pages[i-1]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) Slice(from int, to int) PageSlice {\n\tlength := len(pages)\n\n\tif from > length {\n\t\tfrom = length\n\t}\n\tif to > length {\n\t\tto = length\n\t}\n\n\treturn pages[from:to]\n}\n\n\/\/ Sorting interface\nfunc (pages PageSlice) Len() int {\n\treturn len(pages)\n}\nfunc (pages PageSlice) Less(i, j int) bool {\n\tleft := pages.Get(i)\n\tright := pages.Get(j)\n\tif left.Date.Unix() == right.Date.Unix() {\n\t\treturn left.ModTime.Unix() < right.ModTime.Unix()\n\t}\n\treturn left.Date.Unix() > right.Date.Unix()\n}\nfunc (pages PageSlice) Swap(i, j int) {\n\tpages[i], pages[j] = pages[j], pages[i]\n}\n\nfunc (pages PageSlice) Sort() {\n\tsort.Sort(pages)\n}\n\nfunc (pages PageSlice) Children(root string) *PageSlice {\n\tchildren := make(PageSlice, 0)\n\n\tfor _, page := range pages {\n\t\tif strings.HasPrefix(page.Url(), root) && page.Url() != root {\n\t\t\tchildren = append(children, page)\n\t\t}\n\t}\n\n\treturn &children\n}\n\nfunc (pages PageSlice) WithTag(tag string) *PageSlice {\n\ttagged := make(PageSlice, 0)\n\n\tfor _, page := range pages {\n\t\tif page.Tags != nil && SliceStringIndexOf(page.Tags, tag) != -1 {\n\t\t\ttagged = append(tagged, page)\n\t\t}\n\t}\n\n\treturn &tagged\n}\n\nfunc (pages PageSlice) HasPage(check func(page *Page) bool) bool {\n\tfor _, page := range pages {\n\t\tif check(page) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (pages PageSlice) BySource(s string) *Page {\n\tfor _, page := range pages {\n\t\tif page.Source == s {\n\t\t\treturn page\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pages PageSlice) ByPath(s string) *Page {\n\tfor _, page := range pages {\n\t\tif page.Path == s {\n\t\t\treturn page\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package geom\n\n\/\/ PathElementType is an enumeration representing the different kinds of path\n\/\/ elements supported by 2D paths.\ntype PathElementType int\n\nconst (\n\t\/\/ MoveTo is used for path elements that move the path position to a new\n\t\/\/ location.\n\tMoveTo PathElementType = iota\n\n\t\/\/ LineTo is used for path elements that move the path position to a new\n\t\/\/ location while drawin a straight line.\n\tLineTo\n\n\t\/\/ QuandCurveTo is used for path elements that draw a quadriatic cruve.\n\tQuadCurveTo\n\n\t\/\/ CubicCurveTo is used for path elements that draw a cubic curve.\n\tCubicCurveTo\n\n\t\/\/ ClosePath is used for path elements that terminate drawing of a sub-path.\n\tClosePath\n)\n\n\/\/ A PathElement represents a single step in a 2D path, it's composed of a type\n\/\/ that must be one of the constant values of PathElementType, and a list of 2D\n\/\/ points that are interpreted differently based on the element's type.\ntype PathElement struct {\n\tType PathElementType\n\tPoints [3]Point\n}\n\n\/\/ A Path is a drawable representation of an arbitrary shape, it's copmosed of a\n\/\/ list of elements describing each step of the whole drawing operation.\ntype Path struct {\n\tElements []PathElement\n}\n\n\/\/ The Shape interface represents values that can be converted to 2D paths.\ntype Shape interface {\n\n\t\/\/ Returns a 2D path representing the abstract shape that satisifed the\n\t\/\/ interface.\n\t\/\/\n\t\/\/ The returned path shouldn't be retained by the shape or any other part\n\t\/\/ of the program, the caller of that method is considered the owner of the\n\t\/\/ returned value and is free to modify it.\n\tPath() Path\n}\n\n\/\/ MakePath returns a Path value initialized with a slice of elements with the\n\/\/ capacity given as argument.\nfunc MakePath(cap int) Path {\n\treturn Path{\n\t\tElements: make([]PathElement, 0, cap),\n\t}\n}\n\n\/\/ AppendPath concatenates the Path given as second arugment to the one given as\n\/\/ first argument. It behaves in a similar way to the builtin `append` function,\n\/\/ the Path value given as first argument may or may not be modified by the append\n\/\/ operation.\nfunc AppendPath(path Path, other Path) Path {\n\treturn Path{Elements: append(path.Elements, other.Elements...)}\n}\n\n\/\/ AppendRect efficiently append a rectangle to a Path and returns the modified\n\/\/ value.\n\/\/\n\/\/ Calling this function is equivalent to calling:\n\/\/\n\/\/\tpath = AppendPath(path, rect.Path())\n\/\/\n\/\/ but the implementation is more optimized and avoid unnecessary memory\n\/\/ allocations.\nfunc AppendRect(path Path, rect Rect) Path {\n\tx0 := rect.X\n\ty0 := rect.Y\n\tx1 := rect.X + rect.W\n\ty1 := rect.Y + rect.H\n\treturn AppendPolygon(path, Point{x0, y0}, Point{x1, y0}, Point{x1, y1}, Point{x0, y1})\n}\n\n\/\/ AppendPolygon appends an arbitrary polygon to a path and returns the modified\n\/\/ value, the polygon is made of the points given as arguments to the function.\nfunc AppendPolygon(path Path, points ...Point) Path {\n\tif len(points) != 0 {\n\t\tpath.MoveTo(points[0])\n\n\t\tfor _, p := range points[1:] {\n\t\t\tpath.LineTo(p)\n\t\t}\n\n\t\tpath.Close()\n\t}\n\n\treturn path\n}\n\n\/\/ MoveTo appends a path element that moves the current path position to the\n\/\/ point given as argument.\nfunc (path *Path) MoveTo(p Point) {\n\tpath.append(PathElement{\n\t\tType: MoveTo,\n\t\tPoints: [...]Point{p, {}, {}},\n\t})\n}\n\n\/\/ LineTo appends a path element that draws a line from the current path\n\/\/ position to the point given as argument.\nfunc (path *Path) LineTo(p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: LineTo,\n\t\tPoints: [...]Point{p, {}, {}},\n\t})\n}\n\n\/\/ QuadCurveTo appends a path element that draws a quadriatic curve from the\n\/\/ current path position to `p` and centered in `c`.\nfunc (path *Path) QuadCurveTo(c Point, p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: QuadCurveTo,\n\t\tPoints: [...]Point{c, p, {}},\n\t})\n}\n\n\/\/ QuadCurveTo appends a path element that draws a cubic curve from the current\n\/\/ path position to `p` with centers in `c1` and `c2`.\nfunc (path *Path) CubicCurveTo(c1 Point, c2 Point, p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: CubicCurveTo,\n\t\tPoints: [...]Point{c1, c2, p},\n\t})\n}\n\n\/\/ Close appends a path element that closes the current shape by drawing a line\n\/\/ between the current path position and the last move-to element added to the\n\/\/ path.\nfunc (path *Path) Close() {\n\tpath.append(PathElement{\n\t\tType: ClosePath,\n\t})\n}\n\n\/\/ Clear erases every element in the path.\nfunc (path *Path) Clear() {\n\tpath.Elements = path.Elements[:0]\n}\n\n\/\/ Copy creates a copy of the path and returns it, the returned value and the\n\/\/ receiver do not share the same slice of elements and can be safely modified\n\/\/ independently.\nfunc (path *Path) Copy() Path {\n\tif path.Empty() {\n\t\treturn Path{}\n\t}\n\n\tp := Path{\n\t\tElements: make([]PathElement, len(path.Elements)),\n\t}\n\n\tcopy(p.Elements, path.Elements)\n\treturn p\n}\n\n\/\/ LastPoint returns the 2D coordinates of the current path position.\nfunc (path *Path) LastPoint() Point {\n\treturn path.lastPointAt(len(path.Elements) - 1)\n}\n\nfunc (path *Path) lastPointAt(n int) Point {\n\tif n < 0 {\n\t\treturn Point{}\n\t}\n\n\tswitch e := path.Elements[n-1]; e.Type {\n\tcase MoveTo, LineTo:\n\t\treturn e.Points[0]\n\n\tcase QuadCurveTo:\n\t\treturn e.Points[1]\n\n\tcase CubicCurveTo:\n\t\treturn e.Points[2]\n\n\tdefault:\n\t\treturn path.lastPointAt(n - 1)\n\t}\n}\n\n\/\/ Empty checks if the path is empty, which means it has no element.\nfunc (path *Path) Empty() bool {\n\treturn len(path.Elements) == 0\n}\n\n\/\/ Path satisfies the Shape interface by returning a copy of the path it is\n\/\/ called on.\nfunc (path Path) Path() Path {\n\treturn path.Copy()\n}\n\nfunc (path *Path) append(e PathElement) {\n\tpath.Elements = append(path.Elements, e)\n}\n\nfunc (path *Path) ensureStartWithMoveTo() {\n\tif n := len(path.Elements); n == 0 {\n\t\t\/\/ Empty paths must start with a MoveTo element.\n\t\tpath.MoveTo(Point{})\n\n\t} else if path.Elements[n-1].Type == ClosePath {\n\t\t\/\/ When a subpath is closed the path that contains more elements must\n\t\t\/\/ be restarted with a MoveTo as well.\n\t\tpath.MoveTo(path.LastPoint())\n\t}\n}\n<commit_msg>update path documentation<commit_after>package geom\n\n\/\/ PathElementType is an enumeration representing the different kinds of path\n\/\/ elements supported by 2D paths.\ntype PathElementType int\n\nconst (\n\t\/\/ MoveTo is used for path elements that move the path position to a new\n\t\/\/ location.\n\tMoveTo PathElementType = iota\n\n\t\/\/ LineTo is used for path elements that move the path position to a new\n\t\/\/ location while drawin a straight line.\n\tLineTo\n\n\t\/\/ QuadCurveTo is used for path elements that draw a quadriatic cruve.\n\tQuadCurveTo\n\n\t\/\/ CubicCurveTo is used for path elements that draw a cubic curve.\n\tCubicCurveTo\n\n\t\/\/ ClosePath is used for path elements that terminate drawing of a sub-path.\n\tClosePath\n)\n\n\/\/ A PathElement represents a single step in a 2D path, it's composed of a type\n\/\/ that must be one of the constant values of PathElementType, and a list of 2D\n\/\/ points that are interpreted differently based on the element's type.\ntype PathElement struct {\n\tType PathElementType\n\tPoints [3]Point\n}\n\n\/\/ A Path is a drawable representation of an arbitrary shape, it's copmosed of a\n\/\/ list of elements describing each step of the whole drawing operation.\ntype Path struct {\n\tElements []PathElement\n}\n\n\/\/ The Shape interface represents values that can be converted to 2D paths.\ntype Shape interface {\n\n\t\/\/ Returns a 2D path representing the abstract shape that satisifed the\n\t\/\/ interface.\n\t\/\/\n\t\/\/ The returned path shouldn't be retained by the shape or any other part\n\t\/\/ of the program, the caller of that method is considered the owner of the\n\t\/\/ returned value and is free to modify it.\n\tPath() Path\n}\n\n\/\/ MakePath returns a Path value initialized with a slice of elements with the\n\/\/ capacity given as argument.\nfunc MakePath(cap int) Path {\n\treturn Path{\n\t\tElements: make([]PathElement, 0, cap),\n\t}\n}\n\n\/\/ AppendPath concatenates the Path given as second arugment to the one given as\n\/\/ first argument. It behaves in a similar way to the builtin `append` function,\n\/\/ the Path value given as first argument may or may not be modified by the append\n\/\/ operation.\nfunc AppendPath(path Path, other Path) Path {\n\treturn Path{Elements: append(path.Elements, other.Elements...)}\n}\n\n\/\/ AppendRect efficiently append a rectangle to a Path and returns the modified\n\/\/ value.\n\/\/\n\/\/ Calling this function is equivalent to calling:\n\/\/\n\/\/\tpath = AppendPath(path, rect.Path())\n\/\/\n\/\/ but the implementation is more optimized and avoid unnecessary memory\n\/\/ allocations.\nfunc AppendRect(path Path, rect Rect) Path {\n\tx0 := rect.X\n\ty0 := rect.Y\n\tx1 := rect.X + rect.W\n\ty1 := rect.Y + rect.H\n\treturn AppendPolygon(path, Point{x0, y0}, Point{x1, y0}, Point{x1, y1}, Point{x0, y1})\n}\n\n\/\/ AppendPolygon appends an arbitrary polygon to a path and returns the modified\n\/\/ value, the polygon is made of the points given as arguments to the function.\nfunc AppendPolygon(path Path, points ...Point) Path {\n\tif len(points) != 0 {\n\t\tpath.MoveTo(points[0])\n\n\t\tfor _, p := range points[1:] {\n\t\t\tpath.LineTo(p)\n\t\t}\n\n\t\tpath.Close()\n\t}\n\n\treturn path\n}\n\n\/\/ MoveTo appends a path element that moves the current path position to the\n\/\/ point given as argument.\nfunc (path *Path) MoveTo(p Point) {\n\tpath.append(PathElement{\n\t\tType: MoveTo,\n\t\tPoints: [...]Point{p, {}, {}},\n\t})\n}\n\n\/\/ LineTo appends a path element that draws a line from the current path\n\/\/ position to the point given as argument.\nfunc (path *Path) LineTo(p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: LineTo,\n\t\tPoints: [...]Point{p, {}, {}},\n\t})\n}\n\n\/\/ QuadCurveTo appends a path element that draws a quadriatic curve from the\n\/\/ current path position to `p` and centered in `c`.\nfunc (path *Path) QuadCurveTo(c Point, p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: QuadCurveTo,\n\t\tPoints: [...]Point{c, p, {}},\n\t})\n}\n\n\/\/ CubicCurveTo appends a path element that draws a cubic curve from the current\n\/\/ path position to `p` with centers in `c1` and `c2`.\nfunc (path *Path) CubicCurveTo(c1 Point, c2 Point, p Point) {\n\tpath.ensureStartWithMoveTo()\n\tpath.append(PathElement{\n\t\tType: CubicCurveTo,\n\t\tPoints: [...]Point{c1, c2, p},\n\t})\n}\n\n\/\/ Close appends a path element that closes the current shape by drawing a line\n\/\/ between the current path position and the last move-to element added to the\n\/\/ path.\nfunc (path *Path) Close() {\n\tpath.append(PathElement{\n\t\tType: ClosePath,\n\t})\n}\n\n\/\/ Clear erases every element in the path.\nfunc (path *Path) Clear() {\n\tpath.Elements = path.Elements[:0]\n}\n\n\/\/ Copy creates a copy of the path and returns it, the returned value and the\n\/\/ receiver do not share the same slice of elements and can be safely modified\n\/\/ independently.\nfunc (path *Path) Copy() Path {\n\tif path.Empty() {\n\t\treturn Path{}\n\t}\n\n\tp := Path{\n\t\tElements: make([]PathElement, len(path.Elements)),\n\t}\n\n\tcopy(p.Elements, path.Elements)\n\treturn p\n}\n\n\/\/ LastPoint returns the 2D coordinates of the current path position.\nfunc (path *Path) LastPoint() Point {\n\treturn path.lastPointAt(len(path.Elements) - 1)\n}\n\nfunc (path *Path) lastPointAt(n int) Point {\n\tif n < 0 {\n\t\treturn Point{}\n\t}\n\n\tswitch e := path.Elements[n-1]; e.Type {\n\tcase MoveTo, LineTo:\n\t\treturn e.Points[0]\n\n\tcase QuadCurveTo:\n\t\treturn e.Points[1]\n\n\tcase CubicCurveTo:\n\t\treturn e.Points[2]\n\n\tdefault:\n\t\treturn path.lastPointAt(n - 1)\n\t}\n}\n\n\/\/ Empty checks if the path is empty, which means it has no element.\nfunc (path *Path) Empty() bool {\n\treturn len(path.Elements) == 0\n}\n\n\/\/ Path satisfies the Shape interface by returning a copy of the path it is\n\/\/ called on.\nfunc (path Path) Path() Path {\n\treturn path.Copy()\n}\n\nfunc (path *Path) append(e PathElement) {\n\tpath.Elements = append(path.Elements, e)\n}\n\nfunc (path *Path) ensureStartWithMoveTo() {\n\tif n := len(path.Elements); n == 0 {\n\t\t\/\/ Empty paths must start with a MoveTo element.\n\t\tpath.MoveTo(Point{})\n\n\t} else if path.Elements[n-1].Type == ClosePath {\n\t\t\/\/ When a subpath is closed the path that contains more elements must\n\t\t\/\/ be restarted with a MoveTo as well.\n\t\tpath.MoveTo(path.LastPoint())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fweight\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n)\n\nvar _ PathRouter = new(Path)\nvar _ Router = new(Path)\n\n\/\/Func PathIsEmpty accounts for the fact that paths like\n\/\/ a\/b\/\/\/c or a\/b\/\/ can exist, which would result in final\n\/\/strings of \/\/ or \/\/\/. These are assumed to be the same as\n\/\/their single-slashed counterparts.\nfunc PathEmpty(path string) bool {\n\tif path == \"\" || strings.TrimLeft(path, \"\/\") == \"\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/Type Path is a Router which routes by URL path. Files or directories\n\/\/with empty names are not allowed. The empty name routes to the terminal\n\/\/Router, the one used if the path stops here.\n\/\/\n\/\/It should be noted that when RouteHTTP is called\n\/\/the PathRouter is followed to completion from the\n\/\/start to end of the URL, thus using two routers separately\n\/\/(i.e. separated by a different Router in the routing tree)\n\/\/causes two separate and independant operations on the path;\n\/\/a Path that routes a\/b followed by one that routes a\/b\/c does\n\/\/not result in a route of a\/b\/a\/c, instead resulting in a route of\n\/\/just a\/b.\ntype Path map[string]Router\n\nfunc (p Path) self() Path {\n\tif p == nil {\n\t\tp = make(Path)\n\t}\n\treturn p\n}\n\n\/\/Function AddChild adds a child PathRouter which is used\n\/\/as the next name in the path.\n\/\/\n\/\/A non PathRouter child will cause the Router to\n\/\/be returned to the caller of RouteHTTP, causing the\n\/\/path to terminate there if it is a Handler.\nfunc (p Path) AddChild(pR Router, name string) Path {\n\tp[name] = pR\n\treturn p\n}\n\n\/\/Sets the Handler that is used when the path terminates here.\n\/\/This is the same as AddChild(r, \"\"); the empty string routes\n\/\/to here.\nfunc (p Path) Handler(r Router) Path {\n\treturn p.AddChild(r, \"\")\n}\n\nfunc isPathRouter(r Router) (b bool) {\n\t_, b = r.(PathRouter)\n\treturn\n}\n\n\/\/RouteHTTP traverses the tree of Paths until the end of the URL path\n\/\/is encountered, returning the terminal router or nil.\nfunc (p Path) RouteHTTP(rq *http.Request) Router {\n\tremaining := strings.Trim(rq.URL.Path, \"\/\")\n\tvar pRouter Router\n\tfor pRouter, remaining = p.Child(rq.URL.Path); isPathRouter(pRouter); pRouter, remaining = pRouter.(PathRouter).Child(remaining) {\n\t}\n\treturn pRouter\n}\n\n\/\/Function Child returns the next Router associated with the next\n\/\/'hop' in the path.\nfunc (p Path) Child(subpath string) (n Router, remainingSubpath string) {\n\t\/\/If strings.SplitN(subpath, \"\/\", 3)'s length is two, then this\n\t\/\/is the only item left in the path, and thus we must terminate here.\n\tif subpath == \"\" || subpath == \"\/\" {\n\t\treturn p[\"\"], \"\"\n\t}\n\n\t\/\/First, check if we have bound a handler for this whole\n\t\/\/subpath (a\/b\/c)\n\tif pR, ok := p[subpath]; ok {\n\t\treturn pR, \"\"\n\t}\n\n\t\/\/Check if the next node is present\n\tsplt := strings.SplitN(subpath, \"\/\", 3)\n\tif pR, ok := p[splt[0]]; ok {\n\t\treturn pR, splt[1]\n\t}\n\n\t\/\/Check if we have a route that begins with the subpath\n\tfor path, pR := range p {\n\t\tif strings.HasPrefix(subpath, path) {\n\t\t\treturn pR, strings.TrimLeft(subpath, path)\n\t\t}\n\t}\n\n\t\/\/Not Found.\n\treturn nil, subpath\n}\n<commit_msg>Path bugfix<commit_after>package fweight\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n)\n\nvar _ PathRouter = new(Path)\nvar _ Router = new(Path)\n\n\/\/Func PathIsEmpty accounts for the fact that paths like\n\/\/ a\/b\/\/\/c or a\/b\/\/ can exist, which would result in final\n\/\/strings of \/\/ or \/\/\/. These are assumed to be the same as\n\/\/their single-slashed counterparts.\nfunc PathEmpty(path string) bool {\n\tif path == \"\" || strings.TrimLeft(path, \"\/\") == \"\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/Type Path is a Router which routes by URL path. Files or directories\n\/\/with empty names are not allowed. The empty name routes to the terminal\n\/\/Router, the one used if the path stops here.\n\/\/\n\/\/It should be noted that when RouteHTTP is called\n\/\/the PathRouter is followed to completion from the\n\/\/start to end of the URL, thus using two routers separately\n\/\/(i.e. separated by a different Router in the routing tree)\n\/\/causes two separate and independant operations on the path;\n\/\/a Path that routes a\/b followed by one that routes a\/b\/c does\n\/\/not result in a route of a\/b\/a\/c, instead resulting in a route of\n\/\/just a\/b.\ntype Path map[string]Router\n\nfunc (p Path) self() Path {\n\tif p == nil {\n\t\tp = make(Path)\n\t}\n\treturn p\n}\n\n\/\/Function AddChild adds a child PathRouter which is used\n\/\/as the next name in the path.\n\/\/\n\/\/A non PathRouter child will cause the Router to\n\/\/be returned to the caller of RouteHTTP, causing the\n\/\/path to terminate there if it is a Handler.\nfunc (p Path) AddChild(pR Router, name string) Path {\n\tp[name] = pR\n\treturn p\n}\n\n\/\/Sets the Handler that is used when the path terminates here.\n\/\/This is the same as AddChild(r, \"\"); the empty string routes\n\/\/to here.\nfunc (p Path) Handler(r Router) Path {\n\treturn p.AddChild(r, \"\")\n}\n\nfunc isPathRouter(r Router) (b bool) {\n\t_, b = r.(PathRouter)\n\treturn\n}\n\n\/\/RouteHTTP traverses the tree of Paths until the end of the URL path\n\/\/is encountered, returning the terminal router or nil.\nfunc (p Path) RouteHTTP(rq *http.Request) Router {\n\tremaining := strings.Trim(rq.URL.Path, \"\/\")\n\tvar pRouter Router\n\tfor pRouter, remaining = p.Child(rq.URL.Path); isPathRouter(pRouter); pRouter, remaining = pRouter.(PathRouter).Child(remaining) {\n\t}\n\treturn pRouter\n}\n\n\/\/Function Child returns the next Router associated with the next\n\/\/'hop' in the path.\nfunc (p Path) Child(subpath string) (n Router, remainingSubpath string) {\n\t\/\/If strings.SplitN(subpath, \"\/\", 3)'s length is two, then this\n\t\/\/is the only item left in the path, and thus we must terminate here.\n\tif subpath == \"\" || subpath == \"\/\" {\n\t\treturn p[\"\"], \"\"\n\t}\n\n\t\/\/First, check if we have bound a handler for this whole\n\t\/\/subpath (a\/b\/c)\n\tif pR, ok := p[subpath]; ok {\n\t\treturn pR, \"\"\n\t}\n\n\t\/\/Check if the next node is present\n\tsplt := strings.SplitN(subpath, \"\/\", 3)\n\tif pR, ok := p[splt[1]]; ok {\n\t\treturn pR, splt[1]\n\t}\n\n\t\/\/Check if we have a route that begins with the subpath\n\tfor path, pR := range p {\n\t\tif path != \"\" && strings.HasPrefix(subpath, path) {\n\t\t\treturn pR, strings.TrimLeft(subpath, path)\n\t\t}\n\t}\n\n\t\/\/Not Found.\n\treturn nil, subpath\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 - The TXTDirect Authors\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar PathRegex = regexp.MustCompile(\"\\\\\/([A-Za-z0-9-._~!$'()*+,;=:@]+)\")\nvar FromRegex = regexp.MustCompile(\"\\\\\/\\\\$(\\\\d+)\")\nvar GroupRegex = regexp.MustCompile(\"P<[a-zA-Z]+[a-zA-Z0-9]*>\")\nvar GroupOrderRegex = regexp.MustCompile(\"P<([a-zA-Z]+[a-zA-Z0-9]*)>\")\n\n\/\/ zoneFromPath generates a DNS zone with the given host and path\n\/\/ It will use custom regex to parse the path if it's provided in\n\/\/ the given record.\nfunc zoneFromPath(r *http.Request, rec record) (string, int, []string, error) {\n\tpath := fmt.Sprintf(\"%s?%s\", r.URL.Path, r.URL.RawQuery)\n\n\tif strings.ContainsAny(path, \".\") {\n\t\tpath = strings.Replace(path, \".\", \"-\", -1)\n\t}\n\tpathSubmatchs := PathRegex.FindAllStringSubmatch(path, -1)\n\tif rec.Re != \"\" {\n\t\tCustomRegex, err := regexp.Compile(rec.Re)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"<%s> [txtdirect]: the given regex doesn't work as expected: %s\", time.Now().String(), rec.Re)\n\t\t}\n\t\tpathSubmatchs = CustomRegex.FindAllStringSubmatch(path, -1)\n\t\tif GroupRegex.MatchString(rec.Re) {\n\t\t\tpathSlice := []string{}\n\t\t\tunordered := make(map[string]string)\n\t\t\tfor _, item := range pathSubmatchs[0] {\n\t\t\t\tpathSlice = append(pathSlice, item)\n\t\t\t}\n\t\t\torder := GroupOrderRegex.FindAllStringSubmatch(rec.Re, -1)\n\t\t\tfor i, group := range order {\n\t\t\t\tunordered[group[1]] = pathSlice[i+1]\n\t\t\t}\n\t\t\turl := sortMap(unordered)\n\t\t\t*r = *r.WithContext(context.WithValue(r.Context(), \"regexMatches\", unordered))\n\t\t\treverse(url)\n\t\t\tfrom := len(pathSlice)\n\t\t\turl = append(url, r.Host)\n\t\t\turl = append([]string{basezone}, url...)\n\t\t\treturn strings.Join(url, \".\"), from, pathSlice, nil\n\t\t}\n\t}\n\tpathSlice := []string{}\n\tfor _, v := range pathSubmatchs {\n\t\tpathSlice = append(pathSlice, v[1])\n\t}\n\t*r = *r.WithContext(context.WithValue(r.Context(), \"regexMatches\", pathSlice))\n\tif len(pathSlice) < 1 && rec.Re != \"\" {\n\t\tlog.Printf(\"<%s> [txtdirect]: custom regex doesn't work on %s\", time.Now().String(), path)\n\t}\n\tfrom := len(pathSlice)\n\tif rec.From != \"\" {\n\t\tfromSubmatch := FromRegex.FindAllStringSubmatch(rec.From, -1)\n\t\tif len(fromSubmatch) != len(pathSlice) {\n\t\t\treturn \"\", 0, []string{}, fmt.Errorf(\"length of path doesn't match with length of from= in record\")\n\t\t}\n\t\tfromSlice := make(map[int]string)\n\t\tfor k, v := range fromSubmatch {\n\t\t\tindex, _ := strconv.Atoi(v[1])\n\t\t\tfromSlice[index] = pathSlice[k]\n\t\t}\n\n\t\tkeys := []int{}\n\t\tfor k := range fromSlice {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tif len(keys) != len(pathSlice) {\n\t\t\treturn \"\", 0, []string{}, fmt.Errorf(\"length of path doesn't match with length of from= in record\")\n\t\t}\n\t\tgeneratedPath := []string{}\n\n\t\tsort.Sort(sort.Reverse(sort.IntSlice(keys)))\n\n\t\tfor _, k := range keys {\n\t\t\tgeneratedPath = append(generatedPath, fromSlice[k])\n\t\t}\n\n\t\turl := append(generatedPath, r.Host)\n\t\turl = append([]string{basezone}, url...)\n\t\treturn strings.Join(url, \".\"), from, pathSlice, nil\n\t}\n\tps := pathSlice\n\treverse(pathSlice)\n\turl := append(pathSlice, r.Host)\n\turl = append([]string{basezone}, url...)\n\treturn strings.Join(url, \".\"), from, ps, nil\n}\n\n\/\/ getFinalRecord finds the final TXT record for the given zone.\n\/\/ It will try wildcards if the first zone return error\nfunc getFinalRecord(zone string, from int, c Config, w http.ResponseWriter, r *http.Request, pathSlice []string) (record, error) {\n\ttxts, err := query(zone, r.Context(), c)\n\tif err != nil {\n\t\t\/\/ if nothing found, jump into wildcards\n\t\tfor i := 1; i <= from && len(txts) == 0; i++ {\n\t\t\tzoneSlice := strings.Split(zone, \".\")\n\t\t\tzoneSlice[i] = \"_\"\n\t\t\tzone = strings.Join(zoneSlice, \".\")\n\t\t\ttxts, err = query(zone, r.Context(), c)\n\t\t}\n\t}\n\tif err != nil || len(txts) == 0 {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\ttxts[0], err = parsePlaceholders(txts[0], r, pathSlice)\n\trec := record{}\n\tif err = rec.Parse(txts[0], w, r, c); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\tif rec.Type == \"path\" {\n\t\treturn rec, fmt.Errorf(\"chaining path is not currently supported\")\n\t}\n\n\treturn rec, nil\n}\n\n\/\/ reverse reverses the order of the array\nfunc reverse(input []string) {\n\tlast := len(input) - 1\n\tfor i := 0; i < len(input)\/2; i++ {\n\t\tinput[i], input[last-i] = input[last-i], input[i]\n\t}\n}\n\nfunc sortMap(m map[string]string) []string {\n\tvar keys []string\n\tvar result []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tresult = append(result, m[k])\n\t}\n\treturn result\n}\n<commit_msg>(path): Update comment<commit_after>\/*\nCopyright 2019 - The TXTDirect Authors\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage txtdirect\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar PathRegex = regexp.MustCompile(\"\\\\\/([A-Za-z0-9-._~!$'()*+,;=:@]+)\")\nvar FromRegex = regexp.MustCompile(\"\\\\\/\\\\$(\\\\d+)\")\nvar GroupRegex = regexp.MustCompile(\"P<[a-zA-Z]+[a-zA-Z0-9]*>\")\nvar GroupOrderRegex = regexp.MustCompile(\"P<([a-zA-Z]+[a-zA-Z0-9]*)>\")\n\n\/\/ zoneFromPath generates a DNS zone with the given request's path and host\n\/\/ It will use custom regex to parse the path if it's provided in\n\/\/ the given record.\nfunc zoneFromPath(r *http.Request, rec record) (string, int, []string, error) {\n\tpath := fmt.Sprintf(\"%s?%s\", r.URL.Path, r.URL.RawQuery)\n\n\tif strings.ContainsAny(path, \".\") {\n\t\tpath = strings.Replace(path, \".\", \"-\", -1)\n\t}\n\tpathSubmatchs := PathRegex.FindAllStringSubmatch(path, -1)\n\tif rec.Re != \"\" {\n\t\tCustomRegex, err := regexp.Compile(rec.Re)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"<%s> [txtdirect]: the given regex doesn't work as expected: %s\", time.Now().String(), rec.Re)\n\t\t}\n\t\tpathSubmatchs = CustomRegex.FindAllStringSubmatch(path, -1)\n\t\tif GroupRegex.MatchString(rec.Re) {\n\t\t\tpathSlice := []string{}\n\t\t\tunordered := make(map[string]string)\n\t\t\tfor _, item := range pathSubmatchs[0] {\n\t\t\t\tpathSlice = append(pathSlice, item)\n\t\t\t}\n\t\t\torder := GroupOrderRegex.FindAllStringSubmatch(rec.Re, -1)\n\t\t\tfor i, group := range order {\n\t\t\t\tunordered[group[1]] = pathSlice[i+1]\n\t\t\t}\n\t\t\turl := sortMap(unordered)\n\t\t\t*r = *r.WithContext(context.WithValue(r.Context(), \"regexMatches\", unordered))\n\t\t\treverse(url)\n\t\t\tfrom := len(pathSlice)\n\t\t\turl = append(url, r.Host)\n\t\t\turl = append([]string{basezone}, url...)\n\t\t\treturn strings.Join(url, \".\"), from, pathSlice, nil\n\t\t}\n\t}\n\tpathSlice := []string{}\n\tfor _, v := range pathSubmatchs {\n\t\tpathSlice = append(pathSlice, v[1])\n\t}\n\t*r = *r.WithContext(context.WithValue(r.Context(), \"regexMatches\", pathSlice))\n\tif len(pathSlice) < 1 && rec.Re != \"\" {\n\t\tlog.Printf(\"<%s> [txtdirect]: custom regex doesn't work on %s\", time.Now().String(), path)\n\t}\n\tfrom := len(pathSlice)\n\tif rec.From != \"\" {\n\t\tfromSubmatch := FromRegex.FindAllStringSubmatch(rec.From, -1)\n\t\tif len(fromSubmatch) != len(pathSlice) {\n\t\t\treturn \"\", 0, []string{}, fmt.Errorf(\"length of path doesn't match with length of from= in record\")\n\t\t}\n\t\tfromSlice := make(map[int]string)\n\t\tfor k, v := range fromSubmatch {\n\t\t\tindex, _ := strconv.Atoi(v[1])\n\t\t\tfromSlice[index] = pathSlice[k]\n\t\t}\n\n\t\tkeys := []int{}\n\t\tfor k := range fromSlice {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tif len(keys) != len(pathSlice) {\n\t\t\treturn \"\", 0, []string{}, fmt.Errorf(\"length of path doesn't match with length of from= in record\")\n\t\t}\n\t\tgeneratedPath := []string{}\n\n\t\tsort.Sort(sort.Reverse(sort.IntSlice(keys)))\n\n\t\tfor _, k := range keys {\n\t\t\tgeneratedPath = append(generatedPath, fromSlice[k])\n\t\t}\n\n\t\turl := append(generatedPath, r.Host)\n\t\turl = append([]string{basezone}, url...)\n\t\treturn strings.Join(url, \".\"), from, pathSlice, nil\n\t}\n\tps := pathSlice\n\treverse(pathSlice)\n\turl := append(pathSlice, r.Host)\n\turl = append([]string{basezone}, url...)\n\treturn strings.Join(url, \".\"), from, ps, nil\n}\n\n\/\/ getFinalRecord finds the final TXT record for the given zone.\n\/\/ It will try wildcards if the first zone return error\nfunc getFinalRecord(zone string, from int, c Config, w http.ResponseWriter, r *http.Request, pathSlice []string) (record, error) {\n\ttxts, err := query(zone, r.Context(), c)\n\tif err != nil {\n\t\t\/\/ if nothing found, jump into wildcards\n\t\tfor i := 1; i <= from && len(txts) == 0; i++ {\n\t\t\tzoneSlice := strings.Split(zone, \".\")\n\t\t\tzoneSlice[i] = \"_\"\n\t\t\tzone = strings.Join(zoneSlice, \".\")\n\t\t\ttxts, err = query(zone, r.Context(), c)\n\t\t}\n\t}\n\tif err != nil || len(txts) == 0 {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\ttxts[0], err = parsePlaceholders(txts[0], r, pathSlice)\n\trec := record{}\n\tif err = rec.Parse(txts[0], w, r, c); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\tif rec.Type == \"path\" {\n\t\treturn rec, fmt.Errorf(\"chaining path is not currently supported\")\n\t}\n\n\treturn rec, nil\n}\n\n\/\/ reverse reverses the order of the array\nfunc reverse(input []string) {\n\tlast := len(input) - 1\n\tfor i := 0; i < len(input)\/2; i++ {\n\t\tinput[i], input[last-i] = input[last-i], input[i]\n\t}\n}\n\nfunc sortMap(m map[string]string) []string {\n\tvar keys []string\n\tvar result []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tresult = append(result, m[k])\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package pget\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tversion = \"0.0.4\"\n\tmsg = \"Pget v\" + version + \", parallel file download client\\n\"\n)\n\n\/\/ Pget structs\ntype Pget struct {\n\tTrace bool\n\tUtils\n\tTargetDir string\n\tProcs int\n\tURLs []string\n\tTargetURLs []string\n\targs []string\n\ttimeout int\n\tuseragent string\n}\n\ntype ignore struct {\n\terr error\n}\n\ntype cause interface {\n\tCause() error\n}\n\n\/\/ New for pget package\nfunc New() *Pget {\n\treturn &Pget{\n\t\tTrace: false,\n\t\tUtils: &Data{},\n\t\tProcs: runtime.NumCPU(), \/\/ default\n\t\ttimeout: 10,\n\t}\n}\n\n\/\/ ErrTop get important message from wrapped error message\nfunc (pget Pget) ErrTop(err error) error {\n\tfor e := err; e != nil; {\n\t\tswitch e.(type) {\n\t\tcase ignore:\n\t\t\treturn nil\n\t\tcase cause:\n\t\t\te = e.(cause).Cause()\n\t\tdefault:\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Run execute methods in pget package\nfunc (pget *Pget) Run() error {\n\tif err := pget.Ready(); err != nil {\n\t\treturn pget.ErrTop(err)\n\t}\n\n\tif err := pget.Checking(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to check header\")\n\t}\n\n\tif err := pget.Download(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pget.Utils.BindwithFiles(pget.Procs); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Ready method define the variables required to Download.\nfunc (pget *Pget) Ready() error {\n\tif procs := os.Getenv(\"GOMAXPROCS\"); procs == \"\" {\n\t\truntime.GOMAXPROCS(pget.Procs)\n\t}\n\n\tvar opts Options\n\tif err := pget.parseOptions(&opts, os.Args[1:]); err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse command line args\")\n\t}\n\n\tif opts.Trace {\n\t\tpget.Trace = opts.Trace\n\t}\n\n\tif opts.Procs > 2 {\n\t\tpget.Procs = opts.Procs\n\t}\n\n\tif opts.Timeout > 0 {\n\t\tpget.timeout = opts.Timeout\n\t}\n\n\tif err := pget.parseURLs(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse of url\")\n\t}\n\n\tif opts.UserAgent != \"\" {\n\t\tpget.useragent = opts.UserAgent\n\t}\n\n\tif opts.TargetDir != \"\" {\n\t\tinfo, err := os.Stat(opts.TargetDir)\n\t\tif err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn errors.Wrap(err, \"target dir is invalid\")\n\t\t\t}\n\n\t\t\tif err := os.MkdirAll(opts.TargetDir, 0755); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to create diretory at %s\", opts.TargetDir)\n\t\t\t}\n\n\t\t} else if !info.IsDir() {\n\t\t\treturn errors.New(\"target dir is not a valid directory\")\n\t\t}\n\t}\n\topts.TargetDir = strings.TrimSuffix(opts.TargetDir, \"\/\")\n\tpget.TargetDir = opts.TargetDir\n\n\treturn nil\n}\n\nfunc (pget Pget) makeIgnoreErr() ignore {\n\treturn ignore{\n\t\terr: errors.New(\"this is ignore message\"),\n\t}\n}\n\n\/\/ Error for options: version, usage\nfunc (i ignore) Error() string {\n\treturn i.err.Error()\n}\n\nfunc (i ignore) Cause() error {\n\treturn i.err\n}\n\nfunc (pget *Pget) parseOptions(opts *Options, argv []string) error {\n\n\tif len(argv) == 0 {\n\t\tos.Stdout.Write(opts.usage())\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\to, err := opts.parse(argv)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse command line options\")\n\t}\n\n\tif opts.Help {\n\t\tos.Stdout.Write(opts.usage())\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tif opts.Version {\n\t\tos.Stdout.Write([]byte(msg))\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tif opts.Update {\n\t\tresult, err := opts.isupdate()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to parse command line options\")\n\t\t}\n\n\t\tos.Stdout.Write(result)\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tpget.args = o\n\n\treturn nil\n}\n\nfunc (pget *Pget) parseURLs() error {\n\n\t\/\/ find url in args\n\tfor _, argv := range pget.args {\n\t\tif govalidator.IsURL(argv) {\n\t\t\tpget.URLs = append(pget.URLs, argv)\n\t\t}\n\t}\n\n\tif len(pget.URLs) < 1 {\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\tscan := scanner.Text()\n\t\t\turls := strings.Split(scan, \" \")\n\t\t\tfor _, url := range urls {\n\t\t\t\tif govalidator.IsURL(url) {\n\t\t\t\t\tpget.URLs = append(pget.URLs, url)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to parse url from stdin\")\n\t\t}\n\n\t\tif len(pget.URLs) < 1 {\n\t\t\treturn errors.New(\"urls not found in the arguments passed\")\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Added input messages for stdin<commit_after>package pget\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tversion = \"0.0.4\"\n\tmsg = \"Pget v\" + version + \", parallel file download client\\n\"\n)\n\n\/\/ Pget structs\ntype Pget struct {\n\tTrace bool\n\tUtils\n\tTargetDir string\n\tProcs int\n\tURLs []string\n\tTargetURLs []string\n\targs []string\n\ttimeout int\n\tuseragent string\n}\n\ntype ignore struct {\n\terr error\n}\n\ntype cause interface {\n\tCause() error\n}\n\n\/\/ New for pget package\nfunc New() *Pget {\n\treturn &Pget{\n\t\tTrace: false,\n\t\tUtils: &Data{},\n\t\tProcs: runtime.NumCPU(), \/\/ default\n\t\ttimeout: 10,\n\t}\n}\n\n\/\/ ErrTop get important message from wrapped error message\nfunc (pget Pget) ErrTop(err error) error {\n\tfor e := err; e != nil; {\n\t\tswitch e.(type) {\n\t\tcase ignore:\n\t\t\treturn nil\n\t\tcase cause:\n\t\t\te = e.(cause).Cause()\n\t\tdefault:\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Run execute methods in pget package\nfunc (pget *Pget) Run() error {\n\tif err := pget.Ready(); err != nil {\n\t\treturn pget.ErrTop(err)\n\t}\n\n\tif err := pget.Checking(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to check header\")\n\t}\n\n\tif err := pget.Download(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pget.Utils.BindwithFiles(pget.Procs); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Ready method define the variables required to Download.\nfunc (pget *Pget) Ready() error {\n\tif procs := os.Getenv(\"GOMAXPROCS\"); procs == \"\" {\n\t\truntime.GOMAXPROCS(pget.Procs)\n\t}\n\n\tvar opts Options\n\tif err := pget.parseOptions(&opts, os.Args[1:]); err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse command line args\")\n\t}\n\n\tif opts.Trace {\n\t\tpget.Trace = opts.Trace\n\t}\n\n\tif opts.Procs > 2 {\n\t\tpget.Procs = opts.Procs\n\t}\n\n\tif opts.Timeout > 0 {\n\t\tpget.timeout = opts.Timeout\n\t}\n\n\tif err := pget.parseURLs(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse of url\")\n\t}\n\n\tif opts.UserAgent != \"\" {\n\t\tpget.useragent = opts.UserAgent\n\t}\n\n\tif opts.TargetDir != \"\" {\n\t\tinfo, err := os.Stat(opts.TargetDir)\n\t\tif err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn errors.Wrap(err, \"target dir is invalid\")\n\t\t\t}\n\n\t\t\tif err := os.MkdirAll(opts.TargetDir, 0755); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to create diretory at %s\", opts.TargetDir)\n\t\t\t}\n\n\t\t} else if !info.IsDir() {\n\t\t\treturn errors.New(\"target dir is not a valid directory\")\n\t\t}\n\t}\n\topts.TargetDir = strings.TrimSuffix(opts.TargetDir, \"\/\")\n\tpget.TargetDir = opts.TargetDir\n\n\treturn nil\n}\n\nfunc (pget Pget) makeIgnoreErr() ignore {\n\treturn ignore{\n\t\terr: errors.New(\"this is ignore message\"),\n\t}\n}\n\n\/\/ Error for options: version, usage\nfunc (i ignore) Error() string {\n\treturn i.err.Error()\n}\n\nfunc (i ignore) Cause() error {\n\treturn i.err\n}\n\nfunc (pget *Pget) parseOptions(opts *Options, argv []string) error {\n\n\tif len(argv) == 0 {\n\t\tos.Stdout.Write(opts.usage())\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\to, err := opts.parse(argv)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse command line options\")\n\t}\n\n\tif opts.Help {\n\t\tos.Stdout.Write(opts.usage())\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tif opts.Version {\n\t\tos.Stdout.Write([]byte(msg))\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tif opts.Update {\n\t\tresult, err := opts.isupdate()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to parse command line options\")\n\t\t}\n\n\t\tos.Stdout.Write(result)\n\t\treturn pget.makeIgnoreErr()\n\t}\n\n\tpget.args = o\n\n\treturn nil\n}\n\nfunc (pget *Pget) parseURLs() error {\n\n\t\/\/ find url in args\n\tfor _, argv := range pget.args {\n\t\tif govalidator.IsURL(argv) {\n\t\t\tpget.URLs = append(pget.URLs, argv)\n\t\t}\n\t}\n\n\tif len(pget.URLs) < 1 {\n\t\tfmt.Fprintf(os.Stdout, \"Please input url separate with space or newline\\n\")\n\t\tfmt.Fprintf(os.Stdout, \"Start download at ^D\\n\")\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\tscan := scanner.Text()\n\t\t\turls := strings.Split(scan, \" \")\n\t\t\tfor _, url := range urls {\n\t\t\t\tif govalidator.IsURL(url) {\n\t\t\t\t\tpget.URLs = append(pget.URLs, url)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to parse url from stdin\")\n\t\t}\n\n\t\tif len(pget.URLs) < 1 {\n\t\t\treturn errors.New(\"urls not found in the arguments passed\")\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pick\n\nimport (\n\t\"io\"\n\n\t\"golang.org\/x\/net\/html\"\n)\n\ntype Option struct {\n\tPageSource io.Reader\n\tTagName string\n\tAttr *Attr \/\/ optional\n}\n\ntype Attr struct {\n\tLabel string\n\tValue string\n}\n\nfunc PickAttr(option *Option, AttrLabel string, limit int) (res []string) {\n\tif option == nil || option.PageSource == nil {\n\t\treturn\n\t}\n\n\tz := html.NewTokenizer(option.PageSource)\n\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tswitch tokenType {\n\n\t\t\/\/ ignore the error token\n\t\t\/\/ quit on eof\n\t\tcase html.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase html.SelfClosingTagToken:\n\t\t\tfallthrough\n\t\tcase html.StartTagToken:\n\t\t\ttagName, attr := z.TagName()\n\n\t\t\tif string(tagName) != option.TagName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar label, value []byte\n\n\t\t\tmatched := false\n\t\t\ttmpRes := []string{}\n\n\t\t\t\/\/ get attr\n\t\t\tfor attr {\n\t\t\t\tlabel, value, attr = z.TagAttr()\n\n\t\t\t\tlabelStr := string(label)\n\t\t\t\tvalueStr := string(value)\n\n\t\t\t\t\/\/ check the attr\n\t\t\t\tif option.Attr == nil || (option.Attr.Label == labelStr && option.Attr.Value == valueStr) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\n\t\t\t\t\/\/ get the result - even the matched false or true\n\t\t\t\tif labelStr == AttrLabel {\n\t\t\t\t\ttmpRes = append(tmpRes, valueStr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ skip the non matched one\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ send the result for matched only\n\t\t\tres = append(res, tmpRes...)\n\n\t\t\t\/\/ return when limit\n\t\t\tif limit > 0 && len(res) >= limit {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc PickText(option *Option) (res []string) {\n\tif option == nil || option.PageSource == nil {\n\t\treturn\n\t}\n\n\tz := html.NewTokenizer(option.PageSource)\n\n\tdepth := 0\n\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tswitch tokenType {\n\n\t\t\/\/ ignore the error token\n\t\t\/\/ quit on eof\n\t\tcase html.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase html.TextToken:\n\t\t\tif depth > 0 {\n\t\t\t\tres = append(res, string(z.Text()))\n\t\t\t}\n\n\t\tcase html.EndTagToken:\n\t\t\tif depth > 0 {\n\t\t\t\tdepth--\n\t\t\t}\n\n\t\tcase html.StartTagToken:\n\t\t\tif depth > 0 {\n\t\t\t\tdepth++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttagName, attr := z.TagName()\n\n\t\t\tif string(tagName) != option.TagName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar label, value []byte\n\n\t\t\tmatched := false\n\n\t\t\t\/\/ get attr\n\t\t\tfor attr {\n\t\t\t\tlabel, value, attr = z.TagAttr()\n\n\t\t\t\t\/\/ TODO: break when found\n\t\t\t\tif option.Attr == nil || (option.Attr.Label == string(label) && option.Attr.Value == string(value)) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdepth++\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>revise #PickAttr cases<commit_after>package pick\n\nimport (\n\t\"io\"\n\n\t\"golang.org\/x\/net\/html\"\n)\n\ntype Option struct {\n\tPageSource io.Reader\n\tTagName string\n\tAttr *Attr \/\/ optional\n}\n\ntype Attr struct {\n\tLabel string\n\tValue string\n}\n\nfunc PickAttr(option *Option, AttrLabel string, limit int) (res []string) {\n\tif option == nil || option.PageSource == nil {\n\t\treturn\n\t}\n\n\tz := html.NewTokenizer(option.PageSource)\n\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tswitch tokenType {\n\n\t\t\/\/ ignore the error token\n\t\t\/\/ quit on eof\n\t\tcase html.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\ttagName, attr := z.TagName()\n\n\t\t\tif string(tagName) != option.TagName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar label, value []byte\n\n\t\t\tmatched := false\n\t\t\ttmpRes := []string{}\n\n\t\t\t\/\/ get attr\n\t\t\tfor attr {\n\t\t\t\tlabel, value, attr = z.TagAttr()\n\n\t\t\t\tlabelStr := string(label)\n\t\t\t\tvalueStr := string(value)\n\n\t\t\t\t\/\/ check the attr\n\t\t\t\tif option.Attr == nil || (option.Attr.Label == labelStr && option.Attr.Value == valueStr) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\n\t\t\t\t\/\/ get the result - even the matched false or true\n\t\t\t\tif labelStr == AttrLabel {\n\t\t\t\t\ttmpRes = append(tmpRes, valueStr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ skip the non matched one\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ send the result for matched only\n\t\t\tres = append(res, tmpRes...)\n\n\t\t\t\/\/ return when limit\n\t\t\tif limit > 0 && len(res) >= limit {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc PickText(option *Option) (res []string) {\n\tif option == nil || option.PageSource == nil {\n\t\treturn\n\t}\n\n\tz := html.NewTokenizer(option.PageSource)\n\n\tdepth := 0\n\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tswitch tokenType {\n\n\t\t\/\/ ignore the error token\n\t\t\/\/ quit on eof\n\t\tcase html.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase html.TextToken:\n\t\t\tif depth > 0 {\n\t\t\t\tres = append(res, string(z.Text()))\n\t\t\t}\n\n\t\tcase html.EndTagToken:\n\t\t\tif depth > 0 {\n\t\t\t\tdepth--\n\t\t\t}\n\n\t\tcase html.StartTagToken:\n\t\t\tif depth > 0 {\n\t\t\t\tdepth++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttagName, attr := z.TagName()\n\n\t\t\tif string(tagName) != option.TagName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar label, value []byte\n\n\t\t\tmatched := false\n\n\t\t\t\/\/ get attr\n\t\t\tfor attr {\n\t\t\t\tlabel, value, attr = z.TagAttr()\n\n\t\t\t\t\/\/ TODO: break when found\n\t\t\t\tif option.Attr == nil || (option.Attr.Label == string(label) && option.Attr.Value == string(value)) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !matched {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdepth++\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package engines\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/camptocamp\/conplicity\/handler\"\n\t\"github.com\/camptocamp\/conplicity\/metrics\"\n\t\"github.com\/camptocamp\/conplicity\/util\"\n\t\"github.com\/camptocamp\/conplicity\/volume\"\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ DuplicityEngine implements a backup engine with Duplicity\ntype DuplicityEngine struct {\n\tHandler *handler.Conplicity\n\tVolume *volume.Volume\n}\n\n\/\/ Constants\nconst cacheMount = \"duplicity_cache:\/root\/.cache\/duplicity\"\nconst timeFormat = \"Mon Jan 2 15:04:05 2006\"\n\nvar fullBackupRx = regexp.MustCompile(\"Last full backup date: (.+)\")\nvar chainEndTimeRx = regexp.MustCompile(\"Chain end time: (.+)\")\n\n\/\/ GetName returns the engine name\nfunc (*DuplicityEngine) GetName() string {\n\treturn \"Duplicity\"\n}\n\n\/\/ Backup performs the backup of the passed volume\nfunc (d *DuplicityEngine) Backup() (err error) {\n\tvol := d.Volume\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"driver\": vol.Driver,\n\t\t\"mountpoint\": vol.Mountpoint,\n\t}).Info(\"Creating duplicity container\")\n\n\ttargetURL, err := url.Parse(vol.Config.TargetURL)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to parse target URL: %v\", err)\n\t\treturn\n\t}\n\n\tpathSeparator := \"\/\"\n\tif targetURL.Scheme == \"swift\" {\n\t\t\/\/ Looks like I'm not the one to fall on this issue: http:\/\/stackoverflow.com\/questions\/27991960\/upload-to-swift-pseudo-folders-using-duplicity\n\t\tpathSeparator = \"_\"\n\t}\n\n\tbackupDir := vol.BackupDir\n\tvol.Target = targetURL.String() + pathSeparator + d.Handler.Hostname + pathSeparator + vol.Name\n\tvol.BackupDir = vol.Mountpoint + \"\/\" + backupDir\n\tvol.Mount = vol.Name + \":\" + vol.Mountpoint + \":ro\"\n\n\terr = d.duplicityBackup()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to backup volume with duplicity: %v\", err)\n\t\treturn\n\t}\n\n\terr = d.removeOld()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to remove old backups: %v\", err)\n\t\treturn\n\t}\n\n\terr = d.cleanup()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to cleanup extraneous duplicity files: %v\", err)\n\t\treturn\n\t}\n\n\tif vol.Config.NoVerify {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": vol.Name,\n\t\t}).Info(\"Skipping verification\")\n\t} else {\n\t\terr = d.verify()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to verify backup: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = d.status()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to retrieve last backup info: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ removeOld cleans up old backup data\nfunc (d *DuplicityEngine) removeOld() (err error) {\n\tv := d.Volume\n\t_, _, err = d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"remove-older-than\", v.Config.Duplicity.RemoveOlderThan,\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--force\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch Duplicity: %v\", err)\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ cleanup removes old index data from duplicity\nfunc (d *DuplicityEngine) cleanup() (err error) {\n\tv := d.Volume\n\t_, _, err = d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"cleanup\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--force\",\n\t\t\t\"--extra-clean\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t}\n\treturn\n}\n\n\/\/ verify checks that the backup is usable\nfunc (d *DuplicityEngine) verify() (err error) {\n\tv := d.Volume\n\tstate, _, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"verify\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--allow-source-mismatch\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t\tv.BackupDir,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tmetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_verifyExitCode\", \"gauge\")\n\terr = metric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.Itoa(state),\n\t\t},\n\t)\n\treturn\n}\n\n\/\/ status gets the latest backup date info from duplicity\nfunc (d *DuplicityEngine) status() (err error) {\n\tv := d.Volume\n\t_, stdout, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"collection-status\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tfullBackup := fullBackupRx.FindStringSubmatch(stdout)\n\tvar fullBackupDate time.Time\n\tchainEndTime := chainEndTimeRx.FindStringSubmatch(stdout)\n\tvar chainEndTimeDate time.Time\n\n\tif len(fullBackup) > 0 {\n\t\tif strings.TrimSpace(fullBackup[1]) == \"none\" {\n\t\t\tfullBackupDate = time.Unix(0, 0)\n\t\t\tchainEndTimeDate = time.Unix(0, 0)\n\t\t} else {\n\t\t\tfullBackupDate, err = time.Parse(timeFormat, strings.TrimSpace(fullBackup[1]))\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"failed to parse full backup data: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(chainEndTime) > 0 {\n\t\t\t\tchainEndTimeDate, err = time.Parse(timeFormat, strings.TrimSpace(chainEndTime[len(chainEndTime)-1]))\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = fmt.Errorf(\"failed to parse chain end time date: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"failed to parse Duplicity output for chain end time of %v\", v.Name)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"failed to parse Duplicity output for last full backup date of %v\", v.Name)\n\t\treturn\n\t}\n\n\tlastBackupMetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_lastBackup\", \"counter\")\n\tlastBackupMetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.FormatInt(chainEndTimeDate.Unix(), 10),\n\t\t},\n\t)\n\n\tlastFullBackupMetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_lastFullBackup\", \"counter\")\n\tlastFullBackupMetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.FormatInt(fullBackupDate.Unix(), 10),\n\t\t},\n\t)\n\n\treturn\n}\n\n\/\/ launchDuplicity starts a duplicity container with given command and binds\nfunc (d *DuplicityEngine) launchDuplicity(cmd []string, binds []string) (state int, stdout string, err error) {\n\terr = util.PullImage(d.Handler.Client, d.Handler.Config.Duplicity.Image)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to pull image: %v\", err)\n\t\treturn\n\t}\n\n\tenv := []string{\n\t\t\"AWS_ACCESS_KEY_ID=\" + d.Handler.Config.AWS.AccessKeyID,\n\t\t\"AWS_SECRET_ACCESS_KEY=\" + d.Handler.Config.AWS.SecretAccessKey,\n\t\t\"SWIFT_USERNAME=\" + d.Handler.Config.Swift.Username,\n\t\t\"SWIFT_PASSWORD=\" + d.Handler.Config.Swift.Password,\n\t\t\"SWIFT_AUTHURL=\" + d.Handler.Config.Swift.AuthURL,\n\t\t\"SWIFT_TENANTNAME=\" + d.Handler.Config.Swift.TenantName,\n\t\t\"SWIFT_REGIONNAME=\" + d.Handler.Config.Swift.RegionName,\n\t\t\"SWIFT_AUTHVERSION=2\",\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"image\": d.Handler.Config.Duplicity.Image,\n\t\t\"command\": strings.Join(cmd, \" \"),\n\t\t\"environment\": strings.Join(env, \", \"),\n\t\t\"binds\": strings.Join(binds, \", \"),\n\t}).Debug(\"Creating container\")\n\n\tcontainer, err := d.Handler.ContainerCreate(\n\t\tcontext.Background(),\n\t\t&container.Config{\n\t\t\tCmd: cmd,\n\t\t\tEnv: env,\n\t\t\tImage: d.Handler.Config.Duplicity.Image,\n\t\t\tOpenStdin: true,\n\t\t\tStdinOnce: true,\n\t\t\tAttachStdin: true,\n\t\t\tAttachStdout: true,\n\t\t\tAttachStderr: true,\n\t\t\tTty: true,\n\t\t},\n\t\t&container.HostConfig{\n\t\t\tBinds: binds,\n\t\t}, nil, \"\",\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to create container: %v\", err)\n\t\treturn\n\t}\n\tdefer util.RemoveContainer(d.Handler.Client, container.ID)\n\n\tlog.Debugf(\"Launching 'duplicity %v'...\", strings.Join(cmd, \" \"))\n\terr = d.Handler.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to start container: %v\", err)\n\t}\n\n\tvar exited bool\n\n\tfor !exited {\n\t\tvar cont types.ContainerJSON\n\t\tcont, err = d.Handler.ContainerInspect(context.Background(), container.ID)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to inspect container: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif cont.State.Status == \"exited\" {\n\t\t\texited = true\n\t\t\tstate = cont.State.ExitCode\n\t\t}\n\t}\n\n\tbody, err := d.Handler.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tDetails: true,\n\t\tFollow: true,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to retrieve logs: %v\", err)\n\t\treturn\n\t}\n\n\tdefer body.Close()\n\tcontent, err := ioutil.ReadAll(body)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to read logs from response: %v\", err)\n\t\treturn\n\t}\n\n\tstdout = string(content)\n\tlog.Debug(stdout)\n\n\treturn\n}\n\n\/\/ duplicityBackup performs the backup of a volume with duplicity\nfunc (d *DuplicityEngine) duplicityBackup() (err error) {\n\tv := d.Volume\n\tlog.WithFields(log.Fields{\n\t\t\"name\": v.Name,\n\t\t\"backup_dir\": v.BackupDir,\n\t\t\"full_if_older_than\": v.Config.Duplicity.FullIfOlderThan,\n\t\t\"target\": v.Target,\n\t\t\"mount\": v.Mount,\n\t}).Debug(\"Starting volume backup\")\n\n\t\/\/ TODO\n\t\/\/ Init engine\n\n\tstate, _, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"--full-if-older-than\", v.Config.Duplicity.FullIfOlderThan,\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--allow-source-mismatch\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.BackupDir,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tmetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_backupExitCode\", \"gauge\")\n\tmetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.Itoa(state),\n\t\t},\n\t)\n\treturn\n}\n<commit_msg>Really take the last chainEndTime element<commit_after>package engines\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/camptocamp\/conplicity\/handler\"\n\t\"github.com\/camptocamp\/conplicity\/metrics\"\n\t\"github.com\/camptocamp\/conplicity\/util\"\n\t\"github.com\/camptocamp\/conplicity\/volume\"\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ DuplicityEngine implements a backup engine with Duplicity\ntype DuplicityEngine struct {\n\tHandler *handler.Conplicity\n\tVolume *volume.Volume\n}\n\n\/\/ Constants\nconst cacheMount = \"duplicity_cache:\/root\/.cache\/duplicity\"\nconst timeFormat = \"Mon Jan 2 15:04:05 2006\"\n\nvar fullBackupRx = regexp.MustCompile(\"Last full backup date: (.+)\")\nvar chainEndTimeRx = regexp.MustCompile(\"Chain end time: (.+)\")\n\n\/\/ GetName returns the engine name\nfunc (*DuplicityEngine) GetName() string {\n\treturn \"Duplicity\"\n}\n\n\/\/ Backup performs the backup of the passed volume\nfunc (d *DuplicityEngine) Backup() (err error) {\n\tvol := d.Volume\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"driver\": vol.Driver,\n\t\t\"mountpoint\": vol.Mountpoint,\n\t}).Info(\"Creating duplicity container\")\n\n\ttargetURL, err := url.Parse(vol.Config.TargetURL)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to parse target URL: %v\", err)\n\t\treturn\n\t}\n\n\tpathSeparator := \"\/\"\n\tif targetURL.Scheme == \"swift\" {\n\t\t\/\/ Looks like I'm not the one to fall on this issue: http:\/\/stackoverflow.com\/questions\/27991960\/upload-to-swift-pseudo-folders-using-duplicity\n\t\tpathSeparator = \"_\"\n\t}\n\n\tbackupDir := vol.BackupDir\n\tvol.Target = targetURL.String() + pathSeparator + d.Handler.Hostname + pathSeparator + vol.Name\n\tvol.BackupDir = vol.Mountpoint + \"\/\" + backupDir\n\tvol.Mount = vol.Name + \":\" + vol.Mountpoint + \":ro\"\n\n\terr = d.duplicityBackup()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to backup volume with duplicity: %v\", err)\n\t\treturn\n\t}\n\n\terr = d.removeOld()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to remove old backups: %v\", err)\n\t\treturn\n\t}\n\n\terr = d.cleanup()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to cleanup extraneous duplicity files: %v\", err)\n\t\treturn\n\t}\n\n\tif vol.Config.NoVerify {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"volume\": vol.Name,\n\t\t}).Info(\"Skipping verification\")\n\t} else {\n\t\terr = d.verify()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to verify backup: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = d.status()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to retrieve last backup info: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ removeOld cleans up old backup data\nfunc (d *DuplicityEngine) removeOld() (err error) {\n\tv := d.Volume\n\t_, _, err = d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"remove-older-than\", v.Config.Duplicity.RemoveOlderThan,\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--force\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch Duplicity: %v\", err)\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ cleanup removes old index data from duplicity\nfunc (d *DuplicityEngine) cleanup() (err error) {\n\tv := d.Volume\n\t_, _, err = d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"cleanup\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--force\",\n\t\t\t\"--extra-clean\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t}\n\treturn\n}\n\n\/\/ verify checks that the backup is usable\nfunc (d *DuplicityEngine) verify() (err error) {\n\tv := d.Volume\n\tstate, _, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"verify\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--allow-source-mismatch\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t\tv.BackupDir,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tmetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_verifyExitCode\", \"gauge\")\n\terr = metric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.Itoa(state),\n\t\t},\n\t)\n\treturn\n}\n\n\/\/ status gets the latest backup date info from duplicity\nfunc (d *DuplicityEngine) status() (err error) {\n\tv := d.Volume\n\t_, stdout, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"collection-status\",\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tfullBackup := fullBackupRx.FindStringSubmatch(stdout)\n\tvar fullBackupDate time.Time\n\tvar chainEndTimeDate time.Time\n\n\tif len(fullBackup) > 0 {\n\t\tchainEndTime := chainEndTimeRx.FindAllStringSubmatch(stdout, -1)\n\t\tif strings.TrimSpace(fullBackup[1]) == \"none\" {\n\t\t\tfullBackupDate = time.Unix(0, 0)\n\t\t\tchainEndTimeDate = time.Unix(0, 0)\n\t\t} else {\n\t\t\tfullBackupDate, err = time.Parse(timeFormat, strings.TrimSpace(fullBackup[1]))\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"failed to parse full backup data: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(chainEndTime) > 0 {\n\t\t\t\tchainEndTimeDate, err = time.Parse(timeFormat, strings.TrimSpace(chainEndTime[len(chainEndTime)-1][1]))\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = fmt.Errorf(\"failed to parse chain end time date: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"failed to parse Duplicity output for chain end time of %v\", v.Name)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"failed to parse Duplicity output for last full backup date of %v\", v.Name)\n\t\treturn\n\t}\n\n\tlastBackupMetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_lastBackup\", \"counter\")\n\tlastBackupMetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.FormatInt(chainEndTimeDate.Unix(), 10),\n\t\t},\n\t)\n\n\tlastFullBackupMetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_lastFullBackup\", \"counter\")\n\tlastFullBackupMetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.FormatInt(fullBackupDate.Unix(), 10),\n\t\t},\n\t)\n\n\treturn\n}\n\n\/\/ launchDuplicity starts a duplicity container with given command and binds\nfunc (d *DuplicityEngine) launchDuplicity(cmd []string, binds []string) (state int, stdout string, err error) {\n\terr = util.PullImage(d.Handler.Client, d.Handler.Config.Duplicity.Image)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to pull image: %v\", err)\n\t\treturn\n\t}\n\n\tenv := []string{\n\t\t\"AWS_ACCESS_KEY_ID=\" + d.Handler.Config.AWS.AccessKeyID,\n\t\t\"AWS_SECRET_ACCESS_KEY=\" + d.Handler.Config.AWS.SecretAccessKey,\n\t\t\"SWIFT_USERNAME=\" + d.Handler.Config.Swift.Username,\n\t\t\"SWIFT_PASSWORD=\" + d.Handler.Config.Swift.Password,\n\t\t\"SWIFT_AUTHURL=\" + d.Handler.Config.Swift.AuthURL,\n\t\t\"SWIFT_TENANTNAME=\" + d.Handler.Config.Swift.TenantName,\n\t\t\"SWIFT_REGIONNAME=\" + d.Handler.Config.Swift.RegionName,\n\t\t\"SWIFT_AUTHVERSION=2\",\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"image\": d.Handler.Config.Duplicity.Image,\n\t\t\"command\": strings.Join(cmd, \" \"),\n\t\t\"environment\": strings.Join(env, \", \"),\n\t\t\"binds\": strings.Join(binds, \", \"),\n\t}).Debug(\"Creating container\")\n\n\tcontainer, err := d.Handler.ContainerCreate(\n\t\tcontext.Background(),\n\t\t&container.Config{\n\t\t\tCmd: cmd,\n\t\t\tEnv: env,\n\t\t\tImage: d.Handler.Config.Duplicity.Image,\n\t\t\tOpenStdin: true,\n\t\t\tStdinOnce: true,\n\t\t\tAttachStdin: true,\n\t\t\tAttachStdout: true,\n\t\t\tAttachStderr: true,\n\t\t\tTty: true,\n\t\t},\n\t\t&container.HostConfig{\n\t\t\tBinds: binds,\n\t\t}, nil, \"\",\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to create container: %v\", err)\n\t\treturn\n\t}\n\tdefer util.RemoveContainer(d.Handler.Client, container.ID)\n\n\tlog.Debugf(\"Launching 'duplicity %v'...\", strings.Join(cmd, \" \"))\n\terr = d.Handler.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to start container: %v\", err)\n\t}\n\n\tvar exited bool\n\n\tfor !exited {\n\t\tvar cont types.ContainerJSON\n\t\tcont, err = d.Handler.ContainerInspect(context.Background(), container.ID)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to inspect container: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif cont.State.Status == \"exited\" {\n\t\t\texited = true\n\t\t\tstate = cont.State.ExitCode\n\t\t}\n\t}\n\n\tbody, err := d.Handler.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tDetails: true,\n\t\tFollow: true,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to retrieve logs: %v\", err)\n\t\treturn\n\t}\n\n\tdefer body.Close()\n\tcontent, err := ioutil.ReadAll(body)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to read logs from response: %v\", err)\n\t\treturn\n\t}\n\n\tstdout = string(content)\n\tlog.Debug(stdout)\n\n\treturn\n}\n\n\/\/ duplicityBackup performs the backup of a volume with duplicity\nfunc (d *DuplicityEngine) duplicityBackup() (err error) {\n\tv := d.Volume\n\tlog.WithFields(log.Fields{\n\t\t\"name\": v.Name,\n\t\t\"backup_dir\": v.BackupDir,\n\t\t\"full_if_older_than\": v.Config.Duplicity.FullIfOlderThan,\n\t\t\"target\": v.Target,\n\t\t\"mount\": v.Mount,\n\t}).Debug(\"Starting volume backup\")\n\n\t\/\/ TODO\n\t\/\/ Init engine\n\n\tstate, _, err := d.launchDuplicity(\n\t\t[]string{\n\t\t\t\"--full-if-older-than\", v.Config.Duplicity.FullIfOlderThan,\n\t\t\t\"--s3-use-new-style\",\n\t\t\t\"--ssh-options\", \"-oStrictHostKeyChecking=no\",\n\t\t\t\"--no-encryption\",\n\t\t\t\"--allow-source-mismatch\",\n\t\t\t\"--name\", v.Name,\n\t\t\tv.BackupDir,\n\t\t\tv.Target,\n\t\t},\n\t\t[]string{\n\t\t\tv.Mount,\n\t\t\tcacheMount,\n\t\t},\n\t)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to launch duplicity: %v\", err)\n\t\treturn\n\t}\n\n\tmetric := d.Handler.MetricsHandler.NewMetric(\"conplicity_backupExitCode\", \"gauge\")\n\tmetric.UpdateEvent(\n\t\t&metrics.Event{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"volume\": v.Name,\n\t\t\t},\n\t\t\tValue: strconv.Itoa(state),\n\t\t},\n\t)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package view\n\nimport (\n\t\"encoding\/base64\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ungerik\/go-start\/utils\"\n)\n\n\/\/ BasicAuth implements HTTP basic auth as Authenticator.\n\/\/ See also HtpasswdWatchingBasicAuth\ntype BasicAuth struct {\n\tRealm string\n\tUserPassword map[string]string \/\/ map of username to base64 encoded SHA1 hash of the password\n}\n\n\/\/ NewBasicAuth creates a BasicAuth instance with a series of\n\/\/ not encrypted usernames and passwords that are provided in alternating order.\nfunc NewBasicAuth(realm string, usernamesAndPasswords ...string) *BasicAuth {\n\tuserPass := make(map[string]string, len(usernamesAndPasswords)\/2)\n\n\tfor i := 0; i < len(usernamesAndPasswords)\/2; i++ {\n\t\tusername := usernamesAndPasswords[i*2]\n\t\tpassword := usernamesAndPasswords[i*2+1]\n\t\tuserPass[username] = utils.SHA1Base64String(password)\n\t}\n\n\treturn &BasicAuth{\n\t\tRealm: realm,\n\t\tUserPassword: userPass,\n\t}\n}\n\nfunc RequestBasicAuth(request *Request) (username, password string) {\n\theader := request.Header.Get(\"Authorization\")\n\tf := strings.Fields(header)\n\tif len(f) == 2 && f[0] == \"Basic\" {\n\t\tif b, err := base64.StdEncoding.DecodeString(f[1]); err == nil {\n\t\t\ta := strings.Split(string(b), \":\")\n\t\t\tif len(a) == 2 {\n\t\t\t\tusername = a[0]\n\t\t\t\tpassword = a[1]\n\t\t\t\treturn username, password\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", \"\"\n}\n\nfunc SendBasicAuthRequired(response *Response, realm string) {\n\tresponse.Header().Set(\"WWW-Authenticate\", \"Basic realm=\\\"\"+realm+\"\\\"\")\n\tresponse.AuthorizationRequired401()\n}\n\nfunc (basicAuth *BasicAuth) Authenticate(ctx *Context) (ok bool, err error) {\n\tusername, password := RequestBasicAuth(ctx.Request)\n\tif username != \"\" {\n\t\tp, ok := basicAuth.UserPassword[username]\n\t\tif ok && p == utils.SHA1Base64String(password) {\n\t\t\tctx.AuthUser = username\n\t\t\treturn true, nil\n\t\t}\n\t}\n\tSendBasicAuthRequired(ctx.Response, basicAuth.Realm)\n\treturn false, nil\n}\n\ntype HtpasswdWatchingBasicAuth struct {\n\tbasicAuth BasicAuth\n\thtpasswdFile string\n\thtpasswdFileTime time.Time\n\tmutex sync.Mutex\n}\n\nfunc NewHtpasswdWatchingBasicAuth(realm, htpasswdFile string) (auth *HtpasswdWatchingBasicAuth, err error) {\n\tauth = &HtpasswdWatchingBasicAuth{\n\t\tbasicAuth: BasicAuth{Realm: realm},\n\t\thtpasswdFile: htpasswdFile,\n\t}\n\tauth.basicAuth.UserPassword, auth.htpasswdFileTime, err = utils.ReadHtpasswdFile(htpasswdFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn auth, nil\n}\n\nfunc (auth *HtpasswdWatchingBasicAuth) Authenticate(ctx *Context) (ok bool, err error) {\n\tauth.mutex.Lock()\n\tdefer auth.mutex.Unlock()\n\n\tt, err := utils.FileModifiedTime(auth.htpasswdFile)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif t.After(auth.htpasswdFileTime) {\n\t\tauth.basicAuth.UserPassword, auth.htpasswdFileTime, err = utils.ReadHtpasswdFile(auth.htpasswdFile)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\treturn auth.basicAuth.Authenticate(ctx)\n}\n<commit_msg>BasicAuth.Logout<commit_after>package view\n\nimport (\n\t\"encoding\/base64\"\n\t\/\/ \"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ungerik\/go-start\/utils\"\n)\n\nfunc SendBasicAuthRequired(response *Response, realm string) {\n\tresponse.Header().Set(\"WWW-Authenticate\", \"Basic realm=\\\"\"+realm+\"\\\"\")\n\tresponse.AuthorizationRequired401()\n\tlog.Printf(\"BasicAuth requested for realm '%s'\", realm)\n}\n\n\/\/ BasicAuth implements HTTP basic auth as Authenticator.\n\/\/ See also HtpasswdWatchingBasicAuth\ntype BasicAuth struct {\n\tRealm string\n\tUserPassword map[string]string \/\/ map of username to base64 encoded SHA1 hash of the password\n\tloggedOutUsers map[string]bool\n}\n\n\/\/ NewBasicAuth creates a BasicAuth instance with a series of\n\/\/ not encrypted usernames and passwords that are provided in alternating order.\nfunc NewBasicAuth(realm string, usernamesAndPasswords ...string) *BasicAuth {\n\tuserPass := make(map[string]string, len(usernamesAndPasswords)\/2)\n\n\tfor i := 0; i < len(usernamesAndPasswords)\/2; i++ {\n\t\tusername := usernamesAndPasswords[i*2]\n\t\tpassword := usernamesAndPasswords[i*2+1]\n\t\tuserPass[username] = utils.SHA1Base64String(password)\n\t}\n\n\treturn &BasicAuth{\n\t\tRealm: realm,\n\t\tUserPassword: userPass,\n\t\tloggedOutUsers: make(map[string]bool),\n\t}\n}\n\nfunc BasicAuthFromRequest(request *Request) (username, password string) {\n\theader := request.Header.Get(\"Authorization\")\n\tf := strings.Fields(header)\n\tif len(f) == 2 && f[0] == \"Basic\" {\n\t\tif b, err := base64.StdEncoding.DecodeString(f[1]); err == nil {\n\t\t\ta := strings.Split(string(b), \":\")\n\t\t\tif len(a) == 2 {\n\t\t\t\tusername = a[0]\n\t\t\t\tpassword = a[1]\n\t\t\t\treturn username, password\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", \"\"\n}\n\nfunc (basicAuth *BasicAuth) Logout(ctx *Context) {\n\tif ctx.AuthUser != \"\" {\n\t\tbasicAuth.loggedOutUsers[ctx.AuthUser] = true\n\t\tctx.AuthUser = \"\"\n\n\t\tlog.Println(\"BasicAuth logged out user\", ctx.AuthUser)\n\t}\n}\n\nfunc (basicAuth *BasicAuth) Authenticate(ctx *Context) (ok bool, err error) {\n\tusername, password := BasicAuthFromRequest(ctx.Request)\n\tif username != \"\" {\n\t\tif basicAuth.loggedOutUsers[username] {\n\t\t\tbasicAuth.loggedOutUsers[username] = false\n\t\t\tSendBasicAuthRequired(ctx.Response, basicAuth.Realm)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tp, ok := basicAuth.UserPassword[username]\n\t\tif ok && p == utils.SHA1Base64String(password) {\n\t\t\tctx.AuthUser = username\n\t\t\t\/\/ fmt.Println(\"BasicAuth\", username)\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tctx.AuthUser = \"\"\n\tSendBasicAuthRequired(ctx.Response, basicAuth.Realm)\n\treturn false, nil\n}\n\ntype HtpasswdWatchingBasicAuth struct {\n\tBasicAuth\n\thtpasswdFile string\n\thtpasswdFileTime time.Time\n\tmutex sync.Mutex\n}\n\nfunc NewHtpasswdWatchingBasicAuth(realm, htpasswdFile string) (auth *HtpasswdWatchingBasicAuth, err error) {\n\tauth = &HtpasswdWatchingBasicAuth{\n\t\tBasicAuth: BasicAuth{Realm: realm, loggedOutUsers: make(map[string]bool)},\n\t\thtpasswdFile: htpasswdFile,\n\t}\n\tauth.UserPassword, auth.htpasswdFileTime, err = utils.ReadHtpasswdFile(htpasswdFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn auth, nil\n}\n\nfunc (auth *HtpasswdWatchingBasicAuth) Authenticate(ctx *Context) (ok bool, err error) {\n\tauth.mutex.Lock()\n\tdefer auth.mutex.Unlock()\n\n\tt, err := utils.FileModifiedTime(auth.htpasswdFile)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif t.After(auth.htpasswdFileTime) {\n\t\tauth.UserPassword, auth.htpasswdFileTime, err = utils.ReadHtpasswdFile(auth.htpasswdFile)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\treturn auth.BasicAuth.Authenticate(ctx)\n}\n<|endoftext|>"} {"text":"<commit_before>package collector\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\ntype MetricsCollector struct {\n\tclusterHealth *ClusterHealth\n\tnodes *Nodes\n\tindices *Indices\n\n\tclusterHealthResponse *clusterHealthResponse\n\tnodeStatsResponse *nodeStatsResponse\n\tindexStatsResponse *indexStatsResponse\n}\n\nfunc NewMetricsCollector(logger log.Logger, client *http.Client, url *url.URL, all bool) *MetricsCollector {\n\treturn &MetricsCollector{\n\t\tclusterHealth: NewClusterHealth(logger, client, url),\n\t\tnodes: NewNodes(logger, client, url, all),\n\t\tindices: NewIndices(logger, client, url, all),\n\t}\n\n\treturn nil\n}\n\nfunc (c *MetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tc.clusterHealth.Describe(ch)\n\tc.nodes.Describe(ch)\n\tc.indices.Describe(ch)\n}\n\nfunc (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\t\/\/ clusterHealth\n\tclusterHealthResponse, err := c.clusterHealth.fetchAndDecodeClusterHealth()\n\tif err != nil {\n\t\tc.clusterHealth.up.Set(0)\n\t\tlevel.Warn(c.clusterHealth.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode cluster health\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.clusterHealth.up.Set(1)\n\n\t\/\/ nodes\n\tnodeStatsResponse, err := c.nodes.fetchAndDecodeNodeStats()\n\tif err != nil {\n\t\tc.nodes.up.Set(0)\n\t\tlevel.Warn(c.nodes.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode node stats\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.nodes.up.Set(1)\n\n\t\/\/ indices\n\tindexStatsResponse, err := c.indices.fetchAndDecodeIndexStats()\n\tif err != nil {\n\t\tc.indices.up.Set(0)\n\t\tlevel.Warn(c.indices.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode index stats\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.indices.up.Set(1)\n\n\tc.clusterHealth.Collect(ch, clusterHealthResponse)\n\tc.nodes.Collect(ch, nodeStatsResponse)\n\tc.indices.Collect(ch, clusterHealthResponse, nodeStatsResponse, indexStatsResponse)\n}\n\nfunc (c *ClusterHealth) fetchAndDecodeClusterHealth() (clusterHealthResponse, error) {\n\tvar chr clusterHealthResponse\n\n\tu := *c.url\n\tu.Path = \"\/_cluster\/health\"\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn chr, fmt.Errorf(\"failed to get cluster health from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn chr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&chr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn chr, err\n\t}\n\n\treturn chr, nil\n}\n\nfunc (c *Nodes) fetchAndDecodeNodeStats() (nodeStatsResponse, error) {\n\tvar nsr nodeStatsResponse\n\n\tu := *c.url\n\tu.Path = \"\/_nodes\/_local\/stats\"\n\tif c.all {\n\t\tu.Path = \"\/_nodes\/stats\"\n\t}\n\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn nsr, fmt.Errorf(\"failed to get cluster health from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nsr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&nsr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn nsr, err\n\t}\n\treturn nsr, nil\n}\n\nfunc (c *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {\n\tvar isr indexStatsResponse\n\n\tu := *c.url\n\tu.Path = \"\/_all\/_stats\"\n\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn isr, fmt.Errorf(\"failed to get index stats from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\treturn isr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&isr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn isr, err\n\t}\n\treturn isr, nil\n}\n<commit_msg>remove unreachable code<commit_after>package collector\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\ntype MetricsCollector struct {\n\tclusterHealth *ClusterHealth\n\tnodes *Nodes\n\tindices *Indices\n\n\tclusterHealthResponse *clusterHealthResponse\n\tnodeStatsResponse *nodeStatsResponse\n\tindexStatsResponse *indexStatsResponse\n}\n\nfunc NewMetricsCollector(logger log.Logger, client *http.Client, url *url.URL, all bool) *MetricsCollector {\n\treturn &MetricsCollector{\n\t\tclusterHealth: NewClusterHealth(logger, client, url),\n\t\tnodes: NewNodes(logger, client, url, all),\n\t\tindices: NewIndices(logger, client, url, all),\n\t}\n}\n\nfunc (c *MetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tc.clusterHealth.Describe(ch)\n\tc.nodes.Describe(ch)\n\tc.indices.Describe(ch)\n}\n\nfunc (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\t\/\/ clusterHealth\n\tclusterHealthResponse, err := c.clusterHealth.fetchAndDecodeClusterHealth()\n\tif err != nil {\n\t\tc.clusterHealth.up.Set(0)\n\t\tlevel.Warn(c.clusterHealth.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode cluster health\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.clusterHealth.up.Set(1)\n\n\t\/\/ nodes\n\tnodeStatsResponse, err := c.nodes.fetchAndDecodeNodeStats()\n\tif err != nil {\n\t\tc.nodes.up.Set(0)\n\t\tlevel.Warn(c.nodes.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode node stats\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.nodes.up.Set(1)\n\n\t\/\/ indices\n\tindexStatsResponse, err := c.indices.fetchAndDecodeIndexStats()\n\tif err != nil {\n\t\tc.indices.up.Set(0)\n\t\tlevel.Warn(c.indices.logger).Log(\n\t\t\t\"msg\", \"failed to fetch and decode index stats\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\tc.indices.up.Set(1)\n\n\tc.clusterHealth.Collect(ch, clusterHealthResponse)\n\tc.nodes.Collect(ch, nodeStatsResponse)\n\tc.indices.Collect(ch, clusterHealthResponse, nodeStatsResponse, indexStatsResponse)\n}\n\nfunc (c *ClusterHealth) fetchAndDecodeClusterHealth() (clusterHealthResponse, error) {\n\tvar chr clusterHealthResponse\n\n\tu := *c.url\n\tu.Path = \"\/_cluster\/health\"\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn chr, fmt.Errorf(\"failed to get cluster health from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn chr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&chr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn chr, err\n\t}\n\n\treturn chr, nil\n}\n\nfunc (c *Nodes) fetchAndDecodeNodeStats() (nodeStatsResponse, error) {\n\tvar nsr nodeStatsResponse\n\n\tu := *c.url\n\tu.Path = \"\/_nodes\/_local\/stats\"\n\tif c.all {\n\t\tu.Path = \"\/_nodes\/stats\"\n\t}\n\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn nsr, fmt.Errorf(\"failed to get cluster health from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nsr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&nsr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn nsr, err\n\t}\n\treturn nsr, nil\n}\n\nfunc (c *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {\n\tvar isr indexStatsResponse\n\n\tu := *c.url\n\tu.Path = \"\/_all\/_stats\"\n\n\tres, err := c.client.Get(u.String())\n\tif err != nil {\n\t\treturn isr, fmt.Errorf(\"failed to get index stats from %s:\/\/%s:%s\/%s: %s\",\n\t\t\tu.Scheme, u.Hostname(), u.Port(), u.Path, err)\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\treturn isr, fmt.Errorf(\"HTTP Request failed with code %d\", res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&isr); err != nil {\n\t\tc.jsonParseFailures.Inc()\n\t\treturn isr, err\n\t}\n\treturn isr, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/murdinc\/crusher\/config\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestConfigRead(t *testing.T) {\n\n\tcfg, err := config.ReadConfig()\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, cfg.Servers)\n\n}\n<commit_msg>this is an interesting thing to try to test<commit_after>package config_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/murdinc\/crusher\/config\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestConfigRead(t *testing.T) {\n\tgetCfg := func() {\n\t\tconfig.ReadConfig()\n\t}\n\n\tassert.NotPanics(t, getCfg)\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst configFile = \"..\/config.example.json\"\n\nfunc TestLoadConfiguraiton(t *testing.T) {\n\terr := LoadConfiguration(configFile)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"0.0.0.0\", Get().Web.ListenHost)\n}\n\nfunc TestContainsAuthKey(t *testing.T) {\n\tt.Run(\"key exists\", func(t *testing.T) {\n\t\tLoadConfiguration(configFile)\n\t\tassert.True(t, Get().ContainsAuthKey(\"somekey\"))\n\t})\n\n\tt.Run(\"key doesn't exist\", func(t *testing.T) {\n\t\tLoadConfiguration(configFile)\n\t\tassert.False(t, Get().ContainsAuthKey(\"someotherkey\"))\n\t})\n}\n<commit_msg>config: fix tests using old config struct<commit_after>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst configFile = \"..\/config.example.json\"\n\nfunc TestLoadConfiguraiton(t *testing.T) {\n\terr := LoadConfiguration(configFile)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"0.0.0.0\", viper.GetString(APIHost))\n}\n\nfunc TestContainsAuthKey(t *testing.T) {\n\tt.Run(\"key exists\", func(t *testing.T) {\n\t\tLoadConfiguration(configFile)\n\t\tassert.True(t, ContainsAuthKey(\"somekey\"))\n\t})\n\n\tt.Run(\"key doesn't exist\", func(t *testing.T) {\n\t\tLoadConfiguration(configFile)\n\t\tassert.False(t, ContainsAuthKey(\"someotherkey\"))\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add test to ensure services in another namespace does not affect route. (#4108)<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package version defines \"version\" command i.e. outputs current version\npackage version\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Command config\nvar Command = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print version of Eclectica\",\n\tRun: run,\n}\n\n\/\/ Version number\nconst Version = \"0.1.1\"\n\n\/\/ Runner\nfunc run(c *cobra.Command, args []string) {\n\tfmt.Println(Version)\n}\n<commit_msg>Bump version to 0.2.0<commit_after>\/\/ Package version defines \"version\" command i.e. outputs current version\npackage version\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Command config\nvar Command = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print version of Eclectica\",\n\tRun: run,\n}\n\n\/\/ Version number\nconst Version = \"0.2.0\"\n\n\/\/ Runner\nfunc run(c *cobra.Command, args []string) {\n\tfmt.Println(Version)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tsqlmock \"github.com\/DATA-DOG\/go-sqlmock\"\n\t\"github.com\/horgh\/rss\"\n)\n\n\/\/ Item does not exist. No GUID. Publication date is too old. No record.\nfunc TestShouldRecordItem0(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(-time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. No GUID. Publication date is too old. Force record.\nfunc TestShouldRecordItem1(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(-time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := true\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. No GUID. Publication date is okay. Record.\nfunc TestShouldRecordItem2(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. GUID. Record.\nfunc TestShouldRecordItem3(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND guid = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\trows1 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows1)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item exists by GUID. No record.\nfunc TestShouldRecordItem4(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\trows0.AddRow(1)\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND guid = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item exists by link. No record.\nfunc TestShouldRecordItem5(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND guid = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\trows1 := sqlmock.NewRows([]string{\"id\"})\n\trows1.AddRow(1)\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows1)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n<commit_msg>Fix tests<commit_after>package main\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tsqlmock \"github.com\/DATA-DOG\/go-sqlmock\"\n\t\"github.com\/horgh\/rss\"\n)\n\n\/\/ Item does not exist. No GUID. Publication date is too old. No record.\nfunc TestShouldRecordItem0(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(-time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. No GUID. Publication date is too old. Force record.\nfunc TestShouldRecordItem1(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(-time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := true\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. No GUID. Publication date is okay. Record.\nfunc TestShouldRecordItem2(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item does not exist. GUID. Record.\nfunc TestShouldRecordItem3(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\trows1 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND guid = \\$2`).\n\t\tWillReturnRows(rows1)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := true\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item exists by GUID. No record.\nfunc TestShouldRecordItem4(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\trows1 := sqlmock.NewRows([]string{\"id\"})\n\trows1.AddRow(1)\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND guid = \\$2`).\n\t\tWillReturnRows(rows1)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n\n\/\/ Item exists by link. No record.\nfunc TestShouldRecordItem5(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to open mock db: %s\", err)\n\t}\n\n\tdefer func() {\n\t\tif err := db.Close(); err != nil {\n\t\t\tt.Errorf(\"closing db failed: %s\", err)\n\t\t}\n\t}()\n\n\trows0 := sqlmock.NewRows([]string{\"id\"})\n\trows0.AddRow(1)\n\tmock.ExpectQuery(\n\t\t`SELECT id FROM rss_item WHERE rss_feed_id = \\$1 AND link = \\$2`).\n\t\tWillReturnRows(rows0)\n\n\tmock.ExpectClose()\n\n\tconfig := &Config{Quiet: 1}\n\tlastUpdateTime := time.Now()\n\tfeed := &DBFeed{LastUpdateTime: &lastUpdateTime}\n\tcutoffTime := time.Now()\n\titem := &rss.Item{\n\t\tGUID: \"test-guid\",\n\t\tPubDate: cutoffTime.Add(time.Duration(10) * time.Hour),\n\t}\n\tignorePublicationTimes := false\n\n\trecord, err := shouldRecordItem(config, db, feed, item, cutoffTime,\n\t\tignorePublicationTimes)\n\tif err != nil {\n\t\tt.Fatalf(\"checking whether to record raised error: %s\", err)\n\t}\n\n\twant := false\n\tif record != want {\n\t\tt.Errorf(\"record = %#v, wanted %#v\", record, want)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gafka\/zk\"\n\tlog \"github.com\/funkygao\/log4go\"\n\t_ \"github.com\/funkygao\/mysql\"\n)\n\ntype mysqlStore struct {\n\tcf *config\n\tzkzone *zk.ZkZone\n\n\tshutdownCh chan struct{}\n\trefreshCh chan struct{}\n\n\tallowUnregisteredGroup bool\n\n\t\/\/ mysql store, initialized on refresh\n\t\/\/ TODO https:\/\/github.com\/hashicorp\/go-memdb\n\tappClusterMap map[string]string \/\/ appid:cluster\n\tappSecretMap map[string]string \/\/ appid:secret\n\tappSubMap map[string]map[string]struct{} \/\/ appid:subscribed topics\n\tappPubMap map[string]map[string]struct{} \/\/ appid:topics\n\tappConsumerGroupMap map[string]map[string]struct{} \/\/ appid:groups\n\tshadowQueueMap map[string]string \/\/ hisappid.topic.ver.myappid:group\n}\n\nfunc New(cf *config) *mysqlStore {\n\tif cf.Zone == \"\" {\n\t\tpanic(\"empty zone\")\n\t}\n\tzkAddrs := ctx.ZoneZkAddrs(cf.Zone)\n\tif len(zkAddrs) == 0 {\n\t\tpanic(\"empty zookeeper addr\")\n\t}\n\n\treturn &mysqlStore{\n\t\tcf: cf,\n\t\tzkzone: zk.NewZkZone(zk.DefaultConfig(cf.Zone, zkAddrs)), \/\/ TODO session timeout\n\t\tshutdownCh: make(chan struct{}),\n\t\trefreshCh: make(chan struct{}),\n\t\tallowUnregisteredGroup: false,\n\t}\n}\n\nfunc (this *mysqlStore) Name() string {\n\treturn \"mysql\"\n}\n\ntype applicationRecord struct {\n\tAppId, Cluster, AppSecret string\n}\n\ntype appTopicRecord struct {\n\tAppId, TopicName string\n}\n\ntype appSubscribeRecord struct {\n\tAppId, TopicName string\n}\n\ntype appConsumerGroupRecord struct {\n\tAppId, GroupName string\n}\n\ntype shadowQueueRecord struct {\n\tHisAppId, TopicName, Ver string\n\tMyAppid, Group string\n}\n\nfunc (this *mysqlStore) Start() error {\n\tif err := this.refreshFromMysql(); err != nil {\n\t\t\/\/ refuse to start if mysql conn fails\n\t\treturn err\n\t}\n\n\t\/\/ TODO watch KatewayMysqlDsn znode\n\n\tgo func() {\n\t\tticker := time.NewTicker(this.cf.Refresh)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tthis.refreshFromMysql()\n\t\t\t\tlog.Info(\"manager refreshed from mysql\")\n\n\t\t\t\tselect {\n\t\t\t\tcase this.refreshCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\tcase <-this.shutdownCh:\n\t\t\t\tlog.Info(\"mysql manager stopped\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) Stop() {\n\tclose(this.shutdownCh)\n}\n\nfunc (this *mysqlStore) refreshFromMysql() error {\n\tdsn, err := this.zkzone.KatewayMysqlDsn()\n\tif err != nil {\n\t\tlog.Error(\"mysql manager store fetching mysql dsn: %v\", err)\n\t\treturn err\n\t}\n\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t\/\/ if mysql dies, keep old\/stale manager records as it was\n\tif err = this.fetchApplicationRecords(db); err != nil {\n\t\tlog.Error(\"apps: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchTopicRecords(db); err != nil {\n\t\tlog.Error(\"topics: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchSubscribeRecords(db); err != nil {\n\t\tlog.Error(\"subs: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchAppGroupRecords(db); err != nil {\n\t\tlog.Error(\"app groups: %v\", err)\n\t\treturn err\n\t}\n\n\tif false {\n\t\tif err = this.fetchShadowQueueRecords(db); err != nil {\n\t\t\tlog.Error(\"shadow queues: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) shadowKey(hisAppid, topic, ver, myAppid string) string {\n\treturn hisAppid + \".\" + topic + \".\" + ver + \".\" + myAppid\n}\n\nfunc (this *mysqlStore) fetchShadowQueueRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT HisAppId,TopicName,Version,MyAppid,GroupName FROM group_shadow WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar shadow shadowQueueRecord\n\tshadowQueueMap := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&shadow.HisAppId, &shadow.TopicName, &shadow.Ver, &shadow.MyAppid, &shadow.Group)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tshadowQueueMap[this.shadowKey(shadow.HisAppId, shadow.TopicName, shadow.Ver, shadow.MyAppid)] = shadow.Group\n\t}\n\n\tthis.shadowQueueMap = shadowQueueMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchAppGroupRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,GroupName FROM application_group WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar group appConsumerGroupRecord\n\tappGroupMap := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&group.AppId, &group.GroupName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := appGroupMap[group.AppId]; !present {\n\t\t\tappGroupMap[group.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tappGroupMap[group.AppId][group.GroupName] = struct{}{}\n\t}\n\n\tthis.appConsumerGroupMap = appGroupMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchApplicationRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,Cluster,AppSecret FROM application WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app applicationRecord\n\tappClusterMap := make(map[string]string)\n\tappSecretMap := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.Cluster, &app.AppSecret)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tappSecretMap[app.AppId] = app.AppSecret\n\t\tappClusterMap[app.AppId] = app.Cluster\n\t}\n\n\tthis.appClusterMap = appClusterMap\n\tthis.appSecretMap = appSecretMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchSubscribeRecords(db *sql.DB) error {\n\t\/\/ FIXME a sub topic t, t disabled, this subscription entry should be disabled too\n\trows, err := db.Query(\"SELECT AppId,TopicName FROM topics_subscriber WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app appSubscribeRecord\n\tm := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.TopicName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := m[app.AppId]; !present {\n\t\t\tm[app.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tm[app.AppId][app.TopicName] = struct{}{}\n\t}\n\n\tthis.appSubMap = m\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchTopicRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,TopicName FROM topics WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app appTopicRecord\n\tm := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.TopicName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := m[app.AppId]; !present {\n\t\t\tm[app.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tm[app.AppId][app.TopicName] = struct{}{}\n\t}\n\n\tthis.appPubMap = m\n\n\treturn nil\n}\n<commit_msg>more strict validation check<commit_after>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gafka\/zk\"\n\tlog \"github.com\/funkygao\/log4go\"\n\t_ \"github.com\/funkygao\/mysql\"\n)\n\ntype mysqlStore struct {\n\tcf *config\n\tzkzone *zk.ZkZone\n\n\tshutdownCh chan struct{}\n\trefreshCh chan struct{}\n\n\tallowUnregisteredGroup bool\n\n\t\/\/ mysql store, initialized on refresh\n\t\/\/ TODO https:\/\/github.com\/hashicorp\/go-memdb\n\tappClusterMap map[string]string \/\/ appid:cluster\n\tappSecretMap map[string]string \/\/ appid:secret\n\tappSubMap map[string]map[string]struct{} \/\/ appid:subscribed topics\n\tappPubMap map[string]map[string]struct{} \/\/ appid:topics\n\tappConsumerGroupMap map[string]map[string]struct{} \/\/ appid:groups\n\tshadowQueueMap map[string]string \/\/ hisappid.topic.ver.myappid:group\n}\n\nfunc New(cf *config) *mysqlStore {\n\tif cf == nil || cf.Zone == \"\" {\n\t\tpanic(\"empty zone\")\n\t}\n\tzkAddrs := ctx.ZoneZkAddrs(cf.Zone)\n\tif len(zkAddrs) == 0 {\n\t\tpanic(\"empty zookeeper addr\")\n\t}\n\n\treturn &mysqlStore{\n\t\tcf: cf,\n\t\tzkzone: zk.NewZkZone(zk.DefaultConfig(cf.Zone, zkAddrs)), \/\/ TODO session timeout\n\t\tshutdownCh: make(chan struct{}),\n\t\trefreshCh: make(chan struct{}),\n\t\tallowUnregisteredGroup: false,\n\t}\n}\n\nfunc (this *mysqlStore) Name() string {\n\treturn \"mysql\"\n}\n\ntype applicationRecord struct {\n\tAppId, Cluster, AppSecret string\n}\n\ntype appTopicRecord struct {\n\tAppId, TopicName string\n}\n\ntype appSubscribeRecord struct {\n\tAppId, TopicName string\n}\n\ntype appConsumerGroupRecord struct {\n\tAppId, GroupName string\n}\n\ntype shadowQueueRecord struct {\n\tHisAppId, TopicName, Ver string\n\tMyAppid, Group string\n}\n\nfunc (this *mysqlStore) Start() error {\n\tif err := this.refreshFromMysql(); err != nil {\n\t\t\/\/ refuse to start if mysql conn fails\n\t\treturn err\n\t}\n\n\t\/\/ TODO watch KatewayMysqlDsn znode\n\n\tgo func() {\n\t\tticker := time.NewTicker(this.cf.Refresh)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tthis.refreshFromMysql()\n\t\t\t\tlog.Info(\"manager refreshed from mysql\")\n\n\t\t\t\tselect {\n\t\t\t\tcase this.refreshCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\tcase <-this.shutdownCh:\n\t\t\t\tlog.Info(\"mysql manager stopped\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) Stop() {\n\tclose(this.shutdownCh)\n}\n\nfunc (this *mysqlStore) refreshFromMysql() error {\n\tdsn, err := this.zkzone.KatewayMysqlDsn()\n\tif err != nil {\n\t\tlog.Error(\"mysql manager store fetching mysql dsn: %v\", err)\n\t\treturn err\n\t}\n\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t\/\/ if mysql dies, keep old\/stale manager records as it was\n\tif err = this.fetchApplicationRecords(db); err != nil {\n\t\tlog.Error(\"apps: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchTopicRecords(db); err != nil {\n\t\tlog.Error(\"topics: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchSubscribeRecords(db); err != nil {\n\t\tlog.Error(\"subs: %v\", err)\n\t\treturn err\n\t}\n\n\tif err = this.fetchAppGroupRecords(db); err != nil {\n\t\tlog.Error(\"app groups: %v\", err)\n\t\treturn err\n\t}\n\n\tif false {\n\t\tif err = this.fetchShadowQueueRecords(db); err != nil {\n\t\t\tlog.Error(\"shadow queues: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) shadowKey(hisAppid, topic, ver, myAppid string) string {\n\treturn hisAppid + \".\" + topic + \".\" + ver + \".\" + myAppid\n}\n\nfunc (this *mysqlStore) fetchShadowQueueRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT HisAppId,TopicName,Version,MyAppid,GroupName FROM group_shadow WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar shadow shadowQueueRecord\n\tshadowQueueMap := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&shadow.HisAppId, &shadow.TopicName, &shadow.Ver, &shadow.MyAppid, &shadow.Group)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tshadowQueueMap[this.shadowKey(shadow.HisAppId, shadow.TopicName, shadow.Ver, shadow.MyAppid)] = shadow.Group\n\t}\n\n\tthis.shadowQueueMap = shadowQueueMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchAppGroupRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,GroupName FROM application_group WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar group appConsumerGroupRecord\n\tappGroupMap := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&group.AppId, &group.GroupName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := appGroupMap[group.AppId]; !present {\n\t\t\tappGroupMap[group.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tappGroupMap[group.AppId][group.GroupName] = struct{}{}\n\t}\n\n\tthis.appConsumerGroupMap = appGroupMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchApplicationRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,Cluster,AppSecret FROM application WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app applicationRecord\n\tappClusterMap := make(map[string]string)\n\tappSecretMap := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.Cluster, &app.AppSecret)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tappSecretMap[app.AppId] = app.AppSecret\n\t\tappClusterMap[app.AppId] = app.Cluster\n\t}\n\n\tthis.appClusterMap = appClusterMap\n\tthis.appSecretMap = appSecretMap\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchSubscribeRecords(db *sql.DB) error {\n\t\/\/ FIXME a sub topic t, t disabled, this subscription entry should be disabled too\n\trows, err := db.Query(\"SELECT AppId,TopicName FROM topics_subscriber WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app appSubscribeRecord\n\tm := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.TopicName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := m[app.AppId]; !present {\n\t\t\tm[app.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tm[app.AppId][app.TopicName] = struct{}{}\n\t}\n\n\tthis.appSubMap = m\n\n\treturn nil\n}\n\nfunc (this *mysqlStore) fetchTopicRecords(db *sql.DB) error {\n\trows, err := db.Query(\"SELECT AppId,TopicName FROM topics WHERE Status=1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tvar app appTopicRecord\n\tm := make(map[string]map[string]struct{})\n\tfor rows.Next() {\n\t\terr = rows.Scan(&app.AppId, &app.TopicName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mysql manager store: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, present := m[app.AppId]; !present {\n\t\t\tm[app.AppId] = make(map[string]struct{})\n\t\t}\n\n\t\tm[app.AppId][app.TopicName] = struct{}{}\n\t}\n\n\tthis.appPubMap = m\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\trouteclient \"github.com\/openshift\/client-go\/route\/clientset\/versioned\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/cluster\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/ignition\/bootstrap\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/ignition\/machine\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/installconfig\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/kubeconfig\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/manifests\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/templates\"\n\tdestroybootstrap \"github.com\/openshift\/installer\/pkg\/destroy\/bootstrap\"\n)\n\ntype target struct {\n\tname string\n\tcommand *cobra.Command\n\tassets []asset.WritableAsset\n}\n\n\/\/ each target is a variable to preserve the order when creating subcommands and still\n\/\/ allow other functions to directly access each target individually.\nvar (\n\tinstallConfigTarget = target{\n\t\tname: \"Install Config\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"install-config\",\n\t\t\tShort: \"Generates the Install Config asset\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&installconfig.InstallConfig{}},\n\t}\n\n\tmanifestsTarget = target{\n\t\tname: \"Manifests\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"manifests\",\n\t\t\tShort: \"Generates the Kubernetes manifests\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&manifests.Manifests{}, &manifests.Openshift{}},\n\t}\n\n\tmanifestTemplatesTarget = target{\n\t\tname: \"Manifest templates\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"manifest-templates\",\n\t\t\tShort: \"Generates the unrendered Kubernetes manifest templates\",\n\t\t\tLong: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&templates.Templates{}},\n\t}\n\n\tignitionConfigsTarget = target{\n\t\tname: \"Ignition Configs\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"ignition-configs\",\n\t\t\tShort: \"Generates the Ignition Config asset\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&bootstrap.Bootstrap{}, &machine.Master{}, &machine.Worker{}},\n\t}\n\n\tclusterTarget = target{\n\t\tname: \"Cluster\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"cluster\",\n\t\t\tShort: \"Create an OpenShift cluster\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\t\t\tctx := context.Background()\n\n\t\t\t\tcleanup := setupFileHook(rootOpts.dir)\n\t\t\t\tdefer cleanup()\n\n\t\t\t\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", filepath.Join(rootOpts.dir, \"auth\", \"kubeconfig\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(errors.Wrap(err, \"loading kubeconfig\"))\n\t\t\t\t}\n\n\t\t\t\terr = destroyBootstrap(ctx, config, rootOpts.dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tconsoleURL, err := waitForConsole(ctx, config, rootOpts.dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\terr = logComplete(rootOpts.dir, consoleURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\tassets: []asset.WritableAsset{&cluster.TerraformVariables{}, &kubeconfig.Admin{}, &cluster.Cluster{}},\n\t}\n\n\ttargets = []target{installConfigTarget, manifestTemplatesTarget, manifestsTarget, ignitionConfigsTarget, clusterTarget}\n)\n\nfunc newCreateCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"create\",\n\t\tShort: \"Create part of an OpenShift cluster\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmd.Help()\n\t\t},\n\t}\n\n\tfor _, t := range targets {\n\t\tt.command.Run = runTargetCmd(t.assets...)\n\t\tcmd.AddCommand(t.command)\n\t}\n\n\treturn cmd\n}\n\nfunc runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args []string) {\n\trunner := func(directory string) error {\n\t\tassetStore, err := asset.NewStore(directory)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create asset store\")\n\t\t}\n\n\t\tfor _, a := range targets {\n\t\t\terr := assetStore.Fetch(a)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrapf(err, \"failed to fetch %s\", a.Name())\n\t\t\t}\n\n\t\t\tif err2 := asset.PersistToFile(a, directory); err2 != nil {\n\t\t\t\terr2 = errors.Wrapf(err2, \"failed to write asset (%s) to disk\", a.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Error(err2)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn err2\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn func(cmd *cobra.Command, args []string) {\n\t\tcleanup := setupFileHook(rootOpts.dir)\n\t\tdefer cleanup()\n\n\t\terr := runner(rootOpts.dir)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t}\n}\n\n\/\/ FIXME: pulling the kubeconfig and metadata out of the root\n\/\/ directory is a bit cludgy when we already have them in memory.\nfunc destroyBootstrap(ctx context.Context, config *rest.Config, directory string) (err error) {\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating a Kubernetes client\")\n\t}\n\n\tdiscovery := client.Discovery()\n\n\tapiTimeout := 30 * time.Minute\n\tlogrus.Infof(\"Waiting %v for the Kubernetes API...\", apiTimeout)\n\tapiContext, cancel := context.WithTimeout(ctx, apiTimeout)\n\tdefer cancel()\n\t\/\/ Poll quickly so we notice changes, but only log when the response\n\t\/\/ changes (because that's interesting) or when we've seen 15 of the\n\t\/\/ same errors in a row (to show we're still alive).\n\tlogDownsample := 15\n\tsilenceRemaining := logDownsample\n\tpreviousErrorSuffix := \"\"\n\twait.Until(func() {\n\t\tversion, err := discovery.ServerVersion()\n\t\tif err == nil {\n\t\t\tlogrus.Infof(\"API %s up\", version)\n\t\t\tcancel()\n\t\t} else {\n\t\t\tsilenceRemaining--\n\t\t\tchunks := strings.Split(err.Error(), \":\")\n\t\t\terrorSuffix := chunks[len(chunks)-1]\n\t\t\tif previousErrorSuffix != errorSuffix {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the Kubernetes API: %v\", err)\n\t\t\t\tpreviousErrorSuffix = errorSuffix\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t} else if silenceRemaining == 0 {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the Kubernetes API: %v\", err)\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t}\n\t\t}\n\t}, 2*time.Second, apiContext.Done())\n\terr = apiContext.Err()\n\tif err != nil && err != context.Canceled {\n\t\treturn errors.Wrap(err, \"waiting for Kubernetes API\")\n\t}\n\n\tevents := client.CoreV1().Events(\"kube-system\")\n\n\teventTimeout := 30 * time.Minute\n\tlogrus.Infof(\"Waiting %v for the bootstrap-complete event...\", eventTimeout)\n\teventContext, cancel := context.WithTimeout(ctx, eventTimeout)\n\tdefer cancel()\n\t_, err = Until(\n\t\teventContext,\n\t\t\"\",\n\t\tfunc(sinceResourceVersion string) (watch.Interface, error) {\n\t\t\tfor {\n\t\t\t\twatcher, err := events.Watch(metav1.ListOptions{\n\t\t\t\t\tResourceVersion: sinceResourceVersion,\n\t\t\t\t})\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn watcher, nil\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-eventContext.Done():\n\t\t\t\t\treturn watcher, err\n\t\t\t\tdefault:\n\t\t\t\t\tlogrus.Warningf(\"Failed to connect events watcher: %s\", err)\n\t\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tfunc(watchEvent watch.Event) (bool, error) {\n\t\t\tevent, ok := watchEvent.Object.(*corev1.Event)\n\t\t\tif !ok {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif watchEvent.Type == watch.Error {\n\t\t\t\tlogrus.Debugf(\"error %s: %s\", event.Name, event.Message)\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif watchEvent.Type != watch.Added {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tlogrus.Debugf(\"added %s: %s\", event.Name, event.Message)\n\t\t\treturn event.Name == \"bootstrap-complete\", nil\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"waiting for bootstrap-complete\")\n\t}\n\n\tlogrus.Info(\"Destroying the bootstrap resources...\")\n\treturn destroybootstrap.Destroy(rootOpts.dir)\n}\n\n\/\/ waitForconsole returns the console URL from the route 'console' in namespace openshift-console\nfunc waitForConsole(ctx context.Context, config *rest.Config, directory string) (string, error) {\n\turl := \"\"\n\t\/\/ Need to keep these updated if they change\n\tconsoleNamespace := \"openshift-console\"\n\tconsoleRouteName := \"console\"\n\trc, err := routeclient.NewForConfig(config)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"creating a route client\")\n\t}\n\n\tconsoleRouteTimeout := 10 * time.Minute\n\tlogrus.Infof(\"Waiting %v for the openshift-console route to be created...\", consoleRouteTimeout)\n\tconsoleRouteContext, cancel := context.WithTimeout(ctx, consoleRouteTimeout)\n\tdefer cancel()\n\t\/\/ Poll quickly but only log when the response\n\t\/\/ when we've seen 15 of the same errors or output of\n\t\/\/ no route in a row (to show we're still alive).\n\tlogDownsample := 15\n\tsilenceRemaining := logDownsample\n\twait.Until(func() {\n\t\tconsoleRoutes, err := rc.RouteV1().Routes(consoleNamespace).List(metav1.ListOptions{})\n\t\tif err == nil && len(consoleRoutes.Items) > 0 {\n\t\t\tfor _, route := range consoleRoutes.Items {\n\t\t\t\tlogrus.Debugf(\"Route found in openshift-console namespace: %s\", route.Name)\n\t\t\t\tif route.Name == consoleRouteName {\n\t\t\t\t\turl = fmt.Sprintf(\"https:\/\/%s\", route.Spec.Host)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogrus.Debug(\"OpenShift console route is created\")\n\t\t\tcancel()\n\t\t} else if err != nil {\n\t\t\tsilenceRemaining--\n\t\t\tif silenceRemaining == 0 {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the console route: %v\", err)\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t}\n\t\t} else if len(consoleRoutes.Items) == 0 {\n\t\t\tsilenceRemaining--\n\t\t\tif silenceRemaining == 0 {\n\t\t\t\tlogrus.Debug(\"Still waiting for the console route...\")\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t\terr = errors.New(\"no routes found in openshift-console namespace\")\n\t\t\t}\n\t\t}\n\t}, 2*time.Second, consoleRouteContext.Done())\n\terr = consoleRouteContext.Err()\n\tif err != nil && err != context.Canceled {\n\t\treturn url, errors.Wrap(err, \"waiting for openshift-console URL\")\n\t}\n\tif url == \"\" {\n\t\treturn url, errors.New(\"could not get openshift-console URL\")\n\t}\n\treturn url, nil\n}\n\n\/\/ logComplete prints info upon completion\nfunc logComplete(directory, consoleURL string) error {\n\tabsDir, err := filepath.Abs(directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkubeconfig := filepath.Join(absDir, \"auth\", \"kubeconfig\")\n\tpwFile := filepath.Join(absDir, \"auth\", \"kubeadmin-password\")\n\tpw, err := ioutil.ReadFile(pwFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Info(\"Install complete!\")\n\tlogrus.Infof(\"Run 'export KUBECONFIG=%s' to manage the cluster with 'oc', the OpenShift CLI.\", kubeconfig)\n\tlogrus.Infof(\"The cluster is ready when 'oc login -u kubeadmin -p %s' succeeds (wait a few minutes).\", pw)\n\tlogrus.Infof(\"Access the OpenShift web-console here: %s\", consoleURL)\n\tlogrus.Infof(\"Login to the console with user: kubeadmin, password: %s\", pw)\n\treturn nil\n}\n<commit_msg>cmd\/openshift-install\/create: Add \"up to\" to \"Waiting for ...\" logs<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\trouteclient \"github.com\/openshift\/client-go\/route\/clientset\/versioned\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/cluster\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/ignition\/bootstrap\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/ignition\/machine\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/installconfig\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/kubeconfig\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/manifests\"\n\t\"github.com\/openshift\/installer\/pkg\/asset\/templates\"\n\tdestroybootstrap \"github.com\/openshift\/installer\/pkg\/destroy\/bootstrap\"\n)\n\ntype target struct {\n\tname string\n\tcommand *cobra.Command\n\tassets []asset.WritableAsset\n}\n\n\/\/ each target is a variable to preserve the order when creating subcommands and still\n\/\/ allow other functions to directly access each target individually.\nvar (\n\tinstallConfigTarget = target{\n\t\tname: \"Install Config\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"install-config\",\n\t\t\tShort: \"Generates the Install Config asset\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&installconfig.InstallConfig{}},\n\t}\n\n\tmanifestsTarget = target{\n\t\tname: \"Manifests\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"manifests\",\n\t\t\tShort: \"Generates the Kubernetes manifests\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&manifests.Manifests{}, &manifests.Openshift{}},\n\t}\n\n\tmanifestTemplatesTarget = target{\n\t\tname: \"Manifest templates\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"manifest-templates\",\n\t\t\tShort: \"Generates the unrendered Kubernetes manifest templates\",\n\t\t\tLong: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&templates.Templates{}},\n\t}\n\n\tignitionConfigsTarget = target{\n\t\tname: \"Ignition Configs\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"ignition-configs\",\n\t\t\tShort: \"Generates the Ignition Config asset\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t},\n\t\tassets: []asset.WritableAsset{&bootstrap.Bootstrap{}, &machine.Master{}, &machine.Worker{}},\n\t}\n\n\tclusterTarget = target{\n\t\tname: \"Cluster\",\n\t\tcommand: &cobra.Command{\n\t\t\tUse: \"cluster\",\n\t\t\tShort: \"Create an OpenShift cluster\",\n\t\t\t\/\/ FIXME: add longer descriptions for our commands with examples for better UX.\n\t\t\t\/\/ Long: \"\",\n\t\t\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\t\t\tctx := context.Background()\n\n\t\t\t\tcleanup := setupFileHook(rootOpts.dir)\n\t\t\t\tdefer cleanup()\n\n\t\t\t\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", filepath.Join(rootOpts.dir, \"auth\", \"kubeconfig\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(errors.Wrap(err, \"loading kubeconfig\"))\n\t\t\t\t}\n\n\t\t\t\terr = destroyBootstrap(ctx, config, rootOpts.dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tconsoleURL, err := waitForConsole(ctx, config, rootOpts.dir)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\terr = logComplete(rootOpts.dir, consoleURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\tassets: []asset.WritableAsset{&cluster.TerraformVariables{}, &kubeconfig.Admin{}, &cluster.Cluster{}},\n\t}\n\n\ttargets = []target{installConfigTarget, manifestTemplatesTarget, manifestsTarget, ignitionConfigsTarget, clusterTarget}\n)\n\nfunc newCreateCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"create\",\n\t\tShort: \"Create part of an OpenShift cluster\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmd.Help()\n\t\t},\n\t}\n\n\tfor _, t := range targets {\n\t\tt.command.Run = runTargetCmd(t.assets...)\n\t\tcmd.AddCommand(t.command)\n\t}\n\n\treturn cmd\n}\n\nfunc runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args []string) {\n\trunner := func(directory string) error {\n\t\tassetStore, err := asset.NewStore(directory)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create asset store\")\n\t\t}\n\n\t\tfor _, a := range targets {\n\t\t\terr := assetStore.Fetch(a)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrapf(err, \"failed to fetch %s\", a.Name())\n\t\t\t}\n\n\t\t\tif err2 := asset.PersistToFile(a, directory); err2 != nil {\n\t\t\t\terr2 = errors.Wrapf(err2, \"failed to write asset (%s) to disk\", a.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Error(err2)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn err2\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn func(cmd *cobra.Command, args []string) {\n\t\tcleanup := setupFileHook(rootOpts.dir)\n\t\tdefer cleanup()\n\n\t\terr := runner(rootOpts.dir)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t}\n}\n\n\/\/ FIXME: pulling the kubeconfig and metadata out of the root\n\/\/ directory is a bit cludgy when we already have them in memory.\nfunc destroyBootstrap(ctx context.Context, config *rest.Config, directory string) (err error) {\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating a Kubernetes client\")\n\t}\n\n\tdiscovery := client.Discovery()\n\n\tapiTimeout := 30 * time.Minute\n\tlogrus.Infof(\"Waiting up to %v for the Kubernetes API...\", apiTimeout)\n\tapiContext, cancel := context.WithTimeout(ctx, apiTimeout)\n\tdefer cancel()\n\t\/\/ Poll quickly so we notice changes, but only log when the response\n\t\/\/ changes (because that's interesting) or when we've seen 15 of the\n\t\/\/ same errors in a row (to show we're still alive).\n\tlogDownsample := 15\n\tsilenceRemaining := logDownsample\n\tpreviousErrorSuffix := \"\"\n\twait.Until(func() {\n\t\tversion, err := discovery.ServerVersion()\n\t\tif err == nil {\n\t\t\tlogrus.Infof(\"API %s up\", version)\n\t\t\tcancel()\n\t\t} else {\n\t\t\tsilenceRemaining--\n\t\t\tchunks := strings.Split(err.Error(), \":\")\n\t\t\terrorSuffix := chunks[len(chunks)-1]\n\t\t\tif previousErrorSuffix != errorSuffix {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the Kubernetes API: %v\", err)\n\t\t\t\tpreviousErrorSuffix = errorSuffix\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t} else if silenceRemaining == 0 {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the Kubernetes API: %v\", err)\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t}\n\t\t}\n\t}, 2*time.Second, apiContext.Done())\n\terr = apiContext.Err()\n\tif err != nil && err != context.Canceled {\n\t\treturn errors.Wrap(err, \"waiting for Kubernetes API\")\n\t}\n\n\tevents := client.CoreV1().Events(\"kube-system\")\n\n\teventTimeout := 30 * time.Minute\n\tlogrus.Infof(\"Waiting up to %v for the bootstrap-complete event...\", eventTimeout)\n\teventContext, cancel := context.WithTimeout(ctx, eventTimeout)\n\tdefer cancel()\n\t_, err = Until(\n\t\teventContext,\n\t\t\"\",\n\t\tfunc(sinceResourceVersion string) (watch.Interface, error) {\n\t\t\tfor {\n\t\t\t\twatcher, err := events.Watch(metav1.ListOptions{\n\t\t\t\t\tResourceVersion: sinceResourceVersion,\n\t\t\t\t})\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn watcher, nil\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-eventContext.Done():\n\t\t\t\t\treturn watcher, err\n\t\t\t\tdefault:\n\t\t\t\t\tlogrus.Warningf(\"Failed to connect events watcher: %s\", err)\n\t\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tfunc(watchEvent watch.Event) (bool, error) {\n\t\t\tevent, ok := watchEvent.Object.(*corev1.Event)\n\t\t\tif !ok {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif watchEvent.Type == watch.Error {\n\t\t\t\tlogrus.Debugf(\"error %s: %s\", event.Name, event.Message)\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif watchEvent.Type != watch.Added {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tlogrus.Debugf(\"added %s: %s\", event.Name, event.Message)\n\t\t\treturn event.Name == \"bootstrap-complete\", nil\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"waiting for bootstrap-complete\")\n\t}\n\n\tlogrus.Info(\"Destroying the bootstrap resources...\")\n\treturn destroybootstrap.Destroy(rootOpts.dir)\n}\n\n\/\/ waitForconsole returns the console URL from the route 'console' in namespace openshift-console\nfunc waitForConsole(ctx context.Context, config *rest.Config, directory string) (string, error) {\n\turl := \"\"\n\t\/\/ Need to keep these updated if they change\n\tconsoleNamespace := \"openshift-console\"\n\tconsoleRouteName := \"console\"\n\trc, err := routeclient.NewForConfig(config)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"creating a route client\")\n\t}\n\n\tconsoleRouteTimeout := 10 * time.Minute\n\tlogrus.Infof(\"Waiting up to %v for the openshift-console route to be created...\", consoleRouteTimeout)\n\tconsoleRouteContext, cancel := context.WithTimeout(ctx, consoleRouteTimeout)\n\tdefer cancel()\n\t\/\/ Poll quickly but only log when the response\n\t\/\/ when we've seen 15 of the same errors or output of\n\t\/\/ no route in a row (to show we're still alive).\n\tlogDownsample := 15\n\tsilenceRemaining := logDownsample\n\twait.Until(func() {\n\t\tconsoleRoutes, err := rc.RouteV1().Routes(consoleNamespace).List(metav1.ListOptions{})\n\t\tif err == nil && len(consoleRoutes.Items) > 0 {\n\t\t\tfor _, route := range consoleRoutes.Items {\n\t\t\t\tlogrus.Debugf(\"Route found in openshift-console namespace: %s\", route.Name)\n\t\t\t\tif route.Name == consoleRouteName {\n\t\t\t\t\turl = fmt.Sprintf(\"https:\/\/%s\", route.Spec.Host)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogrus.Debug(\"OpenShift console route is created\")\n\t\t\tcancel()\n\t\t} else if err != nil {\n\t\t\tsilenceRemaining--\n\t\t\tif silenceRemaining == 0 {\n\t\t\t\tlogrus.Debugf(\"Still waiting for the console route: %v\", err)\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t}\n\t\t} else if len(consoleRoutes.Items) == 0 {\n\t\t\tsilenceRemaining--\n\t\t\tif silenceRemaining == 0 {\n\t\t\t\tlogrus.Debug(\"Still waiting for the console route...\")\n\t\t\t\tsilenceRemaining = logDownsample\n\t\t\t\terr = errors.New(\"no routes found in openshift-console namespace\")\n\t\t\t}\n\t\t}\n\t}, 2*time.Second, consoleRouteContext.Done())\n\terr = consoleRouteContext.Err()\n\tif err != nil && err != context.Canceled {\n\t\treturn url, errors.Wrap(err, \"waiting for openshift-console URL\")\n\t}\n\tif url == \"\" {\n\t\treturn url, errors.New(\"could not get openshift-console URL\")\n\t}\n\treturn url, nil\n}\n\n\/\/ logComplete prints info upon completion\nfunc logComplete(directory, consoleURL string) error {\n\tabsDir, err := filepath.Abs(directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkubeconfig := filepath.Join(absDir, \"auth\", \"kubeconfig\")\n\tpwFile := filepath.Join(absDir, \"auth\", \"kubeadmin-password\")\n\tpw, err := ioutil.ReadFile(pwFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Info(\"Install complete!\")\n\tlogrus.Infof(\"Run 'export KUBECONFIG=%s' to manage the cluster with 'oc', the OpenShift CLI.\", kubeconfig)\n\tlogrus.Infof(\"The cluster is ready when 'oc login -u kubeadmin -p %s' succeeds (wait a few minutes).\", pw)\n\tlogrus.Infof(\"Access the OpenShift web-console here: %s\", consoleURL)\n\tlogrus.Infof(\"Login to the console with user: kubeadmin, password: %s\", pw)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>ardop: Work-around for ARDOPc newstate issue<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014-2015 The Notify Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license that can be\n\/\/ found in the LICENSE file.\n\n\/\/ +build darwin,kqueue darwin,!cgo dragonfly freebsd netbsd openbsd\n\npackage notify\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ newTrigger returns implementation of trigger.\nfunc newTrigger(pthLkp map[string]*watched) trigger {\n\treturn &kq{\n\t\tpthLkp: pthLkp,\n\t\tidLkp: make(map[int]*watched),\n\t}\n}\n\n\/\/ kq is a structure implementing trigger for kqueue.\ntype kq struct {\n\t\/\/ fd is a kqueue file descriptor\n\tfd int\n\t\/\/ pipefds are file descriptors used to stop `Kevent` call.\n\tpipefds [2]int\n\t\/\/ idLkp is a data structure mapping file descriptors with data about watching\n\t\/\/ represented by them files\/directories.\n\tidLkp map[int]*watched\n\t\/\/ pthLkp is a structure mapping monitored files\/dir with data about them,\n\t\/\/ shared with parent trg structure\n\tpthLkp map[string]*watched\n}\n\n\/\/ watched is a data structure representing watched file\/directory.\ntype watched struct {\n\ttrgWatched\n\t\/\/ fd is a file descriptor for watched file\/directory.\n\tfd int\n}\n\n\/\/ Stop implements trigger.\nfunc (k *kq) Stop() (err error) {\n\t\/\/ trigger event used to interrupt Kevent call.\n\t_, err = syscall.Write(k.pipefds[1], []byte{0x00})\n\treturn\n}\n\n\/\/ Close implements trigger.\nfunc (k *kq) Close() error {\n\treturn syscall.Close(k.fd)\n}\n\n\/\/ NewWatched implements trigger.\nfunc (*kq) NewWatched(p string, fi os.FileInfo) (*watched, error) {\n\tfd, err := syscall.Open(p, syscall.O_NONBLOCK|syscall.O_RDONLY, 0)\n\tif err != nil {\n\t\t\/\/ BSDs can't open symlinks and return an error if the symlink\n\t\t\/\/ cannot be followed - ignore it instead of failing. See e.g.\n\t\t\/\/ https:\/\/github.com\/libinotify-kqueue\/libinotify-kqueue\/blob\/a822c8f1d75404fe3132f695a898dcd42fe8afbc\/patches\/freebsd11-O_SYMLINK.patch\n\t\tif os.IsNotExist(err) && fi.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\t\treturn nil, errSkip\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn &watched{\n\t\ttrgWatched: trgWatched{p: p, fi: fi},\n\t\tfd: fd,\n\t}, nil\n}\n\n\/\/ Record implements trigger.\nfunc (k *kq) Record(w *watched) {\n\tk.idLkp[w.fd], k.pthLkp[w.p] = w, w\n}\n\n\/\/ Del implements trigger.\nfunc (k *kq) Del(w *watched) {\n\tsyscall.Close(w.fd)\n\tdelete(k.idLkp, w.fd)\n\tdelete(k.pthLkp, w.p)\n}\n\nfunc inter2kq(n interface{}) syscall.Kevent_t {\n\tkq, ok := n.(syscall.Kevent_t)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"kqueue: type should be Kevent_t, %T instead\", n))\n\t}\n\treturn kq\n}\n\n\/\/ Init implements trigger.\nfunc (k *kq) Init() (err error) {\n\tif k.fd, err = syscall.Kqueue(); err != nil {\n\t\treturn\n\t}\n\t\/\/ Creates pipe used to stop `Kevent` call by registering it,\n\t\/\/ watching read end and writing to other end of it.\n\tif err = syscall.Pipe(k.pipefds[:]); err != nil {\n\t\treturn nonil(err, k.Close())\n\t}\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], k.pipefds[0], syscall.EVFILT_READ, syscall.EV_ADD)\n\tif _, err = syscall.Kevent(k.fd, kevn[:], nil, nil); err != nil {\n\t\treturn nonil(err, k.Close())\n\t}\n\treturn\n}\n\n\/\/ Unwatch implements trigger.\nfunc (k *kq) Unwatch(w *watched) (err error) {\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE, syscall.EV_DELETE)\n\n\t_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)\n\treturn\n}\n\n\/\/ Watch implements trigger.\nfunc (k *kq) Watch(fi os.FileInfo, w *watched, e int64) (err error) {\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE,\n\t\tsyscall.EV_ADD|syscall.EV_CLEAR)\n\tkevn[0].Fflags = uint32(e)\n\n\t_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)\n\treturn\n}\n\n\/\/ Wait implements trigger.\nfunc (k *kq) Wait() (interface{}, error) {\n\tvar (\n\t\tkevn [1]syscall.Kevent_t\n\t\terr error\n\t)\n\tkevn[0] = syscall.Kevent_t{}\n\t_, err = syscall.Kevent(k.fd, nil, kevn[:], nil)\n\n\treturn kevn[0], err\n}\n\n\/\/ Watched implements trigger.\nfunc (k *kq) Watched(n interface{}) (*watched, int64, error) {\n\tkevn, ok := n.(syscall.Kevent_t)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"kq: type should be syscall.Kevent_t, %T instead\", kevn))\n\t}\n\tif _, ok = k.idLkp[int(kevn.Ident)]; !ok {\n\t\treturn nil, 0, errNotWatched\n\t}\n\treturn k.idLkp[int(kevn.Ident)], int64(kevn.Fflags), nil\n}\n\n\/\/ IsStop implements trigger.\nfunc (k *kq) IsStop(n interface{}, err error) bool {\n\treturn int(inter2kq(n).Ident) == k.pipefds[0]\n}\n\nfunc init() {\n\tencode = func(e Event, dir bool) (o int64) {\n\t\t\/\/ Create event is not supported by kqueue. Instead NoteWrite event will\n\t\t\/\/ be registered for a directory. If this event will be reported on dir\n\t\t\/\/ which is to be monitored for Create, dir will be rescanned\n\t\t\/\/ and Create events will be generated and returned for new files.\n\t\t\/\/ In case of files, if not requested NoteRename event is reported,\n\t\t\/\/ it will be ignored.\n\t\to = int64(e &^ Create)\n\t\tif (e&Create != 0 && dir) || e&Write != 0 {\n\t\t\to = (o &^ int64(Write)) | int64(NoteWrite)\n\t\t}\n\t\tif e&Rename != 0 {\n\t\t\to = (o &^ int64(Rename)) | int64(NoteRename)\n\t\t}\n\t\tif e&Remove != 0 {\n\t\t\to = (o &^ int64(Remove)) | int64(NoteDelete)\n\t\t}\n\t\treturn\n\t}\n\tnat2not = map[Event]Event{\n\t\tNoteWrite: Write,\n\t\tNoteRename: Rename,\n\t\tNoteDelete: Remove,\n\t\tNoteExtend: Event(0),\n\t\tNoteAttrib: Event(0),\n\t\tNoteRevoke: Event(0),\n\t\tNoteLink: Event(0),\n\t}\n\tnot2nat = map[Event]Event{\n\t\tWrite: NoteWrite,\n\t\tRename: NoteRename,\n\t\tRemove: NoteDelete,\n\t}\n}\n<commit_msg>FreeBSD: don't crash on unix domain sockets<commit_after>\/\/ Copyright (c) 2014-2015 The Notify Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license that can be\n\/\/ found in the LICENSE file.\n\n\/\/ +build darwin,kqueue darwin,!cgo dragonfly freebsd netbsd openbsd\n\npackage notify\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ newTrigger returns implementation of trigger.\nfunc newTrigger(pthLkp map[string]*watched) trigger {\n\treturn &kq{\n\t\tpthLkp: pthLkp,\n\t\tidLkp: make(map[int]*watched),\n\t}\n}\n\n\/\/ kq is a structure implementing trigger for kqueue.\ntype kq struct {\n\t\/\/ fd is a kqueue file descriptor\n\tfd int\n\t\/\/ pipefds are file descriptors used to stop `Kevent` call.\n\tpipefds [2]int\n\t\/\/ idLkp is a data structure mapping file descriptors with data about watching\n\t\/\/ represented by them files\/directories.\n\tidLkp map[int]*watched\n\t\/\/ pthLkp is a structure mapping monitored files\/dir with data about them,\n\t\/\/ shared with parent trg structure\n\tpthLkp map[string]*watched\n}\n\n\/\/ watched is a data structure representing watched file\/directory.\ntype watched struct {\n\ttrgWatched\n\t\/\/ fd is a file descriptor for watched file\/directory.\n\tfd int\n}\n\n\/\/ Stop implements trigger.\nfunc (k *kq) Stop() (err error) {\n\t\/\/ trigger event used to interrupt Kevent call.\n\t_, err = syscall.Write(k.pipefds[1], []byte{0x00})\n\treturn\n}\n\n\/\/ Close implements trigger.\nfunc (k *kq) Close() error {\n\treturn syscall.Close(k.fd)\n}\n\n\/\/ NewWatched implements trigger.\nfunc (*kq) NewWatched(p string, fi os.FileInfo) (*watched, error) {\n\tfd, err := syscall.Open(p, syscall.O_NONBLOCK|syscall.O_RDONLY, 0)\n\tif err != nil {\n\t\t\/\/ BSDs can't open symlinks and return an error if the symlink\n\t\t\/\/ cannot be followed - ignore it instead of failing. See e.g.\n\t\t\/\/ https:\/\/github.com\/libinotify-kqueue\/libinotify-kqueue\/blob\/a822c8f1d75404fe3132f695a898dcd42fe8afbc\/patches\/freebsd11-O_SYMLINK.patch\n\t\tif os.IsNotExist(err) && fi.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\t\treturn nil, errSkip\n\t\t}\n\t\t\/\/ FreeBSD can't open unix domain sockets and returns \"operation not supported\" error.\n\t\t\/\/ Ignore it instead of failing.\n\t\tif errors.Is(err, syscall.ENOTSUP) && fi.Mode()&os.ModeSocket == os.ModeSocket {\n\t\t\treturn nil, errSkip\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn &watched{\n\t\ttrgWatched: trgWatched{p: p, fi: fi},\n\t\tfd: fd,\n\t}, nil\n}\n\n\/\/ Record implements trigger.\nfunc (k *kq) Record(w *watched) {\n\tk.idLkp[w.fd], k.pthLkp[w.p] = w, w\n}\n\n\/\/ Del implements trigger.\nfunc (k *kq) Del(w *watched) {\n\tsyscall.Close(w.fd)\n\tdelete(k.idLkp, w.fd)\n\tdelete(k.pthLkp, w.p)\n}\n\nfunc inter2kq(n interface{}) syscall.Kevent_t {\n\tkq, ok := n.(syscall.Kevent_t)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"kqueue: type should be Kevent_t, %T instead\", n))\n\t}\n\treturn kq\n}\n\n\/\/ Init implements trigger.\nfunc (k *kq) Init() (err error) {\n\tif k.fd, err = syscall.Kqueue(); err != nil {\n\t\treturn\n\t}\n\t\/\/ Creates pipe used to stop `Kevent` call by registering it,\n\t\/\/ watching read end and writing to other end of it.\n\tif err = syscall.Pipe(k.pipefds[:]); err != nil {\n\t\treturn nonil(err, k.Close())\n\t}\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], k.pipefds[0], syscall.EVFILT_READ, syscall.EV_ADD)\n\tif _, err = syscall.Kevent(k.fd, kevn[:], nil, nil); err != nil {\n\t\treturn nonil(err, k.Close())\n\t}\n\treturn\n}\n\n\/\/ Unwatch implements trigger.\nfunc (k *kq) Unwatch(w *watched) (err error) {\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE, syscall.EV_DELETE)\n\n\t_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)\n\treturn\n}\n\n\/\/ Watch implements trigger.\nfunc (k *kq) Watch(fi os.FileInfo, w *watched, e int64) (err error) {\n\tvar kevn [1]syscall.Kevent_t\n\tsyscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE,\n\t\tsyscall.EV_ADD|syscall.EV_CLEAR)\n\tkevn[0].Fflags = uint32(e)\n\n\t_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)\n\treturn\n}\n\n\/\/ Wait implements trigger.\nfunc (k *kq) Wait() (interface{}, error) {\n\tvar (\n\t\tkevn [1]syscall.Kevent_t\n\t\terr error\n\t)\n\tkevn[0] = syscall.Kevent_t{}\n\t_, err = syscall.Kevent(k.fd, nil, kevn[:], nil)\n\n\treturn kevn[0], err\n}\n\n\/\/ Watched implements trigger.\nfunc (k *kq) Watched(n interface{}) (*watched, int64, error) {\n\tkevn, ok := n.(syscall.Kevent_t)\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"kq: type should be syscall.Kevent_t, %T instead\", kevn))\n\t}\n\tif _, ok = k.idLkp[int(kevn.Ident)]; !ok {\n\t\treturn nil, 0, errNotWatched\n\t}\n\treturn k.idLkp[int(kevn.Ident)], int64(kevn.Fflags), nil\n}\n\n\/\/ IsStop implements trigger.\nfunc (k *kq) IsStop(n interface{}, err error) bool {\n\treturn int(inter2kq(n).Ident) == k.pipefds[0]\n}\n\nfunc init() {\n\tencode = func(e Event, dir bool) (o int64) {\n\t\t\/\/ Create event is not supported by kqueue. Instead NoteWrite event will\n\t\t\/\/ be registered for a directory. If this event will be reported on dir\n\t\t\/\/ which is to be monitored for Create, dir will be rescanned\n\t\t\/\/ and Create events will be generated and returned for new files.\n\t\t\/\/ In case of files, if not requested NoteRename event is reported,\n\t\t\/\/ it will be ignored.\n\t\to = int64(e &^ Create)\n\t\tif (e&Create != 0 && dir) || e&Write != 0 {\n\t\t\to = (o &^ int64(Write)) | int64(NoteWrite)\n\t\t}\n\t\tif e&Rename != 0 {\n\t\t\to = (o &^ int64(Rename)) | int64(NoteRename)\n\t\t}\n\t\tif e&Remove != 0 {\n\t\t\to = (o &^ int64(Remove)) | int64(NoteDelete)\n\t\t}\n\t\treturn\n\t}\n\tnat2not = map[Event]Event{\n\t\tNoteWrite: Write,\n\t\tNoteRename: Rename,\n\t\tNoteDelete: Remove,\n\t\tNoteExtend: Event(0),\n\t\tNoteAttrib: Event(0),\n\t\tNoteRevoke: Event(0),\n\t\tNoteLink: Event(0),\n\t}\n\tnot2nat = map[Event]Event{\n\t\tWrite: NoteWrite,\n\t\tRename: NoteRename,\n\t\tRemove: NoteDelete,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package startCmd\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"text\/tabwriter\"\n\n\t\/\/ Internal\n\t\"github.com\/salsita\/salsaflow\/app\"\n\t\"github.com\/salsita\/salsaflow\/config\"\n\t\"github.com\/salsita\/salsaflow\/errs\"\n\t\"github.com\/salsita\/salsaflow\/git\"\n\t\"github.com\/salsita\/salsaflow\/log\"\n\t\"github.com\/salsita\/salsaflow\/modules\"\n\t\"github.com\/salsita\/salsaflow\/modules\/common\"\n\t\"github.com\/salsita\/salsaflow\/prompt\"\n\n\t\/\/ Other\n\t\"github.com\/extemporalgenome\/slug\"\n\t\"gopkg.in\/tchap\/gocli.v1\"\n)\n\nvar Command = &gocli.Command{\n\tUsageLine: `\n start`,\n\tShort: \"start a new story\",\n\tLong: `\n Starts a new story, i.e., create a new branch off ` + config.TrunkBranch + `\n or check out an existing branch containing the story id if there's already one.\n Then updates the story state in PM tool and sets you as the story owner.\n\t`,\n\tAction: run,\n}\n\nfunc run(cmd *gocli.Command, args []string) {\n\tif len(args) != 0 {\n\t\tcmd.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tapp.MustInit()\n\n\tif err := runMain(); err != nil {\n\t\tlog.Fatalln(\"\\nError: \" + err.Error())\n\t}\n}\n\nfunc handleError(task string, err error, stderr *bytes.Buffer) error {\n\terrs.NewError(task, stderr, err).Log(log.V(log.Info))\n\treturn err\n}\n\nfunc runMain() (err error) {\n\tvar (\n\t\tselectedStory common.Story\n\t\tcurrentBranch string\n\t)\n\n\tdefer func() {\n\t\t\/\/ Checkout the original branch.\n\t\tif currentBranch == \"\" {\n\t\t\treturn\n\t\t}\n\t\tmsg := fmt.Sprintf(\"Checkout the original branch (%s)\", currentBranch)\n\t\tlog.Run(msg)\n\t\tstderr, err := git.Checkout(currentBranch)\n\t\tif err != nil {\n\t\t\thandleError(msg, err, stderr)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t\/\/ Remember the current branch.\n\tmsg := \"Remember the current branch\"\n\tlog.Run(msg)\n\tcurrentBranch, stderr, err := git.CurrentBranch()\n\tif err != nil {\n\t\treturn handleError(msg, err, stderr)\n\t}\n\n\t\/\/ Fetch stories from the issue tracker.\n\tmsg = \"Fetch stories from the issue tracker\"\n\tlog.Run(msg)\n\tstories, err := modules.GetIssueTracker().StartableStories()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ List stories that can be started.\n\ttw := tabwriter.NewWriter(os.Stdout, 0, 8, 4, '\\t', 0)\n\tio.WriteString(tw, \"\\nYou can start working on one of the following stories:\\n\\n\")\n\tio.WriteString(tw, \"Index\\tStory ID\\tStory Title\\n\")\n\tio.WriteString(tw, \"=====\\t========\\t===========\\n\")\n\tfor i, story := range stories {\n\t\tfmt.Fprintf(tw, \"%v\\t%v\\t%v\\n\", i, story.ReadableId(), story.Title())\n\t}\n\tio.WriteString(tw, \"\\n\")\n\ttw.Flush()\n\n\t\/\/ Prompt user to choose.\n\tindex, err := prompt.PromptIndex(\"Choose a story by inserting its index: \", 0, len(stories)-1)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\tfmt.Println()\n\n\tselectedStory = stories[index]\n\n\t\/\/ Fetch the remote repository.\n\tmsg = \"Fetch the remote repository\"\n\tlog.Run(msg)\n\tstderr, err = git.UpdateRemotes(config.OriginName)\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Get all branches.\n\tmsg = \"Collect all story branches\"\n\tlocalRefs, remoteRefs, stderr, err := git.ListStoryRefs()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Get all story branches connected to `selectedStory` id.\n\tmsg = \"Check existing branches\"\n\tlog.Run(msg)\n\tmatchingBranches := map[string]struct{}{}\n\n\tstripRemotePrefixRe := regexp.MustCompile(\".*\/(story\/.+\/.+)$\")\n\tfor _, ref := range append(localRefs, remoteRefs...) {\n\t\tstoryId, err := git.RefToStoryId(ref)\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tif storyId == selectedStory.ReadableId() {\n\t\t\t\/\/ We found a matching story branch. Let's strip off the `remote\/origin` etc\n\t\t\t\/\/ part so that we have just the branch name.\n\t\t\tbranchName := stripRemotePrefixRe.ReplaceAllString(ref, \"$1\")\n\t\t\tmatchingBranches[branchName] = struct{}{}\n\t\t}\n\t}\n\n\tif len(matchingBranches) == 1 {\n\t\t\/\/ There is only one branch => checkout and work on that one.\n\t\tfor branch := range matchingBranches {\n\t\t\tlog.Log(\"Found one existing story branch: \" + branch)\n\t\t\tmsg := \"Checkout branch \" + branch\n\t\t\tlog.Run(msg)\n\t\t\tif stderr, err = git.Checkout(branch); err != nil {\n\t\t\t\treturn handleError(msg, err, stderr)\n\t\t\t}\n\t\t}\n\n\t} else if len(matchingBranches) > 1 {\n\t\t\/\/ There are multiple branches. Let the user choose which one to work on.\n\t\tlogger := log.V(log.Info)\n\t\tlogger.Lock()\n\t\tlogger.UnsafeLog(\"Found multiple existing story branches:\")\n\t\tfor branch := range matchingBranches {\n\t\t\tlogger.UnsafeNewLine(\" - \" + branch)\n\t\t}\n\t\tlogger.UnsafeNewLine(\"Please checkout one of them and work there.\")\n\t\tlogger.Unlock()\n\n\t} else {\n\t\t\/\/ There is no branch => create a new one.\n\t\tmsg := \"Prompt for the new story branch name\"\n\t\tline, err := prompt.Prompt(\"Please insert branch slug: \")\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tbranchName := fmt.Sprintf(\"story\/%s\/%s\", slug.Slug(line), selectedStory.ReadableId())\n\t\tok, err := prompt.Confirm(\n\t\t\tfmt.Sprintf(\"The branch will be called '%s', OK?\", branchName))\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tif !ok {\n\t\t\tlog.Fatalln(\"I will exit now. If you want to try again, just run me again!\")\n\t\t}\n\t\tfmt.Println()\n\n\t\tcreateMsg := \"Create branch \" + branchName\n\t\tlog.Run(msg)\n\t\tif _, stderr, err = git.Git(\"branch\", branchName, config.TrunkBranch); err != nil {\n\t\t\treturn handleError(msg, err, stderr)\n\t\t}\n\n\t\tmsg = \"Checkout branch \" + branchName\n\t\tif _, stderr, err = git.Git(\"checkout\", branchName); err != nil {\n\t\t\treturn handleError(msg, err, stderr)\n\t\t}\n\n\t\t\/\/ Delete the newly created branch on rollback.\n\t\tdefer func(msg string) {\n\t\t\tif err != nil {\n\t\t\t\tlog.Rollback(msg)\n\t\t\t\tif stderr, err := git.Branch(\"-D\", branchName); err != nil {\n\t\t\t\t\thandleError(\"Delete branch \"+branchName, err, stderr)\n\t\t\t\t}\n\t\t\t}\n\t\t}(createMsg)\n\t}\n\n\tmsg = fmt.Sprintf(\"Start the selected story (%v)\", selectedStory.ReadableId())\n\tlog.Run(msg)\n\tif err := selectedStory.Start(); err != nil {\n\t\treturn handleError(msg, err, err.Stderr)\n\t}\n\n\tmsg = \"Set you as the story owner\"\n\tlog.Run(msg)\n\tuser, err := modules.GetIssueTracker().CurrentUser()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\t\/\/ TODO: We should update, not overwrite the owners.\n\tif err := selectedStory.SetOwners([]common.User{user}); err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Do not checkout the original branch, the story branch is active now.\n\tcurrentBranch = \"\"\n\treturn nil\n}\n<commit_msg>story start: Make the output less verbose<commit_after>package startCmd\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"text\/tabwriter\"\n\n\t\/\/ Internal\n\t\"github.com\/salsita\/salsaflow\/app\"\n\t\"github.com\/salsita\/salsaflow\/config\"\n\t\"github.com\/salsita\/salsaflow\/errs\"\n\t\"github.com\/salsita\/salsaflow\/git\"\n\t\"github.com\/salsita\/salsaflow\/log\"\n\t\"github.com\/salsita\/salsaflow\/modules\"\n\t\"github.com\/salsita\/salsaflow\/modules\/common\"\n\t\"github.com\/salsita\/salsaflow\/prompt\"\n\n\t\/\/ Other\n\t\"github.com\/extemporalgenome\/slug\"\n\t\"gopkg.in\/tchap\/gocli.v1\"\n)\n\nvar Command = &gocli.Command{\n\tUsageLine: `\n start`,\n\tShort: \"start a new story\",\n\tLong: `\n Starts a new story, i.e., create a new branch off ` + config.TrunkBranch + `\n or check out an existing branch containing the story id if there's already one.\n Then updates the story state in PM tool and sets you as the story owner.\n\t`,\n\tAction: run,\n}\n\nfunc run(cmd *gocli.Command, args []string) {\n\tif len(args) != 0 {\n\t\tcmd.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tapp.MustInit()\n\n\tif err := runMain(); err != nil {\n\t\tlog.Fatalln(\"\\nError: \" + err.Error())\n\t}\n}\n\nfunc handleError(task string, err error, stderr *bytes.Buffer) error {\n\terrs.NewError(task, stderr, err).Log(log.V(log.Info))\n\treturn err\n}\n\nfunc runMain() (err error) {\n\tvar (\n\t\tselectedStory common.Story\n\t\tcurrentBranch string\n\t)\n\n\tdefer func() {\n\t\t\/\/ Checkout the original branch.\n\t\tif currentBranch == \"\" {\n\t\t\treturn\n\t\t}\n\t\tmsg := fmt.Sprintf(\"Checkout the original branch (%s)\", currentBranch)\n\t\tlog.Run(msg)\n\t\tstderr, err := git.Checkout(currentBranch)\n\t\tif err != nil {\n\t\t\thandleError(msg, err, stderr)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t\/\/ Remember the current branch.\n\tmsg := \"Remember the current branch\"\n\tcurrentBranch, stderr, err := git.CurrentBranch()\n\tif err != nil {\n\t\treturn handleError(msg, err, stderr)\n\t}\n\n\t\/\/ Fetch stories from the issue tracker.\n\tmsg = \"Fetch stories from the issue tracker\"\n\tlog.Run(msg)\n\tstories, err := modules.GetIssueTracker().StartableStories()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ List stories that can be started.\n\ttw := tabwriter.NewWriter(os.Stdout, 0, 8, 4, '\\t', 0)\n\tio.WriteString(tw, \"\\nYou can start working on one of the following stories:\\n\\n\")\n\tio.WriteString(tw, \"Index\\tStory ID\\tStory Title\\n\")\n\tio.WriteString(tw, \"=====\\t========\\t===========\\n\")\n\tfor i, story := range stories {\n\t\tfmt.Fprintf(tw, \"%v\\t%v\\t%v\\n\", i, story.ReadableId(), story.Title())\n\t}\n\tio.WriteString(tw, \"\\n\")\n\ttw.Flush()\n\n\t\/\/ Prompt user to choose.\n\tindex, err := prompt.PromptIndex(\"Choose a story by inserting its index: \", 0, len(stories)-1)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\tfmt.Println()\n\n\tselectedStory = stories[index]\n\n\t\/\/ Fetch the remote repository.\n\tmsg = \"Fetch the remote repository\"\n\tlog.Run(msg)\n\tstderr, err = git.UpdateRemotes(config.OriginName)\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Get all branches.\n\tmsg = \"Collect all story branches\"\n\tlocalRefs, remoteRefs, stderr, err := git.ListStoryRefs()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Get all story branches connected to `selectedStory` id.\n\tmsg = \"Check existing branches\"\n\tlog.Run(msg)\n\tmatchingBranches := map[string]struct{}{}\n\n\tstripRemotePrefixRe := regexp.MustCompile(\".*\/(story\/.+\/.+)$\")\n\tfor _, ref := range append(localRefs, remoteRefs...) {\n\t\tstoryId, err := git.RefToStoryId(ref)\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tif storyId == selectedStory.ReadableId() {\n\t\t\t\/\/ We found a matching story branch. Let's strip off the `remote\/origin` etc\n\t\t\t\/\/ part so that we have just the branch name.\n\t\t\tbranchName := stripRemotePrefixRe.ReplaceAllString(ref, \"$1\")\n\t\t\tmatchingBranches[branchName] = struct{}{}\n\t\t}\n\t}\n\n\tif len(matchingBranches) == 1 {\n\t\t\/\/ There is only one branch => checkout and work on that one.\n\t\tfor branch := range matchingBranches {\n\t\t\tlog.Log(\"Found one existing story branch: \" + branch)\n\t\t\tmsg := \"Checkout branch \" + branch\n\t\t\tlog.Run(msg)\n\t\t\tif stderr, err = git.Checkout(branch); err != nil {\n\t\t\t\treturn handleError(msg, err, stderr)\n\t\t\t}\n\t\t}\n\n\t} else if len(matchingBranches) > 1 {\n\t\t\/\/ There are multiple branches. Let the user choose which one to work on.\n\t\tlogger := log.V(log.Info)\n\t\tlogger.Lock()\n\t\tlogger.UnsafeLog(\"Found multiple existing story branches:\")\n\t\tfor branch := range matchingBranches {\n\t\t\tlogger.UnsafeNewLine(\" - \" + branch)\n\t\t}\n\t\tlogger.UnsafeNewLine(\"Please checkout one of them and work there.\")\n\t\tlogger.Unlock()\n\n\t} else {\n\t\t\/\/ There is no branch => create a new one.\n\t\tmsg := \"Prompt for the new story branch name\"\n\t\tline, err := prompt.Prompt(\"Please insert branch slug: \")\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tbranchName := fmt.Sprintf(\"story\/%s\/%s\", slug.Slug(line), selectedStory.ReadableId())\n\t\tok, err := prompt.Confirm(\n\t\t\tfmt.Sprintf(\"The branch will be called '%s', OK?\", branchName))\n\t\tif err != nil {\n\t\t\treturn handleError(msg, err, nil)\n\t\t}\n\t\tif !ok {\n\t\t\tlog.Fatalln(\"I will exit now. If you want to try again, just run me again!\")\n\t\t}\n\t\tfmt.Println()\n\n\t\tcreateMsg := \"Create branch \" + branchName\n\t\tlog.Run(msg)\n\t\tif _, stderr, err = git.Git(\"branch\", branchName, config.TrunkBranch); err != nil {\n\t\t\treturn handleError(msg, err, stderr)\n\t\t}\n\n\t\tmsg = \"Checkout branch \" + branchName\n\t\tif _, stderr, err = git.Git(\"checkout\", branchName); err != nil {\n\t\t\treturn handleError(msg, err, stderr)\n\t\t}\n\n\t\t\/\/ Delete the newly created branch on rollback.\n\t\tdefer func(msg string) {\n\t\t\tif err != nil {\n\t\t\t\tlog.Rollback(msg)\n\t\t\t\tif stderr, err := git.Branch(\"-D\", branchName); err != nil {\n\t\t\t\t\thandleError(\"Delete branch \"+branchName, err, stderr)\n\t\t\t\t}\n\t\t\t}\n\t\t}(createMsg)\n\t}\n\n\tmsg = fmt.Sprintf(\"Start the selected story (%v)\", selectedStory.ReadableId())\n\tlog.Run(msg)\n\tif err := selectedStory.Start(); err != nil {\n\t\treturn handleError(msg, err, err.Stderr)\n\t}\n\n\tmsg = \"Set you as the story owner\"\n\tlog.Run(msg)\n\tuser, err := modules.GetIssueTracker().CurrentUser()\n\tif err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\t\/\/ TODO: We should update, not overwrite the owners.\n\tif err := selectedStory.SetOwners([]common.User{user}); err != nil {\n\t\treturn handleError(msg, err, nil)\n\t}\n\n\t\/\/ Do not checkout the original branch, the story branch is active now.\n\tcurrentBranch = \"\"\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage notify\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"go.chromium.org\/luci\/common\/api\/gitiles\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\tgitpb \"go.chromium.org\/luci\/common\/proto\/git\"\n\tgitilespb \"go.chromium.org\/luci\/common\/proto\/gitiles\"\n\t\"go.chromium.org\/luci\/server\/auth\"\n)\n\n\/\/ HistoryFunc is a function that gets a list of commits from Gitiles for a\n\/\/ specific repository, between oldRevision and newRevision, inclusive.\n\/\/\n\/\/ If oldRevision is not reachable from newRevision, returns an empty slice\n\/\/ and nil error.\ntype HistoryFunc func(c context.Context, host, project, oldRevision, newRevision string) ([]*gitpb.Commit, error)\n\n\/\/ gitilesHistory is an implementation of a HistoryFunc intended to be used\n\/\/ in production (not for testing).\nfunc gitilesHistory(c context.Context, host, project, oldRevision, newRevision string) ([]*gitpb.Commit, error) {\n\tc, _ = context.WithTimeout(c, 30*time.Second)\n\n\ttransport, err := auth.GetRPCTransport(c, auth.AsSelf, auth.WithScopes(gitiles.OAuthScope))\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"getting RPC Transport\").Err()\n\t}\n\tclient, err := gitiles.NewRESTClient(&http.Client{Transport: transport}, host, true)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"creating Gitiles client\").Err()\n\t}\n\n\treq := &gitilespb.LogRequest{\n\t\tProject: project,\n\t\t\/\/ With ~1, if newCommit.Revision == oldRevision,\n\t\t\/\/ we'll get one commit,\n\t\t\/\/ otherwise we will get 0 commits which is indistinguishable from\n\t\t\/\/ oldRevision not being reachable from newRevision.\n\t\tAncestor: oldRevision + \"~1\",\n\t\tTreeish: newRevision,\n\t}\n\tlogging.Infof(c, \"Gitiles request to host %q: %q\", host, req)\n\tres, err := client.Log(c, req)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, errors.Annotate(err, \"fetching commit from Gitiles\").Err()\n\tcase len(res.Log) > 0 && res.Log[0].Id != newRevision: \/\/ Sanity check.\n\t\treturn nil, errors.Reason(\"gitiles returned inconsistent results\").Err()\n\tdefault:\n\t\treturn res.Log, nil\n\t}\n}\n<commit_msg>[luci-notify] Use gitiles.PagingLog when retrieving commits<commit_after>\/\/ Copyright 2017 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage notify\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"go.chromium.org\/luci\/common\/api\/gitiles\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\tgitpb \"go.chromium.org\/luci\/common\/proto\/git\"\n\tgitilespb \"go.chromium.org\/luci\/common\/proto\/gitiles\"\n\t\"go.chromium.org\/luci\/server\/auth\"\n)\n\n\/\/ HistoryFunc is a function that gets a list of commits from Gitiles for a\n\/\/ specific repository, between oldRevision and newRevision, inclusive.\n\/\/\n\/\/ If oldRevision is not reachable from newRevision, returns an empty slice\n\/\/ and nil error.\ntype HistoryFunc func(c context.Context, host, project, oldRevision, newRevision string) ([]*gitpb.Commit, error)\n\n\/\/ gitilesHistory is an implementation of a HistoryFunc intended to be used\n\/\/ in production (not for testing).\nfunc gitilesHistory(c context.Context, host, project, oldRevision, newRevision string) ([]*gitpb.Commit, error) {\n\tc, _ = context.WithTimeout(c, 30*time.Second)\n\n\ttransport, err := auth.GetRPCTransport(c, auth.AsSelf, auth.WithScopes(gitiles.OAuthScope))\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"getting RPC Transport\").Err()\n\t}\n\tclient, err := gitiles.NewRESTClient(&http.Client{Transport: transport}, host, true)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"creating Gitiles client\").Err()\n\t}\n\n\treq := gitilespb.LogRequest{\n\t\tProject: project,\n\t\t\/\/ With ~1, if newCommit.Revision == oldRevision,\n\t\t\/\/ we'll get one commit,\n\t\t\/\/ otherwise we will get 0 commits which is indistinguishable from\n\t\t\/\/ oldRevision not being reachable from newRevision.\n\t\tAncestor: oldRevision + \"~1\",\n\t\tTreeish: newRevision,\n\t}\n\tlogging.Infof(c, \"Gitiles request to host %q: %q\", host, &req)\n\tres, err := gitiles.PagingLog(c, client, req, 0)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, errors.Annotate(err, \"fetching commit from Gitiles\").Err()\n\tcase len(res) > 0 && res[0].Id != newRevision: \/\/ Sanity check.\n\t\treturn nil, errors.Reason(\"gitiles returned inconsistent results\").Err()\n\tdefault:\n\t\treturn res, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package controlapi\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com\/docker\/engine-api\/types\/reference\"\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/identity\"\n\t\"github.com\/docker\/swarmkit\/manager\/scheduler\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/store\"\n\t\"github.com\/docker\/swarmkit\/protobuf\/ptypes\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\nvar (\n\terrNetworkUpdateNotSupported = errors.New(\"changing network in service is not supported\")\n\terrModeChangeNotAllowed = errors.New(\"service mode change is not allowed\")\n)\n\nfunc validateResources(r *api.Resources) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tif r.NanoCPUs != 0 && r.NanoCPUs < 1e6 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"invalid cpu value %g: Must be at least %g\", float64(r.NanoCPUs)\/1e9, 1e6\/1e9)\n\t}\n\n\tif r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"invalid memory value %d: Must be at least 4MiB\", r.MemoryBytes)\n\t}\n\treturn nil\n}\n\nfunc validateResourceRequirements(r *api.ResourceRequirements) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\tif err := validateResources(r.Limits); err != nil {\n\t\treturn err\n\t}\n\tif err := validateResources(r.Reservations); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc validateRestartPolicy(rp *api.RestartPolicy) error {\n\tif rp == nil {\n\t\treturn nil\n\t}\n\n\tif rp.Delay != nil {\n\t\tdelay, err := ptypes.Duration(rp.Delay)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif delay < 0 {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: restart-delay cannot be negative\")\n\t\t}\n\t}\n\n\tif rp.Window != nil {\n\t\twin, err := ptypes.Duration(rp.Window)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif win < 0 {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: restart-window cannot be negative\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validatePlacement(placement *api.Placement) error {\n\tif placement == nil {\n\t\treturn nil\n\t}\n\t_, err := scheduler.ParseExprs(placement.Constraints)\n\treturn err\n}\n\nfunc validateUpdate(uc *api.UpdateConfig) error {\n\tif uc == nil {\n\t\treturn nil\n\t}\n\n\tdelay, err := ptypes.Duration(&uc.Delay)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif delay < 0 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: update-delay cannot be negative\")\n\t}\n\n\treturn nil\n}\n\nfunc validateTask(taskSpec api.TaskSpec) error {\n\tif err := validateResourceRequirements(taskSpec.Resources); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validateRestartPolicy(taskSpec.Restart); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validatePlacement(taskSpec.Placement); err != nil {\n\t\treturn err\n\t}\n\n\tif taskSpec.GetRuntime() == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: missing runtime\")\n\t}\n\n\t_, ok := taskSpec.GetRuntime().(*api.TaskSpec_Container)\n\tif !ok {\n\t\treturn grpc.Errorf(codes.Unimplemented, \"RuntimeSpec: unimplemented runtime in service spec\")\n\t}\n\n\tcontainer := taskSpec.GetContainer()\n\tif container == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: missing in service spec\")\n\t}\n\n\tif container.Image == \"\" {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: image reference must be provided\")\n\t}\n\n\tif _, _, err := reference.Parse(container.Image); err != nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: %q is not a valid repository\/tag\", container.Image)\n\t}\n\treturn nil\n}\n\nfunc validateEndpointSpec(epSpec *api.EndpointSpec) error {\n\t\/\/ Endpoint spec is optional\n\tif epSpec == nil {\n\t\treturn nil\n\t}\n\n\tif len(epSpec.Ports) > 0 && epSpec.Mode == api.ResolutionModeDNSRoundRobin {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"EndpointSpec: ports can't be used with dnsrr mode\")\n\t}\n\n\tportSet := make(map[api.PortConfig]struct{})\n\tfor _, port := range epSpec.Ports {\n\t\tif _, ok := portSet[*port]; ok {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"EndpointSpec: duplicate ports provided\")\n\t\t}\n\n\t\tportSet[*port] = struct{}{}\n\t}\n\n\treturn nil\n}\n\nfunc validateServiceSpec(spec *api.ServiceSpec) error {\n\tif spec == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\tif err := validateAnnotations(spec.Annotations); err != nil {\n\t\treturn err\n\t}\n\tif err := validateTask(spec.Task); err != nil {\n\t\treturn err\n\t}\n\tif err := validateUpdate(spec.Update); err != nil {\n\t\treturn err\n\t}\n\tif err := validateEndpointSpec(spec.Endpoint); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checkPortConflicts does a best effort to find if the passed in spec has port\n\/\/ conflicts with existing services.\nfunc (s *Server) checkPortConflicts(spec *api.ServiceSpec) error {\n\tif spec.Endpoint == nil {\n\t\treturn nil\n\t}\n\n\tpcToString := func(pc *api.PortConfig) string {\n\t\tport := strconv.FormatUint(uint64(pc.PublishedPort), 10)\n\t\treturn port + \"\/\" + pc.Protocol.String()\n\t}\n\n\treqPorts := make(map[string]bool)\n\tfor _, pc := range spec.Endpoint.Ports {\n\t\tif pc.PublishedPort > 0 {\n\t\t\treqPorts[pcToString(pc)] = true\n\t\t}\n\t}\n\tif len(reqPorts) == 0 {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tservices []*api.Service\n\t\terr error\n\t)\n\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservices, err = store.FindServices(tx, store.All)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, service := range services {\n\t\tif service.Spec.Endpoint != nil {\n\t\t\tfor _, pc := range service.Spec.Endpoint.Ports {\n\t\t\t\tif reqPorts[pcToString(pc)] {\n\t\t\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"port '%d' is already in use by service '%s' (%s)\", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif service.Endpoint != nil {\n\t\t\tfor _, pc := range service.Endpoint.Ports {\n\t\t\t\tif reqPorts[pcToString(pc)] {\n\t\t\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"port '%d' is already in use by service '%s' (%s)\", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CreateService creates and return a Service based on the provided ServiceSpec.\n\/\/ - Returns `InvalidArgument` if the ServiceSpec is malformed.\n\/\/ - Returns `Unimplemented` if the ServiceSpec references unimplemented features.\n\/\/ - Returns `AlreadyExists` if the ServiceID conflicts.\n\/\/ - Returns an error if the creation fails.\nfunc (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {\n\tif err := validateServiceSpec(request.Spec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.checkPortConflicts(request.Spec); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO(aluzzardi): Consider using `Name` as a primary key to handle\n\t\/\/ duplicate creations. See #65\n\tservice := &api.Service{\n\t\tID: identity.NewID(),\n\t\tSpec: *request.Spec,\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\treturn store.CreateService(tx, service)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &api.CreateServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ GetService returns a Service given a ServiceID.\n\/\/ - Returns `InvalidArgument` if ServiceID is not provided.\n\/\/ - Returns `NotFound` if the Service is not found.\nfunc (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {\n\tif request.ServiceID == \"\" {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\n\tvar service *api.Service\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t})\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\n\treturn &api.GetServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ UpdateService updates a Service referenced by ServiceID with the given ServiceSpec.\n\/\/ - Returns `NotFound` if the Service is not found.\n\/\/ - Returns `InvalidArgument` if the ServiceSpec is malformed.\n\/\/ - Returns `Unimplemented` if the ServiceSpec references unimplemented features.\n\/\/ - Returns an error if the update fails.\nfunc (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) {\n\tif request.ServiceID == \"\" || request.ServiceVersion == nil {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\tif err := validateServiceSpec(request.Spec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar service *api.Service\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t})\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\n\tif request.Spec.Endpoint != nil && !reflect.DeepEqual(request.Spec.Endpoint, service.Spec.Endpoint) {\n\t\tif err := s.checkPortConflicts(request.Spec); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t\tif service == nil {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ temporary disable network update\n\t\tif request.Spec != nil && !reflect.DeepEqual(request.Spec.Networks, service.Spec.Networks) {\n\t\t\treturn errNetworkUpdateNotSupported\n\t\t}\n\n\t\t\/\/ orchestrator is designed to be stateless, so it should not deal\n\t\t\/\/ with service mode change (comparing current config with previous config).\n\t\t\/\/ proper way to change service mode is to delete and re-add.\n\t\tif request.Spec != nil && reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) {\n\t\t\treturn errModeChangeNotAllowed\n\t\t}\n\t\tservice.Meta.Version = *request.ServiceVersion\n\t\tservice.Spec = *request.Spec.Copy()\n\n\t\t\/\/ Reset update status\n\t\tservice.UpdateStatus = nil\n\n\t\treturn store.UpdateService(tx, service)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\treturn &api.UpdateServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ RemoveService removes a Service referenced by ServiceID.\n\/\/ - Returns `InvalidArgument` if ServiceID is not provided.\n\/\/ - Returns `NotFound` if the Service is not found.\n\/\/ - Returns an error if the deletion fails.\nfunc (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {\n\tif request.ServiceID == \"\" {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\treturn store.DeleteService(tx, request.ServiceID)\n\t})\n\tif err != nil {\n\t\tif err == store.ErrNotExist {\n\t\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn &api.RemoveServiceResponse{}, nil\n}\n\nfunc filterServices(candidates []*api.Service, filters ...func(*api.Service) bool) []*api.Service {\n\tresult := []*api.Service{}\n\n\tfor _, c := range candidates {\n\t\tmatch := true\n\t\tfor _, f := range filters {\n\t\t\tif !f(c) {\n\t\t\t\tmatch = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif match {\n\t\t\tresult = append(result, c)\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ListServices returns a list of all services.\nfunc (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) {\n\tvar (\n\t\tservices []*api.Service\n\t\terr error\n\t)\n\n\ts.store.View(func(tx store.ReadTx) {\n\t\tswitch {\n\t\tcase request.Filters != nil && len(request.Filters.Names) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names))\n\t\tcase request.Filters != nil && len(request.Filters.NamePrefixes) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))\n\t\tcase request.Filters != nil && len(request.Filters.IDPrefixes) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))\n\t\tdefault:\n\t\t\tservices, err = store.FindServices(tx, store.All)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif request.Filters != nil {\n\t\tservices = filterServices(services,\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContains(e.Spec.Annotations.Name, request.Filters.Names)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContainsPrefix(e.ID, request.Filters.IDPrefixes)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)\n\t\t\t},\n\t\t)\n\t}\n\n\treturn &api.ListServicesResponse{\n\t\tServices: services,\n\t}, nil\n}\n<commit_msg>Skip same service PortConfig check to allow PortConfig updating<commit_after>package controlapi\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com\/docker\/engine-api\/types\/reference\"\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/identity\"\n\t\"github.com\/docker\/swarmkit\/manager\/scheduler\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/store\"\n\t\"github.com\/docker\/swarmkit\/protobuf\/ptypes\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\nvar (\n\terrNetworkUpdateNotSupported = errors.New(\"changing network in service is not supported\")\n\terrModeChangeNotAllowed = errors.New(\"service mode change is not allowed\")\n)\n\nfunc validateResources(r *api.Resources) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tif r.NanoCPUs != 0 && r.NanoCPUs < 1e6 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"invalid cpu value %g: Must be at least %g\", float64(r.NanoCPUs)\/1e9, 1e6\/1e9)\n\t}\n\n\tif r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"invalid memory value %d: Must be at least 4MiB\", r.MemoryBytes)\n\t}\n\treturn nil\n}\n\nfunc validateResourceRequirements(r *api.ResourceRequirements) error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\tif err := validateResources(r.Limits); err != nil {\n\t\treturn err\n\t}\n\tif err := validateResources(r.Reservations); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc validateRestartPolicy(rp *api.RestartPolicy) error {\n\tif rp == nil {\n\t\treturn nil\n\t}\n\n\tif rp.Delay != nil {\n\t\tdelay, err := ptypes.Duration(rp.Delay)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif delay < 0 {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: restart-delay cannot be negative\")\n\t\t}\n\t}\n\n\tif rp.Window != nil {\n\t\twin, err := ptypes.Duration(rp.Window)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif win < 0 {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: restart-window cannot be negative\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validatePlacement(placement *api.Placement) error {\n\tif placement == nil {\n\t\treturn nil\n\t}\n\t_, err := scheduler.ParseExprs(placement.Constraints)\n\treturn err\n}\n\nfunc validateUpdate(uc *api.UpdateConfig) error {\n\tif uc == nil {\n\t\treturn nil\n\t}\n\n\tdelay, err := ptypes.Duration(&uc.Delay)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif delay < 0 {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: update-delay cannot be negative\")\n\t}\n\n\treturn nil\n}\n\nfunc validateTask(taskSpec api.TaskSpec) error {\n\tif err := validateResourceRequirements(taskSpec.Resources); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validateRestartPolicy(taskSpec.Restart); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validatePlacement(taskSpec.Placement); err != nil {\n\t\treturn err\n\t}\n\n\tif taskSpec.GetRuntime() == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"TaskSpec: missing runtime\")\n\t}\n\n\t_, ok := taskSpec.GetRuntime().(*api.TaskSpec_Container)\n\tif !ok {\n\t\treturn grpc.Errorf(codes.Unimplemented, \"RuntimeSpec: unimplemented runtime in service spec\")\n\t}\n\n\tcontainer := taskSpec.GetContainer()\n\tif container == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: missing in service spec\")\n\t}\n\n\tif container.Image == \"\" {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: image reference must be provided\")\n\t}\n\n\tif _, _, err := reference.Parse(container.Image); err != nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"ContainerSpec: %q is not a valid repository\/tag\", container.Image)\n\t}\n\treturn nil\n}\n\nfunc validateEndpointSpec(epSpec *api.EndpointSpec) error {\n\t\/\/ Endpoint spec is optional\n\tif epSpec == nil {\n\t\treturn nil\n\t}\n\n\tif len(epSpec.Ports) > 0 && epSpec.Mode == api.ResolutionModeDNSRoundRobin {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"EndpointSpec: ports can't be used with dnsrr mode\")\n\t}\n\n\tportSet := make(map[api.PortConfig]struct{})\n\tfor _, port := range epSpec.Ports {\n\t\tif _, ok := portSet[*port]; ok {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"EndpointSpec: duplicate ports provided\")\n\t\t}\n\n\t\tportSet[*port] = struct{}{}\n\t}\n\n\treturn nil\n}\n\nfunc validateServiceSpec(spec *api.ServiceSpec) error {\n\tif spec == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\tif err := validateAnnotations(spec.Annotations); err != nil {\n\t\treturn err\n\t}\n\tif err := validateTask(spec.Task); err != nil {\n\t\treturn err\n\t}\n\tif err := validateUpdate(spec.Update); err != nil {\n\t\treturn err\n\t}\n\tif err := validateEndpointSpec(spec.Endpoint); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checkPortConflicts does a best effort to find if the passed in spec has port\n\/\/ conflicts with existing services.\nfunc (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) error {\n\tif spec.Endpoint == nil {\n\t\treturn nil\n\t}\n\n\tpcToString := func(pc *api.PortConfig) string {\n\t\tport := strconv.FormatUint(uint64(pc.PublishedPort), 10)\n\t\treturn port + \"\/\" + pc.Protocol.String()\n\t}\n\n\treqPorts := make(map[string]bool)\n\tfor _, pc := range spec.Endpoint.Ports {\n\t\tif pc.PublishedPort > 0 {\n\t\t\treqPorts[pcToString(pc)] = true\n\t\t}\n\t}\n\tif len(reqPorts) == 0 {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tservices []*api.Service\n\t\terr error\n\t)\n\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservices, err = store.FindServices(tx, store.All)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, service := range services {\n\t\t\/\/ If service ID is the same (and not \"\") then this is an update\n\t\tif serviceID != \"\" && serviceID == service.ID {\n\t\t\tcontinue\n\t\t}\n\t\tif service.Spec.Endpoint != nil {\n\t\t\tfor _, pc := range service.Spec.Endpoint.Ports {\n\t\t\t\tif reqPorts[pcToString(pc)] {\n\t\t\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"port '%d' is already in use by service '%s' (%s)\", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif service.Endpoint != nil {\n\t\t\tfor _, pc := range service.Endpoint.Ports {\n\t\t\t\tif reqPorts[pcToString(pc)] {\n\t\t\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"port '%d' is already in use by service '%s' (%s)\", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CreateService creates and return a Service based on the provided ServiceSpec.\n\/\/ - Returns `InvalidArgument` if the ServiceSpec is malformed.\n\/\/ - Returns `Unimplemented` if the ServiceSpec references unimplemented features.\n\/\/ - Returns `AlreadyExists` if the ServiceID conflicts.\n\/\/ - Returns an error if the creation fails.\nfunc (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {\n\tif err := validateServiceSpec(request.Spec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.checkPortConflicts(request.Spec, \"\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO(aluzzardi): Consider using `Name` as a primary key to handle\n\t\/\/ duplicate creations. See #65\n\tservice := &api.Service{\n\t\tID: identity.NewID(),\n\t\tSpec: *request.Spec,\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\treturn store.CreateService(tx, service)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &api.CreateServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ GetService returns a Service given a ServiceID.\n\/\/ - Returns `InvalidArgument` if ServiceID is not provided.\n\/\/ - Returns `NotFound` if the Service is not found.\nfunc (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {\n\tif request.ServiceID == \"\" {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\n\tvar service *api.Service\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t})\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\n\treturn &api.GetServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ UpdateService updates a Service referenced by ServiceID with the given ServiceSpec.\n\/\/ - Returns `NotFound` if the Service is not found.\n\/\/ - Returns `InvalidArgument` if the ServiceSpec is malformed.\n\/\/ - Returns `Unimplemented` if the ServiceSpec references unimplemented features.\n\/\/ - Returns an error if the update fails.\nfunc (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) {\n\tif request.ServiceID == \"\" || request.ServiceVersion == nil {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\tif err := validateServiceSpec(request.Spec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar service *api.Service\n\ts.store.View(func(tx store.ReadTx) {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t})\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\n\tif request.Spec.Endpoint != nil && !reflect.DeepEqual(request.Spec.Endpoint, service.Spec.Endpoint) {\n\t\tif err := s.checkPortConflicts(request.Spec, request.ServiceID); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\tservice = store.GetService(tx, request.ServiceID)\n\t\tif service == nil {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ temporary disable network update\n\t\tif request.Spec != nil && !reflect.DeepEqual(request.Spec.Networks, service.Spec.Networks) {\n\t\t\treturn errNetworkUpdateNotSupported\n\t\t}\n\n\t\t\/\/ orchestrator is designed to be stateless, so it should not deal\n\t\t\/\/ with service mode change (comparing current config with previous config).\n\t\t\/\/ proper way to change service mode is to delete and re-add.\n\t\tif request.Spec != nil && reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) {\n\t\t\treturn errModeChangeNotAllowed\n\t\t}\n\t\tservice.Meta.Version = *request.ServiceVersion\n\t\tservice.Spec = *request.Spec.Copy()\n\n\t\t\/\/ Reset update status\n\t\tservice.UpdateStatus = nil\n\n\t\treturn store.UpdateService(tx, service)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif service == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t}\n\treturn &api.UpdateServiceResponse{\n\t\tService: service,\n\t}, nil\n}\n\n\/\/ RemoveService removes a Service referenced by ServiceID.\n\/\/ - Returns `InvalidArgument` if ServiceID is not provided.\n\/\/ - Returns `NotFound` if the Service is not found.\n\/\/ - Returns an error if the deletion fails.\nfunc (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {\n\tif request.ServiceID == \"\" {\n\t\treturn nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())\n\t}\n\n\terr := s.store.Update(func(tx store.Tx) error {\n\t\treturn store.DeleteService(tx, request.ServiceID)\n\t})\n\tif err != nil {\n\t\tif err == store.ErrNotExist {\n\t\t\treturn nil, grpc.Errorf(codes.NotFound, \"service %s not found\", request.ServiceID)\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn &api.RemoveServiceResponse{}, nil\n}\n\nfunc filterServices(candidates []*api.Service, filters ...func(*api.Service) bool) []*api.Service {\n\tresult := []*api.Service{}\n\n\tfor _, c := range candidates {\n\t\tmatch := true\n\t\tfor _, f := range filters {\n\t\t\tif !f(c) {\n\t\t\t\tmatch = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif match {\n\t\t\tresult = append(result, c)\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ListServices returns a list of all services.\nfunc (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) {\n\tvar (\n\t\tservices []*api.Service\n\t\terr error\n\t)\n\n\ts.store.View(func(tx store.ReadTx) {\n\t\tswitch {\n\t\tcase request.Filters != nil && len(request.Filters.Names) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names))\n\t\tcase request.Filters != nil && len(request.Filters.NamePrefixes) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))\n\t\tcase request.Filters != nil && len(request.Filters.IDPrefixes) > 0:\n\t\t\tservices, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))\n\t\tdefault:\n\t\t\tservices, err = store.FindServices(tx, store.All)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif request.Filters != nil {\n\t\tservices = filterServices(services,\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContains(e.Spec.Annotations.Name, request.Filters.Names)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterContainsPrefix(e.ID, request.Filters.IDPrefixes)\n\t\t\t},\n\t\t\tfunc(e *api.Service) bool {\n\t\t\t\treturn filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)\n\t\t\t},\n\t\t)\n\t}\n\n\treturn &api.ListServicesResponse{\n\t\tServices: services,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/mitchellh\/reflectwalk\"\n)\n\nfunc TestInterpolationWalker_detect(t *testing.T) {\n\tcases := []struct {\n\t\tInput interface{}\n\t\tResult []Interpolation\n\t}{\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tResult: nil,\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${var.foo}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&VariableInterpolation{\n\t\t\t\t\tVariable: &UserVariable{\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tkey: \"var.foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${aws_instance.foo.*.num}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&VariableInterpolation{\n\t\t\t\t\tVariable: &ResourceVariable{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tField: \"num\",\n\n\t\t\t\t\t\tMulti: true,\n\t\t\t\t\t\tIndex: -1,\n\n\t\t\t\t\t\tkey: \"aws_instance.foo.*.num\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${lookup(var.foo)}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&VariableInterpolation{\n\t\t\t\t\t\t\tVariable: &UserVariable{\n\t\t\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\t\t\tkey: \"var.foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": `${file(\"test.txt\")}`,\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&LiteralInterpolation{\n\t\t\t\t\t\t\tLiteral: \"test.txt\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": `${file(\"foo\/bar.txt\")}`,\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&LiteralInterpolation{\n\t\t\t\t\t\t\tLiteral: \"foo\/bar.txt\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tc := range cases {\n\t\tvar actual []Interpolation\n\n\t\tdetectFn := func(i Interpolation) (string, error) {\n\t\t\tactual = append(actual, i)\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\tw := &interpolationWalker{F: detectFn}\n\t\tif err := reflectwalk.Walk(tc.Input, w); err != nil {\n\t\t\tt.Fatalf(\"err: %s\", err)\n\t\t}\n\n\t\tfor _, a := range actual {\n\t\t\t\/\/ This is jank, but reflect.DeepEqual never has functions\n\t\t\t\/\/ being the same.\n\t\t\tif f, ok := a.(*FunctionInterpolation); ok {\n\t\t\t\tf.Func = nil\n\t\t\t}\n\t\t}\n\n\t\tif !reflect.DeepEqual(actual, tc.Result) {\n\t\t\tt.Fatalf(\"%d: bad:\\n\\n%#v\", i, actual)\n\t\t}\n\t}\n}\n\nfunc TestInterpolationWalker_replace(t *testing.T) {\n\tcases := []struct {\n\t\tInput interface{}\n\t\tOutput interface{}\n\t\tValue string\n\t}{\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"hello, ${var.foo}\",\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": \"hello, bar\",\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\t\"${var.foo}\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\t\"bar\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": []interface{}{\n\t\t\t\t\t\"${var.foo}\",\n\t\t\t\t\t\"bing\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": []interface{}{\n\t\t\t\t\t\"bar\",\n\t\t\t\t\t\"baz\",\n\t\t\t\t\t\"bing\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tValue: \"bar,baz\",\n\t\t},\n\t}\n\n\tfor i, tc := range cases {\n\t\tfn := func(i Interpolation) (string, error) {\n\t\t\treturn tc.Value, nil\n\t\t}\n\n\t\tw := &interpolationWalker{F: fn, Replace: true}\n\t\tif err := reflectwalk.Walk(tc.Input, w); err != nil {\n\t\t\tt.Fatalf(\"err: %s\", err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(tc.Input, tc.Output) {\n\t\t\tt.Fatalf(\"%d: bad:\\n\\n%#v\", i, tc.Input)\n\t\t}\n\t}\n}\n<commit_msg>config: make the tests more robust<commit_after>package config\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/mitchellh\/reflectwalk\"\n)\n\nfunc TestInterpolationWalker_detect(t *testing.T) {\n\tcases := []struct {\n\t\tInput interface{}\n\t\tResult []Interpolation\n\t}{\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tResult: nil,\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${var.foo}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&VariableInterpolation{\n\t\t\t\t\tVariable: &UserVariable{\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tkey: \"var.foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${aws_instance.foo.*.num}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&VariableInterpolation{\n\t\t\t\t\tVariable: &ResourceVariable{\n\t\t\t\t\t\tType: \"aws_instance\",\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tField: \"num\",\n\n\t\t\t\t\t\tMulti: true,\n\t\t\t\t\t\tIndex: -1,\n\n\t\t\t\t\t\tkey: \"aws_instance.foo.*.num\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"${lookup(var.foo)}\",\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&VariableInterpolation{\n\t\t\t\t\t\t\tVariable: &UserVariable{\n\t\t\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\t\t\tkey: \"var.foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": `${file(\"test.txt\")}`,\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&LiteralInterpolation{\n\t\t\t\t\t\t\tLiteral: \"test.txt\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": `${file(\"foo\/bar.txt\")}`,\n\t\t\t},\n\t\t\tResult: []Interpolation{\n\t\t\t\t&FunctionInterpolation{\n\t\t\t\t\tFunc: nil,\n\t\t\t\t\tArgs: []Interpolation{\n\t\t\t\t\t\t&LiteralInterpolation{\n\t\t\t\t\t\t\tLiteral: \"foo\/bar.txt\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tc := range cases {\n\t\tvar actual []Interpolation\n\n\t\tdetectFn := func(i Interpolation) (string, error) {\n\t\t\tactual = append(actual, i)\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\tw := &interpolationWalker{F: detectFn}\n\t\tif err := reflectwalk.Walk(tc.Input, w); err != nil {\n\t\t\tt.Fatalf(\"err: %s\", err)\n\t\t}\n\n\t\tfor _, a := range actual {\n\t\t\t\/\/ This is jank, but reflect.DeepEqual never has functions\n\t\t\t\/\/ being the same.\n\t\t\tif f, ok := a.(*FunctionInterpolation); ok {\n\t\t\t\tf.Func = nil\n\t\t\t}\n\t\t}\n\n\t\tif !reflect.DeepEqual(actual, tc.Result) {\n\t\t\tt.Fatalf(\"%d: bad:\\n\\n%#v\", i, actual)\n\t\t}\n\t}\n}\n\nfunc TestInterpolationWalker_replace(t *testing.T) {\n\tcases := []struct {\n\t\tInput interface{}\n\t\tOutput interface{}\n\t\tValue string\n\t}{\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": \"$${var.foo}\",\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": \"hello, ${var.foo}\",\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": \"hello, bar\",\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\t\"${var.foo}\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\t\"bar\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tValue: \"bar\",\n\t\t},\n\n\t\t{\n\t\t\tInput: map[string]interface{}{\n\t\t\t\t\"foo\": []interface{}{\n\t\t\t\t\t\"${var.foo}\",\n\t\t\t\t\t\"bing\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOutput: map[string]interface{}{\n\t\t\t\t\"foo\": []interface{}{\n\t\t\t\t\t\"bar\",\n\t\t\t\t\t\"baz\",\n\t\t\t\t\t\"bing\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tValue: \"bar\" + InterpSplitDelim + \"baz\",\n\t\t},\n\t}\n\n\tfor i, tc := range cases {\n\t\tfn := func(i Interpolation) (string, error) {\n\t\t\treturn tc.Value, nil\n\t\t}\n\n\t\tw := &interpolationWalker{F: fn, Replace: true}\n\t\tif err := reflectwalk.Walk(tc.Input, w); err != nil {\n\t\t\tt.Fatalf(\"err: %s\", err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(tc.Input, tc.Output) {\n\t\t\tt.Fatalf(\"%d: bad:\\n\\n%#v\", i, tc.Input)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package config handles configuration ingestion and processing.\n\/\/ Validator\n\/\/ 1. Accepts new configuration from user\n\/\/ 2. Validates configuration\n\/\/ 3. Produces a \"ValidatedConfig\"\n\/\/ Runtime\n\/\/ 1. It is validated and actionable configuration\n\/\/ 2. It resolves the configuration to a list of Combined {aspect, adapter} configs\n\/\/ given an attribute.Bag.\n\/\/ 3. Combined config has complete information needed to dispatch aspect\npackage config\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/gogo\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"istio.io\/mixer\/pkg\/adapter\"\n\t\"istio.io\/mixer\/pkg\/config\/descriptor\"\n\tpb \"istio.io\/mixer\/pkg\/config\/proto\"\n\t\"istio.io\/mixer\/pkg\/expr\"\n)\n\ntype (\n\t\/\/ AspectParams describes configuration parameters for an aspect.\n\tAspectParams proto.Message\n\n\t\/\/ AspectValidator describes a type that is able to validate Aspect configuration.\n\tAspectValidator interface {\n\t\t\/\/ DefaultConfig returns a default configuration struct for this\n\t\t\/\/ adapter. This will be used by the configuration system to establish\n\t\t\/\/ the shape of the block of configuration state passed to the NewAspect method.\n\t\tDefaultConfig() (c AspectParams)\n\n\t\t\/\/ ValidateConfig determines whether the given configuration meets all correctness requirements.\n\t\tValidateConfig(c AspectParams, validator expr.Validator, finder descriptor.Finder) *adapter.ConfigErrors\n\t}\n\n\t\/\/ AdapterValidatorFinder is used to find specific underlying validators.\n\t\/\/ Manager registry and adapter registry should implement this interface\n\t\/\/ so ConfigValidators can be uniformly accessed.\n\tAdapterValidatorFinder func(name string) (adapter.ConfigValidator, bool)\n\n\t\/\/ AspectValidatorFinder is used to find specific underlying validators.\n\t\/\/ Manager registry and adapter registry should implement this interface\n\t\/\/ so ConfigValidators can be uniformly accessed.\n\tAspectValidatorFinder func(name string) (AspectValidator, bool)\n\n\t\/\/ AdapterToAspectMapper given an pb.Adapter.Impl\n\t\/\/ This is specifically *not* querying by pb.Adapter.Name\n\t\/\/ returns a list of aspects the adapter provides.\n\t\/\/ For many adapter this will be exactly 1 aspect.\n\tAdapterToAspectMapper func(impl string) []string\n)\n\n\/\/ NewValidator returns a validator given component validators.\nfunc NewValidator(managerFinder AspectValidatorFinder, adapterFinder AdapterValidatorFinder,\n\tfindAspects AdapterToAspectMapper, strict bool, exprValidator expr.Validator) *Validator {\n\treturn &Validator{\n\t\tmanagerFinder: managerFinder,\n\t\tadapterFinder: adapterFinder,\n\t\tfindAspects: findAspects,\n\t\tstrict: strict,\n\t\texprValidator: exprValidator,\n\t\tvalidated: &Validated{},\n\t}\n}\n\ntype (\n\t\/\/ Validator is the Configuration validator.\n\tValidator struct {\n\t\tmanagerFinder AspectValidatorFinder\n\t\tadapterFinder AdapterValidatorFinder\n\t\tfindAspects AdapterToAspectMapper\n\t\tdescriptorFinder descriptor.Finder\n\t\tstrict bool\n\t\texprValidator expr.Validator\n\t\tvalidated *Validated\n\t}\n\n\tadapterKey struct {\n\t\tkind string\n\t\tname string\n\t}\n\t\/\/ Validated store validated configuration.\n\t\/\/ It has been validated as internally consistent and correct.\n\tValidated struct {\n\t\tadapterByName map[adapterKey]*pb.Adapter\n\t\tglobalConfig *pb.GlobalConfig\n\t\tserviceConfig *pb.ServiceConfig\n\t\tnumAspects int\n\t}\n)\n\nfunc (a adapterKey) String() string {\n\treturn fmt.Sprintf(\"%s\/\/%s\", a.kind, a.name)\n}\n\n\/\/ validateGlobalConfig consumes a yml config string with adapter config.\n\/\/ It is validated in presence of validators.\nfunc (p *Validator) validateGlobalConfig(cfg string) (ce *adapter.ConfigErrors) {\n\tvar err error\n\tm := &pb.GlobalConfig{}\n\n\tif err = yaml.Unmarshal([]byte(cfg), m); err != nil {\n\t\tce = ce.Append(\"AdapterConfig\", err)\n\t\treturn\n\t}\n\tp.validated.adapterByName = make(map[adapterKey]*pb.Adapter)\n\tvar acfg adapter.Config\n\tfor _, aa := range m.GetAdapters() {\n\t\tif acfg, err = ConvertAdapterParams(p.adapterFinder, aa.Impl, aa.Params, p.strict); err != nil {\n\t\t\tce = ce.Append(\"Adapter: \"+aa.Impl, err)\n\t\t\tcontinue\n\t\t}\n\t\taa.Params = acfg\n\t\t\/\/ check which kinds aa.Impl provides\n\t\t\/\/ Then register it for all of them.\n\t\tfor _, kind := range p.findAspects(aa.Impl) {\n\t\t\tp.validated.adapterByName[adapterKey{kind, aa.Name}] = aa\n\t\t}\n\t}\n\tp.validated.globalConfig = m\n\treturn\n}\n\n\/\/ ValidateSelector ensures that the selector is valid per expression language.\nfunc (p *Validator) validateSelector(selector string) (err error) {\n\t\/\/ empty selector always selects\n\tif len(selector) == 0 {\n\t\treturn nil\n\t}\n\treturn p.exprValidator.Validate(selector)\n}\n\n\/\/ validateAspectRules validates the recursive configuration data structure.\n\/\/ It is primarily used by validate ServiceConfig.\nfunc (p *Validator) validateAspectRules(rules []*pb.AspectRule, path string, validatePresence bool) (ce *adapter.ConfigErrors) {\n\tvar acfg adapter.Config\n\tvar err error\n\tfor _, rule := range rules {\n\t\tif err = p.validateSelector(rule.GetSelector()); err != nil {\n\t\t\tce = ce.Append(path+\":Selector \"+rule.GetSelector(), err)\n\t\t}\n\t\tpath = path + \"\/\" + rule.GetSelector()\n\t\tfor idx, aa := range rule.GetAspects() {\n\t\t\tif acfg, err = ConvertAspectParams(p.managerFinder, aa.Kind, aa.GetParams(), p.strict, p.descriptorFinder); err != nil {\n\t\t\t\tce = ce.Append(fmt.Sprintf(\"%s:%s[%d]\", path, aa.Kind, idx), err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taa.Params = acfg\n\t\t\tp.validated.numAspects++\n\t\t\tif validatePresence {\n\t\t\t\tif aa.Adapter == \"\" {\n\t\t\t\t\taa.Adapter = \"default\"\n\t\t\t\t}\n\t\t\t\t\/\/ ensure that aa.Kind has a registered adapter\n\t\t\t\tak := adapterKey{aa.Kind, aa.Adapter}\n\t\t\t\tif p.validated.adapterByName[ak] == nil {\n\t\t\t\t\tce = ce.Appendf(\"NamedAdapter\", \"%s not available\", ak)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trs := rule.GetRules()\n\t\tif len(rs) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif verr := p.validateAspectRules(rs, path, validatePresence); verr != nil {\n\t\t\tce = ce.Extend(verr)\n\t\t}\n\t}\n\treturn ce\n}\n\n\/\/ Validate validates a single serviceConfig and globalConfig together.\n\/\/ It returns a fully validated Config if no errors are found.\nfunc (p *Validator) Validate(serviceCfg string, globalCfg string) (rt *Validated, ce *adapter.ConfigErrors) {\n\tvar cerr *adapter.ConfigErrors\n\tif re := p.validateGlobalConfig(globalCfg); re != nil {\n\t\tcerr = ce.Appendf(\"GlobalConfig\", \"failed validation\")\n\t\treturn rt, cerr.Extend(re)\n\t}\n\t\/\/ The order is important here, because serviceConfig refers to global config\n\tp.descriptorFinder = descriptor.NewFinder(p.validated.globalConfig)\n\n\tif re := p.validateServiceConfig(serviceCfg, true); re != nil {\n\t\tcerr = ce.Appendf(\"ServiceConfig\", \"failed validation\")\n\t\treturn rt, cerr.Extend(re)\n\t}\n\n\treturn p.validated, nil\n}\n\n\/\/ ValidateServiceConfig validates service config.\n\/\/ if validatePresence is true it will ensure that the named adapter and Kinds\n\/\/ have an available and configured adapter.\nfunc (p *Validator) validateServiceConfig(cfg string, validatePresence bool) (ce *adapter.ConfigErrors) {\n\tvar err error\n\tm := &pb.ServiceConfig{}\n\tif err = yaml.Unmarshal([]byte(cfg), m); err != nil {\n\t\tce = ce.Append(\"ServiceConfig\", err)\n\t\treturn\n\t}\n\tif ce = p.validateAspectRules(m.GetRules(), \"\", validatePresence); ce != nil {\n\t\treturn ce\n\t}\n\tp.validated.serviceConfig = m\n\treturn\n}\n\n\/\/ UnknownValidator returns error for the given name.\nfunc UnknownValidator(name string) error {\n\treturn fmt.Errorf(\"unknown type [%s]\", name)\n}\n\n\/\/ ConvertAdapterParams converts returns a typed proto message based on available Validator.\nfunc ConvertAdapterParams(find AdapterValidatorFinder, name string, params interface{}, strict bool) (adapter.Config, error) {\n\tvar avl adapter.ConfigValidator\n\tvar found bool\n\n\tif avl, found = find(name); !found {\n\t\treturn nil, UnknownValidator(name)\n\t}\n\n\tacfg := avl.DefaultConfig()\n\tif err := Decode(params, acfg, strict); err != nil {\n\t\treturn nil, err\n\t}\n\tif verr := avl.ValidateConfig(acfg); verr != nil {\n\t\treturn nil, verr\n\t}\n\treturn acfg, nil\n}\n\n\/\/ ConvertAspectParams converts returns a typed proto message based on available Validator.\nfunc ConvertAspectParams(find AspectValidatorFinder, name string, params interface{}, strict bool, df descriptor.Finder) (AspectParams, error) {\n\tvar avl AspectValidator\n\tvar found bool\n\n\tif avl, found = find(name); !found {\n\t\treturn nil, UnknownValidator(name)\n\t}\n\n\tacfg := avl.DefaultConfig()\n\tif err := Decode(params, acfg, strict); err != nil {\n\t\treturn nil, err\n\t}\n\tif verr := avl.ValidateConfig(acfg, expr.NewCEXLEvaluator(), df); verr != nil {\n\t\treturn nil, verr\n\t}\n\treturn acfg, nil\n}\n\n\/\/ Decode interprets src interface{} as the specified proto message.\n\/\/ if strict is true returns error on unknown fields.\nfunc Decode(src interface{}, dst adapter.Config, strict bool) (err error) {\n\tvar ba []byte\n\tba, err = json.Marshal(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tum := jsonpb.Unmarshaler{AllowUnknownFields: !strict}\n\treturn um.Unmarshal(bytes.NewReader(ba), dst)\n}\n<commit_msg>Add more context to errors returned by config validation (#459)<commit_after>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package config handles configuration ingestion and processing.\n\/\/ Validator\n\/\/ 1. Accepts new configuration from user\n\/\/ 2. Validates configuration\n\/\/ 3. Produces a \"ValidatedConfig\"\n\/\/ Runtime\n\/\/ 1. It is validated and actionable configuration\n\/\/ 2. It resolves the configuration to a list of Combined {aspect, adapter} configs\n\/\/ given an attribute.Bag.\n\/\/ 3. Combined config has complete information needed to dispatch aspect\npackage config\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/gogo\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"istio.io\/mixer\/pkg\/adapter\"\n\t\"istio.io\/mixer\/pkg\/config\/descriptor\"\n\tpb \"istio.io\/mixer\/pkg\/config\/proto\"\n\t\"istio.io\/mixer\/pkg\/expr\"\n)\n\ntype (\n\t\/\/ AspectParams describes configuration parameters for an aspect.\n\tAspectParams proto.Message\n\n\t\/\/ AspectValidator describes a type that is able to validate Aspect configuration.\n\tAspectValidator interface {\n\t\t\/\/ DefaultConfig returns a default configuration struct for this\n\t\t\/\/ adapter. This will be used by the configuration system to establish\n\t\t\/\/ the shape of the block of configuration state passed to the NewAspect method.\n\t\tDefaultConfig() (c AspectParams)\n\n\t\t\/\/ ValidateConfig determines whether the given configuration meets all correctness requirements.\n\t\tValidateConfig(c AspectParams, validator expr.Validator, finder descriptor.Finder) *adapter.ConfigErrors\n\t}\n\n\t\/\/ AdapterValidatorFinder is used to find specific underlying validators.\n\t\/\/ Manager registry and adapter registry should implement this interface\n\t\/\/ so ConfigValidators can be uniformly accessed.\n\tAdapterValidatorFinder func(name string) (adapter.ConfigValidator, bool)\n\n\t\/\/ AspectValidatorFinder is used to find specific underlying validators.\n\t\/\/ Manager registry and adapter registry should implement this interface\n\t\/\/ so ConfigValidators can be uniformly accessed.\n\tAspectValidatorFinder func(name string) (AspectValidator, bool)\n\n\t\/\/ AdapterToAspectMapper given an pb.Adapter.Impl\n\t\/\/ This is specifically *not* querying by pb.Adapter.Name\n\t\/\/ returns a list of aspects the adapter provides.\n\t\/\/ For many adapter this will be exactly 1 aspect.\n\tAdapterToAspectMapper func(impl string) []string\n)\n\n\/\/ NewValidator returns a validator given component validators.\nfunc NewValidator(managerFinder AspectValidatorFinder, adapterFinder AdapterValidatorFinder,\n\tfindAspects AdapterToAspectMapper, strict bool, exprValidator expr.Validator) *Validator {\n\treturn &Validator{\n\t\tmanagerFinder: managerFinder,\n\t\tadapterFinder: adapterFinder,\n\t\tfindAspects: findAspects,\n\t\tstrict: strict,\n\t\texprValidator: exprValidator,\n\t\tvalidated: &Validated{},\n\t}\n}\n\ntype (\n\t\/\/ Validator is the Configuration validator.\n\tValidator struct {\n\t\tmanagerFinder AspectValidatorFinder\n\t\tadapterFinder AdapterValidatorFinder\n\t\tfindAspects AdapterToAspectMapper\n\t\tdescriptorFinder descriptor.Finder\n\t\tstrict bool\n\t\texprValidator expr.Validator\n\t\tvalidated *Validated\n\t}\n\n\tadapterKey struct {\n\t\tkind string\n\t\tname string\n\t}\n\t\/\/ Validated store validated configuration.\n\t\/\/ It has been validated as internally consistent and correct.\n\tValidated struct {\n\t\tadapterByName map[adapterKey]*pb.Adapter\n\t\tglobalConfig *pb.GlobalConfig\n\t\tserviceConfig *pb.ServiceConfig\n\t\tnumAspects int\n\t}\n)\n\nfunc (a adapterKey) String() string {\n\treturn fmt.Sprintf(\"%s\/\/%s\", a.kind, a.name)\n}\n\n\/\/ validateGlobalConfig consumes a yml config string with adapter config.\n\/\/ It is validated in presence of validators.\nfunc (p *Validator) validateGlobalConfig(cfg string) (ce *adapter.ConfigErrors) {\n\tvar m = &pb.GlobalConfig{}\n\tif err := yaml.Unmarshal([]byte(cfg), m); err != nil {\n\t\treturn ce.Appendf(\"GlobalConfig\", \"failed to unmarshal config into proto with err: %v\", err)\n\t}\n\n\tp.validated.adapterByName = make(map[adapterKey]*pb.Adapter)\n\tvar acfg adapter.Config\n\tvar err *adapter.ConfigErrors\n\tfor _, aa := range m.GetAdapters() {\n\t\tif acfg, err = ConvertAdapterParams(p.adapterFinder, aa.Impl, aa.Params, p.strict); err != nil {\n\t\t\tce = ce.Appendf(\"Adapter: \"+aa.Impl, \"failed to convert aspect params to proto with err: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\taa.Params = acfg\n\t\t\/\/ check which kinds aa.Impl provides\n\t\t\/\/ Then register it for all of them.\n\t\tfor _, kind := range p.findAspects(aa.Impl) {\n\t\t\tp.validated.adapterByName[adapterKey{kind, aa.Name}] = aa\n\t\t}\n\t}\n\tp.validated.globalConfig = m\n\treturn\n}\n\n\/\/ ValidateSelector ensures that the selector is valid per expression language.\nfunc (p *Validator) validateSelector(selector string) (err error) {\n\t\/\/ empty selector always selects\n\tif len(selector) == 0 {\n\t\treturn nil\n\t}\n\treturn p.exprValidator.Validate(selector)\n}\n\n\/\/ validateAspectRules validates the recursive configuration data structure.\n\/\/ It is primarily used by validate ServiceConfig.\nfunc (p *Validator) validateAspectRules(rules []*pb.AspectRule, path string, validatePresence bool) (ce *adapter.ConfigErrors) {\n\tvar acfg adapter.Config\n\tfor _, rule := range rules {\n\t\tif err := p.validateSelector(rule.GetSelector()); err != nil {\n\t\t\tce = ce.Append(path+\":Selector \"+rule.GetSelector(), err)\n\t\t}\n\t\tvar err *adapter.ConfigErrors\n\t\tpath = path + \"\/\" + rule.GetSelector()\n\t\tfor idx, aa := range rule.GetAspects() {\n\t\t\tif acfg, err = ConvertAspectParams(p.managerFinder, aa.Kind, aa.GetParams(), p.strict, p.descriptorFinder); err != nil {\n\t\t\t\tce = ce.Appendf(fmt.Sprintf(\"%s:%s[%d]\", path, aa.Kind, idx), \"failed to parse params with err: %#v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taa.Params = acfg\n\t\t\tp.validated.numAspects++\n\t\t\tif validatePresence {\n\t\t\t\tif aa.Adapter == \"\" {\n\t\t\t\t\taa.Adapter = \"default\"\n\t\t\t\t}\n\t\t\t\t\/\/ ensure that aa.Kind has a registered adapter\n\t\t\t\tak := adapterKey{aa.Kind, aa.Adapter}\n\t\t\t\tif p.validated.adapterByName[ak] == nil {\n\t\t\t\t\tce = ce.Appendf(\"NamedAdapter\", \"%s not available\", ak)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trs := rule.GetRules()\n\t\tif len(rs) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif verr := p.validateAspectRules(rs, path, validatePresence); verr != nil {\n\t\t\tce = ce.Extend(verr)\n\t\t}\n\t}\n\treturn ce\n}\n\n\/\/ Validate validates a single serviceConfig and globalConfig together.\n\/\/ It returns a fully validated Config if no errors are found.\nfunc (p *Validator) Validate(serviceCfg string, globalCfg string) (rt *Validated, ce *adapter.ConfigErrors) {\n\tif re := p.validateGlobalConfig(globalCfg); re != nil {\n\t\treturn rt, ce.Appendf(\"GlobalConfig\", \"failed validation\").Extend(re)\n\t}\n\t\/\/ The order is important here, because serviceConfig refers to global config\n\tp.descriptorFinder = descriptor.NewFinder(p.validated.globalConfig)\n\n\tif re := p.validateServiceConfig(serviceCfg, true); re != nil {\n\t\treturn rt, ce.Appendf(\"ServiceConfig\", \"failed validation\").Extend(re)\n\t}\n\treturn p.validated, nil\n}\n\n\/\/ ValidateServiceConfig validates service config.\n\/\/ if validatePresence is true it will ensure that the named adapter and Kinds\n\/\/ have an available and configured adapter.\nfunc (p *Validator) validateServiceConfig(cfg string, validatePresence bool) (ce *adapter.ConfigErrors) {\n\tvar err error\n\tm := &pb.ServiceConfig{}\n\tif err = yaml.Unmarshal([]byte(cfg), m); err != nil {\n\t\treturn ce.Appendf(\"ServiceConfig\", \"failed to unmarshal config into proto with err: %v\", err)\n\t}\n\tif ce = p.validateAspectRules(m.GetRules(), \"\", validatePresence); ce != nil {\n\t\treturn ce\n\t}\n\tp.validated.serviceConfig = m\n\treturn\n}\n\n\/\/ UnknownValidator returns error for the given name.\nfunc UnknownValidator(name string) error {\n\treturn fmt.Errorf(\"unknown type [%s]\", name)\n}\n\n\/\/ ConvertAdapterParams converts returns a typed proto message based on available Validator.\nfunc ConvertAdapterParams(f AdapterValidatorFinder, name string, params interface{}, strict bool) (ac adapter.Config, ce *adapter.ConfigErrors) {\n\tvar avl adapter.ConfigValidator\n\tvar found bool\n\n\tif avl, found = f(name); !found {\n\t\treturn nil, ce.Append(name, UnknownValidator(name))\n\t}\n\n\tac = avl.DefaultConfig()\n\tif err := Decode(params, ac, strict); err != nil {\n\t\treturn nil, ce.Appendf(name, \"failed to decode adapter params with err: %v\", err)\n\t}\n\tif err := avl.ValidateConfig(ac); err != nil {\n\t\treturn nil, ce.Appendf(name, \"adapter validation failed with err: %v\", err)\n\t}\n\treturn ac, nil\n}\n\n\/\/ ConvertAspectParams converts returns a typed proto message based on available Validator.\nfunc ConvertAspectParams(f AspectValidatorFinder, name string, params interface{}, strict bool, df descriptor.Finder) (AspectParams, *adapter.ConfigErrors) {\n\tvar ce *adapter.ConfigErrors\n\tvar avl AspectValidator\n\tvar found bool\n\n\tif avl, found = f(name); !found {\n\t\treturn nil, ce.Append(name, UnknownValidator(name))\n\t}\n\n\tap := avl.DefaultConfig()\n\tif err := Decode(params, ap, strict); err != nil {\n\t\treturn nil, ce.Appendf(name, \"failed to decode aspect params with err: %v\", err)\n\t}\n\tif err := avl.ValidateConfig(ap, expr.NewCEXLEvaluator(), df); err != nil {\n\t\treturn nil, ce.Appendf(name, \"aspect validation failed with err: %v\", err)\n\t}\n\treturn ap, nil\n}\n\n\/\/ Decode interprets src interface{} as the specified proto message.\n\/\/ if strict is true returns error on unknown fields.\nfunc Decode(src interface{}, dst adapter.Config, strict bool) error {\n\tba, err := json.Marshal(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal config into json with err: %v\", err)\n\t}\n\tum := jsonpb.Unmarshaler{AllowUnknownFields: !strict}\n\tif err := um.Unmarshal(bytes.NewReader(ba), dst); err != nil {\n\t\treturn fmt.Errorf(\"failed to unmarshal config into proto with err: %v\", err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttestcase := `<http:\/\/example.com\/v2\/_catalog?n=20&last=b>; rel=\"next\"`\n\n\turl, err := parseLinkToNextHeader(testcase)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif url.Scheme != \"http\" ||\n\t\turl.Host != \"example.com\" ||\n\t\turl.Path != \"\/v2\/_catalog\" ||\n\t\t!reflect.DeepEqual((map[string][]string)(url.Query()), map[string][]string{\n\t\t\t\"n\": []string{\"20\"},\n\t\t\t\"last\": []string{\"b\"},\n\t\t}) {\n\n\t\tt.Fatalf(\"parsing failed; got %s\", url.String())\n\t}\n}\n\nfunc TestOarseInvalid(t *testing.T) {\n\ttestcase := `<http:\/\/example.com\/v2\/_catalog?n=20&last=b; rel=\"next\"`\n\n\t_, err := parseLinkToNextHeader(testcase)\n\n\tif err == nil {\n\t\tt.Fatal(\"parsing an invalid header should fail\")\n\t}\n}\n<commit_msg>Typo.<commit_after>package lib\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttestcase := `<http:\/\/example.com\/v2\/_catalog?n=20&last=b>; rel=\"next\"`\n\n\turl, err := parseLinkToNextHeader(testcase)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif url.Scheme != \"http\" ||\n\t\turl.Host != \"example.com\" ||\n\t\turl.Path != \"\/v2\/_catalog\" ||\n\t\t!reflect.DeepEqual((map[string][]string)(url.Query()), map[string][]string{\n\t\t\t\"n\": []string{\"20\"},\n\t\t\t\"last\": []string{\"b\"},\n\t\t}) {\n\n\t\tt.Fatalf(\"parsing failed; got %s\", url.String())\n\t}\n}\n\nfunc TestParseInvalid(t *testing.T) {\n\ttestcase := `<http:\/\/example.com\/v2\/_catalog?n=20&last=b; rel=\"next\"`\n\n\t_, err := parseLinkToNextHeader(testcase)\n\n\tif err == nil {\n\t\tt.Fatal(\"parsing an invalid header should fail\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package libvirt\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n\n\t\"github.com\/mistifyio\/mistify-agent\/client\"\n)\n\nvar domainTemplate *template.Template\nvar networkTemplate *template.Template\n\nfunc init() {\n\tconst domainXML = `\n<domain type=\"{{.Type}}\">\n <name>{{.Id}}<\/name>\n <memory unit=\"MiB\">{{.Memory}}<\/memory>\n <vcpu>{{.Cpu}}<\/vcpu>\n\n {{if .Metadata}}\n <metadata>\n {{range .Metadata}}\n {{end}}\n <\/metadata>\n {{end}}\n\n <os>\n <type>hvm<\/type>\n <\/os>\n <devices>\n {{range .Nics}}\n <interface type=\"network\">\n <source network='{{.Mac}}' porgroup='vlan-all' \/>\n\t {{if .Name}}<guest dev=\"{{.Name}}\" \/>{{end}}\n {{if .Mac}}<mac address=\"{{.Mac}}\" \/>{{end}}\n {{if .Model}}<model type=\"{{.Model}}\" \/>{{end}}\n <\/interface>\n {{end}}\n\n {{range .Disks}}\n <disk type=\"block\" device=\"disk\">\n <driver name=\"qemu\" type=\"raw\" \/>\n <source dev=\"{{.Source}}\" \/>\n <target dev=\"{{.Device}}\" bus=\"{{.Bus}}\" \/>\n <\/disk>\n {{end}}\n <\/devices>\n<\/domain>\n`\n\tdomainTemplate = template.Must(template.New(\"domainXML\").Parse(domainXML))\n\n\tconst networkXML = `\n<network>\n\t<name>{{.Mac}}<\/name>\n\t<forward mode='bridge' \/>\n\t<bridge name='{{.Network}}' \/>\n\t<virtualport type='openvswitch' \/>\n\t<portgroup name='vlan-all'>\n\t\t<vlan>\n\t\t{{range .VLANs}}\n\t\t\t<tag id ='{{.}}' \/>\n\t\t{{end}}\n\t\t<\/vlan>\n\t<\/portgroup>\n<\/network>\n`\n\tnetworkTemplate = template.Must(template.New(\"networkXML\").Parse(networkXML))\n}\n\n\/\/ DomainXML populates a libvirt domain xml template with guest properties\nfunc (lv *Libvirt) DomainXML(guest *client.Guest) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := domainTemplate.Execute(buf, guest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n\n\/\/ NetworkXML populates a libvirt network xml template with guest properties\nfunc (lv *Libvirt) NetworkXML(nic client.Nic) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := networkTemplate.Execute(buf, nic)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n<commit_msg>Typo in portgroup<commit_after>package libvirt\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n\n\t\"github.com\/mistifyio\/mistify-agent\/client\"\n)\n\nvar domainTemplate *template.Template\nvar networkTemplate *template.Template\n\nfunc init() {\n\tconst domainXML = `\n<domain type=\"{{.Type}}\">\n <name>{{.Id}}<\/name>\n <memory unit=\"MiB\">{{.Memory}}<\/memory>\n <vcpu>{{.Cpu}}<\/vcpu>\n\n {{if .Metadata}}\n <metadata>\n {{range .Metadata}}\n {{end}}\n <\/metadata>\n {{end}}\n\n <os>\n <type>hvm<\/type>\n <\/os>\n <devices>\n {{range .Nics}}\n <interface type=\"network\">\n <source network='{{.Mac}}' portgroup='vlan-all' \/>\n\t {{if .Name}}<guest dev=\"{{.Name}}\" \/>{{end}}\n {{if .Mac}}<mac address=\"{{.Mac}}\" \/>{{end}}\n {{if .Model}}<model type=\"{{.Model}}\" \/>{{end}}\n <\/interface>\n {{end}}\n\n {{range .Disks}}\n <disk type=\"block\" device=\"disk\">\n <driver name=\"qemu\" type=\"raw\" \/>\n <source dev=\"{{.Source}}\" \/>\n <target dev=\"{{.Device}}\" bus=\"{{.Bus}}\" \/>\n <\/disk>\n {{end}}\n <\/devices>\n<\/domain>\n`\n\tdomainTemplate = template.Must(template.New(\"domainXML\").Parse(domainXML))\n\n\tconst networkXML = `\n<network>\n\t<name>{{.Mac}}<\/name>\n\t<forward mode='bridge' \/>\n\t<bridge name='{{.Network}}' \/>\n\t<virtualport type='openvswitch' \/>\n\t<portgroup name='vlan-all'>\n\t\t<vlan>\n\t\t{{range .VLANs}}\n\t\t\t<tag id ='{{.}}' \/>\n\t\t{{end}}\n\t\t<\/vlan>\n\t<\/portgroup>\n<\/network>\n`\n\tnetworkTemplate = template.Must(template.New(\"networkXML\").Parse(networkXML))\n}\n\n\/\/ DomainXML populates a libvirt domain xml template with guest properties\nfunc (lv *Libvirt) DomainXML(guest *client.Guest) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := domainTemplate.Execute(buf, guest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n\n\/\/ NetworkXML populates a libvirt network xml template with guest properties\nfunc (lv *Libvirt) NetworkXML(nic client.Nic) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := networkTemplate.Execute(buf, nic)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gcache\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/drive\/v3\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nvar (\n\toauth2TokenSource oauth2.TokenSource \/\/ The token is valid for 30 minutes.\n)\n\n\/\/ GetGDriveFile returns a file on Google Drive.\nfunc GetGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t[]byte, \/\/ payload\n\terror,\n) {\n\tvar payload []byte\n\nretry:\n\tservice, err := drive.New(createGDriveClient(r))\n\tif err != nil {\n\t\tif IsInvalidSecurityTicket(err) {\n\t\t\toauth2TokenSource = nil\n\t\t\tgoto retry\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tfileList, err := service.Files.List().PageSize(1).Spaces(\"drive\").Q(fmt.Sprintf(\"name='%s'\", name)).Fields(field).Do()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(fileList.Files) <= 0 {\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tfile := fileList.Files[0]\n\tfileID := file.Id\n\thttpResponse, err := service.Files.Export(fileID, mimeTxt).Download()\n\tif IsFileNotExportableError(err) {\n\t\terr = service.Files.Delete(fileID).Do()\n\t\tif err != nil {\n\t\t\t\/\/ pass\n\t\t}\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdefer httpResponse.Body.Close()\n\tpayload, err = ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn file, payload, err\n}\n<commit_msg>add GetGDriveService<commit_after>package gcache\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/drive\/v3\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nvar (\n\toauth2TokenSource oauth2.TokenSource \/\/ The token is valid for 30 minutes.\n)\n\n\/\/ GetGDriveService returns the API service of Google Drive.\nfunc GetGDriveService(\n\tr *http.Request,\n) (\n\t*drive.Service,\n\terror,\n) {\nretry:\n\tservice, err := drive.New(createGDriveClient(r))\n\tif err != nil {\n\t\tif IsInvalidSecurityTicket(err) {\n\t\t\toauth2TokenSource = nil\n\t\t\tgoto retry\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn service, nil\n}\n\n\/\/ GetGDriveFile returns a file on Google Drive.\nfunc GetGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t[]byte, \/\/ payload\n\terror,\n) {\n\tvar payload []byte\n\nretry:\n\tservice, err := drive.New(createGDriveClient(r))\n\tif err != nil {\n\t\tif IsInvalidSecurityTicket(err) {\n\t\t\toauth2TokenSource = nil\n\t\t\tgoto retry\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tfileList, err := service.Files.List().PageSize(1).Spaces(\"drive\").Q(fmt.Sprintf(\"name='%s'\", name)).Fields(field).Do()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(fileList.Files) <= 0 {\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tfile := fileList.Files[0]\n\tfileID := file.Id\n\thttpResponse, err := service.Files.Export(fileID, mimeTxt).Download()\n\tif IsFileNotExportableError(err) {\n\t\terr = service.Files.Delete(fileID).Do()\n\t\tif err != nil {\n\t\t\t\/\/ pass\n\t\t}\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdefer httpResponse.Body.Close()\n\tpayload, err = ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn file, payload, err\n}\n<|endoftext|>"} {"text":"<commit_before>package arn\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/animenotifier\/arn\/autocorrect\"\n\t\"github.com\/animenotifier\/arn\/validator\"\n\tgravatar \"github.com\/ungerik\/go-gravatar\"\n)\n\nvar setNickMutex sync.Mutex\nvar setEmailMutex sync.Mutex\n\n\/\/ User ...\ntype User struct {\n\tID string `json:\"id\"`\n\tNick string `json:\"nick\" editable:\"true\"`\n\tFirstName string `json:\"firstName\"`\n\tLastName string `json:\"lastName\"`\n\tEmail string `json:\"email\"`\n\tRole string `json:\"role\"`\n\tRegistered string `json:\"registered\"`\n\tLastLogin string `json:\"lastLogin\"`\n\tLastSeen string `json:\"lastSeen\"`\n\tProExpires string `json:\"proExpires\"`\n\tGender string `json:\"gender\"`\n\tLanguage string `json:\"language\"`\n\tTagline string `json:\"tagline\" editable:\"true\"`\n\tWebsite string `json:\"website\" editable:\"true\"`\n\tIP string `json:\"ip\"`\n\tUserAgent string `json:\"agent\"`\n\tBalance int `json:\"balance\"`\n\tAvatar UserAvatar `json:\"avatar\"`\n\tAgeRange UserAgeRange `json:\"ageRange\"`\n\tLocation UserLocation `json:\"location\"`\n\tAccounts UserAccounts `json:\"accounts\"`\n\tBrowser UserBrowser `json:\"browser\"`\n\tOS UserOS `json:\"os\"`\n\tFollowing []string `json:\"following\"`\n\n\tsettings *Settings\n\tanimeList *AnimeList\n\tfollows *UserFollows\n\tdraftIndex *DraftIndex\n}\n\n\/\/ NewUser creates an empty user object with a unique ID.\nfunc NewUser() *User {\n\tuser := &User{\n\t\tID: GenerateID(\"User\"),\n\n\t\t\/\/ Avoid nil value fields\n\t\tFollowing: make([]string, 0),\n\t}\n\n\treturn user\n}\n\n\/\/ RegisterUser registers a new user in the database and sets up all the required references.\nfunc RegisterUser(user *User) {\n\tuser.Registered = DateTimeUTC()\n\tuser.LastLogin = user.Registered\n\tuser.LastSeen = user.Registered\n\n\t\/\/ Save nick in NickToUser table\n\tDB.Set(\"NickToUser\", user.Nick, &NickToUser{\n\t\tNick: user.Nick,\n\t\tUserID: user.ID,\n\t})\n\n\t\/\/ Save email in EmailToUser table\n\tDB.Set(\"EmailToUser\", user.Email, &EmailToUser{\n\t\tEmail: user.Email,\n\t\tUserID: user.ID,\n\t})\n\n\t\/\/ Create default settings\n\tNewSettings(user.ID).Save()\n\n\t\/\/ Add empty anime list\n\tDB.Set(\"AnimeList\", user.ID, &AnimeList{\n\t\tUserID: user.ID,\n\t\tItems: []*AnimeListItem{},\n\t})\n\n\t\/\/ Add empty inventory\n\tNewInventory(user.ID).Save()\n\n\t\/\/ Add draft index\n\tNewDraftIndex(user.ID).Save()\n\n\t\/\/ Add empty push subscriptions\n\tDB.Set(\"PushSubscriptions\", user.ID, &PushSubscriptions{\n\t\tUserID: user.ID,\n\t\tItems: []*PushSubscription{},\n\t})\n\n\t\/\/ Add empty follow list\n\tfollows := &UserFollows{}\n\tfollows.UserID = user.ID\n\tfollows.Items = []string{}\n\n\tDB.Set(\"UserFollows\", user.ID, follows)\n\n\t\/\/ Refresh avatar async\n\tgo user.RefreshAvatar()\n}\n\n\/\/ SendNotification ...\nfunc (user *User) SendNotification(notification *Notification) {\n\t\/\/ Don't ever send notifications in development mode\n\tif IsDevelopment() && user.ID != \"4J6qpK1ve\" {\n\t\treturn\n\t}\n\n\tsubs := user.PushSubscriptions()\n\texpired := []*PushSubscription{}\n\n\tfor _, sub := range subs.Items {\n\t\terr := sub.SendNotification(notification)\n\n\t\tif err != nil {\n\t\t\tif err.Error() == \"Subscription expired\" {\n\t\t\t\texpired = append(expired, sub)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Remove expired items\n\tif len(expired) > 0 {\n\t\tfor _, sub := range expired {\n\t\t\tsubs.Remove(sub.ID())\n\t\t}\n\n\t\tsubs.Save()\n\t}\n}\n\n\/\/ RealName returns the real name of the user.\nfunc (user *User) RealName() string {\n\tif user.LastName == \"\" {\n\t\treturn user.FirstName\n\t}\n\n\tif user.FirstName == \"\" {\n\t\treturn user.LastName\n\t}\n\n\treturn user.FirstName + \" \" + user.LastName\n}\n\n\/\/ RegisteredTime ...\nfunc (user *User) RegisteredTime() time.Time {\n\treg, _ := time.Parse(time.RFC3339, user.Registered)\n\treturn reg\n}\n\n\/\/ IsActive ...\nfunc (user *User) IsActive() bool {\n\t\/\/ Exclude people who didn't change their nickname.\n\tif !user.HasNick() {\n\t\treturn false\n\t}\n\n\tlastSeen, _ := time.Parse(time.RFC3339, user.LastSeen)\n\toneWeekAgo := time.Now().Add(-7 * 24 * time.Hour)\n\n\tif lastSeen.Unix() < oneWeekAgo.Unix() {\n\t\treturn false\n\t}\n\n\tif len(user.AnimeList().Items) == 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ IsPro ...\nfunc (user *User) IsPro() bool {\n\tif user.ProExpires == \"\" {\n\t\treturn false\n\t}\n\n\treturn DateTimeUTC() < user.ProExpires\n}\n\n\/\/ ExtendProDuration ...\nfunc (user *User) ExtendProDuration(duration time.Duration) {\n\tvar startDate time.Time\n\n\tif user.ProExpires == \"\" {\n\t\tstartDate = time.Now().UTC()\n\t} else {\n\t\tstartDate, _ = time.Parse(time.RFC3339, user.ProExpires)\n\t}\n\n\tuser.ProExpires = startDate.Add(duration).Format(time.RFC3339)\n}\n\n\/\/ TimeSinceRegistered ...\nfunc (user *User) TimeSinceRegistered() time.Duration {\n\tregistered, _ := time.Parse(time.RFC3339, user.Registered)\n\treturn time.Since(registered)\n}\n\n\/\/ HasNick returns whether the user has a custom nickname.\nfunc (user *User) HasNick() bool {\n\treturn !strings.HasPrefix(user.Nick, \"g\") && !strings.HasPrefix(user.Nick, \"fb\") && !strings.HasPrefix(user.Nick, \"t\") && user.Nick != \"\"\n}\n\n\/\/ WebsiteURL adds https:\/\/ to the URL.\nfunc (user *User) WebsiteURL() string {\n\treturn \"https:\/\/\" + user.WebsiteShortURL()\n}\n\n\/\/ WebsiteShortURL ...\nfunc (user *User) WebsiteShortURL() string {\n\treturn strings.Replace(strings.Replace(user.Website, \"https:\/\/\", \"\", 1), \"http:\/\/\", \"\", 1)\n}\n\n\/\/ Link returns the URI to the user page.\nfunc (user *User) Link() string {\n\treturn \"\/+\" + user.Nick\n}\n\n\/\/ CoverImageURL ...\nfunc (user *User) CoverImageURL() string {\n\treturn \"\/images\/cover\/default.png\"\n}\n\n\/\/ HasAvatar ...\nfunc (user *User) HasAvatar() bool {\n\treturn user.Avatar.Extension != \"\"\n}\n\n\/\/ SmallAvatar ...\nfunc (user *User) SmallAvatar() string {\n\treturn \"\/\/media.notify.moe\/images\/avatars\/small\/\" + user.ID + user.Avatar.Extension\n}\n\n\/\/ LargeAvatar ...\nfunc (user *User) LargeAvatar() string {\n\treturn \"\/\/media.notify.moe\/images\/avatars\/large\/\" + user.ID + user.Avatar.Extension\n}\n\n\/\/ Gravatar ...\nfunc (user *User) Gravatar() string {\n\tif user.Email == \"\" {\n\t\treturn \"\"\n\t}\n\n\treturn gravatar.SecureUrl(user.Email) + \"?s=\" + fmt.Sprint(AvatarMaxSize)\n}\n\n\/\/ PushSubscriptions ...\nfunc (user *User) PushSubscriptions() *PushSubscriptions {\n\tsubs, _ := GetPushSubscriptions(user.ID)\n\treturn subs\n}\n\n\/\/ Inventory ...\nfunc (user *User) Inventory() *Inventory {\n\tinventory, _ := GetInventory(user.ID)\n\treturn inventory\n}\n\n\/\/ ActivateItemEffect ...\nfunc (user *User) ActivateItemEffect(itemID string) error {\n\tmonth := 30 * 24 * time.Hour\n\n\tswitch itemID {\n\tcase \"pro-account-3\":\n\t\tuser.ExtendProDuration(3 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-6\":\n\t\tuser.ExtendProDuration(6 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-12\":\n\t\tuser.ExtendProDuration(12 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-24\":\n\t\tuser.ExtendProDuration(24 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tdefault:\n\t\treturn errors.New(\"Can't activate unknown item: \" + itemID)\n\t}\n}\n\n\/\/ SetNick changes the user's nickname safely.\nfunc (user *User) SetNick(newName string) error {\n\tsetNickMutex.Lock()\n\tdefer setNickMutex.Unlock()\n\n\tnewName = autocorrect.FixUserNick(newName)\n\n\tif !validator.IsValidNick(newName) {\n\t\treturn errors.New(\"Invalid nickname\")\n\t}\n\n\tif newName == user.Nick {\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure the nickname doesn't exist already\n\t_, err := GetUserByNick(newName)\n\n\t\/\/ If there was no error: the username exists.\n\t\/\/ If \"not found\" is not included in the error message it's a different error type.\n\tif err == nil || strings.Index(err.Error(), \"not found\") == -1 {\n\t\treturn errors.New(\"Username '\" + newName + \"' is taken already\")\n\t}\n\n\tuser.ForceSetNick(newName)\n\treturn nil\n}\n\n\/\/ ForceSetNick forces a nickname overwrite.\nfunc (user *User) ForceSetNick(newName string) {\n\t\/\/ Delete old nick reference\n\tDB.Delete(\"NickToUser\", user.Nick)\n\n\t\/\/ Set new nick\n\tuser.Nick = newName\n\n\tDB.Set(\"NickToUser\", user.Nick, &NickToUser{\n\t\tNick: user.Nick,\n\t\tUserID: user.ID,\n\t})\n}\n\n\/\/ SetEmail changes the user's email safely.\nfunc (user *User) SetEmail(newName string) error {\n\tsetEmailMutex.Lock()\n\tdefer setEmailMutex.Unlock()\n\n\tif !validator.IsValidEmail(user.Email) {\n\t\treturn errors.New(\"Invalid email address\")\n\t}\n\n\t\/\/ Delete old email reference\n\tDB.Delete(\"EmailToUser\", user.Email)\n\n\t\/\/ Set new email\n\tuser.Email = newName\n\n\tDB.Set(\"EmailToUser\", user.Email, &EmailToUser{\n\t\tEmail: user.Email,\n\t\tUserID: user.ID,\n\t})\n\n\treturn nil\n}\n<commit_msg>Fixed image URL<commit_after>package arn\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/animenotifier\/arn\/autocorrect\"\n\t\"github.com\/animenotifier\/arn\/validator\"\n\tgravatar \"github.com\/ungerik\/go-gravatar\"\n)\n\nvar setNickMutex sync.Mutex\nvar setEmailMutex sync.Mutex\n\n\/\/ User ...\ntype User struct {\n\tID string `json:\"id\"`\n\tNick string `json:\"nick\" editable:\"true\"`\n\tFirstName string `json:\"firstName\"`\n\tLastName string `json:\"lastName\"`\n\tEmail string `json:\"email\"`\n\tRole string `json:\"role\"`\n\tRegistered string `json:\"registered\"`\n\tLastLogin string `json:\"lastLogin\"`\n\tLastSeen string `json:\"lastSeen\"`\n\tProExpires string `json:\"proExpires\"`\n\tGender string `json:\"gender\"`\n\tLanguage string `json:\"language\"`\n\tTagline string `json:\"tagline\" editable:\"true\"`\n\tWebsite string `json:\"website\" editable:\"true\"`\n\tIP string `json:\"ip\"`\n\tUserAgent string `json:\"agent\"`\n\tBalance int `json:\"balance\"`\n\tAvatar UserAvatar `json:\"avatar\"`\n\tAgeRange UserAgeRange `json:\"ageRange\"`\n\tLocation UserLocation `json:\"location\"`\n\tAccounts UserAccounts `json:\"accounts\"`\n\tBrowser UserBrowser `json:\"browser\"`\n\tOS UserOS `json:\"os\"`\n\tFollowing []string `json:\"following\"`\n\n\tsettings *Settings\n\tanimeList *AnimeList\n\tfollows *UserFollows\n\tdraftIndex *DraftIndex\n}\n\n\/\/ NewUser creates an empty user object with a unique ID.\nfunc NewUser() *User {\n\tuser := &User{\n\t\tID: GenerateID(\"User\"),\n\n\t\t\/\/ Avoid nil value fields\n\t\tFollowing: make([]string, 0),\n\t}\n\n\treturn user\n}\n\n\/\/ RegisterUser registers a new user in the database and sets up all the required references.\nfunc RegisterUser(user *User) {\n\tuser.Registered = DateTimeUTC()\n\tuser.LastLogin = user.Registered\n\tuser.LastSeen = user.Registered\n\n\t\/\/ Save nick in NickToUser table\n\tDB.Set(\"NickToUser\", user.Nick, &NickToUser{\n\t\tNick: user.Nick,\n\t\tUserID: user.ID,\n\t})\n\n\t\/\/ Save email in EmailToUser table\n\tDB.Set(\"EmailToUser\", user.Email, &EmailToUser{\n\t\tEmail: user.Email,\n\t\tUserID: user.ID,\n\t})\n\n\t\/\/ Create default settings\n\tNewSettings(user.ID).Save()\n\n\t\/\/ Add empty anime list\n\tDB.Set(\"AnimeList\", user.ID, &AnimeList{\n\t\tUserID: user.ID,\n\t\tItems: []*AnimeListItem{},\n\t})\n\n\t\/\/ Add empty inventory\n\tNewInventory(user.ID).Save()\n\n\t\/\/ Add draft index\n\tNewDraftIndex(user.ID).Save()\n\n\t\/\/ Add empty push subscriptions\n\tDB.Set(\"PushSubscriptions\", user.ID, &PushSubscriptions{\n\t\tUserID: user.ID,\n\t\tItems: []*PushSubscription{},\n\t})\n\n\t\/\/ Add empty follow list\n\tfollows := &UserFollows{}\n\tfollows.UserID = user.ID\n\tfollows.Items = []string{}\n\n\tDB.Set(\"UserFollows\", user.ID, follows)\n\n\t\/\/ Refresh avatar async\n\tgo user.RefreshAvatar()\n}\n\n\/\/ SendNotification ...\nfunc (user *User) SendNotification(notification *Notification) {\n\t\/\/ Don't ever send notifications in development mode\n\tif IsDevelopment() && user.ID != \"4J6qpK1ve\" {\n\t\treturn\n\t}\n\n\tsubs := user.PushSubscriptions()\n\texpired := []*PushSubscription{}\n\n\tfor _, sub := range subs.Items {\n\t\terr := sub.SendNotification(notification)\n\n\t\tif err != nil {\n\t\t\tif err.Error() == \"Subscription expired\" {\n\t\t\t\texpired = append(expired, sub)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Remove expired items\n\tif len(expired) > 0 {\n\t\tfor _, sub := range expired {\n\t\t\tsubs.Remove(sub.ID())\n\t\t}\n\n\t\tsubs.Save()\n\t}\n}\n\n\/\/ RealName returns the real name of the user.\nfunc (user *User) RealName() string {\n\tif user.LastName == \"\" {\n\t\treturn user.FirstName\n\t}\n\n\tif user.FirstName == \"\" {\n\t\treturn user.LastName\n\t}\n\n\treturn user.FirstName + \" \" + user.LastName\n}\n\n\/\/ RegisteredTime ...\nfunc (user *User) RegisteredTime() time.Time {\n\treg, _ := time.Parse(time.RFC3339, user.Registered)\n\treturn reg\n}\n\n\/\/ IsActive ...\nfunc (user *User) IsActive() bool {\n\t\/\/ Exclude people who didn't change their nickname.\n\tif !user.HasNick() {\n\t\treturn false\n\t}\n\n\tlastSeen, _ := time.Parse(time.RFC3339, user.LastSeen)\n\toneWeekAgo := time.Now().Add(-7 * 24 * time.Hour)\n\n\tif lastSeen.Unix() < oneWeekAgo.Unix() {\n\t\treturn false\n\t}\n\n\tif len(user.AnimeList().Items) == 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ IsPro ...\nfunc (user *User) IsPro() bool {\n\tif user.ProExpires == \"\" {\n\t\treturn false\n\t}\n\n\treturn DateTimeUTC() < user.ProExpires\n}\n\n\/\/ ExtendProDuration ...\nfunc (user *User) ExtendProDuration(duration time.Duration) {\n\tvar startDate time.Time\n\n\tif user.ProExpires == \"\" {\n\t\tstartDate = time.Now().UTC()\n\t} else {\n\t\tstartDate, _ = time.Parse(time.RFC3339, user.ProExpires)\n\t}\n\n\tuser.ProExpires = startDate.Add(duration).Format(time.RFC3339)\n}\n\n\/\/ TimeSinceRegistered ...\nfunc (user *User) TimeSinceRegistered() time.Duration {\n\tregistered, _ := time.Parse(time.RFC3339, user.Registered)\n\treturn time.Since(registered)\n}\n\n\/\/ HasNick returns whether the user has a custom nickname.\nfunc (user *User) HasNick() bool {\n\treturn !strings.HasPrefix(user.Nick, \"g\") && !strings.HasPrefix(user.Nick, \"fb\") && !strings.HasPrefix(user.Nick, \"t\") && user.Nick != \"\"\n}\n\n\/\/ WebsiteURL adds https:\/\/ to the URL.\nfunc (user *User) WebsiteURL() string {\n\treturn \"https:\/\/\" + user.WebsiteShortURL()\n}\n\n\/\/ WebsiteShortURL ...\nfunc (user *User) WebsiteShortURL() string {\n\treturn strings.Replace(strings.Replace(user.Website, \"https:\/\/\", \"\", 1), \"http:\/\/\", \"\", 1)\n}\n\n\/\/ Link returns the URI to the user page.\nfunc (user *User) Link() string {\n\treturn \"\/+\" + user.Nick\n}\n\n\/\/ CoverImageURL ...\nfunc (user *User) CoverImageURL() string {\n\treturn \"\/images\/cover\/default.jpg\"\n}\n\n\/\/ HasAvatar ...\nfunc (user *User) HasAvatar() bool {\n\treturn user.Avatar.Extension != \"\"\n}\n\n\/\/ SmallAvatar ...\nfunc (user *User) SmallAvatar() string {\n\treturn \"\/\/media.notify.moe\/images\/avatars\/small\/\" + user.ID + user.Avatar.Extension\n}\n\n\/\/ LargeAvatar ...\nfunc (user *User) LargeAvatar() string {\n\treturn \"\/\/media.notify.moe\/images\/avatars\/large\/\" + user.ID + user.Avatar.Extension\n}\n\n\/\/ Gravatar ...\nfunc (user *User) Gravatar() string {\n\tif user.Email == \"\" {\n\t\treturn \"\"\n\t}\n\n\treturn gravatar.SecureUrl(user.Email) + \"?s=\" + fmt.Sprint(AvatarMaxSize)\n}\n\n\/\/ PushSubscriptions ...\nfunc (user *User) PushSubscriptions() *PushSubscriptions {\n\tsubs, _ := GetPushSubscriptions(user.ID)\n\treturn subs\n}\n\n\/\/ Inventory ...\nfunc (user *User) Inventory() *Inventory {\n\tinventory, _ := GetInventory(user.ID)\n\treturn inventory\n}\n\n\/\/ ActivateItemEffect ...\nfunc (user *User) ActivateItemEffect(itemID string) error {\n\tmonth := 30 * 24 * time.Hour\n\n\tswitch itemID {\n\tcase \"pro-account-3\":\n\t\tuser.ExtendProDuration(3 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-6\":\n\t\tuser.ExtendProDuration(6 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-12\":\n\t\tuser.ExtendProDuration(12 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tcase \"pro-account-24\":\n\t\tuser.ExtendProDuration(24 * month)\n\t\tuser.Save()\n\t\treturn nil\n\n\tdefault:\n\t\treturn errors.New(\"Can't activate unknown item: \" + itemID)\n\t}\n}\n\n\/\/ SetNick changes the user's nickname safely.\nfunc (user *User) SetNick(newName string) error {\n\tsetNickMutex.Lock()\n\tdefer setNickMutex.Unlock()\n\n\tnewName = autocorrect.FixUserNick(newName)\n\n\tif !validator.IsValidNick(newName) {\n\t\treturn errors.New(\"Invalid nickname\")\n\t}\n\n\tif newName == user.Nick {\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure the nickname doesn't exist already\n\t_, err := GetUserByNick(newName)\n\n\t\/\/ If there was no error: the username exists.\n\t\/\/ If \"not found\" is not included in the error message it's a different error type.\n\tif err == nil || strings.Index(err.Error(), \"not found\") == -1 {\n\t\treturn errors.New(\"Username '\" + newName + \"' is taken already\")\n\t}\n\n\tuser.ForceSetNick(newName)\n\treturn nil\n}\n\n\/\/ ForceSetNick forces a nickname overwrite.\nfunc (user *User) ForceSetNick(newName string) {\n\t\/\/ Delete old nick reference\n\tDB.Delete(\"NickToUser\", user.Nick)\n\n\t\/\/ Set new nick\n\tuser.Nick = newName\n\n\tDB.Set(\"NickToUser\", user.Nick, &NickToUser{\n\t\tNick: user.Nick,\n\t\tUserID: user.ID,\n\t})\n}\n\n\/\/ SetEmail changes the user's email safely.\nfunc (user *User) SetEmail(newName string) error {\n\tsetEmailMutex.Lock()\n\tdefer setEmailMutex.Unlock()\n\n\tif !validator.IsValidEmail(user.Email) {\n\t\treturn errors.New(\"Invalid email address\")\n\t}\n\n\t\/\/ Delete old email reference\n\tDB.Delete(\"EmailToUser\", user.Email)\n\n\t\/\/ Set new email\n\tuser.Email = newName\n\n\tDB.Set(\"EmailToUser\", user.Email, &EmailToUser{\n\t\tEmail: user.Email,\n\t\tUserID: user.ID,\n\t})\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2011 Nan Deng\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage pushsrv\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t. \"github.com\/uniqush\/pushsys\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype pushRequest struct {\n\tdp *DeliveryPoint\n\tnotif *Notification\n\tmid uint32\n\tresChan chan<- *PushResult\n}\n\ntype apnsPushService struct {\n\tnextid uint32\n\tconns map[string]net.Conn\n\tconnLock *sync.Mutex\n\n\tconnChan map[string]chan *pushRequest\n\tchanLock *sync.Mutex\n}\n\nfunc InstallAPNS() {\n\tGetPushServiceManager().RegisterPushServiceType(newAPNSPushService())\n}\n\nfunc newAPNSPushService() *apnsPushService {\n\tret := new(apnsPushService)\n\tret.conns = make(map[string]net.Conn, 5)\n\tret.connChan = make(map[string]chan *pushRequest, 5)\n\tret.chanLock = new(sync.Mutex)\n\tret.connLock= new(sync.Mutex)\n\treturn ret\n}\n\nfunc (p *apnsPushService) Name() string {\n\treturn \"apns\"\n}\n\nfunc (p *apnsPushService) Finalize() {\n\tfor _, c := range p.conns {\n\t\tc.Close()\n\t}\n}\nfunc (p *apnsPushService) BuildPushServiceProviderFromMap(kv map[string]string, psp *PushServiceProvider) error {\n\tif service, ok := kv[\"service\"]; ok {\n\t\tpsp.FixedData[\"service\"] = service\n\t} else {\n\t\treturn errors.New(\"NoService\")\n\t}\n\n\tif cert, ok := kv[\"cert\"]; ok && len(cert) > 0 {\n\t\tpsp.FixedData[\"cert\"] = cert\n\t} else {\n\t\treturn errors.New(\"NoCertificate\")\n\t}\n\n\tif key, ok := kv[\"key\"]; ok && len(key) > 0 {\n\t\tpsp.FixedData[\"key\"] = key\n\t} else {\n\t\treturn errors.New(\"NoPrivateKey\")\n\t}\n\n\tif sandbox, ok := kv[\"sandbox\"]; ok {\n\t\tif sandbox == \"true\" {\n\t\t\tpsp.VolatileData[\"addr\"] = \"gateway.sandbox.push.apple.com:2195\"\n\t\t\treturn nil\n\t\t}\n\t}\n\tpsp.VolatileData[\"addr\"] = \"gateway.push.apple.com:2195\"\n\treturn nil\n}\n\nfunc (p *apnsPushService) BuildDeliveryPointFromMap(kv map[string]string, dp *DeliveryPoint) error {\n\tif service, ok := kv[\"service\"]; ok && len(service) > 0 {\n\t\tdp.FixedData[\"service\"] = service\n\t} else {\n\t\treturn errors.New(\"NoService\")\n\t}\n\tif sub, ok := kv[\"subscriber\"]; ok && len(sub) > 0 {\n\t\tdp.FixedData[\"subscriber\"] = sub\n\t} else {\n\t\treturn errors.New(\"NoSubscriber\")\n\t}\n\tif devtoken, ok := kv[\"devtoken\"]; ok && len(devtoken) > 0 {\n\t\tdp.FixedData[\"devtoken\"] = devtoken\n\t} else {\n\t\treturn errors.New(\"NoDevToken\")\n\t}\n\treturn nil\n}\n\nfunc toAPNSPayload(n *Notification) ([]byte, error) {\n\tpayload := make(map[string]interface{})\n\taps := make(map[string]interface{})\n\talert := make(map[string]interface{})\n\tfor k, v := range n.Data {\n\t\tswitch k {\n\t\tcase \"msg\":\n\t\t\talert[\"body\"] = v\n\t\tcase \"badge\":\n\t\t\tb, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\taps[\"badge\"] = b\n\t\t\t}\n\t\tcase \"sound\":\n\t\t\taps[\"sound\"] = v\n\t\tcase \"img\":\n\t\t\talert[\"launch-image\"] = v\n\t\tcase \"id\":\n\t\t\tcontinue\n\t\tcase \"expiry\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tpayload[k] = v\n\t\t}\n\t}\n\taps[\"alert\"] = alert\n\tpayload[\"aps\"] = aps\n\tj, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn j, nil\n}\n\nfunc writen(w io.Writer, buf []byte) error {\n\tn := len(buf)\n\tfor n >= 0 {\n\t\tl, err := w.Write(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif l >= n {\n\t\t\treturn nil\n\t\t}\n\t\tn -= l\n\t\tbuf = buf[l:]\n\t}\n\treturn nil\n}\n\nfunc (p *apnsPushService) getConn(psp *PushServiceProvider) (net.Conn, error) {\n\tname := psp.Name()\n\tif conn, ok := p.conns[name]; ok {\n\t\treturn conn, nil\n\t}\n\treturn p.reconnect(psp)\n}\n\nfunc (p *apnsPushService) reconnect(psp *PushServiceProvider) (net.Conn, error) {\n\tname := psp.Name()\n\tp.connLock.Lock()\n\tdefer p.connLock.Unlock()\n\n\tif conn, ok := p.conns[name]; ok {\n\t\tconn.Close()\n\t}\n\tcert, err := tls.LoadX509KeyPair(psp.FixedData[\"cert\"], psp.FixedData[\"key\"])\n\tif err != nil {\n\t\treturn nil, NewBadPushServiceProviderWithDetails(psp, err.Error())\n\t}\n\tconf := &tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t\tInsecureSkipVerify: true,\n\t}\n\ttlsconn, err := tls.Dial(\"tcp\", psp.VolatileData[\"addr\"], conf)\n\tif err != nil {\n\t\treturn nil, NewConnectionError(err)\n\t}\n\terr = tlsconn.Handshake()\n\tif err != nil {\n\t\treturn nil, NewConnectionError(err)\n\t}\n\tp.conns[name] = tlsconn\n\treturn tlsconn, nil\n}\n\n\nfunc (self *apnsPushService) getRequestChannel(psp *PushServiceProvider) chan *pushRequest{\n\tvar ch chan *pushRequest\n\tvar ok bool\n\tself.chanLock.Lock()\n\tif ch, ok = self.connChan[psp.Name()]; !ok {\n\t\tch = make(chan *pushRequest)\n\t\tself.connChan[psp.Name()] = ch\n\t\tgo self.pushWorker(psp, ch)\n\t}\n\tself.chanLock.Unlock()\n\treturn ch\n}\n\ntype apnsResult struct {\n\tmsgId uint32\n\tstatus uint8\n\terr error\n}\n\nfunc (self *apnsPushService) Push(psp *PushServiceProvider, dpQueue <-chan *DeliveryPoint, resQueue chan<- *PushResult, notif *Notification) {\n\t\/* First, get the request channel *\/\n\tch := self.getRequestChannel(psp)\n\n\twg := new(sync.WaitGroup)\n\n\tfor dp := range dpQueue {\n\t\twg.Add(1)\n\t\tgo func () {\n\t\t\tresultChannel := make(chan *PushResult, 1)\n\t\t\treq := new(pushRequest)\n\t\t\treq.dp = dp\n\t\t\treq.notif = notif\n\t\t\treq.resChan = resultChannel\n\t\t\treq.mid = atomic.AddUint32(&(self.nextid), 1)\n\n\t\t\tch<-req\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase res := <-resultChannel:\n\t\t\t\t\tresQueue <- res\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t\treturn\n\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(resQueue)\n}\n\nfunc (self *apnsPushService) resultCollector(psp *PushServiceProvider, resChan chan<- *apnsResult) {\n\tc, err := self.getConn(psp)\n\tif err != nil {\n\t\tres := new(apnsResult)\n\t\tres.err = NewConnectionError(err)\n\t\tresChan<-res\n\t\treturn\n\t}\n\n\tfor {\n\t\tvar cmd uint8\n\t\tvar status uint8\n\t\tvar msgid uint32\n\n\t\terr = binary.Read(c, binary.BigEndian, &cmd)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\t\terr = binary.Read(c, binary.BigEndian, &status)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\t\terr = binary.Read(c, binary.BigEndian, &msgid)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\n\t\tres := new(apnsResult)\n\t\tres.msgId = msgid\n\t\tres.status = status\n\t\tresChan<-res\n\t}\n}\n\nfunc (self *apnsPushService) singlePush(psp *PushServiceProvider, dp *DeliveryPoint, notif *Notification, mid uint32) error {\n\tdevtoken := dp.FixedData[\"devtoken\"]\n\n\tbtoken, err := hex.DecodeString(devtoken)\n\tif err != nil {\n\t\treturn NewBadDeliveryPointWithDetails(dp, err.Error())\n\t}\n\n\tbpayload, err := toAPNSPayload(notif)\n\tif err != nil {\n\t\treturn NewBadNotificationWithDetails(err.Error())\n\t}\n\n\tbuffer := bytes.NewBuffer([]byte{})\n\n\t\/\/ command\n\tbinary.Write(buffer, binary.BigEndian, uint8(1))\n\n\t\/\/ transaction id\n\tbinary.Write(buffer, binary.BigEndian, mid)\n\n\t\/\/ TODO Expiry\n\texpiry := uint32(time.Now().Second() + 60*60)\n\tbinary.Write(buffer, binary.BigEndian, expiry)\n\n\t\/\/ device token\n\tbinary.Write(buffer, binary.BigEndian, uint16(len(btoken)))\n\tbinary.Write(buffer, binary.BigEndian, btoken)\n\n\t\/\/ payload\n\tbinary.Write(buffer, binary.BigEndian, uint16(len(bpayload)))\n\tbinary.Write(buffer, binary.BigEndian, bpayload)\n\tpdu := buffer.Bytes()\n\n\ttlsconn, err := self.getConn(psp)\n\tif err != nil {\n\t\treturn NewConnectionError(err)\n\t}\n\n\tfor i := 0; i < 2; i++ {\n\t\terr = writen(tlsconn, pdu)\n\t\tif err != nil {\n\t\t\ttlsconn, err = self.reconnect(psp)\n\t\t\tif err != nil {\n\t\t\t\treturn NewConnectionError(err)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn NewConnectionError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (self *apnsPushService) pushWorker(psp *PushServiceProvider, reqChan chan *pushRequest) {\n\tresChan := make(chan *apnsResult)\n\n\treqIdMap := make(map[uint32]*pushRequest)\n\n\tvar connErr error\n\n\tconnErr = nil\n\n\tgo self.resultCollector(psp, resChan)\n\tfor {\n\t\tselect {\n\t\tcase req := <-reqChan:\n\t\t\tdp := req.dp\n\t\t\tnotif := req.notif\n\t\t\tmid := req.mid\n\n\t\t\tif connErr != nil {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"%v\", mid)\n\t\t\t\tresult.Err = connErr\n\t\t\t\treq.resChan<-result\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treqIdMap[mid] = req\n\t\t\terr := self.singlePush(psp, dp, notif, mid)\n\n\t\t\tif err != nil {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"apns:%v-%v\", psp.Name(), mid)\n\t\t\t\tresult.Err = err\n\t\t\t\treq.resChan<-result\n\t\t\t\tdelete(reqIdMap, mid)\n\t\t\t}\n\n\t\tcase apnsres := <-resChan:\n\t\t\tif cerr, ok := apnsres.err.(*ConnectionError); ok {\n\t\t\t\tconnErr = cerr\n\t\t\t}\n\n\t\t\tif req, ok := reqIdMap[apnsres.msgId]; ok {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = req.notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = req.dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"apns:%v-%v\", psp.Name(), apnsres.msgId)\n\t\t\t\tif apnsres.err != nil {\n\t\t\t\t\tresult := new(PushResult)\n\t\t\t\t\tresult.Err = apnsres.err\n\t\t\t\t\treq.resChan<-result\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch apnsres.status {\n\t\t\t\tcase 0:\n\t\t\t\t\tresult.Err = nil\n\t\t\t\tcase 1:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Processing Error\")\n\t\t\t\tcase 2:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Missing Device Token\")\n\t\t\t\tcase 3:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Missing topic\")\n\t\t\t\tcase 4:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Missing payload\")\n\t\t\t\tcase 5:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid token size\")\n\t\t\t\tcase 6:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid topic size\")\n\t\t\t\tcase 7:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid payload size\")\n\t\t\t\tcase 8:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Invalid Token\")\n\t\t\t\tdefault:\n\t\t\t\t\tresult.Err = fmt.Errorf(\"Unknown Error: %d\", apnsres.status)\n\t\t\t\t}\n\n\t\t\t\treq.resChan<-result\n\t\t\t\tclose(req.resChan)\n\t\t\t\tdelete(reqIdMap, apnsres.msgId)\n\t\t\t}\n\t\t}\n\t}\n}\n\n<commit_msg>checks udid length on subscribing<commit_after>\/*\n * Copyright 2011 Nan Deng\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage pushsrv\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t. \"github.com\/uniqush\/pushsys\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype pushRequest struct {\n\tdp *DeliveryPoint\n\tnotif *Notification\n\tmid uint32\n\tresChan chan<- *PushResult\n}\n\ntype apnsPushService struct {\n\tnextid uint32\n\tconns map[string]net.Conn\n\tconnLock *sync.Mutex\n\n\tconnChan map[string]chan *pushRequest\n\tchanLock *sync.Mutex\n}\n\nfunc InstallAPNS() {\n\tGetPushServiceManager().RegisterPushServiceType(newAPNSPushService())\n}\n\nfunc newAPNSPushService() *apnsPushService {\n\tret := new(apnsPushService)\n\tret.conns = make(map[string]net.Conn, 5)\n\tret.connChan = make(map[string]chan *pushRequest, 5)\n\tret.chanLock = new(sync.Mutex)\n\tret.connLock= new(sync.Mutex)\n\treturn ret\n}\n\nfunc (p *apnsPushService) Name() string {\n\treturn \"apns\"\n}\n\nfunc (p *apnsPushService) Finalize() {\n\tfor _, c := range p.conns {\n\t\tc.Close()\n\t}\n}\nfunc (p *apnsPushService) BuildPushServiceProviderFromMap(kv map[string]string, psp *PushServiceProvider) error {\n\tif service, ok := kv[\"service\"]; ok {\n\t\tpsp.FixedData[\"service\"] = service\n\t} else {\n\t\treturn errors.New(\"NoService\")\n\t}\n\n\tif cert, ok := kv[\"cert\"]; ok && len(cert) > 0 {\n\t\tpsp.FixedData[\"cert\"] = cert\n\t} else {\n\t\treturn errors.New(\"NoCertificate\")\n\t}\n\n\tif key, ok := kv[\"key\"]; ok && len(key) > 0 {\n\t\tpsp.FixedData[\"key\"] = key\n\t} else {\n\t\treturn errors.New(\"NoPrivateKey\")\n\t}\n\n\tif sandbox, ok := kv[\"sandbox\"]; ok {\n\t\tif sandbox == \"true\" {\n\t\t\tpsp.VolatileData[\"addr\"] = \"gateway.sandbox.push.apple.com:2195\"\n\t\t\treturn nil\n\t\t}\n\t}\n\tpsp.VolatileData[\"addr\"] = \"gateway.push.apple.com:2195\"\n\treturn nil\n}\n\nfunc (p *apnsPushService) BuildDeliveryPointFromMap(kv map[string]string, dp *DeliveryPoint) error {\n\tif service, ok := kv[\"service\"]; ok && len(service) > 0 {\n\t\tdp.FixedData[\"service\"] = service\n\t} else {\n\t\treturn errors.New(\"NoService\")\n\t}\n\tif sub, ok := kv[\"subscriber\"]; ok && len(sub) > 0 {\n\t\tdp.FixedData[\"subscriber\"] = sub\n\t} else {\n\t\treturn errors.New(\"NoSubscriber\")\n\t}\n\tif devtoken, ok := kv[\"devtoken\"]; ok && len(devtoken) > 0 {\n\t\tif len(devtoken) != 40 {\n\t\t\treturn fmt.Errorf(\"Dev token %v is invalid: it should be 40 characters\", devtoken)\n\t\t}\n\t\tdp.FixedData[\"devtoken\"] = devtoken\n\t} else {\n\t\treturn errors.New(\"NoDevToken\")\n\t}\n\treturn nil\n}\n\nfunc toAPNSPayload(n *Notification) ([]byte, error) {\n\tpayload := make(map[string]interface{})\n\taps := make(map[string]interface{})\n\talert := make(map[string]interface{})\n\tfor k, v := range n.Data {\n\t\tswitch k {\n\t\tcase \"msg\":\n\t\t\talert[\"body\"] = v\n\t\tcase \"badge\":\n\t\t\tb, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\taps[\"badge\"] = b\n\t\t\t}\n\t\tcase \"sound\":\n\t\t\taps[\"sound\"] = v\n\t\tcase \"img\":\n\t\t\talert[\"launch-image\"] = v\n\t\tcase \"id\":\n\t\t\tcontinue\n\t\tcase \"expiry\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tpayload[k] = v\n\t\t}\n\t}\n\taps[\"alert\"] = alert\n\tpayload[\"aps\"] = aps\n\tj, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn j, nil\n}\n\nfunc writen(w io.Writer, buf []byte) error {\n\tn := len(buf)\n\tfor n >= 0 {\n\t\tl, err := w.Write(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif l >= n {\n\t\t\treturn nil\n\t\t}\n\t\tn -= l\n\t\tbuf = buf[l:]\n\t}\n\treturn nil\n}\n\nfunc (p *apnsPushService) getConn(psp *PushServiceProvider) (net.Conn, error) {\n\tname := psp.Name()\n\tif conn, ok := p.conns[name]; ok {\n\t\treturn conn, nil\n\t}\n\treturn p.reconnect(psp)\n}\n\nfunc (p *apnsPushService) reconnect(psp *PushServiceProvider) (net.Conn, error) {\n\tname := psp.Name()\n\tp.connLock.Lock()\n\tdefer p.connLock.Unlock()\n\n\tif conn, ok := p.conns[name]; ok {\n\t\tconn.Close()\n\t}\n\tcert, err := tls.LoadX509KeyPair(psp.FixedData[\"cert\"], psp.FixedData[\"key\"])\n\tif err != nil {\n\t\treturn nil, NewBadPushServiceProviderWithDetails(psp, err.Error())\n\t}\n\tconf := &tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t\tInsecureSkipVerify: true,\n\t}\n\ttlsconn, err := tls.Dial(\"tcp\", psp.VolatileData[\"addr\"], conf)\n\tif err != nil {\n\t\treturn nil, NewConnectionError(err)\n\t}\n\terr = tlsconn.Handshake()\n\tif err != nil {\n\t\treturn nil, NewConnectionError(err)\n\t}\n\tp.conns[name] = tlsconn\n\treturn tlsconn, nil\n}\n\n\nfunc (self *apnsPushService) getRequestChannel(psp *PushServiceProvider) chan *pushRequest{\n\tvar ch chan *pushRequest\n\tvar ok bool\n\tself.chanLock.Lock()\n\tif ch, ok = self.connChan[psp.Name()]; !ok {\n\t\tch = make(chan *pushRequest)\n\t\tself.connChan[psp.Name()] = ch\n\t\tgo self.pushWorker(psp, ch)\n\t}\n\tself.chanLock.Unlock()\n\treturn ch\n}\n\ntype apnsResult struct {\n\tmsgId uint32\n\tstatus uint8\n\terr error\n}\n\nfunc (self *apnsPushService) Push(psp *PushServiceProvider, dpQueue <-chan *DeliveryPoint, resQueue chan<- *PushResult, notif *Notification) {\n\t\/* First, get the request channel *\/\n\tch := self.getRequestChannel(psp)\n\n\twg := new(sync.WaitGroup)\n\n\tfor dp := range dpQueue {\n\t\twg.Add(1)\n\t\tgo func () {\n\t\t\tresultChannel := make(chan *PushResult, 1)\n\t\t\treq := new(pushRequest)\n\t\t\treq.dp = dp\n\t\t\treq.notif = notif\n\t\t\treq.resChan = resultChannel\n\t\t\treq.mid = atomic.AddUint32(&(self.nextid), 1)\n\n\t\t\tch<-req\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase res := <-resultChannel:\n\t\t\t\t\tresQueue <- res\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t\treturn\n\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(resQueue)\n}\n\nfunc (self *apnsPushService) resultCollector(psp *PushServiceProvider, resChan chan<- *apnsResult) {\n\tc, err := self.getConn(psp)\n\tif err != nil {\n\t\tres := new(apnsResult)\n\t\tres.err = NewConnectionError(err)\n\t\tresChan<-res\n\t\treturn\n\t}\n\n\tfor {\n\t\tvar cmd uint8\n\t\tvar status uint8\n\t\tvar msgid uint32\n\n\t\terr = binary.Read(c, binary.BigEndian, &cmd)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\t\terr = binary.Read(c, binary.BigEndian, &status)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\t\terr = binary.Read(c, binary.BigEndian, &msgid)\n\t\tif err != nil {\n\t\t\tres := new(apnsResult)\n\t\t\tres.err = NewConnectionError(err)\n\t\t\tresChan<-res\n\t\t\tcontinue\n\t\t}\n\n\n\t\tres := new(apnsResult)\n\t\tres.msgId = msgid\n\t\tres.status = status\n\t\tresChan<-res\n\t}\n}\n\nfunc (self *apnsPushService) singlePush(psp *PushServiceProvider, dp *DeliveryPoint, notif *Notification, mid uint32) error {\n\tdevtoken := dp.FixedData[\"devtoken\"]\n\n\tbtoken, err := hex.DecodeString(devtoken)\n\tif err != nil {\n\t\treturn NewBadDeliveryPointWithDetails(dp, err.Error())\n\t}\n\n\tbpayload, err := toAPNSPayload(notif)\n\tif err != nil {\n\t\treturn NewBadNotificationWithDetails(err.Error())\n\t}\n\n\tbuffer := bytes.NewBuffer([]byte{})\n\n\t\/\/ command\n\tbinary.Write(buffer, binary.BigEndian, uint8(1))\n\n\t\/\/ transaction id\n\tbinary.Write(buffer, binary.BigEndian, mid)\n\n\t\/\/ TODO Expiry\n\texpiry := uint32(time.Now().Second() + 60*60)\n\tbinary.Write(buffer, binary.BigEndian, expiry)\n\n\t\/\/ device token\n\tbinary.Write(buffer, binary.BigEndian, uint16(len(btoken)))\n\tbinary.Write(buffer, binary.BigEndian, btoken)\n\n\t\/\/ payload\n\tbinary.Write(buffer, binary.BigEndian, uint16(len(bpayload)))\n\tbinary.Write(buffer, binary.BigEndian, bpayload)\n\tpdu := buffer.Bytes()\n\n\ttlsconn, err := self.getConn(psp)\n\tif err != nil {\n\t\treturn NewConnectionError(err)\n\t}\n\n\tfor i := 0; i < 2; i++ {\n\t\terr = writen(tlsconn, pdu)\n\t\tif err != nil {\n\t\t\ttlsconn, err = self.reconnect(psp)\n\t\t\tif err != nil {\n\t\t\t\treturn NewConnectionError(err)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn NewConnectionError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (self *apnsPushService) pushWorker(psp *PushServiceProvider, reqChan chan *pushRequest) {\n\tresChan := make(chan *apnsResult)\n\n\treqIdMap := make(map[uint32]*pushRequest)\n\n\tvar connErr error\n\n\tconnErr = nil\n\n\tgo self.resultCollector(psp, resChan)\n\tfor {\n\t\tselect {\n\t\tcase req := <-reqChan:\n\t\t\tdp := req.dp\n\t\t\tnotif := req.notif\n\t\t\tmid := req.mid\n\n\t\t\tif connErr != nil {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"%v\", mid)\n\t\t\t\tresult.Err = connErr\n\t\t\t\treq.resChan<-result\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treqIdMap[mid] = req\n\t\t\terr := self.singlePush(psp, dp, notif, mid)\n\n\t\t\tif err != nil {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"apns:%v-%v\", psp.Name(), mid)\n\t\t\t\tresult.Err = err\n\t\t\t\treq.resChan<-result\n\t\t\t\tdelete(reqIdMap, mid)\n\t\t\t}\n\n\t\tcase apnsres := <-resChan:\n\t\t\tif cerr, ok := apnsres.err.(*ConnectionError); ok {\n\t\t\t\tconnErr = cerr\n\t\t\t}\n\n\t\t\tif req, ok := reqIdMap[apnsres.msgId]; ok {\n\t\t\t\tresult := new(PushResult)\n\t\t\t\tresult.Content = req.notif\n\t\t\t\tresult.Provider = psp\n\t\t\t\tresult.Destination = req.dp\n\t\t\t\tresult.MsgId = fmt.Sprintf(\"apns:%v-%v\", psp.Name(), apnsres.msgId)\n\t\t\t\tif apnsres.err != nil {\n\t\t\t\t\tresult := new(PushResult)\n\t\t\t\t\tresult.Err = apnsres.err\n\t\t\t\t\treq.resChan<-result\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch apnsres.status {\n\t\t\t\tcase 0:\n\t\t\t\t\tresult.Err = nil\n\t\t\t\tcase 1:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Processing Error\")\n\t\t\t\tcase 2:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Missing Device Token\")\n\t\t\t\tcase 3:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Missing topic\")\n\t\t\t\tcase 4:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Missing payload\")\n\t\t\t\tcase 5:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid token size\")\n\t\t\t\tcase 6:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid topic size\")\n\t\t\t\tcase 7:\n\t\t\t\t\tresult.Err = NewBadNotificationWithDetails(\"Invalid payload size\")\n\t\t\t\tcase 8:\n\t\t\t\t\tresult.Err = NewBadDeliveryPointWithDetails(req.dp, \"Invalid Token\")\n\t\t\t\tdefault:\n\t\t\t\t\tresult.Err = fmt.Errorf(\"Unknown Error: %d\", apnsres.status)\n\t\t\t\t}\n\n\t\t\t\treq.resChan<-result\n\t\t\t\tclose(req.resChan)\n\t\t\t\tdelete(reqIdMap, apnsres.msgId)\n\t\t\t}\n\t\t}\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/bgentry\/heroku-go\"\n)\n\nvar cmdApps = &Command{\n\tRun: runApps,\n\tUsage: \"apps [<name>...]\",\n\tCategory: \"app\",\n\tShort: \"list apps\",\n\tLong: `\nLists apps. Shows the app name, owner, and last release time (or\ntime the app was created, if it's never been released).\n\nExamples:\n\n $ hk apps\n myapp user@test.com Jan 2 12:34\n myapp2 user@longdomainname… Jan 2 12:34\n\n $ hk apps myapp\n myapp user@test.com Jan 2 12:34\n`,\n}\n\nfunc runApps(cmd *Command, names []string) {\n\tw := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)\n\tdefer w.Flush()\n\tvar apps []heroku.App\n\tif len(names) == 0 {\n\t\tvar err error\n\t\tapps, err = client.AppList(&heroku.ListRange{Field: \"name\", Max: 1000})\n\t\tmust(err)\n\t} else {\n\t\tappch := make(chan *heroku.App, len(names))\n\t\terrch := make(chan error, len(names))\n\t\tfor _, name := range names {\n\t\t\tif name == \"\" {\n\t\t\t\tappch <- nil\n\t\t\t} else {\n\t\t\t\tgo func(appname string) {\n\t\t\t\t\tif app, err := client.AppInfo(appname); err != nil {\n\t\t\t\t\t\terrch <- err\n\t\t\t\t\t} else {\n\t\t\t\t\t\tappch <- app\n\t\t\t\t\t}\n\t\t\t\t}(name)\n\t\t\t}\n\t\t}\n\t\tfor _ = range names {\n\t\t\tselect {\n\t\t\tcase err := <-errch:\n\t\t\t\tprintFatal(err.Error())\n\t\t\tcase app := <-appch:\n\t\t\t\tif app != nil {\n\t\t\t\t\tapps = append(apps, *app)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tprintAppList(w, apps)\n}\n\nfunc printAppList(w io.Writer, apps []heroku.App) {\n\tsort.Sort(appsByName(apps))\n\tabbrevEmailApps(apps)\n\tfor _, a := range apps {\n\t\tif a.Name != \"\" {\n\t\t\tlistApp(w, a)\n\t\t}\n\t}\n}\n\nfunc abbrevEmailApps(apps []heroku.App) {\n\tdomains := make(map[string]int)\n\tfor _, a := range apps {\n\t\tparts := strings.SplitN(a.Owner.Email, \"@\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tdomains[\"@\"+parts[1]]++\n\t\t}\n\t}\n\tsmax, nmax := \"\", 0\n\tfor s, n := range domains {\n\t\tif n > nmax {\n\t\t\tsmax = s\n\t\t\tnmax = n\n\t\t}\n\t}\n\tfor _, a := range apps {\n\t\tif strings.HasSuffix(a.Owner.Email, smax) {\n\t\t\ta.Owner.Email = a.Owner.Email[:len(a.Owner.Email)-len(smax)]\n\t\t}\n\t}\n}\n\nfunc listApp(w io.Writer, a heroku.App) {\n\tt := a.CreatedAt\n\tif a.ReleasedAt != nil {\n\t\tt = *a.ReleasedAt\n\t}\n\tlistRec(w,\n\t\ta.Name,\n\t\tabbrev(a.Owner.Email, 20),\n\t\tprettyTime{t},\n\t)\n}\n\ntype appsByName []heroku.App\n\nfunc (a appsByName) Len() int { return len(a) }\nfunc (a appsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a appsByName) Less(i, j int) bool { return a[i].Name < a[j].Name }\n<commit_msg>show region in apps list<commit_after>package main\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/bgentry\/heroku-go\"\n)\n\nvar cmdApps = &Command{\n\tRun: runApps,\n\tUsage: \"apps [<name>...]\",\n\tCategory: \"app\",\n\tShort: \"list apps\",\n\tLong: `\nLists apps. Shows the app name, owner, and last release time (or\ntime the app was created, if it's never been released).\n\nExamples:\n\n $ hk apps\n myapp user@test.com us Jan 2 12:34\n myapp-eu user@longdomainname… eu Jan 2 12:35\n\n $ hk apps myapp\n myapp user@test.com us Jan 2 12:34\n`,\n}\n\nfunc runApps(cmd *Command, names []string) {\n\tw := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)\n\tdefer w.Flush()\n\tvar apps []heroku.App\n\tif len(names) == 0 {\n\t\tvar err error\n\t\tapps, err = client.AppList(&heroku.ListRange{Field: \"name\", Max: 1000})\n\t\tmust(err)\n\t} else {\n\t\tappch := make(chan *heroku.App, len(names))\n\t\terrch := make(chan error, len(names))\n\t\tfor _, name := range names {\n\t\t\tif name == \"\" {\n\t\t\t\tappch <- nil\n\t\t\t} else {\n\t\t\t\tgo func(appname string) {\n\t\t\t\t\tif app, err := client.AppInfo(appname); err != nil {\n\t\t\t\t\t\terrch <- err\n\t\t\t\t\t} else {\n\t\t\t\t\t\tappch <- app\n\t\t\t\t\t}\n\t\t\t\t}(name)\n\t\t\t}\n\t\t}\n\t\tfor _ = range names {\n\t\t\tselect {\n\t\t\tcase err := <-errch:\n\t\t\t\tprintFatal(err.Error())\n\t\t\tcase app := <-appch:\n\t\t\t\tif app != nil {\n\t\t\t\t\tapps = append(apps, *app)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tprintAppList(w, apps)\n}\n\nfunc printAppList(w io.Writer, apps []heroku.App) {\n\tsort.Sort(appsByName(apps))\n\tabbrevEmailApps(apps)\n\tfor _, a := range apps {\n\t\tif a.Name != \"\" {\n\t\t\tlistApp(w, a)\n\t\t}\n\t}\n}\n\nfunc abbrevEmailApps(apps []heroku.App) {\n\tdomains := make(map[string]int)\n\tfor _, a := range apps {\n\t\tparts := strings.SplitN(a.Owner.Email, \"@\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tdomains[\"@\"+parts[1]]++\n\t\t}\n\t}\n\tsmax, nmax := \"\", 0\n\tfor s, n := range domains {\n\t\tif n > nmax {\n\t\t\tsmax = s\n\t\t\tnmax = n\n\t\t}\n\t}\n\tfor _, a := range apps {\n\t\tif strings.HasSuffix(a.Owner.Email, smax) {\n\t\t\ta.Owner.Email = a.Owner.Email[:len(a.Owner.Email)-len(smax)]\n\t\t}\n\t}\n}\n\nfunc listApp(w io.Writer, a heroku.App) {\n\tt := a.CreatedAt\n\tif a.ReleasedAt != nil {\n\t\tt = *a.ReleasedAt\n\t}\n\tlistRec(w,\n\t\ta.Name,\n\t\tabbrev(a.Owner.Email, 20),\n\t\ta.Region.Name,\n\t\tprettyTime{t},\n\t)\n}\n\ntype appsByName []heroku.App\n\nfunc (a appsByName) Len() int { return len(a) }\nfunc (a appsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a appsByName) Less(i, j int) bool { return a[i].Name < a[j].Name }\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\/exec\"\n)\n\nvar cmdInfo = &Command{\n\tRun: runInfo,\n\tUsage: \"info [-a APP]\",\n\tShort: \"show app info\",\n\tLong: `Info shows general information about the current app.`,\n}\n\nfunc init() {\n\tcmdInfo.Flag.StringVar(&flagApp, \"a\", \"\", \"app\")\n}\n\nfunc runInfo(cmd *Command, args []string) {\n\tvar info struct {\n\t\tName string\n\t\tOwner string `json:\"owner_email\"`\n\t\tStack string\n\t\tGitURL string `json:\"git_url\"`\n\t\tWebURL string `json:\"web_url\"`\n\t}\n\tAPIReq(\"GET\", \"\/apps\/\"+app()).Do(&info)\n\tfmt.Printf(\"Name: %s\\n\", info.Name)\n\tfmt.Printf(\"Owner: %s\\n\", info.Owner)\n\tfmt.Printf(\"Stack: %s\\n\", info.Stack)\n\tfmt.Printf(\"Git URL: %s\\n\", info.GitURL)\n\tfmt.Printf(\"Web URL: %s\\n\", info.WebURL)\n}\n\nvar cmdOpen = &Command{\n\tRun: runOpen,\n\tUsage: \"open\",\n\tShort: \"open app\",\n\tLong: `Open opens the app in a web browser. (Assumes cedar.)`,\n}\n\nfunc runOpen(cmd *Command, args []string) {\n\tu := \"https:\/\/\" + app() + \".herokuapp.com\/\"\n\tcommand := \"open\"\n\tif _, err := exec.LookPath(\"xdg-open\"); err == nil {\n\t\tcommand = \"xdg-open\"\n\t}\n\texec.Command(command, u).Start()\n}\n\nvar cmdList = &Command{\n\tRun: runList,\n\tUsage: \"list\",\n\tShort: \"list apps\",\n\tLong: `List lists all accessible apps.`,\n}\n\nfunc runList(cmd *Command, args []string) {\n\tvar apps []struct{ Name string }\n\tAPIReq(\"GET\", \"\/apps\").Do(&apps)\n\tfor _, app := range apps {\n\t\tfmt.Printf(\"%s\\n\", app.Name)\n\t}\n}\n\nvar cmdCreate = &Command{\n\tRun: runCreate,\n\tUsage: \"create [name]\",\n\tShort: \"create an app\",\n\tLong: `Create creates a new heroku app.`,\n}\n\nfunc runCreate(cmd *Command, args []string) {\n\tvar info struct {\n\t\tName string\n\t\tStack string\n\t\tGitURL string `json:\"git_url\"`\n\t}\n\n\tv := make(url.Values)\n\tv.Set(\"app[stack]\", \"cedar\")\n\tif len(args) > 0 {\n\t\tv.Set(\"app[name]\", args[0])\n\t}\n\n\tr := APIReq(\"POST\", \"\/apps\")\n\tr.SetBodyForm(v)\n\tr.Do(&info)\n\texec.Command(\"git\", \"remote\", \"add\", \"heroku\", info.GitURL).Run()\n\tfmt.Println(info.Name)\n}\n\nvar cmdDestroy = &Command{\n\tRun: runDestroy,\n\tUsage: \"destroy <name>\",\n\tShort: \"destroy an app\",\n\tLong: `Destroy destroys a heroku app.`,\n}\n\nfunc runDestroy(cmd *Command, args []string) {\n\tAPIReq(\"DELETE\", \"\/apps\/\"+args[0]).Do(nil)\n\tfor _, remote := range gitRemotes(gitURL(args[0])) {\n\t\texec.Command(\"git\", \"remote\", \"rm\", remote).Run()\n\t}\n}\n<commit_msg>check args on destory<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nvar cmdInfo = &Command{\n\tRun: runInfo,\n\tUsage: \"info [-a APP]\",\n\tShort: \"show app info\",\n\tLong: `Info shows general information about the current app.`,\n}\n\nfunc init() {\n\tcmdInfo.Flag.StringVar(&flagApp, \"a\", \"\", \"app\")\n}\n\nfunc runInfo(cmd *Command, args []string) {\n\tvar info struct {\n\t\tName string\n\t\tOwner string `json:\"owner_email\"`\n\t\tStack string\n\t\tGitURL string `json:\"git_url\"`\n\t\tWebURL string `json:\"web_url\"`\n\t}\n\tAPIReq(\"GET\", \"\/apps\/\"+app()).Do(&info)\n\tfmt.Printf(\"Name: %s\\n\", info.Name)\n\tfmt.Printf(\"Owner: %s\\n\", info.Owner)\n\tfmt.Printf(\"Stack: %s\\n\", info.Stack)\n\tfmt.Printf(\"Git URL: %s\\n\", info.GitURL)\n\tfmt.Printf(\"Web URL: %s\\n\", info.WebURL)\n}\n\nvar cmdOpen = &Command{\n\tRun: runOpen,\n\tUsage: \"open\",\n\tShort: \"open app\",\n\tLong: `Open opens the app in a web browser. (Assumes cedar.)`,\n}\n\nfunc runOpen(cmd *Command, args []string) {\n\tu := \"https:\/\/\" + app() + \".herokuapp.com\/\"\n\tcommand := \"open\"\n\tif _, err := exec.LookPath(\"xdg-open\"); err == nil {\n\t\tcommand = \"xdg-open\"\n\t}\n\texec.Command(command, u).Start()\n}\n\nvar cmdList = &Command{\n\tRun: runList,\n\tUsage: \"list\",\n\tShort: \"list apps\",\n\tLong: `List lists all accessible apps.`,\n}\n\nfunc runList(cmd *Command, args []string) {\n\tvar apps []struct{ Name string }\n\tAPIReq(\"GET\", \"\/apps\").Do(&apps)\n\tfor _, app := range apps {\n\t\tfmt.Printf(\"%s\\n\", app.Name)\n\t}\n}\n\nvar cmdCreate = &Command{\n\tRun: runCreate,\n\tUsage: \"create [name]\",\n\tShort: \"create an app\",\n\tLong: `Create creates a new heroku app.`,\n}\n\nfunc runCreate(cmd *Command, args []string) {\n\tvar info struct {\n\t\tName string\n\t\tStack string\n\t\tGitURL string `json:\"git_url\"`\n\t}\n\n\tv := make(url.Values)\n\tv.Set(\"app[stack]\", \"cedar\")\n\tif len(args) > 0 {\n\t\tv.Set(\"app[name]\", args[0])\n\t}\n\n\tr := APIReq(\"POST\", \"\/apps\")\n\tr.SetBodyForm(v)\n\tr.Do(&info)\n\texec.Command(\"git\", \"remote\", \"add\", \"heroku\", info.GitURL).Run()\n\tfmt.Println(info.Name)\n}\n\nvar cmdDestroy = &Command{\n\tRun: runDestroy,\n\tUsage: \"destroy <name>\",\n\tShort: \"destroy an app\",\n\tLong: `Destroy destroys a heroku app.`,\n}\n\nfunc runDestroy(cmd *Command, args []string) {\n\tif len(args) != 1 {\n\t\tcmd.printUsage()\n\t\tos.Exit(1)\n\t}\n\n\tAPIReq(\"DELETE\", \"\/apps\/\"+args[0]).Do(nil)\n\tfor _, remote := range gitRemotes(gitURL(args[0])) {\n\t\texec.Command(\"git\", \"remote\", \"rm\", remote).Run()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc usage() {\n\tfmt.Fprintln(os.Stderr, `Usage:\n\nsd [options] 'command'\n\nExamples\n\n echo -e \"1\\n2\\n3\\n4\\n5\" | sd 'echo -e \"2\\n4\"'\n\n while [ 0 ]; do echo $RANDOM; sleep .1; done | sd -h 1 'seq 500'\n\n mysql schema_1 -Nsr -e \"SELECT city FROM users\" | sd -h 120 mysql schema_2 -Nsr -e \"SELECT city FROM excluded_cities\"\n\n mysql -Nsr -e \"SELECT city FROM users\" | sd -p 0 -t 10 kafka_consumer --topic excluded_cities > active_cities.txt \n\nOptions\n\n\t-f --follow: keeps reading from STDIN until SIGINT or its end.\n\t-i --infinite: keeps reading from COMMAND until it ends rather than timing it out. Note that if the stream doesn't end, sd just blocks forever and does nothing.\n\t-p --patience %seconds%: wait for the specified seconds for the first received line. Use 0 for waiting forever.\n\t-t --timeout %seconds%: exit(0) after specified seconds from last received line. STDIN and command have independent timeouts. When with -f, timeout only applies to the command (not to STDIN).\n\t-h --hard-timeout %seconds%: exit(0) after the specified seconds (or earlier). Overrides all other options.\n\n`)\n}\n\ntype options struct {\n\tfollow bool\n\tinfinite bool\n\tpatience int\n\ttimeoutF int\n\thardTimeout int\n}\n\nfunc defineOptions(fs *flag.FlagSet) *options {\n\tfollowHelp := \"keeps reading from STDIN until SIGINT or its end.\"\n\tinfiniteHelp := \"keeps reading from COMMAND until it ends rather than timing it out. Note that if the stream doesn't end, sd just blocks forever and does nothing.\"\n\tpatienceHelp := \"wait for the specified seconds for the first received line. Use 0 for waiting forever.\"\n\ttimeoutHelp := \"exit(0) after specified seconds from last received line. STDIN and command have independent timeouts. When with -f, timeout only applies to the command (not to STDIN).\"\n\thardTimeoutHelp := \"exit(0) after the specified seconds (or earlier). Overrides all other options.\"\n\n\tvar o options\n\tsetDefaultOptions(&o)\n\n\tfs.BoolVar(&o.follow, \"follow\", o.follow, followHelp)\n\tfs.BoolVar(&o.follow, \"f\", o.follow, followHelp)\n\tfs.BoolVar(&o.infinite, \"infinite\", o.infinite, infiniteHelp)\n\tfs.BoolVar(&o.infinite, \"i\", o.infinite, infiniteHelp)\n\tfs.IntVar(&o.patience, \"patience\", o.patience, patienceHelp)\n\tfs.IntVar(&o.patience, \"p\", o.patience, patienceHelp)\n\tfs.IntVar(&o.timeoutF, \"timeout\", o.timeoutF, timeoutHelp)\n\tfs.IntVar(&o.timeoutF, \"t\", o.timeoutF, timeoutHelp)\n\tfs.IntVar(&o.hardTimeout, \"hard-timeout\", o.hardTimeout, hardTimeoutHelp)\n\tfs.IntVar(&o.hardTimeout, \"h\", o.hardTimeout, hardTimeoutHelp)\n\n\tfs.Usage = usage\n\n\treturn &o\n}\n\nfunc setDefaultOptions(o *options) {\n\to.follow = false\n\to.infinite = false\n\to.patience = -1\n\to.timeoutF = 10\n\to.hardTimeout = 0\n}\n\nfunc resolveOptions(args []string) (*options, error) {\n\tfs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)\n\to := defineOptions(fs)\n\terr := fs.Parse(args)\n\tif err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}\n\nfunc mustResolveOptions(args []string) *options {\n\to, err := resolveOptions(args)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn o\n}\n\nfunc resolveTimeouts(options *options) (timeout, timeout) {\n\tvar stdinTimeout, cmdTimeout timeout\n\n\tif options.follow {\n\t\tstdinTimeout.infinite = true\n\t}\n\n\tif options.infinite {\n\t\tcmdTimeout.infinite = true\n\t}\n\n\tif options.patience == 0 {\n\t\tstdinTimeout.firstTimeInfinite = true\n\t\tcmdTimeout.firstTimeInfinite = true\n\t} else if options.patience == -1 {\n\t\tstdinTimeout.firstTime = time.Duration(options.timeoutF) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.timeoutF) * time.Second\n\t} else {\n\t\tstdinTimeout.firstTime = time.Duration(options.patience) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.patience) * time.Second\n\t}\n\n\tstdinTimeout.time = time.Duration(options.timeoutF) * time.Second\n\tcmdTimeout.time = time.Duration(options.timeoutF) * time.Second\n\n\tif options.hardTimeout > 0 {\n\t\tstdinTimeout.hard = true\n\t\tcmdTimeout.hard = true\n\t\tstdinTimeout.firstTime = time.Duration(options.hardTimeout) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.hardTimeout) * time.Second\n\t}\n\n\treturn stdinTimeout, cmdTimeout\n}\n<commit_msg>Uses fatal rather than panic.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc usage() {\n\tfmt.Fprintln(os.Stderr, `Usage:\n\nsd [options] 'command'\n\nExamples\n\n echo -e \"1\\n2\\n3\\n4\\n5\" | sd 'echo -e \"2\\n4\"'\n\n while [ 0 ]; do echo $RANDOM; sleep .1; done | sd -h 1 'seq 500'\n\n mysql schema_1 -Nsr -e \"SELECT city FROM users\" | sd -h 120 mysql schema_2 -Nsr -e \"SELECT city FROM excluded_cities\"\n\n mysql -Nsr -e \"SELECT city FROM users\" | sd -p 0 -t 10 kafka_consumer --topic excluded_cities > active_cities.txt \n\nOptions\n\n\t-f --follow: keeps reading from STDIN until SIGINT or its end.\n\t-i --infinite: keeps reading from COMMAND until it ends rather than timing it out. Note that if the stream doesn't end, sd just blocks forever and does nothing.\n\t-p --patience %seconds%: wait for the specified seconds for the first received line. Use 0 for waiting forever.\n\t-t --timeout %seconds%: exit(0) after specified seconds from last received line. STDIN and command have independent timeouts. When with -f, timeout only applies to the command (not to STDIN).\n\t-h --hard-timeout %seconds%: exit(0) after the specified seconds (or earlier). Overrides all other options.\n\n`)\n}\n\ntype options struct {\n\tfollow bool\n\tinfinite bool\n\tpatience int\n\ttimeoutF int\n\thardTimeout int\n}\n\nfunc defineOptions(fs *flag.FlagSet) *options {\n\tfollowHelp := \"keeps reading from STDIN until SIGINT or its end.\"\n\tinfiniteHelp := \"keeps reading from COMMAND until it ends rather than timing it out. Note that if the stream doesn't end, sd just blocks forever and does nothing.\"\n\tpatienceHelp := \"wait for the specified seconds for the first received line. Use 0 for waiting forever.\"\n\ttimeoutHelp := \"exit(0) after specified seconds from last received line. STDIN and command have independent timeouts. When with -f, timeout only applies to the command (not to STDIN).\"\n\thardTimeoutHelp := \"exit(0) after the specified seconds (or earlier). Overrides all other options.\"\n\n\tvar o options\n\tsetDefaultOptions(&o)\n\n\tfs.BoolVar(&o.follow, \"follow\", o.follow, followHelp)\n\tfs.BoolVar(&o.follow, \"f\", o.follow, followHelp)\n\tfs.BoolVar(&o.infinite, \"infinite\", o.infinite, infiniteHelp)\n\tfs.BoolVar(&o.infinite, \"i\", o.infinite, infiniteHelp)\n\tfs.IntVar(&o.patience, \"patience\", o.patience, patienceHelp)\n\tfs.IntVar(&o.patience, \"p\", o.patience, patienceHelp)\n\tfs.IntVar(&o.timeoutF, \"timeout\", o.timeoutF, timeoutHelp)\n\tfs.IntVar(&o.timeoutF, \"t\", o.timeoutF, timeoutHelp)\n\tfs.IntVar(&o.hardTimeout, \"hard-timeout\", o.hardTimeout, hardTimeoutHelp)\n\tfs.IntVar(&o.hardTimeout, \"h\", o.hardTimeout, hardTimeoutHelp)\n\n\tfs.Usage = usage\n\n\treturn &o\n}\n\nfunc setDefaultOptions(o *options) {\n\to.follow = false\n\to.infinite = false\n\to.patience = -1\n\to.timeoutF = 10\n\to.hardTimeout = 0\n}\n\nfunc resolveOptions(args []string) (*options, error) {\n\tfs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)\n\to := defineOptions(fs)\n\terr := fs.Parse(args)\n\tif err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}\n\nfunc mustResolveOptions(args []string) *options {\n\to, err := resolveOptions(args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn o\n}\n\nfunc resolveTimeouts(options *options) (timeout, timeout) {\n\tvar stdinTimeout, cmdTimeout timeout\n\n\tif options.follow {\n\t\tstdinTimeout.infinite = true\n\t}\n\n\tif options.infinite {\n\t\tcmdTimeout.infinite = true\n\t}\n\n\tif options.patience == 0 {\n\t\tstdinTimeout.firstTimeInfinite = true\n\t\tcmdTimeout.firstTimeInfinite = true\n\t} else if options.patience == -1 {\n\t\tstdinTimeout.firstTime = time.Duration(options.timeoutF) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.timeoutF) * time.Second\n\t} else {\n\t\tstdinTimeout.firstTime = time.Duration(options.patience) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.patience) * time.Second\n\t}\n\n\tstdinTimeout.time = time.Duration(options.timeoutF) * time.Second\n\tcmdTimeout.time = time.Duration(options.timeoutF) * time.Second\n\n\tif options.hardTimeout > 0 {\n\t\tstdinTimeout.hard = true\n\t\tcmdTimeout.hard = true\n\t\tstdinTimeout.firstTime = time.Duration(options.hardTimeout) * time.Second\n\t\tcmdTimeout.firstTime = time.Duration(options.hardTimeout) * time.Second\n\t}\n\n\treturn stdinTimeout, cmdTimeout\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nfunc main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: aroe DIRECTORY COMMAND [ARGS…]\")\n\t\tos.Exit(1)\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\terr = watcher.Watch(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar cmd *exec.Cmd\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase _ = <-watcher.Event:\n\t\t\t\tlog.Println(\"Changes in directory, restarting\")\n\t\t\t\tcmd.Process.Signal(os.Interrupt)\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Fatal(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tcmd = exec.Command(os.Args[2])\n\t\tcmd.Args = os.Args[2:]\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tif err, ok := err.(*exec.ExitError); !ok {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>aroe -> aroc in usage statement<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nfunc main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: aroc DIRECTORY COMMAND [ARGS…]\")\n\t\tos.Exit(1)\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\terr = watcher.Watch(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar cmd *exec.Cmd\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase _ = <-watcher.Event:\n\t\t\t\tlog.Println(\"Changes in directory, restarting\")\n\t\t\t\tcmd.Process.Signal(os.Interrupt)\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Fatal(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tcmd = exec.Command(os.Args[2])\n\t\tcmd.Args = os.Args[2:]\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tif err, ok := err.(*exec.ExitError); !ok {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package admin\n\nimport (\n\t\"context\"\n\t\"crypto\/rsa\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/identitytoolkit\/v3\"\n\t\"google.golang.org\/api\/iterator\"\n)\n\n\/\/ Auth type\ntype Auth struct {\n\tapp *App\n\tclient *identitytoolkit.Service\n\tkeysMutex *sync.RWMutex\n\tkeys map[string]*rsa.PublicKey\n\tkeysExp time.Time\n}\n\nconst (\n\tkeysEndpoint = \"https:\/\/www.googleapis.com\/robot\/v1\/metadata\/x509\/securetoken@system.gserviceaccount.com\"\n\tcustomTokenAudience = \"https:\/\/identitytoolkit.googleapis.com\/google.identity.identitytoolkit.v1.IdentityToolkit\"\n)\n\nfunc newAuth(app *App) (*Auth, error) {\n\tclient, err := identitytoolkit.New(oauth2.NewClient(context.Background(), app.tokenSource))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Auth{\n\t\tapp: app,\n\t\tclient: client,\n\t\tkeysMutex: &sync.RWMutex{},\n\t}, nil\n}\n\n\/\/ CreateCustomToken creates a custom token used for client to authenticate\n\/\/ with firebase server using signInWithCustomToken\n\/\/ https:\/\/firebase.google.com\/docs\/auth\/admin\/create-custom-tokens\nfunc (auth *Auth) CreateCustomToken(userID string, claims interface{}) (string, error) {\n\tif auth.app.jwtConfig == nil || auth.app.privateKey == nil {\n\t\treturn \"\", ErrRequireServiceAccount\n\t}\n\tnow := time.Now()\n\tpayload := &Claims{\n\t\tIssuer: auth.app.jwtConfig.Email,\n\t\tSubject: auth.app.jwtConfig.Email,\n\t\tAudience: customTokenAudience,\n\t\tIssuedAt: now.Unix(),\n\t\tExpiresAt: now.Add(time.Hour).Unix(),\n\t\tUserID: userID,\n\t\tClaims: claims,\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, payload)\n\treturn token.SignedString(auth.app.privateKey)\n}\n\n\/\/ VerifyIDToken validates given idToken\n\/\/ return Claims for that token only valid token\nfunc (auth *Auth) VerifyIDToken(idToken string) (*Claims, error) {\n\ttoken, err := jwt.ParseWithClaims(idToken, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect algorithm. Expected \\\"RSA\\\" but got \\\"%#v\\\"\", token.Header[\"alg\"])}\n\t\t}\n\t\tkid := token.Header[\"kid\"].(string)\n\t\tif kid == \"\" {\n\t\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has no \\\"kid\\\" claim\"}\n\t\t}\n\t\tkey := auth.selectKey(kid)\n\t\tif key == nil {\n\t\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has \\\"kid\\\" claim which does not correspond to a known public key. Most likely the ID token is expired, so get a fresh token from your client app and try again\"}\n\t\t}\n\t\treturn key, nil\n\t})\n\tif err != nil {\n\t\treturn nil, &ErrTokenInvalid{err.Error()}\n\t}\n\n\tclaims, ok := token.Claims.(*Claims)\n\tif !ok || !token.Valid {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: invalid token\"}\n\t}\n\tif !claims.verifyAudience(auth.app.projectID) {\n\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect \\\"aud\\\" (audience) claim. Expected \\\"%s\\\" but got \\\"%s\\\"\", auth.app.projectID, claims.Audience)}\n\t}\n\tif !claims.verifyIssuer(\"https:\/\/securetoken.google.com\/\" + auth.app.projectID) {\n\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect \\\"iss\\\" (issuer) claim. Expected \\\"https:\/\/securetoken.google.com\/%s\\\" but got \\\"%s\\\"\", auth.app.projectID, claims.Issuer)}\n\t}\n\tif claims.Subject == \"\" {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has an empty string \\\"sub\\\" (subject) claim\"}\n\t}\n\tif len(claims.Subject) > 128 {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has \\\"sub\\\" (subject) claim longer than 128 characters\"}\n\t}\n\n\tclaims.UserID = claims.Subject\n\treturn claims, nil\n}\n\nfunc (auth *Auth) fetchKeys() error {\n\tauth.keysMutex.Lock()\n\tdefer auth.keysMutex.Unlock()\n\tresp, err := http.Get(keysEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tauth.keysExp, _ = time.Parse(time.RFC1123, resp.Header.Get(\"Expires\"))\n\n\tm := map[string]string{}\n\tif err = json.NewDecoder(resp.Body).Decode(&m); err != nil {\n\t\treturn err\n\t}\n\tks := map[string]*rsa.PublicKey{}\n\tfor k, v := range m {\n\t\tp, _ := jwt.ParseRSAPublicKeyFromPEM([]byte(v))\n\t\tif p != nil {\n\t\t\tks[k] = p\n\t\t}\n\t}\n\tauth.keys = ks\n\treturn nil\n}\n\nfunc (auth *Auth) selectKey(kid string) *rsa.PublicKey {\n\tauth.keysMutex.RLock()\n\tif auth.keysExp.IsZero() || auth.keysExp.Before(time.Now()) || len(auth.keys) == 0 {\n\t\tauth.keysMutex.RUnlock()\n\t\tif err := auth.fetchKeys(); err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tauth.keysMutex.RLock()\n\t}\n\tdefer auth.keysMutex.RUnlock()\n\treturn auth.keys[kid]\n}\n\n\/\/ GetUser retrieves an user by user id\nfunc (auth *Auth) GetUser(uid string) (*UserRecord, error) {\n\tusers, err := auth.GetUsers([]string{uid})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn users[0], nil\n}\n\n\/\/ GetUsers retrieves users by user ids\nfunc (auth *Auth) GetUsers(userIDs []string) ([]*UserRecord, error) {\n\tr := auth.client.Relyingparty.GetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tLocalId: userIDs,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ GetUserByEmail retrieves user by email\nfunc (auth *Auth) GetUserByEmail(email string) (*UserRecord, error) {\n\tusers, err := auth.GetUsersByEmail([]string{email})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn users[0], nil\n}\n\n\/\/ GetUsersByEmail retrieves users by emails\nfunc (auth *Auth) GetUsersByEmail(emails []string) ([]*UserRecord, error) {\n\tr := auth.client.Relyingparty.GetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tEmail: emails,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ DeleteUser deletes an user by user id\nfunc (auth *Auth) DeleteUser(userID string) error {\n\tif len(userID) == 0 {\n\t\treturn ErrRequireUID\n\t}\n\n\tr := auth.client.Relyingparty.DeleteAccount(&identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{\n\t\tLocalId: userID,\n\t})\n\t_, err := r.Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (auth *Auth) createUserAutoID(user *User) (string, error) {\n\tr := auth.client.Relyingparty.SignupNewUser(&identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest{\n\t\tDisabled: user.Disabled,\n\t\tDisplayName: user.DisplayName,\n\t\tEmail: user.Email,\n\t\tEmailVerified: user.EmailVerified,\n\t\tPassword: user.Password,\n\t\tPhotoUrl: user.PhotoURL,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(resp.LocalId) == 0 {\n\t\treturn \"\", errors.New(\"firebaseauth: create account error\")\n\t}\n\treturn resp.LocalId, nil\n}\n\nfunc (auth *Auth) createUserCustomID(user *User) error {\n\tr := auth.client.Relyingparty.UploadAccount(&identitytoolkit.IdentitytoolkitRelyingpartyUploadAccountRequest{\n\t\tAllowOverwrite: false,\n\t\tSanityCheck: true,\n\t\tUsers: []*identitytoolkit.UserInfo{\n\t\t\t&identitytoolkit.UserInfo{\n\t\t\t\tLocalId: user.UserID,\n\t\t\t\tEmail: user.Email,\n\t\t\t\tEmailVerified: user.EmailVerified,\n\t\t\t\tRawPassword: user.Password,\n\t\t\t\tDisplayName: user.DisplayName,\n\t\t\t\tDisabled: user.Disabled,\n\t\t\t\tPhotoUrl: user.PhotoURL,\n\t\t\t},\n\t\t},\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resp.Error) > 0 {\n\t\treturn errors.New(\"firebaseauth: create user error\")\n\t}\n\treturn nil\n}\n\n\/\/ CreateUser creates an user\n\/\/ if not provides UserID, firebase server will auto generate\nfunc (auth *Auth) CreateUser(user *User) (*UserRecord, error) {\n\tvar err error\n\tvar userID string\n\n\tif len(user.UserID) == 0 {\n\t\tuserID, err = auth.createUserAutoID(user)\n\t} else {\n\t\tuserID = user.UserID\n\t\terr = auth.createUserCustomID(user)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := auth.GetUser(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ ListAccountCursor type\ntype ListAccountCursor struct {\n\tnextPageToken string\n\tauth *Auth\n\tMaxResults int64\n}\n\n\/\/ ListUsers creates list account cursor for retrieves accounts\n\/\/ MaxResults can change later after create cursor\nfunc (auth *Auth) ListUsers(maxResults int64) *ListAccountCursor {\n\treturn &ListAccountCursor{MaxResults: maxResults, auth: auth}\n}\n\n\/\/ Next retrieves next users from cursor which limit to MaxResults\n\/\/ then move cursor to the next users\nfunc (cursor *ListAccountCursor) Next() ([]*UserRecord, error) {\n\tr := cursor.auth.client.Relyingparty.DownloadAccount(&identitytoolkit.IdentitytoolkitRelyingpartyDownloadAccountRequest{\n\t\tMaxResults: cursor.MaxResults,\n\t\tNextPageToken: cursor.nextPageToken,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, iterator.Done\n\t}\n\tcursor.nextPageToken = resp.NextPageToken\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ UpdateUser updates an existing user\nfunc (auth *Auth) UpdateUser(user *User) (*UserRecord, error) {\n\tr := auth.client.Relyingparty.SetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest{\n\t\tLocalId: user.UserID,\n\t\tEmail: user.Email,\n\t\tEmailVerified: user.EmailVerified,\n\t\tPassword: user.Password,\n\t\tDisplayName: user.DisplayName,\n\t\tDisableUser: user.Disabled,\n\t\tPhotoUrl: user.PhotoURL,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := auth.GetUser(resp.LocalId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n<commit_msg>auth: fix panic when token don't have kid header<commit_after>package admin\n\nimport (\n\t\"context\"\n\t\"crypto\/rsa\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/identitytoolkit\/v3\"\n\t\"google.golang.org\/api\/iterator\"\n)\n\n\/\/ Auth type\ntype Auth struct {\n\tapp *App\n\tclient *identitytoolkit.Service\n\tkeysMutex *sync.RWMutex\n\tkeys map[string]*rsa.PublicKey\n\tkeysExp time.Time\n}\n\nconst (\n\tkeysEndpoint = \"https:\/\/www.googleapis.com\/robot\/v1\/metadata\/x509\/securetoken@system.gserviceaccount.com\"\n\tcustomTokenAudience = \"https:\/\/identitytoolkit.googleapis.com\/google.identity.identitytoolkit.v1.IdentityToolkit\"\n)\n\nfunc newAuth(app *App) (*Auth, error) {\n\tclient, err := identitytoolkit.New(oauth2.NewClient(context.Background(), app.tokenSource))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Auth{\n\t\tapp: app,\n\t\tclient: client,\n\t\tkeysMutex: &sync.RWMutex{},\n\t}, nil\n}\n\n\/\/ CreateCustomToken creates a custom token used for client to authenticate\n\/\/ with firebase server using signInWithCustomToken\n\/\/ https:\/\/firebase.google.com\/docs\/auth\/admin\/create-custom-tokens\nfunc (auth *Auth) CreateCustomToken(userID string, claims interface{}) (string, error) {\n\tif auth.app.jwtConfig == nil || auth.app.privateKey == nil {\n\t\treturn \"\", ErrRequireServiceAccount\n\t}\n\tnow := time.Now()\n\tpayload := &Claims{\n\t\tIssuer: auth.app.jwtConfig.Email,\n\t\tSubject: auth.app.jwtConfig.Email,\n\t\tAudience: customTokenAudience,\n\t\tIssuedAt: now.Unix(),\n\t\tExpiresAt: now.Add(time.Hour).Unix(),\n\t\tUserID: userID,\n\t\tClaims: claims,\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, payload)\n\treturn token.SignedString(auth.app.privateKey)\n}\n\n\/\/ VerifyIDToken validates given idToken\n\/\/ return Claims for that token only valid token\nfunc (auth *Auth) VerifyIDToken(idToken string) (*Claims, error) {\n\ttoken, err := jwt.ParseWithClaims(idToken, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect algorithm. Expected \\\"RSA\\\" but got \\\"%#v\\\"\", token.Header[\"alg\"])}\n\t\t}\n\t\tkid, _ := token.Header[\"kid\"].(string)\n\t\tif kid == \"\" {\n\t\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has no \\\"kid\\\" claim\"}\n\t\t}\n\t\tkey := auth.selectKey(kid)\n\t\tif key == nil {\n\t\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has \\\"kid\\\" claim which does not correspond to a known public key. Most likely the ID token is expired, so get a fresh token from your client app and try again\"}\n\t\t}\n\t\treturn key, nil\n\t})\n\tif err != nil {\n\t\treturn nil, &ErrTokenInvalid{err.Error()}\n\t}\n\n\tclaims, ok := token.Claims.(*Claims)\n\tif !ok || !token.Valid {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: invalid token\"}\n\t}\n\tif !claims.verifyAudience(auth.app.projectID) {\n\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect \\\"aud\\\" (audience) claim. Expected \\\"%s\\\" but got \\\"%s\\\"\", auth.app.projectID, claims.Audience)}\n\t}\n\tif !claims.verifyIssuer(\"https:\/\/securetoken.google.com\/\" + auth.app.projectID) {\n\t\treturn nil, &ErrTokenInvalid{fmt.Sprintf(\"firebaseauth: Firebase ID token has incorrect \\\"iss\\\" (issuer) claim. Expected \\\"https:\/\/securetoken.google.com\/%s\\\" but got \\\"%s\\\"\", auth.app.projectID, claims.Issuer)}\n\t}\n\tif claims.Subject == \"\" {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has an empty string \\\"sub\\\" (subject) claim\"}\n\t}\n\tif len(claims.Subject) > 128 {\n\t\treturn nil, &ErrTokenInvalid{\"firebaseauth: Firebase ID token has \\\"sub\\\" (subject) claim longer than 128 characters\"}\n\t}\n\n\tclaims.UserID = claims.Subject\n\treturn claims, nil\n}\n\nfunc (auth *Auth) fetchKeys() error {\n\tauth.keysMutex.Lock()\n\tdefer auth.keysMutex.Unlock()\n\tresp, err := http.Get(keysEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tauth.keysExp, _ = time.Parse(time.RFC1123, resp.Header.Get(\"Expires\"))\n\n\tm := map[string]string{}\n\tif err = json.NewDecoder(resp.Body).Decode(&m); err != nil {\n\t\treturn err\n\t}\n\tks := map[string]*rsa.PublicKey{}\n\tfor k, v := range m {\n\t\tp, _ := jwt.ParseRSAPublicKeyFromPEM([]byte(v))\n\t\tif p != nil {\n\t\t\tks[k] = p\n\t\t}\n\t}\n\tauth.keys = ks\n\treturn nil\n}\n\nfunc (auth *Auth) selectKey(kid string) *rsa.PublicKey {\n\tauth.keysMutex.RLock()\n\tif auth.keysExp.IsZero() || auth.keysExp.Before(time.Now()) || len(auth.keys) == 0 {\n\t\tauth.keysMutex.RUnlock()\n\t\tif err := auth.fetchKeys(); err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tauth.keysMutex.RLock()\n\t}\n\tdefer auth.keysMutex.RUnlock()\n\treturn auth.keys[kid]\n}\n\n\/\/ GetUser retrieves an user by user id\nfunc (auth *Auth) GetUser(uid string) (*UserRecord, error) {\n\tusers, err := auth.GetUsers([]string{uid})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn users[0], nil\n}\n\n\/\/ GetUsers retrieves users by user ids\nfunc (auth *Auth) GetUsers(userIDs []string) ([]*UserRecord, error) {\n\tr := auth.client.Relyingparty.GetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tLocalId: userIDs,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ GetUserByEmail retrieves user by email\nfunc (auth *Auth) GetUserByEmail(email string) (*UserRecord, error) {\n\tusers, err := auth.GetUsersByEmail([]string{email})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn users[0], nil\n}\n\n\/\/ GetUsersByEmail retrieves users by emails\nfunc (auth *Auth) GetUsersByEmail(emails []string) ([]*UserRecord, error) {\n\tr := auth.client.Relyingparty.GetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tEmail: emails,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ DeleteUser deletes an user by user id\nfunc (auth *Auth) DeleteUser(userID string) error {\n\tif len(userID) == 0 {\n\t\treturn ErrRequireUID\n\t}\n\n\tr := auth.client.Relyingparty.DeleteAccount(&identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{\n\t\tLocalId: userID,\n\t})\n\t_, err := r.Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (auth *Auth) createUserAutoID(user *User) (string, error) {\n\tr := auth.client.Relyingparty.SignupNewUser(&identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest{\n\t\tDisabled: user.Disabled,\n\t\tDisplayName: user.DisplayName,\n\t\tEmail: user.Email,\n\t\tEmailVerified: user.EmailVerified,\n\t\tPassword: user.Password,\n\t\tPhotoUrl: user.PhotoURL,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(resp.LocalId) == 0 {\n\t\treturn \"\", errors.New(\"firebaseauth: create account error\")\n\t}\n\treturn resp.LocalId, nil\n}\n\nfunc (auth *Auth) createUserCustomID(user *User) error {\n\tr := auth.client.Relyingparty.UploadAccount(&identitytoolkit.IdentitytoolkitRelyingpartyUploadAccountRequest{\n\t\tAllowOverwrite: false,\n\t\tSanityCheck: true,\n\t\tUsers: []*identitytoolkit.UserInfo{\n\t\t\t&identitytoolkit.UserInfo{\n\t\t\t\tLocalId: user.UserID,\n\t\t\t\tEmail: user.Email,\n\t\t\t\tEmailVerified: user.EmailVerified,\n\t\t\t\tRawPassword: user.Password,\n\t\t\t\tDisplayName: user.DisplayName,\n\t\t\t\tDisabled: user.Disabled,\n\t\t\t\tPhotoUrl: user.PhotoURL,\n\t\t\t},\n\t\t},\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resp.Error) > 0 {\n\t\treturn errors.New(\"firebaseauth: create user error\")\n\t}\n\treturn nil\n}\n\n\/\/ CreateUser creates an user\n\/\/ if not provides UserID, firebase server will auto generate\nfunc (auth *Auth) CreateUser(user *User) (*UserRecord, error) {\n\tvar err error\n\tvar userID string\n\n\tif len(user.UserID) == 0 {\n\t\tuserID, err = auth.createUserAutoID(user)\n\t} else {\n\t\tuserID = user.UserID\n\t\terr = auth.createUserCustomID(user)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := auth.GetUser(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ ListAccountCursor type\ntype ListAccountCursor struct {\n\tnextPageToken string\n\tauth *Auth\n\tMaxResults int64\n}\n\n\/\/ ListUsers creates list account cursor for retrieves accounts\n\/\/ MaxResults can change later after create cursor\nfunc (auth *Auth) ListUsers(maxResults int64) *ListAccountCursor {\n\treturn &ListAccountCursor{MaxResults: maxResults, auth: auth}\n}\n\n\/\/ Next retrieves next users from cursor which limit to MaxResults\n\/\/ then move cursor to the next users\nfunc (cursor *ListAccountCursor) Next() ([]*UserRecord, error) {\n\tr := cursor.auth.client.Relyingparty.DownloadAccount(&identitytoolkit.IdentitytoolkitRelyingpartyDownloadAccountRequest{\n\t\tMaxResults: cursor.MaxResults,\n\t\tNextPageToken: cursor.nextPageToken,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Users) == 0 {\n\t\treturn nil, iterator.Done\n\t}\n\tcursor.nextPageToken = resp.NextPageToken\n\treturn toUserRecords(resp.Users), nil\n}\n\n\/\/ UpdateUser updates an existing user\nfunc (auth *Auth) UpdateUser(user *User) (*UserRecord, error) {\n\tr := auth.client.Relyingparty.SetAccountInfo(&identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest{\n\t\tLocalId: user.UserID,\n\t\tEmail: user.Email,\n\t\tEmailVerified: user.EmailVerified,\n\t\tPassword: user.Password,\n\t\tDisplayName: user.DisplayName,\n\t\tDisableUser: user.Disabled,\n\t\tPhotoUrl: user.PhotoURL,\n\t})\n\tresp, err := r.Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := auth.GetUser(resp.LocalId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package action\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/miclle\/lisa\/msg\"\n)\n\n\/\/ Watcher func\nfunc Watcher(name, command string) {\n\tif command == \"\" {\n\t\tmsg.Info(\"lisa watching the path %s\", name)\n\t} else {\n\t\tmsg.Info(\"lisa watching the path %s then execute command: %s\", name, command)\n\t}\n\n\tif watcher, err := NewRecursiveWatcher(name, command); err != nil {\n\t\tmsg.Err(err.Error())\n\t} else {\n\t\tdefer watcher.Close()\n\t\tdone := make(chan bool)\n\t\twatcher.Run()\n\t\t<-done\n\t}\n}\n\n\/\/ RecursiveWatcher struct\ntype RecursiveWatcher struct {\n\t*fsnotify.Watcher\n\tCommand string\n}\n\n\/\/ NewRecursiveWatcher return a recursive watcher\nfunc NewRecursiveWatcher(name, command string) (*RecursiveWatcher, error) {\n\tfolders := []string{}\n\n\tif fi, err := os.Stat(name); err != nil {\n\t\tmsg.Err(\"error: %s\", err.Error())\n\t} else if fi.IsDir() {\n\t\tfolders = Subfolders(name)\n\t} else {\n\t\tfolders = append(folders, name)\n\t}\n\n\tif len(folders) == 0 {\n\t\treturn nil, errors.New(\"No folders or file to watch.\")\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trw := &RecursiveWatcher{\n\t\tWatcher: watcher,\n\t\tCommand: command,\n\t}\n\n\tfor _, folder := range folders {\n\t\trw.AddFolder(folder)\n\t}\n\treturn rw, nil\n}\n\n\/\/ AddFolder add folder to recursive watcher\nfunc (watcher *RecursiveWatcher) AddFolder(folder string) {\n\terr := watcher.Add(folder)\n\tif err != nil {\n\t\tmsg.Err(\"Error watching: %s, %s\", folder, err.Error())\n\t} else {\n\t\tmsg.Info(\"Lisa watching: %s\", folder)\n\t}\n}\n\n\/\/ execCommand execute the command\nfunc (watcher *RecursiveWatcher) execCommand() {\n\tif watcher.Command == \"\" {\n\t\treturn\n\t}\n\n\tcmd := exec.Command(watcher.Command)\n\n\tmsg.Info(strings.Join(cmd.Args, \" \"))\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tmsg.Err(err.Error())\n\t}\n\tmsg.Info(string(out))\n\n\tif cmd.ProcessState != nil && cmd.ProcessState.Success() {\n\t\tmsg.Info(\"execute the command `%s` was PASS\", watcher.Command)\n\t} else {\n\t\tmsg.Info(\"execute the command `%s` was FAIL\", watcher.Command)\n\t}\n\n\tif cmd.ProcessState != nil {\n\t\tmsg.Info(\"execute the command latency (%.2f seconds)\\n\", cmd.ProcessState.UserTime().Seconds())\n\t}\n}\n\n\/\/ Run execute the recursive watcher\nfunc (watcher *RecursiveWatcher) Run() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tif event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\tif fi, err := os.Stat(event.Name); err != nil {\n\t\t\t\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\t\t\t} else if fi.IsDir() {\n\t\t\t\t\t\tmsg.Info(\"directory created: %s\", event.Name)\n\t\t\t\t\t\tif !ignoreFile(filepath.Base(event.Name)) {\n\t\t\t\t\t\t\twatcher.AddFolder(event.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmsg.Info(\"file created: %s\", event.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Remove == fsnotify.Remove {\n\t\t\t\t\tmsg.Info(\"file remove: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\t\tmsg.Info(\"file modified: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Rename == fsnotify.Rename {\n\t\t\t\t\tmsg.Info(\"file rename: %s\", event.Name)\n\t\t\t\t}\n\t\t\t\tif event.Op&fsnotify.Chmod == fsnotify.Chmod {\n\t\t\t\t\tmsg.Info(\"file chmod: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\twatcher.execCommand()\n\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Subfolders returns a slice of subfolders (recursive), including the folder provided.\nfunc Subfolders(path string) (paths []string) {\n\tfilepath.Walk(path, func(newPath string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tname := info.Name()\n\t\t\t\/\/ skip folders that begin with a dot\n\t\t\tif ignoreFile(name) && name != \".\" && name != \"..\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tpaths = append(paths, newPath)\n\t\t}\n\t\treturn nil\n\t})\n\treturn paths\n}\n\n\/\/ ignoreFile determines if a file should be ignored.\nfunc ignoreFile(name string) bool {\n\treturn strings.HasPrefix(name, \".\")\n}\n<commit_msg>Add file path walker to watcher<commit_after>package action\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/miclle\/lisa\/msg\"\n)\n\n\/\/ Watcher func\nfunc Watcher(name, command string) {\n\tif command == \"\" {\n\t\tmsg.Info(\"lisa watching the path %s\", name)\n\t} else {\n\t\tmsg.Info(\"lisa watching the path %s then execute command: %s\", name, command)\n\t}\n\n\tif watcher, err := NewRecursiveWatcher(name, command); err != nil {\n\t\tmsg.Err(err.Error())\n\t} else {\n\t\tdefer watcher.Close()\n\t\tdone := make(chan bool)\n\t\twatcher.Run()\n\t\t<-done\n\t}\n}\n\n\/\/ RecursiveWatcher struct\ntype RecursiveWatcher struct {\n\t*fsnotify.Watcher\n\t*Walker\n\tCommand string\n}\n\n\/\/ NewRecursiveWatcher return a recursive watcher\nfunc NewRecursiveWatcher(name, command string) (*RecursiveWatcher, error) {\n\trw := &RecursiveWatcher{\n\t\tCommand: command,\n\t\tWalker: &Walker{\n\t\t\tIgnorePrefix: \".\",\n\t\t},\n\t}\n\n\tfolders := []string{}\n\n\tif fi, err := os.Stat(name); err != nil {\n\t\tmsg.Err(\"error: %s\", err.Error())\n\t} else if fi.IsDir() {\n\t\tfolders = rw.Subfolders(name)\n\t} else {\n\t\tfolders = append(folders, name)\n\t}\n\n\tif len(folders) == 0 {\n\t\treturn nil, errors.New(\"No folders or file to watch.\")\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trw.Watcher = watcher\n\n\tfor _, folder := range folders {\n\t\trw.AddFolder(folder)\n\t}\n\treturn rw, nil\n}\n\n\/\/ AddFolder add folder to recursive watcher\nfunc (watcher *RecursiveWatcher) AddFolder(folder string) {\n\terr := watcher.Add(folder)\n\tif err != nil {\n\t\tmsg.Err(\"Error watching: %s, %s\", folder, err.Error())\n\t} else {\n\t\tmsg.Info(\"Lisa watching: %s\", folder)\n\t}\n}\n\n\/\/ execCommand execute the command\nfunc (watcher *RecursiveWatcher) execCommand() {\n\tif watcher.Command == \"\" {\n\t\treturn\n\t}\n\n\tcmd := exec.Command(watcher.Command)\n\n\tmsg.Info(strings.Join(cmd.Args, \" \"))\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tmsg.Err(err.Error())\n\t}\n\tmsg.Info(string(out))\n\n\tif cmd.ProcessState != nil && cmd.ProcessState.Success() {\n\t\tmsg.Info(\"execute the command `%s` was PASS\", watcher.Command)\n\t} else {\n\t\tmsg.Info(\"execute the command `%s` was FAIL\", watcher.Command)\n\t}\n\n\tif cmd.ProcessState != nil {\n\t\tmsg.Info(\"execute the command latency (%.2f seconds)\\n\", cmd.ProcessState.UserTime().Seconds())\n\t}\n}\n\n\/\/ Run execute the recursive watcher\nfunc (watcher *RecursiveWatcher) Run() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tif event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\tif fi, err := os.Stat(event.Name); err != nil {\n\t\t\t\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\t\t\t} else if fi.IsDir() {\n\t\t\t\t\t\tmsg.Info(\"directory created: %s\", event.Name)\n\t\t\t\t\t\tif !watcher.IgnoreFile(filepath.Base(event.Name)) {\n\t\t\t\t\t\t\twatcher.AddFolder(event.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmsg.Info(\"file created: %s\", event.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Remove == fsnotify.Remove {\n\t\t\t\t\tmsg.Info(\"file remove: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\t\tmsg.Info(\"file modified: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\tif event.Op&fsnotify.Rename == fsnotify.Rename {\n\t\t\t\t\tmsg.Info(\"file rename: %s\", event.Name)\n\t\t\t\t}\n\t\t\t\tif event.Op&fsnotify.Chmod == fsnotify.Chmod {\n\t\t\t\t\tmsg.Info(\"file chmod: %s\", event.Name)\n\t\t\t\t}\n\n\t\t\t\twatcher.execCommand()\n\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Walker a file path walker\ntype Walker struct {\n\tIgnorePrefix string\n}\n\n\/\/ Subfolders returns a slice of subfolders (recursive), including the folder provided.\nfunc (walker *Walker) Subfolders(path string) (paths []string) {\n\tfilepath.Walk(path, func(newPath string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tmsg.Err(\"error: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tname := info.Name()\n\t\t\t\/\/ skip folders that begin with a dot\n\t\t\tif walker.IgnoreFile(name) && name != \".\" && name != \"..\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tpaths = append(paths, newPath)\n\t\t}\n\t\treturn nil\n\t})\n\treturn paths\n}\n\n\/\/ IgnoreFile determines if a file should be ignored.\nfunc (walker *Walker) IgnoreFile(name string) bool {\n\treturn strings.HasPrefix(name, walker.IgnorePrefix)\n}\n<|endoftext|>"} {"text":"<commit_before>package columbia\n\nimport (\n\t\"database\/sql\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/MyHomeworkSpace\/api-server\/data\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\nfunc (s *school) parseSSOLViewOptionsForm(doc *goquery.Document) (string, url.Values, error) {\n\tret := url.Values{}\n\n\tviewOptionsForm := doc.Find(\"#Content form:has(.FormGridViewOpt)\")\n\n\tfields := viewOptionsForm.Find(\"input[type=hidden], input[type=submit]\")\n\tfor i := 0; i < fields.Length(); i++ {\n\t\tfield := fields.Eq(i)\n\t\tfieldName := field.AttrOr(\"name\", \"\")\n\t\tfieldValue := field.AttrOr(\"value\", \"\")\n\n\t\tif fieldName != \"\" {\n\t\t\tret[fieldName] = []string{fieldValue}\n\t\t}\n\t}\n\n\tselections := viewOptionsForm.Find(\"select:has(option[selected])\")\n\tfor i := 0; i < selections.Length(); i++ {\n\t\tselection := selections.Eq(i)\n\t\tselectionName := selection.AttrOr(\"name\", \"\")\n\t\tselectionSelected := selection.Find(\"option[selected]\")\n\n\t\tif selectionName != \"\" {\n\t\t\tret[selectionName] = []string{selectionSelected.AttrOr(\"value\", \"\")}\n\t\t}\n\t}\n\n\treturn viewOptionsForm.AttrOr(\"action\", \"\"), ret, nil\n}\n\nfunc (s *school) parseSSOLSchedulePage(tx *sql.Tx, user *data.User, doc *goquery.Document) (string, string, error) {\n\tdataGrids := doc.Find(\".DataGrid\")\n\n\t\/\/ first data grid is user info (we just need the name)\n\t\/\/ second data grid is schedule by class\n\t\/\/ third data grid is schedule by day\n\n\t\/\/ get name from first data grid\n\tstudentName := dataGrids.Eq(0).Find(\".clsDataGridTitle td\").Text()\n\tstudentName = strings.TrimSpace(studentName)\n\tstudentName = strings.ReplaceAll(studentName, \" \", \" \")\n\n\t\/\/ get uni\n\tstudentUNI := \"\"\n\tstudentDataCells := dataGrids.Eq(0).Find(\".clsDataGridData td\")\n\tuniIsNext := false\n\tfor i := 0; i < studentDataCells.Length(); i++ {\n\t\tstudentDataCellText := strings.TrimSpace(studentDataCells.Eq(i).Text())\n\n\t\tif uniIsNext {\n\t\t\tstudentUNI = studentDataCellText\n\t\t\tbreak\n\t\t}\n\n\t\tif studentDataCellText == \"UNI:\" {\n\t\t\tuniIsNext = true\n\t\t}\n\t}\n\n\t\/\/ get class list from second data grid\n\tclassRows := dataGrids.Eq(1).Find(\"tr\")\n\n\tclassInsertStmt, err := tx.Prepare(\"INSERT INTO columbia_classes(department, number, section, name, instructorName, instructorEmail, userID) VALUES (?, ?, ?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer classInsertStmt.Close()\n\n\tclassInfoMap := map[string][]string{}\n\n\t\/\/ first three rows and last rows are to be discarded\n\tfor i := 3; i < classRows.Length()-1; i++ {\n\t\tclassRowColumns := classRows.Eq(i).Find(\"td\")\n\n\t\t\/\/ class, grading, instructor, day, time\/location, start\/end\n\t\t\/\/ we just need class and instructor\n\n\t\tclassCell := classRowColumns.Eq(0)\n\n\t\t\/\/ the raw text tells us department and number\n\t\tclassCellText := strings.ReplaceAll(strings.TrimSpace(classCell.Text()), \"\\u00A0\", \" \")\n\t\tclassParts := strings.Split(classCellText, \" \")\n\t\tclassDepartment := classParts[0]\n\t\tclassNumber := classParts[1]\n\t\tclassSection := \"\" \/\/ TODO\n\n\t\t\/\/ the text in the font tag tells us the name\n\t\tclassName := strings.TrimSpace(classCell.Find(\"font\").Text())\n\n\t\tinstructorCell := classRowColumns.Eq(2)\n\n\t\t\/\/ the text in the a tag tells us the instructor email\n\t\tclassInstructorEmail := strings.TrimSpace(instructorCell.Find(\"a\").Text())\n\n\t\t\/\/ the raw text tells us instructor name\n\t\t\/\/ this is a terrible hack and i'm sorry\n\t\tclassInstructorName := strings.ReplaceAll(strings.TrimSpace(instructorCell.Text()), \"\\u00A0\", \" \")\n\t\tclassInstructorName = strings.ReplaceAll(classInstructorName, classInstructorEmail, \"\")\n\n\t\t\/\/ set a mapping so that the meeting code can insert this info directly into the row\n\t\tclassInfoMap[className] = []string{classDepartment, classNumber, classSection}\n\n\t\t\/\/ add to db\n\t\t_, err = classInsertStmt.Exec(\n\t\t\tclassDepartment,\n\t\t\tclassNumber,\n\t\t\tclassSection,\n\t\t\tclassName,\n\t\t\tclassInstructorName,\n\t\t\tclassInstructorEmail,\n\t\t\tuser.ID,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ get meeting times from third data grid\n\tscheduleRows := dataGrids.Eq(2).Find(\"tr\")\n\n\tmeetingInsertStmt, err := tx.Prepare(\"INSERT INTO columbia_meetings(department, number, section, name, building, room, dow, start, end, beginDate, endDate, userID) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer meetingInsertStmt.Close()\n\n\t\/\/ first two rows are to be discarded\n\tcurrentDow := time.Sunday\n\tfor i := 2; i < scheduleRows.Length(); i++ {\n\t\t\/\/ each row has three possibilities:\n\t\t\/\/ 1. it's the first row of a day of the week -> 5 columns\n\t\t\/\/ 2. it's not the first row for that day of the week -> 4 columns\n\t\t\/\/ 3. it's a separator between days of the week -> 1 column\n\t\tscheduleRowColumns := scheduleRows.Eq(i).Find(\"td\")\n\t\tswitch scheduleRowColumns.Length() {\n\t\tcase 5:\n\t\t\t\/\/ first class of the day of week\n\t\t\tdowMap := map[string]time.Weekday{\n\t\t\t\t\"Sun\": time.Sunday,\n\t\t\t\t\"Mon\": time.Monday,\n\t\t\t\t\"Tue\": time.Tuesday,\n\t\t\t\t\"Wed\": time.Wednesday,\n\t\t\t\t\"Thr\": time.Thursday,\n\t\t\t\t\"Fri\": time.Friday,\n\t\t\t\t\"Sat\": time.Saturday,\n\t\t\t}\n\t\t\tdowText := strings.TrimSpace(scheduleRowColumns.Eq(0).Text())\n\t\t\tcurrentDow = dowMap[dowText]\n\n\t\t\tfallthrough\n\n\t\tcase 4:\n\t\t\t\/\/ class meeting time\n\t\t\tcolumnOffset := scheduleRowColumns.Length() - 4\n\n\t\t\ttimeText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset).Text())\n\t\t\tlocationText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 1).Text())\n\t\t\tclassNameText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 2).Text())\n\t\t\ttermText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 3).Text())\n\n\t\t\t\/\/ parse location into room and building\n\t\t\tlocationParts := strings.Split(locationText, \"\\u00A0\")\n\t\t\troomText := strings.TrimSpace(locationParts[0])\n\t\t\tbuildingText := \"\"\n\t\t\tif len(locationParts) > 1 {\n\t\t\t\tbuildingText = strings.TrimSpace(locationParts[1])\n\t\t\t}\n\n\t\t\t\/\/ parse start and end times\n\t\t\ttimeParts := strings.Split(timeText, \"-\")\n\t\t\tstartTime, err := time.Parse(\"3:04pm\", timeParts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t\tendTime, err := time.Parse(\"3:04pm\", timeParts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\t\tstartTime = startTime.AddDate(1970, 0, 0)\n\t\t\tendTime = endTime.AddDate(1970, 0, 0)\n\n\t\t\t\/\/ parse begin and end dates\n\t\t\ttermParts := strings.Split(termText, \"-\")\n\t\t\tbeginDate, err := time.Parse(\"01\/02\/06\", termParts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t\tendDate, err := time.Parse(\"01\/02\/06\", termParts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\t\t\/\/ get info from map\n\t\t\tclassInfo := classInfoMap[classNameText]\n\n\t\t\t\/\/ add to db\n\t\t\t_, err = meetingInsertStmt.Exec(\n\t\t\t\tclassInfo[0],\n\t\t\t\tclassInfo[1],\n\t\t\t\tclassInfo[2],\n\t\t\t\tclassNameText,\n\t\t\t\tbuildingText,\n\t\t\t\troomText,\n\t\t\t\tcurrentDow,\n\t\t\t\tstartTime.Unix(),\n\t\t\t\tendTime.Unix(),\n\t\t\t\tbeginDate.Format(\"2006-01-02\"),\n\t\t\t\tendDate.Format(\"2006-01-02\"),\n\t\t\t\tuser.ID,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\tcase 1:\n\t\t\t\/\/ separator\n\t\t}\n\t}\n\n\treturn studentName, studentUNI, nil\n}\n<commit_msg>schools\/columbia: parse out section information<commit_after>package columbia\n\nimport (\n\t\"database\/sql\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/MyHomeworkSpace\/api-server\/data\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\nfunc (s *school) parseSSOLViewOptionsForm(doc *goquery.Document) (string, url.Values, error) {\n\tret := url.Values{}\n\n\tviewOptionsForm := doc.Find(\"#Content form:has(.FormGridViewOpt)\")\n\n\tfields := viewOptionsForm.Find(\"input[type=hidden], input[type=submit]\")\n\tfor i := 0; i < fields.Length(); i++ {\n\t\tfield := fields.Eq(i)\n\t\tfieldName := field.AttrOr(\"name\", \"\")\n\t\tfieldValue := field.AttrOr(\"value\", \"\")\n\n\t\tif fieldName != \"\" {\n\t\t\tret[fieldName] = []string{fieldValue}\n\t\t}\n\t}\n\n\tselections := viewOptionsForm.Find(\"select:has(option[selected])\")\n\tfor i := 0; i < selections.Length(); i++ {\n\t\tselection := selections.Eq(i)\n\t\tselectionName := selection.AttrOr(\"name\", \"\")\n\t\tselectionSelected := selection.Find(\"option[selected]\")\n\n\t\tif selectionName != \"\" {\n\t\t\tret[selectionName] = []string{selectionSelected.AttrOr(\"value\", \"\")}\n\t\t}\n\t}\n\n\treturn viewOptionsForm.AttrOr(\"action\", \"\"), ret, nil\n}\n\nfunc (s *school) parseSSOLSchedulePage(tx *sql.Tx, user *data.User, doc *goquery.Document) (string, string, error) {\n\tdataGrids := doc.Find(\".DataGrid\")\n\n\t\/\/ first data grid is user info (we just need the name)\n\t\/\/ second data grid is schedule by class\n\t\/\/ third data grid is schedule by day\n\n\t\/\/ get name from first data grid\n\tstudentName := dataGrids.Eq(0).Find(\".clsDataGridTitle td\").Text()\n\tstudentName = strings.TrimSpace(studentName)\n\tstudentName = strings.ReplaceAll(studentName, \" \", \" \")\n\n\t\/\/ get uni\n\tstudentUNI := \"\"\n\tstudentDataCells := dataGrids.Eq(0).Find(\".clsDataGridData td\")\n\tuniIsNext := false\n\tfor i := 0; i < studentDataCells.Length(); i++ {\n\t\tstudentDataCellText := strings.TrimSpace(studentDataCells.Eq(i).Text())\n\n\t\tif uniIsNext {\n\t\t\tstudentUNI = studentDataCellText\n\t\t\tbreak\n\t\t}\n\n\t\tif studentDataCellText == \"UNI:\" {\n\t\t\tuniIsNext = true\n\t\t}\n\t}\n\n\t\/\/ get class list from second data grid\n\tclassRows := dataGrids.Eq(1).Find(\"tr\")\n\n\tclassInsertStmt, err := tx.Prepare(\"INSERT INTO columbia_classes(department, number, section, name, instructorName, instructorEmail, userID) VALUES (?, ?, ?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer classInsertStmt.Close()\n\n\tclassInfoMap := map[string][]string{}\n\n\t\/\/ first three rows and last rows are to be discarded\n\tfor i := 3; i < classRows.Length()-1; i++ {\n\t\tclassRowColumns := classRows.Eq(i).Find(\"td\")\n\n\t\t\/\/ class, grading, instructor, day, time\/location, start\/end\n\t\t\/\/ we just need class and instructor\n\n\t\tclassCell := classRowColumns.Eq(0)\n\n\t\t\/\/ the raw text tells us department and number\n\t\tclassCellText := strings.ReplaceAll(strings.TrimSpace(classCell.Text()), \"\\u00A0\", \" \")\n\t\tclassParts := strings.Split(classCellText, \" \")\n\t\tclassDepartment := classParts[0]\n\t\tclassNumber := classParts[1]\n\n\t\t\/\/ the text in the font tag tells us the name\n\t\tclassName := strings.TrimSpace(classCell.Find(\"font\").Text())\n\n\t\t\/\/ parse out the section\n\t\tclassInfo := strings.ReplaceAll(classCellText, className, \"\")\n\t\tclassSectionParts := strings.Split(classInfo, \"sec\")\n\t\tclassSectionInfo := strings.TrimSpace(classSectionParts[len(classSectionParts)-1])\n\t\tclassSection := strings.Split(classSectionInfo, \" \")[0]\n\n\t\tinstructorCell := classRowColumns.Eq(2)\n\n\t\t\/\/ the text in the a tag tells us the instructor email\n\t\tclassInstructorEmail := strings.TrimSpace(instructorCell.Find(\"a\").Text())\n\n\t\t\/\/ the raw text tells us instructor name\n\t\t\/\/ this is a terrible hack and i'm sorry\n\t\tclassInstructorName := strings.ReplaceAll(strings.TrimSpace(instructorCell.Text()), \"\\u00A0\", \" \")\n\t\tclassInstructorName = strings.ReplaceAll(classInstructorName, classInstructorEmail, \"\")\n\n\t\t\/\/ set a mapping so that the meeting code can insert this info directly into the row\n\t\tclassInfoMap[className] = []string{classDepartment, classNumber, classSection}\n\n\t\t\/\/ add to db\n\t\t_, err = classInsertStmt.Exec(\n\t\t\tclassDepartment,\n\t\t\tclassNumber,\n\t\t\tclassSection,\n\t\t\tclassName,\n\t\t\tclassInstructorName,\n\t\t\tclassInstructorEmail,\n\t\t\tuser.ID,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ get meeting times from third data grid\n\tscheduleRows := dataGrids.Eq(2).Find(\"tr\")\n\n\tmeetingInsertStmt, err := tx.Prepare(\"INSERT INTO columbia_meetings(department, number, section, name, building, room, dow, start, end, beginDate, endDate, userID) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer meetingInsertStmt.Close()\n\n\t\/\/ first two rows are to be discarded\n\tcurrentDow := time.Sunday\n\tfor i := 2; i < scheduleRows.Length(); i++ {\n\t\t\/\/ each row has three possibilities:\n\t\t\/\/ 1. it's the first row of a day of the week -> 5 columns\n\t\t\/\/ 2. it's not the first row for that day of the week -> 4 columns\n\t\t\/\/ 3. it's a separator between days of the week -> 1 column\n\t\tscheduleRowColumns := scheduleRows.Eq(i).Find(\"td\")\n\t\tswitch scheduleRowColumns.Length() {\n\t\tcase 5:\n\t\t\t\/\/ first class of the day of week\n\t\t\tdowMap := map[string]time.Weekday{\n\t\t\t\t\"Sun\": time.Sunday,\n\t\t\t\t\"Mon\": time.Monday,\n\t\t\t\t\"Tue\": time.Tuesday,\n\t\t\t\t\"Wed\": time.Wednesday,\n\t\t\t\t\"Thr\": time.Thursday,\n\t\t\t\t\"Fri\": time.Friday,\n\t\t\t\t\"Sat\": time.Saturday,\n\t\t\t}\n\t\t\tdowText := strings.TrimSpace(scheduleRowColumns.Eq(0).Text())\n\t\t\tcurrentDow = dowMap[dowText]\n\n\t\t\tfallthrough\n\n\t\tcase 4:\n\t\t\t\/\/ class meeting time\n\t\t\tcolumnOffset := scheduleRowColumns.Length() - 4\n\n\t\t\ttimeText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset).Text())\n\t\t\tlocationText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 1).Text())\n\t\t\tclassNameText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 2).Text())\n\t\t\ttermText := strings.TrimSpace(scheduleRowColumns.Eq(columnOffset + 3).Text())\n\n\t\t\t\/\/ parse location into room and building\n\t\t\tlocationParts := strings.Split(locationText, \"\\u00A0\")\n\t\t\troomText := strings.TrimSpace(locationParts[0])\n\t\t\tbuildingText := \"\"\n\t\t\tif len(locationParts) > 1 {\n\t\t\t\tbuildingText = strings.TrimSpace(locationParts[1])\n\t\t\t}\n\n\t\t\t\/\/ parse start and end times\n\t\t\ttimeParts := strings.Split(timeText, \"-\")\n\t\t\tstartTime, err := time.Parse(\"3:04pm\", timeParts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t\tendTime, err := time.Parse(\"3:04pm\", timeParts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\t\tstartTime = startTime.AddDate(1970, 0, 0)\n\t\t\tendTime = endTime.AddDate(1970, 0, 0)\n\n\t\t\t\/\/ parse begin and end dates\n\t\t\ttermParts := strings.Split(termText, \"-\")\n\t\t\tbeginDate, err := time.Parse(\"01\/02\/06\", termParts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t\tendDate, err := time.Parse(\"01\/02\/06\", termParts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\t\t\/\/ get info from map\n\t\t\tclassInfo := classInfoMap[classNameText]\n\n\t\t\t\/\/ add to db\n\t\t\t_, err = meetingInsertStmt.Exec(\n\t\t\t\tclassInfo[0],\n\t\t\t\tclassInfo[1],\n\t\t\t\tclassInfo[2],\n\t\t\t\tclassNameText,\n\t\t\t\tbuildingText,\n\t\t\t\troomText,\n\t\t\t\tcurrentDow,\n\t\t\t\tstartTime.Unix(),\n\t\t\t\tendTime.Unix(),\n\t\t\t\tbeginDate.Format(\"2006-01-02\"),\n\t\t\t\tendDate.Format(\"2006-01-02\"),\n\t\t\t\tuser.ID,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\n\t\tcase 1:\n\t\t\t\/\/ separator\n\t\t}\n\t}\n\n\treturn studentName, studentUNI, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package users\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/racker\/perigee\"\n\t\"github.com\/rackspace\/gophercloud\"\n\t\"github.com\/rackspace\/gophercloud\/pagination\"\n)\n\nfunc List(client *gophercloud.ServiceClient) pagination.Pager {\n\tcreatePage := func(r pagination.PageResult) pagination.Page {\n\t\treturn UserPage{pagination.SinglePageBase(r)}\n\t}\n\n\treturn pagination.NewPager(client, rootURL(client), createPage)\n}\n\n\/\/ EnabledState represents whether the user is enabled or not.\ntype EnabledState *bool\n\n\/\/ Useful variables to use when creating or updating users.\nvar (\n\tiTrue = true\n\tiFalse = false\n\n\tEnabled EnabledState = &iTrue\n\tDisabled EnabledState = &iFalse\n)\n\ntype CommonOpts struct {\n\t\/\/ Either a name or username is required. When provided, the value must be\n\t\/\/ unique or a 409 conflict error will be returned. If you provide a name but\n\t\/\/ omit a username, the latter will be set to the former; and vice versa.\n\tName, Username string\n\n\t\/\/ The ID of the tenant to which you want to assign this user.\n\tTenantID string\n\n\t\/\/ Indicates whether this user is enabled or not.\n\tEnabled EnabledState\n\n\t\/\/ The email address of this user.\n\tEmail string\n}\n\n\/\/ CreateOpts represents the options needed when creating new users.\ntype CreateOpts CommonOpts\n\n\/\/ CreateOptsBuilder describes struct types that can be accepted by the Create call.\ntype CreateOptsBuilder interface {\n\tToUserCreateMap() (map[string]interface{}, error)\n}\n\n\/\/ ToUserCreateMap assembles a request body based on the contents of a CreateOpts.\nfunc (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\n\tif opts.Name == \"\" && opts.Username == \"\" {\n\t\treturn m, errors.New(\"Either a Name or Username must be provided\")\n\t}\n\n\tif opts.Name != \"\" {\n\t\tm[\"name\"] = opts.Name\n\t}\n\tif opts.Username != \"\" {\n\t\tm[\"username\"] = opts.Username\n\t}\n\tif opts.Enabled != nil {\n\t\tm[\"enabled\"] = &opts.Enabled\n\t}\n\tif opts.Email != \"\" {\n\t\tm[\"email\"] = opts.Email\n\t}\n\tif opts.TenantID != \"\" {\n\t\tm[\"tenant_id\"] = opts.TenantID\n\t}\n\n\treturn map[string]interface{}{\"user\": m}, nil\n}\n\n\/\/ Create is the operation responsible for creating new users.\nfunc Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {\n\tvar res CreateResult\n\n\treqBody, err := opts.ToUserCreateMap()\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\n\t_, res.Err = perigee.Request(\"POST\", rootURL(client), perigee.Options{\n\t\tResults: &res.Body,\n\t\tReqBody: reqBody,\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200, 201},\n\t})\n\n\treturn res\n}\n\n\/\/ Get requests details on a single user, either by ID.\nfunc Get(client *gophercloud.ServiceClient, id string) GetResult {\n\tvar result GetResult\n\n\t_, result.Err = perigee.Request(\"GET\", ResourceURL(client, id), perigee.Options{\n\t\tResults: &result.Body,\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200},\n\t})\n\n\treturn result\n}\n\n\/\/ UpdateOptsBuilder allows extentions to add additional attributes to the Update request.\ntype UpdateOptsBuilder interface {\n\tToUserUpdateMap() map[string]interface{}\n}\n\n\/\/ UpdateOpts specifies the base attributes that may be updated on an existing server.\ntype UpdateOpts CommonOpts\n\n\/\/ ToUserUpdateMap formats an UpdateOpts structure into a request body.\nfunc (opts UpdateOpts) ToUserUpdateMap() map[string]interface{} {\n\tm := make(map[string]interface{})\n\n\tif opts.Name != \"\" {\n\t\tm[\"name\"] = opts.Name\n\t}\n\tif opts.Username != \"\" {\n\t\tm[\"username\"] = opts.Username\n\t}\n\tif opts.Enabled != nil {\n\t\tm[\"enabled\"] = &opts.Enabled\n\t}\n\tif opts.Email != \"\" {\n\t\tm[\"email\"] = opts.Email\n\t}\n\tif opts.TenantID != \"\" {\n\t\tm[\"tenant_id\"] = opts.TenantID\n\t}\n\n\treturn map[string]interface{}{\"user\": m}\n}\n\n\/\/ Update is the operation responsible for updating exist users by their UUID.\nfunc Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {\n\tvar result UpdateResult\n\n\t_, result.Err = perigee.Request(\"PUT\", ResourceURL(client, id), perigee.Options{\n\t\tResults: &result.Body,\n\t\tReqBody: opts.ToUserUpdateMap(),\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200},\n\t})\n\n\treturn result\n}\n\n\/\/ Delete is the operation responsible for permanently deleting an API user.\nfunc Delete(client *gophercloud.ServiceClient, id string) DeleteResult {\n\tvar result DeleteResult\n\n\t_, result.Err = perigee.Request(\"DELETE\", ResourceURL(client, id), perigee.Options{\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{204},\n\t})\n\n\treturn result\n}\n\nfunc ListRoles(client *gophercloud.ServiceClient, tenantID, userID string) pagination.Pager {\n\tcreatePage := func(r pagination.PageResult) pagination.Page {\n\t\treturn RolePage{pagination.SinglePageBase(r)}\n\t}\n\n\treturn pagination.NewPager(client, listRolesURL(client, tenantID, userID), createPage)\n}\n<commit_msg>Explanatory comment<commit_after>package users\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/racker\/perigee\"\n\t\"github.com\/rackspace\/gophercloud\"\n\t\"github.com\/rackspace\/gophercloud\/pagination\"\n)\n\nfunc List(client *gophercloud.ServiceClient) pagination.Pager {\n\tcreatePage := func(r pagination.PageResult) pagination.Page {\n\t\treturn UserPage{pagination.SinglePageBase(r)}\n\t}\n\n\treturn pagination.NewPager(client, rootURL(client), createPage)\n}\n\n\/\/ EnabledState represents whether the user is enabled or not.\ntype EnabledState *bool\n\n\/\/ Useful variables to use when creating or updating users.\nvar (\n\tiTrue = true\n\tiFalse = false\n\n\tEnabled EnabledState = &iTrue\n\tDisabled EnabledState = &iFalse\n)\n\n\/\/ CommonOpts are the parameters that are shared between CreateOpts and\n\/\/ UpdateOpts\ntype CommonOpts struct {\n\t\/\/ Either a name or username is required. When provided, the value must be\n\t\/\/ unique or a 409 conflict error will be returned. If you provide a name but\n\t\/\/ omit a username, the latter will be set to the former; and vice versa.\n\tName, Username string\n\n\t\/\/ The ID of the tenant to which you want to assign this user.\n\tTenantID string\n\n\t\/\/ Indicates whether this user is enabled or not.\n\tEnabled EnabledState\n\n\t\/\/ The email address of this user.\n\tEmail string\n}\n\n\/\/ CreateOpts represents the options needed when creating new users.\ntype CreateOpts CommonOpts\n\n\/\/ CreateOptsBuilder describes struct types that can be accepted by the Create call.\ntype CreateOptsBuilder interface {\n\tToUserCreateMap() (map[string]interface{}, error)\n}\n\n\/\/ ToUserCreateMap assembles a request body based on the contents of a CreateOpts.\nfunc (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\n\tif opts.Name == \"\" && opts.Username == \"\" {\n\t\treturn m, errors.New(\"Either a Name or Username must be provided\")\n\t}\n\n\tif opts.Name != \"\" {\n\t\tm[\"name\"] = opts.Name\n\t}\n\tif opts.Username != \"\" {\n\t\tm[\"username\"] = opts.Username\n\t}\n\tif opts.Enabled != nil {\n\t\tm[\"enabled\"] = &opts.Enabled\n\t}\n\tif opts.Email != \"\" {\n\t\tm[\"email\"] = opts.Email\n\t}\n\tif opts.TenantID != \"\" {\n\t\tm[\"tenant_id\"] = opts.TenantID\n\t}\n\n\treturn map[string]interface{}{\"user\": m}, nil\n}\n\n\/\/ Create is the operation responsible for creating new users.\nfunc Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {\n\tvar res CreateResult\n\n\treqBody, err := opts.ToUserCreateMap()\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\n\t_, res.Err = perigee.Request(\"POST\", rootURL(client), perigee.Options{\n\t\tResults: &res.Body,\n\t\tReqBody: reqBody,\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200, 201},\n\t})\n\n\treturn res\n}\n\n\/\/ Get requests details on a single user, either by ID.\nfunc Get(client *gophercloud.ServiceClient, id string) GetResult {\n\tvar result GetResult\n\n\t_, result.Err = perigee.Request(\"GET\", ResourceURL(client, id), perigee.Options{\n\t\tResults: &result.Body,\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200},\n\t})\n\n\treturn result\n}\n\n\/\/ UpdateOptsBuilder allows extentions to add additional attributes to the Update request.\ntype UpdateOptsBuilder interface {\n\tToUserUpdateMap() map[string]interface{}\n}\n\n\/\/ UpdateOpts specifies the base attributes that may be updated on an existing server.\ntype UpdateOpts CommonOpts\n\n\/\/ ToUserUpdateMap formats an UpdateOpts structure into a request body.\nfunc (opts UpdateOpts) ToUserUpdateMap() map[string]interface{} {\n\tm := make(map[string]interface{})\n\n\tif opts.Name != \"\" {\n\t\tm[\"name\"] = opts.Name\n\t}\n\tif opts.Username != \"\" {\n\t\tm[\"username\"] = opts.Username\n\t}\n\tif opts.Enabled != nil {\n\t\tm[\"enabled\"] = &opts.Enabled\n\t}\n\tif opts.Email != \"\" {\n\t\tm[\"email\"] = opts.Email\n\t}\n\tif opts.TenantID != \"\" {\n\t\tm[\"tenant_id\"] = opts.TenantID\n\t}\n\n\treturn map[string]interface{}{\"user\": m}\n}\n\n\/\/ Update is the operation responsible for updating exist users by their UUID.\nfunc Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {\n\tvar result UpdateResult\n\n\t_, result.Err = perigee.Request(\"PUT\", ResourceURL(client, id), perigee.Options{\n\t\tResults: &result.Body,\n\t\tReqBody: opts.ToUserUpdateMap(),\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{200},\n\t})\n\n\treturn result\n}\n\n\/\/ Delete is the operation responsible for permanently deleting an API user.\nfunc Delete(client *gophercloud.ServiceClient, id string) DeleteResult {\n\tvar result DeleteResult\n\n\t_, result.Err = perigee.Request(\"DELETE\", ResourceURL(client, id), perigee.Options{\n\t\tMoreHeaders: client.AuthenticatedHeaders(),\n\t\tOkCodes: []int{204},\n\t})\n\n\treturn result\n}\n\nfunc ListRoles(client *gophercloud.ServiceClient, tenantID, userID string) pagination.Pager {\n\tcreatePage := func(r pagination.PageResult) pagination.Page {\n\t\treturn RolePage{pagination.SinglePageBase(r)}\n\t}\n\n\treturn pagination.NewPager(client, listRolesURL(client, tenantID, userID), createPage)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"sync\"\n\n\t\"github.com\/tsuru\/docker-cluster\/cluster\"\n\t\"github.com\/tsuru\/tsuru\/action\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar (\n\tappDBMutex sync.Mutex\n\tlogMutex sync.Mutex\n)\n\ntype progressLog struct {\n\tMessage string\n}\n\ntype appLocker struct {\n\trefCount map[string]int\n}\n\nfunc (l *appLocker) lock(appName string) bool {\n\tappDBMutex.Lock()\n\tdefer appDBMutex.Unlock()\n\tif l.refCount == nil {\n\t\tl.refCount = make(map[string]int)\n\t}\n\tif l.refCount[appName] > 0 {\n\t\tl.refCount[appName]++\n\t\treturn true\n\t}\n\tok, err := app.AcquireApplicationLock(appName, app.InternalAppName, \"container-move\")\n\tif err != nil || !ok {\n\t\treturn false\n\t}\n\tl.refCount[appName]++\n\treturn true\n}\n\nfunc (l *appLocker) unlock(appName string) {\n\tappDBMutex.Lock()\n\tdefer appDBMutex.Unlock()\n\tif l.refCount == nil {\n\t\treturn\n\t}\n\tl.refCount[appName]--\n\tif l.refCount[appName] <= 0 {\n\t\tl.refCount[appName] = 0\n\t\tapp.ReleaseApplicationLock(appName)\n\t}\n}\n\nvar containerMovementErr = errors.New(\"Error moving some containers.\")\n\nfunc logProgress(encoder *json.Encoder, format string, params ...interface{}) {\n\tlogMutex.Lock()\n\tdefer logMutex.Unlock()\n\tencoder.Encode(progressLog{Message: fmt.Sprintf(format, params...)})\n}\n\nfunc handleMoveErrors(moveErrors chan error, encoder *json.Encoder) error {\n\thasError := false\n\tfor err := range moveErrors {\n\t\terrMsg := fmt.Sprintf(\"Error moving container: %s\", err.Error())\n\t\tlog.Errorf(errMsg)\n\t\tlogProgress(encoder, errMsg)\n\t\thasError = true\n\t}\n\tif hasError {\n\t\treturn containerMovementErr\n\t}\n\treturn nil\n}\n\nfunc runReplaceUnitsPipeline(w io.Writer, a provision.App, toRemoveContainers []container, toHosts ...string) ([]container, error) {\n\tvar toHost string\n\tif len(toHosts) > 0 {\n\t\ttoHost = toHosts[0]\n\t}\n\tif w == nil {\n\t\tw = ioutil.Discard\n\t}\n\targs := changeUnitsPipelineArgs{\n\t\tapp: a,\n\t\ttoRemove: toRemoveContainers,\n\t\tunitsToAdd: len(toRemoveContainers),\n\t\ttoHost: toHost,\n\t\twriter: w,\n\t}\n\tpipeline := action.NewPipeline(\n\t\t&provisionAddUnitsToHost,\n\t\t&addNewRoutes,\n\t\t&removeOldRoutes,\n\t\t&provisionRemoveOldUnits,\n\t)\n\terr := pipeline.Execute(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = removeImage(assembleImageName(a.GetName()))\n\tif err != nil {\n\t\tlog.Debugf(\"Ignored error removing old images: %s\", err.Error())\n\t}\n\treturn pipeline.Result().([]container), nil\n}\n\nfunc runCreateUnitsPipeline(w io.Writer, a provision.App, toAddCount int) ([]container, error) {\n\tif w == nil {\n\t\tw = ioutil.Discard\n\t}\n\targs := changeUnitsPipelineArgs{\n\t\tapp: a,\n\t\tunitsToAdd: toAddCount,\n\t\twriter: w,\n\t}\n\tpipeline := action.NewPipeline(\n\t\t&provisionAddUnitsToHost,\n\t\t&addNewRoutes,\n\t)\n\terr := pipeline.Execute(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pipeline.Result().([]container), nil\n}\n\nfunc moveOneContainer(c container, toHost string, errors chan error, wg *sync.WaitGroup, encoder *json.Encoder, locker *appLocker) container {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tlocked := locker.lock(c.AppName)\n\tif !locked {\n\t\terrors <- fmt.Errorf(\"Couldn't move %s, unable to lock %q.\", c.ID, c.AppName)\n\t\treturn container{}\n\t}\n\tdefer locker.unlock(c.AppName)\n\ta, err := app.GetByName(c.AppName)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error getting app %q for unit %s.\", c.AppName, c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Moving unit %s for %q: %s -> %s...\", c.ID, c.AppName, c.HostAddr, toHost)\n\taddedContainers, err := runReplaceUnitsPipeline(nil, a, []container{c}, toHost)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error moving unit %s.\", c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Finished moving unit %s for %q.\", c.ID, c.AppName)\n\taddedUnit := addedContainers[0].asUnit(a)\n\terr = a.BindUnit(&addedUnit)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error binding unit %s to service instances.\", c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Moved unit %s -> %s for %s with bindings.\", c.ID, addedUnit.Name, c.AppName)\n\treturn addedContainers[0]\n}\n\nfunc moveContainer(contId string, toHost string, encoder *json.Encoder) (container, error) {\n\tcont, err := getContainer(contId)\n\tif err != nil {\n\t\treturn container{}, err\n\t}\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tmoveErrors := make(chan error, 1)\n\tlocker := &appLocker{}\n\tcreatedContainer := moveOneContainer(*cont, toHost, moveErrors, &wg, encoder, locker)\n\tclose(moveErrors)\n\treturn createdContainer, handleMoveErrors(moveErrors, encoder)\n}\n\nfunc moveContainers(fromHost, toHost string, encoder *json.Encoder) error {\n\tcontainers, err := listContainersByHost(fromHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumberContainers := len(containers)\n\tif numberContainers == 0 {\n\t\tlogProgress(encoder, \"No units to move in %s.\", fromHost)\n\t\treturn nil\n\t}\n\tlogProgress(encoder, \"Moving %d units...\", numberContainers)\n\tlocker := &appLocker{}\n\tmoveErrors := make(chan error, numberContainers)\n\twg := sync.WaitGroup{}\n\twg.Add(numberContainers)\n\tfor _, c := range containers {\n\t\tgo moveOneContainer(c, toHost, moveErrors, &wg, encoder, locker)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(moveErrors)\n\t}()\n\treturn handleMoveErrors(moveErrors, encoder)\n}\n\ntype hostWithContainers struct {\n\tHostAddr string `bson:\"_id\"`\n\tCount int\n\tContainers []container\n}\n\nfunc minCountHost(hosts []hostWithContainers) *hostWithContainers {\n\tvar minCountHost *hostWithContainers\n\tminCount := math.MaxInt32\n\tfor i, dest := range hosts {\n\t\tif dest.Count < minCount {\n\t\t\tminCount = dest.Count\n\t\t\tminCountHost = &hosts[i]\n\t\t}\n\t}\n\treturn minCountHost\n}\n\nfunc rebalanceContainers(encoder *json.Encoder, dryRun bool) error {\n\tcoll := collection()\n\tdefer coll.Close()\n\tfullDocQuery := bson.M{\n\t\t\/\/ Could use $$ROOT instead of repeating fields but only in Mongo 2.6+.\n\t\t\"_id\": \"$_id\",\n\t\t\"id\": \"$id\",\n\t\t\"name\": \"$name\",\n\t\t\"appname\": \"$appname\",\n\t\t\"type\": \"$type\",\n\t\t\"ip\": \"$ip\",\n\t\t\"image\": \"$image\",\n\t\t\"hostaddr\": \"$hostaddr\",\n\t\t\"hostport\": \"$hostport\",\n\t\t\"status\": \"$status\",\n\t\t\"version\": \"$version\",\n\t}\n\tappsPipe := coll.Pipe([]bson.M{\n\t\t{\"$group\": bson.M{\"_id\": \"$appname\", \"count\": bson.M{\"$sum\": 1}}},\n\t})\n\tvar appsInfo []struct {\n\t\tName string `bson:\"_id\"`\n\t\tCount int\n\t}\n\terr := appsPipe.All(&appsInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclusterInstance := dockerCluster()\n\tfor _, appInfo := range appsInfo {\n\t\tif appInfo.Count < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tlogProgress(encoder, \"Rebalancing app %q (%d units)...\", appInfo.Name, appInfo.Count)\n\t\tvar possibleDests []cluster.Node\n\t\tif isSegregateScheduler() {\n\t\t\tpossibleDests, err = nodesForAppName(clusterInstance, appInfo.Name)\n\t\t} else {\n\t\t\tpossibleDests, err = clusterInstance.Nodes()\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmaxContPerUnit := appInfo.Count \/ len(possibleDests)\n\t\toverflowHosts := appInfo.Count % len(possibleDests)\n\t\tpipe := coll.Pipe([]bson.M{\n\t\t\t{\"$match\": bson.M{\"hostaddr\": bson.M{\"$ne\": \"\"}, \"appname\": appInfo.Name}},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": \"$hostaddr\",\n\t\t\t\t\"count\": bson.M{\"$sum\": 1},\n\t\t\t\t\"containers\": bson.M{\"$push\": fullDocQuery}}},\n\t\t})\n\t\tvar hosts []hostWithContainers\n\t\thostsSet := make(map[string]bool)\n\t\terr = pipe.All(&hosts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, host := range hosts {\n\t\t\thostsSet[host.HostAddr] = true\n\t\t}\n\t\tfor _, node := range possibleDests {\n\t\t\thostAddr := urlToHost(node.Address)\n\t\t\t_, present := hostsSet[hostAddr]\n\t\t\tif !present {\n\t\t\t\thosts = append(hosts, hostWithContainers{HostAddr: hostAddr})\n\t\t\t}\n\t\t}\n\t\tanythingDone := false\n\t\tfor _, host := range hosts {\n\t\t\ttoMoveCount := host.Count - maxContPerUnit\n\t\t\tif toMoveCount <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif overflowHosts > 0 {\n\t\t\t\toverflowHosts--\n\t\t\t\ttoMoveCount--\n\t\t\t\tif toMoveCount <= 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tanythingDone = true\n\t\t\tlogProgress(encoder, \"Trying to move %d units for %q from %s...\", toMoveCount, appInfo.Name, host.HostAddr)\n\t\t\tlocker := &appLocker{}\n\t\t\twg := sync.WaitGroup{}\n\t\t\tmoveErrors := make(chan error, toMoveCount)\n\t\t\tfor _, cont := range host.Containers {\n\t\t\t\tminDest := minCountHost(hosts)\n\t\t\t\tif minDest.Count < maxContPerUnit {\n\t\t\t\t\ttoMoveCount--\n\t\t\t\t\tminDest.Count++\n\t\t\t\t\tif dryRun {\n\t\t\t\t\t\tlogProgress(encoder, \"Would move unit %s for %q: %s -> %s...\", cont.ID, appInfo.Name, cont.HostAddr, minDest.HostAddr)\n\t\t\t\t\t} else {\n\t\t\t\t\t\twg.Add(1)\n\t\t\t\t\t\tgo moveOneContainer(cont, minDest.HostAddr, moveErrors, &wg, encoder, locker)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif toMoveCount == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif toMoveCount > 0 {\n\t\t\t\tlogProgress(encoder, \"Couldn't find suitable destination for %d units for app %q\", toMoveCount, appInfo.Name)\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\twg.Wait()\n\t\t\t\tclose(moveErrors)\n\t\t\t}()\n\t\t\terr := handleMoveErrors(moveErrors, encoder)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif anythingDone {\n\t\t\tlogProgress(encoder, \"Rebalance finished for %q\", appInfo.Name)\n\t\t} else {\n\t\t\tlogProgress(encoder, \"Nothing to do for %q\", appInfo.Name)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>provision\/docker: containers move log without interpolation<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"sync\"\n\n\t\"github.com\/tsuru\/docker-cluster\/cluster\"\n\t\"github.com\/tsuru\/tsuru\/action\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar (\n\tappDBMutex sync.Mutex\n\tlogMutex sync.Mutex\n)\n\ntype progressLog struct {\n\tMessage string\n}\n\ntype appLocker struct {\n\trefCount map[string]int\n}\n\nfunc (l *appLocker) lock(appName string) bool {\n\tappDBMutex.Lock()\n\tdefer appDBMutex.Unlock()\n\tif l.refCount == nil {\n\t\tl.refCount = make(map[string]int)\n\t}\n\tif l.refCount[appName] > 0 {\n\t\tl.refCount[appName]++\n\t\treturn true\n\t}\n\tok, err := app.AcquireApplicationLock(appName, app.InternalAppName, \"container-move\")\n\tif err != nil || !ok {\n\t\treturn false\n\t}\n\tl.refCount[appName]++\n\treturn true\n}\n\nfunc (l *appLocker) unlock(appName string) {\n\tappDBMutex.Lock()\n\tdefer appDBMutex.Unlock()\n\tif l.refCount == nil {\n\t\treturn\n\t}\n\tl.refCount[appName]--\n\tif l.refCount[appName] <= 0 {\n\t\tl.refCount[appName] = 0\n\t\tapp.ReleaseApplicationLock(appName)\n\t}\n}\n\nvar containerMovementErr = errors.New(\"Error moving some containers.\")\n\nfunc logProgress(encoder *json.Encoder, format string, params ...interface{}) {\n\tlogMutex.Lock()\n\tdefer logMutex.Unlock()\n\tencoder.Encode(progressLog{Message: fmt.Sprintf(format, params...)})\n}\n\nfunc handleMoveErrors(moveErrors chan error, encoder *json.Encoder) error {\n\thasError := false\n\tfor err := range moveErrors {\n\t\terrMsg := fmt.Sprintf(\"Error moving container: %s\", err.Error())\n\t\tlog.Error(errMsg)\n\t\tlogProgress(encoder, \"%s\", errMsg)\n\t\thasError = true\n\t}\n\tif hasError {\n\t\treturn containerMovementErr\n\t}\n\treturn nil\n}\n\nfunc runReplaceUnitsPipeline(w io.Writer, a provision.App, toRemoveContainers []container, toHosts ...string) ([]container, error) {\n\tvar toHost string\n\tif len(toHosts) > 0 {\n\t\ttoHost = toHosts[0]\n\t}\n\tif w == nil {\n\t\tw = ioutil.Discard\n\t}\n\targs := changeUnitsPipelineArgs{\n\t\tapp: a,\n\t\ttoRemove: toRemoveContainers,\n\t\tunitsToAdd: len(toRemoveContainers),\n\t\ttoHost: toHost,\n\t\twriter: w,\n\t}\n\tpipeline := action.NewPipeline(\n\t\t&provisionAddUnitsToHost,\n\t\t&addNewRoutes,\n\t\t&removeOldRoutes,\n\t\t&provisionRemoveOldUnits,\n\t)\n\terr := pipeline.Execute(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = removeImage(assembleImageName(a.GetName()))\n\tif err != nil {\n\t\tlog.Debugf(\"Ignored error removing old images: %s\", err.Error())\n\t}\n\treturn pipeline.Result().([]container), nil\n}\n\nfunc runCreateUnitsPipeline(w io.Writer, a provision.App, toAddCount int) ([]container, error) {\n\tif w == nil {\n\t\tw = ioutil.Discard\n\t}\n\targs := changeUnitsPipelineArgs{\n\t\tapp: a,\n\t\tunitsToAdd: toAddCount,\n\t\twriter: w,\n\t}\n\tpipeline := action.NewPipeline(\n\t\t&provisionAddUnitsToHost,\n\t\t&addNewRoutes,\n\t)\n\terr := pipeline.Execute(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pipeline.Result().([]container), nil\n}\n\nfunc moveOneContainer(c container, toHost string, errors chan error, wg *sync.WaitGroup, encoder *json.Encoder, locker *appLocker) container {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tlocked := locker.lock(c.AppName)\n\tif !locked {\n\t\terrors <- fmt.Errorf(\"Couldn't move %s, unable to lock %q.\", c.ID, c.AppName)\n\t\treturn container{}\n\t}\n\tdefer locker.unlock(c.AppName)\n\ta, err := app.GetByName(c.AppName)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error getting app %q for unit %s.\", c.AppName, c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Moving unit %s for %q: %s -> %s...\", c.ID, c.AppName, c.HostAddr, toHost)\n\taddedContainers, err := runReplaceUnitsPipeline(nil, a, []container{c}, toHost)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error moving unit %s.\", c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Finished moving unit %s for %q.\", c.ID, c.AppName)\n\taddedUnit := addedContainers[0].asUnit(a)\n\terr = a.BindUnit(&addedUnit)\n\tif err != nil {\n\t\terrors <- &tsuruErrors.CompositeError{\n\t\t\tBase: err,\n\t\t\tMessage: fmt.Sprintf(\"Error binding unit %s to service instances.\", c.ID),\n\t\t}\n\t\treturn container{}\n\t}\n\tlogProgress(encoder, \"Moved unit %s -> %s for %s with bindings.\", c.ID, addedUnit.Name, c.AppName)\n\treturn addedContainers[0]\n}\n\nfunc moveContainer(contId string, toHost string, encoder *json.Encoder) (container, error) {\n\tcont, err := getContainer(contId)\n\tif err != nil {\n\t\treturn container{}, err\n\t}\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tmoveErrors := make(chan error, 1)\n\tlocker := &appLocker{}\n\tcreatedContainer := moveOneContainer(*cont, toHost, moveErrors, &wg, encoder, locker)\n\tclose(moveErrors)\n\treturn createdContainer, handleMoveErrors(moveErrors, encoder)\n}\n\nfunc moveContainers(fromHost, toHost string, encoder *json.Encoder) error {\n\tcontainers, err := listContainersByHost(fromHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumberContainers := len(containers)\n\tif numberContainers == 0 {\n\t\tlogProgress(encoder, \"No units to move in %s.\", fromHost)\n\t\treturn nil\n\t}\n\tlogProgress(encoder, \"Moving %d units...\", numberContainers)\n\tlocker := &appLocker{}\n\tmoveErrors := make(chan error, numberContainers)\n\twg := sync.WaitGroup{}\n\twg.Add(numberContainers)\n\tfor _, c := range containers {\n\t\tgo moveOneContainer(c, toHost, moveErrors, &wg, encoder, locker)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(moveErrors)\n\t}()\n\treturn handleMoveErrors(moveErrors, encoder)\n}\n\ntype hostWithContainers struct {\n\tHostAddr string `bson:\"_id\"`\n\tCount int\n\tContainers []container\n}\n\nfunc minCountHost(hosts []hostWithContainers) *hostWithContainers {\n\tvar minCountHost *hostWithContainers\n\tminCount := math.MaxInt32\n\tfor i, dest := range hosts {\n\t\tif dest.Count < minCount {\n\t\t\tminCount = dest.Count\n\t\t\tminCountHost = &hosts[i]\n\t\t}\n\t}\n\treturn minCountHost\n}\n\nfunc rebalanceContainers(encoder *json.Encoder, dryRun bool) error {\n\tcoll := collection()\n\tdefer coll.Close()\n\tfullDocQuery := bson.M{\n\t\t\/\/ Could use $$ROOT instead of repeating fields but only in Mongo 2.6+.\n\t\t\"_id\": \"$_id\",\n\t\t\"id\": \"$id\",\n\t\t\"name\": \"$name\",\n\t\t\"appname\": \"$appname\",\n\t\t\"type\": \"$type\",\n\t\t\"ip\": \"$ip\",\n\t\t\"image\": \"$image\",\n\t\t\"hostaddr\": \"$hostaddr\",\n\t\t\"hostport\": \"$hostport\",\n\t\t\"status\": \"$status\",\n\t\t\"version\": \"$version\",\n\t}\n\tappsPipe := coll.Pipe([]bson.M{\n\t\t{\"$group\": bson.M{\"_id\": \"$appname\", \"count\": bson.M{\"$sum\": 1}}},\n\t})\n\tvar appsInfo []struct {\n\t\tName string `bson:\"_id\"`\n\t\tCount int\n\t}\n\terr := appsPipe.All(&appsInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclusterInstance := dockerCluster()\n\tfor _, appInfo := range appsInfo {\n\t\tif appInfo.Count < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tlogProgress(encoder, \"Rebalancing app %q (%d units)...\", appInfo.Name, appInfo.Count)\n\t\tvar possibleDests []cluster.Node\n\t\tif isSegregateScheduler() {\n\t\t\tpossibleDests, err = nodesForAppName(clusterInstance, appInfo.Name)\n\t\t} else {\n\t\t\tpossibleDests, err = clusterInstance.Nodes()\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmaxContPerUnit := appInfo.Count \/ len(possibleDests)\n\t\toverflowHosts := appInfo.Count % len(possibleDests)\n\t\tpipe := coll.Pipe([]bson.M{\n\t\t\t{\"$match\": bson.M{\"hostaddr\": bson.M{\"$ne\": \"\"}, \"appname\": appInfo.Name}},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": \"$hostaddr\",\n\t\t\t\t\"count\": bson.M{\"$sum\": 1},\n\t\t\t\t\"containers\": bson.M{\"$push\": fullDocQuery}}},\n\t\t})\n\t\tvar hosts []hostWithContainers\n\t\thostsSet := make(map[string]bool)\n\t\terr = pipe.All(&hosts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, host := range hosts {\n\t\t\thostsSet[host.HostAddr] = true\n\t\t}\n\t\tfor _, node := range possibleDests {\n\t\t\thostAddr := urlToHost(node.Address)\n\t\t\t_, present := hostsSet[hostAddr]\n\t\t\tif !present {\n\t\t\t\thosts = append(hosts, hostWithContainers{HostAddr: hostAddr})\n\t\t\t}\n\t\t}\n\t\tanythingDone := false\n\t\tfor _, host := range hosts {\n\t\t\ttoMoveCount := host.Count - maxContPerUnit\n\t\t\tif toMoveCount <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif overflowHosts > 0 {\n\t\t\t\toverflowHosts--\n\t\t\t\ttoMoveCount--\n\t\t\t\tif toMoveCount <= 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tanythingDone = true\n\t\t\tlogProgress(encoder, \"Trying to move %d units for %q from %s...\", toMoveCount, appInfo.Name, host.HostAddr)\n\t\t\tlocker := &appLocker{}\n\t\t\twg := sync.WaitGroup{}\n\t\t\tmoveErrors := make(chan error, toMoveCount)\n\t\t\tfor _, cont := range host.Containers {\n\t\t\t\tminDest := minCountHost(hosts)\n\t\t\t\tif minDest.Count < maxContPerUnit {\n\t\t\t\t\ttoMoveCount--\n\t\t\t\t\tminDest.Count++\n\t\t\t\t\tif dryRun {\n\t\t\t\t\t\tlogProgress(encoder, \"Would move unit %s for %q: %s -> %s...\", cont.ID, appInfo.Name, cont.HostAddr, minDest.HostAddr)\n\t\t\t\t\t} else {\n\t\t\t\t\t\twg.Add(1)\n\t\t\t\t\t\tgo moveOneContainer(cont, minDest.HostAddr, moveErrors, &wg, encoder, locker)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif toMoveCount == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif toMoveCount > 0 {\n\t\t\t\tlogProgress(encoder, \"Couldn't find suitable destination for %d units for app %q\", toMoveCount, appInfo.Name)\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\twg.Wait()\n\t\t\t\tclose(moveErrors)\n\t\t\t}()\n\t\t\terr := handleMoveErrors(moveErrors, encoder)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif anythingDone {\n\t\t\tlogProgress(encoder, \"Rebalance finished for %q\", appInfo.Name)\n\t\t} else {\n\t\t\tlogProgress(encoder, \"Nothing to do for %q\", appInfo.Name)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mailchimp\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ Client manages communication with the Mailchimp API.\ntype Client struct {\n\tclient *http.Client\n\tBaseURL *url.URL\n\tDC string\n\tAPIKey string\n}\n\n\/\/ NewClient returns a new Mailchimp API client. If a nil httpClient is\n\/\/ provided, http.DefaultClient will be used. The apiKey must be in the format xyz-us11.\nfunc NewClient(apiKey string, httpClient *http.Client) (*Client, error) {\n\tif len(strings.Split(apiKey, \"-\")) != 2 {\n\t\treturn nil, errors.New(\"Mailchimp API Key must be formatted like: xyz-zys\")\n\t}\n\tdc := strings.Split(apiKey, \"-\")[1] \/\/ data center\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(fmt.Sprintf(\"https:\/\/%s.api.mailchimp.com\/3.0\", dc))\n\treturn &Client{\n\t\tAPIKey: apiKey,\n\t\tclient: httpClient,\n\t\tDC: dc,\n\t\tBaseURL: baseURL,\n\t}, nil\n}\n\n\/\/ ErrorResponse ...\ntype ErrorResponse struct {\n\tType string `json:\"type\"`\n\tTitle string `json:\"title\"`\n\tStatus int `json:\"status\"`\n\tDetail string `json:\"detail\"`\n}\n\n\/\/ Error ...\nfunc (e ErrorResponse) Error() string {\n\treturn fmt.Sprintf(\"Error %d %s (%s)\", e.Status, e.Title, e.Detail)\n}\n\n\/\/ CheckResponse ...\nfunc CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\terrorResponse := new(ErrorResponse)\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, errorResponse)\n\t}\n\treturn errorResponse\n}\n\n\/\/ Do ...\nfunc (c *Client) Do(method string, path string, body interface{}) (interface{}, error) {\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr := json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tapiURL := fmt.Sprintf(\"%s%s\", c.BaseURL.String(), path)\n\n\treq, err := http.NewRequest(method, apiURL, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.SetBasicAuth(\"\", c.APIKey)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\terr = CheckResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar v interface{}\n\terr = json.NewDecoder(resp.Body).Decode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn v, nil\n}\n\n\/\/ Subscribe ...\nfunc (c *Client) Subscribe(email string, listID string) (interface{}, error) {\n\tdata := &map[string]string{\n\t\t\"email_address\": email,\n\t\t\"status\": \"subscribed\",\n\t}\n\treturn c.Do(\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"\/lists\/%s\/members\/\", listID),\n\t\tdata,\n\t)\n}\n<commit_msg>Refactoring.<commit_after>package mailchimp\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ Client manages communication with the Mailchimp API.\ntype Client struct {\n\tclient *http.Client\n\tBaseURL *url.URL\n\tDC string\n\tAPIKey string\n}\n\n\/\/ NewClient returns a new Mailchimp API client. If a nil httpClient is\n\/\/ provided, http.DefaultClient will be used. The apiKey must be in the format xyz-us11.\nfunc NewClient(apiKey string, httpClient *http.Client) (*Client, error) {\n\tif len(strings.Split(apiKey, \"-\")) != 2 {\n\t\treturn nil, errors.New(\"Mailchimp API Key must be formatted like: xyz-zys\")\n\t}\n\tdc := strings.Split(apiKey, \"-\")[1] \/\/ data center\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(fmt.Sprintf(\"https:\/\/%s.api.mailchimp.com\/3.0\", dc))\n\treturn &Client{\n\t\tAPIKey: apiKey,\n\t\tclient: httpClient,\n\t\tDC: dc,\n\t\tBaseURL: baseURL,\n\t}, nil\n}\n\n\/\/ ErrorResponse ...\ntype ErrorResponse struct {\n\tType string `json:\"type\"`\n\tTitle string `json:\"title\"`\n\tStatus int `json:\"status\"`\n\tDetail string `json:\"detail\"`\n}\n\n\/\/ Error ...\nfunc (e ErrorResponse) Error() string {\n\treturn fmt.Sprintf(\"Error %d %s (%s)\", e.Status, e.Title, e.Detail)\n}\n\n\/\/ Subscribe ...\nfunc (c *Client) Subscribe(email string, listID string) (interface{}, error) {\n\tdata := &map[string]string{\n\t\t\"email_address\": email,\n\t\t\"status\": \"subscribed\",\n\t}\n\treturn c.do(\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"\/lists\/%s\/members\/\", listID),\n\t\tdata,\n\t)\n}\n\nfunc (c *Client) do(method string, path string, body interface{}) (interface{}, error) {\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr := json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tapiURL := fmt.Sprintf(\"%s%s\", c.BaseURL.String(), path)\n\n\treq, err := http.NewRequest(method, apiURL, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.SetBasicAuth(\"\", c.APIKey)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := checkResponse(resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar v interface{}\n\terr = json.NewDecoder(resp.Body).Decode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn v, nil\n}\n\nfunc checkResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\terrorResponse := new(ErrorResponse)\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, errorResponse)\n\t}\n\treturn errorResponse\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/*\n#cgo LDFLAGS: -lOpenCL\n\n#ifdef __APPLE__\n# include \"OpenCL\/opencl.h\"\n#else\n# include \"CL\/cl.h\"\n#endif\n\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\nfunc main() {\n\t\/\/ get platform count\n\tvar platformCount C.cl_uint\n\tC.clGetPlatformIDs(0, nil, &platformCount)\n\tfmt.Println(\"OpenCL platforms:\", platformCount)\n\t\/\/ get platform IDs\n\tplatformIDs := make([]C.cl_platform_id, platformCount)\n\tC.clGetPlatformIDs(platformCount, &platformIDs[0], nil)\n\n\t\/\/ get device count\n\tvar deviceIdCount C.cl_uint\n\tC.clGetDeviceIDs(platformIDs[0], C.CL_DEVICE_TYPE_ALL, 0, nil, &deviceIdCount)\n\tfmt.Println(\"Devices:\", deviceIdCount)\n\t\/\/ get device IDs\n\tdeviceIDs := make([]C.cl_device_id, deviceIdCount)\n\tC.clGetDeviceIDs(platformIDs[0], C.CL_DEVICE_TYPE_ALL, deviceIdCount, &deviceIDs[0], nil)\n\n\t\/\/ make context\n\tcontextProps := []C.cl_context_properties{\n\t\tC.CL_CONTEXT_PLATFORM,\n\t\t*(*C.cl_context_properties)(unsafe.Pointer(&platformIDs[0])),\n\t\t0,\n\t}\n\n\tvar clErr C.cl_int\n\tC.clCreateContext(&contextProps[0], 1, &deviceIDs[0], nil, nil, &clErr)\n\n}\n<commit_msg>Create Contexts and Buffers<commit_after>package main\n\n\/*\n#cgo LDFLAGS: -lOpenCL\n\n#ifdef __APPLE__\n# include \"OpenCL\/opencl.h\"\n#else\n# include \"CL\/cl.h\"\n#endif\n\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\ntype Context struct {\n\tcl C.cl_context\n}\n\ntype Buffer C.cl_mem\n\nfunc CreateContext() *Context {\n\t\/\/ get platform count\n\tvar platformCount C.cl_uint\n\tC.clGetPlatformIDs(0, nil, &platformCount)\n\tfmt.Println(\"OpenCL platforms:\", platformCount)\n\t\/\/ get platform IDs\n\tplatformIDs := make([]C.cl_platform_id, platformCount)\n\tC.clGetPlatformIDs(platformCount, &platformIDs[0], nil)\n\n\t\/\/ get device count\n\tvar deviceIdCount C.cl_uint\n\tC.clGetDeviceIDs(platformIDs[0], C.CL_DEVICE_TYPE_ALL, 0, nil,\n\t\t&deviceIdCount)\n\tfmt.Println(\"Devices:\", deviceIdCount)\n\t\/\/ get device IDs\n\tdeviceIDs := make([]C.cl_device_id, deviceIdCount)\n\tC.clGetDeviceIDs(platformIDs[0], C.CL_DEVICE_TYPE_ALL, deviceIdCount,\n\t\t&deviceIDs[0], nil)\n\n\t\/\/ make context\n\tcontextProps := []C.cl_context_properties{\n\t\tC.CL_CONTEXT_PLATFORM,\n\t\t*(*C.cl_context_properties)(unsafe.Pointer(&platformIDs[0])),\n\t\t0,\n\t}\n\n\tvar clErr C.cl_int\n\tcontext := C.clCreateContext(&contextProps[0], 1, &deviceIDs[0], nil, nil,\n\t\t&clErr)\n\tif clErr != C.CL_SUCCESS {\n\t\tfmt.Println(\"CL error\", clErr, \"!\")\n\t\treturn nil\n\t}\n\treturn &Context{context}\n}\n\nfunc (c *Context) CreateBuffer(data []float32) Buffer {\n\tvar clErr C.cl_int\n\tvar buffer C.cl_mem\n\tbuffer = C.clCreateBuffer(c.cl, C.CL_MEM_READ_ONLY|C.CL_MEM_COPY_HOST_PTR,\n\t\tC.size_t(int(unsafe.Sizeof(data[0]))*len(data)), unsafe.Pointer(&data[0]),\n\t\t&clErr)\n\n\tif clErr != C.CL_SUCCESS {\n\t\tfmt.Println(\"CL error\", clErr, \"!\")\n\t\treturn nil\n\t}\n\treturn Buffer(buffer)\n}\n\nfunc main() {\n\tctx := CreateContext()\n\n\tx := []float32{1, 2, 3, 4}\n\tbuffer := ctx.CreateBuffer(x)\n\n\tfmt.Println(buffer)\n}\n<|endoftext|>"} {"text":"<commit_before>package admin\n\nimport (\n\t\"github.com\/qor\/qor\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"text\/template\"\n)\n\nvar layouts = map[string]*template.Template{}\nvar templates = map[string]*template.Template{}\nvar tmplSuffix = regexp.MustCompile(`(\\.tmpl)?$`)\nvar viewDirs = []string{}\n\nfunc isExistingDir(pth string) bool {\n\tfi, err := os.Stat(pth)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fi.Mode().IsDir()\n}\n\nfunc init() {\n\tif root := os.Getenv(\"WEB_ROOT\"); root != \"\" {\n\t\tif dir := path.Join(root, \"templates\/qor\"); isExistingDir(dir) {\n\t\t\tviewDirs = append(viewDirs, dir)\n\t\t}\n\t}\n\n\tif dir, err := filepath.Abs(\"templates\/qor\"); err == nil && isExistingDir(dir) {\n\t\tviewDirs = append(viewDirs, dir)\n\t}\n\n\tif dir := path.Join(os.Getenv(\"GOROOT\"), \"site\/src\/github.com\/qor\/qor\/admin\/templates\"); isExistingDir(dir) {\n\t\tviewDirs = append(viewDirs, dir)\n\t}\n}\n\nfunc (admin *Admin) Render(str string, context *qor.Context) {\n\tvar tmpl *template.Template\n\n\tstr = tmplSuffix.ReplaceAllString(str, \".tmpl\")\n\tpathWithResourceName := path.Join(context.ResourceName, str)\n\n\tvar paths []string\n\tfor _, value := range []string{pathWithResourceName, str} {\n\t\tif root != \"\" {\n\t\t\tpaths = append(paths, filepath.Join(root, value))\n\t\t}\n\t\tif p, e := filepath.Abs(value); e == nil {\n\t\t\tpaths = append(paths, p)\n\t\t}\n\t\tpaths = append(paths, filepath.Join(goroot, \"site\/src\/github.com\/qor\/qor\/admin\", value))\n\t}\n\n\tfor _, value := range paths {\n\t\t_, err := os.Stat(value)\n\t\tif !os.IsNotExist(err) {\n\t\t\tt, _ := template.ParseFiles(value)\n\t\t\ttemplates[value] = t\n\t\t\ttmpl = t\n\t\t\tbreak\n\t\t}\n\t}\n\tif tmpl != nil {\n\t\ttmpl.Execute(context.Writer, context)\n\t}\n}\n<commit_msg>update admin template<commit_after>package admin\n\nimport (\n\t\"github.com\/qor\/qor\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"text\/template\"\n)\n\nvar layouts = map[string]*template.Template{}\nvar templates = map[string]*template.Template{}\nvar tmplSuffix = regexp.MustCompile(`(\\.tmpl)?$`)\nvar viewDirs = []string{}\n\nfunc isExistingDir(pth string) bool {\n\tfi, err := os.Stat(pth)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fi.Mode().IsDir()\n}\n\nfunc init() {\n\tif root := os.Getenv(\"WEB_ROOT\"); root != \"\" {\n\t\tif dir := path.Join(root, \"templates\/qor\"); isExistingDir(dir) {\n\t\t\tviewDirs = append(viewDirs, dir)\n\t\t}\n\t}\n\n\tif dir, err := filepath.Abs(\"templates\/qor\"); err == nil && isExistingDir(dir) {\n\t\tviewDirs = append(viewDirs, dir)\n\t}\n\n\tif dir := path.Join(os.Getenv(\"GOROOT\"), \"site\/src\/github.com\/qor\/qor\/admin\/templates\"); isExistingDir(dir) {\n\t\tviewDirs = append(viewDirs, dir)\n\t}\n}\n\nfunc (admin *Admin) Render(str string, context *qor.Context) {\n\tvar tmpl *template.Template\n\n\tcacheKey := path.Join(context.ResourceName, str)\n\tif t, ok := templates[cacheKey]; !ok {\n\t\tstr = tmplSuffix.ReplaceAllString(str, \".tmpl\")\n\n\t\ttmpl = template.New(\"template\")\n\n\t\t\/\/ parse layout\n\t\tpaths := []string{}\n\t\tfor _, p := range []string{path.Join(\"resources\", context.ResourceName), path.Join(\"themes\", \"default\")} {\n\t\t\tfor _, d := range viewDirs {\n\t\t\t\tif isExistingDir(path.Join(d, p)) {\n\t\t\t\t\tpaths = append(paths, path.Join(d, p))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, f := range []string{\"layout.tmpl\", str} {\n\t\t\tfor _, p := range paths {\n\t\t\t\tif _, err := os.Stat(path.Join(p, f)); !os.IsNotExist(err) {\n\t\t\t\t\ttmpl, err = tmpl.ParseFiles(path.Join(p, f))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, name := range []string{\"header\", \"footer\"} {\n\t\t\tif tmpl.Lookup(name) == nil {\n\t\t\t\tfor _, p := range paths {\n\t\t\t\t\tif _, err := os.Stat(path.Join(p, name+\".tmpl\")); !os.IsNotExist(err) {\n\t\t\t\t\t\ttmpl, err = tmpl.ParseFiles(path.Join(p, name+\".tmpl\"))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttemplates[cacheKey] = tmpl\n\t} else {\n\t\ttmpl = t\n\t}\n\n\tif tmpl != nil {\n\t\ttmpl.Execute(context.Writer, context)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mode\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/elastic\/beats\/libbeat\/common\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n\t\"github.com\/elastic\/beats\/libbeat\/outputs\"\n)\n\n\/\/ LoadBalancerMode balances the sending of events between multiple connections.\n\/\/\n\/\/ The balancing algorithm is mostly pull-based, with multiple workers trying to pull\n\/\/ some amount of work from a shared queue. Workers will try to get a new work item\n\/\/ only if they have a working\/active connection. Workers without active connection\n\/\/ do not participate until a connection has been re-established.\n\/\/ Due to the pull based nature the algorithm will load-balance events by random\n\/\/ with workers having less latencies\/turn-around times potentially getting more\n\/\/ work items then other workers with higher latencies. Thusly the algorithm\n\/\/ dynamically adapts to resource availability of server events are forwarded to.\n\/\/\n\/\/ Workers not participating in the load-balancing will continuously try to reconnect\n\/\/ to their configured endpoints. Once a new connection has been established,\n\/\/ these workers will participate in in load-balancing again.\n\/\/\n\/\/ If a connection becomes unavailable, the events are rescheduled for another\n\/\/ connection to pick up. Rescheduling events is limited to a maximum number of\n\/\/ send attempts. If events have not been send after maximum number of allowed\n\/\/ attemps has been passed, they will be dropped.\n\/\/\n\/\/ Distributing events to workers is subject to timeout. If no worker is available to\n\/\/ pickup a message for sending, the message will be dropped internally.\ntype LoadBalancerMode struct {\n\ttimeout time.Duration \/\/ Send\/retry timeout. Every timeout is a failed send attempt\n\twaitRetry time.Duration \/\/ Duration to wait during re-connection attempts.\n\tmaxWaitRetry time.Duration \/\/ Maximum send\/retry timeout in backoff case.\n\n\t\/\/ maximum number of configured send attempts. If set to 0, publisher will\n\t\/\/ block until event has been successfully published.\n\tmaxAttempts int\n\n\t\/\/ waitGroup + signaling channel for handling shutdown\n\twg sync.WaitGroup\n\tdone chan struct{}\n\n\t\/\/ channels for forwarding work items to workers.\n\t\/\/ The work channel is used by publisher to insert new events\n\t\/\/ into the load balancer. The work channel is synchronous blocking until timeout\n\t\/\/ for one worker available.\n\t\/\/ The retries channel is used to forward failed send attempts to other workers.\n\t\/\/ The retries channel is buffered to mitigate possible deadlocks when all\n\t\/\/ workers become unresponsive.\n\twork chan eventsMessage\n\tretries chan eventsMessage\n}\n\ntype eventsMessage struct {\n\tattemptsLeft int\n\tsignaler outputs.Signaler\n\tevents []common.MapStr\n\tevent common.MapStr\n}\n\n\/\/ NewLoadBalancerMode create a new load balancer connection mode.\nfunc NewLoadBalancerMode(\n\tclients []ProtocolClient,\n\tmaxAttempts int,\n\twaitRetry, timeout, maxWaitRetry time.Duration,\n) (*LoadBalancerMode, error) {\n\n\t\/\/ maxAttempts signals infinite retry. Convert to -1, so attempts left and\n\t\/\/ and infinite retry can be more easily distinguished by load balancer\n\tif maxAttempts == 0 {\n\t\tmaxAttempts = -1\n\t}\n\n\tm := &LoadBalancerMode{\n\t\ttimeout: timeout,\n\t\tmaxWaitRetry: maxWaitRetry,\n\t\twaitRetry: waitRetry,\n\t\tmaxAttempts: maxAttempts,\n\n\t\twork: make(chan eventsMessage),\n\t\tretries: make(chan eventsMessage, len(clients)*2),\n\t\tdone: make(chan struct{}),\n\t}\n\tm.start(clients)\n\n\treturn m, nil\n}\n\n\/\/ Close stops all workers and closes all open connections. In flight events\n\/\/ are signaled as failed.\nfunc (m *LoadBalancerMode) Close() error {\n\tclose(m.done)\n\tm.wg.Wait()\n\treturn nil\n}\n\n\/\/ PublishEvents forwards events to some load balancing worker.\nfunc (m *LoadBalancerMode) PublishEvents(\n\tsignaler outputs.Signaler,\n\topts outputs.Options,\n\tevents []common.MapStr,\n) error {\n\treturn m.publishEventsMessage(opts,\n\t\teventsMessage{signaler: signaler, events: events})\n}\n\n\/\/ PublishEvent forwards the event to some load balancing worker.\nfunc (m *LoadBalancerMode) PublishEvent(\n\tsignaler outputs.Signaler,\n\topts outputs.Options,\n\tevent common.MapStr,\n) error {\n\treturn m.publishEventsMessage(opts,\n\t\teventsMessage{signaler: signaler, event: event})\n}\n\nfunc (m *LoadBalancerMode) publishEventsMessage(\n\topts outputs.Options,\n\tmsg eventsMessage,\n) error {\n\tmaxAttempts := m.maxAttempts\n\tif opts.Guaranteed {\n\t\tmaxAttempts = -1\n\t}\n\tmsg.attemptsLeft = maxAttempts\n\n\tif ok := m.forwardEvent(m.work, msg); !ok {\n\t\tdropping(msg)\n\t}\n\treturn nil\n}\n\nfunc (m *LoadBalancerMode) start(clients []ProtocolClient) {\n\tvar waitStart sync.WaitGroup\n\tworker := func(client ProtocolClient) {\n\t\tdefer func() {\n\t\t\tif client.IsConnected() {\n\t\t\t\t_ = client.Close()\n\t\t\t}\n\t\t\tm.wg.Done()\n\t\t}()\n\n\t\twaitStart.Done()\n\t\tfor {\n\t\t\t\/\/ reconnect loop\n\t\t\tfor !client.IsConnected() {\n\t\t\t\tif err := client.Connect(m.timeout); err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-m.done:\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(m.waitRetry):\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ receive and process messages\n\t\t\tvar msg eventsMessage\n\t\t\tselect {\n\t\t\tcase <-m.done:\n\t\t\t\treturn\n\t\t\tcase msg = <-m.retries: \/\/ receive message from other failed worker\n\t\t\tcase msg = <-m.work: \/\/ receive message from publisher\n\t\t\t}\n\t\t\tm.onMessage(client, msg)\n\t\t}\n\t}\n\n\tfor _, client := range clients {\n\t\tm.wg.Add(1)\n\t\twaitStart.Add(1)\n\t\tgo worker(client)\n\t}\n\twaitStart.Wait()\n}\n\nfunc (m *LoadBalancerMode) onMessage(client ProtocolClient, msg eventsMessage) {\n\tif msg.event != nil {\n\t\tif err := client.PublishEvent(msg.event); err != nil {\n\t\t\tif msg.attemptsLeft > 0 {\n\t\t\t\tmsg.attemptsLeft--\n\t\t\t}\n\t\t\tm.onFail(msg, err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tevents := msg.events\n\t\ttotal := len(events)\n\t\tvar backoffCount uint\n\n\t\tfor len(events) > 0 {\n\t\t\tvar err error\n\n\t\t\tevents, err = client.PublishEvents(events)\n\t\t\tif err != nil {\n\t\t\t\tif msg.attemptsLeft > 0 {\n\t\t\t\t\tmsg.attemptsLeft--\n\t\t\t\t}\n\n\t\t\t\t\/\/ reset attempt count if subset of messages has been processed\n\t\t\t\tif len(events) < total && msg.attemptsLeft >= 0 {\n\t\t\t\t\tdebug(\"reset fails\")\n\t\t\t\t\tmsg.attemptsLeft = m.maxAttempts\n\t\t\t\t}\n\n\t\t\t\tif err != ErrTempBulkFailure {\n\t\t\t\t\t\/\/ retry non-published subset of events in batch\n\t\t\t\t\tmsg.events = events\n\t\t\t\t\tm.onFail(msg, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif m.maxAttempts > 0 && msg.attemptsLeft == 0 {\n\t\t\t\t\t\/\/ no more attempts left => drop\n\t\t\t\t\tdropping(msg)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ wait before retry\n\t\t\t\tbackoff := time.Duration(int64(m.waitRetry) * (1 << backoffCount))\n\t\t\t\tif backoff > m.maxWaitRetry {\n\t\t\t\t\tbackoff = m.maxWaitRetry\n\t\t\t\t} else {\n\t\t\t\t\tbackoffCount++\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-m.done: \/\/ shutdown\n\t\t\t\t\toutputs.SignalFailed(msg.signaler, err)\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(backoff):\n\t\t\t\t}\n\n\t\t\t\t\/\/ reset total count for temporary failure loop\n\t\t\t\ttotal = len(events)\n\t\t\t}\n\t\t}\n\t}\n\toutputs.SignalCompleted(msg.signaler)\n}\n\nfunc (m *LoadBalancerMode) onFail(msg eventsMessage, err error) {\n\n\tlogp.Info(\"Error publishing events (retrying): %s\", err)\n\n\tif ok := m.forwardEvent(m.retries, msg); !ok {\n\t\tdropping(msg)\n\t}\n}\n\nfunc (m *LoadBalancerMode) forwardEvent(\n\tch chan eventsMessage,\n\tmsg eventsMessage,\n) bool {\n\tif msg.attemptsLeft < 0 {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\t\treturn true\n\t\tcase <-m.done: \/\/ shutdown\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tfor ; msg.attemptsLeft > 0; msg.attemptsLeft-- {\n\t\t\tselect {\n\t\t\tcase ch <- msg:\n\t\t\t\treturn true\n\t\t\tcase <-m.done: \/\/ shutdown\n\t\t\t\treturn false\n\t\t\tcase <-time.After(m.timeout):\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ dropping is called when a message is dropped. It updates the\n\/\/ relevant counters and sends a failed signal.\nfunc dropping(msg eventsMessage) {\n\tdebug(\"messages dropped\")\n\tmessagesDropped.Add(1)\n\toutputs.SignalFailed(msg.signaler, nil)\n}\n<commit_msg>Simplify balance mode worker loop<commit_after>package mode\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/elastic\/beats\/libbeat\/common\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n\t\"github.com\/elastic\/beats\/libbeat\/outputs\"\n)\n\n\/\/ LoadBalancerMode balances the sending of events between multiple connections.\n\/\/\n\/\/ The balancing algorithm is mostly pull-based, with multiple workers trying to pull\n\/\/ some amount of work from a shared queue. Workers will try to get a new work item\n\/\/ only if they have a working\/active connection. Workers without active connection\n\/\/ do not participate until a connection has been re-established.\n\/\/ Due to the pull based nature the algorithm will load-balance events by random\n\/\/ with workers having less latencies\/turn-around times potentially getting more\n\/\/ work items then other workers with higher latencies. Thusly the algorithm\n\/\/ dynamically adapts to resource availability of server events are forwarded to.\n\/\/\n\/\/ Workers not participating in the load-balancing will continuously try to reconnect\n\/\/ to their configured endpoints. Once a new connection has been established,\n\/\/ these workers will participate in in load-balancing again.\n\/\/\n\/\/ If a connection becomes unavailable, the events are rescheduled for another\n\/\/ connection to pick up. Rescheduling events is limited to a maximum number of\n\/\/ send attempts. If events have not been send after maximum number of allowed\n\/\/ attemps has been passed, they will be dropped.\n\/\/\n\/\/ Distributing events to workers is subject to timeout. If no worker is available to\n\/\/ pickup a message for sending, the message will be dropped internally.\ntype LoadBalancerMode struct {\n\ttimeout time.Duration \/\/ Send\/retry timeout. Every timeout is a failed send attempt\n\twaitRetry time.Duration \/\/ Duration to wait during re-connection attempts.\n\tmaxWaitRetry time.Duration \/\/ Maximum send\/retry timeout in backoff case.\n\n\t\/\/ maximum number of configured send attempts. If set to 0, publisher will\n\t\/\/ block until event has been successfully published.\n\tmaxAttempts int\n\n\t\/\/ waitGroup + signaling channel for handling shutdown\n\twg sync.WaitGroup\n\tdone chan struct{}\n\n\t\/\/ channels for forwarding work items to workers.\n\t\/\/ The work channel is used by publisher to insert new events\n\t\/\/ into the load balancer. The work channel is synchronous blocking until timeout\n\t\/\/ for one worker available.\n\t\/\/ The retries channel is used to forward failed send attempts to other workers.\n\t\/\/ The retries channel is buffered to mitigate possible deadlocks when all\n\t\/\/ workers become unresponsive.\n\twork chan eventsMessage\n\tretries chan eventsMessage\n}\n\ntype eventsMessage struct {\n\tattemptsLeft int\n\tsignaler outputs.Signaler\n\tevents []common.MapStr\n\tevent common.MapStr\n}\n\n\/\/ NewLoadBalancerMode create a new load balancer connection mode.\nfunc NewLoadBalancerMode(\n\tclients []ProtocolClient,\n\tmaxAttempts int,\n\twaitRetry, timeout, maxWaitRetry time.Duration,\n) (*LoadBalancerMode, error) {\n\n\t\/\/ maxAttempts signals infinite retry. Convert to -1, so attempts left and\n\t\/\/ and infinite retry can be more easily distinguished by load balancer\n\tif maxAttempts == 0 {\n\t\tmaxAttempts = -1\n\t}\n\n\tm := &LoadBalancerMode{\n\t\ttimeout: timeout,\n\t\tmaxWaitRetry: maxWaitRetry,\n\t\twaitRetry: waitRetry,\n\t\tmaxAttempts: maxAttempts,\n\n\t\twork: make(chan eventsMessage),\n\t\tretries: make(chan eventsMessage, len(clients)*2),\n\t\tdone: make(chan struct{}),\n\t}\n\tm.start(clients)\n\n\treturn m, nil\n}\n\n\/\/ Close stops all workers and closes all open connections. In flight events\n\/\/ are signaled as failed.\nfunc (m *LoadBalancerMode) Close() error {\n\tclose(m.done)\n\tm.wg.Wait()\n\treturn nil\n}\n\n\/\/ PublishEvents forwards events to some load balancing worker.\nfunc (m *LoadBalancerMode) PublishEvents(\n\tsignaler outputs.Signaler,\n\topts outputs.Options,\n\tevents []common.MapStr,\n) error {\n\treturn m.publishEventsMessage(opts,\n\t\teventsMessage{signaler: signaler, events: events})\n}\n\n\/\/ PublishEvent forwards the event to some load balancing worker.\nfunc (m *LoadBalancerMode) PublishEvent(\n\tsignaler outputs.Signaler,\n\topts outputs.Options,\n\tevent common.MapStr,\n) error {\n\treturn m.publishEventsMessage(opts,\n\t\teventsMessage{signaler: signaler, event: event})\n}\n\nfunc (m *LoadBalancerMode) publishEventsMessage(\n\topts outputs.Options,\n\tmsg eventsMessage,\n) error {\n\tmaxAttempts := m.maxAttempts\n\tif opts.Guaranteed {\n\t\tmaxAttempts = -1\n\t}\n\tmsg.attemptsLeft = maxAttempts\n\n\tif ok := m.forwardEvent(m.work, msg); !ok {\n\t\tdropping(msg)\n\t}\n\treturn nil\n}\n\nfunc (m *LoadBalancerMode) start(clients []ProtocolClient) {\n\tvar waitStart sync.WaitGroup\n\tworker := func(client ProtocolClient) {\n\t\tdefer func() {\n\t\t\tif client.IsConnected() {\n\t\t\t\t_ = client.Close()\n\t\t\t}\n\t\t\tm.wg.Done()\n\t\t}()\n\n\t\twaitStart.Done()\n\t\tm.clientLoop(client)\n\t}\n\n\tfor _, client := range clients {\n\t\tm.wg.Add(1)\n\t\twaitStart.Add(1)\n\t\tgo worker(client)\n\t}\n\twaitStart.Wait()\n}\n\nfunc (m *LoadBalancerMode) clientLoop(client ProtocolClient) {\n\tdebug(\"load balancer: start client loop\")\n\tdefer debug(\"load balancer: stop client loop\")\n\n\tbackoff := newBackoff(m.done, m.waitRetry, m.maxWaitRetry)\n\n\tdone := false\n\tfor !done {\n\t\tif done = m.connect(client, backoff); !done {\n\t\t\tdone = m.sendLoop(client, backoff)\n\t\t}\n\t\tdebug(\"close client\")\n\t\tclient.Close()\n\t}\n}\n\nfunc (m *LoadBalancerMode) connect(client ProtocolClient, backoff *backoff) bool {\n\tfor {\n\t\tdebug(\"try to (re-)connect client\")\n\t\terr := client.Connect(m.timeout)\n\t\tif !backoff.WithError(err) {\n\t\t\treturn true\n\t\t}\n\n\t\tif err == nil {\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc (m *LoadBalancerMode) sendLoop(client ProtocolClient, backoff *backoff) bool {\n\tfor {\n\t\tvar msg eventsMessage\n\t\tselect {\n\t\tcase <-m.done:\n\t\t\treturn true\n\t\tcase msg = <-m.retries: \/\/ receive message from other failed worker\n\t\tcase msg = <-m.work: \/\/ receive message from publisher\n\t\t}\n\n\t\tdone, err := m.onMessage(backoff, client, msg)\n\t\tif done || err != nil {\n\t\t\treturn done\n\t\t}\n\t}\n}\n\nfunc (m *LoadBalancerMode) onMessage(\n\tbackoff *backoff,\n\tclient ProtocolClient,\n\tmsg eventsMessage,\n) (bool, error) {\n\n\tdone := false\n\tif msg.event != nil {\n\t\terr := client.PublishEvent(msg.event)\n\t\tdone = !backoff.WithError(err)\n\t\tif err != nil {\n\t\t\tif msg.attemptsLeft > 0 {\n\t\t\t\tmsg.attemptsLeft--\n\t\t\t}\n\t\t\tm.onFail(msg, err)\n\t\t\treturn done, err\n\t\t}\n\t} else {\n\t\tevents := msg.events\n\t\ttotal := len(events)\n\n\t\tfor len(events) > 0 {\n\t\t\tvar err error\n\n\t\t\tevents, err = client.PublishEvents(events)\n\t\t\tdone = !backoff.WithError(err)\n\t\t\tif done && err != nil {\n\t\t\t\toutputs.SignalFailed(msg.signaler, err)\n\t\t\t\treturn done, err\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif msg.attemptsLeft > 0 {\n\t\t\t\t\tmsg.attemptsLeft--\n\t\t\t\t}\n\n\t\t\t\t\/\/ reset attempt count if subset of messages has been processed\n\t\t\t\tif len(events) < total && msg.attemptsLeft >= 0 {\n\t\t\t\t\tdebug(\"reset fails\")\n\t\t\t\t\tmsg.attemptsLeft = m.maxAttempts\n\t\t\t\t}\n\n\t\t\t\tif err != ErrTempBulkFailure {\n\t\t\t\t\t\/\/ retry non-published subset of events in batch\n\t\t\t\t\tmsg.events = events\n\t\t\t\t\tm.onFail(msg, err)\n\t\t\t\t\treturn done, err\n\t\t\t\t}\n\n\t\t\t\tif m.maxAttempts > 0 && msg.attemptsLeft == 0 {\n\t\t\t\t\t\/\/ no more attempts left => drop\n\t\t\t\t\tdropping(msg)\n\t\t\t\t\treturn done, err\n\t\t\t\t}\n\n\t\t\t\t\/\/ reset total count for temporary failure loop\n\t\t\t\ttotal = len(events)\n\t\t\t}\n\t\t}\n\t}\n\n\toutputs.SignalCompleted(msg.signaler)\n\treturn done, nil\n}\n\nfunc (m *LoadBalancerMode) onFail(msg eventsMessage, err error) {\n\n\tlogp.Info(\"Error publishing events (retrying): %s\", err)\n\n\tif !m.forwardEvent(m.retries, msg) {\n\t\tdropping(msg)\n\t}\n}\n\nfunc (m *LoadBalancerMode) forwardEvent(\n\tch chan eventsMessage,\n\tmsg eventsMessage,\n) bool {\n\tif msg.attemptsLeft < 0 {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\t\treturn true\n\t\tcase <-m.done: \/\/ shutdown\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tfor ; msg.attemptsLeft > 0; msg.attemptsLeft-- {\n\t\t\tselect {\n\t\t\tcase ch <- msg:\n\t\t\t\treturn true\n\t\t\tcase <-m.done: \/\/ shutdown\n\t\t\t\treturn false\n\t\t\tcase <-time.After(m.timeout):\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ dropping is called when a message is dropped. It updates the\n\/\/ relevant counters and sends a failed signal.\nfunc dropping(msg eventsMessage) {\n\tdebug(\"messages dropped\")\n\tmessagesDropped.Add(1)\n\toutputs.SignalFailed(msg.signaler, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package ttransport\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\tcrand \"crypto\/rand\"\n\tmrand \"math\/rand\"\n\n\t\"github.com\/libp2p\/go-libp2p-core\/mux\"\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\t\"github.com\/libp2p\/go-libp2p-core\/transport\"\n\t\"github.com\/libp2p\/go-libp2p-testing\/race\"\n\n\tma \"github.com\/multiformats\/go-multiaddr\"\n)\n\n\/\/ VerboseDebugging can be set to true to enable verbose debug logging in the\n\/\/ stream stress tests.\nvar VerboseDebugging = false\n\nvar randomness []byte\n\nvar StressTestTimeout = 1 * time.Minute\n\nfunc init() {\n\t\/\/ read 1MB of randomness\n\trandomness = make([]byte, 1<<20)\n\tif _, err := crand.Read(randomness); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif timeout := os.Getenv(\"TEST_STRESS_TIMEOUT_MS\"); timeout != \"\" {\n\t\tif v, err := strconv.ParseInt(timeout, 10, 32); err == nil {\n\t\t\tStressTestTimeout = time.Duration(v) * time.Millisecond\n\t\t}\n\t}\n}\n\ntype Options struct {\n\tConnNum int\n\tStreamNum int\n\tMsgNum int\n\tMsgMin int\n\tMsgMax int\n}\n\nfunc fullClose(t *testing.T, s mux.MuxedStream) {\n\tif err := s.CloseWrite(); err != nil {\n\t\tt.Error(err)\n\t\ts.Reset()\n\t\treturn\n\t}\n\tb, err := ioutil.ReadAll(s)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(b) != 0 {\n\t\tt.Error(\"expected to be done reading\")\n\t}\n\tif err := s.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc randBuf(size int) []byte {\n\tn := len(randomness) - size\n\tif size < 1 {\n\t\tpanic(fmt.Errorf(\"requested too large buffer (%d). max is %d\", size, len(randomness)))\n\t}\n\n\tstart := mrand.Intn(n)\n\treturn randomness[start : start+size]\n}\n\nfunc debugLog(t *testing.T, s string, args ...interface{}) {\n\tif VerboseDebugging {\n\t\tt.Logf(s, args...)\n\t}\n}\n\nfunc echoStream(t *testing.T, s mux.MuxedStream) {\n\t\/\/ echo everything\n\tvar err error\n\tif VerboseDebugging {\n\t\tt.Logf(\"accepted stream\")\n\t\t_, err = io.Copy(&logWriter{t, s}, s)\n\t\tt.Log(\"closing stream\")\n\t} else {\n\t\t_, err = io.Copy(s, s) \/\/ echo everything\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\ntype logWriter struct {\n\tt *testing.T\n\tW io.Writer\n}\n\nfunc (lw *logWriter) Write(buf []byte) (int, error) {\n\tlw.t.Logf(\"logwriter: writing %d bytes\", len(buf))\n\treturn lw.W.Write(buf)\n}\n\nfunc echo(t *testing.T, c transport.CapableConn) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor {\n\t\tstr, err := c.AcceptStream()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer str.Close()\n\t\t\techoStream(t, str)\n\t\t}()\n\t}\n}\n\nfunc serve(t *testing.T, l transport.Listener) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tfor {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\twg.Add(1)\n\t\tdebugLog(t, \"accepted connection\")\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer c.Close()\n\t\t\techo(t, c)\n\t\t}()\n\t}\n}\n\nfunc SubtestStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID, opt Options) {\n\tmsgsize := 1 << 11\n\n\trateLimitN := 5000 \/\/ max of 5k funcs, because -race has 8k max.\n\trateLimitChan := make(chan struct{}, rateLimitN)\n\tfor i := 0; i < rateLimitN; i++ {\n\t\trateLimitChan <- struct{}{}\n\t}\n\n\trateLimit := func(f func()) {\n\t\t<-rateLimitChan\n\t\tf()\n\t\trateLimitChan <- struct{}{}\n\t}\n\n\twriteStream := func(s mux.MuxedStream, bufs chan<- []byte) {\n\t\tdebugLog(t, \"writeStream %p, %d MsgNum\", s, opt.MsgNum)\n\n\t\tfor i := 0; i < opt.MsgNum; i++ {\n\t\t\tbuf := randBuf(msgsize)\n\t\t\tbufs <- buf\n\t\t\tdebugLog(t, \"%p writing %d bytes (message %d\/%d #%x)\", s, len(buf), i, opt.MsgNum, buf[:3])\n\t\t\tif _, err := s.Write(buf); err != nil {\n\t\t\t\tt.Errorf(\"s.Write(buf): %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\treadStream := func(s mux.MuxedStream, bufs <-chan []byte) {\n\t\tdebugLog(t, \"readStream %p, %d MsgNum\", s, opt.MsgNum)\n\n\t\tbuf2 := make([]byte, msgsize)\n\t\ti := 0\n\t\tfor buf1 := range bufs {\n\t\t\ti++\n\t\t\tdebugLog(t, \"%p reading %d bytes (message %d\/%d #%x)\", s, len(buf1), i-1, opt.MsgNum, buf1[:3])\n\n\t\t\tif _, err := io.ReadFull(s, buf2); err != nil {\n\t\t\t\tt.Errorf(\"io.ReadFull(s, buf2): %s\", err)\n\t\t\t\tdebugLog(t, \"%p failed to read %d bytes (message %d\/%d #%x)\", s, len(buf1), i-1, opt.MsgNum, buf1[:3])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !bytes.Equal(buf1, buf2) {\n\t\t\t\tt.Errorf(\"buffers not equal (%x != %x)\", buf1[:3], buf2[:3])\n\t\t\t}\n\t\t}\n\t}\n\n\topenStreamAndRW := func(c mux.MuxedConn) {\n\t\tdebugLog(t, \"openStreamAndRW %p, %d opt.MsgNum\", c, opt.MsgNum)\n\n\t\ts, err := c.OpenStream(context.Background())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to create NewStream: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbufs := make(chan []byte, opt.MsgNum)\n\t\tgo func() {\n\t\t\twriteStream(s, bufs)\n\t\t\tclose(bufs)\n\t\t}()\n\n\t\treadStream(s, bufs)\n\t\tfullClose(t, s)\n\t}\n\n\topenConnAndRW := func() {\n\t\tdebugLog(t, \"openConnAndRW\")\n\n\t\tvar wg sync.WaitGroup\n\t\tdefer wg.Wait()\n\n\t\tl, err := ta.Listen(maddr)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer l.Close()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tserve(t, l)\n\t\t}()\n\n\t\tc, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ serve the outgoing conn, because some muxers assume\n\t\t\/\/ that we _always_ call serve. (this is an error?)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer c.Close()\n\t\t\tdebugLog(t, \"serving connection\")\n\t\t\techo(t, c)\n\t\t}()\n\n\t\tvar openWg sync.WaitGroup\n\t\tfor i := 0; i < opt.StreamNum; i++ {\n\t\t\topenWg.Add(1)\n\t\t\tgo rateLimit(func() {\n\t\t\t\tdefer openWg.Done()\n\t\t\t\topenStreamAndRW(c)\n\t\t\t})\n\t\t}\n\t\topenWg.Wait()\n\t}\n\n\tdebugLog(t, \"openConnsAndRW, %d conns\", opt.ConnNum)\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor i := 0; i < opt.ConnNum; i++ {\n\t\twg.Add(1)\n\t\tgo rateLimit(func() {\n\t\t\tdefer wg.Done()\n\t\t\topenConnAndRW()\n\t\t})\n\t}\n}\n\nfunc SubtestStreamOpenStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tl, err := ta.Listen(maddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\tcount := 10000\n\tworkers := 5\n\n\tif race.WithRace() {\n\t\t\/\/ the race detector can only deal with 8128 simultaneous goroutines, so let's make sure we don't go overboard.\n\t\tcount = 1000\n\t}\n\n\tvar (\n\t\tconnA, connB transport.CapableConn\n\t)\n\n\taccepted := make(chan error, 1)\n\tgo func() {\n\t\tvar err error\n\t\tconnA, err = l.Accept()\n\t\taccepted <- err\n\t}()\n\tconnB, err = tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = <-accepted\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\tif connA != nil {\n\t\t\tconnA.Close()\n\t\t}\n\t\tif connB != nil {\n\t\t\tconnB.Close()\n\t\t}\n\t}()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor j := 0; j < workers; j++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < count; i++ {\n\t\t\t\t\ts, err := connA.OpenStream(context.Background())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\tfullClose(t, s)\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < count*workers; i++ {\n\t\t\tstr, err := connB.AcceptStream()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfullClose(t, str)\n\t\t\t}()\n\t\t}\n\t}()\n\n\ttimeout := time.After(StressTestTimeout)\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-timeout:\n\t\tt.Fatal(\"timed out receiving streams\")\n\tcase <-done:\n\t}\n}\n\nfunc SubtestStreamReset(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tl, err := ta.Listen(maddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tmuxa, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer muxa.Close()\n\n\t\ts, err := muxa.OpenStream(context.Background())\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer s.Close()\n\n\t\t\/\/ Some transports won't open the stream until we write. That's\n\t\t\/\/ fine.\n\t\t_, _ = s.Write([]byte(\"foo\"))\n\n\t\ttime.Sleep(time.Millisecond * 50)\n\n\t\t_, err = s.Write([]byte(\"bar\"))\n\t\tif err == nil {\n\t\t\tt.Error(\"should have failed to write\")\n\t\t}\n\n\t}()\n\n\tmuxb, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer muxb.Close()\n\n\tstr, err := muxb.AcceptStream()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tstr.Reset()\n}\n\nfunc SubtestStress1Conn1Stream1Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1,\n\t\tMsgNum: 1,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn1Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1,\n\t\tMsgNum: 100,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn100Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 100,\n\t\tMsgNum: 100,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress50Conn10Stream50Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 50,\n\t\tStreamNum: 10,\n\t\tMsgNum: 50,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn1000Stream10Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1000,\n\t\tMsgNum: 10,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn100Stream100Msg10MB(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 100,\n\t\tMsgNum: 100,\n\t\tMsgMax: 10000,\n\t\tMsgMin: 1000,\n\t})\n}\n<commit_msg>fix deadlock in the transport's serve function<commit_after>package ttransport\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\tcrand \"crypto\/rand\"\n\tmrand \"math\/rand\"\n\n\t\"github.com\/libp2p\/go-libp2p-core\/mux\"\n\t\"github.com\/libp2p\/go-libp2p-core\/peer\"\n\t\"github.com\/libp2p\/go-libp2p-core\/transport\"\n\t\"github.com\/libp2p\/go-libp2p-testing\/race\"\n\n\tma \"github.com\/multiformats\/go-multiaddr\"\n)\n\n\/\/ VerboseDebugging can be set to true to enable verbose debug logging in the\n\/\/ stream stress tests.\nvar VerboseDebugging = false\n\nvar randomness []byte\n\nvar StressTestTimeout = 1 * time.Minute\n\nfunc init() {\n\t\/\/ read 1MB of randomness\n\trandomness = make([]byte, 1<<20)\n\tif _, err := crand.Read(randomness); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif timeout := os.Getenv(\"TEST_STRESS_TIMEOUT_MS\"); timeout != \"\" {\n\t\tif v, err := strconv.ParseInt(timeout, 10, 32); err == nil {\n\t\t\tStressTestTimeout = time.Duration(v) * time.Millisecond\n\t\t}\n\t}\n}\n\ntype Options struct {\n\tConnNum int\n\tStreamNum int\n\tMsgNum int\n\tMsgMin int\n\tMsgMax int\n}\n\nfunc fullClose(t *testing.T, s mux.MuxedStream) {\n\tif err := s.CloseWrite(); err != nil {\n\t\tt.Error(err)\n\t\ts.Reset()\n\t\treturn\n\t}\n\tb, err := ioutil.ReadAll(s)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(b) != 0 {\n\t\tt.Error(\"expected to be done reading\")\n\t}\n\tif err := s.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc randBuf(size int) []byte {\n\tn := len(randomness) - size\n\tif size < 1 {\n\t\tpanic(fmt.Errorf(\"requested too large buffer (%d). max is %d\", size, len(randomness)))\n\t}\n\n\tstart := mrand.Intn(n)\n\treturn randomness[start : start+size]\n}\n\nfunc debugLog(t *testing.T, s string, args ...interface{}) {\n\tif VerboseDebugging {\n\t\tt.Logf(s, args...)\n\t}\n}\n\nfunc echoStream(t *testing.T, s mux.MuxedStream) {\n\t\/\/ echo everything\n\tvar err error\n\tif VerboseDebugging {\n\t\tt.Logf(\"accepted stream\")\n\t\t_, err = io.Copy(&logWriter{t, s}, s)\n\t\tt.Log(\"closing stream\")\n\t} else {\n\t\t_, err = io.Copy(s, s) \/\/ echo everything\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\ntype logWriter struct {\n\tt *testing.T\n\tW io.Writer\n}\n\nfunc (lw *logWriter) Write(buf []byte) (int, error) {\n\tlw.t.Logf(\"logwriter: writing %d bytes\", len(buf))\n\treturn lw.W.Write(buf)\n}\n\nfunc echo(t *testing.T, c transport.CapableConn) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor {\n\t\tstr, err := c.AcceptStream()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer str.Close()\n\t\t\techoStream(t, str)\n\t\t}()\n\t}\n}\n\nfunc serve(t *testing.T, l transport.Listener) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tfor {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer c.Close()\n\n\t\twg.Add(1)\n\t\tdebugLog(t, \"accepted connection\")\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\techo(t, c)\n\t\t}()\n\t}\n}\n\nfunc SubtestStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID, opt Options) {\n\tmsgsize := 1 << 11\n\n\trateLimitN := 5000 \/\/ max of 5k funcs, because -race has 8k max.\n\trateLimitChan := make(chan struct{}, rateLimitN)\n\tfor i := 0; i < rateLimitN; i++ {\n\t\trateLimitChan <- struct{}{}\n\t}\n\n\trateLimit := func(f func()) {\n\t\t<-rateLimitChan\n\t\tf()\n\t\trateLimitChan <- struct{}{}\n\t}\n\n\twriteStream := func(s mux.MuxedStream, bufs chan<- []byte) {\n\t\tdebugLog(t, \"writeStream %p, %d MsgNum\", s, opt.MsgNum)\n\n\t\tfor i := 0; i < opt.MsgNum; i++ {\n\t\t\tbuf := randBuf(msgsize)\n\t\t\tbufs <- buf\n\t\t\tdebugLog(t, \"%p writing %d bytes (message %d\/%d #%x)\", s, len(buf), i, opt.MsgNum, buf[:3])\n\t\t\tif _, err := s.Write(buf); err != nil {\n\t\t\t\tt.Errorf(\"s.Write(buf): %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\treadStream := func(s mux.MuxedStream, bufs <-chan []byte) {\n\t\tdebugLog(t, \"readStream %p, %d MsgNum\", s, opt.MsgNum)\n\n\t\tbuf2 := make([]byte, msgsize)\n\t\ti := 0\n\t\tfor buf1 := range bufs {\n\t\t\ti++\n\t\t\tdebugLog(t, \"%p reading %d bytes (message %d\/%d #%x)\", s, len(buf1), i-1, opt.MsgNum, buf1[:3])\n\n\t\t\tif _, err := io.ReadFull(s, buf2); err != nil {\n\t\t\t\tt.Errorf(\"io.ReadFull(s, buf2): %s\", err)\n\t\t\t\tdebugLog(t, \"%p failed to read %d bytes (message %d\/%d #%x)\", s, len(buf1), i-1, opt.MsgNum, buf1[:3])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !bytes.Equal(buf1, buf2) {\n\t\t\t\tt.Errorf(\"buffers not equal (%x != %x)\", buf1[:3], buf2[:3])\n\t\t\t}\n\t\t}\n\t}\n\n\topenStreamAndRW := func(c mux.MuxedConn) {\n\t\tdebugLog(t, \"openStreamAndRW %p, %d opt.MsgNum\", c, opt.MsgNum)\n\n\t\ts, err := c.OpenStream(context.Background())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to create NewStream: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbufs := make(chan []byte, opt.MsgNum)\n\t\tgo func() {\n\t\t\twriteStream(s, bufs)\n\t\t\tclose(bufs)\n\t\t}()\n\n\t\treadStream(s, bufs)\n\t\tfullClose(t, s)\n\t}\n\n\topenConnAndRW := func() {\n\t\tdebugLog(t, \"openConnAndRW\")\n\n\t\tvar wg sync.WaitGroup\n\t\tdefer wg.Wait()\n\n\t\tl, err := ta.Listen(maddr)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer l.Close()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tserve(t, l)\n\t\t}()\n\n\t\tc, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ serve the outgoing conn, because some muxers assume\n\t\t\/\/ that we _always_ call serve. (this is an error?)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer c.Close()\n\t\t\tdebugLog(t, \"serving connection\")\n\t\t\techo(t, c)\n\t\t}()\n\n\t\tvar openWg sync.WaitGroup\n\t\tfor i := 0; i < opt.StreamNum; i++ {\n\t\t\topenWg.Add(1)\n\t\t\tgo rateLimit(func() {\n\t\t\t\tdefer openWg.Done()\n\t\t\t\topenStreamAndRW(c)\n\t\t\t})\n\t\t}\n\t\topenWg.Wait()\n\t}\n\n\tdebugLog(t, \"openConnsAndRW, %d conns\", opt.ConnNum)\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tfor i := 0; i < opt.ConnNum; i++ {\n\t\twg.Add(1)\n\t\tgo rateLimit(func() {\n\t\t\tdefer wg.Done()\n\t\t\topenConnAndRW()\n\t\t})\n\t}\n}\n\nfunc SubtestStreamOpenStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tl, err := ta.Listen(maddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\tcount := 10000\n\tworkers := 5\n\n\tif race.WithRace() {\n\t\t\/\/ the race detector can only deal with 8128 simultaneous goroutines, so let's make sure we don't go overboard.\n\t\tcount = 1000\n\t}\n\n\tvar (\n\t\tconnA, connB transport.CapableConn\n\t)\n\n\taccepted := make(chan error, 1)\n\tgo func() {\n\t\tvar err error\n\t\tconnA, err = l.Accept()\n\t\taccepted <- err\n\t}()\n\tconnB, err = tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = <-accepted\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\tif connA != nil {\n\t\t\tconnA.Close()\n\t\t}\n\t\tif connB != nil {\n\t\t\tconnB.Close()\n\t\t}\n\t}()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor j := 0; j < workers; j++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < count; i++ {\n\t\t\t\t\ts, err := connA.OpenStream(context.Background())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Error(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\tfullClose(t, s)\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < count*workers; i++ {\n\t\t\tstr, err := connB.AcceptStream()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfullClose(t, str)\n\t\t\t}()\n\t\t}\n\t}()\n\n\ttimeout := time.After(StressTestTimeout)\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-timeout:\n\t\tt.Fatal(\"timed out receiving streams\")\n\tcase <-done:\n\t}\n}\n\nfunc SubtestStreamReset(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tl, err := ta.Listen(maddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tmuxa, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer muxa.Close()\n\n\t\ts, err := muxa.OpenStream(context.Background())\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tdefer s.Close()\n\n\t\t\/\/ Some transports won't open the stream until we write. That's\n\t\t\/\/ fine.\n\t\t_, _ = s.Write([]byte(\"foo\"))\n\n\t\ttime.Sleep(time.Millisecond * 50)\n\n\t\t_, err = s.Write([]byte(\"bar\"))\n\t\tif err == nil {\n\t\t\tt.Error(\"should have failed to write\")\n\t\t}\n\n\t}()\n\n\tmuxb, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer muxb.Close()\n\n\tstr, err := muxb.AcceptStream()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tstr.Reset()\n}\n\nfunc SubtestStress1Conn1Stream1Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1,\n\t\tMsgNum: 1,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn1Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1,\n\t\tMsgNum: 100,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn100Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 100,\n\t\tMsgNum: 100,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress50Conn10Stream50Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 50,\n\t\tStreamNum: 10,\n\t\tMsgNum: 50,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn1000Stream10Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 1000,\n\t\tMsgNum: 10,\n\t\tMsgMax: 100,\n\t\tMsgMin: 100,\n\t})\n}\n\nfunc SubtestStress1Conn100Stream100Msg10MB(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {\n\tSubtestStress(t, ta, tb, maddr, peerA, Options{\n\t\tConnNum: 1,\n\t\tStreamNum: 100,\n\t\tMsgNum: 100,\n\t\tMsgMax: 10000,\n\t\tMsgMin: 1000,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/majormjr\/rcon\"\n\t\"regexp\"\n\t\"github.com\/Masterminds\/semver\"\n)\n\ntype FactorioServer struct {\n\tCmd *exec.Cmd `json:\"-\"`\n\tSavefile string `json:\"savefile\"`\n\tLatency int `json:\"latency\"`\n\tBindIP string `json:\"bindip\"`\n\tPort int `json:\"port\"`\n\tRunning bool `json:\"running\"`\n\tVersion string `json:\"fac_version\"`\n\tBaseModVersion string `json:\"base_mod_version\"`\n\tSemVerVersion *semver.Version `json:\"-\"`\n\tStdOut io.ReadCloser `json:\"-\"`\n\tStdErr io.ReadCloser `json:\"-\"`\n\tStdIn io.WriteCloser `json:\"-\"`\n\tSettings map[string]interface{} `json:\"-\"`\n\tRcon *rcon.RemoteConsole `json:\"-\"`\n\tLogChan chan []string `json:\"-\"`\n}\n\nfunc randomPort() int {\n\t\/\/ Returns random port to use for rcon connection\n\treturn rand.Intn(45000-40000) + 40000\n}\n\nfunc initFactorio() (f *FactorioServer, err error) {\n\tf = new(FactorioServer)\n\tf.Settings = make(map[string]interface{})\n\n\tif err = os.MkdirAll(config.FactorioConfigDir, 0755); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create config directory: %v\", err)\n\t}\n\n\tsettingsPath := filepath.Join(config.FactorioConfigDir, config.SettingsFile)\n\tvar settings *os.File\n\n\tif _, err := os.Stat(settingsPath); os.IsNotExist(err) {\n\t\t\/\/ copy example settings to supplied settings file, if not exists\n\t\tlog.Printf(\"Server settings at %s not found, copying example server settings.\\n\", settingsPath)\n\n\t\texamplePath := filepath.Join(config.FactorioDir, \"data\", \"server-settings.example.json\")\n\n\t\texample, err := os.Open(examplePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open example server settings: %v\", err)\n\t\t}\n\t\tdefer example.Close()\n\n\t\tsettings, err = os.Create(settingsPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create server settings file: %v\", err)\n\t\t}\n\t\tdefer settings.Close()\n\n\t\t_, err = io.Copy(settings, example)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to copy example server settings: %v\", err)\n\t\t}\n\t} else {\n\t\t\/\/ otherwise, open file normally\n\t\tsettings, err = os.Open(settingsPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open server settings file: %v\", err)\n\t\t}\n\t\tdefer settings.Close()\n\t}\n\n\t\/\/ before reading reset offset\n\tif _, err = settings.Seek(0, 0); err != nil {\n\t\treturn nil, fmt.Errorf(\"error while seeking in settings file: %v\", err)\n\t}\n\n\tif err = json.NewDecoder(settings).Decode(&f.Settings); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %s: %v\", settingsPath, err)\n\t}\n\n\tlog.Printf(\"Loaded Factorio settings from %s\\n\", settingsPath)\n\n\n\t\/\/Load factorio version\n\tout, err := exec.Command(config.FactorioBinary, \"--version\").Output()\n\tif err != nil {\n\t\tlog.Printf(\"error on loading factorio version: %s\", err)\n\t\treturn\n\t}\n\n\treg := regexp.MustCompile(\"Version.*?((\\\\d+\\\\.)?(\\\\d+\\\\.)?(\\\\*|\\\\d+)+)\")\n\tfound := reg.FindStringSubmatch(string(out))\n\tf.Version = found[1]\n\n\t\/\/Load baseMod version\n\tbaseModInfoFile := filepath.Join(config.FactorioDir, \"data\", \"base\", \"info.json\")\n\tbmifBa, err := ioutil.ReadFile(baseModInfoFile)\n\tif err != nil {\n\t\tlog.Printf(\"couldn't open baseMods info.json: %s\", err)\n\t\treturn\n\t}\n\tvar modInfo ModInfo\n\terr = json.Unmarshal(bmifBa, &modInfo)\n\tif err != nil {\n\t\tlog.Printf(\"error unmarshalling baseMods info.json to a modInfo: %s\", err)\n\t\treturn\n\t}\n\n\tf.BaseModVersion = modInfo.Version\n\tf.SemVerVersion, err = semver.NewVersion(modInfo.Version)\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading semver-factorio-version: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (f *FactorioServer) Run() error {\n\tvar err error\n\n\tdata, err := json.MarshalIndent(f.Settings, \"\", \" \")\n\tif err != nil {\n\t\tlog.Println(\"Failed to marshal FactorioServerSettings: \", err)\n\t} else {\n\t\tioutil.WriteFile(filepath.Join(config.FactorioConfigDir, config.SettingsFile), data, 0644)\n\t}\n\n\targs := []string{\n\t\t\"--bind\", (f.BindIP),\n\t\t\"--port\", strconv.Itoa(f.Port),\n\t\t\"--server-settings\", filepath.Join(config.FactorioConfigDir, \"server-settings.json\"),\n\t\t\"--rcon-port\", strconv.Itoa(config.FactorioRconPort),\n\t\t\"--rcon-password\", config.FactorioRconPass,\n\t}\n\n\tif f.Savefile == \"Load Latest\" {\n\t\targs = append(args, \"--start-server-load-latest\")\n\t} else {\n\t\targs = append(args, \"--start-server\", filepath.Join(config.FactorioSavesDir, f.Savefile))\n\t}\n\n\tlog.Println(\"Starting server with command: \", config.FactorioBinary, args)\n\n\tf.Cmd = exec.Command(config.FactorioBinary, args...)\n\n\tf.StdOut, err = f.Cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stdout pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tf.StdIn, err = f.Cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stdin pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tf.StdErr, err = f.Cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stderr pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tgo f.parseRunningCommand(f.StdOut)\n\tgo f.parseRunningCommand(f.StdErr)\n\n\terr = f.Cmd.Start()\n\tif err != nil {\n\t\tlog.Printf(\"Factorio process failed to start: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = true\n\n\terr = f.Cmd.Wait()\n\tif err != nil {\n\t\tlog.Printf(\"Factorio process exited with error: %s\", err)\n\t\tf.Running = false\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) parseRunningCommand(std io.ReadCloser) (err error) {\n\tstdScanner := bufio.NewScanner(std)\n\tfor stdScanner.Scan() {\n\t\tlog.Printf(\"Factorio Server: %s\", stdScanner.Text())\n\t\tif err := f.writeLog(stdScanner.Text()); err != nil {\n\t\t\tlog.Printf(\"Error: %s\", err)\n\t\t}\n\n\t\tline := strings.Fields(stdScanner.Text())\n\t\t\/\/ Ensure logline slice is in bounds\n\t\tif len(line) > 0 {\n\t\t\t\/\/ Check if Factorio Server reports any errors if so handle it\n\t\t\tif line[1] == \"Error\" {\n\t\t\t\terr := f.checkLogError(line)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error checking Factorio Server Error: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ If rcon port opens indicated in log connect to rcon\n\t\t\trconLog := \"Starting RCON interface at port \" + strconv.Itoa(config.FactorioRconPort)\n\t\t\t\/\/ check if slice index is greater than 2 to prevent panic\n\t\t\tif len(line) > 2 {\n\t\t\t\t\/\/ log line for opened rcon connection\n\t\t\t\tif strings.Join(line[3:], \" \") == rconLog {\n\t\t\t\t\tlog.Printf(\"Rcon running on Factorio Server\")\n\t\t\t\t\terr = connectRC()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Error: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif err := stdScanner.Err(); err != nil {\n\t\tlog.Printf(\"Error reading std buffer: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (f *FactorioServer) writeLog(logline string) error {\n\tlogfileName := config.FactorioDir + \"factorio-server-console.log\"\n\tfile, err := os.OpenFile(logfileName, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"Cannot open logfile for appending Factorio Server output: %s\", err)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tlogline = logline + \"\\n\"\n\n\tif _, err = file.WriteString(logline); err != nil {\n\t\tlog.Printf(\"Error appending to factorio-server-console.log: %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) checkLogError(logline []string) error {\n\t\/\/ TODO Handle errors generated by running Factorio Server\n\tlog.Println(logline)\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) Stop() error {\n\tif runtime.GOOS == \"windows\" {\n\n\t\t\/\/ Disable our own handling of CTRL+C, so we don't close when we send it to the console.\n\t\tsetCtrlHandlingIsDisabledForThisProcess(true)\n\n\t\t\/\/ Send CTRL+C to all processes attached to the console (ourself, and the factorio server instance)\n\t\tsendCtrlCToPid(0)\n\t\tlog.Println(\"Sent SIGINT to Factorio process. Factorio shutting down...\")\n\n\t\t\/\/ Somehow, the Factorio devs managed to code the game to react appropriately to CTRL+C, including\n\t\t\/\/ saving the game, but not actually exit. So, we still have to manually kill the process, and\n\t\t\/\/ for extra fun, there's no way to know when the server save has actually completed (unless we want\n\t\t\/\/ to inject filesystem logic into what should be a process-level Stop() routine), so our best option\n\t\t\/\/ is to just wait an arbitrary amount of time and hope that the save is successful in that time.\n\t\ttime.Sleep(2 * time.Second)\n\t\tf.Cmd.Process.Signal(os.Kill)\n\n\t\t\/\/ Re-enable handling of CTRL+C after we're sure that the factrio server is shut down.\n\t\tsetCtrlHandlingIsDisabledForThisProcess(false)\n\n\t\tf.Running = false\n\t\treturn nil\n\t}\n\n\terr := f.Cmd.Process.Signal(os.Interrupt)\n\tif err != nil {\n\t\tif err.Error() == \"os: process already finished\" {\n\t\t\tf.Running = false\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Error sending SIGINT to Factorio process: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = false\n\tlog.Printf(\"Sent SIGINT to Factorio process. Factorio shutting down...\")\n\n\terr = f.Rcon.Close()\n\tif err != nil {\n\t\tlog.Printf(\"Error close rcon connection: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) Kill() error {\n\tif runtime.GOOS == \"windows\" {\n\n\t\terr := f.Cmd.Process.Signal(os.Kill)\n\t\tif err != nil {\n\t\t\tif err.Error() == \"os: process already finished\" {\n\t\t\t\tf.Running = false\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"Error sending SIGKILL to Factorio process: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tf.Running = false\n\t\tlog.Println(\"Sent SIGKILL to Factorio process. Factorio forced to exit.\")\n\n\t\treturn nil\n\t}\n\n\terr := f.Cmd.Process.Signal(os.Kill)\n\tif err != nil {\n\t\tif err.Error() == \"os: process already finished\" {\n\t\t\tf.Running = false\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Error sending SIGKILL to Factorio process: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = false\n\tlog.Printf(\"Sent SIGKILL to Factorio process. Factorio forced to exit.\")\n\n\terr = f.Rcon.Close()\n\tif err != nil {\n\t\tlog.Printf(\"Error close rcon connection: %s\", err)\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix crash on line without spaces<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/majormjr\/rcon\"\n\t\"regexp\"\n\t\"github.com\/Masterminds\/semver\"\n)\n\ntype FactorioServer struct {\n\tCmd *exec.Cmd `json:\"-\"`\n\tSavefile string `json:\"savefile\"`\n\tLatency int `json:\"latency\"`\n\tBindIP string `json:\"bindip\"`\n\tPort int `json:\"port\"`\n\tRunning bool `json:\"running\"`\n\tVersion string `json:\"fac_version\"`\n\tBaseModVersion string `json:\"base_mod_version\"`\n\tSemVerVersion *semver.Version `json:\"-\"`\n\tStdOut io.ReadCloser `json:\"-\"`\n\tStdErr io.ReadCloser `json:\"-\"`\n\tStdIn io.WriteCloser `json:\"-\"`\n\tSettings map[string]interface{} `json:\"-\"`\n\tRcon *rcon.RemoteConsole `json:\"-\"`\n\tLogChan chan []string `json:\"-\"`\n}\n\nfunc randomPort() int {\n\t\/\/ Returns random port to use for rcon connection\n\treturn rand.Intn(45000-40000) + 40000\n}\n\nfunc initFactorio() (f *FactorioServer, err error) {\n\tf = new(FactorioServer)\n\tf.Settings = make(map[string]interface{})\n\n\tif err = os.MkdirAll(config.FactorioConfigDir, 0755); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create config directory: %v\", err)\n\t}\n\n\tsettingsPath := filepath.Join(config.FactorioConfigDir, config.SettingsFile)\n\tvar settings *os.File\n\n\tif _, err := os.Stat(settingsPath); os.IsNotExist(err) {\n\t\t\/\/ copy example settings to supplied settings file, if not exists\n\t\tlog.Printf(\"Server settings at %s not found, copying example server settings.\\n\", settingsPath)\n\n\t\texamplePath := filepath.Join(config.FactorioDir, \"data\", \"server-settings.example.json\")\n\n\t\texample, err := os.Open(examplePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open example server settings: %v\", err)\n\t\t}\n\t\tdefer example.Close()\n\n\t\tsettings, err = os.Create(settingsPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create server settings file: %v\", err)\n\t\t}\n\t\tdefer settings.Close()\n\n\t\t_, err = io.Copy(settings, example)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to copy example server settings: %v\", err)\n\t\t}\n\t} else {\n\t\t\/\/ otherwise, open file normally\n\t\tsettings, err = os.Open(settingsPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open server settings file: %v\", err)\n\t\t}\n\t\tdefer settings.Close()\n\t}\n\n\t\/\/ before reading reset offset\n\tif _, err = settings.Seek(0, 0); err != nil {\n\t\treturn nil, fmt.Errorf(\"error while seeking in settings file: %v\", err)\n\t}\n\n\tif err = json.NewDecoder(settings).Decode(&f.Settings); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %s: %v\", settingsPath, err)\n\t}\n\n\tlog.Printf(\"Loaded Factorio settings from %s\\n\", settingsPath)\n\n\n\t\/\/Load factorio version\n\tout, err := exec.Command(config.FactorioBinary, \"--version\").Output()\n\tif err != nil {\n\t\tlog.Printf(\"error on loading factorio version: %s\", err)\n\t\treturn\n\t}\n\n\treg := regexp.MustCompile(\"Version.*?((\\\\d+\\\\.)?(\\\\d+\\\\.)?(\\\\*|\\\\d+)+)\")\n\tfound := reg.FindStringSubmatch(string(out))\n\tf.Version = found[1]\n\n\t\/\/Load baseMod version\n\tbaseModInfoFile := filepath.Join(config.FactorioDir, \"data\", \"base\", \"info.json\")\n\tbmifBa, err := ioutil.ReadFile(baseModInfoFile)\n\tif err != nil {\n\t\tlog.Printf(\"couldn't open baseMods info.json: %s\", err)\n\t\treturn\n\t}\n\tvar modInfo ModInfo\n\terr = json.Unmarshal(bmifBa, &modInfo)\n\tif err != nil {\n\t\tlog.Printf(\"error unmarshalling baseMods info.json to a modInfo: %s\", err)\n\t\treturn\n\t}\n\n\tf.BaseModVersion = modInfo.Version\n\tf.SemVerVersion, err = semver.NewVersion(modInfo.Version)\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading semver-factorio-version: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (f *FactorioServer) Run() error {\n\tvar err error\n\n\tdata, err := json.MarshalIndent(f.Settings, \"\", \" \")\n\tif err != nil {\n\t\tlog.Println(\"Failed to marshal FactorioServerSettings: \", err)\n\t} else {\n\t\tioutil.WriteFile(filepath.Join(config.FactorioConfigDir, config.SettingsFile), data, 0644)\n\t}\n\n\targs := []string{\n\t\t\"--bind\", (f.BindIP),\n\t\t\"--port\", strconv.Itoa(f.Port),\n\t\t\"--server-settings\", filepath.Join(config.FactorioConfigDir, \"server-settings.json\"),\n\t\t\"--rcon-port\", strconv.Itoa(config.FactorioRconPort),\n\t\t\"--rcon-password\", config.FactorioRconPass,\n\t}\n\n\tif f.Savefile == \"Load Latest\" {\n\t\targs = append(args, \"--start-server-load-latest\")\n\t} else {\n\t\targs = append(args, \"--start-server\", filepath.Join(config.FactorioSavesDir, f.Savefile))\n\t}\n\n\tlog.Println(\"Starting server with command: \", config.FactorioBinary, args)\n\n\tf.Cmd = exec.Command(config.FactorioBinary, args...)\n\n\tf.StdOut, err = f.Cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stdout pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tf.StdIn, err = f.Cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stdin pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tf.StdErr, err = f.Cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Error opening stderr pipe: %s\", err)\n\t\treturn err\n\t}\n\n\tgo f.parseRunningCommand(f.StdOut)\n\tgo f.parseRunningCommand(f.StdErr)\n\n\terr = f.Cmd.Start()\n\tif err != nil {\n\t\tlog.Printf(\"Factorio process failed to start: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = true\n\n\terr = f.Cmd.Wait()\n\tif err != nil {\n\t\tlog.Printf(\"Factorio process exited with error: %s\", err)\n\t\tf.Running = false\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) parseRunningCommand(std io.ReadCloser) (err error) {\n\tstdScanner := bufio.NewScanner(std)\n\tfor stdScanner.Scan() {\n\t\tlog.Printf(\"Factorio Server: %s\", stdScanner.Text())\n\t\tif err := f.writeLog(stdScanner.Text()); err != nil {\n\t\t\tlog.Printf(\"Error: %s\", err)\n\t\t}\n\n\t\tline := strings.Fields(stdScanner.Text())\n\t\t\/\/ Ensure logline slice is in bounds\n\t\tif len(line) > 1 {\n\t\t\t\/\/ Check if Factorio Server reports any errors if so handle it\n\t\t\tif line[1] == \"Error\" {\n\t\t\t\terr := f.checkLogError(line)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error checking Factorio Server Error: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ If rcon port opens indicated in log connect to rcon\n\t\t\trconLog := \"Starting RCON interface at port \" + strconv.Itoa(config.FactorioRconPort)\n\t\t\t\/\/ check if slice index is greater than 2 to prevent panic\n\t\t\tif len(line) > 2 {\n\t\t\t\t\/\/ log line for opened rcon connection\n\t\t\t\tif strings.Join(line[3:], \" \") == rconLog {\n\t\t\t\t\tlog.Printf(\"Rcon running on Factorio Server\")\n\t\t\t\t\terr = connectRC()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Error: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif err := stdScanner.Err(); err != nil {\n\t\tlog.Printf(\"Error reading std buffer: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (f *FactorioServer) writeLog(logline string) error {\n\tlogfileName := config.FactorioDir + \"factorio-server-console.log\"\n\tfile, err := os.OpenFile(logfileName, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"Cannot open logfile for appending Factorio Server output: %s\", err)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tlogline = logline + \"\\n\"\n\n\tif _, err = file.WriteString(logline); err != nil {\n\t\tlog.Printf(\"Error appending to factorio-server-console.log: %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) checkLogError(logline []string) error {\n\t\/\/ TODO Handle errors generated by running Factorio Server\n\tlog.Println(logline)\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) Stop() error {\n\tif runtime.GOOS == \"windows\" {\n\n\t\t\/\/ Disable our own handling of CTRL+C, so we don't close when we send it to the console.\n\t\tsetCtrlHandlingIsDisabledForThisProcess(true)\n\n\t\t\/\/ Send CTRL+C to all processes attached to the console (ourself, and the factorio server instance)\n\t\tsendCtrlCToPid(0)\n\t\tlog.Println(\"Sent SIGINT to Factorio process. Factorio shutting down...\")\n\n\t\t\/\/ Somehow, the Factorio devs managed to code the game to react appropriately to CTRL+C, including\n\t\t\/\/ saving the game, but not actually exit. So, we still have to manually kill the process, and\n\t\t\/\/ for extra fun, there's no way to know when the server save has actually completed (unless we want\n\t\t\/\/ to inject filesystem logic into what should be a process-level Stop() routine), so our best option\n\t\t\/\/ is to just wait an arbitrary amount of time and hope that the save is successful in that time.\n\t\ttime.Sleep(2 * time.Second)\n\t\tf.Cmd.Process.Signal(os.Kill)\n\n\t\t\/\/ Re-enable handling of CTRL+C after we're sure that the factrio server is shut down.\n\t\tsetCtrlHandlingIsDisabledForThisProcess(false)\n\n\t\tf.Running = false\n\t\treturn nil\n\t}\n\n\terr := f.Cmd.Process.Signal(os.Interrupt)\n\tif err != nil {\n\t\tif err.Error() == \"os: process already finished\" {\n\t\t\tf.Running = false\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Error sending SIGINT to Factorio process: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = false\n\tlog.Printf(\"Sent SIGINT to Factorio process. Factorio shutting down...\")\n\n\terr = f.Rcon.Close()\n\tif err != nil {\n\t\tlog.Printf(\"Error close rcon connection: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (f *FactorioServer) Kill() error {\n\tif runtime.GOOS == \"windows\" {\n\n\t\terr := f.Cmd.Process.Signal(os.Kill)\n\t\tif err != nil {\n\t\t\tif err.Error() == \"os: process already finished\" {\n\t\t\t\tf.Running = false\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"Error sending SIGKILL to Factorio process: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tf.Running = false\n\t\tlog.Println(\"Sent SIGKILL to Factorio process. Factorio forced to exit.\")\n\n\t\treturn nil\n\t}\n\n\terr := f.Cmd.Process.Signal(os.Kill)\n\tif err != nil {\n\t\tif err.Error() == \"os: process already finished\" {\n\t\t\tf.Running = false\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Error sending SIGKILL to Factorio process: %s\", err)\n\t\treturn err\n\t}\n\tf.Running = false\n\tlog.Printf(\"Sent SIGKILL to Factorio process. Factorio forced to exit.\")\n\n\terr = f.Rcon.Close()\n\tif err != nil {\n\t\tlog.Printf(\"Error close rcon connection: %s\", err)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package transport is an interface for connection based communication\npackage transport\n\nimport (\n\t\"time\"\n)\n\n\/\/ Transport is an interface which is used for communication between\n\/\/ services. It uses connection based socket send\/recv semantics and\n\/\/ has various implementations; http, grpc, quic.\ntype Transport interface {\n\tInit(...Option) error\n\tOptions() Options\n\tDial(addr string, opts ...DialOption) (Client, error)\n\tListen(addr string, opts ...ListenOption) (Listener, error)\n\tString() string\n}\n\ntype Message struct {\n\tHeader map[string]string\n\tBody []byte\n}\n\ntype Socket interface {\n\tRecv(*Message) error\n\tSend(*Message) error\n\tClose() error\n\tLocal() string\n\tRemote() string\n}\n\ntype Client interface {\n\tSocket\n}\n\ntype Listener interface {\n\tAddr() string\n\tClose() error\n\tAccept(func(Socket)) error\n}\n\ntype Option func(*Options)\n\ntype DialOption func(*DialOptions)\n\ntype ListenOption func(*ListenOptions)\n\nvar (\n\tDefaultTransport Transport = newHTTPTransport()\n\n\tDefaultDialTimeout = time.Second * 5\n)\n\nfunc NewTransport(opts ...Option) Transport {\n\treturn newHTTPTransport(opts...)\n}\n<commit_msg>update transport package comments<commit_after>\/\/ Package transport is an interface for synchronous connection based communication\npackage transport\n\nimport (\n\t\"time\"\n)\n\n\/\/ Transport is an interface which is used for communication between\n\/\/ services. It uses connection based socket send\/recv semantics and\n\/\/ has various implementations; http, grpc, quic.\ntype Transport interface {\n\tInit(...Option) error\n\tOptions() Options\n\tDial(addr string, opts ...DialOption) (Client, error)\n\tListen(addr string, opts ...ListenOption) (Listener, error)\n\tString() string\n}\n\ntype Message struct {\n\tHeader map[string]string\n\tBody []byte\n}\n\ntype Socket interface {\n\tRecv(*Message) error\n\tSend(*Message) error\n\tClose() error\n\tLocal() string\n\tRemote() string\n}\n\ntype Client interface {\n\tSocket\n}\n\ntype Listener interface {\n\tAddr() string\n\tClose() error\n\tAccept(func(Socket)) error\n}\n\ntype Option func(*Options)\n\ntype DialOption func(*DialOptions)\n\ntype ListenOption func(*ListenOptions)\n\nvar (\n\tDefaultTransport Transport = newHTTPTransport()\n\n\tDefaultDialTimeout = time.Second * 5\n)\n\nfunc NewTransport(opts ...Option) Transport {\n\treturn newHTTPTransport(opts...)\n}\n<|endoftext|>"} {"text":"<commit_before>package librariesio\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst APIKey string = \"1234\"\n\nfunc startNewServer() (*httptest.Server, *http.ServeMux, *url.URL) {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\turl, _ := url.Parse(server.URL)\n\treturn server, mux, url\n}\n\nfunc TestNewClient(t *testing.T) {\n\tc := NewClient(APIKey)\n\n\tif got, want := c.apiKey, APIKey; got != want {\n\t\tt.Errorf(\"NewClient baseURL is %v, want %v\", got, want)\n\t}\n\n\tif got, want := c.BaseURL.String(), \"https:\/\/libraries.io\/api\/\"; got != want {\n\t\tt.Errorf(\"NewClient baseURL is %v, want %v\", got, want)\n\t}\n\n\tif got, want := c.UserAgent, \"go-librariesio\/1\"; got != want {\n\t\tt.Errorf(\"NewClient userAgent is %v, want %v\", got, want)\n\t}\n}\n\nfunc TestNewRequest_badURL(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \":\", nil)\n\n\tif err == nil {\n\t\tt.Fatalf(\"NewRequest did not return error\")\n\t}\n\tif req != nil {\n\t\tt.Fatalf(\"did not expect non-nil request, got %+v\", req)\n\t}\n}\n\nfunc TestNewRequest_noPayload(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"NewRequest returned error: %v\", err)\n\t}\n\n\tif req.Body != nil {\n\t\tt.Fatalf(\"request contains a non-nil Body\\n%v\", req.Body)\n\t}\n}\n\nfunc TestNewRequest_invalidJSON(t *testing.T) {\n\tclient := NewClient(APIKey)\n\n\tfoo := make(map[interface{}]interface{})\n\n\t_, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", foo)\n\n\tif err == nil {\n\t\tt.Error(\"Expected error to be returned\")\n\t}\n\tif err, ok := err.(*json.UnsupportedTypeError); !ok {\n\t\tt.Errorf(\"Expected a JSON error, got %#v\", err)\n\t}\n}\n\nfunc TestNewRequest_auth(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"NewRequest returned error: %v\", err)\n\t}\n\n\tquery := req.URL.Query()\n\tif got, want := query.Get(\"api_key\"), APIKey; got != want {\n\t\tt.Fatalf(\"did not set query param to %v, got %v\", want, got)\n\t}\n}\n\nfunc TestNewRequest_headers(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tbody interface{}\n\t\theaders map[string]string\n\t}{\n\t\t{\n\t\t\tname: \"No Content-Type without body\",\n\t\t\tbody: nil,\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application\/json\",\n\t\t\t\t\"User-Agent\": \"go-librariesio\/\" + libraryVersion,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Content-Type with body\",\n\t\t\tbody: \"hello world\",\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application\/json\",\n\t\t\t\t\"Content-Type\": \"application\/json\",\n\t\t\t\t\"User-Agent\": \"go-librariesio\/\" + libraryVersion,\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := NewClient(APIKey)\n\n\tfor _, testCase := range testCases {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", testCase.body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"unexpected error\")\n\t\t\t}\n\n\t\t\tfor header, want := range testCase.headers {\n\t\t\t\tif got := req.Header.Get(header); got != want {\n\t\t\t\t\tt.Errorf(\"Header.Get(%q) returned %q, want %q\", header, got, want)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCheckResponse(t *testing.T) {\n\tresponse := &http.Response{\n\t\tRequest: &http.Request{},\n\t\tStatusCode: http.StatusBadRequest,\n\t\tBody: ioutil.NopCloser(strings.NewReader(`{\"error\":\"Nope Nope Nope\"}`)),\n\t}\n\terrResponse, ok := CheckResponse(response).(*ErrorResponse)\n\n\tif !ok {\n\t\tt.Errorf(\"Expected ErrorResponse, got %v\", errResponse)\n\t}\n\n\twant := &ErrorResponse{\n\t\tResponse: response,\n\t\tMessage: \"Nope Nope Nope\",\n\t}\n\tif !reflect.DeepEqual(errResponse, want) {\n\t\tt.Errorf(\"\\nExpected %#v\\nGot %#v\", want, errResponse)\n\t}\n}\n\nfunc TestErrorResponse(t *testing.T) {\n\tclient := NewClient(APIKey)\n\trequest, _ := client.NewRequest(\"GET\", \"pypi\/poyo\", nil)\n\tresponse := &http.Response{\n\t\tRequest: request,\n\t\tStatusCode: http.StatusBadRequest,\n\t}\n\n\terr := &ErrorResponse{Response: response, Message: \"nope\"}\n\twant := `GET https:\/\/libraries.io\/api\/pypi\/poyo?api_key=REDACTED: 400 \"nope\"`\n\n\tif got := err.Error(); got != want {\n\t\tt.Fatalf(\"\\nExpected %q\\nGot %q\", want, got)\n\t}\n}\n\nfunc TestDo(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\ttype foo struct {\n\t\tBar string `json:\"bar\"`\n\t}\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif method := \"GET\"; method != r.Method {\n\t\t\tt.Errorf(\"expected HTTP %v request, got %v\", method, r.Method)\n\t\t}\n\t\tfmt.Fprintf(w, `{\"bar\":\"helloworld\"}`)\n\t})\n\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\n\tgot := new(foo)\n\tclient.Do(context.Background(), req, got)\n\n\twant := &foo{Bar: \"helloworld\"}\n\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"response body does not match, want %v, got %v\", want, got)\n\t}\n}\n\nfunc TestDo_httpClientError(t *testing.T) {\n\tserver, _, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\t_, err := client.Do(context.Background(), &http.Request{}, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error to be returned\")\n\t}\n}\n\nfunc TestDo_badResponse(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"Nope\", http.StatusBadRequest)\n\t})\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\n\t_, err := client.Do(context.Background(), req, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"Expected HTTP %v error\", http.StatusBadRequest)\n\t}\n}\n\nfunc TestDo_badResponseBody(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, `{123:\"a\"}`)\n\t})\n\n\ttype foo struct {\n\t\tBar string `json:\"bar\"`\n\t}\n\n\trequest, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\t_, err := client.Do(context.Background(), request, new(foo))\n\n\tif err == nil {\n\t\tt.Fatal(\"Expected response body error\")\n\t}\n}\n<commit_msg>Implement a test for redactAPIKey()<commit_after>package librariesio\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst APIKey string = \"1234\"\n\nfunc startNewServer() (*httptest.Server, *http.ServeMux, *url.URL) {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\turl, _ := url.Parse(server.URL)\n\treturn server, mux, url\n}\n\nfunc TestNewClient(t *testing.T) {\n\tc := NewClient(APIKey)\n\n\tif got, want := c.apiKey, APIKey; got != want {\n\t\tt.Errorf(\"NewClient baseURL is %v, want %v\", got, want)\n\t}\n\n\tif got, want := c.BaseURL.String(), \"https:\/\/libraries.io\/api\/\"; got != want {\n\t\tt.Errorf(\"NewClient baseURL is %v, want %v\", got, want)\n\t}\n\n\tif got, want := c.UserAgent, \"go-librariesio\/1\"; got != want {\n\t\tt.Errorf(\"NewClient userAgent is %v, want %v\", got, want)\n\t}\n}\n\nfunc TestNewRequest_badURL(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \":\", nil)\n\n\tif err == nil {\n\t\tt.Fatalf(\"NewRequest did not return error\")\n\t}\n\tif req != nil {\n\t\tt.Fatalf(\"did not expect non-nil request, got %+v\", req)\n\t}\n}\n\nfunc TestNewRequest_noPayload(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"NewRequest returned error: %v\", err)\n\t}\n\n\tif req.Body != nil {\n\t\tt.Fatalf(\"request contains a non-nil Body\\n%v\", req.Body)\n\t}\n}\n\nfunc TestNewRequest_invalidJSON(t *testing.T) {\n\tclient := NewClient(APIKey)\n\n\tfoo := make(map[interface{}]interface{})\n\n\t_, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", foo)\n\n\tif err == nil {\n\t\tt.Error(\"Expected error to be returned\")\n\t}\n\tif err, ok := err.(*json.UnsupportedTypeError); !ok {\n\t\tt.Errorf(\"Expected a JSON error, got %#v\", err)\n\t}\n}\n\nfunc TestNewRequest_auth(t *testing.T) {\n\tclient := NewClient(APIKey)\n\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"NewRequest returned error: %v\", err)\n\t}\n\n\tquery := req.URL.Query()\n\tif got, want := query.Get(\"api_key\"), APIKey; got != want {\n\t\tt.Fatalf(\"did not set query param to %v, got %v\", want, got)\n\t}\n}\n\nfunc TestNewRequest_headers(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tbody interface{}\n\t\theaders map[string]string\n\t}{\n\t\t{\n\t\t\tname: \"No Content-Type without body\",\n\t\t\tbody: nil,\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application\/json\",\n\t\t\t\t\"User-Agent\": \"go-librariesio\/\" + libraryVersion,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Content-Type with body\",\n\t\t\tbody: \"hello world\",\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application\/json\",\n\t\t\t\t\"Content-Type\": \"application\/json\",\n\t\t\t\t\"User-Agent\": \"go-librariesio\/\" + libraryVersion,\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := NewClient(APIKey)\n\n\tfor _, testCase := range testCases {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\treq, err := client.NewRequest(\"GET\", \"pypi\/cookiecutter\", testCase.body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"unexpected error\")\n\t\t\t}\n\n\t\t\tfor header, want := range testCase.headers {\n\t\t\t\tif got := req.Header.Get(header); got != want {\n\t\t\t\t\tt.Errorf(\"Header.Get(%q) returned %q, want %q\", header, got, want)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRedactAPIKey(t *testing.T) {\n\turl, err := url.Parse(\"pypi\/poyo\")\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tq := url.Query()\n\tq.Set(\"api_key\", APIKey)\n\turl.RawQuery = q.Encode()\n\n\tgot := redactAPIKey(url).String()\n\twant := \"pypi\/poyo?api_key=REDACTED\"\n\n\tif got != want {\n\t\tt.Fatalf(\"api_key param in URL is not redacted, got %v\", got)\n\t}\n}\n\nfunc TestCheckResponse(t *testing.T) {\n\tresponse := &http.Response{\n\t\tRequest: &http.Request{},\n\t\tStatusCode: http.StatusBadRequest,\n\t\tBody: ioutil.NopCloser(strings.NewReader(`{\"error\":\"Nope Nope Nope\"}`)),\n\t}\n\terrResponse, ok := CheckResponse(response).(*ErrorResponse)\n\n\tif !ok {\n\t\tt.Errorf(\"Expected ErrorResponse, got %v\", errResponse)\n\t}\n\n\twant := &ErrorResponse{\n\t\tResponse: response,\n\t\tMessage: \"Nope Nope Nope\",\n\t}\n\tif !reflect.DeepEqual(errResponse, want) {\n\t\tt.Errorf(\"\\nExpected %#v\\nGot %#v\", want, errResponse)\n\t}\n}\n\nfunc TestErrorResponse(t *testing.T) {\n\tclient := NewClient(APIKey)\n\trequest, _ := client.NewRequest(\"GET\", \"pypi\/poyo\", nil)\n\tresponse := &http.Response{\n\t\tRequest: request,\n\t\tStatusCode: http.StatusBadRequest,\n\t}\n\n\terr := &ErrorResponse{Response: response, Message: \"nope\"}\n\twant := `GET https:\/\/libraries.io\/api\/pypi\/poyo?api_key=REDACTED: 400 \"nope\"`\n\n\tif got := err.Error(); got != want {\n\t\tt.Fatalf(\"\\nExpected %q\\nGot %q\", want, got)\n\t}\n}\n\nfunc TestDo(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\ttype foo struct {\n\t\tBar string `json:\"bar\"`\n\t}\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif method := \"GET\"; method != r.Method {\n\t\t\tt.Errorf(\"expected HTTP %v request, got %v\", method, r.Method)\n\t\t}\n\t\tfmt.Fprintf(w, `{\"bar\":\"helloworld\"}`)\n\t})\n\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\n\tgot := new(foo)\n\tclient.Do(context.Background(), req, got)\n\n\twant := &foo{Bar: \"helloworld\"}\n\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"response body does not match, want %v, got %v\", want, got)\n\t}\n}\n\nfunc TestDo_httpClientError(t *testing.T) {\n\tserver, _, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\t_, err := client.Do(context.Background(), &http.Request{}, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error to be returned\")\n\t}\n}\n\nfunc TestDo_badResponse(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"Nope\", http.StatusBadRequest)\n\t})\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\n\t_, err := client.Do(context.Background(), req, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"Expected HTTP %v error\", http.StatusBadRequest)\n\t}\n}\n\nfunc TestDo_badResponseBody(t *testing.T) {\n\tserver, mux, url := startNewServer()\n\tclient := NewClient(APIKey)\n\tclient.BaseURL = url\n\tdefer server.Close()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, `{123:\"a\"}`)\n\t})\n\n\ttype foo struct {\n\t\tBar string `json:\"bar\"`\n\t}\n\n\trequest, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\t_, err := client.Do(context.Background(), request, new(foo))\n\n\tif err == nil {\n\t\tt.Fatal(\"Expected response body error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package library\n\nimport (\n\t\"github.com\/nytlabs\/streamtools\/st\/blocks\"\n)\n\nvar Blocks = map[string]func() blocks.BlockInterface{\n\t\"count\": NewCount,\n\t\"toggle\": NewToggle,\n\t\"movingaverage\": NewMovingAverage,\n\t\"ticker\": NewTicker,\n\t\"analogPin\": NewAnalogPin,\n\t\"digitalpin\": NewDigitalPin,\n\t\"todigitalpin\": NewToDigitalPin,\n\t\"fromnsq\": NewFromNSQ,\n\t\"fromhttpstream\": NewFromHTTPStream,\n\t\"fromsqs\": NewFromSQS,\n\t\"frompost\": NewFromPost,\n\t\"fromfile\": NewFromFile,\n\t\"tonsq\": NewToNSQ,\n\t\"toelasticsearch\": NewToElasticsearch,\n\t\"tofile\": NewToFile,\n\t\"tolog\": NewToLog,\n\t\"tobeanstalkd\": NewToBeanstalkd,\n\t\"tomongodb\": NewToMongoDB,\n\t\"mask\": NewMask,\n\t\"filter\": NewFilter,\n\t\"sync\": NewSync,\n\t\"queue\": NewQueue,\n\t\"unpack\": NewUnpack,\n\t\"pack\": NewPack,\n\t\"parsexml\": NewParseXML,\n\t\"set\": NewSet,\n\t\"cache\": NewCache,\n\t\"join\": NewJoin,\n\t\"gethttp\": NewGetHTTP,\n\t\"gaussian\": NewGaussian,\n\t\"zipf\": NewZipf,\n\t\"poisson\": NewPoisson,\n\t\"map\": NewMap,\n\t\"histogram\": NewHistogram,\n\t\"timeseries\": NewTimeseries,\n\t\"fromwebsocket\": NewFromWebsocket,\n\t\"tonsqmulti\": NewToNSQMulti,\n\t\"fromudp\": NewFromUDP,\n\t\"dedupe\": NewDeDupe,\n}\n\nvar BlockDefs = map[string]*blocks.BlockDef{}\n\nfunc Start() {\n\tfor k, newBlock := range Blocks {\n\t\tb := newBlock()\n\t\tb.Build(blocks.BlockChans{nil, nil, nil, nil, nil, nil})\n\t\tb.Setup()\n\t\tBlockDefs[k] = b.GetDef()\n\t}\n}\n<commit_msg>updated linux\/arm library<commit_after>package library\n\nimport (\n\t\"github.com\/nytlabs\/streamtools\/st\/blocks\"\n)\n\nvar Blocks = map[string]func() blocks.BlockInterface{\n\t\"count\": NewCount,\n\t\"toggle\": NewToggle,\n\t\"movingaverage\": NewMovingAverage,\n\t\"ticker\": NewTicker,\n\t\"analogPin\": NewAnalogPin,\n\t\"digitalpin\": NewDigitalPin,\n\t\"todigitalpin\": NewToDigitalPin,\n\t\"fromnsq\": NewFromNSQ,\n\t\"fromhttpstream\": NewFromHTTPStream,\n\t\"fromsqs\": NewFromSQS,\n\t\"frompost\": NewFromPost,\n\t\"fromfile\": NewFromFile,\n\t\"tonsq\": NewToNSQ,\n\t\"toelasticsearch\": NewToElasticsearch,\n\t\"tofile\": NewToFile,\n\t\"tolog\": NewToLog,\n\t\"tobeanstalkd\": NewToBeanstalkd,\n\t\"tomongodb\": NewToMongoDB,\n\t\"mask\": NewMask,\n\t\"filter\": NewFilter,\n\t\"sync\": NewSync,\n\t\"queue\": NewQueue,\n\t\"unpack\": NewUnpack,\n\t\"pack\": NewPack,\n\t\"parsexml\": NewParseXML,\n\t\"set\": NewSet,\n\t\"cache\": NewCache,\n\t\"join\": NewJoin,\n\t\"gethttp\": NewGetHTTP,\n\t\"gaussian\": NewGaussian,\n\t\"zipf\": NewZipf,\n\t\"poisson\": NewPoisson,\n\t\"map\": NewMap,\n\t\"histogram\": NewHistogram,\n\t\"timeseries\": NewTimeseries,\n\t\"fromwebsocket\": NewFromWebsocket,\n\t\"tonsqmulti\": NewToNSQMulti,\n\t\"fromudp\": NewFromUDP,\n\t\"dedupe\": NewDeDupe,\n\t\"javascript\": NewJavascript,\n}\n\nvar BlockDefs = map[string]*blocks.BlockDef{}\n\nfunc Start() {\n\tfor k, newBlock := range Blocks {\n\t\tb := newBlock()\n\t\tb.Build(blocks.BlockChans{nil, nil, nil, nil, nil, nil})\n\t\tb.Setup()\n\t\tBlockDefs[k] = b.GetDef()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package migrations\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/OpenBazaar\/jsonpb\"\n\t\"github.com\/OpenBazaar\/openbazaar-go\/ipfs\"\n\t\"github.com\/OpenBazaar\/openbazaar-go\/pb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\tipfscore \"github.com\/ipfs\/go-ipfs\/core\"\n\t\"github.com\/ipfs\/go-ipfs\/repo\/fsrepo\"\n)\n\nconst migration012_ListingVersionForMarkupListings = 4\n\ntype Migration012 struct{}\n\ntype Migration012_ListingData struct {\n\tHash string `json:\"hash\"`\n\tSlug string `json:\"slug\"`\n\tTitle string `json:\"title\"`\n\tCategories []string `json:\"categories\"`\n\tNSFW bool `json:\"nsfw\"`\n\tContractType string `json:\"contractType\"`\n\tDescription string `json:\"description\"`\n\tThumbnail struct {\n\t\tTiny string `json:\"tiny\"`\n\t\tSmall string `json:\"small\"`\n\t\tMedium string `json:\"medium\"`\n\t} `json:\"thumbnail\"`\n\tPrice struct {\n\t\tCurrencyCode string `json:\"currencyCode\"`\n\t\tAmount uint64 `json:\"amount\"`\n\t\tModifier float32 `json:\"modifier\"`\n\t} `json:\"price\"`\n\tShipsTo []string `json:\"shipsTo\"`\n\tFreeShipping []string `json:\"freeShipping\"`\n\tLanguage string `json:\"language\"`\n\tAverageRating float32 `json:\"averageRating\"`\n\tRatingCount uint32 `json:\"ratingCount\"`\n\tModeratorIDs []string `json:\"moderators\"`\n\tAcceptedCurrencies []string `json:\"acceptedCurrencies\"`\n\tCoinType string `json:\"coinType\"`\n}\n\nfunc Migration012_listingHasNewFeaturesAndOldVersion(sl *pb.SignedListing) bool {\n\tmetadata := sl.Listing.Metadata\n\tif metadata == nil {\n\t\treturn false\n\t}\n\treturn metadata.Version == 3 &&\n\t\tmetadata.PriceModifier != 0\n}\n\nfunc Migration012_GetIdentityKey(repoPath, databasePassword string, testnetEnabled bool) ([]byte, error) {\n\tdb, err := OpenDB(repoPath, databasePassword, testnetEnabled)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer db.Close()\n\n\tvar identityKey []byte\n\terr = db.\n\t\tQueryRow(\"select value from config where key=?\", \"identityKey\").\n\t\tScan(&identityKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn identityKey, nil\n}\n\nfunc (Migration012) Up(repoPath, databasePassword string, testnetEnabled bool) error {\n\tlistingsIndexFilePath := path.Join(repoPath, \"root\", \"listings.json\")\n\n\t\/\/ Find all crypto listings\n\tlistingsIndexJSONBytes, err := ioutil.ReadFile(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlistingsIndex := []*Migration012_ListingData{}\n\terr = json.Unmarshal(listingsIndexJSONBytes, &listingsIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcryptoListings := []*Migration012_ListingData{}\n\tfor _, listingAbstract := range listingsIndex {\n\t\tif listingAbstract.ContractType == \"CRYPTOCURRENCY\" {\n\t\t\tcryptoListings = append(cryptoListings, listingAbstract)\n\t\t}\n\t}\n\n\t\/\/ Finish early If no crypto listings\n\tif len(cryptoListings) == 0 {\n\t\treturn writeRepoVer(repoPath, 13)\n\t}\n\n\t\/\/ Check each crypto listing for markup\n\tmarkupListings := []*pb.SignedListing{}\n\tfor _, listingAbstract := range cryptoListings {\n\t\tlistingJSONBytes, err := ioutil.ReadFile(migration012_listingFilePath(repoPath, listingAbstract.Slug))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsl := new(pb.SignedListing)\n\t\terr = jsonpb.UnmarshalString(string(listingJSONBytes), sl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif Migration012_listingHasNewFeaturesAndOldVersion(sl) {\n\t\t\tmarkupListings = append(markupListings, sl)\n\t\t}\n\t}\n\n\t\/\/ Finish early If no crypto listings with new features are found\n\tif len(markupListings) == 0 {\n\t\treturn writeRepoVer(repoPath, 13)\n\t}\n\n\t\/\/ Setup signing capabilities\n\tidentityKey, err := Migration012_GetIdentityKey(repoPath, databasePassword, testnetEnabled)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidentity, err := ipfs.IdentityFromKey(identityKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ IPFS node setup\n\tr, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcfg, err := r.Config()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcfg.Identity = identity\n\n\tncfg := &ipfscore.BuildCfg{\n\t\tRepo: r,\n\t\tOnline: true,\n\t\tExtraOpts: map[string]bool{\n\t\t\t\"mplex\": true,\n\t\t},\n\t\tDNSResolver: nil,\n\t\tRouting: nil,\n\t}\n\n\tnd, err := ipfscore.NewNode(cctx, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer nd.Close()\n\n\t\/\/ Update each listing to have the latest version number and resave\n\t\/\/ Save the new hashes for each changed listing so we can update the index.\n\thashes := make(map[string]string)\n\n\tfor _, sl := range markupListings {\n\t\tsl.Listing.Metadata.Version = migration012_ListingVersionForMarkupListings\n\n\t\tserializedListing, err := proto.Marshal(sl.Listing)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tidSig, err := nd.PrivateKey.Sign(serializedListing)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsl.Signature = idSig\n\n\t\tm := jsonpb.Marshaler{\n\t\t\tEnumsAsInts: false,\n\t\t\tEmitDefaults: false,\n\t\t\tIndent: \" \",\n\t\t\tOrigName: false,\n\t\t}\n\t\tout, err := m.MarshalToString(sl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfilename := migration012_listingFilePath(repoPath, sl.Listing.Slug)\n\t\tif err := ioutil.WriteFile(filename, []byte(out), os.ModePerm); err != nil {\n\t\t\treturn err\n\t\t}\n\t\th, err := ipfs.GetHashOfFile(nd, filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thashes[sl.Listing.Slug] = h\n\t}\n\n\t\/\/ Update listing index\n\tindexBytes, err := ioutil.ReadFile(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar index []Migration012_ListingData\n\n\terr = json.Unmarshal(indexBytes, &index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, l := range index {\n\t\th, ok := hashes[l.Slug]\n\n\t\t\/\/ Not one of the changed listings\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tl.Hash = h\n\t\tindex[i] = l\n\t}\n\n\t\/\/ Write it back to file\n\tifile, err := os.Create(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ifile.Close()\n\n\tj, jerr := json.MarshalIndent(index, \"\", \" \")\n\tif jerr != nil {\n\t\treturn jerr\n\t}\n\t_, werr := ifile.Write(j)\n\tif werr != nil {\n\t\treturn werr\n\t}\n\n\t_, err = ipfs.GetHashOfFile(nd, listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn writeRepoVer(repoPath, 13)\n}\n\nfunc migration012_listingFilePath(datadir string, slug string) string {\n\treturn path.Join(datadir, \"root\", \"listings\", slug+\".json\")\n}\n\nfunc (Migration012) Down(repoPath, databasePassword string, testnetEnabled bool) error {\n\treturn writeRepoVer(repoPath, 12)\n}\n<commit_msg>TWEAK: Make ipfs node in Migration012 be offline.<commit_after>package migrations\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/OpenBazaar\/jsonpb\"\n\t\"github.com\/OpenBazaar\/openbazaar-go\/ipfs\"\n\t\"github.com\/OpenBazaar\/openbazaar-go\/pb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\tipfscore \"github.com\/ipfs\/go-ipfs\/core\"\n\t\"github.com\/ipfs\/go-ipfs\/repo\/fsrepo\"\n)\n\nconst migration012_ListingVersionForMarkupListings = 4\n\ntype Migration012 struct{}\n\ntype Migration012_ListingData struct {\n\tHash string `json:\"hash\"`\n\tSlug string `json:\"slug\"`\n\tTitle string `json:\"title\"`\n\tCategories []string `json:\"categories\"`\n\tNSFW bool `json:\"nsfw\"`\n\tContractType string `json:\"contractType\"`\n\tDescription string `json:\"description\"`\n\tThumbnail struct {\n\t\tTiny string `json:\"tiny\"`\n\t\tSmall string `json:\"small\"`\n\t\tMedium string `json:\"medium\"`\n\t} `json:\"thumbnail\"`\n\tPrice struct {\n\t\tCurrencyCode string `json:\"currencyCode\"`\n\t\tAmount uint64 `json:\"amount\"`\n\t\tModifier float32 `json:\"modifier\"`\n\t} `json:\"price\"`\n\tShipsTo []string `json:\"shipsTo\"`\n\tFreeShipping []string `json:\"freeShipping\"`\n\tLanguage string `json:\"language\"`\n\tAverageRating float32 `json:\"averageRating\"`\n\tRatingCount uint32 `json:\"ratingCount\"`\n\tModeratorIDs []string `json:\"moderators\"`\n\tAcceptedCurrencies []string `json:\"acceptedCurrencies\"`\n\tCoinType string `json:\"coinType\"`\n}\n\nfunc Migration012_listingHasNewFeaturesAndOldVersion(sl *pb.SignedListing) bool {\n\tmetadata := sl.Listing.Metadata\n\tif metadata == nil {\n\t\treturn false\n\t}\n\treturn metadata.Version == 3 &&\n\t\tmetadata.PriceModifier != 0\n}\n\nfunc Migration012_GetIdentityKey(repoPath, databasePassword string, testnetEnabled bool) ([]byte, error) {\n\tdb, err := OpenDB(repoPath, databasePassword, testnetEnabled)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer db.Close()\n\n\tvar identityKey []byte\n\terr = db.\n\t\tQueryRow(\"select value from config where key=?\", \"identityKey\").\n\t\tScan(&identityKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn identityKey, nil\n}\n\nfunc (Migration012) Up(repoPath, databasePassword string, testnetEnabled bool) error {\n\tlistingsIndexFilePath := path.Join(repoPath, \"root\", \"listings.json\")\n\n\t\/\/ Find all crypto listings\n\tlistingsIndexJSONBytes, err := ioutil.ReadFile(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlistingsIndex := []*Migration012_ListingData{}\n\terr = json.Unmarshal(listingsIndexJSONBytes, &listingsIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcryptoListings := []*Migration012_ListingData{}\n\tfor _, listingAbstract := range listingsIndex {\n\t\tif listingAbstract.ContractType == \"CRYPTOCURRENCY\" {\n\t\t\tcryptoListings = append(cryptoListings, listingAbstract)\n\t\t}\n\t}\n\n\t\/\/ Finish early If no crypto listings\n\tif len(cryptoListings) == 0 {\n\t\treturn writeRepoVer(repoPath, 13)\n\t}\n\n\t\/\/ Check each crypto listing for markup\n\tmarkupListings := []*pb.SignedListing{}\n\tfor _, listingAbstract := range cryptoListings {\n\t\tlistingJSONBytes, err := ioutil.ReadFile(migration012_listingFilePath(repoPath, listingAbstract.Slug))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsl := new(pb.SignedListing)\n\t\terr = jsonpb.UnmarshalString(string(listingJSONBytes), sl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif Migration012_listingHasNewFeaturesAndOldVersion(sl) {\n\t\t\tmarkupListings = append(markupListings, sl)\n\t\t}\n\t}\n\n\t\/\/ Finish early If no crypto listings with new features are found\n\tif len(markupListings) == 0 {\n\t\treturn writeRepoVer(repoPath, 13)\n\t}\n\n\t\/\/ Setup signing capabilities\n\tidentityKey, err := Migration012_GetIdentityKey(repoPath, databasePassword, testnetEnabled)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidentity, err := ipfs.IdentityFromKey(identityKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ IPFS node setup\n\tr, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcfg, err := r.Config()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcfg.Identity = identity\n\n\tncfg := &ipfscore.BuildCfg{\n\t\tRepo: r,\n\t\tOnline: false,\n\t\tExtraOpts: map[string]bool{\n\t\t\t\"mplex\": true,\n\t\t},\n\t\tDNSResolver: nil,\n\t\tRouting: nil,\n\t}\n\n\tnd, err := ipfscore.NewNode(cctx, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer nd.Close()\n\n\t\/\/ Update each listing to have the latest version number and resave\n\t\/\/ Save the new hashes for each changed listing so we can update the index.\n\thashes := make(map[string]string)\n\n\tfor _, sl := range markupListings {\n\t\tsl.Listing.Metadata.Version = migration012_ListingVersionForMarkupListings\n\n\t\tserializedListing, err := proto.Marshal(sl.Listing)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tidSig, err := nd.PrivateKey.Sign(serializedListing)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsl.Signature = idSig\n\n\t\tm := jsonpb.Marshaler{\n\t\t\tEnumsAsInts: false,\n\t\t\tEmitDefaults: false,\n\t\t\tIndent: \" \",\n\t\t\tOrigName: false,\n\t\t}\n\t\tout, err := m.MarshalToString(sl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfilename := migration012_listingFilePath(repoPath, sl.Listing.Slug)\n\t\tif err := ioutil.WriteFile(filename, []byte(out), os.ModePerm); err != nil {\n\t\t\treturn err\n\t\t}\n\t\th, err := ipfs.GetHashOfFile(nd, filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thashes[sl.Listing.Slug] = h\n\t}\n\n\t\/\/ Update listing index\n\tindexBytes, err := ioutil.ReadFile(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar index []Migration012_ListingData\n\n\terr = json.Unmarshal(indexBytes, &index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, l := range index {\n\t\th, ok := hashes[l.Slug]\n\n\t\t\/\/ Not one of the changed listings\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tl.Hash = h\n\t\tindex[i] = l\n\t}\n\n\t\/\/ Write it back to file\n\tifile, err := os.Create(listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ifile.Close()\n\n\tj, jerr := json.MarshalIndent(index, \"\", \" \")\n\tif jerr != nil {\n\t\treturn jerr\n\t}\n\t_, werr := ifile.Write(j)\n\tif werr != nil {\n\t\treturn werr\n\t}\n\n\t_, err = ipfs.GetHashOfFile(nd, listingsIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn writeRepoVer(repoPath, 13)\n}\n\nfunc migration012_listingFilePath(datadir string, slug string) string {\n\treturn path.Join(datadir, \"root\", \"listings\", slug+\".json\")\n}\n\nfunc (Migration012) Down(repoPath, databasePassword string, testnetEnabled bool) error {\n\treturn writeRepoVer(repoPath, 12)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"net\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/stampzilla-go\/stampzilla-server\/logic\"\n\tserverprotocol \"github.com\/stampzilla\/stampzilla-go\/stampzilla-server\/protocol\"\n)\n\ntype NodeServer struct {\n\tConfig *ServerConfig `inject:\"\"`\n\tLogic *logic.Logic `inject:\"\"`\n\tNodes *serverprotocol.Nodes `inject:\"\"`\n\tWebsocketHandler *WebsocketHandler `inject:\"\"`\n}\n\nfunc NewNodeServer() *NodeServer {\n\treturn &NodeServer{}\n}\n\nfunc (ns *NodeServer) Start() {\n\tlog.Info(\"Starting NodeServer (:\" + ns.Config.NodePort + \")\")\n\tlisten, err := net.Listen(\"tcp\", \":\"+ns.Config.NodePort)\n\tif err != nil {\n\t\tlog.Error(\"listen error\", err)\n\t\treturn\n\t}\n\n\tns.Logic.RestoreRulesFromFile(\"rules.json\")\n\n\tgo func() {\n\t\tfor {\n\t\t\tfd, err := listen.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"accept error\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tgo ns.newNodeConnection(fd)\n\t\t}\n\t}()\n}\n\nfunc (ns *NodeServer) newNodeConnection(connection net.Conn) {\n\t\/\/ Recive data\n\tlog.Info(\"New client connected\")\n\tname := \"\"\n\tuuid := \"\"\n\tvar logicChannel chan string\n\tfor {\n\t\treader := bufio.NewReader(connection)\n\t\tdecoder := json.NewDecoder(reader)\n\t\tvar info serverprotocol.Node\n\t\terr := decoder.Decode(&info)\n\n\t\tif err != nil {\n\t\t\tif err.Error() == \"EOF\" {\n\t\t\t\tlog.Info(name, \" - Client disconnected\")\n\t\t\t\tif uuid != \"\" {\n\t\t\t\t\tns.Nodes.Delete(uuid)\n\t\t\t\t\tclose(logicChannel)\n\t\t\t\t}\n\t\t\t\t\/\/TODO be able to not send everything always. perhaps implement remove instead of all?\n\t\t\t\tns.WebsocketHandler.SendAllNodes()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Warn(\"Not disconnect but error: \", err)\n\t\t\t\/\/return here?\n\t\t} else {\n\t\t\tname = info.Name\n\t\t\tuuid = info.Uuid\n\t\t\tinfo.SetConn(connection)\n\n\t\t\tif logicChannel == nil {\n\t\t\t\tlogicChannel = ns.Logic.ListenForChanges(uuid)\n\t\t\t}\n\n\t\t\tns.Nodes.Add(&info)\n\t\t\tlog.Info(info.Name, \" - Got update on state\")\n\t\t\tns.WebsocketHandler.SendSingleNode(uuid)\n\n\t\t\t\/\/Send to logic for evaluation\n\t\t\tstate, _ := json.Marshal(info.State)\n\t\t\tlogicChannel <- string(state)\n\t\t}\n\n\t}\n\n}\n<commit_msg>Added Uuid printout on status updates<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"net\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/stampzilla-go\/stampzilla-server\/logic\"\n\tserverprotocol \"github.com\/stampzilla\/stampzilla-go\/stampzilla-server\/protocol\"\n)\n\ntype NodeServer struct {\n\tConfig *ServerConfig `inject:\"\"`\n\tLogic *logic.Logic `inject:\"\"`\n\tNodes *serverprotocol.Nodes `inject:\"\"`\n\tWebsocketHandler *WebsocketHandler `inject:\"\"`\n}\n\nfunc NewNodeServer() *NodeServer {\n\treturn &NodeServer{}\n}\n\nfunc (ns *NodeServer) Start() {\n\tlog.Info(\"Starting NodeServer (:\" + ns.Config.NodePort + \")\")\n\tlisten, err := net.Listen(\"tcp\", \":\"+ns.Config.NodePort)\n\tif err != nil {\n\t\tlog.Error(\"listen error\", err)\n\t\treturn\n\t}\n\n\tns.Logic.RestoreRulesFromFile(\"rules.json\")\n\n\tgo func() {\n\t\tfor {\n\t\t\tfd, err := listen.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"accept error\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tgo ns.newNodeConnection(fd)\n\t\t}\n\t}()\n}\n\nfunc (ns *NodeServer) newNodeConnection(connection net.Conn) {\n\t\/\/ Recive data\n\tlog.Info(\"New client connected\")\n\tname := \"\"\n\tuuid := \"\"\n\tvar logicChannel chan string\n\tfor {\n\t\treader := bufio.NewReader(connection)\n\t\tdecoder := json.NewDecoder(reader)\n\t\tvar info serverprotocol.Node\n\t\terr := decoder.Decode(&info)\n\n\t\tif err != nil {\n\t\t\tif err.Error() == \"EOF\" {\n\t\t\t\tlog.Info(name, \" - Client disconnected\")\n\t\t\t\tif uuid != \"\" {\n\t\t\t\t\tns.Nodes.Delete(uuid)\n\t\t\t\t\tclose(logicChannel)\n\t\t\t\t}\n\t\t\t\t\/\/TODO be able to not send everything always. perhaps implement remove instead of all?\n\t\t\t\tns.WebsocketHandler.SendAllNodes()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Warn(\"Not disconnect but error: \", err)\n\t\t\t\/\/return here?\n\t\t} else {\n\t\t\tname = info.Name\n\t\t\tuuid = info.Uuid\n\t\t\tinfo.SetConn(connection)\n\n\t\t\tif logicChannel == nil {\n\t\t\t\tlogicChannel = ns.Logic.ListenForChanges(uuid)\n\t\t\t}\n\n\t\t\tns.Nodes.Add(&info)\n\t\t\tlog.Info(info.Uuid, \" - \", info.Name, \" - Got update on state\")\n\t\t\tns.WebsocketHandler.SendSingleNode(uuid)\n\n\t\t\t\/\/Send to logic for evaluation\n\t\t\tstate, _ := json.Marshal(info.State)\n\t\t\tlogicChannel <- string(state)\n\t\t}\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package presence_test\n\nimport (\n\t\"fmt\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/gozk\/zookeeper\"\n\t\"launchpad.net\/juju\/go\/state\/presence\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\n\/\/ waitFor blocks until conn knows that path exists. This is necessary because\n\/\/ distinct zk connections don't always have precisely the same view of the data.\nfunc waitFor(c *C, conn *zookeeper.Conn, path string) {\n\tstat, watch, err := conn.ExistsW(path)\n\tc.Assert(err, IsNil)\n\tif stat != nil {\n\t\treturn\n\t}\n\t\/\/ Test code only, local server; if this event *isn't* the node coming into\n\t\/\/ existence, we'll find out soon enough, and we don't want to stay blocked.\n\t<-watch\n}\n\ntype PresenceSuite struct {\n\tzkServer *zookeeper.Server\n\tzkTestRoot string\n\tzkTestPort int\n\tzkAddr string\n\tzkConn *zookeeper.Conn\n}\n\nfunc (s *PresenceSuite) SetUpSuite(c *C) {\n\tvar err error\n\ts.zkTestRoot = c.MkDir() + \"\/zookeeper\"\n\ts.zkTestPort = 21810\n\ts.zkAddr = fmt.Sprint(\"localhost:\", s.zkTestPort)\n\n\ts.zkServer, err = zookeeper.CreateServer(s.zkTestPort, s.zkTestRoot, \"\")\n\tif err != nil {\n\t\tc.Fatal(\"Cannot set up ZooKeeper server environment: \", err)\n\t}\n\terr = s.zkServer.Start()\n\tif err != nil {\n\t\tc.Fatal(\"Cannot start ZooKeeper server: \", err)\n\t}\n}\n\nfunc (s *PresenceSuite) TearDownSuite(c *C) {\n\tif s.zkServer != nil {\n\t\ts.zkServer.Destroy()\n\t}\n}\n\n\/\/ connect returns a zookeeper connection to s.zkAddr.\nfunc (s *PresenceSuite) connect(c *C) *zookeeper.Conn {\n\tzk, session, err := zookeeper.Dial(s.zkAddr, 15e9)\n\tc.Assert(err, IsNil)\n\tc.Assert((<-session).Ok(), Equals, true)\n\treturn zk\n}\n\nfunc (s *PresenceSuite) SetUpTest(c *C) {\n\ts.zkConn = s.connect(c)\n}\n\nfunc (s *PresenceSuite) TearDownTest(c *C) {\n\t\/\/ No need to recurse in this suite; just try to delete what we can see.\n\tchildren, _, err := s.zkConn.Children(\"\/\")\n\tif err == nil {\n\t\tfor _, child := range children {\n\t\t\ts.zkConn.Delete(\"\/\"+child, -1)\n\t\t}\n\t}\n\ts.zkConn.Close()\n}\n\nvar (\n\t_ = Suite(&PresenceSuite{})\n\tperiod = time.Duration(2.5e7) \/\/ 25ms\n\tlongEnough = period * 4 \/\/ longest possible time to detect a close\n\tpath = \"\/presence\"\n)\n\nfunc assertChange(c *C, watch <-chan bool, expectAlive bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\tc.Log(\"Liveness change not detected\")\n\t\tc.FailNow()\n\tcase alive, ok := <-watch:\n\t\tc.Assert(ok, Equals, true)\n\t\tc.Assert(alive, Equals, expectAlive)\n\t}\n}\n\nfunc assertClose(c *C, watch <-chan bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\tc.Log(\"Connection loss not detected\")\n\t\tc.FailNow()\n\tcase _, ok := <-watch:\n\t\tc.Assert(ok, Equals, false)\n\t}\n}\n\nfunc assertNoChange(c *C, watch <-chan bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\treturn\n\tcase <-watch:\n\t\tc.Log(\"Unexpected liveness change\")\n\t\tc.FailNow()\n\t}\n}\n\nfunc (s *PresenceSuite) TestNewPinger(c *C) {\n\t\/\/ Check not considered Alive before it exists.\n\talive, err := presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Watch for life, and check the watch doesn't fire early.\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\tassertNoChange(c, watch)\n\n\t\/\/ Start a Pinger, and check the watch fires.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\tassertChange(c, watch, true)\n\n\t\/\/ Check that Alive agrees.\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Watch for life again, and check it doesn't change.\n\talive, watch, err = presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n}\n\nfunc (s *PresenceSuite) TestKillPinger(c *C) {\n\t\/\/ Start a Pinger and a watch, and check sanity.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n\n\t\/\/ Kill the Pinger; check the watch fires and Alive agrees.\n\tp.Kill()\n\tassertChange(c, watch, false)\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Check that the pinger's node was deleted.\n\tstat, err := s.zkConn.Exists(path)\n\tc.Assert(err, IsNil)\n\tc.Assert(stat, IsNil)\n}\n\nfunc (s *PresenceSuite) TestClosePinger(c *C) {\n\t\/\/ Start a Pinger and a watch, and check sanity.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n\n\t\/\/ Close the Pinger; check the watch fires and Alive agrees.\n\tp.Close()\n\tassertChange(c, watch, false)\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Check that the pinger's node is still present.\n\tstat, err := s.zkConn.Exists(path)\n\tc.Assert(err, IsNil)\n\tc.Assert(stat, NotNil)\n}\n\nfunc (s *PresenceSuite) TestBadData(c *C) {\n\t\/\/ Create a node that contains inappropriate data.\n\t_, err := s.zkConn.Create(path, \"roflcopter\", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\tc.Assert(err, IsNil)\n\n\t\/\/ Check it is not interpreted as a presence node by Alive.\n\t_, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, ErrorMatches, \".* is not a valid presence node: .*\")\n\n\t\/\/ Check it is not interpreted as a presence node by Watch.\n\t_, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(watch, IsNil)\n\tc.Assert(err, ErrorMatches, \".* is not a valid presence node: .*\")\n}\n\nfunc (s *PresenceSuite) TestDisconnectDeadWatch(c *C) {\n\t\/\/ Create a target node.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tp.Close()\n\n\t\/\/ Start an alternate connection and ensure the node is stale.\n\taltConn := s.connect(c)\n\twaitFor(c, altConn, path)\n\ttime.Sleep(longEnough)\n\n\t\/\/ Start a watch using the alternate connection.\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Kill the watch connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) TestDisconnectMissingWatch(c *C) {\n\t\/\/ Don't even create a target node.\n\n\t\/\/ Start watching on an alternate connection.\n\taltConn := s.connect(c)\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Kill the watch connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) DONTTestDisconnectAliveWatch(c *C) {\n\t\/\/ Start a Pinger on the main connection\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\n\t\/\/ Start watching on an alternate connection, forwarded over another\n\t\/\/ connection we can kill safely.\n\taltConn := s.connect(c)\n\twaitFor(c, altConn, path)\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Kill the watch connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) DONTTestDisconnectPinger(c *C) {\n\t\/\/ Start a Pinger on an alternate connection, forwarded over another\n\t\/\/ connection we can kill safely.\n\taltConn := s.connect(c)\n\tp, err := presence.StartPinger(altConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\n\t\/\/ Watch on the \"main\" connection.\n\twaitFor(c, s.zkConn, path)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Kill the pinger connection and check the watch notices.\n\taltConn.Close()\n\tassertChange(c, watch, false)\n}\n<commit_msg>better failure-hiding<commit_after>package presence_test\n\nimport (\n\t\"fmt\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/gozk\/zookeeper\"\n\t\"launchpad.net\/juju\/go\/state\/presence\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\n\/\/ waitFor blocks until conn knows that path exists. This is necessary because\n\/\/ distinct zk connections don't always have precisely the same view of the data.\nfunc waitFor(c *C, conn *zookeeper.Conn, path string) {\n\tstat, watch, err := conn.ExistsW(path)\n\tc.Assert(err, IsNil)\n\tif stat != nil {\n\t\treturn\n\t}\n\t\/\/ Test code only, local server; if this event *isn't* the node coming into\n\t\/\/ existence, we'll find out soon enough, and we don't want to stay blocked.\n\t<-watch\n}\n\ntype PresenceSuite struct {\n\tzkServer *zookeeper.Server\n\tzkTestRoot string\n\tzkTestPort int\n\tzkAddr string\n\tzkConn *zookeeper.Conn\n}\n\nfunc (s *PresenceSuite) SetUpSuite(c *C) {\n\tvar err error\n\ts.zkTestRoot = c.MkDir() + \"\/zookeeper\"\n\ts.zkTestPort = 21810\n\ts.zkAddr = fmt.Sprint(\"localhost:\", s.zkTestPort)\n\n\ts.zkServer, err = zookeeper.CreateServer(s.zkTestPort, s.zkTestRoot, \"\")\n\tif err != nil {\n\t\tc.Fatal(\"Cannot set up ZooKeeper server environment: \", err)\n\t}\n\terr = s.zkServer.Start()\n\tif err != nil {\n\t\tc.Fatal(\"Cannot start ZooKeeper server: \", err)\n\t}\n}\n\nfunc (s *PresenceSuite) TearDownSuite(c *C) {\n\tif s.zkServer != nil {\n\t\ts.zkServer.Destroy()\n\t}\n}\n\n\/\/ connect returns a zookeeper connection to s.zkAddr.\nfunc (s *PresenceSuite) connect(c *C) *zookeeper.Conn {\n\tzk, session, err := zookeeper.Dial(s.zkAddr, 15e9)\n\tc.Assert(err, IsNil)\n\tc.Assert((<-session).Ok(), Equals, true)\n\treturn zk\n}\n\nfunc (s *PresenceSuite) SetUpTest(c *C) {\n\ts.zkConn = s.connect(c)\n}\n\nfunc (s *PresenceSuite) TearDownTest(c *C) {\n\t\/\/ No need to recurse in this suite; just try to delete what we can see.\n\tchildren, _, err := s.zkConn.Children(\"\/\")\n\tif err == nil {\n\t\tfor _, child := range children {\n\t\t\ts.zkConn.Delete(\"\/\"+child, -1)\n\t\t}\n\t}\n\ts.zkConn.Close()\n}\n\nvar (\n\t_ = Suite(&PresenceSuite{})\n\tperiod = time.Duration(2.5e7) \/\/ 25ms\n\tlongEnough = period * 4 \/\/ longest possible time to detect a close\n\tpath = \"\/presence\"\n)\n\nfunc assertChange(c *C, watch <-chan bool, expectAlive bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\tc.Log(\"Liveness change not detected\")\n\t\tc.FailNow()\n\tcase alive, ok := <-watch:\n\t\tc.Assert(ok, Equals, true)\n\t\tc.Assert(alive, Equals, expectAlive)\n\t}\n}\n\nfunc assertClose(c *C, watch <-chan bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\tc.Log(\"Connection loss not detected\")\n\t\tc.FailNow()\n\tcase _, ok := <-watch:\n\t\tc.Assert(ok, Equals, false)\n\t}\n}\n\nfunc assertNoChange(c *C, watch <-chan bool) {\n\tt := time.After(longEnough)\n\tselect {\n\tcase <-t:\n\t\treturn\n\tcase <-watch:\n\t\tc.Log(\"Unexpected liveness change\")\n\t\tc.FailNow()\n\t}\n}\n\nfunc (s *PresenceSuite) TestNewPinger(c *C) {\n\t\/\/ Check not considered Alive before it exists.\n\talive, err := presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Watch for life, and check the watch doesn't fire early.\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\tassertNoChange(c, watch)\n\n\t\/\/ Start a Pinger, and check the watch fires.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\tassertChange(c, watch, true)\n\n\t\/\/ Check that Alive agrees.\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Watch for life again, and check it doesn't change.\n\talive, watch, err = presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n}\n\nfunc (s *PresenceSuite) TestKillPinger(c *C) {\n\t\/\/ Start a Pinger and a watch, and check sanity.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n\n\t\/\/ Kill the Pinger; check the watch fires and Alive agrees.\n\tp.Kill()\n\tassertChange(c, watch, false)\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Check that the pinger's node was deleted.\n\tstat, err := s.zkConn.Exists(path)\n\tc.Assert(err, IsNil)\n\tc.Assert(stat, IsNil)\n}\n\nfunc (s *PresenceSuite) TestClosePinger(c *C) {\n\t\/\/ Start a Pinger and a watch, and check sanity.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\tassertNoChange(c, watch)\n\n\t\/\/ Close the Pinger; check the watch fires and Alive agrees.\n\tp.Close()\n\tassertChange(c, watch, false)\n\talive, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Check that the pinger's node is still present.\n\tstat, err := s.zkConn.Exists(path)\n\tc.Assert(err, IsNil)\n\tc.Assert(stat, NotNil)\n}\n\nfunc (s *PresenceSuite) TestBadData(c *C) {\n\t\/\/ Create a node that contains inappropriate data.\n\t_, err := s.zkConn.Create(path, \"roflcopter\", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\tc.Assert(err, IsNil)\n\n\t\/\/ Check it is not interpreted as a presence node by Alive.\n\t_, err = presence.Alive(s.zkConn, path)\n\tc.Assert(err, ErrorMatches, \".* is not a valid presence node: .*\")\n\n\t\/\/ Check it is not interpreted as a presence node by Watch.\n\t_, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(watch, IsNil)\n\tc.Assert(err, ErrorMatches, \".* is not a valid presence node: .*\")\n}\n\nfunc (s *PresenceSuite) TestDisconnectDeadWatch(c *C) {\n\t\/\/ Create a target node.\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tp.Close()\n\n\t\/\/ Start an alternate connection and ensure the node is stale.\n\taltConn := s.connect(c)\n\twaitFor(c, altConn, path)\n\ttime.Sleep(longEnough)\n\n\t\/\/ Start a watch using the alternate connection.\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Kill the watch connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) TestDisconnectMissingWatch(c *C) {\n\t\/\/ Don't even create a target node.\n\n\t\/\/ Start watching on an alternate connection.\n\taltConn := s.connect(c)\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, false)\n\n\t\/\/ Kill the watch's connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) TestDisconnectAliveWatch(c *C) {\n\tc.ExpectFailure(\"Waiting on gozk change to eliminate occasional panic after Close\")\n\tc.FailNow()\n\n\t\/\/ Start a Pinger on the main connection\n\tp, err := presence.StartPinger(s.zkConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\n\t\/\/ Start watching on an alternate connection.\n\taltConn := s.connect(c)\n\twaitFor(c, altConn, path)\n\talive, watch, err := presence.AliveW(altConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Kill the watch's connection and check it's alerted.\n\taltConn.Close()\n\tassertClose(c, watch)\n}\n\nfunc (s *PresenceSuite) TestDisconnectPinger(c *C) {\n\tc.ExpectFailure(\"Waiting on gozk change to eliminate consistent panic after Close\")\n\tc.FailNow()\n\n\t\/\/ Start a Pinger on an alternate connection.\n\taltConn := s.connect(c)\n\tp, err := presence.StartPinger(altConn, path, period)\n\tc.Assert(err, IsNil)\n\tdefer p.Close()\n\n\t\/\/ Watch on the \"main\" connection.\n\twaitFor(c, s.zkConn, path)\n\talive, watch, err := presence.AliveW(s.zkConn, path)\n\tc.Assert(err, IsNil)\n\tc.Assert(alive, Equals, true)\n\n\t\/\/ Kill the pinger connection and check the watch notices.\n\taltConn.Close()\n\tassertChange(c, watch, false)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ FileSystem is an interface which defines an open method similar to http.FileSystem,\n\/\/ but which also includes a context parameter.\ntype FileSystem interface {\n\tOpen(ctx context.Context, path string) (http.File, error)\n}\n\n\/\/ NewFileSystem creates a new FileSystem using an http.FileSystem as the underlying\n\/\/ storage.\nfunc NewFileSystem(fs http.FileSystem) FileSystem {\n\treturn &fileSystem{fs}\n}\n\ntype fileSystem struct {\n\thttp.FileSystem\n}\n\n\/\/ Open implements FileSystem.\nfunc (cfs *fileSystem) Open(ctx context.Context, path string) (http.File, error) {\n\treturn cfs.FileSystem.Open(path)\n}\n\n\/\/ RemoteFileSystem is an extension of the http.FileSystem interface\n\/\/ which includes the RemoteOpen method.\ntype RemoteFileSystem interface {\n\thttp.FileSystem\n\n\t\/\/ RemoteOpen returns a File which\n\tRemoteOpen(string) (*File, error)\n}\n\n\/\/ remoteFileSystem implements RemoteFileSystem\ntype remoteFileSystem struct {\n\tclient Client\n}\n\n\/\/ NewRemoteFileSystem creates a new file system using the given Client to handle\n\/\/ file requests.\nfunc NewRemoteFileSystem(c Client) RemoteFileSystem {\n\treturn &remoteFileSystem{\n\t\tclient: c,\n\t}\n}\n\n\/\/ file is a basic representation of a remote file such that all operations (i.e.\n\/\/ seeking) will work correctly.\ntype file struct {\n\tio.ReadSeeker\n\tstat *fileInfo\n}\n\n\/\/ RemoteOpen returns a *File which represents the remote file, and implements\n\/\/ io.ReadCloser which reads the file contents from the remote system.\nfunc (fs *remoteFileSystem) RemoteOpen(path string) (*File, error) {\n\trf, err := fs.client.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rf, nil\n}\n\n\/\/ Open the given file and return an http.File implementation representing it. This method\n\/\/ will block until the file has been completely fetched (http.File implements io.Seeker\n\/\/ which means that for a trivial implementation we need all the underlying data).\nfunc (fs *remoteFileSystem) Open(path string) (http.File, error) {\n\trf, err := fs.RemoteOpen(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rf.Close()\n\n\tbuf, err := ioutil.ReadAll(rf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &file{\n\t\tReadSeeker: bytes.NewReader(buf),\n\t\tstat: &fileInfo{\n\t\t\tname: rf.Name,\n\t\t\tsize: rf.Size,\n\t\t\tmodTime: rf.ModTime,\n\t\t},\n\t}, nil\n}\n\n\/\/ Close is a nop as we have already closed the original file.\nfunc (f *file) Close() error {\n\treturn nil\n}\n\n\/\/ Implements http.File.\nfunc (f *file) Readdir(int) ([]os.FileInfo, error) {\n\treturn nil, nil\n}\n\n\/\/ FileInfo is a simple implementation of os.FileInfo.\ntype fileInfo struct {\n\tname string\n\tsize int64\n\tmodTime time.Time\n}\n\nfunc (f *fileInfo) Name() string { return f.name }\nfunc (f *fileInfo) Size() int64 { return f.size }\nfunc (f *fileInfo) Mode() os.FileMode { return os.FileMode(0777) }\nfunc (f *fileInfo) ModTime() time.Time { return f.modTime }\nfunc (f *fileInfo) IsDir() bool { return false }\nfunc (f *fileInfo) Sys() interface{} { return nil }\n\nfunc (f *file) Stat() (os.FileInfo, error) {\n\treturn f.stat, nil\n}\n<commit_msg>Tidy method instance variable name.<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage store\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ FileSystem is an interface which defines an open method similar to http.FileSystem,\n\/\/ but which also includes a context parameter.\ntype FileSystem interface {\n\tOpen(ctx context.Context, path string) (http.File, error)\n}\n\n\/\/ NewFileSystem creates a new FileSystem using an http.FileSystem as the underlying\n\/\/ storage.\nfunc NewFileSystem(fs http.FileSystem) FileSystem {\n\treturn &fileSystem{fs}\n}\n\ntype fileSystem struct {\n\thttp.FileSystem\n}\n\n\/\/ Open implements FileSystem.\nfunc (fs *fileSystem) Open(ctx context.Context, path string) (http.File, error) {\n\treturn fs.FileSystem.Open(path)\n}\n\n\/\/ RemoteFileSystem is an extension of the http.FileSystem interface\n\/\/ which includes the RemoteOpen method.\ntype RemoteFileSystem interface {\n\thttp.FileSystem\n\n\t\/\/ RemoteOpen returns a File which\n\tRemoteOpen(string) (*File, error)\n}\n\n\/\/ remoteFileSystem implements RemoteFileSystem\ntype remoteFileSystem struct {\n\tclient Client\n}\n\n\/\/ NewRemoteFileSystem creates a new file system using the given Client to handle\n\/\/ file requests.\nfunc NewRemoteFileSystem(c Client) RemoteFileSystem {\n\treturn &remoteFileSystem{\n\t\tclient: c,\n\t}\n}\n\n\/\/ file is a basic representation of a remote file such that all operations (i.e.\n\/\/ seeking) will work correctly.\ntype file struct {\n\tio.ReadSeeker\n\tstat *fileInfo\n}\n\n\/\/ RemoteOpen returns a *File which represents the remote file, and implements\n\/\/ io.ReadCloser which reads the file contents from the remote system.\nfunc (fs *remoteFileSystem) RemoteOpen(path string) (*File, error) {\n\trf, err := fs.client.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rf, nil\n}\n\n\/\/ Open the given file and return an http.File implementation representing it. This method\n\/\/ will block until the file has been completely fetched (http.File implements io.Seeker\n\/\/ which means that for a trivial implementation we need all the underlying data).\nfunc (fs *remoteFileSystem) Open(path string) (http.File, error) {\n\trf, err := fs.RemoteOpen(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rf.Close()\n\n\tbuf, err := ioutil.ReadAll(rf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &file{\n\t\tReadSeeker: bytes.NewReader(buf),\n\t\tstat: &fileInfo{\n\t\t\tname: rf.Name,\n\t\t\tsize: rf.Size,\n\t\t\tmodTime: rf.ModTime,\n\t\t},\n\t}, nil\n}\n\n\/\/ Close is a nop as we have already closed the original file.\nfunc (f *file) Close() error {\n\treturn nil\n}\n\n\/\/ Implements http.File.\nfunc (f *file) Readdir(int) ([]os.FileInfo, error) {\n\treturn nil, nil\n}\n\n\/\/ FileInfo is a simple implementation of os.FileInfo.\ntype fileInfo struct {\n\tname string\n\tsize int64\n\tmodTime time.Time\n}\n\nfunc (f *fileInfo) Name() string { return f.name }\nfunc (f *fileInfo) Size() int64 { return f.size }\nfunc (f *fileInfo) Mode() os.FileMode { return os.FileMode(0777) }\nfunc (f *fileInfo) ModTime() time.Time { return f.modTime }\nfunc (f *fileInfo) IsDir() bool { return false }\nfunc (f *fileInfo) Sys() interface{} { return nil }\n\nfunc (f *file) Stat() (os.FileInfo, error) {\n\treturn f.stat, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\t\"errors\"\n\t\"plugin\"\n\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/config\"\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/queue\"\n\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/input\"\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/output\"\n\n\tlog \"github.com\/cihub\/seelog\"\n)\n\n\/\/Init log\nfunc InitLog(path string) error {\n\tlogger, err := log.LoggerFromConfigAsFile(path)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = log.ReplaceLogger(logger)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/Init config\nfunc InitConfig(path string) (*config.Config, error) {\n\tglobalConfig := config.GetConfig()\n\n\tif globalConfig == nil {\n\t\treturn nil, errors.New(\"Get global config failed!\")\n\t}\n\n\terr := globalConfig.Init(path)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn globalConfig, nil\n}\n\n\/\/Init agent plugin libraries\nfunc InitPluginLibs(config *config.Config) error {\n\tinputs := make(map[string]bool)\n\n\tfor _, pluginConfig := range config.Inputs {\n\t\tif !pluginConfig.Active {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Initialize plugin\n\t\t_, err := plugin.Open(pluginConfig.Path)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinputs[pluginConfig.Name] = true\n\t}\n\n\tfor _, pluginConfig := range config.Outputs {\n\t\tif !pluginConfig.Active {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Initialize plugin\n\t\t_, err := plugin.Open(pluginConfig.Path)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(pluginConfig.Inputs) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor inputName, isActive := range pluginConfig.Inputs {\n\t\t\tif !isActive {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, ok := inputs[inputName]\n\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"'\" + pluginConfig.Name + \"' output plugin's input plugin '\" + inputName + \"' not found or not active!\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/Init transfer queue\nfunc InitTransferQueue(bufferSize int) (*queue.TransferQueue, error) {\n\ttransfer := queue.NewTransferQueue(bufferSize)\n\n\treturn transfer, nil\n}\n\nfunc main() {\n\tdefer log.Flush()\n\n\tdefer func() {\n\t\terr := recover()\n\n\t\tif err != nil {\n\t\t\tlog.Critical(\"Got panic, err:\", err)\n\t\t}\n\t} ()\n\n\t\/\/Initialize log using configuration from \"..\/conf\/log.config\"\n\terr := InitLog(\"..\/conf\/log.config\")\n\n\tif err != nil {\n\t\tlog.Warnf(\"Read config failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(time.Now().String(), \"Starting monitor agent ... \")\n\n\t\/\/Initialize the configuration from \"..\/conf\/config.json\"\n\tlog.Info(\"Initialize monitor_agent configuration from ..\/conf\/config.json ...\")\n\tconfig, err := InitConfig(\"..\/conf\/config.json\")\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent configuration failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent configuration successed! config:\", config)\n\n\t\/\/Initialize all plugin libs\n\tlog.Info(\"Initialize monitor agent plugin libs ...\")\n\terr = InitPluginLibs(config)\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent plugin libs failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent plugin libs successed!\")\n\n\t\/\/Init queue between input plugin and output plugin\n\tlog.Info(\"Initialize monitor agent transfer queue ...\")\n\ttransfer, err := InitTransferQueue(config.Node.TransferQueue.BufferSize)\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent transfer queue failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent transfer queue successed! buffer size:\", config.Node.TransferQueue.BufferSize)\n\n\t\/\/Start output plugins\n\tlog.Info(\"Initialize monitor agent output plugin ...\")\n\toutputPluginManager := output.NewOutputPluginManager(config.Node, config.Outputs, transfer)\n\n\terr = outputPluginManager.Init()\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent output plugin failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent output plugin successed!\")\n\n\toutputPluginManager.Run()\n\n\t\/\/Start input plugins\n\tlog.Info(\"Initialize monitor agent input plugin ...\")\n\tinputPluginManager := input.NewInputPluginManager(config.Node, config.Inputs, transfer)\n\n\terr = inputPluginManager.Init()\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent input plugin failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent input plugin successed!\")\n\n\tinputPluginManager.Run()\n\n\t\/\/Deal with signals\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, os.Interrupt, os.Kill)\n\n\tsignalOccur := <- signalChannel\n\n\tlog.Info(\"Signal occured, signal:\", signalOccur.String())\n}\n<commit_msg>feat:Add panic<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"net\"\n\t\"time\"\n\t\"errors\"\n\t\"plugin\"\n\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/config\"\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/queue\"\n\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/input\"\n\t\"github.com\/DarkMetrix\/monitor\/agent\/src\/output\"\n\n\tlog \"github.com\/cihub\/seelog\"\n)\n\n\/\/Init log\nfunc InitLog(path string) {\n\tlogger, err := log.LoggerFromConfigAsFile(path)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = log.ReplaceLogger(logger)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/Get local machine ip\nfunc getLocalIp() (string, error) {\n\taddrs, err := net.InterfaceAddrs()\n\n\tif err != nil {\n\t\tlog.Warn(\"Get local ip failed! err:\" + err.Error())\n\t\treturn \"\", err\n\t}\n\n\tfor _, addr := range addrs {\n\t\tif ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\treturn ipnet.IP.String(), nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", nil\n}\n\n\/\/Init config\nfunc InitConfig(path string) *config.Config{\n\tlog.Info(\"Initialize monitor_agent configuration from \" + path + \" ...\")\n\n\tglobalConfig := config.GetConfig()\n\n\tif globalConfig == nil {\n\t\tpanic(errors.New(\"Get global config failed!\"))\n\t}\n\n\terr := globalConfig.Init(path)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/Check ip and name, if empty use host name as the name and use one of the local ip as the ip\n\tif len(globalConfig.Node.Name) == 0 {\n\t\tglobalConfig.Node.Name, err = os.Hostname()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif len(globalConfig.Node.IP) == 0 {\n\t\tglobalConfig.Node.IP, err = getLocalIp()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn globalConfig\n}\n\n\/\/Init agent plugin libraries\nfunc InitPluginLibs(config *config.Config) {\n\tlog.Info(\"Initialize monitor agent plugin libs ...\")\n\n\tinputs := make(map[string]bool)\n\n\tfor _, pluginConfig := range config.Inputs {\n\t\tif !pluginConfig.Active {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Initialize plugin\n\t\t_, err := plugin.Open(pluginConfig.Path)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tinputs[pluginConfig.Name] = true\n\t}\n\n\tfor _, pluginConfig := range config.Outputs {\n\t\tif !pluginConfig.Active {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/Initialize plugin\n\t\t_, err := plugin.Open(pluginConfig.Path)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif len(pluginConfig.Inputs) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor inputName, isActive := range pluginConfig.Inputs {\n\t\t\tif !isActive {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, ok := inputs[inputName]\n\n\t\t\tif !ok {\n\t\t\t\tpanic(errors.New(\"'\" + pluginConfig.Name + \"' output plugin's input plugin '\" + inputName + \"' not found or not active!\"))\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Init transfer queue\nfunc InitTransferQueue(bufferSize int) *queue.TransferQueue {\n\tlog.Info(\"Initialize monitor agent transfer queue ...\")\n\n\ttransfer := queue.NewTransferQueue(bufferSize)\n\n\treturn transfer\n}\n\nfunc main() {\n\tdefer log.Flush()\n\n\tdefer func() {\n\t\terr := recover()\n\n\t\tif err != nil {\n\t\t\tlog.Critical(\"Got panic, err:\", err)\n\t\t}\n\t} ()\n\n\t\/\/Initialize log using configuration from \"..\/conf\/log.config\"\n\tInitLog(\"..\/conf\/log.config\")\n\n\tlog.Info(time.Now().String(), \"Starting monitor agent ... \")\n\tlog.Info(\"Version: \" + config.Version)\n\n\t\/\/Initialize the configuration from \"..\/conf\/config.json\"\n\tconfig := InitConfig(\"..\/conf\/config.json\")\n\n\t\/\/Initialize all plugin libs\n\tInitPluginLibs(config)\n\n\t\/\/Init queue between input plugin and output plugin\n\ttransfer := InitTransferQueue(config.Node.TransferQueue.BufferSize)\n\n\t\/\/Start output plugins\n\tlog.Info(\"Initialize monitor agent output plugin ...\")\n\toutputPluginManager := output.NewOutputPluginManager(config.Node, config.Outputs, transfer)\n\n\terr := outputPluginManager.Init()\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent output plugin failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent output plugin successed!\")\n\n\toutputPluginManager.Run()\n\n\t\/\/Start input plugins\n\tlog.Info(\"Initialize monitor agent input plugin ...\")\n\tinputPluginManager := input.NewInputPluginManager(config.Node, config.Inputs, transfer)\n\n\terr = inputPluginManager.Init()\n\n\tif err != nil {\n\t\tlog.Warnf(\"Initialize monitor agent input plugin failed! error:%s\", err)\n\t\treturn\n\t}\n\n\tlog.Info(\"Initialize monitor agent input plugin successed!\")\n\n\tinputPluginManager.Run()\n\n\t\/\/Deal with signals\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, os.Interrupt, os.Kill)\n\n\tsignalOccur := <- signalChannel\n\n\tlog.Info(\"Signal occured, signal:\", signalOccur.String())\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nconst (\n\tgendocTmpDir = \"\/tmp\"\n)\n\nvar _ = Describe(\"virtletctl\", func() {\n\tIt(\"Should display usage info on help subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl help\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 10)\n\n\tIt(\"Should dump Virtlet metadata on dump-metadata subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl dump-metadata\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"dump-metadata\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Virtlet pod name:\"))\n\t\tExpect(output).To(ContainSubstring(\"Sandboxes:\"))\n\t\tExpect(output).To(ContainSubstring(\"Images:\"))\n\t}, 60)\n\n\tIt(\"Should generate documentation on gendoc subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl gendoc\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"gendoc\", gendocTmpDir)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tcontent, err := ioutil.ReadFile(filepath.Join(gendocTmpDir, \"virtletctl.md\"))\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(content).To(ContainSubstring(\"Virtlet control tool\"))\n\t\tExpect(content).To(ContainSubstring(\"Synopsis\"))\n\t\tExpect(content).To(ContainSubstring(\"Options\"))\n\t}, 10)\n\n\tContext(\"SSH subcommand\", func() {\n\t\tvar (\n\t\t\tvm *framework.VMInterface\n\t\t\ttempfileName string\n\t\t)\n\n\t\tBeforeAll(func() {\n\t\t\tcm := &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sshkey\",\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\n\t\t\t\t\t\"authorized_keys\": sshPublicKey,\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tvm = controller.VM(\"cirros-vm\")\n\t\t\tvm.Create(VMOptions{\n\t\t\t\tSSHKeySource: \"configmap\/sshkey\",\n\t\t\t}.applyDefaults(), time.Minute*5, nil)\n\n\t\t\twaitSSH(vm)\n\n\t\t\ttempfile, err := ioutil.TempFile(\"\", \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tdefer tempfile.Close()\n\t\t\ttempfileName = tempfile.Name()\n\n\t\t\tstrippedKey := strings.Replace(sshPrivateKey, \"\\t\", \"\", -1)\n\t\t\t_, err = tempfile.Write([]byte(strippedKey))\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.Chmod(tempfileName, 0600)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tcontroller.ConfigMaps().Delete(\"sshkey\", nil)\n\t\t\tos.Remove(tempfileName)\n\t\t})\n\n\t\tIt(\"Should be able to access the pod via ssh using ssh subcommand\", func(done Done) {\n\t\t\tBy(\"Calling virtletctl ssh cirros@cirros-vm hostname\")\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"ssh\", \"cirros@cirros-vm\", \"--\", \"-i\", tempfileName, \"hostname\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(Equal(\"cirros-vm\"))\n\t\t}, 60)\n\t})\n\n\tIt(\"Should return libvirt version on virsh subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl virsh version\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"virsh\", \"version\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Compiled against library:\"))\n\t\tExpect(output).To(ContainSubstring(\"Using library:\"))\n\t}, 60)\n})\n\nvar _ = Describe(\"virtletctl unsafe\", func() {\n\tBeforeAll(func() {\n\t\tincludeUnsafe()\n\t})\n\n\tContext(\"Should install itself as a kubectl plugin on install subcommand\", func() {\n\t\tIt(\"Should not fail during install\", func(done Done) {\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\tBy(\"Calling virtletctl install\")\n\t\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"install\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"Calling kubectl plugin virt help\")\n\t\t\t_, err = framework.RunSimple(localExecutor, \"kubectl\", \"plugin\", \"virt\", \"help\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}, 60)\n\t})\n})\n<commit_msg>Remove test for docs generation<commit_after>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nconst (\n\tgendocTmpDir = \"\/tmp\"\n)\n\nvar _ = Describe(\"virtletctl\", func() {\n\tIt(\"Should display usage info on help subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl help\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 10)\n\n\tIt(\"Should dump Virtlet metadata on dump-metadata subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl dump-metadata\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"dump-metadata\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Virtlet pod name:\"))\n\t\tExpect(output).To(ContainSubstring(\"Sandboxes:\"))\n\t\tExpect(output).To(ContainSubstring(\"Images:\"))\n\t}, 60)\n\n\tContext(\"SSH subcommand\", func() {\n\t\tvar (\n\t\t\tvm *framework.VMInterface\n\t\t\ttempfileName string\n\t\t)\n\n\t\tBeforeAll(func() {\n\t\t\tcm := &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sshkey\",\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\n\t\t\t\t\t\"authorized_keys\": sshPublicKey,\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tvm = controller.VM(\"cirros-vm\")\n\t\t\tvm.Create(VMOptions{\n\t\t\t\tSSHKeySource: \"configmap\/sshkey\",\n\t\t\t}.applyDefaults(), time.Minute*5, nil)\n\n\t\t\twaitSSH(vm)\n\n\t\t\ttempfile, err := ioutil.TempFile(\"\", \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tdefer tempfile.Close()\n\t\t\ttempfileName = tempfile.Name()\n\n\t\t\tstrippedKey := strings.Replace(sshPrivateKey, \"\\t\", \"\", -1)\n\t\t\t_, err = tempfile.Write([]byte(strippedKey))\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.Chmod(tempfileName, 0600)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tcontroller.ConfigMaps().Delete(\"sshkey\", nil)\n\t\t\tos.Remove(tempfileName)\n\t\t})\n\n\t\tIt(\"Should be able to access the pod via ssh using ssh subcommand\", func(done Done) {\n\t\t\tBy(\"Calling virtletctl ssh cirros@cirros-vm hostname\")\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"ssh\", \"cirros@cirros-vm\", \"--\", \"-i\", tempfileName, \"hostname\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(Equal(\"cirros-vm\"))\n\t\t}, 60)\n\t})\n\n\tIt(\"Should return libvirt version on virsh subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl virsh version\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"virsh\", \"version\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Compiled against library:\"))\n\t\tExpect(output).To(ContainSubstring(\"Using library:\"))\n\t}, 60)\n})\n\nvar _ = Describe(\"virtletctl unsafe\", func() {\n\tBeforeAll(func() {\n\t\tincludeUnsafe()\n\t})\n\n\tContext(\"Should install itself as a kubectl plugin on install subcommand\", func() {\n\t\tIt(\"Should not fail during install\", func(done Done) {\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\tBy(\"Calling virtletctl install\")\n\t\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"install\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"Calling kubectl plugin virt help\")\n\t\t\t_, err = framework.RunSimple(localExecutor, \"kubectl\", \"plugin\", \"virt\", \"help\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}, 60)\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package subsumption_test\n\nimport (\n\t. \".\"\n\t\"errors\"\n\tgomock \"github.com\/golang\/mock\/gomock\"\n\t\"testing\"\n)\n\nfunc TestCreate(t *testing.T) {\n\tagent := Agent{}\n\n\tif agent.Size() != 0 {\n\t\tt.Errorf(\"agent.Size() should be 0\")\n\t}\n}\n\nfunc TestAddBehavior(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tagent := Agent{}\n\n\tagent.AddBehavior(behabior)\n\n\tif agent.Size() != 1 {\n\t\tt.Errorf(\"agent.Size() should be 1\")\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Init()\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tagent.Init()\n}\n\nfunc TestPerform(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(true, nil)\n\tbehabior.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be true because behabior is active\")\n\t}\n}\n\nfunc TestPerformMulti(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(true, nil)\n\tsecond.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be false because [second] behabior is active\")\n\t}\n}\n\nfunc TestSenceError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(false, errors.New(\"unknown error\"))\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n<commit_msg>add test case for Agent.Perform() with no active Behavior<commit_after>package subsumption_test\n\nimport (\n\t. \".\"\n\t\"errors\"\n\tgomock \"github.com\/golang\/mock\/gomock\"\n\t\"testing\"\n)\n\nfunc TestCreate(t *testing.T) {\n\tagent := Agent{}\n\n\tif agent.Size() != 0 {\n\t\tt.Errorf(\"agent.Size() should be 0\")\n\t}\n}\n\nfunc TestAddBehavior(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tagent := Agent{}\n\n\tagent.AddBehavior(behabior)\n\n\tif agent.Size() != 1 {\n\t\tt.Errorf(\"agent.Size() should be 1\")\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Init()\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tagent.Init()\n}\n\nfunc TestPerform(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(true, nil)\n\tbehabior.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be true because behabior is active\")\n\t}\n}\n\nfunc TestPerformMulti(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(true, nil)\n\tsecond.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be false because [second] behabior is active\")\n\t}\n}\n\nfunc TestSenceError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(false, errors.New(\"unknown error\"))\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n\nfunc TestPerformNoActive(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(false, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build js\n\npackage main\n\nimport (\n\t\"context\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/flimzy\/jqeventrouter\"\n\t\"github.com\/flimzy\/log\"\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/gopherjs\/jquery\"\n\n\t\"github.com\/FlashbackSRS\/flashback\/config\"\n\t\"github.com\/FlashbackSRS\/flashback\/fserve\"\n\t\"github.com\/FlashbackSRS\/flashback\/iframes\"\n\t\"github.com\/FlashbackSRS\/flashback\/l10n\"\n\t\"github.com\/FlashbackSRS\/flashback\/model\"\n\t\"github.com\/FlashbackSRS\/flashback\/oauth2\"\n\t\"github.com\/FlashbackSRS\/flashback\/util\"\n\n\t_ \"github.com\/FlashbackSRS\/flashback\/controllers\/anki\" \/\/ Anki model controllers\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/auth\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/general\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/import\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/l10n\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/login\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/logout\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/study\"\n\tsynchandler \"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/sync\"\n)\n\n\/\/ Some spiffy shortcuts\nvar jQuery = jquery.NewJQuery\nvar jQMobile *js.Object\nvar document *js.Object = js.Global.Get(\"document\")\n\nfunc main() {\n\tlog.Debug(\"Starting main()\\n\")\n\tvar wg sync.WaitGroup\n\n\t\/\/ Call any async init functions first\n\tinitCordova(&wg)\n\t\/\/ meanwhile, all the synchronous ones\n\n\tconfJSON := document.Call(\"getElementById\", \"config\").Get(\"innerText\").String()\n\tconf, err := config.NewFromJSON([]byte(confJSON))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbaseURL := conf.GetString(\"flashback_app\")\n\n\tjQuery(document).On(\"mobileinit\", func() {\n\t\tMobileInit()\n\t})\n\n\trepo, err := model.New(context.TODO(), conf.GetString(\"flashback_api\"), baseURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfserve.Register(repo)\n\n\tinitjQuery()\n\tiframes.Init()\n\n\t\/\/ Wait for the above modules to initialize before we initialize the router\n\t\/\/ and jQuery Mobile\n\twg.Wait()\n\n\t\/\/ This must happen after cordova init, because it uses cordova to read\n\t\/\/ translation files.\n\tlangSet := l10n_handler.Init(baseURL)\n\n\tappPrefix := urlPrefix(baseURL)\n\n\tRouterInit(appPrefix, baseURL, conf.GetString(\"facebook_client_id\"), repo, langSet)\n\tstudyhandler.StudyInit()\n\n\t\/\/ This is what actually loads jQuery Mobile. We have to register our\n\t\/\/ 'mobileinit' event handler above first, though, as part of RouterInit\n\tjs.Global.Call(\"loadjqueryMobile\")\n\tlog.Debug(\"main() finished\\n\")\n}\n\nfunc initjQuery() {\n\tlog.Debug(\"Initializing jQuery\\n\")\n\tjs.Global.Get(\"jQuery\").Set(\"cors\", true)\n\tjQuery(js.Global).On(\"resize\", resizeContent)\n\tjQuery(js.Global).On(\"orentationchange\", resizeContent)\n\tjQuery(document).On(\"pagecontainertransition\", resizeContent)\n\tlog.Debug(\"jQuery init complete\\n\")\n}\n\nfunc resizeContent() {\n\tscreenHt := js.Global.Get(\"jQuery\").Get(\"mobile\").Call(\"getScreenHeight\").Int()\n\theader := jQuery(\".ui-header:visible\")\n\theaderHt := header.OuterHeight()\n\tif header.HasClass(\"ui-header-fixed\") {\n\t\theaderHt = headerHt - 1\n\t}\n\tfooter := jQuery(\".ui-footer:visible\")\n\tfooterHt := footer.OuterHeight()\n\tif footer.HasClass(\"ui-footer-fixed\") {\n\t\tfooterHt = footerHt - 1\n\t}\n\tjQuery(\".ui-content\").SetHeight(strconv.Itoa(screenHt - headerHt - footerHt))\n}\n\nfunc RouterInit(prefix, baseURL, facebookID string, repo *model.Repo, langSet *l10n.Set) {\n\tlog.Debug(\"Initializing router\\n\")\n\n\t\/\/ beforechange -- Just check auth\n\tbeforeChange := jqeventrouter.NullHandler()\n\tcheckAuth := auth.CheckAuth(repo)\n\tjqeventrouter.Listen(\"pagecontainerbeforechange\", general.JQMRouteOnce(general.CleanFacebookURI(checkAuth(beforeChange))))\n\n\t\/\/ beforetransition\n\tbeforeTransition := jqeventrouter.NewEventMux()\n\tbeforeTransition.SetUriFunc(getJqmUri)\n\n\tproviders := map[string]string{\n\t\t\"facebook\": oauth2.FacebookURL(facebookID, baseURL),\n\t}\n\n\tbeforeTransition.HandleFunc(prefix+\"\/login.html\", loginhandler.BeforeTransition(repo, providers))\n\tbeforeTransition.HandleFunc(prefix+\"\/callback.html\", loginhandler.BTCallback(repo, providers))\n\tbeforeTransition.HandleFunc(prefix+\"\/logout.html\", logouthandler.BeforeTransition(repo))\n\tbeforeTransition.HandleFunc(prefix+\"\/import.html\", importhandler.BeforeTransition(repo))\n\tbeforeTransition.HandleFunc(prefix+\"\/study.html\", studyhandler.BeforeTransition(repo))\n\tjqeventrouter.Listen(\"pagecontainerbeforetransition\", beforeTransition)\n\n\t\/\/ beforeshow\n\tbeforeShow := jqeventrouter.NullHandler()\n\tsetupSyncButton := synchandler.SetupSyncButton(repo)\n\tjqeventrouter.Listen(\"pagecontainerbeforeshow\", l10n_handler.LocalizePage(langSet, setupSyncButton(beforeShow)))\n\tlog.Debug(\"Router init complete\\n\")\n}\n\nfunc getJqmUri(_ *jquery.Event, ui *js.Object) string {\n\treturn util.JqmTargetUri(ui)\n}\n\n\/\/ MobileInit is run after jQuery Mobile's 'mobileinit' event has fired\nfunc MobileInit() {\n\tlog.Debugf(\"MobileInit\")\n\tdefer log.Debugf(\"MobileInit finished\")\n\tjQMobile = js.Global.Get(\"jQuery\").Get(\"mobile\")\n\n\t\/\/ Disable hash features\n\tjQMobile.Set(\"hashListeningEnabled\", false)\n\tjQMobile.Set(\"pushStateEnabled\", false)\n\tjQMobile.Get(\"changePage\").Get(\"defaults\").Set(\"changeHash\", false)\n\n\t\/\/ \tDebugEvents()\n\n\tjQuery(document).One(\"pagecreate\", func(event *jquery.Event) {\n\t\t\/\/ This should only be executed once, to initialize our \"external\"\n\t\t\/\/ panel. This is the kind of thing that should go in document.ready,\n\t\t\/\/ but I don't have any guarantee that document.ready will run after\n\t\t\/\/ mobileinit.\n\t\tjQuery(\"body>[data-role='panel']\").Underlying().Call(\"panel\").Call(\"enhanceWithin\")\n\t})\n}\n\nfunc ConsoleEvent(name string, event *jquery.Event, data *js.Object) {\n\tpage := data.Get(\"toPage\").String()\n\tif page == \"[object Object]\" {\n\t\tpage = data.Get(\"toPage\").Call(\"jqmData\", \"url\").String()\n\t}\n\tlog.Debugf(\"Event: %s, Current page: %s\", name, page)\n}\n\nfunc ConsolePageEvent(name string, event *jquery.Event) {\n\tlog.Debugf(\"Event: %s\", name)\n}\n\nfunc DebugEvents() {\n\tevents := []string{\"pagecontainerbeforehide\", \"pagecontainerbeforechange\", \"pagecontainerbeforeload\", \"pagecontainerbeforeshow\",\n\t\t\"pagecontainerbeforetransition\", \"pagecontainerchange\", \"pagecontainerchangefailed\", \"pagecontainercreate\", \"pagecontainerhide\",\n\t\t\"pagecontainerload\", \"pagecontainerloadfailed\", \"pagecontainerremove\", \"pagecontainershow\", \"pagecontainertransition\"}\n\tfor _, event := range events {\n\t\tcopy := event \/\/ Necessary for each iterration to have an effectively uinque closure\n\t\tjQuery(document).On(event, func(e *jquery.Event, d *js.Object) {\n\t\t\tConsoleEvent(copy, e, d)\n\t\t})\n\t}\n\tpageEvents := []string{\"beforecreate\", \"create\"}\n\tfor _, event := range pageEvents {\n\t\tcopy := event\n\t\tjQuery(document).On(event, func(e *jquery.Event) {\n\t\t\tConsolePageEvent(copy, e)\n\t\t})\n\t}\n}\n<commit_msg>Register resize handler in mobileinit<commit_after>\/\/ +build js\n\npackage main\n\nimport (\n\t\"context\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/flimzy\/jqeventrouter\"\n\t\"github.com\/flimzy\/log\"\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"github.com\/gopherjs\/jquery\"\n\n\t\"github.com\/FlashbackSRS\/flashback\/config\"\n\t\"github.com\/FlashbackSRS\/flashback\/fserve\"\n\t\"github.com\/FlashbackSRS\/flashback\/iframes\"\n\t\"github.com\/FlashbackSRS\/flashback\/l10n\"\n\t\"github.com\/FlashbackSRS\/flashback\/model\"\n\t\"github.com\/FlashbackSRS\/flashback\/oauth2\"\n\t\"github.com\/FlashbackSRS\/flashback\/util\"\n\n\t_ \"github.com\/FlashbackSRS\/flashback\/controllers\/anki\" \/\/ Anki model controllers\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/auth\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/general\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/import\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/l10n\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/login\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/logout\"\n\t\"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/study\"\n\tsynchandler \"github.com\/FlashbackSRS\/flashback\/webclient\/handlers\/sync\"\n)\n\n\/\/ Some spiffy shortcuts\nvar jQuery = jquery.NewJQuery\nvar jQMobile *js.Object\nvar document *js.Object = js.Global.Get(\"document\")\n\nfunc main() {\n\tlog.Debug(\"Starting main()\\n\")\n\tvar wg sync.WaitGroup\n\n\t\/\/ Call any async init functions first\n\tinitCordova(&wg)\n\t\/\/ meanwhile, all the synchronous ones\n\n\tconfJSON := document.Call(\"getElementById\", \"config\").Get(\"innerText\").String()\n\tconf, err := config.NewFromJSON([]byte(confJSON))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbaseURL := conf.GetString(\"flashback_app\")\n\n\tjQuery(document).On(\"mobileinit\", func() {\n\t\tMobileInit()\n\t})\n\n\trepo, err := model.New(context.TODO(), conf.GetString(\"flashback_api\"), baseURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfserve.Register(repo)\n\n\tinitjQuery()\n\tiframes.Init()\n\n\t\/\/ Wait for the above modules to initialize before we initialize the router\n\t\/\/ and jQuery Mobile\n\twg.Wait()\n\n\t\/\/ This must happen after cordova init, because it uses cordova to read\n\t\/\/ translation files.\n\tlangSet := l10n_handler.Init(baseURL)\n\n\tappPrefix := urlPrefix(baseURL)\n\n\tRouterInit(appPrefix, baseURL, conf.GetString(\"facebook_client_id\"), repo, langSet)\n\tstudyhandler.StudyInit()\n\n\t\/\/ This is what actually loads jQuery Mobile. We have to register our\n\t\/\/ 'mobileinit' event handler above first, though, as part of RouterInit\n\tjs.Global.Call(\"loadjqueryMobile\")\n\tlog.Debug(\"main() finished\\n\")\n}\n\nfunc initjQuery() {\n\tlog.Debug(\"Initializing jQuery\\n\")\n\tjs.Global.Get(\"jQuery\").Set(\"cors\", true)\n\tlog.Debug(\"jQuery init complete\\n\")\n}\n\nfunc resizeContent() {\n\tscreenHt := js.Global.Get(\"jQuery\").Get(\"mobile\").Call(\"getScreenHeight\").Int()\n\theader := jQuery(\".ui-header:visible\")\n\theaderHt := header.OuterHeight()\n\tif header.HasClass(\"ui-header-fixed\") {\n\t\theaderHt = headerHt - 1\n\t}\n\tfooter := jQuery(\".ui-footer:visible\")\n\tfooterHt := footer.OuterHeight()\n\tif footer.HasClass(\"ui-footer-fixed\") {\n\t\tfooterHt = footerHt - 1\n\t}\n\tjQuery(\".ui-content\").SetHeight(strconv.Itoa(screenHt - headerHt - footerHt))\n}\n\nfunc RouterInit(prefix, baseURL, facebookID string, repo *model.Repo, langSet *l10n.Set) {\n\tlog.Debug(\"Initializing router\\n\")\n\n\t\/\/ beforechange -- Just check auth\n\tbeforeChange := jqeventrouter.NullHandler()\n\tcheckAuth := auth.CheckAuth(repo)\n\tjqeventrouter.Listen(\"pagecontainerbeforechange\", general.JQMRouteOnce(general.CleanFacebookURI(checkAuth(beforeChange))))\n\n\t\/\/ beforetransition\n\tbeforeTransition := jqeventrouter.NewEventMux()\n\tbeforeTransition.SetUriFunc(getJqmUri)\n\n\tproviders := map[string]string{\n\t\t\"facebook\": oauth2.FacebookURL(facebookID, baseURL),\n\t}\n\n\tbeforeTransition.HandleFunc(prefix+\"\/login.html\", loginhandler.BeforeTransition(repo, providers))\n\tbeforeTransition.HandleFunc(prefix+\"\/callback.html\", loginhandler.BTCallback(repo, providers))\n\tbeforeTransition.HandleFunc(prefix+\"\/logout.html\", logouthandler.BeforeTransition(repo))\n\tbeforeTransition.HandleFunc(prefix+\"\/import.html\", importhandler.BeforeTransition(repo))\n\tbeforeTransition.HandleFunc(prefix+\"\/study.html\", studyhandler.BeforeTransition(repo))\n\tjqeventrouter.Listen(\"pagecontainerbeforetransition\", beforeTransition)\n\n\t\/\/ beforeshow\n\tbeforeShow := jqeventrouter.NullHandler()\n\tsetupSyncButton := synchandler.SetupSyncButton(repo)\n\tjqeventrouter.Listen(\"pagecontainerbeforeshow\", l10n_handler.LocalizePage(langSet, setupSyncButton(beforeShow)))\n\tlog.Debug(\"Router init complete\\n\")\n}\n\nfunc getJqmUri(_ *jquery.Event, ui *js.Object) string {\n\treturn util.JqmTargetUri(ui)\n}\n\n\/\/ MobileInit is run after jQuery Mobile's 'mobileinit' event has fired\nfunc MobileInit() {\n\tlog.Debugf(\"MobileInit\")\n\tdefer log.Debugf(\"MobileInit finished\")\n\tjQMobile = js.Global.Get(\"jQuery\").Get(\"mobile\")\n\n\tjQuery(js.Global).On(\"resize\", resizeContent)\n\tjQuery(js.Global).On(\"orentationchange\", resizeContent)\n\tjQuery(document).On(\"pagecontainertransition\", resizeContent)\n\tgo resizeContent() \/\/ Just in case the screen was resized before mobileinit fired\n\n\t\/\/ Disable hash features\n\tjQMobile.Set(\"hashListeningEnabled\", false)\n\tjQMobile.Set(\"pushStateEnabled\", false)\n\tjQMobile.Get(\"changePage\").Get(\"defaults\").Set(\"changeHash\", false)\n\n\t\/\/ \tDebugEvents()\n\n\tjQuery(document).One(\"pagecreate\", func(event *jquery.Event) {\n\t\t\/\/ This should only be executed once, to initialize our \"external\"\n\t\t\/\/ panel. This is the kind of thing that should go in document.ready,\n\t\t\/\/ but I don't have any guarantee that document.ready will run after\n\t\t\/\/ mobileinit.\n\t\tjQuery(\"body>[data-role='panel']\").Underlying().Call(\"panel\").Call(\"enhanceWithin\")\n\t})\n}\n\nfunc ConsoleEvent(name string, event *jquery.Event, data *js.Object) {\n\tpage := data.Get(\"toPage\").String()\n\tif page == \"[object Object]\" {\n\t\tpage = data.Get(\"toPage\").Call(\"jqmData\", \"url\").String()\n\t}\n\tlog.Debugf(\"Event: %s, Current page: %s\", name, page)\n}\n\nfunc ConsolePageEvent(name string, event *jquery.Event) {\n\tlog.Debugf(\"Event: %s\", name)\n}\n\nfunc DebugEvents() {\n\tevents := []string{\"pagecontainerbeforehide\", \"pagecontainerbeforechange\", \"pagecontainerbeforeload\", \"pagecontainerbeforeshow\",\n\t\t\"pagecontainerbeforetransition\", \"pagecontainerchange\", \"pagecontainerchangefailed\", \"pagecontainercreate\", \"pagecontainerhide\",\n\t\t\"pagecontainerload\", \"pagecontainerloadfailed\", \"pagecontainerremove\", \"pagecontainershow\", \"pagecontainertransition\"}\n\tfor _, event := range events {\n\t\tcopy := event \/\/ Necessary for each iterration to have an effectively uinque closure\n\t\tjQuery(document).On(event, func(e *jquery.Event, d *js.Object) {\n\t\t\tConsoleEvent(copy, e, d)\n\t\t})\n\t}\n\tpageEvents := []string{\"beforecreate\", \"create\"}\n\tfor _, event := range pageEvents {\n\t\tcopy := event\n\t\tjQuery(document).On(event, func(e *jquery.Event) {\n\t\t\tConsolePageEvent(copy, e)\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ibclient\n\nimport (\n\t\"fmt\"\n\t\"net\"\n)\n\nfunc (objMgr *ObjectManager) CreateHostRecord(\n\tenabledns bool,\n\tenableDhcp bool,\n\trecordName string,\n\tnetview string,\n\tdnsview string,\n\tipv4cidr string,\n\tipv6cidr string,\n\tipv4Addr string,\n\tipv6Addr string,\n\tmacAddr string,\n\tduid string,\n\tuseTtl bool,\n\tttl uint32,\n\tcomment string,\n\teas EA,\n\taliases []string) (*HostRecord, error) {\n\n\tif ipv4Addr == \"\" && ipv4cidr != \"\" {\n\t\tif netview == \"\" {\n\t\t\tnetview = \"default\"\n\t\t}\n\t\tipv4Addr = fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv4cidr, netview)\n\t}\n\tif ipv6Addr == \"\" && ipv6cidr != \"\" {\n\t\tif netview == \"\" {\n\t\t\tnetview = \"default\"\n\t\t}\n\t\tipv6Addr = fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv6cidr, netview)\n\t}\n\trecordHost := NewEmptyHostRecord()\n\trecordHostIpv6AddrSlice := []HostRecordIpv6Addr{}\n\trecordHostIpv4AddrSlice := []HostRecordIpv4Addr{}\n\tif ipv6Addr != \"\" {\n\t\tenableDhcpv6 := false\n\t\tif enableDhcp && duid != \"\" {\n\t\t\tenableDhcpv6 = true\n\t\t}\n\t\trecordHostIpv6Addr := NewHostRecordIpv6Addr(ipv6Addr, duid, enableDhcpv6, \"\")\n\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpv6Addr}\n\t}\n\tif ipv4Addr != \"\" {\n\t\tenableDhcpv4 := false\n\t\tif enableDhcp && macAddr != \"\" && macAddr != MACADDR_ZERO {\n\t\t\tenableDhcpv4 = true\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv4Addr(ipv4Addr, macAddr, enableDhcpv4, \"\")\n\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t}\n\trecordHost = NewHostRecord(\n\t\tnetview, recordName, \"\", \"\", recordHostIpv4AddrSlice, recordHostIpv6AddrSlice,\n\t\teas, enabledns, dnsview, \"\", \"\", useTtl, ttl, comment, aliases)\n\tref, err := objMgr.connector.CreateObject(recordHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trecordHost.Ref = ref\n\terr = objMgr.connector.GetObject(\n\t\trecordHost, ref, NewQueryParams(false, nil), &recordHost)\n\treturn recordHost, err\n}\n\nfunc (objMgr *ObjectManager) GetHostRecordByRef(ref string) (*HostRecord, error) {\n\trecordHost := NewEmptyHostRecord()\n\terr := objMgr.connector.GetObject(\n\t\trecordHost, ref, NewQueryParams(false, nil), &recordHost)\n\n\treturn recordHost, err\n}\n\nfunc (objMgr *ObjectManager) SearchHostRecordByAltId(\n\tinternalId string, ref string, eaNameForInternalId string) (*HostRecord, error) {\n\n\tif internalId == \"\" {\n\t\treturn nil, fmt.Errorf(\"internal ID must not be empty\")\n\t}\n\n\trecordHost := NewEmptyHostRecord()\n\tif ref != \"\" {\n\t\tif err := objMgr.connector.GetObject(recordHost, ref, NewQueryParams(false, nil), &recordHost); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif recordHost.Ref != \"\" {\n\t\treturn recordHost, nil\n\t}\n\trecordHost.eaSearch = map[string]interface{}{\n\t\teaNameForInternalId: internalId,\n\t}\n\n\tres := make([]HostRecord, 0)\n\terr := objMgr.connector.GetObject(recordHost, \"\", NewQueryParams(false, nil), &res)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res == nil || len(res) == 0 {\n\t\treturn nil, NewNotFoundError(\"host record not found\")\n\t}\n\n\tresult := res[0]\n\n\treturn &result, nil\n}\n\nfunc (objMgr *ObjectManager) GetHostRecord(netview string, dnsview string, recordName string, ipv4addr string, ipv6addr string) (*HostRecord, error) {\n\tvar res []HostRecord\n\n\trecordHost := NewEmptyHostRecord()\n\n\tsf := map[string]string{\n\t\t\"name\": recordName,\n\t}\n\tif netview != \"\" {\n\t\tsf[\"network_view\"] = netview\n\t}\n\tif dnsview != \"\" {\n\t\tsf[\"view\"] = dnsview\n\t}\n\tif ipv4addr != \"\" {\n\t\tsf[\"ipv4addr\"] = ipv4addr\n\t}\n\tif ipv6addr != \"\" {\n\t\tsf[\"ipv6addr\"] = ipv6addr\n\t}\n\tqueryParams := NewQueryParams(false, sf)\n\terr := objMgr.connector.GetObject(recordHost, \"\", queryParams, &res)\n\n\tif err != nil || res == nil || len(res) == 0 {\n\t\treturn nil, err\n\t}\n\treturn &res[0], err\n\n}\n\nfunc (objMgr *ObjectManager) GetIpAddressFromHostRecord(host HostRecord) (string, error) {\n\terr := objMgr.connector.GetObject(\n\t\t&host, host.Ref, NewQueryParams(false, nil), &host)\n\treturn host.Ipv4Addrs[0].Ipv4Addr, err\n}\n\nfunc (objMgr *ObjectManager) UpdateHostRecord(\n\thostRref string,\n\tenabledns bool,\n\tenableDhcp bool,\n\tname string,\n\tnetView string,\n\tipv4cidr string,\n\tipv6cidr string,\n\tipv4Addr string,\n\tipv6Addr string,\n\tmacAddr string,\n\tduid string,\n\tuseTtl bool,\n\tttl uint32,\n\tcomment string,\n\teas EA,\n\taliases []string) (*HostRecord, error) {\n\n\trecordHostIpv4AddrSlice := []HostRecordIpv4Addr{}\n\trecordHostIpv6AddrSlice := []HostRecordIpv6Addr{}\n\n\tenableDhcpv4 := false\n\tenableDhcpv6 := false\n\tif ipv6Addr != \"\" {\n\t\tif enableDhcp && duid != \"\" {\n\t\t\tenableDhcpv6 = true\n\t\t}\n\t}\n\tif ipv4Addr != \"\" {\n\t\tif enableDhcp && macAddr != \"\" && macAddr != MACADDR_ZERO {\n\t\t\tenableDhcpv4 = true\n\t\t}\n\t}\n\n\tif ipv4Addr == \"\" {\n\t\tif ipv4cidr != \"\" {\n\t\t\tip, _, err := net.ParseCIDR(ipv4cidr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot parse CIDR value: %s\", err.Error())\n\t\t\t}\n\t\t\tif ip.To4() == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"CIDR value must be an IPv4 CIDR, not an IPv6 one\")\n\t\t\t}\n\t\t\tif netView == \"\" {\n\t\t\t\tnetView = \"default\"\n\t\t\t}\n\t\t\tnewIpAddr := fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv4cidr, netView)\n\t\t\trecordHostIpAddr := NewHostRecordIpv4Addr(newIpAddr, macAddr, enableDhcpv4, \"\")\n\t\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t\t}\n\t} else {\n\t\tip := net.ParseIP(ipv4Addr)\n\t\tif ip == nil {\n\t\t\treturn nil, fmt.Errorf(\"'IP address for the record is not valid\")\n\t\t}\n\t\tif ip.To4() == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address must be an IPv4 address, not an IPv6 one\")\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv4Addr(ipv4Addr, macAddr, enableDhcpv4, \"\")\n\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t}\n\tif ipv6Addr == \"\" {\n\t\tif ipv6cidr != \"\" {\n\t\t\tip, _, err := net.ParseCIDR(ipv6cidr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot parse CIDR value: %s\", err.Error())\n\t\t\t}\n\t\t\tif ip.To4() != nil || ip.To16() == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"CIDR value must be an IPv6 CIDR, not an IPv4 one\")\n\t\t\t}\n\t\t\tif netView == \"\" {\n\t\t\t\tnetView = \"default\"\n\t\t\t}\n\t\t\tnewIpAddr := fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv6cidr, netView)\n\t\t\trecordHostIpAddr := NewHostRecordIpv6Addr(newIpAddr, duid, enableDhcpv6, \"\")\n\t\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpAddr}\n\t\t}\n\t} else {\n\t\tip := net.ParseIP(ipv6Addr)\n\t\tif ip == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address for the record is not valid\")\n\t\t}\n\t\tif ip.To4() != nil || ip.To16() == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address must be an IPv6 address, not an IPv4 one\")\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv6Addr(ipv6Addr, duid, enableDhcpv6, \"\")\n\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpAddr}\n\t}\n\tupdateHostRecord := NewHostRecord(\n\t\t\"\", name, \"\", \"\", recordHostIpv4AddrSlice, recordHostIpv6AddrSlice,\n\t\teas, enabledns, \"\", \"\", hostRref, useTtl, ttl, comment, aliases)\n\tref, err := objMgr.connector.UpdateObject(updateHostRecord, hostRref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdateHostRecord, err = objMgr.GetHostRecordByRef(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn updateHostRecord, nil\n}\n\nfunc (objMgr *ObjectManager) DeleteHostRecord(ref string) (string, error) {\n\treturn objMgr.connector.DeleteObject(ref)\n}\n<commit_msg>fixing ObjectManager.SearchHostRecordByAltId()<commit_after>package ibclient\n\nimport (\n\t\"fmt\"\n\t\"net\"\n)\n\nfunc (objMgr *ObjectManager) CreateHostRecord(\n\tenabledns bool,\n\tenableDhcp bool,\n\trecordName string,\n\tnetview string,\n\tdnsview string,\n\tipv4cidr string,\n\tipv6cidr string,\n\tipv4Addr string,\n\tipv6Addr string,\n\tmacAddr string,\n\tduid string,\n\tuseTtl bool,\n\tttl uint32,\n\tcomment string,\n\teas EA,\n\taliases []string) (*HostRecord, error) {\n\n\tif ipv4Addr == \"\" && ipv4cidr != \"\" {\n\t\tif netview == \"\" {\n\t\t\tnetview = \"default\"\n\t\t}\n\t\tipv4Addr = fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv4cidr, netview)\n\t}\n\tif ipv6Addr == \"\" && ipv6cidr != \"\" {\n\t\tif netview == \"\" {\n\t\t\tnetview = \"default\"\n\t\t}\n\t\tipv6Addr = fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv6cidr, netview)\n\t}\n\trecordHost := NewEmptyHostRecord()\n\trecordHostIpv6AddrSlice := []HostRecordIpv6Addr{}\n\trecordHostIpv4AddrSlice := []HostRecordIpv4Addr{}\n\tif ipv6Addr != \"\" {\n\t\tenableDhcpv6 := false\n\t\tif enableDhcp && duid != \"\" {\n\t\t\tenableDhcpv6 = true\n\t\t}\n\t\trecordHostIpv6Addr := NewHostRecordIpv6Addr(ipv6Addr, duid, enableDhcpv6, \"\")\n\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpv6Addr}\n\t}\n\tif ipv4Addr != \"\" {\n\t\tenableDhcpv4 := false\n\t\tif enableDhcp && macAddr != \"\" && macAddr != MACADDR_ZERO {\n\t\t\tenableDhcpv4 = true\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv4Addr(ipv4Addr, macAddr, enableDhcpv4, \"\")\n\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t}\n\trecordHost = NewHostRecord(\n\t\tnetview, recordName, \"\", \"\", recordHostIpv4AddrSlice, recordHostIpv6AddrSlice,\n\t\teas, enabledns, dnsview, \"\", \"\", useTtl, ttl, comment, aliases)\n\tref, err := objMgr.connector.CreateObject(recordHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trecordHost.Ref = ref\n\terr = objMgr.connector.GetObject(\n\t\trecordHost, ref, NewQueryParams(false, nil), &recordHost)\n\treturn recordHost, err\n}\n\nfunc (objMgr *ObjectManager) GetHostRecordByRef(ref string) (*HostRecord, error) {\n\trecordHost := NewEmptyHostRecord()\n\terr := objMgr.connector.GetObject(\n\t\trecordHost, ref, NewQueryParams(false, nil), &recordHost)\n\n\treturn recordHost, err\n}\n\nfunc (objMgr *ObjectManager) SearchHostRecordByAltId(\n\tinternalId string, ref string, eaNameForInternalId string) (*HostRecord, error) {\n\n\tif internalId == \"\" {\n\t\treturn nil, fmt.Errorf(\"internal ID must not be empty\")\n\t}\n\n\trecordHost := NewEmptyHostRecord()\n\tif ref != \"\" {\n\t\tif err := objMgr.connector.GetObject(recordHost, ref, NewQueryParams(false, nil), &recordHost); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif recordHost.Ref != \"\" {\n\t\treturn recordHost, nil\n\t}\n\tsf := map[string]string{\n\t\tfmt.Sprintf(\"*%s\", eaNameForInternalId): internalId,\n\t}\n\n\tres := make([]HostRecord, 0)\n\terr := objMgr.connector.GetObject(recordHost, \"\", NewQueryParams(false, sf), &res)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res == nil || len(res) == 0 {\n\t\treturn nil, NewNotFoundError(\"host record not found\")\n\t}\n\n\tresult := res[0]\n\n\treturn &result, nil\n}\n\nfunc (objMgr *ObjectManager) GetHostRecord(netview string, dnsview string, recordName string, ipv4addr string, ipv6addr string) (*HostRecord, error) {\n\tvar res []HostRecord\n\n\trecordHost := NewEmptyHostRecord()\n\n\tsf := map[string]string{\n\t\t\"name\": recordName,\n\t}\n\tif netview != \"\" {\n\t\tsf[\"network_view\"] = netview\n\t}\n\tif dnsview != \"\" {\n\t\tsf[\"view\"] = dnsview\n\t}\n\tif ipv4addr != \"\" {\n\t\tsf[\"ipv4addr\"] = ipv4addr\n\t}\n\tif ipv6addr != \"\" {\n\t\tsf[\"ipv6addr\"] = ipv6addr\n\t}\n\tqueryParams := NewQueryParams(false, sf)\n\terr := objMgr.connector.GetObject(recordHost, \"\", queryParams, &res)\n\n\tif err != nil || res == nil || len(res) == 0 {\n\t\treturn nil, err\n\t}\n\treturn &res[0], err\n\n}\n\nfunc (objMgr *ObjectManager) GetIpAddressFromHostRecord(host HostRecord) (string, error) {\n\terr := objMgr.connector.GetObject(\n\t\t&host, host.Ref, NewQueryParams(false, nil), &host)\n\treturn host.Ipv4Addrs[0].Ipv4Addr, err\n}\n\nfunc (objMgr *ObjectManager) UpdateHostRecord(\n\thostRref string,\n\tenabledns bool,\n\tenableDhcp bool,\n\tname string,\n\tnetView string,\n\tipv4cidr string,\n\tipv6cidr string,\n\tipv4Addr string,\n\tipv6Addr string,\n\tmacAddr string,\n\tduid string,\n\tuseTtl bool,\n\tttl uint32,\n\tcomment string,\n\teas EA,\n\taliases []string) (*HostRecord, error) {\n\n\trecordHostIpv4AddrSlice := []HostRecordIpv4Addr{}\n\trecordHostIpv6AddrSlice := []HostRecordIpv6Addr{}\n\n\tenableDhcpv4 := false\n\tenableDhcpv6 := false\n\tif ipv6Addr != \"\" {\n\t\tif enableDhcp && duid != \"\" {\n\t\t\tenableDhcpv6 = true\n\t\t}\n\t}\n\tif ipv4Addr != \"\" {\n\t\tif enableDhcp && macAddr != \"\" && macAddr != MACADDR_ZERO {\n\t\t\tenableDhcpv4 = true\n\t\t}\n\t}\n\n\tif ipv4Addr == \"\" {\n\t\tif ipv4cidr != \"\" {\n\t\t\tip, _, err := net.ParseCIDR(ipv4cidr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot parse CIDR value: %s\", err.Error())\n\t\t\t}\n\t\t\tif ip.To4() == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"CIDR value must be an IPv4 CIDR, not an IPv6 one\")\n\t\t\t}\n\t\t\tif netView == \"\" {\n\t\t\t\tnetView = \"default\"\n\t\t\t}\n\t\t\tnewIpAddr := fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv4cidr, netView)\n\t\t\trecordHostIpAddr := NewHostRecordIpv4Addr(newIpAddr, macAddr, enableDhcpv4, \"\")\n\t\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t\t}\n\t} else {\n\t\tip := net.ParseIP(ipv4Addr)\n\t\tif ip == nil {\n\t\t\treturn nil, fmt.Errorf(\"'IP address for the record is not valid\")\n\t\t}\n\t\tif ip.To4() == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address must be an IPv4 address, not an IPv6 one\")\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv4Addr(ipv4Addr, macAddr, enableDhcpv4, \"\")\n\t\trecordHostIpv4AddrSlice = []HostRecordIpv4Addr{*recordHostIpAddr}\n\t}\n\tif ipv6Addr == \"\" {\n\t\tif ipv6cidr != \"\" {\n\t\t\tip, _, err := net.ParseCIDR(ipv6cidr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot parse CIDR value: %s\", err.Error())\n\t\t\t}\n\t\t\tif ip.To4() != nil || ip.To16() == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"CIDR value must be an IPv6 CIDR, not an IPv4 one\")\n\t\t\t}\n\t\t\tif netView == \"\" {\n\t\t\t\tnetView = \"default\"\n\t\t\t}\n\t\t\tnewIpAddr := fmt.Sprintf(\"func:nextavailableip:%s,%s\", ipv6cidr, netView)\n\t\t\trecordHostIpAddr := NewHostRecordIpv6Addr(newIpAddr, duid, enableDhcpv6, \"\")\n\t\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpAddr}\n\t\t}\n\t} else {\n\t\tip := net.ParseIP(ipv6Addr)\n\t\tif ip == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address for the record is not valid\")\n\t\t}\n\t\tif ip.To4() != nil || ip.To16() == nil {\n\t\t\treturn nil, fmt.Errorf(\"IP address must be an IPv6 address, not an IPv4 one\")\n\t\t}\n\t\trecordHostIpAddr := NewHostRecordIpv6Addr(ipv6Addr, duid, enableDhcpv6, \"\")\n\t\trecordHostIpv6AddrSlice = []HostRecordIpv6Addr{*recordHostIpAddr}\n\t}\n\tupdateHostRecord := NewHostRecord(\n\t\t\"\", name, \"\", \"\", recordHostIpv4AddrSlice, recordHostIpv6AddrSlice,\n\t\teas, enabledns, \"\", \"\", hostRref, useTtl, ttl, comment, aliases)\n\tref, err := objMgr.connector.UpdateObject(updateHostRecord, hostRref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdateHostRecord, err = objMgr.GetHostRecordByRef(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn updateHostRecord, nil\n}\n\nfunc (objMgr *ObjectManager) DeleteHostRecord(ref string) (string, error) {\n\treturn objMgr.connector.DeleteObject(ref)\n}\n<|endoftext|>"} {"text":"<commit_before>package connections\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/game\"\n)\n\ntype ConnectionGroup struct {\n\tlimit int\n\tcounter int\n\tmutex *sync.RWMutex\n\tgame *game.Game\n}\n\nfunc NewConnectionGroup(connectionLimit int, g *game.Game) (*ConnectionGroup, error) {\n\tif connectionLimit > 0 {\n\t\treturn &ConnectionGroup{\n\t\t\tlimit: connectionLimit,\n\t\t\tmutex: &sync.RWMutex{},\n\t\t\tgame: g,\n\t\t}, nil\n\t}\n\n\treturn nil, errors.New(\"cannot create connection group: invalid connection limit\")\n}\n\nfunc (cg *ConnectionGroup) GetLimit() int {\n\treturn cg.limit\n}\n\nfunc (cg *ConnectionGroup) GetCount() int {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.counter\n}\n\n\/\/ unsafeIsFull returns true if group is full\nfunc (cg *ConnectionGroup) unsafeIsFull() bool {\n\treturn cg.counter == cg.limit\n}\n\nfunc (cg *ConnectionGroup) IsFull() bool {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.unsafeIsFull()\n}\n\n\/\/ unsafeIsEmpty returns true if group is empty\nfunc (cg *ConnectionGroup) unsafeIsEmpty() bool {\n\treturn cg.counter == 0\n}\n\nfunc (cg *ConnectionGroup) IsEmpty() bool {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.unsafeIsEmpty()\n}\n\nfunc (cg *ConnectionGroup) Run(f func(game *game.Game)) error {\n\tcg.mutex.Lock()\n\tif cg.unsafeIsFull() {\n\t\tcg.mutex.Unlock()\n\t\treturn errors.New(\"add connection to group: group is full\")\n\t}\n\tcg.counter += 1\n\tcg.mutex.Unlock()\n\n\tf(cg.game)\n\n\tcg.mutex.Lock()\n\tcg.counter -= 1\n\tcg.mutex.Unlock()\n\treturn nil\n}\n<commit_msg>Rewrite run method in connection groups<commit_after>package connections\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/game\"\n)\n\ntype ConnectionGroup struct {\n\tlimit int\n\tcounter int\n\tmutex *sync.RWMutex\n\tgame *game.Game\n}\n\nfunc NewConnectionGroup(connectionLimit int, g *game.Game) (*ConnectionGroup, error) {\n\tif connectionLimit > 0 {\n\t\treturn &ConnectionGroup{\n\t\t\tlimit: connectionLimit,\n\t\t\tmutex: &sync.RWMutex{},\n\t\t\tgame: g,\n\t\t}, nil\n\t}\n\n\treturn nil, errors.New(\"cannot create connection group: invalid connection limit\")\n}\n\nfunc (cg *ConnectionGroup) GetLimit() int {\n\treturn cg.limit\n}\n\nfunc (cg *ConnectionGroup) GetCount() int {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.counter\n}\n\n\/\/ unsafeIsFull returns true if group is full\nfunc (cg *ConnectionGroup) unsafeIsFull() bool {\n\treturn cg.counter == cg.limit\n}\n\nfunc (cg *ConnectionGroup) IsFull() bool {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.unsafeIsFull()\n}\n\n\/\/ unsafeIsEmpty returns true if group is empty\nfunc (cg *ConnectionGroup) unsafeIsEmpty() bool {\n\treturn cg.counter == 0\n}\n\nfunc (cg *ConnectionGroup) IsEmpty() bool {\n\tcg.mutex.RLock()\n\tdefer cg.mutex.RUnlock()\n\treturn cg.unsafeIsEmpty()\n}\n\ntype ErrRunConnection struct {\n\tErr error\n}\n\nfunc (e *ErrRunConnection) Error() string {\n\treturn \"run connection error: \" + e.Err.Error()\n}\n\nfunc (cg *ConnectionGroup) Run(connection *Connection) *ErrRunConnection {\n\tcg.mutex.Lock()\n\tif cg.unsafeIsFull() {\n\t\tcg.mutex.Unlock()\n\t\treturn &ErrRunConnection{\n\t\t\tErr: errors.New(\"group is full\"),\n\t\t}\n\t}\n\tcg.counter += 1\n\tcg.mutex.Unlock()\n\n\tdefer func() {\n\t\tcg.mutex.Lock()\n\t\tcg.counter -= 1\n\t\tcg.mutex.Unlock()\n\t}()\n\n\tif err := connection.Run(cg.game); err != nil {\n\t\treturn &ErrRunConnection{\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package consumergroup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in\/Shopify\/sarama.v1\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ OffsetManager is the main interface consumergroup requires to manage offsets of the consumergroup.\ntype OffsetManager interface {\n\n\t\/\/ InitializePartition is called when the consumergroup is starting to consume a\n\t\/\/ partition. It should return the last processed offset for this partition. Note:\n\t\/\/ the same partition can be initialized multiple times during a single run of a\n\t\/\/ consumer group due to other consumer instances coming online and offline.\n\tInitializePartition(topic string, partition int32) (int64, error)\n\n\t\/\/ MarkAsProcessed tells the offset manager than a certain message has been successfully\n\t\/\/ processed by the consumer, and should be committed. The implementation does not have\n\t\/\/ to store this offset right away, but should return true if it intends to do this at\n\t\/\/ some point.\n\t\/\/\n\t\/\/ Offsets should generally be increasing if the consumer\n\t\/\/ processes events serially, but this cannot be guaranteed if the consumer does any\n\t\/\/ asynchronous processing. This can be handled in various ways, e.g. by only accepting\n\t\/\/ offsets that are higehr than the offsets seen before for the same partition.\n\tMarkAsProcessed(topic string, partition int32, offset int64) (bool, error)\n\n\t\/\/ FinalizePartition is called when the consumergroup is done consuming a\n\t\/\/ partition. In this method, the offset manager can flush any remaining offsets to its\n\t\/\/ backend store. It should return an error if it was not able to commit the offset.\n\t\/\/ Note: it's possible that the consumergroup instance will start to consume the same\n\t\/\/ partition again after this function is called.\n\tFinalizePartition(topic string, partition int32, lastOffset int64, timeout time.Duration) error\n\n\t\/\/ Close is called when the consumergroup is shutting down. In normal circumstances, all\n\t\/\/ offsets are committed because FinalizePartition is called for all the running partition\n\t\/\/ consumers. You may want to check for this to be true, and try to commit any outstanding\n\t\/\/ offsets. If this doesn't succeed, it should return an error.\n\tClose() error\n}\n\nvar (\n\tUncleanClose = errors.New(\"Not all offsets were committed before shutdown was completed\")\n\tPartitionClosed = errors.New(\"Tried to mark offset as processed on closed partition.\")\n)\n\n\/\/ OffsetManagerConfig holds configuration setting son how the offset manager should behave.\ntype OffsetManagerConfig struct {\n\tCommitInterval time.Duration \/\/ Interval between offset flushes to the backend store.\n\tVerboseLogging bool \/\/ Whether to enable verbose logging.\n}\n\n\/\/ NewOffsetManagerConfig returns a new OffsetManagerConfig with sane defaults.\nfunc NewOffsetManagerConfig() *OffsetManagerConfig {\n\treturn &OffsetManagerConfig{\n\t\tCommitInterval: 10 * time.Second,\n\t}\n}\n\ntype (\n\ttopicOffsets map[int32]*partitionOffsetTracker\n\toffsetsMap map[string]topicOffsets\n\toffsetCommitter func(int64) error\n)\n\ntype partitionOffsetTracker struct {\n\tl sync.Mutex\n\twaitingForOffset int64\n\thighestProcessedOffset int64\n\tlastCommittedOffset int64\n\tdone chan struct{}\n}\n\ntype zookeeperOffsetManager struct {\n\tconfig *OffsetManagerConfig\n\tl sync.RWMutex\n\toffsets offsetsMap\n\tcg *ConsumerGroup\n\n\tclosing, closed chan struct{}\n}\n\n\/\/ NewZookeeperOffsetManager returns an offset manager that uses Zookeeper\n\/\/ to store offsets.\nfunc NewZookeeperOffsetManager(cg *ConsumerGroup, config *OffsetManagerConfig) OffsetManager {\n\tif config == nil {\n\t\tconfig = NewOffsetManagerConfig()\n\t}\n\n\tzom := &zookeeperOffsetManager{\n\t\tconfig: config,\n\t\tcg: cg,\n\t\toffsets: make(offsetsMap),\n\t\tclosing: make(chan struct{}),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo zom.offsetCommitter()\n\n\treturn zom\n}\n\nfunc (zom *zookeeperOffsetManager) InitializePartition(topic string, partition int32) (int64, error) {\n\tzom.l.Lock()\n\tdefer zom.l.Unlock()\n\n\tif zom.offsets[topic] == nil {\n\t\tzom.offsets[topic] = make(topicOffsets)\n\t}\n\n\tnextOffset, err := zom.cg.group.FetchOffset(topic, partition)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tzom.offsets[topic][partition] = &partitionOffsetTracker{\n\t\thighestProcessedOffset: nextOffset - 1,\n\t\tlastCommittedOffset: nextOffset - 1,\n\t\tdone: make(chan struct{}),\n\t}\n\n\treturn nextOffset, nil\n}\n\nfunc (zom *zookeeperOffsetManager) FinalizePartition(topic string, partition int32, lastOffset int64, timeout time.Duration) error {\n\tzom.l.RLock()\n\ttracker := zom.offsets[topic][partition]\n\tzom.l.RUnlock()\n\n\tif lastOffset >= 0 {\n\t\tif lastOffset-tracker.highestProcessedOffset > 0 {\n\t\t\tzom.cg.Logf(\"%s\/%d :: Last processed offset: %d. Waiting up to %ds for another %d messages to process...\", topic, partition, tracker.highestProcessedOffset, timeout\/time.Second, lastOffset-tracker.highestProcessedOffset)\n\t\t\tif !tracker.waitForOffset(lastOffset, timeout) {\n\t\t\t\treturn fmt.Errorf(\"TIMEOUT waiting for offset %d. Last committed offset: %d\", lastOffset, tracker.lastCommittedOffset)\n\t\t\t}\n\t\t}\n\n\t\tif err := zom.commitOffset(topic, partition, tracker); err != nil {\n\t\t\treturn fmt.Errorf(\"FAILED to commit offset %d to Zookeeper. Last committed offset: %d\", tracker.highestProcessedOffset, tracker.lastCommittedOffset)\n\t\t}\n\t}\n\n\tzom.l.Lock()\n\tdelete(zom.offsets[topic], partition)\n\tzom.l.Unlock()\n\n\treturn nil\n}\n\nfunc (zom *zookeeperOffsetManager) MarkAsProcessed(topic string, partition int32, offset int64) (bool, error) {\n\tzom.l.RLock()\n\tdefer zom.l.RUnlock()\n\tif p, ok := zom.offsets[topic][partition]; ok {\n\t\tresult, err := p.markAsProcessed(offset)\n\t\tif err != nil {\n\t\t\tzom.cg.Logf(\"FAILED to mark offset %d for %s\/%d as processed!\", offset, topic, partition)\n\t\t}\n\t\treturn result, err\n\t} else {\n\t\treturn false, fmt.Errorf(\"no partition %d for topic %v\", partition, topic)\n\t}\n}\n\nfunc (zom *zookeeperOffsetManager) Close() error {\n\tclose(zom.closing)\n\t<-zom.closed\n\n\tzom.l.Lock()\n\tdefer zom.l.Unlock()\n\n\tvar closeError error\n\tfor _, partitionOffsets := range zom.offsets {\n\t\tif len(partitionOffsets) > 0 {\n\t\t\tcloseError = UncleanClose\n\t\t}\n\t}\n\n\treturn closeError\n}\n\nfunc (zom *zookeeperOffsetManager) offsetCommitter() {\n\tcommitTicker := time.NewTicker(zom.config.CommitInterval)\n\tdefer commitTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-zom.closing:\n\t\t\tclose(zom.closed)\n\t\t\treturn\n\t\tcase <-commitTicker.C:\n\t\t\tif err := zom.commitOffsets(); err != nil {\n\t\t\t\tzom.cg.errors <- &sarama.ConsumerError{\n\t\t\t\t\tTopic: \"\",\n\t\t\t\t\tPartition: -1,\n\t\t\t\t\tErr: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (zom *zookeeperOffsetManager) commitOffsets() error {\n\tzom.l.RLock()\n\tdefer zom.l.RUnlock()\n\n\tvar returnErr error\n\tfor topic, partitionOffsets := range zom.offsets {\n\t\tfor partition, offsetTracker := range partitionOffsets {\n\t\t\terr := zom.commitOffset(topic, partition, offsetTracker)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\t\/\/ noop\n\t\t\tdefault:\n\t\t\t\treturnErr = err\n\t\t\t}\n\t\t}\n\t}\n\treturn returnErr\n}\n\nfunc (zom *zookeeperOffsetManager) commitOffset(topic string, partition int32, tracker *partitionOffsetTracker) error {\n\terr := tracker.commit(func(offset int64) error {\n\t\tif offset >= 0 {\n\t\t\treturn zom.cg.group.CommitOffset(topic, partition, offset+1)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tzom.cg.Logf(\"FAILED to commit offset %d for %s\/%d!\", tracker.highestProcessedOffset, topic, partition)\n\t} else if zom.config.VerboseLogging {\n\t\tzom.cg.Logf(\"Committed offset %d for %s\/%d!\", tracker.lastCommittedOffset, topic, partition)\n\t}\n\n\treturn err\n}\n\n\/\/ MarkAsProcessed marks the provided offset as highest processed offset if\n\/\/ it's higehr than any previous offset it has received.\nfunc (pot *partitionOffsetTracker) markAsProcessed(offset int64) (bool, error) {\n\tif pot == nil {\n\t\treturn false, PartitionClosed\n\t}\n\tpot.l.Lock()\n\tdefer pot.l.Unlock()\n\tif offset > pot.highestProcessedOffset {\n\t\tpot.highestProcessedOffset = offset\n\t\tif pot.waitingForOffset == pot.highestProcessedOffset {\n\t\t\tclose(pot.done)\n\t\t}\n\t\treturn true, nil\n\t} else {\n\t\treturn false, nil\n\t}\n}\n\n\/\/ Commit calls a committer function if the highest processed offset is out\n\/\/ of sync with the last committed offset.\nfunc (pot *partitionOffsetTracker) commit(committer offsetCommitter) error {\n\tpot.l.Lock()\n\tdefer pot.l.Unlock()\n\n\tif pot.highestProcessedOffset > pot.lastCommittedOffset {\n\t\tif err := committer(pot.highestProcessedOffset); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpot.lastCommittedOffset = pot.highestProcessedOffset\n\t\treturn nil\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (pot *partitionOffsetTracker) waitForOffset(offset int64, timeout time.Duration) bool {\n\tpot.l.Lock()\n\tif offset > pot.highestProcessedOffset {\n\t\tpot.waitingForOffset = offset\n\t\tpot.l.Unlock()\n\t\tselect {\n\t\tcase <-pot.done:\n\t\t\treturn true\n\t\tcase <-time.After(timeout):\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tpot.l.Unlock()\n\t\treturn true\n\t}\n}\n<commit_msg>Updates to github sarama package<commit_after>package consumergroup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ OffsetManager is the main interface consumergroup requires to manage offsets of the consumergroup.\ntype OffsetManager interface {\n\n\t\/\/ InitializePartition is called when the consumergroup is starting to consume a\n\t\/\/ partition. It should return the last processed offset for this partition. Note:\n\t\/\/ the same partition can be initialized multiple times during a single run of a\n\t\/\/ consumer group due to other consumer instances coming online and offline.\n\tInitializePartition(topic string, partition int32) (int64, error)\n\n\t\/\/ MarkAsProcessed tells the offset manager than a certain message has been successfully\n\t\/\/ processed by the consumer, and should be committed. The implementation does not have\n\t\/\/ to store this offset right away, but should return true if it intends to do this at\n\t\/\/ some point.\n\t\/\/\n\t\/\/ Offsets should generally be increasing if the consumer\n\t\/\/ processes events serially, but this cannot be guaranteed if the consumer does any\n\t\/\/ asynchronous processing. This can be handled in various ways, e.g. by only accepting\n\t\/\/ offsets that are higehr than the offsets seen before for the same partition.\n\tMarkAsProcessed(topic string, partition int32, offset int64) (bool, error)\n\n\t\/\/ FinalizePartition is called when the consumergroup is done consuming a\n\t\/\/ partition. In this method, the offset manager can flush any remaining offsets to its\n\t\/\/ backend store. It should return an error if it was not able to commit the offset.\n\t\/\/ Note: it's possible that the consumergroup instance will start to consume the same\n\t\/\/ partition again after this function is called.\n\tFinalizePartition(topic string, partition int32, lastOffset int64, timeout time.Duration) error\n\n\t\/\/ Close is called when the consumergroup is shutting down. In normal circumstances, all\n\t\/\/ offsets are committed because FinalizePartition is called for all the running partition\n\t\/\/ consumers. You may want to check for this to be true, and try to commit any outstanding\n\t\/\/ offsets. If this doesn't succeed, it should return an error.\n\tClose() error\n}\n\nvar (\n\tUncleanClose = errors.New(\"Not all offsets were committed before shutdown was completed\")\n\tPartitionClosed = errors.New(\"Tried to mark offset as processed on closed partition.\")\n)\n\n\/\/ OffsetManagerConfig holds configuration setting son how the offset manager should behave.\ntype OffsetManagerConfig struct {\n\tCommitInterval time.Duration \/\/ Interval between offset flushes to the backend store.\n\tVerboseLogging bool \/\/ Whether to enable verbose logging.\n}\n\n\/\/ NewOffsetManagerConfig returns a new OffsetManagerConfig with sane defaults.\nfunc NewOffsetManagerConfig() *OffsetManagerConfig {\n\treturn &OffsetManagerConfig{\n\t\tCommitInterval: 10 * time.Second,\n\t}\n}\n\ntype (\n\ttopicOffsets map[int32]*partitionOffsetTracker\n\toffsetsMap map[string]topicOffsets\n\toffsetCommitter func(int64) error\n)\n\ntype partitionOffsetTracker struct {\n\tl sync.Mutex\n\twaitingForOffset int64\n\thighestProcessedOffset int64\n\tlastCommittedOffset int64\n\tdone chan struct{}\n}\n\ntype zookeeperOffsetManager struct {\n\tconfig *OffsetManagerConfig\n\tl sync.RWMutex\n\toffsets offsetsMap\n\tcg *ConsumerGroup\n\n\tclosing, closed chan struct{}\n}\n\n\/\/ NewZookeeperOffsetManager returns an offset manager that uses Zookeeper\n\/\/ to store offsets.\nfunc NewZookeeperOffsetManager(cg *ConsumerGroup, config *OffsetManagerConfig) OffsetManager {\n\tif config == nil {\n\t\tconfig = NewOffsetManagerConfig()\n\t}\n\n\tzom := &zookeeperOffsetManager{\n\t\tconfig: config,\n\t\tcg: cg,\n\t\toffsets: make(offsetsMap),\n\t\tclosing: make(chan struct{}),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo zom.offsetCommitter()\n\n\treturn zom\n}\n\nfunc (zom *zookeeperOffsetManager) InitializePartition(topic string, partition int32) (int64, error) {\n\tzom.l.Lock()\n\tdefer zom.l.Unlock()\n\n\tif zom.offsets[topic] == nil {\n\t\tzom.offsets[topic] = make(topicOffsets)\n\t}\n\n\tnextOffset, err := zom.cg.group.FetchOffset(topic, partition)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tzom.offsets[topic][partition] = &partitionOffsetTracker{\n\t\thighestProcessedOffset: nextOffset - 1,\n\t\tlastCommittedOffset: nextOffset - 1,\n\t\tdone: make(chan struct{}),\n\t}\n\n\treturn nextOffset, nil\n}\n\nfunc (zom *zookeeperOffsetManager) FinalizePartition(topic string, partition int32, lastOffset int64, timeout time.Duration) error {\n\tzom.l.RLock()\n\ttracker := zom.offsets[topic][partition]\n\tzom.l.RUnlock()\n\n\tif lastOffset >= 0 {\n\t\tif lastOffset-tracker.highestProcessedOffset > 0 {\n\t\t\tzom.cg.Logf(\"%s\/%d :: Last processed offset: %d. Waiting up to %ds for another %d messages to process...\", topic, partition, tracker.highestProcessedOffset, timeout\/time.Second, lastOffset-tracker.highestProcessedOffset)\n\t\t\tif !tracker.waitForOffset(lastOffset, timeout) {\n\t\t\t\treturn fmt.Errorf(\"TIMEOUT waiting for offset %d. Last committed offset: %d\", lastOffset, tracker.lastCommittedOffset)\n\t\t\t}\n\t\t}\n\n\t\tif err := zom.commitOffset(topic, partition, tracker); err != nil {\n\t\t\treturn fmt.Errorf(\"FAILED to commit offset %d to Zookeeper. Last committed offset: %d\", tracker.highestProcessedOffset, tracker.lastCommittedOffset)\n\t\t}\n\t}\n\n\tzom.l.Lock()\n\tdelete(zom.offsets[topic], partition)\n\tzom.l.Unlock()\n\n\treturn nil\n}\n\nfunc (zom *zookeeperOffsetManager) MarkAsProcessed(topic string, partition int32, offset int64) (bool, error) {\n\tzom.l.RLock()\n\tdefer zom.l.RUnlock()\n\tif p, ok := zom.offsets[topic][partition]; ok {\n\t\tresult, err := p.markAsProcessed(offset)\n\t\tif err != nil {\n\t\t\tzom.cg.Logf(\"FAILED to mark offset %d for %s\/%d as processed!\", offset, topic, partition)\n\t\t}\n\t\treturn result, err\n\t} else {\n\t\treturn false, fmt.Errorf(\"no partition %d for topic %v\", partition, topic)\n\t}\n}\n\nfunc (zom *zookeeperOffsetManager) Close() error {\n\tclose(zom.closing)\n\t<-zom.closed\n\n\tzom.l.Lock()\n\tdefer zom.l.Unlock()\n\n\tvar closeError error\n\tfor _, partitionOffsets := range zom.offsets {\n\t\tif len(partitionOffsets) > 0 {\n\t\t\tcloseError = UncleanClose\n\t\t}\n\t}\n\n\treturn closeError\n}\n\nfunc (zom *zookeeperOffsetManager) offsetCommitter() {\n\tcommitTicker := time.NewTicker(zom.config.CommitInterval)\n\tdefer commitTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-zom.closing:\n\t\t\tclose(zom.closed)\n\t\t\treturn\n\t\tcase <-commitTicker.C:\n\t\t\tif err := zom.commitOffsets(); err != nil {\n\t\t\t\tzom.cg.errors <- &sarama.ConsumerError{\n\t\t\t\t\tTopic: \"\",\n\t\t\t\t\tPartition: -1,\n\t\t\t\t\tErr: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (zom *zookeeperOffsetManager) commitOffsets() error {\n\tzom.l.RLock()\n\tdefer zom.l.RUnlock()\n\n\tvar returnErr error\n\tfor topic, partitionOffsets := range zom.offsets {\n\t\tfor partition, offsetTracker := range partitionOffsets {\n\t\t\terr := zom.commitOffset(topic, partition, offsetTracker)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\t\/\/ noop\n\t\t\tdefault:\n\t\t\t\treturnErr = err\n\t\t\t}\n\t\t}\n\t}\n\treturn returnErr\n}\n\nfunc (zom *zookeeperOffsetManager) commitOffset(topic string, partition int32, tracker *partitionOffsetTracker) error {\n\terr := tracker.commit(func(offset int64) error {\n\t\tif offset >= 0 {\n\t\t\treturn zom.cg.group.CommitOffset(topic, partition, offset+1)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tzom.cg.Logf(\"FAILED to commit offset %d for %s\/%d!\", tracker.highestProcessedOffset, topic, partition)\n\t} else if zom.config.VerboseLogging {\n\t\tzom.cg.Logf(\"Committed offset %d for %s\/%d!\", tracker.lastCommittedOffset, topic, partition)\n\t}\n\n\treturn err\n}\n\n\/\/ MarkAsProcessed marks the provided offset as highest processed offset if\n\/\/ it's higehr than any previous offset it has received.\nfunc (pot *partitionOffsetTracker) markAsProcessed(offset int64) (bool, error) {\n\tif pot == nil {\n\t\treturn false, PartitionClosed\n\t}\n\tpot.l.Lock()\n\tdefer pot.l.Unlock()\n\tif offset > pot.highestProcessedOffset {\n\t\tpot.highestProcessedOffset = offset\n\t\tif pot.waitingForOffset == pot.highestProcessedOffset {\n\t\t\tclose(pot.done)\n\t\t}\n\t\treturn true, nil\n\t} else {\n\t\treturn false, nil\n\t}\n}\n\n\/\/ Commit calls a committer function if the highest processed offset is out\n\/\/ of sync with the last committed offset.\nfunc (pot *partitionOffsetTracker) commit(committer offsetCommitter) error {\n\tpot.l.Lock()\n\tdefer pot.l.Unlock()\n\n\tif pot.highestProcessedOffset > pot.lastCommittedOffset {\n\t\tif err := committer(pot.highestProcessedOffset); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpot.lastCommittedOffset = pot.highestProcessedOffset\n\t\treturn nil\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (pot *partitionOffsetTracker) waitForOffset(offset int64, timeout time.Duration) bool {\n\tpot.l.Lock()\n\tif offset > pot.highestProcessedOffset {\n\t\tpot.waitingForOffset = offset\n\t\tpot.l.Unlock()\n\t\tselect {\n\t\tcase <-pot.done:\n\t\t\treturn true\n\t\tcase <-time.After(timeout):\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tpot.l.Unlock()\n\t\treturn true\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage amqp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/types\"\n\n\tAMQP \"github.com\/streadway\/amqp\"\n)\n\n\/\/ Publisher holds an AMQP channel and can publish on uplink from devices\ntype Publisher interface {\n\tOpen() error\n\tio.Closer\n\tPublishUplink(payload types.UplinkMessage) error\n}\n\n\/\/ DefaultPublisher is the default AMQP publisher\ntype DefaultPublisher struct {\n\tctx Logger\n\tconn *AMQP.Connection\n\tchannel *AMQP.Channel\n\texchange string\n}\n\n\/\/ Open opens an AMQP channel for publishing\nfunc (p *DefaultPublisher) Open() error {\n\tchannel, err := p.conn.Channel()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not open AMQP channel (%s).\", err)\n\t}\n\n\tif err = channel.ExchangeDeclare(p.exchange, \"topic\", true, false, false, false, nil); err != nil {\n\t\tchannel.Close()\n\t\treturn fmt.Errorf(\"Could not declare AMQP exchange (%s).\", err)\n\t}\n\n\tp.channel = channel\n\treturn nil\n}\n\n\/\/ Close closes the AMQP channel\nfunc (p *DefaultPublisher) Close() error {\n\tif p.channel == nil {\n\t\treturn nil\n\t}\n\treturn p.channel.Close()\n}\n\nfunc (p *DefaultPublisher) do(action func() error) error {\n\terr := action()\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif err == AMQP.ErrClosed {\n\t\terr = p.Open()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = action()\n\t}\n\treturn err\n}\n\nfunc (p *DefaultPublisher) publish(key string, msg []byte, timestamp time.Time) error {\n\treturn p.do(func() error {\n\t\treturn p.channel.Publish(p.exchange, key, false, false, AMQP.Publishing{\n\t\t\tContentType: \"application\/json\",\n\t\t\tDeliveryMode: AMQP.Persistent,\n\t\t\tTimestamp: timestamp,\n\t\t\tBody: msg,\n\t\t})\n\t})\n}\n<commit_msg>Code cleanup<commit_after>\/\/ Copyright © 2016 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage amqp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/types\"\n\tAMQP \"github.com\/streadway\/amqp\"\n)\n\n\/\/ Publisher holds an AMQP channel and can publish on uplink from devices\ntype Publisher interface {\n\tOpen() error\n\tio.Closer\n\tPublishUplink(payload types.UplinkMessage) error\n}\n\n\/\/ DefaultPublisher is the default AMQP publisher\ntype DefaultPublisher struct {\n\tctx Logger\n\tconn *AMQP.Connection\n\tchannel *AMQP.Channel\n\texchange string\n}\n\n\/\/ Open opens an AMQP channel for publishing\nfunc (p *DefaultPublisher) Open() error {\n\tchannel, err := p.conn.Channel()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not open AMQP channel (%s).\", err)\n\t}\n\n\tif err = channel.ExchangeDeclare(p.exchange, \"topic\", true, false, false, false, nil); err != nil {\n\t\tchannel.Close()\n\t\treturn fmt.Errorf(\"Could not declare AMQP exchange (%s).\", err)\n\t}\n\n\tp.channel = channel\n\treturn nil\n}\n\n\/\/ Close closes the AMQP channel\nfunc (p *DefaultPublisher) Close() error {\n\tif p.channel == nil {\n\t\treturn nil\n\t}\n\treturn p.channel.Close()\n}\n\nfunc (p *DefaultPublisher) do(action func() error) error {\n\terr := action()\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif err == AMQP.ErrClosed {\n\t\terr = p.Open()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = action()\n\t}\n\treturn err\n}\n\nfunc (p *DefaultPublisher) publish(key string, msg []byte, timestamp time.Time) error {\n\treturn p.do(func() error {\n\t\treturn p.channel.Publish(p.exchange, key, false, false, AMQP.Publishing{\n\t\t\tContentType: \"application\/json\",\n\t\t\tDeliveryMode: AMQP.Persistent,\n\t\t\tTimestamp: timestamp,\n\t\t\tBody: msg,\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n)\n\ntype Event map[string]interface{}\n\ntype CommitList []Event\n\n\/\/ Information to translate from GitLab to GitHub format.\nvar eventTranslate = map[string]string{\n\t\"total_commits_count\": \"size\",\n\t\"repository\": \"repo\",\n\t\"after\": \"head\",\n}\n\nvar commitTranslate = map[string]string{\n\t\"id\": \"sha\",\n}\n\nfunc (e Event) Commits() CommitList {\n\tgeneric, ok := e[\"commits\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tc, ok := generic.(CommitList)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn c\n}\n\n\/\/ Normalize makes GitLab events look like GitHub events.\nfunc (e Event) normalize() {\n\t\/\/ Fortunately there's very little difference, at least in the push format.\n\n\tfor gitlabName, githubName := range eventTranslate {\n\t\tvalue, ok := e[gitlabName]\n\t\tif ok {\n\t\t\te[githubName] = value\n\t\t}\n\t}\n\n\tcommits := e.Commits()\n\tfor _, c := range commits {\n\t\tfor gitlabName, githubName := range commitTranslate {\n\t\t\tvalue, ok := c[gitlabName]\n\t\t\tif ok {\n\t\t\t\tc[githubName] = value\n\t\t\t}\n\t\t}\n\t}\n\n}\n\n\/\/ Create a new event from the given JSON. If the event type is blank,\n\/\/ this function will try to figure it out. Generally, GitHub events will\n\/\/ present a value for this in the HTTP Request, and GitLab events place\n\/\/ the event type in the JSON.\nfunc NewEvent(jsonData []byte, eventName string) Event {\n\te := Event{}\n\tjson.Unmarshal(jsonData, e)\n\n\tif payload, ok := e[\"payload\"].(Event); ok {\n\t\t\/\/ For GitHub events, export all payload fields into the event scope.\n\t\tfor key, value := range payload {\n\t\t\te[key] = value\n\t\t}\n\t}\n\n\te.normalize()\n\n\tif eventName == \"\" {\n\t\tvar gitlabType string\n\t\tgitlabType, ok := e[\"object_kind\"].(string)\n\t\tif !ok {\n\t\t\t\/\/ Push events look completely different from the other events and\n\t\t\t\/\/ don't have an explicit type.\n\t\t\tgitlabType = \"push\"\n\t\t}\n\t\te[\"type\"] = gitlabType\n\t} else {\n\t\te[\"type\"] = eventName\n\t}\n\treturn e\n}\n<commit_msg>Extra handling of gitlab non-push events<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n)\n\ntype Event map[string]interface{}\n\ntype CommitList []Event\n\n\/\/ Information to translate from GitLab to GitHub format.\nvar eventTranslate = map[string]string{\n\t\"total_commits_count\": \"size\",\n\t\"repository\": \"repo\",\n\t\"after\": \"head\",\n}\n\nvar commitTranslate = map[string]string{\n\t\"id\": \"sha\",\n}\n\nfunc (e Event) Commits() CommitList {\n\tgeneric, ok := e[\"commits\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tc, ok := generic.(CommitList)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn c\n}\n\n\/\/ Normalize makes GitLab events look like GitHub events.\nfunc (e Event) normalize() {\n\t\/\/ Fortunately there's very little difference, at least in the push format.\n\n\tfor gitlabName, githubName := range eventTranslate {\n\t\tvalue, ok := e[gitlabName]\n\t\tif ok {\n\t\t\te[githubName] = value\n\t\t}\n\t}\n\n\tcommits := e.Commits()\n\tfor _, c := range commits {\n\t\tfor gitlabName, githubName := range commitTranslate {\n\t\t\tvalue, ok := c[gitlabName]\n\t\t\tif ok {\n\t\t\t\tc[githubName] = value\n\t\t\t}\n\t\t}\n\t}\n\n}\n\n\/\/ Create a new event from the given JSON. If the event type is blank,\n\/\/ this function will try to figure it out. Generally, GitHub events will\n\/\/ present a value for this in the HTTP Request, and GitLab events place\n\/\/ the event type in the JSON.\nfunc NewEvent(jsonData []byte, eventName string) Event {\n\te := Event{}\n\tjson.Unmarshal(jsonData, e)\n\n\tif payload, ok := e[\"payload\"].(Event); ok {\n\t\t\/\/ For GitHub events, export all payload fields into the event scope.\n\t\tfor key, value := range payload {\n\t\t\te[key] = value\n\t\t}\n\t}\n\n\tif payload, ok := e[\"object_attributes\"].(Event); ok {\n\t\t\/\/ For GitLab events, export all object_attributes fields into the event scope.\n\t\tfor key, value := range payload {\n\t\t\te[key] = value\n\t\t}\n\t}\n\n\te.normalize()\n\n\tif eventName == \"\" {\n\t\tvar gitlabType string\n\t\tgitlabType, ok := e[\"object_kind\"].(string)\n\t\tif !ok {\n\t\t\t\/\/ Push events look completely different from the other events and\n\t\t\t\/\/ don't have an explicit type.\n\t\t\tgitlabType = \"push\"\n\t\t}\n\t\te[\"type\"] = gitlabType\n\t} else {\n\t\te[\"type\"] = eventName\n\t}\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>package create\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/giantswarm\/awstpr\"\n\tmicroerror \"github.com\/giantswarm\/microkit\/error\"\n\n\tawsutil \"github.com\/giantswarm\/aws-operator\/client\/aws\"\n\t\"github.com\/giantswarm\/aws-operator\/resources\"\n\tawsresources \"github.com\/giantswarm\/aws-operator\/resources\/aws\"\n)\n\ntype securityGroupInput struct {\n\tClients awsutil.Clients\n\tGroupName string\n\tVPCID string\n}\n\ntype rulesInput struct {\n\tCluster awstpr.CustomObject\n\tRules []awsresources.SecurityGroupRule\n\tMastersSecurityGroupID string\n\tWorkersSecurityGroupID string\n\tIngressSecurityGroupID string\n}\n\nconst (\n\tcalicoBGPNetworkPort = 179\n\thttpPort = 80\n\thttpsPort = 443\n\tsshPort = 22\n\n\tdefaultCIDR = \"0.0.0.0\/0\"\n)\n\nfunc (s *Service) createSecurityGroup(input securityGroupInput) (*awsresources.SecurityGroup, error) {\n\tsecurityGroup := &awsresources.SecurityGroup{\n\t\tDescription: input.GroupName,\n\t\tGroupName: input.GroupName,\n\t\tVpcID: input.VPCID,\n\t\tAWSEntity: awsresources.AWSEntity{Clients: input.Clients},\n\t}\n\tsecurityGroupCreated, err := securityGroup.CreateIfNotExists()\n\tif err != nil {\n\t\treturn nil, microerror.MaskAny(err)\n\t}\n\tif securityGroupCreated {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"created security group '%s'\", input.GroupName))\n\t} else {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"security group '%s' already exists, reusing\", input.GroupName))\n\t}\n\n\treturn securityGroup, nil\n}\n\nfunc (s *Service) deleteSecurityGroup(input securityGroupInput) error {\n\tvar securityGroup resources.ResourceWithID\n\tsecurityGroup = &awsresources.SecurityGroup{\n\t\tDescription: input.GroupName,\n\t\tGroupName: input.GroupName,\n\t\tAWSEntity: awsresources.AWSEntity{Clients: input.Clients},\n\t}\n\tif err := securityGroup.Delete(); err != nil {\n\t\treturn microerror.MaskAny(err)\n\t} else {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"deleted security group '%s'\", input.GroupName))\n\t}\n\n\treturn nil\n}\n\n\/\/ masterRules returns the rules for the masters security group.\nfunc (ri rulesInput) masterRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.API.SecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Etcd.Port,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: sshPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: calicoBGPNetworkPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\n\/\/ workerRules returns the rules for the workers security group.\nfunc (ri rulesInput) workerRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.IngressController.SecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.IngressController.InsecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.Kubelet.Port,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: sshPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: calicoBGPNetworkPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\n\/\/ ingressRules returns the rules for the ingress ELB security group.\nfunc (ri rulesInput) ingressRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: httpPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: httpsPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\nfunc securityGroupName(clusterName string, groupName string) string {\n\treturn fmt.Sprintf(\"%s-%s\", clusterName, groupName)\n}\n<commit_msg>open read only kubelet port for heapster<commit_after>package create\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/giantswarm\/awstpr\"\n\tmicroerror \"github.com\/giantswarm\/microkit\/error\"\n\n\tawsutil \"github.com\/giantswarm\/aws-operator\/client\/aws\"\n\t\"github.com\/giantswarm\/aws-operator\/resources\"\n\tawsresources \"github.com\/giantswarm\/aws-operator\/resources\/aws\"\n)\n\ntype securityGroupInput struct {\n\tClients awsutil.Clients\n\tGroupName string\n\tVPCID string\n}\n\ntype rulesInput struct {\n\tCluster awstpr.CustomObject\n\tRules []awsresources.SecurityGroupRule\n\tMastersSecurityGroupID string\n\tWorkersSecurityGroupID string\n\tIngressSecurityGroupID string\n}\n\nconst (\n\tcalicoBGPNetworkPort = 179\n\t\/\/ This port is required in our current kubernetes\/heapster setup, but will become unnecessary\n\t\/\/ once we upgrade to kubernetes 1.6 and heapster 1.3 with apiserver deployment.\n\treadOnlyKubeletPort = 10255\n\thttpPort = 80\n\thttpsPort = 443\n\tsshPort = 22\n\n\tdefaultCIDR = \"0.0.0.0\/0\"\n)\n\nfunc (s *Service) createSecurityGroup(input securityGroupInput) (*awsresources.SecurityGroup, error) {\n\tsecurityGroup := &awsresources.SecurityGroup{\n\t\tDescription: input.GroupName,\n\t\tGroupName: input.GroupName,\n\t\tVpcID: input.VPCID,\n\t\tAWSEntity: awsresources.AWSEntity{Clients: input.Clients},\n\t}\n\tsecurityGroupCreated, err := securityGroup.CreateIfNotExists()\n\tif err != nil {\n\t\treturn nil, microerror.MaskAny(err)\n\t}\n\tif securityGroupCreated {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"created security group '%s'\", input.GroupName))\n\t} else {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"security group '%s' already exists, reusing\", input.GroupName))\n\t}\n\n\treturn securityGroup, nil\n}\n\nfunc (s *Service) deleteSecurityGroup(input securityGroupInput) error {\n\tvar securityGroup resources.ResourceWithID\n\tsecurityGroup = &awsresources.SecurityGroup{\n\t\tDescription: input.GroupName,\n\t\tGroupName: input.GroupName,\n\t\tAWSEntity: awsresources.AWSEntity{Clients: input.Clients},\n\t}\n\tif err := securityGroup.Delete(); err != nil {\n\t\treturn microerror.MaskAny(err)\n\t} else {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"deleted security group '%s'\", input.GroupName))\n\t}\n\n\treturn nil\n}\n\n\/\/ masterRules returns the rules for the masters security group.\nfunc (ri rulesInput) masterRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.API.SecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Etcd.Port,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: sshPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: calicoBGPNetworkPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\n\/\/ workerRules returns the rules for the workers security group.\nfunc (ri rulesInput) workerRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.IngressController.SecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.IngressController.InsecurePort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: ri.Cluster.Spec.Cluster.Kubernetes.Kubelet.Port,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: readOnlyKubeletPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: sshPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: calicoBGPNetworkPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\n\/\/ ingressRules returns the rules for the ingress ELB security group.\nfunc (ri rulesInput) ingressRules() []awsresources.SecurityGroupRule {\n\treturn []awsresources.SecurityGroupRule{\n\t\t{\n\t\t\tPort: httpPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t\t{\n\t\t\tPort: httpsPort,\n\t\t\tSourceCIDR: defaultCIDR,\n\t\t},\n\t}\n}\n\nfunc securityGroupName(clusterName string, groupName string) string {\n\treturn fmt.Sprintf(\"%s-%s\", clusterName, groupName)\n}\n<|endoftext|>"} {"text":"<commit_before>package s3manager\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awsutil\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/client\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3iface\"\n)\n\n\/\/ DefaultDownloadPartSize is the default range of bytes to get at a time when\n\/\/ using Download().\nconst DefaultDownloadPartSize = 1024 * 1024 * 5\n\n\/\/ DefaultDownloadConcurrency is the default number of goroutines to spin up\n\/\/ when using Download().\nconst DefaultDownloadConcurrency = 5\n\n\/\/ The Downloader structure that calls Download(). It is safe to call Download()\n\/\/ on this structure for multiple objects and across concurrent goroutines.\n\/\/ Mutating the Downloader's properties is not safe to be done concurrently.\ntype Downloader struct {\n\t\/\/ The buffer size (in bytes) to use when buffering data into chunks and\n\t\/\/ sending them as parts to S3. The minimum allowed part size is 5MB, and\n\t\/\/ if this value is set to zero, the DefaultPartSize value will be used.\n\tPartSize int64\n\n\t\/\/ The number of goroutines to spin up in parallel when sending parts.\n\t\/\/ If this is set to zero, the DefaultConcurrency value will be used.\n\tConcurrency int\n\n\t\/\/ An S3 client to use when performing downloads.\n\tS3 s3iface.S3API\n}\n\n\/\/ NewDownloader creates a new Downloader instance to downloads objects from\n\/\/ S3 in concurrent chunks. Pass in additional functional options to customize\n\/\/ the downloader behavior. Requires a client.ConfigProvider in order to create\n\/\/ a S3 service client. The session.Session satisfies the client.ConfigProvider\n\/\/ interface.\n\/\/\n\/\/ Example:\n\/\/ \/\/ The session the S3 Downloader will use\n\/\/ sess := session.New()\n\/\/\n\/\/ \/\/ Create a downloader with the session and default options\n\/\/ downloader := s3manager.NewDownloader(sess)\n\/\/\n\/\/ \/\/ Create a downloader with the session and custom options\n\/\/ downloader := s3manager.NewDownloader(sess, func(d *s3manager.Uploader) {\n\/\/ d.PartSize = 64 * 1024 * 1024 \/\/ 64MB per part\n\/\/ })\nfunc NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {\n\td := &Downloader{\n\t\tS3: s3.New(c),\n\t\tPartSize: DefaultDownloadPartSize,\n\t\tConcurrency: DefaultDownloadConcurrency,\n\t}\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\n\treturn d\n}\n\n\/\/ NewDownloaderWithClient creates a new Downloader instance to downloads\n\/\/ objects from S3 in concurrent chunks. Pass in additional functional\n\/\/ options to customize the downloader behavior. Requires a S3 service client\n\/\/ to make S3 API calls.\n\/\/\n\/\/ Example:\n\/\/ \/\/ The S3 client the S3 Downloader will use\n\/\/ s3Svc := s3.new(session.New())\n\/\/\n\/\/ \/\/ Create a downloader with the s3 client and default options\n\/\/ downloader := s3manager.NewDownloaderWithClient(s3Svc)\n\/\/\n\/\/ \/\/ Create a downloader with the s3 client and custom options\n\/\/ downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Uploader) {\n\/\/ d.PartSize = 64 * 1024 * 1024 \/\/ 64MB per part\n\/\/ })\nfunc NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {\n\td := &Downloader{\n\t\tS3: svc,\n\t\tPartSize: DefaultDownloadPartSize,\n\t\tConcurrency: DefaultDownloadConcurrency,\n\t}\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\n\treturn d\n}\n\n\/\/ Download downloads an object in S3 and writes the payload into w using\n\/\/ concurrent GET requests.\n\/\/\n\/\/ Additional functional options can be provided to configure the individual\n\/\/ upload. These options are copies of the Uploader instance Upload is called from.\n\/\/ Modifying the options will not impact the original Uploader instance.\n\/\/\n\/\/ It is safe to call this method concurrently across goroutines.\n\/\/\n\/\/ The w io.WriterAt can be satisfied by an os.File to do multipart concurrent\n\/\/ downloads, or in memory []byte wrapper using aws.WriteAtBuffer.\nfunc (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {\n\timpl := downloader{w: w, in: input, ctx: d}\n\n\tfor _, option := range options {\n\t\toption(&impl.ctx)\n\t}\n\n\treturn impl.download()\n}\n\n\/\/ downloader is the implementation structure used internally by Downloader.\ntype downloader struct {\n\tctx Downloader\n\n\tin *s3.GetObjectInput\n\tw io.WriterAt\n\n\twg sync.WaitGroup\n\tm sync.Mutex\n\n\tpos int64\n\ttotalBytes int64\n\twritten int64\n\terr error\n}\n\n\/\/ init initializes the downloader with default options.\nfunc (d *downloader) init() {\n\td.totalBytes = -1\n\n\tif d.ctx.Concurrency == 0 {\n\t\td.ctx.Concurrency = DefaultDownloadConcurrency\n\t}\n\n\tif d.ctx.PartSize == 0 {\n\t\td.ctx.PartSize = DefaultDownloadPartSize\n\t}\n}\n\n\/\/ download performs the implementation of the object download across ranged\n\/\/ GETs.\nfunc (d *downloader) download() (n int64, err error) {\n\td.init()\n\n\t\/\/ Spin off first worker to check additional header information\n\td.notThreadSafeGetChunk()\n\n\tif total := d.getTotalBytes(); total >= 0 {\n\t\t\/\/ Spin up workers\n\t\tch := make(chan dlchunk, d.ctx.Concurrency)\n\n\t\tfor i := 0; i < d.ctx.Concurrency; i++ {\n\t\t\td.wg.Add(1)\n\t\t\tgo d.downloadPart(ch)\n\t\t}\n\n\t\t\/\/ Assign work\n\t\tfor d.getErr() == nil {\n\t\t\tif d.pos >= total {\n\t\t\t\tbreak \/\/ We're finished queueing chunks\n\t\t\t}\n\n\t\t\t\/\/ Queue the next range of bytes to read.\n\t\t\tch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}\n\t\t\td.pos += d.ctx.PartSize\n\t\t}\n\n\t\t\/\/ Wait for completion\n\t\tclose(ch)\n\t\td.wg.Wait()\n\t} else {\n\t\t\/\/ Checking if we read anything new\n\t\tfor d.err == nil {\n\t\t\td.notThreadSafeGetChunk()\n\t\t}\n\n\t\t\/\/ We expect a 416 error letting us know we are done downloading the\n\t\t\/\/ total bytes. Since we do not know the content's length, this will\n\t\t\/\/ keep grabbing chunks of data until the range of bytes specified in\n\t\t\/\/ the request is out of range of the content. Once, this happens, a\n\t\t\/\/ 416 should occur.\n\t\te, ok := d.err.(awserr.RequestFailure)\n\t\tif ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {\n\t\t\td.err = nil\n\t\t}\n\t}\n\n\t\/\/ Return error\n\treturn d.written, d.err\n}\n\n\/\/ downloadPart is an individual goroutine worker reading from the ch channel\n\/\/ and performing a GetObject request on the data with a given byte range.\n\/\/\n\/\/ If this is the first worker, this operation also resolves the total number\n\/\/ of bytes to be read so that the worker manager knows when it is finished.\nfunc (d *downloader) downloadPart(ch chan dlchunk) {\n\tdefer d.wg.Done()\n\tfor {\n\t\tchunk, ok := <-ch\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\td.downloadChunk(chunk)\n\t}\n}\n\n\/\/ notThreadSafeGetChunk grabs a chunk of data from the body.\n\/\/ Not thread safe. Should only used when grabbing data on a single thread.\nfunc (d *downloader) notThreadSafeGetChunk() {\n\tchunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}\n\td.pos += d.ctx.PartSize\n\td.downloadChunk(chunk)\n}\n\n\/\/ downloadChunk downloads the chunk froom s3\nfunc (d *downloader) downloadChunk(chunk dlchunk) {\n\tif d.getErr() != nil {\n\t\treturn\n\t}\n\t\/\/ Get the next byte range of data\n\tin := &s3.GetObjectInput{}\n\tawsutil.Copy(in, d.in)\n\trng := fmt.Sprintf(\"bytes=%d-%d\",\n\t\tchunk.start, chunk.start+chunk.size-1)\n\tin.Range = &rng\n\n\treq, resp := d.ctx.S3.GetObjectRequest(in)\n\treq.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(\"S3Manager\"))\n\terr := req.Send()\n\n\tif err != nil {\n\t\td.setErr(err)\n\t} else {\n\t\td.setTotalBytes(resp) \/\/ Set total if not yet set.\n\n\t\tn, err := io.Copy(&chunk, resp.Body)\n\t\tresp.Body.Close()\n\n\t\tif err != nil {\n\t\t\td.setErr(err)\n\t\t}\n\t\td.incrWritten(n)\n\t}\n}\n\n\/\/ getTotalBytes is a thread-safe getter for retrieving the total byte status.\nfunc (d *downloader) getTotalBytes() int64 {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\treturn d.totalBytes\n}\n\n\/\/ setTotalBytes is a thread-safe setter for setting the total byte status.\n\/\/ Will extract the object's total bytes from the Content-Range if the file\n\/\/ will be chunked, or Content-Length. Content-Length is used when the response\n\/\/ does not include a Content-Range. Meaning the object was not chunked. This\n\/\/ occurs when the full file fits within the PartSize directive.\nfunc (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tif d.totalBytes >= 0 {\n\t\treturn\n\t}\n\n\tif resp.ContentRange == nil {\n\t\t\/\/ ContentRange is nil when the full file contents is provied, and\n\t\t\/\/ is not chunked. Use ContentLength instead.\n\t\tif resp.ContentLength != nil {\n\t\t\td.totalBytes = *resp.ContentLength\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tparts := strings.Split(*resp.ContentRange, \"\/\")\n\n\t\ttotal := int64(-1)\n\t\tvar err error\n\t\t\/\/ Checking for whether or not a numbered total exists\n\t\t\/\/ If one does not exist, we will assume the total to be -1, undefined,\n\t\t\/\/ and sequentially download each chunk until hitting a 416 error\n\t\ttotalStr := parts[len(parts)-1]\n\t\tif totalStr != \"*\" {\n\t\t\ttotal, err = strconv.ParseInt(totalStr, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\td.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\td.totalBytes = total\n\t}\n}\n\nfunc (d *downloader) incrWritten(n int64) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\td.written += n\n}\n\n\/\/ getErr is a thread-safe getter for the error object\nfunc (d *downloader) getErr() error {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\treturn d.err\n}\n\n\/\/ setErr is a thread-safe setter for the error object\nfunc (d *downloader) setErr(e error) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\td.err = e\n}\n\n\/\/ dlchunk represents a single chunk of data to write by the worker routine.\n\/\/ This structure also implements an io.SectionReader style interface for\n\/\/ io.WriterAt, effectively making it an io.SectionWriter (which does not\n\/\/ exist).\ntype dlchunk struct {\n\tw io.WriterAt\n\tstart int64\n\tsize int64\n\tcur int64\n}\n\n\/\/ Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start\n\/\/ position to its end (or EOF).\nfunc (c *dlchunk) Write(p []byte) (n int, err error) {\n\tif c.cur >= c.size {\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err = c.w.WriteAt(p, c.start+c.cur)\n\tc.cur += int64(n)\n\n\treturn\n}\n<commit_msg>Changing back the name<commit_after>package s3manager\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awsutil\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/client\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3iface\"\n)\n\n\/\/ DefaultDownloadPartSize is the default range of bytes to get at a time when\n\/\/ using Download().\nconst DefaultDownloadPartSize = 1024 * 1024 * 5\n\n\/\/ DefaultDownloadConcurrency is the default number of goroutines to spin up\n\/\/ when using Download().\nconst DefaultDownloadConcurrency = 5\n\n\/\/ The Downloader structure that calls Download(). It is safe to call Download()\n\/\/ on this structure for multiple objects and across concurrent goroutines.\n\/\/ Mutating the Downloader's properties is not safe to be done concurrently.\ntype Downloader struct {\n\t\/\/ The buffer size (in bytes) to use when buffering data into chunks and\n\t\/\/ sending them as parts to S3. The minimum allowed part size is 5MB, and\n\t\/\/ if this value is set to zero, the DefaultPartSize value will be used.\n\tPartSize int64\n\n\t\/\/ The number of goroutines to spin up in parallel when sending parts.\n\t\/\/ If this is set to zero, the DefaultConcurrency value will be used.\n\tConcurrency int\n\n\t\/\/ An S3 client to use when performing downloads.\n\tS3 s3iface.S3API\n}\n\n\/\/ NewDownloader creates a new Downloader instance to downloads objects from\n\/\/ S3 in concurrent chunks. Pass in additional functional options to customize\n\/\/ the downloader behavior. Requires a client.ConfigProvider in order to create\n\/\/ a S3 service client. The session.Session satisfies the client.ConfigProvider\n\/\/ interface.\n\/\/\n\/\/ Example:\n\/\/ \/\/ The session the S3 Downloader will use\n\/\/ sess := session.New()\n\/\/\n\/\/ \/\/ Create a downloader with the session and default options\n\/\/ downloader := s3manager.NewDownloader(sess)\n\/\/\n\/\/ \/\/ Create a downloader with the session and custom options\n\/\/ downloader := s3manager.NewDownloader(sess, func(d *s3manager.Uploader) {\n\/\/ d.PartSize = 64 * 1024 * 1024 \/\/ 64MB per part\n\/\/ })\nfunc NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {\n\td := &Downloader{\n\t\tS3: s3.New(c),\n\t\tPartSize: DefaultDownloadPartSize,\n\t\tConcurrency: DefaultDownloadConcurrency,\n\t}\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\n\treturn d\n}\n\n\/\/ NewDownloaderWithClient creates a new Downloader instance to downloads\n\/\/ objects from S3 in concurrent chunks. Pass in additional functional\n\/\/ options to customize the downloader behavior. Requires a S3 service client\n\/\/ to make S3 API calls.\n\/\/\n\/\/ Example:\n\/\/ \/\/ The S3 client the S3 Downloader will use\n\/\/ s3Svc := s3.new(session.New())\n\/\/\n\/\/ \/\/ Create a downloader with the s3 client and default options\n\/\/ downloader := s3manager.NewDownloaderWithClient(s3Svc)\n\/\/\n\/\/ \/\/ Create a downloader with the s3 client and custom options\n\/\/ downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Uploader) {\n\/\/ d.PartSize = 64 * 1024 * 1024 \/\/ 64MB per part\n\/\/ })\nfunc NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {\n\td := &Downloader{\n\t\tS3: svc,\n\t\tPartSize: DefaultDownloadPartSize,\n\t\tConcurrency: DefaultDownloadConcurrency,\n\t}\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\n\treturn d\n}\n\n\/\/ Download downloads an object in S3 and writes the payload into w using\n\/\/ concurrent GET requests.\n\/\/\n\/\/ Additional functional options can be provided to configure the individual\n\/\/ upload. These options are copies of the Uploader instance Upload is called from.\n\/\/ Modifying the options will not impact the original Uploader instance.\n\/\/\n\/\/ It is safe to call this method concurrently across goroutines.\n\/\/\n\/\/ The w io.WriterAt can be satisfied by an os.File to do multipart concurrent\n\/\/ downloads, or in memory []byte wrapper using aws.WriteAtBuffer.\nfunc (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {\n\timpl := downloader{w: w, in: input, ctx: d}\n\n\tfor _, option := range options {\n\t\toption(&impl.ctx)\n\t}\n\n\treturn impl.download()\n}\n\n\/\/ downloader is the implementation structure used internally by Downloader.\ntype downloader struct {\n\tctx Downloader\n\n\tin *s3.GetObjectInput\n\tw io.WriterAt\n\n\twg sync.WaitGroup\n\tm sync.Mutex\n\n\tpos int64\n\ttotalBytes int64\n\twritten int64\n\terr error\n}\n\n\/\/ init initializes the downloader with default options.\nfunc (d *downloader) init() {\n\td.totalBytes = -1\n\n\tif d.ctx.Concurrency == 0 {\n\t\td.ctx.Concurrency = DefaultDownloadConcurrency\n\t}\n\n\tif d.ctx.PartSize == 0 {\n\t\td.ctx.PartSize = DefaultDownloadPartSize\n\t}\n}\n\n\/\/ download performs the implementation of the object download across ranged\n\/\/ GETs.\nfunc (d *downloader) download() (n int64, err error) {\n\td.init()\n\n\t\/\/ Spin off first worker to check additional header information\n\td.getChunk()\n\n\tif total := d.getTotalBytes(); total >= 0 {\n\t\t\/\/ Spin up workers\n\t\tch := make(chan dlchunk, d.ctx.Concurrency)\n\n\t\tfor i := 0; i < d.ctx.Concurrency; i++ {\n\t\t\td.wg.Add(1)\n\t\t\tgo d.downloadPart(ch)\n\t\t}\n\n\t\t\/\/ Assign work\n\t\tfor d.getErr() == nil {\n\t\t\tif d.pos >= total {\n\t\t\t\tbreak \/\/ We're finished queueing chunks\n\t\t\t}\n\n\t\t\t\/\/ Queue the next range of bytes to read.\n\t\t\tch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}\n\t\t\td.pos += d.ctx.PartSize\n\t\t}\n\n\t\t\/\/ Wait for completion\n\t\tclose(ch)\n\t\td.wg.Wait()\n\t} else {\n\t\t\/\/ Checking if we read anything new\n\t\tfor d.err == nil {\n\t\t\td.getChunk()\n\t\t}\n\n\t\t\/\/ We expect a 416 error letting us know we are done downloading the\n\t\t\/\/ total bytes. Since we do not know the content's length, this will\n\t\t\/\/ keep grabbing chunks of data until the range of bytes specified in\n\t\t\/\/ the request is out of range of the content. Once, this happens, a\n\t\t\/\/ 416 should occur.\n\t\te, ok := d.err.(awserr.RequestFailure)\n\t\tif ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {\n\t\t\td.err = nil\n\t\t}\n\t}\n\n\t\/\/ Return error\n\treturn d.written, d.err\n}\n\n\/\/ downloadPart is an individual goroutine worker reading from the ch channel\n\/\/ and performing a GetObject request on the data with a given byte range.\n\/\/\n\/\/ If this is the first worker, this operation also resolves the total number\n\/\/ of bytes to be read so that the worker manager knows when it is finished.\nfunc (d *downloader) downloadPart(ch chan dlchunk) {\n\tdefer d.wg.Done()\n\tfor {\n\t\tchunk, ok := <-ch\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\td.downloadChunk(chunk)\n\t}\n}\n\n\/\/ getChunk grabs a chunk of data from the body.\n\/\/ Not thread safe. Should only used when grabbing data on a single thread.\nfunc (d *downloader) getChunk() {\n\tchunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}\n\td.pos += d.ctx.PartSize\n\td.downloadChunk(chunk)\n}\n\n\/\/ downloadChunk downloads the chunk froom s3\nfunc (d *downloader) downloadChunk(chunk dlchunk) {\n\tif d.getErr() != nil {\n\t\treturn\n\t}\n\t\/\/ Get the next byte range of data\n\tin := &s3.GetObjectInput{}\n\tawsutil.Copy(in, d.in)\n\trng := fmt.Sprintf(\"bytes=%d-%d\",\n\t\tchunk.start, chunk.start+chunk.size-1)\n\tin.Range = &rng\n\n\treq, resp := d.ctx.S3.GetObjectRequest(in)\n\treq.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(\"S3Manager\"))\n\terr := req.Send()\n\n\tif err != nil {\n\t\td.setErr(err)\n\t} else {\n\t\td.setTotalBytes(resp) \/\/ Set total if not yet set.\n\n\t\tn, err := io.Copy(&chunk, resp.Body)\n\t\tresp.Body.Close()\n\n\t\tif err != nil {\n\t\t\td.setErr(err)\n\t\t}\n\t\td.incrWritten(n)\n\t}\n}\n\n\/\/ getTotalBytes is a thread-safe getter for retrieving the total byte status.\nfunc (d *downloader) getTotalBytes() int64 {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\treturn d.totalBytes\n}\n\n\/\/ setTotalBytes is a thread-safe setter for setting the total byte status.\n\/\/ Will extract the object's total bytes from the Content-Range if the file\n\/\/ will be chunked, or Content-Length. Content-Length is used when the response\n\/\/ does not include a Content-Range. Meaning the object was not chunked. This\n\/\/ occurs when the full file fits within the PartSize directive.\nfunc (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tif d.totalBytes >= 0 {\n\t\treturn\n\t}\n\n\tif resp.ContentRange == nil {\n\t\t\/\/ ContentRange is nil when the full file contents is provied, and\n\t\t\/\/ is not chunked. Use ContentLength instead.\n\t\tif resp.ContentLength != nil {\n\t\t\td.totalBytes = *resp.ContentLength\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tparts := strings.Split(*resp.ContentRange, \"\/\")\n\n\t\ttotal := int64(-1)\n\t\tvar err error\n\t\t\/\/ Checking for whether or not a numbered total exists\n\t\t\/\/ If one does not exist, we will assume the total to be -1, undefined,\n\t\t\/\/ and sequentially download each chunk until hitting a 416 error\n\t\ttotalStr := parts[len(parts)-1]\n\t\tif totalStr != \"*\" {\n\t\t\ttotal, err = strconv.ParseInt(totalStr, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\td.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\td.totalBytes = total\n\t}\n}\n\nfunc (d *downloader) incrWritten(n int64) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\td.written += n\n}\n\n\/\/ getErr is a thread-safe getter for the error object\nfunc (d *downloader) getErr() error {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\treturn d.err\n}\n\n\/\/ setErr is a thread-safe setter for the error object\nfunc (d *downloader) setErr(e error) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\td.err = e\n}\n\n\/\/ dlchunk represents a single chunk of data to write by the worker routine.\n\/\/ This structure also implements an io.SectionReader style interface for\n\/\/ io.WriterAt, effectively making it an io.SectionWriter (which does not\n\/\/ exist).\ntype dlchunk struct {\n\tw io.WriterAt\n\tstart int64\n\tsize int64\n\tcur int64\n}\n\n\/\/ Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start\n\/\/ position to its end (or EOF).\nfunc (c *dlchunk) Write(p []byte) (n int, err error) {\n\tif c.cur >= c.size {\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err = c.w.WriteAt(p, c.start+c.cur)\n\tc.cur += int64(n)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/solefaucet\/sole-server\/errors\"\n\t\"github.com\/solefaucet\/sole-server\/models\"\n)\n\n\/\/ GetRewardIncomes get user's reward incomes\nfunc (s Storage) GetRewardIncomes(userID int64, limit, offset int64) ([]models.Income, error) {\n\trawSQL := \"SELECT * FROM incomes WHERE `user_id` = ? AND `type` = ? ORDER BY `id` DESC LIMIT ? OFFSET ?\"\n\targs := []interface{}{userID, models.IncomeTypeReward, limit, offset}\n\tincomes := []models.Income{}\n\terr := s.selects(&incomes, rawSQL, args...)\n\treturn incomes, err\n}\n\n\/\/ GetNumberOfRewardIncomes gets number of user's reward incomes\nfunc (s Storage) GetNumberOfRewardIncomes(userID int64) (int64, error) {\n\tvar count int64\n\terr := s.db.QueryRowx(\"SELECT COUNT(*) FROM incomes WHERE `user_id` = ? AND `type` = ?\", userID, models.IncomeTypeReward).Scan(&count)\n\treturn count, err\n}\n\n\/\/ CreateRewardIncome creates a new reward type income\nfunc (s Storage) CreateRewardIncome(income models.Income, now time.Time) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createRewardIncomeWithTx(tx, income, now); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create reward income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createRewardIncomeWithTx(tx *sqlx.Tx, income models.Income, now time.Time) error {\n\ttotalReward := income.Income\n\n\t\/\/ insert income into incomes table\n\tif _, err := addIncome(tx, income); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user rewarded_at\n\tif _, err := tx.Exec(\"UPDATE users SET `rewarded_at` = ? WHERE `id` = ?\", now, income.UserID); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif rowAffected, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t} else if rowAffected == 1 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\n\t\/\/ update total reward\n\tif err := incrementTotalReward(tx, totalReward, now); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ insert reward income into incomes table\nfunc addIncome(tx *sqlx.Tx, income models.Income) (sql.Result, error) {\n\tresult, err := tx.NamedExec(\"INSERT INTO incomes (`user_id`, `referer_id`, `type`, `income`, `referer_income`) VALUES (:user_id, :referer_id, :type, :income, :referer_income)\", income)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"add income error: %v\", err)\n\t}\n\n\treturn result, nil\n}\n\n\/\/ increment user balance, total_income, referer_total_income\nfunc incrementUserBalance(tx *sqlx.Tx, userID int64, delta, refererDelta float64) error {\n\trawSQL := \"UPDATE users SET `balance` = `balance` + ?, `total_income` = `total_income` + ?, `referer_total_income` = `referer_total_income` + ? WHERE id = ?\"\n\targs := []interface{}{delta, delta, refererDelta, userID}\n\tif result, err := tx.Exec(rawSQL, args...); err != nil {\n\t\treturn fmt.Errorf(\"increment user balance error: %v\", err)\n\t} else if rowAffected, _ := result.RowsAffected(); rowAffected != 1 {\n\t\treturn fmt.Errorf(\"increment user balance affected %v rows\", rowAffected)\n\t}\n\n\treturn nil\n}\n\n\/\/ increment referer balance\nfunc incrementRefererBalance(tx *sqlx.Tx, refererID int64, delta float64) (int64, error) {\n\tresult, err := tx.NamedExec(\"UPDATE users SET `balance` = `balance` + :delta, `total_income_from_referees` = `total_income_from_referees` + :delta WHERE id = :id\", map[string]interface{}{\n\t\t\"id\": refererID,\n\t\t\"delta\": delta,\n\t})\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"increment referer balance error: %v\", err)\n\t}\n\n\trowAffected, _ := result.RowsAffected()\n\treturn rowAffected, nil\n}\n\n\/\/ increment total reward\nfunc incrementTotalReward(tx *sqlx.Tx, totalReward float64, now time.Time) error {\n\tsql := \"INSERT INTO total_rewards (`total`, `created_at`) VALUES (:delta, :created_at) ON DUPLICATE KEY UPDATE `total` = `total` + :delta\"\n\targs := map[string]interface{}{\n\t\t\"delta\": totalReward,\n\t\t\"created_at\": now,\n\t}\n\n\tif _, err := tx.NamedExec(sql, args); err != nil {\n\t\treturn fmt.Errorf(\"increment total reward error: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetOfferwowEventByID finds offerwow event by event id\nfunc (s Storage) GetOfferwowEventByID(eventID string) (models.OfferwowEvent, error) {\n\tevent := models.OfferwowEvent{}\n\terr := s.db.Get(&event, \"SELECT * FROM `offerwow` WHERE `event_id` = ?\", eventID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn event, errors.ErrNotFound\n\t\t}\n\n\t\treturn event, fmt.Errorf(\"query offerwow event by id error: %v\", err)\n\t}\n\n\treturn event, nil\n}\n\n\/\/ CreateOfferwowIncome creates a new offerwow type income\nfunc (s Storage) CreateOfferwowIncome(income models.Income, eventID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createOfferwowIncomeWithTx(tx, income, eventID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create offerwow income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createOfferwowIncomeWithTx(tx *sqlx.Tx, income models.Income, eventID string) error {\n\t\/\/ insert offerwow income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert offerwow event\n\tofferwowEvent := models.OfferwowEvent{\n\t\tEventID: eventID,\n\t\tIncomeID: id,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `offerwow` (`event_id`, `income_id`, `amount`) VALUE (:event_id, :income_id, :amount)\", offerwowEvent)\n\treturn err\n}\n\n\/\/ GetSuperrewardsOfferByID finds superrewards offer by transaction Id and user id\nfunc (s Storage) GetSuperrewardsOfferByID(transactionID string, userID int64) (models.SuperrewardsOffer, error) {\n\toffer := models.SuperrewardsOffer{}\n\terr := s.db.Get(&offer, \"SELECT * FROM `superrewards` WHERE `transaction_id` = ? AND `user_id` = ?\", transactionID, userID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn offer, errors.ErrNotFound\n\t\t}\n\n\t\treturn offer, fmt.Errorf(\"query superrewards offer by id error: %v\", err)\n\t}\n\n\treturn offer, nil\n}\n\n\/\/ CreateSuperrewardsIncome creates a new superrewards type income\nfunc (s Storage) CreateSuperrewardsIncome(income models.Income, transactionID, offerID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createSuperrewardsIncomeWithTx(tx, income, transactionID, offerID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create superrewards income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createSuperrewardsIncomeWithTx(tx *sqlx.Tx, income models.Income, transactionID, offerID string) error {\n\t\/\/ insert superrewards income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert superrewards offer\n\toffer := models.SuperrewardsOffer{\n\t\tIncomeID: id,\n\t\tUserID: income.UserID,\n\t\tTransactionID: transactionID,\n\t\tOfferID: offerID,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `superrewards` (`income_id`, `user_id`, `transaction_id`, `offer_id`, `amount`) VALUE (:income_id, :user_id, :transaction_id, :offer_id, :amount)\", offer)\n\treturn err\n}\n<commit_msg>Fix vetshadow issue<commit_after>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/solefaucet\/sole-server\/errors\"\n\t\"github.com\/solefaucet\/sole-server\/models\"\n)\n\n\/\/ GetRewardIncomes get user's reward incomes\nfunc (s Storage) GetRewardIncomes(userID int64, limit, offset int64) ([]models.Income, error) {\n\trawSQL := \"SELECT * FROM incomes WHERE `user_id` = ? AND `type` = ? ORDER BY `id` DESC LIMIT ? OFFSET ?\"\n\targs := []interface{}{userID, models.IncomeTypeReward, limit, offset}\n\tincomes := []models.Income{}\n\terr := s.selects(&incomes, rawSQL, args...)\n\treturn incomes, err\n}\n\n\/\/ GetNumberOfRewardIncomes gets number of user's reward incomes\nfunc (s Storage) GetNumberOfRewardIncomes(userID int64) (int64, error) {\n\tvar count int64\n\terr := s.db.QueryRowx(\"SELECT COUNT(*) FROM incomes WHERE `user_id` = ? AND `type` = ?\", userID, models.IncomeTypeReward).Scan(&count)\n\treturn count, err\n}\n\n\/\/ CreateRewardIncome creates a new reward type income\nfunc (s Storage) CreateRewardIncome(income models.Income, now time.Time) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createRewardIncomeWithTx(tx, income, now); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create reward income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createRewardIncomeWithTx(tx *sqlx.Tx, income models.Income, now time.Time) error {\n\ttotalReward := income.Income\n\n\t\/\/ insert income into incomes table\n\tif _, err := addIncome(tx, income); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user rewarded_at\n\tif _, err := tx.Exec(\"UPDATE users SET `rewarded_at` = ? WHERE `id` = ?\", now, income.UserID); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif rowAffected, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t} else if rowAffected == 1 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\n\t\/\/ update total reward\n\tif err := incrementTotalReward(tx, totalReward, now); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ insert reward income into incomes table\nfunc addIncome(tx *sqlx.Tx, income models.Income) (sql.Result, error) {\n\tresult, err := tx.NamedExec(\"INSERT INTO incomes (`user_id`, `referer_id`, `type`, `income`, `referer_income`) VALUES (:user_id, :referer_id, :type, :income, :referer_income)\", income)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"add income error: %v\", err)\n\t}\n\n\treturn result, nil\n}\n\n\/\/ increment user balance, total_income, referer_total_income\nfunc incrementUserBalance(tx *sqlx.Tx, userID int64, delta, refererDelta float64) error {\n\trawSQL := \"UPDATE users SET `balance` = `balance` + ?, `total_income` = `total_income` + ?, `referer_total_income` = `referer_total_income` + ? WHERE id = ?\"\n\targs := []interface{}{delta, delta, refererDelta, userID}\n\tif result, err := tx.Exec(rawSQL, args...); err != nil {\n\t\treturn fmt.Errorf(\"increment user balance error: %v\", err)\n\t} else if rowAffected, _ := result.RowsAffected(); rowAffected != 1 {\n\t\treturn fmt.Errorf(\"increment user balance affected %v rows\", rowAffected)\n\t}\n\n\treturn nil\n}\n\n\/\/ increment referer balance\nfunc incrementRefererBalance(tx *sqlx.Tx, refererID int64, delta float64) (int64, error) {\n\tresult, err := tx.NamedExec(\"UPDATE users SET `balance` = `balance` + :delta, `total_income_from_referees` = `total_income_from_referees` + :delta WHERE id = :id\", map[string]interface{}{\n\t\t\"id\": refererID,\n\t\t\"delta\": delta,\n\t})\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"increment referer balance error: %v\", err)\n\t}\n\n\trowAffected, _ := result.RowsAffected()\n\treturn rowAffected, nil\n}\n\n\/\/ increment total reward\nfunc incrementTotalReward(tx *sqlx.Tx, totalReward float64, now time.Time) error {\n\tsql := \"INSERT INTO total_rewards (`total`, `created_at`) VALUES (:delta, :created_at) ON DUPLICATE KEY UPDATE `total` = `total` + :delta\"\n\targs := map[string]interface{}{\n\t\t\"delta\": totalReward,\n\t\t\"created_at\": now,\n\t}\n\n\tif _, err := tx.NamedExec(sql, args); err != nil {\n\t\treturn fmt.Errorf(\"increment total reward error: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetOfferwowEventByID finds offerwow event by event id\nfunc (s Storage) GetOfferwowEventByID(eventID string) (models.OfferwowEvent, error) {\n\tevent := models.OfferwowEvent{}\n\terr := s.db.Get(&event, \"SELECT * FROM `offerwow` WHERE `event_id` = ?\", eventID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn event, errors.ErrNotFound\n\t\t}\n\n\t\treturn event, fmt.Errorf(\"query offerwow event by id error: %v\", err)\n\t}\n\n\treturn event, nil\n}\n\n\/\/ CreateOfferwowIncome creates a new offerwow type income\nfunc (s Storage) CreateOfferwowIncome(income models.Income, eventID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createOfferwowIncomeWithTx(tx, income, eventID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create offerwow income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createOfferwowIncomeWithTx(tx *sqlx.Tx, income models.Income, eventID string) error {\n\t\/\/ insert offerwow income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err = incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err = incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert offerwow event\n\tofferwowEvent := models.OfferwowEvent{\n\t\tEventID: eventID,\n\t\tIncomeID: id,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `offerwow` (`event_id`, `income_id`, `amount`) VALUE (:event_id, :income_id, :amount)\", offerwowEvent)\n\treturn err\n}\n\n\/\/ GetSuperrewardsOfferByID finds superrewards offer by transaction Id and user id\nfunc (s Storage) GetSuperrewardsOfferByID(transactionID string, userID int64) (models.SuperrewardsOffer, error) {\n\toffer := models.SuperrewardsOffer{}\n\terr := s.db.Get(&offer, \"SELECT * FROM `superrewards` WHERE `transaction_id` = ? AND `user_id` = ?\", transactionID, userID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn offer, errors.ErrNotFound\n\t\t}\n\n\t\treturn offer, fmt.Errorf(\"query superrewards offer by id error: %v\", err)\n\t}\n\n\treturn offer, nil\n}\n\n\/\/ CreateSuperrewardsIncome creates a new superrewards type income\nfunc (s Storage) CreateSuperrewardsIncome(income models.Income, transactionID, offerID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createSuperrewardsIncomeWithTx(tx, income, transactionID, offerID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create superrewards income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createSuperrewardsIncomeWithTx(tx *sqlx.Tx, income models.Income, transactionID, offerID string) error {\n\t\/\/ insert superrewards income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err = incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err = incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert superrewards offer\n\toffer := models.SuperrewardsOffer{\n\t\tIncomeID: id,\n\t\tUserID: income.UserID,\n\t\tTransactionID: transactionID,\n\t\tOfferID: offerID,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `superrewards` (`income_id`, `user_id`, `transaction_id`, `offer_id`, `amount`) VALUE (:income_id, :user_id, :transaction_id, :offer_id, :amount)\", offer)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/solefaucet\/sole-server\/errors\"\n\t\"github.com\/solefaucet\/sole-server\/models\"\n)\n\n\/\/ GetRewardIncomes get user's reward incomes\nfunc (s Storage) GetRewardIncomes(userID int64, limit, offset int64) ([]models.Income, error) {\n\trawSQL := \"SELECT * FROM incomes WHERE `user_id` = ? AND `type` = ? ORDER BY `id` DESC LIMIT ? OFFSET ?\"\n\targs := []interface{}{userID, models.IncomeTypeReward, limit, offset}\n\tincomes := []models.Income{}\n\terr := s.selects(&incomes, rawSQL, args...)\n\treturn incomes, err\n}\n\n\/\/ GetNumberOfRewardIncomes gets number of user's reward incomes\nfunc (s Storage) GetNumberOfRewardIncomes(userID int64) (int64, error) {\n\tvar count int64\n\terr := s.db.QueryRowx(\"SELECT COUNT(*) FROM incomes WHERE `user_id` = ? AND `type` = ?\", userID, models.IncomeTypeReward).Scan(&count)\n\treturn count, err\n}\n\n\/\/ CreateRewardIncome creates a new reward type income\nfunc (s Storage) CreateRewardIncome(income models.Income, now time.Time) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createRewardIncomeWithTx(tx, income, now); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create reward income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createRewardIncomeWithTx(tx *sqlx.Tx, income models.Income, now time.Time) error {\n\ttotalReward := income.Income\n\n\t\/\/ insert income into incomes table\n\tif _, err := addIncome(tx, income); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user rewarded_at\n\tif _, err := tx.Exec(\"UPDATE users SET `rewarded_at` = ? WHERE `id` = ?\", now, income.UserID); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif rowAffected, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t} else if rowAffected == 1 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\n\t\/\/ update total reward\n\tif err := incrementTotalReward(tx, totalReward, now); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ insert reward income into incomes table\nfunc addIncome(tx *sqlx.Tx, income models.Income) (sql.Result, error) {\n\tresult, err := tx.NamedExec(\"INSERT INTO incomes (`user_id`, `referer_id`, `type`, `income`, `referer_income`) VALUES (:user_id, :referer_id, :type, :income, :referer_income)\", income)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"add income error: %v\", err)\n\t}\n\n\treturn result, nil\n}\n\n\/\/ increment user balance, total_income, referer_total_income\nfunc incrementUserBalance(tx *sqlx.Tx, userID int64, delta, refererDelta float64) error {\n\trawSQL := \"UPDATE users SET `balance` = `balance` + ?, `total_income` = `total_income` + ?, `referer_total_income` = `referer_total_income` + ? WHERE id = ?\"\n\targs := []interface{}{delta, delta, refererDelta, userID}\n\tif result, err := tx.Exec(rawSQL, args...); err != nil {\n\t\treturn fmt.Errorf(\"increment user balance error: %v\", err)\n\t} else if rowAffected, _ := result.RowsAffected(); rowAffected != 1 {\n\t\treturn fmt.Errorf(\"increment user balance affected %v rows\", rowAffected)\n\t}\n\n\treturn nil\n}\n\n\/\/ increment referer balance\nfunc incrementRefererBalance(tx *sqlx.Tx, refererID int64, delta float64) (int64, error) {\n\tresult, err := tx.NamedExec(\"UPDATE users SET `balance` = `balance` + :delta, `total_income_from_referees` = `total_income_from_referees` + :delta WHERE id = :id\", map[string]interface{}{\n\t\t\"id\": refererID,\n\t\t\"delta\": delta,\n\t})\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"increment referer balance error: %v\", err)\n\t}\n\n\trowAffected, _ := result.RowsAffected()\n\treturn rowAffected, nil\n}\n\n\/\/ increment total reward\nfunc incrementTotalReward(tx *sqlx.Tx, totalReward float64, now time.Time) error {\n\tsql := \"INSERT INTO total_rewards (`total`, `created_at`) VALUES (:delta, :created_at) ON DUPLICATE KEY UPDATE `total` = `total` + :delta\"\n\targs := map[string]interface{}{\n\t\t\"delta\": totalReward,\n\t\t\"created_at\": now,\n\t}\n\n\tif _, err := tx.NamedExec(sql, args); err != nil {\n\t\treturn fmt.Errorf(\"increment total reward error: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetOfferwowEventByID finds offerwow event by event id\nfunc (s Storage) GetOfferwowEventByID(eventID string) (models.OfferwowEvent, error) {\n\tevent := models.OfferwowEvent{}\n\terr := s.db.Get(&event, \"SELECT * FROM `offerwow` WHERE `event_id` = ?\", eventID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn event, errors.ErrNotFound\n\t\t}\n\n\t\treturn event, fmt.Errorf(\"query offerwow event by id error: %v\", err)\n\t}\n\n\treturn event, nil\n}\n\n\/\/ CreateOfferwowIncome creates a new offerwow type income\nfunc (s Storage) CreateOfferwowIncome(income models.Income, eventID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createOfferwowIncomeWithTx(tx, income, eventID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create offerwow income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createOfferwowIncomeWithTx(tx *sqlx.Tx, income models.Income, eventID string) error {\n\t\/\/ insert offerwow income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert offerwow event\n\tofferwowEvent := models.OfferwowEvent{\n\t\tEventID: eventID,\n\t\tIncomeID: id,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `offerwow` (`event_id`, `income_id`, `amount`) VALUE (:event_id, :income_id, :amount)\", offerwowEvent)\n\treturn err\n}\n\n\/\/ GetSuperrewardsOfferByID finds superrewards offer by transaction Id and user id\nfunc (s Storage) GetSuperrewardsOfferByID(transactionID string, userID int64) (models.SuperrewardsOffer, error) {\n\toffer := models.SuperrewardsOffer{}\n\terr := s.db.Get(&offer, \"SELECT * FROM `superrewards` WHERE `transaction_id` = ? AND `user_id` = ?\", transactionID, userID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn offer, errors.ErrNotFound\n\t\t}\n\n\t\treturn offer, fmt.Errorf(\"query superrewards offer by id error: %v\", err)\n\t}\n\n\treturn offer, nil\n}\n\n\/\/ CreateSuperrewardsIncome creates a new superrewards type income\nfunc (s Storage) CreateSuperrewardsIncome(income models.Income, transactionID, offerID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createSuperrewardsIncomeWithTx(tx, income, transactionID, offerID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create reward income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createSuperrewardsIncomeWithTx(tx *sqlx.Tx, income models.Income, transactionID, offerID string) error {\n\t\/\/ insert superrewards income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert superrewards offer\n\toffer := models.SuperrewardsOffer{\n\t\tIncomeID: id,\n\t\tUserID: income.UserID,\n\t\tTransactionID: transactionID,\n\t\tOfferID: offerID,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `superrewards` (`income_id`, `user_id`, `transaction_id`, `offer_id`, `amount`) VALUE (:income_id, :user_id, :transaction_id, :offer_id, :amount)\", offer)\n\treturn err\n}\n<commit_msg>Fix typo<commit_after>package mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/solefaucet\/sole-server\/errors\"\n\t\"github.com\/solefaucet\/sole-server\/models\"\n)\n\n\/\/ GetRewardIncomes get user's reward incomes\nfunc (s Storage) GetRewardIncomes(userID int64, limit, offset int64) ([]models.Income, error) {\n\trawSQL := \"SELECT * FROM incomes WHERE `user_id` = ? AND `type` = ? ORDER BY `id` DESC LIMIT ? OFFSET ?\"\n\targs := []interface{}{userID, models.IncomeTypeReward, limit, offset}\n\tincomes := []models.Income{}\n\terr := s.selects(&incomes, rawSQL, args...)\n\treturn incomes, err\n}\n\n\/\/ GetNumberOfRewardIncomes gets number of user's reward incomes\nfunc (s Storage) GetNumberOfRewardIncomes(userID int64) (int64, error) {\n\tvar count int64\n\terr := s.db.QueryRowx(\"SELECT COUNT(*) FROM incomes WHERE `user_id` = ? AND `type` = ?\", userID, models.IncomeTypeReward).Scan(&count)\n\treturn count, err\n}\n\n\/\/ CreateRewardIncome creates a new reward type income\nfunc (s Storage) CreateRewardIncome(income models.Income, now time.Time) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createRewardIncomeWithTx(tx, income, now); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create reward income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createRewardIncomeWithTx(tx *sqlx.Tx, income models.Income, now time.Time) error {\n\ttotalReward := income.Income\n\n\t\/\/ insert income into incomes table\n\tif _, err := addIncome(tx, income); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user rewarded_at\n\tif _, err := tx.Exec(\"UPDATE users SET `rewarded_at` = ? WHERE `id` = ?\", now, income.UserID); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif rowAffected, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t} else if rowAffected == 1 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\n\t\/\/ update total reward\n\tif err := incrementTotalReward(tx, totalReward, now); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ insert reward income into incomes table\nfunc addIncome(tx *sqlx.Tx, income models.Income) (sql.Result, error) {\n\tresult, err := tx.NamedExec(\"INSERT INTO incomes (`user_id`, `referer_id`, `type`, `income`, `referer_income`) VALUES (:user_id, :referer_id, :type, :income, :referer_income)\", income)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"add income error: %v\", err)\n\t}\n\n\treturn result, nil\n}\n\n\/\/ increment user balance, total_income, referer_total_income\nfunc incrementUserBalance(tx *sqlx.Tx, userID int64, delta, refererDelta float64) error {\n\trawSQL := \"UPDATE users SET `balance` = `balance` + ?, `total_income` = `total_income` + ?, `referer_total_income` = `referer_total_income` + ? WHERE id = ?\"\n\targs := []interface{}{delta, delta, refererDelta, userID}\n\tif result, err := tx.Exec(rawSQL, args...); err != nil {\n\t\treturn fmt.Errorf(\"increment user balance error: %v\", err)\n\t} else if rowAffected, _ := result.RowsAffected(); rowAffected != 1 {\n\t\treturn fmt.Errorf(\"increment user balance affected %v rows\", rowAffected)\n\t}\n\n\treturn nil\n}\n\n\/\/ increment referer balance\nfunc incrementRefererBalance(tx *sqlx.Tx, refererID int64, delta float64) (int64, error) {\n\tresult, err := tx.NamedExec(\"UPDATE users SET `balance` = `balance` + :delta, `total_income_from_referees` = `total_income_from_referees` + :delta WHERE id = :id\", map[string]interface{}{\n\t\t\"id\": refererID,\n\t\t\"delta\": delta,\n\t})\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"increment referer balance error: %v\", err)\n\t}\n\n\trowAffected, _ := result.RowsAffected()\n\treturn rowAffected, nil\n}\n\n\/\/ increment total reward\nfunc incrementTotalReward(tx *sqlx.Tx, totalReward float64, now time.Time) error {\n\tsql := \"INSERT INTO total_rewards (`total`, `created_at`) VALUES (:delta, :created_at) ON DUPLICATE KEY UPDATE `total` = `total` + :delta\"\n\targs := map[string]interface{}{\n\t\t\"delta\": totalReward,\n\t\t\"created_at\": now,\n\t}\n\n\tif _, err := tx.NamedExec(sql, args); err != nil {\n\t\treturn fmt.Errorf(\"increment total reward error: %v\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetOfferwowEventByID finds offerwow event by event id\nfunc (s Storage) GetOfferwowEventByID(eventID string) (models.OfferwowEvent, error) {\n\tevent := models.OfferwowEvent{}\n\terr := s.db.Get(&event, \"SELECT * FROM `offerwow` WHERE `event_id` = ?\", eventID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn event, errors.ErrNotFound\n\t\t}\n\n\t\treturn event, fmt.Errorf(\"query offerwow event by id error: %v\", err)\n\t}\n\n\treturn event, nil\n}\n\n\/\/ CreateOfferwowIncome creates a new offerwow type income\nfunc (s Storage) CreateOfferwowIncome(income models.Income, eventID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createOfferwowIncomeWithTx(tx, income, eventID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create offerwow income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createOfferwowIncomeWithTx(tx *sqlx.Tx, income models.Income, eventID string) error {\n\t\/\/ insert offerwow income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert offerwow event\n\tofferwowEvent := models.OfferwowEvent{\n\t\tEventID: eventID,\n\t\tIncomeID: id,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `offerwow` (`event_id`, `income_id`, `amount`) VALUE (:event_id, :income_id, :amount)\", offerwowEvent)\n\treturn err\n}\n\n\/\/ GetSuperrewardsOfferByID finds superrewards offer by transaction Id and user id\nfunc (s Storage) GetSuperrewardsOfferByID(transactionID string, userID int64) (models.SuperrewardsOffer, error) {\n\toffer := models.SuperrewardsOffer{}\n\terr := s.db.Get(&offer, \"SELECT * FROM `superrewards` WHERE `transaction_id` = ? AND `user_id` = ?\", transactionID, userID)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn offer, errors.ErrNotFound\n\t\t}\n\n\t\treturn offer, fmt.Errorf(\"query superrewards offer by id error: %v\", err)\n\t}\n\n\treturn offer, nil\n}\n\n\/\/ CreateSuperrewardsIncome creates a new superrewards type income\nfunc (s Storage) CreateSuperrewardsIncome(income models.Income, transactionID, offerID string) error {\n\ttx := s.db.MustBegin()\n\n\tif err := createSuperrewardsIncomeWithTx(tx, income, transactionID, offerID); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\t\/\/ commit\n\tif err := tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"create superrewards income commit transaction error: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc createSuperrewardsIncomeWithTx(tx *sqlx.Tx, income models.Income, transactionID, offerID string) error {\n\t\/\/ insert superrewards income into incomes table\n\tresult, err := addIncome(tx, income)\n\tif err != nil {\n\t\treturn err\n\t}\n\tid, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update user balance, total_income, referer_total_income\n\tif err := incrementUserBalance(tx, income.UserID, income.Income, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ update referer balance\n\tif _, err := incrementRefererBalance(tx, income.RefererID, income.RefererIncome); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ insert superrewards offer\n\toffer := models.SuperrewardsOffer{\n\t\tIncomeID: id,\n\t\tUserID: income.UserID,\n\t\tTransactionID: transactionID,\n\t\tOfferID: offerID,\n\t\tAmount: income.Income,\n\t}\n\t_, err = tx.NamedExec(\"INSERT INTO `superrewards` (`income_id`, `user_id`, `transaction_id`, `offer_id`, `amount`) VALUE (:income_id, :user_id, :transaction_id, :offer_id, :amount)\", offer)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package tags manages the tags in a struct.\npackage tags\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/xgfone\/go-tools\/log2\"\n)\n\n\/\/ Tag represents a tag of a field in a struct.\ntype Tag struct {\n\t\/\/ The field name which the tag belongs to.\n\tField string\n\n\t\/\/ The name of the tag.\n\tName string\n\n\t\/\/ The value of the tag which defined in Field.\n\tValue string\n}\n\n\/\/ Tags is a struct to manage the tags of a struct.\ntype Tags struct {\n\tdebug bool\n\tname string\n\tcaches []*Tag\n\n\tf2t map[string]map[string]string\n\tt2f map[string]map[string]string\n}\n\n\/\/ New returns a new Tag to manage the tags in a certain struct.\n\/\/\n\/\/ v is a struct variable or a pointer to a struct.\n\/\/\n\/\/ If passing the second argument and it's true, it will enable the debug mode,\n\/\/ which will output the procedure building the tags.\nfunc New(v interface{}, debug ...bool) *Tags {\n\ttyp := reflect.TypeOf(v)\n\tif typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\tif typ.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\n\tsname := typ.Name()\n\ttags := newTag(sname)\n\tnf := typ.NumField()\n\tvar tag *Tag\n\tfor i := 0; i < nf; i++ {\n\t\tfield := typ.Field(i)\n\t\tfname := field.Name\n\n\t\ttags.f2t[fname] = make(map[string]string)\n\t\tfor k, v := range GetAllTags(field.Tag) {\n\t\t\ttags.debugf(\"Struct=%s, Field=%s, TagName=%s, TagValue=%s\",\n\t\t\t\tsname, fname, k, v)\n\n\t\t\ttag = &Tag{Field: fname, Name: k, Value: v}\n\t\t\ttags.caches = append(tags.caches, tag)\n\t\t\ttags.f2t[fname][k] = v\n\n\t\t\tif tf, ok := tags.t2f[k]; ok {\n\t\t\t\ttf[fname] = v\n\t\t\t} else {\n\t\t\t\ttags.t2f[k] = map[string]string{\n\t\t\t\t\tfname: v,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags\n}\n\nfunc newTag(name string) *Tags {\n\treturn &Tags{\n\t\tname: name,\n\t\tcaches: make([]*Tag, 0),\n\n\t\tf2t: make(map[string]map[string]string),\n\t\tt2f: make(map[string]map[string]string),\n\t}\n}\n\nfunc (t Tags) debugf(format string, args ...interface{}) {\n\tif t.debug {\n\t\tlog2.DebugF(format, args...)\n\t}\n}\n\n\/\/ Name returns the name of Tag, which is the name of the struct.\nfunc (t Tags) Name() string {\n\treturn t.name\n}\n\n\/\/ GetAllValuesByTag returns the values of the corresponding tag.\n\/\/\n\/\/ Notice: There are more than one fields that has this tag.\nfunc (t Tags) GetAllValuesByTag(tag string) []string {\n\tif fv := t.t2f[tag]; fv != nil {\n\t\t_len := len(fv)\n\t\ti := 0\n\t\tvs := make([]string, _len)\n\t\tfor _, v := range fv {\n\t\t\tvs[i] = v\n\t\t\ti++\n\t\t}\n\t\treturn vs\n\t}\n\treturn nil\n}\n\n\/\/ GetAllFieldsAndValuesByTag is the same as GetValuesByTags(),\n\/\/ but also return the field name as the key.\nfunc (t Tags) GetAllFieldsAndValuesByTag(tag string) map[string]string {\n\tif v, ok := t.t2f[tag]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}\n\n\/\/ GetValueByFieldAndTag returns the value of the tag in a specified field.\n\/\/ Return an empty string if the field doesn't have the tag.\nfunc (t Tags) GetValueByFieldAndTag(field, tag string) string {\n\tif tv, ok := t.f2t[field]; ok {\n\t\tfor t, v := range tv {\n\t\t\tif tag == t {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\n\/\/ GetAllFieldsByTag returns the names of all the field where defined this tag.\n\/\/ Return nil if no field defines the tag.\nfunc (t Tags) GetAllFieldsByTag(tag string) []string {\n\tif fv, ok := t.t2f[tag]; ok {\n\t\t_len := len(fv)\n\t\ti := 0\n\t\tfs := make([]string, _len)\n\t\tfor f := range fv {\n\t\t\tfs[i] = f\n\t\t\ti++\n\t\t}\n\t\treturn fs\n\t}\n\n\treturn nil\n}\n\n\/\/ GetAllTagsAndValuesByField returns all the tags of the field.\n\/\/ Return nil if the field has no tags.\nfunc (t Tags) GetAllTagsAndValuesByField(field string) map[string]string {\n\tif v, ok := t.f2t[field]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}\n\n\/\/ GetAll returns all the information parsed by the tag manager.\n\/\/ Return nil if no tag is parsed. It's almost used to debug or traverse the\n\/\/ tags in all the fields.\n\/\/\n\/\/ The returned list is sorted on the basis of the order of the field which is\n\/\/ defined in the struct.\nfunc (t Tags) GetAll() []*Tag {\n\treturn t.caches\n}\n\n\/\/ Audit the result that the manager parses the tags upon the struct.\n\/\/ It's almost used to debug. If having a question about the built result, you\n\/\/ can use it and print the returned value to check.\n\/\/\n\/\/ For the example above, the returned format is like:\n\/\/ \tName: TagTest\n\/\/ \tFieldName=field1, TagName=tag1, TagValue=value1\n\/\/ \tFieldName=field2, TagName=tag2, TagValue=value2\n\/\/ \tFieldName=field3, TagName=tag3, TagValue=value3\n\/\/ \t......\nfunc (t Tags) Audit() string {\n\tbuf := bytes.NewBufferString(fmt.Sprintf(\"Name: %v\\n\", t.name))\n\n\tfor _, tag := range t.caches {\n\t\tbuf.WriteString(fmt.Sprintf(\"FieldName=%s, TagName=%s, TagValue=%s\\n\",\n\t\t\ttag.Field, tag.Name, tag.Value))\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ TravelByTag travels the information of the tag.\n\/\/\n\/\/ The type of the trvaeling function is func(string, string), which needs two\n\/\/ arguments and no return value. The first argument is the name of the field\n\/\/ where defined the tag, and the second is the value of the tag.\nfunc (t Tags) TravelByTag(tag string, f func(string, string)) {\n\tif fs, ok := t.t2f[tag]; ok {\n\t\tfor field, value := range fs {\n\t\t\tf(field, value)\n\t\t}\n\t}\n}\n\n\/\/ TravelByField travels the information of the field.\n\/\/\n\/\/ The type of the trvaeling function is func(string, string), which needs two\n\/\/ arguments and no return value. The first argument is the name of the tag,\n\/\/ and the second is the value of the tag.\nfunc (t Tags) TravelByField(field string, f func(string, string)) {\n\tif ts, ok := t.f2t[field]; ok {\n\t\tfor tag, value := range ts {\n\t\t\tf(tag, value)\n\t\t}\n\t}\n}\n<commit_msg>Use Tag instead of *Tag<commit_after>\/\/ Package tags manages the tags in a struct.\npackage tags\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Tag represents a tag of a field in a struct.\ntype Tag struct {\n\t\/\/ The field name which the tag belongs to.\n\tField string\n\n\t\/\/ The name of the tag.\n\tName string\n\n\t\/\/ The value of the tag which defined in Field.\n\tValue string\n}\n\n\/\/ Tags is a struct to manage the tags of a struct.\ntype Tags struct {\n\tdebug bool\n\tname string\n\tcaches []Tag\n\n\tf2t map[string]map[string]string\n\tt2f map[string]map[string]string\n}\n\n\/\/ New returns a new Tag to manage the tags in a certain struct.\n\/\/\n\/\/ v is a struct variable or a pointer to a struct.\n\/\/\n\/\/ If passing the second argument and it's true, it will enable the debug mode,\n\/\/ which will output the procedure building the tags.\nfunc New(v interface{}, debug ...bool) *Tags {\n\ttyp := reflect.TypeOf(v)\n\tif typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\tif typ.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\n\tsname := typ.Name()\n\ttags := newTag(sname)\n\tnf := typ.NumField()\n\tvar tag Tag\n\tfor i := 0; i < nf; i++ {\n\t\tfield := typ.Field(i)\n\t\tfname := field.Name\n\n\t\ttags.f2t[fname] = make(map[string]string)\n\t\tfor k, v := range GetAllTags(field.Tag) {\n\t\t\ttags.debugf(\"Struct=%s, Field=%s, TagName=%s, TagValue=%s\",\n\t\t\t\tsname, fname, k, v)\n\n\t\t\ttag = Tag{Field: fname, Name: k, Value: v}\n\t\t\ttags.caches = append(tags.caches, tag)\n\t\t\ttags.f2t[fname][k] = v\n\n\t\t\tif tf, ok := tags.t2f[k]; ok {\n\t\t\t\ttf[fname] = v\n\t\t\t} else {\n\t\t\t\ttags.t2f[k] = map[string]string{\n\t\t\t\t\tfname: v,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags\n}\n\nfunc newTag(name string) *Tags {\n\treturn &Tags{\n\t\tname: name,\n\t\tcaches: make([]Tag, 0),\n\n\t\tf2t: make(map[string]map[string]string),\n\t\tt2f: make(map[string]map[string]string),\n\t}\n}\n\nfunc (t Tags) debugf(format string, args ...interface{}) {\n\tif t.debug {\n\t\tfmt.Printf(format, args...)\n\t}\n}\n\n\/\/ Name returns the name of Tag, which is the name of the struct.\nfunc (t Tags) Name() string {\n\treturn t.name\n}\n\n\/\/ GetAllValuesByTag returns the values of the corresponding tag.\n\/\/\n\/\/ Notice: There are more than one fields that has this tag.\nfunc (t Tags) GetAllValuesByTag(tag string) []string {\n\tif fv := t.t2f[tag]; fv != nil {\n\t\t_len := len(fv)\n\t\ti := 0\n\t\tvs := make([]string, _len)\n\t\tfor _, v := range fv {\n\t\t\tvs[i] = v\n\t\t\ti++\n\t\t}\n\t\treturn vs\n\t}\n\treturn nil\n}\n\n\/\/ GetAllFieldsAndValuesByTag is the same as GetValuesByTags(),\n\/\/ but also return the field name as the key.\nfunc (t Tags) GetAllFieldsAndValuesByTag(tag string) map[string]string {\n\tif v, ok := t.t2f[tag]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}\n\n\/\/ GetValueByFieldAndTag returns the value of the tag in a specified field.\n\/\/ Return an empty string if the field doesn't have the tag.\nfunc (t Tags) GetValueByFieldAndTag(field, tag string) string {\n\tif tv, ok := t.f2t[field]; ok {\n\t\tfor t, v := range tv {\n\t\t\tif tag == t {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\n\/\/ GetAllFieldsByTag returns the names of all the field where defined this tag.\n\/\/ Return nil if no field defines the tag.\nfunc (t Tags) GetAllFieldsByTag(tag string) []string {\n\tif fv, ok := t.t2f[tag]; ok {\n\t\t_len := len(fv)\n\t\ti := 0\n\t\tfs := make([]string, _len)\n\t\tfor f := range fv {\n\t\t\tfs[i] = f\n\t\t\ti++\n\t\t}\n\t\treturn fs\n\t}\n\n\treturn nil\n}\n\n\/\/ GetAllTagsAndValuesByField returns all the tags of the field.\n\/\/ Return nil if the field has no tags.\nfunc (t Tags) GetAllTagsAndValuesByField(field string) map[string]string {\n\tif v, ok := t.f2t[field]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}\n\n\/\/ GetAll returns all the information parsed by the tag manager.\n\/\/ Return nil if no tag is parsed. It's almost used to debug or traverse the\n\/\/ tags in all the fields.\n\/\/\n\/\/ The returned list is sorted on the basis of the order of the field which is\n\/\/ defined in the struct.\nfunc (t Tags) GetAll() []Tag {\n\treturn t.caches\n}\n\n\/\/ Audit the result that the manager parses the tags upon the struct.\n\/\/ It's almost used to debug. If having a question about the built result, you\n\/\/ can use it and print the returned value to check.\n\/\/\n\/\/ For the example above, the returned format is like:\n\/\/ \tName: TagTest\n\/\/ \tFieldName=field1, TagName=tag1, TagValue=value1\n\/\/ \tFieldName=field2, TagName=tag2, TagValue=value2\n\/\/ \tFieldName=field3, TagName=tag3, TagValue=value3\n\/\/ \t......\nfunc (t Tags) Audit() string {\n\tbuf := bytes.NewBufferString(fmt.Sprintf(\"Name: %v\\n\", t.name))\n\n\tfor _, tag := range t.caches {\n\t\tbuf.WriteString(fmt.Sprintf(\"FieldName=%s, TagName=%s, TagValue=%s\\n\",\n\t\t\ttag.Field, tag.Name, tag.Value))\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ TravelByTag travels the information of the tag.\n\/\/\n\/\/ The type of the trvaeling function is func(string, string), which needs two\n\/\/ arguments and no return value. The first argument is the name of the field\n\/\/ where defined the tag, and the second is the value of the tag.\nfunc (t Tags) TravelByTag(tag string, f func(string, string)) {\n\tif fs, ok := t.t2f[tag]; ok {\n\t\tfor field, value := range fs {\n\t\t\tf(field, value)\n\t\t}\n\t}\n}\n\n\/\/ TravelByField travels the information of the field.\n\/\/\n\/\/ The type of the trvaeling function is func(string, string), which needs two\n\/\/ arguments and no return value. The first argument is the name of the tag,\n\/\/ and the second is the value of the tag.\nfunc (t Tags) TravelByField(field string, f func(string, string)) {\n\tif ts, ok := t.f2t[field]; ok {\n\t\tfor tag, value := range ts {\n\t\t\tf(tag, value)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/gandalf\/api\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\/exec\"\n)\n\nfunc startGitDaemon() error {\n\tbLocation, err := config.GetString(\"git:bare:location\")\n\tif err != nil {\n\t\treturn err\n\t}\n\targs := []string{\"daemon\", fmt.Sprintf(\"--base-path=%s\", bLocation), \"--syslog\"}\n\tif exportAll, err := config.GetBool(\"git:daemon:export-all\"); err == nil && exportAll {\n\t\targs = append(args, \"--export-all\")\n\t}\n\treturn exec.Command(\"git\", args...).Run()\n}\n\nfunc main() {\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server and git daemon (for testing purpose)\")\n\tconfigFile := flag.String(\"config\", \"\/etc\/gandalf.conf\", \"Gandalf configuration file\")\n\tflag.Parse()\n\n\terr := config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tmsg := `Could not find gandalf config file. Searched on %s.\nFor an example conf check gandalf\/etc\/gandalf.conf file.`\n\t\tlog.Panicf(msg, *configFile)\n\t}\n\trouter := pat.New()\n\trouter.Post(\"\/user\", http.HandlerFunc(api.NewUser))\n\trouter.Del(\"\/user\/:name\", http.HandlerFunc(api.RemoveUser))\n\trouter.Post(\"\/repository\", http.HandlerFunc(api.NewRepository))\n\trouter.Del(\"\/repository\/:name\", http.HandlerFunc(api.RemoveRepository))\n\n\tport, err := config.GetString(\"webserver:port\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tstartGitDaemon()\n\t\tlog.Fatal(http.ListenAndServe(port, router))\n\t}\n}\n<commit_msg>webserver: routing handlers that had no route<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bmizerany\/pat\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/gandalf\/api\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\/exec\"\n)\n\nfunc startGitDaemon() error {\n\tbLocation, err := config.GetString(\"git:bare:location\")\n\tif err != nil {\n\t\treturn err\n\t}\n\targs := []string{\"daemon\", fmt.Sprintf(\"--base-path=%s\", bLocation), \"--syslog\"}\n\tif exportAll, err := config.GetBool(\"git:daemon:export-all\"); err == nil && exportAll {\n\t\targs = append(args, \"--export-all\")\n\t}\n\treturn exec.Command(\"git\", args...).Run()\n}\n\nfunc main() {\n\tdry := flag.Bool(\"dry\", false, \"dry-run: does not start the server and git daemon (for testing purpose)\")\n\tconfigFile := flag.String(\"config\", \"\/etc\/gandalf.conf\", \"Gandalf configuration file\")\n\tflag.Parse()\n\n\terr := config.ReadConfigFile(*configFile)\n\tif err != nil {\n\t\tmsg := `Could not find gandalf config file. Searched on %s.\nFor an example conf check gandalf\/etc\/gandalf.conf file.`\n\t\tlog.Panicf(msg, *configFile)\n\t}\n\trouter := pat.New()\n\trouter.Post(\"\/user\/:name\/key\", http.HandlerFunc(api.AddKey))\n\trouter.Del(\"\/user\/:name\/key\/:keyname\", http.HandlerFunc(api.RemoveKey))\n\trouter.Post(\"\/user\", http.HandlerFunc(api.NewUser))\n\trouter.Del(\"\/user\/:name\", http.HandlerFunc(api.RemoveUser))\n\trouter.Post(\"\/repository\", http.HandlerFunc(api.NewRepository))\n\trouter.Get(\"\/repository\/:name\/grant\/:username\", http.HandlerFunc(api.GrantAccess))\n\trouter.Del(\"\/repository\/:name\/revoke\/:username\", http.HandlerFunc(api.RevokeAccess))\n\trouter.Del(\"\/repository\/:name\", http.HandlerFunc(api.RemoveRepository))\n\n\tport, err := config.GetString(\"webserver:port\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !*dry {\n\t\tstartGitDaemon()\n\t\tlog.Fatal(http.ListenAndServe(port, router))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"github.com\/flynn\/go-shlex\"\n\t\"github.com\/kr\/pty\"\n)\n\nvar host = flag.String(\"h\", \"\", \"host ip to listen on\")\nvar port = flag.String(\"p\", \"22\", \"port to listen on\")\nvar debug = flag.Bool(\"d\", false, \"debug mode displays handler output\")\nvar env = flag.Bool(\"e\", false, \"pass environment to handlers\")\nvar shell = flag.Bool(\"s\", false, \"run exec handler via SHELL\")\nvar keys = flag.String(\"k\", \"\", \"pem file of private keys (read from SSH_PRIVATE_KEYS by default)\")\n\nvar ErrUnauthorized = errors.New(\"execd: user is unauthorized\")\n\ntype exitStatusMsg struct {\n\tStatus uint32\n}\n\nfunc exitStatus(err error) (exitStatusMsg, error) {\n\tif err != nil {\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\t\/\/ There is no platform independent way to retrieve\n\t\t\t\/\/ the exit code, but the following will work on Unix\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\treturn exitStatusMsg{uint32(status.ExitStatus())}, nil\n\t\t\t}\n\t\t}\n\t\treturn exitStatusMsg{0}, err\n\t}\n\treturn exitStatusMsg{0}, nil\n}\n\nfunc attachCmd(cmd *exec.Cmd, stdout io.Writer, stderr io.Writer, stdin io.Reader) (*sync.WaitGroup, error) {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tif stdin != nil {\n\t\tstdinIn, err := cmd.StdinPipe()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgo func() {\n\t\t\tio.Copy(stdinIn, stdin)\n\t\t\tstdinIn.Close()\n\t\t\t\/\/ FIXME: Do we care that this is not part of the WaitGroup?\n\t\t}()\n\t}\n\n\tstdoutOut, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tio.Copy(stdout, stdoutOut)\n\t\twg.Done()\n\t}()\n\n\tstderrOut, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tio.Copy(stderr, stderrOut)\n\t\twg.Done()\n\t}()\n\n\treturn &wg, nil\n}\n\nfunc attachShell(cmd *exec.Cmd, stdout io.Writer, stdin io.Reader) (*os.File, *sync.WaitGroup, error) {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\t\/\/ Note that pty merges stdout and stderr.\n\tcmdPty, err := pty.Start(cmd)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tgo func() {\n\t\tio.Copy(stdout, cmdPty)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\tio.Copy(cmdPty, stdin)\n\t\twg.Done()\n\t}()\n\n\treturn cmdPty, &wg, nil\n}\n\nfunc addKey(conf *ssh.ServerConfig, block *pem.Block) (err error) {\n\tvar key interface{}\n\tswitch block.Type {\n\tcase \"RSA PRIVATE KEY\":\n\t\tkey, err = x509.ParsePKCS1PrivateKey(block.Bytes)\n\tcase \"EC PRIVATE KEY\":\n\t\tkey, err = x509.ParseECPrivateKey(block.Bytes)\n\tcase \"DSA PRIVATE KEY\":\n\t\tkey, err = ssh.ParseDSAPrivateKey(block.Bytes)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported key type %q\", block.Type)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigner, err := ssh.NewSignerFromKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf.AddHostKey(signer)\n\treturn nil\n}\n\nfunc parseKeys(conf *ssh.ServerConfig, pemData []byte) error {\n\tvar found bool\n\tfor {\n\t\tvar block *pem.Block\n\t\tblock, pemData = pem.Decode(pemData)\n\t\tif block == nil {\n\t\t\tif !found {\n\t\t\t\treturn errors.New(\"no private keys found\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err := addKey(conf, block); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfound = true\n\t}\n}\n\nfunc handleAuth(handler []string, conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\tkeydata := string(bytes.TrimSpace(ssh.MarshalAuthorizedKey(key)))\n\tcmd := exec.Command(handler[0], append(handler[1:], conn.User(), keydata)...)\n\tvar output bytes.Buffer\n\tdone, err := attachCmd(cmd, &output, &output, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = cmd.Run()\n\tdone.Wait()\n\tstatus, err := exitStatus(err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status.Status == 0 {\n\t\treturn &ssh.Permissions{\n\t\t\tExtensions: map[string]string{\n\t\t\t\t\"environ\": strings.Trim(output.String(), \"\\n\"),\n\t\t\t\t\"user\": conn.User(),\n\t\t\t},\n\t\t}, nil\n\t} else {\n\t\tlog.Println(\"auth-handler status:\", status.Status)\n\t}\n\treturn nil, ErrUnauthorized\n}\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %v [options] <auth-handler> <exec-handler>\\n\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif flag.NArg() < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(64)\n\t}\n\n\texecHandler, err := shlex.Split(flag.Arg(1))\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to parse receiver command:\", err)\n\t}\n\texecHandler[0], err = filepath.Abs(execHandler[0])\n\tif err != nil {\n\t\tlog.Fatalln(\"Invalid receiver path:\", err)\n\t}\n\n\tauthHandler, err := shlex.Split(flag.Arg(0))\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to parse authchecker command:\", err)\n\t}\n\tauthHandler[0], err = filepath.Abs(authHandler[0])\n\tif err != nil {\n\t\tlog.Fatalln(\"Invalid authchecker path:\", err)\n\t}\n\tconfig := &ssh.ServerConfig{\n\t\tPublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\t\t\treturn handleAuth(authHandler, conn, key)\n\t\t},\n\t}\n\n\tif keyEnv := os.Getenv(\"SSH_PRIVATE_KEYS\"); keyEnv != \"\" {\n\t\tif err := parseKeys(config, []byte(keyEnv)); err != nil {\n\t\t\tlog.Fatalln(\"Failed to parse private keys:\", err)\n\t\t}\n\t} else {\n\t\tpemBytes, err := ioutil.ReadFile(*keys)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Failed to load private keys:\", err)\n\t\t}\n\t\tif err := parseKeys(config, pemBytes); err != nil {\n\t\t\tlog.Fatalln(\"Failed to parse private keys:\", err)\n\t\t}\n\t}\n\n\tif p := os.Getenv(\"PORT\"); p != \"\" && *port == \"22\" {\n\t\t*port = p\n\t}\n\tlistener, err := net.Listen(\"tcp\", *host+\":\"+*port)\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed to listen for connections:\", err)\n\t}\n\tfor {\n\t\t\/\/ SSH connections just house multiplexed connections\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept incoming connection:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConn(conn, config, execHandler)\n\t}\n}\n\nfunc handleConn(conn net.Conn, conf *ssh.ServerConfig, execHandler []string) {\n\tdefer conn.Close()\n\tsshConn, chans, reqs, err := ssh.NewServerConn(conn, conf)\n\tif err != nil {\n\t\tlog.Println(\"Failed to handshake:\", err)\n\t\treturn\n\t}\n\n\tgo ssh.DiscardRequests(reqs)\n\n\tfor ch := range chans {\n\t\tif ch.ChannelType() != \"session\" {\n\t\t\tch.Reject(ssh.UnknownChannelType, \"unknown channel type\")\n\t\t\tcontinue\n\t\t}\n\t\tgo handleChannel(sshConn, ch, execHandler)\n\t}\n}\n\nfunc handleChannel(conn *ssh.ServerConn, newChan ssh.NewChannel, execHandler []string) {\n\tch, reqs, err := newChan.Accept()\n\tif err != nil {\n\t\tlog.Println(\"newChan.Accept failed:\", err)\n\t\treturn\n\t}\n\n\tassert := func(at string, err error) bool {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s failed: %s\", at, err)\n\t\t\tch.Stderr().Write([]byte(\"Internal error.\\n\"))\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tvar stdout, stderr io.Writer\n\tif *debug {\n\t\tstdout = io.MultiWriter(ch, os.Stdout)\n\t\tstderr = io.MultiWriter(ch.Stderr(), os.Stdout)\n\t} else {\n\t\tstdout = ch\n\t\tstderr = ch.Stderr()\n\t}\n\n\tvar ptyShell *os.File\n\n\tfor req := range reqs {\n\t\tswitch req.Type {\n\t\tcase \"exec\":\n\t\t\tdefer ch.Close()\n\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(true, nil)\n\t\t\t}\n\n\t\t\tcmdline := string(req.Payload[4:])\n\t\t\tvar cmd *exec.Cmd\n\t\t\tif *shell {\n\t\t\t\tshellcmd := flag.Arg(1) + \" \" + cmdline\n\t\t\t\tcmd = exec.Command(os.Getenv(\"SHELL\"), \"-c\", shellcmd)\n\t\t\t} else {\n\t\t\t\tcmdargs, err := shlex.Split(cmdline)\n\t\t\t\tif assert(\"shlex.Split\", err) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcmd = exec.Command(execHandler[0], append(execHandler[1:], cmdargs...)...)\n\t\t\t}\n\t\t\tif *env {\n\t\t\t\tcmd.Env = os.Environ()\n\t\t\t} else {\n\t\t\t\tcmd.Env = []string{}\n\t\t\t}\n\t\t\tif conn.Permissions != nil {\n\t\t\t\t\/\/ Using Permissions.Extensions as a way to get state from PublicKeyCallback\n\t\t\t\tif conn.Permissions.Extensions[\"environ\"] != \"\" {\n\t\t\t\t\tcmd.Env = append(cmd.Env, strings.Split(conn.Permissions.Extensions[\"environ\"], \"\\n\")...)\n\t\t\t\t}\n\t\t\t\tcmd.Env = append(cmd.Env, \"USER=\"+conn.Permissions.Extensions[\"user\"])\n\t\t\t}\n\t\t\tcmd.Env = append(cmd.Env, \"SSH_ORIGINAL_COMMAND=\"+cmdline)\n\t\t\tdone, err := attachCmd(cmd, stdout, stderr, ch)\n\t\t\tif assert(\"attachCmd\", err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif assert(\"cmd.Start\", cmd.Start()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdone.Wait()\n\t\t\tstatus, err := exitStatus(cmd.Wait())\n\t\t\tif assert(\"exitStatus\", err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = ch.SendRequest(\"exit-status\", false, ssh.Marshal(&status))\n\t\t\tassert(\"sendExit\", err)\n\t\t\treturn\n\t\tcase \"pty-req\":\n\t\t\twidth, height, okSize := parsePtyRequest(req.Payload)\n\n\t\t\tvar cmd *exec.Cmd\n\t\t\tif *shell {\n\t\t\t\tcmd = exec.Command(os.Getenv(\"SHELL\"))\n\t\t\t} else {\n\t\t\t\tcmd = exec.Command(execHandler[0], execHandler[1:]...)\n\t\t\t}\n\t\t\tif *env {\n\t\t\t\tcmd.Env = os.Environ()\n\t\t\t} else {\n\t\t\t\tcmd.Env = []string{}\n\t\t\t}\n\t\t\tif conn.Permissions != nil {\n\t\t\t\t\/\/ Using Permissions.Extensions as a way to get state from PublicKeyCallback\n\t\t\t\tif conn.Permissions.Extensions[\"environ\"] != \"\" {\n\t\t\t\t\tcmd.Env = append(cmd.Env, strings.Split(conn.Permissions.Extensions[\"environ\"], \"\\n\")...)\n\t\t\t\t}\n\t\t\t\tcmd.Env = append(cmd.Env, \"USER=\"+conn.Permissions.Extensions[\"user\"])\n\t\t\t}\n\t\t\tptyShell, _, err := attachShell(cmd, stdout, ch)\n\t\t\tif assert(\"attachShell\", err) {\n\t\t\t\tch.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif okSize {\n\t\t\t\tsetWinsize(ptyShell.Fd(), width, height)\n\t\t\t\treq.Reply(true, nil)\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\tstatus, err := exitStatus(cmd.Wait())\n\t\t\t\tif !assert(\"exitStatus\", err) {\n\t\t\t\t\t_, err := ch.SendRequest(\"exit-status\", false, ssh.Marshal(&status))\n\t\t\t\t\tassert(\"sendExit\", err)\n\t\t\t\t}\n\t\t\t\tch.Close()\n\t\t\t}()\n\t\tcase \"window-change\":\n\t\t\twidth, height, okSize := parsePtyRequest(req.Payload)\n\t\t\tif okSize {\n\t\t\t\tsetWinsize(ptyShell.Fd(), width, height)\n\t\t\t}\n\t\t}\n\n\t\tif req.WantReply {\n\t\t\treq.Reply(true, nil)\n\t\t}\n\t}\n}\n<commit_msg>Replace attachCmd.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"github.com\/flynn\/go-shlex\"\n\t\"github.com\/kr\/pty\"\n)\n\nvar host = flag.String(\"h\", \"\", \"host ip to listen on\")\nvar port = flag.String(\"p\", \"22\", \"port to listen on\")\nvar debug = flag.Bool(\"d\", false, \"debug mode displays handler output\")\nvar env = flag.Bool(\"e\", false, \"pass environment to handlers\")\nvar shell = flag.Bool(\"s\", false, \"run exec handler via SHELL\")\nvar keys = flag.String(\"k\", \"\", \"pem file of private keys (read from SSH_PRIVATE_KEYS by default)\")\n\nvar ErrUnauthorized = errors.New(\"execd: user is unauthorized\")\n\ntype exitStatusMsg struct {\n\tStatus uint32\n}\n\nfunc exitStatus(err error) (exitStatusMsg, error) {\n\tif err != nil {\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\t\/\/ There is no platform independent way to retrieve\n\t\t\t\/\/ the exit code, but the following will work on Unix\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\treturn exitStatusMsg{uint32(status.ExitStatus())}, nil\n\t\t\t}\n\t\t}\n\t\treturn exitStatusMsg{0}, err\n\t}\n\treturn exitStatusMsg{0}, nil\n}\n\nfunc attachShell(cmd *exec.Cmd, stdout io.Writer, stdin io.Reader) (*os.File, *sync.WaitGroup, error) {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\t\/\/ Note that pty merges stdout and stderr.\n\tcmdPty, err := pty.Start(cmd)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tgo func() {\n\t\tio.Copy(stdout, cmdPty)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\tio.Copy(cmdPty, stdin)\n\t\twg.Done()\n\t}()\n\n\treturn cmdPty, &wg, nil\n}\n\nfunc addKey(conf *ssh.ServerConfig, block *pem.Block) (err error) {\n\tvar key interface{}\n\tswitch block.Type {\n\tcase \"RSA PRIVATE KEY\":\n\t\tkey, err = x509.ParsePKCS1PrivateKey(block.Bytes)\n\tcase \"EC PRIVATE KEY\":\n\t\tkey, err = x509.ParseECPrivateKey(block.Bytes)\n\tcase \"DSA PRIVATE KEY\":\n\t\tkey, err = ssh.ParseDSAPrivateKey(block.Bytes)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported key type %q\", block.Type)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigner, err := ssh.NewSignerFromKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf.AddHostKey(signer)\n\treturn nil\n}\n\nfunc parseKeys(conf *ssh.ServerConfig, pemData []byte) error {\n\tvar found bool\n\tfor {\n\t\tvar block *pem.Block\n\t\tblock, pemData = pem.Decode(pemData)\n\t\tif block == nil {\n\t\t\tif !found {\n\t\t\t\treturn errors.New(\"no private keys found\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err := addKey(conf, block); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfound = true\n\t}\n}\n\nfunc handleAuth(handler []string, conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\tvar output bytes.Buffer\n\n\tkeydata := string(bytes.TrimSpace(ssh.MarshalAuthorizedKey(key)))\n\tcmd := exec.Command(handler[0], append(handler[1:], conn.User(), keydata)...)\n\tcmd.Stdout = &output\n\tcmd.Stderr = &output\n\tstatus, err := exitStatus(cmd.Run())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status.Status == 0 {\n\t\treturn &ssh.Permissions{\n\t\t\tExtensions: map[string]string{\n\t\t\t\t\"environ\": strings.Trim(output.String(), \"\\n\"),\n\t\t\t\t\"user\": conn.User(),\n\t\t\t},\n\t\t}, nil\n\t} else {\n\t\tlog.Println(\"auth-handler status:\", status.Status)\n\t}\n\treturn nil, ErrUnauthorized\n}\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %v [options] <auth-handler> <exec-handler>\\n\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif flag.NArg() < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(64)\n\t}\n\n\texecHandler, err := shlex.Split(flag.Arg(1))\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to parse receiver command:\", err)\n\t}\n\texecHandler[0], err = filepath.Abs(execHandler[0])\n\tif err != nil {\n\t\tlog.Fatalln(\"Invalid receiver path:\", err)\n\t}\n\n\tauthHandler, err := shlex.Split(flag.Arg(0))\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to parse authchecker command:\", err)\n\t}\n\tauthHandler[0], err = filepath.Abs(authHandler[0])\n\tif err != nil {\n\t\tlog.Fatalln(\"Invalid authchecker path:\", err)\n\t}\n\tconfig := &ssh.ServerConfig{\n\t\tPublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\t\t\treturn handleAuth(authHandler, conn, key)\n\t\t},\n\t}\n\n\tif keyEnv := os.Getenv(\"SSH_PRIVATE_KEYS\"); keyEnv != \"\" {\n\t\tif err := parseKeys(config, []byte(keyEnv)); err != nil {\n\t\t\tlog.Fatalln(\"Failed to parse private keys:\", err)\n\t\t}\n\t} else {\n\t\tpemBytes, err := ioutil.ReadFile(*keys)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Failed to load private keys:\", err)\n\t\t}\n\t\tif err := parseKeys(config, pemBytes); err != nil {\n\t\t\tlog.Fatalln(\"Failed to parse private keys:\", err)\n\t\t}\n\t}\n\n\tif p := os.Getenv(\"PORT\"); p != \"\" && *port == \"22\" {\n\t\t*port = p\n\t}\n\tlistener, err := net.Listen(\"tcp\", *host+\":\"+*port)\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed to listen for connections:\", err)\n\t}\n\tfor {\n\t\t\/\/ SSH connections just house multiplexed connections\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept incoming connection:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConn(conn, config, execHandler)\n\t}\n}\n\nfunc handleConn(conn net.Conn, conf *ssh.ServerConfig, execHandler []string) {\n\tdefer conn.Close()\n\tsshConn, chans, reqs, err := ssh.NewServerConn(conn, conf)\n\tif err != nil {\n\t\tlog.Println(\"Failed to handshake:\", err)\n\t\treturn\n\t}\n\n\tgo ssh.DiscardRequests(reqs)\n\n\tfor ch := range chans {\n\t\tif ch.ChannelType() != \"session\" {\n\t\t\tch.Reject(ssh.UnknownChannelType, \"unknown channel type\")\n\t\t\tcontinue\n\t\t}\n\t\tgo handleChannel(sshConn, ch, execHandler)\n\t}\n}\n\nfunc handleChannel(conn *ssh.ServerConn, newChan ssh.NewChannel, execHandler []string) {\n\tch, reqs, err := newChan.Accept()\n\tif err != nil {\n\t\tlog.Println(\"newChan.Accept failed:\", err)\n\t\treturn\n\t}\n\n\tassert := func(at string, err error) bool {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s failed: %s\", at, err)\n\t\t\tch.Stderr().Write([]byte(\"Internal error.\\n\"))\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tvar stdout, stderr io.Writer\n\tif *debug {\n\t\tstdout = io.MultiWriter(ch, os.Stdout)\n\t\tstderr = io.MultiWriter(ch.Stderr(), os.Stdout)\n\t} else {\n\t\tstdout = ch\n\t\tstderr = ch.Stderr()\n\t}\n\n\tvar ptyShell *os.File\n\n\tfor req := range reqs {\n\t\tswitch req.Type {\n\t\tcase \"exec\":\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(true, nil)\n\t\t\t}\n\n\t\t\tcmdline := string(req.Payload[4:])\n\t\t\tvar cmd *exec.Cmd\n\t\t\tif *shell {\n\t\t\t\tshellcmd := flag.Arg(1) + \" \" + cmdline\n\t\t\t\tcmd = exec.Command(os.Getenv(\"SHELL\"), \"-c\", shellcmd)\n\t\t\t} else {\n\t\t\t\tcmdargs, err := shlex.Split(cmdline)\n\t\t\t\tif assert(\"shlex.Split\", err) {\n\t\t\t\t\tch.Close()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcmd = exec.Command(execHandler[0], append(execHandler[1:], cmdargs...)...)\n\t\t\t}\n\t\t\tif *env {\n\t\t\t\tcmd.Env = os.Environ()\n\t\t\t} else {\n\t\t\t\tcmd.Env = []string{}\n\t\t\t}\n\t\t\tif conn.Permissions != nil {\n\t\t\t\t\/\/ Using Permissions.Extensions as a way to get state from PublicKeyCallback\n\t\t\t\tif conn.Permissions.Extensions[\"environ\"] != \"\" {\n\t\t\t\t\tcmd.Env = append(cmd.Env, strings.Split(conn.Permissions.Extensions[\"environ\"], \"\\n\")...)\n\t\t\t\t}\n\t\t\t\tcmd.Env = append(cmd.Env, \"USER=\"+conn.Permissions.Extensions[\"user\"])\n\t\t\t}\n\t\t\tcmd.Env = append(cmd.Env, \"SSH_ORIGINAL_COMMAND=\"+cmdline)\n\n\t\t\t\/\/ cmd.Wait closes the stdin when it's done, so we need to proxy it through a pipe\n\t\t\tstdinPipe, err := cmd.StdinPipe()\n\t\t\tif assert(\"cmd.StdinPipe\", err) {\n\t\t\t\tch.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo io.Copy(stdinPipe, ch)\n\n\t\t\tcmd.Stdout = stdout\n\t\t\tcmd.Stderr = stderr\n\n\t\t\tgo func() {\n\t\t\t\tstatus, err := exitStatus(cmd.Run())\n\t\t\t\tif !assert(\"exec run\", err) {\n\t\t\t\t\t_, err := ch.SendRequest(\"exit-status\", false, ssh.Marshal(&status))\n\t\t\t\t\tassert(\"exec exit\", err)\n\t\t\t\t}\n\t\t\t\tch.Close()\n\t\t\t}()\n\t\tcase \"pty-req\":\n\t\t\twidth, height, okSize := parsePtyRequest(req.Payload)\n\n\t\t\tvar cmd *exec.Cmd\n\t\t\tif *shell {\n\t\t\t\tcmd = exec.Command(os.Getenv(\"SHELL\"))\n\t\t\t} else {\n\t\t\t\tcmd = exec.Command(execHandler[0], execHandler[1:]...)\n\t\t\t}\n\t\t\tif *env {\n\t\t\t\tcmd.Env = os.Environ()\n\t\t\t} else {\n\t\t\t\tcmd.Env = []string{}\n\t\t\t}\n\t\t\tif conn.Permissions != nil {\n\t\t\t\t\/\/ Using Permissions.Extensions as a way to get state from PublicKeyCallback\n\t\t\t\tif conn.Permissions.Extensions[\"environ\"] != \"\" {\n\t\t\t\t\tcmd.Env = append(cmd.Env, strings.Split(conn.Permissions.Extensions[\"environ\"], \"\\n\")...)\n\t\t\t\t}\n\t\t\t\tcmd.Env = append(cmd.Env, \"USER=\"+conn.Permissions.Extensions[\"user\"])\n\t\t\t}\n\t\t\tptyShell, _, err := attachShell(cmd, stdout, ch)\n\t\t\tif assert(\"attachShell\", err) {\n\t\t\t\tch.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif okSize {\n\t\t\t\tsetWinsize(ptyShell.Fd(), width, height)\n\t\t\t\treq.Reply(true, nil)\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\tstatus, err := exitStatus(cmd.Wait())\n\t\t\t\tif !assert(\"pty run\", err) {\n\t\t\t\t\t_, err := ch.SendRequest(\"exit-status\", false, ssh.Marshal(&status))\n\t\t\t\t\tassert(\"pty exit\", err)\n\t\t\t\t}\n\t\t\t\tch.Close()\n\t\t\t}()\n\t\tcase \"window-change\":\n\t\t\twidth, height, okSize := parsePtyRequest(req.Payload)\n\t\t\tif okSize {\n\t\t\t\tsetWinsize(ptyShell.Fd(), width, height)\n\t\t\t}\n\t\t}\n\n\t\tif req.WantReply {\n\t\t\treq.Reply(true, nil)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package opts\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\"\n\tflag \"github.com\/docker\/docker\/pkg\/mflag\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\"\n)\n\nvar (\n\talphaRegexp = regexp.MustCompile(`[a-zA-Z]`)\n\tdomainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9]))(:?\\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])))*)\\.?\\s*$`)\n)\n\nfunc ListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, nil), names, usage)\n}\n\nfunc HostListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, api.ValidateHost), names, usage)\n}\n\nfunc IPListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateIPAddress), names, usage)\n}\n\nfunc DnsSearchListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage)\n}\n\nfunc IPVar(value *net.IP, names []string, defaultValue, usage string) {\n\tflag.Var(NewIpOpt(value, defaultValue), names, usage)\n}\n\nfunc MirrorListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateMirror), names, usage)\n}\n\n\/\/ ListOpts type\ntype ListOpts struct {\n\tvalues *[]string\n\tvalidator ValidatorFctType\n}\n\nfunc NewListOpts(validator ValidatorFctType) ListOpts {\n\tvar values []string\n\treturn *newListOptsRef(&values, validator)\n}\n\nfunc newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {\n\treturn &ListOpts{\n\t\tvalues: values,\n\t\tvalidator: validator,\n\t}\n}\n\nfunc (opts *ListOpts) String() string {\n\treturn fmt.Sprintf(\"%v\", []string((*opts.values)))\n}\n\n\/\/ Set validates if needed the input value and add it to the\n\/\/ internal slice.\nfunc (opts *ListOpts) Set(value string) error {\n\tif opts.validator != nil {\n\t\tv, err := opts.validator(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvalue = v\n\t}\n\t(*opts.values) = append((*opts.values), value)\n\treturn nil\n}\n\n\/\/ Delete remove the given element from the slice.\nfunc (opts *ListOpts) Delete(key string) {\n\tfor i, k := range *opts.values {\n\t\tif k == key {\n\t\t\t(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ GetMap returns the content of values in a map in order to avoid\n\/\/ duplicates.\n\/\/ FIXME: can we remove this?\nfunc (opts *ListOpts) GetMap() map[string]struct{} {\n\tret := make(map[string]struct{})\n\tfor _, k := range *opts.values {\n\t\tret[k] = struct{}{}\n\t}\n\treturn ret\n}\n\n\/\/ GetAll returns the values' slice.\n\/\/ FIXME: Can we remove this?\nfunc (opts *ListOpts) GetAll() []string {\n\treturn (*opts.values)\n}\n\n\/\/ Get checks the existence of the given key.\nfunc (opts *ListOpts) Get(key string) bool {\n\tfor _, k := range *opts.values {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Len returns the amount of element in the slice.\nfunc (opts *ListOpts) Len() int {\n\treturn len((*opts.values))\n}\n\n\/\/ Validators\ntype ValidatorFctType func(val string) (string, error)\n\nfunc ValidateAttach(val string) (string, error) {\n\ts := strings.ToLower(val)\n\tfor _, str := range []string{\"stdin\", \"stdout\", \"stderr\"} {\n\t\tif s == str {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\treturn val, fmt.Errorf(\"valid streams are STDIN, STDOUT and STDERR.\")\n}\n\nfunc ValidateLink(val string) (string, error) {\n\tif _, err := parsers.PartParser(\"name:alias\", val); err != nil {\n\t\treturn val, err\n\t}\n\treturn val, nil\n}\n\nfunc ValidatePath(val string) (string, error) {\n\tvar containerPath string\n\n\tif strings.Count(val, \":\") > 2 {\n\t\treturn val, fmt.Errorf(\"bad format for volumes: %s\", val)\n\t}\n\n\tsplited := strings.SplitN(val, \":\", 2)\n\tif len(splited) == 1 {\n\t\tcontainerPath = splited[0]\n\t\tval = filepath.Clean(splited[0])\n\t} else {\n\t\tcontainerPath = splited[1]\n\t\tval = fmt.Sprintf(\"%s:%s\", splited[0], filepath.Clean(splited[1]))\n\t}\n\n\tif !filepath.IsAbs(containerPath) {\n\t\treturn val, fmt.Errorf(\"%s is not an absolute path\", containerPath)\n\t}\n\treturn val, nil\n}\n\nfunc ValidateEnv(val string) (string, error) {\n\tarr := strings.Split(val, \"=\")\n\tif len(arr) > 1 {\n\t\treturn val, nil\n\t}\n\treturn fmt.Sprintf(\"%s=%s\", val, os.Getenv(val)), nil\n}\n\nfunc ValidateIPAddress(val string) (string, error) {\n\tvar ip = net.ParseIP(strings.TrimSpace(val))\n\tif ip != nil {\n\t\treturn ip.String(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"%s is not an ip address\", val)\n}\n\n\/\/ Validates domain for resolvconf search configuration.\n\/\/ A zero length domain is represented by .\nfunc ValidateDnsSearch(val string) (string, error) {\n\tif val = strings.Trim(val, \" \"); val == \".\" {\n\t\treturn val, nil\n\t}\n\treturn validateDomain(val)\n}\n\nfunc validateDomain(val string) (string, error) {\n\tif alphaRegexp.FindString(val) == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s is not a valid domain\", val)\n\t}\n\tns := domainRegexp.FindSubmatch([]byte(val))\n\tif len(ns) > 0 {\n\t\treturn string(ns[1]), nil\n\t}\n\treturn \"\", fmt.Errorf(\"%s is not a valid domain\", val)\n}\n\nfunc ValidateExtraHost(val string) (string, error) {\n\tarr := strings.Split(val, \":\")\n\tif len(arr) != 2 || len(arr[0]) == 0 {\n\t\treturn \"\", fmt.Errorf(\"bad format for add-host: %s\", val)\n\t}\n\tif _, err := ValidateIPAddress(arr[1]); err != nil {\n\t\treturn \"\", fmt.Errorf(\"bad format for add-host: %s\", val)\n\t}\n\treturn val, nil\n}\n\n\/\/ Validates an HTTP(S) registry mirror\nfunc ValidateMirror(val string) (string, error) {\n\turi, err := url.Parse(val)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%s is not a valid URI\", val)\n\t}\n\n\tif uri.Scheme != \"http\" && uri.Scheme != \"https\" {\n\t\treturn \"\", fmt.Errorf(\"Unsupported scheme %s\", uri.Scheme)\n\t}\n\n\tif uri.Path != \"\" || uri.RawQuery != \"\" || uri.Fragment != \"\" {\n\t\treturn \"\", fmt.Errorf(\"Unsupported path\/query\/fragment at end of the URI\")\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/v1\/\", uri.Scheme, uri.Host), nil\n}\n<commit_msg>Fix input volume path check on Windows<commit_after>package opts\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\"\n\tflag \"github.com\/docker\/docker\/pkg\/mflag\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\"\n)\n\nvar (\n\talphaRegexp = regexp.MustCompile(`[a-zA-Z]`)\n\tdomainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9]))(:?\\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])))*)\\.?\\s*$`)\n)\n\nfunc ListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, nil), names, usage)\n}\n\nfunc HostListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, api.ValidateHost), names, usage)\n}\n\nfunc IPListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateIPAddress), names, usage)\n}\n\nfunc DnsSearchListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage)\n}\n\nfunc IPVar(value *net.IP, names []string, defaultValue, usage string) {\n\tflag.Var(NewIpOpt(value, defaultValue), names, usage)\n}\n\nfunc MirrorListVar(values *[]string, names []string, usage string) {\n\tflag.Var(newListOptsRef(values, ValidateMirror), names, usage)\n}\n\n\/\/ ListOpts type\ntype ListOpts struct {\n\tvalues *[]string\n\tvalidator ValidatorFctType\n}\n\nfunc NewListOpts(validator ValidatorFctType) ListOpts {\n\tvar values []string\n\treturn *newListOptsRef(&values, validator)\n}\n\nfunc newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {\n\treturn &ListOpts{\n\t\tvalues: values,\n\t\tvalidator: validator,\n\t}\n}\n\nfunc (opts *ListOpts) String() string {\n\treturn fmt.Sprintf(\"%v\", []string((*opts.values)))\n}\n\n\/\/ Set validates if needed the input value and add it to the\n\/\/ internal slice.\nfunc (opts *ListOpts) Set(value string) error {\n\tif opts.validator != nil {\n\t\tv, err := opts.validator(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvalue = v\n\t}\n\t(*opts.values) = append((*opts.values), value)\n\treturn nil\n}\n\n\/\/ Delete remove the given element from the slice.\nfunc (opts *ListOpts) Delete(key string) {\n\tfor i, k := range *opts.values {\n\t\tif k == key {\n\t\t\t(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ GetMap returns the content of values in a map in order to avoid\n\/\/ duplicates.\n\/\/ FIXME: can we remove this?\nfunc (opts *ListOpts) GetMap() map[string]struct{} {\n\tret := make(map[string]struct{})\n\tfor _, k := range *opts.values {\n\t\tret[k] = struct{}{}\n\t}\n\treturn ret\n}\n\n\/\/ GetAll returns the values' slice.\n\/\/ FIXME: Can we remove this?\nfunc (opts *ListOpts) GetAll() []string {\n\treturn (*opts.values)\n}\n\n\/\/ Get checks the existence of the given key.\nfunc (opts *ListOpts) Get(key string) bool {\n\tfor _, k := range *opts.values {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Len returns the amount of element in the slice.\nfunc (opts *ListOpts) Len() int {\n\treturn len((*opts.values))\n}\n\n\/\/ Validators\ntype ValidatorFctType func(val string) (string, error)\n\nfunc ValidateAttach(val string) (string, error) {\n\ts := strings.ToLower(val)\n\tfor _, str := range []string{\"stdin\", \"stdout\", \"stderr\"} {\n\t\tif s == str {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\treturn val, fmt.Errorf(\"valid streams are STDIN, STDOUT and STDERR.\")\n}\n\nfunc ValidateLink(val string) (string, error) {\n\tif _, err := parsers.PartParser(\"name:alias\", val); err != nil {\n\t\treturn val, err\n\t}\n\treturn val, nil\n}\n\nfunc ValidatePath(val string) (string, error) {\n\tvar containerPath string\n\n\tif strings.Count(val, \":\") > 2 {\n\t\treturn val, fmt.Errorf(\"bad format for volumes: %s\", val)\n\t}\n\n\tsplited := strings.SplitN(val, \":\", 2)\n\tif len(splited) == 1 {\n\t\tcontainerPath = splited[0]\n\t\tval = path.Clean(splited[0])\n\t} else {\n\t\tcontainerPath = splited[1]\n\t\tval = fmt.Sprintf(\"%s:%s\", splited[0], path.Clean(splited[1]))\n\t}\n\n\tif !path.IsAbs(containerPath) {\n\t\treturn val, fmt.Errorf(\"%s is not an absolute path\", containerPath)\n\t}\n\treturn val, nil\n}\n\nfunc ValidateEnv(val string) (string, error) {\n\tarr := strings.Split(val, \"=\")\n\tif len(arr) > 1 {\n\t\treturn val, nil\n\t}\n\treturn fmt.Sprintf(\"%s=%s\", val, os.Getenv(val)), nil\n}\n\nfunc ValidateIPAddress(val string) (string, error) {\n\tvar ip = net.ParseIP(strings.TrimSpace(val))\n\tif ip != nil {\n\t\treturn ip.String(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"%s is not an ip address\", val)\n}\n\n\/\/ Validates domain for resolvconf search configuration.\n\/\/ A zero length domain is represented by .\nfunc ValidateDnsSearch(val string) (string, error) {\n\tif val = strings.Trim(val, \" \"); val == \".\" {\n\t\treturn val, nil\n\t}\n\treturn validateDomain(val)\n}\n\nfunc validateDomain(val string) (string, error) {\n\tif alphaRegexp.FindString(val) == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s is not a valid domain\", val)\n\t}\n\tns := domainRegexp.FindSubmatch([]byte(val))\n\tif len(ns) > 0 {\n\t\treturn string(ns[1]), nil\n\t}\n\treturn \"\", fmt.Errorf(\"%s is not a valid domain\", val)\n}\n\nfunc ValidateExtraHost(val string) (string, error) {\n\tarr := strings.Split(val, \":\")\n\tif len(arr) != 2 || len(arr[0]) == 0 {\n\t\treturn \"\", fmt.Errorf(\"bad format for add-host: %s\", val)\n\t}\n\tif _, err := ValidateIPAddress(arr[1]); err != nil {\n\t\treturn \"\", fmt.Errorf(\"bad format for add-host: %s\", val)\n\t}\n\treturn val, nil\n}\n\n\/\/ Validates an HTTP(S) registry mirror\nfunc ValidateMirror(val string) (string, error) {\n\turi, err := url.Parse(val)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%s is not a valid URI\", val)\n\t}\n\n\tif uri.Scheme != \"http\" && uri.Scheme != \"https\" {\n\t\treturn \"\", fmt.Errorf(\"Unsupported scheme %s\", uri.Scheme)\n\t}\n\n\tif uri.Path != \"\" || uri.RawQuery != \"\" || uri.Fragment != \"\" {\n\t\treturn \"\", fmt.Errorf(\"Unsupported path\/query\/fragment at end of the URI\")\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/v1\/\", uri.Scheme, uri.Host), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The go-ethereum Authors\n\/\/ This file is part of go-ethereum.\n\/\/\n\/\/ go-ethereum is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ go-ethereum is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with go-ethereum. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\/*\n\nThis key store behaves as KeyStorePlain with the difference that\nthe private key is encrypted and on disk uses another JSON encoding.\n\nThe crypto is documented at https:\/\/github.com\/ethereum\/wiki\/wiki\/Web3-Secret-Storage-Definition\n\n*\/\n\npackage crypto\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\/randentropy\"\n\t\"golang.org\/x\/crypto\/pbkdf2\"\n\t\"golang.org\/x\/crypto\/scrypt\"\n)\n\nconst (\n\tkeyHeaderKDF = \"scrypt\"\n\t\/\/ 2^18 \/ 8 \/ 1 uses 256MB memory and approx 1s CPU time on a modern CPU.\n\tscryptN = 1 << 18\n\tscryptr = 8\n\tscryptp = 1\n\tscryptdkLen = 32\n)\n\ntype keyStorePassphrase struct {\n\tkeysDirPath string\n}\n\nfunc NewKeyStorePassphrase(path string) KeyStore {\n\treturn &keyStorePassphrase{path}\n}\n\nfunc (ks keyStorePassphrase) GenerateNewKey(rand io.Reader, auth string) (key *Key, err error) {\n\treturn GenerateNewKeyDefault(ks, rand, auth)\n}\n\nfunc (ks keyStorePassphrase) GetKey(keyAddr common.Address, auth string) (key *Key, err error) {\n\tkeyBytes, keyId, err := decryptKeyFromFile(ks.keysDirPath, keyAddr, auth)\n\tif err == nil {\n\t\tkey = &Key{\n\t\t\tId: uuid.UUID(keyId),\n\t\t\tAddress: keyAddr,\n\t\t\tPrivateKey: ToECDSA(keyBytes),\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ks keyStorePassphrase) Cleanup(keyAddr common.Address) (err error) {\n\treturn cleanup(ks.keysDirPath, keyAddr)\n}\n\nfunc (ks keyStorePassphrase) GetKeyAddresses() (addresses []common.Address, err error) {\n\treturn getKeyAddresses(ks.keysDirPath)\n}\n\nfunc (ks keyStorePassphrase) StoreKey(key *Key, auth string) (err error) {\n\tauthArray := []byte(auth)\n\tsalt := randentropy.GetEntropyCSPRNG(32)\n\tderivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptr, scryptp, scryptdkLen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tencryptKey := derivedKey[:16]\n\tkeyBytes := FromECDSA(key.PrivateKey)\n\n\tiv := randentropy.GetEntropyCSPRNG(aes.BlockSize) \/\/ 16\n\tcipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmac := Sha3(derivedKey[16:32], cipherText)\n\n\tscryptParamsJSON := make(map[string]interface{}, 5)\n\tscryptParamsJSON[\"n\"] = scryptN\n\tscryptParamsJSON[\"r\"] = scryptr\n\tscryptParamsJSON[\"p\"] = scryptp\n\tscryptParamsJSON[\"dklen\"] = scryptdkLen\n\tscryptParamsJSON[\"salt\"] = hex.EncodeToString(salt)\n\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tcryptoStruct := cryptoJSON{\n\t\tCipher: \"aes-128-ctr\",\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: \"scrypt\",\n\t\tKDFParams: scryptParamsJSON,\n\t\tMAC: hex.EncodeToString(mac),\n\t}\n\tencryptedKeyJSONV3 := encryptedKeyJSONV3{\n\t\thex.EncodeToString(key.Address[:]),\n\t\tcryptoStruct,\n\t\tkey.Id.String(),\n\t\tversion,\n\t}\n\tkeyJSON, err := json.Marshal(encryptedKeyJSONV3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn writeKeyFile(key.Address, ks.keysDirPath, keyJSON)\n}\n\nfunc (ks keyStorePassphrase) DeleteKey(keyAddr common.Address, auth string) (err error) {\n\t\/\/ only delete if correct passphrase is given\n\t_, _, err = decryptKeyFromFile(ks.keysDirPath, keyAddr, auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn deleteKey(ks.keysDirPath, keyAddr)\n}\n\nfunc decryptKeyFromFile(keysDirPath string, keyAddr common.Address, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tfmt.Printf(\"%v\\n\", keyAddr.Hex())\n\tm := make(map[string]interface{})\n\terr = getKey(keysDirPath, keyAddr, &m)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tv := reflect.ValueOf(m[\"version\"])\n\tif v.Kind() == reflect.String && v.String() == \"1\" {\n\t\tk := new(encryptedKeyJSONV1)\n\t\terr = getKey(keysDirPath, keyAddr, &k)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn decryptKeyV1(k, auth)\n\t} else {\n\t\tk := new(encryptedKeyJSONV3)\n\t\terr = getKey(keysDirPath, keyAddr, &k)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn decryptKeyV3(k, auth)\n\t}\n}\n\nfunc decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tif keyProtected.Version != version {\n\t\treturn nil, nil, fmt.Errorf(\"Version not supported: %v\", keyProtected.Version)\n\t}\n\n\tif keyProtected.Crypto.Cipher != \"aes-128-ctr\" {\n\t\treturn nil, nil, fmt.Errorf(\"Cipher not supported: %v\", keyProtected.Crypto.Cipher)\n\t}\n\n\tkeyId = uuid.Parse(keyProtected.Id)\n\tmac, err := hex.DecodeString(keyProtected.Crypto.MAC)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tiv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tderivedKey, err := getKDFKey(keyProtected.Crypto, auth)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcalculatedMAC := Sha3(derivedKey[16:32], cipherText)\n\tif !bytes.Equal(calculatedMAC, mac) {\n\t\treturn nil, nil, errors.New(\"Decryption failed: MAC mismatch\")\n\t}\n\n\tplainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn plainText, keyId, err\n}\n\nfunc decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tkeyId = uuid.Parse(keyProtected.Id)\n\tmac, err := hex.DecodeString(keyProtected.Crypto.MAC)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tiv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tderivedKey, err := getKDFKey(keyProtected.Crypto, auth)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcalculatedMAC := Sha3(derivedKey[16:32], cipherText)\n\tif !bytes.Equal(calculatedMAC, mac) {\n\t\treturn nil, nil, errors.New(\"Decryption failed: MAC mismatch\")\n\t}\n\n\tplainText, err := aesCBCDecrypt(Sha3(derivedKey[:16])[:16], cipherText, iv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn plainText, keyId, err\n}\n\nfunc getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {\n\tauthArray := []byte(auth)\n\tsalt, err := hex.DecodeString(cryptoJSON.KDFParams[\"salt\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdkLen := ensureInt(cryptoJSON.KDFParams[\"dklen\"])\n\n\tif cryptoJSON.KDF == \"scrypt\" {\n\t\tn := ensureInt(cryptoJSON.KDFParams[\"n\"])\n\t\tr := ensureInt(cryptoJSON.KDFParams[\"r\"])\n\t\tp := ensureInt(cryptoJSON.KDFParams[\"p\"])\n\t\treturn scrypt.Key(authArray, salt, n, r, p, dkLen)\n\n\t} else if cryptoJSON.KDF == \"pbkdf2\" {\n\t\tc := ensureInt(cryptoJSON.KDFParams[\"c\"])\n\t\tprf := cryptoJSON.KDFParams[\"prf\"].(string)\n\t\tif prf != \"hmac-sha256\" {\n\t\t\treturn nil, fmt.Errorf(\"Unsupported PBKDF2 PRF: \", prf)\n\t\t}\n\t\tkey := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)\n\t\treturn key, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported KDF: \", cryptoJSON.KDF)\n}\n\n\/\/ TODO: can we do without this when unmarshalling dynamic JSON?\n\/\/ why do integers in KDF params end up as float64 and not int after\n\/\/ unmarshal?\nfunc ensureInt(x interface{}) int {\n\tres, ok := x.(int)\n\tif !ok {\n\t\tres = int(x.(float64))\n\t}\n\treturn res\n}\n<commit_msg>crypto: remove debug print call after decrypting a key from disk<commit_after>\/\/ Copyright 2014 The go-ethereum Authors\n\/\/ This file is part of go-ethereum.\n\/\/\n\/\/ go-ethereum is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ go-ethereum is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with go-ethereum. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\/*\n\nThis key store behaves as KeyStorePlain with the difference that\nthe private key is encrypted and on disk uses another JSON encoding.\n\nThe crypto is documented at https:\/\/github.com\/ethereum\/wiki\/wiki\/Web3-Secret-Storage-Definition\n\n*\/\n\npackage crypto\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\/randentropy\"\n\t\"golang.org\/x\/crypto\/pbkdf2\"\n\t\"golang.org\/x\/crypto\/scrypt\"\n)\n\nconst (\n\tkeyHeaderKDF = \"scrypt\"\n\t\/\/ 2^18 \/ 8 \/ 1 uses 256MB memory and approx 1s CPU time on a modern CPU.\n\tscryptN = 1 << 18\n\tscryptr = 8\n\tscryptp = 1\n\tscryptdkLen = 32\n)\n\ntype keyStorePassphrase struct {\n\tkeysDirPath string\n}\n\nfunc NewKeyStorePassphrase(path string) KeyStore {\n\treturn &keyStorePassphrase{path}\n}\n\nfunc (ks keyStorePassphrase) GenerateNewKey(rand io.Reader, auth string) (key *Key, err error) {\n\treturn GenerateNewKeyDefault(ks, rand, auth)\n}\n\nfunc (ks keyStorePassphrase) GetKey(keyAddr common.Address, auth string) (key *Key, err error) {\n\tkeyBytes, keyId, err := decryptKeyFromFile(ks.keysDirPath, keyAddr, auth)\n\tif err == nil {\n\t\tkey = &Key{\n\t\t\tId: uuid.UUID(keyId),\n\t\t\tAddress: keyAddr,\n\t\t\tPrivateKey: ToECDSA(keyBytes),\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ks keyStorePassphrase) Cleanup(keyAddr common.Address) (err error) {\n\treturn cleanup(ks.keysDirPath, keyAddr)\n}\n\nfunc (ks keyStorePassphrase) GetKeyAddresses() (addresses []common.Address, err error) {\n\treturn getKeyAddresses(ks.keysDirPath)\n}\n\nfunc (ks keyStorePassphrase) StoreKey(key *Key, auth string) (err error) {\n\tauthArray := []byte(auth)\n\tsalt := randentropy.GetEntropyCSPRNG(32)\n\tderivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptr, scryptp, scryptdkLen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tencryptKey := derivedKey[:16]\n\tkeyBytes := FromECDSA(key.PrivateKey)\n\n\tiv := randentropy.GetEntropyCSPRNG(aes.BlockSize) \/\/ 16\n\tcipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmac := Sha3(derivedKey[16:32], cipherText)\n\n\tscryptParamsJSON := make(map[string]interface{}, 5)\n\tscryptParamsJSON[\"n\"] = scryptN\n\tscryptParamsJSON[\"r\"] = scryptr\n\tscryptParamsJSON[\"p\"] = scryptp\n\tscryptParamsJSON[\"dklen\"] = scryptdkLen\n\tscryptParamsJSON[\"salt\"] = hex.EncodeToString(salt)\n\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tcryptoStruct := cryptoJSON{\n\t\tCipher: \"aes-128-ctr\",\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: \"scrypt\",\n\t\tKDFParams: scryptParamsJSON,\n\t\tMAC: hex.EncodeToString(mac),\n\t}\n\tencryptedKeyJSONV3 := encryptedKeyJSONV3{\n\t\thex.EncodeToString(key.Address[:]),\n\t\tcryptoStruct,\n\t\tkey.Id.String(),\n\t\tversion,\n\t}\n\tkeyJSON, err := json.Marshal(encryptedKeyJSONV3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn writeKeyFile(key.Address, ks.keysDirPath, keyJSON)\n}\n\nfunc (ks keyStorePassphrase) DeleteKey(keyAddr common.Address, auth string) (err error) {\n\t\/\/ only delete if correct passphrase is given\n\t_, _, err = decryptKeyFromFile(ks.keysDirPath, keyAddr, auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn deleteKey(ks.keysDirPath, keyAddr)\n}\n\nfunc decryptKeyFromFile(keysDirPath string, keyAddr common.Address, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tm := make(map[string]interface{})\n\terr = getKey(keysDirPath, keyAddr, &m)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tv := reflect.ValueOf(m[\"version\"])\n\tif v.Kind() == reflect.String && v.String() == \"1\" {\n\t\tk := new(encryptedKeyJSONV1)\n\t\terr = getKey(keysDirPath, keyAddr, &k)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn decryptKeyV1(k, auth)\n\t} else {\n\t\tk := new(encryptedKeyJSONV3)\n\t\terr = getKey(keysDirPath, keyAddr, &k)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn decryptKeyV3(k, auth)\n\t}\n}\n\nfunc decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tif keyProtected.Version != version {\n\t\treturn nil, nil, fmt.Errorf(\"Version not supported: %v\", keyProtected.Version)\n\t}\n\n\tif keyProtected.Crypto.Cipher != \"aes-128-ctr\" {\n\t\treturn nil, nil, fmt.Errorf(\"Cipher not supported: %v\", keyProtected.Crypto.Cipher)\n\t}\n\n\tkeyId = uuid.Parse(keyProtected.Id)\n\tmac, err := hex.DecodeString(keyProtected.Crypto.MAC)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tiv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tderivedKey, err := getKDFKey(keyProtected.Crypto, auth)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcalculatedMAC := Sha3(derivedKey[16:32], cipherText)\n\tif !bytes.Equal(calculatedMAC, mac) {\n\t\treturn nil, nil, errors.New(\"Decryption failed: MAC mismatch\")\n\t}\n\n\tplainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn plainText, keyId, err\n}\n\nfunc decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) {\n\tkeyId = uuid.Parse(keyProtected.Id)\n\tmac, err := hex.DecodeString(keyProtected.Crypto.MAC)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tiv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tderivedKey, err := getKDFKey(keyProtected.Crypto, auth)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcalculatedMAC := Sha3(derivedKey[16:32], cipherText)\n\tif !bytes.Equal(calculatedMAC, mac) {\n\t\treturn nil, nil, errors.New(\"Decryption failed: MAC mismatch\")\n\t}\n\n\tplainText, err := aesCBCDecrypt(Sha3(derivedKey[:16])[:16], cipherText, iv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn plainText, keyId, err\n}\n\nfunc getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {\n\tauthArray := []byte(auth)\n\tsalt, err := hex.DecodeString(cryptoJSON.KDFParams[\"salt\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdkLen := ensureInt(cryptoJSON.KDFParams[\"dklen\"])\n\n\tif cryptoJSON.KDF == \"scrypt\" {\n\t\tn := ensureInt(cryptoJSON.KDFParams[\"n\"])\n\t\tr := ensureInt(cryptoJSON.KDFParams[\"r\"])\n\t\tp := ensureInt(cryptoJSON.KDFParams[\"p\"])\n\t\treturn scrypt.Key(authArray, salt, n, r, p, dkLen)\n\n\t} else if cryptoJSON.KDF == \"pbkdf2\" {\n\t\tc := ensureInt(cryptoJSON.KDFParams[\"c\"])\n\t\tprf := cryptoJSON.KDFParams[\"prf\"].(string)\n\t\tif prf != \"hmac-sha256\" {\n\t\t\treturn nil, fmt.Errorf(\"Unsupported PBKDF2 PRF: \", prf)\n\t\t}\n\t\tkey := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)\n\t\treturn key, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unsupported KDF: \", cryptoJSON.KDF)\n}\n\n\/\/ TODO: can we do without this when unmarshalling dynamic JSON?\n\/\/ why do integers in KDF params end up as float64 and not int after\n\/\/ unmarshal?\nfunc ensureInt(x interface{}) int {\n\tres, ok := x.(int)\n\tif !ok {\n\t\tres = int(x.(float64))\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/AndrewVos\/colour\"\n\t\"gopkg.in\/yaml.v1\"\n)\n\ntype Config struct {\n\tExcludeLines []*regexp.Regexp\n\tExcludeFiles []*regexp.Regexp\n\tMinLineLength int\n\tMinHunkSize int\n}\n\nfunc LoadConfig() Config {\n\tconfig := Config{\n\t\tExcludeLines: []*regexp.Regexp{},\n\t\tExcludeFiles: []*regexp.Regexp{},\n\t\tMinLineLength: 10,\n\t\tMinHunkSize: 2,\n\t}\n\n\tdata, err := ioutil.ReadFile(\".cccv.yml\")\n\tif err != nil {\n\t\treturn config\n\t}\n\n\tt := struct {\n\t\tExcludeFiles []string \"exclude-files\"\n\t\tExcludeLines []string \"exclude-lines\"\n\t\tMinLineLength int \"min-line-length\"\n\t\tMinHunkSize int \"min-hunk-size\"\n\t}{}\n\terr = yaml.Unmarshal(data, &t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, s := range t.ExcludeLines {\n\t\tr := regexp.MustCompile(s)\n\t\tconfig.ExcludeLines = append(config.ExcludeLines, r)\n\t}\n\tfor _, s := range t.ExcludeFiles {\n\t\tr := regexp.MustCompile(s)\n\t\tconfig.ExcludeFiles = append(config.ExcludeFiles, r)\n\t}\n\tif t.MinLineLength != 0 {\n\t\tconfig.MinLineLength = t.MinLineLength\n\t}\n\treturn config\n}\n\ntype FileName string\n\ntype Change struct {\n\tFileName\n\tLine\n}\n\ntype Line struct {\n\tNumber int\n\tText string\n}\n\ntype FileResult struct {\n\tFileName\n\tLines []*Line\n}\n\nfunc (fr *FileResult) HasDuplicates() bool {\n\treturn len(fr.Lines) > 0\n}\n\nfunc main() {\n\tvar wg sync.WaitGroup\n\tconfig := LoadConfig()\n\n\tresultChan := make(chan FileResult)\n\tresults := []FileResult{}\n\n\tchanges := getChanges(os.Stdin, config)\n\tgitFiles := gitLsFiles(config)\n\n\tgo func() {\n\t\tfor {\n\t\t\tr := <-resultChan\n\t\t\tresults = append(results, r)\n\t\t}\n\t}()\n\n\tfor _, fName := range gitFiles {\n\t\twg.Add(1)\n\t\tgo func(fName string, resultChan chan FileResult) {\n\t\t\tdefer wg.Done()\n\t\t\tr := GenResultForFile(fName, changes, config)\n\t\t\tif r.HasDuplicates() {\n\t\t\t\tresultChan <- r\n\t\t\t}\n\t\t}(fName, resultChan)\n\t}\n\twg.Wait()\n\n\tif len(results) > 0 {\n\t\tfmt.Printf(colour.White(\"Possible copy\/paste sources:\\n\"))\n\t\tfor _, r := range results {\n\t\t\tfmt.Printf(colour.Red(\"%s:\\n\"), r.FileName)\n\t\t\tfor _, l := range r.Lines {\n\t\t\t\tfmt.Printf(colour.Yellow(\"%d: \")+\"%s\\n\", l.Number, l.Text)\n\t\t\t}\n\t\t}\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Printf(colour.Green(\"Good diff - no copy\/pasted code.\\n\"))\n\t}\n}\n\nfunc GenResultForFile(fName string, changes *[]*Change, config Config) FileResult {\n\tfile, _ := os.Open(fName)\n\tscanner := bufio.NewScanner(file)\n\tcurrentLineNumber := 0\n\tresult := FileResult{FileName: FileName(fName), Lines: []*Line{}}\n\nLOOP_LINES:\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tcurrentLineNumber++\n\n\t\tfor _, excludeLinesR := range config.ExcludeLines {\n\t\t\tif excludeLinesR.MatchString(line) {\n\t\t\t\tcontinue LOOP_LINES\n\t\t\t}\n\t\t}\n\n\t\tfor _, change := range *changes {\n\t\t\tif strings.TrimFunc(change.Text, TrimF) == strings.TrimFunc(line, TrimF) {\n\n\t\t\t\t\/\/ exclude lines from the diff itself\n\t\t\t\tif string(change.FileName) == fName && change.Line.Number == currentLineNumber {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tresultAlreadyRecorded := false\n\t\t\t\tfor _, resultLine := range result.Lines {\n\t\t\t\t\tif resultLine.Number == currentLineNumber && resultLine.Text == line {\n\t\t\t\t\t\tresultAlreadyRecorded = true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !resultAlreadyRecorded {\n\t\t\t\t\tresult.Lines = append(result.Lines, &Line{Number: currentLineNumber, Text: line})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.Lines = filteredByHunkSizeLines(result.Lines, config)\n\treturn result\n}\n\nfunc filteredByHunkSizeLines(lines []*Line, config Config) []*Line {\n\tvar currentHunk []*Line\n\thunks := [][]*Line{}\n\n\tfor i, l := range lines {\n\t\tif i == 0 {\n\t\t\tcurrentHunk = []*Line{l}\n\n\t\t\tif len(lines) == 1 {\n\t\t\t\thunks = append(hunks, currentHunk)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif l.Number-1 == lines[i-1].Number {\n\t\t\tcurrentHunk = append(currentHunk, l)\n\n\t\t} else {\n\t\t\thunks = append(hunks, currentHunk)\n\t\t\tcurrentHunk = []*Line{l}\n\t\t}\n\n\t\tif i == len(lines)-1 {\n\t\t\thunks = append(hunks, currentHunk)\n\t\t}\n\t}\n\n\tfilteredLines := []*Line{}\n\tfor _, h := range hunks {\n\t\tif len(h) >= config.MinHunkSize {\n\t\t\tfilteredLines = append(filteredLines, h...)\n\t\t}\n\t}\n\treturn filteredLines\n}\n\nfunc gitLsFiles(config Config) []string {\n\tfiles := []string{}\n\tcmd := exec.Command(\"git\", \"ls-files\")\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\nLOOP_FILES:\n\tfor scanner.Scan() {\n\t\tfor _, excludeFilesR := range config.ExcludeFiles {\n\t\t\tif excludeFilesR.MatchString(scanner.Text()) {\n\t\t\t\tcontinue LOOP_FILES\n\t\t\t}\n\t\t}\n\t\tfiles = append(files, scanner.Text())\n\t}\n\n\treturn files\n}\n\nfunc getChanges(reader io.Reader, config Config) *[]*Change {\n\tscanner := bufio.NewScanner(reader)\n\tvar currentFile string\n\tvar currentLineNumber int\n\tchanges := &[]*Change{}\n\n\tcurrentFileR := regexp.MustCompile(`^\\+\\+\\+ .\/(.*)$`)\n\tlineAddedR := regexp.MustCompile(`^\\+{1}(.*\\w+.*)`)\n\tlineRemovedR := regexp.MustCompile(`^\\-{1}`)\n\tlineRangeR := regexp.MustCompile(`^@@.*?\\+(\\d+?),`)\n\n\tfor scanner.Scan() {\n\t\tcurrentLine := scanner.Text()\n\n\t\tif res := currentFileR.FindStringSubmatch(currentLine); res != nil {\n\t\t\tcurrentFile = res[1]\n\n\t\t} else if res := lineRangeR.FindStringSubmatch(currentLine); res != nil {\n\t\t\tr, err := strconv.Atoi(res[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tcurrentLineNumber = r\n\n\t\t} else if lineAddedR.MatchString(currentLine) {\n\t\t\tres := lineAddedR.FindStringSubmatch(currentLine)\n\n\t\t\tif len(strings.TrimFunc(res[1], TrimF)) <= config.MinLineLength {\n\t\t\t\tcurrentLineNumber++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnewChange := &Change{\n\t\t\t\tFileName: FileName(currentFile),\n\t\t\t\tLine: Line{Text: res[1], Number: currentLineNumber},\n\t\t\t}\n\t\t\t*changes = append(*changes, newChange)\n\t\t\tcurrentLineNumber++\n\n\t\t} else if !lineRemovedR.MatchString(currentLine) {\n\t\t\tcurrentLineNumber++\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading standard input:\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn changes\n}\n\nfunc TrimF(c rune) bool { return c == 32 || c == 9 }\n<commit_msg>reading files in goroutines does not work<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/AndrewVos\/colour\"\n\t\"gopkg.in\/yaml.v1\"\n)\n\ntype Config struct {\n\tExcludeLines []*regexp.Regexp\n\tExcludeFiles []*regexp.Regexp\n\tMinLineLength int\n\tMinHunkSize int\n}\n\nfunc LoadConfig() Config {\n\tconfig := Config{\n\t\tExcludeLines: []*regexp.Regexp{},\n\t\tExcludeFiles: []*regexp.Regexp{},\n\t\tMinLineLength: 10,\n\t\tMinHunkSize: 2,\n\t}\n\n\tdata, err := ioutil.ReadFile(\".cccv.yml\")\n\tif err != nil {\n\t\treturn config\n\t}\n\n\tt := struct {\n\t\tExcludeFiles []string \"exclude-files\"\n\t\tExcludeLines []string \"exclude-lines\"\n\t\tMinLineLength int \"min-line-length\"\n\t\tMinHunkSize int \"min-hunk-size\"\n\t}{}\n\terr = yaml.Unmarshal(data, &t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, s := range t.ExcludeLines {\n\t\tr := regexp.MustCompile(s)\n\t\tconfig.ExcludeLines = append(config.ExcludeLines, r)\n\t}\n\tfor _, s := range t.ExcludeFiles {\n\t\tr := regexp.MustCompile(s)\n\t\tconfig.ExcludeFiles = append(config.ExcludeFiles, r)\n\t}\n\tif t.MinLineLength != 0 {\n\t\tconfig.MinLineLength = t.MinLineLength\n\t}\n\treturn config\n}\n\ntype FileName string\n\ntype Change struct {\n\tFileName\n\tLine\n}\n\ntype Line struct {\n\tNumber int\n\tText string\n}\n\ntype FileResult struct {\n\tFileName\n\tLines []*Line\n}\n\nfunc (fr *FileResult) HasDuplicates() bool {\n\treturn len(fr.Lines) > 0\n}\n\nfunc main() {\n\tconfig := LoadConfig()\n\n\tresults := []FileResult{}\n\n\tchanges := getChanges(os.Stdin, config)\n\tgitFiles := gitLsFiles(config)\n\n\tfor _, fName := range gitFiles {\n\t\tr := GenResultForFile(fName, changes, config)\n\t\tif r.HasDuplicates() {\n\t\t\tresults = append(results, r)\n\t\t}\n\t}\n\n\tif len(results) > 0 {\n\t\tfmt.Printf(colour.White(\"Possible copy\/paste sources:\\n\"))\n\t\tfor _, r := range results {\n\t\t\tfmt.Printf(colour.Red(\"%s:\\n\"), r.FileName)\n\t\t\tfor _, l := range r.Lines {\n\t\t\t\tfmt.Printf(colour.Yellow(\"%d: \")+\"%s\\n\", l.Number, l.Text)\n\t\t\t}\n\t\t}\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Printf(colour.Green(\"Good diff - no copy\/pasted code.\\n\"))\n\t}\n}\n\nfunc GenResultForFile(fName string, changes *[]*Change, config Config) FileResult {\n\tfile, _ := os.Open(fName)\n\tscanner := bufio.NewScanner(file)\n\tcurrentLineNumber := 0\n\tresult := FileResult{FileName: FileName(fName), Lines: []*Line{}}\n\nLOOP_LINES:\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tcurrentLineNumber++\n\n\t\tfor _, excludeLinesR := range config.ExcludeLines {\n\t\t\tif excludeLinesR.MatchString(line) {\n\t\t\t\tcontinue LOOP_LINES\n\t\t\t}\n\t\t}\n\n\t\tfor _, change := range *changes {\n\t\t\tif strings.TrimFunc(change.Text, TrimF) == strings.TrimFunc(line, TrimF) {\n\n\t\t\t\t\/\/ exclude lines from the diff itself\n\t\t\t\tif string(change.FileName) == fName && change.Line.Number == currentLineNumber {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tresultAlreadyRecorded := false\n\t\t\t\tfor _, resultLine := range result.Lines {\n\t\t\t\t\tif resultLine.Number == currentLineNumber && resultLine.Text == line {\n\t\t\t\t\t\tresultAlreadyRecorded = true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !resultAlreadyRecorded {\n\t\t\t\t\tresult.Lines = append(result.Lines, &Line{Number: currentLineNumber, Text: line})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.Lines = filteredByHunkSizeLines(result.Lines, config)\n\treturn result\n}\n\nfunc filteredByHunkSizeLines(lines []*Line, config Config) []*Line {\n\tvar currentHunk []*Line\n\thunks := [][]*Line{}\n\n\tfor i, l := range lines {\n\t\tif i == 0 {\n\t\t\tcurrentHunk = []*Line{l}\n\n\t\t\tif len(lines) == 1 {\n\t\t\t\thunks = append(hunks, currentHunk)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif l.Number-1 == lines[i-1].Number {\n\t\t\tcurrentHunk = append(currentHunk, l)\n\n\t\t} else {\n\t\t\thunks = append(hunks, currentHunk)\n\t\t\tcurrentHunk = []*Line{l}\n\t\t}\n\n\t\tif i == len(lines)-1 {\n\t\t\thunks = append(hunks, currentHunk)\n\t\t}\n\t}\n\n\tfilteredLines := []*Line{}\n\tfor _, h := range hunks {\n\t\tif len(h) >= config.MinHunkSize {\n\t\t\tfilteredLines = append(filteredLines, h...)\n\t\t}\n\t}\n\treturn filteredLines\n}\n\nfunc gitLsFiles(config Config) []string {\n\tfiles := []string{}\n\tcmd := exec.Command(\"git\", \"ls-files\")\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\nLOOP_FILES:\n\tfor scanner.Scan() {\n\t\tfor _, excludeFilesR := range config.ExcludeFiles {\n\t\t\tif excludeFilesR.MatchString(scanner.Text()) {\n\t\t\t\tcontinue LOOP_FILES\n\t\t\t}\n\t\t}\n\t\tfiles = append(files, scanner.Text())\n\t}\n\n\treturn files\n}\n\nfunc getChanges(reader io.Reader, config Config) *[]*Change {\n\tscanner := bufio.NewScanner(reader)\n\tvar currentFile string\n\tvar currentLineNumber int\n\tchanges := &[]*Change{}\n\n\tcurrentFileR := regexp.MustCompile(`^\\+\\+\\+ .\/(.*)$`)\n\tlineAddedR := regexp.MustCompile(`^\\+{1}(.*\\w+.*)`)\n\tlineRemovedR := regexp.MustCompile(`^\\-{1}`)\n\tlineRangeR := regexp.MustCompile(`^@@.*?\\+(\\d+?),`)\n\n\tfor scanner.Scan() {\n\t\tcurrentLine := scanner.Text()\n\n\t\tif res := currentFileR.FindStringSubmatch(currentLine); res != nil {\n\t\t\tcurrentFile = res[1]\n\n\t\t} else if res := lineRangeR.FindStringSubmatch(currentLine); res != nil {\n\t\t\tr, err := strconv.Atoi(res[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tcurrentLineNumber = r\n\n\t\t} else if lineAddedR.MatchString(currentLine) {\n\t\t\tres := lineAddedR.FindStringSubmatch(currentLine)\n\n\t\t\tif len(strings.TrimFunc(res[1], TrimF)) <= config.MinLineLength {\n\t\t\t\tcurrentLineNumber++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnewChange := &Change{\n\t\t\t\tFileName: FileName(currentFile),\n\t\t\t\tLine: Line{Text: res[1], Number: currentLineNumber},\n\t\t\t}\n\t\t\t*changes = append(*changes, newChange)\n\t\t\tcurrentLineNumber++\n\n\t\t} else if !lineRemovedR.MatchString(currentLine) {\n\t\t\tcurrentLineNumber++\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading standard input:\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn changes\n}\n\nfunc TrimF(c rune) bool { return c == 32 || c == 9 }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ogletest\n\n\/\/ Equals returns a matcher that matches any value v such that v == x, with the\n\/\/ exception that if x is a numeric type, Equals(x) will match equivalent\n\/\/ numeric values of any type.\nfunc Equals(x interface{}) Matcher {\n}\n<commit_msg>Added a stub implementation.<commit_after>\/\/ Copyright 2011 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ogletest\n\n\/\/ Equals returns a matcher that matches any value v such that v == x, with the\n\/\/ exception that if x is a numeric type, Equals(x) will match equivalent\n\/\/ numeric values of any type.\nfunc Equals(x interface{}) Matcher {\n\treturn &equalsMatcher{x}\n}\n\ntype equalsMatcher struct {\n\texpected interface{}\n}\n\nfunc (m *equalsMatcher) Matches(val interface{}) (result MatchResult, err string) {\n\treturn MATCH_UNDEFINED, \"TODO\"\n}\n\nfunc (m *equalsMatcher) Description() string {\n\treturn \"TODO\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !appengine,!gopherjs,!windows\n\npackage logrus\n\nfunc (f *TextFormatter) initTerminal(entry *Entry) {\n}\n<commit_msg>Extended conditions to include non-native builds<commit_after>\/\/ +build appengine gopherjs !windows\n\npackage logrus\n\nfunc (f *TextFormatter) initTerminal(entry *Entry) {\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/fatih\/color\"\n)\n\nfunc acceptCommand(flags acceptFlags, testname string) {\n\tif flags.all {\n\t\tacceptAll()\n\t\treturn\n\t}\n\n\tvar path string\n\tif flags.asm {\n\t\tfilename := testname + asmExt\n\t\tpath = buildPath(asmDir, filename)\n\t} else {\n\t\tfilename := testname + pikaExt\n\t\tpath = buildPath(pikaDir, filename)\n\t}\n\n\tif !exists(path) {\n\t\tcolor.Magenta(path + \" does not exist\")\n\t\tos.Exit(1)\n\t\treturn \/\/ not necessary, just to be explicit\n\t}\n\n\tif flags.asm {\n\t\tacceptOptimizedStandalone(testname)\n\t} else {\n\t\tacceptCodeGen(testname)\n\t\tacceptOptimized(testname)\n\t\tacceptCompile(testname)\n\t}\n}\n\nfunc acceptAll() {\n\tos.RemoveAll(backupDir)\n\tos.Rename(expectDir, backupDir)\n\tos.Rename(resultDir, expectDir)\n\tinitResultDirs()\n}\n\nfunc acceptCompile(testname string) {\n\tnew := buildPath(result.build.compiler, testname+txtExt)\n\told := buildPath(expect.build.compiler, testname+txtExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.asm.compiler, testname+asmExt)\n\told = buildPath(expect.asm.compiler, testname+asmExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.run.compiler, testname+txtExt)\n\told = buildPath(expect.run.compiler, testname+txtExt)\n\tmoveIfExists(old, new)\n}\n\nfunc acceptCodeGen(testname string) {\n\tnew := buildPath(result.build.codegenerator, testname+txtExt)\n\told := buildPath(expect.build.codegenerator, testname+txtExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.asm.codegenerator, testname+asmExt)\n\told = buildPath(expect.asm.codegenerator, testname+asmExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.run.codegenerator, testname+txtExt)\n\told = buildPath(expect.run.codegenerator, testname+txtExt)\n\tmoveIfExists(old, new)\n}\n\nfunc acceptOptimized(testname string) {\n\tnew := buildPath(result.build.optimizer, testname+txtExt)\n\told := buildPath(expect.build.optimizer, testname+txtExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.asm.optimizer, testname+asmoExt)\n\told = buildPath(expect.asm.optimizer, testname+asmoExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.run.optimizer, testname+txtExt)\n\told = buildPath(expect.run.optimizer, testname+txtExt)\n\tmoveIfExists(old, new)\n}\n\nfunc acceptOptimizedStandalone(testname string) {\n\tnew := buildPath(result.build.optimizerStandalone, testname+txtExt)\n\told := buildPath(expect.build.optimizerStandalone, testname+txtExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.asm.optimizerStandalone, testname+asmoExt)\n\told = buildPath(expect.asm.optimizerStandalone, testname+asmoExt)\n\tmoveIfExists(old, new)\n\n\tnew = buildPath(result.run.optimizerStandalone, testname+txtExt)\n\told = buildPath(expect.run.optimizerStandalone, testname+txtExt)\n\tmoveIfExists(old, new)\n}\n<commit_msg>fix bug in accepting single tests, and change accept -all for my workflow<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/fatih\/color\"\n)\n\nfunc acceptCommand(flags acceptFlags, testname string) {\n\tif flags.all {\n\t\tacceptAll()\n\t\treturn\n\t}\n\n\tvar path string\n\tif flags.asm {\n\t\tfilename := testname + asmExt\n\t\tpath = buildPath(asmDir, filename)\n\t} else {\n\t\tfilename := testname + pikaExt\n\t\tpath = buildPath(pikaDir, filename)\n\t}\n\n\tif !exists(path) {\n\t\tcolor.Magenta(path + \" does not exist\")\n\t\tos.Exit(1)\n\t\treturn \/\/ not necessary, just to be explicit\n\t}\n\n\tif flags.asm {\n\t\tacceptOptimizedStandalone(testname)\n\t} else {\n\t\tacceptCodeGen(testname)\n\t\tacceptOptimized(testname)\n\t\tacceptCompile(testname)\n\t}\n}\n\nfunc acceptAll() {\n\tos.RemoveAll(backupDir)\n\t\/\/ os.Rename(expectDir, backupDir)\n\t\/\/ os.Rename(resultDir, expectDir)\n\t\/\/ initResultDirs()\n\tbackup()\n\tacceptAllPika()\n\tacceptAllStandalone()\n}\n\nfunc acceptAllPika() {\n\tfiles := getAllFiles(pikaDir)\n\tfiles = filterOutFiles(files, \".gitignore\")\n\n\tfor _, file := range files {\n\t\ttestname := replaceExtension(file.Name(), \"\")\n\t\tacceptCodeGen(testname)\n\t\tacceptOptimized(testname)\n\t\tacceptCompile(testname)\n\t}\n}\n\nfunc acceptAllStandalone() {\n\tfiles := getAllFiles(asmDir)\n\tfiles = filterOutFiles(files, \".gitignore\")\n\n\tfor _, file := range files {\n\t\ttestname := replaceExtension(file.Name(), \"\")\n\t\tacceptOptimizedStandalone(testname)\n\t}\n}\n\nfunc backup() {\n\texec.Command(\"cp\", \"-rf\", expectDir, backupDir).Run()\n}\n\nfunc acceptTest(testname, oldPath, newPath, ext string) {\n\told := buildPath(oldPath, testname+ext)\n\tnew := buildPath(newPath, testname+ext)\n\tmoveIfExists(old, new)\n}\n\nfunc acceptCompile(testname string) {\n\tacceptTest(testname, result.build.compiler, expect.build.compiler, txtExt)\n\tacceptTest(testname, result.run.compiler, expect.run.compiler, txtExt)\n\tacceptTest(testname, result.asm.compiler, expect.asm.compiler, asmExt)\n\n\tacceptTest(testname, result.buildo.compiler, expect.buildo.compiler, txtExt)\n\tacceptTest(testname, result.asmo.compiler, expect.asmo.compiler, asmoExt)\n}\n\nfunc acceptCodeGen(testname string) {\n\tacceptTest(testname, result.build.codegenerator, expect.build.codegenerator, txtExt)\n\tacceptTest(testname, result.run.codegenerator, expect.run.codegenerator, txtExt)\n\tacceptTest(testname, result.asm.codegenerator, expect.asm.codegenerator, asmExt)\n}\n\nfunc acceptOptimized(testname string) {\n\tacceptTest(testname, result.build.optimizer, expect.build.optimizer, txtExt)\n\tacceptTest(testname, result.run.optimizer, expect.run.optimizer, txtExt)\n\tacceptTest(testname, result.asm.optimizer, expect.asm.optimizer, asmoExt)\n\n\tacceptTest(testname, result.buildo.optimizer, expect.buildo.optimizer, txtExt)\n\tacceptTest(testname, result.asmo.optimizer, expect.asmo.optimizer, asmoExt)\n}\n\nfunc acceptOptimizedStandalone(testname string) {\n\tacceptTest(testname, result.build.optimizerStandalone, expect.build.optimizerStandalone, txtExt)\n\tacceptTest(testname, result.run.optimizerStandalone, expect.run.optimizerStandalone, txtExt)\n\tacceptTest(testname, result.asm.optimizerStandalone, expect.asm.optimizerStandalone, asmoExt)\n\n\tacceptTest(testname, result.buildo.optimizerStandalone, expect.buildo.optimizerStandalone, txtExt)\n\tacceptTest(testname, result.asmo.optimizerStandalone, expect.asmo.optimizerStandalone, asmoExt)\n}\n<|endoftext|>"} {"text":"<commit_before>package acomm\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/cerana\/cerana\/pkg\/logrusx\"\n)\n\nconst (\n\tstatusStarted = iota\n\tstatusStopping\n\tstatusStopped\n)\n\n\/\/ Tracker keeps track of requests waiting on a response.\ntype Tracker struct {\n\tstatus int\n\tresponseListener *UnixListener\n\thttpStreamURL *url.URL\n\texternalProxyURL *url.URL\n\tdefaultTimeout time.Duration\n\trequestsLock sync.Mutex \/\/ Protects requests\n\trequests map[string]*Request\n\tdsLock sync.Mutex \/\/ Protects dataStreams\n\tdataStreams map[string]*UnixListener\n\twaitgroup sync.WaitGroup\n}\n\n\/\/ NewTracker creates and initializes a new Tracker. If a socketPath is not\n\/\/ provided, the response socket will be created in a temporary directory.\nfunc NewTracker(socketPath string, httpStreamURL, externalProxyURL *url.URL, defaultTimeout time.Duration) (*Tracker, error) {\n\tif socketPath == \"\" {\n\t\tvar err error\n\t\tsocketPath, err = generateTempSocketPath(\"\", \"acommTrackerResponses-\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif defaultTimeout <= 0 {\n\t\tdefaultTimeout = time.Minute\n\t}\n\n\treturn &Tracker{\n\t\tstatus: statusStopped,\n\t\tresponseListener: NewUnixListener(socketPath, 0),\n\t\thttpStreamURL: httpStreamURL,\n\t\texternalProxyURL: externalProxyURL,\n\t\tdataStreams: make(map[string]*UnixListener),\n\t\tdefaultTimeout: defaultTimeout,\n\t}, nil\n}\n\nfunc generateTempSocketPath(dir, prefix string) (string, error) {\n\t\/\/ Use TempFile to allocate a uniquely named file in either the specified\n\t\/\/ dir or the default temp dir. It is then removed so that the unix socket\n\t\/\/ can be created with that name.\n\t\/\/ TODO: Decide on permissions\n\tif dir != \"\" {\n\t\tif err := os.MkdirAll(dir, os.ModePerm); err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"directory\": dir,\n\t\t\t\t\"perm\": os.ModePerm,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"failed to create directory for socket\")\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tf, err := ioutil.TempFile(dir, prefix)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"failed to create temp file for response socket\")\n\t\treturn \"\", err\n\t}\n\t_ = f.Close()\n\t_ = os.Remove(f.Name())\n\n\treturn fmt.Sprintf(\"%s.sock\", f.Name()), nil\n}\n\n\/\/ NumRequests returns the number of tracked requests\nfunc (t *Tracker) NumRequests() int {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\treturn len(t.requests)\n}\n\n\/\/ Addr returns the string representation of the Tracker's response listener socket.\nfunc (t *Tracker) Addr() string {\n\treturn t.responseListener.Addr()\n}\n\n\/\/ URL returns the URL of the Tracker's response listener socket.\nfunc (t *Tracker) URL() *url.URL {\n\treturn t.responseListener.URL()\n}\n\n\/\/ Start activates the tracker. This allows tracking of requests as well as\n\/\/ listening for and handling responses.\nfunc (t *Tracker) Start() error {\n\tif t.status == statusStarted {\n\t\treturn nil\n\t}\n\tif t.status == statusStopping {\n\t\treturn errors.New(\"can't start tracker while stopping\")\n\t}\n\n\tt.requests = make(map[string]*Request)\n\n\t\/\/ start the proxy response listener\n\tif err := t.responseListener.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tgo t.listenForResponses()\n\n\tt.status = statusStarted\n\n\treturn nil\n}\n\n\/\/ listenForResponse continually accepts new responses on the listener.\nfunc (t *Tracker) listenForResponses() {\n\tfor {\n\t\tconn := t.responseListener.NextConn()\n\t\tif conn == nil {\n\t\t\treturn\n\t\t}\n\n\t\tgo t.handleConn(conn)\n\t}\n}\n\n\/\/ handleConn handles the response connection and parses the data.\nfunc (t *Tracker) handleConn(conn net.Conn) {\n\tdefer t.responseListener.DoneConn(conn)\n\n\tresp := &Response{}\n\tif err := UnmarshalConnData(conn, resp); err != nil {\n\t\treturn\n\t}\n\n\t_ = SendConnData(conn, &Response{})\n\n\tgo t.HandleResponse(resp)\n}\n\n\/\/ HandleResponse associates a response with a request and either forwards the\n\/\/ response or calls the request's handler.\nfunc (t *Tracker) HandleResponse(resp *Response) {\n\treq := t.retrieveRequest(resp.ID)\n\tif req == nil {\n\t\terr := errors.New(\"response does not have tracked request\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"response\": resp,\n\t\t}).Error(err)\n\t\treturn\n\t}\n\tdefer t.waitgroup.Done()\n\n\t\/\/ Stop the request timeout. The result doesn't matter.\n\tif req.timeout != nil {\n\t\t_ = req.timeout.Stop()\n\t}\n\n\t\/\/ If there are handlers, this is the final destination, so handle the\n\t\/\/ response. Otherwise, forward the response along.\n\t\/\/ Known issue: If this is the final destination and there are\n\t\/\/ no handlers, there will be an extra redirects back here. Since the\n\t\/\/ request has already been removed from the tracker, it will only happen\n\t\/\/ once.\n\tif !req.proxied {\n\t\treq.HandleResponse(resp)\n\t\treturn\n\t}\n\n\tif resp.StreamURL != nil {\n\t\tstreamURL, err := t.ProxyStreamHTTPURL(resp.StreamURL) \/\/ Replace the StreamURL with a proxy stream url\n\t\tif err != nil {\n\t\t\tstreamURL = nil\n\t\t}\n\t\tresp.StreamURL = streamURL\n\t}\n\n\t\/\/ Forward the response along\n\t_ = req.Respond(resp)\n\treturn\n}\n\n\/\/ Stop deactivates the tracker. It blocks until all active connections or tracked requests to finish.\nfunc (t *Tracker) Stop() {\n\t\/\/ Nothing to do if it's not listening.\n\tif t.responseListener == nil {\n\t\treturn\n\t}\n\n\t\/\/ Prevent new requests from being tracked\n\tt.status = statusStopping\n\n\t\/\/ Handle any requests that are expected\n\tt.waitgroup.Wait()\n\n\t\/\/ Stop listening for responses\n\tt.responseListener.Stop(0)\n\n\t\/\/ Stop any data streamers\n\tvar dsWG sync.WaitGroup\n\tt.dsLock.Lock()\n\tfor _, ds := range t.dataStreams {\n\t\tdsWG.Add(1)\n\t\tgo func(ds *UnixListener) {\n\t\t\tdefer dsWG.Done()\n\t\t\tds.Stop(0)\n\t\t}(ds)\n\t}\n\tt.dsLock.Unlock()\n\tdsWG.Wait()\n\n\tt.status = statusStopped\n\treturn\n}\n\n\/\/ TrackRequest tracks a request. This does not need to be called after using\n\/\/ ProxyUnix.\nfunc (t *Tracker) TrackRequest(req *Request, timeout time.Duration) error {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\tif t.status == statusStarted {\n\t\tif _, ok := t.requests[req.ID]; ok {\n\t\t\terr := errors.New(\"request id already traacked\")\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"request\": req,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(err)\n\t\t\treturn err\n\t\t}\n\t\tt.waitgroup.Add(1)\n\t\tt.requests[req.ID] = req\n\n\t\tt.setRequestTimeout(req, timeout)\n\t\treturn nil\n\t}\n\n\terr := errors.New(\"failed to track request in unstarted tracker\")\n\tlog.WithFields(log.Fields{\n\t\t\"request\": req,\n\t\t\"trackerStatus\": t.status,\n\t\t\"error\": err,\n\t}).Error(err)\n\treturn err\n}\n\n\/\/ RemoveRequest should be used to remove a tracked request. Use in cases such\n\/\/ as sending failures, where there is no hope of a response being received.\nfunc (t *Tracker) RemoveRequest(req *Request) bool {\n\tif r := t.retrieveRequest(req.ID); r != nil {\n\t\tif r.timeout != nil {\n\t\t\t_ = r.timeout.Stop()\n\t\t}\n\t\tt.waitgroup.Done()\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ retrieveRequest returns a tracked Request based on ID and stops tracking it.\nfunc (t *Tracker) retrieveRequest(id string) *Request {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\tif req, ok := t.requests[id]; ok {\n\t\tdelete(t.requests, id)\n\t\treturn req\n\t}\n\n\treturn nil\n}\n\nfunc (t *Tracker) setRequestTimeout(req *Request, timeout time.Duration) {\n\t\/\/ Fallback to default timeout\n\tif timeout <= 0 {\n\t\ttimeout = t.defaultTimeout\n\t}\n\n\tresp, err := NewResponse(req, nil, nil, errors.New(\"response timeout\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.timeout = time.AfterFunc(timeout, func() {\n\t\tt.HandleResponse(resp)\n\t})\n\treturn\n}\n\n\/\/ ProxyUnix proxies requests that have response hooks and stream urls of\n\/\/ non-unix sockets. If the response hook and stream url are already unix\n\/\/ sockets, it returns the original request. If the response hook is not, it\n\/\/ tracks the original request and returns a new request with a unix socket\n\/\/ response hook. If the stream url is not, it pipes the original stream\n\/\/ through a new unix socket and updates the stream url. The purpose of this is\n\/\/ so that there can be a single entry and exit point for external\n\/\/ communication, while local services can reply directly to each other.\nfunc (t *Tracker) ProxyUnix(req *Request, timeout time.Duration) (*Request, error) {\n\tif t.responseListener == nil {\n\t\terr := errors.New(\"request tracker's response listener not active\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\n\tif req.StreamURL != nil && req.StreamURL.Scheme != \"unix\" {\n\t\t\/\/ proxy the stream\n\t\tr, w := io.Pipe()\n\n\t\tgo func(src *url.URL) {\n\t\t\tdefer logrusx.LogReturnedErr(w.Close, nil, \"failed to close proxy stream writer\")\n\t\t\tif err := Stream(w, src); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"streamURL\": src,\n\t\t\t\t}).Error(\"failed to stream\")\n\t\t\t}\n\t\t}(req.StreamURL)\n\n\t\taddr, err := t.NewStreamUnix(\"\", r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.StreamURL = addr\n\t}\n\n\tunixReq := req\n\tif req.ResponseHook.Scheme != \"unix\" {\n\t\t\/\/ proxy the request\n\t\tunixReq = &Request{\n\t\t\tID: req.ID,\n\t\t\tTask: req.Task,\n\t\t\tResponseHook: t.responseListener.URL(),\n\t\t\tStreamURL: req.StreamURL,\n\t\t\tArgs: req.Args,\n\t\t\t\/\/ Success and ErrorHandler are unnecessary here and intentionally\n\t\t\t\/\/ omitted.\n\t\t}\n\t\tif err := t.TrackRequest(req, timeout); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq.proxied = true\n\t}\n\n\treturn unixReq, nil\n}\n\n\/\/ ProxyExternal proxies a request intended for an external destination\nfunc (t *Tracker) ProxyExternal(req *Request, timeout time.Duration) (*Request, error) {\n\tif t.externalProxyURL == nil {\n\t\terr := errors.New(\"tracker missing external proxy url\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\tif t.responseListener == nil {\n\t\terr := errors.New(\"request tracker's response listener not active\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\n\texternalReq := &Request{\n\t\tID: req.ID,\n\t\tTask: req.Task,\n\t\tResponseHook: t.externalProxyURL,\n\t\tStreamURL: req.StreamURL,\n\t\tArgs: req.Args,\n\t}\n\tif err := t.TrackRequest(req, timeout); err != nil {\n\t\treturn nil, err\n\t}\n\treq.proxied = true\n\n\treturn externalReq, nil\n}\n\n\/\/ ProxyExternalHandler is an HTTP HandlerFunc for proxying an external request.\nfunc (t *Tracker) ProxyExternalHandler(w http.ResponseWriter, r *http.Request) {\n\tresp := &Response{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, resp)\n\t}\n\n\tack := &Response{\n\t\tError: err,\n\t}\n\tackJSON, err := json.Marshal(ack)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"ack\": ack,\n\t\t}).Error(\"failed to marshal ack\")\n\t\treturn\n\t}\n\tif _, err := w.Write(ackJSON); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"ack\": ack,\n\t\t}).Error(\"failed to ack response\")\n\t\treturn\n\t}\n\n\tif ack.Error != nil {\n\t\treturn\n\t}\n\tt.HandleResponse(resp)\n}\n<commit_msg>Proxy StreamURL for external requests<commit_after>package acomm\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/cerana\/cerana\/pkg\/logrusx\"\n)\n\nconst (\n\tstatusStarted = iota\n\tstatusStopping\n\tstatusStopped\n)\n\n\/\/ Tracker keeps track of requests waiting on a response.\ntype Tracker struct {\n\tstatus int\n\tresponseListener *UnixListener\n\thttpStreamURL *url.URL\n\texternalProxyURL *url.URL\n\tdefaultTimeout time.Duration\n\trequestsLock sync.Mutex \/\/ Protects requests\n\trequests map[string]*Request\n\tdsLock sync.Mutex \/\/ Protects dataStreams\n\tdataStreams map[string]*UnixListener\n\twaitgroup sync.WaitGroup\n}\n\n\/\/ NewTracker creates and initializes a new Tracker. If a socketPath is not\n\/\/ provided, the response socket will be created in a temporary directory.\nfunc NewTracker(socketPath string, httpStreamURL, externalProxyURL *url.URL, defaultTimeout time.Duration) (*Tracker, error) {\n\tif socketPath == \"\" {\n\t\tvar err error\n\t\tsocketPath, err = generateTempSocketPath(\"\", \"acommTrackerResponses-\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif defaultTimeout <= 0 {\n\t\tdefaultTimeout = time.Minute\n\t}\n\n\treturn &Tracker{\n\t\tstatus: statusStopped,\n\t\tresponseListener: NewUnixListener(socketPath, 0),\n\t\thttpStreamURL: httpStreamURL,\n\t\texternalProxyURL: externalProxyURL,\n\t\tdataStreams: make(map[string]*UnixListener),\n\t\tdefaultTimeout: defaultTimeout,\n\t}, nil\n}\n\nfunc generateTempSocketPath(dir, prefix string) (string, error) {\n\t\/\/ Use TempFile to allocate a uniquely named file in either the specified\n\t\/\/ dir or the default temp dir. It is then removed so that the unix socket\n\t\/\/ can be created with that name.\n\t\/\/ TODO: Decide on permissions\n\tif dir != \"\" {\n\t\tif err := os.MkdirAll(dir, os.ModePerm); err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"directory\": dir,\n\t\t\t\t\"perm\": os.ModePerm,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"failed to create directory for socket\")\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tf, err := ioutil.TempFile(dir, prefix)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"failed to create temp file for response socket\")\n\t\treturn \"\", err\n\t}\n\t_ = f.Close()\n\t_ = os.Remove(f.Name())\n\n\treturn fmt.Sprintf(\"%s.sock\", f.Name()), nil\n}\n\n\/\/ NumRequests returns the number of tracked requests\nfunc (t *Tracker) NumRequests() int {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\treturn len(t.requests)\n}\n\n\/\/ Addr returns the string representation of the Tracker's response listener socket.\nfunc (t *Tracker) Addr() string {\n\treturn t.responseListener.Addr()\n}\n\n\/\/ URL returns the URL of the Tracker's response listener socket.\nfunc (t *Tracker) URL() *url.URL {\n\treturn t.responseListener.URL()\n}\n\n\/\/ Start activates the tracker. This allows tracking of requests as well as\n\/\/ listening for and handling responses.\nfunc (t *Tracker) Start() error {\n\tif t.status == statusStarted {\n\t\treturn nil\n\t}\n\tif t.status == statusStopping {\n\t\treturn errors.New(\"can't start tracker while stopping\")\n\t}\n\n\tt.requests = make(map[string]*Request)\n\n\t\/\/ start the proxy response listener\n\tif err := t.responseListener.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tgo t.listenForResponses()\n\n\tt.status = statusStarted\n\n\treturn nil\n}\n\n\/\/ listenForResponse continually accepts new responses on the listener.\nfunc (t *Tracker) listenForResponses() {\n\tfor {\n\t\tconn := t.responseListener.NextConn()\n\t\tif conn == nil {\n\t\t\treturn\n\t\t}\n\n\t\tgo t.handleConn(conn)\n\t}\n}\n\n\/\/ handleConn handles the response connection and parses the data.\nfunc (t *Tracker) handleConn(conn net.Conn) {\n\tdefer t.responseListener.DoneConn(conn)\n\n\tresp := &Response{}\n\tif err := UnmarshalConnData(conn, resp); err != nil {\n\t\treturn\n\t}\n\n\t_ = SendConnData(conn, &Response{})\n\n\tgo t.HandleResponse(resp)\n}\n\n\/\/ HandleResponse associates a response with a request and either forwards the\n\/\/ response or calls the request's handler.\nfunc (t *Tracker) HandleResponse(resp *Response) {\n\treq := t.retrieveRequest(resp.ID)\n\tif req == nil {\n\t\terr := errors.New(\"response does not have tracked request\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"response\": resp,\n\t\t}).Error(err)\n\t\treturn\n\t}\n\tdefer t.waitgroup.Done()\n\n\t\/\/ Stop the request timeout. The result doesn't matter.\n\tif req.timeout != nil {\n\t\t_ = req.timeout.Stop()\n\t}\n\n\t\/\/ If there are handlers, this is the final destination, so handle the\n\t\/\/ response. Otherwise, forward the response along.\n\t\/\/ Known issue: If this is the final destination and there are\n\t\/\/ no handlers, there will be an extra redirects back here. Since the\n\t\/\/ request has already been removed from the tracker, it will only happen\n\t\/\/ once.\n\tif !req.proxied {\n\t\treq.HandleResponse(resp)\n\t\treturn\n\t}\n\n\tif resp.StreamURL != nil {\n\t\tstreamURL, err := t.ProxyStreamHTTPURL(resp.StreamURL) \/\/ Replace the StreamURL with a proxy stream url\n\t\tif err != nil {\n\t\t\tstreamURL = nil\n\t\t}\n\t\tresp.StreamURL = streamURL\n\t}\n\n\t\/\/ Forward the response along\n\t_ = req.Respond(resp)\n\treturn\n}\n\n\/\/ Stop deactivates the tracker. It blocks until all active connections or tracked requests to finish.\nfunc (t *Tracker) Stop() {\n\t\/\/ Nothing to do if it's not listening.\n\tif t.responseListener == nil {\n\t\treturn\n\t}\n\n\t\/\/ Prevent new requests from being tracked\n\tt.status = statusStopping\n\n\t\/\/ Handle any requests that are expected\n\tt.waitgroup.Wait()\n\n\t\/\/ Stop listening for responses\n\tt.responseListener.Stop(0)\n\n\t\/\/ Stop any data streamers\n\tvar dsWG sync.WaitGroup\n\tt.dsLock.Lock()\n\tfor _, ds := range t.dataStreams {\n\t\tdsWG.Add(1)\n\t\tgo func(ds *UnixListener) {\n\t\t\tdefer dsWG.Done()\n\t\t\tds.Stop(0)\n\t\t}(ds)\n\t}\n\tt.dsLock.Unlock()\n\tdsWG.Wait()\n\n\tt.status = statusStopped\n\treturn\n}\n\n\/\/ TrackRequest tracks a request. This does not need to be called after using\n\/\/ ProxyUnix.\nfunc (t *Tracker) TrackRequest(req *Request, timeout time.Duration) error {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\tif t.status == statusStarted {\n\t\tif _, ok := t.requests[req.ID]; ok {\n\t\t\terr := errors.New(\"request id already traacked\")\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"request\": req,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(err)\n\t\t\treturn err\n\t\t}\n\t\tt.waitgroup.Add(1)\n\t\tt.requests[req.ID] = req\n\n\t\tt.setRequestTimeout(req, timeout)\n\t\treturn nil\n\t}\n\n\terr := errors.New(\"failed to track request in unstarted tracker\")\n\tlog.WithFields(log.Fields{\n\t\t\"request\": req,\n\t\t\"trackerStatus\": t.status,\n\t\t\"error\": err,\n\t}).Error(err)\n\treturn err\n}\n\n\/\/ RemoveRequest should be used to remove a tracked request. Use in cases such\n\/\/ as sending failures, where there is no hope of a response being received.\nfunc (t *Tracker) RemoveRequest(req *Request) bool {\n\tif r := t.retrieveRequest(req.ID); r != nil {\n\t\tif r.timeout != nil {\n\t\t\t_ = r.timeout.Stop()\n\t\t}\n\t\tt.waitgroup.Done()\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ retrieveRequest returns a tracked Request based on ID and stops tracking it.\nfunc (t *Tracker) retrieveRequest(id string) *Request {\n\tt.requestsLock.Lock()\n\tdefer t.requestsLock.Unlock()\n\n\tif req, ok := t.requests[id]; ok {\n\t\tdelete(t.requests, id)\n\t\treturn req\n\t}\n\n\treturn nil\n}\n\nfunc (t *Tracker) setRequestTimeout(req *Request, timeout time.Duration) {\n\t\/\/ Fallback to default timeout\n\tif timeout <= 0 {\n\t\ttimeout = t.defaultTimeout\n\t}\n\n\tresp, err := NewResponse(req, nil, nil, errors.New(\"response timeout\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.timeout = time.AfterFunc(timeout, func() {\n\t\tt.HandleResponse(resp)\n\t})\n\treturn\n}\n\n\/\/ ProxyUnix proxies requests that have response hooks and stream urls of\n\/\/ non-unix sockets. If the response hook and stream url are already unix\n\/\/ sockets, it returns the original request. If the response hook is not, it\n\/\/ tracks the original request and returns a new request with a unix socket\n\/\/ response hook. If the stream url is not, it pipes the original stream\n\/\/ through a new unix socket and updates the stream url. The purpose of this is\n\/\/ so that there can be a single entry and exit point for external\n\/\/ communication, while local services can reply directly to each other.\nfunc (t *Tracker) ProxyUnix(req *Request, timeout time.Duration) (*Request, error) {\n\tif t.responseListener == nil {\n\t\terr := errors.New(\"request tracker's response listener not active\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\n\tif req.StreamURL != nil && req.StreamURL.Scheme != \"unix\" {\n\t\t\/\/ proxy the stream\n\t\tr, w := io.Pipe()\n\n\t\tgo func(src *url.URL) {\n\t\t\tdefer logrusx.LogReturnedErr(w.Close, nil, \"failed to close proxy stream writer\")\n\t\t\tif err := Stream(w, src); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"streamURL\": src,\n\t\t\t\t}).Error(\"failed to stream\")\n\t\t\t}\n\t\t}(req.StreamURL)\n\n\t\taddr, err := t.NewStreamUnix(\"\", r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.StreamURL = addr\n\t}\n\n\tunixReq := req\n\tif req.ResponseHook.Scheme != \"unix\" {\n\t\t\/\/ proxy the request\n\t\tunixReq = &Request{\n\t\t\tID: req.ID,\n\t\t\tTask: req.Task,\n\t\t\tResponseHook: t.responseListener.URL(),\n\t\t\tStreamURL: req.StreamURL,\n\t\t\tArgs: req.Args,\n\t\t\t\/\/ Success and ErrorHandler are unnecessary here and intentionally\n\t\t\t\/\/ omitted.\n\t\t}\n\t\tif err := t.TrackRequest(req, timeout); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq.proxied = true\n\t}\n\n\treturn unixReq, nil\n}\n\n\/\/ ProxyExternal proxies a request intended for an external destination\nfunc (t *Tracker) ProxyExternal(req *Request, timeout time.Duration) (*Request, error) {\n\tif t.externalProxyURL == nil {\n\t\terr := errors.New(\"tracker missing external proxy url\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\tif t.responseListener == nil {\n\t\terr := errors.New(\"request tracker's response listener not active\")\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\n\texternalReq := &Request{\n\t\tID: req.ID,\n\t\tTask: req.Task,\n\t\tResponseHook: t.externalProxyURL,\n\t\tArgs: req.Args,\n\t}\n\tif req.StreamURL != nil {\n\t\tstreamURL, err := t.ProxyStreamHTTPURL(req.StreamURL) \/\/ Replace the StreamURL with a proxy stream url\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texternalReq.StreamURL = streamURL\n\t}\n\n\tif err := t.TrackRequest(req, timeout); err != nil {\n\t\treturn nil, err\n\t}\n\treq.proxied = true\n\n\treturn externalReq, nil\n}\n\n\/\/ ProxyExternalHandler is an HTTP HandlerFunc for proxying an external request.\nfunc (t *Tracker) ProxyExternalHandler(w http.ResponseWriter, r *http.Request) {\n\tresp := &Response{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, resp)\n\t}\n\n\tack := &Response{\n\t\tError: err,\n\t}\n\tackJSON, err := json.Marshal(ack)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"ack\": ack,\n\t\t}).Error(\"failed to marshal ack\")\n\t\treturn\n\t}\n\tif _, err := w.Write(ackJSON); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"ack\": ack,\n\t\t}).Error(\"failed to ack response\")\n\t\treturn\n\t}\n\n\tif ack.Error != nil {\n\t\treturn\n\t}\n\tt.HandleResponse(resp)\n}\n<|endoftext|>"} {"text":"<commit_before>package action\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/glide\/cfg\"\n\t\"github.com\/Masterminds\/glide\/msg\"\n\tgpath \"github.com\/Masterminds\/glide\/path\"\n\t\"github.com\/Masterminds\/glide\/util\"\n)\n\n\/\/ EnsureConfig loads and returns a config file.\n\/\/\n\/\/ Any error will cause an immediate exit, with an error printed to Stderr.\nfunc EnsureConfig() *cfg.Config {\n\tyamlpath, err := gpath.Glide()\n\tif err != nil {\n\t\tmsg.ExitCode(2)\n\t\tmsg.Die(\"Failed to find %s file in directory tree: %s\", gpath.GlideFile, err)\n\t}\n\n\tyml, err := ioutil.ReadFile(yamlpath)\n\tif err != nil {\n\t\tmsg.ExitCode(2)\n\t\tmsg.Die(\"Failed to load %s: %s\", yamlpath, err)\n\t}\n\tconf, err := cfg.ConfigFromYaml(yml)\n\tif err != nil {\n\t\tmsg.ExitCode(3)\n\t\tmsg.Die(\"Failed to parse %s: %s\", yamlpath, err)\n\t}\n\n\tb := filepath.Dir(yamlpath)\n\tbuildContext, err := util.GetBuildContext()\n\tif err != nil {\n\t\tmsg.Die(\"Failed to build an import context while ensuring config: %s\", err)\n\t}\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tmsg.Err(\"Unable to get the current working directory\")\n\t} else {\n\t\t\/\/ Determining a package name requires a relative path\n\t\tb, err = filepath.Rel(b, cwd)\n\t\tif err == nil {\n\t\t\tname := buildContext.PackageName(b)\n\t\t\tif name != conf.Name {\n\t\t\t\tmsg.Warn(\"The name listed in the config file (%s) does not match the current location (%s)\", conf.Name, name)\n\t\t\t}\n\t\t} else {\n\t\t\tmsg.Warn(\"Problem finding the config file path (%s) relative to the current directory (%s): %s\", b, cwd, err)\n\t\t}\n\t}\n\n\treturn conf\n}\n\n\/\/ EnsureCacheDir ensures the existence of the cache directory\nfunc EnsureCacheDir() {\n\tmsg.Warn(\"ensure.go: ensureCacheDir is not implemented.\")\n}\n\n\/\/ EnsureGoVendor ensures that the Go version is correct.\nfunc EnsureGoVendor() {\n\t\/\/ 6l was removed in 1.5, when vendoring was introduced.\n\tcmd := exec.Command(goExecutable(), \"tool\", \"6l\")\n\tif _, err := cmd.CombinedOutput(); err == nil {\n\t\tmsg.Warn(\"You must install the Go 1.5 or greater toolchain to work with Glide.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Check if this is go15, which requires GO15VENDOREXPERIMENT\n\t\/\/ Any release after go15 does not require that env var.\n\tcmd = exec.Command(goExecutable(), \"version\")\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tmsg.Err(\"Error getting version: %s.\\n\", err)\n\t\tos.Exit(1)\n\t} else if strings.HasPrefix(string(out), \"go version 1.5\") {\n\t\t\/\/ This works with 1.5 and 1.6.\n\t\tcmd = exec.Command(goExecutable(), \"env\", \"GO15VENDOREXPERIMENT\")\n\t\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\t\tmsg.Err(\"Error looking for $GOVENDOREXPERIMENT: %s.\\n\", err)\n\t\t\tos.Exit(1)\n\t\t} else if strings.TrimSpace(string(out)) != \"1\" {\n\t\t\tmsg.Err(\"To use Glide, you must set GO15VENDOREXPERIMENT=1\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ In the case where vendoring is explicitly disabled, balk.\n\tif os.Getenv(\"GO15VENDOREXPERIMENT\") == \"0\" {\n\t\tmsg.Err(\"To use Glide, you must set GO15VENDOREXPERIMENT=1\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Verify the setup isn't for the old version of glide. That is, this is\n\t\/\/ no longer assuming the _vendor directory as the GOPATH. Inform of\n\t\/\/ the change.\n\tif _, err := os.Stat(\"_vendor\/\"); err == nil {\n\t\tmsg.Warn(`Your setup appears to be for the previous version of Glide.\nPreviously, vendor packages were stored in _vendor\/src\/ and\n_vendor was set as your GOPATH. As of Go 1.5 the go tools\nrecognize the vendor directory as a location for these\nfiles. Glide has embraced this. Please remove the _vendor\ndirectory or move the _vendor\/src\/ directory to vendor\/.` + \"\\n\")\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ EnsureVendorDir ensures that a vendor\/ directory is present in the cwd.\nfunc EnsureVendorDir() {\n\tfi, err := os.Stat(gpath.VendorDir)\n\tif err != nil {\n\t\tmsg.Debug(\"Creating %s\", gpath.VendorDir)\n\t\tif err := os.MkdirAll(gpath.VendorDir, os.ModeDir|0755); err != nil {\n\t\t\tmsg.Die(\"Could not create %s: %s\", gpath.VendorDir, err)\n\t\t}\n\t} else if !fi.IsDir() {\n\t\tmsg.Die(\"Vendor is not a directory\")\n\t}\n}\n\n\/\/ EnsureGopath fails if GOPATH is not set, or if $GOPATH\/src is missing.\n\/\/\n\/\/ Otherwise it returns the value of GOPATH.\nfunc EnsureGopath() string {\n\tgps := gpath.Gopaths()\n\tif len(gps) == 0 {\n\t\tmsg.Die(\"$GOPATH is not set.\")\n\t}\n\n\tfor _, gp := range gps {\n\t\t_, err := os.Stat(path.Join(gp, \"src\"))\n\t\tif err != nil {\n\t\t\tmsg.Warn(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\treturn gp\n\t}\n\n\tmsg.Err(\"Could not find any of %s\/src.\\n\", strings.Join(gps, \"\/src, \"))\n\tmsg.Info(\"As of Glide 0.5\/Go 1.5, this is required.\\n\")\n\tmsg.Die(\"Wihtout src, cannot continue.\")\n\treturn \"\"\n}\n\n\n\/\/ goExecutable checks for a set environment variable of GLIDE_GO_EXECUTABLE\n\/\/ for the go executable name. The Google App Engine SDK ships with a python\n\/\/ wrapper called goapp\n\/\/\n\/\/ Example usage: GLIDE_GO_EXECUTABLE=goapp glide install\nfunc goExecutable() string {\n\tgoExecutable := os.Getenv(\"GLIDE_GO_EXECUTABLE\")\n\tif len(goExecutable) <= 0 {\n\t\tgoExecutable = \"go\"\n\t}\n\n\treturn goExecutable\n}\n<commit_msg>Fixed typo in action\/ensure.go<commit_after>package action\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/glide\/cfg\"\n\t\"github.com\/Masterminds\/glide\/msg\"\n\tgpath \"github.com\/Masterminds\/glide\/path\"\n\t\"github.com\/Masterminds\/glide\/util\"\n)\n\n\/\/ EnsureConfig loads and returns a config file.\n\/\/\n\/\/ Any error will cause an immediate exit, with an error printed to Stderr.\nfunc EnsureConfig() *cfg.Config {\n\tyamlpath, err := gpath.Glide()\n\tif err != nil {\n\t\tmsg.ExitCode(2)\n\t\tmsg.Die(\"Failed to find %s file in directory tree: %s\", gpath.GlideFile, err)\n\t}\n\n\tyml, err := ioutil.ReadFile(yamlpath)\n\tif err != nil {\n\t\tmsg.ExitCode(2)\n\t\tmsg.Die(\"Failed to load %s: %s\", yamlpath, err)\n\t}\n\tconf, err := cfg.ConfigFromYaml(yml)\n\tif err != nil {\n\t\tmsg.ExitCode(3)\n\t\tmsg.Die(\"Failed to parse %s: %s\", yamlpath, err)\n\t}\n\n\tb := filepath.Dir(yamlpath)\n\tbuildContext, err := util.GetBuildContext()\n\tif err != nil {\n\t\tmsg.Die(\"Failed to build an import context while ensuring config: %s\", err)\n\t}\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tmsg.Err(\"Unable to get the current working directory\")\n\t} else {\n\t\t\/\/ Determining a package name requires a relative path\n\t\tb, err = filepath.Rel(b, cwd)\n\t\tif err == nil {\n\t\t\tname := buildContext.PackageName(b)\n\t\t\tif name != conf.Name {\n\t\t\t\tmsg.Warn(\"The name listed in the config file (%s) does not match the current location (%s)\", conf.Name, name)\n\t\t\t}\n\t\t} else {\n\t\t\tmsg.Warn(\"Problem finding the config file path (%s) relative to the current directory (%s): %s\", b, cwd, err)\n\t\t}\n\t}\n\n\treturn conf\n}\n\n\/\/ EnsureCacheDir ensures the existence of the cache directory\nfunc EnsureCacheDir() {\n\tmsg.Warn(\"ensure.go: ensureCacheDir is not implemented.\")\n}\n\n\/\/ EnsureGoVendor ensures that the Go version is correct.\nfunc EnsureGoVendor() {\n\t\/\/ 6l was removed in 1.5, when vendoring was introduced.\n\tcmd := exec.Command(goExecutable(), \"tool\", \"6l\")\n\tif _, err := cmd.CombinedOutput(); err == nil {\n\t\tmsg.Warn(\"You must install the Go 1.5 or greater toolchain to work with Glide.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Check if this is go15, which requires GO15VENDOREXPERIMENT\n\t\/\/ Any release after go15 does not require that env var.\n\tcmd = exec.Command(goExecutable(), \"version\")\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tmsg.Err(\"Error getting version: %s.\\n\", err)\n\t\tos.Exit(1)\n\t} else if strings.HasPrefix(string(out), \"go version 1.5\") {\n\t\t\/\/ This works with 1.5 and 1.6.\n\t\tcmd = exec.Command(goExecutable(), \"env\", \"GO15VENDOREXPERIMENT\")\n\t\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\t\tmsg.Err(\"Error looking for $GOVENDOREXPERIMENT: %s.\\n\", err)\n\t\t\tos.Exit(1)\n\t\t} else if strings.TrimSpace(string(out)) != \"1\" {\n\t\t\tmsg.Err(\"To use Glide, you must set GO15VENDOREXPERIMENT=1\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ In the case where vendoring is explicitly disabled, balk.\n\tif os.Getenv(\"GO15VENDOREXPERIMENT\") == \"0\" {\n\t\tmsg.Err(\"To use Glide, you must set GO15VENDOREXPERIMENT=1\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Verify the setup isn't for the old version of glide. That is, this is\n\t\/\/ no longer assuming the _vendor directory as the GOPATH. Inform of\n\t\/\/ the change.\n\tif _, err := os.Stat(\"_vendor\/\"); err == nil {\n\t\tmsg.Warn(`Your setup appears to be for the previous version of Glide.\nPreviously, vendor packages were stored in _vendor\/src\/ and\n_vendor was set as your GOPATH. As of Go 1.5 the go tools\nrecognize the vendor directory as a location for these\nfiles. Glide has embraced this. Please remove the _vendor\ndirectory or move the _vendor\/src\/ directory to vendor\/.` + \"\\n\")\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ EnsureVendorDir ensures that a vendor\/ directory is present in the cwd.\nfunc EnsureVendorDir() {\n\tfi, err := os.Stat(gpath.VendorDir)\n\tif err != nil {\n\t\tmsg.Debug(\"Creating %s\", gpath.VendorDir)\n\t\tif err := os.MkdirAll(gpath.VendorDir, os.ModeDir|0755); err != nil {\n\t\t\tmsg.Die(\"Could not create %s: %s\", gpath.VendorDir, err)\n\t\t}\n\t} else if !fi.IsDir() {\n\t\tmsg.Die(\"Vendor is not a directory\")\n\t}\n}\n\n\/\/ EnsureGopath fails if GOPATH is not set, or if $GOPATH\/src is missing.\n\/\/\n\/\/ Otherwise it returns the value of GOPATH.\nfunc EnsureGopath() string {\n\tgps := gpath.Gopaths()\n\tif len(gps) == 0 {\n\t\tmsg.Die(\"$GOPATH is not set.\")\n\t}\n\n\tfor _, gp := range gps {\n\t\t_, err := os.Stat(path.Join(gp, \"src\"))\n\t\tif err != nil {\n\t\t\tmsg.Warn(\"%s\", err)\n\t\t\tcontinue\n\t\t}\n\t\treturn gp\n\t}\n\n\tmsg.Err(\"Could not find any of %s\/src.\\n\", strings.Join(gps, \"\/src, \"))\n\tmsg.Info(\"As of Glide 0.5\/Go 1.5, this is required.\\n\")\n\tmsg.Die(\"Without src, cannot continue.\")\n\treturn \"\"\n}\n\n\n\/\/ goExecutable checks for a set environment variable of GLIDE_GO_EXECUTABLE\n\/\/ for the go executable name. The Google App Engine SDK ships with a python\n\/\/ wrapper called goapp\n\/\/\n\/\/ Example usage: GLIDE_GO_EXECUTABLE=goapp glide install\nfunc goExecutable() string {\n\tgoExecutable := os.Getenv(\"GLIDE_GO_EXECUTABLE\")\n\tif len(goExecutable) <= 0 {\n\t\tgoExecutable = \"go\"\n\t}\n\n\treturn goExecutable\n}\n<|endoftext|>"} {"text":"<commit_before>package actions\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/arschles\/gci\/config\"\n\t\"github.com\/arschles\/gci\/dockutil\"\n\t\"github.com\/arschles\/gci\/log\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc Build(c *cli.Context) {\n\tconfig := config.ReadOrDie(c.String(FlagConfigFile))\n\tpaths := pathsOrDie()\n\tprojName := filepath.Base(paths.cwd)\n\tbinary := config.Build.GetOutputBinary(projName)\n\n\tdockerClient := dockutil.ClientOrDie()\n\n\tname := fmt.Sprintf(\"gci-build-%s-%s\", projName, uuid.New())\n\tcmd := []string{\"go\", \"build\", \"-o\", binary}\n\tcreateContainerOpts, hostConfig := dockutil.CreateAndStartContainerOpts(name, cmd, paths.gopath, paths.pkg)\n\tcontainer, err := dockerClient.CreateContainer(createContainerOpts)\n\tif err != nil {\n\t\tlog.Err(\"creating container [%s]\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlog.Msg(dockutil.CmdStr(createContainerOpts, hostConfig))\n\n\tif err := dockerClient.StartContainer(container.ID, &hostConfig); err != nil {\n\t\tlog.Err(\"starting container [%s]\", err)\n\t\tos.Exit(1)\n\t}\n\n\tattachOpts := dockutil.AttachToContainerOpts(container.ID, os.Stdout, os.Stderr)\n\tattachErrCh, waitErrCh, waitCodeCh := dockutil.AttachAndWait(dockerClient, container.ID, attachOpts)\n\n\tselect {\n\tcase err := <-attachErrCh:\n\t\tlog.Err(\"Attaching to the build container [%s]\", err)\n\t\tos.Exit(1)\n\tcase err := <-waitErrCh:\n\t\tlog.Err(\"Waiting for the build container to finish [%s]\", err)\n\t\tos.Exit(1)\n\tcase code := <-waitCodeCh:\n\t\tif code != 0 {\n\t\t\tlog.Err(\"Build exited %d\", code)\n\t\t\tos.Exit(code)\n\t\t} else {\n\t\t\tlog.Info(\"Success\")\n\t\t}\n\t}\n}\n<commit_msg>avoiding name collision with imported package<commit_after>package actions\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/arschles\/gci\/config\"\n\t\"github.com\/arschles\/gci\/dockutil\"\n\t\"github.com\/arschles\/gci\/log\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc Build(c *cli.Context) {\n\tcfg := config.ReadOrDie(c.String(FlagConfigFile))\n\tpaths := pathsOrDie()\n\tprojName := filepath.Base(paths.cwd)\n\tbinary := cfg.Build.GetOutputBinary(projName)\n\n\tdockerClient := dockutil.ClientOrDie()\n\n\tname := fmt.Sprintf(\"gci-build-%s-%s\", projName, uuid.New())\n\tcmd := []string{\"go\", \"build\", \"-o\", binary}\n\tcreateContainerOpts, hostConfig := dockutil.CreateAndStartContainerOpts(name, cmd, paths.gopath, paths.pkg)\n\tcontainer, err := dockerClient.CreateContainer(createContainerOpts)\n\tif err != nil {\n\t\tlog.Err(\"creating container [%s]\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlog.Msg(dockutil.CmdStr(createContainerOpts, hostConfig))\n\n\tif err := dockerClient.StartContainer(container.ID, &hostConfig); err != nil {\n\t\tlog.Err(\"starting container [%s]\", err)\n\t\tos.Exit(1)\n\t}\n\n\tattachOpts := dockutil.AttachToContainerOpts(container.ID, os.Stdout, os.Stderr)\n\tattachErrCh, waitErrCh, waitCodeCh := dockutil.AttachAndWait(dockerClient, container.ID, attachOpts)\n\n\tselect {\n\tcase err := <-attachErrCh:\n\t\tlog.Err(\"Attaching to the build container [%s]\", err)\n\t\tos.Exit(1)\n\tcase err := <-waitErrCh:\n\t\tlog.Err(\"Waiting for the build container to finish [%s]\", err)\n\t\tos.Exit(1)\n\tcase code := <-waitCodeCh:\n\t\tif code != 0 {\n\t\t\tlog.Err(\"Build exited %d\", code)\n\t\t\tos.Exit(code)\n\t\t} else {\n\t\t\tlog.Info(\"Success\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/dashboard\/dashapi\"\n\t\"github.com\/google\/syzkaller\/pkg\/asset\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"google.golang.org\/appengine\/v2\"\n\tdb \"google.golang.org\/appengine\/v2\/datastore\"\n\t\"google.golang.org\/appengine\/v2\/log\"\n)\n\n\/\/ TODO: decide if we want to save job-related assets.\n\nfunc appendBuildAssets(c context.Context, ns, buildID string, assets []Asset) (*Build, error) {\n\tvar retBuild *Build\n\ttx := func(c context.Context) error {\n\t\tbuild, err := loadBuild(c, ns, buildID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tretBuild = build\n\t\tappendedOk := false\n\t\tvar appendErr error\n\t\tfor _, newAsset := range assets {\n\t\t\tappendErr = build.AppendAsset(newAsset)\n\t\t\tif appendErr == nil {\n\t\t\t\tappendedOk = true\n\t\t\t}\n\t\t}\n\t\t\/\/ It took quite a number of resources to upload the files, so we return success\n\t\t\/\/ even if we managed to save at least one of the new assets.\n\t\tif !appendedOk {\n\t\t\treturn fmt.Errorf(\"failed to append all assets, last error %w\", appendErr)\n\t\t}\n\t\tif _, err := db.Put(c, buildKey(c, ns, buildID), build); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to put build: %w\", err)\n\t\t}\n\t\tlog.Infof(c, \"updated build: %#v\", build)\n\t\treturn nil\n\t}\n\tif err := db.RunInTransaction(c, tx, &db.TransactionOptions{}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn retBuild, nil\n}\n\nvar ErrAssetDuplicated = errors.New(\"an asset of this type is already present\")\n\nfunc (build *Build) AppendAsset(addAsset Asset) error {\n\ttypeInfo := asset.GetTypeDescription(addAsset.Type)\n\tif typeInfo == nil {\n\t\treturn fmt.Errorf(\"unknown asset type\")\n\t}\n\tif !typeInfo.AllowMultiple {\n\t\tfor _, obj := range build.Assets {\n\t\t\tif obj.Type == addAsset.Type {\n\t\t\t\treturn ErrAssetDuplicated\n\t\t\t}\n\t\t}\n\t}\n\tbuild.Assets = append(build.Assets, addAsset)\n\treturn nil\n}\n\nfunc queryNeededAssets(c context.Context) (*dashapi.NeededAssetsResp, error) {\n\t\/\/ So far only build assets.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Assets.DownloadURL>\", \"\").\n\t\tProject(\"Assets.DownloadURL\").\n\t\tGetAll(c, &builds)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to query builds: %w\", err)\n\t}\n\tlog.Infof(c, \"queried %v builds with assets\", len(builds))\n\tresp := &dashapi.NeededAssetsResp{}\n\tfor _, build := range builds {\n\t\tfor _, asset := range build.Assets {\n\t\t\tresp.DownloadURLs = append(resp.DownloadURLs, asset.DownloadURL)\n\t\t}\n\t}\n\treturn resp, nil\n}\n\nfunc handleDeprecateAssets(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tfor ns := range config.Namespaces {\n\t\terr := deprecateNamespaceAssets(c, ns)\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"deprecateNamespaceAssets failed for ns=%v: %v\", ns, err)\n\t\t}\n\t}\n}\n\nfunc deprecateNamespaceAssets(c context.Context, ns string) error {\n\tad := assetDeprecator{\n\t\tns: ns,\n\t\tc: c,\n\t}\n\tconst buildBatchSize = 16\n\terr := ad.batchProcessBuilds(buildBatchSize)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"build batch processing failed: %w\", err)\n\t}\n\treturn nil\n}\n\ntype assetDeprecator struct {\n\tns string\n\tc context.Context\n\tbugsQueried bool\n\trelevantBugs map[string]bool\n}\n\nconst keepAssetsForClosedBugs = time.Hour * 24 * 30\n\nfunc (ad *assetDeprecator) queryBugs() error {\n\tif ad.bugsQueried {\n\t\treturn nil\n\t}\n\tvar openBugKeys []*db.Key\n\tvar closedBugKeys []*db.Key\n\tg, _ := errgroup.WithContext(context.Background())\n\tg.Go(func() error {\n\t\t\/\/ Query open bugs.\n\t\tvar err error\n\t\topenBugKeys, err = db.NewQuery(\"Bug\").\n\t\t\tFilter(\"Namespace=\", ad.ns).\n\t\t\tFilter(\"Status=\", BugStatusOpen).\n\t\t\tKeysOnly().\n\t\t\tGetAll(ad.c, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch open builds: %w\", err)\n\t\t}\n\t\treturn nil\n\t})\n\tg.Go(func() error {\n\t\t\/\/ Query recently closed bugs.\n\t\tvar err error\n\t\tclosedBugKeys, err = db.NewQuery(\"Bug\").\n\t\t\tFilter(\"Namespace=\", ad.ns).\n\t\t\tFilter(\"Closed>\", timeNow(ad.c).Add(-keepAssetsForClosedBugs)).\n\t\t\tKeysOnly().\n\t\t\tGetAll(ad.c, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch closed builds: %w\", err)\n\t\t}\n\t\treturn nil\n\t})\n\terr := g.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to query bugs: %w\", err)\n\t}\n\tad.relevantBugs = map[string]bool{}\n\tfor _, key := range append(append([]*db.Key{}, openBugKeys...), closedBugKeys...) {\n\t\tad.relevantBugs[key.String()] = true\n\t}\n\treturn nil\n}\n\nfunc (ad *assetDeprecator) buildArchivePolicy(build *Build, asset *Asset) (bool, error) {\n\t\/\/ If the asset is reasonably new, we always keep it.\n\tconst alwaysKeepPeriod = time.Hour * 24 * 14\n\tif asset.CreateDate.After(timeNow(ad.c).Add(-alwaysKeepPeriod)) {\n\t\treturn true, nil\n\t}\n\t\/\/ Query builds to see whether there's a newer same-type asset on the same week.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ad.ns).\n\t\tFilter(\"Manager=\", build.Manager).\n\t\tFilter(\"Assets.Type=\", asset.Type).\n\t\tFilter(\"Assets.CreateDate>\", asset.CreateDate).\n\t\tLimit(1).\n\t\tOrder(\"Assets.CreateDate\").\n\t\tGetAll(ad.c, &builds)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query newer assets: %w\", err)\n\t}\n\tlog.Infof(ad.c, \"running archive policy for %s, date %s; queried %d builds\",\n\t\tasset.DownloadURL, asset.CreateDate, len(builds))\n\tsameWeek := false\n\tif len(builds) > 0 {\n\t\torigY, origW := asset.CreateDate.ISOWeek()\n\t\tfor _, nextAsset := range builds[0].Assets {\n\t\t\tif nextAsset.Type != asset.Type {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nextAsset.CreateDate.Before(asset.CreateDate) ||\n\t\t\t\tnextAsset.CreateDate.Equal(asset.CreateDate) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnextY, nextW := nextAsset.CreateDate.ISOWeek()\n\t\t\tif origY == nextY && origW == nextW {\n\t\t\t\tlog.Infof(ad.c, \"found a newer asset: %s, date %s\",\n\t\t\t\t\tnextAsset.DownloadURL, nextAsset.CreateDate)\n\t\t\t\tsameWeek = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn !sameWeek, nil\n}\n\nfunc (ad *assetDeprecator) buildBugStatusPolicy(build *Build) (bool, error) {\n\tif err := ad.queryBugs(); err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query bugs: %w\", err)\n\t}\n\tkeys, err := db.NewQuery(\"Crash\").\n\t\tFilter(\"BuildID=\", build.ID).\n\t\tKeysOnly().\n\t\tGetAll(ad.c, nil)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query crashes: %w\", err)\n\t}\n\tfor _, key := range keys {\n\t\tbugKey := key.Parent()\n\t\tif _, ok := ad.relevantBugs[bugKey.String()]; ok {\n\t\t\t\/\/ At least one crash is related to an opened\/recently closed bug.\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (ad *assetDeprecator) needThisBuildAsset(build *Build, buildAsset *Asset) (bool, error) {\n\tif buildAsset.Type == dashapi.HTMLCoverageReport {\n\t\t\/\/ We want to keep coverage reports forever, not just\n\t\t\/\/ while there are any open bugs. But we don't want to\n\t\t\/\/ keep all coverage reports, just a share of them.\n\t\treturn ad.buildArchivePolicy(build, buildAsset)\n\t}\n\tif build.Type == BuildNormal {\n\t\t\/\/ A build-related asset, keep it only while there are open bugs with crashes\n\t\t\/\/ related to this build.\n\t\treturn ad.buildBugStatusPolicy(build)\n\t}\n\t\/\/ TODO: fix this once this is no longer the case.\n\treturn false, fmt.Errorf(\"job-related assets are not supported yet\")\n}\n\nfunc (ad *assetDeprecator) updateBuild(buildID string, urlsToDelete []string) error {\n\ttoDelete := map[string]bool{}\n\tfor _, url := range urlsToDelete {\n\t\ttoDelete[url] = true\n\t}\n\ttx := func(c context.Context) error {\n\t\tbuild, err := loadBuild(ad.c, ad.ns, buildID)\n\t\tif build == nil || err != nil {\n\t\t\t\/\/ Assume the DB has been updated in the meanwhile.\n\t\t\treturn nil\n\t\t}\n\t\tnewAssets := []Asset{}\n\t\tfor _, asset := range build.Assets {\n\t\t\tif _, ok := toDelete[asset.DownloadURL]; !ok {\n\t\t\t\tnewAssets = append(newAssets, asset)\n\t\t\t}\n\t\t}\n\t\tbuild.Assets = newAssets\n\t\tbuild.AssetsLastCheck = timeNow(ad.c)\n\t\tif _, err := db.Put(ad.c, buildKey(ad.c, ad.ns, buildID), build); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to save build: %w\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tif err := db.RunInTransaction(ad.c, tx, nil); err != nil {\n\t\treturn fmt.Errorf(\"failed to update build: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (ad *assetDeprecator) batchProcessBuilds(count int) error {\n\t\/\/ We cannot query only the Build with non-empty Assets array and yet sort\n\t\/\/ by AssetsLastCheck. The datastore returns \"The first sort property must\n\t\/\/ be the same as the property to which the inequality filter is applied.\n\t\/\/ In your query the first sort property is AssetsLastCheck but the inequality\n\t\/\/ filter is on Assets.DownloadURL.\n\t\/\/ So we have to omit Filter(\"Assets.DownloadURL>\", \"\"). here.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ad.ns).\n\t\tOrder(\"AssetsLastCheck\").\n\t\tLimit(count).\n\t\tGetAll(ad.c, &builds)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to fetch builds: %w\", err)\n\t}\n\tfor _, build := range builds {\n\t\ttoDelete := []string{}\n\t\tfor _, asset := range build.Assets {\n\t\t\tneeded, err := ad.needThisBuildAsset(build, &asset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to test asset: %w\", err)\n\t\t\t} else if !needed {\n\t\t\t\ttoDelete = append(toDelete, asset.DownloadURL)\n\t\t\t}\n\t\t}\n\t\terr := ad.updateBuild(build.ID, toDelete)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc queryLatestManagerAssets(c context.Context, ns string, assetType dashapi.AssetType,\n\tperiod time.Duration) (map[string]Asset, error) {\n\tvar builds []*Build\n\tstartTime := timeNow(c).Add(-period)\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ns).\n\t\tFilter(\"Assets.Type=\", assetType).\n\t\tFilter(\"Assets.CreateDate>\", startTime).\n\t\tOrder(\"Assets.CreateDate\").\n\t\tGetAll(c, &builds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := map[string]Asset{}\n\tfor _, build := range builds {\n\t\tfor _, asset := range build.Assets {\n\t\t\tif asset.Type != assetType {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tret[build.Manager] = asset\n\t\t}\n\t}\n\treturn ret, nil\n}\n<commit_msg>dashboard: enable build error asset deprecation<commit_after>\/\/ Copyright 2022 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/dashboard\/dashapi\"\n\t\"github.com\/google\/syzkaller\/pkg\/asset\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"google.golang.org\/appengine\/v2\"\n\tdb \"google.golang.org\/appengine\/v2\/datastore\"\n\t\"google.golang.org\/appengine\/v2\/log\"\n)\n\n\/\/ TODO: decide if we want to save job-related assets.\n\nfunc appendBuildAssets(c context.Context, ns, buildID string, assets []Asset) (*Build, error) {\n\tvar retBuild *Build\n\ttx := func(c context.Context) error {\n\t\tbuild, err := loadBuild(c, ns, buildID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tretBuild = build\n\t\tappendedOk := false\n\t\tvar appendErr error\n\t\tfor _, newAsset := range assets {\n\t\t\tappendErr = build.AppendAsset(newAsset)\n\t\t\tif appendErr == nil {\n\t\t\t\tappendedOk = true\n\t\t\t}\n\t\t}\n\t\t\/\/ It took quite a number of resources to upload the files, so we return success\n\t\t\/\/ even if we managed to save at least one of the new assets.\n\t\tif !appendedOk {\n\t\t\treturn fmt.Errorf(\"failed to append all assets, last error %w\", appendErr)\n\t\t}\n\t\tif _, err := db.Put(c, buildKey(c, ns, buildID), build); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to put build: %w\", err)\n\t\t}\n\t\tlog.Infof(c, \"updated build: %#v\", build)\n\t\treturn nil\n\t}\n\tif err := db.RunInTransaction(c, tx, &db.TransactionOptions{}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn retBuild, nil\n}\n\nvar ErrAssetDuplicated = errors.New(\"an asset of this type is already present\")\n\nfunc (build *Build) AppendAsset(addAsset Asset) error {\n\ttypeInfo := asset.GetTypeDescription(addAsset.Type)\n\tif typeInfo == nil {\n\t\treturn fmt.Errorf(\"unknown asset type\")\n\t}\n\tif !typeInfo.AllowMultiple {\n\t\tfor _, obj := range build.Assets {\n\t\t\tif obj.Type == addAsset.Type {\n\t\t\t\treturn ErrAssetDuplicated\n\t\t\t}\n\t\t}\n\t}\n\tbuild.Assets = append(build.Assets, addAsset)\n\treturn nil\n}\n\nfunc queryNeededAssets(c context.Context) (*dashapi.NeededAssetsResp, error) {\n\t\/\/ So far only build assets.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Assets.DownloadURL>\", \"\").\n\t\tProject(\"Assets.DownloadURL\").\n\t\tGetAll(c, &builds)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to query builds: %w\", err)\n\t}\n\tlog.Infof(c, \"queried %v builds with assets\", len(builds))\n\tresp := &dashapi.NeededAssetsResp{}\n\tfor _, build := range builds {\n\t\tfor _, asset := range build.Assets {\n\t\t\tresp.DownloadURLs = append(resp.DownloadURLs, asset.DownloadURL)\n\t\t}\n\t}\n\treturn resp, nil\n}\n\nfunc handleDeprecateAssets(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tfor ns := range config.Namespaces {\n\t\terr := deprecateNamespaceAssets(c, ns)\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"deprecateNamespaceAssets failed for ns=%v: %v\", ns, err)\n\t\t}\n\t}\n}\n\nfunc deprecateNamespaceAssets(c context.Context, ns string) error {\n\tad := assetDeprecator{\n\t\tns: ns,\n\t\tc: c,\n\t}\n\tconst buildBatchSize = 16\n\terr := ad.batchProcessBuilds(buildBatchSize)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"build batch processing failed: %w\", err)\n\t}\n\treturn nil\n}\n\ntype assetDeprecator struct {\n\tns string\n\tc context.Context\n\tbugsQueried bool\n\trelevantBugs map[string]bool\n}\n\nconst keepAssetsForClosedBugs = time.Hour * 24 * 30\n\nfunc (ad *assetDeprecator) queryBugs() error {\n\tif ad.bugsQueried {\n\t\treturn nil\n\t}\n\tvar openBugKeys []*db.Key\n\tvar closedBugKeys []*db.Key\n\tg, _ := errgroup.WithContext(context.Background())\n\tg.Go(func() error {\n\t\t\/\/ Query open bugs.\n\t\tvar err error\n\t\topenBugKeys, err = db.NewQuery(\"Bug\").\n\t\t\tFilter(\"Namespace=\", ad.ns).\n\t\t\tFilter(\"Status=\", BugStatusOpen).\n\t\t\tKeysOnly().\n\t\t\tGetAll(ad.c, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch open builds: %w\", err)\n\t\t}\n\t\treturn nil\n\t})\n\tg.Go(func() error {\n\t\t\/\/ Query recently closed bugs.\n\t\tvar err error\n\t\tclosedBugKeys, err = db.NewQuery(\"Bug\").\n\t\t\tFilter(\"Namespace=\", ad.ns).\n\t\t\tFilter(\"Closed>\", timeNow(ad.c).Add(-keepAssetsForClosedBugs)).\n\t\t\tKeysOnly().\n\t\t\tGetAll(ad.c, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch closed builds: %w\", err)\n\t\t}\n\t\treturn nil\n\t})\n\terr := g.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to query bugs: %w\", err)\n\t}\n\tad.relevantBugs = map[string]bool{}\n\tfor _, key := range append(append([]*db.Key{}, openBugKeys...), closedBugKeys...) {\n\t\tad.relevantBugs[key.String()] = true\n\t}\n\treturn nil\n}\n\nfunc (ad *assetDeprecator) buildArchivePolicy(build *Build, asset *Asset) (bool, error) {\n\t\/\/ If the asset is reasonably new, we always keep it.\n\tconst alwaysKeepPeriod = time.Hour * 24 * 14\n\tif asset.CreateDate.After(timeNow(ad.c).Add(-alwaysKeepPeriod)) {\n\t\treturn true, nil\n\t}\n\t\/\/ Query builds to see whether there's a newer same-type asset on the same week.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ad.ns).\n\t\tFilter(\"Manager=\", build.Manager).\n\t\tFilter(\"Assets.Type=\", asset.Type).\n\t\tFilter(\"Assets.CreateDate>\", asset.CreateDate).\n\t\tLimit(1).\n\t\tOrder(\"Assets.CreateDate\").\n\t\tGetAll(ad.c, &builds)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query newer assets: %w\", err)\n\t}\n\tlog.Infof(ad.c, \"running archive policy for %s, date %s; queried %d builds\",\n\t\tasset.DownloadURL, asset.CreateDate, len(builds))\n\tsameWeek := false\n\tif len(builds) > 0 {\n\t\torigY, origW := asset.CreateDate.ISOWeek()\n\t\tfor _, nextAsset := range builds[0].Assets {\n\t\t\tif nextAsset.Type != asset.Type {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nextAsset.CreateDate.Before(asset.CreateDate) ||\n\t\t\t\tnextAsset.CreateDate.Equal(asset.CreateDate) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnextY, nextW := nextAsset.CreateDate.ISOWeek()\n\t\t\tif origY == nextY && origW == nextW {\n\t\t\t\tlog.Infof(ad.c, \"found a newer asset: %s, date %s\",\n\t\t\t\t\tnextAsset.DownloadURL, nextAsset.CreateDate)\n\t\t\t\tsameWeek = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn !sameWeek, nil\n}\n\nfunc (ad *assetDeprecator) buildBugStatusPolicy(build *Build) (bool, error) {\n\tif err := ad.queryBugs(); err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query bugs: %w\", err)\n\t}\n\tkeys, err := db.NewQuery(\"Crash\").\n\t\tFilter(\"BuildID=\", build.ID).\n\t\tKeysOnly().\n\t\tGetAll(ad.c, nil)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to query crashes: %w\", err)\n\t}\n\tfor _, key := range keys {\n\t\tbugKey := key.Parent()\n\t\tif _, ok := ad.relevantBugs[bugKey.String()]; ok {\n\t\t\t\/\/ At least one crash is related to an opened\/recently closed bug.\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (ad *assetDeprecator) needThisBuildAsset(build *Build, buildAsset *Asset) (bool, error) {\n\tif buildAsset.Type == dashapi.HTMLCoverageReport {\n\t\t\/\/ We want to keep coverage reports forever, not just\n\t\t\/\/ while there are any open bugs. But we don't want to\n\t\t\/\/ keep all coverage reports, just a share of them.\n\t\treturn ad.buildArchivePolicy(build, buildAsset)\n\t}\n\tif build.Type == BuildNormal || build.Type == BuildFailed {\n\t\t\/\/ A build-related asset, keep it only while there are open bugs with crashes\n\t\t\/\/ related to this build.\n\t\treturn ad.buildBugStatusPolicy(build)\n\t}\n\t\/\/ TODO: fix this once this is no longer the case.\n\treturn false, fmt.Errorf(\"job-related assets are not supported yet\")\n}\n\nfunc (ad *assetDeprecator) updateBuild(buildID string, urlsToDelete []string) error {\n\ttoDelete := map[string]bool{}\n\tfor _, url := range urlsToDelete {\n\t\ttoDelete[url] = true\n\t}\n\ttx := func(c context.Context) error {\n\t\tbuild, err := loadBuild(ad.c, ad.ns, buildID)\n\t\tif build == nil || err != nil {\n\t\t\t\/\/ Assume the DB has been updated in the meanwhile.\n\t\t\treturn nil\n\t\t}\n\t\tnewAssets := []Asset{}\n\t\tfor _, asset := range build.Assets {\n\t\t\tif _, ok := toDelete[asset.DownloadURL]; !ok {\n\t\t\t\tnewAssets = append(newAssets, asset)\n\t\t\t}\n\t\t}\n\t\tbuild.Assets = newAssets\n\t\tbuild.AssetsLastCheck = timeNow(ad.c)\n\t\tif _, err := db.Put(ad.c, buildKey(ad.c, ad.ns, buildID), build); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to save build: %w\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tif err := db.RunInTransaction(ad.c, tx, nil); err != nil {\n\t\treturn fmt.Errorf(\"failed to update build: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (ad *assetDeprecator) batchProcessBuilds(count int) error {\n\t\/\/ We cannot query only the Build with non-empty Assets array and yet sort\n\t\/\/ by AssetsLastCheck. The datastore returns \"The first sort property must\n\t\/\/ be the same as the property to which the inequality filter is applied.\n\t\/\/ In your query the first sort property is AssetsLastCheck but the inequality\n\t\/\/ filter is on Assets.DownloadURL.\n\t\/\/ So we have to omit Filter(\"Assets.DownloadURL>\", \"\"). here.\n\tvar builds []*Build\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ad.ns).\n\t\tOrder(\"AssetsLastCheck\").\n\t\tLimit(count).\n\t\tGetAll(ad.c, &builds)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to fetch builds: %w\", err)\n\t}\n\tfor _, build := range builds {\n\t\ttoDelete := []string{}\n\t\tfor _, asset := range build.Assets {\n\t\t\tneeded, err := ad.needThisBuildAsset(build, &asset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to test asset: %w\", err)\n\t\t\t} else if !needed {\n\t\t\t\ttoDelete = append(toDelete, asset.DownloadURL)\n\t\t\t}\n\t\t}\n\t\terr := ad.updateBuild(build.ID, toDelete)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc queryLatestManagerAssets(c context.Context, ns string, assetType dashapi.AssetType,\n\tperiod time.Duration) (map[string]Asset, error) {\n\tvar builds []*Build\n\tstartTime := timeNow(c).Add(-period)\n\t_, err := db.NewQuery(\"Build\").\n\t\tFilter(\"Namespace=\", ns).\n\t\tFilter(\"Assets.Type=\", assetType).\n\t\tFilter(\"Assets.CreateDate>\", startTime).\n\t\tOrder(\"Assets.CreateDate\").\n\t\tGetAll(c, &builds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := map[string]Asset{}\n\tfor _, build := range builds {\n\t\tfor _, asset := range build.Assets {\n\t\t\tif asset.Type != assetType {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tret[build.Manager] = asset\n\t\t}\n\t}\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Nick Patavalis (npat@efault.net).\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style license that can\n\/\/ be found in the LICENSE.txt file.\n\n\/\/ Package task provides types and functions for managing tasks. Task\n\/\/ is a process performed by a goroutine, or a group of related\n\/\/ goroutines, that can be Kill'ed (stopped) and Wait'ed-for. In\n\/\/ addition, a helper type is provided for controlling the life-cycle\n\/\/ of servers (starting, stopping, waiting for, and restarting them).\npackage task\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n)\n\nvar (\n\tErrNotStarted = errors.New(\"Server not started\")\n)\n\n\/\/ Task is a process performed by a goroutine, or a group of related\n\/\/ goroutines, that can be Kill'ed (stopped) and Wait'ed-for. Starting\n\/\/ the task is beyond the scope of this interface. Specific\n\/\/ implementations (like the ones included in this package), provide\n\/\/ methods to start the respective tasks. All Task interface methods\n\/\/ can be safely called concurently from multiple goroutines.\ntype Task interface {\n\t\/\/ Kill requests that the task terminates as soon as\n\t\/\/ possible. Kill returns immediately and does not wait for\n\t\/\/ the task to terminate. It is ok to call Kill multiple\n\t\/\/ times. After the first, subsequent calls do nothing.\n\tKill() Task\n\t\/\/ Wait waits for the task to terminate, and returns its exit\n\t\/\/ status. It is ok to call Wait multiple times. If called\n\t\/\/ after the task has terminated, it returns its exit status\n\t\/\/ immediately.\n\tWait() error\n\t\/\/ WaitChan returns a channel that will be closed when the\n\t\/\/ task terminates. WaitChan does not wait for the task to\n\t\/\/ terminate. Receiving from the returned channel blocks until\n\t\/\/ it does. Once the receive from the returned channel\n\t\/\/ succeeds, Wait can be called to retrieve the task's exit\n\t\/\/ status.\n\tWaitChan() <-chan struct{}\n}\n\n\/\/ StartFunc is an entry-point function that can be run as a task. The\n\/\/ function must take a context.Context as an argument, which may be\n\/\/ canceled to request the task's termination.\ntype StartFunc func(context.Context) error\n\n\/\/ Single is used to start a signle goroutine as a task (a goroutine\n\/\/ that can be killed and waited-for). All Single methods can be\n\/\/ called concurently.\ntype Single struct {\n\tcancel func()\n\tend chan struct{}\n\terr error\n}\n\n\/\/ Go starts a task using the given function as entry point.\nfunc Go(f StartFunc) *Single {\n\treturn GoWithContext(context.Background(), f)\n}\n\n\/\/ GoWithContext is similar to Go, but uses ctx as the parent of\n\/\/ the context that will be used for the task's cancelation.\nfunc GoWithContext(ctx context.Context, f StartFunc) *Single {\n\ts := &Single{}\n\tctx, s.cancel = context.WithCancel(ctx)\n\ts.end = make(chan struct{})\n\tgo func() {\n\t\ts.err = f(ctx)\n\t\ts.cancel()\n\t\tclose(s.end)\n\t}()\n\treturn s\n}\n\n\/\/ Kill requests task s to quit. Kill returns imediatelly (does not\n\/\/ wait for the task to terminate).\nfunc (s *Single) Kill() Task {\n\ts.cancel()\n\treturn s\n}\n\n\/\/ Wait waits for task s to terminate and returns its exit status\n\/\/ (i.e. the return value of its entry-point StartFunc).\nfunc (s *Single) Wait() error {\n\t<-s.end\n\treturn s.err\n}\n\n\/\/ WaitChan returns a channel that will be closed when the task\n\/\/ terminates. After the receive from the returned channel suceeds,\n\/\/ Wait can be called to retrieve the task's exit status. Usefull for\n\/\/ select statements.\nfunc (s *Single) WaitChan() <-chan struct{} {\n\treturn s.end\n}\n\n\/\/ Grp is used to start a set (a group) of goroutines as a single\n\/\/ task. All goroutines share the same cancelation context and thus\n\/\/ can be killed and waited-for collectively.\ntype Grp struct {\n\tsync.Mutex\n\tctx context.Context\n\tcancel func()\n\twg sync.WaitGroup\n\tkillOnError bool\n\tend chan struct{}\n\terr error\n}\n\n\/\/ NewGrp creates and returns a new group. Grp implements the Task\n\/\/ interface. Initially the group is empty (no running\n\/\/ goroutines). Grp.Go must be subsequently called to start goroutines\n\/\/ in the group.\nfunc NewGrp() *Grp {\n\tg := &Grp{}\n\tg.ctx, g.cancel = context.WithCancel(context.Background())\n\treturn g\n}\n\n\/\/ NewGrpWithContext is similar to NewGrp, but uses ctx as the parent\n\/\/ of the context that will be used for the task's cancelation.\nfunc NewGrpWithContext(ctx context.Context) *Grp {\n\tg := &Grp{}\n\tg.ctx, g.cancel = context.WithCancel(ctx)\n\treturn g\n}\n\n\/\/ KillOnError enables the kill-on-error behavior for the group (by\n\/\/ default disabled). If enabled, the task is Kill'ed if one of it's\n\/\/ goroutines returns a non-nil error. Usually KillOnError is called\n\/\/ before starting the group's goroutines.\nfunc (g *Grp) KillOnError() *Grp {\n\tg.Lock()\n\tg.killOnError = true\n\tg.Unlock()\n\treturn g\n}\n\n\/\/ Go starts a goroutine in the group using the StartFunc f as an\n\/\/ entry point. Go can be called multiple times to start multiple\n\/\/ goroutines.\nfunc (g *Grp) Go(f StartFunc) *Grp {\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\tif err := f(g.ctx); err != nil {\n\t\t\tg.Lock()\n\t\t\tif g.err == nil {\n\t\t\t\t\/\/ keep first non-nil error\n\t\t\t\tg.err = err\n\t\t\t}\n\t\t\tif g.killOnError {\n\t\t\t\tg.cancel()\n\t\t\t}\n\t\t\tg.Unlock()\n\t\t}\n\t}()\n\treturn g\n}\n\n\/\/ Kill requests task g to quit (signals all its goroutines to\n\/\/ terminate). Kill returns imediatelly (does not wait for the task to\n\/\/ terminate).\nfunc (g *Grp) Kill() Task {\n\tg.cancel()\n\treturn g\n}\n\n\/\/ Wait waits for task g to terminate and returns its exit-status. The\n\/\/ task terminates when all it's goroutines exit. As exit status of\n\/\/ the task is considered the first non-nil value returned by the\n\/\/ StartFunc of one of it's goroutines.\nfunc (g *Grp) Wait() error {\n\tg.wg.Wait()\n\tg.cancel()\n\treturn g.err\n}\n\n\/\/ WaitChan returns a channel that will be closed when task g\n\/\/ terminates.\nfunc (g *Grp) WaitChan() <-chan struct{} {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif g.end == nil {\n\t\tg.end = make(chan struct{})\n\t\tgo func() {\n\t\t\tg.Wait()\n\t\t\tclose(g.end)\n\t\t}()\n\t}\n\treturn g.end\n}\n\n\/\/ SrvCtl is a server controller. It is a helper type that povides\n\/\/ methods for starting, stopping, waiting, and re-starting\n\/\/ server-instances in a convenient, race-free manner. A pointer to\n\/\/ SrvCtl can be embedded (either anonymoysly or not) in the\n\/\/ controlled server type. SrvCtl is initialized (via NewSrvCtl) by\n\/\/ two functions (most likely method-values): One is spawned as the\n\/\/ sever-task (goroutine) when Start is called; the other (which may\n\/\/ be nil) is called immediately before. It is safe to call all SrvCtl\n\/\/ methods concurently. See example for details.\n\/\/\ntype SrvCtl struct {\n\tm sync.Mutex\n\tt *Single\n\tfnStart StartFunc \/\/ func(context.Context) error\n\tfnPre func()\n}\n\n\/\/ NewSrvCtl returns (a pointer to) an initialized server\n\/\/ controller. The returned pointer can be embedded (either\n\/\/ anonymously, or as a named field) in the controlled server type \/\n\/\/ structure. It is intialized with two functions (most likely\n\/\/ method-values): fnStart is the function that will be spawned as the\n\/\/ server task (goroutine). fnPre is a function to be called before\n\/\/ spawning the server goroutine. fnPre may be nil.\nfunc NewSrvCtl(fnStart StartFunc, fnPre func()) *SrvCtl {\n\treturn &SrvCtl{fnStart: fnStart, fnPre: fnPre}\n}\n\n\/\/ task is a helper that reads and returns a pointer to the\n\/\/ task-structure atomically\nfunc (sc *SrvCtl) task() *Single {\n\tsc.m.Lock()\n\tt := sc.t\n\tsc.m.Unlock()\n\treturn t\n}\n\n\/\/ Start starts the controlled server instance as a task (i.e in its\n\/\/ own goroutine). If already running, it stops it, waits for it to\n\/\/ terminate, and re-starts it.\nfunc (sc *SrvCtl) Start() Task {\n\tt := sc.task()\n\tif t != nil {\n\t\tt.Kill().Wait()\n\t}\n\tsc.m.Lock()\n\tdefer sc.m.Unlock()\n\tif sc.t != t {\n\t\t\/\/ Concurrent re-start.\n\t\t\/\/ Another racer won.\n\t\treturn sc\n\t}\n\tif sc.fnPre != nil {\n\t\tsc.fnPre()\n\t}\n\tsc.t = Go(sc.fnStart)\n\treturn sc\n}\n\n\/\/ Kill requests the termination of the controlled server instance. It\n\/\/ does not wait for the server to terminate.\nfunc (sc *SrvCtl) Kill() Task {\n\tt := sc.task()\n\tif t == nil {\n\t\treturn sc\n\t}\n\tt.Kill()\n\treturn sc\n}\n\n\/\/ Wait waits for the controlled server to terminate and returns it's\n\/\/ exit code (the return-value of the fnStart function, see\n\/\/ NewSrvCtl). If the managed server has never been started, returns\n\/\/ ErrNotStarted.\nfunc (sc *SrvCtl) Wait() error {\n\tt := sc.task()\n\tif t == nil {\n\t\treturn ErrNotStarted\n\t}\n\treturn t.Wait()\n}\n\n\/\/ WaitChan returns a channel from which the managed server's exit\n\/\/ code (the server's Serve return value) can be received. WaitChan\n\/\/ does not wait for the server to terminate. Reading from the channel\n\/\/ blocks the reader until it does. If the server has *never* been\n\/\/ started, a receive from the returned channel will yield\n\/\/ ErrNotStarted.\nfunc (sc *SrvCtl) WaitChan() <-chan struct{} {\n\tt := sc.task()\n\tif t == nil {\n\t\tcerr := make(chan struct{})\n\t\tclose(cerr)\n\t\treturn cerr\n\t}\n\treturn t.WaitChan()\n}\n<commit_msg>Comments<commit_after>\/\/ Copyright (c) 2016, Nick Patavalis (npat@efault.net).\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style license that can\n\/\/ be found in the LICENSE.txt file.\n\n\/\/ Package task provides types and functions for managing tasks. Task\n\/\/ is a process performed by a goroutine, or a group of related\n\/\/ goroutines, that can be Kill'ed (stopped) and Wait'ed-for. In\n\/\/ addition, a helper type is provided for controlling the life-cycle\n\/\/ of servers (starting, stopping, waiting for, and restarting them).\npackage task\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n)\n\nvar (\n\tErrNotStarted = errors.New(\"Server not started\")\n)\n\n\/\/ Task is a process performed by a goroutine, or a group of related\n\/\/ goroutines, that can be Kill'ed (stopped) and Wait'ed-for. Starting\n\/\/ the task is beyond the scope of this interface. Specific\n\/\/ implementations (like the ones included in this package), provide\n\/\/ methods to start the respective tasks. All Task interface methods\n\/\/ can be safely called concurently from multiple goroutines.\ntype Task interface {\n\t\/\/ Kill requests that the task terminates as soon as\n\t\/\/ possible. Kill returns immediately and does not wait for\n\t\/\/ the task to terminate. It is ok to call Kill multiple\n\t\/\/ times. After the first, subsequent calls do nothing.\n\tKill() Task\n\t\/\/ Wait waits for the task to terminate, and returns its exit\n\t\/\/ status. It is ok to call Wait multiple times. If called\n\t\/\/ after the task has terminated, it returns its exit status\n\t\/\/ immediately.\n\tWait() error\n\t\/\/ WaitChan returns a channel that will be closed when the\n\t\/\/ task terminates. WaitChan does not wait for the task to\n\t\/\/ terminate. Receiving from the returned channel blocks until\n\t\/\/ it does. Once the receive from the returned channel\n\t\/\/ succeeds, Wait can be called to retrieve the task's exit\n\t\/\/ status.\n\tWaitChan() <-chan struct{}\n}\n\n\/\/ StartFunc is an entry-point function that can be run as a task. The\n\/\/ function must take a context.Context as an argument, which may be\n\/\/ canceled to request the task's termination.\ntype StartFunc func(context.Context) error\n\n\/\/ Single is used to start a signle goroutine as a task (a goroutine\n\/\/ that can be killed and waited-for). All Single methods can be\n\/\/ called concurently.\ntype Single struct {\n\tcancel func()\n\tend chan struct{}\n\terr error\n}\n\n\/\/ Go starts a task using the given function as entry point.\nfunc Go(f StartFunc) *Single {\n\treturn GoWithContext(context.Background(), f)\n}\n\n\/\/ GoWithContext is similar to Go, but uses ctx as the parent of\n\/\/ the context that will be used for the task's cancelation.\nfunc GoWithContext(ctx context.Context, f StartFunc) *Single {\n\ts := &Single{}\n\tctx, s.cancel = context.WithCancel(ctx)\n\ts.end = make(chan struct{})\n\tgo func() {\n\t\ts.err = f(ctx)\n\t\ts.cancel()\n\t\tclose(s.end)\n\t}()\n\treturn s\n}\n\n\/\/ Kill requests task s to quit. Kill returns imediatelly (does not\n\/\/ wait for the task to terminate).\nfunc (s *Single) Kill() Task {\n\ts.cancel()\n\treturn s\n}\n\n\/\/ Wait waits for task s to terminate and returns its exit status\n\/\/ (i.e. the return value of its entry-point StartFunc).\nfunc (s *Single) Wait() error {\n\t<-s.end\n\treturn s.err\n}\n\n\/\/ WaitChan returns a channel that will be closed when the task\n\/\/ terminates. After the receive from the returned channel suceeds,\n\/\/ Wait can be called to retrieve the task's exit status. Usefull for\n\/\/ select statements.\nfunc (s *Single) WaitChan() <-chan struct{} {\n\treturn s.end\n}\n\n\/\/ Grp is used to start a set (a group) of goroutines as a single\n\/\/ task. All goroutines share the same cancelation context and thus\n\/\/ can be killed and waited-for collectively.\ntype Grp struct {\n\tsync.Mutex\n\tctx context.Context\n\tcancel func()\n\twg sync.WaitGroup\n\tkillOnError bool\n\tend chan struct{}\n\terr error\n}\n\n\/\/ NewGrp creates and returns a new group. Grp implements the Task\n\/\/ interface. Initially the group is empty (no running\n\/\/ goroutines). Grp.Go must be subsequently called to start goroutines\n\/\/ in the group.\nfunc NewGrp() *Grp {\n\tg := &Grp{}\n\tg.ctx, g.cancel = context.WithCancel(context.Background())\n\treturn g\n}\n\n\/\/ NewGrpWithContext is similar to NewGrp, but uses ctx as the parent\n\/\/ of the context that will be used for the task's cancelation.\nfunc NewGrpWithContext(ctx context.Context) *Grp {\n\tg := &Grp{}\n\tg.ctx, g.cancel = context.WithCancel(ctx)\n\treturn g\n}\n\n\/\/ KillOnError enables the kill-on-error behavior for the group (by\n\/\/ default disabled). If enabled, the task is Kill'ed if one of it's\n\/\/ goroutines returns a non-nil error. Usually KillOnError is called\n\/\/ before starting the group's goroutines.\nfunc (g *Grp) KillOnError() *Grp {\n\tg.Lock()\n\tg.killOnError = true\n\tg.Unlock()\n\treturn g\n}\n\n\/\/ Go starts a goroutine in the group using the StartFunc f as an\n\/\/ entry point. Go can be called multiple times to start multiple\n\/\/ goroutines.\nfunc (g *Grp) Go(f StartFunc) *Grp {\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\tif err := f(g.ctx); err != nil {\n\t\t\tg.Lock()\n\t\t\tif g.err == nil {\n\t\t\t\t\/\/ keep first non-nil error\n\t\t\t\tg.err = err\n\t\t\t}\n\t\t\tif g.killOnError {\n\t\t\t\tg.cancel()\n\t\t\t}\n\t\t\tg.Unlock()\n\t\t}\n\t}()\n\treturn g\n}\n\n\/\/ Kill requests task g to quit (signals all its goroutines to\n\/\/ terminate). Kill returns imediatelly (does not wait for the task to\n\/\/ terminate).\nfunc (g *Grp) Kill() Task {\n\tg.cancel()\n\treturn g\n}\n\n\/\/ Wait waits for task g to terminate and returns its exit-status. The\n\/\/ task terminates when all it's goroutines exit. As exit status of\n\/\/ the task is considered the first non-nil value returned by the\n\/\/ StartFunc of one of it's goroutines.\nfunc (g *Grp) Wait() error {\n\tg.wg.Wait()\n\tg.cancel()\n\treturn g.err\n}\n\n\/\/ WaitChan returns a channel that will be closed when task g\n\/\/ terminates.\nfunc (g *Grp) WaitChan() <-chan struct{} {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif g.end == nil {\n\t\tg.end = make(chan struct{})\n\t\tgo func() {\n\t\t\tg.Wait()\n\t\t\tclose(g.end)\n\t\t}()\n\t}\n\treturn g.end\n}\n\n\/\/ SrvCtl is a server controller. It is a helper type that povides\n\/\/ methods for starting, stopping, waiting, and re-starting\n\/\/ server-instances in a convenient, race-free manner. A pointer to\n\/\/ SrvCtl can be embedded (either anonymoysly or not) in the\n\/\/ controlled server type. SrvCtl is initialized (via NewSrvCtl) by\n\/\/ two functions (most likely method-values): One is spawned as the\n\/\/ sever-task (goroutine) when Start is called; the other (which may\n\/\/ be nil) is called immediately before. It is safe to call all SrvCtl\n\/\/ methods concurently. See example for details.\n\/\/\ntype SrvCtl struct {\n\tm sync.Mutex\n\tt *Single\n\tfnStart StartFunc \/\/ func(context.Context) error\n\tfnPre func()\n}\n\n\/\/ NewSrvCtl returns (a pointer to) an initialized server\n\/\/ controller. The returned pointer can be embedded (either\n\/\/ anonymously, or as a named field) in the controlled server type \/\n\/\/ structure. It is intialized with two functions (most likely\n\/\/ method-values): fnStart is the function that will be spawned as the\n\/\/ server task (goroutine). fnPre is a function to be called before\n\/\/ spawning the server goroutine. fnPre may be nil.\nfunc NewSrvCtl(fnStart StartFunc, fnPre func()) *SrvCtl {\n\treturn &SrvCtl{fnStart: fnStart, fnPre: fnPre}\n}\n\n\/\/ task is a helper that reads and returns a pointer to the\n\/\/ task-structure atomically\nfunc (sc *SrvCtl) task() *Single {\n\tsc.m.Lock()\n\tt := sc.t\n\tsc.m.Unlock()\n\treturn t\n}\n\n\/\/ Start starts the controlled server instance as a task (i.e in its\n\/\/ own goroutine). If already running, it stops it, waits for it to\n\/\/ terminate, and re-starts it.\nfunc (sc *SrvCtl) Start() Task {\n\tt := sc.task()\n\tif t != nil {\n\t\tt.Kill().Wait()\n\t}\n\tsc.m.Lock()\n\tdefer sc.m.Unlock()\n\tif sc.t != t {\n\t\t\/\/ Concurrent re-start.\n\t\t\/\/ Another racer won.\n\t\treturn sc\n\t}\n\tif sc.fnPre != nil {\n\t\tsc.fnPre()\n\t}\n\tsc.t = Go(sc.fnStart)\n\treturn sc\n}\n\n\/\/ Kill requests the termination of the controlled server instance. It\n\/\/ does not wait for the server to terminate.\nfunc (sc *SrvCtl) Kill() Task {\n\tt := sc.task()\n\tif t == nil {\n\t\treturn sc\n\t}\n\tt.Kill()\n\treturn sc\n}\n\n\/\/ Wait waits for the controlled server to terminate and returns it's\n\/\/ exit code (the return-value of the fnStart function, see\n\/\/ NewSrvCtl). If the managed server has never been started, returns\n\/\/ ErrNotStarted.\nfunc (sc *SrvCtl) Wait() error {\n\tt := sc.task()\n\tif t == nil {\n\t\treturn ErrNotStarted\n\t}\n\treturn t.Wait()\n}\n\n\/\/ WaitChan returns a channel that will be closed when the\n\/\/ controlled-server terminates. After the receive from the returned\n\/\/ channel suceeds, Wait can be called to retrieve the server's exit\n\/\/ status. Usefull for select statements.\nfunc (sc *SrvCtl) WaitChan() <-chan struct{} {\n\tt := sc.task()\n\tif t == nil {\n\t\tcerr := make(chan struct{})\n\t\tclose(cerr)\n\t\treturn cerr\n\t}\n\treturn t.WaitChan()\n}\n<|endoftext|>"} {"text":"<commit_before>package eval\n\nimport (\n\t\"os\"\n\t\"sync\"\n)\n\nconst (\n\tstdoutChanSize = 32\n\tstderrChanSize = 32\n)\n\ntype evalerPorts struct {\n\tports [3]*Port\n\trelayeWait *sync.WaitGroup\n}\n\nfunc newEvalerPorts(stdin, stdout, stderr *os.File, prefix *string) evalerPorts {\n\tstdoutChan := make(chan Value, stdoutChanSize)\n\tstderrChan := make(chan Value, stderrChanSize)\n\n\tvar relayerWait sync.WaitGroup\n\trelayerWait.Add(2)\n\tgo relayChanToFile(stdoutChan, stdout, prefix, &relayerWait)\n\tgo relayChanToFile(stderrChan, stderr, prefix, &relayerWait)\n\n\treturn evalerPorts{\n\t\t[3]*Port{\n\t\t\t&Port{File: stdin, Chan: ClosedChan},\n\t\t\t&Port{File: stdout, Chan: stdoutChan, CloseChan: true},\n\t\t\t&Port{File: stderr, Chan: stderrChan, CloseChan: true},\n\t\t},\n\t\t&relayerWait,\n\t}\n}\n\nfunc relayChanToFile(ch <-chan Value, file *os.File, prefix *string, w *sync.WaitGroup) {\n\tfor v := range ch {\n\t\tfile.WriteString(*prefix)\n\t\tfile.WriteString(v.Repr(initIndent))\n\t\tfile.WriteString(\"\\n\")\n\t}\n\tw.Done()\n}\n\nfunc (ep *evalerPorts) close() {\n\tep.ports[1].Close()\n\tep.ports[2].Close()\n\tep.relayeWait.Wait()\n}\n<commit_msg>gofmt -s<commit_after>package eval\n\nimport (\n\t\"os\"\n\t\"sync\"\n)\n\nconst (\n\tstdoutChanSize = 32\n\tstderrChanSize = 32\n)\n\ntype evalerPorts struct {\n\tports [3]*Port\n\trelayeWait *sync.WaitGroup\n}\n\nfunc newEvalerPorts(stdin, stdout, stderr *os.File, prefix *string) evalerPorts {\n\tstdoutChan := make(chan Value, stdoutChanSize)\n\tstderrChan := make(chan Value, stderrChanSize)\n\n\tvar relayerWait sync.WaitGroup\n\trelayerWait.Add(2)\n\tgo relayChanToFile(stdoutChan, stdout, prefix, &relayerWait)\n\tgo relayChanToFile(stderrChan, stderr, prefix, &relayerWait)\n\n\treturn evalerPorts{\n\t\t[3]*Port{\n\t\t\t{File: stdin, Chan: ClosedChan},\n\t\t\t{File: stdout, Chan: stdoutChan, CloseChan: true},\n\t\t\t{File: stderr, Chan: stderrChan, CloseChan: true},\n\t\t},\n\t\t&relayerWait,\n\t}\n}\n\nfunc relayChanToFile(ch <-chan Value, file *os.File, prefix *string, w *sync.WaitGroup) {\n\tfor v := range ch {\n\t\tfile.WriteString(*prefix)\n\t\tfile.WriteString(v.Repr(initIndent))\n\t\tfile.WriteString(\"\\n\")\n\t}\n\tw.Done()\n}\n\nfunc (ep *evalerPorts) close() {\n\tep.ports[1].Close()\n\tep.ports[2].Close()\n\tep.relayeWait.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package evaluation\n\nimport \"fmt\"\n\nfunc Err(ctx *Context, msg, tag string, fmts ...interface{}) Object {\n\tmsg = fmt.Sprintf(msg, fmts...)\n\n\te := &Instance{\n\t\tBase: ctx.Get(\"Error\"),\n\t}\n\n\tif e.Base == nil {\n\t\tpanic(\"Since the prelude isn't loaded, errors cannot be thrown!\")\n\t}\n\n\te.Data = map[string]Object{\n\t\t\"tag\": &String{Value: tag},\n\t\t\"msg\": &String{Value: msg},\n\t}\n\n\treturn e\n}\n\nfunc IsErr(o Object) bool {\n\tif instance, ok := o.(*Instance); ok {\n\t\treturn instance.Base.(*Class).Name == \"Error\"\n\t}\n\n\treturn false\n}\n<commit_msg>Add comments to error functions<commit_after>package evaluation\n\nimport \"fmt\"\n\n\/\/ Err returns an error object with the message and tag provided\nfunc Err(ctx *Context, msg, tag string, fmts ...interface{}) Object {\n\tmsg = fmt.Sprintf(msg, fmts...)\n\n\te := &Instance{\n\t\tBase: ctx.Get(\"Error\"),\n\t}\n\n\tif e.Base == nil {\n\t\tpanic(\"Since the prelude isn't loaded, errors cannot be thrown!\")\n\t}\n\n\te.Data = map[string]Object{\n\t\t\"tag\": &String{Value: tag},\n\t\t\"msg\": &String{Value: msg},\n\t}\n\n\treturn e\n}\n\n\/\/ IsErr checks if an object is an instance of Error\nfunc IsErr(o Object) bool {\n\tif instance, ok := o.(*Instance); ok {\n\t\treturn instance.Base.(*Class).Name == \"Error\"\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package aeutil\n\nimport (\n\t\"net\/http\"\n\t\"appengine\"\n)\n\nvar (\n\t\/\/ To be set by an external environment, if it wants control over context creation\n\tContextProvider func(*http.Request) appengine.Context\n)\n\nfunc init() {\n\tContextProvider = func(r *http.Request) appengine.Context {\n\t\treturn appengine.NewContext(r)\n\t}\n}\n\nfunc UncurryWithContext(handler func(http.ResponseWriter, *http.Request, appengine.Context)) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(w, r, ContextProvider(r))\n\t}\n}\n<commit_msg>more GAE work<commit_after>package aeutil\n\nimport (\n\t\"net\/http\"\n\t\"appengine\"\n\t\"appengine\/aetest\"\n)\n\nvar (\n\t\/\/ To be set by an external environment, if it wants control over context creation\n\tContextProvider func(*http.Request) appengine.Context\n)\n\nfunc init() {\n\tContextProvider = func(r *http.Request) appengine.Context {\n\t\treturn appengine.NewContext(r)\n\t}\n}\n\nfunc UncurryWithContext(handler func(http.ResponseWriter, *http.Request, appengine.Context)) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(w, r, ContextProvider(r))\n\t}\n}\n\n\/**\n * The Context returned from this function is registered with the current ContextProvider,\n * so calls to UncurryWithContext will resolve to this context.\n *\/\nfunc CreateAndRegisterTestContext() (context aetest.Context, err error) {\n\tcontext, err = aetest.NewContext(nil)\n\n\t\/\/ TODO change this function to maintain and consult a map, so we can\n\t\/\/ make multiple requests in parallel \/ in a recursive fashion\n\tContextProvider = func(*http.Request) appengine.Context { return context }\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package faker\n\nimport \"github.com\/raitucarp\/faker\/locales\"\n\ntype Gender int\n\nconst (\n\tMale Gender = iota\n\tFemale\n\tRandomGender\n)\n\nvar gender Gender\n\ntype Faker struct {\n\tName Name\n\tAddress Address\n\tCommerce Commerce\n\tCompany Company\n\tDate Date\n\tFinance Finance\n\tHacker Hacker\n\tImage Image\n\tInternet Internet\n\tLorem Lorem\n\n\t\/\/ temp data handling\n\tdata interface{}\n}\n\ntype Definitions map[string]interface{}\n\nvar data interface{}\n\nfunc (f *Faker) generate() {\n\terr := SetLocale(locales.EN)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc New() (f Faker) {\n\tf.generate()\n\n\treturn\n}\n\n\/*\nfunc SetGender(g Gender) {\n\tgender = g\n}*\/\n<commit_msg>add phone field<commit_after>package faker\n\nimport \"github.com\/raitucarp\/faker\/locales\"\n\ntype Gender int\n\nconst (\n\tMale Gender = iota\n\tFemale\n\tRandomGender\n)\n\nvar gender Gender\n\ntype Faker struct {\n\tName Name\n\tAddress Address\n\tCommerce Commerce\n\tCompany Company\n\tDate Date\n\tFinance Finance\n\tHacker Hacker\n\tImage Image\n\tInternet Internet\n\tLorem Lorem\n\tPhone Phone\n\t\/\/ temp data handling\n\tdata interface{}\n}\n\ntype Definitions map[string]interface{}\n\nvar data interface{}\n\nfunc (f *Faker) generate() {\n\terr := SetLocale(locales.EN)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc New() (f Faker) {\n\tf.generate()\n\n\treturn\n}\n\n\/*\nfunc SetGender(g Gender) {\n\tgender = g\n}*\/\n<|endoftext|>"} {"text":"<commit_before>package goinsta\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ Feed is the object for all feed endpoints.\ntype Feed struct {\n\tinst *Instagram\n}\n\n\/\/ newFeed creates new Feed structure\nfunc newFeed(inst *Instagram) *Feed {\n\treturn &Feed{\n\t\tinst: inst,\n\t}\n}\n\n\/\/ Feed search by locationID\nfunc (feed *Feed) LocationID(locationID int64) (*FeedLocation, error) {\n\tinsta := feed.inst\n\tbody, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: fmt.Sprintf(urlFeedLocationID, locationID),\n\t\t\tQuery: map[string]string{\n\t\t\t\t\"rank_token\": insta.rankToken,\n\t\t\t\t\"ranked_content\": \"true\",\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &FeedLocation{}\n\terr = json.Unmarshal(body, res)\n\treturn res, err\n}\n\n\/\/ FeedLocation is the struct that fits the structure returned by instagram on LocationID search.\ntype FeedLocation struct {\n\tRankedItems []Item `json:\"ranked_items\"`\n\tItems []Item `json:\"items\"`\n\tNumResults int `json:\"num_results\"`\n\tNextID string `json:\"next_max_id\"`\n\tMoreAvailable bool `json:\"more_available\"`\n\tAutoLoadMoreEnabled bool `json:\"auto_load_more_enabled\"`\n\tMediaCount int `json:\"media_count\"`\n\tLocation Location `json:\"location\"`\n\tStatus string `json:\"status\"`\n}\n\n\/\/ Tags search by Tag in user Feed\n\/\/\n\/\/ (sorry for returning FeedTag. See #FeedTag)\nfunc (feed *Feed) Tags(tag string) (*FeedTag, error) {\n\tinsta := feed.inst\n\tbody, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: fmt.Sprintf(urlFeedTag, tag),\n\t\t\tQuery: map[string]string{\n\t\t\t\t\"rank_token\": insta.rankToken,\n\t\t\t\t\"ranked_content\": \"true\",\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := &FeedTag{}\n\terr = json.Unmarshal(body, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range res.RankedItems {\n\t\tres.RankedItems[i].media = &FeedMedia{\n\t\t\tinst: insta,\n\t\t\tNextID: res.RankedItems[i].ID,\n\t\t}\n\t}\n\treturn res, nil\n}\n\n\/\/ FeedTag is the struct that fits the structure returned by instagram on TagSearch.\ntype FeedTag struct {\n\tRankedItems []Item `json:\"ranked_items\"`\n\tImages []Item `json:\"items\"`\n\tNumResults int `json:\"num_results\"`\n\tNextID string `json:\"next_max_id\"`\n\tMoreAvailable bool `json:\"more_available\"`\n\tAutoLoadMoreEnabled bool `json:\"auto_load_more_enabled\"`\n\tStory StoryMedia `json:\"story\"`\n\tStatus string `json:\"status\"`\n}\n<commit_msg>Assign missing value to Image's media attribute (#252)<commit_after>package goinsta\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ Feed is the object for all feed endpoints.\ntype Feed struct {\n\tinst *Instagram\n}\n\n\/\/ newFeed creates new Feed structure\nfunc newFeed(inst *Instagram) *Feed {\n\treturn &Feed{\n\t\tinst: inst,\n\t}\n}\n\n\/\/ Feed search by locationID\nfunc (feed *Feed) LocationID(locationID int64) (*FeedLocation, error) {\n\tinsta := feed.inst\n\tbody, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: fmt.Sprintf(urlFeedLocationID, locationID),\n\t\t\tQuery: map[string]string{\n\t\t\t\t\"rank_token\": insta.rankToken,\n\t\t\t\t\"ranked_content\": \"true\",\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &FeedLocation{}\n\terr = json.Unmarshal(body, res)\n\treturn res, err\n}\n\n\/\/ FeedLocation is the struct that fits the structure returned by instagram on LocationID search.\ntype FeedLocation struct {\n\tRankedItems []Item `json:\"ranked_items\"`\n\tItems []Item `json:\"items\"`\n\tNumResults int `json:\"num_results\"`\n\tNextID string `json:\"next_max_id\"`\n\tMoreAvailable bool `json:\"more_available\"`\n\tAutoLoadMoreEnabled bool `json:\"auto_load_more_enabled\"`\n\tMediaCount int `json:\"media_count\"`\n\tLocation Location `json:\"location\"`\n\tStatus string `json:\"status\"`\n}\n\n\/\/ Tags search by Tag in user Feed\n\/\/\n\/\/ (sorry for returning FeedTag. See #FeedTag)\nfunc (feed *Feed) Tags(tag string) (*FeedTag, error) {\n\tinsta := feed.inst\n\tbody, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: fmt.Sprintf(urlFeedTag, tag),\n\t\t\tQuery: map[string]string{\n\t\t\t\t\"rank_token\": insta.rankToken,\n\t\t\t\t\"ranked_content\": \"true\",\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := &FeedTag{}\n\terr = json.Unmarshal(body, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range res.RankedItems {\n\t\tres.RankedItems[i].media = &FeedMedia{\n\t\t\tinst: insta,\n\t\t\tNextID: res.RankedItems[i].ID,\n\t\t}\n\t}\n\n\tfor i := range res.Images {\n\t\tres.Images[i].media = &FeedMedia{\n\t\t\tinst: insta,\n\t\t\tNextID: res.Images[i].ID,\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\n\/\/ FeedTag is the struct that fits the structure returned by instagram on TagSearch.\ntype FeedTag struct {\n\tRankedItems []Item `json:\"ranked_items\"`\n\tImages []Item `json:\"items\"`\n\tNumResults int `json:\"num_results\"`\n\tNextID string `json:\"next_max_id\"`\n\tMoreAvailable bool `json:\"more_available\"`\n\tAutoLoadMoreEnabled bool `json:\"auto_load_more_enabled\"`\n\tStory StoryMedia `json:\"story\"`\n\tStatus string `json:\"status\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Feeds.go contain HTTP routes for rendering RSS and Atom feeds.\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/lib\/pq\"\n\t\"github.com\/martini-contrib\/render\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\n\/\/ ReadFeed renders RSS or Atom feed of latest published posts.\n\/\/ It determines the feed type with strings.Split(r.URL.Path[1:], \"\/\")[1].\nfunc ReadFeed(w http.ResponseWriter, res render.Render, db *gorm.DB, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\n\turlhost := urlHost()\n\n\tfeed := &feeds.Feed{\n\t\tTitle: Settings.Name,\n\t\tLink: &feeds.Link{Href: urlhost},\n\t\tDescription: Settings.Description,\n\t}\n\n\tvar post Post\n\tposts, err := post.GetAll(db)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\treturn\n\t}\n\n\tfor _, post := range posts {\n\n\t\tvar user User\n\t\tuser.ID = post.Author\n\t\tuser, err := user.Get(db)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ The email in &feeds.Author is not actually exported, as it is left out by user.Get().\n\t\t\/\/ However, the package panics if too few values are exported, so that will do.\n\t\n\t\titem := &feeds.Item{\n\t\t\tTitle: post.Title,\n\t\t\tLink: &feeds.Link{Href: urlhost + post.Slug},\n\t\t\tDescription: post.Excerpt,\n\t\t\tAuthor: &feeds.Author{user.Name, user.Email},\n\t\t\tCreated: time.Unix(post.Date, 0),\n\t\t}\n\t\tfeed.Items = append(feed.Items, item)\n\n\t}\n\n\t\/\/ Default to RSS feed.\n\tresult, err := feed.ToRss()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\treturn\n\t}\n\n\tformat := strings.Split(r.URL.Path[1:], \"\/\")[1]\n\tif format == \"atom\" {\n\t\tresult, err = feed.ToAtom()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Write([]byte(result))\n}\n<commit_msg>updated feeds.go to prevent exposure of unpublished items<commit_after>\/\/ Feeds.go contain HTTP routes for rendering RSS and Atom feeds.\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/lib\/pq\"\n\t\"github.com\/martini-contrib\/render\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\n\/\/ ReadFeed renders RSS or Atom feed of latest published posts.\n\/\/ It determines the feed type with strings.Split(r.URL.Path[1:], \"\/\")[1].\nfunc ReadFeed(w http.ResponseWriter, res render.Render, db *gorm.DB, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\n\turlhost := urlHost()\n\n\tfeed := &feeds.Feed{\n\t\tTitle: Settings.Name,\n\t\tLink: &feeds.Link{Href: urlhost},\n\t\tDescription: Settings.Description,\n\t}\n\n\tvar post Post\n\tposts, err := post.GetAll(db)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\treturn\n\t}\n\n\tfor _, post := range posts {\n\n\t\tvar user User\n\t\tuser.ID = post.Author\n\t\tuser, err := user.Get(db)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Don't expose unpublished items to the feeds\n\t\tif !post.Published {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ The email in &feeds.Author is not actually exported, as it is left out by user.Get().\n\t\t\/\/ However, the package panics if too few values are exported, so that will do.\n\t\titem := &feeds.Item{\n\t\t\tTitle: post.Title,\n\t\t\tLink: &feeds.Link{Href: urlhost + post.Slug},\n\t\t\tDescription: post.Excerpt,\n\t\t\tAuthor: &feeds.Author{user.Name, user.Email},\n\t\t\tCreated: time.Unix(post.Date, 0),\n\t\t}\n\t\tfeed.Items = append(feed.Items, item)\n\n\t}\n\n\t\/\/ Default to RSS feed.\n\tresult, err := feed.ToRss()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\treturn\n\t}\n\n\tformat := strings.Split(r.URL.Path[1:], \"\/\")[1]\n\tif format == \"atom\" {\n\t\tresult, err = feed.ToAtom()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Write([]byte(result))\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/mdlayher\/wavepipe\/data\"\n)\n\n\/\/ httpStream provides a common method to transfer a file stream using a HTTP response writer\nfunc httpStream(song *data.Song, mimeType string, fileSize int64, stream io.ReadCloser, httpRes http.ResponseWriter) error {\n\t\/\/ Total bytes transferred\n\tvar total int64\n\n\t\/\/ Track the stream's progress via log\n\tstopProgressChan := make(chan struct{})\n\tgo func() {\n\t\t\/\/ Track start time\n\t\tstartTime := time.Now()\n\n\t\t\/\/ Print progress every 5 seconds\n\t\tprogress := time.NewTicker(5 * time.Second)\n\n\t\t\/\/ Calculate total file size\n\t\ttotalSize := float64(fileSize) \/ 1024 \/ 1024\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ Print progress\n\t\t\tcase <-progress.C:\n\t\t\t\t\/\/ Capture current progress\n\t\t\t\tcurrTotal := atomic.LoadInt64(&total)\n\t\t\t\tcurrent := float64(currTotal) \/ 1024 \/ 1024\n\n\t\t\t\t\/\/ Capture current transfer rate\n\t\t\t\trate := float64(float64((currTotal*8)\/1024\/1024) \/ float64(time.Now().Sub(startTime).Seconds()))\n\n\t\t\t\t\/\/ If size available, we can print percentage and file sizes\n\t\t\t\tif fileSize > 0 {\n\t\t\t\t\t\/\/ Capture current percentage\n\t\t\t\t\tpercent := int64(float64(float64(currTotal)\/float64(fileSize)) * 100)\n\n\t\t\t\t\tlog.Printf(\"[%d] [%03d%%] %02.3f \/ %02.3f MB [%02.3f Mbps]\", song.ID, percent, current, totalSize, rate)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t\/\/ Else, print the current transfer size and rate\n\t\t\t\tlog.Printf(\"[%d] sent: %02.3f MB [%02.3f Mbps]\", song.ID, current, rate)\n\t\t\t\/\/ Stop printing\n\t\t\tcase <-stopProgressChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Stop progress on return\n\tdefer func() {\n\t\tclose(stopProgressChan)\n\t}()\n\n\t\/\/ Buffer to store and transfer file bytes\n\tbuf := make([]byte, 8192)\n\n\t\/\/ Indicate when stream is complete\n\tstreamComplete := false\n\n\t\/\/ Set necessary output HTTP headers\n\n\t\/\/ Set Content-Length if set\n\t\/\/ NOTE: HTTP standards specify that this must be an exact length, so we cannot estimate it for\n\t\/\/ transcodes unless the entire file is transcoded and then sent\n\tif fileSize > 0 {\n\t\thttpRes.Header().Set(\"Content-Length\", strconv.FormatInt(fileSize, 10))\n\t}\n\n\t\/\/ Override Content-Type if set\n\tcontentType := mime.TypeByExtension(path.Ext(song.FileName))\n\tif mimeType != \"\" {\n\t\tcontentType = mimeType\n\t}\n\thttpRes.Header().Set(\"Content-Type\", contentType)\n\n\t\/\/ Get song modify time in RFC1123 format, replace UTC with GMT\n\tlastMod := strings.Replace(time.Unix(song.LastModified, 0).UTC().Format(time.RFC1123), \"UTC\", \"GMT\", 1)\n\n\t\/\/ Set Last-Modified using filesystem modify time\n\thttpRes.Header().Set(\"Last-Modified\", lastMod)\n\n\t\/\/ Begin transferring the data stream\n\tfor {\n\t\t\/\/ Read in a buffer from the file\n\t\tn, err := stream.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if err == io.EOF {\n\t\t\t\/\/ Halt streaming after next write\n\t\t\tstreamComplete = true\n\t\t}\n\n\t\t\/\/ Count bytes\n\t\tatomic.AddInt64(&total, int64(n))\n\n\t\t\/\/ Write bytes over HTTP\n\t\tif _, err := httpRes.Write(buf[:n]); err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ If stream is complete, break loop\n\t\tif streamComplete {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ No errors\n\treturn nil\n}\n<commit_msg>Close connection on stream<commit_after>package api\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/mdlayher\/wavepipe\/data\"\n)\n\n\/\/ httpStream provides a common method to transfer a file stream using a HTTP response writer\nfunc httpStream(song *data.Song, mimeType string, fileSize int64, stream io.ReadCloser, httpRes http.ResponseWriter) error {\n\t\/\/ Total bytes transferred\n\tvar total int64\n\n\t\/\/ Track the stream's progress via log\n\tstopProgressChan := make(chan struct{})\n\tgo func() {\n\t\t\/\/ Track start time\n\t\tstartTime := time.Now()\n\n\t\t\/\/ Print progress every 5 seconds\n\t\tprogress := time.NewTicker(5 * time.Second)\n\n\t\t\/\/ Calculate total file size\n\t\ttotalSize := float64(fileSize) \/ 1024 \/ 1024\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ Print progress\n\t\t\tcase <-progress.C:\n\t\t\t\t\/\/ Capture current progress\n\t\t\t\tcurrTotal := atomic.LoadInt64(&total)\n\t\t\t\tcurrent := float64(currTotal) \/ 1024 \/ 1024\n\n\t\t\t\t\/\/ Capture current transfer rate\n\t\t\t\trate := float64(float64((currTotal*8)\/1024\/1024) \/ float64(time.Now().Sub(startTime).Seconds()))\n\n\t\t\t\t\/\/ If size available, we can print percentage and file sizes\n\t\t\t\tif fileSize > 0 {\n\t\t\t\t\t\/\/ Capture current percentage\n\t\t\t\t\tpercent := int64(float64(float64(currTotal)\/float64(fileSize)) * 100)\n\n\t\t\t\t\tlog.Printf(\"[%d] [%03d%%] %02.3f \/ %02.3f MB [%02.3f Mbps]\", song.ID, percent, current, totalSize, rate)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t\/\/ Else, print the current transfer size and rate\n\t\t\t\tlog.Printf(\"[%d] sent: %02.3f MB [%02.3f Mbps]\", song.ID, current, rate)\n\t\t\t\/\/ Stop printing\n\t\t\tcase <-stopProgressChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Stop progress on return\n\tdefer func() {\n\t\tclose(stopProgressChan)\n\t}()\n\n\t\/\/ Buffer to store and transfer file bytes\n\tbuf := make([]byte, 8192)\n\n\t\/\/ Indicate when stream is complete\n\tstreamComplete := false\n\n\t\/\/ Set necessary output HTTP headers\n\n\t\/\/ Set Content-Length if set\n\t\/\/ NOTE: HTTP standards specify that this must be an exact length, so we cannot estimate it for\n\t\/\/ transcodes unless the entire file is transcoded and then sent\n\tif fileSize > 0 {\n\t\thttpRes.Header().Set(\"Content-Length\", strconv.FormatInt(fileSize, 10))\n\t}\n\n\t\/\/ Override Content-Type if set\n\tcontentType := mime.TypeByExtension(path.Ext(song.FileName))\n\tif mimeType != \"\" {\n\t\tcontentType = mimeType\n\t}\n\thttpRes.Header().Set(\"Content-Type\", contentType)\n\n\t\/\/ Get song modify time in RFC1123 format, replace UTC with GMT\n\tlastMod := strings.Replace(time.Unix(song.LastModified, 0).UTC().Format(time.RFC1123), \"UTC\", \"GMT\", 1)\n\n\t\/\/ Set Last-Modified using filesystem modify time\n\thttpRes.Header().Set(\"Last-Modified\", lastMod)\n\n\t\/\/ Specify connection close on send\n\thttpRes.Header().Set(\"Connection\", \"close\")\n\n\t\/\/ Begin transferring the data stream\n\tfor {\n\t\t\/\/ Read in a buffer from the file\n\t\tn, err := stream.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if err == io.EOF {\n\t\t\t\/\/ Halt streaming after next write\n\t\t\tstreamComplete = true\n\t\t}\n\n\t\t\/\/ Count bytes\n\t\tatomic.AddInt64(&total, int64(n))\n\n\t\t\/\/ Write bytes over HTTP\n\t\tif _, err := httpRes.Write(buf[:n]); err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ If stream is complete, break loop\n\t\tif streamComplete {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ No errors\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"reflect\"\n)\n\nfunc TestActivate(t *testing.T) {\n\td := NewDenv(\"test\")\n\tcases := []struct {\n\t\tin string\n\t\twant *Denv\n\t}{\n\t\t{\"\", nil},\n\t\t{\"test\", d},\n\t\t{\"notexist\", nil},\n\t}\n\tfor _, c := range cases {\n\t\tfmt.Printf(\"Testing Activate(%s)\\n\", c.in)\n\t\tgot, err := Activate(c.in)\n\t\tif c.want == nil && got != nil && err != nil {\n\t\t\tt.Errorf(\"Activate(%q) returned an error, but not nil Denv\")\n\t\t} else if err != nil && c.want != nil {\n\t\t\tt.Errorf(\"Wanted something, but threw an error %s\", err)\n\t\t} else if !reflect.DeepEqual(c.want, got) {\n\t\t\tt.Errorf(\"Activate(%q) wanted %s, got %s\", c.in, c.want.ToString(), got.ToString())\n\t\t}\n\t}\n}\n<commit_msg>init local_test<commit_after>package api\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"reflect\"\n)\n\nfunc TestActivate(t *testing.T) {\n\td := NewDenv(\"test\")\n\tcases := []struct {\n\t\tin string\n\t\twant *Denv\n\t}{\n\t\t{\"\", nil},\n\t\t{\"test\", d},\n\t\t{\"notexist\", nil},\n\t}\n\tfor _, c := range cases {\n\t\tfmt.Printf(\"Testing Activate(%s)\\n\", c.in)\n\t\tgot, err := Activate(c.in)\n\t\tif c.want == nil && got != nil && err != nil {\n\t\t\tt.Errorf(\"Activate(%q) returned an error, but not nil Denv\")\n\t\t} else if err != nil && c.want != nil {\n\t\t\tt.Errorf(\"Wanted something, but threw an error %s\", err)\n\t\t} else if !reflect.DeepEqual(c.want, got) {\n\t\t\tt.Errorf(\"Activate(%q) wanted %s, got %s\", c.in, c.want.ToString(), got.ToString())\n\t\t}\n\t}\n\tdenv, err := Activate(\"test\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating Denv, %s\", err)\n\t}\n\tif Info.Current != denv {\n\t\tt.Errorf(\"Activate(test) did not properly assign Info.Current, %s\", Info.ToString())\n\t}\n}\n\nfunc TestDeactivate(t *testing.T) {\n\tNewDenv(\"test\") \/\/ Forces creation if not there\n\tactive, err := Activate(\"test\")\n\tif err != nil {\n\t\tt.Errorf(\"Could not Activate(test), %s\", err)\n\t}\n\tdeactive := Deactivate()\n\tif active != deactive {\n\t\tt.Errorf(\"Deactivate() reaturned different denv, %p, %p\", &active, &deactive)\n\t}\n\tif Info.Current != nil {\n\t\tt.Errorf(\"Deactivate() did not clear Info.Current, %s\", Info.ToString())\n\t}\n}\n\nfunc TestList(t *testing.T) {\n\t\/\/ Maybe implement a set\n\tdenvs := map[*Denv]bool {\n\t\tNewDenv(\"d1\"): true,\n\t\tNewDenv(\"d2\"): true,\n\t}\n\tls := List()\n\tset := make(map[string]int)\n\tfor d, _ := range ls {\n\t\tset[d.Path] += 1\n\t}\t\n\tfor d, _ := range denvs {\n\t\tif count, found := set[d.Path]; !found {\n\t\t\tt.Errorf(\"Denvs not subset of List()\\nDenvs %s\\nList() %s\", denvs, ls)\n\t\t} else if count < 1 {\n\t\t\tt.Errorf(\"Denvs not subset of List()\\nDenvs %s\\nList() %s\", denvs, ls)\n\t\t} else {\n\t\t\tset[d.Path] = count - 1\n\t\t}\n\t}\n}\n\nfunc TestWhich(t *testing.T) {\n\tNewDenv(\"test\")\n\td, _ := Activate(\"test\")\n\tif Which() != d {\n\t\tt.Errorf(\"Which() did not return active denv, %s, %s\", d, Which())\n\t}\n\td = Deactivate()\n\tif Which() != nil {\n\t\tt.Errorf(\"Which() did not return nil on deactivate, %s, %s\", d, Which())\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package hugolib\n\nimport (\n\t\"html\/template\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar EMPTY_PAGE = \"\"\n\nvar SIMPLE_PAGE = `---\ntitle: Simple\n---\nSimple Page\n`\n\nvar INVALID_FRONT_MATTER_MISSING = `This is a test`\n\nvar INVALID_FRONT_MATTER_SHORT_DELIM = `\n--\ntitle: Short delim start\n---\nShort Delim\n`\n\nvar INVALID_FRONT_MATTER_SHORT_DELIM_ENDING = `\n---\ntitle: Short delim ending\n--\nShort Delim\n`\n\nvar INVALID_FRONT_MATTER_LEADING_WS = `\n \n ---\ntitle: Leading WS\n---\nLeading\n`\n\nvar SIMPLE_PAGE_JSON = `\n{\n\"title\": \"spf13-vim 3.0 release and new website\",\n\"description\": \"spf13-vim is a cross platform distribution of vim plugins and resources for Vim.\",\n\"tags\": [ \".vimrc\", \"plugins\", \"spf13-vim\", \"vim\" ],\n\"date\": \"2012-04-06\",\n\"categories\": [\n \"Development\",\n \"VIM\"\n],\n\"slug\": \"spf13-vim-3-0-release-and-new-website\"\n}\n\nContent of the file goes Here \n`\n\nvar SIMPLE_PAGE_JSON_MULTIPLE = `\n{\n\t\"title\": \"foobar\",\n\t\"customData\": { \"foo\": \"bar\" },\n\t\"date\": \"2012-08-06\"\n}\nSome text\n`\n\nvar SIMPLE_PAGE_JSON_COMPACT = `\n{\"title\":\"foobar\",\"customData\":{\"foo\":\"bar\"},\"date\":\"2012-08-06\"}\nText\n`\n\nvar PAGE_WITH_INVALID_DATE = `---\ndate: 2010-05-02 15:29:31+08:00\n---\nPage With Invalid Date (missing the T for RFC 3339)`\n\nvar PAGE_WITH_DATE_RFC3339 = `---\ndate: 2010-05-02T15:29:31+08:00\n---\nPage With Date RFC3339`\n\nvar PAGE_WITH_DATE_RFC1123 = `---\ndate: Sun, 02 May 2010 15:29:31 PST\n---\nPage With Date RFC1123`\n\nvar PAGE_WITH_DATE_RFC1123Z = `---\ndate: Sun, 02 May 2010 15:29:31 +0800\n---\nPage With Date RFC1123Z`\n\nvar PAGE_WITH_DATE_RFC822 = `---\ndate: 02 May 10 15:29 PST\n---\nPage With Date RFC822`\n\nvar PAGE_WITH_DATE_RFC822Z = `---\ndate: 02 May 10 15:29 +0800\n---\nPage With Date RFC822Z`\n\nvar PAGE_WITH_DATE_ANSIC = `---\ndate: Sun May 2 15:29:31 2010\n---\nPage With Date ANSIC`\n\nvar PAGE_WITH_DATE_UnixDate = `---\ndate: Sun May 2 15:29:31 PST 2010\n---\nPage With Date UnixDate`\n\nvar PAGE_WITH_DATE_RubyDate = `---\ndate: Sun May 02 15:29:31 +0800 2010\n---\nPage With Date RubyDate`\n\nfunc checkError(t *testing.T, err error, expected string) {\n\tif err == nil {\n\t\tt.Fatalf(\"err is nil\")\n\t}\n\tif err.Error() != expected {\n\t\tt.Errorf(\"err.Error() returned: '%s'. Expected: '%s'\", err.Error(), expected)\n\t}\n}\n\nfunc TestDegenerateEmptyPageZeroLengthName(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(EMPTY_PAGE), \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"A zero length page name must return an error\")\n\t}\n\n\tcheckError(t, err, \"Zero length page name\")\n}\n\nfunc TestDegenerateEmptyPage(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(EMPTY_PAGE), \"test\")\n\tif err == nil {\n\t\tt.Fatalf(\"Expected ReadFrom to return an error when an empty buffer is passed.\")\n\t}\n\n\tcheckError(t, err, \"unable to locate front matter\")\n}\n\nfunc checkPageTitle(t *testing.T, page *Page, title string) {\n\tif page.Title != title {\n\t\tt.Fatalf(\"Page title is: %s. Expected %s\", page.Title, title)\n\t}\n}\n\nfunc checkPageContent(t *testing.T, page *Page, content string) {\n\tif page.Content != template.HTML(content) {\n\t\tt.Fatalf(\"Page content is: %s. Expected %s\", page.Content, content)\n\t}\n}\n\nfunc checkPageType(t *testing.T, page *Page, pageType string) {\n\tif page.Type() != pageType {\n\t\tt.Fatalf(\"Page type is: %s. Expected: %s\", page.Type(), pageType)\n\t}\n}\n\nfunc checkPageLayout(t *testing.T, page *Page, layout string) {\n\tif page.Layout() != layout {\n\t\tt.Fatalf(\"Page layout is: %s. Expected: %s\", page.Layout(), layout)\n\t}\n}\n\nfunc TestCreateNewPage(t *testing.T) {\n\tp, err := ReadFrom(strings.NewReader(SIMPLE_PAGE), \"simple\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create a page with frontmatter and body content: %s\", err)\n\t}\n\tcheckPageTitle(t, p, \"Simple\")\n\tcheckPageContent(t, p, \"<p>Simple Page<\/p>\\n\")\n\tcheckPageType(t, p, \"page\")\n\tcheckPageLayout(t, p, \"page\/single.html\")\n}\n\nfunc TestCreatePage(t *testing.T) {\n\tvar tests = []struct {\n\t\tr io.Reader\n\t}{\n\t\t{strings.NewReader(SIMPLE_PAGE_JSON)},\n\t\t{strings.NewReader(SIMPLE_PAGE_JSON_MULTIPLE)},\n\t\t\/\/{strings.NewReader(SIMPLE_PAGE_JSON_COMPACT)},\n\t}\n\n\tfor _, test := range tests {\n\t\t_, err := ReadFrom(test.r, \"page\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unable to parse page: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestDegenerateInvalidFrontMatterShortDelim(t *testing.T) {\n\tvar tests = []struct {\n\t\tr io.Reader\n\t\terr string\n\t}{\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_SHORT_DELIM), \"unable to match beginning front matter delimiter\"},\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_SHORT_DELIM_ENDING), \"unable to match ending front matter delimiter\"},\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_MISSING), \"unable to detect front matter\"},\n\t}\n\tfor _, test := range tests {\n\t\t_, err := ReadFrom(test.r, \"invalid\/front\/matter\/short\/delim\")\n\t\tcheckError(t, err, test.err)\n\t}\n}\n\nfunc TestDegenerateInvalidFrontMatterLeadingWhitespace(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(INVALID_FRONT_MATTER_LEADING_WS), \"invalid\/front\/matter\/leading\/ws\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to parse front matter given leading whitespace: %s\", err)\n\t}\n}\n\nfunc TestDegenerateDateFrontMatter(t *testing.T) {\n\tp, _ := ReadFrom(strings.NewReader(PAGE_WITH_INVALID_DATE), \"page\/with\/invalid\/date\")\n\tif p.Date != time.Unix(0, 0) {\n\t\tt.Fatalf(\"Date should be set to computer epoch. Got: %s\", p.Date)\n\t}\n}\n\nfunc TestParsingDateInFrontMatter(t *testing.T) {\n\n\tfor _, test := range []struct {\n\t\tbuf string\n\t\tdt string\n\t}{\n\t\t{PAGE_WITH_DATE_RFC3339, \"2010-05-02T15:29:31+08:00\"},\n\t\t{PAGE_WITH_DATE_RFC1123, \"2010-05-02T15:29:31-08:00\"},\n\t\t{PAGE_WITH_DATE_RFC1123Z, \"2010-05-02T15:29:31+08:00\"},\n\t\t{PAGE_WITH_DATE_RFC822, \"2010-05-02T15:29:00-08:00\"},\n\t\t{PAGE_WITH_DATE_RFC822Z, \"2010-05-02T15:29:00+08:00\"},\n\t\t{PAGE_WITH_DATE_ANSIC, \"2010-05-02T15:29:31Z\"},\n\t\t{PAGE_WITH_DATE_UnixDate, \"2010-05-02T15:29:31-08:00\"},\n\t\t{PAGE_WITH_DATE_RubyDate, \"2010-05-02T15:29:31+08:00\"},\n\t} {\n\t\tdt, e := time.Parse(time.RFC3339, test.dt)\n\t\tif e != nil {\n\t\t\tt.Fatalf(\"Unable to parse date time (RFC3339) for running the test: %s\", e)\n\t\t}\n\t\tp, err := ReadFrom(strings.NewReader(test.buf), \"page\/with\/date\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected to be able to parse page.\")\n\t\t}\n\t\tif !dt.Equal(p.Date) {\n\t\t\tt.Errorf(\"Date does not equal frontmatter:\\n%s\\nGot: %s. Diff: %s\", test.buf, p.Date, dt.Sub(p.Date))\n\t\t}\n\t}\n}\n<commit_msg>Add additional details to date test cases.<commit_after>package hugolib\n\nimport (\n\t\"html\/template\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar EMPTY_PAGE = \"\"\n\nvar SIMPLE_PAGE = `---\ntitle: Simple\n---\nSimple Page\n`\n\nvar INVALID_FRONT_MATTER_MISSING = `This is a test`\n\nvar INVALID_FRONT_MATTER_SHORT_DELIM = `\n--\ntitle: Short delim start\n---\nShort Delim\n`\n\nvar INVALID_FRONT_MATTER_SHORT_DELIM_ENDING = `\n---\ntitle: Short delim ending\n--\nShort Delim\n`\n\nvar INVALID_FRONT_MATTER_LEADING_WS = `\n \n ---\ntitle: Leading WS\n---\nLeading\n`\n\nvar SIMPLE_PAGE_JSON = `\n{\n\"title\": \"spf13-vim 3.0 release and new website\",\n\"description\": \"spf13-vim is a cross platform distribution of vim plugins and resources for Vim.\",\n\"tags\": [ \".vimrc\", \"plugins\", \"spf13-vim\", \"vim\" ],\n\"date\": \"2012-04-06\",\n\"categories\": [\n \"Development\",\n \"VIM\"\n],\n\"slug\": \"spf13-vim-3-0-release-and-new-website\"\n}\n\nContent of the file goes Here \n`\n\nvar SIMPLE_PAGE_JSON_MULTIPLE = `\n{\n\t\"title\": \"foobar\",\n\t\"customData\": { \"foo\": \"bar\" },\n\t\"date\": \"2012-08-06\"\n}\nSome text\n`\n\nvar SIMPLE_PAGE_JSON_COMPACT = `\n{\"title\":\"foobar\",\"customData\":{\"foo\":\"bar\"},\"date\":\"2012-08-06\"}\nText\n`\n\nvar PAGE_WITH_INVALID_DATE = `---\ndate: 2010-05-02 15:29:31+08:00\n---\nPage With Invalid Date (missing the T for RFC 3339)`\n\nvar PAGE_WITH_DATE_RFC3339 = `---\ndate: 2010-05-02T15:29:31+08:00\n---\nPage With Date RFC3339`\n\nvar PAGE_WITH_DATE_RFC1123 = `---\ndate: Sun, 02 May 2010 15:29:31 PST\n---\nPage With Date RFC1123`\n\nvar PAGE_WITH_DATE_RFC1123Z = `---\ndate: Sun, 02 May 2010 15:29:31 +0800\n---\nPage With Date RFC1123Z`\n\nvar PAGE_WITH_DATE_RFC822 = `---\ndate: 02 May 10 15:29 PST\n---\nPage With Date RFC822`\n\nvar PAGE_WITH_DATE_RFC822Z = `---\ndate: 02 May 10 15:29 +0800\n---\nPage With Date RFC822Z`\n\nvar PAGE_WITH_DATE_ANSIC = `---\ndate: Sun May 2 15:29:31 2010\n---\nPage With Date ANSIC`\n\nvar PAGE_WITH_DATE_UnixDate = `---\ndate: Sun May 2 15:29:31 PST 2010\n---\nPage With Date UnixDate`\n\nvar PAGE_WITH_DATE_RubyDate = `---\ndate: Sun May 02 15:29:31 +0800 2010\n---\nPage With Date RubyDate`\n\nfunc checkError(t *testing.T, err error, expected string) {\n\tif err == nil {\n\t\tt.Fatalf(\"err is nil\")\n\t}\n\tif err.Error() != expected {\n\t\tt.Errorf(\"err.Error() returned: '%s'. Expected: '%s'\", err.Error(), expected)\n\t}\n}\n\nfunc TestDegenerateEmptyPageZeroLengthName(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(EMPTY_PAGE), \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"A zero length page name must return an error\")\n\t}\n\n\tcheckError(t, err, \"Zero length page name\")\n}\n\nfunc TestDegenerateEmptyPage(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(EMPTY_PAGE), \"test\")\n\tif err == nil {\n\t\tt.Fatalf(\"Expected ReadFrom to return an error when an empty buffer is passed.\")\n\t}\n\n\tcheckError(t, err, \"unable to locate front matter\")\n}\n\nfunc checkPageTitle(t *testing.T, page *Page, title string) {\n\tif page.Title != title {\n\t\tt.Fatalf(\"Page title is: %s. Expected %s\", page.Title, title)\n\t}\n}\n\nfunc checkPageContent(t *testing.T, page *Page, content string) {\n\tif page.Content != template.HTML(content) {\n\t\tt.Fatalf(\"Page content is: %s. Expected %s\", page.Content, content)\n\t}\n}\n\nfunc checkPageType(t *testing.T, page *Page, pageType string) {\n\tif page.Type() != pageType {\n\t\tt.Fatalf(\"Page type is: %s. Expected: %s\", page.Type(), pageType)\n\t}\n}\n\nfunc checkPageLayout(t *testing.T, page *Page, layout string) {\n\tif page.Layout() != layout {\n\t\tt.Fatalf(\"Page layout is: %s. Expected: %s\", page.Layout(), layout)\n\t}\n}\n\nfunc TestCreateNewPage(t *testing.T) {\n\tp, err := ReadFrom(strings.NewReader(SIMPLE_PAGE), \"simple\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create a page with frontmatter and body content: %s\", err)\n\t}\n\tcheckPageTitle(t, p, \"Simple\")\n\tcheckPageContent(t, p, \"<p>Simple Page<\/p>\\n\")\n\tcheckPageType(t, p, \"page\")\n\tcheckPageLayout(t, p, \"page\/single.html\")\n}\n\nfunc TestCreatePage(t *testing.T) {\n\tvar tests = []struct {\n\t\tr io.Reader\n\t}{\n\t\t{strings.NewReader(SIMPLE_PAGE_JSON)},\n\t\t{strings.NewReader(SIMPLE_PAGE_JSON_MULTIPLE)},\n\t\t\/\/{strings.NewReader(SIMPLE_PAGE_JSON_COMPACT)},\n\t}\n\n\tfor _, test := range tests {\n\t\t_, err := ReadFrom(test.r, \"page\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unable to parse page: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestDegenerateInvalidFrontMatterShortDelim(t *testing.T) {\n\tvar tests = []struct {\n\t\tr io.Reader\n\t\terr string\n\t}{\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_SHORT_DELIM), \"unable to match beginning front matter delimiter\"},\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_SHORT_DELIM_ENDING), \"unable to match ending front matter delimiter\"},\n\t\t{strings.NewReader(INVALID_FRONT_MATTER_MISSING), \"unable to detect front matter\"},\n\t}\n\tfor _, test := range tests {\n\t\t_, err := ReadFrom(test.r, \"invalid\/front\/matter\/short\/delim\")\n\t\tcheckError(t, err, test.err)\n\t}\n}\n\nfunc TestDegenerateInvalidFrontMatterLeadingWhitespace(t *testing.T) {\n\t_, err := ReadFrom(strings.NewReader(INVALID_FRONT_MATTER_LEADING_WS), \"invalid\/front\/matter\/leading\/ws\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to parse front matter given leading whitespace: %s\", err)\n\t}\n}\n\nfunc TestDegenerateDateFrontMatter(t *testing.T) {\n\tp, _ := ReadFrom(strings.NewReader(PAGE_WITH_INVALID_DATE), \"page\/with\/invalid\/date\")\n\tif p.Date != time.Unix(0, 0) {\n\t\tt.Fatalf(\"Date should be set to computer epoch. Got: %s\", p.Date)\n\t}\n}\n\nfunc TestParsingDateInFrontMatter(t *testing.T) {\n\n\tfor _, test := range []struct {\n\t\tbuf string\n\t\tdt string\n\t}{\n\t\t{PAGE_WITH_DATE_RFC3339, \"2010-05-02T15:29:31+08:00\"},\n\t\t{PAGE_WITH_DATE_RFC1123, \"2010-05-02T15:29:31-08:00\"},\n\t\t{PAGE_WITH_DATE_RFC1123Z, \"2010-05-02T15:29:31+08:00\"},\n\t\t{PAGE_WITH_DATE_RFC822, \"2010-05-02T15:29:00-08:00\"},\n\t\t{PAGE_WITH_DATE_RFC822Z, \"2010-05-02T15:29:00+08:00\"},\n\t\t{PAGE_WITH_DATE_ANSIC, \"2010-05-02T15:29:31Z\"},\n\t\t{PAGE_WITH_DATE_UnixDate, \"2010-05-02T15:29:31-08:00\"},\n\t\t{PAGE_WITH_DATE_RubyDate, \"2010-05-02T15:29:31+08:00\"},\n\t} {\n\t\tdt, e := time.Parse(time.RFC3339, test.dt)\n\t\tif e != nil {\n\t\t\tt.Fatalf(\"Unable to parse date time (RFC3339) for running the test: %s\", e)\n\t\t}\n\t\tp, err := ReadFrom(strings.NewReader(test.buf), \"page\/with\/date\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected to be able to parse page.\")\n\t\t}\n\t\tif !dt.Equal(p.Date) {\n\t\t\/\/if dt != p.Date {\n\t\t\tt.Errorf(\"Date does not equal frontmatter:\\n%s\\nExpecting: %s\\n Got: %s. Diff: %s\\n internal: %#v\\n %#v\", test.buf, dt, p.Date, dt.Sub(p.Date), dt, p.Date)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\topentracing_log \"github.com\/opentracing\/opentracing-go\/log\"\n\n\tuuid \"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/log\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/tasks\"\n\t\"github.com\/urfave\/cli\"\n\n\texampletasks \"github.com\/RichardKnop\/machinery\/example\/tasks\"\n\ttracers \"github.com\/RichardKnop\/machinery\/example\/tracers\"\n)\n\nvar (\n\tapp *cli.App\n\tconfigPath string\n)\n\nfunc init() {\n\t\/\/ Initialise a CLI app\n\tapp = cli.NewApp()\n\tapp.Name = \"machinery\"\n\tapp.Usage = \"machinery worker and send example tasks with machinery send\"\n\tapp.Author = \"Richard Knop\"\n\tapp.Email = \"risoknop@gmail.com\"\n\tapp.Version = \"0.0.0\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"c\",\n\t\t\tValue: \"\",\n\t\t\tDestination: &configPath,\n\t\t\tUsage: \"Path to a configuration file\",\n\t\t},\n\t}\n}\n\nfunc main() {\n\t\/\/ Set the CLI app commands\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"worker\",\n\t\t\tUsage: \"launch machinery worker\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif err := worker(); err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"send\",\n\t\t\tUsage: \"send example tasks \",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif err := send(); err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ Run the CLI app\n\tapp.Run(os.Args)\n}\n\nfunc loadConfig() (*config.Config, error) {\n\tif configPath != \"\" {\n\t\treturn config.NewFromYaml(configPath, true)\n\t}\n\n\treturn config.NewFromEnvironment(true)\n}\n\nfunc startServer() (*machinery.Server, error) {\n\tcnf, err := loadConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create server instance\n\tserver, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Register tasks\n\ttasks := map[string]interface{}{\n\t\t\"add\": exampletasks.Add,\n\t\t\"multiply\": exampletasks.Multiply,\n\t\t\"sum_ints\": exampletasks.SumInts,\n\t\t\"sum_floats\": exampletasks.SumFloats,\n\t\t\"concat\": exampletasks.Concat,\n\t\t\"split\": exampletasks.Split,\n\t\t\"panic_task\": exampletasks.PanicTask,\n\t\t\"long_running_task\": exampletasks.LongRunningTask,\n\t}\n\n\treturn server, server.RegisterTasks(tasks)\n}\n\nfunc worker() error {\n\tconsumerTag := \"machinery_worker\"\n\n\tcleanup, err := tracers.SetupTracer(consumerTag)\n\tif err != nil {\n\t\tlog.FATAL.Fatalln(\"Unable to instantiate a tracer:\", err)\n\t}\n\tdefer cleanup()\n\n\tserver, err := startServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ The second argument is a consumer tag\n\t\/\/ Ideally, each worker should have a unique tag (worker1, worker2 etc)\n\tworker := server.NewWorker(consumerTag, 0)\n\n\treturn worker.Launch()\n}\n\nfunc send() error {\n\tcleanup, err := tracers.SetupTracer(\"sender\")\n\tif err != nil {\n\t\tlog.FATAL.Fatalln(\"Unable to instantiate a tracer:\", err)\n\t}\n\tdefer cleanup()\n\n\tserver, err := startServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\taddTask0, addTask1, addTask2 tasks.Signature\n\t\tmultiplyTask0, multiplyTask1 tasks.Signature\n\t\tsumIntsTask, sumFloatsTask, concatTask, splitTask tasks.Signature\n\t\tpanicTask tasks.Signature\n\t\tlongRunningTask tasks.Signature\n\t)\n\n\tvar initTasks = func() {\n\t\taddTask0 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 1,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\taddTask1 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 2,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\taddTask2 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 5,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 6,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmultiplyTask0 = tasks.Signature{\n\t\t\tName: \"multiply\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmultiplyTask1 = tasks.Signature{\n\t\t\tName: \"multiply\",\n\t\t}\n\n\t\tsumIntsTask = tasks.Signature{\n\t\t\tName: \"sum_ints\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]int64\",\n\t\t\t\t\tValue: []int64{1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsumFloatsTask = tasks.Signature{\n\t\t\tName: \"sum_floats\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]float64\",\n\t\t\t\t\tValue: []float64{1.5, 2.7},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tconcatTask = tasks.Signature{\n\t\t\tName: \"concat\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]string\",\n\t\t\t\t\tValue: []string{\"foo\", \"bar\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsplitTask = tasks.Signature{\n\t\t\tName: \"split\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"string\",\n\t\t\t\t\tValue: \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tpanicTask = tasks.Signature{\n\t\t\tName: \"panic_task\",\n\t\t}\n\n\t\tlongRunningTask = tasks.Signature{\n\t\t\tName: \"long_running_task\",\n\t\t}\n\t}\n\n\t\/*\n\t * Lets start a span representing this run of the `send` command and\n\t * set a batch id as baggage so it can travel all the way into\n\t * the worker functions.\n\t *\/\n\tspan, ctx := opentracing.StartSpanFromContext(context.Background(), \"send\")\n\tdefer span.Finish()\n\n\tbatchID := uuid.NewV4().String()\n\tspan.SetBaggageItem(\"batch.id\", batchID)\n\tspan.LogFields(opentracing_log.String(\"batch.id\", batchID))\n\n\tlog.INFO.Println(\"Starting batch:\", batchID)\n\t\/*\n\t * First, let's try sending a single task\n\t *\/\n\tinitTasks()\n\n\tlog.INFO.Println(\"Single task:\")\n\n\tasyncResult, err := server.SendTaskWithContext(ctx, &addTask0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err := asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"1 + 1 = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/*\n\t * Try couple of tasks with a slice argument and slice return value\n\t *\/\n\tasyncResult, err = server.SendTaskWithContext(ctx, &sumIntsTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"sum([1, 2]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTaskWithContext(ctx, &sumFloatsTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"sum([1.5, 2.7]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTaskWithContext(ctx, &concatTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"concat([\\\"foo\\\", \\\"bar\\\"]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTask(&splitTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"split([\\\"foo\\\"]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/*\n\t * Now let's explore ways of sending multiple tasks\n\t *\/\n\n\t\/\/ Now let's try a parallel execution\n\tinitTasks()\n\tlog.INFO.Println(\"Group of tasks (parallel execution):\")\n\n\tgroup := tasks.NewGroup(&addTask0, &addTask1, &addTask2)\n\tasyncResults, err := server.SendGroupWithContext(ctx, group, 10)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send group: %s\", err.Error())\n\t}\n\n\tfor _, asyncResult := range asyncResults {\n\t\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t\t}\n\t\tlog.INFO.Printf(\n\t\t\t\"%v + %v = %v\\n\",\n\t\t\tasyncResult.Signature.Args[0].Value,\n\t\t\tasyncResult.Signature.Args[1].Value,\n\t\t\ttasks.HumanReadableResults(results),\n\t\t)\n\t}\n\n\t\/\/ Now let's try a group with a chord\n\tinitTasks()\n\tlog.INFO.Println(\"Group of tasks with a callback (chord):\")\n\n\tgroup = tasks.NewGroup(&addTask0, &addTask1, &addTask2)\n\tchord := tasks.NewChord(group, &multiplyTask1)\n\tchordAsyncResult, err := server.SendChordWithContext(ctx, chord, 10)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send chord: %s\", err.Error())\n\t}\n\n\tresults, err = chordAsyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting chord result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"(1 + 1) * (2 + 2) * (5 + 6) = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/\/ Now let's try chaining task results\n\tinitTasks()\n\tlog.INFO.Println(\"Chain of tasks:\")\n\n\tchain := tasks.NewChain(&addTask0, &addTask1, &addTask2, &multiplyTask0)\n\tchainAsyncResult, err := server.SendChainWithContext(ctx, chain)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send chain: %s\", err.Error())\n\t}\n\n\tresults, err = chainAsyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting chain result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"(((1 + 1) + (2 + 2)) + (5 + 6)) * 4 = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/\/ Let's try a task which throws panic to make sure stack trace is not lost\n\tinitTasks()\n\tasyncResult, err = server.SendTaskWithContext(ctx, &panicTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\t_, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err == nil {\n\t\treturn errors.New(\"Error should not be nil if task panicked\")\n\t}\n\tlog.INFO.Printf(\"Task panicked and returned error = %v\\n\", err.Error())\n\n\t\/\/ Let's try a long running task\n\tinitTasks()\n\tasyncResult, err = server.SendTaskWithContext(ctx, &longRunningTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting long running task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"Long running task returned = %v\\n\", tasks.HumanReadableResults(results))\n\n\treturn nil\n}\n<commit_msg>fix(example): make split example use traced function<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\topentracing_log \"github.com\/opentracing\/opentracing-go\/log\"\n\n\tuuid \"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/log\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/tasks\"\n\t\"github.com\/urfave\/cli\"\n\n\texampletasks \"github.com\/RichardKnop\/machinery\/example\/tasks\"\n\ttracers \"github.com\/RichardKnop\/machinery\/example\/tracers\"\n)\n\nvar (\n\tapp *cli.App\n\tconfigPath string\n)\n\nfunc init() {\n\t\/\/ Initialise a CLI app\n\tapp = cli.NewApp()\n\tapp.Name = \"machinery\"\n\tapp.Usage = \"machinery worker and send example tasks with machinery send\"\n\tapp.Author = \"Richard Knop\"\n\tapp.Email = \"risoknop@gmail.com\"\n\tapp.Version = \"0.0.0\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"c\",\n\t\t\tValue: \"\",\n\t\t\tDestination: &configPath,\n\t\t\tUsage: \"Path to a configuration file\",\n\t\t},\n\t}\n}\n\nfunc main() {\n\t\/\/ Set the CLI app commands\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"worker\",\n\t\t\tUsage: \"launch machinery worker\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif err := worker(); err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"send\",\n\t\t\tUsage: \"send example tasks \",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tif err := send(); err != nil {\n\t\t\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ Run the CLI app\n\tapp.Run(os.Args)\n}\n\nfunc loadConfig() (*config.Config, error) {\n\tif configPath != \"\" {\n\t\treturn config.NewFromYaml(configPath, true)\n\t}\n\n\treturn config.NewFromEnvironment(true)\n}\n\nfunc startServer() (*machinery.Server, error) {\n\tcnf, err := loadConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create server instance\n\tserver, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Register tasks\n\ttasks := map[string]interface{}{\n\t\t\"add\": exampletasks.Add,\n\t\t\"multiply\": exampletasks.Multiply,\n\t\t\"sum_ints\": exampletasks.SumInts,\n\t\t\"sum_floats\": exampletasks.SumFloats,\n\t\t\"concat\": exampletasks.Concat,\n\t\t\"split\": exampletasks.Split,\n\t\t\"panic_task\": exampletasks.PanicTask,\n\t\t\"long_running_task\": exampletasks.LongRunningTask,\n\t}\n\n\treturn server, server.RegisterTasks(tasks)\n}\n\nfunc worker() error {\n\tconsumerTag := \"machinery_worker\"\n\n\tcleanup, err := tracers.SetupTracer(consumerTag)\n\tif err != nil {\n\t\tlog.FATAL.Fatalln(\"Unable to instantiate a tracer:\", err)\n\t}\n\tdefer cleanup()\n\n\tserver, err := startServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ The second argument is a consumer tag\n\t\/\/ Ideally, each worker should have a unique tag (worker1, worker2 etc)\n\tworker := server.NewWorker(consumerTag, 0)\n\n\treturn worker.Launch()\n}\n\nfunc send() error {\n\tcleanup, err := tracers.SetupTracer(\"sender\")\n\tif err != nil {\n\t\tlog.FATAL.Fatalln(\"Unable to instantiate a tracer:\", err)\n\t}\n\tdefer cleanup()\n\n\tserver, err := startServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\taddTask0, addTask1, addTask2 tasks.Signature\n\t\tmultiplyTask0, multiplyTask1 tasks.Signature\n\t\tsumIntsTask, sumFloatsTask, concatTask, splitTask tasks.Signature\n\t\tpanicTask tasks.Signature\n\t\tlongRunningTask tasks.Signature\n\t)\n\n\tvar initTasks = func() {\n\t\taddTask0 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 1,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\taddTask1 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 2,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\taddTask2 = tasks.Signature{\n\t\t\tName: \"add\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 5,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 6,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmultiplyTask0 = tasks.Signature{\n\t\t\tName: \"multiply\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"int64\",\n\t\t\t\t\tValue: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmultiplyTask1 = tasks.Signature{\n\t\t\tName: \"multiply\",\n\t\t}\n\n\t\tsumIntsTask = tasks.Signature{\n\t\t\tName: \"sum_ints\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]int64\",\n\t\t\t\t\tValue: []int64{1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsumFloatsTask = tasks.Signature{\n\t\t\tName: \"sum_floats\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]float64\",\n\t\t\t\t\tValue: []float64{1.5, 2.7},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tconcatTask = tasks.Signature{\n\t\t\tName: \"concat\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"[]string\",\n\t\t\t\t\tValue: []string{\"foo\", \"bar\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsplitTask = tasks.Signature{\n\t\t\tName: \"split\",\n\t\t\tArgs: []tasks.Arg{\n\t\t\t\t{\n\t\t\t\t\tType: \"string\",\n\t\t\t\t\tValue: \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tpanicTask = tasks.Signature{\n\t\t\tName: \"panic_task\",\n\t\t}\n\n\t\tlongRunningTask = tasks.Signature{\n\t\t\tName: \"long_running_task\",\n\t\t}\n\t}\n\n\t\/*\n\t * Lets start a span representing this run of the `send` command and\n\t * set a batch id as baggage so it can travel all the way into\n\t * the worker functions.\n\t *\/\n\tspan, ctx := opentracing.StartSpanFromContext(context.Background(), \"send\")\n\tdefer span.Finish()\n\n\tbatchID := uuid.NewV4().String()\n\tspan.SetBaggageItem(\"batch.id\", batchID)\n\tspan.LogFields(opentracing_log.String(\"batch.id\", batchID))\n\n\tlog.INFO.Println(\"Starting batch:\", batchID)\n\t\/*\n\t * First, let's try sending a single task\n\t *\/\n\tinitTasks()\n\n\tlog.INFO.Println(\"Single task:\")\n\n\tasyncResult, err := server.SendTaskWithContext(ctx, &addTask0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err := asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"1 + 1 = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/*\n\t * Try couple of tasks with a slice argument and slice return value\n\t *\/\n\tasyncResult, err = server.SendTaskWithContext(ctx, &sumIntsTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"sum([1, 2]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTaskWithContext(ctx, &sumFloatsTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"sum([1.5, 2.7]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTaskWithContext(ctx, &concatTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"concat([\\\"foo\\\", \\\"bar\\\"]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\tasyncResult, err = server.SendTaskWithContext(ctx, &splitTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"split([\\\"foo\\\"]) = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/*\n\t * Now let's explore ways of sending multiple tasks\n\t *\/\n\n\t\/\/ Now let's try a parallel execution\n\tinitTasks()\n\tlog.INFO.Println(\"Group of tasks (parallel execution):\")\n\n\tgroup := tasks.NewGroup(&addTask0, &addTask1, &addTask2)\n\tasyncResults, err := server.SendGroupWithContext(ctx, group, 10)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send group: %s\", err.Error())\n\t}\n\n\tfor _, asyncResult := range asyncResults {\n\t\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Getting task result failed with error: %s\", err.Error())\n\t\t}\n\t\tlog.INFO.Printf(\n\t\t\t\"%v + %v = %v\\n\",\n\t\t\tasyncResult.Signature.Args[0].Value,\n\t\t\tasyncResult.Signature.Args[1].Value,\n\t\t\ttasks.HumanReadableResults(results),\n\t\t)\n\t}\n\n\t\/\/ Now let's try a group with a chord\n\tinitTasks()\n\tlog.INFO.Println(\"Group of tasks with a callback (chord):\")\n\n\tgroup = tasks.NewGroup(&addTask0, &addTask1, &addTask2)\n\tchord := tasks.NewChord(group, &multiplyTask1)\n\tchordAsyncResult, err := server.SendChordWithContext(ctx, chord, 10)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send chord: %s\", err.Error())\n\t}\n\n\tresults, err = chordAsyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting chord result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"(1 + 1) * (2 + 2) * (5 + 6) = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/\/ Now let's try chaining task results\n\tinitTasks()\n\tlog.INFO.Println(\"Chain of tasks:\")\n\n\tchain := tasks.NewChain(&addTask0, &addTask1, &addTask2, &multiplyTask0)\n\tchainAsyncResult, err := server.SendChainWithContext(ctx, chain)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send chain: %s\", err.Error())\n\t}\n\n\tresults, err = chainAsyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting chain result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"(((1 + 1) + (2 + 2)) + (5 + 6)) * 4 = %v\\n\", tasks.HumanReadableResults(results))\n\n\t\/\/ Let's try a task which throws panic to make sure stack trace is not lost\n\tinitTasks()\n\tasyncResult, err = server.SendTaskWithContext(ctx, &panicTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\t_, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err == nil {\n\t\treturn errors.New(\"Error should not be nil if task panicked\")\n\t}\n\tlog.INFO.Printf(\"Task panicked and returned error = %v\\n\", err.Error())\n\n\t\/\/ Let's try a long running task\n\tinitTasks()\n\tasyncResult, err = server.SendTaskWithContext(ctx, &longRunningTask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not send task: %s\", err.Error())\n\t}\n\n\tresults, err = asyncResult.Get(time.Duration(time.Millisecond * 5))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting long running task result failed with error: %s\", err.Error())\n\t}\n\tlog.INFO.Printf(\"Long running task returned = %v\\n\", tasks.HumanReadableResults(results))\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Login struct {\n\tUser string\n\tPassword string\n\tsync sync.Mutex\n}\ntype User struct {\n\tId string\n\tDisplayName string\n\tUrl string\n\tImageUrl string\n}\ntype RoomUser struct {\n\tRoomId int\n\tUser User\n\tLastActivity string\n\tJoinedDate string\n\tIsOnline bool\n}\ntype RoomUsers struct {\n\tCount int\n\tValue []RoomUser\n}\ntype Room struct {\n\tId int\n\tName string\n\tDescription string\n\tLastActivity string\n\tCreatedBy User\n\tCreatedDate string\n\tHasAdminPermissions bool\n\tHasReadWritePermissions bool\n}\ntype Rooms struct {\n\tCount int\n\tValue []Room\n}\n\nvar apiClient http.RoundTripper = &http.Transport{\n\tProxy: http.ProxyFromEnvironment,\n}\nvar apiBase string = os.Args[1]\nvar apiEndpoint []string = []string{\n\tapiBase + \"\/_apis\/chat\/rooms?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/users?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/users\/%s?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/messages?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/messages?$filter=PostedTime ge %s&api-version=1.0\",\n}\n\nfunc main() {\n\tlogin := Login{\n\t\tUser: os.Args[2],\n\t\tPassword: os.Args[3],\n\t}\n\tlistener := make(chan interface{}) \/\/ routines send updates here\n\tquitter := make(chan interface{}) \/\/ close to have all routines return\n\n\t\/\/ go pollClient() \/\/ get input\n\tgo pollRooms(5*time.Second, &login, &listener, &quitter)\n\n\tfor l := range listener {\n\t\tfmt.Println(l)\n\t}\n}\n\n\/\/ requests are mutually exclusive on login\n\/\/ so login can be updated on errors\n\/\/ without others triggering same\nfunc makeRequest(verb string, url string, body string, code int, login *Login, listener *chan interface{}) ([]byte, error) {\n\tlogin.sync.Lock()\n\tdefer login.sync.Unlock()\n\n\trequest, err := http.NewRequest(verb, url, strings.NewReader(body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.SetBasicAuth(login.User, login.Password)\n\n\tresponse, err := apiClient.RoundTrip(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != code {\n\t\treturn nil, errors.New(response.Status)\n\t}\n\n\treturn ioutil.ReadAll(response.Body)\n}\n\nfunc pollRooms(d time.Duration, login *Login, listener *chan interface{}, quitter *chan interface{}) {\n\troomMap := make(map[int]*Room)\n\troomTicker := time.NewTicker(d)\n\tdefer roomTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-roomTicker.C:\n\t\t\t\/\/ api\n\t\t\tbody, err := makeRequest(\"GET\", apiEndpoint[0], \"\", 200, login, listener)\n\t\t\tif err != nil {\n\t\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar rooms Rooms \/\/ or interface{} for generic\n\t\t\terr = json.Unmarshal(body, &rooms)\n\t\t\tif err != nil {\n\t\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ added\n\t\t\tnewRoomMap := make(map[int]*Room)\n\t\t\tfor _, room := range rooms.Value {\n\t\t\t\tnewRoomMap[room.Id] = &room\n\t\t\t\tif _, ok := roomMap[room.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(room)\n\t\t\t\tif err != nil {\n\t\t\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"room add %d %s\", room.Id, json)\n\t\t\t\troomMap[room.Id] = &room\n\n\t\t\t\t\/\/ need to track quitter for room users polling\n\t\t\t\tgo pollRoomUsers(3*time.Second, &room, login, listener, quitter)\n\t\t\t}\n\n\t\t\t\/\/ removed\n\t\t\tfor _, room := range roomMap {\n\t\t\t\tif _, ok := newRoomMap[room.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(*room)\n\t\t\t\tif err != nil {\n\t\t\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"room remove %d %s\", room.Id, json)\n\t\t\t\tdelete(roomMap, room.Id)\n\n\t\t\t\t\/\/ need to clean up room users polling\n\t\t\t}\n\t\tcase <-*quitter:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollRoomUsers(d time.Duration, room *Room, login *Login, listener *chan interface{}, quitter *chan interface{}) {\n\tuserMap := make(map[string]*RoomUser)\n\tuserTicker := time.NewTicker(d)\n\tdefer userTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-userTicker.C:\n\t\t\t\/\/ api\n\t\t\tbody, err := makeRequest(\"GET\", fmt.Sprintf(apiEndpoint[1], room.Id), \"\", 200, login, listener)\n\t\t\tif err != nil {\n\t\t\t\t*listener <- fmt.Sprintf(\"error pollUsers %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar users RoomUsers\n\t\t\terr = json.Unmarshal(body, &users)\n\t\t\tif err != nil {\n\t\t\t\t*listener <- fmt.Sprintf(\"error pollUsers %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ added\n\t\t\tnewUserMap := make(map[string]*RoomUser)\n\t\t\tfor _, user := range users.Value {\n\t\t\t\tnewUserMap[user.User.Id] = &user\n\t\t\t\tif _, ok := userMap[user.User.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(user)\n\t\t\t\tif err != nil {\n\t\t\t\t\t*listener <- fmt.Sprintf(\"error pollUsers %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"user add %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\tuserMap[user.User.Id] = &user\n\t\t\t}\n\n\t\t\t\/\/ removed\n\t\t\tfor _, user := range userMap {\n\t\t\t\tif _, ok := newUserMap[user.User.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(*user)\n\t\t\t\tif err != nil {\n\t\t\t\t\t*listener <- fmt.Sprintf(\"error pollUsers %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"user remove %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\tdelete(userMap, user.User.Id)\n\t\t\t}\n\n\t\t\t\/\/ changed\n\t\t\tfor id, user := range newUserMap {\n\t\t\t\tif newUserMap[id].IsOnline != userMap[id].IsOnline {\n\t\t\t\t\tjson, err := json.Marshal(*user)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t*listener <- fmt.Sprintf(\"error pollUsers %s\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tuserMap[id] = newUserMap[id]\n\t\t\t\t\t*listener <- fmt.Sprintf(\"user change %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-*quitter:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollMessages(d time.Duration) {\n}\n<commit_msg>Fixed pointer bugs in set operations, and added some logging<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Login struct {\n\tUser string\n\tPassword string\n\tsync sync.Mutex\n}\ntype User struct {\n\tId string\n\tDisplayName string\n\tUrl string\n\tImageUrl string\n}\ntype RoomUser struct {\n\tRoomId int\n\tUser *User\n\tLastActivity string\n\tJoinedDate string\n\tIsOnline bool\n}\ntype RoomUsers struct {\n\tCount int\n\tValue []*RoomUser\n}\ntype Room struct {\n\tId int\n\tName string\n\tDescription string\n\tLastActivity string\n\tCreatedBy *User\n\tCreatedDate string\n\tHasAdminPermissions bool\n\tHasReadWritePermissions bool\n}\ntype Rooms struct {\n\tCount int\n\tValue []*Room\n}\n\nvar apiClient http.RoundTripper = &http.Transport{\n\tProxy: http.ProxyFromEnvironment,\n}\nvar apiBase string = os.Args[1]\nvar apiEndpoint []string = []string{\n\tapiBase + \"\/_apis\/chat\/rooms?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/users?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/users\/%s?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/messages?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%d\/messages?$filter=PostedTime ge %s&api-version=1.0\",\n}\n\nfunc main() {\n\tlogin := Login{\n\t\tUser: os.Args[2],\n\t\tPassword: os.Args[3],\n\t}\n\t\/\/log.Printf(\"main login=%s\", login)\n\n\tlistener := make(chan interface{}) \/\/ routines send updates here\n\tquitter := make(chan interface{}) \/\/ close to have all routines return\n\n\t\/\/ go pollClient() \/\/ get input\n\tgo pollRooms(5*time.Second, &login, &listener, &quitter)\n\n\tfor l := range listener {\n\t\tfmt.Println(l)\n\t}\n}\n\n\/\/ requests are mutually exclusive on login\n\/\/ so login can be updated on errors\n\/\/ without others triggering same\nfunc makeRequest(verb string, url string, body string, code int, login *Login, listener *chan interface{}) ([]byte, error) {\n\t\/\/log.Printf(\"makeRequest verb=%s url=%s body=%s code=%d\", verb, url, body, code)\n\tlogin.sync.Lock()\n\tdefer login.sync.Unlock()\n\t\n\trequest, err := http.NewRequest(verb, url, strings.NewReader(body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.SetBasicAuth(login.User, login.Password)\n\n\tresponse, err := apiClient.RoundTrip(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != code {\n\t\treturn nil, errors.New(response.Status)\n\t}\n\n\treturn ioutil.ReadAll(response.Body)\n}\n\nfunc pollRooms(d time.Duration, login *Login, listener *chan interface{}, quitter *chan interface{}) {\n\t\/\/log.Printf(\"pollRooms d=%d\", d)\n\troomMap := make(map[int]*Room)\n\troomTicker := time.NewTicker(d)\n\tdefer roomTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-roomTicker.C:\n\t\t\t\/\/ api\n\t\t\tbody, err := makeRequest(\"GET\", apiEndpoint[0], \"\", 200, login, listener)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"pollRooms err=%s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar rooms Rooms \/\/ or interface{} for generic\n\t\t\terr = json.Unmarshal(body, &rooms)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"pollRooms err=%s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/log.Printf(\"pollRooms rooms=%s\", rooms)\n\n\t\t\t\/\/ added\n\t\t\tnewRoomMap := make(map[int]*Room)\n\t\t\tfor _, room := range rooms.Value {\n\t\t\t\tnewRoomMap[room.Id] = room\n\t\t\t\tif _, ok := roomMap[room.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(room)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"pollRooms err=%s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"room add %d %s\", room.Id, json)\n\t\t\t\troomMap[room.Id] = room\n\n\t\t\t\t\/\/ need to track quitter for room users polling\n\t\t\t\tgo pollRoomUsers(3*time.Second, room, login, listener, quitter)\n\t\t\t}\n\n\t\t\t\/\/ removed\n\t\t\tfor _, room := range roomMap {\n\t\t\t\tif _, ok := newRoomMap[room.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(*room)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"pollRooms err=%s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"room remove %d %s\", room.Id, json)\n\t\t\t\tdelete(roomMap, room.Id)\n\n\t\t\t\t\/\/ need to clean up room users polling\n\t\t\t}\n\t\tcase <-*quitter:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollRoomUsers(d time.Duration, room *Room, login *Login, listener *chan interface{}, quitter *chan interface{}) {\n\t\/\/log.Printf(\"pollRoomUsers d=%d room=%s\", d, *room)\n\tuserMap := make(map[string]*RoomUser)\n\tuserTicker := time.NewTicker(d)\n\tdefer userTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-userTicker.C:\n\t\t\t\/\/ api\n\t\t\tbody, err := makeRequest(\"GET\", fmt.Sprintf(apiEndpoint[1], room.Id), \"\", 200, login, listener)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar users RoomUsers\n\t\t\terr = json.Unmarshal(body, &users)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"pollRoomUsers err=%s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ added\n\t\t\tnewUserMap := make(map[string]*RoomUser)\n\t\t\tfor _, user := range users.Value {\n\t\t\t\tnewUserMap[user.User.Id] = user\n\t\t\t\tif _, ok := userMap[user.User.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(user)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"pollRoomUsers err=%s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"user add %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\tuserMap[user.User.Id] = user\n\t\t\t}\n\n\t\t\t\/\/ removed\n\t\t\tfor _, user := range userMap {\n\t\t\t\tif _, ok := newUserMap[user.User.Id]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tjson, err := json.Marshal(*user)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"pollRoomUsers err=%s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t*listener <- fmt.Sprintf(\"user remove %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\tdelete(userMap, user.User.Id)\n\t\t\t}\n\n\t\t\t\/\/ changed\n\t\t\tfor id, user := range newUserMap {\n\t\t\t\tif newUserMap[id].IsOnline != userMap[id].IsOnline {\n\t\t\t\t\tjson, err := json.Marshal(*user)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"pollRoomUsers err=%s\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tuserMap[id] = newUserMap[id]\n\t\t\t\t\t*listener <- fmt.Sprintf(\"user change %d %s %s\", user.RoomId, user.User.Id, json)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-*quitter:\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollMessages(d time.Duration) {\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ TestIntegrationHosting tests that the host correctly receives payment for\n\/\/ hosting files.\nfunc TestIntegrationHosting(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\tst, err := createServerTester(\"TestIntegrationHosting\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ announce the host\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + string(st.host.Address()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ we need to announce twice, or the renter will complain about not having enough hosts\n\tloopAddr := \"127.0.0.1:\" + st.host.Address().Port()\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + loopAddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ wait for announcement to register\n\tst.miner.AddBlock()\n\tvar hosts ActiveHosts\n\tst.getAPI(\"\/hostdb\/hosts\/active\", &hosts)\n\tif len(hosts.Hosts) == 0 {\n\t\tt.Fatal(\"host announcement not seen\")\n\t}\n\n\t\/\/ create a file\n\tpath := filepath.Join(build.SiaTestingDir, \"api\", \"TestIntegrationHosting\", \"test.dat\")\n\tdata, err := crypto.RandBytes(1024)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ioutil.WriteFile(path, data, 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ upload to host\n\terr = st.stdGetAPI(\"\/renter\/files\/upload?nickname=test&duration=10&source=\" + path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar fi []FileInfo\n\tfor len(fi) != 1 || fi[0].UploadProgress != 100 {\n\t\tst.getAPI(\"\/renter\/files\/list\", &fi)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n\n\t\/\/ mine blocks until storage proof is complete\n\tfor i := 0; i < 20+int(types.MaturityDelay); i++ {\n\t\tst.miner.AddBlock()\n\t}\n\n\t\/\/ check balance\n\tvar wi WalletGET\n\tst.getAPI(\"\/wallet\", &wi)\n\texpBal := \"7499794999617870000000002429474\"\n\tif wi.ConfirmedSiacoinBalance.String() != expBal {\n\t\tt.Fatalf(\"host's balance was not affected: expected %v, got %v\", expBal, wi.ConfirmedSiacoinBalance)\n\t}\n}\n<commit_msg>increase number of blocks mined in hosting test<commit_after>package api\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ TestIntegrationHosting tests that the host correctly receives payment for\n\/\/ hosting files.\nfunc TestIntegrationHosting(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\tst, err := createServerTester(\"TestIntegrationHosting\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ announce the host\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + string(st.host.Address()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ we need to announce twice, or the renter will complain about not having enough hosts\n\tloopAddr := \"127.0.0.1:\" + st.host.Address().Port()\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + loopAddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ wait for announcement to register\n\tst.miner.AddBlock()\n\tvar hosts ActiveHosts\n\tst.getAPI(\"\/hostdb\/hosts\/active\", &hosts)\n\tif len(hosts.Hosts) == 0 {\n\t\tt.Fatal(\"host announcement not seen\")\n\t}\n\n\t\/\/ create a file\n\tpath := filepath.Join(build.SiaTestingDir, \"api\", \"TestIntegrationHosting\", \"test.dat\")\n\tdata, err := crypto.RandBytes(1024)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ioutil.WriteFile(path, data, 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ upload to host\n\terr = st.stdGetAPI(\"\/renter\/files\/upload?nickname=test&duration=10&source=\" + path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar fi []FileInfo\n\tvar loops int\n\tfor len(fi) != 1 || fi[0].UploadProgress != 100 {\n\t\tst.getAPI(\"\/renter\/files\/list\", &fi)\n\t\ttime.Sleep(3 * time.Second)\n\t\tloops++\n\t}\n\n\t\/\/ mine blocks until storage proof is complete\n\tfor i := 0; i < 50+int(types.MaturityDelay); i++ {\n\t\tst.miner.AddBlock()\n\t}\n\n\t\/\/ check balance\n\tvar wi WalletGET\n\tst.getAPI(\"\/wallet\", &wi)\n\texpBal := \"16499494999617870000000002429474\"\n\tif wi.ConfirmedSiacoinBalance.String() != expBal {\n\t\tt.Fatalf(\"host's balance was not affected: expected %v, got %v\", expBal, wi.ConfirmedSiacoinBalance)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/buildkite\/agent\/v3\/api\"\n\t\"github.com\/buildkite\/agent\/v3\/logger\"\n)\n\nfunc TestOidcToken(t *testing.T) {\n\tconst jobId = \"b078e2d2-86e9-4c12-bf3b-612a8058d0a4\"\n\tconst oidcToken = \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.NHVaYe26MbtOYhSKkoKYdFVomg4i8ZJd8_-RU8VNbftc4TSMb4bXP3l3YlNWACwyXPGffz5aXHc6lty1Y2t4SWRqGteragsVdZufDn5BlnJl9pdR_kdVFUsra2rWKEofkZeIC4yWytE58sMIihvo9H1ScmmVwBcQP6XETqYd0aSHp1gOa9RdUPDvoXQ5oqygTqVtxaDr6wUFKrKItgBMzWIdNZ6y7O9E0DhEPTbE9rfBo6KTFsHAZnMg4k68CDp2woYIaXbmYTWcvbzIuHO7_37GT79XdIwkm95QJ7hYC9RiwrV7mesbY4PAahERJawntho0my942XheVLmGwLMBkQ\"\n\tconst accessToken = \"llamas\"\n\n\tpath := fmt.Sprintf(\"\/jobs\/%s\/oidc\/tokens\", jobId)\n\tserver := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\tswitch req.URL.Path {\n\t\tcase path:\n\t\t\tif got, want := authToken(req), accessToken; got != want {\n\t\t\t\thttp.Error(\n\t\t\t\t\trw,\n\t\t\t\t\tfmt.Sprintf(\"authToken(req) = %q, want %q\", got, want),\n\t\t\t\t\thttp.StatusUnauthorized,\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\tfmt.Fprint(rw, fmt.Sprintf(`{\"token\":\"%s\"}`, oidcToken))\n\n\t\tdefault:\n\t\t\thttp.Error(\n\t\t\t\trw,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`{\"message\": \"not found; method = %q, path = %q\"}`,\n\t\t\t\t\treq.Method,\n\t\t\t\t\treq.URL.Path,\n\t\t\t\t),\n\t\t\t\thttp.StatusNotFound,\n\t\t\t)\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Initial client with a registration token\n\tclient := api.NewClient(logger.Discard, api.Config{\n\t\tUserAgent: \"Test\",\n\t\tEndpoint: server.URL,\n\t\tToken: accessToken,\n\t\tDebugHTTP: true,\n\t})\n\n\tfor _, testData := range []struct {\n\t\tJobId string\n\t\tAccessToken string\n\t\tAudience []string\n\t\tError error\n\t}{\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tAudience: []string{},\n\t\t},\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tAudience: []string{\"sts.amazonaws.com\"},\n\t\t},\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tAudience: []string{\"sts.amazonaws.com\", \"buildkite.com\"},\n\t\t\tError: api.ErrAudienceTooLong,\n\t\t},\n\t} {\n\t\tif token, resp, err := client.OidcToken(testData.JobId, testData.Audience...); err != nil {\n\t\t\tif !errors.Is(err, testData.Error) {\n\t\t\t\tt.Fatalf(\"OidcToken(%v, %v) got error = %# v, want error = %# v\", testData.JobId, testData.Audience, err, testData.Error)\n\t\t\t}\n\t\t} else if token.Token != oidcToken {\n\t\t\tt.Fatalf(\"OidcToken(%v, %v) got token = %# v, want %# v\", testData.JobId, testData.Audience, token, &api.OidcToken{Token: oidcToken})\n\t\t} else if resp.StatusCode != http.StatusOK {\n\t\t\tt.Fatalf(\"OidcToken(%v, %v) got StatusCode = %# v, want %# v\", testData.JobId, testData.Audience, resp.StatusCode, http.StatusOK)\n\t\t}\n\t}\n}\n<commit_msg>Factor out oidc token into test table<commit_after>package api_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/buildkite\/agent\/v3\/api\"\n\t\"github.com\/buildkite\/agent\/v3\/logger\"\n)\n\nfunc TestOidcToken(t *testing.T) {\n\tconst jobId = \"b078e2d2-86e9-4c12-bf3b-612a8058d0a4\"\n\tconst oidcToken = \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.NHVaYe26MbtOYhSKkoKYdFVomg4i8ZJd8_-RU8VNbftc4TSMb4bXP3l3YlNWACwyXPGffz5aXHc6lty1Y2t4SWRqGteragsVdZufDn5BlnJl9pdR_kdVFUsra2rWKEofkZeIC4yWytE58sMIihvo9H1ScmmVwBcQP6XETqYd0aSHp1gOa9RdUPDvoXQ5oqygTqVtxaDr6wUFKrKItgBMzWIdNZ6y7O9E0DhEPTbE9rfBo6KTFsHAZnMg4k68CDp2woYIaXbmYTWcvbzIuHO7_37GT79XdIwkm95QJ7hYC9RiwrV7mesbY4PAahERJawntho0my942XheVLmGwLMBkQ\"\n\tconst accessToken = \"llamas\"\n\n\tpath := fmt.Sprintf(\"\/jobs\/%s\/oidc\/tokens\", jobId)\n\tserver := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\tswitch req.URL.Path {\n\t\tcase path:\n\t\t\tif got, want := authToken(req), accessToken; got != want {\n\t\t\t\thttp.Error(\n\t\t\t\t\trw,\n\t\t\t\t\tfmt.Sprintf(\"authToken(req) = %q, want %q\", got, want),\n\t\t\t\t\thttp.StatusUnauthorized,\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\tfmt.Fprint(rw, fmt.Sprintf(`{\"token\":\"%s\"}`, oidcToken))\n\n\t\tdefault:\n\t\t\thttp.Error(\n\t\t\t\trw,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`{\"message\": \"not found; method = %q, path = %q\"}`,\n\t\t\t\t\treq.Method,\n\t\t\t\t\treq.URL.Path,\n\t\t\t\t),\n\t\t\t\thttp.StatusNotFound,\n\t\t\t)\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Initial client with a registration token\n\tclient := api.NewClient(logger.Discard, api.Config{\n\t\tUserAgent: \"Test\",\n\t\tEndpoint: server.URL,\n\t\tToken: accessToken,\n\t\tDebugHTTP: true,\n\t})\n\n\tfor _, testData := range []struct {\n\t\tJobId string\n\t\tAccessToken string\n\t\tOidcToken *api.OidcToken\n\t\tAudience []string\n\t\tError error\n\t}{\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tOidcToken: &api.OidcToken{Token: oidcToken},\n\t\t\tAudience: []string{},\n\t\t},\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tOidcToken: &api.OidcToken{Token: oidcToken},\n\t\t\tAudience: []string{\"sts.amazonaws.com\"},\n\t\t},\n\t\t{\n\t\t\tJobId: jobId,\n\t\t\tAccessToken: accessToken,\n\t\t\tAudience: []string{\"sts.amazonaws.com\", \"buildkite.com\"},\n\t\t\tError: api.ErrAudienceTooLong,\n\t\t},\n\t} {\n\t\tif token, resp, err := client.OidcToken(testData.JobId, testData.Audience...); err != nil {\n\t\t\tif !errors.Is(err, testData.Error) {\n\t\t\t\tt.Fatalf(\n\t\t\t\t\t\"OidcToken(%v, %v) got error = %# v, want error = %# v\",\n\t\t\t\t\ttestData.JobId,\n\t\t\t\t\ttestData.Audience,\n\t\t\t\t\terr,\n\t\t\t\t\ttestData.Error,\n\t\t\t\t)\n\t\t\t}\n\t\t} else if token.Token != oidcToken {\n\t\t\tt.Fatalf(\"OidcToken(%v, %v) got token = %# v, want %# v\", testData.JobId, testData.Audience, token, testData.OidcToken)\n\t\t} else if resp.StatusCode != http.StatusOK {\n\t\t\tt.Fatalf(\"OidcToken(%v, %v) got StatusCode = %# v, want %# v\", testData.JobId, testData.Audience, resp.StatusCode, http.StatusOK)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 clair authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\t\"github.com\/fernet\/fernet-go\"\n\n\t\"github.com\/coreos\/clair\/database\"\n\t\"github.com\/coreos\/clair\/ext\/versionfmt\"\n\t\"github.com\/coreos\/clair\/utils\/types\"\n)\n\nvar log = capnslog.NewPackageLogger(\"github.com\/coreos\/clair\", \"v1\")\n\ntype Error struct {\n\tMessage string `json:\"Layer\"`\n}\n\ntype Layer struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tPath string `json:\"Path,omitempty\"`\n\tHeaders map[string]string `json:\"Headers,omitempty\"`\n\tParentName string `json:\"ParentName,omitempty\"`\n\tFormat string `json:\"Format,omitempty\"`\n\tIndexedByVersion int `json:\"IndexedByVersion,omitempty\"`\n\tFeatures []Feature `json:\"Features,omitempty\"`\n}\n\nfunc LayerFromDatabaseModel(dbLayer database.Layer, withFeatures, withVulnerabilities bool) Layer {\n\tlayer := Layer{\n\t\tName: dbLayer.Name,\n\t\tIndexedByVersion: dbLayer.EngineVersion,\n\t}\n\n\tif dbLayer.Parent != nil {\n\t\tlayer.ParentName = dbLayer.Parent.Name\n\t}\n\n\tif dbLayer.Namespace != nil {\n\t\tlayer.NamespaceName = dbLayer.Namespace.Name\n\t}\n\n\tif withFeatures || withVulnerabilities && dbLayer.Features != nil {\n\t\tfor _, dbFeatureVersion := range dbLayer.Features {\n\t\t\tfeature := Feature{\n\t\t\t\tName: dbFeatureVersion.Feature.Name,\n\t\t\t\tNamespaceName: dbFeatureVersion.Feature.Namespace.Name,\n\t\t\t\tVersionFormat: dbFeatureVersion.Feature.Namespace.VersionFormat,\n\t\t\t\tVersion: dbFeatureVersion.Version,\n\t\t\t\tAddedBy: dbFeatureVersion.AddedBy.Name,\n\t\t\t}\n\n\t\t\tfor _, dbVuln := range dbFeatureVersion.AffectedBy {\n\t\t\t\tvuln := Vulnerability{\n\t\t\t\t\tName: dbVuln.Name,\n\t\t\t\t\tNamespaceName: dbVuln.Namespace.Name,\n\t\t\t\t\tDescription: dbVuln.Description,\n\t\t\t\t\tLink: dbVuln.Link,\n\t\t\t\t\tSeverity: string(dbVuln.Severity),\n\t\t\t\t\tMetadata: dbVuln.Metadata,\n\t\t\t\t}\n\n\t\t\t\tif dbVuln.FixedBy != versionfmt.MaxVersion {\n\t\t\t\t\tvuln.FixedBy = dbVuln.FixedBy\n\t\t\t\t}\n\t\t\t\tfeature.Vulnerabilities = append(feature.Vulnerabilities, vuln)\n\t\t\t}\n\t\t\tlayer.Features = append(layer.Features, feature)\n\t\t}\n\t}\n\n\treturn layer\n}\n\ntype Namespace struct {\n\tName string `json:\"Name,omitempty\"`\n\tVersionFormat string `json:\"VersionFormat,omitempty\"`\n}\n\ntype Vulnerability struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tDescription string `json:\"Description,omitempty\"`\n\tLink string `json:\"Link,omitempty\"`\n\tSeverity string `json:\"Severity,omitempty\"`\n\tMetadata map[string]interface{} `json:\"Metadata,omitempty\"`\n\tFixedBy string `json:\"FixedBy,omitempty\"`\n\tFixedIn []Feature `json:\"FixedIn,omitempty\"`\n}\n\nfunc (v Vulnerability) DatabaseModel() (database.Vulnerability, error) {\n\tseverity := types.Priority(v.Severity)\n\tif !severity.IsValid() {\n\t\treturn database.Vulnerability{}, errors.New(\"Invalid severity\")\n\t}\n\n\tvar dbFeatures []database.FeatureVersion\n\tfor _, feature := range v.FixedIn {\n\t\tdbFeature, err := feature.DatabaseModel()\n\t\tif err != nil {\n\t\t\treturn database.Vulnerability{}, err\n\t\t}\n\n\t\tdbFeatures = append(dbFeatures, dbFeature)\n\t}\n\n\treturn database.Vulnerability{\n\t\tName: v.Name,\n\t\tNamespace: database.Namespace{Name: v.NamespaceName},\n\t\tDescription: v.Description,\n\t\tLink: v.Link,\n\t\tSeverity: severity,\n\t\tMetadata: v.Metadata,\n\t\tFixedIn: dbFeatures,\n\t}, nil\n}\n\nfunc VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability, withFixedIn bool) Vulnerability {\n\tvuln := Vulnerability{\n\t\tName: dbVuln.Name,\n\t\tNamespaceName: dbVuln.Namespace.Name,\n\t\tDescription: dbVuln.Description,\n\t\tLink: dbVuln.Link,\n\t\tSeverity: string(dbVuln.Severity),\n\t\tMetadata: dbVuln.Metadata,\n\t}\n\n\tif withFixedIn {\n\t\tfor _, dbFeatureVersion := range dbVuln.FixedIn {\n\t\t\tvuln.FixedIn = append(vuln.FixedIn, FeatureFromDatabaseModel(dbFeatureVersion))\n\t\t}\n\t}\n\n\treturn vuln\n}\n\ntype Feature struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tVersionFormat string `json:\"VersionFormat,omitempty\"`\n\tVersion string `json:\"Version,omitempty\"`\n\tVulnerabilities []Vulnerability `json:\"Vulnerabilities,omitempty\"`\n\tAddedBy string `json:\"AddedBy,omitempty\"`\n}\n\nfunc FeatureFromDatabaseModel(dbFeatureVersion database.FeatureVersion) Feature {\n\tversion := dbFeatureVersion.Version\n\tif version == versionfmt.MaxVersion {\n\t\tversion = \"None\"\n\t}\n\n\treturn Feature{\n\t\tName: dbFeatureVersion.Feature.Name,\n\t\tNamespaceName: dbFeatureVersion.Feature.Namespace.Name,\n\t\tVersionFormat: dbFeatureVersion.Feature.Namespace.VersionFormat,\n\t\tVersion: version,\n\t\tAddedBy: dbFeatureVersion.AddedBy.Name,\n\t}\n}\n\nfunc (f Feature) DatabaseModel() (fv database.FeatureVersion, err error) {\n\tvar version string\n\tif f.Version == \"None\" {\n\t\tversion = versionfmt.MaxVersion\n\t} else {\n\t\terr = versionfmt.Valid(f.VersionFormat, f.Version)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tversion = f.Version\n\t}\n\n\tfv = database.FeatureVersion{\n\t\tFeature: database.Feature{\n\t\t\tName: f.Name,\n\t\t\tNamespace: database.Namespace{\n\t\t\t\tName: f.NamespaceName,\n\t\t\t\tVersionFormat: f.VersionFormat,\n\t\t\t},\n\t\t},\n\t\tVersion: version,\n\t}\n\n\treturn\n}\n\ntype Notification struct {\n\tName string `json:\"Name,omitempty\"`\n\tCreated string `json:\"Created,omitempty\"`\n\tNotified string `json:\"Notified,omitempty\"`\n\tDeleted string `json:\"Deleted,omitempty\"`\n\tLimit int `json:\"Limit,omitempty\"`\n\tPage string `json:\"Page,omitempty\"`\n\tNextPage string `json:\"NextPage,omitempty\"`\n\tOld *VulnerabilityWithLayers `json:\"Old,omitempty\"`\n\tNew *VulnerabilityWithLayers `json:\"New,omitempty\"`\n}\n\nfunc NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotification, limit int, pageToken string, nextPage database.VulnerabilityNotificationPageNumber, key string) Notification {\n\tvar oldVuln *VulnerabilityWithLayers\n\tif dbNotification.OldVulnerability != nil {\n\t\tv := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.OldVulnerability)\n\t\toldVuln = &v\n\t}\n\n\tvar newVuln *VulnerabilityWithLayers\n\tif dbNotification.NewVulnerability != nil {\n\t\tv := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.NewVulnerability)\n\t\tnewVuln = &v\n\t}\n\n\tvar nextPageStr string\n\tif nextPage != database.NoVulnerabilityNotificationPage {\n\t\tnextPageBytes, _ := tokenMarshal(nextPage, key)\n\t\tnextPageStr = string(nextPageBytes)\n\t}\n\n\tvar created, notified, deleted string\n\tif !dbNotification.Created.IsZero() {\n\t\tcreated = fmt.Sprintf(\"%d\", dbNotification.Created.Unix())\n\t}\n\tif !dbNotification.Notified.IsZero() {\n\t\tnotified = fmt.Sprintf(\"%d\", dbNotification.Notified.Unix())\n\t}\n\tif !dbNotification.Deleted.IsZero() {\n\t\tdeleted = fmt.Sprintf(\"%d\", dbNotification.Deleted.Unix())\n\t}\n\n\t\/\/ TODO(jzelinskie): implement \"changed\" key\n\tfmt.Println(dbNotification.Deleted.IsZero())\n\treturn Notification{\n\t\tName: dbNotification.Name,\n\t\tCreated: created,\n\t\tNotified: notified,\n\t\tDeleted: deleted,\n\t\tLimit: limit,\n\t\tPage: pageToken,\n\t\tNextPage: nextPageStr,\n\t\tOld: oldVuln,\n\t\tNew: newVuln,\n\t}\n}\n\ntype VulnerabilityWithLayers struct {\n\tVulnerability *Vulnerability `json:\"Vulnerability,omitempty\"`\n\n\t\/\/ This field is guaranteed to be in order only for pagination.\n\t\/\/ Indices from different notifications may not be comparable.\n\tOrderedLayersIntroducingVulnerability []OrderedLayerName `json:\"OrderedLayersIntroducingVulnerability,omitempty\"`\n\n\t\/\/ This field is deprecated.\n\tLayersIntroducingVulnerability []string `json:\"LayersIntroducingVulnerability,omitempty\"`\n}\n\ntype OrderedLayerName struct {\n\tIndex int `json:\"Index\"`\n\tLayerName string `json:\"LayerName\"`\n}\n\nfunc VulnerabilityWithLayersFromDatabaseModel(dbVuln database.Vulnerability) VulnerabilityWithLayers {\n\tvuln := VulnerabilityFromDatabaseModel(dbVuln, true)\n\n\tvar layers []string\n\tvar orderedLayers []OrderedLayerName\n\tfor _, layer := range dbVuln.LayersIntroducingVulnerability {\n\t\tlayers = append(layers, layer.Name)\n\t\torderedLayers = append(orderedLayers, OrderedLayerName{\n\t\t\tIndex: layer.ID,\n\t\t\tLayerName: layer.Name,\n\t\t})\n\t}\n\n\treturn VulnerabilityWithLayers{\n\t\tVulnerability: &vuln,\n\t\tOrderedLayersIntroducingVulnerability: orderedLayers,\n\t\tLayersIntroducingVulnerability: layers,\n\t}\n}\n\ntype LayerEnvelope struct {\n\tLayer *Layer `json:\"Layer,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype NamespaceEnvelope struct {\n\tNamespaces *[]Namespace `json:\"Namespaces,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype VulnerabilityEnvelope struct {\n\tVulnerability *Vulnerability `json:\"Vulnerability,omitempty\"`\n\tVulnerabilities *[]Vulnerability `json:\"Vulnerabilities,omitempty\"`\n\tNextPage string `json:\"NextPage,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype NotificationEnvelope struct {\n\tNotification *Notification `json:\"Notification,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype FeatureEnvelope struct {\n\tFeature *Feature `json:\"Feature,omitempty\"`\n\tFeatures *[]Feature `json:\"Features,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\nfunc tokenUnmarshal(token string, key string, v interface{}) error {\n\tk, _ := fernet.DecodeKey(key)\n\tmsg := fernet.VerifyAndDecrypt([]byte(token), time.Hour, []*fernet.Key{k})\n\tif msg == nil {\n\t\treturn errors.New(\"invalid or expired pagination token\")\n\t}\n\n\treturn json.NewDecoder(bytes.NewBuffer(msg)).Decode(&v)\n}\n\nfunc tokenMarshal(v interface{}, key string) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := json.NewEncoder(&buf).Encode(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tk, _ := fernet.DecodeKey(key)\n\treturn fernet.EncryptAndSign(buf.Bytes(), k)\n}\n<commit_msg>api\/v1: fix JSON struct tag misnomer<commit_after>\/\/ Copyright 2015 clair authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\t\"github.com\/fernet\/fernet-go\"\n\n\t\"github.com\/coreos\/clair\/database\"\n\t\"github.com\/coreos\/clair\/ext\/versionfmt\"\n\t\"github.com\/coreos\/clair\/utils\/types\"\n)\n\nvar log = capnslog.NewPackageLogger(\"github.com\/coreos\/clair\", \"v1\")\n\ntype Error struct {\n\tMessage string `json:\"Message,omitempty\"`\n}\n\ntype Layer struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tPath string `json:\"Path,omitempty\"`\n\tHeaders map[string]string `json:\"Headers,omitempty\"`\n\tParentName string `json:\"ParentName,omitempty\"`\n\tFormat string `json:\"Format,omitempty\"`\n\tIndexedByVersion int `json:\"IndexedByVersion,omitempty\"`\n\tFeatures []Feature `json:\"Features,omitempty\"`\n}\n\nfunc LayerFromDatabaseModel(dbLayer database.Layer, withFeatures, withVulnerabilities bool) Layer {\n\tlayer := Layer{\n\t\tName: dbLayer.Name,\n\t\tIndexedByVersion: dbLayer.EngineVersion,\n\t}\n\n\tif dbLayer.Parent != nil {\n\t\tlayer.ParentName = dbLayer.Parent.Name\n\t}\n\n\tif dbLayer.Namespace != nil {\n\t\tlayer.NamespaceName = dbLayer.Namespace.Name\n\t}\n\n\tif withFeatures || withVulnerabilities && dbLayer.Features != nil {\n\t\tfor _, dbFeatureVersion := range dbLayer.Features {\n\t\t\tfeature := Feature{\n\t\t\t\tName: dbFeatureVersion.Feature.Name,\n\t\t\t\tNamespaceName: dbFeatureVersion.Feature.Namespace.Name,\n\t\t\t\tVersionFormat: dbFeatureVersion.Feature.Namespace.VersionFormat,\n\t\t\t\tVersion: dbFeatureVersion.Version,\n\t\t\t\tAddedBy: dbFeatureVersion.AddedBy.Name,\n\t\t\t}\n\n\t\t\tfor _, dbVuln := range dbFeatureVersion.AffectedBy {\n\t\t\t\tvuln := Vulnerability{\n\t\t\t\t\tName: dbVuln.Name,\n\t\t\t\t\tNamespaceName: dbVuln.Namespace.Name,\n\t\t\t\t\tDescription: dbVuln.Description,\n\t\t\t\t\tLink: dbVuln.Link,\n\t\t\t\t\tSeverity: string(dbVuln.Severity),\n\t\t\t\t\tMetadata: dbVuln.Metadata,\n\t\t\t\t}\n\n\t\t\t\tif dbVuln.FixedBy != versionfmt.MaxVersion {\n\t\t\t\t\tvuln.FixedBy = dbVuln.FixedBy\n\t\t\t\t}\n\t\t\t\tfeature.Vulnerabilities = append(feature.Vulnerabilities, vuln)\n\t\t\t}\n\t\t\tlayer.Features = append(layer.Features, feature)\n\t\t}\n\t}\n\n\treturn layer\n}\n\ntype Namespace struct {\n\tName string `json:\"Name,omitempty\"`\n\tVersionFormat string `json:\"VersionFormat,omitempty\"`\n}\n\ntype Vulnerability struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tDescription string `json:\"Description,omitempty\"`\n\tLink string `json:\"Link,omitempty\"`\n\tSeverity string `json:\"Severity,omitempty\"`\n\tMetadata map[string]interface{} `json:\"Metadata,omitempty\"`\n\tFixedBy string `json:\"FixedBy,omitempty\"`\n\tFixedIn []Feature `json:\"FixedIn,omitempty\"`\n}\n\nfunc (v Vulnerability) DatabaseModel() (database.Vulnerability, error) {\n\tseverity := types.Priority(v.Severity)\n\tif !severity.IsValid() {\n\t\treturn database.Vulnerability{}, errors.New(\"Invalid severity\")\n\t}\n\n\tvar dbFeatures []database.FeatureVersion\n\tfor _, feature := range v.FixedIn {\n\t\tdbFeature, err := feature.DatabaseModel()\n\t\tif err != nil {\n\t\t\treturn database.Vulnerability{}, err\n\t\t}\n\n\t\tdbFeatures = append(dbFeatures, dbFeature)\n\t}\n\n\treturn database.Vulnerability{\n\t\tName: v.Name,\n\t\tNamespace: database.Namespace{Name: v.NamespaceName},\n\t\tDescription: v.Description,\n\t\tLink: v.Link,\n\t\tSeverity: severity,\n\t\tMetadata: v.Metadata,\n\t\tFixedIn: dbFeatures,\n\t}, nil\n}\n\nfunc VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability, withFixedIn bool) Vulnerability {\n\tvuln := Vulnerability{\n\t\tName: dbVuln.Name,\n\t\tNamespaceName: dbVuln.Namespace.Name,\n\t\tDescription: dbVuln.Description,\n\t\tLink: dbVuln.Link,\n\t\tSeverity: string(dbVuln.Severity),\n\t\tMetadata: dbVuln.Metadata,\n\t}\n\n\tif withFixedIn {\n\t\tfor _, dbFeatureVersion := range dbVuln.FixedIn {\n\t\t\tvuln.FixedIn = append(vuln.FixedIn, FeatureFromDatabaseModel(dbFeatureVersion))\n\t\t}\n\t}\n\n\treturn vuln\n}\n\ntype Feature struct {\n\tName string `json:\"Name,omitempty\"`\n\tNamespaceName string `json:\"NamespaceName,omitempty\"`\n\tVersionFormat string `json:\"VersionFormat,omitempty\"`\n\tVersion string `json:\"Version,omitempty\"`\n\tVulnerabilities []Vulnerability `json:\"Vulnerabilities,omitempty\"`\n\tAddedBy string `json:\"AddedBy,omitempty\"`\n}\n\nfunc FeatureFromDatabaseModel(dbFeatureVersion database.FeatureVersion) Feature {\n\tversion := dbFeatureVersion.Version\n\tif version == versionfmt.MaxVersion {\n\t\tversion = \"None\"\n\t}\n\n\treturn Feature{\n\t\tName: dbFeatureVersion.Feature.Name,\n\t\tNamespaceName: dbFeatureVersion.Feature.Namespace.Name,\n\t\tVersionFormat: dbFeatureVersion.Feature.Namespace.VersionFormat,\n\t\tVersion: version,\n\t\tAddedBy: dbFeatureVersion.AddedBy.Name,\n\t}\n}\n\nfunc (f Feature) DatabaseModel() (fv database.FeatureVersion, err error) {\n\tvar version string\n\tif f.Version == \"None\" {\n\t\tversion = versionfmt.MaxVersion\n\t} else {\n\t\terr = versionfmt.Valid(f.VersionFormat, f.Version)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tversion = f.Version\n\t}\n\n\tfv = database.FeatureVersion{\n\t\tFeature: database.Feature{\n\t\t\tName: f.Name,\n\t\t\tNamespace: database.Namespace{\n\t\t\t\tName: f.NamespaceName,\n\t\t\t\tVersionFormat: f.VersionFormat,\n\t\t\t},\n\t\t},\n\t\tVersion: version,\n\t}\n\n\treturn\n}\n\ntype Notification struct {\n\tName string `json:\"Name,omitempty\"`\n\tCreated string `json:\"Created,omitempty\"`\n\tNotified string `json:\"Notified,omitempty\"`\n\tDeleted string `json:\"Deleted,omitempty\"`\n\tLimit int `json:\"Limit,omitempty\"`\n\tPage string `json:\"Page,omitempty\"`\n\tNextPage string `json:\"NextPage,omitempty\"`\n\tOld *VulnerabilityWithLayers `json:\"Old,omitempty\"`\n\tNew *VulnerabilityWithLayers `json:\"New,omitempty\"`\n}\n\nfunc NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotification, limit int, pageToken string, nextPage database.VulnerabilityNotificationPageNumber, key string) Notification {\n\tvar oldVuln *VulnerabilityWithLayers\n\tif dbNotification.OldVulnerability != nil {\n\t\tv := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.OldVulnerability)\n\t\toldVuln = &v\n\t}\n\n\tvar newVuln *VulnerabilityWithLayers\n\tif dbNotification.NewVulnerability != nil {\n\t\tv := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.NewVulnerability)\n\t\tnewVuln = &v\n\t}\n\n\tvar nextPageStr string\n\tif nextPage != database.NoVulnerabilityNotificationPage {\n\t\tnextPageBytes, _ := tokenMarshal(nextPage, key)\n\t\tnextPageStr = string(nextPageBytes)\n\t}\n\n\tvar created, notified, deleted string\n\tif !dbNotification.Created.IsZero() {\n\t\tcreated = fmt.Sprintf(\"%d\", dbNotification.Created.Unix())\n\t}\n\tif !dbNotification.Notified.IsZero() {\n\t\tnotified = fmt.Sprintf(\"%d\", dbNotification.Notified.Unix())\n\t}\n\tif !dbNotification.Deleted.IsZero() {\n\t\tdeleted = fmt.Sprintf(\"%d\", dbNotification.Deleted.Unix())\n\t}\n\n\t\/\/ TODO(jzelinskie): implement \"changed\" key\n\tfmt.Println(dbNotification.Deleted.IsZero())\n\treturn Notification{\n\t\tName: dbNotification.Name,\n\t\tCreated: created,\n\t\tNotified: notified,\n\t\tDeleted: deleted,\n\t\tLimit: limit,\n\t\tPage: pageToken,\n\t\tNextPage: nextPageStr,\n\t\tOld: oldVuln,\n\t\tNew: newVuln,\n\t}\n}\n\ntype VulnerabilityWithLayers struct {\n\tVulnerability *Vulnerability `json:\"Vulnerability,omitempty\"`\n\n\t\/\/ This field is guaranteed to be in order only for pagination.\n\t\/\/ Indices from different notifications may not be comparable.\n\tOrderedLayersIntroducingVulnerability []OrderedLayerName `json:\"OrderedLayersIntroducingVulnerability,omitempty\"`\n\n\t\/\/ This field is deprecated.\n\tLayersIntroducingVulnerability []string `json:\"LayersIntroducingVulnerability,omitempty\"`\n}\n\ntype OrderedLayerName struct {\n\tIndex int `json:\"Index\"`\n\tLayerName string `json:\"LayerName\"`\n}\n\nfunc VulnerabilityWithLayersFromDatabaseModel(dbVuln database.Vulnerability) VulnerabilityWithLayers {\n\tvuln := VulnerabilityFromDatabaseModel(dbVuln, true)\n\n\tvar layers []string\n\tvar orderedLayers []OrderedLayerName\n\tfor _, layer := range dbVuln.LayersIntroducingVulnerability {\n\t\tlayers = append(layers, layer.Name)\n\t\torderedLayers = append(orderedLayers, OrderedLayerName{\n\t\t\tIndex: layer.ID,\n\t\t\tLayerName: layer.Name,\n\t\t})\n\t}\n\n\treturn VulnerabilityWithLayers{\n\t\tVulnerability: &vuln,\n\t\tOrderedLayersIntroducingVulnerability: orderedLayers,\n\t\tLayersIntroducingVulnerability: layers,\n\t}\n}\n\ntype LayerEnvelope struct {\n\tLayer *Layer `json:\"Layer,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype NamespaceEnvelope struct {\n\tNamespaces *[]Namespace `json:\"Namespaces,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype VulnerabilityEnvelope struct {\n\tVulnerability *Vulnerability `json:\"Vulnerability,omitempty\"`\n\tVulnerabilities *[]Vulnerability `json:\"Vulnerabilities,omitempty\"`\n\tNextPage string `json:\"NextPage,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype NotificationEnvelope struct {\n\tNotification *Notification `json:\"Notification,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\ntype FeatureEnvelope struct {\n\tFeature *Feature `json:\"Feature,omitempty\"`\n\tFeatures *[]Feature `json:\"Features,omitempty\"`\n\tError *Error `json:\"Error,omitempty\"`\n}\n\nfunc tokenUnmarshal(token string, key string, v interface{}) error {\n\tk, _ := fernet.DecodeKey(key)\n\tmsg := fernet.VerifyAndDecrypt([]byte(token), time.Hour, []*fernet.Key{k})\n\tif msg == nil {\n\t\treturn errors.New(\"invalid or expired pagination token\")\n\t}\n\n\treturn json.NewDecoder(bytes.NewBuffer(msg)).Decode(&v)\n}\n\nfunc tokenMarshal(v interface{}, key string) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := json.NewEncoder(&buf).Encode(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tk, _ := fernet.DecodeKey(key)\n\treturn fernet.EncryptAndSign(buf.Bytes(), k)\n}\n<|endoftext|>"} {"text":"<commit_before>package examples\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\tiom \"github.com\/grokify\/gotilla\/io\/ioutilmore\"\n)\n\nconst (\n\tHandlersDir = \"github.com\/grokify\/chathooks\/docs\/handlers\"\n\tExamples = \"aha,appsignal,apteligent,circleci,codeship,confluence,datadog,deskdotcom,enchant,gosquared,heroku,librato,magnumci,marketo,opsgenie,papertrail,pingdom,raygun,runscope,semaphore,statuspage,travisci,userlike,victorops\"\n)\n\nfunc AbsDirGopath(dir string) string {\n\treturn filepath.Join(os.Getenv(\"GOPATH\"), \"src\", dir)\n}\n\nfunc DocsHandlersDirInfo() ([]string, []string, error) {\n\tdirname := AbsDirGopath(HandlersDir)\n\tfmt.Println(dirname)\n\n\tdirs := []string{}\n\texampleFiles := []string{}\n\tsdirs, _, err := iom.ReadDirSplit(dirname, true)\n\n\tif err != nil {\n\t\treturn dirs, exampleFiles, err\n\t}\n\n\tfor _, sdir := range sdirs {\n\t\tfmt.Printf(\"SDIR: %v\\n\", sdir.Name())\n\t\tabsSubDir := filepath.Join(dirname, sdir.Name())\n\t\tfiles, err := iom.DirEntriesReNotEmpty(absSubDir,\n\t\t\tregexp.MustCompile(`^event-example_.+\\.(json|txt)$`))\n\t\tif err != nil {\n\t\t\treturn dirs, exampleFiles, err\n\t\t}\n\t\tif len(files) > 0 {\n\t\t\tdirs = append(dirs, sdir.Name())\n\t\t\tfor _, f := range files {\n\t\t\t\tfmt.Printf(\"FILE: %v\\n\", f.Name())\n\t\t\t\texFilepath := filepath.Join(absSubDir, f.Name())\n\t\t\t\texampleFiles = append(exampleFiles, exFilepath)\n\t\t\t}\n\t\t}\n\t}\n\treturn dirs, exampleFiles, nil\n}\n<commit_msg>update style<commit_after>package examples\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/grokify\/gotilla\/io\/ioutilmore\"\n)\n\nconst (\n\tHandlersDir = \"github.com\/grokify\/chathooks\/docs\/handlers\"\n\tExamples = \"aha,appsignal,apteligent,circleci,codeship,confluence,datadog,deskdotcom,enchant,gosquared,heroku,librato,magnumci,marketo,opsgenie,papertrail,pingdom,raygun,runscope,semaphore,statuspage,travisci,userlike,victorops\"\n)\n\nfunc AbsDirGopath(dir string) string {\n\treturn filepath.Join(os.Getenv(\"GOPATH\"), \"src\", dir)\n}\n\nfunc DocsHandlersDirInfo() ([]string, []string, error) {\n\tdirname := AbsDirGopath(HandlersDir)\n\tfmt.Println(dirname)\n\n\tdirs := []string{}\n\texampleFiles := []string{}\n\tsdirs, _, err := ioutilmore.ReadDirSplit(dirname, true)\n\n\tif err != nil {\n\t\treturn dirs, exampleFiles, err\n\t}\n\n\tfor _, sdir := range sdirs {\n\t\tfmt.Printf(\"SDIR: %v\\n\", sdir.Name())\n\t\tabsSubDir := filepath.Join(dirname, sdir.Name())\n\t\tfiles, err := ioutilmore.DirEntriesReNotEmpty(absSubDir,\n\t\t\tregexp.MustCompile(`^event-example_.+\\.(json|txt)$`))\n\t\tif err != nil {\n\t\t\treturn dirs, exampleFiles, err\n\t\t}\n\t\tif len(files) > 0 {\n\t\t\tdirs = append(dirs, sdir.Name())\n\t\t\tfor _, f := range files {\n\t\t\t\tfmt.Printf(\"FILE: %v\\n\", f.Name())\n\t\t\t\texFilepath := filepath.Join(absSubDir, f.Name())\n\t\t\t\texampleFiles = append(exampleFiles, exFilepath)\n\t\t\t}\n\t\t}\n\t}\n\treturn dirs, exampleFiles, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Flake generates unique identifiers that are roughly sortable by time. Flake can\n\/\/ run on a cluster of machines and still generate unique IDs without requiring\n\/\/ worker coordination.\n\/\/\n\/\/ A Flake ID is a 64-bit integer will the following components:\n\/\/ - 41 bits is the timestamp with millisecond precision\n\/\/ - 10 bits is the host id (uses IP modulo 2^10)\n\/\/ - 13 bits is an auto-incrementing sequence for ID requests within the same millisecond\n\/\/\n\/\/ Note: In order to make a millisecond timestamp fit within 41 bits, a custom\n\/\/ epoch of Jan 1, 2015 00:00:00 is used.\n\npackage flake\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tHostBits = 10\n\tSequenceBits = 13\n)\n\nvar (\n\t\/\/ Custom Epoch so the timestamp can fit into 41 bits.\n\t\/\/ Jan 1, 2015 00:00:00 UTC\n\tEpoch time.Time = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)\n\tMaxWorkerId uint64 = (1 << HostBits) - 1\n\tMaxSequence uint64 = (1 << SequenceBits) - 1\n)\n\n\/\/ Id represents a unique k-ordered Id\ntype Id uint64\n\n\/\/ String formats the Id as a base36 string\nfunc (id Id) String() string {\n\treturn strconv.FormatUint(uint64(id), 36)\n}\n\n\/\/ Uint64 formats the Id as an unsigned integer\nfunc (id Id) Uint64() uint64 {\n\treturn uint64(id)\n}\n\n\/\/ Flake is a unique Id generator\ntype Flake struct {\n\tprevTime uint64\n\tworkerId uint64\n\tsequence uint64\n\tmu sync.Mutex\n}\n\n\/\/ New returns new Id generator\nfunc New(workerId uint64) *Flake {\n\treturn &Flake{\n\t\tsequence: 0,\n\t\tprevTime: getTimestamp(),\n\t\tworkerId: workerId % MaxWorkerId,\n\t}\n}\n\n\/\/ WithHostId creates new Id generator with host machine address as worker id\nfunc WithHostId() (*Flake, error) {\n\tworkerID, err := getHostId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn New(workerID), nil\n}\n\n\/\/ WithRandomId creates new Id generator with random worker id\nfunc WithRandomId() (*Flake, error) {\n\tworkerID, err := getRandomId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn New(workerID), nil\n}\n\n\/\/ NextId returns a new Id from the generator\nfunc (f *Flake) NextId() Id {\n\tnow := getTimestamp()\n\n\tf.mu.Lock()\n\tsequence := f.sequence\n\n\t\/\/ Use the sequence number if the id request is in the same millisecond as\n\t\/\/ the previous request.\n\tif now <= f.prevTime {\n\t\tnow = f.prevTime\n\t\tsequence++\n\t} else {\n\t\tsequence = 0\n\t}\n\n\t\/\/ Bump the timestamp by 1ms if we run out of sequence bits.\n\tif sequence > MaxSequence {\n\t\tnow++\n\t\tsequence = 0\n\t}\n\n\tf.prevTime = now\n\tf.sequence = sequence\n\tf.mu.Unlock()\n\n\ttimestamp := now << (HostBits + SequenceBits)\n\tworkerId := f.workerId << SequenceBits\n\treturn Id(timestamp | workerId | sequence)\n}\n\n\/\/ getTimestamp returns the timestamp in milliseconds adjusted for the custom\n\/\/ epoch\nfunc getTimestamp() uint64 {\n\treturn uint64(time.Since(Epoch).Nanoseconds() \/ 1e6)\n}\n\n\/\/ getHostId returns the host id using the IP address of the machine\nfunc getHostId() (uint64, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\taddrs, err := net.LookupIP(h)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ta := addrs[0].To4()\n\tip := binary.BigEndian.Uint32(a)\n\treturn uint64(ip), nil\n}\n\n\/\/ getRandomId generates random worker id\nfunc getRandomId() (uint64, error) {\n\tvar b [8]byte\n\tif _, err := rand.Read(b[:]); err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint64(b[:]), nil\n}\n<commit_msg>Dont panic if hostname cannot be resolved<commit_after>\/\/ Flake generates unique identifiers that are roughly sortable by time. Flake can\n\/\/ run on a cluster of machines and still generate unique IDs without requiring\n\/\/ worker coordination.\n\/\/\n\/\/ A Flake ID is a 64-bit integer will the following components:\n\/\/ - 41 bits is the timestamp with millisecond precision\n\/\/ - 10 bits is the host id (uses IP modulo 2^10)\n\/\/ - 13 bits is an auto-incrementing sequence for ID requests within the same millisecond\n\/\/\n\/\/ Note: In order to make a millisecond timestamp fit within 41 bits, a custom\n\/\/ epoch of Jan 1, 2015 00:00:00 is used.\n\npackage flake\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tHostBits = 10\n\tSequenceBits = 13\n)\n\nvar (\n\t\/\/ Custom Epoch so the timestamp can fit into 41 bits.\n\t\/\/ Jan 1, 2015 00:00:00 UTC\n\tEpoch time.Time = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)\n\tMaxWorkerId uint64 = (1 << HostBits) - 1\n\tMaxSequence uint64 = (1 << SequenceBits) - 1\n)\n\n\/\/ Id represents a unique k-ordered Id\ntype Id uint64\n\n\/\/ String formats the Id as a base36 string\nfunc (id Id) String() string {\n\treturn strconv.FormatUint(uint64(id), 36)\n}\n\n\/\/ Uint64 formats the Id as an unsigned integer\nfunc (id Id) Uint64() uint64 {\n\treturn uint64(id)\n}\n\n\/\/ Flake is a unique Id generator\ntype Flake struct {\n\tprevTime uint64\n\tworkerId uint64\n\tsequence uint64\n\tmu sync.Mutex\n}\n\n\/\/ New returns new Id generator\nfunc New(workerId uint64) *Flake {\n\treturn &Flake{\n\t\tsequence: 0,\n\t\tprevTime: getTimestamp(),\n\t\tworkerId: workerId % MaxWorkerId,\n\t}\n}\n\n\/\/ WithHostId creates new Id generator with host machine address as worker id\nfunc WithHostId() (*Flake, error) {\n\tworkerID, err := getHostId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn New(workerID), nil\n}\n\n\/\/ WithRandomId creates new Id generator with random worker id\nfunc WithRandomId() (*Flake, error) {\n\tworkerID, err := getRandomId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn New(workerID), nil\n}\n\n\/\/ NextId returns a new Id from the generator\nfunc (f *Flake) NextId() Id {\n\tnow := getTimestamp()\n\n\tf.mu.Lock()\n\tsequence := f.sequence\n\n\t\/\/ Use the sequence number if the id request is in the same millisecond as\n\t\/\/ the previous request.\n\tif now <= f.prevTime {\n\t\tnow = f.prevTime\n\t\tsequence++\n\t} else {\n\t\tsequence = 0\n\t}\n\n\t\/\/ Bump the timestamp by 1ms if we run out of sequence bits.\n\tif sequence > MaxSequence {\n\t\tnow++\n\t\tsequence = 0\n\t}\n\n\tf.prevTime = now\n\tf.sequence = sequence\n\tf.mu.Unlock()\n\n\ttimestamp := now << (HostBits + SequenceBits)\n\tworkerId := f.workerId << SequenceBits\n\treturn Id(timestamp | workerId | sequence)\n}\n\n\/\/ getTimestamp returns the timestamp in milliseconds adjusted for the custom\n\/\/ epoch\nfunc getTimestamp() uint64 {\n\treturn uint64(time.Since(Epoch).Nanoseconds() \/ 1e6)\n}\n\n\/\/ getHostId returns the host id using the IP address of the machine\nfunc getHostId() (uint64, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\taddrs, err := net.LookupIP(h)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ta := addrs[0].To4()\n\tif len(a) < 4 {\n\t\treturn 0, errors.New(\"failed to resolve hostname\")\n\t}\n\n\tip := binary.BigEndian.Uint32(a)\n\treturn uint64(ip), nil\n}\n\n\/\/ getRandomId generates random worker id\nfunc getRandomId() (uint64, error) {\n\tvar b [8]byte\n\tif _, err := rand.Read(b[:]); err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint64(b[:]), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package cmac implements CMAC as defined in RFC4493 and NIST SP800-38b.\npackage cmac\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"hash\"\n)\n\nconst (\n\t_Rb128 = 0x87\n\t_Rb64 = 0x1b\n)\n\nfunc shifted(x []byte) []byte {\n\td := make([]byte, len(x))\n\tcopy(d, x)\n\tfor i := range d {\n\t\tif i > 0 {\n\t\t\td[i-1] |= d[i] >> 7\n\t\t}\n\t\td[i] <<= 1\n\t}\n\treturn d\n}\n\nfunc gensubkey(c cipher.Block, l []byte, rb byte) []byte {\n\tsk := shifted(l)\n\tsk[len(sk)-1] ^= byte(subtle.ConstantTimeSelect(int(l[0]>>7), int(rb), 0))\n\treturn sk\n}\n\nfunc gensubkeys(c cipher.Block) ([]byte, []byte) {\n\tvar rb byte\n\n\tswitch c.BlockSize() {\n\tcase 16:\n\t\trb = _Rb128\n\tcase 8:\n\t\trb = _Rb64\n\tdefault:\n\t\tpanic(\"cmac: invalid block size\")\n\n\t}\n\n\tl := make([]byte, c.BlockSize())\n\tc.Encrypt(l, l)\n\n\tk1 := gensubkey(c, l, rb)\n\treturn k1, gensubkey(c, k1, rb)\n}\n\ntype cmac struct {\n\tc cipher.Block\n\tk1 []byte\n\tk2 []byte\n\n\tx []byte\n\n\tbuf []byte\n}\n\nfunc newcmac(c cipher.Block) *cmac {\n\tk1, k2 := gensubkeys(c)\n\tm := &cmac{c: c, k1: k1, k2: k2}\n\tm.Reset()\n\treturn m\n}\n\nfunc (m *cmac) block(b []byte) {\n\ty := make([]byte, m.c.BlockSize())\n\tfor i := range y {\n\t\ty[i] = m.x[i] ^ b[i]\n\t}\n\tm.c.Encrypt(m.x, y)\n}\n\nfunc (m *cmac) Write(b []byte) (int, error) {\n\td := append(m.buf, b...)\n\n\tfor len(d) > m.c.BlockSize() {\n\t\tm.block(d[:m.c.BlockSize()])\n\t\td = d[m.c.BlockSize():]\n\t}\n\n\tm.buf = d\n\n\treturn len(b), nil\n}\n\nfunc (m *cmac) Sum(b []byte) []byte {\n\tlast := make([]byte, m.c.BlockSize())\n\n\tif len(m.buf) > 0 && len(m.buf)%m.c.BlockSize() == 0 {\n\t\tfor i := range last {\n\t\t\tlast[i] = m.buf[i] ^ m.k1[i]\n\t\t}\n\t} else {\n\t\tcopy(last, m.buf)\n\t\tlast[len(m.buf)] = 0x80\n\t\tfor i := range last {\n\t\t\tlast[i] ^= m.k2[i]\n\t\t}\n\t}\n\n\tfor i := range last {\n\t\tlast[i] ^= m.x[i]\n\t}\n\tm.c.Encrypt(last, last)\n\n\treturn append(b, last...)\n}\n\nfunc (m *cmac) Reset() {\n\tm.buf = nil\n\tm.x = make([]byte, m.c.BlockSize())\n}\n\nfunc (m *cmac) Size() int {\n\treturn m.c.BlockSize()\n}\n\nfunc (m *cmac) BlockSize() int {\n\treturn m.c.BlockSize()\n}\n\n\/\/ New returns a hash.Hash computing AES-CMAC.\nfunc New(key []byte) (hash.Hash, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithCipher(c)\n}\n\n\/\/ NewWithCipher returns a hash.Hash computing CMAC using the given\n\/\/ cipher.Block. The block cipher should have a block length of 8 or 16 bytes.\nfunc NewWithCipher(c cipher.Block) (hash.Hash, error) {\n\tswitch c.BlockSize() {\n\tcase 8, 16:\n\t\treturn newcmac(c), nil\n\tdefault:\n\t\treturn nil, errors.New(\"cmac: invalid blocksize\")\n\t}\n}\n<commit_msg>Cache block()'s scratch buffer<commit_after>\/\/ Package cmac implements CMAC as defined in RFC4493 and NIST SP800-38b.\npackage cmac\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"hash\"\n)\n\nconst (\n\t_Rb128 = 0x87\n\t_Rb64 = 0x1b\n)\n\nfunc shifted(x []byte) []byte {\n\td := make([]byte, len(x))\n\tcopy(d, x)\n\tfor i := range d {\n\t\tif i > 0 {\n\t\t\td[i-1] |= d[i] >> 7\n\t\t}\n\t\td[i] <<= 1\n\t}\n\treturn d\n}\n\nfunc gensubkey(c cipher.Block, l []byte, rb byte) []byte {\n\tsk := shifted(l)\n\tsk[len(sk)-1] ^= byte(subtle.ConstantTimeSelect(int(l[0]>>7), int(rb), 0))\n\treturn sk\n}\n\nfunc gensubkeys(c cipher.Block) ([]byte, []byte) {\n\tvar rb byte\n\n\tswitch c.BlockSize() {\n\tcase 16:\n\t\trb = _Rb128\n\tcase 8:\n\t\trb = _Rb64\n\tdefault:\n\t\tpanic(\"cmac: invalid block size\")\n\n\t}\n\n\tl := make([]byte, c.BlockSize())\n\tc.Encrypt(l, l)\n\n\tk1 := gensubkey(c, l, rb)\n\treturn k1, gensubkey(c, k1, rb)\n}\n\ntype cmac struct {\n\tc cipher.Block\n\tk1, k2 []byte\n\tbuf, x, tmp []byte\n}\n\nfunc newcmac(c cipher.Block) *cmac {\n\tk1, k2 := gensubkeys(c)\n\ttmp := make([]byte, c.BlockSize())\n\tm := &cmac{c: c, k1: k1, k2: k2, tmp: tmp}\n\tm.Reset()\n\treturn m\n}\n\nfunc (m *cmac) block(b []byte) {\n\tfor i := range m.tmp {\n\t\tm.tmp[i] = m.x[i] ^ b[i]\n\t}\n\tm.c.Encrypt(m.x, m.tmp)\n}\n\nfunc (m *cmac) Write(b []byte) (int, error) {\n\td := append(m.buf, b...)\n\n\tfor len(d) > m.c.BlockSize() {\n\t\tm.block(d[:m.c.BlockSize()])\n\t\td = d[m.c.BlockSize():]\n\t}\n\n\tm.buf = d\n\n\treturn len(b), nil\n}\n\nfunc (m *cmac) Sum(b []byte) []byte {\n\tlast := make([]byte, m.c.BlockSize())\n\n\tif len(m.buf) > 0 && len(m.buf)%m.c.BlockSize() == 0 {\n\t\tfor i := range last {\n\t\t\tlast[i] = m.buf[i] ^ m.k1[i]\n\t\t}\n\t} else {\n\t\tcopy(last, m.buf)\n\t\tlast[len(m.buf)] = 0x80\n\t\tfor i := range last {\n\t\t\tlast[i] ^= m.k2[i]\n\t\t}\n\t}\n\n\tfor i := range last {\n\t\tlast[i] ^= m.x[i]\n\t}\n\tm.c.Encrypt(last, last)\n\n\treturn append(b, last...)\n}\n\nfunc (m *cmac) Reset() {\n\tm.buf = nil\n\tm.x = make([]byte, m.c.BlockSize())\n}\n\nfunc (m *cmac) Size() int {\n\treturn m.c.BlockSize()\n}\n\nfunc (m *cmac) BlockSize() int {\n\treturn m.c.BlockSize()\n}\n\n\/\/ New returns a hash.Hash computing AES-CMAC.\nfunc New(key []byte) (hash.Hash, error) {\n\tc, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithCipher(c)\n}\n\n\/\/ NewWithCipher returns a hash.Hash computing CMAC using the given\n\/\/ cipher.Block. The block cipher should have a block length of 8 or 16 bytes.\nfunc NewWithCipher(c cipher.Block) (hash.Hash, error) {\n\tswitch c.BlockSize() {\n\tcase 8, 16:\n\t\treturn newcmac(c), nil\n\tdefault:\n\t\treturn nil, errors.New(\"cmac: invalid blocksize\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\txmpp \"..\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\ntype StdLogger struct {\n}\n\nfunc (s *StdLogger) Log(v ...interface{}) {\n\tlog.Println(v)\n}\n\nfunc (s *StdLogger) Logf(fmt string, v ...interface{}) {\n\tlog.Printf(fmt, v)\n}\n\nfunc init() {\n\tlogger := &StdLogger{}\n\txmpp.Debug = logger\n\txmpp.Info = logger\n\txmpp.Warn = logger\n\n\txmpp.TlsConfig = tls.Config{InsecureSkipVerify: true}\n}\n\n\/\/ Demonstrate the API, and allow the user to interact with an XMPP\n\/\/ server via the terminal.\nfunc main() {\n\tvar jid xmpp.JID\n\tflag.Var(&jid, \"jid\", \"JID to log in as\")\n\tvar pw *string = flag.String(\"pw\", \"\", \"password\")\n\tflag.Parse()\n\tif jid.Domain == \"\" || *pw == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tc, err := xmpp.NewClient(&jid, *pw, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"NewClient(%v): %v\", jid, err)\n\t}\n\tdefer close(c.Out)\n\n\terr = c.StartSession(true, &xmpp.Presence{})\n\tif err != nil {\n\t\tlog.Fatalf(\"StartSession: %v\", err)\n\t}\n\troster := xmpp.Roster(c)\n\tfmt.Printf(\"%d roster entries:\\n\", len(roster))\n\tfor i, entry := range(roster) {\n\t\tfmt.Printf(\"%d: %v\\n\", i, entry)\n\t}\n\n\tgo func(ch <-chan xmpp.Stanza) {\n\t\tfor obj := range ch {\n\t\t\tfmt.Printf(\"s: %v\\n\", obj)\n\t\t}\n\t\tfmt.Println(\"done reading\")\n\t}(c.In)\n\n\tp := make([]byte, 1024)\n\tfor {\n\t\tnr, _ := os.Stdin.Read(p)\n\t\tif nr == 0 {\n\t\t\tbreak\n\t\t}\n\t\ts := string(p)\n\t\tstan, err := xmpp.ParseStanza(s)\n\t\tif err == nil {\n\t\t\tc.Out <- stan\n\t\t} else {\n\t\t\tfmt.Printf(\"Parse error: %v\\n\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"done sending\")\n}\n<commit_msg>Disabled debug logging.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\txmpp \"..\"\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\ntype StdLogger struct {\n}\n\nfunc (s *StdLogger) Log(v ...interface{}) {\n\tlog.Println(v)\n}\n\nfunc (s *StdLogger) Logf(fmt string, v ...interface{}) {\n\tlog.Printf(fmt, v)\n}\n\nfunc init() {\n\tlogger := &StdLogger{}\n\t\/\/ xmpp.Debug = logger\n\txmpp.Info = logger\n\txmpp.Warn = logger\n\n\txmpp.TlsConfig = tls.Config{InsecureSkipVerify: true}\n}\n\n\/\/ Demonstrate the API, and allow the user to interact with an XMPP\n\/\/ server via the terminal.\nfunc main() {\n\tvar jid xmpp.JID\n\tflag.Var(&jid, \"jid\", \"JID to log in as\")\n\tvar pw *string = flag.String(\"pw\", \"\", \"password\")\n\tflag.Parse()\n\tif jid.Domain == \"\" || *pw == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tc, err := xmpp.NewClient(&jid, *pw, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"NewClient(%v): %v\", jid, err)\n\t}\n\tdefer close(c.Out)\n\n\terr = c.StartSession(true, &xmpp.Presence{})\n\tif err != nil {\n\t\tlog.Fatalf(\"StartSession: %v\", err)\n\t}\n\troster := xmpp.Roster(c)\n\tfmt.Printf(\"%d roster entries:\\n\", len(roster))\n\tfor i, entry := range(roster) {\n\t\tfmt.Printf(\"%d: %v\\n\", i, entry)\n\t}\n\n\tgo func(ch <-chan xmpp.Stanza) {\n\t\tfor obj := range ch {\n\t\t\tfmt.Printf(\"s: %v\\n\", obj)\n\t\t}\n\t\tfmt.Println(\"done reading\")\n\t}(c.In)\n\n\tp := make([]byte, 1024)\n\tfor {\n\t\tnr, _ := os.Stdin.Read(p)\n\t\tif nr == 0 {\n\t\t\tbreak\n\t\t}\n\t\ts := string(p)\n\t\tstan, err := xmpp.ParseStanza(s)\n\t\tif err == nil {\n\t\t\tc.Out <- stan\n\t\t} else {\n\t\t\tfmt.Printf(\"Parse error: %v\\n\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"done sending\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gogitlog implements a very simple wrapper around the\n\/\/ Go \"log\" package, providing support for a toggle-able debug flag\n\/\/ and a couple of functions that log or not based on that flag.\npackage slog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar timeFormat = \"2006-01-02T15:04:05.000000\"\nvar Logger = New(INFO, \"\")\n\ntype severity int32 \/\/ sync\/atomic int32\n\nconst (\n\tDEBUG severity = iota\n\tINFO\n\tWARN\n\tERROR\n\tFATAL\n)\n\nvar severityName = []string{\n\tDEBUG: \"DEBUG\",\n\tINFO: \"INFO\",\n\tWARN: \"WARN\",\n\tERROR: \"ERROR\",\n\tFATAL: \"FATAL\",\n}\n\ntype LeveledLogger struct {\n\tprefix string\n\tseverity severity\n\tmx sync.Mutex\n}\n\nfunc (l *LeveledLogger) header(s severity, t *time.Time) *bytes.Buffer {\n\tb := new(bytes.Buffer)\n\tfmt.Fprintf(b, \"%s %-5.5s %s\", t.Format(timeFormat), severityName[s], l.prefix)\n\treturn b\n}\n\nfunc (l *LeveledLogger) logln(s severity, v ...interface{}) {\n\tif s >= l.severity {\n\t\tt := time.Now()\n\t\tbuf := l.header(s, &t)\n\t\tfmt.Fprintln(buf, v...)\n\t\tbuf.WriteTo(os.Stderr)\n\t}\n}\n\nfunc (l *LeveledLogger) logf(s severity, format string, v ...interface{}) {\n\tif s >= l.severity {\n\t\tt := time.Now()\n\t\tbuf := l.header(s, &t)\n\t\tfmt.Fprintf(buf, format, v...)\n\t\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\t\tbuf.WriteByte('\\n')\n\t\t}\n\t\tbuf.WriteTo(os.Stderr)\n\t}\n}\n\nfunc (l *LeveledLogger) Write(p []byte) (n int, err error) {\n\tt := time.Now()\n\tbuf := l.header(INFO, &t)\n\tbuf.Write(p)\n\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\tbuf.WriteByte('\\n')\n\t}\n\twritten, err := buf.WriteTo(os.Stderr)\n\tif err != nil {\n\t\treturn int(written), err\n\t}\n\treturn int(written), nil\n}\n\nfunc (l *LeveledLogger) GetLevel() severity {\n\treturn l.severity\n}\n\nfunc (l *LeveledLogger) SetLevel(s severity) {\n\tl.severity = s\n}\n\nfunc (l *LeveledLogger) IsDebug() bool {\n\tif l.severity == DEBUG {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l *LeveledLogger) Debugf(format string, v ...interface{}) {\n\tl.logf(DEBUG, format, v...)\n}\n\nfunc (l *LeveledLogger) Debugln(v ...interface{}) {\n\tl.logln(DEBUG, v...)\n}\n\nfunc (l *LeveledLogger) Infof(format string, v ...interface{}) {\n\tl.logf(INFO, format, v...)\n}\n\nfunc (l *LeveledLogger) Infoln(v ...interface{}) {\n\tl.logln(INFO, v...)\n}\n\nfunc (l *LeveledLogger) Warnf(format string, v ...interface{}) {\n\tl.logf(WARN, format, v...)\n}\n\nfunc (l *LeveledLogger) Warnln(v ...interface{}) {\n\tl.logln(WARN, v...)\n}\n\nfunc (l *LeveledLogger) Errorf(format string, v ...interface{}) {\n\tl.logf(ERROR, format, v...)\n}\n\nfunc (l *LeveledLogger) Errorln(v ...interface{}) {\n\tl.logln(ERROR, v...)\n}\n\nfunc (l *LeveledLogger) Fatalf(format string, v ...interface{}) {\n\tl.logf(FATAL, format, v...)\n\tos.Exit(1)\n}\n\nfunc (l *LeveledLogger) Fatalln(v ...interface{}) {\n\tl.logln(FATAL, v...)\n\tos.Exit(1)\n}\n\nfunc (l *LeveledLogger) Panicf(format string, v ...interface{}) {\n\tl.logf(FATAL, format, v...)\n\tpanic(fmt.Sprintf(format, v...))\n}\n\nfunc (l *LeveledLogger) Panicln(v ...interface{}) {\n\tl.logln(FATAL, v...)\n\tpanic(fmt.Sprintln(v...))\n}\n\nfunc New(level severity, prefix string) *LeveledLogger {\n\treturn &LeveledLogger{\n\t\tprefix,\n\t\tlevel,\n\t\tsync.Mutex{},\n\t}\n}\n\n\/*\n\/\/ isatty returns true if f is a TTY, false otherwise.\nfunc isatty(f *os.File) bool {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\tcase \"linux\":\n\tdefault:\n\t\treturn false\n\t}\n\tvar t [2]byte\n\t_, _, errno := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tf.Fd(), syscall.TIOCGPGRP,\n\t\tuintptr(unsafe.Pointer(&t)))\n\treturn errno == 0\n}\n*\/\n\nfunc GetLevel() severity {\n\treturn Logger.GetLevel()\n}\n\nfunc SetLevel(s severity) {\n\tLogger.SetLevel(s)\n}\n\nfunc IsDebug() bool {\n\treturn Logger.IsDebug()\n}\n\nfunc Debugf(format string, v ...interface{}) {\n\tLogger.Debugf(format, v...)\n}\n\nfunc Debugln(v ...interface{}) {\n\tLogger.Debugln(v...)\n}\n\nfunc Infof(format string, v ...interface{}) {\n\tLogger.Infof(format, v...)\n}\n\nfunc Infoln(v ...interface{}) {\n\tLogger.Infoln(v...)\n}\n\nfunc Warnf(format string, v ...interface{}) {\n\tLogger.Warnf(format, v...)\n}\n\nfunc Warnln(v ...interface{}) {\n\tLogger.Warnln(v...)\n}\n\nfunc Errorf(format string, v ...interface{}) {\n\tLogger.Errorf(format, v...)\n}\n\nfunc Errorln(v ...interface{}) {\n\tLogger.Errorln(v...)\n}\n\nfunc Fatalf(format string, v ...interface{}) {\n\tLogger.Fatalf(format, v...)\n}\n\nfunc Fatalln(v ...interface{}) {\n\tLogger.Fatalln(v...)\n}\n\nfunc Panicf(format string, v ...interface{}) {\n\tLogger.Panicf(format, v...)\n}\n\nfunc Panicln(v ...interface{}) {\n\tLogger.Panicln(v...)\n}\n<commit_msg>add ability to remove timeprefix (for syslog)<commit_after>\/\/ Package gogitlog implements a very simple wrapper around the\n\/\/ Go \"log\" package, providing support for a toggle-able debug flag\n\/\/ and a couple of functions that log or not based on that flag.\npackage slog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar timeFormat = \"2006-01-02T15:04:05.000000\"\nvar Logger = New(INFO, timeFormat, \"\")\n\ntype severity int32 \/\/ sync\/atomic int32\n\nconst (\n\tDEBUG severity = iota\n\tINFO\n\tWARN\n\tERROR\n\tFATAL\n)\n\nvar severityName = []string{\n\tDEBUG: \"DEBUG\",\n\tINFO: \"INFO\",\n\tWARN: \"WARN\",\n\tERROR: \"ERROR\",\n\tFATAL: \"FATAL\",\n}\n\ntype LeveledLogger struct {\n\tprefix string\n\ttimeformat string\n\tseverity severity\n\tmx sync.Mutex\n}\n\nfunc (l *LeveledLogger) header(s severity, t *time.Time) *bytes.Buffer {\n\tb := new(bytes.Buffer)\n\tif l.timeformat != \"\" {\n\t\tfmt.Fprintf(b, \"%s \", t.Format(l.timeformat))\n\t}\n\tfmt.Fprintf(b, \"%-5.5s %s\", severityName[s], l.prefix)\n\treturn b\n}\n\nfunc (l *LeveledLogger) logln(s severity, v ...interface{}) {\n\tif s >= l.severity {\n\t\tt := time.Now()\n\t\tbuf := l.header(s, &t)\n\t\tfmt.Fprintln(buf, v...)\n\t\tbuf.WriteTo(os.Stderr)\n\t}\n}\n\nfunc (l *LeveledLogger) logf(s severity, format string, v ...interface{}) {\n\tif s >= l.severity {\n\t\tt := time.Now()\n\t\tbuf := l.header(s, &t)\n\t\tfmt.Fprintf(buf, format, v...)\n\t\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\t\tbuf.WriteByte('\\n')\n\t\t}\n\t\tbuf.WriteTo(os.Stderr)\n\t}\n}\n\nfunc (l *LeveledLogger) Write(p []byte) (n int, err error) {\n\tt := time.Now()\n\tbuf := l.header(INFO, &t)\n\tbuf.Write(p)\n\tif buf.Bytes()[buf.Len()-1] != '\\n' {\n\t\tbuf.WriteByte('\\n')\n\t}\n\twritten, err := buf.WriteTo(os.Stderr)\n\tif err != nil {\n\t\treturn int(written), err\n\t}\n\treturn int(written), nil\n}\n\nfunc (l *LeveledLogger) GetLevel() severity {\n\treturn l.severity\n}\n\nfunc (l *LeveledLogger) SetLevel(s severity) {\n\tl.severity = s\n}\n\nfunc (l *LeveledLogger) IsDebug() bool {\n\tif l.severity == DEBUG {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l *LeveledLogger) Debugf(format string, v ...interface{}) {\n\tl.logf(DEBUG, format, v...)\n}\n\nfunc (l *LeveledLogger) Debugln(v ...interface{}) {\n\tl.logln(DEBUG, v...)\n}\n\nfunc (l *LeveledLogger) Infof(format string, v ...interface{}) {\n\tl.logf(INFO, format, v...)\n}\n\nfunc (l *LeveledLogger) Infoln(v ...interface{}) {\n\tl.logln(INFO, v...)\n}\n\nfunc (l *LeveledLogger) Warnf(format string, v ...interface{}) {\n\tl.logf(WARN, format, v...)\n}\n\nfunc (l *LeveledLogger) Warnln(v ...interface{}) {\n\tl.logln(WARN, v...)\n}\n\nfunc (l *LeveledLogger) Errorf(format string, v ...interface{}) {\n\tl.logf(ERROR, format, v...)\n}\n\nfunc (l *LeveledLogger) Errorln(v ...interface{}) {\n\tl.logln(ERROR, v...)\n}\n\nfunc (l *LeveledLogger) Fatalf(format string, v ...interface{}) {\n\tl.logf(FATAL, format, v...)\n\tos.Exit(1)\n}\n\nfunc (l *LeveledLogger) Fatalln(v ...interface{}) {\n\tl.logln(FATAL, v...)\n\tos.Exit(1)\n}\n\nfunc (l *LeveledLogger) Panicf(format string, v ...interface{}) {\n\tl.logf(FATAL, format, v...)\n\tpanic(fmt.Sprintf(format, v...))\n}\n\nfunc (l *LeveledLogger) Panicln(v ...interface{}) {\n\tl.logln(FATAL, v...)\n\tpanic(fmt.Sprintln(v...))\n}\n\nfunc New(level severity, timeformat string, prefix string) *LeveledLogger {\n\treturn &LeveledLogger{\n\t\ttimeformat,\n\t\tprefix,\n\t\tlevel,\n\t\tsync.Mutex{},\n\t}\n}\n\n\/*\n\/\/ isatty returns true if f is a TTY, false otherwise.\nfunc isatty(f *os.File) bool {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\tcase \"linux\":\n\tdefault:\n\t\treturn false\n\t}\n\tvar t [2]byte\n\t_, _, errno := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tf.Fd(), syscall.TIOCGPGRP,\n\t\tuintptr(unsafe.Pointer(&t)))\n\treturn errno == 0\n}\n*\/\n\nfunc GetLevel() severity {\n\treturn Logger.GetLevel()\n}\n\nfunc SetLevel(s severity) {\n\tLogger.SetLevel(s)\n}\n\nfunc IsDebug() bool {\n\treturn Logger.IsDebug()\n}\n\nfunc Debugf(format string, v ...interface{}) {\n\tLogger.Debugf(format, v...)\n}\n\nfunc Debugln(v ...interface{}) {\n\tLogger.Debugln(v...)\n}\n\nfunc Infof(format string, v ...interface{}) {\n\tLogger.Infof(format, v...)\n}\n\nfunc Infoln(v ...interface{}) {\n\tLogger.Infoln(v...)\n}\n\nfunc Warnf(format string, v ...interface{}) {\n\tLogger.Warnf(format, v...)\n}\n\nfunc Warnln(v ...interface{}) {\n\tLogger.Warnln(v...)\n}\n\nfunc Errorf(format string, v ...interface{}) {\n\tLogger.Errorf(format, v...)\n}\n\nfunc Errorln(v ...interface{}) {\n\tLogger.Errorln(v...)\n}\n\nfunc Fatalf(format string, v ...interface{}) {\n\tLogger.Fatalf(format, v...)\n}\n\nfunc Fatalln(v ...interface{}) {\n\tLogger.Fatalln(v...)\n}\n\nfunc Panicf(format string, v ...interface{}) {\n\tLogger.Panicf(format, v...)\n}\n\nfunc Panicln(v ...interface{}) {\n\tLogger.Panicln(v...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ index template\nconst index = `[[define \"index\"]]<!doctype html>\n<html ng-app=\"prim\" ng-strict-di lang=\"en\">\n[[template \"head\" . ]]\n<body>\n <ng-include src=\"'pages\/global.html'\"><\/ng-include>\n <div class=\"header\">\n[[template \"header\" . ]]\n <\/div>\n <div ng-view><\/div>\n<\/body>\n<\/html>[[end]]`\n\n\/\/ head items\nconst head = `[[define \"head\"]]<head>\n <base href=\"\/\">\n <title data-ng-bind=\"page.title\">[[ .title ]]<\/title>\n <meta charset=\"utf-8\" \/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" \/>\n <meta name=\"description\" content=\"[[ .desc ]]\" \/>[[if .nsfw]]\n <meta name=\"rating\" content=\"adult\" \/>\n <meta name=\"rating\" content=\"RTA-5042-1996-1400-1577-RTA\" \/>[[end]]\n <script src=\"\/assets\/prim\/[[ .primjs ]]\"><\/script>\n <link rel=\"stylesheet\" href=\"\/assets\/prim\/[[ .primcss ]]\" \/>\n <link rel=\"stylesheet\" href=\"\/assets\/styles\/[[ .style ]]\" \/>\n <link rel=\"stylesheet\" href=\"https:\/\/maxcdn.bootstrapcdn.com\/font-awesome\/4.4.0\/css\/font-awesome.min.css\">\n [[template \"angular\" . ]]\n [[template \"headinclude\" . ]]<\/head>[[end]]`\n\n\/\/ angular config\nconst angular = `[[define \"angular\"]]<script>angular.module('prim').constant('config',{\n ib_id:[[ .ib ]],\n title:'[[ .title ]]',\n img_srv:'\/\/[[ .imgsrv ]]',\n api_srv:'\/\/[[ .apisrv ]]',\n csrf_token:'[[ .csrf ]]'\n });\n<\/script>[[end]]`\n\n\/\/ site header\nconst header = `[[define \"header\"]]<div class=\"header_bar\">\n <div class=\"left\">\n <div class=\"nav_menu\" ng-controller=\"NavMenuCtrl as navmenu\">\n <ul click-off=\"navmenu.close\" ng-click=\"navmenu.toggle()\" ng-mouseenter=\"navmenu.open()\" ng-mouseleave=\"navmenu.close()\">\n <li class=\"n1\"><a href><i class=\"fa fa-fw fa-bars\"><\/i><\/a>\n <ul ng-if=\"navmenu.visible\">\n[[template \"navmenuinclude\" . ]]\n[[template \"navmenu\" . ]]\n <\/ul>\n <\/li>\n <\/ul>\n <\/div>\n <div class=\"nav_items\" ng-controller=\"NavItemsCtrl as navitems\">\n <ul>\n <ng-include src=\"'pages\/menus\/nav.html'\"><\/ng-include>\n <\/ul>\n <\/div>\n <\/div>\n <div class=\"right\">\n <div class=\"user_menu\">\n <div ng-if=\"!authState.isAuthenticated\" class=\"login\">\n <a href=\"account\" class=\"button button-small button-login\">login<\/a>\n <\/div>\n <div ng-if=\"authState.isAuthenticated\" ng-controller=\"UserMenuCtrl as usermenu\">\n <ul click-off=\"usermenu.close\" ng-click=\"usermenu.toggle()\" ng-mouseenter=\"usermenu.open()\" ng-mouseleave=\"usermenu.close()\">\n <li>\n <div class=\"avatar avatar-medium\">\n <div class=\"avatar-inner\">\n <a href>\n <img ng-src=\"{{authState.avatar}}\" \/>\n <\/a>\n <\/div>\n <\/div>\n <ul ng-if=\"usermenu.visible\">\n <ng-include src=\"'pages\/menus\/user.html'\"><\/ng-include>\n <\/ul>\n <\/li>\n <\/ul>\n <\/div>\n <\/div>\n <div class=\"site_logo\">\n <a href=\"\/\">\n <img src=\"\/assets\/logo\/[[ .logo ]]\" title=\"[[ .title ]]\" \/>\n <\/a>\n <\/div>\n <\/div>\n<\/div>[[end]]`\n\nconst navmenu = `[[define \"navmenu\"]][[ range $ib := .imageboards ]]<li><a href=\"\/\/[[ $ib.Address ]]\">[[ $ib.Title ]]<\/a><\/li>\n[[end]][[end]]`\n<commit_msg>fix ib links<commit_after>package main\n\n\/\/ index template\nconst index = `[[define \"index\"]]<!doctype html>\n<html ng-app=\"prim\" ng-strict-di lang=\"en\">\n[[template \"head\" . ]]\n<body>\n <ng-include src=\"'pages\/global.html'\"><\/ng-include>\n <div class=\"header\">\n[[template \"header\" . ]]\n <\/div>\n <div ng-view><\/div>\n<\/body>\n<\/html>[[end]]`\n\n\/\/ head items\nconst head = `[[define \"head\"]]<head>\n <base href=\"\/\">\n <title data-ng-bind=\"page.title\">[[ .title ]]<\/title>\n <meta charset=\"utf-8\" \/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" \/>\n <meta name=\"description\" content=\"[[ .desc ]]\" \/>[[if .nsfw]]\n <meta name=\"rating\" content=\"adult\" \/>\n <meta name=\"rating\" content=\"RTA-5042-1996-1400-1577-RTA\" \/>[[end]]\n <script src=\"\/assets\/prim\/[[ .primjs ]]\"><\/script>\n <link rel=\"stylesheet\" href=\"\/assets\/prim\/[[ .primcss ]]\" \/>\n <link rel=\"stylesheet\" href=\"\/assets\/styles\/[[ .style ]]\" \/>\n <link rel=\"stylesheet\" href=\"https:\/\/maxcdn.bootstrapcdn.com\/font-awesome\/4.4.0\/css\/font-awesome.min.css\">\n [[template \"angular\" . ]]\n [[template \"headinclude\" . ]]<\/head>[[end]]`\n\n\/\/ angular config\nconst angular = `[[define \"angular\"]]<script>angular.module('prim').constant('config',{\n ib_id:[[ .ib ]],\n title:'[[ .title ]]',\n img_srv:'\/\/[[ .imgsrv ]]',\n api_srv:'\/\/[[ .apisrv ]]',\n csrf_token:'[[ .csrf ]]'\n });\n<\/script>[[end]]`\n\n\/\/ site header\nconst header = `[[define \"header\"]]<div class=\"header_bar\">\n <div class=\"left\">\n <div class=\"nav_menu\" ng-controller=\"NavMenuCtrl as navmenu\">\n <ul click-off=\"navmenu.close\" ng-click=\"navmenu.toggle()\" ng-mouseenter=\"navmenu.open()\" ng-mouseleave=\"navmenu.close()\">\n <li class=\"n1\"><a href><i class=\"fa fa-fw fa-bars\"><\/i><\/a>\n <ul ng-if=\"navmenu.visible\">\n[[template \"navmenuinclude\" . ]]\n[[template \"navmenu\" . ]]\n <\/ul>\n <\/li>\n <\/ul>\n <\/div>\n <div class=\"nav_items\" ng-controller=\"NavItemsCtrl as navitems\">\n <ul>\n <ng-include src=\"'pages\/menus\/nav.html'\"><\/ng-include>\n <\/ul>\n <\/div>\n <\/div>\n <div class=\"right\">\n <div class=\"user_menu\">\n <div ng-if=\"!authState.isAuthenticated\" class=\"login\">\n <a href=\"account\" class=\"button button-small button-login\">login<\/a>\n <\/div>\n <div ng-if=\"authState.isAuthenticated\" ng-controller=\"UserMenuCtrl as usermenu\">\n <ul click-off=\"usermenu.close\" ng-click=\"usermenu.toggle()\" ng-mouseenter=\"usermenu.open()\" ng-mouseleave=\"usermenu.close()\">\n <li>\n <div class=\"avatar avatar-medium\">\n <div class=\"avatar-inner\">\n <a href>\n <img ng-src=\"{{authState.avatar}}\" \/>\n <\/a>\n <\/div>\n <\/div>\n <ul ng-if=\"usermenu.visible\">\n <ng-include src=\"'pages\/menus\/user.html'\"><\/ng-include>\n <\/ul>\n <\/li>\n <\/ul>\n <\/div>\n <\/div>\n <div class=\"site_logo\">\n <a href=\"\/\">\n <img src=\"\/assets\/logo\/[[ .logo ]]\" title=\"[[ .title ]]\" \/>\n <\/a>\n <\/div>\n <\/div>\n<\/div>[[end]]`\n\nconst navmenu = `[[define \"navmenu\"]][[ range $ib := .imageboards ]]<li><a target=\"_self\" href=\"\/\/[[ $ib.Address ]]\">[[ $ib.Title ]]<\/a><\/li>\n[[end]][[end]]`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage zanzibar_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/uber\/zanzibar\/test\/lib\/bench_gateway\"\n\n\texampleGateway \"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/services\/example-gateway\"\n)\n\nfunc TestCreatingTChannel(t *testing.T) {\n\t_, err := benchGateway.CreateGateway(\n\t\tmap[string]interface{}{\n\t\t\t\"tchannel.serviceName\": \"\",\n\t\t},\n\t\tnil,\n\t\texampleGateway.CreateGateway,\n\t)\n\tassert.Error(t, err)\n\n\tassert.Contains(t, err.Error(), \"no service name provided\")\n}\n<commit_msg>tests: update tchannel test<commit_after>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage zanzibar_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/uber\/zanzibar\/test\/lib\/bench_gateway\"\n\n\texampleGateway \"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/services\/example-gateway\"\n)\n\nfunc TestCreatingTChannel(t *testing.T) {\n\t_, err := benchGateway.CreateGateway(\n\t\tmap[string]interface{}{\n\t\t\t\"tchannel.serviceName\": \"\",\n\t\t\t\"serviceName\": \"\",\n\t\t},\n\t\tnil,\n\t\texampleGateway.CreateGateway,\n\t)\n\tassert.Error(t, err)\n\n\tassert.Contains(t, err.Error(), \"no service name provided\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2014 Unknown\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ goconfig is a easy-use comments-support configuration file parser.\npackage goconfig\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\t\/\/ Default section name.\n\tDEFAULT_SECTION = \"DEFAULT\"\n\t\/\/ Maximum allowed depth when recursively substituing variable names.\n\t_DEPTH_VALUES = 200\n\n\t\/\/ Get Errors.\n\tSectionNotFound = iota\n\tKeyNotFound\n\t\/\/ Read Errors.\n\tBlankSection\n\t\/\/ Get and Read Errors.\n\tCouldNotParse\n)\n\nvar (\n\tLineBreak = \"\\n\"\n\t\/\/ %(variable)s\n\tvarRegExp = regexp.MustCompile(`%\\(([a-zA-Z0-9_.\\-]+)\\)s`)\n)\n\nfunc init() {\n\tif runtime.GOOS == \"windows\" {\n\t\tLineBreak = \"\\r\\n\"\n\t}\n}\n\n\/\/ ConfigFile is the representation of configuration settings.\n\/\/ The public interface is entirely through methods.\ntype ConfigFile struct {\n\tlock sync.RWMutex\n\tfileNames []string \/\/ Support mutil-files.\n\tdata map[string]map[string]string \/\/ Section -> key : value\n\tsectionList []string \/\/ Section list\n\tkeyList map[string][]string \/\/ Section -> Key list\n\tsectionComments map[string]string \/\/ Sections comments\n\tkeyComments map[string]map[string]string \/\/ Keys comments\n\tBlockMode bool\n}\n\n\/\/ newConfigFile creates an empty configuration representation.\n\/\/ This representation can be filled with AddSection and AddKey and then\n\/\/ saved to a file using SaveConfigFile.\nfunc newConfigFile(fileNames []string) *ConfigFile {\n\tc := new(ConfigFile)\n\tc.fileNames = fileNames\n\tc.data = make(map[string]map[string]string)\n\tc.keyList = make(map[string][]string)\n\tc.sectionComments = make(map[string]string)\n\tc.keyComments = make(map[string]map[string]string)\n\tc.BlockMode = true\n\treturn c\n}\n\n\/\/ SetValue adds a new section-key-value to the configuration.\n\/\/ It returns true if the key and value were inserted, and false if the value was overwritten.\n\/\/ If the section does not exist in advance, it is created.\nfunc (c *ConfigFile) SetValue(section, key, value string) bool {\n\tif c.BlockMode {\n\t\tc.lock.Lock()\n\t\tdefer c.lock.Unlock()\n\t}\n\n\t\/\/ Check if section exists.\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section not exists.\n\t\t\/\/ Execute add operation.\n\t\tc.data[section] = make(map[string]string)\n\t\t\/\/ Append section to list.\n\t\tc.sectionList = append(c.sectionList, section)\n\t}\n\n\t\/\/ Check if key exists.\n\t_, ok := c.data[section][key]\n\tc.data[section][key] = value\n\tif !ok {\n\t\t\/\/ If not exists, append to key list.\n\t\tc.keyList[section] = append(c.keyList[section], key)\n\t}\n\treturn !ok\n}\n\n\/\/ DeleteKey delete the key in given section.\n\/\/ It returns true if the key was deleted, and false if the section or key didn't exist.\nfunc (c *ConfigFile) DeleteKey(section, key string) bool {\n\t\/\/ Check if section exists.\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section not exists.\n\t\treturn false\n\t}\n\n\t\/\/ Check if key exists\n\tif _, ok := c.data[section][key]; ok {\n\t\t\/\/ Execute remove operation\n\t\tdelete(c.data[section], key)\n\t\t\/\/ Remove comments of key\n\t\tc.SetKeyComments(section, key, \"\")\n\t\t\/\/ Get index of key\n\t\ti := 0\n\t\tfor _, keyName := range c.keyList[section] {\n\t\t\tif keyName == key {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\t\/\/ Remove from key list\n\t\tc.keyList[section] =\n\t\t\tappend(c.keyList[section][:i], c.keyList[section][i+1:]...)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ GetValue returns the value of key available in the given section.\n\/\/ If the value needs to be unfolded (see e.g. %(google)s example in the GoConfig_test.go),\n\/\/ then String does this unfolding automatically, up to\n\/\/ _DEPTH_VALUES number of iterations.\n\/\/ It returns an error if the section or (default)key does not exist and empty string value.\nfunc (c *ConfigFile) GetValue(section, key string) (string, error) {\n\tif c.BlockMode {\n\t\tc.lock.RLock()\n\t\tdefer c.lock.RUnlock()\n\t}\n\n\t\/\/ Blank section name represents DEFAULT section.\n\tif len(section) == 0 {\n\t\tsection = DEFAULT_SECTION\n\t}\n\n\t\/\/ Check if section exists\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section does not exist.\n\t\treturn \"\", getError{SectionNotFound, section}\n\t}\n\n\t\/\/ Section exists.\n\t\/\/ Check if key exists.\n\tvalue, ok := c.data[section][key]\n\tif !ok {\n\t\t\/\/ Check if it is a sub-section.\n\t\tif i := strings.LastIndex(section, \".\"); i > -1 {\n\t\t\treturn c.GetValue(section[:i], key)\n\t\t}\n\n\t\t\/\/ Return empty value.\n\t\treturn \"\", getError{KeyNotFound, key}\n\t}\n\n\t\/\/ Key exists.\n\tvar i int\n\tfor i = 0; i < _DEPTH_VALUES; i++ {\n\t\tvr := varRegExp.FindString(value)\n\t\tif len(vr) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Take off leading '%(' and trailing ')s'\n\t\tnoption := strings.TrimLeft(vr, \"%(\")\n\t\tnoption = strings.TrimRight(noption, \")s\")\n\n\t\t\/\/ Search variable in default section\n\t\tnvalue, _ := c.GetValue(DEFAULT_SECTION, noption)\n\t\t\/\/ Search in the same section\n\t\tif _, ok := c.data[section][noption]; ok {\n\t\t\tnvalue = c.data[section][noption]\n\t\t}\n\n\t\t\/\/ substitute by new value and take off leading '%(' and trailing ')s'\n\t\tvalue = strings.Replace(value, vr, nvalue, -1)\n\t}\n\treturn value, nil\n}\n\n\/\/ Bool returns bool type value.\nfunc (c *ConfigFile) Bool(section, key string) (bool, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseBool(value)\n}\n\n\/\/ Float64 returns float64 type value.\nfunc (c *ConfigFile) Float64(section, key string) (float64, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0.0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseFloat(value, 64)\n}\n\n\/\/ Int returns int type value.\nfunc (c *ConfigFile) Int(section, key string) (int, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.Atoi(value)\n}\n\n\/\/ Int64 returns int64 type value.\nfunc (c *ConfigFile) Int64(section, key string) (int64, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseInt(value, 10, 64)\n}\n\n\/\/ MustValue always returns value without error,\n\/\/ it returns empty string if error occurs.\nfunc (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string {\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustBool always returns value without error,\n\/\/ it returns false if error occurs.\nfunc (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool {\n\tvalue, err := c.Bool(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustFloat64 always returns value without error,\n\/\/ it returns 0.0 if error occurs.\nfunc (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 {\n\tvalue, err := c.Float64(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustInt always returns value without error,\n\/\/ it returns 0 if error occurs.\nfunc (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int {\n\tvalue, err := c.Int(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustInt64 always returns value without error,\n\/\/ it returns 0 if error occurs.\nfunc (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 {\n\tvalue, err := c.Int64(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ GetSection returns key-value pairs in given section.\n\/\/ It section does not exist, returns nil and error.\nfunc (c *ConfigFile) GetSection(section string) (map[string]string, error) {\n\t\/\/ Check if section exists\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section does not exist.\n\t\treturn nil, getError{SectionNotFound, section}\n\t}\n\n\t\/\/ Remove pre-defined key.\n\tsecMap := c.data[section]\n\tdelete(c.data[section], \" \")\n\n\t\/\/ Section exists.\n\treturn secMap, nil\n}\n\n\/\/ SetSectionComments adds new section comments to the configuration.\n\/\/ If comments are empty(0 length), it will remove its section comments!\n\/\/ It returns true if the comments were inserted or removed, and false if the comments were overwritten.\nfunc (c *ConfigFile) SetSectionComments(section, comments string) bool {\n\t\/\/ Check length of comments\n\tif len(comments) == 0 {\n\t\t\/\/ Check if section exists\n\t\tif _, ok := c.sectionComments[section]; ok {\n\t\t\t\/\/ Execute remove operation\n\t\t\tdelete(c.sectionComments, section)\n\t\t}\n\n\t\t\/\/ Not exists can be seen as remove\n\t\treturn true\n\t}\n\n\t\/\/ Check if comments exists\n\t_, ok := c.sectionComments[section]\n\tif comments[0] != '#' && comments[0] != ';' {\n\t\tcomments = \"; \" + comments\n\t}\n\tc.sectionComments[section] = comments\n\treturn !ok\n}\n\n\/\/ SetKeyComments adds new section-key comments to the configuration.\n\/\/ If comments are empty(0 length), it will remove its section-key comments!\n\/\/ It returns true if the comments were inserted or removed, and false if the comments were overwritten.\n\/\/ If the section does not exist in advance, it is created.\nfunc (c *ConfigFile) SetKeyComments(section, key, comments string) bool {\n\t\/\/ Check if section exists\n\tif _, ok := c.keyComments[section]; ok {\n\t\t\/\/ Section exists\n\t\t\/\/ Check length of comments\n\t\tif len(comments) == 0 {\n\t\t\t\/\/ Check if key exists\n\t\t\tif _, ok := c.keyComments[section][key]; ok {\n\t\t\t\t\/\/ Execute remove operation\n\t\t\t\tdelete(c.keyComments[section], key)\n\t\t\t}\n\n\t\t\t\/\/ Not exists can be seen as remove\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\t\/\/ Section not exists\n\t\t\/\/ Check length of comments\n\t\tif len(comments) == 0 {\n\t\t\t\/\/ Not exists can be seen as remove\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ Execute add operation\n\t\t\tc.keyComments[section] = make(map[string]string)\n\t\t}\n\t}\n\n\t\/\/ Check if key exists\n\t_, ok := c.keyComments[section][key]\n\tif comments[0] != '#' && comments[0] != ';' {\n\t\tcomments = \"; \" + comments\n\t}\n\tc.keyComments[section][key] = comments\n\treturn !ok\n}\n\n\/\/ GetSectionComments returns the comments in the given section.\n\/\/ It returns an empty string(0 length) if the comments do not exist\nfunc (c *ConfigFile) GetSectionComments(section string) (comments string) {\n\treturn c.sectionComments[section]\n}\n\n\/\/ GetKeyComments returns the comments of key in the given section.\n\/\/ It returns an empty string(0 length) if the comments do not exist\nfunc (c *ConfigFile) GetKeyComments(section, key string) (comments string) {\n\t\/\/ Check if section exists\n\tif _, ok := c.keyComments[section]; ok {\n\t\t\/\/ Exists\n\t\treturn c.keyComments[section][key]\n\t}\n\n\t\/\/ Not exists\n\treturn \"\"\n}\n\n\/\/ getError occurs when get value in configuration file with invalid parameter\ntype getError struct {\n\tReason int \/\/ Error reason\n\tName string\n}\n\n\/\/ Implement Error method\nfunc (err getError) Error() string {\n\tswitch err.Reason {\n\tcase SectionNotFound:\n\t\treturn fmt.Sprintf(\"section '%s' not found\", string(err.Name))\n\t}\n\n\treturn \"invalid get error\"\n}\n<commit_msg>Add GetSectionList and GetKeyList<commit_after>\/\/ Copyright 2013-2014 Unknown\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ goconfig is a easy-use comments-support configuration file parser.\npackage goconfig\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\t\/\/ Default section name.\n\tDEFAULT_SECTION = \"DEFAULT\"\n\t\/\/ Maximum allowed depth when recursively substituing variable names.\n\t_DEPTH_VALUES = 200\n\n\t\/\/ Get Errors.\n\tSectionNotFound = iota\n\tKeyNotFound\n\t\/\/ Read Errors.\n\tBlankSection\n\t\/\/ Get and Read Errors.\n\tCouldNotParse\n)\n\nvar (\n\tLineBreak = \"\\n\"\n\t\/\/ %(variable)s\n\tvarRegExp = regexp.MustCompile(`%\\(([a-zA-Z0-9_.\\-]+)\\)s`)\n)\n\nfunc init() {\n\tif runtime.GOOS == \"windows\" {\n\t\tLineBreak = \"\\r\\n\"\n\t}\n}\n\n\/\/ ConfigFile is the representation of configuration settings.\n\/\/ The public interface is entirely through methods.\ntype ConfigFile struct {\n\tlock sync.RWMutex\n\tfileNames []string \/\/ Support mutil-files.\n\tdata map[string]map[string]string \/\/ Section -> key : value\n\tsectionList []string \/\/ Section list\n\tkeyList map[string][]string \/\/ Section -> Key list\n\tsectionComments map[string]string \/\/ Sections comments\n\tkeyComments map[string]map[string]string \/\/ Keys comments\n\tBlockMode bool\n}\n\n\/\/ newConfigFile creates an empty configuration representation.\n\/\/ This representation can be filled with AddSection and AddKey and then\n\/\/ saved to a file using SaveConfigFile.\nfunc newConfigFile(fileNames []string) *ConfigFile {\n\tc := new(ConfigFile)\n\tc.fileNames = fileNames\n\tc.data = make(map[string]map[string]string)\n\tc.keyList = make(map[string][]string)\n\tc.sectionComments = make(map[string]string)\n\tc.keyComments = make(map[string]map[string]string)\n\tc.BlockMode = true\n\treturn c\n}\n\n\/\/ SetValue adds a new section-key-value to the configuration.\n\/\/ It returns true if the key and value were inserted, and false if the value was overwritten.\n\/\/ If the section does not exist in advance, it is created.\nfunc (c *ConfigFile) SetValue(section, key, value string) bool {\n\tif c.BlockMode {\n\t\tc.lock.Lock()\n\t\tdefer c.lock.Unlock()\n\t}\n\n\t\/\/ Check if section exists.\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section not exists.\n\t\t\/\/ Execute add operation.\n\t\tc.data[section] = make(map[string]string)\n\t\t\/\/ Append section to list.\n\t\tc.sectionList = append(c.sectionList, section)\n\t}\n\n\t\/\/ Check if key exists.\n\t_, ok := c.data[section][key]\n\tc.data[section][key] = value\n\tif !ok {\n\t\t\/\/ If not exists, append to key list.\n\t\tc.keyList[section] = append(c.keyList[section], key)\n\t}\n\treturn !ok\n}\n\n\/\/ DeleteKey delete the key in given section.\n\/\/ It returns true if the key was deleted, and false if the section or key didn't exist.\nfunc (c *ConfigFile) DeleteKey(section, key string) bool {\n\t\/\/ Check if section exists.\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section not exists.\n\t\treturn false\n\t}\n\n\t\/\/ Check if key exists\n\tif _, ok := c.data[section][key]; ok {\n\t\t\/\/ Execute remove operation\n\t\tdelete(c.data[section], key)\n\t\t\/\/ Remove comments of key\n\t\tc.SetKeyComments(section, key, \"\")\n\t\t\/\/ Get index of key\n\t\ti := 0\n\t\tfor _, keyName := range c.keyList[section] {\n\t\t\tif keyName == key {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\t\/\/ Remove from key list\n\t\tc.keyList[section] =\n\t\t\tappend(c.keyList[section][:i], c.keyList[section][i+1:]...)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ GetValue returns the value of key available in the given section.\n\/\/ If the value needs to be unfolded (see e.g. %(google)s example in the GoConfig_test.go),\n\/\/ then String does this unfolding automatically, up to\n\/\/ _DEPTH_VALUES number of iterations.\n\/\/ It returns an error if the section or (default)key does not exist and empty string value.\nfunc (c *ConfigFile) GetValue(section, key string) (string, error) {\n\tif c.BlockMode {\n\t\tc.lock.RLock()\n\t\tdefer c.lock.RUnlock()\n\t}\n\n\t\/\/ Blank section name represents DEFAULT section.\n\tif len(section) == 0 {\n\t\tsection = DEFAULT_SECTION\n\t}\n\n\t\/\/ Check if section exists\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section does not exist.\n\t\treturn \"\", getError{SectionNotFound, section}\n\t}\n\n\t\/\/ Section exists.\n\t\/\/ Check if key exists.\n\tvalue, ok := c.data[section][key]\n\tif !ok {\n\t\t\/\/ Check if it is a sub-section.\n\t\tif i := strings.LastIndex(section, \".\"); i > -1 {\n\t\t\treturn c.GetValue(section[:i], key)\n\t\t}\n\n\t\t\/\/ Return empty value.\n\t\treturn \"\", getError{KeyNotFound, key}\n\t}\n\n\t\/\/ Key exists.\n\tvar i int\n\tfor i = 0; i < _DEPTH_VALUES; i++ {\n\t\tvr := varRegExp.FindString(value)\n\t\tif len(vr) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Take off leading '%(' and trailing ')s'\n\t\tnoption := strings.TrimLeft(vr, \"%(\")\n\t\tnoption = strings.TrimRight(noption, \")s\")\n\n\t\t\/\/ Search variable in default section\n\t\tnvalue, _ := c.GetValue(DEFAULT_SECTION, noption)\n\t\t\/\/ Search in the same section\n\t\tif _, ok := c.data[section][noption]; ok {\n\t\t\tnvalue = c.data[section][noption]\n\t\t}\n\n\t\t\/\/ substitute by new value and take off leading '%(' and trailing ')s'\n\t\tvalue = strings.Replace(value, vr, nvalue, -1)\n\t}\n\treturn value, nil\n}\n\n\/\/ Bool returns bool type value.\nfunc (c *ConfigFile) Bool(section, key string) (bool, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseBool(value)\n}\n\n\/\/ Float64 returns float64 type value.\nfunc (c *ConfigFile) Float64(section, key string) (float64, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0.0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseFloat(value, 64)\n}\n\n\/\/ Int returns int type value.\nfunc (c *ConfigFile) Int(section, key string) (int, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.Atoi(value)\n}\n\n\/\/ Int64 returns int64 type value.\nfunc (c *ConfigFile) Int64(section, key string) (int64, error) {\n\t\/\/ Get string format value.\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Convert type.\n\treturn strconv.ParseInt(value, 10, 64)\n}\n\n\/\/ MustValue always returns value without error,\n\/\/ it returns empty string if error occurs.\nfunc (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string {\n\tvalue, err := c.GetValue(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustBool always returns value without error,\n\/\/ it returns false if error occurs.\nfunc (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool {\n\tvalue, err := c.Bool(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustFloat64 always returns value without error,\n\/\/ it returns 0.0 if error occurs.\nfunc (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 {\n\tvalue, err := c.Float64(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustInt always returns value without error,\n\/\/ it returns 0 if error occurs.\nfunc (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int {\n\tvalue, err := c.Int(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ MustInt64 always returns value without error,\n\/\/ it returns 0 if error occurs.\nfunc (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 {\n\tvalue, err := c.Int64(section, key)\n\tif err != nil && len(defaultVal) > 0 {\n\t\treturn defaultVal[0]\n\t}\n\treturn value\n}\n\n\/\/ GetSectionList returns the list of all sections\n\/\/ in the same order in the file.\nfunc (c *ConfigFile) GetSectionList() []string {\n\treturn c.sectionList\n}\n\n\/\/ GetKeyList returns the list of all key in give section\n\/\/ in the same order in the file.\nfunc (c *ConfigFile) GetKeyList(section string) []string {\n\treturn c.keyList[section]\n}\n\n\/\/ GetSection returns key-value pairs in given section.\n\/\/ It section does not exist, returns nil and error.\nfunc (c *ConfigFile) GetSection(section string) (map[string]string, error) {\n\t\/\/ Check if section exists\n\tif _, ok := c.data[section]; !ok {\n\t\t\/\/ Section does not exist.\n\t\treturn nil, getError{SectionNotFound, section}\n\t}\n\n\t\/\/ Remove pre-defined key.\n\tsecMap := c.data[section]\n\tdelete(c.data[section], \" \")\n\n\t\/\/ Section exists.\n\treturn secMap, nil\n}\n\n\/\/ SetSectionComments adds new section comments to the configuration.\n\/\/ If comments are empty(0 length), it will remove its section comments!\n\/\/ It returns true if the comments were inserted or removed, and false if the comments were overwritten.\nfunc (c *ConfigFile) SetSectionComments(section, comments string) bool {\n\t\/\/ Check length of comments\n\tif len(comments) == 0 {\n\t\t\/\/ Check if section exists\n\t\tif _, ok := c.sectionComments[section]; ok {\n\t\t\t\/\/ Execute remove operation\n\t\t\tdelete(c.sectionComments, section)\n\t\t}\n\n\t\t\/\/ Not exists can be seen as remove\n\t\treturn true\n\t}\n\n\t\/\/ Check if comments exists\n\t_, ok := c.sectionComments[section]\n\tif comments[0] != '#' && comments[0] != ';' {\n\t\tcomments = \"; \" + comments\n\t}\n\tc.sectionComments[section] = comments\n\treturn !ok\n}\n\n\/\/ SetKeyComments adds new section-key comments to the configuration.\n\/\/ If comments are empty(0 length), it will remove its section-key comments!\n\/\/ It returns true if the comments were inserted or removed, and false if the comments were overwritten.\n\/\/ If the section does not exist in advance, it is created.\nfunc (c *ConfigFile) SetKeyComments(section, key, comments string) bool {\n\t\/\/ Check if section exists\n\tif _, ok := c.keyComments[section]; ok {\n\t\t\/\/ Section exists\n\t\t\/\/ Check length of comments\n\t\tif len(comments) == 0 {\n\t\t\t\/\/ Check if key exists\n\t\t\tif _, ok := c.keyComments[section][key]; ok {\n\t\t\t\t\/\/ Execute remove operation\n\t\t\t\tdelete(c.keyComments[section], key)\n\t\t\t}\n\n\t\t\t\/\/ Not exists can be seen as remove\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\t\/\/ Section not exists\n\t\t\/\/ Check length of comments\n\t\tif len(comments) == 0 {\n\t\t\t\/\/ Not exists can be seen as remove\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ Execute add operation\n\t\t\tc.keyComments[section] = make(map[string]string)\n\t\t}\n\t}\n\n\t\/\/ Check if key exists\n\t_, ok := c.keyComments[section][key]\n\tif comments[0] != '#' && comments[0] != ';' {\n\t\tcomments = \"; \" + comments\n\t}\n\tc.keyComments[section][key] = comments\n\treturn !ok\n}\n\n\/\/ GetSectionComments returns the comments in the given section.\n\/\/ It returns an empty string(0 length) if the comments do not exist\nfunc (c *ConfigFile) GetSectionComments(section string) (comments string) {\n\treturn c.sectionComments[section]\n}\n\n\/\/ GetKeyComments returns the comments of key in the given section.\n\/\/ It returns an empty string(0 length) if the comments do not exist\nfunc (c *ConfigFile) GetKeyComments(section, key string) (comments string) {\n\t\/\/ Check if section exists\n\tif _, ok := c.keyComments[section]; ok {\n\t\t\/\/ Exists\n\t\treturn c.keyComments[section][key]\n\t}\n\n\t\/\/ Not exists\n\treturn \"\"\n}\n\n\/\/ getError occurs when get value in configuration file with invalid parameter\ntype getError struct {\n\tReason int \/\/ Error reason\n\tName string\n}\n\n\/\/ Implement Error method\nfunc (err getError) Error() string {\n\tswitch err.Reason {\n\tcase SectionNotFound:\n\t\treturn fmt.Sprintf(\"section '%s' not found\", string(err.Name))\n\t}\n\n\treturn \"invalid get error\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2022 The GUAC Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ite6\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/guacsec\/guac\/pkg\/handler\/processor\"\n\tv02 \"github.com\/in-toto\/in-toto-golang\/in_toto\/slsa_provenance\/v0.2\"\n)\n\nvar (\n\tbadProvenance = `{\n\t\t\"_type\": [\"https:\/\/in-toto.io\/Statement\/v0.1\"],\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t}`\n\tunknownProvenance = `{\n\t\t\"_type\": \"https:\/\/in-toto.io\/Statement\/v0.1\",\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t\t\"predicateType\": \"https:\/\/hello-world\/provenance\/v0.2\",\n\t\t\"predicate\":\n\t\t\t{\n\t\t\t\t\"builder\": { \"id\": \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\" },\n\t\t\t\t\"buildType\": \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\t\t\t\"invocation\": {\n\t\t\t\t \"configSource\": {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }, \n\t\t\t\t\t\"entryPoint\": \"build.yaml:maketgz\"\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"metadata\": {\n\t\t\t\t \"completeness\": {\n\t\t\t\t\t \"environment\": true\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"materials\": [\n\t\t\t\t {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }\n\t\t\t\t }, {\n\t\t\t\t\t\"uri\": \"github_hosted_vm:ubuntu-18.04:20210123.1\"\n\t\t\t\t }\n\t\t\t\t]\n\t\t\t}\n\t}`\n\tite6SLSA = `{\n\t\t\"_type\": \"https:\/\/in-toto.io\/Statement\/v0.1\",\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t\t\"predicateType\": \"https:\/\/slsa.dev\/provenance\/v0.2\",\n\t\t\"predicate\":\n\t\t\t{\n\t\t\t\t\"builder\": { \"id\": \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\" },\n\t\t\t\t\"buildType\": \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\t\t\t\"invocation\": {\n\t\t\t\t \"configSource\": {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }, \n\t\t\t\t\t\"entryPoint\": \"build.yaml:maketgz\"\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"metadata\": {\n\t\t\t\t \"completeness\": {\n\t\t\t\t\t \"environment\": true\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"materials\": [\n\t\t\t\t {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }\n\t\t\t\t }, {\n\t\t\t\t\t\"uri\": \"github_hosted_vm:ubuntu-18.04:20210123.1\"\n\t\t\t\t }\n\t\t\t\t]\n\t\t\t}\n\t}`\n\n\tpredicate = v02.ProvenancePredicate{\n\t\tBuilder: v02.ProvenanceBuilder{\n\t\t\tID: \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\",\n\t\t},\n\t\tBuildType: \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\tInvocation: v02.ProvenanceInvocation{\n\t\t\tConfigSource: v02.ConfigSource{\n\t\t\t\tURI: \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\tDigest: v02.DigestSet{\n\t\t\t\t\t\"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\",\n\t\t\t\t},\n\t\t\t\tEntryPoint: \"build.yaml:maketgz\",\n\t\t\t},\n\t\t},\n\t\tMetadata: &v02.ProvenanceMetadata{\n\t\t\tCompleteness: v02.ProvenanceComplete{\n\t\t\t\tEnvironment: true,\n\t\t\t},\n\t\t},\n\t\tMaterials: []v02.ProvenanceMaterial{\n\t\t\t{\n\t\t\t\tURI: \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\tDigest: v02.DigestSet{\n\t\t\t\t\t\"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tURI: \"github_hosted_vm:ubuntu-18.04:20210123.1\",\n\t\t\t},\n\t\t},\n\t}\n)\n\nfunc TestITE6Processor_ValidateSchema(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\targs *processor.Document\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"ITE6 Doc with unknown payload\",\n\t\t\targs: &processor.Document{\n\t\t\t\tBlob: []byte(badProvenance),\n\t\t\t\tType: processor.DocumentITE6,\n\t\t\t\tFormat: processor.FormatJSON,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ITE6 Doc with valid payload\",\n\t\t\targs: &processor.Document{\n\t\t\t\tBlob: []byte(ite6SLSA),\n\t\t\t\tType: processor.DocumentITE6,\n\t\t\t\tFormat: processor.FormatJSON,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\te := &ITE6Processor{}\n\t\t\tif err := e.ValidateSchema(tt.args); (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ITE6Processor.ValidateSchema() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestITE6Processor_Unpack(t *testing.T) {\n\tpredicatePayload, _ := json.Marshal(predicate)\n\ttests := []struct {\n\t\tname string\n\t\targs *processor.Document\n\t\twant []*processor.Document\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"ITE6 Doc with unknown payload\",\n\t\t\targs: &processor.Document{\n\t\t\t\tBlob: []byte(unknownProvenance),\n\t\t\t\tType: processor.DocumentITE6,\n\t\t\t\tFormat: processor.FormatJSON,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*processor.Document{{\n\t\t\t\tBlob: predicatePayload,\n\t\t\t\tType: processor.DocumentUnknown,\n\t\t\t\tFormat: processor.FormatUnknown,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t}},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"ITE6 Doc with valid payload\",\n\t\t\targs: &processor.Document{\n\t\t\t\tBlob: []byte(ite6SLSA),\n\t\t\t\tType: processor.DocumentITE6,\n\t\t\t\tFormat: processor.FormatJSON,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*processor.Document{{\n\t\t\t\tBlob: predicatePayload,\n\t\t\t\tType: processor.DocumentSLSA,\n\t\t\t\tFormat: processor.FormatJSON,\n\t\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\t\tSource: \"TestSource\",\n\t\t\t\t},\n\t\t\t}},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\te := &ITE6Processor{}\n\t\t\tgot, err := e.Unpack(tt.args)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparsedGot := parseV02(got[0].Blob)\n\t\t\tparsedWant := parseV02(tt.want[0].Blob)\n\t\t\tif !reflect.DeepEqual(parsedGot, parsedWant) {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() blobs = %v, want %v\", parsedGot, parsedWant)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got[0].Type, tt.want[0].Type) {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() Type = %v, want %v\", got[0].Type, tt.want[0].Type)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got[0].Format, tt.want[0].Format) {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() Format = %v, want %v\", got[0].Format, tt.want[0].Format)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got[0].SourceInformation, tt.want[0].SourceInformation) {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() SourceInformation = %v, want %v\", got[0].SourceInformation, tt.want[0].SourceInformation)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc parseV02(p []byte) *v02.ProvenancePredicate {\n\tps := v02.ProvenancePredicate{}\n\tif err := json.Unmarshal(p, &ps); err != nil {\n\t\treturn nil\n\t}\n\treturn &ps\n}\n<commit_msg>added fixes based on comments<commit_after>\/\/\n\/\/ Copyright 2022 The GUAC Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ite6\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/guacsec\/guac\/pkg\/handler\/processor\"\n)\n\nvar (\n\tbadProvenance = `{\n\t\t\"_type\": [\"https:\/\/in-toto.io\/Statement\/v0.1\"],\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t}`\n\tunknownProvenance = `{\n\t\t\"_type\": \"https:\/\/in-toto.io\/Statement\/v0.1\",\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t\t\"predicateType\": \"https:\/\/hello-world\/provenance\/v0.2\",\n\t\t\"predicate\":\n\t\t\t{\n\t\t\t\t\"builder\": { \"id\": \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\" },\n\t\t\t\t\"buildType\": \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\t\t\t\"invocation\": {\n\t\t\t\t \"configSource\": {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }, \n\t\t\t\t\t\"entryPoint\": \"build.yaml:maketgz\"\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"metadata\": {\n\t\t\t\t \"completeness\": {\n\t\t\t\t\t \"environment\": true\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"materials\": [\n\t\t\t\t {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }\n\t\t\t\t }, {\n\t\t\t\t\t\"uri\": \"github_hosted_vm:ubuntu-18.04:20210123.1\"\n\t\t\t\t }\n\t\t\t\t]\n\t\t\t}\n\t}`\n\tite6SLSA = `{\n\t\t\"_type\": \"https:\/\/in-toto.io\/Statement\/v0.1\",\n\t\t\"subject\": [{\"name\": \"_\", \"digest\": {\"sha256\": \"5678...\"}}],\n\t\t\"predicateType\": \"https:\/\/slsa.dev\/provenance\/v0.2\",\n\t\t\"predicate\":\n\t\t\t{\n\t\t\t\t\"builder\": { \"id\": \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\" },\n\t\t\t\t\"buildType\": \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\t\t\t\"invocation\": {\n\t\t\t\t \"configSource\": {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }, \n\t\t\t\t\t\"entryPoint\": \"build.yaml:maketgz\"\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"metadata\": {\n\t\t\t\t \"completeness\": {\n\t\t\t\t\t \"environment\": true\n\t\t\t\t }\n\t\t\t\t},\n\t\t\t\t\"materials\": [\n\t\t\t\t {\n\t\t\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }\n\t\t\t\t }, {\n\t\t\t\t\t\"uri\": \"github_hosted_vm:ubuntu-18.04:20210123.1\"\n\t\t\t\t }\n\t\t\t\t]\n\t\t\t}\n\t}`\n\n\tpredicate = `{\n\t\t\"builder\": { \"id\": \"https:\/\/github.com\/Attestations\/GitHubHostedActions@v1\" },\n\t\t\"buildType\": \"https:\/\/github.com\/Attestations\/GitHubActionsWorkflow@v1\",\n\t\t\"invocation\": {\n\t\t \"configSource\": {\n\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }, \n\t\t\t\"entryPoint\": \"build.yaml:maketgz\"\n\t\t }\n\t\t},\n\t\t\"metadata\": {\n\t\t \"completeness\": {\n\t\t\t \"environment\": true\n\t\t }\n\t\t},\n\t\t\"materials\": [\n\t\t {\n\t\t\t\"uri\": \"git+https:\/\/github.com\/curl\/curl-docker@master\",\n\t\t\t\"digest\": { \"sha1\": \"d6525c840a62b398424a78d792f457477135d0cf\" }\n\t\t }, {\n\t\t\t\"uri\": \"github_hosted_vm:ubuntu-18.04:20210123.1\"\n\t\t }\n\t\t]\n\t}`\n)\n\nfunc TestITE6Processor_ValidateSchema(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\targs *processor.Document\n\t\twantErr bool\n\t}{{\n\t\tname: \"ITE6 Doc with unknown payload\",\n\t\targs: &processor.Document{\n\t\t\tBlob: []byte(badProvenance),\n\t\t\tType: processor.DocumentITE6,\n\t\t\tFormat: processor.FormatJSON,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t},\n\t\twantErr: true,\n\t}, {\n\t\tname: \"ITE6 Doc with valid payload\",\n\t\targs: &processor.Document{\n\t\t\tBlob: []byte(ite6SLSA),\n\t\t\tType: processor.DocumentITE6,\n\t\t\tFormat: processor.FormatJSON,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t},\n\t\twantErr: false,\n\t}}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\te := &ITE6Processor{}\n\t\t\tif err := e.ValidateSchema(tt.args); (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ITE6Processor.ValidateSchema() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestITE6Processor_Unpack(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\targs *processor.Document\n\t\twant []*processor.Document\n\t\twantErr bool\n\t}{{\n\t\tname: \"ITE6 Doc with unknown payload\",\n\t\targs: &processor.Document{\n\t\t\tBlob: []byte(unknownProvenance),\n\t\t\tType: processor.DocumentITE6,\n\t\t\tFormat: processor.FormatJSON,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t},\n\t\twant: []*processor.Document{{\n\t\t\tBlob: []byte(predicate),\n\t\t\tType: processor.DocumentUnknown,\n\t\t\tFormat: processor.FormatUnknown,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t}},\n\t\twantErr: false,\n\t}, {\n\t\tname: \"ITE6 Doc with valid payload\",\n\t\targs: &processor.Document{\n\t\t\tBlob: []byte(ite6SLSA),\n\t\t\tType: processor.DocumentITE6,\n\t\t\tFormat: processor.FormatJSON,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t},\n\t\twant: []*processor.Document{{\n\t\t\tBlob: []byte(predicate),\n\t\t\tType: processor.DocumentSLSA,\n\t\t\tFormat: processor.FormatJSON,\n\t\t\tSourceInformation: processor.SourceInformation{\n\t\t\t\tCollector: \"TestCollector\",\n\t\t\t\tSource: \"TestSource\",\n\t\t\t},\n\t\t}},\n\t\twantErr: false,\n\t}}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\te := &ITE6Processor{}\n\t\t\tgot, err := e.Unpack(tt.args)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(got) > 0 {\n\t\t\t\tfor i := range got {\n\t\t\t\t\t\/\/ check if a and b Docuemnts are equal\n\t\t\t\t\tif !docEqual(got[i], tt.want[i]) {\n\t\t\t\t\t\tt.Errorf(\"ITE6Processor.Unpack() = %v, want %v\", got[i], tt.want[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc docEqual(a, b *processor.Document) bool {\n\ta.Blob = consistentJsonBytes(a.Blob)\n\tb.Blob = consistentJsonBytes(b.Blob)\n\treturn reflect.DeepEqual(a, b)\n}\n\n\/\/ consistentJsonBytes makes sure that the blob byte comparison\n\/\/ does not differ due to whitespace in testing definitions.\nfunc consistentJsonBytes(b []byte) []byte {\n\tvar v interface{}\n\terr := json.Unmarshal(b, &v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tout, _ := json.Marshal(v)\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage customresource\n\nimport (\n\t\"context\"\n\n\t\"github.com\/go-openapi\/validate\"\n\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n\tapiserverstorage \"k8s.io\/apiserver\/pkg\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/names\"\n\n\t\"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\"\n\tstructuralschema \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\"\n\tstructurallisttype \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\/listtype\"\n\tschemaobjectmeta \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\/objectmeta\"\n)\n\n\/\/ customResourceStrategy implements behavior for CustomResources.\ntype customResourceStrategy struct {\n\truntime.ObjectTyper\n\tnames.NameGenerator\n\n\tnamespaceScoped bool\n\tvalidator customResourceValidator\n\tstructuralSchemas map[string]*structuralschema.Structural\n\tstatus *apiextensions.CustomResourceSubresourceStatus\n\tscale *apiextensions.CustomResourceSubresourceScale\n}\n\nfunc NewStrategy(typer runtime.ObjectTyper, namespaceScoped bool, kind schema.GroupVersionKind, schemaValidator, statusSchemaValidator *validate.SchemaValidator, structuralSchemas map[string]*structuralschema.Structural, status *apiextensions.CustomResourceSubresourceStatus, scale *apiextensions.CustomResourceSubresourceScale) customResourceStrategy {\n\treturn customResourceStrategy{\n\t\tObjectTyper: typer,\n\t\tNameGenerator: names.SimpleNameGenerator,\n\t\tnamespaceScoped: namespaceScoped,\n\t\tstatus: status,\n\t\tscale: scale,\n\t\tvalidator: customResourceValidator{\n\t\t\tnamespaceScoped: namespaceScoped,\n\t\t\tkind: kind,\n\t\t\tschemaValidator: schemaValidator,\n\t\t\tstatusSchemaValidator: statusSchemaValidator,\n\t\t},\n\t\tstructuralSchemas: structuralSchemas,\n\t}\n}\n\nfunc (a customResourceStrategy) NamespaceScoped() bool {\n\treturn a.namespaceScoped\n}\n\n\/\/ PrepareForCreate clears the status of a CustomResource before creation.\nfunc (a customResourceStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {\n\tif a.status != nil {\n\t\tcustomResourceObject := obj.(*unstructured.Unstructured)\n\t\tcustomResource := customResourceObject.UnstructuredContent()\n\n\t\t\/\/ create cannot set status\n\t\tif _, ok := customResource[\"status\"]; ok {\n\t\t\tdelete(customResource, \"status\")\n\t\t}\n\t}\n\n\taccessor, _ := meta.Accessor(obj)\n\taccessor.SetGeneration(1)\n}\n\n\/\/ PrepareForUpdate clears fields that are not allowed to be set by end users on update.\nfunc (a customResourceStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\tnewCustomResourceObject := obj.(*unstructured.Unstructured)\n\toldCustomResourceObject := old.(*unstructured.Unstructured)\n\n\tnewCustomResource := newCustomResourceObject.UnstructuredContent()\n\toldCustomResource := oldCustomResourceObject.UnstructuredContent()\n\n\t\/\/ If the \/status subresource endpoint is installed, update is not allowed to set status.\n\tif a.status != nil {\n\t\t_, ok1 := newCustomResource[\"status\"]\n\t\t_, ok2 := oldCustomResource[\"status\"]\n\t\tswitch {\n\t\tcase ok2:\n\t\t\tnewCustomResource[\"status\"] = oldCustomResource[\"status\"]\n\t\tcase ok1:\n\t\t\tdelete(newCustomResource, \"status\")\n\t\t}\n\t}\n\n\t\/\/ except for the changes to `metadata`, any other changes\n\t\/\/ cause the generation to increment.\n\tnewCopyContent := copyNonMetadata(newCustomResource)\n\toldCopyContent := copyNonMetadata(oldCustomResource)\n\tif !apiequality.Semantic.DeepEqual(newCopyContent, oldCopyContent) {\n\t\toldAccessor, _ := meta.Accessor(oldCustomResourceObject)\n\t\tnewAccessor, _ := meta.Accessor(newCustomResourceObject)\n\t\tnewAccessor.SetGeneration(oldAccessor.GetGeneration() + 1)\n\t}\n}\n\nfunc copyNonMetadata(original map[string]interface{}) map[string]interface{} {\n\tret := make(map[string]interface{})\n\tfor key, val := range original {\n\t\tif key == \"metadata\" {\n\t\t\tcontinue\n\t\t}\n\t\tret[key] = val\n\t}\n\treturn ret\n}\n\n\/\/ Validate validates a new CustomResource.\nfunc (a customResourceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {\n\tvar errs field.ErrorList\n\terrs = append(errs, a.validator.Validate(ctx, obj, a.scale)...)\n\n\t\/\/ validate embedded resources\n\tif u, ok := obj.(*unstructured.Unstructured); ok {\n\t\tv := obj.GetObjectKind().GroupVersionKind().Version\n\t\terrs = append(errs, schemaobjectmeta.Validate(nil, u.Object, a.structuralSchemas[v], false)...)\n\n\t\t\/\/ validate x-kubernetes-list-type \"map\" and \"set\" invariant\n\t\terrs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], u.Object)...)\n\t}\n\n\treturn errs\n}\n\n\/\/ Canonicalize normalizes the object after validation.\nfunc (customResourceStrategy) Canonicalize(obj runtime.Object) {\n}\n\n\/\/ AllowCreateOnUpdate is false for CustomResources; this means a POST is\n\/\/ needed to create one.\nfunc (customResourceStrategy) AllowCreateOnUpdate() bool {\n\treturn false\n}\n\n\/\/ AllowUnconditionalUpdate is the default update policy for CustomResource objects.\nfunc (customResourceStrategy) AllowUnconditionalUpdate() bool {\n\treturn false\n}\n\n\/\/ ValidateUpdate is the default update validation for an end user updating status.\nfunc (a customResourceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {\n\tvar errs field.ErrorList\n\terrs = append(errs, a.validator.ValidateUpdate(ctx, obj, old, a.scale)...)\n\n\tuNew, ok := obj.(*unstructured.Unstructured)\n\tif !ok {\n\t\treturn errs\n\t}\n\tuOld, ok := old.(*unstructured.Unstructured)\n\tif !ok {\n\t\treturn errs\n\t}\n\n\t\/\/ Checks the embedded objects. We don't make a difference between update and create for those.\n\tv := obj.GetObjectKind().GroupVersionKind().Version\n\terrs = append(errs, schemaobjectmeta.Validate(nil, uNew.Object, a.structuralSchemas[v], false)...)\n\n\t\/\/ ratcheting validation of x-kubernetes-list-type value map and set\n\tif oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uOld.Object); len(oldErrs) == 0 {\n\t\terrs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uNew.Object)...)\n\t}\n\n\treturn errs\n}\n\n\/\/ GetAttrs returns labels and fields of a given object for filtering purposes.\nfunc (a customResourceStrategy) GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn labels.Set(accessor.GetLabels()), objectMetaFieldsSet(accessor, a.namespaceScoped), nil\n}\n\n\/\/ objectMetaFieldsSet returns a fields that represent the ObjectMeta.\nfunc objectMetaFieldsSet(objectMeta metav1.Object, namespaceScoped bool) fields.Set {\n\tif namespaceScoped {\n\t\treturn fields.Set{\n\t\t\t\"metadata.name\": objectMeta.GetName(),\n\t\t\t\"metadata.namespace\": objectMeta.GetNamespace(),\n\t\t}\n\t}\n\treturn fields.Set{\n\t\t\"metadata.name\": objectMeta.GetName(),\n\t}\n}\n\n\/\/ MatchCustomResourceDefinitionStorage is the filter used by the generic etcd backend to route\n\/\/ watch events from etcd to clients of the apiserver only interested in specific\n\/\/ labels\/fields.\nfunc (a customResourceStrategy) MatchCustomResourceDefinitionStorage(label labels.Selector, field fields.Selector) apiserverstorage.SelectionPredicate {\n\treturn apiserverstorage.SelectionPredicate{\n\t\tLabel: label,\n\t\tField: field,\n\t\tGetAttrs: a.GetAttrs,\n\t}\n}\n<commit_msg>[apiextensions] Remove nil check for map in PrepareForCreate<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage customresource\n\nimport (\n\t\"context\"\n\n\t\"github.com\/go-openapi\/validate\"\n\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n\tapiserverstorage \"k8s.io\/apiserver\/pkg\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/names\"\n\n\t\"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\"\n\tstructuralschema \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\"\n\tstructurallisttype \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\/listtype\"\n\tschemaobjectmeta \"k8s.io\/apiextensions-apiserver\/pkg\/apiserver\/schema\/objectmeta\"\n)\n\n\/\/ customResourceStrategy implements behavior for CustomResources.\ntype customResourceStrategy struct {\n\truntime.ObjectTyper\n\tnames.NameGenerator\n\n\tnamespaceScoped bool\n\tvalidator customResourceValidator\n\tstructuralSchemas map[string]*structuralschema.Structural\n\tstatus *apiextensions.CustomResourceSubresourceStatus\n\tscale *apiextensions.CustomResourceSubresourceScale\n}\n\nfunc NewStrategy(typer runtime.ObjectTyper, namespaceScoped bool, kind schema.GroupVersionKind, schemaValidator, statusSchemaValidator *validate.SchemaValidator, structuralSchemas map[string]*structuralschema.Structural, status *apiextensions.CustomResourceSubresourceStatus, scale *apiextensions.CustomResourceSubresourceScale) customResourceStrategy {\n\treturn customResourceStrategy{\n\t\tObjectTyper: typer,\n\t\tNameGenerator: names.SimpleNameGenerator,\n\t\tnamespaceScoped: namespaceScoped,\n\t\tstatus: status,\n\t\tscale: scale,\n\t\tvalidator: customResourceValidator{\n\t\t\tnamespaceScoped: namespaceScoped,\n\t\t\tkind: kind,\n\t\t\tschemaValidator: schemaValidator,\n\t\t\tstatusSchemaValidator: statusSchemaValidator,\n\t\t},\n\t\tstructuralSchemas: structuralSchemas,\n\t}\n}\n\nfunc (a customResourceStrategy) NamespaceScoped() bool {\n\treturn a.namespaceScoped\n}\n\n\/\/ PrepareForCreate clears the status of a CustomResource before creation.\nfunc (a customResourceStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {\n\tif a.status != nil {\n\t\tcustomResourceObject := obj.(*unstructured.Unstructured)\n\t\tcustomResource := customResourceObject.UnstructuredContent()\n\n\t\t\/\/ create cannot set status\n\t\tdelete(customResource, \"status\")\n\t}\n\n\taccessor, _ := meta.Accessor(obj)\n\taccessor.SetGeneration(1)\n}\n\n\/\/ PrepareForUpdate clears fields that are not allowed to be set by end users on update.\nfunc (a customResourceStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\tnewCustomResourceObject := obj.(*unstructured.Unstructured)\n\toldCustomResourceObject := old.(*unstructured.Unstructured)\n\n\tnewCustomResource := newCustomResourceObject.UnstructuredContent()\n\toldCustomResource := oldCustomResourceObject.UnstructuredContent()\n\n\t\/\/ If the \/status subresource endpoint is installed, update is not allowed to set status.\n\tif a.status != nil {\n\t\t_, ok1 := newCustomResource[\"status\"]\n\t\t_, ok2 := oldCustomResource[\"status\"]\n\t\tswitch {\n\t\tcase ok2:\n\t\t\tnewCustomResource[\"status\"] = oldCustomResource[\"status\"]\n\t\tcase ok1:\n\t\t\tdelete(newCustomResource, \"status\")\n\t\t}\n\t}\n\n\t\/\/ except for the changes to `metadata`, any other changes\n\t\/\/ cause the generation to increment.\n\tnewCopyContent := copyNonMetadata(newCustomResource)\n\toldCopyContent := copyNonMetadata(oldCustomResource)\n\tif !apiequality.Semantic.DeepEqual(newCopyContent, oldCopyContent) {\n\t\toldAccessor, _ := meta.Accessor(oldCustomResourceObject)\n\t\tnewAccessor, _ := meta.Accessor(newCustomResourceObject)\n\t\tnewAccessor.SetGeneration(oldAccessor.GetGeneration() + 1)\n\t}\n}\n\nfunc copyNonMetadata(original map[string]interface{}) map[string]interface{} {\n\tret := make(map[string]interface{})\n\tfor key, val := range original {\n\t\tif key == \"metadata\" {\n\t\t\tcontinue\n\t\t}\n\t\tret[key] = val\n\t}\n\treturn ret\n}\n\n\/\/ Validate validates a new CustomResource.\nfunc (a customResourceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {\n\tvar errs field.ErrorList\n\terrs = append(errs, a.validator.Validate(ctx, obj, a.scale)...)\n\n\t\/\/ validate embedded resources\n\tif u, ok := obj.(*unstructured.Unstructured); ok {\n\t\tv := obj.GetObjectKind().GroupVersionKind().Version\n\t\terrs = append(errs, schemaobjectmeta.Validate(nil, u.Object, a.structuralSchemas[v], false)...)\n\n\t\t\/\/ validate x-kubernetes-list-type \"map\" and \"set\" invariant\n\t\terrs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], u.Object)...)\n\t}\n\n\treturn errs\n}\n\n\/\/ Canonicalize normalizes the object after validation.\nfunc (customResourceStrategy) Canonicalize(obj runtime.Object) {\n}\n\n\/\/ AllowCreateOnUpdate is false for CustomResources; this means a POST is\n\/\/ needed to create one.\nfunc (customResourceStrategy) AllowCreateOnUpdate() bool {\n\treturn false\n}\n\n\/\/ AllowUnconditionalUpdate is the default update policy for CustomResource objects.\nfunc (customResourceStrategy) AllowUnconditionalUpdate() bool {\n\treturn false\n}\n\n\/\/ ValidateUpdate is the default update validation for an end user updating status.\nfunc (a customResourceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {\n\tvar errs field.ErrorList\n\terrs = append(errs, a.validator.ValidateUpdate(ctx, obj, old, a.scale)...)\n\n\tuNew, ok := obj.(*unstructured.Unstructured)\n\tif !ok {\n\t\treturn errs\n\t}\n\tuOld, ok := old.(*unstructured.Unstructured)\n\tif !ok {\n\t\treturn errs\n\t}\n\n\t\/\/ Checks the embedded objects. We don't make a difference between update and create for those.\n\tv := obj.GetObjectKind().GroupVersionKind().Version\n\terrs = append(errs, schemaobjectmeta.Validate(nil, uNew.Object, a.structuralSchemas[v], false)...)\n\n\t\/\/ ratcheting validation of x-kubernetes-list-type value map and set\n\tif oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uOld.Object); len(oldErrs) == 0 {\n\t\terrs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uNew.Object)...)\n\t}\n\n\treturn errs\n}\n\n\/\/ GetAttrs returns labels and fields of a given object for filtering purposes.\nfunc (a customResourceStrategy) GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn labels.Set(accessor.GetLabels()), objectMetaFieldsSet(accessor, a.namespaceScoped), nil\n}\n\n\/\/ objectMetaFieldsSet returns a fields that represent the ObjectMeta.\nfunc objectMetaFieldsSet(objectMeta metav1.Object, namespaceScoped bool) fields.Set {\n\tif namespaceScoped {\n\t\treturn fields.Set{\n\t\t\t\"metadata.name\": objectMeta.GetName(),\n\t\t\t\"metadata.namespace\": objectMeta.GetNamespace(),\n\t\t}\n\t}\n\treturn fields.Set{\n\t\t\"metadata.name\": objectMeta.GetName(),\n\t}\n}\n\n\/\/ MatchCustomResourceDefinitionStorage is the filter used by the generic etcd backend to route\n\/\/ watch events from etcd to clients of the apiserver only interested in specific\n\/\/ labels\/fields.\nfunc (a customResourceStrategy) MatchCustomResourceDefinitionStorage(label labels.Selector, field fields.Selector) apiserverstorage.SelectionPredicate {\n\treturn apiserverstorage.SelectionPredicate{\n\t\tLabel: label,\n\t\tField: field,\n\t\tGetAttrs: a.GetAttrs,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage system\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar cmdlineFilePath = \"\/proc\/cmdline\"\n\ntype CmdlineArg struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\n\nfunc (d CmdlineArg) String() string {\n\ts, _ := json.Marshal(d)\n\treturn string(s)\n}\n\n\/\/ As per the documentation kernel command line parameters can also be specified\n\/\/ within double quotes and are separated with spaces.\n\/\/ https:\/\/www.kernel.org\/doc\/html\/v4.14\/admin-guide\/kernel-parameters.html#the-kernel-s-command-line-parameters\nvar withinQuotes = false\n\nfunc splitAfterSpace(inputChar rune) bool {\n\t\/\/ words with space within double quotes cannot be split.\n\t\/\/ so we track the start quote and when we find the\n\t\/\/ end quote, then we reiterate.\n\tif inputChar == '\"' {\n\t\twithinQuotes = !withinQuotes\n\t\treturn false\n\t}\n\t\/\/ignore spaces when it is within quotes.\n\tif withinQuotes {\n\t\treturn false\n\t}\n\treturn inputChar == ' '\n}\n\n\/\/ CmdlineArgs returns all the kernel cmdline. It is read from cat \/proc\/cmdline.\nfunc CmdlineArgs() ([]CmdlineArg, error) {\n\tlines, err := ReadFileIntoLines(cmdlineFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading the file %v, %v\", cmdlineFilePath, err)\n\t}\n\tif len(lines) < 1 {\n\t\treturn nil, fmt.Errorf(\"no lines are retured\")\n\t}\n\tcmdlineArgs := strings.FieldsFunc(lines[0], splitAfterSpace)\n\tvar result = make([]CmdlineArg, 0, len(cmdlineArgs))\n\t\/\/ for commandline only one line is returned.\n\tfor _, words := range cmdlineArgs {\n\t\t\/\/ Ignore the keys that start with double quotes\n\t\tif strings.Index(words, \"\\\"\") == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsplitsWords := strings.Split(words, \"=\")\n\t\tif len(splitsWords) < 2 {\n\t\t\tvar stats = CmdlineArg{\n\t\t\t\tKey: splitsWords[0],\n\t\t\t}\n\t\t\tresult = append(result, stats)\n\t\t} else {\n\t\t\t\/\/remove quotes in the values\n\t\t\ttrimmedValue := strings.Trim(splitsWords[1], \"\\\"'\")\n\t\t\tvar stats = CmdlineArg{\n\t\t\t\tKey: splitsWords[0],\n\t\t\t\tValue: trimmedValue,\n\t\t\t}\n\t\t\tresult = append(result, stats)\n\t\t}\n\t}\n\treturn result, nil\n}\n<commit_msg>renaming splitWords to tokens<commit_after>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage system\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar cmdlineFilePath = \"\/proc\/cmdline\"\n\ntype CmdlineArg struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\n\nfunc (d CmdlineArg) String() string {\n\ts, _ := json.Marshal(d)\n\treturn string(s)\n}\n\n\/\/ As per the documentation kernel command line parameters can also be specified\n\/\/ within double quotes and are separated with spaces.\n\/\/ https:\/\/www.kernel.org\/doc\/html\/v4.14\/admin-guide\/kernel-parameters.html#the-kernel-s-command-line-parameters\nvar withinQuotes = false\n\nfunc splitAfterSpace(inputChar rune) bool {\n\t\/\/ words with space within double quotes cannot be split.\n\t\/\/ so we track the start quote and when we find the\n\t\/\/ end quote, then we reiterate.\n\tif inputChar == '\"' {\n\t\twithinQuotes = !withinQuotes\n\t\treturn false\n\t}\n\t\/\/ignore spaces when it is within quotes.\n\tif withinQuotes {\n\t\treturn false\n\t}\n\treturn inputChar == ' '\n}\n\n\/\/ CmdlineArgs returns all the kernel cmdline. It is read from cat \/proc\/cmdline.\nfunc CmdlineArgs() ([]CmdlineArg, error) {\n\tlines, err := ReadFileIntoLines(cmdlineFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading the file %s, %v\", cmdlineFilePath, err)\n\t}\n\tif len(lines) < 1 {\n\t\treturn nil, fmt.Errorf(\"no lines are retured\")\n\t}\n\tcmdlineArgs := strings.FieldsFunc(lines[0], splitAfterSpace)\n\tvar result = make([]CmdlineArg, 0, len(cmdlineArgs))\n\t\/\/ for commandline only one line is returned.\n\tfor _, words := range cmdlineArgs {\n\t\t\/\/ Ignore the keys that start with double quotes\n\t\tif strings.Index(words, \"\\\"\") == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttokens := strings.Split(words, \"=\")\n\t\tif len(tokens) < 2 {\n\t\t\tvar stats = CmdlineArg{\n\t\t\t\tKey: tokens[0],\n\t\t\t}\n\t\t\tresult = append(result, stats)\n\t\t} else {\n\t\t\t\/\/remove quotes in the values\n\t\t\ttrimmedValue := strings.Trim(tokens[1], \"\\\"'\")\n\t\t\tvar stats = CmdlineArg{\n\t\t\t\tKey: tokens[0],\n\t\t\t\tValue: trimmedValue,\n\t\t\t}\n\t\t\tresult = append(result, stats)\n\t\t}\n\t}\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage saltpack\n\nimport (\n\t\"strings\"\n\t\"regexp\"\n)\n\ntype headerOrFooterMarker string\n\nconst (\n\theaderMarker headerOrFooterMarker = \"BEGIN\"\n\tfooterMarker headerOrFooterMarker = \"END\"\n)\n\nfunc pop(v *([]string), n int) (ret []string) {\n\tret = (*v)[(len(*v) - n):]\n\t*v = (*v)[0:(len(*v) - n)]\n\treturn\n}\n\nfunc shift(v *([]string), n int) (ret []string) {\n\tret = (*v)[0:n]\n\t*v = (*v)[n:]\n\treturn\n}\n\nfunc makeFrame(which headerOrFooterMarker, typ MessageType, brand string) string {\n\tsffx := getStringForType(typ)\n\tif len(sffx) == 0 {\n\t\treturn sffx\n\t}\n\twords := []string{string(which)}\n\tif len(brand) > 0 {\n\t\twords = append(words, brand)\n\t\tbrand += \" \"\n\t}\n\twords = append(words, strings.ToUpper(SaltpackFormatName))\n\twords = append(words, sffx)\n\treturn strings.Join(words, \" \")\n}\n\n\/\/ MakeArmorHeader makes the armor header for the message type for the given \"brand\"\nfunc MakeArmorHeader(typ MessageType, brand string) string {\n\treturn makeFrame(headerMarker, typ, brand)\n}\n\n\/\/ MakeArmorFooter makes the armor footer for the message type for the given \"brand\"\nfunc MakeArmorFooter(typ MessageType, brand string) string {\n\treturn makeFrame(footerMarker, typ, brand)\n}\n\nfunc getStringForType(typ MessageType) string {\n\tswitch typ {\n\tcase MessageTypeEncryption:\n\t\treturn EncryptionArmorString\n\tcase MessageTypeAttachedSignature:\n\t\treturn SignedArmorString\n\tcase MessageTypeDetachedSignature:\n\t\treturn DetachedSignatureArmorString\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc parseFrame(m string, typ MessageType, hof headerOrFooterMarker) (brand string, err error) {\n\n\t\/\/ strip whitespace\n\tre := regexp.MustCompile(\">|[^\\\\S+]{1,}\")\n\ts := re.ReplaceAllString(m, \" \")\n\n\tsffx := getStringForType(typ)\n\tif len(sffx) == 0 {\n\t\terr = makeErrBadFrame(\"Message type %v not found\", typ)\n\t\treturn\n\t}\n\tv := strings.Split(s, \" \")\n\tif len(v) != 4 && len(v) != 5 {\n\t\terr = makeErrBadFrame(\"wrong number of words (%d)\", len(v))\n\t\treturn\n\t}\n\n\tfront := shift(&v, 1)\n\tif front[0] != string(hof) {\n\t\terr = makeErrBadFrame(\"Bad prefix: %s (wanted %s)\", front[0], string(hof))\n\t\treturn\n\t}\n\n\texpected := getStringForType(typ)\n\ttmp := pop(&v, 2)\n\treceived := strings.Join(tmp, \" \")\n\tif received != expected {\n\t\terr = makeErrBadFrame(\"wanted %q but got %q\", expected, received)\n\t\treturn\n\t}\n\tspfn := pop(&v, 1)\n\tif spfn[0] != strings.ToUpper(SaltpackFormatName) {\n\t\terr = makeErrBadFrame(\"bad format name (%s)\", spfn[0])\n\t\treturn\n\t}\n\tif len(v) > 0 {\n\t\tbrand = v[0]\n\t}\n\treturn brand, err\n}\n<commit_msg>Simplified the regex, including wakas<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage saltpack\n\nimport (\n\t\"strings\"\n\t\"regexp\"\n)\n\ntype headerOrFooterMarker string\n\nconst (\n\theaderMarker headerOrFooterMarker = \"BEGIN\"\n\tfooterMarker headerOrFooterMarker = \"END\"\n)\n\nfunc pop(v *([]string), n int) (ret []string) {\n\tret = (*v)[(len(*v) - n):]\n\t*v = (*v)[0:(len(*v) - n)]\n\treturn\n}\n\nfunc shift(v *([]string), n int) (ret []string) {\n\tret = (*v)[0:n]\n\t*v = (*v)[n:]\n\treturn\n}\n\nfunc makeFrame(which headerOrFooterMarker, typ MessageType, brand string) string {\n\tsffx := getStringForType(typ)\n\tif len(sffx) == 0 {\n\t\treturn sffx\n\t}\n\twords := []string{string(which)}\n\tif len(brand) > 0 {\n\t\twords = append(words, brand)\n\t\tbrand += \" \"\n\t}\n\twords = append(words, strings.ToUpper(SaltpackFormatName))\n\twords = append(words, sffx)\n\treturn strings.Join(words, \" \")\n}\n\n\/\/ MakeArmorHeader makes the armor header for the message type for the given \"brand\"\nfunc MakeArmorHeader(typ MessageType, brand string) string {\n\treturn makeFrame(headerMarker, typ, brand)\n}\n\n\/\/ MakeArmorFooter makes the armor footer for the message type for the given \"brand\"\nfunc MakeArmorFooter(typ MessageType, brand string) string {\n\treturn makeFrame(footerMarker, typ, brand)\n}\n\nfunc getStringForType(typ MessageType) string {\n\tswitch typ {\n\tcase MessageTypeEncryption:\n\t\treturn EncryptionArmorString\n\tcase MessageTypeAttachedSignature:\n\t\treturn SignedArmorString\n\tcase MessageTypeDetachedSignature:\n\t\treturn DetachedSignatureArmorString\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc parseFrame(m string, typ MessageType, hof headerOrFooterMarker) (brand string, err error) {\n\n\t\/\/ strip whitespace\n\tre := regexp.MustCompile(\"[>\\\\s]+\")\n\ts := re.ReplaceAllString(m, \" \")\n\n\tsffx := getStringForType(typ)\n\tif len(sffx) == 0 {\n\t\terr = makeErrBadFrame(\"Message type %v not found\", typ)\n\t\treturn\n\t}\n\tv := strings.Split(s, \" \")\n\tif len(v) != 4 && len(v) != 5 {\n\t\terr = makeErrBadFrame(\"wrong number of words (%d)\", len(v))\n\t\treturn\n\t}\n\n\tfront := shift(&v, 1)\n\tif front[0] != string(hof) {\n\t\terr = makeErrBadFrame(\"Bad prefix: %s (wanted %s)\", front[0], string(hof))\n\t\treturn\n\t}\n\n\texpected := getStringForType(typ)\n\ttmp := pop(&v, 2)\n\treceived := strings.Join(tmp, \" \")\n\tif received != expected {\n\t\terr = makeErrBadFrame(\"wanted %q but got %q\", expected, received)\n\t\treturn\n\t}\n\tspfn := pop(&v, 1)\n\tif spfn[0] != strings.ToUpper(SaltpackFormatName) {\n\t\terr = makeErrBadFrame(\"bad format name (%s)\", spfn[0])\n\t\treturn\n\t}\n\tif len(v) > 0 {\n\t\tbrand = v[0]\n\t}\n\treturn brand, err\n}\n<|endoftext|>"} {"text":"<commit_before>package mumgo\n\nimport (\n\tproto \"code.google.com\/p\/goprotobuf\/proto\"\n\tmumble \"github.com\/handymic\/MumbleProto-go\"\n\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Message to type mappings to ease building the appropriate\n\/\/ protobuf message on the fly\nvar messageTypes = map[reflect.Type]int{\n\treflect.TypeOf(&mumble.Version{}): 0,\n\treflect.TypeOf(&mumble.UDPTunnel{}): 1,\n\treflect.TypeOf(&mumble.Authenticate{}): 2,\n\treflect.TypeOf(&mumble.Ping{}): 3,\n\treflect.TypeOf(&mumble.Reject{}): 4,\n\treflect.TypeOf(&mumble.ServerSync{}): 5,\n\treflect.TypeOf(&mumble.ChannelRemove{}): 6,\n\treflect.TypeOf(&mumble.ChannelState{}): 7,\n\treflect.TypeOf(&mumble.UserRemove{}): 8,\n\treflect.TypeOf(&mumble.UserState{}): 9,\n\treflect.TypeOf(&mumble.BanList{}): 10,\n\treflect.TypeOf(&mumble.TextMessage{}): 11,\n\treflect.TypeOf(&mumble.PermissionDenied{}): 12,\n\treflect.TypeOf(&mumble.ACL{}): 13,\n\treflect.TypeOf(&mumble.QueryUsers{}): 14,\n\treflect.TypeOf(&mumble.CryptSetup{}): 15,\n\treflect.TypeOf(&mumble.ContextActionModify{}): 16,\n\treflect.TypeOf(&mumble.ContextAction{}): 17,\n\treflect.TypeOf(&mumble.UserList{}): 18,\n\treflect.TypeOf(&mumble.VoiceTarget{}): 19,\n\treflect.TypeOf(&mumble.PermissionQuery{}): 20,\n\treflect.TypeOf(&mumble.CodecVersion{}): 21,\n\treflect.TypeOf(&mumble.UserStats{}): 22,\n\treflect.TypeOf(&mumble.RequestBlob{}): 23,\n\treflect.TypeOf(&mumble.ServerConfig{}): 24,\n\treflect.TypeOf(&mumble.SuggestConfig{}): 25}\n\n\/\/\ntype Conn struct {\n\tconn *tls.Conn\n\tinitialized bool\n}\n\n\/\/ Creates a new initialized connection\nfunc NewConn(config Config) (Conn, error) {\n\tcert, err := config.LoadCert()\n\n\tif err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\taddr := fmt.Sprint(config.host, \":\", config.port)\n\ttlsConf := &tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t\tInsecureSkipVerify: true} \/\/ TODO: fix hardcoding\n\n\t\/\/ Inits tls connection\n\ttlsConn, err := tls.Dial(\"tcp\", addr, tlsConf)\n\tif err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\t\/\/ Inits client connection\n\tconn := Conn{conn: tlsConn}\n\tif err := conn.init(config); err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ Close the connection\nfunc (c *Conn) Close() error {\n\tif c.initialized {\n\t\tif err := c.conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.initialized = false\n\t}\n\n\treturn nil\n}\n\n\/\/ Write protobuf message to connection, taking care of header construction\nfunc (c *Conn) Write(message proto.Message) (int, error) {\n\tpayload, err := proto.Marshal(message)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tvar buffer bytes.Buffer\n\tvar chunk []byte\n\n\t\/\/ Prepare *type* prefix\n\tmtype := messageTypes[reflect.TypeOf(message)]\n\tchunk = make([]byte, 2)\n\tbinary.PutVarint(chunk, int64(mtype))\n\n\tif _, err = buffer.Write(chunk); err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ Prepare *size* prefix\n\tchunk = make([]byte, 4)\n\tbinary.PutVarint(chunk, int64(len(payload)))\n\n\tif _, err = buffer.Write(chunk); err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ Prepare *payload* body\n\tif _, err = buffer.Write(payload); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn c.conn.Write(buffer.Bytes())\n}\n\n\/\/ Initializes the connection\nfunc (c *Conn) init(config Config) error {\n\tif err := c.exchangeVersion(); err != nil {\n\t\treturn err\n\t}\n\tif err := c.authenticate(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.initialized = true\n\treturn nil\n}\n\n\/\/ Exchanges version info\nfunc (c *Conn) exchangeVersion() error {\n\tmajor, minor, patch := 1, 2, 5\n\n\tversion := uint32((major << 16) | (minor << 8) | (patch & 0xFF))\n\n\t\/\/ TODO: fix hardcoding !!!\n\trelease := \"mumgo 0.0.1\"\n\tos, osVersion := \"linux\", \"#1 SMP PREEMPT Tue May 13 16:41:39 CEST 2014\"\n\n\t_, err := c.Write(&mumble.Version{\n\t\tVersion: &version,\n\t\tRelease: &release,\n\t\tOs: &os,\n\t\tOsVersion: &osVersion})\n\n\treturn err\n}\n\n\/\/ Performs authentication\nfunc (c *Conn) authenticate(config Config) error {\n\topus := true\n\n\t_, err := c.Write(&mumble.Authenticate{\n\t\tUsername: &config.username,\n\t\tPassword: &config.password,\n\t\tOpus: &opus})\n\n\treturn err\n}\n<commit_msg>More appropriate indicator of connection state.<commit_after>package mumgo\n\nimport (\n\tproto \"code.google.com\/p\/goprotobuf\/proto\"\n\tmumble \"github.com\/handymic\/MumbleProto-go\"\n\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Message to type mappings to ease building the appropriate\n\/\/ protobuf message on the fly\nvar messageTypes = map[reflect.Type]int{\n\treflect.TypeOf(&mumble.Version{}): 0,\n\treflect.TypeOf(&mumble.UDPTunnel{}): 1,\n\treflect.TypeOf(&mumble.Authenticate{}): 2,\n\treflect.TypeOf(&mumble.Ping{}): 3,\n\treflect.TypeOf(&mumble.Reject{}): 4,\n\treflect.TypeOf(&mumble.ServerSync{}): 5,\n\treflect.TypeOf(&mumble.ChannelRemove{}): 6,\n\treflect.TypeOf(&mumble.ChannelState{}): 7,\n\treflect.TypeOf(&mumble.UserRemove{}): 8,\n\treflect.TypeOf(&mumble.UserState{}): 9,\n\treflect.TypeOf(&mumble.BanList{}): 10,\n\treflect.TypeOf(&mumble.TextMessage{}): 11,\n\treflect.TypeOf(&mumble.PermissionDenied{}): 12,\n\treflect.TypeOf(&mumble.ACL{}): 13,\n\treflect.TypeOf(&mumble.QueryUsers{}): 14,\n\treflect.TypeOf(&mumble.CryptSetup{}): 15,\n\treflect.TypeOf(&mumble.ContextActionModify{}): 16,\n\treflect.TypeOf(&mumble.ContextAction{}): 17,\n\treflect.TypeOf(&mumble.UserList{}): 18,\n\treflect.TypeOf(&mumble.VoiceTarget{}): 19,\n\treflect.TypeOf(&mumble.PermissionQuery{}): 20,\n\treflect.TypeOf(&mumble.CodecVersion{}): 21,\n\treflect.TypeOf(&mumble.UserStats{}): 22,\n\treflect.TypeOf(&mumble.RequestBlob{}): 23,\n\treflect.TypeOf(&mumble.ServerConfig{}): 24,\n\treflect.TypeOf(&mumble.SuggestConfig{}): 25}\n\n\/\/\ntype Conn struct {\n\tconn *tls.Conn\n\tconnected bool\n}\n\n\/\/ Creates a new connected connection\nfunc NewConn(config Config) (Conn, error) {\n\tcert, err := config.LoadCert()\n\n\tif err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\taddr := fmt.Sprint(config.host, \":\", config.port)\n\ttlsConf := &tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t\tInsecureSkipVerify: true} \/\/ TODO: fix hardcoding\n\n\t\/\/ Inits tls connection\n\ttlsConn, err := tls.Dial(\"tcp\", addr, tlsConf)\n\tif err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\t\/\/ Inits client connection\n\tconn := Conn{conn: tlsConn}\n\tif err := conn.init(config); err != nil {\n\t\treturn Conn{}, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ Close the connection\nfunc (c *Conn) Close() error {\n\tif c.connected {\n\t\tif err := c.conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.connected = false\n\t}\n\n\treturn nil\n}\n\n\/\/ Write protobuf message to connection, taking care of header construction\nfunc (c *Conn) Write(message proto.Message) (int, error) {\n\tpayload, err := proto.Marshal(message)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tvar buffer bytes.Buffer\n\tvar chunk []byte\n\n\t\/\/ Prepare *type* prefix\n\tmtype := messageTypes[reflect.TypeOf(message)]\n\tchunk = make([]byte, 2)\n\tbinary.PutVarint(chunk, int64(mtype))\n\n\tif _, err = buffer.Write(chunk); err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ Prepare *size* prefix\n\tchunk = make([]byte, 4)\n\tbinary.PutVarint(chunk, int64(len(payload)))\n\n\tif _, err = buffer.Write(chunk); err != nil {\n\t\treturn -1, err\n\t}\n\n\t\/\/ Prepare *payload* body\n\tif _, err = buffer.Write(payload); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn c.conn.Write(buffer.Bytes())\n}\n\n\/\/ Initializes the connection\nfunc (c *Conn) init(config Config) error {\n\tif err := c.exchangeVersion(); err != nil {\n\t\treturn err\n\t}\n\tif err := c.authenticate(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.connected = true\n\treturn nil\n}\n\n\/\/ Exchanges version info\nfunc (c *Conn) exchangeVersion() error {\n\tmajor, minor, patch := 1, 2, 5\n\n\tversion := uint32((major << 16) | (minor << 8) | (patch & 0xFF))\n\n\t\/\/ TODO: fix hardcoding !!!\n\trelease := \"mumgo 0.0.1\"\n\tos, osVersion := \"linux\", \"#1 SMP PREEMPT Tue May 13 16:41:39 CEST 2014\"\n\n\t_, err := c.Write(&mumble.Version{\n\t\tVersion: &version,\n\t\tRelease: &release,\n\t\tOs: &os,\n\t\tOsVersion: &osVersion})\n\n\treturn err\n}\n\n\/\/ Performs authentication\nfunc (c *Conn) authenticate(config Config) error {\n\topus := true\n\n\t_, err := c.Write(&mumble.Authenticate{\n\t\tUsername: &config.username,\n\t\tPassword: &config.password,\n\t\tOpus: &opus})\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) Liam Stanley <me@liamstanley.io>. All rights reserved. Use\n\/\/ of this source code is governed by the MIT license that can be found in\n\/\/ the LICENSE file.\n\npackage girc\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/net\/proxy\"\n)\n\n\/\/ Messages are delimited with CR and LF line endings, we're using the last\n\/\/ one to split the stream. Both are removed during parsing of the message.\nconst delim byte = '\\n'\n\nvar endline = []byte(\"\\r\\n\")\n\n\/\/ ircConn represents an IRC network protocol connection, it consists of an\n\/\/ Encoder and Decoder to manage i\/o.\ntype ircConn struct {\n\tircEncoder\n\tircDecoder\n\tlconn net.Conn\n\n\t\/\/ lastWrite is used ot keep track of when we last wrote to the server.\n\tlastWrite time.Time\n\t\/\/ writeDelay is used to keep track of rate limiting of events sent to\n\t\/\/ the server.\n\twriteDelay time.Duration\n\n\t\/\/ connected is true if we're actively connected to a server.\n\tconnected bool\n\t\/\/ connTime is the time at which the client has connected to a server.\n\tconnTime *time.Time\n\n\t\/\/ lastPing is the last time that we pinged the server.\n\tlastPing time.Time\n\t\/\/ lastPong is the last successful time that we pinged the server and\n\t\/\/ received a successful pong back.\n\tlastPong time.Time\n\tpingDelay time.Duration\n}\n\n\/\/ newConn sets up and returns a new connection to the server. This includes\n\/\/ setting up things like proxies, ssl\/tls, and other misc. things.\nfunc newConn(conf Config, addr string) (*ircConn, error) {\n\t\/\/ Sanity check a few options.\n\tif conf.Server == \"\" {\n\t\treturn nil, errors.New(\"invalid server specified\")\n\t}\n\n\tif conf.Port < 21 || conf.Port > 65535 {\n\t\treturn nil, errors.New(\"invalid port (21-65535)\")\n\t}\n\n\tif !IsValidNick(conf.Nick) || !IsValidUser(conf.User) {\n\t\treturn nil, errors.New(\"invalid nickname or user\")\n\t}\n\n\tvar conn net.Conn\n\tvar err error\n\n\tdialer := &net.Dialer{Timeout: 5 * time.Second}\n\n\tif conf.Bind != \"\" {\n\t\tvar local *net.TCPAddr\n\t\tlocal, err = net.ResolveTCPAddr(\"tcp\", conf.Bind+\":0\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to resolve bind address %s: %s\", conf.Bind, err)\n\t\t}\n\n\t\tdialer.LocalAddr = local\n\t}\n\n\tif conf.Proxy != \"\" {\n\t\tvar proxyURI *url.URL\n\t\tvar proxyDialer proxy.Dialer\n\n\t\tproxyURI, err = url.Parse(conf.Proxy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to use proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\n\t\tproxyDialer, err = proxy.FromURL(proxyURI, dialer)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to use proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\n\t\tconn, err = proxyDialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to connect to proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\t} else {\n\t\tconn, err = dialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to connect to %q: %s\", addr, err)\n\t\t}\n\t}\n\n\tif conf.SSL {\n\t\tvar sslConf *tls.Config\n\n\t\tif conf.TLSConfig == nil {\n\t\t\tsslConf = &tls.Config{ServerName: conf.Server}\n\t\t} else {\n\t\t\tsslConf = conf.TLSConfig\n\t\t}\n\n\t\ttlsConn := tls.Client(conn, sslConf)\n\t\tif err = tlsConn.Handshake(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed handshake during tls conn to %q: %s\", addr, err)\n\t\t}\n\t\tconn = tlsConn\n\t}\n\n\tctime := time.Now()\n\n\treturn &ircConn{\n\t\tircEncoder: ircEncoder{writer: conn},\n\t\tircDecoder: ircDecoder{reader: bufio.NewReader(conn)},\n\t\tlconn: conn,\n\t\tconnTime: &ctime,\n\t\tconnected: true,\n\t}, nil\n}\n\n\/\/ Close closes the underlying ReadWriteCloser.\nfunc (c *ircConn) Close() error {\n\treturn c.lconn.Close()\n}\n\n\/\/ setTimeout applies a deadline that the connection must respond back with,\n\/\/ within the specified time.\nfunc (c *ircConn) setTimeout(timeout time.Duration) {\n\tc.lconn.SetDeadline(time.Now().Add(timeout))\n}\n\n\/\/ rate allows limiting events based on how frequent the event is being sent,\n\/\/ as well as how many characters each event has.\nfunc (c *ircConn) rate(chars int) time.Duration {\n\t_time := time.Second + ((time.Duration(chars) * time.Second) \/ 100)\n\tif c.writeDelay += _time - time.Now().Sub(c.lastWrite); c.writeDelay < 0 {\n\t\tc.writeDelay = 0\n\t}\n\n\tif c.writeDelay > (8 * time.Second) {\n\t\treturn _time\n\t}\n\n\treturn 0\n}\n\n\/\/ ircDecoder reads Event objects from an input stream.\ntype ircDecoder struct {\n\treader *bufio.Reader\n\tline string\n\tmu sync.Mutex\n}\n\n\/\/ Decode attempts to read a single Event from the stream, returns non-nil\n\/\/ error if read failed. event may be nil if unparseable.\nfunc (dec *ircDecoder) Decode() (event *Event, err error) {\n\tdec.mu.Lock()\n\tdec.line, err = dec.reader.ReadString(delim)\n\tdec.mu.Unlock()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ParseEvent(dec.line), nil\n}\n\n\/\/ ircEncoder writes Event objects to an output stream.\ntype ircEncoder struct {\n\twriter io.Writer\n\tmu sync.Mutex\n}\n\n\/\/ Encode writes the IRC encoding of m to the stream. Goroutine safe.\n\/\/ returns non-nil error if the write to the underlying stream stopped early.\nfunc (enc *ircEncoder) Encode(e *Event) (err error) {\n\t_, err = enc.Write(e.Bytes())\n\n\treturn\n}\n\n\/\/ Write writes len(p) bytes from p followed by CR+LF. Goroutine safe.\nfunc (enc *ircEncoder) Write(p []byte) (n int, err error) {\n\tenc.mu.Lock()\n\tdefer enc.mu.Unlock()\n\n\tn, err = enc.writer.Write(p)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = enc.writer.Write(endline)\n\n\treturn\n}\n\n\/\/ Connect attempts to connect to the given IRC server\nfunc (c *Client) Connect() error {\n\t\/\/ Clean up any old running stuff.\n\tc.cleanup(false)\n\n\t\/\/ We want to be the only one handling connects\/disconnects right now.\n\tc.cmux.Lock()\n\n\t\/\/ Reset the state.\n\tc.state = newState()\n\n\t\/\/ Validate info, and actually make the connection.\n\tc.debug.Printf(\"connecting to %s...\", c.Server())\n\tconn, err := newConn(c.Config, c.Server())\n\tif err != nil {\n\t\tc.cmux.Unlock()\n\t\treturn err\n\t}\n\n\tc.conn = conn\n\tc.cmux.Unlock()\n\n\t\/\/ Start read loop to process messages from the server.\n\tvar rctx, ectx, sctx, pctx context.Context\n\trctx, c.closeRead = context.WithCancel(context.Background())\n\tectx, c.closeExec = context.WithCancel(context.Background())\n\tsctx, c.closeSend = context.WithCancel(context.Background())\n\tpctx, c.closePing = context.WithCancel(context.Background())\n\tgo c.readLoop(rctx)\n\tgo c.execLoop(ectx)\n\tgo c.sendLoop(sctx)\n\tgo c.pingLoop(pctx)\n\n\t\/\/ Send a virtual event allowing hooks for successful socket connection.\n\tc.RunHandlers(&Event{Command: INITIALIZED, Trailing: c.Server()})\n\n\t\/\/ Passwords first.\n\tif c.Config.Password != \"\" {\n\t\tc.write(&Event{Command: PASS, Params: []string{c.Config.Password}})\n\t}\n\n\t\/\/ Then nickname.\n\tc.write(&Event{Command: NICK, Params: []string{c.Config.Nick}})\n\n\t\/\/ Then username and realname.\n\tif c.Config.Name == \"\" {\n\t\tc.Config.Name = c.Config.User\n\t}\n\n\tc.write(&Event{Command: USER, Params: []string{c.Config.User, \"+iw\", \"*\"}, Trailing: c.Config.Name})\n\n\t\/\/ List the IRCv3 capabilities, specifically with the max protocol we\n\t\/\/ support.\n\tc.listCAP()\n\n\t\/\/ Consider the connection a success at this point.\n\tc.tries = 0\n\tc.reconnecting = false\n\n\treturn nil\n}\n\n\/\/ reconnect is the internal wrapper for reconnecting to the IRC server (if\n\/\/ requested.)\nfunc (c *Client) reconnect(remoteInvoked bool) (err error) {\n\tif c.reconnecting {\n\t\treturn nil\n\t}\n\tc.reconnecting = true\n\tdefer func() {\n\t\tc.reconnecting = false\n\t}()\n\n\tc.cleanup(false)\n\n\tif c.Config.ReconnectDelay < (5 * time.Second) {\n\t\tc.Config.ReconnectDelay = 5 * time.Second\n\t}\n\n\tif c.Config.Retries < 1 && !remoteInvoked {\n\t\treturn ErrDisconnected\n\t}\n\n\tif !remoteInvoked {\n\t\t\/\/ Delay so we're not slaughtering the server with a bunch of\n\t\t\/\/ connections.\n\t\tc.debug.Printf(\"reconnecting to %s in %s\", c.Server(), c.Config.ReconnectDelay)\n\t\ttime.Sleep(c.Config.ReconnectDelay)\n\t}\n\n\tfor err = c.Connect(); err != nil && c.tries < c.Config.Retries; c.tries++ {\n\t\tc.debug.Printf(\"reconnecting to %s in %s (%d tries)\", c.Server(), c.Config.ReconnectDelay, c.tries)\n\t\ttime.Sleep(c.Config.ReconnectDelay)\n\t}\n\n\tif err != nil {\n\t\t\/\/ Too many errors at this point.\n\t\tc.cleanup(false)\n\t}\n\n\treturn err\n}\n\n\/\/ Reconnect checks to make sure we want to, and then attempts to reconnect\n\/\/ to the server. This will ignore the reconnect delay.\nfunc (c *Client) Reconnect() error {\n\treturn c.reconnect(true)\n}\n\nfunc (c *Client) disconnectHandler(err error) {\n\tif err != nil {\n\t\tc.debug.Println(\"disconnecting due to error: \" + err.Error())\n\t}\n\n\trerr := c.reconnect(false)\n\tif rerr != nil {\n\t\tc.debug.Println(\"error: \" + rerr.Error())\n\t\tif c.Config.HandleError != nil {\n\t\t\tif c.Config.Retries < 1 {\n\t\t\t\tc.Config.HandleError(err)\n\t\t\t}\n\n\t\t\tc.Config.HandleError(rerr)\n\t\t}\n\t}\n}\n\n\/\/ readLoop sets a timeout of 300 seconds, and then attempts to read from the\n\/\/ IRC server. If there is an error, it calls Reconnect.\nfunc (c *Client) readLoop(ctx context.Context) {\n\tvar event *Event\n\tvar err error\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tc.conn.setTimeout(300 * time.Second)\n\t\t\tevent, err = c.conn.Decode()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ Attempt a reconnect (if applicable). If it fails, send\n\t\t\t\t\/\/ the error to c.Config.HandleError to be dealt with, if\n\t\t\t\t\/\/ the handler exists.\n\t\t\t\tc.disconnectHandler(err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif event == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rx <- event\n\t\t}\n\t}\n}\n\n\/\/ Send sends an event to the server. Use Client.RunHandlers() if you are\n\/\/ simply looking to trigger handlers with an event.\nfunc (c *Client) Send(event *Event) {\n\tif !c.Config.AllowFlood {\n\t\t<-time.After(c.conn.rate(event.Len()))\n\t}\n\n\tc.write(event)\n}\n\n\/\/ write is the lower level function to write an event. It does not have a\n\/\/ write-delay when sending events.\nfunc (c *Client) write(event *Event) {\n\tc.tx <- event\n}\n\nfunc (c *Client) sendLoop(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase event := <-c.tx:\n\t\t\t\/\/ Log the event.\n\t\t\tif !event.Sensitive {\n\t\t\t\tc.debug.Print(\"> \", StripRaw(event.String()))\n\t\t\t}\n\t\t\tif c.Config.Out != nil {\n\t\t\t\tif pretty, ok := event.Pretty(); ok {\n\t\t\t\t\tfmt.Fprintln(c.Config.Out, StripRaw(pretty))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc.conn.lastWrite = time.Now()\n\n\t\t\terr := c.conn.Encode(event)\n\t\t\tif err != nil {\n\t\t\t\tc.disconnectHandler(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ flushTx empties c.tx.\nfunc (c *Client) flushTx() {\n\tfor {\n\t\tselect {\n\t\tcase <-c.tx:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ ErrTimedOut is returned when we attempt to ping the server, and time out\n\/\/ before receiving a PONG back.\nvar ErrTimedOut = errors.New(\"timed out during ping to server\")\n\nfunc (c *Client) pingLoop(ctx context.Context) {\n\tc.conn.lastPing = time.Now()\n\tc.conn.lastPong = time.Now()\n\n\t\/\/ Delay for 30 seconds during connect to wait for the client to register\n\t\/\/ and what not.\n\ttime.Sleep(20 * time.Second)\n\n\ttick := time.NewTicker(c.Config.PingDelay)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tif time.Since(c.conn.lastPong) > c.Config.PingDelay+(60*time.Second) {\n\t\t\t\t\/\/ It's 60 seconds over what out ping delay is, connection\n\t\t\t\t\/\/ has probably dropped.\n\t\t\t\tc.disconnectHandler(ErrTimedOut)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.conn.lastPing = time.Now()\n\t\t\tc.Commands.Ping(fmt.Sprintf(\"%d\", time.Now().UnixNano()))\n\t\t}\n\t}\n}\n<commit_msg>refractor conn.go<commit_after>\/\/ Copyright (c) Liam Stanley <me@liamstanley.io>. All rights reserved. Use\n\/\/ of this source code is governed by the MIT license that can be found in\n\/\/ the LICENSE file.\n\npackage girc\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/net\/proxy\"\n)\n\n\/\/ Messages are delimited with CR and LF line endings, we're using the last\n\/\/ one to split the stream. Both are removed during parsing of the message.\nconst delim byte = '\\n'\n\nvar endline = []byte(\"\\r\\n\")\n\n\/\/ ircConn represents an IRC network protocol connection, it consists of an\n\/\/ Encoder and Decoder to manage i\/o.\ntype ircConn struct {\n\tio *bufio.ReadWriter\n\tsock net.Conn\n\n\t\/\/ lastWrite is used ot keep track of when we last wrote to the server.\n\tlastWrite time.Time\n\t\/\/ writeDelay is used to keep track of rate limiting of events sent to\n\t\/\/ the server.\n\twriteDelay time.Duration\n\n\t\/\/ connected is true if we're actively connected to a server.\n\tconnected bool\n\t\/\/ connTime is the time at which the client has connected to a server.\n\tconnTime *time.Time\n\n\t\/\/ lastPing is the last time that we pinged the server.\n\tlastPing time.Time\n\t\/\/ lastPong is the last successful time that we pinged the server and\n\t\/\/ received a successful pong back.\n\tlastPong time.Time\n\tpingDelay time.Duration\n}\n\n\/\/ newConn sets up and returns a new connection to the server. This includes\n\/\/ setting up things like proxies, ssl\/tls, and other misc. things.\nfunc newConn(conf Config, addr string) (*ircConn, error) {\n\t\/\/ Sanity check a few options.\n\tif conf.Server == \"\" {\n\t\treturn nil, errors.New(\"invalid server specified\")\n\t}\n\n\tif conf.Port < 21 || conf.Port > 65535 {\n\t\treturn nil, errors.New(\"invalid port (21-65535)\")\n\t}\n\n\tif !IsValidNick(conf.Nick) || !IsValidUser(conf.User) {\n\t\treturn nil, errors.New(\"invalid nickname or user\")\n\t}\n\n\tvar conn net.Conn\n\tvar err error\n\n\tdialer := &net.Dialer{Timeout: 5 * time.Second}\n\n\tif conf.Bind != \"\" {\n\t\tvar local *net.TCPAddr\n\t\tlocal, err = net.ResolveTCPAddr(\"tcp\", conf.Bind+\":0\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to resolve bind address %s: %s\", conf.Bind, err)\n\t\t}\n\n\t\tdialer.LocalAddr = local\n\t}\n\n\tif conf.Proxy != \"\" {\n\t\tvar proxyURI *url.URL\n\t\tvar proxyDialer proxy.Dialer\n\n\t\tproxyURI, err = url.Parse(conf.Proxy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to use proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\n\t\tproxyDialer, err = proxy.FromURL(proxyURI, dialer)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to use proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\n\t\tconn, err = proxyDialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to connect to proxy %q: %s\", conf.Proxy, err)\n\t\t}\n\t} else {\n\t\tconn, err = dialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to connect to %q: %s\", addr, err)\n\t\t}\n\t}\n\n\tif conf.SSL {\n\t\tvar tlsConn net.Conn\n\t\ttlsConn, err = tlsHandshake(conn, conf.TLSConfig, conf.Server, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconn = tlsConn\n\t}\n\n\tctime := time.Now()\n\n\tc := &ircConn{\n\t\tsock: conn,\n\t\tconnTime: &ctime,\n\t\tconnected: true,\n\t}\n\tc.newReadWriter()\n\n\treturn c, nil\n}\n\nfunc (c *ircConn) newReadWriter() {\n\tc.io = bufio.NewReadWriter(bufio.NewReader(c.sock), bufio.NewWriter(c.sock))\n}\n\nfunc tlsHandshake(conn net.Conn, conf *tls.Config, server string, validate bool) (net.Conn, error) {\n\tif conf == nil {\n\t\tconf = &tls.Config{ServerName: server, InsecureSkipVerify: !validate}\n\t}\n\n\ttlsConn := tls.Client(conn, conf)\n\treturn net.Conn(tlsConn), nil\n}\n\n\/\/ Close closes the underlying socket.\nfunc (c *ircConn) Close() error {\n\treturn c.sock.Close()\n}\n\n\/\/ Connect attempts to connect to the given IRC server\nfunc (c *Client) Connect() error {\n\t\/\/ Clean up any old running stuff.\n\tc.cleanup(false)\n\n\t\/\/ We want to be the only one handling connects\/disconnects right now.\n\tc.cmux.Lock()\n\n\t\/\/ Reset the state.\n\tc.state = newState()\n\n\t\/\/ Validate info, and actually make the connection.\n\tc.debug.Printf(\"connecting to %s...\", c.Server())\n\tconn, err := newConn(c.Config, c.Server())\n\tif err != nil {\n\t\tc.cmux.Unlock()\n\t\treturn err\n\t}\n\n\tc.conn = conn\n\tc.cmux.Unlock()\n\n\t\/\/ Start read loop to process messages from the server.\n\tvar rctx, ectx, sctx, pctx context.Context\n\trctx, c.closeRead = context.WithCancel(context.Background())\n\tectx, c.closeExec = context.WithCancel(context.Background())\n\tsctx, c.closeSend = context.WithCancel(context.Background())\n\tpctx, c.closePing = context.WithCancel(context.Background())\n\tgo c.readLoop(rctx)\n\tgo c.execLoop(ectx)\n\tgo c.sendLoop(sctx)\n\tgo c.pingLoop(pctx)\n\n\t\/\/ Send a virtual event allowing hooks for successful socket connection.\n\tc.RunHandlers(&Event{Command: INITIALIZED, Trailing: c.Server()})\n\n\t\/\/ Passwords first.\n\tif c.Config.Password != \"\" {\n\t\tc.write(&Event{Command: PASS, Params: []string{c.Config.Password}})\n\t}\n\n\t\/\/ Then nickname.\n\tc.write(&Event{Command: NICK, Params: []string{c.Config.Nick}})\n\n\t\/\/ Then username and realname.\n\tif c.Config.Name == \"\" {\n\t\tc.Config.Name = c.Config.User\n\t}\n\n\tc.write(&Event{Command: USER, Params: []string{c.Config.User, \"+iw\", \"*\"}, Trailing: c.Config.Name})\n\n\t\/\/ List the IRCv3 capabilities, specifically with the max protocol we\n\t\/\/ support.\n\tc.listCAP()\n\n\t\/\/ Consider the connection a success at this point.\n\tc.tries = 0\n\tc.reconnecting = false\n\n\treturn nil\n}\n\n\/\/ reconnect is the internal wrapper for reconnecting to the IRC server (if\n\/\/ requested.)\nfunc (c *Client) reconnect(remoteInvoked bool) (err error) {\n\tif c.reconnecting {\n\t\treturn nil\n\t}\n\tc.reconnecting = true\n\tdefer func() {\n\t\tc.reconnecting = false\n\t}()\n\n\tc.cleanup(false)\n\n\tif c.Config.ReconnectDelay < (5 * time.Second) {\n\t\tc.Config.ReconnectDelay = 5 * time.Second\n\t}\n\n\tif c.Config.Retries < 1 && !remoteInvoked {\n\t\treturn ErrDisconnected\n\t}\n\n\tif !remoteInvoked {\n\t\t\/\/ Delay so we're not slaughtering the server with a bunch of\n\t\t\/\/ connections.\n\t\tc.debug.Printf(\"reconnecting to %s in %s\", c.Server(), c.Config.ReconnectDelay)\n\t\ttime.Sleep(c.Config.ReconnectDelay)\n\t}\n\n\tfor err = c.Connect(); err != nil && c.tries < c.Config.Retries; c.tries++ {\n\t\tc.debug.Printf(\"reconnecting to %s in %s (%d tries)\", c.Server(), c.Config.ReconnectDelay, c.tries)\n\t\ttime.Sleep(c.Config.ReconnectDelay)\n\t}\n\n\tif err != nil {\n\t\t\/\/ Too many errors at this point.\n\t\tc.cleanup(false)\n\t}\n\n\treturn err\n}\n\n\/\/ Reconnect checks to make sure we want to, and then attempts to reconnect\n\/\/ to the server. This will ignore the reconnect delay.\nfunc (c *Client) Reconnect() error {\n\treturn c.reconnect(true)\n}\n\nfunc (c *Client) disconnectHandler(err error) {\n\tif err != nil {\n\t\tc.debug.Println(\"disconnecting due to error: \" + err.Error())\n\t}\n\n\trerr := c.reconnect(false)\n\tif rerr != nil {\n\t\tc.debug.Println(\"error: \" + rerr.Error())\n\t\tif c.Config.HandleError != nil {\n\t\t\tif c.Config.Retries < 1 {\n\t\t\t\tc.Config.HandleError(err)\n\t\t\t}\n\n\t\t\tc.Config.HandleError(rerr)\n\t\t}\n\t}\n}\n\n\/\/ readLoop sets a timeout of 300 seconds, and then attempts to read from the\n\/\/ IRC server. If there is an error, it calls Reconnect.\nfunc (c *Client) readLoop(ctx context.Context) {\n\tvar event *Event\n\tvar line string\n\tvar err error\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tc.conn.sock.SetDeadline(time.Now().Add(300 * time.Second))\n\t\t\tline, err = c.conn.io.ReadString(delim)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ Attempt a reconnect (if applicable). If it fails, send\n\t\t\t\t\/\/ the error to c.Config.HandleError to be dealt with, if\n\t\t\t\t\/\/ the handler exists.\n\t\t\t\tc.disconnectHandler(err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tevent = ParseEvent(line)\n\t\t\tif event == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rx <- event\n\t\t}\n\t}\n}\n\n\/\/ Send sends an event to the server. Use Client.RunHandlers() if you are\n\/\/ simply looking to trigger handlers with an event.\nfunc (c *Client) Send(event *Event) {\n\tif !c.Config.AllowFlood {\n\t\t<-time.After(c.conn.rate(event.Len()))\n\t}\n\n\tc.write(event)\n}\n\n\/\/ write is the lower level function to write an event. It does not have a\n\/\/ write-delay when sending events.\nfunc (c *Client) write(event *Event) {\n\tc.tx <- event\n}\n\n\/\/ rate allows limiting events based on how frequent the event is being sent,\n\/\/ as well as how many characters each event has.\nfunc (c *ircConn) rate(chars int) time.Duration {\n\t_time := time.Second + ((time.Duration(chars) * time.Second) \/ 100)\n\tif c.writeDelay += _time - time.Now().Sub(c.lastWrite); c.writeDelay < 0 {\n\t\tc.writeDelay = 0\n\t}\n\n\tif c.writeDelay > (8 * time.Second) {\n\t\treturn _time\n\t}\n\n\treturn 0\n}\n\nfunc (c *Client) sendLoop(ctx context.Context) {\n\tvar err error\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase event := <-c.tx:\n\t\t\t\/\/ Log the event.\n\t\t\tif !event.Sensitive {\n\t\t\t\tc.debug.Print(\"> \", StripRaw(event.String()))\n\t\t\t}\n\t\t\tif c.Config.Out != nil {\n\t\t\t\tif pretty, ok := event.Pretty(); ok {\n\t\t\t\t\tfmt.Fprintln(c.Config.Out, StripRaw(pretty))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc.conn.lastWrite = time.Now()\n\n\t\t\t\/\/ Write the raw line.\n\t\t\t_, err = c.conn.io.Write(event.Bytes())\n\t\t\tif err == nil {\n\t\t\t\t\/\/ And the \\r\\n.\n\t\t\t\t_, err = c.conn.io.Write(endline)\n\t\t\t\tif err == nil {\n\t\t\t\t\t\/\/ Lastly, flush everything to the socket.\n\t\t\t\t\terr = c.conn.io.Flush()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tc.disconnectHandler(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ flushTx empties c.tx.\nfunc (c *Client) flushTx() {\n\tfor {\n\t\tselect {\n\t\tcase <-c.tx:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ ErrTimedOut is returned when we attempt to ping the server, and time out\n\/\/ before receiving a PONG back.\nvar ErrTimedOut = errors.New(\"timed out during ping to server\")\n\nfunc (c *Client) pingLoop(ctx context.Context) {\n\tc.conn.lastPing = time.Now()\n\tc.conn.lastPong = time.Now()\n\n\t\/\/ Delay for 30 seconds during connect to wait for the client to register\n\t\/\/ and what not.\n\ttime.Sleep(20 * time.Second)\n\n\ttick := time.NewTicker(c.Config.PingDelay)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tif time.Since(c.conn.lastPong) > c.Config.PingDelay+(60*time.Second) {\n\t\t\t\t\/\/ It's 60 seconds over what out ping delay is, connection\n\t\t\t\t\/\/ has probably dropped.\n\t\t\t\tc.disconnectHandler(ErrTimedOut)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.conn.lastPing = time.Now()\n\t\t\tc.Commands.Ping(fmt.Sprintf(\"%d\", time.Now().UnixNano()))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"github.com\/jacobsa\/gcsfuse\/fs\/inode\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n)\n\ntype fileSystem struct {\n\tfuseutil.NotImplementedFileSystem\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tclock timeutil.Clock\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ When acquiring this lock, the caller must hold no inode or dir handle\n\t\/\/ locks.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The user and group owning everything in the file system.\n\t\/\/\n\t\/\/ GUARDED_BY(Mu)\n\tuid uint32\n\tgid uint32\n\n\t\/\/ The collection of live inodes, keyed by inode ID. No ID less than\n\t\/\/ fuse.RootInodeID is ever used.\n\t\/\/\n\t\/\/ TODO(jacobsa): Implement ForgetInode support in the fuse package, then\n\t\/\/ implement the method here and clean up these maps.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *inode.DirInode or *inode.FileInode\n\t\/\/ INVARIANT: For all keys k, k >= fuse.RootInodeID\n\t\/\/ INVARIANT: For all keys k, inodes[k].ID() == k\n\t\/\/ INVARIANT: inodes[fuse.RootInodeID] is of type *inode.DirInode\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tinodes map[fuse.InodeID]inode.Inode\n\n\t\/\/ The next inode ID to hand out. We assume that this will never overflow,\n\t\/\/ since even if we were handing out inode IDs at 4 GHz, it would still take\n\t\/\/ over a century to do so.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in inodes, k < nextInodeID\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tnextInodeID fuse.InodeID\n\n\t\/\/ An index of all directory inodes by Name().\n\t\/\/\n\t\/\/ INVARIANT: For each key k, isDirName(k)\n\t\/\/ INVARIANT: For each key k, dirIndex[k].Name() == k\n\t\/\/ INVARIANT: The values are all and only the values of the inodes map of\n\t\/\/ type *inode.DirInode.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tdirIndex map[string]*inode.DirInode\n\n\t\/\/ An index of all file inodes by (Name(), SourceGeneration()) pairs.\n\t\/\/\n\t\/\/ INVARIANT: For each key k, !isDirName(k)\n\t\/\/ INVARIANT: For each key k, fileIndex[k].Name() == k.name\n\t\/\/ INVARIANT: For each key k, fileIndex[k].SourceGeneration() == k.gen\n\t\/\/ INVARIANT: The values are all and only the values of the inodes map of\n\t\/\/ type *inode.FileInode.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfileIndex map[nameAndGen]*inode.FileInode\n\n\t\/\/ The collection of live handles, keyed by handle ID.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *dirHandle\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\thandles map[fuse.HandleID]interface{}\n\n\t\/\/ The next handle ID to hand out. We assume that this will never overflow.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in handles, k < nextHandleID\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tnextHandleID fuse.HandleID\n}\n\ntype nameAndGen struct {\n\tname string\n\tgen int64\n}\n\n\/\/ Create a fuse file system whose root directory is the root of the supplied\n\/\/ bucket. The supplied clock will be used for cache invalidation, modification\n\/\/ times, etc.\nfunc NewFileSystem(\n\tclock timeutil.Clock,\n\tbucket gcs.Bucket) (ffs fuse.FileSystem, err error) {\n\t\/\/ Set up the basic struct.\n\tfs := &fileSystem{\n\t\tclock: clock,\n\t\tbucket: bucket,\n\t\tinodes: make(map[fuse.InodeID]inode.Inode),\n\t\tnextInodeID: fuse.RootInodeID + 1,\n\t\tdirIndex: make(map[string]*inode.DirInode),\n\t\tfileIndex: make(map[nameAndGen]*inode.FileInode),\n\t\thandles: make(map[fuse.HandleID]interface{}),\n\t}\n\n\t\/\/ Set up the root inode.\n\troot := inode.NewDirInode(bucket, fuse.RootInodeID, \"\")\n\tfs.inodes[fuse.RootInodeID] = root\n\tfs.dirIndex[\"\"] = root\n\n\t\/\/ Set up invariant checking.\n\tfs.mu = syncutil.NewInvariantMutex(fs.checkInvariants)\n\n\tffs = fs\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc isDirName(name string) bool {\n\treturn name == \"\" || name[len(name)-1] == '\/'\n}\n\nfunc (fs *fileSystem) checkInvariants() {\n\t\/\/ Check inode keys.\n\tfor id, _ := range fs.inodes {\n\t\tif id < fuse.RootInodeID || id >= fs.nextInodeID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal inode ID: %v\", id))\n\t\t}\n\t}\n\n\t\/\/ Check the root inode.\n\t_ = fs.inodes[fuse.RootInodeID].(*inode.DirInode)\n\n\t\/\/ Check each inode, and the indexes over them. Keep a count of each type\n\t\/\/ seen.\n\tdirsSeen := 0\n\tfilesSeen := 0\n\tfor id, in := range fs.inodes {\n\t\t\/\/ Check the ID.\n\t\tif in.ID() != id {\n\t\t\tpanic(fmt.Sprintf(\"ID mismatch: %v vs. %v\", in.ID(), id))\n\t\t}\n\n\t\t\/\/ Check type-specific stuff.\n\t\tswitch typed := in.(type) {\n\t\tcase *inode.DirInode:\n\t\t\tdirsSeen++\n\n\t\t\tif !isDirName(typed.Name()) {\n\t\t\t\tpanic(fmt.Sprintf(\"Unexpected directory name: %s\", typed.Name()))\n\t\t\t}\n\n\t\t\tif fs.dirIndex[typed.Name()] != typed {\n\t\t\t\tpanic(fmt.Sprintf(\"dirIndex mismatch: %s\", typed.Name()))\n\t\t\t}\n\n\t\tcase *inode.FileInode:\n\t\t\tfilesSeen++\n\n\t\t\tif isDirName(typed.Name()) {\n\t\t\t\tpanic(fmt.Sprintf(\"Unexpected file name: %s\", typed.Name()))\n\t\t\t}\n\n\t\t\tnandg := nameAndGen{typed.Name(), typed.SourceGeneration()}\n\t\t\tif fs.fileIndex[nandg] != typed {\n\t\t\t\tpanic(\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"fileIndex mismatch: %s, %v\",\n\t\t\t\t\t\ttyped.Name(),\n\t\t\t\t\t\ttyped.SourceGeneration()))\n\t\t\t}\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unexpected inode type: %v\", reflect.TypeOf(in)))\n\t\t}\n\t}\n\n\t\/\/ Make sure that the indexes are exhaustive.\n\tif len(fs.dirIndex) != dirsSeen {\n\t\tpanic(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"dirIndex length mismatch: %v vs. %v\",\n\t\t\t\tlen(fs.dirIndex),\n\t\t\t\tdirsSeen))\n\t}\n\n\tif len(fs.fileIndex) != filesSeen {\n\t\tpanic(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"fileIndex length mismatch: %v vs. %v\",\n\t\t\t\tlen(fs.fileIndex),\n\t\t\t\tdirsSeen))\n\t}\n\n\t\/\/ Check handles.\n\tfor id, h := range fs.handles {\n\t\tif id >= fs.nextHandleID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal handle ID: %v\", id))\n\t\t}\n\n\t\t_ = h.(*dirHandle)\n\t}\n}\n\n\/\/ Get attributes for the inode, fixing up ownership information.\n\/\/\n\/\/ SHARED_LOCKS_REQUIRED(fs.mu)\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(in)\nfunc (fs *fileSystem) getAttributes(\n\tctx context.Context,\n\tin inode.Inode) (attrs fuse.InodeAttributes, err error) {\n\tattrs, err = in.Attributes(ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tattrs.Uid = fs.uid\n\tattrs.Gid = fs.gid\n\n\treturn\n}\n\n\/\/ Find a directory inode for the given object record. Create one if there\n\/\/ isn't already one available.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(fs.mu)\nfunc (fs *fileSystem) lookUpOrCreateDirInode(\n\tctx context.Context,\n\to *storage.Object) (in *inode.DirInode, err error) {\n\t\/\/ Do we already have an inode for this name?\n\tif in = fs.dirIndex[o.Name]; in != nil {\n\t\treturn\n\t}\n\n\t\/\/ Mint an ID.\n\tid := fs.nextInodeID\n\tfs.nextInodeID++\n\n\t\/\/ Create and index an inode.\n\tin = inode.NewDirInode(fs.bucket, id, o.Name)\n\tfs.inodes[id] = in\n\tfs.dirIndex[in.Name()] = in\n\n\treturn\n}\n\n\/\/ Find a file inode for the given object record. Create one if there isn't\n\/\/ already one available.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(fs.mu)\nfunc (fs *fileSystem) lookUpOrCreateFileInode(\n\tctx context.Context,\n\to *storage.Object) (in *inode.FileInode, err error) {\n\tnandg := nameAndGen{\n\t\tname: o.Name,\n\t\tgen: o.Generation,\n\t}\n\n\t\/\/ Do we already have an inode for this (name, generation) pair?\n\tif in = fs.fileIndex[nandg]; in != nil {\n\t\treturn\n\t}\n\n\t\/\/ Mint an ID.\n\tid := fs.nextInodeID\n\tfs.nextInodeID++\n\n\t\/\/ Create and index an inode.\n\tin = inode.NewFileInode(fs.bucket, id, o)\n\tfs.inodes[id] = in\n\tfs.fileIndex[nandg] = in\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fuse.FileSystem methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *fileSystem) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (resp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Store the mounting user's info for later.\n\tfs.uid = req.Header.Uid\n\tfs.gid = req.Header.Gid\n\n\treturn\n}\n\nfunc (fs *fileSystem) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (resp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Find the parent directory in question.\n\tparent := fs.inodes[req.Parent].(*inode.DirInode)\n\n\t\/\/ Find a record for the child with the given name.\n\to, err := parent.LookUpChild(ctx, req.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Is the child a directory or a file?\n\tvar in inode.Inode\n\tif isDirName(o.Name) {\n\t\tin, err = fs.lookUpOrCreateDirInode(ctx, o)\n\t} else {\n\t\tin, err = fs.lookUpOrCreateFileInode(ctx, o)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Fill out the response.\n\tresp.Entry.Child = in.ID()\n\tif resp.Entry.Attributes, err = fs.getAttributes(ctx, in); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *fileSystem) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Find the inode.\n\tin := fs.inodes[req.Inode]\n\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Grab its attributes.\n\tresp.Attributes, err = fs.getAttributes(ctx, in)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *fileSystem) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (resp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Make sure the inode still exists and is a directory. If not, something has\n\t\/\/ screwed up because the VFS layer shouldn't have let us forget the inode\n\t\/\/ before opening it.\n\tin := fs.inodes[req.Inode].(*inode.DirInode)\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Allocate a handle.\n\thandleID := fs.nextHandleID\n\tfs.nextHandleID++\n\n\tfs.handles[handleID] = newDirHandle(in)\n\tresp.Handle = handleID\n\n\treturn\n}\n\nfunc (fs *fileSystem) ReadDir(\n\tctx context.Context,\n\treq *fuse.ReadDirRequest) (resp *fuse.ReadDirResponse, err error) {\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Find the handle.\n\tdh := fs.handles[req.Handle].(*dirHandle)\n\tdh.Mu.Lock()\n\tdefer dh.Mu.Unlock()\n\n\t\/\/ Serve the request.\n\tresp, err = dh.ReadDir(ctx, req)\n\n\treturn\n}\n\nfunc (fs *fileSystem) ReleaseDirHandle(\n\tctx context.Context,\n\treq *fuse.ReleaseDirHandleRequest) (\n\tresp *fuse.ReleaseDirHandleResponse, err error) {\n\tresp = &fuse.ReleaseDirHandleResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check that this handle exists and is of the correct type.\n\t_ = fs.handles[req.Handle].(*dirHandle)\n\n\t\/\/ Clear the entry from the map.\n\tdelete(fs.handles, req.Handle)\n\n\treturn\n}\n<commit_msg>Implemented OpenFile.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"github.com\/jacobsa\/gcsfuse\/fs\/inode\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n)\n\ntype fileSystem struct {\n\tfuseutil.NotImplementedFileSystem\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tclock timeutil.Clock\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ When acquiring this lock, the caller must hold no inode or dir handle\n\t\/\/ locks.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The user and group owning everything in the file system.\n\t\/\/\n\t\/\/ GUARDED_BY(Mu)\n\tuid uint32\n\tgid uint32\n\n\t\/\/ The collection of live inodes, keyed by inode ID. No ID less than\n\t\/\/ fuse.RootInodeID is ever used.\n\t\/\/\n\t\/\/ TODO(jacobsa): Implement ForgetInode support in the fuse package, then\n\t\/\/ implement the method here and clean up these maps.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *inode.DirInode or *inode.FileInode\n\t\/\/ INVARIANT: For all keys k, k >= fuse.RootInodeID\n\t\/\/ INVARIANT: For all keys k, inodes[k].ID() == k\n\t\/\/ INVARIANT: inodes[fuse.RootInodeID] is of type *inode.DirInode\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tinodes map[fuse.InodeID]inode.Inode\n\n\t\/\/ The next inode ID to hand out. We assume that this will never overflow,\n\t\/\/ since even if we were handing out inode IDs at 4 GHz, it would still take\n\t\/\/ over a century to do so.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in inodes, k < nextInodeID\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tnextInodeID fuse.InodeID\n\n\t\/\/ An index of all directory inodes by Name().\n\t\/\/\n\t\/\/ INVARIANT: For each key k, isDirName(k)\n\t\/\/ INVARIANT: For each key k, dirIndex[k].Name() == k\n\t\/\/ INVARIANT: The values are all and only the values of the inodes map of\n\t\/\/ type *inode.DirInode.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tdirIndex map[string]*inode.DirInode\n\n\t\/\/ An index of all file inodes by (Name(), SourceGeneration()) pairs.\n\t\/\/\n\t\/\/ INVARIANT: For each key k, !isDirName(k)\n\t\/\/ INVARIANT: For each key k, fileIndex[k].Name() == k.name\n\t\/\/ INVARIANT: For each key k, fileIndex[k].SourceGeneration() == k.gen\n\t\/\/ INVARIANT: The values are all and only the values of the inodes map of\n\t\/\/ type *inode.FileInode.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfileIndex map[nameAndGen]*inode.FileInode\n\n\t\/\/ The collection of live handles, keyed by handle ID.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *dirHandle\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\thandles map[fuse.HandleID]interface{}\n\n\t\/\/ The next handle ID to hand out. We assume that this will never overflow.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in handles, k < nextHandleID\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tnextHandleID fuse.HandleID\n}\n\ntype nameAndGen struct {\n\tname string\n\tgen int64\n}\n\n\/\/ Create a fuse file system whose root directory is the root of the supplied\n\/\/ bucket. The supplied clock will be used for cache invalidation, modification\n\/\/ times, etc.\nfunc NewFileSystem(\n\tclock timeutil.Clock,\n\tbucket gcs.Bucket) (ffs fuse.FileSystem, err error) {\n\t\/\/ Set up the basic struct.\n\tfs := &fileSystem{\n\t\tclock: clock,\n\t\tbucket: bucket,\n\t\tinodes: make(map[fuse.InodeID]inode.Inode),\n\t\tnextInodeID: fuse.RootInodeID + 1,\n\t\tdirIndex: make(map[string]*inode.DirInode),\n\t\tfileIndex: make(map[nameAndGen]*inode.FileInode),\n\t\thandles: make(map[fuse.HandleID]interface{}),\n\t}\n\n\t\/\/ Set up the root inode.\n\troot := inode.NewDirInode(bucket, fuse.RootInodeID, \"\")\n\tfs.inodes[fuse.RootInodeID] = root\n\tfs.dirIndex[\"\"] = root\n\n\t\/\/ Set up invariant checking.\n\tfs.mu = syncutil.NewInvariantMutex(fs.checkInvariants)\n\n\tffs = fs\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc isDirName(name string) bool {\n\treturn name == \"\" || name[len(name)-1] == '\/'\n}\n\nfunc (fs *fileSystem) checkInvariants() {\n\t\/\/ Check inode keys.\n\tfor id, _ := range fs.inodes {\n\t\tif id < fuse.RootInodeID || id >= fs.nextInodeID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal inode ID: %v\", id))\n\t\t}\n\t}\n\n\t\/\/ Check the root inode.\n\t_ = fs.inodes[fuse.RootInodeID].(*inode.DirInode)\n\n\t\/\/ Check each inode, and the indexes over them. Keep a count of each type\n\t\/\/ seen.\n\tdirsSeen := 0\n\tfilesSeen := 0\n\tfor id, in := range fs.inodes {\n\t\t\/\/ Check the ID.\n\t\tif in.ID() != id {\n\t\t\tpanic(fmt.Sprintf(\"ID mismatch: %v vs. %v\", in.ID(), id))\n\t\t}\n\n\t\t\/\/ Check type-specific stuff.\n\t\tswitch typed := in.(type) {\n\t\tcase *inode.DirInode:\n\t\t\tdirsSeen++\n\n\t\t\tif !isDirName(typed.Name()) {\n\t\t\t\tpanic(fmt.Sprintf(\"Unexpected directory name: %s\", typed.Name()))\n\t\t\t}\n\n\t\t\tif fs.dirIndex[typed.Name()] != typed {\n\t\t\t\tpanic(fmt.Sprintf(\"dirIndex mismatch: %s\", typed.Name()))\n\t\t\t}\n\n\t\tcase *inode.FileInode:\n\t\t\tfilesSeen++\n\n\t\t\tif isDirName(typed.Name()) {\n\t\t\t\tpanic(fmt.Sprintf(\"Unexpected file name: %s\", typed.Name()))\n\t\t\t}\n\n\t\t\tnandg := nameAndGen{typed.Name(), typed.SourceGeneration()}\n\t\t\tif fs.fileIndex[nandg] != typed {\n\t\t\t\tpanic(\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\"fileIndex mismatch: %s, %v\",\n\t\t\t\t\t\ttyped.Name(),\n\t\t\t\t\t\ttyped.SourceGeneration()))\n\t\t\t}\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unexpected inode type: %v\", reflect.TypeOf(in)))\n\t\t}\n\t}\n\n\t\/\/ Make sure that the indexes are exhaustive.\n\tif len(fs.dirIndex) != dirsSeen {\n\t\tpanic(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"dirIndex length mismatch: %v vs. %v\",\n\t\t\t\tlen(fs.dirIndex),\n\t\t\t\tdirsSeen))\n\t}\n\n\tif len(fs.fileIndex) != filesSeen {\n\t\tpanic(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"fileIndex length mismatch: %v vs. %v\",\n\t\t\t\tlen(fs.fileIndex),\n\t\t\t\tdirsSeen))\n\t}\n\n\t\/\/ Check handles.\n\tfor id, h := range fs.handles {\n\t\tif id >= fs.nextHandleID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal handle ID: %v\", id))\n\t\t}\n\n\t\t_ = h.(*dirHandle)\n\t}\n}\n\n\/\/ Get attributes for the inode, fixing up ownership information.\n\/\/\n\/\/ SHARED_LOCKS_REQUIRED(fs.mu)\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(in)\nfunc (fs *fileSystem) getAttributes(\n\tctx context.Context,\n\tin inode.Inode) (attrs fuse.InodeAttributes, err error) {\n\tattrs, err = in.Attributes(ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tattrs.Uid = fs.uid\n\tattrs.Gid = fs.gid\n\n\treturn\n}\n\n\/\/ Find a directory inode for the given object record. Create one if there\n\/\/ isn't already one available.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(fs.mu)\nfunc (fs *fileSystem) lookUpOrCreateDirInode(\n\tctx context.Context,\n\to *storage.Object) (in *inode.DirInode, err error) {\n\t\/\/ Do we already have an inode for this name?\n\tif in = fs.dirIndex[o.Name]; in != nil {\n\t\treturn\n\t}\n\n\t\/\/ Mint an ID.\n\tid := fs.nextInodeID\n\tfs.nextInodeID++\n\n\t\/\/ Create and index an inode.\n\tin = inode.NewDirInode(fs.bucket, id, o.Name)\n\tfs.inodes[id] = in\n\tfs.dirIndex[in.Name()] = in\n\n\treturn\n}\n\n\/\/ Find a file inode for the given object record. Create one if there isn't\n\/\/ already one available.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(fs.mu)\nfunc (fs *fileSystem) lookUpOrCreateFileInode(\n\tctx context.Context,\n\to *storage.Object) (in *inode.FileInode, err error) {\n\tnandg := nameAndGen{\n\t\tname: o.Name,\n\t\tgen: o.Generation,\n\t}\n\n\t\/\/ Do we already have an inode for this (name, generation) pair?\n\tif in = fs.fileIndex[nandg]; in != nil {\n\t\treturn\n\t}\n\n\t\/\/ Mint an ID.\n\tid := fs.nextInodeID\n\tfs.nextInodeID++\n\n\t\/\/ Create and index an inode.\n\tin = inode.NewFileInode(fs.bucket, id, o)\n\tfs.inodes[id] = in\n\tfs.fileIndex[nandg] = in\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fuse.FileSystem methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (resp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Store the mounting user's info for later.\n\tfs.uid = req.Header.Uid\n\tfs.gid = req.Header.Gid\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (resp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Find the parent directory in question.\n\tparent := fs.inodes[req.Parent].(*inode.DirInode)\n\n\t\/\/ Find a record for the child with the given name.\n\to, err := parent.LookUpChild(ctx, req.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Is the child a directory or a file?\n\tvar in inode.Inode\n\tif isDirName(o.Name) {\n\t\tin, err = fs.lookUpOrCreateDirInode(ctx, o)\n\t} else {\n\t\tin, err = fs.lookUpOrCreateFileInode(ctx, o)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Fill out the response.\n\tresp.Entry.Child = in.ID()\n\tif resp.Entry.Attributes, err = fs.getAttributes(ctx, in); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Find the inode.\n\tin := fs.inodes[req.Inode]\n\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Grab its attributes.\n\tresp.Attributes, err = fs.getAttributes(ctx, in)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (resp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Make sure the inode still exists and is a directory. If not, something has\n\t\/\/ screwed up because the VFS layer shouldn't have let us forget the inode\n\t\/\/ before opening it.\n\tin := fs.inodes[req.Inode].(*inode.DirInode)\n\tin.Lock()\n\tdefer in.Unlock()\n\n\t\/\/ Allocate a handle.\n\thandleID := fs.nextHandleID\n\tfs.nextHandleID++\n\n\tfs.handles[handleID] = newDirHandle(in)\n\tresp.Handle = handleID\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) ReadDir(\n\tctx context.Context,\n\treq *fuse.ReadDirRequest) (resp *fuse.ReadDirResponse, err error) {\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Find the handle.\n\tdh := fs.handles[req.Handle].(*dirHandle)\n\tdh.Mu.Lock()\n\tdefer dh.Mu.Unlock()\n\n\t\/\/ Serve the request.\n\tresp, err = dh.ReadDir(ctx, req)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) ReleaseDirHandle(\n\tctx context.Context,\n\treq *fuse.ReleaseDirHandleRequest) (\n\tresp *fuse.ReleaseDirHandleResponse, err error) {\n\tresp = &fuse.ReleaseDirHandleResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check that this handle exists and is of the correct type.\n\t_ = fs.handles[req.Handle].(*dirHandle)\n\n\t\/\/ Clear the entry from the map.\n\tdelete(fs.handles, req.Handle)\n\n\treturn\n}\n\n\/\/ TODO(jacobsa): Make sure we have failing tests for O_TRUNC behavior, then\n\/\/ implement it.\n\/\/\n\/\/ LOCKS_EXCLUDED(fs.mu)\nfunc (fs *fileSystem) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Sanity check that this inode exists and is of the correct type.\n\t_ = fs.inodes[req.Inode].(*inode.FileInode)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package errcode\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ ErrorCoder is the base interface for ErrorCode and Error allowing\n\/\/ users of each to just call ErrorCode to get the real ID of each\ntype ErrorCoder interface {\n\tErrorCode() ErrorCode\n}\n\n\/\/ ErrorCode represents the error type. The errors are serialized via strings\n\/\/ and the integer format may change and should *never* be exported.\ntype ErrorCode int\n\nvar _ error = ErrorCode(0)\n\n\/\/ ErrorCode just returns itself\nfunc (ec ErrorCode) ErrorCode() ErrorCode {\n\treturn ec\n}\n\n\/\/ Error returns the ID\/Value\nfunc (ec ErrorCode) Error() string {\n\t\/\/ NOTE(stevvooe): Cannot use message here since it may have unpopulated args.\n\treturn strings.ToLower(strings.Replace(ec.String(), \"_\", \" \", -1))\n}\n\n\/\/ Descriptor returns the descriptor for the error code.\nfunc (ec ErrorCode) Descriptor() ErrorDescriptor {\n\td, ok := errorCodeToDescriptors[ec]\n\n\tif !ok {\n\t\treturn ErrorCodeUnknown.Descriptor()\n\t}\n\n\treturn d\n}\n\n\/\/ String returns the canonical identifier for this error code.\nfunc (ec ErrorCode) String() string {\n\treturn ec.Descriptor().Value\n}\n\n\/\/ Message returned the human-readable error message for this error code.\nfunc (ec ErrorCode) Message() string {\n\treturn ec.Descriptor().Message\n}\n\n\/\/ MarshalText encodes the receiver into UTF-8-encoded text and returns the\n\/\/ result.\nfunc (ec ErrorCode) MarshalText() (text []byte, err error) {\n\treturn []byte(ec.String()), nil\n}\n\n\/\/ UnmarshalText decodes the form generated by MarshalText.\nfunc (ec *ErrorCode) UnmarshalText(text []byte) error {\n\tdesc, ok := idToDescriptors[string(text)]\n\n\tif !ok {\n\t\tdesc = ErrorCodeUnknown.Descriptor()\n\t}\n\n\t*ec = desc.Code\n\n\treturn nil\n}\n\n\/\/ WithMessage creates a new Error struct based on the passed-in info and\n\/\/ overrides the Message property.\nfunc (ec ErrorCode) WithMessage(message string) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: message,\n\t}\n}\n\n\/\/ WithDetail creates a new Error struct based on the passed-in info and\n\/\/ set the Detail property appropriately\nfunc (ec ErrorCode) WithDetail(detail interface{}) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: ec.Message(),\n\t}.WithDetail(detail)\n}\n\n\/\/ WithArgs creates a new Error struct and sets the Args slice\nfunc (ec ErrorCode) WithArgs(args ...interface{}) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: ec.Message(),\n\t}.WithArgs(args...)\n}\n\n\/\/ Error provides a wrapper around ErrorCode with extra Details provided.\ntype Error struct {\n\tCode ErrorCode `json:\"code\"`\n\tMessage string `json:\"message\"`\n\tDetail interface{} `json:\"detail,omitempty\"`\n\n\t\/\/ TODO(duglin): See if we need an \"args\" property so we can do the\n\t\/\/ variable substitution right before showing the message to the user\n}\n\nvar _ error = Error{}\n\n\/\/ ErrorCode returns the ID\/Value of this Error\nfunc (e Error) ErrorCode() ErrorCode {\n\treturn e.Code\n}\n\n\/\/ Error returns a human readable representation of the error.\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Code.Error(), e.Message)\n}\n\n\/\/ WithDetail will return a new Error, based on the current one, but with\n\/\/ some Detail info added\nfunc (e Error) WithDetail(detail interface{}) Error {\n\treturn Error{\n\t\tCode: e.Code,\n\t\tMessage: e.Message,\n\t\tDetail: detail,\n\t}\n}\n\n\/\/ WithArgs uses the passed-in list of interface{} as the substitution\n\/\/ variables in the Error's Message string, but returns a new Error\nfunc (e Error) WithArgs(args ...interface{}) Error {\n\treturn Error{\n\t\tCode: e.Code,\n\t\tMessage: fmt.Sprintf(e.Code.Message(), args...),\n\t\tDetail: e.Detail,\n\t}\n}\n\n\/\/ ErrorDescriptor provides relevant information about a given error code.\ntype ErrorDescriptor struct {\n\t\/\/ Code is the error code that this descriptor describes.\n\tCode ErrorCode\n\n\t\/\/ Value provides a unique, string key, often captilized with\n\t\/\/ underscores, to identify the error code. This value is used as the\n\t\/\/ keyed value when serializing api errors.\n\tValue string\n\n\t\/\/ Message is a short, human readable decription of the error condition\n\t\/\/ included in API responses.\n\tMessage string\n\n\t\/\/ Description provides a complete account of the errors purpose, suitable\n\t\/\/ for use in documentation.\n\tDescription string\n\n\t\/\/ HTTPStatusCode provides the http status code that is associated with\n\t\/\/ this error condition.\n\tHTTPStatusCode int\n}\n\n\/\/ ParseErrorCode returns the value by the string error code.\n\/\/ `ErrorCodeUnknown` will be returned if the error is not known.\nfunc ParseErrorCode(value string) ErrorCode {\n\ted, ok := idToDescriptors[value]\n\tif ok {\n\t\treturn ed.Code\n\t}\n\n\treturn ErrorCodeUnknown\n}\n\n\/\/ Errors provides the envelope for multiple errors and a few sugar methods\n\/\/ for use within the application.\ntype Errors []error\n\nvar _ error = Errors{}\n\nfunc (errs Errors) Error() string {\n\tswitch len(errs) {\n\tcase 0:\n\t\treturn \"<nil>\"\n\tcase 1:\n\t\treturn errs[0].Error()\n\tdefault:\n\t\tmsg := \"errors:\\n\"\n\t\tfor _, err := range errs {\n\t\t\tmsg += err.Error() + \"\\n\"\n\t\t}\n\t\treturn msg\n\t}\n}\n\n\/\/ Len returns the current number of errors.\nfunc (errs Errors) Len() int {\n\treturn len(errs)\n}\n\n\/\/ MarshalJSON converts slice of error, ErrorCode or Error into a\n\/\/ slice of Error - then serializes\nfunc (errs Errors) MarshalJSON() ([]byte, error) {\n\tvar tmpErrs struct {\n\t\tErrors []Error `json:\"errors,omitempty\"`\n\t}\n\n\tfor _, daErr := range errs {\n\t\tvar err Error\n\n\t\tswitch daErr := daErr.(type) {\n\t\tcase ErrorCode:\n\t\t\terr = daErr.WithDetail(nil)\n\t\tcase Error:\n\t\t\terr = daErr\n\t\tdefault:\n\t\t\terr = ErrorCodeUnknown.WithDetail(daErr)\n\n\t\t}\n\n\t\t\/\/ If the Error struct was setup and they forgot to set the\n\t\t\/\/ Message field (meaning its \"\") then grab it from the ErrCode\n\t\tmsg := err.Message\n\t\tif msg == \"\" {\n\t\t\tmsg = err.Code.Message()\n\t\t}\n\n\t\ttmpErrs.Errors = append(tmpErrs.Errors, Error{\n\t\t\tCode: err.Code,\n\t\t\tMessage: msg,\n\t\t\tDetail: err.Detail,\n\t\t})\n\t}\n\n\treturn json.Marshal(tmpErrs)\n}\n\n\/\/ UnmarshalJSON deserializes []Error and then converts it into slice of\n\/\/ Error or ErrorCode\nfunc (errs *Errors) UnmarshalJSON(data []byte) error {\n\tvar tmpErrs struct {\n\t\tErrors []Error\n\t}\n\n\tif err := json.Unmarshal(data, &tmpErrs); err != nil {\n\t\treturn err\n\t}\n\n\tvar newErrs Errors\n\tfor _, daErr := range tmpErrs.Errors {\n\t\t\/\/ If Message is empty or exactly matches the Code's message string\n\t\t\/\/ then just use the Code, no need for a full Error struct\n\t\tif daErr.Detail == nil && (daErr.Message == \"\" || daErr.Message == daErr.Code.Message()) {\n\t\t\t\/\/ Error's w\/o details get converted to ErrorCode\n\t\t\tnewErrs = append(newErrs, daErr.Code)\n\t\t} else {\n\t\t\t\/\/ Error's w\/ details are untouched\n\t\t\tnewErrs = append(newErrs, Error{\n\t\t\t\tCode: daErr.Code,\n\t\t\t\tMessage: daErr.Message,\n\t\t\t\tDetail: daErr.Detail,\n\t\t\t})\n\t\t}\n\t}\n\n\t*errs = newErrs\n\treturn nil\n}\n<commit_msg>Correct spelling: decription -> description<commit_after>package errcode\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ ErrorCoder is the base interface for ErrorCode and Error allowing\n\/\/ users of each to just call ErrorCode to get the real ID of each\ntype ErrorCoder interface {\n\tErrorCode() ErrorCode\n}\n\n\/\/ ErrorCode represents the error type. The errors are serialized via strings\n\/\/ and the integer format may change and should *never* be exported.\ntype ErrorCode int\n\nvar _ error = ErrorCode(0)\n\n\/\/ ErrorCode just returns itself\nfunc (ec ErrorCode) ErrorCode() ErrorCode {\n\treturn ec\n}\n\n\/\/ Error returns the ID\/Value\nfunc (ec ErrorCode) Error() string {\n\t\/\/ NOTE(stevvooe): Cannot use message here since it may have unpopulated args.\n\treturn strings.ToLower(strings.Replace(ec.String(), \"_\", \" \", -1))\n}\n\n\/\/ Descriptor returns the descriptor for the error code.\nfunc (ec ErrorCode) Descriptor() ErrorDescriptor {\n\td, ok := errorCodeToDescriptors[ec]\n\n\tif !ok {\n\t\treturn ErrorCodeUnknown.Descriptor()\n\t}\n\n\treturn d\n}\n\n\/\/ String returns the canonical identifier for this error code.\nfunc (ec ErrorCode) String() string {\n\treturn ec.Descriptor().Value\n}\n\n\/\/ Message returned the human-readable error message for this error code.\nfunc (ec ErrorCode) Message() string {\n\treturn ec.Descriptor().Message\n}\n\n\/\/ MarshalText encodes the receiver into UTF-8-encoded text and returns the\n\/\/ result.\nfunc (ec ErrorCode) MarshalText() (text []byte, err error) {\n\treturn []byte(ec.String()), nil\n}\n\n\/\/ UnmarshalText decodes the form generated by MarshalText.\nfunc (ec *ErrorCode) UnmarshalText(text []byte) error {\n\tdesc, ok := idToDescriptors[string(text)]\n\n\tif !ok {\n\t\tdesc = ErrorCodeUnknown.Descriptor()\n\t}\n\n\t*ec = desc.Code\n\n\treturn nil\n}\n\n\/\/ WithMessage creates a new Error struct based on the passed-in info and\n\/\/ overrides the Message property.\nfunc (ec ErrorCode) WithMessage(message string) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: message,\n\t}\n}\n\n\/\/ WithDetail creates a new Error struct based on the passed-in info and\n\/\/ set the Detail property appropriately\nfunc (ec ErrorCode) WithDetail(detail interface{}) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: ec.Message(),\n\t}.WithDetail(detail)\n}\n\n\/\/ WithArgs creates a new Error struct and sets the Args slice\nfunc (ec ErrorCode) WithArgs(args ...interface{}) Error {\n\treturn Error{\n\t\tCode: ec,\n\t\tMessage: ec.Message(),\n\t}.WithArgs(args...)\n}\n\n\/\/ Error provides a wrapper around ErrorCode with extra Details provided.\ntype Error struct {\n\tCode ErrorCode `json:\"code\"`\n\tMessage string `json:\"message\"`\n\tDetail interface{} `json:\"detail,omitempty\"`\n\n\t\/\/ TODO(duglin): See if we need an \"args\" property so we can do the\n\t\/\/ variable substitution right before showing the message to the user\n}\n\nvar _ error = Error{}\n\n\/\/ ErrorCode returns the ID\/Value of this Error\nfunc (e Error) ErrorCode() ErrorCode {\n\treturn e.Code\n}\n\n\/\/ Error returns a human readable representation of the error.\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Code.Error(), e.Message)\n}\n\n\/\/ WithDetail will return a new Error, based on the current one, but with\n\/\/ some Detail info added\nfunc (e Error) WithDetail(detail interface{}) Error {\n\treturn Error{\n\t\tCode: e.Code,\n\t\tMessage: e.Message,\n\t\tDetail: detail,\n\t}\n}\n\n\/\/ WithArgs uses the passed-in list of interface{} as the substitution\n\/\/ variables in the Error's Message string, but returns a new Error\nfunc (e Error) WithArgs(args ...interface{}) Error {\n\treturn Error{\n\t\tCode: e.Code,\n\t\tMessage: fmt.Sprintf(e.Code.Message(), args...),\n\t\tDetail: e.Detail,\n\t}\n}\n\n\/\/ ErrorDescriptor provides relevant information about a given error code.\ntype ErrorDescriptor struct {\n\t\/\/ Code is the error code that this descriptor describes.\n\tCode ErrorCode\n\n\t\/\/ Value provides a unique, string key, often captilized with\n\t\/\/ underscores, to identify the error code. This value is used as the\n\t\/\/ keyed value when serializing api errors.\n\tValue string\n\n\t\/\/ Message is a short, human readable description of the error condition\n\t\/\/ included in API responses.\n\tMessage string\n\n\t\/\/ Description provides a complete account of the errors purpose, suitable\n\t\/\/ for use in documentation.\n\tDescription string\n\n\t\/\/ HTTPStatusCode provides the http status code that is associated with\n\t\/\/ this error condition.\n\tHTTPStatusCode int\n}\n\n\/\/ ParseErrorCode returns the value by the string error code.\n\/\/ `ErrorCodeUnknown` will be returned if the error is not known.\nfunc ParseErrorCode(value string) ErrorCode {\n\ted, ok := idToDescriptors[value]\n\tif ok {\n\t\treturn ed.Code\n\t}\n\n\treturn ErrorCodeUnknown\n}\n\n\/\/ Errors provides the envelope for multiple errors and a few sugar methods\n\/\/ for use within the application.\ntype Errors []error\n\nvar _ error = Errors{}\n\nfunc (errs Errors) Error() string {\n\tswitch len(errs) {\n\tcase 0:\n\t\treturn \"<nil>\"\n\tcase 1:\n\t\treturn errs[0].Error()\n\tdefault:\n\t\tmsg := \"errors:\\n\"\n\t\tfor _, err := range errs {\n\t\t\tmsg += err.Error() + \"\\n\"\n\t\t}\n\t\treturn msg\n\t}\n}\n\n\/\/ Len returns the current number of errors.\nfunc (errs Errors) Len() int {\n\treturn len(errs)\n}\n\n\/\/ MarshalJSON converts slice of error, ErrorCode or Error into a\n\/\/ slice of Error - then serializes\nfunc (errs Errors) MarshalJSON() ([]byte, error) {\n\tvar tmpErrs struct {\n\t\tErrors []Error `json:\"errors,omitempty\"`\n\t}\n\n\tfor _, daErr := range errs {\n\t\tvar err Error\n\n\t\tswitch daErr := daErr.(type) {\n\t\tcase ErrorCode:\n\t\t\terr = daErr.WithDetail(nil)\n\t\tcase Error:\n\t\t\terr = daErr\n\t\tdefault:\n\t\t\terr = ErrorCodeUnknown.WithDetail(daErr)\n\n\t\t}\n\n\t\t\/\/ If the Error struct was setup and they forgot to set the\n\t\t\/\/ Message field (meaning its \"\") then grab it from the ErrCode\n\t\tmsg := err.Message\n\t\tif msg == \"\" {\n\t\t\tmsg = err.Code.Message()\n\t\t}\n\n\t\ttmpErrs.Errors = append(tmpErrs.Errors, Error{\n\t\t\tCode: err.Code,\n\t\t\tMessage: msg,\n\t\t\tDetail: err.Detail,\n\t\t})\n\t}\n\n\treturn json.Marshal(tmpErrs)\n}\n\n\/\/ UnmarshalJSON deserializes []Error and then converts it into slice of\n\/\/ Error or ErrorCode\nfunc (errs *Errors) UnmarshalJSON(data []byte) error {\n\tvar tmpErrs struct {\n\t\tErrors []Error\n\t}\n\n\tif err := json.Unmarshal(data, &tmpErrs); err != nil {\n\t\treturn err\n\t}\n\n\tvar newErrs Errors\n\tfor _, daErr := range tmpErrs.Errors {\n\t\t\/\/ If Message is empty or exactly matches the Code's message string\n\t\t\/\/ then just use the Code, no need for a full Error struct\n\t\tif daErr.Detail == nil && (daErr.Message == \"\" || daErr.Message == daErr.Code.Message()) {\n\t\t\t\/\/ Error's w\/o details get converted to ErrorCode\n\t\t\tnewErrs = append(newErrs, daErr.Code)\n\t\t} else {\n\t\t\t\/\/ Error's w\/ details are untouched\n\t\t\tnewErrs = append(newErrs, Error{\n\t\t\t\tCode: daErr.Code,\n\t\t\t\tMessage: daErr.Message,\n\t\t\t\tDetail: daErr.Detail,\n\t\t\t})\n\t\t}\n\t}\n\n\t*errs = newErrs\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"github.com\/jacobsa\/gcsfuse\/fs\/inode\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype fileSystem struct {\n\tfuseutil.NotImplementedFileSystem\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tclock timeutil.Clock\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ When acquiring this lock, the caller must hold no inode or dir handle\n\t\/\/ locks.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The collection of live inodes, keyed by inode ID. No ID less than\n\t\/\/ fuse.RootInodeID is ever used.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *inode.DirInode or *inode.FileInode\n\t\/\/ INVARIANT: For all keys k, k >= fuse.RootInodeID\n\t\/\/ INVARIANT: inodes[fuse.RootInodeID] is of type *inode.DirInode\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tinodes map[fuse.InodeID]interface{}\n\n\t\/\/ The next inode ID to hand out. We assume that this will never overflow,\n\t\/\/ since even if we were handing out inode IDs at 4 GHz, it would still take\n\t\/\/ over a century to do so.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in inodes, k < nextInodeID\n\tnextInodeID fuse.InodeID\n\n\t\/\/ The collection of live handles, keyed by handle ID.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *dirHandle\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\thandles map[fuse.HandleID]interface{}\n\n\t\/\/ The next handle ID to hand out. We assume that this will never overflow.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in handles, k < nextHandleID\n\tnextHandleID fuse.HandleID\n}\n\n\/\/ Create a fuse file system whose root directory is the root of the supplied\n\/\/ bucket. The supplied clock will be used for cache invalidation, modification\n\/\/ times, etc.\nfunc NewFileSystem(\n\tclock timeutil.Clock,\n\tbucket gcs.Bucket) (ffs fuse.FileSystem, err error) {\n\t\/\/ Set up the basic struct.\n\tfs := &fileSystem{\n\t\tclock: clock,\n\t\tbucket: bucket,\n\t\tinodes: make(map[fuse.InodeID]interface{}),\n\t\tnextInodeID: fuse.RootInodeID + 1,\n\t}\n\n\t\/\/ Set up the root inode.\n\tfs.inodes[fuse.RootInodeID] = inode.NewDirInode(bucket, \"\")\n\n\t\/\/ Set up invariant checking.\n\tfs.mu = syncutil.NewInvariantMutex(fs.checkInvariants)\n\n\tffs = fs\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *fileSystem) checkInvariants() {\n\t\/\/ Check inode keys.\n\tfor id, _ := range fs.inodes {\n\t\tif id < fuse.RootInodeID || id >= fs.nextInodeID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal inode ID: %v\", id))\n\t\t}\n\t}\n\n\t\/\/ Check the root inode.\n\t_ = fs.inodes[fuse.RootInodeID].(*inode.DirInode)\n\n\t\/\/ Check the type of each inode.\n\tfor _, in := range fs.inodes {\n\t\tswitch in.(type) {\n\t\tcase *inode.DirInode:\n\t\tcase *inode.FileInode:\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unexpected inode type: %v\", reflect.TypeOf(in)))\n\t\t}\n\t}\n\n\t\/\/ Check handles.\n\tfor id, h := range fs.handles {\n\t\tif id >= fs.nextHandleID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal handle ID: %v\", id))\n\t\t}\n\n\t\t_ = h.(*dirHandle)\n\t}\n}\n\n\/\/ Find the given inode and return it with its lock held for reading. Panic if\n\/\/ it doesn't exist or is the wrong type.\n\/\/\n\/\/ SHARED_LOCKS_REQUIRED(fs.mu)\n\/\/ SHARED_LOCK_FUNCTION(inode.mu)\nfunc (fs *fileSystem) getDirForReadingOrDie(\n\tid fuse.InodeID) (in *inode.DirInode) {\n\tin = fs.inodes[id].(*inode.DirInode)\n\tin.Mu.RLock()\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fuse.FileSystem methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *fileSystem) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (resp *fuse.InitResponse, err error) {\n\t\/\/ Nothing interesting to do.\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *fileSystem) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (resp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\n\t\/\/ Make sure the inode still exists and is a directory. If not, something has\n\t\/\/ screwed up because the VFS layer shouldn't have let us forget the inode\n\t\/\/ before opening it.\n\tin := fs.getDirForReadingOrDie(req.Inode)\n\tdefer in.Mu.RUnlock()\n\n\treturn\n}\n<commit_msg>Allocate directory handles.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"github.com\/jacobsa\/gcsfuse\/fs\/inode\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype fileSystem struct {\n\tfuseutil.NotImplementedFileSystem\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tclock timeutil.Clock\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ When acquiring this lock, the caller must hold no inode or dir handle\n\t\/\/ locks.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The collection of live inodes, keyed by inode ID. No ID less than\n\t\/\/ fuse.RootInodeID is ever used.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *inode.DirInode or *inode.FileInode\n\t\/\/ INVARIANT: For all keys k, k >= fuse.RootInodeID\n\t\/\/ INVARIANT: inodes[fuse.RootInodeID] is of type *inode.DirInode\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tinodes map[fuse.InodeID]interface{}\n\n\t\/\/ The next inode ID to hand out. We assume that this will never overflow,\n\t\/\/ since even if we were handing out inode IDs at 4 GHz, it would still take\n\t\/\/ over a century to do so.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in inodes, k < nextInodeID\n\tnextInodeID fuse.InodeID\n\n\t\/\/ The collection of live handles, keyed by handle ID.\n\t\/\/\n\t\/\/ INVARIANT: All values are of type *dirHandle\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\thandles map[fuse.HandleID]interface{}\n\n\t\/\/ The next handle ID to hand out. We assume that this will never overflow.\n\t\/\/\n\t\/\/ INVARIANT: For all keys k in handles, k < nextHandleID\n\tnextHandleID fuse.HandleID\n}\n\n\/\/ Create a fuse file system whose root directory is the root of the supplied\n\/\/ bucket. The supplied clock will be used for cache invalidation, modification\n\/\/ times, etc.\nfunc NewFileSystem(\n\tclock timeutil.Clock,\n\tbucket gcs.Bucket) (ffs fuse.FileSystem, err error) {\n\t\/\/ Set up the basic struct.\n\tfs := &fileSystem{\n\t\tclock: clock,\n\t\tbucket: bucket,\n\t\tinodes: make(map[fuse.InodeID]interface{}),\n\t\tnextInodeID: fuse.RootInodeID + 1,\n\t}\n\n\t\/\/ Set up the root inode.\n\tfs.inodes[fuse.RootInodeID] = inode.NewDirInode(bucket, \"\")\n\n\t\/\/ Set up invariant checking.\n\tfs.mu = syncutil.NewInvariantMutex(fs.checkInvariants)\n\n\tffs = fs\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *fileSystem) checkInvariants() {\n\t\/\/ Check inode keys.\n\tfor id, _ := range fs.inodes {\n\t\tif id < fuse.RootInodeID || id >= fs.nextInodeID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal inode ID: %v\", id))\n\t\t}\n\t}\n\n\t\/\/ Check the root inode.\n\t_ = fs.inodes[fuse.RootInodeID].(*inode.DirInode)\n\n\t\/\/ Check the type of each inode.\n\tfor _, in := range fs.inodes {\n\t\tswitch in.(type) {\n\t\tcase *inode.DirInode:\n\t\tcase *inode.FileInode:\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unexpected inode type: %v\", reflect.TypeOf(in)))\n\t\t}\n\t}\n\n\t\/\/ Check handles.\n\tfor id, h := range fs.handles {\n\t\tif id >= fs.nextHandleID {\n\t\t\tpanic(fmt.Sprintf(\"Illegal handle ID: %v\", id))\n\t\t}\n\n\t\t_ = h.(*dirHandle)\n\t}\n}\n\n\/\/ Find the given inode and return it with its lock held for reading. Panic if\n\/\/ it doesn't exist or is the wrong type.\n\/\/\n\/\/ SHARED_LOCKS_REQUIRED(fs.mu)\n\/\/ SHARED_LOCK_FUNCTION(inode.mu)\nfunc (fs *fileSystem) getDirForReadingOrDie(\n\tid fuse.InodeID) (in *inode.DirInode) {\n\tin = fs.inodes[id].(*inode.DirInode)\n\tin.Mu.RLock()\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fuse.FileSystem methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *fileSystem) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (resp *fuse.InitResponse, err error) {\n\t\/\/ Nothing interesting to do.\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *fileSystem) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (resp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Make sure the inode still exists and is a directory. If not, something has\n\t\/\/ screwed up because the VFS layer shouldn't have let us forget the inode\n\t\/\/ before opening it.\n\tin := fs.getDirForReadingOrDie(req.Inode)\n\tdefer in.Mu.RUnlock()\n\n\t\/\/ Allocate a handle.\n\thandleID := fs.nextHandleID\n\tfs.nextHandleID++\n\n\tfs.handles[handleID] = newDirHandle(in)\n\tresp.Handle = handleID\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/xyproto\/browserspeak\"\n\t\"github.com\/xyproto\/genericsite\"\n\t\"github.com\/xyproto\/siteengines\"\n\t\"github.com\/xyproto\/web\"\n)\n\n\/\/ TODO: Norwegian everywhere\n\/\/ TODO: Different Redis database than the other sites\n\nconst JQUERY_VERSION = \"2.0.0\"\n\nfunc notFound2(ctx *web.Context, val string) {\n\tctx.ResponseWriter.WriteHeader(404)\n\tctx.ResponseWriter.Write([]byte(browserspeak.NotFound(ctx, val)))\n}\n\nfunc ServeEngines(userState *genericsite.UserState, mainMenuEntries genericsite.MenuEntries) {\n\t\/\/ The user engine\n\tuserEngine := siteengines.NewUserEngine(userState)\n\tuserEngine.ServePages(\"ftls2.roboticoverlords.org\")\n\n\t\/\/ The admin engine\n\tadminEngine := siteengines.NewAdminEngine(userState)\n\tadminEngine.ServePages(FTLSBaseCP, mainMenuEntries)\n\n\t\/\/ Wiki engine\n\twikiEngine := siteengines.NewWikiEngine(userState)\n\twikiEngine.ServePages(FTLSBaseCP, mainMenuEntries)\n}\n\n\/\/ TODO: Separate database for each site\nfunc main() {\n\n\t\/\/ UserState with a Redis Connection Pool\n\tuserState := genericsite.NewUserState()\n\tdefer userState.Close()\n\n\t\/\/ The archlinux.no webpage,\n\tmainMenuEntries := ServeFTLS(userState, \"\/js\/jquery-\"+JQUERY_VERSION+\".min.js\")\n\n\tServeEngines(userState, mainMenuEntries)\n\n\t\/\/ Compilation errors, vim-compatible filename\n\tweb.Get(\"\/error\", browserspeak.GenerateErrorHandle(\"errors.err\"))\n\tweb.Get(\"\/errors\", browserspeak.GenerateErrorHandle(\"errors.err\"))\n\n\t\/\/ Various .php and .asp urls that showed up in the log\n\tgenericsite.ServeForFun()\n\n\t\/\/ TODO: Incorporate this check into web.go, to only return\n\t\/\/ stuff in the header when the HEAD method is requested:\n\t\/\/ if ctx.Request.Method == \"HEAD\" { return }\n\t\/\/ See also: curl -I\n\n\t\/\/ Serve on port 3002 for the Nginx instance to use\n\tweb.Run(\"0.0.0.0:3002\")\n}\n<commit_msg>Enabled the FTLS engine<commit_after>package main\n\nimport (\n\t\"github.com\/xyproto\/browserspeak\"\n\t\"github.com\/xyproto\/genericsite\"\n\t\"github.com\/xyproto\/siteengines\"\n\t\"github.com\/xyproto\/web\"\n)\n\n\/\/ TODO: Norwegian everywhere\n\/\/ TODO: Different Redis database than the other sites\n\nconst JQUERY_VERSION = \"2.0.0\"\n\nfunc notFound2(ctx *web.Context, val string) {\n\tctx.ResponseWriter.WriteHeader(404)\n\tctx.ResponseWriter.Write([]byte(browserspeak.NotFound(ctx, val)))\n}\n\nfunc ServeEngines(userState *genericsite.UserState, mainMenuEntries genericsite.MenuEntries) {\n\t\/\/ The user engine\n\tuserEngine := siteengines.NewUserEngine(userState)\n\tuserEngine.ServePages(\"ftls2.roboticoverlords.org\")\n\n\t\/\/ The admin engine\n\tadminEngine := siteengines.NewAdminEngine(userState)\n\tadminEngine.ServePages(FTLSBaseCP, mainMenuEntries)\n\n\t\/\/ Wiki engine\n\twikiEngine := siteengines.NewWikiEngine(userState)\n\twikiEngine.ServePages(FTLSBaseCP, mainMenuEntries)\n\n\t\/\/ FTLS engine\n\tftlsEngine := siteengines.NewFTLSEngine(userState)\n\tftlsEngine.ServePages(FTLSBaseCP, mainMenuEntries)\n}\n\nfunc main() {\n\n\t\/\/ UserState with a Redis Connection Pool\n\tuserState := genericsite.NewUserState()\n\tdefer userState.Close()\n\n\t\/\/ The archlinux.no webpage,\n\tmainMenuEntries := ServeFTLS(userState, \"\/js\/jquery-\"+JQUERY_VERSION+\".min.js\")\n\n\tServeEngines(userState, mainMenuEntries)\n\n\t\/\/ Compilation errors, vim-compatible filename\n\tweb.Get(\"\/error\", browserspeak.GenerateErrorHandle(\"errors.err\"))\n\tweb.Get(\"\/errors\", browserspeak.GenerateErrorHandle(\"errors.err\"))\n\n\t\/\/ Various .php and .asp urls that showed up in the log\n\tgenericsite.ServeForFun()\n\n\t\/\/ TODO: Incorporate this check into web.go, to only return\n\t\/\/ stuff in the header when the HEAD method is requested:\n\t\/\/ if ctx.Request.Method == \"HEAD\" { return }\n\t\/\/ See also: curl -I\n\n\t\/\/ Serve on port 3002 for the Nginx instance to use\n\tweb.Run(\"0.0.0.0:3002\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage transl translates struct fields and store translations\nin the same struct.\n*\/\npackage transl\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/json\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/text\/language\"\n\t\"reflect\"\n\t\"sync\"\n)\n\nvar defaultLanguageString = \"en\"\nvar defaultLanguageTag = language.English\n\n\/\/ SetDefaults redefines default language string and tag\nfunc SetDefaults(str string, tag language.Tag) {\n\tdefaultLanguageString = str\n\tdefaultLanguageTag = tag\n}\n\n\/\/ StringTable is a type for struct field to hold translations\n\/\/ e.g. Translations{\"en\": map[string]string{\"name\": \"John\"}}\ntype StringTable map[string]map[string]string\n\n\/\/ Scan unmarshals translations from JSON\nfunc (m *StringTable) Scan(value interface{}) error {\n\treturn json.Unmarshal(value.([]byte), m)\n}\n\n\/\/ Value marshals translations to JSON\nfunc (m StringTable) Value() (driver.Value, error) {\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn string(b), nil\n}\n\n\/\/ Translate fills fields of `target` struct with translated values\n\/\/\nfunc Translate(ctx context.Context, target interface{}) {\n\tmeta := metas.getStructMeta(target)\n\tif !meta.valid {\n\t\treturn\n\t}\n\n\tstructValue := reflect.Indirect(reflect.ValueOf(target))\n\n\ttranslations, ok := structValue.Field(meta.trIndex).Interface().(StringTable)\n\tif !ok || len(translations) == 0 {\n\t\treturn\n\t}\n\n\ttargetLanguages, ok := AcceptedLanguagesFromContext(ctx)\n\tif !ok || len(targetLanguages) == 0 {\n\t\ttargetLanguages = []language.Tag{defaultLanguageTag}\n\t}\n\n\tfor _, trF := range meta.fields {\n\t\tf := structValue.FieldByName(trF.name)\n\t\tif f.IsValid() && f.CanSet() && f.Kind() == reflect.String {\n\t\t\ttranslateField(f, trF.key, translations, targetLanguages)\n\t\t}\n\t}\n}\n\nfunc translateField(field reflect.Value, fieldName string, translations StringTable, targetLanguages []language.Tag) {\n\tmatcher := getMatcher(fieldName, translations)\n\teffectiveLang, _, _ := matcher.Match(targetLanguages...)\n\tfield.SetString(translations[effectiveLang.String()][fieldName])\n}\n\nvar matchers = map[string]language.Matcher{}\nvar matchersMutex sync.RWMutex\n\nfunc getMatcher(fieldName string, translations StringTable) language.Matcher {\n\tvar langsKeyBuffer bytes.Buffer\n\n\t\/\/ Build languages string key\n\tdefaultFound := false\n\tv, ok := translations[defaultLanguageString]\n\tif ok {\n\t\t_, ok = v[fieldName]\n\t\tif ok {\n\t\t\tlangsKeyBuffer.WriteString(defaultLanguageString)\n\t\t}\n\t}\n\n\tfor lang, tr := range translations {\n\t\t_, ok := tr[fieldName]\n\n\t\tif ok {\n\t\t\tif lang == defaultLanguageString {\n\t\t\t\tdefaultFound = true\n\t\t\t} else {\n\t\t\t\tlangsKeyBuffer.WriteString(lang)\n\t\t\t}\n\t\t}\n\t}\n\tlangsKey := langsKeyBuffer.String()\n\n\t\/\/ Return cached matcher for that string key if it's set\n\tmatchersMutex.RLock()\n\tmatcher, ok := matchers[langsKey]\n\tmatchersMutex.RUnlock()\n\n\tif ok {\n\t\treturn matcher\n\t}\n\n\t\/\/ Cache missed. Lets create matcher and add it to cache\n\tvar langs []language.Tag\n\n\tif defaultFound {\n\t\tlangs = []language.Tag{defaultLanguageTag}\n\t} else {\n\t\tlangs = []language.Tag{}\n\t}\n\n\tfor lang, tr := range translations {\n\t\t_, ok = tr[fieldName]\n\t\tif ok {\n\t\t\t\/\/ default language already in slice if needed\n\t\t\tif lang != defaultLanguageString {\n\t\t\t\tlangs = append(langs, *getTagByString(lang))\n\t\t\t}\n\t\t}\n\t}\n\n\tmatcher = language.NewMatcher(langs)\n\n\tmatchersMutex.Lock()\n\tmatchers[langsKey] = matcher\n\tmatchersMutex.Unlock()\n\n\treturn matcher\n}\n\nvar tags = map[string]language.Tag{}\nvar tagsMutex sync.RWMutex\n\nfunc getTagByString(s string) *language.Tag {\n\ttagsMutex.RLock()\n\ttag, ok := tags[s]\n\ttagsMutex.RUnlock()\n\n\tif ok {\n\t\treturn &tag\n\t}\n\n\ttag = language.Make(s)\n\n\ttagsMutex.Lock()\n\ttags[s] = tag\n\ttagsMutex.Unlock()\n\n\treturn &tag\n}\n<commit_msg>Use [n]string as key in map. -50% mem & -10% time<commit_after>\/*\nPackage transl translates struct fields and store translations\nin the same struct.\n*\/\npackage transl\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"encoding\/json\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/text\/language\"\n\t\"reflect\"\n\t\"sync\"\n)\n\nvar defaultLanguageString = \"en\"\nvar defaultLanguageTag = language.English\n\n\/\/ SetDefaults redefines default language string and tag\nfunc SetDefaults(str string, tag language.Tag) {\n\tdefaultLanguageString = str\n\tdefaultLanguageTag = tag\n}\n\n\/\/ StringTable is a type for struct field to hold translations\n\/\/ e.g. Translations{\"en\": map[string]string{\"name\": \"John\"}}\ntype StringTable map[string]map[string]string\n\n\/\/ Scan unmarshals translations from JSON\nfunc (m *StringTable) Scan(value interface{}) error {\n\treturn json.Unmarshal(value.([]byte), m)\n}\n\n\/\/ Value marshals translations to JSON\nfunc (m StringTable) Value() (driver.Value, error) {\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn string(b), nil\n}\n\n\/\/ Translate fills fields of `target` struct with translated values\n\/\/\nfunc Translate(ctx context.Context, target interface{}) {\n\tmeta := metas.getStructMeta(target)\n\tif !meta.valid {\n\t\treturn\n\t}\n\n\tstructValue := reflect.Indirect(reflect.ValueOf(target))\n\n\ttranslations, ok := structValue.Field(meta.trIndex).Interface().(StringTable)\n\tif !ok || len(translations) == 0 {\n\t\treturn\n\t}\n\n\ttargetLanguages, ok := AcceptedLanguagesFromContext(ctx)\n\tif !ok || len(targetLanguages) == 0 {\n\t\ttargetLanguages = []language.Tag{defaultLanguageTag}\n\t}\n\n\tfor _, trF := range meta.fields {\n\t\tf := structValue.FieldByName(trF.name)\n\t\tif f.IsValid() && f.CanSet() && f.Kind() == reflect.String {\n\t\t\ttranslateField(f, trF.key, translations, targetLanguages)\n\t\t}\n\t}\n}\n\nfunc translateField(field reflect.Value, fieldName string, translations StringTable, targetLanguages []language.Tag) {\n\tmatcher := getMatcher(fieldName, translations)\n\teffectiveLang, _, _ := matcher.Match(targetLanguages...)\n\tfield.SetString(translations[effectiveLang.String()][fieldName])\n}\n\nconst maxLangs = int(10)\n\nvar matchers = map[[maxLangs]string]language.Matcher{}\nvar matchersMutex sync.RWMutex\n\nfunc getMatcher(fieldName string, translations StringTable) language.Matcher {\n\tvar langsKey [maxLangs]string\n\tvar i int\n\n\t\/\/ Build languages string key\n\tdefaultFound := false\n\tv, ok := translations[defaultLanguageString]\n\tif ok {\n\t\t_, ok = v[fieldName]\n\t\tif ok {\n\t\t\tlangsKey[i] = defaultLanguageString\n\t\t\ti++\n\t\t}\n\t}\n\n\tfor lang, tr := range translations {\n\t\t_, ok = tr[fieldName]\n\n\t\tif ok {\n\t\t\tif lang == defaultLanguageString {\n\t\t\t\tdefaultFound = true\n\t\t\t} else {\n\t\t\t\tlangsKey[i] = lang\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Return cached matcher for that string key if it's set\n\tmatchersMutex.RLock()\n\tmatcher, ok := matchers[langsKey]\n\tmatchersMutex.RUnlock()\n\n\tif ok {\n\t\treturn matcher\n\t}\n\n\t\/\/ Cache missed. Lets create matcher and add it to cache\n\tvar langs []language.Tag\n\n\tif defaultFound {\n\t\tlangs = []language.Tag{defaultLanguageTag}\n\t} else {\n\t\tlangs = []language.Tag{}\n\t}\n\n\tfor lang, tr := range translations {\n\t\t_, ok = tr[fieldName]\n\t\tif ok {\n\t\t\t\/\/ default language already in slice if needed\n\t\t\tif lang != defaultLanguageString {\n\t\t\t\tlangs = append(langs, *getTagByString(lang))\n\t\t\t}\n\t\t}\n\t}\n\n\tmatcher = language.NewMatcher(langs)\n\n\tmatchersMutex.Lock()\n\tmatchers[langsKey] = matcher\n\tmatchersMutex.Unlock()\n\n\treturn matcher\n}\n\nvar tags = map[string]language.Tag{}\nvar tagsMutex sync.RWMutex\n\nfunc getTagByString(s string) *language.Tag {\n\ttagsMutex.RLock()\n\ttag, ok := tags[s]\n\ttagsMutex.RUnlock()\n\n\tif ok {\n\t\treturn &tag\n\t}\n\n\ttag = language.Make(s)\n\n\ttagsMutex.Lock()\n\ttags[s] = tag\n\ttagsMutex.Unlock()\n\n\treturn &tag\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n)\n\nfunc Serve(addr string) {\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\t\/\/ handle error\n\t}\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t\tcontinue\n\t\t}\n\n\t\tgo handleConnection(conn)\n\t}\n}\n\nfunc handleConnection(conn net.Conn) {\n\tb := bufio.NewReader(conn)\n\n\tfmt.Fprintf(conn, `<?xml version='1.0'?>\n\t\t<stream:stream\n\t\t\tfrom='localhost'\n\t\t\tid='abc123'\n\t\t\tto='djworth@localhost'\n\t\t\tversion='1.0'\n\t\t\txml:lang='en'\n\t\t\txmlns='jabber:server'\n\t\t\txmlns:stream='http:\/\/etherx.jabber.org\/streams'>`)\n\n\tfmt.Fprintf(conn, \"<stream:features\/>\")\n\tfor {\n\t\tline, _, err := b.ReadLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Println(string(line))\n\t}\n\n}\n<commit_msg>Working on login<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"net\"\n \"io\"\n)\n\ntype Stream struct {\n XMLName xml.Name `xml:\"stream:stream\"`\n Iq\n}\n\ntype Query struct {\n XMLName xml.Name `xml:\"query\"`\n Xmlns string `xml:\"xmlns,attr\"`\n Username string `xml:\"username\"`\n Password string `xml:\"password\"`\n Digest string `xml:\"digest\"`\n Resource string `xml:\"resource\"`\n}\n\n\ntype Iq struct {\n XMLName xml.Name `xml:\"iq\"`\n\tType string `xml:\"type,attr\"`\n\tId string `xml:\"id,attr\"`\n Query\n}\n\n\ntype LoginReq struct {\n\t\/\/<?xml version='1.0' ?><stream:stream to='localhost' xmlns='jabber:client'\n\t\/\/xmlns:stream='http:\/\/etherx.jabber.org\/streams' version='1.0'>\n\t\/\/<iq type='get' id='purple3d9c697f'><query xmlns='jabber:iq:auth'>\n\t\/\/<username>james<\/username><\/query><\/iq><\/stream:stream>\n\tIq Iq `xml:\"iq\"`\n}\n\n\nfunc Serve(addr string) {\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\t\/\/ handle error\n\t}\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConnection(conn)\n\t}\n}\n\nfunc handleLogin(loginRequest string) (x string, e error) {\n\n\tv := LoginReq{}\n\n\terr := xml.Unmarshal([]byte(loginRequest), &v)\n\n\tif err != nil {\n\t\tfmt.Printf(\"err: %v\", err)\n\t\treturn \"\", err\n\t}\n\tfmt.Printf(\"Id: %v\\n\", v.Iq.Id)\n return v.Iq.Id, err\n}\n\nfunc handleConnection(conn net.Conn) {\n\tfmt.Printf(\"Handling connection....\")\n\n\tb := bufio.NewReader(conn)\n\n\tfmt.Fprintf(conn, `<?xml version='1.0'?>\n\t\t<stream:stream\n\t\t\tfrom='localhost'\n\t\t\tid='abc123'\n\t\t\tto='james@localhost'\n\t\t\tversion='1.0'\n\t\t\txml:lang='en'\n\t\t\txmlns='jabber:server'\n\t\t\txmlns:stream='http:\/\/etherx.jabber.org\/streams'>`)\n\n\tfmt.Fprintf(conn, \"<stream:features\/>\")\n\tfor {\n\t\tline, _, err := b.ReadLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tloginId, err := handleLogin(string(line))\n fmt.Println(loginId)\n\t\tfmt.Println(string(line))\n r := &Stream{}\n r.Iq = Iq{Id: loginId, Type: \"result\"}\n r.Iq.Query = Query{Xmlns: \"jabber:iq:auth\"}\n output, err := xml.Marshal(r)\n fmt.Println(string(output))\n \n \n line, _, err = b.ReadLine()\n loginId2, err := handleLogin(string(line))\n i := &Stream{}\n i.Iq = Iq{Id: loginId2, Type: \"result\"}\n output2, err := xml.Marshal(i)\n fmt.Println(string(output2))\n \/\/fmt.Fprintf(conn, string(output))\n\t}\n\n\n}\n<|endoftext|>"} {"text":"<commit_before>package ga\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\tserver \"github.com\/sunfish-shogi\/sunfish4-ga\/shogiserver\"\n)\n\ntype GAManager struct {\n\tConfig Config\n\n\tserver server.ShogiServer\n\tlastID int\n\tinds []*individual\n}\n\nfunc NewGAManager(config Config) *GAManager {\n\treturn &GAManager{\n\t\tConfig: config,\n\t}\n}\n\nfunc (ga *GAManager) Run() error {\n\tdefer ga.Destroy()\n\n\terr := ga.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor gn := 1; ; gn++ {\n\t\tga.PrintGeneration(gn)\n\n\t\ttime.Sleep(time.Hour * 4)\n\n\t\terr = ga.Next()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n\nfunc (ga *GAManager) Start() error {\n\terr := ga.server.Setup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tga.inds = make([]*individual, 0, ga.Config.NumberOfIndividual)\n\tga.lastID = 0\n\tfor i := 0; i < ga.Config.NumberOfIndividual; i++ {\n\t\tind := newIndividual(ga.nextID(), ga.Config)\n\t\tif i == 0 {\n\t\t\tind.initParamForFirstElite()\n\t\t} else {\n\t\t\tind.initParamByRandom()\n\t\t}\n\t\tga.inds = append(ga.inds, ind)\n\t}\n\n\terrs := ga.startIndividuals()\n\tif len(errs) != 0 {\n\t\treturn errs[0]\n\t}\n\treturn nil\n}\n\ntype indsDescScoreOrder []*individual\n\nfunc (inds indsDescScoreOrder) Len() int { return len(inds) }\nfunc (inds indsDescScoreOrder) Swap(i, j int) { inds[i], inds[j] = inds[j], inds[i] }\nfunc (inds indsDescScoreOrder) Less(i, j int) bool { return inds[i].score > inds[j].score }\n\nfunc (ga *GAManager) Next() error {\n\trate, err := ga.server.MakeRate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pi := range rate.Players {\n\t\tfor _, player := range rate.Players[pi] {\n\t\t\tif player.Win+player.Loss < 100 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := range ga.inds {\n\t\t\t\tif ga.inds[i].id == player.Name {\n\t\t\t\t\tga.inds[i].score = player.Rate\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Sort by Score\n\tif ga.inds[0].score != 0 {\n\t\tsort.Stable(indsDescScoreOrder(ga.inds))\n\t} else {\n\t\tsort.Stable(indsDescScoreOrder(ga.inds[1:]))\n\t}\n\n\t\/\/ Print Scores\n\tlog.Println(\"Score\")\n\tfor i := range ga.inds {\n\t\tlog.Printf(\"%s %0.3f\", ga.inds[i].id, ga.inds[i].score)\n\t}\n\tlog.Println()\n\n\t\/\/ New Generation\n\tinds := make([]*individual, 0, ga.Config.NumberOfIndividual)\n\n\t\/\/ Elitism\n\tfor i := 0; i < 2; i++ {\n\t\tind := ga.copyElite(i)\n\t\tlog.Printf(\"elite: %s => %s\", ga.inds[i].id, ind.id)\n\t\tinds = append(inds, ind)\n\t}\n\n\tfor {\n\t\tif len(inds) >= ga.Config.NumberOfIndividual {\n\t\t\tbreak\n\t\t}\n\n\t\ti1 := ga.selectIndividual(\"\")\n\t\ti2 := ga.selectIndividual(i1.id)\n\n\t\tind := ga.crossover(i1, i2)\n\t\tlog.Printf(\"crossover: %s x %s => %s\", i1.id, i2.id, ind.id)\n\n\t\tif rand.Intn(20) < 1 \/* 1\/20 *\/ {\n\t\t\tga.mutate(ind)\n\t\t\tlog.Printf(\"mutate: %s\", ind.id)\n\t\t}\n\n\t\tinds = append(inds, ind)\n\t}\n\tlog.Println()\n\n\t\/\/ Stop Previous Generation\n\tfor i := range ga.inds {\n\t\tga.inds[i].stop()\n\t}\n\n\t\/\/ Replace to New Generation\n\tga.inds = inds\n\n\tga.startIndividuals()\n\n\treturn nil\n}\n\nfunc (ga *GAManager) startIndividuals() []error {\n\tvar errs []error\n\n\t\/\/ Setup\n\tvar wg sync.WaitGroup\n\tfor _, ind := range ga.inds {\n\t\twg.Add(1)\n\t\tgo func(ind *individual) {\n\t\t\tdefer wg.Done()\n\t\t\terr := ind.setup()\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to setup sunfish %s\", ind.id))\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}(ind)\n\t}\n\twg.Wait()\n\n\t\/\/ Start\n\tfor _, ind := range ga.inds {\n\t\terr := ind.start()\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to start sunfish %s\", ind.id))\n\t\t\terrs = append(errs, err)\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc (ga *GAManager) copyElite(idx int) *individual {\n\tind := newIndividual(ga.nextID(), ga.Config)\n\tind.initParam(ga.inds[idx].values)\n\treturn ind\n}\n\nfunc (ga *GAManager) selectIndividual(excludeID string) *individual {\n\tweight := make([]int, len(ga.inds))\n\tvar sum int\n\tfor i := range weight {\n\t\tif ga.inds[i].id != excludeID {\n\t\t\tif i == 0 {\n\t\t\t\tweight[0] = 1024\n\t\t\t} else {\n\t\t\t\tweight[i] = weight[i-1]*4\/5 + 1\n\t\t\t}\n\t\t\tsum += weight[i]\n\t\t}\n\t}\n\n\tr := rand.Intn(sum)\n\n\tfor i := range weight {\n\t\tr -= weight[i]\n\t\tif r <= 0 {\n\t\t\treturn ga.inds[i]\n\t\t}\n\t}\n\treturn ga.inds[0]\n}\n\nfunc (ga *GAManager) crossover(i1, i2 *individual) *individual {\n\tind := newIndividual(ga.nextID(), ga.Config)\n\tvalues := make([]int32, len(ga.Config.Params))\n\tfor i := range values {\n\t\tif rand.Intn(2) == 0 {\n\t\t\tvalues[i] = i1.values[i]\n\t\t} else {\n\t\t\tvalues[i] = i2.values[i]\n\t\t}\n\t}\n\tind.initParam(values)\n\treturn ind\n}\n\nfunc (ga *GAManager) mutate(ind *individual) {\n\tn := rand.Intn(2) + 1\n\tfor ; n > 0; n-- {\n\t\ti := rand.Intn(len(ind.values))\n\t\tmin := ga.Config.Params[i].MinimumValue\n\t\tmax := ga.Config.Params[i].MaximumValue\n\t\tt := min + rand.Int31n(max-min+1)\n\t\tind.values[i] = (ind.values[i] + t) \/ 2\n\t}\n}\n\nfunc (ga *GAManager) nextID() string {\n\tga.lastID++\n\treturn strconv.Itoa(ga.lastID)\n}\n\nfunc (ga *GAManager) PrintGeneration(gn int) {\n\tlog.Printf(\"Generation: %d\\n\", gn)\n\tfor i := range ga.inds {\n\t\tss := make([]string, len(ga.inds[i].values))\n\t\tfor vi, v := range ga.inds[i].values {\n\t\t\tss[vi] = strconv.Itoa(int(v))\n\t\t}\n\t\tlog.Printf(\"%s [%s]\\n\", ga.inds[i].id, strings.Join(ss, \",\"))\n\t}\n\tlog.Println()\n}\n\nfunc (ga *GAManager) Destroy() {\n\tfor i := range ga.inds {\n\t\tga.inds[i].stop()\n\t}\n\tga.server.Stop()\n}\n<commit_msg>tuning<commit_after>package ga\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\tserver \"github.com\/sunfish-shogi\/sunfish4-ga\/shogiserver\"\n)\n\ntype GAManager struct {\n\tConfig Config\n\n\tserver server.ShogiServer\n\tlastID int\n\tinds []*individual\n}\n\nfunc NewGAManager(config Config) *GAManager {\n\treturn &GAManager{\n\t\tConfig: config,\n\t}\n}\n\nfunc (ga *GAManager) Run() error {\n\tdefer ga.Destroy()\n\n\terr := ga.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor gn := 1; ; gn++ {\n\t\tga.PrintGeneration(gn)\n\n\t\ttime.Sleep(time.Hour * 4)\n\n\t\terr = ga.Next()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n\nfunc (ga *GAManager) Start() error {\n\terr := ga.server.Setup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tga.inds = make([]*individual, 0, ga.Config.NumberOfIndividual)\n\tga.lastID = 0\n\tfor i := 0; i < ga.Config.NumberOfIndividual; i++ {\n\t\tind := newIndividual(ga.nextID(), ga.Config)\n\t\tif i == 0 {\n\t\t\tind.initParamForFirstElite()\n\t\t} else {\n\t\t\tind.initParamByRandom()\n\t\t}\n\t\tga.inds = append(ga.inds, ind)\n\t}\n\n\terrs := ga.startIndividuals()\n\tif len(errs) != 0 {\n\t\treturn errs[0]\n\t}\n\treturn nil\n}\n\ntype indsDescScoreOrder []*individual\n\nfunc (inds indsDescScoreOrder) Len() int { return len(inds) }\nfunc (inds indsDescScoreOrder) Swap(i, j int) { inds[i], inds[j] = inds[j], inds[i] }\nfunc (inds indsDescScoreOrder) Less(i, j int) bool { return inds[i].score > inds[j].score }\n\nfunc (ga *GAManager) Next() error {\n\trate, err := ga.server.MakeRate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pi := range rate.Players {\n\t\tfor _, player := range rate.Players[pi] {\n\t\t\tif player.Win+player.Loss < 100 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := range ga.inds {\n\t\t\t\tif ga.inds[i].id == player.Name {\n\t\t\t\t\tga.inds[i].score = player.Rate\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Sort by Score\n\tif ga.inds[0].score != 0 {\n\t\tsort.Stable(indsDescScoreOrder(ga.inds))\n\t} else {\n\t\tsort.Stable(indsDescScoreOrder(ga.inds[1:]))\n\t}\n\n\t\/\/ Print Scores\n\tlog.Println(\"Score\")\n\tfor i := range ga.inds {\n\t\tlog.Printf(\"%s %0.3f\", ga.inds[i].id, ga.inds[i].score)\n\t}\n\tlog.Println()\n\n\t\/\/ New Generation\n\tinds := make([]*individual, 0, ga.Config.NumberOfIndividual)\n\n\t\/\/ Elitism\n\tind := ga.copyElite(0)\n\tlog.Printf(\"elite: %s => %s\", ga.inds[0].id, ind.id)\n\tinds = append(inds, ind)\n\n\t\/\/ Random\n\tind = newIndividual(ga.nextID(), ga.Config)\n\tind.initParamByRandom()\n\tlog.Printf(\"random: => %s\", ind.id)\n\tinds = append(inds, ind)\n\n\tfor {\n\t\tif len(inds) >= ga.Config.NumberOfIndividual {\n\t\t\tbreak\n\t\t}\n\n\t\ti1 := ga.selectIndividual(\"\")\n\t\ti2 := ga.selectIndividual(i1.id)\n\n\t\tind := ga.crossover(i1, i2)\n\t\tlog.Printf(\"crossover: %s x %s => %s\", i1.id, i2.id, ind.id)\n\n\t\tif rand.Intn(20) < 1 \/* 1\/20 *\/ {\n\t\t\tga.mutate(ind)\n\t\t\tlog.Printf(\"mutate: %s\", ind.id)\n\t\t}\n\n\t\tinds = append(inds, ind)\n\t}\n\tlog.Println()\n\n\t\/\/ Stop Previous Generation\n\tfor i := range ga.inds {\n\t\tga.inds[i].stop()\n\t}\n\n\t\/\/ Replace to New Generation\n\tga.inds = inds\n\n\tga.startIndividuals()\n\n\treturn nil\n}\n\nfunc (ga *GAManager) startIndividuals() []error {\n\tvar errs []error\n\n\t\/\/ Setup\n\tvar wg sync.WaitGroup\n\tfor _, ind := range ga.inds {\n\t\twg.Add(1)\n\t\tgo func(ind *individual) {\n\t\t\tdefer wg.Done()\n\t\t\terr := ind.setup()\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to setup sunfish %s\", ind.id))\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}(ind)\n\t}\n\twg.Wait()\n\n\t\/\/ Start\n\tfor _, ind := range ga.inds {\n\t\terr := ind.start()\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to start sunfish %s\", ind.id))\n\t\t\terrs = append(errs, err)\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc (ga *GAManager) copyElite(idx int) *individual {\n\tind := newIndividual(ga.nextID(), ga.Config)\n\tind.initParam(ga.inds[idx].values)\n\treturn ind\n}\n\nfunc (ga *GAManager) selectIndividual(excludeID string) *individual {\n\tweight := make([]int, len(ga.inds))\n\tvar sum int\n\tfor i := range weight {\n\t\tif ga.inds[i].id != excludeID {\n\t\t\tif i == 0 {\n\t\t\t\tweight[0] = 1024\n\t\t\t} else {\n\t\t\t\tweight[i] = weight[i-1]*9\/10 + 1\n\t\t\t}\n\t\t\tsum += weight[i]\n\t\t}\n\t}\n\n\tr := rand.Intn(sum)\n\n\tfor i := range weight {\n\t\tr -= weight[i]\n\t\tif r <= 0 {\n\t\t\treturn ga.inds[i]\n\t\t}\n\t}\n\treturn ga.inds[0]\n}\n\nfunc (ga *GAManager) crossover(i1, i2 *individual) *individual {\n\tind := newIndividual(ga.nextID(), ga.Config)\n\tvalues := make([]int32, len(ga.Config.Params))\n\tfor i := range values {\n\t\tif rand.Intn(2) == 0 {\n\t\t\tvalues[i] = i1.values[i]\n\t\t} else {\n\t\t\tvalues[i] = i2.values[i]\n\t\t}\n\t}\n\tind.initParam(values)\n\treturn ind\n}\n\nfunc (ga *GAManager) mutate(ind *individual) {\n\tn := rand.Intn(2) + 1\n\tfor ; n > 0; n-- {\n\t\ti := rand.Intn(len(ind.values))\n\t\tmin := ga.Config.Params[i].MinimumValue\n\t\tmax := ga.Config.Params[i].MaximumValue\n\t\tt := min + rand.Int31n(max-min+1)\n\t\tind.values[i] = (ind.values[i] + t) \/ 2\n\t}\n}\n\nfunc (ga *GAManager) nextID() string {\n\tga.lastID++\n\treturn strconv.Itoa(ga.lastID)\n}\n\nfunc (ga *GAManager) PrintGeneration(gn int) {\n\tlog.Printf(\"Generation: %d\\n\", gn)\n\tfor i := range ga.inds {\n\t\tss := make([]string, len(ga.inds[i].values))\n\t\tfor vi, v := range ga.inds[i].values {\n\t\t\tss[vi] = strconv.Itoa(int(v))\n\t\t}\n\t\tlog.Printf(\"%s [%s]\\n\", ga.inds[i].id, strings.Join(ss, \",\"))\n\t}\n\tlog.Println()\n}\n\nfunc (ga *GAManager) Destroy() {\n\tfor i := range ga.inds {\n\t\tga.inds[i].stop()\n\t}\n\tga.server.Stop()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\n\t\"github.com\/prometheus\/alertmanager\/notify\"\n)\n\n\/\/ At is a convenience method to allow for declarative syntax of Acceptance\n\/\/ test definitions.\nfunc At(ts float64) float64 {\n\treturn ts\n}\n\ntype Interval struct {\n\tstart, end float64\n}\n\nfunc (iv Interval) String() string {\n\treturn fmt.Sprintf(\"[%v,%v]\", iv.start, iv.end)\n}\n\nfunc (iv Interval) contains(f float64) bool {\n\treturn f >= iv.start && f <= iv.end\n}\n\n\/\/ Between is a convenience constructor for an interval for declarative syntax\n\/\/ of Acceptance test definitions.\nfunc Between(start, end float64) Interval {\n\treturn Interval{start: start, end: end}\n}\n\n\/\/ TestSilence models a model.Silence with relative times.\ntype TestSilence struct {\n\tID uint64\n\tmatch []string\n\tmatchRE []string\n\tstartsAt, endsAt float64\n}\n\n\/\/ Silence creates a new TestSilence active for the relative interval given\n\/\/ by start and end.\nfunc Silence(start, end float64) *TestSilence {\n\treturn &TestSilence{\n\t\tstartsAt: start,\n\t\tendsAt: end,\n\t}\n}\n\n\/\/ Match adds a new plain matcher to the silence.\nfunc (s *TestSilence) Match(v ...string) *TestSilence {\n\ts.match = append(s.match, v...)\n\treturn s\n}\n\n\/\/ MatchRE adds a new regex matcher to the silence\nfunc (s *TestSilence) MatchRE(v ...string) *TestSilence {\n\tif len(v)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\ts.matchRE = append(s.matchRE, v...)\n\treturn s\n}\n\n\/\/ nativeSilence converts the declared test silence into a regular\n\/\/ silence with resolved times.\nfunc (sil *TestSilence) nativeSilence(opts *AcceptanceOpts) *model.Silence {\n\tnsil := &model.Silence{}\n\n\tfor i := 0; i < len(sil.match); i += 2 {\n\t\tnsil.Matchers = append(nsil.Matchers, &model.Matcher{\n\t\t\tName: model.LabelName(sil.match[i]),\n\t\t\tValue: sil.match[i+1],\n\t\t})\n\t}\n\tfor i := 0; i < len(sil.matchRE); i += 2 {\n\t\tnsil.Matchers = append(nsil.Matchers, &model.Matcher{\n\t\t\tName: model.LabelName(sil.match[i]),\n\t\t\tValue: sil.match[i+1],\n\t\t\tIsRegex: true,\n\t\t})\n\t}\n\n\tif sil.startsAt > 0 {\n\t\tnsil.StartsAt = opts.expandTime(sil.startsAt)\n\t}\n\tif sil.endsAt > 0 {\n\t\tnsil.EndsAt = opts.expandTime(sil.endsAt)\n\t}\n\treturn nsil\n}\n\n\/\/ TestAlert models a model.Alert with relative times.\ntype TestAlert struct {\n\tlabels model.LabelSet\n\tannotations model.LabelSet\n\tstartsAt, endsAt float64\n}\n\n\/\/ alert creates a new alert declaration with the given key\/value pairs\n\/\/ as identifying labels.\nfunc Alert(keyval ...interface{}) *TestAlert {\n\tif len(keyval)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\ta := &TestAlert{\n\t\tlabels: model.LabelSet{},\n\t\tannotations: model.LabelSet{},\n\t}\n\n\tfor i := 0; i < len(keyval); i += 2 {\n\t\tln := model.LabelName(keyval[i].(string))\n\t\tlv := model.LabelValue(keyval[i+1].(string))\n\n\t\ta.labels[ln] = lv\n\t}\n\n\treturn a\n}\n\n\/\/ nativeAlert converts the declared test alert into a full alert based\n\/\/ on the given paramters.\nfunc (a *TestAlert) nativeAlert(opts *AcceptanceOpts) *model.Alert {\n\tna := &model.Alert{\n\t\tLabels: a.labels,\n\t\tAnnotations: a.annotations,\n\t}\n\n\tif a.startsAt > 0 {\n\t\tna.StartsAt = opts.expandTime(a.startsAt)\n\t}\n\tif a.endsAt > 0 {\n\t\tna.EndsAt = opts.expandTime(a.endsAt)\n\t}\n\treturn na\n}\n\n\/\/ Annotate the alert with the given key\/value pairs.\nfunc (a *TestAlert) Annotate(keyval ...interface{}) *TestAlert {\n\tif len(keyval)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\n\tfor i := 0; i < len(keyval); i += 2 {\n\t\tln := model.LabelName(keyval[i].(string))\n\t\tlv := model.LabelValue(keyval[i+1].(string))\n\n\t\ta.annotations[ln] = lv\n\t}\n\n\treturn a\n}\n\n\/\/ Active declares the relative activity time for this alert. It\n\/\/ must be a single starting value or two values where the second value\n\/\/ declares the resolved time.\nfunc (a *TestAlert) Active(tss ...float64) *TestAlert {\n\tif len(tss) > 2 || len(tss) == 0 {\n\t\tpanic(\"only one or two timestamps allowed\")\n\t}\n\tif len(tss) == 2 {\n\t\ta.endsAt = tss[1]\n\t}\n\ta.startsAt = tss[0]\n\n\treturn a\n}\n\nfunc equalAlerts(a, b *model.Alert, opts *AcceptanceOpts) bool {\n\tif !reflect.DeepEqual(a.Labels, b.Labels) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.Annotations, b.Annotations) {\n\t\treturn false\n\t}\n\n\tif !equalTime(a.StartsAt, b.StartsAt, opts) {\n\t\treturn false\n\t}\n\tif !equalTime(a.EndsAt, b.EndsAt, opts) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc equalTime(a, b time.Time, opts *AcceptanceOpts) bool {\n\tif a.IsZero() != b.IsZero() {\n\t\treturn false\n\t}\n\n\tdiff := a.Sub(b)\n\tif diff < 0 {\n\t\tdiff = -diff\n\t}\n\treturn diff <= opts.Tolerance\n}\n\ntype MockWebhook struct {\n\tcollector *Collector\n\tlistener net.Listener\n}\n\nfunc NewWebhook(c *Collector) *MockWebhook {\n\tl, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\t\/\/ TODO(fabxc): if shutdown of mock destinations ever becomes a concern\n\t\t\/\/ we want to shut them down after test completion. Then we might want to\n\t\t\/\/ log the error properly, too.\n\t\tpanic(err)\n\t}\n\twh := &MockWebhook{\n\t\tlistener: l,\n\t\tcollector: c,\n\t}\n\tgo http.Serve(l, wh)\n\n\treturn wh\n}\n\nfunc (ws *MockWebhook) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tdec := json.NewDecoder(req.Body)\n\tdefer req.Body.Close()\n\n\tvar v notify.WebhookMessage\n\tif err := dec.Decode(&v); err != nil {\n\t\tpanic(err)\n\t}\n\n\tws.collector.add(v.Alerts...)\n}\n\nfunc (ws *MockWebhook) Address() string {\n\treturn ws.listener.Addr().String()\n}\n<commit_msg>Add injection function to webhook<commit_after>\/\/ Copyright 2015 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\n\t\"github.com\/prometheus\/alertmanager\/notify\"\n)\n\n\/\/ At is a convenience method to allow for declarative syntax of Acceptance\n\/\/ test definitions.\nfunc At(ts float64) float64 {\n\treturn ts\n}\n\ntype Interval struct {\n\tstart, end float64\n}\n\nfunc (iv Interval) String() string {\n\treturn fmt.Sprintf(\"[%v,%v]\", iv.start, iv.end)\n}\n\nfunc (iv Interval) contains(f float64) bool {\n\treturn f >= iv.start && f <= iv.end\n}\n\n\/\/ Between is a convenience constructor for an interval for declarative syntax\n\/\/ of Acceptance test definitions.\nfunc Between(start, end float64) Interval {\n\treturn Interval{start: start, end: end}\n}\n\n\/\/ TestSilence models a model.Silence with relative times.\ntype TestSilence struct {\n\tID uint64\n\tmatch []string\n\tmatchRE []string\n\tstartsAt, endsAt float64\n}\n\n\/\/ Silence creates a new TestSilence active for the relative interval given\n\/\/ by start and end.\nfunc Silence(start, end float64) *TestSilence {\n\treturn &TestSilence{\n\t\tstartsAt: start,\n\t\tendsAt: end,\n\t}\n}\n\n\/\/ Match adds a new plain matcher to the silence.\nfunc (s *TestSilence) Match(v ...string) *TestSilence {\n\ts.match = append(s.match, v...)\n\treturn s\n}\n\n\/\/ MatchRE adds a new regex matcher to the silence\nfunc (s *TestSilence) MatchRE(v ...string) *TestSilence {\n\tif len(v)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\ts.matchRE = append(s.matchRE, v...)\n\treturn s\n}\n\n\/\/ nativeSilence converts the declared test silence into a regular\n\/\/ silence with resolved times.\nfunc (sil *TestSilence) nativeSilence(opts *AcceptanceOpts) *model.Silence {\n\tnsil := &model.Silence{}\n\n\tfor i := 0; i < len(sil.match); i += 2 {\n\t\tnsil.Matchers = append(nsil.Matchers, &model.Matcher{\n\t\t\tName: model.LabelName(sil.match[i]),\n\t\t\tValue: sil.match[i+1],\n\t\t})\n\t}\n\tfor i := 0; i < len(sil.matchRE); i += 2 {\n\t\tnsil.Matchers = append(nsil.Matchers, &model.Matcher{\n\t\t\tName: model.LabelName(sil.match[i]),\n\t\t\tValue: sil.match[i+1],\n\t\t\tIsRegex: true,\n\t\t})\n\t}\n\n\tif sil.startsAt > 0 {\n\t\tnsil.StartsAt = opts.expandTime(sil.startsAt)\n\t}\n\tif sil.endsAt > 0 {\n\t\tnsil.EndsAt = opts.expandTime(sil.endsAt)\n\t}\n\treturn nsil\n}\n\n\/\/ TestAlert models a model.Alert with relative times.\ntype TestAlert struct {\n\tlabels model.LabelSet\n\tannotations model.LabelSet\n\tstartsAt, endsAt float64\n}\n\n\/\/ alert creates a new alert declaration with the given key\/value pairs\n\/\/ as identifying labels.\nfunc Alert(keyval ...interface{}) *TestAlert {\n\tif len(keyval)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\ta := &TestAlert{\n\t\tlabels: model.LabelSet{},\n\t\tannotations: model.LabelSet{},\n\t}\n\n\tfor i := 0; i < len(keyval); i += 2 {\n\t\tln := model.LabelName(keyval[i].(string))\n\t\tlv := model.LabelValue(keyval[i+1].(string))\n\n\t\ta.labels[ln] = lv\n\t}\n\n\treturn a\n}\n\n\/\/ nativeAlert converts the declared test alert into a full alert based\n\/\/ on the given paramters.\nfunc (a *TestAlert) nativeAlert(opts *AcceptanceOpts) *model.Alert {\n\tna := &model.Alert{\n\t\tLabels: a.labels,\n\t\tAnnotations: a.annotations,\n\t}\n\n\tif a.startsAt > 0 {\n\t\tna.StartsAt = opts.expandTime(a.startsAt)\n\t}\n\tif a.endsAt > 0 {\n\t\tna.EndsAt = opts.expandTime(a.endsAt)\n\t}\n\treturn na\n}\n\n\/\/ Annotate the alert with the given key\/value pairs.\nfunc (a *TestAlert) Annotate(keyval ...interface{}) *TestAlert {\n\tif len(keyval)%2 == 1 {\n\t\tpanic(\"bad key\/values\")\n\t}\n\n\tfor i := 0; i < len(keyval); i += 2 {\n\t\tln := model.LabelName(keyval[i].(string))\n\t\tlv := model.LabelValue(keyval[i+1].(string))\n\n\t\ta.annotations[ln] = lv\n\t}\n\n\treturn a\n}\n\n\/\/ Active declares the relative activity time for this alert. It\n\/\/ must be a single starting value or two values where the second value\n\/\/ declares the resolved time.\nfunc (a *TestAlert) Active(tss ...float64) *TestAlert {\n\tif len(tss) > 2 || len(tss) == 0 {\n\t\tpanic(\"only one or two timestamps allowed\")\n\t}\n\tif len(tss) == 2 {\n\t\ta.endsAt = tss[1]\n\t}\n\ta.startsAt = tss[0]\n\n\treturn a\n}\n\nfunc equalAlerts(a, b *model.Alert, opts *AcceptanceOpts) bool {\n\tif !reflect.DeepEqual(a.Labels, b.Labels) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.Annotations, b.Annotations) {\n\t\treturn false\n\t}\n\n\tif !equalTime(a.StartsAt, b.StartsAt, opts) {\n\t\treturn false\n\t}\n\tif !equalTime(a.EndsAt, b.EndsAt, opts) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc equalTime(a, b time.Time, opts *AcceptanceOpts) bool {\n\tif a.IsZero() != b.IsZero() {\n\t\treturn false\n\t}\n\n\tdiff := a.Sub(b)\n\tif diff < 0 {\n\t\tdiff = -diff\n\t}\n\treturn diff <= opts.Tolerance\n}\n\ntype MockWebhook struct {\n\topts *AcceptanceOpts\n\tcollector *Collector\n\tlistener net.Listener\n\n\tFunc func(timestamp float64) bool\n}\n\nfunc NewWebhook(c *Collector) *MockWebhook {\n\tl, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\t\/\/ TODO(fabxc): if shutdown of mock destinations ever becomes a concern\n\t\t\/\/ we want to shut them down after test completion. Then we might want to\n\t\t\/\/ log the error properly, too.\n\t\tpanic(err)\n\t}\n\twh := &MockWebhook{\n\t\tlistener: l,\n\t\tcollector: c,\n\t}\n\tgo http.Serve(l, wh)\n\n\treturn wh\n}\n\nfunc (ws *MockWebhook) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\t\/\/ Inject Func if it exists.\n\tif ws.Func != nil {\n\t\tif ws.Func(ws.opts.relativeTime(time.Now())) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tdec := json.NewDecoder(req.Body)\n\tdefer req.Body.Close()\n\n\tvar v notify.WebhookMessage\n\tif err := dec.Decode(&v); err != nil {\n\t\tpanic(err)\n\t}\n\n\tws.collector.add(v.Alerts...)\n}\n\nfunc (ws *MockWebhook) Address() string {\n\treturn ws.listener.Addr().String()\n}\n<|endoftext|>"} {"text":"<commit_before>package bwhatsapp\n\nimport (\n\t\"fmt\"\n\t\"mime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/Rhymen\/go-whatsapp\"\n\t\"github.com\/jpillora\/backoff\"\n)\n\n\/*\nImplement handling messages coming from WhatsApp\nCheck:\n- https:\/\/github.com\/Rhymen\/go-whatsapp#add-message-handlers\n- https:\/\/github.com\/Rhymen\/go-whatsapp\/blob\/master\/handler.go\n- https:\/\/github.com\/tulir\/mautrix-whatsapp\/tree\/master\/whatsapp-ext for more advanced command handling\n*\/\n\n\/\/ HandleError received from WhatsApp\nfunc (b *Bwhatsapp) HandleError(err error) {\n\t\/\/ ignore received invalid data errors. https:\/\/github.com\/42wim\/matterbridge\/issues\/843\n\t\/\/ ignore tag 174 errors. https:\/\/github.com\/42wim\/matterbridge\/issues\/1094\n\tif strings.Contains(err.Error(), \"error processing data: received invalid data\") ||\n\t\tstrings.Contains(err.Error(), \"invalid string with tag 174\") {\n\t\treturn\n\t}\n\n\tswitch err.(type) {\n\tcase *whatsapp.ErrConnectionClosed, *whatsapp.ErrConnectionFailed:\n\t\tb.reconnect(err)\n\tdefault:\n\t\tswitch err {\n\t\tcase whatsapp.ErrConnectionTimeout:\n\t\t\tb.reconnect(err)\n\t\tdefault:\n\t\t\tb.Log.Errorf(\"%v\", err)\n\t\t}\n\t}\n}\n\nfunc (b *Bwhatsapp) reconnect(err error) {\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\tfor {\n\t\td := bf.Duration()\n\n\t\tb.Log.Errorf(\"Connection failed, underlying error: %v\", err)\n\t\tb.Log.Infof(\"Waiting %s...\", d)\n\n\t\ttime.Sleep(d)\n\n\t\tb.Log.Info(\"Reconnecting...\")\n\n\t\terr := b.conn.Restore()\n\t\tif err == nil {\n\t\t\tbf.Reset()\n\t\t\tb.startedAt = uint64(time.Now().Unix())\n\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ HandleTextMessage sent from WhatsApp, relay it to the brige\nfunc (b *Bwhatsapp) HandleTextMessage(message whatsapp.TextMessage) {\n\tif message.Info.FromMe {\n\t\treturn\n\t}\n\t\/\/ whatsapp sends last messages to show context , cut them\n\tif message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tgroupJID := message.Info.RemoteJid\n\tsenderJID := message.Info.SenderJid\n\n\tif len(senderJID) == 0 {\n\t\tif message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\t\tsenderJID = *message.Info.Source.Participant\n\t\t}\n\t}\n\n\t\/\/ translate sender's JID to the nicest username we can get\n\tsenderName := b.getSenderName(senderJID)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\textText := message.Info.Source.Message.ExtendedTextMessage\n\tif extText != nil && extText.ContextInfo != nil && extText.ContextInfo.MentionedJid != nil {\n\t\t\/\/ handle user mentions\n\t\tfor _, mentionedJID := range extText.ContextInfo.MentionedJid {\n\t\t\tnumberAndSuffix := strings.SplitN(mentionedJID, \"@\", 2)\n\n\t\t\t\/\/ mentions comes as telephone numbers and we don't want to expose it to other bridges\n\t\t\t\/\/ replace it with something more meaninful to others\n\t\t\tmention := b.getSenderNotify(numberAndSuffix[0] + \"@s.whatsapp.net\")\n\t\t\tif mention == \"\" {\n\t\t\t\tmention = \"someone\"\n\t\t\t}\n\n\t\t\tmessage.Text = strings.Replace(message.Text, \"@\"+numberAndSuffix[0], \"@\"+mention, 1)\n\t\t}\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tText: message.Text,\n\t\tChannel: groupJID,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\t\/\/\tParentID: TODO, \/\/ TODO handle thread replies \/\/ map from Info.QuotedMessageID string\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleImageMessage sent from WhatsApp, relay it to the brige\nfunc (b *Bwhatsapp) HandleImageMessage(message whatsapp.ImageMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ rename .jfif to .jpg https:\/\/github.com\/42wim\/matterbridge\/issues\/1292\n\tif fileExt[0] == \".jfif\" {\n\t\tfileExt[0] = \".jpg\"\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with type %s\", filename, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download image failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, message.Caption, \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleVideoMessage downloads video messages\nfunc (b *Bwhatsapp) HandleVideoMessage(message whatsapp.VideoMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with size %#v and type %s\", filename, message.Length, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download video failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, message.Caption, \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleAudioMessage downloads audio messages\nfunc (b *Bwhatsapp) HandleAudioMessage(message whatsapp.AudioMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\tif len(fileExt) == 0 {\n\t\tfileExt = append(fileExt, \".ogg\")\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with size %#v and type %s\", filename, message.Length, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download audio failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, \"audio message\", \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n<commit_msg>Handle document messages (whatsapp) (#1475)<commit_after>package bwhatsapp\n\nimport (\n\t\"fmt\"\n\t\"mime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/Rhymen\/go-whatsapp\"\n\t\"github.com\/jpillora\/backoff\"\n)\n\n\/*\nImplement handling messages coming from WhatsApp\nCheck:\n- https:\/\/github.com\/Rhymen\/go-whatsapp#add-message-handlers\n- https:\/\/github.com\/Rhymen\/go-whatsapp\/blob\/master\/handler.go\n- https:\/\/github.com\/tulir\/mautrix-whatsapp\/tree\/master\/whatsapp-ext for more advanced command handling\n*\/\n\n\/\/ HandleError received from WhatsApp\nfunc (b *Bwhatsapp) HandleError(err error) {\n\t\/\/ ignore received invalid data errors. https:\/\/github.com\/42wim\/matterbridge\/issues\/843\n\t\/\/ ignore tag 174 errors. https:\/\/github.com\/42wim\/matterbridge\/issues\/1094\n\tif strings.Contains(err.Error(), \"error processing data: received invalid data\") ||\n\t\tstrings.Contains(err.Error(), \"invalid string with tag 174\") {\n\t\treturn\n\t}\n\n\tswitch err.(type) {\n\tcase *whatsapp.ErrConnectionClosed, *whatsapp.ErrConnectionFailed:\n\t\tb.reconnect(err)\n\tdefault:\n\t\tswitch err {\n\t\tcase whatsapp.ErrConnectionTimeout:\n\t\t\tb.reconnect(err)\n\t\tdefault:\n\t\t\tb.Log.Errorf(\"%v\", err)\n\t\t}\n\t}\n}\n\nfunc (b *Bwhatsapp) reconnect(err error) {\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\tfor {\n\t\td := bf.Duration()\n\n\t\tb.Log.Errorf(\"Connection failed, underlying error: %v\", err)\n\t\tb.Log.Infof(\"Waiting %s...\", d)\n\n\t\ttime.Sleep(d)\n\n\t\tb.Log.Info(\"Reconnecting...\")\n\n\t\terr := b.conn.Restore()\n\t\tif err == nil {\n\t\t\tbf.Reset()\n\t\t\tb.startedAt = uint64(time.Now().Unix())\n\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ HandleTextMessage sent from WhatsApp, relay it to the brige\nfunc (b *Bwhatsapp) HandleTextMessage(message whatsapp.TextMessage) {\n\tif message.Info.FromMe {\n\t\treturn\n\t}\n\t\/\/ whatsapp sends last messages to show context , cut them\n\tif message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tgroupJID := message.Info.RemoteJid\n\tsenderJID := message.Info.SenderJid\n\n\tif len(senderJID) == 0 {\n\t\tif message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\t\tsenderJID = *message.Info.Source.Participant\n\t\t}\n\t}\n\n\t\/\/ translate sender's JID to the nicest username we can get\n\tsenderName := b.getSenderName(senderJID)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\textText := message.Info.Source.Message.ExtendedTextMessage\n\tif extText != nil && extText.ContextInfo != nil && extText.ContextInfo.MentionedJid != nil {\n\t\t\/\/ handle user mentions\n\t\tfor _, mentionedJID := range extText.ContextInfo.MentionedJid {\n\t\t\tnumberAndSuffix := strings.SplitN(mentionedJID, \"@\", 2)\n\n\t\t\t\/\/ mentions comes as telephone numbers and we don't want to expose it to other bridges\n\t\t\t\/\/ replace it with something more meaninful to others\n\t\t\tmention := b.getSenderNotify(numberAndSuffix[0] + \"@s.whatsapp.net\")\n\t\t\tif mention == \"\" {\n\t\t\t\tmention = \"someone\"\n\t\t\t}\n\n\t\t\tmessage.Text = strings.Replace(message.Text, \"@\"+numberAndSuffix[0], \"@\"+mention, 1)\n\t\t}\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tText: message.Text,\n\t\tChannel: groupJID,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\t\/\/\tParentID: TODO, \/\/ TODO handle thread replies \/\/ map from Info.QuotedMessageID string\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleImageMessage sent from WhatsApp, relay it to the brige\nfunc (b *Bwhatsapp) HandleImageMessage(message whatsapp.ImageMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ rename .jfif to .jpg https:\/\/github.com\/42wim\/matterbridge\/issues\/1292\n\tif fileExt[0] == \".jfif\" {\n\t\tfileExt[0] = \".jpg\"\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with type %s\", filename, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download image failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, message.Caption, \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleVideoMessage downloads video messages\nfunc (b *Bwhatsapp) HandleVideoMessage(message whatsapp.VideoMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with size %#v and type %s\", filename, message.Length, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download video failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, message.Caption, \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleAudioMessage downloads audio messages\nfunc (b *Bwhatsapp) HandleAudioMessage(message whatsapp.AudioMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\tif len(fileExt) == 0 {\n\t\tfileExt = append(fileExt, \".ogg\")\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with size %#v and type %s\", filename, message.Length, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download audio failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, \"audio message\", \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n\n\/\/ HandleDocumentMessage downloads documents\nfunc (b *Bwhatsapp) HandleDocumentMessage(message whatsapp.DocumentMessage) {\n\tif message.Info.FromMe || message.Info.Timestamp < b.startedAt {\n\t\treturn\n\t}\n\n\tsenderJID := message.Info.SenderJid\n\tif len(message.Info.SenderJid) == 0 && message.Info.Source != nil && message.Info.Source.Participant != nil {\n\t\tsenderJID = *message.Info.Source.Participant\n\t}\n\n\tsenderName := b.getSenderName(message.Info.SenderJid)\n\tif senderName == \"\" {\n\t\tsenderName = \"Someone\" \/\/ don't expose telephone number\n\t}\n\n\trmsg := config.Message{\n\t\tUserID: senderJID,\n\t\tUsername: senderName,\n\t\tChannel: message.Info.RemoteJid,\n\t\tAccount: b.Account,\n\t\tProtocol: b.Protocol,\n\t\tExtra: make(map[string][]interface{}),\n\t\tID: message.Info.Id,\n\t}\n\n\tif avatarURL, exists := b.userAvatars[senderJID]; exists {\n\t\trmsg.Avatar = avatarURL\n\t}\n\n\tfileExt, err := mime.ExtensionsByType(message.Type)\n\tif err != nil {\n\t\tb.Log.Errorf(\"Mimetype detection error: %s\", err)\n\n\t\treturn\n\t}\n\n\tfilename := fmt.Sprintf(\"%v%v\", message.Info.Id, fileExt[0])\n\n\tb.Log.Debugf(\"Trying to download %s with type %s\", filename, message.Type)\n\n\tdata, err := message.Download()\n\tif err != nil {\n\t\tb.Log.Errorf(\"Download document message failed: %s\", err)\n\n\t\treturn\n\t}\n\n\t\/\/ Move file to bridge storage\n\thelper.HandleDownloadData(b.Log, &rmsg, filename, \"document\", \"\", &data, b.General)\n\n\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", senderJID, b.Account)\n\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\n\tb.Remote <- rmsg\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"github.com\/klauspost\/geoip-service\/geoip2\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/go:generate ffjson --nodecoder $GOFILE\n\n\/\/ ffjson: nodecoder\ntype ResponseCity struct {\n\tData *geoip2.City `json:\",omitempty\"`\n\tError string `json:\",omitempty\"`\n}\n\n\/\/ ffjson: nodecoder\ntype ResponseCountry struct {\n\tData *geoip2.Country `json:\",omitempty\"`\n\tError string `json:\",omitempty\"`\n}\n\nfunc main() {\n\tvar dbName = flag.String(\"db\", \"GeoLite2-City.mmdb\", \"File name of MaxMind GeoIP2 and GeoLite2 database\")\n\tvar lookup = flag.String(\"lookup\", \"city\", \"Specify which value to look up. Can be 'city' or 'country' depending on which database you load.\")\n\tvar listen = flag.String(\"listen\", \":5000\", \"Listen address and port, for instance 127.0.0.1:5000\")\n\tvar threads = flag.Int(\"threads\", runtime.NumCPU(), \"Number of threads to use. Defaults to number of detected cores\")\n\tvar pretty = flag.Bool(\"pretty\", false, \"Should output be formatted with newlines and intentation\")\n\tvar cacheSecs = flag.Int(\"cache\", 0, \"How many seconds should requests be cached. Set to 0 to disable\")\n\tflag.Parse()\n\n\truntime.GOMAXPROCS(*threads)\n\tvar memCache *cache.Cache\n\tif *cacheSecs > 0 {\n\t\tmemCache = cache.New(time.Duration(*cacheSecs)*time.Second, 1*time.Second)\n\t}\n\tlookupCity := true\n\tif *lookup == \"country\" {\n\t\tlookupCity = false\n\t} else if *lookup != \"city\" {\n\t\tlog.Fatalf(\"lookup parameter should be either 'city', or 'country', it is '%s'\", *lookup)\n\t}\n\n\tdb, err := geoip2.Open(*dbName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\tlog.Println(\"Loaded database \" + *dbName)\n\n\t\/\/ We dereference this to avoid a pretty big penalty under heavy load.\n\tprettyL := *pretty\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tvar ipText string\n\t\t\/\/ Prepare the response and queue sending the result.\n\t\tvar cached []byte\n\t\tvar returnError string\n\t\tvar result interface{} = nil\n\n\t\tdefer func() {\n\t\t\tvar j []byte\n\t\t\tvar err error\n\t\t\tif cached != nil {\n\t\t\t\tj = cached\n\t\t\t} else {\n\t\t\t\tcity, ok := result.(*geoip2.City)\n\t\t\t\tif ok {\n\t\t\t\t\tres := ResponseCity{Data: city, Error: returnError}\n\t\t\t\t\tif prettyL {\n\t\t\t\t\t\tj, err = json.MarshalIndent(res, \"\", \" \")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tj, err = res.MarshalJSON()\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tcountry, _ := result.(*geoip2.Country)\n\t\t\t\t\tres := ResponseCountry{Data: country, Error: returnError}\n\t\t\t\t\tif prettyL {\n\t\t\t\t\t\tj, err = json.MarshalIndent(res, \"\", \" \")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tj, err = res.MarshalJSON()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif memCache != nil && cached == nil {\n\t\t\t\tmemCache.Set(ipText, j, 0)\n\t\t\t}\n\t\t\tw.Write(j)\n\t\t}()\n\n\t\tipText = req.URL.Query().Get(\"ip\")\n\t\tif ipText == \"\" {\n\t\t\tipText = strings.Trim(req.URL.Path, \"\/\")\n\t\t}\n\t\tip := net.ParseIP(ipText)\n\t\tif ip == nil {\n\t\t\treturnError = \"unable to decode ip\"\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif memCache != nil {\n\t\t\tv, found := memCache.Get(ipText)\n\t\t\tif found {\n\t\t\t\tcached = v.([]byte)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif lookupCity {\n\t\t\tresult, err = db.City(ip)\n\t\t\tif err != nil {\n\t\t\t\treturnError = err.Error()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tresult, err = db.Country(ip)\n\t\t\tif err != nil {\n\t\t\t\treturnError = err.Error()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\n\tlog.Println(\"Listening on \" + *listen)\n\tlog.Fatal(http.ListenAndServe(*listen, nil))\n}\n<commit_msg>Add optional 'Access-Control-Allow-Origin' and always set 'Content-Type' to \"application\/json\" and set 'Last-Modified' to server startup time.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"github.com\/klauspost\/geoip-service\/geoip2\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/go:generate ffjson --nodecoder $GOFILE\n\n\/\/ ffjson: nodecoder\ntype ResponseCity struct {\n\tData *geoip2.City `json:\",omitempty\"`\n\tError string `json:\",omitempty\"`\n}\n\n\/\/ ffjson: nodecoder\ntype ResponseCountry struct {\n\tData *geoip2.Country `json:\",omitempty\"`\n\tError string `json:\",omitempty\"`\n}\n\nfunc main() {\n\tvar dbName = flag.String(\"db\", \"GeoLite2-City.mmdb\", \"File name of MaxMind GeoIP2 and GeoLite2 database\")\n\tvar lookup = flag.String(\"lookup\", \"city\", \"Specify which value to look up. Can be 'city' or 'country' depending on which database you load.\")\n\tvar listen = flag.String(\"listen\", \":5000\", \"Listen address and port, for instance 127.0.0.1:5000\")\n\tvar threads = flag.Int(\"threads\", runtime.NumCPU(), \"Number of threads to use. Defaults to number of detected cores\")\n\tvar pretty = flag.Bool(\"pretty\", false, \"Should output be formatted with newlines and intentation\")\n\tvar cacheSecs = flag.Int(\"cache\", 0, \"How many seconds should requests be cached. Set to 0 to disable\")\n\tvar originPolicy = flag.String(\"origin\", \"*\", `Value sent in the 'Access-Control-Allow-Origin' header. Set to \"\" to disable.`)\n\tserverStart := time.Now().Format(http.TimeFormat)\n\n\tflag.Parse()\n\n\truntime.GOMAXPROCS(*threads)\n\tvar memCache *cache.Cache\n\tif *cacheSecs > 0 {\n\t\tmemCache = cache.New(time.Duration(*cacheSecs)*time.Second, 1*time.Second)\n\t}\n\tlookupCity := true\n\tif *lookup == \"country\" {\n\t\tlookupCity = false\n\t} else if *lookup != \"city\" {\n\t\tlog.Fatalf(\"lookup parameter should be either 'city', or 'country', it is '%s'\", *lookup)\n\t}\n\n\tdb, err := geoip2.Open(*dbName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\tlog.Println(\"Loaded database \" + *dbName)\n\n\t\/\/ We dereference this to avoid a pretty big penalty under heavy load.\n\tprettyL := *pretty\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tvar ipText string\n\t\t\/\/ Prepare the response and queue sending the result.\n\t\tvar cached []byte\n\t\tvar returnError string\n\t\tvar result interface{} = nil\n\n\t\tdefer func() {\n\t\t\tvar j []byte\n\t\t\tvar err error\n\t\t\tif cached != nil {\n\t\t\t\tj = cached\n\t\t\t} else {\n\t\t\t\tcity, ok := result.(*geoip2.City)\n\t\t\t\tif ok {\n\t\t\t\t\tres := ResponseCity{Data: city, Error: returnError}\n\t\t\t\t\tif prettyL {\n\t\t\t\t\t\tj, err = json.MarshalIndent(res, \"\", \" \")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tj, err = res.MarshalJSON()\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tcountry, _ := result.(*geoip2.Country)\n\t\t\t\t\tres := ResponseCountry{Data: country, Error: returnError}\n\t\t\t\t\tif prettyL {\n\t\t\t\t\t\tj, err = json.MarshalIndent(res, \"\", \" \")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tj, err = res.MarshalJSON()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif memCache != nil && cached == nil {\n\t\t\t\tmemCache.Set(ipText, j, 0)\n\t\t\t}\n\t\t\tw.Write(j)\n\t\t}()\n\n\t\t\/\/ Set headers\n\t\tif *originPolicy != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", *originPolicy)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Header().Set(\"Last-Modified\", serverStart)\n\n\t\tipText = req.URL.Query().Get(\"ip\")\n\t\tif ipText == \"\" {\n\t\t\tipText = strings.Trim(req.URL.Path, \"\/\")\n\t\t}\n\t\tip := net.ParseIP(ipText)\n\t\tif ip == nil {\n\t\t\treturnError = \"unable to decode ip\"\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif memCache != nil {\n\t\t\tv, found := memCache.Get(ipText)\n\t\t\tif found {\n\t\t\t\tcached = v.([]byte)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif lookupCity {\n\t\t\tresult, err = db.City(ip)\n\t\t\tif err != nil {\n\t\t\t\treturnError = err.Error()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tresult, err = db.Country(ip)\n\t\t\tif err != nil {\n\t\t\t\treturnError = err.Error()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\n\tlog.Println(\"Listening on \" + *listen)\n\tlog.Fatal(http.ListenAndServe(*listen, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package gh\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nfunc ClientFromCredentials(username string, password string, OTP string) *github.Client {\n\n\ttp := github.BasicAuthTransport{\n\t\tUsername: strings.TrimSpace(username),\n\t\tPassword: strings.TrimSpace(password),\n\t\tOTP: strings.TrimSpace(OTP),\n\t}\n\n\treturn github.NewClient(tp.Client())\n}\n\nfunc ClientFromToken(token string) *github.Client {\n\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\n\treturn github.NewClient(tc)\n}\n\nfunc AnonClient() *github.Client {\n\treturn github.NewClient(nil)\n}\n\n\/\/ Add a new personal access tokens\nfunc Add(client *github.Client, name string, permissions []string) (*github.Authorization, error) {\n\n\tauthReq, notFound := GeneratePersonalAuthTokenRequest(name, permissions)\n\tif len(notFound) > 0 {\n\t\treturn nil, errors.New(\"Unknown permissions: \" + strings.Join(notFound, \",\"))\n\t}\n\n\tcreatedAuth, _, err := client.Authorizations.Create(authReq)\n\n\treturn createdAuth, err\n}\n\n\/\/ List personal access tokens generated via gh-api-cli on the remote\nfunc List(client *github.Client) (map[string]*github.Authorization, error) {\n\n\tret := make(map[string]*github.Authorization)\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Authorizations.List(opt)\n\n\tnamedAuth := regexp.MustCompile(`^([^:]+): generated via (gh-auth|gh-api-cli)`)\n\tfor _, v := range got {\n\t\tnote := \"\"\n\t\tif v.Note != nil {\n\t\t\tnote = *v.Note\n\t\t}\n\t\tif namedAuth.MatchString(note) {\n\t\t\tparts := namedAuth.FindAllStringSubmatch(note, -1)\n\t\t\tname := parts[0][1]\n\t\t\tret[name] = v\n\t\t}\n\t}\n\n\treturn ret, err\n}\n\n\/\/ Delete a personal access token on the remote\nfunc Delete(client *github.Client, id int) error {\n\n\t_, err := client.Authorizations.Delete(id)\n\n\treturn err\n}\n\n\/\/ Forge a request to create the personal access token on the remote\nfunc GeneratePersonalAuthTokenRequest(name string, permissions []string) (*github.AuthorizationRequest, []string) {\n\tscopes := map[string]github.Scope{\n\t\t\"user\": github.ScopeUser,\n\t\t\"user:email\": github.ScopeUserEmail,\n\t\t\"user:follow\": github.ScopeUserFollow,\n\t\t\"public_repo\": github.ScopePublicRepo,\n\t\t\"repo\": github.ScopeRepo,\n\t\t\"repo_deployment\": github.ScopeRepoDeployment,\n\t\t\"repo:status\": github.ScopeRepoStatus,\n\t\t\"delete_repo\": github.ScopeDeleteRepo,\n\t\t\"notifications\": github.ScopeNotifications,\n\t\t\"gist\": github.ScopeGist,\n\t\t\"read:repo_hook\": github.ScopeReadRepoHook,\n\t\t\"write:repo_hook\": github.ScopeWriteRepoHook,\n\t\t\"admin:repo_hook\": github.ScopeAdminRepoHook,\n\t\t\"admin:org_hook\": github.ScopeAdminOrgHook,\n\t\t\"read:org\": github.ScopeReadOrg,\n\t\t\"write:org\": github.ScopeWriteOrg,\n\t\t\"admin:org\": github.ScopeAdminOrg,\n\t\t\"read:public_key\": github.ScopeReadPublicKey,\n\t\t\"write:public_key\": github.ScopeWritePublicKey,\n\t\t\"admin:public_key\": github.ScopeAdminPublicKey,\n\t\t\"read:gpg_key\": github.ScopeReadGPGKey,\n\t\t\"write:gpg_key\": github.ScopeWriteGPGKey,\n\t\t\"admin:gpg_key\": github.ScopeAdminGPGKey,\n\t}\n\tnotFound := make([]string, 0)\n\tauth := github.AuthorizationRequest{\n\t\tNote: github.String(name + \": generated via gh-api-cli\"),\n\t\tScopes: []github.Scope{},\n\t\tFingerprint: github.String(name + time.Now().String()),\n\t}\n\tfor _, p := range permissions {\n\t\tif val, ok := scopes[p]; ok {\n\t\t\tauth.Scopes = append(auth.Scopes, val)\n\t\t} else {\n\t\t\tnotFound = append(notFound, p)\n\t\t}\n\t}\n\treturn &auth, notFound\n}\n\n\/\/ List all releases on the remote\nfunc ListReleases(client *github.Client, owner string, repo string) ([]*github.RepositoryRelease, error) {\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Repositories.ListReleases(owner, repo, opt)\n\n\treturn got, err\n}\n\n\/\/ Get a release by its id on the the remote\nfunc GetReleaseById(client *github.Client, owner string, repo string, id int) (*github.RepositoryRelease, error) {\n\tvar ret *github.RepositoryRelease\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tfor _, r := range releases {\n\t\tif *r.ID == id {\n\t\t\tret = r\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\n\/\/ List public releases on the remote\nfunc ListPublicReleases(client *github.Client, owner string, repo string) ([]*github.RepositoryRelease, error) {\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Repositories.ListReleases(owner, repo, opt)\n\n\treturn got, err\n}\n\n\/\/ List public release assets on the remote\nfunc ListReleaseAssets(client *github.Client, owner string, repo string, release github.RepositoryRelease) ([]*github.ReleaseAsset, error) {\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Repositories.ListReleaseAssets(owner, repo, *release.ID, opt)\n\n\treturn got, err\n}\n\n\/\/ Tells if a release exitst on the remote\nfunc ReleaseExists(client *github.Client, owner string, repo string, version string, draft bool) (bool, error) {\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\texists := false\n\tfor _, r := range releases {\n\t\tif (*r.TagName == version || *r.Name == version) && *r.Draft == draft {\n\t\t\texists = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn exists, err\n}\n\n\/\/ Transform a version string into its id\nfunc ReleaseId(client *github.Client, owner string, repo string, version string) (int, error) {\n\n\tid := -1\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn id, err\n\t}\n\n\tfor _, r := range releases {\n\t\tif *r.TagName == version || *r.Name == version {\n\t\t\tid = *r.ID\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id == -1 {\n\t\terr = errors.New(\"Release '\" + version + \"' not found!\")\n\t}\n\n\treturn id, err\n}\n\n\/\/ Create a new release on the remote\nfunc CreateRelease(client *github.Client, owner string, repo string, version string, authorName string, authorEmail string, draft bool, body string) (*github.RepositoryRelease, error) {\n\n\texists, err := ReleaseExists(client, owner, repo, version, draft)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif exists {\n\t\treturn nil, errors.New(\"Release '\" + version + \"' already exists!\")\n\t}\n\tv, err := semver.NewVersion(version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topt := &github.RepositoryRelease{\n\t\tName: github.String(version),\n\t\tTagName: github.String(version),\n\t\tDraft: github.Bool(draft),\n\t\tBody: github.String(body),\n\t\tPrerelease: github.Bool(v.Prerelease() != \"\"),\n\t\tAuthor: &github.CommitAuthor{\n\t\t\tName: github.String(authorName),\n\t\t\tEmail: github.String(authorEmail),\n\t\t},\n\t}\n\trelease, _, err := client.Repositories.CreateRelease(owner, repo, opt)\n\n\treturn release, err\n}\n\n\/\/ Delete a release on the remote\nfunc DeleteRelease(client *github.Client, owner string, repo string, version string) error {\n\n\tid, err := ReleaseId(client, owner, repo, version)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Repositories.DeleteRelease(owner, repo, id)\n\n\treturn err\n}\n\n\/\/ Delete a release asset on the remote\nfunc DeleteReleaseAsset(client *github.Client, owner string, repo string, id int) error {\n\n\t_, err := client.Repositories.DeleteReleaseAsset(owner, repo, id)\n\n\treturn err\n}\n\n\/\/ Upload multiple assets the remote release\nfunc UploadReleaseAssets(client *github.Client, owner string, repo string, version string, files []string) []error {\n\n\terrs := make([]error, 0)\n\n\tid, err := ReleaseId(client, owner, repo, version)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\treturn errs\n\t}\n\n\tc := make(chan error)\n\tUploadMultipleReleaseAssets(client, owner, repo, id, files, c)\n\n\tindex := 0\n\tfor upErr := range c {\n\t\tif upErr != nil {\n\t\t\terrs = append(errs, upErr)\n\t\t}\n\t\tindex++\n\t\tif index == len(files)-1 {\n\t\t\tclose(c)\n\t\t}\n\t}\n\n\treturn errs\n}\nfunc UploadMultipleReleaseAssets(client *github.Client, owner string, repo string, releaseId int, files []string, info chan<- error) {\n\tfor index, file := range files {\n\t\tgo func(index int, file string) {\n\t\t\tinfo <- UploadReleaseAsset(client, owner, repo, releaseId, file)\n\t\t}(index, file)\n\t}\n}\n\n\/\/ Upload one asset on the remote\nfunc UploadReleaseAsset(client *github.Client, owner string, repo string, releaseId int, file string) error {\n\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\tif err == nil {\n\t\topt := &github.UploadOptions{Name: filepath.Base(file)}\n\t\t_, _, err = client.Repositories.UploadReleaseAsset(owner, repo, releaseId, opt, f)\n\t}\n\n\treturn err\n}\n\n\/\/ Download an asset from a release, handles redirect.\nfunc DownloadAsset(url string, out io.Writer) error {\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\ts3URL, err := res.Location()\n\tif err == nil && s3URL.String() != \"\" {\n\t\tres, err = http.Get(s3URL.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t}\n\n\t_, err = io.Copy(out, res.Body)\n\n\treturn err\n}\n<commit_msg>GetReleaseById(break): renamed to GetReleaseByID, ReleaseId(break): renamed to ReleaseID, CreateRelease(break): removed authoremail parameter, applied linter<commit_after>package gh\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/\/ ClientFromCredentials creates a new github.Client instance,\n\/\/ using a BasicAuthTransport.\nfunc ClientFromCredentials(username string, password string, OTP string) *github.Client {\n\n\ttp := github.BasicAuthTransport{\n\t\tUsername: strings.TrimSpace(username),\n\t\tPassword: strings.TrimSpace(password),\n\t\tOTP: strings.TrimSpace(OTP),\n\t}\n\n\treturn github.NewClient(tp.Client())\n}\n\n\/\/ ClientFromToken creates a new github.Client instance,\n\/\/ using an OAuth2 transport.\nfunc ClientFromToken(token string) *github.Client {\n\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\n\treturn github.NewClient(tc)\n}\n\n\/\/ AnonClient creates a new github.Client instance,\n\/\/ using an anonymous transport.\nfunc AnonClient() *github.Client {\n\treturn github.NewClient(nil)\n}\n\n\/\/ Add creates a new authorization token on the remote.\nfunc Add(client *github.Client, name string, permissions []string) (*github.Authorization, error) {\n\n\tauthReq, notFound := GeneratePersonalAuthTokenRequest(name, permissions)\n\tif len(notFound) > 0 {\n\t\treturn nil, errors.New(\"Unknown permissions: \" + strings.Join(notFound, \",\"))\n\t}\n\n\tcreatedAuth, _, err := client.Authorizations.Create(authReq)\n\n\treturn createdAuth, err\n}\n\n\/\/ List retrieve existing authorizations generated by gh-api-cli.\nfunc List(client *github.Client) (map[string]*github.Authorization, error) {\n\n\tret := make(map[string]*github.Authorization)\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Authorizations.List(opt)\n\n\tnamedAuth := regexp.MustCompile(`^([^:]+): generated via (gh-auth|gh-api-cli)`)\n\tfor _, v := range got {\n\t\tnote := \"\"\n\t\tif v.Note != nil {\n\t\t\tnote = *v.Note\n\t\t}\n\t\tif namedAuth.MatchString(note) {\n\t\t\tparts := namedAuth.FindAllStringSubmatch(note, -1)\n\t\t\tname := parts[0][1]\n\t\t\tret[name] = v\n\t\t}\n\t}\n\n\treturn ret, err\n}\n\n\/\/ Delete removes a personal access token on the remote\nfunc Delete(client *github.Client, id int) error {\n\n\t_, err := client.Authorizations.Delete(id)\n\n\treturn err\n}\n\n\/\/ GeneratePersonalAuthTokenRequest forges a request to create the personal access token on the remote\nfunc GeneratePersonalAuthTokenRequest(name string, permissions []string) (*github.AuthorizationRequest, []string) {\n\tscopes := map[string]github.Scope{\n\t\t\"user\": github.ScopeUser,\n\t\t\"user:email\": github.ScopeUserEmail,\n\t\t\"user:follow\": github.ScopeUserFollow,\n\t\t\"public_repo\": github.ScopePublicRepo,\n\t\t\"repo\": github.ScopeRepo,\n\t\t\"repo_deployment\": github.ScopeRepoDeployment,\n\t\t\"repo:status\": github.ScopeRepoStatus,\n\t\t\"delete_repo\": github.ScopeDeleteRepo,\n\t\t\"notifications\": github.ScopeNotifications,\n\t\t\"gist\": github.ScopeGist,\n\t\t\"read:repo_hook\": github.ScopeReadRepoHook,\n\t\t\"write:repo_hook\": github.ScopeWriteRepoHook,\n\t\t\"admin:repo_hook\": github.ScopeAdminRepoHook,\n\t\t\"admin:org_hook\": github.ScopeAdminOrgHook,\n\t\t\"read:org\": github.ScopeReadOrg,\n\t\t\"write:org\": github.ScopeWriteOrg,\n\t\t\"admin:org\": github.ScopeAdminOrg,\n\t\t\"read:public_key\": github.ScopeReadPublicKey,\n\t\t\"write:public_key\": github.ScopeWritePublicKey,\n\t\t\"admin:public_key\": github.ScopeAdminPublicKey,\n\t\t\"read:gpg_key\": github.ScopeReadGPGKey,\n\t\t\"write:gpg_key\": github.ScopeWriteGPGKey,\n\t\t\"admin:gpg_key\": github.ScopeAdminGPGKey,\n\t}\n\tnotFound := make([]string, 0)\n\tauth := github.AuthorizationRequest{\n\t\tNote: github.String(name + \": generated via gh-api-cli\"),\n\t\tScopes: []github.Scope{},\n\t\tFingerprint: github.String(name + time.Now().String()),\n\t}\n\tfor _, p := range permissions {\n\t\tif val, ok := scopes[p]; ok {\n\t\t\tauth.Scopes = append(auth.Scopes, val)\n\t\t} else {\n\t\t\tnotFound = append(notFound, p)\n\t\t}\n\t}\n\treturn &auth, notFound\n}\n\n\/\/ ListReleases fetches first 200 releases on the remote.\nfunc ListReleases(client *github.Client, owner string, repo string) ([]*github.RepositoryRelease, error) {\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Repositories.ListReleases(owner, repo, opt)\n\n\treturn got, err\n}\n\n\/\/ GetReleaseByID gets a release by its id on the the remote\nfunc GetReleaseByID(client *github.Client, owner string, repo string, id int) (*github.RepositoryRelease, error) {\n\tvar ret *github.RepositoryRelease\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tfor _, r := range releases {\n\t\tif *r.ID == id {\n\t\t\tret = r\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\n\/\/ ListReleaseAssets retrieves the first 200 release assets on the remote\nfunc ListReleaseAssets(client *github.Client, owner string, repo string, release github.RepositoryRelease) ([]*github.ReleaseAsset, error) {\n\n\topt := &github.ListOptions{Page: 1, PerPage: 200}\n\tgot, _, err := client.Repositories.ListReleaseAssets(owner, repo, *release.ID, opt)\n\n\treturn got, err\n}\n\n\/\/ ReleaseExists tells if a release exits by its version on the remote\nfunc ReleaseExists(client *github.Client, owner string, repo string, version string, draft bool) (bool, error) {\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\texists := false\n\tfor _, r := range releases {\n\t\tif (*r.TagName == version || *r.Name == version) && *r.Draft == draft {\n\t\t\texists = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn exists, err\n}\n\n\/\/ ReleaseID get ID of a release givne its version.\nfunc ReleaseID(client *github.Client, owner string, repo string, version string) (int, error) {\n\n\tid := -1\n\n\treleases, err := ListReleases(client, owner, repo)\n\tif err != nil {\n\t\treturn id, err\n\t}\n\n\tfor _, r := range releases {\n\t\tif *r.TagName == version || *r.Name == version {\n\t\t\tid = *r.ID\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id == -1 {\n\t\terr = errors.New(\"Release '\" + version + \"' not found!\")\n\t}\n\n\treturn id, err\n}\n\n\/\/ CreateRelease creates a new release on the remote repository.\nfunc CreateRelease(client *github.Client, owner, repo, version, author string, draft bool, body string) (*github.RepositoryRelease, error) {\n\n\texists, err := ReleaseExists(client, owner, repo, version, draft)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif exists {\n\t\treturn nil, errors.New(\"Release '\" + version + \"' already exists!\")\n\t}\n\tv, err := semver.NewVersion(version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserAuthor := author\n\tif userAuthor == \"\" {\n\t\tuserAuthor = owner\n\t}\n\n\tuser, resp, err := client.Users.Get(userAuthor)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"err: %v\\nresp: %v\", err, resp)\n\t}\n\n\topt := &github.RepositoryRelease{\n\t\tName: github.String(version),\n\t\tTagName: github.String(version),\n\t\tDraft: github.Bool(draft),\n\t\tBody: github.String(body),\n\t\tPrerelease: github.Bool(v.Prerelease() != \"\"),\n\t\tAuthor: user,\n\t}\n\trelease, _, err := client.Repositories.CreateRelease(owner, repo, opt)\n\n\treturn release, err\n}\n\n\/\/ DeleteRelease deletes a release from the remote repository.\nfunc DeleteRelease(client *github.Client, owner string, repo string, version string) error {\n\n\tid, err := ReleaseID(client, owner, repo, version)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Repositories.DeleteRelease(owner, repo, id)\n\n\treturn err\n}\n\n\/\/ DeleteReleaseAsset deletes a release asset from the remote repository.\nfunc DeleteReleaseAsset(client *github.Client, owner string, repo string, id int) error {\n\n\t_, err := client.Repositories.DeleteReleaseAsset(owner, repo, id)\n\n\treturn err\n}\n\n\/\/ UploadReleaseAssets uploads multiple assets to the remote release identified by its version.\nfunc UploadReleaseAssets(client *github.Client, owner string, repo string, version string, files []string) []error {\n\n\terrs := make([]error, 0)\n\n\tid, err := ReleaseID(client, owner, repo, version)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t\treturn errs\n\t}\n\n\tc := make(chan error)\n\tUploadMultipleReleaseAssets(client, owner, repo, id, files, c)\n\n\tindex := 0\n\tfor upErr := range c {\n\t\tif upErr != nil {\n\t\t\terrs = append(errs, upErr)\n\t\t}\n\t\tindex++\n\t\tif index == len(files)-1 {\n\t\t\tclose(c)\n\t\t}\n\t}\n\n\treturn errs\n}\n\n\/\/ UploadMultipleReleaseAssets uploads multiple assets to the remote release identified by its release ID.\nfunc UploadMultipleReleaseAssets(client *github.Client, owner string, repo string, releaseID int, files []string, info chan<- error) {\n\tfor index, file := range files {\n\t\tgo func(index int, file string) {\n\t\t\tinfo <- UploadReleaseAsset(client, owner, repo, releaseID, file)\n\t\t}(index, file)\n\t}\n}\n\n\/\/ UploadReleaseAsset uploads one asset to the remote release identified by its release ID.\nfunc UploadReleaseAsset(client *github.Client, owner string, repo string, releaseID int, file string) error {\n\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\tif err == nil {\n\t\topt := &github.UploadOptions{Name: filepath.Base(file)}\n\t\t_, _, err = client.Repositories.UploadReleaseAsset(owner, repo, releaseID, opt, f)\n\t}\n\n\treturn err\n}\n\n\/\/ DownloadAsset downloads an asset from a release, it handles redirect.\nfunc DownloadAsset(url string, out io.Writer) error {\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\ts3URL, err := res.Location()\n\tif err == nil && s3URL.String() != \"\" {\n\t\tres, err = http.Get(s3URL.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t}\n\n\t_, err = io.Copy(out, res.Body)\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"context\"\n)\n\nfunc checkRequest(proxyURL, testPath, want string, timeout time.Duration) error {\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := client.Get(proxyURL + testPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to issue a frontend GET request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read the response body: %v\", err)\n\t}\n\tif got := string(body); got != want {\n\t\treturn fmt.Errorf(\"unexpected proxy frontend response; got %q, want %q\", got, want)\n\t}\n\treturn nil\n}\n\nfunc RunLocalProxy(ctx context.Context, t *testing.T) (int, error) {\n\t\/\/ This assumes that \"Make build\" has been run\n\tproxyArgs := strings.Join(append(\n\t\t[]string{\"${GOPATH}\/bin\/inverting-proxy\"},\n\t\t\"--port=0\"),\n\t\t\" \")\n\tproxyCmd := exec.CommandContext(ctx, \"\/bin\/bash\", \"-c\", proxyArgs)\n\n\tvar proxyOut bytes.Buffer\n\tproxyCmd.Stdout = &proxyOut\n\tproxyCmd.Stderr = &proxyOut\n\tif err := proxyCmd.Start(); err != nil {\n\t\tt.Fatalf(\"Failed to start the inverting-proxy binary: %v\", err)\n\t}\n\tgo func() {\n\t\terr := proxyCmd.Wait()\n\t\tt.Logf(\"Proxy result: %v, stdout\/stderr: %q\", err, proxyOut.String())\n\t}()\n\tfor i := 0; i < 30; i++ {\n\t\tfor _, line := range strings.Split(proxyOut.String(), \"\\n\") {\n\t\t\tif strings.Contains(line, \"Listening on [::]:\") {\n\t\t\t\tportStr := strings.TrimSpace(strings.Split(line, \"Listening on [::]:\")[1])\n\t\t\t\treturn strconv.Atoi(portStr)\n\t\t\t}\n\t\t}\n\t\tt.Logf(\"Waiting for the locally running proxy to start...\")\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn 0, fmt.Errorf(\"Locally-running proxy failed to start up in time: %q\", proxyOut.String())\n}\n\nfunc TestWithInMemoryProxyAndBackend(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tbackendHomeDir, err := ioutil.TempDir(\"\", \"backend-home\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to set up a temporary home directory for the test: %v\", err)\n\t}\n\tgcloudCfg := filepath.Join(backendHomeDir, \".config\", \"gcloud\")\n\tif err := os.MkdirAll(gcloudCfg, os.ModePerm); err != nil {\n\t\tt.Fatalf(\"Failed to set up a temporary home directory for the test: %v\", err)\n\t}\n\tbackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Logf(\"Responding to backend request to %q\", r.URL.Path)\n\t\tw.Write([]byte(r.URL.Path))\n\t}))\n\tfakeMetadata := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Emulate slow responses from the metadata server, to check that the agent\n\t\t\/\/ is appropriately caching the results.\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tif strings.HasPrefix(r.URL.Path, \"\/computeMetadata\/v1\/project\/project-id\") {\n\t\t\tio.WriteString(w, \"12345\")\n\t\t\treturn\n\t\t}\n\t\tif !(strings.HasPrefix(r.URL.Path, \"\/computeMetadata\/v1\/instance\/service-accounts\/\") && strings.HasSuffix(r.URL.Path, \"\/token\")) {\n\t\t\tio.WriteString(w, \"ok\")\n\t\t\treturn\n\t\t}\n\t\tvar fakeToken struct {\n\t\t\tAccessToken string `json:\"access_token\"`\n\t\t\tExpiresInSec int `json:\"expires_in\"`\n\t\t\tTokenType string `json:\"token_type\"`\n\t\t}\n\t\tfakeToken.AccessToken = \"fakeToken\"\n\t\tfakeToken.ExpiresInSec = 1000\n\t\tfakeToken.TokenType = \"Bearer\"\n\t\tif err := json.NewEncoder(w).Encode(&fakeToken); err != nil {\n\t\t\tt.Logf(\"Failed to encode a fake service account credential: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}))\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tbackend.Close()\n\t\tfakeMetadata.Close()\n\t}()\n\tbackendURL, err := url.Parse(backend.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to parse the backend URL: %v\", err)\n\t}\n\tproxyPort, err := RunLocalProxy(ctx, t)\n\tproxyURL := fmt.Sprintf(\"http:\/\/localhost:%d\", proxyPort)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to run the local inverting proxy: %v\", err)\n\t}\n\tt.Logf(\"Started backend at localhost:%s and proxy at %s\", backendURL.Port(), proxyURL)\n\n\t\/\/ This assumes that \"Make build\" has been run\n\targs := strings.Join(append(\n\t\t[]string{\"${GOPATH}\/bin\/proxy-forwarding-agent\"},\n\t\t\"--debug=true\",\n\t\t\"--backend=testBackend\",\n\t\t\"--proxy\", proxyURL+\"\/\",\n\t\t\"--host=localhost:\"+backendURL.Port(),\n\t\t\"--inject-banner=\\\\<div\\\\>FOO\\\\<\/div\\\\>\"),\n\t\t\" \")\n\tagentCmd := exec.CommandContext(ctx, \"\/bin\/bash\", \"-c\", args)\n\n\tvar out bytes.Buffer\n\tagentCmd.Stdout = &out\n\tagentCmd.Stderr = &out\n\tagentCmd.Env = append(os.Environ(), \"PATH=\", \"HOME=\"+backendHomeDir, \"GCE_METADATA_HOST=\"+strings.TrimPrefix(fakeMetadata.URL, \"http:\/\/\"))\n\tif err := agentCmd.Start(); err != nil {\n\t\tt.Fatalf(\"Failed to start the agent binary: %v\", err)\n\t}\n\tdefer func() {\n\t\tcancel()\n\t\terr := agentCmd.Wait()\n\t\tt.Logf(\"Agent result: %v, stdout\/stderr: %q\", err, out.String())\n\t}()\n\n\t\/\/ Send one request through the proxy to make sure the agent has come up.\n\t\/\/\n\t\/\/ We give this initial request a long time to complete, as the agent takes\n\t\/\/ a long time to start up.\n\ttestPath := \"\/some\/request\/path\"\n\tif err := checkRequest(proxyURL, testPath, testPath, time.Second); err != nil {\n\t\tt.Fatalf(\"Failed to send the initial request: %v\", err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\t\/\/ The timeout below was chosen to be overly generous to prevent test flakiness.\n\t\t\/\/\n\t\t\/\/ This has the consequence that it will only catch severe latency regressions.\n\t\t\/\/\n\t\t\/\/ The specific value was chosen by running the test in a loop 100 times and\n\t\t\/\/ incrementing the value until all 100 runs passed.\n\t\tif err := checkRequest(proxyURL, testPath, testPath, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Failed to send request %d: %v\", i, err)\n\t\t}\n\t}\n}\n<commit_msg>Remove flag that is not (yet) defined<commit_after>\/*\nCopyright 2018 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"context\"\n)\n\nfunc checkRequest(proxyURL, testPath, want string, timeout time.Duration) error {\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := client.Get(proxyURL + testPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to issue a frontend GET request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read the response body: %v\", err)\n\t}\n\tif got := string(body); got != want {\n\t\treturn fmt.Errorf(\"unexpected proxy frontend response; got %q, want %q\", got, want)\n\t}\n\treturn nil\n}\n\nfunc RunLocalProxy(ctx context.Context, t *testing.T) (int, error) {\n\t\/\/ This assumes that \"Make build\" has been run\n\tproxyArgs := strings.Join(append(\n\t\t[]string{\"${GOPATH}\/bin\/inverting-proxy\"},\n\t\t\"--port=0\"),\n\t\t\" \")\n\tproxyCmd := exec.CommandContext(ctx, \"\/bin\/bash\", \"-c\", proxyArgs)\n\n\tvar proxyOut bytes.Buffer\n\tproxyCmd.Stdout = &proxyOut\n\tproxyCmd.Stderr = &proxyOut\n\tif err := proxyCmd.Start(); err != nil {\n\t\tt.Fatalf(\"Failed to start the inverting-proxy binary: %v\", err)\n\t}\n\tgo func() {\n\t\terr := proxyCmd.Wait()\n\t\tt.Logf(\"Proxy result: %v, stdout\/stderr: %q\", err, proxyOut.String())\n\t}()\n\tfor i := 0; i < 30; i++ {\n\t\tfor _, line := range strings.Split(proxyOut.String(), \"\\n\") {\n\t\t\tif strings.Contains(line, \"Listening on [::]:\") {\n\t\t\t\tportStr := strings.TrimSpace(strings.Split(line, \"Listening on [::]:\")[1])\n\t\t\t\treturn strconv.Atoi(portStr)\n\t\t\t}\n\t\t}\n\t\tt.Logf(\"Waiting for the locally running proxy to start...\")\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn 0, fmt.Errorf(\"Locally-running proxy failed to start up in time: %q\", proxyOut.String())\n}\n\nfunc TestWithInMemoryProxyAndBackend(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tbackendHomeDir, err := ioutil.TempDir(\"\", \"backend-home\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to set up a temporary home directory for the test: %v\", err)\n\t}\n\tgcloudCfg := filepath.Join(backendHomeDir, \".config\", \"gcloud\")\n\tif err := os.MkdirAll(gcloudCfg, os.ModePerm); err != nil {\n\t\tt.Fatalf(\"Failed to set up a temporary home directory for the test: %v\", err)\n\t}\n\tbackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Logf(\"Responding to backend request to %q\", r.URL.Path)\n\t\tw.Write([]byte(r.URL.Path))\n\t}))\n\tfakeMetadata := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Emulate slow responses from the metadata server, to check that the agent\n\t\t\/\/ is appropriately caching the results.\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tif strings.HasPrefix(r.URL.Path, \"\/computeMetadata\/v1\/project\/project-id\") {\n\t\t\tio.WriteString(w, \"12345\")\n\t\t\treturn\n\t\t}\n\t\tif !(strings.HasPrefix(r.URL.Path, \"\/computeMetadata\/v1\/instance\/service-accounts\/\") && strings.HasSuffix(r.URL.Path, \"\/token\")) {\n\t\t\tio.WriteString(w, \"ok\")\n\t\t\treturn\n\t\t}\n\t\tvar fakeToken struct {\n\t\t\tAccessToken string `json:\"access_token\"`\n\t\t\tExpiresInSec int `json:\"expires_in\"`\n\t\t\tTokenType string `json:\"token_type\"`\n\t\t}\n\t\tfakeToken.AccessToken = \"fakeToken\"\n\t\tfakeToken.ExpiresInSec = 1000\n\t\tfakeToken.TokenType = \"Bearer\"\n\t\tif err := json.NewEncoder(w).Encode(&fakeToken); err != nil {\n\t\t\tt.Logf(\"Failed to encode a fake service account credential: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}))\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tbackend.Close()\n\t\tfakeMetadata.Close()\n\t}()\n\tbackendURL, err := url.Parse(backend.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to parse the backend URL: %v\", err)\n\t}\n\tproxyPort, err := RunLocalProxy(ctx, t)\n\tproxyURL := fmt.Sprintf(\"http:\/\/localhost:%d\", proxyPort)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to run the local inverting proxy: %v\", err)\n\t}\n\tt.Logf(\"Started backend at localhost:%s and proxy at %s\", backendURL.Port(), proxyURL)\n\n\t\/\/ This assumes that \"Make build\" has been run\n\targs := strings.Join(append(\n\t\t[]string{\"${GOPATH}\/bin\/proxy-forwarding-agent\"},\n\t\t\"--debug=true\",\n\t\t\"--backend=testBackend\",\n\t\t\"--proxy\", proxyURL+\"\/\",\n\t\t\"--host=localhost:\"+backendURL.Port(),\n\t\t\" \")\n\tagentCmd := exec.CommandContext(ctx, \"\/bin\/bash\", \"-c\", args)\n\n\tvar out bytes.Buffer\n\tagentCmd.Stdout = &out\n\tagentCmd.Stderr = &out\n\tagentCmd.Env = append(os.Environ(), \"PATH=\", \"HOME=\"+backendHomeDir, \"GCE_METADATA_HOST=\"+strings.TrimPrefix(fakeMetadata.URL, \"http:\/\/\"))\n\tif err := agentCmd.Start(); err != nil {\n\t\tt.Fatalf(\"Failed to start the agent binary: %v\", err)\n\t}\n\tdefer func() {\n\t\tcancel()\n\t\terr := agentCmd.Wait()\n\t\tt.Logf(\"Agent result: %v, stdout\/stderr: %q\", err, out.String())\n\t}()\n\n\t\/\/ Send one request through the proxy to make sure the agent has come up.\n\t\/\/\n\t\/\/ We give this initial request a long time to complete, as the agent takes\n\t\/\/ a long time to start up.\n\ttestPath := \"\/some\/request\/path\"\n\tif err := checkRequest(proxyURL, testPath, testPath, time.Second); err != nil {\n\t\tt.Fatalf(\"Failed to send the initial request: %v\", err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\t\/\/ The timeout below was chosen to be overly generous to prevent test flakiness.\n\t\t\/\/\n\t\t\/\/ This has the consequence that it will only catch severe latency regressions.\n\t\t\/\/\n\t\t\/\/ The specific value was chosen by running the test in a loop 100 times and\n\t\t\/\/ incrementing the value until all 100 runs passed.\n\t\tif err := checkRequest(proxyURL, testPath, testPath, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Failed to send request %d: %v\", i, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\/\/ \"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/lib\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n)\n\nfunc getBody(url string) (response *http.Response) {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\tresponse, err := client.Get(url)\n\tlog.Check(log.FatalLevel, \"Getting response from \"+url, err)\n\t\/\/ defer response.Body.Close()\n\treturn\n}\n\nfunc getList() (list []map[string]interface{}) {\n\tresp := getBody(\"https:\/\/peer.noip.me:8338\/kurjun\/rest\/file\/list\")\n\tdefer resp.Body.Close()\n\tjsonlist, err := ioutil.ReadAll(resp.Body)\n\tlog.Check(log.FatalLevel, \"Reading response\", err)\n\tlog.Check(log.FatalLevel, \"Parsing file list\", json.Unmarshal(jsonlist, &list))\n\treturn\n}\n\nfunc Update(name string, check bool) {\n\tswitch name {\n\tcase \"rh\":\n\t\tavlb := 0\n\t\tlcl := 0\n\n\t\tf, err := ioutil.ReadFile(config.Agent.AppPrefix + \"\/meta\/package.yaml\")\n\t\tif !log.Check(log.DebugLevel, \"Reading file package.yaml\", err) {\n\t\t\tlines := strings.Split(string(f), \"\\n\")\n\t\t\tfor _, v := range lines {\n\t\t\t\tif strings.HasPrefix(v, \"version: \") {\n\t\t\t\t\tif version := strings.Split(strings.TrimPrefix(v, \"version: \"), \"-\"); len(version) > 1 {\n\t\t\t\t\t\tlcl, err = strconv.Atoi(version[1])\n\t\t\t\t\t\tlog.Check(log.FatalLevel, \"Converting timestamp to int\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, v := range getList() {\n\t\t\tname := v[\"name\"].(string)\n\t\t\t\/\/ md5 := strings.TrimPrefix(v[\"id\"].(string), \"raw.\")\n\t\t\tif strings.HasPrefix(name, \"subutai\") && strings.HasSuffix(name, \".snap\") {\n\t\t\t\ttrim := strings.TrimPrefix(name, \"subutai_\")\n\t\t\t\ttrim = strings.TrimRight(trim, \".snap\")\n\t\t\t\tif version := strings.Split(trim, \"-\"); len(version) > 1 {\n\t\t\t\t\tavlb, err = strconv.Atoi(version[1])\n\t\t\t\t\tlog.Check(log.FatalLevel, \"Converting timestamp to int\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif avlb <= lcl {\n\t\t\tlog.Info(\"No update is available\")\n\t\t\tos.Exit(1)\n\t\t} else if check {\n\t\t\tlog.Info(\"Update is avalable\")\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tfile, err := os.Create(\"\/tmp\/\" + name)\n\t\tlog.Check(log.FatalLevel, \"Creating update file\", err)\n\t\tdefer file.Close()\n\t\tresp := getBody(\"http:\/\/peer.noip.me:8338\/kurjun\/rest\/file\/get?id=\" + name)\n\t\tdefer resp.Body.Close()\n\t\t_, err = io.Copy(file, resp.Body)\n\t\tlog.Check(log.FatalLevel, \"Writing response to file\", err)\n\n\t\tlog.Check(log.FatalLevel, \"Installing update\",\n\t\t\texec.Command(\"snappy\", \"install\", \"--allow-unauthenticated\", \"\/tmp\/\"+name).Start())\n\n\tdefault:\n\t\tif !container.IsContainer(name) {\n\t\t\tlog.Error(name + \" - no such instance\")\n\t\t}\n\t\t_, err := container.AttachExec(name, []string{\"apt-get\", \"update\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Updating apt index\", err)\n\t\toutput, err := container.AttachExec(name, []string{\"apt-get\", \"upgrade\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-s\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Checking for available updade\", err)\n\t\tif len(output) == 0 {\n\t\t\tlog.Info(\"No update is available\")\n\t\t\tos.Exit(1)\n\t\t} else if check {\n\t\t\tlog.Info(\"Update is avalable\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\t_, err = container.AttachExec(name, []string{\"apt-get\", \"upgrade\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Updating container\", err)\n\t}\n}\n<commit_msg>Added check for multiple updates<commit_after>package lib\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/lib\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n)\n\nfunc getBody(url string) (response *http.Response) {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\tresponse, err := client.Get(url)\n\tlog.Check(log.FatalLevel, \"Getting response from \"+url, err)\n\treturn\n}\n\nfunc getList() (list []map[string]interface{}) {\n\tresp := getBody(\"https:\/\/peer.noip.me:8338\/kurjun\/rest\/file\/list\")\n\tdefer resp.Body.Close()\n\tjsonlist, err := ioutil.ReadAll(resp.Body)\n\tlog.Check(log.FatalLevel, \"Reading response\", err)\n\tlog.Check(log.FatalLevel, \"Parsing file list\", json.Unmarshal(jsonlist, &list))\n\treturn\n}\n\nfunc Update(name string, check bool) {\n\tswitch name {\n\tcase \"rh\":\n\t\tvar date int64\n\t\tavlb := 0\n\t\tlcl := 0\n\n\t\tf, err := ioutil.ReadFile(config.Agent.AppPrefix + \"\/meta\/package.yaml\")\n\t\tif !log.Check(log.DebugLevel, \"Reading file package.yaml\", err) {\n\t\t\tlines := strings.Split(string(f), \"\\n\")\n\t\t\tfor _, v := range lines {\n\t\t\t\tif strings.HasPrefix(v, \"version: \") {\n\t\t\t\t\tif version := strings.Split(strings.TrimPrefix(v, \"version: \"), \"-\"); len(version) > 1 {\n\t\t\t\t\t\tlcl, err = strconv.Atoi(version[1])\n\t\t\t\t\t\tlog.Check(log.FatalLevel, \"Converting timestamp to int\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, v := range getList() {\n\t\t\tname := v[\"name\"].(string)\n\t\t\t\/\/ md5 := strings.TrimPrefix(v[\"id\"].(string), \"raw.\")\n\t\t\tif strings.HasPrefix(name, \"subutai\") && strings.HasSuffix(name, \".snap\") {\n\t\t\t\ttrim := strings.TrimPrefix(name, \"subutai_\")\n\t\t\t\ttrim = strings.TrimRight(trim, \".snap\")\n\t\t\t\tif version := strings.Split(trim, \"-\"); len(version) > 1 {\n\t\t\t\t\ttmp, err := strconv.Atoi(version[1])\n\t\t\t\t\tlog.Check(log.FatalLevel, \"Converting timestamp to int\", err)\n\t\t\t\t\tif tmp > avlb {\n\t\t\t\t\t\tavlb = tmp\n\t\t\t\t\t\tdate, err = strconv.ParseInt(version[1], 10, 64)\n\t\t\t\t\t\tlog.Check(log.FatalLevel, \"Getting update info\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif lcl > avlb {\n\t\t\tlog.Info(\"No update is available\")\n\t\t\tos.Exit(1)\n\t\t} else if check {\n\t\t\tlog.Info(\"Update from \" + time.Unix(date, 0).String() + \" is avalable\")\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tfile, err := os.Create(\"\/tmp\/\" + name)\n\t\tlog.Check(log.FatalLevel, \"Creating update file\", err)\n\t\tdefer file.Close()\n\t\tresp := getBody(\"http:\/\/peer.noip.me:8338\/kurjun\/rest\/file\/get?id=\" + name)\n\t\tdefer resp.Body.Close()\n\t\t_, err = io.Copy(file, resp.Body)\n\t\tlog.Check(log.FatalLevel, \"Writing response to file\", err)\n\n\t\tlog.Check(log.FatalLevel, \"Installing update\",\n\t\t\texec.Command(\"snappy\", \"install\", \"--allow-unauthenticated\", \"\/tmp\/\"+name).Start())\n\t\tlog.Check(log.FatalLevel, \"Removing update file\", os.Remove(\"\/tmp\/\"+name))\n\n\tdefault:\n\t\tif !container.IsContainer(name) {\n\t\t\tlog.Error(name + \" - no such instance\")\n\t\t}\n\t\t_, err := container.AttachExec(name, []string{\"apt-get\", \"update\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Updating apt index\", err)\n\t\toutput, err := container.AttachExec(name, []string{\"apt-get\", \"upgrade\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-s\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Checking for available updade\", err)\n\t\tif len(output) == 0 {\n\t\t\tlog.Info(\"No update is available\")\n\t\t\tos.Exit(1)\n\t\t} else if check {\n\t\t\tlog.Info(\"Update is avalable\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\t_, err = container.AttachExec(name, []string{\"apt-get\", \"upgrade\", \"-y\", \"--force-yes\", \"-o\", \"Acquire::http::Timeout=5\", \"-qq\"})\n\t\tlog.Check(log.FatalLevel, \"Updating container\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/container-storage-interface\/spec\/lib\/go\/csi\"\n\t\"github.com\/docker\/swarmkit\/api\"\n)\n\ntype NodePluginInterface interface {\n\tNodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error)\n\tNodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error\n\tNodeUnstageVolume(ctx context.Context, req []*api.VolumeAssignment) error\n\tNodePublishVolume(ctx context.Context, req []*api.VolumeAssignment) error\n\tNodeUnpublishVolume(ctx context.Context, req []*api.VolumeAssignment) error\n}\n\ntype volumePublishStatus struct {\n\t\/\/ stagingPath is staging path of volume\n\tstagingPath string\n\n\t\/\/ isPublished keeps track if the volume is published.\n\tisPublished bool\n\n\t\/\/ publishedPath is published path of volume\n\tpublishedPath string\n}\n\n\/\/ plugin represents an individual CSI node plugin\ntype NodePlugin struct {\n\t\/\/ name is the name of the plugin, which is also the name used as the\n\t\/\/ Driver.Name field\n\tname string\n\n\t\/\/ node ID is identifier for the node.\n\tnodeID string\n\n\t\/\/ socket is the unix socket to connect to this plugin at.\n\tsocket string\n\n\t\/\/ cc is the grpc client connection\n\tcc *grpc.ClientConn\n\n\t\/\/ idClient is the identity service client\n\tidClient csi.IdentityClient\n\n\t\/\/ nodeClient is the node service client\n\tnodeClient csi.NodeClient\n\n\t\/\/ volumeMap is the map from volume ID to Volume. Will place a volume once it is staged,\n\t\/\/ remove it from the map for unstage.\n\t\/\/ TODO: Make this map persistent if the swarm node goes down\n\tvolumeMap map[string]*volumePublishStatus\n\n\t\/\/ mu for volumeMap\n\tmu sync.RWMutex\n\n\t\/\/ staging indicates that the plugin has staging capabilities.\n\tstaging bool\n}\n\nconst TargetStagePath string = \"\/var\/lib\/docker\/stage\/%s\"\n\nconst TargetPublishPath string = \"\/var\/lib\/docker\/publish\/%s\"\n\nfunc NewNodePlugin(name string, nodeID string) *NodePlugin {\n\treturn &NodePlugin{\n\t\tname: name,\n\t\tnodeID: nodeID,\n\t\tvolumeMap: make(map[string]*volumePublishStatus),\n\t}\n}\n\n\/\/ connect is a private method that sets up the identity client and node\n\/\/ client from a grpc client. it exists separately so that testing code can\n\/\/ substitute in fake clients without a grpc connection\nfunc (np *NodePlugin) connect(ctx context.Context, cc *grpc.ClientConn) error {\n\tnp.cc = cc\n\t\/\/ first, probe the plugin, to ensure that it exists and is ready to go\n\tidc := csi.NewIdentityClient(cc)\n\tnp.idClient = idc\n\n\tnp.nodeClient = csi.NewNodeClient(cc)\n\n\treturn nil\n}\n\nfunc (np *NodePlugin) Client() csi.NodeClient {\n\treturn np.nodeClient\n}\n\nfunc (np *NodePlugin) init(ctx context.Context) error {\n\tprobe, err := np.idClient.Probe(ctx, &csi.ProbeRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif probe.Ready != nil && !probe.Ready.Value {\n\t\treturn status.Error(codes.FailedPrecondition, \"Plugin is not Ready\")\n\t}\n\n\tresp, err := np.nodeClient.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})\n\tif err != nil {\n\t\t\/\/ TODO(ameyag): handle\n\t\treturn err\n\t}\n\tif resp == nil {\n\t\treturn nil\n\t}\n\tfor _, c := range resp.Capabilities {\n\t\tif rpc := c.GetRpc(); rpc != nil {\n\t\t\tswitch rpc.Type {\n\t\t\tcase csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME:\n\t\t\t\tnp.staging = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ GetPublishedPath returns the path at which the provided volume ID is published.\n\/\/ Returns an empty string if the volume does not exist.\nfunc (np *NodePlugin) GetPublishedPath(volumeID string) string {\n\tnp.mu.RLock()\n\tdefer np.mu.RUnlock()\n\tif volInfo, ok := np.volumeMap[volumeID]; ok {\n\t\tif volInfo.isPublished {\n\t\t\treturn volInfo.publishedPath\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (np *NodePlugin) NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error) {\n\n\tresp := &csi.NodeGetInfoResponse{\n\t\tNodeId: np.nodeID,\n\t}\n\n\treturn makeNodeInfo(resp), nil\n}\n\nfunc (np *NodePlugin) NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tif !np.staging {\n\t\treturn nil\n\t}\n\n\tvolID := req.VolumeID\n\tstagingTarget := fmt.Sprintf(TargetStagePath, volID)\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\n\tv := &volumePublishStatus{\n\t\tstagingPath: stagingTarget,\n\t}\n\n\tnp.volumeMap[volID] = v\n\n\treturn nil\n}\n\nfunc (np *NodePlugin) NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tif !np.staging {\n\t\treturn nil\n\t}\n\n\tvolID := req.VolumeID\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.FailedPrecondition, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tif v.isPublished {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not unpublished\", volID)\n\t\t}\n\t\tdelete(np.volumeMap, volID)\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n\nfunc (np *NodePlugin) NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tvolID := req.VolumeID\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tpublishPath := fmt.Sprintf(TargetPublishPath, volID)\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tv.publishedPath = publishPath\n\t\tv.isPublished = true\n\t\treturn nil\n\t} else if !np.staging {\n\t\t\/\/ If staging is not supported on plugin, we need to add volume to the map.\n\t\tv := &volumePublishStatus{\n\t\t\tpublishedPath: publishPath,\n\t\t\tisPublished: true,\n\t\t}\n\t\tnp.volumeMap[volID] = v\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n\nfunc (np *NodePlugin) NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tvolID := req.VolumeID\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tv.publishedPath = \"\"\n\t\tv.isPublished = false\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n<commit_msg>Add rpc call<commit_after>package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/container-storage-interface\/spec\/lib\/go\/csi\"\n\t\"github.com\/docker\/swarmkit\/api\"\n)\n\ntype NodePluginInterface interface {\n\tNodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error)\n\tNodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error\n\tNodeUnstageVolume(ctx context.Context, req []*api.VolumeAssignment) error\n\tNodePublishVolume(ctx context.Context, req []*api.VolumeAssignment) error\n\tNodeUnpublishVolume(ctx context.Context, req []*api.VolumeAssignment) error\n}\n\ntype volumePublishStatus struct {\n\t\/\/ stagingPath is staging path of volume\n\tstagingPath string\n\n\t\/\/ isPublished keeps track if the volume is published.\n\tisPublished bool\n\n\t\/\/ publishedPath is published path of volume\n\tpublishedPath string\n}\n\n\/\/ plugin represents an individual CSI node plugin\ntype NodePlugin struct {\n\t\/\/ name is the name of the plugin, which is also the name used as the\n\t\/\/ Driver.Name field\n\tname string\n\n\t\/\/ node ID is identifier for the node.\n\tnodeID string\n\n\t\/\/ socket is the unix socket to connect to this plugin at.\n\tsocket string\n\n\t\/\/ cc is the grpc client connection\n\tcc *grpc.ClientConn\n\n\t\/\/ idClient is the identity service client\n\tidClient csi.IdentityClient\n\n\t\/\/ nodeClient is the node service client\n\tnodeClient csi.NodeClient\n\n\t\/\/ volumeMap is the map from volume ID to Volume. Will place a volume once it is staged,\n\t\/\/ remove it from the map for unstage.\n\t\/\/ TODO: Make this map persistent if the swarm node goes down\n\tvolumeMap map[string]*volumePublishStatus\n\n\t\/\/ mu for volumeMap\n\tmu sync.RWMutex\n\n\t\/\/ staging indicates that the plugin has staging capabilities.\n\tstaging bool\n}\n\nconst TargetStagePath string = \"\/var\/lib\/docker\/stage\/%s\"\n\nconst TargetPublishPath string = \"\/var\/lib\/docker\/publish\/%s\"\n\nfunc NewNodePlugin(name string, nodeID string) *NodePlugin {\n\treturn &NodePlugin{\n\t\tname: name,\n\t\tnodeID: nodeID,\n\t\tvolumeMap: make(map[string]*volumePublishStatus),\n\t}\n}\n\n\/\/ connect is a private method that sets up the identity client and node\n\/\/ client from a grpc client. it exists separately so that testing code can\n\/\/ substitute in fake clients without a grpc connection\nfunc (np *NodePlugin) connect(ctx context.Context) error {\n\tcc, err := grpc.DialContext(ctx, np.socket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnp.cc = cc\n\t\/\/ first, probe the plugin, to ensure that it exists and is ready to go\n\tidc := csi.NewIdentityClient(cc)\n\tnp.idClient = idc\n\n\tnp.nodeClient = csi.NewNodeClient(cc)\n\n\treturn nil\n}\n\nfunc (np *NodePlugin) Client(ctx context.Context) (csi.NodeClient, error) {\n\tif np.nodeClient == nil {\n\t\tif err := np.connect(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn np.nodeClient, nil\n}\n\nfunc (np *NodePlugin) init(ctx context.Context) error {\n\tprobe, err := np.idClient.Probe(ctx, &csi.ProbeRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif probe.Ready != nil && !probe.Ready.Value {\n\t\treturn status.Error(codes.FailedPrecondition, \"Plugin is not Ready\")\n\t}\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := c.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})\n\tif err != nil {\n\t\t\/\/ TODO(ameyag): handle\n\t\treturn err\n\t}\n\tif resp == nil {\n\t\treturn nil\n\t}\n\tfor _, c := range resp.Capabilities {\n\t\tif rpc := c.GetRpc(); rpc != nil {\n\t\t\tswitch rpc.Type {\n\t\t\tcase csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME:\n\t\t\t\tnp.staging = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ GetPublishedPath returns the path at which the provided volume ID is published.\n\/\/ Returns an empty string if the volume does not exist.\nfunc (np *NodePlugin) GetPublishedPath(volumeID string) string {\n\tnp.mu.RLock()\n\tdefer np.mu.RUnlock()\n\tif volInfo, ok := np.volumeMap[volumeID]; ok {\n\t\tif volInfo.isPublished {\n\t\t\treturn volInfo.publishedPath\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (np *NodePlugin) NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error) {\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn makeNodeInfo(resp), nil\n}\n\nfunc (np *NodePlugin) NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tif !np.staging {\n\t\treturn nil\n\t}\n\n\tvolID := req.VolumeID\n\tstagingTarget := fmt.Sprintf(TargetStagePath, volID)\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{\n\t\tVolumeId: volID,\n\t\tStagingTargetPath: stagingTarget,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv := &volumePublishStatus{\n\t\tstagingPath: stagingTarget,\n\t}\n\n\tnp.volumeMap[volID] = v\n\n\treturn nil\n}\n\nfunc (np *NodePlugin) NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tif !np.staging {\n\t\treturn nil\n\t}\n\n\tvolID := req.VolumeID\n\tstagingTarget := fmt.Sprintf(TargetStagePath, volID)\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.FailedPrecondition, \"Volume ID missing in request\")\n\t}\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{\n\t\tVolumeId: volID,\n\t\tStagingTargetPath: stagingTarget,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tif v.isPublished {\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not unpublished\", volID)\n\t\t}\n\t\tdelete(np.volumeMap, volID)\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n\nfunc (np *NodePlugin) NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tvolID := req.VolumeID\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\n\tpublishPath := fmt.Sprintf(TargetPublishPath, volID)\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{\n\t\tVolumeId: volID,\n\t\tTargetPath: publishPath,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tv.publishedPath = publishPath\n\t\tv.isPublished = true\n\t\treturn nil\n\t} else if !np.staging {\n\t\t\/\/ If staging is not supported on plugin, we need to add volume to the map.\n\t\tv := &volumePublishStatus{\n\t\t\tpublishedPath: publishPath,\n\t\t\tisPublished: true,\n\t\t}\n\t\tnp.volumeMap[volID] = v\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n\nfunc (np *NodePlugin) NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error {\n\n\tvolID := req.VolumeID\n\n\t\/\/ Check arguments\n\tif len(volID) == 0 {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\tnp.mu.Lock()\n\tdefer np.mu.Unlock()\n\tpublishPath := fmt.Sprintf(TargetPublishPath, volID)\n\n\tc, err := np.Client(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: volID,\n\t\tTargetPath: publishPath,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif v, ok := np.volumeMap[volID]; ok {\n\t\tv.publishedPath = \"\"\n\t\tv.isPublished = false\n\t\treturn nil\n\t}\n\n\treturn status.Errorf(codes.FailedPrecondition, \"VolumeID %s is not staged\", volID)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype CronJob struct {\n\tminute []int\n\thour []int\n\tdayOfMonth []int\n\tmonth []int\n\tdayOfWeek []int\n\tline string\n}\n\n\/\/ NewCronJob creates CronJob from crontab line\nfunc NewCronJob(job string) CronJob {\n\tsplited := strings.Split(job, \" \")\n\tminuteBlock := splited[0]\n\t\/\/ hourBlock := splited[1]\n\t\/\/ dayOfMonthBlock := splited[2]\n\t\/\/ monthBlock := splited[3]\n\t\/\/ dayOfWeekBlock := splited[4]\n\n\tvar cycle int\n\tif strings.Contains(minuteBlock, \"\/\") {\n\t\tsplited := strings.Split(minuteBlock, \"\/\")\n\t\tminuteBlock = splited[0]\n\t\tcycle, _ = strconv.Atoi(splited[1])\n\t}\n\n\tvar minuteRange []int\n\tif minuteBlock == \"*\" {\n\t\tminuteRange = MinutesRange.all\n\n\t} else if strings.Contains(minuteBlock, \",\") {\n\t\tminuteStrs := strings.Split(minuteBlock, \",\")\n\t\tminuteRange = make([]int, 0, len(minuteStrs))\n\t\tfor _, minuteStr := range minuteStrs {\n\t\t\tminute, _ := strconv.Atoi(minuteStr)\n\t\t\tminuteRange = append(minuteRange, minute)\n\t\t}\n\n\t} else if strings.Contains(minuteBlock, \"-\") {\n\t\tminuteStrs := strings.Split(minuteBlock, \"-\")\n\t\tstart, _ := strconv.Atoi(minuteStrs[0])\n\t\tend, _ := strconv.Atoi(minuteStrs[1])\n\t\tminuteRange = newCronRange(start, end).all\n\n\t} else {\n\t\tminute, _ := strconv.Atoi(minuteBlock)\n\t\tminuteRange = []int{minute}\n\t}\n\n\tif cycle != 0 {\n\t\tcycles := make([]int, 0)\n\t\tfor index, minute := range minuteRange {\n\t\t\tif index%cycle == 0 {\n\t\t\t\tcycles = append(cycles, minute)\n\t\t\t}\n\t\t}\n\t\tminuteRange = cycles\n\t}\n\n\treturn CronJob{\n\t\tminute: minuteRange,\n\t\thour: []int{17},\n\t\tdayOfMonth: []int{3},\n\t\tmonth: []int{10},\n\t\tdayOfWeek: []int{2},\n\t\tline: job,\n\t}\n}\n\ntype CronRange struct {\n\tfrom int\n\tto int\n\tall []int\n}\n\nfunc newCronRange(from int, to int) CronRange {\n\tr := CronRange{from: from, to: to}\n\tall := make([]int, 0, r.to-r.from+1)\n\tfor index := r.from; index <= r.to; index++ {\n\t\tall = append(all, index)\n\t}\n\tr.all = all\n\treturn r\n}\n\nvar MinutesRange = newCronRange(0, 59)\n<commit_msg>refactoring<commit_after>package main\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype CronJob struct {\n\tminute []int\n\thour []int\n\tdayOfMonth []int\n\tmonth []int\n\tdayOfWeek []int\n\tline string\n}\n\n\/\/ NewCronJob creates CronJob from crontab line\nfunc NewCronJob(job string) CronJob {\n\tsplited := strings.Split(job, \" \")\n\tminuteBlock := splited[0]\n\t\/\/ hourBlock := splited[1]\n\t\/\/ dayOfMonthBlock := splited[2]\n\t\/\/ monthBlock := splited[3]\n\t\/\/ dayOfWeekBlock := splited[4]\n\n\treturn CronJob{\n\t\tminute: parse(minuteBlock, MinutesRange),\n\t\thour: []int{17},\n\t\tdayOfMonth: []int{3},\n\t\tmonth: []int{10},\n\t\tdayOfWeek: []int{2},\n\t\tline: job,\n\t}\n}\n\ntype CronRange struct {\n\tfrom int\n\tto int\n\tall []int\n}\n\nfunc newCronRange(from int, to int) CronRange {\n\tr := CronRange{from: from, to: to}\n\tall := make([]int, 0, r.to-r.from+1)\n\tfor index := r.from; index <= r.to; index++ {\n\t\tall = append(all, index)\n\t}\n\tr.all = all\n\treturn r\n}\n\nvar MinutesRange = newCronRange(0, 59)\n\nfunc parse(block string, maxRange CronRange) []int {\n\tvar cycle int\n\tif strings.Contains(block, \"\/\") {\n\t\tsplited := strings.Split(block, \"\/\")\n\t\tblock = splited[0]\n\t\tcycle, _ = strconv.Atoi(splited[1])\n\t}\n\n\tvar blockRange []int\n\tif block == \"*\" {\n\t\tblockRange = maxRange.all\n\n\t} else if strings.Contains(block, \",\") {\n\t\tsplited := strings.Split(block, \",\")\n\t\tblockRange = make([]int, 0, len(splited))\n\t\tfor _, s := range splited {\n\t\t\titem, _ := strconv.Atoi(s)\n\t\t\tblockRange = append(blockRange, item)\n\t\t}\n\n\t} else if strings.Contains(block, \"-\") {\n\t\tsplited := strings.Split(block, \"-\")\n\t\tstart, _ := strconv.Atoi(splited[0])\n\t\tend, _ := strconv.Atoi(splited[1])\n\t\tblockRange = newCronRange(start, end).all\n\n\t} else {\n\t\titem, _ := strconv.Atoi(block)\n\t\tblockRange = []int{item}\n\t}\n\n\t\/\/ `\/`を含んでいる場合は、skipする要素を除外して詰め直し\n\tif cycle != 0 {\n\t\tcycles := make([]int, 0)\n\t\tfor index, item := range blockRange {\n\t\t\tif index%cycle == 0 {\n\t\t\t\tcycles = append(cycles, item)\n\t\t\t}\n\t\t}\n\t\tblockRange = cycles\n\t}\n\n\treturn blockRange\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/aci\"\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n)\n\nvar Debug bool\n\nfunc warn(s string, i ...interface{}) {\n\ts = fmt.Sprintf(s, i...)\n\tfmt.Fprintln(os.Stderr, strings.TrimSuffix(s, \"\\n\"))\n}\n\nfunc die(s string, i ...interface{}) {\n\twarn(s, i...)\n\tos.Exit(1)\n}\n\nfunc debug(i ...interface{}) {\n\tif Debug {\n\t\ts := fmt.Sprint(i...)\n\t\tfmt.Fprintln(os.Stderr, strings.TrimSuffix(s, \"\\n\"))\n\t}\n}\n\ntype StringVector []string\n\nfunc (v *StringVector) String() string {\n\treturn `\"` + strings.Join(*v, `\" \"`) + `\"`\n}\n\nfunc (v *StringVector) Set(str string) error {\n\t*v = append(*v, str)\n\treturn nil\n}\n\nfunc main() {\n\tvar (\n\t\texecOpts StringVector\n\t\tgoDefaultBinaryDesc string\n\t\tgoBinaryOpt string\n\t)\n\n\t\/\/ Find the go binary\n\tgocmd, err := exec.LookPath(\"go\")\n\n\tflag.Var(&execOpts, \"exec\", \"Parameters passed to app, can be used multiple times\")\n\tif err != nil {\n\t\tgoDefaultBinaryDesc = \"Go binary to use (default: none found in $PATH, so it must be provided)\"\n\t} else {\n\t\tgoDefaultBinaryDesc = \"Go binary to use (default: whatever go in $PATH)\"\n\t}\n\tflag.StringVar(&goBinaryOpt, \"go-binary\", gocmd, goDefaultBinaryDesc)\n\tflag.Parse()\n\tif os.Getenv(\"GOPATH\") != \"\" {\n\t\twarn(\"GOPATH envvar is ignored\")\n\t}\n\tif os.Getenv(\"GOROOT\") != \"\" {\n\t\twarn(\"GOROOT envvar is ignored, use --go-binary=\\\"$GOROOT\/bin\/go\\\" option instead\")\n\t}\n\tif os.Getenv(\"GOACI_DEBUG\") != \"\" {\n\t\tDebug = true\n\t}\n\n\tif goBinaryOpt == \"\" {\n\t\tdie(\"go binary not found\")\n\t}\n\n\t\/\/ Set up a temporary directory for everything (gopath and builds)\n\ttmpdir, err := ioutil.TempDir(\"\", \"goaci\")\n\tif err != nil {\n\t\tdie(\"error setting up temporary directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\n\t\/\/ Scratch build dir for aci\n\tacidir := filepath.Join(tmpdir, \"aci\")\n\n\t\/\/ Be explicit with gobin\n\tgobin := filepath.Join(tmpdir, \"bin\")\n\n\t\/\/ Construct args for a go get that does a static build\n\targs := []string{\n\t\tgoBinaryOpt,\n\t\t\"get\",\n\t\t\"-a\",\n\t\t\"-tags\", \"netgo\",\n\t\t\"-ldflags\", \"'-w'\",\n\t\t\/\/ 1.4\n\t\t\"-installsuffix\", \"cgo\",\n\t}\n\n\t\/\/ Extract the package name (which is the last arg).\n\tvar ns string\n\tfor _, arg := range os.Args[1:] {\n\t\t\/\/ TODO(jonboulle): try to pass the other args on to go get?\n\t\t\/\/\t\targs = append(args, arg)\n\t\tns = arg\n\t}\n\n\tname, err := types.NewACName(ns)\n\t\/\/ TODO(jonboulle): could this ever actually happen?\n\tif err != nil {\n\t\tdie(\"bad app name: %v\", err)\n\t}\n\targs = append(args, ns)\n\n\t\/\/ Use the last component, e.g. example.com\/my\/app --> app\n\tofn := filepath.Base(ns) + \".aci\"\n\tmode := os.O_CREATE | os.O_WRONLY | os.O_TRUNC\n\tof, err := os.OpenFile(ofn, mode, 0644)\n\tif err != nil {\n\t\tdie(\"error opening output file: %v\", err)\n\t}\n\n\tcmd := exec.Cmd{\n\t\tEnv: []string{\n\t\t\t\"GOPATH=\" + tmpdir,\n\t\t\t\"GOBIN=\" + gobin,\n\t\t\t\"CGO_ENABLED=0\",\n\t\t\t\"PATH=\" + os.Getenv(\"PATH\"),\n\t\t},\n\t\tPath: goBinaryOpt,\n\t\tArgs: args,\n\t\tStderr: os.Stderr,\n\t\tStdout: os.Stdout,\n\t}\n\tdebug(\"env:\", cmd.Env)\n\tdebug(\"running command:\", strings.Join(cmd.Args, \" \"))\n\tif err := cmd.Run(); err != nil {\n\t\tdie(\"error running go: %v\", err)\n\t}\n\n\t\/\/ Check that we got 1 binary from the go get command\n\tfi, err := ioutil.ReadDir(gobin)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tswitch {\n\tcase len(fi) < 1:\n\t\tdie(\"no binaries found in gobin\")\n\tcase len(fi) > 1:\n\t\tdebug(fmt.Sprint(fi))\n\t\tdie(\"can't handle multiple binaries\")\n\t}\n\tfn := fi[0].Name()\n\tdebug(\"found binary: \", fn)\n\n\t\/\/ Set up rootfs for ACI layout\n\trfs := filepath.Join(acidir, \"rootfs\")\n\terr = os.MkdirAll(rfs, 0755)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\n\t\/\/ Move the binary into the rootfs\n\tep := filepath.Join(rfs, fn)\n\terr = os.Rename(filepath.Join(gobin, fn), ep)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tdebug(\"moved binary to:\", ep)\n\n\texec := []string{filepath.Join(\"\/\", fn)}\n\texec = append(exec, execOpts...)\n\t\/\/ Build the ACI\n\tim := schema.ImageManifest{\n\t\tACKind: types.ACKind(\"ImageManifest\"),\n\t\tACVersion: schema.AppContainerVersion,\n\t\tName: *name,\n\t\tApp: &types.App{\n\t\t\tExec: exec,\n\t\t\tUser: \"0\",\n\t\t\tGroup: \"0\",\n\t\t},\n\t}\n\tdebug(im)\n\n\tgw := gzip.NewWriter(of)\n\ttr := tar.NewWriter(gw)\n\n\tdefer func() {\n\t\ttr.Close()\n\t\tgw.Close()\n\t\tof.Close()\n\t}()\n\n\tiw := aci.NewImageWriter(im, tr)\n\terr = filepath.Walk(acidir, aci.BuildWalker(acidir, iw))\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\terr = iw.Close()\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tfmt.Println(\"Wrote\", of.Name())\n}\n\n\/\/ strip replaces all characters that are not [a-Z_] with _\nfunc strip(in string) string {\n\tout := bytes.Buffer{}\n\tfor _, c := range in {\n\t\tif !strings.ContainsRune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTVWXYZ0123456789_\", c) {\n\t\t\tc = '_'\n\t\t}\n\t\tif _, err := out.WriteRune(c); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn out.String()\n}\n<commit_msg>Allow specifying custom GOPATH<commit_after>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/aci\"\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n)\n\nvar Debug bool\n\nfunc warn(s string, i ...interface{}) {\n\ts = fmt.Sprintf(s, i...)\n\tfmt.Fprintln(os.Stderr, strings.TrimSuffix(s, \"\\n\"))\n}\n\nfunc die(s string, i ...interface{}) {\n\twarn(s, i...)\n\tos.Exit(1)\n}\n\nfunc debug(i ...interface{}) {\n\tif Debug {\n\t\ts := fmt.Sprint(i...)\n\t\tfmt.Fprintln(os.Stderr, strings.TrimSuffix(s, \"\\n\"))\n\t}\n}\n\ntype StringVector []string\n\nfunc (v *StringVector) String() string {\n\treturn `\"` + strings.Join(*v, `\" \"`) + `\"`\n}\n\nfunc (v *StringVector) Set(str string) error {\n\t*v = append(*v, str)\n\treturn nil\n}\n\nfunc main() {\n\tvar (\n\t\texecOpts StringVector\n\t\tgoDefaultBinaryDesc string\n\t\tgoBinaryOpt string\n\t\tgoPathOpt string\n\t)\n\n\t\/\/ Find the go binary\n\tgocmd, err := exec.LookPath(\"go\")\n\n\tflag.Var(&execOpts, \"exec\", \"Parameters passed to app, can be used multiple times\")\n\tif err != nil {\n\t\tgoDefaultBinaryDesc = \"Go binary to use (default: none found in $PATH, so it must be provided)\"\n\t} else {\n\t\tgoDefaultBinaryDesc = \"Go binary to use (default: whatever go in $PATH)\"\n\t}\n\tflag.StringVar(&goBinaryOpt, \"go-binary\", gocmd, goDefaultBinaryDesc)\n\tflag.StringVar(&goPathOpt, \"go-path\", \"\", \"Custom GOPATH (default: a temporary directory)\")\n\tflag.Parse()\n\tif os.Getenv(\"GOPATH\") != \"\" {\n\t\twarn(\"GOPATH envvar is ignored, use --go-path=\\\"$GOPATH\\\" option instead\")\n\t}\n\tif os.Getenv(\"GOROOT\") != \"\" {\n\t\twarn(\"GOROOT envvar is ignored, use --go-binary=\\\"$GOROOT\/bin\/go\\\" option instead\")\n\t}\n\tif os.Getenv(\"GOACI_DEBUG\") != \"\" {\n\t\tDebug = true\n\t}\n\n\tif goBinaryOpt == \"\" {\n\t\tdie(\"go binary not found\")\n\t}\n\n\t\/\/ Set up a temporary directory for everything (gopath and builds)\n\ttmpdir, err := ioutil.TempDir(\"\", \"goaci\")\n\tif err != nil {\n\t\tdie(\"error setting up temporary directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(tmpdir)\n\tif goPathOpt == \"\" {\n\t\tgoPathOpt = tmpdir\n\t}\n\n\t\/\/ Scratch build dir for aci\n\tacidir := filepath.Join(tmpdir, \"aci\")\n\n\t\/\/ Let's put final binary in tmpdir\n\tgobin := filepath.Join(tmpdir, \"bin\")\n\n\t\/\/ Construct args for a go get that does a static build\n\targs := []string{\n\t\tgoBinaryOpt,\n\t\t\"get\",\n\t\t\"-a\",\n\t\t\"-tags\", \"netgo\",\n\t\t\"-ldflags\", \"'-w'\",\n\t\t\/\/ 1.4\n\t\t\"-installsuffix\", \"cgo\",\n\t}\n\n\t\/\/ Extract the package name (which is the last arg).\n\tvar ns string\n\tfor _, arg := range os.Args[1:] {\n\t\t\/\/ TODO(jonboulle): try to pass the other args on to go get?\n\t\t\/\/\t\targs = append(args, arg)\n\t\tns = arg\n\t}\n\n\tname, err := types.NewACName(ns)\n\t\/\/ TODO(jonboulle): could this ever actually happen?\n\tif err != nil {\n\t\tdie(\"bad app name: %v\", err)\n\t}\n\targs = append(args, ns)\n\n\t\/\/ Use the last component, e.g. example.com\/my\/app --> app\n\tofn := filepath.Base(ns) + \".aci\"\n\tmode := os.O_CREATE | os.O_WRONLY | os.O_TRUNC\n\tof, err := os.OpenFile(ofn, mode, 0644)\n\tif err != nil {\n\t\tdie(\"error opening output file: %v\", err)\n\t}\n\n\tcmd := exec.Cmd{\n\t\tEnv: []string{\n\t\t\t\"GOPATH=\" + goPathOpt,\n\t\t\t\"GOBIN=\" + gobin,\n\t\t\t\"CGO_ENABLED=0\",\n\t\t\t\"PATH=\" + os.Getenv(\"PATH\"),\n\t\t},\n\t\tPath: goBinaryOpt,\n\t\tArgs: args,\n\t\tStderr: os.Stderr,\n\t\tStdout: os.Stdout,\n\t}\n\tdebug(\"env:\", cmd.Env)\n\tdebug(\"running command:\", strings.Join(cmd.Args, \" \"))\n\tif err := cmd.Run(); err != nil {\n\t\tdie(\"error running go: %v\", err)\n\t}\n\n\t\/\/ Check that we got 1 binary from the go get command\n\tfi, err := ioutil.ReadDir(gobin)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tswitch {\n\tcase len(fi) < 1:\n\t\tdie(\"no binaries found in gobin\")\n\tcase len(fi) > 1:\n\t\tdebug(fmt.Sprint(fi))\n\t\tdie(\"can't handle multiple binaries\")\n\t}\n\tfn := fi[0].Name()\n\tdebug(\"found binary: \", fn)\n\n\t\/\/ Set up rootfs for ACI layout\n\trfs := filepath.Join(acidir, \"rootfs\")\n\terr = os.MkdirAll(rfs, 0755)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\n\t\/\/ Move the binary into the rootfs\n\tep := filepath.Join(rfs, fn)\n\terr = os.Rename(filepath.Join(gobin, fn), ep)\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tdebug(\"moved binary to:\", ep)\n\n\texec := []string{filepath.Join(\"\/\", fn)}\n\texec = append(exec, execOpts...)\n\t\/\/ Build the ACI\n\tim := schema.ImageManifest{\n\t\tACKind: types.ACKind(\"ImageManifest\"),\n\t\tACVersion: schema.AppContainerVersion,\n\t\tName: *name,\n\t\tApp: &types.App{\n\t\t\tExec: exec,\n\t\t\tUser: \"0\",\n\t\t\tGroup: \"0\",\n\t\t},\n\t}\n\tdebug(im)\n\n\tgw := gzip.NewWriter(of)\n\ttr := tar.NewWriter(gw)\n\n\tdefer func() {\n\t\ttr.Close()\n\t\tgw.Close()\n\t\tof.Close()\n\t}()\n\n\tiw := aci.NewImageWriter(im, tr)\n\terr = filepath.Walk(acidir, aci.BuildWalker(acidir, iw))\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\terr = iw.Close()\n\tif err != nil {\n\t\tdie(err.Error())\n\t}\n\tfmt.Println(\"Wrote\", of.Name())\n}\n\n\/\/ strip replaces all characters that are not [a-Z_] with _\nfunc strip(in string) string {\n\tout := bytes.Buffer{}\n\tfor _, c := range in {\n\t\tif !strings.ContainsRune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTVWXYZ0123456789_\", c) {\n\t\t\tc = '_'\n\t\t}\n\t\tif _, err := out.WriteRune(c); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn out.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/go.net\/html\" \/\/Tokenizer für HTML\n\t\"database\/sql\"\n\t\"fmt\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst MaxOutstanding_Fetcher = 100 \/\/ Maximale Anzahl gleichzeitger Fetcher\nconst DEBUG = 1 \/\/ Debugausgabe an\/aus\nconst MaxLinkDepth = 3 \/\/ Maximale Linktiefe\n\ntype URLINDEX struct {\n\tURL string\n\tWORDS map[string]int\n}\ntype HTTPRESP struct {\n\tURL string\n\tLINKDEPTH int\n\tFD io.Reader\n}\ntype URL struct {\n\tURL string\n\tLINKDEPTH int\n}\n\nvar crwldurls map[string]bool \/\/ globale URL Map - um doppeltes HTTP GET zu vermeiden\n\n\/\/Channels\nvar chan_urls = make(chan URL, 100000) \/\/ buffered channel of strings\n\/\/bei aktueller implementierung des Parser aufrufs, müsste der io_reader channel nicht buffered sein\nvar chan_ioreaders = make(chan HTTPRESP, 10) \/\/ buffered channel of structs\nvar chan_urlindexes = make(chan URLINDEX, 100) \/\/ buffered channel of structs\n\n\/\/Semaphore Channel\nvar sem_Fetcher = make(chan int, MaxOutstanding_Fetcher)\n\nfunc debugausgabe(msg string) {\n\tif DEBUG == 1 {\n\t\tfmt.Printf(\"%s DEBUG: %s\\n\", time.Now(), msg)\n\t}\n}\n\nfunc handleFetcher(url URL) {\n\t<-sem_Fetcher \/\/ Eine Ressource verbrauchen: Lock falls bereits alle verbraucht\n\tfetchURL(url)\n\tsem_Fetcher <- 1 \/\/ Eine Ressource wieder freigeben\n}\n\nfunc starten() {\n\t\/\/ Initiales setzen der Ressourcen\n\tfor i := 0; i < MaxOutstanding_Fetcher; i++ {\n\t\tsem_Fetcher <- 1\n\t}\n\t\/\/ Endless (as long as the channel is not empty) Fetcher Spawning\n\tfor {\n\t\tgo handleFetcher(<-chan_urls)\n\t}\n}\n\n\/\/ parseHTML bekommt eine komplette HTML Seite\n\/\/ und legt eine Map mit Wörtern und (viele) einzelne Links in entsprechende Channels\nfunc parseHtml(a HTTPRESP) {\n\t\/\/start := time.Now()\n\td := html.NewTokenizer(a.FD)\n\tvar words map[string]int\n\twords = make(map[string]int)\n\n\tfor {\n\t\t\/\/ token type\n\t\ttokenType := d.Next()\n\t\t\/\/ ErrorToken kommt (auch) beim Ende der Daten\n\t\tif tokenType == html.ErrorToken {\n\t\t\tchan_urlindexes <- URLINDEX{a.URL, words} \/\/ WORD-Map in den Channel legen\n\t\t\t\/\/fmt.Printf(\"Parse-Dauer : [%.2fs] URL: %s\\n\", time.Since(start).Seconds(), a.URL)\n\t\t\treturn\n\t\t}\n\t\ttoken := d.Token()\n\t\tswitch tokenType {\n\t\tcase html.StartTagToken: \/\/ <tag>\n\t\t\t\/\/ Links finden\n\t\t\tif token.Data == \"a\" {\n\t\t\t\tfor _, element := range token.Attr {\n\t\t\t\t\tif element.Key == \"href\" {\n\t\t\t\t\t\t\/\/ Link normalisieren\n\t\t\t\t\t\tref_url, err := url.Parse(element.Val) \/\/ geparste URL\n\t\t\t\t\t\tbase_url, _ := url.Parse(a.URL) \/\/ Basis URL der geparsten Seite\n\t\t\t\t\t\tcomp_url := base_url.ResolveReference(ref_url) \/\/ zusammengesetzte url oder falls ref_url==absoluteurl->ref_url\n\t\t\t\t\t\t\/\/ Nur Links die nicht in der globalen Link Map sind\n\t\t\t\t\t\tif err == nil && comp_url.Scheme == \"http\" && crwldurls[comp_url.String()] != true && a.LINKDEPTH < MaxLinkDepth {\n\t\t\t\t\t\t\tcrwldurls[url.URL] = true \/\/URL in die globale URL Liste aufnehmen damit sie nicht nochmal in den Work Queue kommt.\n\t\t\t\t\t\t\tchan_urls <- URL{comp_url.String(), a.LINKDEPTH + 1} \/\/ Die URL in den Channel legen und Linktiefe hochzählen\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase html.TextToken: \/\/ text between start and end tag\n\t\t\t\/\/Map mit Wörtern erstellen\n\t\t\ttemp := strings.Fields(token.Data) \/\/Aufteilen in Einzelne Wörter, trennen bei Whitespace\n\t\t\tfor _, element := range temp {\n\t\t\t\t\/\/TODO: einzelne Örter noch besser von Sonderzeichen trennen z.b. mit TRIM()\n\t\t\t\twords[element] = words[element] + 1\n\t\t\t}\n\n\t\t\t\/\/fmt.Printf(\"%q\\n\", temp)\n\t\tcase html.EndTagToken: \/\/ <\/tag>\n\t\tcase html.SelfClosingTagToken: \/\/ <tag\/>\n\n\t\t}\n\t}\n}\n\n\/\/ fetchURL Bekommt eine URL\n\/\/ lädt die Seite herunter\n\/\/ und legt ein struct vom Typ HTTPRESP in den Channel chan_ioreaders\nfunc fetchURL(url URL) {\n\tstart := time.Now()\n\tresponse, err := http.Get(url.URL)\n\tif err != nil {\n\t\tfmt.Printf(\"Fehler: %s beim HTTP GET von: %s\\n\", err, url.URL)\n\t\treturn\n\t}\n\t\/\/fmt.Printf(\"%T\", response.Body)\n\tchan_ioreaders <- HTTPRESP{url.URL, url.LINKDEPTH, response.Body}\n\tfmt.Printf(\"HTTP GET Dauer: [%.2fs] URL: %s Tiefe: %d\\n\", time.Since(start).Seconds(), url.URL, url.LINKDEPTH)\n\t\/\/TODO io.reader channel umstellen auf direkte übergabe des structs\n\tgo parseHtml(<-chan_ioreaders) \/\/ Für jeden Fetcher wird ein Parser aufgerufen, der unabhängig läuft\n\treturn\n}\n\n\/\/save holt sich daten (structs vom Typ URLINDEX) aus dem Channel chan_urlindexes\n\/\/ und schreibt die Daten in eine Datei \"output.txt\"\nfunc save() {\n\n\t\/\/fo, err := os.OpenFile(\"output.txt\", os.O_APPEND, 0777)\n\tfo, err := os.Create(\"output.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ close fo on exit and check for its returned error\n\tdefer func() {\n\t\tif err := fo.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\t\/\/ make a write buffer\n\tw := bufio.NewWriter(fo)\n\tfor {\n\t\tdata, ok := <-chan_urlindexes\n\n\t\tif ok == false {\n\t\t\treturn \/\/ Abbruch wenn der Channel geschlossen ist\n\t\t}\n\n\t\tfmt.Fprintf(w, \"\\nURL: %s\\nMAP:\\n%v\\n\\n\\n\\n\", data.URL, data.WORDS)\n\t\tw.Flush()\n\t}\n}\n\n\/\/writeDB holt sich daten (structs vom Typ URLINDEX) aus dem Channel chan_urlindexes\n\/\/ und schreibt die Daten in eine sqlite3 Datenbank: crwld.db\nfunc writeDB() {\n\tos.Remove(\".\/crwld.db\")\n\n\tdb, err := sql.Open(\"sqlite3\", \".\/crwld.db\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\t\/\/Neue Tabelle(n) erstellen\n\tsqls := []string{\n\t\t\"create table data (url text not null, word text, count int)\",\n\t\t\"delete from data\",\n\t}\n\tfor _, sql := range sqls {\n\t\t_, err = db.Exec(sql)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Endlosworker\n\tfor {\n\t\tindex, ok := <-chan_urlindexes \/\/Neue Arbeit aus dem Channel holen\n\n\t\tif ok == false {\n\t\t\treturn \/\/ Abbruch wenn der Channel geschlossen ist\n\t\t}\n\n\t\ttx, err := db.Begin()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tstmt, err := tx.Prepare(\"insert into data(url, word, count) values(?, ?, ?)\") \/\/Maske für das SQL Statement setzen\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tdefer stmt.Close()\n\n\t\t\/\/ Die gesamte MAP durchlaufen und für jeden Key ein SQl Statement zusammensetzen\n\t\tfor element := range index.WORDS {\n\t\t\t_, err = stmt.Exec(index.URL, element, index.WORDS[element])\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ttx.Commit()\n\t}\n}\n\nfunc main() {\n\t\/\/ \"http:\/\/www.rsdk.net\/test2\/\" das letzte \/ ist wichtig für die Auflösung von relativen URLs\n\t\/\/ aber am besten eine \"echte\" Startseite wie z.B. index.html angeben.\n\tstarturl := URL{\"http:\/\/www.rsdk.net\/test2\/index.html\", 0} \/\/ Start URL mit Linktiefe 0 festlegen\n\tchan_urls <- starturl \/\/ URL in den Channel legen\n\tcrwldurls = make(map[string]bool)\n\t\/\/go save() \/\/ File Writer starten\n\tdebugausgabe(\"Starte DB Writer\")\n\tgo writeDB() \/\/DB Writer starten\n\tdebugausgabe(\"Starte Crawler\")\n\tstarten() \/\/ Crawler starten\n\n}\n<commit_msg>Crawler beendet sich selbst wenn es keine Schreibarbeit mehr gibt<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/go.net\/html\" \/\/Tokenizer für HTML\n\t\"database\/sql\"\n\t\"fmt\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst MaxOutstanding_Fetcher = 40 \/\/ Maximale Anzahl gleichzeitger Fetcher\nconst DEBUG = 1 \/\/ Debugausgabe an\/aus\nconst MaxLinkDepth = 2 \/\/ Maximale Linktiefe\nvar stop bool\n\ntype URLINDEX struct {\n\tURL string\n\tWORDS map[string]int\n}\ntype HTTPRESP struct {\n\tURL string\n\tLINKDEPTH int\n\tFD io.Reader\n}\ntype URL struct {\n\tURL string\n\tLINKDEPTH int\n}\n\nvar crwldurls map[string]bool \/\/ globale URL Map - um doppeltes HTTP GET zu vermeiden\n\n\/\/Channels\nvar chan_urls = make(chan URL, 100000) \/\/ buffered channel of strings\n\/\/bei aktueller implementierung des Parser aufrufs, müsste der io_reader channel nicht buffered sein\nvar chan_ioreaders = make(chan HTTPRESP, 10) \/\/ buffered channel of structs\nvar chan_urlindexes = make(chan URLINDEX, 100) \/\/ buffered channel of structs\n\n\/\/Semaphore Channel\nvar sem_Fetcher = make(chan int, MaxOutstanding_Fetcher)\n\nfunc debugausgabe(msg string) {\n\tif DEBUG == 1 {\n\t\tfmt.Printf(\"%s DEBUG: %s\\n\", time.Now(), msg)\n\t}\n}\n\nfunc handleFetcher(url URL) {\n\t<-sem_Fetcher \/\/ Eine Ressource verbrauchen: Lock falls bereits alle verbraucht\n\tfetchURL(url)\n\tsem_Fetcher <- 1 \/\/ Eine Ressource wieder freigeben\n}\n\nfunc starten() {\n\t\/\/ Initiales setzen der Ressourcen\n\tfor i := 0; i < MaxOutstanding_Fetcher; i++ {\n\t\tsem_Fetcher <- 1\n\t}\n\twaittime, _ := time.ParseDuration(\"300ms\")\n\t\/\/ Endless (as long as the channel is not empty) Fetcher Spawning\n\tfor {\n\t\tselect {\n\t\tcase element := <-chan_urls: \/\/ Neue Arbeit aus dem Channel holen\n\t\t\tgo handleFetcher(element)\n\t\tdefault:\n\t\t\t\/\/keine arbeit da\n\t\t\tif stop {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttime.Sleep(waittime)\n\n\t\t}\n\t}\n}\n\n\/\/ parseHTML bekommt eine komplette HTML Seite\n\/\/ und legt eine Map mit Wörtern und (viele) einzelne Links in entsprechende Channels\nfunc parseHtml(a HTTPRESP) {\n\t\/\/start := time.Now()\n\td := html.NewTokenizer(a.FD)\n\tvar words map[string]int\n\twords = make(map[string]int)\n\n\tfor {\n\t\t\/\/ token type\n\t\ttokenType := d.Next()\n\t\t\/\/ ErrorToken kommt (auch) beim Ende der Daten\n\t\tif tokenType == html.ErrorToken {\n\t\t\tchan_urlindexes <- URLINDEX{a.URL, words} \/\/ WORD-Map in den Channel legen\n\t\t\t\/\/fmt.Printf(\"Parse-Dauer : [%.2fs] URL: %s\\n\", time.Since(start).Seconds(), a.URL)\n\t\t\treturn\n\t\t}\n\t\ttoken := d.Token()\n\t\tswitch tokenType {\n\t\tcase html.StartTagToken: \/\/ <tag>\n\t\t\t\/\/ Links finden\n\t\t\tif token.Data == \"a\" {\n\t\t\t\tfor _, element := range token.Attr {\n\t\t\t\t\tif element.Key == \"href\" {\n\t\t\t\t\t\t\/\/ Link normalisieren\n\t\t\t\t\t\tref_url, err := url.Parse(element.Val) \/\/ geparste URL\n\t\t\t\t\t\tbase_url, _ := url.Parse(a.URL) \/\/ Basis URL der geparsten Seite\n\t\t\t\t\t\tcomp_url := base_url.ResolveReference(ref_url) \/\/ zusammengesetzte url oder falls ref_url==absoluteurl->ref_url\n\t\t\t\t\t\t\/\/ Nur Links die nicht in der globalen Link Map sind\n\t\t\t\t\t\tif err == nil && comp_url.Scheme == \"http\" && crwldurls[comp_url.String()] != true && a.LINKDEPTH < MaxLinkDepth {\n\t\t\t\t\t\t\tcrwldurls[comp_url.String()] = true \/\/URL in die globale URL Liste aufnehmen damit sie nicht nochmal in den Work Queue kommt.\n\t\t\t\t\t\t\tchan_urls <- URL{comp_url.String(), a.LINKDEPTH + 1} \/\/ Die URL in den Channel legen und Linktiefe hochzählen\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase html.TextToken: \/\/ text between start and end tag\n\t\t\t\/\/Map mit Wörtern erstellen\n\t\t\ttemp := strings.Fields(token.Data) \/\/Aufteilen in Einzelne Wörter, trennen bei Whitespace\n\t\t\tfor _, element := range temp {\n\t\t\t\t\/\/TODO: einzelne Örter noch besser von Sonderzeichen trennen z.b. mit TRIM()\n\t\t\t\twords[element] = words[element] + 1\n\t\t\t}\n\n\t\t\t\/\/fmt.Printf(\"%q\\n\", temp)\n\t\tcase html.EndTagToken: \/\/ <\/tag>\n\t\tcase html.SelfClosingTagToken: \/\/ <tag\/>\n\n\t\t}\n\t}\n}\n\n\/\/ fetchURL Bekommt eine URL\n\/\/ lädt die Seite herunter\n\/\/ und legt ein struct vom Typ HTTPRESP in den Channel chan_ioreaders\nfunc fetchURL(url URL) {\n\tstart := time.Now()\n\tresponse, err := http.Get(url.URL)\n\tif err != nil {\n\t\tfmt.Printf(\"Fehler: %s beim HTTP GET von: %s\\n\", err, url.URL)\n\t\treturn\n\t}\n\t\/\/fmt.Printf(\"%T\", response.Body)\n\tchan_ioreaders <- HTTPRESP{url.URL, url.LINKDEPTH, response.Body}\n\tfmt.Printf(\"HTTP GET Dauer: [%.2fs] URL: %s Tiefe: %d\\n\", time.Since(start).Seconds(), url.URL, url.LINKDEPTH)\n\t\/\/TODO io.reader channel umstellen auf direkte übergabe des structs\n\tgo parseHtml(<-chan_ioreaders) \/\/ Für jeden Fetcher wird ein Parser aufgerufen, der unabhängig läuft\n\treturn\n}\n\n\/\/save holt sich daten (structs vom Typ URLINDEX) aus dem Channel chan_urlindexes\n\/\/ und schreibt die Daten in eine Datei \"output.txt\"\nfunc save() {\n\n\t\/\/fo, err := os.OpenFile(\"output.txt\", os.O_APPEND, 0777)\n\tfo, err := os.Create(\"output.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ close fo on exit and check for its returned error\n\tdefer func() {\n\t\tif err := fo.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\t\/\/ make a write buffer\n\tw := bufio.NewWriter(fo)\n\tfor {\n\t\tdata, ok := <-chan_urlindexes\n\n\t\tif ok == false {\n\t\t\treturn \/\/ Abbruch wenn der Channel geschlossen ist\n\t\t}\n\n\t\tfmt.Fprintf(w, \"\\nURL: %s\\nMAP:\\n%v\\n\\n\\n\\n\", data.URL, data.WORDS)\n\t\tw.Flush()\n\t}\n}\n\n\/\/writeDB holt sich daten (structs vom Typ URLINDEX) aus dem Channel chan_urlindexes\n\/\/ und schreibt die Daten in eine sqlite3 Datenbank: crwld.db\nfunc writeDB() {\n\tos.Remove(\".\/crwld.db\")\n\n\tdb, err := sql.Open(\"sqlite3\", \".\/crwld.db\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\t\/\/Neue Tabelle(n) erstellen\n\tsqls := []string{\n\t\t\"create table data (url text not null, word text, count int)\",\n\t\t\"delete from data\",\n\t}\n\tfor _, sql := range sqls {\n\t\t_, err = db.Exec(sql)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\treturn\n\t\t}\n\t}\n\tstart := time.Now() \/\/ Initialiseren\n\twaittime, _ := time.ParseDuration(\"300ms\")\n\t\/\/ Endlosworker\n\tfor {\n\t\tvar index URLINDEX\n\t\tselect {\n\t\tcase index = <-chan_urlindexes: \/\/ Neue Arbeit aus dem Channel holen\n\t\t\tstart = time.Now() \/\/ zu diesem Zeitpunkt gab es das letzte mal Arbeit\n\t\tdefault:\n\t\t\t\/\/keine arbeit da\n\t\t\tif time.Since(start).Seconds() > 3 {\n\t\t\t\tstop = true \/\/ wenn länger als 3 Sekunden auf neue Schreibarbeit gewartet wurde -->beenden\n\t\t\t}\n\t\t\ttime.Sleep(waittime) \/\/kurz abwarten\n\t\t\tcontinue\n\t\t}\n\n\t\ttx, err := db.Begin()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tstmt, err := tx.Prepare(\"insert into data(url, word, count) values(?, ?, ?)\") \/\/Maske für das SQL Statement setzen\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tdefer stmt.Close()\n\n\t\t\/\/ Die gesamte MAP durchlaufen und für jeden Key ein SQl Statement zusammensetzen\n\t\tfor element := range index.WORDS {\n\t\t\t_, err = stmt.Exec(index.URL, element, index.WORDS[element])\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ttx.Commit()\n\t}\n}\n\nfunc main() {\n\tstop = false\n\t\/\/ \"http:\/\/www.rsdk.net\/test2\/\" das letzte \/ ist wichtig für die Auflösung von relativen URLs\n\t\/\/ aber am besten eine \"echte\" Startseite wie z.B. index.html angeben.\n\tstarturl := URL{\"http:\/\/www.rsdk.net\/test2\/index.html\", 0} \/\/ Start URL mit Linktiefe 0 festlegen\n\tchan_urls <- starturl \/\/ URL in den Channel legen\n\tcrwldurls = make(map[string]bool)\n\t\/\/go save() \/\/ File Writer starten\n\tdebugausgabe(\"Starte DB Writer\")\n\tgo writeDB() \/\/DB Writer starten\n\tdebugausgabe(\"Starte Crawler\")\n\tstarten() \/\/ Crawler starten\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/*\n#cgo LDFLAGS: -lX11\n#include <X11\/Xlib.h>\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gotk3\/gotk3\/gtk\"\n)\n\ntype gobarConfig struct {\n\tListen string `yaml:\"listen\"`\n\tCssPath string `yaml:\"css_path\"`\n\tPosition struct {\n\t\tX int `yaml:\"x\"`\n\t\tY int `yaml:\"y\"`\n\t} `yaml:\"position\"`\n\tBarSize struct {\n\t\tX int `yaml:\"x\"`\n\t\tY int `yaml:\"y\"`\n\t} `yaml:\"bar_size\"`\n\tActions map[string]struct {\n\t\tCommand string `yaml:\"command\"`\n\t\tValue string `yaml:\"value\"`\n\t\tLabel string `yaml:\"label\"`\n\t\tDuration string `yaml:\"duration\"`\n\t\tMin float64 `yaml:\"min\"`\n\t\tMax float64 `yaml:\"max\"`\n\t} `yaml:\"actions\"`\n}\n\nfunc getLabelHandler(label *gtk.Label, bar *gtk.LevelBar, win *gtk.Window) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar err error\n\t\tmin := 0.0\n\t\tmax := 100.0\n\t\tlevel := 50.0\n\t\tduration, _ := time.ParseDuration(\"700ms\")\n\t\tlabelText := \"label string xD\"\n\n\t\tvars := r.URL.Query()\n\t\tr.Body.Close()\n\n\t\tif val, ok := vars[\"label\"]; ok {\n\t\t\tlabelText = val[0]\n\t\t}\n\t\tif val, ok := vars[\"min\"]; ok {\n\t\t\tmin, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong min value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"max\"]; ok {\n\t\t\tmax, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong max value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"level\"]; ok {\n\t\t\tlevel, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong level value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"duration\"]; ok {\n\t\t\tduration, err = time.ParseDuration(val[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong duration value\", val, err)\n\t\t\t}\n\t\t}\n\n\t\t_, _ = w.Write([]byte(\"OK\\n\"))\n\n\t\tgo func() {\n\t\t\tbar.SetMinValue(min)\n\t\t\tbar.SetMaxValue(max)\n\t\t\tbar.SetValue(level)\n\t\t\tlabel.SetLabel(labelText)\n\n\t\t\twin.ShowAll()\n\n\t\t\ttime.Sleep(duration)\n\n\t\t\twin.Resize(10, 10)\n\t\t\twin.Hide()\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tvar config gobarConfig\n\n\tif len(os.Args) != 2 {\n\t\tlog.Fatalln(\"Usage:\", os.Args[0], \"<configuration file>\")\n\t}\n\n\tdata, err := ioutil.ReadFile(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatalln(\"Error reading configuration file:\", err)\n\t}\n\n\tif err := yaml.Unmarshal(data, &config); err != nil {\n\t\tlog.Fatalln(\"Error parsing configuration file:\", err)\n\t}\n\n\tC.XInitThreads()\n\tgtk.Init(nil)\n\n\twin, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create window:\", err)\n\t}\n\twin.Connect(\"destroy\", func() {\n\t\tgtk.MainQuit()\n\t})\n\n\tgrid, err := gtk.GridNew()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create grid:\", err)\n\t}\n\n\tcss, err := gtk.CssProviderNew()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create css provider:\", err)\n\t}\n\n\tlb := label()\n\tbar := levelBar(config.BarSize.X, config.BarSize.Y)\n\n\tgrid.SetOrientation(gtk.ORIENTATION_HORIZONTAL)\n\tgrid.Add(bar)\n\tgrid.Add(lb)\n\n\tcss.LoadFromPath(config.CssPath)\n\n\tstyle_context, err := lb.GetStyleContext()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to get label style context:\", err)\n\t}\n\tstyle_context.AddProvider(css, gtk.STYLE_PROVIDER_PRIORITY_USER)\n\n\twin.SetWMClass(\"gobar\", \"gobar\")\n\twin.SetTitle(\"gobar\")\n\twin.Add(grid)\n\twin.SetAcceptFocus(false)\n\twin.Move(config.Position.X, config.Position.Y)\n\n\thttp.HandleFunc(\"\/api\/v1\/bar\", getLabelHandler(lb, bar, win))\n\n\tfor key, value := range config.Actions {\n\t\tkey := key\n\t\tvalue := value\n\t\thttp.HandleFunc(\"\/api\/v1\/action\/\"+key, func(w http.ResponseWriter, r *http.Request) {\n\t\t\tr.Body.Close()\n\t\t\terr := exec.Command(\"sh\", \"-c\", value.Command).Run()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Command:\", value.Command, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tout, err := exec.Command(\"sh\", \"-c\", value.Value).Output()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Command:\", value.Command, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tval, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error parsing as float\", string(out), \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tduration, err := time.ParseDuration(value.Duration)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error paring as duration\", duration, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t}\n\n\t\t\tfmt.Fprintln(w, \"status: OK\\nvalue:\", val)\n\n\t\t\tgo func() {\n\t\t\t\tlb.SetLabel(value.Label)\n\n\t\t\t\tbar.SetMinValue(value.Min)\n\t\t\t\tbar.SetMaxValue(value.Max)\n\t\t\t\tbar.SetValue(val)\n\n\t\t\t\twin.ShowAll()\n\n\t\t\t\ttime.Sleep(duration)\n\n\t\t\t\twin.Resize(10, 10)\n\t\t\t\twin.Hide()\n\t\t\t}()\n\t\t})\n\t}\n\n\tgo gtk.Main()\n\n\thttp.ListenAndServe(config.Listen, nil)\n}\n\nfunc label() *gtk.Label {\n\tlabel, err := gtk.LabelNew(\"xD\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create label:\", err)\n\t}\n\n\treturn label\n}\n\nfunc levelBar(x, y int) *gtk.LevelBar {\n\tlb, err := gtk.LevelBarNew()\n\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create level bar:\", err)\n\t}\n\n\tlb.SetSizeRequest(x, y)\n\n\treturn lb\n}\n<commit_msg>welp… C works.<commit_after>package main\n\n\/*\n#cgo LDFLAGS: -lX11\n#cgo pkg-config: gtk+-3.0\n#include <X11\/Xlib.h>\n#include <gtk\/gtk.h>\n\nvoid setup_css(char *path) {\n GtkCssProvider *provider;\n GdkDisplay *display;\n GdkScreen *screen;\n provider = gtk_css_provider_new();\n display = gdk_display_get_default();\n screen = gdk_display_get_default_screen(display);\n gtk_style_context_add_provider_for_screen(screen,\n GTK_STYLE_PROVIDER(provider),\n GTK_STYLE_PROVIDER_PRIORITY_APPLICATION);\n gtk_css_provider_load_from_path(GTK_CSS_PROVIDER(provider), path, NULL);\n g_object_unref(provider);\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gotk3\/gotk3\/gtk\"\n)\n\ntype gobarConfig struct {\n\tListen string `yaml:\"listen\"`\n\tCssPath string `yaml:\"css_path\"`\n\tPosition struct {\n\t\tX int `yaml:\"x\"`\n\t\tY int `yaml:\"y\"`\n\t} `yaml:\"position\"`\n\tBarSize struct {\n\t\tX int `yaml:\"x\"`\n\t\tY int `yaml:\"y\"`\n\t} `yaml:\"bar_size\"`\n\tActions map[string]struct {\n\t\tCommand string `yaml:\"command\"`\n\t\tValue string `yaml:\"value\"`\n\t\tLabel string `yaml:\"label\"`\n\t\tDuration string `yaml:\"duration\"`\n\t\tMin float64 `yaml:\"min\"`\n\t\tMax float64 `yaml:\"max\"`\n\t} `yaml:\"actions\"`\n}\n\nfunc getLabelHandler(label *gtk.Label, bar *gtk.LevelBar, win *gtk.Window) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar err error\n\t\tmin := 0.0\n\t\tmax := 100.0\n\t\tlevel := 50.0\n\t\tduration, _ := time.ParseDuration(\"700ms\")\n\t\tlabelText := \"label string xD\"\n\n\t\tvars := r.URL.Query()\n\t\tr.Body.Close()\n\n\t\tif val, ok := vars[\"label\"]; ok {\n\t\t\tlabelText = val[0]\n\t\t}\n\t\tif val, ok := vars[\"min\"]; ok {\n\t\t\tmin, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong min value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"max\"]; ok {\n\t\t\tmax, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong max value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"level\"]; ok {\n\t\t\tlevel, err = strconv.ParseFloat(val[0], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong level value\", val, err)\n\t\t\t}\n\t\t}\n\t\tif val, ok := vars[\"duration\"]; ok {\n\t\t\tduration, err = time.ParseDuration(val[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Got a wrong duration value\", val, err)\n\t\t\t}\n\t\t}\n\n\t\t_, _ = w.Write([]byte(\"OK\\n\"))\n\n\t\tgo func() {\n\t\t\tbar.SetMinValue(min)\n\t\t\tbar.SetMaxValue(max)\n\t\t\tbar.SetValue(level)\n\t\t\tlabel.SetLabel(labelText)\n\n\t\t\twin.ShowAll()\n\n\t\t\ttime.Sleep(duration)\n\n\t\t\twin.Resize(10, 10)\n\t\t\twin.Hide()\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tvar config gobarConfig\n\n\tif len(os.Args) != 2 {\n\t\tlog.Fatalln(\"Usage:\", os.Args[0], \"<configuration file>\")\n\t}\n\n\tdata, err := ioutil.ReadFile(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatalln(\"Error reading configuration file:\", err)\n\t}\n\n\tif err := yaml.Unmarshal(data, &config); err != nil {\n\t\tlog.Fatalln(\"Error parsing configuration file:\", err)\n\t}\n\n\tC.XInitThreads()\n\tgtk.Init(nil)\n\n\tC.setup_css(C.CString(config.CssPath))\n\n\twin, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create window:\", err)\n\t}\n\twin.Connect(\"destroy\", func() {\n\t\tgtk.MainQuit()\n\t})\n\n\tgrid, err := gtk.GridNew()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create grid:\", err)\n\t}\n\n\tlb := label()\n\tbar := levelBar(config.BarSize.X, config.BarSize.Y)\n\n\tgrid.SetOrientation(gtk.ORIENTATION_HORIZONTAL)\n\tgrid.Add(bar)\n\tgrid.Add(lb)\n\n\twin.SetWMClass(\"gobar\", \"gobar\")\n\twin.SetTitle(\"gobar\")\n\twin.Add(grid)\n\twin.SetAcceptFocus(false)\n\twin.Move(config.Position.X, config.Position.Y)\n\n\thttp.HandleFunc(\"\/api\/v1\/bar\", getLabelHandler(lb, bar, win))\n\n\tfor key, value := range config.Actions {\n\t\tkey := key\n\t\tvalue := value\n\t\thttp.HandleFunc(\"\/api\/v1\/action\/\"+key, func(w http.ResponseWriter, r *http.Request) {\n\t\t\tr.Body.Close()\n\t\t\terr := exec.Command(\"sh\", \"-c\", value.Command).Run()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Command:\", value.Command, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tout, err := exec.Command(\"sh\", \"-c\", value.Value).Output()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Command:\", value.Command, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tval, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error parsing as float\", string(out), \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tduration, err := time.ParseDuration(value.Duration)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error paring as duration\", duration, \"error:\", err)\n\t\t\t\tfmt.Fprintln(w, \"error:\", err)\n\t\t\t}\n\n\t\t\tfmt.Fprintln(w, \"status: OK\\nvalue:\", val)\n\n\t\t\tgo func() {\n\t\t\t\tlb.SetLabel(value.Label)\n\n\t\t\t\tbar.SetMinValue(value.Min)\n\t\t\t\tbar.SetMaxValue(value.Max)\n\t\t\t\tbar.SetValue(val)\n\n\t\t\t\twin.ShowAll()\n\n\t\t\t\ttime.Sleep(duration)\n\n\t\t\t\twin.Resize(10, 10)\n\t\t\t\twin.Hide()\n\t\t\t}()\n\t\t})\n\t}\n\n\tgo gtk.Main()\n\n\thttp.ListenAndServe(config.Listen, nil)\n}\n\nfunc label() *gtk.Label {\n\tlabel, err := gtk.LabelNew(\"xD\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create label:\", err)\n\t}\n\n\treturn label\n}\n\nfunc levelBar(x, y int) *gtk.LevelBar {\n\tlb, err := gtk.LevelBarNew()\n\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create level bar:\", err)\n\t}\n\n\tlb.SetSizeRequest(x, y)\n\n\treturn lb\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/musl\/libgofr\"\n\t\"github.com\/nfnt\/resize\"\n\t\"image\"\n\t\"image\/png\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar id_chan = make(chan int, 1)\n\nfunc finish(w http.ResponseWriter, status int, message string) {\n\tw.WriteHeader(status)\n\tfmt.Fprintf(w, message)\n}\n\nfunc logDuration(message string, start time.Time) {\n\tend := time.Now()\n\tlog.Printf(\"%s: %v\\n\", message, end.Sub(start))\n}\n\nfunc route_png(w http.ResponseWriter, r *http.Request) {\n\tid := <-id_chan\n\tstart := time.Now()\n\tdefer logDuration(fmt.Sprintf(\"%08d %s\", id, r.URL.Path), start)\n\n\tif r.Method != \"GET\" {\n\t\tfinish(w, http.StatusMethodNotAllowed, \"Method not allowed.\")\n\t\treturn\n\t}\n\n\tq := r.URL.Query()\n\n\ts, err := strconv.Atoi(q.Get(\"s\"))\n\tif err != nil {\n\t\ts = 1\n\t}\n\n\twidth, err := strconv.Atoi(q.Get(\"w\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid width\")\n\t\treturn\n\t}\n\n\theight, err := strconv.Atoi(q.Get(\"h\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid height\")\n\t\treturn\n\t}\n\n\titerations, err := strconv.Atoi(q.Get(\"i\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid i\")\n\t\treturn\n\t}\n\n\ter, err := strconv.ParseFloat(q.Get(\"e\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid e\")\n\t\treturn\n\t}\n\n\trmin, err := strconv.ParseFloat(q.Get(\"rmin\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\timin, err := strconv.ParseFloat(q.Get(\"imin\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\trmax, err := strconv.ParseFloat(q.Get(\"rmax\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmax\")\n\t\treturn\n\t}\n\n\timax, err := strconv.ParseFloat(q.Get(\"imax\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\tc := q.Get(\"c\")\n\thex := q.Get(\"m\")\n\n\tp := gofr.Parameters{\n\t\tImageWidth: width * s,\n\t\tImageHeight: height * s,\n\t\tMaxI: iterations,\n\t\tEscapeRadius: er,\n\t\tMin: complex(rmin, imin),\n\t\tMax: complex(rmax, imax),\n\t\tColorFunc: c,\n\t\tMemberColor: hex,\n\t}\n\n\tlog.Printf(\"%08d rendering\\n\", id)\n\n\t\/\/ TODO: Check parameters and set reasonable bounds on what we can\n\t\/\/ quickly calculate.\n\n\timg := image.NewNRGBA64(image.Rect(0, 0, p.ImageWidth, p.ImageHeight))\n\tn := runtime.NumCPU()\n\tcontexts := gofr.MakeContexts(img, n, &p)\n\tgofr.Render(n, contexts, gofr.Mandelbrot)\n\n\tscaled_img := resize.Resize(uint(width), uint(height), image.Image(img), resize.Lanczos3)\n\n\tw.Header().Set(\"Content-Type\", \"image\/png\")\n\tpng.Encode(w, scaled_img)\n}\n\nfunc main() {\n\tfs := http.FileServer(http.Dir(\"static\"))\n\tbind_addr := \"0.0.0.0:8000\"\n\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tid_chan <- i\n\t\t}\n\t}()\n\n\thttp.Handle(\"\/\", fs)\n\thttp.HandleFunc(\"\/png\", route_png)\n\tlog.Printf(\"Listening on: %s\\n\", bind_addr)\n\tlog.Fatal(http.ListenAndServe(bind_addr, nil))\n}\n<commit_msg>Add version constant<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/musl\/libgofr\"\n\t\"github.com\/nfnt\/resize\"\n\t\"image\"\n\t\"image\/png\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst Version = \"0.0.2\"\n\nvar id_chan = make(chan int, 1)\n\nfunc finish(w http.ResponseWriter, status int, message string) {\n\tw.WriteHeader(status)\n\tfmt.Fprintf(w, message)\n}\n\nfunc logDuration(message string, start time.Time) {\n\tend := time.Now()\n\tlog.Printf(\"%s: %v\\n\", message, end.Sub(start))\n}\n\nfunc route_png(w http.ResponseWriter, r *http.Request) {\n\tid := <-id_chan\n\tstart := time.Now()\n\tdefer logDuration(fmt.Sprintf(\"%08d %s\", id, r.URL.Path), start)\n\n\tif r.Method != \"GET\" {\n\t\tfinish(w, http.StatusMethodNotAllowed, \"Method not allowed.\")\n\t\treturn\n\t}\n\n\tq := r.URL.Query()\n\n\ts, err := strconv.Atoi(q.Get(\"s\"))\n\tif err != nil {\n\t\ts = 1\n\t}\n\n\twidth, err := strconv.Atoi(q.Get(\"w\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid width\")\n\t\treturn\n\t}\n\n\theight, err := strconv.Atoi(q.Get(\"h\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid height\")\n\t\treturn\n\t}\n\n\titerations, err := strconv.Atoi(q.Get(\"i\"))\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid i\")\n\t\treturn\n\t}\n\n\ter, err := strconv.ParseFloat(q.Get(\"e\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid e\")\n\t\treturn\n\t}\n\n\trmin, err := strconv.ParseFloat(q.Get(\"rmin\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\timin, err := strconv.ParseFloat(q.Get(\"imin\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\trmax, err := strconv.ParseFloat(q.Get(\"rmax\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmax\")\n\t\treturn\n\t}\n\n\timax, err := strconv.ParseFloat(q.Get(\"imax\"), 64)\n\tif err != nil {\n\t\tfinish(w, http.StatusUnprocessableEntity, \"Invalid rmin\")\n\t\treturn\n\t}\n\n\tc := q.Get(\"c\")\n\thex := q.Get(\"m\")\n\n\tp := gofr.Parameters{\n\t\tImageWidth: width * s,\n\t\tImageHeight: height * s,\n\t\tMaxI: iterations,\n\t\tEscapeRadius: er,\n\t\tMin: complex(rmin, imin),\n\t\tMax: complex(rmax, imax),\n\t\tColorFunc: c,\n\t\tMemberColor: hex,\n\t}\n\n\tlog.Printf(\"%08d rendering\\n\", id)\n\n\t\/\/ TODO: Check parameters and set reasonable bounds on what we can\n\t\/\/ quickly calculate.\n\n\timg := image.NewNRGBA64(image.Rect(0, 0, p.ImageWidth, p.ImageHeight))\n\tn := runtime.NumCPU()\n\tcontexts := gofr.MakeContexts(img, n, &p)\n\tgofr.Render(n, contexts, gofr.Mandelbrot)\n\n\tscaled_img := resize.Resize(uint(width), uint(height), image.Image(img), resize.Lanczos3)\n\n\tw.Header().Set(\"Content-Type\", \"image\/png\")\n\tpng.Encode(w, scaled_img)\n}\n\nfunc main() {\n\tfs := http.FileServer(http.Dir(\"static\"))\n\tbind_addr := \"0.0.0.0:8000\"\n\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tid_chan <- i\n\t\t}\n\t}()\n\n\thttp.Handle(\"\/\", fs)\n\thttp.HandleFunc(\"\/png\", route_png)\n\tlog.Printf(\"Listening on: %s\\n\", bind_addr)\n\tlog.Fatal(http.ListenAndServe(bind_addr, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package goreq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Request struct {\n\theaders []headerTuple\n\tMethod string\n\tUri string\n\tBody interface{}\n\tQueryString interface{}\n\tTimeout time.Duration\n\tContentType string\n\tAccept string\n\tHost string\n\tUserAgent string\n\tInsecure bool\n\tMaxRedirects int\n\tCompressed bool\n}\n\ntype Response struct {\n\tStatusCode int\n\tContentLength int64\n\tBody Body\n\tHeader http.Header\n}\n\ntype headerTuple struct {\n\tname string\n\tvalue string\n}\n\ntype Body struct {\n\tio.ReadCloser\n}\n\ntype Error struct {\n\ttimeout bool\n\tErr error\n}\n\nfunc (e *Error) Timeout() bool {\n\treturn e.timeout\n}\n\nfunc (e *Error) Error() string {\n\treturn e.Err.Error()\n}\n\nfunc (b *Body) FromJsonTo(o interface{}) error {\n\tif body, err := ioutil.ReadAll(b); err != nil {\n\t\treturn err\n\t} else if err := json.Unmarshal(body, o); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Body) ToString() (string, error) {\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(body), nil\n}\n\nfunc paramParse(query interface{}) (string, error) {\n\tvar (\n\t\tv = &url.Values{}\n\t\ts = reflect.ValueOf(query)\n\t\tt = reflect.TypeOf(query)\n\t)\n\n\tswitch query.(type) {\n\tcase url.Values:\n\t\treturn query.(url.Values).Encode(), nil\n\tdefault:\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tv.Add(strings.ToLower(t.Field(i).Name), fmt.Sprintf(\"%v\", s.Field(i).Interface()))\n\t\t}\n\t\treturn v.Encode(), nil\n\t}\n}\n\nfunc prepareRequestBody(b interface{}) (io.Reader, error) {\n\tswitch b.(type) {\n\tcase string:\n\t\t\/\/ treat is as text\n\t\treturn strings.NewReader(b.(string)), nil\n\tcase io.Reader:\n\t\t\/\/ treat is as text\n\t\treturn b.(io.Reader), nil\n\tcase []byte:\n\t\t\/\/treat as byte array\n\t\treturn bytes.NewReader(b.([]byte)), nil\n\tcase nil:\n\t\treturn nil, nil\n\tdefault:\n\t\t\/\/ try to jsonify it\n\t\tj, err := json.Marshal(b)\n\t\tif err == nil {\n\t\t\treturn bytes.NewReader(j), nil\n\t\t}\n\t\treturn nil, err\n\t}\n}\n\nfunc newResponse(res *http.Response) *Response {\n\treturn &Response{StatusCode: res.StatusCode, ContentLength: res.ContentLength, Header: res.Header, Body: Body{res.Body}}\n}\n\nvar defaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond}\nvar defaultTransport = &http.Transport{Dial: defaultDialer.Dial}\nvar defaultClient = &http.Client{Transport: defaultTransport}\n\nfunc SetConnectTimeout(duration time.Duration) {\n\tdefaultDialer.Timeout = duration\n}\n\nfunc (r *Request) AddHeader(name string, value string) {\n\tif r.headers == nil {\n\t\tr.headers = []headerTuple{}\n\t}\n\tr.headers = append(r.headers, headerTuple{name: name, value: value})\n}\n\nfunc (r Request) Do() (*Response, error) {\n\tvar req *http.Request\n\tvar er error\n\n\tif r.Insecure {\n\t\tdefaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t} else if defaultTransport.TLSClientConfig != nil {\n\t\t\/\/ the default TLS client (when transport.TLSClientConfig==nil) is\n\t\t\/\/ already set to verify, so do nothing in that case\n\t\tdefaultTransport.TLSClientConfig.InsecureSkipVerify = false\n\t}\n\n\tb, e := prepareRequestBody(r.Body)\n\tif e != nil {\n\t\t\/\/ there was a problem marshaling the body\n\t\treturn nil, &Error{Err: e}\n\t}\n\n\tif r.QueryString != nil {\n\t\tparam, e := paramParse(r.QueryString)\n\t\tif e != nil {\n\t\t\treturn nil, &Error{Err: e}\n\t\t}\n\t\tr.Uri = r.Uri + \"?\" + param\n\t}\n\n\tif r.Compressed && b != nil {\n\t\tgzipBuffer := bytes.NewBuffer([]byte{})\n\t\treadBuffer := bufio.NewReader(b)\n\t\tgzipWriter := gzip.NewWriter(gzipBuffer)\n\t\t_, e = readBuffer.WriteTo(gzipWriter)\n\t\tgzipWriter.Close()\n\t\tif e != nil {\n\t\t\tfmt.Println(\"error: \", e)\n\t\t\treturn nil, &Error{Err: e}\n\t\t}\n\t\treq, er = http.NewRequest(r.Method, r.Uri, gzipBuffer)\n\t} else {\n\t\treq, er = http.NewRequest(r.Method, r.Uri, b)\n\t}\n\n\tif er != nil {\n\t\t\/\/ we couldn't parse the URL.\n\t\treturn nil, &Error{Err: er}\n\t}\n\n\t\/\/ add headers to the request\n\treq.Host = r.Host\n\treq.Header.Add(\"User-Agent\", r.UserAgent)\n\treq.Header.Add(\"Content-Type\", r.ContentType)\n\treq.Header.Add(\"Accept\", r.Accept)\n\tif r.Compressed {\n\t\treq.Header.Add(\"Content-Encoding\", \"gzip\")\n\t\treq.Header.Add(\"Accept-Encoding\", \"gzip\")\n\t}\n\tif r.headers != nil {\n\t\tfor _, header := range r.headers {\n\t\t\treq.Header.Add(header.name, header.value)\n\t\t}\n\t}\n\n\ttimeout := false\n\tvar timer *time.Timer\n\tif r.Timeout > 0 {\n\t\ttimer = time.AfterFunc(r.Timeout, func() {\n\t\t\tdefaultTransport.CancelRequest(req)\n\t\t\ttimeout = true\n\t\t})\n\t}\n\n\tres, err := defaultClient.Do(req)\n\tif timer != nil {\n\t\ttimer.Stop()\n\t}\n\n\tif err != nil {\n\t\tif op, ok := err.(*net.OpError); !timeout && ok {\n\t\t\ttimeout = op.Timeout()\n\t\t}\n\t\treturn nil, &Error{timeout: timeout, Err: err}\n\t}\n\n\tif isRedirect(res.StatusCode) && r.MaxRedirects > 0 {\n\t\tloc, _ := res.Location()\n\t\tr.MaxRedirects--\n\t\tr.Uri = loc.String()\n\t\treturn r.Do()\n\t}\n\n\tif r.Compressed && strings.Contains(res.Header.Get(\"Content-Encoding\"), \"gzip\") {\n\t\tgzipReader, err := gzip.NewReader(res.Body)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, &Error{Err: err}\n\t\t}\n\t\treturn &Response{StatusCode: res.StatusCode, ContentLength: res.ContentLength, Header: res.Header, Body: Body{gzipReader}}, nil\n\t} else {\n\t\treturn &Response{StatusCode: res.StatusCode, ContentLength: res.ContentLength, Header: res.Header, Body: Body{res.Body}}, nil\n\t}\n\treturn newResponse(res), nil\n}\n\nfunc isRedirect(status int) bool {\n\tswitch status {\n\tcase http.StatusMovedPermanently:\n\t\treturn true\n\tcase http.StatusFound:\n\t\treturn true\n\tcase http.StatusSeeOther:\n\t\treturn true\n\tcase http.StatusTemporaryRedirect:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>remove useless newResponse call and method<commit_after>package goreq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Request struct {\n\theaders []headerTuple\n\tMethod string\n\tUri string\n\tBody interface{}\n\tQueryString interface{}\n\tTimeout time.Duration\n\tContentType string\n\tAccept string\n\tHost string\n\tUserAgent string\n\tInsecure bool\n\tMaxRedirects int\n\tCompressed bool\n}\n\ntype Response struct {\n\tStatusCode int\n\tContentLength int64\n\tBody Body\n\tHeader http.Header\n}\n\ntype headerTuple struct {\n\tname string\n\tvalue string\n}\n\ntype Body struct {\n\tio.ReadCloser\n}\n\ntype Error struct {\n\ttimeout bool\n\tErr error\n}\n\nfunc (e *Error) Timeout() bool {\n\treturn e.timeout\n}\n\nfunc (e *Error) Error() string {\n\treturn e.Err.Error()\n}\n\nfunc (b *Body) FromJsonTo(o interface{}) error {\n\tif body, err := ioutil.ReadAll(b); err != nil {\n\t\treturn err\n\t} else if err := json.Unmarshal(body, o); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Body) ToString() (string, error) {\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(body), nil\n}\n\nfunc paramParse(query interface{}) (string, error) {\n\tvar (\n\t\tv = &url.Values{}\n\t\ts = reflect.ValueOf(query)\n\t\tt = reflect.TypeOf(query)\n\t)\n\n\tswitch query.(type) {\n\tcase url.Values:\n\t\treturn query.(url.Values).Encode(), nil\n\tdefault:\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tv.Add(strings.ToLower(t.Field(i).Name), fmt.Sprintf(\"%v\", s.Field(i).Interface()))\n\t\t}\n\t\treturn v.Encode(), nil\n\t}\n}\n\nfunc prepareRequestBody(b interface{}) (io.Reader, error) {\n\tswitch b.(type) {\n\tcase string:\n\t\t\/\/ treat is as text\n\t\treturn strings.NewReader(b.(string)), nil\n\tcase io.Reader:\n\t\t\/\/ treat is as text\n\t\treturn b.(io.Reader), nil\n\tcase []byte:\n\t\t\/\/treat as byte array\n\t\treturn bytes.NewReader(b.([]byte)), nil\n\tcase nil:\n\t\treturn nil, nil\n\tdefault:\n\t\t\/\/ try to jsonify it\n\t\tj, err := json.Marshal(b)\n\t\tif err == nil {\n\t\t\treturn bytes.NewReader(j), nil\n\t\t}\n\t\treturn nil, err\n\t}\n}\n\nvar defaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond}\nvar defaultTransport = &http.Transport{Dial: defaultDialer.Dial}\nvar defaultClient = &http.Client{Transport: defaultTransport}\n\nfunc SetConnectTimeout(duration time.Duration) {\n\tdefaultDialer.Timeout = duration\n}\n\nfunc (r *Request) AddHeader(name string, value string) {\n\tif r.headers == nil {\n\t\tr.headers = []headerTuple{}\n\t}\n\tr.headers = append(r.headers, headerTuple{name: name, value: value})\n}\n\nfunc (r Request) Do() (*Response, error) {\n\tvar req *http.Request\n\tvar er error\n\n\tif r.Insecure {\n\t\tdefaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t} else if defaultTransport.TLSClientConfig != nil {\n\t\t\/\/ the default TLS client (when transport.TLSClientConfig==nil) is\n\t\t\/\/ already set to verify, so do nothing in that case\n\t\tdefaultTransport.TLSClientConfig.InsecureSkipVerify = false\n\t}\n\n\tb, e := prepareRequestBody(r.Body)\n\tif e != nil {\n\t\t\/\/ there was a problem marshaling the body\n\t\treturn nil, &Error{Err: e}\n\t}\n\n\tif r.QueryString != nil {\n\t\tparam, e := paramParse(r.QueryString)\n\t\tif e != nil {\n\t\t\treturn nil, &Error{Err: e}\n\t\t}\n\t\tr.Uri = r.Uri + \"?\" + param\n\t}\n\n\tif r.Compressed && b != nil {\n\t\tgzipBuffer := bytes.NewBuffer([]byte{})\n\t\treadBuffer := bufio.NewReader(b)\n\t\tgzipWriter := gzip.NewWriter(gzipBuffer)\n\t\t_, e = readBuffer.WriteTo(gzipWriter)\n\t\tgzipWriter.Close()\n\t\tif e != nil {\n\t\t\tfmt.Println(\"error: \", e)\n\t\t\treturn nil, &Error{Err: e}\n\t\t}\n\t\treq, er = http.NewRequest(r.Method, r.Uri, gzipBuffer)\n\t} else {\n\t\treq, er = http.NewRequest(r.Method, r.Uri, b)\n\t}\n\n\tif er != nil {\n\t\t\/\/ we couldn't parse the URL.\n\t\treturn nil, &Error{Err: er}\n\t}\n\n\t\/\/ add headers to the request\n\treq.Host = r.Host\n\treq.Header.Add(\"User-Agent\", r.UserAgent)\n\treq.Header.Add(\"Content-Type\", r.ContentType)\n\treq.Header.Add(\"Accept\", r.Accept)\n\tif r.Compressed {\n\t\treq.Header.Add(\"Content-Encoding\", \"gzip\")\n\t\treq.Header.Add(\"Accept-Encoding\", \"gzip\")\n\t}\n\tif r.headers != nil {\n\t\tfor _, header := range r.headers {\n\t\t\treq.Header.Add(header.name, header.value)\n\t\t}\n\t}\n\n\ttimeout := false\n\tvar timer *time.Timer\n\tif r.Timeout > 0 {\n\t\ttimer = time.AfterFunc(r.Timeout, func() {\n\t\t\tdefaultTransport.CancelRequest(req)\n\t\t\ttimeout = true\n\t\t})\n\t}\n\n\tres, err := defaultClient.Do(req)\n\tif timer != nil {\n\t\ttimer.Stop()\n\t}\n\n\tif err != nil {\n\t\tif op, ok := err.(*net.OpError); !timeout && ok {\n\t\t\ttimeout = op.Timeout()\n\t\t}\n\t\treturn nil, &Error{timeout: timeout, Err: err}\n\t}\n\n\tif isRedirect(res.StatusCode) && r.MaxRedirects > 0 {\n\t\tloc, _ := res.Location()\n\t\tr.MaxRedirects--\n\t\tr.Uri = loc.String()\n\t\treturn r.Do()\n\t}\n\n\tif r.Compressed && strings.Contains(res.Header.Get(\"Content-Encoding\"), \"gzip\") {\n\t\tgzipReader, err := gzip.NewReader(res.Body)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, &Error{Err: err}\n\t\t}\n\t\treturn &Response{StatusCode: res.StatusCode, ContentLength: res.ContentLength, Header: res.Header, Body: Body{gzipReader}}, nil\n\t} else {\n\t\treturn &Response{StatusCode: res.StatusCode, ContentLength: res.ContentLength, Header: res.Header, Body: Body{res.Body}}, nil\n\t}\n}\n\nfunc isRedirect(status int) bool {\n\tswitch status {\n\tcase http.StatusMovedPermanently:\n\t\treturn true\n\tcase http.StatusFound:\n\t\treturn true\n\tcase http.StatusSeeOther:\n\t\treturn true\n\tcase http.StatusTemporaryRedirect:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate bitfanDoc\n\/\/ Receive event on a ws connection\npackage websocketinput\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/vjeantet\/bitfan\/codecs\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\twriteWait = 10 * time.Second\n\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tpongWait = 60 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than pongWait.\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\t\/\/ Maximum message size allowed from peer.\n\tmaxMessageSize = 512\n)\n\n\/\/ Receive event on a ws connection\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\tprocessors.CommonOptions `mapstructure:\",squash\"`\n\n\t\/\/ The codec used for outputed data.\n\t\/\/ @Default \"json\"\n\t\/\/ @Type codec\n\tCodec codecs.CodecCollection\n\n\t\/\/ URI path\n\t\/\/ @Default \"wsin\"\n\tUri string\n}\n\n\/\/ Reads events from standard input\ntype processor struct {\n\tprocessors.Base\n\n\topt *options\n\tq chan bool\n\thost string\n\n\twsupgrader websocket.Upgrader\n\n\tHub *Hub\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tCodec: codecs.CodecCollection{\n\t\t\tDec: codecs.New(\"json\", nil, ctx.Log(), ctx.ConfigWorkingLocation()),\n\t\t},\n\t\tUri: \"wsin\",\n\t}\n\tp.opt = &defaults\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.host, err = os.Hostname(); err != nil {\n\t\tp.Logger.Warnf(\"can not get hostname : %v\", err)\n\t}\n\n\tp.wsupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\n\treturn err\n}\n\nfunc (p *processor) Start(e processors.IPacket) error {\n\n\tp.Hub = newHub(p.wellcome)\n\tgo p.Hub.run()\n\tp.WebHook.Add(p.opt.Uri, p.HttpHandler)\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\tp.Hub.stop()\n\treturn nil\n}\n\nfunc (p *processor) processMessage(m []byte) {\n\tr := bytes.NewReader(m)\n\n\tvar dec codecs.Decoder\n\tvar err error\n\tif dec, err = p.opt.Codec.NewDecoder(r); err != nil {\n\t\tp.Logger.Errorln(\"decoder error : \", err.Error())\n\t\treturn\n\t}\n\n\tfor dec.More() {\n\t\tvar record interface{}\n\t\tif err = dec.Decode(&record); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tp.Logger.Debugln(\"error while decoding : \", err)\n\t\t\t} else {\n\t\t\t\tp.Logger.Errorln(\"error while decoding : \", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tvar e processors.IPacket\n\t\tswitch v := record.(type) {\n\t\tcase nil:\n\t\t\te = p.NewPacket(string(m), map[string]interface{}{})\n\t\tcase string:\n\t\t\te = p.NewPacket(v, map[string]interface{}{})\n\t\tcase map[string]interface{}:\n\t\t\te = p.NewPacket(\"\", v)\n\t\tcase []interface{}:\n\t\t\te = p.NewPacket(\"\", map[string]interface{}{\n\t\t\t\t\"request\": v,\n\t\t\t})\n\t\tdefault:\n\t\t\tp.Logger.Errorf(\"Unknow structure %#v\", v)\n\t\t\tcontinue\n\t\t}\n\n\t\tp.opt.ProcessCommonOptions(e.Fields())\n\t\tp.Send(e)\n\t}\n}\n\n\/\/ Handle Request received by bitfan for this agent (url hook should be registered during p.Start)\nfunc (p *processor) HttpHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := p.wsupgrader.Upgrade(w, r, nil)\n\n\tif err != nil {\n\t\tp.Logger.Errorf(\"websocket upgrade - %s\", err.Error())\n\t\treturn\n\t}\n\n\tclient := &Client{\n\t\thub: p.Hub,\n\t\tconn: conn,\n\t\tonMessage: p.processMessage,\n\t}\n\tp.Hub.register <- client\n\n\tgo client.writePump()\n\tgo client.readPump()\n}\n\nfunc (p *processor) wellcome() [][]byte {\n\treturn [][]byte{}\n}\n\n\/\/ Client is a middleman between the websocket connection and the hub.\ntype Client struct {\n\thub *Hub\n\n\t\/\/ The websocket connection.\n\tconn *websocket.Conn\n\n\tonMessage func([]byte)\n\n\tdone chan struct{}\n}\n\n\/\/ readPump pumps messages from the websocket connection to the hub.\n\n\/\/ The application runs readPump in a per-connection goroutine. The application\n\/\/ ensures that there is at most one reader on a connection by executing all\n\/\/ reads from this goroutine.\nfunc (c *Client) readPump() {\n\tdefer func() {\n\t\tc.conn.Close()\n\t\tclose(c.done)\n\t\tc.hub.unregister <- c\n\t}()\n\tc.conn.SetReadLimit(maxMessageSize)\n\tc.conn.SetReadDeadline(time.Now().Add(pongWait))\n\tc.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\tfor {\n\t\t_, message, err := c.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {\n\t\t\t\tlog.Printf(\"IsUnexpectedCloseError error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Build PACKET\n\t\tc.onMessage(message)\n\t}\n\n}\n\n\/\/ writePump pumps messages from the hub to the websocket connection.\n\/\/\n\/\/ A goroutine running writePump is started for each connection. The\n\/\/ application ensures that there is at most one writer to a connection by\n\/\/ executing all writes from this goroutine.\nfunc (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-c.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ hub maintains the set of active clients and Broadcasts messages to the\n\/\/ clients.\ntype Hub struct {\n\t\/\/ Registered clients.\n\tclients map[*Client]bool\n\n\t\/\/ Register requests from the clients.\n\tregister chan *Client\n\n\t\/\/ Unregister requests from clients.\n\tunregister chan *Client\n\n\tdone chan struct{}\n}\n\nfunc newHub(wellcomeMessage func() [][]byte) *Hub {\n\treturn &Hub{\n\t\tregister: make(chan *Client),\n\t\tunregister: make(chan *Client),\n\t\tclients: make(map[*Client]bool),\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (h *Hub) stop() {\n\tfor c, _ := range h.clients {\n\t\th.unregister <- c\n\t}\n\tclose(h.done)\n}\n\nfunc (h *Hub) run() {\n\tfor {\n\t\tselect {\n\t\tcase <-h.done:\n\t\t\treturn\n\t\tcase client := <-h.register:\n\t\t\tclient.done = make(chan struct{}, 1)\n\t\t\th.clients[client] = true\n\n\t\tcase client := <-h.unregister:\n\t\t\tif _, ok := h.clients[client]; ok {\n\t\t\t\tclient.conn.Close()\n\t\t\t\tdelete(h.clients, client)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>processor : websocket input - add option maxMessageSize<commit_after>\/\/go:generate bitfanDoc\n\/\/ Receive event on a ws connection\npackage websocketinput\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/vjeantet\/bitfan\/codecs\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\twriteWait = 10 * time.Second\n\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tpongWait = 60 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than pongWait.\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\t\/\/ Maximum message size allowed from peer.\n\tmaxMessageSize = 512\n)\n\n\/\/ Receive event on a ws connection\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\tprocessors.CommonOptions `mapstructure:\",squash\"`\n\n\t\/\/ The codec used for outputed data.\n\t\/\/ @Default \"json\"\n\t\/\/ @Type codec\n\tCodec codecs.CodecCollection\n\n\t\/\/ URI path\n\t\/\/ @Default \"wsin\"\n\tUri string\n\n\t\/\/ Maximum message size allowed from peer.\n\tMaxMessageSize int `mapstructure:\"max_message_size\"`\n}\n\n\/\/ Reads events from standard input\ntype processor struct {\n\tprocessors.Base\n\n\topt *options\n\tq chan bool\n\thost string\n\n\twsupgrader websocket.Upgrader\n\n\tHub *Hub\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tCodec: codecs.CodecCollection{\n\t\t\tDec: codecs.New(\"json\", nil, ctx.Log(), ctx.ConfigWorkingLocation()),\n\t\t},\n\t\tUri: \"wsin\",\n\t}\n\tp.opt = &defaults\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.host, err = os.Hostname(); err != nil {\n\t\tp.Logger.Warnf(\"can not get hostname : %v\", err)\n\t}\n\n\tp.wsupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\n\treturn err\n}\n\nfunc (p *processor) Start(e processors.IPacket) error {\n\n\tp.Hub = newHub(p.wellcome)\n\tgo p.Hub.run()\n\tp.WebHook.Add(p.opt.Uri, p.HttpHandler)\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\tp.Hub.stop()\n\treturn nil\n}\n\nfunc (p *processor) processMessage(m []byte) {\n\tr := bytes.NewReader(m)\n\n\tvar dec codecs.Decoder\n\tvar err error\n\tif dec, err = p.opt.Codec.NewDecoder(r); err != nil {\n\t\tp.Logger.Errorln(\"decoder error : \", err.Error())\n\t\treturn\n\t}\n\n\tfor dec.More() {\n\t\tvar record interface{}\n\t\tif err = dec.Decode(&record); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tp.Logger.Debugln(\"error while decoding : \", err)\n\t\t\t} else {\n\t\t\t\tp.Logger.Errorln(\"error while decoding : \", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tvar e processors.IPacket\n\t\tswitch v := record.(type) {\n\t\tcase nil:\n\t\t\tcontinue\n\t\tcase string:\n\t\t\te = p.NewPacket(v, map[string]interface{}{})\n\t\tcase map[string]interface{}:\n\t\t\te = p.NewPacket(\"\", v)\n\t\tcase []interface{}:\n\t\t\te = p.NewPacket(\"\", map[string]interface{}{\n\t\t\t\t\"request\": v,\n\t\t\t})\n\t\tdefault:\n\t\t\tp.Logger.Errorf(\"Unknow structure %#v\", v)\n\t\t\tcontinue\n\t\t}\n\n\t\tp.opt.ProcessCommonOptions(e.Fields())\n\t\tp.Send(e)\n\t}\n}\n\n\/\/ Handle Request received by bitfan for this agent (url hook should be registered during p.Start)\nfunc (p *processor) HttpHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := p.wsupgrader.Upgrade(w, r, nil)\n\n\tif err != nil {\n\t\tp.Logger.Errorf(\"websocket upgrade - %s\", err.Error())\n\t\treturn\n\t}\n\n\tclient := &Client{\n\t\thub: p.Hub,\n\t\tconn: conn,\n\t\tonMessage: p.processMessage,\n\t}\n\tp.Hub.register <- client\n\n\tgo client.writePump()\n\tgo client.readPump(int64(p.opt.MaxMessageSize))\n}\n\nfunc (p *processor) wellcome() [][]byte {\n\treturn [][]byte{}\n}\n\n\/\/ Client is a middleman between the websocket connection and the hub.\ntype Client struct {\n\thub *Hub\n\n\t\/\/ The websocket connection.\n\tconn *websocket.Conn\n\n\tonMessage func([]byte)\n\n\tdone chan struct{}\n}\n\n\/\/ readPump pumps messages from the websocket connection to the hub.\n\n\/\/ The application runs readPump in a per-connection goroutine. The application\n\/\/ ensures that there is at most one reader on a connection by executing all\n\/\/ reads from this goroutine.\nfunc (c *Client) readPump(maxMessageSize int64) {\n\tdefer func() {\n\t\tc.conn.Close()\n\t\tclose(c.done)\n\t\tc.hub.unregister <- c\n\t}()\n\tif maxMessageSize > 0 {\n\t\tc.conn.SetReadLimit(maxMessageSize)\n\t}\n\n\tc.conn.SetReadDeadline(time.Now().Add(pongWait))\n\tc.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\tfor {\n\t\t_, message, err := c.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {\n\t\t\t\tlog.Printf(\"IsUnexpectedCloseError error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Build PACKET\n\t\tc.onMessage(message)\n\t}\n\n}\n\n\/\/ writePump pumps messages from the hub to the websocket connection.\n\/\/\n\/\/ A goroutine running writePump is started for each connection. The\n\/\/ application ensures that there is at most one writer to a connection by\n\/\/ executing all writes from this goroutine.\nfunc (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-c.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ hub maintains the set of active clients and Broadcasts messages to the\n\/\/ clients.\ntype Hub struct {\n\t\/\/ Registered clients.\n\tclients map[*Client]bool\n\n\t\/\/ Register requests from the clients.\n\tregister chan *Client\n\n\t\/\/ Unregister requests from clients.\n\tunregister chan *Client\n\n\tdone chan struct{}\n}\n\nfunc newHub(wellcomeMessage func() [][]byte) *Hub {\n\treturn &Hub{\n\t\tregister: make(chan *Client),\n\t\tunregister: make(chan *Client),\n\t\tclients: make(map[*Client]bool),\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (h *Hub) stop() {\n\tfor c, _ := range h.clients {\n\t\th.unregister <- c\n\t}\n\tclose(h.done)\n}\n\nfunc (h *Hub) run() {\n\tfor {\n\t\tselect {\n\t\tcase <-h.done:\n\t\t\treturn\n\t\tcase client := <-h.register:\n\t\t\tclient.done = make(chan struct{}, 1)\n\t\t\th.clients[client] = true\n\n\t\tcase client := <-h.unregister:\n\t\t\tif _, ok := h.clients[client]; ok {\n\t\t\t\tclient.conn.Close()\n\t\t\t\tdelete(h.clients, client)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package image\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/dnephin\/dobi\/tasks\/context\"\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\n\/\/ RunPush pushes an image to the registry\nfunc RunPush(ctx *context.ExecuteContext, t *Task, _ bool) (bool, error) {\n\tpushTag := func(tag string) error {\n\t\treturn pushImage(ctx, t, tag)\n\t}\n\tif err := t.ForEachTag(ctx, pushTag); err != nil {\n\t\treturn false, err\n\t}\n\tt.logger().Info(\"Pushed\")\n\treturn true, nil\n}\n\nfunc pushImage(ctx *context.ExecuteContext, t *Task, tag string) error {\n\trepo, err := parseAuthRepo(tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn Stream(os.Stdout, func(out io.Writer) error {\n\t\treturn ctx.Client.PushImage(docker.PushImageOptions{\n\t\t\tName: tag,\n\t\t\tOutputStream: out,\n\t\t\tRawJSONStream: true,\n\t\t\t\/\/ TODO: timeout\n\t\t}, ctx.GetAuthConfig(repo))\n\t})\n}\n<commit_msg>Fix lint error, unused param<commit_after>package image\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/dnephin\/dobi\/tasks\/context\"\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\n\/\/ RunPush pushes an image to the registry\nfunc RunPush(ctx *context.ExecuteContext, t *Task, _ bool) (bool, error) {\n\tpushTag := func(tag string) error {\n\t\treturn pushImage(ctx, tag)\n\t}\n\tif err := t.ForEachTag(ctx, pushTag); err != nil {\n\t\treturn false, err\n\t}\n\tt.logger().Info(\"Pushed\")\n\treturn true, nil\n}\n\nfunc pushImage(ctx *context.ExecuteContext, tag string) error {\n\trepo, err := parseAuthRepo(tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn Stream(os.Stdout, func(out io.Writer) error {\n\t\treturn ctx.Client.PushImage(docker.PushImageOptions{\n\t\t\tName: tag,\n\t\t\tOutputStream: out,\n\t\t\tRawJSONStream: true,\n\t\t\t\/\/ TODO: timeout\n\t\t}, ctx.GetAuthConfig(repo))\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (C) 2016, 2017 Sam Kumar, Michael Andersen, and the University\n * of California, Berkeley.\n *\n * This file is part of Mr. Plotter (the Multi-Resolution Plotter).\n *\n * Mr. Plotter is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * Mr. Plotter is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with Mr. Plotter. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"gopkg.in\/BTrDB\/btrdb.v4\"\n\n\tws \"github.com\/gorilla\/websocket\"\n\tuuid \"github.com\/pborman\/uuid\"\n)\n\nconst (\n\tQUASAR_LOW int64 = 1 - (16 << 56)\n\tQUASAR_HIGH int64 = (48 << 56) - 1\n\tINVALID_TIME int64 = -0x8000000000000000\n)\n\nfunc splitTime(time int64) (millis int64, nanos int32) {\n\tmillis = time \/ 1000000\n\tnanos = int32(time % 1000000)\n\tif nanos < 0 {\n\t\tnanos += 1000000\n\t\tmillis++\n\t}\n\treturn\n}\n\ntype Writable interface {\n\tGetWriter() io.Writer\n}\n\ntype ConnWrapper struct {\n\tWriting *sync.Mutex\n\tConn *ws.Conn\n\tCurrWriter io.WriteCloser\n}\n\nfunc (cw *ConnWrapper) GetWriter() io.Writer {\n\tcw.Writing.Lock()\n\tw, err := cw.Conn.NextWriter(ws.TextMessage)\n\tif err == nil {\n\t\tcw.CurrWriter = w\n\t\treturn w\n\t} else {\n\t\tlog.Printf(\"Could not get writer on WebSocket: %v\", err)\n\t\treturn nil\n\t}\n}\n\n\/\/ DataRequester encapsulates a series of connections used for obtaining data\n\/\/ from QUASAR.\ntype DataRequester struct {\n\ttotalWaiting uint64\n\tpending uint32\n\tmaxPending uint32\n\tpendingLock *sync.Mutex\n\tpendingCondVar *sync.Cond\n\tstateLock *sync.RWMutex\n\talive bool\n\n\tbtrdb *btrdb.BTrDB\n}\n\n\/\/ NewDataRequester creates a new DataRequester object.\n\/\/ btrdbConn - established connection to a BTrDB cluster.\n\/\/ maxPending - a limit on the maximum number of pending requests. *\/\nfunc NewDataRequester(btrdbConn *btrdb.BTrDB, maxPending uint32) *DataRequester {\n\tpendingLock := &sync.Mutex{}\n\tvar dr *DataRequester = &DataRequester{\n\t\ttotalWaiting: 0,\n\t\tpending: 0,\n\t\tmaxPending: maxPending,\n\t\tpendingLock: pendingLock,\n\t\tpendingCondVar: sync.NewCond(pendingLock),\n\t\tbtrdb: btrdbConn,\n\t}\n\n\treturn dr\n}\n\n\/* Makes a request for data and writes the result to the specified Writer. *\/\nfunc (dr *DataRequester) MakeDataRequest(ctx context.Context, uuidBytes uuid.UUID, startTime int64, endTime int64, pw uint8, writ Writable) {\n\tatomic.AddUint64(&dr.totalWaiting, 1)\n\tdefer atomic.AddUint64(&dr.totalWaiting, 0xFFFFFFFFFFFFFFFF)\n\n\tdr.pendingLock.Lock()\n\tfor dr.pending == dr.maxPending {\n\t\tdr.pendingCondVar.Wait()\n\t}\n\tdr.pending += 1\n\tdr.pendingLock.Unlock()\n\n\tdefer func() {\n\t\tdr.pendingLock.Lock()\n\t\tdr.pending -= 1\n\t\tdr.pendingCondVar.Signal()\n\t\tdr.pendingLock.Unlock()\n\t}()\n\n\tvar w io.Writer\n\n\tvar stream = dr.btrdb.StreamFromUUID(uuidBytes)\n\n\tvar exists bool\n\tvar err error\n\texists, err = stream.Exists(ctx)\n\tif err != nil || !exists {\n\t\tw = writ.GetWriter()\n\t\tw.Write([]byte(\"[]\"))\n\t\treturn\n\t}\n\n\tvar results chan btrdb.StatPoint\n\tvar errors chan error\n\tresults, _, errors = stream.AlignedWindows(ctx, startTime, endTime, pw, 0)\n\n\tw = writ.GetWriter()\n\tw.Write([]byte(\"[\"))\n\n\tvar firstpt bool = true\n\tfor statpt := range results {\n\t\tmillis, nanos := splitTime(statpt.Time)\n\t\tif firstpt {\n\t\t\tw.Write([]byte(fmt.Sprintf(\"[%v,%v,%v,%v,%v,%v]\", millis, nanos, statpt.Min, statpt.Mean, statpt.Max, statpt.Count)))\n\t\t\tfirstpt = false\n\t\t} else {\n\t\t\tw.Write([]byte(fmt.Sprintf(\",[%v,%v,%v,%v,%v,%v]\", millis, nanos, statpt.Min, statpt.Mean, statpt.Max, statpt.Count)))\n\t\t}\n\t}\n\n\tvar waserror bool = false\n\tfor err = range errors {\n\t\tw.Write([]byte(\"\\nError: \"))\n\t\tw.Write([]byte(err.Error()))\n\t\twaserror = true\n\t}\n\n\tif !waserror {\n\t\tw.Write([]byte(\"]\"))\n\t}\n}\n\nfunc (dr *DataRequester) MakeBracketRequest(ctx context.Context, uuids []uuid.UUID, writ Writable) {\n\tatomic.AddUint64(&dr.totalWaiting, 1)\n\tdefer atomic.AddUint64(&dr.totalWaiting, 0xFFFFFFFFFFFFFFFF)\n\n\tdr.pendingLock.Lock()\n\tfor dr.pending == dr.maxPending {\n\t\tdr.pendingCondVar.Wait()\n\t}\n\tdr.pending += 1\n\tdr.pendingLock.Unlock()\n\n\tdefer func() {\n\t\tdr.pendingLock.Lock()\n\t\tdr.pending -= 1\n\t\tdr.pendingCondVar.Signal()\n\t\tdr.pendingLock.Unlock()\n\t}()\n\n\tvar numResponses int = len(uuids) << 1\n\tvar boundarySlice []int64 = make([]int64, numResponses)\n\n\tvar wg sync.WaitGroup\n\n\tvar stream *btrdb.Stream\n\n\tvar i int\n\tfor i = 0; i != numResponses; i++ {\n\t\tvar seconditer bool = ((i & 1) != 0)\n\n\t\tif !seconditer {\n\t\t\tstream = dr.btrdb.StreamFromUUID(uuids[i>>1])\n\n\t\t\tvar exists bool\n\t\t\tvar err error\n\t\t\texists, err = stream.Exists(ctx)\n\t\t\tif err != nil || !exists {\n\t\t\t\tboundarySlice[i] = INVALID_TIME\n\t\t\t\ti++\n\t\t\t\tboundarySlice[i] = INVALID_TIME\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\twg.Add(2)\n\t\t\t}\n\t\t}\n\n\t\tgo func(stream *btrdb.Stream, loc *int64, high bool) {\n\t\t\tvar rawpoint btrdb.RawPoint\n\t\t\tvar e error\n\t\t\tvar ref int64\n\t\t\tif high {\n\t\t\t\tref = QUASAR_HIGH\n\t\t\t} else {\n\t\t\t\tref = QUASAR_LOW\n\t\t\t}\n\t\t\trawpoint, _, e = stream.Nearest(ctx, ref, 0, high)\n\n\t\t\tif e == nil {\n\t\t\t\t*loc = rawpoint.Time\n\t\t\t} else {\n\t\t\t\t*loc = INVALID_TIME\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}(stream, &boundarySlice[i], seconditer)\n\t}\n\n\twg.Wait()\n\n\t\/\/ For final processing once all responses are received\n\tvar (\n\t\tboundary int64\n\t\tlNanos int32\n\t\tlMillis int64\n\t\trNanos int32\n\t\trMillis int64\n\t\tlowest int64 = QUASAR_HIGH\n\t\thighest int64 = QUASAR_LOW\n\t\ttrailchar rune = ','\n\t\tw io.Writer = writ.GetWriter()\n\t)\n\n\tw.Write([]byte(\"{\\\"Brackets\\\": [\"))\n\n\tfor i = 0; i < len(uuids); i++ {\n\t\tboundary = boundarySlice[i<<1]\n\t\tif boundary != INVALID_TIME && boundary < lowest {\n\t\t\tlowest = boundary\n\t\t}\n\t\tlMillis, lNanos = splitTime(boundary)\n\t\tboundary = boundarySlice[(i<<1)+1]\n\t\tif boundary != INVALID_TIME && boundary > highest {\n\t\t\thighest = boundary\n\t\t}\n\t\trMillis, rNanos = splitTime(boundary)\n\t\tif i == len(uuids)-1 {\n\t\t\ttrailchar = ']'\n\t\t}\n\t\tw.Write([]byte(fmt.Sprintf(\"[[%v,%v],[%v,%v]]%c\", lMillis, lNanos, rMillis, rNanos, trailchar)))\n\t}\n\tif len(uuids) == 0 {\n\t\tw.Write([]byte(\"]\"))\n\t}\n\tlMillis, lNanos = splitTime(lowest)\n\trMillis, rNanos = splitTime(highest)\n\tw.Write([]byte(fmt.Sprintf(\",\\\"Merged\\\":[[%v,%v],[%v,%v]]}\", lMillis, lNanos, rMillis, rNanos)))\n}\n<commit_msg>Fix super minor backend bug<commit_after>\/*\n * Copyright (C) 2016, 2017 Sam Kumar, Michael Andersen, and the University\n * of California, Berkeley.\n *\n * This file is part of Mr. Plotter (the Multi-Resolution Plotter).\n *\n * Mr. Plotter is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * Mr. Plotter is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with Mr. Plotter. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"gopkg.in\/BTrDB\/btrdb.v4\"\n\n\tws \"github.com\/gorilla\/websocket\"\n\tuuid \"github.com\/pborman\/uuid\"\n)\n\nconst (\n\tQUASAR_LOW int64 = 1 - (16 << 56)\n\tQUASAR_HIGH int64 = (48 << 56) - 1\n\tINVALID_TIME int64 = -0x8000000000000000\n)\n\nfunc splitTime(time int64) (millis int64, nanos int32) {\n\tmillis = time \/ 1000000\n\tnanos = int32(time % 1000000)\n\tif nanos < 0 {\n\t\tnanos += 1000000\n\t\tmillis--\n\t}\n\treturn\n}\n\ntype Writable interface {\n\tGetWriter() io.Writer\n}\n\ntype ConnWrapper struct {\n\tWriting *sync.Mutex\n\tConn *ws.Conn\n\tCurrWriter io.WriteCloser\n}\n\nfunc (cw *ConnWrapper) GetWriter() io.Writer {\n\tcw.Writing.Lock()\n\tw, err := cw.Conn.NextWriter(ws.TextMessage)\n\tif err == nil {\n\t\tcw.CurrWriter = w\n\t\treturn w\n\t} else {\n\t\tlog.Printf(\"Could not get writer on WebSocket: %v\", err)\n\t\treturn nil\n\t}\n}\n\n\/\/ DataRequester encapsulates a series of connections used for obtaining data\n\/\/ from QUASAR.\ntype DataRequester struct {\n\ttotalWaiting uint64\n\tpending uint32\n\tmaxPending uint32\n\tpendingLock *sync.Mutex\n\tpendingCondVar *sync.Cond\n\tstateLock *sync.RWMutex\n\talive bool\n\n\tbtrdb *btrdb.BTrDB\n}\n\n\/\/ NewDataRequester creates a new DataRequester object.\n\/\/ btrdbConn - established connection to a BTrDB cluster.\n\/\/ maxPending - a limit on the maximum number of pending requests. *\/\nfunc NewDataRequester(btrdbConn *btrdb.BTrDB, maxPending uint32) *DataRequester {\n\tpendingLock := &sync.Mutex{}\n\tvar dr *DataRequester = &DataRequester{\n\t\ttotalWaiting: 0,\n\t\tpending: 0,\n\t\tmaxPending: maxPending,\n\t\tpendingLock: pendingLock,\n\t\tpendingCondVar: sync.NewCond(pendingLock),\n\t\tbtrdb: btrdbConn,\n\t}\n\n\treturn dr\n}\n\n\/* Makes a request for data and writes the result to the specified Writer. *\/\nfunc (dr *DataRequester) MakeDataRequest(ctx context.Context, uuidBytes uuid.UUID, startTime int64, endTime int64, pw uint8, writ Writable) {\n\tatomic.AddUint64(&dr.totalWaiting, 1)\n\tdefer atomic.AddUint64(&dr.totalWaiting, 0xFFFFFFFFFFFFFFFF)\n\n\tdr.pendingLock.Lock()\n\tfor dr.pending == dr.maxPending {\n\t\tdr.pendingCondVar.Wait()\n\t}\n\tdr.pending += 1\n\tdr.pendingLock.Unlock()\n\n\tdefer func() {\n\t\tdr.pendingLock.Lock()\n\t\tdr.pending -= 1\n\t\tdr.pendingCondVar.Signal()\n\t\tdr.pendingLock.Unlock()\n\t}()\n\n\tvar w io.Writer\n\n\tvar stream = dr.btrdb.StreamFromUUID(uuidBytes)\n\n\tvar exists bool\n\tvar err error\n\texists, err = stream.Exists(ctx)\n\tif err != nil || !exists {\n\t\tw = writ.GetWriter()\n\t\tw.Write([]byte(\"[]\"))\n\t\treturn\n\t}\n\n\tvar results chan btrdb.StatPoint\n\tvar errors chan error\n\tresults, _, errors = stream.AlignedWindows(ctx, startTime, endTime, pw, 0)\n\n\tw = writ.GetWriter()\n\tw.Write([]byte(\"[\"))\n\n\tvar firstpt bool = true\n\tfor statpt := range results {\n\t\tmillis, nanos := splitTime(statpt.Time)\n\t\tif firstpt {\n\t\t\tw.Write([]byte(fmt.Sprintf(\"[%v,%v,%v,%v,%v,%v]\", millis, nanos, statpt.Min, statpt.Mean, statpt.Max, statpt.Count)))\n\t\t\tfirstpt = false\n\t\t} else {\n\t\t\tw.Write([]byte(fmt.Sprintf(\",[%v,%v,%v,%v,%v,%v]\", millis, nanos, statpt.Min, statpt.Mean, statpt.Max, statpt.Count)))\n\t\t}\n\t}\n\n\tvar waserror bool = false\n\tfor err = range errors {\n\t\tw.Write([]byte(\"\\nError: \"))\n\t\tw.Write([]byte(err.Error()))\n\t\twaserror = true\n\t}\n\n\tif !waserror {\n\t\tw.Write([]byte(\"]\"))\n\t}\n}\n\nfunc (dr *DataRequester) MakeBracketRequest(ctx context.Context, uuids []uuid.UUID, writ Writable) {\n\tatomic.AddUint64(&dr.totalWaiting, 1)\n\tdefer atomic.AddUint64(&dr.totalWaiting, 0xFFFFFFFFFFFFFFFF)\n\n\tdr.pendingLock.Lock()\n\tfor dr.pending == dr.maxPending {\n\t\tdr.pendingCondVar.Wait()\n\t}\n\tdr.pending += 1\n\tdr.pendingLock.Unlock()\n\n\tdefer func() {\n\t\tdr.pendingLock.Lock()\n\t\tdr.pending -= 1\n\t\tdr.pendingCondVar.Signal()\n\t\tdr.pendingLock.Unlock()\n\t}()\n\n\tvar numResponses int = len(uuids) << 1\n\tvar boundarySlice []int64 = make([]int64, numResponses)\n\n\tvar wg sync.WaitGroup\n\n\tvar stream *btrdb.Stream\n\n\tvar i int\n\tfor i = 0; i != numResponses; i++ {\n\t\tvar seconditer bool = ((i & 1) != 0)\n\n\t\tif !seconditer {\n\t\t\tstream = dr.btrdb.StreamFromUUID(uuids[i>>1])\n\n\t\t\tvar exists bool\n\t\t\tvar err error\n\t\t\texists, err = stream.Exists(ctx)\n\t\t\tif err != nil || !exists {\n\t\t\t\tboundarySlice[i] = INVALID_TIME\n\t\t\t\ti++\n\t\t\t\tboundarySlice[i] = INVALID_TIME\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\twg.Add(2)\n\t\t\t}\n\t\t}\n\n\t\tgo func(stream *btrdb.Stream, loc *int64, high bool) {\n\t\t\tvar rawpoint btrdb.RawPoint\n\t\t\tvar e error\n\t\t\tvar ref int64\n\t\t\tif high {\n\t\t\t\tref = QUASAR_HIGH\n\t\t\t} else {\n\t\t\t\tref = QUASAR_LOW\n\t\t\t}\n\t\t\trawpoint, _, e = stream.Nearest(ctx, ref, 0, high)\n\n\t\t\tif e == nil {\n\t\t\t\t*loc = rawpoint.Time\n\t\t\t} else {\n\t\t\t\t*loc = INVALID_TIME\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}(stream, &boundarySlice[i], seconditer)\n\t}\n\n\twg.Wait()\n\n\t\/\/ For final processing once all responses are received\n\tvar (\n\t\tboundary int64\n\t\tlNanos int32\n\t\tlMillis int64\n\t\trNanos int32\n\t\trMillis int64\n\t\tlowest int64 = QUASAR_HIGH\n\t\thighest int64 = QUASAR_LOW\n\t\ttrailchar rune = ','\n\t\tw io.Writer = writ.GetWriter()\n\t)\n\n\tw.Write([]byte(\"{\\\"Brackets\\\": [\"))\n\n\tfor i = 0; i < len(uuids); i++ {\n\t\tboundary = boundarySlice[i<<1]\n\t\tif boundary != INVALID_TIME && boundary < lowest {\n\t\t\tlowest = boundary\n\t\t}\n\t\tlMillis, lNanos = splitTime(boundary)\n\t\tboundary = boundarySlice[(i<<1)+1]\n\t\tif boundary != INVALID_TIME && boundary > highest {\n\t\t\thighest = boundary\n\t\t}\n\t\trMillis, rNanos = splitTime(boundary)\n\t\tif i == len(uuids)-1 {\n\t\t\ttrailchar = ']'\n\t\t}\n\t\tw.Write([]byte(fmt.Sprintf(\"[[%v,%v],[%v,%v]]%c\", lMillis, lNanos, rMillis, rNanos, trailchar)))\n\t}\n\tif len(uuids) == 0 {\n\t\tw.Write([]byte(\"]\"))\n\t}\n\tlMillis, lNanos = splitTime(lowest)\n\trMillis, rNanos = splitTime(highest)\n\tw.Write([]byte(fmt.Sprintf(\",\\\"Merged\\\":[[%v,%v],[%v,%v]]}\", lMillis, lNanos, rMillis, rNanos)))\n}\n<|endoftext|>"} {"text":"<commit_before>package exprel \/\/ import \"layeh.com\/exprel\"\n\nimport (\n\t\"strconv\"\n)\n\n\/\/ Func is a function that can be executed from an Expression.\ntype Func func(call *Call) (interface{}, error)\n\n\/\/ Call contains information about an expression function call.\ntype Call struct {\n\t\/\/ The name used to invoke the function.\n\tName string\n\t\/\/ The arguments passed to the function.\n\tValues []interface{}\n}\n\n\/\/ String returns the ith argument, iff it is a string. Otherwise, the function\n\/\/ panics with a *RuntimeError.\nfunc (c *Call) String(i int) string {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\tvalue, ok := c.Values[i].(string)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\treturn value\n}\n\n\/\/ OptString returns the ith argument, iff it is a string. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a string, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptString(i int, def string) string {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(string)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\treturn value\n}\n\n\/\/ Number returns the ith argument, iff it is a float64. Otherwise, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) Number(i int) float64 {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\tvalue, ok := c.Values[i].(float64)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\treturn value\n}\n\n\/\/ OptNumber returns the ith argument, iff it is a float64. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a number, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptNumber(i int, def float64) float64 {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(float64)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\treturn value\n}\n\n\/\/ Boolean returns the ith argument, iff it is a bool. Otherwise, the function\n\/\/ panics with a *RuntimeError.\nfunc (c *Call) Boolean(i int) bool {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\tvalue, ok := c.Values[i].(bool)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\treturn value\n}\n\n\/\/ OptBoolean returns the ith argument, iff it is a bool. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a bool, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptBoolean(i int, def bool) bool {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(bool)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\treturn value\n}\n\n\/\/ Source is a source of data for an expression. Get is called when an\n\/\/ identifier needs to be evaluated.\ntype Source interface {\n\tGet(name string) (value interface{}, ok bool)\n}\n\n\/\/ EmptySource is a Source that contains no values.\nvar EmptySource Source\n\ntype emptySource struct{}\n\nfunc (emptySource) Get(name string) (interface{}, bool) {\n\treturn nil, false\n}\n\nfunc init() {\n\tEmptySource = emptySource{}\n}\n\n\/\/ SourceFunc is a Source that looks up an identifier via a function.\ntype SourceFunc func(name string) (value interface{}, ok bool)\n\n\/\/ Get implements Source.\nfunc (fn SourceFunc) Get(name string) (interface{}, bool) {\n\treturn fn(name)\n}\n\n\/\/ SourceMap is a Source that looks up an identifier in a map.\ntype SourceMap map[string]interface{}\n\n\/\/ Get implements Source.\nfunc (m SourceMap) Get(name string) (interface{}, bool) {\n\tvalue, ok := m[name]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tswitch value.(type) {\n\tcase bool, string, float64, Func, func(*Call) (interface{}, error):\n\t\treturn value, true\n\tdefault:\n\t\treturn nil, false\n\t}\n}\n\n\/\/ Sources is a slice of sources. The first Source, in order, to return ok,\n\/\/ will have its value returned.\ntype Sources []Source\n\n\/\/ Get implements Source.\nfunc (so Sources) Get(name string) (interface{}, bool) {\n\tfor _, s := range so {\n\t\tvalue, ok := s.Get(name)\n\t\tif ok {\n\t\t\treturn value, true\n\t\t}\n\t}\n\treturn nil, false\n}\n<commit_msg>use single letter variable name in receiver functions<commit_after>package exprel \/\/ import \"layeh.com\/exprel\"\n\nimport (\n\t\"strconv\"\n)\n\n\/\/ Func is a function that can be executed from an Expression.\ntype Func func(call *Call) (interface{}, error)\n\n\/\/ Call contains information about an expression function call.\ntype Call struct {\n\t\/\/ The name used to invoke the function.\n\tName string\n\t\/\/ The arguments passed to the function.\n\tValues []interface{}\n}\n\n\/\/ String returns the ith argument, iff it is a string. Otherwise, the function\n\/\/ panics with a *RuntimeError.\nfunc (c *Call) String(i int) string {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\tvalue, ok := c.Values[i].(string)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\treturn value\n}\n\n\/\/ OptString returns the ith argument, iff it is a string. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a string, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptString(i int, def string) string {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(string)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be string\")\n\t}\n\treturn value\n}\n\n\/\/ Number returns the ith argument, iff it is a float64. Otherwise, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) Number(i int) float64 {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\tvalue, ok := c.Values[i].(float64)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\treturn value\n}\n\n\/\/ OptNumber returns the ith argument, iff it is a float64. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a number, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptNumber(i int, def float64) float64 {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(float64)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be float64\")\n\t}\n\treturn value\n}\n\n\/\/ Boolean returns the ith argument, iff it is a bool. Otherwise, the function\n\/\/ panics with a *RuntimeError.\nfunc (c *Call) Boolean(i int) bool {\n\tif len(c.Values) <= i {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\tvalue, ok := c.Values[i].(bool)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\treturn value\n}\n\n\/\/ OptBoolean returns the ith argument, iff it is a bool. If the ith argument\n\/\/ does not exist, def is returned. If the ith argument is not a bool, the\n\/\/ function panics with a *RuntimeError.\nfunc (c *Call) OptBoolean(i int, def bool) bool {\n\tif len(c.Values) <= i {\n\t\treturn def\n\t}\n\tvalue, ok := c.Values[i].(bool)\n\tif !ok {\n\t\tre(c.Name + \" expects argument \" + strconv.Itoa(i) + \" to be bool\")\n\t}\n\treturn value\n}\n\n\/\/ Source is a source of data for an expression. Get is called when an\n\/\/ identifier needs to be evaluated.\ntype Source interface {\n\tGet(name string) (value interface{}, ok bool)\n}\n\n\/\/ EmptySource is a Source that contains no values.\nvar EmptySource Source\n\ntype emptySource struct{}\n\nfunc (emptySource) Get(name string) (interface{}, bool) {\n\treturn nil, false\n}\n\nfunc init() {\n\tEmptySource = emptySource{}\n}\n\n\/\/ SourceFunc is a Source that looks up an identifier via a function.\ntype SourceFunc func(name string) (value interface{}, ok bool)\n\n\/\/ Get implements Source.\nfunc (f SourceFunc) Get(name string) (interface{}, bool) {\n\treturn f(name)\n}\n\n\/\/ SourceMap is a Source that looks up an identifier in a map.\ntype SourceMap map[string]interface{}\n\n\/\/ Get implements Source.\nfunc (m SourceMap) Get(name string) (interface{}, bool) {\n\tvalue, ok := m[name]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tswitch value.(type) {\n\tcase bool, string, float64, Func, func(*Call) (interface{}, error):\n\t\treturn value, true\n\tdefault:\n\t\treturn nil, false\n\t}\n}\n\n\/\/ Sources is a slice of sources. The first Source, in order, to return ok,\n\/\/ will have its value returned.\ntype Sources []Source\n\n\/\/ Get implements Source.\nfunc (s Sources) Get(name string) (interface{}, bool) {\n\tfor _, s := range s {\n\t\tvalue, ok := s.Get(name)\n\t\tif ok {\n\t\t\treturn value, true\n\t\t}\n\t}\n\treturn nil, false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*******************************************************************************\nThe MIT License (MIT)\n\nCopyright (c) 2013 Hajime Nakagami\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*******************************************************************************\/\n\npackage firebirdsql\n\nimport (\n \"errors\"\n \"database\/sql\/driver\"\n )\n\n\ntype _result struct {\n}\nfunc (r *_result) LastInsertId() (int64, error) {\n return 0, nil\n}\n\nfunc (r *_result) RowsAffected() (int64, error) {\n return 0, nil\n}\n\n\ntype firebirdsqlStmt struct {\n wp *wireProtocol\n stmtHandle int32\n tx *firebirdsqlTx\n}\n\nfunc (stmt *firebirdsqlStmt) Close() (err error) {\n stmt.wp.opFreeStatement(stmt.stmtHandle, 2) \/\/ DSQL_drop\n return\n}\n\nfunc (stmt *firebirdsqlStmt) NumInput() int {\n return -1\n}\n\nfunc (stmt *firebirdsqlStmt) Exec(args []driver.Value) (result driver.Result, err error) {\n stmt.wp.opExecute(stmt.stmtHandle, stmt.tx.transHandle, args)\n _, _, _, err = stmt.wp.opResponse()\n\n result = new(_result)\n return\n}\n\nfunc (stmt *firebirdsqlStmt) Query(args []driver.Value) (driver.Rows, error) {\n var err error\n err = errors.New(\"Not implement\")\n return nil, err\n}\n\nfunc newFirebirdsqlStmt(fc *firebirdsqlConn, query string) (*firebirdsqlStmt, error) {\n var err error\n stmt := new(firebirdsqlStmt)\n stmt.wp = fc.wp\n stmt.tx = fc.tx\n fc.wp.opAllocateStatement()\n\n stmt.stmtHandle, _, _, err = fc.wp.opResponse()\n return stmt, err\n}\n<commit_msg>opPrepareStatement()<commit_after>\/*******************************************************************************\nThe MIT License (MIT)\n\nCopyright (c) 2013 Hajime Nakagami\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*******************************************************************************\/\n\npackage firebirdsql\n\nimport (\n \"errors\"\n \"database\/sql\/driver\"\n )\n\n\ntype _result struct {\n}\nfunc (r *_result) LastInsertId() (int64, error) {\n return 0, nil\n}\n\nfunc (r *_result) RowsAffected() (int64, error) {\n return 0, nil\n}\n\n\ntype firebirdsqlStmt struct {\n wp *wireProtocol\n stmtHandle int32\n tx *firebirdsqlTx\n}\n\nfunc (stmt *firebirdsqlStmt) Close() (err error) {\n stmt.wp.opFreeStatement(stmt.stmtHandle, 2) \/\/ DSQL_drop\n return\n}\n\nfunc (stmt *firebirdsqlStmt) NumInput() int {\n return -1\n}\n\nfunc (stmt *firebirdsqlStmt) Exec(args []driver.Value) (result driver.Result, err error) {\n stmt.wp.opExecute(stmt.stmtHandle, stmt.tx.transHandle, args)\n _, _, _, err = stmt.wp.opResponse()\n\n result = new(_result)\n return\n}\n\nfunc (stmt *firebirdsqlStmt) Query(args []driver.Value) (driver.Rows, error) {\n var err error\n err = errors.New(\"Not implement\")\n return nil, err\n}\n\nfunc newFirebirdsqlStmt(fc *firebirdsqlConn, query string) (stmt *firebirdsqlStmt, err error) {\n stmt = new(firebirdsqlStmt)\n stmt.wp = fc.wp\n stmt.tx = fc.tx\n fc.wp.opAllocateStatement()\n stmt.stmtHandle, _, _, err = fc.wp.opResponse()\n if err != nil {\n return\n }\n fc.wp.opPrepareStatement(stmt.stmtHandle, stmt.tx.transHandle, query)\n _, _, _, err = fc.wp.opResponse()\n return\n}\n<|endoftext|>"} {"text":"<commit_before>package asset\n\nimport (\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/api\/txdb\"\n\t\"chain\/api\/utxodb\"\n\t\"chain\/database\/pg\"\n\t\"chain\/errors\"\n\t\"chain\/fedchain-sandbox\/txscript\"\n\t\"chain\/fedchain\/bc\"\n\t\"chain\/fedchain\/state\"\n\t\"chain\/metrics\"\n)\n\ntype sqlUTXODB struct{}\n\nfunc (sqlUTXODB) LoadUTXOs(ctx context.Context, accountID, assetID string) ([]*utxodb.UTXO, error) {\n\tbcOuts, err := txdb.LoadUTXOs(ctx, accountID, assetID)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load blockchain outputs\")\n\t}\n\tpoolOuts, err := txdb.LoadPoolUTXOs(ctx, accountID, assetID)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load pool outputs\")\n\t}\n\treturn append(bcOuts, poolOuts...), nil\n}\n\nfunc (sqlUTXODB) SaveReservations(ctx context.Context, utxos []*utxodb.UTXO, exp time.Time) error {\n\tdefer metrics.RecordElapsed(time.Now())\n\tconst q = `\n\t\tUPDATE utxos\n\t\tSET reserved_until=$3\n\t\tWHERE (txid, index) IN (SELECT unnest($1::text[]), unnest($2::integer[]))\n\t`\n\tvar txids []string\n\tvar indexes []uint32\n\tfor _, u := range utxos {\n\t\ttxids = append(txids, u.Outpoint.Hash.String())\n\t\tindexes = append(indexes, u.Outpoint.Index)\n\t}\n\t_, err := pg.FromContext(ctx).Exec(q, pg.Strings(txids), pg.Uint32s(indexes), exp)\n\treturn errors.Wrap(err, \"update utxo reserve expiration\")\n}\n\n\/\/ applyTx updates the output set to reflect\n\/\/ the effects of tx. It deletes consumed utxos\n\/\/ and inserts newly-created outputs.\n\/\/ Must be called inside a transaction.\nfunc applyTx(ctx context.Context, tx *bc.Tx, outRecs []*utxodb.Receiver) (deleted []bc.Outpoint, inserted []*txdb.Output, err error) {\n\tdefer metrics.RecordElapsed(time.Now())\n\n\thash := tx.Hash()\n\t_ = pg.FromContext(ctx).(pg.Tx) \/\/ panics if not in a db transaction\n\n\terr = txdb.InsertTx(ctx, tx)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert into txs\")\n\t}\n\n\terr = txdb.InsertPoolTx(ctx, tx)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert into pool_txs\")\n\t}\n\n\tinserted, err = insertUTXOs(ctx, hash, tx.Outputs, outRecs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert outputs\")\n\t}\n\n\tfor _, in := range tx.Inputs {\n\t\tif in.IsIssuance() {\n\t\t\tcontinue\n\t\t}\n\t\tdeleted = append(deleted, in.Previous)\n\t}\n\terr = txdb.InsertPoolInputs(ctx, deleted)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"delete\")\n\t}\n\n\treturn deleted, inserted, err\n}\n\nfunc insertUTXOs(ctx context.Context, hash bc.Hash, txouts []*bc.TxOutput, recs []*utxodb.Receiver) ([]*txdb.Output, error) {\n\tif len(txouts) != len(recs) {\n\t\treturn nil, errors.New(\"length mismatch\")\n\t}\n\tdefer metrics.RecordElapsed(time.Now())\n\n\t\/\/ This function inserts utxos into the db, and maps\n\t\/\/ them to receiver info (account id and addr index).\n\t\/\/ There are three cases:\n\t\/\/ 1. UTXO pays change or to an \"immediate\" account receiver.\n\t\/\/ In this case, we get the receiver info from recs\n\t\/\/ (which came from the client and was validated\n\t\/\/ in FinalizeTx).\n\t\/\/ 2. UTXO pays to an address receiver record.\n\t\/\/ In this case, we get the receiver info from\n\t\/\/ the addresses table (and eventually delete\n\t\/\/ the record).\n\t\/\/ 3. UTXO pays to an unknown address.\n\t\/\/ In this case, there is no receiver info.\n\touts := initAddrInfoFromRecs(hash, txouts, recs) \/\/ case 1\n\terr := loadAddrInfoFromDB(ctx, outs) \/\/ case 2\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = txdb.InsertPoolOutputs(ctx, hash, outs)\n\treturn outs, errors.Wrap(err)\n}\n\nfunc initAddrInfoFromRecs(hash bc.Hash, txouts []*bc.TxOutput, recs []*utxodb.Receiver) []*txdb.Output {\n\tinsert := make([]*txdb.Output, len(txouts))\n\tfor i, txo := range txouts {\n\t\to := &txdb.Output{\n\t\t\tOutput: state.Output{\n\t\t\t\tTxOutput: *txo,\n\t\t\t\tOutpoint: bc.Outpoint{Hash: hash, Index: uint32(i)},\n\t\t\t},\n\t\t}\n\t\tif rec := recs[i]; rec != nil {\n\t\t\to.AccountID = rec.AccountID\n\t\t\to.ManagerNodeID = rec.ManagerNodeID\n\t\t\tcopy(o.AddrIndex[:], rec.AddrIndex)\n\t\t}\n\t\tinsert[i] = o\n\t}\n\treturn insert\n}\n\n\/\/ loadAddrInfoFromDB loads account ID, manager node ID, and addr index\n\/\/ from the addresses table for outputs that need it.\n\/\/ Not all are guaranteed to be in the database;\n\/\/ some outputs will be owned by third parties.\n\/\/ This function loads what it can.\nfunc loadAddrInfoFromDB(ctx context.Context, outs []*txdb.Output) error {\n\tvar (\n\t\taddrs []string\n\t\toutsByAddr = make(map[string]*txdb.Output)\n\t)\n\tfor i, o := range outs {\n\t\tif o.AccountID != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\taddr, err := txscript.PkScriptAddr(o.Script)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"bad pk script in output %d\", i)\n\t\t}\n\n\t\taddrs = append(addrs, addr.String())\n\t\toutsByAddr[addr.String()] = o\n\t}\n\n\tconst q = `\n\t\tSELECT address, account_id, manager_node_id, key_index(key_index)\n\t\tFROM addresses\n\t\tWHERE address IN (SELECT unnest($1::text[]))\n\t`\n\trows, err := pg.FromContext(ctx).Query(q, pg.Strings(addrs))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"select\")\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar (\n\t\t\taddr string\n\t\t\tmanagerNodeID string\n\t\t\taccountID string\n\t\t\taddrIndex []uint32\n\t\t)\n\t\terr = rows.Scan(\n\t\t\t&addr,\n\t\t\t&accountID,\n\t\t\t&managerNodeID,\n\t\t\t(*pg.Uint32s)(&addrIndex),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"scan\")\n\t\t}\n\n\t\to := outsByAddr[addr]\n\t\to.AccountID = accountID\n\t\to.ManagerNodeID = managerNodeID\n\t\tcopy(o.AddrIndex[:], addrIndex)\n\t}\n\n\treturn errors.Wrap(rows.Err(), \"rows\")\n}\n<commit_msg>api\/asset: fix loading initial reserver utxo set<commit_after>package asset\n\nimport (\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/api\/txdb\"\n\t\"chain\/api\/utxodb\"\n\t\"chain\/database\/pg\"\n\t\"chain\/errors\"\n\t\"chain\/fedchain-sandbox\/txscript\"\n\t\"chain\/fedchain\/bc\"\n\t\"chain\/fedchain\/state\"\n\t\"chain\/metrics\"\n)\n\ntype sqlUTXODB struct{}\n\nfunc (sqlUTXODB) LoadUTXOs(ctx context.Context, accountID, assetID string) (resvOuts []*utxodb.UTXO, err error) {\n\tbcOuts, err := txdb.LoadUTXOs(ctx, accountID, assetID)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load blockchain outputs\")\n\t}\n\tpoolOuts, err := txdb.LoadPoolUTXOs(ctx, accountID, assetID)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load pool outputs\")\n\t}\n\n\tvar bcOutpoints []bc.Outpoint\n\tfor _, o := range bcOuts {\n\t\tbcOutpoints = append(bcOutpoints, o.Outpoint)\n\t}\n\tpoolView, err := txdb.NewPoolView(ctx, bcOutpoints)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err)\n\t}\n\tinBC := make(map[bc.Outpoint]bool)\n\tfor _, o := range bcOuts {\n\t\tif !isSpent(ctx, o.Outpoint, poolView) {\n\t\t\tresvOuts = append(resvOuts, o)\n\t\t\tinBC[o.Outpoint] = true\n\t\t}\n\t}\n\tfor _, o := range poolOuts {\n\t\tif !inBC[o.Outpoint] {\n\t\t\tresvOuts = append(resvOuts, o)\n\t\t}\n\t}\n\treturn resvOuts, nil\n}\n\nfunc isSpent(ctx context.Context, p bc.Outpoint, v state.ViewReader) bool {\n\to := v.Output(ctx, p)\n\treturn o != nil && o.Spent\n}\n\nfunc (sqlUTXODB) SaveReservations(ctx context.Context, utxos []*utxodb.UTXO, exp time.Time) error {\n\tdefer metrics.RecordElapsed(time.Now())\n\tconst q = `\n\t\tUPDATE utxos\n\t\tSET reserved_until=$3\n\t\tWHERE (txid, index) IN (SELECT unnest($1::text[]), unnest($2::integer[]))\n\t`\n\tvar txids []string\n\tvar indexes []uint32\n\tfor _, u := range utxos {\n\t\ttxids = append(txids, u.Outpoint.Hash.String())\n\t\tindexes = append(indexes, u.Outpoint.Index)\n\t}\n\t_, err := pg.FromContext(ctx).Exec(q, pg.Strings(txids), pg.Uint32s(indexes), exp)\n\treturn errors.Wrap(err, \"update utxo reserve expiration\")\n}\n\n\/\/ applyTx updates the output set to reflect\n\/\/ the effects of tx. It deletes consumed utxos\n\/\/ and inserts newly-created outputs.\n\/\/ Must be called inside a transaction.\nfunc applyTx(ctx context.Context, tx *bc.Tx, outRecs []*utxodb.Receiver) (deleted []bc.Outpoint, inserted []*txdb.Output, err error) {\n\tdefer metrics.RecordElapsed(time.Now())\n\n\thash := tx.Hash()\n\t_ = pg.FromContext(ctx).(pg.Tx) \/\/ panics if not in a db transaction\n\n\terr = txdb.InsertTx(ctx, tx)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert into txs\")\n\t}\n\n\terr = txdb.InsertPoolTx(ctx, tx)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert into pool_txs\")\n\t}\n\n\tinserted, err = insertUTXOs(ctx, hash, tx.Outputs, outRecs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"insert outputs\")\n\t}\n\n\tfor _, in := range tx.Inputs {\n\t\tif in.IsIssuance() {\n\t\t\tcontinue\n\t\t}\n\t\tdeleted = append(deleted, in.Previous)\n\t}\n\terr = txdb.InsertPoolInputs(ctx, deleted)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"delete\")\n\t}\n\n\treturn deleted, inserted, err\n}\n\nfunc insertUTXOs(ctx context.Context, hash bc.Hash, txouts []*bc.TxOutput, recs []*utxodb.Receiver) ([]*txdb.Output, error) {\n\tif len(txouts) != len(recs) {\n\t\treturn nil, errors.New(\"length mismatch\")\n\t}\n\tdefer metrics.RecordElapsed(time.Now())\n\n\t\/\/ This function inserts utxos into the db, and maps\n\t\/\/ them to receiver info (account id and addr index).\n\t\/\/ There are three cases:\n\t\/\/ 1. UTXO pays change or to an \"immediate\" account receiver.\n\t\/\/ In this case, we get the receiver info from recs\n\t\/\/ (which came from the client and was validated\n\t\/\/ in FinalizeTx).\n\t\/\/ 2. UTXO pays to an address receiver record.\n\t\/\/ In this case, we get the receiver info from\n\t\/\/ the addresses table (and eventually delete\n\t\/\/ the record).\n\t\/\/ 3. UTXO pays to an unknown address.\n\t\/\/ In this case, there is no receiver info.\n\touts := initAddrInfoFromRecs(hash, txouts, recs) \/\/ case 1\n\terr := loadAddrInfoFromDB(ctx, outs) \/\/ case 2\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = txdb.InsertPoolOutputs(ctx, hash, outs)\n\treturn outs, errors.Wrap(err)\n}\n\nfunc initAddrInfoFromRecs(hash bc.Hash, txouts []*bc.TxOutput, recs []*utxodb.Receiver) []*txdb.Output {\n\tinsert := make([]*txdb.Output, len(txouts))\n\tfor i, txo := range txouts {\n\t\to := &txdb.Output{\n\t\t\tOutput: state.Output{\n\t\t\t\tTxOutput: *txo,\n\t\t\t\tOutpoint: bc.Outpoint{Hash: hash, Index: uint32(i)},\n\t\t\t},\n\t\t}\n\t\tif rec := recs[i]; rec != nil {\n\t\t\to.AccountID = rec.AccountID\n\t\t\to.ManagerNodeID = rec.ManagerNodeID\n\t\t\tcopy(o.AddrIndex[:], rec.AddrIndex)\n\t\t}\n\t\tinsert[i] = o\n\t}\n\treturn insert\n}\n\n\/\/ loadAddrInfoFromDB loads account ID, manager node ID, and addr index\n\/\/ from the addresses table for outputs that need it.\n\/\/ Not all are guaranteed to be in the database;\n\/\/ some outputs will be owned by third parties.\n\/\/ This function loads what it can.\nfunc loadAddrInfoFromDB(ctx context.Context, outs []*txdb.Output) error {\n\tvar (\n\t\taddrs []string\n\t\toutsByAddr = make(map[string]*txdb.Output)\n\t)\n\tfor i, o := range outs {\n\t\tif o.AccountID != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\taddr, err := txscript.PkScriptAddr(o.Script)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"bad pk script in output %d\", i)\n\t\t}\n\n\t\taddrs = append(addrs, addr.String())\n\t\toutsByAddr[addr.String()] = o\n\t}\n\n\tconst q = `\n\t\tSELECT address, account_id, manager_node_id, key_index(key_index)\n\t\tFROM addresses\n\t\tWHERE address IN (SELECT unnest($1::text[]))\n\t`\n\trows, err := pg.FromContext(ctx).Query(q, pg.Strings(addrs))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"select\")\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar (\n\t\t\taddr string\n\t\t\tmanagerNodeID string\n\t\t\taccountID string\n\t\t\taddrIndex []uint32\n\t\t)\n\t\terr = rows.Scan(\n\t\t\t&addr,\n\t\t\t&accountID,\n\t\t\t&managerNodeID,\n\t\t\t(*pg.Uint32s)(&addrIndex),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"scan\")\n\t\t}\n\n\t\to := outsByAddr[addr]\n\t\to.AccountID = accountID\n\t\to.ManagerNodeID = managerNodeID\n\t\tcopy(o.AddrIndex[:], addrIndex)\n\t}\n\n\treturn errors.Wrap(rows.Err(), \"rows\")\n}\n<|endoftext|>"} {"text":"<commit_before>package haigo\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype Param struct {\n\tName string\n\tType string\n}\n\ntype HaigoQuery struct {\n\tName string `yaml:\"name\"`\n\tDescription string `yaml:\"description,omitempty\"`\n\tQueryString string `yaml:\"query\"`\n\tParams []Param \/\/ TODO\n}\n\ntype HaigoParams map[string]interface{}\n\nfunc (h *HaigoQuery) Execute(col *mgo.Collection, params HaigoParams) (*mgo.Query, error) {\n\n\tq, err := h.Query(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn col.Find(q), nil\n}\n\ntype HaigoFile struct {\n\tQueries map[string]*HaigoQuery\n}\n\nfunc (m *HaigoFile) unmarshalYAML(data []byte) error {\n\n\tvar hqs []HaigoQuery\n\n\terr := yaml.Unmarshal(data, &hqs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tqm := make(map[string]*HaigoQuery)\n\n\tfor i := range hqs {\n\t\tqm[hqs[i].Name] = &hqs[i]\n\t}\n\n\tm.Queries = qm\n\n\treturn nil\n}\n\n\/\/ sanitizeParams - Adds single quotes if param is a string (as needed by Mongo).\nfunc sanitizeParams(params HaigoParams) HaigoParams {\n\tfor k, v := range params {\n\t\tswitch v.(type) {\n\t\tcase string:\n\t\t\tparams[k] = fmt.Sprintf(\"\\\"%s\\\"\", v)\n\t\t}\n\n\t}\n\treturn params\n}\n\n\/\/ Query - Accepts a params map and returns a bson.M.\nfunc (h *HaigoQuery) Query(params HaigoParams) (map[string]interface{}, error) {\n\n\t\/\/ Create the template\n\tt, err := template.New(\"haigo\").Parse(h.QueryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Buffer to capture string\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Execute template\n\terr = t.Execute(buf, sanitizeParams(params))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Unmarshal JSON into Map\n\tvar m map[string]interface{}\n\terr = json.Unmarshal(buf.Bytes(), &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}\n\n\/\/ LoadQueryFile - Reads in Mongo Query File for use with Haigo.\nfunc LoadQueryFile(file string) (*HaigoFile, error) {\n\treturn parseMongoFile(file)\n}\n<commit_msg>unexport internal param<commit_after>package haigo\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype param struct {\n\tName string\n\tType string\n}\n\ntype HaigoQuery struct {\n\tName string `yaml:\"name\"`\n\tDescription string `yaml:\"description,omitempty\"`\n\tQueryString string `yaml:\"query\"`\n\tparams []param \/\/ TODO\n}\n\ntype HaigoParams map[string]interface{}\n\nfunc (h *HaigoQuery) Execute(col *mgo.Collection, params HaigoParams) (*mgo.Query, error) {\n\n\tq, err := h.Query(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn col.Find(q), nil\n}\n\ntype HaigoFile struct {\n\tQueries map[string]*HaigoQuery\n}\n\nfunc (m *HaigoFile) unmarshalYAML(data []byte) error {\n\n\tvar hqs []HaigoQuery\n\n\terr := yaml.Unmarshal(data, &hqs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tqm := make(map[string]*HaigoQuery)\n\n\tfor i := range hqs {\n\t\tqm[hqs[i].Name] = &hqs[i]\n\t}\n\n\tm.Queries = qm\n\n\treturn nil\n}\n\n\/\/ sanitizeParams - Adds single quotes if param is a string (as needed by Mongo).\nfunc sanitizeParams(params HaigoParams) HaigoParams {\n\tfor k, v := range params {\n\t\tswitch v.(type) {\n\t\tcase string:\n\t\t\tparams[k] = fmt.Sprintf(\"\\\"%s\\\"\", v)\n\t\t}\n\n\t}\n\treturn params\n}\n\n\/\/ Query - Accepts a params map and returns a bson.M.\nfunc (h *HaigoQuery) Query(params HaigoParams) (map[string]interface{}, error) {\n\n\t\/\/ Create the template\n\tt, err := template.New(\"haigo\").Parse(h.QueryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Buffer to capture string\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Execute template\n\terr = t.Execute(buf, sanitizeParams(params))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Unmarshal JSON into Map\n\tvar m map[string]interface{}\n\terr = json.Unmarshal(buf.Bytes(), &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}\n\n\/\/ LoadQueryFile - Reads in Mongo Query File for use with Haigo.\nfunc LoadQueryFile(file string) (*HaigoFile, error) {\n\treturn parseMongoFile(file)\n}\n<|endoftext|>"} {"text":"<commit_before>package apps\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\tginkgoconfig \"github.com\/onsi\/ginkgo\/config\"\n\t\"github.com\/onsi\/ginkgo\/reporters\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/vito\/cmdtest\"\n\n\t. \"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\"\n\t. \"github.com\/pivotal-cf-experimental\/cf-test-helpers\/cf\"\n\t. \"github.com\/pivotal-cf-experimental\/cf-test-helpers\/runner\"\n)\n\nfunc TestLifecycle(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\n\tAsUser(RegularUserContext, func () {\n\t\tRunSpecsWithDefaultAndCustomReporters(t, \"Application Lifecycle\", []Reporter{reporters.NewJUnitReporter(fmt.Sprintf(\"junit_%d.xml\", ginkgoconfig.GinkgoConfig.ParallelNode))})\n\t})\n\n}\n\nvar config = LoadConfig()\nvar AppName = \"\"\n\nvar doraPath = \"..\/assets\/dora\"\nvar helloPath = \"..\/assets\/hello-world\"\nvar serviceBrokerPath = \"..\/assets\/service_broker\"\n\nfunc Curling(endpoint string) func() *cmdtest.Session {\n\treturn func() *cmdtest.Session {\n\t\treturn Curl(AppUri(AppName, endpoint, config.AppsDomain))\n\t}\n}\n<commit_msg>Remove unused variable<commit_after>package apps\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\tginkgoconfig \"github.com\/onsi\/ginkgo\/config\"\n\t\"github.com\/onsi\/ginkgo\/reporters\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/vito\/cmdtest\"\n\n\t. \"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\"\n\t. \"github.com\/pivotal-cf-experimental\/cf-test-helpers\/cf\"\n\t. \"github.com\/pivotal-cf-experimental\/cf-test-helpers\/runner\"\n)\n\nfunc TestLifecycle(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\n\tAsUser(RegularUserContext, func () {\n\t\tRunSpecsWithDefaultAndCustomReporters(t, \"Application Lifecycle\", []Reporter{reporters.NewJUnitReporter(fmt.Sprintf(\"junit_%d.xml\", ginkgoconfig.GinkgoConfig.ParallelNode))})\n\t})\n\n}\n\nvar config = LoadConfig()\nvar AppName = \"\"\n\nvar doraPath = \"..\/assets\/dora\"\nvar helloPath = \"..\/assets\/hello-world\"\n\nfunc Curling(endpoint string) func() *cmdtest.Session {\n\treturn func() *cmdtest.Session {\n\t\treturn Curl(AppUri(AppName, endpoint, config.AppsDomain))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package builtin\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tflow \"github.com\/wanliu\/goflow\"\n)\n\nvar REPLACE_DICT map[string]string = map[string]string{\n\t\"今早\": \"今天早上\",\n\t\"今下\": \"今天下午\",\n\t\"明早\": \"明天早上\",\n\t\"明下\": \"明天下午\",\n\t\"今早上\": \"今天早上\",\n\t\"今下午\": \"今天下午\",\n\t\"明早上\": \"明天早上\",\n\t\"明下午\": \"明天下午\",\n\t\"1.1红\": \"1100红\",\n\t\"1.1原\": \"1100原\",\n}\n\ntype TextPreprocesor struct {\n\tflow.Component\n\n\tMultiField\n\n\tOut chan<- string\n\tIn <-chan string\n}\n\nfunc NewTextPreprocesor() interface{} {\n\treturn new(TextPreprocesor)\n}\n\nfunc (c *TextPreprocesor) OnIn(input string) {\n\toutput := atFilter(input)\n\toutput = numberAfterLetter(output)\n\toutput = dateTransfer(output)\n\toutput = dictTransfer(output)\n\toutput = replaceUnit(output)\n\tc.Out <- output\n}\n\nfunc numberAfterLetter(s string) string {\n\tr := regexp.MustCompile(\"[a-zA-Z][0-9]\")\n\n\tis := r.FindStringIndex(s)\n\n\tfor len(is) == 2 {\n\t\ti := (is[0] + is[1]) \/ 2\n\t\ts = s[:i] + \" \" + s[i:]\n\n\t\tis = r.FindStringIndex(s)\n\t}\n\n\treturn s\n}\n\n\/\/ 今早 => 今天早上\n\/\/ 今下 => 今天下午\n\/\/ 明早 => 明天早上\n\/\/ 明下 => 明天下午\n\/\/ 今早上 => 今天早上\n\/\/ 今下午 => 今天下午\n\/\/ 明早上 => 明天早上\n\/\/ 明下午 => 明天下午\n\/\/ 后天\n\/\/ [星期日,星期天,周日]\nfunc dateTransfer(s string) string {\n\tdat := time.Now().AddDate(0, 0, 2)\n\tdatStr := dat.Format(\"1月2日\")\n\ts = strings.Replace(s, \"后天\", datStr, -1)\n\n\tstep := int(7 - time.Now().Weekday())\n\n\tif step == 7 {\n\t\tstep = 0\n\t}\n\n\tdat = time.Now().AddDate(0, 0, step)\n\tdatStr = dat.Format(\"1月2日\")\n\ts = strings.Replace(s, \"星期日\", datStr, -1)\n\ts = strings.Replace(s, \"星期天\", datStr, -1)\n\ts = strings.Replace(s, \"周日\", datStr, -1)\n\n\treturn s\n}\n\nfunc dictTransfer(s string) string {\n\tfor k, v := range REPLACE_DICT {\n\t\ts = strings.Replace(s, k, v, -1)\n\t}\n\n\treturn s\n}\n\nfunc atFilter(s string) string {\n\tr := regexp.MustCompile(\"^@[\\u4e00-\\u9fa5\\\\w]+\\\\s\")\n\tis := r.FindStringIndex(s)\n\n\tif len(is) == 2 {\n\t\ti := is[1]\n\t\ts = s[i:]\n\t}\n\n\treturn s\n}\n\n\/\/ 件 条 个 支 => 龘件 龘条 龘个 龘支\nfunc replaceUnit(s string) string {\n\tpalceholder := \"龘\"\n\tunits := []string{\n\t\t\"件\", \"条\", \"个\", \"支\",\n\t}\n\n\tfor _, unit := range units {\n\t\tr := regexp.MustCompile(\"\\\\d\" + unit)\n\t\tis := r.FindStringIndex(s)\n\t\tfor len(is) == 2 {\n\t\t\ttotal := is[1] - is[0]\n\t\t\tunitlen := len(unit)\n\t\t\ts = fmt.Sprintf(\"%v%v%v\", s[:is[0]+total-unitlen], palceholder, s[is[1]-unitlen:])\n\t\t\tis = r.FindStringIndex(s)\n\t\t}\n\t}\n\n\treturn s\n}\n<commit_msg>fix iphone @<commit_after>package builtin\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tflow \"github.com\/wanliu\/goflow\"\n)\n\nvar REPLACE_DICT map[string]string = map[string]string{\n\t\"今早\": \"今天早上\",\n\t\"今下\": \"今天下午\",\n\t\"明早\": \"明天早上\",\n\t\"明下\": \"明天下午\",\n\t\"今早上\": \"今天早上\",\n\t\"今下午\": \"今天下午\",\n\t\"明早上\": \"明天早上\",\n\t\"明下午\": \"明天下午\",\n\t\"1.1红\": \"1100红\",\n\t\"1.1原\": \"1100原\",\n}\n\ntype TextPreprocesor struct {\n\tflow.Component\n\n\tMultiField\n\n\tOut chan<- string\n\tIn <-chan string\n}\n\nfunc NewTextPreprocesor() interface{} {\n\treturn new(TextPreprocesor)\n}\n\nfunc (c *TextPreprocesor) OnIn(input string) {\n\toutput := atFilter(input)\n\toutput = numberAfterLetter(output)\n\toutput = dateTransfer(output)\n\toutput = dictTransfer(output)\n\toutput = replaceUnit(output)\n\tc.Out <- output\n}\n\nfunc numberAfterLetter(s string) string {\n\tr := regexp.MustCompile(\"[a-zA-Z][0-9]\")\n\n\tis := r.FindStringIndex(s)\n\n\tfor len(is) == 2 {\n\t\ti := (is[0] + is[1]) \/ 2\n\t\ts = s[:i] + \" \" + s[i:]\n\n\t\tis = r.FindStringIndex(s)\n\t}\n\n\treturn s\n}\n\n\/\/ 今早 => 今天早上\n\/\/ 今下 => 今天下午\n\/\/ 明早 => 明天早上\n\/\/ 明下 => 明天下午\n\/\/ 今早上 => 今天早上\n\/\/ 今下午 => 今天下午\n\/\/ 明早上 => 明天早上\n\/\/ 明下午 => 明天下午\n\/\/ 后天\n\/\/ [星期日,星期天,周日]\nfunc dateTransfer(s string) string {\n\tdat := time.Now().AddDate(0, 0, 2)\n\tdatStr := dat.Format(\"1月2日\")\n\ts = strings.Replace(s, \"后天\", datStr, -1)\n\n\tstep := int(7 - time.Now().Weekday())\n\n\tif step == 7 {\n\t\tstep = 0\n\t}\n\n\tdat = time.Now().AddDate(0, 0, step)\n\tdatStr = dat.Format(\"1月2日\")\n\ts = strings.Replace(s, \"星期日\", datStr, -1)\n\ts = strings.Replace(s, \"星期天\", datStr, -1)\n\ts = strings.Replace(s, \"周日\", datStr, -1)\n\n\treturn s\n}\n\nfunc dictTransfer(s string) string {\n\tfor k, v := range REPLACE_DICT {\n\t\ts = strings.Replace(s, k, v, -1)\n\t}\n\n\treturn s\n}\n\nfunc atFilter(s string) string {\n\tr := regexp.MustCompile(\"^@[\\u4e00-\\u9fa5\\\\w]+[\\\\s\\u2005]\")\n\tis := r.FindStringIndex(s)\n\n\tif len(is) == 2 {\n\t\ti := is[1]\n\t\ts = s[i:]\n\t}\n\n\treturn s\n}\n\n\/\/ 件 条 个 支 => 龘件 龘条 龘个 龘支\nfunc replaceUnit(s string) string {\n\tpalceholder := \"龘\"\n\tunits := []string{\n\t\t\"件\", \"条\", \"个\", \"支\",\n\t}\n\n\tfor _, unit := range units {\n\t\tr := regexp.MustCompile(\"\\\\d\" + unit)\n\t\tis := r.FindStringIndex(s)\n\t\tfor len(is) == 2 {\n\t\t\ttotal := is[1] - is[0]\n\t\t\tunitlen := len(unit)\n\t\t\ts = fmt.Sprintf(\"%v%v%v\", s[:is[0]+total-unitlen], palceholder, s[is[1]-unitlen:])\n\t\t\tis = r.FindStringIndex(s)\n\t\t}\n\t}\n\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>package telegram\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\ttgbotapi \"github.com\/go-telegram-bot-api\/telegram-bot-api\/v5\"\n\tjsoniter \"github.com\/json-iterator\/go\"\n\t\"github.com\/kak-tus\/irma_bot\/storage\"\n)\n\nconst usageText = `\nTo enable AntiSpam protection of your group:\n\n1. Add this bot to group.\n2. Grant administrator permissions to bot (this allow bot kick spammers).\n\nBy default bot uses URL protection: if newbie user send URL or forward message - bot kicks user.\nYou can disable or enable this protection by sending to bot:\n\n__IRMA_BOT_NAME__ use_ban_url\n\nor\n\n__IRMA_BOT_NAME__ no_ban_url\n\nAdditionaly, you can add questions protection\nSend message in group, format it like this:\n\n__IRMA_BOT_NAME__\nHello. This group has AntiSpam protection.\nYou must get correct answer to next question in one minute or you will be kicked.\nIn case of incorrect answer you can try join group after one day.\n\nQuestion 1?+Correct answer 1;Incorrect answer 1;Incorrect answer 2\nQuestion 2?+Correct answer 1;+Correct answer 2;Incorrect answer 1\n\nDisable or enable this by\n\n__IRMA_BOT_NAME__ use_ban_question\n\nor\n\n__IRMA_BOT_NAME__ no_ban_question\n\nTo setup wait time before ban user send\n\n__IRMA_BOT_NAME__ set_ban_timeout <timeout in minutes from 1 to 60>\n\nas example\n\n__IRMA_BOT_NAME__ set_ban_timeout 5\n\nhttps:\/\/github.com\/kak-tus\/irma_bot\n`\n\nconst botNameTemplate = \"__IRMA_BOT_NAME__\"\n\nfunc (o *InstanceObj) process(ctx context.Context, msg tgbotapi.Update) error {\n\tif msg.Message != nil {\n\t\treturn o.processMsg(ctx, msg.Message)\n\t} else if msg.CallbackQuery != nil {\n\t\treturn o.processCallback(ctx, msg.CallbackQuery)\n\t}\n\n\treturn nil\n}\n\nfunc (o *InstanceObj) processMsg(ctx context.Context, msg *tgbotapi.Message) error {\n\ttextWithBotName := strings.ReplaceAll(usageText, botNameTemplate, o.cnf.Telegram.BotName)\n\n\tif msg.Chat.IsPrivate() {\n\t\tresp := tgbotapi.NewMessage(msg.Chat.ID, textWithBotName)\n\n\t\t_, err := o.bot.Send(resp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ Ban users with extra long names\n\t\/\/ It's probably \"name spammers\"\n\tbanned, err := o.banLongNames(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif banned {\n\t\treturn nil\n\t}\n\n\tbanned, err = o.banKickPool(ctx, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif banned {\n\t\treturn nil\n\t}\n\n\t\/\/ Special protection from immediately added messages.\n\t\/\/ If user send message and newbie message is not processed yet.\n\t\/\/ Over some time we got this action and delete message\/kick user\n\t\/\/ if it is in kick pool\n\tact := storage.Action{\n\t\tChatID: msg.Chat.ID,\n\t\tType: storage.ActionTypeDelete,\n\t\tMessageID: msg.MessageID,\n\t\tUserID: int(msg.From.ID),\n\t}\n\tif err := o.stor.AddToActionPool(ctx, act, time.Second); err != nil {\n\t\treturn err\n\t}\n\n\tact = storage.Action{\n\t\tChatID: msg.Chat.ID,\n\t\tType: storage.ActionTypeKick,\n\t\tUserID: int(msg.From.ID),\n\t}\n\n\tif err := o.stor.AddToActionPool(ctx, act, time.Second); err != nil {\n\t\treturn err\n\t}\n\n\tcnt, err := o.stor.GetNewbieMessages(ctx, msg.Chat.ID, int(msg.From.ID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ In case of newbie we got count >0, for ordinary user count=0\n\tif cnt > 0 && cnt <= 4 {\n\t\t\/\/ if cnt > 0 && cnt <= 40 {\n\t\treturn o.messageFromNewbie(ctx, msg)\n\t}\n\n\tif msg.NewChatMembers != nil {\n\t\treturn o.newMembers(ctx, msg)\n\t}\n\n\tname := fmt.Sprintf(\"@%s\", o.cnf.Telegram.BotName)\n\n\tif strings.HasPrefix(msg.Text, name) {\n\t\treturn o.messageToBot(ctx, msg)\n\t}\n\n\treturn nil\n}\n\nfunc (o *InstanceObj) processCallback(ctx context.Context, msg *tgbotapi.CallbackQuery) error {\n\t\/\/ UserID_ChatID_QuestionID_AnswerNum\n\ttkns := strings.Split(msg.Data, \"_\")\n\tif len(tkns) != 4 {\n\t\treturn nil\n\t}\n\n\tuserID, err := strconv.ParseInt(tkns[0], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchatID, err := strconv.ParseInt(tkns[1], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif msg.From.ID != userID || msg.Message.Chat.ID != chatID {\n\t\treturn nil\n\t}\n\n\tgr, err := o.model.Queries.GetGroup(ctx, chatID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquestionID, err := strconv.Atoi(tkns[2])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquest := defaultQuestions\n\tif len(gr.Questions) != 0 {\n\t\terr := jsoniter.Unmarshal(gr.Questions, &quest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif questionID >= len(quest) {\n\t\treturn errors.New(\"Question id from callback greater, then questions count\")\n\t}\n\n\tanswerNum, err := strconv.Atoi(tkns[3])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif answerNum >= len(quest[questionID].Answers) {\n\t\treturn errors.New(\"Answer num from callback greater, then answers count\")\n\t}\n\n\tif err := o.deleteMessage(chatID, msg.Message.MessageID); err != nil {\n\t\treturn err\n\t}\n\n\tif quest[questionID].Answers[answerNum].Correct == 1 {\n\t\terr := o.stor.DelKicked(ctx, chatID, userID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Cosmetic code changes.<commit_after>package telegram\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\ttgbotapi \"github.com\/go-telegram-bot-api\/telegram-bot-api\/v5\"\n\tjsoniter \"github.com\/json-iterator\/go\"\n\t\"github.com\/kak-tus\/irma_bot\/storage\"\n)\n\nconst usageText = `\nTo enable AntiSpam protection of your group:\n\n1. Add this bot to group.\n2. Grant administrator permissions to bot (this allow bot kick spammers).\n\nBy default bot uses URL protection: if newbie user send URL or forward message - bot kicks user.\nYou can disable or enable this protection by sending to bot:\n\n__IRMA_BOT_NAME__ use_ban_url\n\nor\n\n__IRMA_BOT_NAME__ no_ban_url\n\nAdditionaly, you can add questions protection\nSend message in group, format it like this:\n\n__IRMA_BOT_NAME__\nHello. This group has AntiSpam protection.\nYou must get correct answer to next question in one minute or you will be kicked.\nIn case of incorrect answer you can try join group after one day.\n\nQuestion 1?+Correct answer 1;Incorrect answer 1;Incorrect answer 2\nQuestion 2?+Correct answer 1;+Correct answer 2;Incorrect answer 1\n\nDisable or enable this by\n\n__IRMA_BOT_NAME__ use_ban_question\n\nor\n\n__IRMA_BOT_NAME__ no_ban_question\n\nTo setup wait time before ban user send\n\n__IRMA_BOT_NAME__ set_ban_timeout <timeout in minutes from 1 to 60>\n\nas example\n\n__IRMA_BOT_NAME__ set_ban_timeout 5\n\nhttps:\/\/github.com\/kak-tus\/irma_bot\n`\n\nconst botNameTemplate = \"__IRMA_BOT_NAME__\"\n\nfunc (o *InstanceObj) process(ctx context.Context, msg tgbotapi.Update) error {\n\tif msg.Message != nil {\n\t\treturn o.processMsg(ctx, msg.Message)\n\t} else if msg.CallbackQuery != nil {\n\t\treturn o.processCallback(ctx, msg.CallbackQuery)\n\t}\n\n\treturn nil\n}\n\nfunc (o *InstanceObj) processMsg(ctx context.Context, msg *tgbotapi.Message) error {\n\ttextWithBotName := strings.ReplaceAll(usageText, botNameTemplate, o.cnf.Telegram.BotName)\n\n\tif msg.Chat.IsPrivate() {\n\t\tresp := tgbotapi.NewMessage(msg.Chat.ID, textWithBotName)\n\n\t\t_, err := o.bot.Send(resp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ Ban users with extra long names\n\t\/\/ It's probably \"name spammers\"\n\tbanned, err := o.banLongNames(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif banned {\n\t\treturn nil\n\t}\n\n\tbanned, err = o.banKickPool(ctx, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif banned {\n\t\treturn nil\n\t}\n\n\t\/\/ Special protection from immediately added messages.\n\t\/\/ If user send message and newbie message is not processed yet.\n\t\/\/ Over some time we got this action and delete message\/kick user\n\t\/\/ if it is in kick pool\n\t\/\/ Add all messages from all users\n\t\/\/ This is not good for huge count of messages\n\t\/\/ TODO\n\tact := storage.Action{\n\t\tChatID: msg.Chat.ID,\n\t\tType: storage.ActionTypeDelete,\n\t\tMessageID: msg.MessageID,\n\t\tUserID: int(msg.From.ID),\n\t}\n\tif err := o.stor.AddToActionPool(ctx, act, time.Second); err != nil {\n\t\treturn err\n\t}\n\n\tact = storage.Action{\n\t\tChatID: msg.Chat.ID,\n\t\tType: storage.ActionTypeKick,\n\t\tUserID: int(msg.From.ID),\n\t}\n\n\tif err := o.stor.AddToActionPool(ctx, act, time.Second); err != nil {\n\t\treturn err\n\t}\n\n\tcnt, err := o.stor.GetNewbieMessages(ctx, msg.Chat.ID, int(msg.From.ID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ In case of newbie we got count >0, for ordinary user count=0\n\tif cnt > 0 && cnt <= 4 {\n\t\treturn o.messageFromNewbie(ctx, msg)\n\t}\n\n\tif msg.NewChatMembers != nil {\n\t\treturn o.newMembers(ctx, msg)\n\t}\n\n\tname := fmt.Sprintf(\"@%s\", o.cnf.Telegram.BotName)\n\n\tif strings.HasPrefix(msg.Text, name) {\n\t\treturn o.messageToBot(ctx, msg)\n\t}\n\n\treturn nil\n}\n\nfunc (o *InstanceObj) processCallback(ctx context.Context, msg *tgbotapi.CallbackQuery) error {\n\t\/\/ UserID_ChatID_QuestionID_AnswerNum\n\ttkns := strings.Split(msg.Data, \"_\")\n\tif len(tkns) != 4 {\n\t\treturn nil\n\t}\n\n\tuserID, err := strconv.ParseInt(tkns[0], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchatID, err := strconv.ParseInt(tkns[1], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif msg.From.ID != userID || msg.Message.Chat.ID != chatID {\n\t\treturn nil\n\t}\n\n\tgr, err := o.model.Queries.GetGroup(ctx, chatID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquestionID, err := strconv.Atoi(tkns[2])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquest := defaultQuestions\n\tif len(gr.Questions) != 0 {\n\t\terr := jsoniter.Unmarshal(gr.Questions, &quest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif questionID >= len(quest) {\n\t\treturn errors.New(\"Question id from callback greater, then questions count\")\n\t}\n\n\tanswerNum, err := strconv.Atoi(tkns[3])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif answerNum >= len(quest[questionID].Answers) {\n\t\treturn errors.New(\"Answer num from callback greater, then answers count\")\n\t}\n\n\tif err := o.deleteMessage(chatID, msg.Message.MessageID); err != nil {\n\t\treturn err\n\t}\n\n\tif quest[questionID].Answers[answerNum].Correct == 1 {\n\t\terr := o.stor.DelKicked(ctx, chatID, userID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package toolbox\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/Matcher represents a matcher, that matches input from offset position, it returns number of characters matched.\ntype Matcher interface {\n\t\/\/Match matches input starting from offset, it return number of characters matched\n\tMatch(input string, offset int) (matched int)\n}\n\n\/\/Token a matchable input\ntype Token struct {\n\tToken int\n\tMatched string\n}\n\n\/\/Tokenizer represents a token scanner.\ntype Tokenizer struct {\n\tmatchers map[int]Matcher\n\tInput string\n\tIndex int\n\tInvalidToken int\n\tEndOfFileToken int\n}\n\n\/\/Nexts matches the first of the candidates\nfunc (t *Tokenizer) Nexts(candidates ...int) *Token {\n\tfor _, candidate := range candidates {\n\t\tresult := t.Next(candidate)\n\t\tif result.Token != t.InvalidToken {\n\t\t\treturn result\n\n\t\t}\n\t}\n\treturn &Token{t.InvalidToken, \"\"}\n}\n\n\/\/Next tries to match a candidate, it returns token if imatching is successful.\nfunc (t *Tokenizer) Next(candidate int) *Token {\n\toffset := t.Index\n\tif !(offset < len(t.Input)) {\n\t\treturn &Token{t.EndOfFileToken, \"\"}\n\t}\n\tif matcher, ok := t.matchers[candidate]; ok {\n\t\tmatchedSize := matcher.Match(t.Input, offset)\n\t\tif matchedSize > 0 {\n\t\t\tt.Index = t.Index + matchedSize\n\t\t\treturn &Token{candidate, t.Input[offset : offset+matchedSize]}\n\t\t}\n\n\t} else {\n\t\tpanic(fmt.Sprintf(\"failed to lookup matcher for %v\", candidate))\n\t}\n\treturn &Token{t.InvalidToken, \"\"}\n}\n\n\/\/NewTokenizer creates a new NewTokenizer, it takes input, invalidToken, endOfFileToeken, and matchers.\nfunc NewTokenizer(input string, invalidToken int, endOfFileToken int, matcher map[int]Matcher) *Tokenizer {\n\treturn &Tokenizer{\n\t\tmatchers: matcher,\n\t\tInput: input,\n\t\tIndex: 0,\n\t\tInvalidToken: invalidToken,\n\t\tEndOfFileToken: endOfFileToken,\n\t}\n}\n\n\/\/CharactersMatcher represents a matcher, that matches any of Chars.\ntype CharactersMatcher struct {\n\tChars string \/\/characters to be matched\n}\n\n\/\/Match matches any characters defined in Chars in the input, returns 1 if character has been matched\nfunc (m CharactersMatcher) Match(input string, offset int) (matched int) {\n\tvar result = 0\nouter:\n\tfor i := 0; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i : offset+i+1]\n\t\tfor j := 0; j < len(m.Chars); j++ {\n\t\t\tif aChar == m.Chars[j:j+1] {\n\t\t\t\tresult++\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\treturn result\n}\n\nfunc isLetter(aChar string) bool {\n\treturn (aChar >= \"a\" && aChar <= \"z\") || (aChar >= \"A\" && aChar <= \"Z\")\n}\n\nfunc isDigit(aChar string) bool {\n\treturn (aChar >= \"0\" && aChar <= \"9\")\n}\n\n\/\/EOFMatcher represents end of input matcher\ntype EOFMatcher struct {\n}\n\n\/\/Match returns 1 if end of input has been reached otherwise 0\nfunc (m EOFMatcher) Match(input string, offset int) (matched int) {\n\tif offset+1 == len(input) {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n\/\/IntMatcher represents a matcher that finds any int in the input\ntype IntMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m IntMatcher) Match(input string, offset int) (matched int) {\n\tif !isDigit(input[offset : offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i : offset+i+1]\n\t\tif !isDigit(aChar) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype LiteralMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m LiteralMatcher) Match(input string, offset int) (matched int) {\n\tif !isLetter(input[offset : offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i : offset+i+1]\n\t\tif !((isLetter(aChar)) || isDigit(aChar) || aChar == \"_\" || aChar == \".\") {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype IdMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m IdMatcher) Match(input string, offset int) (matched int) {\n\tif !isLetter(input[offset:offset+1]) && !isDigit(input[offset:offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i : offset+i+1]\n\t\tif !((isLetter(aChar)) || isDigit(aChar) || aChar == \"_\" || aChar == \".\") {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/SequenceMatcher represents a matcher that finds any sequence until find provided terminators\ntype SequenceMatcher struct {\n\tTerminators []string\n\tCaseSensitive bool\n}\n\nfunc (m *SequenceMatcher) hasTerminator(candidate string) bool {\n\tvar candidateLength = len(candidate)\n\tfor _, terminator := range m.Terminators {\n\t\tterminatorLength := len(terminator)\n\t\tif len(terminator) > candidateLength {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !m.CaseSensitive {\n\t\t\tif strings.ToLower(terminator) == strings.ToLower(string(candidate[:terminatorLength])) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif terminator == string(candidate[:terminatorLength]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *SequenceMatcher) Match(input string, offset int) (matched int) {\n\tvar i = 0\n\tfor ; i < len(input)-offset; i++ {\n\t\tif m.hasTerminator(string(input[offset+i:])) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/NewSequenceMatcher creates a new matcher that finds any sequence until find provided terminators\nfunc NewSequenceMatcher(terminators ...string) Matcher {\n\treturn &SequenceMatcher{\n\t\tTerminators: terminators,\n\t}\n}\n\n\/\/CustomIdMatcher represents a matcher that finds any literals with additional custom set of characters in the input\ntype customIdMatcher struct {\n\tAllowed map[string]bool\n}\n\nfunc (m *customIdMatcher) isValid(aChar string) bool {\n\tif isLetter(aChar) {\n\t\treturn true\n\t}\n\tif isDigit(aChar) {\n\t\treturn true\n\t}\n\treturn m.Allowed[aChar]\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *customIdMatcher) Match(input string, offset int) (matched int) {\n\n\tif !m.isValid(input[offset : offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i : offset+i+1]\n\t\tif !m.isValid(aChar) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/NewCustomIdMatcher creates new custom matcher\nfunc NewCustomIdMatcher(allowedChars ...string) Matcher {\n\tvar result = &customIdMatcher{\n\t\tAllowed: make(map[string]bool),\n\t}\n\n\tif len(allowedChars) == 1 && len(allowedChars[0]) > 0 {\n\t\tfor _, allowed := range allowedChars[0] {\n\t\t\tresult.Allowed[string(allowed)] = true\n\t\t}\n\t}\n\tfor _, allowed := range allowedChars {\n\t\tresult.Allowed[allowed] = true\n\t}\n\treturn result\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype BodyMatcher struct {\n\tBegin string\n\tEnd string\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *BodyMatcher) Match(input string, offset int) (matched int) {\n\tbeginLen := len(m.Begin)\n\tendLen := len(m.End)\n\tuniEclosed := m.Begin == m.End\n\n\tif offset+beginLen >= len(input) {\n\t\treturn 0\n\t}\n\tif input[offset:offset+beginLen] != m.Begin {\n\t\treturn 0\n\t}\n\n\tvar depth = 1\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\n\t\tcanCheckEnd := offset+i+endLen <= len(input)\n\t\tif !canCheckEnd {\n\t\t\treturn 0\n\t\t}\n\t\tif !uniEclosed {\n\t\t\tcanCheckBegin := offset+i+beginLen <= len(input)\n\t\t\tif canCheckBegin {\n\t\t\t\tif string(input[offset+i:offset+i+beginLen]) == m.Begin {\n\t\t\t\t\tdepth++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif string(input[offset+i:offset+i+endLen]) == m.End {\n\t\t\tdepth--\n\t\t}\n\t\tif depth == 0 {\n\t\t\ti += endLen\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/KeywordMatcher represents a keyword matcher\ntype KeywordMatcher struct {\n\tKeyword string\n\tCaseSensitive bool\n}\n\n\/\/Match matches keyword in the input, it returns number of character matched.\nfunc (m KeywordMatcher) Match(input string, offset int) (matched int) {\n\tif !(offset+len(m.Keyword)-1 < len(input)) {\n\t\treturn 0\n\t}\n\n\tif m.CaseSensitive {\n\t\tif input[offset:offset+len(m.Keyword)] == m.Keyword {\n\t\t\treturn len(m.Keyword)\n\t\t}\n\t} else {\n\t\tif strings.ToLower(input[offset:offset+len(m.Keyword)]) == strings.ToLower(m.Keyword) {\n\t\t\treturn len(m.Keyword)\n\t\t}\n\t}\n\treturn 0\n}\n\n\/\/KeywordsMatcher represents a matcher that finds any of specified keywords in the input\ntype KeywordsMatcher struct {\n\tKeywords []string\n\tCaseSensitive bool\n}\n\n\/\/Match matches any specified keyword, it returns number of character matched.\nfunc (m KeywordsMatcher) Match(input string, offset int) (matched int) {\n\tfor _, keyword := range m.Keywords {\n\t\tif len(input)-offset < len(keyword) {\n\t\t\tcontinue\n\t\t}\n\t\tif m.CaseSensitive {\n\t\t\tif input[offset:offset+len(keyword)] == keyword {\n\t\t\t\treturn len(keyword)\n\t\t\t}\n\t\t} else {\n\t\t\tif strings.ToLower(input[offset:offset+len(keyword)]) == strings.ToLower(keyword) {\n\t\t\t\treturn len(keyword)\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n<commit_msg>added helpers<commit_after>package toolbox\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/Matcher represents a matcher, that matches input from offset position, it returns number of characters matched.\ntype Matcher interface {\n\t\/\/Match matches input starting from offset, it return number of characters matched\n\tMatch(input string, offset int) (matched int)\n}\n\n\/\/Token a matchable input\ntype Token struct {\n\tToken int\n\tMatched string\n}\n\n\/\/Tokenizer represents a token scanner.\ntype Tokenizer struct {\n\tmatchers map[int]Matcher\n\tInput string\n\tIndex int\n\tInvalidToken int\n\tEndOfFileToken int\n}\n\n\/\/Nexts matches the first of the candidates\nfunc (t *Tokenizer) Nexts(candidates ...int) *Token {\n\tfor _, candidate := range candidates {\n\t\tresult := t.Next(candidate)\n\t\tif result.Token != t.InvalidToken {\n\t\t\treturn result\n\n\t\t}\n\t}\n\treturn &Token{t.InvalidToken, \"\"}\n}\n\n\/\/Next tries to match a candidate, it returns token if imatching is successful.\nfunc (t *Tokenizer) Next(candidate int) *Token {\n\toffset := t.Index\n\tif !(offset < len(t.Input)) {\n\t\treturn &Token{t.EndOfFileToken, \"\"}\n\t}\n\n\tif candidate == t.EndOfFileToken {\n\t\treturn &Token{t.InvalidToken, \"\"}\n\t}\n\tif matcher, ok := t.matchers[candidate]; ok {\n\t\tmatchedSize := matcher.Match(t.Input, offset)\n\t\tif matchedSize > 0 {\n\t\t\tt.Index = t.Index + matchedSize\n\t\t\treturn &Token{candidate, t.Input[offset: offset+matchedSize]}\n\t\t}\n\n\t} else {\n\t\tpanic(fmt.Sprintf(\"failed to lookup matcher for %v\", candidate))\n\t}\n\treturn &Token{t.InvalidToken, \"\"}\n}\n\n\/\/NewTokenizer creates a new NewTokenizer, it takes input, invalidToken, endOfFileToeken, and matchers.\nfunc NewTokenizer(input string, invalidToken int, endOfFileToken int, matcher map[int]Matcher) *Tokenizer {\n\treturn &Tokenizer{\n\t\tmatchers: matcher,\n\t\tInput: input,\n\t\tIndex: 0,\n\t\tInvalidToken: invalidToken,\n\t\tEndOfFileToken: endOfFileToken,\n\t}\n}\n\n\/\/CharactersMatcher represents a matcher, that matches any of Chars.\ntype CharactersMatcher struct {\n\tChars string \/\/characters to be matched\n}\n\n\/\/Match matches any characters defined in Chars in the input, returns 1 if character has been matched\nfunc (m CharactersMatcher) Match(input string, offset int) (matched int) {\n\tvar result = 0\nouter:\n\tfor i := 0; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i: offset+i+1]\n\t\tfor j := 0; j < len(m.Chars); j++ {\n\t\t\tif aChar == m.Chars[j:j+1] {\n\t\t\t\tresult++\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\treturn result\n}\n\nfunc isLetter(aChar string) bool {\n\treturn (aChar >= \"a\" && aChar <= \"z\") || (aChar >= \"A\" && aChar <= \"Z\")\n}\n\nfunc isDigit(aChar string) bool {\n\treturn (aChar >= \"0\" && aChar <= \"9\")\n}\n\n\/\/EOFMatcher represents end of input matcher\ntype EOFMatcher struct {\n}\n\n\/\/Match returns 1 if end of input has been reached otherwise 0\nfunc (m EOFMatcher) Match(input string, offset int) (matched int) {\n\tif offset+1 == len(input) {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n\/\/IntMatcher represents a matcher that finds any int in the input\ntype IntMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m IntMatcher) Match(input string, offset int) (matched int) {\n\tif !isDigit(input[offset: offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i: offset+i+1]\n\t\tif !isDigit(aChar) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/NewIntMatcher returns a new integer matcher\nfunc NewIntMatcher() Matcher {\n\treturn &IntMatcher{}\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype LiteralMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m LiteralMatcher) Match(input string, offset int) (matched int) {\n\tif !isLetter(input[offset: offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i: offset+i+1]\n\t\tif !((isLetter(aChar)) || isDigit(aChar) || aChar == \"_\" || aChar == \".\") {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype IdMatcher struct{}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m IdMatcher) Match(input string, offset int) (matched int) {\n\tif !isLetter(input[offset:offset+1]) && !isDigit(input[offset:offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i: offset+i+1]\n\t\tif !((isLetter(aChar)) || isDigit(aChar) || aChar == \"_\" || aChar == \".\") {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/SequenceMatcher represents a matcher that finds any sequence until find provided terminators\ntype SequenceMatcher struct {\n\tTerminators []string\n\tCaseSensitive bool\n}\n\nfunc (m *SequenceMatcher) hasTerminator(candidate string) bool {\n\tvar candidateLength = len(candidate)\n\tfor _, terminator := range m.Terminators {\n\t\tterminatorLength := len(terminator)\n\t\tif len(terminator) > candidateLength {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !m.CaseSensitive {\n\t\t\tif strings.ToLower(terminator) == strings.ToLower(string(candidate[:terminatorLength])) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tif terminator == string(candidate[:terminatorLength]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *SequenceMatcher) Match(input string, offset int) (matched int) {\n\tvar i = 0\n\tfor ; i < len(input)-offset; i++ {\n\t\tif m.hasTerminator(string(input[offset+i:])) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/NewSequenceMatcher creates a new matcher that finds any sequence until find provided terminators\nfunc NewSequenceMatcher(terminators ...string) Matcher {\n\treturn &SequenceMatcher{\n\t\tTerminators: terminators,\n\t}\n}\n\n\n\n\/\/remainingSequenceMatcher represents a matcher that matches all reamining input\ntype remainingSequenceMatcher struct {}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *remainingSequenceMatcher) Match(input string, offset int) (matched int) {\n\treturn len(input)-offset\n}\n\n\n\/\/Creates a matcher that matches all remaining input\nfunc NewRemainingSequenceMatcher() Matcher {\n\treturn &remainingSequenceMatcher{}\n}\n\n\/\/CustomIdMatcher represents a matcher that finds any literals with additional custom set of characters in the input\ntype customIdMatcher struct {\n\tAllowed map[string]bool\n}\n\nfunc (m *customIdMatcher) isValid(aChar string) bool {\n\tif isLetter(aChar) {\n\t\treturn true\n\t}\n\tif isDigit(aChar) {\n\t\treturn true\n\t}\n\treturn m.Allowed[aChar]\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *customIdMatcher) Match(input string, offset int) (matched int) {\n\n\tif !m.isValid(input[offset: offset+1]) {\n\t\treturn 0\n\t}\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\t\taChar := input[offset+i: offset+i+1]\n\t\tif !m.isValid(aChar) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/NewCustomIdMatcher creates new custom matcher\nfunc NewCustomIdMatcher(allowedChars ...string) Matcher {\n\tvar result = &customIdMatcher{\n\t\tAllowed: make(map[string]bool),\n\t}\n\n\tif len(allowedChars) == 1 && len(allowedChars[0]) > 0 {\n\t\tfor _, allowed := range allowedChars[0] {\n\t\t\tresult.Allowed[string(allowed)] = true\n\t\t}\n\t}\n\tfor _, allowed := range allowedChars {\n\t\tresult.Allowed[allowed] = true\n\t}\n\treturn result\n}\n\n\/\/LiteralMatcher represents a matcher that finds any literals in the input\ntype BodyMatcher struct {\n\tBegin string\n\tEnd string\n}\n\n\/\/Match matches a literal in the input, it returns number of character matched.\nfunc (m *BodyMatcher) Match(input string, offset int) (matched int) {\n\tbeginLen := len(m.Begin)\n\tendLen := len(m.End)\n\tuniEclosed := m.Begin == m.End\n\n\tif offset+beginLen >= len(input) {\n\t\treturn 0\n\t}\n\tif input[offset:offset+beginLen] != m.Begin {\n\t\treturn 0\n\t}\n\n\tvar depth = 1\n\tvar i = 1\n\tfor ; i < len(input)-offset; i++ {\n\n\t\tcanCheckEnd := offset+i+endLen <= len(input)\n\t\tif !canCheckEnd {\n\t\t\treturn 0\n\t\t}\n\t\tif !uniEclosed {\n\t\t\tcanCheckBegin := offset+i+beginLen <= len(input)\n\t\t\tif canCheckBegin {\n\t\t\t\tif string(input[offset+i:offset+i+beginLen]) == m.Begin {\n\t\t\t\t\tdepth++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif string(input[offset+i:offset+i+endLen]) == m.End {\n\t\t\tdepth--\n\t\t}\n\t\tif depth == 0 {\n\t\t\ti += endLen\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n\n\/\/KeywordMatcher represents a keyword matcher\ntype KeywordMatcher struct {\n\tKeyword string\n\tCaseSensitive bool\n}\n\n\/\/Match matches keyword in the input, it returns number of character matched.\nfunc (m KeywordMatcher) Match(input string, offset int) (matched int) {\n\tif !(offset+len(m.Keyword)-1 < len(input)) {\n\t\treturn 0\n\t}\n\n\tif m.CaseSensitive {\n\t\tif input[offset:offset+len(m.Keyword)] == m.Keyword {\n\t\t\treturn len(m.Keyword)\n\t\t}\n\t} else {\n\t\tif strings.ToLower(input[offset:offset+len(m.Keyword)]) == strings.ToLower(m.Keyword) {\n\t\t\treturn len(m.Keyword)\n\t\t}\n\t}\n\treturn 0\n}\n\n\/\/KeywordsMatcher represents a matcher that finds any of specified keywords in the input\ntype KeywordsMatcher struct {\n\tKeywords []string\n\tCaseSensitive bool\n}\n\n\/\/Match matches any specified keyword, it returns number of character matched.\nfunc (m KeywordsMatcher) Match(input string, offset int) (matched int) {\n\tfor _, keyword := range m.Keywords {\n\t\tif len(input)-offset < len(keyword) {\n\t\t\tcontinue\n\t\t}\n\t\tif m.CaseSensitive {\n\t\t\tif input[offset:offset+len(keyword)] == keyword {\n\t\t\t\treturn len(keyword)\n\t\t\t}\n\t\t} else {\n\t\t\tif strings.ToLower(input[offset:offset+len(keyword)]) == strings.ToLower(keyword) {\n\t\t\t\treturn len(keyword)\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\n\/\/NewKeywordsMatcher returns a matcher for supplied keywords\nfunc NewKeywordsMatcher(caseSensitive bool, keywords ... string) Matcher {\n\treturn &KeywordsMatcher{CaseSensitive: caseSensitive, Keywords: keywords}\n}\n\n\/\/IllegalTokenError represents illegal token error\ntype IllegalTokenError struct {\n\tIllegal *Token\n\tMessage string\n\tExpected []int\n\tPosition int\n}\n\nfunc (e *IllegalTokenError) Error() string {\n\treturn fmt.Sprintf(\"%v; illegal token at %v [%v], expected %v, but had: %v\", e.Message, e.Position, e.Illegal.Matched, e.Expected, e.Illegal.Token)\n}\n\n\/\/NewIllegalTokenError create a new illegal token error\nfunc NewIllegalTokenError(message string, expected []int, position int, found *Token) error {\n\treturn &IllegalTokenError{\n\t\tMessage: message,\n\t\tIllegal: found,\n\t\tExpected: expected,\n\t\tPosition: position,\n\t}\n}\n\n\/\/ExpectTokenOptionallyFollowedBy returns second matched token or error if first and second group was not matched\nfunc ExpectTokenOptionallyFollowedBy(tokenizer *Tokenizer, first int, errorMessage string, second ... int) (*Token, error) {\n\t_, _ = ExpectToken(tokenizer, \"\", first)\n\treturn ExpectToken(tokenizer, errorMessage, second...)\n}\n\n\/\/ExpectToken returns the matched token or error\nfunc ExpectToken(tokenizer *Tokenizer, errorMessage string, candidates ... int) (*Token, error) {\n\ttoken := tokenizer.Nexts(candidates...)\n\thasMatch := HasSliceAnyElements(candidates, token.Token)\n\tif ! hasMatch {\n\t\treturn nil, NewIllegalTokenError(errorMessage, candidates, tokenizer.Index, token)\n\t}\n\treturn token, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package parser_test\n\nimport (\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/parser\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parserTestParam struct {\n\tinput []string\n\terr string\n\tres map[string]interface{}\n}\n\nvar testCases = []parserTestParam{\n\t{input: []string{}, res: map[string]interface{}{}},\n\t{\n\t\tinput: []string{`{\"P1\":\"val1\",\"P2\":[1,2],\"P3\":true,\"P4\":{\"P41\":\"val41\",\"P42\":[\"str\"]}}`},\n\t\tres: map[string]interface{}{\n\t\t\t\"P1\": \"val1\",\n\t\t\t\"P2\": []interface{}{1., 2.},\n\t\t\t\"P3\": true,\n\t\t\t\"P4\": map[string]interface{}{\"P41\": \"val41\", \"P42\": []interface{}{\"str\"}},\n\t\t},\n\t},\n\t\/\/ Parses root values without keys from JSON.\n\t{input: []string{`{\"some-key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{`{\"some-key\": \"value2\"}`, `{\"some-key\": \"value1\"}`}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Parses --some-key=value.\n\t{input: []string{\"--some-key\", \"value\"}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{\"--some-key\", \"value1\", \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t{input: []string{`{\"some-key\": \"value\"}`, \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Does not parse root values not in JSON format.\n\t{input: []string{\"value\", \"value2\"}, err: \"Invalid JSON: value.\"},\n\t\/\/ Parses keys without values.\n\t{input: []string{\"--some-key\"}, res: map[string]interface{}{\"SomeKey\": nil}},\n\t\/\/ Does not parse key values from JSON or key1=value1,key2=value2,.. notation.\n\t{input: []string{\"--some-key\", `{\"key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": `{\"key\": \"value\"}`}},\n\t{input: []string{\"--some-key\", \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\",\n\t}},\n\t\/\/ Parses --key element1 element2 element3.\n\t{input: []string{\"--some-key\", \"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{\"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"},\n\t}},\n\t\/\/ Parses --key element1 element2 --another-key.\n\t{input: []string{\"--some-key\", `{\"key\":\"value\"}`, \"value2\", \"--another-key\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{`{\"key\":\"value\"}`, \"value2\"}, \"AnotherKey\": nil,\n\t}},\n\t\/\/ Fails with -- argument.\n\t{input: []string{\"--\"}, err: \"-- is an invalid argument.\"},\n\t\/\/ Parses nested JSON objects and arrays properly.\n\t{input: []string{`{\"k1\":{\"k2\":{\"k3\":[1,2,3]}}}`}, res: map[string]interface{}{\n\t\t\"K1\": map[string]interface{}{\n\t\t\t\"K2\": map[string]interface{}{\n\t\t\t\t\"K3\": []interface{}{1., 2., 3.},\n\t\t\t},\n\t\t},\n\t}},\n\t\/\/ Parses a complex case.\n\t{\n\t\tinput: []string{`{\"a\":{\"b\":\"c\"}}`, \"--some-long-key\", \"--another-key\", `{\"a\":\"b\"}`, \"a=b?,c=d\", \"--yet-another-key\"},\n\t\tres: map[string]interface{}{\n\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\"B\": \"c\",\n\t\t\t},\n\t\t\t\"SomeLongKey\": nil,\n\t\t\t\"AnotherKey\": []interface{}{`{\"a\":\"b\"}`, \"a=b?,c=d\"},\n\t\t\t\"YetAnotherKey\": nil,\n\t\t},\n\t},\n}\n\nfunc TestArgumentParser(t *testing.T) {\n\tfor i, testCase := range testCases {\n\t\tt.Logf(\"Executing %d test case.\", i+1)\n\t\tres, err := parser.ParseArguments(testCase.input)\n\t\tif testCase.err != \"\" && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"Invalid error. Expected: %s, obtained %s\", testCase.err, err.Error())\n\t\t}\n\t\tif testCase.res != nil && !reflect.DeepEqual(testCase.res, res) {\n\t\t\tt.Errorf(\"Invalid result. expected %#v, obtained %#v\", testCase.res, res)\n\t\t}\n\t}\n}\n<commit_msg>add possibility to skip argument parser tests<commit_after>package parser_test\n\nimport (\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/parser\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parserTestParam struct {\n\tinput []string\n\terr string\n\tres map[string]interface{}\n\tskip bool\n}\n\nvar testCases = []parserTestParam{\n\t{input: []string{}, res: map[string]interface{}{}},\n\t{\n\t\tinput: []string{`{\"P1\":\"val1\",\"P2\":[1,2],\"P3\":true,\"P4\":{\"P41\":\"val41\",\"P42\":[\"str\"]}}`},\n\t\tres: map[string]interface{}{\n\t\t\t\"P1\": \"val1\",\n\t\t\t\"P2\": []interface{}{1., 2.},\n\t\t\t\"P3\": true,\n\t\t\t\"P4\": map[string]interface{}{\"P41\": \"val41\", \"P42\": []interface{}{\"str\"}},\n\t\t},\n\t},\n\t\/\/ Parses root values without keys from JSON.\n\t{input: []string{`{\"some-key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{`{\"some-key\": \"value2\"}`, `{\"some-key\": \"value1\"}`}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Parses --some-key=value.\n\t{input: []string{\"--some-key\", \"value\"}, res: map[string]interface{}{\"SomeKey\": \"value\"}},\n\t\/\/ Does not allow duplicate keys.\n\t{input: []string{\"--some-key\", \"value1\", \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t{input: []string{`{\"some-key\": \"value\"}`, \"--some-key\", \"value2\"}, err: \"Option 'SomeKey' is specified twice.\"},\n\t\/\/ Does not parse root values not in JSON format.\n\t{input: []string{\"value\", \"value2\"}, err: \"Invalid JSON: value.\"},\n\t\/\/ Parses keys without values.\n\t{input: []string{\"--some-key\"}, res: map[string]interface{}{\"SomeKey\": nil}},\n\t\/\/ Does not parse key values from JSON or key1=value1,key2=value2,.. notation.\n\t{input: []string{\"--some-key\", `{\"key\": \"value\"}`}, res: map[string]interface{}{\"SomeKey\": `{\"key\": \"value\"}`}},\n\t{input: []string{\"--some-key\", \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": \"p1-key=10,p2-key=true,p3=',=!@=$ ,%^ &\\\"%<,.=\\\"'\",\n\t}},\n\t\/\/ Parses --key element1 element2 element3.\n\t{input: []string{\"--some-key\", \"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{\"value1\", \"value2\", `{\"value1\":[1,2,3]}`, \"a=b\"},\n\t}},\n\t\/\/ Parses --key element1 element2 --another-key.\n\t{input: []string{\"--some-key\", `{\"key\":\"value\"}`, \"value2\", \"--another-key\"}, res: map[string]interface{}{\n\t\t\"SomeKey\": []interface{}{`{\"key\":\"value\"}`, \"value2\"}, \"AnotherKey\": nil,\n\t}},\n\t\/\/ Fails with -- argument.\n\t{input: []string{\"--\"}, err: \"-- is an invalid argument.\"},\n\t\/\/ Parses nested JSON objects and arrays properly.\n\t{input: []string{`{\"k1\":{\"k2\":{\"k3\":[1,2,3]}}}`}, res: map[string]interface{}{\n\t\t\"K1\": map[string]interface{}{\n\t\t\t\"K2\": map[string]interface{}{\n\t\t\t\t\"K3\": []interface{}{1., 2., 3.},\n\t\t\t},\n\t\t},\n\t}},\n\t\/\/ Parses a complex case.\n\t{\n\t\tinput: []string{`{\"a\":{\"b\":\"c\"}}`, \"--some-long-key\", \"--another-key\", `{\"a\":\"b\"}`, \"a=b?,c=d\", \"--yet-another-key\"},\n\t\tres: map[string]interface{}{\n\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\"B\": \"c\",\n\t\t\t},\n\t\t\t\"SomeLongKey\": nil,\n\t\t\t\"AnotherKey\": []interface{}{`{\"a\":\"b\"}`, \"a=b?,c=d\"},\n\t\t\t\"YetAnotherKey\": nil,\n\t\t},\n\t},\n}\n\nfunc TestArgumentParser(t *testing.T) {\n\tfor i, testCase := range testCases {\n\t\tif testCase.skip {\n\t\t\tt.Logf(\"Skipping %d test case.\", i+1)\n\t\t\tcontinue\n\t\t}\n\t\tt.Logf(\"Executing %d test case.\", i+1)\n\t\tres, err := parser.ParseArguments(testCase.input)\n\t\tif testCase.err != \"\" && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"Invalid error. Expected: %s, obtained %s\", testCase.err, err.Error())\n\t\t}\n\t\tif testCase.res != nil && !reflect.DeepEqual(testCase.res, res) {\n\t\t\tt.Errorf(\"Invalid result. expected %#v, obtained %#v\", testCase.res, res)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\nvar pools map[string]*Pool\n\ntype Pool struct {\n\tConfig *Config\n\tClient *docker.Client\n\tContainers map[string]int\n\tImage string\n\tCapacity int\n\tsync.Mutex\n}\n\nfunc findImage(client *docker.Client, image string) (*docker.APIImages, error) {\n\timages, err := client.ListImages(docker.ListImagesOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, img := range images {\n\t\tfor _, t := range img.RepoTags {\n\t\t\tif t == image {\n\t\t\t\treturn &img, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"invalid image:\", image)\n}\n\nfunc NewPool(config *Config, client *docker.Client, image string, capacity int) (*Pool, error) {\n\t_, err := findImage(client, image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpool := &Pool{\n\t\tConfig: config,\n\t\tClient: client,\n\t\tContainers: map[string]int{},\n\t\tImage: image,\n\t\tCapacity: capacity,\n\t}\n\n\treturn pool, nil\n}\n\nfunc (pool *Pool) Load() error {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tcontainers, err := pool.Client.ListContainers(docker.ListContainersOptions{All: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, c := range containers {\n\t\tif c.Image == pool.Image {\n\t\t\tpool.Containers[c.ID] = 0\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (pool *Pool) Add() error {\n\tid, _ := randomHex(20)\n\tvolumePath := fmt.Sprintf(\"%s\/%s\", pool.Config.SharedPath, id)\n\n\topts := docker.CreateContainerOptions{\n\t\tName: fmt.Sprintf(\"reserved-%v\", time.Now().UnixNano()),\n\t\tHostConfig: &docker.HostConfig{\n\t\t\tBinds: []string{\n\t\t\t\tvolumePath + \":\/code\",\n\t\t\t\tvolumePath + \":\/tmp\",\n\t\t\t},\n\t\t\tReadonlyRootfs: true,\n\t\t\tMemory: pool.Config.MemoryLimit,\n\t\t\tMemorySwap: 0,\n\t\t},\n\t\tConfig: &docker.Config{\n\t\t\tHostname: \"bitrun\",\n\t\t\tImage: pool.Image,\n\t\t\tAttachStdout: true,\n\t\t\tAttachStderr: true,\n\t\t\tAttachStdin: false,\n\t\t\tOpenStdin: false,\n\t\t\tTty: true,\n\t\t\tNetworkDisabled: pool.Config.NetworkDisabled,\n\t\t\tWorkingDir: \"\/code\",\n\t\t\tCmd: []string{\"sleep\", \"86400\"},\n\t\t},\n\t}\n\n\tcontainer, err := pool.Client.CreateContainer(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pool.Client.StartContainer(container.ID, container.HostConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tpool.Containers[container.ID] = 0\n\treturn nil\n}\n\nfunc (pool *Pool) Fill() {\n\tnum := pool.Capacity - len(pool.Containers)\n\n\t\/\/ Pool is full\n\tif num == 0 {\n\t\treturn\n\t}\n\n\tlog.Printf(\"adding %v containers to %v pool\\n\", num, pool.Image)\n\n\tfor i := 0; i < num; i++ {\n\t\terr := pool.Add()\n\t\tif err != nil {\n\t\t\tlog.Println(\"error while adding to pool:\", err)\n\t\t}\n\t}\n}\n\nfunc (pool *Pool) Monitor() {\n\tfor {\n\t\tpool.Fill()\n\t\ttime.Sleep(time.Second * 3)\n\t}\n}\n\nfunc (pool *Pool) Get() (string, error) {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tid := \"\"\n\n\tfor k, v := range pool.Containers {\n\t\tif v == 0 {\n\t\t\tid = k\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id != \"\" {\n\t\tdelete(pool.Containers, id)\n\t\treturn id, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"no contaienrs are available\")\n}\n\nfunc RunPool(config *Config, client *docker.Client) {\n\tfor _, cfg := range config.Pools {\n\t\tlog.Println(\"initializing pool for:\", cfg.Image)\n\n\t\tpool, err := NewPool(config, client, cfg.Image, cfg.Capacity)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\terr = pool.Load()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tgo pool.Monitor()\n\t\tpools[cfg.Image] = pool\n\t}\n}\n<commit_msg>Properly initialze map<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\nvar pools map[string]*Pool\n\ntype Pool struct {\n\tConfig *Config\n\tClient *docker.Client\n\tContainers map[string]int\n\tImage string\n\tCapacity int\n\tsync.Mutex\n}\n\nfunc findImage(client *docker.Client, image string) (*docker.APIImages, error) {\n\timages, err := client.ListImages(docker.ListImagesOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, img := range images {\n\t\tfor _, t := range img.RepoTags {\n\t\t\tif t == image {\n\t\t\t\treturn &img, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"invalid image:\", image)\n}\n\nfunc NewPool(config *Config, client *docker.Client, image string, capacity int) (*Pool, error) {\n\t_, err := findImage(client, image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpool := &Pool{\n\t\tConfig: config,\n\t\tClient: client,\n\t\tContainers: map[string]int{},\n\t\tImage: image,\n\t\tCapacity: capacity,\n\t}\n\n\treturn pool, nil\n}\n\nfunc (pool *Pool) Load() error {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tcontainers, err := pool.Client.ListContainers(docker.ListContainersOptions{All: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, c := range containers {\n\t\tif c.Image == pool.Image {\n\t\t\tpool.Containers[c.ID] = 0\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (pool *Pool) Add() error {\n\tid, _ := randomHex(20)\n\tvolumePath := fmt.Sprintf(\"%s\/%s\", pool.Config.SharedPath, id)\n\n\topts := docker.CreateContainerOptions{\n\t\tName: fmt.Sprintf(\"reserved-%v\", time.Now().UnixNano()),\n\t\tHostConfig: &docker.HostConfig{\n\t\t\tBinds: []string{\n\t\t\t\tvolumePath + \":\/code\",\n\t\t\t\tvolumePath + \":\/tmp\",\n\t\t\t},\n\t\t\tReadonlyRootfs: true,\n\t\t\tMemory: pool.Config.MemoryLimit,\n\t\t\tMemorySwap: 0,\n\t\t},\n\t\tConfig: &docker.Config{\n\t\t\tHostname: \"bitrun\",\n\t\t\tImage: pool.Image,\n\t\t\tAttachStdout: true,\n\t\t\tAttachStderr: true,\n\t\t\tAttachStdin: false,\n\t\t\tOpenStdin: false,\n\t\t\tTty: true,\n\t\t\tNetworkDisabled: pool.Config.NetworkDisabled,\n\t\t\tWorkingDir: \"\/code\",\n\t\t\tCmd: []string{\"sleep\", \"86400\"},\n\t\t},\n\t}\n\n\tcontainer, err := pool.Client.CreateContainer(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pool.Client.StartContainer(container.ID, container.HostConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tpool.Containers[container.ID] = 0\n\treturn nil\n}\n\nfunc (pool *Pool) Fill() {\n\tnum := pool.Capacity - len(pool.Containers)\n\n\t\/\/ Pool is full\n\tif num == 0 {\n\t\treturn\n\t}\n\n\tlog.Printf(\"adding %v containers to %v pool\\n\", num, pool.Image)\n\n\tfor i := 0; i < num; i++ {\n\t\terr := pool.Add()\n\t\tif err != nil {\n\t\t\tlog.Println(\"error while adding to pool:\", err)\n\t\t}\n\t}\n}\n\nfunc (pool *Pool) Monitor() {\n\tfor {\n\t\tpool.Fill()\n\t\ttime.Sleep(time.Second * 3)\n\t}\n}\n\nfunc (pool *Pool) Get() (string, error) {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tid := \"\"\n\n\tfor k, v := range pool.Containers {\n\t\tif v == 0 {\n\t\t\tid = k\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id != \"\" {\n\t\tdelete(pool.Containers, id)\n\t\treturn id, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"no contaienrs are available\")\n}\n\nfunc RunPool(config *Config, client *docker.Client) {\n\tpools = make(map[string]*Pool)\n\n\tfor _, cfg := range config.Pools {\n\t\tlog.Println(\"initializing pool for:\", cfg.Image)\n\n\t\tpool, err := NewPool(config, client, cfg.Image, cfg.Capacity)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\terr = pool.Load()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tgo pool.Monitor()\n\t\tpools[cfg.Image] = pool\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Network transport layer\n\ntype NetworkTransport struct {\n\tprotocol string\n\tport int\n\tserviceName string\n\tlistenAddr string \/\/ Local listen address\n\treceiveBufferLen int\n\t\/\/ Handlers\n\t_onMessage func(*TransportConnectionMeta, []byte)\n\t_onConnect func(*TransportConnectionMeta, string)\n\t\/\/ Connections\n\tconnections map[string]*TransportConnection\n\tconnectionsMux sync.RWMutex\n\t\/\/ Is connecting\n\tisConnectingMux sync.RWMutex\n\tisConnecting map[string]bool\n}\n\n\/\/ Listen\nfunc (this *NetworkTransport) listen() {\n\tlog.Infof(\"Starting %s on port %s\/%d\", this.serviceName, strings.ToUpper(this.protocol), this.port)\n\tswitch this.protocol {\n\tcase \"tcp\":\n\t\tthis._listenTcp()\n\t\tbreak\n\tcase \"udp\":\n\t\tthis._listenUdp()\n\t\tbreak\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Protocol %s not supported\", this.protocol))\n\t}\n}\n\n\/\/ Listen TCP\nfunc (this *NetworkTransport) _listenTcp() {\n\tln, err := net.Listen(this.protocol, fmt.Sprintf(\":%d\", this.port))\n\tthis.listenAddr = ln.Addr().String()\n\tif err != nil {\n\t\tpanicErr(err)\n\t}\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tgo this.handleConnection(conn)\n\t}\n}\n\n\/\/ Listen TCP\nfunc (this *NetworkTransport) _listenUdp() {\n\t\/\/ Prepare address\n\tserverAddr, err := net.ResolveUDPAddr(this.protocol, fmt.Sprintf(\":%d\", this.port))\n\tpanicErr(err)\n\n\tln, err := net.ListenUDP(this.protocol, serverAddr)\n\tthis.listenAddr = ln.LocalAddr().String()\n\tif err != nil {\n\t\tpanicErr(err)\n\t}\n\tfor {\n\t\ttbuf := make([]byte, this.receiveBufferLen)\n\t\tn, addr, err := ln.ReadFromUDP(tbuf)\n\t\tlog.Infof(\"Received \", string(tbuf[0:n]), \" from \", addr)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"UDP receive error: %s\", err)\n\t\t} else {\n\t\t\t\/\/ Read message\n\t\t\tthis._onMessage(newTransportConnectionMeta(ln.RemoteAddr().String()), tbuf[0:n])\n\t\t}\n\t}\n}\n\n\/\/ Handle connection\nfunc (this *NetworkTransport) handleConnection(conn net.Conn) {\n\t\/\/ Read bytes\n\ttbuf := make([]byte, this.receiveBufferLen)\n\n\tfor {\n\t\tn, err := conn.Read(tbuf)\n\t\t\/\/ Was there an error in reading ?\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Printf(\"Read error: %s\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Read message\n\t\tthis._onMessage(newTransportConnectionMeta(conn.RemoteAddr().String()), tbuf[0:n])\n\t}\n}\n\n\/\/ Discover a single seed\nfunc (this *NetworkTransport) _connect(node string) {\n\t\/\/ Are we already waiting for this?\n\tthis.isConnectingMux.Lock()\n\tif this.isConnecting[node] {\n\t\tthis.isConnectingMux.Unlock()\n\t\tlog.Infof(\"Ignore connect, we are aleady connecting to %s\", node)\n\t\treturn\n\t}\n\tthis.isConnecting[node] = true\n\tthis.isConnectingMux.Unlock()\n\n\t\/\/ Are we already connected?\n\tthis.connectionsMux.RLock()\n\tif this.connections[node] != nil {\n\t\tthis.connectionsMux.RUnlock()\n\t\tlog.Infof(\"Ignore connect, we are aleady connected to %s\", node)\n\t\treturn\n\t}\n\tthis.connectionsMux.RUnlock()\n\n\t\/\/ Do not connect to ourselves\n\tlistenSplit := strings.Split(this.listenAddr, \":\")\n\tif listenSplit[0] == node {\n\t\tlog.Infof(\"Not connecting to address (%s) on which are are listening ourselves\", listenSplit[0])\n\t\treturn\n\t}\n\n\t\/\/ Start contacting node\n\tlog.Infof(\"Contacting node %s for %s\", node, this.serviceName)\n\tvar conn net.Conn\n\tvar err error\n\tfor {\n\t\tconn, err = net.Dial(this.protocol, fmt.Sprintf(\"%s:%d\", node, this.port))\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t\tlog.Errorf(\"Failed to contact node %s for %s: %s\", node, this.serviceName, err)\n\n\t\t\t\/\/ Close connection\n\t\t\tif conn != nil {\n\t\t\t\tconn.Close()\n\t\t\t}\n\n\t\t\t\/\/ Wait a bit\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\t\/\/ Try again\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Established connection with node %s for %s\", node, this.serviceName)\n\t\tbreak\n\t}\n\n\t\/\/ Keep connection\n\tthis.connectionsMux.Lock()\n\tthis.connections[node] = newTransportConnection(node, &conn)\n\tthis.connectionsMux.Unlock()\n\n\t\/\/ Done connecting\n\tthis.isConnectingMux.Lock()\n\tdelete(this.isConnecting, node)\n\tthis.isConnectingMux.Unlock()\n\n\t\/\/ Send HELLO message\n\tif this._onConnect != nil {\n\t\tthis._onConnect(newTransportConnectionMeta(conn.RemoteAddr().String()), node)\n\t}\n}\n\n\/\/ Send message\nfunc (this *NetworkTransport) _send(node string, b []byte) error {\n\tthis.connectionsMux.Lock()\n\tdefer this.connectionsMux.Unlock()\n\tif this.connections[node] == nil {\n\t\t\/\/ No connection\n\t\terrorMsg := fmt.Sprintf(\"No connection found to %s for %s\", node, this.serviceName)\n\t\tlog.Warn(errorMsg)\n\n\t\t\/\/ We will try to open one for next time\n\t\tgo this._connect(node)\n\n\t\t\/\/ Return error\n\t\treturn errors.New(errorMsg)\n\t}\n\tvar connection *TransportConnection = this.connections[node]\n\tvar conn net.Conn = *connection.conn\n\t_, err := conn.Write(b)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to write %d %s bytes to %s: %s\", len(b), this.serviceName, node, err)\n\n\t\t\/\/ Reset connection\n\t\tdelete(this.connections, node)\n\t} else {\n\t\tlog.Infof(\"Written %d %s bytes to %s\", len(b), this.serviceName, node)\n\t}\n\treturn err\n}\n\n\/\/ Start\nfunc (this *NetworkTransport) start() {\n\t\/\/ Validate transport\n\tif this.protocol == \"tcp\" && this._onConnect == nil {\n\t\tpanic(\"No on-connect defined for TCP\")\n\t}\n\tif this._onMessage == nil {\n\t\tpanic(\"No on-message defined\")\n\t}\n\n\t\/\/ Start server\n\tgo this.listen()\n}\n\n\/\/ New NetworkTransport service\nfunc newNetworkTransport(protocol string, serviceName string, port int, receiveBufferLen int) *NetworkTransport {\n\tg := &NetworkTransport{\n\t\tprotocol: protocol,\n\t\tport: port,\n\t\tserviceName: serviceName,\n\t\tconnections: make(map[string]*TransportConnection),\n\t\tisConnecting: make(map[string]bool),\n\t\treceiveBufferLen: receiveBufferLen,\n\t}\n\n\treturn g\n}\n<commit_msg>Resolve data race, fixes #3<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Network transport layer\n\ntype NetworkTransport struct {\n\t\/\/ Generic mutex\n\tmux sync.RWMutex\n\t\/\/ Config\n\tprotocol string\n\tport int\n\tserviceName string\n\tlistenAddr string \/\/ Local listen address\n\treceiveBufferLen int\n\t\/\/ Handlers\n\t_onMessage func(*TransportConnectionMeta, []byte)\n\t_onConnect func(*TransportConnectionMeta, string)\n\t\/\/ Connections\n\tconnections map[string]*TransportConnection\n\tconnectionsMux sync.RWMutex\n\t\/\/ Is connecting\n\tisConnectingMux sync.RWMutex\n\tisConnecting map[string]bool\n}\n\n\/\/ Listen\nfunc (this *NetworkTransport) listen() {\n\tlog.Infof(\"Starting %s on port %s\/%d\", this.serviceName, strings.ToUpper(this.protocol), this.port)\n\tswitch this.protocol {\n\tcase \"tcp\":\n\t\tthis._listenTcp()\n\t\tbreak\n\tcase \"udp\":\n\t\tthis._listenUdp()\n\t\tbreak\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Protocol %s not supported\", this.protocol))\n\t}\n}\n\n\/\/ Listen TCP\nfunc (this *NetworkTransport) _listenTcp() {\n\tln, err := net.Listen(this.protocol, fmt.Sprintf(\":%d\", this.port))\n\tthis.mux.Lock()\n\tthis.listenAddr = ln.Addr().String()\n\tthis.mux.Unlock()\n\tif err != nil {\n\t\tpanicErr(err)\n\t}\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tgo this.handleConnection(conn)\n\t}\n}\n\n\/\/ Listen TCP\nfunc (this *NetworkTransport) _listenUdp() {\n\t\/\/ Prepare address\n\tserverAddr, err := net.ResolveUDPAddr(this.protocol, fmt.Sprintf(\":%d\", this.port))\n\tpanicErr(err)\n\n\tln, err := net.ListenUDP(this.protocol, serverAddr)\n\tthis.mux.Lock()\n\tthis.listenAddr = ln.LocalAddr().String()\n\tthis.mux.Unlock()\n\tif err != nil {\n\t\tpanicErr(err)\n\t}\n\tfor {\n\t\ttbuf := make([]byte, this.receiveBufferLen)\n\t\tn, addr, err := ln.ReadFromUDP(tbuf)\n\t\tlog.Infof(\"Received \", string(tbuf[0:n]), \" from \", addr)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"UDP receive error: %s\", err)\n\t\t} else {\n\t\t\t\/\/ Read message\n\t\t\tthis._onMessage(newTransportConnectionMeta(ln.RemoteAddr().String()), tbuf[0:n])\n\t\t}\n\t}\n}\n\n\/\/ Handle connection\nfunc (this *NetworkTransport) handleConnection(conn net.Conn) {\n\t\/\/ Read bytes\n\ttbuf := make([]byte, this.receiveBufferLen)\n\n\tfor {\n\t\tn, err := conn.Read(tbuf)\n\t\t\/\/ Was there an error in reading ?\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Printf(\"Read error: %s\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Read message\n\t\tthis._onMessage(newTransportConnectionMeta(conn.RemoteAddr().String()), tbuf[0:n])\n\t}\n}\n\n\/\/ Discover a single seed\nfunc (this *NetworkTransport) _connect(node string) {\n\t\/\/ Are we already waiting for this?\n\tthis.isConnectingMux.Lock()\n\tif this.isConnecting[node] {\n\t\tthis.isConnectingMux.Unlock()\n\t\tlog.Infof(\"Ignore connect, we are aleady connecting to %s\", node)\n\t\treturn\n\t}\n\tthis.isConnecting[node] = true\n\tthis.isConnectingMux.Unlock()\n\n\t\/\/ Are we already connected?\n\tthis.connectionsMux.RLock()\n\tif this.connections[node] != nil {\n\t\tthis.connectionsMux.RUnlock()\n\t\tlog.Infof(\"Ignore connect, we are aleady connected to %s\", node)\n\t\treturn\n\t}\n\tthis.connectionsMux.RUnlock()\n\n\t\/\/ Do not connect to ourselves\n\tthis.mux.RLock()\n\tlistenSplit := strings.Split(this.listenAddr, \":\")\n\tthis.mux.RUnlock()\n\tif listenSplit[0] == node {\n\t\tlog.Infof(\"Not connecting to address (%s) on which are are listening ourselves\", listenSplit[0])\n\t\treturn\n\t}\n\n\t\/\/ Start contacting node\n\tlog.Infof(\"Contacting node %s for %s\", node, this.serviceName)\n\tvar conn net.Conn\n\tvar err error\n\tfor {\n\t\tconn, err = net.Dial(this.protocol, fmt.Sprintf(\"%s:%d\", node, this.port))\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t\tlog.Errorf(\"Failed to contact node %s for %s: %s\", node, this.serviceName, err)\n\n\t\t\t\/\/ Close connection\n\t\t\tif conn != nil {\n\t\t\t\tconn.Close()\n\t\t\t}\n\n\t\t\t\/\/ Wait a bit\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\t\/\/ Try again\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Established connection with node %s for %s\", node, this.serviceName)\n\t\tbreak\n\t}\n\n\t\/\/ Keep connection\n\tthis.connectionsMux.Lock()\n\tthis.connections[node] = newTransportConnection(node, &conn)\n\tthis.connectionsMux.Unlock()\n\n\t\/\/ Done connecting\n\tthis.isConnectingMux.Lock()\n\tdelete(this.isConnecting, node)\n\tthis.isConnectingMux.Unlock()\n\n\t\/\/ Send HELLO message\n\tif this._onConnect != nil {\n\t\tthis._onConnect(newTransportConnectionMeta(conn.RemoteAddr().String()), node)\n\t}\n}\n\n\/\/ Send message\nfunc (this *NetworkTransport) _send(node string, b []byte) error {\n\tthis.connectionsMux.Lock()\n\tdefer this.connectionsMux.Unlock()\n\tif this.connections[node] == nil {\n\t\t\/\/ No connection\n\t\terrorMsg := fmt.Sprintf(\"No connection found to %s for %s\", node, this.serviceName)\n\t\tlog.Warn(errorMsg)\n\n\t\t\/\/ We will try to open one for next time\n\t\tgo this._connect(node)\n\n\t\t\/\/ Return error\n\t\treturn errors.New(errorMsg)\n\t}\n\tvar connection *TransportConnection = this.connections[node]\n\tvar conn net.Conn = *connection.conn\n\t_, err := conn.Write(b)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to write %d %s bytes to %s: %s\", len(b), this.serviceName, node, err)\n\n\t\t\/\/ Reset connection\n\t\tdelete(this.connections, node)\n\t} else {\n\t\tlog.Infof(\"Written %d %s bytes to %s\", len(b), this.serviceName, node)\n\t}\n\treturn err\n}\n\n\/\/ Start\nfunc (this *NetworkTransport) start() {\n\t\/\/ Validate transport\n\tif this.protocol == \"tcp\" && this._onConnect == nil {\n\t\tpanic(\"No on-connect defined for TCP\")\n\t}\n\tif this._onMessage == nil {\n\t\tpanic(\"No on-message defined\")\n\t}\n\n\t\/\/ Start server\n\tgo this.listen()\n}\n\n\/\/ New NetworkTransport service\nfunc newNetworkTransport(protocol string, serviceName string, port int, receiveBufferLen int) *NetworkTransport {\n\tg := &NetworkTransport{\n\t\tprotocol: protocol,\n\t\tport: port,\n\t\tserviceName: serviceName,\n\t\tconnections: make(map[string]*TransportConnection),\n\t\tisConnecting: make(map[string]bool),\n\t\treceiveBufferLen: receiveBufferLen,\n\t}\n\n\treturn g\n}\n<|endoftext|>"} {"text":"<commit_before>package fourchan\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tBoardsURL = \"a.4cdn.org\"\n\tImagesURL = \"i.4cdn.org\"\n\tThumbsURL = \"t.4cdn.org\"\n)\n\ntype Post struct {\n\tThread *Thread\n\tData *PostData\n}\n\ntype PostData struct {\n\tPostNumber int `json:\"no\"`\n\tResto int `json:\"resto\"`\n\tSticky int `json:\"sticky\"`\n\tClosed int `json:\"closed\"`\n\tArchived int `json:\"archived\"`\n\tNow string `json:\"now\"`\n\tTime int `json:\"time\"`\n\tName string `json:\"tripcode\"`\n\tTripcode string `json:\"trip\"`\n\tId string `json:\"id\"`\n\tCapcode string `json:\"capcode\"`\n\tCountry string `json:\"country\"`\n\tCountryName string `json:\"country_name\"`\n\tSubject string `json:\"sub\"`\n\tComment string `json:\"com\"`\n\tTim int `json:\"time\"`\n\tFilename string `json:filename`\n\tExtension string `json:\"extension\"`\n\tFilesize int `json:\"fsize\"`\n\tMd5 string `json:\"md5\"`\n\tImageWidth int `json:\"w\"`\n\tImageHeight int `json:\"h\"`\n\tThumbnailWidth int `json:\"tn_w\"`\n\tThumbnailHeight int `json:\"tn_h\"`\n\tFileDeleted int `json:\"filedeleted\"`\n\tSpoiler int `json:\"spoiler\"`\n\tCustomSpoiler int `json:\"custom_spoiler\"`\n\tOmittedPosts int `json:\"omitted_posts\"`\n\tOmittedImages int `json:\"omitted_images\"`\n\tReplies int `json:\"replies\"`\n\tImages int `json:\"images\"`\n\tBumpLimit int `json:\"bumplimit\"`\n\tImageLimit int `json:\"imagelimit\"`\n\tCapcodeReplies []int `json:\"capcode_replies\"`\n\tLastModified int `json:\"last_modified\"`\n\tTag string `json:\"tag\"`\n\tSemanticURL string `json:\"semantic_url\"`\n\tLastReplies []PostData `json:\"last_replies\"`\n}\n\n\/\/ Post struct has structure for JSON unmarshaller.\n\/\/ See the 4chan API at https:\/\/github.com\/4chan\/4chan-API for me details.\ntype PostsInfo struct {\n\tPosts []PostData `json:\"posts\"`\n}\n\nfunc NewPost(t *Thread, data *PostData) *Post {\n\treturn &Post{\n\t\tThread: t,\n\t\tData: data,\n\t}\n}\n\n\/\/ Returns the URL of the attached file.\n\/\/ Returns an empty string if there is no attached file.\nfunc (p *Post) FileURL() string {\n\tif !p.HasFile() {\n\t\treturn \"\"\n\t}\n\n\tboard := p.Thread.Board\n\tvar protocol string\n\tif board.Https {\n\t\tprotocol = \"https\"\n\t} else {\n\t\tprotocol = \"http\"\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/%s\/src\/%d%s\", protocol, ImagesURL, board.Name, p.Data.Tim, p.Data.Extension)\n}\n\n\/\/ Returns true if the Post has an attached file.\nfunc (p *Post) HasFile() bool {\n\treturn p.Data.Filename != \"\"\n}\n\n\/\/ Returns the URL of the attached thumbnail.\n\/\/ Returns an empty string if there is no attached file.\nfunc (p *Post) ThumbnailURL() string {\n\tif !p.HasFile() {\n\t\treturn \"\"\n\t}\n\n\tboard := p.Thread.Board\n\tvar protocol string\n\tif board.Https {\n\t\tprotocol = \"https\"\n\t} else {\n\t\tprotocol = \"http\"\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/%s\/thumb\/%ds.jpg\", protocol, ThumbsURL, board.Name, p.Data.Tim)\n}\n<commit_msg>Fixed typo in json parsing key.<commit_after>package fourchan\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tBoardsURL = \"a.4cdn.org\"\n\tImagesURL = \"i.4cdn.org\"\n\tThumbsURL = \"t.4cdn.org\"\n)\n\ntype Post struct {\n\tThread *Thread\n\tData *PostData\n}\n\ntype PostData struct {\n\tPostNumber int `json:\"no\"`\n\tResto int `json:\"resto\"`\n\tSticky int `json:\"sticky\"`\n\tClosed int `json:\"closed\"`\n\tArchived int `json:\"archived\"`\n\tNow string `json:\"now\"`\n\tTime int `json:\"time\"`\n\tName string `json:\"tripcode\"`\n\tTripcode string `json:\"trip\"`\n\tId string `json:\"id\"`\n\tCapcode string `json:\"capcode\"`\n\tCountry string `json:\"country\"`\n\tCountryName string `json:\"country_name\"`\n\tSubject string `json:\"sub\"`\n\tComment string `json:\"com\"`\n\tTim int `json:\"tim\"`\n\tFilename string `json:\"filename\"`\n\tExtension string `json:\"extension\"`\n\tFilesize int `json:\"fsize\"`\n\tMd5 string `json:\"md5\"`\n\tImageWidth int `json:\"w\"`\n\tImageHeight int `json:\"h\"`\n\tThumbnailWidth int `json:\"tn_w\"`\n\tThumbnailHeight int `json:\"tn_h\"`\n\tFileDeleted int `json:\"filedeleted\"`\n\tSpoiler int `json:\"spoiler\"`\n\tCustomSpoiler int `json:\"custom_spoiler\"`\n\tOmittedPosts int `json:\"omitted_posts\"`\n\tOmittedImages int `json:\"omitted_images\"`\n\tReplies int `json:\"replies\"`\n\tImages int `json:\"images\"`\n\tBumpLimit int `json:\"bumplimit\"`\n\tImageLimit int `json:\"imagelimit\"`\n\tCapcodeReplies []int `json:\"capcode_replies\"`\n\tLastModified int `json:\"last_modified\"`\n\tTag string `json:\"tag\"`\n\tSemanticURL string `json:\"semantic_url\"`\n\tLastReplies []PostData `json:\"last_replies\"`\n}\n\n\/\/ Post struct has structure for JSON unmarshaller.\n\/\/ See the 4chan API at https:\/\/github.com\/4chan\/4chan-API for me details.\ntype PostsInfo struct {\n\tPosts []PostData `json:\"posts\"`\n}\n\nfunc NewPost(t *Thread, data *PostData) *Post {\n\treturn &Post{\n\t\tThread: t,\n\t\tData: data,\n\t}\n}\n\n\/\/ Returns the URL of the attached file.\n\/\/ Returns an empty string if there is no attached file.\nfunc (p *Post) FileURL() string {\n\tif !p.HasFile() {\n\t\treturn \"\"\n\t}\n\n\tboard := p.Thread.Board\n\tvar protocol string\n\tif board.Https {\n\t\tprotocol = \"https\"\n\t} else {\n\t\tprotocol = \"http\"\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/%s\/src\/%d%s\", protocol, ImagesURL, board.Name, p.Data.Tim, p.Data.Extension)\n}\n\n\/\/ Returns true if the Post has an attached file.\nfunc (p *Post) HasFile() bool {\n\treturn p.Data.Filename != \"\"\n}\n\n\/\/ Returns the URL of the attached thumbnail.\n\/\/ Returns an empty string if there is no attached file.\nfunc (p *Post) ThumbnailURL() string {\n\tif !p.HasFile() {\n\t\treturn \"\"\n\t}\n\n\tboard := p.Thread.Board\n\tvar protocol string\n\tif board.Https {\n\t\tprotocol = \"https\"\n\t} else {\n\t\tprotocol = \"http\"\n\t}\n\n\treturn fmt.Sprintf(\"%s:\/\/%s\/%s\/thumb\/%ds.jpg\", protocol, ThumbsURL, board.Name, p.Data.Tim)\n}\n<|endoftext|>"} {"text":"<commit_before>package ebuf\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestUndo(t *testing.T) {\n\tb := New()\n\tb.SetBytes([]byte(\"foo\"))\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n}\n<commit_msg>add Redo test<commit_after>package ebuf\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestUndo(t *testing.T) {\n\tb := New()\n\tb.SetBytes([]byte(\"foo\"))\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Undo()\n\tif !bytes.Equal(b.Bytes(), []byte{}) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n}\n\nfunc TestRedo(t *testing.T) {\n\tb := New()\n\tb.SetBytes([]byte(\"foo\"))\n\tb.Undo()\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"foo\")) {\n\t\tt.Fatal()\n\t}\n\tb.Undo()\n\tb.SetBytes([]byte(\"bar\")) \/\/ this should clear the redo states\n\tb.Redo()\n\tif !bytes.Equal(b.Bytes(), []byte(\"bar\")) {\n\t\tt.Fatal()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package extension\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/kayex\/sirius\"\n\t\"golang.org\/x\/net\/context\"\n\t\"googlemaps.github.io\/maps\"\n)\n\ntype Geocode struct {\n\tAPIKey string\n}\n\nfunc (x *Geocode) Run(m sirius.Message, cfg sirius.ExtensionConfig) (sirius.MessageAction, error) {\n\tcmd, match := m.Command(\"geocode\")\n\n\tif !match {\n\t\treturn sirius.NoAction(), nil\n\t}\n\n\tc, err := maps.NewClient(maps.WithAPIKey(x.APIKey))\n\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Maps client error: %v\", err.Error()))\n\t}\n\n\tr := &maps.GeocodingRequest{\n\t\tAddress: cmd.Args[0],\n\t}\n\n\tres, err := c.Geocode(context.Background(), r)\n\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Geocoding error: %v\", err.Error()))\n\t}\n\n\tpos := res[0]\n\tlocation := pos.Geometry.Location\n\tformatted := fmt.Sprintf(\"*%v*\\n`(%.6f, %.6f)`\", pos.FormattedAddress, location.Lat, location.Lng)\n\n\tedit := m.EditText().ReplaceWith(formatted)\n\n\treturn edit, nil\n}\n<commit_msg>Remove custom error messages<commit_after>package extension\n\nimport (\n\t\"fmt\"\n\t\"github.com\/kayex\/sirius\"\n\t\"golang.org\/x\/net\/context\"\n\t\"googlemaps.github.io\/maps\"\n)\n\ntype Geocode struct {\n\tAPIKey string\n}\n\nfunc (x *Geocode) Run(m sirius.Message, cfg sirius.ExtensionConfig) (sirius.MessageAction, error) {\n\tcmd, match := m.Command(\"geocode\")\n\n\tif !match {\n\t\treturn sirius.NoAction(), nil\n\t}\n\n\tc, err := maps.NewClient(maps.WithAPIKey(x.APIKey))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &maps.GeocodingRequest{\n\t\tAddress: cmd.Args[0],\n\t}\n\n\tres, err := c.Geocode(context.Background(), r)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpos := res[0]\n\tlocation := pos.Geometry.Location\n\tformatted := fmt.Sprintf(\"*%v*\\n`(%.6f, %.6f)`\", pos.FormattedAddress, location.Lat, location.Lng)\n\n\tedit := m.EditText().ReplaceWith(formatted)\n\n\treturn edit, nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fix args<commit_after><|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"net\/http\"\n\n\t. \"gopkg.in\/check.v1\"\n\n\t\"github.com\/hoffie\/larasync\/repository\"\n)\n\ntype NIBPutTest struct {\n\tNIBItemTest\n}\n\nvar _ = Suite(&NIBPutTest{getNIBItemTest()})\n\nfunc (t *NIBPutTest) SetUpTest(c *C) {\n\tt.NIBItemTest.SetUpTest(c)\n\tt.httpMethod = \"PUT\"\n\tt.req = t.requestWithBytes(c, t.signNIBBytes(c, t.getTestNIBBytes()))\n\tt.createRepository(c)\n}\n\nfunc (t *NIBPutTest) TestUnauthorized(c *C) {\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n}\n\nfunc (t *NIBPutTest) TestRepositoryNotExisting(c *C) {\n\tt.repositoryName = \"does-not-exist\"\n\tt.req = t.requestEmptyBody(c)\n\tt.signRequest()\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n}\n\nfunc (t *NIBPutTest) TestPutMalformedSignature(c *C) {\n\tdata := t.getTestNIBBytes()\n\tdata = t.signNIBBytes(c, data)\n\tdata[len(data)-1] = 0\n\tt.req = t.requestWithBytes(c, data)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n\trepo := t.getRepository(c)\n\tc.Assert(repo.HasNIB(t.nibID), Equals, false)\n}\n\nfunc (t *NIBPutTest) TestPutMalformedData(c *C) {\n\tdata := t.getTestNIBBytes()\n\tdata[0] = 0\n\tdata = t.signNIBBytes(c, data)\n\tt.req = t.requestWithBytes(c, data)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\n\tc.Assert(resp.Code, Equals, http.StatusBadRequest)\n\trepo := t.getRepository(c)\n\tc.Assert(repo.HasNIB(t.nibID), Equals, false)\n}\n\nfunc (t *NIBPutTest) TestPutNew(c *C) {\n\tt.signRequest()\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusCreated)\n}\n\nfunc (t *NIBPutTest) TestPutNewStore(c *C) {\n\tt.signRequest()\n\tt.getResponse(t.req)\n\tr := t.getRepository(c)\n\n\tnib, err := r.GetNIB(t.nibID)\n\tc.Assert(err, IsNil)\n\tc.Assert(nib.ID, Equals, t.nibID)\n}\n\nfunc (t *NIBPutTest) TestPutNewPrecondition(c *C) {\n\tt.addTestNIB(c)\n\theader := t.req.Header\n\trep := t.getRepository(c)\n\ttransaction, err := rep.CurrentTransaction()\n\tc.Assert(err, IsNil)\n\theader.Set(\"If-Match\", transaction.IDString())\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusOK)\n}\n\nfunc (t *NIBPutTest) TestPutNewPreconditionFailed(c *C) {\n\tt.addTestNIB(c)\n\theader := t.req.Header\n\trep := t.getRepository(c)\n\ttransaction, err := rep.CurrentTransaction()\n\tc.Assert(err, IsNil)\n\theader.Set(\"If-Match\", transaction.PreviousIDString())\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusPreconditionFailed)\n}\n\nfunc (t *NIBPutTest) changeNIBForPut(c *C, nib *repository.NIB) *repository.NIB {\n\trevision := generateTestRevision()\n\trevision.MetadataID = \"other-metadata\"\n\trevision.DeviceID = \"other-id\"\n\n\tnib.AppendRevision(revision)\n\treturn nib\n}\n\nfunc (t *NIBPutTest) requestWithNib(c *C, nib *repository.NIB) *http.Request {\n\tsignedData := t.signNIBBytes(\n\t\tc,\n\t\tt.nibToBytes(nib),\n\t)\n\treturn t.requestWithBytes(c, signedData)\n}\n\nfunc (t *NIBPutTest) TestPutUpdate(c *C) {\n\tnib := t.addTestNIB(c)\n\tt.req = t.requestWithNib(c, nib)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusOK)\n}\n\nfunc (t *NIBPutTest) TestPutChanged(c *C) {\n\tnib := t.addTestNIB(c)\n\tnib = t.changeNIBForPut(c, nib)\n\tt.req = t.requestWithNib(c, nib)\n\tt.signRequest()\n\n\tt.getResponse(t.req)\n\trepo := t.getRepository(c)\n\trepoNib, err := repo.GetNIB(nib.ID)\n\tc.Assert(err, IsNil)\n\n\trevisions := repoNib.Revisions\n\tc.Assert(len(revisions), Equals, 2)\n\n\tc.Assert(revisions[1].DeviceID, Equals, \"other-id\")\n}\n<commit_msg>api: test: guarantee a bad signature when checking for it<commit_after>package api\n\nimport (\n\t\"net\/http\"\n\n\t. \"gopkg.in\/check.v1\"\n\n\t\"github.com\/hoffie\/larasync\/repository\"\n)\n\ntype NIBPutTest struct {\n\tNIBItemTest\n}\n\nvar _ = Suite(&NIBPutTest{getNIBItemTest()})\n\nfunc (t *NIBPutTest) SetUpTest(c *C) {\n\tt.NIBItemTest.SetUpTest(c)\n\tt.httpMethod = \"PUT\"\n\tt.req = t.requestWithBytes(c, t.signNIBBytes(c, t.getTestNIBBytes()))\n\tt.createRepository(c)\n}\n\nfunc (t *NIBPutTest) TestUnauthorized(c *C) {\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n}\n\nfunc (t *NIBPutTest) TestRepositoryNotExisting(c *C) {\n\tt.repositoryName = \"does-not-exist\"\n\tt.req = t.requestEmptyBody(c)\n\tt.signRequest()\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n}\n\nfunc (t *NIBPutTest) TestPutMalformedSignature(c *C) {\n\tdata := t.getTestNIBBytes()\n\tdata = t.signNIBBytes(c, data)\n\n\t\/\/ destroy signature:\n\tfor x := 0; x < SignatureSize; x++ {\n\t\tdata[len(data)-1-x] = 0\n\t}\n\tt.req = t.requestWithBytes(c, data)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\n\tc.Assert(resp.Code, Equals, http.StatusUnauthorized)\n\trepo := t.getRepository(c)\n\tc.Assert(repo.HasNIB(t.nibID), Equals, false)\n}\n\nfunc (t *NIBPutTest) TestPutMalformedData(c *C) {\n\tdata := t.getTestNIBBytes()\n\tdata[0] = 0\n\tdata = t.signNIBBytes(c, data)\n\tt.req = t.requestWithBytes(c, data)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\n\tc.Assert(resp.Code, Equals, http.StatusBadRequest)\n\trepo := t.getRepository(c)\n\tc.Assert(repo.HasNIB(t.nibID), Equals, false)\n}\n\nfunc (t *NIBPutTest) TestPutNew(c *C) {\n\tt.signRequest()\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusCreated)\n}\n\nfunc (t *NIBPutTest) TestPutNewStore(c *C) {\n\tt.signRequest()\n\tt.getResponse(t.req)\n\tr := t.getRepository(c)\n\n\tnib, err := r.GetNIB(t.nibID)\n\tc.Assert(err, IsNil)\n\tc.Assert(nib.ID, Equals, t.nibID)\n}\n\nfunc (t *NIBPutTest) TestPutNewPrecondition(c *C) {\n\tt.addTestNIB(c)\n\theader := t.req.Header\n\trep := t.getRepository(c)\n\ttransaction, err := rep.CurrentTransaction()\n\tc.Assert(err, IsNil)\n\theader.Set(\"If-Match\", transaction.IDString())\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusOK)\n}\n\nfunc (t *NIBPutTest) TestPutNewPreconditionFailed(c *C) {\n\tt.addTestNIB(c)\n\theader := t.req.Header\n\trep := t.getRepository(c)\n\ttransaction, err := rep.CurrentTransaction()\n\tc.Assert(err, IsNil)\n\theader.Set(\"If-Match\", transaction.PreviousIDString())\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusPreconditionFailed)\n}\n\nfunc (t *NIBPutTest) changeNIBForPut(c *C, nib *repository.NIB) *repository.NIB {\n\trevision := generateTestRevision()\n\trevision.MetadataID = \"other-metadata\"\n\trevision.DeviceID = \"other-id\"\n\n\tnib.AppendRevision(revision)\n\treturn nib\n}\n\nfunc (t *NIBPutTest) requestWithNib(c *C, nib *repository.NIB) *http.Request {\n\tsignedData := t.signNIBBytes(\n\t\tc,\n\t\tt.nibToBytes(nib),\n\t)\n\treturn t.requestWithBytes(c, signedData)\n}\n\nfunc (t *NIBPutTest) TestPutUpdate(c *C) {\n\tnib := t.addTestNIB(c)\n\tt.req = t.requestWithNib(c, nib)\n\tt.signRequest()\n\n\tresp := t.getResponse(t.req)\n\tc.Assert(resp.Code, Equals, http.StatusOK)\n}\n\nfunc (t *NIBPutTest) TestPutChanged(c *C) {\n\tnib := t.addTestNIB(c)\n\tnib = t.changeNIBForPut(c, nib)\n\tt.req = t.requestWithNib(c, nib)\n\tt.signRequest()\n\n\tt.getResponse(t.req)\n\trepo := t.getRepository(c)\n\trepoNib, err := repo.GetNIB(nib.ID)\n\tc.Assert(err, IsNil)\n\n\trevisions := repoNib.Revisions\n\tc.Assert(len(revisions), Equals, 2)\n\n\tc.Assert(revisions[1].DeviceID, Equals, \"other-id\")\n}\n<|endoftext|>"} {"text":"<commit_before>package rain\n\nimport (\n\t\"crypto\/rand\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/cenkalti\/log\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/connection\"\n\t\"github.com\/cenkalti\/rain\/internal\/logger\"\n\t\"github.com\/cenkalti\/rain\/internal\/protocol\"\n\t\"github.com\/cenkalti\/rain\/internal\/torrent\"\n)\n\n\/\/ Limits\nconst (\n\tmaxPeerServe = 200\n\tmaxPeerPerTorrent = 50\n\tdownloadSlots = 4\n\tuploadSlots = 4\n)\n\n\/\/ http:\/\/www.bittorrent.org\/beps\/bep_0020.html\nvar peerIDPrefix = []byte(\"-RN\" + build + \"-\")\nvar build = \"0001\"\n\nfunc SetLogLevel(l log.Level) { logger.DefaultHandler.SetLevel(l) }\n\ntype Rain struct {\n\tconfig *Config\n\tpeerID protocol.PeerID\n\tlistener *net.TCPListener\n\ttransfers map[protocol.InfoHash]*transfer \/\/ all active transfers\n\ttransfersSKey map[[20]byte]*transfer \/\/ for encryption\n\ttransfersM sync.Mutex\n\tlog logger.Logger\n}\n\n\/\/ New returns a pointer to new Rain BitTorrent client.\nfunc New(c *Config) (*Rain, error) {\n\tpeerID, err := generatePeerID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Rain{\n\t\tconfig: c,\n\t\tpeerID: peerID,\n\t\ttransfers: make(map[protocol.InfoHash]*transfer),\n\t\ttransfersSKey: make(map[[20]byte]*transfer),\n\t\tlog: logger.New(\"rain\"),\n\t}, nil\n}\n\nfunc generatePeerID() (protocol.PeerID, error) {\n\tvar id protocol.PeerID\n\tcopy(id[:], peerIDPrefix)\n\t_, err := rand.Read(id[len(peerIDPrefix):])\n\treturn id, err\n}\n\n\/\/ Listen peer port and accept incoming peer connections.\nfunc (r *Rain) Listen() error {\n\tvar err error\n\taddr := &net.TCPAddr{Port: int(r.config.Port)}\n\tr.listener, err = net.ListenTCP(\"tcp4\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.log.Notice(\"Listening peers on tcp:\/\/\" + r.listener.Addr().String())\n\tgo r.accepter()\n\treturn nil\n}\n\nfunc (r *Rain) Close() error { return r.listener.Close() }\n\nfunc (r *Rain) accepter() {\n\tlimit := make(chan struct{}, maxPeerServe)\n\tfor {\n\t\tconn, err := r.listener.Accept()\n\t\tif err != nil {\n\t\t\tr.log.Error(err)\n\t\t\treturn\n\t\t}\n\t\tlimit <- struct{}{}\n\t\tgo func(c net.Conn) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tbuf := make([]byte, 10000)\n\t\t\t\t\tr.log.Critical(err, \"\\n\", string(buf[:runtime.Stack(buf, false)]))\n\t\t\t\t}\n\t\t\t\tc.Close()\n\t\t\t\t<-limit\n\t\t\t}()\n\t\t\tr.servePeer(c)\n\t\t}(conn)\n\t}\n}\n\nfunc (r *Rain) PeerID() protocol.PeerID { return r.peerID }\nfunc (r *Rain) Port() uint16 { return r.config.Port }\n\nfunc (r *Rain) servePeer(conn net.Conn) {\n\tp := newPeer(conn, incoming)\n\n\tgetSKey := func(sKeyHash [20]byte) (sKey []byte) {\n\t\tr.transfersM.Lock()\n\t\tt, ok := r.transfersSKey[sKeyHash]\n\t\tr.transfersM.Unlock()\n\t\tif ok {\n\t\t\tsKey = t.torrent.Info.Hash[:]\n\t\t}\n\t\treturn\n\t}\n\n\thasInfoHash := func(ih protocol.InfoHash) bool {\n\t\tr.transfersM.Lock()\n\t\t_, ok := r.transfers[ih]\n\t\tr.transfersM.Unlock()\n\t\treturn ok\n\t}\n\n\tencConn, _, _, ih, _, err := connection.Accept(p.conn, getSKey, r.config.Encryption.ForceIncoming, hasInfoHash, [8]byte{}, r.peerID)\n\tif err != nil {\n\t\tif err == connection.ErrOwnConnection {\n\t\t\tr.log.Debug(err)\n\t\t} else {\n\t\t\tr.log.Error(err)\n\t\t}\n\t\treturn\n\t}\n\tp.conn = encConn\n\tp.log.Info(\"Connection accepted\")\n\n\tr.transfersM.Lock()\n\tt, ok := r.transfers[ih]\n\tr.transfersM.Unlock()\n\tif !ok {\n\t\tr.log.Debug(\"Transfer is removed during incoming handshake\")\n\t\treturn\n\t}\n\n\tp.log.Debugln(\"servePeerConn: Handshake completed\")\n\tp.Serve(t)\n}\n\nfunc (r *Rain) Add(torrentPath, where string) (*transfer, error) {\n\ttorrent, err := torrent.New(torrentPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.log.Debugf(\"Parsed torrent file: %#v\", torrent)\n\treturn r.newTransfer(torrent, where)\n}\n\nfunc (r *Rain) AddMagnet(url, where string) (*transfer, error) { panic(\"not implemented\") }\n\nfunc (r *Rain) Start(t *transfer) { go t.Run() }\nfunc (r *Rain) Stop(t *transfer) { panic(\"not implemented\") }\nfunc (r *Rain) Remove(t *transfer) { panic(\"not implemented\") }\n<commit_msg>refactor<commit_after>package rain\n\nimport (\n\t\"crypto\/rand\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com\/cenkalti\/log\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/connection\"\n\t\"github.com\/cenkalti\/rain\/internal\/logger\"\n\t\"github.com\/cenkalti\/rain\/internal\/protocol\"\n\t\"github.com\/cenkalti\/rain\/internal\/torrent\"\n)\n\n\/\/ Limits\nconst (\n\tmaxPeerServe = 200\n\tmaxPeerPerTorrent = 50\n\tdownloadSlots = 4\n\tuploadSlots = 4\n)\n\n\/\/ http:\/\/www.bittorrent.org\/beps\/bep_0020.html\nvar peerIDPrefix = []byte(\"-RN\" + build + \"-\")\nvar build = \"0001\"\n\nfunc SetLogLevel(l log.Level) { logger.DefaultHandler.SetLevel(l) }\n\ntype Rain struct {\n\tconfig *Config\n\tpeerID protocol.PeerID\n\tlistener *net.TCPListener\n\ttransfers map[protocol.InfoHash]*transfer \/\/ all active transfers\n\ttransfersSKey map[[20]byte]*transfer \/\/ for encryption\n\ttransfersM sync.Mutex\n\tlog logger.Logger\n}\n\n\/\/ New returns a pointer to new Rain BitTorrent client.\nfunc New(c *Config) (*Rain, error) {\n\tpeerID, err := generatePeerID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Rain{\n\t\tconfig: c,\n\t\tpeerID: peerID,\n\t\ttransfers: make(map[protocol.InfoHash]*transfer),\n\t\ttransfersSKey: make(map[[20]byte]*transfer),\n\t\tlog: logger.New(\"rain\"),\n\t}, nil\n}\n\nfunc generatePeerID() (protocol.PeerID, error) {\n\tvar id protocol.PeerID\n\tcopy(id[:], peerIDPrefix)\n\t_, err := rand.Read(id[len(peerIDPrefix):])\n\treturn id, err\n}\n\n\/\/ Listen peer port and accept incoming peer connections.\nfunc (r *Rain) Listen() error {\n\tvar err error\n\taddr := &net.TCPAddr{Port: int(r.config.Port)}\n\tr.listener, err = net.ListenTCP(\"tcp4\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.log.Notice(\"Listening peers on tcp:\/\/\" + r.listener.Addr().String())\n\tgo r.accepter()\n\treturn nil\n}\n\nfunc (r *Rain) Close() error { return r.listener.Close() }\n\nfunc (r *Rain) accepter() {\n\tlimit := make(chan struct{}, maxPeerServe)\n\tfor {\n\t\tconn, err := r.listener.Accept()\n\t\tif err != nil {\n\t\t\tr.log.Error(err)\n\t\t\treturn\n\t\t}\n\t\tlimit <- struct{}{}\n\t\tgo func(c net.Conn) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tbuf := make([]byte, 10000)\n\t\t\t\t\tr.log.Critical(err, \"\\n\", string(buf[:runtime.Stack(buf, false)]))\n\t\t\t\t}\n\t\t\t\tc.Close()\n\t\t\t\t<-limit\n\t\t\t}()\n\t\t\tr.servePeer(c)\n\t\t}(conn)\n\t}\n}\n\nfunc (r *Rain) PeerID() protocol.PeerID { return r.peerID }\nfunc (r *Rain) Port() uint16 { return r.config.Port }\n\nfunc (r *Rain) servePeer(conn net.Conn) {\n\tgetSKey := func(sKeyHash [20]byte) (sKey []byte) {\n\t\tr.transfersM.Lock()\n\t\tt, ok := r.transfersSKey[sKeyHash]\n\t\tr.transfersM.Unlock()\n\t\tif ok {\n\t\t\tsKey = t.torrent.Info.Hash[:]\n\t\t}\n\t\treturn\n\t}\n\n\thasInfoHash := func(ih protocol.InfoHash) bool {\n\t\tr.transfersM.Lock()\n\t\t_, ok := r.transfers[ih]\n\t\tr.transfersM.Unlock()\n\t\treturn ok\n\t}\n\n\tencConn, _, _, ih, _, err := connection.Accept(conn, getSKey, r.config.Encryption.ForceIncoming, hasInfoHash, [8]byte{}, r.peerID)\n\tif err != nil {\n\t\tif err == connection.ErrOwnConnection {\n\t\t\tr.log.Debug(err)\n\t\t} else {\n\t\t\tr.log.Error(err)\n\t\t}\n\t\treturn\n\t}\n\n\tp := newPeer(encConn, incoming)\n\tp.log.Info(\"Connection accepted\")\n\n\tr.transfersM.Lock()\n\tt, ok := r.transfers[ih]\n\tr.transfersM.Unlock()\n\tif !ok {\n\t\tr.log.Debug(\"Transfer is removed during incoming handshake\")\n\t\treturn\n\t}\n\n\tp.log.Debugln(\"servePeerConn: Handshake completed\")\n\tp.Serve(t)\n}\n\nfunc (r *Rain) Add(torrentPath, where string) (*transfer, error) {\n\ttorrent, err := torrent.New(torrentPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.log.Debugf(\"Parsed torrent file: %#v\", torrent)\n\treturn r.newTransfer(torrent, where)\n}\n\nfunc (r *Rain) AddMagnet(url, where string) (*transfer, error) { panic(\"not implemented\") }\n\nfunc (r *Rain) Start(t *transfer) { go t.Run() }\nfunc (r *Rain) Stop(t *transfer) { panic(\"not implemented\") }\nfunc (r *Rain) Remove(t *transfer) { panic(\"not implemented\") }\n<|endoftext|>"} {"text":"<commit_before>\/\/ 文件数据记录类\n\/\/ create by gloomy 2017-04-06 10:11:35\npackage gutil\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst maxFileDataRecordingBytes = 1000000 \/\/ 默认文件大小\n\n\/\/ 文件数据记录对象\n\/\/ create by gloomy 2017-04-06 10:15:00\ntype FileDataRecording struct {\n\tsync.Mutex \/\/ 锁\n\tF *os.File \/\/ 文件对象\n\tFilePre string \/\/ 文件开头字符串\n\tFn string \/\/ 文件路径\n\tBytes int \/\/ 文件大小\n\tSeq int \/\/ 第几个\n\tFileProgram string \/\/ 文件存放路径\n\tMaxFileDataRecordingBytes int \/\/ 文件大小\n}\n\n\/\/ 打开文件数据记录\n\/\/ create by gloomy 2017-04-06 10:17:38\n\/\/ 文件存放目录地址 文件开头字符串 文件大小\n\/\/ 文件数据对象\nfunc OpenLoadFile(fileProgram, filePre string, maxSize int) *FileDataRecording {\n\tif maxSize == 0 {\n\t\tmaxSize = maxFileDataRecordingBytes\n\t}\n\tlf := &FileDataRecording{\n\t\tFilePre: filePre,\n\t\tFileProgram: fileProgram,\n\t\tMaxFileDataRecordingBytes: maxSize,\n\t}\n\tlf.Rotate()\n\treturn lf\n}\n\n\/\/ 文件退出\n\/\/ create by gloomy 2017-04-06 10:27:58\nfunc (f *FileDataRecording) Exit() {\n\tf.Lock()\n\tf.Close()\n\tf.F = nil\n\tf.Unlock()\n}\n\n\/\/ 文件关闭\n\/\/ create by gloomy 2017-04-06 10:22:14\nfunc (f *FileDataRecording) Close() {\n\tif f.F != nil {\n\t\tf.F.Close()\n\t\tos.Rename(f.Fn, f.Fn[0:len(f.Fn)-4]) \/\/去掉末尾的.tmp\n\t}\n}\n\n\/\/ 文件切换\n\/\/ create by gloomy 2017-04-06 10:30:05\nfunc (f *FileDataRecording) Rotate() {\n\tf.Lock()\n\tf.Seq = 0\n\tf.Close()\n\tf.CreateNewFile()\n\tf.Unlock()\n}\n\n\/\/ 创建新文件\n\/\/ create by gloomy 2017-04-06 10:33:11\n\/\/ 错误对象\nfunc (f *FileDataRecording) CreateNewFile() (err error) {\n\tf.Bytes = 0\n\tif !strings.HasSuffix(f.FileProgram, \"\/\") {\n\t\tf.FileProgram += \"\/\"\n\t}\n\tf.Fn = fmt.Sprintf(\"%s%s-%d-%d.tmp\", f.FileProgram, f.FilePre, time.Now().UnixNano(), f.Seq)\n\tf.Seq++\n\tf.F, err = os.OpenFile(f.Fn, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tfmt.Printf(\"create file %s failed: %s \\n\", f.Fn, err.Error())\n\t}\n\treturn\n}\n\n\/\/ 写入数据\n\/\/ create by gloomy 2017-04-06 11:40:55\n\/\/ 需要写入的数据\n\/\/ 错误对象\nfunc (f *FileDataRecording) WriteData(dataStr string) (err error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tif f.F == nil {\n\t\terr = f.CreateNewFile()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tdataStrLen := len(dataStr)\n\tif f.Bytes+dataStrLen > f.MaxFileDataRecordingBytes {\n\t\tf.Close()\n\t\tif err = f.CreateNewFile(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tf.Bytes += dataStrLen\n\t_, err = f.F.WriteString(dataStr)\n\treturn\n}\n\n\/\/ 获取所有完成的文件列表\n\/\/ create by gloomy 2017-04-06 13:46:51\n\/\/ 文件列表\nfunc (f *FileDataRecording) FileList() *[]string {\n\tvar (\n\t\tfileArray []string\n\t)\n\tfilepath.Walk(f.FileProgram, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif ext := filepath.Ext(path); ext == \".tmp\" {\n\t\t\treturn nil\n\t\t}\n\t\tif info.Size() == 0 {\n\t\t\tos.Remove(path)\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasPrefix(path, f.FilePre) {\n\t\t\tfileArray = append(fileArray, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn &fileArray\n}\n\n\/\/ 删除过期文件\n\/\/ create by gloomy 2017-04-06 22:53:17\n\/\/ 几天前\nfunc (f *FileDataRecording) RemoveOldFileList(days int) {\n\tvar (\n\t\ttimeDate = time.Now().AddDate(0, 0, days).UnixNano()\n\t\tunixNumber int\n\t\tfileName []string\n\t)\n\tfilepath.Walk(f.FileProgram, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif ext := filepath.Ext(path); ext == \".tmp\" {\n\t\t\treturn nil\n\t\t}\n\t\tif info.Size() == 0 {\n\t\t\tos.Remove(path)\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasPrefix(path, f.FilePre) {\n\t\t\tfileName = strings.Split(path, \"-\")\n\t\t\tif len(fileName) == 3 {\n\t\t\t\tunixNumber, err = strconv.Atoi(fileName[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"FileListRemoveOld fileProgram: %s path: %s err: %s \\n\", f.FileProgram, path, err.Error())\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif timeDate >= int64(unixNumber) {\n\t\t\t\t\tos.Remove(path)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n<commit_msg>修正获取文件列表及删除旧文件数据方法<commit_after>\/\/ 文件数据记录类\n\/\/ create by gloomy 2017-04-06 10:11:35\npackage gutil\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst maxFileDataRecordingBytes = 1000000 \/\/ 默认文件大小\n\n\/\/ 文件数据记录对象\n\/\/ create by gloomy 2017-04-06 10:15:00\ntype FileDataRecording struct {\n\tsync.Mutex \/\/ 锁\n\tF *os.File \/\/ 文件对象\n\tFilePre string \/\/ 文件开头字符串\n\tFn string \/\/ 文件路径\n\tBytes int \/\/ 文件大小\n\tSeq int \/\/ 第几个\n\tFileProgram string \/\/ 文件存放路径\n\tMaxFileDataRecordingBytes int \/\/ 文件大小\n}\n\n\/\/ 打开文件数据记录\n\/\/ create by gloomy 2017-04-06 10:17:38\n\/\/ 文件存放目录地址 文件开头字符串 文件大小\n\/\/ 文件数据对象\nfunc OpenLoadFile(fileProgram, filePre string, maxSize int) *FileDataRecording {\n\tif maxSize == 0 {\n\t\tmaxSize = maxFileDataRecordingBytes\n\t}\n\tlf := &FileDataRecording{\n\t\tFilePre: filePre,\n\t\tFileProgram: fileProgram,\n\t\tMaxFileDataRecordingBytes: maxSize,\n\t}\n\tlf.Rotate()\n\treturn lf\n}\n\n\/\/ 文件退出\n\/\/ create by gloomy 2017-04-06 10:27:58\nfunc (f *FileDataRecording) Exit() {\n\tf.Lock()\n\tf.Close()\n\tf.F = nil\n\tf.Unlock()\n}\n\n\/\/ 文件关闭\n\/\/ create by gloomy 2017-04-06 10:22:14\nfunc (f *FileDataRecording) Close() {\n\tif f.F != nil {\n\t\tf.F.Close()\n\t\tos.Rename(f.Fn, f.Fn[0:len(f.Fn)-4]) \/\/去掉末尾的.tmp\n\t}\n}\n\n\/\/ 文件切换\n\/\/ create by gloomy 2017-04-06 10:30:05\nfunc (f *FileDataRecording) Rotate() {\n\tf.Lock()\n\tf.Seq = 0\n\tf.Close()\n\tf.CreateNewFile()\n\tf.Unlock()\n}\n\n\/\/ 创建新文件\n\/\/ create by gloomy 2017-04-06 10:33:11\n\/\/ 错误对象\nfunc (f *FileDataRecording) CreateNewFile() (err error) {\n\tf.Bytes = 0\n\tif !strings.HasSuffix(f.FileProgram, \"\/\") {\n\t\tf.FileProgram += \"\/\"\n\t}\n\tf.Fn = fmt.Sprintf(\"%s%s-%d-%d.tmp\", f.FileProgram, f.FilePre, time.Now().UnixNano(), f.Seq)\n\tf.Seq++\n\tf.F, err = os.OpenFile(f.Fn, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tfmt.Printf(\"create file %s failed: %s \\n\", f.Fn, err.Error())\n\t}\n\treturn\n}\n\n\/\/ 写入数据\n\/\/ create by gloomy 2017-04-06 11:40:55\n\/\/ 需要写入的数据\n\/\/ 错误对象\nfunc (f *FileDataRecording) WriteData(dataStr string) (err error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tif f.F == nil {\n\t\terr = f.CreateNewFile()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tdataStrLen := len(dataStr)\n\tif f.Bytes+dataStrLen > f.MaxFileDataRecordingBytes {\n\t\tf.Close()\n\t\tif err = f.CreateNewFile(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tf.Bytes += dataStrLen\n\t_, err = f.F.WriteString(dataStr)\n\treturn\n}\n\n\/\/ 获取所有完成的文件列表\n\/\/ create by gloomy 2017-04-06 13:46:51\n\/\/ 文件列表\nfunc (f *FileDataRecording) FileList() *[]string {\n\tvar (\n\t\tfileArray []string\n\t\text string\n\t\terr error\n\t\tfileInfo os.FileInfo\n\t\tfileNamePath string\n\t)\n\tfileNameIn, err := GetMyAllFileByDir(f.FileProgram)\n\tif err != nil {\n\t\treturn &fileArray\n\t}\n\tfor _, fileName := range *fileNameIn {\n\t\tif ext = filepath.Ext(fileName); ext == \".tmp\" {\n\t\t\tcontinue\n\t\t}\n\t\tfileNamePath = fmt.Sprintf(\"%s%s\", f.FileProgram, fileName)\n\t\tfileInfo, err = os.Stat(fileNamePath)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif fileInfo.Size() == 0 {\n\t\t\tos.Remove(fileNamePath)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(fileName, f.FilePre) {\n\t\t\tfileArray = append(fileArray, fileNamePath)\n\t\t}\n\t}\n\treturn &fileArray\n}\n\n\/\/ 删除过期文件\n\/\/ create by gloomy 2017-04-06 22:53:17\n\/\/ 几天前\nfunc (f *FileDataRecording) RemoveOldFileList(days int) {\n\tvar (\n\t\ttimeDate = time.Now().AddDate(0, 0, days).UnixNano()\n\t\tunixNumber int\n\t\tfileNameSplit []string\n\t\terr error\n\t\text string\n\t\tfileInfo os.FileInfo\n\t\tfileNamePath string\n\t)\n\tfileNameIn, err := GetMyAllFileByDir(f.FileProgram)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fileName := range *fileNameIn {\n\t\tif ext = filepath.Ext(fileName); ext == \".tmp\" {\n\t\t\tcontinue\n\t\t}\n\t\tfileNamePath = fmt.Sprintf(\"%s%s\", f.FileProgram, fileName)\n\t\tfileInfo, err = os.Stat(fileNamePath)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif fileInfo.Size() == 0 {\n\t\t\tos.Remove(fileNamePath)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(fileName, f.FilePre) {\n\t\t\tfileNameSplit = strings.Split(fileName, \"-\")\n\t\t\tif len(fileNameSplit) == 3 {\n\t\t\t\tunixNumber, err = strconv.Atoi(fileNameSplit[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"FileListRemoveOld fileProgram: %s path: %s err: %s \\n\", f.FileProgram, fileName, err.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif timeDate >= int64(unixNumber) {\n\t\t\t\t\tos.Remove(fileNamePath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage xfs_test\n\nimport (\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/prometheus\/procfs\"\n\t\"github.com\/prometheus\/procfs\/xfs\"\n)\n\nfunc TestParseStats(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ts string\n\t\tfs bool\n\t\tstats *xfs.Stats\n\t\tinvalid bool\n\t}{\n\t\t{\n\t\t\tname: \"empty file OK\",\n\t\t},\n\t\t{\n\t\t\tname: \"short or empty lines and unknown labels ignored\",\n\t\t\ts: \"one\\n\\ntwo 1 2 3\\n\",\n\t\t\tstats: &xfs.Stats{},\n\t\t},\n\t\t{\n\t\t\tname: \"bad uint32\",\n\t\t\ts: \"extent_alloc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"bad uint64\",\n\t\t\ts: \"xpc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"extent_alloc bad\",\n\t\t\ts: \"extent_alloc 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"extent_alloc OK\",\n\t\t\ts: \"extent_alloc 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 1,\n\t\t\t\t\tBlocksAllocated: 2,\n\t\t\t\t\tExtentsFreed: 3,\n\t\t\t\t\tBlocksFreed: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"abt bad\",\n\t\t\ts: \"abt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"abt OK\",\n\t\t\ts: \"abt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCompares: 2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"blk_map bad\",\n\t\t\ts: \"blk_map 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"blk_map OK\",\n\t\t\ts: \"blk_map 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads: 1,\n\t\t\t\t\tWrites: 2,\n\t\t\t\t\tUnmaps: 3,\n\t\t\t\t\tExtentListInsertions: 4,\n\t\t\t\t\tExtentListDeletions: 5,\n\t\t\t\t\tExtentListLookups: 6,\n\t\t\t\t\tExtentListCompares: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"bmbt bad\",\n\t\t\ts: \"bmbt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"bmbt OK\",\n\t\t\ts: \"bmbt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCompares: 2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"dir bad\",\n\t\t\ts: \"dir 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"dir OK\",\n\t\t\ts: \"dir 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCreates: 2,\n\t\t\t\t\tRemoves: 3,\n\t\t\t\t\tGetdents: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"trans bad\",\n\t\t\ts: \"trans 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"trans OK\",\n\t\t\ts: \"trans 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync: 1,\n\t\t\t\t\tAsync: 2,\n\t\t\t\t\tEmpty: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ig bad\",\n\t\t\ts: \"ig 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ig OK\",\n\t\t\ts: \"ig 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts: 1,\n\t\t\t\t\tFound: 2,\n\t\t\t\t\tRecycle: 3,\n\t\t\t\t\tMissed: 4,\n\t\t\t\t\tDuplicate: 5,\n\t\t\t\t\tReclaims: 6,\n\t\t\t\t\tAttributeChange: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"log bad\",\n\t\t\ts: \"log 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"log OK\",\n\t\t\ts: \"log 1 2 3 4 5\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites: 1,\n\t\t\t\t\tBlocks: 2,\n\t\t\t\t\tNoInternalBuffers: 3,\n\t\t\t\t\tForce: 4,\n\t\t\t\t\tForceSleep: 5,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"rw bad\",\n\t\t\ts: \"rw 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"rw OK\",\n\t\t\ts: \"rw 1 2\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead: 1,\n\t\t\t\t\tWrite: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"attr bad\",\n\t\t\ts: \"attr 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"attr OK\",\n\t\t\ts: \"attr 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet: 1,\n\t\t\t\t\tSet: 2,\n\t\t\t\t\tRemove: 3,\n\t\t\t\t\tList: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"icluster bad\",\n\t\t\ts: \"icluster 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"icluster OK\",\n\t\t\ts: \"icluster 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush: 1,\n\t\t\t\t\tFlush: 2,\n\t\t\t\t\tFlushInode: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes bad\",\n\t\t\ts: \"vnodes 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (missing free) OK\",\n\t\t\ts: \"vnodes 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet: 3,\n\t\t\t\t\tHold: 4,\n\t\t\t\t\tRelease: 5,\n\t\t\t\t\tReclaim: 6,\n\t\t\t\t\tRemove: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (with free) OK\",\n\t\t\ts: \"vnodes 1 2 3 4 5 6 7 8\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet: 3,\n\t\t\t\t\tHold: 4,\n\t\t\t\t\tRelease: 5,\n\t\t\t\t\tReclaim: 6,\n\t\t\t\t\tRemove: 7,\n\t\t\t\t\tFree: 8,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"buf bad\",\n\t\t\ts: \"buf 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"buf OK\",\n\t\t\ts: \"buf 1 2 3 4 5 6 7 8 9\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet: 1,\n\t\t\t\t\tCreate: 2,\n\t\t\t\t\tGetLocked: 3,\n\t\t\t\t\tGetLockedWaited: 4,\n\t\t\t\t\tBusyLocked: 5,\n\t\t\t\t\tMissLocked: 6,\n\t\t\t\t\tPageRetries: 7,\n\t\t\t\t\tPageFound: 8,\n\t\t\t\t\tGetRead: 9,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"xpc bad\",\n\t\t\ts: \"xpc 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"xpc OK\",\n\t\t\ts: \"xpc 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 1,\n\t\t\t\t\tWriteBytes: 2,\n\t\t\t\t\tReadBytes: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"fixtures OK\",\n\t\t\tfs: true,\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 92447,\n\t\t\t\t\tBlocksAllocated: 97589,\n\t\t\t\t\tExtentsFreed: 92448,\n\t\t\t\t\tBlocksFreed: 93751,\n\t\t\t\t},\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 0,\n\t\t\t\t\tCompares: 0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted: 0,\n\t\t\t\t},\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads: 1767055,\n\t\t\t\t\tWrites: 188820,\n\t\t\t\t\tUnmaps: 184891,\n\t\t\t\t\tExtentListInsertions: 92447,\n\t\t\t\t\tExtentListDeletions: 92448,\n\t\t\t\t\tExtentListLookups: 2140766,\n\t\t\t\t\tExtentListCompares: 0,\n\t\t\t\t},\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 0,\n\t\t\t\t\tCompares: 0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted: 0,\n\t\t\t\t},\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups: 185039,\n\t\t\t\t\tCreates: 92447,\n\t\t\t\t\tRemoves: 92444,\n\t\t\t\t\tGetdents: 136422,\n\t\t\t\t},\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync: 706,\n\t\t\t\t\tAsync: 944304,\n\t\t\t\t\tEmpty: 0,\n\t\t\t\t},\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts: 185045,\n\t\t\t\t\tFound: 58807,\n\t\t\t\t\tRecycle: 0,\n\t\t\t\t\tMissed: 126238,\n\t\t\t\t\tDuplicate: 0,\n\t\t\t\t\tReclaims: 33637,\n\t\t\t\t\tAttributeChange: 22,\n\t\t\t\t},\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites: 2883,\n\t\t\t\t\tBlocks: 113448,\n\t\t\t\t\tNoInternalBuffers: 9,\n\t\t\t\t\tForce: 17360,\n\t\t\t\t\tForceSleep: 739,\n\t\t\t\t},\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead: 107739,\n\t\t\t\t\tWrite: 94045,\n\t\t\t\t},\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet: 4,\n\t\t\t\t\tSet: 0,\n\t\t\t\t\tRemove: 0,\n\t\t\t\t\tList: 0,\n\t\t\t\t},\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush: 8677,\n\t\t\t\t\tFlush: 7849,\n\t\t\t\t\tFlushInode: 135802,\n\t\t\t\t},\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 92601,\n\t\t\t\t\tAllocate: 0,\n\t\t\t\t\tGet: 0,\n\t\t\t\t\tHold: 0,\n\t\t\t\t\tRelease: 92444,\n\t\t\t\t\tReclaim: 92444,\n\t\t\t\t\tRemove: 92444,\n\t\t\t\t\tFree: 0,\n\t\t\t\t},\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet: 2666287,\n\t\t\t\t\tCreate: 7122,\n\t\t\t\t\tGetLocked: 2659202,\n\t\t\t\t\tGetLockedWaited: 3599,\n\t\t\t\t\tBusyLocked: 2,\n\t\t\t\t\tMissLocked: 7085,\n\t\t\t\t\tPageRetries: 0,\n\t\t\t\t\tPageFound: 10297,\n\t\t\t\t\tGetRead: 7085,\n\t\t\t\t},\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 399724544,\n\t\t\t\t\tWriteBytes: 92823103,\n\t\t\t\t\tReadBytes: 86219234,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tt.Logf(\"[%02d] test %q\", i, tt.name)\n\n\t\tvar (\n\t\t\tstats *xfs.Stats\n\t\t\terr error\n\t\t)\n\n\t\tif tt.s != \"\" {\n\t\t\tstats, err = xfs.ParseStats(strings.NewReader(tt.s))\n\t\t}\n\t\tif tt.fs {\n\t\t\tstats, err = procfs.FS(\"..\/fixtures\").XFSStats()\n\t\t}\n\n\t\tif tt.invalid && err == nil {\n\t\t\tt.Error(\"expected an error, but none occurred\")\n\t\t}\n\t\tif !tt.invalid && err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {\n\t\t\tlog.Printf(\"stats: %#v\", have)\n\t\t\tt.Errorf(\"unexpected XFS stats:\\nwant:\\n%v\\nhave:\\n%v\", want, have)\n\t\t}\n\t}\n}\n<commit_msg>Remove logging from xfs_test<commit_after>\/\/ Copyright 2017 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage xfs_test\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/prometheus\/procfs\"\n\t\"github.com\/prometheus\/procfs\/xfs\"\n)\n\nfunc TestParseStats(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\ts string\n\t\tfs bool\n\t\tstats *xfs.Stats\n\t\tinvalid bool\n\t}{\n\t\t{\n\t\t\tname: \"empty file OK\",\n\t\t},\n\t\t{\n\t\t\tname: \"short or empty lines and unknown labels ignored\",\n\t\t\ts: \"one\\n\\ntwo 1 2 3\\n\",\n\t\t\tstats: &xfs.Stats{},\n\t\t},\n\t\t{\n\t\t\tname: \"bad uint32\",\n\t\t\ts: \"extent_alloc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"bad uint64\",\n\t\t\ts: \"xpc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"extent_alloc bad\",\n\t\t\ts: \"extent_alloc 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"extent_alloc OK\",\n\t\t\ts: \"extent_alloc 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 1,\n\t\t\t\t\tBlocksAllocated: 2,\n\t\t\t\t\tExtentsFreed: 3,\n\t\t\t\t\tBlocksFreed: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"abt bad\",\n\t\t\ts: \"abt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"abt OK\",\n\t\t\ts: \"abt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCompares: 2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"blk_map bad\",\n\t\t\ts: \"blk_map 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"blk_map OK\",\n\t\t\ts: \"blk_map 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads: 1,\n\t\t\t\t\tWrites: 2,\n\t\t\t\t\tUnmaps: 3,\n\t\t\t\t\tExtentListInsertions: 4,\n\t\t\t\t\tExtentListDeletions: 5,\n\t\t\t\t\tExtentListLookups: 6,\n\t\t\t\t\tExtentListCompares: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"bmbt bad\",\n\t\t\ts: \"bmbt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"bmbt OK\",\n\t\t\ts: \"bmbt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCompares: 2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"dir bad\",\n\t\t\ts: \"dir 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"dir OK\",\n\t\t\ts: \"dir 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups: 1,\n\t\t\t\t\tCreates: 2,\n\t\t\t\t\tRemoves: 3,\n\t\t\t\t\tGetdents: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"trans bad\",\n\t\t\ts: \"trans 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"trans OK\",\n\t\t\ts: \"trans 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync: 1,\n\t\t\t\t\tAsync: 2,\n\t\t\t\t\tEmpty: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ig bad\",\n\t\t\ts: \"ig 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ig OK\",\n\t\t\ts: \"ig 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts: 1,\n\t\t\t\t\tFound: 2,\n\t\t\t\t\tRecycle: 3,\n\t\t\t\t\tMissed: 4,\n\t\t\t\t\tDuplicate: 5,\n\t\t\t\t\tReclaims: 6,\n\t\t\t\t\tAttributeChange: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"log bad\",\n\t\t\ts: \"log 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"log OK\",\n\t\t\ts: \"log 1 2 3 4 5\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites: 1,\n\t\t\t\t\tBlocks: 2,\n\t\t\t\t\tNoInternalBuffers: 3,\n\t\t\t\t\tForce: 4,\n\t\t\t\t\tForceSleep: 5,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"rw bad\",\n\t\t\ts: \"rw 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"rw OK\",\n\t\t\ts: \"rw 1 2\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead: 1,\n\t\t\t\t\tWrite: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"attr bad\",\n\t\t\ts: \"attr 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"attr OK\",\n\t\t\ts: \"attr 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet: 1,\n\t\t\t\t\tSet: 2,\n\t\t\t\t\tRemove: 3,\n\t\t\t\t\tList: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"icluster bad\",\n\t\t\ts: \"icluster 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"icluster OK\",\n\t\t\ts: \"icluster 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush: 1,\n\t\t\t\t\tFlush: 2,\n\t\t\t\t\tFlushInode: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes bad\",\n\t\t\ts: \"vnodes 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (missing free) OK\",\n\t\t\ts: \"vnodes 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet: 3,\n\t\t\t\t\tHold: 4,\n\t\t\t\t\tRelease: 5,\n\t\t\t\t\tReclaim: 6,\n\t\t\t\t\tRemove: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (with free) OK\",\n\t\t\ts: \"vnodes 1 2 3 4 5 6 7 8\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet: 3,\n\t\t\t\t\tHold: 4,\n\t\t\t\t\tRelease: 5,\n\t\t\t\t\tReclaim: 6,\n\t\t\t\t\tRemove: 7,\n\t\t\t\t\tFree: 8,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"buf bad\",\n\t\t\ts: \"buf 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"buf OK\",\n\t\t\ts: \"buf 1 2 3 4 5 6 7 8 9\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet: 1,\n\t\t\t\t\tCreate: 2,\n\t\t\t\t\tGetLocked: 3,\n\t\t\t\t\tGetLockedWaited: 4,\n\t\t\t\t\tBusyLocked: 5,\n\t\t\t\t\tMissLocked: 6,\n\t\t\t\t\tPageRetries: 7,\n\t\t\t\t\tPageFound: 8,\n\t\t\t\t\tGetRead: 9,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"xpc bad\",\n\t\t\ts: \"xpc 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"xpc OK\",\n\t\t\ts: \"xpc 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 1,\n\t\t\t\t\tWriteBytes: 2,\n\t\t\t\t\tReadBytes: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"fixtures OK\",\n\t\t\tfs: true,\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 92447,\n\t\t\t\t\tBlocksAllocated: 97589,\n\t\t\t\t\tExtentsFreed: 92448,\n\t\t\t\t\tBlocksFreed: 93751,\n\t\t\t\t},\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 0,\n\t\t\t\t\tCompares: 0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted: 0,\n\t\t\t\t},\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads: 1767055,\n\t\t\t\t\tWrites: 188820,\n\t\t\t\t\tUnmaps: 184891,\n\t\t\t\t\tExtentListInsertions: 92447,\n\t\t\t\t\tExtentListDeletions: 92448,\n\t\t\t\t\tExtentListLookups: 2140766,\n\t\t\t\t\tExtentListCompares: 0,\n\t\t\t\t},\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups: 0,\n\t\t\t\t\tCompares: 0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted: 0,\n\t\t\t\t},\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups: 185039,\n\t\t\t\t\tCreates: 92447,\n\t\t\t\t\tRemoves: 92444,\n\t\t\t\t\tGetdents: 136422,\n\t\t\t\t},\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync: 706,\n\t\t\t\t\tAsync: 944304,\n\t\t\t\t\tEmpty: 0,\n\t\t\t\t},\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts: 185045,\n\t\t\t\t\tFound: 58807,\n\t\t\t\t\tRecycle: 0,\n\t\t\t\t\tMissed: 126238,\n\t\t\t\t\tDuplicate: 0,\n\t\t\t\t\tReclaims: 33637,\n\t\t\t\t\tAttributeChange: 22,\n\t\t\t\t},\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites: 2883,\n\t\t\t\t\tBlocks: 113448,\n\t\t\t\t\tNoInternalBuffers: 9,\n\t\t\t\t\tForce: 17360,\n\t\t\t\t\tForceSleep: 739,\n\t\t\t\t},\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead: 107739,\n\t\t\t\t\tWrite: 94045,\n\t\t\t\t},\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet: 4,\n\t\t\t\t\tSet: 0,\n\t\t\t\t\tRemove: 0,\n\t\t\t\t\tList: 0,\n\t\t\t\t},\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush: 8677,\n\t\t\t\t\tFlush: 7849,\n\t\t\t\t\tFlushInode: 135802,\n\t\t\t\t},\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive: 92601,\n\t\t\t\t\tAllocate: 0,\n\t\t\t\t\tGet: 0,\n\t\t\t\t\tHold: 0,\n\t\t\t\t\tRelease: 92444,\n\t\t\t\t\tReclaim: 92444,\n\t\t\t\t\tRemove: 92444,\n\t\t\t\t\tFree: 0,\n\t\t\t\t},\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet: 2666287,\n\t\t\t\t\tCreate: 7122,\n\t\t\t\t\tGetLocked: 2659202,\n\t\t\t\t\tGetLockedWaited: 3599,\n\t\t\t\t\tBusyLocked: 2,\n\t\t\t\t\tMissLocked: 7085,\n\t\t\t\t\tPageRetries: 0,\n\t\t\t\t\tPageFound: 10297,\n\t\t\t\t\tGetRead: 7085,\n\t\t\t\t},\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 399724544,\n\t\t\t\t\tWriteBytes: 92823103,\n\t\t\t\t\tReadBytes: 86219234,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tvar (\n\t\t\tstats *xfs.Stats\n\t\t\terr error\n\t\t)\n\n\t\tif tt.s != \"\" {\n\t\t\tstats, err = xfs.ParseStats(strings.NewReader(tt.s))\n\t\t}\n\t\tif tt.fs {\n\t\t\tstats, err = procfs.FS(\"..\/fixtures\").XFSStats()\n\t\t}\n\n\t\tif tt.invalid && err == nil {\n\t\t\tt.Error(\"expected an error, but none occurred\")\n\t\t}\n\t\tif !tt.invalid && err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {\n\t\t\tt.Errorf(\"unexpected XFS stats:\\nwant:\\n%v\\nhave:\\n%v\", want, have)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package st300\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/larixsource\/suntech\/st\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc equalEMG(t *testing.T, expected *EmergencyReport, actual *EmergencyReport) {\n\trequire.NotNil(t, expected)\n\trequire.NotNil(t, actual)\n\tassert.Equal(t, expected.DevID, actual.DevID)\n\tassert.Equal(t, expected.Model, actual.Model)\n\tassert.Equal(t, expected.SwVer, actual.SwVer)\n\tassert.Equal(t, expected.Timestamp, actual.Timestamp)\n\tassert.Equal(t, expected.Cell, actual.Cell)\n\tassert.InEpsilon(t, expected.Latitude, actual.Latitude, epsilon)\n\tassert.InEpsilon(t, expected.Longitude, actual.Longitude, epsilon)\n\tassert.Equal(t, expected.Speed, actual.Speed)\n\tassert.Equal(t, expected.Course, actual.Course)\n\tassert.Equal(t, expected.Satellites, actual.Satellites)\n\tassert.Equal(t, expected.GPSFixed, actual.GPSFixed)\n\tassert.Equal(t, expected.Distance, actual.Distance)\n\tassert.Equal(t, expected.PowerVolt, actual.PowerVolt)\n\tassert.Equal(t, expected.IO, actual.IO)\n\tassert.Equal(t, expected.EmgID, actual.EmgID)\n\tassert.Equal(t, expected.DrivingHourMeter, actual.DrivingHourMeter)\n\tassert.Equal(t, expected.BackupVolt, actual.BackupVolt)\n\tassert.Equal(t, expected.RealTime, actual.RealTime)\n}\n\nfunc TestEMG300(t *testing.T) {\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;1;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\texpectedEMG := &EmergencyReport{\n\t\tHdr: EMGReport,\n\t\tDevID: \"100850000\",\n\t\tModel: st.ST300,\n\t\tSwVer: 10,\n\t\tTimestamp: time.Date(2008, 10, 17, 7, 41, 56, 0, time.UTC),\n\t\tCell: \"00100\",\n\t\tLatitude: 37.478519,\n\t\tLongitude: 126.886819,\n\t\tSpeed: 0.012,\n\t\tCourse: 0,\n\t\tSatellites: 9,\n\t\tGPSFixed: true,\n\t\tDistance: 0,\n\t\tPowerVolt: 15.3,\n\t\tIO: \"001100\",\n\t\tEmgID: st.PanicButtonEmg,\n\t\tDrivingHourMeter: 0,\n\t\tBackupVolt: 4.5,\n\t\tRealTime: true,\n\t}\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n\n}\n<commit_msg>Adds tests for all types of EMG ID<commit_after>package st300\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/larixsource\/suntech\/st\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc equalEMG(t *testing.T, expected *EmergencyReport, actual *EmergencyReport) {\n\trequire.NotNil(t, expected)\n\trequire.NotNil(t, actual)\n\tassert.Equal(t, expected.DevID, actual.DevID)\n\tassert.Equal(t, expected.Model, actual.Model)\n\tassert.Equal(t, expected.SwVer, actual.SwVer)\n\tassert.Equal(t, expected.Timestamp, actual.Timestamp)\n\tassert.Equal(t, expected.Cell, actual.Cell)\n\tassert.InEpsilon(t, expected.Latitude, actual.Latitude, epsilon)\n\tassert.InEpsilon(t, expected.Longitude, actual.Longitude, epsilon)\n\tassert.Equal(t, expected.Speed, actual.Speed)\n\tassert.Equal(t, expected.Course, actual.Course)\n\tassert.Equal(t, expected.Satellites, actual.Satellites)\n\tassert.Equal(t, expected.GPSFixed, actual.GPSFixed)\n\tassert.Equal(t, expected.Distance, actual.Distance)\n\tassert.Equal(t, expected.PowerVolt, actual.PowerVolt)\n\tassert.Equal(t, expected.IO, actual.IO)\n\tassert.Equal(t, expected.EmgID, actual.EmgID)\n\tassert.Equal(t, expected.DrivingHourMeter, actual.DrivingHourMeter)\n\tassert.Equal(t, expected.BackupVolt, actual.BackupVolt)\n\tassert.Equal(t, expected.RealTime, actual.RealTime)\n}\n\nvar testEMG = EmergencyReport{\n\tHdr: EMGReport,\n\tDevID: \"100850000\",\n\tModel: st.ST300,\n\tSwVer: 10,\n\tTimestamp: time.Date(2008, 10, 17, 7, 41, 56, 0, time.UTC),\n\tCell: \"00100\",\n\tLatitude: 37.478519,\n\tLongitude: 126.886819,\n\tSpeed: 0.012,\n\tCourse: 0,\n\tSatellites: 9,\n\tGPSFixed: true,\n\tDistance: 0,\n\tPowerVolt: 15.3,\n\tIO: \"001100\",\n\tEmgID: st.PanicButtonEmg,\n\tDrivingHourMeter: 0,\n\tBackupVolt: 4.5,\n\tRealTime: true,\n}\n\nfunc TestEMG300PanicButton(t *testing.T) {\n\texpectedEMG := testEMG\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;1;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300ParkingLock(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.ParkingLockEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;2;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300RemovingMainPower(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.RemovingMainPowerEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;3;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300AntiThef(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.AntiTheftEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;5;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300AntiTheftDoor(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.AntiTheftDoorEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;6;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300Motion(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.MotionEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;7;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n\nfunc TestEMG300AntiTheftShock(t *testing.T) {\n\texpectedEMG := testEMG\n\texpectedEMG.EmgID = st.AntiTheftShockEmg\n\n\tframe := \"ST300EMG;100850000;01;010;20081017;07:41:56;00100;+37.478519;+126.886819;000.012;000.00;9;1;0;15.30;001100;8;0;4.5;1\\r\"\n\tp := ParseString(frame, ParserOpts{})\n\tassert.True(t, p.Next())\n\tassert.Nil(t, p.Error())\n\tmsg := p.Msg()\n\trequire.NotNil(t, msg)\n\tspew.Dump(msg)\n\n\tassert.EqualValues(t, st.ST300, msg.Model)\n\tassert.Equal(t, []byte(frame), msg.Frame)\n\tassert.Nil(t, msg.ParsingError)\n\n\tassert.Equal(t, msg.Type, EMGReport)\n\tequalEMG(t, &expectedEMG, msg.EMG)\n\n\tassert.False(t, p.Next())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package function implements function-level operations.\npackage function\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/apex\/log\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\/lambdaiface\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/jpillora\/archive\"\n\t\"gopkg.in\/validator.v2\"\n\n\t\"github.com\/apex\/apex\/hooks\"\n\t\"github.com\/apex\/apex\/utils\"\n)\n\n\/\/ defaultPlugins are the default plugins which are required by Apex. Note that\n\/\/ the order here is important for some plugins such as inference before the\n\/\/ runtimes.\nvar defaultPlugins = []string{\n\t\"inference\",\n\t\"golang\",\n\t\"python\",\n\t\"nodejs\",\n\t\"hooks\",\n\t\"env\",\n\t\"shim\",\n}\n\n\/\/ InvocationType determines how an invocation request is made.\ntype InvocationType string\n\n\/\/ Invocation types.\nconst (\n\tRequestResponse InvocationType = \"RequestResponse\"\n\tEvent = \"Event\"\n\tDryRun = \"DryRun\"\n)\n\n\/\/ CurrentAlias name.\nconst CurrentAlias = \"current\"\n\n\/\/ InvokeError records an error from an invocation.\ntype InvokeError struct {\n\tMessage string `json:\"errorMessage\"`\n\tType string `json:\"errorType\"`\n\tStack []string `json:\"stackTrace\"`\n\tHandled bool\n}\n\n\/\/ Error message.\nfunc (e *InvokeError) Error() string {\n\treturn e.Message\n}\n\n\/\/ Config for a Lambda function.\ntype Config struct {\n\tDescription string `json:\"description\"`\n\tRuntime string `json:\"runtime\" validate:\"nonzero\"`\n\tMemory int64 `json:\"memory\" validate:\"nonzero\"`\n\tTimeout int64 `json:\"timeout\" validate:\"nonzero\"`\n\tRole string `json:\"role\" validate:\"nonzero\"`\n\tHandler string `json:\"handler\" validate:\"nonzero\"`\n\tShim bool `json:\"shim\"`\n\tEnvironment map[string]string `json:\"environment\"`\n\tHooks hooks.Hooks `json:\"hooks\"`\n}\n\n\/\/ Function represents a Lambda function, with configuration loaded\n\/\/ from the \"function.json\" file on disk.\ntype Function struct {\n\tConfig\n\tName string\n\tFunctionName string\n\tPath string\n\tService lambdaiface.LambdaAPI\n\tLog log.Interface\n\tIgnoredPatterns []string\n\tPlugins []string\n}\n\n\/\/ Open the function.json file and prime the config.\nfunc (f *Function) Open() error {\n\tf.Log = f.Log.WithField(\"function\", f.Name)\n\n\tif f.Plugins == nil {\n\t\tf.Plugins = defaultPlugins\n\t}\n\n\tif f.Environment == nil {\n\t\tf.Environment = make(map[string]string)\n\t}\n\n\tp, err := os.Open(filepath.Join(f.Path, \"function.json\"))\n\tif err == nil {\n\t\tif err := json.NewDecoder(p).Decode(&f.Config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := f.hookOpen(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validator.Validate(&f.Config); err != nil {\n\t\treturn fmt.Errorf(\"error opening function %s: %s\", f.Name, err.Error())\n\t}\n\n\tpatterns, err := utils.ReadIgnoreFile(f.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.IgnoredPatterns = append(f.IgnoredPatterns, patterns...)\n\n\treturn nil\n}\n\n\/\/ Setenv sets environment variable `name` to `value`.\nfunc (f *Function) Setenv(name, value string) {\n\tf.Environment[name] = value\n}\n\n\/\/ Deploy code and then configuration.\nfunc (f *Function) Deploy() error {\n\tif err := f.DeployCode(); err != nil {\n\t\treturn err\n\t}\n\n\treturn f.DeployConfig()\n}\n\n\/\/ DeployCode generates a zip and creates or updates the function.\nfunc (f *Function) DeployCode() error {\n\tf.Log.Info(\"deploying\")\n\n\tzip, err := f.BuildBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := f.hookDeploy(); err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err := f.GetConfig()\n\n\tif e, ok := err.(awserr.Error); ok {\n\t\tif e.Code() == \"ResourceNotFoundException\" {\n\t\t\treturn f.Create(zip)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremoteHash := *config.Configuration.CodeSha256\n\tlocalHash := utils.Sha256(zip)\n\n\tif localHash == remoteHash {\n\t\tf.Log.Info(\"unchanged\")\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"local\": localHash,\n\t\t\"remote\": remoteHash,\n\t}).Debug(\"changed\")\n\n\treturn f.Update(zip)\n}\n\n\/\/ DeployConfig deploys changes to configuration.\nfunc (f *Function) DeployConfig() error {\n\tf.Log.Info(\"deploying config\")\n\n\t_, err := f.Service.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tMemorySize: &f.Memory,\n\t\tTimeout: &f.Timeout,\n\t\tDescription: &f.Description,\n\t\tRole: &f.Role,\n\t\tHandler: &f.Handler,\n\t})\n\n\treturn err\n}\n\n\/\/ Delete the function including all its versions\nfunc (f *Function) Delete() error {\n\tf.Log.Info(\"deleting\")\n\t_, err := f.Service.DeleteFunction(&lambda.DeleteFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n\treturn err\n}\n\n\/\/ GetConfig returns the function configuration.\nfunc (f *Function) GetConfig() (*lambda.GetFunctionOutput, error) {\n\tf.Log.Debug(\"fetching config\")\n\treturn f.Service.GetFunction(&lambda.GetFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n}\n\n\/\/ GetConfigQualifier returns the function configuration for the given qualifier.\nfunc (f *Function) GetConfigQualifier(s string) (*lambda.GetFunctionOutput, error) {\n\tf.Log.Debug(\"fetching config\")\n\treturn f.Service.GetFunction(&lambda.GetFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tQualifier: &s,\n\t})\n}\n\n\/\/ GetConfigCurrent returns the function configuration for the current version.\nfunc (f *Function) GetConfigCurrent() (*lambda.GetFunctionOutput, error) {\n\treturn f.GetConfigQualifier(CurrentAlias)\n}\n\n\/\/ Update the function with the given `zip`.\nfunc (f *Function) Update(zip []byte) error {\n\tf.Log.Info(\"updating function\")\n\n\tupdated, err := f.Service.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tPublish: aws.Bool(true),\n\t\tZipFile: zip,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Info(\"updating alias\")\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: updated.Version,\n\t})\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"version\": *updated.Version,\n\t\t\"name\": f.FunctionName,\n\t}).Info(\"deployed\")\n\n\treturn nil\n}\n\n\/\/ Create the function with the given `zip`.\nfunc (f *Function) Create(zip []byte) error {\n\tf.Log.Info(\"creating function\")\n\n\tcreated, err := f.Service.CreateFunction(&lambda.CreateFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tDescription: &f.Description,\n\t\tMemorySize: &f.Memory,\n\t\tTimeout: &f.Timeout,\n\t\tRuntime: &f.Runtime,\n\t\tHandler: &f.Handler,\n\t\tRole: &f.Role,\n\t\tPublish: aws.Bool(true),\n\t\tCode: &lambda.FunctionCode{\n\t\t\tZipFile: zip,\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Info(\"creating alias\")\n\n\t_, err = f.Service.CreateAlias(&lambda.CreateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tFunctionVersion: created.Version,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"version\": *created.Version,\n\t\t\"name\": f.FunctionName,\n\t}).Info(\"deployed\")\n\n\treturn nil\n}\n\n\/\/ Invoke the remote Lambda function, returning the response and logs, if any.\nfunc (f *Function) Invoke(event, context interface{}) (reply, logs io.Reader, err error) {\n\teventBytes, err := json.Marshal(event)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcontextBytes, err := json.Marshal(context)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := f.Service.Invoke(&lambda.InvokeInput{\n\t\tClientContext: aws.String(base64.StdEncoding.EncodeToString(contextBytes)),\n\t\tFunctionName: &f.FunctionName,\n\t\tInvocationType: aws.String(string(RequestResponse)),\n\t\tLogType: aws.String(\"Tail\"),\n\t\tQualifier: aws.String(CurrentAlias),\n\t\tPayload: eventBytes,\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlogs = base64.NewDecoder(base64.StdEncoding, strings.NewReader(*res.LogResult))\n\n\tif res.FunctionError != nil {\n\t\te := &InvokeError{\n\t\t\tHandled: *res.FunctionError == \"Handled\",\n\t\t}\n\n\t\tif err := json.Unmarshal(res.Payload, e); err != nil {\n\t\t\treturn nil, logs, err\n\t\t}\n\n\t\treturn nil, logs, e\n\t}\n\n\treply = bytes.NewReader(res.Payload)\n\treturn reply, logs, nil\n}\n\n\/\/ Rollback the function to the previous.\nfunc (f *Function) Rollback() error {\n\tf.Log.Info(\"rolling back\")\n\n\talias, err := f.Service.GetAlias(&lambda.GetAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Infof(\"current version: %s\", *alias.FunctionVersion)\n\n\tlist, err := f.Service.ListVersionsByFunction(&lambda.ListVersionsByFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tversions := list.Versions[1:] \/\/ remove $LATEST\n\tif len(versions) < 2 {\n\t\treturn errors.New(\"Can't rollback. Only one version deployed.\")\n\t}\n\n\tlatest := *versions[len(versions)-1].Version\n\tprev := *versions[len(versions)-2].Version\n\trollback := latest\n\n\tif *alias.FunctionVersion == latest {\n\t\trollback = prev\n\t}\n\n\tf.Log.Infof(\"rollback to version: %s\", rollback)\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: &rollback,\n\t})\n\n\treturn err\n}\n\n\/\/ RollbackVersion the function to the specified version.\nfunc (f *Function) RollbackVersion(version string) error {\n\tf.Log.Info(\"rolling back\")\n\n\talias, err := f.Service.GetAlias(&lambda.GetAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Infof(\"current version: %s\", *alias.FunctionVersion)\n\n\tif version == *alias.FunctionVersion {\n\t\treturn errors.New(\"Specified version currently deployed.\")\n\t}\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: &version,\n\t})\n\n\treturn err\n}\n\n\/\/ BuildBytes returns the generated zip as bytes.\nfunc (f *Function) BuildBytes() ([]byte, error) {\n\tr, err := f.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.Log.Infof(\"created build (%s)\", humanize.Bytes(uint64(len(b))))\n\treturn b, nil\n}\n\n\/\/ Build returns the zipped contents of the function.\nfunc (f *Function) Build() (io.Reader, error) {\n\tf.Log.Debugf(\"creating build\")\n\n\tbuf := new(bytes.Buffer)\n\tzip := archive.NewZipWriter(buf)\n\n\tif err := f.hookBuild(zip); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiles, err := utils.LoadFiles(f.Path, f.IgnoredPatterns)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, path := range files {\n\t\tf.Log.WithField(\"file\", path).Debug(\"add file to zip\")\n\n\t\tfile, err := os.Open(filepath.Join(f.Path, path))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := zip.AddFile(path, file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := file.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := zip.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\n\/\/ Clean invokes the CleanHook, useful for removing build artifacts and so on.\nfunc (f *Function) Clean() error {\n\treturn f.hookClean()\n}\n\n\/\/ GroupName returns the CloudWatchLogs group name.\nfunc (f *Function) GroupName() string {\n\treturn fmt.Sprintf(\"\/aws\/lambda\/%s\", f.FunctionName)\n}\n\n\/\/ hookOpen calls Openers.\nfunc (f *Function) hookOpen() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Opener); ok {\n\t\t\tif err := p.Open(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookBuild calls Builders.\nfunc (f *Function) hookBuild(zip *archive.Archive) error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Builder); ok {\n\t\t\tif err := p.Build(f, zip); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookClean calls Cleaners.\nfunc (f *Function) hookClean() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Cleaner); ok {\n\t\t\tif err := p.Clean(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookDeploy calls Deployers.\nfunc (f *Function) hookDeploy() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Deployer); ok {\n\t\t\tif err := p.Deploy(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>fix updating configuration. Closes #206<commit_after>\/\/ Package function implements function-level operations.\npackage function\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/apex\/log\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\/lambdaiface\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/jpillora\/archive\"\n\t\"gopkg.in\/validator.v2\"\n\n\t\"github.com\/apex\/apex\/hooks\"\n\t\"github.com\/apex\/apex\/utils\"\n)\n\n\/\/ defaultPlugins are the default plugins which are required by Apex. Note that\n\/\/ the order here is important for some plugins such as inference before the\n\/\/ runtimes.\nvar defaultPlugins = []string{\n\t\"inference\",\n\t\"golang\",\n\t\"python\",\n\t\"nodejs\",\n\t\"hooks\",\n\t\"env\",\n\t\"shim\",\n}\n\n\/\/ InvocationType determines how an invocation request is made.\ntype InvocationType string\n\n\/\/ Invocation types.\nconst (\n\tRequestResponse InvocationType = \"RequestResponse\"\n\tEvent = \"Event\"\n\tDryRun = \"DryRun\"\n)\n\n\/\/ CurrentAlias name.\nconst CurrentAlias = \"current\"\n\n\/\/ InvokeError records an error from an invocation.\ntype InvokeError struct {\n\tMessage string `json:\"errorMessage\"`\n\tType string `json:\"errorType\"`\n\tStack []string `json:\"stackTrace\"`\n\tHandled bool\n}\n\n\/\/ Error message.\nfunc (e *InvokeError) Error() string {\n\treturn e.Message\n}\n\n\/\/ Config for a Lambda function.\ntype Config struct {\n\tDescription string `json:\"description\"`\n\tRuntime string `json:\"runtime\" validate:\"nonzero\"`\n\tMemory int64 `json:\"memory\" validate:\"nonzero\"`\n\tTimeout int64 `json:\"timeout\" validate:\"nonzero\"`\n\tRole string `json:\"role\" validate:\"nonzero\"`\n\tHandler string `json:\"handler\" validate:\"nonzero\"`\n\tShim bool `json:\"shim\"`\n\tEnvironment map[string]string `json:\"environment\"`\n\tHooks hooks.Hooks `json:\"hooks\"`\n}\n\n\/\/ Function represents a Lambda function, with configuration loaded\n\/\/ from the \"function.json\" file on disk.\ntype Function struct {\n\tConfig\n\tName string\n\tFunctionName string\n\tPath string\n\tService lambdaiface.LambdaAPI\n\tLog log.Interface\n\tIgnoredPatterns []string\n\tPlugins []string\n}\n\n\/\/ Open the function.json file and prime the config.\nfunc (f *Function) Open() error {\n\tf.Log = f.Log.WithField(\"function\", f.Name)\n\n\tif f.Plugins == nil {\n\t\tf.Plugins = defaultPlugins\n\t}\n\n\tif f.Environment == nil {\n\t\tf.Environment = make(map[string]string)\n\t}\n\n\tp, err := os.Open(filepath.Join(f.Path, \"function.json\"))\n\tif err == nil {\n\t\tif err := json.NewDecoder(p).Decode(&f.Config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := f.hookOpen(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validator.Validate(&f.Config); err != nil {\n\t\treturn fmt.Errorf(\"error opening function %s: %s\", f.Name, err.Error())\n\t}\n\n\tpatterns, err := utils.ReadIgnoreFile(f.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.IgnoredPatterns = append(f.IgnoredPatterns, patterns...)\n\n\treturn nil\n}\n\n\/\/ Setenv sets environment variable `name` to `value`.\nfunc (f *Function) Setenv(name, value string) {\n\tf.Environment[name] = value\n}\n\n\/\/ Deploy generates a zip and creates or deploy the function.\n\/\/ If the configuration hasn't been changed it will deploy only code,\n\/\/ otherwise it will deploy both configuration and code.\nfunc (f *Function) Deploy() error {\n\tf.Log.Info(\"deploying\")\n\n\tzip, err := f.BuildBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := f.hookDeploy(); err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err := f.GetConfig()\n\tif e, ok := err.(awserr.Error); ok {\n\t\tif e.Code() == \"ResourceNotFoundException\" {\n\t\t\treturn f.Create(zip)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.configChanged(config) {\n\t\tf.Log.Info(\"config changed\")\n\t\treturn f.DeployConfigAndCode(zip)\n\t}\n\n\tf.Log.Info(\"config unchanged\")\n\treturn f.DeployCode(zip, config)\n}\n\n\/\/ DeployCode deploys function code when changed.\nfunc (f *Function) DeployCode(zip []byte, config *lambda.GetFunctionOutput) error {\n\tremoteHash := *config.Configuration.CodeSha256\n\tlocalHash := utils.Sha256(zip)\n\n\tif localHash == remoteHash {\n\t\tf.Log.Info(\"code unchanged\")\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"local\": localHash,\n\t\t\"remote\": remoteHash,\n\t}).Debug(\"code changed\")\n\n\treturn f.Update(zip)\n}\n\n\/\/ DeployConfigAndCode updates config and updates function code.\nfunc (f *Function) DeployConfigAndCode(zip []byte) error {\n\tf.Log.Info(\"updating config\")\n\n\t_, err := f.Service.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tMemorySize: &f.Memory,\n\t\tTimeout: &f.Timeout,\n\t\tDescription: &f.Description,\n\t\tRole: &f.Role,\n\t\tHandler: &f.Handler,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn f.Update(zip)\n}\n\n\/\/ Delete the function including all its versions\nfunc (f *Function) Delete() error {\n\tf.Log.Info(\"deleting\")\n\t_, err := f.Service.DeleteFunction(&lambda.DeleteFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n\treturn err\n}\n\n\/\/ GetConfig returns the function configuration.\nfunc (f *Function) GetConfig() (*lambda.GetFunctionOutput, error) {\n\tf.Log.Debug(\"fetching config\")\n\treturn f.Service.GetFunction(&lambda.GetFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n}\n\n\/\/ GetConfigQualifier returns the function configuration for the given qualifier.\nfunc (f *Function) GetConfigQualifier(s string) (*lambda.GetFunctionOutput, error) {\n\tf.Log.Debug(\"fetching config\")\n\treturn f.Service.GetFunction(&lambda.GetFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tQualifier: &s,\n\t})\n}\n\n\/\/ GetConfigCurrent returns the function configuration for the current version.\nfunc (f *Function) GetConfigCurrent() (*lambda.GetFunctionOutput, error) {\n\treturn f.GetConfigQualifier(CurrentAlias)\n}\n\n\/\/ Update the function with the given `zip`.\nfunc (f *Function) Update(zip []byte) error {\n\tf.Log.Info(\"updating function\")\n\n\tupdated, err := f.Service.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tPublish: aws.Bool(true),\n\t\tZipFile: zip,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Info(\"updating alias\")\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: updated.Version,\n\t})\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"version\": *updated.Version,\n\t\t\"name\": f.FunctionName,\n\t}).Info(\"function updated\")\n\n\treturn nil\n}\n\n\/\/ Create the function with the given `zip`.\nfunc (f *Function) Create(zip []byte) error {\n\tf.Log.Info(\"creating function\")\n\n\tcreated, err := f.Service.CreateFunction(&lambda.CreateFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tDescription: &f.Description,\n\t\tMemorySize: &f.Memory,\n\t\tTimeout: &f.Timeout,\n\t\tRuntime: &f.Runtime,\n\t\tHandler: &f.Handler,\n\t\tRole: &f.Role,\n\t\tPublish: aws.Bool(true),\n\t\tCode: &lambda.FunctionCode{\n\t\t\tZipFile: zip,\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Info(\"creating alias\")\n\n\t_, err = f.Service.CreateAlias(&lambda.CreateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tFunctionVersion: created.Version,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tf.Log.WithFields(log.Fields{\n\t\t\"version\": *created.Version,\n\t\t\"name\": f.FunctionName,\n\t}).Info(\"function created\")\n\n\treturn nil\n}\n\n\/\/ Invoke the remote Lambda function, returning the response and logs, if any.\nfunc (f *Function) Invoke(event, context interface{}) (reply, logs io.Reader, err error) {\n\teventBytes, err := json.Marshal(event)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcontextBytes, err := json.Marshal(context)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := f.Service.Invoke(&lambda.InvokeInput{\n\t\tClientContext: aws.String(base64.StdEncoding.EncodeToString(contextBytes)),\n\t\tFunctionName: &f.FunctionName,\n\t\tInvocationType: aws.String(string(RequestResponse)),\n\t\tLogType: aws.String(\"Tail\"),\n\t\tQualifier: aws.String(CurrentAlias),\n\t\tPayload: eventBytes,\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlogs = base64.NewDecoder(base64.StdEncoding, strings.NewReader(*res.LogResult))\n\n\tif res.FunctionError != nil {\n\t\te := &InvokeError{\n\t\t\tHandled: *res.FunctionError == \"Handled\",\n\t\t}\n\n\t\tif err := json.Unmarshal(res.Payload, e); err != nil {\n\t\t\treturn nil, logs, err\n\t\t}\n\n\t\treturn nil, logs, e\n\t}\n\n\treply = bytes.NewReader(res.Payload)\n\treturn reply, logs, nil\n}\n\n\/\/ Rollback the function to the previous.\nfunc (f *Function) Rollback() error {\n\tf.Log.Info(\"rolling back\")\n\n\talias, err := f.Service.GetAlias(&lambda.GetAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Infof(\"current version: %s\", *alias.FunctionVersion)\n\n\tlist, err := f.Service.ListVersionsByFunction(&lambda.ListVersionsByFunctionInput{\n\t\tFunctionName: &f.FunctionName,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tversions := list.Versions[1:] \/\/ remove $LATEST\n\tif len(versions) < 2 {\n\t\treturn errors.New(\"Can't rollback. Only one version deployed.\")\n\t}\n\n\tlatest := *versions[len(versions)-1].Version\n\tprev := *versions[len(versions)-2].Version\n\trollback := latest\n\n\tif *alias.FunctionVersion == latest {\n\t\trollback = prev\n\t}\n\n\tf.Log.Infof(\"rollback to version: %s\", rollback)\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: &rollback,\n\t})\n\n\treturn err\n}\n\n\/\/ RollbackVersion the function to the specified version.\nfunc (f *Function) RollbackVersion(version string) error {\n\tf.Log.Info(\"rolling back\")\n\n\talias, err := f.Service.GetAlias(&lambda.GetAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Log.Infof(\"current version: %s\", *alias.FunctionVersion)\n\n\tif version == *alias.FunctionVersion {\n\t\treturn errors.New(\"Specified version currently deployed.\")\n\t}\n\n\t_, err = f.Service.UpdateAlias(&lambda.UpdateAliasInput{\n\t\tFunctionName: &f.FunctionName,\n\t\tName: aws.String(CurrentAlias),\n\t\tFunctionVersion: &version,\n\t})\n\n\treturn err\n}\n\n\/\/ BuildBytes returns the generated zip as bytes.\nfunc (f *Function) BuildBytes() ([]byte, error) {\n\tr, err := f.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.Log.Infof(\"created build (%s)\", humanize.Bytes(uint64(len(b))))\n\treturn b, nil\n}\n\n\/\/ Build returns the zipped contents of the function.\nfunc (f *Function) Build() (io.Reader, error) {\n\tf.Log.Debugf(\"creating build\")\n\n\tbuf := new(bytes.Buffer)\n\tzip := archive.NewZipWriter(buf)\n\n\tif err := f.hookBuild(zip); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiles, err := utils.LoadFiles(f.Path, f.IgnoredPatterns)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, path := range files {\n\t\tf.Log.WithField(\"file\", path).Debug(\"add file to zip\")\n\n\t\tfile, err := os.Open(filepath.Join(f.Path, path))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := zip.AddFile(path, file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := file.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := zip.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\n\/\/ Clean invokes the CleanHook, useful for removing build artifacts and so on.\nfunc (f *Function) Clean() error {\n\treturn f.hookClean()\n}\n\n\/\/ GroupName returns the CloudWatchLogs group name.\nfunc (f *Function) GroupName() string {\n\treturn fmt.Sprintf(\"\/aws\/lambda\/%s\", f.FunctionName)\n}\n\n\/\/ configChanged checks if function configuration differs from configuration stored in AWS Lambda\nfunc (f *Function) configChanged(config *lambda.GetFunctionOutput) bool {\n\tif f.Description != *config.Configuration.Description {\n\t\treturn true\n\t}\n\n\tif f.Memory != *config.Configuration.MemorySize {\n\t\treturn true\n\t}\n\n\tif f.Timeout != *config.Configuration.Timeout {\n\t\treturn true\n\t}\n\n\tif f.Role != *config.Configuration.Role {\n\t\treturn true\n\t}\n\n\tif f.Handler != *config.Configuration.Handler {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ hookOpen calls Openers.\nfunc (f *Function) hookOpen() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Opener); ok {\n\t\t\tif err := p.Open(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookBuild calls Builders.\nfunc (f *Function) hookBuild(zip *archive.Archive) error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Builder); ok {\n\t\t\tif err := p.Build(f, zip); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookClean calls Cleaners.\nfunc (f *Function) hookClean() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Cleaner); ok {\n\t\t\tif err := p.Clean(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ hookDeploy calls Deployers.\nfunc (f *Function) hookDeploy() error {\n\tfor _, name := range f.Plugins {\n\t\tif p, ok := plugins[name].(Deployer); ok {\n\t\t\tif err := p.Deploy(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Help fix for --exclude-patterns - only \"jfrog rt u\" supports regexps.<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"log\"\n \"github.com\/gorilla\/mux\"\n \"net\/http\"\n \"net\/http\/httputil\"\n \"net\/url\"\n \"github.com\/fhs\/gompd\/mpd\"\n \"github.com\/ascherkus\/go-id3\/src\/id3\"\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"strconv\"\n \"time\"\n)\n\n\/\/ TODO: consider if global is really the best idea, or if we should \n\/\/ make some classes, or something...\nvar mpd_conn *mpd.Client\n\n\nfunc main() {\n mpd_conn = mpdConnect(\"localhost:6600\")\n defer mpd_conn.Close()\n\n \/\/log.Println(mpd_conn)\n if mpd_conn == nil {\n log.Fatal(\"MPD Connection is nil!\")\n }\n\n \/\/ create a new mux router for our server.\n r := mux.NewRouter()\n\n \/\/ requests to `\/stream` are proxied to the MPD httpd.\n r.HandleFunc(\"\/stream\", \n httputil.NewSingleHostReverseProxy(\n &url.URL{\n Scheme:\"http\", \n Host: \"localhost:8000\", \n Path: \"\/\",\n }).ServeHTTP)\n\n r.HandleFunc(\"\/songs\", listSongs)\n r.HandleFunc(\"\/current\", getCurrentSong)\n r.HandleFunc(\"\/upcoming\", getUpcomingSongs)\n\n \/\/ This MUST go last! It takes precidence over any after it, meaning\n \/\/ the server will try to serve a file, which most likely doesn't exist,\n \/\/ and will 404.\n \/\/\n \/\/ serve up the frontend files.\n r.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(\"..\/frontend\/turbo_wookie\/web\")))\n\n\n \/\/ sit, waiting, like a hunter, spying on its prey.\n log.Println(\"Starting server on port 9000\")\n http.ListenAndServe(\":9000\", r)\n}\n\n\n\/********************\n Handler Functions\n ********************\/\n\n\/\/ return all songs known to MPD to the client.\nfunc listSongs(w http.ResponseWriter, r *http.Request) {\n \/\/ get all files from MPD\n mpdfiles, err := mpd_conn.GetFiles()\n if err != nil {\n log.Println(\"Couldn't get a list of files...\")\n log.Fatal(err)\n }\n\n \/\/ create a slice of id3.File s\n files := make([]*id3.File, 0)\n\n for _, song := range mpdfiles {\n \/\/ grab the file on the filesystem\n file, err := os.Open(\"mpd\/music\/\" + song)\n if err != nil {\n log.Println(\"Couldn't open file: \" + song)\n log.Fatal(err)\n }\n\n \/\/ add the current file to our slice\n id3_file := id3.Read(file)\n files = append(files, id3_file)\n }\n\n \/\/ send the json to the client.\n fmt.Fprintf(w, jsoniffy(files))\n}\n\n\n\/\/ Return a JSON representation of the currently playing song.\nfunc getCurrentSong(w http.ResponseWriter, r *http.Request) {\n currentSong, err := mpd_conn.CurrentSong()\n if err != nil {\n log.Println(\"Couldn't get current song info\")\n log.Fatal(err)\n }\n\n fmt.Fprintf(w, jsoniffy(currentSong))\n}\n\n\nfunc getUpcomingSongs(w http.ResponseWriter, r *http.Request) {\n currentSong, err := mpd_conn.CurrentSong()\n if err != nil {\n\n count := 0;\n for err != nil && count < 10 {\n time.Sleep(10)\n\n currentSong, err = mpd_conn.CurrentSong()\n count ++\n }\n\n if err != nil {\n log.Println(\"Couldn't get current song info for upcoming list\")\n log.Fatal(err)\n }\n }\n\n pos, err := strconv.Atoi(currentSong[\"Pos\"])\n if err != nil {\n log.Fatal(err)\n }\n\n playlist, err := mpd_conn.PlaylistInfo(-1, -1)\n if err != nil {\n log.Fatal(err)\n }\n\n upcoming := playlist[pos:]\n\n fmt.Fprintf(w, jsoniffy(upcoming))\n}\n\n\n\n\/*******************\n Helper Functions \n *******************\/\n\n\/\/ Connect to MPD's control channel, and set the global mpd_conn to it.\nfunc mpdConnect(url string) *mpd.Client {\n conn, err := mpd.Dial(\"tcp\", url)\n \n \/\/ if we can't connect to MPD everything's fucked, nothing's going to work\n \/\/ kill all humans, and die, respectfully, after explaining what the issue\n \/\/ is.\n if err != nil {\n log.Println(\"\\n\\nServer quiting because it can't connect to MPD\");\n log.Println(err)\n\n return nil\n }\n\n return conn\n}\n\n\n\/\/ turn anything into JSON.\nfunc jsoniffy(v interface {}) string {\n obj, err := json.MarshalIndent(v, \"\", \" \")\n if err != nil {\n log.Print(\"Couldn't turn something into JSON: \", v)\n log.Fatal(err)\n }\n\n return string(obj)\n}<commit_msg>so maybe this time?<commit_after>package main\n\nimport (\n \"log\"\n \"github.com\/gorilla\/mux\"\n \"net\/http\"\n \"net\/http\/httputil\"\n \"net\/url\"\n \"github.com\/fhs\/gompd\/mpd\"\n \"github.com\/ascherkus\/go-id3\/src\/id3\"\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"strconv\"\n \"time\"\n)\n\n\/\/ TODO: consider if global is really the best idea, or if we should \n\/\/ make some classes, or something...\nvar mpd_conn *mpd.Client\n\n\nfunc main() {\n mpd_conn = mpdConnect(\"localhost:6600\")\n defer mpd_conn.Close()\n\n \/\/log.Println(mpd_conn)\n if mpd_conn == nil {\n log.Fatal(\"MPD Connection is nil!\")\n }\n\n \/\/ create a new mux router for our server.\n r := mux.NewRouter()\n\n \/\/ requests to `\/stream` are proxied to the MPD httpd.\n r.HandleFunc(\"\/stream\", \n httputil.NewSingleHostReverseProxy(\n &url.URL{\n Scheme:\"http\", \n Host: \"localhost:8000\", \n Path: \"\/\",\n }).ServeHTTP)\n\n r.HandleFunc(\"\/songs\", listSongs)\n r.HandleFunc(\"\/current\", getCurrentSong)\n r.HandleFunc(\"\/upcoming\", getUpcomingSongs)\n\n \/\/ This MUST go last! It takes precidence over any after it, meaning\n \/\/ the server will try to serve a file, which most likely doesn't exist,\n \/\/ and will 404.\n \/\/\n \/\/ serve up the frontend files.\n r.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(\"..\/frontend\/turbo_wookie\/web\")))\n\n\n \/\/ sit, waiting, like a hunter, spying on its prey.\n log.Println(\"Starting server on port 9000\")\n http.ListenAndServe(\":9000\", r)\n}\n\n\n\/********************\n Handler Functions\n ********************\/\n\n\/\/ return all songs known to MPD to the client.\nfunc listSongs(w http.ResponseWriter, r *http.Request) {\n \/\/ get all files from MPD\n mpdfiles, err := mpd_conn.GetFiles()\n if err != nil {\n log.Println(\"Couldn't get a list of files...\")\n log.Fatal(err)\n }\n\n \/\/ create a slice of id3.File s\n files := make([]*id3.File, 0)\n\n for _, song := range mpdfiles {\n \/\/ grab the file on the filesystem\n file, err := os.Open(\"mpd\/music\/\" + song)\n if err != nil {\n log.Println(\"Couldn't open file: \" + song)\n log.Fatal(err)\n }\n\n \/\/ add the current file to our slice\n id3_file := id3.Read(file)\n files = append(files, id3_file)\n }\n\n \/\/ send the json to the client.\n fmt.Fprintf(w, jsoniffy(files))\n}\n\n\n\/\/ Return a JSON representation of the currently playing song.\nfunc getCurrentSong(w http.ResponseWriter, r *http.Request) {\n currentSong, err := mpd_conn.CurrentSong()\n if err != nil {\n\n count := 0;\n for err != nil && count < 10 {\n time.Sleep(10)\n\n currentSong, err = mpd_conn.CurrentSong()\n count ++\n }\n\n if err != nil {\n log.Println(\"Couldn't get current song info for upcoming list\")\n log.Fatal(err)\n }\n }\n\n fmt.Fprintf(w, jsoniffy(currentSong))\n}\n\n\nfunc getUpcomingSongs(w http.ResponseWriter, r *http.Request) {\n currentSong, err := mpd_conn.CurrentSong()\n if err != nil {\n\n count := 0;\n for err != nil && count < 10 {\n time.Sleep(10)\n\n currentSong, err = mpd_conn.CurrentSong()\n count ++\n }\n\n if err != nil {\n log.Println(\"Couldn't get current song info for upcoming list\")\n log.Fatal(err)\n }\n }\n\n pos, err := strconv.Atoi(currentSong[\"Pos\"])\n if err != nil {\n log.Fatal(err)\n }\n\n playlist, err := mpd_conn.PlaylistInfo(-1, -1)\n if err != nil {\n log.Fatal(err)\n }\n\n upcoming := playlist[pos:]\n\n fmt.Fprintf(w, jsoniffy(upcoming))\n}\n\n\n\n\/*******************\n Helper Functions \n *******************\/\n\n\/\/ Connect to MPD's control channel, and set the global mpd_conn to it.\nfunc mpdConnect(url string) *mpd.Client {\n conn, err := mpd.Dial(\"tcp\", url)\n \n \/\/ if we can't connect to MPD everything's fucked, nothing's going to work\n \/\/ kill all humans, and die, respectfully, after explaining what the issue\n \/\/ is.\n if err != nil {\n log.Println(\"\\n\\nServer quiting because it can't connect to MPD\");\n log.Println(err)\n\n return nil\n }\n\n return conn\n}\n\n\n\/\/ turn anything into JSON.\nfunc jsoniffy(v interface {}) string {\n obj, err := json.MarshalIndent(v, \"\", \" \")\n if err != nil {\n log.Print(\"Couldn't turn something into JSON: \", v)\n log.Fatal(err)\n }\n\n return string(obj)\n}\n\n<|endoftext|>"} {"text":"<commit_before>package ingester\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tnsq \"github.com\/bitly\/go-nsq\"\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/mattheath\/phosphor\/domain\"\n\ttraceproto \"github.com\/mattheath\/phosphor\/proto\"\n\t\"github.com\/mattheath\/phosphor\/store\"\n)\n\nvar (\n\ttopic = \"trace\"\n\tchannel = \"phosphor-server\"\n\n\tmaxInFlight = 200\n)\n\nfunc Run(nsqLookupdHTTPAddrs []string, store *store.MemoryStore) {\n\n\tcfg := nsq.NewConfig()\n\tcfg.UserAgent = fmt.Sprintf(\"phosphor go-nsq\/%s\", nsq.VERSION)\n\tcfg.MaxInFlight = maxInFlight\n\n\tconsumer, err := nsq.NewConsumer(topic, channel, cfg)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(1)\n\t}\n\n\tconsumer.AddHandler(&IngestionHandler{})\n\n\terr = consumer.ConnectToNSQLookupds(nsqLookupdHTTPAddrs)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Block until exit\n\t<-consumer.StopChan\n}\n\n\/\/ IngestionHandler exists to match the NSQ handler interface\ntype IngestionHandler struct{}\n\n\/\/ HandleMessage delivered by NSQ\nfunc (ih *IngestionHandler) HandleMessage(message *nsq.Message) error {\n\n\tp := &traceproto.TraceFrame{}\n\terr := proto.Unmarshal(message.Body, p)\n\tif err != nil {\n\t\t\/\/ returning an error to NSQ will requeue this\n\t\t\/\/ failure to unmarshal is permanent\n\t\treturn nil\n\t}\n\n\tt := domain.FrameFromProto(p)\n\n\tlog.Debugf(\"Received trace: %+v\", t)\n\n\treturn nil\n}\n<commit_msg>Update ingester to take store interface<commit_after>package ingester\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tnsq \"github.com\/bitly\/go-nsq\"\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/mattheath\/phosphor\/domain\"\n\ttraceproto \"github.com\/mattheath\/phosphor\/proto\"\n\t\"github.com\/mattheath\/phosphor\/store\"\n)\n\nvar (\n\ttopic = \"trace\"\n\tchannel = \"phosphor-server\"\n\n\tmaxInFlight = 200\n)\n\nfunc Run(nsqLookupdHTTPAddrs []string, st store.Store) {\n\n\tcfg := nsq.NewConfig()\n\tcfg.UserAgent = fmt.Sprintf(\"phosphor go-nsq\/%s\", nsq.VERSION)\n\tcfg.MaxInFlight = maxInFlight\n\n\tconsumer, err := nsq.NewConsumer(topic, channel, cfg)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(1)\n\t}\n\n\tconsumer.AddHandler(&IngestionHandler{})\n\n\terr = consumer.ConnectToNSQLookupds(nsqLookupdHTTPAddrs)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Block until exit\n\t<-consumer.StopChan\n}\n\n\/\/ IngestionHandler exists to match the NSQ handler interface\ntype IngestionHandler struct{}\n\n\/\/ HandleMessage delivered by NSQ\nfunc (ih *IngestionHandler) HandleMessage(message *nsq.Message) error {\n\n\tp := &traceproto.TraceFrame{}\n\terr := proto.Unmarshal(message.Body, p)\n\tif err != nil {\n\t\t\/\/ returning an error to NSQ will requeue this\n\t\t\/\/ failure to unmarshal is permanent\n\t\treturn nil\n\t}\n\n\tt := domain.FrameFromProto(p)\n\n\tlog.Debugf(\"Received trace: %+v\", t)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (C) 2013 The Docker Cloud authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage backends\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/engine\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n)\n\n\/\/ The Cloud interface provides the contract that cloud providers should implement to enable\n\/\/ running Docker containers in their cloud.\n\/\/ TODO(bburns): Restructure this into Cloud, Instance and Tunnel interfaces\ntype Cloud interface {\n\t\/\/ GetPublicIPAddress returns the stringified address (e.g \"1.2.3.4\") of the runtime\n\tGetPublicIPAddress(name string, zone string) (string, error)\n\n\t\/\/ CreateInstance creates a virtual machine instance given a name and a zone. Returns the\n\t\/\/ IP address of the instance. Waits until Docker is up and functioning on the machine\n\t\/\/ before returning.\n\tCreateInstance(name string, zone string) (string, error)\n\n\t\/\/ DeleteInstance deletes a virtual machine instance, given the instance name and zone.\n\tDeleteInstance(name string, zone string) error\n\n\t\/\/ Open a secure tunnel (generally SSH) between the local host and a remote host.\n\tOpenSecureTunnel(name string, zone string, localPort int, remotePort int) (*os.Process, error)\n}\n\nfunc CloudBackend() engine.Installer {\n\treturn &cloud{}\n}\n\ntype cloud struct {\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tTty bool\n}\n\n\/\/ returns true, if the connection was successful, false otherwise\ntype Tunnel struct {\n\turl.URL\n}\n\nfunc (t Tunnel) isActive() bool {\n\t_, err := http.Get(t.String())\n\treturn err == nil\n}\n\nfunc (s *cloud) Install(eng *engine.Engine) error {\n\teng.Register(\"cloud\", func(job *engine.Job) engine.Status {\n\t\tif len(job.Args) < 3 {\n\t\t\treturn job.Errorf(\"usage: %s <provider> <instance> <zone> <aditional> <provider> <args> <proto>:\/\/<addr>\", job.Name)\n\t\t}\n\t\tinstance := job.Args[1]\n\t\tzone := job.Args[2]\n\t\tvar cloud Cloud\n\t\tvar err error\n\t\tswitch job.Args[0] {\n\t\tcase \"gce\":\n\t\t\tif len(job.Args) < 4 {\n\t\t\t\treturn job.Errorf(\"usage: %s gce <instance> <zone> <project>\")\n\t\t\t}\n\t\t\tcloud, err = NewCloudGCE(job.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tjob.Errorf(\"Unexpected error: %#v\", err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn job.Errorf(\"Unknown cloud provider: %s\", job.Args[0])\n\t\t}\n\t\tip, err := cloud.GetPublicIPAddress(instance, zone)\n\t\tinstanceRunning := len(ip) > 0\n\t\tif !instanceRunning {\n\t\t\tlog.Print(\"Instance doesn't exist, creating....\")\n\t\t\t_, err = cloud.CreateInstance(instance, zone)\n\t\t}\n\t\tif err != nil {\n\t\t\tjob.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\tremotePort := 8000\n\t\tlocalPort := 8001\n\t\tapiVersion := \"v1.10\"\n\t\ttunnelUrl, err := url.Parse(fmt.Sprintf(\"http:\/\/localhost:%d\/%s\/containers\/json\", localPort, apiVersion))\n\t\tif err != nil {\n\t\t\treturn job.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\ttunnel := Tunnel{*tunnelUrl}\n\n\t\tif !tunnel.isActive() {\n\t\t\tfmt.Printf(\"Creating tunnel\")\n\t\t\t_, err = cloud.OpenSecureTunnel(instance, zone, localPort, remotePort)\n\t\t\tif err != nil {\n\t\t\t\tjob.Errorf(\"Failed to open tunnel: %#v\", err)\n\t\t\t}\n\t\t}\n\t\thost := fmt.Sprintf(\"tcp:\/\/localhost:%d\", localPort)\n\t\tclient, err := newClient(host, apiVersion)\n\t\tif err != nil {\n\t\t\tjob.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\t\/\/job.Eng.Register(\"inspect\", func(job *engine.Job) engine.Status {\n\t\t\/\/\tresp, err := client.call(\"GET\", \"\/containers\/\n\t\tjob.Eng.Register(\"create\", func(job *engine.Job) engine.Status {\n\t\t\tcontainer := Container{\n\t\t\t\tImage: job.Getenv(\"Image\"),\n\t\t\t\tTty: job.Getenv(\"Tty\") == \"true\",\n\t\t\t}\n\t\t\tdata, err := json.Marshal(container)\n\t\t\tresp, err := client.call(\"POST\", \"\/containers\/create\", string(data))\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: post: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tvar containerOut Container\n\t\t\terr = json.Unmarshal([]byte(body), &containerOut)\n\t\t\t_, err = job.Printf(\"%s\\n\", containerOut.Id)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: write body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%s\", string(body))\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"start\", func(job *engine.Job) engine.Status {\n\t\t\tpath := fmt.Sprintf(\"\/containers\/%s\/start\", job.Args[0])\n\t\t\tresp, err := client.call(\"POST\", path, \"{\\\"Binds\\\":[],\\\"ContainerIDFile\\\":\\\"\\\",\\\"LxcConf\\\":[],\\\"Privileged\\\":false,\\\"PortBindings\\\":{},\\\"Links\\\":null,\\\"PublishAllPorts\\\":false,\\\"Dns\\\":null,\\\"DnsSearch\\\":[],\\\"VolumesFrom\\\":[]}\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: post: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%s\", string(body))\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"containers\", func(job *engine.Job) engine.Status {\n\t\t\tpath := fmt.Sprintf(\n\t\t\t\t\"\/containers\/json?all=%s&size=%s&since=%s&before=%s&limit=%s\",\n\t\t\t\turl.QueryEscape(job.Getenv(\"all\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"size\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"since\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"before\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"limit\")),\n\t\t\t)\n\t\t\tresp, err := client.call(\"GET\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: get: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\t\/\/ FIXME: check for response error\n\t\t\tc := engine.NewTable(\"Created\", 0)\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tfmt.Printf(\"---> '%s'\\n\", body)\n\t\t\tif _, err := c.ReadListFrom(body); err != nil {\n\t\t\t\treturn job.Errorf(\"%s: readlist: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tc.WriteListTo(job.Stdout)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"container_delete\", func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v\", job.Args)\n\t\t\tpath := \"\/containers\/\" + job.Args[0]\n\n\t\t\tresp, err := client.call(\"DELETE\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: delete: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", resp)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"stop\", func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v\", job.Args)\n\t\t\tpath := \"\/containers\/\" + job.Args[0] + \"\/stop\"\n\n\t\t\tresp, err := client.call(\"POST\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: delete: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", resp)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.RegisterCatchall(func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v %#v %#v\", *job, job.Env(), job.Args)\n\t\t\treturn engine.StatusOK\n\t\t})\n\t\treturn engine.StatusOK\n\t})\n\treturn nil\n}\n<commit_msg>Add some missing return statements<commit_after>\/\/\n\/\/ Copyright (C) 2013 The Docker Cloud authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage backends\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/engine\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n)\n\n\/\/ The Cloud interface provides the contract that cloud providers should implement to enable\n\/\/ running Docker containers in their cloud.\n\/\/ TODO(bburns): Restructure this into Cloud, Instance and Tunnel interfaces\ntype Cloud interface {\n\t\/\/ GetPublicIPAddress returns the stringified address (e.g \"1.2.3.4\") of the runtime\n\tGetPublicIPAddress(name string, zone string) (string, error)\n\n\t\/\/ CreateInstance creates a virtual machine instance given a name and a zone. Returns the\n\t\/\/ IP address of the instance. Waits until Docker is up and functioning on the machine\n\t\/\/ before returning.\n\tCreateInstance(name string, zone string) (string, error)\n\n\t\/\/ DeleteInstance deletes a virtual machine instance, given the instance name and zone.\n\tDeleteInstance(name string, zone string) error\n\n\t\/\/ Open a secure tunnel (generally SSH) between the local host and a remote host.\n\tOpenSecureTunnel(name string, zone string, localPort int, remotePort int) (*os.Process, error)\n}\n\nfunc CloudBackend() engine.Installer {\n\treturn &cloud{}\n}\n\ntype cloud struct {\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tTty bool\n}\n\n\/\/ returns true, if the connection was successful, false otherwise\ntype Tunnel struct {\n\turl.URL\n}\n\nfunc (t Tunnel) isActive() bool {\n\t_, err := http.Get(t.String())\n\treturn err == nil\n}\n\nfunc (s *cloud) Install(eng *engine.Engine) error {\n\teng.Register(\"cloud\", func(job *engine.Job) engine.Status {\n\t\tif len(job.Args) < 3 {\n\t\t\treturn job.Errorf(\"usage: %s <provider> <instance> <zone> <aditional> <provider> <args> <proto>:\/\/<addr>\", job.Name)\n\t\t}\n\t\tinstance := job.Args[1]\n\t\tzone := job.Args[2]\n\t\tvar cloud Cloud\n\t\tvar err error\n\t\tswitch job.Args[0] {\n\t\tcase \"gce\":\n\t\t\tif len(job.Args) < 4 {\n\t\t\t\treturn job.Errorf(\"usage: %s gce <instance> <zone> <project>\")\n\t\t\t}\n\t\t\tcloud, err = NewCloudGCE(job.Args[3])\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"Unexpected error: %#v\", err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn job.Errorf(\"Unknown cloud provider: %s\", job.Args[0])\n\t\t}\n\t\tip, err := cloud.GetPublicIPAddress(instance, zone)\n\t\tinstanceRunning := len(ip) > 0\n\t\tif !instanceRunning {\n\t\t\tlog.Print(\"Instance doesn't exist, creating....\")\n\t\t\t_, err = cloud.CreateInstance(instance, zone)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn job.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\tremotePort := 8000\n\t\tlocalPort := 8001\n\t\tapiVersion := \"v1.10\"\n\t\ttunnelUrl, err := url.Parse(fmt.Sprintf(\"http:\/\/localhost:%d\/%s\/containers\/json\", localPort, apiVersion))\n\t\tif err != nil {\n\t\t\treturn job.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\ttunnel := Tunnel{*tunnelUrl}\n\n\t\tif !tunnel.isActive() {\n\t\t\tfmt.Printf(\"Creating tunnel\")\n\t\t\t_, err = cloud.OpenSecureTunnel(instance, zone, localPort, remotePort)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"Failed to open tunnel: %#v\", err)\n\t\t\t}\n\t\t}\n\t\thost := fmt.Sprintf(\"tcp:\/\/localhost:%d\", localPort)\n\t\tclient, err := newClient(host, apiVersion)\n\t\tif err != nil {\n\t\t\treturn job.Errorf(\"Unexpected error: %#v\", err)\n\t\t}\n\t\t\/\/job.Eng.Register(\"inspect\", func(job *engine.Job) engine.Status {\n\t\t\/\/\tresp, err := client.call(\"GET\", \"\/containers\/\n\t\tjob.Eng.Register(\"create\", func(job *engine.Job) engine.Status {\n\t\t\tcontainer := Container{\n\t\t\t\tImage: job.Getenv(\"Image\"),\n\t\t\t\tTty: job.Getenv(\"Tty\") == \"true\",\n\t\t\t}\n\t\t\tdata, err := json.Marshal(container)\n\t\t\tresp, err := client.call(\"POST\", \"\/containers\/create\", string(data))\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: post: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tvar containerOut Container\n\t\t\terr = json.Unmarshal([]byte(body), &containerOut)\n\t\t\t_, err = job.Printf(\"%s\\n\", containerOut.Id)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: write body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%s\", string(body))\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"start\", func(job *engine.Job) engine.Status {\n\t\t\tpath := fmt.Sprintf(\"\/containers\/%s\/start\", job.Args[0])\n\t\t\tresp, err := client.call(\"POST\", path, \"{\\\"Binds\\\":[],\\\"ContainerIDFile\\\":\\\"\\\",\\\"LxcConf\\\":[],\\\"Privileged\\\":false,\\\"PortBindings\\\":{},\\\"Links\\\":null,\\\"PublishAllPorts\\\":false,\\\"Dns\\\":null,\\\"DnsSearch\\\":[],\\\"VolumesFrom\\\":[]}\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: post: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %#v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%s\", string(body))\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"containers\", func(job *engine.Job) engine.Status {\n\t\t\tpath := fmt.Sprintf(\n\t\t\t\t\"\/containers\/json?all=%s&size=%s&since=%s&before=%s&limit=%s\",\n\t\t\t\turl.QueryEscape(job.Getenv(\"all\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"size\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"since\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"before\")),\n\t\t\t\turl.QueryEscape(job.Getenv(\"limit\")),\n\t\t\t)\n\t\t\tresp, err := client.call(\"GET\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: get: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\t\/\/ FIXME: check for response error\n\t\t\tc := engine.NewTable(\"Created\", 0)\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: read body: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tfmt.Printf(\"---> '%s'\\n\", body)\n\t\t\tif _, err := c.ReadListFrom(body); err != nil {\n\t\t\t\treturn job.Errorf(\"%s: readlist: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tc.WriteListTo(job.Stdout)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"container_delete\", func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v\", job.Args)\n\t\t\tpath := \"\/containers\/\" + job.Args[0]\n\n\t\t\tresp, err := client.call(\"DELETE\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: delete: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", resp)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.Register(\"stop\", func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v\", job.Args)\n\t\t\tpath := \"\/containers\/\" + job.Args[0] + \"\/stop\"\n\n\t\t\tresp, err := client.call(\"POST\", path, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn job.Errorf(\"%s: delete: %v\", client.URL.String(), err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", resp)\n\t\t\treturn engine.StatusOK\n\t\t})\n\n\t\tjob.Eng.RegisterCatchall(func(job *engine.Job) engine.Status {\n\t\t\tlog.Printf(\"%#v %#v %#v\", *job, job.Env(), job.Args)\n\t\t\treturn engine.StatusOK\n\t\t})\n\t\treturn engine.StatusOK\n\t})\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/watch\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\n\/\/ This test suite can take a long time to run, so by default it is added to\n\/\/ the ginkgo.skip list (see driver.go).\n\/\/ To run this suite you must explicitly ask for it by setting the\n\/\/ -t\/--test flag or ginkgo.focus flag.\nvar _ = Describe(\"Density\", func() {\n\tvar c *client.Client\n\tvar minionCount int\n\tvar RCName string\n\tvar ns string\n\tvar uuid string\n\n\tBeforeEach(func() {\n\t\tvar err error\n\t\tc, err = loadClient()\n\t\texpectNoError(err)\n\t\tminions, err := c.Nodes().List(labels.Everything(), fields.Everything())\n\t\texpectNoError(err)\n\t\tminionCount = len(minions.Items)\n\t\tExpect(minionCount).NotTo(BeZero())\n\t\tnsForTesting, err := createTestingNS(\"density\", c)\n\t\tns = nsForTesting.Name\n\t\texpectNoError(err)\n\t\tuuid = string(util.NewUUID())\n\n\t\texpectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), 0777))\n\t\texpectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), \"before\"))\n\t})\n\n\tAfterEach(func() {\n\t\t\/\/ Remove any remaining pods from this test if the\n\t\t\/\/ replication controller still exists and the replica count\n\t\t\/\/ isn't 0. This means the controller wasn't cleaned up\n\t\t\/\/ during the test so clean it up here\n\t\trc, err := c.ReplicationControllers(ns).Get(RCName)\n\t\tif err == nil && rc.Spec.Replicas != 0 {\n\t\t\tBy(\"Cleaning up the replication controller\")\n\t\t\terr := DeleteRC(c, ns, RCName)\n\t\t\texpectNoError(err)\n\t\t}\n\n\t\tBy(fmt.Sprintf(\"Destroying namespace for this suite %v\", ns))\n\t\tif err := c.Namespaces().Delete(ns); err != nil {\n\t\t\tFailf(\"Couldn't delete ns %s\", err)\n\t\t}\n\n\t\texpectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), \"after\"))\n\n\t\t\/\/ Verify latency metrics\n\t\t\/\/ TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.\n\t\thighLatencyRequests, err := HighLatencyRequests(c, 2*time.Second, util.NewStringSet(\"events\"))\n\t\texpectNoError(err)\n\t\tExpect(highLatencyRequests).NotTo(BeNumerically(\">\", 0))\n\t})\n\n\t\/\/ Tests with \"Skipped\" substring in their name will be skipped when running\n\t\/\/ e2e test suite without --ginkgo.focus & --ginkgo.skip flags.\n\ttype Density struct {\n\t\tskip bool\n\t\tpodsPerMinion int\n\t\t\/* Controls how often the apiserver is polled for pods *\/\n\t\tinterval int\n\t}\n\n\tdensityTests := []Density{\n\t\t\/\/ This test should always run, even if larger densities are skipped.\n\t\t{podsPerMinion: 3, skip: false, interval: 10},\n\t\t{podsPerMinion: 30, skip: false, interval: 10},\n\t\t\/\/ More than 30 pods per node is outside our v1.0 goals.\n\t\t\/\/ We might want to enable those tests in the future.\n\t\t{podsPerMinion: 50, skip: true, interval: 10},\n\t\t{podsPerMinion: 100, skip: true, interval: 1},\n\t}\n\n\tfor _, testArg := range densityTests {\n\t\tname := fmt.Sprintf(\"should allow starting %d pods per node\", testArg.podsPerMinion)\n\t\tif testArg.podsPerMinion <= 30 {\n\t\t\tname = \"[Performance suite] \" + name\n\t\t}\n\t\tif testArg.skip {\n\t\t\tname = \"[Skipped] \" + name\n\t\t}\n\t\titArg := testArg\n\t\tIt(name, func() {\n\t\t\ttotalPods := itArg.podsPerMinion * minionCount\n\t\t\tRCName = \"density\" + strconv.Itoa(totalPods) + \"-\" + uuid\n\t\t\tfileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+\"\/%s\/pod_states.csv\", uuid))\n\t\t\texpectNoError(err)\n\t\t\tdefer fileHndl.Close()\n\n\t\t\tconfig := RCConfig{Client: c,\n\t\t\t\tImage: \"gcr.io\/google_containers\/pause:go\",\n\t\t\t\tName: RCName,\n\t\t\t\tNamespace: ns,\n\t\t\t\tPollInterval: itArg.interval,\n\t\t\t\tPodStatusFile: fileHndl,\n\t\t\t\tReplicas: totalPods,\n\t\t\t}\n\n\t\t\t\/\/ Create a listener for events.\n\t\t\tevents := make([](*api.Event), 0)\n\t\t\t_, controller := framework.NewInformer(\n\t\t\t\t&cache.ListWatch{\n\t\t\t\t\tListFunc: func() (runtime.Object, error) {\n\t\t\t\t\t\treturn c.Events(ns).List(labels.Everything(), fields.Everything())\n\t\t\t\t\t},\n\t\t\t\t\tWatchFunc: func(rv string) (watch.Interface, error) {\n\t\t\t\t\t\treturn c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&api.Event{},\n\t\t\t\t0,\n\t\t\t\tframework.ResourceEventHandlerFuncs{\n\t\t\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t\t\tevents = append(events, obj.(*api.Event))\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\tstop := make(chan struct{})\n\t\t\tgo controller.Run(stop)\n\n\t\t\t\/\/ Start the replication controller.\n\t\t\tstartTime := time.Now()\n\t\t\texpectNoError(RunRC(config))\n\t\t\te2eStartupTime := time.Now().Sub(startTime)\n\t\t\tLogf(\"E2E startup time for %d pods: %v\", totalPods, e2eStartupTime)\n\n\t\t\tBy(\"Waiting for all events to be recorded\")\n\t\t\tlast := -1\n\t\t\tcurrent := len(events)\n\t\t\ttimeout := 10 * time.Minute\n\t\t\tfor start := time.Now(); last < current && time.Since(start) < timeout; time.Sleep(10 * time.Second) {\n\t\t\t\tlast = current\n\t\t\t\tcurrent = len(events)\n\t\t\t}\n\t\t\tclose(stop)\n\n\t\t\tif current != last {\n\t\t\t\tLogf(\"Warning: Not all events were recorded after waiting %.2f minutes\", timeout.Minutes())\n\t\t\t}\n\t\t\tLogf(\"Found %d events\", current)\n\n\t\t\t\/\/ Tune the threshold for allowed failures.\n\t\t\tbadEvents := BadEvents(events)\n\t\t\tExpect(badEvents).NotTo(BeNumerically(\">\", int(math.Floor(0.01*float64(totalPods)))))\n\t\t})\n\t}\n})\n<commit_msg>Print latency metrics before the density test<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/watch\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\n\/\/ This test suite can take a long time to run, so by default it is added to\n\/\/ the ginkgo.skip list (see driver.go).\n\/\/ To run this suite you must explicitly ask for it by setting the\n\/\/ -t\/--test flag or ginkgo.focus flag.\nvar _ = Describe(\"Density\", func() {\n\tvar c *client.Client\n\tvar minionCount int\n\tvar RCName string\n\tvar ns string\n\tvar uuid string\n\n\tBeforeEach(func() {\n\t\tvar err error\n\t\tc, err = loadClient()\n\t\texpectNoError(err)\n\t\tminions, err := c.Nodes().List(labels.Everything(), fields.Everything())\n\t\texpectNoError(err)\n\t\tminionCount = len(minions.Items)\n\t\tExpect(minionCount).NotTo(BeZero())\n\t\tnsForTesting, err := createTestingNS(\"density\", c)\n\t\tns = nsForTesting.Name\n\t\texpectNoError(err)\n\t\tuuid = string(util.NewUUID())\n\n\t\t\/\/ Print latency metrics before the test.\n\t\t\/\/ TODO: Remove this once we reset metrics before the test.\n\t\t_, err = HighLatencyRequests(c, 2*time.Second, util.NewStringSet(\"events\"))\n\t\texpectNoError(err)\n\n\t\texpectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), 0777))\n\t\texpectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), \"before\"))\n\t})\n\n\tAfterEach(func() {\n\t\t\/\/ Remove any remaining pods from this test if the\n\t\t\/\/ replication controller still exists and the replica count\n\t\t\/\/ isn't 0. This means the controller wasn't cleaned up\n\t\t\/\/ during the test so clean it up here\n\t\trc, err := c.ReplicationControllers(ns).Get(RCName)\n\t\tif err == nil && rc.Spec.Replicas != 0 {\n\t\t\tBy(\"Cleaning up the replication controller\")\n\t\t\terr := DeleteRC(c, ns, RCName)\n\t\t\texpectNoError(err)\n\t\t}\n\n\t\tBy(fmt.Sprintf(\"Destroying namespace for this suite %v\", ns))\n\t\tif err := c.Namespaces().Delete(ns); err != nil {\n\t\t\tFailf(\"Couldn't delete ns %s\", err)\n\t\t}\n\n\t\texpectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+\"\/%s\", uuid), \"after\"))\n\n\t\t\/\/ Verify latency metrics\n\t\t\/\/ TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.\n\t\thighLatencyRequests, err := HighLatencyRequests(c, 2*time.Second, util.NewStringSet(\"events\"))\n\t\texpectNoError(err)\n\t\tExpect(highLatencyRequests).NotTo(BeNumerically(\">\", 0))\n\t})\n\n\t\/\/ Tests with \"Skipped\" substring in their name will be skipped when running\n\t\/\/ e2e test suite without --ginkgo.focus & --ginkgo.skip flags.\n\ttype Density struct {\n\t\tskip bool\n\t\tpodsPerMinion int\n\t\t\/* Controls how often the apiserver is polled for pods *\/\n\t\tinterval int\n\t}\n\n\tdensityTests := []Density{\n\t\t\/\/ This test should always run, even if larger densities are skipped.\n\t\t{podsPerMinion: 3, skip: false, interval: 10},\n\t\t{podsPerMinion: 30, skip: false, interval: 10},\n\t\t\/\/ More than 30 pods per node is outside our v1.0 goals.\n\t\t\/\/ We might want to enable those tests in the future.\n\t\t{podsPerMinion: 50, skip: true, interval: 10},\n\t\t{podsPerMinion: 100, skip: true, interval: 1},\n\t}\n\n\tfor _, testArg := range densityTests {\n\t\tname := fmt.Sprintf(\"should allow starting %d pods per node\", testArg.podsPerMinion)\n\t\tif testArg.podsPerMinion <= 30 {\n\t\t\tname = \"[Performance suite] \" + name\n\t\t}\n\t\tif testArg.skip {\n\t\t\tname = \"[Skipped] \" + name\n\t\t}\n\t\titArg := testArg\n\t\tIt(name, func() {\n\t\t\ttotalPods := itArg.podsPerMinion * minionCount\n\t\t\tRCName = \"density\" + strconv.Itoa(totalPods) + \"-\" + uuid\n\t\t\tfileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+\"\/%s\/pod_states.csv\", uuid))\n\t\t\texpectNoError(err)\n\t\t\tdefer fileHndl.Close()\n\n\t\t\tconfig := RCConfig{Client: c,\n\t\t\t\tImage: \"gcr.io\/google_containers\/pause:go\",\n\t\t\t\tName: RCName,\n\t\t\t\tNamespace: ns,\n\t\t\t\tPollInterval: itArg.interval,\n\t\t\t\tPodStatusFile: fileHndl,\n\t\t\t\tReplicas: totalPods,\n\t\t\t}\n\n\t\t\t\/\/ Create a listener for events.\n\t\t\tevents := make([](*api.Event), 0)\n\t\t\t_, controller := framework.NewInformer(\n\t\t\t\t&cache.ListWatch{\n\t\t\t\t\tListFunc: func() (runtime.Object, error) {\n\t\t\t\t\t\treturn c.Events(ns).List(labels.Everything(), fields.Everything())\n\t\t\t\t\t},\n\t\t\t\t\tWatchFunc: func(rv string) (watch.Interface, error) {\n\t\t\t\t\t\treturn c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&api.Event{},\n\t\t\t\t0,\n\t\t\t\tframework.ResourceEventHandlerFuncs{\n\t\t\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t\t\tevents = append(events, obj.(*api.Event))\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\tstop := make(chan struct{})\n\t\t\tgo controller.Run(stop)\n\n\t\t\t\/\/ Start the replication controller.\n\t\t\tstartTime := time.Now()\n\t\t\texpectNoError(RunRC(config))\n\t\t\te2eStartupTime := time.Now().Sub(startTime)\n\t\t\tLogf(\"E2E startup time for %d pods: %v\", totalPods, e2eStartupTime)\n\n\t\t\tBy(\"Waiting for all events to be recorded\")\n\t\t\tlast := -1\n\t\t\tcurrent := len(events)\n\t\t\ttimeout := 10 * time.Minute\n\t\t\tfor start := time.Now(); last < current && time.Since(start) < timeout; time.Sleep(10 * time.Second) {\n\t\t\t\tlast = current\n\t\t\t\tcurrent = len(events)\n\t\t\t}\n\t\t\tclose(stop)\n\n\t\t\tif current != last {\n\t\t\t\tLogf(\"Warning: Not all events were recorded after waiting %.2f minutes\", timeout.Minutes())\n\t\t\t}\n\t\t\tLogf(\"Found %d events\", current)\n\n\t\t\t\/\/ Tune the threshold for allowed failures.\n\t\t\tbadEvents := BadEvents(events)\n\t\t\tExpect(badEvents).NotTo(BeNumerically(\">\", int(math.Floor(0.01*float64(totalPods)))))\n\t\t})\n\t}\n})\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\n\/\/ Ideas\n\/\/ - run command w\/ matcher\n\/\/ - verify arbitrary fs layer\n\/\/ - debug, replay commands\/output on failure\n\ntype Cmd struct {\n\t*Cmd\n\n\t\/\/ Embed a `Once` so we can call `exec` only once\n\tsync.Once\n\n\t\/\/ Represents the arguments to `os\/exec.Command`\n\tcommand string\n\targs []string\n\n\t\/\/ Holder of output, always converted to a string for\n\t\/\/ easier comparison and print\n\toutput string\n\n\t\/\/ Holds the error from an execution\n\terr error\n}\n\nfunc Command(cmd string, args ...string) *Cmd {\n\treturn &Cmd{\n\t\tcommand: cmd,\n\t\targs: args,\n\t}\n}\n\n\/\/ Quick wrapper around platform\/arch specific call to cert-manage\n\/\/ which is located at ..\/bin\/cert-manage-GOOS-GOARCH\nfunc CertManage(args ...string) *Cmd {\n\trender := func(tpl string) string {\n\t\treturn fmt.Sprintf(tpl, runtime.GOARCH)\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-osx-%s\"), args...)\n\tcase \"linux\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-linux-%s\"), args...)\n\tcase \"windows\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-%s.exe\"), args...)\n\t}\n\treturn nil\n}\n\nfunc (c *Cmd) exec() {\n\tc.Do(func() {\n\t\tcmd := exec.Command(c.command, c.args...)\n\n\t\t\/\/ Set output collectors\n\t\tvar stdout bytes.Buffer\n\t\tcmd.Stdout = &stdout\n\t\tvar stderr bytes.Buffer\n\t\tcmd.Stderr = &stderr\n\n\t\terr := cmd.Run()\n\t\tif err == nil {\n\t\t\tc.output = stdout.String()\n\t\t} else {\n\t\t\tc.output = stderr.String()\n\t\t}\n\t\tc.err = err\n\t})\n\treturn\n}\n\nfunc (c *Cmd) Trim() *Cmd {\n\tc.exec()\n\tif c.output != \"\" {\n\t\tc.output = strings.TrimSpace(c.output)\n\t}\n\treturn c\n}\n\nfunc (c *Cmd) CmpInt(t *testing.T, n1 int) {\n\tt.Helper()\n\tc.exec()\n\n\tc.CmpIntF(t, func(n2 int) bool { return n1 == n2 })\n}\n\nfunc (c *Cmd) CmpIntF(t *testing.T, f func(int) bool) {\n\tt.Helper()\n\tc.exec()\n\n\tn, err := strconv.Atoi(c.output)\n\tif err != nil {\n\t\tt.Errorf(\"ERROR: converting '%s' to integer failed, err=%v\", c.output, err)\n\t}\n\tif !f(n) {\n\t\tt.Errorf(\"ERROR: got %d\", n)\n\t}\n}\n\nfunc (c *Cmd) FailedT(t *testing.T) {\n\tt.Helper()\n\tc.exec()\n\n\tif c.err == nil {\n\t\tt.Errorf(\"Expected failure, but seeig none.\\n Output: %s\", c.output)\n\t}\n}\n\nfunc (c *Cmd) EqualT(t *testing.T, ans string) {\n\tt.Helper()\n\tc.exec()\n\tif c.output != ans {\n\t\tt.Errorf(\"ERROR: Output did not match expected answer!\\n Output: %s\\n Answer: %s\", c.output, ans)\n\t}\n}\n\nfunc (c *Cmd) String() string {\n\tc.exec()\n\n\tbuf := bytes.NewBufferString(fmt.Sprintf(\"Command:\\n %s %s\\nOutput:\\n %s\", c.command, strings.Join(c.args, \" \"), c.output))\n\tif c.err != nil {\n\t\tbuf.WriteString(fmt.Sprintf(\"\\nError:\\n %v\", c.err))\n\t}\n\treturn buf.String()\n}\n\nfunc (c *Cmd) Success() bool {\n\tc.exec()\n\treturn c.err == nil\n}\n\nfunc (c *Cmd) SuccessT(t *testing.T) {\n\tif !c.Success() {\n\t\tt.Errorf(\"Expected no error, got err=%v\\n Output: %s\", c.err, c.output)\n\t}\n}\n\nfunc (c *Cmd) PendingT(t *testing.T, reason string) {\n\tt.Helper()\n\tc.exec()\n\n\tif c.err == nil {\n\t\tt.Errorf(\"Expected failing test, got success. Pending because %s\", reason)\n\t} else {\n\t\tt.Skip(reason)\n\t}\n}\n<commit_msg>test: remove Cmd self-embed<commit_after>package test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\n\/\/ Ideas\n\/\/ - run command w\/ matcher\n\/\/ - verify arbitrary fs layer\n\/\/ - debug, replay commands\/output on failure\n\ntype Cmd struct {\n\t\/\/ Embed a `Once` so we can call `exec` only once\n\tsync.Once\n\n\t\/\/ Represents the arguments to `os\/exec.Command`\n\tcommand string\n\targs []string\n\n\t\/\/ Holder of output, always converted to a string for\n\t\/\/ easier comparison and print\n\toutput string\n\n\t\/\/ Holds the error from an execution\n\terr error\n}\n\nfunc Command(cmd string, args ...string) *Cmd {\n\treturn &Cmd{\n\t\tcommand: cmd,\n\t\targs: args,\n\t}\n}\n\n\/\/ Quick wrapper around platform\/arch specific call to cert-manage\n\/\/ which is located at ..\/bin\/cert-manage-GOOS-GOARCH\nfunc CertManage(args ...string) *Cmd {\n\trender := func(tpl string) string {\n\t\treturn fmt.Sprintf(tpl, runtime.GOARCH)\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-osx-%s\"), args...)\n\tcase \"linux\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-linux-%s\"), args...)\n\tcase \"windows\":\n\t\treturn Command(render(\"..\/bin\/cert-manage-%s.exe\"), args...)\n\t}\n\treturn nil\n}\n\nfunc (c *Cmd) exec() {\n\tc.Do(func() {\n\t\tcmd := exec.Command(c.command, c.args...)\n\n\t\t\/\/ Set output collectors\n\t\tvar stdout bytes.Buffer\n\t\tcmd.Stdout = &stdout\n\t\tvar stderr bytes.Buffer\n\t\tcmd.Stderr = &stderr\n\n\t\terr := cmd.Run()\n\t\tif err == nil {\n\t\t\tc.output = stdout.String()\n\t\t} else {\n\t\t\tc.output = stderr.String()\n\t\t}\n\t\tc.err = err\n\t})\n\treturn\n}\n\nfunc (c *Cmd) Trim() *Cmd {\n\tc.exec()\n\tif c.output != \"\" {\n\t\tc.output = strings.TrimSpace(c.output)\n\t}\n\treturn c\n}\n\nfunc (c *Cmd) CmpInt(t *testing.T, n1 int) {\n\tt.Helper()\n\tc.exec()\n\n\tc.CmpIntF(t, func(n2 int) bool { return n1 == n2 })\n}\n\nfunc (c *Cmd) CmpIntF(t *testing.T, f func(int) bool) {\n\tt.Helper()\n\tc.exec()\n\n\tn, err := strconv.Atoi(c.output)\n\tif err != nil {\n\t\tt.Errorf(\"ERROR: converting '%s' to integer failed, err=%v\", c.output, err)\n\t}\n\tif !f(n) {\n\t\tt.Errorf(\"ERROR: got %d\", n)\n\t}\n}\n\nfunc (c *Cmd) FailedT(t *testing.T) {\n\tt.Helper()\n\tc.exec()\n\n\tif c.err == nil {\n\t\tt.Errorf(\"Expected failure, but seeig none.\\n Output: %s\", c.output)\n\t}\n}\n\nfunc (c *Cmd) EqualT(t *testing.T, ans string) {\n\tt.Helper()\n\tc.exec()\n\tif c.output != ans {\n\t\tt.Errorf(\"ERROR: Output did not match expected answer!\\n Output: %s\\n Answer: %s\", c.output, ans)\n\t}\n}\n\nfunc (c *Cmd) String() string {\n\tc.exec()\n\n\tbuf := bytes.NewBufferString(fmt.Sprintf(\"Command:\\n %s %s\\nOutput:\\n %s\", c.command, strings.Join(c.args, \" \"), c.output))\n\tif c.err != nil {\n\t\tbuf.WriteString(fmt.Sprintf(\"\\nError:\\n %v\", c.err))\n\t}\n\treturn buf.String()\n}\n\nfunc (c *Cmd) Success() bool {\n\tc.exec()\n\treturn c.err == nil\n}\n\nfunc (c *Cmd) SuccessT(t *testing.T) {\n\tif !c.Success() {\n\t\tt.Errorf(\"Expected no error, got err=%v\\n Output: %s\", c.err, c.output)\n\t}\n}\n\nfunc (c *Cmd) PendingT(t *testing.T, reason string) {\n\tt.Helper()\n\tc.exec()\n\n\tif c.err == nil {\n\t\tt.Errorf(\"Expected failing test, got success. Pending because %s\", reason)\n\t} else {\n\t\tt.Skip(reason)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCountWords(t *testing.T) {\n\ts := \"Hello these are some words. Count them for me, will you?\"\n\tcount := countWords(s)\n\tif count != 11 {\n\t\tt.Errorf(\"Expected %d, got %d\", 11, count)\n\t}\n}\n\nfunc TestCountSentences(t *testing.T) {\n\ts := \"Hello these are some words. Count them for me, will you? Let's continue; with more text. And then end it!\"\n\tcount := countSentences(s)\n\tif count != 5 {\n\t\tt.Errorf(\"Expected %d, got %d\", 5, count)\n\t}\n}\n\nfunc TestCountSyllables(t *testing.T) {\n\ts := \"Ambulance\"\n\tcount := countSyllables(s)\n\tif count != 3 {\n\t\tt.Errorf(\"Expected %d, got %d\", 3, count)\n\t}\n\n\ts = \"Ape\"\n\tcount = countSyllables(s)\n\tif count != 1 {\n\t\tt.Errorf(\"Expected %d, got %d\", 1, count)\n\t}\n\n\ts = \"Booze\"\n\tcount = countSyllables(s)\n\tif count != 1 {\n\t\tt.Errorf(\"Expected %d, got %d\", 1, count)\n\t}\n}\n<commit_msg>add tests for isVowel and getWords<commit_after>package main\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestGetWords(t *testing.T) {\n\ts := \"Hello world\"\n\twords := getWords(s)\n\tif len(words) != 2 || words[0] != \"Hello\" || words[1] != \"world\" {\n\t\tt.Errorf(\"Invalid result %s\", words)\n\t}\n}\n\nfunc TestCountWords(t *testing.T) {\n\ts := \"Hello these are some words. Count them for me, will you?\"\n\tcount := countWords(s)\n\tif count != 11 {\n\t\tt.Errorf(\"Expected %d, got %d\", 11, count)\n\t}\n}\n\nfunc TestCountSentences(t *testing.T) {\n\ts := \"Hello these are some words. Count them for me, will you? Let's continue; with more text. And then end it!\"\n\tcount := countSentences(s)\n\tif count != 5 {\n\t\tt.Errorf(\"Expected %d, got %d\", 5, count)\n\t}\n}\n\nfunc TestCountSyllables(t *testing.T) {\n\ts := \"Ambulance\"\n\tcount := countSyllables(s)\n\tif count != 3 {\n\t\tt.Errorf(\"Expected %d, got %d\", 3, count)\n\t}\n\n\ts = \"Ape\"\n\tcount = countSyllables(s)\n\tif count != 1 {\n\t\tt.Errorf(\"Expected %d, got %d\", 1, count)\n\t}\n\n\ts = \"Booze\"\n\tcount = countSyllables(s)\n\tif count != 1 {\n\t\tt.Errorf(\"Expected %d, got %d\", 1, count)\n\t}\n}\n\nfunc TestIsVowel(t *testing.T) {\n\tvowels := []string{\"a\", \"o\", \"i\", \"u\", \"e\"}\n\tfor _, c := range vowels {\n\t\tif !isVowel(c) {\n\t\t\tt.Errorf(\"%s should return true\", c)\n\t\t}\n\t}\n\n\tnonVowels := strings.Split(\"zxcvbnmqwrtypsdfghjkl\", \"\")\n\tfor _, c := range nonVowels {\n\t\tif isVowel(c) {\n\t\t\tt.Errorf(\"%s should return false\", c)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package textmagic\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/MJKWoolnough\/memio\"\n)\n\nvar apiUrlPrefix = \"https:\/\/www.textmagic.com\/app\/api?\"\n\nconst (\n\tcmdAccount = \"account\"\n\tcmdCheckNumber = \"check_number\"\n\tcmdDeleteReply = \"delete_reply\"\n\tcmdMessageStatus = \"message_status\"\n\tcmdReceive = \"receive\"\n\tcmdSend = \"send\"\n)\n\ntype TextMagic struct {\n\tusername, password string\n}\n\nfunc New(username, password string) TextMagic {\n\treturn TextMagic{username, password}\n}\n\nfunc (t TextMagic) sendAPI(cmd string, params url.Values, data interface{}) error {\n\tparams.Add(\"username\", t.username)\n\tparams.Add(\"password\", t.password)\n\tparams.Add(\"cmd\", cmd)\n\tr, err := http.Get(apiUrlPrefix + params.Encode())\n\tif err != nil {\n\t\treturn RequestError{cmd, err}\n\t}\n\tdefer r.Body.Close()\n\tif r.StatusCode != http.StatusOK {\n\t\treturn StatusError{cmd, r.StatusCode}\n\t}\n\tjsonData := make([]byte, r.ContentLength) \/\/ avoid allocation using io.Pipe?\n\tvar apiError APIError\n\terr = json.NewDecoder(io.TeeReader(r.Body, memio.Create(&jsonData))).Decode(&apiError)\n\tif err != nil {\n\t\treturn JSONError{cmd, err}\n\t}\n\tif apiError.Code != 0 {\n\t\tapiError.Cmd = cmd\n\t\treturn apiError\n\t}\n\tjson.Unmarshal(jsonData, data)\n\treturn nil\n}\n\ntype balance struct {\n\tBalance float32 `json:\"balance\"`\n}\n\nfunc (t TextMagic) Account() (float32, error) {\n\tvar b balance\n\tif err := t.sendAPI(cmdAccount, url.Values{}, &b); err != nil {\n\t\treturn 0, err\n\t}\n\treturn b.Balance, nil\n}\n\ntype Status struct {\n\tText string `json:\"text\"`\n\tStatus string `json:\"status\"`\n\tCreated int64 `json:\"created_time\"`\n\tReply string `json:\"reply_number\"`\n\tCost float32 `json:\"credits_cost\"`\n\tCompleted int64 `json:\"completed_time\"`\n}\n\nfunc (t TextMagic) MessageStatus(ids ...uint) (map[uint]Status, error) {\n\tstatuses := make(map[uint]Status)\n\tfor _, tIds := range splitSlice(ids) {\n\t\tmessageIds := joinUints(tIds...)\n\t\tstrStatuses := make(map[string]Status)\n\t\terr := t.sendAPI(cmdMessageStatus, url.Values{\"ids\": {messageIds}}, strStatuses)\n\t\tif err != nil {\n\t\t\treturn statuses, err\n\t\t}\n\t\tfor messageID, status := range strStatuses {\n\t\t\tid, err := strconv.Atoi(messageID)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatuses[uint(id)] = status\n\t\t}\n\t}\n\treturn statuses, nil\n}\n\ntype Number struct {\n\tPrice float32 `json:\"price\"`\n\tCountry string `json:\"country\"`\n}\n\nfunc (t TextMagic) CheckNumber(numbers ...uint) (map[uint]Number, error) {\n\tns := make(map[string]Number)\n\tif err := t.sendAPI(cmdCheckNumber, url.Values{\"phone\": {joinUints(numbers...)}}, ns); err != nil {\n\t\treturn nil, err\n\t}\n\ttoRet := make(map[uint]Number)\n\tfor n, data := range ns {\n\t\tnumber, err := strconv.Atoi(n)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttoRet[uint(number)] = data\n\t}\n\treturn toRet, nil\n}\n\nconst joinSep = ','\n\nfunc joinUints(u ...uint) string {\n\ttoStr := make([]byte, 0, 10*len(u))\n\tvar digits [21]byte\n\tfor n, num := range u {\n\t\tif n > 0 {\n\t\t\ttoStr = append(toStr, joinSep)\n\t\t}\n\t\tif num == 0 {\n\t\t\ttoStr = append(toStr, '0')\n\t\t\tcontinue\n\t\t}\n\t\tpos := 21\n\t\tfor ; num > 0; num \/= 10 {\n\t\t\tpos--\n\t\t\tdigits[pos] = '0' + byte(num%10)\n\t\t}\n\t\ttoStr = append(toStr, digits[pos:]...)\n\t}\n\treturn string(toStr)\n}\n\nconst maxInSlice = 100\n\nfunc splitSlice(slice []uint) [][]uint {\n\ttoRet := make([][]uint, 0, len(slice)\/maxInSlice+1)\n\tfor len(slice) > 100 {\n\t\ttoRet = append(toRet, slice[:100])\n\t\tslice = slice[100:]\n\t}\n\tif len(slice) > 0 {\n\t\ttoRet = append(toRet, slice)\n\t}\n\treturn toRet\n}\n\n\/\/ Errors\n\ntype APIError struct {\n\tCmd string\n\tCode int `json:\"error_code\"`\n\tMessage string `json:\"error_message\"`\n}\n\nfunc (a APIError) Error() string {\n\treturn \"command \" + a.Cmd + \" returned the following API error: \" + a.Message\n}\n\ntype RequestError struct {\n\tCmd string\n\tErr error\n}\n\nfunc (r RequestError) Error() string {\n\treturn \"command \" + r.Cmd + \" returned the following error while makeing the API call: \" + r.Err.Error()\n}\n\ntype StatusError struct {\n\tCmd string\n\tStatusCode int\n}\n\nfunc (s StatusError) Error() string {\n\treturn \"command \" + s.Cmd + \" returned a non-200 OK response: \" + http.StatusText(s.StatusCode)\n}\n\ntype JSONError struct {\n\tCmd string\n\tErr error\n}\n\nfunc (j JSONError) Error() string {\n\treturn \"command \" + j.Cmd + \" returned malformed JSON: \" + j.Err.Error()\n}\n<commit_msg>Added DeleteReply<commit_after>package textmagic\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/MJKWoolnough\/memio\"\n)\n\nvar apiUrlPrefix = \"https:\/\/www.textmagic.com\/app\/api?\"\n\nconst (\n\tcmdAccount = \"account\"\n\tcmdCheckNumber = \"check_number\"\n\tcmdDeleteReply = \"delete_reply\"\n\tcmdMessageStatus = \"message_status\"\n\tcmdReceive = \"receive\"\n\tcmdSend = \"send\"\n)\n\ntype TextMagic struct {\n\tusername, password string\n}\n\nfunc New(username, password string) TextMagic {\n\treturn TextMagic{username, password}\n}\n\nfunc (t TextMagic) sendAPI(cmd string, params url.Values, data interface{}) error {\n\tparams.Add(\"username\", t.username)\n\tparams.Add(\"password\", t.password)\n\tparams.Add(\"cmd\", cmd)\n\tr, err := http.Get(apiUrlPrefix + params.Encode())\n\tif err != nil {\n\t\treturn RequestError{cmd, err}\n\t}\n\tdefer r.Body.Close()\n\tif r.StatusCode != http.StatusOK {\n\t\treturn StatusError{cmd, r.StatusCode}\n\t}\n\tjsonData := make([]byte, r.ContentLength) \/\/ avoid allocation using io.Pipe?\n\tvar apiError APIError\n\terr = json.NewDecoder(io.TeeReader(r.Body, memio.Create(&jsonData))).Decode(&apiError)\n\tif err != nil {\n\t\treturn JSONError{cmd, err}\n\t}\n\tif apiError.Code != 0 {\n\t\tapiError.Cmd = cmd\n\t\treturn apiError\n\t}\n\tjson.Unmarshal(jsonData, data)\n\treturn nil\n}\n\ntype balance struct {\n\tBalance float32 `json:\"balance\"`\n}\n\nfunc (t TextMagic) Account() (float32, error) {\n\tvar b balance\n\tif err := t.sendAPI(cmdAccount, url.Values{}, &b); err != nil {\n\t\treturn 0, err\n\t}\n\treturn b.Balance, nil\n}\n\ntype Status struct {\n\tText string `json:\"text\"`\n\tStatus string `json:\"status\"`\n\tCreated int64 `json:\"created_time\"`\n\tReply string `json:\"reply_number\"`\n\tCost float32 `json:\"credits_cost\"`\n\tCompleted int64 `json:\"completed_time\"`\n}\n\nfunc (t TextMagic) MessageStatus(ids ...uint) (map[uint]Status, error) {\n\tstatuses := make(map[uint]Status)\n\tfor _, tIds := range splitSlice(ids) {\n\t\tmessageIds := joinUints(tIds...)\n\t\tstrStatuses := make(map[string]Status)\n\t\terr := t.sendAPI(cmdMessageStatus, url.Values{\"ids\": {messageIds}}, strStatuses)\n\t\tif err != nil {\n\t\t\treturn statuses, err\n\t\t}\n\t\tfor messageID, status := range strStatuses {\n\t\t\tid, err := strconv.Atoi(messageID)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatuses[uint(id)] = status\n\t\t}\n\t}\n\treturn statuses, nil\n}\n\ntype Number struct {\n\tPrice float32 `json:\"price\"`\n\tCountry string `json:\"country\"`\n}\n\nfunc (t TextMagic) CheckNumber(numbers ...uint) (map[uint]Number, error) {\n\tns := make(map[string]Number)\n\tif err := t.sendAPI(cmdCheckNumber, url.Values{\"phone\": {joinUints(numbers...)}}, ns); err != nil {\n\t\treturn nil, err\n\t}\n\ttoRet := make(map[uint]Number)\n\tfor n, data := range ns {\n\t\tnumber, err := strconv.Atoi(n)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttoRet[uint(number)] = data\n\t}\n\treturn toRet, nil\n}\n\ntype deleted struct {\n\tDeleted []uint `json:\"deleted\"`\n}\n\nfunc (t TextMagic) DeleteReply(ids ...uint) ([]uint, error) {\n\ttoRet := make([]uint, 0, len(ids))\n\tfor _, tIds := range splitSlice(ids) {\n\t\tvar d deleted\n\t\tif err := t.sendAPI(cmdDeleteReply, url.Values{\"deleted\": {joinUints(tIds...)}}, &d); err != nil {\n\t\t\treturn toRet, err\n\t\t}\n\t\ttoRet = append(toRet, d.Deleted...)\n\t}\n\treturn toRet, nil\n}\n\nconst joinSep = ','\n\nfunc joinUints(u ...uint) string {\n\ttoStr := make([]byte, 0, 10*len(u))\n\tvar digits [21]byte\n\tfor n, num := range u {\n\t\tif n > 0 {\n\t\t\ttoStr = append(toStr, joinSep)\n\t\t}\n\t\tif num == 0 {\n\t\t\ttoStr = append(toStr, '0')\n\t\t\tcontinue\n\t\t}\n\t\tpos := 21\n\t\tfor ; num > 0; num \/= 10 {\n\t\t\tpos--\n\t\t\tdigits[pos] = '0' + byte(num%10)\n\t\t}\n\t\ttoStr = append(toStr, digits[pos:]...)\n\t}\n\treturn string(toStr)\n}\n\nconst maxInSlice = 100\n\nfunc splitSlice(slice []uint) [][]uint {\n\ttoRet := make([][]uint, 0, len(slice)\/maxInSlice+1)\n\tfor len(slice) > 100 {\n\t\ttoRet = append(toRet, slice[:100])\n\t\tslice = slice[100:]\n\t}\n\tif len(slice) > 0 {\n\t\ttoRet = append(toRet, slice)\n\t}\n\treturn toRet\n}\n\n\/\/ Errors\n\ntype APIError struct {\n\tCmd string\n\tCode int `json:\"error_code\"`\n\tMessage string `json:\"error_message\"`\n}\n\nfunc (a APIError) Error() string {\n\treturn \"command \" + a.Cmd + \" returned the following API error: \" + a.Message\n}\n\ntype RequestError struct {\n\tCmd string\n\tErr error\n}\n\nfunc (r RequestError) Error() string {\n\treturn \"command \" + r.Cmd + \" returned the following error while makeing the API call: \" + r.Err.Error()\n}\n\ntype StatusError struct {\n\tCmd string\n\tStatusCode int\n}\n\nfunc (s StatusError) Error() string {\n\treturn \"command \" + s.Cmd + \" returned a non-200 OK response: \" + http.StatusText(s.StatusCode)\n}\n\ntype JSONError struct {\n\tCmd string\n\tErr error\n}\n\nfunc (j JSONError) Error() string {\n\treturn \"command \" + j.Cmd + \" returned malformed JSON: \" + j.Err.Error()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/krolaw\/dhcp4\"\n)\n\n\/\/ DHCPService is the DHCP server instance\ntype DHCPService struct {\n\tip net.IP\n\tdomain string\n\tguestPool *net.IPNet\n\tleaseDuration time.Duration\n\tdefaultOptions dhcp4.Options \/\/ FIXME: make different options per pool?\n\tetcdClient *etcd.Client\n}\n\nfunc dhcpSetup(cfg *Config, etc *etcd.Client) chan error {\n\tetc.CreateDir(\"dhcp\", 0)\n\texit := make(chan error, 1)\n\tgo func() {\n\t\texit <- dhcp4.ListenAndServeIf(cfg.DHCPNIC(), &DHCPService{\n\t\t\tip: cfg.DHCPIP(),\n\t\t\tleaseDuration: cfg.DHCPLeaseDuration(),\n\t\t\tetcdClient: etc,\n\t\t\tguestPool: cfg.DHCPSubnet(),\n\t\t\tdomain: cfg.Domain(),\n\t\t\tdefaultOptions: dhcp4.Options{\n\t\t\t\tdhcp4.OptionSubnetMask: net.IP(cfg.Subnet().Mask),\n\t\t\t\tdhcp4.OptionRouter: cfg.Gateway(),\n\t\t\t\tdhcp4.OptionDomainNameServer: cfg.DHCPIP(),\n\t\t\t},\n\t\t})\n\t}()\n\treturn exit\n}\n\n\/\/ ServeDHCP is called by dhcp4.ListenAndServe when the service is started\nfunc (d *DHCPService) ServeDHCP(packet dhcp4.Packet, msgType dhcp4.MessageType, reqOptions dhcp4.Options) (response dhcp4.Packet) {\n\tswitch msgType {\n\tcase dhcp4.Discover:\n\t\t\/\/ RFC 2131 4.3.1\n\t\t\/\/ FIXME: send to StatHat and\/or increment a counter\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Discover from %s\\n\", mac.String())\n\t\tip := d.getIPFromMAC(mac, packet, reqOptions)\n\t\tif ip != nil {\n\t\t\toptions := d.getOptionsFromMAC(mac)\n\t\t\tfmt.Printf(\"DHCP Discover from %s (we return %s)\\n\", mac.String(), ip.String())\n\t\t\t\/\/ for x, y := range reqOptions {\n\t\t\t\/\/ \tfmt.Printf(\"\\tR[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\t\/\/ for x, y := range options {\n\t\t\t\/\/ \tfmt.Printf(\"\\tO[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.Offer, d.ip.To4(), ip.To4(), d.leaseDuration, options.SelectOrderOrAll(reqOptions[dhcp4.OptionParameterRequestList]))\n\t\t}\n\t\treturn nil\n\tcase dhcp4.Request:\n\t\t\/\/ RFC 2131 4.3.2\n\t\t\/\/ FIXME: send to StatHat and\/or increment a counter\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Request from %s...\\n\", mac.String())\n\t\tstate := \"NEW\"\n\t\trequestedIP := net.IP(reqOptions[dhcp4.OptionRequestedIPAddress])\n\t\tif len(requestedIP) == 0 { \/\/ empty\n\t\t\tstate = \"RENEWAL\"\n\t\t\trequestedIP = packet.CIAddr()\n\t\t}\n\t\tif len(requestedIP) == 4 { \/\/ valid and IPv4\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s\\n\", state, mac.String(), requestedIP.String())\n\t\t\tip := d.getIPFromMAC(mac, packet, reqOptions)\n\t\t\tif ip.Equal(requestedIP) {\n\t\t\t\toptions := d.getOptionsFromMAC(mac)\n\t\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (we agree)\\n\", state, mac.String(), requestedIP.String())\n\t\t\t\t\/\/ for x, y := range reqOptions {\n\t\t\t\t\/\/ \tfmt.Printf(\"\\tR[%v] %v %s\\n\", x, y, y)\n\t\t\t\t\/\/ }\n\t\t\t\t\/\/ for x, y := range options {\n\t\t\t\t\/\/ \tfmt.Printf(\"\\tO[%v] %v %s\\n\", x, y, y)\n\t\t\t\t\/\/ }\n\t\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.ACK, d.ip.To4(), requestedIP.To4(), d.leaseDuration, options.SelectOrderOrAll(reqOptions[dhcp4.OptionParameterRequestList]))\n\t\t\t}\n\t\t}\n\t\tif len(requestedIP) == 0 { \/\/ no IP provided at all... why? FIXME\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s (empty IP, so we're just ignoring this request)\\n\", state, mac.String())\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"DHCP Request (%s) from %s (we disagree about %s)\\n\", state, mac.String(), requestedIP)\n\t\treturn dhcp4.ReplyPacket(packet, dhcp4.NAK, d.ip.To4(), nil, 0, nil)\n\tcase dhcp4.Decline:\n\t\t\/\/ RFC 2131 4.3.3\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Decline from %s\\n\", mac.String())\n\tcase dhcp4.Release:\n\t\t\/\/ RFC 2131 4.3.4\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Release from %s\\n\", mac.String())\n\tcase dhcp4.Inform:\n\t\t\/\/ RFC 2131 4.3.5\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\t\/\/ FIXME: we should reply with valuable info, but not assign an IP to this client, per RFC 2131 for DHCPINFORM\n\t\t\/\/ NOTE: the client's IP is supposed to only be in the ciaddr field, not the requested IP field, per RFC 2131 4.4.3\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Inform from %s\\n\", mac.String())\n\t}\n\treturn nil\n}\n\nfunc (d *DHCPService) getIPFromMAC(mac net.HardwareAddr, packet dhcp4.Packet, reqOptions dhcp4.Options) net.IP {\n\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ip\", false, false)\n\tif response != nil && response.Node != nil {\n\t\tip := net.ParseIP(response.Node.Value)\n\t\tif ip != nil {\n\t\t\td.etcdClient.Set(\"dhcp\/\"+ip.String(), mac.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\t\t\td.etcdClient.Set(\"dhcp\/\"+mac.String()+\"\/ip\", ip.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\t\t\td.maintainDNSRecords(mac, ip, packet, reqOptions)\n\t\t\treturn ip\n\t\t}\n\t}\n\n\t\/\/ TODO: determine whether or not this MAC should be permitted to get an IP at all (blacklist? whitelist?)\n\n\t\/\/ locate an unused IP address (can this be more efficient? yes! FIXME)\n\tvar ip net.IP\n\tfor testIP := dhcp4.IPAdd(d.guestPool.IP, 1); d.guestPool.Contains(testIP); testIP = dhcp4.IPAdd(testIP, 1) {\n\t\tfmt.Println(testIP.String())\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+testIP.String(), false, false)\n\t\tif response == nil || response.Node == nil { \/\/ this means that the IP is not already occupied\n\t\t\tip = testIP\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif ip != nil { \/\/ if nil then we're out of IP addresses!\n\t\td.etcdClient.CreateDir(\"dhcp\/\"+mac.String(), 0)\n\t\td.etcdClient.Set(\"dhcp\/\"+mac.String()+\"\/ip\", ip.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\t\td.etcdClient.Set(\"dhcp\/\"+ip.String(), mac.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\n\t\td.maintainDNSRecords(mac, ip, packet, reqOptions)\n\n\t\treturn ip\n\t}\n\n\treturn nil\n}\n\nfunc (d *DHCPService) maintainDNSRecords(mac net.HardwareAddr, ip net.IP, packet dhcp4.Packet, reqOptions dhcp4.Options) {\n\toptions := d.getOptionsFromMAC(mac)\n\tif domain, ok := options[dhcp4.OptionDomainName]; ok {\n\t\t\/\/ FIXME: danger! we're mixing systems here... if we keep this up, we will have spaghetti!\n\t\tname := \"\"\n\t\tif val, ok := options[dhcp4.OptionHostName]; ok {\n\t\t\tname = string(val)\n\t\t} else if val, ok := reqOptions[dhcp4.OptionHostName]; ok {\n\t\t\tname = string(val)\n\t\t}\n\t\tif name != \"\" {\n\t\t\thost := strings.ToLower(strings.Join([]string{name, string(domain)}, \".\"))\n\t\t\tipHash := fmt.Sprintf(\"%x\", sha1.Sum([]byte(ip.String()))) \/\/ hash the IP address so we can have a unique key name (no other reason for this, honestly)\n\t\t\tpathParts := strings.Split(strings.TrimSuffix(host, \".\"), \".\") \/\/ breakup the name\n\t\t\tqueryPath := strings.Join(reverseSlice(pathParts), \"\/\") \/\/ reverse and join them with a slash delimiter\n\t\t\tfmt.Printf(\"Wanting to register against %s\/%s\\n\", queryPath, name)\n\t\t\td.etcdClient.Set(\"dns\/\"+queryPath+\"\/@a\/val\/\"+ipHash, ip.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\t\t\thostHash := fmt.Sprintf(\"%x\", sha1.Sum([]byte(host))) \/\/ hash the hostname so we can have a unique key name (no other reason for this, honestly)\n\t\t\tslashedIP := strings.Replace(ip.To4().String(), \".\", \"\/\", -1)\n\t\t\td.etcdClient.Set(\"dns\/arpa\/in-addr\/\"+slashedIP+\"\/@ptr\/val\/\"+hostHash, host, uint64(d.leaseDuration.Seconds()+0.5))\n\t\t} else {\n\t\t\tfmt.Println(\">> No host name\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\">> No domain name\")\n\t}\n}\n\nfunc (d *DHCPService) getOptionsFromMAC(mac net.HardwareAddr) dhcp4.Options {\n\toptions := dhcp4.Options{}\n\n\tfor i := range d.defaultOptions {\n\t\toptions[i] = d.defaultOptions[i]\n\t\tfmt.Printf(\"OPTION:[%d][%+v]\\n\", i, d.defaultOptions[i])\n\t}\n\n\t{ \/\/ Subnet Mask\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/mask\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionSubnetMask)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionSubnetMask] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Gateway\/Router\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/gw\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionRouter)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionRouter] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Name Server\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ns\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionDomainNameServer)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionDomainNameServer] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Host Name\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/name\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionHostName)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionHostName] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Domain Name\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/domain\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value != \"\" {\n\t\t\t\toptions[dhcp4.OptionDomainName] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t\tif len(options[dhcp4.OptionDomainName]) == 0 {\n\t\t\tif d.domain != \"\" {\n\t\t\t\toptions[dhcp4.OptionDomainName] = []byte(d.domain)\n\t\t\t} else {\n\t\t\t\tdelete(options, dhcp4.OptionDomainName)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Broadcast Address\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/broadcast\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionBroadcastAddress)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionBroadcastAddress] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ NTP Server\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ntp\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionNetworkTimeProtocolServers)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionNetworkTimeProtocolServers] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn options\n}\n<commit_msg>Significant refactoring of ServeDCHP and etcd access functions<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/krolaw\/dhcp4\"\n)\n\n\/\/ DHCPService is the DHCP server instance\ntype DHCPService struct {\n\tip net.IP\n\tdomain string\n\tguestPool *net.IPNet\n\tleaseDuration time.Duration\n\tdefaultOptions dhcp4.Options \/\/ FIXME: make different options per pool?\n\tetcdClient *etcd.Client\n}\n\ntype dhcpLease struct {\n\tmac net.HardwareAddr\n\tip net.IP\n\tduration time.Duration\n}\n\nfunc dhcpSetup(cfg *Config, etc *etcd.Client) chan error {\n\tetc.CreateDir(\"dhcp\", 0)\n\texit := make(chan error, 1)\n\tgo func() {\n\t\texit <- dhcp4.ListenAndServeIf(cfg.DHCPNIC(), &DHCPService{\n\t\t\tip: cfg.DHCPIP(),\n\t\t\tleaseDuration: cfg.DHCPLeaseDuration(),\n\t\t\tetcdClient: etc,\n\t\t\tguestPool: cfg.DHCPSubnet(),\n\t\t\tdomain: cfg.Domain(),\n\t\t\tdefaultOptions: dhcp4.Options{\n\t\t\t\tdhcp4.OptionSubnetMask: net.IP(cfg.Subnet().Mask),\n\t\t\t\tdhcp4.OptionRouter: cfg.Gateway(),\n\t\t\t\tdhcp4.OptionDomainNameServer: cfg.DHCPIP(),\n\t\t\t},\n\t\t})\n\t}()\n\treturn exit\n}\n\n\/\/ ServeDHCP is called by dhcp4.ListenAndServe when the service is started\nfunc (d *DHCPService) ServeDHCP(packet dhcp4.Packet, msgType dhcp4.MessageType, reqOptions dhcp4.Options) (response dhcp4.Packet) {\n\tswitch msgType {\n\tcase dhcp4.Discover:\n\t\t\/\/ RFC 2131 4.3.1\n\t\t\/\/ FIXME: send to StatHat and\/or increment a counter\n\t\tmac := packet.CHAddr()\n\t\tif !d.isMACPermitted(mac) {\n\t\t\tfmt.Printf(\"DHCP Discover from %s\\n is not permitted\", mac.String())\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Printf(\"DHCP Discover from %s\\n\", mac.String())\n\t\t\/\/ Existing Lease\n\t\tlease, err := d.getLease(mac)\n\t\tif err == nil {\n\t\t\toptions := d.getOptionsFromMAC(mac)\n\t\t\tfmt.Printf(\"DHCP Discover from %s (we offer %s from current lease)\\n\", mac.String(), lease.ip.String())\n\t\t\t\/\/ for x, y := range reqOptions {\n\t\t\t\/\/ \tfmt.Printf(\"\\tR[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\t\/\/ for x, y := range options {\n\t\t\t\/\/ \tfmt.Printf(\"\\tO[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.Offer, d.ip.To4(), lease.ip.To4(), d.leaseDuration, options.SelectOrderOrAll(reqOptions[dhcp4.OptionParameterRequestList]))\n\t\t}\n\t\t\/\/ New Lease\n\t\tip := d.getIPFromPool()\n\t\tif ip != nil {\n\t\t\toptions := d.getOptionsFromMAC(mac)\n\t\t\tfmt.Printf(\"DHCP Discover from %s (we offer %s from pool)\\n\", mac.String(), ip.String())\n\t\t\t\/\/ for x, y := range reqOptions {\n\t\t\t\/\/ \tfmt.Printf(\"\\tR[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\t\/\/ for x, y := range options {\n\t\t\t\/\/ \tfmt.Printf(\"\\tO[%v] %v %s\\n\", x, y, y)\n\t\t\t\/\/ }\n\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.Offer, d.ip.To4(), ip.To4(), d.leaseDuration, options.SelectOrderOrAll(reqOptions[dhcp4.OptionParameterRequestList]))\n\t\t}\n\n\t\tfmt.Printf(\"DHCP Discover from %s (no offer due to no addresses available in pool)\\n\", mac.String())\n\t\t\/\/ FIXME: Send to StatHat and\/or increment a counter\n\t\t\/\/ TODO: Send an email?\n\n\t\treturn nil\n\tcase dhcp4.Request:\n\t\t\/\/ RFC 2131 4.3.2\n\t\t\/\/ FIXME: send to StatHat and\/or increment a counter\n\t\tmac := packet.CHAddr()\n\t\t\/\/ Check MAC blacklist\n\t\tif !d.isMACPermitted(mac) {\n\t\t\tfmt.Printf(\"DHCP Request from %s\\n is not permitted\", mac.String())\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Check IP presence\n\t\tstate, requestedIP := d.getRequestState(packet, reqOptions)\n\t\tfmt.Printf(\"DHCP Request (%s) from %s...\\n\", state, mac.String())\n\t\tif len(requestedIP) == 0 { \/\/ no IP provided at all... why? FIXME\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s (empty IP, so we're just ignoring this request)\\n\", state, mac.String())\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Check IPv4\n\t\tif len(requestedIP) != 4 {\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (IPv6 address requested, so we're just ignoring this request)\\n\", state, mac.String(), requestedIP.String())\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Check IP subnet\n\t\tif !d.guestPool.Contains(requestedIP) {\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (we reject due to wrong subnet)\\n\", state, mac.String(), requestedIP.String())\n\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.NAK, d.ip.To4(), nil, 0, nil)\n\t\t}\n\t\t\/\/ Check Target Server\n\t\ttargetServerIP := packet.SIAddr()\n\t\tif len(targetServerIP) == 4 {\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s is in response to a DHCP offer from %s\\n\", state, mac.String(), requestedIP.String(), targetServerIP.String())\n\t\t\tif d.ip.Equal(targetServerIP) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\t\/\/ Process Request\n\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s...\\n\", state, mac.String(), requestedIP.String())\n\t\tlease, err := d.getLease(mac)\n\t\tif err == nil {\n\t\t\t\/\/ Existing Lease\n\t\t\tlease.duration = d.leaseDuration\n\t\t\tif lease.ip.Equal(requestedIP) {\n\t\t\t\terr = d.renewLease(lease)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (we reject due to lease mismatch, should be %s)\\n\", state, mac.String(), requestedIP.String(), lease.ip.String())\n\t\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.NAK, d.ip.To4(), nil, 0, nil)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ New lease\n\t\t\tlease = dhcpLease{\n\t\t\t\tmac: mac,\n\t\t\t\tip: requestedIP,\n\t\t\t\tduration: d.leaseDuration,\n\t\t\t}\n\t\t\terr = d.createLease(lease)\n\t\t}\n\n\t\tif err != nil {\n\t\t\td.maintainDNSRecords(lease.mac, lease.ip, packet, reqOptions) \/\/ TODO: Move this?\n\t\t\toptions := d.getOptionsFromMAC(mac)\n\t\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (we agree)\\n\", state, mac.String(), requestedIP.String())\n\t\t\treturn dhcp4.ReplyPacket(packet, dhcp4.ACK, d.ip.To4(), requestedIP.To4(), d.leaseDuration, options.SelectOrderOrAll(reqOptions[dhcp4.OptionParameterRequestList]))\n\t\t}\n\n\t\tfmt.Printf(\"DHCP Request (%s) from %s wanting %s (we reject due to address collision)\\n\", state, mac.String(), requestedIP.String())\n\t\treturn dhcp4.ReplyPacket(packet, dhcp4.NAK, d.ip.To4(), nil, 0, nil)\n\tcase dhcp4.Decline:\n\t\t\/\/ RFC 2131 4.3.3\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Decline from %s\\n\", mac.String())\n\tcase dhcp4.Release:\n\t\t\/\/ RFC 2131 4.3.4\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Release from %s\\n\", mac.String())\n\tcase dhcp4.Inform:\n\t\t\/\/ RFC 2131 4.3.5\n\t\t\/\/ FIXME: release from DB? tick a flag? increment a counter? send to StatHat?\n\t\t\/\/ FIXME: we should reply with valuable info, but not assign an IP to this client, per RFC 2131 for DHCPINFORM\n\t\t\/\/ NOTE: the client's IP is supposed to only be in the ciaddr field, not the requested IP field, per RFC 2131 4.4.3\n\t\tmac := packet.CHAddr()\n\t\tfmt.Printf(\"DHCP Inform from %s\\n\", mac.String())\n\t}\n\treturn nil\n}\n\nfunc (d *DHCPService) isMACPermitted(mac net.HardwareAddr) bool {\n\t\/\/ TODO: determine whether or not this MAC should be permitted to get an IP at all (blacklist? whitelist?)\n\treturn true\n}\n\nfunc (d *DHCPService) getRequestState(packet dhcp4.Packet, reqOptions dhcp4.Options) (string, net.IP) {\n\tstate := \"NEW\"\n\trequestedIP := net.IP(reqOptions[dhcp4.OptionRequestedIPAddress])\n\tif len(requestedIP) == 0 { \/\/ empty\n\t\tstate = \"RENEWAL\"\n\t\trequestedIP = packet.CIAddr()\n\t}\n\treturn state, requestedIP\n}\n\nfunc (d *DHCPService) getIPFromPool() net.IP {\n\t\/\/ locate an unused IP address (can this be more efficient? yes! FIXME)\n\t\/\/ TODO: Create a channel and spawn a goproc with something like this function to feed it; then have the server pull addresses from that channel\n\tfor ip := dhcp4.IPAdd(d.guestPool.IP, 1); d.guestPool.Contains(ip); ip = dhcp4.IPAdd(ip, 1) {\n\t\tfmt.Println(ip.String())\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+ip.String(), false, false)\n\t\tif response == nil || response.Node == nil { \/\/ this means that the IP is not already occupied\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *DHCPService) getLease(mac net.HardwareAddr) (dhcpLease, error) {\n\tlease := dhcpLease{}\n\tresponse, err := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ip\", false, false)\n\tif err == nil {\n\t\tif response != nil && response.Node != nil {\n\t\t\tlease.mac = mac\n\t\t\tlease.ip = net.ParseIP(response.Node.Value)\n\t\t\tlease.duration = time.Duration(response.Node.TTL)\n\t\t} else {\n\t\t\terr = errors.New(\"Not Found\")\n\t\t}\n\t}\n\treturn lease, err\n}\n\nfunc (d *DHCPService) renewLease(lease dhcpLease) error {\n\tduration := uint64(lease.duration.Seconds() + 0.5) \/\/ Half second jitter to hide network delay\n\t_, err := d.etcdClient.CompareAndSwap(\"dhcp\/\"+lease.ip.String(), lease.mac.String(), duration, lease.mac.String(), 0)\n\tif err == nil {\n\t\treturn d.writeLease(lease)\n\t}\n\treturn err\n}\n\nfunc (d *DHCPService) createLease(lease dhcpLease) error {\n\tduration := uint64(lease.duration.Seconds() + 0.5)\n\t_, err := d.etcdClient.Create(\"dhcp\/\"+lease.ip.String(), lease.mac.String(), duration)\n\tif err == nil {\n\t\treturn d.writeLease(lease)\n\t}\n\treturn err\n}\n\nfunc (d *DHCPService) writeLease(lease dhcpLease) error {\n\tduration := uint64(lease.duration.Seconds() + 0.5) \/\/ Half second jitter to hide network delay\n\t\/\/ FIXME: Decide what to do if either of these calls returns an error\n\td.etcdClient.CreateDir(\"dhcp\/\"+lease.mac.String(), 0)\n\td.etcdClient.Set(\"dhcp\/\"+lease.mac.String()+\"\/ip\", lease.ip.String(), duration)\n\treturn nil\n}\n\nfunc (d *DHCPService) maintainDNSRecords(mac net.HardwareAddr, ip net.IP, packet dhcp4.Packet, reqOptions dhcp4.Options) {\n\toptions := d.getOptionsFromMAC(mac)\n\tif domain, ok := options[dhcp4.OptionDomainName]; ok {\n\t\t\/\/ FIXME: danger! we're mixing systems here... if we keep this up, we will have spaghetti!\n\t\tname := \"\"\n\t\tif val, ok := options[dhcp4.OptionHostName]; ok {\n\t\t\tname = string(val)\n\t\t} else if val, ok := reqOptions[dhcp4.OptionHostName]; ok {\n\t\t\tname = string(val)\n\t\t}\n\t\tif name != \"\" {\n\t\t\thost := strings.ToLower(strings.Join([]string{name, string(domain)}, \".\"))\n\t\t\tipHash := fmt.Sprintf(\"%x\", sha1.Sum([]byte(ip.String()))) \/\/ hash the IP address so we can have a unique key name (no other reason for this, honestly)\n\t\t\tpathParts := strings.Split(strings.TrimSuffix(host, \".\"), \".\") \/\/ breakup the name\n\t\t\tqueryPath := strings.Join(reverseSlice(pathParts), \"\/\") \/\/ reverse and join them with a slash delimiter\n\t\t\tfmt.Printf(\"Wanting to register against %s\/%s\\n\", queryPath, name)\n\t\t\td.etcdClient.Set(\"dns\/\"+queryPath+\"\/@a\/val\/\"+ipHash, ip.String(), uint64(d.leaseDuration.Seconds()+0.5))\n\t\t\thostHash := fmt.Sprintf(\"%x\", sha1.Sum([]byte(host))) \/\/ hash the hostname so we can have a unique key name (no other reason for this, honestly)\n\t\t\tslashedIP := strings.Replace(ip.To4().String(), \".\", \"\/\", -1)\n\t\t\td.etcdClient.Set(\"dns\/arpa\/in-addr\/\"+slashedIP+\"\/@ptr\/val\/\"+hostHash, host, uint64(d.leaseDuration.Seconds()+0.5))\n\t\t} else {\n\t\t\tfmt.Println(\">> No host name\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\">> No domain name\")\n\t}\n}\n\nfunc (d *DHCPService) getOptionsFromMAC(mac net.HardwareAddr) dhcp4.Options {\n\toptions := dhcp4.Options{}\n\n\tfor i := range d.defaultOptions {\n\t\toptions[i] = d.defaultOptions[i]\n\t\tfmt.Printf(\"OPTION:[%d][%+v]\\n\", i, d.defaultOptions[i])\n\t}\n\n\t{ \/\/ Subnet Mask\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/mask\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionSubnetMask)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionSubnetMask] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Gateway\/Router\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/gw\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionRouter)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionRouter] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Name Server\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ns\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionDomainNameServer)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionDomainNameServer] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Host Name\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/name\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionHostName)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionHostName] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Domain Name\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/domain\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value != \"\" {\n\t\t\t\toptions[dhcp4.OptionDomainName] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t\tif len(options[dhcp4.OptionDomainName]) == 0 {\n\t\t\tif d.domain != \"\" {\n\t\t\t\toptions[dhcp4.OptionDomainName] = []byte(d.domain)\n\t\t\t} else {\n\t\t\t\tdelete(options, dhcp4.OptionDomainName)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ Broadcast Address\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/broadcast\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionBroadcastAddress)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionBroadcastAddress] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t{ \/\/ NTP Server\n\t\tresponse, _ := d.etcdClient.Get(\"dhcp\/\"+mac.String()+\"\/ntp\", false, false)\n\t\tif response != nil && response.Node != nil {\n\t\t\tif response.Node.Value == \"\" {\n\t\t\t\tdelete(options, dhcp4.OptionNetworkTimeProtocolServers)\n\t\t\t} else {\n\t\t\t\toptions[dhcp4.OptionNetworkTimeProtocolServers] = []byte(response.Node.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn options\n}\n<|endoftext|>"} {"text":"<commit_before>package dnsr\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlru \"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/miekg\/dns\"\n)\n\ntype Resolver struct {\n\tcache *lru.Cache\n\tclient *dns.Client\n}\n\nfunc New(size int) *Resolver {\n\tif size < 0 {\n\t\tsize = 10000\n\t}\n\tcache, _ := lru.New(size)\n\tr := &Resolver{\n\t\tclient: &dns.Client{},\n\t\tcache: cache,\n\t}\n\tr.cacheRoot()\n\treturn r\n}\n\nfunc (r *Resolver) Resolve(qname string, qtype uint16) <-chan dns.RR {\n\tc := make(chan dns.RR, 20)\n\tgo func() {\n\t\tq := &dns.Question{qname, qtype, dns.ClassINET}\n\t\tdefer fmt.Printf(\";; QUESTION:\\n%s\\n\\n\\n\", q.String())\n\t\tdefer close(c)\n\t\tqname = toLowerFQDN(qname)\n\t\tif rrs := r.cacheGet(qname, qtype); rrs != nil {\n\t\t\tinject(c, rrs...)\n\t\t\treturn\n\t\t}\n\t\tpname, ok := parent(qname)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tfor nrr := range r.Resolve(pname, dns.TypeNS) {\n\t\t\tns, ok := nrr.(*dns.NS)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\touter:\n\t\t\tfor arr := range r.Resolve(ns.Ns, dns.TypeA) {\n\t\t\t\ta, ok := arr.(*dns.A)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\taddr := a.A.String() + \":53\"\n\t\t\t\tqmsg := &dns.Msg{}\n\t\t\t\tqmsg.SetQuestion(qname, qtype)\n\t\t\t\tqmsg.MsgHdr.RecursionDesired = false\n\t\t\t\tfmt.Printf(\"; Querying DNS server %s for %s\\n\", addr, qname)\n\t\t\t\trmsg, _, err := r.client.Exchange(qmsg, addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"; ERROR querying DNS server %s for %s: %s\\n\", addr, qname, err.Error())\n\t\t\t\t\tcontinue \/\/ FIXME: handle errors better from flaky\/failing NS servers\n\t\t\t\t}\n\t\t\t\tif rmsg.Rcode == dns.RcodeNameError {\n\t\t\t\t\tr.cacheAdd(qname, qtype) \/\/ FIXME: cache NXDOMAIN responses responsibly\n\t\t\t\t}\n\t\t\t\tr.cacheSave(rmsg.Answer...)\n\t\t\t\tr.cacheSave(rmsg.Ns...)\n\t\t\t\tr.cacheSave(rmsg.Extra...)\n\t\t\t\tif rrs := r.cacheGet(qname, qtype); rrs != nil {\n\t\t\t\t\tinject(c, rrs...)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t}\n\t\tfor _, crr := range r.cacheGet(qname, dns.TypeCNAME) {\n\t\t\tcn, ok := crr.(*dns.CNAME)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor rr := range r.Resolve(cn.Target, qtype) {\n\t\t\t\tr.cacheAdd(qname, qtype, rr)\n\t\t\t\tc <- rr\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}\n\nfunc inject(c chan<- dns.RR, rrs ...dns.RR) {\n\tfor _, rr := range rrs {\n\t\tc <- rr\n\t}\n}\n\nfunc parent(name string) (string, bool) {\n\tlabels := dns.SplitDomainName(name)\n\tif labels == nil {\n\t\treturn \"\", false\n\t}\n\treturn toLowerFQDN(strings.Join(labels[1:], \".\")), true\n}\n\nfunc toLowerFQDN(name string) string {\n\treturn dns.Fqdn(strings.ToLower(name))\n}\n\ntype key struct {\n\tqname string\n\tqtype uint16\n}\n\ntype entry struct {\n\tm sync.RWMutex\n\texp time.Time\n\trrs map[dns.RR]struct{}\n}\n\nfunc (r *Resolver) cacheRoot() {\n\tfor t := range dns.ParseZone(strings.NewReader(root), \"\", \"\") {\n\t\tif t.Error == nil {\n\t\t\tr.cacheSave(t.RR)\n\t\t}\n\t}\n}\n\n\/\/ cacheGet returns a randomly ordered slice of DNS records.\nfunc (r *Resolver) cacheGet(qname string, qtype uint16) []dns.RR {\n\te := r.getEntry(qname, qtype)\n\tif e == nil {\n\t\treturn nil\n\t}\n\te.m.RLock()\n\tdefer e.m.RUnlock()\n\trrs := make([]dns.RR, len(e.rrs))\n\tfor rr, _ := range e.rrs {\n\t\trrs = append(rrs, rr)\n\t}\n\treturn rrs\n}\n\n\/\/ cacheSave saves 1 or more DNS records to the resolver cache.\nfunc (r *Resolver) cacheSave(rrs ...dns.RR) {\n\tfor _, rr := range rrs {\n\t\th := rr.Header()\n\t\tr.cacheAdd(toLowerFQDN(h.Name), h.Rrtype, rr)\n\t}\n}\n\n\/\/ cacheAdd adds 0 or more DNS records to the resolver cache for a specific\n\/\/ domain name and record type. This ensures the cache entry exists, even\n\/\/ if empty, for NXDOMAIN responses.\nfunc (r *Resolver) cacheAdd(qname string, qtype uint16, rrs ...dns.RR) {\n\tnow := time.Now()\n\te := r.getEntry(qname, qtype)\n\tif e == nil {\n\t\te = &entry{\n\t\t\texp: now.Add(24 * time.Hour),\n\t\t\trrs: make(map[dns.RR]struct{}, 0),\n\t\t}\n\t\tr.cache.Add(key{qname, qtype}, e)\n\t}\n\te.m.Lock()\n\tdefer e.m.Unlock()\n\tfor _, rr := range rrs {\n\t\t\/\/ fmt.Printf(\"%s\\n\", rr.String())\n\t\te.rrs[rr] = struct{}{}\n\t\texp := now.Add(time.Duration(rr.Header().Ttl) * time.Second)\n\t\tif exp.Before(e.exp) {\n\t\t\te.exp = exp\n\t\t}\n\t}\n}\n\nfunc (r *Resolver) getEntry(qname string, qtype uint16) *entry {\n\tc, ok := r.cache.Get(key{qname, qtype})\n\tif !ok {\n\t\treturn nil\n\t}\n\te := c.(*entry)\n\tif time.Now().After(e.exp) {\n\t\treturn nil\n\t}\n\tfmt.Printf(\"; CACHE HIT: %s\\n\", qname)\n\treturn e\n}\n<commit_msg>less logging<commit_after>package dnsr\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlru \"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/miekg\/dns\"\n)\n\ntype Resolver struct {\n\tcache *lru.Cache\n\tclient *dns.Client\n}\n\nfunc New(size int) *Resolver {\n\tif size < 0 {\n\t\tsize = 10000\n\t}\n\tcache, _ := lru.New(size)\n\tr := &Resolver{\n\t\tclient: &dns.Client{},\n\t\tcache: cache,\n\t}\n\tr.cacheRoot()\n\treturn r\n}\n\nfunc (r *Resolver) Resolve(qname string, qtype uint16) <-chan dns.RR {\n\tc := make(chan dns.RR, 20)\n\tgo func() {\n\t\tq := &dns.Question{qname, qtype, dns.ClassINET}\n\t\tdefer fmt.Printf(\";; QUESTION:\\n%s\\n\\n\\n\", q.String())\n\t\tdefer close(c)\n\t\tqname = toLowerFQDN(qname)\n\t\tif rrs := r.cacheGet(qname, qtype); rrs != nil {\n\t\t\tinject(c, rrs...)\n\t\t\treturn\n\t\t}\n\t\tpname, ok := parent(qname)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tfor nrr := range r.Resolve(pname, dns.TypeNS) {\n\t\t\tns, ok := nrr.(*dns.NS)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\touter:\n\t\t\tfor arr := range r.Resolve(ns.Ns, dns.TypeA) {\n\t\t\t\ta, ok := arr.(*dns.A)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\taddr := a.A.String() + \":53\"\n\t\t\t\tqmsg := &dns.Msg{}\n\t\t\t\tqmsg.SetQuestion(qname, qtype)\n\t\t\t\tqmsg.MsgHdr.RecursionDesired = false\n\t\t\t\trmsg, _, err := r.client.Exchange(qmsg, addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"; ERROR querying DNS server %s for %s: %s\\n\", addr, qname, err.Error())\n\t\t\t\t\tcontinue \/\/ FIXME: handle errors better from flaky\/failing NS servers\n\t\t\t\t}\n\t\t\t\tif rmsg.Rcode == dns.RcodeNameError {\n\t\t\t\t\tr.cacheAdd(qname, qtype) \/\/ FIXME: cache NXDOMAIN responses responsibly\n\t\t\t\t}\n\t\t\t\tr.cacheSave(rmsg.Answer...)\n\t\t\t\tr.cacheSave(rmsg.Ns...)\n\t\t\t\tr.cacheSave(rmsg.Extra...)\n\t\t\t\tif rrs := r.cacheGet(qname, qtype); rrs != nil {\n\t\t\t\t\tinject(c, rrs...)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t}\n\t\tfor _, crr := range r.cacheGet(qname, dns.TypeCNAME) {\n\t\t\tcn, ok := crr.(*dns.CNAME)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor rr := range r.Resolve(cn.Target, qtype) {\n\t\t\t\tr.cacheAdd(qname, qtype, rr)\n\t\t\t\tc <- rr\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}\n\nfunc inject(c chan<- dns.RR, rrs ...dns.RR) {\n\tfor _, rr := range rrs {\n\t\tc <- rr\n\t}\n}\n\nfunc parent(name string) (string, bool) {\n\tlabels := dns.SplitDomainName(name)\n\tif labels == nil {\n\t\treturn \"\", false\n\t}\n\treturn toLowerFQDN(strings.Join(labels[1:], \".\")), true\n}\n\nfunc toLowerFQDN(name string) string {\n\treturn dns.Fqdn(strings.ToLower(name))\n}\n\ntype key struct {\n\tqname string\n\tqtype uint16\n}\n\ntype entry struct {\n\tm sync.RWMutex\n\texp time.Time\n\trrs map[dns.RR]struct{}\n}\n\nfunc (r *Resolver) cacheRoot() {\n\tfor t := range dns.ParseZone(strings.NewReader(root), \"\", \"\") {\n\t\tif t.Error == nil {\n\t\t\tr.cacheSave(t.RR)\n\t\t}\n\t}\n}\n\n\/\/ cacheGet returns a randomly ordered slice of DNS records.\nfunc (r *Resolver) cacheGet(qname string, qtype uint16) []dns.RR {\n\te := r.getEntry(qname, qtype)\n\tif e == nil {\n\t\treturn nil\n\t}\n\te.m.RLock()\n\tdefer e.m.RUnlock()\n\trrs := make([]dns.RR, len(e.rrs))\n\tfor rr, _ := range e.rrs {\n\t\trrs = append(rrs, rr)\n\t}\n\treturn rrs\n}\n\n\/\/ cacheSave saves 1 or more DNS records to the resolver cache.\nfunc (r *Resolver) cacheSave(rrs ...dns.RR) {\n\tfor _, rr := range rrs {\n\t\th := rr.Header()\n\t\tr.cacheAdd(toLowerFQDN(h.Name), h.Rrtype, rr)\n\t}\n}\n\n\/\/ cacheAdd adds 0 or more DNS records to the resolver cache for a specific\n\/\/ domain name and record type. This ensures the cache entry exists, even\n\/\/ if empty, for NXDOMAIN responses.\nfunc (r *Resolver) cacheAdd(qname string, qtype uint16, rrs ...dns.RR) {\n\tnow := time.Now()\n\te := r.getEntry(qname, qtype)\n\tif e == nil {\n\t\te = &entry{\n\t\t\texp: now.Add(24 * time.Hour),\n\t\t\trrs: make(map[dns.RR]struct{}, 0),\n\t\t}\n\t\tr.cache.Add(key{qname, qtype}, e)\n\t}\n\te.m.Lock()\n\tdefer e.m.Unlock()\n\tfor _, rr := range rrs {\n\t\t\/\/ fmt.Printf(\"%s\\n\", rr.String())\n\t\te.rrs[rr] = struct{}{}\n\t\texp := now.Add(time.Duration(rr.Header().Ttl) * time.Second)\n\t\tif exp.Before(e.exp) {\n\t\t\te.exp = exp\n\t\t}\n\t}\n}\n\nfunc (r *Resolver) getEntry(qname string, qtype uint16) *entry {\n\tc, ok := r.cache.Get(key{qname, qtype})\n\tif !ok {\n\t\treturn nil\n\t}\n\te := c.(*entry)\n\tif time.Now().After(e.exp) {\n\t\treturn nil\n\t}\n\tfmt.Printf(\"; CACHE HIT: %s\\n\", qname)\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>package git\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Edit opens a file in the users editor and returns the title and body. It\n\/\/ store a temporary file in your .git directory or \/tmp if accessed outside of\n\/\/ a git repo.\nfunc Edit(filePrefix, message string) (string, string, error) {\n\tvar (\n\t\tdir string\n\t\terr error\n\t)\n\tif InsideGitRepo() {\n\t\tdir, err = GitDir()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t} else {\n\t\tdir = \"\/tmp\"\n\t}\n\tfilePath := filepath.Join(dir, fmt.Sprintf(\"%s_EDITMSG\", filePrefix))\n\teditorPath, err := editorPath()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer os.Remove(filePath)\n\n\t\/\/ Write generated\/template message to file\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) && message != \"\" {\n\t\terr = ioutil.WriteFile(filePath, []byte(message), 0644)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\tcmd := editorCMD(editorPath, filePath)\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tcontents, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn parseTitleBody(strings.TrimSpace(string(contents)))\n}\n\nfunc editorPath() (string, error) {\n\tcmd := New(\"var\", \"GIT_EDITOR\")\n\tcmd.Stdout = nil\n\te, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(e)), nil\n}\n\nfunc editorCMD(editorPath, filePath string) *exec.Cmd {\n\tparts := strings.Split(editorPath, \" \")\n\tr := regexp.MustCompile(\"[nmg]?vi[m]?$\")\n\targs := make([]string, 0, 3)\n\tif r.MatchString(editorPath) {\n\t\targs = append(args, \"--cmd\", \"set ft=gitcommit tw=0 wrap lbr\")\n\t}\n\targs = append(args, parts[1:]...)\n\targs = append(args, filePath)\n\tcmd := exec.Command(parts[0], args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\nfunc parseTitleBody(message string) (string, string, error) {\n\t\/\/ Grab all the lines that don't start with the comment char\n\tcc := CommentChar()\n\tr := regexp.MustCompile(`(?m:^)[^` + cc + `].*(?m:$)`)\n\tcr := regexp.MustCompile(`(?m:^)\\s*` + cc)\n\tparts := r.FindAllString(message, -1)\n\tnoComments := make([]string, 0)\n\tfor _, p := range parts {\n\t\tif !cr.MatchString(p) {\n\t\t\tnoComments = append(noComments, p)\n\t\t}\n\t}\n\tmsg := strings.Join(noComments, \"\\n\")\n\tif strings.TrimSpace(msg) == \"\" {\n\t\treturn \"\", \"\", nil\n\t}\n\n\tr = regexp.MustCompile(`\\n\\s*\\n`)\n\tmsg = strings.Replace(msg, \"\\\\#\", \"#\", -1)\n\tparts = r.Split(msg, 2)\n\n\tif strings.Contains(parts[0], \"\\n\") {\n\t\treturn \"\\n\", parts[0], nil\n\t}\n\tif len(parts) < 2 {\n\t\treturn parts[0], \"\", nil\n\t}\n\treturn parts[0], parts[1], nil\n}\n<commit_msg>refactor git\/edit to prepare for issue notes<commit_after>package git\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Edit opens a file in the users editor and returns the title and body.\nfunc Edit(filePrefix, message string) (string, string, error) {\n\tcontents, err := EditFile(filePrefix, message)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn parseTitleBody(strings.TrimSpace(string(contents)))\n}\n\n\/\/ EditFile opens a file in the users editor and returns the contents. It\n\/\/ stores a temporary file in your .git directory or \/tmp if accessed outside of\n\/\/ a git repo.\nfunc EditFile(filePrefix, message string) (string, error) {\n\tvar (\n\t\tdir string\n\t\terr error\n\t)\n\tif InsideGitRepo() {\n\t\tdir, err = GitDir()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tdir = \"\/tmp\"\n\t}\n\tfilePath := filepath.Join(dir, fmt.Sprintf(\"%s_EDITMSG\", filePrefix))\n\teditorPath, err := editorPath()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer os.Remove(filePath)\n\n\t\/\/ Write generated\/template message to file\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) && message != \"\" {\n\t\terr = ioutil.WriteFile(filePath, []byte(message), 0644)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tcmd := editorCMD(editorPath, filePath)\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontents, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn removeComments(string(contents))\n}\n\nfunc editorPath() (string, error) {\n\tcmd := New(\"var\", \"GIT_EDITOR\")\n\tcmd.Stdout = nil\n\te, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(e)), nil\n}\n\nfunc editorCMD(editorPath, filePath string) *exec.Cmd {\n\tparts := strings.Split(editorPath, \" \")\n\tr := regexp.MustCompile(\"[nmg]?vi[m]?$\")\n\targs := make([]string, 0, 3)\n\tif r.MatchString(editorPath) {\n\t\targs = append(args, \"--cmd\", \"set ft=gitcommit tw=0 wrap lbr\")\n\t}\n\targs = append(args, parts[1:]...)\n\targs = append(args, filePath)\n\tcmd := exec.Command(parts[0], args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\nfunc removeComments(message string) (string, error) {\n\t\/\/ Grab all the lines that don't start with the comment char\n\tcc := CommentChar()\n\tr := regexp.MustCompile(`(?m:^)[^` + cc + `].*(?m:$)`)\n\tcr := regexp.MustCompile(`(?m:^)\\s*` + cc)\n\tparts := r.FindAllString(message, -1)\n\tnoComments := make([]string, 0)\n\tfor _, p := range parts {\n\t\tif !cr.MatchString(p) {\n\t\t\tnoComments = append(noComments, p)\n\t\t}\n\t}\n\treturn strings.TrimSpace(strings.Join(noComments, \"\\n\")), nil\n}\n\nfunc parseTitleBody(message string) (string, string, error) {\n\tmsg, err := removeComments(message)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif msg == \"\" {\n\t\treturn \"\", \"\", nil\n\t}\n\n\tr := regexp.MustCompile(`\\n\\s*\\n`)\n\tmsg = strings.Replace(msg, \"\\\\#\", \"#\", -1)\n\tparts := r.Split(msg, 2)\n\n\tif strings.Contains(parts[0], \"\\n\") {\n\t\treturn \"\\n\", parts[0], nil\n\t}\n\tif len(parts) < 2 {\n\t\treturn parts[0], \"\", nil\n\t}\n\treturn parts[0], parts[1], nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by the Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\n\/\/ This file has code for accessing metadata.\n\/\/\n\/\/ References:\n\/\/\thttps:\/\/cloud.google.com\/compute\/docs\/metadata\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nconst (\n\tmetadataHost = \"metadata\"\n\tmetadataPath = \"\/computeMetadata\/v1\/\"\n)\n\nvar (\n\tmetadataRequestHeaders = http.Header{\n\t\t\"Metadata-Flavor\": []string{\"Google\"},\n\t}\n)\n\n\/\/ TODO(dsymonds): Do we need to support default values, like Python?\nfunc mustGetMetadata(key string) []byte {\n\tb, err := getMetadata(key)\n\tif err != nil {\n\t\tlog.Fatalf(\"Metadata fetch failed: %v\", err)\n\t}\n\treturn b\n}\n\nfunc getMetadata(key string) ([]byte, error) {\n\t\/\/ TODO(dsymonds): May need to use url.Parse to support keys with query args.\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost: metadataHost,\n\t\t\tPath: metadataPath + key,\n\t\t},\n\t\tHeader: metadataRequestHeaders,\n\t\tHost: metadataHost,\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"metadata server returned HTTP %d\", resp.StatusCode)\n\t}\n\treturn ioutil.ReadAll(resp.Body)\n}\n<commit_msg>[internal] Panic instead of log.Fatal when metadata fetch fails<commit_after>\/\/ Copyright 2014 Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by the Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\n\/\/ This file has code for accessing metadata.\n\/\/\n\/\/ References:\n\/\/\thttps:\/\/cloud.google.com\/compute\/docs\/metadata\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nconst (\n\tmetadataHost = \"metadata\"\n\tmetadataPath = \"\/computeMetadata\/v1\/\"\n)\n\nvar (\n\tmetadataRequestHeaders = http.Header{\n\t\t\"Metadata-Flavor\": []string{\"Google\"},\n\t}\n)\n\n\/\/ TODO(dsymonds): Do we need to support default values, like Python?\nfunc mustGetMetadata(key string) []byte {\n\tb, err := getMetadata(key)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Metadata fetch failed: %v\", err))\n\t}\n\treturn b\n}\n\nfunc getMetadata(key string) ([]byte, error) {\n\t\/\/ TODO(dsymonds): May need to use url.Parse to support keys with query args.\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost: metadataHost,\n\t\t\tPath: metadataPath + key,\n\t\t},\n\t\tHeader: metadataRequestHeaders,\n\t\tHost: metadataHost,\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"metadata server returned HTTP %d\", resp.StatusCode)\n\t}\n\treturn ioutil.ReadAll(resp.Body)\n}\n<|endoftext|>"} {"text":"<commit_before>package yum\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/gonuts\/logger\"\n)\n\n\/\/ global registry of known backends\nvar g_backends = make(map[string]func(repo *Repository) (Backend, error))\n\n\/\/ NewBackend returns a new backend of type \"backend\"\nfunc NewBackend(backend string, repo *Repository) (Backend, error) {\n\tfactory, ok := g_backends[backend]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"yum: no such backend [%s]\", backend)\n\t}\n\treturn factory(repo)\n}\n\n\/\/ Backend queries a YUM DB repository\ntype Backend interface {\n\n\t\/\/ YumDataType returns the ID for the data type as used in the repomd.xml file\n\tYumDataType() string\n\n\t\/\/ Download the DB from server\n\tGetLatestDB(url string) error\n\n\t\/\/ Check whether the DB is there\n\tHasDB() bool\n\n\t\/\/ Load loads the DB\n\tLoadDB() error\n\n\t\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\n\tFindLatestMatchingName(name, version, release string) (*Package, error)\n\n\t\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\n\tFindLatestMatchingRequire(requirement string) (*Package, error)\n\n\t\/\/ GetPackages returns all the packages known by a YUM repository\n\tGetPackages() []*Package\n}\n\n\/\/ Repository represents a YUM repository with all associated metadata.\ntype Repository struct {\n\tmsg *logger.Logger\n\tName string\n\tRepoUrl string\n\tRepoMdUrl string\n\tLocalRepoMdXml string\n\tCacheDir string\n\tBackends []string\n\tBackend Backend\n}\n\n\/\/ NewRepository create a new Repository with name and from url.\nfunc NewRepository(name, url, cachedir string, backends []string, setupBackend, checkForUpdates bool) (*Repository, error) {\n\n\trepo := Repository{\n\t\tmsg: logger.NewLogger(\"yum\", logger.INFO, os.Stdout),\n\t\tName: name,\n\t\tRepoUrl: url,\n\t\tRepoMdUrl: url + \"\/repodata\/repomd.xml\",\n\t\tLocalRepoMdXml: filepath.Join(cachedir, \"repomd.xml\"),\n\t\tCacheDir: cachedir,\n\t\tBackends: make([]string, len(backends)),\n\t}\n\tcopy(repo.Backends, backends)\n\n\terr := os.MkdirAll(cachedir, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ load appropriate backend if requested\n\tif setupBackend {\n\t\tif checkForUpdates {\n\t\t\terr = repo.setupBackendFromRemote()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\terr = repo.setupBackendFromLocal()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn &repo, err\n}\n\n\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\nfunc (repo *Repository) FindLatestMatchingName(name, version, release string) (*Package, error) {\n\treturn repo.Backend.FindLatestMatchingName(name, version, release)\n}\n\n\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\nfunc (repo *Repository) FindLatestMatchingRequire(requirement string) (*Package, error) {\n\treturn repo.Backend.FindLatestMatchingRequire(requirement)\n}\n\n\/\/ GetPackages returns all the packages known by a YUM repository\nfunc (repo *Repository) GetPackages() []*Package {\n\treturn repo.Backend.GetPackages()\n}\n\n\/\/ setupBackendFromRemote checks which backend should be used and updates the DB files.\nfunc (repo *Repository) setupBackendFromRemote() error {\n\tvar err error\n\tvar backend Backend\n\t\/\/ get repo metadata with list of available files\n\tremotedata, err := repo.remoteMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremotemd, err := repo.checkRepoMD(remotedata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocaldata, err := repo.localMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocalmd, err := repo.checkRepoMD(localdata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, bname := range repo.Backends {\n\t\trepo.msg.Infof(\"checking availability of backend [%s]\\n\", bname)\n\t\tba, err := NewBackend(bname, repo)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\trrepomd, ok := remotemd[ba.YumDataType()]\n\t\tif !ok {\n\t\t\trepo.msg.Warnf(\"remote repository does not provide [%s] DB\\n\", bname)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ a priori a match\n\t\tbackend = ba\n\t\trepo.Backend = backend\n\n\t\tlrepomd, ok := localmd[ba.YumDataType()]\n\t\tif !ok {\n\t\t\t\/\/ doesn't matter, we download the DB in any case\n\t\t}\n\n\t\tif !repo.Backend.HasDB() || rrepomd.Timestamp.After(lrepomd.Timestamp) {\n\t\t\t\/\/ we need to update the DB\n\t\t\turl := repo.RepoUrl + \"\/\" + rrepomd.Location\n\t\t\trepo.msg.Infof(\"updating the RPM database for %s\\n\", bname)\n\t\t\terr = repo.Backend.GetLatestDB(url)\n\t\t\tif err != nil {\n\t\t\t\trepo.msg.Warnf(\"problem updating RPM database for backend [%s]: %v\\n\", bname, err)\n\t\t\t\terr = nil\n\t\t\t\tbackend = nil\n\t\t\t\trepo.Backend = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ save metadata to local repomd file\n\t\t\terr = ioutil.WriteFile(repo.LocalRepoMdXml, remotedata, 0644)\n\t\t\tif err != nil {\n\t\t\t\trepo.msg.Warnf(\"problem updating local repomd.xml file for backend [%s]: %v\\n\", bname, err)\n\t\t\t\terr = nil\n\t\t\t\tbackend = nil\n\t\t\t\trepo.Backend = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ load data necessary for the backend\n\t\terr = repo.Backend.LoadDB()\n\t\tif err != nil {\n\t\t\trepo.msg.Warnf(\"problem loading data for backend [%s]: %v\\n\", bname, err)\n\t\t\terr = nil\n\t\t\tbackend = nil\n\t\t\trepo.Backend = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ stop at first one found\n\t\tbreak\n\t}\n\n\tif backend == nil {\n\t\trepo.msg.Errorf(\"No valid backend found\\n\")\n\t\treturn fmt.Errorf(\"No valid backend found\")\n\t}\n\n\trepo.msg.Infof(\"repository [%s] - chosen backend [%T]\\n\", repo.Name, repo.Backend)\n\treturn err\n}\n\nfunc (repo *Repository) setupBackendFromLocal() error {\n\tvar err error\n\tdata, err := repo.localMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmd, err := repo.checkRepoMD(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar backend Backend\n\tfor _, bname := range repo.Backends {\n\t\trepo.msg.Infof(\"checking availability of backend [%s]\\n\", bname)\n\t\tba, err := NewBackend(bname, repo)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t_ \/*repomd*\/, ok := md[ba.YumDataType()]\n\t\tif !ok {\n\t\t\trepo.msg.Warnf(\"local repository does not provide [%s] DB\\n\", bname)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ a priori a match\n\t\tbackend = ba\n\t\trepo.Backend = backend\n\n\t\t\/\/ loading data necessary for the backend\n\t\terr = repo.Backend.LoadDB()\n\t\tif err != nil {\n\t\t\trepo.msg.Warnf(\"problem loading data for backend [%s]: %v\\n\", bname, err)\n\t\t\terr = nil\n\t\t\tbackend = nil\n\t\t\trepo.Backend = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ stop at first one found.\n\t\tbreak\n\t}\n\n\tif backend == nil {\n\t\trepo.msg.Errorf(\"No valid backend found\\n\")\n\t\treturn fmt.Errorf(\"No valid backend found\")\n\t}\n\n\trepo.msg.Infof(\"repository [%s] - chosen backend [%T]\\n\", repo.Name, repo.Backend)\n\treturn err\n}\n\n\/\/ remoteMetadata retrieves the repo metadata file content\nfunc (repo *Repository) remoteMetadata() ([]byte, error) {\n\tresp, err := http.Get(repo.RepoMdUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbuf := new(bytes.Buffer)\n\t_, err = io.Copy(buf, resp.Body)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), err\n}\n\n\/\/ localMetadata retrieves the repo metadata from the repomd file\nfunc (repo *Repository) localMetadata() ([]byte, error) {\n\tif !path_exists(repo.LocalRepoMdXml) {\n\t\treturn nil, nil\n\t}\n\tf, err := os.Open(repo.LocalRepoMdXml)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tbuf := new(bytes.Buffer)\n\t_, err = io.Copy(buf, f)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), err\n}\n\n\/\/ checkRepoMD parses the Repository metadata XML content\nfunc (repo *Repository) checkRepoMD(data []byte) (map[string]RepoMD, error) {\n\n\tif len(data) <= 0 {\n\t\treturn nil, nil\n\t}\n\n\ttype xmlTree struct {\n\t\tXMLName xml.Name `xml:\"repomd\"`\n\t\tData []struct {\n\t\t\tType string `xml:\"type,attr\"`\n\t\t\tChecksum string `xml:\"checksum\"`\n\t\t\tLocation struct {\n\t\t\t\tHref string `xml:\"href,attr\"`\n\t\t\t} `xml:\"location\"`\n\t\t\tTimestamp float64 `xml:\"timestamp\"`\n\t\t}\n\t}\n\n\tvar tree xmlTree\n\terr := xml.Unmarshal(data, &tree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb := make(map[string]RepoMD)\n\tfor _, data := range tree.Data {\n\t\tsec := int64(math.Floor(data.Timestamp))\n\t\tnsec := int64((data.Timestamp - float64(sec)) * 1e9)\n\t\tdb[data.Type] = RepoMD{\n\t\t\tChecksum: data.Checksum,\n\t\t\tTimestamp: time.Unix(sec, nsec),\n\t\t\tLocation: data.Location.Href,\n\t\t}\n\t\trepo.msg.Infof(\">>> %s: %v\\n\", data.Type, db[data.Type])\n\t}\n\treturn db, err\n}\n\ntype RepoMD struct {\n\tChecksum string\n\tTimestamp time.Time\n\tLocation string\n}\n\n\/\/ EOF\n<commit_msg>yum: debug printouts<commit_after>package yum\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/gonuts\/logger\"\n)\n\n\/\/ global registry of known backends\nvar g_backends = make(map[string]func(repo *Repository) (Backend, error))\n\n\/\/ NewBackend returns a new backend of type \"backend\"\nfunc NewBackend(backend string, repo *Repository) (Backend, error) {\n\tfactory, ok := g_backends[backend]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"yum: no such backend [%s]\", backend)\n\t}\n\treturn factory(repo)\n}\n\n\/\/ Backend queries a YUM DB repository\ntype Backend interface {\n\n\t\/\/ YumDataType returns the ID for the data type as used in the repomd.xml file\n\tYumDataType() string\n\n\t\/\/ Download the DB from server\n\tGetLatestDB(url string) error\n\n\t\/\/ Check whether the DB is there\n\tHasDB() bool\n\n\t\/\/ Load loads the DB\n\tLoadDB() error\n\n\t\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\n\tFindLatestMatchingName(name, version, release string) (*Package, error)\n\n\t\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\n\tFindLatestMatchingRequire(requirement string) (*Package, error)\n\n\t\/\/ GetPackages returns all the packages known by a YUM repository\n\tGetPackages() []*Package\n}\n\n\/\/ Repository represents a YUM repository with all associated metadata.\ntype Repository struct {\n\tmsg *logger.Logger\n\tName string\n\tRepoUrl string\n\tRepoMdUrl string\n\tLocalRepoMdXml string\n\tCacheDir string\n\tBackends []string\n\tBackend Backend\n}\n\n\/\/ NewRepository create a new Repository with name and from url.\nfunc NewRepository(name, url, cachedir string, backends []string, setupBackend, checkForUpdates bool) (*Repository, error) {\n\n\trepo := Repository{\n\t\tmsg: logger.NewLogger(\"yum\", logger.INFO, os.Stdout),\n\t\tName: name,\n\t\tRepoUrl: url,\n\t\tRepoMdUrl: url + \"\/repodata\/repomd.xml\",\n\t\tLocalRepoMdXml: filepath.Join(cachedir, \"repomd.xml\"),\n\t\tCacheDir: cachedir,\n\t\tBackends: make([]string, len(backends)),\n\t}\n\tcopy(repo.Backends, backends)\n\n\terr := os.MkdirAll(cachedir, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ load appropriate backend if requested\n\tif setupBackend {\n\t\tif checkForUpdates {\n\t\t\terr = repo.setupBackendFromRemote()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\terr = repo.setupBackendFromLocal()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn &repo, err\n}\n\n\/\/ FindLatestMatchingName locats a package by name, returns the latest available version.\nfunc (repo *Repository) FindLatestMatchingName(name, version, release string) (*Package, error) {\n\treturn repo.Backend.FindLatestMatchingName(name, version, release)\n}\n\n\/\/ FindLatestMatchingRequire locates a package providing a given functionality.\nfunc (repo *Repository) FindLatestMatchingRequire(requirement string) (*Package, error) {\n\treturn repo.Backend.FindLatestMatchingRequire(requirement)\n}\n\n\/\/ GetPackages returns all the packages known by a YUM repository\nfunc (repo *Repository) GetPackages() []*Package {\n\treturn repo.Backend.GetPackages()\n}\n\n\/\/ setupBackendFromRemote checks which backend should be used and updates the DB files.\nfunc (repo *Repository) setupBackendFromRemote() error {\n\trepo.msg.Infof(\"setupBackendFromRemote...\\n\")\n\tvar err error\n\tvar backend Backend\n\t\/\/ get repo metadata with list of available files\n\tremotedata, err := repo.remoteMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremotemd, err := repo.checkRepoMD(remotedata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocaldata, err := repo.localMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocalmd, err := repo.checkRepoMD(localdata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, bname := range repo.Backends {\n\t\trepo.msg.Infof(\"checking availability of backend [%s]\\n\", bname)\n\t\tba, err := NewBackend(bname, repo)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\trrepomd, ok := remotemd[ba.YumDataType()]\n\t\tif !ok {\n\t\t\trepo.msg.Warnf(\"remote repository does not provide [%s] DB\\n\", bname)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ a priori a match\n\t\tbackend = ba\n\t\trepo.Backend = backend\n\n\t\tlrepomd, ok := localmd[ba.YumDataType()]\n\t\tif !ok {\n\t\t\t\/\/ doesn't matter, we download the DB in any case\n\t\t}\n\n\t\tif !repo.Backend.HasDB() || rrepomd.Timestamp.After(lrepomd.Timestamp) {\n\t\t\t\/\/ we need to update the DB\n\t\t\turl := repo.RepoUrl + \"\/\" + rrepomd.Location\n\t\t\trepo.msg.Infof(\"updating the RPM database for %s\\n\", bname)\n\t\t\terr = repo.Backend.GetLatestDB(url)\n\t\t\tif err != nil {\n\t\t\t\trepo.msg.Warnf(\"problem updating RPM database for backend [%s]: %v\\n\", bname, err)\n\t\t\t\terr = nil\n\t\t\t\tbackend = nil\n\t\t\t\trepo.Backend = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ save metadata to local repomd file\n\t\t\terr = ioutil.WriteFile(repo.LocalRepoMdXml, remotedata, 0644)\n\t\t\tif err != nil {\n\t\t\t\trepo.msg.Warnf(\"problem updating local repomd.xml file for backend [%s]: %v\\n\", bname, err)\n\t\t\t\terr = nil\n\t\t\t\tbackend = nil\n\t\t\t\trepo.Backend = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ load data necessary for the backend\n\t\terr = repo.Backend.LoadDB()\n\t\tif err != nil {\n\t\t\trepo.msg.Warnf(\"problem loading data for backend [%s]: %v\\n\", bname, err)\n\t\t\terr = nil\n\t\t\tbackend = nil\n\t\t\trepo.Backend = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ stop at first one found\n\t\tbreak\n\t}\n\n\tif backend == nil {\n\t\trepo.msg.Errorf(\"No valid backend found\\n\")\n\t\treturn fmt.Errorf(\"No valid backend found\")\n\t}\n\n\trepo.msg.Infof(\"repository [%s] - chosen backend [%T]\\n\", repo.Name, repo.Backend)\n\treturn err\n}\n\nfunc (repo *Repository) setupBackendFromLocal() error {\n\trepo.msg.Infof(\"setupBackendFromLocal...\\n\")\n\tvar err error\n\tdata, err := repo.localMetadata()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmd, err := repo.checkRepoMD(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar backend Backend\n\tfor _, bname := range repo.Backends {\n\t\trepo.msg.Infof(\"checking availability of backend [%s]\\n\", bname)\n\t\tba, err := NewBackend(bname, repo)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t_ \/*repomd*\/, ok := md[ba.YumDataType()]\n\t\tif !ok {\n\t\t\trepo.msg.Warnf(\"local repository does not provide [%s] DB\\n\", bname)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ a priori a match\n\t\tbackend = ba\n\t\trepo.Backend = backend\n\n\t\t\/\/ loading data necessary for the backend\n\t\terr = repo.Backend.LoadDB()\n\t\tif err != nil {\n\t\t\trepo.msg.Warnf(\"problem loading data for backend [%s]: %v\\n\", bname, err)\n\t\t\terr = nil\n\t\t\tbackend = nil\n\t\t\trepo.Backend = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ stop at first one found.\n\t\tbreak\n\t}\n\n\tif backend == nil {\n\t\trepo.msg.Errorf(\"No valid backend found\\n\")\n\t\treturn fmt.Errorf(\"No valid backend found\")\n\t}\n\n\trepo.msg.Infof(\"repository [%s] - chosen backend [%T]\\n\", repo.Name, repo.Backend)\n\treturn err\n}\n\n\/\/ remoteMetadata retrieves the repo metadata file content\nfunc (repo *Repository) remoteMetadata() ([]byte, error) {\n\tresp, err := http.Get(repo.RepoMdUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbuf := new(bytes.Buffer)\n\t_, err = io.Copy(buf, resp.Body)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), err\n}\n\n\/\/ localMetadata retrieves the repo metadata from the repomd file\nfunc (repo *Repository) localMetadata() ([]byte, error) {\n\tif !path_exists(repo.LocalRepoMdXml) {\n\t\treturn nil, nil\n\t}\n\tf, err := os.Open(repo.LocalRepoMdXml)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tbuf := new(bytes.Buffer)\n\t_, err = io.Copy(buf, f)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), err\n}\n\n\/\/ checkRepoMD parses the Repository metadata XML content\nfunc (repo *Repository) checkRepoMD(data []byte) (map[string]RepoMD, error) {\n\n\tif len(data) <= 0 {\n\t\treturn nil, nil\n\t}\n\n\ttype xmlTree struct {\n\t\tXMLName xml.Name `xml:\"repomd\"`\n\t\tData []struct {\n\t\t\tType string `xml:\"type,attr\"`\n\t\t\tChecksum string `xml:\"checksum\"`\n\t\t\tLocation struct {\n\t\t\t\tHref string `xml:\"href,attr\"`\n\t\t\t} `xml:\"location\"`\n\t\t\tTimestamp float64 `xml:\"timestamp\"`\n\t\t}\n\t}\n\n\tvar tree xmlTree\n\terr := xml.Unmarshal(data, &tree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb := make(map[string]RepoMD)\n\tfor _, data := range tree.Data {\n\t\tsec := int64(math.Floor(data.Timestamp))\n\t\tnsec := int64((data.Timestamp - float64(sec)) * 1e9)\n\t\tdb[data.Type] = RepoMD{\n\t\t\tChecksum: data.Checksum,\n\t\t\tTimestamp: time.Unix(sec, nsec),\n\t\t\tLocation: data.Location.Href,\n\t\t}\n\t\trepo.msg.Infof(\">>> %s: %v\\n\", data.Type, db[data.Type])\n\t}\n\treturn db, err\n}\n\ntype RepoMD struct {\n\tChecksum string\n\tTimestamp time.Time\n\tLocation string\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\tmesos \"github.com\/boldfield\/go-mesos\"\n\t\"log\"\n)\n\ntype FrameworkMap map[string]*mesos.Framework\n\ntype Mesos struct {\n\tHost string\n\tCluster *mesos.Cluster\n\t_client *mesos.Client\n\tinitialized bool\n}\n\ntype MesosSlave struct {\n\tSlave *mesos.Slave\n}\n\nfunc (m *Mesos) Client() *mesos.Client {\n\tif m._client != nil {\n\t\treturn m._client\n\t}\n\n\tconfig := mesos.NewDefaultConfig()\n\tconfig.DiscoveryURL = m.Host\n\tm._client = mesos.NewClient(config)\n\treturn m._client\n}\n\n\/\/ Loads the entire cluster, including information about slaves\nfunc (m *Mesos) LoadCluster(c *mesos.Client) error {\n\tif err := m.LoadClusterInfo(c); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.Cluster.LoadSlaveStates(c); err != nil {\n\t\tlog.Printf(\"An error was encountered loading slave states: %s\", err)\n\t\treturn err\n\t}\n\n\tif err := m.Cluster.LoadSlaveStats(c); err != nil {\n\t\tlog.Printf(\"An error was encountered loading slave states: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Loads information about the cluster from the master, does not check slaves\n\/\/ TODO: (michaelb) what should this be called?\nfunc (m *Mesos) LoadClusterInfo(c *mesos.Client) error {\n\tif cluster, err := mesos.DiscoverCluster(c); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t} else {\n\t\tm.Cluster = cluster\n\t}\n\treturn nil\n}\n\nfunc (m *Mesos) Framework(framework string) FrameworkMap {\n\treturn m.Cluster.GetFramework(framework)\n}\n\nfunc (m *Mesos) LoadSlaveState(host string, c *mesos.Client) (*MesosSlave, error) {\n\tslave := &mesos.Slave{HostName: host}\n\terr := slave.LoadState(c)\n\treturn &MesosSlave{Slave: slave}, err\n}\n\nfunc (m *Mesos) LoadSlaveStats(host string, c *mesos.Client) (*MesosSlave, error) {\n\tslave := &mesos.Slave{HostName: host}\n\terr := slave.LoadStats(c)\n\treturn &MesosSlave{Slave: slave}, err\n}\n\nfunc (s *MesosSlave) Framework(framework string) FrameworkMap {\n\treturn s.Slave.GetFramework(framework)\n}\n<commit_msg>remove TODO<commit_after>package lib\n\nimport (\n\tmesos \"github.com\/boldfield\/go-mesos\"\n\t\"log\"\n)\n\ntype FrameworkMap map[string]*mesos.Framework\n\ntype Mesos struct {\n\tHost string\n\tCluster *mesos.Cluster\n\t_client *mesos.Client\n\tinitialized bool\n}\n\ntype MesosSlave struct {\n\tSlave *mesos.Slave\n}\n\nfunc (m *Mesos) Client() *mesos.Client {\n\tif m._client != nil {\n\t\treturn m._client\n\t}\n\n\tconfig := mesos.NewDefaultConfig()\n\tconfig.DiscoveryURL = m.Host\n\tm._client = mesos.NewClient(config)\n\treturn m._client\n}\n\n\/\/ Loads the entire cluster, including information about slaves\nfunc (m *Mesos) LoadCluster(c *mesos.Client) error {\n\tif err := m.LoadClusterInfo(c); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.Cluster.LoadSlaveStates(c); err != nil {\n\t\tlog.Printf(\"An error was encountered loading slave states: %s\", err)\n\t\treturn err\n\t}\n\n\tif err := m.Cluster.LoadSlaveStats(c); err != nil {\n\t\tlog.Printf(\"An error was encountered loading slave states: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Loads information about the cluster from the master, does not check slaves\nfunc (m *Mesos) LoadClusterInfo(c *mesos.Client) error {\n\tif cluster, err := mesos.DiscoverCluster(c); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t} else {\n\t\tm.Cluster = cluster\n\t}\n\treturn nil\n}\n\nfunc (m *Mesos) Framework(framework string) FrameworkMap {\n\treturn m.Cluster.GetFramework(framework)\n}\n\nfunc (m *Mesos) LoadSlaveState(host string, c *mesos.Client) (*MesosSlave, error) {\n\tslave := &mesos.Slave{HostName: host}\n\terr := slave.LoadState(c)\n\treturn &MesosSlave{Slave: slave}, err\n}\n\nfunc (m *Mesos) LoadSlaveStats(host string, c *mesos.Client) (*MesosSlave, error) {\n\tslave := &mesos.Slave{HostName: host}\n\terr := slave.LoadStats(c)\n\treturn &MesosSlave{Slave: slave}, err\n}\n\nfunc (s *MesosSlave) Framework(framework string) FrameworkMap {\n\treturn s.Slave.GetFramework(framework)\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\n\t\"github.com\/bruceadowns\/syslogparser\"\n\t\"github.com\/bruceadowns\/syslogparser\/mako\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc3164\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc3164raw\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc5424\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc5424raw\"\n\t\"github.com\/bruceadowns\/syslogparser\/syslogmako\"\n)\n\n\/\/ Packet holds the incoming traffic info\ntype Packet struct {\n\tAddress net.Addr\n\tMessage []byte\n\tLogEvent *LogEvent\n}\n\nvar remoteTypeCache = make(map[string]string)\n\nfunc (p *Packet) String() string {\n\treturn fmt.Sprintf(\"Address: %s '%s'\", p.Address, p.Message)\n}\n\n\/\/ IsValid returns T\/F\nfunc (p *Packet) IsValid() bool {\n\tif len(p.Address.String()) == 0 {\n\t\tlog.Print(\"Address is empty\")\n\t\treturn false\n\t}\n\n\tif len(p.Message) == 0 {\n\t\tlog.Print(\"Message is empty\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc populate(p syslogparser.LogParser) (res *LogEvent) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tlogParts := p.Dump()\n\n\tapp := logParts[\"app_name\"]\n\tif len(app) == 0 {\n\t\tapp = logParts[\"service_name\"]\n\t}\n\n\tpid := logParts[\"service_version\"]\n\tif len(pid) == 0 {\n\t\tpid = logParts[\"proc_id\"]\n\t\tif len(pid) == 0 {\n\t\t\tpid = logParts[\"tag\"]\n\t\t}\n\t}\n\n\tversion := logParts[\"@version\"]\n\tif len(version) == 0 {\n\t\tversion = logParts[\"version\"]\n\t}\n\n\tmessage := logParts[\"message\"]\n\tif len(message) == 0 {\n\t\tmessage = logParts[\"content\"]\n\t}\n\n\ttimestamp := logParts[\"@timestamp\"]\n\tif len(timestamp) == 0 {\n\t\ttimestamp = logParts[\"timestamp\"]\n\t}\n\n\tres = &LogEvent{\n\t\tDataCenter: logParts[\"service_environment\"],\n\t\tCluster: logParts[\"service_pipeline\"],\n\t\tHost: logParts[\"hostname\"],\n\t\tService: app,\n\t\tInstance: pid,\n\t\tVersion: version,\n\t\tLevel: logParts[\"level\"],\n\t\tThreadName: logParts[\"thread_name\"],\n\t\tLoggerName: logParts[\"logger_name\"],\n\t\tMessage: message,\n\t\tTimestamp: timestamp,\n\t\t\/\/ThrownStackTrace\n\t}\n\n\treturn\n}\n\n\/\/ Mill determines message type and parses into a LogEvent\nfunc (p *Packet) Mill() (res *LogEvent) {\n\tlog.Printf(\"%s\", p)\n\n\tvar parser syslogparser.LogParser\n\n\tfor {\n\t\t\/\/ check cache for known type\n\n\t\t{\n\t\t\tparser = mako.NewParser(p.Message, p.Address.String())\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"mako\"\n\t\t\t\tlog.Printf(\"mako\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = syslogmako.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"syslogmako\"\n\t\t\t\tlog.Printf(\"syslogmako\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc5424raw.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc5424raw\"\n\t\t\t\tlog.Printf(\"rfc5424raw\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc3164raw.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc3164raw\"\n\t\t\t\tlog.Printf(\"rfc3164raw\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc3164.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc3164\"\n\t\t\t\tlog.Printf(\"rfc3164\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc5424.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc5424\"\n\t\t\t\tlog.Printf(\"rfc5424\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"none\")\n\t\tparser = nil\n\t\tbreak\n\t}\n\n\tres = populate(parser)\n\tif res == nil {\n\t\tlog.Printf(\"Message from %s not parsed: %s\", p.Address, p.Message)\n\t} else {\n\t\tlog.Print(res)\n\t}\n\n\treturn\n}\n<commit_msg>Added stack trace json field<commit_after>package lib\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/bruceadowns\/syslogparser\"\n\t\"github.com\/bruceadowns\/syslogparser\/mako\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc3164\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc3164raw\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc5424\"\n\t\"github.com\/bruceadowns\/syslogparser\/rfc5424raw\"\n\t\"github.com\/bruceadowns\/syslogparser\/syslogmako\"\n)\n\n\/\/ Packet holds the incoming traffic info\ntype Packet struct {\n\tAddress net.Addr\n\tMessage []byte\n\tLogEvent *LogEvent\n}\n\nvar remoteTypeCache = make(map[string]string)\n\nfunc (p *Packet) String() string {\n\treturn fmt.Sprintf(\"Address: %s '%s'\", p.Address, p.Message)\n}\n\n\/\/ IsValid returns T\/F\nfunc (p *Packet) IsValid() bool {\n\tif len(p.Address.String()) == 0 {\n\t\tlog.Print(\"Address is empty\")\n\t\treturn false\n\t}\n\n\tif len(p.Message) == 0 {\n\t\tlog.Print(\"Message is empty\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc populate(p syslogparser.LogParser) (res *LogEvent) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tlogParts := p.Dump()\n\n\tapp := logParts[\"app_name\"]\n\tif len(app) == 0 {\n\t\tapp = logParts[\"service_name\"]\n\t}\n\n\tpid := logParts[\"service_version\"]\n\tif len(pid) == 0 {\n\t\tpid = logParts[\"proc_id\"]\n\t\tif len(pid) == 0 {\n\t\t\tpid = logParts[\"tag\"]\n\t\t}\n\t}\n\n\tversion := logParts[\"@version\"]\n\tif len(version) == 0 {\n\t\tversion = logParts[\"version\"]\n\t}\n\n\tmessage := logParts[\"message\"]\n\tif len(message) == 0 {\n\t\tmessage = logParts[\"content\"]\n\t}\n\n\ttimestamp := logParts[\"@timestamp\"]\n\tif len(timestamp) == 0 {\n\t\ttimestamp = logParts[\"timestamp\"]\n\t}\n\n\tstackTrace := []string{}\n\tfullStackTrace := logParts[\"stack_trace\"]\n\tif len(fullStackTrace) > 0 {\n\t\tstackTrace = strings.Split(fullStackTrace, \"\\n\")\n\t}\n\n\tres = &LogEvent{\n\t\tDataCenter: logParts[\"service_environment\"],\n\t\tCluster: logParts[\"service_pipeline\"],\n\t\tHost: logParts[\"hostname\"],\n\t\tService: app,\n\t\tInstance: pid,\n\t\tVersion: version,\n\t\tLevel: logParts[\"level\"],\n\t\tThreadName: logParts[\"thread_name\"],\n\t\tLoggerName: logParts[\"logger_name\"],\n\t\tMessage: message,\n\t\tTimestamp: timestamp,\n\t\tThrownStackTrace: stackTrace,\n\t}\n\n\treturn\n}\n\n\/\/ Mill determines message type and parses into a LogEvent\nfunc (p *Packet) Mill() (res *LogEvent) {\n\tlog.Printf(\"%s\", p)\n\n\tvar parser syslogparser.LogParser\n\n\tfor {\n\t\t\/\/ check cache for known type\n\n\t\t{\n\t\t\tparser = mako.NewParser(p.Message, p.Address.String())\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"mako\"\n\t\t\t\tlog.Printf(\"mako\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = syslogmako.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"syslogmako\"\n\t\t\t\tlog.Printf(\"syslogmako\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc5424raw.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc5424raw\"\n\t\t\t\tlog.Printf(\"rfc5424raw\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc3164raw.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc3164raw\"\n\t\t\t\tlog.Printf(\"rfc3164raw\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc3164.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc3164\"\n\t\t\t\tlog.Printf(\"rfc3164\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tparser = rfc5424.NewParser(p.Message)\n\t\t\tif err := parser.Parse(); err == nil {\n\t\t\t\tremoteTypeCache[p.Address.String()] = \"rfc5424\"\n\t\t\t\tlog.Printf(\"rfc5424\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"none\")\n\t\tparser = nil\n\t\tbreak\n\t}\n\n\tres = populate(parser)\n\tif res == nil {\n\t\tlog.Printf(\"Message from %s not parsed: %s\", p.Address, p.Message)\n\t} else {\n\t\tlog.Print(res)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package tmpl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\tgateway \"github.com\/gengo\/grpc-gateway\/protoc-gen-grpc-gateway\/descriptor\"\n\t\"github.com\/gengo\/grpc-gateway\/protoc-gen-grpc-gateway\/httprule\"\n\tdescriptor \"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"sourcegraph.com\/sourcegraph\/prototools\/util\"\n)\n\n\/\/ unixPath takes a path, cleans it, and replaces any windows separators (\\\\)\n\/\/ with unix ones (\/). This is needed because plugin.CodeGeneratorResponse_File\n\/\/ is defined as having always unix path separators for the file name.\nfunc unixPath(s string) string {\n\ts = filepath.Clean(s)\n\ts = strings.Replace(s, \"\\\\\", \"\/\", -1)\n\n\t\/\/ Duplicate clean for trailing slashes that were previously windows ones.\n\treturn filepath.Clean(s)\n}\n\n\/\/ stripExt strips the extension off the path and returns it.\nfunc stripExt(s string) string {\n\text := filepath.Ext(s)\n\tif len(ext) > 0 {\n\t\treturn s[:len(s)-len(ext)]\n\t}\n\treturn s\n}\n\nvar Preload = (&tmplFuncs{}).funcMap()\n\n\/\/ cacheItem is a single cache item with a value and a location -- effectively\n\/\/ it is just used for searching.\ntype cacheItem struct {\n\tV interface{}\n\tL *descriptor.SourceCodeInfo_Location\n}\n\n\/\/ Functions exposed to templates. The user of the package must first preload\n\/\/ the FuncMap above for these to be called properly (as they are actually\n\/\/ closures with context).\ntype tmplFuncs struct {\n\tf *descriptor.FileDescriptorProto\n\toutputFile, rootDir string\n\tprotoFile []*descriptor.FileDescriptorProto\n\tregistry *gateway.Registry\n\tapiHost string\n\n\tlocCache []cacheItem\n}\n\n\/\/ funcMap returns the function map for feeding into templates.\nfunc (f *tmplFuncs) funcMap() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"cleanLabel\": f.cleanLabel,\n\t\t\"cleanType\": f.cleanType,\n\t\t\"fieldType\": f.fieldType,\n\t\t\"dict\": f.dict,\n\t\t\"ext\": filepath.Ext,\n\t\t\"dir\": func(s string) string {\n\t\t\tdir, _ := path.Split(s)\n\t\t\treturn dir\n\t\t},\n\t\t\"trimExt\": stripExt,\n\t\t\"sub\": f.sub,\n\t\t\"filepath\": f.filepath,\n\t\t\"gatewayMethod\": f.gatewayMethod,\n\t\t\"gatewayPath\": f.gatewayPath,\n\t\t\"urlToType\": f.urlToType,\n\t\t\"jsonMessage\": f.jsonMessage,\n\t\t\"location\": f.location,\n\t\t\"AllMessages\": func(fixNames bool) []*descriptor.DescriptorProto {\n\t\t\treturn util.AllMessages(f.f, fixNames)\n\t\t},\n\t\t\"AllEnums\": func(fixNames bool) []*descriptor.EnumDescriptorProto {\n\t\t\treturn util.AllEnums(f.f, fixNames)\n\t\t},\n\t}\n}\n\n\/\/ cleanLabel returns the clean (i.e. human-readable \/ protobuf-style) version\n\/\/ of a label.\nfunc (f *tmplFuncs) cleanLabel(l *descriptor.FieldDescriptorProto_Label) string {\n\tswitch int32(*l) {\n\tcase 1:\n\t\treturn \"optional\"\n\tcase 2:\n\t\treturn \"required\"\n\tcase 3:\n\t\treturn \"repeated\"\n\tdefault:\n\t\tpanic(\"unknown label\")\n\t}\n}\n\n\/\/ cleanType returns the last part of a types name, i.e. for a fully-qualified\n\/\/ type \".foo.bar.baz\" it would return just \"baz\".\nfunc (f *tmplFuncs) cleanType(path string) string {\n\tsplit := strings.Split(path, \".\")\n\treturn split[len(split)-1]\n}\n\n\/\/ fieldType returns the clean (i.e. human-readable \/ protobuf-style) version\n\/\/ of a field type.\nfunc (f *tmplFuncs) fieldType(field *descriptor.FieldDescriptorProto) string {\n\tif field.TypeName != nil {\n\t\treturn f.cleanType(*field.TypeName)\n\t}\n\treturn util.FieldTypeName(field.Type)\n}\n\n\/\/ dict builds a map of paired items, allowing you to invoke a template with\n\/\/ multiple parameters.\nfunc (f *tmplFuncs) dict(pairs ...interface{}) (map[string]interface{}, error) {\n\tif len(pairs)%2 != 0 {\n\t\treturn nil, errors.New(\"expected pairs\")\n\t}\n\tm := make(map[string]interface{}, len(pairs)\/2)\n\tfor i := 0; i < len(pairs); i += 2 {\n\t\tm[pairs[i].(string)] = pairs[i+1]\n\t}\n\treturn m, nil\n}\n\n\/\/ sub performs simple x-y subtraction on integers.\nfunc (f *tmplFuncs) sub(x, y int) int { return x - y }\n\n\/\/ filepath returns the output filepath (prefixed by the root directory).\nfunc (f *tmplFuncs) filepath() string {\n\treturn path.Join(f.rootDir, f.outputFile)\n}\n\n\/\/ gatewayMethod returns the grpc-gateway method for a given service method.\nfunc (f *tmplFuncs) gatewayMethod(target *descriptor.MethodDescriptorProto) (*gateway.Method, error) {\n\tfile, err := f.registry.LookupFile(f.f.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, s := range file.Services {\n\t\tfor _, m := range s.Methods {\n\t\t\tif m.MethodDescriptorProto == target {\n\t\t\t\treturn m, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ gatewayPath renders the given grpc-gateway HTTP rule template (i.e. the HTTP\n\/\/ route to be bound). The method parameter is used to insert a link to the\n\/\/ method type for any HTTP fields (which will be marked clearly in \"{text}\").\n\/\/\n\/\/ The returned string will always be prefixed by the APIHost string.\nfunc (f *tmplFuncs) gatewayPath(r *httprule.Template, method *descriptor.MethodDescriptorProto) template.HTML {\n\tvar final string\npool:\n\tfor _, pathElem := range r.Pool {\n\t\tfor _, fieldName := range r.Fields {\n\t\t\tif pathElem != fieldName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tu := fmt.Sprintf(`<a href=\"%s\">{%s}<\/a>`, f.urlToType(method.GetInputType()), pathElem)\n\t\t\tfinal = path.Join(final, u)\n\t\t\tcontinue pool\n\t\t}\n\t\tfinal = path.Join(final, pathElem)\n\t}\n\treturn template.HTML(f.apiHost + final)\n}\n\n\/\/ urlToType returns a URL to the documentation file for the given type. The\n\/\/ input type path can be either fully-qualified or not, regardless, the URL\n\/\/ returned will always have a fully-qualified hash.\n\/\/\n\/\/ TODO(slimsag): have the template pass in the relative type instead of nil,\n\/\/ so that relative symbol paths work.\nfunc (f *tmplFuncs) urlToType(symbolPath string) string {\n\tif !util.IsFullyQualified(symbolPath) {\n\t\tpanic(\"urlToType: not a fully-qualified symbol path\")\n\t}\n\n\t\/\/ Resolve the package path for the type.\n\tfile := util.NewResolver(f.protoFile).ResolveFile(symbolPath, nil)\n\tif file == nil {\n\t\treturn \"\"\n\t}\n\tpkgPath := file.GetName()\n\n\t\/\/ Remove the package prefix from types, for example:\n\t\/\/\n\t\/\/ pkg.html#.pkg.Type.SubType\n\t\/\/ ->\n\t\/\/ pkg.html#Type.SubType\n\t\/\/\n\ttypePath := util.TrimElem(symbolPath, util.CountElem(file.GetPackage()))\n\n\t\/\/ Prefix the absolute path with the root directory and swap the extension out\n\t\/\/ with the correct one.\n\tp := stripExt(pkgPath) + path.Ext(f.outputFile)\n\tp = path.Join(f.rootDir, p)\n\treturn fmt.Sprintf(\"%s#%s\", p, typePath)\n}\n\n\/\/ resolvePkgPath resolves the named protobuf package, returning its file path.\n\/\/\n\/\/ TODO(slimsag): This function assumes that the package (\"package foo;\") is\n\/\/ named identically to its file name (\"foo.proto\"). Protoc doesn't pass such\n\/\/ information to us because it hasn't parsed all the files yet -- we will most\n\/\/ likely have to scan for the package statement in these dependency files\n\/\/ ourselves.\nfunc (f *tmplFuncs) resolvePkgPath(pkg string) string {\n\t\/\/ Test this proto file itself:\n\tif stripExt(filepath.Base(*f.f.Name)) == pkg {\n\t\treturn *f.f.Name\n\t}\n\n\t\/\/ Test each dependency:\n\tfor _, p := range f.f.Dependency {\n\t\tif stripExt(filepath.Base(p)) == pkg {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ location returns the source code info location for the generic AST-like node\n\/\/ from the descriptor package.\nfunc (f *tmplFuncs) location(x interface{}) *descriptor.SourceCodeInfo_Location {\n\t\/\/ Validate that we got a sane type from the template.\n\tpkgPath := reflect.Indirect(reflect.ValueOf(x)).Type().PkgPath()\n\tif pkgPath != \"\" && pkgPath != \"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\" {\n\t\tpanic(\"expected descriptor type; got \" + fmt.Sprintf(\"%q\", pkgPath))\n\t}\n\n\t\/\/ If the location cache is empty; we build it now.\n\tif f.locCache == nil {\n\t\tfor _, loc := range f.f.SourceCodeInfo.Location {\n\t\t\tf.locCache = append(f.locCache, cacheItem{\n\t\t\t\tV: f.walkPath(loc.Path),\n\t\t\t\tL: loc,\n\t\t\t})\n\t\t}\n\t}\n\treturn f.findCachedItem(x)\n}\n\n\/\/ findCachedItem finds and returns a cached location for x.\nfunc (f *tmplFuncs) findCachedItem(x interface{}) *descriptor.SourceCodeInfo_Location {\n\tfor _, i := range f.locCache {\n\t\tif i.V == x {\n\t\t\treturn i.L\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ walkPath walks through the root node (the f.f file) descending down the path\n\/\/ until it is resolved, at which point the value is returned.\nfunc (f *tmplFuncs) walkPath(path []int32) interface{} {\n\tif len(path) == 0 {\n\t\treturn f.f\n\t}\n\tvar (\n\t\twalker func(id int, v interface{}) bool\n\t\tfound interface{}\n\t\ttarget = int(path[0])\n\t)\n\tpath = path[1:]\n\twalker = func(id int, v interface{}) bool {\n\t\tif id != target {\n\t\t\treturn true\n\t\t}\n\t\tif len(path) == 0 {\n\t\t\tfound = v\n\t\t\treturn false\n\t\t}\n\t\ttarget = int(path[0])\n\t\tpath = path[1:]\n\t\tf.protoFields(reflect.ValueOf(v), walker)\n\t\treturn false\n\t}\n\tf.protoFields(reflect.ValueOf(f.f), walker)\n\treturn found\n}\n\n\/\/ protoFields invokes fn with the protobuf tag ID and its in-memory Go value\n\/\/ given a descriptor node type. It stops invoking fn when it returns false.\nfunc (f *tmplFuncs) protoFields(node reflect.Value, fn func(id int, v interface{}) bool) {\n\tindirect := reflect.Indirect(node)\n\n\tswitch indirect.Kind() {\n\tcase reflect.Slice:\n\t\tfor i := 0; i < indirect.Len(); i++ {\n\t\t\tif !fn(i, indirect.Index(i).Interface()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase reflect.Struct:\n\t\t\/\/ Iterate each field.\n\t\tfor i := 0; i < indirect.NumField(); i++ {\n\t\t\t\/\/ Parse the protobuf tag for the ID, e.g. the 49 in:\n\t\t\t\/\/ \"bytes,49,opt,name=foo,def=hello!\"\n\t\t\ttag := indirect.Type().Field(i).Tag.Get(\"protobuf\")\n\t\t\tfields := strings.Split(tag, \",\")\n\t\t\tif len(fields) < 2 {\n\t\t\t\tcontinue \/\/ too few fields\n\t\t\t}\n\n\t\t\t\/\/ Parse the tag ID.\n\t\t\ttagID, err := strconv.Atoi(fields[1])\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !fn(tagID, indirect.Field(i).Interface()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>tmpl: expose a simple slug creation function to templates<commit_after>package tmpl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\tgateway \"github.com\/gengo\/grpc-gateway\/protoc-gen-grpc-gateway\/descriptor\"\n\t\"github.com\/gengo\/grpc-gateway\/protoc-gen-grpc-gateway\/httprule\"\n\tdescriptor \"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"sourcegraph.com\/sourcegraph\/prototools\/util\"\n)\n\n\/\/ unixPath takes a path, cleans it, and replaces any windows separators (\\\\)\n\/\/ with unix ones (\/). This is needed because plugin.CodeGeneratorResponse_File\n\/\/ is defined as having always unix path separators for the file name.\nfunc unixPath(s string) string {\n\ts = filepath.Clean(s)\n\ts = strings.Replace(s, \"\\\\\", \"\/\", -1)\n\n\t\/\/ Duplicate clean for trailing slashes that were previously windows ones.\n\treturn filepath.Clean(s)\n}\n\n\/\/ stripExt strips the extension off the path and returns it.\nfunc stripExt(s string) string {\n\text := filepath.Ext(s)\n\tif len(ext) > 0 {\n\t\treturn s[:len(s)-len(ext)]\n\t}\n\treturn s\n}\n\n\/\/ slug returns a simple slug string by making everything lowercase and\n\/\/ replacing anything no a unicode letter with a dash separator.\nfunc slug(s string) string {\n\ts = strings.ToLower(s)\n\tfields := strings.FieldsFunc(s, func(c rune) bool {\n\t\treturn !unicode.IsLetter(c)\n\t})\n\treturn strings.Join(fields, \"-\")\n}\n\nvar Preload = (&tmplFuncs{}).funcMap()\n\n\/\/ cacheItem is a single cache item with a value and a location -- effectively\n\/\/ it is just used for searching.\ntype cacheItem struct {\n\tV interface{}\n\tL *descriptor.SourceCodeInfo_Location\n}\n\n\/\/ Functions exposed to templates. The user of the package must first preload\n\/\/ the FuncMap above for these to be called properly (as they are actually\n\/\/ closures with context).\ntype tmplFuncs struct {\n\tf *descriptor.FileDescriptorProto\n\toutputFile, rootDir string\n\tprotoFile []*descriptor.FileDescriptorProto\n\tregistry *gateway.Registry\n\tapiHost string\n\n\tlocCache []cacheItem\n}\n\n\/\/ funcMap returns the function map for feeding into templates.\nfunc (f *tmplFuncs) funcMap() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"cleanLabel\": f.cleanLabel,\n\t\t\"cleanType\": f.cleanType,\n\t\t\"fieldType\": f.fieldType,\n\t\t\"dict\": f.dict,\n\t\t\"ext\": filepath.Ext,\n\t\t\"dir\": func(s string) string {\n\t\t\tdir, _ := path.Split(s)\n\t\t\treturn dir\n\t\t},\n\t\t\"trimExt\": stripExt,\n\t\t\"slug\": slug,\n\t\t\"sub\": f.sub,\n\t\t\"filepath\": f.filepath,\n\t\t\"gatewayMethod\": f.gatewayMethod,\n\t\t\"gatewayPath\": f.gatewayPath,\n\t\t\"urlToType\": f.urlToType,\n\t\t\"jsonMessage\": f.jsonMessage,\n\t\t\"location\": f.location,\n\t\t\"AllMessages\": func(fixNames bool) []*descriptor.DescriptorProto {\n\t\t\treturn util.AllMessages(f.f, fixNames)\n\t\t},\n\t\t\"AllEnums\": func(fixNames bool) []*descriptor.EnumDescriptorProto {\n\t\t\treturn util.AllEnums(f.f, fixNames)\n\t\t},\n\t}\n}\n\n\/\/ cleanLabel returns the clean (i.e. human-readable \/ protobuf-style) version\n\/\/ of a label.\nfunc (f *tmplFuncs) cleanLabel(l *descriptor.FieldDescriptorProto_Label) string {\n\tswitch int32(*l) {\n\tcase 1:\n\t\treturn \"optional\"\n\tcase 2:\n\t\treturn \"required\"\n\tcase 3:\n\t\treturn \"repeated\"\n\tdefault:\n\t\tpanic(\"unknown label\")\n\t}\n}\n\n\/\/ cleanType returns the last part of a types name, i.e. for a fully-qualified\n\/\/ type \".foo.bar.baz\" it would return just \"baz\".\nfunc (f *tmplFuncs) cleanType(path string) string {\n\tsplit := strings.Split(path, \".\")\n\treturn split[len(split)-1]\n}\n\n\/\/ fieldType returns the clean (i.e. human-readable \/ protobuf-style) version\n\/\/ of a field type.\nfunc (f *tmplFuncs) fieldType(field *descriptor.FieldDescriptorProto) string {\n\tif field.TypeName != nil {\n\t\treturn f.cleanType(*field.TypeName)\n\t}\n\treturn util.FieldTypeName(field.Type)\n}\n\n\/\/ dict builds a map of paired items, allowing you to invoke a template with\n\/\/ multiple parameters.\nfunc (f *tmplFuncs) dict(pairs ...interface{}) (map[string]interface{}, error) {\n\tif len(pairs)%2 != 0 {\n\t\treturn nil, errors.New(\"expected pairs\")\n\t}\n\tm := make(map[string]interface{}, len(pairs)\/2)\n\tfor i := 0; i < len(pairs); i += 2 {\n\t\tm[pairs[i].(string)] = pairs[i+1]\n\t}\n\treturn m, nil\n}\n\n\/\/ sub performs simple x-y subtraction on integers.\nfunc (f *tmplFuncs) sub(x, y int) int { return x - y }\n\n\/\/ filepath returns the output filepath (prefixed by the root directory).\nfunc (f *tmplFuncs) filepath() string {\n\treturn path.Join(f.rootDir, f.outputFile)\n}\n\n\/\/ gatewayMethod returns the grpc-gateway method for a given service method.\nfunc (f *tmplFuncs) gatewayMethod(target *descriptor.MethodDescriptorProto) (*gateway.Method, error) {\n\tfile, err := f.registry.LookupFile(f.f.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, s := range file.Services {\n\t\tfor _, m := range s.Methods {\n\t\t\tif m.MethodDescriptorProto == target {\n\t\t\t\treturn m, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ gatewayPath renders the given grpc-gateway HTTP rule template (i.e. the HTTP\n\/\/ route to be bound). The method parameter is used to insert a link to the\n\/\/ method type for any HTTP fields (which will be marked clearly in \"{text}\").\n\/\/\n\/\/ The returned string will always be prefixed by the APIHost string.\nfunc (f *tmplFuncs) gatewayPath(r *httprule.Template, method *descriptor.MethodDescriptorProto) template.HTML {\n\tvar final string\npool:\n\tfor _, pathElem := range r.Pool {\n\t\tfor _, fieldName := range r.Fields {\n\t\t\tif pathElem != fieldName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tu := fmt.Sprintf(`<a href=\"%s\">{%s}<\/a>`, f.urlToType(method.GetInputType()), pathElem)\n\t\t\tfinal = path.Join(final, u)\n\t\t\tcontinue pool\n\t\t}\n\t\tfinal = path.Join(final, pathElem)\n\t}\n\treturn template.HTML(f.apiHost + final)\n}\n\n\/\/ urlToType returns a URL to the documentation file for the given type. The\n\/\/ input type path can be either fully-qualified or not, regardless, the URL\n\/\/ returned will always have a fully-qualified hash.\n\/\/\n\/\/ TODO(slimsag): have the template pass in the relative type instead of nil,\n\/\/ so that relative symbol paths work.\nfunc (f *tmplFuncs) urlToType(symbolPath string) string {\n\tif !util.IsFullyQualified(symbolPath) {\n\t\tpanic(\"urlToType: not a fully-qualified symbol path\")\n\t}\n\n\t\/\/ Resolve the package path for the type.\n\tfile := util.NewResolver(f.protoFile).ResolveFile(symbolPath, nil)\n\tif file == nil {\n\t\treturn \"\"\n\t}\n\tpkgPath := file.GetName()\n\n\t\/\/ Remove the package prefix from types, for example:\n\t\/\/\n\t\/\/ pkg.html#.pkg.Type.SubType\n\t\/\/ ->\n\t\/\/ pkg.html#Type.SubType\n\t\/\/\n\ttypePath := util.TrimElem(symbolPath, util.CountElem(file.GetPackage()))\n\n\t\/\/ Prefix the absolute path with the root directory and swap the extension out\n\t\/\/ with the correct one.\n\tp := stripExt(pkgPath) + path.Ext(f.outputFile)\n\tp = path.Join(f.rootDir, p)\n\treturn fmt.Sprintf(\"%s#%s\", p, typePath)\n}\n\n\/\/ resolvePkgPath resolves the named protobuf package, returning its file path.\n\/\/\n\/\/ TODO(slimsag): This function assumes that the package (\"package foo;\") is\n\/\/ named identically to its file name (\"foo.proto\"). Protoc doesn't pass such\n\/\/ information to us because it hasn't parsed all the files yet -- we will most\n\/\/ likely have to scan for the package statement in these dependency files\n\/\/ ourselves.\nfunc (f *tmplFuncs) resolvePkgPath(pkg string) string {\n\t\/\/ Test this proto file itself:\n\tif stripExt(filepath.Base(*f.f.Name)) == pkg {\n\t\treturn *f.f.Name\n\t}\n\n\t\/\/ Test each dependency:\n\tfor _, p := range f.f.Dependency {\n\t\tif stripExt(filepath.Base(p)) == pkg {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ location returns the source code info location for the generic AST-like node\n\/\/ from the descriptor package.\nfunc (f *tmplFuncs) location(x interface{}) *descriptor.SourceCodeInfo_Location {\n\t\/\/ Validate that we got a sane type from the template.\n\tpkgPath := reflect.Indirect(reflect.ValueOf(x)).Type().PkgPath()\n\tif pkgPath != \"\" && pkgPath != \"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\" {\n\t\tpanic(\"expected descriptor type; got \" + fmt.Sprintf(\"%q\", pkgPath))\n\t}\n\n\t\/\/ If the location cache is empty; we build it now.\n\tif f.locCache == nil {\n\t\tfor _, loc := range f.f.SourceCodeInfo.Location {\n\t\t\tf.locCache = append(f.locCache, cacheItem{\n\t\t\t\tV: f.walkPath(loc.Path),\n\t\t\t\tL: loc,\n\t\t\t})\n\t\t}\n\t}\n\treturn f.findCachedItem(x)\n}\n\n\/\/ findCachedItem finds and returns a cached location for x.\nfunc (f *tmplFuncs) findCachedItem(x interface{}) *descriptor.SourceCodeInfo_Location {\n\tfor _, i := range f.locCache {\n\t\tif i.V == x {\n\t\t\treturn i.L\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ walkPath walks through the root node (the f.f file) descending down the path\n\/\/ until it is resolved, at which point the value is returned.\nfunc (f *tmplFuncs) walkPath(path []int32) interface{} {\n\tif len(path) == 0 {\n\t\treturn f.f\n\t}\n\tvar (\n\t\twalker func(id int, v interface{}) bool\n\t\tfound interface{}\n\t\ttarget = int(path[0])\n\t)\n\tpath = path[1:]\n\twalker = func(id int, v interface{}) bool {\n\t\tif id != target {\n\t\t\treturn true\n\t\t}\n\t\tif len(path) == 0 {\n\t\t\tfound = v\n\t\t\treturn false\n\t\t}\n\t\ttarget = int(path[0])\n\t\tpath = path[1:]\n\t\tf.protoFields(reflect.ValueOf(v), walker)\n\t\treturn false\n\t}\n\tf.protoFields(reflect.ValueOf(f.f), walker)\n\treturn found\n}\n\n\/\/ protoFields invokes fn with the protobuf tag ID and its in-memory Go value\n\/\/ given a descriptor node type. It stops invoking fn when it returns false.\nfunc (f *tmplFuncs) protoFields(node reflect.Value, fn func(id int, v interface{}) bool) {\n\tindirect := reflect.Indirect(node)\n\n\tswitch indirect.Kind() {\n\tcase reflect.Slice:\n\t\tfor i := 0; i < indirect.Len(); i++ {\n\t\t\tif !fn(i, indirect.Index(i).Interface()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase reflect.Struct:\n\t\t\/\/ Iterate each field.\n\t\tfor i := 0; i < indirect.NumField(); i++ {\n\t\t\t\/\/ Parse the protobuf tag for the ID, e.g. the 49 in:\n\t\t\t\/\/ \"bytes,49,opt,name=foo,def=hello!\"\n\t\t\ttag := indirect.Type().Field(i).Tag.Get(\"protobuf\")\n\t\t\tfields := strings.Split(tag, \",\")\n\t\t\tif len(fields) < 2 {\n\t\t\t\tcontinue \/\/ too few fields\n\t\t\t}\n\n\t\t\t\/\/ Parse the tag ID.\n\t\t\ttagID, err := strconv.Atoi(fields[1])\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !fn(tagID, indirect.Field(i).Interface()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package k8s\n\nimport (\n\t\"testing\"\n\n\tktesting \"k8s.io\/client-go\/testing\"\n\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/kubernetes\/fake\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/client-go\/pkg\/apis\/apps\/v1beta1\"\n)\n\nfunc mockUnmountedK8sClient() *fake.Clientset {\n\tk8sMock := &fake.Clientset{}\n\tk8sMock.AddReactor(\"list\", \"secrets\", func(a ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.SecretList{\n\t\t\tItems: []v1.Secret{\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"name\": []byte(\"test-service\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"now\": []byte(\"something\"),\n\t\t\t\t\t\t\"completely\": []byte(\"different\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"get\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"get\", \"secrets\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.Secret{\n\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\tName: \"test-secret\",\n\t\t\t},\n\t\t\tData: map[string][]byte{},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"Update\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"fh-sync-server\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\treturn k8sMock\n}\n\nfunc mockMountedK8sClient() *fake.Clientset {\n\tk8sMock := &fake.Clientset{}\n\tk8sMock.AddReactor(\"list\", \"secrets\", func(a ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.SecretList{\n\t\t\tItems: []v1.Secret{\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"name\": []byte(\"test-service\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"now\": []byte(\"something\"),\n\t\t\t\t\t\t\"completely\": []byte(\"different\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"get\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-secret\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"test-secret\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"get\", \"secrets\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.Secret{\n\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\tName: \"test-secret\",\n\t\t\t},\n\t\t\tData: map[string][]byte{},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"Update\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"fh-sync-server\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\treturn k8sMock\n}\n\nfunc TestMount(t *testing.T) {\n\tcases := []struct {\n\t\tName string\n\t\tK8sClient func() *fake.Clientset\n\t\tNamespace string\n\t\tService string\n\t\tSecret string\n\t\tValidate func(t *testing.T, mountRes error)\n\t}{\n\t\t{\n\t\t\tName: \"Valid mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes != nil {\n\t\t\t\t\tt.Fatalf(\"Did not expect error mounting in testCase: Valid mount request, got: %v\", mountRes)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad service name to mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-bad-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad clientService name, but got none\")\n\t\t\t\t}\n\t\t\t\tif mountRes.Error() != \"k8s.mm.Mount -> could not find container in deployment with name: test-bad-service\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find container in deployment with name: test-bad-service' but got: %s\", mountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad secret name to mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-bad-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad secret name, but got none\")\n\t\t\t\t}\n\t\t\t\tif mountRes.Error() != \"k8s.mm.Mount -> could not find secret: test-bad-secret\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find secret: test-bad-secret' but got: '%s'\", mountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range cases {\n\t\tmb := NewMounterBuilder(testCase.Namespace).WithK8s(testCase.K8sClient()).Build()\n\t\terr := mb.Mount(testCase.Secret, testCase.Service)\n\t\ttestCase.Validate(t, err)\n\t}\n}\n\nfunc TestUnmount(t *testing.T) {\n\tcases := []struct {\n\t\tName string\n\t\tK8sClient func() *fake.Clientset\n\t\tNamespace string\n\t\tService string\n\t\tSecret string\n\t\tValidate func(t *testing.T, unmountRes error)\n\t}{\n\t\t{\n\t\t\tName: \"Valid unmount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes != nil {\n\t\t\t\t\tt.Fatalf(\"Did not expect error mounting in testCase: Valid mount request, got: %v\", unmountRes)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad service name to mount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-bad-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad clientService name, but got none\")\n\t\t\t\t}\n\t\t\t\tif unmountRes.Error() != \"k8s.mm.Mount -> could not find container in deployment with name: test-bad-service\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find container in deployment with name: test-bad-service' but got: %s\", unmountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad secret name to unmount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-bad-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad secret name, but got none\")\n\t\t\t\t}\n\t\t\t\tif unmountRes.Error() != \"k8s.mm.Unmount -> could not find secret: test-bad-secret\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Unmount -> could not find secret: test-bad-secret' but got: '%s'\", unmountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range cases {\n\t\tmb := NewMounterBuilder(testCase.Namespace).WithK8s(testCase.K8sClient()).Build()\n\t\terr := mb.Unmount(testCase.Secret, testCase.Service)\n\t\ttestCase.Validate(t, err)\n\t}\n}\n<commit_msg>reduce duplication in test<commit_after>package k8s\n\nimport (\n\t\"testing\"\n\n\tktesting \"k8s.io\/client-go\/testing\"\n\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/kubernetes\/fake\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/client-go\/pkg\/apis\/apps\/v1beta1\"\n)\n\nfunc mockK8sClient() *fake.Clientset {\n\tk8sMock := &fake.Clientset{}\n\tk8sMock.AddReactor(\"list\", \"secrets\", func(a ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.SecretList{\n\t\t\tItems: []v1.Secret{\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"name\": []byte(\"test-service\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tData: map[string][]byte{\n\t\t\t\t\t\t\"now\": []byte(\"something\"),\n\t\t\t\t\t\t\"completely\": []byte(\"different\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"get\", \"secrets\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1.Secret{\n\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\tName: \"test-secret\",\n\t\t\t},\n\t\t\tData: map[string][]byte{},\n\t\t}, nil\n\t})\n\tk8sMock.AddReactor(\"Update\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"fh-sync-server\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\treturn k8sMock\n}\n\nfunc mockUnmountedK8sClient() *fake.Clientset {\n\tk8sMock := mockK8sClient()\n\tk8sMock.AddReactor(\"get\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\n\treturn k8sMock\n}\n\nfunc mockMountedK8sClient() *fake.Clientset {\n\tk8sMock := mockK8sClient()\n\tk8sMock.AddReactor(\"get\", \"deployments\", func(action ktesting.Action) (bool, runtime.Object, error) {\n\t\treturn true, &v1beta1.Deployment{\n\t\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-secret\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"test-secret\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\treturn k8sMock\n}\n\nfunc TestMount(t *testing.T) {\n\tcases := []struct {\n\t\tName string\n\t\tK8sClient func() *fake.Clientset\n\t\tNamespace string\n\t\tService string\n\t\tSecret string\n\t\tValidate func(t *testing.T, mountRes error)\n\t}{\n\t\t{\n\t\t\tName: \"Valid mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes != nil {\n\t\t\t\t\tt.Fatalf(\"Did not expect error mounting in testCase: Valid mount request, got: %v\", mountRes)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad service name to mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-bad-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad clientService name, but got none\")\n\t\t\t\t}\n\t\t\t\tif mountRes.Error() != \"k8s.mm.Mount -> could not find container in deployment with name: test-bad-service\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find container in deployment with name: test-bad-service' but got: %s\", mountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad secret name to mount request\",\n\t\t\tK8sClient: mockUnmountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-bad-secret\",\n\t\t\tValidate: func(t *testing.T, mountRes error) {\n\t\t\t\tif mountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad secret name, but got none\")\n\t\t\t\t}\n\t\t\t\tif mountRes.Error() != \"k8s.mm.Mount -> could not find secret: test-bad-secret\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find secret: test-bad-secret' but got: '%s'\", mountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range cases {\n\t\tmb := NewMounterBuilder(testCase.Namespace).WithK8s(testCase.K8sClient()).Build()\n\t\terr := mb.Mount(testCase.Secret, testCase.Service)\n\t\ttestCase.Validate(t, err)\n\t}\n}\n\nfunc TestUnmount(t *testing.T) {\n\tcases := []struct {\n\t\tName string\n\t\tK8sClient func() *fake.Clientset\n\t\tNamespace string\n\t\tService string\n\t\tSecret string\n\t\tValidate func(t *testing.T, unmountRes error)\n\t}{\n\t\t{\n\t\t\tName: \"Valid unmount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes != nil {\n\t\t\t\t\tt.Fatalf(\"Did not expect error mounting in testCase: Valid mount request, got: %v\", unmountRes)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad service name to mount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-bad-service\",\n\t\t\tSecret: \"test-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad clientService name, but got none\")\n\t\t\t\t}\n\t\t\t\tif unmountRes.Error() != \"k8s.mm.Mount -> could not find container in deployment with name: test-bad-service\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Mount -> could not find container in deployment with name: test-bad-service' but got: %s\", unmountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Bad secret name to unmount request\",\n\t\t\tK8sClient: mockMountedK8sClient,\n\t\t\tNamespace: \"test-namespace\",\n\t\t\tService: \"test-service\",\n\t\t\tSecret: \"test-bad-secret\",\n\t\t\tValidate: func(t *testing.T, unmountRes error) {\n\t\t\t\tif unmountRes == nil {\n\t\t\t\t\tt.Fatalf(\"expected error when providing bad secret name, but got none\")\n\t\t\t\t}\n\t\t\t\tif unmountRes.Error() != \"k8s.mm.Unmount -> could not find secret: test-bad-secret\" {\n\t\t\t\t\tt.Fatalf(\"Expected 'k8s.mm.Unmount -> could not find secret: test-bad-secret' but got: '%s'\", unmountRes.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range cases {\n\t\tmb := NewMounterBuilder(testCase.Namespace).WithK8s(testCase.K8sClient()).Build()\n\t\terr := mb.Unmount(testCase.Secret, testCase.Service)\n\t\ttestCase.Validate(t, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package opentracing\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/hellofresh\/gcloud-opentracing\"\n\t\"github.com\/hellofresh\/janus\/pkg\/config\"\n\t\"github.com\/hellofresh\/janus\/pkg\/errors\"\n\t\"github.com\/opentracing\/opentracing-go\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tjaegercfg \"github.com\/uber\/jaeger-client-go\/config\"\n\t\"github.com\/uber\/jaeger-lib\/metrics\"\n)\n\nconst (\n\tgcloudTracing = \"googleCloud\"\n\tjaegerTracing = \"jaeger\"\n)\n\n\/\/ Tracing is the tracing functionality\ntype Tracing struct {\n\tconfig config.Tracing\n\ttracer opentracing.Tracer\n\tcloser io.Closer\n}\n\ntype noopCloser struct{}\n\nfunc (n noopCloser) Close() error { return nil }\n\n\/\/ New creates a new instance of Tracing\nfunc New(config config.Tracing) *Tracing {\n\treturn &Tracing{config: config}\n}\n\n\/\/ Setup a tracer based on the configuration provided\nfunc (t *Tracing) Setup() {\n\tvar err error\n\n\tlog.Debug(\"Initializing distributed tracing\")\n\tswitch t.config.Provider {\n\tcase gcloudTracing:\n\t\tlog.Debug(\"Using google cloud platform (stackdriver trace) as tracing system\")\n\t\tt.tracer, t.closer, err = t.buildGCloud(t.config.GoogleCloudTracing)\n\tcase jaegerTracing:\n\t\tlog.Debug(\"Using Jaeger as tracing system\")\n\t\tt.tracer, t.closer, err = t.buildJaeger(t.config.ServiceName, t.config.JaegerTracing)\n\tdefault:\n\t\tlog.Debug(\"No tracer selected\")\n\t\tt.tracer, t.closer, err = &opentracing.NoopTracer{}, noopCloser{}, nil\n\t}\n\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"provider\", t.config.Provider).Warnf(\"Could not initialize tracing\")\n\t\treturn\n\t}\n\n\topentracing.SetGlobalTracer(t.tracer)\n}\n\n\/\/ Close tracer\nfunc (t *Tracing) Close() {\n\tif t.closer != nil {\n\t\tt.closer.Close()\n\t}\n}\n\nfunc (t *Tracing) buildGCloud(config config.GoogleCloudTracing) (opentracing.Tracer, io.Closer, error) {\n\ttracer, err := gcloudtracer.NewTracer(\n\t\tcontext.Background(),\n\t\tgcloudtracer.WithLogger(log.StandardLogger()),\n\t\tgcloudtracer.WithProject(config.ProjectID),\n\t\tgcloudtracer.WithJWTCredentials(gcloudtracer.JWTCredentials{\n\t\t\tEmail: config.Email,\n\t\t\tPrivateKey: []byte(config.PrivateKey),\n\t\t\tPrivateKeyID: config.PrivateKeyID,\n\t\t}),\n\t)\n\n\treturn tracer, noopCloser{}, err\n}\n\nfunc (t *Tracing) buildJaeger(componentName string, c config.JaegerTracing) (opentracing.Tracer, io.Closer, error) {\n\tbufferFLushInterval, err := time.ParseDuration(c.BufferFlushInterval)\n\tif err != nil {\n\t\treturn nil, noopCloser{}, errors.Wrap(err, \"could not parse buffer flush interval for jaeger\")\n\t}\n\n\tcfg := jaegercfg.Configuration{\n\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\tType: c.SamplingType,\n\t\t\tParam: c.SamplingParam,\n\t\t},\n\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\tLogSpans: c.LogSpans,\n\t\t\tBufferFlushInterval: bufferFLushInterval,\n\t\t\tLocalAgentHostPort: c.SamplingServerURL,\n\t\t\tQueueSize: c.QueueSize,\n\t\t},\n\t}\n\n\treturn cfg.New(\n\t\tcomponentName,\n\t\tjaegercfg.Logger(jaegerLoggerAdapter{log.StandardLogger()}),\n\t\tjaegercfg.Metrics(metrics.NullFactory),\n\t)\n}\n\n\/\/ FromContext creates a span from a context that contains a parent span\nfunc FromContext(ctx context.Context, name string) opentracing.Span {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, name)\n\treturn span\n}\n\n\/\/ ToContext sets a span to a context\nfunc ToContext(r *http.Request, span opentracing.Span) *http.Request {\n\treturn r.WithContext(opentracing.ContextWithSpan(r.Context(), span))\n}\n<commit_msg>Fixed typo<commit_after>package opentracing\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/hellofresh\/gcloud-opentracing\"\n\t\"github.com\/hellofresh\/janus\/pkg\/config\"\n\t\"github.com\/hellofresh\/janus\/pkg\/errors\"\n\t\"github.com\/opentracing\/opentracing-go\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tjaegercfg \"github.com\/uber\/jaeger-client-go\/config\"\n\t\"github.com\/uber\/jaeger-lib\/metrics\"\n)\n\nconst (\n\tgcloudTracing = \"googleCloud\"\n\tjaegerTracing = \"jaeger\"\n)\n\n\/\/ Tracing is the tracing functionality\ntype Tracing struct {\n\tconfig config.Tracing\n\ttracer opentracing.Tracer\n\tcloser io.Closer\n}\n\ntype noopCloser struct{}\n\nfunc (n noopCloser) Close() error { return nil }\n\n\/\/ New creates a new instance of Tracing\nfunc New(config config.Tracing) *Tracing {\n\treturn &Tracing{config: config}\n}\n\n\/\/ Setup a tracer based on the configuration provided\nfunc (t *Tracing) Setup() {\n\tvar err error\n\n\tlog.Debug(\"Initializing distributed tracing\")\n\tswitch t.config.Provider {\n\tcase gcloudTracing:\n\t\tlog.Debug(\"Using google cloud platform (stackdriver trace) as tracing system\")\n\t\tt.tracer, t.closer, err = t.buildGCloud(t.config.GoogleCloudTracing)\n\tcase jaegerTracing:\n\t\tlog.Debug(\"Using Jaeger as tracing system\")\n\t\tt.tracer, t.closer, err = t.buildJaeger(t.config.ServiceName, t.config.JaegerTracing)\n\tdefault:\n\t\tlog.Debug(\"No tracer selected\")\n\t\tt.tracer, t.closer, err = &opentracing.NoopTracer{}, noopCloser{}, nil\n\t}\n\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"provider\", t.config.Provider).Warn(\"Could not initialize tracing\")\n\t\treturn\n\t}\n\n\topentracing.SetGlobalTracer(t.tracer)\n}\n\n\/\/ Close tracer\nfunc (t *Tracing) Close() {\n\tif t.closer != nil {\n\t\tt.closer.Close()\n\t}\n}\n\nfunc (t *Tracing) buildGCloud(config config.GoogleCloudTracing) (opentracing.Tracer, io.Closer, error) {\n\ttracer, err := gcloudtracer.NewTracer(\n\t\tcontext.Background(),\n\t\tgcloudtracer.WithLogger(log.StandardLogger()),\n\t\tgcloudtracer.WithProject(config.ProjectID),\n\t\tgcloudtracer.WithJWTCredentials(gcloudtracer.JWTCredentials{\n\t\t\tEmail: config.Email,\n\t\t\tPrivateKey: []byte(config.PrivateKey),\n\t\t\tPrivateKeyID: config.PrivateKeyID,\n\t\t}),\n\t)\n\n\treturn tracer, noopCloser{}, err\n}\n\nfunc (t *Tracing) buildJaeger(componentName string, c config.JaegerTracing) (opentracing.Tracer, io.Closer, error) {\n\tbufferFLushInterval, err := time.ParseDuration(c.BufferFlushInterval)\n\tif err != nil {\n\t\treturn nil, noopCloser{}, errors.Wrap(err, \"could not parse buffer flush interval for jaeger\")\n\t}\n\n\tcfg := jaegercfg.Configuration{\n\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\tType: c.SamplingType,\n\t\t\tParam: c.SamplingParam,\n\t\t},\n\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\tLogSpans: c.LogSpans,\n\t\t\tBufferFlushInterval: bufferFLushInterval,\n\t\t\tLocalAgentHostPort: c.SamplingServerURL,\n\t\t\tQueueSize: c.QueueSize,\n\t\t},\n\t}\n\n\treturn cfg.New(\n\t\tcomponentName,\n\t\tjaegercfg.Logger(jaegerLoggerAdapter{log.StandardLogger()}),\n\t\tjaegercfg.Metrics(metrics.NullFactory),\n\t)\n}\n\n\/\/ FromContext creates a span from a context that contains a parent span\nfunc FromContext(ctx context.Context, name string) opentracing.Span {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, name)\n\treturn span\n}\n\n\/\/ ToContext sets a span to a context\nfunc ToContext(r *http.Request, span opentracing.Span) *http.Request {\n\treturn r.WithContext(opentracing.ContextWithSpan(r.Context(), span))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Wandoujia Inc. All Rights Reserved.\n\/\/ Licensed under the MIT (MIT-LICENSE.txt) license.\n\npackage router\n<commit_msg>Remove, useless slots_test.go<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v2\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\/\/nolint:golint,staticcheck\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/constants\"\n\tsErrors \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/errors\"\n\tproto \"github.com\/GoogleContainerTools\/skaffold\/proto\/v2\"\n)\n\nconst (\n\tNotStarted = \"NotStarted\"\n\tInProgress = \"InProgress\"\n\tComplete = \"Complete\"\n\tFailed = \"Failed\"\n\tInfo = \"Information\"\n\tStarted = \"Started\"\n\tSucceeded = \"Succeeded\"\n\tTerminated = \"Terminated\"\n\tCanceled = \"Canceled\"\n)\n\nvar handler = newHandler()\n\nfunc newHandler() *eventHandler {\n\th := &eventHandler{\n\t\teventChan: make(chan *proto.Event),\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tev, open := <-h.eventChan\n\t\t\tif !open {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\th.handleExec(ev)\n\t\t}\n\t}()\n\treturn h\n}\n\ntype eventHandler struct {\n\teventLog []proto.Event\n\tlogLock sync.Mutex\n\tapplicationLogs []proto.Event\n\tapplicationLogsLock sync.Mutex\n\tskaffoldLogs []proto.Event\n\tskaffoldLogsLock sync.Mutex\n\tcfg Config\n\n\titeration int\n\tstate proto.State\n\tstateLock sync.Mutex\n\teventChan chan *proto.Event\n\teventListeners []*listener\n\tapplicationLogListeners []*listener\n\tskaffoldLogListeners []*listener\n}\n\ntype listener struct {\n\tcallback func(*proto.Event) error\n\terrors chan error\n\tclosed bool\n}\n\nfunc GetIteration() int {\n\treturn handler.iteration\n}\n\nfunc GetState() (*proto.State, error) {\n\tstate := handler.getState()\n\treturn &state, nil\n}\n\nfunc ForEachEvent(callback func(*proto.Event) error) error {\n\treturn handler.forEachEvent(callback)\n}\n\nfunc ForEachApplicationLog(callback func(*proto.Event) error) error {\n\treturn handler.forEachApplicationLog(callback)\n}\n\nfunc ForEachSkaffoldLog(callback func(*proto.Event) error) error {\n\treturn handler.forEachSkaffoldLog(callback)\n}\n\nfunc Handle(event *proto.Event) error {\n\tif event != nil {\n\t\thandler.handle(event)\n\t}\n\treturn nil\n}\n\nfunc (ev *eventHandler) getState() proto.State {\n\tev.stateLock.Lock()\n\t\/\/ Deep copy\n\tbuf, _ := json.Marshal(ev.state)\n\tev.stateLock.Unlock()\n\n\tvar state proto.State\n\tjson.Unmarshal(buf, &state)\n\n\treturn state\n}\n\nfunc (ev *eventHandler) log(event *proto.Event, listeners *[]*listener, log *[]proto.Event, lock sync.Locker) {\n\tlock.Lock()\n\n\tfor _, listener := range *listeners {\n\t\tif listener.closed {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := listener.callback(event); err != nil {\n\t\t\tlistener.errors <- err\n\t\t\tlistener.closed = true\n\t\t}\n\t}\n\t*log = append(*log, *event)\n\n\tlock.Unlock()\n}\n\nfunc (ev *eventHandler) logEvent(event *proto.Event) {\n\tev.log(event, &ev.eventListeners, &ev.eventLog, &ev.logLock)\n}\n\nfunc (ev *eventHandler) logApplicationLog(event *proto.Event) {\n\tev.log(event, &ev.applicationLogListeners, &ev.applicationLogs, &ev.applicationLogsLock)\n}\n\nfunc (ev *eventHandler) logSkaffoldLog(event *proto.Event) {\n\tev.log(event, &ev.skaffoldLogListeners, &ev.skaffoldLogs, &ev.skaffoldLogsLock)\n}\n\nfunc (ev *eventHandler) forEach(listeners *[]*listener, log *[]proto.Event, lock sync.Locker, callback func(*proto.Event) error) error {\n\tlistener := &listener{\n\t\tcallback: callback,\n\t\terrors: make(chan error),\n\t}\n\n\tlock.Lock()\n\n\toldEvents := make([]proto.Event, len(*log))\n\tcopy(oldEvents, *log)\n\t*listeners = append(*listeners, listener)\n\n\tlock.Unlock()\n\n\tfor i := range oldEvents {\n\t\tif err := callback(&oldEvents[i]); err != nil {\n\t\t\t\/\/ listener should maybe be closed\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn <-listener.errors\n}\n\nfunc (ev *eventHandler) forEachEvent(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.eventListeners, &ev.eventLog, &ev.logLock, callback)\n}\n\nfunc (ev *eventHandler) forEachApplicationLog(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.applicationLogListeners, &ev.applicationLogs, &ev.applicationLogsLock, callback)\n}\n\nfunc (ev *eventHandler) forEachSkaffoldLog(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.skaffoldLogListeners, &ev.skaffoldLogs, &ev.skaffoldLogsLock, callback)\n}\n\nfunc emptyState(cfg Config) proto.State {\n\tbuilds := map[string]string{}\n\tfor _, p := range cfg.GetPipelines() {\n\t\tfor _, a := range p.Build.Artifacts {\n\t\t\tbuilds[a.ImageName] = NotStarted\n\t\t}\n\t}\n\tmetadata := initializeMetadata(cfg.GetPipelines(), cfg.GetKubeContext())\n\treturn emptyStateWithArtifacts(builds, metadata, cfg.AutoBuild(), cfg.AutoDeploy(), cfg.AutoSync())\n}\n\nfunc emptyStateWithArtifacts(builds map[string]string, metadata *proto.Metadata, autoBuild, autoDeploy, autoSync bool) proto.State {\n\treturn proto.State{\n\t\tBuildState: &proto.BuildState{\n\t\t\tArtifacts: builds,\n\t\t\tAutoTrigger: autoBuild,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tTestState: &proto.TestState{\n\t\t\tStatus: NotStarted,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tDeployState: &proto.DeployState{\n\t\t\tStatus: NotStarted,\n\t\t\tAutoTrigger: autoDeploy,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tStatusCheckState: emptyStatusCheckState(),\n\t\tForwardedPorts: make(map[int32]*proto.PortForwardEvent),\n\t\tFileSyncState: &proto.FileSyncState{\n\t\t\tStatus: NotStarted,\n\t\t\tAutoTrigger: autoSync,\n\t\t},\n\t\tMetadata: metadata,\n\t}\n}\n\n\/\/ ResetStateOnBuild resets the build, test, deploy and sync state\nfunc ResetStateOnBuild() {\n\tbuilds := map[string]string{}\n\tfor k := range handler.getState().BuildState.Artifacts {\n\t\tbuilds[k] = NotStarted\n\t}\n\tautoBuild, autoDeploy, autoSync := handler.getState().BuildState.AutoTrigger, handler.getState().DeployState.AutoTrigger, handler.getState().FileSyncState.AutoTrigger\n\tnewState := emptyStateWithArtifacts(builds, handler.getState().Metadata, autoBuild, autoDeploy, autoSync)\n\thandler.setState(newState)\n}\n\n\/\/ ResetStateOnTest resets the test, deploy, sync and status check state\nfunc ResetStateOnTest() {\n\tnewState := handler.getState()\n\tnewState.TestState.Status = NotStarted\n\thandler.setState(newState)\n}\n\n\/\/ ResetStateOnDeploy resets the deploy, sync and status check state\nfunc ResetStateOnDeploy() {\n\tnewState := handler.getState()\n\tnewState.DeployState.Status = NotStarted\n\tnewState.DeployState.StatusCode = proto.StatusCode_OK\n\tnewState.StatusCheckState = emptyStatusCheckState()\n\tnewState.ForwardedPorts = map[int32]*proto.PortForwardEvent{}\n\tnewState.DebuggingContainers = nil\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoBuildTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.BuildState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoDeployTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.DeployState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoSyncTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.FileSyncState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc emptyStatusCheckState() *proto.StatusCheckState {\n\treturn &proto.StatusCheckState{\n\t\tStatus: NotStarted,\n\t\tResources: map[string]string{},\n\t\tStatusCode: proto.StatusCode_OK,\n\t}\n}\n\n\/\/ InitializeState instantiates the global state of the skaffold runner, as well as the event log.\nfunc InitializeState(cfg Config) {\n\thandler.cfg = cfg\n\thandler.setState(emptyState(cfg))\n}\n\nfunc AutoTriggerDiff(phase constants.Phase, val bool) (bool, error) {\n\tswitch phase {\n\tcase constants.Build:\n\t\treturn val != handler.getState().BuildState.AutoTrigger, nil\n\tcase constants.Sync:\n\t\treturn val != handler.getState().FileSyncState.AutoTrigger, nil\n\tcase constants.Deploy:\n\t\treturn val != handler.getState().DeployState.AutoTrigger, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unknown Phase %v not found in handler state\", phase)\n\t}\n}\n\nfunc TaskInProgress(task constants.Phase, description string) {\n\tif task == constants.DevLoop {\n\t\thandler.iteration++\n\t}\n\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tDescription: description,\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: InProgress,\n\t})\n}\n\nfunc TaskFailed(task constants.Phase, err error) {\n\tae := sErrors.ActionableErrV2(handler.cfg, task, err)\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: Failed,\n\t\tActionableErr: ae,\n\t})\n}\n\nfunc TaskSucceeded(task constants.Phase) {\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: Succeeded,\n\t})\n}\n\nfunc (ev *eventHandler) setState(state proto.State) {\n\tev.stateLock.Lock()\n\tev.state = state\n\tev.stateLock.Unlock()\n}\n\nfunc (ev *eventHandler) handle(event *proto.Event) {\n\tgo func(t *timestamp.Timestamp) {\n\t\tevent.Timestamp = t\n\t\tev.eventChan <- event\n\t\tif _, ok := event.GetEventType().(*proto.Event_TerminationEvent); ok {\n\t\t\t\/\/ close the event channel indicating there are no more events to all the\n\t\t\t\/\/ receivers\n\t\t\tclose(ev.eventChan)\n\t\t}\n\t}(ptypes.TimestampNow())\n}\n\nfunc (ev *eventHandler) handleTaskEvent(e *proto.TaskEvent) {\n\tev.handle(&proto.Event{\n\t\tEventType: &proto.Event_TaskEvent{\n\t\t\tTaskEvent: e,\n\t\t},\n\t})\n}\n\nfunc (ev *eventHandler) handleExec(event *proto.Event) {\n\tswitch e := event.GetEventType().(type) {\n\tcase *proto.Event_ApplicationLogEvent:\n\t\tev.logApplicationLog(event)\n\t\treturn\n\tcase *proto.Event_SkaffoldLogEvent:\n\t\tev.logSkaffoldLog(event)\n\t\treturn\n\tcase *proto.Event_BuildSubtaskEvent:\n\t\tbe := e.BuildSubtaskEvent\n\t\tif be.Step == Build {\n\t\t\tev.stateLock.Lock()\n\t\t\tev.state.BuildState.Artifacts[be.Artifact] = be.Status\n\t\t\tev.stateLock.Unlock()\n\t\t}\n\tcase *proto.Event_TestEvent:\n\t\tte := e.TestEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.TestState.Status = te.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_DeploySubtaskEvent:\n\t\tde := e.DeploySubtaskEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.DeployState.Status = de.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_PortEvent:\n\t\tpe := e.PortEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.ForwardedPorts[pe.LocalPort] = pe\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_StatusCheckSubtaskEvent:\n\t\tse := e.StatusCheckSubtaskEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.StatusCheckState.Resources[se.Resource] = se.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_FileSyncEvent:\n\t\tfse := e.FileSyncEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.FileSyncState.Status = fse.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_DebuggingContainerEvent:\n\t\tde := e.DebuggingContainerEvent\n\t\tev.stateLock.Lock()\n\t\tswitch de.Status {\n\t\tcase Started:\n\t\t\tev.state.DebuggingContainers = append(ev.state.DebuggingContainers, de)\n\t\tcase Terminated:\n\t\t\tn := 0\n\t\t\tfor _, x := range ev.state.DebuggingContainers {\n\t\t\t\tif x.Namespace != de.Namespace || x.PodName != de.PodName || x.ContainerName != de.ContainerName {\n\t\t\t\t\tev.state.DebuggingContainers[n] = x\n\t\t\t\t\tn++\n\t\t\t\t}\n\t\t\t}\n\t\t\tev.state.DebuggingContainers = ev.state.DebuggingContainers[:n]\n\t\t}\n\t\tev.stateLock.Unlock()\n\t}\n\tev.logEvent(event)\n}\n\n\/\/ SaveEventsToFile saves the current event log to the filepath provided\nfunc SaveEventsToFile(fp string) error {\n\thandler.logLock.Lock()\n\tf, err := os.OpenFile(fp, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening %s: %w\", fp, err)\n\t}\n\tdefer f.Close()\n\tmarshaller := jsonpb.Marshaler{}\n\tfor _, ev := range handler.eventLog {\n\t\tcontents := bytes.NewBuffer([]byte{})\n\t\tif err := marshaller.Marshal(contents, &ev); err != nil {\n\t\t\treturn fmt.Errorf(\"marshalling event: %w\", err)\n\t\t}\n\t\tif _, err := f.WriteString(contents.String() + \"\\n\"); err != nil {\n\t\t\treturn fmt.Errorf(\"writing string: %w\", err)\n\t\t}\n\t}\n\thandler.logLock.Unlock()\n\treturn nil\n}\n<commit_msg>Clear `ApplicationLogEvent` and `SkaffoldLogEvent` buffers on new dev iteration (#5993)<commit_after>\/*\nCopyright 2021 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v2\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\/\/nolint:golint,staticcheck\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/constants\"\n\tsErrors \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/errors\"\n\tproto \"github.com\/GoogleContainerTools\/skaffold\/proto\/v2\"\n)\n\nconst (\n\tNotStarted = \"NotStarted\"\n\tInProgress = \"InProgress\"\n\tComplete = \"Complete\"\n\tFailed = \"Failed\"\n\tInfo = \"Information\"\n\tStarted = \"Started\"\n\tSucceeded = \"Succeeded\"\n\tTerminated = \"Terminated\"\n\tCanceled = \"Canceled\"\n)\n\nvar handler = newHandler()\n\nfunc newHandler() *eventHandler {\n\th := &eventHandler{\n\t\teventChan: make(chan *proto.Event),\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tev, open := <-h.eventChan\n\t\t\tif !open {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\th.handleExec(ev)\n\t\t}\n\t}()\n\treturn h\n}\n\ntype eventHandler struct {\n\teventLog []proto.Event\n\tlogLock sync.Mutex\n\tapplicationLogs []proto.Event\n\tapplicationLogsLock sync.Mutex\n\tskaffoldLogs []proto.Event\n\tskaffoldLogsLock sync.Mutex\n\tcfg Config\n\n\titeration int\n\tstate proto.State\n\tstateLock sync.Mutex\n\teventChan chan *proto.Event\n\teventListeners []*listener\n\tapplicationLogListeners []*listener\n\tskaffoldLogListeners []*listener\n}\n\ntype listener struct {\n\tcallback func(*proto.Event) error\n\terrors chan error\n\tclosed bool\n}\n\nfunc GetIteration() int {\n\treturn handler.iteration\n}\n\nfunc GetState() (*proto.State, error) {\n\tstate := handler.getState()\n\treturn &state, nil\n}\n\nfunc ForEachEvent(callback func(*proto.Event) error) error {\n\treturn handler.forEachEvent(callback)\n}\n\nfunc ForEachApplicationLog(callback func(*proto.Event) error) error {\n\treturn handler.forEachApplicationLog(callback)\n}\n\nfunc ForEachSkaffoldLog(callback func(*proto.Event) error) error {\n\treturn handler.forEachSkaffoldLog(callback)\n}\n\nfunc Handle(event *proto.Event) error {\n\tif event != nil {\n\t\thandler.handle(event)\n\t}\n\treturn nil\n}\n\nfunc (ev *eventHandler) getState() proto.State {\n\tev.stateLock.Lock()\n\t\/\/ Deep copy\n\tbuf, _ := json.Marshal(ev.state)\n\tev.stateLock.Unlock()\n\n\tvar state proto.State\n\tjson.Unmarshal(buf, &state)\n\n\treturn state\n}\n\nfunc (ev *eventHandler) log(event *proto.Event, listeners *[]*listener, log *[]proto.Event, lock sync.Locker) {\n\tlock.Lock()\n\n\tfor _, listener := range *listeners {\n\t\tif listener.closed {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := listener.callback(event); err != nil {\n\t\t\tlistener.errors <- err\n\t\t\tlistener.closed = true\n\t\t}\n\t}\n\t*log = append(*log, *event)\n\n\tlock.Unlock()\n}\n\nfunc (ev *eventHandler) logEvent(event *proto.Event) {\n\tev.log(event, &ev.eventListeners, &ev.eventLog, &ev.logLock)\n}\n\nfunc (ev *eventHandler) logApplicationLog(event *proto.Event) {\n\tev.log(event, &ev.applicationLogListeners, &ev.applicationLogs, &ev.applicationLogsLock)\n}\n\nfunc (ev *eventHandler) logSkaffoldLog(event *proto.Event) {\n\tev.log(event, &ev.skaffoldLogListeners, &ev.skaffoldLogs, &ev.skaffoldLogsLock)\n}\n\nfunc (ev *eventHandler) forEach(listeners *[]*listener, log *[]proto.Event, lock sync.Locker, callback func(*proto.Event) error) error {\n\tlistener := &listener{\n\t\tcallback: callback,\n\t\terrors: make(chan error),\n\t}\n\n\tlock.Lock()\n\n\toldEvents := make([]proto.Event, len(*log))\n\tcopy(oldEvents, *log)\n\t*listeners = append(*listeners, listener)\n\n\tlock.Unlock()\n\n\tfor i := range oldEvents {\n\t\tif err := callback(&oldEvents[i]); err != nil {\n\t\t\t\/\/ listener should maybe be closed\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn <-listener.errors\n}\n\nfunc (ev *eventHandler) forEachEvent(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.eventListeners, &ev.eventLog, &ev.logLock, callback)\n}\n\nfunc (ev *eventHandler) forEachApplicationLog(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.applicationLogListeners, &ev.applicationLogs, &ev.applicationLogsLock, callback)\n}\n\nfunc (ev *eventHandler) forEachSkaffoldLog(callback func(*proto.Event) error) error {\n\treturn ev.forEach(&ev.skaffoldLogListeners, &ev.skaffoldLogs, &ev.skaffoldLogsLock, callback)\n}\n\nfunc emptyState(cfg Config) proto.State {\n\tbuilds := map[string]string{}\n\tfor _, p := range cfg.GetPipelines() {\n\t\tfor _, a := range p.Build.Artifacts {\n\t\t\tbuilds[a.ImageName] = NotStarted\n\t\t}\n\t}\n\tmetadata := initializeMetadata(cfg.GetPipelines(), cfg.GetKubeContext())\n\treturn emptyStateWithArtifacts(builds, metadata, cfg.AutoBuild(), cfg.AutoDeploy(), cfg.AutoSync())\n}\n\nfunc emptyStateWithArtifacts(builds map[string]string, metadata *proto.Metadata, autoBuild, autoDeploy, autoSync bool) proto.State {\n\treturn proto.State{\n\t\tBuildState: &proto.BuildState{\n\t\t\tArtifacts: builds,\n\t\t\tAutoTrigger: autoBuild,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tTestState: &proto.TestState{\n\t\t\tStatus: NotStarted,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tDeployState: &proto.DeployState{\n\t\t\tStatus: NotStarted,\n\t\t\tAutoTrigger: autoDeploy,\n\t\t\tStatusCode: proto.StatusCode_OK,\n\t\t},\n\t\tStatusCheckState: emptyStatusCheckState(),\n\t\tForwardedPorts: make(map[int32]*proto.PortForwardEvent),\n\t\tFileSyncState: &proto.FileSyncState{\n\t\t\tStatus: NotStarted,\n\t\t\tAutoTrigger: autoSync,\n\t\t},\n\t\tMetadata: metadata,\n\t}\n}\n\n\/\/ ResetStateOnBuild resets the build, test, deploy and sync state\nfunc ResetStateOnBuild() {\n\tbuilds := map[string]string{}\n\tfor k := range handler.getState().BuildState.Artifacts {\n\t\tbuilds[k] = NotStarted\n\t}\n\tautoBuild, autoDeploy, autoSync := handler.getState().BuildState.AutoTrigger, handler.getState().DeployState.AutoTrigger, handler.getState().FileSyncState.AutoTrigger\n\tnewState := emptyStateWithArtifacts(builds, handler.getState().Metadata, autoBuild, autoDeploy, autoSync)\n\thandler.setState(newState)\n}\n\n\/\/ ResetStateOnTest resets the test, deploy, sync and status check state\nfunc ResetStateOnTest() {\n\tnewState := handler.getState()\n\tnewState.TestState.Status = NotStarted\n\thandler.setState(newState)\n}\n\n\/\/ ResetStateOnDeploy resets the deploy, sync and status check state\nfunc ResetStateOnDeploy() {\n\tnewState := handler.getState()\n\tnewState.DeployState.Status = NotStarted\n\tnewState.DeployState.StatusCode = proto.StatusCode_OK\n\tnewState.StatusCheckState = emptyStatusCheckState()\n\tnewState.ForwardedPorts = map[int32]*proto.PortForwardEvent{}\n\tnewState.DebuggingContainers = nil\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoBuildTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.BuildState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoDeployTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.DeployState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc UpdateStateAutoSyncTrigger(t bool) {\n\tnewState := handler.getState()\n\tnewState.FileSyncState.AutoTrigger = t\n\thandler.setState(newState)\n}\n\nfunc emptyStatusCheckState() *proto.StatusCheckState {\n\treturn &proto.StatusCheckState{\n\t\tStatus: NotStarted,\n\t\tResources: map[string]string{},\n\t\tStatusCode: proto.StatusCode_OK,\n\t}\n}\n\n\/\/ InitializeState instantiates the global state of the skaffold runner, as well as the event log.\nfunc InitializeState(cfg Config) {\n\thandler.cfg = cfg\n\thandler.setState(emptyState(cfg))\n}\n\nfunc AutoTriggerDiff(phase constants.Phase, val bool) (bool, error) {\n\tswitch phase {\n\tcase constants.Build:\n\t\treturn val != handler.getState().BuildState.AutoTrigger, nil\n\tcase constants.Sync:\n\t\treturn val != handler.getState().FileSyncState.AutoTrigger, nil\n\tcase constants.Deploy:\n\t\treturn val != handler.getState().DeployState.AutoTrigger, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unknown Phase %v not found in handler state\", phase)\n\t}\n}\n\nfunc TaskInProgress(task constants.Phase, description string) {\n\t\/\/ Special casing to increment iteration and clear application and skaffold logs\n\tif task == constants.DevLoop {\n\t\thandler.iteration++\n\n\t\thandler.applicationLogs = []proto.Event{}\n\t\thandler.skaffoldLogs = []proto.Event{}\n\t}\n\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tDescription: description,\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: InProgress,\n\t})\n}\n\nfunc TaskFailed(task constants.Phase, err error) {\n\tae := sErrors.ActionableErrV2(handler.cfg, task, err)\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: Failed,\n\t\tActionableErr: ae,\n\t})\n}\n\nfunc TaskSucceeded(task constants.Phase) {\n\thandler.handleTaskEvent(&proto.TaskEvent{\n\t\tId: fmt.Sprintf(\"%s-%d\", task, handler.iteration),\n\t\tTask: string(task),\n\t\tIteration: int32(handler.iteration),\n\t\tStatus: Succeeded,\n\t})\n}\n\nfunc (ev *eventHandler) setState(state proto.State) {\n\tev.stateLock.Lock()\n\tev.state = state\n\tev.stateLock.Unlock()\n}\n\nfunc (ev *eventHandler) handle(event *proto.Event) {\n\tgo func(t *timestamp.Timestamp) {\n\t\tevent.Timestamp = t\n\t\tev.eventChan <- event\n\t\tif _, ok := event.GetEventType().(*proto.Event_TerminationEvent); ok {\n\t\t\t\/\/ close the event channel indicating there are no more events to all the\n\t\t\t\/\/ receivers\n\t\t\tclose(ev.eventChan)\n\t\t}\n\t}(ptypes.TimestampNow())\n}\n\nfunc (ev *eventHandler) handleTaskEvent(e *proto.TaskEvent) {\n\tev.handle(&proto.Event{\n\t\tEventType: &proto.Event_TaskEvent{\n\t\t\tTaskEvent: e,\n\t\t},\n\t})\n}\n\nfunc (ev *eventHandler) handleExec(event *proto.Event) {\n\tswitch e := event.GetEventType().(type) {\n\tcase *proto.Event_ApplicationLogEvent:\n\t\tev.logApplicationLog(event)\n\t\treturn\n\tcase *proto.Event_SkaffoldLogEvent:\n\t\tev.logSkaffoldLog(event)\n\t\treturn\n\tcase *proto.Event_BuildSubtaskEvent:\n\t\tbe := e.BuildSubtaskEvent\n\t\tif be.Step == Build {\n\t\t\tev.stateLock.Lock()\n\t\t\tev.state.BuildState.Artifacts[be.Artifact] = be.Status\n\t\t\tev.stateLock.Unlock()\n\t\t}\n\tcase *proto.Event_TestEvent:\n\t\tte := e.TestEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.TestState.Status = te.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_DeploySubtaskEvent:\n\t\tde := e.DeploySubtaskEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.DeployState.Status = de.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_PortEvent:\n\t\tpe := e.PortEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.ForwardedPorts[pe.LocalPort] = pe\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_StatusCheckSubtaskEvent:\n\t\tse := e.StatusCheckSubtaskEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.StatusCheckState.Resources[se.Resource] = se.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_FileSyncEvent:\n\t\tfse := e.FileSyncEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.FileSyncState.Status = fse.Status\n\t\tev.stateLock.Unlock()\n\tcase *proto.Event_DebuggingContainerEvent:\n\t\tde := e.DebuggingContainerEvent\n\t\tev.stateLock.Lock()\n\t\tswitch de.Status {\n\t\tcase Started:\n\t\t\tev.state.DebuggingContainers = append(ev.state.DebuggingContainers, de)\n\t\tcase Terminated:\n\t\t\tn := 0\n\t\t\tfor _, x := range ev.state.DebuggingContainers {\n\t\t\t\tif x.Namespace != de.Namespace || x.PodName != de.PodName || x.ContainerName != de.ContainerName {\n\t\t\t\t\tev.state.DebuggingContainers[n] = x\n\t\t\t\t\tn++\n\t\t\t\t}\n\t\t\t}\n\t\t\tev.state.DebuggingContainers = ev.state.DebuggingContainers[:n]\n\t\t}\n\t\tev.stateLock.Unlock()\n\t}\n\tev.logEvent(event)\n}\n\n\/\/ SaveEventsToFile saves the current event log to the filepath provided\nfunc SaveEventsToFile(fp string) error {\n\thandler.logLock.Lock()\n\tf, err := os.OpenFile(fp, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening %s: %w\", fp, err)\n\t}\n\tdefer f.Close()\n\tmarshaller := jsonpb.Marshaler{}\n\tfor _, ev := range handler.eventLog {\n\t\tcontents := bytes.NewBuffer([]byte{})\n\t\tif err := marshaller.Marshal(contents, &ev); err != nil {\n\t\t\treturn fmt.Errorf(\"marshalling event: %w\", err)\n\t\t}\n\t\tif _, err := f.WriteString(contents.String() + \"\\n\"); err != nil {\n\t\t\treturn fmt.Errorf(\"writing string: %w\", err)\n\t\t}\n\t}\n\thandler.logLock.Unlock()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\nconst defaultRetry int = 5\n\nvar colors = []int{\n\t31, \/\/ red\n\t32, \/\/ green\n\t33, \/\/ yellow\n\t34, \/\/ blue\n\t35, \/\/ magenta\n\t36, \/\/ cyan\n\t37, \/\/ lightGray\n\t90, \/\/ darkGray\n\t91, \/\/ lightRed\n\t92, \/\/ lightGreen\n\t93, \/\/ lightYellow\n\t94, \/\/ lightBlue\n\t95, \/\/ lightPurple\n\t96, \/\/ lightCyan\n\t97, \/\/ white\n}\n\n\/\/ LogAggregator aggregates the logs for all the deployed pods.\ntype LogAggregator struct {\n\tmuted int32\n\tcreationTime time.Time\n\toutput io.Writer\n\tretries int\n\tnextColorIndex int\n\tlockColor sync.Mutex\n}\n\n\/\/ NewLogAggregator creates a new LogAggregator for a given output.\nfunc NewLogAggregator(out io.Writer) *LogAggregator {\n\treturn &LogAggregator{\n\t\tcreationTime: time.Now(),\n\t\toutput: out,\n\t\tretries: defaultRetry,\n\t}\n}\n\nconst streamRetryDelay = 1 * time.Second\n\n\/\/ TODO(@r2d4): Figure out how to mock this out. fake.NewSimpleClient\n\/\/ won't mock out restclient.Request and will just return a nil stream.\nvar getStream = func(r *restclient.Request) (io.ReadCloser, error) {\n\treturn r.Stream()\n}\n\nfunc (a *LogAggregator) StreamLogs(client corev1.CoreV1Interface, image string) {\n\tfor i := 0; i < a.retries; i++ {\n\t\tif err := a.streamLogs(client, image); err != nil {\n\t\t\tlogrus.Infof(\"Error getting logs %s\", err)\n\t\t}\n\t\ttime.Sleep(streamRetryDelay)\n\t}\n}\n\nfunc (a *LogAggregator) SetCreationTime(t time.Time) {\n\ta.creationTime = t\n}\n\n\/\/ nolint: interfacer\nfunc (a *LogAggregator) streamLogs(client corev1.CoreV1Interface, image string) error {\n\tpods, err := client.Pods(\"\").List(meta_v1.ListOptions{\n\t\tIncludeUninitialized: true,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting pods\")\n\t}\n\n\tfound := false\n\n\tlogrus.Infof(\"Looking for logs to stream for %s\", image)\n\tfor _, p := range pods.Items {\n\t\tfor _, c := range p.Spec.Containers {\n\t\t\tlogrus.Debugf(\"Found container %s with image %s\", c.Name, c.Image)\n\t\t\tif c.Image != image {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogrus.Infof(\"Trying to stream logs from pod: %s container: %s\", p.Name, c.Name)\n\t\t\tpods := client.Pods(p.Namespace)\n\t\t\tif err := WaitForPodReady(pods, p.Name); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"waiting for pod ready\")\n\t\t\t}\n\t\t\treq := pods.GetLogs(p.Name, &v1.PodLogOptions{\n\t\t\t\tFollow: true,\n\t\t\t\tContainer: c.Name,\n\t\t\t\tSinceTime: &meta_v1.Time{\n\t\t\t\t\tTime: a.creationTime,\n\t\t\t\t},\n\t\t\t})\n\t\t\trc, err := getStream(req)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"setting up container log stream\")\n\t\t\t}\n\t\t\tdefer rc.Close()\n\n\t\t\tcolor := a.nextColor()\n\n\t\t\theader := fmt.Sprintf(\"\\033[1;%dm[%s %s]\\033[0m\", color, p.Name, c.Name)\n\t\t\tif err := a.streamRequest(header, rc); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"streaming request\")\n\t\t\t}\n\n\t\t\tfound = true\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn fmt.Errorf(\"Image %s not found\", image)\n\t}\n\n\treturn nil\n}\n\nfunc (a *LogAggregator) nextColor() int {\n\ta.lockColor.Lock()\n\tcolor := colors[a.nextColorIndex]\n\ta.nextColorIndex++\n\ta.lockColor.Unlock()\n\n\treturn color\n}\n\nfunc (a *LogAggregator) streamRequest(header string, rc io.Reader) error {\n\tr := bufio.NewReader(rc)\n\tfor {\n\t\t\/\/ Read up to newline\n\t\tline, err := r.ReadBytes('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"reading bytes from log stream\")\n\t\t}\n\n\t\tif a.IsMuted() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, err := fmt.Fprintf(a.output, \"%s %s\", header, line); err != nil {\n\t\t\treturn errors.Wrap(err, \"writing to out\")\n\t\t}\n\t}\n\tlogrus.Infof(\"%s exited\", header)\n\treturn nil\n}\n\n\/\/ Mute mutes the logs.\nfunc (a *LogAggregator) Mute() {\n\tatomic.StoreInt32(&a.muted, 1)\n}\n\n\/\/ Unmute unmute the logs.\nfunc (a *LogAggregator) Unmute() {\n\tatomic.StoreInt32(&a.muted, 0)\n}\n\n\/\/ IsMuted says if the logs are to be muted.\nfunc (a *LogAggregator) IsMuted() bool {\n\treturn atomic.LoadInt32(&a.muted) == 1\n}\n<commit_msg>Stop running out of colors<commit_after>\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\nconst defaultRetry int = 5\n\nvar colors = []int{\n\t31, \/\/ red\n\t32, \/\/ green\n\t33, \/\/ yellow\n\t34, \/\/ blue\n\t35, \/\/ magenta\n\t36, \/\/ cyan\n\t37, \/\/ lightGray\n\t90, \/\/ darkGray\n\t91, \/\/ lightRed\n\t92, \/\/ lightGreen\n\t93, \/\/ lightYellow\n\t94, \/\/ lightBlue\n\t95, \/\/ lightPurple\n\t96, \/\/ lightCyan\n\t97, \/\/ white\n}\n\n\/\/ LogAggregator aggregates the logs for all the deployed pods.\ntype LogAggregator struct {\n\tmuted int32\n\tcreationTime time.Time\n\toutput io.Writer\n\tretries int\n\tnextColorIndex int\n\tlockColor sync.Mutex\n}\n\n\/\/ NewLogAggregator creates a new LogAggregator for a given output.\nfunc NewLogAggregator(out io.Writer) *LogAggregator {\n\treturn &LogAggregator{\n\t\tcreationTime: time.Now(),\n\t\toutput: out,\n\t\tretries: defaultRetry,\n\t}\n}\n\nconst streamRetryDelay = 1 * time.Second\n\n\/\/ TODO(@r2d4): Figure out how to mock this out. fake.NewSimpleClient\n\/\/ won't mock out restclient.Request and will just return a nil stream.\nvar getStream = func(r *restclient.Request) (io.ReadCloser, error) {\n\treturn r.Stream()\n}\n\nfunc (a *LogAggregator) StreamLogs(client corev1.CoreV1Interface, image string) {\n\tfor i := 0; i < a.retries; i++ {\n\t\tif err := a.streamLogs(client, image); err != nil {\n\t\t\tlogrus.Infof(\"Error getting logs %s\", err)\n\t\t}\n\t\ttime.Sleep(streamRetryDelay)\n\t}\n}\n\nfunc (a *LogAggregator) SetCreationTime(t time.Time) {\n\ta.creationTime = t\n}\n\n\/\/ nolint: interfacer\nfunc (a *LogAggregator) streamLogs(client corev1.CoreV1Interface, image string) error {\n\tpods, err := client.Pods(\"\").List(meta_v1.ListOptions{\n\t\tIncludeUninitialized: true,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting pods\")\n\t}\n\n\tfound := false\n\n\tlogrus.Infof(\"Looking for logs to stream for %s\", image)\n\tfor _, p := range pods.Items {\n\t\tfor _, c := range p.Spec.Containers {\n\t\t\tlogrus.Debugf(\"Found container %s with image %s\", c.Name, c.Image)\n\t\t\tif c.Image != image {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogrus.Infof(\"Trying to stream logs from pod: %s container: %s\", p.Name, c.Name)\n\t\t\tpods := client.Pods(p.Namespace)\n\t\t\tif err := WaitForPodReady(pods, p.Name); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"waiting for pod ready\")\n\t\t\t}\n\t\t\treq := pods.GetLogs(p.Name, &v1.PodLogOptions{\n\t\t\t\tFollow: true,\n\t\t\t\tContainer: c.Name,\n\t\t\t\tSinceTime: &meta_v1.Time{\n\t\t\t\t\tTime: a.creationTime,\n\t\t\t\t},\n\t\t\t})\n\t\t\trc, err := getStream(req)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"setting up container log stream\")\n\t\t\t}\n\t\t\tdefer rc.Close()\n\n\t\t\tcolor := a.nextColor()\n\n\t\t\theader := fmt.Sprintf(\"\\033[1;%dm[%s %s]\\033[0m\", color, p.Name, c.Name)\n\t\t\tif err := a.streamRequest(header, rc); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"streaming request\")\n\t\t\t}\n\n\t\t\tfound = true\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn fmt.Errorf(\"Image %s not found\", image)\n\t}\n\n\treturn nil\n}\n\nfunc (a *LogAggregator) nextColor() int {\n\ta.lockColor.Lock()\n\tcolor := colors[a.nextColorIndex]\n\ta.nextColorIndex = (a.nextColorIndex + 1) % len(colors)\n\ta.lockColor.Unlock()\n\n\treturn color\n}\n\nfunc (a *LogAggregator) streamRequest(header string, rc io.Reader) error {\n\tr := bufio.NewReader(rc)\n\tfor {\n\t\t\/\/ Read up to newline\n\t\tline, err := r.ReadBytes('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"reading bytes from log stream\")\n\t\t}\n\n\t\tif a.IsMuted() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, err := fmt.Fprintf(a.output, \"%s %s\", header, line); err != nil {\n\t\t\treturn errors.Wrap(err, \"writing to out\")\n\t\t}\n\t}\n\tlogrus.Infof(\"%s exited\", header)\n\treturn nil\n}\n\n\/\/ Mute mutes the logs.\nfunc (a *LogAggregator) Mute() {\n\tatomic.StoreInt32(&a.muted, 1)\n}\n\n\/\/ Unmute unmute the logs.\nfunc (a *LogAggregator) Unmute() {\n\tatomic.StoreInt32(&a.muted, 0)\n}\n\n\/\/ IsMuted says if the logs are to be muted.\nfunc (a *LogAggregator) IsMuted() bool {\n\treturn atomic.LoadInt32(&a.muted) == 1\n}\n<|endoftext|>"} {"text":"<commit_before>package stats\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/labstack\/echo\"\n)\n\ntype (\n\tStats struct {\n\t\tUptime time.Time `json:\"uptime\"`\n\t\tRequestCount uint64 `json:\"requestCount\"`\n\t\tStatuses map[string]int `json:\"statuses\"`\n\t\tmutex sync.RWMutex\n\t}\n)\n\nfunc NewStats() *Stats {\n\treturn &Stats{\n\t\tUptime: time.Now(),\n\t\tStatuses: map[string]int{},\n\t}\n}\n\n\/\/ Process is the middleware function.\nfunc (s *Stats) Process(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tif err := next(c); err != nil {\n\t\t\tc.Error(err)\n\t\t}\n\t\ts.mutex.Lock()\n\t\tdefer s.mutex.Unlock()\n\t\ts.RequestCount++\n\t\tstatus := strconv.Itoa(c.Response().Status)\n\t\ts.Statuses[status]++\n\t\treturn nil\n\t}\n}\n\n\/\/ Handle is the endpoint to get stats.\nfunc (s *Stats) Handle(c echo.Context) error {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\treturn c.JSON(http.StatusOK, s)\n}\n\n\/\/ ServerHeader middleware adds a `Server` header to the response.\nfunc ServerHeader(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Response().Header().Set(echo.HeaderServer, \"Echo\/3.0\")\n\t\treturn next(c)\n\t}\n}\n<commit_msg>Updated stats module<commit_after>package stats\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/robjporter\/go-functions\/browser\"\n)\n\ntype (\n\tStats struct {\n\t\tUptime time.Time `json:\"uptime\"`\n\t\tRequestCount uint64 `json:\"requestCount\"`\n\t\tStatuses map[string]int `json:\"statuses\"`\n\t\tBrowsers map[string]int `json:\"browsers\"`\n\t\tBrowserVersion map[string]int `json:\"browserversion\"`\n\t\tOS map[string]int `json:\"os\"`\n\t\tDevice map[string]int `json:\"device\"`\n\t\tOSVersion map[string]int `json:\"osversion\"`\n\t\tmutex sync.RWMutex\n\t}\n)\n\nfunc NewStats() *Stats {\n\treturn &Stats{\n\t\tUptime: time.Now(),\n\t\tStatuses: map[string]int{},\n\t\tBrowsers: map[string]int{},\n\t\tBrowserVersion: map[string]int{},\n\t\tOS: map[string]int{},\n\t\tDevice: map[string]int{},\n\t\tOSVersion: map[string]int{},\n\t}\n}\n\n\/\/ Process is the middleware function.\nfunc (s *Stats) Process(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tif err := next(c); err != nil {\n\t\t\tc.Error(err)\n\t\t}\n\t\ts.mutex.Lock()\n\t\tdefer s.mutex.Unlock()\n\t\ts.RequestCount++\n\t\tstatus := strconv.Itoa(c.Response().Status)\n\t\tbrowser, browserversion, devicename, osname, osversion := getBrowser(c.Request().UserAgent())\n\t\ts.Statuses[status]++\n\t\ts.Browsers[browser]++\n\t\ts.BrowserVersion[browser+\" \"+browserversion]++\n\t\ts.OS[osname]++\n\t\ts.OSVersion[osname+\" \"+osversion]++\n\t\ts.Device[devicename]++\n\t\treturn nil\n\t}\n}\n\nfunc getBrowser(agent string) (string, string, string, string, string) {\n\tua1 := browser.Parse(agent)\n\treturn ua1.Browser.Name, ua1.Browser.Version, ua1.Device.Name, ua1.OS.Name, ua1.OS.Version\n}\n\n\/\/ Handle is the endpoint to get stats.\nfunc (s *Stats) Handle(c echo.Context) error {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\treturn c.JSON(http.StatusOK, s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The zlib package implements reading and writing of zlib\n\/\/ format compressed files, as specified in RFC 1950.\npackage zlib\n\nimport (\n\t\"bufio\"\n\t\"compress\/flate\"\n\t\"hash\"\n\t\"hash\/adler32\"\n\t\"io\"\n\t\"os\"\n)\n\nconst zlibDeflate = 8\n\nvar ChecksumError os.Error = os.ErrorString(\"zlib checksum error\")\nvar HeaderError os.Error = os.ErrorString(\"invalid zlib header\")\nvar UnsupportedError os.Error = os.ErrorString(\"unsupported zlib format\")\n\ntype reader struct {\n\tr flate.Reader\n\tinflater io.ReadCloser\n\tdigest hash.Hash32\n\terr os.Error\n\tscratch [4]byte\n}\n\n\/\/ NewInflater creates a new io.ReadCloser that satisfies reads by decompressing data read from r.\n\/\/ The implementation buffers input and may read more data than necessary from r.\n\/\/ It is the caller's responsibility to call Close on the ReadCloser when done.\nfunc NewInflater(r io.Reader) (io.ReadCloser, os.Error) {\n\tz := new(reader)\n\tif fr, ok := r.(flate.Reader); ok {\n\t\tz.r = fr\n\t} else {\n\t\tz.r = bufio.NewReader(r)\n\t}\n\t_, err := io.ReadFull(z.r, z.scratch[0:2])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th := uint(z.scratch[0])<<8 | uint(z.scratch[1])\n\tif (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {\n\t\treturn nil, HeaderError\n\t}\n\tif z.scratch[1]&0x20 != 0 {\n\t\t\/\/ BUG(nigeltao): The zlib package does not implement the FDICT flag.\n\t\treturn nil, UnsupportedError\n\t}\n\tz.digest = adler32.New()\n\tz.inflater = flate.NewInflater(z.r)\n\treturn z, nil\n}\n\nfunc (z *reader) Read(p []byte) (n int, err os.Error) {\n\tif z.err != nil {\n\t\treturn 0, z.err\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tn, err = z.inflater.Read(p)\n\tz.digest.Write(p[0:n])\n\tif n != 0 || err != os.EOF {\n\t\tz.err = err\n\t\treturn\n\t}\n\n\t\/\/ Finished file; check checksum.\n\tif _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {\n\t\tz.err = err\n\t\treturn 0, err\n\t}\n\t\/\/ ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).\n\tchecksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])\n\tif checksum != z.digest.Sum32() {\n\t\tz.err = ChecksumError\n\t\treturn 0, z.err\n\t}\n\treturn\n}\n\n\/\/ Calling Close does not close the wrapped io.Reader originally passed to NewInflater.\nfunc (z *reader) Close() os.Error {\n\tif z.err != nil {\n\t\treturn z.err\n\t}\n\tz.err = z.inflater.Close()\n\treturn z.err\n}\n<commit_msg>compress\/zlib: add example to doc comment<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nThe zlib package implements reading and writing of zlib\nformat compressed data, as specified in RFC 1950.\n\nThe implementation provides filters that uncompress during reading\nand compress during writing. For example, to write compressed data\nto a buffer:\n\n\tvar b bytes.Buffer\n\tw, err := zlib.NewDeflater(&b)\n\tw.Write(strings.Bytes(\"hello, world\\n\"))\n\tw.Close()\n\nand to read that data back:\n\n\tr, err := zlib.NewInflater(&b)\n\tio.Copy(os.Stdout, r)\n\tr.Close()\n*\/\npackage zlib\n\nimport (\n\t\"bufio\"\n\t\"compress\/flate\"\n\t\"hash\"\n\t\"hash\/adler32\"\n\t\"io\"\n\t\"os\"\n)\n\nconst zlibDeflate = 8\n\nvar ChecksumError os.Error = os.ErrorString(\"zlib checksum error\")\nvar HeaderError os.Error = os.ErrorString(\"invalid zlib header\")\nvar UnsupportedError os.Error = os.ErrorString(\"unsupported zlib format\")\n\ntype reader struct {\n\tr flate.Reader\n\tinflater io.ReadCloser\n\tdigest hash.Hash32\n\terr os.Error\n\tscratch [4]byte\n}\n\n\/\/ NewInflater creates a new io.ReadCloser that satisfies reads by decompressing data read from r.\n\/\/ The implementation buffers input and may read more data than necessary from r.\n\/\/ It is the caller's responsibility to call Close on the ReadCloser when done.\nfunc NewInflater(r io.Reader) (io.ReadCloser, os.Error) {\n\tz := new(reader)\n\tif fr, ok := r.(flate.Reader); ok {\n\t\tz.r = fr\n\t} else {\n\t\tz.r = bufio.NewReader(r)\n\t}\n\t_, err := io.ReadFull(z.r, z.scratch[0:2])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th := uint(z.scratch[0])<<8 | uint(z.scratch[1])\n\tif (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {\n\t\treturn nil, HeaderError\n\t}\n\tif z.scratch[1]&0x20 != 0 {\n\t\t\/\/ BUG(nigeltao): The zlib package does not implement the FDICT flag.\n\t\treturn nil, UnsupportedError\n\t}\n\tz.digest = adler32.New()\n\tz.inflater = flate.NewInflater(z.r)\n\treturn z, nil\n}\n\nfunc (z *reader) Read(p []byte) (n int, err os.Error) {\n\tif z.err != nil {\n\t\treturn 0, z.err\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tn, err = z.inflater.Read(p)\n\tz.digest.Write(p[0:n])\n\tif n != 0 || err != os.EOF {\n\t\tz.err = err\n\t\treturn\n\t}\n\n\t\/\/ Finished file; check checksum.\n\tif _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {\n\t\tz.err = err\n\t\treturn 0, err\n\t}\n\t\/\/ ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).\n\tchecksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])\n\tif checksum != z.digest.Sum32() {\n\t\tz.err = ChecksumError\n\t\treturn 0, z.err\n\t}\n\treturn\n}\n\n\/\/ Calling Close does not close the wrapped io.Reader originally passed to NewInflater.\nfunc (z *reader) Close() os.Error {\n\tif z.err != nil {\n\t\treturn z.err\n\t}\n\tz.err = z.inflater.Close()\n\treturn z.err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http_test\n\nimport (\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc interestingGoroutines() (gs []string) {\n\tbuf := make([]byte, 2<<20)\n\tbuf = buf[:runtime.Stack(buf, true)]\n\tfor _, g := range strings.Split(string(buf), \"\\n\\n\") {\n\t\tsl := strings.SplitN(g, \"\\n\", 2)\n\t\tif len(sl) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tstack := strings.TrimSpace(sl[1])\n\t\tif stack == \"\" ||\n\t\t\tstrings.Contains(stack, \"created by net.newPollServer\") ||\n\t\t\tstrings.Contains(stack, \"created by net.startServer\") ||\n\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n\t\t\tstrings.Contains(stack, \"closeWriteAndWait\") ||\n\t\t\tstrings.Contains(stack, \"testing.Main(\") {\n\t\t\tcontinue\n\t\t}\n\t\tgs = append(gs, stack)\n\t}\n\tsort.Strings(gs)\n\treturn\n}\n\n\/\/ Verify the other tests didn't leave any goroutines running.\n\/\/ This is in a file named z_last_test.go so it sorts at the end.\nfunc TestGoroutinesRunning(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"not counting goroutines for leakage in -short mode\")\n\t}\n\tgs := interestingGoroutines()\n\n\tn := 0\n\tstackCount := make(map[string]int)\n\tfor _, g := range gs {\n\t\tstackCount[g]++\n\t\tn++\n\t}\n\n\tt.Logf(\"num goroutines = %d\", n)\n\tif n > 0 {\n\t\tt.Error(\"Too many goroutines.\")\n\t\tfor stack, count := range stackCount {\n\t\t\tt.Logf(\"%d instances of:\\n%s\", count, stack)\n\t\t}\n\t}\n}\n\nfunc afterTest(t *testing.T) {\n\thttp.DefaultTransport.(*http.Transport).CloseIdleConnections()\n\tif testing.Short() {\n\t\treturn\n\t}\n\tvar bad string\n\tbadSubstring := map[string]string{\n\t\t\").readLoop(\": \"a Transport\",\n\t\t\").writeLoop(\": \"a Transport\",\n\t\t\"created by net\/http\/httptest.(*Server).Start\": \"an httptest.Server\",\n\t\t\"timeoutHandler\": \"a TimeoutHandler\",\n\t\t\"net.(*netFD).connect(\": \"a timing out dial\",\n\t}\n\tvar stacks string\n\tfor i := 0; i < 4; i++ {\n\t\tbad = \"\"\n\t\tstacks = strings.Join(interestingGoroutines(), \"\\n\\n\")\n\t\tfor substr, what := range badSubstring {\n\t\t\tif strings.Contains(stacks, substr) {\n\t\t\t\tbad = what\n\t\t\t}\n\t\t}\n\t\tif bad == \"\" {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Bad stuff found, but goroutines might just still be\n\t\t\/\/ shutting down, so give it some time.\n\t\ttime.Sleep(250 * time.Millisecond)\n\t}\n\tt.Errorf(\"Test appears to have leaked %s:\\n%s\", bad, stacks)\n}\n<commit_msg>net\/http: ignore more uninteresting goroutines<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http_test\n\nimport (\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc interestingGoroutines() (gs []string) {\n\tbuf := make([]byte, 2<<20)\n\tbuf = buf[:runtime.Stack(buf, true)]\n\tfor _, g := range strings.Split(string(buf), \"\\n\\n\") {\n\t\tsl := strings.SplitN(g, \"\\n\", 2)\n\t\tif len(sl) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tstack := strings.TrimSpace(sl[1])\n\t\tif stack == \"\" ||\n\t\t\tstrings.Contains(stack, \"created by net.newPollServer\") ||\n\t\t\tstrings.Contains(stack, \"created by net.startServer\") ||\n\t\t\tstrings.Contains(stack, \"created by testing.RunTests\") ||\n\t\t\tstrings.Contains(stack, \"closeWriteAndWait\") ||\n\t\t\tstrings.Contains(stack, \"testing.Main(\") ||\n\t\t\t\/\/ These only show up with GOTRACEBACK=2; Issue 5005 (comment 28)\n\t\t\tstrings.Contains(stack, \"runtime.goexit\") ||\n\t\t\tstrings.Contains(stack, \"created by runtime.gc\") ||\n\t\t\tstrings.Contains(stack, \"runtime.MHeap_Scavenger\") {\n\t\t\tcontinue\n\t\t}\n\t\tgs = append(gs, stack)\n\t}\n\tsort.Strings(gs)\n\treturn\n}\n\n\/\/ Verify the other tests didn't leave any goroutines running.\n\/\/ This is in a file named z_last_test.go so it sorts at the end.\nfunc TestGoroutinesRunning(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"not counting goroutines for leakage in -short mode\")\n\t}\n\tgs := interestingGoroutines()\n\n\tn := 0\n\tstackCount := make(map[string]int)\n\tfor _, g := range gs {\n\t\tstackCount[g]++\n\t\tn++\n\t}\n\n\tt.Logf(\"num goroutines = %d\", n)\n\tif n > 0 {\n\t\tt.Error(\"Too many goroutines.\")\n\t\tfor stack, count := range stackCount {\n\t\t\tt.Logf(\"%d instances of:\\n%s\", count, stack)\n\t\t}\n\t}\n}\n\nfunc afterTest(t *testing.T) {\n\thttp.DefaultTransport.(*http.Transport).CloseIdleConnections()\n\tif testing.Short() {\n\t\treturn\n\t}\n\tvar bad string\n\tbadSubstring := map[string]string{\n\t\t\").readLoop(\": \"a Transport\",\n\t\t\").writeLoop(\": \"a Transport\",\n\t\t\"created by net\/http\/httptest.(*Server).Start\": \"an httptest.Server\",\n\t\t\"timeoutHandler\": \"a TimeoutHandler\",\n\t\t\"net.(*netFD).connect(\": \"a timing out dial\",\n\t}\n\tvar stacks string\n\tfor i := 0; i < 4; i++ {\n\t\tbad = \"\"\n\t\tstacks = strings.Join(interestingGoroutines(), \"\\n\\n\")\n\t\tfor substr, what := range badSubstring {\n\t\t\tif strings.Contains(stacks, substr) {\n\t\t\t\tbad = what\n\t\t\t}\n\t\t}\n\t\tif bad == \"\" {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Bad stuff found, but goroutines might just still be\n\t\t\/\/ shutting down, so give it some time.\n\t\ttime.Sleep(250 * time.Millisecond)\n\t}\n\tt.Errorf(\"Test appears to have leaked %s:\\n%s\", bad, stacks)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings_test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Fields are: [\"foo\" \"bar\" \"baz\"]\nfunc ExampleFields() {\n\tfmt.Printf(\"Fields are: %q\", strings.Fields(\" foo bar baz \"))\n}\n\n\/\/ true\n\/\/ false\n\/\/ true\n\/\/ true\nfunc ExampleContains() {\n\tfmt.Println(strings.Contains(\"seafood\", \"foo\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"bar\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"\"))\n\tfmt.Println(strings.Contains(\"\", \"\"))\n}\n\n\/\/ false\n\/\/ true\n\/\/ false\n\/\/ false\nfunc ExampleContainsAny() {\n\tfmt.Println(strings.ContainsAny(\"team\", \"i\"))\n\tfmt.Println(strings.ContainsAny(\"failure\", \"u & i\"))\n\tfmt.Println(strings.ContainsAny(\"foo\", \"\"))\n\tfmt.Println(strings.ContainsAny(\"\", \"\"))\n\n}\n\n\/\/ 3\n\/\/ 5\nfunc ExampleCount() {\n\tfmt.Println(strings.Count(\"cheese\", \"e\"))\n\tfmt.Println(strings.Count(\"five\", \"\")) \/\/ before & after each rune\n}\n\n\/\/ true\nfunc ExampleEqualFold() {\n\tfmt.Println(strings.EqualFold(\"Go\", \"go\"))\n}\n\n\/\/ 4\n\/\/ -1\nfunc ExampleIndex() {\n\tfmt.Println(strings.Index(\"chicken\", \"ken\"))\n\tfmt.Println(strings.Index(\"chicken\", \"dmr\"))\n}\n\n\/\/ 4\n\/\/ -1\nfunc ExampleRune() {\n\tfmt.Println(strings.IndexRune(\"chicken\", 'k'))\n\tfmt.Println(strings.IndexRune(\"chicken\", 'd'))\n}\n\n\/\/ 0\n\/\/ 3\n\/\/ -1\nfunc ExampleLastIndex() {\n\tfmt.Println(strings.Index(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"rodent\"))\n}\n\n\/\/ foo, bar, baz\nfunc ExampleJoin() {\n\ts := []string{\"foo\", \"bar\", \"baz\"}\n\tfmt.Println(strings.Join(s, \", \"))\n}\n\n\/\/ banana\nfunc ExampleRepeat() {\n\tfmt.Println(\"ba\" + strings.Repeat(\"na\", 2))\n}\n\n\/\/ oinky oinky oink\n\/\/ moo moo moo\nfunc ExampleReplace() {\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"k\", \"ky\", 2))\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"oink\", \"moo\", -1))\n}\n\n\/\/ [\"a\" \"b\" \"c\"]\n\/\/ [\"\" \"man \" \"plan \" \"canal panama\"]\n\/\/ [\" \" \"x\" \"y\" \"z\" \" \"]\nfunc ExampleSplit() {\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a,b,c\", \",\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a man a plan a canal panama\", \"a \"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\" xyz \", \"\"))\n}\n\n\/\/ [\"a\" \"b,c\"]\n\/\/ [] (nil = true)\nfunc ExampleSplitN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitN(\"a,b,c\", \",\", 2))\n\tz := strings.SplitN(\"a,b,c\", \",\", 0)\n\tfmt.Printf(\"%q (nil = %v)\\n\", z, z == nil)\n}\n\n\/\/ [\"a,\" \"b,\" \"c\"]\nfunc ExampleSplitAfter() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfter(\"a,b,c\", \",\"))\n}\n\n\/\/ [\"a,\" \"b,c\"]\nfunc ExampleSplitAfterN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfterN(\"a,b,c\", \",\", 2))\n}\n\n\/\/ Her Royal Highness\nfunc ExampleTitle() {\n\tfmt.Println(strings.Title(\"her royal highness\"))\n}\n\n\/\/ LOUD NOISES\n\/\/ ХЛЕБ\nfunc ExampleToTitle() {\n\tfmt.Println(strings.ToTitle(\"loud noises\"))\n\tfmt.Println(strings.ToTitle(\"хлеб\"))\n}\n\n\/\/ [Achtung]\nfunc ExampleTrim() {\n\tfmt.Printf(\"[%s]\", strings.Trim(\" !!! Achtung !!! \", \"! \"))\n}\n\n\/\/ 'Gjnf oevyyvt naq gur fyvgul tbcure...\nfunc ExampleMap() {\n\trot13 := func(r rune) rune {\n\t\tswitch {\n\t\tcase r >= 'A' && r <= 'Z':\n\t\t\treturn 'A' + (r-'A'+13)%26\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn 'a' + (r-'a'+13)%26\n\t\t}\n\t\treturn r\n\t}\n\tfmt.Println(strings.Map(rot13, \"'Twas brillig and the slithy gopher...\"))\n}\n\n\/\/ a lone gopher\nfunc ExampleTrimSpace() {\n\tfmt.Println(strings.TrimSpace(\" \\t\\n a lone gopher \\n\\t\\r\\n\"))\n}\n\n\/\/ This is <b>HTML<\/b>!\nfunc ExampleNewReplacer() {\n\tr := strings.NewReplacer(\"<\", \"<\", \">\", \">\")\n\tfmt.Println(r.Replace(\"This is <b>HTML<\/b>!\"))\n}\n\n\/\/ GOPHER\nfunc ExampleToUpper() {\n\tfmt.Println(strings.ToUpper(\"Gopher\"))\n}\n\n\/\/ gopher\nfunc ExampleToLower() {\n\tfmt.Println(strings.ToLower(\"Gopher\"))\n}\n<commit_msg>strings: add Bernardo O'Higgins example<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings_test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Fields are: [\"foo\" \"bar\" \"baz\"]\nfunc ExampleFields() {\n\tfmt.Printf(\"Fields are: %q\", strings.Fields(\" foo bar baz \"))\n}\n\n\/\/ true\n\/\/ false\n\/\/ true\n\/\/ true\nfunc ExampleContains() {\n\tfmt.Println(strings.Contains(\"seafood\", \"foo\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"bar\"))\n\tfmt.Println(strings.Contains(\"seafood\", \"\"))\n\tfmt.Println(strings.Contains(\"\", \"\"))\n}\n\n\/\/ false\n\/\/ true\n\/\/ false\n\/\/ false\nfunc ExampleContainsAny() {\n\tfmt.Println(strings.ContainsAny(\"team\", \"i\"))\n\tfmt.Println(strings.ContainsAny(\"failure\", \"u & i\"))\n\tfmt.Println(strings.ContainsAny(\"foo\", \"\"))\n\tfmt.Println(strings.ContainsAny(\"\", \"\"))\n\n}\n\n\/\/ 3\n\/\/ 5\nfunc ExampleCount() {\n\tfmt.Println(strings.Count(\"cheese\", \"e\"))\n\tfmt.Println(strings.Count(\"five\", \"\")) \/\/ before & after each rune\n}\n\n\/\/ true\nfunc ExampleEqualFold() {\n\tfmt.Println(strings.EqualFold(\"Go\", \"go\"))\n}\n\n\/\/ 4\n\/\/ -1\nfunc ExampleIndex() {\n\tfmt.Println(strings.Index(\"chicken\", \"ken\"))\n\tfmt.Println(strings.Index(\"chicken\", \"dmr\"))\n}\n\n\/\/ 4\n\/\/ -1\nfunc ExampleRune() {\n\tfmt.Println(strings.IndexRune(\"chicken\", 'k'))\n\tfmt.Println(strings.IndexRune(\"chicken\", 'd'))\n}\n\n\/\/ 0\n\/\/ 3\n\/\/ -1\nfunc ExampleLastIndex() {\n\tfmt.Println(strings.Index(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"go\"))\n\tfmt.Println(strings.LastIndex(\"go gopher\", \"rodent\"))\n}\n\n\/\/ foo, bar, baz\nfunc ExampleJoin() {\n\ts := []string{\"foo\", \"bar\", \"baz\"}\n\tfmt.Println(strings.Join(s, \", \"))\n}\n\n\/\/ banana\nfunc ExampleRepeat() {\n\tfmt.Println(\"ba\" + strings.Repeat(\"na\", 2))\n}\n\n\/\/ oinky oinky oink\n\/\/ moo moo moo\nfunc ExampleReplace() {\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"k\", \"ky\", 2))\n\tfmt.Println(strings.Replace(\"oink oink oink\", \"oink\", \"moo\", -1))\n}\n\n\/\/ [\"a\" \"b\" \"c\"]\n\/\/ [\"\" \"man \" \"plan \" \"canal panama\"]\n\/\/ [\" \" \"x\" \"y\" \"z\" \" \"]\n\/\/ [\"\"]\nfunc ExampleSplit() {\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a,b,c\", \",\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"a man a plan a canal panama\", \"a \"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\" xyz \", \"\"))\n\tfmt.Printf(\"%q\\n\", strings.Split(\"\", \"Bernardo O'Higgins\"))\n}\n\n\/\/ [\"a\" \"b,c\"]\n\/\/ [] (nil = true)\nfunc ExampleSplitN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitN(\"a,b,c\", \",\", 2))\n\tz := strings.SplitN(\"a,b,c\", \",\", 0)\n\tfmt.Printf(\"%q (nil = %v)\\n\", z, z == nil)\n}\n\n\/\/ [\"a,\" \"b,\" \"c\"]\nfunc ExampleSplitAfter() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfter(\"a,b,c\", \",\"))\n}\n\n\/\/ [\"a,\" \"b,c\"]\nfunc ExampleSplitAfterN() {\n\tfmt.Printf(\"%q\\n\", strings.SplitAfterN(\"a,b,c\", \",\", 2))\n}\n\n\/\/ Her Royal Highness\nfunc ExampleTitle() {\n\tfmt.Println(strings.Title(\"her royal highness\"))\n}\n\n\/\/ LOUD NOISES\n\/\/ ХЛЕБ\nfunc ExampleToTitle() {\n\tfmt.Println(strings.ToTitle(\"loud noises\"))\n\tfmt.Println(strings.ToTitle(\"хлеб\"))\n}\n\n\/\/ [Achtung]\nfunc ExampleTrim() {\n\tfmt.Printf(\"[%s]\", strings.Trim(\" !!! Achtung !!! \", \"! \"))\n}\n\n\/\/ 'Gjnf oevyyvt naq gur fyvgul tbcure...\nfunc ExampleMap() {\n\trot13 := func(r rune) rune {\n\t\tswitch {\n\t\tcase r >= 'A' && r <= 'Z':\n\t\t\treturn 'A' + (r-'A'+13)%26\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn 'a' + (r-'a'+13)%26\n\t\t}\n\t\treturn r\n\t}\n\tfmt.Println(strings.Map(rot13, \"'Twas brillig and the slithy gopher...\"))\n}\n\n\/\/ a lone gopher\nfunc ExampleTrimSpace() {\n\tfmt.Println(strings.TrimSpace(\" \\t\\n a lone gopher \\n\\t\\r\\n\"))\n}\n\n\/\/ This is <b>HTML<\/b>!\nfunc ExampleNewReplacer() {\n\tr := strings.NewReplacer(\"<\", \"<\", \">\", \">\")\n\tfmt.Println(r.Replace(\"This is <b>HTML<\/b>!\"))\n}\n\n\/\/ GOPHER\nfunc ExampleToUpper() {\n\tfmt.Println(strings.ToUpper(\"Gopher\"))\n}\n\n\/\/ gopher\nfunc ExampleToLower() {\n\tfmt.Println(strings.ToLower(\"Gopher\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudformation\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/client\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/remind101\/pkg\/reporter\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tdefaultVisibilityHeartbeat = 1 * time.Minute\n\tdefaultNumWorkers = 10\n)\n\n\/\/ sqsClient duck types the sqs.SQS interface.\ntype sqsClient interface {\n\tReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error)\n\tDeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error)\n\tChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error)\n}\n\n\/\/ SQSDispatcher pulls messages from SQS, and dispatches them to a handler.\ntype SQSDispatcher struct {\n\t\/\/ Root context.Context to use. If a reporter.Reporter is embedded,\n\t\/\/ errors generated will be reporter there. If a logger.Logger is\n\t\/\/ embedded, logging will be logged there.\n\tContext context.Context\n\n\t\/\/ The SQS queue url to listen for CloudFormation Custom Resource\n\t\/\/ requests.\n\tQueueURL string\n\n\t\/\/ When a message is pulled off of sqs, the visibility timeout will be\n\t\/\/ extended by this much, and periodically extended while the handler\n\t\/\/ performs it's work. If this process crashes, then the sqs message\n\t\/\/ will be redelivered later.\n\tVisibilityHeartbeat time.Duration\n\n\t\/\/ Number of worker goroutines to start for receiving messages.\n\tNumWorkers int\n\n\tstopped chan struct{}\n\tafter func(time.Duration) <-chan time.Time\n\tsqs sqsClient\n}\n\nfunc newSQSDispatcher(config client.ConfigProvider) *SQSDispatcher {\n\treturn &SQSDispatcher{\n\t\tVisibilityHeartbeat: defaultVisibilityHeartbeat,\n\t\tNumWorkers: defaultNumWorkers,\n\t\tafter: time.After,\n\t\tsqs: sqs.New(config),\n\t}\n}\n\n\/\/ Start starts multiple goroutines pulling messages off of the queue.\nfunc (q *SQSDispatcher) Start(handle func(context.Context, *sqs.Message) error) {\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < q.NumWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tq.start(handle)\n\t\t}()\n\t}\n\n\twg.Wait()\n}\n\nfunc (q *SQSDispatcher) Stop() {\n\tclose(q.stopped)\n}\n\n\/\/ start starts a pulling messages off of the queue and passing them to the\n\/\/ handler.\nfunc (q *SQSDispatcher) start(handle func(context.Context, *sqs.Message) error) {\n\tvar wg sync.WaitGroup\n\tfor {\n\t\tselect {\n\t\tcase <-q.stopped:\n\t\t\twg.Wait()\n\t\t\treturn\n\t\tdefault:\n\t\t\tctx := q.Context\n\n\t\t\tresp, err := q.sqs.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\t\t\tQueueUrl: aws.String(q.QueueURL),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treporter.Report(ctx, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, m := range resp.Messages {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(m *sqs.Message) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tif err := q.handle(ctx, handle, m); err != nil {\n\t\t\t\t\t\treporter.Report(ctx, err)\n\t\t\t\t\t}\n\t\t\t\t}(m)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (q *SQSDispatcher) handle(ctx context.Context, handle func(context.Context, *sqs.Message) error, message *sqs.Message) (err error) {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tdefer func() {\n\t\tif err == nil {\n\t\t\t_, err = q.sqs.DeleteMessage(&sqs.DeleteMessageInput{\n\t\t\t\tQueueUrl: aws.String(q.QueueURL),\n\t\t\t\tReceiptHandle: message.ReceiptHandle,\n\t\t\t})\n\t\t}\n\t}()\n\n\tvar t <-chan time.Time\n\tt, err = q.extendMessageVisibilityTimeout(message.ReceiptHandle)\n\n\terrCh := make(chan error)\n\tgo func() { errCh <- handle(ctx, message) }()\n\n\tfor {\n\t\tselect {\n\t\tcase err = <-errCh:\n\t\t\treturn\n\t\tcase <-q.stopped:\n\t\t\tcancel()\n\t\tcase <-t:\n\t\t\tt, err = q.extendMessageVisibilityTimeout(message.ReceiptHandle)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ extendMessageVisibilityTimeout extends the messages visibility timeout\n\/\/ extends the timeout by VisibilityHeartbeat, and returns a channel that will\n\/\/ receive after half of VisibilityTimeout has elapsed.\nfunc (q *SQSDispatcher) extendMessageVisibilityTimeout(receiptHandle *string) (<-chan time.Time, error) {\n\tvisibilityTimeout := int64(float64(q.VisibilityHeartbeat) \/ float64(time.Second))\n\n\t_, err := q.sqs.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{\n\t\tQueueUrl: aws.String(q.QueueURL),\n\t\tReceiptHandle: receiptHandle,\n\t\tVisibilityTimeout: aws.Int64(visibilityTimeout),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error extending message visibility timeout: %v\", err)\n\t}\n\n\treturn q.after(q.VisibilityHeartbeat \/ 2), nil\n}\n<commit_msg>Fix typo.<commit_after>package cloudformation\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/client\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/remind101\/pkg\/reporter\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tdefaultVisibilityHeartbeat = 1 * time.Minute\n\tdefaultNumWorkers = 10\n)\n\n\/\/ sqsClient duck types the sqs.SQS interface.\ntype sqsClient interface {\n\tReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error)\n\tDeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error)\n\tChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error)\n}\n\n\/\/ SQSDispatcher pulls messages from SQS, and dispatches them to a handler.\ntype SQSDispatcher struct {\n\t\/\/ Root context.Context to use. If a reporter.Reporter is embedded,\n\t\/\/ errors generated will be reporter there. If a logger.Logger is\n\t\/\/ embedded, logging will be logged there.\n\tContext context.Context\n\n\t\/\/ The SQS queue url to listen for CloudFormation Custom Resource\n\t\/\/ requests.\n\tQueueURL string\n\n\t\/\/ When a message is pulled off of sqs, the visibility timeout will be\n\t\/\/ extended by this much, and periodically extended while the handler\n\t\/\/ performs it's work. If this process crashes, then the sqs message\n\t\/\/ will be redelivered later.\n\tVisibilityHeartbeat time.Duration\n\n\t\/\/ Number of worker goroutines to start for receiving messages.\n\tNumWorkers int\n\n\tstopped chan struct{}\n\tafter func(time.Duration) <-chan time.Time\n\tsqs sqsClient\n}\n\nfunc newSQSDispatcher(config client.ConfigProvider) *SQSDispatcher {\n\treturn &SQSDispatcher{\n\t\tVisibilityHeartbeat: defaultVisibilityHeartbeat,\n\t\tNumWorkers: defaultNumWorkers,\n\t\tafter: time.After,\n\t\tsqs: sqs.New(config),\n\t}\n}\n\n\/\/ Start starts multiple goroutines pulling messages off of the queue.\nfunc (q *SQSDispatcher) Start(handle func(context.Context, *sqs.Message) error) {\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < q.NumWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tq.start(handle)\n\t\t}()\n\t}\n\n\twg.Wait()\n}\n\nfunc (q *SQSDispatcher) Stop() {\n\tclose(q.stopped)\n}\n\n\/\/ start starts a pulling messages off of the queue and passing them to the\n\/\/ handler.\nfunc (q *SQSDispatcher) start(handle func(context.Context, *sqs.Message) error) {\n\tvar wg sync.WaitGroup\n\tfor {\n\t\tselect {\n\t\tcase <-q.stopped:\n\t\t\twg.Wait()\n\t\t\treturn\n\t\tdefault:\n\t\t\tctx := q.Context\n\n\t\t\tresp, err := q.sqs.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\t\t\tQueueUrl: aws.String(q.QueueURL),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treporter.Report(ctx, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, m := range resp.Messages {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(m *sqs.Message) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tif err := q.handle(ctx, handle, m); err != nil {\n\t\t\t\t\t\treporter.Report(ctx, err)\n\t\t\t\t\t}\n\t\t\t\t}(m)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (q *SQSDispatcher) handle(ctx context.Context, handle func(context.Context, *sqs.Message) error, message *sqs.Message) (err error) {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tdefer func() {\n\t\tif err == nil {\n\t\t\t_, err = q.sqs.DeleteMessage(&sqs.DeleteMessageInput{\n\t\t\t\tQueueUrl: aws.String(q.QueueURL),\n\t\t\t\tReceiptHandle: message.ReceiptHandle,\n\t\t\t})\n\t\t}\n\t}()\n\n\tvar t <-chan time.Time\n\tt, err = q.extendMessageVisibilityTimeout(message.ReceiptHandle)\n\n\terrCh := make(chan error)\n\tgo func() { errCh <- handle(ctx, message) }()\n\n\tfor {\n\t\tselect {\n\t\tcase err = <-errCh:\n\t\t\treturn\n\t\tcase <-q.stopped:\n\t\t\tcancel()\n\t\tcase <-t:\n\t\t\tt, err = q.extendMessageVisibilityTimeout(message.ReceiptHandle)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ extendMessageVisibilityTimeout extends the messages visibility timeout by\n\/\/ VisibilityHeartbeat, and returns a channel that will receive after half of\n\/\/ VisibilityTimeout has elapsed.\nfunc (q *SQSDispatcher) extendMessageVisibilityTimeout(receiptHandle *string) (<-chan time.Time, error) {\n\tvisibilityTimeout := int64(float64(q.VisibilityHeartbeat) \/ float64(time.Second))\n\n\t_, err := q.sqs.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{\n\t\tQueueUrl: aws.String(q.QueueURL),\n\t\tReceiptHandle: receiptHandle,\n\t\tVisibilityTimeout: aws.Int64(visibilityTimeout),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error extending message visibility timeout: %v\", err)\n\t}\n\n\treturn q.after(q.VisibilityHeartbeat \/ 2), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package apiproxy\n\nimport (\n\t\"net\/http\"\n)\n\n\/\/ RevalidateFunc is the signature of functions used to determine whether the\n\/\/ HTTP resource indicated by req should be revalidated by contacting the\n\/\/ destination server.\ntype RevalidateFunc func(req *http.Request) bool\n\n\/\/ RevalidationTransport is an implementation of net\/http.RoundTripper that\n\/\/ permits custom behavior with respect to cache entry revalidation for\n\/\/ resources on the target server.\n\/\/\n\/\/ If the request contains cache validators (an If-None-Match or\n\/\/ If-Modified-Since header), then Revalidate is called to determine whether the\n\/\/ cache entry should be revalidated (by being passed to the underlying\n\/\/ transport). In this way, the Revalidate function can effectively extend or shorten cache\n\/\/ age limits.\n\/\/\n\/\/ If the request does not contain cache validators, then it is passed to the\n\/\/ underlying transport.\ntype RevalidationTransport struct {\n\t\/\/ Revalidate is called on each request in RoundTrip. If it returns true,\n\t\/\/ RoundTrip synthesizes and returns an HTTP 304 Not Modified response.\n\t\/\/ Otherwise, the request is passed through to the underlying transport.\n\tRevalidate RevalidateFunc\n\n\t\/\/ Transport is the underlying transport. If nil, net\/http.DefaultTransport is used.\n\tTransport http.RoundTripper\n}\n\n\/\/ RoundTrip takes a Request and returns a Response.\nfunc (t *RevalidationTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tif t.Revalidate != nil && hasCacheValidator(req.Header) && !t.Revalidate(req) {\n\t\tresp = &http.Response{\n\t\t\tRequest: req,\n\t\t\tTransferEncoding: req.TransferEncoding,\n\t\t\tStatusCode: http.StatusNotModified,\n\t\t}\n\t\treturn\n\t}\n\n\ttransport := t.Transport\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\n\treturn transport.RoundTrip(req)\n}\n\n\/\/ hasCacheValidator returns true if the headers contain cache validators. See\n\/\/ http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec13.html#sec13.3 for more\n\/\/ information.\nfunc hasCacheValidator(headers http.Header) bool {\n\treturn headers.Get(\"if-none-match\") != \"\" || headers.Get(\"if-modified-since\") != \"\"\n}\n<commit_msg>Set empty body on synthetized HTTP response to prevent a panic in net\/http<commit_after>package apiproxy\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\n\/\/ RevalidateFunc is the signature of functions used to determine whether the\n\/\/ HTTP resource indicated by req should be revalidated by contacting the\n\/\/ destination server.\ntype RevalidateFunc func(req *http.Request) bool\n\n\/\/ RevalidationTransport is an implementation of net\/http.RoundTripper that\n\/\/ permits custom behavior with respect to cache entry revalidation for\n\/\/ resources on the target server.\n\/\/\n\/\/ If the request contains cache validators (an If-None-Match or\n\/\/ If-Modified-Since header), then Revalidate is called to determine whether the\n\/\/ cache entry should be revalidated (by being passed to the underlying\n\/\/ transport). In this way, the Revalidate function can effectively extend or shorten cache\n\/\/ age limits.\n\/\/\n\/\/ If the request does not contain cache validators, then it is passed to the\n\/\/ underlying transport.\ntype RevalidationTransport struct {\n\t\/\/ Revalidate is called on each request in RoundTrip. If it returns true,\n\t\/\/ RoundTrip synthesizes and returns an HTTP 304 Not Modified response.\n\t\/\/ Otherwise, the request is passed through to the underlying transport.\n\tRevalidate RevalidateFunc\n\n\t\/\/ Transport is the underlying transport. If nil, net\/http.DefaultTransport is used.\n\tTransport http.RoundTripper\n}\n\n\/\/ RoundTrip takes a Request and returns a Response.\nfunc (t *RevalidationTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tif t.Revalidate != nil && hasCacheValidator(req.Header) && !t.Revalidate(req) {\n\t\tresp = &http.Response{\n\t\t\tRequest: req,\n\t\t\tTransferEncoding: req.TransferEncoding,\n\t\t\tStatusCode: http.StatusNotModified,\n\t\t\tBody: ioutil.NopCloser(bytes.NewReader([]byte(\"\"))),\n\t\t}\n\t\treturn\n\t}\n\n\ttransport := t.Transport\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\n\treturn transport.RoundTrip(req)\n}\n\n\/\/ hasCacheValidator returns true if the headers contain cache validators. See\n\/\/ http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec13.html#sec13.3 for more\n\/\/ information.\nfunc hasCacheValidator(headers http.Header) bool {\n\treturn headers.Get(\"if-none-match\") != \"\" || headers.Get(\"if-modified-since\") != \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package irma\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/hashicorp\/go-retryablehttp\"\n\t\"github.com\/privacybydesign\/gabi\"\n\t\"github.com\/privacybydesign\/gabi\/revocation\"\n\tsseclient \"github.com\/sietseringers\/go-sse\"\n\t\"github.com\/sirupsen\/logrus\"\n\tprefixed \"github.com\/x-cray\/logrus-prefixed-formatter\"\n\n\t\"github.com\/privacybydesign\/irmago\/internal\/common\"\n\t\"github.com\/privacybydesign\/irmago\/internal\/disable_sigpipe\"\n)\n\n\/\/ HTTPTransport sends and receives JSON messages to a HTTP server.\ntype HTTPTransport struct {\n\tServer string\n\tBinary bool\n\tForceHTTPS bool\n\tclient *retryablehttp.Client\n\theaders http.Header\n}\n\nvar HTTPHeaders = map[string]http.Header{}\n\n\/\/ Logger is used for logging. If not set, init() will initialize it to logrus.StandardLogger().\nvar Logger *logrus.Logger\n\nvar transportlogger *log.Logger\n\nfunc init() {\n\tlogger := logrus.New()\n\tlogger.SetFormatter(&prefixed.TextFormatter{\n\t\tDisableColors: true,\n\t\tFullTimestamp: true,\n\t\tTimestampFormat: \"15:04:05.000000\",\n\t})\n\tSetLogger(logger)\n}\n\nfunc SetLogger(logger *logrus.Logger) {\n\tLogger = logger\n\tgabi.Logger = Logger\n\tcommon.Logger = Logger\n\trevocation.Logger = Logger\n\tsseclient.Logger = log.New(Logger.WithField(\"type\", \"sseclient\").WriterLevel(logrus.TraceLevel), \"\", 0)\n}\n\n\/\/ NewHTTPTransport returns a new HTTPTransport.\nfunc NewHTTPTransport(serverURL string, forceHTTPS bool) *HTTPTransport {\n\tif Logger.IsLevelEnabled(logrus.TraceLevel) {\n\t\ttransportlogger = log.New(Logger.WriterLevel(logrus.TraceLevel), \"transport: \", 0)\n\t} else {\n\t\ttransportlogger = log.New(ioutil.Discard, \"\", 0)\n\t}\n\n\tif serverURL != \"\" && !strings.HasSuffix(serverURL, \"\/\") {\n\t\tserverURL += \"\/\"\n\t}\n\n\t\/\/ Create a transport that dials with a SIGPIPE handler (which is only active on iOS)\n\tvar innerTransport http.Transport\n\n\tinnerTransport.Dial = func(network, addr string) (c net.Conn, err error) {\n\t\tc, err = net.Dial(network, addr)\n\t\tif err != nil {\n\t\t\treturn c, err\n\t\t}\n\t\tif err = disable_sigpipe.DisableSigPipe(c); err != nil {\n\t\t\treturn c, err\n\t\t}\n\t\treturn c, nil\n\t}\n\n\tclient := &retryablehttp.Client{\n\t\tLogger: transportlogger,\n\t\tRetryWaitMin: 100 * time.Millisecond,\n\t\tRetryWaitMax: 200 * time.Millisecond,\n\t\tRetryMax: 2,\n\t\tBackoff: retryablehttp.DefaultBackoff,\n\t\tCheckRetry: func(ctx context.Context, resp *http.Response, err error) (bool, error) {\n\t\t\t\/\/ Don't retry on 5xx (which retryablehttp does by default)\n\t\t\treturn err != nil || resp.StatusCode == 0, err\n\t\t},\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Second * 3,\n\t\t\tTransport: &innerTransport,\n\t\t},\n\t}\n\n\tvar host string\n\tu, err := url.Parse(serverURL)\n\tif err != nil {\n\t\tLogger.Warnf(\"failed to parse URL %s: %s\", serverURL, err.Error())\n\t} else {\n\t\thost = u.Host\n\t}\n\theaders := HTTPHeaders[host].Clone()\n\tif headers == nil {\n\t\theaders = http.Header{}\n\t}\n\treturn &HTTPTransport{\n\t\tServer: serverURL,\n\t\tForceHTTPS: forceHTTPS,\n\t\theaders: headers,\n\t\tclient: client,\n\t}\n}\n\nfunc (transport *HTTPTransport) marshal(o interface{}) ([]byte, error) {\n\tif transport.Binary {\n\t\treturn MarshalBinary(o)\n\t}\n\treturn json.Marshal(o)\n}\n\nfunc (transport *HTTPTransport) unmarshal(data []byte, dst interface{}) error {\n\tif transport.Binary {\n\t\treturn UnmarshalBinary(data, dst)\n\t}\n\treturn json.Unmarshal(data, dst)\n}\n\nfunc (transport *HTTPTransport) unmarshalValidate(data []byte, dst interface{}) error {\n\tif transport.Binary {\n\t\treturn UnmarshalValidateBinary(data, dst)\n\t}\n\treturn UnmarshalValidate(data, dst)\n}\n\nfunc (transport *HTTPTransport) log(prefix string, message interface{}, binary bool) {\n\tif !Logger.IsLevelEnabled(logrus.TraceLevel) {\n\t\treturn \/\/ do nothing if nothing would be printed anyway\n\t}\n\tvar str string\n\tswitch s := message.(type) {\n\tcase []byte:\n\t\tstr = string(s)\n\tcase string:\n\t\tstr = s\n\tdefault:\n\t\ttmp, _ := json.Marshal(message)\n\t\tstr = string(tmp)\n\t\tbinary = false\n\t}\n\tif !binary {\n\t\tLogger.Tracef(\"transport: %s: %s\", prefix, str)\n\t} else {\n\t\tLogger.Tracef(\"transport: %s (hex): %s\", prefix, hex.EncodeToString([]byte(str)))\n\t}\n}\n\n\/\/ SetHeader sets a header to be sent in requests.\nfunc (transport *HTTPTransport) SetHeader(name, val string) {\n\ttransport.headers.Set(name, val)\n}\n\nfunc (transport *HTTPTransport) request(\n\turl string, method string, reader io.Reader, contenttype string,\n) (response *http.Response, err error) {\n\tvar req retryablehttp.Request\n\tu := transport.Server + url\n\tif common.ForceHTTPS && transport.ForceHTTPS && !strings.HasPrefix(u, \"https\") {\n\t\treturn nil, &SessionError{ErrorType: ErrorHTTPS, Err: errors.New(\"remote server does not use https\")}\n\t}\n\treq.Request, err = http.NewRequest(method, u, reader)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\treq.Header = transport.headers.Clone()\n\tif req.Header.Get(\"User-agent\") == \"\" {\n\t\treq.Header.Set(\"User-Agent\", \"irmago\")\n\t}\n\tif reader != nil && contenttype != \"\" {\n\t\treq.Header.Set(\"Content-Type\", contenttype)\n\t}\n\tres, err := transport.client.Do(&req)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\treturn res, nil\n}\n\nfunc (transport *HTTPTransport) jsonRequest(url string, method string, result interface{}, object interface{}) error {\n\tif method != http.MethodPost && method != http.MethodGet && method != http.MethodDelete {\n\t\tpanic(\"Unsupported HTTP method \" + method)\n\t}\n\tif method == http.MethodGet && object != nil {\n\t\tpanic(\"Cannot GET and also post an object\")\n\t}\n\n\tvar reader io.Reader\n\tvar contenttype string\n\tif object != nil {\n\t\tswitch o := object.(type) {\n\t\tcase []byte:\n\t\t\ttransport.log(\"body\", o, true)\n\t\t\tcontenttype = \"application\/octet-stream\"\n\t\t\treader = bytes.NewBuffer(o)\n\t\tcase string:\n\t\t\ttransport.log(\"body\", o, false)\n\t\t\tcontenttype = \"text\/plain; charset=UTF-8\"\n\t\t\treader = bytes.NewBuffer([]byte(o))\n\t\tdefault:\n\t\t\tmarshaled, err := transport.marshal(object)\n\t\t\tif err != nil {\n\t\t\t\treturn &SessionError{ErrorType: ErrorSerialization, Err: err}\n\t\t\t}\n\t\t\ttransport.log(\"body\", string(marshaled), transport.Binary)\n\t\t\tif transport.Binary {\n\t\t\t\tcontenttype = \"application\/octet-stream\"\n\t\t\t} else {\n\t\t\t\tcontenttype = \"application\/json; charset=UTF-8\"\n\t\t\t}\n\t\t\treader = bytes.NewBuffer(marshaled)\n\t\t}\n\t}\n\n\tres, err := transport.request(url, method, reader, contenttype)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif method == http.MethodDelete {\n\t\treturn nil\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t}\n\tif res.StatusCode == http.StatusNoContent {\n\t\tif result != nil {\n\t\t\treturn &SessionError{\n\t\t\t\tErrorType: ErrorServerResponse,\n\t\t\t\tErr: errors.New(\"'204 No Content' received, but result was expected\"),\n\t\t\t\tRemoteStatus: res.StatusCode,\n\t\t\t}\n\t\t}\n\t} else if res.StatusCode != http.StatusOK {\n\t\tapierr := &RemoteError{}\n\t\terr = transport.unmarshal(body, apierr)\n\t\tif err != nil || apierr.ErrorName == \"\" { \/\/ Not an ApiErrorMessage\n\t\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t\t}\n\t\ttransport.log(\"error\", apierr, false)\n\t\treturn &SessionError{ErrorType: ErrorApi, RemoteStatus: res.StatusCode, RemoteError: apierr}\n\t}\n\n\ttransport.log(\"response\", body, transport.Binary)\n\tif result == nil { \/\/ caller doesn't care about server response\n\t\treturn nil\n\t}\n\tif _, resultstr := result.(*string); resultstr {\n\t\t*result.(*string) = string(body)\n\t} else {\n\t\terr = transport.unmarshalValidate(body, result)\n\t\tif err != nil {\n\t\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (transport *HTTPTransport) GetBytes(url string) ([]byte, error) {\n\tres, err := transport.request(url, http.MethodGet, nil, \"\")\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn nil, &SessionError{ErrorType: ErrorServerResponse, RemoteStatus: res.StatusCode}\n\t}\n\tb, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t}\n\treturn b, nil\n}\n\n\/\/ Post sends the object to the server and parses its response into result.\nfunc (transport *HTTPTransport) Post(url string, result interface{}, object interface{}) error {\n\treturn transport.jsonRequest(url, http.MethodPost, result, object)\n}\n\n\/\/ Get performs a GET request and parses the server's response into result.\nfunc (transport *HTTPTransport) Get(url string, result interface{}) error {\n\treturn transport.jsonRequest(url, http.MethodGet, result, nil)\n}\n\n\/\/ Delete performs a DELETE.\nfunc (transport *HTTPTransport) Delete() {\n\t_ = transport.jsonRequest(\"\", http.MethodDelete, nil, nil)\n}\n<commit_msg>Feat: added option to specify which TLS configuration to use for outbound connections<commit_after>package irma\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/hashicorp\/go-retryablehttp\"\n\t\"github.com\/privacybydesign\/gabi\"\n\t\"github.com\/privacybydesign\/gabi\/revocation\"\n\tsseclient \"github.com\/sietseringers\/go-sse\"\n\t\"github.com\/sirupsen\/logrus\"\n\tprefixed \"github.com\/x-cray\/logrus-prefixed-formatter\"\n\n\t\"github.com\/privacybydesign\/irmago\/internal\/common\"\n\t\"github.com\/privacybydesign\/irmago\/internal\/disable_sigpipe\"\n)\n\n\/\/ HTTPTransport sends and receives JSON messages to a HTTP server.\ntype HTTPTransport struct {\n\tServer string\n\tBinary bool\n\tForceHTTPS bool\n\tclient *retryablehttp.Client\n\theaders http.Header\n}\n\nvar HTTPHeaders = map[string]http.Header{}\n\n\/\/ Logger is used for logging. If not set, init() will initialize it to logrus.StandardLogger().\nvar Logger *logrus.Logger\n\nvar transportlogger *log.Logger\n\nvar tlsClientConfig *tls.Config\n\nfunc init() {\n\tlogger := logrus.New()\n\tlogger.SetFormatter(&prefixed.TextFormatter{\n\t\tDisableColors: true,\n\t\tFullTimestamp: true,\n\t\tTimestampFormat: \"15:04:05.000000\",\n\t})\n\tSetLogger(logger)\n}\n\nfunc SetLogger(logger *logrus.Logger) {\n\tLogger = logger\n\tgabi.Logger = Logger\n\tcommon.Logger = Logger\n\trevocation.Logger = Logger\n\tsseclient.Logger = log.New(Logger.WithField(\"type\", \"sseclient\").WriterLevel(logrus.TraceLevel), \"\", 0)\n}\n\n\/\/ SetTLSClientConfig sets the TLS configuration being used for future outbound connections.\n\/\/ A TLS configuration instance should not be modified after being set.\nfunc SetTLSClientConfig(config *tls.Config) {\n\ttlsClientConfig = config\n}\n\n\/\/ NewHTTPTransport returns a new HTTPTransport.\nfunc NewHTTPTransport(serverURL string, forceHTTPS bool) *HTTPTransport {\n\tif Logger.IsLevelEnabled(logrus.TraceLevel) {\n\t\ttransportlogger = log.New(Logger.WriterLevel(logrus.TraceLevel), \"transport: \", 0)\n\t} else {\n\t\ttransportlogger = log.New(ioutil.Discard, \"\", 0)\n\t}\n\n\tif serverURL != \"\" && !strings.HasSuffix(serverURL, \"\/\") {\n\t\tserverURL += \"\/\"\n\t}\n\n\t\/\/ Create a transport that dials with a SIGPIPE handler (which is only active on iOS)\n\tinnerTransport := &http.Transport{\n\t\tTLSClientConfig: tlsClientConfig,\n\t\tDial: func(network, addr string) (c net.Conn, err error) {\n\t\t\tc, err = net.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn c, err\n\t\t\t}\n\t\t\tif err = disable_sigpipe.DisableSigPipe(c); err != nil {\n\t\t\t\treturn c, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t},\n\t}\n\n\tclient := &retryablehttp.Client{\n\t\tLogger: transportlogger,\n\t\tRetryWaitMin: 100 * time.Millisecond,\n\t\tRetryWaitMax: 200 * time.Millisecond,\n\t\tRetryMax: 2,\n\t\tBackoff: retryablehttp.DefaultBackoff,\n\t\tCheckRetry: func(ctx context.Context, resp *http.Response, err error) (bool, error) {\n\t\t\t\/\/ Don't retry on 5xx (which retryablehttp does by default)\n\t\t\treturn err != nil || resp.StatusCode == 0, err\n\t\t},\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Second * 3,\n\t\t\tTransport: innerTransport,\n\t\t},\n\t}\n\n\tvar host string\n\tu, err := url.Parse(serverURL)\n\tif err != nil {\n\t\tLogger.Warnf(\"failed to parse URL %s: %s\", serverURL, err.Error())\n\t} else {\n\t\thost = u.Host\n\t}\n\theaders := HTTPHeaders[host].Clone()\n\tif headers == nil {\n\t\theaders = http.Header{}\n\t}\n\treturn &HTTPTransport{\n\t\tServer: serverURL,\n\t\tForceHTTPS: forceHTTPS,\n\t\theaders: headers,\n\t\tclient: client,\n\t}\n}\n\nfunc (transport *HTTPTransport) marshal(o interface{}) ([]byte, error) {\n\tif transport.Binary {\n\t\treturn MarshalBinary(o)\n\t}\n\treturn json.Marshal(o)\n}\n\nfunc (transport *HTTPTransport) unmarshal(data []byte, dst interface{}) error {\n\tif transport.Binary {\n\t\treturn UnmarshalBinary(data, dst)\n\t}\n\treturn json.Unmarshal(data, dst)\n}\n\nfunc (transport *HTTPTransport) unmarshalValidate(data []byte, dst interface{}) error {\n\tif transport.Binary {\n\t\treturn UnmarshalValidateBinary(data, dst)\n\t}\n\treturn UnmarshalValidate(data, dst)\n}\n\nfunc (transport *HTTPTransport) log(prefix string, message interface{}, binary bool) {\n\tif !Logger.IsLevelEnabled(logrus.TraceLevel) {\n\t\treturn \/\/ do nothing if nothing would be printed anyway\n\t}\n\tvar str string\n\tswitch s := message.(type) {\n\tcase []byte:\n\t\tstr = string(s)\n\tcase string:\n\t\tstr = s\n\tdefault:\n\t\ttmp, _ := json.Marshal(message)\n\t\tstr = string(tmp)\n\t\tbinary = false\n\t}\n\tif !binary {\n\t\tLogger.Tracef(\"transport: %s: %s\", prefix, str)\n\t} else {\n\t\tLogger.Tracef(\"transport: %s (hex): %s\", prefix, hex.EncodeToString([]byte(str)))\n\t}\n}\n\n\/\/ SetHeader sets a header to be sent in requests.\nfunc (transport *HTTPTransport) SetHeader(name, val string) {\n\ttransport.headers.Set(name, val)\n}\n\nfunc (transport *HTTPTransport) request(\n\turl string, method string, reader io.Reader, contenttype string,\n) (response *http.Response, err error) {\n\tvar req retryablehttp.Request\n\tu := transport.Server + url\n\tif common.ForceHTTPS && transport.ForceHTTPS && !strings.HasPrefix(u, \"https\") {\n\t\treturn nil, &SessionError{ErrorType: ErrorHTTPS, Err: errors.New(\"remote server does not use https\")}\n\t}\n\treq.Request, err = http.NewRequest(method, u, reader)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\treq.Header = transport.headers.Clone()\n\tif req.Header.Get(\"User-agent\") == \"\" {\n\t\treq.Header.Set(\"User-Agent\", \"irmago\")\n\t}\n\tif reader != nil && contenttype != \"\" {\n\t\treq.Header.Set(\"Content-Type\", contenttype)\n\t}\n\tres, err := transport.client.Do(&req)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\treturn res, nil\n}\n\nfunc (transport *HTTPTransport) jsonRequest(url string, method string, result interface{}, object interface{}) error {\n\tif method != http.MethodPost && method != http.MethodGet && method != http.MethodDelete {\n\t\tpanic(\"Unsupported HTTP method \" + method)\n\t}\n\tif method == http.MethodGet && object != nil {\n\t\tpanic(\"Cannot GET and also post an object\")\n\t}\n\n\tvar reader io.Reader\n\tvar contenttype string\n\tif object != nil {\n\t\tswitch o := object.(type) {\n\t\tcase []byte:\n\t\t\ttransport.log(\"body\", o, true)\n\t\t\tcontenttype = \"application\/octet-stream\"\n\t\t\treader = bytes.NewBuffer(o)\n\t\tcase string:\n\t\t\ttransport.log(\"body\", o, false)\n\t\t\tcontenttype = \"text\/plain; charset=UTF-8\"\n\t\t\treader = bytes.NewBuffer([]byte(o))\n\t\tdefault:\n\t\t\tmarshaled, err := transport.marshal(object)\n\t\t\tif err != nil {\n\t\t\t\treturn &SessionError{ErrorType: ErrorSerialization, Err: err}\n\t\t\t}\n\t\t\ttransport.log(\"body\", string(marshaled), transport.Binary)\n\t\t\tif transport.Binary {\n\t\t\t\tcontenttype = \"application\/octet-stream\"\n\t\t\t} else {\n\t\t\t\tcontenttype = \"application\/json; charset=UTF-8\"\n\t\t\t}\n\t\t\treader = bytes.NewBuffer(marshaled)\n\t\t}\n\t}\n\n\tres, err := transport.request(url, method, reader, contenttype)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif method == http.MethodDelete {\n\t\treturn nil\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t}\n\tif res.StatusCode == http.StatusNoContent {\n\t\tif result != nil {\n\t\t\treturn &SessionError{\n\t\t\t\tErrorType: ErrorServerResponse,\n\t\t\t\tErr: errors.New(\"'204 No Content' received, but result was expected\"),\n\t\t\t\tRemoteStatus: res.StatusCode,\n\t\t\t}\n\t\t}\n\t} else if res.StatusCode != http.StatusOK {\n\t\tapierr := &RemoteError{}\n\t\terr = transport.unmarshal(body, apierr)\n\t\tif err != nil || apierr.ErrorName == \"\" { \/\/ Not an ApiErrorMessage\n\t\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t\t}\n\t\ttransport.log(\"error\", apierr, false)\n\t\treturn &SessionError{ErrorType: ErrorApi, RemoteStatus: res.StatusCode, RemoteError: apierr}\n\t}\n\n\ttransport.log(\"response\", body, transport.Binary)\n\tif result == nil { \/\/ caller doesn't care about server response\n\t\treturn nil\n\t}\n\tif _, resultstr := result.(*string); resultstr {\n\t\t*result.(*string) = string(body)\n\t} else {\n\t\terr = transport.unmarshalValidate(body, result)\n\t\tif err != nil {\n\t\t\treturn &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (transport *HTTPTransport) GetBytes(url string) ([]byte, error) {\n\tres, err := transport.request(url, http.MethodGet, nil, \"\")\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorTransport, Err: err}\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn nil, &SessionError{ErrorType: ErrorServerResponse, RemoteStatus: res.StatusCode}\n\t}\n\tb, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, &SessionError{ErrorType: ErrorServerResponse, Err: err, RemoteStatus: res.StatusCode}\n\t}\n\treturn b, nil\n}\n\n\/\/ Post sends the object to the server and parses its response into result.\nfunc (transport *HTTPTransport) Post(url string, result interface{}, object interface{}) error {\n\treturn transport.jsonRequest(url, http.MethodPost, result, object)\n}\n\n\/\/ Get performs a GET request and parses the server's response into result.\nfunc (transport *HTTPTransport) Get(url string, result interface{}) error {\n\treturn transport.jsonRequest(url, http.MethodGet, result, nil)\n}\n\n\/\/ Delete performs a DELETE.\nfunc (transport *HTTPTransport) Delete() {\n\t_ = transport.jsonRequest(\"\", http.MethodDelete, nil, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package momos\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nuid\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\tclientCache \"github.com\/gregjones\/httpcache\"\n)\n\nvar (\n\tErrRequest = errors.New(\"Request error\")\n\tErrTimeout = errors.New(\"Timeout error\")\n\tErrInvalidStatusCode = errors.New(\"Invalid status code\")\n\tErrInvalidContentType = errors.New(\"Invalid content type\")\n)\n\nvar cache = clientCache.NewMemoryCacheTransport()\n\ntype ssiResult struct {\n\tname string\n\tpayload []byte\n\terror error\n}\n\ntype proxyTransport struct {\n\thttp.RoundTripper\n}\n\nfunc (t *proxyTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\n\tLog.Debugf(\"start processing request %q\", req.URL)\n\ttimeStart := time.Now()\n\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\tLog.Errorf(\"could not create RoundTripper from %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Only html files are scanned\n\tcontentType := resp.Header.Get(\"Content-Type\")\n\tif !strings.HasPrefix(contentType, \"text\/html\") {\n\t\treturn resp, nil\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(resp.Body)\n\n\tif err != nil {\n\t\tLog.Errorf(\"illegal response body from %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\tssiCount := 0\n\tssiElements := map[string]SSIElement{}\n\n\tdoc.Find(\"ssi\").Each(func(i int, element *goquery.Selection) {\n\t\tse := SSIElement{Element: element}\n\n\t\tse.SetTimeout(element.AttrOr(\"timeout\", \"2000\"))\n\t\tse.SetSrc(element.AttrOr(\"src\", \"\"))\n\t\tse.SetName(element.AttrOr(\"name\", nuid.Next()))\n\t\tse.SetTemplate(element.AttrOr(\"template\", \"false\"))\n\t\tse.SetFilterIncludes(element.AttrOr(\"no-scripts\", \"true\"))\n\n\t\tse.GetErrorTag()\n\t\tse.GetTimeoutTag()\n\n\t\tse.templateContext = TemplateContext{\n\t\t\tDateLocal: time.Now().Local().Format(\"2006-01-02\"),\n\t\t\tDate: time.Now().Format(time.RFC3339),\n\t\t\tRequestId: req.Header.Get(\"X-Request-Id\"),\n\t\t\tName: se.name,\n\t\t}\n\n\t\tssiElements[se.name] = se\n\t\tssiCount++\n\t})\n\n\tch := make(chan ssiResult)\n\n\ttimeStartRequest := time.Now()\n\n\tfor _, el := range ssiElements {\n\t\tgo makeRequest(el.name, el.src, ch, el.timeout)\n\t}\n\n\tfor i := 0; i < ssiCount; i++ {\n\t\tres := <-ch\n\t\tel := ssiElements[res.name]\n\t\tif res.error == nil {\n\t\t\tLog.Debugf(\"fragment (%v) - Request to %v took %v\", el.name, el.src, time.Since(timeStartRequest))\n\t\t\tel.SetupSuccess(res.payload)\n\t\t} else {\n\t\t\tel.SetupFallback(res.error)\n\t\t\tLog.Debugf(\"Fragment (%v) - Request to %v error: %q\", el.name, el.src, res.error)\n\t\t}\n\t}\n\n\tclose(ch)\n\n\thtmlDoc, err := doc.Html()\n\n\tif err != nil {\n\t\tLog.Errorf(\"Could not get html from document %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\t\/\/ assign new reader with content\n\tcontent := []byte(htmlDoc)\n\tbody := ioutil.NopCloser(bytes.NewReader(content))\n\tresp.Body = body\n\tresp.ContentLength = int64(len(content)) \/\/ update content length\n\tresp.Header.Set(\"Content-Length\", strconv.Itoa(len(content)))\n\n\tLog.Debugf(\"Processing complete %q took %q\", req.URL, time.Since(timeStart))\n\n\treturn resp, nil\n}\n\nfunc makeRequest(name string, url string, ch chan<- ssiResult, timeout time.Duration) {\n\t\/\/ @TODO don't create a new client per request\n\tvar Client = &http.Client{\n\t\tTransport: cache,\n\t\tTimeout: timeout,\n\t}\n\n\tresp, err := Client.Get(url)\n\n\tif err != nil {\n\t\tch <- ssiResult{name: name, error: err}\n\t} else {\n\t\tcontentType := resp.Header.Get(\"Content-Type\")\n\t\tif !strings.HasPrefix(contentType, \"text\/html\") {\n\t\t\tch <- ssiResult{name: name, error: ErrInvalidContentType}\n\t\t} else if resp.StatusCode > 199 && resp.StatusCode < 300 {\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\tdefer resp.Body.Close()\n\n\t\t\t\/\/ https:\/\/github.com\/gregjones\/httpcache\n\t\t\tif resp.Header.Get(\"X-From-Cache\") == \"1\" {\n\t\t\t\tLog.Debugf(\"Fragment (%v) - Response was cached\", name)\n\t\t\t} else {\n\t\t\t\tLog.Debugf(\"Fragment (%v) - Response was refreshed\", name)\n\t\t\t}\n\n\t\t\tch <- ssiResult{name: name, payload: body}\n\t\t} else {\n\t\t\tch <- ssiResult{name: name, error: ErrInvalidStatusCode}\n\t\t}\n\t}\n}\n<commit_msg>log as error<commit_after>package momos\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nuid\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\tclientCache \"github.com\/gregjones\/httpcache\"\n)\n\nvar (\n\tErrRequest = errors.New(\"Request error\")\n\tErrTimeout = errors.New(\"Timeout error\")\n\tErrInvalidStatusCode = errors.New(\"Invalid status code\")\n\tErrInvalidContentType = errors.New(\"Invalid content type\")\n)\n\nvar cache = clientCache.NewMemoryCacheTransport()\n\ntype ssiResult struct {\n\tname string\n\tpayload []byte\n\terror error\n}\n\ntype proxyTransport struct {\n\thttp.RoundTripper\n}\n\nfunc (t *proxyTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\n\tLog.Debugf(\"start processing request %q\", req.URL)\n\ttimeStart := time.Now()\n\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\tLog.Errorf(\"could not create RoundTripper from %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Only html files are scanned\n\tcontentType := resp.Header.Get(\"Content-Type\")\n\tif !strings.HasPrefix(contentType, \"text\/html\") {\n\t\treturn resp, nil\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(resp.Body)\n\n\tif err != nil {\n\t\tLog.Errorf(\"illegal response body from %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\tssiCount := 0\n\tssiElements := map[string]SSIElement{}\n\n\tdoc.Find(\"ssi\").Each(func(i int, element *goquery.Selection) {\n\t\tse := SSIElement{Element: element}\n\n\t\tse.SetTimeout(element.AttrOr(\"timeout\", \"2000\"))\n\t\tse.SetSrc(element.AttrOr(\"src\", \"\"))\n\t\tse.SetName(element.AttrOr(\"name\", nuid.Next()))\n\t\tse.SetTemplate(element.AttrOr(\"template\", \"false\"))\n\t\tse.SetFilterIncludes(element.AttrOr(\"no-scripts\", \"true\"))\n\n\t\tse.GetErrorTag()\n\t\tse.GetTimeoutTag()\n\n\t\tse.templateContext = TemplateContext{\n\t\t\tDateLocal: time.Now().Local().Format(\"2006-01-02\"),\n\t\t\tDate: time.Now().Format(time.RFC3339),\n\t\t\tRequestId: req.Header.Get(\"X-Request-Id\"),\n\t\t\tName: se.name,\n\t\t}\n\n\t\tssiElements[se.name] = se\n\t\tssiCount++\n\t})\n\n\tch := make(chan ssiResult)\n\n\ttimeStartRequest := time.Now()\n\n\tfor _, el := range ssiElements {\n\t\tgo makeRequest(el.name, el.src, ch, el.timeout)\n\t}\n\n\tfor i := 0; i < ssiCount; i++ {\n\t\tres := <-ch\n\t\tel := ssiElements[res.name]\n\t\tif res.error == nil {\n\t\t\tLog.Debugf(\"fragment (%v) - Request to %v took %v\", el.name, el.src, time.Since(timeStartRequest))\n\t\t\tel.SetupSuccess(res.payload)\n\t\t} else {\n\t\t\tel.SetupFallback(res.error)\n\t\t\tLog.Errorf(\"Fragment (%v) - Request to %v \\nerror: %q\", el.name, el.src, res.error)\n\t\t}\n\t}\n\n\tclose(ch)\n\n\thtmlDoc, err := doc.Html()\n\n\tif err != nil {\n\t\tLog.Errorf(\"Could not get html from document %q\", req.URL)\n\t\treturn nil, err\n\t}\n\n\t\/\/ assign new reader with content\n\tcontent := []byte(htmlDoc)\n\tbody := ioutil.NopCloser(bytes.NewReader(content))\n\tresp.Body = body\n\tresp.ContentLength = int64(len(content)) \/\/ update content length\n\tresp.Header.Set(\"Content-Length\", strconv.Itoa(len(content)))\n\n\tLog.Debugf(\"Processing complete %q took %q\", req.URL, time.Since(timeStart))\n\n\treturn resp, nil\n}\n\nfunc makeRequest(name string, url string, ch chan<- ssiResult, timeout time.Duration) {\n\t\/\/ @TODO don't create a new client per request\n\tvar Client = &http.Client{\n\t\tTransport: cache,\n\t\tTimeout: timeout,\n\t}\n\n\tresp, err := Client.Get(url)\n\n\tif err != nil {\n\t\tch <- ssiResult{name: name, error: err}\n\t} else {\n\t\tcontentType := resp.Header.Get(\"Content-Type\")\n\t\tif !strings.HasPrefix(contentType, \"text\/html\") {\n\t\t\tch <- ssiResult{name: name, error: ErrInvalidContentType}\n\t\t} else if resp.StatusCode > 199 && resp.StatusCode < 300 {\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\tdefer resp.Body.Close()\n\n\t\t\t\/\/ https:\/\/github.com\/gregjones\/httpcache\n\t\t\tif resp.Header.Get(\"X-From-Cache\") == \"1\" {\n\t\t\t\tLog.Debugf(\"Fragment (%v) - Response was cached\", name)\n\t\t\t} else {\n\t\t\t\tLog.Debugf(\"Fragment (%v) - Response was refreshed\", name)\n\t\t\t}\n\n\t\t\tch <- ssiResult{name: name, payload: body}\n\t\t} else {\n\t\t\tch <- ssiResult{name: name, error: ErrInvalidStatusCode}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tgitmedia \"..\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\tallCommands map[string]map[string]string\n\tgitMediaBin string\n)\n\nfunc main() {\n\tfor wd, commands := range allCommands {\n\t\tfmt.Println(\"Integration tests for\", wd)\n\t\tfor cmd, expected := range commands {\n\t\t\tif err := os.Chdir(wd); err != nil {\n\t\t\t\tfmt.Println(\"Cannot chdir to\", wd)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println(\"$ git-media\", cmd)\n\t\t\tactual := gitmedia.SimpleExec(gitMediaBin, cmd)\n\t\t\tif actual != expected {\n\t\t\t\tfmt.Printf(\"expected:\\n%s\\n\\n\", expected)\n\t\t\t\tfmt.Printf(\"actual:\\n%s\\n\", actual)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgitMediaBin = filepath.Join(wd, \"bin\", \"git-media\")\n\n\tallCommands = make(map[string]map[string]string)\n\n\t\/\/ tests on the git-media repository, which has no actual git-media assets :)\n\tallCommands[wd] = map[string]string{\n\t\t\"version\": \"git-media v\" + gitmedia.Version,\n\t\t\"config\": \"Endpoint=\\n\" +\n\t\t\t\"LocalMediaDir=\" + filepath.Join(wd, \".git\", \"media\") + \"\\n\" +\n\t\t\t\"TempDir=\" + filepath.Join(os.TempDir(), \"git-media\"),\n\t}\n}\n<commit_msg>ンンー ンンンン ンーンン<commit_after>package main\n\nimport (\n\tgitmedia \"..\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\tallCommands map[string]map[string]string\n\tgitMediaBin string\n)\n\nfunc main() {\n\tfor wd, commands := range allCommands {\n\t\tfmt.Println(\"Integration tests for\", wd)\n\t\tfor cmd, expected := range commands {\n\t\t\tif err := os.Chdir(wd); err != nil {\n\t\t\t\tfmt.Println(\"Cannot chdir to\", wd)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println(\"$ git-media\", cmd)\n\t\t\tactual := gitmedia.SimpleExec(gitMediaBin, cmd)\n\t\t\tif actual != expected {\n\t\t\t\tfmt.Printf(\"- expected\\n%s\\n\\n\", expected)\n\t\t\t\tfmt.Printf(\"- actual\\n%s\\n\", actual)\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"\")\n\t}\n}\n\nfunc init() {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgitMediaBin = filepath.Join(wd, \"bin\", \"git-media\")\n\n\tallCommands = make(map[string]map[string]string)\n\n\t\/\/ tests on the git-media repository, which has no actual git-media assets :)\n\tallCommands[wd] = map[string]string{\n\t\t\"version\": \"git-media v\" + gitmedia.Version,\n\t\t\"config\": \"Endpoint=\\n\" +\n\t\t\t\"LocalMediaDir=\" + filepath.Join(wd, \".git\", \"media\") + \"\\n\" +\n\t\t\t\"TempDir=\" + filepath.Join(os.TempDir(), \"git-media\"),\n\t}\n\n\t\/\/ tests on the git-media .git dir\n\tallCommands[filepath.Join(wd, \".git\")] = allCommands[wd]\n}\n<|endoftext|>"} {"text":"<commit_before>package internal\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kms\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestKms(t *testing.T) {\n\tmockKMS := &MockKMSAPI{}\n\tdefer mockKMS.AssertExpectations(t)\n\tkmsClients[\"myregion:myprofile\"] = mockKMS\n\tkmsCrypter := KMSCrypter{}\n\n\tmockKMS.On(\"Encrypt\",\n\t\t&kms.EncryptInput{\n\t\t\tKeyId: aws.String(\"mykey\"),\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t).Return(\n\t\t&kms.EncryptOutput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t\tnil,\n\t)\n\tsecret, myDecryptParams, err := kmsCrypter.Encrypt(\"mypass\", map[string]string{\n\t\t\"region\": \"myregion\",\n\t\t\"profile\": \"myprofile\",\n\t\t\"keyID\": \"mykey\",\n\t})\n\tassert.Equal(t, myDecryptParams, DecryptParams{\n\t\t\"region\": \"myregion\",\n\t})\n\tassert.Nil(t, err)\n\n\tmockKMS.On(\"Decrypt\",\n\t\t&kms.DecryptInput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t).Return(\n\t\t&kms.DecryptOutput{\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t\tnil,\n\t)\n\tmyDecryptParams[\"profile\"] = \"myprofile\"\n\n\tplaintext, err := kmsCrypter.Decrypt(secret, myDecryptParams)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"mypass\", plaintext)\n}\n\nfunc TestKmsDefaultProfile(t *testing.T) {\n\tmockKMS := &MockKMSAPI{}\n\tdefer mockKMS.AssertExpectations(t)\n\tkmsClients[\"myregion:default\"] = mockKMS\n\tkmsCrypter := KMSCrypter{}\n\n\tmockKMS.On(\"Encrypt\",\n\t\t&kms.EncryptInput{\n\t\t\tKeyId: aws.String(\"mykey\"),\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t).Return(\n\t\t&kms.EncryptOutput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t\tnil,\n\t)\n\tsecret, myDecryptParams, err := kmsCrypter.Encrypt(\"mypass\", map[string]string{\n\t\t\"region\": \"myregion\",\n\t\t\"keyID\": \"mykey\",\n\t})\n\tassert.Equal(t, myDecryptParams, DecryptParams{\n\t\t\"region\": \"myregion\",\n\t})\n\tassert.Nil(t, err)\n\n\tmockKMS.On(\"Decrypt\",\n\t\t&kms.DecryptInput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t).Return(\n\t\t&kms.DecryptOutput{\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t\tnil,\n\t)\n\n\tplaintext, err := kmsCrypter.Decrypt(secret, myDecryptParams)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"mypass\", plaintext)\n}\n<commit_msg>go fmt (#11)<commit_after>package internal\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kms\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestKms(t *testing.T) {\n\tmockKMS := &MockKMSAPI{}\n\tdefer mockKMS.AssertExpectations(t)\n\tkmsClients[\"myregion:myprofile\"] = mockKMS\n\tkmsCrypter := KMSCrypter{}\n\n\tmockKMS.On(\"Encrypt\",\n\t\t&kms.EncryptInput{\n\t\t\tKeyId: aws.String(\"mykey\"),\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t).Return(\n\t\t&kms.EncryptOutput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t\tnil,\n\t)\n\tsecret, myDecryptParams, err := kmsCrypter.Encrypt(\"mypass\", map[string]string{\n\t\t\"region\": \"myregion\",\n\t\t\"profile\": \"myprofile\",\n\t\t\"keyID\": \"mykey\",\n\t})\n\tassert.Equal(t, myDecryptParams, DecryptParams{\n\t\t\"region\": \"myregion\",\n\t})\n\tassert.Nil(t, err)\n\n\tmockKMS.On(\"Decrypt\",\n\t\t&kms.DecryptInput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t).Return(\n\t\t&kms.DecryptOutput{\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t\tnil,\n\t)\n\tmyDecryptParams[\"profile\"] = \"myprofile\"\n\n\tplaintext, err := kmsCrypter.Decrypt(secret, myDecryptParams)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"mypass\", plaintext)\n}\n\nfunc TestKmsDefaultProfile(t *testing.T) {\n\tmockKMS := &MockKMSAPI{}\n\tdefer mockKMS.AssertExpectations(t)\n\tkmsClients[\"myregion:default\"] = mockKMS\n\tkmsCrypter := KMSCrypter{}\n\n\tmockKMS.On(\"Encrypt\",\n\t\t&kms.EncryptInput{\n\t\t\tKeyId: aws.String(\"mykey\"),\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t).Return(\n\t\t&kms.EncryptOutput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t\tnil,\n\t)\n\tsecret, myDecryptParams, err := kmsCrypter.Encrypt(\"mypass\", map[string]string{\n\t\t\"region\": \"myregion\",\n\t\t\"keyID\": \"mykey\",\n\t})\n\tassert.Equal(t, myDecryptParams, DecryptParams{\n\t\t\"region\": \"myregion\",\n\t})\n\tassert.Nil(t, err)\n\n\tmockKMS.On(\"Decrypt\",\n\t\t&kms.DecryptInput{\n\t\t\tCiphertextBlob: []byte(\"myciphertextblob\"),\n\t\t},\n\t).Return(\n\t\t&kms.DecryptOutput{\n\t\t\tPlaintext: []byte(\"mypass\"),\n\t\t},\n\t\tnil,\n\t)\n\n\tplaintext, err := kmsCrypter.Decrypt(secret, myDecryptParams)\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"mypass\", plaintext)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage certificates\n\nimport (\n\t\"context\"\n\t\"crypto\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/big\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/kr\/pretty\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tcorelisters \"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1alpha2\"\n\tcmclient \"github.com\/jetstack\/cert-manager\/pkg\/client\/clientset\/versioned\"\n\tcmlisters \"github.com\/jetstack\/cert-manager\/pkg\/client\/listers\/certmanager\/v1alpha2\"\n\tcontrollerpkg \"github.com\/jetstack\/cert-manager\/pkg\/controller\"\n\tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/metrics\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/errors\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/kube\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n)\n\nvar (\n\tcertificateGvk = v1alpha2.SchemeGroupVersion.WithKind(\"Certificate\")\n)\n\ntype calculateDurationUntilRenewFn func(context.Context, *x509.Certificate, *v1alpha2.Certificate) time.Duration\n\nfunc getCertificateForKey(ctx context.Context, key string, lister cmlisters.CertificateLister) (*v1alpha2.Certificate, error) {\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\n\tcrt, err := lister.Certificates(namespace).Get(name)\n\tif k8sErrors.IsNotFound(err) {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn crt, nil\n}\n\nfunc certificateGetter(lister cmlisters.CertificateLister) func(namespace, name string) (interface{}, error) {\n\treturn func(namespace, name string) (interface{}, error) {\n\t\treturn lister.Certificates(namespace).Get(name)\n\t}\n}\n\nvar keyFunc = controllerpkg.KeyFunc\n\nfunc certificateMatchesSpec(crt *v1alpha2.Certificate, key crypto.Signer, cert *x509.Certificate, secretLister corelisters.SecretLister) (bool, []string) {\n\tvar errs []string\n\n\t\/\/ TODO: add checks for KeySize, KeyAlgorithm fields\n\t\/\/ TODO: add checks for Organization field\n\t\/\/ TODO: add checks for IsCA field\n\n\t\/\/ check if the private key is the corresponding pair to the certificate\n\n\tmatches, err := pki.PublicKeyMatchesCertificate(key.Public(), cert)\n\tif err != nil {\n\t\terrs = append(errs, err.Error())\n\t} else if !matches {\n\t\terrs = append(errs, fmt.Sprintf(\"Certificate private key does not match certificate\"))\n\t}\n\n\t\/\/ If CN is set on the resource then it should exist on the certificate as\n\t\/\/ the Common Name or a DNS Name\n\texpectedCN := crt.Spec.CommonName\n\tgotCN := append(cert.DNSNames, cert.Subject.CommonName)\n\tif !util.Contains(gotCN, expectedCN) {\n\t\terrs = append(errs, fmt.Sprintf(\"Common Name on TLS certificate not up to date (%q): %s\",\n\t\t\texpectedCN, gotCN))\n\t}\n\n\t\/\/ validate the dns names are correct\n\texpectedDNSNames := crt.Spec.DNSNames\n\tif !util.Subset(cert.DNSNames, expectedDNSNames) {\n\t\terrs = append(errs, fmt.Sprintf(\"DNS names on TLS certificate not up to date: %q\", cert.DNSNames))\n\t}\n\n\texpectedURIs := crt.Spec.URISANs\n\tif !util.EqualUnsorted(pki.URLsToString(cert.URIs), expectedURIs) {\n\t\terrs = append(errs, fmt.Sprintf(\"URI SANs on TLS certificate not up to date: %q\", cert.URIs))\n\t}\n\n\t\/\/ validate the ip addresses are correct\n\tif !util.EqualUnsorted(pki.IPAddressesToString(cert.IPAddresses), crt.Spec.IPAddresses) {\n\t\terrs = append(errs, fmt.Sprintf(\"IP addresses on TLS certificate not up to date: %q\", pki.IPAddressesToString(cert.IPAddresses)))\n\t}\n\n\t\/\/ get a copy of the current secret resource\n\t\/\/ Note that we already know that it exists, no need to check for errors\n\t\/\/ TODO: Refactor so that the secret is passed as argument?\n\tsecret, err := secretLister.Secrets(crt.Namespace).Get(crt.Spec.SecretName)\n\n\t\/\/ validate that the issuer is correct\n\tif crt.Spec.IssuerRef.Name != secret.Annotations[v1alpha2.IssuerNameAnnotationKey] {\n\t\terrs = append(errs, fmt.Sprintf(\"Issuer of the certificate is not up to date: %q\", secret.Annotations[v1alpha2.IssuerNameAnnotationKey]))\n\t}\n\n\t\/\/ validate that the issuer kind is correct\n\tif apiutil.IssuerKind(crt.Spec.IssuerRef) != secret.Annotations[v1alpha2.IssuerKindAnnotationKey] {\n\t\terrs = append(errs, fmt.Sprintf(\"Issuer kind of the certificate is not up to date: %q\", secret.Annotations[v1alpha2.IssuerKindAnnotationKey]))\n\t}\n\n\treturn len(errs) == 0, errs\n}\n\nfunc scheduleRenewal(ctx context.Context, lister corelisters.SecretLister, calc calculateDurationUntilRenewFn, queueFn func(interface{}, time.Duration), crt *v1alpha2.Certificate) {\n\tlog := logf.FromContext(ctx)\n\tlog = log.WithValues(\n\t\tlogf.RelatedResourceNameKey, crt.Spec.SecretName,\n\t\tlogf.RelatedResourceNamespaceKey, crt.Namespace,\n\t\tlogf.RelatedResourceKindKey, \"Secret\",\n\t)\n\n\tkey, err := keyFunc(crt)\n\tif err != nil {\n\t\tlog.Error(err, \"error getting key for certificate resource\")\n\t\treturn\n\t}\n\n\tcert, err := kube.SecretTLSCert(ctx, lister, crt.Namespace, crt.Spec.SecretName)\n\tif err != nil {\n\t\tif !errors.IsInvalidData(err) {\n\t\t\tlog.Error(err, \"error getting secret for certificate resource\")\n\t\t}\n\t\treturn\n\t}\n\n\trenewIn := calc(ctx, cert, crt)\n\tqueueFn(key, renewIn)\n\n\tlog.WithValues(\"duration_until_renewal\", renewIn.String()).Info(\"certificate scheduled for renewal\")\n}\n\n\/\/ staticTemporarySerialNumber is a fixed serial number we check for when\n\/\/ updating the status of a certificate.\n\/\/ It is used to identify temporarily generated certificates, so that friendly\n\/\/ status messages can be displayed to users.\nconst staticTemporarySerialNumber = 0x1234567890\n\nfunc isTemporaryCertificate(cert *x509.Certificate) bool {\n\tif cert == nil {\n\t\treturn false\n\t}\n\treturn cert.SerialNumber.Int64() == staticTemporarySerialNumber\n}\n\n\/\/ generateLocallySignedTemporaryCertificate signs a temporary certificate for\n\/\/ the given certificate resource using a one-use temporary CA that is then\n\/\/ discarded afterwards.\n\/\/ This is to mitigate a potential attack against x509 certificates that use a\n\/\/ predictable serial number and weak MD5 hashing algorithms.\n\/\/ In practice, this shouldn't really be a concern anyway.\nfunc generateLocallySignedTemporaryCertificate(crt *v1alpha2.Certificate, pk []byte) ([]byte, error) {\n\t\/\/ generate a throwaway self-signed root CA\n\tcaPk, err := pki.GenerateECPrivateKey(pki.ECCurve521)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcaCertTemplate, err := pki.GenerateTemplate(&v1alpha2.Certificate{\n\t\tSpec: v1alpha2.CertificateSpec{\n\t\t\tCommonName: \"cert-manager.local\",\n\t\t\tIsCA: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, caCert, err := pki.SignCertificate(caCertTemplate, caCertTemplate, caPk.Public(), caPk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ sign a temporary certificate using the root CA\n\ttemplate, err := pki.GenerateTemplate(crt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplate.SerialNumber = big.NewInt(staticTemporarySerialNumber)\n\n\tsigneeKey, err := pki.DecodePrivateKeyBytes(pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, _, err := pki.SignCertificate(template, caCert, signeeKey.Public(), caPk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\nfunc updateCertificateStatus(ctx context.Context, m *metrics.Metrics, cmClient cmclient.Interface, old, new *v1alpha2.Certificate) (*v1alpha2.Certificate, error) {\n\tdefer m.UpdateCertificateStatus(new)\n\n\tlog := logf.FromContext(ctx, \"updateStatus\")\n\toldBytes, _ := json.Marshal(old.Status)\n\tnewBytes, _ := json.Marshal(new.Status)\n\tif reflect.DeepEqual(oldBytes, newBytes) {\n\t\treturn nil, nil\n\t}\n\tlog.V(logf.DebugLevel).Info(\"updating resource due to change in status\", \"diff\", pretty.Diff(string(oldBytes), string(newBytes)))\n\treturn cmClient.CertmanagerV1alpha2().Certificates(new.Namespace).UpdateStatus(new)\n}\n\nfunc certificateHasTemporaryCertificateAnnotation(crt *v1alpha2.Certificate) bool {\n\tif crt.Annotations == nil {\n\t\treturn false\n\t}\n\n\tif val, ok := crt.Annotations[v1alpha2.IssueTemporaryCertificateAnnotation]; ok && val == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n<commit_msg>Make annotation of secret if nil to prevent panic<commit_after>\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage certificates\n\nimport (\n\t\"context\"\n\t\"crypto\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/big\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/kr\/pretty\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tcorelisters \"k8s.io\/client-go\/listers\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1alpha2\"\n\tcmclient \"github.com\/jetstack\/cert-manager\/pkg\/client\/clientset\/versioned\"\n\tcmlisters \"github.com\/jetstack\/cert-manager\/pkg\/client\/listers\/certmanager\/v1alpha2\"\n\tcontrollerpkg \"github.com\/jetstack\/cert-manager\/pkg\/controller\"\n\tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/metrics\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/errors\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/kube\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n)\n\nvar (\n\tcertificateGvk = v1alpha2.SchemeGroupVersion.WithKind(\"Certificate\")\n)\n\ntype calculateDurationUntilRenewFn func(context.Context, *x509.Certificate, *v1alpha2.Certificate) time.Duration\n\nfunc getCertificateForKey(ctx context.Context, key string, lister cmlisters.CertificateLister) (*v1alpha2.Certificate, error) {\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\n\tcrt, err := lister.Certificates(namespace).Get(name)\n\tif k8sErrors.IsNotFound(err) {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn crt, nil\n}\n\nfunc certificateGetter(lister cmlisters.CertificateLister) func(namespace, name string) (interface{}, error) {\n\treturn func(namespace, name string) (interface{}, error) {\n\t\treturn lister.Certificates(namespace).Get(name)\n\t}\n}\n\nvar keyFunc = controllerpkg.KeyFunc\n\nfunc certificateMatchesSpec(crt *v1alpha2.Certificate, key crypto.Signer, cert *x509.Certificate, secretLister corelisters.SecretLister) (bool, []string) {\n\tvar errs []string\n\n\t\/\/ TODO: add checks for KeySize, KeyAlgorithm fields\n\t\/\/ TODO: add checks for Organization field\n\t\/\/ TODO: add checks for IsCA field\n\n\t\/\/ check if the private key is the corresponding pair to the certificate\n\n\tmatches, err := pki.PublicKeyMatchesCertificate(key.Public(), cert)\n\tif err != nil {\n\t\terrs = append(errs, err.Error())\n\t} else if !matches {\n\t\terrs = append(errs, fmt.Sprintf(\"Certificate private key does not match certificate\"))\n\t}\n\n\t\/\/ If CN is set on the resource then it should exist on the certificate as\n\t\/\/ the Common Name or a DNS Name\n\texpectedCN := crt.Spec.CommonName\n\tgotCN := append(cert.DNSNames, cert.Subject.CommonName)\n\tif !util.Contains(gotCN, expectedCN) {\n\t\terrs = append(errs, fmt.Sprintf(\"Common Name on TLS certificate not up to date (%q): %s\",\n\t\t\texpectedCN, gotCN))\n\t}\n\n\t\/\/ validate the dns names are correct\n\texpectedDNSNames := crt.Spec.DNSNames\n\tif !util.Subset(cert.DNSNames, expectedDNSNames) {\n\t\terrs = append(errs, fmt.Sprintf(\"DNS names on TLS certificate not up to date: %q\", cert.DNSNames))\n\t}\n\n\texpectedURIs := crt.Spec.URISANs\n\tif !util.EqualUnsorted(pki.URLsToString(cert.URIs), expectedURIs) {\n\t\terrs = append(errs, fmt.Sprintf(\"URI SANs on TLS certificate not up to date: %q\", cert.URIs))\n\t}\n\n\t\/\/ validate the ip addresses are correct\n\tif !util.EqualUnsorted(pki.IPAddressesToString(cert.IPAddresses), crt.Spec.IPAddresses) {\n\t\terrs = append(errs, fmt.Sprintf(\"IP addresses on TLS certificate not up to date: %q\", pki.IPAddressesToString(cert.IPAddresses)))\n\t}\n\n\t\/\/ get a copy of the current secret resource\n\t\/\/ Note that we already know that it exists, no need to check for errors\n\t\/\/ TODO: Refactor so that the secret is passed as argument?\n\tsecret, err := secretLister.Secrets(crt.Namespace).Get(crt.Spec.SecretName)\n\n\tif secret.Annotations == nil {\n\t\tsecret.Annotations = make(map[string]string)\n\t}\n\n\t\/\/ validate that the issuer is correct\n\tif crt.Spec.IssuerRef.Name != secret.Annotations[v1alpha2.IssuerNameAnnotationKey] {\n\t\terrs = append(errs, fmt.Sprintf(\"Issuer of the certificate is not up to date: %q\", secret.Annotations[v1alpha2.IssuerNameAnnotationKey]))\n\t}\n\n\t\/\/ validate that the issuer kind is correct\n\tif apiutil.IssuerKind(crt.Spec.IssuerRef) != secret.Annotations[v1alpha2.IssuerKindAnnotationKey] {\n\t\terrs = append(errs, fmt.Sprintf(\"Issuer kind of the certificate is not up to date: %q\", secret.Annotations[v1alpha2.IssuerKindAnnotationKey]))\n\t}\n\n\treturn len(errs) == 0, errs\n}\n\nfunc scheduleRenewal(ctx context.Context, lister corelisters.SecretLister, calc calculateDurationUntilRenewFn, queueFn func(interface{}, time.Duration), crt *v1alpha2.Certificate) {\n\tlog := logf.FromContext(ctx)\n\tlog = log.WithValues(\n\t\tlogf.RelatedResourceNameKey, crt.Spec.SecretName,\n\t\tlogf.RelatedResourceNamespaceKey, crt.Namespace,\n\t\tlogf.RelatedResourceKindKey, \"Secret\",\n\t)\n\n\tkey, err := keyFunc(crt)\n\tif err != nil {\n\t\tlog.Error(err, \"error getting key for certificate resource\")\n\t\treturn\n\t}\n\n\tcert, err := kube.SecretTLSCert(ctx, lister, crt.Namespace, crt.Spec.SecretName)\n\tif err != nil {\n\t\tif !errors.IsInvalidData(err) {\n\t\t\tlog.Error(err, \"error getting secret for certificate resource\")\n\t\t}\n\t\treturn\n\t}\n\n\trenewIn := calc(ctx, cert, crt)\n\tqueueFn(key, renewIn)\n\n\tlog.WithValues(\"duration_until_renewal\", renewIn.String()).Info(\"certificate scheduled for renewal\")\n}\n\n\/\/ staticTemporarySerialNumber is a fixed serial number we check for when\n\/\/ updating the status of a certificate.\n\/\/ It is used to identify temporarily generated certificates, so that friendly\n\/\/ status messages can be displayed to users.\nconst staticTemporarySerialNumber = 0x1234567890\n\nfunc isTemporaryCertificate(cert *x509.Certificate) bool {\n\tif cert == nil {\n\t\treturn false\n\t}\n\treturn cert.SerialNumber.Int64() == staticTemporarySerialNumber\n}\n\n\/\/ generateLocallySignedTemporaryCertificate signs a temporary certificate for\n\/\/ the given certificate resource using a one-use temporary CA that is then\n\/\/ discarded afterwards.\n\/\/ This is to mitigate a potential attack against x509 certificates that use a\n\/\/ predictable serial number and weak MD5 hashing algorithms.\n\/\/ In practice, this shouldn't really be a concern anyway.\nfunc generateLocallySignedTemporaryCertificate(crt *v1alpha2.Certificate, pk []byte) ([]byte, error) {\n\t\/\/ generate a throwaway self-signed root CA\n\tcaPk, err := pki.GenerateECPrivateKey(pki.ECCurve521)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcaCertTemplate, err := pki.GenerateTemplate(&v1alpha2.Certificate{\n\t\tSpec: v1alpha2.CertificateSpec{\n\t\t\tCommonName: \"cert-manager.local\",\n\t\t\tIsCA: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, caCert, err := pki.SignCertificate(caCertTemplate, caCertTemplate, caPk.Public(), caPk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ sign a temporary certificate using the root CA\n\ttemplate, err := pki.GenerateTemplate(crt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplate.SerialNumber = big.NewInt(staticTemporarySerialNumber)\n\n\tsigneeKey, err := pki.DecodePrivateKeyBytes(pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, _, err := pki.SignCertificate(template, caCert, signeeKey.Public(), caPk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}\n\nfunc updateCertificateStatus(ctx context.Context, m *metrics.Metrics, cmClient cmclient.Interface, old, new *v1alpha2.Certificate) (*v1alpha2.Certificate, error) {\n\tdefer m.UpdateCertificateStatus(new)\n\n\tlog := logf.FromContext(ctx, \"updateStatus\")\n\toldBytes, _ := json.Marshal(old.Status)\n\tnewBytes, _ := json.Marshal(new.Status)\n\tif reflect.DeepEqual(oldBytes, newBytes) {\n\t\treturn nil, nil\n\t}\n\tlog.V(logf.DebugLevel).Info(\"updating resource due to change in status\", \"diff\", pretty.Diff(string(oldBytes), string(newBytes)))\n\treturn cmClient.CertmanagerV1alpha2().Certificates(new.Namespace).UpdateStatus(new)\n}\n\nfunc certificateHasTemporaryCertificateAnnotation(crt *v1alpha2.Certificate) bool {\n\tif crt.Annotations == nil {\n\t\treturn false\n\t}\n\n\tif val, ok := crt.Annotations[v1alpha2.IssueTemporaryCertificateAnnotation]; ok && val == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkerrs \"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\tauthorizationapi \"github.com\/openshift\/origin\/pkg\/authorization\/api\"\n\tosclient \"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/diagnostics\/types\"\n\tosapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n)\n\n\/\/ ClusterRegistry is a Diagnostic to check that there is a working Docker registry.\ntype ClusterRegistry struct {\n\tKubeClient *kclient.Client\n\tOsClient *osclient.Client\n}\n\nconst (\n\tClusterRegistryName = \"ClusterRegistry\"\n\n\tregistryName = \"docker-registry\"\n\tregistryVolume = \"registry-storage\"\n\tclGetRegNone = `\nThere is no \"%s\" service in project \"%s\". This is not strictly required to\nbe present; however, it is required for builds, and its absence probably\nindicates an incomplete installation.\n\nPlease consult the documentation and use the 'oadm registry' command\nto create a Docker registry.`\n\n\tclGetRegFailed = `\nClient error while retrieving registry service. Client retrieved records\nduring discovery, so this is likely to be a transient error. Try running\ndiagnostics again. If this message persists, there may be a permissions\nproblem with getting records. The error was:\n\n(%T) %[1]v `\n\n\tclRegNoPods = `\nThe \"%s\" service exists but has no associated pods, so it\nis not available. Builds and deployments that use the registry will fail.`\n\n\tclRegNoRunningPods = `\nThe \"%s\" service exists but no pods currently running, so it\nis not available. Builds and deployments that use the registry will fail.`\n\n\tclRegMultiPods = `\nThe \"%s\" service has multiple associated pods each using\nephemeral storage. These are likely to have inconsistent stores of\nimages. Builds and deployments that use images from the registry may\nfail sporadically. Use a single registry or add a shared storage volume\nto the registries.`\n\n\tclRegPodDown = `\nThe \"%s\" pod for the \"%s\" service is not running.\nThis may be transient, a scheduling error, or something else.`\n\tclRegPodLog = `\nFailed to read the logs for the \"%s\" pod belonging to\nthe \"%s\" service. This is not a problem by itself but\nprevents diagnostics from looking for errors in those logs. The\nerror encountered was:\n%s`\n\n\tclRegPodConn = `\nThe pod logs for the \"%s\" pod belonging to\nthe \"%s\" service indicated a problem connecting to the\nmaster to notify it about a new image. This typically results in builds\nsucceeding but not triggering deployments (as they wait on notifications\nto the ImageStream from the build).\n\nThere are many reasons for this step to fail, including invalid\ncredentials, master outages, DNS failures, network errors, and so on. It\ncan be temporary or ongoing. Check the most recent error message from the\nregistry pod logs to determine the nature of the problem:\n\n%s`\n\n\tclRegPodErr = `\nThe pod logs for the \"%s\" pod belonging to\nthe \"%s\" service indicated unknown errors.\nThis could result in problems with builds or deployments.\nPlease examine the log entries to determine if there might be\nany related problems:\n%s`\n\n\tclRegNoEP = `\nThe \"%[1]s\" service exists with %d associated pod(s), but there\nare %d endpoints in the \"%[1]s\" service.\nThis mismatch likely indicates a system bug, and builds and\ndeployments that require the registry may fail sporadically.`\n\n\tclRegISDelFail = `\nThe diagnostics created an ImageStream named \"%[1]s\"\nfor test purposes and then attempted to delete it, which failed. This\nshould be an unusual, transient occurrence. The error encountered in\ndeleting it was:\n\n%s\n\nThis message is just to notify you that this object exists.\nYou ought to be able to delete this object with:\n\noc delete imagestream\/%[1]s -n default\n`\n\n\tclRegISMismatch = `\nDiagnostics created a test ImageStream and compared the registry IP\nit received to the registry IP available via the %[1]s service.\n\n%[1]s : %[2]s\nImageStream registry : %[3]s\n\nThey do not match, which probably means that an administrator re-created\nthe %[1]s service but the master has cached the old service\nIP address. Builds or deployments that use ImageStreams with the wrong\n%[1]s IP will fail under this condition.\n\nTo resolve this issue, restarting the master (to clear the cache) should\nbe sufficient. Existing ImageStreams may need to be re-created.`\n)\n\nfunc (d *ClusterRegistry) Name() string {\n\treturn ClusterRegistryName\n}\n\nfunc (d *ClusterRegistry) Description() string {\n\treturn \"Check that there is a working Docker registry\"\n}\n\nfunc (d *ClusterRegistry) CanRun() (bool, error) {\n\tif d.OsClient == nil || d.KubeClient == nil {\n\t\treturn false, fmt.Errorf(\"must have kube and os clients\")\n\t}\n\treturn userCan(d.OsClient, authorizationapi.AuthorizationAttributes{\n\t\tNamespace: kapi.NamespaceDefault,\n\t\tVerb: \"get\",\n\t\tResource: \"services\",\n\t\tResourceName: registryName,\n\t})\n}\n\nfunc (d *ClusterRegistry) Check() types.DiagnosticResult {\n\tr := types.NewDiagnosticResult(ClusterRegistryName)\n\tif service := d.getRegistryService(r); service != nil {\n\t\t\/\/ Check that it actually has pod(s) selected and running\n\t\tif runningPods := d.getRegistryPods(service, r); len(runningPods) == 0 {\n\t\t\tr.Error(\"DClu1001\", nil, fmt.Sprintf(clRegNoRunningPods, registryName))\n\t\t\treturn r\n\t\t} else if d.checkRegistryEndpoints(runningPods, r) { \/\/ Check that matching endpoint exists on the service\n\t\t\t\/\/ attempt to create an imagestream and see if it gets the same registry service IP from the service cache\n\t\t\td.verifyRegistryImageStream(service, r)\n\t\t}\n\t}\n\treturn r\n}\n\nfunc (d *ClusterRegistry) getRegistryService(r types.DiagnosticResult) *kapi.Service {\n\tservice, err := d.KubeClient.Services(kapi.NamespaceDefault).Get(registryName)\n\tif err != nil && reflect.TypeOf(err) == reflect.TypeOf(&kerrs.StatusError{}) {\n\t\tr.Warn(\"DClu1002\", err, fmt.Sprintf(clGetRegNone, registryName, kapi.NamespaceDefault))\n\t\treturn nil\n\t} else if err != nil {\n\t\tr.Error(\"DClu1003\", err, fmt.Sprintf(clGetRegFailed, err))\n\t\treturn nil\n\t}\n\tr.Debug(\"DClu1004\", fmt.Sprintf(\"Found %s service with ports %v\", registryName, service.Spec.Ports))\n\treturn service\n}\n\nfunc (d *ClusterRegistry) getRegistryPods(service *kapi.Service, r types.DiagnosticResult) []*kapi.Pod {\n\trunningPods := []*kapi.Pod{}\n\tpods, err := d.KubeClient.Pods(kapi.NamespaceDefault).List(labels.SelectorFromSet(service.Spec.Selector), fields.Everything())\n\tif err != nil {\n\t\tr.Error(\"DClu1005\", err, fmt.Sprintf(\"Finding pods for '%s' service failed. This should never happen. Error: (%T) %[2]v\", registryName, err))\n\t\treturn runningPods\n\t} else if len(pods.Items) < 1 {\n\t\tr.Error(\"DClu1006\", nil, fmt.Sprintf(clRegNoPods, registryName))\n\t\treturn runningPods\n\t} else if len(pods.Items) > 1 {\n\t\t\/\/ multiple registry pods using EmptyDir will be inconsistent\n\t\tfor _, volume := range pods.Items[0].Spec.Volumes {\n\t\t\tif volume.Name == registryVolume && volume.EmptyDir != nil {\n\t\t\t\tr.Error(\"DClu1007\", nil, fmt.Sprintf(clRegMultiPods, registryName))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tfor _, pod := range pods.Items {\n\t\tr.Debug(\"DClu1008\", fmt.Sprintf(\"Found %s pod with name %s\", registryName, pod.ObjectMeta.Name))\n\t\tif pod.Status.Phase != kapi.PodRunning {\n\t\t\tr.Warn(\"DClu1009\", nil, fmt.Sprintf(clRegPodDown, pod.ObjectMeta.Name, registryName))\n\t\t} else {\n\t\t\trunningPods = append(runningPods, &pod)\n\t\t\t\/\/ Check the logs for that pod for common issues (credentials, DNS resolution failure)\n\t\t\td.checkRegistryLogs(&pod, r)\n\t\t}\n\t}\n\treturn runningPods\n}\n\nfunc (d *ClusterRegistry) checkRegistryLogs(pod *kapi.Pod, r types.DiagnosticResult) {\n\t\/\/ pull out logs from the pod\n\treadCloser, err := d.KubeClient.RESTClient.Get().\n\t\tNamespace(\"default\").Name(pod.ObjectMeta.Name).\n\t\tResource(\"pods\").SubResource(\"log\").\n\t\tParam(\"follow\", \"false\").\n\t\tParam(\"container\", pod.Spec.Containers[0].Name).\n\t\tStream()\n\tif err != nil {\n\t\tr.Warn(\"DClu1010\", nil, fmt.Sprintf(clRegPodLog, pod.ObjectMeta.Name, registryName, fmt.Sprintf(\"(%T) %[1]v\", err)))\n\t\treturn\n\t}\n\tdefer readCloser.Close()\n\n\tclientError := \"\"\n\tregistryError := \"\"\n\tscanner := bufio.NewScanner(readCloser)\n\tfor scanner.Scan() {\n\t\tlogLine := scanner.Text()\n\t\t\/\/ TODO: once the logging API gets \"since\" and \"tail\" and \"limit\", limit to more recent log entries\n\t\t\/\/ https:\/\/github.com\/kubernetes\/kubernetes\/issues\/12447\n\t\tif strings.Contains(logLine, `level=error msg=\"client error:`) {\n\t\t\tclientError = logLine \/\/ end up showing only the most recent client error\n\t\t} else if strings.Contains(logLine, \"level=error msg=\") {\n\t\t\tregistryError += \"\\n\" + logLine \/\/ gather generic errors\n\t\t}\n\t}\n\tif clientError != \"\" {\n\t\tr.Error(\"DClu1011\", nil, fmt.Sprintf(clRegPodConn, pod.ObjectMeta.Name, registryName, clientError))\n\t}\n\tif registryError != \"\" {\n\t\tr.Warn(\"DClu1012\", nil, fmt.Sprintf(clRegPodErr, pod.ObjectMeta.Name, registryName, registryError))\n\t}\n\n}\n\nfunc (d *ClusterRegistry) checkRegistryEndpoints(pods []*kapi.Pod, r types.DiagnosticResult) bool {\n\tendPoint, err := d.KubeClient.Endpoints(kapi.NamespaceDefault).Get(registryName)\n\tif err != nil {\n\t\tr.Error(\"DClu1013\", err, fmt.Sprintf(`Finding endpoints for \"%s\" service failed. This should never happen. Error: (%[2]T) %[2]v`, registryName, err))\n\t\treturn false\n\t}\n\tnumEP := 0\n\tfor _, subs := range endPoint.Subsets {\n\t\tnumEP += len(subs.Addresses)\n\t}\n\tif numEP != len(pods) {\n\t\tr.Warn(\"DClu1014\", nil, fmt.Sprintf(clRegNoEP, registryName, len(pods), numEP))\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (d *ClusterRegistry) verifyRegistryImageStream(service *kapi.Service, r types.DiagnosticResult) {\n\timgStream, err := d.OsClient.ImageStreams(kapi.NamespaceDefault).Create(&osapi.ImageStream{ObjectMeta: kapi.ObjectMeta{GenerateName: \"diagnostic-test\"}})\n\tif err != nil {\n\t\tr.Error(\"DClu1015\", err, fmt.Sprintf(\"Creating test ImageStream failed. Error: (%T) %[1]v\", err))\n\t\treturn\n\t}\n\tdefer func() { \/\/ delete what we created, or notify that we couldn't\n\t\tif err := d.OsClient.ImageStreams(kapi.NamespaceDefault).Delete(imgStream.ObjectMeta.Name); err != nil {\n\t\t\tr.Warn(\"DClu1016\", err, fmt.Sprintf(clRegISDelFail, imgStream.ObjectMeta.Name, fmt.Sprintf(\"(%T) %[1]s\", err)))\n\t\t}\n\t}()\n\timgStream, err = d.OsClient.ImageStreams(kapi.NamespaceDefault).Get(imgStream.ObjectMeta.Name) \/\/ status is filled in post-create\n\tif err != nil {\n\t\tr.Error(\"DClu1017\", err, fmt.Sprintf(\"Getting created test ImageStream failed. Error: (%T) %[1]v\", err))\n\t\treturn\n\t}\n\tr.Debug(\"DClu1018\", fmt.Sprintf(\"Created test ImageStream: %[1]v\", imgStream))\n\tcacheHost := strings.SplitN(imgStream.Status.DockerImageRepository, \"\/\", 2)[0]\n\tserviceHost := fmt.Sprintf(\"%s:%d\", service.Spec.ClusterIP, service.Spec.Ports[0].Port)\n\tif cacheHost != serviceHost {\n\t\tr.Error(\"DClu1019\", nil, fmt.Sprintf(clRegISMismatch, registryName, serviceHost, cacheHost))\n\t}\n}\n<commit_msg>Scan for selinux write error in registry diagnostic.<commit_after>package cluster\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkerrs \"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\tauthorizationapi \"github.com\/openshift\/origin\/pkg\/authorization\/api\"\n\tosclient \"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/diagnostics\/types\"\n\tosapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n)\n\n\/\/ ClusterRegistry is a Diagnostic to check that there is a working Docker registry.\ntype ClusterRegistry struct {\n\tKubeClient *kclient.Client\n\tOsClient *osclient.Client\n}\n\nconst (\n\tClusterRegistryName = \"ClusterRegistry\"\n\n\tregistryName = \"docker-registry\"\n\tregistryVolume = \"registry-storage\"\n\tclGetRegNone = `\nThere is no \"%s\" service in project \"%s\". This is not strictly required to\nbe present; however, it is required for builds, and its absence probably\nindicates an incomplete installation.\n\nPlease consult the documentation and use the 'oadm registry' command\nto create a Docker registry.`\n\n\tclGetRegFailed = `\nClient error while retrieving registry service. Client retrieved records\nduring discovery, so this is likely to be a transient error. Try running\ndiagnostics again. If this message persists, there may be a permissions\nproblem with getting records. The error was:\n\n(%T) %[1]v `\n\n\tclRegNoPods = `\nThe \"%s\" service exists but has no associated pods, so it\nis not available. Builds and deployments that use the registry will fail.`\n\n\tclRegNoRunningPods = `\nThe \"%s\" service exists but no pods currently running, so it\nis not available. Builds and deployments that use the registry will fail.`\n\n\tclRegMultiPods = `\nThe \"%s\" service has multiple associated pods each using\nephemeral storage. These are likely to have inconsistent stores of\nimages. Builds and deployments that use images from the registry may\nfail sporadically. Use a single registry or add a shared storage volume\nto the registries.`\n\n\tclRegPodDown = `\nThe \"%s\" pod for the \"%s\" service is not running.\nThis may be transient, a scheduling error, or something else.`\n\tclRegPodLog = `\nFailed to read the logs for the \"%s\" pod belonging to\nthe \"%s\" service. This is not a problem by itself but\nprevents diagnostics from looking for errors in those logs. The\nerror encountered was:\n%s`\n\n\tclRegPodConn = `\nThe pod logs for the \"%s\" pod belonging to\nthe \"%s\" service indicated a problem connecting to the\nmaster to notify it about a new image. This typically results in builds\nsucceeding but not triggering deployments (as they wait on notifications\nto the ImageStream from the build).\n\nThere are many reasons for this step to fail, including invalid\ncredentials, master outages, DNS failures, network errors, and so on. It\ncan be temporary or ongoing. Check the most recent error message from the\nregistry pod logs to determine the nature of the problem:\n\n%s`\n\n\tclRegPodErr = `\nThe pod logs for the \"%s\" pod belonging to\nthe \"%s\" service indicated unknown errors.\nThis could result in problems with builds or deployments.\nPlease examine the log entries to determine if there might be\nany related problems:\n%s`\n\n\tclRegSelinuxErr = `\nThe pod logs for the \"%s\" pod belonging to\nthe \"%s\" service indicated the registry is unable to write to disk.\nThis may indicate an SELinux denial, or problems with volume\nownership\/permissions.\n\nFor volume permission problems please consult the Persistent Storage section\nof the Administrator's Guide.\n\nIn the case of SELinux this may be resolved on the node by running:\n\n sudo chcon -R -t svirt_sandbox_file_t [PATH_TO]\/openshift.local.volumes\n\n%s`\n\n\tclRegNoEP = `\nThe \"%[1]s\" service exists with %d associated pod(s), but there\nare %d endpoints in the \"%[1]s\" service.\nThis mismatch likely indicates a system bug, and builds and\ndeployments that require the registry may fail sporadically.`\n\n\tclRegISDelFail = `\nThe diagnostics created an ImageStream named \"%[1]s\"\nfor test purposes and then attempted to delete it, which failed. This\nshould be an unusual, transient occurrence. The error encountered in\ndeleting it was:\n\n%s\n\nThis message is just to notify you that this object exists.\nYou ought to be able to delete this object with:\n\noc delete imagestream\/%[1]s -n default\n`\n\n\tclRegISMismatch = `\nDiagnostics created a test ImageStream and compared the registry IP\nit received to the registry IP available via the %[1]s service.\n\n%[1]s : %[2]s\nImageStream registry : %[3]s\n\nThey do not match, which probably means that an administrator re-created\nthe %[1]s service but the master has cached the old service\nIP address. Builds or deployments that use ImageStreams with the wrong\n%[1]s IP will fail under this condition.\n\nTo resolve this issue, restarting the master (to clear the cache) should\nbe sufficient. Existing ImageStreams may need to be re-created.`\n)\n\nfunc (d *ClusterRegistry) Name() string {\n\treturn ClusterRegistryName\n}\n\nfunc (d *ClusterRegistry) Description() string {\n\treturn \"Check that there is a working Docker registry\"\n}\n\nfunc (d *ClusterRegistry) CanRun() (bool, error) {\n\tif d.OsClient == nil || d.KubeClient == nil {\n\t\treturn false, fmt.Errorf(\"must have kube and os clients\")\n\t}\n\treturn userCan(d.OsClient, authorizationapi.AuthorizationAttributes{\n\t\tNamespace: kapi.NamespaceDefault,\n\t\tVerb: \"get\",\n\t\tResource: \"services\",\n\t\tResourceName: registryName,\n\t})\n}\n\nfunc (d *ClusterRegistry) Check() types.DiagnosticResult {\n\tr := types.NewDiagnosticResult(ClusterRegistryName)\n\tif service := d.getRegistryService(r); service != nil {\n\t\t\/\/ Check that it actually has pod(s) selected and running\n\t\tif runningPods := d.getRegistryPods(service, r); len(runningPods) == 0 {\n\t\t\tr.Error(\"DClu1001\", nil, fmt.Sprintf(clRegNoRunningPods, registryName))\n\t\t\treturn r\n\t\t} else if d.checkRegistryEndpoints(runningPods, r) { \/\/ Check that matching endpoint exists on the service\n\t\t\t\/\/ attempt to create an imagestream and see if it gets the same registry service IP from the service cache\n\t\t\td.verifyRegistryImageStream(service, r)\n\t\t}\n\t}\n\treturn r\n}\n\nfunc (d *ClusterRegistry) getRegistryService(r types.DiagnosticResult) *kapi.Service {\n\tservice, err := d.KubeClient.Services(kapi.NamespaceDefault).Get(registryName)\n\tif err != nil && reflect.TypeOf(err) == reflect.TypeOf(&kerrs.StatusError{}) {\n\t\tr.Warn(\"DClu1002\", err, fmt.Sprintf(clGetRegNone, registryName, kapi.NamespaceDefault))\n\t\treturn nil\n\t} else if err != nil {\n\t\tr.Error(\"DClu1003\", err, fmt.Sprintf(clGetRegFailed, err))\n\t\treturn nil\n\t}\n\tr.Debug(\"DClu1004\", fmt.Sprintf(\"Found %s service with ports %v\", registryName, service.Spec.Ports))\n\treturn service\n}\n\nfunc (d *ClusterRegistry) getRegistryPods(service *kapi.Service, r types.DiagnosticResult) []*kapi.Pod {\n\trunningPods := []*kapi.Pod{}\n\tpods, err := d.KubeClient.Pods(kapi.NamespaceDefault).List(labels.SelectorFromSet(service.Spec.Selector), fields.Everything())\n\tif err != nil {\n\t\tr.Error(\"DClu1005\", err, fmt.Sprintf(\"Finding pods for '%s' service failed. This should never happen. Error: (%T) %[2]v\", registryName, err))\n\t\treturn runningPods\n\t} else if len(pods.Items) < 1 {\n\t\tr.Error(\"DClu1006\", nil, fmt.Sprintf(clRegNoPods, registryName))\n\t\treturn runningPods\n\t} else if len(pods.Items) > 1 {\n\t\t\/\/ multiple registry pods using EmptyDir will be inconsistent\n\t\tfor _, volume := range pods.Items[0].Spec.Volumes {\n\t\t\tif volume.Name == registryVolume && volume.EmptyDir != nil {\n\t\t\t\tr.Error(\"DClu1007\", nil, fmt.Sprintf(clRegMultiPods, registryName))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tfor _, pod := range pods.Items {\n\t\tr.Debug(\"DClu1008\", fmt.Sprintf(\"Found %s pod with name %s\", registryName, pod.ObjectMeta.Name))\n\t\tif pod.Status.Phase != kapi.PodRunning {\n\t\t\tr.Warn(\"DClu1009\", nil, fmt.Sprintf(clRegPodDown, pod.ObjectMeta.Name, registryName))\n\t\t} else {\n\t\t\trunningPods = append(runningPods, &pod)\n\t\t\t\/\/ Check the logs for that pod for common issues (credentials, DNS resolution failure)\n\t\t\td.checkRegistryLogs(&pod, r)\n\t\t}\n\t}\n\treturn runningPods\n}\n\nfunc (d *ClusterRegistry) checkRegistryLogs(pod *kapi.Pod, r types.DiagnosticResult) {\n\t\/\/ pull out logs from the pod\n\treadCloser, err := d.KubeClient.RESTClient.Get().\n\t\tNamespace(\"default\").Name(pod.ObjectMeta.Name).\n\t\tResource(\"pods\").SubResource(\"log\").\n\t\tParam(\"follow\", \"false\").\n\t\tParam(\"container\", pod.Spec.Containers[0].Name).\n\t\tStream()\n\tif err != nil {\n\t\tr.Warn(\"DClu1010\", nil, fmt.Sprintf(clRegPodLog, pod.ObjectMeta.Name, registryName, fmt.Sprintf(\"(%T) %[1]v\", err)))\n\t\treturn\n\t}\n\tdefer readCloser.Close()\n\n\t\/\/ Indicator that selinux is blocking the registry from writing to disk:\n\tselinuxErrorRegex, _ := regexp.Compile(\".*level=error.*mkdir.*permission denied.*\")\n\t\/\/ If seen after the above error regex, we know the problem has since been fixed:\n\tselinuxSuccessRegex, _ := regexp.Compile(\".*level=info.*response completed.*http.request.method=PUT.*\")\n\n\tclientError := \"\"\n\tregistryError := \"\"\n\tselinuxError := \"\"\n\n\tscanner := bufio.NewScanner(readCloser)\n\tfor scanner.Scan() {\n\t\tlogLine := scanner.Text()\n\t\t\/\/ TODO: once the logging API gets \"since\" and \"tail\" and \"limit\", limit to more recent log entries\n\t\t\/\/ https:\/\/github.com\/kubernetes\/kubernetes\/issues\/12447\n\t\tif strings.Contains(logLine, `level=error msg=\"client error:`) {\n\t\t\tclientError = logLine \/\/ end up showing only the most recent client error\n\t\t} else if selinuxErrorRegex.MatchString(logLine) {\n\t\t\tselinuxError = logLine\n\t\t} else if selinuxSuccessRegex.MatchString(logLine) {\n\t\t\t\/\/ Check for a successful registry push, if this occurs after a selinux error\n\t\t\t\/\/ we can safely clear it, the problem has already been fixed.\n\t\t\tselinuxError = \"\"\n\t\t} else if strings.Contains(logLine, \"level=error msg=\") {\n\t\t\tregistryError += \"\\n\" + logLine \/\/ gather generic errors\n\t\t}\n\t}\n\tif clientError != \"\" {\n\t\tr.Error(\"DClu1011\", nil, fmt.Sprintf(clRegPodConn, pod.ObjectMeta.Name, registryName, clientError))\n\t}\n\tif selinuxError != \"\" {\n\t\tr.Error(\"DClu1020\", nil, fmt.Sprintf(clRegSelinuxErr, pod.ObjectMeta.Name, registryName, selinuxError))\n\t}\n\tif registryError != \"\" {\n\t\tr.Warn(\"DClu1012\", nil, fmt.Sprintf(clRegPodErr, pod.ObjectMeta.Name, registryName, registryError))\n\t}\n}\n\nfunc (d *ClusterRegistry) checkRegistryEndpoints(pods []*kapi.Pod, r types.DiagnosticResult) bool {\n\tendPoint, err := d.KubeClient.Endpoints(kapi.NamespaceDefault).Get(registryName)\n\tif err != nil {\n\t\tr.Error(\"DClu1013\", err, fmt.Sprintf(`Finding endpoints for \"%s\" service failed. This should never happen. Error: (%[2]T) %[2]v`, registryName, err))\n\t\treturn false\n\t}\n\tnumEP := 0\n\tfor _, subs := range endPoint.Subsets {\n\t\tnumEP += len(subs.Addresses)\n\t}\n\tif numEP != len(pods) {\n\t\tr.Warn(\"DClu1014\", nil, fmt.Sprintf(clRegNoEP, registryName, len(pods), numEP))\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (d *ClusterRegistry) verifyRegistryImageStream(service *kapi.Service, r types.DiagnosticResult) {\n\timgStream, err := d.OsClient.ImageStreams(kapi.NamespaceDefault).Create(&osapi.ImageStream{ObjectMeta: kapi.ObjectMeta{GenerateName: \"diagnostic-test\"}})\n\tif err != nil {\n\t\tr.Error(\"DClu1015\", err, fmt.Sprintf(\"Creating test ImageStream failed. Error: (%T) %[1]v\", err))\n\t\treturn\n\t}\n\tdefer func() { \/\/ delete what we created, or notify that we couldn't\n\t\tif err := d.OsClient.ImageStreams(kapi.NamespaceDefault).Delete(imgStream.ObjectMeta.Name); err != nil {\n\t\t\tr.Warn(\"DClu1016\", err, fmt.Sprintf(clRegISDelFail, imgStream.ObjectMeta.Name, fmt.Sprintf(\"(%T) %[1]s\", err)))\n\t\t}\n\t}()\n\timgStream, err = d.OsClient.ImageStreams(kapi.NamespaceDefault).Get(imgStream.ObjectMeta.Name) \/\/ status is filled in post-create\n\tif err != nil {\n\t\tr.Error(\"DClu1017\", err, fmt.Sprintf(\"Getting created test ImageStream failed. Error: (%T) %[1]v\", err))\n\t\treturn\n\t}\n\tr.Debug(\"DClu1018\", fmt.Sprintf(\"Created test ImageStream: %[1]v\", imgStream))\n\tcacheHost := strings.SplitN(imgStream.Status.DockerImageRepository, \"\/\", 2)[0]\n\tserviceHost := fmt.Sprintf(\"%s:%d\", service.Spec.ClusterIP, service.Spec.Ports[0].Port)\n\tif cacheHost != serviceHost {\n\t\tr.Error(\"DClu1019\", nil, fmt.Sprintf(clRegISMismatch, registryName, serviceHost, cacheHost))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package integration\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/havoc-io\/mutagen\/cmd\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/agent\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/daemon\"\n\tmgrpc \"github.com\/havoc-io\/mutagen\/pkg\/grpc\"\n\tdaemonsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/daemon\"\n\tpromptsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/prompt\"\n\tsessionsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/session\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/session\"\n\n\t\/\/ Explicitly import packages that need to register protocol handlers.\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/integration\/protocols\/netpipe\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/docker\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/local\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/ssh\"\n)\n\n\/\/ sessionManager is the session manager for the integration testing daemon. It\n\/\/ is exposed for integration tests that operate at the API level (as opposed to\n\/\/ the gRPC or command line level).\nvar sessionManager *session.Manager\n\n\/\/ testMainInternal is the internal testing entry point, needed so that shutdown\n\/\/ operations can be deferred (since TestMain will invoke os.Exit). It copies\n\/\/ the mutagen executable to a well-known path, sets up the agent bundle to work\n\/\/ during testing, sets up a functionally complete daemon instance for testing,\n\/\/ runs integration tests, and finally tears down all of the aforementioned\n\/\/ infrastructure.\nfunc testMainInternal(m *testing.M) (int, error) {\n\t\/\/ Override the expected agent bundle location.\n\tagent.ExpectedBundleLocation = agent.BundleLocationBuildDirectory\n\n\t\/\/ Acquire the daemon lock and defer its release.\n\tlock, err := daemon.AcquireLock()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to acquire daemon lock\")\n\t}\n\tdefer lock.Unlock()\n\n\t\/\/ Create a session manager and defer its shutdown. Note that we assign to\n\t\/\/ the global instance here.\n\tsessionManager, err = session.NewManager()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to create session manager\")\n\t}\n\tdefer sessionManager.Shutdown()\n\n\t\/\/ Create the gRPC server and defer its stoppage. We use a hard stop rather\n\t\/\/ than a graceful stop so that it doesn't hang on open requests.\n\tserver := grpc.NewServer(\n\t\tgrpc.MaxSendMsgSize(mgrpc.MaximumIPCMessageSize),\n\t\tgrpc.MaxRecvMsgSize(mgrpc.MaximumIPCMessageSize),\n\t)\n\tdefer server.Stop()\n\n\t\/\/ Create and register the daemon service and defer its shutdown.\n\tdaemonServer := daemonsvc.NewServer()\n\tdaemonsvc.RegisterDaemonServer(server, daemonServer)\n\tdefer daemonServer.Shutdown()\n\n\t\/\/ Create and register the prompt service.\n\tpromptsvc.RegisterPromptingServer(server, promptsvc.NewServer())\n\n\t\/\/ Create and register the session service.\n\tsessionsvc.RegisterSessionsServer(server, sessionsvc.NewServer(sessionManager))\n\n\t\/\/ Create the daemon listener and defer its closure.\n\tlistener, err := daemon.NewListener()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to create daemon listener\")\n\t}\n\tdefer listener.Close()\n\n\t\/\/ Serve incoming connections in a separate Goroutine. We don't monitor for\n\t\/\/ errors since there's nothing that we can do about them and because\n\t\/\/ they'll likely show up in the test output anyway.\n\tgo server.Serve(listener)\n\n\t\/\/ Run tests.\n\treturn m.Run(), nil\n}\n\n\/\/ TestMain is the entry point for integration tests (overriding the default\n\/\/ generated entry point).\nfunc TestMain(m *testing.M) {\n\t\/\/ Invoke the internal entry point. If there's an error, print it out before\n\t\/\/ exiting.\n\tresult, err := testMainInternal(m)\n\tif err != nil {\n\t\tcmd.Error(err)\n\t}\n\n\t\/\/ Exit with the result.\n\tos.Exit(result)\n}\n<commit_msg>Added fix for integration test imports.<commit_after>package integration\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/havoc-io\/mutagen\/cmd\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/agent\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/daemon\"\n\tdaemonsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/daemon\"\n\tpromptsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/prompt\"\n\tsessionsvc \"github.com\/havoc-io\/mutagen\/pkg\/service\/session\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/session\"\n\n\t\/\/ Explicitly import packages that need to register protocol handlers.\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/integration\/protocols\/netpipe\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/docker\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/local\"\n\t_ \"github.com\/havoc-io\/mutagen\/pkg\/protocols\/ssh\"\n)\n\n\/\/ sessionManager is the session manager for the integration testing daemon. It\n\/\/ is exposed for integration tests that operate at the API level (as opposed to\n\/\/ the gRPC or command line level).\nvar sessionManager *session.Manager\n\n\/\/ testMainInternal is the internal testing entry point, needed so that shutdown\n\/\/ operations can be deferred (since TestMain will invoke os.Exit). It copies\n\/\/ the mutagen executable to a well-known path, sets up the agent bundle to work\n\/\/ during testing, sets up a functionally complete daemon instance for testing,\n\/\/ runs integration tests, and finally tears down all of the aforementioned\n\/\/ infrastructure.\nfunc testMainInternal(m *testing.M) (int, error) {\n\t\/\/ Override the expected agent bundle location.\n\tagent.ExpectedBundleLocation = agent.BundleLocationBuildDirectory\n\n\t\/\/ Acquire the daemon lock and defer its release.\n\tlock, err := daemon.AcquireLock()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to acquire daemon lock\")\n\t}\n\tdefer lock.Unlock()\n\n\t\/\/ Create a session manager and defer its shutdown. Note that we assign to\n\t\/\/ the global instance here.\n\tsessionManager, err = session.NewManager()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to create session manager\")\n\t}\n\tdefer sessionManager.Shutdown()\n\n\t\/\/ Create the gRPC server and defer its stoppage. We use a hard stop rather\n\t\/\/ than a graceful stop so that it doesn't hang on open requests.\n\tserver := grpc.NewServer(\n\t\tgrpc.MaxSendMsgSize(daemon.MaximumIPCMessageSize),\n\t\tgrpc.MaxRecvMsgSize(daemon.MaximumIPCMessageSize),\n\t)\n\tdefer server.Stop()\n\n\t\/\/ Create and register the daemon service and defer its shutdown.\n\tdaemonServer := daemonsvc.NewServer()\n\tdaemonsvc.RegisterDaemonServer(server, daemonServer)\n\tdefer daemonServer.Shutdown()\n\n\t\/\/ Create and register the prompt service.\n\tpromptsvc.RegisterPromptingServer(server, promptsvc.NewServer())\n\n\t\/\/ Create and register the session service.\n\tsessionsvc.RegisterSessionsServer(server, sessionsvc.NewServer(sessionManager))\n\n\t\/\/ Create the daemon listener and defer its closure.\n\tlistener, err := daemon.NewListener()\n\tif err != nil {\n\t\treturn -1, errors.Wrap(err, \"unable to create daemon listener\")\n\t}\n\tdefer listener.Close()\n\n\t\/\/ Serve incoming connections in a separate Goroutine. We don't monitor for\n\t\/\/ errors since there's nothing that we can do about them and because\n\t\/\/ they'll likely show up in the test output anyway.\n\tgo server.Serve(listener)\n\n\t\/\/ Run tests.\n\treturn m.Run(), nil\n}\n\n\/\/ TestMain is the entry point for integration tests (overriding the default\n\/\/ generated entry point).\nfunc TestMain(m *testing.M) {\n\t\/\/ Invoke the internal entry point. If there's an error, print it out before\n\t\/\/ exiting.\n\tresult, err := testMainInternal(m)\n\tif err != nil {\n\t\tcmd.Error(err)\n\t}\n\n\t\/\/ Exit with the result.\n\tos.Exit(result)\n}\n<|endoftext|>"} {"text":"<commit_before>package workers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/client\/auth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n\t\"github.com\/cozy\/cozy-stack\/web\/jsonapi\"\n)\n\nfunc init() {\n\tjobs.AddWorker(\"sharingupdates\", &jobs.WorkerConfig{\n\t\tConcurrency: 4,\n\t\tMaxExecCount: 3,\n\t\tTimeout: 10 * time.Second,\n\t\tWorkerFunc: SharingUpdates,\n\t})\n}\n\nvar (\n\t\/\/ ErrSharingIDNotUnique is used when several occurences of the same sharing id are found\n\tErrSharingIDNotUnique = errors.New(\"Several sharings with this id found\")\n\t\/\/ ErrSharingDoesNotExist is used when the given sharing does not exist.\n\tErrSharingDoesNotExist = errors.New(\"Sharing does not exist\")\n\t\/\/ ErrDocumentNotLegitimate is used when a shared document is triggered but\n\t\/\/ not legitimate for this sharing\n\tErrDocumentNotLegitimate = errors.New(\"Triggered illegitimate shared document\")\n\t\/\/ErrRecipientDoesNotExist is used when the given recipient does not exist\n\tErrRecipientDoesNotExist = errors.New(\"Recipient with given ID does not exist\")\n\t\/\/ ErrRecipientHasNoURL is used to signal that a recipient has no URL.\n\tErrRecipientHasNoURL = errors.New(\"Recipient has no URL\")\n)\n\n\/\/ TriggerEvent describes the fields retrieved after a triggered event\ntype TriggerEvent struct {\n\tEvent *EventDoc `json:\"event\"`\n\tMessage *SharingMessage `json:\"message\"`\n}\n\n\/\/ EventDoc describes the event returned by the trigger\ntype EventDoc struct {\n\tType string `json:\"type\"`\n\tDoc *couchdb.JSONDoc\n}\n\n\/\/ SharingMessage describes a sharing message\ntype SharingMessage struct {\n\tSharingID string `json:\"sharing_id\"`\n\tDocType string `json:\"doctype\"`\n}\n\n\/\/ Sharing describes the sharing document structure\ntype Sharing struct {\n\tSharingType string `json:\"sharing_type\"`\n\tPermissions permissions.Set `json:\"permissions,omitempty\"`\n\tRecipientsStatus []*RecipientStatus `json:\"recipients,omitempty\"`\n}\n\n\/\/ RecipientStatus contains the information about a recipient for a sharing\ntype RecipientStatus struct {\n\tStatus string `json:\"status,omitempty\"`\n\tRefRecipient jsonapi.ResourceIdentifier `json:\"recipient,omitempty\"`\n\tAccessToken *auth.AccessToken\n}\n\n\/\/ SharingUpdates handles shared document updates\nfunc SharingUpdates(ctx context.Context, m *jobs.Message) error {\n\tdomain := ctx.Value(jobs.ContextDomainKey).(string)\n\n\tevent := &TriggerEvent{}\n\terr := m.Unmarshal(&event)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsharingID := event.Message.SharingID\n\tdocType := event.Message.DocType\n\tdocID := event.Event.Doc.M[\"_id\"].(string)\n\n\t\/\/ Get the sharing document\n\tdb := couchdb.SimpleDatabasePrefix(domain)\n\tvar res []Sharing\n\terr = couchdb.FindDocs(db, consts.Sharings, &couchdb.FindRequest{\n\t\tUseIndex: \"by-sharing-id\",\n\t\tSelector: mango.Equal(\"sharing_id\", sharingID),\n\t}, &res)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(res) < 1 {\n\t\treturn ErrSharingDoesNotExist\n\t} else if len(res) > 1 {\n\t\treturn ErrSharingIDNotUnique\n\t}\n\tsharing := &res[0]\n\n\t\/\/ Check the updated document is legitimate for this sharing\n\tif err = checkDocument(sharing, docID); err != nil {\n\t\treturn err\n\t}\n\n\treturn sendToRecipients(db, domain, sharing, docType, docID)\n}\n\n\/\/ checkDocument checks the legitimity of the updated document to be shared\nfunc checkDocument(sharing *Sharing, docID string) error {\n\t\/\/ Check sharing type\n\tif sharing.SharingType == consts.OneShotSharing {\n\t\treturn ErrDocumentNotLegitimate\n\t}\n\t\/\/ Check permissions\n\tfor _, rule := range sharing.Permissions {\n\t\tfor _, val := range rule.Values {\n\t\t\tif val == docID {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn ErrDocumentNotLegitimate\n}\n\n\/\/ sendToRecipients retreives the recipients and send the document\nfunc sendToRecipients(db couchdb.Database, domain string, sharing *Sharing, docType, docID string) error {\n\n\trecInfos := make([]*RecipientInfo, len(sharing.RecipientsStatus))\n\tfor i, rec := range sharing.RecipientsStatus {\n\t\trecDoc, err := GetRecipient(db, rec.RefRecipient.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu, err := ExtractHost(recDoc.M[\"url\"].(string))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinfo := &RecipientInfo{\n\t\t\tURL: u,\n\t\t\tToken: rec.AccessToken.AccessToken,\n\t\t}\n\t\trecInfos[i] = info\n\t}\n\topts := &SendOptions{\n\t\tDocID: docID,\n\t\tDocType: docType,\n\t\tUpdate: true,\n\t\tRecipients: recInfos,\n\t}\n\t\/\/ TODO: handle file sharing\n\tif opts.DocType != consts.Files {\n\t\treturn SendDoc(domain, opts)\n\t}\n\treturn nil\n}\n\n\/\/ GetRecipient returns the Recipient stored in database from a given ID\nfunc GetRecipient(db couchdb.Database, recID string) (*couchdb.JSONDoc, error) {\n\tdoc := &couchdb.JSONDoc{}\n\terr := couchdb.GetDoc(db, consts.Recipients, recID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\terr = ErrRecipientDoesNotExist\n\t}\n\treturn doc, err\n}\n\n\/\/ ExtractHost returns the recipient's host, without the scheme\nfunc ExtractHost(fullURL string) (string, error) {\n\tif fullURL == \"\" {\n\t\treturn \"\", ErrRecipientHasNoURL\n\t}\n\tu, err := url.Parse(fullURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn u.Host, nil\n}\n<commit_msg>Fix build with couchdb.DocReference<commit_after>package workers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/client\/auth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n)\n\nfunc init() {\n\tjobs.AddWorker(\"sharingupdates\", &jobs.WorkerConfig{\n\t\tConcurrency: 4,\n\t\tMaxExecCount: 3,\n\t\tTimeout: 10 * time.Second,\n\t\tWorkerFunc: SharingUpdates,\n\t})\n}\n\nvar (\n\t\/\/ ErrSharingIDNotUnique is used when several occurences of the same sharing id are found\n\tErrSharingIDNotUnique = errors.New(\"Several sharings with this id found\")\n\t\/\/ ErrSharingDoesNotExist is used when the given sharing does not exist.\n\tErrSharingDoesNotExist = errors.New(\"Sharing does not exist\")\n\t\/\/ ErrDocumentNotLegitimate is used when a shared document is triggered but\n\t\/\/ not legitimate for this sharing\n\tErrDocumentNotLegitimate = errors.New(\"Triggered illegitimate shared document\")\n\t\/\/ErrRecipientDoesNotExist is used when the given recipient does not exist\n\tErrRecipientDoesNotExist = errors.New(\"Recipient with given ID does not exist\")\n\t\/\/ ErrRecipientHasNoURL is used to signal that a recipient has no URL.\n\tErrRecipientHasNoURL = errors.New(\"Recipient has no URL\")\n)\n\n\/\/ TriggerEvent describes the fields retrieved after a triggered event\ntype TriggerEvent struct {\n\tEvent *EventDoc `json:\"event\"`\n\tMessage *SharingMessage `json:\"message\"`\n}\n\n\/\/ EventDoc describes the event returned by the trigger\ntype EventDoc struct {\n\tType string `json:\"type\"`\n\tDoc *couchdb.JSONDoc\n}\n\n\/\/ SharingMessage describes a sharing message\ntype SharingMessage struct {\n\tSharingID string `json:\"sharing_id\"`\n\tDocType string `json:\"doctype\"`\n}\n\n\/\/ Sharing describes the sharing document structure\ntype Sharing struct {\n\tSharingType string `json:\"sharing_type\"`\n\tPermissions permissions.Set `json:\"permissions,omitempty\"`\n\tRecipientsStatus []*RecipientStatus `json:\"recipients,omitempty\"`\n}\n\n\/\/ RecipientStatus contains the information about a recipient for a sharing\ntype RecipientStatus struct {\n\tStatus string `json:\"status,omitempty\"`\n\tRefRecipient couchdb.DocReference `json:\"recipient,omitempty\"`\n\tAccessToken *auth.AccessToken\n}\n\n\/\/ SharingUpdates handles shared document updates\nfunc SharingUpdates(ctx context.Context, m *jobs.Message) error {\n\tdomain := ctx.Value(jobs.ContextDomainKey).(string)\n\n\tevent := &TriggerEvent{}\n\terr := m.Unmarshal(&event)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsharingID := event.Message.SharingID\n\tdocType := event.Message.DocType\n\tdocID := event.Event.Doc.M[\"_id\"].(string)\n\n\t\/\/ Get the sharing document\n\tdb := couchdb.SimpleDatabasePrefix(domain)\n\tvar res []Sharing\n\terr = couchdb.FindDocs(db, consts.Sharings, &couchdb.FindRequest{\n\t\tUseIndex: \"by-sharing-id\",\n\t\tSelector: mango.Equal(\"sharing_id\", sharingID),\n\t}, &res)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(res) < 1 {\n\t\treturn ErrSharingDoesNotExist\n\t} else if len(res) > 1 {\n\t\treturn ErrSharingIDNotUnique\n\t}\n\tsharing := &res[0]\n\n\t\/\/ Check the updated document is legitimate for this sharing\n\tif err = checkDocument(sharing, docID); err != nil {\n\t\treturn err\n\t}\n\n\treturn sendToRecipients(db, domain, sharing, docType, docID)\n}\n\n\/\/ checkDocument checks the legitimity of the updated document to be shared\nfunc checkDocument(sharing *Sharing, docID string) error {\n\t\/\/ Check sharing type\n\tif sharing.SharingType == consts.OneShotSharing {\n\t\treturn ErrDocumentNotLegitimate\n\t}\n\t\/\/ Check permissions\n\tfor _, rule := range sharing.Permissions {\n\t\tfor _, val := range rule.Values {\n\t\t\tif val == docID {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn ErrDocumentNotLegitimate\n}\n\n\/\/ sendToRecipients retreives the recipients and send the document\nfunc sendToRecipients(db couchdb.Database, domain string, sharing *Sharing, docType, docID string) error {\n\n\trecInfos := make([]*RecipientInfo, len(sharing.RecipientsStatus))\n\tfor i, rec := range sharing.RecipientsStatus {\n\t\trecDoc, err := GetRecipient(db, rec.RefRecipient.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu, err := ExtractHost(recDoc.M[\"url\"].(string))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinfo := &RecipientInfo{\n\t\t\tURL: u,\n\t\t\tToken: rec.AccessToken.AccessToken,\n\t\t}\n\t\trecInfos[i] = info\n\t}\n\topts := &SendOptions{\n\t\tDocID: docID,\n\t\tDocType: docType,\n\t\tUpdate: true,\n\t\tRecipients: recInfos,\n\t}\n\t\/\/ TODO: handle file sharing\n\tif opts.DocType != consts.Files {\n\t\treturn SendDoc(domain, opts)\n\t}\n\treturn nil\n}\n\n\/\/ GetRecipient returns the Recipient stored in database from a given ID\nfunc GetRecipient(db couchdb.Database, recID string) (*couchdb.JSONDoc, error) {\n\tdoc := &couchdb.JSONDoc{}\n\terr := couchdb.GetDoc(db, consts.Recipients, recID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\terr = ErrRecipientDoesNotExist\n\t}\n\treturn doc, err\n}\n\n\/\/ ExtractHost returns the recipient's host, without the scheme\nfunc ExtractHost(fullURL string) (string, error) {\n\tif fullURL == \"\" {\n\t\treturn \"\", ErrRecipientHasNoURL\n\t}\n\tu, err := url.Parse(fullURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn u.Host, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage constants\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ MachineName is the name to use for the VM.\nconst MachineName = \"minikubeVM\"\n\n\/\/ Fix for windows\nvar Minipath = filepath.Join(os.Getenv(\"HOME\"), \".minikube\")\n\n\/\/ TODO: Fix for windows\n\/\/ KubeconfigPath is the path to the Kubernetes client config\nvar KubeconfigPath = filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")\n\n\/\/ MinikubeContext is the kubeconfig context name used for minikube\nconst MinikubeContext = \"minikube\"\n\n\/\/ MakeMiniPath is a utility to calculate a relative path to our directory.\nfunc MakeMiniPath(fileName string) string {\n\treturn filepath.Join(Minipath, fileName)\n}\n\n\/\/ Only pass along these flags to localkube.\nvar LogFlags = [...]string{\n\t\"v\",\n\t\"vmodule\",\n}\n\nconst DefaultIsoUrl = \"https:\/\/storage.googleapis.com\/minikube\/minikube-0.1.iso\"\n\nconst (\n\tRemoteLocalKubeErrPath = \"\/var\/log\/localkube.err\"\n\tRemoteLocalKubeOutPath = \"\/var\/log\/localkube.out\"\n)\n<commit_msg>Bump the ISO URL.<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage constants\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ MachineName is the name to use for the VM.\nconst MachineName = \"minikubeVM\"\n\n\/\/ Fix for windows\nvar Minipath = filepath.Join(os.Getenv(\"HOME\"), \".minikube\")\n\n\/\/ TODO: Fix for windows\n\/\/ KubeconfigPath is the path to the Kubernetes client config\nvar KubeconfigPath = filepath.Join(os.Getenv(\"HOME\"), \".kube\", \"config\")\n\n\/\/ MinikubeContext is the kubeconfig context name used for minikube\nconst MinikubeContext = \"minikube\"\n\n\/\/ MakeMiniPath is a utility to calculate a relative path to our directory.\nfunc MakeMiniPath(fileName string) string {\n\treturn filepath.Join(Minipath, fileName)\n}\n\n\/\/ Only pass along these flags to localkube.\nvar LogFlags = [...]string{\n\t\"v\",\n\t\"vmodule\",\n}\n\nconst DefaultIsoUrl = \"https:\/\/storage.googleapis.com\/minikube\/minikube-0.2.iso\"\n\nconst (\n\tRemoteLocalKubeErrPath = \"\/var\/log\/localkube.err\"\n\tRemoteLocalKubeOutPath = \"\/var\/log\/localkube.out\"\n)\n<|endoftext|>"} {"text":"<commit_before>package sqlstore\n\nimport (\n\t\"time\"\n\n\t\"github.com\/go-xorm\/xorm\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/securejsondata\"\n\t\"github.com\/grafana\/grafana\/pkg\/metrics\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n)\n\nfunc init() {\n\tbus.AddHandler(\"sql\", GetDataSources)\n\tbus.AddHandler(\"sql\", GetAllDataSources)\n\tbus.AddHandler(\"sql\", AddDataSource)\n\tbus.AddHandler(\"sql\", DeleteDataSourceById)\n\tbus.AddHandler(\"sql\", DeleteDataSourceByName)\n\tbus.AddHandler(\"sql\", UpdateDataSource)\n\tbus.AddHandler(\"sql\", GetDataSourceById)\n\tbus.AddHandler(\"sql\", GetDataSourceByName)\n}\n\nfunc GetDataSourceById(query *m.GetDataSourceByIdQuery) error {\n\tmetrics.M_DB_DataSource_QueryById.Inc()\n\n\tdatasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}\n\thas, err := x.Get(&datasource)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !has {\n\t\treturn m.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = &datasource\n\treturn err\n}\n\nfunc GetDataSourceByName(query *m.GetDataSourceByNameQuery) error {\n\tdatasource := m.DataSource{OrgId: query.OrgId, Name: query.Name}\n\thas, err := x.Get(&datasource)\n\n\tif !has {\n\t\treturn m.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = &datasource\n\treturn err\n}\n\nfunc GetDataSources(query *m.GetDataSourcesQuery) error {\n\tsess := x.Limit(1000, 0).Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\n\tquery.Result = make([]*m.DataSource, 0)\n\treturn sess.Find(&query.Result)\n}\n\nfunc GetAllDataSources(query *m.GetAllDataSourcesQuery) error {\n\tsess := x.Limit(1000, 0).Asc(\"name\")\n\n\tquery.Result = make([]*m.DataSource, 0)\n\treturn sess.Find(&query.Result)\n}\n\nfunc DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tvar rawSql = \"DELETE FROM data_source WHERE id=? and org_id=?\"\n\t\tresult, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId)\n\t\taffected, _ := result.RowsAffected()\n\t\tcmd.DeletedDatasourcesCount = affected\n\t\treturn err\n\t})\n}\n\nfunc DeleteDataSourceByName(cmd *m.DeleteDataSourceByNameCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tvar rawSql = \"DELETE FROM data_source WHERE name=? and org_id=?\"\n\t\tresult, err := sess.Exec(rawSql, cmd.Name, cmd.OrgId)\n\t\taffected, _ := result.RowsAffected()\n\t\tcmd.DeletedDatasourcesCount = affected\n\t\treturn err\n\t})\n}\n\nfunc AddDataSource(cmd *m.AddDataSourceCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\texisting := m.DataSource{OrgId: cmd.OrgId, Name: cmd.Name}\n\t\thas, _ := sess.Get(&existing)\n\n\t\tif has {\n\t\t\treturn m.ErrDataSourceNameExists\n\t\t}\n\n\t\tds := &m.DataSource{\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData),\n\t\t\tCreated: time.Now(),\n\t\t\tUpdated: time.Now(),\n\t\t\tVersion: 1,\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t}\n\n\t\tif _, err := sess.Insert(ds); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := updateIsDefaultFlag(ds, sess); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Result = ds\n\t\treturn nil\n\t})\n}\n\nfunc updateIsDefaultFlag(ds *m.DataSource, sess *DBSession) error {\n\t\/\/ Handle is default flag\n\tif ds.IsDefault {\n\t\trawSql := \"UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?\"\n\t\tif _, err := sess.Exec(rawSql, false, ds.OrgId, ds.Id); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc UpdateDataSource(cmd *m.UpdateDataSourceCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tds := &m.DataSource{\n\t\t\tId: cmd.Id,\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData),\n\t\t\tUpdated: time.Now(),\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tVersion: cmd.Version + 1,\n\t\t}\n\n\t\tsess.UseBool(\"is_default\")\n\t\tsess.UseBool(\"basic_auth\")\n\t\tsess.UseBool(\"with_credentials\")\n\t\tsess.UseBool(\"read_only\")\n\n\t\tvar updateSession *xorm.Session\n\t\tif cmd.Version != 0 {\n\t\t\t\/\/ the reason we allow cmd.version > db.version is make it possible for people to force\n\t\t\t\/\/ updates to datasources using the datasource.yaml file without knowing exactly what version\n\t\t\t\/\/ a datasource have in the db.\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=? and version < ?\", ds.Id, ds.OrgId, ds.Version)\n\n\t\t} else {\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=?\", ds.Id, ds.OrgId)\n\t\t}\n\n\t\taffected, err := updateSession.Update(ds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif affected == 0 {\n\t\t\treturn m.ErrDataSourceUpdatingOldVersion\n\t\t}\n\n\t\terr = updateIsDefaultFlag(ds, sess)\n\n\t\tcmd.Result = ds\n\t\treturn err\n\t})\n}\n<commit_msg>Raise datasources number to 5000<commit_after>package sqlstore\n\nimport (\n\t\"time\"\n\n\t\"github.com\/go-xorm\/xorm\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/components\/securejsondata\"\n\t\"github.com\/grafana\/grafana\/pkg\/metrics\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n)\n\nfunc init() {\n\tbus.AddHandler(\"sql\", GetDataSources)\n\tbus.AddHandler(\"sql\", GetAllDataSources)\n\tbus.AddHandler(\"sql\", AddDataSource)\n\tbus.AddHandler(\"sql\", DeleteDataSourceById)\n\tbus.AddHandler(\"sql\", DeleteDataSourceByName)\n\tbus.AddHandler(\"sql\", UpdateDataSource)\n\tbus.AddHandler(\"sql\", GetDataSourceById)\n\tbus.AddHandler(\"sql\", GetDataSourceByName)\n}\n\nfunc GetDataSourceById(query *m.GetDataSourceByIdQuery) error {\n\tmetrics.M_DB_DataSource_QueryById.Inc()\n\n\tdatasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}\n\thas, err := x.Get(&datasource)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !has {\n\t\treturn m.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = &datasource\n\treturn err\n}\n\nfunc GetDataSourceByName(query *m.GetDataSourceByNameQuery) error {\n\tdatasource := m.DataSource{OrgId: query.OrgId, Name: query.Name}\n\thas, err := x.Get(&datasource)\n\n\tif !has {\n\t\treturn m.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = &datasource\n\treturn err\n}\n\nfunc GetDataSources(query *m.GetDataSourcesQuery) error {\n\tsess := x.Limit(5000, 0).Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\n\tquery.Result = make([]*m.DataSource, 0)\n\treturn sess.Find(&query.Result)\n}\n\nfunc GetAllDataSources(query *m.GetAllDataSourcesQuery) error {\n\tsess := x.Limit(5000, 0).Asc(\"name\")\n\n\tquery.Result = make([]*m.DataSource, 0)\n\treturn sess.Find(&query.Result)\n}\n\nfunc DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tvar rawSql = \"DELETE FROM data_source WHERE id=? and org_id=?\"\n\t\tresult, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId)\n\t\taffected, _ := result.RowsAffected()\n\t\tcmd.DeletedDatasourcesCount = affected\n\t\treturn err\n\t})\n}\n\nfunc DeleteDataSourceByName(cmd *m.DeleteDataSourceByNameCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tvar rawSql = \"DELETE FROM data_source WHERE name=? and org_id=?\"\n\t\tresult, err := sess.Exec(rawSql, cmd.Name, cmd.OrgId)\n\t\taffected, _ := result.RowsAffected()\n\t\tcmd.DeletedDatasourcesCount = affected\n\t\treturn err\n\t})\n}\n\nfunc AddDataSource(cmd *m.AddDataSourceCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\texisting := m.DataSource{OrgId: cmd.OrgId, Name: cmd.Name}\n\t\thas, _ := sess.Get(&existing)\n\n\t\tif has {\n\t\t\treturn m.ErrDataSourceNameExists\n\t\t}\n\n\t\tds := &m.DataSource{\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData),\n\t\t\tCreated: time.Now(),\n\t\t\tUpdated: time.Now(),\n\t\t\tVersion: 1,\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t}\n\n\t\tif _, err := sess.Insert(ds); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := updateIsDefaultFlag(ds, sess); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Result = ds\n\t\treturn nil\n\t})\n}\n\nfunc updateIsDefaultFlag(ds *m.DataSource, sess *DBSession) error {\n\t\/\/ Handle is default flag\n\tif ds.IsDefault {\n\t\trawSql := \"UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?\"\n\t\tif _, err := sess.Exec(rawSql, false, ds.OrgId, ds.Id); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc UpdateDataSource(cmd *m.UpdateDataSourceCommand) error {\n\treturn inTransaction(func(sess *DBSession) error {\n\t\tds := &m.DataSource{\n\t\t\tId: cmd.Id,\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData),\n\t\t\tUpdated: time.Now(),\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tVersion: cmd.Version + 1,\n\t\t}\n\n\t\tsess.UseBool(\"is_default\")\n\t\tsess.UseBool(\"basic_auth\")\n\t\tsess.UseBool(\"with_credentials\")\n\t\tsess.UseBool(\"read_only\")\n\n\t\tvar updateSession *xorm.Session\n\t\tif cmd.Version != 0 {\n\t\t\t\/\/ the reason we allow cmd.version > db.version is make it possible for people to force\n\t\t\t\/\/ updates to datasources using the datasource.yaml file without knowing exactly what version\n\t\t\t\/\/ a datasource have in the db.\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=? and version < ?\", ds.Id, ds.OrgId, ds.Version)\n\n\t\t} else {\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=?\", ds.Id, ds.OrgId)\n\t\t}\n\n\t\taffected, err := updateSession.Update(ds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif affected == 0 {\n\t\t\treturn m.ErrDataSourceUpdatingOldVersion\n\t\t}\n\n\t\terr = updateIsDefaultFlag(ds, sess)\n\n\t\tcmd.Result = ds\n\t\treturn err\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package sqlstore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/events\"\n\t\"github.com\/grafana\/grafana\/pkg\/infra\/metrics\"\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\tac \"github.com\/grafana\/grafana\/pkg\/services\/accesscontrol\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\/errutil\"\n\t\"xorm.io\/xorm\"\n)\n\n\/\/ GetDataSource adds a datasource to the query model by querying by org_id as well as\n\/\/ either uid (preferred), id, or name and is added to the bus.\nfunc (ss *SQLStore) GetDataSource(ctx context.Context, query *models.GetDataSourceQuery) error {\n\tmetrics.MDBDataSourceQueryByID.Inc()\n\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\treturn ss.getDataSource(ctx, query, sess)\n\t})\n}\n\nfunc (ss *SQLStore) getDataSource(ctx context.Context, query *models.GetDataSourceQuery, sess *DBSession) error {\n\tif query.OrgId == 0 || (query.Id == 0 && len(query.Name) == 0 && len(query.Uid) == 0) {\n\t\treturn models.ErrDataSourceIdentifierNotSet\n\t}\n\n\tdatasource := &models.DataSource{Name: query.Name, OrgId: query.OrgId, Id: query.Id, Uid: query.Uid}\n\thas, err := sess.Get(datasource)\n\n\tif err != nil {\n\t\tsqlog.Error(\"Failed getting data source\", \"err\", err, \"uid\", query.Uid, \"id\", query.Id, \"name\", query.Name, \"orgId\", query.OrgId)\n\t\treturn err\n\t} else if !has {\n\t\treturn models.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = datasource\n\n\treturn nil\n}\n\nfunc (ss *SQLStore) GetDataSources(ctx context.Context, query *models.GetDataSourcesQuery) error {\n\tvar sess *xorm.Session\n\treturn ss.WithDbSession(ctx, func(dbSess *DBSession) error {\n\t\tif query.DataSourceLimit <= 0 {\n\t\t\tsess = dbSess.Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\t\t} else {\n\t\t\tsess = dbSess.Limit(query.DataSourceLimit, 0).Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\t\t}\n\n\t\tquery.Result = make([]*models.DataSource, 0)\n\t\treturn sess.Find(&query.Result)\n\t})\n}\n\n\/\/ GetDataSourcesByType returns all datasources for a given type or an error if the specified type is an empty string\nfunc (ss *SQLStore) GetDataSourcesByType(ctx context.Context, query *models.GetDataSourcesByTypeQuery) error {\n\tif query.Type == \"\" {\n\t\treturn fmt.Errorf(\"datasource type cannot be empty\")\n\t}\n\n\tquery.Result = make([]*models.DataSource, 0)\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\treturn sess.Where(\"type=?\", query.Type).Asc(\"id\").Find(&query.Result)\n\t})\n}\n\n\/\/ GetDefaultDataSource is used to get the default datasource of organization\nfunc (ss *SQLStore) GetDefaultDataSource(ctx context.Context, query *models.GetDefaultDataSourceQuery) error {\n\tdatasource := models.DataSource{}\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\texists, err := sess.Where(\"org_id=? AND is_default=?\", query.OrgId, true).Get(&datasource)\n\n\t\tif !exists {\n\t\t\treturn models.ErrDataSourceNotFound\n\t\t}\n\n\t\tquery.Result = &datasource\n\t\treturn err\n\t})\n}\n\n\/\/ DeleteDataSource removes a datasource by org_id as well as either uid (preferred), id, or name\n\/\/ and is added to the bus. It also removes permissions related to the team.\nfunc (ss *SQLStore) DeleteDataSource(ctx context.Context, cmd *models.DeleteDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\tdsQuery := &models.GetDataSourceQuery{Id: cmd.ID, Uid: cmd.UID, Name: cmd.Name, OrgId: cmd.OrgID}\n\t\terrGettingDS := ss.getDataSource(ctx, dsQuery, sess)\n\n\t\tif errGettingDS != nil && !errors.Is(errGettingDS, models.ErrDataSourceNotFound) {\n\t\t\treturn errGettingDS\n\t\t}\n\n\t\tds := dsQuery.Result\n\t\tif ds != nil {\n\t\t\t\/\/ Delete the data source\n\t\t\tresult, err := sess.Exec(\"DELETE FROM data_source WHERE org_id=? AND id=?\", ds.OrgId, ds.Id)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcmd.DeletedDatasourcesCount, _ = result.RowsAffected()\n\n\t\t\t\/\/ Remove associated AccessControl permissions\n\t\t\tif _, errDeletingPerms := sess.Exec(\"DELETE FROM permission WHERE scope=?\",\n\t\t\t\tac.Scope(\"datasources\", \"id\", fmt.Sprint(dsQuery.Result.Id))); errDeletingPerms != nil {\n\t\t\t\treturn errDeletingPerms\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Publish data source deletion event\n\t\tsess.publishAfterCommit(&events.DataSourceDeleted{\n\t\t\tTimestamp: time.Now(),\n\t\t\tName: cmd.Name,\n\t\t\tID: cmd.ID,\n\t\t\tUID: cmd.UID,\n\t\t\tOrgID: cmd.OrgID,\n\t\t})\n\n\t\treturn nil\n\t})\n}\n\nfunc (ss *SQLStore) AddDataSource(ctx context.Context, cmd *models.AddDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\texisting := models.DataSource{OrgId: cmd.OrgId, Name: cmd.Name}\n\t\thas, _ := sess.Get(&existing)\n\n\t\tif has {\n\t\t\treturn models.ErrDataSourceNameExists\n\t\t}\n\n\t\tif cmd.JsonData == nil {\n\t\t\tcmd.JsonData = simplejson.New()\n\t\t}\n\n\t\tif cmd.Uid == \"\" {\n\t\t\tuid, err := generateNewDatasourceUid(sess, cmd.OrgId)\n\t\t\tif err != nil {\n\t\t\t\treturn errutil.Wrapf(err, \"Failed to generate UID for datasource %q\", cmd.Name)\n\t\t\t}\n\t\t\tcmd.Uid = uid\n\t\t}\n\n\t\tds := &models.DataSource{\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: cmd.EncryptedSecureJsonData,\n\t\t\tCreated: time.Now(),\n\t\t\tUpdated: time.Now(),\n\t\t\tVersion: 1,\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tUid: cmd.Uid,\n\t\t}\n\n\t\tif _, err := sess.Insert(ds); err != nil {\n\t\t\tif dialect.IsUniqueConstraintViolation(err) && strings.Contains(strings.ToLower(dialect.ErrorMessage(err)), \"uid\") {\n\t\t\t\treturn models.ErrDataSourceUidExists\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif err := updateIsDefaultFlag(ds, sess); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Result = ds\n\n\t\tsess.publishAfterCommit(&events.DataSourceCreated{\n\t\t\tTimestamp: time.Now(),\n\t\t\tName: cmd.Name,\n\t\t\tID: ds.Id,\n\t\t\tUID: cmd.Uid,\n\t\t\tOrgID: cmd.OrgId,\n\t\t})\n\t\treturn nil\n\t})\n}\n\nfunc updateIsDefaultFlag(ds *models.DataSource, sess *DBSession) error {\n\t\/\/ Handle is default flag\n\tif ds.IsDefault {\n\t\trawSQL := \"UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?\"\n\t\tif _, err := sess.Exec(rawSQL, false, ds.OrgId, ds.Id); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ss *SQLStore) UpdateDataSource(ctx context.Context, cmd *models.UpdateDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\tif cmd.JsonData == nil {\n\t\t\tcmd.JsonData = simplejson.New()\n\t\t}\n\n\t\tds := &models.DataSource{\n\t\t\tId: cmd.Id,\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: cmd.EncryptedSecureJsonData,\n\t\t\tUpdated: time.Now(),\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tVersion: cmd.Version + 1,\n\t\t\tUid: cmd.Uid,\n\t\t}\n\n\t\tsess.UseBool(\"is_default\")\n\t\tsess.UseBool(\"basic_auth\")\n\t\tsess.UseBool(\"with_credentials\")\n\t\tsess.UseBool(\"read_only\")\n\t\t\/\/ Make sure password are zeroed out if empty. We do this as we want to migrate passwords from\n\t\t\/\/ plain text fields to SecureJsonData.\n\t\tsess.MustCols(\"password\")\n\t\tsess.MustCols(\"basic_auth_password\")\n\t\tsess.MustCols(\"user\")\n\n\t\tvar updateSession *xorm.Session\n\t\tif cmd.Version != 0 {\n\t\t\t\/\/ the reason we allow cmd.version > db.version is make it possible for people to force\n\t\t\t\/\/ updates to datasources using the datasource.yaml file without knowing exactly what version\n\t\t\t\/\/ a datasource have in the db.\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=? and version < ?\", ds.Id, ds.OrgId, ds.Version)\n\t\t} else {\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=?\", ds.Id, ds.OrgId)\n\t\t}\n\n\t\taffected, err := updateSession.Update(ds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif affected == 0 {\n\t\t\treturn models.ErrDataSourceUpdatingOldVersion\n\t\t}\n\n\t\terr = updateIsDefaultFlag(ds, sess)\n\n\t\tcmd.Result = ds\n\t\treturn err\n\t})\n}\n\nfunc generateNewDatasourceUid(sess *DBSession, orgId int64) (string, error) {\n\tfor i := 0; i < 3; i++ {\n\t\tuid := generateNewUid()\n\n\t\texists, err := sess.Where(\"org_id=? AND uid=?\", orgId, uid).Get(&models.DataSource{})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif !exists {\n\t\t\treturn uid, nil\n\t\t}\n\t}\n\n\treturn \"\", models.ErrDataSourceFailedGenerateUniqueUid\n}\n<commit_msg>Chore: fix comment (#45593)<commit_after>package sqlstore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/components\/simplejson\"\n\t\"github.com\/grafana\/grafana\/pkg\/events\"\n\t\"github.com\/grafana\/grafana\/pkg\/infra\/metrics\"\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\tac \"github.com\/grafana\/grafana\/pkg\/services\/accesscontrol\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\/errutil\"\n\t\"xorm.io\/xorm\"\n)\n\n\/\/ GetDataSource adds a datasource to the query model by querying by org_id as well as\n\/\/ either uid (preferred), id, or name and is added to the bus.\nfunc (ss *SQLStore) GetDataSource(ctx context.Context, query *models.GetDataSourceQuery) error {\n\tmetrics.MDBDataSourceQueryByID.Inc()\n\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\treturn ss.getDataSource(ctx, query, sess)\n\t})\n}\n\nfunc (ss *SQLStore) getDataSource(ctx context.Context, query *models.GetDataSourceQuery, sess *DBSession) error {\n\tif query.OrgId == 0 || (query.Id == 0 && len(query.Name) == 0 && len(query.Uid) == 0) {\n\t\treturn models.ErrDataSourceIdentifierNotSet\n\t}\n\n\tdatasource := &models.DataSource{Name: query.Name, OrgId: query.OrgId, Id: query.Id, Uid: query.Uid}\n\thas, err := sess.Get(datasource)\n\n\tif err != nil {\n\t\tsqlog.Error(\"Failed getting data source\", \"err\", err, \"uid\", query.Uid, \"id\", query.Id, \"name\", query.Name, \"orgId\", query.OrgId)\n\t\treturn err\n\t} else if !has {\n\t\treturn models.ErrDataSourceNotFound\n\t}\n\n\tquery.Result = datasource\n\n\treturn nil\n}\n\nfunc (ss *SQLStore) GetDataSources(ctx context.Context, query *models.GetDataSourcesQuery) error {\n\tvar sess *xorm.Session\n\treturn ss.WithDbSession(ctx, func(dbSess *DBSession) error {\n\t\tif query.DataSourceLimit <= 0 {\n\t\t\tsess = dbSess.Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\t\t} else {\n\t\t\tsess = dbSess.Limit(query.DataSourceLimit, 0).Where(\"org_id=?\", query.OrgId).Asc(\"name\")\n\t\t}\n\n\t\tquery.Result = make([]*models.DataSource, 0)\n\t\treturn sess.Find(&query.Result)\n\t})\n}\n\n\/\/ GetDataSourcesByType returns all datasources for a given type or an error if the specified type is an empty string\nfunc (ss *SQLStore) GetDataSourcesByType(ctx context.Context, query *models.GetDataSourcesByTypeQuery) error {\n\tif query.Type == \"\" {\n\t\treturn fmt.Errorf(\"datasource type cannot be empty\")\n\t}\n\n\tquery.Result = make([]*models.DataSource, 0)\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\treturn sess.Where(\"type=?\", query.Type).Asc(\"id\").Find(&query.Result)\n\t})\n}\n\n\/\/ GetDefaultDataSource is used to get the default datasource of organization\nfunc (ss *SQLStore) GetDefaultDataSource(ctx context.Context, query *models.GetDefaultDataSourceQuery) error {\n\tdatasource := models.DataSource{}\n\treturn ss.WithDbSession(ctx, func(sess *DBSession) error {\n\t\texists, err := sess.Where(\"org_id=? AND is_default=?\", query.OrgId, true).Get(&datasource)\n\n\t\tif !exists {\n\t\t\treturn models.ErrDataSourceNotFound\n\t\t}\n\n\t\tquery.Result = &datasource\n\t\treturn err\n\t})\n}\n\n\/\/ DeleteDataSource removes a datasource by org_id as well as either uid (preferred), id, or name\n\/\/ and is added to the bus. It also removes permissions related to the datasource.\nfunc (ss *SQLStore) DeleteDataSource(ctx context.Context, cmd *models.DeleteDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\tdsQuery := &models.GetDataSourceQuery{Id: cmd.ID, Uid: cmd.UID, Name: cmd.Name, OrgId: cmd.OrgID}\n\t\terrGettingDS := ss.getDataSource(ctx, dsQuery, sess)\n\n\t\tif errGettingDS != nil && !errors.Is(errGettingDS, models.ErrDataSourceNotFound) {\n\t\t\treturn errGettingDS\n\t\t}\n\n\t\tds := dsQuery.Result\n\t\tif ds != nil {\n\t\t\t\/\/ Delete the data source\n\t\t\tresult, err := sess.Exec(\"DELETE FROM data_source WHERE org_id=? AND id=?\", ds.OrgId, ds.Id)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcmd.DeletedDatasourcesCount, _ = result.RowsAffected()\n\n\t\t\t\/\/ Remove associated AccessControl permissions\n\t\t\tif _, errDeletingPerms := sess.Exec(\"DELETE FROM permission WHERE scope=?\",\n\t\t\t\tac.Scope(\"datasources\", \"id\", fmt.Sprint(dsQuery.Result.Id))); errDeletingPerms != nil {\n\t\t\t\treturn errDeletingPerms\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Publish data source deletion event\n\t\tsess.publishAfterCommit(&events.DataSourceDeleted{\n\t\t\tTimestamp: time.Now(),\n\t\t\tName: cmd.Name,\n\t\t\tID: cmd.ID,\n\t\t\tUID: cmd.UID,\n\t\t\tOrgID: cmd.OrgID,\n\t\t})\n\n\t\treturn nil\n\t})\n}\n\nfunc (ss *SQLStore) AddDataSource(ctx context.Context, cmd *models.AddDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\texisting := models.DataSource{OrgId: cmd.OrgId, Name: cmd.Name}\n\t\thas, _ := sess.Get(&existing)\n\n\t\tif has {\n\t\t\treturn models.ErrDataSourceNameExists\n\t\t}\n\n\t\tif cmd.JsonData == nil {\n\t\t\tcmd.JsonData = simplejson.New()\n\t\t}\n\n\t\tif cmd.Uid == \"\" {\n\t\t\tuid, err := generateNewDatasourceUid(sess, cmd.OrgId)\n\t\t\tif err != nil {\n\t\t\t\treturn errutil.Wrapf(err, \"Failed to generate UID for datasource %q\", cmd.Name)\n\t\t\t}\n\t\t\tcmd.Uid = uid\n\t\t}\n\n\t\tds := &models.DataSource{\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: cmd.EncryptedSecureJsonData,\n\t\t\tCreated: time.Now(),\n\t\t\tUpdated: time.Now(),\n\t\t\tVersion: 1,\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tUid: cmd.Uid,\n\t\t}\n\n\t\tif _, err := sess.Insert(ds); err != nil {\n\t\t\tif dialect.IsUniqueConstraintViolation(err) && strings.Contains(strings.ToLower(dialect.ErrorMessage(err)), \"uid\") {\n\t\t\t\treturn models.ErrDataSourceUidExists\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif err := updateIsDefaultFlag(ds, sess); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.Result = ds\n\n\t\tsess.publishAfterCommit(&events.DataSourceCreated{\n\t\t\tTimestamp: time.Now(),\n\t\t\tName: cmd.Name,\n\t\t\tID: ds.Id,\n\t\t\tUID: cmd.Uid,\n\t\t\tOrgID: cmd.OrgId,\n\t\t})\n\t\treturn nil\n\t})\n}\n\nfunc updateIsDefaultFlag(ds *models.DataSource, sess *DBSession) error {\n\t\/\/ Handle is default flag\n\tif ds.IsDefault {\n\t\trawSQL := \"UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?\"\n\t\tif _, err := sess.Exec(rawSQL, false, ds.OrgId, ds.Id); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ss *SQLStore) UpdateDataSource(ctx context.Context, cmd *models.UpdateDataSourceCommand) error {\n\treturn ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {\n\t\tif cmd.JsonData == nil {\n\t\t\tcmd.JsonData = simplejson.New()\n\t\t}\n\n\t\tds := &models.DataSource{\n\t\t\tId: cmd.Id,\n\t\t\tOrgId: cmd.OrgId,\n\t\t\tName: cmd.Name,\n\t\t\tType: cmd.Type,\n\t\t\tAccess: cmd.Access,\n\t\t\tUrl: cmd.Url,\n\t\t\tUser: cmd.User,\n\t\t\tPassword: cmd.Password,\n\t\t\tDatabase: cmd.Database,\n\t\t\tIsDefault: cmd.IsDefault,\n\t\t\tBasicAuth: cmd.BasicAuth,\n\t\t\tBasicAuthUser: cmd.BasicAuthUser,\n\t\t\tBasicAuthPassword: cmd.BasicAuthPassword,\n\t\t\tWithCredentials: cmd.WithCredentials,\n\t\t\tJsonData: cmd.JsonData,\n\t\t\tSecureJsonData: cmd.EncryptedSecureJsonData,\n\t\t\tUpdated: time.Now(),\n\t\t\tReadOnly: cmd.ReadOnly,\n\t\t\tVersion: cmd.Version + 1,\n\t\t\tUid: cmd.Uid,\n\t\t}\n\n\t\tsess.UseBool(\"is_default\")\n\t\tsess.UseBool(\"basic_auth\")\n\t\tsess.UseBool(\"with_credentials\")\n\t\tsess.UseBool(\"read_only\")\n\t\t\/\/ Make sure password are zeroed out if empty. We do this as we want to migrate passwords from\n\t\t\/\/ plain text fields to SecureJsonData.\n\t\tsess.MustCols(\"password\")\n\t\tsess.MustCols(\"basic_auth_password\")\n\t\tsess.MustCols(\"user\")\n\n\t\tvar updateSession *xorm.Session\n\t\tif cmd.Version != 0 {\n\t\t\t\/\/ the reason we allow cmd.version > db.version is make it possible for people to force\n\t\t\t\/\/ updates to datasources using the datasource.yaml file without knowing exactly what version\n\t\t\t\/\/ a datasource have in the db.\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=? and version < ?\", ds.Id, ds.OrgId, ds.Version)\n\t\t} else {\n\t\t\tupdateSession = sess.Where(\"id=? and org_id=?\", ds.Id, ds.OrgId)\n\t\t}\n\n\t\taffected, err := updateSession.Update(ds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif affected == 0 {\n\t\t\treturn models.ErrDataSourceUpdatingOldVersion\n\t\t}\n\n\t\terr = updateIsDefaultFlag(ds, sess)\n\n\t\tcmd.Result = ds\n\t\treturn err\n\t})\n}\n\nfunc generateNewDatasourceUid(sess *DBSession, orgId int64) (string, error) {\n\tfor i := 0; i < 3; i++ {\n\t\tuid := generateNewUid()\n\n\t\texists, err := sess.Where(\"org_id=? AND uid=?\", orgId, uid).Get(&models.DataSource{})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif !exists {\n\t\t\treturn uid, nil\n\t\t}\n\t}\n\n\treturn \"\", models.ErrDataSourceFailedGenerateUniqueUid\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage deploy\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/color\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/deploy\/resource\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/event\"\n\tpkgkubernetes \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/kubernetes\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\/runcontext\"\n)\n\nvar (\n\tdefaultStatusCheckDeadline = 2 * time.Minute\n\n\t\/\/ Poll period for checking set to 100 milliseconds\n\tdefaultPollPeriodInMilliseconds = 100\n\n\t\/\/ report resource status for pending resources 0.5 second.\n\treportStatusTime = 500 * time.Millisecond\n)\n\nconst (\n\ttabHeader = \" -\"\n)\n\ntype resourceCounter struct {\n\tdeployments *counter\n\tpods *counter\n}\n\ntype counter struct {\n\ttotal int\n\tpending int32\n\tfailed int32\n}\n\nfunc StatusCheck(ctx context.Context, defaultLabeller *DefaultLabeller, runCtx *runcontext.RunContext, out io.Writer) error {\n\tclient, err := pkgkubernetes.Client()\n\tevent.StatusCheckEventStarted()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting Kubernetes client: %w\", err)\n\t}\n\n\tdeployments, err := getDeployments(client, runCtx.Opts.Namespace, defaultLabeller,\n\t\tgetDeadline(runCtx.Cfg.Deploy.StatusCheckDeadlineSeconds))\n\n\tdeadline := statusCheckMaxDeadline(runCtx.Cfg.Deploy.StatusCheckDeadlineSeconds, deployments)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch deployments: %w\", err)\n\t}\n\n\tvar wg sync.WaitGroup\n\n\trc := newResourceCounter(len(deployments))\n\n\tfor _, d := range deployments {\n\t\twg.Add(1)\n\t\tgo func(r Resource) {\n\t\t\tdefer wg.Done()\n\t\t\tpollResourceStatus(ctx, runCtx, r)\n\t\t\trcCopy := rc.markProcessed(r.Status().Error())\n\t\t\tprintStatusCheckSummary(out, r, rcCopy)\n\t\t}(d)\n\t}\n\n\t\/\/ Retrieve pending resource states\n\tgo func() {\n\t\tprintResourceStatus(ctx, out, deployments, deadline)\n\t}()\n\n\t\/\/ Wait for all deployment status to be fetched\n\twg.Wait()\n\treturn getSkaffoldDeployStatus(rc.deployments)\n}\n\nfunc getDeployments(client kubernetes.Interface, ns string, l *DefaultLabeller, deadlineDuration time.Duration) ([]Resource, error) {\n\tdeps, err := client.AppsV1().Deployments(ns).List(metav1.ListOptions{\n\t\tLabelSelector: l.RunIDKeyValueString(),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not fetch deployments: %w\", err)\n\t}\n\n\tdeployments := make([]Resource, 0, len(deps.Items))\n\tfor _, d := range deps.Items {\n\t\tvar deadline time.Duration\n\t\tif d.Spec.ProgressDeadlineSeconds == nil {\n\t\t\tdeadline = deadlineDuration\n\t\t} else {\n\t\t\tdeadline = time.Duration(*d.Spec.ProgressDeadlineSeconds) * time.Second\n\t\t}\n\t\tdeployments = append(deployments, resource.NewDeployment(d.Name, d.Namespace, deadline))\n\t}\n\n\treturn deployments, nil\n}\n\nfunc pollResourceStatus(ctx context.Context, runCtx *runcontext.RunContext, r Resource) {\n\tpollDuration := time.Duration(defaultPollPeriodInMilliseconds) * time.Millisecond\n\t\/\/ Add poll duration to account for one last attempt after progressDeadlineSeconds.\n\ttimeoutContext, cancel := context.WithTimeout(ctx, r.Deadline()+pollDuration)\n\tlogrus.Debugf(\"checking status %s\", r)\n\tdefer cancel()\n\tfor {\n\t\tselect {\n\t\tcase <-timeoutContext.Done():\n\t\t\terr := fmt.Errorf(\"could not stabilize within %v: %w\", r.Deadline(), timeoutContext.Err())\n\t\t\tr.UpdateStatus(err.Error(), err)\n\t\t\treturn\n\t\tcase <-time.After(pollDuration):\n\t\t\tr.CheckStatus(timeoutContext, runCtx)\n\t\t\tif r.IsStatusCheckComplete() {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getSkaffoldDeployStatus(c *counter) error {\n\tif c.failed == 0 {\n\t\tevent.StatusCheckEventSucceeded()\n\t\treturn nil\n\t}\n\terr := fmt.Errorf(\"%d\/%d deployment(s) failed\", c.failed, c.total)\n\tevent.StatusCheckEventFailed(err)\n\treturn err\n}\n\nfunc getDeadline(d int) time.Duration {\n\tif d > 0 {\n\t\treturn time.Duration(d) * time.Second\n\t}\n\treturn defaultStatusCheckDeadline\n}\n\nfunc printStatusCheckSummary(out io.Writer, r Resource, rc resourceCounter) {\n\tstatus := fmt.Sprintf(\"%s %s\", tabHeader, r)\n\tif err := r.Status().Error(); err != nil {\n\t\tevent.ResourceStatusCheckEventFailed(r.String(), err)\n\t\tstatus = fmt.Sprintf(\"%s failed.%s Error: %s.\",\n\t\t\tstatus,\n\t\t\ttrimNewLine(getPendingMessage(rc.deployments.pending, rc.deployments.total)),\n\t\t\ttrimNewLine(err.Error()),\n\t\t)\n\t} else {\n\t\tevent.ResourceStatusCheckEventSucceeded(r.String())\n\t\tstatus = fmt.Sprintf(\"%s is ready.%s\", status, getPendingMessage(rc.deployments.pending, rc.deployments.total))\n\t}\n\tcolor.Default.Fprintln(out, status)\n}\n\n\/\/ Print resource statuses until all status check are completed or context is cancelled.\nfunc printResourceStatus(ctx context.Context, out io.Writer, resources []Resource, deadline time.Duration) {\n\ttimeoutContext, cancel := context.WithTimeout(ctx, deadline)\n\tdefer cancel()\n\tfor {\n\t\tvar allResourcesCheckComplete bool\n\t\tselect {\n\t\tcase <-timeoutContext.Done():\n\t\t\treturn\n\t\tcase <-time.After(reportStatusTime):\n\t\t\tallResourcesCheckComplete = printStatus(resources, out)\n\t\t}\n\t\tif allResourcesCheckComplete {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc printStatus(resources []Resource, out io.Writer) bool {\n\tallResourcesCheckComplete := true\n\tfor _, r := range resources {\n\t\tif r.IsStatusCheckComplete() {\n\t\t\tcontinue\n\t\t}\n\t\tallResourcesCheckComplete = false\n\t\tif str := r.ReportSinceLastUpdated(); str != \"\" {\n\t\t\tevent.ResourceStatusCheckEventUpdated(r.String(), str)\n\t\t\tcolor.Default.Fprintln(out, tabHeader, trimNewLine(str))\n\t\t}\n\t}\n\treturn allResourcesCheckComplete\n}\n\nfunc getPendingMessage(pending int32, total int) string {\n\tif pending > 0 {\n\t\treturn fmt.Sprintf(\" [%d\/%d deployment(s) still pending]\", pending, total)\n\t}\n\treturn \"\"\n}\n\nfunc trimNewLine(msg string) string {\n\treturn strings.TrimSuffix(msg, \"\\n\")\n}\n\nfunc newCounter(i int) *counter {\n\treturn &counter{\n\t\ttotal: i,\n\t\tpending: int32(i),\n\t}\n}\n\nfunc (c *counter) markProcessed(err error) counter {\n\tif err != nil {\n\t\tatomic.AddInt32(&c.failed, 1)\n\t}\n\tatomic.AddInt32(&c.pending, -1)\n\treturn c.copy()\n}\n\nfunc (c *counter) copy() counter {\n\treturn counter{\n\t\ttotal: c.total,\n\t\tpending: c.pending,\n\t\tfailed: c.failed,\n\t}\n}\n\nfunc newResourceCounter(d int) *resourceCounter {\n\treturn &resourceCounter{\n\t\tdeployments: newCounter(d),\n\t\tpods: newCounter(0),\n\t}\n}\n\nfunc (c *resourceCounter) markProcessed(err error) resourceCounter {\n\tdepCp := c.deployments.markProcessed(err)\n\tpodCp := c.pods.copy()\n\treturn resourceCounter{\n\t\tdeployments: &depCp,\n\t\tpods: &podCp,\n\t}\n}\n\nfunc statusCheckMaxDeadline(value int, deployments []Resource) time.Duration {\n\tif value > 0 {\n\t\treturn time.Duration(value) * time.Second\n\t}\n\td := time.Duration(0)\n\tfor _, r := range deployments {\n\t\tif r.Deadline() > d {\n\t\t\td = r.Deadline()\n\t\t}\n\t}\n\treturn d\n}\n<commit_msg>fix max deadline logic<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage deploy\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/color\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/deploy\/resource\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/event\"\n\tpkgkubernetes \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/kubernetes\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\/runcontext\"\n)\n\nvar (\n\tdefaultStatusCheckDeadline = 2 * time.Minute\n\n\t\/\/ Poll period for checking set to 100 milliseconds\n\tdefaultPollPeriodInMilliseconds = 100\n\n\t\/\/ report resource status for pending resources 0.5 second.\n\treportStatusTime = 500 * time.Millisecond\n)\n\nconst (\n\ttabHeader = \" -\"\n\tkubernetesMaxDeadline = 600\n)\n\ntype resourceCounter struct {\n\tdeployments *counter\n\tpods *counter\n}\n\ntype counter struct {\n\ttotal int\n\tpending int32\n\tfailed int32\n}\n\nfunc StatusCheck(ctx context.Context, defaultLabeller *DefaultLabeller, runCtx *runcontext.RunContext, out io.Writer) error {\n\tclient, err := pkgkubernetes.Client()\n\tevent.StatusCheckEventStarted()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting Kubernetes client: %w\", err)\n\t}\n\n\tdeployments, err := getDeployments(client, runCtx.Opts.Namespace, defaultLabeller,\n\t\tgetDeadline(runCtx.Cfg.Deploy.StatusCheckDeadlineSeconds))\n\n\tdeadline := statusCheckMaxDeadline(runCtx.Cfg.Deploy.StatusCheckDeadlineSeconds, deployments)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch deployments: %w\", err)\n\t}\n\n\tvar wg sync.WaitGroup\n\n\trc := newResourceCounter(len(deployments))\n\n\tfor _, d := range deployments {\n\t\twg.Add(1)\n\t\tgo func(r Resource) {\n\t\t\tdefer wg.Done()\n\t\t\tpollResourceStatus(ctx, runCtx, r)\n\t\t\trcCopy := rc.markProcessed(r.Status().Error())\n\t\t\tprintStatusCheckSummary(out, r, rcCopy)\n\t\t}(d)\n\t}\n\n\t\/\/ Retrieve pending resource states\n\tgo func() {\n\t\tprintResourceStatus(ctx, out, deployments, deadline)\n\t}()\n\n\t\/\/ Wait for all deployment status to be fetched\n\twg.Wait()\n\treturn getSkaffoldDeployStatus(rc.deployments)\n}\n\nfunc getDeployments(client kubernetes.Interface, ns string, l *DefaultLabeller, deadlineDuration time.Duration) ([]Resource, error) {\n\tdeps, err := client.AppsV1().Deployments(ns).List(metav1.ListOptions{\n\t\tLabelSelector: l.RunIDKeyValueString(),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not fetch deployments: %w\", err)\n\t}\n\n\tdeployments := make([]Resource, 0, len(deps.Items))\n\tfor _, d := range deps.Items {\n\t\tvar deadline time.Duration\n\t\tif d.Spec.ProgressDeadlineSeconds == nil || *d.Spec.ProgressDeadlineSeconds == kubernetesMaxDeadline {\n\t\t\tdeadline = deadlineDuration\n\t\t} else {\n\t\t\tdeadline = time.Duration(*d.Spec.ProgressDeadlineSeconds) * time.Second\n\t\t}\n\t\tdeployments = append(deployments, resource.NewDeployment(d.Name, d.Namespace, deadline))\n\t}\n\n\treturn deployments, nil\n}\n\nfunc pollResourceStatus(ctx context.Context, runCtx *runcontext.RunContext, r Resource) {\n\tpollDuration := time.Duration(defaultPollPeriodInMilliseconds) * time.Millisecond\n\t\/\/ Add poll duration to account for one last attempt after progressDeadlineSeconds.\n\ttimeoutContext, cancel := context.WithTimeout(ctx, r.Deadline()+pollDuration)\n\tlogrus.Debugf(\"checking status %s\", r)\n\tdefer cancel()\n\tfor {\n\t\tselect {\n\t\tcase <-timeoutContext.Done():\n\t\t\terr := fmt.Errorf(\"could not stabilize within %v: %w\", r.Deadline(), timeoutContext.Err())\n\t\t\tr.UpdateStatus(err.Error(), err)\n\t\t\treturn\n\t\tcase <-time.After(pollDuration):\n\t\t\tr.CheckStatus(timeoutContext, runCtx)\n\t\t\tif r.IsStatusCheckComplete() {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getSkaffoldDeployStatus(c *counter) error {\n\tif c.failed == 0 {\n\t\tevent.StatusCheckEventSucceeded()\n\t\treturn nil\n\t}\n\terr := fmt.Errorf(\"%d\/%d deployment(s) failed\", c.failed, c.total)\n\tevent.StatusCheckEventFailed(err)\n\treturn err\n}\n\nfunc getDeadline(d int) time.Duration {\n\tif d > 0 {\n\t\treturn time.Duration(d) * time.Second\n\t}\n\treturn defaultStatusCheckDeadline\n}\n\nfunc printStatusCheckSummary(out io.Writer, r Resource, rc resourceCounter) {\n\tstatus := fmt.Sprintf(\"%s %s\", tabHeader, r)\n\tif err := r.Status().Error(); err != nil {\n\t\tevent.ResourceStatusCheckEventFailed(r.String(), err)\n\t\tstatus = fmt.Sprintf(\"%s failed.%s Error: %s.\",\n\t\t\tstatus,\n\t\t\ttrimNewLine(getPendingMessage(rc.deployments.pending, rc.deployments.total)),\n\t\t\ttrimNewLine(err.Error()),\n\t\t)\n\t} else {\n\t\tevent.ResourceStatusCheckEventSucceeded(r.String())\n\t\tstatus = fmt.Sprintf(\"%s is ready.%s\", status, getPendingMessage(rc.deployments.pending, rc.deployments.total))\n\t}\n\tcolor.Default.Fprintln(out, status)\n}\n\n\/\/ Print resource statuses until all status check are completed or context is cancelled.\nfunc printResourceStatus(ctx context.Context, out io.Writer, resources []Resource, deadline time.Duration) {\n\ttimeoutContext, cancel := context.WithTimeout(ctx, deadline)\n\tdefer cancel()\n\tfor {\n\t\tvar allResourcesCheckComplete bool\n\t\tselect {\n\t\tcase <-timeoutContext.Done():\n\t\t\treturn\n\t\tcase <-time.After(reportStatusTime):\n\t\t\tallResourcesCheckComplete = printStatus(resources, out)\n\t\t}\n\t\tif allResourcesCheckComplete {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc printStatus(resources []Resource, out io.Writer) bool {\n\tallResourcesCheckComplete := true\n\tfor _, r := range resources {\n\t\tif r.IsStatusCheckComplete() {\n\t\t\tcontinue\n\t\t}\n\t\tallResourcesCheckComplete = false\n\t\tif str := r.ReportSinceLastUpdated(); str != \"\" {\n\t\t\tevent.ResourceStatusCheckEventUpdated(r.String(), str)\n\t\t\tcolor.Default.Fprintln(out, tabHeader, trimNewLine(str))\n\t\t}\n\t}\n\treturn allResourcesCheckComplete\n}\n\nfunc getPendingMessage(pending int32, total int) string {\n\tif pending > 0 {\n\t\treturn fmt.Sprintf(\" [%d\/%d deployment(s) still pending]\", pending, total)\n\t}\n\treturn \"\"\n}\n\nfunc trimNewLine(msg string) string {\n\treturn strings.TrimSuffix(msg, \"\\n\")\n}\n\nfunc newCounter(i int) *counter {\n\treturn &counter{\n\t\ttotal: i,\n\t\tpending: int32(i),\n\t}\n}\n\nfunc (c *counter) markProcessed(err error) counter {\n\tif err != nil {\n\t\tatomic.AddInt32(&c.failed, 1)\n\t}\n\tatomic.AddInt32(&c.pending, -1)\n\treturn c.copy()\n}\n\nfunc (c *counter) copy() counter {\n\treturn counter{\n\t\ttotal: c.total,\n\t\tpending: c.pending,\n\t\tfailed: c.failed,\n\t}\n}\n\nfunc newResourceCounter(d int) *resourceCounter {\n\treturn &resourceCounter{\n\t\tdeployments: newCounter(d),\n\t\tpods: newCounter(0),\n\t}\n}\n\nfunc (c *resourceCounter) markProcessed(err error) resourceCounter {\n\tdepCp := c.deployments.markProcessed(err)\n\tpodCp := c.pods.copy()\n\treturn resourceCounter{\n\t\tdeployments: &depCp,\n\t\tpods: &podCp,\n\t}\n}\n\nfunc statusCheckMaxDeadline(value int, deployments []Resource) time.Duration {\n\tif value > 0 {\n\t\treturn time.Duration(value) * time.Second\n\t}\n\td := time.Duration(0)\n\tfor _, r := range deployments {\n\t\tif r.Deadline() > d {\n\t\t\td = r.Deadline()\n\t\t}\n\t}\n\treturn d\n}\n<|endoftext|>"} {"text":"<commit_before>package cairo\n\n\/\/#cgo pkg-config: cairo\n\/\/#include <stdlib.h>\n\/\/#include <cairo\/cairo.h>\nimport \"C\"\n\nimport (\n\t\"runtime\"\n\t\"unsafe\"\n)\n\ntype ImageSurface struct {\n\t*XtensionSurface\n\tformat Format\n\twidth, height, stride int\n}\n\nfunc newImg(s *C.cairo_surface_t, format Format, width, height, stride int) (ImageSurface, error) {\n\tS := ImageSurface{\n\t\tXtensionSurface: NewXtensionSurface(s),\n\t\tformat: format,\n\t\twidth: width,\n\t\theight: height,\n\t\tstride: stride,\n\t}\n\treturn S, S.Err()\n}\n\n\/\/Originally cairo_image_surface_create.\nfunc NewImageSurface(format Format, width, height int) (ImageSurface, error) {\n\tis := C.cairo_image_surface_create(format.c(), C.int(width), C.int(height))\n\tstride := int(C.cairo_image_surface_get_stride(is))\n\treturn newImg(is, format, width, height, stride)\n}\n\nfunc cNewImageSurface(s *C.cairo_surface_t) (Surface, error) {\n\tformat := Format(C.cairo_image_surface_get_format(s))\n\twidth := int(C.cairo_image_surface_get_width(s))\n\theight := int(C.cairo_image_surface_get_height(s))\n\tstride := int(C.cairo_image_surface_get_stride(s))\n\n\treturn newImg(s, format, width, height, stride)\n}\n\n\/\/NewImageSurfaceFromPNG creates a new image surface and initalizes\n\/\/it with the given PNG file.\n\/\/\n\/\/Originally cairo_image_surface_create_from_png_stream.\nfunc NewImageSurfaceFromPNG(r Reader) (ImageSurface, error) {\n\trp := wrapReader(r)\n\tis := C.cairo_image_surface_create_from_png_stream(cairoreadfunct, rp)\n\ts, err := cNewImageSurface(is)\n\tS := s.(ImageSurface)\n\tif err != nil {\n\t\treturn S, err\n\t}\n\tS.registerReader(rp)\n\treturn S, nil\n}\n\n\/\/NewImageSurfaceFromPNGFile creates a new image surface and initializes\n\/\/it with the contents of the given PNG file.\n\/\/\n\/\/Originally cairo_image_surface_create_from_png.\nfunc NewImageSurfaceFromPNGFile(filename string) (ImageSurface, error) {\n\tf := C.CString(filename)\n\tis := C.cairo_image_surface_create_from_png(f)\n\tC.free(unsafe.Pointer(f))\n\ts, err := cNewImageSurface(is)\n\treturn s.(ImageSurface), err\n}\n\n\/\/BUG(jmf): ImageSurface: need safe wrapper around get_data\n\n\/\/BUG(jmf): ImageSurface: need image_surface_create_for_data analog(s)\n\n\/\/Format reports the format of the surface.\n\/\/\n\/\/Originally cairo_image_surface_get_format.\nfunc (i ImageSurface) Format() Format {\n\treturn i.format\n}\n\n\/\/Width reports the width of the surface in pixels.\n\/\/\n\/\/Originally cairo_image_surface_get_width.\nfunc (i ImageSurface) Width() int {\n\treturn i.width\n}\n\n\/\/Height reports the height of the surface in pixels.\n\/\/\n\/\/Originally cairo_image_surface_get_height.\nfunc (i ImageSurface) Height() int {\n\treturn i.height\n}\n\n\/\/Stride reports the stride of the image surface in number of bytes.\n\/\/\n\/\/Originally cairo_image_surface_get_stride.\nfunc (i ImageSurface) Stride() int {\n\treturn i.stride\n}\n\ntype mappedImageSurface struct {\n\tImageSurface\n\tfrom *C.cairo_surface_t\n}\n\nfunc newMappedImageSurface(s, from *C.cairo_surface_t) (m mappedImageSurface, err error) {\n\tim, err1 := cNewImageSurface(s)\n\tif err1 != nil {\n\t\terr = err1\n\t\treturn\n\t}\n\t\/\/Clear default finalizer so GC doesn't call surface_destroy.\n\t\/\/We do not set a new finalizer on mappedImageSurface.Close,\n\t\/\/because that would not do the right thing and the user is expected to close\n\t\/\/manually when done.\n\truntime.SetFinalizer(m.XtensionSurface, nil)\n\tm = mappedImageSurface{\n\t\tImageSurface: im.(ImageSurface),\n\t\tfrom: from,\n\t}\n\terr = m.Err()\n\tif err != nil {\n\t\tm.s = nil\n\t}\n\treturn\n}\n\nfunc (m mappedImageSurface) Close() error {\n\terr := m.Err()\n\tC.cairo_surface_unmap_image(m.from, m.s)\n\tm.from = nil\n\tm.s = nil\n\treturn err\n}\n<commit_msg>documented image surface and factory<commit_after>package cairo\n\n\/\/#cgo pkg-config: cairo\n\/\/#include <stdlib.h>\n\/\/#include <cairo\/cairo.h>\nimport \"C\"\n\nimport (\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/An ImageSurface is an in-memory surface.\ntype ImageSurface struct {\n\t*XtensionSurface\n\tformat Format\n\twidth, height, stride int\n}\n\nfunc newImg(s *C.cairo_surface_t, format Format, width, height, stride int) (ImageSurface, error) {\n\tS := ImageSurface{\n\t\tXtensionSurface: NewXtensionSurface(s),\n\t\tformat: format,\n\t\twidth: width,\n\t\theight: height,\n\t\tstride: stride,\n\t}\n\treturn S, S.Err()\n}\n\n\/\/NewImageSurface creates an image surface of the given width, height,\n\/\/and format.\n\/\/\n\/\/Originally cairo_image_surface_create.\nfunc NewImageSurface(format Format, width, height int) (ImageSurface, error) {\n\tis := C.cairo_image_surface_create(format.c(), C.int(width), C.int(height))\n\tstride := int(C.cairo_image_surface_get_stride(is))\n\treturn newImg(is, format, width, height, stride)\n}\n\nfunc cNewImageSurface(s *C.cairo_surface_t) (Surface, error) {\n\tformat := Format(C.cairo_image_surface_get_format(s))\n\twidth := int(C.cairo_image_surface_get_width(s))\n\theight := int(C.cairo_image_surface_get_height(s))\n\tstride := int(C.cairo_image_surface_get_stride(s))\n\n\treturn newImg(s, format, width, height, stride)\n}\n\n\/\/NewImageSurfaceFromPNG creates a new image surface and initalizes\n\/\/it with the given PNG file.\n\/\/\n\/\/Originally cairo_image_surface_create_from_png_stream.\nfunc NewImageSurfaceFromPNG(r Reader) (ImageSurface, error) {\n\trp := wrapReader(r)\n\tis := C.cairo_image_surface_create_from_png_stream(cairoreadfunct, rp)\n\ts, err := cNewImageSurface(is)\n\tS := s.(ImageSurface)\n\tif err != nil {\n\t\treturn S, err\n\t}\n\tS.registerReader(rp)\n\treturn S, nil\n}\n\n\/\/NewImageSurfaceFromPNGFile creates a new image surface and initializes\n\/\/it with the contents of the given PNG file.\n\/\/\n\/\/Originally cairo_image_surface_create_from_png.\nfunc NewImageSurfaceFromPNGFile(filename string) (ImageSurface, error) {\n\tf := C.CString(filename)\n\tis := C.cairo_image_surface_create_from_png(f)\n\tC.free(unsafe.Pointer(f))\n\ts, err := cNewImageSurface(is)\n\treturn s.(ImageSurface), err\n}\n\n\/\/BUG(jmf): ImageSurface: need safe wrapper around get_data\n\n\/\/BUG(jmf): ImageSurface: need image_surface_create_for_data analog(s)\n\n\/\/Format reports the format of the surface.\n\/\/\n\/\/Originally cairo_image_surface_get_format.\nfunc (i ImageSurface) Format() Format {\n\treturn i.format\n}\n\n\/\/Width reports the width of the surface in pixels.\n\/\/\n\/\/Originally cairo_image_surface_get_width.\nfunc (i ImageSurface) Width() int {\n\treturn i.width\n}\n\n\/\/Height reports the height of the surface in pixels.\n\/\/\n\/\/Originally cairo_image_surface_get_height.\nfunc (i ImageSurface) Height() int {\n\treturn i.height\n}\n\n\/\/Stride reports the stride of the image surface in number of bytes.\n\/\/\n\/\/Originally cairo_image_surface_get_stride.\nfunc (i ImageSurface) Stride() int {\n\treturn i.stride\n}\n\ntype mappedImageSurface struct {\n\tImageSurface\n\tfrom *C.cairo_surface_t\n}\n\nfunc newMappedImageSurface(s, from *C.cairo_surface_t) (m mappedImageSurface, err error) {\n\tim, err1 := cNewImageSurface(s)\n\tif err1 != nil {\n\t\terr = err1\n\t\treturn\n\t}\n\t\/\/Clear default finalizer so GC doesn't call surface_destroy.\n\t\/\/We do not set a new finalizer on mappedImageSurface.Close,\n\t\/\/because that would not do the right thing and the user is expected to close\n\t\/\/manually when done.\n\truntime.SetFinalizer(m.XtensionSurface, nil)\n\tm = mappedImageSurface{\n\t\tImageSurface: im.(ImageSurface),\n\t\tfrom: from,\n\t}\n\terr = m.Err()\n\tif err != nil {\n\t\tm.s = nil\n\t}\n\treturn\n}\n\nfunc (m mappedImageSurface) Close() error {\n\terr := m.Err()\n\tC.cairo_surface_unmap_image(m.from, m.s)\n\tm.from = nil\n\tm.s = nil\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/viant\/toolbox\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nvar fileMode os.FileMode = 0644\nvar execFileMode os.FileMode = 0755\n\n\/\/Service represents abstract way to accessing local or remote storage\ntype fileStorageService struct{}\n\n\/\/List returns a list of object for supplied url\nfunc (s *fileStorageService) List(URL string) ([]Object, error) {\n\tfile, err := toolbox.OpenFile(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tstat, err := file.Stat()\n\tif err == nil {\n\t\tif !stat.IsDir() {\n\t\t\treturn []Object{\n\t\t\t\tnewFileObject(URL, stat),\n\t\t\t}, nil\n\t\t}\n\t}\n\tfiles, err := file.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar result = make([]Object, 0)\n\tresult = append(result, newFileObject(URL, stat))\n\n\tvar parsedURL, _ = url.Parse(URL)\n\tfor _, fileInfo := range files {\n\t\tvar fileName = fileInfo.Name()\n\t\tif parsedURL != nil {\n\t\t\tfileName = strings.Replace(fileName, parsedURL.Path, \"\", 1)\n\t\t}\n\t\tfileURL := toolbox.URLPathJoin(URL, fileName)\n\t\tresult = append(result, newFileObject(fileURL, fileInfo))\n\t}\n\treturn result, nil\n}\n\n\/\/Exists returns true if resource exists\nfunc (s *fileStorageService) Exists(URL string) (bool, error) {\n\tparsedUrl, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif parsedUrl.Scheme != \"file\" {\n\t\treturn false, fmt.Errorf(\"Invalid schema, expected file but had: %v\", parsedUrl.Scheme)\n\t}\n\treturn toolbox.FileExists(parsedUrl.Path), nil\n}\n\nfunc (s *fileStorageService) Close() error {\n\treturn nil\n}\n\n\/\/Object returns a Object for supplied url\nfunc (s *fileStorageService) StorageObject(URL string) (Object, error) {\n\tfile, err := toolbox.OpenFile(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tfileInfo, err := os.Stat(file.Name())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newFileObject(URL, fileInfo), nil\n}\n\n\/\/Download returns reader for downloaded storage object\nfunc (s *fileStorageService) Download(object Object) (io.ReadCloser, error) {\n\treturn toolbox.OpenFile(object.URL())\n}\n\n\/\/Upload uploads provided reader content for supplied url.\nfunc (s *fileStorageService) Upload(URL string, reader io.Reader) error {\n\tparsedUrl, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif parsedUrl.Scheme != \"file\" {\n\t\treturn fmt.Errorf(\"Invalid schema, expected file but had: %v\", parsedUrl.Scheme)\n\t}\n\n\tparentDir, _ := path.Split(parsedUrl.Path)\n\n\terr = toolbox.CreateDirIfNotExist(parentDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif path.Ext(parsedUrl.Path) == \"\" {\n\t\treturn ioutil.WriteFile(parsedUrl.Path, data, execFileMode)\n\t}\n\treturn ioutil.WriteFile(parsedUrl.Path, data, fileMode)\n}\n\nfunc (s *fileStorageService) Register(schema string, service Service) error {\n\treturn errors.New(\"unsupported\")\n}\n\n\/\/Delete removes passed in storage object\nfunc (s *fileStorageService) Delete(object Object) error {\n\tfileName := toolbox.Filename(object.URL())\n\treturn os.Remove(fileName)\n}\n\ntype fileStorageObject struct {\n\t*AbstractObject\n}\n\nfunc NewFileStorage() Service {\n\treturn &fileStorageService{}\n}\n\nfunc (o *fileStorageObject) Unwrap(target interface{}) error {\n\tif fileInfo, casted := target.(*os.FileInfo); casted {\n\t\tsource, ok := o.Source.(os.FileInfo)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to cast %T into %T\", o.Source, target)\n\t\t}\n\t\t*fileInfo = source\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unsuported target %T\", target)\n}\n\nfunc (o *fileStorageObject) FileInfo() os.FileInfo {\n\tif source, ok := o.Source.(os.FileInfo); ok {\n\t\treturn source\n\t}\n\treturn nil\n}\n\nfunc newFileObject(url string, fileInfo os.FileInfo) Object {\n\tabstract := NewAbstractStorageObject(url, fileInfo, fileInfo)\n\tresult := &fileStorageObject{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}\n<commit_msg>added DownloadWithURL to storage service<commit_after>package storage\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/viant\/toolbox\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nvar fileMode os.FileMode = 0644\nvar execFileMode os.FileMode = 0755\n\n\/\/Service represents abstract way to accessing local or remote storage\ntype fileStorageService struct{}\n\n\/\/List returns a list of object for supplied url\nfunc (s *fileStorageService) List(URL string) ([]Object, error) {\n\tfile, err := toolbox.OpenFile(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tstat, err := file.Stat()\n\tif err == nil {\n\t\tif !stat.IsDir() {\n\t\t\treturn []Object{\n\t\t\t\tnewFileObject(URL, stat),\n\t\t\t}, nil\n\t\t}\n\t}\n\tfiles, err := file.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar result = make([]Object, 0)\n\tresult = append(result, newFileObject(URL, stat))\n\n\tvar parsedURL, _ = url.Parse(URL)\n\tfor _, fileInfo := range files {\n\t\tvar fileName = fileInfo.Name()\n\t\tif parsedURL != nil {\n\t\t\tfileName = strings.Replace(fileName, parsedURL.Path, \"\", 1)\n\t\t}\n\t\tfileURL := toolbox.URLPathJoin(URL, fileName)\n\t\tresult = append(result, newFileObject(fileURL, fileInfo))\n\t}\n\treturn result, nil\n}\n\n\/\/Exists returns true if resource exists\nfunc (s *fileStorageService) Exists(URL string) (bool, error) {\n\tparsedUrl, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif parsedUrl.Scheme != \"file\" {\n\t\treturn false, fmt.Errorf(\"Invalid schema, expected file but had: %v\", parsedUrl.Scheme)\n\t}\n\treturn toolbox.FileExists(parsedUrl.Path), nil\n}\n\nfunc (s *fileStorageService) Close() error {\n\treturn nil\n}\n\n\/\/Object returns a Object for supplied url\nfunc (s *fileStorageService) StorageObject(URL string) (Object, error) {\n\tfile, err := toolbox.OpenFile(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tfileInfo, err := os.Stat(file.Name())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newFileObject(URL, fileInfo), nil\n}\n\n\/\/Download returns reader for downloaded storage object\nfunc (s *fileStorageService) Download(object Object) (io.ReadCloser, error) {\n\treturn toolbox.OpenFile(object.URL())\n}\n\n\n\/\/DownloadWithURL downloads content for passed in object URL\nfunc (s *fileStorageService) DownloadWithURL(URL string) (io.ReadCloser, error) {\n\tobject, err := s.StorageObject(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Download(object)\n}\n\n\n\/\/Upload uploads provided reader content for supplied url.\nfunc (s *fileStorageService) Upload(URL string, reader io.Reader) error {\n\tparsedUrl, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif parsedUrl.Scheme != \"file\" {\n\t\treturn fmt.Errorf(\"Invalid schema, expected file but had: %v\", parsedUrl.Scheme)\n\t}\n\n\tparentDir, _ := path.Split(parsedUrl.Path)\n\n\terr = toolbox.CreateDirIfNotExist(parentDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif path.Ext(parsedUrl.Path) == \"\" {\n\t\treturn ioutil.WriteFile(parsedUrl.Path, data, execFileMode)\n\t}\n\treturn ioutil.WriteFile(parsedUrl.Path, data, fileMode)\n}\n\nfunc (s *fileStorageService) Register(schema string, service Service) error {\n\treturn errors.New(\"unsupported\")\n}\n\n\/\/Delete removes passed in storage object\nfunc (s *fileStorageService) Delete(object Object) error {\n\tfileName := toolbox.Filename(object.URL())\n\treturn os.Remove(fileName)\n}\n\ntype fileStorageObject struct {\n\t*AbstractObject\n}\n\nfunc NewFileStorage() Service {\n\treturn &fileStorageService{}\n}\n\nfunc (o *fileStorageObject) Unwrap(target interface{}) error {\n\tif fileInfo, casted := target.(*os.FileInfo); casted {\n\t\tsource, ok := o.Source.(os.FileInfo)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to cast %T into %T\", o.Source, target)\n\t\t}\n\t\t*fileInfo = source\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unsuported target %T\", target)\n}\n\nfunc (o *fileStorageObject) FileInfo() os.FileInfo {\n\tif source, ok := o.Source.(os.FileInfo); ok {\n\t\treturn source\n\t}\n\treturn nil\n}\n\nfunc newFileObject(url string, fileInfo os.FileInfo) Object {\n\tabstract := NewAbstractStorageObject(url, fileInfo, fileInfo)\n\tresult := &fileStorageObject{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/proto\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/google\/btree\"\n)\n\n\/\/ Test implementation of a range set backed by btree.BTree.\ntype testRangeSet struct {\n\tsync.Mutex\n\trangesByKey *btree.BTree\n\tvisited int\n}\n\n\/\/ newTestRangeSet creates a new range set that has the count number of ranges.\nfunc newTestRangeSet(count int, t *testing.T) *testRangeSet {\n\trs := &testRangeSet{rangesByKey: btree.New(64 \/* degree *\/)}\n\tfor i := 0; i < count; i++ {\n\t\tdesc := &proto.RangeDescriptor{\n\t\t\tRaftID: proto.RaftID(i),\n\t\t\tStartKey: proto.Key(fmt.Sprintf(\"%03d\", i)),\n\t\t\tEndKey: proto.Key(fmt.Sprintf(\"%03d\", i+1)),\n\t\t}\n\t\t\/\/ Initialize the range stat so the scanner can use it.\n\t\trng := &Range{\n\t\t\tstats: &rangeStats{\n\t\t\t\traftID: desc.RaftID,\n\t\t\t\tMVCCStats: proto.MVCCStats{\n\t\t\t\t\tKeyBytes: 1,\n\t\t\t\t\tValBytes: 2,\n\t\t\t\t\tKeyCount: 1,\n\t\t\t\t\tLiveCount: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif err := rng.setDesc(desc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif exRngItem := rs.rangesByKey.ReplaceOrInsert(rng); exRngItem != nil {\n\t\t\tt.Fatalf(\"failed to insert range %s\", rng)\n\t\t}\n\t}\n\treturn rs\n}\n\nfunc (rs *testRangeSet) Visit(visitor func(*Range) bool) {\n\trs.Lock()\n\tdefer rs.Unlock()\n\trs.visited = 0\n\trs.rangesByKey.Ascend(func(i btree.Item) bool {\n\t\trs.visited++\n\t\trs.Unlock()\n\t\tdefer rs.Lock()\n\t\treturn visitor(i.(*Range))\n\t})\n}\n\nfunc (rs *testRangeSet) EstimatedCount() int {\n\trs.Lock()\n\tdefer rs.Unlock()\n\tcount := rs.rangesByKey.Len() - rs.visited\n\tif count < 1 {\n\t\tcount = 1\n\t}\n\treturn count\n}\n\n\/\/ removeRange removes the i-th range from the range set.\nfunc (rs *testRangeSet) remove(index int, t *testing.T) *Range {\n\tendKey := proto.Key(fmt.Sprintf(\"%03d\", index+1))\n\trs.Lock()\n\tdefer rs.Unlock()\n\trng := rs.rangesByKey.Delete((rangeBTreeKey)(endKey))\n\tif rng == nil {\n\t\tt.Fatalf(\"failed to delete range of end key %s\", endKey)\n\t}\n\treturn rng.(*Range)\n}\n\n\/\/ Test implementation of a range queue which adds range to an\n\/\/ internal slice.\ntype testQueue struct {\n\tsync.Mutex \/\/ Protects ranges, done & processed count\n\tranges []*Range\n\tdone bool\n\tprocessed int\n\tdisabled bool\n}\n\n\/\/ setDisabled suspends processing of items from the queue.\nfunc (tq *testQueue) setDisabled(d bool) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\ttq.disabled = d\n}\n\nfunc (tq *testQueue) Start(clock *hlc.Clock, stopper *util.Stopper) {\n\tstopper.RunWorker(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(1 * time.Millisecond):\n\t\t\t\ttq.Lock()\n\t\t\t\tif !tq.disabled && len(tq.ranges) > 0 {\n\t\t\t\t\ttq.ranges = tq.ranges[1:]\n\t\t\t\t\ttq.processed++\n\t\t\t\t}\n\t\t\t\ttq.Unlock()\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\ttq.Lock()\n\t\t\t\ttq.done = true\n\t\t\t\ttq.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (tq *testQueue) MaybeAdd(rng *Range, now proto.Timestamp) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\tif index := tq.indexOf(rng); index == -1 {\n\t\ttq.ranges = append(tq.ranges, rng)\n\t}\n}\n\nfunc (tq *testQueue) MaybeRemove(rng *Range) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\tif index := tq.indexOf(rng); index != -1 {\n\t\ttq.ranges = append(tq.ranges[:index], tq.ranges[index+1:]...)\n\t}\n}\n\nfunc (tq *testQueue) count() int {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\treturn len(tq.ranges)\n}\n\nfunc (tq *testQueue) indexOf(rng *Range) int {\n\tfor i, r := range tq.ranges {\n\t\tif r == rng {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (tq *testQueue) isDone() bool {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\treturn tq.done\n}\n\n\/\/ TestScannerAddToQueues verifies that ranges are added to and\n\/\/ removed from multiple queues.\nfunc TestScannerAddToQueues(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tranges := newTestRangeSet(count, t)\n\tq1, q2 := &testQueue{}, &testQueue{}\n\t\/\/ We don't want to actually consume entries from the queues during this test.\n\tq1.setDisabled(true)\n\tq2.setDisabled(true)\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q1, q2)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\tstopper := util.NewStopper()\n\n\t\/\/ Start queue and verify that all ranges are added to both queues.\n\ts.Start(clock, stopper)\n\tif err := util.IsTrueWithin(func() bool {\n\t\treturn q1.count() == count && q2.count() == count\n\t}, 50*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Remove first range and verify it does not exist in either range.\n\trng := ranges.remove(0, t)\n\ts.RemoveRange(rng)\n\tif err := util.IsTrueWithin(func() bool {\n\t\treturn q1.count() == count-1 && q2.count() == count-1\n\t}, 10*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Stop scanner and verify both queues are stopped.\n\tstopper.Stop()\n\tif !q1.isDone() || !q2.isDone() {\n\t\tt.Errorf(\"expected all queues to stop; got %t, %t\", q1.isDone(), q2.isDone())\n\t}\n}\n\n\/\/ TestScannerTiming verifies that ranges are scanned, regardless\n\/\/ of how many, to match scanInterval.\nfunc TestScannerTiming(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tconst runTime = 100 * time.Millisecond\n\tconst maxError = 7500 * time.Microsecond\n\tdurations := []time.Duration{\n\t\t10 * time.Millisecond,\n\t\t25 * time.Millisecond,\n\t}\n\tfor i, duration := range durations {\n\t\tranges := newTestRangeSet(count, t)\n\t\tq := &testQueue{}\n\t\ts := newRangeScanner(duration, 0, ranges, nil)\n\t\ts.AddQueues(q)\n\t\tmc := hlc.NewManualClock(0)\n\t\tclock := hlc.NewClock(mc.UnixNano)\n\t\tstopper := util.NewStopper()\n\t\tdefer stopper.Stop()\n\t\ts.Start(clock, stopper)\n\t\ttime.Sleep(runTime)\n\n\t\tavg := s.avgScan()\n\t\tlog.Infof(\"%d: average scan: %s\\n\", i, avg)\n\t\tif avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||\n\t\t\tduration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {\n\t\t\tt.Errorf(\"expected %s, got %s: exceeds max error of %s\", duration, avg, maxError)\n\t\t}\n\t}\n}\n\n\/\/ TestScannerPaceInterval tests that paceInterval returns the correct interval.\nfunc TestScannerPaceInterval(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tdurations := []time.Duration{\n\t\t30 * time.Millisecond,\n\t\t60 * time.Millisecond,\n\t\t500 * time.Millisecond,\n\t}\n\t\/\/ function logs an error when the actual value is not close\n\t\/\/ to the expected value\n\tlogErrorWhenNotCloseTo := func(expected, actual time.Duration) {\n\t\tdelta := 1 * time.Millisecond\n\t\tif actual < expected-delta || actual > expected+delta {\n\t\t\tt.Errorf(\"Expected duration %s, got %s\", expected, actual)\n\t\t}\n\t}\n\tfor _, duration := range durations {\n\t\tstartTime := time.Now()\n\t\tranges := newTestRangeSet(count, t)\n\t\ts := newRangeScanner(duration, 0, ranges, nil)\n\t\tinterval := s.paceInterval(startTime, startTime)\n\t\tlogErrorWhenNotCloseTo(duration\/count, interval)\n\t\t\/\/ The range set is empty\n\t\tranges = newTestRangeSet(0, t)\n\t\ts = newRangeScanner(duration, 0, ranges, nil)\n\t\tinterval = s.paceInterval(startTime, startTime)\n\t\tlogErrorWhenNotCloseTo(duration, interval)\n\t\tranges = newTestRangeSet(count, t)\n\t\ts = newRangeScanner(duration, 0, ranges, nil)\n\t\t\/\/ Move the present to duration time into the future\n\t\tinterval = s.paceInterval(startTime, startTime.Add(duration))\n\t\tlogErrorWhenNotCloseTo(0, interval)\n\t}\n}\n\n\/\/ TestScannerEmptyRangeSet verifies that an empty range set doesn't busy loop.\nfunc TestScannerEmptyRangeSet(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tranges := newTestRangeSet(0, t)\n\tq := &testQueue{}\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\tstopper := util.NewStopper()\n\tdefer stopper.Stop()\n\ts.Start(clock, stopper)\n\ttime.Sleep(3 * time.Millisecond)\n\tif count := s.Count(); count > 3 {\n\t\tt.Errorf(\"expected three loops; got %d\", count)\n\t}\n}\n\n\/\/ TestScannerStats verifies that stats accumulate from all ranges.\nfunc TestScannerStats(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tranges := newTestRangeSet(count, t)\n\tq := &testQueue{}\n\tstopper := util.NewStopper()\n\tdefer stopper.Stop()\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\t\/\/ At start, scanner stats should be blank for MVCC, but have accurate number of ranges.\n\tif rc := s.Stats().RangeCount; rc != count {\n\t\tt.Errorf(\"range count expected %d; got %d\", count, rc)\n\t}\n\tif vb := s.Stats().MVCC.ValBytes; vb != 0 {\n\t\tt.Errorf(\"value bytes expected %d; got %d\", 0, vb)\n\t}\n\ts.Start(clock, stopper)\n\t\/\/ We expect a full run to accumulate stats from all ranges.\n\tif err := util.IsTrueWithin(func() bool {\n\t\tif rc := s.Stats().RangeCount; rc != count {\n\t\t\treturn false\n\t\t}\n\t\tif vb := s.Stats().MVCC.ValBytes; vb != count*2 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}, 100*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n}\n<commit_msg>fix TestScannerAddToQueues<commit_after>\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/proto\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/google\/btree\"\n)\n\n\/\/ Test implementation of a range set backed by btree.BTree.\ntype testRangeSet struct {\n\tsync.Mutex\n\trangesByKey *btree.BTree\n\tvisited int\n}\n\n\/\/ newTestRangeSet creates a new range set that has the count number of ranges.\nfunc newTestRangeSet(count int, t *testing.T) *testRangeSet {\n\trs := &testRangeSet{rangesByKey: btree.New(64 \/* degree *\/)}\n\tfor i := 0; i < count; i++ {\n\t\tdesc := &proto.RangeDescriptor{\n\t\t\tRaftID: proto.RaftID(i),\n\t\t\tStartKey: proto.Key(fmt.Sprintf(\"%03d\", i)),\n\t\t\tEndKey: proto.Key(fmt.Sprintf(\"%03d\", i+1)),\n\t\t}\n\t\t\/\/ Initialize the range stat so the scanner can use it.\n\t\trng := &Range{\n\t\t\tstats: &rangeStats{\n\t\t\t\traftID: desc.RaftID,\n\t\t\t\tMVCCStats: proto.MVCCStats{\n\t\t\t\t\tKeyBytes: 1,\n\t\t\t\t\tValBytes: 2,\n\t\t\t\t\tKeyCount: 1,\n\t\t\t\t\tLiveCount: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif err := rng.setDesc(desc); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif exRngItem := rs.rangesByKey.ReplaceOrInsert(rng); exRngItem != nil {\n\t\t\tt.Fatalf(\"failed to insert range %s\", rng)\n\t\t}\n\t}\n\treturn rs\n}\n\nfunc (rs *testRangeSet) Visit(visitor func(*Range) bool) {\n\trs.Lock()\n\tdefer rs.Unlock()\n\trs.visited = 0\n\trs.rangesByKey.Ascend(func(i btree.Item) bool {\n\t\trs.visited++\n\t\trs.Unlock()\n\t\tdefer rs.Lock()\n\t\treturn visitor(i.(*Range))\n\t})\n}\n\nfunc (rs *testRangeSet) EstimatedCount() int {\n\trs.Lock()\n\tdefer rs.Unlock()\n\tcount := rs.rangesByKey.Len() - rs.visited\n\tif count < 1 {\n\t\tcount = 1\n\t}\n\treturn count\n}\n\n\/\/ removeRange removes the i-th range from the range set.\nfunc (rs *testRangeSet) remove(index int, t *testing.T) *Range {\n\tendKey := proto.Key(fmt.Sprintf(\"%03d\", index+1))\n\trs.Lock()\n\tdefer rs.Unlock()\n\trng := rs.rangesByKey.Delete((rangeBTreeKey)(endKey))\n\tif rng == nil {\n\t\tt.Fatalf(\"failed to delete range of end key %s\", endKey)\n\t}\n\treturn rng.(*Range)\n}\n\n\/\/ Test implementation of a range queue which adds range to an\n\/\/ internal slice.\ntype testQueue struct {\n\tsync.Mutex \/\/ Protects ranges, done & processed count\n\tranges []*Range\n\tdone bool\n\tprocessed int\n\tdisabled bool\n}\n\n\/\/ setDisabled suspends processing of items from the queue.\nfunc (tq *testQueue) setDisabled(d bool) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\ttq.disabled = d\n}\n\nfunc (tq *testQueue) Start(clock *hlc.Clock, stopper *util.Stopper) {\n\tstopper.RunWorker(func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(1 * time.Millisecond):\n\t\t\t\ttq.Lock()\n\t\t\t\tif !tq.disabled && len(tq.ranges) > 0 {\n\t\t\t\t\ttq.ranges = tq.ranges[1:]\n\t\t\t\t\ttq.processed++\n\t\t\t\t}\n\t\t\t\ttq.Unlock()\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\ttq.Lock()\n\t\t\t\ttq.done = true\n\t\t\t\ttq.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (tq *testQueue) MaybeAdd(rng *Range, now proto.Timestamp) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\tif index := tq.indexOf(rng); index == -1 {\n\t\ttq.ranges = append(tq.ranges, rng)\n\t}\n}\n\nfunc (tq *testQueue) MaybeRemove(rng *Range) {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\tif index := tq.indexOf(rng); index != -1 {\n\t\ttq.ranges = append(tq.ranges[:index], tq.ranges[index+1:]...)\n\t}\n}\n\nfunc (tq *testQueue) count() int {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\treturn len(tq.ranges)\n}\n\nfunc (tq *testQueue) indexOf(rng *Range) int {\n\tfor i, r := range tq.ranges {\n\t\tif r == rng {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (tq *testQueue) isDone() bool {\n\ttq.Lock()\n\tdefer tq.Unlock()\n\treturn tq.done\n}\n\n\/\/ TestScannerAddToQueues verifies that ranges are added to and\n\/\/ removed from multiple queues.\nfunc TestScannerAddToQueues(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tranges := newTestRangeSet(count, t)\n\tq1, q2 := &testQueue{}, &testQueue{}\n\t\/\/ We don't want to actually consume entries from the queues during this test.\n\tq1.setDisabled(true)\n\tq2.setDisabled(true)\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q1, q2)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\tstopper := util.NewStopper()\n\n\t\/\/ Start queue and verify that all ranges are added to both queues.\n\ts.Start(clock, stopper)\n\tif err := util.IsTrueWithin(func() bool {\n\t\treturn q1.count() == count && q2.count() == count\n\t}, 50*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Remove first range and verify it does not exist in either range.\n\trng := ranges.remove(0, t)\n\tif err := util.IsTrueWithin(func() bool {\n\t\t\/\/ This is intentionally inside the loop, otherwise this test races as\n\t\t\/\/ our removal of the range may be processed before a stray re-queue.\n\t\t\/\/ Removing on each attempt makes sure we clean this up as we retry.\n\t\ts.RemoveRange(rng)\n\t\tc1 := q1.count()\n\t\tc2 := q2.count()\n\t\tlog.Infof(\"q1: %d, q2: %d, wanted: %d\", c1, c2, count-1)\n\t\treturn c1 == count-1 && c2 == count-1\n\t}, time.Second); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Stop scanner and verify both queues are stopped.\n\tstopper.Stop()\n\tif !q1.isDone() || !q2.isDone() {\n\t\tt.Errorf(\"expected all queues to stop; got %t, %t\", q1.isDone(), q2.isDone())\n\t}\n}\n\n\/\/ TestScannerTiming verifies that ranges are scanned, regardless\n\/\/ of how many, to match scanInterval.\nfunc TestScannerTiming(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tconst runTime = 100 * time.Millisecond\n\tconst maxError = 7500 * time.Microsecond\n\tdurations := []time.Duration{\n\t\t10 * time.Millisecond,\n\t\t25 * time.Millisecond,\n\t}\n\tfor i, duration := range durations {\n\t\tranges := newTestRangeSet(count, t)\n\t\tq := &testQueue{}\n\t\ts := newRangeScanner(duration, 0, ranges, nil)\n\t\ts.AddQueues(q)\n\t\tmc := hlc.NewManualClock(0)\n\t\tclock := hlc.NewClock(mc.UnixNano)\n\t\tstopper := util.NewStopper()\n\t\tdefer stopper.Stop()\n\t\ts.Start(clock, stopper)\n\t\ttime.Sleep(runTime)\n\n\t\tavg := s.avgScan()\n\t\tlog.Infof(\"%d: average scan: %s\\n\", i, avg)\n\t\tif avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||\n\t\t\tduration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {\n\t\t\tt.Errorf(\"expected %s, got %s: exceeds max error of %s\", duration, avg, maxError)\n\t\t}\n\t}\n}\n\n\/\/ TestScannerPaceInterval tests that paceInterval returns the correct interval.\nfunc TestScannerPaceInterval(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tdurations := []time.Duration{\n\t\t30 * time.Millisecond,\n\t\t60 * time.Millisecond,\n\t\t500 * time.Millisecond,\n\t}\n\t\/\/ function logs an error when the actual value is not close\n\t\/\/ to the expected value\n\tlogErrorWhenNotCloseTo := func(expected, actual time.Duration) {\n\t\tdelta := 1 * time.Millisecond\n\t\tif actual < expected-delta || actual > expected+delta {\n\t\t\tt.Errorf(\"Expected duration %s, got %s\", expected, actual)\n\t\t}\n\t}\n\tfor _, duration := range durations {\n\t\tstartTime := time.Now()\n\t\tranges := newTestRangeSet(count, t)\n\t\ts := newRangeScanner(duration, 0, ranges, nil)\n\t\tinterval := s.paceInterval(startTime, startTime)\n\t\tlogErrorWhenNotCloseTo(duration\/count, interval)\n\t\t\/\/ The range set is empty\n\t\tranges = newTestRangeSet(0, t)\n\t\ts = newRangeScanner(duration, 0, ranges, nil)\n\t\tinterval = s.paceInterval(startTime, startTime)\n\t\tlogErrorWhenNotCloseTo(duration, interval)\n\t\tranges = newTestRangeSet(count, t)\n\t\ts = newRangeScanner(duration, 0, ranges, nil)\n\t\t\/\/ Move the present to duration time into the future\n\t\tinterval = s.paceInterval(startTime, startTime.Add(duration))\n\t\tlogErrorWhenNotCloseTo(0, interval)\n\t}\n}\n\n\/\/ TestScannerEmptyRangeSet verifies that an empty range set doesn't busy loop.\nfunc TestScannerEmptyRangeSet(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tranges := newTestRangeSet(0, t)\n\tq := &testQueue{}\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\tstopper := util.NewStopper()\n\tdefer stopper.Stop()\n\ts.Start(clock, stopper)\n\ttime.Sleep(3 * time.Millisecond)\n\tif count := s.Count(); count > 3 {\n\t\tt.Errorf(\"expected three loops; got %d\", count)\n\t}\n}\n\n\/\/ TestScannerStats verifies that stats accumulate from all ranges.\nfunc TestScannerStats(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tconst count = 3\n\tranges := newTestRangeSet(count, t)\n\tq := &testQueue{}\n\tstopper := util.NewStopper()\n\tdefer stopper.Stop()\n\ts := newRangeScanner(1*time.Millisecond, 0, ranges, nil)\n\ts.AddQueues(q)\n\tmc := hlc.NewManualClock(0)\n\tclock := hlc.NewClock(mc.UnixNano)\n\t\/\/ At start, scanner stats should be blank for MVCC, but have accurate number of ranges.\n\tif rc := s.Stats().RangeCount; rc != count {\n\t\tt.Errorf(\"range count expected %d; got %d\", count, rc)\n\t}\n\tif vb := s.Stats().MVCC.ValBytes; vb != 0 {\n\t\tt.Errorf(\"value bytes expected %d; got %d\", 0, vb)\n\t}\n\ts.Start(clock, stopper)\n\t\/\/ We expect a full run to accumulate stats from all ranges.\n\tif err := util.IsTrueWithin(func() bool {\n\t\tif rc := s.Stats().RangeCount; rc != count {\n\t\t\treturn false\n\t\t}\n\t\tif vb := s.Stats().MVCC.ValBytes; vb != count*2 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}, 100*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package etcdv3\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/projecteru2\/core\/engine\"\n\t\"github.com\/projecteru2\/core\/lock\"\n\t\"github.com\/projecteru2\/core\/lock\/etcdlock\"\n\t\"github.com\/projecteru2\/core\/store\/etcdv3\/embeded\"\n\t\"github.com\/projecteru2\/core\/types\"\n\t\"github.com\/projecteru2\/core\/utils\"\n)\n\nconst (\n\tnodeTCPPrefixKey = \"tcp:\/\/\"\n\tnodeSockPrefixKey = \"unix:\/\/\"\n\tnodeMockPrefixKey = \"mock:\/\/\"\n\tnodeVirtPrefixKey = \"virt:\/\/\"\n\n\tpodInfoKey = \"\/pod\/info\/%s\" \/\/ \/pod\/info\/{podname}\n\tpodNodesKey = \"\/pod\/%s:nodes\" \/\/ \/pod\/{podname}:nodes -> for list pod nodes\n\tnodeInfoKey = \"\/pod\/%s:nodes\/%s\" \/\/ \/pod\/{podname}:nodes\/{nodenmae} -> for node info\n\n\tnodeCaKey = \"\/node\/%s:ca\" \/\/ \/node\/{nodename}:ca\n\tnodeCertKey = \"\/node\/%s:cert\" \/\/ \/node\/{nodename}:cert\n\tnodeKeyKey = \"\/node\/%s:key\" \/\/ \/node\/{nodename}:key\n\tnodePodKey = \"\/node\/%s:pod\" \/\/ \/node\/{nodename}:pod value -> podname\n\tnodeContainersKey = \"\/node\/%s:containers\/%s\" \/\/ \/node\/{nodename}:containers\/{containerID}\n\n\tcontainerInfoKey = \"\/containers\/%s\" \/\/ \/containers\/{containerID}\n\tcontainerDeployPrefix = \"\/deploy\" \/\/ \/deploy\/{appname}\/{entrypoint}\/{nodename}\/{containerID} value -> something by agent\n\tcontainerProcessingPrefix = \"\/processing\" \/\/ \/processing\/{appname}\/{entrypoint}\/{nodename}\/{opsIdent} value -> count\n)\n\n\/\/ Mercury means store with etcdv3\ntype Mercury struct {\n\tcliv3 *clientv3.Client\n\tconfig types.Config\n}\n\n\/\/ New for create a Mercury instance\nfunc New(config types.Config, embededStorage bool) (*Mercury, error) {\n\tvar cliv3 *clientv3.Client\n\tvar err error\n\tvar tlsConfig *tls.Config\n\n\tif config.Etcd.Ca != \"\" && config.Etcd.Key != \"\" && config.Etcd.Cert != \"\" {\n\t\ttlsInfo := transport.TLSInfo{\n\t\t\tTrustedCAFile: config.Etcd.Ca,\n\t\t\tKeyFile: config.Etcd.Key,\n\t\t\tCertFile: config.Etcd.Cert,\n\t\t}\n\t\ttlsConfig, err = tlsInfo.ClientConfig()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif embededStorage {\n\t\tcliv3 = embeded.NewCluster()\n\t\tlog.Info(\"[Mercury] use embeded cluster\")\n\t} else if cliv3, err = clientv3.New(clientv3.Config{\n\t\tEndpoints: config.Etcd.Machines,\n\t\tUsername: config.Etcd.Auth.Username,\n\t\tPassword: config.Etcd.Auth.Password,\n\t\tTLS: tlsConfig,\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Mercury{cliv3: cliv3, config: config}, nil\n}\n\n\/\/ TerminateEmbededStorage terminate embeded storage\nfunc (m *Mercury) TerminateEmbededStorage() {\n\tembeded.TerminateCluster()\n}\n\n\/\/ CreateLock create a lock instance\nfunc (m *Mercury) CreateLock(key string, ttl int) (lock.DistributedLock, error) {\n\tlockKey := fmt.Sprintf(\"%s\/%s\", m.config.Etcd.LockPrefix, key)\n\tmutex, err := etcdlock.New(m.cliv3, lockKey, ttl)\n\treturn mutex, err\n}\n\n\/\/ Get get results or noting\nfunc (m *Mercury) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {\n\treturn m.cliv3.Get(ctx, m.parseKey(key), opts...)\n}\n\nfunc (m *Mercury) batchGet(ctx context.Context, keys []string, opt ...clientv3.OpOption) (txnResponse *clientv3.TxnResponse, err error) {\n\tops := []clientv3.Op{}\n\tfor _, key := range keys {\n\t\top := clientv3.OpGet(m.parseKey(key), opt...)\n\t\tops = append(ops, op)\n\t}\n\treturn m.doBatchOp(ctx, nil, ops)\n}\n\n\/\/ GetOne get one result or noting\nfunc (m *Mercury) GetOne(ctx context.Context, key string, opts ...clientv3.OpOption) (*mvccpb.KeyValue, error) {\n\tresp, err := m.Get(ctx, key, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Count != 1 {\n\t\treturn nil, types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"key: %s\", key))\n\t}\n\n\treturn resp.Kvs[0], nil\n}\n\n\/\/ GetMulti gets several results\nfunc (m *Mercury) GetMulti(ctx context.Context, keys []string, opts ...clientv3.OpOption) (kvs []*mvccpb.KeyValue, err error) {\n\tvar txnResponse *clientv3.TxnResponse\n\tif txnResponse, err = m.batchGet(ctx, keys); err != nil {\n\t\treturn\n\t}\n\n\tfor idx, responseOp := range txnResponse.Responses {\n\t\tresp := responseOp.GetResponseRange()\n\t\tif resp.Count != 1 {\n\t\t\terr = types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"key: %s\", keys[idx]))\n\t\t\treturn\n\t\t}\n\n\t\tkvs = append(kvs, resp.Kvs[0])\n\t}\n\n\tif len(kvs) != len(keys) {\n\t\terr = types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"keys: %v\", keys))\n\t}\n\n\treturn\n}\n\n\/\/ Delete delete key\nfunc (m *Mercury) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {\n\treturn m.cliv3.Delete(ctx, m.parseKey(key), opts...)\n}\n\n\/\/ BatchDelete batch delete keys\nfunc (m *Mercury) batchDelete(ctx context.Context, keys []string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tops := []clientv3.Op{}\n\tfor _, key := range keys {\n\t\top := clientv3.OpDelete(m.parseKey(key), opts...)\n\t\tops = append(ops, op)\n\t}\n\n\treturn m.doBatchOp(ctx, nil, ops)\n}\n\n\/\/ Put save a key value\nfunc (m *Mercury) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {\n\treturn m.cliv3.Put(ctx, m.parseKey(key), val, opts...)\n}\n\nfunc (m *Mercury) batchPut(ctx context.Context, data map[string]string, limit map[string]map[int]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tops := []clientv3.Op{}\n\tconds := []clientv3.Cmp{}\n\tfor key, val := range data {\n\t\tprefixKey := m.parseKey(key)\n\t\top := clientv3.OpPut(prefixKey, val, opts...)\n\t\tops = append(ops, op)\n\t\tif v, ok := limit[key]; ok {\n\t\t\tfor rev, condition := range v {\n\t\t\t\tcond := clientv3.Compare(clientv3.Version(prefixKey), condition, rev)\n\t\t\t\tconds = append(conds, cond)\n\t\t\t}\n\t\t}\n\t}\n\treturn m.doBatchOp(ctx, conds, ops)\n}\n\n\/\/ Create create a key if not exists\nfunc (m *Mercury) Create(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\treturn m.BatchCreate(ctx, map[string]string{key: val}, opts...)\n}\n\n\/\/ BatchCreate create key values if not exists\nfunc (m *Mercury) BatchCreate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tlimit := map[string]map[int]string{}\n\tfor key := range data {\n\t\tlimit[key] = map[int]string{0: \"=\"}\n\t}\n\tresp, err := m.batchPut(ctx, data, limit, opts...)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif !resp.Succeeded {\n\t\treturn resp, types.ErrKeyExists\n\t}\n\treturn resp, nil\n}\n\n\/\/ Update update a key if exists\nfunc (m *Mercury) Update(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\treturn m.BatchUpdate(ctx, map[string]string{key: val}, opts...)\n}\n\n\/\/ BatchUpdate update keys if not exists\nfunc (m *Mercury) BatchUpdate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tlimit := map[string]map[int]string{}\n\tfor key := range data {\n\t\tlimit[key] = map[int]string{0: \"!=\"}\n\t}\n\tresp, err := m.batchPut(ctx, data, limit, opts...)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif !resp.Succeeded {\n\t\treturn resp, types.ErrKeyNotExists\n\t}\n\treturn resp, nil\n}\n\n\/\/ Watch wath a key\nfunc (m *Mercury) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {\n\tkey = m.parseKey(key)\n\treturn m.cliv3.Watch(ctx, key, opts...)\n}\n\nfunc (m *Mercury) parseKey(key string) string {\n\tafter := filepath.Join(m.config.Etcd.Prefix, key)\n\tif strings.HasSuffix(key, \"\/\") {\n\t\tafter = after + \"\/\"\n\t}\n\treturn after\n}\n\nfunc (m *Mercury) doBatchOp(ctx context.Context, conds []clientv3.Cmp, ops []clientv3.Op) (*clientv3.TxnResponse, error) {\n\tif len(ops) == 0 {\n\t\treturn nil, types.ErrNoOps\n\t}\n\n\tconst txnLimit = 125\n\tcount := len(ops) \/ txnLimit \/\/ stupid etcd txn, default limit is 128\n\ttail := len(ops) % txnLimit\n\tlength := count\n\tif tail != 0 {\n\t\tlength++\n\t}\n\n\tresps := make([]*clientv3.TxnResponse, length)\n\terrs := make([]error, length)\n\n\twg := sync.WaitGroup{}\n\tdoOp := func(index int, ops []clientv3.Op) {\n\t\tdefer wg.Done()\n\t\ttxn := m.cliv3.Txn(ctx)\n\t\tif len(conds) != 0 {\n\t\t\ttxn = txn.If(conds...)\n\t\t}\n\t\tresp, err := txn.Then(ops...).Commit()\n\t\tresps[index] = resp\n\t\terrs[index] = err\n\t}\n\n\tif tail != 0 {\n\t\twg.Add(1)\n\t\tgo doOp(length-1, ops[count*txnLimit:])\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\twg.Add(1)\n\t\tgo doOp(i, ops[i*txnLimit:(i+1)*txnLimit])\n\t}\n\twg.Wait()\n\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(resps) == 0 {\n\t\treturn &clientv3.TxnResponse{}, nil\n\t}\n\n\tresp := resps[0]\n\tfor i := 1; i < len(resps); i++ {\n\t\tresp.Succeeded = resp.Succeeded && resps[i].Succeeded\n\t\tresp.Responses = append(resp.Responses, resps[i].Responses...)\n\t}\n\treturn resp, nil\n}\n\nvar _cache = &utils.Cache{Clients: make(map[string]engine.API)}\n<commit_msg>minor revise list ops, if there is no containers, do not show no txn ops<commit_after>package etcdv3\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/projecteru2\/core\/engine\"\n\t\"github.com\/projecteru2\/core\/lock\"\n\t\"github.com\/projecteru2\/core\/lock\/etcdlock\"\n\t\"github.com\/projecteru2\/core\/store\/etcdv3\/embeded\"\n\t\"github.com\/projecteru2\/core\/types\"\n\t\"github.com\/projecteru2\/core\/utils\"\n)\n\nconst (\n\tnodeTCPPrefixKey = \"tcp:\/\/\"\n\tnodeSockPrefixKey = \"unix:\/\/\"\n\tnodeMockPrefixKey = \"mock:\/\/\"\n\tnodeVirtPrefixKey = \"virt:\/\/\"\n\n\tpodInfoKey = \"\/pod\/info\/%s\" \/\/ \/pod\/info\/{podname}\n\tpodNodesKey = \"\/pod\/%s:nodes\" \/\/ \/pod\/{podname}:nodes -> for list pod nodes\n\tnodeInfoKey = \"\/pod\/%s:nodes\/%s\" \/\/ \/pod\/{podname}:nodes\/{nodenmae} -> for node info\n\n\tnodeCaKey = \"\/node\/%s:ca\" \/\/ \/node\/{nodename}:ca\n\tnodeCertKey = \"\/node\/%s:cert\" \/\/ \/node\/{nodename}:cert\n\tnodeKeyKey = \"\/node\/%s:key\" \/\/ \/node\/{nodename}:key\n\tnodePodKey = \"\/node\/%s:pod\" \/\/ \/node\/{nodename}:pod value -> podname\n\tnodeContainersKey = \"\/node\/%s:containers\/%s\" \/\/ \/node\/{nodename}:containers\/{containerID}\n\n\tcontainerInfoKey = \"\/containers\/%s\" \/\/ \/containers\/{containerID}\n\tcontainerDeployPrefix = \"\/deploy\" \/\/ \/deploy\/{appname}\/{entrypoint}\/{nodename}\/{containerID} value -> something by agent\n\tcontainerProcessingPrefix = \"\/processing\" \/\/ \/processing\/{appname}\/{entrypoint}\/{nodename}\/{opsIdent} value -> count\n)\n\n\/\/ Mercury means store with etcdv3\ntype Mercury struct {\n\tcliv3 *clientv3.Client\n\tconfig types.Config\n}\n\n\/\/ New for create a Mercury instance\nfunc New(config types.Config, embededStorage bool) (*Mercury, error) {\n\tvar cliv3 *clientv3.Client\n\tvar err error\n\tvar tlsConfig *tls.Config\n\n\tif config.Etcd.Ca != \"\" && config.Etcd.Key != \"\" && config.Etcd.Cert != \"\" {\n\t\ttlsInfo := transport.TLSInfo{\n\t\t\tTrustedCAFile: config.Etcd.Ca,\n\t\t\tKeyFile: config.Etcd.Key,\n\t\t\tCertFile: config.Etcd.Cert,\n\t\t}\n\t\ttlsConfig, err = tlsInfo.ClientConfig()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif embededStorage {\n\t\tcliv3 = embeded.NewCluster()\n\t\tlog.Info(\"[Mercury] use embeded cluster\")\n\t} else if cliv3, err = clientv3.New(clientv3.Config{\n\t\tEndpoints: config.Etcd.Machines,\n\t\tUsername: config.Etcd.Auth.Username,\n\t\tPassword: config.Etcd.Auth.Password,\n\t\tTLS: tlsConfig,\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Mercury{cliv3: cliv3, config: config}, nil\n}\n\n\/\/ TerminateEmbededStorage terminate embeded storage\nfunc (m *Mercury) TerminateEmbededStorage() {\n\tembeded.TerminateCluster()\n}\n\n\/\/ CreateLock create a lock instance\nfunc (m *Mercury) CreateLock(key string, ttl int) (lock.DistributedLock, error) {\n\tlockKey := fmt.Sprintf(\"%s\/%s\", m.config.Etcd.LockPrefix, key)\n\tmutex, err := etcdlock.New(m.cliv3, lockKey, ttl)\n\treturn mutex, err\n}\n\n\/\/ Get get results or noting\nfunc (m *Mercury) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {\n\treturn m.cliv3.Get(ctx, m.parseKey(key), opts...)\n}\n\nfunc (m *Mercury) batchGet(ctx context.Context, keys []string, opt ...clientv3.OpOption) (txnResponse *clientv3.TxnResponse, err error) {\n\tops := []clientv3.Op{}\n\tfor _, key := range keys {\n\t\top := clientv3.OpGet(m.parseKey(key), opt...)\n\t\tops = append(ops, op)\n\t}\n\treturn m.doBatchOp(ctx, nil, ops)\n}\n\n\/\/ GetOne get one result or noting\nfunc (m *Mercury) GetOne(ctx context.Context, key string, opts ...clientv3.OpOption) (*mvccpb.KeyValue, error) {\n\tresp, err := m.Get(ctx, key, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Count != 1 {\n\t\treturn nil, types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"key: %s\", key))\n\t}\n\n\treturn resp.Kvs[0], nil\n}\n\n\/\/ GetMulti gets several results\nfunc (m *Mercury) GetMulti(ctx context.Context, keys []string, opts ...clientv3.OpOption) (kvs []*mvccpb.KeyValue, err error) {\n\tvar txnResponse *clientv3.TxnResponse\n\tif len(keys) == 0 {\n\t\treturn\n\t}\n\n\tif txnResponse, err = m.batchGet(ctx, keys); err != nil {\n\t\treturn\n\t}\n\n\tfor idx, responseOp := range txnResponse.Responses {\n\t\tresp := responseOp.GetResponseRange()\n\t\tif resp.Count != 1 {\n\t\t\terr = types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"key: %s\", keys[idx]))\n\t\t\treturn\n\t\t}\n\n\t\tkvs = append(kvs, resp.Kvs[0])\n\t}\n\n\tif len(kvs) != len(keys) {\n\t\terr = types.NewDetailedErr(types.ErrBadCount, fmt.Sprintf(\"keys: %v\", keys))\n\t}\n\n\treturn\n}\n\n\/\/ Delete delete key\nfunc (m *Mercury) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {\n\treturn m.cliv3.Delete(ctx, m.parseKey(key), opts...)\n}\n\n\/\/ BatchDelete batch delete keys\nfunc (m *Mercury) batchDelete(ctx context.Context, keys []string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tops := []clientv3.Op{}\n\tfor _, key := range keys {\n\t\top := clientv3.OpDelete(m.parseKey(key), opts...)\n\t\tops = append(ops, op)\n\t}\n\n\treturn m.doBatchOp(ctx, nil, ops)\n}\n\n\/\/ Put save a key value\nfunc (m *Mercury) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {\n\treturn m.cliv3.Put(ctx, m.parseKey(key), val, opts...)\n}\n\nfunc (m *Mercury) batchPut(ctx context.Context, data map[string]string, limit map[string]map[int]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tops := []clientv3.Op{}\n\tconds := []clientv3.Cmp{}\n\tfor key, val := range data {\n\t\tprefixKey := m.parseKey(key)\n\t\top := clientv3.OpPut(prefixKey, val, opts...)\n\t\tops = append(ops, op)\n\t\tif v, ok := limit[key]; ok {\n\t\t\tfor rev, condition := range v {\n\t\t\t\tcond := clientv3.Compare(clientv3.Version(prefixKey), condition, rev)\n\t\t\t\tconds = append(conds, cond)\n\t\t\t}\n\t\t}\n\t}\n\treturn m.doBatchOp(ctx, conds, ops)\n}\n\n\/\/ Create create a key if not exists\nfunc (m *Mercury) Create(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\treturn m.BatchCreate(ctx, map[string]string{key: val}, opts...)\n}\n\n\/\/ BatchCreate create key values if not exists\nfunc (m *Mercury) BatchCreate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tlimit := map[string]map[int]string{}\n\tfor key := range data {\n\t\tlimit[key] = map[int]string{0: \"=\"}\n\t}\n\tresp, err := m.batchPut(ctx, data, limit, opts...)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif !resp.Succeeded {\n\t\treturn resp, types.ErrKeyExists\n\t}\n\treturn resp, nil\n}\n\n\/\/ Update update a key if exists\nfunc (m *Mercury) Update(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\treturn m.BatchUpdate(ctx, map[string]string{key: val}, opts...)\n}\n\n\/\/ BatchUpdate update keys if not exists\nfunc (m *Mercury) BatchUpdate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) {\n\tlimit := map[string]map[int]string{}\n\tfor key := range data {\n\t\tlimit[key] = map[int]string{0: \"!=\"}\n\t}\n\tresp, err := m.batchPut(ctx, data, limit, opts...)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif !resp.Succeeded {\n\t\treturn resp, types.ErrKeyNotExists\n\t}\n\treturn resp, nil\n}\n\n\/\/ Watch wath a key\nfunc (m *Mercury) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {\n\tkey = m.parseKey(key)\n\treturn m.cliv3.Watch(ctx, key, opts...)\n}\n\nfunc (m *Mercury) parseKey(key string) string {\n\tafter := filepath.Join(m.config.Etcd.Prefix, key)\n\tif strings.HasSuffix(key, \"\/\") {\n\t\tafter = after + \"\/\"\n\t}\n\treturn after\n}\n\nfunc (m *Mercury) doBatchOp(ctx context.Context, conds []clientv3.Cmp, ops []clientv3.Op) (*clientv3.TxnResponse, error) {\n\tif len(ops) == 0 {\n\t\treturn nil, types.ErrNoOps\n\t}\n\n\tconst txnLimit = 125\n\tcount := len(ops) \/ txnLimit \/\/ stupid etcd txn, default limit is 128\n\ttail := len(ops) % txnLimit\n\tlength := count\n\tif tail != 0 {\n\t\tlength++\n\t}\n\n\tresps := make([]*clientv3.TxnResponse, length)\n\terrs := make([]error, length)\n\n\twg := sync.WaitGroup{}\n\tdoOp := func(index int, ops []clientv3.Op) {\n\t\tdefer wg.Done()\n\t\ttxn := m.cliv3.Txn(ctx)\n\t\tif len(conds) != 0 {\n\t\t\ttxn = txn.If(conds...)\n\t\t}\n\t\tresp, err := txn.Then(ops...).Commit()\n\t\tresps[index] = resp\n\t\terrs[index] = err\n\t}\n\n\tif tail != 0 {\n\t\twg.Add(1)\n\t\tgo doOp(length-1, ops[count*txnLimit:])\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\twg.Add(1)\n\t\tgo doOp(i, ops[i*txnLimit:(i+1)*txnLimit])\n\t}\n\twg.Wait()\n\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(resps) == 0 {\n\t\treturn &clientv3.TxnResponse{}, nil\n\t}\n\n\tresp := resps[0]\n\tfor i := 1; i < len(resps); i++ {\n\t\tresp.Succeeded = resp.Succeeded && resps[i].Succeeded\n\t\tresp.Responses = append(resp.Responses, resps[i].Responses...)\n\t}\n\treturn resp, nil\n}\n\nvar _cache = &utils.Cache{Clients: make(map[string]engine.API)}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n)\n\nconst (\n\tVERSION = \"0.3.0\"\n)\n\nvar options struct {\n\tPath string\n\tHost string\n\tPort int\n\tToken string\n\tAuth bool\n}\n\nvar services []Service\n\nfunc initOptions() {\n\tflag.StringVar(&options.Path, \"c\", \"\", \"Path to config directory\")\n\tflag.StringVar(&options.Host, \"h\", \"0.0.0.0\", \"Host to bind to\")\n\tflag.IntVar(&options.Port, \"p\", 3050, \"Port to listen on\")\n\tflag.StringVar(&options.Token, \"t\", \"\", \"Authentication token\")\n\n\tflag.Parse()\n\n\tif options.Path == \"\" {\n\t\tfmt.Println(\"Please specify -c option\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Load token from environment variable if not set\n\tif options.Token == \"\" {\n\t\toptions.Token = os.Getenv(\"TOKEN\")\n\t}\n\n\t\/\/ Do not require authentication if token is not set\n\tif options.Token == \"\" {\n\t\toptions.Auth = false\n\t} else {\n\t\toptions.Auth = true\n\t}\n}\n\nfunc main() {\n\tinitOptions()\n\n\tvar err error\n\tservices, err = readServices(options.Path)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"envd v%s\\n\", VERSION)\n\tfmt.Println(\"config path:\", options.Path)\n\tfmt.Println(\"services detected:\", len(services))\n\n\tstartServer()\n}\n<commit_msg>Add -v flag to print version<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n)\n\nconst (\n\tVERSION = \"0.3.0\"\n)\n\nvar options struct {\n\tPath string\n\tHost string\n\tPort int\n\tToken string\n\tAuth bool\n}\n\nvar services []Service\n\nfunc initOptions() {\n\tvar printVersion bool\n\n\tflag.StringVar(&options.Path, \"c\", \"\", \"Path to config directory\")\n\tflag.StringVar(&options.Host, \"h\", \"0.0.0.0\", \"Host to bind to\")\n\tflag.IntVar(&options.Port, \"p\", 3050, \"Port to listen on\")\n\tflag.StringVar(&options.Token, \"t\", \"\", \"Authentication token\")\n\tflag.BoolVar(&printVersion, \"v\", false, \"Print version\")\n\n\tflag.Parse()\n\n\tif printVersion {\n\t\tfmt.Println(VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif options.Path == \"\" {\n\t\tfmt.Println(\"Please specify -c option\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Load token from environment variable if not set\n\tif options.Token == \"\" {\n\t\toptions.Token = os.Getenv(\"TOKEN\")\n\t}\n\n\t\/\/ Do not require authentication if token is not set\n\tif options.Token == \"\" {\n\t\toptions.Auth = false\n\t} else {\n\t\toptions.Auth = true\n\t}\n}\n\nfunc main() {\n\tinitOptions()\n\n\tvar err error\n\tservices, err = readServices(options.Path)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"envd v%s\\n\", VERSION)\n\tfmt.Println(\"config path:\", options.Path)\n\tfmt.Println(\"services detected:\", len(services))\n\n\tstartServer()\n}\n<|endoftext|>"} {"text":"<commit_before>package fit\n\nimport \"fmt\"\n\n\/\/ Manually generated types: The 'Bool' type is not found in the SDK\n\/\/ specification, so it won't be auto-generated, but it is also not a base\n\/\/ type.\n\ntype Bool byte\n\nconst (\n\tBoolFalse Bool = 0\n\tBoolTrue Bool = 1\n\tBoolInvalid Bool = 255\n)\nconst (\n\t_Bool_name_0 = \"FalseTrue\"\n\t_Bool_name_1 = \"Invalid\"\n)\n\nvar (\n\t_Bool_index_0 = [...]uint8{0, 5, 9}\n)\n\nfunc (i Bool) String() string {\n\tswitch {\n\t\/\/lint:ignore SA4003 Check unsigned >= 0, but this matches the stringer autogen output\n\tcase 0 <= i && i <= 1:\n\t\treturn _Bool_name_0[_Bool_index_0[i]:_Bool_index_0[i+1]]\n\tcase i == 255:\n\t\treturn _Bool_name_1\n\tdefault:\n\t\treturn fmt.Sprintf(\"Bool(%d)\", i)\n\t}\n}\n<commit_msg>types_man: regenerate using latest version of stringer<commit_after>package fit\n\nimport \"strconv\"\n\n\/\/ Manually generated types: The 'Bool' type is not found in the SDK\n\/\/ specification, so it won't be auto-generated, but it is also not a base\n\/\/ type.\n\ntype Bool byte\n\nconst (\n\tBoolFalse Bool = 0\n\tBoolTrue Bool = 1\n\tBoolInvalid Bool = 255\n)\n\nfunc _() {\n\t\/\/ An \"invalid array index\" compiler error signifies that the constant values have changed.\n\t\/\/ Re-run the stringer command to generate them again.\n\tvar x [1]struct{}\n\t_ = x[BoolFalse-0]\n\t_ = x[BoolTrue-1]\n\t_ = x[BoolInvalid-255]\n}\n\nconst (\n\t_Bool_name_0 = \"BoolFalseBoolTrue\"\n\t_Bool_name_1 = \"BoolInvalid\"\n)\n\nvar (\n\t_Bool_index_0 = [...]uint8{0, 9, 17}\n)\n\nfunc (i Bool) String() string {\n\tswitch {\n\t\/\/lint:ignore SA4003 Check unsigned >= 0, but this matches the stringer autogen output\n\tcase 0 <= i && i <= 1:\n\t\treturn _Bool_name_0[_Bool_index_0[i]:_Bool_index_0[i+1]]\n\tcase i == 255:\n\t\treturn _Bool_name_1\n\tdefault:\n\t\treturn \"Bool(\" + strconv.FormatInt(int64(i), 10) + \")\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage bleve\n\nimport (\n\t\"github.com\/blevesearch\/bleve\/document\"\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/index\/store\"\n)\n\n\/\/ A Batch groups together multiple Index and Delete\n\/\/ operations you would like performed at the same\n\/\/ time. The Batch structure is NOT thread-safe.\n\/\/ You should only perform operations on a batch\n\/\/ from a single thread at a time. Once batch\n\/\/ execution has started, you may not modify it.\ntype Batch struct {\n\tindex Index\n\tinternal *index.Batch\n}\n\n\/\/ Index adds the specified index operation to the\n\/\/ batch. NOTE: the bleve Index is not updated\n\/\/ until the batch is executed.\nfunc (b *Batch) Index(id string, data interface{}) error {\n\tdoc := document.NewDocument(id)\n\terr := b.index.Mapping().mapDocument(doc, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.internal.Update(doc)\n\treturn nil\n}\n\n\/\/ Delete adds the specified delete operation to the\n\/\/ batch. NOTE: the bleve Index is not updated until\n\/\/ the batch is executed.\nfunc (b *Batch) Delete(id string) {\n\tb.internal.Delete(id)\n}\n\n\/\/ SetInternal adds the specified set internal\n\/\/ operation to the batch. NOTE: the bleve Index is\n\/\/ not updated until the batch is executed.\nfunc (b *Batch) SetInternal(key, val []byte) {\n\tb.internal.SetInternal(key, val)\n}\n\n\/\/ SetInternal adds the specified delete internal\n\/\/ operation to the batch. NOTE: the bleve Index is\n\/\/ not updated until the batch is executed.\nfunc (b *Batch) DeleteInternal(key []byte) {\n\tb.internal.DeleteInternal(key)\n}\n\n\/\/ Size returns the total number of operations inside the batch\n\/\/ including normal index operations and internal operations.\nfunc (b *Batch) Size() int {\n\treturn len(b.internal.IndexOps) + len(b.internal.InternalOps)\n}\n\n\/\/ String prints a user friendly string represenation of what\n\/\/ is inside this batch.\nfunc (b *Batch) String() string {\n\treturn b.internal.String()\n}\n\n\/\/ Reset returns a Batch to the empty state so that it can\n\/\/ be re-used in the future.\nfunc (b *Batch) Reset() {\n\tb.internal.Reset()\n}\n\n\/\/ An Index implements all the indexing and searching\n\/\/ capabilities of bleve. An Index can be created\n\/\/ using the New() and Open() methods.\ntype Index interface {\n\tIndex(id string, data interface{}) error\n\tDelete(id string) error\n\n\tNewBatch() *Batch\n\tBatch(b *Batch) error\n\n\tDocument(id string) (*document.Document, error)\n\tDocCount() (uint64, error)\n\n\tSearch(req *SearchRequest) (*SearchResult, error)\n\n\tFields() ([]string, error)\n\n\tFieldDict(field string) (index.FieldDict, error)\n\tFieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error)\n\tFieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error)\n\n\tDumpAll() chan interface{}\n\tDumpDoc(id string) chan interface{}\n\tDumpFields() chan interface{}\n\n\tClose() error\n\n\tMapping() *IndexMapping\n\n\tStats() *IndexStat\n\n\tGetInternal(key []byte) ([]byte, error)\n\tSetInternal(key, val []byte) error\n\tDeleteInternal(key []byte) error\n\n\tAdvanced() (index.Index, store.KVStore, error)\n}\n\n\/\/ A Classifier is an interface describing any object\n\/\/ which knows how to identify its own type.\ntype Classifier interface {\n\tType() string\n}\n\n\/\/ New index at the specified path, must not exist.\n\/\/ The provided mapping will be used for all\n\/\/ Index\/Search operations.\nfunc New(path string, mapping *IndexMapping) (Index, error) {\n\treturn newIndexUsing(path, mapping, Config.DefaultIndexType, Config.DefaultKVStore, nil)\n}\n\n\/\/ NewUsing creates index at the specified path,\n\/\/ which must not already exist.\n\/\/ The provided mapping will be used for all\n\/\/ Index\/Search operations.\n\/\/ The specified index type will be used\n\/\/ The specified kvstore implemenation will be used\n\/\/ and the provided kvconfig will be passed to its\n\/\/ constructor.\nfunc NewUsing(path string, mapping *IndexMapping, indexType string, kvstore string, kvconfig map[string]interface{}) (Index, error) {\n\treturn newIndexUsing(path, mapping, indexType, kvstore, kvconfig)\n}\n\n\/\/ Open index at the specified path, must exist.\n\/\/ The mapping used when it was created will be used for all Index\/Search operations.\nfunc Open(path string) (Index, error) {\n\treturn openIndexUsing(path, nil)\n}\n\n\/\/ OpenUsing opens index at the specified path, must exist.\n\/\/ The mapping used when it was created will be used for all Index\/Search operations.\n\/\/ The provided runtimeConfig can override settings\n\/\/ persisted when the kvstore was created.\nfunc OpenUsing(path string, runtimeConfig map[string]interface{}) (Index, error) {\n\treturn openIndexUsing(path, runtimeConfig)\n}\n<commit_msg>doc: document indexed value\/mappings interactions<commit_after>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file\n\/\/ except in compliance with the License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software distributed under the\n\/\/ License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n\/\/ either express or implied. See the License for the specific language governing permissions\n\/\/ and limitations under the License.\n\npackage bleve\n\nimport (\n\t\"github.com\/blevesearch\/bleve\/document\"\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/index\/store\"\n)\n\n\/\/ A Batch groups together multiple Index and Delete\n\/\/ operations you would like performed at the same\n\/\/ time. The Batch structure is NOT thread-safe.\n\/\/ You should only perform operations on a batch\n\/\/ from a single thread at a time. Once batch\n\/\/ execution has started, you may not modify it.\ntype Batch struct {\n\tindex Index\n\tinternal *index.Batch\n}\n\n\/\/ Index adds the specified index operation to the\n\/\/ batch. NOTE: the bleve Index is not updated\n\/\/ until the batch is executed.\nfunc (b *Batch) Index(id string, data interface{}) error {\n\tdoc := document.NewDocument(id)\n\terr := b.index.Mapping().mapDocument(doc, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.internal.Update(doc)\n\treturn nil\n}\n\n\/\/ Delete adds the specified delete operation to the\n\/\/ batch. NOTE: the bleve Index is not updated until\n\/\/ the batch is executed.\nfunc (b *Batch) Delete(id string) {\n\tb.internal.Delete(id)\n}\n\n\/\/ SetInternal adds the specified set internal\n\/\/ operation to the batch. NOTE: the bleve Index is\n\/\/ not updated until the batch is executed.\nfunc (b *Batch) SetInternal(key, val []byte) {\n\tb.internal.SetInternal(key, val)\n}\n\n\/\/ SetInternal adds the specified delete internal\n\/\/ operation to the batch. NOTE: the bleve Index is\n\/\/ not updated until the batch is executed.\nfunc (b *Batch) DeleteInternal(key []byte) {\n\tb.internal.DeleteInternal(key)\n}\n\n\/\/ Size returns the total number of operations inside the batch\n\/\/ including normal index operations and internal operations.\nfunc (b *Batch) Size() int {\n\treturn len(b.internal.IndexOps) + len(b.internal.InternalOps)\n}\n\n\/\/ String prints a user friendly string represenation of what\n\/\/ is inside this batch.\nfunc (b *Batch) String() string {\n\treturn b.internal.String()\n}\n\n\/\/ Reset returns a Batch to the empty state so that it can\n\/\/ be re-used in the future.\nfunc (b *Batch) Reset() {\n\tb.internal.Reset()\n}\n\n\/\/ An Index implements all the indexing and searching\n\/\/ capabilities of bleve. An Index can be created\n\/\/ using the New() and Open() methods.\n\/\/\n\/\/ Index() takes an input value, deduces a DocumentMapping for its type,\n\/\/ assigns string paths to its fields or values then applies field mappings on\n\/\/ them.\n\/\/\n\/\/ If the value is a []byte, the indexer attempts to convert it to something\n\/\/ else using the ByteArrayConverter registered as\n\/\/ IndexMapping.ByteArrayConverter. By default, it interprets the value as a\n\/\/ JSON payload and unmarshals it to map[string]interface{}.\n\/\/\n\/\/ The DocumentMapping used to index a value is deduced by the following rules:\n\/\/ 1) If value implements Classifier interface, resolve the mapping from Type().\n\/\/ 2) If value has a string field or value at IndexMapping.TypeField.\n\/\/ (defaulting to \"_type\"), use it to resolve the mapping. Fields addressing\n\/\/ is described below.\n\/\/ 3) If IndexMapping.DefaultType is registered, return it.\n\/\/ 4) Return IndexMapping.DefaultMapping.\n\/\/\n\/\/ Each field or nested field of the value is identified by a string path, then\n\/\/ mapped to one or several FieldMappings which extract the result for analysis.\n\/\/\n\/\/ Struct values fields are identified by their \"json:\" tag, or by their name.\n\/\/ Nested fields are identified by prefixing with their parent identifier,\n\/\/ separated by a dot.\n\/\/\n\/\/ Map values entries are identified by their string key. Entries not indexed\n\/\/ by strings are ignored. Entry values are identified recursively like struct\n\/\/ fields.\n\/\/\n\/\/ Slice and array values are identified by their field name. Their elements\n\/\/ are processed sequentially with the same FieldMapping.\n\/\/\n\/\/ String, float64 and time.Time values are identified by their field name.\n\/\/ Other types are ignored.\n\/\/\n\/\/ Each value identifier is decomposed in its parts and recursively address\n\/\/ SubDocumentMappings in the tree starting at the root DocumentMapping. If a\n\/\/ mapping is found, all its FieldMappings are applied to the value. If no\n\/\/ mapping is found and the root DocumentMapping is dynamic, default mappings\n\/\/ are used based on value type and IndexMapping default configurations.\n\/\/\n\/\/ Finally, mapped values are analyzed, indexed or stored. Examples:\n\/\/\n\/\/ type Date struct {\n\/\/ Day string `json:\"day\"`\n\/\/ Month string\n\/\/ Year string\n\/\/ }\n\/\/\n\/\/ type Person struct {\n\/\/ FirstName string `json:\"first_name\"`\n\/\/ LastName string\n\/\/ BirthDate Date `json:\"birth_date\"`\n\/\/ }\n\/\/\n\/\/ A Person value FirstName is mapped by the SubDocumentMapping at\n\/\/ \"first_name\". Its LastName is mapped by the one at \"LastName\". The day of\n\/\/ BirthDate is mapped to the SubDocumentMapping \"day\" of the root\n\/\/ SubDocumentMapping \"birth_date\". It will appear as the \"birth_date.day\"\n\/\/ field in the index. The month is mapped to \"birth_date.Month\".\ntype Index interface {\n\t\/\/ Index analyzes, indexes or stores mapped data fields. Supplied\n\t\/\/ identifier is bound to analyzed data and will be retrieved by search\n\t\/\/ requests. See Index interface documentation for details about mapping\n\t\/\/ rules.\n\tIndex(id string, data interface{}) error\n\tDelete(id string) error\n\n\tNewBatch() *Batch\n\tBatch(b *Batch) error\n\n\tDocument(id string) (*document.Document, error)\n\tDocCount() (uint64, error)\n\n\tSearch(req *SearchRequest) (*SearchResult, error)\n\n\tFields() ([]string, error)\n\n\tFieldDict(field string) (index.FieldDict, error)\n\tFieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error)\n\tFieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error)\n\n\tDumpAll() chan interface{}\n\tDumpDoc(id string) chan interface{}\n\tDumpFields() chan interface{}\n\n\tClose() error\n\n\tMapping() *IndexMapping\n\n\tStats() *IndexStat\n\n\tGetInternal(key []byte) ([]byte, error)\n\tSetInternal(key, val []byte) error\n\tDeleteInternal(key []byte) error\n\n\tAdvanced() (index.Index, store.KVStore, error)\n}\n\n\/\/ A Classifier is an interface describing any object\n\/\/ which knows how to identify its own type.\ntype Classifier interface {\n\tType() string\n}\n\n\/\/ New index at the specified path, must not exist.\n\/\/ The provided mapping will be used for all\n\/\/ Index\/Search operations.\nfunc New(path string, mapping *IndexMapping) (Index, error) {\n\treturn newIndexUsing(path, mapping, Config.DefaultIndexType, Config.DefaultKVStore, nil)\n}\n\n\/\/ NewUsing creates index at the specified path,\n\/\/ which must not already exist.\n\/\/ The provided mapping will be used for all\n\/\/ Index\/Search operations.\n\/\/ The specified index type will be used\n\/\/ The specified kvstore implemenation will be used\n\/\/ and the provided kvconfig will be passed to its\n\/\/ constructor.\nfunc NewUsing(path string, mapping *IndexMapping, indexType string, kvstore string, kvconfig map[string]interface{}) (Index, error) {\n\treturn newIndexUsing(path, mapping, indexType, kvstore, kvconfig)\n}\n\n\/\/ Open index at the specified path, must exist.\n\/\/ The mapping used when it was created will be used for all Index\/Search operations.\nfunc Open(path string) (Index, error) {\n\treturn openIndexUsing(path, nil)\n}\n\n\/\/ OpenUsing opens index at the specified path, must exist.\n\/\/ The mapping used when it was created will be used for all Index\/Search operations.\n\/\/ The provided runtimeConfig can override settings\n\/\/ persisted when the kvstore was created.\nfunc OpenUsing(path string, runtimeConfig map[string]interface{}) (Index, error) {\n\treturn openIndexUsing(path, runtimeConfig)\n}\n<|endoftext|>"} {"text":"<commit_before>package goriak\n\nimport (\n\t\"errors\"\n\n\triak \"github.com\/basho\/riak-go-client\"\n)\n\nfunc KeysInIndex(bucket, bucketType, indexName, indexValue string) ([]string, error) {\n\tresult := []string{}\n\n\tcmd, err := riak.NewSecondaryIndexQueryCommandBuilder().\n\t\tWithBucket(bucket).\n\t\tWithBucketType(bucketType).\n\t\tWithIndexName(indexName).\n\t\tWithIndexKey(indexValue).\n\t\tBuild()\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\terr = connect().Execute(cmd)\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tres, ok := cmd.(*riak.SecondaryIndexQueryCommand)\n\n\tif !ok {\n\t\treturn result, errors.New(\"Could not convert\")\n\t}\n\n\tif !res.Success() {\n\t\treturn result, errors.New(\"Not successful\")\n\t}\n\n\tresult = make([]string, len(res.Response.Results))\n\n\tfor i, v := range res.Response.Results {\n\t\tresult[i] = string(v.ObjectKey)\n\t}\n\n\treturn result, nil\n}\n<commit_msg>index: Limit parameter<commit_after>package goriak\n\nimport (\n\t\"errors\"\n\n\triak \"github.com\/basho\/riak-go-client\"\n)\n\nfunc KeysInIndex(bucket, bucketType, indexName, indexValue string, limit uint32) ([]string, error) {\n\tresult := []string{}\n\n\tcmd, err := riak.NewSecondaryIndexQueryCommandBuilder().\n\t\tWithBucket(bucket).\n\t\tWithBucketType(bucketType).\n\t\tWithIndexName(indexName).\n\t\tWithIndexKey(indexValue).\n\t\tWithMaxResults(limit).\n\t\tBuild()\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\terr = connect().Execute(cmd)\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tres, ok := cmd.(*riak.SecondaryIndexQueryCommand)\n\n\tif !ok {\n\t\treturn result, errors.New(\"Could not convert\")\n\t}\n\n\tif !res.Success() {\n\t\treturn result, errors.New(\"Not successful\")\n\t}\n\n\tresult = make([]string, len(res.Response.Results))\n\n\tfor i, v := range res.Response.Results {\n\t\tresult[i] = string(v.ObjectKey)\n\t}\n\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage fdroidcl\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/mvdan\/adb\"\n)\n\ntype Index struct {\n\tRepo Repo `xml:\"repo\"`\n\tApps []App `xml:\"application\"`\n}\n\ntype Repo struct {\n\tName string `xml:\"name,attr\"`\n\tPubKey string `xml:\"pubkey,attr\"`\n\tTimestamp int `xml:\"timestamp,attr\"`\n\tURL string `xml:\"url,attr\"`\n\tVersion int `xml:\"version,attr\"`\n\tMaxAge int `xml:\"maxage,attr\"`\n\tDescription string `xml:\"description\"`\n}\n\n\/\/ App is an Android application\ntype App struct {\n\tID string `xml:\"id\"`\n\tName string `xml:\"name\"`\n\tSummary string `xml:\"summary\"`\n\tAdded DateVal `xml:\"added\"`\n\tUpdated DateVal `xml:\"lastupdated\"`\n\tIcon string `xml:\"icon\"`\n\tDesc string `xml:\"desc\"`\n\tLicense string `xml:\"license\"`\n\tCategs CommaList `xml:\"categories\"`\n\tWebsite string `xml:\"web\"`\n\tSource string `xml:\"source\"`\n\tTracker string `xml:\"tracker\"`\n\tChangelog string `xml:\"changelog\"`\n\tDonate string `xml:\"donate\"`\n\tBitcoin string `xml:\"bitcoin\"`\n\tLitecoin string `xml:\"litecoin\"`\n\tFlattrID string `xml:\"flattr\"`\n\tApks []Apk `xml:\"package\"`\n\tCVName string `xml:\"marketversion\"`\n\tCVCode int `xml:\"marketvercode\"`\n}\n\ntype IconDensity uint\n\nconst (\n\tUnknownDensity IconDensity = 0\n\tLowDensity IconDensity = 120\n\tMediumDensity IconDensity = 160\n\tHighDensity IconDensity = 240\n\tXHighDensity IconDensity = 320\n\tXXHighDensity IconDensity = 480\n\tXXXHighDensity IconDensity = 640\n)\n\nfunc getIconsDir(density IconDensity) string {\n\tif density == UnknownDensity {\n\t\treturn \"icons\"\n\t}\n\tfor _, d := range [...]IconDensity{\n\t\tXXXHighDensity,\n\t\tXXHighDensity,\n\t\tXHighDensity,\n\t\tHighDensity,\n\t\tMediumDensity,\n\t} {\n\t\tif density >= d {\n\t\t\treturn fmt.Sprintf(\"icons-%d\", d)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"icons-%d\", LowDensity)\n}\n\nfunc (a *App) IconURLForDensity(density IconDensity) string {\n\tif len(a.Apks) == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\/%s\/%s\", a.Apks[0].Repo.URL,\n\t\tgetIconsDir(density), a.Icon)\n}\n\nfunc (a *App) IconURL() string {\n\treturn a.IconURLForDensity(UnknownDensity)\n}\n\nfunc (a *App) TextDesc(w io.Writer) {\n\treader := strings.NewReader(a.Desc)\n\tdecoder := xml.NewDecoder(reader)\n\tfirstParagraph := true\n\tlinePrefix := \"\"\n\tcolsUsed := 0\n\tvar links []string\n\tlinked := false\n\tfor {\n\t\ttoken, err := decoder.Token()\n\t\tif err == io.EOF || token == nil {\n\t\t\tbreak\n\t\t}\n\t\tswitch t := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch t.Name.Local {\n\t\t\tcase \"p\":\n\t\t\t\tif firstParagraph {\n\t\t\t\t\tfirstParagraph = false\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintln(w)\n\t\t\t\t}\n\t\t\t\tlinePrefix = \"\"\n\t\t\t\tcolsUsed = 0\n\t\t\tcase \"li\":\n\t\t\t\tfmt.Fprint(w, \"\\n *\")\n\t\t\t\tlinePrefix = \" \"\n\t\t\t\tcolsUsed = 0\n\t\t\tcase \"a\":\n\t\t\t\tfor _, attr := range t.Attr {\n\t\t\t\t\tif attr.Name.Local == \"href\" {\n\t\t\t\t\t\tlinks = append(links, attr.Value)\n\t\t\t\t\t\tlinked = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\tswitch t.Name.Local {\n\t\t\tcase \"p\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\tcase \"ul\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\tcase \"ol\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tleft := string(t)\n\t\t\tif linked {\n\t\t\t\tleft += fmt.Sprintf(\"[%d]\", len(links)-1)\n\t\t\t\tlinked = false\n\t\t\t}\n\t\t\tlimit := 80 - len(linePrefix) - colsUsed\n\t\t\tfirstLine := true\n\t\t\tfor len(left) > limit {\n\t\t\t\tlast := 0\n\t\t\t\tfor i, c := range left {\n\t\t\t\t\tif i >= limit {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif c == ' ' {\n\t\t\t\t\t\tlast = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif firstLine {\n\t\t\t\t\tfirstLine = false\n\t\t\t\t\tlimit += colsUsed\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprint(w, linePrefix)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(w, left[:last])\n\t\t\t\tleft = left[last+1:]\n\t\t\t\tcolsUsed = 0\n\t\t\t}\n\t\t\tif !firstLine {\n\t\t\t\tfmt.Fprint(w, linePrefix)\n\t\t\t}\n\t\t\tfmt.Fprint(w, left)\n\t\t\tcolsUsed += len(left)\n\t\t}\n\t}\n\tif len(links) > 0 {\n\t\tfmt.Fprintln(w)\n\t\tfor i, link := range links {\n\t\t\tfmt.Fprintf(w, \"[%d] %s\\n\", i, link)\n\t\t}\n\t}\n}\n\n\/\/ Apk is an Android package\ntype Apk struct {\n\tVName string `xml:\"version\"`\n\tVCode int `xml:\"versioncode\"`\n\tSize int64 `xml:\"size\"`\n\tMinSdk int `xml:\"sdkver\"`\n\tMaxSdk int `xml:\"maxsdkver\"`\n\tABIs CommaList `xml:\"nativecode\"`\n\tApkName string `xml:\"apkname\"`\n\tSrcName string `xml:\"srcname\"`\n\tSig HexVal `xml:\"sig\"`\n\tAdded DateVal `xml:\"added\"`\n\tPerms CommaList `xml:\"permissions\"`\n\tFeats CommaList `xml:\"features\"`\n\tHash HexHash `xml:\"hash\"`\n\n\tApp *App `xml:\"-\"`\n\tRepo *Repo `xml:\"-\"`\n}\n\nfunc (a *Apk) URL() string {\n\treturn fmt.Sprintf(\"%s\/%s\", a.Repo.URL, a.ApkName)\n}\n\nfunc (a *Apk) SrcURL() string {\n\treturn fmt.Sprintf(\"%s\/%s\", a.Repo.URL, a.SrcName)\n}\n\nfunc (apk *Apk) IsCompatibleABI(ABIs []string) bool {\n\tif len(apk.ABIs) == 0 {\n\t\treturn true \/\/ APK does not contain native code\n\t}\n\tfor i := range apk.ABIs {\n\t\tfor j := range ABIs {\n\t\t\tif apk.ABIs[i] == ABIs[j] {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (apk *Apk) IsCompatibleAPILevel(sdk int) bool {\n\treturn sdk >= apk.MinSdk && (apk.MaxSdk == 0 || sdk <= apk.MaxSdk)\n}\n\nfunc (apk *Apk) IsCompatible(device *adb.Device) bool {\n\tif device == nil {\n\t\treturn true\n\t}\n\treturn apk.IsCompatibleABI(device.ABIs) &&\n\t\tapk.IsCompatibleAPILevel(device.APILevel)\n}\n\ntype AppList []App\n\nfunc (al AppList) Len() int { return len(al) }\nfunc (al AppList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al AppList) Less(i, j int) bool { return al[i].ID < al[j].ID }\n\ntype ApkList []Apk\n\nfunc (al ApkList) Len() int { return len(al) }\nfunc (al ApkList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al ApkList) Less(i, j int) bool { return al[i].VCode > al[j].VCode }\n\nfunc LoadIndexXML(r io.Reader) (*Index, error) {\n\tvar index Index\n\tdecoder := xml.NewDecoder(r)\n\tif err := decoder.Decode(&index); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Sort(AppList(index.Apps))\n\n\tfor i := range index.Apps {\n\t\tapp := &index.Apps[i]\n\t\tsort.Sort(ApkList(app.Apks))\n\t\tfor j := range app.Apks {\n\t\t\tapk := &app.Apks[j]\n\t\t\tapk.App = app\n\t\t\tapk.Repo = &index.Repo\n\t\t}\n\t}\n\treturn &index, nil\n}\n\nfunc (a *App) SuggestedApk(device *adb.Device) *Apk {\n\tfor i := range a.Apks {\n\t\tapk := &a.Apks[i]\n\t\tif a.CVCode >= apk.VCode && apk.IsCompatible(device) {\n\t\t\treturn apk\n\t\t}\n\t}\n\t\/\/ fall back to the first compatible apk\n\tfor i := range a.Apks {\n\t\tapk := &a.Apks[i]\n\t\tif apk.IsCompatible(device) {\n\t\t\treturn apk\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>index: simplify IsCompatibleABI<commit_after>\/\/ Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage fdroidcl\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/mvdan\/adb\"\n)\n\ntype Index struct {\n\tRepo Repo `xml:\"repo\"`\n\tApps []App `xml:\"application\"`\n}\n\ntype Repo struct {\n\tName string `xml:\"name,attr\"`\n\tPubKey string `xml:\"pubkey,attr\"`\n\tTimestamp int `xml:\"timestamp,attr\"`\n\tURL string `xml:\"url,attr\"`\n\tVersion int `xml:\"version,attr\"`\n\tMaxAge int `xml:\"maxage,attr\"`\n\tDescription string `xml:\"description\"`\n}\n\n\/\/ App is an Android application\ntype App struct {\n\tID string `xml:\"id\"`\n\tName string `xml:\"name\"`\n\tSummary string `xml:\"summary\"`\n\tAdded DateVal `xml:\"added\"`\n\tUpdated DateVal `xml:\"lastupdated\"`\n\tIcon string `xml:\"icon\"`\n\tDesc string `xml:\"desc\"`\n\tLicense string `xml:\"license\"`\n\tCategs CommaList `xml:\"categories\"`\n\tWebsite string `xml:\"web\"`\n\tSource string `xml:\"source\"`\n\tTracker string `xml:\"tracker\"`\n\tChangelog string `xml:\"changelog\"`\n\tDonate string `xml:\"donate\"`\n\tBitcoin string `xml:\"bitcoin\"`\n\tLitecoin string `xml:\"litecoin\"`\n\tFlattrID string `xml:\"flattr\"`\n\tApks []Apk `xml:\"package\"`\n\tCVName string `xml:\"marketversion\"`\n\tCVCode int `xml:\"marketvercode\"`\n}\n\ntype IconDensity uint\n\nconst (\n\tUnknownDensity IconDensity = 0\n\tLowDensity IconDensity = 120\n\tMediumDensity IconDensity = 160\n\tHighDensity IconDensity = 240\n\tXHighDensity IconDensity = 320\n\tXXHighDensity IconDensity = 480\n\tXXXHighDensity IconDensity = 640\n)\n\nfunc getIconsDir(density IconDensity) string {\n\tif density == UnknownDensity {\n\t\treturn \"icons\"\n\t}\n\tfor _, d := range [...]IconDensity{\n\t\tXXXHighDensity,\n\t\tXXHighDensity,\n\t\tXHighDensity,\n\t\tHighDensity,\n\t\tMediumDensity,\n\t} {\n\t\tif density >= d {\n\t\t\treturn fmt.Sprintf(\"icons-%d\", d)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"icons-%d\", LowDensity)\n}\n\nfunc (a *App) IconURLForDensity(density IconDensity) string {\n\tif len(a.Apks) == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\/%s\/%s\", a.Apks[0].Repo.URL,\n\t\tgetIconsDir(density), a.Icon)\n}\n\nfunc (a *App) IconURL() string {\n\treturn a.IconURLForDensity(UnknownDensity)\n}\n\nfunc (a *App) TextDesc(w io.Writer) {\n\treader := strings.NewReader(a.Desc)\n\tdecoder := xml.NewDecoder(reader)\n\tfirstParagraph := true\n\tlinePrefix := \"\"\n\tcolsUsed := 0\n\tvar links []string\n\tlinked := false\n\tfor {\n\t\ttoken, err := decoder.Token()\n\t\tif err == io.EOF || token == nil {\n\t\t\tbreak\n\t\t}\n\t\tswitch t := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch t.Name.Local {\n\t\t\tcase \"p\":\n\t\t\t\tif firstParagraph {\n\t\t\t\t\tfirstParagraph = false\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintln(w)\n\t\t\t\t}\n\t\t\t\tlinePrefix = \"\"\n\t\t\t\tcolsUsed = 0\n\t\t\tcase \"li\":\n\t\t\t\tfmt.Fprint(w, \"\\n *\")\n\t\t\t\tlinePrefix = \" \"\n\t\t\t\tcolsUsed = 0\n\t\t\tcase \"a\":\n\t\t\t\tfor _, attr := range t.Attr {\n\t\t\t\t\tif attr.Name.Local == \"href\" {\n\t\t\t\t\t\tlinks = append(links, attr.Value)\n\t\t\t\t\t\tlinked = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\tswitch t.Name.Local {\n\t\t\tcase \"p\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\tcase \"ul\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\tcase \"ol\":\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tleft := string(t)\n\t\t\tif linked {\n\t\t\t\tleft += fmt.Sprintf(\"[%d]\", len(links)-1)\n\t\t\t\tlinked = false\n\t\t\t}\n\t\t\tlimit := 80 - len(linePrefix) - colsUsed\n\t\t\tfirstLine := true\n\t\t\tfor len(left) > limit {\n\t\t\t\tlast := 0\n\t\t\t\tfor i, c := range left {\n\t\t\t\t\tif i >= limit {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif c == ' ' {\n\t\t\t\t\t\tlast = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif firstLine {\n\t\t\t\t\tfirstLine = false\n\t\t\t\t\tlimit += colsUsed\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprint(w, linePrefix)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(w, left[:last])\n\t\t\t\tleft = left[last+1:]\n\t\t\t\tcolsUsed = 0\n\t\t\t}\n\t\t\tif !firstLine {\n\t\t\t\tfmt.Fprint(w, linePrefix)\n\t\t\t}\n\t\t\tfmt.Fprint(w, left)\n\t\t\tcolsUsed += len(left)\n\t\t}\n\t}\n\tif len(links) > 0 {\n\t\tfmt.Fprintln(w)\n\t\tfor i, link := range links {\n\t\t\tfmt.Fprintf(w, \"[%d] %s\\n\", i, link)\n\t\t}\n\t}\n}\n\n\/\/ Apk is an Android package\ntype Apk struct {\n\tVName string `xml:\"version\"`\n\tVCode int `xml:\"versioncode\"`\n\tSize int64 `xml:\"size\"`\n\tMinSdk int `xml:\"sdkver\"`\n\tMaxSdk int `xml:\"maxsdkver\"`\n\tABIs CommaList `xml:\"nativecode\"`\n\tApkName string `xml:\"apkname\"`\n\tSrcName string `xml:\"srcname\"`\n\tSig HexVal `xml:\"sig\"`\n\tAdded DateVal `xml:\"added\"`\n\tPerms CommaList `xml:\"permissions\"`\n\tFeats CommaList `xml:\"features\"`\n\tHash HexHash `xml:\"hash\"`\n\n\tApp *App `xml:\"-\"`\n\tRepo *Repo `xml:\"-\"`\n}\n\nfunc (a *Apk) URL() string {\n\treturn fmt.Sprintf(\"%s\/%s\", a.Repo.URL, a.ApkName)\n}\n\nfunc (a *Apk) SrcURL() string {\n\treturn fmt.Sprintf(\"%s\/%s\", a.Repo.URL, a.SrcName)\n}\n\nfunc (apk *Apk) IsCompatibleABI(ABIs []string) bool {\n\tif len(apk.ABIs) == 0 {\n\t\treturn true \/\/ APK does not contain native code\n\t}\n\tfor _, apkABI := range apk.ABIs {\n\t\tfor _, abi := range ABIs {\n\t\t\tif apkABI == abi {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (apk *Apk) IsCompatibleAPILevel(sdk int) bool {\n\treturn sdk >= apk.MinSdk && (apk.MaxSdk == 0 || sdk <= apk.MaxSdk)\n}\n\nfunc (apk *Apk) IsCompatible(device *adb.Device) bool {\n\tif device == nil {\n\t\treturn true\n\t}\n\treturn apk.IsCompatibleABI(device.ABIs) &&\n\t\tapk.IsCompatibleAPILevel(device.APILevel)\n}\n\ntype AppList []App\n\nfunc (al AppList) Len() int { return len(al) }\nfunc (al AppList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al AppList) Less(i, j int) bool { return al[i].ID < al[j].ID }\n\ntype ApkList []Apk\n\nfunc (al ApkList) Len() int { return len(al) }\nfunc (al ApkList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al ApkList) Less(i, j int) bool { return al[i].VCode > al[j].VCode }\n\nfunc LoadIndexXML(r io.Reader) (*Index, error) {\n\tvar index Index\n\tdecoder := xml.NewDecoder(r)\n\tif err := decoder.Decode(&index); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Sort(AppList(index.Apps))\n\n\tfor i := range index.Apps {\n\t\tapp := &index.Apps[i]\n\t\tsort.Sort(ApkList(app.Apks))\n\t\tfor j := range app.Apks {\n\t\t\tapk := &app.Apks[j]\n\t\t\tapk.App = app\n\t\t\tapk.Repo = &index.Repo\n\t\t}\n\t}\n\treturn &index, nil\n}\n\nfunc (a *App) SuggestedApk(device *adb.Device) *Apk {\n\tfor i := range a.Apks {\n\t\tapk := &a.Apks[i]\n\t\tif a.CVCode >= apk.VCode && apk.IsCompatible(device) {\n\t\t\treturn apk\n\t\t}\n\t}\n\t\/\/ fall back to the first compatible apk\n\tfor i := range a.Apks {\n\t\tapk := &a.Apks[i]\n\t\tif apk.IsCompatible(device) {\n\t\t\treturn apk\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/\/ Licensed under the Apache License, Version 2.0; See LICENSE.APACHE\n\npackage symlink\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\n\/\/ TODO Windows: This needs some serious work to port to Windows. For now,\n\/\/ turning off testing in this package.\n\ntype dirOrLink struct {\n\tpath string\n\ttarget string\n}\n\nfunc makeFs(tmpdir string, fs []dirOrLink) error {\n\tfor _, s := range fs {\n\t\ts.path = filepath.Join(tmpdir, s.path)\n\t\tif s.target == \"\" {\n\t\t\t_ = os.MkdirAll(s.path, 0755)\n\t\t\tcontinue\n\t\t}\n\t\tif err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testSymlink(tmpdir, path, expected, scope string) error {\n\trewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))\n\tif err != nil {\n\t\treturn err\n\t}\n\texpected, err = filepath.Abs(filepath.Join(tmpdir, expected))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif expected != rewrite {\n\t\treturn fmt.Errorf(\"expected %q got %q\", expected, rewrite)\n\t}\n\treturn nil\n}\n\nfunc TestFollowSymlinkAbsolute(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/d\", target: \"\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/d\/c\/data\", \"testdata\/b\/c\/data\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativePath(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/i\", target: \"a\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/i\", \"testdata\/fs\/a\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"linkdir\", target: \"realdir\"},\n\t\t{path: \"linkdir\/foo\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"linkdir\/foo\/bar\", \"linkdir\/foo\/bar\", \"linkdir\/foo\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkInvalidScopePathPair(t *testing.T) {\n\tif _, err := FollowSymlinkInScope(\"toto\", \"testdata\"); err == nil {\n\t\tt.Fatal(\"expected an error\")\n\t}\n}\n\nfunc TestFollowSymlinkLastLink(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/d\", target: \"\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/d\", \"testdata\/b\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/e\", target: \"..\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/e\/c\/data\", \"testdata\/fs\/b\/c\/data\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting allowing symlink e lead us to ..\/b\n\t\/\/ normalize to the \"testdata\/fs\/a\"\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/e\", \"testdata\/fs\/a\/b\", \"testdata\/fs\/a\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/f\", target: \"..\/..\/..\/..\/test\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting symlink f lead us out of the \"testdata\" scope\n\t\/\/ we don't normalize because symlink f is in scope and there is no\n\t\/\/ information leak\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/f\", \"testdata\/test\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting symlink f lead us out of the \"testdata\/fs\" scope\n\t\/\/ we don't normalize because symlink f is in scope and there is no\n\t\/\/ information leak\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/f\", \"testdata\/fs\/test\", \"testdata\/fs\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativeLinkChain(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\t\/\/ avoid letting symlink g (pointed at by symlink h) take out of scope\n\t\/\/ TODO: we should probably normalize to scope here because ..\/[....]\/root\n\t\/\/ is out of scope and we leak information\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"testdata\/fs\/b\/h\", target: \"..\/g\"},\n\t\t{path: \"testdata\/fs\/g\", target: \"..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/root\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/b\/h\", \"testdata\/root\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkBreakoutPath(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\t\/\/ avoid letting symlink -> ..\/directory\/file escape from scope\n\t\/\/ normalize to \"testdata\/fs\/j\"\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/j\/k\", target: \"..\/i\/a\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/j\/k\", \"testdata\/fs\/j\/i\/a\", \"testdata\/fs\/j\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkToRoot(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\t\/\/ make sure we don't allow escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkSlashDotdot(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\ttmpdir = filepath.Join(tmpdir, \"dir\", \"subdir\")\n\n\t\/\/ make sure we don't allow escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"\/..\/..\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkDotdot(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\ttmpdir = filepath.Join(tmpdir, \"dir\", \"subdir\")\n\n\t\/\/ make sure we stay in scope without leaking information\n\t\/\/ this also checks for escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"..\/..\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativePath2(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"bar\/foo\", target: \"baz\/target\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"bar\/foo\", \"bar\/baz\/target\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkScopeLink(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root2\"},\n\t\t{path: \"root\", target: \"root2\"},\n\t\t{path: \"root2\/foo\", target: \"..\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"root\/bar\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRootScope(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\texpected, err := filepath.EvalSymlinks(tmpdir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trewrite, err := FollowSymlinkInScope(tmpdir, \"\/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif rewrite != expected {\n\t\tt.Fatalf(\"expected %q got %q\", expected, rewrite)\n\t}\n}\n\nfunc TestFollowSymlinkEmpty(t *testing.T) {\n\tres, err := FollowSymlinkInScope(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res != wd {\n\t\tt.Fatalf(\"expected %q got %q\", wd, res)\n\t}\n}\n\nfunc TestFollowSymlinkCircular(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"root\/foo\", target: \"foo\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"\", \"root\"); err == nil {\n\t\tt.Fatal(\"expected an error for foo -> foo\")\n\t}\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/bar\", target: \"baz\"},\n\t\t{path: \"root\/baz\", target: \"..\/bak\"},\n\t\t{path: \"root\/bak\", target: \"\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"\", \"root\"); err == nil {\n\t\tt.Fatal(\"expected an error for bar -> baz -> bak -> bar\")\n\t}\n}\n\nfunc TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root2\"},\n\t\t{path: \"root\", target: \"root2\"},\n\t\t{path: \"root\/a\", target: \"r\/s\"},\n\t\t{path: \"root\/r\", target: \"..\/root\/t\"},\n\t\t{path: \"root\/root\/t\/s\/b\", target: \"\/..\/u\"},\n\t\t{path: \"root\/u\/c\", target: \".\"},\n\t\t{path: \"root\/u\/x\/y\", target: \"..\/v\"},\n\t\t{path: \"root\/u\/v\", target: \"\/..\/w\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/a\/b\/c\/x\/y\/z\", \"root\/w\/z\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkBreakoutNonExistent(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/slash\", target: \"\/\"},\n\t\t{path: \"root\/sym\", target: \"\/idontexist\/..\/slash\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/sym\/file\", \"root\/file\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkNoLexicalCleaning(t *testing.T) {\n\ttmpdir := mkTempDir(t)\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/sym\", target: \"\/foo\/bar\"},\n\t\t{path: \"root\/hello\", target: \"\/sym\/..\/baz\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/hello\", \"root\/foo\/baz\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n\/\/ TODO use testing.TempDir() instead (https:\/\/golang.org\/pkg\/testing\/#T.TempDir)\n\/\/ once we no longer test on Go 1.14 and older.\nfunc mkTempDir(t *testing.T) string {\n\tt.Helper()\n\ttmpdir, err := ioutil.TempDir(\"\", t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Cleanup(func() { _ = os.RemoveAll(tmpdir) })\n\treturn tmpdir\n}\n<commit_msg>symlink: use t.TempDir in tests<commit_after>\/\/go:build !windows\n\/\/ +build !windows\n\n\/\/ Licensed under the Apache License, Version 2.0; See LICENSE.APACHE\n\npackage symlink\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\n\/\/ TODO Windows: This needs some serious work to port to Windows. For now,\n\/\/ turning off testing in this package.\n\ntype dirOrLink struct {\n\tpath string\n\ttarget string\n}\n\nfunc makeFs(tmpdir string, fs []dirOrLink) error {\n\tfor _, s := range fs {\n\t\ts.path = filepath.Join(tmpdir, s.path)\n\t\tif s.target == \"\" {\n\t\t\t_ = os.MkdirAll(s.path, 0755)\n\t\t\tcontinue\n\t\t}\n\t\tif err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testSymlink(tmpdir, path, expected, scope string) error {\n\trewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))\n\tif err != nil {\n\t\treturn err\n\t}\n\texpected, err = filepath.Abs(filepath.Join(tmpdir, expected))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif expected != rewrite {\n\t\treturn fmt.Errorf(\"expected %q got %q\", expected, rewrite)\n\t}\n\treturn nil\n}\n\nfunc TestFollowSymlinkAbsolute(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/d\", target: \"\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/d\/c\/data\", \"testdata\/b\/c\/data\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativePath(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/i\", target: \"a\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/i\", \"testdata\/fs\/a\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"linkdir\", target: \"realdir\"},\n\t\t{path: \"linkdir\/foo\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"linkdir\/foo\/bar\", \"linkdir\/foo\/bar\", \"linkdir\/foo\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkInvalidScopePathPair(t *testing.T) {\n\tif _, err := FollowSymlinkInScope(\"toto\", \"testdata\"); err == nil {\n\t\tt.Fatal(\"expected an error\")\n\t}\n}\n\nfunc TestFollowSymlinkLastLink(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/d\", target: \"\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/d\", \"testdata\/b\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/e\", target: \"..\/b\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/e\/c\/data\", \"testdata\/fs\/b\/c\/data\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting allowing symlink e lead us to ..\/b\n\t\/\/ normalize to the \"testdata\/fs\/a\"\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/e\", \"testdata\/fs\/a\/b\", \"testdata\/fs\/a\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/a\/f\", target: \"..\/..\/..\/..\/test\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting symlink f lead us out of the \"testdata\" scope\n\t\/\/ we don't normalize because symlink f is in scope and there is no\n\t\/\/ information leak\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/f\", \"testdata\/test\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ avoid letting symlink f lead us out of the \"testdata\/fs\" scope\n\t\/\/ we don't normalize because symlink f is in scope and there is no\n\t\/\/ information leak\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/a\/f\", \"testdata\/fs\/test\", \"testdata\/fs\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativeLinkChain(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\t\/\/ avoid letting symlink g (pointed at by symlink h) take out of scope\n\t\/\/ TODO: we should probably normalize to scope here because ..\/[....]\/root\n\t\/\/ is out of scope and we leak information\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"testdata\/fs\/b\/h\", target: \"..\/g\"},\n\t\t{path: \"testdata\/fs\/g\", target: \"..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/..\/root\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/b\/h\", \"testdata\/root\", \"testdata\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkBreakoutPath(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\t\/\/ avoid letting symlink -> ..\/directory\/file escape from scope\n\t\/\/ normalize to \"testdata\/fs\/j\"\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"testdata\/fs\/j\/k\", target: \"..\/i\/a\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"testdata\/fs\/j\/k\", \"testdata\/fs\/j\/i\/a\", \"testdata\/fs\/j\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkToRoot(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\t\/\/ make sure we don't allow escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkSlashDotdot(t *testing.T) {\n\ttmpdir := t.TempDir()\n\ttmpdir = filepath.Join(tmpdir, \"dir\", \"subdir\")\n\n\t\/\/ make sure we don't allow escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"\/..\/..\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkDotdot(t *testing.T) {\n\ttmpdir := t.TempDir()\n\ttmpdir = filepath.Join(tmpdir, \"dir\", \"subdir\")\n\n\t\/\/ make sure we stay in scope without leaking information\n\t\/\/ this also checks for escaping to \/\n\t\/\/ normalize to dir\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"foo\", target: \"..\/..\/\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"foo\", \"\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRelativePath2(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"bar\/foo\", target: \"baz\/target\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"bar\/foo\", \"bar\/baz\/target\", \"\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkScopeLink(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root2\"},\n\t\t{path: \"root\", target: \"root2\"},\n\t\t{path: \"root2\/foo\", target: \"..\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"root\/bar\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkRootScope(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\texpected, err := filepath.EvalSymlinks(tmpdir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trewrite, err := FollowSymlinkInScope(tmpdir, \"\/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif rewrite != expected {\n\t\tt.Fatalf(\"expected %q got %q\", expected, rewrite)\n\t}\n}\n\nfunc TestFollowSymlinkEmpty(t *testing.T) {\n\tres, err := FollowSymlinkInScope(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res != wd {\n\t\tt.Fatalf(\"expected %q got %q\", wd, res)\n\t}\n}\n\nfunc TestFollowSymlinkCircular(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{{path: \"root\/foo\", target: \"foo\"}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"\", \"root\"); err == nil {\n\t\tt.Fatal(\"expected an error for foo -> foo\")\n\t}\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/bar\", target: \"baz\"},\n\t\t{path: \"root\/baz\", target: \"..\/bak\"},\n\t\t{path: \"root\/bak\", target: \"\/bar\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/foo\", \"\", \"root\"); err == nil {\n\t\tt.Fatal(\"expected an error for bar -> baz -> bak -> bar\")\n\t}\n}\n\nfunc TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root2\"},\n\t\t{path: \"root\", target: \"root2\"},\n\t\t{path: \"root\/a\", target: \"r\/s\"},\n\t\t{path: \"root\/r\", target: \"..\/root\/t\"},\n\t\t{path: \"root\/root\/t\/s\/b\", target: \"\/..\/u\"},\n\t\t{path: \"root\/u\/c\", target: \".\"},\n\t\t{path: \"root\/u\/x\/y\", target: \"..\/v\"},\n\t\t{path: \"root\/u\/v\", target: \"\/..\/w\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/a\/b\/c\/x\/y\/z\", \"root\/w\/z\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkBreakoutNonExistent(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/slash\", target: \"\/\"},\n\t\t{path: \"root\/sym\", target: \"\/idontexist\/..\/slash\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/sym\/file\", \"root\/file\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestFollowSymlinkNoLexicalCleaning(t *testing.T) {\n\ttmpdir := t.TempDir()\n\n\tif err := makeFs(tmpdir, []dirOrLink{\n\t\t{path: \"root\/sym\", target: \"\/foo\/bar\"},\n\t\t{path: \"root\/hello\", target: \"\/sym\/..\/baz\"},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := testSymlink(tmpdir, \"root\/hello\", \"root\/foo\/baz\", \"root\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fftw\n\n\/\/ #include <fftw3.h>\nimport \"C\"\n\nimport (\n \"runtime\"\n \"unsafe\"\n)\n\ntype plan struct {\n fftw_p C.fftw_plan\n}\nfunc destroyPlan(p *plan) {\n C.fftw_destroy_plan(p.fftw_p)\n}\nfunc newPlan(fftw_p C.fftw_plan) *plan {\n np := new(plan)\n np.fftw_p = fftw_p\n runtime.SetFinalizer(np, destroyPlan)\n return np\n}\nfunc (p *plan) Execute() {\n C.fftw_execute(p.fftw_p)\n}\n\n\ntype Direction int\nvar Forward Direction = C.FFTW_FORWARD\nvar Backward Direction = C.FFTW_BACKWARD\n\nfunc Alloc1d(n int) []complex128 {\n return make([]complex128, n)\n}\nfunc Alloc2d(n0,n1 int) [][]complex128 {\n a := make([]complex128, n0*n1)\n r := make([][]complex128, n0)\n for i := range r {\n r[i] = a[i*n1 : (i+1)*n1]\n }\n return r\n}\n\n\nfunc PlanDft1d(in,out []complex128, dir Direction) *plan {\n \/\/ TODO: check that len(in) == len(out)\n fftw_in := (*C.fftw_complex)((unsafe.Pointer)(&in[0]))\n fftw_out := (*C.fftw_complex)((unsafe.Pointer)(&out[0]))\n p := C.fftw_plan_dft_1d((C.int)(len(in)), fftw_in, fftw_out, C.int(dir), C.FFTW_ESTIMATE)\n return newPlan(p)\n}\n\nfunc PlanDft2d(in,out [][]complex128, dir Direction) *plan {\n \/\/ TODO: check that in and out have the same dimensions\n fftw_in := (*C.fftw_complex)((unsafe.Pointer)(&in[0][0]))\n fftw_out := (*C.fftw_complex)((unsafe.Pointer)(&out[0][0]))\n p := C.fftw_plan_dft_2d((C.int)(len(in)), (C.int)(len(in[0])), fftw_in, fftw_out, C.int(dir), C.FFTW_ESTIMATE)\n return newPlan(p)\n}\n\n<commit_msg>Made FFTW_ESTIMATE and FFTW_MEASURE available<commit_after>package fftw\n\n\/\/ #include <fftw3.h>\nimport \"C\"\n\nimport (\n \"runtime\"\n \"unsafe\"\n)\n\ntype plan struct {\n fftw_p C.fftw_plan\n}\nfunc destroyPlan(p *plan) {\n C.fftw_destroy_plan(p.fftw_p)\n}\nfunc newPlan(fftw_p C.fftw_plan) *plan {\n np := new(plan)\n np.fftw_p = fftw_p\n runtime.SetFinalizer(np, destroyPlan)\n return np\n}\nfunc (p *plan) Execute() {\n C.fftw_execute(p.fftw_p)\n}\n\n\ntype Direction int\nvar Forward Direction = C.FFTW_FORWARD\nvar Backward Direction = C.FFTW_BACKWARD\n\ntype Flag uint\nvar Estimate Flag = C.FFTW_ESTIMATE\nvar Measure Flag = C.FFTW_MEASURE\n\nfunc Alloc1d(n int) []complex128 {\n return make([]complex128, n)\n}\nfunc Alloc2d(n0,n1 int) [][]complex128 {\n a := make([]complex128, n0*n1)\n r := make([][]complex128, n0)\n for i := range r {\n r[i] = a[i*n1 : (i+1)*n1]\n }\n return r\n}\n\n\nfunc PlanDft1d(in,out []complex128, dir Direction, flag Flag) *plan {\n \/\/ TODO: check that len(in) == len(out)\n fftw_in := (*C.fftw_complex)((unsafe.Pointer)(&in[0]))\n fftw_out := (*C.fftw_complex)((unsafe.Pointer)(&out[0]))\n p := C.fftw_plan_dft_1d((C.int)(len(in)), fftw_in, fftw_out, C.int(dir), C.uint(flag))\n return newPlan(p)\n}\n\nfunc PlanDft2d(in,out [][]complex128, dir Direction, flag Flag) *plan {\n \/\/ TODO: check that in and out have the same dimensions\n fftw_in := (*C.fftw_complex)((unsafe.Pointer)(&in[0][0]))\n fftw_out := (*C.fftw_complex)((unsafe.Pointer)(&out[0][0]))\n p := C.fftw_plan_dft_2d((C.int)(len(in)), (C.int)(len(in[0])), fftw_in, fftw_out, C.int(dir), C.uint(flag))\n return newPlan(p)\n}\n\n<|endoftext|>"} {"text":"<commit_before>package bitrise\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise\/configs\"\n\t\"github.com\/bitrise-io\/bitrise\/toolkits\"\n\t\"github.com\/bitrise-io\/go-utils\/colorstring\"\n)\n\nconst (\n\tminEnvmanVersion = \"1.1.1\"\n\tminStepmanVersion = \"0.9.23\"\n)\n\n\/\/ PluginDependency ..\ntype PluginDependency struct {\n\tSource string\n\tBinary string\n\tMinVersion string\n}\n\n\/\/ OSXPluginDependencyMap ...\nvar OSXPluginDependencyMap = map[string]PluginDependency{\n\t\"analytics\": PluginDependency{\n\t\tSource: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics.git\",\n\t\tBinary: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics\/releases\/download\/0.9.4\/analytics-Darwin-x86_64\",\n\t\tMinVersion: \"0.9.4\",\n\t},\n}\n\n\/\/ LinuxPluginDependencyMap ...\nvar LinuxPluginDependencyMap = map[string]PluginDependency{\n\t\"analytics\": PluginDependency{\n\t\tSource: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics.git\",\n\t\tBinary: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics\/releases\/download\/0.9.4\/analytics-Linux-x86_64\",\n\t\tMinVersion: \"0.9.4\",\n\t},\n}\n\n\/\/ RunSetup ...\nfunc RunSetup(appVersion string, isFullSetupMode bool) error {\n\tlog.Infoln(\"Setup\")\n\tlog.Infof(\"Full setup: %v\", isFullSetupMode)\n\tlog.Infoln(\"Detected OS:\", runtime.GOOS)\n\n\tif err := doSetupBitriseCoreTools(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to do common\/platform independent setup, error: %s\", err)\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tif err := doSetupOnOSX(isFullSetupMode); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to do MacOS specific setup, error: %s\", err)\n\t\t}\n\tcase \"linux\":\n\t\tif err := doSetupOnLinux(); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to do Linux specific setup, error: %s\", err)\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unsupported platform :(\")\n\t}\n\n\tif err := doSetupToolkits(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to do Toolkits setup, error: %s\", err)\n\t}\n\n\tlog.Infoln(\"All the required tools are installed!\")\n\n\tif err := configs.SaveSetupSuccessForVersion(appVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to save setup-success into config file, error: %s\", err)\n\t}\n\n\t\/\/ guide\n\tlog.Infoln(\"We're ready to rock!!\")\n\tfmt.Println()\n\n\treturn nil\n}\n\nfunc doSetupToolkits() error {\n\tlog.Infoln(\"Checking Bitrise Toolkits...\")\n\n\tcoreToolkits := toolkits.AllSupportedToolkits()\n\n\tfor _, aCoreTK := range coreToolkits {\n\t\ttoolkitName := aCoreTK.ToolkitName()\n\t\tisInstallRequired, checkResult, err := aCoreTK.Check()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to perform toolkit check (%s), error: %s\", toolkitName, err)\n\t\t}\n\n\t\tif isInstallRequired {\n\t\t\tlog.Infoln(\"No installed\/suitable '\" + toolkitName + \"' found, installing toolkit ...\")\n\t\t\tif err := aCoreTK.Install(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to install toolkit (%s), error: %s\", toolkitName, err)\n\t\t\t}\n\n\t\t\tisInstallRequired, checkResult, err = aCoreTK.Check()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to perform toolkit check (%s), error: %s\", toolkitName, err)\n\t\t\t}\n\t\t}\n\t\tif isInstallRequired {\n\t\t\treturn fmt.Errorf(\"Toolkit (%s) still reports that it isn't (properly) installed\", toolkitName)\n\t\t}\n\n\t\tlog.Infoln(\" * \"+colorstring.Green(\"[OK]\")+\" \"+toolkitName+\" :\", checkResult.Path)\n\t\tlog.Infoln(\" version :\", checkResult.Version)\n\t}\n\n\treturn nil\n}\n\nfunc doSetupBitriseCoreTools() error {\n\tlog.Infoln(\"Checking Bitrise Core tools...\")\n\n\tif err := CheckIsEnvmanInstalled(minEnvmanVersion); err != nil {\n\t\treturn fmt.Errorf(\"Envman failed to install: %s\", err)\n\t}\n\tif err := CheckIsStepmanInstalled(minStepmanVersion); err != nil {\n\t\treturn fmt.Errorf(\"Stepman failed to install: %s\", err)\n\t}\n\treturn nil\n}\n\nfunc doSetupOnOSX(isMinimalSetupMode bool) error {\n\tlog.Infoln(\"Doing OS X specific setup\")\n\tlog.Infoln(\"Checking required tools...\")\n\tif err := CheckIsHomebrewInstalled(isMinimalSetupMode); err != nil {\n\t\treturn errors.New(fmt.Sprint(\"Homebrew not installed or has some issues. Please fix these before calling setup again. Err:\", err))\n\t}\n\n\tif err := PrintInstalledXcodeInfos(); err != nil {\n\t\treturn errors.New(fmt.Sprint(\"Failed to detect installed Xcode and Xcode Command Line Tools infos. Err:\", err))\n\t}\n\t\/\/ if err := CheckIsXcodeCLTInstalled(); err != nil {\n\t\/\/ \treturn errors.New(fmt.Sprint(\"Xcode Command Line Tools not installed. Err:\", err))\n\t\/\/ }\n\t\/\/ if err := checkIsAnsibleInstalled(); err != nil {\n\t\/\/ \treturn errors.New(\"Ansible failed to install\")\n\t\/\/ }\n\n\tfor pluginName, pluginDependency := range OSXPluginDependencyMap {\n\t\tif err := CheckIsPluginInstalled(pluginName, pluginDependency); err != nil {\n\t\t\treturn fmt.Errorf(\"Plugin (%s) failed to install: %s\", pluginName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc doSetupOnLinux() error {\n\tlog.Infoln(\"Doing Linux specific setup\")\n\tlog.Infoln(\"Checking required tools...\")\n\n\tfor pluginName, pluginDependency := range LinuxPluginDependencyMap {\n\t\tif err := CheckIsPluginInstalled(pluginName, pluginDependency); err != nil {\n\t\t\treturn fmt.Errorf(\"Plugin (%s) failed to install: %s\", pluginName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>stepman version: 0.9.24, analitics version: 0.9.5 (#423)<commit_after>package bitrise\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise\/configs\"\n\t\"github.com\/bitrise-io\/bitrise\/toolkits\"\n\t\"github.com\/bitrise-io\/go-utils\/colorstring\"\n)\n\nconst (\n\tminEnvmanVersion = \"1.1.1\"\n\tminStepmanVersion = \"0.9.24\"\n)\n\n\/\/ PluginDependency ..\ntype PluginDependency struct {\n\tSource string\n\tBinary string\n\tMinVersion string\n}\n\n\/\/ OSXPluginDependencyMap ...\nvar OSXPluginDependencyMap = map[string]PluginDependency{\n\t\"analytics\": PluginDependency{\n\t\tSource: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics.git\",\n\t\tBinary: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics\/releases\/download\/0.9.5\/analytics-Darwin-x86_64\",\n\t\tMinVersion: \"0.9.5\",\n\t},\n}\n\n\/\/ LinuxPluginDependencyMap ...\nvar LinuxPluginDependencyMap = map[string]PluginDependency{\n\t\"analytics\": PluginDependency{\n\t\tSource: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics.git\",\n\t\tBinary: \"https:\/\/github.com\/bitrise-core\/bitrise-plugins-analytics\/releases\/download\/0.9.5\/analytics-Linux-x86_64\",\n\t\tMinVersion: \"0.9.5\",\n\t},\n}\n\n\/\/ RunSetup ...\nfunc RunSetup(appVersion string, isFullSetupMode bool) error {\n\tlog.Infoln(\"Setup\")\n\tlog.Infof(\"Full setup: %v\", isFullSetupMode)\n\tlog.Infoln(\"Detected OS:\", runtime.GOOS)\n\n\tif err := doSetupBitriseCoreTools(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to do common\/platform independent setup, error: %s\", err)\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tif err := doSetupOnOSX(isFullSetupMode); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to do MacOS specific setup, error: %s\", err)\n\t\t}\n\tcase \"linux\":\n\t\tif err := doSetupOnLinux(); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to do Linux specific setup, error: %s\", err)\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unsupported platform :(\")\n\t}\n\n\tif err := doSetupToolkits(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to do Toolkits setup, error: %s\", err)\n\t}\n\n\tlog.Infoln(\"All the required tools are installed!\")\n\n\tif err := configs.SaveSetupSuccessForVersion(appVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to save setup-success into config file, error: %s\", err)\n\t}\n\n\t\/\/ guide\n\tlog.Infoln(\"We're ready to rock!!\")\n\tfmt.Println()\n\n\treturn nil\n}\n\nfunc doSetupToolkits() error {\n\tlog.Infoln(\"Checking Bitrise Toolkits...\")\n\n\tcoreToolkits := toolkits.AllSupportedToolkits()\n\n\tfor _, aCoreTK := range coreToolkits {\n\t\ttoolkitName := aCoreTK.ToolkitName()\n\t\tisInstallRequired, checkResult, err := aCoreTK.Check()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to perform toolkit check (%s), error: %s\", toolkitName, err)\n\t\t}\n\n\t\tif isInstallRequired {\n\t\t\tlog.Infoln(\"No installed\/suitable '\" + toolkitName + \"' found, installing toolkit ...\")\n\t\t\tif err := aCoreTK.Install(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to install toolkit (%s), error: %s\", toolkitName, err)\n\t\t\t}\n\n\t\t\tisInstallRequired, checkResult, err = aCoreTK.Check()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to perform toolkit check (%s), error: %s\", toolkitName, err)\n\t\t\t}\n\t\t}\n\t\tif isInstallRequired {\n\t\t\treturn fmt.Errorf(\"Toolkit (%s) still reports that it isn't (properly) installed\", toolkitName)\n\t\t}\n\n\t\tlog.Infoln(\" * \"+colorstring.Green(\"[OK]\")+\" \"+toolkitName+\" :\", checkResult.Path)\n\t\tlog.Infoln(\" version :\", checkResult.Version)\n\t}\n\n\treturn nil\n}\n\nfunc doSetupBitriseCoreTools() error {\n\tlog.Infoln(\"Checking Bitrise Core tools...\")\n\n\tif err := CheckIsEnvmanInstalled(minEnvmanVersion); err != nil {\n\t\treturn fmt.Errorf(\"Envman failed to install: %s\", err)\n\t}\n\tif err := CheckIsStepmanInstalled(minStepmanVersion); err != nil {\n\t\treturn fmt.Errorf(\"Stepman failed to install: %s\", err)\n\t}\n\treturn nil\n}\n\nfunc doSetupOnOSX(isMinimalSetupMode bool) error {\n\tlog.Infoln(\"Doing OS X specific setup\")\n\tlog.Infoln(\"Checking required tools...\")\n\tif err := CheckIsHomebrewInstalled(isMinimalSetupMode); err != nil {\n\t\treturn errors.New(fmt.Sprint(\"Homebrew not installed or has some issues. Please fix these before calling setup again. Err:\", err))\n\t}\n\n\tif err := PrintInstalledXcodeInfos(); err != nil {\n\t\treturn errors.New(fmt.Sprint(\"Failed to detect installed Xcode and Xcode Command Line Tools infos. Err:\", err))\n\t}\n\t\/\/ if err := CheckIsXcodeCLTInstalled(); err != nil {\n\t\/\/ \treturn errors.New(fmt.Sprint(\"Xcode Command Line Tools not installed. Err:\", err))\n\t\/\/ }\n\t\/\/ if err := checkIsAnsibleInstalled(); err != nil {\n\t\/\/ \treturn errors.New(\"Ansible failed to install\")\n\t\/\/ }\n\n\tfor pluginName, pluginDependency := range OSXPluginDependencyMap {\n\t\tif err := CheckIsPluginInstalled(pluginName, pluginDependency); err != nil {\n\t\t\treturn fmt.Errorf(\"Plugin (%s) failed to install: %s\", pluginName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc doSetupOnLinux() error {\n\tlog.Infoln(\"Doing Linux specific setup\")\n\tlog.Infoln(\"Checking required tools...\")\n\n\tfor pluginName, pluginDependency := range LinuxPluginDependencyMap {\n\t\tif err := CheckIsPluginInstalled(pluginName, pluginDependency); err != nil {\n\t\t\treturn fmt.Errorf(\"Plugin (%s) failed to install: %s\", pluginName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package blocks\n\nimport (\n\t\"errors\"\n\t\"github.com\/bitly\/go-simplejson\"\n)\n\nconst (\n\tCREATE_OUT_CHAN = iota\n\tDELETE_OUT_CHAN = iota\n)\n\n\/\/ Block is the basic interface for processing units in streamtools\ntype BlockTemplate struct {\n\tBlockType string\n\tRouteNames []string\n\t\/\/ BlockRoutine is the central processing routine for a block. All the work gets done in here\n\tRoutine BlockRoutine\n}\n\ntype Block struct {\n\tBlockType string\n\tID string\n\tInChan chan *simplejson.Json\n\tOutChans map[string]chan *simplejson.Json\n\tRoutes map[string]chan RouteResponse\n\tAddChan chan *OutChanMsg\n}\n\ntype OutChanMsg struct {\n\tAction int\n\tOutChan chan *simplejson.Json\n\tID string\n}\n\ntype BlockRoutine func(*Block)\n\n\/\/ RouteResponse is passed into a block to query via established handlers\ntype RouteResponse struct {\n\tMsg string\n\tResponseChan chan string\n}\n\nfunc NewBlock(name string, ID string) (*Block, error) {\n\troutes := make(map[string]chan RouteResponse)\n\n\tif _, ok := Library[name]; !ok {\n\t\treturn nil, errors.New(\"cannot find \" + name + \" in the Library\")\n\t}\n\n\tfor _, name := range Library[name].RouteNames {\n\t\troutes[name] = make(chan RouteResponse)\n\t}\n\n\tb := &Block{\n\t\tBlockType: name,\n\t\tID: ID,\n\t\tInChan: make(chan *simplejson.Json),\n\t\tRoutes: routes,\n\t\tAddChan: make(chan *OutChanMsg),\n\t}\n\n\treturn b, nil\n}\n<commit_msg>comments<commit_after>package blocks\n\nimport (\n\t\"errors\"\n\t\"github.com\/bitly\/go-simplejson\"\n)\n\nconst (\n\tCREATE_OUT_CHAN = iota\n\tDELETE_OUT_CHAN = iota\n)\n\n\/\/ Block is the basic interface for processing units in streamtools\ntype BlockTemplate struct {\n\tBlockType string\n\tRouteNames []string\n\t\/\/ BlockRoutine is the central processing routine for a block. All the work gets done in here\n\tRoutine BlockRoutine\n}\n\ntype Block struct {\n\tBlockType string\n\tID string\n\tInChan chan *simplejson.Json\n\tOutChans map[string]chan *simplejson.Json\n\tRoutes map[string]chan RouteResponse\n\tAddChan chan *OutChanMsg\n}\n\ntype OutChanMsg struct {\n\t\/\/ type of action to perform\n\tAction int\n\t\/\/ new channel to introduce to a block's outChan array\n\tOutChan chan *simplejson.Json\n\t\/\/ ID of the connection block\n\tID string\n}\n\ntype BlockRoutine func(*Block)\n\n\/\/ RouteResponse is passed into a block to query via established handlers\ntype RouteResponse struct {\n\tMsg string\n\tResponseChan chan string\n}\n\nfunc NewBlock(name string, ID string) (*Block, error) {\n\troutes := make(map[string]chan RouteResponse)\n\n\tif _, ok := Library[name]; !ok {\n\t\treturn nil, errors.New(\"cannot find \" + name + \" in the Library\")\n\t}\n\n\tfor _, name := range Library[name].RouteNames {\n\t\troutes[name] = make(chan RouteResponse)\n\t}\n\n\tb := &Block{\n\t\tBlockType: name,\n\t\tID: ID,\n\t\tInChan: make(chan *simplejson.Json),\n\t\tRoutes: routes,\n\t\tAddChan: make(chan *OutChanMsg),\n\t}\n\n\treturn b, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage policy\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nconst (\n\ttestTagPairSeparator = \",\"\n\ttestTagValueSeparator = \"=\"\n)\n\ntype testFilterData struct {\n\tid string\n\tmatch bool\n}\n\ntype testTagPair struct {\n\tname string\n\tvalue string\n}\n\ntype testSortedTagIterator struct {\n\tidx int\n\terr error\n\tpairs []testTagPair\n}\n\nfunc idToTestTagPairs(id string) []testTagPair {\n\ttagPairs := strings.Split(id, testTagPairSeparator)\n\tvar pairs []testTagPair\n\tfor _, pair := range tagPairs {\n\t\tp := strings.Split(pair, testTagValueSeparator)\n\t\tpairs = append(pairs, testTagPair{name: p[0], value: p[1]})\n\t}\n\treturn pairs\n}\n\nfunc newTestSortedTagIterator(id string) SortedTagIterator {\n\tpairs := idToTestTagPairs(id)\n\treturn &testSortedTagIterator{idx: -1, pairs: pairs}\n}\n\nfunc (it *testSortedTagIterator) Next() bool {\n\tif it.err != nil || it.idx >= len(it.pairs) {\n\t\treturn false\n\t}\n\tit.idx++\n\treturn it.err == nil && it.idx < len(it.pairs)\n}\n\nfunc (it *testSortedTagIterator) Current() (string, string) {\n\treturn it.pairs[it.idx].name, it.pairs[it.idx].value\n}\n\nfunc (it *testSortedTagIterator) Err() error {\n\treturn it.err\n}\n\nfunc (it *testSortedTagIterator) Close() {}\n\nfunc TestEqualityFilter(t *testing.T) {\n\tinputs := []testFilterData{\n\t\t{id: \"foo\", match: true},\n\t\t{id: \"fo\", match: false},\n\t\t{id: \"foob\", match: false},\n\t}\n\tf := newEqualityFilter(\"foo\")\n\tfor _, input := range inputs {\n\t\trequire.Equal(t, input.match, f.Matches(input.id))\n\t}\n}\n\nfunc TestEmptyTagsFilterMatches(t *testing.T) {\n\tf := newTagsFilter(nil, newTestSortedTagIterator)\n\trequire.True(t, f.Matches(\"foo\"))\n}\n\nfunc TestTagsFilterMatches(t *testing.T) {\n\tfilters := map[string]string{\n\t\t\"tagName1\": \"tagValue1\",\n\t\t\"tagName2\": \"tagValue2\",\n\t}\n\tf := newTagsFilter(filters, newTestSortedTagIterator)\n\tinputs := []testFilterData{\n\t\t{id: \"tagName1=tagValue1,tagName2=tagValue2\", match: true},\n\t\t{id: \"tagName0=tagValue0,tagName1=tagValue1,tagName2=tagValue2,tagName3=tagValue3\", match: true},\n\t\t{id: \"tagName1=tagValue1\", match: false},\n\t\t{id: \"tagName1=tagValue1\", match: false},\n\t\t{id: \"tagName2=tagValue2\", match: false},\n\t\t{id: \"tagName1=tagValue2,tagName2=tagValue1\", match: false},\n\t}\n\tfor _, input := range inputs {\n\t\trequire.Equal(t, input.match, f.Matches(input.id))\n\t}\n}\n\nfunc TestTagsFilterString(t *testing.T) {\n\tfilters := map[string]string{\n\t\t\"tagName1\": \"tagValue1\",\n\t\t\"tagName2\": \"tagValue2\",\n\t}\n\tf := newTagsFilter(filters, newTestSortedTagIterator)\n\trequire.Equal(t, `tagName1:Equals(\"tagValue1\") && tagName2:Equals(\"tagValue2\")`, f.String())\n}\n<commit_msg>Remove a duplicate test input<commit_after>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage policy\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nconst (\n\ttestTagPairSeparator = \",\"\n\ttestTagValueSeparator = \"=\"\n)\n\ntype testFilterData struct {\n\tid string\n\tmatch bool\n}\n\ntype testTagPair struct {\n\tname string\n\tvalue string\n}\n\ntype testSortedTagIterator struct {\n\tidx int\n\terr error\n\tpairs []testTagPair\n}\n\nfunc idToTestTagPairs(id string) []testTagPair {\n\ttagPairs := strings.Split(id, testTagPairSeparator)\n\tvar pairs []testTagPair\n\tfor _, pair := range tagPairs {\n\t\tp := strings.Split(pair, testTagValueSeparator)\n\t\tpairs = append(pairs, testTagPair{name: p[0], value: p[1]})\n\t}\n\treturn pairs\n}\n\nfunc newTestSortedTagIterator(id string) SortedTagIterator {\n\tpairs := idToTestTagPairs(id)\n\treturn &testSortedTagIterator{idx: -1, pairs: pairs}\n}\n\nfunc (it *testSortedTagIterator) Next() bool {\n\tif it.err != nil || it.idx >= len(it.pairs) {\n\t\treturn false\n\t}\n\tit.idx++\n\treturn it.err == nil && it.idx < len(it.pairs)\n}\n\nfunc (it *testSortedTagIterator) Current() (string, string) {\n\treturn it.pairs[it.idx].name, it.pairs[it.idx].value\n}\n\nfunc (it *testSortedTagIterator) Err() error {\n\treturn it.err\n}\n\nfunc (it *testSortedTagIterator) Close() {}\n\nfunc TestEqualityFilter(t *testing.T) {\n\tinputs := []testFilterData{\n\t\t{id: \"foo\", match: true},\n\t\t{id: \"fo\", match: false},\n\t\t{id: \"foob\", match: false},\n\t}\n\tf := newEqualityFilter(\"foo\")\n\tfor _, input := range inputs {\n\t\trequire.Equal(t, input.match, f.Matches(input.id))\n\t}\n}\n\nfunc TestEmptyTagsFilterMatches(t *testing.T) {\n\tf := newTagsFilter(nil, newTestSortedTagIterator)\n\trequire.True(t, f.Matches(\"foo\"))\n}\n\nfunc TestTagsFilterMatches(t *testing.T) {\n\tfilters := map[string]string{\n\t\t\"tagName1\": \"tagValue1\",\n\t\t\"tagName2\": \"tagValue2\",\n\t}\n\tf := newTagsFilter(filters, newTestSortedTagIterator)\n\tinputs := []testFilterData{\n\t\t{id: \"tagName1=tagValue1,tagName2=tagValue2\", match: true},\n\t\t{id: \"tagName0=tagValue0,tagName1=tagValue1,tagName2=tagValue2,tagName3=tagValue3\", match: true},\n\t\t{id: \"tagName1=tagValue1\", match: false},\n\t\t{id: \"tagName2=tagValue2\", match: false},\n\t\t{id: \"tagName1=tagValue2,tagName2=tagValue1\", match: false},\n\t}\n\tfor _, input := range inputs {\n\t\trequire.Equal(t, input.match, f.Matches(input.id))\n\t}\n}\n\nfunc TestTagsFilterString(t *testing.T) {\n\tfilters := map[string]string{\n\t\t\"tagName1\": \"tagValue1\",\n\t\t\"tagName2\": \"tagValue2\",\n\t}\n\tf := newTagsFilter(filters, newTestSortedTagIterator)\n\trequire.Equal(t, `tagName1:Equals(\"tagValue1\") && tagName2:Equals(\"tagValue2\")`, f.String())\n}\n<|endoftext|>"} {"text":"<commit_before>package web_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/testflight\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/sclevine\/agouti\/matchers\"\n)\n\nvar _ = Describe(\"PipelinePausing\", func() {\n\tvar (\n\t\tloadingTimeout time.Duration\n\t)\n\n\tContext(\"with a job in the configuration\", func() {\n\t\tBeforeEach(func() {\n\t\t\t_, _, _, err := team.CreateOrUpdatePipelineConfig(pipelineName, \"0\", atc.Config{\n\t\t\t\tJobs: []atc.JobConfig{\n\t\t\t\t\t{Name: \"some-job-name\"},\n\t\t\t\t},\n\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t_, err = team.UnpausePipeline(pipelineName)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, err = team.DeletePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, _, _, err = team.CreateOrUpdatePipelineConfig(\"another-pipeline\", \"0\", atc.Config{\n\t\t\t\tJobs: []atc.JobConfig{\n\t\t\t\t\t{Name: \"another-job-name\"},\n\t\t\t\t},\n\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, err = team.UnpausePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tloadingTimeout = 10 * time.Second\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := helpers.DeleteAllContainers(client, \"another-pipeline\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = team.DeletePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\thomeLink := \".top-bar.test li:nth-of-type(2) a\"\n\t\tnavList := \".sidebar.test\"\n\n\t\tIt(\"can pause the pipelines\", func() {\n\t\t\tExpect(page.Navigate(atcURL)).To(Succeed())\n\t\t\tEventually(page, loadingTimeout).Should(HaveURL(atcRoute(\"\/\")))\n\n\t\t\tBy(\"toggling the nav\")\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\n\t\t\tBy(\"clicking another-pipeline\")\n\t\t\tEventually(page.All(navList).FindByLink(\"another-pipeline\")).Should(BeFound())\n\t\t\tExpect(page.All(navList).FindByLink(\"another-pipeline\").Click()).To(Succeed())\n\t\t\tEventually(page, loadingTimeout).Should(HaveURL(atcRoute(fmt.Sprintf(\"\/teams\/%s\/pipelines\/another-pipeline\", teamName))))\n\n\t\t\tBy(\"clicking home button\")\n\t\t\tExpect(page.Find(homeLink).Click()).To(Succeed())\n\t\t\tEventually(page, loadingTimeout).Should(HaveURL(atcRoute(fmt.Sprintf(\"\/teams\/%s\/pipelines\/another-pipeline\", teamName))))\n\n\t\t\tBy(\"toggling the nav\")\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\t\t\tEventually(page.Find(\"#pipeline\").Text, loadingTimeout).Should(ContainSubstring(\"another-job-name\"))\n\n\t\t\tBy(\"pausing another-pipeline\")\n\t\t\tspanXPath := fmt.Sprintf(\"\/\/a[@href='\/teams\/%s\/pipelines\/another-pipeline']\/parent::li\/span\", teamName)\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath), loadingTimeout).Should(BeVisible())\n\t\t\tExpect(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'disabled')]\")).To(BeFound())\n\t\t\tExpect(page.FindByXPath(spanXPath).Click()).To(Succeed())\n\n\t\t\t\/\/ top bar should show the pipeline is paused\n\t\t\tEventually(page.Find(\".top-bar.test.paused\"), loadingTimeout).Should(BeFound())\n\n\t\t\tBy(\"refreshing the page\")\n\t\t\tpage.Refresh()\n\n\t\t\tEventually(page.Find(\".top-bar.test.paused\"), loadingTimeout).Should(BeFound())\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath), loadingTimeout).Should(BeVisible())\n\t\t\tExpect(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'enabled')]\")).To(BeFound())\n\n\t\t\tBy(\"unpausing the pipeline\")\n\t\t\tExpect(page.FindByXPath(spanXPath).Click()).To(Succeed())\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'disabled')]\")).Should(BeFound())\n\n\t\t\tConsistently(page.Find(\".top-bar.test.paused\")).ShouldNot(BeFound())\n\n\t\t\tBy(\"refreshing the page\")\n\t\t\tpage.Refresh()\n\n\t\t\tBy(\"pausing the pipeline\")\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\t\t\tExpect(page.FindByXPath(spanXPath).Click()).To(Succeed())\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'enabled')]\")).Should(BeFound())\n\t\t})\n\t})\n})\n<commit_msg>update selectors<commit_after>package web_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/testflight\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/sclevine\/agouti\/matchers\"\n)\n\nvar _ = Describe(\"PipelinePausing\", func() {\n\tvar (\n\t\tloadingTimeout time.Duration\n\t)\n\n\tContext(\"with a job in the configuration\", func() {\n\t\tBeforeEach(func() {\n\t\t\t_, _, _, err := team.CreateOrUpdatePipelineConfig(pipelineName, \"0\", atc.Config{\n\t\t\t\tJobs: []atc.JobConfig{\n\t\t\t\t\t{Name: \"some-job-name\"},\n\t\t\t\t},\n\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t_, err = team.UnpausePipeline(pipelineName)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, err = team.DeletePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, _, _, err = team.CreateOrUpdatePipelineConfig(\"another-pipeline\", \"0\", atc.Config{\n\t\t\t\tJobs: []atc.JobConfig{\n\t\t\t\t\t{Name: \"another-job-name\"},\n\t\t\t\t},\n\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t_, err = team.UnpausePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tloadingTimeout = 10 * time.Second\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\terr := helpers.DeleteAllContainers(client, \"another-pipeline\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t_, err = team.DeletePipeline(\"another-pipeline\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\t\/\/ homeLink := \".top-bar.test li:nth-of-type(2) a\"\n\t\tnavList := \".sidebar.test .team ul\"\n\n\t\tIt(\"can pause the pipelines\", func() {\n\t\t\tExpect(page.Navigate(atcURL)).To(Succeed())\n\t\t\tEventually(page, loadingTimeout).Should(HaveURL(atcRoute(\"\/\")))\n\n\t\t\tBy(\"toggling the nav\")\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\n\t\t\tBy(\"clicking another-pipeline\")\n\t\t\tEventually(page.All(navList).FindByLink(\"another-pipeline\")).Should(BeFound())\n\t\t\tExpect(page.All(navList).FindByLink(\"another-pipeline\").Click()).To(Succeed())\n\t\t\tEventually(page, loadingTimeout).Should(HaveURL(atcRoute(fmt.Sprintf(\"\/teams\/%s\/pipelines\/another-pipeline\", teamName))))\n\n\t\t\tBy(\"toggling the nav\")\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\t\t\tEventually(page.Find(\"#pipeline\").Text, loadingTimeout).Should(ContainSubstring(\"another-job-name\"))\n\n\t\t\tBy(\"pausing another-pipeline\")\n\t\t\tspanXPath := fmt.Sprintf(\"\/\/a[@href='\/teams\/%s\/pipelines\/another-pipeline']\/preceding-sibling::span\", teamName)\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath), loadingTimeout).Should(BeVisible())\n\t\t\tExpect(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'disabled')]\")).To(BeFound())\n\t\t\tExpect(page.FindByXPath(spanXPath).Click()).To(Succeed())\n\n\t\t\t\/\/ top bar should show the pipeline is paused\n\t\t\tEventually(page.Find(\".top-bar.test.paused\"), loadingTimeout).Should(BeFound())\n\n\t\t\tBy(\"refreshing the page\")\n\t\t\tpage.Refresh()\n\n\t\t\tEventually(page.Find(\".top-bar.test.paused\"), loadingTimeout).Should(BeFound())\n\t\t\tExpect(page.Find(\".sidebar-toggle.test\").Click()).To(Succeed())\n\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath), loadingTimeout).Should(BeVisible())\n\t\t\tExpect(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'enabled')]\")).To(BeFound())\n\n\t\t\tBy(\"unpausing the pipeline\")\n\t\t\tExpect(page.FindByXPath(spanXPath).Click()).To(Succeed())\n\t\t\tEventually(page.All(navList).FindByXPath(spanXPath + \"[contains(@class, 'disabled')]\")).Should(BeFound())\n\n\t\t\tEventually(page.Find(\".top-bar.test.paused\")).ShouldNot(BeFound())\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Artefact holds a parsed source for a build artefact\ntype Artefact struct {\n\tOrder int\n\tStep Step\n\tSource string\n\tDest string \/\/ this is only the folder. Filename comes from the source\n}\n\n\/\/ Step Holds a single step in the build process\n\/\/ Public structs. They are used to store the build for the builders\ntype Step struct {\n\tOrder int\n\tName string\n\tDockerfile string\n\tKeep bool\n\tArtefacts []Artefact\n\tManifest Manifest\n}\n\n\/\/ Manifest Holds the whole build process\ntype Manifest struct {\n\tWorkdir string\n\tSteps []Step\n}\n\n\/\/ Private structs. They are used to load from yaml\ntype step struct {\n\tName string\n\tDockerfile string\n\tKeep bool\n\tArtefacts []string\n}\n\ntype build struct {\n\tWorkdir string\n\tSteps []step\n}\n\n\/\/ LoadBuildFromFile loads Build from a yaml file\nfunc LoadBuildFromFile(file string) (*Manifest, error) {\n\tt := build{}\n\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = yaml.Unmarshal([]byte(data), &t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t.convertToBuild()\n}\n\nfunc (b *build) convertToBuild() (*Manifest, error) {\n\tr := Manifest{}\n\tr.Workdir = b.Workdir\n\tr.Steps = []Step{}\n\n\tfor idx, s := range b.Steps {\n\t\tconvertedStep := Step{}\n\n\t\tconvertedStep.Manifest = r\n\t\tconvertedStep.Dockerfile = s.Dockerfile\n\t\tconvertedStep.Name = s.Name\n\t\tconvertedStep.Order = idx\n\t\tconvertedStep.Keep = s.Keep\n\t\tconvertedStep.Artefacts = []Artefact{}\n\n\t\tfor kdx, a := range s.Artefacts {\n\t\t\tconvertedArt := Artefact{}\n\n\t\t\tconvertedArt.Order = kdx\n\t\t\tconvertedArt.Step = convertedStep\n\t\t\tparts := strings.Split(a, \":\")\n\t\t\tconvertedArt.Source = parts[0]\n\t\t\tif len(parts) == 1 {\n\t\t\t\t\/\/ only one use the base\n\t\t\t\tconvertedArt.Dest = \".\"\n\t\t\t} else {\n\t\t\t\tconvertedArt.Dest = parts[1]\n\t\t\t}\n\n\t\t\tconvertedStep.Artefacts = append(convertedStep.Artefacts, convertedArt)\n\t\t}\n\n\t\t\/\/ TODO: validate (unique name,...)\n\n\t\tr.Steps = append(r.Steps, convertedStep)\n\t}\n\n\treturn &r, nil\n}\n\n\/\/ FindStepByName finds a step by name. Returns nil if not found\nfunc (m *Manifest) FindStepByName(name string) (*Step, error) {\n\tfor _, step := range m.Steps {\n\t\tif step.Name == name {\n\t\t\treturn &step, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n<commit_msg>check for step name uniqueness<commit_after>package build\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Artefact holds a parsed source for a build artefact\ntype Artefact struct {\n\tOrder int\n\tStep Step\n\tSource string\n\tDest string \/\/ this is only the folder. Filename comes from the source\n}\n\n\/\/ Step Holds a single step in the build process\n\/\/ Public structs. They are used to store the build for the builders\ntype Step struct {\n\tOrder int\n\tName string\n\tDockerfile string\n\tKeep bool\n\tArtefacts []Artefact\n\tManifest Manifest\n}\n\n\/\/ Manifest Holds the whole build process\ntype Manifest struct {\n\tWorkdir string\n\tSteps []Step\n}\n\n\/\/ Private structs. They are used to load from yaml\ntype step struct {\n\tName string\n\tDockerfile string\n\tKeep bool\n\tArtefacts []string\n}\n\ntype build struct {\n\tWorkdir string\n\tSteps []step\n}\n\n\/\/ LoadBuildFromFile loads Build from a yaml file\nfunc LoadBuildFromFile(file string) (*Manifest, error) {\n\tt := build{}\n\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = yaml.Unmarshal([]byte(data), &t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t.convertToBuild()\n}\n\nfunc (b *build) convertToBuild() (*Manifest, error) {\n\tr := Manifest{}\n\tr.Workdir = b.Workdir\n\tr.Steps = []Step{}\n\n\tfor idx, s := range b.Steps {\n\t\tconvertedStep := Step{}\n\n\t\tconvertedStep.Manifest = r\n\t\tconvertedStep.Dockerfile = s.Dockerfile\n\t\tconvertedStep.Name = s.Name\n\t\tconvertedStep.Order = idx\n\t\tconvertedStep.Keep = s.Keep\n\t\tconvertedStep.Artefacts = []Artefact{}\n\n\t\tfor kdx, a := range s.Artefacts {\n\t\t\tconvertedArt := Artefact{}\n\n\t\t\tconvertedArt.Order = kdx\n\t\t\tconvertedArt.Step = convertedStep\n\t\t\tparts := strings.Split(a, \":\")\n\t\t\tconvertedArt.Source = parts[0]\n\t\t\tif len(parts) == 1 {\n\t\t\t\t\/\/ only one use the base\n\t\t\t\tconvertedArt.Dest = \".\"\n\t\t\t} else {\n\t\t\t\tconvertedArt.Dest = parts[1]\n\t\t\t}\n\n\t\t\tconvertedStep.Artefacts = append(convertedStep.Artefacts, convertedArt)\n\t\t}\n\n\t\t\/\/ is it unique?\n\t\tfor _, s := range r.Steps {\n\t\t\tif s.Name == convertedStep.Name {\n\t\t\t\treturn nil, fmt.Errorf(\"Step name '%s' is not unique\", convertedStep.Name)\n\t\t\t}\n\t\t}\n\n\t\tr.Steps = append(r.Steps, convertedStep)\n\t}\n\n\treturn &r, nil\n}\n\n\/\/ FindStepByName finds a step by name. Returns nil if not found\nfunc (m *Manifest) FindStepByName(name string) (*Step, error) {\n\tfor _, step := range m.Steps {\n\t\tif step.Name == name {\n\t\t\treturn &step, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ebiten\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/buffered\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/clock\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/driver\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphicscommand\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/hooks\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/shareable\"\n)\n\nfunc init() {\n\tshareable.SetGraphicsDriver(graphicsDriver())\n\tgraphicscommand.SetGraphicsDriver(graphicsDriver())\n}\n\ntype defaultGame struct {\n\tupdate func(screen *Image) error\n\twidth int\n\theight int\n}\n\nfunc (d *defaultGame) Update(screen *Image) error {\n\treturn d.update(screen)\n}\n\nfunc (d *defaultGame) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {\n\t\/\/ Ignore the outside size.\n\treturn d.width, d.height\n}\n\nfunc (d *defaultGame) setScreenSize(width, height int) {\n\td.width = width\n\td.height = height\n}\n\nfunc newUIContext(game Game, width, height int, scaleForWindow float64) *uiContext {\n\treturn &uiContext{\n\t\tgame: game,\n\t\tscaleForWindow: scaleForWindow,\n\t}\n}\n\ntype uiContext struct {\n\tgame Game\n\toffscreen *Image\n\tscreen *Image\n\tscreenScale float64\n\tscaleForWindow float64\n\toffsetX float64\n\toffsetY float64\n\n\toutsideSizeUpdated bool\n\toutsideWidth float64\n\toutsideHeight float64\n\n\tm sync.Mutex\n}\n\nvar theUIContext *uiContext\n\nfunc (c *uiContext) setScaleForWindow(scale float64) {\n\tg, ok := c.game.(*defaultGame)\n\tif !ok {\n\t\tpanic(\"ebiten: setScaleForWindow must be called when Run is used\")\n\t}\n\tw, h := g.width, g.height\n\tc.m.Lock()\n\tc.scaleForWindow = scale\n\tuiDriver().SetWindowSize(int(float64(w)*scale), int(float64(h)*scale))\n\tc.m.Unlock()\n}\n\nfunc (c *uiContext) getScaleForWindow() float64 {\n\tif _, ok := c.game.(*defaultGame); !ok {\n\t\tpanic(\"ebiten: getScaleForWindow must be called when Run is used\")\n\t}\n\tc.m.Lock()\n\ts := c.scaleForWindow\n\tc.m.Unlock()\n\treturn s\n}\n\nfunc (c *uiContext) SetScreenSize(width, height int) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tg, ok := c.game.(*defaultGame)\n\tif !ok {\n\t\tpanic(\"ebiten: SetScreenSize must be called when Run is used\")\n\t}\n\tg.setScreenSize(width, height)\n\ts := c.scaleForWindow\n\tuiDriver().SetWindowSize(int(float64(width)*s), int(float64(height)*s))\n}\n\nfunc (c *uiContext) Layout(outsideWidth, outsideHeight float64) {\n\tc.outsideSizeUpdated = true\n\tc.outsideWidth = outsideWidth\n\tc.outsideHeight = outsideHeight\n}\n\nfunc (c *uiContext) updateOffscreen() {\n\tsw, sh := c.game.Layout(int(c.outsideWidth), int(c.outsideHeight))\n\n\tif c.offscreen != nil && !c.outsideSizeUpdated {\n\t\tif w, h := c.offscreen.Size(); w == sw && h == sh {\n\t\t\treturn\n\t\t}\n\t}\n\tc.outsideSizeUpdated = false\n\n\tif c.screen != nil {\n\t\t_ = c.screen.Dispose()\n\t\tc.screen = nil\n\t}\n\n\tif c.offscreen != nil {\n\t\tif w, h := c.offscreen.Size(); w != sw || h != sh {\n\t\t\t_ = c.offscreen.Dispose()\n\t\t\tc.offscreen = nil\n\t\t}\n\t}\n\tif c.offscreen == nil {\n\t\tc.offscreen = newImage(sw, sh, FilterDefault, true)\n\t}\n\tc.SetScreenSize(sw, sh)\n\n\t\/\/ TODO: This is duplicated with mobile\/ebitenmobileview\/funcs.go. Refactor this.\n\td := uiDriver().DeviceScaleFactor()\n\tc.screen = newScreenFramebufferImage(int(c.outsideWidth*d), int(c.outsideHeight*d))\n\n\tscaleX := c.outsideWidth \/ float64(sw) * d\n\tscaleY := c.outsideHeight \/ float64(sh) * d\n\tc.screenScale = math.Min(scaleX, scaleY)\n\tif uiDriver().CanHaveWindow() && !uiDriver().IsFullscreen() {\n\t\tc.setScaleForWindow(c.screenScale \/ d)\n\t}\n\n\twidth := float64(sw) * c.screenScale\n\theight := float64(sh) * c.screenScale\n\tc.offsetX = (c.outsideWidth*d - width) \/ 2\n\tc.offsetY = (c.outsideHeight*d - height) \/ 2\n}\n\nfunc (c *uiContext) Update(afterFrameUpdate func()) error {\n\tupdateCount := clock.Update(MaxTPS())\n\n\t\/\/ TODO: If updateCount is 0 and vsync is disabled, swapping buffers can be skipped.\n\n\tif err := buffered.BeginFrame(); err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < updateCount; i++ {\n\t\tc.updateOffscreen()\n\n\t\t\/\/ Mipmap images should be disposed by Clear.\n\t\tc.offscreen.Clear()\n\n\t\tsetDrawingSkipped(i < updateCount-1)\n\n\t\tif err := hooks.RunBeforeUpdateHooks(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.game.Update(c.offscreen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuiDriver().Input().ResetForFrame()\n\t\tafterFrameUpdate()\n\t}\n\n\t\/\/ This clear is needed for fullscreen mode or some mobile platforms (#622).\n\tc.screen.Clear()\n\n\top := &DrawImageOptions{}\n\n\tswitch vd := graphicsDriver().VDirection(); vd {\n\tcase driver.VDownward:\n\t\t\/\/ c.screen is special: its Y axis is down to up,\n\t\t\/\/ and the origin point is lower left.\n\t\top.GeoM.Scale(c.screenScale, -c.screenScale)\n\t\t_, h := c.offscreen.Size()\n\t\top.GeoM.Translate(0, float64(h)*c.screenScale)\n\tcase driver.VUpward:\n\t\top.GeoM.Scale(c.screenScale, c.screenScale)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"ebiten: invalid v-direction: %d\", vd))\n\t}\n\n\top.GeoM.Translate(c.offsetX, c.offsetY)\n\top.CompositeMode = CompositeModeCopy\n\n\t\/\/ filterScreen works with >=1 scale, but does not well with <1 scale.\n\t\/\/ Use regular FilterLinear instead so far (#669).\n\tif c.screenScale >= 1 {\n\t\top.Filter = filterScreen\n\t} else {\n\t\top.Filter = FilterLinear\n\t}\n\t_ = c.screen.DrawImage(c.offscreen, op)\n\n\tif err := buffered.EndFrame(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *uiContext) AdjustPosition(x, y float64) (float64, float64) {\n\td := uiDriver().DeviceScaleFactor()\n\treturn (x*d - c.offsetX) \/ c.screenScale, (y*d - c.offsetY) \/ c.screenScale\n}\n<commit_msg>ui: Protect defaultGame's memers with mutex<commit_after>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ebiten\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/buffered\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/clock\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/driver\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphicscommand\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/hooks\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/shareable\"\n)\n\nfunc init() {\n\tshareable.SetGraphicsDriver(graphicsDriver())\n\tgraphicscommand.SetGraphicsDriver(graphicsDriver())\n}\n\ntype defaultGame struct {\n\tupdate func(screen *Image) error\n\twidth int\n\theight int\n\tcontext *uiContext\n}\n\nfunc (d *defaultGame) Update(screen *Image) error {\n\treturn d.update(screen)\n}\n\nfunc (d *defaultGame) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {\n\t\/\/ Ignore the outside size.\n\td.context.m.Lock()\n\tw, h := d.width, d.height\n\td.context.m.Unlock()\n\treturn w, h\n}\n\nfunc newUIContext(game Game, width, height int, scaleForWindow float64) *uiContext {\n\tu := &uiContext{\n\t\tgame: game,\n\t\tscaleForWindow: scaleForWindow,\n\t}\n\tif g, ok := game.(*defaultGame); ok {\n\t\tg.context = u\n\t}\n\treturn u\n}\n\ntype uiContext struct {\n\tgame Game\n\toffscreen *Image\n\tscreen *Image\n\tscreenScale float64\n\tscaleForWindow float64\n\toffsetX float64\n\toffsetY float64\n\n\toutsideSizeUpdated bool\n\toutsideWidth float64\n\toutsideHeight float64\n\n\tm sync.Mutex\n}\n\nvar theUIContext *uiContext\n\nfunc (c *uiContext) setScaleForWindow(scale float64) {\n\tg, ok := c.game.(*defaultGame)\n\tif !ok {\n\t\tpanic(\"ebiten: setScaleForWindow must be called when Run is used\")\n\t}\n\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tw, h := g.width, g.height\n\tc.scaleForWindow = scale\n\tuiDriver().SetWindowSize(int(float64(w)*scale), int(float64(h)*scale))\n}\n\nfunc (c *uiContext) getScaleForWindow() float64 {\n\tif _, ok := c.game.(*defaultGame); !ok {\n\t\tpanic(\"ebiten: getScaleForWindow must be called when Run is used\")\n\t}\n\n\tc.m.Lock()\n\ts := c.scaleForWindow\n\tc.m.Unlock()\n\treturn s\n}\n\nfunc (c *uiContext) SetScreenSize(width, height int) {\n\tg, ok := c.game.(*defaultGame)\n\tif !ok {\n\t\tpanic(\"ebiten: SetScreenSize must be called when Run is used\")\n\t}\n\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tg.width = width\n\tg.height = height\n\ts := c.scaleForWindow\n\tuiDriver().SetWindowSize(int(float64(width)*s), int(float64(height)*s))\n}\n\nfunc (c *uiContext) Layout(outsideWidth, outsideHeight float64) {\n\tc.outsideSizeUpdated = true\n\tc.outsideWidth = outsideWidth\n\tc.outsideHeight = outsideHeight\n}\n\nfunc (c *uiContext) updateOffscreen() {\n\tsw, sh := c.game.Layout(int(c.outsideWidth), int(c.outsideHeight))\n\n\tif c.offscreen != nil && !c.outsideSizeUpdated {\n\t\tif w, h := c.offscreen.Size(); w == sw && h == sh {\n\t\t\treturn\n\t\t}\n\t}\n\tc.outsideSizeUpdated = false\n\n\tif c.screen != nil {\n\t\t_ = c.screen.Dispose()\n\t\tc.screen = nil\n\t}\n\n\tif c.offscreen != nil {\n\t\tif w, h := c.offscreen.Size(); w != sw || h != sh {\n\t\t\t_ = c.offscreen.Dispose()\n\t\t\tc.offscreen = nil\n\t\t}\n\t}\n\tif c.offscreen == nil {\n\t\tc.offscreen = newImage(sw, sh, FilterDefault, true)\n\t}\n\tc.SetScreenSize(sw, sh)\n\n\t\/\/ TODO: This is duplicated with mobile\/ebitenmobileview\/funcs.go. Refactor this.\n\td := uiDriver().DeviceScaleFactor()\n\tc.screen = newScreenFramebufferImage(int(c.outsideWidth*d), int(c.outsideHeight*d))\n\n\tscaleX := c.outsideWidth \/ float64(sw) * d\n\tscaleY := c.outsideHeight \/ float64(sh) * d\n\tc.screenScale = math.Min(scaleX, scaleY)\n\tif uiDriver().CanHaveWindow() && !uiDriver().IsFullscreen() {\n\t\tc.setScaleForWindow(c.screenScale \/ d)\n\t}\n\n\twidth := float64(sw) * c.screenScale\n\theight := float64(sh) * c.screenScale\n\tc.offsetX = (c.outsideWidth*d - width) \/ 2\n\tc.offsetY = (c.outsideHeight*d - height) \/ 2\n}\n\nfunc (c *uiContext) Update(afterFrameUpdate func()) error {\n\tupdateCount := clock.Update(MaxTPS())\n\n\t\/\/ TODO: If updateCount is 0 and vsync is disabled, swapping buffers can be skipped.\n\n\tif err := buffered.BeginFrame(); err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < updateCount; i++ {\n\t\tc.updateOffscreen()\n\n\t\t\/\/ Mipmap images should be disposed by Clear.\n\t\tc.offscreen.Clear()\n\n\t\tsetDrawingSkipped(i < updateCount-1)\n\n\t\tif err := hooks.RunBeforeUpdateHooks(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.game.Update(c.offscreen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuiDriver().Input().ResetForFrame()\n\t\tafterFrameUpdate()\n\t}\n\n\t\/\/ This clear is needed for fullscreen mode or some mobile platforms (#622).\n\tc.screen.Clear()\n\n\top := &DrawImageOptions{}\n\n\tswitch vd := graphicsDriver().VDirection(); vd {\n\tcase driver.VDownward:\n\t\t\/\/ c.screen is special: its Y axis is down to up,\n\t\t\/\/ and the origin point is lower left.\n\t\top.GeoM.Scale(c.screenScale, -c.screenScale)\n\t\t_, h := c.offscreen.Size()\n\t\top.GeoM.Translate(0, float64(h)*c.screenScale)\n\tcase driver.VUpward:\n\t\top.GeoM.Scale(c.screenScale, c.screenScale)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"ebiten: invalid v-direction: %d\", vd))\n\t}\n\n\top.GeoM.Translate(c.offsetX, c.offsetY)\n\top.CompositeMode = CompositeModeCopy\n\n\t\/\/ filterScreen works with >=1 scale, but does not well with <1 scale.\n\t\/\/ Use regular FilterLinear instead so far (#669).\n\tif c.screenScale >= 1 {\n\t\top.Filter = filterScreen\n\t} else {\n\t\top.Filter = FilterLinear\n\t}\n\t_ = c.screen.DrawImage(c.offscreen, op)\n\n\tif err := buffered.EndFrame(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *uiContext) AdjustPosition(x, y float64) (float64, float64) {\n\td := uiDriver().DeviceScaleFactor()\n\treturn (x*d - c.offsetX) \/ c.screenScale, (y*d - c.offsetY) \/ c.screenScale\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package boomer provides commands to run load tests and display results.\npackage boomer\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/valyala\/fasthttp\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/sschepens\/pb\"\n)\n\nvar client *fasthttp.Client\n\ntype result struct {\n\terr error\n\tstatusCode int\n\tduration time.Duration\n\tcontentLength int\n}\n\ntype Boomer struct {\n\t\/\/ Request is the request to be made.\n\tRequest *fasthttp.Request\n\n\t\/\/ N is the total number of requests to make.\n\tN int\n\n\t\/\/ C is the concurrency level, the number of concurrent workers to run.\n\tC int\n\n\t\/\/ Timeout in seconds.\n\tTimeout time.Duration\n\n\t\/\/ Qps is the rate limit.\n\tQps int\n\n\t\/\/ AllowInsecure is an option to allow insecure TLS\/SSL certificates.\n\tAllowInsecure bool\n\n\t\/\/ Output represents the output type. If \"csv\" is provided, the\n\t\/\/ output will be dumped as a csv stream.\n\tOutput string\n\n\t\/\/ ProxyAddr is the address of HTTP proxy server in the format on \"host:port\".\n\t\/\/ Optional.\n\tProxyAddr *url.URL\n\n\t\/\/ ReadAll determines whether the body of the response needs\n\t\/\/ to be fully consumed.\n\tReadAll bool\n\n\tbar *pb.ProgressBar\n\tresults chan *result\n\tstop chan struct{}\n}\n\nfunc (b *Boomer) startProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar = pb.New(b.N)\n\tb.bar.Format(\"Bom !\")\n\tb.bar.BarStart = \"Pl\"\n\tb.bar.BarEnd = \"!\"\n\tb.bar.Empty = \" \"\n\tb.bar.Current = \"a\"\n\tb.bar.CurrentN = \"a\"\n\tb.bar.Start()\n}\n\nfunc (b *Boomer) finalizeProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar.Finish()\n}\n\nfunc (b *Boomer) incProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar.Increment()\n}\n\n\/\/ Run makes all the requests, prints the summary. It blocks until\n\/\/ all work is done.\nfunc (b *Boomer) Run() {\n\tb.results = make(chan *result, b.C)\n\tb.stop = make(chan struct{})\n\tb.startProgress()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\t<-c\n\t\tb.finalizeProgress()\n\t\tclose(b.stop)\n\t}()\n\n\tr := newReport(b.N, b.results, b.Output)\n\tb.runWorkers()\n\tclose(b.results)\n\tb.finalizeProgress()\n\tr.finalize()\n}\n\nfunc (b *Boomer) runWorker(wg *sync.WaitGroup, ch chan struct{}) {\n\tresp := fasthttp.AcquireResponse()\n\treq := fasthttp.AcquireRequest()\n\tb.Request.CopyTo(req)\n\tfor range ch {\n\t\ts := time.Now()\n\n\t\tvar code int\n\t\tvar size int\n\n\t\tresp.Reset()\n\t\tvar err error\n\t\tif b.Timeout > 0 {\n\t\t\terr = client.DoTimeout(req, resp, b.Timeout)\n\t\t} else {\n\t\t\terr = client.Do(req, resp)\n\t\t}\n\t\tif err == nil {\n\t\t\tsize = resp.Header.ContentLength()\n\t\t\tcode = resp.Header.StatusCode()\n\t\t}\n\n\t\tif b.ReadAll {\n\t\t\tresp.Body()\n\t\t}\n\n\t\tb.incProgress()\n\t\tb.results <- &result{\n\t\t\tstatusCode: code,\n\t\t\tduration: time.Now().Sub(s),\n\t\t\terr: err,\n\t\t\tcontentLength: size,\n\t\t}\n\t}\n\tfasthttp.ReleaseResponse(resp)\n\tfasthttp.ReleaseRequest(req)\n\twg.Done()\n}\n\nfunc (b *Boomer) runWorkers() {\n\tclient = &fasthttp.Client{\n\t\tTLSConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: b.AllowInsecure,\n\t\t},\n\t\tMaxConnsPerHost: b.C * 2,\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(b.C)\n\n\tvar throttle <-chan time.Time\n\tif b.Qps > 0 {\n\t\tthrottle = time.Tick(time.Duration(1e6\/(b.Qps)) * time.Microsecond)\n\t}\n\n\tjobsch := make(chan struct{}, b.C)\n\tfor i := 0; i < b.C; i++ {\n\t\tgo b.runWorker(&wg, jobsch)\n\t}\n\nLoop:\n\tfor i := 0; i < b.N; i++ {\n\t\tif b.Qps > 0 {\n\t\t\t<-throttle\n\t\t}\n\t\tselect {\n\t\tcase <-b.stop:\n\t\t\tbreak Loop\n\t\tcase jobsch <- struct{}{}:\n\t\t\tcontinue\n\t\t}\n\t}\n\tclose(jobsch)\n\twg.Wait()\n}\n\n\/\/ cloneRequest returns a clone of the provided *http.Request.\n\/\/ The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *fasthttp.Request) *fasthttp.Request {\n\treq := fasthttp.AcquireRequest()\n\tr.CopyTo(req)\n\treturn req\n}\n<commit_msg>Wait up to 10 seconds for requests to finish on interrupt<commit_after>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package boomer provides commands to run load tests and display results.\npackage boomer\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/valyala\/fasthttp\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/sschepens\/pb\"\n)\n\nvar client *fasthttp.Client\n\ntype result struct {\n\terr error\n\tstatusCode int\n\tduration time.Duration\n\tcontentLength int\n}\n\ntype Boomer struct {\n\t\/\/ Request is the request to be made.\n\tRequest *fasthttp.Request\n\n\t\/\/ N is the total number of requests to make.\n\tN int\n\n\t\/\/ C is the concurrency level, the number of concurrent workers to run.\n\tC int\n\n\t\/\/ Timeout in seconds.\n\tTimeout time.Duration\n\n\t\/\/ Qps is the rate limit.\n\tQps int\n\n\t\/\/ AllowInsecure is an option to allow insecure TLS\/SSL certificates.\n\tAllowInsecure bool\n\n\t\/\/ Output represents the output type. If \"csv\" is provided, the\n\t\/\/ output will be dumped as a csv stream.\n\tOutput string\n\n\t\/\/ ProxyAddr is the address of HTTP proxy server in the format on \"host:port\".\n\t\/\/ Optional.\n\tProxyAddr *url.URL\n\n\t\/\/ ReadAll determines whether the body of the response needs\n\t\/\/ to be fully consumed.\n\tReadAll bool\n\n\tbar *pb.ProgressBar\n\tresults chan *result\n\tstop chan struct{}\n}\n\nfunc (b *Boomer) startProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar = pb.New(b.N)\n\tb.bar.Format(\"Bom !\")\n\tb.bar.BarStart = \"Pl\"\n\tb.bar.BarEnd = \"!\"\n\tb.bar.Empty = \" \"\n\tb.bar.Current = \"a\"\n\tb.bar.CurrentN = \"a\"\n\tb.bar.Start()\n}\n\nfunc (b *Boomer) finalizeProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar.Finish()\n}\n\nfunc (b *Boomer) incProgress() {\n\tif b.Output != \"\" {\n\t\treturn\n\t}\n\tb.bar.Increment()\n}\n\n\/\/ Run makes all the requests, prints the summary. It blocks until\n\/\/ all work is done.\nfunc (b *Boomer) Run() {\n\tvar shutdownTimer *time.Timer\n\tb.results = make(chan *result, b.C)\n\tb.stop = make(chan struct{})\n\tb.startProgress()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\t<-c\n\t\tshutdownTimer = time.AfterFunc(10*time.Second, func() {\n\t\t\tos.Exit(1)\n\t\t})\n\t\tb.finalizeProgress()\n\t\tclose(b.stop)\n\t}()\n\n\tr := newReport(b.N, b.results, b.Output)\n\tb.runWorkers()\n\tif shutdownTimer != nil {\n\t\tshutdownTimer.Stop()\n\t}\n\tclose(b.results)\n\tb.finalizeProgress()\n\tr.finalize()\n}\n\nfunc (b *Boomer) runWorker(wg *sync.WaitGroup, ch chan struct{}) {\n\tresp := fasthttp.AcquireResponse()\n\treq := fasthttp.AcquireRequest()\n\tb.Request.CopyTo(req)\n\tfor range ch {\n\t\ts := time.Now()\n\n\t\tvar code int\n\t\tvar size int\n\n\t\tresp.Reset()\n\t\tvar err error\n\t\tif b.Timeout > 0 {\n\t\t\terr = client.DoTimeout(req, resp, b.Timeout)\n\t\t} else {\n\t\t\terr = client.Do(req, resp)\n\t\t}\n\t\tif err == nil {\n\t\t\tsize = resp.Header.ContentLength()\n\t\t\tcode = resp.Header.StatusCode()\n\t\t}\n\n\t\tif b.ReadAll {\n\t\t\tresp.Body()\n\t\t}\n\n\t\tb.incProgress()\n\t\tb.results <- &result{\n\t\t\tstatusCode: code,\n\t\t\tduration: time.Now().Sub(s),\n\t\t\terr: err,\n\t\t\tcontentLength: size,\n\t\t}\n\t}\n\tfasthttp.ReleaseResponse(resp)\n\tfasthttp.ReleaseRequest(req)\n\twg.Done()\n}\n\nfunc (b *Boomer) runWorkers() {\n\tclient = &fasthttp.Client{\n\t\tTLSConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: b.AllowInsecure,\n\t\t},\n\t\tMaxConnsPerHost: b.C * 2,\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(b.C)\n\n\tvar throttle <-chan time.Time\n\tif b.Qps > 0 {\n\t\tthrottle = time.Tick(time.Duration(1e6\/(b.Qps)) * time.Microsecond)\n\t}\n\n\tjobsch := make(chan struct{}, b.C)\n\tfor i := 0; i < b.C; i++ {\n\t\tgo b.runWorker(&wg, jobsch)\n\t}\n\nLoop:\n\tfor i := 0; i < b.N; i++ {\n\t\tif b.Qps > 0 {\n\t\t\t<-throttle\n\t\t}\n\t\tselect {\n\t\tcase <-b.stop:\n\t\t\tbreak Loop\n\t\tcase jobsch <- struct{}{}:\n\t\t\tcontinue\n\t\t}\n\t}\n\tclose(jobsch)\n\twg.Wait()\n}\n\n\/\/ cloneRequest returns a clone of the provided *http.Request.\n\/\/ The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *fasthttp.Request) *fasthttp.Request {\n\treq := fasthttp.AcquireRequest()\n\tr.CopyTo(req)\n\treturn req\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n)\n\nfunc file(name string, create bool) (*os.File, error) {\n\tswitch name {\n\tcase \"stdin\":\n\t\treturn os.Stdin, nil\n\tcase \"stdout\":\n\t\treturn os.Stdout, nil\n\tdefault:\n\t\tif create {\n\t\t\treturn os.Create(name)\n\t\t}\n\t\treturn os.Open(name)\n\t}\n}\n<commit_msg>vegeta: Helper decoder function<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\tvegeta \"github.com\/tsenart\/vegeta\/lib\"\n)\n\nfunc file(name string, create bool) (*os.File, error) {\n\tswitch name {\n\tcase \"stdin\":\n\t\treturn os.Stdin, nil\n\tcase \"stdout\":\n\t\treturn os.Stdout, nil\n\tdefault:\n\t\tif create {\n\t\t\treturn os.Create(name)\n\t\t}\n\t\treturn os.Open(name)\n\t}\n}\n\nfunc decoder(files []string) (vegeta.Decoder, io.Closer, error) {\n\tcloser := make(multiCloser, 0, len(files))\n\tdecs := make([]vegeta.Decoder, 0, len(files))\n\tfor _, f := range files {\n\t\trc, err := file(f, false)\n\t\tif err != nil {\n\t\t\treturn nil, closer, err\n\t\t}\n\n\t\tdec := vegeta.DecoderFor(rc)\n\t\tif dec == nil {\n\t\t\treturn nil, closer, fmt.Errorf(\"encode: can't detect encoding of %q\", f)\n\t\t}\n\n\t\tdecs = append(decs, dec)\n\t\tcloser = append(closer, rc)\n\t}\n\treturn vegeta.NewRoundRobinDecoder(decs...), closer, nil\n}\n\ntype multiCloser []io.Closer\n\nfunc (mc multiCloser) Close() error {\n\tvar errs []string\n\tfor _, c := range mc {\n\t\tif err := c.Close(); err != nil {\n\t\t\terrs = append(errs, err.Error())\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn errors.New(strings.Join(errs, \"; \"))\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\tDataChan chan map[string]string\n\tFillerSig chan bool\n)\n\ntype Dirstate struct {\n\tLocation string\n\tContents map[string][]string\n}\n\ntype Crossover struct {\n\tSource string\n\tDest string\n\tContents map[string][]string\n}\n\nfunc Filler() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-DataChan:\n\t\t\tDebug <- fmt.Sprintf(\"DATA %+v\", msg[\"Source\"])\n\t\t\tDebug <- fmt.Sprintf(\"DATA %+v\", msg[\"Destination\"])\n\t\tcase <-FillerSig:\n\t\t\tFillerSig <- true\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc BackFill(c *cli.Context) {\n\tif len(c.Args()) < 2 {\n\t\tError <- \"Invalid arguments. See 'flustro help fill' for more information.\"\n\t\tos.Exit(1)\n\t} else {\n\t\t\/\/ declare our variables\n\t\tvar srcObj interface{}\n\t\tvar dstObj interface{}\n\n\t\t\/\/ let's get our source object\n\t\tsrcDir := c.Args().Get(0)\n\t\tif isDir(srcDir) {\n\t\t\tsrcObj = ListDir(srcDir)\n\t\t} else if isFile(srcDir) {\n\t\t\tsrcObj = srcDir\n\t\t} else {\n\t\t\tError <- fmt.Sprintf(\"%s is not a file or directory.\", srcDir)\n\t\t}\n\n\t\t\/\/ and our dest object\n\t\tdstDir := c.Args().Get(1)\n\t\tif isDir(dstDir) {\n\t\t\tdstObj = ListDir(fmt.Sprintf(\"%s\", dstDir))\n\t\t} else if isFile(dstDir) {\n\t\t\tdstObj = fmt.Sprintf(\"%s\", dstDir)\n\t\t} else {\n\t\t\tError <- fmt.Sprintf(\"%s is not a file or directory.\", dstDir)\n\t\t}\n\n\t\t\/\/ Now let's spawn our pool of workers if our args are both directories\n\t\tfor i := 0; i < c.Int(\"j\"); i++ {\n\t\t\tgo Filler()\n\t\t}\n\t\t\/\/ next we'll start processing through our srcObj and dstObj lists and\n\t\t\/\/ backfill everything that's present in both locations\n\t\t\/\/ for k, _ := range srcObj.Contents {\n\n\t\t\/\/ or simply spawn a single Filler and let it do it's job if both args are files\n\t\tgo Filler()\n\t\tdata := map[string]string{\"Source\": fmt.Sprintf(\"%+v\", srcObj), \"Destination\": fmt.Sprintf(\"%+v\", dstObj)}\n\t\tDataChan <- data\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn\n}\n\n\/*\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/fuzzy\/gcl\"\n\t\"github.com\/kisielk\/whisper-go\/whisper\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype FillState struct {\n\tStartTime int64\n\tCount int64\n\tCountLock sync.Mutex\n\tCountChan chan bool\n\tSrcTotal int64\n\tDstTotal int64\n\tOverlap int64\n}\n\nfunc (f FillState) Increment() {\n\tf.CountLock.Lock()\n\tf.Count++\n\tf.CountChan <- true\n\tf.CountLock.Unlock()\n}\n\nfunc (f FillState) DumpState() {\n\t\/\/ grab our lock, this ensures accurate reporting, and gives a bit more\n\t\/\/ of a concurrency throttle. This is not necessarily a bad thing.\n\tf.CountLock.Lock()\n\tr := (time.Now().Unix() - f.StartTime)\n\tc := f.Count\n\tif r > 0 {\n\t\tc = (f.Count \/ r)\n\t}\n\tp := (float64(f.Count) \/ float64(f.Overlap)) * 100.00\n\tfmt.Printf(\"%s %-6s\/%-6s (%6.02f%%) in %d seconds @ %-6s\/sec\",\n\t\tgcl.String(\"Info\").Green().Bold(),\n\t\thumanize.Comma(f.Count),\n\t\thumanize.Comma(f.Overlap),\n\t\tp, HumanTime(int(r)), c)\n\tf.CountLock.Unlock()\n}\n\nvar (\n\tState FillState\n)\n\nfunc stateWorker(t chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase sig := <-State.CountChan:\n\t\t\tif sig {\n\t\t\t\tState.DumpState()\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-t:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc fillWorker(d chan map[string]string, t chan bool) {\n\tfor {\n\t\tmsg := <-d\n\t\tif msg[\"SRC\"] == \"__EXIT__\" && msg[\"DST\"] == \"__EXIT__\" {\n\t\t\treturn\n\t\t} else {\n\t\t\tif isFile(msg[\"SRC\"]) && isFile(msg[\"DST\"]) {\n\t\t\t\tif !backfillFile(msg[\"SRC\"], msg[\"DST\"]) {\n\t\t\t\t\tError.Printf(\"S:%s D:%s - Backfill operation failed.\",\n\t\t\t\t\t\tmsg[\"SRC\"],\n\t\t\t\t\t\tmsg[\"DST\"])\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t} else {\n\t\t\t\t\tState.Increment()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc backfillFile(s string, d string) bool {\n\t\/\/ Open our filehandles\n\tsDb, sErr := whisper.Open(s)\n\tdDb, dErr := whisper.Open(d)\n\tif !chkErr(sErr) || !chkErr(dErr) {\n\t\tos.Exit(1)\n\t}\n\t\/\/ Defer their closings\n\tdefer sDb.Close()\n\tdefer dDb.Close()\n\n\t\/\/ Now for a series of checks, first to ensure that both\n\t\/\/ files have the same number of archives in them.\n\tif sDb.Header.Metadata.ArchiveCount != dDb.Header.Metadata.ArchiveCount {\n\t\tError.Println(\"The files have a mismatched set of archives.\")\n\t\treturn false\n\t}\n\n\t\/\/ Now we'll start processing the archives, checking as we go to see if they\n\t\/\/ are matched. That way we at least fill in what we can, possibly....\n\tfor i, a := range sDb.Header.Archives {\n\t\t\/\/ The offset\n\t\tif a.Offset == dDb.Header.Archives[i].Offset {\n\t\t\t\/\/ and the number of points\n\t\t\tif a.Points == dDb.Header.Archives[i].Points {\n\t\t\t\t\/\/ and finally the interval\n\t\t\t\tif a.SecondsPerPoint == dDb.Header.Archives[i].SecondsPerPoint {\n\t\t\t\t\t\/\/ ok, now let's get rolling through the archives\n\t\t\t\t\tsp, se := sDb.DumpArchive(i)\n\t\t\t\t\tif se != nil {\n\t\t\t\t\t\tError.Println(se.Error())\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tdp, de := dDb.DumpArchive(i)\n\t\t\t\t\tif de != nil {\n\t\t\t\t\t\tError.Println(de.Error())\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tfor idx := 0; idx < len(sp); idx++ {\n\t\t\t\t\t\tif sp[idx].Timestamp != 0 && sp[idx].Value != 0 {\n\t\t\t\t\t\t\tif dp[idx].Timestamp == 0 || dp[idx].Value == 0 {\n\t\t\t\t\t\t\t\tdp[idx].Timestamp = sp[idx].Timestamp\n\t\t\t\t\t\t\t\tdp[idx].Value = sp[idx].Value\n\t\t\t\t\t\t\t\tdDb.Update(dp[idx])\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc Filler(c *cli.Context) error {\n\tif len(c.Args()) == 2 {\n\t\targs := c.Args()\n\t\tif isDir(args[0]) && isDir(args[1]) {\n\t\t\tState.Count = 0\n\n\t\t\t\/\/ First things first, let's spawn our pool of workers.\n\t\t\tdataCh := make(chan map[string]string, 1)\n\t\t\ttimeout := make(chan bool, 30)\n\t\t\tgo stateWorker(timeout)\n\t\t\tfor i := 0; i < c.Int(\"j\"); i++ {\n\t\t\t\tgo fillWorker(dataCh, timeout)\n\t\t\t}\n\n\t\t\t\/\/ Let's get this dir walking in there then shall we?\n\t\t\tsrcFiles := listFiles(args[0])\n\t\t\tdstFiles := listFiles(args[1])\n\n\t\t\t\/\/ Now let's find all our overlap\n\t\t\toverlap := []string{}\n\t\t\tfor a := 0; a < len(srcFiles); a++ {\n\t\t\t\tfor b := 0; b < len(dstFiles); b++ {\n\t\t\t\t\tif srcFiles[a] == dstFiles[b] {\n\t\t\t\t\t\toverlap = append(overlap, srcFiles[a])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ And display some stats\n\t\t\tInfo.Printf(\"srcDir: %d files, dstDir: %d files, %d overlap.\",\n\t\t\t\tlen(srcFiles),\n\t\t\t\tlen(dstFiles),\n\t\t\t\tlen(overlap))\n\t\t\tState.Overlap = int64(len(overlap))\n\t\t\tState.SrcTotal = int64(len(srcFiles))\n\t\t\tState.DstTotal = int64(len(dstFiles))\n\n\t\t\t\/\/ Now we can push in all our data, and let our workers do their\n\t\t\t\/\/ lovely little thing. Ahhhhh concurrency.\n\t\t\tStartTime = time.Now().Unix()\n\t\t\tfor i := 0; i < len(overlap); i++ {\n\t\t\t\tdataCh <- map[string]string{\n\t\t\t\t\t\"SRC\": fmt.Sprintf(\"%s\/%s\", args[0], overlap[i]),\n\t\t\t\t\t\"DST\": fmt.Sprintf(\"%s\/%s\", args[1], overlap[i]),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ And while they're off doing that, here we will just sit and watch\n\t\t\t\/\/ the return channel and count things. Once we have all our\n\t\t\t\/\/ backfill operations accounted for, we can reap all of our workers\n\t\t\t\/\/ and carry on.\n\t\t\tfor State.Count < int64(len(overlap)) {\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t\t\/\/ And finally let's reap all our children\n\t\t\tfor idx := 0; idx < c.Int(\"j\"); idx++ {\n\t\t\t\tdataCh <- map[string]string{\n\t\t\t\t\t\"SRC\": \"__EXIT__\",\n\t\t\t\t\t\"DST\": \"__EXIT__\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Println(\"\")\n\t\t\tInfo.Println((State.Overlap \/ (time.Now().Unix() - StartTime)),\n\t\t\t\t\"whisper files processed per second.\")\n\t\t} else {\n\t\t\tif !backfillFile(args[0], args[1]) {\n\t\t\t\te := fmt.Sprintf(\"Error while backfilling.\")\n\t\t\t\tError.Println(e)\n\t\t\t\treturn errors.New(e)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar e string\n\t\te = fmt.Sprintf(\"Wrong number of paramters given.\")\n\t\tError.Println(e)\n\t\tError.Printf(\"Try '%s help fill' for more information\",\n\t\t\tpath.Base(os.Args[0]))\n\t\treturn errors.New(e)\n\t}\n\treturn nil\n}\n*\/\nfunc init() {\n\tDataChan = make(chan map[string]string)\n\tCommands = append(Commands, cli.Command{\n\t\tName: \"fill\",\n\t\tAliases: []string{\"f\"},\n\t\tUsage: \"Backfill datapoints in the dst from the src\",\n\t\tDescription: \"Backfill datapoints in the dst from the src\",\n\t\tArgsUsage: \"<src(File|Dir)> <dst(File|Dir)>\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"j\",\n\t\t\t\tUsage: \"Number of workers (for directory recursion)\",\n\t\t\t\tValue: runtime.GOMAXPROCS(0),\n\t\t\t},\n\t\t},\n\t\tSkipFlagParsing: false,\n\t\tHideHelp: false,\n\t\tHidden: false,\n\t\tAction: BackFill,\n\t})\n}\n<commit_msg>proper argument\/file\/dir detection and error handling<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\tDataChan chan map[string]string\n\tFillerSig chan bool\n)\n\ntype Dirstate struct {\n\tLocation string\n\tContents map[string][]string\n}\n\nfunc Filler() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-DataChan:\n\t\t\tDebug <- fmt.Sprintf(\"Backfill: %s -> %s\", msg[\"Source\"], msg[\"Destination\"])\n\t\tcase <-FillerSig:\n\t\t\tFillerSig <- true\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc BackFill(c *cli.Context) {\n\tif len(c.Args()) < 2 {\n\t\tError <- \"Invalid arguments. See 'flustro help fill' for more information.\"\n\t\tos.Exit(1)\n\t} else {\n\t\t\/\/ declare our variables\n\t\tvar srcObj Dirstate\n\t\tvar dstObj Dirstate\n\t\tsrcDir := c.Args().Get(0)\n\t\tdstDir := c.Args().Get(1)\n\n\t\t\/\/ Now let's do the heavy lifting\n\t\tif isDir(srcDir) && isDir(dstDir) {\n\t\t\t\/\/ First let's get our dir contents\n\t\t\tsrcObj = ListDir(srcDir)\n\t\t\tdstObj = ListDir(dstDir)\n\t\t\t\/\/ then spawn our worker pool, and get to processing\n\t\t\tfor i := 0; i < c.Int(\"j\"); i++ {\n\t\t\t\tgo Filler()\n\t\t\t}\n\t\t\t\/\/ next we'll start processing through our srcObj and dstObj lists and\n\t\t\t\/\/ backfill everything that's present in both locations\n\t\t\tfor k, _ := range srcObj.Contents {\n\t\t\t\tif _, ok := dstObj.Contents[k]; ok {\n\t\t\t\t\tfor _, v := range srcObj.Contents[k] {\n\t\t\t\t\t\tfor _, dv := range dstObj.Contents[k] {\n\t\t\t\t\t\t\tif v == dv {\n\t\t\t\t\t\t\t\tDataChan <- map[string]string{\n\t\t\t\t\t\t\t\t\t\"Source\": fmt.Sprintf(\"%s\/%s\/%s\", srcObj.Location, k, v),\n\t\t\t\t\t\t\t\t\t\"Destination\": fmt.Sprintf(\"%s\/%s\/%s\", dstObj.Location, k, v),\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if isFile(srcDir) && isFile(dstDir) {\n\t\t\t\/\/ we only need one worker for this job\n\t\t\tgo Filler()\n\t\t\tDataChan <- map[string]string{\n\t\t\t\t\"Source\": srcDir,\n\t\t\t\t\"Destination\": dstDir,\n\t\t\t}\n\t\t} else {\n\t\t\tError <- fmt.Sprintf(\"SRC and DST must be either both files or both dirs.\")\n\t\t}\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\treturn\n}\n\n\/*\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/fuzzy\/gcl\"\n\t\"github.com\/kisielk\/whisper-go\/whisper\"\n\t\"github.com\/urfave\/cli\"\n)\n\ntype FillState struct {\n\tStartTime int64\n\tCount int64\n\tCountLock sync.Mutex\n\tCountChan chan bool\n\tSrcTotal int64\n\tDstTotal int64\n\tOverlap int64\n}\n\nfunc (f FillState) Increment() {\n\tf.CountLock.Lock()\n\tf.Count++\n\tf.CountChan <- true\n\tf.CountLock.Unlock()\n}\n\nfunc (f FillState) DumpState() {\n\t\/\/ grab our lock, this ensures accurate reporting, and gives a bit more\n\t\/\/ of a concurrency throttle. This is not necessarily a bad thing.\n\tf.CountLock.Lock()\n\tr := (time.Now().Unix() - f.StartTime)\n\tc := f.Count\n\tif r > 0 {\n\t\tc = (f.Count \/ r)\n\t}\n\tp := (float64(f.Count) \/ float64(f.Overlap)) * 100.00\n\tfmt.Printf(\"%s %-6s\/%-6s (%6.02f%%) in %d seconds @ %-6s\/sec\",\n\t\tgcl.String(\"Info\").Green().Bold(),\n\t\thumanize.Comma(f.Count),\n\t\thumanize.Comma(f.Overlap),\n\t\tp, HumanTime(int(r)), c)\n\tf.CountLock.Unlock()\n}\n\nvar (\n\tState FillState\n)\n\nfunc stateWorker(t chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase sig := <-State.CountChan:\n\t\t\tif sig {\n\t\t\t\tState.DumpState()\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-t:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc fillWorker(d chan map[string]string, t chan bool) {\n\tfor {\n\t\tmsg := <-d\n\t\tif msg[\"SRC\"] == \"__EXIT__\" && msg[\"DST\"] == \"__EXIT__\" {\n\t\t\treturn\n\t\t} else {\n\t\t\tif isFile(msg[\"SRC\"]) && isFile(msg[\"DST\"]) {\n\t\t\t\tif !backfillFile(msg[\"SRC\"], msg[\"DST\"]) {\n\t\t\t\t\tError.Printf(\"S:%s D:%s - Backfill operation failed.\",\n\t\t\t\t\t\tmsg[\"SRC\"],\n\t\t\t\t\t\tmsg[\"DST\"])\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t} else {\n\t\t\t\t\tState.Increment()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc backfillFile(s string, d string) bool {\n\t\/\/ Open our filehandles\n\tsDb, sErr := whisper.Open(s)\n\tdDb, dErr := whisper.Open(d)\n\tif !chkErr(sErr) || !chkErr(dErr) {\n\t\tos.Exit(1)\n\t}\n\t\/\/ Defer their closings\n\tdefer sDb.Close()\n\tdefer dDb.Close()\n\n\t\/\/ Now for a series of checks, first to ensure that both\n\t\/\/ files have the same number of archives in them.\n\tif sDb.Header.Metadata.ArchiveCount != dDb.Header.Metadata.ArchiveCount {\n\t\tError.Println(\"The files have a mismatched set of archives.\")\n\t\treturn false\n\t}\n\n\t\/\/ Now we'll start processing the archives, checking as we go to see if they\n\t\/\/ are matched. That way we at least fill in what we can, possibly....\n\tfor i, a := range sDb.Header.Archives {\n\t\t\/\/ The offset\n\t\tif a.Offset == dDb.Header.Archives[i].Offset {\n\t\t\t\/\/ and the number of points\n\t\t\tif a.Points == dDb.Header.Archives[i].Points {\n\t\t\t\t\/\/ and finally the interval\n\t\t\t\tif a.SecondsPerPoint == dDb.Header.Archives[i].SecondsPerPoint {\n\t\t\t\t\t\/\/ ok, now let's get rolling through the archives\n\t\t\t\t\tsp, se := sDb.DumpArchive(i)\n\t\t\t\t\tif se != nil {\n\t\t\t\t\t\tError.Println(se.Error())\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tdp, de := dDb.DumpArchive(i)\n\t\t\t\t\tif de != nil {\n\t\t\t\t\t\tError.Println(de.Error())\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tfor idx := 0; idx < len(sp); idx++ {\n\t\t\t\t\t\tif sp[idx].Timestamp != 0 && sp[idx].Value != 0 {\n\t\t\t\t\t\t\tif dp[idx].Timestamp == 0 || dp[idx].Value == 0 {\n\t\t\t\t\t\t\t\tdp[idx].Timestamp = sp[idx].Timestamp\n\t\t\t\t\t\t\t\tdp[idx].Value = sp[idx].Value\n\t\t\t\t\t\t\t\tdDb.Update(dp[idx])\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc Filler(c *cli.Context) error {\n\tif len(c.Args()) == 2 {\n\t\targs := c.Args()\n\t\tif isDir(args[0]) && isDir(args[1]) {\n\t\t\tState.Count = 0\n\n\t\t\t\/\/ First things first, let's spawn our pool of workers.\n\t\t\tdataCh := make(chan map[string]string, 1)\n\t\t\ttimeout := make(chan bool, 30)\n\t\t\tgo stateWorker(timeout)\n\t\t\tfor i := 0; i < c.Int(\"j\"); i++ {\n\t\t\t\tgo fillWorker(dataCh, timeout)\n\t\t\t}\n\n\t\t\t\/\/ Let's get this dir walking in there then shall we?\n\t\t\tsrcFiles := listFiles(args[0])\n\t\t\tdstFiles := listFiles(args[1])\n\n\t\t\t\/\/ Now let's find all our overlap\n\t\t\toverlap := []string{}\n\t\t\tfor a := 0; a < len(srcFiles); a++ {\n\t\t\t\tfor b := 0; b < len(dstFiles); b++ {\n\t\t\t\t\tif srcFiles[a] == dstFiles[b] {\n\t\t\t\t\t\toverlap = append(overlap, srcFiles[a])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ And display some stats\n\t\t\tInfo.Printf(\"srcDir: %d files, dstDir: %d files, %d overlap.\",\n\t\t\t\tlen(srcFiles),\n\t\t\t\tlen(dstFiles),\n\t\t\t\tlen(overlap))\n\t\t\tState.Overlap = int64(len(overlap))\n\t\t\tState.SrcTotal = int64(len(srcFiles))\n\t\t\tState.DstTotal = int64(len(dstFiles))\n\n\t\t\t\/\/ Now we can push in all our data, and let our workers do their\n\t\t\t\/\/ lovely little thing. Ahhhhh concurrency.\n\t\t\tStartTime = time.Now().Unix()\n\t\t\tfor i := 0; i < len(overlap); i++ {\n\t\t\t\tdataCh <- map[string]string{\n\t\t\t\t\t\"SRC\": fmt.Sprintf(\"%s\/%s\", args[0], overlap[i]),\n\t\t\t\t\t\"DST\": fmt.Sprintf(\"%s\/%s\", args[1], overlap[i]),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ And while they're off doing that, here we will just sit and watch\n\t\t\t\/\/ the return channel and count things. Once we have all our\n\t\t\t\/\/ backfill operations accounted for, we can reap all of our workers\n\t\t\t\/\/ and carry on.\n\t\t\tfor State.Count < int64(len(overlap)) {\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t\t\/\/ And finally let's reap all our children\n\t\t\tfor idx := 0; idx < c.Int(\"j\"); idx++ {\n\t\t\t\tdataCh <- map[string]string{\n\t\t\t\t\t\"SRC\": \"__EXIT__\",\n\t\t\t\t\t\"DST\": \"__EXIT__\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Println(\"\")\n\t\t\tInfo.Println((State.Overlap \/ (time.Now().Unix() - StartTime)),\n\t\t\t\t\"whisper files processed per second.\")\n\t\t} else {\n\t\t\tif !backfillFile(args[0], args[1]) {\n\t\t\t\te := fmt.Sprintf(\"Error while backfilling.\")\n\t\t\t\tError.Println(e)\n\t\t\t\treturn errors.New(e)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar e string\n\t\te = fmt.Sprintf(\"Wrong number of paramters given.\")\n\t\tError.Println(e)\n\t\tError.Printf(\"Try '%s help fill' for more information\",\n\t\t\tpath.Base(os.Args[0]))\n\t\treturn errors.New(e)\n\t}\n\treturn nil\n}\n*\/\nfunc init() {\n\tDataChan = make(chan map[string]string)\n\tCommands = append(Commands, cli.Command{\n\t\tName: \"fill\",\n\t\tAliases: []string{\"f\"},\n\t\tUsage: \"Backfill datapoints in the dst from the src\",\n\t\tDescription: \"Backfill datapoints in the dst from the src\",\n\t\tArgsUsage: \"<src(File|Dir)> <dst(File|Dir)>\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"j\",\n\t\t\t\tUsage: \"Number of workers (for directory recursion)\",\n\t\t\t\tValue: runtime.GOMAXPROCS(0),\n\t\t\t},\n\t\t},\n\t\tSkipFlagParsing: false,\n\t\tHideHelp: false,\n\t\tHidden: false,\n\t\tAction: BackFill,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\n\/\/ ToFishCompletion creates a fish completion string for the `*App`\n\/\/ The function errors if either parsing or writing of the string fails.\nfunc (a *App) ToFishCompletion() (string, error) {\n\tvar w bytes.Buffer\n\tif err := a.writeFishCompletionTemplate(&w); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn w.String(), nil\n}\n\ntype fishCompletionTemplate struct {\n\tApp *App\n\tCompletions []string\n\tAllCommands []string\n}\n\nfunc (a *App) writeFishCompletionTemplate(w io.Writer) error {\n\tconst name = \"cli\"\n\tt, err := template.New(name).Parse(FishCompletionTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\tallCommands := []string{}\n\n\t\/\/ Add global flags\n\tcompletions := a.prepareFishFlags(a.VisibleFlags(), allCommands)\n\n\t\/\/ Add help flag\n\tif !a.HideHelp {\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags([]Flag{HelpFlag}, allCommands)...,\n\t\t)\n\t}\n\n\t\/\/ Add version flag\n\tif !a.HideVersion {\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags([]Flag{VersionFlag}, allCommands)...,\n\t\t)\n\t}\n\n\t\/\/ Add commands and their flags\n\tcompletions = append(\n\t\tcompletions,\n\t\ta.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})...,\n\t)\n\n\treturn t.ExecuteTemplate(w, name, &fishCompletionTemplate{\n\t\tApp: a,\n\t\tCompletions: completions,\n\t\tAllCommands: allCommands,\n\t})\n}\n\nfunc (a *App) prepareFishCommands(\n\tcommands []Command,\n\tallCommands *[]string,\n\tpreviousCommands []string,\n) []string {\n\tcompletions := []string{}\n\tfor i := range commands {\n\t\tcommand := &commands[i]\n\n\t\tvar completion strings.Builder\n\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\"complete -r -c %s -n '%s' -a '%s'\",\n\t\t\ta.Name,\n\t\t\ta.fishSubcommandHelper(previousCommands),\n\t\t\tstrings.Join(command.Names(), \" \"),\n\t\t))\n\n\t\tif command.Usage != \"\" {\n\t\t\tcompletion.WriteString(fmt.Sprintf(\" -d '%s'\", command.Usage))\n\t\t}\n\n\t\tif !command.HideHelp {\n\t\t\tcompletions = append(\n\t\t\t\tcompletions,\n\t\t\t\ta.prepareFishFlags([]Flag{HelpFlag}, command.Names())...,\n\t\t\t)\n\t\t}\n\n\t\t*allCommands = append(*allCommands, command.Names()...)\n\t\tcompletions = append(completions, completion.String())\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags(command.Flags, command.Names())...,\n\t\t)\n\n\t\t\/\/ recursevly iterate subcommands\n\t\tif len(command.Subcommands) > 0 {\n\t\t\tcompletions = append(\n\t\t\t\tcompletions,\n\t\t\t\ta.prepareFishCommands(\n\t\t\t\t\tcommand.Subcommands, allCommands, command.Names(),\n\t\t\t\t)...,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn completions\n}\n\nfunc (a *App) prepareFishFlags(\n\tflags []Flag,\n\tpreviousCommands []string,\n) []string {\n\tcompletions := []string{}\n\tfor _, f := range flags {\n\t\tflag, ok := f.(DocGenerationFlag)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tcompletion := &strings.Builder{}\n\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\"complete -c %s -n '%s'\",\n\t\t\ta.Name,\n\t\t\ta.fishSubcommandHelper(previousCommands),\n\t\t))\n\n\t\tfishAddFileFlag(f, completion)\n\n\t\tfor idx, opt := range strings.Split(flag.GetName(), \",\") {\n\t\t\tif idx == 0 {\n\t\t\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\t\t\" -l %s\", strings.TrimSpace(opt),\n\t\t\t\t))\n\t\t\t} else {\n\t\t\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\t\t\" -s %s\", strings.TrimSpace(opt),\n\t\t\t\t))\n\n\t\t\t}\n\t\t}\n\n\t\tif flag.TakesValue() {\n\t\t\tcompletion.WriteString(\" -r\")\n\t\t}\n\n\t\tif flag.GetUsage() != \"\" {\n\t\t\tcompletion.WriteString(fmt.Sprintf(\" -d '%s'\", flag.GetUsage()))\n\t\t}\n\n\t\tcompletions = append(completions, completion.String())\n\t}\n\n\treturn completions\n}\n\nfunc fishAddFileFlag(\n\tflag Flag,\n\tcompletion *strings.Builder,\n) {\n\tswitch f := flag.(type) {\n\tcase GenericFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\tcase StringFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\tcase StringSliceFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\t}\n\tcompletion.WriteString(\" -f\")\n}\n\nfunc (a *App) fishSubcommandHelper(allCommands []string) string {\n\tfishHelper := fmt.Sprintf(\"__fish_%s_no_subcommand\", a.Name)\n\tif len(allCommands) > 0 {\n\t\tfishHelper = fmt.Sprintf(\n\t\t\t\"__fish_seen_subcommand_from %s\",\n\t\t\tstrings.Join(allCommands, \" \"),\n\t\t)\n\t}\n\treturn fishHelper\n\n}\n<commit_msg>Update function alignment<commit_after>package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\n\/\/ ToFishCompletion creates a fish completion string for the `*App`\n\/\/ The function errors if either parsing or writing of the string fails.\nfunc (a *App) ToFishCompletion() (string, error) {\n\tvar w bytes.Buffer\n\tif err := a.writeFishCompletionTemplate(&w); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn w.String(), nil\n}\n\ntype fishCompletionTemplate struct {\n\tApp *App\n\tCompletions []string\n\tAllCommands []string\n}\n\nfunc (a *App) writeFishCompletionTemplate(w io.Writer) error {\n\tconst name = \"cli\"\n\tt, err := template.New(name).Parse(FishCompletionTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\tallCommands := []string{}\n\n\t\/\/ Add global flags\n\tcompletions := a.prepareFishFlags(a.VisibleFlags(), allCommands)\n\n\t\/\/ Add help flag\n\tif !a.HideHelp {\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags([]Flag{HelpFlag}, allCommands)...,\n\t\t)\n\t}\n\n\t\/\/ Add version flag\n\tif !a.HideVersion {\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags([]Flag{VersionFlag}, allCommands)...,\n\t\t)\n\t}\n\n\t\/\/ Add commands and their flags\n\tcompletions = append(\n\t\tcompletions,\n\t\ta.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})...,\n\t)\n\n\treturn t.ExecuteTemplate(w, name, &fishCompletionTemplate{\n\t\tApp: a,\n\t\tCompletions: completions,\n\t\tAllCommands: allCommands,\n\t})\n}\n\nfunc (a *App) prepareFishCommands(commands []Command, allCommands *[]string, previousCommands []string) []string {\n\tcompletions := []string{}\n\tfor i := range commands {\n\t\tcommand := &commands[i]\n\n\t\tvar completion strings.Builder\n\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\"complete -r -c %s -n '%s' -a '%s'\",\n\t\t\ta.Name,\n\t\t\ta.fishSubcommandHelper(previousCommands),\n\t\t\tstrings.Join(command.Names(), \" \"),\n\t\t))\n\n\t\tif command.Usage != \"\" {\n\t\t\tcompletion.WriteString(fmt.Sprintf(\" -d '%s'\", command.Usage))\n\t\t}\n\n\t\tif !command.HideHelp {\n\t\t\tcompletions = append(\n\t\t\t\tcompletions,\n\t\t\t\ta.prepareFishFlags([]Flag{HelpFlag}, command.Names())...,\n\t\t\t)\n\t\t}\n\n\t\t*allCommands = append(*allCommands, command.Names()...)\n\t\tcompletions = append(completions, completion.String())\n\t\tcompletions = append(\n\t\t\tcompletions,\n\t\t\ta.prepareFishFlags(command.Flags, command.Names())...,\n\t\t)\n\n\t\t\/\/ recursevly iterate subcommands\n\t\tif len(command.Subcommands) > 0 {\n\t\t\tcompletions = append(\n\t\t\t\tcompletions,\n\t\t\t\ta.prepareFishCommands(\n\t\t\t\t\tcommand.Subcommands, allCommands, command.Names(),\n\t\t\t\t)...,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn completions\n}\n\nfunc (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string {\n\tcompletions := []string{}\n\tfor _, f := range flags {\n\t\tflag, ok := f.(DocGenerationFlag)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tcompletion := &strings.Builder{}\n\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\"complete -c %s -n '%s'\",\n\t\t\ta.Name,\n\t\t\ta.fishSubcommandHelper(previousCommands),\n\t\t))\n\n\t\tfishAddFileFlag(f, completion)\n\n\t\tfor idx, opt := range strings.Split(flag.GetName(), \",\") {\n\t\t\tif idx == 0 {\n\t\t\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\t\t\" -l %s\", strings.TrimSpace(opt),\n\t\t\t\t))\n\t\t\t} else {\n\t\t\t\tcompletion.WriteString(fmt.Sprintf(\n\t\t\t\t\t\" -s %s\", strings.TrimSpace(opt),\n\t\t\t\t))\n\n\t\t\t}\n\t\t}\n\n\t\tif flag.TakesValue() {\n\t\t\tcompletion.WriteString(\" -r\")\n\t\t}\n\n\t\tif flag.GetUsage() != \"\" {\n\t\t\tcompletion.WriteString(fmt.Sprintf(\" -d '%s'\", flag.GetUsage()))\n\t\t}\n\n\t\tcompletions = append(completions, completion.String())\n\t}\n\n\treturn completions\n}\n\nfunc fishAddFileFlag(flag Flag, completion *strings.Builder) {\n\tswitch f := flag.(type) {\n\tcase GenericFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\tcase StringFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\tcase StringSliceFlag:\n\t\tif f.TakesFile {\n\t\t\treturn\n\t\t}\n\t}\n\tcompletion.WriteString(\" -f\")\n}\n\nfunc (a *App) fishSubcommandHelper(allCommands []string) string {\n\tfishHelper := fmt.Sprintf(\"__fish_%s_no_subcommand\", a.Name)\n\tif len(allCommands) > 0 {\n\t\tfishHelper = fmt.Sprintf(\n\t\t\t\"__fish_seen_subcommand_from %s\",\n\t\t\tstrings.Join(allCommands, \" \"),\n\t\t)\n\t}\n\treturn fishHelper\n\n}\n<|endoftext|>"} {"text":"<commit_before>package butlerd\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/itchio\/wharf\/werrors\"\n\n\t\"github.com\/itchio\/httpkit\/neterr\"\n\t\"github.com\/itchio\/httpkit\/progress\"\n\n\t\"crawshaw.io\/sqlite\"\n\t\"github.com\/itchio\/butler\/database\/models\"\n\titchio \"github.com\/itchio\/go-itchio\"\n\t\"github.com\/itchio\/wharf\/state\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sourcegraph\/jsonrpc2\"\n)\n\ntype RequestHandler func(rc *RequestContext) (interface{}, error)\ntype NotificationHandler func(rc *RequestContext)\n\ntype GetClientFunc func(key string) *itchio.Client\n\ntype Router struct {\n\tHandlers map[string]RequestHandler\n\tNotificationHandlers map[string]NotificationHandler\n\tCancelFuncs *CancelFuncs\n\tdbPool *sqlite.Pool\n\tgetClient GetClientFunc\n\n\tButlerVersion string\n\tButlerVersionString string\n}\n\nfunc NewRouter(dbPool *sqlite.Pool, getClient GetClientFunc) *Router {\n\treturn &Router{\n\t\tHandlers: make(map[string]RequestHandler),\n\t\tNotificationHandlers: make(map[string]NotificationHandler),\n\t\tCancelFuncs: &CancelFuncs{\n\t\t\tFuncs: make(map[string]context.CancelFunc),\n\t\t},\n\t\tdbPool: dbPool,\n\t\tgetClient: getClient,\n\t}\n}\n\nfunc (r *Router) Register(method string, rh RequestHandler) {\n\tif _, ok := r.Handlers[method]; ok {\n\t\tpanic(fmt.Sprintf(\"Can't register handler twice for %s\", method))\n\t}\n\tr.Handlers[method] = rh\n}\n\nfunc (r *Router) RegisterNotification(method string, nh NotificationHandler) {\n\tif _, ok := r.NotificationHandlers[method]; ok {\n\t\tpanic(fmt.Sprintf(\"Can't register handler twice for %s\", method))\n\t}\n\tr.NotificationHandlers[method] = nh\n}\n\nfunc (r *Router) Dispatch(ctx context.Context, origConn *jsonrpc2.Conn, req *jsonrpc2.Request) {\n\tmethod := req.Method\n\tvar res interface{}\n\n\tconn := &JsonRPC2Conn{origConn}\n\tconsumer, cErr := NewStateConsumer(&NewStateConsumerParams{\n\t\tCtx: ctx,\n\t\tConn: conn,\n\t})\n\tif cErr != nil {\n\t\treturn\n\t}\n\n\terr := func() (err error) {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tif rErr, ok := r.(error); ok {\n\t\t\t\t\terr = errors.WithStack(rErr)\n\t\t\t\t} else {\n\t\t\t\t\terr = errors.Errorf(\"panic: %v\", r)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\trc := &RequestContext{\n\t\t\tCtx: ctx,\n\t\t\tConsumer: consumer,\n\t\t\tParams: req.Params,\n\t\t\tConn: conn,\n\t\t\tCancelFuncs: r.CancelFuncs,\n\t\t\tdbPool: r.dbPool,\n\t\t\tClient: r.getClient,\n\n\t\t\tButlerVersion: r.ButlerVersion,\n\t\t\tButlerVersionString: r.ButlerVersionString,\n\t\t}\n\n\t\tif req.Notif {\n\t\t\tif nh, ok := r.NotificationHandlers[req.Method]; ok {\n\t\t\t\tnh(rc)\n\t\t\t}\n\t\t} else {\n\t\t\tif h, ok := r.Handlers[method]; ok {\n\t\t\t\trc.Consumer.OnProgress = func(alpha float64) {\n\t\t\t\t\tif rc.tracker == nil {\n\t\t\t\t\t\t\/\/ skip\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\trc.tracker.SetProgress(alpha)\n\t\t\t\t\tnotif := ProgressNotification{\n\t\t\t\t\t\tProgress: alpha,\n\t\t\t\t\t\tETA: rc.tracker.ETA().Seconds(),\n\t\t\t\t\t\tBPS: rc.tracker.BPS(),\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ cannot use autogenerated wrappers to avoid import cycles\n\t\t\t\t\trc.Notify(\"Progress\", notif)\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnProgressLabel = func(label string) {\n\t\t\t\t\t\/\/ muffin\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnPauseProgress = func() {\n\t\t\t\t\tif rc.tracker != nil {\n\t\t\t\t\t\trc.tracker.Pause()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnResumeProgress = func() {\n\t\t\t\t\tif rc.tracker != nil {\n\t\t\t\t\t\trc.tracker.Resume()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tres, err = h(rc)\n\t\t\t} else {\n\t\t\t\terr = &RpcError{\n\t\t\t\t\tCode: jsonrpc2.CodeMethodNotFound,\n\t\t\t\t\tMessage: fmt.Sprintf(\"Method '%s' not found\", req.Method),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}()\n\n\tif req.Notif {\n\t\treturn\n\t}\n\n\tif err == nil {\n\t\terr = origConn.Reply(ctx, req.ID, res)\n\t\tif err != nil {\n\t\t\tconsumer.Errorf(\"Error while replying: %s\", err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tvar code int64\n\tvar message string\n\tvar data map[string]interface{}\n\n\tif ee, ok := AsButlerdError(err); ok {\n\t\tcode = ee.RpcErrorCode()\n\t\tmessage = ee.RpcErrorMessage()\n\t\tdata = ee.RpcErrorData()\n\t} else {\n\t\tif neterr.IsNetworkError(err) {\n\t\t\tcode = int64(CodeNetworkDisconnected)\n\t\t\tmessage = CodeNetworkDisconnected.Error()\n\t\t} else if errors.Cause(err) == werrors.ErrCancelled {\n\t\t\tcode = int64(CodeOperationCancelled)\n\t\t\tmessage = CodeOperationCancelled.Error()\n\t\t} else {\n\t\t\tcode = jsonrpc2.CodeInternalError\n\t\t\tmessage = err.Error()\n\t\t}\n\t}\n\n\tvar rawData *json.RawMessage\n\tif data == nil {\n\t\tdata = make(map[string]interface{})\n\t}\n\tdata[\"stack\"] = fmt.Sprintf(\"%+v\", err)\n\tdata[\"butlerVersion\"] = r.ButlerVersionString\n\n\tmarshalledData, marshalErr := json.Marshal(data)\n\tif marshalErr == nil {\n\t\trawMessage := json.RawMessage(marshalledData)\n\t\trawData = &rawMessage\n\t}\n\n\torigConn.ReplyWithError(ctx, req.ID, &jsonrpc2.Error{\n\t\tCode: code,\n\t\tMessage: message,\n\t\tData: rawData,\n\t})\n}\n\ntype RequestContext struct {\n\tCtx context.Context\n\tConsumer *state.Consumer\n\tParams *json.RawMessage\n\tConn Conn\n\tCancelFuncs *CancelFuncs\n\tdbPool *sqlite.Pool\n\tClient GetClientFunc\n\n\tButlerVersion string\n\tButlerVersionString string\n\n\tnotificationInterceptors map[string]NotificationInterceptor\n\ttracker *progress.Tracker\n}\n\ntype WithParamsFunc func() (interface{}, error)\n\ntype NotificationInterceptor func(method string, params interface{}) error\n\nfunc (rc *RequestContext) Call(method string, params interface{}, res interface{}) error {\n\treturn rc.Conn.Call(rc.Ctx, method, params, res)\n}\n\nfunc (rc *RequestContext) InterceptNotification(method string, interceptor NotificationInterceptor) {\n\tif rc.notificationInterceptors == nil {\n\t\trc.notificationInterceptors = make(map[string]NotificationInterceptor)\n\t}\n\trc.notificationInterceptors[method] = interceptor\n}\n\nfunc (rc *RequestContext) StopInterceptingNotification(method string) {\n\tif rc.notificationInterceptors == nil {\n\t\treturn\n\t}\n\tdelete(rc.notificationInterceptors, method)\n}\n\nfunc (rc *RequestContext) Notify(method string, params interface{}) error {\n\tif rc.notificationInterceptors != nil {\n\t\tif ni, ok := rc.notificationInterceptors[method]; ok {\n\t\t\treturn ni(method, params)\n\t\t}\n\t}\n\treturn rc.Conn.Notify(rc.Ctx, method, params)\n}\n\nfunc (rc *RequestContext) RootClient() *itchio.Client {\n\treturn rc.Client(\"<keyless>\")\n}\n\nfunc (rc *RequestContext) ProfileClient(profileID int64) (*models.Profile, *itchio.Client) {\n\tif profileID == 0 {\n\t\tpanic(errors.New(\"profileId must be non-zero\"))\n\t}\n\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\n\tprofile := models.ProfileByID(conn, profileID)\n\tif profile == nil {\n\t\tpanic(errors.Errorf(\"Could not find profile %d\", profileID))\n\t}\n\n\tif profile.APIKey == \"\" {\n\t\tpanic(errors.Errorf(\"Profile %d lacks API key\", profileID))\n\t}\n\n\treturn profile, rc.Client(profile.APIKey)\n}\n\nfunc (rc *RequestContext) StartProgress() {\n\trc.StartProgressWithTotalBytes(0)\n}\n\nfunc (rc *RequestContext) StartProgressWithTotalBytes(totalBytes int64) {\n\trc.StartProgressWithInitialAndTotal(0.0, totalBytes)\n}\n\nfunc (rc *RequestContext) StartProgressWithInitialAndTotal(initialProgress float64, totalBytes int64) {\n\tif rc.tracker != nil {\n\t\trc.Consumer.Warnf(\"Asked to start progress but already tracking progress!\")\n\t\treturn\n\t}\n\n\trc.tracker = progress.NewTracker()\n\trc.tracker.SetSilent(true)\n\trc.tracker.SetProgress(initialProgress)\n\trc.tracker.SetTotalBytes(totalBytes)\n\trc.tracker.Start()\n}\n\nfunc (rc *RequestContext) EndProgress() {\n\tif rc.tracker != nil {\n\t\trc.tracker.Finish()\n\t\trc.tracker = nil\n\t} else {\n\t\trc.Consumer.Warnf(\"Asked to stop progress but wasn't tracking progress!\")\n\t}\n}\n\nfunc (rc *RequestContext) GetConn() *sqlite.Conn {\n\tgetCtx, cancel := context.WithTimeout(rc.Ctx, 1*time.Second)\n\tdefer cancel()\n\tconn := rc.dbPool.Get(getCtx.Done())\n\tif conn != nil {\n\t\tconn.SetInterrupt(rc.Ctx.Done())\n\t\treturn conn\n\t}\n\n\tpanic(errors.WithStack(CodeDatabaseBusy))\n\treturn nil\n}\n\nfunc (rc *RequestContext) PutConn(conn *sqlite.Conn) {\n\trc.dbPool.Put(conn)\n}\n\nfunc (rc *RequestContext) WithConn(f func(conn *sqlite.Conn)) {\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\tf(conn)\n}\n\nfunc (rc *RequestContext) WithConnBool(f func(conn *sqlite.Conn) bool) bool {\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\treturn f(conn)\n}\n\ntype CancelFuncs struct {\n\tFuncs map[string]context.CancelFunc\n}\n\nfunc (cf *CancelFuncs) Add(id string, f context.CancelFunc) {\n\tcf.Funcs[id] = f\n}\n\nfunc (cf *CancelFuncs) Remove(id string) {\n\tdelete(cf.Funcs, id)\n}\n\nfunc (cf *CancelFuncs) Call(id string) bool {\n\tif f, ok := cf.Funcs[id]; ok {\n\t\tf()\n\t\tdelete(cf.Funcs, id)\n\t\treturn true\n\t}\n\n\treturn false\n}\n<commit_msg>Keep track of origConn and method in RequestContext<commit_after>package butlerd\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/itchio\/wharf\/werrors\"\n\n\t\"github.com\/itchio\/httpkit\/neterr\"\n\t\"github.com\/itchio\/httpkit\/progress\"\n\n\t\"crawshaw.io\/sqlite\"\n\t\"github.com\/itchio\/butler\/database\/models\"\n\titchio \"github.com\/itchio\/go-itchio\"\n\t\"github.com\/itchio\/wharf\/state\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sourcegraph\/jsonrpc2\"\n)\n\ntype RequestHandler func(rc *RequestContext) (interface{}, error)\ntype NotificationHandler func(rc *RequestContext)\n\ntype GetClientFunc func(key string) *itchio.Client\n\ntype Router struct {\n\tHandlers map[string]RequestHandler\n\tNotificationHandlers map[string]NotificationHandler\n\tCancelFuncs *CancelFuncs\n\tdbPool *sqlite.Pool\n\tgetClient GetClientFunc\n\n\tButlerVersion string\n\tButlerVersionString string\n}\n\nfunc NewRouter(dbPool *sqlite.Pool, getClient GetClientFunc) *Router {\n\treturn &Router{\n\t\tHandlers: make(map[string]RequestHandler),\n\t\tNotificationHandlers: make(map[string]NotificationHandler),\n\t\tCancelFuncs: &CancelFuncs{\n\t\t\tFuncs: make(map[string]context.CancelFunc),\n\t\t},\n\t\tdbPool: dbPool,\n\t\tgetClient: getClient,\n\t}\n}\n\nfunc (r *Router) Register(method string, rh RequestHandler) {\n\tif _, ok := r.Handlers[method]; ok {\n\t\tpanic(fmt.Sprintf(\"Can't register handler twice for %s\", method))\n\t}\n\tr.Handlers[method] = rh\n}\n\nfunc (r *Router) RegisterNotification(method string, nh NotificationHandler) {\n\tif _, ok := r.NotificationHandlers[method]; ok {\n\t\tpanic(fmt.Sprintf(\"Can't register handler twice for %s\", method))\n\t}\n\tr.NotificationHandlers[method] = nh\n}\n\nfunc (r *Router) Dispatch(ctx context.Context, origConn *jsonrpc2.Conn, req *jsonrpc2.Request) {\n\tmethod := req.Method\n\tvar res interface{}\n\n\tconn := &JsonRPC2Conn{origConn}\n\tconsumer, cErr := NewStateConsumer(&NewStateConsumerParams{\n\t\tCtx: ctx,\n\t\tConn: conn,\n\t})\n\tif cErr != nil {\n\t\treturn\n\t}\n\n\terr := func() (err error) {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tif rErr, ok := r.(error); ok {\n\t\t\t\t\terr = errors.WithStack(rErr)\n\t\t\t\t} else {\n\t\t\t\t\terr = errors.Errorf(\"panic: %v\", r)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\trc := &RequestContext{\n\t\t\tCtx: ctx,\n\t\t\tConsumer: consumer,\n\t\t\tParams: req.Params,\n\t\t\tConn: conn,\n\t\t\tCancelFuncs: r.CancelFuncs,\n\t\t\tdbPool: r.dbPool,\n\t\t\tClient: r.getClient,\n\n\t\t\tButlerVersion: r.ButlerVersion,\n\t\t\tButlerVersionString: r.ButlerVersionString,\n\n\t\t\torigConn: origConn,\n\t\t\tmethod: method,\n\t\t}\n\n\t\tif req.Notif {\n\t\t\tif nh, ok := r.NotificationHandlers[req.Method]; ok {\n\t\t\t\tnh(rc)\n\t\t\t}\n\t\t} else {\n\t\t\tif h, ok := r.Handlers[method]; ok {\n\t\t\t\trc.Consumer.OnProgress = func(alpha float64) {\n\t\t\t\t\tif rc.tracker == nil {\n\t\t\t\t\t\t\/\/ skip\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\trc.tracker.SetProgress(alpha)\n\t\t\t\t\tnotif := ProgressNotification{\n\t\t\t\t\t\tProgress: alpha,\n\t\t\t\t\t\tETA: rc.tracker.ETA().Seconds(),\n\t\t\t\t\t\tBPS: rc.tracker.BPS(),\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ cannot use autogenerated wrappers to avoid import cycles\n\t\t\t\t\trc.Notify(\"Progress\", notif)\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnProgressLabel = func(label string) {\n\t\t\t\t\t\/\/ muffin\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnPauseProgress = func() {\n\t\t\t\t\tif rc.tracker != nil {\n\t\t\t\t\t\trc.tracker.Pause()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trc.Consumer.OnResumeProgress = func() {\n\t\t\t\t\tif rc.tracker != nil {\n\t\t\t\t\t\trc.tracker.Resume()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tres, err = h(rc)\n\t\t\t} else {\n\t\t\t\terr = &RpcError{\n\t\t\t\t\tCode: jsonrpc2.CodeMethodNotFound,\n\t\t\t\t\tMessage: fmt.Sprintf(\"Method '%s' not found\", req.Method),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}()\n\n\tif req.Notif {\n\t\treturn\n\t}\n\n\tif err == nil {\n\t\terr = origConn.Reply(ctx, req.ID, res)\n\t\tif err != nil {\n\t\t\tconsumer.Errorf(\"Error while replying: %s\", err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tvar code int64\n\tvar message string\n\tvar data map[string]interface{}\n\n\tif ee, ok := AsButlerdError(err); ok {\n\t\tcode = ee.RpcErrorCode()\n\t\tmessage = ee.RpcErrorMessage()\n\t\tdata = ee.RpcErrorData()\n\t} else {\n\t\tif neterr.IsNetworkError(err) {\n\t\t\tcode = int64(CodeNetworkDisconnected)\n\t\t\tmessage = CodeNetworkDisconnected.Error()\n\t\t} else if errors.Cause(err) == werrors.ErrCancelled {\n\t\t\tcode = int64(CodeOperationCancelled)\n\t\t\tmessage = CodeOperationCancelled.Error()\n\t\t} else {\n\t\t\tcode = jsonrpc2.CodeInternalError\n\t\t\tmessage = err.Error()\n\t\t}\n\t}\n\n\tvar rawData *json.RawMessage\n\tif data == nil {\n\t\tdata = make(map[string]interface{})\n\t}\n\tdata[\"stack\"] = fmt.Sprintf(\"%+v\", err)\n\tdata[\"butlerVersion\"] = r.ButlerVersionString\n\n\tmarshalledData, marshalErr := json.Marshal(data)\n\tif marshalErr == nil {\n\t\trawMessage := json.RawMessage(marshalledData)\n\t\trawData = &rawMessage\n\t}\n\n\torigConn.ReplyWithError(ctx, req.ID, &jsonrpc2.Error{\n\t\tCode: code,\n\t\tMessage: message,\n\t\tData: rawData,\n\t})\n}\n\ntype RequestContext struct {\n\tCtx context.Context\n\tConsumer *state.Consumer\n\tParams *json.RawMessage\n\tConn Conn\n\tCancelFuncs *CancelFuncs\n\tdbPool *sqlite.Pool\n\tClient GetClientFunc\n\n\tButlerVersion string\n\tButlerVersionString string\n\n\tnotificationInterceptors map[string]NotificationInterceptor\n\ttracker *progress.Tracker\n\n\tmethod string\n\torigConn *jsonrpc2.Conn\n}\n\ntype WithParamsFunc func() (interface{}, error)\n\ntype NotificationInterceptor func(method string, params interface{}) error\n\nfunc (rc *RequestContext) Call(method string, params interface{}, res interface{}) error {\n\treturn rc.Conn.Call(rc.Ctx, method, params, res)\n}\n\nfunc (rc *RequestContext) InterceptNotification(method string, interceptor NotificationInterceptor) {\n\tif rc.notificationInterceptors == nil {\n\t\trc.notificationInterceptors = make(map[string]NotificationInterceptor)\n\t}\n\trc.notificationInterceptors[method] = interceptor\n}\n\nfunc (rc *RequestContext) StopInterceptingNotification(method string) {\n\tif rc.notificationInterceptors == nil {\n\t\treturn\n\t}\n\tdelete(rc.notificationInterceptors, method)\n}\n\nfunc (rc *RequestContext) Notify(method string, params interface{}) error {\n\tif rc.notificationInterceptors != nil {\n\t\tif ni, ok := rc.notificationInterceptors[method]; ok {\n\t\t\treturn ni(method, params)\n\t\t}\n\t}\n\treturn rc.Conn.Notify(rc.Ctx, method, params)\n}\n\nfunc (rc *RequestContext) RootClient() *itchio.Client {\n\treturn rc.Client(\"<keyless>\")\n}\n\nfunc (rc *RequestContext) ProfileClient(profileID int64) (*models.Profile, *itchio.Client) {\n\tif profileID == 0 {\n\t\tpanic(errors.New(\"profileId must be non-zero\"))\n\t}\n\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\n\tprofile := models.ProfileByID(conn, profileID)\n\tif profile == nil {\n\t\tpanic(errors.Errorf(\"Could not find profile %d\", profileID))\n\t}\n\n\tif profile.APIKey == \"\" {\n\t\tpanic(errors.Errorf(\"Profile %d lacks API key\", profileID))\n\t}\n\n\treturn profile, rc.Client(profile.APIKey)\n}\n\nfunc (rc *RequestContext) StartProgress() {\n\trc.StartProgressWithTotalBytes(0)\n}\n\nfunc (rc *RequestContext) StartProgressWithTotalBytes(totalBytes int64) {\n\trc.StartProgressWithInitialAndTotal(0.0, totalBytes)\n}\n\nfunc (rc *RequestContext) StartProgressWithInitialAndTotal(initialProgress float64, totalBytes int64) {\n\tif rc.tracker != nil {\n\t\trc.Consumer.Warnf(\"Asked to start progress but already tracking progress!\")\n\t\treturn\n\t}\n\n\trc.tracker = progress.NewTracker()\n\trc.tracker.SetSilent(true)\n\trc.tracker.SetProgress(initialProgress)\n\trc.tracker.SetTotalBytes(totalBytes)\n\trc.tracker.Start()\n}\n\nfunc (rc *RequestContext) EndProgress() {\n\tif rc.tracker != nil {\n\t\trc.tracker.Finish()\n\t\trc.tracker = nil\n\t} else {\n\t\trc.Consumer.Warnf(\"Asked to stop progress but wasn't tracking progress!\")\n\t}\n}\n\nfunc (rc *RequestContext) GetConn() *sqlite.Conn {\n\tgetCtx, cancel := context.WithTimeout(rc.Ctx, 1*time.Second)\n\tdefer cancel()\n\tconn := rc.dbPool.Get(getCtx.Done())\n\tif conn != nil {\n\t\tconn.SetInterrupt(rc.Ctx.Done())\n\t\treturn conn\n\t}\n\n\tpanic(errors.WithStack(CodeDatabaseBusy))\n\treturn nil\n}\n\nfunc (rc *RequestContext) PutConn(conn *sqlite.Conn) {\n\trc.dbPool.Put(conn)\n}\n\nfunc (rc *RequestContext) WithConn(f func(conn *sqlite.Conn)) {\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\tf(conn)\n}\n\nfunc (rc *RequestContext) WithConnBool(f func(conn *sqlite.Conn) bool) bool {\n\tconn := rc.GetConn()\n\tdefer rc.PutConn(conn)\n\treturn f(conn)\n}\n\ntype CancelFuncs struct {\n\tFuncs map[string]context.CancelFunc\n}\n\nfunc (cf *CancelFuncs) Add(id string, f context.CancelFunc) {\n\tcf.Funcs[id] = f\n}\n\nfunc (cf *CancelFuncs) Remove(id string) {\n\tdelete(cf.Funcs, id)\n}\n\nfunc (cf *CancelFuncs) Call(id string) bool {\n\tif f, ok := cf.Funcs[id]; ok {\n\t\tf()\n\t\tdelete(cf.Funcs, id)\n\t\treturn true\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package broker is the micro broker\npackage broker\n\nimport (\n\t\"time\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\t\"github.com\/micro\/go-micro\/broker\/service\/handler\"\n\tpb \"github.com\/micro\/go-micro\/broker\/service\/proto\"\n\trcli \"github.com\/micro\/micro\/cli\"\n)\n\nvar (\n\t\/\/ Name of the broker\n\tName = \"go.micro.broker\"\n\t\/\/ The address of the broker\n\tAddress = \":8001\"\n)\n\nfunc run(ctx *cli.Context, srvOpts ...micro.Option) {\n\tlog.Name(\"broker\")\n\n\tif len(ctx.GlobalString(\"server_name\")) > 0 {\n\t\tName = ctx.GlobalString(\"server_name\")\n\t}\n\tif len(ctx.String(\"address\")) > 0 {\n\t\tAddress = ctx.String(\"address\")\n\t}\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\t\/\/ service opts\n\tsrvOpts = append(srvOpts, micro.Name(Name))\n\tif i := time.Duration(ctx.GlobalInt(\"register_ttl\")); i > 0 {\n\t\tsrvOpts = append(srvOpts, micro.RegisterTTL(i*time.Second))\n\t}\n\tif i := time.Duration(ctx.GlobalInt(\"register_interval\")); i > 0 {\n\t\tsrvOpts = append(srvOpts, micro.RegisterInterval(i*time.Second))\n\t}\n\n\t\/\/ set address\n\tif len(Address) > 0 {\n\t\tsrvOpts = append(srvOpts, micro.Address(Address))\n\t}\n\n\t\/\/ new service\n\tservice := micro.NewService(srvOpts...)\n\n\t\/\/ register the broker handler\n\tpb.RegisterBrokerHandler(service.Server(), &handler.Broker{\n\t\t\/\/ using the mdns broker\n\t\tBroker: service.Options().Broker,\n\t})\n\n\t\/\/ run the service\n\tservice.Run()\n}\n\nfunc Commands(options ...micro.Option) []cli.Command {\n\tcommand := cli.Command{\n\t\tName: \"broker\",\n\t\tUsage: \"Run the message broker\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"address\",\n\t\t\t\tUsage: \"Set the broker http address e.g 0.0.0.0:8001\",\n\t\t\t\tEnvVar: \"MICRO_SERVER_ADDRESS\",\n\t\t\t},\n\t\t},\n\t\tAction: func(ctx *cli.Context) {\n\t\t\trun(ctx, options...)\n\t\t},\n\t\tSubcommands: rcli.RegistryCommands(),\n\t}\n\n\tfor _, p := range Plugins() {\n\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\tcommand.Subcommands = append(command.Subcommands, cmds...)\n\t\t}\n\n\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\tcommand.Flags = append(command.Flags, flags...)\n\t\t}\n\t}\n\n\treturn []cli.Command{command}\n}\n<commit_msg>Go fmt<commit_after>\/\/ Package broker is the micro broker\npackage broker\n\nimport (\n\t\"time\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\"\n\t\"github.com\/micro\/go-micro\/broker\/service\/handler\"\n\tpb \"github.com\/micro\/go-micro\/broker\/service\/proto\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\trcli \"github.com\/micro\/micro\/cli\"\n)\n\nvar (\n\t\/\/ Name of the broker\n\tName = \"go.micro.broker\"\n\t\/\/ The address of the broker\n\tAddress = \":8001\"\n)\n\nfunc run(ctx *cli.Context, srvOpts ...micro.Option) {\n\tlog.Name(\"broker\")\n\n\tif len(ctx.GlobalString(\"server_name\")) > 0 {\n\t\tName = ctx.GlobalString(\"server_name\")\n\t}\n\tif len(ctx.String(\"address\")) > 0 {\n\t\tAddress = ctx.String(\"address\")\n\t}\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\t\/\/ service opts\n\tsrvOpts = append(srvOpts, micro.Name(Name))\n\tif i := time.Duration(ctx.GlobalInt(\"register_ttl\")); i > 0 {\n\t\tsrvOpts = append(srvOpts, micro.RegisterTTL(i*time.Second))\n\t}\n\tif i := time.Duration(ctx.GlobalInt(\"register_interval\")); i > 0 {\n\t\tsrvOpts = append(srvOpts, micro.RegisterInterval(i*time.Second))\n\t}\n\n\t\/\/ set address\n\tif len(Address) > 0 {\n\t\tsrvOpts = append(srvOpts, micro.Address(Address))\n\t}\n\n\t\/\/ new service\n\tservice := micro.NewService(srvOpts...)\n\n\t\/\/ register the broker handler\n\tpb.RegisterBrokerHandler(service.Server(), &handler.Broker{\n\t\t\/\/ using the mdns broker\n\t\tBroker: service.Options().Broker,\n\t})\n\n\t\/\/ run the service\n\tservice.Run()\n}\n\nfunc Commands(options ...micro.Option) []cli.Command {\n\tcommand := cli.Command{\n\t\tName: \"broker\",\n\t\tUsage: \"Run the message broker\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"address\",\n\t\t\t\tUsage: \"Set the broker http address e.g 0.0.0.0:8001\",\n\t\t\t\tEnvVar: \"MICRO_SERVER_ADDRESS\",\n\t\t\t},\n\t\t},\n\t\tAction: func(ctx *cli.Context) {\n\t\t\trun(ctx, options...)\n\t\t},\n\t\tSubcommands: rcli.RegistryCommands(),\n\t}\n\n\tfor _, p := range Plugins() {\n\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\tcommand.Subcommands = append(command.Subcommands, cmds...)\n\t\t}\n\n\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\tcommand.Flags = append(command.Flags, flags...)\n\t\t}\n\t}\n\n\treturn []cli.Command{command}\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"github.com\/hashicorp\/terraform\/dag\"\n\t\"github.com\/hashicorp\/terraform\/tfdiags\"\n)\n\n\/\/ NodePlannableResource represents a resource that is \"plannable\":\n\/\/ it is ready to be planned in order to create a diff.\ntype NodePlannableResource struct {\n\t*NodeAbstractResource\n}\n\nvar (\n\t_ GraphNodeSubPath = (*NodePlannableResource)(nil)\n\t_ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)\n\t_ GraphNodeReferenceable = (*NodePlannableResource)(nil)\n\t_ GraphNodeReferencer = (*NodePlannableResource)(nil)\n\t_ GraphNodeResource = (*NodePlannableResource)(nil)\n\t_ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)\n)\n\n\/\/ GraphNodeDynamicExpandable\nfunc (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {\n\tvar diags tfdiags.Diagnostics\n\n\tcount, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)\n\tdiags = diags.Append(countDiags)\n\tif countDiags.HasErrors() {\n\t\treturn nil, diags.Err()\n\t}\n\n\t\/\/ Next we need to potentially rename an instance address in the state\n\t\/\/ if we're transitioning whether \"count\" is set at all.\n\tfixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)\n\n\t\/\/ Grab the state which we read\n\tstate, lock := ctx.State()\n\tlock.RLock()\n\tdefer lock.RUnlock()\n\n\t\/\/ The concrete resource factory we'll use\n\tconcreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {\n\t\t\/\/ Add the config and state since we don't do that via transforms\n\t\ta.Config = n.Config\n\t\ta.ResolvedProvider = n.ResolvedProvider\n\t\ta.Schema = n.Schema\n\n\t\treturn &NodePlannableResourceInstance{\n\t\t\tNodeAbstractResourceInstance: a,\n\t\t}\n\t}\n\n\t\/\/ The concrete resource factory we'll use for orphans\n\tconcreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {\n\t\t\/\/ Add the config and state since we don't do that via transforms\n\t\ta.Config = n.Config\n\t\ta.ResolvedProvider = n.ResolvedProvider\n\t\ta.Schema = n.Schema\n\n\t\treturn &NodePlannableResourceInstanceOrphan{\n\t\t\tNodeAbstractResourceInstance: a,\n\t\t}\n\t}\n\n\t\/\/ Start creating the steps\n\tsteps := []GraphTransformer{\n\t\t\/\/ Expand the count.\n\t\t&ResourceCountTransformer{\n\t\t\tConcrete: concreteResource,\n\t\t\tSchema: n.Schema,\n\t\t\tCount: count,\n\t\t\tAddr: n.ResourceAddr(),\n\t\t},\n\n\t\t\/\/ Add the count orphans\n\t\t&OrphanResourceCountTransformer{\n\t\t\tConcrete: concreteResourceOrphan,\n\t\t\tCount: count,\n\t\t\tAddr: n.ResourceAddr(),\n\t\t\tState: state,\n\t\t},\n\n\t\t\/\/ Attach the state\n\t\t&AttachStateTransformer{State: state},\n\n\t\t\/\/ Targeting\n\t\t&TargetsTransformer{Targets: n.Targets},\n\n\t\t\/\/ Connect references so ordering is correct\n\t\t&ReferenceTransformer{},\n\n\t\t\/\/ Make sure there is a single root\n\t\t&RootTransformer{},\n\t}\n\n\t\/\/ Build the graph\n\tb := &BasicGraphBuilder{\n\t\tSteps: steps,\n\t\tValidate: true,\n\t\tName: \"NodePlannableResource\",\n\t}\n\tgraph, diags := b.Build(ctx.Path())\n\treturn graph, diags.ErrWithWarnings()\n}\n<commit_msg>core: attach provisioner schemas in subgraphs<commit_after>package terraform\n\nimport (\n\t\"github.com\/hashicorp\/terraform\/dag\"\n\t\"github.com\/hashicorp\/terraform\/tfdiags\"\n)\n\n\/\/ NodePlannableResource represents a resource that is \"plannable\":\n\/\/ it is ready to be planned in order to create a diff.\ntype NodePlannableResource struct {\n\t*NodeAbstractResource\n}\n\nvar (\n\t_ GraphNodeSubPath = (*NodePlannableResource)(nil)\n\t_ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)\n\t_ GraphNodeReferenceable = (*NodePlannableResource)(nil)\n\t_ GraphNodeReferencer = (*NodePlannableResource)(nil)\n\t_ GraphNodeResource = (*NodePlannableResource)(nil)\n\t_ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)\n)\n\n\/\/ GraphNodeDynamicExpandable\nfunc (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {\n\tvar diags tfdiags.Diagnostics\n\n\tcount, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)\n\tdiags = diags.Append(countDiags)\n\tif countDiags.HasErrors() {\n\t\treturn nil, diags.Err()\n\t}\n\n\t\/\/ Next we need to potentially rename an instance address in the state\n\t\/\/ if we're transitioning whether \"count\" is set at all.\n\tfixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)\n\n\t\/\/ Grab the state which we read\n\tstate, lock := ctx.State()\n\tlock.RLock()\n\tdefer lock.RUnlock()\n\n\t\/\/ The concrete resource factory we'll use\n\tconcreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {\n\t\t\/\/ Add the config and state since we don't do that via transforms\n\t\ta.Config = n.Config\n\t\ta.ResolvedProvider = n.ResolvedProvider\n\t\ta.Schema = n.Schema\n\t\ta.ProvisionerSchemas = n.ProvisionerSchemas\n\n\t\treturn &NodePlannableResourceInstance{\n\t\t\tNodeAbstractResourceInstance: a,\n\t\t}\n\t}\n\n\t\/\/ The concrete resource factory we'll use for orphans\n\tconcreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {\n\t\t\/\/ Add the config and state since we don't do that via transforms\n\t\ta.Config = n.Config\n\t\ta.ResolvedProvider = n.ResolvedProvider\n\t\ta.Schema = n.Schema\n\t\ta.ProvisionerSchemas = n.ProvisionerSchemas\n\n\t\treturn &NodePlannableResourceInstanceOrphan{\n\t\t\tNodeAbstractResourceInstance: a,\n\t\t}\n\t}\n\n\t\/\/ Start creating the steps\n\tsteps := []GraphTransformer{\n\t\t\/\/ Expand the count.\n\t\t&ResourceCountTransformer{\n\t\t\tConcrete: concreteResource,\n\t\t\tSchema: n.Schema,\n\t\t\tCount: count,\n\t\t\tAddr: n.ResourceAddr(),\n\t\t},\n\n\t\t\/\/ Add the count orphans\n\t\t&OrphanResourceCountTransformer{\n\t\t\tConcrete: concreteResourceOrphan,\n\t\t\tCount: count,\n\t\t\tAddr: n.ResourceAddr(),\n\t\t\tState: state,\n\t\t},\n\n\t\t\/\/ Attach the state\n\t\t&AttachStateTransformer{State: state},\n\n\t\t\/\/ Targeting\n\t\t&TargetsTransformer{Targets: n.Targets},\n\n\t\t\/\/ Connect references so ordering is correct\n\t\t&ReferenceTransformer{},\n\n\t\t\/\/ Make sure there is a single root\n\t\t&RootTransformer{},\n\t}\n\n\t\/\/ Build the graph\n\tb := &BasicGraphBuilder{\n\t\tSteps: steps,\n\t\tValidate: true,\n\t\tName: \"NodePlannableResource\",\n\t}\n\tgraph, diags := b.Build(ctx.Path())\n\treturn graph, diags.ErrWithWarnings()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Rob Thornton. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !windows\n\npackage goncurses\n\n\/\/ #cgo pkg-config: form\n\/\/ #include <form.h>\n\/\/ #include <stdlib.h>\nimport \"C\"\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Field struct {\n\tfield *C.FIELD\n}\n\ntype Form struct {\n\tform *C.FORM\n}\n\nfunc NewField(h, w, tr, lc, oscr, nbuf int) (*Field, error) {\n\tvar new_field Field\n\tvar err error\n\tnew_field.field, err = C.new_field(C.int(h), C.int(w), C.int(tr), C.int(lc),\n\t\tC.int(oscr), C.int(nbuf))\n\treturn &new_field, ncursesError(err)\n}\n\n\/\/ Background returns the field's background character attributes\nfunc (f *Field) Background() Char {\n\treturn Char(C.field_back(f.field))\n}\n\n\/\/ Buffer returns a string containing the contents of the buffer. The returned\n\/\/ string will contain whitespace up to the buffer size as set by SetMax or\n\/\/ the value by the call to NewField\nfunc (f *Field) Buffer() string {\n\tstr := C.field_buffer(f.field, C.int(0))\n\n\treturn C.GoString(str)\n}\n\n\/\/ Duplicate the field at the specified coordinates, returning a pointer\n\/\/ to the newly allocated object.\nfunc (f *Field) Duplicate(y, x int) (*Field, error) {\n\tvar new_field Field\n\tvar err error\n\tnew_field.field, err = C.dup_field(f.field, C.int(y), C.int(x))\n\treturn &new_field, ncursesError(err)\n}\n\n\/\/ Foreground returns the field's foreground character attributes\nfunc (f *Field) Foreground() Char {\n\treturn Char(C.field_fore(f.field))\n}\n\n\/\/ Free field's allocated memory. This must be called to prevent memory\n\/\/ leaks\nfunc (f *Field) Free() error {\n\terr := C.free_field(f.field)\n\tf = nil\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Info retrieves the height, width, y, x, offset and buffer size of the\n\/\/ given field. Pass the memory addess of the variable to store the data\n\/\/ in or nil.\nfunc (f *Field) Info(h, w, y, x, off, nbuf *int) error {\n\terr := C.field_info(f.field, (*C.int)(unsafe.Pointer(h)),\n\t\t(*C.int)(unsafe.Pointer(w)), (*C.int)(unsafe.Pointer(y)),\n\t\t(*C.int)(unsafe.Pointer(x)), (*C.int)(unsafe.Pointer(off)),\n\t\t(*C.int)(unsafe.Pointer(nbuf)))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Just returns the justification type of the field\nfunc (f *Field) Justification() int {\n\treturn int(C.field_just(f.field))\n}\n\n\/\/ Move the field to the location of the specified coordinates\nfunc (f *Field) Move(y, x int) error {\n\terr := C.move_field(f.field, C.int(y), C.int(x))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Options turns features on and off\nfunc (f *Field) Options(opts int, on bool) {\n\tif on {\n\t\tC.field_opts_on(f.field, C.Field_Options(opts))\n\t\treturn\n\t}\n\tC.field_opts_off(f.field, C.Field_Options(opts))\n}\n\n\/\/ Pad returns the padding character of the field\nfunc (f *Field) Pad() int {\n\treturn int(C.field_pad(f.field))\n}\n\n\/\/ SetBuffer sets the visible characters in the field. A buffer is empty by\n\/\/ default.\nfunc (f *Field) SetBuffer(s string) error {\n\tcstr := C.CString(s)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\terr := C.set_field_buffer(f.field, C.int(0), cstr)\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetJustification of the field\nfunc (f *Field) SetJustification(just int) error {\n\terr := C.set_field_just(f.field, C.int(just))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetMax sets the maximum size of a field\nfunc (f *Field) SetMax(max int) error {\n\terr := C.set_max_field(f.field, C.int(max))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ OptionsOff turns feature(s) off\nfunc (f *Field) SetOptionsOff(opts Char) error {\n\terr := int(C.field_opts_off(f.field, C.Field_Options(opts)))\n\tif err != C.E_OK {\n\t\treturn ncursesError(syscall.Errno(err))\n\t}\n\treturn nil\n}\n\n\/\/ OptionsOn turns feature(s) on\nfunc (f *Field) SetOptionsOn(opts Char) error {\n\terr := int(C.field_opts_on(f.field, C.Field_Options(opts)))\n\tif err != C.E_OK {\n\t\treturn ncursesError(syscall.Errno(err))\n\t}\n\treturn nil\n}\n\n\/\/ SetPad sets the padding character of the field\nfunc (f *Field) SetPad(padch int) error {\n\terr := C.set_field_pad(f.field, C.int(padch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetBackground character and attributes (colours, etc)\nfunc (f *Field) SetBackground(ch Char) error {\n\terr := C.set_field_back(f.field, C.chtype(ch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetForeground character and attributes (colours, etc)\nfunc (f *Field) SetForeground(ch Char) error {\n\terr := C.set_field_fore(f.field, C.chtype(ch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ NewForm returns a new form object using the fields array supplied as\n\/\/ an argument\nfunc NewForm(fields []*Field) (Form, error) {\n\tcfields := make([]*C.FIELD, len(fields)+1)\n\tfor index, field := range fields {\n\t\tcfields[index] = field.field\n\t}\n\tcfields[len(fields)] = nil\n\n\tvar form *C.FORM\n\tvar err error\n\tform, err = C.new_form((**C.FIELD)(&cfields[0]))\n\n\treturn Form{form}, ncursesError(err)\n}\n\n\/\/ FieldCount returns the number of fields attached to the Form\nfunc (f *Form) FieldCount() int {\n\treturn int(C.field_count(f.form))\n}\n\n\/\/ Driver issues the actions requested to the form itself. See the\n\/\/ corresponding REQ_* constants\nfunc (f *Form) Driver(drvract Key) error {\n\terr := C.form_driver(f.form, C.int(drvract))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Free the memory allocated to the form. Forms are not automatically\n\/\/ free'd by Go's garbage collection system so the memory allocated to\n\/\/ it must be explicitely free'd\nfunc (f *Form) Free() error {\n\terr := C.free_form(f.form)\n\tf = nil\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Post the form, making it visible and interactive\nfunc (f *Form) Post() error {\n\terr := C.post_form(f.form)\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetFields overwrites the current fields for the Form with new ones.\n\/\/ It is important to make sure all prior fields have been freed otherwise\n\/\/ this action will result in a memory leak\nfunc (f *Form) SetFields(fields []*Field) error {\n\tcfields := make([]*C.FIELD, len(fields)+1)\n\tfor index, field := range fields {\n\t\tcfields[index] = field.field\n\t}\n\tcfields[len(fields)] = nil\n\terr := C.set_form_fields(f.form, (**C.FIELD)(&cfields[0]))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetOptions for the form\nfunc (f *Form) SetOptions(opts int) error {\n\t_, err := C.set_form_opts(f.form, (C.Form_Options)(opts))\n\treturn ncursesError(err)\n}\n\n\/\/ SetSub sets the subwindow associated with the form\nfunc (f *Form) SetSub(w *Window) error {\n\terr := int(C.set_form_sub(f.form, w.win))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetWindow sets the window associated with the form\nfunc (f *Form) SetWindow(w *Window) error {\n\terr := int(C.set_form_win(f.form, w.win))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Sub returns the subwindow assocaiated with the form\nfunc (f *Form) Sub() Window {\n\treturn Window{C.form_sub(f.form)}\n}\n\n\/\/ UnPost the form, removing it from the interface\nfunc (f *Form) UnPost() error {\n\terr := C.unpost_form(f.form)\n\treturn ncursesError(syscall.Errno(err))\n}\n<commit_msg>convert form fields from struct to type<commit_after>\/\/ Copyright 2011 Rob Thornton. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !windows\n\npackage goncurses\n\n\/\/ #cgo pkg-config: form\n\/\/ #include <form.h>\n\/\/ #include <stdlib.h>\nimport \"C\"\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Field C.FIELD\n\ntype Form struct {\n\tform *C.FORM\n}\n\nfunc NewField(h, w, tr, lc, oscr, nbuf int) (*Field, error) {\n\tf, err := C.new_field(C.int(h), C.int(w), C.int(tr), C.int(lc),\n\t\tC.int(oscr), C.int(nbuf))\n\treturn (*Field)(f), ncursesError(err)\n}\n\n\/\/ Background returns the field's background character attributes\nfunc (f *Field) Background() Char {\n\treturn Char(C.field_back((*C.FIELD)(f)))\n}\n\n\/\/ Buffer returns a string containing the contents of the buffer. The returned\n\/\/ string will contain whitespace up to the buffer size as set by SetMax or\n\/\/ the value by the call to NewField\nfunc (f *Field) Buffer() string {\n\tstr := C.field_buffer((*C.FIELD)(f), C.int(0))\n\n\treturn C.GoString(str)\n}\n\n\/\/ Duplicate the field at the specified coordinates, returning a pointer\n\/\/ to the newly allocated object.\nfunc (f *Field) Duplicate(y, x int) (*Field, error) {\n\tnf, err := C.dup_field((*C.FIELD)(f), C.int(y), C.int(x))\n\treturn (*Field)(nf), ncursesError(err)\n}\n\n\/\/ Foreground returns the field's foreground character attributes\nfunc (f *Field) Foreground() Char {\n\treturn Char(C.field_fore((*C.FIELD)(f)))\n}\n\n\/\/ Free field's allocated memory. This must be called to prevent memory\n\/\/ leaks\nfunc (f *Field) Free() error {\n\terr := C.free_field((*C.FIELD)(f))\n\tf = nil\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Info retrieves the height, width, y, x, offset and buffer size of the\n\/\/ given field. Pass the memory addess of the variable to store the data\n\/\/ in or nil.\nfunc (f *Field) Info(h, w, y, x, off, nbuf *int) error {\n\terr := C.field_info((*C.FIELD)(f), (*C.int)(unsafe.Pointer(h)),\n\t\t(*C.int)(unsafe.Pointer(w)), (*C.int)(unsafe.Pointer(y)),\n\t\t(*C.int)(unsafe.Pointer(x)), (*C.int)(unsafe.Pointer(off)),\n\t\t(*C.int)(unsafe.Pointer(nbuf)))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Just returns the justification type of the field\nfunc (f *Field) Justification() int {\n\treturn int(C.field_just((*C.FIELD)(f)))\n}\n\n\/\/ Move the field to the location of the specified coordinates\nfunc (f *Field) Move(y, x int) error {\n\terr := C.move_field((*C.FIELD)(f), C.int(y), C.int(x))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Options turns features on and off\nfunc (f *Field) Options(opts int, on bool) {\n\tif on {\n\t\tC.field_opts_on((*C.FIELD)(f), C.Field_Options(opts))\n\t\treturn\n\t}\n\tC.field_opts_off((*C.FIELD)(f), C.Field_Options(opts))\n}\n\n\/\/ Pad returns the padding character of the field\nfunc (f *Field) Pad() int {\n\treturn int(C.field_pad((*C.FIELD)(f)))\n}\n\n\/\/ SetBuffer sets the visible characters in the field. A buffer is empty by\n\/\/ default.\nfunc (f *Field) SetBuffer(s string) error {\n\tcstr := C.CString(s)\n\tdefer C.free(unsafe.Pointer(cstr))\n\n\terr := C.set_field_buffer((*C.FIELD)(f), C.int(0), cstr)\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetJustification of the field\nfunc (f *Field) SetJustification(just int) error {\n\terr := C.set_field_just((*C.FIELD)(f), C.int(just))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetMax sets the maximum size of a field\nfunc (f *Field) SetMax(max int) error {\n\terr := C.set_max_field((*C.FIELD)(f), C.int(max))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ OptionsOff turns feature(s) off\nfunc (f *Field) SetOptionsOff(opts Char) error {\n\terr := int(C.field_opts_off((*C.FIELD)(f), C.Field_Options(opts)))\n\tif err != C.E_OK {\n\t\treturn ncursesError(syscall.Errno(err))\n\t}\n\treturn nil\n}\n\n\/\/ OptionsOn turns feature(s) on\nfunc (f *Field) SetOptionsOn(opts Char) error {\n\terr := int(C.field_opts_on((*C.FIELD)(f), C.Field_Options(opts)))\n\tif err != C.E_OK {\n\t\treturn ncursesError(syscall.Errno(err))\n\t}\n\treturn nil\n}\n\n\/\/ SetPad sets the padding character of the field\nfunc (f *Field) SetPad(padch int) error {\n\terr := C.set_field_pad((*C.FIELD)(f), C.int(padch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetBackground character and attributes (colours, etc)\nfunc (f *Field) SetBackground(ch Char) error {\n\terr := C.set_field_back((*C.FIELD)(f), C.chtype(ch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetForeground character and attributes (colours, etc)\nfunc (f *Field) SetForeground(ch Char) error {\n\terr := C.set_field_fore((*C.FIELD)(f), C.chtype(ch))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ NewForm returns a new form object using the fields array supplied as\n\/\/ an argument\nfunc NewForm(fields []*Field) (Form, error) {\n\t\/\/cfields := make([]*C.FIELD, len(fields)+1)\n\t\/\/for index, field := range fields {\n\t\/\/cfields[index] = field.field\n\t\/\/}\n\t\/\/cfields[len(fields)] = nil\n\n\t\/* append nil field? *\/\n\n\t\/\/var form *C.FORM\n\t\/\/var err error\n\tform, err := C.new_form((**C.FIELD)(unsafe.Pointer(&fields[0])))\n\n\treturn Form{form}, ncursesError(err)\n}\n\n\/\/ FieldCount returns the number of fields attached to the Form\nfunc (f *Form) FieldCount() int {\n\treturn int(C.field_count(f.form))\n}\n\n\/\/ Driver issues the actions requested to the form itself. See the\n\/\/ corresponding REQ_* constants\nfunc (f *Form) Driver(drvract Key) error {\n\terr := C.form_driver(f.form, C.int(drvract))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Free the memory allocated to the form. Forms are not automatically\n\/\/ free'd by Go's garbage collection system so the memory allocated to\n\/\/ it must be explicitely free'd\nfunc (f *Form) Free() error {\n\terr := C.free_form(f.form)\n\tf = nil\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Post the form, making it visible and interactive\nfunc (f *Form) Post() error {\n\terr := C.post_form(f.form)\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetFields overwrites the current fields for the Form with new ones.\n\/\/ It is important to make sure all prior fields have been freed otherwise\n\/\/ this action will result in a memory leak\nfunc (f *Form) SetFields(fields []*Field) error {\n\t\/\/cfields := make([]*C.FIELD, len(fields)+1)\n\t\/\/for index, field := range fields {\n\t\/\/cfields[index] = field.field\n\t\/\/}\n\t\/\/cfields[len(fields)] = nil\n\terr := C.set_form_fields(f.form, (**C.FIELD)(unsafe.Pointer(&fields[0])))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetOptions for the form\nfunc (f *Form) SetOptions(opts int) error {\n\t_, err := C.set_form_opts(f.form, (C.Form_Options)(opts))\n\treturn ncursesError(err)\n}\n\n\/\/ SetSub sets the subwindow associated with the form\nfunc (f *Form) SetSub(w *Window) error {\n\terr := int(C.set_form_sub(f.form, w.win))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ SetWindow sets the window associated with the form\nfunc (f *Form) SetWindow(w *Window) error {\n\terr := int(C.set_form_win(f.form, w.win))\n\treturn ncursesError(syscall.Errno(err))\n}\n\n\/\/ Sub returns the subwindow assocaiated with the form\nfunc (f *Form) Sub() Window {\n\treturn Window{C.form_sub(f.form)}\n}\n\n\/\/ UnPost the form, removing it from the interface\nfunc (f *Form) UnPost() error {\n\terr := C.unpost_form(f.form)\n\treturn ncursesError(syscall.Errno(err))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ See LICENSE.txt for licensing information.\n\npackage main\n\nimport (\n\t\"archive\/zip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\ntype Game struct {\n\tName string \/\/ user-given mnemonic, also used for making save storage directory\n\tPath string \/\/ absolute path to the source save file\/directory\n\tRoot string \/\/ absolute path to the saved saves directory\n\tStamp time.Time \/\/ last modification datetime stamp\n\tSaves []*Save \/\/ slice of saves\n}\n\nfunc (g *Game) PrintHeader() {\n\tfmt.Printf(\"%-32s %-3s %24s\\n\", \"Name\", \"Len\", \"Last backup\")\n}\n\nfunc (g *Game) Print() {\n\tfmt.Printf(\"%-32s %-3d %24s\\n\", g.Name, len(g.Saves), g.Stamp.Format(timeFmt))\n}\n\nfunc (g *Game) PrintWhole() {\n\tg.PrintHeader()\n\tg.Print()\n\tfmt.Println()\n\tif len(g.Saves) > 0 {\n\t\tfmt.Printf(\"%3s \", \"ID\")\n\t\tg.Saves[0].PrintHeader()\n\t\tfor i, s := range g.Saves {\n\t\t\tfmt.Printf(\"%3d \", i+1)\n\t\t\ts.Print()\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\nfunc (g *Game) Delete(from, to int) (n int, err error) {\n\tif from < 1 {\n\t\treturn n, fmt.Errorf(\"Index from %d out of range\", from)\n\t}\n\tif to > len(g.Saves) {\n\t\treturn n, fmt.Errorf(\"Index to %d out of range\", from)\n\t}\n\tfrom--\n\ti := from\n\tfor ; i < to; i++ {\n\t\ts := g.Saves[i]\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"removing save %d from %s\\n\", i+1, s.Stamp.Format(timeFmt))\n\t\t}\n\t\terr = os.Remove(s.Path)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tn++\n\t}\n\tcopy(g.Saves[from:], g.Saves[i:])\n\tfor k, n := len(g.Saves)-i+from, len(g.Saves); k < n; k++ {\n\t\tg.Saves[k] = nil\n\t}\n\tg.Saves = g.Saves[:len(g.Saves)-i+from]\n\treturn n, err\n}\n\n\/\/ Backup copies a save file, or zips a save directory.\n\/\/ Note that currently proper file closing is depending on total\n\/\/ exit of the program on any file operation failure.\nfunc (g *Game) Backup() (sv *Save, err error) {\n\tfi, err := os.Stat(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := time.Now()\n\tp := filepath.Join(g.Root, s.Format(fileFmt))\n\to, err := os.Create(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\to.Close()\n\t\tif err != nil {\n\t\t\tif lerr := os.Remove(p); lerr != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Error removing partial backup file:\", lerr)\n\t\t\t}\n\t\t}\n\t}()\n\t\/\/ single file\n\tif fi.Mode().IsRegular() {\n\t\ti, err := os.Open(g.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer i.Close()\n\t\tfmt.Println(\"Backing up\", i.Name(), \"...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"copying %s to %s\\n\", g.Path, p)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif err != nil {\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t}\n\t} else {\n\t\t\/\/ directory\n\t\tfmt.Println(\"Backing up save directory...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"zipping %s to %s\\n\", g.Path, p)\n\t\t}\n\t\tz := zip.NewWriter(o)\n\t\terr = filepath.Walk(g.Path, func(path string, info os.FileInfo, ierr error) (err error) {\n\t\t\tif ierr != nil {\n\t\t\t\treturn ierr\n\t\t\t}\n\t\t\tif info.IsDir() {\n\t\t\t\tif flagVerbose {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"skipping %s\\n\", path)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trp, err := filepath.Rel(g.Path, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trp = filepath.ToSlash(rp)\n\t\t\ti, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to, err := z.Create(rp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"compressing %s\\n\", rp)\n\t\t\t}\n\t\t\t_, err = io.Copy(o, i)\n\t\t\ti.Close()\n\t\t\treturn err\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err = z.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tsv = &Save{\n\t\tStamp: s,\n\t\tPath: p,\n\t}\n\tg.Saves = append(g.Saves, sv)\n\treturn sv, nil\n}\n\n\/\/ Restore removes the current save file or directory, and repopulates\n\/\/ it with the data from given backup save file.\n\/\/ Note that currently proper file closing is depending on total\n\/\/ exit of the program on any file operation failure.\nfunc (g *Game) Restore(index int) (sv *Save, err error) {\n\tvar i int\n\n\tif index > 0 {\n\t\ti = index - 1\n\t\tif i > len(g.Saves) {\n\t\t\treturn nil, fmt.Errorf(\"Save ID %d out of range (1 ~ %d)\", index, len(g.Saves))\n\t\t}\n\t} else {\n\t\ti = len(g.Saves) - 1 + index\n\t\tif i < 0 {\n\t\t\treturn nil, fmt.Errorf(\"Save offset %d out of range (%d ~ 0)\", index, -len(g.Saves)+1)\n\t\t}\n\t}\n\tsv = g.Saves[i]\n\tfi, err := os.Stat(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ single file\n\tif fi.Mode().IsRegular() {\n\t\ti, err := os.Open(sv.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer i.Close()\n\t\to, err := os.Create(g.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer o.Close()\n\t\tfmt.Println(\"Restoring save from\", sv.Path, \"...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"copying %s to %s\\n\", sv.Path, g.Path)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif flagVerbose {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t\t}\n\t\t}\n\t\treturn sv, nil\n\t}\n\t\/\/ directory\n\tfmt.Println(\"Restoring save directory from\", sv.Stamp.Format(timeFmt), \"...\")\n\tif flagVerbose {\n\t\tfmt.Fprintf(os.Stderr, \"removing all from %s\\n\", g.Path)\n\t}\n\terr = os.RemoveAll(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif flagVerbose {\n\t\tfmt.Fprintf(os.Stderr, \"unzipping %s to %s\\n\", sv.Path, g.Path)\n\t}\n\tz, err := zip.OpenReader(sv.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer z.Close()\n\tfor _, f := range z.File {\n\t\ti, err := f.Open()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttp := filepath.Join(g.Path, filepath.FromSlash(f.Name))\n\t\tdp := filepath.Dir(tp)\n\t\terr = os.MkdirAll(dp, 0777)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\to, err := os.Create(tp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"decompressing %s\\n\", f.Name)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif err != nil {\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\to.Close()\n\t\ti.Close()\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t}\n\t}\n\treturn sv, nil\n}\n<commit_msg>Better UI 2: \"Len\" -> \"# Backups\"<commit_after>\/\/ See LICENSE.txt for licensing information.\n\npackage main\n\nimport (\n\t\"archive\/zip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\ntype Game struct {\n\tName string \/\/ user-given mnemonic, also used for making save storage directory\n\tPath string \/\/ absolute path to the source save file\/directory\n\tRoot string \/\/ absolute path to the saved saves directory\n\tStamp time.Time \/\/ last modification datetime stamp\n\tSaves []*Save \/\/ slice of saves\n}\n\nfunc (g *Game) PrintHeader() {\n\tfmt.Printf(\"%-32s %-9s %24s\\n\", \"Name\", \"# Backups\", \"Last backup\")\n}\n\nfunc (g *Game) Print() {\n\tfmt.Printf(\"%-32s %-3d %24s\\n\", g.Name, len(g.Saves), g.Stamp.Format(timeFmt))\n}\n\nfunc (g *Game) PrintWhole() {\n\tg.PrintHeader()\n\tg.Print()\n\tfmt.Println()\n\tif len(g.Saves) > 0 {\n\t\tfmt.Printf(\"%3s \", \"ID\")\n\t\tg.Saves[0].PrintHeader()\n\t\tfor i, s := range g.Saves {\n\t\t\tfmt.Printf(\"%3d \", i+1)\n\t\t\ts.Print()\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\nfunc (g *Game) Delete(from, to int) (n int, err error) {\n\tif from < 1 {\n\t\treturn n, fmt.Errorf(\"Index from %d out of range\", from)\n\t}\n\tif to > len(g.Saves) {\n\t\treturn n, fmt.Errorf(\"Index to %d out of range\", from)\n\t}\n\tfrom--\n\ti := from\n\tfor ; i < to; i++ {\n\t\ts := g.Saves[i]\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"removing save %d from %s\\n\", i+1, s.Stamp.Format(timeFmt))\n\t\t}\n\t\terr = os.Remove(s.Path)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tn++\n\t}\n\tcopy(g.Saves[from:], g.Saves[i:])\n\tfor k, n := len(g.Saves)-i+from, len(g.Saves); k < n; k++ {\n\t\tg.Saves[k] = nil\n\t}\n\tg.Saves = g.Saves[:len(g.Saves)-i+from]\n\treturn n, err\n}\n\n\/\/ Backup copies a save file, or zips a save directory.\n\/\/ Note that currently proper file closing is depending on total\n\/\/ exit of the program on any file operation failure.\nfunc (g *Game) Backup() (sv *Save, err error) {\n\tfi, err := os.Stat(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := time.Now()\n\tp := filepath.Join(g.Root, s.Format(fileFmt))\n\to, err := os.Create(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\to.Close()\n\t\tif err != nil {\n\t\t\tif lerr := os.Remove(p); lerr != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Error removing partial backup file:\", lerr)\n\t\t\t}\n\t\t}\n\t}()\n\t\/\/ single file\n\tif fi.Mode().IsRegular() {\n\t\ti, err := os.Open(g.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer i.Close()\n\t\tfmt.Println(\"Backing up\", i.Name(), \"...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"copying %s to %s\\n\", g.Path, p)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif err != nil {\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t}\n\t} else {\n\t\t\/\/ directory\n\t\tfmt.Println(\"Backing up save directory...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"zipping %s to %s\\n\", g.Path, p)\n\t\t}\n\t\tz := zip.NewWriter(o)\n\t\terr = filepath.Walk(g.Path, func(path string, info os.FileInfo, ierr error) (err error) {\n\t\t\tif ierr != nil {\n\t\t\t\treturn ierr\n\t\t\t}\n\t\t\tif info.IsDir() {\n\t\t\t\tif flagVerbose {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"skipping %s\\n\", path)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trp, err := filepath.Rel(g.Path, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trp = filepath.ToSlash(rp)\n\t\t\ti, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to, err := z.Create(rp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"compressing %s\\n\", rp)\n\t\t\t}\n\t\t\t_, err = io.Copy(o, i)\n\t\t\ti.Close()\n\t\t\treturn err\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err = z.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tsv = &Save{\n\t\tStamp: s,\n\t\tPath: p,\n\t}\n\tg.Saves = append(g.Saves, sv)\n\treturn sv, nil\n}\n\n\/\/ Restore removes the current save file or directory, and repopulates\n\/\/ it with the data from given backup save file.\n\/\/ Note that currently proper file closing is depending on total\n\/\/ exit of the program on any file operation failure.\nfunc (g *Game) Restore(index int) (sv *Save, err error) {\n\tvar i int\n\n\tif index > 0 {\n\t\ti = index - 1\n\t\tif i > len(g.Saves) {\n\t\t\treturn nil, fmt.Errorf(\"Save ID %d out of range (1 ~ %d)\", index, len(g.Saves))\n\t\t}\n\t} else {\n\t\ti = len(g.Saves) - 1 + index\n\t\tif i < 0 {\n\t\t\treturn nil, fmt.Errorf(\"Save offset %d out of range (%d ~ 0)\", index, -len(g.Saves)+1)\n\t\t}\n\t}\n\tsv = g.Saves[i]\n\tfi, err := os.Stat(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ single file\n\tif fi.Mode().IsRegular() {\n\t\ti, err := os.Open(sv.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer i.Close()\n\t\to, err := os.Create(g.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer o.Close()\n\t\tfmt.Println(\"Restoring save from\", sv.Path, \"...\")\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"copying %s to %s\\n\", sv.Path, g.Path)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif flagVerbose {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t\t}\n\t\t}\n\t\treturn sv, nil\n\t}\n\t\/\/ directory\n\tfmt.Println(\"Restoring save directory from\", sv.Stamp.Format(timeFmt), \"...\")\n\tif flagVerbose {\n\t\tfmt.Fprintf(os.Stderr, \"removing all from %s\\n\", g.Path)\n\t}\n\terr = os.RemoveAll(g.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif flagVerbose {\n\t\tfmt.Fprintf(os.Stderr, \"unzipping %s to %s\\n\", sv.Path, g.Path)\n\t}\n\tz, err := zip.OpenReader(sv.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer z.Close()\n\tfor _, f := range z.File {\n\t\ti, err := f.Open()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttp := filepath.Join(g.Path, filepath.FromSlash(f.Name))\n\t\tdp := filepath.Dir(tp)\n\t\terr = os.MkdirAll(dp, 0777)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\to, err := os.Create(tp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"decompressing %s\\n\", f.Name)\n\t\t}\n\t\tn, err := io.Copy(o, i)\n\t\tif err != nil {\n\t\t\tif flagVerbose {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error, copied %d\\n\", n)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\to.Close()\n\t\ti.Close()\n\t\tif flagVerbose {\n\t\t\tfmt.Fprintf(os.Stderr, \"ok, copied %d\\n\", n)\n\t\t}\n\t}\n\treturn sv, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Returns true if the file on path a was modified later than the file on path b.\n\/\/If an error is encountered, returns false and the error.\nfunc IsNewer(a, b string) (bool, error) {\n\tfia, err := os.Stat(a)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfib, err := os.Stat(b)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn fia.ModTime().After(fib.ModTime()), nil\n}\n\nfunc NormalizeDate(t time.Time) time.Time {\n\treturn time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)\n}\n\nfunc NormalizeTime(t time.Time) time.Time {\n\treturn time.Date(1, 1, 1, t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)\n}\n\nfunc Sleep(seconds int) {\n\tdel, _ := time.ParseDuration(strconv.Itoa(seconds) + \"s\")\n\tt := time.NewTimer(del)\n\t<-t.C\n}\n\nfunc GetFileType(filename string) string {\n\tparts := strings.Split(filename, \".\")\n\tif len(parts) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn parts[len(parts)-1]\n}\n<commit_msg>Removed unused function util.Sleep<commit_after>package util\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Returns true if the file on path a was modified later than the file on path b.\n\/\/If an error is encountered, returns false and the error.\nfunc IsNewer(a, b string) (bool, error) {\n\tfia, err := os.Stat(a)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfib, err := os.Stat(b)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn fia.ModTime().After(fib.ModTime()), nil\n}\n\nfunc NormalizeDate(t time.Time) time.Time {\n\treturn time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)\n}\n\nfunc NormalizeTime(t time.Time) time.Time {\n\treturn time.Date(1, 1, 1, t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)\n}\n\nfunc GetFileType(filename string) string {\n\tparts := strings.Split(filename, \".\")\n\tif len(parts) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn parts[len(parts)-1]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage util\n\nconst (\n\tbytesPerInt = 4\n\tsizeBase = 7\n)\n\nvar (\n\tbyteSize = make([]byte, bytesPerInt) \/\/ Made for reusing in FormSize\n)\n\n\/\/ ParseSize parses byte slice with ID3v2 size (4 * 0b0xxxxxxx) and returns\n\/\/ uint32 integer.\n\/\/\n\/\/ If length of slice is more than 4 or if there is invalid size format (e.g.\n\/\/ one byte in slice is like 0b1xxxxxxx), then panic occurs.\nfunc ParseSize(data []byte) uint32 {\n\tvar size uint32\n\n\tif len(data) > bytesPerInt {\n\t\tpanic(\"Invalid data length (it must be equal or less than 4)\")\n\t}\n\n\tfor _, b := range data {\n\t\tif b&0x80 > 0 { \/\/ 0x80 = 0b1000_0000\n\t\t\tpanic(\"Invalid size format\")\n\t\t}\n\n\t\tsize = (size << sizeBase) | uint32(b)\n\t}\n\n\treturn size\n}\n\n\/\/ FormSize transforms uint32 integer to byte slice with\n\/\/ ID3v2 size (4 * 0b0xxxxxxx).\n\/\/\n\/\/ If size more than allowed (256MB), then panic occurs.\nfunc FormSize(n uint32) []byte {\n\tallowedSize := uint32(268435455) \/\/ 4 * 0b01111111\n\tif n > allowedSize {\n\t\tpanic(\"Size is more than allowed in id3 tag\")\n\t}\n\n\tmask := uint32(1<<sizeBase - 1)\n\n\tfor i, _ := range byteSize {\n\t\tbyteSize[len(byteSize)-i-1] = byte(n & mask)\n\t\tn >>= sizeBase\n\t}\n\n\treturn byteSize\n}\n<commit_msg>Sort methods in util.go in alphabetical order<commit_after>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage util\n\nconst (\n\tbytesPerInt = 4\n\tsizeBase = 7\n)\n\nvar (\n\tbyteSize = make([]byte, bytesPerInt) \/\/ Made for reusing in FormSize\n)\n\n\/\/ FormSize transforms uint32 integer to byte slice with\n\/\/ ID3v2 size (4 * 0b0xxxxxxx).\n\/\/\n\/\/ If size more than allowed (256MB), then panic occurs.\nfunc FormSize(n uint32) []byte {\n\tallowedSize := uint32(268435455) \/\/ 4 * 0b01111111\n\tif n > allowedSize {\n\t\tpanic(\"Size is more than allowed in id3 tag\")\n\t}\n\n\tmask := uint32(1<<sizeBase - 1)\n\n\tfor i, _ := range byteSize {\n\t\tbyteSize[len(byteSize)-i-1] = byte(n & mask)\n\t\tn >>= sizeBase\n\t}\n\n\treturn byteSize\n}\n\n\/\/ ParseSize parses byte slice with ID3v2 size (4 * 0b0xxxxxxx) and returns\n\/\/ uint32 integer.\n\/\/\n\/\/ If length of slice is more than 4 or if there is invalid size format (e.g.\n\/\/ one byte in slice is like 0b1xxxxxxx), then panic occurs.\nfunc ParseSize(data []byte) uint32 {\n\tvar size uint32\n\n\tif len(data) > bytesPerInt {\n\t\tpanic(\"Invalid data length (it must be equal or less than 4)\")\n\t}\n\n\tfor _, b := range data {\n\t\tif b&0x80 > 0 { \/\/ 0x80 = 0b1000_0000\n\t\t\tpanic(\"Invalid size format\")\n\t\t}\n\n\t\tsize = (size << sizeBase) | uint32(b)\n\t}\n\n\treturn size\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tconfigMode = 0755\n\tdbName = \"mailman.db\"\n\tLogName = \"mailman.log\"\n\tDefaultSMTPPort = 25\n\t\/\/ magic key (avoid content conflict)\n\tMailBodyKey = \"YTua0G1ViXGg9fxvrtwVRNfKD\"\n\tMailTemplatePath = \".\/ui\/mail-template\"\n\t\/\/MailTemplateType = \"responsive\"\n\tMailTemplateType = \"stationery\"\n)\n\nvar (\n\tFileLog = logrus.New()\n\tConfigPath = map[string]string{\n\t\t\"dbPath\": \"\/.mailman\/db\",\n\t\t\"logPath\": \"\/.mailman\/log\",\n\t\t\"tmpPath\": \"\/.mailman\/tmp\",\n\t}\n\tDBPath string\n\tDefaultLang = \"en\"\n\tDefaultSMTPServer = map[string]string{\n\t\t\"@qq.com\": \"smtp.qq.com\",\n\t\t\"@hotmail.com\": \"smtp.live.com\",\n\t\t\"@outlook.com\": \"smtp.live.com\",\n\t\t\"@gmail.com\": \"smtp.gmail.com\",\n\t}\n\tSMTPServerNotFoundErr = errors.New(\"SMTP Server Not Found\")\n)\n\nfunc init() {\n\n\thomeDir := GetHomeDir()\n\tCreateConfigDir()\n\tlogFilePath := filepath.Join(homeDir, ConfigPath[\"logPath\"], LogName)\n\tlogFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_CREATE, configMode)\n\tif err != nil {\n\t\t\/\/ mailman.log not exist\n\t\tFileLog.Fatal(err.Error())\n\t\t\/\/log.Fatal(err)\n\t}\n\tFileLog.Out = logFile\n\tFileLog.Formatter = &logrus.TextFormatter{DisableColors: true}\n\tDBPath = filepath.Join(homeDir, ConfigPath[\"dbPath\"], dbName)\n}\n\ntype Msg struct {\n\tSuccess bool `json:\"success\"`\n\tMessage string `json:\"message\"`\n\tData interface{} `json:\"data,emitempty\"`\n}\n\nfunc GetHomeDir() string {\n\n\treturn os.Getenv(\"HOME\")\n}\n\nfunc CreateConfigDir() error {\n\n\thomeDir := GetHomeDir()\n\tfor _, path := range ConfigPath {\n\t\tvar p = homeDir + path\n\t\tif _, err := os.Stat(p); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tif err := os.MkdirAll(p, configMode); err != nil {\n\t\t\t\t\tFileLog.Fatal(err.Error())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tFileLog.Fatal(err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc GetTmpDir() string {\n\n\thomeDir := GetHomeDir()\n\treturn filepath.Join(homeDir, ConfigPath[\"tmpPath\"])\n}\n\nfunc GetUserName() string {\n\n\tu, _ := user.Current()\n\treturn u.Username\n}\n<commit_msg>switch back mail template<commit_after>package util\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tconfigMode = 0755\n\tdbName = \"mailman.db\"\n\tLogName = \"mailman.log\"\n\tDefaultSMTPPort = 25\n\t\/\/ magic key (avoid content conflict)\n\tMailBodyKey = \"YTua0G1ViXGg9fxvrtwVRNfKD\"\n\tMailTemplatePath = \".\/ui\/mail-template\"\n\tMailTemplateType = \"responsive\"\n\t\/\/MailTemplateType = \"stationery\"\n)\n\nvar (\n\tFileLog = logrus.New()\n\tConfigPath = map[string]string{\n\t\t\"dbPath\": \"\/.mailman\/db\",\n\t\t\"logPath\": \"\/.mailman\/log\",\n\t\t\"tmpPath\": \"\/.mailman\/tmp\",\n\t}\n\tDBPath string\n\tDefaultLang = \"en\"\n\tDefaultSMTPServer = map[string]string{\n\t\t\"@qq.com\": \"smtp.qq.com\",\n\t\t\"@hotmail.com\": \"smtp.live.com\",\n\t\t\"@outlook.com\": \"smtp.live.com\",\n\t\t\"@gmail.com\": \"smtp.gmail.com\",\n\t}\n\tSMTPServerNotFoundErr = errors.New(\"SMTP Server Not Found\")\n)\n\nfunc init() {\n\n\thomeDir := GetHomeDir()\n\tCreateConfigDir()\n\tlogFilePath := filepath.Join(homeDir, ConfigPath[\"logPath\"], LogName)\n\tlogFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_CREATE, configMode)\n\tif err != nil {\n\t\t\/\/ mailman.log not exist\n\t\tFileLog.Fatal(err.Error())\n\t\t\/\/log.Fatal(err)\n\t}\n\tFileLog.Out = logFile\n\tFileLog.Formatter = &logrus.TextFormatter{DisableColors: true}\n\tDBPath = filepath.Join(homeDir, ConfigPath[\"dbPath\"], dbName)\n}\n\ntype Msg struct {\n\tSuccess bool `json:\"success\"`\n\tMessage string `json:\"message\"`\n\tData interface{} `json:\"data,emitempty\"`\n}\n\nfunc GetHomeDir() string {\n\n\treturn os.Getenv(\"HOME\")\n}\n\nfunc CreateConfigDir() error {\n\n\thomeDir := GetHomeDir()\n\tfor _, path := range ConfigPath {\n\t\tvar p = homeDir + path\n\t\tif _, err := os.Stat(p); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tif err := os.MkdirAll(p, configMode); err != nil {\n\t\t\t\t\tFileLog.Fatal(err.Error())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tFileLog.Fatal(err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc GetTmpDir() string {\n\n\thomeDir := GetHomeDir()\n\treturn filepath.Join(homeDir, ConfigPath[\"tmpPath\"])\n}\n\nfunc GetUserName() string {\n\n\tu, _ := user.Current()\n\treturn u.Username\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Util\", func() {\n\tDescribe(\"Size methods\", func() {\n\n\t\tIt(\"formats sizes\", func() {\n\n\t\t\tvar str string\n\t\t\tstr = FormatSize(55)\n\t\t\tExpect(str).To(Equal(\"55\"))\n\t\t\tstr = FormatSize(1024)\n\t\t\tExpect(str).To(Equal(\"1KB\"))\n\t\t\tstr = FormatSize(2000)\n\t\t\tExpect(str).To(Equal(\"1.95KB\"))\n\t\t\tstr = FormatSize(1572864)\n\t\t\tExpect(str).To(Equal(\"1.5MB\"))\n\t\t\tstr = FormatSize(157286400)\n\t\t\tExpect(str).To(Equal(\"150MB\"))\n\t\t\tstr = FormatSize(44023414784)\n\t\t\tExpect(str).To(Equal(\"41GB\"))\n\t\t\tstr = FormatSize(44475414800)\n\t\t\tExpect(str).To(Equal(\"41.4GB\"))\n\t\t\tstr = FormatSize(1319413953331)\n\t\t\tExpect(str).To(Equal(\"1.2TB\"))\n\t\t\tstr = FormatSize(395824185999360)\n\t\t\tExpect(str).To(Equal(\"360TB\"))\n\t\t\tstr = FormatSize(2260595906707456)\n\t\t\tExpect(str).To(Equal(\"2.01PB\"))\n\n\t\t})\n\t\tIt(\"parses sizes\", func() {\n\t\t\tvar val int64\n\t\t\tvar err error\n\t\t\tval, err = ParseSize(\"5a67\")\n\t\t\tExpect(err).ToNot(BeNil(), \"Should not parse\")\n\t\t\tval, err = ParseSize(\"567\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"567B\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"567b\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\" 567 B \")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"1KB\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(1024))\n\t\t\tval, err = ParseSize(\"2.5KB\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(2560))\n\t\t\tval, err = ParseSize(\"5.25M\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(5505024))\n\t\t\tval, err = ParseSize(\"75.0Gb\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(80530636800))\n\t\t\tval, err = ParseSize(\"300T\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(329853488332800))\n\t\t\tval, err = ParseSize(\"1.5pb\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(1688849860263936))\n\t\t})\n\n\t})\n\n})\n<commit_msg>Binary search tests<commit_after>package main\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Util\", func() {\n\tDescribe(\"Size methods\", func() {\n\n\t\tIt(\"formats sizes\", func() {\n\n\t\t\tvar str string\n\t\t\tstr = FormatSize(55)\n\t\t\tExpect(str).To(Equal(\"55\"))\n\t\t\tstr = FormatSize(1024)\n\t\t\tExpect(str).To(Equal(\"1KB\"))\n\t\t\tstr = FormatSize(2000)\n\t\t\tExpect(str).To(Equal(\"1.95KB\"))\n\t\t\tstr = FormatSize(1572864)\n\t\t\tExpect(str).To(Equal(\"1.5MB\"))\n\t\t\tstr = FormatSize(157286400)\n\t\t\tExpect(str).To(Equal(\"150MB\"))\n\t\t\tstr = FormatSize(44023414784)\n\t\t\tExpect(str).To(Equal(\"41GB\"))\n\t\t\tstr = FormatSize(44475414800)\n\t\t\tExpect(str).To(Equal(\"41.4GB\"))\n\t\t\tstr = FormatSize(1319413953331)\n\t\t\tExpect(str).To(Equal(\"1.2TB\"))\n\t\t\tstr = FormatSize(395824185999360)\n\t\t\tExpect(str).To(Equal(\"360TB\"))\n\t\t\tstr = FormatSize(2260595906707456)\n\t\t\tExpect(str).To(Equal(\"2.01PB\"))\n\n\t\t})\n\t\tIt(\"parses sizes\", func() {\n\t\t\tvar val int64\n\t\t\tvar err error\n\t\t\tval, err = ParseSize(\"5a67\")\n\t\t\tExpect(err).ToNot(BeNil(), \"Should not parse\")\n\t\t\tval, err = ParseSize(\"567\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"567B\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"567b\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\" 567 B \")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(567))\n\t\t\tval, err = ParseSize(\"1KB\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(1024))\n\t\t\tval, err = ParseSize(\"2.5KB\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(2560))\n\t\t\tval, err = ParseSize(\"5.25M\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(5505024))\n\t\t\tval, err = ParseSize(\"75.0Gb\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(80530636800))\n\t\t\tval, err = ParseSize(\"300T\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(329853488332800))\n\t\t\tval, err = ParseSize(\"1.5pb\")\n\t\t\tExpect(err).To(BeNil(), \"Should parse without error\")\n\t\t\tExpect(val).To(BeEquivalentTo(1688849860263936))\n\t\t})\n\n\t})\n\n\tDescribe(\"StringBinarySearch\", func() {\n\t\t\/\/ Note capitalised ordering\n\t\tsortedSlice := []string{\"Cheetah\", \"Frog\", \"aardvark\", \"bear\", \"cheetah\", \"dog\", \"elephant\", \"zebra\"}\n\n\t\tIt(\"behaves correctly on empty lists\", func() {\n\n\t\t\tlist := make([]string, 0)\n\t\t\tfound, insertAt := StringBinarySearch(list, \"test\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find in empty list\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(0), \"Empty list insertion should be zero\")\n\n\t\t})\n\t\tIt(\"behaves correctly on empty search term\", func() {\n\n\t\t\tfound, insertAt := StringBinarySearch(sortedSlice, \"\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find empty string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(0), \"Should insert empty string at start\")\n\n\t\t})\n\t\tIt(\"inserts at start\", func() {\n\n\t\t\tfound, insertAt := StringBinarySearch(sortedSlice, \"Aardvark\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(0), \"Should insert string at start\")\n\n\t\t})\n\t\tIt(\"inserts at end\", func() {\n\n\t\t\tfound, insertAt := StringBinarySearch(sortedSlice, \"zoltan\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(len(sortedSlice)), \"Should insert string at end\")\n\n\t\t})\n\t\tIt(\"inserts in middle\", func() {\n\n\t\t\tfound, insertAt := StringBinarySearch(sortedSlice, \"Dingo\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(1), \"Should insert at correct point\")\n\n\t\t\tfound, insertAt = StringBinarySearch(sortedSlice, \"anteater\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(3), \"Should insert at correct point\")\n\n\t\t\tfound, insertAt = StringBinarySearch(sortedSlice, \"chaffinch\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(4), \"Should insert at correct point\")\n\n\t\t\tfound, insertAt = StringBinarySearch(sortedSlice, \"fox\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(7), \"Should insert at correct point\")\n\n\t\t})\n\t\tIt(\"is case sensitive\", func() {\n\n\t\t\tfound, insertAt := StringBinarySearch(sortedSlice, \"Dog\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(1), \"Should insert at correct point\")\n\n\t\t\tfound, insertAt = StringBinarySearch(sortedSlice, \"frog\")\n\t\t\tExpect(found).To(BeFalse(), \"Should not find string\")\n\t\t\tExpect(insertAt).To(BeEquivalentTo(7), \"Should insert at correct point\")\n\n\t\t})\n\t\tIt(\"finds existing\", func() {\n\t\t\t\/\/ Note not using loop and sortedSlice[i] to test for equality not identity\n\t\t\tfound, at := StringBinarySearch(sortedSlice, \"Cheetah\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(0), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"Frog\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(1), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"aardvark\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(2), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"bear\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(3), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"cheetah\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(4), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"dog\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(5), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"elephant\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(6), \"Should insert at correct point\")\n\t\t\tfound, at = StringBinarySearch(sortedSlice, \"zebra\")\n\t\t\tExpect(found).To(BeTrue(), \"Should find string\")\n\t\t\tExpect(at).To(BeEquivalentTo(7), \"Should insert at correct point\")\n\t\t})\n\n\t})\n\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage goldmark\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/goldmark\/goldmark_config\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/highlight\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/markup_config\"\n\n\t\"github.com\/gohugoio\/hugo\/common\/loggers\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/converter\"\n\n\tqt \"github.com\/frankban\/quicktest\"\n)\n\nfunc convert(c *qt.C, mconf markup_config.Config, content string) converter.Result {\n\n\tp, err := Provider.New(\n\t\tconverter.ProviderConfig{\n\t\t\tMarkupConfig: mconf,\n\t\t\tLogger: loggers.NewErrorLogger(),\n\t\t},\n\t)\n\tc.Assert(err, qt.IsNil)\n\tconv, err := p.New(converter.DocumentContext{DocumentID: \"thedoc\"})\n\tc.Assert(err, qt.IsNil)\n\tb, err := conv.Convert(converter.RenderContext{RenderTOC: true, Src: []byte(content)})\n\tc.Assert(err, qt.IsNil)\n\n\treturn b\n}\n\nfunc TestConvert(t *testing.T) {\n\tc := qt.New(t)\n\n\t\/\/ Smoke test of the default configuration.\n\tcontent := `\n## Links\n\nhttps:\/\/github.com\/gohugoio\/hugo\/issues\/6528\n[Live Demo here!](https:\/\/docuapi.netlify.com\/)\n\n[I'm an inline-style link with title](https:\/\/www.google.com \"Google's Homepage\")\n\n\n## Code Fences\n\n§§§bash\nLINE1\n§§§\n\n## Code Fences No Lexer\n\n§§§moo\nLINE1\n§§§\n\n## Custom ID {#custom}\n\n## Auto ID\n\n* Autolink: https:\/\/gohugo.io\/\n* Strikethrough:~~Hi~~ Hello, world!\n \n## Table\n\n| foo | bar |\n| --- | --- |\n| baz | bim |\n\n## Task Lists (default on)\n\n- [x] Finish my changes[^1]\n- [ ] Push my commits to GitHub\n- [ ] Open a pull request\n\n\n## Smartypants (default on)\n\n* Straight double \"quotes\" and single 'quotes' into “curly” quote HTML entities\n* Dashes (“--” and “---”) into en- and em-dash entities\n* Three consecutive dots (“...”) into an ellipsis entity\n* Apostrophes are also converted: \"That was back in the '90s, that's a long time ago\"\n\n## Footnotes\n\nThat's some text with a footnote.[^1]\n\n## Definition Lists\n\ndate\n: the datetime assigned to this page. \n\ndescription\n: the description for the content.\n\n\n## 神真美好\n\n## 神真美好\n\n## 神真美好\n\n[^1]: And that's the footnote.\n\n`\n\n\t\/\/ Code fences\n\tcontent = strings.Replace(content, \"§§§\", \"```\", -1)\n\tmconf := markup_config.Default\n\tmconf.Highlight.NoClasses = false\n\tmconf.Goldmark.Renderer.Unsafe = true\n\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\t\/\/ Links\n\t\/\/\tc.Assert(got, qt.Contains, `<a href=\"https:\/\/docuapi.netlify.com\/\">Live Demo here!<\/a>`)\n\n\t\/\/ Header IDs\n\tc.Assert(got, qt.Contains, `<h2 id=\"custom\">Custom ID<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"auto-id\">Auto ID<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好\">神真美好<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好-1\">神真美好<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好-2\">神真美好<\/h2>`, qt.Commentf(got))\n\n\t\/\/ Code fences\n\tc.Assert(got, qt.Contains, \"<div class=\\\"highlight\\\"><pre class=\\\"chroma\\\"><code class=\\\"language-bash\\\" data-lang=\\\"bash\\\">LINE1\\n<\/code><\/pre><\/div>\")\n\tc.Assert(got, qt.Contains, \"Code Fences No Lexer<\/h2>\\n<pre><code class=\\\"language-moo\\\" data-lang=\\\"moo\\\">LINE1\\n<\/code><\/pre>\")\n\n\t\/\/ Extensions\n\tc.Assert(got, qt.Contains, `Autolink: <a href=\"https:\/\/gohugo.io\/\">https:\/\/gohugo.io\/<\/a>`)\n\tc.Assert(got, qt.Contains, `Strikethrough:<del>Hi<\/del> Hello, world`)\n\tc.Assert(got, qt.Contains, `<th>foo<\/th>`)\n\tc.Assert(got, qt.Contains, `<li><input disabled=\"\" type=\"checkbox\"> Push my commits to GitHub<\/li>`)\n\n\tc.Assert(got, qt.Contains, `Straight double “quotes” and single ‘quotes’`)\n\tc.Assert(got, qt.Contains, `Dashes (“–” and “—”) `)\n\tc.Assert(got, qt.Contains, `Three consecutive dots (“…”)`)\n\tc.Assert(got, qt.Contains, `“That was back in the ’90s, that’s a long time ago”`)\n\tc.Assert(got, qt.Contains, `footnote.<sup id=\"fnref:1\"><a href=\"#fn:1\" class=\"footnote-ref\" role=\"doc-noteref\">1<\/a><\/sup>`)\n\tc.Assert(got, qt.Contains, `<section class=\"footnotes\" role=\"doc-endnotes\">`)\n\tc.Assert(got, qt.Contains, `<dt>date<\/dt>`)\n\n\ttoc, ok := b.(converter.TableOfContentsProvider)\n\tc.Assert(ok, qt.Equals, true)\n\ttocHTML := toc.TableOfContents().ToHTML(1, 2, false)\n\tc.Assert(tocHTML, qt.Contains, \"TableOfContents\")\n\n}\n\nfunc TestConvertAutoIDAsciiOnly(t *testing.T) {\n\tc := qt.New(t)\n\n\tcontent := `\n## God is Good: 神真美好\n`\n\tmconf := markup_config.Default\n\tmconf.Goldmark.Parser.AutoHeadingIDType = goldmark_config.AutoHeadingIDTypeGitHubAscii\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\tc.Assert(got, qt.Contains, \"<h2 id=\\\"god-is-good-\\\">\")\n}\n\nfunc TestConvertAutoIDBlackfriday(t *testing.T) {\n\tc := qt.New(t)\n\n\tcontent := `\n## Let's try this, shall we?\n\n`\n\tmconf := markup_config.Default\n\tmconf.Goldmark.Parser.AutoHeadingIDType = goldmark_config.AutoHeadingIDTypeBlackfriday\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\tc.Assert(got, qt.Contains, \"<h2 id=\\\"let-s-try-this-shall-we\\\">\")\n}\n\nfunc TestCodeFence(t *testing.T) {\n\tc := qt.New(t)\n\n\tlines := `LINE1\nLINE2\nLINE3\nLINE4\nLINE5\n`\n\n\tconvertForConfig := func(c *qt.C, conf highlight.Config, code, language string) string {\n\t\tmconf := markup_config.Default\n\t\tmconf.Highlight = conf\n\n\t\tp, err := Provider.New(\n\t\t\tconverter.ProviderConfig{\n\t\t\t\tMarkupConfig: mconf,\n\t\t\t\tLogger: loggers.NewErrorLogger(),\n\t\t\t},\n\t\t)\n\n\t\tcontent := \"```\" + language + \"\\n\" + code + \"\\n```\"\n\n\t\tc.Assert(err, qt.IsNil)\n\t\tconv, err := p.New(converter.DocumentContext{})\n\t\tc.Assert(err, qt.IsNil)\n\t\tb, err := conv.Convert(converter.RenderContext{Src: []byte(content)})\n\t\tc.Assert(err, qt.IsNil)\n\n\t\treturn string(b.Bytes())\n\t}\n\n\tc.Run(\"Basic\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\n\t\tresult := convertForConfig(c, cfg, `echo \"Hugo Rocks!\"`, \"bash\")\n\t\t\/\/ TODO(bep) there is a whitespace mismatch (\\n) between this and the highlight template func.\n\t\tc.Assert(result, qt.Equals, `<div class=\"highlight\"><pre class=\"chroma\"><code class=\"language-bash\" data-lang=\"bash\"><span class=\"nb\">echo<\/span> <span class=\"s2\">"Hugo Rocks!"<\/span>\n<\/code><\/pre><\/div>`)\n\t\tresult = convertForConfig(c, cfg, `echo \"Hugo Rocks!\"`, \"unknown\")\n\t\tc.Assert(result, qt.Equals, \"<pre><code class=\\\"language-unknown\\\" data-lang=\\\"unknown\\\">echo "Hugo Rocks!"\\n<\/code><\/pre>\")\n\n\t})\n\n\tc.Run(\"Highlight lines, default config\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\n\t\tresult := convertForConfig(c, cfg, lines, `bash {linenos=table,hl_lines=[2 \"4-5\"],linenostart=3}`)\n\t\tc.Assert(result, qt.Contains, \"<div class=\\\"highlight\\\"><div class=\\\"chroma\\\">\\n<table class=\\\"lntable\\\"><tr><td class=\\\"lntd\\\">\\n<pre class=\\\"chroma\\\"><code><span class\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"hl\\\"><span class=\\\"lnt\\\">4\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=inline,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\/span>\")\n\t\tc.Assert(result, qt.Not(qt.Contains), \"<table\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=true,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Contains, \"<table\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"hl\\\"><span class=\\\"lnt\\\">2\\n<\/span>\")\n\t})\n\n\tc.Run(\"Highlight lines, linenumbers default on\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\n\t\tresult := convertForConfig(c, cfg, lines, \"bash\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"lnt\\\">2\\n<\/span>\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=false,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Not(qt.Contains), \"class=\\\"lnt\\\"\")\n\t})\n\n\tc.Run(\"Highlight lines, linenumbers default on, linenumbers in table default off\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"bash\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\")\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=table}\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"lnt\\\">1\\n<\/span>\")\n\t})\n\n\tc.Run(\"No language\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"\")\n\t\tc.Assert(result, qt.Contains, \"<pre><code>LINE1\\n\")\n\t})\n\n\tc.Run(\"No language, guess syntax\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.GuessSyntax = true\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\")\n\t})\n}\n<commit_msg>markup\/goldmark: Add a test case<commit_after>\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage goldmark\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/goldmark\/goldmark_config\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/highlight\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/markup_config\"\n\n\t\"github.com\/gohugoio\/hugo\/common\/loggers\"\n\n\t\"github.com\/gohugoio\/hugo\/markup\/converter\"\n\n\tqt \"github.com\/frankban\/quicktest\"\n)\n\nfunc convert(c *qt.C, mconf markup_config.Config, content string) converter.Result {\n\n\tp, err := Provider.New(\n\t\tconverter.ProviderConfig{\n\t\t\tMarkupConfig: mconf,\n\t\t\tLogger: loggers.NewErrorLogger(),\n\t\t},\n\t)\n\tc.Assert(err, qt.IsNil)\n\tconv, err := p.New(converter.DocumentContext{DocumentID: \"thedoc\"})\n\tc.Assert(err, qt.IsNil)\n\tb, err := conv.Convert(converter.RenderContext{RenderTOC: true, Src: []byte(content)})\n\tc.Assert(err, qt.IsNil)\n\n\treturn b\n}\n\nfunc TestConvert(t *testing.T) {\n\tc := qt.New(t)\n\n\t\/\/ Smoke test of the default configuration.\n\tcontent := `\n## Links\n\nhttps:\/\/github.com\/gohugoio\/hugo\/issues\/6528\n[Live Demo here!](https:\/\/docuapi.netlify.com\/)\n\n[I'm an inline-style link with title](https:\/\/www.google.com \"Google's Homepage\")\n\n\n## Code Fences\n\n§§§bash\nLINE1\n§§§\n\n## Code Fences No Lexer\n\n§§§moo\nLINE1\n§§§\n\n## Custom ID {#custom}\n\n## Auto ID\n\n* Autolink: https:\/\/gohugo.io\/\n* Strikethrough:~~Hi~~ Hello, world!\n \n## Table\n\n| foo | bar |\n| --- | --- |\n| baz | bim |\n\n## Task Lists (default on)\n\n- [x] Finish my changes[^1]\n- [ ] Push my commits to GitHub\n- [ ] Open a pull request\n\n\n## Smartypants (default on)\n\n* Straight double \"quotes\" and single 'quotes' into “curly” quote HTML entities\n* Dashes (“--” and “---”) into en- and em-dash entities\n* Three consecutive dots (“...”) into an ellipsis entity\n* Apostrophes are also converted: \"That was back in the '90s, that's a long time ago\"\n\n## Footnotes\n\nThat's some text with a footnote.[^1]\n\n## Definition Lists\n\ndate\n: the datetime assigned to this page. \n\ndescription\n: the description for the content.\n\n\n## 神真美好\n\n## 神真美好\n\n## 神真美好\n\n[^1]: And that's the footnote.\n\n`\n\n\t\/\/ Code fences\n\tcontent = strings.Replace(content, \"§§§\", \"```\", -1)\n\tmconf := markup_config.Default\n\tmconf.Highlight.NoClasses = false\n\tmconf.Goldmark.Renderer.Unsafe = true\n\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\t\/\/ Links\n\t\/\/\tc.Assert(got, qt.Contains, `<a href=\"https:\/\/docuapi.netlify.com\/\">Live Demo here!<\/a>`)\n\n\t\/\/ Header IDs\n\tc.Assert(got, qt.Contains, `<h2 id=\"custom\">Custom ID<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"auto-id\">Auto ID<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好\">神真美好<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好-1\">神真美好<\/h2>`, qt.Commentf(got))\n\tc.Assert(got, qt.Contains, `<h2 id=\"神真美好-2\">神真美好<\/h2>`, qt.Commentf(got))\n\n\t\/\/ Code fences\n\tc.Assert(got, qt.Contains, \"<div class=\\\"highlight\\\"><pre class=\\\"chroma\\\"><code class=\\\"language-bash\\\" data-lang=\\\"bash\\\">LINE1\\n<\/code><\/pre><\/div>\")\n\tc.Assert(got, qt.Contains, \"Code Fences No Lexer<\/h2>\\n<pre><code class=\\\"language-moo\\\" data-lang=\\\"moo\\\">LINE1\\n<\/code><\/pre>\")\n\n\t\/\/ Extensions\n\tc.Assert(got, qt.Contains, `Autolink: <a href=\"https:\/\/gohugo.io\/\">https:\/\/gohugo.io\/<\/a>`)\n\tc.Assert(got, qt.Contains, `Strikethrough:<del>Hi<\/del> Hello, world`)\n\tc.Assert(got, qt.Contains, `<th>foo<\/th>`)\n\tc.Assert(got, qt.Contains, `<li><input disabled=\"\" type=\"checkbox\"> Push my commits to GitHub<\/li>`)\n\n\tc.Assert(got, qt.Contains, `Straight double “quotes” and single ‘quotes’`)\n\tc.Assert(got, qt.Contains, `Dashes (“–” and “—”) `)\n\tc.Assert(got, qt.Contains, `Three consecutive dots (“…”)`)\n\tc.Assert(got, qt.Contains, `“That was back in the ’90s, that’s a long time ago”`)\n\tc.Assert(got, qt.Contains, `footnote.<sup id=\"fnref:1\"><a href=\"#fn:1\" class=\"footnote-ref\" role=\"doc-noteref\">1<\/a><\/sup>`)\n\tc.Assert(got, qt.Contains, `<section class=\"footnotes\" role=\"doc-endnotes\">`)\n\tc.Assert(got, qt.Contains, `<dt>date<\/dt>`)\n\n\ttoc, ok := b.(converter.TableOfContentsProvider)\n\tc.Assert(ok, qt.Equals, true)\n\ttocHTML := toc.TableOfContents().ToHTML(1, 2, false)\n\tc.Assert(tocHTML, qt.Contains, \"TableOfContents\")\n\n}\n\nfunc TestConvertAutoIDAsciiOnly(t *testing.T) {\n\tc := qt.New(t)\n\n\tcontent := `\n## God is Good: 神真美好\n`\n\tmconf := markup_config.Default\n\tmconf.Goldmark.Parser.AutoHeadingIDType = goldmark_config.AutoHeadingIDTypeGitHubAscii\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\tc.Assert(got, qt.Contains, \"<h2 id=\\\"god-is-good-\\\">\")\n}\n\nfunc TestConvertAutoIDBlackfriday(t *testing.T) {\n\tc := qt.New(t)\n\n\tcontent := `\n## Let's try this, shall we?\n\n`\n\tmconf := markup_config.Default\n\tmconf.Goldmark.Parser.AutoHeadingIDType = goldmark_config.AutoHeadingIDTypeBlackfriday\n\tb := convert(c, mconf, content)\n\tgot := string(b.Bytes())\n\n\tc.Assert(got, qt.Contains, \"<h2 id=\\\"let-s-try-this-shall-we\\\">\")\n}\n\nfunc TestConvertIssues(t *testing.T) {\n\tc := qt.New(t)\n\n\t\/\/ https:\/\/github.com\/gohugoio\/hugo\/issues\/7619\n\tc.Run(\"Hyphen in HTML attributes\", func(c *qt.C) {\n\t\tmconf := markup_config.Default\n\t\tmconf.Goldmark.Renderer.Unsafe = true\n\t\tinput := `<custom-element>\n <div>This will be \"slotted\" into the custom element.<\/div>\n<\/custom-element>\n`\n\n\t\tb := convert(c, mconf, input)\n\t\tgot := string(b.Bytes())\n\n\t\tc.Assert(got, qt.Contains, \"<p><custom-element>\\n<div>This will be “slotted” into the custom element.<\/div>\\n<\/custom-element><\/p>\\n\")\n\t})\n\n}\n\nfunc TestCodeFence(t *testing.T) {\n\tc := qt.New(t)\n\n\tlines := `LINE1\nLINE2\nLINE3\nLINE4\nLINE5\n`\n\n\tconvertForConfig := func(c *qt.C, conf highlight.Config, code, language string) string {\n\t\tmconf := markup_config.Default\n\t\tmconf.Highlight = conf\n\n\t\tp, err := Provider.New(\n\t\t\tconverter.ProviderConfig{\n\t\t\t\tMarkupConfig: mconf,\n\t\t\t\tLogger: loggers.NewErrorLogger(),\n\t\t\t},\n\t\t)\n\n\t\tcontent := \"```\" + language + \"\\n\" + code + \"\\n```\"\n\n\t\tc.Assert(err, qt.IsNil)\n\t\tconv, err := p.New(converter.DocumentContext{})\n\t\tc.Assert(err, qt.IsNil)\n\t\tb, err := conv.Convert(converter.RenderContext{Src: []byte(content)})\n\t\tc.Assert(err, qt.IsNil)\n\n\t\treturn string(b.Bytes())\n\t}\n\n\tc.Run(\"Basic\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\n\t\tresult := convertForConfig(c, cfg, `echo \"Hugo Rocks!\"`, \"bash\")\n\t\t\/\/ TODO(bep) there is a whitespace mismatch (\\n) between this and the highlight template func.\n\t\tc.Assert(result, qt.Equals, `<div class=\"highlight\"><pre class=\"chroma\"><code class=\"language-bash\" data-lang=\"bash\"><span class=\"nb\">echo<\/span> <span class=\"s2\">"Hugo Rocks!"<\/span>\n<\/code><\/pre><\/div>`)\n\t\tresult = convertForConfig(c, cfg, `echo \"Hugo Rocks!\"`, \"unknown\")\n\t\tc.Assert(result, qt.Equals, \"<pre><code class=\\\"language-unknown\\\" data-lang=\\\"unknown\\\">echo "Hugo Rocks!"\\n<\/code><\/pre>\")\n\n\t})\n\n\tc.Run(\"Highlight lines, default config\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\n\t\tresult := convertForConfig(c, cfg, lines, `bash {linenos=table,hl_lines=[2 \"4-5\"],linenostart=3}`)\n\t\tc.Assert(result, qt.Contains, \"<div class=\\\"highlight\\\"><div class=\\\"chroma\\\">\\n<table class=\\\"lntable\\\"><tr><td class=\\\"lntd\\\">\\n<pre class=\\\"chroma\\\"><code><span class\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"hl\\\"><span class=\\\"lnt\\\">4\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=inline,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\/span>\")\n\t\tc.Assert(result, qt.Not(qt.Contains), \"<table\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=true,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Contains, \"<table\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"hl\\\"><span class=\\\"lnt\\\">2\\n<\/span>\")\n\t})\n\n\tc.Run(\"Highlight lines, linenumbers default on\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\n\t\tresult := convertForConfig(c, cfg, lines, \"bash\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"lnt\\\">2\\n<\/span>\")\n\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=false,hl_lines=[2]}\")\n\t\tc.Assert(result, qt.Not(qt.Contains), \"class=\\\"lnt\\\"\")\n\t})\n\n\tc.Run(\"Highlight lines, linenumbers default on, linenumbers in table default off\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"bash\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\")\n\t\tresult = convertForConfig(c, cfg, lines, \"bash {linenos=table}\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"lnt\\\">1\\n<\/span>\")\n\t})\n\n\tc.Run(\"No language\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"\")\n\t\tc.Assert(result, qt.Contains, \"<pre><code>LINE1\\n\")\n\t})\n\n\tc.Run(\"No language, guess syntax\", func(c *qt.C) {\n\t\tcfg := highlight.DefaultConfig\n\t\tcfg.NoClasses = false\n\t\tcfg.GuessSyntax = true\n\t\tcfg.LineNos = true\n\t\tcfg.LineNumbersInTable = false\n\n\t\tresult := convertForConfig(c, cfg, lines, \"\")\n\t\tc.Assert(result, qt.Contains, \"<span class=\\\"ln\\\">2<\/span>LINE2\\n<\")\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zoekt\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n)\n\n\/\/ IndexFile is a file suitable for concurrent read access. For performance\n\/\/ reasons, it allows a mmap'd implementation.\ntype IndexFile interface {\n\tRead(off uint32, sz uint32) ([]byte, error)\n\tSize() (uint32, error)\n\tClose()\n\tName() string\n}\n\n\/\/ reader is a stateful file\ntype reader struct {\n\tr IndexFile\n\toff uint32\n}\n\nfunc (r *reader) seek(off uint32) {\n\tr.off = off\n}\n\nfunc (r *reader) U32() (uint32, error) {\n\tb, err := r.r.Read(r.off, 4)\n\tr.off += 4\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint32(b), nil\n}\n\nfunc (r *reader) U64() (uint64, error) {\n\tb, err := r.r.Read(r.off, 8)\n\tr.off += 8\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint64(b), nil\n}\n\nfunc (r *reader) readTOC(toc *indexTOC) error {\n\tsz, err := r.r.Size()\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.off = sz - 8\n\n\tvar tocSection simpleSection\n\tif err := tocSection.read(r); err != nil {\n\t\treturn err\n\t}\n\n\tr.seek(tocSection.off)\n\n\tsectionCount, err := r.U32()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecs := toc.sections()\n\n\tif len(secs) != int(sectionCount) {\n\t\treturn fmt.Errorf(\"section count mismatch: got %d want %d\", sectionCount, len(secs))\n\t}\n\n\tfor _, s := range toc.sections() {\n\t\tif err := s.read(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *indexData) readSectionBlob(sec simpleSection) ([]byte, error) {\n\treturn r.file.Read(sec.off, sec.sz)\n}\n\nfunc readSectionU32(f IndexFile, sec simpleSection) ([]uint32, error) {\n\tif sec.sz%4 != 0 {\n\t\treturn nil, fmt.Errorf(\"barf: section size %% 4 != 0: sz %d \", sec.sz)\n\t}\n\tblob, err := f.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tarr := make([]uint32, 0, len(blob)\/4)\n\tfor len(blob) > 0 {\n\t\tarr = append(arr, binary.BigEndian.Uint32(blob))\n\t\tblob = blob[4:]\n\t}\n\treturn arr, nil\n}\n\nfunc readSectionU64(f IndexFile, sec simpleSection) ([]uint64, error) {\n\tif sec.sz%8 != 0 {\n\t\treturn nil, fmt.Errorf(\"barf: section size %% 8 != 0: sz %d \", sec.sz)\n\t}\n\tblob, err := f.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tarr := make([]uint64, 0, len(blob)\/8)\n\tfor len(blob) > 0 {\n\t\tarr = append(arr, binary.BigEndian.Uint64(blob))\n\t\tblob = blob[8:]\n\t}\n\treturn arr, nil\n}\n\nfunc (r *reader) readJSON(data interface{}, sec *simpleSection) error {\n\tblob, err := r.r.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(blob, data)\n}\n\nfunc (r *reader) readIndexData(toc *indexTOC) (*indexData, error) {\n\td := indexData{\n\t\tfile: r.r,\n\t\tngrams: map[ngram]simpleSection{},\n\t\tfileNameNgrams: map[ngram][]uint32{},\n\t\tbranchIDs: map[string]uint{},\n\t\tbranchNames: map[uint]string{},\n\t}\n\n\tblob, err := d.readSectionBlob(toc.metaData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(blob, &d.metaData); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif d.metaData.IndexFormatVersion != IndexFormatVersion {\n\t\treturn nil, fmt.Errorf(\"file is v%d, want v%d\", d.metaData.IndexFormatVersion, IndexFormatVersion)\n\t}\n\n\tblob, err = d.readSectionBlob(toc.repoMetaData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := json.Unmarshal(blob, &d.repoMetaData); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.boundariesStart = toc.fileContents.data.off\n\td.boundaries = toc.fileContents.relativeIndex()\n\td.newlinesStart = toc.newlines.data.off\n\td.newlinesIndex = toc.newlines.relativeIndex()\n\td.docSectionsStart = toc.fileSections.data.off\n\td.docSectionsIndex = toc.fileSections.relativeIndex()\n\n\td.checksums, err = d.readSectionBlob(toc.contentChecksums)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.languages, err = d.readSectionBlob(toc.languages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttextContent, err := d.readSectionBlob(toc.ngramText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpostingsIndex := toc.postings.relativeIndex()\n\n\tconst ngramEncoding = 8\n\tfor i := 0; i < len(textContent); i += ngramEncoding {\n\t\tj := i \/ ngramEncoding\n\t\tng := ngram(binary.BigEndian.Uint64(textContent[i : i+ngramEncoding]))\n\t\td.ngrams[ng] = simpleSection{\n\t\t\ttoc.postings.data.off + postingsIndex[j],\n\t\t\tpostingsIndex[j+1] - postingsIndex[j],\n\t\t}\n\t}\n\n\td.fileBranchMasks, err = readSectionU64(d.file, toc.branchMasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.fileNameContent, err = d.readSectionBlob(toc.fileNames.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.fileNameIndex = toc.fileNames.relativeIndex()\n\n\tnameNgramText, err := d.readSectionBlob(toc.nameNgramText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileNamePostingsData, err := d.readSectionBlob(toc.namePostings.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileNamePostingsIndex := toc.namePostings.relativeIndex()\n\tfor i := 0; i < len(nameNgramText); i += ngramEncoding {\n\t\tj := i \/ ngramEncoding\n\t\toff := fileNamePostingsIndex[j]\n\t\tend := fileNamePostingsIndex[j+1]\n\t\tng := ngram(binary.BigEndian.Uint64(nameNgramText[i : i+ngramEncoding]))\n\t\td.fileNameNgrams[ng] = fromDeltas(fileNamePostingsData[off:end], nil)\n\t}\n\n\tfor j, br := range d.repoMetaData.Branches {\n\t\tid := uint(1) << uint(j)\n\t\td.branchIDs[br.Name] = id\n\t\td.branchNames[id] = br.Name\n\t}\n\n\tblob, err = d.readSectionBlob(toc.runeDocSections)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.runeDocSections = unmarshalDocSections(blob, nil)\n\n\tfor sect, dest := range map[simpleSection]*[]uint32{\n\t\ttoc.subRepos: &d.subRepos,\n\t\ttoc.runeOffsets: &d.runeOffsets,\n\t\ttoc.nameRuneOffsets: &d.fileNameRuneOffsets,\n\t\ttoc.nameEndRunes: &d.fileNameEndRunes,\n\t\ttoc.fileEndRunes: &d.fileEndRunes,\n\t} {\n\t\tif blob, err := d.readSectionBlob(sect); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t*dest = fromSizedDeltas(blob, nil)\n\t\t}\n\t}\n\n\tvar keys []string\n\tfor k := range d.repoMetaData.SubRepoMap {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\td.subRepoPaths = keys\n\n\td.languageMap = map[byte]string{}\n\tfor k, v := range d.metaData.LanguageMap {\n\t\td.languageMap[v] = k\n\t}\n\n\tif err := d.verify(); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.calculateStats()\n\treturn &d, nil\n}\n\nfunc (d *indexData) verify() error {\n\t\/\/ This is not an exhaustive check: the postings can easily\n\t\/\/ generate OOB acccesses, and are expensive to check, but this lets us rule out\n\t\/\/ other sources of OOB access.\n\tn := len(d.fileNameIndex)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tn--\n\tfor what, got := range map[string]int{\n\t\t\"boundaries\": len(d.boundaries) - 1,\n\t\t\"branch masks\": len(d.fileBranchMasks),\n\t\t\"doc section index\": len(d.docSectionsIndex) - 1,\n\t\t\"newlines index\": len(d.newlinesIndex) - 1,\n\t} {\n\t\tif got != n {\n\t\t\treturn fmt.Errorf(\"got %s %d, want %d\", what, got, n)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *indexData) readContents(i uint32) ([]byte, error) {\n\treturn d.readSectionBlob(simpleSection{\n\t\toff: d.boundariesStart + d.boundaries[i],\n\t\tsz: d.boundaries[i+1] - d.boundaries[i],\n\t})\n}\n\nfunc (d *indexData) readContentSlice(off uint32, sz uint32) ([]byte, error) {\n\t\/\/ TODO(hanwen): cap result if it is at the end of the content\n\t\/\/ section.\n\treturn d.readSectionBlob(simpleSection{\n\t\toff: d.boundariesStart + off,\n\t\tsz: sz,\n\t})\n}\n\nfunc (d *indexData) readNewlines(i uint32, buf []uint32) ([]uint32, uint32, error) {\n\tsec := simpleSection{\n\t\toff: d.newlinesStart + d.newlinesIndex[i],\n\t\tsz: d.newlinesIndex[i+1] - d.newlinesIndex[i],\n\t}\n\tblob, err := d.readSectionBlob(sec)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn fromSizedDeltas(blob, buf), sec.sz, nil\n}\n\nfunc (d *indexData) readDocSections(i uint32, buf []DocumentSection) ([]DocumentSection, uint32, error) {\n\tsec := simpleSection{\n\t\toff: d.docSectionsStart + d.docSectionsIndex[i],\n\t\tsz: d.docSectionsIndex[i+1] - d.docSectionsIndex[i],\n\t}\n\tblob, err := d.readSectionBlob(sec)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn unmarshalDocSections(blob, buf), sec.sz, nil\n}\n\n\/\/ NewSearcher creates a Searcher for a single index file. Search\n\/\/ results coming from this searcher are valid only for the lifetime\n\/\/ of the Searcher itself, ie. []byte members should be copied into\n\/\/ fresh buffers if the result is to survive closing the shard.\nfunc NewSearcher(r IndexFile) (Searcher, error) {\n\trd := &reader{r: r}\n\n\tvar toc indexTOC\n\tif err := rd.readTOC(&toc); err != nil {\n\t\treturn nil, err\n\t}\n\tindexData, err := rd.readIndexData(&toc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexData.file = r\n\treturn indexData, nil\n}\n\n\/\/ ReadMetadata returns the metadata of index shard without reading\n\/\/ the index data. The IndexFile is not closed.\nfunc ReadMetadata(inf IndexFile) (*Repository, *IndexMetadata, error) {\n\trd := &reader{r: inf}\n\tvar toc indexTOC\n\tif err := rd.readTOC(&toc); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar md IndexMetadata\n\tif err := rd.readJSON(&md, &toc.metaData); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar repo Repository\n\tif err := rd.readJSON(&repo, &toc.repoMetaData); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &repo, &md, nil\n}\n<commit_msg>index: read in ngrams in own function<commit_after>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zoekt\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n)\n\n\/\/ IndexFile is a file suitable for concurrent read access. For performance\n\/\/ reasons, it allows a mmap'd implementation.\ntype IndexFile interface {\n\tRead(off uint32, sz uint32) ([]byte, error)\n\tSize() (uint32, error)\n\tClose()\n\tName() string\n}\n\n\/\/ reader is a stateful file\ntype reader struct {\n\tr IndexFile\n\toff uint32\n}\n\nfunc (r *reader) seek(off uint32) {\n\tr.off = off\n}\n\nfunc (r *reader) U32() (uint32, error) {\n\tb, err := r.r.Read(r.off, 4)\n\tr.off += 4\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint32(b), nil\n}\n\nfunc (r *reader) U64() (uint64, error) {\n\tb, err := r.r.Read(r.off, 8)\n\tr.off += 8\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint64(b), nil\n}\n\nfunc (r *reader) readTOC(toc *indexTOC) error {\n\tsz, err := r.r.Size()\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.off = sz - 8\n\n\tvar tocSection simpleSection\n\tif err := tocSection.read(r); err != nil {\n\t\treturn err\n\t}\n\n\tr.seek(tocSection.off)\n\n\tsectionCount, err := r.U32()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecs := toc.sections()\n\n\tif len(secs) != int(sectionCount) {\n\t\treturn fmt.Errorf(\"section count mismatch: got %d want %d\", sectionCount, len(secs))\n\t}\n\n\tfor _, s := range toc.sections() {\n\t\tif err := s.read(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *indexData) readSectionBlob(sec simpleSection) ([]byte, error) {\n\treturn r.file.Read(sec.off, sec.sz)\n}\n\nfunc readSectionU32(f IndexFile, sec simpleSection) ([]uint32, error) {\n\tif sec.sz%4 != 0 {\n\t\treturn nil, fmt.Errorf(\"barf: section size %% 4 != 0: sz %d \", sec.sz)\n\t}\n\tblob, err := f.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tarr := make([]uint32, 0, len(blob)\/4)\n\tfor len(blob) > 0 {\n\t\tarr = append(arr, binary.BigEndian.Uint32(blob))\n\t\tblob = blob[4:]\n\t}\n\treturn arr, nil\n}\n\nfunc readSectionU64(f IndexFile, sec simpleSection) ([]uint64, error) {\n\tif sec.sz%8 != 0 {\n\t\treturn nil, fmt.Errorf(\"barf: section size %% 8 != 0: sz %d \", sec.sz)\n\t}\n\tblob, err := f.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tarr := make([]uint64, 0, len(blob)\/8)\n\tfor len(blob) > 0 {\n\t\tarr = append(arr, binary.BigEndian.Uint64(blob))\n\t\tblob = blob[8:]\n\t}\n\treturn arr, nil\n}\n\nfunc (r *reader) readJSON(data interface{}, sec *simpleSection) error {\n\tblob, err := r.r.Read(sec.off, sec.sz)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(blob, data)\n}\n\nfunc (r *reader) readIndexData(toc *indexTOC) (*indexData, error) {\n\td := indexData{\n\t\tfile: r.r,\n\t\tngrams: map[ngram]simpleSection{},\n\t\tfileNameNgrams: map[ngram][]uint32{},\n\t\tbranchIDs: map[string]uint{},\n\t\tbranchNames: map[uint]string{},\n\t}\n\n\tblob, err := d.readSectionBlob(toc.metaData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(blob, &d.metaData); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif d.metaData.IndexFormatVersion != IndexFormatVersion {\n\t\treturn nil, fmt.Errorf(\"file is v%d, want v%d\", d.metaData.IndexFormatVersion, IndexFormatVersion)\n\t}\n\n\tblob, err = d.readSectionBlob(toc.repoMetaData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := json.Unmarshal(blob, &d.repoMetaData); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.boundariesStart = toc.fileContents.data.off\n\td.boundaries = toc.fileContents.relativeIndex()\n\td.newlinesStart = toc.newlines.data.off\n\td.newlinesIndex = toc.newlines.relativeIndex()\n\td.docSectionsStart = toc.fileSections.data.off\n\td.docSectionsIndex = toc.fileSections.relativeIndex()\n\n\td.checksums, err = d.readSectionBlob(toc.contentChecksums)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.languages, err = d.readSectionBlob(toc.languages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.ngrams, err = d.readNgrams(toc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.fileBranchMasks, err = readSectionU64(d.file, toc.branchMasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.fileNameContent, err = d.readSectionBlob(toc.fileNames.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.fileNameIndex = toc.fileNames.relativeIndex()\n\n\td.fileNameNgrams, err = d.readFileNameNgrams(toc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor j, br := range d.repoMetaData.Branches {\n\t\tid := uint(1) << uint(j)\n\t\td.branchIDs[br.Name] = id\n\t\td.branchNames[id] = br.Name\n\t}\n\n\tblob, err = d.readSectionBlob(toc.runeDocSections)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.runeDocSections = unmarshalDocSections(blob, nil)\n\n\tfor sect, dest := range map[simpleSection]*[]uint32{\n\t\ttoc.subRepos: &d.subRepos,\n\t\ttoc.runeOffsets: &d.runeOffsets,\n\t\ttoc.nameRuneOffsets: &d.fileNameRuneOffsets,\n\t\ttoc.nameEndRunes: &d.fileNameEndRunes,\n\t\ttoc.fileEndRunes: &d.fileEndRunes,\n\t} {\n\t\tif blob, err := d.readSectionBlob(sect); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t*dest = fromSizedDeltas(blob, nil)\n\t\t}\n\t}\n\n\tvar keys []string\n\tfor k := range d.repoMetaData.SubRepoMap {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\td.subRepoPaths = keys\n\n\td.languageMap = map[byte]string{}\n\tfor k, v := range d.metaData.LanguageMap {\n\t\td.languageMap[v] = k\n\t}\n\n\tif err := d.verify(); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.calculateStats()\n\treturn &d, nil\n}\n\nconst ngramEncoding = 8\n\nfunc (d *indexData) readNgrams(toc *indexTOC) (map[ngram]simpleSection, error) {\n\ttextContent, err := d.readSectionBlob(toc.ngramText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpostingsIndex := toc.postings.relativeIndex()\n\n\tngrams := map[ngram]simpleSection{}\n\tfor i := 0; i < len(textContent); i += ngramEncoding {\n\t\tj := i \/ ngramEncoding\n\t\tng := ngram(binary.BigEndian.Uint64(textContent[i : i+ngramEncoding]))\n\t\tngrams[ng] = simpleSection{\n\t\t\ttoc.postings.data.off + postingsIndex[j],\n\t\t\tpostingsIndex[j+1] - postingsIndex[j],\n\t\t}\n\t}\n\n\treturn ngrams, nil\n}\n\nfunc (d *indexData) readFileNameNgrams(toc *indexTOC) (map[ngram][]uint32, error) {\n\tnameNgramText, err := d.readSectionBlob(toc.nameNgramText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileNamePostingsData, err := d.readSectionBlob(toc.namePostings.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileNamePostingsIndex := toc.namePostings.relativeIndex()\n\n\tfileNameNgrams := map[ngram][]uint32{}\n\tfor i := 0; i < len(nameNgramText); i += ngramEncoding {\n\t\tj := i \/ ngramEncoding\n\t\toff := fileNamePostingsIndex[j]\n\t\tend := fileNamePostingsIndex[j+1]\n\t\tng := ngram(binary.BigEndian.Uint64(nameNgramText[i : i+ngramEncoding]))\n\t\tfileNameNgrams[ng] = fromDeltas(fileNamePostingsData[off:end], nil)\n\t}\n\n\treturn fileNameNgrams, nil\n}\n\nfunc (d *indexData) verify() error {\n\t\/\/ This is not an exhaustive check: the postings can easily\n\t\/\/ generate OOB acccesses, and are expensive to check, but this lets us rule out\n\t\/\/ other sources of OOB access.\n\tn := len(d.fileNameIndex)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tn--\n\tfor what, got := range map[string]int{\n\t\t\"boundaries\": len(d.boundaries) - 1,\n\t\t\"branch masks\": len(d.fileBranchMasks),\n\t\t\"doc section index\": len(d.docSectionsIndex) - 1,\n\t\t\"newlines index\": len(d.newlinesIndex) - 1,\n\t} {\n\t\tif got != n {\n\t\t\treturn fmt.Errorf(\"got %s %d, want %d\", what, got, n)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *indexData) readContents(i uint32) ([]byte, error) {\n\treturn d.readSectionBlob(simpleSection{\n\t\toff: d.boundariesStart + d.boundaries[i],\n\t\tsz: d.boundaries[i+1] - d.boundaries[i],\n\t})\n}\n\nfunc (d *indexData) readContentSlice(off uint32, sz uint32) ([]byte, error) {\n\t\/\/ TODO(hanwen): cap result if it is at the end of the content\n\t\/\/ section.\n\treturn d.readSectionBlob(simpleSection{\n\t\toff: d.boundariesStart + off,\n\t\tsz: sz,\n\t})\n}\n\nfunc (d *indexData) readNewlines(i uint32, buf []uint32) ([]uint32, uint32, error) {\n\tsec := simpleSection{\n\t\toff: d.newlinesStart + d.newlinesIndex[i],\n\t\tsz: d.newlinesIndex[i+1] - d.newlinesIndex[i],\n\t}\n\tblob, err := d.readSectionBlob(sec)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn fromSizedDeltas(blob, buf), sec.sz, nil\n}\n\nfunc (d *indexData) readDocSections(i uint32, buf []DocumentSection) ([]DocumentSection, uint32, error) {\n\tsec := simpleSection{\n\t\toff: d.docSectionsStart + d.docSectionsIndex[i],\n\t\tsz: d.docSectionsIndex[i+1] - d.docSectionsIndex[i],\n\t}\n\tblob, err := d.readSectionBlob(sec)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn unmarshalDocSections(blob, buf), sec.sz, nil\n}\n\n\/\/ NewSearcher creates a Searcher for a single index file. Search\n\/\/ results coming from this searcher are valid only for the lifetime\n\/\/ of the Searcher itself, ie. []byte members should be copied into\n\/\/ fresh buffers if the result is to survive closing the shard.\nfunc NewSearcher(r IndexFile) (Searcher, error) {\n\trd := &reader{r: r}\n\n\tvar toc indexTOC\n\tif err := rd.readTOC(&toc); err != nil {\n\t\treturn nil, err\n\t}\n\tindexData, err := rd.readIndexData(&toc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexData.file = r\n\treturn indexData, nil\n}\n\n\/\/ ReadMetadata returns the metadata of index shard without reading\n\/\/ the index data. The IndexFile is not closed.\nfunc ReadMetadata(inf IndexFile) (*Repository, *IndexMetadata, error) {\n\trd := &reader{r: inf}\n\tvar toc indexTOC\n\tif err := rd.readTOC(&toc); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar md IndexMetadata\n\tif err := rd.readJSON(&md, &toc.metaData); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar repo Repository\n\tif err := rd.readJSON(&repo, &toc.repoMetaData); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &repo, &md, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage restplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"git.fd.io\/govpp.git\/core\/bin_api\/vpe\"\n\taclplugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/aclplugin\/vppcalls\"\n\tifplugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/ifplugin\/vppdump\"\n\tl2plugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/l2plugin\/vppdump\"\n\t\"github.com\/unrolled\/render\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/interfacesGetHandler - used to get list of all interfaces\nfunc (plugin *RESTAPIPlugin) interfacesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all interfaces\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := ifplugin.DumpInterfaces(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n\n\/\/bridgeDomainIdsGetHandler - used to get list of all bridge domain ids\nfunc (plugin *RESTAPIPlugin) bridgeDomainIdsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all bridge domain ids\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpBridgeDomainIDs(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n\n\/\/bridgeDomainsGetHandler - used to get list of all bridge domains\nfunc (plugin *RESTAPIPlugin) bridgeDomainsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all bridge domains\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpBridgeDomains(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n\n\/\/fibTableEntriesGetHandler - used to get list of all fib entries\nfunc (plugin *RESTAPIPlugin) fibTableEntriesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all fibs\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpFIBTableEntries(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n\n\/\/xconnectPairsGetHandler - used to get list of all connect pairs (transmit and receive interfaces)\nfunc (plugin *RESTAPIPlugin) xconnectPairsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all xconnect pairs\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpXConnectPairs(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n\n\/\/staticRoutesGetHandler - used to get list of all static routes\n\/*\nfunc (plugin *RESTAPIPlugin) staticRoutesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all static routes\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l3plugin.DumpStaticRoutes(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t\tdefer ch.Close()\n\t}\n}\n*\/\n\n\/\/interfaceACLPostHandler - used to get acl configuration for a particular interface\nfunc (plugin *RESTAPIPlugin) interfaceACLPostHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting acl configuration of interface\")\n\n\t\tvar reqParam map[string]string\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to parse request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tplugin.Deps.Log.Infof(\"request body = %v\", body)\n\t\terr = json.Unmarshal(body, &reqParam)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to unmarshal request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tswIndexStr, ok := reqParam[\"swIndex\"]\n\n\t\tif !ok {\n\t\t\tplugin.Deps.Log.Error(\"swIndex paramenter not included.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tplugin.Deps.Log.Infof(\"Received request for swIndex :: %v \", swIndexStr)\n\n\t\tif swIndexStr != \"\" {\n\t\t\tswIndexuInt64, err := strconv.ParseUint(swIndexStr, 10, 32)\n\t\t\tswIndex := uint32(swIndexuInt64)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ create an API channel\n\t\t\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\t\t\tif err != nil {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t} else {\n\t\t\t\t\tres, err := aclplugin.DumpInterface(swIndex, ch, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdefer ch.Close()\n\t\t\t}\n\t\t} else {\n\t\t\tformatter.JSON(w, http.StatusBadRequest, \"swIndex parameter not found\")\n\t\t}\n\t}\n}\n\n\/\/showCommandHandler - used to execute VPP CLI commands\nfunc (plugin *RESTAPIPlugin) showCommandHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tvar reqParam map[string]string\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to parse request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(body, &reqParam)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to unmarshal request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tcommand, ok := reqParam[\"vppclicommand\"]\n\n\t\tif !ok {\n\t\t\tplugin.Deps.Log.Error(\"command parameter not included.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif command != \"\" {\n\n\t\t\tplugin.Deps.Log.WithField(\"VPPCLI command\", command).Infof(\"Received command: %v\", command)\n\n\t\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error creating channel: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t} else {\n\t\t\t\treq := &vpe.CliInband{}\n\t\t\t\treq.Length = uint32(len(command))\n\t\t\t\treq.Cmd = []byte(command)\n\n\t\t\t\treply := &vpe.CliInbandReply{}\n\t\t\t\terr = ch.SendRequest(req).ReceiveReply(reply)\n\t\t\t\tif err != nil {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error processing request: %v\", err)\n\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t}\n\n\t\t\t\tif reply.Retval > 0 {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Command returned code: %v\", reply.Retval)\n\t\t\t\t}\n\n\t\t\t\tplugin.Deps.Log.WithField(\"VPPCLI response\", string(reply.Reply)).Infof(\"Command returned reply :: %v\", string(reply.Reply))\n\n\t\t\t\tformatter.JSON(w, http.StatusOK, string(reply.Reply))\n\t\t\t}\n\t\t\tdefer ch.Close()\n\t\t} else {\n\t\t\tformatter.JSON(w, http.StatusBadRequest, \"showCommand parameter is empty\")\n\t\t}\n\t}\n}\n<commit_msg>SPOPT-1690 - REST API for VPP incorporated code review comments<commit_after>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage restplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"git.fd.io\/govpp.git\/core\/bin_api\/vpe\"\n\taclplugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/aclplugin\/vppcalls\"\n\tifplugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/ifplugin\/vppdump\"\n\tl2plugin \"github.com\/ligato\/vpp-agent\/plugins\/defaultplugins\/l2plugin\/vppdump\"\n\t\"github.com\/unrolled\/render\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/interfacesGetHandler - used to get list of all interfaces\nfunc (plugin *RESTAPIPlugin) interfacesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all interfaces\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := ifplugin.DumpInterfaces(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/bridgeDomainIdsGetHandler - used to get list of all bridge domain ids\nfunc (plugin *RESTAPIPlugin) bridgeDomainIdsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all bridge domain ids\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpBridgeDomainIDs(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/bridgeDomainsGetHandler - used to get list of all bridge domains\nfunc (plugin *RESTAPIPlugin) bridgeDomainsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all bridge domains\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpBridgeDomains(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/fibTableEntriesGetHandler - used to get list of all fib entries\nfunc (plugin *RESTAPIPlugin) fibTableEntriesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all fibs\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpFIBTableEntries(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/xconnectPairsGetHandler - used to get list of all connect pairs (transmit and receive interfaces)\nfunc (plugin *RESTAPIPlugin) xconnectPairsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all xconnect pairs\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l2plugin.DumpXConnectPairs(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/staticRoutesGetHandler - used to get list of all static routes\n\/*\nfunc (plugin *RESTAPIPlugin) staticRoutesGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting list of all static routes\")\n\n\t\t\/\/ create an API channel\n\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\tdefer ch.Close()\n\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t} else {\n\t\t\tres, err := l3plugin.DumpStaticRoutes(plugin.Deps.Log, ch, nil)\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, nil)\n\t\t\t} else {\n\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t}\n\t\t}\n\t}\n}\n*\/\n\n\/\/interfaceACLPostHandler - used to get acl configuration for a particular interface\nfunc (plugin *RESTAPIPlugin) interfaceACLPostHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tplugin.Deps.Log.Info(\"Getting acl configuration of interface\")\n\n\t\tvar reqParam map[string]string\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to parse request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tplugin.Deps.Log.Infof(\"request body = %v\", body)\n\t\terr = json.Unmarshal(body, &reqParam)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to unmarshal request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tswIndexStr, ok := reqParam[\"swIndex\"]\n\n\t\tif !ok {\n\t\t\tplugin.Deps.Log.Error(\"swIndex paramenter not included.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tplugin.Deps.Log.Infof(\"Received request for swIndex :: %v \", swIndexStr)\n\n\t\tif swIndexStr != \"\" {\n\t\t\tswIndexuInt64, err := strconv.ParseUint(swIndexStr, 10, 32)\n\t\t\tswIndex := uint32(swIndexuInt64)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ create an API channel\n\t\t\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\t\t\tdefer ch.Close()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t} else {\n\t\t\t\t\tres, err := aclplugin.DumpInterface(swIndex, ch, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error: %v\", err)\n\t\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tplugin.Deps.Log.Debug(res)\n\t\t\t\t\t\tformatter.JSON(w, http.StatusOK, res)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tformatter.JSON(w, http.StatusBadRequest, \"swIndex parameter not found\")\n\t\t}\n\t}\n}\n\n\/\/showCommandHandler - used to execute VPP CLI commands\nfunc (plugin *RESTAPIPlugin) showCommandHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\n\t\tvar reqParam map[string]string\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to parse request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(body, &reqParam)\n\t\tif err != nil {\n\t\t\tplugin.Deps.Log.Error(\"Failed to unmarshal request body.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tcommand, ok := reqParam[\"vppclicommand\"]\n\n\t\tif !ok {\n\t\t\tplugin.Deps.Log.Error(\"command parameter not included.\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif command != \"\" {\n\n\t\t\tplugin.Deps.Log.WithField(\"VPPCLI command\", command).Infof(\"Received command: %v\", command)\n\n\t\t\tch, err := plugin.Deps.GoVppmux.NewAPIChannel()\n\t\t\tdefer ch.Close()\n\n\t\t\tif err != nil {\n\t\t\t\tplugin.Deps.Log.Errorf(\"Error creating channel: %v\", err)\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t} else {\n\t\t\t\treq := &vpe.CliInband{}\n\t\t\t\treq.Length = uint32(len(command))\n\t\t\t\treq.Cmd = []byte(command)\n\n\t\t\t\treply := &vpe.CliInbandReply{}\n\t\t\t\terr = ch.SendRequest(req).ReceiveReply(reply)\n\t\t\t\tif err != nil {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Error processing request: %v\", err)\n\t\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\t}\n\n\t\t\t\tif reply.Retval > 0 {\n\t\t\t\t\tplugin.Deps.Log.Errorf(\"Command returned code: %v\", reply.Retval)\n\t\t\t\t}\n\n\t\t\t\tplugin.Deps.Log.WithField(\"VPPCLI response\", string(reply.Reply)).Infof(\"Command returned reply :: %v\", string(reply.Reply))\n\n\t\t\t\tformatter.JSON(w, http.StatusOK, string(reply.Reply))\n\t\t\t}\n\t\t} else {\n\t\t\tformatter.JSON(w, http.StatusBadRequest, \"showCommand parameter is empty\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package carton\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/megamsys\/libgo\/api\"\n\t\"github.com\/megamsys\/libgo\/cmd\"\n\t\"github.com\/megamsys\/libgo\/pairs\"\n\t\"github.com\/megamsys\/libgo\/utils\"\n\tlw \"github.com\/megamsys\/libgo\/writer\"\n\t\"github.com\/megamsys\/vertice\/meta\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tAPIBACKUPS = \"\/backups\/\"\n\tBACKUPS_SHOW = \"\/backups\/show\/\"\n\tUPDATE = \"update\"\n\tDELETE = \"delete\/\"\n\tACCOUNTID = \"account_id\"\n\tASSEMBLYID = \"asm_id\"\n\tPUBLIC_URL = \"public_url\"\n)\n\ntype ApiBackups struct {\n\tJsonClaz string `json:\"json_claz\" cql:\"json_claz\"`\n\tResults []Backups `json:\"results\" cql:\"results\"`\n}\n\n\/\/The grand elephant for megam cloud platform.\ntype Backups struct {\n\tId string `json:\"id\" cql:\"id\"`\n\tImageId string `json:\"image_id\" cql:\"image_id\"`\n\tOrgId string `json:\"org_id\" cql:\"org_id\"`\n\tAccountId string `json:\"account_id\" cql:\"account_id\"`\n\tName string `json:\"name\" cql:\"name\"`\n\tAssemblyId string `json:\"asm_id\" cql:\"asm_id\"`\n\tJsonClaz string `json:\"json_claz\" cql:\"json_claz\"`\n\tCreatedAt string `json:\"created_at\" cql:\"created_at\"`\n\tStatus string `json:\"status\" cql:\"status\"`\n\tTosca string `json:\"tosca_type\" cql:\"tosca_type\"`\n\tInputs pairs.JsonPairs `json:\"inputs\" cql:\"inputs\"`\n\tOutputs pairs.JsonPairs `json:\"outputs\" cql:\"outputs\"`\n}\n\nfunc (s *Backups) String() string {\n\tif d, err := yaml.Marshal(s); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc CreateImage(opts *DiskOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := lw.LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].SaveImage(opts.B, writer)\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tslog := outBuffer.String()\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(elapsed.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\treturn nil\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc DeleteImage(opts *DiskOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := lw.LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].DeleteImage(opts.B, writer)\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tslog := outBuffer.String()\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(elapsed.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\treturn nil\n}\n\n\/** A public function which pulls the backup for disk save as image.\nand any others we do. **\/\nfunc GetBackup(id, email string) (*Backups, error) {\n\tcl := api.NewClient(newArgs(email, \"\"), BACKUPS_SHOW+id)\n\n\tresponse, err := cl.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &ApiBackups{}\n\terr = json.Unmarshal(response, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta := &res.Results[0]\n\treturn a, nil\n}\n\n\/** A public function which pulls the snapshot for disk save as image.\nand any others we do. **\/\nfunc (s *Backups) GetBox() ([]Backups, error) {\n\tcl := api.NewClient(newArgs(meta.MC.MasterUser, \"\"), \"\/admin\"+APIBACKUPS)\n\tresponse, err := cl.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &ApiBackups{}\n\terr = json.Unmarshal(response, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Results, nil\n}\n\nfunc (s *Backups) UpdateBackup() error {\n\tcl := api.NewClient(newArgs(s.AccountId, s.OrgId), APIBACKUPS+UPDATE)\n\tif _, err := cl.Post(s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (s *Backups) RemoveBackup() error {\n\tcl := api.NewClient(newArgs(s.AccountId, s.OrgId), APIBACKUPS+s.AssemblyId+\"\/\"+s.Id)\n\tif _, err := cl.Delete(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/make cartons from backups.\nfunc (a *Backups) MkCartons() (Cartons, error) {\n\tnewCs := make(Cartons, 0, 1)\n\tif len(strings.TrimSpace(a.AssemblyId)) > 1 && a.Tosca != utils.BACKUP_NEW {\n\t\tif ca, err := mkCarton(a.Id, a.AssemblyId, a.AccountId); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tca.toBox() \/\/on success, make a carton2box if BoxLevel is BoxZero\n\t\t\tnewCs = append(newCs, ca) \/\/on success append carton\n\t\t}\n\t} else {\n\t\tca, err := a.mkCarton()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tca.toBox()\n\t\tnewCs = append(newCs, ca)\n\t}\n\n\tlog.Debugf(\"Cartons %v\", newCs)\n\treturn newCs, nil\n}\n\nfunc (a *Backups) mkCarton() (*Carton, error) {\n\tact, err := new(Account).get(newArgs(a.AccountId, \"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := make([]provision.Box, 0, 0)\n\treturn &Carton{\n\t\tId: a.AssemblyId,\n\t\tCartonsId: a.Id,\n\t\tOrgId: a.OrgId,\n\t\tName: a.Name,\n\t\tTosca: a.Tosca,\n\t\tAccountId: a.AccountId,\n\t\tAuthority: act.States.Authority,\n\t\tImageVersion: a.imageVersion(),\n\t\tProvider: a.provider(),\n\t\tRegion: a.region(),\n\t\tImageName: a.imageName(),\n\t\tPublicUrl: a.publicUrl(),\n\t\tBoxes: &b,\n\t\tStatus: utils.Status(a.Status),\n\t}, nil\n}\n\nfunc (s *Backups) Sizeof() string {\n\treturn s.Outputs.Match(\"image_size\")\n}\n\nfunc (b *Backups) region() string {\n\treturn b.Inputs.Match(REGION)\n}\n\nfunc (b *Backups) provider() string {\n\tp := b.Inputs.Match(utils.PROVIDER)\n\tif p != \"\" {\n\t\treturn p\n\t}\n\treturn utils.PROVIDER_ONE\n}\n\nfunc (b *Backups) imageVersion() string {\n\treturn b.Inputs.Match(IMAGE_VERSION)\n}\n\nfunc (b *Backups) publicUrl() string {\n\treturn b.Inputs.Match(PUBLIC_URL)\n}\n\nfunc (b *Backups) imageName() string {\n\tp := b.Inputs.Match(BACKUPNAME)\n\tif p != \"\" {\n\t\treturn p\n\t}\n\treturn b.Name\n}\n<commit_msg>key changes for backup<commit_after>package carton\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/megamsys\/libgo\/api\"\n\t\"github.com\/megamsys\/libgo\/cmd\"\n\t\"github.com\/megamsys\/libgo\/pairs\"\n\t\"github.com\/megamsys\/libgo\/utils\"\n\tlw \"github.com\/megamsys\/libgo\/writer\"\n\t\"github.com\/megamsys\/vertice\/meta\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tAPIBACKUPS = \"\/backups\/\"\n\tBACKUPS_SHOW = \"\/backups\/show\/\"\n\tUPDATE = \"update\"\n\tDELETE = \"delete\/\"\n\tACCOUNTID = \"account_id\"\n\tASSEMBLYID = \"asm_id\"\n\tBACKUP_PUBLIC_URL = \"backup_public_url\"\n)\n\ntype ApiBackups struct {\n\tJsonClaz string `json:\"json_claz\" cql:\"json_claz\"`\n\tResults []Backups `json:\"results\" cql:\"results\"`\n}\n\n\/\/The grand elephant for megam cloud platform.\ntype Backups struct {\n\tId string `json:\"id\" cql:\"id\"`\n\tImageId string `json:\"image_id\" cql:\"image_id\"`\n\tOrgId string `json:\"org_id\" cql:\"org_id\"`\n\tAccountId string `json:\"account_id\" cql:\"account_id\"`\n\tName string `json:\"name\" cql:\"name\"`\n\tAssemblyId string `json:\"asm_id\" cql:\"asm_id\"`\n\tJsonClaz string `json:\"json_claz\" cql:\"json_claz\"`\n\tCreatedAt string `json:\"created_at\" cql:\"created_at\"`\n\tStatus string `json:\"status\" cql:\"status\"`\n\tTosca string `json:\"tosca_type\" cql:\"tosca_type\"`\n\tInputs pairs.JsonPairs `json:\"inputs\" cql:\"inputs\"`\n\tOutputs pairs.JsonPairs `json:\"outputs\" cql:\"outputs\"`\n}\n\nfunc (s *Backups) String() string {\n\tif d, err := yaml.Marshal(s); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc CreateImage(opts *DiskOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := lw.LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].SaveImage(opts.B, writer)\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tslog := outBuffer.String()\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(elapsed.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\treturn nil\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc DeleteImage(opts *DiskOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := lw.LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].DeleteImage(opts.B, writer)\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tslog := outBuffer.String()\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(elapsed.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\treturn nil\n}\n\n\/** A public function which pulls the backup for disk save as image.\nand any others we do. **\/\nfunc GetBackup(id, email string) (*Backups, error) {\n\tcl := api.NewClient(newArgs(email, \"\"), BACKUPS_SHOW+id)\n\n\tresponse, err := cl.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &ApiBackups{}\n\terr = json.Unmarshal(response, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta := &res.Results[0]\n\treturn a, nil\n}\n\n\/** A public function which pulls the snapshot for disk save as image.\nand any others we do. **\/\nfunc (s *Backups) GetBox() ([]Backups, error) {\n\tcl := api.NewClient(newArgs(meta.MC.MasterUser, \"\"), \"\/admin\"+APIBACKUPS)\n\tresponse, err := cl.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &ApiBackups{}\n\terr = json.Unmarshal(response, res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Results, nil\n}\n\nfunc (s *Backups) UpdateBackup() error {\n\tcl := api.NewClient(newArgs(s.AccountId, s.OrgId), APIBACKUPS+UPDATE)\n\tif _, err := cl.Post(s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (s *Backups) RemoveBackup() error {\n\tcl := api.NewClient(newArgs(s.AccountId, s.OrgId), APIBACKUPS+s.AssemblyId+\"\/\"+s.Id)\n\tif _, err := cl.Delete(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/make cartons from backups.\nfunc (a *Backups) MkCartons() (Cartons, error) {\n\tnewCs := make(Cartons, 0, 1)\n\tif len(strings.TrimSpace(a.AssemblyId)) > 1 && a.Tosca != utils.BACKUP_NEW {\n\t\tif ca, err := mkCarton(a.Id, a.AssemblyId, a.AccountId); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tca.toBox() \/\/on success, make a carton2box if BoxLevel is BoxZero\n\t\t\tnewCs = append(newCs, ca) \/\/on success append carton\n\t\t}\n\t} else {\n\t\tca, err := a.mkCarton()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tca.toBox()\n\t\tnewCs = append(newCs, ca)\n\t}\n\n\tlog.Debugf(\"Cartons %v\", newCs)\n\treturn newCs, nil\n}\n\nfunc (a *Backups) mkCarton() (*Carton, error) {\n\tact, err := new(Account).get(newArgs(a.AccountId, \"\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := make([]provision.Box, 0, 0)\n\treturn &Carton{\n\t\tId: a.AssemblyId,\n\t\tCartonsId: a.Id,\n\t\tOrgId: a.OrgId,\n\t\tName: a.Name,\n\t\tTosca: a.Tosca,\n\t\tAccountId: a.AccountId,\n\t\tAuthority: act.States.Authority,\n\t\tImageVersion: a.imageVersion(),\n\t\tProvider: a.provider(),\n\t\tRegion: a.region(),\n\t\tImageName: a.imageName(),\n\t\tPublicUrl: a.publicUrl(),\n\t\tBoxes: &b,\n\t\tStatus: utils.Status(a.Status),\n\t}, nil\n}\n\nfunc (s *Backups) Sizeof() string {\n\treturn s.Outputs.Match(\"image_size\")\n}\n\nfunc (b *Backups) region() string {\n\treturn b.Inputs.Match(REGION)\n}\n\nfunc (b *Backups) provider() string {\n\tp := b.Inputs.Match(utils.PROVIDER)\n\tif p != \"\" {\n\t\treturn p\n\t}\n\treturn utils.PROVIDER_ONE\n}\n\nfunc (b *Backups) imageVersion() string {\n\treturn b.Inputs.Match(IMAGE_VERSION)\n}\n\nfunc (b *Backups) publicUrl() string {\n\treturn b.Inputs.Match(BACKUP_PUBLIC_URL)\n}\n\nfunc (b *Backups) imageName() string {\n\tp := b.Inputs.Match(BACKUPNAME)\n\tif p != \"\" {\n\t\treturn p\n\t}\n\treturn b.Name\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"strconv\"\n)\n\nfunc NewContactList(size int) ContactList {\n\treturn ContactList{i: -1, list: make([]*Contact, 0, size)}\n}\n\ntype ContactList struct {\n\ti int\n\tlist []*Contact\n}\n\nfunc (c *ContactList) Next() *Contact {\n\tc.i++\n\n\tif len(c.list) < c.i+1 {\n\t\tc.i--\n\t\treturn nil\n\t}\n\n\treturn c.list[c.i]\n}\n\nfunc (c *ContactList) Append(contact *Contact) {\n\tc.list = append(c.list, contact)\n}\n\nfunc (c ContactList) Ids() []byte {\n\tids := bytes.NewBuffer([]byte(\"{\"))\n\n\tstrings := make([][]byte, 0)\n\tfor _, next := range c.list {\n\t\tif id, err := next.Id.Value(); err == nil {\n\t\t\tstrings = append(strings, []byte(strconv.FormatInt(id.(int64), 10)))\n\t\t}\n\t}\n\tresult := bytes.Join(strings, []byte(\",\"))\n\n\tif _, err := ids.Write(result); err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tif _, err := ids.Write([]byte(\"}\")); err != nil {\n\t\tlog.Print(err)\n\t\treturn []byte(\"{}\")\n\t}\n\n\treturn ids.Bytes()\n}\n\nfunc (c ContactList) Any() bool {\n\treturn len(c.list) > 0\n}\n\nfunc (c ContactList) Items() []*Contact {\n\treturn c.list\n}\n<commit_msg>[kami][ContactList] Refactor<commit_after>package main\n\nfunc NewContactList(size int) ContactList {\n\treturn ContactList{i: -1, Items: make([]*Contact, 0, size)}\n}\n\ntype ContactList struct {\n\ti int\n\tItems []*Contact\n}\n\nfunc (c *ContactList) Next() *Contact {\n\tc.i++\n\n\tif len(c.Items) < c.i+1 {\n\t\tc.i--\n\t\treturn nil\n\t}\n\n\treturn c.Items[c.i]\n}\n\nfunc (c ContactList) Any() bool {\n\treturn len(c.Items) > 0\n}\n\nfunc (c ContactList) Ids() []int32 {\n\tarr := make([]int32, len(c.Items))\n\n\tfor i, contact := range c.Items {\n\t\tarr[i] = *contact.Id\n\t}\n\n\treturn arr\n}\n\nfunc (c ContactList) IntIds() []int {\n\tarr := make([]int, len(c.Items))\n\n\tfor i, contact := range c.Items {\n\t\tarr[i] = int(*contact.Id)\n\t}\n\n\treturn arr\n}\n<|endoftext|>"} {"text":"<commit_before>package deps\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"sort\"\n\n\t\"github.com\/fatih\/set\"\n)\n\nconst (\n\tDepsGoPath = \"gopackage\"\n\tgopackageFile = \"gopackage.json\"\n)\n\ntype Deps struct {\n\t\/\/ Packages is written as the importPath of a given package(s).\n\tPackages []string `json:\"packages\"`\n\n\t\/\/ GoVersion defines the Go version needed at least as a minumum.\n\tGoVersion string `json:\"goVersion\"`\n\n\t\/\/ Dependencies defines the dependency of the given Packages. If multiple\n\t\/\/ packages are defined, each dependency will point to the HEAD unless\n\t\/\/ changed manually.\n\tDependencies []string `json:\"dependencies\"`\n\n\t\/\/ BuildGoPath is used to fetch dependencies of the given Packages\n\tBuildGoPath string\n\n\t\/\/ currentGoPath, is taken from current GOPATH environment variable\n\tcurrentGoPath string\n}\n\n\/\/ LoadDeps returns a new Deps struct with the given packages. It founds the\n\/\/ dependencies and populates the fields in Deps. After LoadDeps one can use\n\/\/ InstallDeps() to install\/build the binary for the given pkg or use\n\/\/ GetDeps() to download and vendorize the dependencies of the given pkg.\nfunc LoadDeps(pkgs ...string) (*Deps, error) {\n\tpackages, err := listPackages(pkgs...)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\t\/\/ get all dependencies for applications defined above\n\tdependencies := set.New()\n\tfor _, pkg := range packages {\n\t\tfor _, imp := range pkg.Deps {\n\t\t\tdependencies.Add(imp)\n\t\t}\n\t}\n\n\t\/\/ clean up deps\n\t\/\/ 1. remove std lib paths\n\tcontext := build.Default\n\tthirdPartyDeps := make([]string, 0)\n\n\tfor _, importPath := range dependencies.StringSlice() {\n\t\tp, err := context.Import(importPath, \".\", build.AllowBinary)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\t\/\/ do not include std lib\n\t\tif p.Goroot {\n\t\t\tcontinue\n\t\t}\n\n\t\tthirdPartyDeps = append(thirdPartyDeps, importPath)\n\t}\n\n\tsort.Strings(thirdPartyDeps)\n\n\tdeps := &Deps{\n\t\tPackages: pkgs,\n\t\tDependencies: thirdPartyDeps,\n\t\tGoVersion: runtime.Version(),\n\t}\n\n\terr = deps.populateGoPaths()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn deps, nil\n}\n\nfunc (d *Deps) populateGoPaths() error {\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\treturn errors.New(\"GOPATH is not set\")\n\t}\n\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.currentGoPath = gopath\n\td.BuildGoPath = path.Join(pwd, DepsGoPath)\n\treturn nil\n}\n\n\/\/ InstallDeps calls \"go install\" on the given packages and installs them\n\/\/ to deps.BuildGoPath\/pkgname\nfunc (d *Deps) InstallDeps() error {\n\tif !compareGoVersions(d.GoVersion, runtime.Version()) {\n\t\treturn fmt.Errorf(\"Go Version is not satisfied\\nSystem Go Version: '%s' Expected: '%s'\",\n\t\t\truntime.Version(), d.GoVersion)\n\t}\n\n\t\/\/ expand current path\n\tif d.BuildGoPath != d.currentGoPath {\n\t\tos.Setenv(\"GOPATH\", fmt.Sprintf(\"%s:%s\", d.BuildGoPath, d.currentGoPath))\n\t}\n\n\t\/\/ another approach is let them building with a single gobin and then move\n\t\/\/ the final binaries into new directories based on the binary filename.\n\tfor _, pkg := range d.Packages {\n\t\tpkgname := path.Base(pkg)\n\t\tbinpath := fmt.Sprintf(\"%s\/%s\/\", d.BuildGoPath, pkgname)\n\n\t\tos.MkdirAll(binpath, 0755)\n\t\tos.Setenv(\"GOBIN\", binpath)\n\n\t\targs := []string{\"install\", pkg}\n\t\tcmd := exec.Command(\"go\", args...)\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc goPackagePath() string {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn path.Join(pwd, gopackageFile)\n}\n\n\/\/ WriteJSON writes the state of d into a json file, useful to restore again\n\/\/ with ReadJSON().\nfunc (d *Deps) WriteJSON() error {\n\tdata, err := json.MarshalIndent(d, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(goPackagePath(), data, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ ReadJSON() restores a given gopackage.json to create a new deps struct.\nfunc ReadJson() (*Deps, error) {\n\tdata, err := ioutil.ReadFile(goPackagePath())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := new(Deps)\n\terr = json.Unmarshal(data, d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.populateGoPaths()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}\n\n\/\/ GetDeps calls \"go get -d\" to download all dependencies for the packages\n\/\/ defined in d.\nfunc (d *Deps) GetDeps() error {\n\tos.MkdirAll(d.BuildGoPath, 0755)\n\tos.Setenv(\"GOPATH\", d.BuildGoPath)\n\n\tfor _, pkg := range d.Dependencies {\n\t\tfmt.Println(\"go get\", pkg)\n\t\tcmd := exec.Command(\"go\", []string{\"get\", \"-d\", pkg}...)\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>util\/deps: revert gopath back to original state<commit_after>package deps\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"sort\"\n\n\t\"github.com\/fatih\/set\"\n)\n\nconst (\n\tDepsGoPath = \"gopackage\"\n\tgopackageFile = \"gopackage.json\"\n)\n\ntype Deps struct {\n\t\/\/ Packages is written as the importPath of a given package(s).\n\tPackages []string `json:\"packages\"`\n\n\t\/\/ GoVersion defines the Go version needed at least as a minumum.\n\tGoVersion string `json:\"goVersion\"`\n\n\t\/\/ Dependencies defines the dependency of the given Packages. If multiple\n\t\/\/ packages are defined, each dependency will point to the HEAD unless\n\t\/\/ changed manually.\n\tDependencies []string `json:\"dependencies\"`\n\n\t\/\/ BuildGoPath is used to fetch dependencies of the given Packages\n\tBuildGoPath string\n\n\t\/\/ currentGoPath, is taken from current GOPATH environment variable\n\tcurrentGoPath string\n}\n\n\/\/ LoadDeps returns a new Deps struct with the given packages. It founds the\n\/\/ dependencies and populates the fields in Deps. After LoadDeps one can use\n\/\/ InstallDeps() to install\/build the binary for the given pkg or use\n\/\/ GetDeps() to download and vendorize the dependencies of the given pkg.\nfunc LoadDeps(pkgs ...string) (*Deps, error) {\n\tpackages, err := listPackages(pkgs...)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\t\/\/ get all dependencies for applications defined above\n\tdependencies := set.New()\n\tfor _, pkg := range packages {\n\t\tfor _, imp := range pkg.Deps {\n\t\t\tdependencies.Add(imp)\n\t\t}\n\t}\n\n\t\/\/ clean up deps\n\t\/\/ 1. remove std lib paths\n\tcontext := build.Default\n\tthirdPartyDeps := make([]string, 0)\n\n\tfor _, importPath := range dependencies.StringSlice() {\n\t\tp, err := context.Import(importPath, \".\", build.AllowBinary)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\t\/\/ do not include std lib\n\t\tif p.Goroot {\n\t\t\tcontinue\n\t\t}\n\n\t\tthirdPartyDeps = append(thirdPartyDeps, importPath)\n\t}\n\n\tsort.Strings(thirdPartyDeps)\n\n\tdeps := &Deps{\n\t\tPackages: pkgs,\n\t\tDependencies: thirdPartyDeps,\n\t\tGoVersion: runtime.Version(),\n\t}\n\n\terr = deps.populateGoPaths()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn deps, nil\n}\n\nfunc (d *Deps) populateGoPaths() error {\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\treturn errors.New(\"GOPATH is not set\")\n\t}\n\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.currentGoPath = gopath\n\td.BuildGoPath = path.Join(pwd, DepsGoPath)\n\treturn nil\n}\n\n\/\/ InstallDeps calls \"go install\" on the given packages and installs them\n\/\/ to deps.BuildGoPath\/pkgname\nfunc (d *Deps) InstallDeps() error {\n\tif !compareGoVersions(d.GoVersion, runtime.Version()) {\n\t\treturn fmt.Errorf(\"Go Version is not satisfied\\nSystem Go Version: '%s' Expected: '%s'\",\n\t\t\truntime.Version(), d.GoVersion)\n\t}\n\n\t\/\/ expand current path\n\tif d.BuildGoPath != d.currentGoPath {\n\t\tos.Setenv(\"GOPATH\", fmt.Sprintf(\"%s:%s\", d.BuildGoPath, d.currentGoPath))\n\t}\n\n\t\/\/ revert gopath back, so that we don't mess up gopath for other applications\n\tdefer os.Setenv(\"GOPATH\", d.currentGoPath)\n\n\t\/\/ another approach is let them building with a single gobin and then move\n\t\/\/ the final binaries into new directories based on the binary filename.\n\tfor _, pkg := range d.Packages {\n\t\tpkgname := path.Base(pkg)\n\t\tbinpath := fmt.Sprintf(\"%s\/%s\/\", d.BuildGoPath, pkgname)\n\n\t\tos.MkdirAll(binpath, 0755)\n\t\tos.Setenv(\"GOBIN\", binpath)\n\n\t\targs := []string{\"install\", pkg}\n\t\tcmd := exec.Command(\"go\", args...)\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc goPackagePath() string {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn path.Join(pwd, gopackageFile)\n}\n\n\/\/ WriteJSON writes the state of d into a json file, useful to restore again\n\/\/ with ReadJSON().\nfunc (d *Deps) WriteJSON() error {\n\tdata, err := json.MarshalIndent(d, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(goPackagePath(), data, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ ReadJSON() restores a given gopackage.json to create a new deps struct.\nfunc ReadJson() (*Deps, error) {\n\tdata, err := ioutil.ReadFile(goPackagePath())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := new(Deps)\n\terr = json.Unmarshal(data, d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.populateGoPaths()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}\n\n\/\/ GetDeps calls \"go get -d\" to download all dependencies for the packages\n\/\/ defined in d.\nfunc (d *Deps) GetDeps() error {\n\tos.MkdirAll(d.BuildGoPath, 0755)\n\tos.Setenv(\"GOPATH\", d.BuildGoPath)\n\n\tfor _, pkg := range d.Dependencies {\n\t\tfmt.Println(\"go get\", pkg)\n\t\tcmd := exec.Command(\"go\", []string{\"get\", \"-d\", pkg}...)\n\t\tcmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Keithley RF Switch driver\r\n\r\npackage keithley\r\n\r\nimport (\r\n\t\"fmt\"\r\n\r\n\tvi \"github.com\/jpoirier\/visa\"\r\n)\r\n\r\ntype Keithley struct {\r\n\tvi.Visa\r\n\tinstr vi.Object\r\n}\r\n\r\n\/\/ RF_Switch Class - works with Keithley S46 RF Switch.\r\n\/\/ Eight SPDT unterminated coaxial relays (2-pole) and four multi-pole unterminated coaxial relays.\r\n\/\/ Relay A, multipole = Chan 1...6\r\n\/\/ Relay B, multipole = Chan 7..12\r\n\/\/ Relay C, multipole = Chan 13..18\r\n\/\/ Relay D, multipole = Chan 19..24\r\n\/\/ Relay 1..8, 2-pole = Chan 25..32\r\n\/\/ Caution: Do not close more than one RF path per multiport switch.\r\n\r\n\/\/ OpenGpib Opens a session to the specified resource.\r\nfunc (k *Keithley) OpenGpib(rm vi.Session, ctrl, addr, mode, timeout uint32) (status vi.Status) {\r\n\tname := fmt.Sprintf(\"GPIB%d::%d\", ctrl, ddr)\r\n\tk.instr, status = rm.Open(name, mode, timeout)\r\n\treturn\r\n}\r\n\r\n\/\/ OpenTcp Opens a session to the specified resource.\r\nfunc (k *Keithley) OpenTcp(rm vi.Session, ip string, mode, timeout uint32) (status vi.Status) {\r\n\tname := fmt.Sprintf(\"TCPIP::%s::INSTR\", addr)\r\n\tk.instr, status = rm.Open(name, mode, timeout)\r\n\treturn\r\n}\r\n\r\n\/\/ Reset Resets the switch unit.\r\nfunc (k *Keithley) Reset() (status vi.Status) {\r\n\tb := []byte(\"*RST\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ OpenChan Opens the specified channel. Where an open channel does not\r\n\/\/ allow a signal to pass through.\r\nfunc (k *Keithley) OpenChan(ch uint32) (status vi.Status) {\r\n\tb := fmt.Sprintf(\"OPEN (@%d)\", ch)\r\n\t_, status = k.instr.Write([]byte(b), uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ OpenAllChans Opens all channels.\r\nfunc (k *Keithley) OpenAllChans() (status vi.Status) {\r\n\tb := []byte(\"OPEN:ALL\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ CloseChan Closes the specified channel. Note, All other channels on relay\r\n\/\/ are opened first to prevent multiple closed relays leading to damage.\r\nfunc (k *Keithley) CloseChan(ch uint32) (status vi.Status) {\r\n\t\/\/ Determine if ch is part of 2-port relay or multi-port relay (A..D)\r\n\t\/\/ Multi-Port Relay, A..D\r\n\tif ch > 0 && ch < 25 {\r\n\t\t\/\/ Open all ports on this relay\r\n\t\tfor i := 1; i < 7; i++ {\r\n\t\t\tc := int((ch-1)\/6)*6 + i\r\n\t\t\tk.OpenChan(uint32(c))\r\n\t\t}\r\n\t}\r\n\tb := fmt.Sprintf(\"CLOSE (@%d)\", ch)\r\n\t_, status = k.instr.Write([]byte(b), uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ ClosedChanList Returns a list of closed channels.\r\nfunc (k *Keithley) ClosedChanList(ch uint32) (list string, status vi.Status) {\r\n\t\/\/ Returns list of closed channels.\r\n\t\/\/ RF Switch returns format '(@1,2,3)'.\r\n\t\/\/ If no channels closed, switch returns '(@)'.\r\n\r\n\tb := []byte(\"CLOSE?\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\tif status < vi.SUCCESS {\r\n\t\treturn\r\n\t}\r\n\tbuffer, _, status := k.instr.Read(100)\r\n\tif status < vi.SUCCESS {\r\n\t\treturn\r\n\t} else {\r\n\t\treturn string(buffer), status\r\n\t}\r\n}\r\n\r\n\/\/ if len(strClosedChans) > 3: # Len always greater than 3 if channel in the list.\r\n\/\/ if strClosedChans.find(\",\") >= 0:\r\n\/\/ closedChans = strClosedChans[2:len(strClosedChans)-1].split(\",\")\r\n\/\/ else:\r\n\/\/ closedChans.append(strClosedChans[2:len(strClosedChans)-1])\r\n<commit_msg>keithley driver fixes<commit_after>\/\/ Keithley RF Switch driver\r\n\r\npackage keithley\r\n\r\nimport (\r\n\t\"fmt\"\r\n\r\n\tvi \"github.com\/jpoirier\/visa\"\r\n)\r\n\r\ntype Keithley struct {\r\n\tvi.Visa\r\n\tinstr vi.Object\r\n}\r\n\r\n\/\/ RF_Switch Class - works with Keithley S46 RF Switch.\r\n\/\/ Eight SPDT unterminated coaxial relays (2-pole) and four multi-pole unterminated coaxial relays.\r\n\/\/ Relay A, multipole = Chan 1...6\r\n\/\/ Relay B, multipole = Chan 7..12\r\n\/\/ Relay C, multipole = Chan 13..18\r\n\/\/ Relay D, multipole = Chan 19..24\r\n\/\/ Relay 1..8, 2-pole = Chan 25..32\r\n\/\/ Caution: Do not close more than one RF path per multiport switch.\r\n\r\n\/\/ OpenGpib Opens a session to the specified resource.\r\nfunc (k *Keithley) OpenGpib(rm vi.Session, ctrl, addr, mode, timeout uint32) (status vi.Status) {\r\n\tname := fmt.Sprintf(\"GPIB%d::%d\", ctrl, addr)\r\n\tk.instr, status = rm.Open(name, mode, timeout)\r\n\treturn\r\n}\r\n\r\n\/\/ OpenTcp Opens a session to the specified resource.\r\nfunc (k *Keithley) OpenTcp(rm vi.Session, ip string, mode, timeout uint32) (status vi.Status) {\r\n\tname := fmt.Sprintf(\"TCPIP::%s::INSTR\", ip)\r\n\tk.instr, status = rm.Open(name, mode, timeout)\r\n\treturn\r\n}\r\n\r\n\/\/ Reset Resets the switch unit.\r\nfunc (k *Keithley) Reset() (status vi.Status) {\r\n\tb := []byte(\"*RST\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ OpenChan Opens the specified channel. Where an open channel does not\r\n\/\/ allow a signal to pass through.\r\nfunc (k *Keithley) OpenChan(ch uint32) (status vi.Status) {\r\n\tb := fmt.Sprintf(\"OPEN (@%d)\", ch)\r\n\t_, status = k.instr.Write([]byte(b), uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ OpenAllChans Opens all channels.\r\nfunc (k *Keithley) OpenAllChans() (status vi.Status) {\r\n\tb := []byte(\"OPEN:ALL\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ CloseChan Closes the specified channel. Note, All other channels on relay\r\n\/\/ are opened first to prevent multiple closed relays leading to damage.\r\nfunc (k *Keithley) CloseChan(ch uint32) (status vi.Status) {\r\n\t\/\/ Determine if ch is part of 2-port relay or multi-port relay (A..D)\r\n\t\/\/ Multi-Port Relay, A..D\r\n\tif ch > 0 && ch < 25 {\r\n\t\t\/\/ Open all ports on this relay\r\n\t\tfor i := 1; i < 7; i++ {\r\n\t\t\tc := int((ch-1)\/6)*6 + i\r\n\t\t\tk.OpenChan(uint32(c))\r\n\t\t}\r\n\t}\r\n\tb := fmt.Sprintf(\"CLOSE (@%d)\", ch)\r\n\t_, status = k.instr.Write([]byte(b), uint32(len(b)))\r\n\treturn\r\n}\r\n\r\n\/\/ ClosedChanList Returns a list of closed channels.\r\nfunc (k *Keithley) ClosedChanList(ch uint32) (list string, status vi.Status) {\r\n\t\/\/ Returns list of closed channels.\r\n\t\/\/ RF Switch returns format '(@1,2,3)'.\r\n\t\/\/ If no channels closed, switch returns '(@)'.\r\n\r\n\tb := []byte(\"CLOSE?\")\r\n\t_, status = k.instr.Write(b, uint32(len(b)))\r\n\tif status < vi.SUCCESS {\r\n\t\treturn\r\n\t}\r\n\tbuffer, _, status := k.instr.Read(100)\r\n\tif status < vi.SUCCESS {\r\n\t\treturn\r\n\t} else {\r\n\t\treturn string(buffer), status\r\n\t}\r\n}\r\n\r\n\/\/ if len(strClosedChans) > 3: # Len always greater than 3 if channel in the list.\r\n\/\/ if strClosedChans.find(\",\") >= 0:\r\n\/\/ closedChans = strClosedChans[2:len(strClosedChans)-1].split(\",\")\r\n\/\/ else:\r\n\/\/ closedChans.append(strClosedChans[2:len(strClosedChans)-1])\r\n<|endoftext|>"} {"text":"<commit_before>package runc\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"github.com\/inconshreveable\/log15\"\n\t\"github.com\/polydawn\/gosh\"\n\t\"github.com\/spacemonkeygo\/errors\"\n\t\"github.com\/spacemonkeygo\/errors\/try\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/executor\"\n\t\"polydawn.net\/repeatr\/executor\/basicjob\"\n\t\"polydawn.net\/repeatr\/executor\/util\"\n\t\"polydawn.net\/repeatr\/io\"\n\t\"polydawn.net\/repeatr\/io\/assets\"\n\t\"polydawn.net\/repeatr\/lib\/flak\"\n\t\"polydawn.net\/repeatr\/lib\/streamer\"\n)\n\n\/\/ interface assertion\nvar _ executor.Executor = &Executor{}\n\ntype Executor struct {\n\tworkspacePath string\n}\n\nfunc (e *Executor) Configure(workspacePath string) {\n\te.workspacePath = workspacePath\n}\n\nfunc (e *Executor) Start(f def.Formula, id def.JobID, stdin io.Reader, journal io.Writer) def.Job {\n\t\/\/ TODO this function sig and its interface are long overdue for an aggressive refactor.\n\t\/\/ - `journal` is Rong. The streams mux should be accessible after this function's scope!\n\t\/\/ - either that or it's time to get cracking on saving the stream mux as an output\n\t\/\/ - `journal` should still be a thing, but it should be a logger.\n\t\/\/ - All these other values should move along in a `Job` struct\n\t\/\/ - `BasicJob` sorta started, but is drunk:\n\t\/\/ - if we're gonna have that, it's incomplete on the inputs\n\t\/\/ - for some reason it mixes in responsibility for waiting for some of the ouputs\n\t\/\/ - that use of channels and public fields is stupidly indefensive\n\t\/\/ - The current `Job` interface is in the wrong package\n\t\/\/ - almost all of the scopes in these functions is wrong\n\t\/\/ - they should be realigned until they actually assist the defers and cleanups\n\t\/\/ - e.g. withErrorCapture, withJobWorkPath, withFilesystems, etc\n\n\t\/\/ Prepare the forumla for execution on this host\n\tdef.ValidateAll(&f)\n\n\tjob := basicjob.New(id)\n\tjobReady := make(chan struct{})\n\n\tgo func() {\n\t\t\/\/ Run the formula in a temporary directory\n\t\tflak.WithDir(func(dir string) {\n\n\t\t\t\/\/ spool our output to a muxed stream\n\t\t\tvar strm streamer.Mux\n\t\t\tstrm = streamer.CborFileMux(filepath.Join(dir, \"log\"))\n\t\t\toutS := strm.Appender(1)\n\t\t\terrS := strm.Appender(2)\n\t\t\tjob.Streams = strm\n\t\t\tdefer func() {\n\t\t\t\t\/\/ Regardless of how the job ends (or even if it fails the remaining setup), output streams must be terminated.\n\t\t\t\toutS.Close()\n\t\t\t\terrS.Close()\n\t\t\t}()\n\n\t\t\t\/\/ Job is ready to stream process output\n\t\t\tclose(jobReady)\n\n\t\t\t\/\/ Set up a logger. Tag all messages with this jobid.\n\t\t\tlogger := log15.New(log15.Ctx{\"JobID\": id})\n\t\t\tlogger.SetHandler(log15.StreamHandler(journal, log15.TerminalFormat()))\n\n\t\t\tjob.Result = e.Run(f, job, dir, stdin, outS, errS, logger)\n\t\t}, e.workspacePath, \"job\", string(job.Id()))\n\n\t\t\/\/ Directory is clean; job complete\n\t\tclose(job.WaitChan)\n\t}()\n\n\t<-jobReady\n\treturn job\n}\n\n\/\/ Executes a job, catching any panics.\nfunc (e *Executor) Run(f def.Formula, j def.Job, d string, stdin io.Reader, outS, errS io.WriteCloser, journal log15.Logger) def.JobResult {\n\tr := def.JobResult{\n\t\tID: j.Id(),\n\t\tExitCode: -1,\n\t}\n\n\ttry.Do(func() {\n\t\te.Execute(f, j, d, &r, outS, errS, journal)\n\t}).Catch(executor.Error, func(err *errors.Error) {\n\t\tr.Error = err\n\t}).Catch(integrity.Error, func(err *errors.Error) {\n\t\tr.Error = err\n\t}).CatchAll(func(err error) {\n\t\tr.Error = executor.UnknownError.Wrap(err).(*errors.Error)\n\t}).Done()\n\n\treturn r\n}\n\n\/\/ Execute a formula in a specified directory. MAY PANIC.\nfunc (e *Executor) Execute(formula def.Formula, job def.Job, jobPath string, result *def.JobResult, stdout, stderr io.WriteCloser, journal log15.Logger) {\n\trootfsPath := filepath.Join(jobPath, \"rootfs\")\n\n\t\/\/ Prepare inputs\n\ttransmat := util.DefaultTransmat()\n\tinputArenas := util.ProvisionInputs(transmat, formula.Inputs, journal)\n\tutil.ProvisionOutputs(formula.Outputs, rootfsPath, journal)\n\n\t\/\/ Assemble filesystem\n\tassembly := util.AssembleFilesystem(\n\t\tutil.BestAssembler(),\n\t\trootfsPath,\n\t\tformula.Inputs,\n\t\tinputArenas,\n\t\tformula.Action.Escapes.Mounts,\n\t\tjournal,\n\t)\n\tdefer assembly.Teardown()\n\n\t\/\/ Emit configs for runc.\n\truncConfigJsonPath := filepath.Join(jobPath, \"config.json\")\n\tcfg := EmitRuncConfigStruct(formula, rootfsPath)\n\tbuf, err := json.Marshal(cfg)\n\tif err != nil {\n\t\tpanic(executor.UnknownError.Wrap(err))\n\t}\n\tioutil.WriteFile(runcConfigJsonPath, buf, 0600)\n\truncRuntimeJsonPath := filepath.Join(jobPath, \"runtime.json\")\n\tcfg = EmitRuncRuntimeStruct(formula)\n\tbuf, err = json.Marshal(cfg)\n\tif err != nil {\n\t\tpanic(executor.UnknownError.Wrap(err))\n\t}\n\tioutil.WriteFile(runcRuntimeJsonPath, buf, 0600)\n\n\t\/\/ Routing logs through a fifo appears to work, but we're going to use a file as a buffer anyway:\n\t\/\/ in the event of nasty breakdowns, it's preferable that the runc log remain readable even if repeatr was the process to end first.\n\tlogPath := filepath.Join(jobPath, \"runc-debug.log\")\n\n\t\/\/ Get handle to invokable runc plugin.\n\truncPath := filepath.Join(assets.Get(\"runc\"), \"bin\/runc\")\n\n\t\/\/ Prepare command to exec\n\targs := []string{\n\t\t\"--root\", filepath.Join(e.workspacePath, \"shared\"), \/\/ a tmpfs would be appropriate\n\t\t\"--log\", logPath,\n\t\t\"--log-format\", \"json\",\n\t\t\"start\",\n\t\t\"--config-file\", runcConfigJsonPath,\n\t\t\"--runtime-file\", runcRuntimeJsonPath,\n\t}\n\tcmd := exec.Command(runcPath, args...)\n\tcmd.Stdin = nil\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\n\t\/\/ launch execution.\n\t\/\/ transform gosh's typed errors to repeatr's hierarchical errors.\n\t\/\/ this is... not untroubled code: since we're invoking a helper that's then\n\t\/\/ proxying the exec even further, most errors are fatal (the mapping here is\n\t\/\/ very different than in e.g. chroot executor, and provides much less meaning).\n\tvar proc gosh.Proc\n\ttry.Do(func() {\n\t\tproc = gosh.ExecProcCmd(cmd)\n\t}).CatchAll(func(err error) {\n\t\tswitch err.(type) {\n\t\tcase gosh.NoSuchCommandError:\n\t\t\tpanic(executor.ConfigError.New(\"runc binary is missing\"))\n\t\tcase gosh.NoArgumentsError:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\tcase gosh.NoSuchCwdError:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\tcase gosh.ProcMonitorError:\n\t\t\tpanic(executor.TaskExecError.Wrap(err))\n\t\tdefault:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\t}\n\t}).Done()\n\n\tvar runcLog io.ReadCloser\n\truncLog, err = os.OpenFile(logPath, os.O_CREATE|os.O_RDONLY, 0644)\n\t\/\/ note this open races child; doesn't matter.\n\tif err != nil {\n\t\tpanic(executor.TaskExecError.New(\"failed to tail runc log: %s\", err))\n\t}\n\t\/\/ swaddle the file in userland-interruptable reader;\n\t\/\/ obviously we don't want to stop watching the logs when we hit the end of the still-growing file.\n\truncLog = streamer.NewTailReader(runcLog)\n\t\/\/ close the reader when we return (which means after waiting for the exit code, which overall DTRT).\n\tdefer runcLog.Close()\n\n\t\/\/ Proxy runc's logs out in realtime; also, detect errors and exit statuses from the stream.\n\tvar realError error\n\tgo func() {\n\t\tdec := json.NewDecoder(runcLog)\n\t\tfor {\n\t\t\tvar logMsg map[string]string\n\t\t\terr := dec.Decode(&logMsg)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpanic(executor.TaskExecError.New(\"unparsable log from runc: %s\", err))\n\t\t\t}\n\t\t\t\/\/ remap\n\t\t\tif _, ok := logMsg[\"msg\"]; !ok {\n\t\t\t\tlogMsg[\"msg\"] = \"\"\n\t\t\t}\n\t\t\tctx := log15.Ctx{}\n\t\t\tfor k, v := range logMsg {\n\t\t\t\tif k == \"msg\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tctx[\"runc-\"+k] = v\n\t\t\t}\n\t\t\t\/\/ with runc, everything we hear is at least a warning.\n\t\t\tjournal.Warn(logMsg[\"msg\"], ctx)\n\t\t\t\/\/ actually filtering the interesting structures and raising issues\n\t\t\t\/\/ note that we don't need to capture the \"exit status\" message, because that\n\t\t\t\/\/ code *does* come out correctly... but we do need to sometimes override it again.\n\t\t\tswitch logMsg[\"msg\"] {\n\t\t\tcase \"Container start failed: [8] System error: no such file or directory\":\n\t\t\t\trealError = executor.NoSuchCwdError.New(\"cannot set cwd to %q: no such file or directory\", formula.Action.Cwd)\n\t\t\tcase \"Container start failed: [8] System error: not a directory\":\n\t\t\t\trealError = executor.NoSuchCwdError.New(\"cannot set cwd to %q: not a directory\", formula.Action.Cwd)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Wait for the job to complete\n\tresult.ExitCode = proc.GetExitCode()\n\t\/\/runcLog.Close() \/\/ this could\/should happen before PreserveOutputs. see todo about fixing scopes.\n\n\t\/\/ If we had a CnC error (rather than the real subprocess exit code), reset code, and raise\n\tif realError != nil {\n\t\tresult.ExitCode = -1\n\t\tpanic(realError)\n\t}\n\n\t\/\/ Save outputs\n\tresult.Outputs = util.PreserveOutputs(transmat, formula.Outputs, rootfsPath, journal)\n}\n<commit_msg>Handle command-not-found in runc. With todos.<commit_after>package runc\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/inconshreveable\/log15\"\n\t\"github.com\/polydawn\/gosh\"\n\t\"github.com\/spacemonkeygo\/errors\"\n\t\"github.com\/spacemonkeygo\/errors\/try\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/executor\"\n\t\"polydawn.net\/repeatr\/executor\/basicjob\"\n\t\"polydawn.net\/repeatr\/executor\/util\"\n\t\"polydawn.net\/repeatr\/io\"\n\t\"polydawn.net\/repeatr\/io\/assets\"\n\t\"polydawn.net\/repeatr\/lib\/flak\"\n\t\"polydawn.net\/repeatr\/lib\/streamer\"\n)\n\n\/\/ interface assertion\nvar _ executor.Executor = &Executor{}\n\ntype Executor struct {\n\tworkspacePath string\n}\n\nfunc (e *Executor) Configure(workspacePath string) {\n\te.workspacePath = workspacePath\n}\n\nfunc (e *Executor) Start(f def.Formula, id def.JobID, stdin io.Reader, journal io.Writer) def.Job {\n\t\/\/ TODO this function sig and its interface are long overdue for an aggressive refactor.\n\t\/\/ - `journal` is Rong. The streams mux should be accessible after this function's scope!\n\t\/\/ - either that or it's time to get cracking on saving the stream mux as an output\n\t\/\/ - `journal` should still be a thing, but it should be a logger.\n\t\/\/ - All these other values should move along in a `Job` struct\n\t\/\/ - `BasicJob` sorta started, but is drunk:\n\t\/\/ - if we're gonna have that, it's incomplete on the inputs\n\t\/\/ - for some reason it mixes in responsibility for waiting for some of the ouputs\n\t\/\/ - that use of channels and public fields is stupidly indefensive\n\t\/\/ - The current `Job` interface is in the wrong package\n\t\/\/ - almost all of the scopes in these functions is wrong\n\t\/\/ - they should be realigned until they actually assist the defers and cleanups\n\t\/\/ - e.g. withErrorCapture, withJobWorkPath, withFilesystems, etc\n\n\t\/\/ Prepare the forumla for execution on this host\n\tdef.ValidateAll(&f)\n\n\tjob := basicjob.New(id)\n\tjobReady := make(chan struct{})\n\n\tgo func() {\n\t\t\/\/ Run the formula in a temporary directory\n\t\tflak.WithDir(func(dir string) {\n\n\t\t\t\/\/ spool our output to a muxed stream\n\t\t\tvar strm streamer.Mux\n\t\t\tstrm = streamer.CborFileMux(filepath.Join(dir, \"log\"))\n\t\t\toutS := strm.Appender(1)\n\t\t\terrS := strm.Appender(2)\n\t\t\tjob.Streams = strm\n\t\t\tdefer func() {\n\t\t\t\t\/\/ Regardless of how the job ends (or even if it fails the remaining setup), output streams must be terminated.\n\t\t\t\toutS.Close()\n\t\t\t\terrS.Close()\n\t\t\t}()\n\n\t\t\t\/\/ Job is ready to stream process output\n\t\t\tclose(jobReady)\n\n\t\t\t\/\/ Set up a logger. Tag all messages with this jobid.\n\t\t\tlogger := log15.New(log15.Ctx{\"JobID\": id})\n\t\t\tlogger.SetHandler(log15.StreamHandler(journal, log15.TerminalFormat()))\n\n\t\t\tjob.Result = e.Run(f, job, dir, stdin, outS, errS, logger)\n\t\t}, e.workspacePath, \"job\", string(job.Id()))\n\n\t\t\/\/ Directory is clean; job complete\n\t\tclose(job.WaitChan)\n\t}()\n\n\t<-jobReady\n\treturn job\n}\n\n\/\/ Executes a job, catching any panics.\nfunc (e *Executor) Run(f def.Formula, j def.Job, d string, stdin io.Reader, outS, errS io.WriteCloser, journal log15.Logger) def.JobResult {\n\tr := def.JobResult{\n\t\tID: j.Id(),\n\t\tExitCode: -1,\n\t}\n\n\ttry.Do(func() {\n\t\te.Execute(f, j, d, &r, outS, errS, journal)\n\t}).Catch(executor.Error, func(err *errors.Error) {\n\t\tr.Error = err\n\t}).Catch(integrity.Error, func(err *errors.Error) {\n\t\tr.Error = err\n\t}).CatchAll(func(err error) {\n\t\tr.Error = executor.UnknownError.Wrap(err).(*errors.Error)\n\t}).Done()\n\n\treturn r\n}\n\n\/\/ Execute a formula in a specified directory. MAY PANIC.\nfunc (e *Executor) Execute(formula def.Formula, job def.Job, jobPath string, result *def.JobResult, stdout, stderr io.WriteCloser, journal log15.Logger) {\n\trootfsPath := filepath.Join(jobPath, \"rootfs\")\n\n\t\/\/ Prepare inputs\n\ttransmat := util.DefaultTransmat()\n\tinputArenas := util.ProvisionInputs(transmat, formula.Inputs, journal)\n\tutil.ProvisionOutputs(formula.Outputs, rootfsPath, journal)\n\n\t\/\/ Assemble filesystem\n\tassembly := util.AssembleFilesystem(\n\t\tutil.BestAssembler(),\n\t\trootfsPath,\n\t\tformula.Inputs,\n\t\tinputArenas,\n\t\tformula.Action.Escapes.Mounts,\n\t\tjournal,\n\t)\n\tdefer assembly.Teardown()\n\n\t\/\/ Emit configs for runc.\n\truncConfigJsonPath := filepath.Join(jobPath, \"config.json\")\n\tcfg := EmitRuncConfigStruct(formula, rootfsPath)\n\tbuf, err := json.Marshal(cfg)\n\tif err != nil {\n\t\tpanic(executor.UnknownError.Wrap(err))\n\t}\n\tioutil.WriteFile(runcConfigJsonPath, buf, 0600)\n\truncRuntimeJsonPath := filepath.Join(jobPath, \"runtime.json\")\n\tcfg = EmitRuncRuntimeStruct(formula)\n\tbuf, err = json.Marshal(cfg)\n\tif err != nil {\n\t\tpanic(executor.UnknownError.Wrap(err))\n\t}\n\tioutil.WriteFile(runcRuntimeJsonPath, buf, 0600)\n\n\t\/\/ Routing logs through a fifo appears to work, but we're going to use a file as a buffer anyway:\n\t\/\/ in the event of nasty breakdowns, it's preferable that the runc log remain readable even if repeatr was the process to end first.\n\tlogPath := filepath.Join(jobPath, \"runc-debug.log\")\n\n\t\/\/ Get handle to invokable runc plugin.\n\truncPath := filepath.Join(assets.Get(\"runc\"), \"bin\/runc\")\n\n\t\/\/ Prepare command to exec\n\targs := []string{\n\t\t\"--root\", filepath.Join(e.workspacePath, \"shared\"), \/\/ a tmpfs would be appropriate\n\t\t\"--log\", logPath,\n\t\t\"--log-format\", \"json\",\n\t\t\"start\",\n\t\t\"--config-file\", runcConfigJsonPath,\n\t\t\"--runtime-file\", runcRuntimeJsonPath,\n\t}\n\tcmd := exec.Command(runcPath, args...)\n\tcmd.Stdin = nil\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\n\t\/\/ launch execution.\n\t\/\/ transform gosh's typed errors to repeatr's hierarchical errors.\n\t\/\/ this is... not untroubled code: since we're invoking a helper that's then\n\t\/\/ proxying the exec even further, most errors are fatal (the mapping here is\n\t\/\/ very different than in e.g. chroot executor, and provides much less meaning).\n\tvar proc gosh.Proc\n\ttry.Do(func() {\n\t\tproc = gosh.ExecProcCmd(cmd)\n\t}).CatchAll(func(err error) {\n\t\tswitch err.(type) {\n\t\tcase gosh.NoSuchCommandError:\n\t\t\tpanic(executor.ConfigError.New(\"runc binary is missing\"))\n\t\tcase gosh.NoArgumentsError:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\tcase gosh.NoSuchCwdError:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\tcase gosh.ProcMonitorError:\n\t\t\tpanic(executor.TaskExecError.Wrap(err))\n\t\tdefault:\n\t\t\tpanic(executor.UnknownError.Wrap(err))\n\t\t}\n\t}).Done()\n\n\tvar runcLog io.ReadCloser\n\truncLog, err = os.OpenFile(logPath, os.O_CREATE|os.O_RDONLY, 0644)\n\t\/\/ note this open races child; doesn't matter.\n\tif err != nil {\n\t\tpanic(executor.TaskExecError.New(\"failed to tail runc log: %s\", err))\n\t}\n\t\/\/ swaddle the file in userland-interruptable reader;\n\t\/\/ obviously we don't want to stop watching the logs when we hit the end of the still-growing file.\n\truncLog = streamer.NewTailReader(runcLog)\n\t\/\/ close the reader when we return (which means after waiting for the exit code, which overall DTRT).\n\tdefer runcLog.Close()\n\n\t\/\/ Proxy runc's logs out in realtime; also, detect errors and exit statuses from the stream.\n\tvar realError error\n\t\/\/var unknownError bool \/\/ see the \"NOTE WELL\" section below -.-\n\tgo func() {\n\t\tdec := json.NewDecoder(runcLog)\n\t\tfor {\n\t\t\tvar logMsg map[string]string\n\t\t\terr := dec.Decode(&logMsg)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpanic(executor.TaskExecError.New(\"unparsable log from runc: %s\", err))\n\t\t\t}\n\t\t\t\/\/ remap\n\t\t\tif _, ok := logMsg[\"msg\"]; !ok {\n\t\t\t\tlogMsg[\"msg\"] = \"\"\n\t\t\t}\n\t\t\tctx := log15.Ctx{}\n\t\t\tfor k, v := range logMsg {\n\t\t\t\tif k == \"msg\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tctx[\"runc-\"+k] = v\n\t\t\t}\n\t\t\t\/\/ with runc, everything we hear is at least a warning.\n\t\t\tjournal.Warn(logMsg[\"msg\"], ctx)\n\t\t\t\/\/ actually filtering the interesting structures and raising issues\n\t\t\t\/\/ note that we don't need to capture the \"exit status\" message, because that\n\t\t\t\/\/ code *does* come out correctly... but we do need to sometimes override it again.\n\t\t\t\/\/ NOTE WELL: we cannot guarantee to capture all semantic runc failure modes.\n\t\t\t\/\/ Errors may slip through with exit status 1: there are still many fail states\n\t\t\t\/\/ which runc does not log with sufficient consistency or a sufficiently separate\n\t\t\t\/\/ control channel for us to be able to reliably disambiguate them from stderr\n\t\t\t\/\/ output of a successfully executing job!\n\t\t\t\/\/ We have whitelisted what we can; the following oddities remain:\n\t\t\t\/\/ - runc will log an \"exit status ${n}\" message for other failures of its internal forking\n\t\t\t\/\/ - this one is at least on a clear control channel, so we can raise it as a panic, even if we don't know what it is\n\t\t\t\/\/ - TODO do so\n\t\t\t\/\/ - lots of system initialization paths in runc will error directly stderr with no clear sigils or separation from usermode stderr.\n\t\t\t\/\/ - and these mean we're just screwed, and require additional upstream patches to address.\n\t\t\tswitch logMsg[\"msg\"] {\n\t\t\tcase \"Container start failed: [8] System error: no such file or directory\":\n\t\t\t\trealError = executor.NoSuchCwdError.New(\"cannot set cwd to %q: no such file or directory\", formula.Action.Cwd)\n\t\t\tcase \"Container start failed: [8] System error: not a directory\":\n\t\t\t\trealError = executor.NoSuchCwdError.New(\"cannot set cwd to %q: not a directory\", formula.Action.Cwd)\n\t\t\tdefault:\n\t\t\t\t\/\/ broader patterns required for some of these so we can ignore the vagaries of how the command name was quoted\n\t\t\t\tif strings.HasPrefix(logMsg[\"msg\"], \"Container start failed: [8] System error: exec: \") {\n\t\t\t\t\tif strings.HasSuffix(logMsg[\"msg\"], \": executable file not found in $PATH\") {\n\t\t\t\t\t\trealError = executor.NoSuchCommandError.New(\"command %q not found\", formula.Action.Entrypoint[0])\n\t\t\t\t\t} else if strings.HasSuffix(logMsg[\"msg\"], \": no such file or directory\") {\n\t\t\t\t\t\trealError = executor.NoSuchCommandError.New(\"command %q not found\", formula.Action.Entrypoint[0])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Wait for the job to complete\n\tresult.ExitCode = proc.GetExitCode()\n\t\/\/runcLog.Close() \/\/ this could\/should happen before PreserveOutputs. see todo about fixing scopes.\n\n\t\/\/ If we had a CnC error (rather than the real subprocess exit code):\n\t\/\/ - reset code to -1 because the runc exit code wasn't really from the job command\n\t\/\/ - zero the output buffers because runc (again) doesn't understand what control channels are\n\t\/\/ - finally, raise the error\n\tif realError != nil {\n\t\tresult.ExitCode = -1\n\t\t\/\/ TODO just overwrite the streamer, i guess?\n\t\tpanic(realError)\n\t}\n\n\t\/\/ Save outputs\n\tresult.Outputs = util.PreserveOutputs(transmat, formula.Outputs, rootfsPath, journal)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2012 The gocql Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The uuid package can be used to generate and parse universally unique\n\/\/ identifiers, a standardized format in the form of a 128 bit number.\n\/\/\n\/\/ http:\/\/tools.ietf.org\/html\/rfc4122\npackage uuid\n\nimport (\n \"crypto\/rand\"\n \"fmt\"\n \"io\"\n \"net\"\n \"time\"\n\n \"github.com\/0x6e6562\/gocql\"\n)\n\ntype UUID [16]byte\n\nvar hardwareAddr []byte\n\nconst (\n VariantNCSCompat = 0\n VariantIETF = 2\n VariantMicrosoft = 6\n VariantFuture = 7\n)\n\nfunc init() {\n if interfaces, err := net.Interfaces(); err == nil {\n for _, i := range interfaces {\n if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {\n hardwareAddr = i.HardwareAddr\n break\n }\n }\n }\n if hardwareAddr == nil {\n \/\/ If we failed to obtain the MAC address of the current computer,\n \/\/ we will use a randomly generated 6 byte sequence instead and set\n \/\/ the multicast bit as recommended in RFC 4122.\n hardwareAddr = make([]byte, 6)\n _, err := io.ReadFull(rand.Reader, hardwareAddr)\n if err != nil {\n panic(err)\n }\n hardwareAddr[0] = hardwareAddr[0] | 0x01\n }\n}\n\n\/\/ ParseUUID parses a 32 digit hexadecimal number (that might contain hypens)\n\/\/ represanting an UUID.\nfunc ParseUUID(input string) (UUID, error) {\n var u UUID\n j := 0\n for _, r := range input {\n switch {\n case r == '-' && j&1 == 0:\n continue\n case r >= '0' && r <= '9' && j < 32:\n u[j\/2] |= byte(r-'0') << uint(4-j&1*4)\n case r >= 'a' && r <= 'f' && j < 32:\n u[j\/2] |= byte(r-'a'+10) << uint(4-j&1*4)\n case r >= 'A' && r <= 'F' && j < 32:\n u[j\/2] |= byte(r-'A'+10) << uint(4-j&1*4)\n default:\n return UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n }\n j += 1\n }\n if j != 32 {\n return UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n }\n return u, nil\n}\n\n\/\/ FromBytes converts a raw byte slice to an UUID. It will panic if the slice\n\/\/ isn't exactly 16 bytes long.\nfunc FromBytes(input []byte) UUID {\n var u UUID\n if len(input) != 16 {\n panic(\"UUIDs must be exactly 16 bytes long\")\n }\n copy(u[:], input)\n return u\n}\n\n\/\/ RandomUUID generates a totally random UUID (version 4) as described in\n\/\/ RFC 4122.\nfunc RandomUUID() UUID {\n var u UUID\n io.ReadFull(rand.Reader, u[:])\n u[6] &= 0x0F \/\/ clear version\n u[6] |= 0x40 \/\/ set version to 4 (random uuid)\n u[8] &= 0x3F \/\/ clear variant\n u[8] |= 0x80 \/\/ set to IETF variant\n return u\n}\n\nvar timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()\n\n\/\/ TimeUUID generates a new time based UUID (version 1) as described in RFC\n\/\/ 4122. This UUID contains the MAC address of the node that generated the\n\/\/ UUID, a timestamp and a sequence number.\nfunc TimeUUID() UUID {\n var u UUID\n\n now := time.Now().In(time.UTC)\n t := uint64(now.Unix()-timeBase)*10000000 + uint64(now.Nanosecond()\/100)\n u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)\n u[4], u[5] = byte(t>>40), byte(t>>32)\n u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)\n\n var clockSeq [2]byte\n io.ReadFull(rand.Reader, clockSeq[:])\n u[8] = clockSeq[1]\n u[9] = clockSeq[0]\n\n copy(u[10:], hardwareAddr)\n\n u[6] |= 0x10 \/\/ set version to 1 (time based uuid)\n u[8] &= 0x3F \/\/ clear variant\n u[8] |= 0x80 \/\/ set to IETF variant\n\n return u\n}\n\n\/\/ String returns the UUID in it's canonical form, a 32 digit hexadecimal\n\/\/ number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.\nfunc (u UUID) String() string {\n return fmt.Sprintf(\"%x-%x-%x-%x-%x\",\n u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])\n}\n\n\/\/ Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits\n\/\/ (16 bytes) long.\nfunc (u UUID) Bytes() []byte {\n return u[:]\n}\n\n\/\/ Variant returns the variant of this UUID. This package will only generate\n\/\/ UUIDs in the IETF variant.\nfunc (u UUID) Variant() int {\n x := u[8]\n if x&0x80 == 0 {\n return VariantNCSCompat\n }\n if x&0x40 == 0 {\n return VariantIETF\n }\n if x&0x20 == 0 {\n return VariantMicrosoft\n }\n return VariantFuture\n}\n\n\/\/ Version extracts the version of this UUID variant. The RFC 4122 describes\n\/\/ five kinds of UUIDs.\nfunc (u UUID) Version() int {\n return int(u[6] & 0xF0 >> 4)\n}\n\n\/\/ Node extracts the MAC address of the node who generated this UUID. It will\n\/\/ return nil if the UUID is not a time based UUID (version 1).\nfunc (u UUID) Node() []byte {\n if u.Version() != 1 {\n return nil\n }\n return u[10:]\n}\n\n\/\/ Timestamp extracts the timestamp information from a time based UUID\n\/\/ (version 1).\nfunc (u UUID) Timestamp() uint64 {\n if u.Version() != 1 {\n return 0\n }\n return uint64(u[0])<<24 + uint64(u[1])<<16 + uint64(u[2])<<8 +\n uint64(u[3]) + uint64(u[4])<<40 + uint64(u[5])<<32 +\n uint64(u[7])<<48 + uint64(u[6]&0x0F)<<56\n}\n\n\/\/ Time is like Timestamp, except that it returns a time.Time.\nfunc (u UUID) Time() time.Time {\n t := u.Timestamp()\n if t == 0 {\n return time.Time{}\n }\n sec := t \/ 10000000\n nsec := t - sec\n return time.Unix(int64(sec)+timeBase, int64(nsec))\n}\n\nfunc (u UUID) MarshalCQL(info *gocql.TypeInfo) ([]byte, error) {\n switch info.Type {\n case gocql.TypeUUID, gocql.TypeTimeUUID:\n return u[:], nil\n }\n return gocql.Marshal(info, u[:])\n}\n\nfunc (u *UUID) UnmarshalCQL(info *gocql.TypeInfo, data []byte) error {\n switch info.Type {\n case gocql.TypeUUID, gocql.TypeTimeUUID:\n *u = FromBytes(data)\n return nil\n }\n var val []byte\n if err := gocql.Unmarshal(info, data, &val); err != nil {\n return err\n }\n *u = FromBytes(val)\n return nil\n}\n<commit_msg>rebase old namespace<commit_after>\/\/ Copyright (c) 2012 The gocql Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The uuid package can be used to generate and parse universally unique\n\/\/ identifiers, a standardized format in the form of a 128 bit number.\n\/\/\n\/\/ http:\/\/tools.ietf.org\/html\/rfc4122\npackage uuid\n\nimport (\n \"crypto\/rand\"\n \"fmt\"\n \"io\"\n \"net\"\n \"time\"\n\n \"github.com\/QTAB\/gocql\"\n)\n\ntype UUID [16]byte\n\nvar hardwareAddr []byte\n\nconst (\n VariantNCSCompat = 0\n VariantIETF = 2\n VariantMicrosoft = 6\n VariantFuture = 7\n)\n\nfunc init() {\n if interfaces, err := net.Interfaces(); err == nil {\n for _, i := range interfaces {\n if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {\n hardwareAddr = i.HardwareAddr\n break\n }\n }\n }\n if hardwareAddr == nil {\n \/\/ If we failed to obtain the MAC address of the current computer,\n \/\/ we will use a randomly generated 6 byte sequence instead and set\n \/\/ the multicast bit as recommended in RFC 4122.\n hardwareAddr = make([]byte, 6)\n _, err := io.ReadFull(rand.Reader, hardwareAddr)\n if err != nil {\n panic(err)\n }\n hardwareAddr[0] = hardwareAddr[0] | 0x01\n }\n}\n\n\/\/ ParseUUID parses a 32 digit hexadecimal number (that might contain hypens)\n\/\/ represanting an UUID.\nfunc ParseUUID(input string) (UUID, error) {\n var u UUID\n j := 0\n for _, r := range input {\n switch {\n case r == '-' && j&1 == 0:\n continue\n case r >= '0' && r <= '9' && j < 32:\n u[j\/2] |= byte(r-'0') << uint(4-j&1*4)\n case r >= 'a' && r <= 'f' && j < 32:\n u[j\/2] |= byte(r-'a'+10) << uint(4-j&1*4)\n case r >= 'A' && r <= 'F' && j < 32:\n u[j\/2] |= byte(r-'A'+10) << uint(4-j&1*4)\n default:\n return UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n }\n j += 1\n }\n if j != 32 {\n return UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n }\n return u, nil\n}\n\n\/\/ FromBytes converts a raw byte slice to an UUID. It will panic if the slice\n\/\/ isn't exactly 16 bytes long.\nfunc FromBytes(input []byte) UUID {\n var u UUID\n if len(input) != 16 {\n panic(\"UUIDs must be exactly 16 bytes long\")\n }\n copy(u[:], input)\n return u\n}\n\n\/\/ RandomUUID generates a totally random UUID (version 4) as described in\n\/\/ RFC 4122.\nfunc RandomUUID() UUID {\n var u UUID\n io.ReadFull(rand.Reader, u[:])\n u[6] &= 0x0F \/\/ clear version\n u[6] |= 0x40 \/\/ set version to 4 (random uuid)\n u[8] &= 0x3F \/\/ clear variant\n u[8] |= 0x80 \/\/ set to IETF variant\n return u\n}\n\nvar timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()\n\n\/\/ TimeUUID generates a new time based UUID (version 1) as described in RFC\n\/\/ 4122. This UUID contains the MAC address of the node that generated the\n\/\/ UUID, a timestamp and a sequence number.\nfunc TimeUUID() UUID {\n var u UUID\n\n now := time.Now().In(time.UTC)\n t := uint64(now.Unix()-timeBase)*10000000 + uint64(now.Nanosecond()\/100)\n u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)\n u[4], u[5] = byte(t>>40), byte(t>>32)\n u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)\n\n var clockSeq [2]byte\n io.ReadFull(rand.Reader, clockSeq[:])\n u[8] = clockSeq[1]\n u[9] = clockSeq[0]\n\n copy(u[10:], hardwareAddr)\n\n u[6] |= 0x10 \/\/ set version to 1 (time based uuid)\n u[8] &= 0x3F \/\/ clear variant\n u[8] |= 0x80 \/\/ set to IETF variant\n\n return u\n}\n\n\/\/ String returns the UUID in it's canonical form, a 32 digit hexadecimal\n\/\/ number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.\nfunc (u UUID) String() string {\n return fmt.Sprintf(\"%x-%x-%x-%x-%x\",\n u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])\n}\n\n\/\/ Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits\n\/\/ (16 bytes) long.\nfunc (u UUID) Bytes() []byte {\n return u[:]\n}\n\n\/\/ Variant returns the variant of this UUID. This package will only generate\n\/\/ UUIDs in the IETF variant.\nfunc (u UUID) Variant() int {\n x := u[8]\n if x&0x80 == 0 {\n return VariantNCSCompat\n }\n if x&0x40 == 0 {\n return VariantIETF\n }\n if x&0x20 == 0 {\n return VariantMicrosoft\n }\n return VariantFuture\n}\n\n\/\/ Version extracts the version of this UUID variant. The RFC 4122 describes\n\/\/ five kinds of UUIDs.\nfunc (u UUID) Version() int {\n return int(u[6] & 0xF0 >> 4)\n}\n\n\/\/ Node extracts the MAC address of the node who generated this UUID. It will\n\/\/ return nil if the UUID is not a time based UUID (version 1).\nfunc (u UUID) Node() []byte {\n if u.Version() != 1 {\n return nil\n }\n return u[10:]\n}\n\n\/\/ Timestamp extracts the timestamp information from a time based UUID\n\/\/ (version 1).\nfunc (u UUID) Timestamp() uint64 {\n if u.Version() != 1 {\n return 0\n }\n return uint64(u[0])<<24 + uint64(u[1])<<16 + uint64(u[2])<<8 +\n uint64(u[3]) + uint64(u[4])<<40 + uint64(u[5])<<32 +\n uint64(u[7])<<48 + uint64(u[6]&0x0F)<<56\n}\n\n\/\/ Time is like Timestamp, except that it returns a time.Time.\nfunc (u UUID) Time() time.Time {\n t := u.Timestamp()\n if t == 0 {\n return time.Time{}\n }\n sec := t \/ 10000000\n nsec := t - sec\n return time.Unix(int64(sec)+timeBase, int64(nsec))\n}\n\nfunc (u UUID) MarshalCQL(info *gocql.TypeInfo) ([]byte, error) {\n switch info.Type {\n case gocql.TypeUUID, gocql.TypeTimeUUID:\n return u[:], nil\n }\n return gocql.Marshal(info, u[:])\n}\n\nfunc (u *UUID) UnmarshalCQL(info *gocql.TypeInfo, data []byte) error {\n switch info.Type {\n case gocql.TypeUUID, gocql.TypeTimeUUID:\n *u = FromBytes(data)\n return nil\n }\n var val []byte\n if err := gocql.Unmarshal(info, data, &val); err != nil {\n return err\n }\n *u = FromBytes(val)\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n \nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage service\n\nimport (\n\t\"github.com\/kubernetes\/deployment-manager\/expandybird\/expander\"\n\t\"github.com\/kubernetes\/deployment-manager\/common\"\n\t\"github.com\/kubernetes\/deployment-manager\/util\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\trestful \"github.com\/emicklei\/go-restful\"\n)\n\n\/\/ A Service wraps a web service that performs template expansion.\ntype Service struct {\n\t*restful.WebService\n}\n\n\/\/ NewService creates and returns a new Service, initialized with a new\n\/\/ restful.WebService configured with a route that dispatches to the supplied\n\/\/ handler. The new Service must be registered before accepting traffic by\n\/\/ calling Register.\nfunc NewService(handler restful.RouteFunction) *Service {\n\trestful.EnableTracing(true)\n\twebService := new(restful.WebService)\n\twebService.Consumes(restful.MIME_JSON, restful.MIME_XML)\n\twebService.Produces(restful.MIME_JSON, restful.MIME_XML)\n\twebService.Route(webService.POST(\"\/expand\").To(handler).\n\t\tDoc(\"Expand a template.\").\n\t\tReads(&common.Template{}))\n\treturn &Service{webService}\n}\n\n\/\/ Register adds the web service wrapped by the Service to the supplied\n\/\/ container. If the supplied container is nil, then the default container is\n\/\/ used, instead.\nfunc (s *Service) Register(container *restful.Container) {\n\tif container == nil {\n\t\tcontainer = restful.DefaultContainer\n\t}\n\n\tcontainer.Add(s.WebService)\n}\n\n\/\/ NewExpansionHandler returns a route function that handles an incoming\n\/\/ template expansion request, bound to the supplied expander.\nfunc NewExpansionHandler(backend expander.Expander) restful.RouteFunction {\n\treturn func(req *restful.Request, resp *restful.Response) {\n\t\tutil.LogHandlerEntry(\"expandybird: expand\", req.Request)\n\t\ttemplate := &common.Template{}\n\t\tif err := req.ReadEntity(&template); err != nil {\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, err.Error(), resp)\n\t\t\treturn\n\t\t}\n\n\t\toutput, err := backend.ExpandTemplate(template)\n\t\tif err != nil {\n\t\t\tmessage := fmt.Sprintf(\"error expanding template: %s\", err)\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, message, resp)\n\t\t\treturn\n\t\t}\n\n\t\tresponse, err := expander.NewExpansionResponse(output)\n\t\tif err != nil {\n\t\t\tmessage := fmt.Sprintf(\"error marshaling output: %s\", err)\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, message, resp)\n\t\t\treturn\n\t\t}\n\n\t\tutil.LogHandlerExit(\"expandybird\", http.StatusOK, \"OK\", resp.ResponseWriter)\n\t\tresp.WriteEntity(response)\n\t}\n}\n\nfunc logAndReturnErrorFromHandler(statusCode int, message string, resp *restful.Response) {\n\tutil.LogHandlerExit(\"expandybird: expand\", statusCode, message, resp.ResponseWriter)\n\tresp.WriteError(statusCode, errors.New(message))\n}\n<commit_msg>Fix test failure in expandybird.<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage service\n\nimport (\n\t\"github.com\/kubernetes\/deployment-manager\/common\"\n\t\"github.com\/kubernetes\/deployment-manager\/expandybird\/expander\"\n\t\"github.com\/kubernetes\/deployment-manager\/util\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\trestful \"github.com\/emicklei\/go-restful\"\n)\n\n\/\/ A Service wraps a web service that performs template expansion.\ntype Service struct {\n\t*restful.WebService\n}\n\n\/\/ NewService creates and returns a new Service, initialized with a new\n\/\/ restful.WebService configured with a route that dispatches to the supplied\n\/\/ handler. The new Service must be registered before accepting traffic by\n\/\/ calling Register.\nfunc NewService(handler restful.RouteFunction) *Service {\n\trestful.EnableTracing(true)\n\twebService := new(restful.WebService)\n\twebService.Consumes(restful.MIME_JSON, restful.MIME_XML)\n\twebService.Produces(restful.MIME_JSON, restful.MIME_XML)\n\twebService.Route(webService.POST(\"\/expand\").To(handler).\n\t\tDoc(\"Expand a template.\").\n\t\tReads(&common.Template{}).\n\t\tWrites(&expander.ExpansionResponse{}))\n\treturn &Service{webService}\n}\n\n\/\/ Register adds the web service wrapped by the Service to the supplied\n\/\/ container. If the supplied container is nil, then the default container is\n\/\/ used, instead.\nfunc (s *Service) Register(container *restful.Container) {\n\tif container == nil {\n\t\tcontainer = restful.DefaultContainer\n\t}\n\n\tcontainer.Add(s.WebService)\n}\n\n\/\/ NewExpansionHandler returns a route function that handles an incoming\n\/\/ template expansion request, bound to the supplied expander.\nfunc NewExpansionHandler(backend expander.Expander) restful.RouteFunction {\n\treturn func(req *restful.Request, resp *restful.Response) {\n\t\tutil.LogHandlerEntry(\"expandybird: expand\", req.Request)\n\t\ttemplate := &common.Template{}\n\t\tif err := req.ReadEntity(&template); err != nil {\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, err.Error(), resp)\n\t\t\treturn\n\t\t}\n\n\t\toutput, err := backend.ExpandTemplate(template)\n\t\tif err != nil {\n\t\t\tmessage := fmt.Sprintf(\"error expanding template: %s\", err)\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, message, resp)\n\t\t\treturn\n\t\t}\n\n\t\tresponse, err := expander.NewExpansionResponse(output)\n\t\tif err != nil {\n\t\t\tmessage := fmt.Sprintf(\"error marshaling output: %s\", err)\n\t\t\tlogAndReturnErrorFromHandler(http.StatusBadRequest, message, resp)\n\t\t\treturn\n\t\t}\n\n\t\tutil.LogHandlerExit(\"expandybird\", http.StatusOK, \"OK\", resp.ResponseWriter)\n\t\tresp.WriteEntity(response)\n\t}\n}\n\nfunc logAndReturnErrorFromHandler(statusCode int, message string, resp *restful.Response) {\n\tutil.LogHandlerExit(\"expandybird: expand\", statusCode, message, resp.ResponseWriter)\n\tresp.WriteError(statusCode, errors.New(message))\n}\n<|endoftext|>"} {"text":"<commit_before>package solidserver\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc resourcednsrr() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourcednsrrCreate,\n\t\tRead: resourcednsrrRead,\n\t\tUpdate: resourcednsrrUpdate,\n\t\tDelete: resourcednsrrDelete,\n\t\tExists: resourcednsrrExists,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: resourcednsrrImportState,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"dnsserver\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The managed SMART DNS server name, or DNS server name hosting the RR's zone.\",\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"dnsview\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The View name of the RR to create.\",\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"\",\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The Fully Qualified Domain Name of the RR to create.\",\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The type of the RR to create (Supported: A, AAAA, CNAME, TXT).\",\n\t\t\t\tValidateFunc: resourcednsrrvalidatetype,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"value\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The value od the RR to create.\",\n\t\t\t\tComputed: false,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"ttl\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tDescription: \"The DNS Time To Live of the RR to create.\",\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 3600,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourcednsrrvalidatetype(v interface{}, _ string) ([]string, []error) {\n\tswitch strings.ToUpper(v.(string)) {\n\tcase \"A\":\n\t\treturn nil, nil\n\tcase \"AAAA\":\n\t\treturn nil, nil\n\tcase \"PTR\":\n\t\treturn nil, nil\n\tcase \"CNAME\":\n\t\treturn nil, nil\n\tcase \"DNAME\":\n\t\treturn nil, nil\n\tcase \"TXT\":\n\t\treturn nil, nil\n\tcase \"NS\":\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, []error{fmt.Errorf(\"Unsupported RR type.\")}\n\t}\n}\n\nfunc resourcednsrrExists(d *schema.ResourceData, meta interface{}) (bool, error) {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\tlog.Printf(\"[DEBUG] Checking existence of RR (oid): %s\\n\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s (%s)\\n\", d.Id(), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Unset local ID\n\t\td.SetId(\"\")\n\t}\n\n\t\/\/ Reporting a failure\n\treturn false, err\n}\n\nfunc resourcednsrrCreate(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"add_flag\", \"new_only\")\n\tparameters.Add(\"dns_name\", d.Get(\"dnsserver\").(string))\n\tparameters.Add(\"rr_name\", d.Get(\"name\").(string))\n\tparameters.Add(\"rr_type\", strings.ToUpper(d.Get(\"type\").(string)))\n\tparameters.Add(\"value1\", d.Get(\"value\").(string))\n\tparameters.Add(\"rr_ttl\", strconv.Itoa(d.Get(\"ttl\").(int)))\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the creation request\n\tresp, body, err := s.Request(\"post\", \"rest\/dns_rr_add\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\tif oid, oidExist := buf[0][\"ret_oid\"].(string); oidExist {\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Created RR (oid): %s\\n\", oid)\n\t\t\t\td.SetId(oid)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to create RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to create RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrUpdate(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\tparameters.Add(\"add_flag\", \"edit_only\")\n\tparameters.Add(\"dns_name\", d.Get(\"dnsserver\").(string))\n\tparameters.Add(\"rr_name\", d.Get(\"name\").(string))\n\tparameters.Add(\"rr_type\", strings.ToUpper(d.Get(\"type\").(string)))\n\tparameters.Add(\"value1\", d.Get(\"value\").(string))\n\tparameters.Add(\"rr_ttl\", strconv.Itoa(d.Get(\"ttl\").(int)))\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the update request\n\tresp, body, err := s.Request(\"put\", \"rest\/dns_rr_add\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\tif oid, oidExist := buf[0][\"ret_oid\"].(string); oidExist {\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Updated RR (oid): %s\\n\", oid)\n\t\t\t\td.SetId(oid)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to update RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to update RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrDelete(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the deletion request\n\tresp, body, err := s.Request(\"delete\", \"rest\/dns_rr_delete\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode != 200 && resp.StatusCode != 204 {\n\t\t\t\/\/ Reporting a failure\n\t\t\tif len(buf) > 0 {\n\t\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to delete RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to delete RR: %s\", d.Get(\"name\").(string))\n\t\t}\n\n\t\t\/\/ Log deletion\n\t\tlog.Printf(\"[DEBUG] SOLIDServer - Deleted RR (oid): %s\\n\", d.Id())\n\n\t\t\/\/ Unset local ID\n\t\td.SetId(\"\")\n\n\t\t\/\/ Reporting a success\n\t\treturn nil\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrRead(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode == 200 && len(buf) > 0 {\n\t\t\tttl, _ := strconv.Atoi(buf[0][\"ttl\"].(string))\n\n\t\t\td.Set(\"dnsserver\", buf[0][\"dns_name\"].(string))\n\t\t\td.Set(\"name\", buf[0][\"rr_full_name\"].(string))\n\t\t\td.Set(\"type\", buf[0][\"rr_type\"].(string))\n\t\t\td.Set(\"value\", buf[0][\"value1\"].(string))\n\t\t\td.Set(\"ttl\", ttl)\n\n\t\t\tif buf[0][\"dnsview_name\"].(string) != \"#\" {\n\t\t\t\td.Set(\"dnsview\", buf[0][\"dnsview_name\"].(string))\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR: %s (%s)\\n\", d.Get(\"name\"), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Do not unset the local ID to avoid inconsistency\n\n\t\t\/\/ Reporting a failure\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to find RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode == 200 && len(buf) > 0 {\n\t\t\tttl, _ := strconv.Atoi(buf[0][\"ttl\"].(string))\n\n\t\t\td.Set(\"dnsserver\", buf[0][\"dns_name\"].(string))\n\t\t\td.Set(\"name\", buf[0][\"rr_full_name\"].(string))\n\t\t\td.Set(\"type\", buf[0][\"rr_type\"].(string))\n\t\t\td.Set(\"value\", buf[0][\"value1\"].(string))\n\t\t\td.Set(\"ttl\", ttl)\n\n\t\t\tif buf[0][\"dnsview_name\"].(string) != \"#\" {\n\t\t\t\td.Set(\"dnsview\", buf[0][\"dnsview_name\"].(string))\n\t\t\t}\n\n\t\t\treturn []*schema.ResourceData{d}, nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to import RR (oid): %s (%s)\\n\", d.Id(), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find and import RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\treturn nil, fmt.Errorf(\"SOLIDServer - Unable to find and import RR (oid): %s\\n\", d.Id())\n\t}\n\n\t\/\/ Reporting a failure\n\treturn nil, err\n}\n<commit_msg>DNS RR's property dnsview should require delete\/create operation on change<commit_after>package solidserver\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc resourcednsrr() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourcednsrrCreate,\n\t\tRead: resourcednsrrRead,\n\t\tUpdate: resourcednsrrUpdate,\n\t\tDelete: resourcednsrrDelete,\n\t\tExists: resourcednsrrExists,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: resourcednsrrImportState,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"dnsserver\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The managed SMART DNS server name, or DNS server name hosting the RR's zone.\",\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"dnsview\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The View name of the RR to create.\",\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tDefault: \"\",\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The Fully Qualified Domain Name of the RR to create.\",\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The type of the RR to create (Supported: A, AAAA, CNAME, TXT).\",\n\t\t\t\tValidateFunc: resourcednsrrvalidatetype,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"value\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tDescription: \"The value od the RR to create.\",\n\t\t\t\tComputed: false,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"ttl\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tDescription: \"The DNS Time To Live of the RR to create.\",\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: 3600,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourcednsrrvalidatetype(v interface{}, _ string) ([]string, []error) {\n\tswitch strings.ToUpper(v.(string)) {\n\tcase \"A\":\n\t\treturn nil, nil\n\tcase \"AAAA\":\n\t\treturn nil, nil\n\tcase \"PTR\":\n\t\treturn nil, nil\n\tcase \"CNAME\":\n\t\treturn nil, nil\n\tcase \"DNAME\":\n\t\treturn nil, nil\n\tcase \"TXT\":\n\t\treturn nil, nil\n\tcase \"NS\":\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, []error{fmt.Errorf(\"Unsupported RR type.\")}\n\t}\n}\n\nfunc resourcednsrrExists(d *schema.ResourceData, meta interface{}) (bool, error) {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\tlog.Printf(\"[DEBUG] Checking existence of RR (oid): %s\\n\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s (%s)\\n\", d.Id(), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Unset local ID\n\t\td.SetId(\"\")\n\t}\n\n\t\/\/ Reporting a failure\n\treturn false, err\n}\n\nfunc resourcednsrrCreate(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"add_flag\", \"new_only\")\n\tparameters.Add(\"dns_name\", d.Get(\"dnsserver\").(string))\n\tparameters.Add(\"rr_name\", d.Get(\"name\").(string))\n\tparameters.Add(\"rr_type\", strings.ToUpper(d.Get(\"type\").(string)))\n\tparameters.Add(\"value1\", d.Get(\"value\").(string))\n\tparameters.Add(\"rr_ttl\", strconv.Itoa(d.Get(\"ttl\").(int)))\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the creation request\n\tresp, body, err := s.Request(\"post\", \"rest\/dns_rr_add\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\tif oid, oidExist := buf[0][\"ret_oid\"].(string); oidExist {\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Created RR (oid): %s\\n\", oid)\n\t\t\t\td.SetId(oid)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to create RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to create RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrUpdate(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\tparameters.Add(\"add_flag\", \"edit_only\")\n\tparameters.Add(\"dns_name\", d.Get(\"dnsserver\").(string))\n\tparameters.Add(\"rr_name\", d.Get(\"name\").(string))\n\tparameters.Add(\"rr_type\", strings.ToUpper(d.Get(\"type\").(string)))\n\tparameters.Add(\"value1\", d.Get(\"value\").(string))\n\tparameters.Add(\"rr_ttl\", strconv.Itoa(d.Get(\"ttl\").(int)))\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the update request\n\tresp, body, err := s.Request(\"put\", \"rest\/dns_rr_add\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif (resp.StatusCode == 200 || resp.StatusCode == 201) && len(buf) > 0 {\n\t\t\tif oid, oidExist := buf[0][\"ret_oid\"].(string); oidExist {\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Updated RR (oid): %s\\n\", oid)\n\t\t\t\td.SetId(oid)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to update RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to update RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrDelete(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Add dnsview parameter if it is supplied\n\tif len(d.Get(\"dnsview\").(string)) != 0 {\n\t\tparameters.Add(\"dnsview_name\", strings.ToLower(d.Get(\"dnsview\").(string)))\n\t}\n\n\t\/\/ Sending the deletion request\n\tresp, body, err := s.Request(\"delete\", \"rest\/dns_rr_delete\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode != 200 && resp.StatusCode != 204 {\n\t\t\t\/\/ Reporting a failure\n\t\t\tif len(buf) > 0 {\n\t\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to delete RR: %s (%s)\", d.Get(\"name\").(string), errMsg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"SOLIDServer - Unable to delete RR: %s\", d.Get(\"name\").(string))\n\t\t}\n\n\t\t\/\/ Log deletion\n\t\tlog.Printf(\"[DEBUG] SOLIDServer - Deleted RR (oid): %s\\n\", d.Id())\n\n\t\t\/\/ Unset local ID\n\t\td.SetId(\"\")\n\n\t\t\/\/ Reporting a success\n\t\treturn nil\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrRead(d *schema.ResourceData, meta interface{}) error {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode == 200 && len(buf) > 0 {\n\t\t\tttl, _ := strconv.Atoi(buf[0][\"ttl\"].(string))\n\n\t\t\td.Set(\"dnsserver\", buf[0][\"dns_name\"].(string))\n\t\t\td.Set(\"name\", buf[0][\"rr_full_name\"].(string))\n\t\t\td.Set(\"type\", buf[0][\"rr_type\"].(string))\n\t\t\td.Set(\"value\", buf[0][\"value1\"].(string))\n\t\t\td.Set(\"ttl\", ttl)\n\n\t\t\tif buf[0][\"dnsview_name\"].(string) != \"#\" {\n\t\t\t\td.Set(\"dnsview\", buf[0][\"dnsview_name\"].(string))\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR: %s (%s)\\n\", d.Get(\"name\"), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Do not unset the local ID to avoid inconsistency\n\n\t\t\/\/ Reporting a failure\n\t\treturn fmt.Errorf(\"SOLIDServer - Unable to find RR: %s\\n\", d.Get(\"name\").(string))\n\t}\n\n\t\/\/ Reporting a failure\n\treturn err\n}\n\nfunc resourcednsrrImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\ts := meta.(*SOLIDserver)\n\n\t\/\/ Building parameters\n\tparameters := url.Values{}\n\tparameters.Add(\"rr_id\", d.Id())\n\n\t\/\/ Sending the read request\n\tresp, body, err := s.Request(\"get\", \"rest\/dns_rr_info\", ¶meters)\n\n\tif err == nil {\n\t\tvar buf [](map[string]interface{})\n\t\tjson.Unmarshal([]byte(body), &buf)\n\n\t\t\/\/ Checking the answer\n\t\tif resp.StatusCode == 200 && len(buf) > 0 {\n\t\t\tttl, _ := strconv.Atoi(buf[0][\"ttl\"].(string))\n\n\t\t\td.Set(\"dnsserver\", buf[0][\"dns_name\"].(string))\n\t\t\td.Set(\"name\", buf[0][\"rr_full_name\"].(string))\n\t\t\td.Set(\"type\", buf[0][\"rr_type\"].(string))\n\t\t\td.Set(\"value\", buf[0][\"value1\"].(string))\n\t\t\td.Set(\"ttl\", ttl)\n\n\t\t\tif buf[0][\"dnsview_name\"].(string) != \"#\" {\n\t\t\t\td.Set(\"dnsview\", buf[0][\"dnsview_name\"].(string))\n\t\t\t}\n\n\t\t\treturn []*schema.ResourceData{d}, nil\n\t\t}\n\n\t\tif len(buf) > 0 {\n\t\t\tif errMsg, errExist := buf[0][\"errmsg\"].(string); errExist {\n\t\t\t\t\/\/ Log the error\n\t\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to import RR (oid): %s (%s)\\n\", d.Id(), errMsg)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Log the error\n\t\t\tlog.Printf(\"[DEBUG] SOLIDServer - Unable to find and import RR (oid): %s\\n\", d.Id())\n\t\t}\n\n\t\t\/\/ Reporting a failure\n\t\treturn nil, fmt.Errorf(\"SOLIDServer - Unable to find and import RR (oid): %s\\n\", d.Id())\n\t}\n\n\t\/\/ Reporting a failure\n\treturn nil, err\n}\n<|endoftext|>"} {"text":"<commit_before>package keystone\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype ServiceCatalog struct {\n\tEndpoints []map[string]string\n\tType string\n\tName string\n}\n\ntype Client struct {\n\tToken string\n\tCatalogs []ServiceCatalog\n\tauthUrl string\n}\n\ntype Tenant struct {\n\tId string\n\tName string\n\tDescription string\n}\n\ntype User struct {\n\tId string\n\tName string\n\tEmail string\n}\n\ntype Ec2 struct {\n\tAccess string\n\tSecret string\n}\n\nfunc NewClient(username, password, tenantName, authUrl string) (*Client, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"auth\": {\"passwordCredentials\": {\"username\": \"%s\", \"password\":\"%s\"}, \"tenantName\": \"%s\"}}`, username, password, tenantName))\n\tresponse, err := http.Post(authUrl+\"\/tokens\", \"application\/json\", b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\terr = json.Unmarshal(result, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif response.StatusCode > 399 {\n\t\treturn nil, errors.New(data[\"error\"][\"title\"].(string))\n\t}\n\ttoken := data[\"access\"][\"token\"].(map[string]interface{})[\"id\"].(string)\n\tclient := Client{Token: token, authUrl: authUrl}\n\tcatalogs := data[\"access\"][\"serviceCatalog\"].([]interface{})\n\tfor _, c := range catalogs {\n\t\tcatalog := c.(map[string]interface{})\n\t\tserviceCatalog := ServiceCatalog{\n\t\t\tName: catalog[\"name\"].(string),\n\t\t\tType: catalog[\"type\"].(string),\n\t\t}\n\t\tfor _, e := range catalog[\"endpoints\"].([]interface{}) {\n\t\t\tendpoint := map[string]string{}\n\t\t\tfor k, v := range e.(map[string]interface{}) {\n\t\t\t\tendpoint[k] = v.(string)\n\t\t\t}\n\t\t\tserviceCatalog.Endpoints = append(serviceCatalog.Endpoints, endpoint)\n\t\t}\n\t\tclient.Catalogs = append(client.Catalogs, serviceCatalog)\n\t}\n\treturn &client, nil\n}\n\nfunc (c *Client) Endpoint(service, which string) string {\n\tvar (\n\t\tendpoint string\n\t\tcatalog ServiceCatalog\n\t)\n\tfor _, catalog = range c.Catalogs {\n\t\tif catalog.Type == service {\n\t\t\tbreak\n\t\t}\n\t}\n\tif catalog.Type == service {\n\t\tif !strings.Contains(which, \"URL\") {\n\t\t\twhich += \"URL\"\n\t\t}\n\t\tendpoint = catalog.Endpoints[0][which] \/\/ TODO(fsouza): choose region\n\t}\n\treturn endpoint\n}\n\nfunc (c *Client) do(method, urlStr string, body io.Reader) (*http.Response, error) {\n\trequest, _ := http.NewRequest(method, urlStr, body)\n\trequest.Header.Set(\"X-Auth-Token\", c.Token)\n\tif body != nil {\n\t\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\thttpClient := &http.Client{}\n\treturn httpClient.Do(request)\n}\n\nfunc (c *Client) NewTenant(name, description string, enabled bool) (*Tenant, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"tenant\": {\"name\": \"%s\", \"description\": \"%s\", \"enabled\": %t}}`, name, description, enabled))\n\tresponse, _ := c.do(\"POST\", c.authUrl+\"\/tenants\", b)\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\ttenant := Tenant{\n\t\tId: data[\"tenant\"][\"id\"].(string),\n\t\tName: data[\"tenant\"][\"name\"].(string),\n\t\tDescription: data[\"tenant\"][\"description\"].(string),\n\t}\n\treturn &tenant, nil\n}\n\nfunc (c *Client) NewUser(name, password, email, tenantId, roleId string, enabled bool) (*User, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"user\": {\"name\": \"%s\", \"password\": \"%s\", \"tenantId\": \"%s\", \"email\": \"%s\", \"enabled\": %t}}`, name, password, tenantId, email, enabled))\n\tresponse, err := c.do(\"POST\", c.authUrl+\"\/users\", b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\tuser := User{\n\t\tId: data[\"user\"][\"id\"].(string),\n\t\tName: data[\"user\"][\"name\"].(string),\n\t\tEmail: data[\"user\"][\"email\"].(string),\n\t}\n\t\/\/ http:\/\/nova.nimbus.dev.globoi.com:35357\/v2.0\/tenants\/d34f63dc72904510a5b3dd25bee6ceaf\/users\/ce038d17773a4e398df562aa121b7923\/roles\/OS-KSADM\/b07bef22c3264c2f8a4c434eee912b4c\n\tresponse, err = c.do(\"PUT\", c.authUrl+\"\/tenants\/\"+tenantId+\"\/users\/\"+user.Id+\"\/roles\/OS-KSADM\/\"+roleId, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &user, nil\n}\n\nfunc (c *Client) NewEc2(userId, tenantId string) (*Ec2, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"tenant_id\": \"%s\"}`, tenantId))\n\tresponse, _ := c.do(\"POST\", c.authUrl+\"\/users\/\"+userId+\"\/credentials\/OS-EC2\", b)\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\tec2 := Ec2{\n\t\tAccess: data[\"credential\"][\"access\"].(string),\n\t\tSecret: data[\"credential\"][\"secret\"].(string),\n\t}\n\treturn &ec2, nil\n}\n\nfunc (c *Client) RemoveEc2(userId, access string) error {\n\treturn c.delete(c.authUrl + \"\/users\/\" + userId + \"\/credentials\/OS-EC2\/\" + access)\n}\n\nfunc (c *Client) RemoveUser(userId, tenantId, roleId string) error {\n\t\/\/ FIXME(fsouza): deal with errors. Keystone keep returning malformed response.\n\tc.delete(c.authUrl + \"\/tenants\/\" + tenantId + \"\/users\/\" + userId + \"\/roles\/OS-KSADM\/\" + roleId)\n\treturn c.delete(c.authUrl + \"\/users\/\" + userId)\n}\n\nfunc (c *Client) RemoveTenant(tenantId string) error {\n\t\/\/ FIXME(fsouza): deal with errors. Keystone keep returning malformed response.\n\tc.delete(c.authUrl + \"\/tenants\/\" + tenantId)\n\treturn nil\n}\n\nfunc (c *Client) delete(url string) error {\n\tif resp, err := c.do(\"DELETE\", url, nil); err != nil {\n\t\treturn err\n\t} else if resp.StatusCode > 299 {\n\t\treturn errorFromResponse(resp)\n\t}\n\treturn nil\n}\n\nfunc errorFromResponse(response *http.Response) error {\n\tdefer response.Body.Close()\n\tb, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn errors.New(string(b))\n}\n<commit_msg>keystone: removed comment<commit_after>package keystone\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype ServiceCatalog struct {\n\tEndpoints []map[string]string\n\tType string\n\tName string\n}\n\ntype Client struct {\n\tToken string\n\tCatalogs []ServiceCatalog\n\tauthUrl string\n}\n\ntype Tenant struct {\n\tId string\n\tName string\n\tDescription string\n}\n\ntype User struct {\n\tId string\n\tName string\n\tEmail string\n}\n\ntype Ec2 struct {\n\tAccess string\n\tSecret string\n}\n\nfunc NewClient(username, password, tenantName, authUrl string) (*Client, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"auth\": {\"passwordCredentials\": {\"username\": \"%s\", \"password\":\"%s\"}, \"tenantName\": \"%s\"}}`, username, password, tenantName))\n\tresponse, err := http.Post(authUrl+\"\/tokens\", \"application\/json\", b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\terr = json.Unmarshal(result, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif response.StatusCode > 399 {\n\t\treturn nil, errors.New(data[\"error\"][\"title\"].(string))\n\t}\n\ttoken := data[\"access\"][\"token\"].(map[string]interface{})[\"id\"].(string)\n\tclient := Client{Token: token, authUrl: authUrl}\n\tcatalogs := data[\"access\"][\"serviceCatalog\"].([]interface{})\n\tfor _, c := range catalogs {\n\t\tcatalog := c.(map[string]interface{})\n\t\tserviceCatalog := ServiceCatalog{\n\t\t\tName: catalog[\"name\"].(string),\n\t\t\tType: catalog[\"type\"].(string),\n\t\t}\n\t\tfor _, e := range catalog[\"endpoints\"].([]interface{}) {\n\t\t\tendpoint := map[string]string{}\n\t\t\tfor k, v := range e.(map[string]interface{}) {\n\t\t\t\tendpoint[k] = v.(string)\n\t\t\t}\n\t\t\tserviceCatalog.Endpoints = append(serviceCatalog.Endpoints, endpoint)\n\t\t}\n\t\tclient.Catalogs = append(client.Catalogs, serviceCatalog)\n\t}\n\treturn &client, nil\n}\n\nfunc (c *Client) Endpoint(service, which string) string {\n\tvar (\n\t\tendpoint string\n\t\tcatalog ServiceCatalog\n\t)\n\tfor _, catalog = range c.Catalogs {\n\t\tif catalog.Type == service {\n\t\t\tbreak\n\t\t}\n\t}\n\tif catalog.Type == service {\n\t\tif !strings.Contains(which, \"URL\") {\n\t\t\twhich += \"URL\"\n\t\t}\n\t\tendpoint = catalog.Endpoints[0][which] \/\/ TODO(fsouza): choose region\n\t}\n\treturn endpoint\n}\n\nfunc (c *Client) do(method, urlStr string, body io.Reader) (*http.Response, error) {\n\trequest, _ := http.NewRequest(method, urlStr, body)\n\trequest.Header.Set(\"X-Auth-Token\", c.Token)\n\tif body != nil {\n\t\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\thttpClient := &http.Client{}\n\treturn httpClient.Do(request)\n}\n\nfunc (c *Client) NewTenant(name, description string, enabled bool) (*Tenant, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"tenant\": {\"name\": \"%s\", \"description\": \"%s\", \"enabled\": %t}}`, name, description, enabled))\n\tresponse, _ := c.do(\"POST\", c.authUrl+\"\/tenants\", b)\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\ttenant := Tenant{\n\t\tId: data[\"tenant\"][\"id\"].(string),\n\t\tName: data[\"tenant\"][\"name\"].(string),\n\t\tDescription: data[\"tenant\"][\"description\"].(string),\n\t}\n\treturn &tenant, nil\n}\n\nfunc (c *Client) NewUser(name, password, email, tenantId, roleId string, enabled bool) (*User, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"user\": {\"name\": \"%s\", \"password\": \"%s\", \"tenantId\": \"%s\", \"email\": \"%s\", \"enabled\": %t}}`, name, password, tenantId, email, enabled))\n\tresponse, err := c.do(\"POST\", c.authUrl+\"\/users\", b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\tuser := User{\n\t\tId: data[\"user\"][\"id\"].(string),\n\t\tName: data[\"user\"][\"name\"].(string),\n\t\tEmail: data[\"user\"][\"email\"].(string),\n\t}\n\tresponse, err = c.do(\"PUT\", c.authUrl+\"\/tenants\/\"+tenantId+\"\/users\/\"+user.Id+\"\/roles\/OS-KSADM\/\"+roleId, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &user, nil\n}\n\nfunc (c *Client) NewEc2(userId, tenantId string) (*Ec2, error) {\n\tb := bytes.NewBufferString(fmt.Sprintf(`{\"tenant_id\": \"%s\"}`, tenantId))\n\tresponse, _ := c.do(\"POST\", c.authUrl+\"\/users\/\"+userId+\"\/credentials\/OS-EC2\", b)\n\tdefer response.Body.Close()\n\tresult, _ := ioutil.ReadAll(response.Body)\n\tvar data map[string]map[string]interface{}\n\t_ = json.Unmarshal(result, &data)\n\tec2 := Ec2{\n\t\tAccess: data[\"credential\"][\"access\"].(string),\n\t\tSecret: data[\"credential\"][\"secret\"].(string),\n\t}\n\treturn &ec2, nil\n}\n\nfunc (c *Client) RemoveEc2(userId, access string) error {\n\treturn c.delete(c.authUrl + \"\/users\/\" + userId + \"\/credentials\/OS-EC2\/\" + access)\n}\n\nfunc (c *Client) RemoveUser(userId, tenantId, roleId string) error {\n\t\/\/ FIXME(fsouza): deal with errors. Keystone keep returning malformed response.\n\tc.delete(c.authUrl + \"\/tenants\/\" + tenantId + \"\/users\/\" + userId + \"\/roles\/OS-KSADM\/\" + roleId)\n\treturn c.delete(c.authUrl + \"\/users\/\" + userId)\n}\n\nfunc (c *Client) RemoveTenant(tenantId string) error {\n\t\/\/ FIXME(fsouza): deal with errors. Keystone keep returning malformed response.\n\tc.delete(c.authUrl + \"\/tenants\/\" + tenantId)\n\treturn nil\n}\n\nfunc (c *Client) delete(url string) error {\n\tif resp, err := c.do(\"DELETE\", url, nil); err != nil {\n\t\treturn err\n\t} else if resp.StatusCode > 299 {\n\t\treturn errorFromResponse(resp)\n\t}\n\treturn nil\n}\n\nfunc errorFromResponse(response *http.Response) error {\n\tdefer response.Body.Close()\n\tb, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn errors.New(string(b))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gogs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ Permission represents a API permission.\ntype Permission struct {\n\tAdmin bool `json:\"admin\"`\n\tPush bool `json:\"push\"`\n\tPull bool `json:\"pull\"`\n}\n\n\/\/ Repository represents a API repository.\ntype Repository struct {\n\tID int64 `json:\"id\"`\n\tOwner *User `json:\"owner\"`\n\tName string `json:\"name\"`\n\tFullName string `json:\"full_name\"`\n\tDescription string `json:\"description\"`\n\tPrivate bool `json:\"private\"`\n\tFork bool `json:\"fork\"`\n\tParent *Repository `json:\"parent\"`\n\tEmpty bool `json:\"empty\"`\n\tMirror bool `json:\"mirror\"`\n\tSize int64 `json:\"size\"`\n\tHTMLURL string `json:\"html_url\"`\n\tSSHURL string `json:\"ssh_url\"`\n\tCloneURL string `json:\"clone_url\"`\n\tWebsite string `json:\"website\"`\n\tStars int `json:\"stars_count\"`\n\tForks int `json:\"forks_count\"`\n\tWatchers int `json:\"watchers_count\"`\n\tOpenIssues int `json:\"open_issues_count\"`\n\tDefaultBranch string `json:\"default_branch\"`\n\tCreated time.Time `json:\"created_at\"`\n\tUpdated time.Time `json:\"updated_at\"`\n\tPermissions *Permission `json:\"permissions,omitempty\"`\n}\n\n\/\/ ListMyRepos lists all repositories for the authenticated user that has access to.\nfunc (c *Client) ListMyRepos() ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", \"\/user\/repos\", nil, nil, &repos)\n}\n\nfunc (c *Client) ListUserRepos(user string) ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/users\/%s\/repos\", user), nil, nil, &repos)\n}\n\nfunc (c *Client) ListOrgRepos(org string) ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/orgs\/%s\/repos\", org), nil, nil, &repos)\n}\n\ntype CreateRepoOption struct {\n\tName string `json:\"name\" binding:\"Required;AlphaDashDot;MaxSize(100)\"`\n\tDescription string `json:\"description\" binding:\"MaxSize(255)\"`\n\tPrivate bool `json:\"private\"`\n\tAutoInit bool `json:\"auto_init\"`\n\tGitignores string `json:\"gitignores\"`\n\tLicense string `json:\"license\"`\n\tReadme string `json:\"readme\"`\n}\n\n\/\/ CreateRepo creates a repository for authenticated user.\nfunc (c *Client) CreateRepo(opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/user\/repos\", jsonHeader, bytes.NewReader(body), repo)\n}\n\n\/\/ CreateOrgRepo creates an organization repository for authenticated user.\nfunc (c *Client) CreateOrgRepo(org string, opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", fmt.Sprintf(\"\/org\/%s\/repos\", org), jsonHeader, bytes.NewReader(body), repo)\n}\n\n\/\/ GetRepo returns information of a repository of given owner.\nfunc (c *Client) GetRepo(owner, reponame string) (*Repository, error) {\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, reponame), nil, nil, repo)\n}\n\n\/\/ DeleteRepo deletes a repository of user or organization.\nfunc (c *Client) DeleteRepo(owner, repo string) error {\n\t_, err := c.getResponse(\"DELETE\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, repo), nil, nil)\n\treturn err\n}\n\ntype MigrateRepoOption struct {\n\tCloneAddr string `json:\"clone_addr\" binding:\"Required\"`\n\tAuthUsername string `json:\"auth_username\"`\n\tAuthPassword string `json:\"auth_password\"`\n\tUID int `json:\"uid\" binding:\"Required\"`\n\tRepoName string `json:\"repo_name\" binding:\"Required\"`\n\tMirror bool `json:\"mirror\"`\n\tPrivate bool `json:\"private\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ MigrateRepo migrates a repository from other Git hosting sources for the\n\/\/ authenticated user.\n\/\/\n\/\/ To migrate a repository for a organization, the authenticated user must be a\n\/\/ owner of the specified organization.\nfunc (c *Client) MigrateRepo(opt MigrateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/repos\/migrate\", jsonHeader, bytes.NewReader(body), repo)\n}\n\ntype EditIssueTrackerOption struct {\n\tEnableIssues *bool `json:\"enable_issues\"`\n\tEnableExternalTracker *bool `json:\"enable_external_tracker\"`\n\tExternalTrackerURL *string `json:\"external_tracker_url\"`\n\tTrackerURLFormat *string `json:\"tracker_url_format\"`\n\tTrackerIssueStyle *string `json:\"tracker_issue_style\"`\n}\n\n\/\/ EditIssueTracker updates issue tracker options of the repository.\nfunc (c *Client) EditIssueTracker(owner, repo string, opt EditIssueTrackerOption) error {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.getResponse(\"PATCH\", fmt.Sprintf(\"\/repos\/%s\/%s\/issue-tracker\", owner, repo), jsonHeader, bytes.NewReader(body))\n\treturn err\n}\n\nfunc (c *Client) MirrorSync(owner, repo string) error {\n\t_, err := c.getResponse(\"POST\", fmt.Sprintf(\"\/repos\/%s\/%s\/mirror-sync\", owner, repo), jsonHeader, nil)\n\treturn err\n}\n<commit_msg>repo: EditWiki implementation (#113)<commit_after>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gogs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ Permission represents a API permission.\ntype Permission struct {\n\tAdmin bool `json:\"admin\"`\n\tPush bool `json:\"push\"`\n\tPull bool `json:\"pull\"`\n}\n\n\/\/ Repository represents a API repository.\ntype Repository struct {\n\tID int64 `json:\"id\"`\n\tOwner *User `json:\"owner\"`\n\tName string `json:\"name\"`\n\tFullName string `json:\"full_name\"`\n\tDescription string `json:\"description\"`\n\tPrivate bool `json:\"private\"`\n\tFork bool `json:\"fork\"`\n\tParent *Repository `json:\"parent\"`\n\tEmpty bool `json:\"empty\"`\n\tMirror bool `json:\"mirror\"`\n\tSize int64 `json:\"size\"`\n\tHTMLURL string `json:\"html_url\"`\n\tSSHURL string `json:\"ssh_url\"`\n\tCloneURL string `json:\"clone_url\"`\n\tWebsite string `json:\"website\"`\n\tStars int `json:\"stars_count\"`\n\tForks int `json:\"forks_count\"`\n\tWatchers int `json:\"watchers_count\"`\n\tOpenIssues int `json:\"open_issues_count\"`\n\tDefaultBranch string `json:\"default_branch\"`\n\tCreated time.Time `json:\"created_at\"`\n\tUpdated time.Time `json:\"updated_at\"`\n\tPermissions *Permission `json:\"permissions,omitempty\"`\n}\n\n\/\/ ListMyRepos lists all repositories for the authenticated user that has access to.\nfunc (c *Client) ListMyRepos() ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", \"\/user\/repos\", nil, nil, &repos)\n}\n\nfunc (c *Client) ListUserRepos(user string) ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/users\/%s\/repos\", user), nil, nil, &repos)\n}\n\nfunc (c *Client) ListOrgRepos(org string) ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\treturn repos, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/orgs\/%s\/repos\", org), nil, nil, &repos)\n}\n\ntype CreateRepoOption struct {\n\tName string `json:\"name\" binding:\"Required;AlphaDashDot;MaxSize(100)\"`\n\tDescription string `json:\"description\" binding:\"MaxSize(255)\"`\n\tPrivate bool `json:\"private\"`\n\tAutoInit bool `json:\"auto_init\"`\n\tGitignores string `json:\"gitignores\"`\n\tLicense string `json:\"license\"`\n\tReadme string `json:\"readme\"`\n}\n\n\/\/ CreateRepo creates a repository for authenticated user.\nfunc (c *Client) CreateRepo(opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/user\/repos\", jsonHeader, bytes.NewReader(body), repo)\n}\n\n\/\/ CreateOrgRepo creates an organization repository for authenticated user.\nfunc (c *Client) CreateOrgRepo(org string, opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", fmt.Sprintf(\"\/org\/%s\/repos\", org), jsonHeader, bytes.NewReader(body), repo)\n}\n\n\/\/ GetRepo returns information of a repository of given owner.\nfunc (c *Client) GetRepo(owner, reponame string) (*Repository, error) {\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, reponame), nil, nil, repo)\n}\n\n\/\/ DeleteRepo deletes a repository of user or organization.\nfunc (c *Client) DeleteRepo(owner, repo string) error {\n\t_, err := c.getResponse(\"DELETE\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, repo), nil, nil)\n\treturn err\n}\n\ntype MigrateRepoOption struct {\n\tCloneAddr string `json:\"clone_addr\" binding:\"Required\"`\n\tAuthUsername string `json:\"auth_username\"`\n\tAuthPassword string `json:\"auth_password\"`\n\tUID int `json:\"uid\" binding:\"Required\"`\n\tRepoName string `json:\"repo_name\" binding:\"Required\"`\n\tMirror bool `json:\"mirror\"`\n\tPrivate bool `json:\"private\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ MigrateRepo migrates a repository from other Git hosting sources for the\n\/\/ authenticated user.\n\/\/\n\/\/ To migrate a repository for a organization, the authenticated user must be a\n\/\/ owner of the specified organization.\nfunc (c *Client) MigrateRepo(opt MigrateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/repos\/migrate\", jsonHeader, bytes.NewReader(body), repo)\n}\n\ntype EditIssueTrackerOption struct {\n\tEnableIssues *bool `json:\"enable_issues\"`\n\tEnableExternalTracker *bool `json:\"enable_external_tracker\"`\n\tExternalTrackerURL *string `json:\"external_tracker_url\"`\n\tTrackerURLFormat *string `json:\"tracker_url_format\"`\n\tTrackerIssueStyle *string `json:\"tracker_issue_style\"`\n}\n\n\/\/ EditIssueTracker updates issue tracker options of the repository.\nfunc (c *Client) EditIssueTracker(owner, repo string, opt EditIssueTrackerOption) error {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.getResponse(\"PATCH\", fmt.Sprintf(\"\/repos\/%s\/%s\/issue-tracker\", owner, repo), jsonHeader, bytes.NewReader(body))\n\treturn err\n}\n\ntype EditWikiOption struct {\n\tEnableWiki *bool `json:\"enable_wiki\"`\n\tAllowPublicWiki *bool `json:\"allow_public_wiki\"`\n\tEnableExternalWiki *bool `json:\"enable_external_wiki\"`\n\tExternalWikiURL *string `json:\"external_wiki_url\"`\n}\n\n\/\/ EditWiki updates wiki options of the repository.\nfunc (c *Client) EditWiki(owner, repo string, opt EditWikiOption) error {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.getResponse(\"PATCH\", fmt.Sprintf(\"\/repos\/%s\/%s\/wiki\", owner, repo), jsonHeader, bytes.NewReader(body))\n\treturn err\n}\n\nfunc (c *Client) MirrorSync(owner, repo string) error {\n\t_, err := c.getResponse(\"POST\", fmt.Sprintf(\"\/repos\/%s\/%s\/mirror-sync\", owner, repo), jsonHeader, nil)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package logprefix\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/shirou\/gopsutil\/host\"\n\tschematypes \"github.com\/taskcluster\/go-schematypes\"\n\t\"github.com\/taskcluster\/taskcluster-worker\/plugins\"\n)\n\ntype provider struct {\n\tplugins.PluginProviderBase\n}\n\ntype plugin struct {\n\tplugins.PluginBase\n\tkeys map[string]string\n\tbootTime string\n\ttaskCount int64 \/\/ Count number of tasks processed\n}\n\nfunc init() {\n\tplugins.Register(\"logprefix\", provider{})\n}\n\nfunc (provider) ConfigSchema() schematypes.Schema {\n\treturn configSchema\n}\n\nfunc (provider) NewPlugin(options plugins.PluginOptions) (plugins.Plugin, error) {\n\tkeys := make(map[string]string)\n\tschematypes.MustValidateAndMap(configSchema, options.Config, &keys)\n\n\t\/\/ Print a neat message to make debugging config easier.\n\t\/\/ Presumably, config files inject stuff into this section using\n\t\/\/ transforms, so it's nice to have some log.\n\tfor k, v := range keys {\n\t\toptions.Monitor.Infof(\"Prefixing task logs: '%s' = '%s'\", k, v)\n\t}\n\n\t\/\/ Obtain the system boottime\n\tboottime, err := host.BootTime()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to determine the system boot time\")\n\t}\n\n\treturn &plugin{\n\t\tkeys: keys,\n\t\tbootTime: time.Unix(int64(boottime), 0).Format(time.RFC3339),\n\t}, nil\n}\n\nfunc (p *plugin) NewTaskPlugin(options plugins.TaskPluginOptions) (plugins.TaskPlugin, error) {\n\t\/\/ Increment number of tasks processed, get the count and subtract one as we\n\t\/\/ want to report zero for the first task\n\ttaskCount := atomic.AddInt64(&p.taskCount, 1) - 1\n\n\tkeys := map[string]string{\n\t\t\"TaskId\": options.TaskContext.TaskID,\n\t\t\"RunId\": strconv.Itoa(options.TaskContext.RunID),\n\t\t\"HostBootTime\": p.bootTime,\n\t\t\"TasksSinceBoot\": strconv.FormatInt(taskCount, 10),\n\t}\n\n\t\/\/ Construct list of all keys (so we can sort it)\n\tvar allKeys []string\n\tfor k := range keys {\n\t\tallKeys = append(allKeys, k)\n\t}\n\tfor k := range p.keys {\n\t\tif !stringContains(allKeys, k) {\n\t\t\tallKeys = append(allKeys, k)\n\t\t} else {\n\t\t\tdebug(\"overwriting: %s\", k)\n\t\t}\n\t}\n\t\/\/ Sort list of allKeys (to ensure consistency)\n\tsort.Strings(allKeys)\n\n\t\/\/ Print keys to task log\n\tfor _, k := range allKeys {\n\t\tv, ok := p.keys[k]\n\t\tif !ok {\n\t\t\tv = keys[k]\n\t\t}\n\n\t\toptions.TaskContext.Log(fmt.Sprintf(\"%s: %s\", k, v))\n\t}\n\n\t\/\/ Return a plugin that does nothing\n\treturn plugins.TaskPluginBase{}, nil\n}\n\nfunc stringContains(list []string, element string) bool {\n\tfor _, e := range list {\n\t\tif e == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>TasksSinceBoot -> TasksSinceStartup<commit_after>package logprefix\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/shirou\/gopsutil\/host\"\n\tschematypes \"github.com\/taskcluster\/go-schematypes\"\n\t\"github.com\/taskcluster\/taskcluster-worker\/plugins\"\n)\n\ntype provider struct {\n\tplugins.PluginProviderBase\n}\n\ntype plugin struct {\n\tplugins.PluginBase\n\tkeys map[string]string\n\tbootTime string\n\ttaskCount int64 \/\/ Count number of tasks processed\n}\n\nfunc init() {\n\tplugins.Register(\"logprefix\", provider{})\n}\n\nfunc (provider) ConfigSchema() schematypes.Schema {\n\treturn configSchema\n}\n\nfunc (provider) NewPlugin(options plugins.PluginOptions) (plugins.Plugin, error) {\n\tkeys := make(map[string]string)\n\tschematypes.MustValidateAndMap(configSchema, options.Config, &keys)\n\n\t\/\/ Print a neat message to make debugging config easier.\n\t\/\/ Presumably, config files inject stuff into this section using\n\t\/\/ transforms, so it's nice to have some log.\n\tfor k, v := range keys {\n\t\toptions.Monitor.Infof(\"Prefixing task logs: '%s' = '%s'\", k, v)\n\t}\n\n\t\/\/ Obtain the system boottime\n\tboottime, err := host.BootTime()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to determine the system boot time\")\n\t}\n\n\treturn &plugin{\n\t\tkeys: keys,\n\t\tbootTime: time.Unix(int64(boottime), 0).Format(time.RFC3339),\n\t}, nil\n}\n\nfunc (p *plugin) NewTaskPlugin(options plugins.TaskPluginOptions) (plugins.TaskPlugin, error) {\n\t\/\/ Increment number of tasks processed, get the count and subtract one as we\n\t\/\/ want to report zero for the first task\n\ttaskCount := atomic.AddInt64(&p.taskCount, 1) - 1\n\n\tkeys := map[string]string{\n\t\t\"TaskId\": options.TaskContext.TaskID,\n\t\t\"RunId\": strconv.Itoa(options.TaskContext.RunID),\n\t\t\"HostBootTime\": p.bootTime,\n\t\t\"TasksSinceStartup\": strconv.FormatInt(taskCount, 10),\n\t}\n\n\t\/\/ Construct list of all keys (so we can sort it)\n\tvar allKeys []string\n\tfor k := range keys {\n\t\tallKeys = append(allKeys, k)\n\t}\n\tfor k := range p.keys {\n\t\tif !stringContains(allKeys, k) {\n\t\t\tallKeys = append(allKeys, k)\n\t\t} else {\n\t\t\tdebug(\"overwriting: %s\", k)\n\t\t}\n\t}\n\t\/\/ Sort list of allKeys (to ensure consistency)\n\tsort.Strings(allKeys)\n\n\t\/\/ Print keys to task log\n\tfor _, k := range allKeys {\n\t\tv, ok := p.keys[k]\n\t\tif !ok {\n\t\t\tv = keys[k]\n\t\t}\n\n\t\toptions.TaskContext.Log(fmt.Sprintf(\"%s: %s\", k, v))\n\t}\n\n\t\/\/ Return a plugin that does nothing\n\treturn plugins.TaskPluginBase{}, nil\n}\n\nfunc stringContains(list []string, element string) bool {\n\tfor _, e := range list {\n\t\tif e == element {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gogs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/\/ Permission represents a API permission.\ntype Permission struct {\n\tAdmin bool `json:\"admin\"`\n\tPush bool `json:\"push\"`\n\tPull bool `json:\"pull\"`\n}\n\n\/\/ Repository represents a API repository.\ntype Repository struct {\n\tId int64 `json:\"id\"`\n\tOwner User `json:\"owner\"`\n\tFullName string `json:\"full_name\"`\n\tPrivate bool `json:\"private\"`\n\tFork bool `json:\"fork\"`\n\tHtmlUrl string `json:\"html_url\"`\n\tCloneUrl string `json:\"clone_url\"`\n\tSshUrl string `json:\"ssh_url\"`\n\tPermissions Permission `json:\"permissions\"`\n}\n\n\/\/ ListMyRepos lists all repositories for the authenticated user that has access to.\nfunc (c *Client) ListMyRepos() ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\terr := c.getParsedResponse(\"GET\", \"\/user\/repos\", nil, nil, &repos)\n\treturn repos, err\n}\n\ntype CreateRepoOption struct {\n\tName string `json:\"name\" binding:\"Required\"`\n\tDescription string `json:\"description\" binding:\"MaxSize(255)\"`\n\tPrivate bool `json:\"private\"`\n\tAutoInit bool `json:\"auto_init\"`\n\tGitignores string `json:\"gitignores\"`\n\tLicense string `json:\"license\"`\n\tReadme string `json:\"readme\"`\n}\n\n\/\/ CreateRepo creates a repository for authenticated user.\nfunc (c *Client) CreateRepo(opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/user\/repos\",\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n\n\/\/ CreateOrgRepo creates an organization repository for authenticated user.\nfunc (c *Client) CreateOrgRepo(org string, opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", fmt.Sprintf(\"\/org\/%s\/repos\", org),\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n\n\/\/ GetRepo returns information of a repository of given owner.\nfunc (c *Client) GetRepo(owner, reponame string) (*Repository, error) {\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, reponame),\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, nil, repo)\n}\n\n\/\/ DeleteRepo deletes a repository of user or organization.\nfunc (c *Client) DeleteRepo(owner, repo string) error {\n\t_, err := c.getResponse(\"DELETE\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, repo), nil, nil)\n\treturn err\n}\n\ntype MigrateRepoOption struct {\n\tCloneAddr string `json:\"clone_addr\" binding:\"Required\"`\n\tAuthUsername string `json:\"auth_username\"`\n\tAuthPassword string `json:\"auth_password\"`\n\tUid int `json:\"uid\" binding:\"Required\"`\n\tRepoName string `json:\"repo_name\" binding:\"Required\"`\n\tMirror bool `json:\"mirror\"`\n\tPrivate bool `json:\"private\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ MigrateRepo migrates a repository from other Git hosting sources for the\n\/\/ authenticated user.\n\/\/\n\/\/ To migrate a repository for a organization, the authenticated user must be a\n\/\/ owner of the specified organization.\nfunc (c *Client) MigrateRepo(opt MigrateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/repos\/migrate\",\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n<commit_msg>Rename Uid -> UID<commit_after>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gogs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/\/ Permission represents a API permission.\ntype Permission struct {\n\tAdmin bool `json:\"admin\"`\n\tPush bool `json:\"push\"`\n\tPull bool `json:\"pull\"`\n}\n\n\/\/ Repository represents a API repository.\ntype Repository struct {\n\tId int64 `json:\"id\"`\n\tOwner User `json:\"owner\"`\n\tFullName string `json:\"full_name\"`\n\tPrivate bool `json:\"private\"`\n\tFork bool `json:\"fork\"`\n\tHtmlUrl string `json:\"html_url\"`\n\tCloneUrl string `json:\"clone_url\"`\n\tSshUrl string `json:\"ssh_url\"`\n\tPermissions Permission `json:\"permissions\"`\n}\n\n\/\/ ListMyRepos lists all repositories for the authenticated user that has access to.\nfunc (c *Client) ListMyRepos() ([]*Repository, error) {\n\trepos := make([]*Repository, 0, 10)\n\terr := c.getParsedResponse(\"GET\", \"\/user\/repos\", nil, nil, &repos)\n\treturn repos, err\n}\n\ntype CreateRepoOption struct {\n\tName string `json:\"name\" binding:\"Required\"`\n\tDescription string `json:\"description\" binding:\"MaxSize(255)\"`\n\tPrivate bool `json:\"private\"`\n\tAutoInit bool `json:\"auto_init\"`\n\tGitignores string `json:\"gitignores\"`\n\tLicense string `json:\"license\"`\n\tReadme string `json:\"readme\"`\n}\n\n\/\/ CreateRepo creates a repository for authenticated user.\nfunc (c *Client) CreateRepo(opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/user\/repos\",\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n\n\/\/ CreateOrgRepo creates an organization repository for authenticated user.\nfunc (c *Client) CreateOrgRepo(org string, opt CreateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", fmt.Sprintf(\"\/org\/%s\/repos\", org),\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n\n\/\/ GetRepo returns information of a repository of given owner.\nfunc (c *Client) GetRepo(owner, reponame string) (*Repository, error) {\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"GET\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, reponame),\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, nil, repo)\n}\n\n\/\/ DeleteRepo deletes a repository of user or organization.\nfunc (c *Client) DeleteRepo(owner, repo string) error {\n\t_, err := c.getResponse(\"DELETE\", fmt.Sprintf(\"\/repos\/%s\/%s\", owner, repo), nil, nil)\n\treturn err\n}\n\ntype MigrateRepoOption struct {\n\tCloneAddr string `json:\"clone_addr\" binding:\"Required\"`\n\tAuthUsername string `json:\"auth_username\"`\n\tAuthPassword string `json:\"auth_password\"`\n\tUID int `json:\"uid\" binding:\"Required\"`\n\tRepoName string `json:\"repo_name\" binding:\"Required\"`\n\tMirror bool `json:\"mirror\"`\n\tPrivate bool `json:\"private\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ MigrateRepo migrates a repository from other Git hosting sources for the\n\/\/ authenticated user.\n\/\/\n\/\/ To migrate a repository for a organization, the authenticated user must be a\n\/\/ owner of the specified organization.\nfunc (c *Client) MigrateRepo(opt MigrateRepoOption) (*Repository, error) {\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trepo := new(Repository)\n\treturn repo, c.getParsedResponse(\"POST\", \"\/repos\/migrate\",\n\t\thttp.Header{\"content-type\": []string{\"application\/json\"}}, bytes.NewReader(body), repo)\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\nconst (\n\tSuperBlockSize = 8\n)\n\ntype SuperBlock struct {\n\tVersion Version\n\tReplicaType ReplicationType\n}\n\nfunc (s *SuperBlock) Bytes() []byte {\n\theader := make([]byte, SuperBlockSize)\n\theader[0] = byte(s.Version)\n\theader[1] = s.ReplicaType.Byte()\n\treturn header\n}\n\ntype Volume struct {\n\tId VolumeId\n\tdir string\n\tdataFile *os.File\n\tnm *NeedleMap\n\n\tSuperBlock\n\n\taccessLock sync.Mutex\n}\n\nfunc NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: replicationType}\n\te = v.load(true)\n\treturn\n}\nfunc LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: CopyNil}\n\te = v.load(false)\n\treturn\n}\nfunc (v *Volume) load(alsoLoadIndex bool) error {\n\tvar e error\n\tfileName := path.Join(v.dir, v.Id.String())\n\tv.dataFile, e = os.OpenFile(fileName+\".dat\", os.O_RDWR|os.O_CREATE, 0644)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t}\n\tif v.ReplicaType == CopyNil {\n\t\te = v.readSuperBlock()\n\t} else {\n\t\te = v.maybeWriteSuperBlock()\n\t}\n\tif e == nil && alsoLoadIndex {\n\t\tindexFile, ie := os.OpenFile(fileName+\".idx\", os.O_RDWR|os.O_CREATE, 0644)\n\t\tif ie != nil {\n\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tv.nm, e = LoadNeedleMap(indexFile)\n\t}\n\treturn e\n}\nfunc (v *Volume) Version() Version {\n\treturn v.SuperBlock.Version\n}\nfunc (v *Volume) Size() int64 {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tstat, e := v.dataFile.Stat()\n\tif e == nil {\n\t\treturn stat.Size()\n\t}\n\tfmt.Printf(\"Failed to read file size %s %s\\n\", v.dataFile.Name(), e.Error())\n\treturn -1\n}\nfunc (v *Volume) Close() {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.nm.Close()\n\tv.dataFile.Close()\n}\nfunc (v *Volume) maybeWriteSuperBlock() error {\n\tstat, e := v.dataFile.Stat()\n\tif e != nil {\n\t\tfmt.Printf(\"failed to stat datafile %s: %s\", v.dataFile, e)\n\t\treturn e\n\t}\n\tif stat.Size() == 0 {\n\t\tv.SuperBlock.Version = CurrentVersion\n\t\t_, e = v.dataFile.Write(v.SuperBlock.Bytes())\n\t}\n\treturn e\n}\nfunc (v *Volume) readSuperBlock() (err error) {\n\tv.dataFile.Seek(0, 0)\n\theader := make([]byte, SuperBlockSize)\n\tif _, e := v.dataFile.Read(header); e != nil {\n\t\treturn fmt.Errorf(\"cannot read superblock: %s\", e)\n\t}\n\tv.SuperBlock, err = ParseSuperBlock(header)\n\treturn err\n}\nfunc ParseSuperBlock(header []byte) (superBlock SuperBlock, err error) {\n\tsuperBlock.Version = Version(header[0])\n\tif superBlock.ReplicaType, err = NewReplicationTypeFromByte(header[1]); err != nil {\n\t\terr = fmt.Errorf(\"cannot read replica type: %s\", err)\n\t}\n\treturn\n}\nfunc (v *Volume) NeedToReplicate() bool {\n\treturn v.ReplicaType.GetCopyCount() > 1\n}\n\nfunc (v *Volume) write(n *Needle) (size uint32, err error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tvar offset int64\n\tif offset, err = v.dataFile.Seek(0, 2); err != nil {\n\t\treturn\n\t}\n\tif size, err = n.Append(v.dataFile, v.Version()); err != nil {\n\t\tv.dataFile.Truncate(offset)\n\t\treturn\n\t}\n\tnv, ok := v.nm.Get(n.Id)\n\tif !ok || int64(nv.Offset)*NeedlePaddingSize < offset {\n\t\t_, err = v.nm.Put(n.Id, uint32(offset\/NeedlePaddingSize), n.Size)\n\t}\n\treturn\n}\nfunc (v *Volume) delete(n *Needle) (uint32, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\t\/\/fmt.Println(\"key\", n.Id, \"volume offset\", nv.Offset, \"data_size\", n.Size, \"cached size\", nv.Size)\n\tif ok {\n\t\tif err := v.nm.Delete(n.Id); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\toffset, err := v.dataFile.Seek(int64(nv.Offset*NeedlePaddingSize), 0)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"cannot get datafile (%s) position: %s\", v.dataFile, err)\n\t\t}\n\t\tif _, err = n.Append(v.dataFile, v.Version()); err != nil {\n\t\t\tv.dataFile.Truncate(offset)\n\t\t\treturn 0, err\n\t\t}\n\t\treturn nv.Size, err\n\t}\n\treturn 0, nil\n}\n\nfunc (v *Volume) read(n *Needle) (int, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tv.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0)\n\t\treturn n.Read(v.dataFile, nv.Size, v.Version())\n\t}\n\treturn -1, errors.New(\"Not Found\")\n}\n\nfunc (v *Volume) garbageLevel() float64 {\n\treturn float64(v.nm.deletionByteCounter) \/ float64(v.ContentSize())\n}\n\nfunc (v *Volume) compact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\n\tfilePath := path.Join(v.dir, v.Id.String())\n\treturn v.copyDataAndGenerateIndexFile(filePath+\".cpd\", filePath+\".cpx\")\n}\nfunc (v *Volume) commitCompact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.dataFile.Close()\n\tvar e error\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpd\"), path.Join(v.dir, v.Id.String()+\".dat\")); e != nil {\n\t\treturn e\n\t}\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpx\"), path.Join(v.dir, v.Id.String()+\".idx\")); e != nil {\n\t\treturn e\n\t}\n\tif e = v.load(true); e != nil {\n\t\treturn e\n\t}\n\treturn nil\n}\n\nfunc ScanVolumeFile(dirname string, id VolumeId,\n\tvisitSuperBlock func(SuperBlock) error,\n\tvisitNeedle func(n *Needle, offset uint32) error) (err error) {\n\tvar v *Volume\n\tif v, err = LoadVolumeOnly(dirname, id); err != nil {\n\t\treturn\n\t}\n\tif err = visitSuperBlock(v.SuperBlock); err != nil {\n\t\treturn\n\t}\n\n\tversion := v.Version()\n\n\toffset := uint32(SuperBlockSize)\n\tn, rest, e := ReadNeedleHeader(v.dataFile, version)\n\tif e != nil {\n\t\terr = fmt.Errorf(\"cannot read needle header: %s\", e)\n\t\treturn\n\t}\n\tfor n != nil {\n\t\tif err = n.ReadNeedleBody(v.dataFile, version, rest); err != nil {\n\t\t\terr = fmt.Errorf(\"cannot read needle body: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = visitNeedle(n, offset); err != nil {\n\t\t\treturn\n\t\t}\n\t\toffset += NeedleHeaderSize + rest\n\t\tif n, rest, err = ReadNeedleHeader(v.dataFile, version); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot read needle header: %s\", err)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) {\n\tvar (\n\t\tdst, idx *os.File\n\t)\n\tif dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer dst.Close()\n\n\tif idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer idx.Close()\n\n\tnm := NewNeedleMap(idx)\n\tnew_offset := uint32(SuperBlockSize)\n\n\terr = ScanVolumeFile(v.dir, v.Id, func(superBlock SuperBlock) error {\n\t\t_, err = dst.Write(superBlock.Bytes())\n\t\treturn err\n\t}, func(n *Needle, offset uint32) error {\n\t\tnv, ok := v.nm.Get(n.Id)\n\t\t\/\/log.Println(\"file size is\", n.Size, \"rest\", rest)\n\t\tif ok && nv.Offset*NeedlePaddingSize == offset {\n\t\t\tif nv.Size > 0 {\n\t\t\t\tif _, err = nm.Put(n.Id, new_offset\/NeedlePaddingSize, n.Size); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot put needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tif _, err = n.Append(dst, v.Version()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot append needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tnew_offset += n.DiskSize()\n\t\t\t\t\/\/log.Println(\"saving key\", n.Id, \"volume offset\", old_offset, \"=>\", new_offset, \"data_size\", n.Size, \"rest\", rest)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}\nfunc (v *Volume) ContentSize() uint64 {\n\treturn v.nm.fileByteCounter\n}\n<commit_msg>do not truncate file if any error happens. Truncating will lose all files after this file entry.<commit_after>package storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\nconst (\n\tSuperBlockSize = 8\n)\n\ntype SuperBlock struct {\n\tVersion Version\n\tReplicaType ReplicationType\n}\n\nfunc (s *SuperBlock) Bytes() []byte {\n\theader := make([]byte, SuperBlockSize)\n\theader[0] = byte(s.Version)\n\theader[1] = s.ReplicaType.Byte()\n\treturn header\n}\n\ntype Volume struct {\n\tId VolumeId\n\tdir string\n\tdataFile *os.File\n\tnm *NeedleMap\n\n\tSuperBlock\n\n\taccessLock sync.Mutex\n}\n\nfunc NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: replicationType}\n\te = v.load(true)\n\treturn\n}\nfunc LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {\n\tv = &Volume{dir: dirname, Id: id}\n\tv.SuperBlock = SuperBlock{ReplicaType: CopyNil}\n\te = v.load(false)\n\treturn\n}\nfunc (v *Volume) load(alsoLoadIndex bool) error {\n\tvar e error\n\tfileName := path.Join(v.dir, v.Id.String())\n\tv.dataFile, e = os.OpenFile(fileName+\".dat\", os.O_RDWR|os.O_CREATE, 0644)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t}\n\tif v.ReplicaType == CopyNil {\n\t\te = v.readSuperBlock()\n\t} else {\n\t\te = v.maybeWriteSuperBlock()\n\t}\n\tif e == nil && alsoLoadIndex {\n\t\tindexFile, ie := os.OpenFile(fileName+\".idx\", os.O_RDWR|os.O_CREATE, 0644)\n\t\tif ie != nil {\n\t\t\treturn fmt.Errorf(\"cannot create Volume Data %s.dat: %s\", fileName, e)\n\t\t}\n\t\tv.nm, e = LoadNeedleMap(indexFile)\n\t}\n\treturn e\n}\nfunc (v *Volume) Version() Version {\n\treturn v.SuperBlock.Version\n}\nfunc (v *Volume) Size() int64 {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tstat, e := v.dataFile.Stat()\n\tif e == nil {\n\t\treturn stat.Size()\n\t}\n\tfmt.Printf(\"Failed to read file size %s %s\\n\", v.dataFile.Name(), e.Error())\n\treturn -1\n}\nfunc (v *Volume) Close() {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.nm.Close()\n\tv.dataFile.Close()\n}\nfunc (v *Volume) maybeWriteSuperBlock() error {\n\tstat, e := v.dataFile.Stat()\n\tif e != nil {\n\t\tfmt.Printf(\"failed to stat datafile %s: %s\", v.dataFile, e)\n\t\treturn e\n\t}\n\tif stat.Size() == 0 {\n\t\tv.SuperBlock.Version = CurrentVersion\n\t\t_, e = v.dataFile.Write(v.SuperBlock.Bytes())\n\t}\n\treturn e\n}\nfunc (v *Volume) readSuperBlock() (err error) {\n\tv.dataFile.Seek(0, 0)\n\theader := make([]byte, SuperBlockSize)\n\tif _, e := v.dataFile.Read(header); e != nil {\n\t\treturn fmt.Errorf(\"cannot read superblock: %s\", e)\n\t}\n\tv.SuperBlock, err = ParseSuperBlock(header)\n\treturn err\n}\nfunc ParseSuperBlock(header []byte) (superBlock SuperBlock, err error) {\n\tsuperBlock.Version = Version(header[0])\n\tif superBlock.ReplicaType, err = NewReplicationTypeFromByte(header[1]); err != nil {\n\t\terr = fmt.Errorf(\"cannot read replica type: %s\", err)\n\t}\n\treturn\n}\nfunc (v *Volume) NeedToReplicate() bool {\n\treturn v.ReplicaType.GetCopyCount() > 1\n}\n\nfunc (v *Volume) write(n *Needle) (size uint32, err error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tvar offset int64\n\tif offset, err = v.dataFile.Seek(0, 2); err != nil {\n\t\treturn\n\t}\n\tif size, err = n.Append(v.dataFile, v.Version()); err != nil {\n\t\tv.dataFile.Truncate(offset)\n\t\treturn\n\t}\n\tnv, ok := v.nm.Get(n.Id)\n\tif !ok || int64(nv.Offset)*NeedlePaddingSize < offset {\n\t\t_, err = v.nm.Put(n.Id, uint32(offset\/NeedlePaddingSize), n.Size)\n\t}\n\treturn\n}\nfunc (v *Volume) delete(n *Needle) (uint32, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\t\/\/fmt.Println(\"key\", n.Id, \"volume offset\", nv.Offset, \"data_size\", n.Size, \"cached size\", nv.Size)\n\tif ok {\n\t\tv.nm.Delete(n.Id)\n\t\tv.dataFile.Seek(int64(nv.Offset*NeedlePaddingSize), 0)\n\t\t_, err := n.Append(v.dataFile, v.Version())\n\t\treturn nv.Size, err\n\t}\n\treturn 0, nil\n}\n\nfunc (v *Volume) read(n *Needle) (int, error) {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tnv, ok := v.nm.Get(n.Id)\n\tif ok && nv.Offset > 0 {\n\t\tv.dataFile.Seek(int64(nv.Offset)*NeedlePaddingSize, 0)\n\t\treturn n.Read(v.dataFile, nv.Size, v.Version())\n\t}\n\treturn -1, errors.New(\"Not Found\")\n}\n\nfunc (v *Volume) garbageLevel() float64 {\n\treturn float64(v.nm.deletionByteCounter) \/ float64(v.ContentSize())\n}\n\nfunc (v *Volume) compact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\n\tfilePath := path.Join(v.dir, v.Id.String())\n\treturn v.copyDataAndGenerateIndexFile(filePath+\".cpd\", filePath+\".cpx\")\n}\nfunc (v *Volume) commitCompact() error {\n\tv.accessLock.Lock()\n\tdefer v.accessLock.Unlock()\n\tv.dataFile.Close()\n\tvar e error\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpd\"), path.Join(v.dir, v.Id.String()+\".dat\")); e != nil {\n\t\treturn e\n\t}\n\tif e = os.Rename(path.Join(v.dir, v.Id.String()+\".cpx\"), path.Join(v.dir, v.Id.String()+\".idx\")); e != nil {\n\t\treturn e\n\t}\n\tif e = v.load(true); e != nil {\n\t\treturn e\n\t}\n\treturn nil\n}\n\nfunc ScanVolumeFile(dirname string, id VolumeId,\n\tvisitSuperBlock func(SuperBlock) error,\n\tvisitNeedle func(n *Needle, offset uint32) error) (err error) {\n\tvar v *Volume\n\tif v, err = LoadVolumeOnly(dirname, id); err != nil {\n\t\treturn\n\t}\n\tif err = visitSuperBlock(v.SuperBlock); err != nil {\n\t\treturn\n\t}\n\n\tversion := v.Version()\n\n\toffset := uint32(SuperBlockSize)\n\tn, rest, e := ReadNeedleHeader(v.dataFile, version)\n\tif e != nil {\n\t\terr = fmt.Errorf(\"cannot read needle header: %s\", e)\n\t\treturn\n\t}\n\tfor n != nil {\n\t\tif err = n.ReadNeedleBody(v.dataFile, version, rest); err != nil {\n\t\t\terr = fmt.Errorf(\"cannot read needle body: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = visitNeedle(n, offset); err != nil {\n\t\t\treturn\n\t\t}\n\t\toffset += NeedleHeaderSize + rest\n\t\tif n, rest, err = ReadNeedleHeader(v.dataFile, version); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot read needle header: %s\", err)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) {\n\tvar (\n\t\tdst, idx *os.File\n\t)\n\tif dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer dst.Close()\n\n\tif idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE, 0644); err != nil {\n\t\treturn\n\t}\n\tdefer idx.Close()\n\n\tnm := NewNeedleMap(idx)\n\tnew_offset := uint32(SuperBlockSize)\n\n\terr = ScanVolumeFile(v.dir, v.Id, func(superBlock SuperBlock) error {\n\t\t_, err = dst.Write(superBlock.Bytes())\n\t\treturn err\n\t}, func(n *Needle, offset uint32) error {\n\t\tnv, ok := v.nm.Get(n.Id)\n\t\t\/\/log.Println(\"file size is\", n.Size, \"rest\", rest)\n\t\tif ok && nv.Offset*NeedlePaddingSize == offset {\n\t\t\tif nv.Size > 0 {\n\t\t\t\tif _, err = nm.Put(n.Id, new_offset\/NeedlePaddingSize, n.Size); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot put needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tif _, err = n.Append(dst, v.Version()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"cannot append needle: %s\", err)\n\t\t\t\t}\n\t\t\t\tnew_offset += n.DiskSize()\n\t\t\t\t\/\/log.Println(\"saving key\", n.Id, \"volume offset\", old_offset, \"=>\", new_offset, \"data_size\", n.Size, \"rest\", rest)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn\n}\nfunc (v *Volume) ContentSize() uint64 {\n\treturn v.nm.fileByteCounter\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ A quick and easy way to setup a RESTful JSON API\n\/\/\n\/\/ Go-JSON-REST is a thin layer on top of net\/http that helps building RESTful JSON APIs easily.\n\/\/ It provides fast URL routing using https:\/\/github.com\/ant0ine\/go-urlrouter, and helpers to deal\n\/\/ with JSON requests and responses. It is not a high-level REST framework that transparently maps\n\/\/ HTTP requests to language procedure calls, on the opposite, you constantly have access to the\n\/\/ underlying net\/http objects.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ package main\n\/\/\n\/\/ import (\n\/\/ \"github.com\/ant0ine\/go-json-rest\"\n\/\/ \"net\/http\"\n\/\/ )\n\/\/\n\/\/ type User struct {\n\/\/ Id string\n\/\/ Name string\n\/\/ }\n\/\/\n\/\/ func GetUser(w *rest.ResponseWriter, req *rest.Request) {\n\/\/ user := User{\n\/\/ Id: req.PathParam(\"id\"),\n\/\/ Name: \"Antoine\",\n\/\/ }\n\/\/ w.WriteJson(&user)\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ handler := ResourceHandler{}\n\/\/ handler.SetRoutes(\n\/\/ rest.Route{\"GET\", \"\/users\/:id\", GetUser},\n\/\/ )\n\/\/ http.ListenAndServe(\":8080\", &handler)\n\/\/ }\n\/\/\npackage rest\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/ant0ine\/go-urlrouter\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"runtime\/debug\"\n\t\"strings\"\n)\n\n\/\/ Implement the http.Handler interface and act as a router for the defined Routes.\n\/\/ The defaults are intended to be developemnt friendly, for production you may want\n\/\/ to turn on gzip and disable the JSON indentation.\ntype ResourceHandler struct {\n\trouter urlrouter.Router\n\n\t\/\/ If true and if the client accepts the Gzip encoding, the response payloads\n\t\/\/ will be compressed using gzip, and the corresponding response header will set.\n\tEnableGzip bool\n\n\t\/\/ If true the JSON payload will be written in one line with no space.\n\tDisableJsonIndent bool\n\n\t\/\/ If true, when a \"panic\" happens, the error string and the stack trace will be\n\t\/\/ printed in the 500 response body.\n\tEnableResponseStackTrace bool\n}\n\n\/\/ Used with SetRoutes.\ntype Route struct {\n\tHttpMethod string\n\tPathExp string\n\tFunc func(*ResponseWriter, *Request)\n}\n\n\/\/ Create a Route that points to an object method. It can be convenient to point to an object method instead\n\/\/ of a function, this helper makes it easy by passing the object instance and the method name as parameters.\nfunc RouteObjectMethod(http_method string, path_exp string, object_instance interface{}, object_method string) Route {\n\n\tvalue := reflect.ValueOf(object_instance)\n\tfunc_value := value.MethodByName(object_method)\n\tif func_value.IsValid() == false {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Cannot find the object method %s on %s\",\n\t\t\tobject_method,\n\t\t\tvalue,\n\t\t))\n\t}\n\troute_func := func(w *ResponseWriter, r *Request) {\n\t\tfunc_value.Call([]reflect.Value{\n\t\t\treflect.ValueOf(w),\n\t\t\treflect.ValueOf(r),\n\t\t})\n\t}\n\n\treturn Route{\n\t\tHttpMethod: http_method,\n\t\tPathExp: path_exp,\n\t\tFunc: route_func,\n\t}\n}\n\n\/\/ Define the Routes. The order the Routes matters,\n\/\/ if a request matches multiple Routes, the first one will be used.\n\/\/ Note that the underlying router is https:\/\/github.com\/ant0ine\/go-urlrouter.\nfunc (self *ResourceHandler) SetRoutes(routes ...Route) error {\n\tself.router = urlrouter.Router{\n\t\tRoutes: []urlrouter.Route{},\n\t}\n\tfor _, route := range routes {\n\t\t\/\/ make sure the method is uppercase\n\t\thttp_method := strings.ToUpper(route.HttpMethod)\n\n\t\tself.router.Routes = append(\n\t\t\tself.router.Routes,\n\t\t\turlrouter.Route{\n\t\t\t\tPathExp: http_method + route.PathExp,\n\t\t\t\tDest: route.Func,\n\t\t\t},\n\t\t)\n\t}\n\treturn self.router.Start()\n}\n\n\/\/ This makes ResourceHandler implement the http.Handler interface\nfunc (self *ResourceHandler) ServeHTTP(orig_writer http.ResponseWriter, orig_request *http.Request) {\n\n\t\/\/ catch user code's panic, and convert to http response\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tmessage := \"Internal Server Error\"\n\t\t\tif self.EnableResponseStackTrace {\n\t\t\t\tmessage = fmt.Sprintf(\"%s\\n\\n%s\", r, debug.Stack())\n\t\t\t}\n\t\t\thttp.Error(orig_writer, message, http.StatusInternalServerError)\n\t\t}\n\t}()\n\n\t\/\/ find the route\n\troute, params, err := self.router.FindRoute(\n\t\tstrings.ToUpper(orig_request.Method) + orig_request.URL.Path,\n\t)\n\tif err != nil {\n\t\t\/\/ should never happen has the URL has already been parsed\n\t\tpanic(err)\n\t}\n\tif route == nil {\n\t\t\/\/ no route found\n\t\thttp.NotFound(orig_writer, orig_request)\n\t\treturn\n\t}\n\n\t\/\/ determine if gzip is needed\n\tis_gzipped := self.EnableGzip == true &&\n\t\tstrings.Contains(orig_request.Header.Get(\"Accept-Encoding\"), \"gzip\")\n\n\tis_indented := !self.DisableJsonIndent\n\n\trequest := Request{\n\t\torig_request,\n\t\tparams,\n\t}\n\n\twriter := ResponseWriter{\n\t\torig_writer,\n\t\tis_gzipped,\n\t\tis_indented,\n\t}\n\n\t\/\/ run the user code\n\thandler := route.Dest.(func(*ResponseWriter, *Request))\n\thandler(&writer, &request)\n}\n\n\/\/ Inherit from http.Request, and provide additional methods.\ntype Request struct {\n\t*http.Request\n\t\/\/ map of parameters that have been matched in the URL Path.\n\tPathParams map[string]string\n}\n\n\/\/ Provide a convenient access to the PathParams map\nfunc (self *Request) PathParam(name string) string {\n\treturn self.PathParams[name]\n}\n\n\/\/ Read the request body and decode the JSON using json.Unmarshal\nfunc (self *Request) DecodeJsonPayload(v interface{}) error {\n\tcontent, err := ioutil.ReadAll(self.Body)\n\tself.Body.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(content, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Inherit from an object implementing the http.ResponseWriter interface, and provide additional methods.\ntype ResponseWriter struct {\n\thttp.ResponseWriter\n\tis_gzipped bool\n\tis_indented bool\n}\n\n\/\/ Overloading of the http.ResponseWriter method.\n\/\/ Provide additional capabilities, like transparent gzip encoding.\nfunc (self *ResponseWriter) Write(b []byte) (int, error) {\n\tif self.is_gzipped {\n\t\tself.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgzip_writer := gzip.NewWriter(self.ResponseWriter)\n\t\tdefer gzip_writer.Close()\n\t\treturn gzip_writer.Write(b)\n\t}\n\treturn self.ResponseWriter.Write(b)\n}\n\n\/\/ Encode the object in JSON, set the content-type header,\n\/\/ and call Write\nfunc (self *ResponseWriter) WriteJson(v interface{}) error {\n\tself.Header().Set(\"content-type\", \"application\/json\")\n\tvar b []byte\n\tvar err error\n\tif self.is_indented {\n\t\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t} else {\n\t\tb, err = json.Marshal(v)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.Write(b)\n\treturn nil\n}\n<commit_msg>add basic logging<commit_after>\/\/ A quick and easy way to setup a RESTful JSON API\n\/\/\n\/\/ Go-JSON-REST is a thin layer on top of net\/http that helps building RESTful JSON APIs easily.\n\/\/ It provides fast URL routing using https:\/\/github.com\/ant0ine\/go-urlrouter, and helpers to deal\n\/\/ with JSON requests and responses. It is not a high-level REST framework that transparently maps\n\/\/ HTTP requests to language procedure calls, on the opposite, you constantly have access to the\n\/\/ underlying net\/http objects.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ package main\n\/\/\n\/\/ import (\n\/\/ \"github.com\/ant0ine\/go-json-rest\"\n\/\/ \"net\/http\"\n\/\/ )\n\/\/\n\/\/ type User struct {\n\/\/ Id string\n\/\/ Name string\n\/\/ }\n\/\/\n\/\/ func GetUser(w *rest.ResponseWriter, req *rest.Request) {\n\/\/ user := User{\n\/\/ Id: req.PathParam(\"id\"),\n\/\/ Name: \"Antoine\",\n\/\/ }\n\/\/ w.WriteJson(&user)\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ handler := ResourceHandler{}\n\/\/ handler.SetRoutes(\n\/\/ rest.Route{\"GET\", \"\/users\/:id\", GetUser},\n\/\/ )\n\/\/ http.ListenAndServe(\":8080\", &handler)\n\/\/ }\n\/\/\npackage rest\n\nimport (\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/ant0ine\/go-urlrouter\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"runtime\/debug\"\n\t\"strings\"\n)\n\n\/\/ Implement the http.Handler interface and act as a router for the defined Routes.\n\/\/ The defaults are intended to be developemnt friendly, for production you may want\n\/\/ to turn on gzip and disable the JSON indentation.\ntype ResourceHandler struct {\n\trouter urlrouter.Router\n\n\t\/\/ If true and if the client accepts the Gzip encoding, the response payloads\n\t\/\/ will be compressed using gzip, and the corresponding response header will set.\n\tEnableGzip bool\n\n\t\/\/ If true the JSON payload will be written in one line with no space.\n\tDisableJsonIndent bool\n\n\t\/\/ If true, when a \"panic\" happens, the error string and the stack trace will be\n\t\/\/ printed in the 500 response body.\n\tEnableResponseStackTrace bool\n}\n\n\/\/ Used with SetRoutes.\ntype Route struct {\n\tHttpMethod string\n\tPathExp string\n\tFunc func(*ResponseWriter, *Request)\n}\n\n\/\/ Create a Route that points to an object method. It can be convenient to point to an object method instead\n\/\/ of a function, this helper makes it easy by passing the object instance and the method name as parameters.\nfunc RouteObjectMethod(http_method string, path_exp string, object_instance interface{}, object_method string) Route {\n\n\tvalue := reflect.ValueOf(object_instance)\n\tfunc_value := value.MethodByName(object_method)\n\tif func_value.IsValid() == false {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Cannot find the object method %s on %s\",\n\t\t\tobject_method,\n\t\t\tvalue,\n\t\t))\n\t}\n\troute_func := func(w *ResponseWriter, r *Request) {\n\t\tfunc_value.Call([]reflect.Value{\n\t\t\treflect.ValueOf(w),\n\t\t\treflect.ValueOf(r),\n\t\t})\n\t}\n\n\treturn Route{\n\t\tHttpMethod: http_method,\n\t\tPathExp: path_exp,\n\t\tFunc: route_func,\n\t}\n}\n\n\/\/ Define the Routes. The order the Routes matters,\n\/\/ if a request matches multiple Routes, the first one will be used.\n\/\/ Note that the underlying router is https:\/\/github.com\/ant0ine\/go-urlrouter.\nfunc (self *ResourceHandler) SetRoutes(routes ...Route) error {\n\tself.router = urlrouter.Router{\n\t\tRoutes: []urlrouter.Route{},\n\t}\n\tfor _, route := range routes {\n\t\t\/\/ make sure the method is uppercase\n\t\thttp_method := strings.ToUpper(route.HttpMethod)\n\n\t\tself.router.Routes = append(\n\t\t\tself.router.Routes,\n\t\t\turlrouter.Route{\n\t\t\t\tPathExp: http_method + route.PathExp,\n\t\t\t\tDest: route.Func,\n\t\t\t},\n\t\t)\n\t}\n\treturn self.router.Start()\n}\n\n\/\/ This makes ResourceHandler implement the http.Handler interface\nfunc (self *ResourceHandler) ServeHTTP(orig_writer http.ResponseWriter, orig_request *http.Request) {\n\n\t\/\/ catch user code's panic, and convert to http response\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttrace := debug.Stack()\n\t\t\tlog.Printf(\"%s\\n%s\", r, trace)\n\n\t\t\t\/\/ 500 response\n\t\t\tmessage := \"Internal Server Error\"\n\t\t\tif self.EnableResponseStackTrace {\n\t\t\t\tmessage = fmt.Sprintf(\"%s\\n\\n%s\", r, trace)\n\t\t\t}\n\t\t\thttp.Error(orig_writer, message, http.StatusInternalServerError)\n\t\t}\n\t}()\n\n\t\/\/ find the route\n\troute, params, err := self.router.FindRoute(\n\t\tstrings.ToUpper(orig_request.Method) + orig_request.URL.Path,\n\t)\n\tif err != nil {\n\t\t\/\/ should never happen has the URL has already been parsed\n\t\tpanic(err)\n\t}\n\tif route == nil {\n\t\t\/\/ no route found\n\t\tlog.Printf(\"%s %s => No Route Found (404)\", orig_request.Method, orig_request.URL)\n\t\thttp.NotFound(orig_writer, orig_request)\n\t\treturn\n\t}\n\n\t\/\/ determine if gzip is needed\n\tis_gzipped := self.EnableGzip == true &&\n\t\tstrings.Contains(orig_request.Header.Get(\"Accept-Encoding\"), \"gzip\")\n\n\tis_indented := !self.DisableJsonIndent\n\n\trequest := Request{\n\t\torig_request,\n\t\tparams,\n\t}\n\n\twriter := ResponseWriter{\n\t\torig_writer,\n\t\tis_gzipped,\n\t\tis_indented,\n\t}\n\n\t\/\/ run the user code\n\thandler := route.Dest.(func(*ResponseWriter, *Request))\n\tlog.Printf(\"%s %s => Dispatching...\", orig_request.Method, orig_request.URL)\n\thandler(&writer, &request)\n}\n\n\/\/ Inherit from http.Request, and provide additional methods.\ntype Request struct {\n\t*http.Request\n\t\/\/ map of parameters that have been matched in the URL Path.\n\tPathParams map[string]string\n}\n\n\/\/ Provide a convenient access to the PathParams map\nfunc (self *Request) PathParam(name string) string {\n\treturn self.PathParams[name]\n}\n\n\/\/ Read the request body and decode the JSON using json.Unmarshal\nfunc (self *Request) DecodeJsonPayload(v interface{}) error {\n\tcontent, err := ioutil.ReadAll(self.Body)\n\tself.Body.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(content, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Inherit from an object implementing the http.ResponseWriter interface, and provide additional methods.\ntype ResponseWriter struct {\n\thttp.ResponseWriter\n\tis_gzipped bool\n\tis_indented bool\n}\n\n\/\/ Overloading of the http.ResponseWriter method.\n\/\/ Provide additional capabilities, like transparent gzip encoding.\nfunc (self *ResponseWriter) Write(b []byte) (int, error) {\n\tif self.is_gzipped {\n\t\tself.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgzip_writer := gzip.NewWriter(self.ResponseWriter)\n\t\tdefer gzip_writer.Close()\n\t\treturn gzip_writer.Write(b)\n\t}\n\treturn self.ResponseWriter.Write(b)\n}\n\n\/\/ Encode the object in JSON, set the content-type header,\n\/\/ and call Write\nfunc (self *ResponseWriter) WriteJson(v interface{}) error {\n\tself.Header().Set(\"content-type\", \"application\/json\")\n\tvar b []byte\n\tvar err error\n\tif self.is_indented {\n\t\tb, err = json.MarshalIndent(v, \"\", \" \")\n\t} else {\n\t\tb, err = json.Marshal(v)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.Write(b)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build scale\n\n\/*\n * Copyright (C) 2017 Red Hat, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/skydive-project\/skydive\/api\"\n\tgclient \"github.com\/skydive-project\/skydive\/cmd\/client\"\n\t\"github.com\/skydive-project\/skydive\/common\"\n\t\"github.com\/skydive-project\/skydive\/config\"\n\t\"github.com\/skydive-project\/skydive\/flow\"\n\tshttp \"github.com\/skydive-project\/skydive\/http\"\n\t\"github.com\/skydive-project\/skydive\/tests\/helper\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\nfunc TestScaleHA(t *testing.T) {\n\tgopath := os.Getenv(\"GOPATH\")\n\tscale := gopath + \"\/src\/github.com\/skydive-project\/skydive\/scripts\/scale.sh\"\n\n\tsetupCmds := []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s start 2 2 2\", scale), true},\n\t\t{\"sleep 30\", false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\ttearDownCmds := []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop 2 4 2\", scale), false},\n\t}\n\tdefer helper.ExecCmds(t, tearDownCmds...)\n\n\t\/\/ Load Agent-1 as default config for our client\n\tconfig.InitConfig(\"file\", []string{\"\/tmp\/skydive-scale\/agent-1.yml\"})\n\tauthOptions := &shttp.AuthenticationOpts{}\n\n\tclient, err := api.NewCrudClientFromConfig(authOptions)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create client: %s\", err.Error())\n\t}\n\n\t\/\/ switch to the other analyzer\n\tos.Setenv(\"SKYDIVE_ANALYZERS\", \"localhost:8084\")\n\n\tgh := gclient.NewGremlinQueryHelper(authOptions)\n\n\tvar retry func() error\n\tvar nodes []*graph.Node\n\tvar flows []*flow.Flow\n\n\tcheckHostNodes := func(nodeExpected int) {\n\t\tt.Logf(\"Check for host node: %d\", nodeExpected)\n\t\tretry = func() error {\n\t\t\tif nodes, err = gh.GetNodes(`g.V().Has(\"Type\", \"host\")`); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(nodes) != nodeExpected {\n\t\t\t\treturn fmt.Errorf(\"Should return %d host nodes got : %v\", nodeExpected, nodes)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, 5*time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\tcheckICMPv4Flows := func(flowExpected int) {\n\t\tt.Logf(\"Check for flows: %d\", flowExpected)\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ two capture 2 flows\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\treturn fmt.Errorf(\"Should get %d ICMPv4 flow got %d : %v\", flowExpected, len(flows), flows)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\n\t\t\/\/ check in the storage\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\treturn fmt.Errorf(\"Should get %d ICMPv4 flow from datastore got %d : %v\", flowExpected, len(flows), flows)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 40, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\tcheckIPerfFlows := func(flowExpected int) {\n\t\tt.Logf(\"Check for flows: %d\", flowExpected)\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP').Has('Transport.B', '5001')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ two capture 2 flows\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\tvar flowsTCP []*flow.Flow\n\t\t\t\tif flowsTCP, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP')\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"Should get %d iperf(tcp\/5001) flow got %d : %v\", flowExpected, len(flows), flowsTCP)\n\t\t\t}\n\n\t\t\tfor _, f := range flows {\n\t\t\t\tif f.SocketA == nil || f.SocketA.Process != \"\/usr\/bin\/iperf\" || f.SocketB == nil || f.SocketB.Process != \"\/usr\/bin\/iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf exe %v\", f)\n\t\t\t\t}\n\t\t\t\tif f.SocketA.Name != \"iperf\" || f.SocketB.Name != \"iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf thread name %v\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\n\t\t\/\/ check in the storage\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP').Has('Transport.B', '5001')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\tvar flowsTCP []*flow.Flow\n\t\t\t\tif flowsTCP, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP')\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"Should get %d iperf(tcp\/5001) flow from datastore got %d : %#+v\", flowExpected, len(flows), flowsTCP)\n\t\t\t}\n\n\t\t\tfor _, f := range flows {\n\t\t\t\tif f.SocketA.Process != \"\/usr\/bin\/iperf\" || f.SocketB.Process != \"\/usr\/bin\/iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf exe %v\", f)\n\t\t\t\t}\n\t\t\t\tif f.SocketA.Name != \"iperf\" || f.SocketB.Name != \"iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf thread name %v\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 40, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ test if we have our 2 hosts\n\tcheckHostNodes(2)\n\n\t\/\/ before creating the capture check that ovs if ready and the connectivity is ok\n\tcommon.Retry(func() error {\n\t\tpings := []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t\t{fmt.Sprintf(\"%s ping agent-3-vm1 agent-1-vm1 -c 1\", scale), false},\n\t\t}\n\t\treturn helper.ExecCmds(t, pings...)\n\t}, 30, time.Second)\n\n\t\/\/ start a capture\n\tcapture := api.NewCapture(\"g.V().Has('Type', 'netns', 'Name', 'vm1').Out().Has('Name', 'eth0')\", \"\")\n\tcapture.SocketInfo = true\n\tif err = client.Create(\"capture\", capture); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckCaptures := func(captureExpected int) {\n\t\tt.Logf(\"Check for captures: %d\", captureExpected)\n\t\tretry = func() error {\n\t\t\tif nodes, err = gh.GetNodes(`g.V().HasKey(\"Capture.ID\")`); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(nodes) != captureExpected {\n\t\t\t\treturn fmt.Errorf(\"Should return %d capture got : %v\", captureExpected, nodes)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ check that we have 2 captures, one per vm1\n\tcheckCaptures(2)\n\n\t\/\/ generate some packet and wait for seeing them, to be sure that the capture is started\n\twaitForFirstFlows := func() error {\n\t\tt.Logf(\"Wait for first flows...\")\n\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != 2 {\n\t\t\t\treturn errors.New(\"Should get at least one flow\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tfirstFlowsFnc := func() error {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\n\t\treturn waitForFirstFlows()\n\t}\n\tif err = common.Retry(firstFlowsFnc, 10, time.Second); err != nil {\n\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t\/\/ generate some packet, do not check because connectivity is not ensured\n\tfor i := 0; i != 30; i++ {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\t}\n\n\t\/\/ 60 flows expected as we have two captures\n\tcheckICMPv4Flows(60 + 2)\n\n\t\/\/ increase the agent number\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s start 2 4 2\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 4 hosts\n\tcheckHostNodes(4)\n\n\t\/\/ check that we have 4 captures, one per vm1\n\tcheckCaptures(4)\n\n\t\/\/ kill the last agent\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-agent 4\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 3 hosts\n\tcheckHostNodes(3)\n\n\t\/\/ switch back to the first analyzer\n\tos.Setenv(\"SKYDIVE_ANALYZERS\", \"localhost:8082\")\n\n\t\/\/ test if we have now 3 hosts\n\tcheckHostNodes(3)\n\n\t\/\/ destroy the second analyzer\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-analyzer 2\", scale), false},\n\t\t{\"sleep 5\", false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if the remaining analyzer have a correct graph\n\tcheckHostNodes(3)\n\n\t\/\/ generate more icmp traffic\n\tfor i := 0; i != 30; i++ {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-3-vm1 agent-1-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\t}\n\n\t\/\/ 4*30 expected because the gremlin expression matches all the eth0\n\tcheckICMPv4Flows(120 + 2)\n\n\t\/\/ iperf test 10 sec, 1Mbits\/s\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s iperf agent-3-vm1 agent-1-vm1\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\tcheckIPerfFlows(2)\n\n\t\/\/ delete the capture to check that all captures will be delete at the agent side\n\tclient.Delete(\"capture\", capture.ID())\n\tcheckCaptures(0)\n\n\t\/\/ delete an agent\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-agent 1\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 2 hosts\n\tcheckHostNodes(2)\n}\n<commit_msg>test: scale do not check SocketA\/B in live<commit_after>\/\/ +build scale\n\n\/*\n * Copyright (C) 2017 Red Hat, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/skydive-project\/skydive\/api\"\n\tgclient \"github.com\/skydive-project\/skydive\/cmd\/client\"\n\t\"github.com\/skydive-project\/skydive\/common\"\n\t\"github.com\/skydive-project\/skydive\/config\"\n\t\"github.com\/skydive-project\/skydive\/flow\"\n\tshttp \"github.com\/skydive-project\/skydive\/http\"\n\t\"github.com\/skydive-project\/skydive\/tests\/helper\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\nfunc TestScaleHA(t *testing.T) {\n\tgopath := os.Getenv(\"GOPATH\")\n\tscale := gopath + \"\/src\/github.com\/skydive-project\/skydive\/scripts\/scale.sh\"\n\n\tsetupCmds := []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s start 2 2 2\", scale), true},\n\t\t{\"sleep 30\", false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\ttearDownCmds := []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop 2 4 2\", scale), false},\n\t}\n\tdefer helper.ExecCmds(t, tearDownCmds...)\n\n\t\/\/ Load Agent-1 as default config for our client\n\tconfig.InitConfig(\"file\", []string{\"\/tmp\/skydive-scale\/agent-1.yml\"})\n\tauthOptions := &shttp.AuthenticationOpts{}\n\n\tclient, err := api.NewCrudClientFromConfig(authOptions)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create client: %s\", err.Error())\n\t}\n\n\t\/\/ switch to the other analyzer\n\tos.Setenv(\"SKYDIVE_ANALYZERS\", \"localhost:8084\")\n\n\tgh := gclient.NewGremlinQueryHelper(authOptions)\n\n\tvar retry func() error\n\tvar nodes []*graph.Node\n\tvar flows []*flow.Flow\n\n\tcheckHostNodes := func(nodeExpected int) {\n\t\tt.Logf(\"Check for host node: %d\", nodeExpected)\n\t\tretry = func() error {\n\t\t\tif nodes, err = gh.GetNodes(`g.V().Has(\"Type\", \"host\")`); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(nodes) != nodeExpected {\n\t\t\t\treturn fmt.Errorf(\"Should return %d host nodes got : %v\", nodeExpected, nodes)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, 5*time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\tcheckICMPv4Flows := func(flowExpected int) {\n\t\tt.Logf(\"Check for flows: %d\", flowExpected)\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ two capture 2 flows\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\treturn fmt.Errorf(\"Should get %d ICMPv4 flow got %d : %v\", flowExpected, len(flows), flows)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\n\t\t\/\/ check in the storage\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\treturn fmt.Errorf(\"Should get %d ICMPv4 flow from datastore got %d : %v\", flowExpected, len(flows), flows)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 40, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\tcheckIPerfFlows := func(flowExpected int) {\n\t\tt.Logf(\"Check for flows: %d\", flowExpected)\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP').Has('Transport.B', '5001')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ two capture 2 flows\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\tvar flowsTCP []*flow.Flow\n\t\t\t\tif flowsTCP, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP')\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"Should get %d iperf(tcp\/5001) flow got %d : %v\", flowExpected, len(flows), flowsTCP)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\n\t\t\/\/ check in the storage\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP').Has('Transport.B', '5001')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != flowExpected {\n\t\t\t\tvar flowsTCP []*flow.Flow\n\t\t\t\tif flowsTCP, err = gh.GetFlows(\"G.At('-1s', 300).Flows().Has('LayersPath', 'Ethernet\/IPv4\/TCP')\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"Should get %d iperf(tcp\/5001) flow from datastore got %d : %#+v\", flowExpected, len(flows), flowsTCP)\n\t\t\t}\n\n\t\t\tfor _, f := range flows {\n\t\t\t\tif f.SocketA == nil || f.SocketA.Process != \"\/usr\/bin\/iperf\" || f.SocketB == nil || f.SocketB.Process != \"\/usr\/bin\/iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf exe as socket info %v\", f)\n\t\t\t\t}\n\t\t\t\tif f.SocketA.Name != \"iperf\" || f.SocketB.Name != \"iperf\" {\n\t\t\t\t\treturn fmt.Errorf(\"Should get iperf thread name %v\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 40, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ test if we have our 2 hosts\n\tcheckHostNodes(2)\n\n\t\/\/ before creating the capture check that ovs if ready and the connectivity is ok\n\tcommon.Retry(func() error {\n\t\tpings := []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t\t{fmt.Sprintf(\"%s ping agent-3-vm1 agent-1-vm1 -c 1\", scale), false},\n\t\t}\n\t\treturn helper.ExecCmds(t, pings...)\n\t}, 30, time.Second)\n\n\t\/\/ start a capture\n\tcapture := api.NewCapture(\"g.V().Has('Type', 'netns', 'Name', 'vm1').Out().Has('Name', 'eth0')\", \"\")\n\tcapture.SocketInfo = true\n\tcapture.Type = \"pcap\"\n\tif err = client.Create(\"capture\", capture); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckCaptures := func(captureExpected int) {\n\t\tt.Logf(\"Check for captures: %d\", captureExpected)\n\t\tretry = func() error {\n\t\t\tif nodes, err = gh.GetNodes(`g.V().HasKey(\"Capture.ID\")`); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(nodes) != captureExpected {\n\t\t\t\treturn fmt.Errorf(\"Should return %d capture got : %v\", captureExpected, nodes)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ check that we have 2 captures, one per vm1\n\tcheckCaptures(2)\n\n\t\/\/ generate some packet and wait for seeing them, to be sure that the capture is started\n\twaitForFirstFlows := func() error {\n\t\tt.Logf(\"Wait for first flows...\")\n\n\t\tretry = func() error {\n\t\t\tif flows, err = gh.GetFlows(\"G.Flows().Has('LayersPath', 'Ethernet\/IPv4\/ICMPv4')\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(flows) != 2 {\n\t\t\t\treturn errors.New(\"Should get at least one flow\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif err = common.Retry(retry, 10, time.Second); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tfirstFlowsFnc := func() error {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\n\t\treturn waitForFirstFlows()\n\t}\n\tif err = common.Retry(firstFlowsFnc, 10, time.Second); err != nil {\n\t\thelper.ExecCmds(t, tearDownCmds...)\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t\/\/ generate some packet, do not check because connectivity is not ensured\n\tfor i := 0; i != 30; i++ {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-1-vm1 agent-2-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\t}\n\n\t\/\/ 60 flows expected as we have two captures\n\tcheckICMPv4Flows(60 + 2)\n\n\t\/\/ increase the agent number\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s start 2 4 2\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 4 hosts\n\tcheckHostNodes(4)\n\n\t\/\/ check that we have 4 captures, one per vm1\n\tcheckCaptures(4)\n\n\t\/\/ kill the last agent\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-agent 4\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 3 hosts\n\tcheckHostNodes(3)\n\n\t\/\/ switch back to the first analyzer\n\tos.Setenv(\"SKYDIVE_ANALYZERS\", \"localhost:8082\")\n\n\t\/\/ test if we have now 3 hosts\n\tcheckHostNodes(3)\n\n\t\/\/ destroy the second analyzer\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-analyzer 2\", scale), false},\n\t\t{\"sleep 5\", false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if the remaining analyzer have a correct graph\n\tcheckHostNodes(3)\n\n\t\/\/ generate more icmp traffic\n\tfor i := 0; i != 30; i++ {\n\t\tsetupCmds = []helper.Cmd{\n\t\t\t{fmt.Sprintf(\"%s ping agent-3-vm1 agent-1-vm1 -c 1\", scale), false},\n\t\t}\n\t\thelper.ExecCmds(t, setupCmds...)\n\t}\n\n\t\/\/ 4*30 expected because the gremlin expression matches all the eth0\n\tcheckICMPv4Flows(120 + 2)\n\n\t\/\/ iperf test 10 sec, 1Mbits\/s\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s iperf agent-3-vm1 agent-1-vm1\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\tcheckIPerfFlows(2)\n\n\t\/\/ delete the capture to check that all captures will be delete at the agent side\n\tclient.Delete(\"capture\", capture.ID())\n\tcheckCaptures(0)\n\n\t\/\/ delete an agent\n\tsetupCmds = []helper.Cmd{\n\t\t{fmt.Sprintf(\"%s stop-agent 1\", scale), false},\n\t}\n\thelper.ExecCmds(t, setupCmds...)\n\n\t\/\/ test if we have now 2 hosts\n\tcheckHostNodes(2)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nvar _ = ginkgo.Describe(\"[sig-node] PodTemplates\", func() {\n\tf := framework.NewDefaultFramework(\"podtemplate\")\n\t\/*\n\t Release : v1.19\n\t Testname: PodTemplate lifecycle\n\t Description: Attempt to create a PodTemplate. Patch the created PodTemplate. Fetching the PodTemplate MUST reflect changes.\n\t By fetching all the PodTemplates via a Label selector it MUST find the PodTemplate by it's static label and updated value. The PodTemplate must be deleted.\n\t*\/\n\tframework.ConformanceIt(\"should run the lifecycle of PodTemplates\", func() {\n\t\ttestNamespaceName := f.Namespace.Name\n\t\tpodTemplateName := \"nginx-pod-template-\" + string(uuid.NewUUID())\n\n\t\t\/\/ get a list of PodTemplates (in all namespaces to hit endpoint)\n\t\tpodTemplateList, err := f.ClientSet.CoreV1().PodTemplates(\"\").List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-static=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to list all PodTemplates\")\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"unable to find templates\")\n\n\t\t\/\/ create a PodTemplate\n\t\t_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: podTemplateName,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"podtemplate-static\": \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{Name: \"nginx\", Image: \"nginx\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tframework.ExpectNoError(err, \"failed to create PodTemplate\")\n\n\t\t\/\/ get template\n\t\tpodTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})\n\t\tframework.ExpectNoError(err, \"failed to get created PodTemplate\")\n\t\tframework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName)\n\n\t\t\/\/ patch template\n\t\tpodTemplatePatch, err := json.Marshal(map[string]interface{}{\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"labels\": map[string]string{\n\t\t\t\t\t\"podtemplate\": \"patched\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to marshal patch data\")\n\t\t_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{})\n\t\tframework.ExpectNoError(err, \"failed to patch PodTemplate\")\n\n\t\t\/\/ get template (ensure label is there)\n\t\tpodTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})\n\t\tframework.ExpectNoError(err, \"failed to get PodTemplate\")\n\t\tframework.ExpectEqual(podTemplateRead.ObjectMeta.Labels[\"podtemplate\"], \"patched\", \"failed to patch template, new label not found\")\n\n\t\t\/\/ delete the PodTemplate\n\t\terr = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{})\n\t\tframework.ExpectNoError(err, \"failed to delete PodTemplate\")\n\n\t\t\/\/ list the PodTemplates\n\t\tpodTemplateList, err = f.ClientSet.CoreV1().PodTemplates(\"\").List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-static=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to list PodTemplate\")\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"PodTemplate list returned items, failed to delete PodTemplate\")\n\t})\n\n\tginkgo.It(\"should delete a collection of pod templates\", func() {\n\t\tpodTemplateNames := []string{\"test-podtemplate-1\", \"test-podtemplate-2\", \"test-podtemplate-3\"}\n\n\t\tginkgo.By(\"Create set of pod templates\")\n\t\t\/\/ create a set of pod templates in test namespace\n\t\tfor _, podTemplateName := range podTemplateNames {\n\t\t\t_, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(context.TODO(), &v1.PodTemplate{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: podTemplateName,\n\t\t\t\t\tLabels: map[string]string{\"podtemplate-set\": \"true\"},\n\t\t\t\t},\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{Name: \"token-test\", Image: imageutils.GetE2EImage(imageutils.Agnhost)},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, metav1.CreateOptions{})\n\t\t\tframework.ExpectNoError(err, \"failed to create pod template\")\n\t\t\tframework.Logf(\"created %v\", podTemplateName)\n\t\t}\n\n\t\tginkgo.By(\"get a list of pod templates with a label in the current namespace\")\n\t\t\/\/ get a list of pod templates\n\t\tpodTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-set=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to get a list of pod templates\")\n\n\t\tframework.ExpectEqual(len(podTemplateList.Items), len(podTemplateNames), \"looking for expected number of pod templates\")\n\n\t\tginkgo.By(\"delete collection of pod templates\")\n\t\t\/\/ delete collection\n\n\t\tframework.Logf(\"requesting DeleteCollection of pod templates\")\n\t\terr = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-set=true\"})\n\t\tframework.ExpectNoError(err, \"failed to delete all pod templates\")\n\n\t\tginkgo.By(\"get a list of pod templates with a label in the current namespace\")\n\t\t\/\/ get list of pod templates\n\t\tpodTemplateList, err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-set=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to get a list of pod templates\")\n\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"pod templates should all be deleted\")\n\n\t})\n\n})\n<commit_msg>Use polling while deleting the collection of pod templates<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nconst (\n\tpodTemplateRetryPeriod = 1 * time.Second\n\tpodTemplateRetryTimeout = 1 * time.Minute\n)\n\nvar _ = ginkgo.Describe(\"[sig-node] PodTemplates\", func() {\n\tf := framework.NewDefaultFramework(\"podtemplate\")\n\t\/*\n\t Release : v1.19\n\t Testname: PodTemplate lifecycle\n\t Description: Attempt to create a PodTemplate. Patch the created PodTemplate. Fetching the PodTemplate MUST reflect changes.\n\t By fetching all the PodTemplates via a Label selector it MUST find the PodTemplate by it's static label and updated value. The PodTemplate must be deleted.\n\t*\/\n\tframework.ConformanceIt(\"should run the lifecycle of PodTemplates\", func() {\n\t\ttestNamespaceName := f.Namespace.Name\n\t\tpodTemplateName := \"nginx-pod-template-\" + string(uuid.NewUUID())\n\n\t\t\/\/ get a list of PodTemplates (in all namespaces to hit endpoint)\n\t\tpodTemplateList, err := f.ClientSet.CoreV1().PodTemplates(\"\").List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-static=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to list all PodTemplates\")\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"unable to find templates\")\n\n\t\t\/\/ create a PodTemplate\n\t\t_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: podTemplateName,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"podtemplate-static\": \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{Name: \"nginx\", Image: \"nginx\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tframework.ExpectNoError(err, \"failed to create PodTemplate\")\n\n\t\t\/\/ get template\n\t\tpodTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})\n\t\tframework.ExpectNoError(err, \"failed to get created PodTemplate\")\n\t\tframework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName)\n\n\t\t\/\/ patch template\n\t\tpodTemplatePatch, err := json.Marshal(map[string]interface{}{\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"labels\": map[string]string{\n\t\t\t\t\t\"podtemplate\": \"patched\",\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to marshal patch data\")\n\t\t_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch), metav1.PatchOptions{})\n\t\tframework.ExpectNoError(err, \"failed to patch PodTemplate\")\n\n\t\t\/\/ get template (ensure label is there)\n\t\tpodTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})\n\t\tframework.ExpectNoError(err, \"failed to get PodTemplate\")\n\t\tframework.ExpectEqual(podTemplateRead.ObjectMeta.Labels[\"podtemplate\"], \"patched\", \"failed to patch template, new label not found\")\n\n\t\t\/\/ delete the PodTemplate\n\t\terr = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{})\n\t\tframework.ExpectNoError(err, \"failed to delete PodTemplate\")\n\n\t\t\/\/ list the PodTemplates\n\t\tpodTemplateList, err = f.ClientSet.CoreV1().PodTemplates(\"\").List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-static=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to list PodTemplate\")\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"PodTemplate list returned items, failed to delete PodTemplate\")\n\t})\n\n\tginkgo.It(\"should delete a collection of pod templates\", func() {\n\t\tpodTemplateNames := []string{\"test-podtemplate-1\", \"test-podtemplate-2\", \"test-podtemplate-3\"}\n\n\t\tginkgo.By(\"Create set of pod templates\")\n\t\t\/\/ create a set of pod templates in test namespace\n\t\tfor _, podTemplateName := range podTemplateNames {\n\t\t\t_, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).Create(context.TODO(), &v1.PodTemplate{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: podTemplateName,\n\t\t\t\t\tLabels: map[string]string{\"podtemplate-set\": \"true\"},\n\t\t\t\t},\n\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t{Name: \"token-test\", Image: imageutils.GetE2EImage(imageutils.Agnhost)},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, metav1.CreateOptions{})\n\t\t\tframework.ExpectNoError(err, \"failed to create pod template\")\n\t\t\tframework.Logf(\"created %v\", podTemplateName)\n\t\t}\n\n\t\tginkgo.By(\"get a list of pod templates with a label in the current namespace\")\n\t\t\/\/ get a list of pod templates\n\t\tpodTemplateList, err := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-set=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to get a list of pod templates\")\n\n\t\tframework.ExpectEqual(len(podTemplateList.Items), len(podTemplateNames), \"looking for expected number of pod templates\")\n\n\t\tginkgo.By(\"delete collection of pod templates\")\n\t\t\/\/ confirm that delete collection does remove all pod templates\n\n\t\terr = wait.PollImmediate(podTemplateRetryPeriod, podTemplateRetryTimeout, deletePodTemplateCollection(f, \"podtemplate-set=true\"))\n\t\tframework.ExpectNoError(err, \"failed to delete collection\")\n\n\t\tginkgo.By(\"get a list of pod templates with a label in the current namespace\")\n\t\t\/\/ get list of pod templates\n\t\tpodTemplateList, err = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: \"podtemplate-set=true\",\n\t\t})\n\t\tframework.ExpectNoError(err, \"failed to get a list of pod templates\")\n\n\t\tframework.ExpectEqual(len(podTemplateList.Items), 0, \"pod templates should all be deleted\")\n\n\t})\n\n})\n\nfunc deletePodTemplateCollection(f *framework.Framework, label string) func() (bool, error) {\n\treturn func() (bool, error) {\n\t\tvar err error\n\n\t\tframework.Logf(\"requesting DeleteCollection of pod templates\")\n\n\t\terr = f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{\n\t\t\tLabelSelector: label})\n\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t} else {\n\t\t\treturn true, nil\n\t\t}\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/argoproj\/argo-cd\/errors\"\n\t. \"github.com\/argoproj\/argo-cd\/pkg\/apis\/application\/v1alpha1\"\n\t\"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\"\n\t\"github.com\/argoproj\/argo-cd\/util\/json\"\n)\n\n\/\/ this implements the \"when\" part of given\/when\/then\n\/\/\n\/\/ none of the func implement error checks, and that is complete intended, you should check for errors\n\/\/ using the Then()\ntype Actions struct {\n\tcontext *Context\n\tlastOutput string\n\tlastError error\n\tignoreErrors bool\n}\n\nfunc (a *Actions) IgnoreErrors() *Actions {\n\ta.ignoreErrors = true\n\treturn a\n}\nfunc (a *Actions) DoNotIgnoreErrors() *Actions {\n\ta.ignoreErrors = false\n\treturn a\n}\n\nfunc (a *Actions) DoNotIgnoreErrors() *Actions {\n\ta.ignoreErrors = false\n\treturn a\n}\n\nfunc (a *Actions) PatchFile(file string, jsonPath string) *Actions {\n\ta.context.t.Helper()\n\tfixture.Patch(a.context.path+\"\/\"+file, jsonPath)\n\treturn a\n}\n\nfunc (a *Actions) DeleteFile(file string) *Actions {\n\ta.context.t.Helper()\n\tfixture.Delete(a.context.path + \"\/\" + file)\n\treturn a\n}\n\nfunc (a *Actions) AddFile(fileName, fileContents string) *Actions {\n\ta.context.t.Helper()\n\tfixture.AddFile(a.context.path+\"\/\"+fileName, fileContents)\n\treturn a\n}\n\nfunc (a *Actions) CreateFromFile(handler func(app *Application)) *Actions {\n\ta.context.t.Helper()\n\tapp := &Application{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: a.context.name,\n\t\t},\n\t\tSpec: ApplicationSpec{\n\t\t\tProject: a.context.project,\n\t\t\tSource: ApplicationSource{\n\t\t\t\tRepoURL: fixture.RepoURL(a.context.repoURLType),\n\t\t\t\tPath: a.context.path,\n\t\t\t},\n\t\t\tDestination: ApplicationDestination{\n\t\t\t\tServer: a.context.destServer,\n\t\t\t\tNamespace: fixture.DeploymentNamespace(),\n\t\t\t},\n\t\t},\n\t}\n\tif a.context.env != \"\" {\n\t\tapp.Spec.Source.Ksonnet = &ApplicationSourceKsonnet{\n\t\t\tEnvironment: a.context.env,\n\t\t}\n\t}\n\tif a.context.namePrefix != \"\" {\n\t\tapp.Spec.Source.Kustomize = &ApplicationSourceKustomize{\n\t\t\tNamePrefix: a.context.namePrefix,\n\t\t}\n\t}\n\tif a.context.configManagementPlugin != \"\" {\n\t\tapp.Spec.Source.Plugin = &ApplicationSourcePlugin{\n\t\t\tName: a.context.configManagementPlugin,\n\t\t}\n\t}\n\n\tif len(a.context.jsonnetTLAStr) > 0 || len(a.context.parameters) > 0 {\n\t\tlogrus.Fatal(\"Application parameters or json tlas are not supported\")\n\t}\n\n\thandler(app)\n\tdata := json.MustMarshal(app)\n\ttmpFile, err := ioutil.TempFile(\"\", \"\")\n\terrors.CheckError(err)\n\t_, err = tmpFile.Write(data)\n\terrors.CheckError(err)\n\n\ta.runCli(\"app\", \"create\", \"-f\", tmpFile.Name())\n\treturn a\n}\n\nfunc (a *Actions) Create() *Actions {\n\ta.context.t.Helper()\n\targs := []string{\n\t\t\"app\", \"create\", a.context.name,\n\t\t\"--repo\", fixture.RepoURL(a.context.repoURLType),\n\t\t\"--path\", a.context.path,\n\t\t\"--dest-server\", a.context.destServer,\n\t\t\"--dest-namespace\", fixture.DeploymentNamespace(),\n\t}\n\n\tif a.context.env != \"\" {\n\t\targs = append(args, \"--env\", a.context.env)\n\t}\n\n\tfor _, parameter := range a.context.parameters {\n\t\targs = append(args, \"--parameter\", parameter)\n\t}\n\n\targs = append(args, \"--project\", a.context.project)\n\n\tfor _, jsonnetTLAParameter := range a.context.jsonnetTLAStr {\n\t\targs = append(args, \"--jsonnet-tla-str\", jsonnetTLAParameter)\n\t}\n\n\tfor _, jsonnetTLAParameter := range a.context.jsonnetTLACode {\n\t\targs = append(args, \"--jsonnet-tla-code\", jsonnetTLAParameter)\n\t}\n\n\tif a.context.namePrefix != \"\" {\n\t\targs = append(args, \"--nameprefix\", a.context.namePrefix)\n\t}\n\n\tif a.context.configManagementPlugin != \"\" {\n\t\targs = append(args, \"--config-management-plugin\", a.context.configManagementPlugin)\n\t}\n\n\ta.runCli(args...)\n\n\treturn a\n}\n\nfunc (a *Actions) Declarative(filename string) *Actions {\n\ta.context.t.Helper()\n\treturn a.DeclarativeWithCustomRepo(filename, fixture.RepoURL(a.context.repoURLType))\n}\n\nfunc (a *Actions) DeclarativeWithCustomRepo(filename string, repoURL string) *Actions {\n\ta.context.t.Helper()\n\tvalues := map[string]interface{}{\n\t\t\"ArgoCDNamespace\": fixture.ArgoCDNamespace,\n\t\t\"DeploymentNamespace\": fixture.DeploymentNamespace(),\n\t\t\"Name\": a.context.name,\n\t\t\"Path\": a.context.path,\n\t\t\"Project\": a.context.project,\n\t\t\"RepoURL\": repoURL,\n\t}\n\ta.lastOutput, a.lastError = fixture.Declarative(filename, values)\n\ta.verifyAction()\n\treturn a\n}\n\nfunc (a *Actions) PatchApp(patch string) *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"patch\", a.context.name, \"--patch\", patch)\n\treturn a\n}\n\nfunc (a *Actions) AppSet(flags ...string) *Actions {\n\ta.context.t.Helper()\n\targs := []string{\"app\", \"set\", a.context.name}\n\targs = append(args, flags...)\n\ta.runCli(args...)\n\treturn a\n}\n\nfunc (a *Actions) Sync() *Actions {\n\ta.context.t.Helper()\n\targs := []string{\"app\", \"sync\", a.context.name, \"--timeout\", fmt.Sprintf(\"%v\", a.context.timeout)}\n\n\tif a.context.async {\n\t\targs = append(args, \"--async\")\n\t}\n\n\tif a.context.prune {\n\t\targs = append(args, \"--prune\")\n\t}\n\n\tif a.context.resource != \"\" {\n\t\targs = append(args, \"--resource\", a.context.resource)\n\t}\n\n\tif a.context.localPath != \"\" {\n\t\targs = append(args, \"--local\", a.context.localPath)\n\t}\n\n\tif a.context.force {\n\t\targs = append(args, \"--force\")\n\t}\n\n\ta.runCli(args...)\n\n\treturn a\n}\n\nfunc (a *Actions) TerminateOp() *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"terminate-op\", a.context.name)\n\treturn a\n}\n\nfunc (a *Actions) Refresh(refreshType RefreshType) *Actions {\n\ta.context.t.Helper()\n\tflag := map[RefreshType]string{\n\t\tRefreshTypeNormal: \"--refresh\",\n\t\tRefreshTypeHard: \"--hard-refresh\",\n\t}[refreshType]\n\n\ta.runCli(\"app\", \"get\", a.context.name, flag)\n\n\treturn a\n}\n\nfunc (a *Actions) Delete(cascade bool) *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"delete\", a.context.name, fmt.Sprintf(\"--cascade=%v\", cascade))\n\treturn a\n}\n\nfunc (a *Actions) And(block func()) *Actions {\n\ta.context.t.Helper()\n\tblock()\n\treturn a\n}\n\nfunc (a *Actions) Then() *Consequences {\n\ta.context.t.Helper()\n\treturn &Consequences{a.context, a}\n}\n\nfunc (a *Actions) runCli(args ...string) {\n\ta.context.t.Helper()\n\ta.lastOutput, a.lastError = fixture.RunCli(args...)\n\ta.verifyAction()\n}\n\nfunc (a *Actions) verifyAction() {\n\ta.context.t.Helper()\n\tif !a.ignoreErrors {\n\t\ta.Then().Expect(Success(\"\"))\n\t}\n}\n<commit_msg>Remove duplicated DoNotIgnoreErrors method (#2196)<commit_after>package app\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/argoproj\/argo-cd\/errors\"\n\t. \"github.com\/argoproj\/argo-cd\/pkg\/apis\/application\/v1alpha1\"\n\t\"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\"\n\t\"github.com\/argoproj\/argo-cd\/util\/json\"\n)\n\n\/\/ this implements the \"when\" part of given\/when\/then\n\/\/\n\/\/ none of the func implement error checks, and that is complete intended, you should check for errors\n\/\/ using the Then()\ntype Actions struct {\n\tcontext *Context\n\tlastOutput string\n\tlastError error\n\tignoreErrors bool\n}\n\nfunc (a *Actions) IgnoreErrors() *Actions {\n\ta.ignoreErrors = true\n\treturn a\n}\n\nfunc (a *Actions) DoNotIgnoreErrors() *Actions {\n\ta.ignoreErrors = false\n\treturn a\n}\n\nfunc (a *Actions) PatchFile(file string, jsonPath string) *Actions {\n\ta.context.t.Helper()\n\tfixture.Patch(a.context.path+\"\/\"+file, jsonPath)\n\treturn a\n}\n\nfunc (a *Actions) DeleteFile(file string) *Actions {\n\ta.context.t.Helper()\n\tfixture.Delete(a.context.path + \"\/\" + file)\n\treturn a\n}\n\nfunc (a *Actions) AddFile(fileName, fileContents string) *Actions {\n\ta.context.t.Helper()\n\tfixture.AddFile(a.context.path+\"\/\"+fileName, fileContents)\n\treturn a\n}\n\nfunc (a *Actions) CreateFromFile(handler func(app *Application)) *Actions {\n\ta.context.t.Helper()\n\tapp := &Application{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: a.context.name,\n\t\t},\n\t\tSpec: ApplicationSpec{\n\t\t\tProject: a.context.project,\n\t\t\tSource: ApplicationSource{\n\t\t\t\tRepoURL: fixture.RepoURL(a.context.repoURLType),\n\t\t\t\tPath: a.context.path,\n\t\t\t},\n\t\t\tDestination: ApplicationDestination{\n\t\t\t\tServer: a.context.destServer,\n\t\t\t\tNamespace: fixture.DeploymentNamespace(),\n\t\t\t},\n\t\t},\n\t}\n\tif a.context.env != \"\" {\n\t\tapp.Spec.Source.Ksonnet = &ApplicationSourceKsonnet{\n\t\t\tEnvironment: a.context.env,\n\t\t}\n\t}\n\tif a.context.namePrefix != \"\" {\n\t\tapp.Spec.Source.Kustomize = &ApplicationSourceKustomize{\n\t\t\tNamePrefix: a.context.namePrefix,\n\t\t}\n\t}\n\tif a.context.configManagementPlugin != \"\" {\n\t\tapp.Spec.Source.Plugin = &ApplicationSourcePlugin{\n\t\t\tName: a.context.configManagementPlugin,\n\t\t}\n\t}\n\n\tif len(a.context.jsonnetTLAStr) > 0 || len(a.context.parameters) > 0 {\n\t\tlogrus.Fatal(\"Application parameters or json tlas are not supported\")\n\t}\n\n\thandler(app)\n\tdata := json.MustMarshal(app)\n\ttmpFile, err := ioutil.TempFile(\"\", \"\")\n\terrors.CheckError(err)\n\t_, err = tmpFile.Write(data)\n\terrors.CheckError(err)\n\n\ta.runCli(\"app\", \"create\", \"-f\", tmpFile.Name())\n\treturn a\n}\n\nfunc (a *Actions) Create() *Actions {\n\ta.context.t.Helper()\n\targs := []string{\n\t\t\"app\", \"create\", a.context.name,\n\t\t\"--repo\", fixture.RepoURL(a.context.repoURLType),\n\t\t\"--path\", a.context.path,\n\t\t\"--dest-server\", a.context.destServer,\n\t\t\"--dest-namespace\", fixture.DeploymentNamespace(),\n\t}\n\n\tif a.context.env != \"\" {\n\t\targs = append(args, \"--env\", a.context.env)\n\t}\n\n\tfor _, parameter := range a.context.parameters {\n\t\targs = append(args, \"--parameter\", parameter)\n\t}\n\n\targs = append(args, \"--project\", a.context.project)\n\n\tfor _, jsonnetTLAParameter := range a.context.jsonnetTLAStr {\n\t\targs = append(args, \"--jsonnet-tla-str\", jsonnetTLAParameter)\n\t}\n\n\tfor _, jsonnetTLAParameter := range a.context.jsonnetTLACode {\n\t\targs = append(args, \"--jsonnet-tla-code\", jsonnetTLAParameter)\n\t}\n\n\tif a.context.namePrefix != \"\" {\n\t\targs = append(args, \"--nameprefix\", a.context.namePrefix)\n\t}\n\n\tif a.context.configManagementPlugin != \"\" {\n\t\targs = append(args, \"--config-management-plugin\", a.context.configManagementPlugin)\n\t}\n\n\ta.runCli(args...)\n\n\treturn a\n}\n\nfunc (a *Actions) Declarative(filename string) *Actions {\n\ta.context.t.Helper()\n\treturn a.DeclarativeWithCustomRepo(filename, fixture.RepoURL(a.context.repoURLType))\n}\n\nfunc (a *Actions) DeclarativeWithCustomRepo(filename string, repoURL string) *Actions {\n\ta.context.t.Helper()\n\tvalues := map[string]interface{}{\n\t\t\"ArgoCDNamespace\": fixture.ArgoCDNamespace,\n\t\t\"DeploymentNamespace\": fixture.DeploymentNamespace(),\n\t\t\"Name\": a.context.name,\n\t\t\"Path\": a.context.path,\n\t\t\"Project\": a.context.project,\n\t\t\"RepoURL\": repoURL,\n\t}\n\ta.lastOutput, a.lastError = fixture.Declarative(filename, values)\n\ta.verifyAction()\n\treturn a\n}\n\nfunc (a *Actions) PatchApp(patch string) *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"patch\", a.context.name, \"--patch\", patch)\n\treturn a\n}\n\nfunc (a *Actions) AppSet(flags ...string) *Actions {\n\ta.context.t.Helper()\n\targs := []string{\"app\", \"set\", a.context.name}\n\targs = append(args, flags...)\n\ta.runCli(args...)\n\treturn a\n}\n\nfunc (a *Actions) Sync() *Actions {\n\ta.context.t.Helper()\n\targs := []string{\"app\", \"sync\", a.context.name, \"--timeout\", fmt.Sprintf(\"%v\", a.context.timeout)}\n\n\tif a.context.async {\n\t\targs = append(args, \"--async\")\n\t}\n\n\tif a.context.prune {\n\t\targs = append(args, \"--prune\")\n\t}\n\n\tif a.context.resource != \"\" {\n\t\targs = append(args, \"--resource\", a.context.resource)\n\t}\n\n\tif a.context.localPath != \"\" {\n\t\targs = append(args, \"--local\", a.context.localPath)\n\t}\n\n\tif a.context.force {\n\t\targs = append(args, \"--force\")\n\t}\n\n\ta.runCli(args...)\n\n\treturn a\n}\n\nfunc (a *Actions) TerminateOp() *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"terminate-op\", a.context.name)\n\treturn a\n}\n\nfunc (a *Actions) Refresh(refreshType RefreshType) *Actions {\n\ta.context.t.Helper()\n\tflag := map[RefreshType]string{\n\t\tRefreshTypeNormal: \"--refresh\",\n\t\tRefreshTypeHard: \"--hard-refresh\",\n\t}[refreshType]\n\n\ta.runCli(\"app\", \"get\", a.context.name, flag)\n\n\treturn a\n}\n\nfunc (a *Actions) Delete(cascade bool) *Actions {\n\ta.context.t.Helper()\n\ta.runCli(\"app\", \"delete\", a.context.name, fmt.Sprintf(\"--cascade=%v\", cascade))\n\treturn a\n}\n\nfunc (a *Actions) And(block func()) *Actions {\n\ta.context.t.Helper()\n\tblock()\n\treturn a\n}\n\nfunc (a *Actions) Then() *Consequences {\n\ta.context.t.Helper()\n\treturn &Consequences{a.context, a}\n}\n\nfunc (a *Actions) runCli(args ...string) {\n\ta.context.t.Helper()\n\ta.lastOutput, a.lastError = fixture.RunCli(args...)\n\ta.verifyAction()\n}\n\nfunc (a *Actions) verifyAction() {\n\ta.context.t.Helper()\n\tif !a.ignoreErrors {\n\t\ta.Then().Expect(Success(\"\"))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gorest\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tContentType = \"Content-Type\"\n\tJsonContentType = \"application\/json\"\n\tTextContentType = \"application\/text\"\n\tXmlContentType = \"application\/xml\"\n\ttimeOutSeconds = 10\n)\n\ntype Rest struct {\n\thttpClient http.Client\n\tbaseurl string\n\tverb string\n\theaders http.Header\n\turiparams []string\n\turl string\n\tqueryvalues url.Values\n\tpayload io.Reader\n}\n\nfunc New() *Rest {\n\treturn &Rest{\n\t\thttpClient: http.Client{Timeout: time.Second * timeOutSeconds},\n\t\theaders: make(map[string][]string),\n\t\tqueryvalues: make(url.Values),\n\t}\n}\n\nfunc (client *Rest) Get() *Rest {\n\tclient.verb = \"GET\"\n\treturn client\n}\n\nfunc (client *Rest) Post(payload interface{}) *Rest {\n\tclient.verb = \"POST\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Put(payload interface{}) *Rest {\n\tclient.verb = \"PUT\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Delete() *Rest {\n\tclient.verb = \"DELETE\"\n\treturn client\n}\n\nfunc (client *Rest) Patch(payload interface{}) *Rest {\n\tclient.verb = \"PATCH\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Head() *Rest {\n\tclient.verb = \"HEAD\"\n\treturn client\n}\n\nfunc (client *Rest) Option() *Rest {\n\tclient.verb = \"OPTIONS\"\n\treturn client\n}\n\nfunc (client *Rest) Path(param string) *Rest {\n\tif client.baseurl == \"\" {\n\t\tpanic(\"BASE URL Not Present\")\n\t} else if strings.Contains(param, \"\/\") {\n\t\tpanic(\"Incorrect PATH Param\")\n\t}\n\tclient.uriparams = append(client.uriparams, param)\n\treturn client\n}\n\nfunc (client *Rest) Base(baseurl string) *Rest {\n\tv, err := url.Parse(baseurl)\n\tif err != nil {\n\t\tpanic(\"Url is incorrect\")\n\t}\n\tclient.baseurl = v.String()\n\treturn client\n}\n\nfunc (client *Rest) Header(key, value string) *Rest {\n\tclient.headers.Add(key, value)\n\treturn client\n}\n\nfunc (client *Rest) addPayload(payload interface{}) {\n\tvar b []byte\n\tb, _ = json.Marshal(payload)\n\tclient.payload = bytes.NewBuffer(b)\n\n}\n\nfunc (client *Rest) Query(options ...interface{}) *Rest {\n\tif len(options) > 0 {\n\t\tqry, ok := options[0].(map[string]string)\n\t\tif ok {\n\t\t\tfor k, v := range qry {\n\t\t\t\tclient.queryvalues.Set(k, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Request() (*http.Request, error) {\n\t\/\/Add all URI params to baseurl\n\tif !strings.HasSuffix(client.baseurl, \"\/\") {\n\t\tclient.baseurl = client.baseurl + \"\/\"\n\t}\n\n\tclient.url = client.baseurl\n\tparams := strings.Join(client.uriparams, \"\/\")\n\tclient.url = client.baseurl + params\n\tv, err := url.Parse(client.url)\n\tif err != nil {\n\t\tpanic(\"Complete URL is not correct\")\n\t}\n\n\tclient.url = v.String()\n\n\t\/\/Adding Query String\n\tvar requrl url.URL\n\trequrl.Path = client.url\n\trequrl.RawQuery = client.queryvalues.Encode()\n\tfmt.Println(requrl.String())\n\n\treq, err := http.NewRequest(client.verb, requrl.String(), client.payload)\n\tif err != nil {\n\t\tpanic(\"Request Object is not proper\")\n\t}\n\n\t\/\/Add headers to the Request\n\tfor key, values := range client.headers {\n\t\tfor _, value := range values {\n\t\t\treq.Header.Add(key, value)\n\t\t}\n\n\t}\n\treturn req, err\n}\n\nfunc (client *Rest) ResponseBodyString(response *http.Response) string {\n\tresponsedata, _ := ioutil.ReadAll(response.Body)\n\treturn string(responsedata)\n}\n\nfunc (client *Rest) Send(req *http.Request, successM, failureM interface{}) (*http.Response, error) {\n\tresponse, err := client.httpClient.Do(req)\n\n\tif err != nil {\n\t\tpanic(\"Send request Failed\")\n\t}\n\treturn response, err\n}\n<commit_msg>introducing COPY HTTP Verb<commit_after>package gorest\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tContentType = \"Content-Type\"\n\tJsonContentType = \"application\/json\"\n\tTextContentType = \"application\/text\"\n\tXmlContentType = \"application\/xml\"\n\ttimeOutSeconds = 10\n)\n\ntype Rest struct {\n\thttpClient http.Client\n\tbaseurl string\n\tverb string\n\theaders http.Header\n\turiparams []string\n\turl string\n\tqueryvalues url.Values\n\tpayload io.Reader\n}\n\nfunc New() *Rest {\n\treturn &Rest{\n\t\thttpClient: http.Client{Timeout: time.Second * timeOutSeconds},\n\t\theaders: make(map[string][]string),\n\t\tqueryvalues: make(url.Values),\n\t}\n}\n\nfunc (client *Rest) Get() *Rest {\n\tclient.verb = \"GET\"\n\treturn client\n}\n\nfunc (client *Rest) Post(payload interface{}) *Rest {\n\tclient.verb = \"POST\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Put(payload interface{}) *Rest {\n\tclient.verb = \"PUT\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Delete(payload interface{}) *Rest {\n\tclient.verb = \"DELETE\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Patch(payload interface{}) *Rest {\n\tclient.verb = \"PATCH\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Head() *Rest {\n\tclient.verb = \"HEAD\"\n\treturn client\n}\n\nfunc (client *Rest) Option(payload interface{}) *Rest {\n\tclient.verb = \"OPTIONS\"\n\tif payload != nil {\n\t\tclient.addPayload(payload)\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Copy() *Rest {\n\tclient.verb = \"COPY\"\n\treturn client\n}\n\nfunc (client *Rest) Path(param string) *Rest {\n\tif client.baseurl == \"\" {\n\t\tpanic(\"BASE URL Not Present\")\n\t} else if strings.Contains(param, \"\/\") {\n\t\tpanic(\"Incorrect PATH Param\")\n\t}\n\tclient.uriparams = append(client.uriparams, param)\n\treturn client\n}\n\nfunc (client *Rest) Base(baseurl string) *Rest {\n\tv, err := url.Parse(baseurl)\n\tif err != nil {\n\t\tpanic(\"Url is incorrect\")\n\t}\n\tclient.baseurl = v.String()\n\treturn client\n}\n\nfunc (client *Rest) Header(key, value string) *Rest {\n\tclient.headers.Add(key, value)\n\treturn client\n}\n\nfunc (client *Rest) addPayload(payload interface{}) {\n\tvar b []byte\n\tb, _ = json.Marshal(payload)\n\tclient.payload = bytes.NewBuffer(b)\n\n}\n\nfunc (client *Rest) Query(options ...interface{}) *Rest {\n\tif len(options) > 0 {\n\t\tqry, ok := options[0].(map[string]string)\n\t\tif ok {\n\t\t\tfor k, v := range qry {\n\t\t\t\tclient.queryvalues.Set(k, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn client\n}\n\nfunc (client *Rest) Request() (*http.Request, error) {\n\t\/\/Add all URI params to baseurl\n\tif !strings.HasSuffix(client.baseurl, \"\/\") {\n\t\tclient.baseurl = client.baseurl + \"\/\"\n\t}\n\n\tclient.url = client.baseurl\n\tparams := strings.Join(client.uriparams, \"\/\")\n\tclient.url = client.baseurl + params\n\tv, err := url.Parse(client.url)\n\tif err != nil {\n\t\tpanic(\"Complete URL is not correct\")\n\t}\n\n\tclient.url = v.String()\n\n\t\/\/Adding Query String\n\tvar requrl url.URL\n\trequrl.Path = client.url\n\trequrl.RawQuery = client.queryvalues.Encode()\n\tfmt.Println(requrl.String())\n\n\treq, err := http.NewRequest(client.verb, requrl.String(), client.payload)\n\tif err != nil {\n\t\tpanic(\"Request Object is not proper\")\n\t}\n\n\t\/\/Add headers to the Request\n\tfor key, values := range client.headers {\n\t\tfor _, value := range values {\n\t\t\treq.Header.Add(key, value)\n\t\t}\n\n\t}\n\treturn req, err\n}\n\nfunc (client *Rest) ResponseBodyString(response *http.Response) string {\n\tresponsedata, _ := ioutil.ReadAll(response.Body)\n\treturn string(responsedata)\n}\n\nfunc (client *Rest) Send(req *http.Request, successM, failureM interface{}) (*http.Response, error) {\n\tresponse, err := client.httpClient.Do(req)\n\n\tif err != nil {\n\t\tpanic(\"Send request Failed\")\n\t}\n\treturn response, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The http2amqp Authors. All rights reserved. Use of this\n\/\/ source code is governed by a MIT-style license that can be found in the\n\/\/ LICENSE file.\n\npackage queries_service_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/aleasoluciones\/http2amqp\/queries_service\/mocks\"\n\t\"github.com\/aleasoluciones\/simpleamqp\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"time\"\n)\n\nconst (\n\tA_TOPIC = \"a-topic\"\n)\n\nvar _ = Describe(\"Queries service\", func() {\n\tIt(\"query returns response when a response for the query has been received\", func() {\n\t\t\/\/ eventPublisher.publish A_TPIC, values\n\t\t\/\/ waits for a respons\n\t\tamqpResponses := make(chan simpleamqp.AmqpMessage)\n\t\tamqpConsumer := new(mocks.AMQPConsumer)\n\t\tamqpConsumer.On(\"Receive\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(amqpResponses)\n\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tamqpResponses <- newAmqpResponse([]string{\"foo\", \"bar\"})\n\t\t}()\n\n\t\tqueriesService := NewQueriesService(amqpConsumer)\n\t\tresponse := queriesService.Query(A_TOPIC, map[string]interface{}{\"q\": \"foo\"})\n\n\t\tExpect(response.Content).To(ConsistOf(\"foo\", \"bar\"))\n\t})\n})\n\nfunc NewQueriesService(amqpConsumer simpleamqp.AMQPConsumer) QueriesService {\n\treturn &queriesServiceT{amqpConsumer: amqpConsumer}\n}\n\ntype QueriesService interface {\n\tQuery(topic string, criteria map[string]interface{}) queryResponse\n}\n\ntype queriesServiceT struct {\n\tamqpConsumer simpleamqp.AMQPConsumer\n}\n\nfunc (service *queriesServiceT) Query(topic string, criteria map[string]interface{}) queryResponse {\n\tamqpResponses := service.amqpConsumer.Receive(\"foo exchange\", []string{\"foo routing key\"}, \" no se\", simpleamqp.QueueOptions{}, 10)\n\tamqpResponse := <-amqpResponses\n\tvar content interface{}\n\t_ = json.Unmarshal([]byte(amqpResponse.Body), &content)\n\treturn queryResponse{Content: content}\n}\n\ntype queryResponse struct {\n\tContent interface{}\n}\n\nfunc newAmqpResponse(response interface{}) simpleamqp.AmqpMessage {\n\tserialized, _ := json.Marshal(response)\n\treturn simpleamqp.AmqpMessage{Body: string(serialized)}\n}\n<commit_msg>Test timeout error<commit_after>\/\/ Copyright 2015 The http2amqp Authors. All rights reserved. Use of this\n\/\/ source code is governed by a MIT-style license that can be found in the\n\/\/ LICENSE file.\n\npackage queries_service_test\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/aleasoluciones\/http2amqp\/queries_service\/mocks\"\n\t\"github.com\/aleasoluciones\/simpleamqp\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\nconst (\n\tA_TOPIC = \"a-topic\"\n\tA_QUERY_TIMEOUT = 10 * time.Millisecond\n)\n\nvar _ = Describe(\"Queries service\", func() {\n\tDescribe(\"Query\", func() {\n\t\tvar (\n\t\t\tamqpResponses chan simpleamqp.AmqpMessage\n\t\t\tqueriesService QueriesService\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tamqpResponses = make(chan simpleamqp.AmqpMessage)\n\t\t\tamqpConsumer := new(mocks.AMQPConsumer)\n\t\t\tamqpConsumer.On(\"Receive\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(amqpResponses)\n\n\t\t\tqueriesService = NewQueriesService(amqpConsumer, A_QUERY_TIMEOUT)\n\t\t})\n\n\t\tIt(\"returns response when a response for the query has been received\", func() {\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(A_QUERY_TIMEOUT \/ time.Duration(2))\n\t\t\t\tamqpResponses <- newAmqpResponse([]string{\"foo\", \"bar\"})\n\t\t\t}()\n\n\t\t\tresponse := queriesService.Query(A_TOPIC, map[string]interface{}{\"q\": \"foo\"})\n\n\t\t\tExpect(response.Error).NotTo(HaveOccurred())\n\t\t\tExpect(response.Content).To(ConsistOf(\"foo\", \"bar\"))\n\t\t})\n\n\t\tIt(\"returns timeout error when no response for the query has been received in time\", func() {\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(A_QUERY_TIMEOUT * time.Duration(2))\n\t\t\t\tamqpResponses <- newAmqpResponse([]string{\"foo\", \"bar\"})\n\t\t\t}()\n\n\t\t\tresponse := queriesService.Query(A_TOPIC, map[string]interface{}{\"q\": \"foo\"})\n\n\t\t\tExpect(response.Error).To(MatchError(\"Timeout\"))\n\t\t\tExpect(response.Content).To(BeNil())\n\t\t})\n\t})\n})\n\nfunc NewQueriesService(amqpConsumer simpleamqp.AMQPConsumer, timeout time.Duration) QueriesService {\n\treturn &queriesServiceT{\n\t\tamqpConsumer: amqpConsumer,\n\t\tqueryTimeout: timeout,\n\t}\n}\n\ntype QueriesService interface {\n\tQuery(topic string, criteria map[string]interface{}) queryResponse\n}\n\ntype queriesServiceT struct {\n\tamqpConsumer simpleamqp.AMQPConsumer\n\tqueryTimeout time.Duration\n}\n\nfunc (service *queriesServiceT) Query(topic string, criteria map[string]interface{}) queryResponse {\n\tvar content interface{}\n\tvar err error\n\n\tamqpResponses := service.amqpConsumer.Receive(\"foo exchange\", []string{\"foo routing key\"}, \" no se\", simpleamqp.QueueOptions{}, 10)\n\n\tafterTimeoutTicker := time.NewTicker(service.queryTimeout)\n\tdefer afterTimeoutTicker.Stop()\n\tafterTimeout := afterTimeoutTicker.C\n\n\tselect {\n\tcase amqpResponse := <-amqpResponses:\n\t\t_ = json.Unmarshal([]byte(amqpResponse.Body), &content)\n\tcase <-afterTimeout:\n\t\terr = errors.New(\"Timeout\")\n\t}\n\n\treturn queryResponse{Content: content, Error: err}\n}\n\ntype queryResponse struct {\n\tContent interface{}\n\tError error\n}\n\nfunc newAmqpResponse(response interface{}) simpleamqp.AmqpMessage {\n\tserialized, _ := json.Marshal(response)\n\treturn simpleamqp.AmqpMessage{Body: string(serialized)}\n}\n<|endoftext|>"} {"text":"<commit_before>package alert\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\t\"github.com\/openshift\/origin\/pkg\/synthetictests\/allowedalerts\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/disruption\"\n\thelper \"github.com\/openshift\/origin\/test\/extended\/util\/prometheus\"\n\tprometheusv1 \"github.com\/prometheus\/client_golang\/api\/prometheus\/v1\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/upgrades\"\n)\n\n\/\/ UpgradeTest runs verifies invariants regarding what alerts are allowed to fire\n\/\/ during the upgrade process.\ntype UpgradeTest struct {\n\toc *exutil.CLI\n\tprometheusClient prometheusv1.API\n}\n\nfunc (UpgradeTest) Name() string { return \"check-for-alerts\" }\nfunc (UpgradeTest) DisplayName() string {\n\treturn \"[sig-arch] Check if alerts are firing during or after upgrade success\"\n}\n\n\/\/ Setup creates parameters to query Prometheus\nfunc (t *UpgradeTest) Setup(f *framework.Framework) {\n\tg.By(\"Setting up upgrade alert test\")\n\n\toc := exutil.NewCLIWithFramework(f)\n\tt.oc = oc\n\tt.prometheusClient = oc.NewPrometheusClient(context.TODO())\n\tframework.Logf(\"Post-upgrade alert test setup complete\")\n}\n\n\/\/ Test checks if alerts are firing at various points during upgrade.\n\/\/ An alert firing during an upgrade is a high severity bug - it either points to a real issue in\n\/\/ a dependency, or a failure of the component, and therefore must be fixed.\nfunc (t *UpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\ttolerateDuringSkew := exutil.TolerateVersionSkewInTests()\n\tfiringAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"openshift-apiserver\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"authentication\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"machine-config\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1955300\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"authentication\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDaemonSetRolloutStuck\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1943667\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeAPIErrorBudgetBurn\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1953798\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"AggregatedAPIDown\", \"namespace\": \"default\", \"name\": \"v1beta1.metrics.k8s.io\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1970624\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"prometheus-k8s\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1949262\",\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"alertmanager-main\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1955489\",\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed, or after that bug is fixed in a backport to the previous minor.\n\t\t\tSelector: map[string]string{\"alertname\": \"ExtremelyHighIndividualControlPlaneCPU\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1985073\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeJobFailed\", \"namespace\": \"openshift-multus\"}, \/\/ not sure how to do a job_name prefix\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2054426\",\n\t\t},\n\t}\n\tallowedFiringAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"TargetDown\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDeploymentReplicasMismatch\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t}\n\n\tpendingAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterMonitoringOperatorReconciliationErrors\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1932624\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeClientErrors\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1925698\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"NetworkPodsCrashLooping\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2009078\",\n\t\t},\n\t}\n\tallowedPendingAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"etcdMemberCommunicationSlow\"},\n\t\t\tText: \"Excluded because it triggers during upgrade (detects ~5m of high latency immediately preceeding the end of the test), and we don't want to change the alert because it is correct\",\n\t\t},\n\t}\n\n\t\/\/ we exclude alerts that have their own separate tests.\n\tfor _, alertTest := range allowedalerts.AllAlertTests() {\n\t\tswitch alertTest.AlertState() {\n\t\tcase allowedalerts.AlertPending:\n\t\t\t\/\/ a pending test covers pending and everything above (firing)\n\t\t\tallowedPendingAlerts = append(allowedPendingAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t\tallowedFiringAlerts = append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\tcase allowedalerts.AlertInfo:\n\t\t\t\/\/ an info test covers all firing\n\t\t\tallowedFiringAlerts = append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t}\n\n\tknownViolations := sets.NewString()\n\tunexpectedViolations := sets.NewString()\n\tunexpectedViolationsAsFlakes := sets.NewString()\n\tdebug := sets.NewString()\n\n\tg.By(\"Checking for alerts\")\n\n\tstart := time.Now()\n\n\t\/\/ Block until upgrade is done\n\tg.By(\"Waiting for upgrade to finish before checking for alerts\")\n\t<-done\n\n\t\/\/ Additonal delay after upgrade completion to allow pending alerts to settle\n\tg.By(\"Waiting before checking for alerts\")\n\ttime.Sleep(1 * time.Minute)\n\n\ttestDuration := time.Now().Sub(start).Round(time.Second)\n\n\t\/\/ Invariant: No non-info level alerts should have fired during the upgrade\n\tfiringAlertQuery := fmt.Sprintf(`\nsort_desc(\ncount_over_time(ALERTS{alertstate=\"firing\",severity!=\"info\",alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\"}[%[1]s:1s])\n) > 0\n`, testDuration)\n\tresult, err := helper.RunQuery(context.TODO(), t.prometheusClient, firingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to check firing alerts during upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s fired for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedFiringAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := firingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\tunexpectedViolations.Insert(violation)\n\t\t}\n\t}\n\n\t\/\/ Invariant: There should be no pending alerts 1m after the upgrade completes\n\tpendingAlertQuery := fmt.Sprintf(`\nsort_desc(\n time() * ALERTS + 1\n -\n last_over_time((\n time() * ALERTS{alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\",alertstate=\"pending\",severity!=\"info\"}\n unless\n ALERTS offset 1s\n )[%[1]s:1s])\n)\n`, testDuration)\n\tresult, err = helper.RunQuery(context.TODO(), t.prometheusClient, pendingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to retrieve pending alerts after upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s pending for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedPendingAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := pendingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\t\/\/ treat pending errors as a flake right now because we are still trying to determine the scope\n\t\t\t\/\/ TODO: move this to unexpectedViolations later\n\t\t\tunexpectedViolationsAsFlakes.Insert(violation)\n\t\t}\n\t}\n\n\tif len(debug) > 0 {\n\t\tframework.Logf(\"Alerts were detected during upgrade which are allowed:\\n\\n%s\", strings.Join(debug.List(), \"\\n\"))\n\t}\n\tif len(unexpectedViolations) > 0 {\n\t\tif !tolerateDuringSkew {\n\t\t\tframework.Failf(\"Unexpected alerts fired or pending during the upgrade:\\n\\n%s\", strings.Join(unexpectedViolations.List(), \"\\n\"))\n\t\t}\n\t}\n\tif flakes := sets.NewString().Union(knownViolations).Union(unexpectedViolations).Union(unexpectedViolationsAsFlakes); len(flakes) > 0 {\n\t\tdisruption.FrameworkFlakef(f, \"Unexpected alert behavior during upgrade:\\n\\n%s\", strings.Join(flakes.List(), \"\\n\"))\n\t}\n\tframework.Logf(\"No alerts fired during upgrade\")\n}\n\n\/\/ Teardown cleans up any remaining resources.\nfunc (t *UpgradeTest) Teardown(f *framework.Framework) {\n\t\/\/ rely on the namespace deletion to clean up everything\n}\n<commit_msg>Improve ignore for ip-reconciler KubeJobFailed alert.<commit_after>package alert\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\t\"github.com\/openshift\/origin\/pkg\/synthetictests\/allowedalerts\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/disruption\"\n\thelper \"github.com\/openshift\/origin\/test\/extended\/util\/prometheus\"\n\tprometheusv1 \"github.com\/prometheus\/client_golang\/api\/prometheus\/v1\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/upgrades\"\n)\n\n\/\/ UpgradeTest runs verifies invariants regarding what alerts are allowed to fire\n\/\/ during the upgrade process.\ntype UpgradeTest struct {\n\toc *exutil.CLI\n\tprometheusClient prometheusv1.API\n}\n\nfunc (UpgradeTest) Name() string { return \"check-for-alerts\" }\nfunc (UpgradeTest) DisplayName() string {\n\treturn \"[sig-arch] Check if alerts are firing during or after upgrade success\"\n}\n\n\/\/ Setup creates parameters to query Prometheus\nfunc (t *UpgradeTest) Setup(f *framework.Framework) {\n\tg.By(\"Setting up upgrade alert test\")\n\n\toc := exutil.NewCLIWithFramework(f)\n\tt.oc = oc\n\tt.prometheusClient = oc.NewPrometheusClient(context.TODO())\n\tframework.Logf(\"Post-upgrade alert test setup complete\")\n}\n\n\/\/ Test checks if alerts are firing at various points during upgrade.\n\/\/ An alert firing during an upgrade is a high severity bug - it either points to a real issue in\n\/\/ a dependency, or a failure of the component, and therefore must be fixed.\nfunc (t *UpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\ttolerateDuringSkew := exutil.TolerateVersionSkewInTests()\n\tfiringAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-kube-apiserver-operator\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"openshift-apiserver\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"authentication\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDown\", \"name\": \"machine-config\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1955300\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterOperatorDegraded\", \"name\": \"authentication\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1939580\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDaemonSetRolloutStuck\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1943667\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeAPIErrorBudgetBurn\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1953798\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"AggregatedAPIDown\", \"namespace\": \"default\", \"name\": \"v1beta1.metrics.k8s.io\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1970624\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"prometheus-k8s\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1949262\",\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed.\n\t\t\tSelector: map[string]string{\"alertname\": \"HighlyAvailableWorkloadIncorrectlySpread\", \"namespace\": \"openshift-monitoring\", \"workload\": \"alertmanager-main\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1955489\",\n\t\t},\n\t\t{\n\t\t\t\/\/ Should be removed one release after the attached bugzilla is fixed, or after that bug is fixed in a backport to the previous minor.\n\t\t\tSelector: map[string]string{\"alertname\": \"ExtremelyHighIndividualControlPlaneCPU\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1985073\",\n\t\t\tMatches: func(_ *model.Sample) bool {\n\t\t\t\treturn framework.ProviderIs(\"gce\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeJobFailed\", \"namespace\": \"openshift-multus\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2054426\",\n\t\t\tMatches: func(sample *model.Sample) bool {\n\t\t\t\t\/\/ Only match if the job_name label starts with ip-reconciler:\n\t\t\t\tif strings.HasPrefix(string(sample.Metric[model.LabelName(\"job_name\")]), \"ip-reconciler-\") {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t}\n\tallowedFiringAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"TargetDown\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubePodNotReady\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeDeploymentReplicasMismatch\", \"namespace\": \"openshift-e2e-loki\"},\n\t\t\tText: \"Loki is nice to have, but we can allow it to be down\",\n\t\t},\n\t}\n\n\tpendingAlertsWithBugs := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"ClusterMonitoringOperatorReconciliationErrors\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1932624\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"KubeClientErrors\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1925698\",\n\t\t},\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"NetworkPodsCrashLooping\"},\n\t\t\tText: \"https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2009078\",\n\t\t},\n\t}\n\tallowedPendingAlerts := helper.MetricConditions{\n\t\t{\n\t\t\tSelector: map[string]string{\"alertname\": \"etcdMemberCommunicationSlow\"},\n\t\t\tText: \"Excluded because it triggers during upgrade (detects ~5m of high latency immediately preceeding the end of the test), and we don't want to change the alert because it is correct\",\n\t\t},\n\t}\n\n\t\/\/ we exclude alerts that have their own separate tests.\n\tfor _, alertTest := range allowedalerts.AllAlertTests() {\n\t\tswitch alertTest.AlertState() {\n\t\tcase allowedalerts.AlertPending:\n\t\t\t\/\/ a pending test covers pending and everything above (firing)\n\t\t\tallowedPendingAlerts = append(allowedPendingAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t\tallowedFiringAlerts = append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\tcase allowedalerts.AlertInfo:\n\t\t\t\/\/ an info test covers all firing\n\t\t\tallowedFiringAlerts = append(allowedFiringAlerts,\n\t\t\t\thelper.MetricCondition{\n\t\t\t\t\tSelector: map[string]string{\"alertname\": alertTest.AlertName()},\n\t\t\t\t\tText: \"has a separate e2e test\",\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t}\n\n\tknownViolations := sets.NewString()\n\tunexpectedViolations := sets.NewString()\n\tunexpectedViolationsAsFlakes := sets.NewString()\n\tdebug := sets.NewString()\n\n\tg.By(\"Checking for alerts\")\n\n\tstart := time.Now()\n\n\t\/\/ Block until upgrade is done\n\tg.By(\"Waiting for upgrade to finish before checking for alerts\")\n\t<-done\n\n\t\/\/ Additonal delay after upgrade completion to allow pending alerts to settle\n\tg.By(\"Waiting before checking for alerts\")\n\ttime.Sleep(1 * time.Minute)\n\n\ttestDuration := time.Now().Sub(start).Round(time.Second)\n\n\t\/\/ Invariant: No non-info level alerts should have fired during the upgrade\n\tfiringAlertQuery := fmt.Sprintf(`\nsort_desc(\ncount_over_time(ALERTS{alertstate=\"firing\",severity!=\"info\",alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\"}[%[1]s:1s])\n) > 0\n`, testDuration)\n\tresult, err := helper.RunQuery(context.TODO(), t.prometheusClient, firingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to check firing alerts during upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s fired for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedFiringAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := firingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\tunexpectedViolations.Insert(violation)\n\t\t}\n\t}\n\n\t\/\/ Invariant: There should be no pending alerts 1m after the upgrade completes\n\tpendingAlertQuery := fmt.Sprintf(`\nsort_desc(\n time() * ALERTS + 1\n -\n last_over_time((\n time() * ALERTS{alertname!~\"Watchdog|AlertmanagerReceiversNotConfigured\",alertstate=\"pending\",severity!=\"info\"}\n unless\n ALERTS offset 1s\n )[%[1]s:1s])\n)\n`, testDuration)\n\tresult, err = helper.RunQuery(context.TODO(), t.prometheusClient, pendingAlertQuery)\n\to.Expect(err).NotTo(o.HaveOccurred(), \"unable to retrieve pending alerts after upgrade\")\n\tfor _, series := range result.Data.Result {\n\t\tlabels := helper.StripLabels(series.Metric, \"alertname\", \"alertstate\", \"prometheus\")\n\t\tviolation := fmt.Sprintf(\"alert %s pending for %s seconds with labels: %s\", series.Metric[\"alertname\"], series.Value, helper.LabelsAsSelector(labels))\n\t\tif cause := allowedPendingAlerts.Matches(series); cause != nil {\n\t\t\tdebug.Insert(fmt.Sprintf(\"%s (allowed: %s)\", violation, cause.Text))\n\t\t\tcontinue\n\t\t}\n\t\tif cause := pendingAlertsWithBugs.Matches(series); cause != nil {\n\t\t\tknownViolations.Insert(fmt.Sprintf(\"%s (open bug: %s)\", violation, cause.Text))\n\t\t} else {\n\t\t\t\/\/ treat pending errors as a flake right now because we are still trying to determine the scope\n\t\t\t\/\/ TODO: move this to unexpectedViolations later\n\t\t\tunexpectedViolationsAsFlakes.Insert(violation)\n\t\t}\n\t}\n\n\tif len(debug) > 0 {\n\t\tframework.Logf(\"Alerts were detected during upgrade which are allowed:\\n\\n%s\", strings.Join(debug.List(), \"\\n\"))\n\t}\n\tif len(unexpectedViolations) > 0 {\n\t\tif !tolerateDuringSkew {\n\t\t\tframework.Failf(\"Unexpected alerts fired or pending during the upgrade:\\n\\n%s\", strings.Join(unexpectedViolations.List(), \"\\n\"))\n\t\t}\n\t}\n\tif flakes := sets.NewString().Union(knownViolations).Union(unexpectedViolations).Union(unexpectedViolationsAsFlakes); len(flakes) > 0 {\n\t\tdisruption.FrameworkFlakef(f, \"Unexpected alert behavior during upgrade:\\n\\n%s\", strings.Join(flakes.List(), \"\\n\"))\n\t}\n\tframework.Logf(\"No alerts fired during upgrade\")\n}\n\n\/\/ Teardown cleans up any remaining resources.\nfunc (t *UpgradeTest) Teardown(f *framework.Framework) {\n\t\/\/ rely on the namespace deletion to clean up everything\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/rs\/cors\"\n\t\/\/ postgres driver for migrate\n\t_ \"github.com\/mattes\/migrate\/driver\/postgres\"\n\t\"github.com\/nuveo\/prest\/config\"\n\tcfgMiddleware \"github.com\/nuveo\/prest\/config\/middlewares\"\n\t\"github.com\/nuveo\/prest\/config\/router\"\n\t\"github.com\/nuveo\/prest\/controllers\"\n\t\"github.com\/nuveo\/prest\/middlewares\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/urfave\/negroni\"\n)\n\nvar cfgFile string\nvar prestConfig config.Prest\n\n\/\/ RootCmd represents the base command when called without any subcommands\nvar RootCmd = &cobra.Command{\n\tUse: \"prest\",\n\tShort: \"Serve a RESTful API from any PostgreSQL database\",\n\tLong: `Serve a RESTful API from any PostgreSQL database, start HTTP server`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tapp()\n\t},\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\nfunc init() {\n\tconfig.InitConf()\n}\n\nfunc app() {\n\tcfg := config.Prest{}\n\n\terr := config.Parse(&cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing conf: %s\", err)\n\t}\n\n\tn := cfgMiddleware.GetApp()\n\n\tr := router.Get()\n\n\tr.HandleFunc(\"\/databases\", controllers.GetDatabases).Methods(\"GET\")\n\tr.HandleFunc(\"\/schemas\", controllers.GetSchemas).Methods(\"GET\")\n\tr.HandleFunc(\"\/tables\", controllers.GetTables).Methods(\"GET\")\n\tr.HandleFunc(\"\/_QUERIES\/{queriesLocation}\/{script}\", controllers.ExecuteFromScripts)\n\tr.HandleFunc(\"\/{database}\/{schema}\", controllers.GetTablesByDatabaseAndSchema).Methods(\"GET\")\n\n\tcrudRoutes := mux.NewRouter().PathPrefix(\"\/\").Subrouter().StrictSlash(true)\n\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.SelectFromTables).Methods(\"GET\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.InsertInTables).Methods(\"POST\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.DeleteFromTable).Methods(\"DELETE\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.UpdateTable).Methods(\"PUT\", \"PATCH\")\n\n\tr.PathPrefix(\"\/\").Handler(negroni.New(\n\t\tmiddlewares.AccessControl(),\n\t\tnegroni.Wrap(crudRoutes),\n\t))\n\n\tif cfg.CORSAllowOrigin != nil {\n\t\tc := cors.New(cors.Options{\n\t\t\tAllowedOrigins: cfg.CORSAllowOrigin,\n\t\t})\n\t\tn.Use(c)\n\t}\n\n\tn.UseHandler(r)\n\tn.Run(fmt.Sprintf(\":%v\", cfg.HTTPPort))\n}\n<commit_msg>Refactoring to prevent multiple configuration loads (#131)<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/rs\/cors\"\n\t\/\/ postgres driver for migrate\n\t_ \"github.com\/mattes\/migrate\/driver\/postgres\"\n\t\"github.com\/nuveo\/prest\/config\"\n\tcfgMiddleware \"github.com\/nuveo\/prest\/config\/middlewares\"\n\t\"github.com\/nuveo\/prest\/config\/router\"\n\t\"github.com\/nuveo\/prest\/controllers\"\n\t\"github.com\/nuveo\/prest\/middlewares\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/urfave\/negroni\"\n)\n\nvar cfgFile string\nvar prestConfig config.Prest\n\n\/\/ RootCmd represents the base command when called without any subcommands\nvar RootCmd = &cobra.Command{\n\tUse: \"prest\",\n\tShort: \"Serve a RESTful API from any PostgreSQL database\",\n\tLong: `Serve a RESTful API from any PostgreSQL database, start HTTP server`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tapp()\n\t},\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\nfunc app() {\n\tn := cfgMiddleware.GetApp()\n\tr := router.Get()\n\n\tr.HandleFunc(\"\/databases\", controllers.GetDatabases).Methods(\"GET\")\n\tr.HandleFunc(\"\/schemas\", controllers.GetSchemas).Methods(\"GET\")\n\tr.HandleFunc(\"\/tables\", controllers.GetTables).Methods(\"GET\")\n\tr.HandleFunc(\"\/_QUERIES\/{queriesLocation}\/{script}\", controllers.ExecuteFromScripts)\n\tr.HandleFunc(\"\/{database}\/{schema}\", controllers.GetTablesByDatabaseAndSchema).Methods(\"GET\")\n\n\tcrudRoutes := mux.NewRouter().PathPrefix(\"\/\").Subrouter().StrictSlash(true)\n\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.SelectFromTables).Methods(\"GET\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.InsertInTables).Methods(\"POST\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.DeleteFromTable).Methods(\"DELETE\")\n\tcrudRoutes.HandleFunc(\"\/{database}\/{schema}\/{table}\", controllers.UpdateTable).Methods(\"PUT\", \"PATCH\")\n\n\tr.PathPrefix(\"\/\").Handler(negroni.New(\n\t\tmiddlewares.AccessControl(),\n\t\tnegroni.Wrap(crudRoutes),\n\t))\n\n\tif config.PrestConf.CORSAllowOrigin != nil {\n\t\tc := cors.New(cors.Options{\n\t\t\tAllowedOrigins: config.PrestConf.CORSAllowOrigin,\n\t\t})\n\t\tn.Use(c)\n\t}\n\n\tn.UseHandler(r)\n\tn.Run(fmt.Sprintf(\":%v\", config.PrestConf.HTTPPort))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Xorm Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage xorm\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/go-xorm\/core\"\n)\n\n\/\/ Rows rows wrapper a rows to\ntype Rows struct {\n\tNoTypeCheck bool\n\n\tsession *Session\n\tstmt *core.Stmt\n\trows *core.Rows\n\tfields []string\n\tfieldsCount int\n\tbeanType reflect.Type\n\tlastError error\n}\n\nfunc newRows(session *Session, bean interface{}) (*Rows, error) {\n\trows := new(Rows)\n\trows.session = session\n\trows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type()\n\n\tdefer rows.session.resetStatement()\n\n\tvar sqlStr string\n\tvar args []interface{}\n\n\trows.session.Statement.setRefValue(rValue(bean))\n\tif len(session.Statement.TableName()) <= 0 {\n\t\treturn nil, ErrTableNotFound\n\t}\n\n\tif rows.session.Statement.RawSQL == \"\" {\n\t\tsqlStr, args = rows.session.Statement.genGetSQL(bean)\n\t} else {\n\t\tsqlStr = rows.session.Statement.RawSQL\n\t\targs = rows.session.Statement.RawParams\n\t}\n\n\tfor _, filter := range rows.session.Engine.dialect.Filters() {\n\t\tsqlStr = filter.Do(sqlStr, session.Engine.dialect, rows.session.Statement.RefTable)\n\t}\n\n\trows.session.saveLastSQL(sqlStr, args...)\n\tvar err error\n\tif rows.session.prepareStmt {\n\t\trows.stmt, err = rows.session.DB().Prepare(sqlStr)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\trows.rows, err = rows.stmt.Query(args...)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\trows.rows, err = rows.session.DB().Query(sqlStr, args...)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\trows.fields, err = rows.rows.Columns()\n\tif err != nil {\n\t\trows.lastError = err\n\t\trows.Close()\n\t\treturn nil, err\n\t}\n\trows.fieldsCount = len(rows.fields)\n\n\treturn rows, nil\n}\n\n\/\/ Next move cursor to next record, return false if end has reached\nfunc (rows *Rows) Next() bool {\n\tif rows.lastError == nil && rows.rows != nil {\n\t\thasNext := rows.rows.Next()\n\t\tif !hasNext {\n\t\t\trows.lastError = sql.ErrNoRows\n\t\t}\n\t\treturn hasNext\n\t}\n\treturn false\n}\n\n\/\/ Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close.\nfunc (rows *Rows) Err() error {\n\treturn rows.lastError\n}\n\n\/\/ Scan row record to bean properties\nfunc (rows *Rows) Scan(bean interface{}) error {\n\tif rows.lastError != nil {\n\t\treturn rows.lastError\n\t}\n\n\tif !rows.NoTypeCheck && reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType {\n\t\treturn fmt.Errorf(\"scan arg is incompatible type to [%v]\", rows.beanType)\n\t}\n\n\t_, err := rows.session.row2Bean(rows.rows, rows.fields, rows.fieldsCount, bean)\n\treturn err\n}\n\n\/\/ Close session if session.IsAutoClose is true, and claimed any opened resources\nfunc (rows *Rows) Close() error {\n\tif rows.session.IsAutoClose {\n\t\tdefer rows.session.Close()\n\t}\n\n\tif rows.lastError == nil {\n\t\tif rows.rows != nil {\n\t\t\trows.lastError = rows.rows.Close()\n\t\t\tif rows.lastError != nil {\n\t\t\t\tdefer rows.stmt.Close()\n\t\t\t\treturn rows.lastError\n\t\t\t}\n\t\t}\n\t\tif rows.stmt != nil {\n\t\t\trows.lastError = rows.stmt.Close()\n\t\t}\n\t} else {\n\t\tif rows.stmt != nil {\n\t\t\tdefer rows.stmt.Close()\n\t\t}\n\t\tif rows.rows != nil {\n\t\t\tdefer rows.rows.Close()\n\t\t}\n\t}\n\treturn rows.lastError\n}\n<commit_msg>remove unused field of rows<commit_after>\/\/ Copyright 2015 The Xorm Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage xorm\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/go-xorm\/core\"\n)\n\n\/\/ Rows rows wrapper a rows to\ntype Rows struct {\n\tNoTypeCheck bool\n\n\tsession *Session\n\tstmt *core.Stmt\n\trows *core.Rows\n\tfields []string\n\tbeanType reflect.Type\n\tlastError error\n}\n\nfunc newRows(session *Session, bean interface{}) (*Rows, error) {\n\trows := new(Rows)\n\trows.session = session\n\trows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type()\n\n\tdefer rows.session.resetStatement()\n\n\tvar sqlStr string\n\tvar args []interface{}\n\n\trows.session.Statement.setRefValue(rValue(bean))\n\tif len(session.Statement.TableName()) <= 0 {\n\t\treturn nil, ErrTableNotFound\n\t}\n\n\tif rows.session.Statement.RawSQL == \"\" {\n\t\tsqlStr, args = rows.session.Statement.genGetSQL(bean)\n\t} else {\n\t\tsqlStr = rows.session.Statement.RawSQL\n\t\targs = rows.session.Statement.RawParams\n\t}\n\n\tfor _, filter := range rows.session.Engine.dialect.Filters() {\n\t\tsqlStr = filter.Do(sqlStr, session.Engine.dialect, rows.session.Statement.RefTable)\n\t}\n\n\trows.session.saveLastSQL(sqlStr, args...)\n\tvar err error\n\tif rows.session.prepareStmt {\n\t\trows.stmt, err = rows.session.DB().Prepare(sqlStr)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\trows.rows, err = rows.stmt.Query(args...)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\trows.rows, err = rows.session.DB().Query(sqlStr, args...)\n\t\tif err != nil {\n\t\t\trows.lastError = err\n\t\t\trows.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\trows.fields, err = rows.rows.Columns()\n\tif err != nil {\n\t\trows.lastError = err\n\t\trows.Close()\n\t\treturn nil, err\n\t}\n\n\treturn rows, nil\n}\n\n\/\/ Next move cursor to next record, return false if end has reached\nfunc (rows *Rows) Next() bool {\n\tif rows.lastError == nil && rows.rows != nil {\n\t\thasNext := rows.rows.Next()\n\t\tif !hasNext {\n\t\t\trows.lastError = sql.ErrNoRows\n\t\t}\n\t\treturn hasNext\n\t}\n\treturn false\n}\n\n\/\/ Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close.\nfunc (rows *Rows) Err() error {\n\treturn rows.lastError\n}\n\n\/\/ Scan row record to bean properties\nfunc (rows *Rows) Scan(bean interface{}) error {\n\tif rows.lastError != nil {\n\t\treturn rows.lastError\n\t}\n\n\tif !rows.NoTypeCheck && reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType {\n\t\treturn fmt.Errorf(\"scan arg is incompatible type to [%v]\", rows.beanType)\n\t}\n\n\t_, err := rows.session.row2Bean(rows.rows, rows.fields, len(rows.fields), bean)\n\treturn err\n}\n\n\/\/ Close session if session.IsAutoClose is true, and claimed any opened resources\nfunc (rows *Rows) Close() error {\n\tif rows.session.IsAutoClose {\n\t\tdefer rows.session.Close()\n\t}\n\n\tif rows.lastError == nil {\n\t\tif rows.rows != nil {\n\t\t\trows.lastError = rows.rows.Close()\n\t\t\tif rows.lastError != nil {\n\t\t\t\tdefer rows.stmt.Close()\n\t\t\t\treturn rows.lastError\n\t\t\t}\n\t\t}\n\t\tif rows.stmt != nil {\n\t\t\trows.lastError = rows.stmt.Close()\n\t\t}\n\t} else {\n\t\tif rows.stmt != nil {\n\t\t\tdefer rows.stmt.Close()\n\t\t}\n\t\tif rows.rows != nil {\n\t\t\tdefer rows.rows.Close()\n\t\t}\n\t}\n\treturn rows.lastError\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\t\"github.com\/labstack\/gommon\/color\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nvar authDisabled bool\nvar dbFile = \"scan.db\"\n\ntype port struct {\n\tPort int `json:\"port\"`\n\tProto string `json:\"proto\"`\n\tStatus string `json:\"status\"`\n\tService struct {\n\t\tName string `json:\"name\"`\n\t\tBanner string `json:\"banner\"`\n\t} `json:\"service\"`\n}\n\n\/\/ Results posted from masscan\ntype result struct {\n\tIP string `json:\"ip\"`\n\tPorts []port `json:\"ports\"`\n}\n\n\/\/ Data retrieved from the database for display\ntype ipInfo struct {\n\tIP string\n\tPort int\n\tProto string\n\tFirstSeen string\n\tLastSeen string\n\tNew bool\n}\n\n\/\/ Load all data for displaying in the browser\nfunc load(s, fs, ls string) ([]ipInfo, error) {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\treturn []ipInfo{}, err\n\t}\n\tdefer db.Close()\n\n\tvar where string\n\tvar cond []string\n\tvar params []interface{}\n\tif s != \"\" {\n\t\tcond = append(cond, `ip LIKE ?`)\n\t\tparams = append(params, fmt.Sprintf(\"%%%s%%\", s))\n\t}\n\tif fs != \"\" {\n\t\tcond = append(cond, `firstseen= ?`)\n\t\tparams = append(params, fs)\n\t}\n\tif ls != \"\" {\n\t\tcond = append(cond, `lastseen= ?`)\n\t\tparams = append(params, ls)\n\t}\n\tif len(cond) > 0 {\n\t\twhere = fmt.Sprintf(\"WHERE %s\", strings.Join(cond, \" AND \"))\n\t}\n\n\tqry := fmt.Sprintf(`SELECT ip, port, proto, firstseen, lastseen FROM scan %s ORDER BY port, proto, ip, lastseen`, where)\n\trows, err := db.Query(qry, params...)\n\tif err != nil {\n\t\treturn []ipInfo{}, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar data []ipInfo\n\tvar ip, proto, firstseen, lastseen string\n\tvar port int\n\tvar latest time.Time\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&ip, &port, &proto, &firstseen, &lastseen)\n\t\tif err != nil {\n\t\t\treturn []ipInfo{}, err\n\t\t}\n\t\tlast, _ := time.Parse(\"2006-01-02 15:04\", lastseen)\n\t\tif last.After(latest) {\n\t\t\tlatest = last\n\t\t}\n\t\tdata = append(data, ipInfo{ip, port, proto, firstseen, lastseen, false})\n\t}\n\n\tfor i := range data {\n\t\tf, _ := time.Parse(\"2006-01-02 15:04\", data[i].FirstSeen)\n\t\tl, _ := time.Parse(\"2006-01-02 15:04\", data[i].LastSeen)\n\t\tif f.Equal(l) && l == latest {\n\t\t\tdata[i].New = true\n\t\t}\n\t}\n\n\treturn data, nil\n}\n\n\/\/ Save the results posted\nfunc save(results []result) error {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsert, err := txn.Prepare(`INSERT INTO scan (ip, port, proto, firstseen, lastseen) VALUES (?, ?, ?, ?, ?)`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\tqry, err := db.Prepare(`SELECT 1 FROM scan WHERE ip=? AND port=? AND proto=?`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\tupdate, err := txn.Prepare(`UPDATE scan SET lastseen=? WHERE ip=? AND port=? AND proto=?`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\tnow := time.Now()\n\tnowString := now.Format(\"2006-01-02 15:04\")\n\n\tfor _, r := range results {\n\t\t\/\/ Although it's an array, only one port is in each\n\t\tport := r.Ports[0]\n\n\t\t\/\/ Search for the IP\/port\/proto combo\n\t\t\/\/ If it exists, update `lastseen`, else insert a new record\n\n\t\t\/\/ Because we have to scan into something\n\t\tvar x int\n\t\terr := qry.QueryRow(r.IP, port.Port, port.Proto).Scan(&x)\n\t\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\t_, err = insert.Exec(r.IP, port.Port, port.Proto, nowString, nowString)\n\t\t\tif err != nil {\n\t\t\t\ttxn.Rollback()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\tcase err != nil:\n\t\t\ttxn.Rollback()\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = update.Exec(nowString, r.IP, port.Port, port.Proto)\n\t\tif err != nil {\n\t\t\ttxn.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttxn.Commit()\n\treturn nil\n}\n\n\/\/ Template is a template\ntype Template struct {\n\ttemplates *template.Template\n}\n\n\/\/ Render renders template\nfunc (t *Template) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}\n\ntype indexData struct {\n\tAuthenticated bool\n\tUser string\n\tTotal int\n\tLatest int\n\tNew int\n\tLastSeen string\n\tResults []ipInfo\n}\n\n\/\/ Handler for GET \/\nfunc index(c echo.Context) error {\n\tvar user string\n\tif !authDisabled {\n\t\tsession, err := store.Get(c.Request(), \"user\")\n\t\tif err != nil {\n\t\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t\t}\n\t\tif _, ok := session.Values[\"user\"]; !ok {\n\t\t\tdata := indexData{}\n\t\t\treturn c.Render(http.StatusOK, \"index\", data)\n\t\t}\n\t\tuser = session.Values[\"user\"].(string)\n\t}\n\n\tip := c.QueryParam(\"ip\")\n\tfirstSeen := c.QueryParam(\"firstseen\")\n\tlastSeen := c.QueryParam(\"lastseen\")\n\tresults, err := load(ip, firstSeen, lastSeen)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\tdata := indexData{Authenticated: true, User: user, Results: results, Total: len(results)}\n\n\ttimeFmt := \"2006-01-02 15:04\"\n\n\t\/\/ Find all the latest results and store the number in the struct\n\tvar latest time.Time\n\tfor _, r := range results {\n\t\tlast, _ := time.Parse(timeFmt, r.LastSeen)\n\t\tif last.After(latest) {\n\t\t\tlatest = last\n\t\t}\n\t}\n\tfor _, r := range results {\n\t\tlast, _ := time.Parse(timeFmt, r.LastSeen)\n\t\tif last.Equal(latest) {\n\t\t\tdata.Latest++\n\t\t}\n\t\tif r.New {\n\t\t\tdata.New++\n\t\t}\n\t}\n\tdata.LastSeen = latest.Format(timeFmt)\n\n\treturn c.Render(http.StatusOK, \"index\", data)\n}\n\n\/\/ Handler for GET \/ips.json\n\/\/ This is used as the prefetch for Typeahead.js\nfunc ips(c echo.Context) error {\n\tdata, err := load(\"\", \"\", \"\")\n\tif err != nil {\n\t\tc.String(http.StatusInternalServerError, err.Error())\n\t}\n\tvar ips []string\n\tfor _, r := range data {\n\t\tips = append(ips, r.IP)\n\t}\n\treturn c.JSON(http.StatusOK, ips)\n}\n\n\/\/ Handler for POST \/results\nfunc recvResults(c echo.Context) error {\n\tres := new([]result)\n\terr := c.Bind(res)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\terr = save(*res)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn c.NoContent(http.StatusOK)\n}\n\nfunc main() {\n\tflag.BoolVar(&authDisabled, \"no-auth\", false, \"Disable authentication\")\n\thttpAddr := flag.String(\"http.addr\", \":80\", \"HTTP address:port\")\n\thttpsAddr := flag.String(\"https.addr\", \":443\", \"HTTPS address:port\")\n\ttls := flag.Bool(\"tls\", false, \"Enable AutoTLS\")\n\ttlsHostname := flag.String(\"tls.hostname\", \"\", \"(Optional) Hostname to restrict AutoTLS\")\n\tflag.Parse()\n\n\tt := &Template{\n\t\ttemplates: template.Must(template.ParseGlob(\"views\/*.html\")),\n\t}\n\n\te := echo.New()\n\n\tif authDisabled {\n\t\tcolor.Println(color.Red(\"Authentication Disabled\"))\n\t}\n\n\tif *tls {\n\t\tif *tlsHostname != \"\" {\n\t\t\te.AutoTLSManager.HostPolicy = autocert.HostWhitelist(*tlsHostname)\n\t\t}\n\t\te.AutoTLSManager.Cache = autocert.DirCache(\".cache\")\n\t\te.Pre(middleware.HTTPSRedirect())\n\t}\n\n\te.Renderer = t\n\te.Use(middleware.Logger())\n\te.GET(\"\/\", index)\n\te.GET(\"\/auth\", authHandler)\n\te.GET(\"\/login\", loginHandler)\n\te.GET(\"\/ips.json\", ips)\n\te.POST(\"\/results\", recvResults)\n\te.Static(\"\/static\", \"static\")\n\n\tif *tls {\n\t\tgo func() { e.Logger.Fatal(e.Start(*httpAddr)) }()\n\t\te.Logger.Fatal(e.StartAutoTLS(*httpsAddr))\n\t}\n\te.Logger.Fatal(e.Start(*httpAddr))\n}\n<commit_msg>Split results parsing from index handler<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\t\"github.com\/labstack\/gommon\/color\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nvar authDisabled bool\nvar dbFile = \"scan.db\"\n\ntype port struct {\n\tPort int `json:\"port\"`\n\tProto string `json:\"proto\"`\n\tStatus string `json:\"status\"`\n\tService struct {\n\t\tName string `json:\"name\"`\n\t\tBanner string `json:\"banner\"`\n\t} `json:\"service\"`\n}\n\n\/\/ Results posted from masscan\ntype result struct {\n\tIP string `json:\"ip\"`\n\tPorts []port `json:\"ports\"`\n}\n\n\/\/ Data retrieved from the database for display\ntype ipInfo struct {\n\tIP string\n\tPort int\n\tProto string\n\tFirstSeen string\n\tLastSeen string\n\tNew bool\n}\n\n\/\/ Load all data for displaying in the browser\nfunc load(s, fs, ls string) ([]ipInfo, error) {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\treturn []ipInfo{}, err\n\t}\n\tdefer db.Close()\n\n\tvar where string\n\tvar cond []string\n\tvar params []interface{}\n\tif s != \"\" {\n\t\tcond = append(cond, `ip LIKE ?`)\n\t\tparams = append(params, fmt.Sprintf(\"%%%s%%\", s))\n\t}\n\tif fs != \"\" {\n\t\tcond = append(cond, `firstseen= ?`)\n\t\tparams = append(params, fs)\n\t}\n\tif ls != \"\" {\n\t\tcond = append(cond, `lastseen= ?`)\n\t\tparams = append(params, ls)\n\t}\n\tif len(cond) > 0 {\n\t\twhere = fmt.Sprintf(\"WHERE %s\", strings.Join(cond, \" AND \"))\n\t}\n\n\tqry := fmt.Sprintf(`SELECT ip, port, proto, firstseen, lastseen FROM scan %s ORDER BY port, proto, ip, lastseen`, where)\n\trows, err := db.Query(qry, params...)\n\tif err != nil {\n\t\treturn []ipInfo{}, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar data []ipInfo\n\tvar ip, proto, firstseen, lastseen string\n\tvar port int\n\tvar latest time.Time\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&ip, &port, &proto, &firstseen, &lastseen)\n\t\tif err != nil {\n\t\t\treturn []ipInfo{}, err\n\t\t}\n\t\tlast, _ := time.Parse(\"2006-01-02 15:04\", lastseen)\n\t\tif last.After(latest) {\n\t\t\tlatest = last\n\t\t}\n\t\tdata = append(data, ipInfo{ip, port, proto, firstseen, lastseen, false})\n\t}\n\n\tfor i := range data {\n\t\tf, _ := time.Parse(\"2006-01-02 15:04\", data[i].FirstSeen)\n\t\tl, _ := time.Parse(\"2006-01-02 15:04\", data[i].LastSeen)\n\t\tif f.Equal(l) && l == latest {\n\t\t\tdata[i].New = true\n\t\t}\n\t}\n\n\treturn data, nil\n}\n\n\/\/ Save the results posted\nfunc save(results []result) error {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsert, err := txn.Prepare(`INSERT INTO scan (ip, port, proto, firstseen, lastseen) VALUES (?, ?, ?, ?, ?)`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\tqry, err := db.Prepare(`SELECT 1 FROM scan WHERE ip=? AND port=? AND proto=?`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\tupdate, err := txn.Prepare(`UPDATE scan SET lastseen=? WHERE ip=? AND port=? AND proto=?`)\n\tif err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\tnow := time.Now()\n\tnowString := now.Format(\"2006-01-02 15:04\")\n\n\tfor _, r := range results {\n\t\t\/\/ Although it's an array, only one port is in each\n\t\tport := r.Ports[0]\n\n\t\t\/\/ Search for the IP\/port\/proto combo\n\t\t\/\/ If it exists, update `lastseen`, else insert a new record\n\n\t\t\/\/ Because we have to scan into something\n\t\tvar x int\n\t\terr := qry.QueryRow(r.IP, port.Port, port.Proto).Scan(&x)\n\t\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\t_, err = insert.Exec(r.IP, port.Port, port.Proto, nowString, nowString)\n\t\t\tif err != nil {\n\t\t\t\ttxn.Rollback()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\tcase err != nil:\n\t\t\ttxn.Rollback()\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = update.Exec(nowString, r.IP, port.Port, port.Proto)\n\t\tif err != nil {\n\t\t\ttxn.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttxn.Commit()\n\treturn nil\n}\n\n\/\/ Template is a template\ntype Template struct {\n\ttemplates *template.Template\n}\n\n\/\/ Render renders template\nfunc (t *Template) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}\n\ntype indexData struct {\n\tAuthenticated bool\n\tUser string\n\tscanData\n}\n\ntype scanData struct {\n\tTotal int\n\tLatest int\n\tNew int\n\tLastSeen string\n\tResults []ipInfo\n}\n\nfunc resultData(ip, fs, ls string) (scanData, error) {\n\tresults, err := load(ip, fs, ls)\n\tif err != nil {\n\t\treturn scanData{}, err\n\t}\n\n\ttimeFmt := \"2006-01-02 15:04\"\n\tdata := scanData{\n\t\tResults: results,\n\t\tTotal: len(results),\n\t}\n\n\t\/\/ Find all the latest results and store the number in the struct\n\tvar latest time.Time\n\tfor _, r := range results {\n\t\tlast, _ := time.Parse(timeFmt, r.LastSeen)\n\t\tif last.After(latest) {\n\t\t\tlatest = last\n\t\t}\n\t}\n\tfor _, r := range results {\n\t\tlast, _ := time.Parse(timeFmt, r.LastSeen)\n\t\tif last.Equal(latest) {\n\t\t\tdata.Latest++\n\t\t}\n\t\tif r.New {\n\t\t\tdata.New++\n\t\t}\n\t}\n\tdata.LastSeen = latest.Format(timeFmt)\n\n\treturn data, nil\n}\n\n\/\/ Handler for GET \/\nfunc index(c echo.Context) error {\n\tvar user string\n\tif !authDisabled {\n\t\tsession, err := store.Get(c.Request(), \"user\")\n\t\tif err != nil {\n\t\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t\t}\n\t\tif _, ok := session.Values[\"user\"]; !ok {\n\t\t\tdata := indexData{}\n\t\t\treturn c.Render(http.StatusOK, \"index\", data)\n\t\t}\n\t\tuser = session.Values[\"user\"].(string)\n\t}\n\n\tip := c.QueryParam(\"ip\")\n\tfirstSeen := c.QueryParam(\"firstseen\")\n\tlastSeen := c.QueryParam(\"lastseen\")\n\n\tresults, err := resultData(ip, firstSeen, lastSeen)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\tdata := indexData{Authenticated: true, User: user, scanData: results}\n\n\treturn c.Render(http.StatusOK, \"index\", data)\n}\n\n\/\/ Handler for GET \/ips.json\n\/\/ This is used as the prefetch for Typeahead.js\nfunc ips(c echo.Context) error {\n\tdata, err := load(\"\", \"\", \"\")\n\tif err != nil {\n\t\tc.String(http.StatusInternalServerError, err.Error())\n\t}\n\tvar ips []string\n\tfor _, r := range data {\n\t\tips = append(ips, r.IP)\n\t}\n\treturn c.JSON(http.StatusOK, ips)\n}\n\n\/\/ Handler for POST \/results\nfunc recvResults(c echo.Context) error {\n\tres := new([]result)\n\terr := c.Bind(res)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\terr = save(*res)\n\tif err != nil {\n\t\treturn c.String(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn c.NoContent(http.StatusOK)\n}\n\nfunc main() {\n\tflag.BoolVar(&authDisabled, \"no-auth\", false, \"Disable authentication\")\n\thttpAddr := flag.String(\"http.addr\", \":80\", \"HTTP address:port\")\n\thttpsAddr := flag.String(\"https.addr\", \":443\", \"HTTPS address:port\")\n\ttls := flag.Bool(\"tls\", false, \"Enable AutoTLS\")\n\ttlsHostname := flag.String(\"tls.hostname\", \"\", \"(Optional) Hostname to restrict AutoTLS\")\n\tflag.Parse()\n\n\tt := &Template{\n\t\ttemplates: template.Must(template.ParseGlob(\"views\/*.html\")),\n\t}\n\n\te := echo.New()\n\n\tif authDisabled {\n\t\tcolor.Println(color.Red(\"Authentication Disabled\"))\n\t}\n\n\tif *tls {\n\t\tif *tlsHostname != \"\" {\n\t\t\te.AutoTLSManager.HostPolicy = autocert.HostWhitelist(*tlsHostname)\n\t\t}\n\t\te.AutoTLSManager.Cache = autocert.DirCache(\".cache\")\n\t\te.Pre(middleware.HTTPSRedirect())\n\t}\n\n\te.Renderer = t\n\te.Use(middleware.Logger())\n\te.GET(\"\/\", index)\n\te.GET(\"\/auth\", authHandler)\n\te.GET(\"\/login\", loginHandler)\n\te.GET(\"\/ips.json\", ips)\n\te.POST(\"\/results\", recvResults)\n\te.Static(\"\/static\", \"static\")\n\n\tif *tls {\n\t\tgo func() { e.Logger.Fatal(e.Start(*httpAddr)) }()\n\t\te.Logger.Fatal(e.StartAutoTLS(*httpsAddr))\n\t}\n\te.Logger.Fatal(e.Start(*httpAddr))\n}\n<|endoftext|>"} {"text":"<commit_before>package runner\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/garden\/client\"\n\t\"code.cloudfoundry.org\/garden\/client\/connection\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/lager\/lagertest\"\n\t\"github.com\/eapache\/go-resiliency\/retrier\"\n\t\"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nconst MNT_DETACH = 0x2\n\nvar RootFSPath = os.Getenv(\"GARDEN_TEST_ROOTFS\")\nvar DataDir = os.Getenv(\"GARDEN_TEST_GRAPHPATH\")\nvar TarBin = os.Getenv(\"GARDEN_TAR_PATH\")\n\ntype RunningGarden struct {\n\tclient.Client\n\n\trunner *ginkgomon.Runner\n\tprocess ifrit.Process\n\n\tdebugIP string\n\tdebugPort int\n\n\tPid int\n\n\tTmpdir string\n\n\tDepotDir string\n\tDataDir string\n\tGraphPath string\n\n\tlogger lager.Logger\n}\n\nfunc Start(bin, initBin, nstarBin, dadooBin string, supplyDefaultRootfs bool, argv ...string) *RunningGarden {\n\tnetwork := \"unix\"\n\taddr := fmt.Sprintf(\"\/tmp\/garden_%d.sock\", GinkgoParallelNode())\n\ttmpDir := filepath.Join(\n\t\tos.TempDir(),\n\t\tfmt.Sprintf(\"test-garden-%d\", ginkgo.GinkgoParallelNode()),\n\t)\n\n\tif DataDir == \"\" {\n\t\t\/\/ This must be set outside of the Ginkgo node directory (tmpDir) because\n\t\t\/\/ otherwise the Concourse worker may run into one of the AUFS kernel\n\t\t\/\/ module bugs that cause the VM to become unresponsive.\n\t\tDataDir = \"\/tmp\/aufs_mount\"\n\t}\n\n\tgraphPath := filepath.Join(DataDir, fmt.Sprintf(\"node-%d\", ginkgo.GinkgoParallelNode()))\n\tdepotDir := filepath.Join(tmpDir, \"containers\")\n\n\tMustMountTmpfs(graphPath)\n\n\tr := &RunningGarden{\n\t\tDepotDir: depotDir,\n\n\t\tDataDir: DataDir,\n\t\tGraphPath: graphPath,\n\t\tTmpdir: tmpDir,\n\t\tlogger: lagertest.NewTestLogger(\"garden-runner\"),\n\n\t\tClient: client.New(connection.New(network, addr)),\n\t}\n\n\tc := cmd(tmpDir, depotDir, graphPath, network, addr, bin, initBin, nstarBin, dadooBin, TarBin, supplyDefaultRootfs, argv...)\n\tc.Env = append(os.Environ(), fmt.Sprintf(\"TMPDIR=%s\", tmpDir))\n\tr.runner = ginkgomon.New(ginkgomon.Config{\n\t\tName: \"guardian\",\n\t\tCommand: c,\n\t\tAnsiColorCode: \"31m\",\n\t\tStartCheck: \"guardian.started\",\n\t\tStartCheckTimeout: 30 * time.Second,\n\t})\n\tr.process = ifrit.Invoke(r.runner)\n\tr.Pid = c.Process.Pid\n\n\tfor i, arg := range c.Args {\n\t\tif arg == \"--debug-bind-ip\" {\n\t\t\tr.debugIP = c.Args[i+1]\n\t\t}\n\t\tif arg == \"--debug-bind-port\" {\n\t\t\tr.debugPort, _ = strconv.Atoi(c.Args[i+1])\n\t\t}\n\t}\n\n\treturn r\n}\n\nfunc (r *RunningGarden) Kill() error {\n\tr.process.Signal(syscall.SIGKILL)\n\tselect {\n\tcase err := <-r.process.Wait():\n\t\treturn err\n\tcase <-time.After(time.Second * 10):\n\t\tr.process.Signal(syscall.SIGKILL)\n\t\treturn errors.New(\"timed out waiting for garden to shutdown after 10 seconds\")\n\t}\n}\n\nfunc (r *RunningGarden) DestroyAndStop() error {\n\tif err := r.DestroyContainers(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *RunningGarden) Stop() error {\n\tr.process.Signal(syscall.SIGTERM)\n\n\tvar err error\n\tfor i := 0; i < 5; i++ {\n\t\tselect {\n\t\tcase err := <-r.process.Wait():\n\t\t\treturn err\n\t\tcase <-time.After(time.Second * 5):\n\t\t\tr.process.Signal(syscall.SIGTERM)\n\t\t\terr = errors.New(\"timed out waiting for garden to shutdown after 5 seconds\")\n\t\t}\n\t}\n\n\tr.process.Signal(syscall.SIGKILL)\n\treturn err\n}\n\nfunc cmd(tmpdir, depotDir, graphPath, network, addr, bin, initBin, nstarBin, dadooBin, tarBin string, supplyDefaultRootfs bool, argv ...string) *exec.Cmd {\n\tExpect(os.MkdirAll(tmpdir, 0755)).To(Succeed())\n\n\tsnapshotsPath := filepath.Join(tmpdir, \"snapshots\")\n\n\tExpect(os.MkdirAll(depotDir, 0755)).To(Succeed())\n\n\tExpect(os.MkdirAll(snapshotsPath, 0755)).To(Succeed())\n\n\tappendDefaultFlag := func(ar []string, key, value string) []string {\n\t\tfor _, a := range argv {\n\t\t\tif a == key {\n\t\t\t\treturn ar\n\t\t\t}\n\t\t}\n\n\t\tif value != \"\" {\n\t\t\treturn append(ar, key, value)\n\t\t} else {\n\t\t\treturn append(ar, key)\n\t\t}\n\t}\n\n\tgardenArgs := make([]string, len(argv))\n\tcopy(gardenArgs, argv)\n\n\tswitch network {\n\tcase \"tcp\":\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--bind-ip\", addr)\n\tcase \"unix\":\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--bind-socket\", addr)\n\t}\n\n\tif supplyDefaultRootfs {\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--default-rootfs\", RootFSPath)\n\t}\n\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--depot\", depotDir)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--graph\", graphPath)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--tag\", fmt.Sprintf(\"%d\", GinkgoParallelNode()))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--network-pool\", fmt.Sprintf(\"10.254.%d.0\/22\", 4*GinkgoParallelNode()))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--init-bin\", initBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--dadoo-bin\", dadooBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--nstar-bin\", nstarBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--tar-bin\", tarBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--port-pool-start\", fmt.Sprintf(\"%d\", GinkgoParallelNode()*10000))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--debug-bind-ip\", \"0.0.0.0\")\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--debug-bind-port\", fmt.Sprintf(\"%d\", 8080+ginkgo.GinkgoParallelNode()))\n\n\treturn exec.Command(bin, gardenArgs...)\n}\n\nfunc (r *RunningGarden) Cleanup() {\n\t\/\/ unmount aufs since the docker graph driver leaves this around,\n\t\/\/ otherwise the following commands might fail\n\tretry := retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil)\n\n\terr := retry.Run(func() error {\n\t\tif err := os.RemoveAll(path.Join(r.GraphPath, \"aufs\")); err == nil {\n\t\t\treturn nil \/\/ if we can remove it, it's already unmounted\n\t\t}\n\n\t\tif err := syscall.Unmount(path.Join(r.GraphPath, \"aufs\"), MNT_DETACH); err != nil {\n\t\t\tr.logger.Error(\"failed-unmount-attempt\", err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tr.logger.Error(\"failed-to-unmount\", err)\n\t}\n\n\tMustUnmountTmpfs(r.GraphPath)\n\n\t\/\/ In the kernel version 3.19.0-51-generic the code bellow results in\n\t\/\/ hanging the running VM. We are not deleting the node-X directories. They\n\t\/\/ are empty and the next test will re-use them. We will stick with that\n\t\/\/ workaround until we can test on a newer kernel that will hopefully not\n\t\/\/ have this bug.\n\t\/\/\n\t\/\/ if err := os.RemoveAll(r.GraphPath); err != nil {\n\t\/\/ \tr.logger.Error(\"remove-graph\", err)\n\t\/\/ }\n\n\tr.logger.Info(\"cleanup-tempdirs\")\n\tif err := os.RemoveAll(r.Tmpdir); err != nil {\n\t\tr.logger.Error(\"cleanup-tempdirs-failed\", err, lager.Data{\"tmpdir\": r.Tmpdir})\n\t} else {\n\t\tr.logger.Info(\"tempdirs-removed\")\n\t}\n}\n\nfunc (r *RunningGarden) DestroyContainers() error {\n\tcontainers, err := r.Containers(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, container := range containers {\n\t\tr.Destroy(container.Handle())\n\t}\n\n\treturn nil\n}\n\ntype debugVars struct {\n\tNumGoRoutines int `json:\"numGoRoutines\"`\n}\n\nfunc (r *RunningGarden) NumGoroutines() (int, error) {\n\tdebugURL := fmt.Sprintf(\"http:\/\/%s:%d\/debug\/vars\", r.debugIP, r.debugPort)\n\tres, err := http.Get(debugURL)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar debugVarsData debugVars\n\terr = decoder.Decode(&debugVarsData)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn debugVarsData.NumGoRoutines, nil\n}\n\nfunc (r *RunningGarden) Buffer() *gbytes.Buffer {\n\treturn r.runner.Buffer()\n}\n\nfunc (r *RunningGarden) ExitCode() int {\n\treturn r.runner.ExitCode()\n}\n<commit_msg>Expose an ifrit.Runner that can be used to run guardian<commit_after>package runner\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/garden\/client\"\n\t\"code.cloudfoundry.org\/garden\/client\/connection\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/lager\/lagertest\"\n\t\"github.com\/eapache\/go-resiliency\/retrier\"\n\t\"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nconst MNT_DETACH = 0x2\n\nvar RootFSPath = os.Getenv(\"GARDEN_TEST_ROOTFS\")\nvar DataDir string\nvar TarBin = os.Getenv(\"GARDEN_TAR_PATH\")\n\ntype RunningGarden struct {\n\tclient.Client\n\n\trunner GardenRunner\n\tprocess ifrit.Process\n\n\tdebugIP string\n\tdebugPort int\n\n\tPid int\n\n\tTmpdir string\n\n\tDepotDir string\n\tDataDir string\n\tGraphPath string\n\n\tlogger lager.Logger\n}\n\ntype GardenRunner struct {\n\t*ginkgomon.Runner\n\tCmd *exec.Cmd\n\tTmpDir string\n\tGraphPath string\n\tDepotDir string\n\tDebugIp string\n\tDebugPort int\n\tNetwork, Addr string\n}\n\nfunc init() {\n\tDataDir = os.Getenv(\"GARDEN_TEST_GRAPHPATH\")\n\tif DataDir == \"\" {\n\t\t\/\/ This must be set outside of the Ginkgo node directory (tmpDir) because\n\t\t\/\/ otherwise the Concourse worker may run into one of the AUFS kernel\n\t\t\/\/ module bugs that cause the VM to become unresponsive.\n\t\tDataDir = \"\/tmp\/aufs_mount\"\n\t}\n}\n\nfunc NewGardenRunner(bin, initBin, nstarBin, dadooBin string, supplyDefaultRootfs bool, argv ...string) GardenRunner {\n\tr := GardenRunner{}\n\n\tr.Network = \"unix\"\n\tr.Addr = fmt.Sprintf(\"\/tmp\/garden_%d.sock\", GinkgoParallelNode())\n\n\tr.TmpDir = filepath.Join(\n\t\tos.TempDir(),\n\t\tfmt.Sprintf(\"test-garden-%d\", ginkgo.GinkgoParallelNode()),\n\t)\n\n\tr.GraphPath = filepath.Join(DataDir, fmt.Sprintf(\"node-%d\", ginkgo.GinkgoParallelNode()))\n\tr.DepotDir = filepath.Join(r.TmpDir, \"containers\")\n\n\tMustMountTmpfs(r.GraphPath)\n\n\tr.Cmd = cmd(r.TmpDir, r.DepotDir, r.GraphPath, r.Network, r.Addr, bin, initBin, nstarBin, dadooBin, TarBin, supplyDefaultRootfs, argv...)\n\tr.Cmd.Env = append(os.Environ(), fmt.Sprintf(\"TMPDIR=%s\", r.TmpDir))\n\n\tfor i, arg := range r.Cmd.Args {\n\t\tif arg == \"--debug-bind-ip\" {\n\t\t\tr.DebugIp = r.Cmd.Args[i+1]\n\t\t}\n\t\tif arg == \"--debug-bind-port\" {\n\t\t\tr.DebugPort, _ = strconv.Atoi(r.Cmd.Args[i+1])\n\t\t}\n\t}\n\n\tr.Runner = ginkgomon.New(ginkgomon.Config{\n\t\tName: \"guardian\",\n\t\tCommand: r.Cmd,\n\t\tAnsiColorCode: \"31m\",\n\t\tStartCheck: \"guardian.started\",\n\t\tStartCheckTimeout: 30 * time.Second,\n\t})\n\n\treturn r\n}\n\nfunc Start(bin, initBin, nstarBin, dadooBin string, supplyDefaultRootfs bool, argv ...string) *RunningGarden {\n\trunner := NewGardenRunner(bin, initBin, nstarBin, dadooBin, supplyDefaultRootfs, argv...)\n\n\tr := &RunningGarden{\n\t\trunner: runner,\n\t\tDepotDir: runner.DepotDir,\n\n\t\tDataDir: DataDir,\n\t\tGraphPath: runner.GraphPath,\n\t\tTmpdir: runner.TmpDir,\n\t\tlogger: lagertest.NewTestLogger(\"garden-runner\"),\n\n\t\tdebugIP: runner.DebugIp,\n\t\tdebugPort: runner.DebugPort,\n\n\t\tClient: client.New(connection.New(runner.Network, runner.Addr)),\n\t}\n\n\tr.process = ifrit.Invoke(r.runner)\n\tr.Pid = runner.Cmd.Process.Pid\n\n\treturn r\n}\n\nfunc (r *RunningGarden) Kill() error {\n\tr.process.Signal(syscall.SIGKILL)\n\tselect {\n\tcase err := <-r.process.Wait():\n\t\treturn err\n\tcase <-time.After(time.Second * 10):\n\t\tr.process.Signal(syscall.SIGKILL)\n\t\treturn errors.New(\"timed out waiting for garden to shutdown after 10 seconds\")\n\t}\n}\n\nfunc (r *RunningGarden) DestroyAndStop() error {\n\tif err := r.DestroyContainers(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *RunningGarden) Stop() error {\n\tr.process.Signal(syscall.SIGTERM)\n\n\tvar err error\n\tfor i := 0; i < 5; i++ {\n\t\tselect {\n\t\tcase err := <-r.process.Wait():\n\t\t\treturn err\n\t\tcase <-time.After(time.Second * 5):\n\t\t\tr.process.Signal(syscall.SIGTERM)\n\t\t\terr = errors.New(\"timed out waiting for garden to shutdown after 5 seconds\")\n\t\t}\n\t}\n\n\tr.process.Signal(syscall.SIGKILL)\n\treturn err\n}\n\nfunc cmd(tmpdir, depotDir, graphPath, network, addr, bin, initBin, nstarBin, dadooBin, tarBin string, supplyDefaultRootfs bool, argv ...string) *exec.Cmd {\n\tExpect(os.MkdirAll(tmpdir, 0755)).To(Succeed())\n\n\tsnapshotsPath := filepath.Join(tmpdir, \"snapshots\")\n\n\tExpect(os.MkdirAll(depotDir, 0755)).To(Succeed())\n\n\tExpect(os.MkdirAll(snapshotsPath, 0755)).To(Succeed())\n\n\tappendDefaultFlag := func(ar []string, key, value string) []string {\n\t\tfor _, a := range argv {\n\t\t\tif a == key {\n\t\t\t\treturn ar\n\t\t\t}\n\t\t}\n\n\t\tif value != \"\" {\n\t\t\treturn append(ar, key, value)\n\t\t} else {\n\t\t\treturn append(ar, key)\n\t\t}\n\t}\n\n\tgardenArgs := make([]string, len(argv))\n\tcopy(gardenArgs, argv)\n\n\tswitch network {\n\tcase \"tcp\":\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--bind-ip\", addr)\n\tcase \"unix\":\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--bind-socket\", addr)\n\t}\n\n\tif supplyDefaultRootfs {\n\t\tgardenArgs = appendDefaultFlag(gardenArgs, \"--default-rootfs\", RootFSPath)\n\t}\n\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--depot\", depotDir)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--graph\", graphPath)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--tag\", fmt.Sprintf(\"%d\", GinkgoParallelNode()))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--network-pool\", fmt.Sprintf(\"10.254.%d.0\/22\", 4*GinkgoParallelNode()))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--init-bin\", initBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--dadoo-bin\", dadooBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--nstar-bin\", nstarBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--tar-bin\", tarBin)\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--port-pool-start\", fmt.Sprintf(\"%d\", GinkgoParallelNode()*10000))\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--debug-bind-ip\", \"0.0.0.0\")\n\tgardenArgs = appendDefaultFlag(gardenArgs, \"--debug-bind-port\", fmt.Sprintf(\"%d\", 8080+ginkgo.GinkgoParallelNode()))\n\n\treturn exec.Command(bin, gardenArgs...)\n}\n\nfunc (r *RunningGarden) Cleanup() {\n\t\/\/ unmount aufs since the docker graph driver leaves this around,\n\t\/\/ otherwise the following commands might fail\n\tretry := retrier.New(retrier.ConstantBackoff(200, 500*time.Millisecond), nil)\n\n\terr := retry.Run(func() error {\n\t\tif err := os.RemoveAll(path.Join(r.GraphPath, \"aufs\")); err == nil {\n\t\t\treturn nil \/\/ if we can remove it, it's already unmounted\n\t\t}\n\n\t\tif err := syscall.Unmount(path.Join(r.GraphPath, \"aufs\"), MNT_DETACH); err != nil {\n\t\t\tr.logger.Error(\"failed-unmount-attempt\", err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tr.logger.Error(\"failed-to-unmount\", err)\n\t}\n\n\tMustUnmountTmpfs(r.GraphPath)\n\n\t\/\/ In the kernel version 3.19.0-51-generic the code bellow results in\n\t\/\/ hanging the running VM. We are not deleting the node-X directories. They\n\t\/\/ are empty and the next test will re-use them. We will stick with that\n\t\/\/ workaround until we can test on a newer kernel that will hopefully not\n\t\/\/ have this bug.\n\t\/\/\n\t\/\/ if err := os.RemoveAll(r.GraphPath); err != nil {\n\t\/\/ \tr.logger.Error(\"remove-graph\", err)\n\t\/\/ }\n\n\tr.logger.Info(\"cleanup-tempdirs\")\n\tif err := os.RemoveAll(r.Tmpdir); err != nil {\n\t\tr.logger.Error(\"cleanup-tempdirs-failed\", err, lager.Data{\"tmpdir\": r.Tmpdir})\n\t} else {\n\t\tr.logger.Info(\"tempdirs-removed\")\n\t}\n}\n\nfunc (r *RunningGarden) DestroyContainers() error {\n\tcontainers, err := r.Containers(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, container := range containers {\n\t\tr.Destroy(container.Handle())\n\t}\n\n\treturn nil\n}\n\ntype debugVars struct {\n\tNumGoRoutines int `json:\"numGoRoutines\"`\n}\n\nfunc (r *RunningGarden) NumGoroutines() (int, error) {\n\tdebugURL := fmt.Sprintf(\"http:\/\/%s:%d\/debug\/vars\", r.debugIP, r.debugPort)\n\tres, err := http.Get(debugURL)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer res.Body.Close()\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar debugVarsData debugVars\n\terr = decoder.Decode(&debugVarsData)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn debugVarsData.NumGoRoutines, nil\n}\n\nfunc (r *RunningGarden) Buffer() *gbytes.Buffer {\n\treturn r.runner.Buffer()\n}\n\nfunc (r *RunningGarden) ExitCode() int {\n\treturn r.runner.ExitCode()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n)\n\ntype Handler struct {\n\tPattern, Root string\n}\n\nfunc registerHandlers(mux *http.ServeMux, paths map[string]string) {\n\tfor pattern, root := range paths {\n\t\tlog.Printf(\"Registering handler with pattern: %s, root path: %s\",\n\t\t\tpattern, root)\n\t\tmux.Handle(pattern, http.StripPrefix(pattern, http.FileServer(http.Dir(root))))\n\t}\n}\n\nfunc parsePaths(paths string) (err error, pathMap map[string]string) {\n\terr = json.Unmarshal([]byte(paths), &pathMap)\n\treturn\n}\n\nfunc main() {\n\tvar listen = flag.String(\"listen\", \":8080\",\n\t\t\"Interface\/port to listen on. eg. :8080 or 127.0.0.1:8080\")\n\tvar pathsRaw = flag.String(\"paths\", `{\"\/\": \".\"}`,\n\t\t\"Paths to serve. A json object with the keys as the url pattern, and \"+\n\t\t\t\"the value as the root. Default serves current folder.\")\n\tflag.Parse()\n\n\terr, paths := parsePaths(*pathsRaw)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmux := http.NewServeMux()\n\tregisterHandlers(mux, paths)\n\tlog.Println(\"Listening on: \", *listen)\n\n\tlog.Fatal(http.ListenAndServe(*listen, mux))\n}\n<commit_msg>Code cleanup<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc registerHandlers(mux *http.ServeMux, paths map[string]string) {\n\tfor pattern, root := range paths {\n\t\tlog.Printf(\"Registering handler with pattern: %s, root path: %s\",\n\t\t\tpattern, root)\n\t\tmux.Handle(pattern, http.StripPrefix(pattern, http.FileServer(http.Dir(root))))\n\t}\n}\n\nfunc parsePaths(paths string) (pathMap map[string]string, err error) {\n\terr = json.Unmarshal([]byte(paths), &pathMap)\n\treturn\n}\n\nfunc main() {\n\tvar listen = flag.String(\"listen\", \":8080\",\n\t\t\"Interface\/port to listen on. eg. :8080 or 127.0.0.1:8080\")\n\tvar pathsRaw = flag.String(\"paths\", `{\"\/\": \".\"}`,\n\t\t\"Paths to serve. A json object with the keys as the url pattern, and \"+\n\t\t\t\"the value as the root. Default serves current folder.\")\n\tflag.Parse()\n\n\tpaths, err := parsePaths(*pathsRaw)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmux := http.NewServeMux()\n\tregisterHandlers(mux, paths)\n\tlog.Println(\"Listening on: \", *listen)\n\n\tlog.Fatal(http.ListenAndServe(*listen, mux))\n}\n<|endoftext|>"} {"text":"<commit_before>package goemail\n\nimport \"time\"\n\nvar pools []*Pool\n\nfunc SetConfig(cs ...*Config) {\n\tif cs == nil || len(cs) == 0 {\n\t\tpanic(ERR_CONFIG)\n\t}\n\tpools = make([]*Pool, 0, len(cs))\n\tfor _, c := range cs {\n\t\tif c == nil || !c.Valid() {\n\t\t\tcontinue\n\t\t}\n\t\tpools = append(pools, NewPool(c))\n\t}\n}\n\nfunc StartService() error {\n\tif pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tfor _, p := range pools {\n\t\tif p != nil {\n\t\t\tp.Start()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc StopService() error {\n\tif pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tfor _, p := range pools {\n\t\tif p != nil {\n\t\t\tp.Stop()\n\t\t}\n\t}\n\treturn nil\n}\n\nvar sendIdx int = 0\n\nfunc SendEmail(e *Email) error {\n\tif e == nil || !e.Valid() {\n\t\treturn ERR_EMAIL\n\t} else if pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tif sendIdx > 0 && sendIdx >= len(pools) {\n\t\tsendIdx = 0\n\t}\n\tif p := pools[sendIdx]; p != nil {\n\t\tsendIdx++\n\t\tif e.TryCount >= 5 {\n\t\t\treturn ERR_EMAIL_TOO_MUCH\n\t\t} else if err := p.Send(e.SetFrom(p.config.User).Increase()); err != nil {\n\t\t\treturn SendEmail(e)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc SendEmailDelay(e *Email) {\n\tgo func() {\n\t\ttime.Sleep(time.Second * 3)\n\t\tSendEmail(e)\n\t}()\n}\n<commit_msg>fix: some bugs<commit_after>package goemail\n\nimport (\n\t\"strconv\"\n\t\"time\"\n)\n\nvar pools []*Pool\n\nfunc SetConfig(cs ...*Config) {\n\tif cs == nil || len(cs) == 0 {\n\t\tpanic(ERR_CONFIG)\n\t}\n\tpools = make([]*Pool, 0, len(cs))\n\tfor _, c := range cs {\n\t\tif c == nil || !c.Valid() {\n\t\t\tcontinue\n\t\t}\n\t\tpools = append(pools, NewPool(c))\n\t}\n}\n\nfunc StartService() error {\n\tif pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tfor _, p := range pools {\n\t\tif p != nil {\n\t\t\tp.Start()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc StopService() error {\n\tif pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tfor _, p := range pools {\n\t\tif p != nil {\n\t\t\tp.Stop()\n\t\t}\n\t}\n\treturn nil\n}\n\nvar sendIdx int = 0\n\nfunc SendEmail(e *Email) error {\n\tif e == nil || !e.Valid() {\n\t\treturn ERR_EMAIL\n\t} else if pools == nil || len(pools) == 0 {\n\t\treturn ERR_POOL\n\t}\n\tif sendIdx > 0 && sendIdx >= len(pools) {\n\t\tsendIdx = 0\n\t}\n\tif p := pools[sendIdx]; p != nil {\n\t\tsendIdx++\n\t\tif e.TryCount >= 5 {\n\t\t\treturn ERR_EMAIL_TOO_MUCH\n\t\t} else if err := p.Send(e.\n\t\t\tAddTag(strconv.Itoa(sendIdx)).\n\t\t\tSetFrom(p.config.User).\n\t\t\tIncrease()); err != nil {\n\t\t\treturn SendEmail(e)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc SendEmailDelay(e *Email) {\n\tgo func() {\n\t\ttime.Sleep(time.Second * 3)\n\t\tSendEmail(e)\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 HyperHQ Inc.\n\/\/\n\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/moby\/moby\/pkg\/term\"\n\tcontext \"golang.org\/x\/net\/context\"\n\n\tpb \"github.com\/kata-containers\/agent\/protocols\/grpc\"\n)\n\nconst sigChanSize = 2048\n\nvar sigIgnored = map[syscall.Signal]bool{\n\tsyscall.SIGCHLD: true,\n\tsyscall.SIGPIPE: true,\n\tsyscall.SIGWINCH: true,\n}\n\ntype shim struct {\n\tcontainerID string\n\texecID string\n\n\tctx context.Context\n\tagent *shimAgent\n}\n\nfunc newShim(addr, containerID, execID string) (*shim, error) {\n\tagent, err := newShimAgent(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &shim{containerID: containerID,\n\t\texecID: execID,\n\t\tctx: context.Background(),\n\t\tagent: agent}, nil\n}\n\nfunc (s *shim) proxyStdio(wg *sync.WaitGroup, terminal bool) {\n\t\/\/ don't wait the copying of the stdin, because `io.Copy(inPipe, os.Stdin)`\n\t\/\/ can't terminate when no input. todo: find a better way.\n\twg.Add(1)\n\tif !terminal {\n\t\t\/\/ In case it's not a terminal, we also need to get the output\n\t\t\/\/ from stderr.\n\t\twg.Add(1)\n\t}\n\n\tinPipe, outPipe, errPipe := shimStdioPipe(s.ctx, s.agent, s.containerID, s.execID)\n\tgo func() {\n\t\t_, err1 := io.Copy(inPipe, os.Stdin)\n\t\t_, err2 := s.agent.CloseStdin(s.ctx, &pb.CloseStdinRequest{\n\t\t\tContainerId: s.containerID,\n\t\t\tExecId: s.execID})\n\t\tif err1 != nil {\n\t\t\tlogger().WithError(err1).Warn(\"copy stdin failed\")\n\t\t}\n\t\tif err2 != nil {\n\t\t\tlogger().WithError(err2).Warn(\"close stdin failed\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(os.Stdout, outPipe)\n\t\tlogger().WithError(err).Info(\"copy stdout failed\")\n\t\twg.Done()\n\t}()\n\n\tif !terminal {\n\t\tgo func() {\n\t\t\t_, err := io.Copy(os.Stderr, errPipe)\n\t\t\tlogger().WithError(err).Info(\"copy stderr failed\")\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n\nfunc (s *shim) forwardAllSignals() chan os.Signal {\n\tsigc := make(chan os.Signal, sigChanSize)\n\t\/\/ handle all signals for the process.\n\tsignal.Notify(sigc)\n\tsignal.Ignore(syscall.SIGCHLD, syscall.SIGPIPE)\n\n\tgo func() {\n\t\tfor sig := range sigc {\n\t\t\tsysSig, ok := sig.(syscall.Signal)\n\t\t\tif !ok {\n\t\t\t\terr := errors.New(\"unknown signal\")\n\t\t\t\tlogger().WithError(err).WithField(\"signal\", sig.String()).Error(\"\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fatalSignal(sysSig) {\n\t\t\t\tlogger().WithField(\"signal\", sig).Error(\"received fatal signal\")\n\t\t\t\tdie()\n\t\t\t}\n\n\t\t\tif sigIgnored[sysSig] {\n\t\t\t\t\/\/ignore these\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ forward this signal to container\n\t\t\t_, err := s.agent.SignalProcess(s.ctx, &pb.SignalProcessRequest{\n\t\t\t\tContainerId: s.containerID,\n\t\t\t\tExecId: s.execID,\n\t\t\t\tSignal: uint32(sysSig)})\n\t\t\tif err != nil {\n\t\t\t\tlogger().WithError(err).WithField(\"signal\", sig.String()).Error(\"forward signal failed\")\n\t\t\t}\n\t\t}\n\t}()\n\treturn sigc\n}\n\nfunc (s *shim) resizeTty(fromTty *os.File) error {\n\tws, err := term.GetWinsize(fromTty.Fd())\n\tif err != nil {\n\t\tlogger().WithError(err).Info(\"Error getting size\")\n\t\treturn nil\n\t}\n\n\t_, err = s.agent.TtyWinResize(s.ctx, &pb.TtyWinResizeRequest{\n\t\tContainerId: s.containerID,\n\t\tExecId: s.execID,\n\t\tRow: uint32(ws.Height),\n\t\tColumn: uint32(ws.Width)})\n\tif err != nil {\n\t\tlogger().WithError(err).Error(\"set winsize failed\")\n\t}\n\n\treturn err\n}\n\nfunc (s *shim) monitorTtySize(tty *os.File) {\n\ts.resizeTty(tty)\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGWINCH)\n\tgo func() {\n\t\tfor range sigchan {\n\t\t\ts.resizeTty(tty)\n\t\t}\n\t}()\n}\n\nfunc (s *shim) wait() (int32, error) {\n\tresp, err := s.agent.WaitProcess(s.ctx, &pb.WaitProcessRequest{\n\t\tContainerId: s.containerID,\n\t\tExecId: s.execID})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn resp.Status, nil\n}\n<commit_msg>logging: Improve window resize messages<commit_after>\/\/ Copyright 2017 HyperHQ Inc.\n\/\/\n\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/moby\/moby\/pkg\/term\"\n\tcontext \"golang.org\/x\/net\/context\"\n\n\tpb \"github.com\/kata-containers\/agent\/protocols\/grpc\"\n)\n\nconst sigChanSize = 2048\n\nvar sigIgnored = map[syscall.Signal]bool{\n\tsyscall.SIGCHLD: true,\n\tsyscall.SIGPIPE: true,\n\tsyscall.SIGWINCH: true,\n}\n\ntype shim struct {\n\tcontainerID string\n\texecID string\n\n\tctx context.Context\n\tagent *shimAgent\n}\n\nfunc newShim(addr, containerID, execID string) (*shim, error) {\n\tagent, err := newShimAgent(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &shim{containerID: containerID,\n\t\texecID: execID,\n\t\tctx: context.Background(),\n\t\tagent: agent}, nil\n}\n\nfunc (s *shim) proxyStdio(wg *sync.WaitGroup, terminal bool) {\n\t\/\/ don't wait the copying of the stdin, because `io.Copy(inPipe, os.Stdin)`\n\t\/\/ can't terminate when no input. todo: find a better way.\n\twg.Add(1)\n\tif !terminal {\n\t\t\/\/ In case it's not a terminal, we also need to get the output\n\t\t\/\/ from stderr.\n\t\twg.Add(1)\n\t}\n\n\tinPipe, outPipe, errPipe := shimStdioPipe(s.ctx, s.agent, s.containerID, s.execID)\n\tgo func() {\n\t\t_, err1 := io.Copy(inPipe, os.Stdin)\n\t\t_, err2 := s.agent.CloseStdin(s.ctx, &pb.CloseStdinRequest{\n\t\t\tContainerId: s.containerID,\n\t\t\tExecId: s.execID})\n\t\tif err1 != nil {\n\t\t\tlogger().WithError(err1).Warn(\"copy stdin failed\")\n\t\t}\n\t\tif err2 != nil {\n\t\t\tlogger().WithError(err2).Warn(\"close stdin failed\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(os.Stdout, outPipe)\n\t\tlogger().WithError(err).Info(\"copy stdout failed\")\n\t\twg.Done()\n\t}()\n\n\tif !terminal {\n\t\tgo func() {\n\t\t\t_, err := io.Copy(os.Stderr, errPipe)\n\t\t\tlogger().WithError(err).Info(\"copy stderr failed\")\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n\nfunc (s *shim) forwardAllSignals() chan os.Signal {\n\tsigc := make(chan os.Signal, sigChanSize)\n\t\/\/ handle all signals for the process.\n\tsignal.Notify(sigc)\n\tsignal.Ignore(syscall.SIGCHLD, syscall.SIGPIPE)\n\n\tgo func() {\n\t\tfor sig := range sigc {\n\t\t\tsysSig, ok := sig.(syscall.Signal)\n\t\t\tif !ok {\n\t\t\t\terr := errors.New(\"unknown signal\")\n\t\t\t\tlogger().WithError(err).WithField(\"signal\", sig.String()).Error(\"\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fatalSignal(sysSig) {\n\t\t\t\tlogger().WithField(\"signal\", sig).Error(\"received fatal signal\")\n\t\t\t\tdie()\n\t\t\t}\n\n\t\t\tif sigIgnored[sysSig] {\n\t\t\t\t\/\/ignore these\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ forward this signal to container\n\t\t\t_, err := s.agent.SignalProcess(s.ctx, &pb.SignalProcessRequest{\n\t\t\t\tContainerId: s.containerID,\n\t\t\t\tExecId: s.execID,\n\t\t\t\tSignal: uint32(sysSig)})\n\t\t\tif err != nil {\n\t\t\t\tlogger().WithError(err).WithField(\"signal\", sig.String()).Error(\"forward signal failed\")\n\t\t\t}\n\t\t}\n\t}()\n\treturn sigc\n}\n\nfunc (s *shim) resizeTty(fromTty *os.File) error {\n\tws, err := term.GetWinsize(fromTty.Fd())\n\tif err != nil {\n\t\tlogger().WithError(err).Info(\"Error getting window size\")\n\t\treturn nil\n\t}\n\n\t_, err = s.agent.TtyWinResize(s.ctx, &pb.TtyWinResizeRequest{\n\t\tContainerId: s.containerID,\n\t\tExecId: s.execID,\n\t\tRow: uint32(ws.Height),\n\t\tColumn: uint32(ws.Width)})\n\tif err != nil {\n\t\tlogger().WithError(err).Error(\"set window size failed\")\n\t}\n\n\treturn err\n}\n\nfunc (s *shim) monitorTtySize(tty *os.File) {\n\ts.resizeTty(tty)\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGWINCH)\n\tgo func() {\n\t\tfor range sigchan {\n\t\t\ts.resizeTty(tty)\n\t\t}\n\t}()\n}\n\nfunc (s *shim) wait() (int32, error) {\n\tresp, err := s.agent.WaitProcess(s.ctx, &pb.WaitProcessRequest{\n\t\tContainerId: s.containerID,\n\t\tExecId: s.execID})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn resp.Status, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package vmwarevsphere\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/machine\/libmachine\/log\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/vapi\/library\"\n\tvapifinder \"github.com\/vmware\/govmomi\/vapi\/library\/finder\"\n\t\"github.com\/vmware\/govmomi\/vapi\/vcenter\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\nfunc (d *Driver) preCreate() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.finder = find.NewFinder(c.Client, true)\n\n\td.datacenter, err = d.finder.DatacenterOrDefault(d.getCtx(), d.Datacenter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.finder.SetDatacenter(d.datacenter)\n\tfor _, netName := range d.Networks {\n\t\tnet, err := d.finder.NetworkOrDefault(d.getCtx(), netName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.networks[netName] = net\n\t}\n\n\tif d.HostSystem != \"\" {\n\t\tvar err error\n\t\td.hostsystem, err = d.finder.HostSystemOrDefault(d.getCtx(), d.HostSystem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif d.Pool != \"\" {\n\t\t\/\/ Find specified Resource Pool\n\t\td.resourcepool, err = d.finder.ResourcePool(d.getCtx(), d.Pool)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if d.HostSystem != \"\" {\n\t\t\/\/ Pick default Resource Pool for Host System\n\t\td.resourcepool, err = d.hostsystem.ResourcePool(d.getCtx())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Pick the default Resource Pool for the Datacenter.\n\t\td.resourcepool, err = d.finder.DefaultResourcePool(d.getCtx())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Driver) postCreate(vm *object.VirtualMachine) error {\n\tif err := d.addConfigParams(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.cloudInit(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addTags(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addCustomAttributes(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.Start()\n}\n\nfunc (d *Driver) createLegacy() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec := types.VirtualMachineConfigSpec{\n\t\tName: d.MachineName,\n\t\tGuestId: \"otherLinux64Guest\",\n\t\tNumCPUs: int32(d.CPU),\n\t\tMemoryMB: int64(d.Memory),\n\t}\n\n\tscsi, err := object.SCSIControllerTypes().CreateSCSIController(\"pvscsi\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{\n\t\tOperation: types.VirtualDeviceConfigSpecOperationAdd,\n\t\tDevice: scsi,\n\t})\n\n\tif d.VAppTransport == \"com.vmware.guestInfo\" ||\n\t\td.VAppTransport == \"iso\" {\n\n\t\tvApp := types.VmConfigSpec{\n\t\t\tOvfEnvironmentTransport: []string{d.VAppTransport},\n\t\t}\n\n\t\tif d.VAppIpAllocationPolicy == \"dhcp\" ||\n\t\t\td.VAppIpAllocationPolicy == \"fixed\" ||\n\t\t\td.VAppIpAllocationPolicy == \"transient\" ||\n\t\t\td.VAppIpAllocationPolicy == \"fixedAllocated\" {\n\n\t\t\tif d.VAppIpProtocol != \"IPv4\" &&\n\t\t\t\td.VAppIpProtocol != \"IPv6\" {\n\t\t\t\td.VAppIpProtocol = \"IPv4\"\n\t\t\t}\n\n\t\t\tsupportedAllocationScheme := \"ovfenv\"\n\t\t\tif d.VAppIpAllocationPolicy == \"dhcp\" {\n\t\t\t\tsupportedAllocationScheme = \"dhcp\"\n\t\t\t}\n\n\t\t\tvApp.IpAssignment = &types.VAppIPAssignmentInfo{\n\t\t\t\tSupportedIpProtocol: []string{d.VAppIpProtocol},\n\t\t\t\tSupportedAllocationScheme: []string{supportedAllocationScheme},\n\t\t\t\tIpProtocol: d.VAppIpProtocol,\n\t\t\t\tIpAllocationPolicy: d.VAppIpAllocationPolicy + \"Policy\",\n\t\t\t}\n\t\t}\n\n\t\tfor i, prop := range d.VAppProperties {\n\t\t\tv := strings.SplitN(prop, \"=\", 2)\n\t\t\tkey := v[0]\n\t\t\ttyp := \"string\"\n\t\t\tvalue := \"\"\n\t\t\tif len(v) > 1 {\n\t\t\t\tvalue = v[1]\n\t\t\t}\n\t\t\tif strings.HasPrefix(value, \"ip:\") {\n\t\t\t\ttyp = value\n\t\t\t\tvalue = \"\"\n\t\t\t} else if strings.HasPrefix(value, \"${\") &&\n\t\t\t\tstrings.HasSuffix(value, \"}\") {\n\t\t\t\ttyp = \"expression\"\n\t\t\t}\n\t\t\tvApp.Property = append(vApp.Property, types.VAppPropertySpec{\n\t\t\t\tArrayUpdateSpec: types.ArrayUpdateSpec{\n\t\t\t\t\tOperation: types.ArrayUpdateOperationAdd,\n\t\t\t\t},\n\t\t\t\tInfo: &types.VAppPropertyInfo{\n\t\t\t\t\tKey: int32(i),\n\t\t\t\t\tId: key,\n\t\t\t\t\tType: typ,\n\t\t\t\t\tDefaultValue: value,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tspec.VAppConfig = &vApp\n\t}\n\n\tfolder, err := d.findFolder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds, err := d.getDatastore(&spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.Files = &types.VirtualMachineFileInfo{\n\t\tVmPathName: fmt.Sprintf(\"[%s]\", ds.Name()),\n\t}\n\n\ttask, err := folder.CreateVM(d.getCtx(), spec, d.resourcepool, d.hostsystem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := task.WaitForResult(d.getCtx(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Uploading Boot2docker ISO ...\")\n\tvm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))\n\tvmPath, err := d.getVmFolder(vm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdsurl := ds.NewURL(filepath.Join(vmPath, isoFilename))\n\tp := soap.DefaultUpload\n\tif err = c.Client.UploadFile(d.getCtx(), d.ISO, dsurl, &p); err != nil {\n\t\treturn err\n\t}\n\n\tdevices, err := vm.Device(d.getCtx())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar add []types.BaseVirtualDevice\n\tcontroller, err := devices.FindDiskController(\"scsi\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdisk := devices.CreateDisk(controller, ds.Reference(),\n\t\tds.Path(fmt.Sprintf(\"%s\/%s.vmdk\", vmPath, d.MachineName)))\n\n\t\/\/ Convert MB to KB\n\tdisk.CapacityInKB = int64(d.DiskSize) * 1024\n\tadd = append(add, disk)\n\tide, err := devices.FindIDEController(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcdrom, err := devices.CreateCdrom(ide)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadd = append(add, devices.InsertIso(cdrom, ds.Path(fmt.Sprintf(\"%s\/%s\", vmPath, isoFilename))))\n\tif err := vm.AddDevice(d.getCtx(), add...); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addNetworks(vm, d.networks); err != nil {\n\t\treturn err\n\t}\n\n\terr = d.postCreate(vm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.provisionVm(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *Driver) createFromVmName() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar info *types.TaskInfo\n\tpoolref := d.resourcepool.Reference()\n\thostref := d.hostsystem.Reference()\n\n\tspec := types.VirtualMachineCloneSpec{\n\t\tLocation: types.VirtualMachineRelocateSpec{\n\t\t\tPool: &poolref,\n\t\t\tHost: &hostref,\n\t\t},\n\t\tConfig: &types.VirtualMachineConfigSpec{\n\t\t\tGuestId: \"otherLinux64Guest\",\n\t\t\tNumCPUs: int32(d.CPU),\n\t\t\tMemoryMB: int64(d.Memory),\n\t\t},\n\t}\n\n\tds, err := d.getDatastore(spec.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.Config.Files = &types.VirtualMachineFileInfo{\n\t\tVmPathName: fmt.Sprintf(\"[%s]\", ds.Name()),\n\t}\n\n\tdsref := ds.Reference()\n\tspec.Location.Datastore = &dsref\n\n\tvm2Clone, err := d.fetchVM(d.CloneFrom)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfolder, err := d.findFolder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := vm2Clone.Clone(d.getCtx(), folder, d.MachineName, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err = task.WaitForResult(d.getCtx(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Retrieve the new VM\n\tvm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))\n\tif err := d.addNetworks(vm, d.networks); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.resizeDisk(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.postCreate(vm)\n}\n\nfunc (d *Driver) createFromLibraryName() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfolders, err := d.datacenter.Folders(d.getCtx())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlibManager := library.NewManager(d.getRestLogin(c.Client))\n\tif err := libManager.Login(d.getCtx(), d.getUserInfo()); err != nil {\n\t\treturn err\n\t}\n\n\tquery := fmt.Sprintf(\"\/%s\/%s\", d.ContentLibrary, d.CloneFrom)\n\tresults, err := vapifinder.NewFinder(libManager).Find(d.getCtx(), query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(results) < 1 {\n\t\treturn fmt.Errorf(\"No results found in content library: %s\", d.CloneFrom)\n\t}\n\n\tif len(results) > 1 {\n\t\treturn fmt.Errorf(\"More than one result returned from finder query: %s\", d.CloneFrom)\n\t}\n\n\titem, ok := results[0].GetResult().(library.Item)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Content Library item is not a template: %q is a %T\", d.CloneFrom, item)\n\t}\n\n\tvar nets []vcenter.NetworkMapping\n\tfor k, n := range d.networks {\n\t\tnets = append(nets, vcenter.NetworkMapping{\n\t\t\tKey: k,\n\t\t\tValue: n.Reference().Value,\n\t\t})\n\t}\n\n\thostId := \"\"\n\tif d.hostsystem != nil {\n\t\thostId = d.hostsystem.Reference().Value\n\t}\n\n\tds, err := d.getDatastore(&types.VirtualMachineConfigSpec{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeploy := vcenter.Deploy{\n\t\tDeploymentSpec: vcenter.DeploymentSpec{\n\t\t\tName: d.MachineName,\n\t\t\tDefaultDatastoreID: ds.Reference().Value,\n\t\t\tAcceptAllEULA: true,\n\t\t\tNetworkMappings: nets,\n\t\t\tStorageProvisioning: \"thin\",\n\t\t},\n\t\tTarget: vcenter.Target{\n\t\t\tResourcePoolID: d.resourcepool.Reference().Value,\n\t\t\tHostID: hostId,\n\t\t\tFolderID: folders.VmFolder.Reference().Value,\n\t\t},\n\t}\n\n\tm := vcenter.NewManager(libManager.Client)\n\n\tref, err := m.DeployLibraryItem(d.getCtx(), item.ID, deploy)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobj, err := d.finder.ObjectReference(d.getCtx(), *ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := obj.(*object.VirtualMachine)\n\tif err := d.resizeDisk(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.postCreate(vm)\n}\n<commit_msg>safer pointer checks on host\/pool<commit_after>package vmwarevsphere\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/machine\/libmachine\/log\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/vapi\/library\"\n\tvapifinder \"github.com\/vmware\/govmomi\/vapi\/library\/finder\"\n\t\"github.com\/vmware\/govmomi\/vapi\/vcenter\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\nfunc (d *Driver) preCreate() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.finder = find.NewFinder(c.Client, true)\n\n\td.datacenter, err = d.finder.DatacenterOrDefault(d.getCtx(), d.Datacenter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.finder.SetDatacenter(d.datacenter)\n\tfor _, netName := range d.Networks {\n\t\tnet, err := d.finder.NetworkOrDefault(d.getCtx(), netName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.networks[netName] = net\n\t}\n\n\tif d.HostSystem != \"\" {\n\t\tvar err error\n\t\td.hostsystem, err = d.finder.HostSystemOrDefault(d.getCtx(), d.HostSystem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif d.Pool != \"\" {\n\t\t\/\/ Find specified Resource Pool\n\t\td.resourcepool, err = d.finder.ResourcePool(d.getCtx(), d.Pool)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if d.HostSystem != \"\" {\n\t\t\/\/ Pick default Resource Pool for Host System\n\t\td.resourcepool, err = d.hostsystem.ResourcePool(d.getCtx())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Pick the default Resource Pool for the Datacenter.\n\t\td.resourcepool, err = d.finder.DefaultResourcePool(d.getCtx())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Driver) postCreate(vm *object.VirtualMachine) error {\n\tif err := d.addConfigParams(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.cloudInit(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addTags(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addCustomAttributes(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.Start()\n}\n\nfunc (d *Driver) createLegacy() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec := types.VirtualMachineConfigSpec{\n\t\tName: d.MachineName,\n\t\tGuestId: \"otherLinux64Guest\",\n\t\tNumCPUs: int32(d.CPU),\n\t\tMemoryMB: int64(d.Memory),\n\t}\n\n\tscsi, err := object.SCSIControllerTypes().CreateSCSIController(\"pvscsi\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.DeviceChange = append(spec.DeviceChange, &types.VirtualDeviceConfigSpec{\n\t\tOperation: types.VirtualDeviceConfigSpecOperationAdd,\n\t\tDevice: scsi,\n\t})\n\n\tif d.VAppTransport == \"com.vmware.guestInfo\" ||\n\t\td.VAppTransport == \"iso\" {\n\n\t\tvApp := types.VmConfigSpec{\n\t\t\tOvfEnvironmentTransport: []string{d.VAppTransport},\n\t\t}\n\n\t\tif d.VAppIpAllocationPolicy == \"dhcp\" ||\n\t\t\td.VAppIpAllocationPolicy == \"fixed\" ||\n\t\t\td.VAppIpAllocationPolicy == \"transient\" ||\n\t\t\td.VAppIpAllocationPolicy == \"fixedAllocated\" {\n\n\t\t\tif d.VAppIpProtocol != \"IPv4\" &&\n\t\t\t\td.VAppIpProtocol != \"IPv6\" {\n\t\t\t\td.VAppIpProtocol = \"IPv4\"\n\t\t\t}\n\n\t\t\tsupportedAllocationScheme := \"ovfenv\"\n\t\t\tif d.VAppIpAllocationPolicy == \"dhcp\" {\n\t\t\t\tsupportedAllocationScheme = \"dhcp\"\n\t\t\t}\n\n\t\t\tvApp.IpAssignment = &types.VAppIPAssignmentInfo{\n\t\t\t\tSupportedIpProtocol: []string{d.VAppIpProtocol},\n\t\t\t\tSupportedAllocationScheme: []string{supportedAllocationScheme},\n\t\t\t\tIpProtocol: d.VAppIpProtocol,\n\t\t\t\tIpAllocationPolicy: d.VAppIpAllocationPolicy + \"Policy\",\n\t\t\t}\n\t\t}\n\n\t\tfor i, prop := range d.VAppProperties {\n\t\t\tv := strings.SplitN(prop, \"=\", 2)\n\t\t\tkey := v[0]\n\t\t\ttyp := \"string\"\n\t\t\tvalue := \"\"\n\t\t\tif len(v) > 1 {\n\t\t\t\tvalue = v[1]\n\t\t\t}\n\t\t\tif strings.HasPrefix(value, \"ip:\") {\n\t\t\t\ttyp = value\n\t\t\t\tvalue = \"\"\n\t\t\t} else if strings.HasPrefix(value, \"${\") &&\n\t\t\t\tstrings.HasSuffix(value, \"}\") {\n\t\t\t\ttyp = \"expression\"\n\t\t\t}\n\t\t\tvApp.Property = append(vApp.Property, types.VAppPropertySpec{\n\t\t\t\tArrayUpdateSpec: types.ArrayUpdateSpec{\n\t\t\t\t\tOperation: types.ArrayUpdateOperationAdd,\n\t\t\t\t},\n\t\t\t\tInfo: &types.VAppPropertyInfo{\n\t\t\t\t\tKey: int32(i),\n\t\t\t\t\tId: key,\n\t\t\t\t\tType: typ,\n\t\t\t\t\tDefaultValue: value,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tspec.VAppConfig = &vApp\n\t}\n\n\tfolder, err := d.findFolder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds, err := d.getDatastore(&spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.Files = &types.VirtualMachineFileInfo{\n\t\tVmPathName: fmt.Sprintf(\"[%s]\", ds.Name()),\n\t}\n\n\ttask, err := folder.CreateVM(d.getCtx(), spec, d.resourcepool, d.hostsystem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := task.WaitForResult(d.getCtx(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Uploading Boot2docker ISO ...\")\n\tvm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))\n\tvmPath, err := d.getVmFolder(vm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdsurl := ds.NewURL(filepath.Join(vmPath, isoFilename))\n\tp := soap.DefaultUpload\n\tif err = c.Client.UploadFile(d.getCtx(), d.ISO, dsurl, &p); err != nil {\n\t\treturn err\n\t}\n\n\tdevices, err := vm.Device(d.getCtx())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar add []types.BaseVirtualDevice\n\tcontroller, err := devices.FindDiskController(\"scsi\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdisk := devices.CreateDisk(controller, ds.Reference(),\n\t\tds.Path(fmt.Sprintf(\"%s\/%s.vmdk\", vmPath, d.MachineName)))\n\n\t\/\/ Convert MB to KB\n\tdisk.CapacityInKB = int64(d.DiskSize) * 1024\n\tadd = append(add, disk)\n\tide, err := devices.FindIDEController(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcdrom, err := devices.CreateCdrom(ide)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tadd = append(add, devices.InsertIso(cdrom, ds.Path(fmt.Sprintf(\"%s\/%s\", vmPath, isoFilename))))\n\tif err := vm.AddDevice(d.getCtx(), add...); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.addNetworks(vm, d.networks); err != nil {\n\t\treturn err\n\t}\n\n\terr = d.postCreate(vm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.provisionVm(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *Driver) createFromVmName() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar info *types.TaskInfo\n\tvar loc types.VirtualMachineRelocateSpec\n\n\tif d.resourcepool != nil {\n\t\tpool := d.resourcepool.Reference()\n\t\tloc.Pool = &pool\n\t}\n\n\tif d.hostsystem != nil {\n\t\thost := d.hostsystem.Reference()\n\t\tloc.Host = &host\n\t}\n\n\tspec := types.VirtualMachineCloneSpec{\n\t\tLocation: loc,\n\t\tConfig: &types.VirtualMachineConfigSpec{\n\t\t\tGuestId: \"otherLinux64Guest\",\n\t\t\tNumCPUs: int32(d.CPU),\n\t\t\tMemoryMB: int64(d.Memory),\n\t\t},\n\t}\n\n\tds, err := d.getDatastore(spec.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec.Config.Files = &types.VirtualMachineFileInfo{\n\t\tVmPathName: fmt.Sprintf(\"[%s]\", ds.Name()),\n\t}\n\n\tdsref := ds.Reference()\n\tspec.Location.Datastore = &dsref\n\n\tvm2Clone, err := d.fetchVM(d.CloneFrom)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfolder, err := d.findFolder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := vm2Clone.Clone(d.getCtx(), folder, d.MachineName, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err = task.WaitForResult(d.getCtx(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Retrieve the new VM\n\tvm := object.NewVirtualMachine(c.Client, info.Result.(types.ManagedObjectReference))\n\tif err := d.addNetworks(vm, d.networks); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.resizeDisk(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.postCreate(vm)\n}\n\nfunc (d *Driver) createFromLibraryName() error {\n\tc, err := d.getSoapClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfolders, err := d.datacenter.Folders(d.getCtx())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlibManager := library.NewManager(d.getRestLogin(c.Client))\n\tif err := libManager.Login(d.getCtx(), d.getUserInfo()); err != nil {\n\t\treturn err\n\t}\n\n\tquery := fmt.Sprintf(\"\/%s\/%s\", d.ContentLibrary, d.CloneFrom)\n\tresults, err := vapifinder.NewFinder(libManager).Find(d.getCtx(), query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(results) < 1 {\n\t\treturn fmt.Errorf(\"No results found in content library: %s\", d.CloneFrom)\n\t}\n\n\tif len(results) > 1 {\n\t\treturn fmt.Errorf(\"More than one result returned from finder query: %s\", d.CloneFrom)\n\t}\n\n\titem, ok := results[0].GetResult().(library.Item)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Content Library item is not a template: %q is a %T\", d.CloneFrom, item)\n\t}\n\n\tvar nets []vcenter.NetworkMapping\n\tfor k, n := range d.networks {\n\t\tnets = append(nets, vcenter.NetworkMapping{\n\t\t\tKey: k,\n\t\t\tValue: n.Reference().Value,\n\t\t})\n\t}\n\n\thostId := \"\"\n\tif d.hostsystem != nil {\n\t\thostId = d.hostsystem.Reference().Value\n\t}\n\n\tds, err := d.getDatastore(&types.VirtualMachineConfigSpec{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeploy := vcenter.Deploy{\n\t\tDeploymentSpec: vcenter.DeploymentSpec{\n\t\t\tName: d.MachineName,\n\t\t\tDefaultDatastoreID: ds.Reference().Value,\n\t\t\tAcceptAllEULA: true,\n\t\t\tNetworkMappings: nets,\n\t\t\tStorageProvisioning: \"thin\",\n\t\t},\n\t\tTarget: vcenter.Target{\n\t\t\tResourcePoolID: d.resourcepool.Reference().Value,\n\t\t\tHostID: hostId,\n\t\t\tFolderID: folders.VmFolder.Reference().Value,\n\t\t},\n\t}\n\n\tm := vcenter.NewManager(libManager.Client)\n\n\tref, err := m.DeployLibraryItem(d.getCtx(), item.ID, deploy)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobj, err := d.finder.ObjectReference(d.getCtx(), *ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := obj.(*object.VirtualMachine)\n\tif err := d.resizeDisk(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.postCreate(vm)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gcfg reads \"gitconfig-like\" text-based configuration files with\n\/\/ \"name=value\" pairs grouped into sections (gcfg files).\n\/\/ Support for writing gcfg files may be added later.\n\/\/\n\/\/ See ReadInto and the examples to get an idea of how to use it.\n\/\/\n\/\/ This package is still a work in progress, and both the supported syntax and\n\/\/ the API is subject to change.\n\/\/\n\/\/ The syntax is based on that used by git config:\n\/\/ http:\/\/git-scm.com\/docs\/git-config#_syntax .\n\/\/ There are some (planned) differences compared to the git config format:\n\/\/ - improve data portability:\n\/\/ - must be encoded in UTF-8 (for now) and must not contain the 0 byte\n\/\/ - include and \"path\" type is not supported\n\/\/ (path type may be implementable as a user-defined type)\n\/\/ - disallow potentially ambiguous or misleading definitions:\n\/\/ - `[sec.sub]` format is not allowed (deprecated in gitconfig)\n\/\/ - `[sec \"\"]` is not allowed\n\/\/ - use `[sec]` for section name \"sec\" and empty subsection name\n\/\/ - (planned) within a single file, definitions must be contiguous for each:\n\/\/ - section: '[secA]' -> '[secB]' -> '[secA]' is an error\n\/\/ - subsection: '[sec \"A\"]' -> '[sec \"B\"]' -> '[sec \"A\"]' is an error\n\/\/ - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error\n\/\/\n\/\/ The package may be usable for handling some of the various \"INI file\" formats\n\/\/ used by some programs and libraries, but achieving or maintaining\n\/\/ compatibility with any of those is not a primary concern.\n\/\/\n\/\/ TODO:\n\/\/ - format\n\/\/ - define valid section and variable names\n\/\/ - reconsider valid escape sequences\n\/\/ (gitconfig doesn't support \\r in value, \\t in subsection name, etc.)\n\/\/ - define handling of default value\n\/\/ - complete syntax documentation\n\/\/ - reading\n\/\/ - define internal representation structure\n\/\/ - support multi-value variables\n\/\/ - support multiple inputs (readers, strings, files)\n\/\/ - support declaring encoding (?)\n\/\/ - support pointer fields\n\/\/ - support varying fields sets for subsections (?)\n\/\/ - scanEnum\n\/\/ - should use longest match (?)\n\/\/ - support matching on unique prefix (?)\n\/\/ - writing gcfg files\n\/\/ - error handling\n\/\/ - make error context accessible programmatically?\n\/\/ - limit input size?\n\/\/ - move TODOs to issue tracker (eventually)\n\/\/\npackage gcfg\n<commit_msg>add and clarify TODOs<commit_after>\/\/ Package gcfg reads \"gitconfig-like\" text-based configuration files with\n\/\/ \"name=value\" pairs grouped into sections (gcfg files).\n\/\/ Support for writing gcfg files may be added later.\n\/\/\n\/\/ See ReadInto and the examples to get an idea of how to use it.\n\/\/\n\/\/ This package is still a work in progress, and both the supported syntax and\n\/\/ the API is subject to change.\n\/\/\n\/\/ The syntax is based on that used by git config:\n\/\/ http:\/\/git-scm.com\/docs\/git-config#_syntax .\n\/\/ There are some (planned) differences compared to the git config format:\n\/\/ - improve data portability:\n\/\/ - must be encoded in UTF-8 (for now) and must not contain the 0 byte\n\/\/ - include and \"path\" type is not supported\n\/\/ (path type may be implementable as a user-defined type)\n\/\/ - disallow potentially ambiguous or misleading definitions:\n\/\/ - `[sec.sub]` format is not allowed (deprecated in gitconfig)\n\/\/ - `[sec \"\"]` is not allowed\n\/\/ - use `[sec]` for section name \"sec\" and empty subsection name\n\/\/ - (planned) within a single file, definitions must be contiguous for each:\n\/\/ - section: '[secA]' -> '[secB]' -> '[secA]' is an error\n\/\/ - subsection: '[sec \"A\"]' -> '[sec \"B\"]' -> '[sec \"A\"]' is an error\n\/\/ - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error\n\/\/\n\/\/ The package may be usable for handling some of the various \"INI file\" formats\n\/\/ used by some programs and libraries, but achieving or maintaining\n\/\/ compatibility with any of those is not a primary concern.\n\/\/\n\/\/ TODO:\n\/\/ - format\n\/\/ - define valid section and variable names\n\/\/ - reconsider valid escape sequences\n\/\/ (gitconfig doesn't support \\r in value, \\t in subsection name, etc.)\n\/\/ - define handling of \"implicit value\" for types other than bool\n\/\/ - consider handling of numeric values (decimal only by default?)\n\/\/ - complete syntax documentation\n\/\/ - reading\n\/\/ - define internal representation structure\n\/\/ - support multi-value variables\n\/\/ - support multiple inputs (readers, strings, files)\n\/\/ - support declaring encoding (?)\n\/\/ - support automatic derefereining of pointer fields (?)\n\/\/ - support varying fields sets for subsections (?)\n\/\/ - scanEnum\n\/\/ - should use longest match (?)\n\/\/ - support matching on unique prefix (?)\n\/\/ - writing gcfg files\n\/\/ - error handling\n\/\/ - make error context accessible programmatically?\n\/\/ - limit input size?\n\/\/ - move TODOs to issue tracker (eventually)\n\/\/\npackage gcfg\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/kr\/pretty\"\n)\n\ntype testCase struct {\n\txsd string\n\txml xmlTree\n\tgosrc string\n}\n\nvar (\n\ttests = []struct {\n\t\texported bool\n\t\tprefix string\n\t\txsd string\n\t\txml xmlTree\n\t\tgosrc string\n\t}{\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t<element name=\"titleList\" type=\"titleListType\">\n\t<\/element>\n\t<complexType name=\"titleListType\">\n\t\t<sequence>\n\t\t\t<element name=\"title\" type=\"originalTitleType\" maxOccurs=\"unbounded\" \/>\n\t\t<\/sequence>\n\t<\/complexType>\n\t<complexType name=\"originalTitleType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"titleType\">\n\t\t\t\t<attribute name=\"original\" type=\"boolean\">\n\t\t\t\t<\/attribute>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<complexType name=\"titleType\">\n\t\t<simpleContent>\n\t\t\t<restriction base=\"textType\">\n\t\t\t\t<maxLength value=\"300\" \/>\n\t\t\t<\/restriction>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<complexType name=\"textType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"language\" type=\"language\">\n\t\t\t\t<\/attribute>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"titleList\",\n\t\t\t\tType: \"titleList\",\n\t\t\t\tChildren: []*xmlTree{\n\t\t\t\t\t&xmlTree{\n\t\t\t\t\t\tName: \"title\",\n\t\t\t\t\t\tType: \"string\",\n\t\t\t\t\t\tCdata: true,\n\t\t\t\t\t\tList: true,\n\t\t\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t\t\t{Name: \"language\", Type: \"string\"},\n\t\t\t\t\t\t\t{Name: \"original\", Type: \"bool\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype titleList struct {\n\tTitle []title ` + \"`xml:\\\"title\\\"`\" + `\n}\n\ntype title struct {\n\tLanguage string ` + \"`xml:\\\"language,attr\\\"`\" + `\n\tOriginal bool ` + \"`xml:\\\"original,attr\\\"`\" + `\n\tTitle string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\n\t\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t<element name=\"tagList\">\n\t\t<complexType>\n\t\t\t<sequence>\n\t\t\t\t<element name=\"tag\" type=\"tagReferenceType\" minOccurs=\"0\" maxOccurs=\"unbounded\" \/>\n\t\t\t<\/sequence>\n\t\t<\/complexType>\n\t<\/element>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"nidType\">\n\t\t\t\t<attribute name=\"type\" type=\"tagTypeType\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<simpleType name=\"nidType\">\n\t\t<restriction base=\"string\">\n\t\t\t<pattern value=\"[0-9a-zA-Z\\-]+\" \/>\n\t\t<\/restriction>\n\t<\/simpleType>\n\t<simpleType name=\"tagTypeType\">\n\t\t<restriction base=\"string\">\n\t\t<\/restriction>\n\t<\/simpleType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"tagList\",\n\t\t\t\tType: \"tagList\",\n\t\t\t\tChildren: []*xmlTree{\n\t\t\t\t\t&xmlTree{\n\t\t\t\t\t\tName: \"tag\",\n\t\t\t\t\t\tType: \"string\",\n\t\t\t\t\t\tList: true,\n\t\t\t\t\t\tCdata: true,\n\t\t\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype tagList struct {\n\tTag []tag ` + \"`xml:\\\"tag\\\"`\" + `\n}\n\ntype tag struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tTag string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t\t\t\t<element name=\"tagId\" type=\"tagReferenceType\" \/>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"type\" type=\"string\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"tagId\",\n\t\t\t\tType: \"string\",\n\t\t\t\tList: false,\n\t\t\t\tCdata: true,\n\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype tagID struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tTagID string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: true,\n\t\t\tprefix: \"xxx\",\n\t\t\txsd: `<schema>\n\t<element name=\"url\" type=\"tagReferenceType\" \/>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"type\" type=\"string\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"url\",\n\t\t\t\tType: \"string\",\n\t\t\t\tList: false,\n\t\t\t\tCdata: true,\n\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype XxxURL struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tURL string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\t}\n)\n\nfunc removeComments(buf bytes.Buffer) bytes.Buffer {\n\tlines := strings.Split(buf.String(), \"\\n\")\n\tfor i, l := range lines {\n\t\tif strings.HasPrefix(l, \"\/\/\") {\n\t\t\tlines = append(lines[:i], lines[i+1:]...)\n\t\t}\n\t}\n\treturn *bytes.NewBufferString(strings.Join(lines, \"\\n\"))\n}\n\nfunc TestBuildXmlElem(t *testing.T) {\n\tfor _, tst := range tests {\n\t\tvar schema xsdSchema\n\t\tif err := xml.Unmarshal([]byte(tst.xsd), &schema); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tbldr := builder{\n\t\t\tschemas: []xsdSchema{schema},\n\t\t\tcomplTypes: make(map[string]xsdComplexType),\n\t\t\tsimplTypes: make(map[string]xsdSimpleType),\n\t\t}\n\t\telems := bldr.buildXML()\n\t\tif len(elems) != 1 {\n\t\t\tt.Errorf(\"wrong number of xml elements\")\n\t\t}\n\t\te := elems[0]\n\t\tif !reflect.DeepEqual(tst.xml, *e) {\n\t\t\tt.Errorf(\"Unexpected XML element: %s\", e.Name)\n\t\t\tpretty.Println(tst.xml)\n\t\t\tpretty.Println(e)\n\t\t}\n\t}\n}\n\nfunc TestGenerateGo(t *testing.T) {\n\tfor _, tst := range tests {\n\t\tvar out bytes.Buffer\n\t\tg := generator{prefix: tst.prefix, exported: tst.exported}\n\t\tg.do(&out, []*xmlTree{&tst.xml})\n\t\tout = removeComments(out)\n\t\tif strings.Join(strings.Fields(out.String()), \"\") != strings.Join(strings.Fields(tst.gosrc), \"\") {\n\t\t\tt.Errorf(\"Unexpected generated Go source: %s\", tst.xml.Name)\n\t\t\tt.Logf(out.String())\n\t\t\tt.Logf(strings.Join(strings.Fields(out.String()), \"\"))\n\t\t\tt.Logf(strings.Join(strings.Fields(tst.gosrc), \"\"))\n\t\t}\n\t}\n}\n\nfunc TestLintTitle(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"foo cpu baz\", \"FooCPUBaz\"},\n\t\t{\"test Id\", \"TestID\"},\n\t\t{\"json and html\", \"JSONAndHTML\"},\n\t} {\n\t\tif got := lintTitle(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] title(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestSquish(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"Foo CPU Baz\", \"FooCPUBaz\"},\n\t\t{\"Test ID\", \"TestID\"},\n\t\t{\"JSON And HTML\", \"JSONAndHTML\"},\n\t} {\n\t\tif got := squish(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] squish(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestReplace(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"foo Cpu baz\", \"foo CPU baz\"},\n\t\t{\"test Id\", \"test ID\"},\n\t\t{\"Json and Html\", \"JSON and HTML\"},\n\t} {\n\t\tif got := initialisms.Replace(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] replace(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n\n\tc := len(initialismPairs)\n\n\tfor i := 0; i < c; i++ {\n\t\tinput, want := initialismPairs[i], initialismPairs[i+1]\n\n\t\tif got := initialisms.Replace(input); got != want {\n\t\t\tt.Errorf(\"[%d] replace(%q) = %q, want %q\", i, input, got, want)\n\t\t}\n\n\t\ti++\n\t}\n}\n<commit_msg>Add test for Windows-1252 encoding<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/kr\/pretty\"\n)\n\ntype testCase struct {\n\txsd string\n\txml xmlTree\n\tgosrc string\n}\n\nvar (\n\ttests = []struct {\n\t\texported bool\n\t\tprefix string\n\t\txsd string\n\t\txml xmlTree\n\t\tgosrc string\n\t}{\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t<element name=\"titleList\" type=\"titleListType\">\n\t<\/element>\n\t<complexType name=\"titleListType\">\n\t\t<sequence>\n\t\t\t<element name=\"title\" type=\"originalTitleType\" maxOccurs=\"unbounded\" \/>\n\t\t<\/sequence>\n\t<\/complexType>\n\t<complexType name=\"originalTitleType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"titleType\">\n\t\t\t\t<attribute name=\"original\" type=\"boolean\">\n\t\t\t\t<\/attribute>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<complexType name=\"titleType\">\n\t\t<simpleContent>\n\t\t\t<restriction base=\"textType\">\n\t\t\t\t<maxLength value=\"300\" \/>\n\t\t\t<\/restriction>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<complexType name=\"textType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"language\" type=\"language\">\n\t\t\t\t<\/attribute>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"titleList\",\n\t\t\t\tType: \"titleList\",\n\t\t\t\tChildren: []*xmlTree{\n\t\t\t\t\t&xmlTree{\n\t\t\t\t\t\tName: \"title\",\n\t\t\t\t\t\tType: \"string\",\n\t\t\t\t\t\tCdata: true,\n\t\t\t\t\t\tList: true,\n\t\t\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t\t\t{Name: \"language\", Type: \"string\"},\n\t\t\t\t\t\t\t{Name: \"original\", Type: \"bool\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype titleList struct {\n\tTitle []title ` + \"`xml:\\\"title\\\"`\" + `\n}\n\ntype title struct {\n\tLanguage string ` + \"`xml:\\\"language,attr\\\"`\" + `\n\tOriginal bool ` + \"`xml:\\\"original,attr\\\"`\" + `\n\tTitle string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\n\t\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t<element name=\"tagList\">\n\t\t<complexType>\n\t\t\t<sequence>\n\t\t\t\t<element name=\"tag\" type=\"tagReferenceType\" minOccurs=\"0\" maxOccurs=\"unbounded\" \/>\n\t\t\t<\/sequence>\n\t\t<\/complexType>\n\t<\/element>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"nidType\">\n\t\t\t\t<attribute name=\"type\" type=\"tagTypeType\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n\t<simpleType name=\"nidType\">\n\t\t<restriction base=\"string\">\n\t\t\t<pattern value=\"[0-9a-zA-Z\\-]+\" \/>\n\t\t<\/restriction>\n\t<\/simpleType>\n\t<simpleType name=\"tagTypeType\">\n\t\t<restriction base=\"string\">\n\t\t<\/restriction>\n\t<\/simpleType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"tagList\",\n\t\t\t\tType: \"tagList\",\n\t\t\t\tChildren: []*xmlTree{\n\t\t\t\t\t&xmlTree{\n\t\t\t\t\t\tName: \"tag\",\n\t\t\t\t\t\tType: \"string\",\n\t\t\t\t\t\tList: true,\n\t\t\t\t\t\tCdata: true,\n\t\t\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype tagList struct {\n\tTag []tag ` + \"`xml:\\\"tag\\\"`\" + `\n}\n\ntype tag struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tTag string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: false,\n\t\t\tprefix: \"\",\n\t\t\txsd: `<schema>\n\t\t\t\t<element name=\"tagId\" type=\"tagReferenceType\" \/>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"type\" type=\"string\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"tagId\",\n\t\t\t\tType: \"string\",\n\t\t\t\tList: false,\n\t\t\t\tCdata: true,\n\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype tagID struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tTagID string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\n\t\t{\n\t\t\texported: true,\n\t\t\tprefix: \"xxx\",\n\t\t\txsd: `<schema>\n\t<element name=\"url\" type=\"tagReferenceType\" \/>\n\t<complexType name=\"tagReferenceType\">\n\t\t<simpleContent>\n\t\t\t<extension base=\"string\">\n\t\t\t\t<attribute name=\"type\" type=\"string\" use=\"required\" \/>\n\t\t\t<\/extension>\n\t\t<\/simpleContent>\n\t<\/complexType>\n<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"url\",\n\t\t\t\tType: \"string\",\n\t\t\t\tList: false,\n\t\t\t\tCdata: true,\n\t\t\t\tAttribs: []xmlAttrib{\n\t\t\t\t\t{Name: \"type\", Type: \"string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgosrc: `\ntype XxxURL struct {\n\tType string ` + \"`xml:\\\"type,attr\\\"`\" + `\n\tURL string ` + \"`xml:\\\",chardata\\\"`\" + `\n}\n\t\t\t`,\n\t\t},\n\n\t\t\/\/ Windows-1252 encoding\n\t\t{\n\t\t\txsd: `<?xml version=\"1.0\" encoding=\"Windows-1252\"?>\n\t<schema>\n\t\t<element name=\"empty\" type=\"tagReferenceType\" \/>\n\t\t<complexType name=\"tagReferenceType\"\/>\n\t<\/schema>`,\n\t\t\txml: xmlTree{\n\t\t\t\tName: \"empty\",\n\t\t\t\tType: \"empty\",\n\t\t\t},\n\t\t\tgosrc: `\ntype empty struct {}\n\t\t\t`,\n\t\t},\n\t}\n)\n\nfunc removeComments(buf bytes.Buffer) bytes.Buffer {\n\tlines := strings.Split(buf.String(), \"\\n\")\n\tfor i, l := range lines {\n\t\tif strings.HasPrefix(l, \"\/\/\") {\n\t\t\tlines = append(lines[:i], lines[i+1:]...)\n\t\t}\n\t}\n\treturn *bytes.NewBufferString(strings.Join(lines, \"\\n\"))\n}\n\nfunc TestBuildXmlElem(t *testing.T) {\n\tfor _, tst := range tests {\n\t\tvar schema xsdSchema\n\n\t\td := xml.NewDecoder(strings.NewReader(tst.xsd))\n\t\t\/\/ handle special character sets\n\t\td.CharsetReader = makeCharsetReader\n\t\tif err := d.Decode(&schema); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tbldr := builder{\n\t\t\tschemas: []xsdSchema{schema},\n\t\t\tcomplTypes: make(map[string]xsdComplexType),\n\t\t\tsimplTypes: make(map[string]xsdSimpleType),\n\t\t}\n\t\telems := bldr.buildXML()\n\t\tif len(elems) != 1 {\n\t\t\tt.Errorf(\"wrong number of xml elements\")\n\t\t}\n\t\te := elems[0]\n\t\tif !reflect.DeepEqual(tst.xml, *e) {\n\t\t\tt.Errorf(\"Unexpected XML element: %s\", e.Name)\n\t\t\tpretty.Println(tst.xml)\n\t\t\tpretty.Println(e)\n\t\t}\n\t}\n}\n\nfunc TestGenerateGo(t *testing.T) {\n\tfor _, tst := range tests {\n\t\tvar out bytes.Buffer\n\t\tg := generator{prefix: tst.prefix, exported: tst.exported}\n\t\tg.do(&out, []*xmlTree{&tst.xml})\n\t\tout = removeComments(out)\n\t\tif strings.Join(strings.Fields(out.String()), \"\") != strings.Join(strings.Fields(tst.gosrc), \"\") {\n\t\t\tt.Errorf(\"Unexpected generated Go source: %s\", tst.xml.Name)\n\t\t\tt.Logf(out.String())\n\t\t\tt.Logf(strings.Join(strings.Fields(out.String()), \"\"))\n\t\t\tt.Logf(strings.Join(strings.Fields(tst.gosrc), \"\"))\n\t\t}\n\t}\n}\n\nfunc TestLintTitle(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"foo cpu baz\", \"FooCPUBaz\"},\n\t\t{\"test Id\", \"TestID\"},\n\t\t{\"json and html\", \"JSONAndHTML\"},\n\t} {\n\t\tif got := lintTitle(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] title(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestSquish(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"Foo CPU Baz\", \"FooCPUBaz\"},\n\t\t{\"Test ID\", \"TestID\"},\n\t\t{\"JSON And HTML\", \"JSONAndHTML\"},\n\t} {\n\t\tif got := squish(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] squish(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestReplace(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tinput, want string\n\t}{\n\t\t{\"foo Cpu baz\", \"foo CPU baz\"},\n\t\t{\"test Id\", \"test ID\"},\n\t\t{\"Json and Html\", \"JSON and HTML\"},\n\t} {\n\t\tif got := initialisms.Replace(tt.input); got != tt.want {\n\t\t\tt.Errorf(\"[%d] replace(%q) = %q, want %q\", i, tt.input, got, tt.want)\n\t\t}\n\t}\n\n\tc := len(initialismPairs)\n\n\tfor i := 0; i < c; i++ {\n\t\tinput, want := initialismPairs[i], initialismPairs[i+1]\n\n\t\tif got := initialisms.Replace(input); got != want {\n\t\t\tt.Errorf(\"[%d] replace(%q) = %q, want %q\", i, input, got, want)\n\t\t}\n\n\t\ti++\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar TIMEOUTDURATION = time.Duration(10 * time.Second)\nvar Queues map[string]*Queue\n\ntype JobEntry struct {\n\tQueueName string `json:\"queue\"`\n\tID int64\n\tEntryDate time.Time\n}\n\ntype Jobs []*JobEntry\ntype JobFile struct {\n\tJobs Jobs `json:\"jobs\"`\n}\n\ntype Queue struct {\n\tentries chan *JobEntry\n\tj Jobs\n\tm sync.Mutex \/\/ Whole queue mutex\n\tt sync.Mutex \/\/ just the timer mutex\n\trw sync.RWMutex \/\/ For reading and setting fields\n\tondisk bool\n\tondiskfile string\n\ttimer *time.Timer\n}\n\n\/\/ Initialize a new queue using the filepath as it's disk storage.\nfunc NewQueue(filepath string) *Queue {\n\tq := &Queue{\n\t\tondiskfile: filepath,\n\t\tentries: make(chan *JobEntry),\n\t\ttimer: time.NewTimer(TIMEOUTDURATION),\n\t}\n\tgo MonitorQueue(q)\n\treturn q\n}\n\n\/\/ SaveToDisk will write any unused entries to disk\n\/\/\n\/\/ This is not thread safe and external locking should be used.\nfunc (q *Queue) SaveToDisk() {\n\tvar jobfile JobFile\n\tif q.HasOnDisk() {\n\t\tq.PopOnDiskJobs(&jobfile)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase job := <-q.entries:\n\t\t\tjobfile.Jobs = append(jobfile.Jobs, job)\n\t\tdefault:\n\t\t\tif len(jobfile.Jobs) < 1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw, err := os.OpenFile(q.ondiskfile, os.O_CREATE|os.O_RDWR, 0666)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tw.Write(jobfile.Jobs.Serialize())\n\t\t\tq.rw.Lock()\n\t\t\tdefer q.rw.Unlock()\n\t\t\tq.ondisk = true\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ HasOnDisk returns the ondisk state\nfunc (q Queue) HasOnDisk() bool {\n\tq.rw.RLock()\n\tdefer q.rw.RUnlock()\n\treturn q.ondisk\n}\n\n\/\/ Reset the underlying timer. Used when we interact with the queue so\n\/\/ we can organize the writes to disk into low-activity periods.\nfunc (q *Queue) Reset() bool {\n\tq.t.Lock()\n\tdefer q.t.Unlock()\n\treturn q.timer.Reset(TIMEOUTDURATION)\n}\n\n\/\/ Pop off all the on-disk jobs into the supplied JobFile.\nfunc (q *Queue) PopOnDiskJobs(j *JobFile) {\n\tif q.ondiskfile == \"\" {\n\t\tpanic(\"Queue's job file is inaccessible.\")\n\t}\n\tq.m.Lock()\n\tdefer q.m.Unlock()\n\tb, err := ioutil.ReadFile(q.ondiskfile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\terr = json.Unmarshal(b, j)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tq.ondisk = false\n\tif err := os.Remove(q.ondiskfile); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\n\/\/ Monitors a queue for inactivity, if there is inactivity for the\n\/\/ specified timeout duration, we write to disk.\nfunc MonitorQueue(q *Queue) {\n\tlog.Printf(\"Monitoring %s\\n\", q.ondiskfile)\n\tfor {\n\t\t<-q.timer.C\n\t\tq.m.Lock()\n\t\tq.SaveToDisk()\n\t\tq.Reset()\n\t\tq.m.Unlock()\n\t}\n}\n\nfunc (j Jobs) Serialize() []byte {\n\tvar b bytes.Buffer\n\tb.WriteString(`{\"jobs\":[`)\n\tfor idx, job := range j {\n\t\tb.Write(job.Serialize())\n\t\tif idx+1 != len(j) {\n\t\t\tb.WriteString(\",\")\n\t\t}\n\t}\n\tb.WriteString(\"]}\")\n\treturn b.Bytes()\n}\n\nfunc (j JobEntry) Serialize() []byte {\n\tb, _ := json.Marshal(j)\n\treturn b\n}\n\nfunc Pop(w http.ResponseWriter, req *http.Request) {\n\tjob, err := readbody(req.Body)\n\tif err != nil {\n\t\twritefail(w)\n\t\treturn\n\t}\n\tqueue, ok := Queues[job.QueueName]\n\tif !ok {\n\t\twritefail(w)\n\t\treturn\n\t}\n\n\t\/\/ We're interacting with the queue so we'll reset the timer to\n\t\/\/ show that\n\tqueue.Reset()\n\tvar jobfile JobFile\n\n\t\/\/ Get all the pending jobs from disk\n\tif queue.HasOnDisk() {\n\t\tqueue.PopOnDiskJobs(&jobfile)\n\t}\n\n\t\/\/ Doing it this way allows other readers to take jobs out the\n\t\/\/ queue without duplicating our jobs.\n\tfor {\n\t\tselect {\n\t\tcase job := <-queue.entries:\n\t\t\tjobfile.Jobs = append(jobfile.Jobs, job)\n\t\tdefault:\n\t\t\tif len(jobfile.Jobs) < 1 {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(jobfile.Jobs.Serialize())\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Pushes a new job into the queue.\nfunc Push(w http.ResponseWriter, req *http.Request) {\n\tjob, err := readbody(req.Body)\n\tif err != nil {\n\t\twritefail(w)\n\t\treturn\n\t}\n\tgo func() {\n\t\tif queue, ok := Queues[job.QueueName]; ok {\n\t\t\tqueue.Reset()\n\t\t\tqueue.entries <- job\n\t\t}\n\t}()\n\twritesuccess(w)\n}\n\nfunc readbody(b io.ReadCloser) (*JobEntry, error) {\n\tif b == nil {\n\t\treturn nil, errors.New(\"Nil body supplied!\")\n\t}\n\tdefer b.Close()\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &JobEntry{}\n\terr = json.Unmarshal(body, e)\n\treturn e, err\n}\n\nfunc writesuccess(w http.ResponseWriter) {\n\tw.Write([]byte(`{\"success\": true}`))\n}\n\nfunc writefail(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write([]byte(`{\"success\": false}`))\n}\n\nfunc main() {\n\n\t\/\/ Save to disk in the case of horrible failure.\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tfor _, queue := range Queues {\n\t\t\t\tqueue.SaveToDisk()\n\t\t\t}\n\t\t}\n\t}()\n\tQueues = make(map[string]*Queue)\n\tQueues[\"tt\"] = NewQueue(\"tt.q\")\n\thttp.HandleFunc(\"\/push\/\", Push)\n\thttp.HandleFunc(\"\/pop\/\", Pop)\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n<commit_msg>Added a signal catcher to retrieve the OS signals and cleanup properly.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar TIMEOUTDURATION = time.Duration(10 * time.Second)\nvar Queues map[string]*Queue\n\ntype JobEntry struct {\n\tQueueName string `json:\"queue\"`\n\tID int64\n\tEntryDate time.Time\n}\n\ntype Jobs []*JobEntry\ntype JobFile struct {\n\tJobs Jobs `json:\"jobs\"`\n}\n\ntype Queue struct {\n\tentries chan *JobEntry\n\tj Jobs\n\tm sync.Mutex \/\/ Whole queue mutex\n\tt sync.Mutex \/\/ just the timer mutex\n\trw sync.RWMutex \/\/ For reading and setting fields\n\tondisk bool\n\tondiskfile string\n\ttimer *time.Timer\n}\n\n\/\/ Initialize a new queue using the filepath as it's disk storage.\nfunc NewQueue(filepath string) *Queue {\n\tq := &Queue{\n\t\tondiskfile: filepath,\n\t\tentries: make(chan *JobEntry),\n\t\ttimer: time.NewTimer(TIMEOUTDURATION),\n\t}\n\tgo MonitorQueue(q)\n\treturn q\n}\n\n\/\/ SaveToDisk will write any unused entries to disk\n\/\/\n\/\/ This is not thread safe and external locking should be used.\nfunc (q *Queue) SaveToDisk() {\n\tvar jobfile JobFile\n\tif q.HasOnDisk() {\n\t\tq.PopOnDiskJobs(&jobfile)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase job := <-q.entries:\n\t\t\tjobfile.Jobs = append(jobfile.Jobs, job)\n\t\tdefault:\n\t\t\tif len(jobfile.Jobs) < 1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw, err := os.OpenFile(q.ondiskfile, os.O_CREATE|os.O_RDWR, 0666)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tw.Write(jobfile.Jobs.Serialize())\n\t\t\tq.rw.Lock()\n\t\t\tdefer q.rw.Unlock()\n\t\t\tq.ondisk = true\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ HasOnDisk returns the ondisk state\nfunc (q Queue) HasOnDisk() bool {\n\tq.rw.RLock()\n\tdefer q.rw.RUnlock()\n\treturn q.ondisk\n}\n\n\/\/ Reset the underlying timer. Used when we interact with the queue so\n\/\/ we can organize the writes to disk into low-activity periods.\nfunc (q *Queue) Reset() bool {\n\tq.t.Lock()\n\tdefer q.t.Unlock()\n\treturn q.timer.Reset(TIMEOUTDURATION)\n}\n\n\/\/ Pop off all the on-disk jobs into the supplied JobFile.\nfunc (q *Queue) PopOnDiskJobs(j *JobFile) {\n\tif q.ondiskfile == \"\" {\n\t\tpanic(\"Queue's job file is inaccessible.\")\n\t}\n\tq.m.Lock()\n\tdefer q.m.Unlock()\n\tb, err := ioutil.ReadFile(q.ondiskfile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\terr = json.Unmarshal(b, j)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tq.ondisk = false\n\tif err := os.Remove(q.ondiskfile); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\n\/\/ Monitors a queue for inactivity, if there is inactivity for the\n\/\/ specified timeout duration, we write to disk.\nfunc MonitorQueue(q *Queue) {\n\tlog.Printf(\"Monitoring %s\\n\", q.ondiskfile)\n\tfor {\n\t\t<-q.timer.C\n\t\tq.m.Lock()\n\t\tq.SaveToDisk()\n\t\tq.Reset()\n\t\tq.m.Unlock()\n\t}\n}\n\nfunc (j Jobs) Serialize() []byte {\n\tvar b bytes.Buffer\n\tb.WriteString(`{\"jobs\":[`)\n\tfor idx, job := range j {\n\t\tb.Write(job.Serialize())\n\t\tif idx+1 != len(j) {\n\t\t\tb.WriteString(\",\")\n\t\t}\n\t}\n\tb.WriteString(\"]}\")\n\treturn b.Bytes()\n}\n\nfunc (j JobEntry) Serialize() []byte {\n\tb, _ := json.Marshal(j)\n\treturn b\n}\n\nfunc Pop(w http.ResponseWriter, req *http.Request) {\n\tjob, err := readbody(req.Body)\n\tif err != nil {\n\t\twritefail(w)\n\t\treturn\n\t}\n\tqueue, ok := Queues[job.QueueName]\n\tif !ok {\n\t\twritefail(w)\n\t\treturn\n\t}\n\n\t\/\/ We're interacting with the queue so we'll reset the timer to\n\t\/\/ show that\n\tqueue.Reset()\n\tvar jobfile JobFile\n\n\t\/\/ Get all the pending jobs from disk\n\tif queue.HasOnDisk() {\n\t\tqueue.PopOnDiskJobs(&jobfile)\n\t}\n\n\t\/\/ Doing it this way allows other readers to take jobs out the\n\t\/\/ queue without duplicating our jobs.\n\tfor {\n\t\tselect {\n\t\tcase job := <-queue.entries:\n\t\t\tjobfile.Jobs = append(jobfile.Jobs, job)\n\t\tdefault:\n\t\t\tif len(jobfile.Jobs) < 1 {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(jobfile.Jobs.Serialize())\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Pushes a new job into the queue.\nfunc Push(w http.ResponseWriter, req *http.Request) {\n\tjob, err := readbody(req.Body)\n\tif err != nil {\n\t\twritefail(w)\n\t\treturn\n\t}\n\tgo func() {\n\t\tif queue, ok := Queues[job.QueueName]; ok {\n\t\t\tqueue.Reset()\n\t\t\tqueue.entries <- job\n\t\t}\n\t}()\n\twritesuccess(w)\n}\n\nfunc readbody(b io.ReadCloser) (*JobEntry, error) {\n\tif b == nil {\n\t\treturn nil, errors.New(\"Nil body supplied!\")\n\t}\n\tdefer b.Close()\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &JobEntry{}\n\terr = json.Unmarshal(body, e)\n\treturn e, err\n}\n\nfunc writesuccess(w http.ResponseWriter) {\n\tw.Write([]byte(`{\"success\": true}`))\n}\n\nfunc writefail(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write([]byte(`{\"success\": false}`))\n}\n\nfunc main() {\n\n\t\/\/ Save to disk in the case of horrible failure.\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tfor _, queue := range Queues {\n\t\t\t\tqueue.SaveToDisk()\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tsigchan := make(chan os.Signal)\n\t\tsignal.Notify(sigchan, os.Interrupt)\n\t\t<-sigchan\n\t\tlog.Println(\"Caught SIGINT, saving state\")\n\t\tfor _, queue := range Queues {\n\t\t\tqueue.SaveToDisk()\n\t\t}\n\t\tos.Exit(0)\n\t}()\n\n\tQueues = make(map[string]*Queue)\n\tQueues[\"tt\"] = NewQueue(\"tt.q\")\n\thttp.HandleFunc(\"\/push\/\", Push)\n\thttp.HandleFunc(\"\/pop\/\", Pop)\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/amahi\/spdy\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"time\"\n)\n\ntype SpdyGun struct {\n\tpingPeriod time.Duration\n\ttarget string\n\tclient *spdy.Client\n}\n\nfunc (sg *SpdyGun) Run(a Ammo, results chan<- Sample) {\n\tif sg.client == nil {\n\t\tsg.Connect(results)\n\t}\n\tif sg.pingPeriod > 0 {\n\t\tpingTimer := time.NewTicker(sg.pingPeriod)\n\t\tgo func() {\n\t\t\tfor range pingTimer.C {\n\t\t\t\tsg.Ping(results)\n\t\t\t}\n\t\t}()\n\t}\n\tstart := time.Now()\n\tss := &SpdySample{ts: float64(start.UnixNano()) \/ 1e9, tag: \"REQUEST\"}\n\tdefer func() {\n\t\tss.rt = int(time.Since(start).Seconds() * 1e6)\n\t\tresults <- ss\n\t}()\n\t\/\/ now send the request to obtain a http response\n\tha, ok := a.(*HttpAmmo)\n\tif !ok {\n\t\terrStr := fmt.Sprintf(\"Got '%T' instead of 'HttpAmmo'\", a)\n\t\tlog.Println(errStr)\n\t\tss.err = errors.New(errStr)\n\t\treturn\n\t}\n\tif ha.Tag != \"\" {\n\t\tss.tag += \"|\" + ha.Tag\n\t}\n\treq, err := ha.Request()\n\tif err != nil {\n\t\tlog.Printf(\"Error making HTTP request: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\tres, err := sg.client.Do(req)\n\tif err != nil {\n\t\tlog.Printf(\"Error performing a request: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\t_, err = io.Copy(ioutil.Discard, res.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading response body: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\n\t\/\/ TODO: make this an optional verbose answ_log output\n\t\/\/data := make([]byte, int(res.ContentLength))\n\t\/\/ _, err = res.Body.(io.Reader).Read(data)\n\t\/\/ fmt.Println(string(data))\n\tss.StatusCode = res.StatusCode\n\treturn\n}\n\nfunc (sg *SpdyGun) Close() {\n\tif sg.client != nil {\n\t\tsg.client.Close()\n\t\tsg.client = nil\n\t}\n}\n\nfunc (sg *SpdyGun) Connect(results chan<- Sample) {\n\tsg.Close()\n\tconnectStart := time.Now()\n\tconfig := tls.Config{\n\t\tInsecureSkipVerify: true,\n\t\tNextProtos: []string{\"spdy\/3.1\"},\n\t}\n\n\tconn, err := tls.Dial(\"tcp\", sg.target, &config)\n\tif err != nil {\n\t\tlog.Printf(\"client: dial: %s\\n\", err)\n\t\treturn\n\t}\n\tsg.client, err = spdy.NewClientConn(conn)\n\tif err != nil {\n\t\tlog.Printf(\"client: connect: %s\\n\", err)\n\t\treturn\n\t}\n\tss := &SpdySample{ts: float64(connectStart.UnixNano()) \/ 1e9, tag: \"CONNECT\"}\n\tss.rt = int(time.Since(connectStart).Seconds() * 1e6)\n\tss.err = err\n\tif ss.err == nil {\n\t\tss.StatusCode = 200\n\t}\n\tresults <- ss\n}\n\nfunc (sg *SpdyGun) Ping(results chan<- Sample) {\n\tsg.Close()\n\tpingStart := time.Now()\n\n\tpinged, err := sg.client.Ping()\n\tif err != nil {\n\t\tlog.Printf(\"client: ping: %s\\n\", err)\n\t}\n\n\tss := &SpdySample{ts: float64(pingStart.UnixNano()) \/ 1e9, tag: \"PING\"}\n\tss.rt = int(time.Since(pingStart).Seconds() * 1e6)\n\tss.err = err\n\tif ss.err == nil && pinged {\n\t\tss.StatusCode = 200\n\t} else {\n\t\tss.StatusCode = 500\n\t}\n\tresults <- ss\n\tif err != nil {\n\t\tsg.Connect(results)\n\t}\n}\n\ntype SpdySample struct {\n\tts float64 \/\/ Unix Timestamp in seconds\n\trt int \/\/ response time in milliseconds\n\tStatusCode int \/\/ protocol status code\n\ttag string\n\terr error\n}\n\nfunc (ds *SpdySample) PhoutSample() *PhoutSample {\n\tvar protoCode, netCode int\n\tif ds.err != nil {\n\t\tprotoCode = 500\n\t\tnetCode = 999\n\t} else {\n\t\tnetCode = 0\n\t\tprotoCode = ds.StatusCode\n\t}\n\treturn &PhoutSample{\n\t\tts: ds.ts,\n\t\ttag: ds.tag,\n\t\trt: ds.rt,\n\t\tconnect: 0,\n\t\tsend: 0,\n\t\tlatency: 0,\n\t\treceive: 0,\n\t\tinterval_event: 0,\n\t\tegress: 0,\n\t\tigress: 0,\n\t\tnetCode: netCode,\n\t\tprotoCode: protoCode,\n\t}\n}\n\nfunc (ds *SpdySample) String() string {\n\treturn fmt.Sprintf(\"rt: %d [%d] %s\", ds.rt, ds.StatusCode, ds.tag)\n}\n\nfunc NewSpdyGunFromConfig(c *GunConfig) (g Gun, err error) {\n\tparams := c.Parameters\n\tif params == nil {\n\t\treturn nil, errors.New(\"Parameters not specified\")\n\t}\n\ttarget, ok := params[\"Target\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"Target not specified\")\n\t}\n\tvar pingPeriod float64\n\tparamPingPeriod, ok = params[\"PingPeriod\"]\n\tif !ok {\n\t\tpingPeriod = 120.0 \/\/ TODO: move this default elsewhere\n\t}\n\tswitch t := paramPingPeriod.(type) {\n\tcase float64:\n\t\tpingPeriod = time.Duration(paramPingPeriod.(float64)*1e3) * time.Millisecond\n\tdefault:\n\t\treturn nil, errors.New(fmt.Sprintf(\"Period is of the wrong type.\"+\n\t\t\t\" Expected 'float64' got '%T'\", t))\n\t}\n\tswitch t := target.(type) {\n\tcase string:\n\t\tg = &SpdyGun{\n\t\t\tpingPeriod: pingPeriod,\n\t\t\ttarget: target.(string),\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(fmt.Sprintf(\"Target is of the wrong type.\"+\n\t\t\t\" Expected 'string' got '%T'\", t))\n\t}\n\treturn g, nil\n}\n<commit_msg>fix ping<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/amahi\/spdy\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"time\"\n)\n\ntype SpdyGun struct {\n\tpingPeriod time.Duration\n\ttarget string\n\tclient *spdy.Client\n}\n\nfunc (sg *SpdyGun) Run(a Ammo, results chan<- Sample) {\n\tif sg.client == nil {\n\t\tsg.Connect(results)\n\t}\n\tif sg.pingPeriod > 0 {\n\t\tpingTimer := time.NewTicker(sg.pingPeriod)\n\t\tgo func() {\n\t\t\tfor range pingTimer.C {\n\t\t\t\tsg.Ping(results)\n\t\t\t}\n\t\t}()\n\t}\n\tstart := time.Now()\n\tss := &SpdySample{ts: float64(start.UnixNano()) \/ 1e9, tag: \"REQUEST\"}\n\tdefer func() {\n\t\tss.rt = int(time.Since(start).Seconds() * 1e6)\n\t\tresults <- ss\n\t}()\n\t\/\/ now send the request to obtain a http response\n\tha, ok := a.(*HttpAmmo)\n\tif !ok {\n\t\terrStr := fmt.Sprintf(\"Got '%T' instead of 'HttpAmmo'\", a)\n\t\tlog.Println(errStr)\n\t\tss.err = errors.New(errStr)\n\t\treturn\n\t}\n\tif ha.Tag != \"\" {\n\t\tss.tag += \"|\" + ha.Tag\n\t}\n\treq, err := ha.Request()\n\tif err != nil {\n\t\tlog.Printf(\"Error making HTTP request: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\tres, err := sg.client.Do(req)\n\tif err != nil {\n\t\tlog.Printf(\"Error performing a request: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\t_, err = io.Copy(ioutil.Discard, res.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading response body: %s\\n\", err)\n\t\tss.err = err\n\t\treturn\n\t}\n\n\t\/\/ TODO: make this an optional verbose answ_log output\n\t\/\/data := make([]byte, int(res.ContentLength))\n\t\/\/ _, err = res.Body.(io.Reader).Read(data)\n\t\/\/ fmt.Println(string(data))\n\tss.StatusCode = res.StatusCode\n\treturn\n}\n\nfunc (sg *SpdyGun) Close() {\n\tif sg.client != nil {\n\t\tsg.client.Close()\n\t\tsg.client = nil\n\t}\n}\n\nfunc (sg *SpdyGun) Connect(results chan<- Sample) {\n\tsg.Close()\n\tconnectStart := time.Now()\n\tconfig := tls.Config{\n\t\tInsecureSkipVerify: true,\n\t\tNextProtos: []string{\"spdy\/3.1\"},\n\t}\n\n\tconn, err := tls.Dial(\"tcp\", sg.target, &config)\n\tif err != nil {\n\t\tlog.Printf(\"client: dial: %s\\n\", err)\n\t\treturn\n\t}\n\tsg.client, err = spdy.NewClientConn(conn)\n\tif err != nil {\n\t\tlog.Printf(\"client: connect: %s\\n\", err)\n\t\treturn\n\t}\n\tss := &SpdySample{ts: float64(connectStart.UnixNano()) \/ 1e9, tag: \"CONNECT\"}\n\tss.rt = int(time.Since(connectStart).Seconds() * 1e6)\n\tss.err = err\n\tif ss.err == nil {\n\t\tss.StatusCode = 200\n\t}\n\tresults <- ss\n}\n\nfunc (sg *SpdyGun) Ping(results chan<- Sample) {\n\tif sg.client == nil { \/\/ TODO: this might be a faulty behaviour\n\t\tsg.Connect(results)\n\t}\n\tpingStart := time.Now()\n\n\tpinged, err := sg.client.Ping(time.Second * 15)\n\tif err != nil {\n\t\tlog.Printf(\"client: ping: %s\\n\", err)\n\t}\n\tif !pinged {\n\t\tlog.Printf(\"client: ping: timed out\\n\")\n\t}\n\tss := &SpdySample{ts: float64(pingStart.UnixNano()) \/ 1e9, tag: \"PING\"}\n\tss.rt = int(time.Since(pingStart).Seconds() * 1e6)\n\tss.err = err\n\tif ss.err == nil && pinged {\n\t\tss.StatusCode = 200\n\t} else {\n\t\tss.StatusCode = 500\n\t}\n\tresults <- ss\n\tif err != nil {\n\t\tsg.Connect(results)\n\t}\n}\n\ntype SpdySample struct {\n\tts float64 \/\/ Unix Timestamp in seconds\n\trt int \/\/ response time in milliseconds\n\tStatusCode int \/\/ protocol status code\n\ttag string\n\terr error\n}\n\nfunc (ds *SpdySample) PhoutSample() *PhoutSample {\n\tvar protoCode, netCode int\n\tif ds.err != nil {\n\t\tprotoCode = 500\n\t\tnetCode = 999\n\t} else {\n\t\tnetCode = 0\n\t\tprotoCode = ds.StatusCode\n\t}\n\treturn &PhoutSample{\n\t\tts: ds.ts,\n\t\ttag: ds.tag,\n\t\trt: ds.rt,\n\t\tconnect: 0,\n\t\tsend: 0,\n\t\tlatency: 0,\n\t\treceive: 0,\n\t\tinterval_event: 0,\n\t\tegress: 0,\n\t\tigress: 0,\n\t\tnetCode: netCode,\n\t\tprotoCode: protoCode,\n\t}\n}\n\nfunc (ds *SpdySample) String() string {\n\treturn fmt.Sprintf(\"rt: %d [%d] %s\", ds.rt, ds.StatusCode, ds.tag)\n}\n\nfunc NewSpdyGunFromConfig(c *GunConfig) (g Gun, err error) {\n\tparams := c.Parameters\n\tif params == nil {\n\t\treturn nil, errors.New(\"Parameters not specified\")\n\t}\n\ttarget, ok := params[\"Target\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"Target not specified\")\n\t}\n\tvar pingPeriod time.Duration\n\tparamPingPeriod, ok := params[\"PingPeriod\"]\n\tif !ok {\n\t\tparamPingPeriod = 120.0 \/\/ TODO: move this default elsewhere\n\t}\n\tswitch t := paramPingPeriod.(type) {\n\tcase float64:\n\t\tpingPeriod = time.Duration(paramPingPeriod.(float64)*1e3) * time.Millisecond\n\tdefault:\n\t\treturn nil, errors.New(fmt.Sprintf(\"Period is of the wrong type.\"+\n\t\t\t\" Expected 'float64' got '%T'\", t))\n\t}\n\tswitch t := target.(type) {\n\tcase string:\n\t\tg = &SpdyGun{\n\t\t\tpingPeriod: pingPeriod,\n\t\t\ttarget: target.(string),\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(fmt.Sprintf(\"Target is of the wrong type.\"+\n\t\t\t\" Expected 'string' got '%T'\", t))\n\t}\n\treturn g, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\ntype SchemaType string\n\nconst (\n\tSchemaTypeNull SchemaType = \"null\"\n\tSchemaTypeBoolean = \"boolean\"\n\tSchemaTypeString = \"string\"\n\tSchemaTypeInteger = \"integer\"\n\tSchemaTypeNumber = \"number\"\n\tSchemaTypeObject = \"object\"\n\tSchemaTypeArray = \"array\"\n)\n\nvar SchemaTypes = [...]SchemaType{\n\tSchemaTypeNull,\n\tSchemaTypeBoolean,\n\tSchemaTypeString,\n\tSchemaTypeInteger,\n\tSchemaTypeNumber,\n\tSchemaTypeObject,\n\tSchemaTypeArray,\n}\n\nfunc SchemaTypeFromString(str string) (SchemaType) {\n\tfor _, v := range SchemaTypes {\n\t\tif str == string(v) {\n\t\t\treturn v\n\t\t}\n\t}\n\tpanic(\"undefined type\")\n}\n<commit_msg>deleted SchemaType<commit_after>package main\n\nconst (\n\tSchemaTypeNull = \"null\"\n\tSchemaTypeBoolean = \"boolean\"\n\tSchemaTypeString = \"string\"\n\tSchemaTypeInteger = \"integer\"\n\tSchemaTypeNumber = \"number\"\n\tSchemaTypeObject = \"object\"\n\tSchemaTypeArray = \"array\"\n)\n\nvar SchemaTypes = [...]string{\n\tSchemaTypeNull,\n\tSchemaTypeBoolean,\n\tSchemaTypeString,\n\tSchemaTypeInteger,\n\tSchemaTypeNumber,\n\tSchemaTypeObject,\n\tSchemaTypeArray,\n}\n\nfunc IsPrimitiveSchemaType(target string) (bool) {\n\tswitch target {\n\tcase SchemaTypeNull, SchemaTypeBoolean, SchemaTypeString, SchemaTypeInteger, SchemaTypeNumber:\n\t\treturn true\n\t}\n\treturn false\n}\n\n<|endoftext|>"} {"text":"<commit_before>package gmgo\n\nimport (\n\t\"errors\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\"reflect\"\n\t\"time\"\n)\n\n\/* DB Connection map *\/\nvar connectionMap = make(map[string]Db)\n\n\/**\n * Interface implemented by structs that needs to be persisted. It should provide collection name, as in the database.\n * Also, a way to create new object id before saving.\n *\/\ntype Document interface {\n\tCollectionName() string\n}\n\n\/** Database config for the connection *\/\ntype DbConfig struct {\n\tHost, DBName, UserName, Password string\n}\n\n\/**\n * Mongo database connection which holds reference to global session and config it was used.\n * Both of these are immutable once initialized and not exposed to clients.\n *\/\ntype Db struct {\n\tConfig DbConfig\n\tSession *mgo.Session\n}\n\n\/\/ Public Db functions\n\/\/ private method to construct collection object based on the name and session\nfunc (db Db) collection(collectionName string, session *mgo.Session) *mgo.Collection {\n\treturn db.Session.DB(db.Config.DBName).C(collectionName)\n}\n\n\/\/ Insert the given db object representing the document to the database\nfunc (db Db) Save(object Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\t\/\/objectId := object.CreateNewObjectId()\n\tcoll := db.collection(object.CollectionName(), session)\n\tif err := coll.Insert(object); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Document inserted successfully!\")\n\treturn nil\n}\n\n\/\/ Find the object by id. Returns error if it's not able to find the document. If document is found\n\/\/ it's copied to the passed in result object.\nfunc (db Db) FindById(id string, result Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tcoll := db.collection(result.CollectionName(), session)\n\tif err := coll.FindId(bson.ObjectIdHex(id)).One(result); err != nil {\n\t\tlog.Printf(\"Error fetching %s with id %s. Error: %s\\n\", result.CollectionName(), id, err)\n\t\treturn err\n\t} else {\n\t\tlog.Printf(\"Found data for id %s\\n\", id)\n\t}\n\treturn nil\n}\n\nfunc (db Db) Find(query map[string]interface{}, result Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tcoll := db.collection(result.CollectionName(), session)\n\tif err := coll.Find(query).One(result); err != nil {\n\t\tlog.Printf(\"Error fetching %s with query %s. Error: %s\\n\", result.CollectionName(), query, err)\n\t\treturn err\n\t} else {\n\t\tlog.Printf(\"Found data for query %s\\n\", query)\n\t}\n\n\treturn nil\n}\n\nfunc (db Db) FindAll(query map[string]interface{}, document Document) (interface{}, error) {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\t\/\/collection pointer for the given document\n\tcoll := db.collection(document.CollectionName(), session)\n\n\tdocumentType := reflect.TypeOf(document)\n\tdocumentSlice := reflect.MakeSlice(reflect.SliceOf(documentType), 0, 0)\n\n\t\/\/ Create a pointer to a slice value and set it to the slice\n\tdocuments := reflect.New(documentSlice.Type())\n\n\tif err := coll.Find(query).All(documents.Interface()); err != nil {\n\t\tlog.Printf(\"Error fetching %s with id %s. Error: %s\\n\", document.CollectionName(), err)\n\t\treturn nil, err\n\t}\n\n\treturn documents.Elem().Interface(), nil\n}\n\nfunc New(dbName string) (Db, error) {\n\tif db, ok := connectionMap[dbName]; ok {\n\t\treturn db, nil\n\t}\n\treturn Db{}, errors.New(\"Database connection not available. Perform 'Setup' first\")\n}\n\n\/\/ Setup the MongoDB connection based on passed in config. It can be called multiple times to setup connection to\n\/\/ multiple MongoDB instances.\nfunc Setup(dbConfig DbConfig) error {\n\tlog.Println(\"Connecting to MongoDB...\")\n\n\tmongoDBDialInfo := &mgo.DialInfo{\n\t\tAddrs: []string{dbConfig.Host},\n\t\tTimeout: 5 * time.Second,\n\t\tDatabase: dbConfig.DBName,\n\t\tUsername: dbConfig.UserName,\n\t\tPassword: dbConfig.Password,\n\t}\n\n\tdbSession, err := mgo.DialWithInfo(mongoDBDialInfo)\n\tif err != nil {\n\t\tlog.Printf(\"MongoDB connection failed : %s. Exiting the program.\\n\", err)\n\t\treturn err\n\t}\n\n\tlog.Println(\"Connected to MongoDB successfully\")\n\n\t\/* Initialized database object with global session*\/\n\tconnectionMap[dbConfig.DBName] = Db{Session: dbSession, Config: dbConfig}\n\n\treturn nil\n}\n<commit_msg>find by ref, and cleaned up other functions<commit_after>package gmgo\n\nimport (\n\t\"errors\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\"reflect\"\n\t\"time\"\n)\n\n\/\/query representation to hide bson.M type to single file\ntype Q map[string]interface{}\n\ntype queryFunc func(q *mgo.Query, result interface{}) error\n\n\/\/ connectionMap holds all the db connection per database name\nvar connectionMap = make(map[string]Db)\n\n\/\/ Document interface implemented by structs that needs to be persisted. It should provide collection name,\n\/\/ as in the database. Also, a way to create new object id before saving.\ntype Document interface {\n\tCollectionName() string\n}\n\n\/\/ DbConfig represents the configuration params needed for MongoDB connection\ntype DbConfig struct {\n\tHost, DBName, UserName, Password string\n}\n\n\/\/ Db represents database connection which holds reference to global session and configuration for that database.\ntype Db struct {\n\tConfig DbConfig\n\tSession *mgo.Session\n}\n\n\/\/ collection returns a mgo.Collection representation for given collection name and session\nfunc (db Db) collection(collectionName string, session *mgo.Session) *mgo.Collection {\n\treturn session.DB(db.Config.DBName).C(collectionName)\n}\n\n\/\/ slice returns the interface representation of actual collection type for returning list data\nfunc (db Db) slice(d Document) interface{} {\n\tdocumentType := reflect.TypeOf(d)\n\tdocumentSlice := reflect.MakeSlice(reflect.SliceOf(documentType), 0, 0)\n\n\t\/\/ Create a pointer to a slice value and set it to the slice\n\treturn reflect.New(documentSlice.Type()).Interface()\n}\n\nfunc (db Db) findQuery(d Document, s *mgo.Session, q interface{}) *mgo.Query {\n\t\/\/collection pointer for the given document\n\treturn db.collection(d.CollectionName(), s).Find(q)\n}\n\nfunc (db Db) executeFindAll(query Q, document Document, qf queryFunc) (interface{}, error) {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\t\/\/collection pointer for the given document\n\tdocuments := db.slice(document)\n\tq := db.findQuery(document, session, query)\n\n\tif err := qf(q, documents); err != nil {\n\t\tlog.Printf(\"Error fetching %s list. Error: %s\\n\", document.CollectionName(), err)\n\t\treturn nil, err\n\t}\n\treturn results(documents)\n}\n\n\/\/ Save inserts the given document that represents the collection to the database.\nfunc (db Db) Save(document Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tcoll := db.collection(document.CollectionName(), session)\n\tif err := coll.Insert(document); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Document inserted successfully!\")\n\treturn nil\n}\n\n\/\/ Find the object by id. Returns error if it's not able to find the document. If document is found\n\/\/ it's copied to the passed in result object.\nfunc (db Db) FindById(id string, result Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tcoll := db.collection(result.CollectionName(), session)\n\tif err := coll.FindId(bson.ObjectIdHex(id)).One(result); err != nil {\n\t\tlog.Printf(\"Error fetching %s with id %s. Error: %s\\n\", result.CollectionName(), id, err)\n\t\treturn err\n\t} else {\n\t\tlog.Printf(\"Found data for id %s\\n\", id)\n\t}\n\treturn nil\n}\n\nfunc (db Db) Find(query map[string]interface{}, document Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tq := db.findQuery(document, session, query)\n\tif err := q.One(document); err != nil {\n\t\tlog.Printf(\"Error fetching %s with query %s. Error: %s\\n\", document.CollectionName(), query, err)\n\t\treturn err\n\t} else {\n\t\tlog.Printf(\"Found data for query %s\\n\", query)\n\t}\n\n\treturn nil\n}\n\nfunc (db Db) FindByRef(ref *mgo.DBRef, document Document) error {\n\tsession := db.Session.Copy()\n\tdefer session.Close()\n\n\tq := session.DB(db.Config.DBName).FindRef(ref)\n\tif err := q.One(document); err != nil {\n\t\tlog.Printf(\"Error fetching %s. Error: %s\\n\", document.CollectionName(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (db Db) FindAll(query Q, document Document) (interface{}, error) {\n\tfn := func(q *mgo.Query, result interface{}) error {\n\t\treturn q.All(result)\n\t}\n\treturn db.executeFindAll(query, document, fn)\n}\n\nfunc (db Db) FindWithLimit(limit int, query Q, document Document) (interface{}, error) {\n\tfn := func(q *mgo.Query, result interface{}) error {\n\t\treturn q.Limit(limit).All(result)\n\t}\n\treturn db.executeFindAll(query, document, fn)\n}\n\nfunc New(dbName string) (Db, error) {\n\tif db, ok := connectionMap[dbName]; ok {\n\t\treturn db, nil\n\t}\n\treturn Db{}, errors.New(\"Database connection not available. Perform 'Setup' first\")\n}\n\n\/\/ Setup the MongoDB connection based on passed in config. It can be called multiple times to setup connection to\n\/\/ multiple MongoDB instances.\nfunc Setup(dbConfig DbConfig) error {\n\tlog.Println(\"Connecting to MongoDB...\")\n\n\tmongoDBDialInfo := &mgo.DialInfo{\n\t\tAddrs: []string{dbConfig.Host},\n\t\tTimeout: 5 * time.Second,\n\t\tDatabase: dbConfig.DBName,\n\t\tUsername: dbConfig.UserName,\n\t\tPassword: dbConfig.Password,\n\t}\n\n\tdbSession, err := mgo.DialWithInfo(mongoDBDialInfo)\n\tif err != nil {\n\t\tlog.Printf(\"MongoDB connection failed : %s. Exiting the program.\\n\", err)\n\t\treturn err\n\t}\n\n\tlog.Println(\"Connected to MongoDB successfully\")\n\n\t\/* Initialized database object with global session*\/\n\tconnectionMap[dbConfig.DBName] = Db{Session: dbSession, Config: dbConfig}\n\n\treturn nil\n}\n\nfunc results(documents interface{}) (interface{}, error) {\n\treturn reflect.ValueOf(documents).Elem().Interface(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gofn\n\nimport (\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/nuveo\/gofn\/iaas\"\n\t\"github.com\/nuveo\/gofn\/provision\"\n)\n\nconst dockerPort = \":2375\"\n\n\/\/ Run runs the designed image\nfunc Run(buildOpts *provision.BuildOptions, volumeOpts *provision.VolumeOptions) (stdout string, err error) {\n\tclient := provision.FnClient(\"\")\n\tif buildOpts.Iaas != nil {\n\t\tvar machine *iaas.Machine\n\t\tmachine, err = buildOpts.Iaas.CreateMachine()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer buildOpts.Iaas.DeleteMachine(machine)\n\t\tclient = provision.FnClient(machine.IP + dockerPort)\n\t}\n\n\tvolume := \"\"\n\tif volumeOpts != nil {\n\t\tvolume = provision.FnConfigVolume(volumeOpts)\n\t}\n\n\timg, err := provision.FnFindImage(client, buildOpts.ImageName)\n\tif err != nil && err != provision.ErrImageNotFound {\n\t\treturn\n\t}\n\n\tvar image string\n\tvar container *docker.Container\n\n\tif img.ID == \"\" {\n\t\timage, _ = provision.FnImageBuild(client, buildOpts)\n\t} else {\n\t\timage = \"gofn\/\" + buildOpts.ImageName\n\t}\n\n\tcontainer, err = provision.FnContainer(client, image, volume)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstdout = provision.FnRun(client, container.ID).String()\n\n\terr = provision.FnRemove(client, container.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n<commit_msg>verify if machine is already created before shutdown<commit_after>package gofn\n\nimport (\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/nuveo\/gofn\/iaas\"\n\t\"github.com\/nuveo\/gofn\/provision\"\n)\n\nconst dockerPort = \":2375\"\n\n\/\/ Run runs the designed image\nfunc Run(buildOpts *provision.BuildOptions, volumeOpts *provision.VolumeOptions) (stdout string, err error) {\n\tclient := provision.FnClient(\"\")\n\tif buildOpts.Iaas != nil {\n\t\tvar machine *iaas.Machine\n\t\tmachine, err = buildOpts.Iaas.CreateMachine()\n\t\tif err != nil {\n\t\t\tif machine != nil {\n\t\t\t\tbuildOpts.Iaas.DeleteMachine(machine)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tdefer buildOpts.Iaas.DeleteMachine(machine)\n\t\tclient = provision.FnClient(machine.IP + dockerPort)\n\t}\n\n\tvolume := \"\"\n\tif volumeOpts != nil {\n\t\tvolume = provision.FnConfigVolume(volumeOpts)\n\t}\n\n\timg, err := provision.FnFindImage(client, buildOpts.ImageName)\n\tif err != nil && err != provision.ErrImageNotFound {\n\t\treturn\n\t}\n\n\tvar image string\n\tvar container *docker.Container\n\n\tif img.ID == \"\" {\n\t\timage, _ = provision.FnImageBuild(client, buildOpts)\n\t} else {\n\t\timage = \"gofn\/\" + buildOpts.ImageName\n\t}\n\n\tcontainer, err = provision.FnContainer(client, image, volume)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstdout = provision.FnRun(client, container.ID).String()\n\n\terr = provision.FnRemove(client, container.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package goha\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n)\n\nfunc NewClient(username, password string) *http.Client {\n\tt := &transport{username, password, http.DefaultTransport}\n\treturn &http.Client{Transport: t}\n}\n\n\/\/ Transport is an implementation of http.RoundTripper that takes care of http authentication.\ntype transport struct {\n\tusername string\n\tpassword string\n\ttransport http.RoundTripper\n}\n\n\/\/ RoundTrip makes an authorized request using digest authentication.\nfunc (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Clones the request so the input is not modified.\n\tcreq := cloneRequest(req)\n\n\t\/\/ Make a request to get the 401 that contains the challenge.\n\tresp, err := t.transport.RoundTrip(req)\n\n\tif err != nil || resp.StatusCode != 401 {\n\t\treturn resp, err\n\t}\n\n\theader := resp.Header.Get(\"WWW-Authenticate\")\n\n\tif strings.HasPrefix(header, \"Digest \") {\n\t\tc := newCredentials(t.username, t.password, \"\", creq.URL.RequestURI(), creq.Method)\n\t\tcreq.Header.Set(\"Authorization\", c.authHeader())\n\t}\n\n\tif strings.HasPrefix(header, \"Basic \") {\n\t\tcreq.SetBasicAuth(t.username, t.password)\n\t}\n\n\t\/\/ Make authenticated request.\n\treturn t.transport.RoundTrip(creq)\n}\n\nfunc cloneRequest(r *http.Request) *http.Request {\n\t\/\/ shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\n\t\/\/ deep copy of the Header\n\tr2.Header = make(http.Header, len(r.Header))\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = append([]string(nil), s...)\n\t}\n\n\treturn r2\n}\n<commit_msg>fix bug with auth header parsing<commit_after>package goha\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n)\n\nfunc NewClient(username, password string) *http.Client {\n\tt := &transport{username, password, http.DefaultTransport}\n\treturn &http.Client{Transport: t}\n}\n\n\/\/ Transport is an implementation of http.RoundTripper that takes care of http authentication.\ntype transport struct {\n\tusername string\n\tpassword string\n\ttransport http.RoundTripper\n}\n\n\/\/ RoundTrip makes an authorized request using digest authentication.\nfunc (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Clones the request so the input is not modified.\n\tcreq := cloneRequest(req)\n\n\t\/\/ Make a request to get the 401 that contains the challenge.\n\tresp, err := t.transport.RoundTrip(req)\n\n\tif err != nil || resp.StatusCode != 401 {\n\t\treturn resp, err\n\t}\n\n\theader := resp.Header.Get(\"WWW-Authenticate\")\n\n\tif strings.HasPrefix(header, \"Digest \") { \/\/ TODO: Case sensitive\n\t\tc := newCredentials(t.username, t.password, header, creq.URL.RequestURI(), creq.Method)\n\t\tcreq.Header.Set(\"Authorization\", c.authHeader())\n\t}\n\n\tif strings.HasPrefix(header, \"Basic \") { \/\/ TODO: Case sensitive\n\t\tcreq.SetBasicAuth(t.username, t.password)\n\t}\n\n\t\/\/ Make authenticated request.\n\treturn t.transport.RoundTrip(creq)\n}\n\nfunc cloneRequest(r *http.Request) *http.Request {\n\t\/\/ shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\n\t\/\/ deep copy of the Header\n\tr2.Header = make(http.Header, len(r.Header))\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = append([]string(nil), s...)\n\t}\n\n\treturn r2\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package modal provides a group with various \"modes\", each mode identified by\n\/\/ a string key and containing multiple modules. It switches between the modes\n\/\/ using a control similar to the workspace switcher.\n\/\/\n\/\/ When adding modules to a mode, certain modules can be marked as \"summary\"\n\/\/ modules, to be displayed when no mode is active. When a mode is active, only\n\/\/ the modules associated with it are displayed.\n\/\/\n\/\/ For example, if a modal group is constructed with the following sets, where\n\/\/ uppercase letters indicate summary modules:\n\/\/\n\/\/ - \"A\" => \"A0\", \"a1\", \"a2\", \"a3\"\n\/\/ - \"B\" => \"b0\", \"B1\", \"B2\"\n\/\/ - \"C\" => \"c0\", \"c1\", \"c2\"\n\/\/\n\/\/ Then by default the modules displayed will be [\"A0\", \"B1\", \"B2\"].\n\/\/ Activating \"A\" will replace that with [\"A0\", \"a1\", \"a2\", \"a3\"],\n\/\/ \"B\" will show [\"b0\", \"B1\", \"B2\"], and \"C\" will show [\"c0\", \"c1\", \"c2\"].\npackage modal \/\/ import \"barista.run\/group\/modal\"\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"barista.run\/bar\"\n\t\"barista.run\/base\/click\"\n\t\"barista.run\/base\/notifier\"\n\t\"barista.run\/colors\"\n\t\"barista.run\/group\"\n\tl \"barista.run\/logging\"\n\t\"barista.run\/outputs\"\n)\n\nconst (\n\tshowWhenSummary int = 1 << iota\n\tshowWhenDetail\n)\n\n\/\/ Controller provides an interface to control a modal group.\ntype Controller interface {\n\t\/\/ Modes returns all the modes in this modal group.\n\tModes() []string\n\t\/\/ Current returns the currently active mode, or an empty string if no mode\n\t\/\/ is active.\n\tCurrent() string\n\t\/\/ Activate activates the given mode.\n\tActivate(string)\n\t\/\/ Toggle toggles between the given mode and no active mode.\n\tToggle(string)\n\t\/\/ Reset clears the active mode.\n\tReset()\n\t\/\/ SetOutput sets the output segment for a given mode. The default output\n\t\/\/ is a plain text segment with the mode name.\n\tSetOutput(string, *bar.Segment)\n}\n\n\/\/ grouper implements a modal grouper.\ntype grouper struct {\n\tcurrent atomic.Value \/\/ of string\n\tshowWhen map[int]int\n\tmode map[int]string\n\tmodeNames []string\n\toutput map[string]*bar.Segment\n\n\tsync.Mutex\n\tnotifyCh <-chan struct{}\n\tnotifyFn func()\n}\n\n\/\/ Modal represents a partially constructed modal group. Modes and modules can\n\/\/ only be added to a Modal before it is finalised, and can only be added to\n\/\/ the bar after it is finalised.\ntype Modal struct {\n\tmodes map[string]*Mode\n\tmodeNames []string\n}\n\n\/\/ Mode represents a mode added to an existing modal group. It provides methods\n\/\/ to add additional outputs and optionally set the default output.\ntype Mode struct {\n\toutput *bar.Segment\n\tmodules []bar.Module\n\tshowWhen map[int]int\n}\n\n\/\/ New creates a new modal group.\nfunc New() *Modal {\n\treturn &Modal{modes: map[string]*Mode{}}\n}\n\n\/\/ Mode creates a new mode.\nfunc (m *Modal) Mode(label string) *Mode {\n\tmd := &Mode{\n\t\toutput: outputs.Text(label),\n\t\tshowWhen: map[int]int{},\n\t}\n\tm.modeNames = append(m.modeNames, label)\n\tm.modes[label] = md\n\treturn md\n}\n\n\/\/ Summary adds a summary module to the mode. Summary modules are shown when the\n\/\/ no mode is active.\nfunc (m *Mode) Summary(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenSummary, modules)\n}\n\n\/\/ Detail adds a detail module to a mode. Modules added here are only shown when\n\/\/ this mode is active.\nfunc (m *Mode) Detail(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenDetail, modules)\n}\n\n\/\/ Add adds a module in both summary and detail modes. Modules added here are\n\/\/ shown both when the current mode is active and when no mode is active. They\n\/\/ are only hidden when a different mode is active.\nfunc (m *Mode) Add(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenSummary|showWhenDetail, modules)\n}\n\n\/\/ add adds modules with the given visibility flags.\nfunc (m *Mode) add(flags int, modules []bar.Module) *Mode {\n\tfor _, mod := range modules {\n\t\tm.showWhen[len(m.modules)] = flags\n\t\tm.modules = append(m.modules, mod)\n\t}\n\treturn m\n}\n\n\/\/ SetOutput sets the output shown in the mode switcher. The default output\n\/\/ is just the name of the mode.\nfunc (m *Mode) SetOutput(s *bar.Segment) *Mode {\n\tm.output = s\n\treturn m\n}\n\n\/\/ Build constructs the modal group, and returns a linked controller.\nfunc (m *Modal) Build() (bar.Module, Controller) {\n\tg := &grouper{\n\t\tmodeNames: m.modeNames,\n\t\tshowWhen: map[int]int{},\n\t\tmode: map[int]string{},\n\t\toutput: map[string]*bar.Segment{},\n\t}\n\tmodules := []bar.Module{}\n\tfor _, modeName := range m.modeNames {\n\t\tmode := m.modes[modeName]\n\t\tstart := len(modules)\n\t\tg.output[modeName] = mode.output\n\t\tmodules = append(modules, mode.modules...)\n\t\tfor k, v := range mode.showWhen {\n\t\t\tg.showWhen[k+start] = v\n\t\t}\n\t\tfor i := start; i < len(modules); i++ {\n\t\t\tg.mode[i] = modeName\n\t\t}\n\t}\n\tg.current.Store(\"\")\n\tg.notifyFn, g.notifyCh = notifier.New()\n\treturn group.New(g, modules...), g\n}\n\nfunc (g *grouper) Visible(idx int) bool {\n\tswitch g.Current() {\n\tcase g.mode[idx]:\n\t\treturn g.showWhen[idx]&showWhenDetail > 0\n\tcase \"\":\n\t\treturn g.showWhen[idx]&showWhenSummary > 0\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (g *grouper) Buttons() (start, end bar.Output) {\n\tout := outputs.Group().Glue()\n\tfor _, mode := range g.modeNames {\n\t\ts := g.output[mode]\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcolorKey := \"inactive\"\n\t\tif g.Current() == mode {\n\t\t\tcolorKey = \"focused\"\n\t\t}\n\t\tmode := mode\n\t\tout.Append(s.Background(colors.Scheme(colorKey + \"_workspace_bg\")).\n\t\t\tColor(colors.Scheme(colorKey + \"_workspace_text\")).\n\t\t\tBorder(colors.Scheme(colorKey + \"_workspace_border\")).\n\t\t\tOnClick(click.Left(func() { g.Toggle(mode) })))\n\t}\n\treturn nil, out\n}\n\nfunc (g *grouper) Signal() <-chan struct{} {\n\treturn g.notifyCh\n}\n\nfunc (g *grouper) Modes() []string {\n\treturn g.modeNames\n}\n\nfunc (g *grouper) Current() string {\n\treturn g.current.Load().(string)\n}\n\nfunc (g *grouper) Activate(mode string) {\n\tg.set(mode)\n}\n\nfunc (g *grouper) Toggle(mode string) {\n\tif g.Current() == mode {\n\t\tg.set(\"\")\n\t} else {\n\t\tg.set(mode)\n\t}\n}\n\nfunc (g *grouper) Reset() {\n\tg.set(\"\")\n}\n\nfunc (g *grouper) set(mode string) {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif g.Current() == mode {\n\t\treturn\n\t}\n\tg.current.Store(mode)\n\tl.Fine(\"%s switched to '%s'\", l.ID(g), mode)\n\tg.notifyFn()\n}\n\nfunc (g *grouper) SetOutput(mode string, segment *bar.Segment) {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif segment != nil {\n\t\tsegment = segment.Clone()\n\t}\n\tg.output[mode] = segment\n\tg.notifyFn()\n}\n<commit_msg>Autoload configured bar colours in modal init()<commit_after>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package modal provides a group with various \"modes\", each mode identified by\n\/\/ a string key and containing multiple modules. It switches between the modes\n\/\/ using a control similar to the workspace switcher.\n\/\/\n\/\/ When adding modules to a mode, certain modules can be marked as \"summary\"\n\/\/ modules, to be displayed when no mode is active. When a mode is active, only\n\/\/ the modules associated with it are displayed.\n\/\/\n\/\/ For example, if a modal group is constructed with the following sets, where\n\/\/ uppercase letters indicate summary modules:\n\/\/\n\/\/ - \"A\" => \"A0\", \"a1\", \"a2\", \"a3\"\n\/\/ - \"B\" => \"b0\", \"B1\", \"B2\"\n\/\/ - \"C\" => \"c0\", \"c1\", \"c2\"\n\/\/\n\/\/ Then by default the modules displayed will be [\"A0\", \"B1\", \"B2\"].\n\/\/ Activating \"A\" will replace that with [\"A0\", \"a1\", \"a2\", \"a3\"],\n\/\/ \"B\" will show [\"b0\", \"B1\", \"B2\"], and \"C\" will show [\"c0\", \"c1\", \"c2\"].\npackage modal \/\/ import \"barista.run\/group\/modal\"\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"barista.run\/bar\"\n\t\"barista.run\/base\/click\"\n\t\"barista.run\/base\/notifier\"\n\t\"barista.run\/colors\"\n\t\"barista.run\/group\"\n\tl \"barista.run\/logging\"\n\t\"barista.run\/outputs\"\n)\n\nconst (\n\tshowWhenSummary int = 1 << iota\n\tshowWhenDetail\n)\n\n\/\/ Controller provides an interface to control a modal group.\ntype Controller interface {\n\t\/\/ Modes returns all the modes in this modal group.\n\tModes() []string\n\t\/\/ Current returns the currently active mode, or an empty string if no mode\n\t\/\/ is active.\n\tCurrent() string\n\t\/\/ Activate activates the given mode.\n\tActivate(string)\n\t\/\/ Toggle toggles between the given mode and no active mode.\n\tToggle(string)\n\t\/\/ Reset clears the active mode.\n\tReset()\n\t\/\/ SetOutput sets the output segment for a given mode. The default output\n\t\/\/ is a plain text segment with the mode name.\n\tSetOutput(string, *bar.Segment)\n}\n\n\/\/ grouper implements a modal grouper.\ntype grouper struct {\n\tcurrent atomic.Value \/\/ of string\n\tshowWhen map[int]int\n\tmode map[int]string\n\tmodeNames []string\n\toutput map[string]*bar.Segment\n\n\tsync.Mutex\n\tnotifyCh <-chan struct{}\n\tnotifyFn func()\n}\n\n\/\/ Modal represents a partially constructed modal group. Modes and modules can\n\/\/ only be added to a Modal before it is finalised, and can only be added to\n\/\/ the bar after it is finalised.\ntype Modal struct {\n\tmodes map[string]*Mode\n\tmodeNames []string\n}\n\n\/\/ Mode represents a mode added to an existing modal group. It provides methods\n\/\/ to add additional outputs and optionally set the default output.\ntype Mode struct {\n\toutput *bar.Segment\n\tmodules []bar.Module\n\tshowWhen map[int]int\n}\n\n\/\/ New creates a new modal group.\nfunc New() *Modal {\n\treturn &Modal{modes: map[string]*Mode{}}\n}\n\n\/\/ Mode creates a new mode.\nfunc (m *Modal) Mode(label string) *Mode {\n\tmd := &Mode{\n\t\toutput: outputs.Text(label),\n\t\tshowWhen: map[int]int{},\n\t}\n\tm.modeNames = append(m.modeNames, label)\n\tm.modes[label] = md\n\treturn md\n}\n\n\/\/ Summary adds a summary module to the mode. Summary modules are shown when the\n\/\/ no mode is active.\nfunc (m *Mode) Summary(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenSummary, modules)\n}\n\n\/\/ Detail adds a detail module to a mode. Modules added here are only shown when\n\/\/ this mode is active.\nfunc (m *Mode) Detail(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenDetail, modules)\n}\n\n\/\/ Add adds a module in both summary and detail modes. Modules added here are\n\/\/ shown both when the current mode is active and when no mode is active. They\n\/\/ are only hidden when a different mode is active.\nfunc (m *Mode) Add(modules ...bar.Module) *Mode {\n\treturn m.add(showWhenSummary|showWhenDetail, modules)\n}\n\n\/\/ add adds modules with the given visibility flags.\nfunc (m *Mode) add(flags int, modules []bar.Module) *Mode {\n\tfor _, mod := range modules {\n\t\tm.showWhen[len(m.modules)] = flags\n\t\tm.modules = append(m.modules, mod)\n\t}\n\treturn m\n}\n\n\/\/ SetOutput sets the output shown in the mode switcher. The default output\n\/\/ is just the name of the mode.\nfunc (m *Mode) SetOutput(s *bar.Segment) *Mode {\n\tm.output = s\n\treturn m\n}\n\n\/\/ Build constructs the modal group, and returns a linked controller.\nfunc (m *Modal) Build() (bar.Module, Controller) {\n\tg := &grouper{\n\t\tmodeNames: m.modeNames,\n\t\tshowWhen: map[int]int{},\n\t\tmode: map[int]string{},\n\t\toutput: map[string]*bar.Segment{},\n\t}\n\tmodules := []bar.Module{}\n\tfor _, modeName := range m.modeNames {\n\t\tmode := m.modes[modeName]\n\t\tstart := len(modules)\n\t\tg.output[modeName] = mode.output\n\t\tmodules = append(modules, mode.modules...)\n\t\tfor k, v := range mode.showWhen {\n\t\t\tg.showWhen[k+start] = v\n\t\t}\n\t\tfor i := start; i < len(modules); i++ {\n\t\t\tg.mode[i] = modeName\n\t\t}\n\t}\n\tg.current.Store(\"\")\n\tg.notifyFn, g.notifyCh = notifier.New()\n\treturn group.New(g, modules...), g\n}\n\nfunc (g *grouper) Visible(idx int) bool {\n\tswitch g.Current() {\n\tcase g.mode[idx]:\n\t\treturn g.showWhen[idx]&showWhenDetail > 0\n\tcase \"\":\n\t\treturn g.showWhen[idx]&showWhenSummary > 0\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc init() {\n\t\/\/ Modal depends on the workspace colours for the mode switcher.\n\t\/\/ Load colours in init to allow overriding during setup.\n\tcolors.LoadBarConfig()\n}\n\nfunc (g *grouper) Buttons() (start, end bar.Output) {\n\tout := outputs.Group().Glue()\n\tfor _, mode := range g.modeNames {\n\t\ts := g.output[mode]\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcolorKey := \"inactive\"\n\t\tif g.Current() == mode {\n\t\t\tcolorKey = \"focused\"\n\t\t}\n\t\tmode := mode\n\t\tout.Append(s.Background(colors.Scheme(colorKey + \"_workspace_bg\")).\n\t\t\tColor(colors.Scheme(colorKey + \"_workspace_text\")).\n\t\t\tBorder(colors.Scheme(colorKey + \"_workspace_border\")).\n\t\t\tOnClick(click.Left(func() { g.Toggle(mode) })))\n\t}\n\treturn nil, out\n}\n\nfunc (g *grouper) Signal() <-chan struct{} {\n\treturn g.notifyCh\n}\n\nfunc (g *grouper) Modes() []string {\n\treturn g.modeNames\n}\n\nfunc (g *grouper) Current() string {\n\treturn g.current.Load().(string)\n}\n\nfunc (g *grouper) Activate(mode string) {\n\tg.set(mode)\n}\n\nfunc (g *grouper) Toggle(mode string) {\n\tif g.Current() == mode {\n\t\tg.set(\"\")\n\t} else {\n\t\tg.set(mode)\n\t}\n}\n\nfunc (g *grouper) Reset() {\n\tg.set(\"\")\n}\n\nfunc (g *grouper) set(mode string) {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif g.Current() == mode {\n\t\treturn\n\t}\n\tg.current.Store(mode)\n\tl.Fine(\"%s switched to '%s'\", l.ID(g), mode)\n\tg.notifyFn()\n}\n\nfunc (g *grouper) SetOutput(mode string, segment *bar.Segment) {\n\tg.Lock()\n\tdefer g.Unlock()\n\tif segment != nil {\n\t\tsegment = segment.Clone()\n\t}\n\tg.output[mode] = segment\n\tg.notifyFn()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"gomp\/routers\"\n\n\t\"github.com\/go-macaron\/binding\"\n\t\"gopkg.in\/macaron.v1\"\n)\n\nfunc main() {\n\tm := macaron.Classic()\n\tm.Use(macaron.Renderer())\n\tm.Use(macaron.Static(\"public\"))\n\n\tm.Get(\"\/\", routers.CheckInstalled, routers.Home)\n\tm.Group(\"\/recipes\", func() {\n\t\tm.Get(\"\/\", routers.ListRecipes)\n\t\tm.Get(\"\/:id:int\", routers.GetRecipe)\n\t\tm.Get(\"\/create\", routers.CreateRecipe)\n\t\tm.Post(\"\/create\", binding.Bind(routers.RecipeForm{}), routers.CreateRecipePost)\n\t\tm.Get(\"\/edit\/:id:int\", routers.EditRecipe)\n\t\tm.Post(\"\/edit\/:id:int\", binding.Bind(routers.RecipeForm{}), routers.EditRecipePost)\n\t\tm.Get(\"\/delete\/:id:int\", routers.DeleteRecipe)\n\t}, routers.CheckInstalled)\n\t\/\/m.Group(\"\/meals\", func() {\n\t\/\/ m.Get(\"\/\", routers.Meal)\n\t\/\/ m.Get(\/:id:int, routers.Meals)\n\t\/\/}, routers.CheckInstalled)\n\tm.Get(\"\/install\", routers.Install)\n\n\tm.Run()\n}\n<commit_msg>Fixing build error from the removal of routers\/install.go<commit_after>package main\n\nimport (\n\t\"gomp\/routers\"\n\n\t\"github.com\/go-macaron\/binding\"\n\t\"gopkg.in\/macaron.v1\"\n)\n\nfunc main() {\n\tm := macaron.Classic()\n\tm.Use(macaron.Renderer())\n\tm.Use(macaron.Static(\"public\"))\n\n\tm.Get(\"\/\", routers.CheckInstalled, routers.Home)\n\tm.Group(\"\/recipes\", func() {\n\t\tm.Get(\"\/\", routers.ListRecipes)\n\t\tm.Get(\"\/:id:int\", routers.GetRecipe)\n\t\tm.Get(\"\/create\", routers.CreateRecipe)\n\t\tm.Post(\"\/create\", binding.Bind(routers.RecipeForm{}), routers.CreateRecipePost)\n\t\tm.Get(\"\/edit\/:id:int\", routers.EditRecipe)\n\t\tm.Post(\"\/edit\/:id:int\", binding.Bind(routers.RecipeForm{}), routers.EditRecipePost)\n\t\tm.Get(\"\/delete\/:id:int\", routers.DeleteRecipe)\n\t}, routers.CheckInstalled)\n\t\/\/m.Group(\"\/meals\", func() {\n\t\/\/ m.Get(\"\/\", routers.Meal)\n\t\/\/ m.Get(\/:id:int, routers.Meals)\n\t\/\/}, routers.CheckInstalled)\n\n\tm.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Gone Time Tracker -or- Where has my time gone?\npackage main\n\nimport (\n\t\"encoding\/gob\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/mewkiz\/pkg\/goutil\"\n)\n\nvar (\n\tgoneDir string\n\tdumpFileName string\n\tlogFileName string\n\tindexFileName string\n\ttracks Tracks\n\tzzz bool\n\tlogger *log.Logger\n\tcurrent Window\n\tdisplay string\n\tlisten string\n\ttimeout int\n\texpire int\n)\n\nfunc init() {\n\tvar err error\n\tgoneDir, err = goutil.SrcDir(\"github.com\/dim13\/gone\")\n\tif err != nil {\n\t\tlog.Fatal(\"init: \", err)\n\t}\n\tdumpFileName = filepath.Join(goneDir, \"gone.gob\")\n\tlogFileName = filepath.Join(goneDir, \"gone.log\")\n\tindexFileName = filepath.Join(goneDir, \"root.tmpl\")\n\tinitTemplate(indexFileName)\n\n\tflag.StringVar(&display, \"display\", \":0\", \"X11 display\")\n\tflag.StringVar(&listen, \"listen\", \"127.0.0.1:8001\", \"web reporter\")\n\tflag.IntVar(&timeout, \"timeout\", 60, \"idle time in seconds\")\n\tflag.IntVar(&expire, \"expire\", 8, \"expire time in hours\")\n\tflag.Parse()\n}\n\ntype Tracks map[Window]Track\n\ntype Track struct {\n\tSeen time.Time\n\tSpent time.Duration\n\tIdle time.Duration\n}\n\nfunc (t Track) String() string {\n\treturn fmt.Sprintf(\"%s %s\",\n\t\tt.Seen.Format(\"2006\/01\/02 15:04:05\"), t.Spent)\n}\n\nfunc (w Window) String() string {\n\treturn fmt.Sprintf(\"%s %s\", w.Class, w.Name)\n}\n\nfunc (t Tracks) Snooze(idle time.Duration) {\n\tif !zzz {\n\t\tlogger.Println(\"away from keyboard, idle for\", idle)\n\t\tif c, ok := t[current]; ok {\n\t\t\tif c.Idle < idle {\n\t\t\t\tc.Idle = idle\n\t\t\t}\n\t\t\tt[current] = c\n\t\t}\n\t\tzzz = true\n\t}\n}\n\nfunc (t Tracks) Wakeup() {\n\tif zzz {\n\t\tlogger.Println(\"back to keyboard\")\n\t\tif c, ok := t[current]; ok {\n\t\t\tc.Seen = time.Now()\n\t\t\tt[current] = c\n\t\t}\n\t\tzzz = false\n\t}\n}\n\nfunc (t Tracks) Update(w Window) {\n\tif !zzz {\n\t\tif c, ok := t[current]; ok {\n\t\t\tc.Spent += time.Since(c.Seen)\n\t\t\tt[current] = c\n\t\t}\n\t}\n\n\tif _, ok := t[w]; !ok {\n\t\tt[w] = Track{}\n\t}\n\n\ts := t[w]\n\ts.Seen = time.Now()\n\tt[w] = s\n\n\tcurrent = w\n}\n\nfunc (t Tracks) Remove(d time.Duration) {\n\tfor k, v := range t {\n\t\tif time.Since(v.Seen) > d || v.Idle > d {\n\t\t\tlogger.Println(v, k)\n\t\t\tdelete(t, k)\n\t\t}\n\t}\n}\n\nfunc Load(fname string) Tracks {\n\tt := make(Tracks)\n\tdump, err := os.Open(fname)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn t\n\t}\n\tdefer dump.Close()\n\tdec := gob.NewDecoder(dump)\n\terr = dec.Decode(&t)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn t\n}\n\nfunc (t Tracks) Store(fname string) {\n\ttmp := fname + \".tmp\"\n\tdump, err := os.Create(tmp)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer dump.Close()\n\tenc := gob.NewEncoder(dump)\n\terr = enc.Encode(t)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Remove(tmp)\n\t\treturn\n\t}\n\tos.Rename(tmp, fname)\n}\n\nfunc (t Tracks) Cleanup() {\n\tfor {\n\t\ttracks.Remove(time.Duration(expire) * time.Hour)\n\t\ttracks.Store(dumpFileName)\n\t\ttime.Sleep(time.Minute)\n\t}\n}\n\nfunc main() {\n\tX := Connect()\n\tdefer X.Close()\n\n\tlogfile, err := os.OpenFile(logFileName,\n\t\tos.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer logfile.Close()\n\tlogger = log.New(logfile, \"\", log.LstdFlags)\n\n\ttracks = Load(dumpFileName)\n\n\tgo X.Collect(tracks, time.Duration(timeout)*time.Second)\n\tgo tracks.Cleanup()\n\n\twebReporter(listen)\n}\n<commit_msg>Use time.Duration for time related args<commit_after>\/\/ Gone Time Tracker -or- Where has my time gone?\npackage main\n\nimport (\n\t\"encoding\/gob\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/mewkiz\/pkg\/goutil\"\n)\n\nvar (\n\tgoneDir string\n\tdumpFileName string\n\tlogFileName string\n\tindexFileName string\n\ttracks Tracks\n\tzzz bool\n\tlogger *log.Logger\n\tcurrent Window\n\tdisplay string\n\tlisten string\n\ttimeout time.Duration\n\texpire time.Duration\n)\n\nfunc init() {\n\tvar err error\n\tgoneDir, err = goutil.SrcDir(\"github.com\/dim13\/gone\")\n\tif err != nil {\n\t\tlog.Fatal(\"init: \", err)\n\t}\n\tdumpFileName = filepath.Join(goneDir, \"gone.gob\")\n\tlogFileName = filepath.Join(goneDir, \"gone.log\")\n\tindexFileName = filepath.Join(goneDir, \"root.tmpl\")\n\tinitTemplate(indexFileName)\n\n\tflag.StringVar(&display, \"display\", \":0\", \"X11 display\")\n\tflag.StringVar(&listen, \"listen\", \"127.0.0.1:8001\", \"web reporter\")\n\tflag.DurationVar(&timeout, \"timeout\", time.Minute, \"idle time\")\n\tflag.DurationVar(&expire, \"expire\", time.Hour*8, \"expire time\")\n\tflag.Parse()\n}\n\ntype Tracks map[Window]Track\n\ntype Track struct {\n\tSeen time.Time\n\tSpent time.Duration\n\tIdle time.Duration\n}\n\nfunc (t Track) String() string {\n\treturn fmt.Sprintf(\"%s %s\",\n\t\tt.Seen.Format(\"2006\/01\/02 15:04:05\"), t.Spent)\n}\n\nfunc (w Window) String() string {\n\treturn fmt.Sprintf(\"%s %s\", w.Class, w.Name)\n}\n\nfunc (t Tracks) Snooze(idle time.Duration) {\n\tif !zzz {\n\t\tlogger.Println(\"away from keyboard, idle for\", idle)\n\t\tif c, ok := t[current]; ok {\n\t\t\tif c.Idle < idle {\n\t\t\t\tc.Idle = idle\n\t\t\t}\n\t\t\tt[current] = c\n\t\t}\n\t\tzzz = true\n\t}\n}\n\nfunc (t Tracks) Wakeup() {\n\tif zzz {\n\t\tlogger.Println(\"back to keyboard\")\n\t\tif c, ok := t[current]; ok {\n\t\t\tc.Seen = time.Now()\n\t\t\tt[current] = c\n\t\t}\n\t\tzzz = false\n\t}\n}\n\nfunc (t Tracks) Update(w Window) {\n\tif !zzz {\n\t\tif c, ok := t[current]; ok {\n\t\t\tc.Spent += time.Since(c.Seen)\n\t\t\tt[current] = c\n\t\t}\n\t}\n\n\tif _, ok := t[w]; !ok {\n\t\tt[w] = Track{}\n\t}\n\n\ts := t[w]\n\ts.Seen = time.Now()\n\tt[w] = s\n\n\tcurrent = w\n}\n\nfunc (t Tracks) Remove(d time.Duration) {\n\tfor k, v := range t {\n\t\tif time.Since(v.Seen) > d || v.Idle > d {\n\t\t\tlogger.Println(v, k)\n\t\t\tdelete(t, k)\n\t\t}\n\t}\n}\n\nfunc Load(fname string) Tracks {\n\tt := make(Tracks)\n\tdump, err := os.Open(fname)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn t\n\t}\n\tdefer dump.Close()\n\tdec := gob.NewDecoder(dump)\n\terr = dec.Decode(&t)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn t\n}\n\nfunc (t Tracks) Store(fname string) {\n\ttmp := fname + \".tmp\"\n\tdump, err := os.Create(tmp)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer dump.Close()\n\tenc := gob.NewEncoder(dump)\n\terr = enc.Encode(t)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Remove(tmp)\n\t\treturn\n\t}\n\tos.Rename(tmp, fname)\n}\n\nfunc (t Tracks) Cleanup() {\n\tfor {\n\t\ttracks.Remove(expire)\n\t\ttracks.Store(dumpFileName)\n\t\ttime.Sleep(time.Minute)\n\t}\n}\n\nfunc main() {\n\tX := Connect()\n\tdefer X.Close()\n\n\tlogfile, err := os.OpenFile(logFileName,\n\t\tos.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer logfile.Close()\n\tlogger = log.New(logfile, \"\", log.LstdFlags)\n\n\ttracks = Load(dumpFileName)\n\n\tgo X.Collect(tracks, timeout)\n\tgo tracks.Cleanup()\n\n\twebReporter(listen)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/sbinet\/go-readline\/pkg\/readline\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/SaviorPhoenix\/gosh\/builtins\"\n\t\"github.com\/SaviorPhoenix\/gosh\/cmd\"\n\t\"github.com\/SaviorPhoenix\/gosh\/sh\"\n)\n\nfunc executeCommand(c cmd.GoshCmd) int {\n\tstr := strings.Join(c.Tokens[1:len(c.Tokens)], \" \")\n\tparts := strings.Fields(str)\n\tfile := c.GetNameStr()\n\targs := parts[0:len(parts)]\n\n\trun := exec.Command(*file, args...)\n\n\trun.Stdout = os.Stdout\n\trun.Stdin = os.Stdin\n\trun.Stderr = os.Stderr\n\n\terr := run.Run()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn 0\n}\n\nfunc main() {\n\tshell.Sh.InitShell()\n\tenv := shell.Sh.GetEnv()\n\n\tfor {\n\t\tprompt := env.GetEnvVar(\"prompt\")\n\t\tinput := readline.ReadLine(&prompt)\n\n\t\t\/\/Don't bother parsing input if it's empty\n\t\tif *input == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif env.VarCmp(\"history\", \"on\") == true {\n\t\t\treadline.AddHistory(*input)\n\t\t}\n\n\t\tc := cmd.ParseInput(input)\n\n\t\t\/\/CheckBuiltin will return 1 if the command was a builtin,\n\t\t\/\/and -1 if we're exiting the shell.\n\t\tisBuiltin := builtins.CheckBuiltin(c)\n\t\tif isBuiltin == -1 {\n\t\t\tbreak\n\t\t} else if isBuiltin == 1 {\n\t\t\tcontinue\n\t\t}\n\t\texecuteCommand(c)\n\t}\n}\n<commit_msg>Change gosh.go\/executeCommand() to return an error instead of an int<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/sbinet\/go-readline\/pkg\/readline\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/SaviorPhoenix\/gosh\/builtins\"\n\t\"github.com\/SaviorPhoenix\/gosh\/cmd\"\n\t\"github.com\/SaviorPhoenix\/gosh\/sh\"\n)\n\nfunc executeCommand(c cmd.GoshCmd) error {\n\tstr := strings.Join(c.Tokens[1:len(c.Tokens)], \" \")\n\tparts := strings.Fields(str)\n\tfile := c.GetNameStr()\n\targs := parts[0:len(parts)]\n\n\trun := exec.Command(*file, args...)\n\n\trun.Stdout = os.Stdout\n\trun.Stdin = os.Stdin\n\trun.Stderr = os.Stderr\n\n\terr := run.Run()\n\treturn err\n}\n\nfunc main() {\n\tshell.Sh.InitShell()\n\tenv := shell.Sh.GetEnv()\n\n\tfor {\n\t\tprompt := env.GetEnvVar(\"prompt\")\n\t\tinput := readline.ReadLine(&prompt)\n\n\t\t\/\/Don't bother parsing input if it's empty\n\t\tif *input == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif env.VarCmp(\"history\", \"on\") == true {\n\t\t\treadline.AddHistory(*input)\n\t\t}\n\n\t\tc := cmd.ParseInput(input)\n\n\t\t\/\/CheckBuiltin will return 1 if the command was a builtin,\n\t\t\/\/and -1 if we're exiting the shell.\n\t\tisBuiltin := builtins.CheckBuiltin(c)\n\t\tif isBuiltin == -1 {\n\t\t\tbreak\n\t\t} else if isBuiltin == 1 {\n\t\t\tcontinue\n\t\t}\n\t\tif err := executeCommand(c); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n)\n\n\/\/ Does the grepping\nfunc grep(r io.Reader, re *regexp.Regexp, opt *grepOpt) error {\n\tbuf := bufio.NewReader(r)\n\tn := 1\n\tfor {\n\t\tb, _, err := buf.ReadLine()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tline := string(b)\n\t\tif re.MatchString(line) {\n\t\t\tif opt.optFilename {\n\t\t\t\tfmt.Printf(\"%s:\", opt.filename)\n\t\t\t}\n\t\t\tif opt.optNumber {\n\t\t\t\tfmt.Printf(\"%d:\", n)\n\t\t\t}\n\t\t\tfmt.Println(line)\n\t\t\tn++\n\t\t}\n\t}\n\treturn nil\n}\n\n<commit_msg>Fix line-number increment<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n)\n\n\/\/ Does the grepping\nfunc grep(r io.Reader, re *regexp.Regexp, opt *grepOpt) error {\n\tbuf := bufio.NewReader(r)\n\tn := 1\n\tfor {\n\t\tb, _, err := buf.ReadLine()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tline := string(b)\n\t\tif re.MatchString(line) {\n\t\t\tif opt.optFilename {\n\t\t\t\tfmt.Printf(\"%s:\", opt.filename)\n\t\t\t}\n\t\t\tif opt.optNumber {\n\t\t\t\tfmt.Printf(\"%d:\", n)\n\t\t\t}\n\t\t\tfmt.Println(line)\n\t\t}\n\t\tn++\n\t}\n\treturn nil\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage gcsproxy_test\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com\/jacobsa\/gcloud\/gcs\/mock_gcs\"\n\t\"github.com\/jacobsa\/gcsfuse\/gcsproxy\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n)\n\nfunc TestListingProxy(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Invariant-checking listing proxy\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ A wrapper around ListingProxy that calls CheckInvariants whenever invariants\n\/\/ should hold. For catching logic errors early in the test.\ntype checkingListingProxy struct {\n\twrapped *gcsproxy.ListingProxy\n}\n\nfunc (lp *checkingListingProxy) Name() string {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.Name()\n}\n\nfunc (lp *checkingListingProxy) List() ([]*storage.Object, []string, error) {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.List(context.Background())\n}\n\nfunc (lp *checkingListingProxy) NoteNewObject(o *storage.Object) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteNewObject(o)\n}\n\nfunc (lp *checkingListingProxy) NoteNewSubdirectory(name string) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteNewSubdirectory(name)\n}\n\nfunc (lp *checkingListingProxy) NoteRemoval(name string) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteRemoval(name)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ListingProxyTest struct {\n\tdirName string\n\tbucket mock_gcs.MockBucket\n\tclock timeutil.SimulatedClock\n\tlp checkingListingProxy\n}\n\nvar _ SetUpInterface = &ListingProxyTest{}\n\nfunc init() { RegisterTestSuite(&ListingProxyTest{}) }\n\nfunc (t *ListingProxyTest) SetUp(ti *TestInfo) {\n\tt.dirName = \"some\/dir\/\"\n\tt.bucket = mock_gcs.NewMockBucket(ti.MockController, \"bucket\")\n\n\tvar err error\n\tt.lp.wrapped, err = gcsproxy.NewListingProxy(t.bucket, &t.clock, t.dirName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Test functions\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ListingProxyTest) CreateForRootDirectory() {\n\t_, err := gcsproxy.NewListingProxy(t.bucket, &t.clock, \"\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *ListingProxyTest) CreateForIllegalDirectoryName() {\n\t_, err := gcsproxy.NewListingProxy(t.bucket, &t.clock, \"foo\/bar\")\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"foo\/bar\")))\n\tExpectThat(err, Error(HasSubstr(\"directory name\")))\n}\n\nfunc (t *ListingProxyTest) Name() {\n\tExpectEq(t.dirName, t.lp.Name())\n}\n\nfunc (t *ListingProxyTest) List_CallsBucket() {\n\t\/\/ Bucket.ListObjects\n\tvar query *storage.Query\n\tsaveQuery := func(\n\t\tctx context.Context,\n\t\tq *storage.Query) (*storage.Objects, error) {\n\t\tquery = q\n\t\treturn nil, errors.New(\"\")\n\t}\n\n\tExpectCall(t.bucket, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(oglemock.Invoke(saveQuery))\n\n\t\/\/ List\n\tt.lp.List()\n\n\tAssertNe(nil, query)\n\tExpectEq(\"\/\", query.Delimiter)\n\tExpectEq(t.dirName, query.Prefix)\n\tExpectFalse(query.Versions)\n\tExpectEq(\"\", query.Cursor)\n\tExpectEq(0, query.MaxResults)\n}\n\nfunc (t *ListingProxyTest) List_BucketFails() {\n\t\/\/ Bucket.ListObjects\n\tExpectCall(t.bucket, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ List\n\t_, _, err := t.lp.List()\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"List\")))\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ListingProxyTest) List_BucketReturnsIllegalObjectName() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_BucketReturnsIllegalDirectoryName() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_EmptyResult() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_OnlyPlaceholderForProxiedDir() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_NonEmptyResult() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_CacheHasExpired() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewObject() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewObject_IllegalNames() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewSubdirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewSubdirectory_IllegalNames() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteRemoval() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>ListingProxyTest.List_BucketReturnsIllegalObjectName<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage gcsproxy_test\n\nimport (\n\t\"errors\"\n\t\"path\"\n\t\"testing\"\n\n\t\"github.com\/jacobsa\/gcloud\/gcs\/mock_gcs\"\n\t\"github.com\/jacobsa\/gcsfuse\/gcsproxy\"\n\t\"github.com\/jacobsa\/gcsfuse\/timeutil\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n)\n\nfunc TestListingProxy(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Invariant-checking listing proxy\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ A wrapper around ListingProxy that calls CheckInvariants whenever invariants\n\/\/ should hold. For catching logic errors early in the test.\ntype checkingListingProxy struct {\n\twrapped *gcsproxy.ListingProxy\n}\n\nfunc (lp *checkingListingProxy) Name() string {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.Name()\n}\n\nfunc (lp *checkingListingProxy) List() ([]*storage.Object, []string, error) {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.List(context.Background())\n}\n\nfunc (lp *checkingListingProxy) NoteNewObject(o *storage.Object) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteNewObject(o)\n}\n\nfunc (lp *checkingListingProxy) NoteNewSubdirectory(name string) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteNewSubdirectory(name)\n}\n\nfunc (lp *checkingListingProxy) NoteRemoval(name string) error {\n\tlp.wrapped.CheckInvariants()\n\tdefer lp.wrapped.CheckInvariants()\n\treturn lp.wrapped.NoteRemoval(name)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ListingProxyTest struct {\n\tdirName string\n\tbucket mock_gcs.MockBucket\n\tclock timeutil.SimulatedClock\n\tlp checkingListingProxy\n}\n\nvar _ SetUpInterface = &ListingProxyTest{}\n\nfunc init() { RegisterTestSuite(&ListingProxyTest{}) }\n\nfunc (t *ListingProxyTest) SetUp(ti *TestInfo) {\n\tt.dirName = \"some\/dir\/\"\n\tt.bucket = mock_gcs.NewMockBucket(ti.MockController, \"bucket\")\n\n\tvar err error\n\tt.lp.wrapped, err = gcsproxy.NewListingProxy(t.bucket, &t.clock, t.dirName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Test functions\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ListingProxyTest) CreateForRootDirectory() {\n\t_, err := gcsproxy.NewListingProxy(t.bucket, &t.clock, \"\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *ListingProxyTest) CreateForIllegalDirectoryName() {\n\t_, err := gcsproxy.NewListingProxy(t.bucket, &t.clock, \"foo\/bar\")\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"foo\/bar\")))\n\tExpectThat(err, Error(HasSubstr(\"directory name\")))\n}\n\nfunc (t *ListingProxyTest) Name() {\n\tExpectEq(t.dirName, t.lp.Name())\n}\n\nfunc (t *ListingProxyTest) List_CallsBucket() {\n\t\/\/ Bucket.ListObjects\n\tvar query *storage.Query\n\tsaveQuery := func(\n\t\tctx context.Context,\n\t\tq *storage.Query) (*storage.Objects, error) {\n\t\tquery = q\n\t\treturn nil, errors.New(\"\")\n\t}\n\n\tExpectCall(t.bucket, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(oglemock.Invoke(saveQuery))\n\n\t\/\/ List\n\tt.lp.List()\n\n\tAssertNe(nil, query)\n\tExpectEq(\"\/\", query.Delimiter)\n\tExpectEq(t.dirName, query.Prefix)\n\tExpectFalse(query.Versions)\n\tExpectEq(\"\", query.Cursor)\n\tExpectEq(0, query.MaxResults)\n}\n\nfunc (t *ListingProxyTest) List_BucketFails() {\n\t\/\/ Bucket.ListObjects\n\tExpectCall(t.bucket, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ List\n\t_, _, err := t.lp.List()\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"List\")))\n\tExpectThat(err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ListingProxyTest) List_BucketReturnsIllegalObjectName() {\n\tbadObj := &storage.Object{\n\t\tName: path.Join(t.dirName, \"foo\/\"),\n\t}\n\n\tbadListing := &storage.Objects{\n\t\tResults: []*storage.Object{badObj},\n\t}\n\n\t\/\/ Bucket.ListObjects\n\tExpectCall(t.bucket, \"ListObjects\")(Any(), Any()).\n\t\tWillOnce(oglemock.Return(badListing, nil))\n\n\t\/\/ List\n\t_, _, err := t.lp.List()\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"object name\")))\n\tExpectThat(err, Error(HasSubstr(badObj.Name)))\n}\n\nfunc (t *ListingProxyTest) List_BucketReturnsIllegalDirectoryName() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_EmptyResult() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_OnlyPlaceholderForProxiedDir() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_NonEmptyResult() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) List_CacheHasExpired() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewObject() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewObject_IllegalNames() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewSubdirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteNewSubdirectory_IllegalNames() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ListingProxyTest) NoteRemoval() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsproxy\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/mutable\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestObjectSyncer(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fakeObjectCreator\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ An objectCreator that records the arguments it is called with, returning\n\/\/ canned results.\ntype fakeObjectCreator struct {\n\tcalled bool\n\n\t\/\/ Supplied arguments\n\tsrcObject *gcs.Object\n\tcontents []byte\n\n\t\/\/ Canned results\n\to *gcs.Object\n\terr error\n}\n\nfunc (oc *fakeObjectCreator) Create(\n\tctx context.Context,\n\tsrcObject *gcs.Object,\n\tr io.Reader) (o *gcs.Object, err error) {\n\t\/\/ Have we been called more than once?\n\tAssertFalse(oc.called)\n\toc.called = true\n\n\t\/\/ Record args.\n\toc.srcObject = srcObject\n\toc.contents, err = ioutil.ReadAll(r)\n\tAssertEq(nil, err)\n\n\t\/\/ Return results.\n\to, err = oc.o, oc.err\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst srcObjectContents = \"taco\"\nconst appendThreshold = uint64(len(srcObjectContents))\n\ntype ObjectSyncerTest struct {\n\tctx context.Context\n\n\tfullCreator fakeObjectCreator\n\tappendCreator fakeObjectCreator\n\n\tbucket gcs.Bucket\n\tleaser lease.FileLeaser\n\tsyncer ObjectSyncer\n\tclock timeutil.SimulatedClock\n\n\tsrcObject *gcs.Object\n\tcontent mutable.Content\n}\n\nvar _ SetUpInterface = &ObjectSyncerTest{}\n\nfunc init() { RegisterTestSuite(&ObjectSyncerTest{}) }\n\nfunc (t *ObjectSyncerTest) SetUp(ti *TestInfo) {\n\tvar err error\n\tt.ctx = ti.Ctx\n\n\t\/\/ Set up dependencies.\n\tt.bucket = gcsfake.NewFakeBucket(&t.clock, \"some_bucket\")\n\tt.leaser = lease.NewFileLeaser(\"\", math.MaxInt32, math.MaxInt32)\n\tt.syncer = newObjectSyncer(\n\t\tappendThreshold,\n\t\t&t.fullCreator,\n\t\t&t.appendCreator)\n\n\tt.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))\n\n\t\/\/ Set up a source object.\n\tt.srcObject, err = t.bucket.CreateObject(\n\t\tt.ctx,\n\t\t&gcs.CreateObjectRequest{\n\t\t\tName: \"foo\",\n\t\t\tContents: strings.NewReader(srcObjectContents),\n\t\t})\n\n\tAssertEq(nil, err)\n\n\t\/\/ Wrap a mutable.Content around it.\n\tt.content = mutable.NewContent(\n\t\tNewReadProxy(\n\t\t\tt.srcObject,\n\t\t\tnil, \/\/ Initial read lease\n\t\t\tmath.MaxUint64, \/\/ Chunk size\n\t\t\tt.leaser,\n\t\t\tt.bucket),\n\t\t&t.clock)\n}\n\nfunc (t *ObjectSyncerTest) call() (\n\trl lease.ReadLease, o *gcs.Object, err error) {\n\trl, o, err = t.syncer.SyncObject(t.ctx, t.srcObject, t.content)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ObjectSyncerTest) NotDirty() {\n\t\/\/ Call\n\trl, o, err := t.call()\n\n\tAssertEq(nil, err)\n\tExpectEq(nil, rl)\n\tExpectEq(nil, o)\n\n\t\/\/ Neither creater should have been called.\n\tExpectFalse(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SmallerThanSource() {\n\tt.fullCreator.err = errors.New(\"\")\n\n\t\/\/ Truncate downward.\n\terr := t.content.Truncate(t.ctx, int64(len(srcObjectContents)-1))\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SameSizeAsSource() {\n\tt.fullCreator.err = errors.New(\"\")\n\n\t\/\/ Dirty a byte without changing the length.\n\t_, err := t.content.WriteAt(\n\t\tt.ctx,\n\t\t[]byte(\"a\"),\n\t\tint64(len(srcObjectContents)-1))\n\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) LargerThanSource_ThresholdInSource() {\n\tvar err error\n\tt.fullCreator.err = errors.New(\"\")\n\n\t\/\/ Extend the length of the content.\n\terr = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+100))\n\tAssertEq(nil, err)\n\n\t\/\/ But dirty a byte within the initial content.\n\t_, err = t.content.WriteAt(\n\t\tt.ctx,\n\t\t[]byte(\"a\"),\n\t\tint64(len(srcObjectContents)-1))\n\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SourceTooShortForAppend() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) SourceComponentCountTooHigh() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) LargerThanSource_ThresholdAtEndOfSource() {\n\tvar err error\n\tt.fullCreator.err = errors.New(\"\")\n\n\t\/\/ Extend the length of the content.\n\terr = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+1))\n\tAssertEq(nil, err)\n\n\t\/\/ The append creator should be called.\n\tt.call()\n\n\tExpectFalse(t.fullCreator.called)\n\tExpectTrue(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorFails() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorReturnsPreconditionError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorSucceeds() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorFails() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorReturnsPreconditionError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorSucceeds() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>Return errors by default.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsproxy\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/mutable\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestObjectSyncer(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ fakeObjectCreator\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ An objectCreator that records the arguments it is called with, returning\n\/\/ canned results.\ntype fakeObjectCreator struct {\n\tcalled bool\n\n\t\/\/ Supplied arguments\n\tsrcObject *gcs.Object\n\tcontents []byte\n\n\t\/\/ Canned results\n\to *gcs.Object\n\terr error\n}\n\nfunc (oc *fakeObjectCreator) Create(\n\tctx context.Context,\n\tsrcObject *gcs.Object,\n\tr io.Reader) (o *gcs.Object, err error) {\n\t\/\/ Have we been called more than once?\n\tAssertFalse(oc.called)\n\toc.called = true\n\n\t\/\/ Record args.\n\toc.srcObject = srcObject\n\toc.contents, err = ioutil.ReadAll(r)\n\tAssertEq(nil, err)\n\n\t\/\/ Return results.\n\to, err = oc.o, oc.err\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst srcObjectContents = \"taco\"\nconst appendThreshold = uint64(len(srcObjectContents))\n\ntype ObjectSyncerTest struct {\n\tctx context.Context\n\n\tfullCreator fakeObjectCreator\n\tappendCreator fakeObjectCreator\n\n\tbucket gcs.Bucket\n\tleaser lease.FileLeaser\n\tsyncer ObjectSyncer\n\tclock timeutil.SimulatedClock\n\n\tsrcObject *gcs.Object\n\tcontent mutable.Content\n}\n\nvar _ SetUpInterface = &ObjectSyncerTest{}\n\nfunc init() { RegisterTestSuite(&ObjectSyncerTest{}) }\n\nfunc (t *ObjectSyncerTest) SetUp(ti *TestInfo) {\n\tvar err error\n\tt.ctx = ti.Ctx\n\n\t\/\/ Set up dependencies.\n\tt.bucket = gcsfake.NewFakeBucket(&t.clock, \"some_bucket\")\n\tt.leaser = lease.NewFileLeaser(\"\", math.MaxInt32, math.MaxInt32)\n\tt.syncer = newObjectSyncer(\n\t\tappendThreshold,\n\t\t&t.fullCreator,\n\t\t&t.appendCreator)\n\n\tt.clock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))\n\n\t\/\/ Set up a source object.\n\tt.srcObject, err = t.bucket.CreateObject(\n\t\tt.ctx,\n\t\t&gcs.CreateObjectRequest{\n\t\t\tName: \"foo\",\n\t\t\tContents: strings.NewReader(srcObjectContents),\n\t\t})\n\n\tAssertEq(nil, err)\n\n\t\/\/ Wrap a mutable.Content around it.\n\tt.content = mutable.NewContent(\n\t\tNewReadProxy(\n\t\t\tt.srcObject,\n\t\t\tnil, \/\/ Initial read lease\n\t\t\tmath.MaxUint64, \/\/ Chunk size\n\t\t\tt.leaser,\n\t\t\tt.bucket),\n\t\t&t.clock)\n\n\t\/\/ Return errors from the fakes by default.\n\tt.fullCreator.err = errors.New(\"Fake error\")\n\tt.appendCreator.err = errors.New(\"Fake error\")\n}\n\nfunc (t *ObjectSyncerTest) call() (\n\trl lease.ReadLease, o *gcs.Object, err error) {\n\trl, o, err = t.syncer.SyncObject(t.ctx, t.srcObject, t.content)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ObjectSyncerTest) NotDirty() {\n\t\/\/ Call\n\trl, o, err := t.call()\n\n\tAssertEq(nil, err)\n\tExpectEq(nil, rl)\n\tExpectEq(nil, o)\n\n\t\/\/ Neither creater should have been called.\n\tExpectFalse(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SmallerThanSource() {\n\t\/\/ Truncate downward.\n\terr := t.content.Truncate(t.ctx, int64(len(srcObjectContents)-1))\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SameSizeAsSource() {\n\t\/\/ Dirty a byte without changing the length.\n\t_, err := t.content.WriteAt(\n\t\tt.ctx,\n\t\t[]byte(\"a\"),\n\t\tint64(len(srcObjectContents)-1))\n\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) LargerThanSource_ThresholdInSource() {\n\tvar err error\n\n\t\/\/ Extend the length of the content.\n\terr = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+100))\n\tAssertEq(nil, err)\n\n\t\/\/ But dirty a byte within the initial content.\n\t_, err = t.content.WriteAt(\n\t\tt.ctx,\n\t\t[]byte(\"a\"),\n\t\tint64(len(srcObjectContents)-1))\n\n\tAssertEq(nil, err)\n\n\t\/\/ The full creator should be called.\n\tt.call()\n\n\tExpectTrue(t.fullCreator.called)\n\tExpectFalse(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) SourceTooShortForAppend() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) SourceComponentCountTooHigh() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) LargerThanSource_ThresholdAtEndOfSource() {\n\tvar err error\n\n\t\/\/ Extend the length of the content.\n\terr = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+1))\n\tAssertEq(nil, err)\n\n\t\/\/ The append creator should be called.\n\tt.call()\n\n\tExpectFalse(t.fullCreator.called)\n\tExpectTrue(t.appendCreator.called)\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorFails() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorReturnsPreconditionError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) FullCreatorSucceeds() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorFails() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorReturnsPreconditionError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *ObjectSyncerTest) AppendCreatorSucceeds() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/c-14\/grue\/config\"\n\t\"os\"\n)\n\nconst version = \"0.2.0-alpha\"\n\nfunc usage() string {\n\treturn `usage: grue [--help] {add|fetch|import|init_cfg} ...\n\nSubcommands:\n\tadd <name> <url>\n\tfetch [-init]\n\timport <config>\n\tinit_cfg`\n}\n\nfunc add(args []string, conf *config.GrueConfig) error {\n\tif len(args) != 2 {\n\t\treturn errors.New(\"usage: grue add <name> <url>\")\n\t}\n\tvar name string = args[0]\n\tvar uri string = args[1]\n\treturn conf.AddAccount(name, uri)\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Fprintln(os.Stderr, usage())\n\t\tos.Exit(EX_USAGE)\n\t}\n\tconf, err := config.ReadConfig()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(EX_TEMPFAIL)\n\t}\n\tdefer conf.Unlock()\n\tswitch cmd := os.Args[1]; cmd {\n\tcase \"add\":\n\t\terr = add(os.Args[2:], conf)\n\tcase \"fetch\":\n\t\tvar fetchCmd = flag.NewFlagSet(\"fetch\", flag.ContinueOnError)\n\t\tvar initFlag = fetchCmd.Bool(\"init\", false, \"Don't send emails, only initialize database of read entries\")\n\t\terr = fetchCmd.Parse(os.Args[2:])\n\t\tif err == nil {\n\t\t\terr = fetchFeeds(conf, *initFlag)\n\t\t}\n\tcase \"import\":\n\t\terr = config.ImportCfg(os.Args[2:])\n\tcase \"init_cfg\":\n\t\tbreak\n\tcase \"-v\":\n\t\tfallthrough\n\tcase \"--version\":\n\t\tfmt.Println(version)\n\tcase \"-h\":\n\t\tfallthrough\n\tcase \"--help\":\n\t\tfmt.Println(usage())\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, usage())\n\t\tconf.Unlock()\n\t\tos.Exit(EX_USAGE)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n}\n<commit_msg>Add list command<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/c-14\/grue\/config\"\n\t\"os\"\n\t\"text\/tabwriter\"\n)\n\nconst version = \"0.2.0-alpha\"\n\nfunc usage() string {\n\treturn `usage: grue [--help] {add|fetch|import|init_cfg|list} ...\n\nSubcommands:\n\tadd <name> <url>\n\tfetch [-init]\n\timport <config>\n\tinit_cfg\n\tlist [name] [--full]`\n}\n\nfunc add(args []string, conf *config.GrueConfig) error {\n\tif len(args) != 2 {\n\t\treturn errors.New(\"usage: grue add <name> <url>\")\n\t}\n\tvar name string = args[0]\n\tvar uri string = args[1]\n\treturn conf.AddAccount(name, uri)\n}\n\nfunc list(args []string, conf *config.GrueConfig) error {\n\tconst (\n\t\tfmtShort = \"%s\\t%s\\n\"\n\t\tfmtFull = \"%s:\\n%s\\n\"\n\t)\n\tvar full bool\n\tvar listCmd = flag.NewFlagSet(\"list\", flag.ContinueOnError)\n\tlistCmd.BoolVar(&full, \"full\", false, \"Show full account info\")\n\tif err := listCmd.Parse(args); err != nil {\n\t\treturn err\n\t}\n\tif len(listCmd.Args()) == 0 {\n\t\tif full {\n\t\t\tfor name, cfg := range conf.Accounts {\n\t\t\t\tfmt.Printf(fmtFull, name, cfg.String())\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfor name, cfg := range conf.Accounts {\n\t\t\tfmt.Fprintf(w, fmtShort, name, cfg.URI)\n\t\t}\n\t\tw.Flush()\n\t\treturn nil\n\t}\n\n\tname := listCmd.Args()[0]\n\tif cfg, ok := conf.Accounts[name]; ok {\n\t\tif full {\n\t\t\tfmt.Printf(fmtFull, name, cfg.String())\n\t\t} else {\n\t\t\tfmt.Printf(fmtShort, name, cfg.URI)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Fprintln(os.Stderr, usage())\n\t\tos.Exit(EX_USAGE)\n\t}\n\tconf, err := config.ReadConfig()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(EX_TEMPFAIL)\n\t}\n\tdefer conf.Unlock()\n\tswitch cmd := os.Args[1]; cmd {\n\tcase \"add\":\n\t\terr = add(os.Args[2:], conf)\n\tcase \"fetch\":\n\t\tvar fetchCmd = flag.NewFlagSet(\"fetch\", flag.ContinueOnError)\n\t\tvar initFlag = fetchCmd.Bool(\"init\", false, \"Don't send emails, only initialize database of read entries\")\n\t\terr = fetchCmd.Parse(os.Args[2:])\n\t\tif err == nil {\n\t\t\terr = fetchFeeds(conf, *initFlag)\n\t\t}\n\tcase \"import\":\n\t\terr = config.ImportCfg(os.Args[2:])\n\tcase \"init_cfg\":\n\t\tbreak\n\tcase \"list\":\n\t\terr = list(os.Args[2:], conf)\n\t\tbreak\n\tcase \"-v\":\n\t\tfallthrough\n\tcase \"--version\":\n\t\tfmt.Println(version)\n\tcase \"-h\":\n\t\tfallthrough\n\tcase \"--help\":\n\t\tfmt.Println(usage())\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, usage())\n\t\tconf.Unlock()\n\t\tos.Exit(EX_USAGE)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 Thomas de Zeeuw.\n\/\/\n\/\/ Licensed under the MIT license that can be found in the LICENSE file.\n\npackage logger\n\nimport \"strconv\"\n\n\/\/ Tags are keywords usefull in searching through logs, for example:\n\/\/\n\/\/\ttags := []Tags{\"file.go\", \"myFn\", \"user:$user_id\", \"input:$input\"}\n\/\/\n\/\/ With this information you can lookup any logs for a specific user reporting\n\/\/ an issue. Then you can find which function, in which file, is throwing the\n\/\/ error.\ntype Tags []string\n\n\/\/ String creates a comma separated list from the tags in string.\nfunc (tags Tags) String() string {\n\treturn string(tags.Bytes())\n}\n\n\/\/ Bytes does the same as Tags.String, but returns a byte slice.\nfunc (tags Tags) Bytes() []byte {\n\tif len(tags) == 0 {\n\t\treturn []byte{}\n\t}\n\n\t\/\/ Add each tag in the form of \"tag, \".\n\tvar buf []byte\n\tfor _, tag := range tags {\n\t\tbuf = append(buf, tag...)\n\t\tbuf = append(buf, ',')\n\t\tbuf = append(buf, ' ')\n\t}\n\n\t\/\/ Drop the last \", \".\n\tbuf = buf[:len(buf)-2]\n\treturn buf\n}\n\n\/\/ MarshalJSON returns an JSON formatted string slice (or array).\nfunc (tags Tags) MarshalJSON() ([]byte, error) {\n\tif len(tags) == 0 {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\t\/\/ Add each tag in the form of `\"tag\", `\n\tbuf := []byte(\"[\")\n\tfor _, tag := range tags {\n\t\tqoutedTag := strconv.Quote(tag)\n\t\tbuf = append(buf, qoutedTag...)\n\t\tbuf = append(buf, ',')\n\t\tbuf = append(buf, ' ')\n\t}\n\n\t\/\/ Drop the last \",\" and a closing bracket.\n\tbuf = append(buf[:len(buf)-2], ']')\n\treturn buf, nil\n}\n<commit_msg>Fix tags example<commit_after>\/\/ Copyright (C) 2015 Thomas de Zeeuw.\n\/\/\n\/\/ Licensed under the MIT license that can be found in the LICENSE file.\n\npackage logger\n\nimport \"strconv\"\n\n\/\/ Tags are keywords usefull in searching through logs, for example:\n\/\/\n\/\/\ttags := Tags{\"file.go\", \"myFn\", \"user:$user_id\", \"input:$input\"}\n\/\/\n\/\/ With this information you can lookup any logs for a specific user reporting\n\/\/ an issue. Then you can find which function, in which file, is throwing the\n\/\/ error.\ntype Tags []string\n\n\/\/ String creates a comma separated list from the tags in string.\nfunc (tags Tags) String() string {\n\treturn string(tags.Bytes())\n}\n\n\/\/ Bytes does the same as Tags.String, but returns a byte slice.\nfunc (tags Tags) Bytes() []byte {\n\tif len(tags) == 0 {\n\t\treturn []byte{}\n\t}\n\n\t\/\/ Add each tag in the form of \"tag, \".\n\tvar buf []byte\n\tfor _, tag := range tags {\n\t\tbuf = append(buf, tag...)\n\t\tbuf = append(buf, ',')\n\t\tbuf = append(buf, ' ')\n\t}\n\n\t\/\/ Drop the last \", \".\n\tbuf = buf[:len(buf)-2]\n\treturn buf\n}\n\n\/\/ MarshalJSON returns an JSON formatted string slice (or array).\nfunc (tags Tags) MarshalJSON() ([]byte, error) {\n\tif len(tags) == 0 {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\t\/\/ Add each tag in the form of `\"tag\", `\n\tbuf := []byte(\"[\")\n\tfor _, tag := range tags {\n\t\tqoutedTag := strconv.Quote(tag)\n\t\tbuf = append(buf, qoutedTag...)\n\t\tbuf = append(buf, ',')\n\t\tbuf = append(buf, ' ')\n\t}\n\n\t\/\/ Drop the last \",\" and a closing bracket.\n\tbuf = append(buf[:len(buf)-2], ']')\n\treturn buf, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n)\n\ntype Address struct {\n\tStreet1 string `json:\"street_1\"`\n\tStreet2 *string `json:\"street_2\"`\n\tCity string `json:\"city\"`\n\tRegion string `json:\"state\"`\n\tZipcode string `json:\"zip_code\"`\n\t\/\/status bool `json:\"fraud_address\"`\n}\n\nfunc main() {\n\teric := &Address{\n\t\t\"1 Main St\",\n\t\tnil,\n\t\t\"Buffalo\",\n\t\t\"NY\",\n\t\t\"14086\",\n\t\t\/\/ true,\n\t}\n\tdisplayStructTags(eric)\n}\n\nfunc displayStructTags(s interface{}) {\n\t\/\/ get the value and type of s\n\tt := reflect.TypeOf(s)\n\tv := reflect.ValueOf(s)\n\n\t\/\/ lets make sure we are actually working with a struct\n\tif t.Kind() != reflect.Struct {\n\t\tlog.Fatalf(\"Wow, you can't even get the right type in a demo.... fail!\\nKind() was: %s\\n \", t.Kind())\n\t}\n\tlog.Println(t.Kind())\n\n\t\/\/ get the number of fields and loop through them\n\tfieldCount := t.NumField()\n\tfor i := 0; i < fieldCount; i++ {\n\t\t\/\/ tag is accessed through the type, the value through value.. derp\n\t\tfmt.Printf(\"{\\\"%s\\\":\\\"%s\\\"}\\n\", t.Field(i).Tag.Get(\"json\"), v.Field(i))\n\t}\n}\n\n\/*\n \/\/ check for pointer and get underlying type\n if t.Kind() == reflect.Ptr {\n t = t.Elem()\n }\n\tif v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\n \/\/ check to see if a feed is exported and settable\n\t\tif v.Field(i).CanSet() {\n*\/\n<commit_msg>added a few more comments to the tags example<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n)\n\ntype Address struct {\n\tStreet1 string `json:\"street_1\"`\n\tStreet2 *string `json:\"street_2\"`\n\tCity string `json:\"city\"`\n\tRegion string `json:\"state\"`\n\tZipcode string `json:\"zip_code\"`\n\t\/\/status bool `json:\"fraud_address\"` --uncomment to test unexported fields\n}\n\nfunc main() {\n\teric := &Address{\n\t\t\"1 Main St\",\n\t\tnil,\n\t\t\"Buffalo\",\n\t\t\"NY\",\n\t\t\"14086\",\n\t\t\/\/ true,--uncomment to test unexported fields\n\t}\n\tdisplayStructTags(eric)\n}\n\nfunc displayStructTags(s interface{}) {\n\t\/\/ get the value and type of s\n\tt := reflect.TypeOf(s)\n\tv := reflect.ValueOf(s)\n\n\t\/\/ #1\n\n\t\/\/ lets make sure we are actually working with a struct\n\tif t.Kind() != reflect.Struct {\n\t\tlog.Fatalf(\"Wow, you can't even get the right type in a demo.... fail!\\nKind() was: %s\\n \", t.Kind())\n\t}\n\tlog.Println(t.Kind())\n\n\t\/\/ get the number of fields and loop through them\n\tfieldCount := t.NumField()\n\tfor i := 0; i < fieldCount; i++ {\n\t\t\/\/ tag is accessed through the type, the value through value.. derp\n\t\t\/\/ #2 see below\n\t\tfmt.Printf(\"{\\\"%s\\\":\\\"%s\\\"}\\n\", t.Field(i).Tag.Get(\"json\"), v.Field(i))\n\t}\n}\n\n\/*\n #1 to get the type of the right kin,d use elem which returns the value pointed to by a ptr\n in the real world, it would be necessary to check the underlying type to check for a ptr or nil\n \/\/ check for pointer and get underlying type\n if t.Kind() == reflect.Ptr {\n t = t.Elem()\n }\n\tif v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\n\n #2 Use CanSet() to see if a field is exported. You can also use the PkgPath to determine if a field is exported\n\t if v.Field(i).CanSet() {\n\t\tfmt.Printf(\"{\\\"%s\\\":\\\"%s\\\"}\\n\", t.Field(i).Tag.Get(\"json\"), v.Field(i))\n\t }\n*\/\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n)\n\nconst (\n\ttaskAPI = \"\/task\/\"\n)\n\ntype taskStatus struct {\n\tCreator string\n\n\tRepo string\n\tRev string\n\tPPA string\n\n\tStatus string\n\tProcess string\n\tError string\n\n\tWorkingDir string\n}\n\n\/*\n * @args[0]: task id\n * @args[1](optional): sub-action\n *\/\nfunc cmdTask(args []string) error {\n\ttaskId := args[0]\n\n\tif len(args) == 1 {\n\t\t\/\/ Get task status\n\n\t\tresp, _ := request(\"GET\", servAddr+taskAPI+taskId, nil, \"\")\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar ts taskStatus\n\t\tif resp.StatusCode == 200 {\n\t\t\tdec := json.NewDecoder(strings.NewReader(string(body)))\n\t\t\tdec.Decode(&ts)\n\t\t\tfmt.Printf(\"%+v\\n\", ts)\n\t\t}\n\t\treturn nil\n\t}\n\n\tswitch args[1] {\n\tcase \"output\":\n\t\treturn cmdTaskOutput(taskId)\n\tdefault:\n\t\tpanic(\"ERR\")\n\t}\n\n\treturn nil\n}\n\nfunc cmdTaskOutput(taskId string) error {\n\taction := \"\/output\"\n\tresp, _ := request(\"GET\", servAddr+taskAPI+taskId+action, nil, \"\")\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == 200 {\n\t\tfmt.Println(string(body))\n\t}\n\n\treturn nil\n}\n<commit_msg>ID will be recorded in task status<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n)\n\nconst (\n\ttaskAPI = \"\/task\/\"\n)\n\ntype taskStatus struct {\n\tID uint64\n\tCreator string\n\n\tRepo string\n\tRev string\n\tPPA string\n\n\tStatus string\n\tProcess string\n\tError string\n\n\tWorkingDir string\n}\n\n\/*\n * @args[0]: task id\n * @args[1](optional): sub-action\n *\/\nfunc cmdTask(args []string) error {\n\ttaskId := args[0]\n\n\tif len(args) == 1 {\n\t\t\/\/ Get task status\n\n\t\tresp, _ := request(\"GET\", servAddr+taskAPI+taskId, nil, \"\")\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar ts taskStatus\n\t\tif resp.StatusCode == 200 {\n\t\t\tdec := json.NewDecoder(strings.NewReader(string(body)))\n\t\t\tdec.Decode(&ts)\n\t\t\tfmt.Printf(\"%+v\\n\", ts)\n\t\t}\n\t\treturn nil\n\t}\n\n\tswitch args[1] {\n\tcase \"output\":\n\t\treturn cmdTaskOutput(taskId)\n\tdefault:\n\t\tpanic(\"ERR\")\n\t}\n\n\treturn nil\n}\n\nfunc cmdTaskOutput(taskId string) error {\n\taction := \"\/output\"\n\tresp, _ := request(\"GET\", servAddr+taskAPI+taskId+action, nil, \"\")\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == 200 {\n\t\tfmt.Println(string(body))\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package scipipe\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Task represents a single static shell command, or go function, to be\n\/\/ executed, and are scheduled and managed by a corresponding Process\ntype Task struct {\n\tName string\n\tCommand string\n\tCustomExecute func(*Task)\n\tInIPs map[string]*FileIP\n\tOutIPs map[string]*FileIP\n\tParams map[string]string\n\tTags map[string]string\n\tDone chan int\n\tcores int\n\tworkflow *Workflow\n\tprocess *Process\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Factory method(s)\n\/\/ ------------------------------------------------------------------------\n\n\/\/ NewTask instantiates and initializes a new Task\nfunc NewTask(workflow *Workflow, process *Process, name string, cmdPat string, inIPs map[string]*FileIP, outPathFuncs map[string]func(*Task) string, outPortsDoStream map[string]bool, params map[string]string, tags map[string]string, prepend string, customExecute func(*Task), cores int) *Task {\n\tt := &Task{\n\t\tName: name,\n\t\tInIPs: inIPs,\n\t\tOutIPs: make(map[string]*FileIP),\n\t\tParams: params,\n\t\tTags: tags,\n\t\tCommand: \"\",\n\t\tCustomExecute: customExecute,\n\t\tDone: make(chan int),\n\t\tcores: cores,\n\t\tworkflow: workflow,\n\t\tprocess: process,\n\t}\n\n\t\/\/ Create Out-IPs\n\tfor oname, outPathFunc := range outPathFuncs {\n\t\toip := NewFileIP(outPathFunc(t))\n\t\tif outPortsDoStream[oname] {\n\t\t\toip.doStream = true\n\t\t}\n\t\tt.OutIPs[oname] = oip\n\t}\n\tt.Command = formatCommand(cmdPat, inIPs, t.OutIPs, params, tags, prepend)\n\treturn t\n}\n\n\/\/ formatCommand is a helper function for NewTask, that formats a shell command\n\/\/ based on concrete file paths and parameter values\nfunc formatCommand(cmd string, inIPs map[string]*FileIP, outIPs map[string]*FileIP, params map[string]string, tags map[string]string, prepend string) string {\n\tr := getShellCommandPlaceHolderRegex()\n\tplaceHolderMatches := r.FindAllStringSubmatch(cmd, -1)\n\tportInfos := map[string]map[string]string{}\n\tfor _, placeHolderMatch := range placeHolderMatches {\n\t\tportName := placeHolderMatch[2]\n\t\tportInfos[portName] = map[string]string{}\n\t\tportInfos[portName][\"match\"] = placeHolderMatch[0]\n\t\tportInfos[portName][\"port_type\"] = placeHolderMatch[1]\n\t\tportInfos[portName][\"port_name\"] = portName\n\t\tportInfos[portName][\"reduce_inputs\"] = \"false\"\n\t\t\/\/ Identify if the place holder represents a reduce-type in-port\n\t\tif len(placeHolderMatch) > 5 {\n\t\t\tportInfos[portName][\"reduce_inputs\"] = \"true\"\n\t\t\tportInfos[portName][\"sep\"] = placeHolderMatch[7]\n\t\t}\n\t}\n\n\tfor portName, portInfo := range portInfos {\n\t\tvar filePath string\n\t\tswitch portInfo[\"port_type\"] {\n\t\tcase \"o\":\n\t\t\tif outIPs[portName] == nil {\n\t\t\t\tFail(\"Missing outpath for outport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tfilePath = outIPs[portName].TempPath()\n\t\tcase \"os\":\n\t\t\tif outIPs[portName] == nil {\n\t\t\t\tFail(\"Missing outpath for outport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tfilePath = outIPs[portName].FifoPath()\n\t\tcase \"i\":\n\t\t\tif inIPs[portName] == nil {\n\t\t\t\tFail(\"Missing in-IP for inport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tif portInfo[\"reduce_inputs\"] == \"true\" && inIPs[portName].Path() == \"\" {\n\t\t\t\t\/\/ Merge multiple input paths from a substream on the IP, into one string\n\t\t\t\tips := []*FileIP{}\n\t\t\t\tfor ip := range inIPs[portName].SubStream.Chan {\n\t\t\t\t\tips = append(ips, ip)\n\t\t\t\t}\n\t\t\t\tpaths := []string{}\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tpaths = append(paths, parentDirPath(ip.Path()))\n\t\t\t\t}\n\t\t\t\tfilePath = strings.Join(paths, portInfo[\"sep\"])\n\t\t\t} else {\n\t\t\t\tif inIPs[portName].Path() == \"\" {\n\t\t\t\t\tFail(\"Missing inpath for inport '\", portName, \"', and no substream, for command '\", cmd, \"'\")\n\t\t\t\t}\n\t\t\t\tif inIPs[portName].doStream {\n\t\t\t\t\tfilePath = parentDirPath(inIPs[portName].FifoPath())\n\t\t\t\t} else {\n\t\t\t\t\tfilePath = parentDirPath(inIPs[portName].Path())\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"p\":\n\t\t\tif params[portName] == \"\" {\n\t\t\t\tmsg := fmt.Sprint(\"Missing param value for param '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t\tFail(msg)\n\t\t\t} else {\n\t\t\t\tfilePath = params[portName]\n\t\t\t}\n\t\tcase \"t\":\n\t\t\tif tags[portName] == \"\" {\n\t\t\t\tmsg := fmt.Sprint(\"Missing tag value for tag '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t\tFail(msg)\n\t\t\t} else {\n\t\t\t\tfilePath = tags[portName]\n\t\t\t}\n\t\tdefault:\n\t\t\tFail(\"Replace failed for port \", portName, \" for command '\", cmd, \"'\")\n\t\t}\n\t\tcmd = strings.Replace(cmd, portInfo[\"match\"], filePath, -1)\n\t}\n\n\t\/\/ Add prepend string to the command\n\tif prepend != \"\" {\n\t\tcmd = fmt.Sprintf(\"%s %s\", prepend, cmd)\n\t}\n\n\treturn cmd\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Main API methods: Accessor methods\n\/\/ ------------------------------------------------------------------------\n\n\/\/ InIP returns an IP for the in-port with name portName\nfunc (t *Task) InIP(portName string) *FileIP {\n\tif t.InIPs[portName] == nil {\n\t\tFailf(\"No such in-portname (%s) in task (%s)\\n\", portName, t.Name)\n\t}\n\treturn t.InIPs[portName]\n}\n\n\/\/ InPath returns the path name of an input file for the task\nfunc (t *Task) InPath(portName string) string {\n\treturn t.InIP(portName).Path()\n}\n\n\/\/ OutIP returns an IP for the in-port with name portName\nfunc (t *Task) OutIP(portName string) *FileIP {\n\tif t.OutIPs[portName] == nil {\n\t\tFailf(\"No such out-portname (%s) in task (%s)\\n\", portName, t.Name)\n\t}\n\treturn t.OutIPs[portName]\n}\n\n\/\/ OutPath returns the path name of an input file for the task\nfunc (t *Task) OutPath(portName string) string {\n\treturn t.OutIP(portName).Path()\n}\n\n\/\/ Param returns the value of a param, for the task\nfunc (t *Task) Param(portName string) string {\n\tif param, ok := t.Params[portName]; ok {\n\t\treturn param\n\t}\n\tFailf(\"No such param port '%s' for task '%s'\\n\", portName, t.Name)\n\treturn \"invalid\"\n}\n\n\/\/ Tag returns the value of a param, for the task\nfunc (t *Task) Tag(tagName string) string {\n\tif tag, ok := t.Tags[tagName]; ok {\n\t\treturn tag\n\t}\n\tFailf(\"No such tag '%s' for task '%s'\\n\", tagName, t.Name)\n\treturn \"invalid\"\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Execute the task\n\/\/ ------------------------------------------------------------------------\n\n\/\/ Execute executes the task (the shell command or go function in CustomExecute)\nfunc (t *Task) Execute() {\n\tdefer close(t.Done)\n\n\t\/\/ Do some sanity checks\n\tif t.anyTempfileExists() {\n\t\tFailf(\"| %-32s | Existing temp files found so existing. Clean up .tmp files before restarting the workflow!\", t.Name)\n\t}\n\n\tif t.anyOutputsExist() {\n\t\tt.Done <- 1\n\t\treturn\n\t}\n\n\t\/\/ Execute task\n\tt.workflow.IncConcurrentTasks(t.cores) \/\/ Will block if max concurrent tasks is reached\n\tt.createDirs() \/\/ Create output directories needed for any outputs\n\tstartTime := time.Now()\n\tif t.CustomExecute != nil {\n\t\toutputsStr := \"\"\n\t\tfor oipName, oip := range t.OutIPs {\n\t\t\toutputsStr += \" \" + oipName + \": \" + oip.Path()\n\t\t}\n\t\tAudit.Printf(\"| %-32s | Executing: Custom Go function with outputs: %s\\n\", t.Name, outputsStr)\n\t\tt.CustomExecute(t)\n\t\tAudit.Printf(\"| %-32s | Finished: Custom Go function with outputs: %s\\n\", t.Name, outputsStr)\n\t} else {\n\t\tAudit.Printf(\"| %-32s | Executing: %s\\n\", t.Name, t.Command)\n\t\tt.executeCommand(t.Command)\n\t\tAudit.Printf(\"| %-32s | Finished: %s\\n\", t.Name, t.Command)\n\t}\n\tfinishTime := time.Now()\n\tt.writeAuditLogs(startTime, finishTime)\n\tt.atomizeIPs()\n\tt.workflow.DecConcurrentTasks(t.cores)\n\n\tt.Done <- 1\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Helper methods for the Execute method\n\/\/ ------------------------------------------------------------------------\n\n\/\/ anyTempFileExists checks if any temporary workflow files exist and if so, returns true\nfunc (t *Task) anyTempfileExists() (anyTempfileExists bool) {\n\tanyTempfileExists = false\n\tfor _, oip := range t.OutIPs {\n\t\tif !oip.doStream {\n\t\t\totmpPath := oip.TempPath()\n\t\t\tif _, err := os.Stat(otmpPath); err == nil {\n\t\t\t\tWarning.Printf(\"| %-32s | Temp file already exists: %s (Note: If resuming from a failed run, clean up .tmp files first. Also, make sure that two processes don't produce the same output files!).\\n\", t.Name, otmpPath)\n\t\t\t\tanyTempfileExists = true\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ anyOutputsExist if any output file IP, or temporary file IPs, exist\nfunc (t *Task) anyOutputsExist() (anyFileExists bool) {\n\tanyFileExists = false\n\tfor _, oip := range t.OutIPs {\n\t\tif !oip.doStream {\n\t\t\topath := oip.Path()\n\t\t\tif _, err := os.Stat(opath); err == nil {\n\t\t\t\tAudit.Printf(\"| %-32s | Output file already exists, so skipping: %s\\n\", t.Name, opath)\n\t\t\t\tanyFileExists = true\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ createDirs creates directories for out-IPs of the task\nfunc (t *Task) createDirs() {\n\tos.MkdirAll(t.TempDir(), 0777)\n\tfor _, oip := range t.OutIPs {\n\t\toipDir := oip.TempDir() \/\/ This will create all out dirs, including the temp dir\n\t\tif oip.doStream { \/\/ Temp dirs are not created for fifo files\n\t\t\toipDir = filepath.Dir(oip.FifoPath())\n\t\t} else {\n\t\t\toipDir = t.TempDir() + \"\/\" + oipDir\n\t\t}\n\t\terr := os.MkdirAll(oipDir, 0777)\n\t\tCheckWithMsg(err, \"Could not create directory: \"+oipDir)\n\t}\n\n}\n\n\/\/ executeCommand executes the shell command cmd via bash\nfunc (t *Task) executeCommand(cmd string) {\n\t\/\/ cd into the task's tempdir, execute the command, and cd back\n\tout, err := exec.Command(\"bash\", \"-c\", \"cd \"+t.TempDir()+\" && \"+cmd+\" && cd ..\").CombinedOutput()\n\tif err != nil {\n\t\tFailf(\"Command failed!\\nCommand:\\n%s\\n\\nOutput:\\n%s\\nOriginal error:%s\\n\", cmd, string(out), err.Error())\n\t}\n}\n\nfunc (t *Task) writeAuditLogs(startTime time.Time, finishTime time.Time) {\n\t\/\/ Append audit info for the task to all its output IPs\n\tauditInfo := NewAuditInfo()\n\tauditInfo.Command = t.Command\n\tauditInfo.ProcessName = t.process.Name()\n\tauditInfo.Params = t.Params\n\tauditInfo.StartTime = startTime\n\tauditInfo.FinishTime = finishTime\n\tauditInfo.ExecTimeMS = finishTime.Sub(startTime) \/ time.Millisecond\n\t\/\/ Set the audit infos from incoming IPs into the \"Upstream\" map\n\tfor _, iip := range t.InIPs {\n\t\tiipPath := iip.Path()\n\t\tiipAuditInfo := iip.AuditInfo()\n\t\tauditInfo.Upstream[iipPath] = iipAuditInfo\n\t}\n\t\/\/ Add the current audit info to output ips and write them to file\n\tfor _, oip := range t.OutIPs {\n\t\toip.SetAuditInfo(auditInfo)\n\t\tfor _, iip := range t.InIPs {\n\t\t\toip.AddTags(iip.Tags())\n\t\t}\n\t\toip.WriteAuditLogToFile()\n\t}\n}\n\n\/\/ atomizeIPs renames temporary output files\/directories to their proper paths\nfunc (t *Task) atomizeIPs() {\n\tfor _, oip := range t.OutIPs {\n\t\t\/\/ Move paths for ports, to final destinations\n\t\tif !oip.doStream {\n\t\t\tos.Rename(t.TempDir()+\"\/\"+oip.TempPath(), oip.Path())\n\t\t}\n\t}\n\t\/\/ For remaining paths in temporary execution dir, just move out of it\n\tfilepath.Walk(t.TempDir(), func(tempPath string, fileInfo os.FileInfo, err error) error {\n\t\tif !fileInfo.IsDir() {\n\t\t\tnewPath := strings.Replace(tempPath, t.TempDir()+\"\/\", \"\", 1)\n\t\t\tnewPath = strings.Replace(newPath, AbsPathPlaceholder+\"\/\", \"\/\", 1)\n\t\t\tnewPathDir := filepath.Dir(newPath)\n\t\t\tif _, err := os.Stat(newPathDir); os.IsNotExist(err) {\n\t\t\t\tos.MkdirAll(newPathDir, 0777)\n\t\t\t}\n\t\t\tDebug.Println(\"Moving: \", tempPath, \" -> \", newPath)\n\t\t\trenameErr := os.Rename(tempPath, newPath)\n\t\t\tCheckWithMsg(renameErr, \"Could not rename file \"+tempPath+\" to \"+newPath)\n\t\t}\n\t\treturn err\n\t})\n\t\/\/ Remove temporary execution dir\n\tremErr := os.RemoveAll(t.TempDir())\n\tCheckWithMsg(remErr, \"Could not remove temp dir: \"+t.TempDir())\n}\n\n\/\/ TempDir returns a string that is unique to a task, suitable for use\n\/\/ in file paths. It is built up by merging all input filenames and parameter\n\/\/ values that a task takes as input, joined with dots.\nfunc (t *Task) TempDir() string {\n\tpathPcs := []string{\"tmp.\" + t.Name}\n\tfor _, ipName := range sortedFileIPMapKeys(t.InIPs) {\n\t\tpathPcs = append(pathPcs, filepath.Base(t.InIP(ipName).Path()))\n\t}\n\tfor _, paramName := range sortedStringMapKeys(t.Params) {\n\t\tpathPcs = append(pathPcs, paramName+\"_\"+t.Param(paramName))\n\t}\n\tfor _, tagName := range sortedStringMapKeys(t.Tags) {\n\t\tpathPcs = append(pathPcs, tagName+\"_\"+t.Tag(tagName))\n\t}\n\tpathSegment := strings.Join(pathPcs, \".\")\n\tif len(pathSegment) > 255 {\n\t\tsha1sum := sha1.Sum([]byte(pathSegment))\n\t\tpathSegment = t.Name + \".\" + hex.EncodeToString(sha1sum[:])\n\t}\n\treturn pathSegment\n}\n\nfunc parentDirPath(path string) string {\n\tif path[0] == '\/' {\n\t\treturn path\n\t}\n\t\/\/ For relative paths, add \"..\" to get out of current dir\n\treturn \"..\/\" + path\n}\n<commit_msg>Fix bug: Should check for existing temp files with temp dir included<commit_after>package scipipe\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Task represents a single static shell command, or go function, to be\n\/\/ executed, and are scheduled and managed by a corresponding Process\ntype Task struct {\n\tName string\n\tCommand string\n\tCustomExecute func(*Task)\n\tInIPs map[string]*FileIP\n\tOutIPs map[string]*FileIP\n\tParams map[string]string\n\tTags map[string]string\n\tDone chan int\n\tcores int\n\tworkflow *Workflow\n\tprocess *Process\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Factory method(s)\n\/\/ ------------------------------------------------------------------------\n\n\/\/ NewTask instantiates and initializes a new Task\nfunc NewTask(workflow *Workflow, process *Process, name string, cmdPat string, inIPs map[string]*FileIP, outPathFuncs map[string]func(*Task) string, outPortsDoStream map[string]bool, params map[string]string, tags map[string]string, prepend string, customExecute func(*Task), cores int) *Task {\n\tt := &Task{\n\t\tName: name,\n\t\tInIPs: inIPs,\n\t\tOutIPs: make(map[string]*FileIP),\n\t\tParams: params,\n\t\tTags: tags,\n\t\tCommand: \"\",\n\t\tCustomExecute: customExecute,\n\t\tDone: make(chan int),\n\t\tcores: cores,\n\t\tworkflow: workflow,\n\t\tprocess: process,\n\t}\n\n\t\/\/ Create Out-IPs\n\tfor oname, outPathFunc := range outPathFuncs {\n\t\toip := NewFileIP(outPathFunc(t))\n\t\tif outPortsDoStream[oname] {\n\t\t\toip.doStream = true\n\t\t}\n\t\tt.OutIPs[oname] = oip\n\t}\n\tt.Command = formatCommand(cmdPat, inIPs, t.OutIPs, params, tags, prepend)\n\treturn t\n}\n\n\/\/ formatCommand is a helper function for NewTask, that formats a shell command\n\/\/ based on concrete file paths and parameter values\nfunc formatCommand(cmd string, inIPs map[string]*FileIP, outIPs map[string]*FileIP, params map[string]string, tags map[string]string, prepend string) string {\n\tr := getShellCommandPlaceHolderRegex()\n\tplaceHolderMatches := r.FindAllStringSubmatch(cmd, -1)\n\tportInfos := map[string]map[string]string{}\n\tfor _, placeHolderMatch := range placeHolderMatches {\n\t\tportName := placeHolderMatch[2]\n\t\tportInfos[portName] = map[string]string{}\n\t\tportInfos[portName][\"match\"] = placeHolderMatch[0]\n\t\tportInfos[portName][\"port_type\"] = placeHolderMatch[1]\n\t\tportInfos[portName][\"port_name\"] = portName\n\t\tportInfos[portName][\"reduce_inputs\"] = \"false\"\n\t\t\/\/ Identify if the place holder represents a reduce-type in-port\n\t\tif len(placeHolderMatch) > 5 {\n\t\t\tportInfos[portName][\"reduce_inputs\"] = \"true\"\n\t\t\tportInfos[portName][\"sep\"] = placeHolderMatch[7]\n\t\t}\n\t}\n\n\tfor portName, portInfo := range portInfos {\n\t\tvar filePath string\n\t\tswitch portInfo[\"port_type\"] {\n\t\tcase \"o\":\n\t\t\tif outIPs[portName] == nil {\n\t\t\t\tFail(\"Missing outpath for outport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tfilePath = outIPs[portName].TempPath()\n\t\tcase \"os\":\n\t\t\tif outIPs[portName] == nil {\n\t\t\t\tFail(\"Missing outpath for outport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tfilePath = outIPs[portName].FifoPath()\n\t\tcase \"i\":\n\t\t\tif inIPs[portName] == nil {\n\t\t\t\tFail(\"Missing in-IP for inport '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t}\n\t\t\tif portInfo[\"reduce_inputs\"] == \"true\" && inIPs[portName].Path() == \"\" {\n\t\t\t\t\/\/ Merge multiple input paths from a substream on the IP, into one string\n\t\t\t\tips := []*FileIP{}\n\t\t\t\tfor ip := range inIPs[portName].SubStream.Chan {\n\t\t\t\t\tips = append(ips, ip)\n\t\t\t\t}\n\t\t\t\tpaths := []string{}\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tpaths = append(paths, parentDirPath(ip.Path()))\n\t\t\t\t}\n\t\t\t\tfilePath = strings.Join(paths, portInfo[\"sep\"])\n\t\t\t} else {\n\t\t\t\tif inIPs[portName].Path() == \"\" {\n\t\t\t\t\tFail(\"Missing inpath for inport '\", portName, \"', and no substream, for command '\", cmd, \"'\")\n\t\t\t\t}\n\t\t\t\tif inIPs[portName].doStream {\n\t\t\t\t\tfilePath = parentDirPath(inIPs[portName].FifoPath())\n\t\t\t\t} else {\n\t\t\t\t\tfilePath = parentDirPath(inIPs[portName].Path())\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"p\":\n\t\t\tif params[portName] == \"\" {\n\t\t\t\tmsg := fmt.Sprint(\"Missing param value for param '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t\tFail(msg)\n\t\t\t} else {\n\t\t\t\tfilePath = params[portName]\n\t\t\t}\n\t\tcase \"t\":\n\t\t\tif tags[portName] == \"\" {\n\t\t\t\tmsg := fmt.Sprint(\"Missing tag value for tag '\", portName, \"' for command '\", cmd, \"'\")\n\t\t\t\tFail(msg)\n\t\t\t} else {\n\t\t\t\tfilePath = tags[portName]\n\t\t\t}\n\t\tdefault:\n\t\t\tFail(\"Replace failed for port \", portName, \" for command '\", cmd, \"'\")\n\t\t}\n\t\tcmd = strings.Replace(cmd, portInfo[\"match\"], filePath, -1)\n\t}\n\n\t\/\/ Add prepend string to the command\n\tif prepend != \"\" {\n\t\tcmd = fmt.Sprintf(\"%s %s\", prepend, cmd)\n\t}\n\n\treturn cmd\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Main API methods: Accessor methods\n\/\/ ------------------------------------------------------------------------\n\n\/\/ InIP returns an IP for the in-port with name portName\nfunc (t *Task) InIP(portName string) *FileIP {\n\tif t.InIPs[portName] == nil {\n\t\tFailf(\"No such in-portname (%s) in task (%s)\\n\", portName, t.Name)\n\t}\n\treturn t.InIPs[portName]\n}\n\n\/\/ InPath returns the path name of an input file for the task\nfunc (t *Task) InPath(portName string) string {\n\treturn t.InIP(portName).Path()\n}\n\n\/\/ OutIP returns an IP for the in-port with name portName\nfunc (t *Task) OutIP(portName string) *FileIP {\n\tif t.OutIPs[portName] == nil {\n\t\tFailf(\"No such out-portname (%s) in task (%s)\\n\", portName, t.Name)\n\t}\n\treturn t.OutIPs[portName]\n}\n\n\/\/ OutPath returns the path name of an input file for the task\nfunc (t *Task) OutPath(portName string) string {\n\treturn t.OutIP(portName).Path()\n}\n\n\/\/ Param returns the value of a param, for the task\nfunc (t *Task) Param(portName string) string {\n\tif param, ok := t.Params[portName]; ok {\n\t\treturn param\n\t}\n\tFailf(\"No such param port '%s' for task '%s'\\n\", portName, t.Name)\n\treturn \"invalid\"\n}\n\n\/\/ Tag returns the value of a param, for the task\nfunc (t *Task) Tag(tagName string) string {\n\tif tag, ok := t.Tags[tagName]; ok {\n\t\treturn tag\n\t}\n\tFailf(\"No such tag '%s' for task '%s'\\n\", tagName, t.Name)\n\treturn \"invalid\"\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Execute the task\n\/\/ ------------------------------------------------------------------------\n\n\/\/ Execute executes the task (the shell command or go function in CustomExecute)\nfunc (t *Task) Execute() {\n\tdefer close(t.Done)\n\n\t\/\/ Do some sanity checks\n\tif t.anyTempfileExists() {\n\t\tFailf(\"| %-32s | Existing temp files found so existing. Clean up .tmp files before restarting the workflow!\", t.Name)\n\t}\n\n\tif t.anyOutputsExist() {\n\t\tt.Done <- 1\n\t\treturn\n\t}\n\n\t\/\/ Execute task\n\tt.workflow.IncConcurrentTasks(t.cores) \/\/ Will block if max concurrent tasks is reached\n\tt.createDirs() \/\/ Create output directories needed for any outputs\n\tstartTime := time.Now()\n\tif t.CustomExecute != nil {\n\t\toutputsStr := \"\"\n\t\tfor oipName, oip := range t.OutIPs {\n\t\t\toutputsStr += \" \" + oipName + \": \" + oip.Path()\n\t\t}\n\t\tAudit.Printf(\"| %-32s | Executing: Custom Go function with outputs: %s\\n\", t.Name, outputsStr)\n\t\tt.CustomExecute(t)\n\t\tAudit.Printf(\"| %-32s | Finished: Custom Go function with outputs: %s\\n\", t.Name, outputsStr)\n\t} else {\n\t\tAudit.Printf(\"| %-32s | Executing: %s\\n\", t.Name, t.Command)\n\t\tt.executeCommand(t.Command)\n\t\tAudit.Printf(\"| %-32s | Finished: %s\\n\", t.Name, t.Command)\n\t}\n\tfinishTime := time.Now()\n\tt.writeAuditLogs(startTime, finishTime)\n\tt.atomizeIPs()\n\tt.workflow.DecConcurrentTasks(t.cores)\n\n\tt.Done <- 1\n}\n\n\/\/ ------------------------------------------------------------------------\n\/\/ Helper methods for the Execute method\n\/\/ ------------------------------------------------------------------------\n\n\/\/ anyTempFileExists checks if any temporary workflow files exist and if so, returns true\nfunc (t *Task) anyTempfileExists() (anyTempfileExists bool) {\n\tanyTempfileExists = false\n\tfor _, oip := range t.OutIPs {\n\t\tif !oip.doStream {\n\t\t\totmpPath := t.TempDir() + \"\/\" + oip.TempPath()\n\t\t\tif _, err := os.Stat(otmpPath); err == nil {\n\t\t\t\tWarning.Printf(\"| %-32s | Temp file already exists: %s (Note: If resuming from a failed run, clean up .tmp files first. Also, make sure that two processes don't produce the same output files!).\\n\", t.Name, otmpPath)\n\t\t\t\tanyTempfileExists = true\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ anyOutputsExist if any output file IP, or temporary file IPs, exist\nfunc (t *Task) anyOutputsExist() (anyFileExists bool) {\n\tanyFileExists = false\n\tfor _, oip := range t.OutIPs {\n\t\tif !oip.doStream {\n\t\t\topath := oip.Path()\n\t\t\tif _, err := os.Stat(opath); err == nil {\n\t\t\t\tAudit.Printf(\"| %-32s | Output file already exists, so skipping: %s\\n\", t.Name, opath)\n\t\t\t\tanyFileExists = true\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ createDirs creates directories for out-IPs of the task\nfunc (t *Task) createDirs() {\n\tos.MkdirAll(t.TempDir(), 0777)\n\tfor _, oip := range t.OutIPs {\n\t\toipDir := oip.TempDir() \/\/ This will create all out dirs, including the temp dir\n\t\tif oip.doStream { \/\/ Temp dirs are not created for fifo files\n\t\t\toipDir = filepath.Dir(oip.FifoPath())\n\t\t} else {\n\t\t\toipDir = t.TempDir() + \"\/\" + oipDir\n\t\t}\n\t\terr := os.MkdirAll(oipDir, 0777)\n\t\tCheckWithMsg(err, \"Could not create directory: \"+oipDir)\n\t}\n\n}\n\n\/\/ executeCommand executes the shell command cmd via bash\nfunc (t *Task) executeCommand(cmd string) {\n\t\/\/ cd into the task's tempdir, execute the command, and cd back\n\tout, err := exec.Command(\"bash\", \"-c\", \"cd \"+t.TempDir()+\" && \"+cmd+\" && cd ..\").CombinedOutput()\n\tif err != nil {\n\t\tFailf(\"Command failed!\\nCommand:\\n%s\\n\\nOutput:\\n%s\\nOriginal error:%s\\n\", cmd, string(out), err.Error())\n\t}\n}\n\nfunc (t *Task) writeAuditLogs(startTime time.Time, finishTime time.Time) {\n\t\/\/ Append audit info for the task to all its output IPs\n\tauditInfo := NewAuditInfo()\n\tauditInfo.Command = t.Command\n\tauditInfo.ProcessName = t.process.Name()\n\tauditInfo.Params = t.Params\n\tauditInfo.StartTime = startTime\n\tauditInfo.FinishTime = finishTime\n\tauditInfo.ExecTimeMS = finishTime.Sub(startTime) \/ time.Millisecond\n\t\/\/ Set the audit infos from incoming IPs into the \"Upstream\" map\n\tfor _, iip := range t.InIPs {\n\t\tiipPath := iip.Path()\n\t\tiipAuditInfo := iip.AuditInfo()\n\t\tauditInfo.Upstream[iipPath] = iipAuditInfo\n\t}\n\t\/\/ Add the current audit info to output ips and write them to file\n\tfor _, oip := range t.OutIPs {\n\t\toip.SetAuditInfo(auditInfo)\n\t\tfor _, iip := range t.InIPs {\n\t\t\toip.AddTags(iip.Tags())\n\t\t}\n\t\toip.WriteAuditLogToFile()\n\t}\n}\n\n\/\/ atomizeIPs renames temporary output files\/directories to their proper paths\nfunc (t *Task) atomizeIPs() {\n\tfor _, oip := range t.OutIPs {\n\t\t\/\/ Move paths for ports, to final destinations\n\t\tif !oip.doStream {\n\t\t\tos.Rename(t.TempDir()+\"\/\"+oip.TempPath(), oip.Path())\n\t\t}\n\t}\n\t\/\/ For remaining paths in temporary execution dir, just move out of it\n\tfilepath.Walk(t.TempDir(), func(tempPath string, fileInfo os.FileInfo, err error) error {\n\t\tif !fileInfo.IsDir() {\n\t\t\tnewPath := strings.Replace(tempPath, t.TempDir()+\"\/\", \"\", 1)\n\t\t\tnewPath = strings.Replace(newPath, AbsPathPlaceholder+\"\/\", \"\/\", 1)\n\t\t\tnewPathDir := filepath.Dir(newPath)\n\t\t\tif _, err := os.Stat(newPathDir); os.IsNotExist(err) {\n\t\t\t\tos.MkdirAll(newPathDir, 0777)\n\t\t\t}\n\t\t\tDebug.Println(\"Moving: \", tempPath, \" -> \", newPath)\n\t\t\trenameErr := os.Rename(tempPath, newPath)\n\t\t\tCheckWithMsg(renameErr, \"Could not rename file \"+tempPath+\" to \"+newPath)\n\t\t}\n\t\treturn err\n\t})\n\t\/\/ Remove temporary execution dir\n\tremErr := os.RemoveAll(t.TempDir())\n\tCheckWithMsg(remErr, \"Could not remove temp dir: \"+t.TempDir())\n}\n\n\/\/ TempDir returns a string that is unique to a task, suitable for use\n\/\/ in file paths. It is built up by merging all input filenames and parameter\n\/\/ values that a task takes as input, joined with dots.\nfunc (t *Task) TempDir() string {\n\tpathPcs := []string{\"tmp.\" + t.Name}\n\tfor _, ipName := range sortedFileIPMapKeys(t.InIPs) {\n\t\tpathPcs = append(pathPcs, filepath.Base(t.InIP(ipName).Path()))\n\t}\n\tfor _, paramName := range sortedStringMapKeys(t.Params) {\n\t\tpathPcs = append(pathPcs, paramName+\"_\"+t.Param(paramName))\n\t}\n\tfor _, tagName := range sortedStringMapKeys(t.Tags) {\n\t\tpathPcs = append(pathPcs, tagName+\"_\"+t.Tag(tagName))\n\t}\n\tpathSegment := strings.Join(pathPcs, \".\")\n\tif len(pathSegment) > 255 {\n\t\tsha1sum := sha1.Sum([]byte(pathSegment))\n\t\tpathSegment = t.Name + \".\" + hex.EncodeToString(sha1sum[:])\n\t}\n\treturn pathSegment\n}\n\nfunc parentDirPath(path string) string {\n\tif path[0] == '\/' {\n\t\treturn path\n\t}\n\t\/\/ For relative paths, add \"..\" to get out of current dir\n\treturn \"..\/\" + path\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\tsf \"github.com\/snowflakedb\/gosnowflake\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Specific for Snowflake db\nconst (\n\tloginTimeout = 5 * time.Second \/\/ by default is 60\n\tmultiStmtName = \"multiple statement execution\" \/\/ https:\/\/github.com\/snowflakedb\/gosnowflake\/blob\/e909f00ff624a7e60d4f91718f6adc92cbd0d80f\/connection.go#L57-L61\n)\n\ntype SnowFlakeTarget struct {\n\tTarget\n\tClient *sql.DB\n}\n\nfunc (sft SnowFlakeTarget) IsConnectable() bool {\n\tclient := sft.Client\n\terr := client.Ping()\n\treturn err == nil\n}\n\nfunc NewSnowflakeTarget(target Target) *SnowFlakeTarget {\n\t\/\/ Note: region connection parameter is deprecated\n\tvar region string\n\tif target.Region == \"us-west-1\" {\n\t\tregion = \"\"\n\t} else {\n\t\tregion = target.Region\n\t}\n\n\tconfigStr, err := sf.DSN(&sf.Config{\n\t\tRegion: region,\n\t\tAccount: target.Account,\n\t\tUser: target.Username,\n\t\tPassword: target.Password,\n\t\tDatabase: target.Database,\n\t\tWarehouse: target.Warehouse,\n\t\tLoginTimeout: loginTimeout,\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdb, err := sql.Open(\"snowflake\", configStr)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &SnowFlakeTarget{target, db}\n}\n\nfunc (sft SnowFlakeTarget) GetTarget() Target {\n\treturn sft.Target\n}\n\n\/\/ Run a query against the target\nfunc (sft SnowFlakeTarget) RunQuery(query ReadyQuery, dryRun bool, showQueryOutput bool) QueryStatus {\n\tvar affected int64 = 0\n\tvar err error\n\n\tif dryRun {\n\t\tif sft.IsConnectable() {\n\t\t\tlog.Printf(\"SUCCESS: Able to connect to target database, %s\\n.\", sft.Account)\n\t\t} else {\n\t\t\tlog.Printf(\"ERROR: Cannot connect to target database, %s\\n.\", sft.Account)\n\t\t}\n\n\t\treturn QueryStatus{query, query.Path, 0, nil}\n\t}\n\n\t\/\/ 0 allows arbitrary number of statements\n\tctx, _ := sf.WithMultiStatement(context.Background(), 0)\n\tscript := query.Script\n\n\tif len(strings.TrimSpace(script)) > 0 {\n\t\tif showQueryOutput {\n\t\t\trows, err := sft.Client.QueryContext(ctx, script)\n\t\t\tif err != nil {\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\terr = printSfTable(rows)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"ERROR: %s.\", err)\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\n\t\t\tfor rows.NextResultSet() {\n\t\t\t\terr = printSfTable(rows)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"ERROR: %s.\", err)\n\t\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tres, err := sft.Client.ExecContext(ctx, script)\n\t\t\tif err != nil {\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\n\t\t\taff, _ := res.RowsAffected()\n\t\t\taffected += aff\n\t\t}\n\t}\n\n\treturn QueryStatus{query, query.Path, int(affected), err}\n}\n\nfunc printSfTable(rows *sql.Rows) error {\n\toutputBuffer := make([][]string, 0, 10)\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn errors.New(\"Unable to read columns\")\n\t}\n\n\t\/\/ check to prevent rows.Next() on multi-statement\n\t\/\/ see also: https:\/\/github.com\/snowflakedb\/gosnowflake\/issues\/365\n\tfor _, c := range cols {\n\t\tif c == multiStmtName {\n\t\t\treturn errors.New(\"Unable to showQueryOutput for multi-statement queries\")\n\t\t}\n\t}\n\n\tvals := make([]interface{}, len(cols))\n\tfor i := range cols {\n\t\tvals[i] = new(sql.RawBytes)\n\t}\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(vals...)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Unable to read row\")\n\t\t}\n\n\t\tif len(vals) > 0 {\n\t\t\toutputBuffer = append(outputBuffer, stringify(vals))\n\t\t}\n\t}\n\n\tif len(outputBuffer) > 0 {\n\t\tlog.Printf(\"QUERY OUTPUT:\\n\")\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetHeader(cols)\n\t\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\t\ttable.SetCenterSeparator(\"|\")\n\n\t\tfor _, row := range outputBuffer {\n\t\t\ttable.Append(row)\n\t\t}\n\n\t\ttable.Render() \/\/ Send output\n\t}\n\treturn nil\n}\n\nfunc stringify(row []interface{}) []string {\n\tvar line []string\n\tfor _, element := range row {\n\t\tline = append(line, fmt.Sprint(element))\n\t}\n\treturn line\n}\n<commit_msg>Snowflake: convert query output to string before printing (closes #166)<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\tsf \"github.com\/snowflakedb\/gosnowflake\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Specific for Snowflake db\nconst (\n\tloginTimeout = 5 * time.Second \/\/ by default is 60\n\tmultiStmtName = \"multiple statement execution\" \/\/ https:\/\/github.com\/snowflakedb\/gosnowflake\/blob\/e909f00ff624a7e60d4f91718f6adc92cbd0d80f\/connection.go#L57-L61\n)\n\ntype SnowFlakeTarget struct {\n\tTarget\n\tClient *sql.DB\n}\n\nfunc (sft SnowFlakeTarget) IsConnectable() bool {\n\tclient := sft.Client\n\terr := client.Ping()\n\treturn err == nil\n}\n\nfunc NewSnowflakeTarget(target Target) *SnowFlakeTarget {\n\t\/\/ Note: region connection parameter is deprecated\n\tvar region string\n\tif target.Region == \"us-west-1\" {\n\t\tregion = \"\"\n\t} else {\n\t\tregion = target.Region\n\t}\n\n\tconfigStr, err := sf.DSN(&sf.Config{\n\t\tRegion: region,\n\t\tAccount: target.Account,\n\t\tUser: target.Username,\n\t\tPassword: target.Password,\n\t\tDatabase: target.Database,\n\t\tWarehouse: target.Warehouse,\n\t\tLoginTimeout: loginTimeout,\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdb, err := sql.Open(\"snowflake\", configStr)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &SnowFlakeTarget{target, db}\n}\n\nfunc (sft SnowFlakeTarget) GetTarget() Target {\n\treturn sft.Target\n}\n\n\/\/ Run a query against the target\nfunc (sft SnowFlakeTarget) RunQuery(query ReadyQuery, dryRun bool, showQueryOutput bool) QueryStatus {\n\tvar affected int64 = 0\n\tvar err error\n\n\tif dryRun {\n\t\tif sft.IsConnectable() {\n\t\t\tlog.Printf(\"SUCCESS: Able to connect to target database, %s\\n.\", sft.Account)\n\t\t} else {\n\t\t\tlog.Printf(\"ERROR: Cannot connect to target database, %s\\n.\", sft.Account)\n\t\t}\n\n\t\treturn QueryStatus{query, query.Path, 0, nil}\n\t}\n\n\t\/\/ 0 allows arbitrary number of statements\n\tctx, _ := sf.WithMultiStatement(context.Background(), 0)\n\tscript := query.Script\n\n\tif len(strings.TrimSpace(script)) > 0 {\n\t\tif showQueryOutput {\n\t\t\trows, err := sft.Client.QueryContext(ctx, script)\n\t\t\tif err != nil {\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\terr = printSfTable(rows)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"ERROR: %s.\", err)\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\n\t\t\tfor rows.NextResultSet() {\n\t\t\t\terr = printSfTable(rows)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"ERROR: %s.\", err)\n\t\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tres, err := sft.Client.ExecContext(ctx, script)\n\t\t\tif err != nil {\n\t\t\t\treturn QueryStatus{query, query.Path, int(affected), err}\n\t\t\t}\n\n\t\t\taff, _ := res.RowsAffected()\n\t\t\taffected += aff\n\t\t}\n\t}\n\n\treturn QueryStatus{query, query.Path, int(affected), err}\n}\n\nfunc printSfTable(rows *sql.Rows) error {\n\toutputBuffer := make([][]string, 0, 10)\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn errors.New(\"Unable to read columns\")\n\t}\n\n\t\/\/ check to prevent rows.Next() on multi-statement\n\t\/\/ see also: https:\/\/github.com\/snowflakedb\/gosnowflake\/issues\/365\n\tfor _, c := range cols {\n\t\tif c == multiStmtName {\n\t\t\treturn errors.New(\"Unable to showQueryOutput for multi-statement queries\")\n\t\t}\n\t}\n\n\tvals := make([]interface{}, len(cols))\n\trawResult := make([][]byte, len(cols))\n\tfor i := range rawResult {\n\t\tvals[i] = &rawResult[i]\n\t}\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(vals...)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Unable to read row\")\n\t\t}\n\n\t\tif len(vals) > 0 {\n\t\t\toutputBuffer = append(outputBuffer, stringify(rawResult))\n\t\t}\n\t}\n\n\tif len(outputBuffer) > 0 {\n\t\tlog.Printf(\"QUERY OUTPUT:\\n\")\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetHeader(cols)\n\t\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\t\ttable.SetCenterSeparator(\"|\")\n\n\t\tfor _, row := range outputBuffer {\n\t\t\ttable.Append(row)\n\t\t}\n\n\t\ttable.Render() \/\/ Send output\n\t}\n\treturn nil\n}\n\nfunc stringify(row [][]byte) []string {\n\tvar line []string\n\tfor _, element := range row {\n\t\tline = append(line, fmt.Sprint(string(element)))\n\t}\n\treturn line\n}\n<|endoftext|>"} {"text":"<commit_before>package hdf5\n\n\/\/ #include \"hdf5.h\"\n\/\/ #include \"hdf5_hl.h\"\n\/\/ #include <stdlib.h>\n\/\/ #include <string.h>\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/ Table is an hdf5 packet-table.\ntype Table struct {\n\tLocation\n}\n\nfunc newPacketTable(id C.hid_t) *Table {\n\tt := &Table{Location{Identifier{id}}}\n\truntime.SetFinalizer(t, (*Table).finalizer)\n\treturn t\n}\n\nfunc (t *Table) finalizer() {\n\tif err := t.Close(); err != nil {\n\t\tpanic(fmt.Errorf(\"error closing packet table: %s\", err))\n\t}\n}\n\n\/\/ Close closes an open packet table.\nfunc (t *Table) Close() error {\n\tif t.id > 0 {\n\t\terr := h5err(C.H5PTclose(t.id))\n\t\tif err != nil {\n\t\t\tt.id = 0\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ IsValid returns whether or not an indentifier points to a packet table.\nfunc (t *Table) IsValid() bool {\n\treturn C.H5PTis_valid(t.id) >= 0\n}\n\nfunc (t *Table) Id() int {\n\treturn int(t.id)\n}\n\n\/\/ ReadPackets reads a number of packets from a packet table.\nfunc (t *Table) ReadPackets(start, nrecords int, data interface{}) error {\n\tc_start := C.hsize_t(start)\n\tc_nrecords := C.size_t(nrecords)\n\trv := reflect.Indirect(reflect.ValueOf(data))\n\trt := rv.Type()\n\tc_data := unsafe.Pointer(nil)\n\tswitch rt.Kind() {\n\tcase reflect.Array:\n\t\tif rv.Len() < nrecords {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in array (cap=%d)\", rv.Len()))\n\t\t}\n\t\tc_data = unsafe.Pointer(rv.Index(0).UnsafeAddr())\n\n\tcase reflect.Slice:\n\t\tif rv.Len() < nrecords {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in slice (cap=%d)\", rv.Len()))\n\t\t}\n\t\tslice := (*reflect.SliceHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(slice.Data)\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unhandled kind (%s), need slice or array\", rt.Kind()))\n\t}\n\terr := C.H5PTread_packets(t.id, c_start, c_nrecords, c_data)\n\treturn h5err(err)\n}\n\n\/\/ Append appends packets to the end of a packet table.\nfunc (t *Table) Append(data interface{}) error {\n\trv := reflect.Indirect(reflect.ValueOf(data))\n\trt := rv.Type()\n\tc_nrecords := C.size_t(0)\n\tc_data := unsafe.Pointer(nil)\n\n\tswitch rt.Kind() {\n\n\tcase reflect.Array:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tc_data = unsafe.Pointer(rv.UnsafeAddr())\n\n\tcase reflect.Slice:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tslice := (*reflect.SliceHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(slice.Data)\n\n\tcase reflect.String:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tstr := (*reflect.StringHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(str.Data)\n\n\tcase reflect.Ptr:\n\t\tc_nrecords = C.size_t(1)\n\t\tc_data = unsafe.Pointer(rv.Elem().UnsafeAddr())\n\n\tdefault:\n\t\tc_nrecords = C.size_t(1)\n\t\tc_data = unsafe.Pointer(rv.UnsafeAddr())\n\t}\n\n\terr := C.H5PTappend(t.id, c_nrecords, c_data)\n\treturn h5err(err)\n}\n\n\/\/ Next reads packets from a packet table starting at the current index.\nfunc (t *Table) Next(data interface{}) error {\n\trt := reflect.TypeOf(data)\n\trv := reflect.ValueOf(data)\n\tn := C.size_t(0)\n\tcdata := unsafe.Pointer(nil)\n\tswitch rt.Kind() {\n\tcase reflect.Array:\n\t\tif rv.Cap() <= 0 {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in array (cap=%d)\", rv.Cap()))\n\t\t}\n\t\tcdata = unsafe.Pointer(rv.Index(0).UnsafeAddr())\n\t\tn = C.size_t(rv.Cap())\n\tcase reflect.Slice:\n\t\tif rv.Cap() <= 0 {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in slice (cap=%d)\", rv.Cap()))\n\t\t}\n\t\tcdata = unsafe.Pointer(rv.Index(0).UnsafeAddr())\n\t\tn = C.size_t(rv.Cap())\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported kind (%s), need slice or array\", rt.Kind()))\n\t}\n\terr := C.H5PTget_next(t.id, n, cdata)\n\treturn h5err(err)\n}\n\n\/\/ NumPackets returns the number of packets in a packet table.\nfunc (t *Table) NumPackets() (int, error) {\n\tc_nrecords := C.hsize_t(0)\n\terr := C.H5PTget_num_packets(t.id, &c_nrecords)\n\treturn int(c_nrecords), h5err(err)\n}\n\n\/\/ CreateIndex resets a packet table's index to the first packet.\nfunc (t *Table) CreateIndex() error {\n\terr := C.H5PTcreate_index(t.id)\n\treturn h5err(err)\n}\n\n\/\/ SetIndex sets a packet table's index.\nfunc (t *Table) SetIndex(index int) error {\n\tc_idx := C.hsize_t(index)\n\terr := C.H5PTset_index(t.id, c_idx)\n\treturn h5err(err)\n}\n\n\/\/ Type returns an identifier for a copy of the datatype for a dataset.\nfunc (t *Table) Type() (*Datatype, error) {\n\thid := C.H5Dget_type(t.id)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewDatatype(hid), nil\n}\n\nfunc createTable(id C.hid_t, name string, dtype *Datatype, chunkSize, compression int) (*Table, error) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tchunk := C.hsize_t(chunkSize)\n\tcompr := C.int(compression)\n\thid := C.H5PTcreate_fl(id, c_name, dtype.id, chunk, compr)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn newPacketTable(hid), nil\n}\n\nfunc createTableFrom(id C.hid_t, name string, dtype interface{}, chunkSize, compression int) (*Table, error) {\n\tvar err error\n\tswitch dt := dtype.(type) {\n\tcase reflect.Type:\n\t\tif hdfDtype, err := NewDataTypeFromType(dt); err == nil {\n\t\t\treturn createTable(id, name, hdfDtype, chunkSize, compression)\n\t\t}\n\tcase *Datatype:\n\t\treturn createTable(id, name, dt, chunkSize, compression)\n\tdefault:\n\t\tif hdfDtype, err := NewDataTypeFromType(reflect.TypeOf(dtype)); err == nil {\n\t\t\treturn createTable(id, name, hdfDtype, chunkSize, compression)\n\t\t}\n\t}\n\treturn nil, err\n}\n\nfunc openTable(id C.hid_t, name string) (*Table, error) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\thid := C.H5PTopen(id, c_name)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn newPacketTable(hid), nil\n}\n<commit_msg>h5pt: enforce Table.Next to take a pointer to an array\/slice<commit_after>package hdf5\n\n\/\/ #include \"hdf5.h\"\n\/\/ #include \"hdf5_hl.h\"\n\/\/ #include <stdlib.h>\n\/\/ #include <string.h>\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/ Table is an hdf5 packet-table.\ntype Table struct {\n\tLocation\n}\n\nfunc newPacketTable(id C.hid_t) *Table {\n\tt := &Table{Location{Identifier{id}}}\n\truntime.SetFinalizer(t, (*Table).finalizer)\n\treturn t\n}\n\nfunc (t *Table) finalizer() {\n\tif err := t.Close(); err != nil {\n\t\tpanic(fmt.Errorf(\"error closing packet table: %s\", err))\n\t}\n}\n\n\/\/ Close closes an open packet table.\nfunc (t *Table) Close() error {\n\tif t.id > 0 {\n\t\terr := h5err(C.H5PTclose(t.id))\n\t\tif err != nil {\n\t\t\tt.id = 0\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ IsValid returns whether or not an indentifier points to a packet table.\nfunc (t *Table) IsValid() bool {\n\treturn C.H5PTis_valid(t.id) >= 0\n}\n\nfunc (t *Table) Id() int {\n\treturn int(t.id)\n}\n\n\/\/ ReadPackets reads a number of packets from a packet table.\nfunc (t *Table) ReadPackets(start, nrecords int, data interface{}) error {\n\tc_start := C.hsize_t(start)\n\tc_nrecords := C.size_t(nrecords)\n\trv := reflect.Indirect(reflect.ValueOf(data))\n\trt := rv.Type()\n\tc_data := unsafe.Pointer(nil)\n\tswitch rt.Kind() {\n\tcase reflect.Array:\n\t\tif rv.Len() < nrecords {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in array (cap=%d)\", rv.Len()))\n\t\t}\n\t\tc_data = unsafe.Pointer(rv.Index(0).UnsafeAddr())\n\n\tcase reflect.Slice:\n\t\tif rv.Len() < nrecords {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in slice (cap=%d)\", rv.Len()))\n\t\t}\n\t\tslice := (*reflect.SliceHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(slice.Data)\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unhandled kind (%s), need slice or array\", rt.Kind()))\n\t}\n\terr := C.H5PTread_packets(t.id, c_start, c_nrecords, c_data)\n\treturn h5err(err)\n}\n\n\/\/ Append appends packets to the end of a packet table.\nfunc (t *Table) Append(data interface{}) error {\n\trv := reflect.Indirect(reflect.ValueOf(data))\n\trt := rv.Type()\n\tc_nrecords := C.size_t(0)\n\tc_data := unsafe.Pointer(nil)\n\n\tswitch rt.Kind() {\n\n\tcase reflect.Array:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tc_data = unsafe.Pointer(rv.UnsafeAddr())\n\n\tcase reflect.Slice:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tslice := (*reflect.SliceHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(slice.Data)\n\n\tcase reflect.String:\n\t\tc_nrecords = C.size_t(rv.Len())\n\t\tstr := (*reflect.StringHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tc_data = unsafe.Pointer(str.Data)\n\n\tcase reflect.Ptr:\n\t\tc_nrecords = C.size_t(1)\n\t\tc_data = unsafe.Pointer(rv.Elem().UnsafeAddr())\n\n\tdefault:\n\t\tc_nrecords = C.size_t(1)\n\t\tc_data = unsafe.Pointer(rv.UnsafeAddr())\n\t}\n\n\terr := C.H5PTappend(t.id, c_nrecords, c_data)\n\treturn h5err(err)\n}\n\n\/\/ Next reads packets from a packet table starting at the current index into the value pointed at by data.\n\/\/ i.e. data is a pointer to an array or a slice.\nfunc (t *Table) Next(data interface{}) error {\n\trt := reflect.TypeOf(data)\n\tif rt.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"hdf5: invalid value type. got=%v, want pointer\", rt.Kind())\n\t}\n\trt = rt.Elem()\n\trv := reflect.Indirect(reflect.ValueOf(data))\n\n\tn := C.size_t(0)\n\tcdata := unsafe.Pointer(nil)\n\tswitch rt.Kind() {\n\tcase reflect.Array:\n\t\tif rv.Cap() <= 0 {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in array (cap=%d)\", rv.Cap()))\n\t\t}\n\t\tcdata = unsafe.Pointer(rv.UnsafeAddr())\n\t\tn = C.size_t(rv.Cap())\n\n\tcase reflect.Slice:\n\t\tif rv.Cap() <= 0 {\n\t\t\tpanic(fmt.Errorf(\"not enough capacity in slice (cap=%d)\", rv.Cap()))\n\t\t}\n\t\tslice := (*reflect.SliceHeader)(unsafe.Pointer(rv.UnsafeAddr()))\n\t\tcdata = unsafe.Pointer(slice.Data)\n\t\tn = C.size_t(rv.Cap())\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported kind (%s), need slice or array\", rt.Kind()))\n\t}\n\terr := C.H5PTget_next(t.id, n, cdata)\n\treturn h5err(err)\n}\n\n\/\/ NumPackets returns the number of packets in a packet table.\nfunc (t *Table) NumPackets() (int, error) {\n\tc_nrecords := C.hsize_t(0)\n\terr := C.H5PTget_num_packets(t.id, &c_nrecords)\n\treturn int(c_nrecords), h5err(err)\n}\n\n\/\/ CreateIndex resets a packet table's index to the first packet.\nfunc (t *Table) CreateIndex() error {\n\terr := C.H5PTcreate_index(t.id)\n\treturn h5err(err)\n}\n\n\/\/ SetIndex sets a packet table's index.\nfunc (t *Table) SetIndex(index int) error {\n\tc_idx := C.hsize_t(index)\n\terr := C.H5PTset_index(t.id, c_idx)\n\treturn h5err(err)\n}\n\n\/\/ Type returns an identifier for a copy of the datatype for a dataset.\nfunc (t *Table) Type() (*Datatype, error) {\n\thid := C.H5Dget_type(t.id)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewDatatype(hid), nil\n}\n\nfunc createTable(id C.hid_t, name string, dtype *Datatype, chunkSize, compression int) (*Table, error) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tchunk := C.hsize_t(chunkSize)\n\tcompr := C.int(compression)\n\thid := C.H5PTcreate_fl(id, c_name, dtype.id, chunk, compr)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn newPacketTable(hid), nil\n}\n\nfunc createTableFrom(id C.hid_t, name string, dtype interface{}, chunkSize, compression int) (*Table, error) {\n\tvar err error\n\tswitch dt := dtype.(type) {\n\tcase reflect.Type:\n\t\tif hdfDtype, err := NewDataTypeFromType(dt); err == nil {\n\t\t\treturn createTable(id, name, hdfDtype, chunkSize, compression)\n\t\t}\n\tcase *Datatype:\n\t\treturn createTable(id, name, dt, chunkSize, compression)\n\tdefault:\n\t\tif hdfDtype, err := NewDataTypeFromType(reflect.TypeOf(dtype)); err == nil {\n\t\t\treturn createTable(id, name, hdfDtype, chunkSize, compression)\n\t\t}\n\t}\n\treturn nil, err\n}\n\nfunc openTable(id C.hid_t, name string) (*Table, error) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\thid := C.H5PTopen(id, c_name)\n\tif err := checkID(hid); err != nil {\n\t\treturn nil, err\n\t}\n\treturn newPacketTable(hid), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package task\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/go-task\/task\/v3\/internal\/logger\"\n\t\"github.com\/go-task\/task\/v3\/taskfile\"\n)\n\n\/\/ ListTasksWithDesc reports tasks that have a description spec.\nfunc (e *Executor) ListTasksWithDesc() {\n\te.printTasks(false)\n}\n\n\/\/ ListAllTasks reports all tasks, with or without a description spec.\nfunc (e *Executor) ListAllTasks() {\n\te.printTasks(true)\n}\n\nfunc (e *Executor) printTasks(listAll bool) {\n\tvar tasks []*taskfile.Task\n\tif listAll {\n\t\ttasks = e.allTaskNames()\n\t} else {\n\t\ttasks = e.tasksWithDesc()\n\t}\n\n\tif len(tasks) == 0 {\n\t\tif listAll {\n\t\t\te.Logger.Outf(logger.Yellow, \"task: No tasks available\")\n\t\t} else {\n\t\t\te.Logger.Outf(logger.Yellow, \"task: No tasks with description available. Try --list-all to list all tasks\")\n\t\t}\n\t\treturn\n\t}\n\te.Logger.Outf(logger.Default, \"task: Available tasks for this project:\")\n\n\t\/\/ Format in tab-separated columns with a tab stop of 8.\n\tw := tabwriter.NewWriter(e.Stdout, 0, 8, 0, '\\t', 0)\n\tfor _, task := range tasks {\n\t\tfmt.Fprintf(w, \"* %s: \\t%s\\n\", task.Name(), task.Desc)\n\t}\n\tw.Flush()\n}\n\nfunc (e *Executor) allTaskNames() (tasks []*taskfile.Task) {\n\ttasks = make([]*taskfile.Task, 0, len(e.Taskfile.Tasks))\n\tfor _, task := range e.Taskfile.Tasks {\n\t\tif !task.Internal {\n\t\t\ttasks = append(tasks, task)\n\t\t}\n\t}\n\tsort.Slice(tasks, func(i, j int) bool { return tasks[i].Task < tasks[j].Task })\n\treturn\n}\n\nfunc (e *Executor) tasksWithDesc() (tasks []*taskfile.Task) {\n\ttasks = make([]*taskfile.Task, 0, len(e.Taskfile.Tasks))\n\tfor _, task := range e.Taskfile.Tasks {\n\t\tif !task.Internal && task.Desc != \"\" {\n\t\t\tcompiledTask, err := e.FastCompiledTask(taskfile.Call{Task: task.Task})\n\t\t\tif err == nil {\n\t\t\t\ttask = compiledTask\n\t\t\t}\n\t\t\ttasks = append(tasks, task)\n\t\t}\n\t}\n\tsort.Slice(tasks, func(i, j int) bool { return tasks[i].Task < tasks[j].Task })\n\treturn\n}\n\n\/\/ PrintTaskNames prints only the task names in a Taskfile.\n\/\/ Only tasks with a non-empty description are printed if allTasks is false.\n\/\/ Otherwise, all task names are printed.\nfunc (e *Executor) ListTaskNames(allTasks bool) {\n\t\/\/ if called from cmd\/task.go, e.Taskfile has not yet been parsed\n\tif e.Taskfile == nil {\n\t\tif err := e.readTaskfile(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ use stdout if no output defined\n\tvar w io.Writer = os.Stdout\n\tif e.Stdout != nil {\n\t\tw = e.Stdout\n\t}\n\t\/\/ create a string slice from all map values (*taskfile.Task)\n\ts := make([]string, 0, len(e.Taskfile.Tasks))\n\tfor _, t := range e.Taskfile.Tasks {\n\t\tif (allTasks || t.Desc != \"\") && !t.Internal {\n\t\t\ts = append(s, strings.TrimRight(t.Task, \":\"))\n\t\t}\n\t}\n\t\/\/ sort and print all task names\n\tsort.Strings(s)\n\tfor _, t := range s {\n\t\tfmt.Fprintln(w, t)\n\t}\n}\n<commit_msg>Fix task names for `--list` and `--list-all`<commit_after>package task\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/go-task\/task\/v3\/internal\/logger\"\n\t\"github.com\/go-task\/task\/v3\/taskfile\"\n)\n\n\/\/ ListTasksWithDesc reports tasks that have a description spec.\nfunc (e *Executor) ListTasksWithDesc() {\n\te.printTasks(false)\n}\n\n\/\/ ListAllTasks reports all tasks, with or without a description spec.\nfunc (e *Executor) ListAllTasks() {\n\te.printTasks(true)\n}\n\nfunc (e *Executor) printTasks(listAll bool) {\n\tvar tasks []*taskfile.Task\n\tif listAll {\n\t\ttasks = e.allTaskNames()\n\t} else {\n\t\ttasks = e.tasksWithDesc()\n\t}\n\n\tif len(tasks) == 0 {\n\t\tif listAll {\n\t\t\te.Logger.Outf(logger.Yellow, \"task: No tasks available\")\n\t\t} else {\n\t\t\te.Logger.Outf(logger.Yellow, \"task: No tasks with description available. Try --list-all to list all tasks\")\n\t\t}\n\t\treturn\n\t}\n\te.Logger.Outf(logger.Default, \"task: Available tasks for this project:\")\n\n\t\/\/ Format in tab-separated columns with a tab stop of 8.\n\tw := tabwriter.NewWriter(e.Stdout, 0, 8, 0, '\\t', 0)\n\tfor _, task := range tasks {\n\t\tfmt.Fprintf(w, \"* %s: \\t%s\\n\", task.Task, task.Desc)\n\t}\n\tw.Flush()\n}\n\nfunc (e *Executor) allTaskNames() (tasks []*taskfile.Task) {\n\ttasks = make([]*taskfile.Task, 0, len(e.Taskfile.Tasks))\n\tfor _, task := range e.Taskfile.Tasks {\n\t\tif !task.Internal {\n\t\t\ttasks = append(tasks, task)\n\t\t}\n\t}\n\tsort.Slice(tasks, func(i, j int) bool { return tasks[i].Task < tasks[j].Task })\n\treturn\n}\n\nfunc (e *Executor) tasksWithDesc() (tasks []*taskfile.Task) {\n\ttasks = make([]*taskfile.Task, 0, len(e.Taskfile.Tasks))\n\tfor _, task := range e.Taskfile.Tasks {\n\t\tif !task.Internal && task.Desc != \"\" {\n\t\t\tcompiledTask, err := e.FastCompiledTask(taskfile.Call{Task: task.Task})\n\t\t\tif err == nil {\n\t\t\t\ttask = compiledTask\n\t\t\t}\n\t\t\ttasks = append(tasks, task)\n\t\t}\n\t}\n\tsort.Slice(tasks, func(i, j int) bool { return tasks[i].Task < tasks[j].Task })\n\treturn\n}\n\n\/\/ PrintTaskNames prints only the task names in a Taskfile.\n\/\/ Only tasks with a non-empty description are printed if allTasks is false.\n\/\/ Otherwise, all task names are printed.\nfunc (e *Executor) ListTaskNames(allTasks bool) {\n\t\/\/ if called from cmd\/task.go, e.Taskfile has not yet been parsed\n\tif e.Taskfile == nil {\n\t\tif err := e.readTaskfile(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ use stdout if no output defined\n\tvar w io.Writer = os.Stdout\n\tif e.Stdout != nil {\n\t\tw = e.Stdout\n\t}\n\t\/\/ create a string slice from all map values (*taskfile.Task)\n\ts := make([]string, 0, len(e.Taskfile.Tasks))\n\tfor _, t := range e.Taskfile.Tasks {\n\t\tif (allTasks || t.Desc != \"\") && !t.Internal {\n\t\t\ts = append(s, strings.TrimRight(t.Task, \":\"))\n\t\t}\n\t}\n\t\/\/ sort and print all task names\n\tsort.Strings(s)\n\tfor _, t := range s {\n\t\tfmt.Fprintln(w, t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nvar cmdTest = &Command{\n\tUsage: \"test (all | classname...)\",\n\tShort: \"Run apex tests\",\n\tLong: `\nRun apex tests\n\nTest Options\n -namespace=<namespace> Select namespace to run test from\n\nExamples:\n\n force test all\n force test Test1 Test2 Test3\n force test -namespace=ns Test4 \n`,\n}\n\nfunc init() {\n\tcmdTest.Run = runTests\n}\n\nvar (\n\tnamespaceTestFlag = cmdTest.Flag.String(\"namespace\", \"\", \"namespace to run tests in\")\n)\n\nfunc runTests(cmd *Command, args []string) {\n\tif len(args) < 1 {\n\t\tErrorAndExit(\"must specify tests to run\")\n\t}\n\tforce, _ := ActiveForce()\n\toutput, err := force.Partner.RunTests(args, *namespaceTestFlag)\n\tsuccess := false\n\tif err != nil {\n\t\tErrorAndExit(err.Error())\n\t} else {\n\t\t\/\/working on a better way to do this - catches when no class are found and ran\n\t\tif output.NumberRun == 0 {\n\t\t\tfmt.Println(\"Test classes specified not found\")\n\t\t} else {\n\t\t\tvar percent string\n\t\t\tfmt.Println(\"Coverage:\")\n\t\t\tfmt.Println()\n\t\t\tfor index := range output.NumberLocations {\n\t\t\t\tif output.NumberLocations[index] != 0 {\n\t\t\t\t\tpercent = strconv.Itoa(((output.NumberLocations[index]-output.NumberLocationsNotCovered[index])\/output.NumberLocations[index])*100) + \"%\"\n\t\t\t\t} else {\n\t\t\t\t\tpercent = \"0%\"\n\t\t\t\t}\n\t\t\t\tfmt.Println(\" \" + percent + \" \" + output.Name[index])\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Results:\")\n\t\t\tfmt.Println()\n\t\t\tfor index := range output.SMethodNames {\n\t\t\t\tfmt.Println(\" [PASS] \" + output.SClassNames[index] + \"::\" + output.SMethodNames[index])\n\t\t\t}\n\n\t\t\tfor index := range output.FMethodNames {\n\t\t\t\tfmt.Println(\" [FAIL] \" + output.FClassNames[index] + \"::\" + output.FMethodNames[index] + \": \" + output.FMessage[index])\n\t\t\t\tfmt.Println(\" \" + output.FStackTrace[index])\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfmt.Println()\n\n\t\t\tsuccess = len(output.FMethodNames) == 0\n\t\t}\n\n\t\t\/\/ Handle notifications\n\t\tnotifySuccess(\"test\", success)\n\t}\n}\n<commit_msg>Add verbose logging to test runs<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nvar cmdTest = &Command{\n\tUsage: \"test (all | classname...)\",\n\tShort: \"Run apex tests\",\n\tLong: `\nRun apex tests\n\nTest Options\n -namespace=<namespace> Select namespace to run test from\n -v Verbose logging\n\nExamples:\n\n force test all\n force test Test1 Test2 Test3\n force test -namespace=ns Test4 \n force test -v Test1\n`,\n}\n\nfunc init() {\n\tcmdTest.Flag.BoolVar(&verboselogging, \"v\", false, \"set verbose logging\")\n\tcmdTest.Run = runTests\n}\n\nvar (\n\tnamespaceTestFlag = cmdTest.Flag.String(\"namespace\", \"\", \"namespace to run tests in\")\n\tverboselogging bool\n)\n\nfunc runTests(cmd *Command, args []string) {\n\tif len(args) < 1 {\n\t\tErrorAndExit(\"must specify tests to run\")\n\t}\n\tforce, _ := ActiveForce()\n\toutput, err := force.Partner.RunTests(args, *namespaceTestFlag)\n\tsuccess := false\n\tif err != nil {\n\t\tErrorAndExit(err.Error())\n\t} else {\n\n\t\tif verboselogging {\n\t\t\tfmt.Println(output)\n\t\t\tfmt.Println()\n\t\t}\n\t\t\/\/working on a better way to do this - catches when no class are found and ran\n\t\tif output.NumberRun == 0 {\n\t\t\tfmt.Println(\"Test classes specified not found\")\n\t\t} else {\n\t\t\tvar percent string\n\t\t\tfmt.Println(\"Coverage:\")\n\t\t\tfmt.Println()\n\t\t\tfor index := range output.NumberLocations {\n\t\t\t\tif output.NumberLocations[index] != 0 {\n\t\t\t\t\tpercent = strconv.Itoa(((output.NumberLocations[index]-output.NumberLocationsNotCovered[index])\/output.NumberLocations[index])*100) + \"%\"\n\t\t\t\t} else {\n\t\t\t\t\tpercent = \"0%\"\n\t\t\t\t}\n\t\t\t\tfmt.Println(\" \" + percent + \" \" + output.Name[index])\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"Results:\")\n\t\t\tfmt.Println()\n\t\t\tfor index := range output.SMethodNames {\n\t\t\t\tfmt.Println(\" [PASS] \" + output.SClassNames[index] + \"::\" + output.SMethodNames[index])\n\t\t\t}\n\n\t\t\tfor index := range output.FMethodNames {\n\t\t\t\tfmt.Println(\" [FAIL] \" + output.FClassNames[index] + \"::\" + output.FMethodNames[index] + \": \" + output.FMessage[index])\n\t\t\t\tfmt.Println(\" \" + output.FStackTrace[index])\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfmt.Println()\n\n\t\t\tsuccess = len(output.FMethodNames) == 0\n\t\t}\n\n\t\t\/\/ Handle notifications\n\t\tnotifySuccess(\"test\", success)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package otp\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\n\/\/ Hotp is a hotp key\ntype Hotp struct {\n\t*otpKey\n\tCounter int\n}\n\n\/\/ NewHotp creates a new HOTP key\n\/\/ keyLen <= 0, defaults to 10\n\/\/ digits <= 0, defaults to 6\n\/\/ algorithm == \"\", defaults to \"sha1\"\nfunc NewHotp(keyLen int, label, issuer, algorithm string, digits, counter int) (*Hotp, error) {\n\tk, err := newOtpKey(keyLen, label, issuer, algorithm, digits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Hotp{otpKey: k, Counter: counter}, nil\n}\n\n\/\/ NewHotpWithDefaults calls NewHotp with the default values\nfunc NewHotpWithDefaults(label, issuer string) (*Hotp, error) {\n\treturn NewHotp(0, label, issuer, \"\", 0, 0)\n}\n\nfunc importHotp(k *otpKey, params url.Values) (*Hotp, error) {\n\tr := &Hotp{otpKey: k}\n\t\/\/ verify counter\n\tctr := params.Get(\"counter\")\n\tif ctr == \"\" {\n\t\treturn nil, &Error{ECMissingCounter, \"counter parameter is missing\", nil}\n\t}\n\tif i, err := strconv.Atoi(ctr); err != nil {\n\t\treturn nil, &Error{ECInvalidCounter, fmt.Sprintf(\"invalid counter: %v\", ctr), nil}\n\t} else {\n\t\tr.Counter = i\n\t}\n\treturn r, nil\n}\n\n\/\/ ImportHotp imports an url in the otpauth format\nfunc ImportHotp(u string) (*Hotp, error) {\n\t\/\/ import key\n\tk, t, p, err := importOtpKey(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ check for HOTP\n\tif t != TypeHotp {\n\t\treturn nil, &Error{ECNotHotp, \"not a hotp key\", nil}\n\t}\n\treturn importHotp(k, p)\n}\n\n\/\/ Url returns the key in the otpauth format\nfunc (h *Hotp) Url() string {\n\treturn h.url(TypeHotp, url.Values{\"counter\": []string{strconv.Itoa(h.Counter)}})\n}\n\n\/\/ String returns the same as Url()\nfunc (h *Hotp) String() string { return h.Url() }\n\n\/\/ Code returns the current code\nfunc (h *Hotp) Code() int {\n\tc := h.hashTruncateInt(h.Counter)\n\treturn int(binary.BigEndian.Uint32(c)) % int(math.Pow10(h.Digits))\n}\n\n\/\/ Type returns \"hotp\"\nfunc (h *Hotp) Type() string { return TypeHotp }\n<commit_msg>added CodeCounter function<commit_after>package otp\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\n\/\/ Hotp is a hotp key\ntype Hotp struct {\n\t*otpKey\n\tCounter int\n}\n\n\/\/ NewHotp creates a new HOTP key\n\/\/ keyLen <= 0, defaults to 10\n\/\/ digits <= 0, defaults to 6\n\/\/ algorithm == \"\", defaults to \"sha1\"\nfunc NewHotp(keyLen int, label, issuer, algorithm string, digits, counter int) (*Hotp, error) {\n\tk, err := newOtpKey(keyLen, label, issuer, algorithm, digits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Hotp{otpKey: k, Counter: counter}, nil\n}\n\n\/\/ NewHotpWithDefaults calls NewHotp with the default values\nfunc NewHotpWithDefaults(label, issuer string) (*Hotp, error) {\n\treturn NewHotp(0, label, issuer, \"\", 0, 0)\n}\n\nfunc importHotp(k *otpKey, params url.Values) (*Hotp, error) {\n\tr := &Hotp{otpKey: k}\n\t\/\/ verify counter\n\tctr := params.Get(\"counter\")\n\tif ctr == \"\" {\n\t\treturn nil, &Error{ECMissingCounter, \"counter parameter is missing\", nil}\n\t}\n\tif i, err := strconv.Atoi(ctr); err != nil {\n\t\treturn nil, &Error{ECInvalidCounter, fmt.Sprintf(\"invalid counter: %v\", ctr), nil}\n\t} else {\n\t\tr.Counter = i\n\t}\n\treturn r, nil\n}\n\n\/\/ ImportHotp imports an url in the otpauth format\nfunc ImportHotp(u string) (*Hotp, error) {\n\t\/\/ import key\n\tk, t, p, err := importOtpKey(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ check for HOTP\n\tif t != TypeHotp {\n\t\treturn nil, &Error{ECNotHotp, \"not a hotp key\", nil}\n\t}\n\treturn importHotp(k, p)\n}\n\n\/\/ Url returns the key in the otpauth format\nfunc (h *Hotp) Url() string {\n\treturn h.url(TypeHotp, url.Values{\"counter\": []string{strconv.Itoa(h.Counter)}})\n}\n\n\/\/ String returns the same as Url()\nfunc (h *Hotp) String() string { return h.Url() }\n\n\/\/ Code returns the current code\nfunc (h *Hotp) Code() int { return h.codeCounter(h.Counter) }\n\n\/\/ CodeCounter returns the code for the counter c\nfunc (h *Hotp) CodeCounter(c int) int { return h.codeCounter(c) }\n\n\/\/ returns the code for counter c\nfunc (h *Hotp) codeCounter(c int) int {\n\treturn int(binary.BigEndian.Uint32(h.hashTruncateInt(c))) % int(math.Pow10(h.Digits))\n}\n\n\/\/ Type returns \"hotp\"\nfunc (h *Hotp) Type() string { return TypeHotp }\n<|endoftext|>"} {"text":"<commit_before>package deploy\n\nimport \"testing\"\n\nfunc TestIsEncrypted(t *testing.T) {\n\ttests := []struct {\n\t\tinput string\n\t\twant bool\n\t}{\n\t\t{\n\t\t\t``,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\"}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\": \"hoge\"}`,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\":{}}`,\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tgot := isEncrypted([]byte(test.input))\n\t\tif got != test.want {\n\t\t\tt.Fatalf(\"want %q, but %q:\", test.want, got)\n\t\t}\n\t}\n}\n<commit_msg>test case追加<commit_after>package deploy\n\nimport \"testing\"\n\nfunc TestIsEncrypted(t *testing.T) {\n\ttests := []struct {\n\t\tinput string\n\t\twant bool\n\t}{\n\t\t{\n\t\t\t``,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\"}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\": \"hoge\"}`,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t`{\"cipher\":{}}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"hoge\":\"huga\"}`,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t`{\"hoge\":\"huga\", \"cipher\": \"hoge\"}`,\n\t\t\ttrue,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tgot := isEncrypted([]byte(test.input))\n\t\tif got != test.want {\n\t\t\tt.Fatalf(\"want %q, but %q:\", test.want, got)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package htcat\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\t_ = iota\n\tkB int64 = 1 << (10 * iota)\n\tmB\n\tgB\n\ttB\n\tpB\n\teB\n)\n\ntype HtCat struct {\n\tio.WriterTo\n\td defrag\n\tu *url.URL\n\tcl *http.Client\n\n\t\/\/ Protect httpFragGen with a Mutex.\n\thttpFragGenMu sync.Mutex\n\thfg httpFragGen\n}\n\ntype HttpStatusError struct {\n\terror\n\tStatus string\n}\n\nfunc (cat *HtCat) startup(parallelism int) {\n\treq := http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: cat.u,\n\t\tProto: \"HTTP\/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tBody: nil,\n\t\tHost: cat.u.Host,\n\t}\n\n\tresp, err := cat.cl.Do(&req)\n\tif err != nil {\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\t\/\/ Check for non-200 OK response codes from the startup-GET.\n\tif resp.Status != \"200 OK\" {\n\t\terr = HttpStatusError{\n\t\t\terror: fmt.Errorf(\n\t\t\t\t\"Expected HTTP Status 200, received: %q\",\n\t\t\t\tresp.Status),\n\t\t\tStatus: resp.Status}\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\tl := resp.Header.Get(\"Content-Length\")\n\n\t\/\/ Some kinds of small or indeterminate-length files will\n\t\/\/ receive no parallelism. This procedure helps prepare the\n\t\/\/ HtCat value for a one-HTTP-Request GET.\n\tnoParallel := func(wtc writerToCloser) {\n\t\tf := cat.d.nextFragment()\n\t\tcat.d.setLast(cat.d.lastAllocated())\n\t\tf.contents = wtc\n\t\tcat.d.register(f)\n\t}\n\n\tif l == \"\" {\n\t\t\/\/ No Content-Length, stream without parallelism nor\n\t\t\/\/ assumptions about the length of the stream.\n\t\tgo noParallel(struct {\n\t\t\tio.WriterTo\n\t\t\tio.Closer\n\t\t}{\n\t\t\tWriterTo: bufio.NewReader(resp.Body),\n\t\t\tCloser: resp.Body,\n\t\t})\n\t\treturn\n\t}\n\n\tlength, err := strconv.ParseInt(l, 10, 64)\n\tif err != nil {\n\t\t\/\/ Invalid integer for Content-Length, defer reporting\n\t\t\/\/ the error until a WriteTo call is made.\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\t\/\/ Set up httpFrag generator state.\n\tcat.hfg.totalSize = length\n\tcat.hfg.targetFragSize = length \/ int64(parallelism)\n\tif cat.hfg.targetFragSize > 20*mB {\n\t\tcat.hfg.targetFragSize = 20 * mB\n\t}\n\n\t\/\/ Very small fragments are probably not worthwhile to start\n\t\/\/ up new requests for, but it in this case it was possible to\n\t\/\/ ascertain the size, so take advantage of that to start\n\t\/\/ reading in the background as eagerly as possible.\n\tif cat.hfg.targetFragSize < 1*mB {\n\t\tcat.hfg.curPos = cat.hfg.totalSize\n\t\ter := newEagerReader(resp.Body, cat.hfg.totalSize)\n\t\tgo noParallel(er)\n\t\tgo er.WaitClosed()\n\t\treturn\n\t}\n\n\t\/\/ None of the other special short-circuit cases have been\n\t\/\/ triggered, so begin preparation for full-blown parallel\n\t\/\/ GET. One GET worker is started here to take advantage of\n\t\/\/ the already pending response (which has no determinate\n\t\/\/ length, so it must be limited).\n\thf := cat.nextFragment()\n\tgo func() {\n\t\ter := newEagerReader(\n\t\t\tstruct {\n\t\t\t\tio.Reader\n\t\t\t\tio.Closer\n\t\t\t}{\n\t\t\t\tReader: io.LimitReader(resp.Body, hf.size),\n\t\t\t\tCloser: resp.Body,\n\t\t\t},\n\t\t\thf.size)\n\n\t\thf.fragment.contents = er\n\t\tcat.d.register(hf.fragment)\n\t\ter.WaitClosed()\n\n\t\t\/\/ Chain into being a regular worker, having finished\n\t\t\/\/ the special start-up segment.\n\t\tcat.get()\n\t}()\n}\n\nfunc New(client *http.Client, u *url.URL, parallelism int) *HtCat {\n\tcat := HtCat{\n\t\tu: u,\n\t\tcl: client,\n\t}\n\n\tcat.d.initDefrag()\n\tcat.WriterTo = &cat.d\n\tcat.startup(parallelism)\n\n\tif cat.hfg.curPos == cat.hfg.totalSize {\n\t\treturn &cat\n\t}\n\n\t\/\/ Start background workers.\n\t\/\/\n\t\/\/ \"startup\" starts one worker that is specially constructed\n\t\/\/ to deal with the first request, so back off by one to\n\t\/\/ prevent performing with too much parallelism.\n\tfor i := 1; i < parallelism; i += 1 {\n\t\tgo cat.get()\n\t}\n\n\treturn &cat\n}\n\nfunc (cat *HtCat) nextFragment() *httpFrag {\n\tcat.httpFragGenMu.Lock()\n\tdefer cat.httpFragGenMu.Unlock()\n\n\tvar hf *httpFrag\n\n\tif cat.hfg.hasNext() {\n\t\tf := cat.d.nextFragment()\n\t\thf = cat.hfg.nextFragment(f)\n\t} else {\n\t\tcat.d.setLast(cat.d.lastAllocated())\n\t}\n\n\treturn hf\n}\n\nfunc (cat *HtCat) get() {\n\tfor {\n\t\thf := cat.nextFragment()\n\t\tif hf == nil {\n\t\t\treturn\n\t\t}\n\n\t\treq := http.Request{\n\t\t\tMethod: \"GET\",\n\t\t\tURL: cat.u,\n\t\t\tProto: \"HTTP\/1.1\",\n\t\t\tProtoMajor: 1,\n\t\t\tProtoMinor: 1,\n\t\t\tHeader: hf.header,\n\t\t\tBody: nil,\n\t\t\tHost: cat.u.Host,\n\t\t}\n\n\t\tresp, err := cat.cl.Do(&req)\n\t\tif err != nil {\n\t\t\tcat.d.cancel(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check for an acceptable HTTP status code.\n\t\tif !(resp.Status == \"206 Partial Content\" ||\n\t\t\tresp.Status == \"200 OK\") {\n\t\t\terr = HttpStatusError{\n\t\t\t\terror: fmt.Errorf(\"Expected HTTP Status \"+\n\t\t\t\t\t\"206 or 200, received: %q\",\n\t\t\t\t\tresp.Status),\n\t\t\t\tStatus: resp.Status}\n\t\t\tgo cat.d.cancel(err)\n\t\t\treturn\n\t\t}\n\n\t\ter := newEagerReader(resp.Body, hf.size)\n\t\thf.fragment.contents = er\n\t\tcat.d.register(hf.fragment)\n\t\ter.WaitClosed()\n\t}\n}\n<commit_msg>Round up target fragment size to avoid tiny tail-end request<commit_after>package htcat\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\t_ = iota\n\tkB int64 = 1 << (10 * iota)\n\tmB\n\tgB\n\ttB\n\tpB\n\teB\n)\n\ntype HtCat struct {\n\tio.WriterTo\n\td defrag\n\tu *url.URL\n\tcl *http.Client\n\n\t\/\/ Protect httpFragGen with a Mutex.\n\thttpFragGenMu sync.Mutex\n\thfg httpFragGen\n}\n\ntype HttpStatusError struct {\n\terror\n\tStatus string\n}\n\nfunc (cat *HtCat) startup(parallelism int) {\n\treq := http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: cat.u,\n\t\tProto: \"HTTP\/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tBody: nil,\n\t\tHost: cat.u.Host,\n\t}\n\n\tresp, err := cat.cl.Do(&req)\n\tif err != nil {\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\t\/\/ Check for non-200 OK response codes from the startup-GET.\n\tif resp.Status != \"200 OK\" {\n\t\terr = HttpStatusError{\n\t\t\terror: fmt.Errorf(\n\t\t\t\t\"Expected HTTP Status 200, received: %q\",\n\t\t\t\tresp.Status),\n\t\t\tStatus: resp.Status}\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\tl := resp.Header.Get(\"Content-Length\")\n\n\t\/\/ Some kinds of small or indeterminate-length files will\n\t\/\/ receive no parallelism. This procedure helps prepare the\n\t\/\/ HtCat value for a one-HTTP-Request GET.\n\tnoParallel := func(wtc writerToCloser) {\n\t\tf := cat.d.nextFragment()\n\t\tcat.d.setLast(cat.d.lastAllocated())\n\t\tf.contents = wtc\n\t\tcat.d.register(f)\n\t}\n\n\tif l == \"\" {\n\t\t\/\/ No Content-Length, stream without parallelism nor\n\t\t\/\/ assumptions about the length of the stream.\n\t\tgo noParallel(struct {\n\t\t\tio.WriterTo\n\t\t\tio.Closer\n\t\t}{\n\t\t\tWriterTo: bufio.NewReader(resp.Body),\n\t\t\tCloser: resp.Body,\n\t\t})\n\t\treturn\n\t}\n\n\tlength, err := strconv.ParseInt(l, 10, 64)\n\tif err != nil {\n\t\t\/\/ Invalid integer for Content-Length, defer reporting\n\t\t\/\/ the error until a WriteTo call is made.\n\t\tgo cat.d.cancel(err)\n\t\treturn\n\t}\n\n\t\/\/ Set up httpFrag generator state.\n\tcat.hfg.totalSize = length\n\tcat.hfg.targetFragSize = 1 + ((length - 1) \/ int64(parallelism))\n\tif cat.hfg.targetFragSize > 20*mB {\n\t\tcat.hfg.targetFragSize = 20 * mB\n\t}\n\n\t\/\/ Very small fragments are probably not worthwhile to start\n\t\/\/ up new requests for, but it in this case it was possible to\n\t\/\/ ascertain the size, so take advantage of that to start\n\t\/\/ reading in the background as eagerly as possible.\n\tif cat.hfg.targetFragSize < 1*mB {\n\t\tcat.hfg.curPos = cat.hfg.totalSize\n\t\ter := newEagerReader(resp.Body, cat.hfg.totalSize)\n\t\tgo noParallel(er)\n\t\tgo er.WaitClosed()\n\t\treturn\n\t}\n\n\t\/\/ None of the other special short-circuit cases have been\n\t\/\/ triggered, so begin preparation for full-blown parallel\n\t\/\/ GET. One GET worker is started here to take advantage of\n\t\/\/ the already pending response (which has no determinate\n\t\/\/ length, so it must be limited).\n\thf := cat.nextFragment()\n\tgo func() {\n\t\ter := newEagerReader(\n\t\t\tstruct {\n\t\t\t\tio.Reader\n\t\t\t\tio.Closer\n\t\t\t}{\n\t\t\t\tReader: io.LimitReader(resp.Body, hf.size),\n\t\t\t\tCloser: resp.Body,\n\t\t\t},\n\t\t\thf.size)\n\n\t\thf.fragment.contents = er\n\t\tcat.d.register(hf.fragment)\n\t\ter.WaitClosed()\n\n\t\t\/\/ Chain into being a regular worker, having finished\n\t\t\/\/ the special start-up segment.\n\t\tcat.get()\n\t}()\n}\n\nfunc New(client *http.Client, u *url.URL, parallelism int) *HtCat {\n\tcat := HtCat{\n\t\tu: u,\n\t\tcl: client,\n\t}\n\n\tcat.d.initDefrag()\n\tcat.WriterTo = &cat.d\n\tcat.startup(parallelism)\n\n\tif cat.hfg.curPos == cat.hfg.totalSize {\n\t\treturn &cat\n\t}\n\n\t\/\/ Start background workers.\n\t\/\/\n\t\/\/ \"startup\" starts one worker that is specially constructed\n\t\/\/ to deal with the first request, so back off by one to\n\t\/\/ prevent performing with too much parallelism.\n\tfor i := 1; i < parallelism; i += 1 {\n\t\tgo cat.get()\n\t}\n\n\treturn &cat\n}\n\nfunc (cat *HtCat) nextFragment() *httpFrag {\n\tcat.httpFragGenMu.Lock()\n\tdefer cat.httpFragGenMu.Unlock()\n\n\tvar hf *httpFrag\n\n\tif cat.hfg.hasNext() {\n\t\tf := cat.d.nextFragment()\n\t\thf = cat.hfg.nextFragment(f)\n\t} else {\n\t\tcat.d.setLast(cat.d.lastAllocated())\n\t}\n\n\treturn hf\n}\n\nfunc (cat *HtCat) get() {\n\tfor {\n\t\thf := cat.nextFragment()\n\t\tif hf == nil {\n\t\t\treturn\n\t\t}\n\n\t\treq := http.Request{\n\t\t\tMethod: \"GET\",\n\t\t\tURL: cat.u,\n\t\t\tProto: \"HTTP\/1.1\",\n\t\t\tProtoMajor: 1,\n\t\t\tProtoMinor: 1,\n\t\t\tHeader: hf.header,\n\t\t\tBody: nil,\n\t\t\tHost: cat.u.Host,\n\t\t}\n\n\t\tresp, err := cat.cl.Do(&req)\n\t\tif err != nil {\n\t\t\tcat.d.cancel(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check for an acceptable HTTP status code.\n\t\tif !(resp.Status == \"206 Partial Content\" ||\n\t\t\tresp.Status == \"200 OK\") {\n\t\t\terr = HttpStatusError{\n\t\t\t\terror: fmt.Errorf(\"Expected HTTP Status \"+\n\t\t\t\t\t\"206 or 200, received: %q\",\n\t\t\t\t\tresp.Status),\n\t\t\t\tStatus: resp.Status}\n\t\t\tgo cat.d.cancel(err)\n\t\t\treturn\n\t\t}\n\n\t\ter := newEagerReader(resp.Body, hf.size)\n\t\thf.fragment.contents = er\n\t\tcat.d.register(hf.fragment)\n\t\ter.WaitClosed()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2012-2016 The Revel Framework Authors, All rights reserved.\n\/\/ Revel Framework source code and usage is governed by a MIT style\n\/\/ license that can be found in the LICENSE file.\n\npackage revel\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Request Revel's HTTP request object structure\ntype Request struct {\n\t*http.Request\n\tContentType string\n\tFormat string \/\/ \"html\", \"xml\", \"json\", or \"txt\"\n\tAcceptLanguages AcceptLanguages\n\tLocale string\n\tWebsocket *websocket.Conn\n}\n\n\/\/ Response Revel's HTTP response object structure\ntype Response struct {\n\tStatus int\n\tContentType string\n\n\tOut http.ResponseWriter\n}\n\n\/\/ NewResponse returns a Revel's HTTP response instance with given instance\nfunc NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{Out: w}\n}\n\n\/\/ NewRequest returns a Revel's HTTP request instance with given HTTP instance\nfunc NewRequest(r *http.Request) *Request {\n\treturn &Request{\n\t\tRequest: r,\n\t\tContentType: ResolveContentType(r),\n\t\tFormat: ResolveFormat(r),\n\t\tAcceptLanguages: ResolveAcceptLanguage(r),\n\t}\n}\n\n\/\/ WriteHeader writes the header (for now, just the status code).\n\/\/ The status may be set directly by the application (c.Response.Status = 501).\n\/\/ if it isn't, then fall back to the provided status code.\nfunc (resp *Response) WriteHeader(defaultStatusCode int, defaultContentType string) {\n\tif resp.Status == 0 {\n\t\tresp.Status = defaultStatusCode\n\t}\n\tif resp.ContentType == \"\" {\n\t\tresp.ContentType = defaultContentType\n\t}\n\tresp.Out.Header().Set(\"Content-Type\", resp.ContentType)\n\tresp.Out.WriteHeader(resp.Status)\n}\n\n\/\/ ResolveContentType gets the content type.\n\/\/ e.g. From \"multipart\/form-data; boundary=--\" to \"multipart\/form-data\"\n\/\/ If none is specified, returns \"text\/html\" by default.\nfunc ResolveContentType(req *http.Request) string {\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tif contentType == \"\" {\n\t\treturn \"text\/html\"\n\t}\n\treturn strings.ToLower(strings.TrimSpace(strings.Split(contentType, \";\")[0]))\n}\n\n\/\/ ResolveFormat maps the request's Accept MIME type declaration to\n\/\/ a Request.Format attribute, specifically \"html\", \"xml\", \"json\", or \"txt\",\n\/\/ returning a default of \"html\" when Accept header cannot be mapped to a\n\/\/ value above.\nfunc ResolveFormat(req *http.Request) string {\n\taccept := req.Header.Get(\"accept\")\n\n\tswitch {\n\tcase accept == \"\",\n\t\tstrings.HasPrefix(accept, \"*\/*\"), \/\/ *\/\n\t\tstrings.Contains(accept, \"application\/xhtml\"),\n\t\tstrings.Contains(accept, \"text\/html\"):\n\t\treturn \"html\"\n\tcase strings.Contains(accept, \"application\/json\"),\n\t\tstrings.Contains(accept, \"text\/javascript\"),\n\t\tstrings.Contains(accept, \"application\/javascript\"):\n\t\treturn \"json\"\n\tcase strings.Contains(accept, \"application\/xml\"),\n\t\tstrings.Contains(accept, \"text\/xml\"):\n\t\treturn \"xml\"\n\tcase strings.Contains(accept, \"text\/plain\"):\n\t\treturn \"txt\"\n\t}\n\n\treturn \"html\"\n}\n\n\/\/ AcceptLanguage is a single language from the Accept-Language HTTP header.\ntype AcceptLanguage struct {\n\tLanguage string\n\tQuality float32\n}\n\n\/\/ AcceptLanguages is collection of sortable AcceptLanguage instances.\ntype AcceptLanguages []AcceptLanguage\n\nfunc (al AcceptLanguages) Len() int { return len(al) }\nfunc (al AcceptLanguages) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al AcceptLanguages) Less(i, j int) bool { return al[i].Quality > al[j].Quality }\nfunc (al AcceptLanguages) String() string {\n\toutput := bytes.NewBufferString(\"\")\n\tfor i, language := range al {\n\t\tif _, err := output.WriteString(fmt.Sprintf(\"%s (%1.1f)\", language.Language, language.Quality)); err != nil {\n\t\t\tERROR.Println(\"WriteString failed:\", err)\n\t\t}\n\t\tif i != len(al)-1 {\n\t\t\tif _, err := output.WriteString(\", \"); err != nil {\n\t\t\t\tERROR.Println(\"WriteString failed:\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn output.String()\n}\n\n\/\/ ResolveAcceptLanguage returns a sorted list of Accept-Language\n\/\/ header values.\n\/\/\n\/\/ The results are sorted using the quality defined in the header for each\n\/\/ language range with the most qualified language range as the first\n\/\/ element in the slice.\n\/\/\n\/\/ See the HTTP header fields specification\n\/\/ (http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec14.html#sec14.4) for more details.\nfunc ResolveAcceptLanguage(req *http.Request) AcceptLanguages {\n\theader := req.Header.Get(\"Accept-Language\")\n\tif header == \"\" {\n\t\treturn nil\n\t}\n\n\tacceptLanguageHeaderValues := strings.Split(header, \",\")\n\tacceptLanguages := make(AcceptLanguages, len(acceptLanguageHeaderValues))\n\n\tfor i, languageRange := range acceptLanguageHeaderValues {\n\t\tif qualifiedRange := strings.Split(languageRange, \";q=\"); len(qualifiedRange) == 2 {\n\t\t\tquality, error := strconv.ParseFloat(qualifiedRange[1], 32)\n\t\t\tif error != nil {\n\t\t\t\tWARN.Printf(\"Detected malformed Accept-Language header quality in '%s', assuming quality is 1\", languageRange)\n\t\t\t\tacceptLanguages[i] = AcceptLanguage{qualifiedRange[0], 1}\n\t\t\t} else {\n\t\t\t\tacceptLanguages[i] = AcceptLanguage{qualifiedRange[0], float32(quality)}\n\t\t\t}\n\t\t} else {\n\t\t\tacceptLanguages[i] = AcceptLanguage{languageRange, 1}\n\t\t}\n\t}\n\n\tsort.Sort(acceptLanguages)\n\treturn acceptLanguages\n}\n<commit_msg>Based on the \"extension\" of the requested path, returns an appropriate content type<commit_after>\/\/ Copyright (c) 2012-2016 The Revel Framework Authors, All rights reserved.\n\/\/ Revel Framework source code and usage is governed by a MIT style\n\/\/ license that can be found in the LICENSE file.\n\npackage revel\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/websocket\"\n\t\"path\/filepath\"\n)\n\n\/\/ Request Revel's HTTP request object structure\ntype Request struct {\n\t*http.Request\n\tContentType string\n\tFormat string \/\/ \"html\", \"xml\", \"json\", or \"txt\"\n\tAcceptLanguages AcceptLanguages\n\tLocale string\n\tWebsocket *websocket.Conn\n}\n\n\/\/ Response Revel's HTTP response object structure\ntype Response struct {\n\tStatus int\n\tContentType string\n\n\tOut http.ResponseWriter\n}\n\n\/\/ NewResponse returns a Revel's HTTP response instance with given instance\nfunc NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{Out: w}\n}\n\n\/\/ NewRequest returns a Revel's HTTP request instance with given HTTP instance\nfunc NewRequest(r *http.Request) *Request {\n\treturn &Request{\n\t\tRequest: r,\n\t\tContentType: ResolveContentType(r),\n\t\tFormat: ResolveFormat(r),\n\t\tAcceptLanguages: ResolveAcceptLanguage(r),\n\t}\n}\n\n\/\/ WriteHeader writes the header (for now, just the status code).\n\/\/ The status may be set directly by the application (c.Response.Status = 501).\n\/\/ if it isn't, then fall back to the provided status code.\nfunc (resp *Response) WriteHeader(defaultStatusCode int, defaultContentType string) {\n\tif resp.Status == 0 {\n\t\tresp.Status = defaultStatusCode\n\t}\n\tif resp.ContentType == \"\" {\n\t\tresp.ContentType = defaultContentType\n\t}\n\tresp.Out.Header().Set(\"Content-Type\", resp.ContentType)\n\tresp.Out.WriteHeader(resp.Status)\n}\n\n\/\/ ResolveContentType gets the content type.\n\/\/ e.g. From \"multipart\/form-data; boundary=--\" to \"multipart\/form-data\"\n\/\/ If none is specified, returns \"text\/html\" by default.\nfunc ResolveContentType(req *http.Request) string {\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tif contentType == \"\" {\n\t\treturn \"text\/html\"\n\t}\n\treturn strings.ToLower(strings.TrimSpace(strings.Split(contentType, \";\")[0]))\n}\n\n\/\/ ResolveFormat maps the request's Accept MIME type declaration to\n\/\/ a Request.Format attribute, specifically \"html\", \"xml\", \"json\", or \"txt\",\n\/\/ returning a default of \"html\" when Accept header cannot be mapped to a\n\/\/ value above.\nfunc ResolveFormat(req *http.Request) string {\n\text := strings.ToLower(filepath.Ext(req.URL.Path))\n\tswitch ext {\n\tcase \".html\":\n\t\treturn \"html\"\n\tcase \".json\":\n\t\treturn \"json\"\n\tcase \".xml\":\n\t\treturn \"xml\"\n\tcase \".txt\":\n\t\treturn \"txt\"\n\t}\n\n\taccept := req.Header.Get(\"accept\")\n\n\tswitch {\n\tcase accept == \"\",\n\t\tstrings.HasPrefix(accept, \"*\/*\"), \/\/ *\/\n\t\tstrings.Contains(accept, \"application\/xhtml\"),\n\t\tstrings.Contains(accept, \"text\/html\"):\n\t\treturn \"html\"\n\tcase strings.Contains(accept, \"application\/json\"),\n\t\tstrings.Contains(accept, \"text\/javascript\"),\n\t\tstrings.Contains(accept, \"application\/javascript\"):\n\t\treturn \"json\"\n\tcase strings.Contains(accept, \"application\/xml\"),\n\t\tstrings.Contains(accept, \"text\/xml\"):\n\t\treturn \"xml\"\n\tcase strings.Contains(accept, \"text\/plain\"):\n\t\treturn \"txt\"\n\t}\n\n\treturn \"html\"\n}\n\n\/\/ AcceptLanguage is a single language from the Accept-Language HTTP header.\ntype AcceptLanguage struct {\n\tLanguage string\n\tQuality float32\n}\n\n\/\/ AcceptLanguages is collection of sortable AcceptLanguage instances.\ntype AcceptLanguages []AcceptLanguage\n\nfunc (al AcceptLanguages) Len() int { return len(al) }\nfunc (al AcceptLanguages) Swap(i, j int) { al[i], al[j] = al[j], al[i] }\nfunc (al AcceptLanguages) Less(i, j int) bool { return al[i].Quality > al[j].Quality }\nfunc (al AcceptLanguages) String() string {\n\toutput := bytes.NewBufferString(\"\")\n\tfor i, language := range al {\n\t\tif _, err := output.WriteString(fmt.Sprintf(\"%s (%1.1f)\", language.Language, language.Quality)); err != nil {\n\t\t\tERROR.Println(\"WriteString failed:\", err)\n\t\t}\n\t\tif i != len(al)-1 {\n\t\t\tif _, err := output.WriteString(\", \"); err != nil {\n\t\t\t\tERROR.Println(\"WriteString failed:\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn output.String()\n}\n\n\/\/ ResolveAcceptLanguage returns a sorted list of Accept-Language\n\/\/ header values.\n\/\/\n\/\/ The results are sorted using the quality defined in the header for each\n\/\/ language range with the most qualified language range as the first\n\/\/ element in the slice.\n\/\/\n\/\/ See the HTTP header fields specification\n\/\/ (http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec14.html#sec14.4) for more details.\nfunc ResolveAcceptLanguage(req *http.Request) AcceptLanguages {\n\theader := req.Header.Get(\"Accept-Language\")\n\tif header == \"\" {\n\t\treturn nil\n\t}\n\n\tacceptLanguageHeaderValues := strings.Split(header, \",\")\n\tacceptLanguages := make(AcceptLanguages, len(acceptLanguageHeaderValues))\n\n\tfor i, languageRange := range acceptLanguageHeaderValues {\n\t\tif qualifiedRange := strings.Split(languageRange, \";q=\"); len(qualifiedRange) == 2 {\n\t\t\tquality, error := strconv.ParseFloat(qualifiedRange[1], 32)\n\t\t\tif error != nil {\n\t\t\t\tWARN.Printf(\"Detected malformed Accept-Language header quality in '%s', assuming quality is 1\", languageRange)\n\t\t\t\tacceptLanguages[i] = AcceptLanguage{qualifiedRange[0], 1}\n\t\t\t} else {\n\t\t\t\tacceptLanguages[i] = AcceptLanguage{qualifiedRange[0], float32(quality)}\n\t\t\t}\n\t\t} else {\n\t\t\tacceptLanguages[i] = AcceptLanguage{languageRange, 1}\n\t\t}\n\t}\n\n\tsort.Sort(acceptLanguages)\n\treturn acceptLanguages\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n)\n\n\/\/ Store a geolocation\ntype Location struct {\n lat float64\n long float64\n}\n\n\/\/ Helper function to take care of errors\nfunc handleErrors(err error) {\n if err != nil {\n panic(err)\n }\n}\n\n\/\/ Haversine distance computation\n\/\/ Takes two locations and returns the distance between the two\nfunc haversineDistance(loc0 Location, loc1 Location) float64 {\n return 42.0\n}\n\n\/\/ Fetches location data from a string postcode\nfunc getLocationFromPostcode(postcode string) Location {\n return Location{0, 0}\n}\n\nfunc main() {\n\n}\n<commit_msg>Impl. degreesToRadians<commit_after>package main\n\nimport (\n \"math\"\n)\n\n\/\/ Store a geolocation\ntype Location struct {\n lat float64\n long float64\n}\n\n\/\/ Helper function to take care of errors\nfunc handleErrors(err error) {\n if err != nil {\n panic(err)\n }\n}\n\n\/\/ Converts degrees into radians\nfunc degreesToRadians(degrees float64) float64 {\n for degrees >= 360.0 {\n degrees -= 360\n }\n\n return degrees * (math.Pi \/ 180)\n}\n\n\/\/ Haversine distance computation\n\/\/ Takes two locations and returns the distance between the two\nfunc haversineDistance(loc0 Location, loc1 Location) float64 {\n return 42.0\n}\n\n\/\/ Fetches location data from a string postcode\nfunc getLocationFromPostcode(postcode string) Location {\n return Location{0, 0}\n}\n\nfunc main() {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package slack\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ XXX: Need to implement\ntype UserPrefs struct {\n\t\/\/ \"highlight_words\":\"\",\n\t\/\/ \"user_colors\":\"\",\n\t\/\/ \"color_names_in_list\":true,\n\t\/\/ \"growls_enabled\":true,\n\t\/\/ \"tz\":\"Europe\\\/London\",\n\t\/\/ \"push_dm_alert\":true,\n\t\/\/ \"push_mention_alert\":true,\n\t\/\/ \"push_everything\":true,\n\t\/\/ \"push_idle_wait\":2,\n\t\/\/ \"push_sound\":\"b2.mp3\",\n\t\/\/ \"push_loud_channels\":\"\",\n\t\/\/ \"push_mention_channels\":\"\",\n\t\/\/ \"push_loud_channels_set\":\"\",\n\t\/\/ \"email_alerts\":\"instant\",\n\t\/\/ \"email_alerts_sleep_until\":0,\n\t\/\/ \"email_misc\":false,\n\t\/\/ \"email_weekly\":true,\n\t\/\/ \"welcome_message_hidden\":false,\n\t\/\/ \"all_channels_loud\":true,\n\t\/\/ \"loud_channels\":\"\",\n\t\/\/ \"never_channels\":\"\",\n\t\/\/ \"loud_channels_set\":\"\",\n\t\/\/ \"show_member_presence\":true,\n\t\/\/ \"search_sort\":\"timestamp\",\n\t\/\/ \"expand_inline_imgs\":true,\n\t\/\/ \"expand_internal_inline_imgs\":true,\n\t\/\/ \"expand_snippets\":false,\n\t\/\/ \"posts_formatting_guide\":true,\n\t\/\/ \"seen_welcome_2\":true,\n\t\/\/ \"seen_ssb_prompt\":false,\n\t\/\/ \"search_only_my_channels\":false,\n\t\/\/ \"emoji_mode\":\"default\",\n\t\/\/ \"has_invited\":true,\n\t\/\/ \"has_uploaded\":false,\n\t\/\/ \"has_created_channel\":true,\n\t\/\/ \"search_exclude_channels\":\"\",\n\t\/\/ \"messages_theme\":\"default\",\n\t\/\/ \"webapp_spellcheck\":true,\n\t\/\/ \"no_joined_overlays\":false,\n\t\/\/ \"no_created_overlays\":true,\n\t\/\/ \"dropbox_enabled\":false,\n\t\/\/ \"seen_user_menu_tip_card\":true,\n\t\/\/ \"seen_team_menu_tip_card\":true,\n\t\/\/ \"seen_channel_menu_tip_card\":true,\n\t\/\/ \"seen_message_input_tip_card\":true,\n\t\/\/ \"seen_channels_tip_card\":true,\n\t\/\/ \"seen_domain_invite_reminder\":false,\n\t\/\/ \"seen_member_invite_reminder\":false,\n\t\/\/ \"seen_flexpane_tip_card\":true,\n\t\/\/ \"seen_search_input_tip_card\":true,\n\t\/\/ \"mute_sounds\":false,\n\t\/\/ \"arrow_history\":false,\n\t\/\/ \"tab_ui_return_selects\":true,\n\t\/\/ \"obey_inline_img_limit\":true,\n\t\/\/ \"new_msg_snd\":\"knock_brush.mp3\",\n\t\/\/ \"collapsible\":false,\n\t\/\/ \"collapsible_by_click\":true,\n\t\/\/ \"require_at\":false,\n\t\/\/ \"mac_ssb_bounce\":\"\",\n\t\/\/ \"mac_ssb_bullet\":true,\n\t\/\/ \"win_ssb_bullet\":true,\n\t\/\/ \"expand_non_media_attachments\":true,\n\t\/\/ \"show_typing\":true,\n\t\/\/ \"pagekeys_handled\":true,\n\t\/\/ \"last_snippet_type\":\"\",\n\t\/\/ \"display_real_names_override\":0,\n\t\/\/ \"time24\":false,\n\t\/\/ \"enter_is_special_in_tbt\":false,\n\t\/\/ \"graphic_emoticons\":false,\n\t\/\/ \"convert_emoticons\":true,\n\t\/\/ \"autoplay_chat_sounds\":true,\n\t\/\/ \"ss_emojis\":true,\n\t\/\/ \"sidebar_behavior\":\"\",\n\t\/\/ \"mark_msgs_read_immediately\":true,\n\t\/\/ \"start_scroll_at_oldest\":true,\n\t\/\/ \"snippet_editor_wrap_long_lines\":false,\n\t\/\/ \"ls_disabled\":false,\n\t\/\/ \"sidebar_theme\":\"default\",\n\t\/\/ \"sidebar_theme_custom_values\":\"\",\n\t\/\/ \"f_key_search\":false,\n\t\/\/ \"k_key_omnibox\":true,\n\t\/\/ \"speak_growls\":false,\n\t\/\/ \"mac_speak_voice\":\"com.apple.speech.synthesis.voice.Alex\",\n\t\/\/ \"mac_speak_speed\":250,\n\t\/\/ \"comma_key_prefs\":false,\n\t\/\/ \"at_channel_suppressed_channels\":\"\",\n\t\/\/ \"push_at_channel_suppressed_channels\":\"\",\n\t\/\/ \"prompted_for_email_disabling\":false,\n\t\/\/ \"full_text_extracts\":false,\n\t\/\/ \"no_text_in_notifications\":false,\n\t\/\/ \"muted_channels\":\"\",\n\t\/\/ \"no_macssb1_banner\":false,\n\t\/\/ \"privacy_policy_seen\":true,\n\t\/\/ \"search_exclude_bots\":false,\n\t\/\/ \"fuzzy_matching\":false\n}\n\ntype UserDetails struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tCreated JSONTime `json:\"created\"`\n\tManualPresence string `json:\"manual_presence\"`\n\tPrefs UserPrefs `json:\"prefs\"`\n}\n\ntype JSONTime int64\n\nfunc (t JSONTime) String() string {\n\ttm := time.Unix(int64(t), 0)\n\treturn fmt.Sprintf(\"\\\"%s\\\"\", tm.Format(\"Mon Jan _2\"))\n}\n\ntype Team struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDomain string `json:\"name\"`\n}\n\ntype Info struct {\n\tUrl string `json:\"url,omitempty\"`\n\tUser UserDetails `json:\"self,omitempty\"`\n\tTeam Team `json:\"team,omitempty\"`\n\tUsers []User `json:\"users,omitempty\"`\n\tChannels []Channel `json:\"channels,omitempty\"`\n}\n\ntype infoResponseFull struct {\n\tInfo\n\tSlackResponse\n}\n\nfunc (info Info) GetUserById(id string) *User {\n\tfor _, user := range info.Users {\n\t\tif user.Id == id {\n\t\t\treturn &user\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (info Info) GetChannelById(id string) *Channel {\n\tfor _, channel := range info.Channels {\n\t\tif channel.Id == id {\n\t\t\treturn &channel\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Add Bot parsing<commit_after>package slack\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ XXX: Need to implement\ntype UserPrefs struct {\n\t\/\/ \"highlight_words\":\"\",\n\t\/\/ \"user_colors\":\"\",\n\t\/\/ \"color_names_in_list\":true,\n\t\/\/ \"growls_enabled\":true,\n\t\/\/ \"tz\":\"Europe\\\/London\",\n\t\/\/ \"push_dm_alert\":true,\n\t\/\/ \"push_mention_alert\":true,\n\t\/\/ \"push_everything\":true,\n\t\/\/ \"push_idle_wait\":2,\n\t\/\/ \"push_sound\":\"b2.mp3\",\n\t\/\/ \"push_loud_channels\":\"\",\n\t\/\/ \"push_mention_channels\":\"\",\n\t\/\/ \"push_loud_channels_set\":\"\",\n\t\/\/ \"email_alerts\":\"instant\",\n\t\/\/ \"email_alerts_sleep_until\":0,\n\t\/\/ \"email_misc\":false,\n\t\/\/ \"email_weekly\":true,\n\t\/\/ \"welcome_message_hidden\":false,\n\t\/\/ \"all_channels_loud\":true,\n\t\/\/ \"loud_channels\":\"\",\n\t\/\/ \"never_channels\":\"\",\n\t\/\/ \"loud_channels_set\":\"\",\n\t\/\/ \"show_member_presence\":true,\n\t\/\/ \"search_sort\":\"timestamp\",\n\t\/\/ \"expand_inline_imgs\":true,\n\t\/\/ \"expand_internal_inline_imgs\":true,\n\t\/\/ \"expand_snippets\":false,\n\t\/\/ \"posts_formatting_guide\":true,\n\t\/\/ \"seen_welcome_2\":true,\n\t\/\/ \"seen_ssb_prompt\":false,\n\t\/\/ \"search_only_my_channels\":false,\n\t\/\/ \"emoji_mode\":\"default\",\n\t\/\/ \"has_invited\":true,\n\t\/\/ \"has_uploaded\":false,\n\t\/\/ \"has_created_channel\":true,\n\t\/\/ \"search_exclude_channels\":\"\",\n\t\/\/ \"messages_theme\":\"default\",\n\t\/\/ \"webapp_spellcheck\":true,\n\t\/\/ \"no_joined_overlays\":false,\n\t\/\/ \"no_created_overlays\":true,\n\t\/\/ \"dropbox_enabled\":false,\n\t\/\/ \"seen_user_menu_tip_card\":true,\n\t\/\/ \"seen_team_menu_tip_card\":true,\n\t\/\/ \"seen_channel_menu_tip_card\":true,\n\t\/\/ \"seen_message_input_tip_card\":true,\n\t\/\/ \"seen_channels_tip_card\":true,\n\t\/\/ \"seen_domain_invite_reminder\":false,\n\t\/\/ \"seen_member_invite_reminder\":false,\n\t\/\/ \"seen_flexpane_tip_card\":true,\n\t\/\/ \"seen_search_input_tip_card\":true,\n\t\/\/ \"mute_sounds\":false,\n\t\/\/ \"arrow_history\":false,\n\t\/\/ \"tab_ui_return_selects\":true,\n\t\/\/ \"obey_inline_img_limit\":true,\n\t\/\/ \"new_msg_snd\":\"knock_brush.mp3\",\n\t\/\/ \"collapsible\":false,\n\t\/\/ \"collapsible_by_click\":true,\n\t\/\/ \"require_at\":false,\n\t\/\/ \"mac_ssb_bounce\":\"\",\n\t\/\/ \"mac_ssb_bullet\":true,\n\t\/\/ \"win_ssb_bullet\":true,\n\t\/\/ \"expand_non_media_attachments\":true,\n\t\/\/ \"show_typing\":true,\n\t\/\/ \"pagekeys_handled\":true,\n\t\/\/ \"last_snippet_type\":\"\",\n\t\/\/ \"display_real_names_override\":0,\n\t\/\/ \"time24\":false,\n\t\/\/ \"enter_is_special_in_tbt\":false,\n\t\/\/ \"graphic_emoticons\":false,\n\t\/\/ \"convert_emoticons\":true,\n\t\/\/ \"autoplay_chat_sounds\":true,\n\t\/\/ \"ss_emojis\":true,\n\t\/\/ \"sidebar_behavior\":\"\",\n\t\/\/ \"mark_msgs_read_immediately\":true,\n\t\/\/ \"start_scroll_at_oldest\":true,\n\t\/\/ \"snippet_editor_wrap_long_lines\":false,\n\t\/\/ \"ls_disabled\":false,\n\t\/\/ \"sidebar_theme\":\"default\",\n\t\/\/ \"sidebar_theme_custom_values\":\"\",\n\t\/\/ \"f_key_search\":false,\n\t\/\/ \"k_key_omnibox\":true,\n\t\/\/ \"speak_growls\":false,\n\t\/\/ \"mac_speak_voice\":\"com.apple.speech.synthesis.voice.Alex\",\n\t\/\/ \"mac_speak_speed\":250,\n\t\/\/ \"comma_key_prefs\":false,\n\t\/\/ \"at_channel_suppressed_channels\":\"\",\n\t\/\/ \"push_at_channel_suppressed_channels\":\"\",\n\t\/\/ \"prompted_for_email_disabling\":false,\n\t\/\/ \"full_text_extracts\":false,\n\t\/\/ \"no_text_in_notifications\":false,\n\t\/\/ \"muted_channels\":\"\",\n\t\/\/ \"no_macssb1_banner\":false,\n\t\/\/ \"privacy_policy_seen\":true,\n\t\/\/ \"search_exclude_bots\":false,\n\t\/\/ \"fuzzy_matching\":false\n}\n\ntype UserDetails struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tCreated JSONTime `json:\"created\"`\n\tManualPresence string `json:\"manual_presence\"`\n\tPrefs UserPrefs `json:\"prefs\"`\n}\n\ntype JSONTime int64\n\nfunc (t JSONTime) String() string {\n\ttm := time.Unix(int64(t), 0)\n\treturn fmt.Sprintf(\"\\\"%s\\\"\", tm.Format(\"Mon Jan _2\"))\n}\n\ntype Team struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDomain string `json:\"name\"`\n}\n\ntype Icons struct {\n\tImage48 string `json:\"image_48\"`\n}\n\ntype Bot struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDeleted bool `json:\"deleted\"`\n\tIcons Icons `json:\"icons\"`\n}\n\ntype Info struct {\n\tUrl string `json:\"url,omitempty\"`\n\tUser UserDetails `json:\"self,omitempty\"`\n\tTeam Team `json:\"team,omitempty\"`\n\tUsers []User `json:\"users,omitempty\"`\n\tChannels []Channel `json:\"channels,omitempty\"`\n\tBots []Bot `json:\"bots,omitempty\"`\n}\n\ntype infoResponseFull struct {\n\tInfo\n\tSlackResponse\n}\n\nfunc (info Info) GetBotById(id string) *Bot {\n\tfor _, bot := range info.Bots {\n\t\tif bot.Id == id {\n\t\t\treturn &bot\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (info Info) GetUserById(id string) *User {\n\tfor _, user := range info.Users {\n\t\tif user.Id == id {\n\t\t\treturn &user\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (info Info) GetChannelById(id string) *Channel {\n\tfor _, channel := range info.Channels {\n\t\tif channel.Id == id {\n\t\t\treturn &channel\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ list runs the list subcommand, listing all the dependencies of the package\n\/\/ at the specified path, relative paths are resolved from the current working\n\/\/ directory.\nfunc list(ctx *build.Context, cwd, path string) error {\n\timps := make([]string, 0)\n\tvar parentPkg *build.Package\n\tprocess := func(pkg *build.Package, err error) error {\n\t\t\/\/ Set the parent package so child filters work properly as the\n\t\t\/\/ command recurses.\n\t\tif parentPkg == nil {\n\t\t\tparentPkg = pkg\n\t\t}\n\t\tf := listFilter(ctx, cwd, parentPkg.ImportPath, opt.child, opt.standard)\n\t\tfor _, add := range filterImports(getImports(pkg, opt.tests), f) {\n\t\t\timps = appendUnique(imps, add)\n\t\t}\n\t\treturn nil\n\t}\n\t\/\/ Compile list of unique import paths, recurse if asked.\n\tif opt.recurse {\n\t\tif abs, err := cwdAbs(cwd, path); err != nil {\n\t\t\treturn err\n\t\t} else if err := recursePackages(ctx, abs, process); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if pkg, err := getPackage(ctx, cwd, path); err != nil {\n\t\treturn err\n\t} else {\n\t\tprocess(pkg, nil)\n\t}\n\t\/\/ Output the imports\n\tsort.Strings(imps)\n\tfor _, imp := range imps {\n\t\tif opt.quite {\n\t\t\tfmt.Println(imp)\n\t\t} else if err := info(ctx, cwd, imp); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ listFilter makes an import filter for the list command for the package\n\/\/ specified by the import path.\n\/\/ Can specify whether to omit child or standard packages.\nfunc listFilter(ctx *build.Context, cwd, path string, omitChild, omitStd bool) func(i string) bool {\n\treturn func(i string) bool {\n\t\tswitch {\n\t\tcase omitChild && isChildPackage(path, i):\n\t\t\treturn false\n\t\tcase omitStd && isStandardPackage(ctx, cwd, i):\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}\n\n\/\/ info runs the info subcommand, printing information about a given package.\n\/\/ Also used by the list command to output details about imports, the quite and\n\/\/ verbose flags determine the output.\nfunc info(ctx *build.Context, cwd, path string) error {\n\tpkg, err := getPackage(ctx, cwd, path)\n\t\/\/ Error could be that the directory had multiple packages, if the\n\t\/\/ import path was determined proceed.\n\tif err != nil && len(pkg.ImportPath) == 0 {\n\t\treturn err\n\t}\n\t\/\/ Default output\n\tif len(pkg.Name) == 0 {\n\t\tprintBold(fmt.Sprintf(\"%s\", pkg.ImportPath))\n\t} else {\n\t\tprintBold(fmt.Sprintf(\"%s (%s)\", pkg.ImportPath, pkg.Name))\n\t}\n\t\/\/ Print package doc with line breaks\n\tif len(pkg.Doc) > 0 {\n\t\tprintWrap(72, pkg.Doc)\n\t} else {\n\t\tfmt.Println(\"No package documentation.\")\n\t}\n\t\/\/ Verbose output\n\tif opt.verbose {\n\t\tfmt.Println(\" Standard :\\t\", pkg.Goroot)\n\t\tfmt.Println(\" Directory :\\t\", pkg.Dir)\n\t\tif len(pkg.AllTags) > 0 {\n\t\t\tfmt.Println(\" Tags :\\t\",\n\t\t\t\tstrings.Join(pkg.AllTags, \" \"))\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>List usages of dependencies.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ list runs the list subcommand, listing all the dependencies of the package\n\/\/ at the specified path, relative paths are resolved from the current working\n\/\/ directory.\nfunc list(ctx *build.Context, cwd, path string) error {\n\timps := make(map[string][]*build.Package, 0)\n\timpKeys := make([]string, 0) \/\/ for sorting later\n\tvar parentPkg *build.Package\n\tprocess := func(pkg *build.Package, err error) error {\n\t\t\/\/ Set the parent package so child filters work properly as the\n\t\t\/\/ command recurses.\n\t\tif parentPkg == nil {\n\t\t\tparentPkg = pkg\n\t\t}\n\t\tf := listFilter(ctx, cwd, parentPkg.ImportPath, opt.child, opt.standard)\n\t\tfor _, add := range filterImports(getImports(pkg, opt.tests), f) {\n\t\t\timpKeys = appendUnique(impKeys, add)\n\t\t\t\/\/ Keep track of packages that use each import.\n\t\t\tif mentions, ok := imps[add]; ok {\n\t\t\t\timps[add] = append(mentions, pkg)\n\t\t\t} else {\n\t\t\t\timps[add] = []*build.Package{pkg}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\t\/\/ Compile list of unique import paths, recurse if asked.\n\tif opt.recurse {\n\t\tif abs, err := cwdAbs(cwd, path); err != nil {\n\t\t\treturn err\n\t\t} else if err := recursePackages(ctx, abs, process); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if pkg, err := getPackage(ctx, cwd, path); err != nil {\n\t\treturn err\n\t} else {\n\t\tprocess(pkg, nil)\n\t}\n\t\/\/ Output the imports\n\tsort.Strings(impKeys)\n\tfor _, imp := range impKeys {\n\t\tif opt.quite {\n\t\t\tfmt.Println(imp)\n\t\t} else if err := info(ctx, cwd, imp); err != nil {\n\t\t\treturn err\n\t\t} else if opt.verbose {\n\t\t\t\/\/ Output packages that use the import.\n\t\t\tfmt.Println(\"\\nUsages :\")\n\t\t\tfor _, mention := range imps[imp] {\n\t\t\t\tfmt.Printf(\"%s (%s)\\n\", mention.ImportPath, mention.Name)\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ listFilter makes an import filter for the list command for the package\n\/\/ specified by the import path.\n\/\/ Can specify whether to omit child or standard packages.\nfunc listFilter(ctx *build.Context, cwd, path string, omitChild, omitStd bool) func(i string) bool {\n\treturn func(i string) bool {\n\t\tswitch {\n\t\tcase omitChild && isChildPackage(path, i):\n\t\t\treturn false\n\t\tcase omitStd && isStandardPackage(ctx, cwd, i):\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}\n\n\/\/ info runs the info subcommand, printing information about a given package.\n\/\/ Also used by the list command to output details about imports, the quite and\n\/\/ verbose flags determine the output.\nfunc info(ctx *build.Context, cwd, path string) error {\n\tpkg, err := getPackage(ctx, cwd, path)\n\t\/\/ Error could be that the directory had multiple packages, if the\n\t\/\/ import path was determined proceed.\n\tif err != nil && len(pkg.ImportPath) == 0 {\n\t\treturn err\n\t}\n\t\/\/ Default output\n\tif len(pkg.Name) == 0 {\n\t\tprintBold(fmt.Sprintf(\"%s\", pkg.ImportPath))\n\t} else {\n\t\tprintBold(fmt.Sprintf(\"%s (%s)\", pkg.ImportPath, pkg.Name))\n\t}\n\t\/\/ Print package doc with line breaks\n\tif len(pkg.Doc) > 0 {\n\t\tprintWrap(72, pkg.Doc)\n\t} else {\n\t\tfmt.Println(\"No package documentation.\")\n\t}\n\t\/\/ Verbose output\n\tif opt.verbose {\n\t\tfmt.Printf(\"\\nStandard :\\n%t\\n\", pkg.Goroot)\n\t\tif len(pkg.Dir) > 0 {\n\t\t\tfmt.Printf(\"Directory :\\n%s\\n\", pkg.Dir)\n\t\t}\n\t\tif len(pkg.AllTags) > 0 {\n\t\t\tfmt.Printf(\"Tags :\\n%s\\n\",\n\t\t\t\tstrings.Join(pkg.AllTags, \" \"))\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package builder\n\n\/\/ nginx\nconst (\n\tNginxVersion = \"1.14.0\"\n\tNginxDownloadURLPrefix = \"https:\/\/nginx.org\/download\"\n)\n\n\/\/ pcre\nconst (\n\tPcreVersion = \"8.41\"\n\tPcreDownloadURLPrefix = \"https:\/\/ftp.pcre.org\/pub\/pcre\"\n)\n\n\/\/ openssl\nconst (\n\tOpenSSLVersion = \"1.0.2o\"\n\tOpenSSLDownloadURLPrefix = \"https:\/\/www.openssl.org\/source\"\n)\n\n\/\/ libressl\nconst (\n\tLibreSSLVersion = \"2.7.2\"\n\tLibreSSLDownloadURLPrefix = \"https:\/\/ftp.openbsd.org\/pub\/OpenBSD\/LibreSSL\"\n)\n\n\/\/ zlib\nconst (\n\tZlibVersion = \"1.2.11\"\n\tZlibDownloadURLPrefix = \"https:\/\/zlib.net\/fossils\"\n)\n\n\/\/ openResty\nconst (\n\tOpenRestyVersion = \"1.13.6.1\"\n\tOpenRestyDownloadURLPrefix = \"https:\/\/openresty.org\/download\"\n)\n\n\/\/ tengine\nconst (\n\tTengineVersion = \"2.2.2\"\n\tTengineDownloadURLPrefix = \"http:\/\/tengine.taobao.org\/download\"\n)\n\n\/\/ component enumerations\nconst (\n\tComponentNginx = iota\n\tComponentOpenResty\n\tComponentTengine\n\tComponentPcre\n\tComponentOpenSSL\n\tComponentLibreSSL\n\tComponentZlib\n\tComponentMax\n)\n<commit_msg>bumped default pcre version to 8.42.<commit_after>package builder\n\n\/\/ nginx\nconst (\n\tNginxVersion = \"1.14.0\"\n\tNginxDownloadURLPrefix = \"https:\/\/nginx.org\/download\"\n)\n\n\/\/ pcre\nconst (\n\tPcreVersion = \"8.42\"\n\tPcreDownloadURLPrefix = \"https:\/\/ftp.pcre.org\/pub\/pcre\"\n)\n\n\/\/ openssl\nconst (\n\tOpenSSLVersion = \"1.0.2o\"\n\tOpenSSLDownloadURLPrefix = \"https:\/\/www.openssl.org\/source\"\n)\n\n\/\/ libressl\nconst (\n\tLibreSSLVersion = \"2.7.2\"\n\tLibreSSLDownloadURLPrefix = \"https:\/\/ftp.openbsd.org\/pub\/OpenBSD\/LibreSSL\"\n)\n\n\/\/ zlib\nconst (\n\tZlibVersion = \"1.2.11\"\n\tZlibDownloadURLPrefix = \"https:\/\/zlib.net\/fossils\"\n)\n\n\/\/ openResty\nconst (\n\tOpenRestyVersion = \"1.13.6.1\"\n\tOpenRestyDownloadURLPrefix = \"https:\/\/openresty.org\/download\"\n)\n\n\/\/ tengine\nconst (\n\tTengineVersion = \"2.2.2\"\n\tTengineDownloadURLPrefix = \"http:\/\/tengine.taobao.org\/download\"\n)\n\n\/\/ component enumerations\nconst (\n\tComponentNginx = iota\n\tComponentOpenResty\n\tComponentTengine\n\tComponentPcre\n\tComponentOpenSSL\n\tComponentLibreSSL\n\tComponentZlib\n\tComponentMax\n)\n<|endoftext|>"} {"text":"<commit_before>package builder\n\n\/\/ nginx\nconst (\n\tNginxVersion = \"1.19.7\"\n\tNginxDownloadURLPrefix = \"https:\/\/nginx.org\/download\"\n)\n\n\/\/ pcre\nconst (\n\tPcreVersion = \"8.44\"\n\tPcreDownloadURLPrefix = \"https:\/\/ftp.pcre.org\/pub\/pcre\"\n)\n\n\/\/ openssl\nconst (\n\tOpenSSLVersion = \"1.1.1i\"\n\tOpenSSLDownloadURLPrefix = \"https:\/\/www.openssl.org\/source\"\n)\n\n\/\/ libressl\nconst (\n\tLibreSSLVersion = \"3.0.2\"\n\tLibreSSLDownloadURLPrefix = \"https:\/\/ftp.openbsd.org\/pub\/OpenBSD\/LibreSSL\"\n)\n\n\/\/ zlib\nconst (\n\tZlibVersion = \"1.2.11\"\n\tZlibDownloadURLPrefix = \"https:\/\/zlib.net\/fossils\"\n)\n\n\/\/ openResty\nconst (\n\tOpenRestyVersion = \"1.19.3.1\"\n\tOpenRestyDownloadURLPrefix = \"https:\/\/openresty.org\/download\"\n)\n\n\/\/ tengine\nconst (\n\tTengineVersion = \"2.3.2\"\n\tTengineDownloadURLPrefix = \"https:\/\/tengine.taobao.org\/download\"\n)\n\n\/\/ component enumerations\nconst (\n\tComponentNginx = iota\n\tComponentOpenResty\n\tComponentTengine\n\tComponentPcre\n\tComponentOpenSSL\n\tComponentLibreSSL\n\tComponentZlib\n\tComponentMax\n)\n<commit_msg>bumped default openssl version to 1.1.1j.<commit_after>package builder\n\n\/\/ nginx\nconst (\n\tNginxVersion = \"1.19.7\"\n\tNginxDownloadURLPrefix = \"https:\/\/nginx.org\/download\"\n)\n\n\/\/ pcre\nconst (\n\tPcreVersion = \"8.44\"\n\tPcreDownloadURLPrefix = \"https:\/\/ftp.pcre.org\/pub\/pcre\"\n)\n\n\/\/ openssl\nconst (\n\tOpenSSLVersion = \"1.1.1j\"\n\tOpenSSLDownloadURLPrefix = \"https:\/\/www.openssl.org\/source\"\n)\n\n\/\/ libressl\nconst (\n\tLibreSSLVersion = \"3.0.2\"\n\tLibreSSLDownloadURLPrefix = \"https:\/\/ftp.openbsd.org\/pub\/OpenBSD\/LibreSSL\"\n)\n\n\/\/ zlib\nconst (\n\tZlibVersion = \"1.2.11\"\n\tZlibDownloadURLPrefix = \"https:\/\/zlib.net\/fossils\"\n)\n\n\/\/ openResty\nconst (\n\tOpenRestyVersion = \"1.19.3.1\"\n\tOpenRestyDownloadURLPrefix = \"https:\/\/openresty.org\/download\"\n)\n\n\/\/ tengine\nconst (\n\tTengineVersion = \"2.3.2\"\n\tTengineDownloadURLPrefix = \"https:\/\/tengine.taobao.org\/download\"\n)\n\n\/\/ component enumerations\nconst (\n\tComponentNginx = iota\n\tComponentOpenResty\n\tComponentTengine\n\tComponentPcre\n\tComponentOpenSSL\n\tComponentLibreSSL\n\tComponentZlib\n\tComponentMax\n)\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n \t\"bytes\"\n \t\"fmt\"\n \t\"io\"\n \t\"log\"\n)\n\nvar colors []string\n\ntype color int\n\nconst (\n\tBlack = (iota + 30)\n\tRed\n\tGreen\n\tYellow\n\tBlue\n\tMagenta\n\tCyan\n\tWhite\n)\n\ntype Log struct {\n\tWorker *log.Logger\n\tColor bool\n}\n\nfunc (l *Logger) Log(level Level, calldepth int, info *Info) error {\n\tif b.Color {\n\t\tbuf := &bytes.Buffer{}\n\t\tbuf.Write([]byte(colors[level]))\n\t\tbuf.Write([]byte(info.Formatted()))\n\t\tbuf.Write([]byte(\"\\033[0m\"))\n\t\treturn b.Worker.Output(calldepth+1, buf.String())\n\t} else {\n\t\treturn b.Worker.Output(calldepth+1, info.Formatted())\n\t}\n}\n\nfunc colorString(color color) string {\n\treturn fmt.Sprintf(\"\\033[%dm\", int(color))\n}\n\nfunc initColors() {\n\tcolors = []{\n\t\tCRITICAL: colorString(Magenta),\n\t\tERROR: colorString(Red),\n\t\tWARNING: colorString(Yellow),\n\t\tNOTICE: colorString(Green),\n\t\tDEBUG: colorString(Cyan)\n\t}\n}<commit_msg>Removes `init.go`<commit_after><|endoftext|>"} {"text":"<commit_before>package handler_test\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gitpods\/gitpods\"\n\t\"github.com\/gitpods\/gitpods\/handler\"\n\t\"github.com\/gitpods\/gitpods\/store\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestUserList(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(\n\t\tt,\n\t\t`[{\"id\":\"25558000-2565-48dc-84eb-18754da2b0a2\",\"username\":\"metalmatze\",\"name\":\"Matthias Loibl\",\"email\":\"metalmatze@example.com\"},{\"id\":\"911d24ae-ad9b-4e50-bf23-9dcbdc8134c6\",\"username\":\"tboerger\",\"name\":\"Thomas Boerger\",\"email\":\"tboerger@example.com\"}]`,\n\t\tstring(content),\n\t)\n}\n\nfunc TestUserListUnauthorized(t *testing.T) {\n\tr := DefaultTestRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\", nil)\n\n\tassert.NoError(t, err)\n\tassertUnauthorized(t, res, content)\n}\n\nfunc TestUser(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(\n\t\tt,\n\t\t`{\"id\":\"25558000-2565-48dc-84eb-18754da2b0a2\",\"username\":\"metalmatze\",\"name\":\"Matthias Loibl\",\"email\":\"metalmatze@example.com\"}`,\n\t\tstring(content),\n\t)\n}\n\nfunc TestUserNotFound(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\/foobar\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusNotFound, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(t, string(handler.JsonNotFound), string(content))\n}\n\nfunc TestUserCreate(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tpayloadUser := gitpod.User{\n\t\tID: \"28195928-2e77-431b-b1fc-43f543cfdc2a\",\n\t\tUsername: \"foobar\",\n\t\tName: \"Foo Bar\",\n\t\tEmail: \"foobar@example.com\",\n\t}\n\n\tpayload, err := json.Marshal(payloadUser)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPost, \"\/api\/users\", payload)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, string(payload), string(content))\n\n\tuser, err := routerStore.UserStore.GetUser(payloadUser.Username)\n\tassert.NoError(t, err)\n\tassert.Equal(t, payloadUser, user)\n}\n\nfunc TestUserUpdate(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tuser, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tnewEmail := \"matze@example.com\"\n\tuser.Email = newEmail\n\n\tpayload, err := json.Marshal(user)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/metalmatze\", payload)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, string(payload), string(content))\n\n\t\/\/ Test if store was really updated\n\tuser, err = routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, newEmail, user.Email)\n}\n\nfunc TestUserUpdateBadRequest(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusBadRequest, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(t, `{\"message\":\"failed to unmarshal user\"}`, string(content))\n}\n\nfunc TestUserUpdateNotFound(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tuser, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tpayload, err := json.Marshal(user)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/foobar\", payload)\n\n\tassert.NoError(t, err)\n\tassertNotFoundJson(t, res, content)\n}\n\nfunc TestUserDelete(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\t_, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodDelete, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"\", string(content))\n\n\t\/\/ Test if store was really updated\n\t_, err = routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.Equal(t, err, store.UserNotFound)\n}\n\nfunc TestUserDeleteNotFound(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\n\tres, content, err := Request(r, http.MethodDelete, \"\/api\/users\/foobar\", nil)\n\tassert.NoError(t, err)\n\tassertNotFoundJson(t, res, content)\n}\n<commit_msg>Fix import of gitpods root package<commit_after>package handler_test\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gitpods\/gitpods\"\n\t\"github.com\/gitpods\/gitpods\/handler\"\n\t\"github.com\/gitpods\/gitpods\/store\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestUserList(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(\n\t\tt,\n\t\t`[{\"id\":\"25558000-2565-48dc-84eb-18754da2b0a2\",\"username\":\"metalmatze\",\"name\":\"Matthias Loibl\",\"email\":\"metalmatze@example.com\"},{\"id\":\"911d24ae-ad9b-4e50-bf23-9dcbdc8134c6\",\"username\":\"tboerger\",\"name\":\"Thomas Boerger\",\"email\":\"tboerger@example.com\"}]`,\n\t\tstring(content),\n\t)\n}\n\nfunc TestUserListUnauthorized(t *testing.T) {\n\tr := DefaultTestRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\", nil)\n\n\tassert.NoError(t, err)\n\tassertUnauthorized(t, res, content)\n}\n\nfunc TestUser(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(\n\t\tt,\n\t\t`{\"id\":\"25558000-2565-48dc-84eb-18754da2b0a2\",\"username\":\"metalmatze\",\"name\":\"Matthias Loibl\",\"email\":\"metalmatze@example.com\"}`,\n\t\tstring(content),\n\t)\n}\n\nfunc TestUserNotFound(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodGet, \"\/api\/users\/foobar\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusNotFound, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(t, string(handler.JsonNotFound), string(content))\n}\n\nfunc TestUserCreate(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tpayloadUser := gitpods.User{\n\t\tID: \"28195928-2e77-431b-b1fc-43f543cfdc2a\",\n\t\tUsername: \"foobar\",\n\t\tName: \"Foo Bar\",\n\t\tEmail: \"foobar@example.com\",\n\t}\n\n\tpayload, err := json.Marshal(payloadUser)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPost, \"\/api\/users\", payload)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, string(payload), string(content))\n\n\tuser, err := routerStore.UserStore.GetUser(payloadUser.Username)\n\tassert.NoError(t, err)\n\tassert.Equal(t, payloadUser, user)\n}\n\nfunc TestUserUpdate(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tuser, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tnewEmail := \"matze@example.com\"\n\tuser.Email = newEmail\n\n\tpayload, err := json.Marshal(user)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/metalmatze\", payload)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, string(payload), string(content))\n\n\t\/\/ Test if store was really updated\n\tuser, err = routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, newEmail, user.Email)\n}\n\nfunc TestUserUpdateBadRequest(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusBadRequest, res.StatusCode)\n\tassert.Equal(t, \"application\/json; charset=utf-8\", res.Header.Get(\"Content-Type\"))\n\tassert.JSONEq(t, `{\"message\":\"failed to unmarshal user\"}`, string(content))\n}\n\nfunc TestUserUpdateNotFound(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\tuser, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tpayload, err := json.Marshal(user)\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodPut, \"\/api\/users\/foobar\", payload)\n\n\tassert.NoError(t, err)\n\tassertNotFoundJson(t, res, content)\n}\n\nfunc TestUserDelete(t *testing.T) {\n\trouterStore := DefaultRouterStore()\n\tr := DefaultTestAuthRouterWithStore(routerStore)\n\n\t_, err := routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.NoError(t, err)\n\n\tres, content, err := Request(r, http.MethodDelete, \"\/api\/users\/metalmatze\", nil)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"\", string(content))\n\n\t\/\/ Test if store was really updated\n\t_, err = routerStore.UserStore.GetUser(\"metalmatze\")\n\tassert.Equal(t, err, store.UserNotFound)\n}\n\nfunc TestUserDeleteNotFound(t *testing.T) {\n\tr := DefaultTestAuthRouter()\n\n\tres, content, err := Request(r, http.MethodDelete, \"\/api\/users\/foobar\", nil)\n\tassert.NoError(t, err)\n\tassertNotFoundJson(t, res, content)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/Provides crypto hash Writer\/Reader support for large files or streaming. <br> Inspired by Java's DigestInputStream\/DigestOutputStream https:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/security\/DigestInputStream.html\npackage hashstream\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"errors\"\n\t\"hash\"\n\t\"io\"\n)\n\nfunc NewHashReader(r io.Reader, h hash.Hash) (*HashReader, error) {\n\tif r == nil || h == nil {\n\t\treturn nil, errors.New(\"Reader or hash are nil: no allowed\")\n\t}\n\treturn &HashReader{\n\t\treader: r,\n\t\thash: h,\n\t}, nil\n}\n\nfunc NewMD5Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, md5.New())\n}\n\nfunc NewSHA1Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha1.New())\n\n}\n\nfunc NewSHA224Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha256.New224())\n}\n\nfunc NewSHA256Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha256.New())\n}\n\nfunc NewSHA384Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha512.New384())\n}\n\nfunc NewSHA512Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha512.New())\n}\n\ntype HashReader struct {\n\treader io.Reader\n\thash hash.Hash\n}\n\nfunc (l *HashReader) Sum() []byte {\n\treturn sum(l.hash)\n}\n\nfunc sum(h hash.Hash) []byte {\n\treturn h.Sum(nil)\n}\n\nfunc (l *HashReader) Read(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\tn = -1\n\t\terr = errors.New(\"Buffer cannot be length=zero\")\n\t\treturn\n\t}\n\n\tn, err = l.reader.Read(p)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif n > 0 {\n\t\tvar buf []byte\n\t\tif n == len(p) {\n\t\t\tbuf = p\n\t\t} else {\n\t\t\tbuf = p[0:n]\n\t\t}\n\t\tn, err = l.hash.Write(buf)\n\t\tif err != nil {\n\t\t\treturn -1, nil\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Updated docs<commit_after>\/\/ Provides crypto hash Writer\/Reader support for large files or streaming.\n\/\/ Inspired by Java's DigestInputStream\/DigestOutputStream https:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/security\/DigestInputStream.html\npackage hashstream\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"errors\"\n\t\"hash\"\n\t\"io\"\n)\n\nfunc NewHashReader(r io.Reader, h hash.Hash) (*HashReader, error) {\n\tif r == nil || h == nil {\n\t\treturn nil, errors.New(\"Reader or hash are nil: no allowed\")\n\t}\n\treturn &HashReader{\n\t\treader: r,\n\t\thash: h,\n\t}, nil\n}\n\nfunc NewMD5Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, md5.New())\n}\n\nfunc NewSHA1Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha1.New())\n\n}\n\nfunc NewSHA224Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha256.New224())\n}\n\nfunc NewSHA256Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha256.New())\n}\n\nfunc NewSHA384Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha512.New384())\n}\n\nfunc NewSHA512Reader(r io.Reader) (*HashReader, error) {\n\treturn NewHashReader(r, sha512.New())\n}\n\ntype HashReader struct {\n\treader io.Reader\n\thash hash.Hash\n}\n\nfunc (l *HashReader) Sum() []byte {\n\treturn sum(l.hash)\n}\n\nfunc sum(h hash.Hash) []byte {\n\treturn h.Sum(nil)\n}\n\nfunc (l *HashReader) Read(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\tn = -1\n\t\terr = errors.New(\"Buffer cannot be length=zero\")\n\t\treturn\n\t}\n\n\tn, err = l.reader.Read(p)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif n > 0 {\n\t\tvar buf []byte\n\t\tif n == len(p) {\n\t\t\tbuf = p\n\t\t} else {\n\t\t\tbuf = p[0:n]\n\t\t}\n\t\tn, err = l.hash.Write(buf)\n\t\tif err != nil {\n\t\t\treturn -1, nil\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stevejaxon\/learning-go-lang\/hello-world\/stringutil\"\n)\n\nfunc main() {\n fmt.Printf(stringutil.Reverse(\"!oG ,olleH\"))\n}\n<commit_msg>working through the go-lang tutorial<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"math\"\n\t\"math\/rand\"\n \"math\/cmplx\"\n\t\"runtime\"\n\t\"github.com\/stevejaxon\/learning-go-lang\/hello-world\/stringutil\"\n)\n\nfunc add(x int, y int) int {\n\treturn x + y\n}\n\n\/\/ Note: When two or more consecutive named function parameters share a type, you can omit the type from all but the last\nfunc sub(x, y int) int {\n\treturn x-y\n}\n\n\/\/ Note: A function can return any number of results\nfunc swap(x, y string) (string, string) {\n\treturn y, x\n}\n\n\/\/ Note: Go's return values may be named. If so, they are treated as variables defined at the top of the function.\n\/\/ Note: A return statement without arguments returns the named return values. This is known as a \"naked\" return. (Naked return statements should be used only in short functions; they can harm readability in longer functions.)\nfunc split(sum int) (x, y int) {\n\tx = sum * 4 \/ 9\n\ty = sum - x\n\treturn\n}\n\n\/\/ Note: The var statement declares a list of variables; as in function argument lists, the type is last.\nvar foo, bar, foobar bool\n\n\/\/ Note: A var declaration can include initializers, one per variable. If an initializer is present, the type can be omitted; the variable will take the type of the initializer.\nvar c, python, java = true, false, \"no!\"\n\n\/\/ Note: variables of several types, and also that variable declarations may be \"factored\" into blocks, as with import statements.\nvar (\n\tToBe bool = false\n\tMaxInt uint64 = 1<<64 - 1\n\tz complex128 = cmplx.Sqrt(-5 + 12i)\n\tuni rune = '世'\n)\n\n\/\/ Note: Variables declared without an explicit initial value are given their zero value.\nvar i int \/\/0\nvar f float64 \/\/0\nvar b bool \/\/ false\nvar s string \/\/ \"\"\n\n\/\/ Note: The expression T(v) converts the value v to the type T. (Unlike in C, in Go assignment between items of different type requires an explicit conversion.)\nvar in int = 42\nvar fl float64 = float64(in)\nvar un uint = uint(fl)\n\n\/\/ Note: Constants are declared like variables, but with the const keyword. Constants cannot be declared using the := syntax.\nconst World = \"世界\"\n\n\/\/ Note: Go has only one looping construct, the for loop.\nfunc forLoop() {\n\tsum := 0\n\t\/\/ The init statement will often be a short variable declaration, and the variables declared there are visible only in the scope of the for statement.\n \/\/ Note: Unlike other languages like C, Java, or Javascript there are no parentheses surrounding the three components of the for statement and the braces { } are always required.\n\tfor i := 0; i < 10; i++ {\n\t\tsum += i\n\t}\n\tfmt.Println(sum)\n\n\t\/\/ Note: The init and post statement are optional.\n\tsum = 1\n\tfor ; sum < 1000; {\n\t\tsum += sum\n\t}\n\tfmt.Println(sum)\n\n\t\/\/ Note: C's 'while' is spelled for in Go.\n\tsum = 1\n\tfor sum < 1025 {\n\t\tsum += sum\n\t}\n\tfmt.Println(sum)\n}\n\n\/\/ Note: Go's if statements are like its for loops; the expression need not be surrounded by parentheses ( ) but the braces { } are required.\nfunc ifStatement() {\n\tx := 8.0\n\t\/\/ Go's if statements are like its for loops; the expression need not be surrounded by parentheses ( ) but the braces { } are required.\n\tif x > 0 {\n\t\tfmt.Println(math.Sqrt(x))\n\t}\n\n\t\/\/ Like for, the if statement can start with a short statement to execute before the condition. Variables declared by the statement are only in scope until the end of the if.\n\tif y:= math.Pow(3, 2); y < 10 {\n\t\tfmt.Println(y)\t\n\t} \n}\n\n\nfunc isNextDay(today time.Weekday, incrementBy time.Weekday) time.Weekday {\n\treturn today + incrementBy\n}\n\nfunc switchStatement() {\n\tswitch os := runtime.GOOS; os {\n\tcase \"darwin\":\n\t\tfmt.Println(\"OS X.\")\n\tcase \"linux\":\n\t\tfmt.Println(\"Linux.\")\n\t\t\/\/ Note: A case body breaks automatically, unless it ends with a fallthrough statement.\n\t\tfallthrough\n\tcase \"fake\":\n\t\tfmt.Println(\"Please Mind The Gap\")\n\tdefault:\n\t\t\/\/ freebsd, openbsd,\n\t\t\/\/ plan9, windows...\n\t\tfmt.Printf(\"%s.\", os)\n\t}\n\n\tfmt.Println(\"When's Saturday?\")\n\ttoday := time.Now().Weekday()\n\t\/\/ Note: Switch cases can evaluate functions.\n\t\/\/ Note: Switch cases evaluate cases from top to bottom, stopping when a case succeeds. \n\tswitch time.Saturday {\n\tcase isNextDay(today, 0):\n\t\tfmt.Println(\"Today.\")\n\tcase isNextDay(today, 1):\n\t\tfmt.Println(\"Tomorrow.\")\n\tcase isNextDay(today, 2):\n\t\tfmt.Println(\"In two days.\")\n\tdefault:\n\t\tfmt.Println(\"Too far away.\")\n\t}\n\n\t\/\/ Note: Switch without a condition is the same as switch true. This construct can be a clean way to write long if-then-else chains.\n\tt := time.Now()\n\tswitch {\n\tcase t.Hour() < 12:\n\t\tfmt.Println(\"Good morning!\")\n\tcase t.Hour() < 17:\n\t\tfmt.Println(\"Good afternoon.\")\n\tdefault:\n\t\tfmt.Println(\"Good evening.\")\n\t}\n}\n\n\/\/ Note: A defer statement defers the execution of a function until the surrounding function returns.\nfunc deferStatement() {\n\tdefer fmt.Println(\"world\")\n\n\tfmt.Println(\"hello\")\n\n\t\/\/ Deferred function calls are pushed onto a stack. When a function returns, its deferred calls are executed in last-in-first-out order.\n\tfmt.Println(\"counting\")\n\n\tfor i := 0; i < 10; i++ {\n\t\tdefer fmt.Println(i)\n\t}\n\n\tfmt.Println(\"done\")\n}\n\n\/\/ Note: Go has pointers. A pointer holds the memory address of a value.\n\/\/ Note: The type *T is a pointer to a T value. Its zero value is nil.\n\/\/ Note: The & operator generates a pointer to its operand.\n\/\/ Note: The * operator denotes the pointer's underlying value. This is known as \"dereferencing\" or \"indirecting\".\n\/\/ Note: Unlike C, Go has no pointer arithmetic.\n\nfunc pointers() {\n\ti, j := 42, 2701\n\n\tp := &i \/\/ point to i\n\tfmt.Println(*p) \/\/ read i through the pointer\n\t*p = 21 \/\/ set i through the pointer\n\tfmt.Println(i) \/\/ see the new value of i\n\n\tp = &j \/\/ point to j\n\t*p = *p \/ 37 \/\/ divide j through the pointer\n\tfmt.Println(j) \/\/ see the new value of j\n}\n\n\/\/ Note: A struct is a collection of fields. (And a type declaration does what you'd expect.)\ntype Vertex struct {\n\tX int\n\tY int\n}\n\nfunc structs() {\n\tfmt.Println(Vertex{1, 2})\n\n\t\/\/ Struct fields are accessed using a dot.\n\tv := Vertex{1, 2}\n\tfmt.Println(v)\n\tv.X = 4\n\tfmt.Println(v)\n\n\t\/\/ Struct fields can be accessed through a struct pointer.\n\n\t\/\/ NOTE: To access the field X of a struct when we have the struct pointer p we could write (*p).X. However, that notation is cumbersome, so the language permits us instead to write just p.X, without the explicit dereference.\n\tp := &v\n\tp.Y = 1e9\n\tfmt.Println(v)\n}\n\n\nfunc main() {\n\t\/\/ Using a user-created library\n\tfmt.Println(stringutil.Reverse(\"!oG ,olleH\"))\n\t\/\/ Using the standard time library\n\tfmt.Println(\"The time is\", time.Now())\n\t\/\/ Using the math library\n\tfmt.Println(\"My favorite number is\", rand.Intn(43))\n\tfmt.Printf(\"Now you have %g problems.\\n\", math.Sqrt(7))\n\t\/\/ Calling local functions\n\tfmt.Println(add(42, 13))\n\tfmt.Println(sub(100, 38))\n\tfmt.Println(\"returning multiple results from a function: \")\n\ta, b := swap(\"hello\", \"world\")\n\tfmt.Println(a, b)\n\tfmt.Println(\"returning multiple, naked, results from a function: \")\n\tfmt.Println(split(17))\n\t\/\/ Variables\n\tfoo = true;\n\tfoobar = true;\n\tfmt.Println(\"printing the satate of the package level variables: \")\n\tfmt.Println(foo, bar, foobar)\n\tfmt.Println(\"printing the satate of the, initialised, package level variables: \")\n\tfmt.Println(c, python, java)\n\tfmt.Println(\"printing the satate of the, initialised, function level variables: \")\n\tvar x, y, z = 100, 99.999999, \"99.99998\"\n\tfmt.Println(x, y, z)\n\t\/\/ Note: inside a function a shorthand for creating and initialising a variable exists; using ':='\n\tk := 3\n\tfmt.Printf(\"Type: %T Value: %v\\n\", k, k)\n\tfmt.Printf(\"Type: %T Value: %v\\n\", ToBe, ToBe)\n\tfmt.Printf(\"Type: %T Value: %v\\n\", MaxInt, MaxInt)\n\tfmt.Printf(\"Type: %T Value: %v\\n\", z, z)\n\tfmt.Printf(\"Type: %T Value: %v\\n\", uni, uni)\n\tfmt.Printf(\"%v %v %v %q\\n\", i, f, b, s)\n\tfmt.Println(\"printing the converted \/ casted values: \")\n\tfmt.Println(in, fl, un)\n\tfmt.Println(\"Hello\", World)\n\tfmt.Println(\"calling the for statements\")\n\tforLoop()\n\tfmt.Println(\"calling the if statements\")\n\tifStatement()\n\tfmt.Println(\"calling the switch statements\")\n\tswitchStatement()\n\tfmt.Println(\"calling the defer statements\")\n\tdeferStatement()\n\tfmt.Println(\"calling the pointer statements\")\n\tpointers()\n\tfmt.Println(\"calling the struct statements\")\n\tstructs()\n}\n<|endoftext|>"} {"text":"<commit_before>package msgpack\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nvar (\n\ttimeType = reflect.TypeOf((*time.Time)(nil)).Elem()\n)\n\nfunc init() {\n\tRegister(timeType, encodeTime, decodeTime)\n}\n\nfunc (e *Encoder) EncodeTime(tm time.Time) error {\n\tif err := e.EncodeInt64(tm.Unix()); err != nil {\n\t\treturn err\n\t}\n\treturn e.EncodeInt(tm.Nanosecond())\n}\n\nfunc (d *Decoder) DecodeTime() (time.Time, error) {\n\tsec, err := d.DecodeInt64()\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tnsec, err := d.DecodeInt64()\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn time.Unix(sec, nsec), nil\n}\n\nfunc encodeTime(e *Encoder, v reflect.Value) error {\n\ttm := v.Interface().(time.Time)\n\treturn e.EncodeTime(tm)\n}\n\nfunc decodeTime(d *Decoder, v reflect.Value) error {\n\ttm, err := d.DecodeTime()\n\tif err != nil {\n\t\treturn err\n\t}\n\tv.Set(reflect.ValueOf(tm))\n\treturn nil\n}\n<commit_msg>Encode time as slice of 2 ints.<commit_after>package msgpack\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nvar (\n\ttimeType = reflect.TypeOf((*time.Time)(nil)).Elem()\n)\n\nfunc init() {\n\tRegister(timeType, encodeTime, decodeTime)\n}\n\nfunc (e *Encoder) EncodeTime(tm time.Time) error {\n\tif err := e.W.WriteByte(0x92); err != nil {\n\t\treturn err\n\t}\n\tif err := e.EncodeInt64(tm.Unix()); err != nil {\n\t\treturn err\n\t}\n\treturn e.EncodeInt(tm.Nanosecond())\n}\n\nfunc (d *Decoder) DecodeTime() (time.Time, error) {\n\tb, err := d.R.ReadByte()\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif b != 0x92 {\n\t\treturn time.Time{}, fmt.Errorf(\"msgpack: invalid code %x decoding time\", b)\n\t}\n\n\tsec, err := d.DecodeInt64()\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tnsec, err := d.DecodeInt64()\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn time.Unix(sec, nsec), nil\n}\n\nfunc encodeTime(e *Encoder, v reflect.Value) error {\n\ttm := v.Interface().(time.Time)\n\treturn e.EncodeTime(tm)\n}\n\nfunc decodeTime(d *Decoder, v reflect.Value) error {\n\ttm, err := d.DecodeTime()\n\tif err != nil {\n\t\treturn err\n\t}\n\tv.Set(reflect.ValueOf(tm))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\tconfigurationFileName = \".toni.yml\"\n\tapiTokenHeaderName = \"X-Api-Token\"\n\tgitBranchCmd = \"git symbolic-ref --short HEAD\"\n\tgitCommitCmd = \"git rev-parse HEAD\"\n)\n\nvar usage = `toni is the CLI for Applikatoni.\n\nUsage:\n\n\ttoni [-c=<commit SHA>] [-b=<commit branch>] -t <target> -m <comment>\n\nArguments:\n\n\tREQUIRED:\n\n\t-t\t\tTarget of the deployment\n\n\t-m\t\tThe deployment comment\n\n\tOPTIONAL:\n\n\t-c\t\tThe deployment commit SHA\n\t\t\t(if unspecified toni uses the current git HEAD)\n\n\t-b\t\tThe branch of the commit SHA\n\t\t\t(if unspecified toni uses the current git HEAD)\n\nConfiguration:\n\nWhen starting up, toni tries to read the \".toni.yml\" configuration file in the\ncurrent working directoy. The configuration MUST specify the HOST, APPLICATION,\nAPI TOKEN and the STAGES of deployments.\n\nAn example \".toni.yml\" looks like this:\n\n\thost: http:\/\/toni.shippingcompany.com\n\tapplication: shippingcompany-main-application\n\tapi_token: 4fdd575f-FOOO-BAAR-af1e-ce3e9f75367d\n\tstages:\n\t production:\n\t\t- CHECK_CONNECTION\n\t\t- PRE_DEPLOYMENT\n\t\t- CODE_DEPLOYMENT\n\t\t- POST_DEPLOYMENT\n\t staging:\n\t\t- CHECK_CONNECTION\n\t\t- PRE_DEPLOYMENT\n\t\t- CODE_DEPLOYMENT\n\t\t- POST_DEPLOYMENT\n`\n\nvar (\n\ttarget string\n\tcomment string\n\tbranch string\n\tcommitSHA string\n\n\tprintHelp bool\n)\n\nvar (\n\tcurrentDeploymentLocation string\n\tcurrentDeploymentMx *sync.Mutex\n)\n\nfunc init() {\n\tflag.StringVar(&target, \"t\", \"\", \"Target of the deployment [required]\")\n\tflag.StringVar(&comment, \"m\", \"\", \"Deployment comment [required]\")\n\tflag.StringVar(&commitSHA, \"c\", \"\", \"Deployment commit\")\n\tflag.StringVar(&branch, \"b\", \"\", \"Deployment branch\")\n\n\tflag.BoolVar(&printHelp, \"h\", false, \"Print the help and usage information\")\n}\n\nfunc main() {\n\tflag.Parse()\n\tif printHelp {\n\t\tprintUsage()\n\t\tos.Exit(0)\n\t}\n\n\tconfigurationFilePath, err := filepath.Abs(configurationFileName)\n\tif err != nil {\n\t\tconfigErrorExit(\"Error when trying to open configuration file (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\tconfig, err := readConfiguration(configurationFilePath)\n\tif err != nil {\n\t\tconfigErrorExit(\"Error when trying to parse configuration file (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\terr = config.Validate()\n\tif err != nil {\n\t\tconfigErrorExit(\"Configuration file is invalid (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\tif target == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"No target specified (use -t option to specify target)\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif _, ok := config.Stages[target]; !ok {\n\t\tfmt.Fprintf(os.Stderr, \"Target %q not found in configuration file\\n\", target)\n\t\tos.Exit(1)\n\t}\n\n\tif comment == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"No comment specified (use -m option to specify comment)\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif branch == \"\" {\n\t\tbranch, err = runGitCmd(gitBranchCmd)\n\t\tif branch == \"\" || err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Getting current branch with `%s` failed:\\n\", gitBranchCmd)\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif commitSHA == \"\" {\n\t\tcommitSHA, err = runGitCmd(gitCommitCmd)\n\t\tif commitSHA == \"\" || err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Getting current commit SHA with `%s` failed:\\n\", gitCommitCmd)\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tcurrentDeploymentMx = &sync.Mutex{}\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\tgo func() {\n\t\t<-sigChan\n\t\tkillCurrentDeployment(config)\n\t}()\n\n\tfmt.Println(\"Creating deployment...\\n\")\n\tprintDeploymentAttribute(\"host\", config.Host)\n\tprintDeploymentAttribute(\"application\", config.Application)\n\tprintDeploymentAttribute(\"target\", target)\n\tprintDeploymentAttribute(\"sha\", commitSHA)\n\tprintDeploymentAttribute(\"branch\", branch)\n\tprintDeploymentAttribute(\"comment\", comment)\n\tprintDeploymentAttribute(\"stages\", strings.Join(config.Stages[target], \", \"))\n\n\tdeploymentURL, err := buildDeploymentURL(config.Host, config.Application)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"creating a deployment failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdata := buildDeploymentData(target, commitSHA, branch, comment, config.Stages[target])\n\tdeploymentLocation, err := createDeployment(config, deploymentURL, data)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"creating a deployment failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"\\nSuccessfully created. Streaming logs...\\n\")\n\n\tsetCurrentDeploymentLocation(deploymentLocation)\n\n\tlogURL, err := buildDeploymentLogURL(config.Host, deploymentLocation)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"streaming deployment logs failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = streamDeploymentLog(config, logURL)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"streaming deployment logs failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc configErrorExit(message, configPath string, err error) {\n\tfmt.Fprintf(os.Stderr, message, configPath)\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\tos.Exit(1)\n}\n\nfunc runGitCmd(gitCmd string) (string, error) {\n\targv := strings.Split(gitCmd, \" \")\n\n\tout, err := exec.Command(argv[0], argv[1:]...).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Trim(string(out), \"\\n\"), nil\n}\n\nfunc printDeploymentAttribute(name, val string) {\n\tfmt.Printf(\"\\t%-15s = %s\\n\", name, val)\n}\n\nfunc setCurrentDeploymentLocation(location string) {\n\tcurrentDeploymentMx.Lock()\n\tcurrentDeploymentLocation = location\n\tcurrentDeploymentMx.Unlock()\n}\n\nfunc killCurrentDeployment(c *Configuration) {\n\tcurrentDeploymentMx.Lock()\n\tdefer currentDeploymentMx.Unlock()\n\n\tif currentDeploymentLocation != \"\" {\n\t\tfmt.Printf(\"\\nReceived INTERRUPT - \")\n\t\tfmt.Printf(\"Sending kill request to server (%s)...\\n\\n\",\n\t\t\tcurrentDeploymentLocation)\n\n\t\tkillURL, err := buildDeploymentKillURL(c.Host, currentDeploymentLocation)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Killing deployment failed: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = killDeployment(c, killURL)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Killing deployment failed: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>Output WARNING\/ERROR if commits are unpushed<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\tconfigurationFileName = \".toni.yml\"\n\tapiTokenHeaderName = \"X-Api-Token\"\n\tgitBranchCmd = \"git symbolic-ref --short HEAD\"\n\tgitCommitCmd = \"git rev-parse HEAD\"\n\tgitUnpushedCmd = \"git cherry -v\"\n)\n\nvar usage = `toni is the CLI for Applikatoni.\n\nUsage:\n\n\ttoni [-c=<commit SHA>] [-b=<commit branch>] -t <target> -m <comment>\n\nArguments:\n\n\tREQUIRED:\n\n\t-t\t\tTarget of the deployment\n\n\t-m\t\tThe deployment comment\n\n\tOPTIONAL:\n\n\t-c\t\tThe deployment commit SHA\n\t\t\t(if unspecified toni uses the current git HEAD)\n\n\t-b\t\tThe branch of the commit SHA\n\t\t\t(if unspecified toni uses the current git HEAD)\n\nConfiguration:\n\nWhen starting up, toni tries to read the \".toni.yml\" configuration file in the\ncurrent working directoy. The configuration MUST specify the HOST, APPLICATION,\nAPI TOKEN and the STAGES of deployments.\n\nAn example \".toni.yml\" looks like this:\n\n\thost: http:\/\/toni.shippingcompany.com\n\tapplication: shippingcompany-main-application\n\tapi_token: 4fdd575f-FOOO-BAAR-af1e-ce3e9f75367d\n\tstages:\n\t production:\n\t\t- CHECK_CONNECTION\n\t\t- PRE_DEPLOYMENT\n\t\t- CODE_DEPLOYMENT\n\t\t- POST_DEPLOYMENT\n\t staging:\n\t\t- CHECK_CONNECTION\n\t\t- PRE_DEPLOYMENT\n\t\t- CODE_DEPLOYMENT\n\t\t- POST_DEPLOYMENT\n`\n\nvar (\n\ttarget string\n\tcomment string\n\tbranch string\n\tcommitSHA string\n\n\tprintHelp bool\n)\n\nvar (\n\tcurrentDeploymentLocation string\n\tcurrentDeploymentMx *sync.Mutex\n)\n\nfunc init() {\n\tflag.StringVar(&target, \"t\", \"\", \"Target of the deployment [required]\")\n\tflag.StringVar(&comment, \"m\", \"\", \"Deployment comment [required]\")\n\tflag.StringVar(&commitSHA, \"c\", \"\", \"Deployment commit\")\n\tflag.StringVar(&branch, \"b\", \"\", \"Deployment branch\")\n\n\tflag.BoolVar(&printHelp, \"h\", false, \"Print the help and usage information\")\n}\n\nfunc main() {\n\tflag.Parse()\n\tif printHelp {\n\t\tprintUsage()\n\t\tos.Exit(0)\n\t}\n\n\tconfigurationFilePath, err := filepath.Abs(configurationFileName)\n\tif err != nil {\n\t\tconfigErrorExit(\"Error when trying to open configuration file (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\tconfig, err := readConfiguration(configurationFilePath)\n\tif err != nil {\n\t\tconfigErrorExit(\"Error when trying to parse configuration file (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\terr = config.Validate()\n\tif err != nil {\n\t\tconfigErrorExit(\"Configuration file is invalid (%s):\\n\",\n\t\t\tconfigurationFileName,\n\t\t\terr)\n\t}\n\n\tif target == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"No target specified (use -t option to specify target)\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif _, ok := config.Stages[target]; !ok {\n\t\tfmt.Fprintf(os.Stderr, \"Target %q not found in configuration file\\n\", target)\n\t\tos.Exit(1)\n\t}\n\n\tif comment == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"No comment specified (use -m option to specify comment)\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif branch == \"\" {\n\t\tbranch, err = runGitCmd(gitBranchCmd)\n\t\tif branch == \"\" || err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Getting current branch with `%s` failed:\\n\", gitBranchCmd)\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif commitSHA == \"\" {\n\t\tcommitSHA, err = runGitCmd(gitCommitCmd)\n\t\tif commitSHA == \"\" || err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Getting current commit SHA with `%s` failed:\\n\", gitCommitCmd)\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tunpushed, err := runGitCmd(gitUnpushedCmd)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif strings.Contains(unpushed, commitSHA) {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s has not been pushed to remote\\n\", commitSHA)\n\t\tos.Exit(1)\n\t}\n\tif unpushed != \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"WARNING: These commits have not been pushed to remote\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", unpushed)\n\t}\n\n\tcurrentDeploymentMx = &sync.Mutex{}\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\tgo func() {\n\t\t<-sigChan\n\t\tkillCurrentDeployment(config)\n\t}()\n\n\tfmt.Println(\"Creating deployment...\\n\")\n\tprintDeploymentAttribute(\"host\", config.Host)\n\tprintDeploymentAttribute(\"application\", config.Application)\n\tprintDeploymentAttribute(\"target\", target)\n\tprintDeploymentAttribute(\"sha\", commitSHA)\n\tprintDeploymentAttribute(\"branch\", branch)\n\tprintDeploymentAttribute(\"comment\", comment)\n\tprintDeploymentAttribute(\"stages\", strings.Join(config.Stages[target], \", \"))\n\n\tdeploymentURL, err := buildDeploymentURL(config.Host, config.Application)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"creating a deployment failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdata := buildDeploymentData(target, commitSHA, branch, comment, config.Stages[target])\n\tdeploymentLocation, err := createDeployment(config, deploymentURL, data)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"creating a deployment failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"\\nSuccessfully created. Streaming logs...\\n\")\n\n\tsetCurrentDeploymentLocation(deploymentLocation)\n\n\tlogURL, err := buildDeploymentLogURL(config.Host, deploymentLocation)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"streaming deployment logs failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = streamDeploymentLog(config, logURL)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"streaming deployment logs failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc configErrorExit(message, configPath string, err error) {\n\tfmt.Fprintf(os.Stderr, message, configPath)\n\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\tos.Exit(1)\n}\n\nfunc runGitCmd(gitCmd string) (string, error) {\n\targv := strings.Split(gitCmd, \" \")\n\n\tout, err := exec.Command(argv[0], argv[1:]...).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Trim(string(out), \"\\n\"), nil\n}\n\nfunc printDeploymentAttribute(name, val string) {\n\tfmt.Printf(\"\\t%-15s = %s\\n\", name, val)\n}\n\nfunc setCurrentDeploymentLocation(location string) {\n\tcurrentDeploymentMx.Lock()\n\tcurrentDeploymentLocation = location\n\tcurrentDeploymentMx.Unlock()\n}\n\nfunc killCurrentDeployment(c *Configuration) {\n\tcurrentDeploymentMx.Lock()\n\tdefer currentDeploymentMx.Unlock()\n\n\tif currentDeploymentLocation != \"\" {\n\t\tfmt.Printf(\"\\nReceived INTERRUPT - \")\n\t\tfmt.Printf(\"Sending kill request to server (%s)...\\n\\n\",\n\t\t\tcurrentDeploymentLocation)\n\n\t\tkillURL, err := buildDeploymentKillURL(c.Host, currentDeploymentLocation)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Killing deployment failed: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = killDeployment(c, killURL)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Killing deployment failed: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage openstack\n\nimport (\n\t\"log\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraformutils\"\n\t\"github.com\/gophercloud\/gophercloud\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\/blockstorage\/v3\/volumes\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\/compute\/v2\/servers\"\n\t\"github.com\/gophercloud\/gophercloud\/pagination\"\n)\n\ntype ComputeGenerator struct {\n\tOpenStackService\n}\n\n\/\/ createResources iterate on all openstack_compute_instance_v2\nfunc (g *ComputeGenerator) createResources(list *pagination.Pager, volclient *gophercloud.ServiceClient) []terraformutils.Resource {\n\tresources := []terraformutils.Resource{}\n\n\terr := list.EachPage(func(page pagination.Page) (bool, error) {\n\t\tservers, err := servers.ExtractServers(page)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tfor _, s := range servers {\n\t\t\tvar bds = []map[string]interface{}{}\n\t\t\tvar vol []volumes.Volume\n\t\t\tt := map[string]interface{}{}\n\t\t\tif volclient != nil {\n\t\t\t\tfor _, av := range s.AttachedVolumes {\n\t\t\t\t\tonevol, err := volumes.Get(volclient, av.ID).Extract()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tvol = append(vol, *onevol)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsort.SliceStable(vol, func(i, j int) bool {\n\t\t\t\t\treturn vol[i].Attachments[0].Device < vol[j].Attachments[0].Device\n\t\t\t\t})\n\n\t\t\t\tvar bindex = 0\n\t\t\t\tvar dependsOn = \"\"\n\t\t\t\tfor _, v := range vol {\n\t\t\t\t\tif v.Bootable == \"true\" && v.VolumeImageMetadata != nil {\n\t\t\t\t\t\tbds = append(bds, map[string]interface{}{\n\t\t\t\t\t\t\t\"source_type\": \"image\",\n\t\t\t\t\t\t\t\"uuid\": v.VolumeImageMetadata[\"image_id\"],\n\t\t\t\t\t\t\t\"volume_size\": strconv.Itoa(v.Size),\n\t\t\t\t\t\t\t\"boot_index\": strconv.Itoa(bindex),\n\t\t\t\t\t\t\t\"destination_type\": \"volume\",\n\t\t\t\t\t\t\t\"delete_on_termination\": \"false\",\n\t\t\t\t\t\t})\n\t\t\t\t\t\tbindex++\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttv := map[string]interface{}{}\n\t\t\t\t\t\tif dependsOn != \"\" {\n\t\t\t\t\t\t\ttv[\"depends_on\"] = []string{dependsOn}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tname := s.Name + strings.ReplaceAll(v.Attachments[0].Device, \"\/dev\/\", \"\")\n\t\t\t\t\t\trid := s.ID + \"\/\" + v.ID\n\t\t\t\t\t\tresource := terraformutils.NewResource(\n\t\t\t\t\t\t\trid,\n\t\t\t\t\t\t\tname,\n\t\t\t\t\t\t\t\"openstack_compute_volume_attach_v2\",\n\t\t\t\t\t\t\t\"openstack\",\n\t\t\t\t\t\t\tmap[string]string{},\n\t\t\t\t\t\t\t[]string{},\n\t\t\t\t\t\t\ttv,\n\t\t\t\t\t\t)\n\t\t\t\t\t\tdependsOn = \"openstack_compute_volume_attach_v2.tfer--\" + name\n\t\t\t\t\t\ttv[\"instance_name\"] = terraformutils.TfSanitize(s.Name)\n\t\t\t\t\t\tif v.Name == \"\" {\n\t\t\t\t\t\t\tv.Name = v.ID\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttv[\"volume_name\"] = terraformutils.TfSanitize(v.Name)\n\t\t\t\t\t\tresources = append(resources, resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(bds) > 0 {\n\t\t\t\tt = map[string]interface{}{\"block_device\": bds}\n\t\t\t}\n\n\t\t\tresource := terraformutils.NewResource(\n\t\t\t\ts.ID,\n\t\t\t\ts.Name,\n\t\t\t\t\"openstack_compute_instance_v2\",\n\t\t\t\t\"openstack\",\n\t\t\t\tmap[string]string{},\n\t\t\t\t[]string{},\n\t\t\t\tt,\n\t\t\t)\n\n\t\t\tresources = append(resources, resource)\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn resources\n}\n\n\/\/ Generate TerraformResources from OpenStack API,\nfunc (g *ComputeGenerator) InitResources() error {\n\topts, err := openstack.AuthOptionsFromEnv()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprovider, err := openstack.AuthenticatedClient(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{\n\t\tRegion: g.GetArgs()[\"region\"].(string),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist := servers.List(client, nil)\n\tvolclient, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{\n\t\tRegion: g.GetArgs()[\"region\"].(string)})\n\tif err != nil {\n\t\tlog.Println(\"VolumeImageMetadata requires blockStorage API v3\")\n\t\tvolclient = nil\n\t}\n\tg.Resources = g.createResources(&list, volclient)\n\n\treturn nil\n}\n\nfunc (g *ComputeGenerator) PostConvertHook() error {\n\tfor i, r := range g.Resources {\n\t\tif r.InstanceInfo.Type == \"openstack_compute_volume_attach_v2\" {\n\t\t\tg.Resources[i].Item[\"volume_id\"] = \"${openstack_blockstorage_volume_v3.\" + r.AdditionalFields[\"volume_name\"].(string) + \".id}\"\n\t\t\tg.Resources[i].Item[\"instance_id\"] = \"${openstack_compute_instance_v2.\" + r.AdditionalFields[\"instance_name\"].(string) + \".id}\"\n\t\t\tdelete(g.Resources[i].Item, \"volume_name\")\n\t\t\tdelete(g.Resources[i].Item, \"instance_name\")\n\t\t\tdelete(g.Resources[i].Item, \"device\")\n\t\t}\n\t\tif r.InstanceInfo.Type != \"openstack_compute_instance_v2\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Copy \"all_metadata.%\" to \"metadata.%\"\n\t\tfor k, v := range g.Resources[i].InstanceState.Attributes {\n\t\t\tif strings.HasPrefix(k, \"all_metadata\") {\n\t\t\t\tnewKey := strings.Replace(k, \"all_metadata\", \"metadata\", 1)\n\t\t\t\tg.Resources[i].InstanceState.Attributes[newKey] = v\n\t\t\t}\n\t\t}\n\t\t\/\/ Replace \"all_metadata\" to \"metadata\"\n\t\t\/\/ because \"all_metadata\" field cannot be set as resource argument\n\t\tfor k, v := range g.Resources[i].Item {\n\t\t\tif strings.HasPrefix(k, \"all_metadata\") {\n\t\t\t\tnewKey := strings.Replace(k, \"all_metadata\", \"metadata\", 1)\n\t\t\t\tg.Resources[i].Item[newKey] = v\n\t\t\t\tdelete(g.Resources[i].Item, k)\n\t\t\t}\n\t\t}\n\t\tif r.AdditionalFields[\"block_device\"] != nil {\n\t\t\tbds := r.AdditionalFields[\"block_device\"].([]map[string]interface{})\n\t\t\tfor bi, bd := range bds {\n\t\t\t\tfor k, v := range bd {\n\t\t\t\t\tg.Resources[i].InstanceState.Attributes[\"block_device.\"+strconv.Itoa(bi)+\".\"+k] = v.(string)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tg.Resources[i].InstanceState.Attributes[\"block_device.#\"] = strconv.Itoa(len(bds))\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Add missing terraformutils.TfSanitize<commit_after>\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage openstack\n\nimport (\n\t\"log\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraformutils\"\n\t\"github.com\/gophercloud\/gophercloud\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\/blockstorage\/v3\/volumes\"\n\t\"github.com\/gophercloud\/gophercloud\/openstack\/compute\/v2\/servers\"\n\t\"github.com\/gophercloud\/gophercloud\/pagination\"\n)\n\ntype ComputeGenerator struct {\n\tOpenStackService\n}\n\n\/\/ createResources iterate on all openstack_compute_instance_v2\nfunc (g *ComputeGenerator) createResources(list *pagination.Pager, volclient *gophercloud.ServiceClient) []terraformutils.Resource {\n\tresources := []terraformutils.Resource{}\n\n\terr := list.EachPage(func(page pagination.Page) (bool, error) {\n\t\tservers, err := servers.ExtractServers(page)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tfor _, s := range servers {\n\t\t\tvar bds = []map[string]interface{}{}\n\t\t\tvar vol []volumes.Volume\n\t\t\tt := map[string]interface{}{}\n\t\t\tif volclient != nil {\n\t\t\t\tfor _, av := range s.AttachedVolumes {\n\t\t\t\t\tonevol, err := volumes.Get(volclient, av.ID).Extract()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tvol = append(vol, *onevol)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsort.SliceStable(vol, func(i, j int) bool {\n\t\t\t\t\treturn vol[i].Attachments[0].Device < vol[j].Attachments[0].Device\n\t\t\t\t})\n\n\t\t\t\tvar bindex = 0\n\t\t\t\tvar dependsOn = \"\"\n\t\t\t\tfor _, v := range vol {\n\t\t\t\t\tif v.Bootable == \"true\" && v.VolumeImageMetadata != nil {\n\t\t\t\t\t\tbds = append(bds, map[string]interface{}{\n\t\t\t\t\t\t\t\"source_type\": \"image\",\n\t\t\t\t\t\t\t\"uuid\": v.VolumeImageMetadata[\"image_id\"],\n\t\t\t\t\t\t\t\"volume_size\": strconv.Itoa(v.Size),\n\t\t\t\t\t\t\t\"boot_index\": strconv.Itoa(bindex),\n\t\t\t\t\t\t\t\"destination_type\": \"volume\",\n\t\t\t\t\t\t\t\"delete_on_termination\": \"false\",\n\t\t\t\t\t\t})\n\t\t\t\t\t\tbindex++\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttv := map[string]interface{}{}\n\t\t\t\t\t\tif dependsOn != \"\" {\n\t\t\t\t\t\t\ttv[\"depends_on\"] = []string{dependsOn}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tname := s.Name + strings.ReplaceAll(v.Attachments[0].Device, \"\/dev\/\", \"\")\n\t\t\t\t\t\trid := s.ID + \"\/\" + v.ID\n\t\t\t\t\t\tresource := terraformutils.NewResource(\n\t\t\t\t\t\t\trid,\n\t\t\t\t\t\t\tname,\n\t\t\t\t\t\t\t\"openstack_compute_volume_attach_v2\",\n\t\t\t\t\t\t\t\"openstack\",\n\t\t\t\t\t\t\tmap[string]string{},\n\t\t\t\t\t\t\t[]string{},\n\t\t\t\t\t\t\ttv,\n\t\t\t\t\t\t)\n\t\t\t\t\t\tdependsOn = \"openstack_compute_volume_attach_v2.\"+terraformutils.TfSanitize(name)\n\t\t\t\t\t\ttv[\"instance_name\"] = terraformutils.TfSanitize(s.Name)\n\t\t\t\t\t\tif v.Name == \"\" {\n\t\t\t\t\t\t\tv.Name = v.ID\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttv[\"volume_name\"] = terraformutils.TfSanitize(v.Name)\n\t\t\t\t\t\tresources = append(resources, resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(bds) > 0 {\n\t\t\t\tt = map[string]interface{}{\"block_device\": bds}\n\t\t\t}\n\n\t\t\tresource := terraformutils.NewResource(\n\t\t\t\ts.ID,\n\t\t\t\ts.Name,\n\t\t\t\t\"openstack_compute_instance_v2\",\n\t\t\t\t\"openstack\",\n\t\t\t\tmap[string]string{},\n\t\t\t\t[]string{},\n\t\t\t\tt,\n\t\t\t)\n\n\t\t\tresources = append(resources, resource)\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn resources\n}\n\n\/\/ Generate TerraformResources from OpenStack API,\nfunc (g *ComputeGenerator) InitResources() error {\n\topts, err := openstack.AuthOptionsFromEnv()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprovider, err := openstack.AuthenticatedClient(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{\n\t\tRegion: g.GetArgs()[\"region\"].(string),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist := servers.List(client, nil)\n\tvolclient, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{\n\t\tRegion: g.GetArgs()[\"region\"].(string)})\n\tif err != nil {\n\t\tlog.Println(\"VolumeImageMetadata requires blockStorage API v3\")\n\t\tvolclient = nil\n\t}\n\tg.Resources = g.createResources(&list, volclient)\n\n\treturn nil\n}\n\nfunc (g *ComputeGenerator) PostConvertHook() error {\n\tfor i, r := range g.Resources {\n\t\tif r.InstanceInfo.Type == \"openstack_compute_volume_attach_v2\" {\n\t\t\tg.Resources[i].Item[\"volume_id\"] = \"${openstack_blockstorage_volume_v3.\" + r.AdditionalFields[\"volume_name\"].(string) + \".id}\"\n\t\t\tg.Resources[i].Item[\"instance_id\"] = \"${openstack_compute_instance_v2.\" + r.AdditionalFields[\"instance_name\"].(string) + \".id}\"\n\t\t\tdelete(g.Resources[i].Item, \"volume_name\")\n\t\t\tdelete(g.Resources[i].Item, \"instance_name\")\n\t\t\tdelete(g.Resources[i].Item, \"device\")\n\t\t}\n\t\tif r.InstanceInfo.Type != \"openstack_compute_instance_v2\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Copy \"all_metadata.%\" to \"metadata.%\"\n\t\tfor k, v := range g.Resources[i].InstanceState.Attributes {\n\t\t\tif strings.HasPrefix(k, \"all_metadata\") {\n\t\t\t\tnewKey := strings.Replace(k, \"all_metadata\", \"metadata\", 1)\n\t\t\t\tg.Resources[i].InstanceState.Attributes[newKey] = v\n\t\t\t}\n\t\t}\n\t\t\/\/ Replace \"all_metadata\" to \"metadata\"\n\t\t\/\/ because \"all_metadata\" field cannot be set as resource argument\n\t\tfor k, v := range g.Resources[i].Item {\n\t\t\tif strings.HasPrefix(k, \"all_metadata\") {\n\t\t\t\tnewKey := strings.Replace(k, \"all_metadata\", \"metadata\", 1)\n\t\t\t\tg.Resources[i].Item[newKey] = v\n\t\t\t\tdelete(g.Resources[i].Item, k)\n\t\t\t}\n\t\t}\n\t\tif r.AdditionalFields[\"block_device\"] != nil {\n\t\t\tbds := r.AdditionalFields[\"block_device\"].([]map[string]interface{})\n\t\t\tfor bi, bd := range bds {\n\t\t\t\tfor k, v := range bd {\n\t\t\t\t\tg.Resources[i].InstanceState.Attributes[\"block_device.\"+strconv.Itoa(bi)+\".\"+k] = v.(string)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tg.Resources[i].InstanceState.Attributes[\"block_device.#\"] = strconv.Itoa(len(bds))\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage healer\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/gnuflag\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n)\n\ntype ListHealingHistoryCmd struct {\n\tfs *gnuflag.FlagSet\n\tnodeOnly bool\n\tcontainerOnly bool\n}\n\nfunc (c *ListHealingHistoryCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-list\",\n\t\tUsage: \"docker-healing-list [--node] [--container]\",\n\t\tDesc: \"List healing history for nodes or containers.\",\n\t}\n}\n\nfunc renderHistoryTable(history []HealingEvent, filter string, ctx *cmd.Context) {\n\tfmt.Fprintln(ctx.Stdout, strings.ToUpper(filter[:1])+filter[1:]+\":\")\n\theaders := cmd.Row([]string{\"Start\", \"Finish\", \"Success\", \"Failing\", \"Created\", \"Error\"})\n\tt := cmd.Table{Headers: headers}\n\tfor i := len(history) - 1; i >= 0; i-- {\n\t\tevent := history[i]\n\t\tif event.Action != filter+\"-healing\" {\n\t\t\tcontinue\n\t\t}\n\t\tdata := make([]string, 2)\n\t\tif filter == \"node\" {\n\t\t\tdata[0] = event.FailingNode.Address\n\t\t\tdata[1] = event.CreatedNode.Address\n\t\t} else {\n\t\t\tdata[0] = event.FailingContainer.ID\n\t\t\tdata[1] = event.CreatedContainer.ID\n\t\t\tif len(data[0]) > 10 {\n\t\t\t\tdata[0] = data[0][:10]\n\t\t\t}\n\t\t\tif len(data[1]) > 10 {\n\t\t\t\tdata[1] = data[1][:10]\n\t\t\t}\n\t\t}\n\t\tt.AddRow(cmd.Row([]string{\n\t\t\tevent.StartTime.Local().Format(time.Stamp),\n\t\t\tevent.EndTime.Local().Format(time.Stamp),\n\t\t\tfmt.Sprintf(\"%t\", event.Successful),\n\t\t\tdata[0],\n\t\t\tdata[1],\n\t\t\tevent.Error,\n\t\t}))\n\t}\n\tt.LineSeparator = true\n\tt.Reverse()\n\tctx.Stdout.Write(t.Bytes())\n}\n\nfunc (c *ListHealingHistoryCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tvar filter string\n\tif c.nodeOnly && !c.containerOnly {\n\t\tfilter = \"node\"\n\t}\n\tif c.containerOnly && !c.nodeOnly {\n\t\tfilter = \"container\"\n\t}\n\turl, err := cmd.GetURL(fmt.Sprintf(\"\/docker\/healing?filter=%s\", filter))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tvar history []HealingEvent\n\terr = json.NewDecoder(resp.Body).Decode(&history)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif filter != \"\" {\n\t\trenderHistoryTable(history, filter, ctx)\n\t} else {\n\t\trenderHistoryTable(history, \"node\", ctx)\n\t\trenderHistoryTable(history, \"container\", ctx)\n\t}\n\treturn nil\n}\n\nfunc (c *ListHealingHistoryCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"with-flags\", gnuflag.ContinueOnError)\n\t\tc.fs.BoolVar(&c.nodeOnly, \"node\", false, \"List only healing process started for nodes\")\n\t\tc.fs.BoolVar(&c.containerOnly, \"container\", false, \"List only healing process started for containers\")\n\t}\n\treturn c.fs\n}\n\ntype GetNodeHealingConfigCmd struct{}\n\nfunc (c *GetNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-info\",\n\t\tUsage: \"docker-healing-info\",\n\t\tDesc: \"Show the current configuration for active healing nodes.\",\n\t}\n}\n\nfunc (c *GetNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/healing\/node\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tvar conf map[string]NodeHealerConfig\n\terr = json.NewDecoder(resp.Body).Decode(&conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := func(v *int) string {\n\t\tif v == nil || *v == 0 {\n\t\t\treturn \"disabled\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%ds\", *v)\n\t}\n\tbaseConf := conf[\"\"]\n\tdelete(conf, \"\")\n\tfmt.Fprint(ctx.Stdout, \"Default:\\n\")\n\ttbl := cmd.NewTable()\n\ttbl.Headers = cmd.Row{\"Config\", \"Value\"}\n\ttbl.AddRow(cmd.Row{\"Enabled\", fmt.Sprintf(\"%v\", baseConf.Enabled != nil && *baseConf.Enabled)})\n\ttbl.AddRow(cmd.Row{\"Max unresponsive time\", v(baseConf.MaxUnresponsiveTime)})\n\ttbl.AddRow(cmd.Row{\"Max time since success\", v(baseConf.MaxTimeSinceSuccess)})\n\tfmt.Fprint(ctx.Stdout, tbl.String())\n\tif len(conf) > 0 {\n\t\tfmt.Fprintln(ctx.Stdout)\n\t}\n\tpoolNames := make([]string, 0, len(conf))\n\tfor pool := range conf {\n\t\tpoolNames = append(poolNames, pool)\n\t}\n\tsort.Strings(poolNames)\n\tfor i, name := range poolNames {\n\t\tpoolConf := conf[name]\n\t\tfmt.Fprintf(ctx.Stdout, \"Pool %q:\\n\", name)\n\t\ttbl := cmd.NewTable()\n\t\ttbl.Headers = cmd.Row{\"Config\", \"Value\", \"Inherited\"}\n\t\ttbl.AddRow(cmd.Row{\"Enabled\", fmt.Sprintf(\"%v\", poolConf.Enabled != nil && *poolConf.Enabled), strconv.FormatBool(poolConf.EnabledInherited)})\n\t\ttbl.AddRow(cmd.Row{\"Max unresponsive time\", v(poolConf.MaxUnresponsiveTime), strconv.FormatBool(poolConf.MaxUnresponsiveTimeInherited)})\n\t\ttbl.AddRow(cmd.Row{\"Max time since success\", v(poolConf.MaxTimeSinceSuccess), strconv.FormatBool(poolConf.MaxTimeSinceSuccessInherited)})\n\t\tfmt.Fprint(ctx.Stdout, tbl.String())\n\t\tif i < len(poolNames)-1 {\n\t\t\tfmt.Fprintln(ctx.Stdout)\n\t\t}\n\t}\n\treturn nil\n}\n\ntype SetNodeHealingConfigCmd struct {\n\tfs *gnuflag.FlagSet\n\tenable bool\n\tdisable bool\n\tpool string\n\tmaxUnresponsive int\n\tmaxUnsuccessful int\n}\n\nfunc (c *SetNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-update\",\n\t\tUsage: \"docker-healing-update [-p\/--pool pool] [--enable] [--disable] [--max-unresponsive <seconds>] [--max-unsuccessful <seconds>]\",\n\t\tDesc: \"Update node healing configuration\",\n\t}\n}\n\nfunc (c *SetNodeHealingConfigCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"with-flags\", gnuflag.ContinueOnError)\n\t}\n\tmsg := \"The pool name to which the configuration will apply. If unset it'll be set as default for all pools.\"\n\tc.fs.StringVar(&c.pool, \"p\", \"\", msg)\n\tc.fs.StringVar(&c.pool, \"pool\", \"\", msg)\n\tc.fs.BoolVar(&c.enable, \"enable\", false, \"Enable active node healing\")\n\tc.fs.BoolVar(&c.disable, \"disable\", false, \"Disable active node healing\")\n\tc.fs.IntVar(&c.maxUnresponsive, \"max-unresponsive\", -1, \"Number of seconds tsuru will wait for the node to notify it's alive\")\n\tc.fs.IntVar(&c.maxUnsuccessful, \"max-unsuccessful\", -1, \"Number of seconds tsuru will wait for the node to run successul checks\")\n\treturn c.fs\n}\n\nfunc (c *SetNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tif c.enable && c.disable {\n\t\treturn errors.New(\"conflicting flags --enable and --disable\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"pool\", c.pool)\n\tif c.maxUnresponsive >= 0 {\n\t\tv.Set(\"MaxUnresponsiveTime\", strconv.FormatInt(int64(c.maxUnresponsive), 10))\n\t}\n\tif c.maxUnsuccessful >= 0 {\n\t\tv.Set(\"MaxTimeSinceSuccess\", strconv.FormatInt(int64(c.maxUnsuccessful), 10))\n\t}\n\tif c.enable {\n\t\tv.Set(\"Enabled\", strconv.FormatBool(true))\n\t}\n\tif c.disable {\n\t\tv.Set(\"Enabled\", strconv.FormatBool(false))\n\t}\n\tbody := strings.NewReader(v.Encode())\n\tu, err := cmd.GetURL(\"\/docker\/healing\/node\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"POST\", u, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\t_, err = client.Do(req)\n\tif err == nil {\n\t\tfmt.Fprintln(ctx.Stdout, \"Node healing configuration successfully updated.\")\n\t}\n\treturn err\n}\n\ntype DeleteNodeHealingConfigCmd struct {\n\tcmd.ConfirmationCommand\n\tfs *gnuflag.FlagSet\n\tpool string\n\tenabled bool\n\tmaxUnresponsive bool\n\tmaxUnsuccessful bool\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-delete\",\n\t\tUsage: \"docker-healing-delete [-p\/--pool pool] [--enabled] [--max-unresponsive] [--max-unsuccessful]\",\n\t\tDesc: `Delete a node healing configuration entry.\n\nIf [[--pool]] is provided the configuration entries from the specified pool\nwill be removed and the default value will be used.\n\nIf [[--pool]] is not provided the configuration entry will be removed from the\ndefault configuration.`,\n\t}\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = c.ConfirmationCommand.Flags()\n\t}\n\tmsg := \"The pool name from where the configuration will be removed. If unset it'll delete the default healing configuration.\"\n\tc.fs.StringVar(&c.pool, \"p\", \"\", msg)\n\tc.fs.StringVar(&c.pool, \"pool\", \"\", msg)\n\tc.fs.BoolVar(&c.enabled, \"enabled\", false, \"Remove the 'enabled' configuration option\")\n\tc.fs.BoolVar(&c.maxUnresponsive, \"max-unresponsive\", false, \"Remove the 'max-unresponsive' configuration option\")\n\tc.fs.BoolVar(&c.maxUnsuccessful, \"max-unsuccessful\", false, \"Remove the 'max-unsuccessful' configuration option\")\n\treturn c.fs\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tif !(c.enabled || c.maxUnresponsive || c.maxUnsuccessful) {\n\t\treturn errors.New(\"At least one configuration option must be selected for removal.\")\n\t}\n\tmsg := \"Are you sure you want to remove %snode healing configuration%s?\"\n\tif c.pool == \"\" {\n\t\tmsg = fmt.Sprintf(msg, \"the default \", \"\")\n\t} else {\n\t\tmsg = fmt.Sprintf(msg, \"\", \" for pool \"+c.pool)\n\t}\n\tif !c.Confirm(ctx, msg) {\n\t\treturn errors.New(\"command aborted by user\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"pool\", c.pool)\n\tif c.enabled {\n\t\tv.Add(\"name\", \"enabled\")\n\t}\n\tif c.maxUnresponsive {\n\t\tv.Add(\"name\", \"maxUnresponsiveTime\")\n\t}\n\tif c.maxUnsuccessful {\n\t\tv.Add(\"name\", \"maxTimeSinceSuccess\")\n\t}\n\tu, err := cmd.GetURL(\"\/docker\/healing\/node?\" + v.Encode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Do(req)\n\tif err == nil {\n\t\tfmt.Fprintln(ctx.Stdout, \"Node healing configuration successfully removed.\")\n\t}\n\treturn err\n}\n<commit_msg>update license year<commit_after>\/\/ Copyright 2016 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage healer\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/gnuflag\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n)\n\ntype ListHealingHistoryCmd struct {\n\tfs *gnuflag.FlagSet\n\tnodeOnly bool\n\tcontainerOnly bool\n}\n\nfunc (c *ListHealingHistoryCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-list\",\n\t\tUsage: \"docker-healing-list [--node] [--container]\",\n\t\tDesc: \"List healing history for nodes or containers.\",\n\t}\n}\n\nfunc renderHistoryTable(history []HealingEvent, filter string, ctx *cmd.Context) {\n\tfmt.Fprintln(ctx.Stdout, strings.ToUpper(filter[:1])+filter[1:]+\":\")\n\theaders := cmd.Row([]string{\"Start\", \"Finish\", \"Success\", \"Failing\", \"Created\", \"Error\"})\n\tt := cmd.Table{Headers: headers}\n\tfor i := len(history) - 1; i >= 0; i-- {\n\t\tevent := history[i]\n\t\tif event.Action != filter+\"-healing\" {\n\t\t\tcontinue\n\t\t}\n\t\tdata := make([]string, 2)\n\t\tif filter == \"node\" {\n\t\t\tdata[0] = event.FailingNode.Address\n\t\t\tdata[1] = event.CreatedNode.Address\n\t\t} else {\n\t\t\tdata[0] = event.FailingContainer.ID\n\t\t\tdata[1] = event.CreatedContainer.ID\n\t\t\tif len(data[0]) > 10 {\n\t\t\t\tdata[0] = data[0][:10]\n\t\t\t}\n\t\t\tif len(data[1]) > 10 {\n\t\t\t\tdata[1] = data[1][:10]\n\t\t\t}\n\t\t}\n\t\tt.AddRow(cmd.Row([]string{\n\t\t\tevent.StartTime.Local().Format(time.Stamp),\n\t\t\tevent.EndTime.Local().Format(time.Stamp),\n\t\t\tfmt.Sprintf(\"%t\", event.Successful),\n\t\t\tdata[0],\n\t\t\tdata[1],\n\t\t\tevent.Error,\n\t\t}))\n\t}\n\tt.LineSeparator = true\n\tt.Reverse()\n\tctx.Stdout.Write(t.Bytes())\n}\n\nfunc (c *ListHealingHistoryCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tvar filter string\n\tif c.nodeOnly && !c.containerOnly {\n\t\tfilter = \"node\"\n\t}\n\tif c.containerOnly && !c.nodeOnly {\n\t\tfilter = \"container\"\n\t}\n\turl, err := cmd.GetURL(fmt.Sprintf(\"\/docker\/healing?filter=%s\", filter))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tvar history []HealingEvent\n\terr = json.NewDecoder(resp.Body).Decode(&history)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif filter != \"\" {\n\t\trenderHistoryTable(history, filter, ctx)\n\t} else {\n\t\trenderHistoryTable(history, \"node\", ctx)\n\t\trenderHistoryTable(history, \"container\", ctx)\n\t}\n\treturn nil\n}\n\nfunc (c *ListHealingHistoryCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"with-flags\", gnuflag.ContinueOnError)\n\t\tc.fs.BoolVar(&c.nodeOnly, \"node\", false, \"List only healing process started for nodes\")\n\t\tc.fs.BoolVar(&c.containerOnly, \"container\", false, \"List only healing process started for containers\")\n\t}\n\treturn c.fs\n}\n\ntype GetNodeHealingConfigCmd struct{}\n\nfunc (c *GetNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-info\",\n\t\tUsage: \"docker-healing-info\",\n\t\tDesc: \"Show the current configuration for active healing nodes.\",\n\t}\n}\n\nfunc (c *GetNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/healing\/node\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tvar conf map[string]NodeHealerConfig\n\terr = json.NewDecoder(resp.Body).Decode(&conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := func(v *int) string {\n\t\tif v == nil || *v == 0 {\n\t\t\treturn \"disabled\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%ds\", *v)\n\t}\n\tbaseConf := conf[\"\"]\n\tdelete(conf, \"\")\n\tfmt.Fprint(ctx.Stdout, \"Default:\\n\")\n\ttbl := cmd.NewTable()\n\ttbl.Headers = cmd.Row{\"Config\", \"Value\"}\n\ttbl.AddRow(cmd.Row{\"Enabled\", fmt.Sprintf(\"%v\", baseConf.Enabled != nil && *baseConf.Enabled)})\n\ttbl.AddRow(cmd.Row{\"Max unresponsive time\", v(baseConf.MaxUnresponsiveTime)})\n\ttbl.AddRow(cmd.Row{\"Max time since success\", v(baseConf.MaxTimeSinceSuccess)})\n\tfmt.Fprint(ctx.Stdout, tbl.String())\n\tif len(conf) > 0 {\n\t\tfmt.Fprintln(ctx.Stdout)\n\t}\n\tpoolNames := make([]string, 0, len(conf))\n\tfor pool := range conf {\n\t\tpoolNames = append(poolNames, pool)\n\t}\n\tsort.Strings(poolNames)\n\tfor i, name := range poolNames {\n\t\tpoolConf := conf[name]\n\t\tfmt.Fprintf(ctx.Stdout, \"Pool %q:\\n\", name)\n\t\ttbl := cmd.NewTable()\n\t\ttbl.Headers = cmd.Row{\"Config\", \"Value\", \"Inherited\"}\n\t\ttbl.AddRow(cmd.Row{\"Enabled\", fmt.Sprintf(\"%v\", poolConf.Enabled != nil && *poolConf.Enabled), strconv.FormatBool(poolConf.EnabledInherited)})\n\t\ttbl.AddRow(cmd.Row{\"Max unresponsive time\", v(poolConf.MaxUnresponsiveTime), strconv.FormatBool(poolConf.MaxUnresponsiveTimeInherited)})\n\t\ttbl.AddRow(cmd.Row{\"Max time since success\", v(poolConf.MaxTimeSinceSuccess), strconv.FormatBool(poolConf.MaxTimeSinceSuccessInherited)})\n\t\tfmt.Fprint(ctx.Stdout, tbl.String())\n\t\tif i < len(poolNames)-1 {\n\t\t\tfmt.Fprintln(ctx.Stdout)\n\t\t}\n\t}\n\treturn nil\n}\n\ntype SetNodeHealingConfigCmd struct {\n\tfs *gnuflag.FlagSet\n\tenable bool\n\tdisable bool\n\tpool string\n\tmaxUnresponsive int\n\tmaxUnsuccessful int\n}\n\nfunc (c *SetNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-update\",\n\t\tUsage: \"docker-healing-update [-p\/--pool pool] [--enable] [--disable] [--max-unresponsive <seconds>] [--max-unsuccessful <seconds>]\",\n\t\tDesc: \"Update node healing configuration\",\n\t}\n}\n\nfunc (c *SetNodeHealingConfigCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"with-flags\", gnuflag.ContinueOnError)\n\t}\n\tmsg := \"The pool name to which the configuration will apply. If unset it'll be set as default for all pools.\"\n\tc.fs.StringVar(&c.pool, \"p\", \"\", msg)\n\tc.fs.StringVar(&c.pool, \"pool\", \"\", msg)\n\tc.fs.BoolVar(&c.enable, \"enable\", false, \"Enable active node healing\")\n\tc.fs.BoolVar(&c.disable, \"disable\", false, \"Disable active node healing\")\n\tc.fs.IntVar(&c.maxUnresponsive, \"max-unresponsive\", -1, \"Number of seconds tsuru will wait for the node to notify it's alive\")\n\tc.fs.IntVar(&c.maxUnsuccessful, \"max-unsuccessful\", -1, \"Number of seconds tsuru will wait for the node to run successul checks\")\n\treturn c.fs\n}\n\nfunc (c *SetNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tif c.enable && c.disable {\n\t\treturn errors.New(\"conflicting flags --enable and --disable\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"pool\", c.pool)\n\tif c.maxUnresponsive >= 0 {\n\t\tv.Set(\"MaxUnresponsiveTime\", strconv.FormatInt(int64(c.maxUnresponsive), 10))\n\t}\n\tif c.maxUnsuccessful >= 0 {\n\t\tv.Set(\"MaxTimeSinceSuccess\", strconv.FormatInt(int64(c.maxUnsuccessful), 10))\n\t}\n\tif c.enable {\n\t\tv.Set(\"Enabled\", strconv.FormatBool(true))\n\t}\n\tif c.disable {\n\t\tv.Set(\"Enabled\", strconv.FormatBool(false))\n\t}\n\tbody := strings.NewReader(v.Encode())\n\tu, err := cmd.GetURL(\"\/docker\/healing\/node\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"POST\", u, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\t_, err = client.Do(req)\n\tif err == nil {\n\t\tfmt.Fprintln(ctx.Stdout, \"Node healing configuration successfully updated.\")\n\t}\n\treturn err\n}\n\ntype DeleteNodeHealingConfigCmd struct {\n\tcmd.ConfirmationCommand\n\tfs *gnuflag.FlagSet\n\tpool string\n\tenabled bool\n\tmaxUnresponsive bool\n\tmaxUnsuccessful bool\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"docker-healing-delete\",\n\t\tUsage: \"docker-healing-delete [-p\/--pool pool] [--enabled] [--max-unresponsive] [--max-unsuccessful]\",\n\t\tDesc: `Delete a node healing configuration entry.\n\nIf [[--pool]] is provided the configuration entries from the specified pool\nwill be removed and the default value will be used.\n\nIf [[--pool]] is not provided the configuration entry will be removed from the\ndefault configuration.`,\n\t}\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = c.ConfirmationCommand.Flags()\n\t}\n\tmsg := \"The pool name from where the configuration will be removed. If unset it'll delete the default healing configuration.\"\n\tc.fs.StringVar(&c.pool, \"p\", \"\", msg)\n\tc.fs.StringVar(&c.pool, \"pool\", \"\", msg)\n\tc.fs.BoolVar(&c.enabled, \"enabled\", false, \"Remove the 'enabled' configuration option\")\n\tc.fs.BoolVar(&c.maxUnresponsive, \"max-unresponsive\", false, \"Remove the 'max-unresponsive' configuration option\")\n\tc.fs.BoolVar(&c.maxUnsuccessful, \"max-unsuccessful\", false, \"Remove the 'max-unsuccessful' configuration option\")\n\treturn c.fs\n}\n\nfunc (c *DeleteNodeHealingConfigCmd) Run(ctx *cmd.Context, client *cmd.Client) error {\n\tif !(c.enabled || c.maxUnresponsive || c.maxUnsuccessful) {\n\t\treturn errors.New(\"At least one configuration option must be selected for removal.\")\n\t}\n\tmsg := \"Are you sure you want to remove %snode healing configuration%s?\"\n\tif c.pool == \"\" {\n\t\tmsg = fmt.Sprintf(msg, \"the default \", \"\")\n\t} else {\n\t\tmsg = fmt.Sprintf(msg, \"\", \" for pool \"+c.pool)\n\t}\n\tif !c.Confirm(ctx, msg) {\n\t\treturn errors.New(\"command aborted by user\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"pool\", c.pool)\n\tif c.enabled {\n\t\tv.Add(\"name\", \"enabled\")\n\t}\n\tif c.maxUnresponsive {\n\t\tv.Add(\"name\", \"maxUnresponsiveTime\")\n\t}\n\tif c.maxUnsuccessful {\n\t\tv.Add(\"name\", \"maxTimeSinceSuccess\")\n\t}\n\tu, err := cmd.GetURL(\"\/docker\/healing\/node?\" + v.Encode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Do(req)\n\tif err == nil {\n\t\tfmt.Fprintln(ctx.Stdout, \"Node healing configuration successfully removed.\")\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package encoding\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/binary\"\n\t\"hash\/fnv\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\t\"time\"\n\t\"v2ray.com\/core\/common\/dice\"\n\n\t\"golang.org\/x\/crypto\/chacha20poly1305\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/bitmask\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/crypto\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/task\"\n\t\"v2ray.com\/core\/proxy\/vmess\"\n)\n\ntype sessionId struct {\n\tuser [16]byte\n\tkey [16]byte\n\tnonce [16]byte\n}\n\n\/\/ SessionHistory keeps track of historical session ids, to prevent replay attacks.\ntype SessionHistory struct {\n\tsync.RWMutex\n\tcache map[sessionId]time.Time\n\ttask *task.Periodic\n}\n\n\/\/ NewSessionHistory creates a new SessionHistory object.\nfunc NewSessionHistory() *SessionHistory {\n\th := &SessionHistory{\n\t\tcache: make(map[sessionId]time.Time, 128),\n\t}\n\th.task = &task.Periodic{\n\t\tInterval: time.Second * 30,\n\t\tExecute: h.removeExpiredEntries,\n\t}\n\treturn h\n}\n\n\/\/ Close implements common.Closable.\nfunc (h *SessionHistory) Close() error {\n\treturn h.task.Close()\n}\n\nfunc (h *SessionHistory) addIfNotExits(session sessionId) bool {\n\th.Lock()\n\n\tif expire, found := h.cache[session]; found && expire.After(time.Now()) {\n\t\th.Unlock()\n\t\treturn false\n\t}\n\n\th.cache[session] = time.Now().Add(time.Minute * 3)\n\th.Unlock()\n\tcommon.Must(h.task.Start())\n\treturn true\n}\n\nfunc (h *SessionHistory) removeExpiredEntries() error {\n\tnow := time.Now()\n\n\th.Lock()\n\tdefer h.Unlock()\n\n\tif len(h.cache) == 0 {\n\t\treturn newError(\"nothing to do\")\n\t}\n\n\tfor session, expire := range h.cache {\n\t\tif expire.Before(now) {\n\t\t\tdelete(h.cache, session)\n\t\t}\n\t}\n\n\tif len(h.cache) == 0 {\n\t\th.cache = make(map[sessionId]time.Time, 128)\n\t}\n\n\treturn nil\n}\n\n\/\/ ServerSession keeps information for a session in VMess server.\ntype ServerSession struct {\n\tuserValidator *vmess.TimedUserValidator\n\tsessionHistory *SessionHistory\n\trequestBodyKey [16]byte\n\trequestBodyIV [16]byte\n\tresponseBodyKey [16]byte\n\tresponseBodyIV [16]byte\n\tresponseWriter io.Writer\n\tresponseHeader byte\n}\n\n\/\/ NewServerSession creates a new ServerSession, using the given UserValidator.\n\/\/ The ServerSession instance doesn't take ownership of the validator.\nfunc NewServerSession(validator *vmess.TimedUserValidator, sessionHistory *SessionHistory) *ServerSession {\n\treturn &ServerSession{\n\t\tuserValidator: validator,\n\t\tsessionHistory: sessionHistory,\n\t}\n}\n\nfunc parseSecurityType(b byte) protocol.SecurityType {\n\tif _, f := protocol.SecurityType_name[int32(b)]; f {\n\t\tst := protocol.SecurityType(b)\n\t\t\/\/ For backward compatibility.\n\t\tif st == protocol.SecurityType_UNKNOWN {\n\t\t\tst = protocol.SecurityType_LEGACY\n\t\t}\n\t\treturn st\n\t}\n\treturn protocol.SecurityType_UNKNOWN\n}\n\n\/\/ DecodeRequestHeader decodes and returns (if successful) a RequestHeader from an input stream.\nfunc (s *ServerSession) DecodeRequestHeader(reader io.Reader) (*protocol.RequestHeader, error) {\n\tbuffer := buf.New()\n\tbehaviorRand := dice.NewDeterministicDice(int64(s.userValidator.GetBehaviorSeed()))\n\tDrainSize := behaviorRand.Roll(387) + 16 + 38\n\treadSizeRemain := DrainSize\n\n\tdrainConnection := func(e error) error {\n\t\t\/\/We read a deterministic generated length of data before closing the connection to offset padding read pattern\n\t\treadSizeRemain -= int(buffer.Len())\n\t\tif readSizeRemain > 0 {\n\t\t\terr := s.DrainConnN(reader, readSizeRemain)\n\t\t\tif err != nil {\n\t\t\t\treturn newError(\"failed to drain connection\").Base(err).Base(e)\n\t\t\t}\n\t\t\treturn newError(\"connection drained DrainSize = \", DrainSize).Base(e)\n\t\t}\n\t\treturn e\n\t}\n\n\tdefer func() {\n\t\tbuffer.Release()\n\t}()\n\n\tif _, err := buffer.ReadFullFrom(reader, protocol.IDBytesLen); err != nil {\n\t\treturn nil, newError(\"failed to read request header\").Base(err)\n\t}\n\n\tuser, timestamp, valid := s.userValidator.Get(buffer.Bytes())\n\tif !valid {\n\t\t\/\/It is possible that we are under attack described in https:\/\/github.com\/v2ray\/v2ray-core\/issues\/2523\n\t\treturn nil, drainConnection(newError(\"invalid user\"))\n\t}\n\n\tiv := hashTimestamp(md5.New(), timestamp)\n\tvmessAccount := user.Account.(*vmess.MemoryAccount)\n\n\taesStream := crypto.NewAesDecryptionStream(vmessAccount.ID.CmdKey(), iv[:])\n\tdecryptor := crypto.NewCryptionReader(aesStream, reader)\n\n\treadSizeRemain -= int(buffer.Len())\n\tbuffer.Clear()\n\tif _, err := buffer.ReadFullFrom(decryptor, 38); err != nil {\n\t\treturn nil, newError(\"failed to read request header\").Base(err)\n\t}\n\n\trequest := &protocol.RequestHeader{\n\t\tUser: user,\n\t\tVersion: buffer.Byte(0),\n\t}\n\n\tcopy(s.requestBodyIV[:], buffer.BytesRange(1, 17)) \/\/ 16 bytes\n\tcopy(s.requestBodyKey[:], buffer.BytesRange(17, 33)) \/\/ 16 bytes\n\tvar sid sessionId\n\tcopy(sid.user[:], vmessAccount.ID.Bytes())\n\tsid.key = s.requestBodyKey\n\tsid.nonce = s.requestBodyIV\n\tif !s.sessionHistory.addIfNotExits(sid) {\n\t\treturn nil, newError(\"duplicated session id, possibly under replay attack\")\n\t}\n\n\ts.responseHeader = buffer.Byte(33) \/\/ 1 byte\n\trequest.Option = bitmask.Byte(buffer.Byte(34)) \/\/ 1 byte\n\tpadingLen := int(buffer.Byte(35) >> 4)\n\trequest.Security = parseSecurityType(buffer.Byte(35) & 0x0F)\n\t\/\/ 1 bytes reserved\n\trequest.Command = protocol.RequestCommand(buffer.Byte(37))\n\n\tswitch request.Command {\n\tcase protocol.RequestCommandMux:\n\t\trequest.Address = net.DomainAddress(\"v1.mux.cool\")\n\t\trequest.Port = 0\n\tcase protocol.RequestCommandTCP, protocol.RequestCommandUDP:\n\t\tif addr, port, err := addrParser.ReadAddressPort(buffer, decryptor); err == nil {\n\t\t\trequest.Address = addr\n\t\t\trequest.Port = port\n\t\t}\n\t}\n\n\tif padingLen > 0 {\n\t\tif _, err := buffer.ReadFullFrom(decryptor, int32(padingLen)); err != nil {\n\t\t\treturn nil, newError(\"failed to read padding\").Base(err)\n\t\t}\n\t}\n\n\tif _, err := buffer.ReadFullFrom(decryptor, 4); err != nil {\n\t\treturn nil, newError(\"failed to read checksum\").Base(err)\n\t}\n\n\tfnv1a := fnv.New32a()\n\tcommon.Must2(fnv1a.Write(buffer.BytesTo(-4)))\n\tactualHash := fnv1a.Sum32()\n\texpectedHash := binary.BigEndian.Uint32(buffer.BytesFrom(-4))\n\n\tif actualHash != expectedHash {\n\t\t\/\/It is possible that we are under attack described in https:\/\/github.com\/v2ray\/v2ray-core\/issues\/2523\n\t\treturn nil, drainConnection(newError(\"invalid auth\"))\n\t}\n\n\tif request.Address == nil {\n\t\treturn nil, newError(\"invalid remote address\")\n\t}\n\n\tif request.Security == protocol.SecurityType_UNKNOWN || request.Security == protocol.SecurityType_AUTO {\n\t\treturn nil, newError(\"unknown security type: \", request.Security)\n\t}\n\n\treturn request, nil\n}\n\n\/\/ DecodeRequestBody returns Reader from which caller can fetch decrypted body.\nfunc (s *ServerSession) DecodeRequestBody(request *protocol.RequestHeader, reader io.Reader) buf.Reader {\n\tvar sizeParser crypto.ChunkSizeDecoder = crypto.PlainChunkSizeParser{}\n\tif request.Option.Has(protocol.RequestOptionChunkMasking) {\n\t\tsizeParser = NewShakeSizeParser(s.requestBodyIV[:])\n\t}\n\tvar padding crypto.PaddingLengthGenerator\n\tif request.Option.Has(protocol.RequestOptionGlobalPadding) {\n\t\tpadding = sizeParser.(crypto.PaddingLengthGenerator)\n\t}\n\n\tswitch request.Security {\n\tcase protocol.SecurityType_NONE:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tif request.Command.TransferType() == protocol.TransferTypeStream {\n\t\t\t\treturn crypto.NewChunkStreamReader(sizeParser, reader)\n\t\t\t}\n\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(NoOpAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, protocol.TransferTypePacket, padding)\n\t\t}\n\n\t\treturn buf.NewReader(reader)\n\tcase protocol.SecurityType_LEGACY:\n\t\taesStream := crypto.NewAesDecryptionStream(s.requestBodyKey[:], s.requestBodyIV[:])\n\t\tcryptionReader := crypto.NewCryptionReader(aesStream, reader)\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(FnvAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, cryptionReader, request.Command.TransferType(), padding)\n\t\t}\n\n\t\treturn buf.NewReader(cryptionReader)\n\tcase protocol.SecurityType_AES128_GCM:\n\t\taead := crypto.NewAesGcm(s.requestBodyKey[:])\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.requestBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, request.Command.TransferType(), padding)\n\tcase protocol.SecurityType_CHACHA20_POLY1305:\n\t\taead, _ := chacha20poly1305.New(GenerateChacha20Poly1305Key(s.requestBodyKey[:]))\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.requestBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, request.Command.TransferType(), padding)\n\tdefault:\n\t\tpanic(\"Unknown security type.\")\n\t}\n}\n\n\/\/ EncodeResponseHeader writes encoded response header into the given writer.\nfunc (s *ServerSession) EncodeResponseHeader(header *protocol.ResponseHeader, writer io.Writer) {\n\ts.responseBodyKey = md5.Sum(s.requestBodyKey[:])\n\ts.responseBodyIV = md5.Sum(s.requestBodyIV[:])\n\n\taesStream := crypto.NewAesEncryptionStream(s.responseBodyKey[:], s.responseBodyIV[:])\n\tencryptionWriter := crypto.NewCryptionWriter(aesStream, writer)\n\ts.responseWriter = encryptionWriter\n\n\tcommon.Must2(encryptionWriter.Write([]byte{s.responseHeader, byte(header.Option)}))\n\terr := MarshalCommand(header.Command, encryptionWriter)\n\tif err != nil {\n\t\tcommon.Must2(encryptionWriter.Write([]byte{0x00, 0x00}))\n\t}\n}\n\n\/\/ EncodeResponseBody returns a Writer that auto-encrypt content written by caller.\nfunc (s *ServerSession) EncodeResponseBody(request *protocol.RequestHeader, writer io.Writer) buf.Writer {\n\tvar sizeParser crypto.ChunkSizeEncoder = crypto.PlainChunkSizeParser{}\n\tif request.Option.Has(protocol.RequestOptionChunkMasking) {\n\t\tsizeParser = NewShakeSizeParser(s.responseBodyIV[:])\n\t}\n\tvar padding crypto.PaddingLengthGenerator\n\tif request.Option.Has(protocol.RequestOptionGlobalPadding) {\n\t\tpadding = sizeParser.(crypto.PaddingLengthGenerator)\n\t}\n\n\tswitch request.Security {\n\tcase protocol.SecurityType_NONE:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tif request.Command.TransferType() == protocol.TransferTypeStream {\n\t\t\t\treturn crypto.NewChunkStreamWriter(sizeParser, writer)\n\t\t\t}\n\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(NoOpAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, protocol.TransferTypePacket, padding)\n\t\t}\n\n\t\treturn buf.NewWriter(writer)\n\tcase protocol.SecurityType_LEGACY:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(FnvAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, s.responseWriter, request.Command.TransferType(), padding)\n\t\t}\n\n\t\treturn &buf.SequentialWriter{Writer: s.responseWriter}\n\tcase protocol.SecurityType_AES128_GCM:\n\t\taead := crypto.NewAesGcm(s.responseBodyKey[:])\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.responseBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, request.Command.TransferType(), padding)\n\tcase protocol.SecurityType_CHACHA20_POLY1305:\n\t\taead, _ := chacha20poly1305.New(GenerateChacha20Poly1305Key(s.responseBodyKey[:]))\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.responseBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, request.Command.TransferType(), padding)\n\tdefault:\n\t\tpanic(\"Unknown security type.\")\n\t}\n}\n\nfunc (s *ServerSession) DrainConnN(reader io.Reader, n int) error {\n\t_, err := io.CopyN(ioutil.Discard, reader, int64(n))\n\treturn err\n}\n<commit_msg>Drain Connection Based on uuid based behavior seed(skip auth info drain for now)<commit_after>package encoding\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/binary\"\n\t\"hash\/fnv\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\t\"time\"\n\t\"v2ray.com\/core\/common\/dice\"\n\n\t\"golang.org\/x\/crypto\/chacha20poly1305\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/bitmask\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/crypto\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/task\"\n\t\"v2ray.com\/core\/proxy\/vmess\"\n)\n\ntype sessionId struct {\n\tuser [16]byte\n\tkey [16]byte\n\tnonce [16]byte\n}\n\n\/\/ SessionHistory keeps track of historical session ids, to prevent replay attacks.\ntype SessionHistory struct {\n\tsync.RWMutex\n\tcache map[sessionId]time.Time\n\ttask *task.Periodic\n}\n\n\/\/ NewSessionHistory creates a new SessionHistory object.\nfunc NewSessionHistory() *SessionHistory {\n\th := &SessionHistory{\n\t\tcache: make(map[sessionId]time.Time, 128),\n\t}\n\th.task = &task.Periodic{\n\t\tInterval: time.Second * 30,\n\t\tExecute: h.removeExpiredEntries,\n\t}\n\treturn h\n}\n\n\/\/ Close implements common.Closable.\nfunc (h *SessionHistory) Close() error {\n\treturn h.task.Close()\n}\n\nfunc (h *SessionHistory) addIfNotExits(session sessionId) bool {\n\th.Lock()\n\n\tif expire, found := h.cache[session]; found && expire.After(time.Now()) {\n\t\th.Unlock()\n\t\treturn false\n\t}\n\n\th.cache[session] = time.Now().Add(time.Minute * 3)\n\th.Unlock()\n\tcommon.Must(h.task.Start())\n\treturn true\n}\n\nfunc (h *SessionHistory) removeExpiredEntries() error {\n\tnow := time.Now()\n\n\th.Lock()\n\tdefer h.Unlock()\n\n\tif len(h.cache) == 0 {\n\t\treturn newError(\"nothing to do\")\n\t}\n\n\tfor session, expire := range h.cache {\n\t\tif expire.Before(now) {\n\t\t\tdelete(h.cache, session)\n\t\t}\n\t}\n\n\tif len(h.cache) == 0 {\n\t\th.cache = make(map[sessionId]time.Time, 128)\n\t}\n\n\treturn nil\n}\n\n\/\/ ServerSession keeps information for a session in VMess server.\ntype ServerSession struct {\n\tuserValidator *vmess.TimedUserValidator\n\tsessionHistory *SessionHistory\n\trequestBodyKey [16]byte\n\trequestBodyIV [16]byte\n\tresponseBodyKey [16]byte\n\tresponseBodyIV [16]byte\n\tresponseWriter io.Writer\n\tresponseHeader byte\n}\n\n\/\/ NewServerSession creates a new ServerSession, using the given UserValidator.\n\/\/ The ServerSession instance doesn't take ownership of the validator.\nfunc NewServerSession(validator *vmess.TimedUserValidator, sessionHistory *SessionHistory) *ServerSession {\n\treturn &ServerSession{\n\t\tuserValidator: validator,\n\t\tsessionHistory: sessionHistory,\n\t}\n}\n\nfunc parseSecurityType(b byte) protocol.SecurityType {\n\tif _, f := protocol.SecurityType_name[int32(b)]; f {\n\t\tst := protocol.SecurityType(b)\n\t\t\/\/ For backward compatibility.\n\t\tif st == protocol.SecurityType_UNKNOWN {\n\t\t\tst = protocol.SecurityType_LEGACY\n\t\t}\n\t\treturn st\n\t}\n\treturn protocol.SecurityType_UNKNOWN\n}\n\n\/\/ DecodeRequestHeader decodes and returns (if successful) a RequestHeader from an input stream.\nfunc (s *ServerSession) DecodeRequestHeader(reader io.Reader) (*protocol.RequestHeader, error) {\n\tbuffer := buf.New()\n\tbehaviorRand := dice.NewDeterministicDice(int64(s.userValidator.GetBehaviorSeed()))\n\tDrainSize := behaviorRand.Roll(387) + 16 + 38\n\treadSizeRemain := DrainSize\n\n\tdrainConnection := func(e error) error {\n\t\t\/\/We read a deterministic generated length of data before closing the connection to offset padding read pattern\n\t\treadSizeRemain -= int(buffer.Len())\n\t\tif readSizeRemain > 0 {\n\t\t\terr := s.DrainConnN(reader, readSizeRemain)\n\t\t\tif err != nil {\n\t\t\t\treturn newError(\"failed to drain connection\").Base(err).Base(e)\n\t\t\t}\n\t\t\treturn newError(\"connection drained DrainSize = \", DrainSize).Base(e)\n\t\t}\n\t\treturn e\n\t}\n\n\tdefer func() {\n\t\tbuffer.Release()\n\t}()\n\n\tif _, err := buffer.ReadFullFrom(reader, protocol.IDBytesLen); err != nil {\n\t\treturn nil, newError(\"failed to read request header\").Base(err)\n\t}\n\n\tuser, timestamp, valid := s.userValidator.Get(buffer.Bytes())\n\tif !valid {\n\t\treturn nil, newError(\"invalid user\")\n\t}\n\n\tiv := hashTimestamp(md5.New(), timestamp)\n\tvmessAccount := user.Account.(*vmess.MemoryAccount)\n\n\taesStream := crypto.NewAesDecryptionStream(vmessAccount.ID.CmdKey(), iv[:])\n\tdecryptor := crypto.NewCryptionReader(aesStream, reader)\n\n\treadSizeRemain -= int(buffer.Len())\n\tbuffer.Clear()\n\tif _, err := buffer.ReadFullFrom(decryptor, 38); err != nil {\n\t\treturn nil, newError(\"failed to read request header\").Base(err)\n\t}\n\n\trequest := &protocol.RequestHeader{\n\t\tUser: user,\n\t\tVersion: buffer.Byte(0),\n\t}\n\n\tcopy(s.requestBodyIV[:], buffer.BytesRange(1, 17)) \/\/ 16 bytes\n\tcopy(s.requestBodyKey[:], buffer.BytesRange(17, 33)) \/\/ 16 bytes\n\tvar sid sessionId\n\tcopy(sid.user[:], vmessAccount.ID.Bytes())\n\tsid.key = s.requestBodyKey\n\tsid.nonce = s.requestBodyIV\n\tif !s.sessionHistory.addIfNotExits(sid) {\n\t\treturn nil, newError(\"duplicated session id, possibly under replay attack\")\n\t}\n\n\ts.responseHeader = buffer.Byte(33) \/\/ 1 byte\n\trequest.Option = bitmask.Byte(buffer.Byte(34)) \/\/ 1 byte\n\tpadingLen := int(buffer.Byte(35) >> 4)\n\trequest.Security = parseSecurityType(buffer.Byte(35) & 0x0F)\n\t\/\/ 1 bytes reserved\n\trequest.Command = protocol.RequestCommand(buffer.Byte(37))\n\n\tswitch request.Command {\n\tcase protocol.RequestCommandMux:\n\t\trequest.Address = net.DomainAddress(\"v1.mux.cool\")\n\t\trequest.Port = 0\n\tcase protocol.RequestCommandTCP, protocol.RequestCommandUDP:\n\t\tif addr, port, err := addrParser.ReadAddressPort(buffer, decryptor); err == nil {\n\t\t\trequest.Address = addr\n\t\t\trequest.Port = port\n\t\t}\n\t}\n\n\tif padingLen > 0 {\n\t\tif _, err := buffer.ReadFullFrom(decryptor, int32(padingLen)); err != nil {\n\t\t\treturn nil, newError(\"failed to read padding\").Base(err)\n\t\t}\n\t}\n\n\tif _, err := buffer.ReadFullFrom(decryptor, 4); err != nil {\n\t\treturn nil, newError(\"failed to read checksum\").Base(err)\n\t}\n\n\tfnv1a := fnv.New32a()\n\tcommon.Must2(fnv1a.Write(buffer.BytesTo(-4)))\n\tactualHash := fnv1a.Sum32()\n\texpectedHash := binary.BigEndian.Uint32(buffer.BytesFrom(-4))\n\n\tif actualHash != expectedHash {\n\t\t\/\/It is possible that we are under attack described in https:\/\/github.com\/v2ray\/v2ray-core\/issues\/2523\n\t\treturn nil, drainConnection(newError(\"invalid auth\"))\n\t}\n\n\tif request.Address == nil {\n\t\treturn nil, newError(\"invalid remote address\")\n\t}\n\n\tif request.Security == protocol.SecurityType_UNKNOWN || request.Security == protocol.SecurityType_AUTO {\n\t\treturn nil, newError(\"unknown security type: \", request.Security)\n\t}\n\n\treturn request, nil\n}\n\n\/\/ DecodeRequestBody returns Reader from which caller can fetch decrypted body.\nfunc (s *ServerSession) DecodeRequestBody(request *protocol.RequestHeader, reader io.Reader) buf.Reader {\n\tvar sizeParser crypto.ChunkSizeDecoder = crypto.PlainChunkSizeParser{}\n\tif request.Option.Has(protocol.RequestOptionChunkMasking) {\n\t\tsizeParser = NewShakeSizeParser(s.requestBodyIV[:])\n\t}\n\tvar padding crypto.PaddingLengthGenerator\n\tif request.Option.Has(protocol.RequestOptionGlobalPadding) {\n\t\tpadding = sizeParser.(crypto.PaddingLengthGenerator)\n\t}\n\n\tswitch request.Security {\n\tcase protocol.SecurityType_NONE:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tif request.Command.TransferType() == protocol.TransferTypeStream {\n\t\t\t\treturn crypto.NewChunkStreamReader(sizeParser, reader)\n\t\t\t}\n\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(NoOpAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, protocol.TransferTypePacket, padding)\n\t\t}\n\n\t\treturn buf.NewReader(reader)\n\tcase protocol.SecurityType_LEGACY:\n\t\taesStream := crypto.NewAesDecryptionStream(s.requestBodyKey[:], s.requestBodyIV[:])\n\t\tcryptionReader := crypto.NewCryptionReader(aesStream, reader)\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(FnvAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, cryptionReader, request.Command.TransferType(), padding)\n\t\t}\n\n\t\treturn buf.NewReader(cryptionReader)\n\tcase protocol.SecurityType_AES128_GCM:\n\t\taead := crypto.NewAesGcm(s.requestBodyKey[:])\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.requestBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, request.Command.TransferType(), padding)\n\tcase protocol.SecurityType_CHACHA20_POLY1305:\n\t\taead, _ := chacha20poly1305.New(GenerateChacha20Poly1305Key(s.requestBodyKey[:]))\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.requestBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationReader(auth, sizeParser, reader, request.Command.TransferType(), padding)\n\tdefault:\n\t\tpanic(\"Unknown security type.\")\n\t}\n}\n\n\/\/ EncodeResponseHeader writes encoded response header into the given writer.\nfunc (s *ServerSession) EncodeResponseHeader(header *protocol.ResponseHeader, writer io.Writer) {\n\ts.responseBodyKey = md5.Sum(s.requestBodyKey[:])\n\ts.responseBodyIV = md5.Sum(s.requestBodyIV[:])\n\n\taesStream := crypto.NewAesEncryptionStream(s.responseBodyKey[:], s.responseBodyIV[:])\n\tencryptionWriter := crypto.NewCryptionWriter(aesStream, writer)\n\ts.responseWriter = encryptionWriter\n\n\tcommon.Must2(encryptionWriter.Write([]byte{s.responseHeader, byte(header.Option)}))\n\terr := MarshalCommand(header.Command, encryptionWriter)\n\tif err != nil {\n\t\tcommon.Must2(encryptionWriter.Write([]byte{0x00, 0x00}))\n\t}\n}\n\n\/\/ EncodeResponseBody returns a Writer that auto-encrypt content written by caller.\nfunc (s *ServerSession) EncodeResponseBody(request *protocol.RequestHeader, writer io.Writer) buf.Writer {\n\tvar sizeParser crypto.ChunkSizeEncoder = crypto.PlainChunkSizeParser{}\n\tif request.Option.Has(protocol.RequestOptionChunkMasking) {\n\t\tsizeParser = NewShakeSizeParser(s.responseBodyIV[:])\n\t}\n\tvar padding crypto.PaddingLengthGenerator\n\tif request.Option.Has(protocol.RequestOptionGlobalPadding) {\n\t\tpadding = sizeParser.(crypto.PaddingLengthGenerator)\n\t}\n\n\tswitch request.Security {\n\tcase protocol.SecurityType_NONE:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tif request.Command.TransferType() == protocol.TransferTypeStream {\n\t\t\t\treturn crypto.NewChunkStreamWriter(sizeParser, writer)\n\t\t\t}\n\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(NoOpAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, protocol.TransferTypePacket, padding)\n\t\t}\n\n\t\treturn buf.NewWriter(writer)\n\tcase protocol.SecurityType_LEGACY:\n\t\tif request.Option.Has(protocol.RequestOptionChunkStream) {\n\t\t\tauth := &crypto.AEADAuthenticator{\n\t\t\t\tAEAD: new(FnvAuthenticator),\n\t\t\t\tNonceGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t\t}\n\t\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, s.responseWriter, request.Command.TransferType(), padding)\n\t\t}\n\n\t\treturn &buf.SequentialWriter{Writer: s.responseWriter}\n\tcase protocol.SecurityType_AES128_GCM:\n\t\taead := crypto.NewAesGcm(s.responseBodyKey[:])\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.responseBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, request.Command.TransferType(), padding)\n\tcase protocol.SecurityType_CHACHA20_POLY1305:\n\t\taead, _ := chacha20poly1305.New(GenerateChacha20Poly1305Key(s.responseBodyKey[:]))\n\n\t\tauth := &crypto.AEADAuthenticator{\n\t\t\tAEAD: aead,\n\t\t\tNonceGenerator: GenerateChunkNonce(s.responseBodyIV[:], uint32(aead.NonceSize())),\n\t\t\tAdditionalDataGenerator: crypto.GenerateEmptyBytes(),\n\t\t}\n\t\treturn crypto.NewAuthenticationWriter(auth, sizeParser, writer, request.Command.TransferType(), padding)\n\tdefault:\n\t\tpanic(\"Unknown security type.\")\n\t}\n}\n\nfunc (s *ServerSession) DrainConnN(reader io.Reader, n int) error {\n\t_, err := io.CopyN(ioutil.Discard, reader, int64(n))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nImplementation of an R-Way Trie data structure.\n\nA Trie has a root Node which is the base of the tree.\nEach subsequent Node has a letter and children, which are\nnodes that have letter values associated with them.\n*\/\n\npackage trie\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n)\n\ntype Node struct {\n\tval int\n\tmask uint64\n\tchildren map[rune]*Node\n\tchildvals []rune\n}\n\ntype Trie struct {\n\troot *Node\n\tsize int\n}\n\nfunc newNode(val int, m uint64) *Node {\n\treturn &Node{\n\t\tval: val,\n\t\tmask: m,\n\t\tchildren: make(map[rune]*Node),\n\t}\n}\n\n\/\/ Creates and returns a pointer to a new child for the node.\nfunc (n *Node) NewChild(r rune, bitmask uint64, val int) *Node {\n\tnode := newNode(val, bitmask)\n\tn.children[r] = node\n\tn.childvals = append(n.childvals, r)\n\treturn node\n}\n\n\/\/ Returns the children of this node.\nfunc (n Node) Children() map[rune]*Node {\n\treturn n.children\n}\n\n\/\/ Returns all runes stores beneath this node.\n\/\/ This list does not contain duplicates.\nfunc (n Node) ChildVals() []rune {\n\treturn n.childvals\n}\n\nfunc (n Node) Val() int {\n\treturn n.val\n}\n\n\/\/ Returns a uint64 representing the current\n\/\/ mask of this node.\nfunc (n Node) Mask() uint64 {\n\treturn n.mask\n}\n\n\/\/ Creates a new Trie with an initialized root Node.\nfunc CreateTrie() *Trie {\n\tnode := newNode(0, 0)\n\treturn &Trie{\n\t\troot: node,\n\t\tsize: 0,\n\t}\n}\n\n\/\/ Returns the root node for the Trie.\nfunc (t *Trie) Root() *Node {\n\treturn t.root\n}\n\n\/\/ Adds the key to the Trie.\nfunc (t *Trie) AddKey(key string) int {\n\tt.size++\n\trunes := []rune(key)\n\treturn t.addrune(t.Root(), runes, 0)\n}\n\n\/\/ Reads words from a file and adds them to the\n\/\/ trie. Expects words to be seperated by a newline.\nfunc (t *Trie) AddKeysFromFile(path string) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treader := bufio.NewReader(file)\n\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tline = strings.TrimSuffix(line, \"\\n\")\n\t\tt.AddKey(line)\n\t}\n}\n\n\/\/ Returns all the keys currently stored in the trie.\nfunc (t *Trie) Keys() []string {\n\treturn t.PrefixSearch(\"\")\n}\n\n\/\/ Performs a fuzzy search against the keys in the trie.\nfunc (t Trie) FuzzySearch(pre string) []string {\n\tvar (\n\t\tkeys []string\n\t\tpm = make([]rune, 35)\n\t)\n\n\tfuzzycollect(t.Root(), pm, []rune(pre), &keys)\n\tsort.Strings(keys)\n\treturn keys\n}\n\n\/\/ Performs a prefix search against the keys in the trie.\nfunc (t Trie) PrefixSearch(pre string) []string {\n\tvar keys []string\n\n\tnode := t.nodeAtPath(pre)\n\tif node == nil {\n\t\treturn keys\n\t}\n\n\tcollect(node, []rune(pre), &keys)\n\treturn keys\n}\n\nfunc (t Trie) nodeAtPath(pre string) *Node {\n\trunes := []rune(pre)\n\treturn findNode(t.Root(), runes, 0)\n}\n\nfunc findNode(node *Node, runes []rune, d int) *Node {\n\tif node == nil {\n\t\treturn nil\n\t}\n\n\tif len(runes) == 0 {\n\t\treturn node\n\t}\n\n\tupper := len(runes)\n\tif d == upper {\n\t\treturn node\n\t}\n\n\tn, ok := node.Children()[runes[d]]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\td++\n\treturn findNode(n, runes, d)\n}\n\nfunc (t Trie) addrune(node *Node, runes []rune, i int) int {\n\tif len(runes) == 0 {\n\t\tnode.NewChild(0, 0, t.size)\n\t\treturn i\n\t}\n\n\tr := runes[0]\n\tc := node.Children()\n\n\tn, ok := c[r]\n\tbitmask := maskruneslice(runes)\n\tif !ok {\n\t\tn = node.NewChild(r, bitmask, 0)\n\t}\n\tn.mask |= bitmask\n\n\ti++\n\treturn t.addrune(n, runes[1:], i)\n}\n\nfunc maskruneslice(rs []rune) uint64 {\n\tvar m uint64\n\ti := uint64(1)\n\tfor _, r := range rs {\n\t\th := i << (uint64(r) - 97)\n\t\tm |= h\n\t}\n\n\treturn m\n}\n\nfunc maskrune(r rune) uint64 {\n\tvar m uint64\n\ti := uint64(1)\n\th := i << (uint64(r) - 97)\n\tm |= h\n\n\treturn m\n}\n\nfunc collect(node *Node, pre []rune, keys *[]string) {\n\tchildren := node.Children()\n\tfor _, r := range node.ChildVals() {\n\t\tn := children[r]\n\t\tif n.Val() > 0 {\n\t\t\t*keys = append(*keys, string(pre))\n\t\t\tcontinue\n\t\t}\n\n\t\tnpre := append(pre, r)\n\t\tcollect(n, npre, keys)\n\t}\n}\n\nfunc fuzzycollect(node *Node, partialmatch, partial []rune, keys *[]string) {\n\tpartiallen := len(partial)\n\n\tif partiallen == 0 {\n\t\tcollect(node, partialmatch, keys)\n\t\treturn\n\t}\n\n\tm := maskruneslice(partial)\n\tchildren := node.Children()\n\tfor _, v := range node.ChildVals() {\n\t\tn := children[v]\n\n\t\txor := n.Mask() ^ m\n\t\tif (xor & m) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tnpartial := partial\n\t\tif v == partial[0] {\n\t\t\tif partiallen > 1 {\n\t\t\t\tnpartial = partial[1:]\n\t\t\t} else {\n\t\t\t\tnpartial = partial[0:0]\n\t\t\t}\n\t\t}\n\n\t\tfuzzycollect(n, append(partialmatch, v), npartial, keys)\n\t}\n}\n<commit_msg>Prefer scanner for AddKeysFromFile<commit_after>\/*\nImplementation of an R-Way Trie data structure.\n\nA Trie has a root Node which is the base of the tree.\nEach subsequent Node has a letter and children, which are\nnodes that have letter values associated with them.\n*\/\n\npackage trie\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n)\n\ntype Node struct {\n\tval int\n\tmask uint64\n\tchildren map[rune]*Node\n\tchildvals []rune\n}\n\ntype Trie struct {\n\troot *Node\n\tsize int\n}\n\nfunc newNode(val int, m uint64) *Node {\n\treturn &Node{\n\t\tval: val,\n\t\tmask: m,\n\t\tchildren: make(map[rune]*Node),\n\t}\n}\n\n\/\/ Creates and returns a pointer to a new child for the node.\nfunc (n *Node) NewChild(r rune, bitmask uint64, val int) *Node {\n\tnode := newNode(val, bitmask)\n\tn.children[r] = node\n\tn.childvals = append(n.childvals, r)\n\treturn node\n}\n\n\/\/ Returns the children of this node.\nfunc (n Node) Children() map[rune]*Node {\n\treturn n.children\n}\n\n\/\/ Returns all runes stores beneath this node.\n\/\/ This list does not contain duplicates.\nfunc (n Node) ChildVals() []rune {\n\treturn n.childvals\n}\n\nfunc (n Node) Val() int {\n\treturn n.val\n}\n\n\/\/ Returns a uint64 representing the current\n\/\/ mask of this node.\nfunc (n Node) Mask() uint64 {\n\treturn n.mask\n}\n\n\/\/ Creates a new Trie with an initialized root Node.\nfunc CreateTrie() *Trie {\n\tnode := newNode(0, 0)\n\treturn &Trie{\n\t\troot: node,\n\t\tsize: 0,\n\t}\n}\n\n\/\/ Returns the root node for the Trie.\nfunc (t *Trie) Root() *Node {\n\treturn t.root\n}\n\n\/\/ Adds the key to the Trie.\nfunc (t *Trie) AddKey(key string) int {\n\tt.size++\n\trunes := []rune(key)\n\treturn t.addrune(t.Root(), runes, 0)\n}\n\n\/\/ Reads words from a file and adds them to the\n\/\/ trie. Expects words to be seperated by a newline.\nfunc (t *Trie) AddKeysFromFile(path string) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treader := bufio.NewScanner(file)\n\n\tfor reader.Scan() {\n\t\tt.AddKey(reader.Text())\n\t}\n\n\tif reader.Err() != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ Returns all the keys currently stored in the trie.\nfunc (t *Trie) Keys() []string {\n\treturn t.PrefixSearch(\"\")\n}\n\n\/\/ Performs a fuzzy search against the keys in the trie.\nfunc (t Trie) FuzzySearch(pre string) []string {\n\tvar (\n\t\tkeys []string\n\t\tpm = make([]rune, 35)\n\t)\n\n\tfuzzycollect(t.Root(), pm, []rune(pre), &keys)\n\tsort.Strings(keys)\n\treturn keys\n}\n\n\/\/ Performs a prefix search against the keys in the trie.\nfunc (t Trie) PrefixSearch(pre string) []string {\n\tvar keys []string\n\n\tnode := t.nodeAtPath(pre)\n\tif node == nil {\n\t\treturn keys\n\t}\n\n\tcollect(node, []rune(pre), &keys)\n\treturn keys\n}\n\nfunc (t Trie) nodeAtPath(pre string) *Node {\n\trunes := []rune(pre)\n\treturn findNode(t.Root(), runes, 0)\n}\n\nfunc findNode(node *Node, runes []rune, d int) *Node {\n\tif node == nil {\n\t\treturn nil\n\t}\n\n\tif len(runes) == 0 {\n\t\treturn node\n\t}\n\n\tupper := len(runes)\n\tif d == upper {\n\t\treturn node\n\t}\n\n\tn, ok := node.Children()[runes[d]]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\td++\n\treturn findNode(n, runes, d)\n}\n\nfunc (t Trie) addrune(node *Node, runes []rune, i int) int {\n\tif len(runes) == 0 {\n\t\tnode.NewChild(0, 0, t.size)\n\t\treturn i\n\t}\n\n\tr := runes[0]\n\tc := node.Children()\n\n\tn, ok := c[r]\n\tbitmask := maskruneslice(runes)\n\tif !ok {\n\t\tn = node.NewChild(r, bitmask, 0)\n\t}\n\tn.mask |= bitmask\n\n\ti++\n\treturn t.addrune(n, runes[1:], i)\n}\n\nfunc maskruneslice(rs []rune) uint64 {\n\tvar m uint64\n\ti := uint64(1)\n\tfor _, r := range rs {\n\t\th := i << (uint64(r) - 97)\n\t\tm |= h\n\t}\n\n\treturn m\n}\n\nfunc maskrune(r rune) uint64 {\n\tvar m uint64\n\ti := uint64(1)\n\th := i << (uint64(r) - 97)\n\tm |= h\n\n\treturn m\n}\n\nfunc collect(node *Node, pre []rune, keys *[]string) {\n\tchildren := node.Children()\n\tfor _, r := range node.ChildVals() {\n\t\tn := children[r]\n\t\tif n.Val() > 0 {\n\t\t\t*keys = append(*keys, string(pre))\n\t\t\tcontinue\n\t\t}\n\n\t\tnpre := append(pre, r)\n\t\tcollect(n, npre, keys)\n\t}\n}\n\nfunc fuzzycollect(node *Node, partialmatch, partial []rune, keys *[]string) {\n\tpartiallen := len(partial)\n\n\tif partiallen == 0 {\n\t\tcollect(node, partialmatch, keys)\n\t\treturn\n\t}\n\n\tm := maskruneslice(partial)\n\tchildren := node.Children()\n\tfor _, v := range node.ChildVals() {\n\t\tn := children[v]\n\n\t\txor := n.Mask() ^ m\n\t\tif (xor & m) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tnpartial := partial\n\t\tif v == partial[0] {\n\t\t\tif partiallen > 1 {\n\t\t\t\tnpartial = partial[1:]\n\t\t\t} else {\n\t\t\t\tnpartial = partial[0:0]\n\t\t\t}\n\t\t}\n\n\t\tfuzzycollect(n, append(partialmatch, v), npartial, keys)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package killswitch\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/net\/route\"\n)\n\nconst (\n\t\/\/ UGSH man netstat\n\tUGSH = syscall.RTF_UP | syscall.RTF_GATEWAY | syscall.RTF_STATIC | syscall.RTF_HOST\n\t\/\/ UGSc man netstat\n\tUGSc = syscall.RTF_UP | syscall.RTF_GATEWAY | syscall.RTF_STATIC | syscall.RTF_PRCLONING\n)\n\nfunc privateIP(ip string) (bool, error) {\n\tvar err error\n\tprivate := false\n\tIP := net.ParseIP(ip)\n\tif IP == nil {\n\t\terr = errors.New(\"Invalid IP\")\n\t} else {\n\t\t_, private24BitBlock, _ := net.ParseCIDR(\"10.0.0.0\/8\")\n\t\t_, private20BitBlock, _ := net.ParseCIDR(\"172.16.0.0\/12\")\n\t\t_, private16BitBlock, _ := net.ParseCIDR(\"192.168.0.0\/16\")\n\t\tprivate = private24BitBlock.Contains(IP) || private20BitBlock.Contains(IP) || private16BitBlock.Contains(IP)\n\t}\n\treturn private, err\n}\n\n\/\/ UGSX find IP of the vpn by matching existing route flags, man netstat\nfunc UGSX() (net.IP, error) {\n\trib, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeRoute, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmsgs, err := route.ParseRIB(route.RIBTypeRoute, rib)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ip net.IP\n\tfor _, msg := range msgs {\n\t\tm := msg.(*route.RouteMessage)\n\t\tif m.Flags == UGSH || m.Flags == UGSc {\n\t\t\tswitch a := m.Addrs[syscall.AF_UNSPEC].(type) {\n\t\t\tcase *route.Inet4Addr:\n\t\t\t\tip = net.IPv4(a.IP[0], a.IP[1], a.IP[2], a.IP[3])\n\t\t\tcase *route.Inet6Addr:\n\t\t\t\tip = make(net.IP, net.IPv6len)\n\t\t\t\tcopy(ip, a.IP[:])\n\t\t\t}\n\t\t\tif ok, err := privateIP(ip.String()); err != nil {\n\t\t\t\tcontinue\n\t\t\t} else if !ok {\n\t\t\t\tswitch ip.String() {\n\t\t\t\tcase \"0.0.0.0\":\n\t\t\t\t\tcontinue\n\t\t\t\tcase \"128.0.0.0\":\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\treturn ip, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ip, nil\n}\n<commit_msg>\tmodified: ugsx.go<commit_after>package killswitch\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/net\/route\"\n)\n\n\/\/ UGSH, UGSc man netstat\nconst (\n\tUGSH = syscall.RTF_UP | syscall.RTF_GATEWAY | syscall.RTF_STATIC | syscall.RTF_HOST\n\tUGSc = syscall.RTF_UP | syscall.RTF_GATEWAY | syscall.RTF_STATIC | syscall.RTF_PRCLONING\n)\n\nfunc privateIP(ip string) (bool, error) {\n\tvar err error\n\tprivate := false\n\tIP := net.ParseIP(ip)\n\tif IP == nil {\n\t\terr = errors.New(\"Invalid IP\")\n\t} else {\n\t\t_, private24BitBlock, _ := net.ParseCIDR(\"10.0.0.0\/8\")\n\t\t_, private20BitBlock, _ := net.ParseCIDR(\"172.16.0.0\/12\")\n\t\t_, private16BitBlock, _ := net.ParseCIDR(\"192.168.0.0\/16\")\n\t\tprivate = private24BitBlock.Contains(IP) || private20BitBlock.Contains(IP) || private16BitBlock.Contains(IP)\n\t}\n\treturn private, err\n}\n\n\/\/ UGSX find IP of the vpn by matching existing route flags, man netstat\nfunc UGSX() (net.IP, error) {\n\trib, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeRoute, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmsgs, err := route.ParseRIB(route.RIBTypeRoute, rib)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ip net.IP\n\tfor _, msg := range msgs {\n\t\tm := msg.(*route.RouteMessage)\n\t\tif m.Flags == UGSH || m.Flags == UGSc {\n\t\t\tswitch a := m.Addrs[syscall.AF_UNSPEC].(type) {\n\t\t\tcase *route.Inet4Addr:\n\t\t\t\tip = net.IPv4(a.IP[0], a.IP[1], a.IP[2], a.IP[3])\n\t\t\tcase *route.Inet6Addr:\n\t\t\t\tip = make(net.IP, net.IPv6len)\n\t\t\t\tcopy(ip, a.IP[:])\n\t\t\t}\n\t\t\tif ok, err := privateIP(ip.String()); err != nil {\n\t\t\t\tcontinue\n\t\t\t} else if !ok {\n\t\t\t\tswitch ip.String() {\n\t\t\t\tcase \"0.0.0.0\":\n\t\t\t\t\tcontinue\n\t\t\t\tcase \"128.0.0.0\":\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\treturn ip, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ip, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"errors\"\n)\n\ntype User struct {\n\tUsername string\n\tNetwork string\n\tPassword string\n\tHashedPassword string\n\tPermissions []Permission\n\tGrantOption bool\n}\n\nfunc (u *User) calcUserHashPassword() {\n\th := sha1.New()\n\tio.WriteString(h, u.Password)\n\th2 := sha1.New()\n\th2.Write(h.Sum(nil))\n\n\tu.HashedPassword = strings.ToUpper(strings.Replace(fmt.Sprintf(\"*% x\", h2.Sum(nil)), \" \", \"\", -1))\n}\n\nfunc (u *User) compare(usr *User) bool {\n\tif u.Username == usr.Username && u.Network == usr.Network && u.HashedPassword == usr.HashedPassword &&\n\t\tlen(u.Permissions) == len(usr.Permissions) {\n\t\tfor _, up := range u.Permissions {\n\t\t\tvar found bool\n\t\t\tfor _, uperm := range usr.Permissions {\n\t\t\t\tif reflect.DeepEqual(up, uperm) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n\n}\n\nfunc (u *User) dropUser(tx *sql.Tx, execute bool) bool {\n\tquery := \"DROP USER '\" + u.Username + \"'@'\" + u.Network + \"'\"\n\tif execute {\n\t\t_, err := tx.Exec(query)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tfmt.Println(query)\n\t}\n\n\treturn true\n}\n\nfunc (u *User) addUser(tx *sql.Tx, execute bool) bool {\n\n\tfor _, permission := range u.Permissions {\n\t\tdatabase := permission.Database\n\t\ttable := permission.Table\n\t\tprivileges := permission.Privileges\n\n\t\tif strings.Contains(database, \"%\") {\n\t\t\tdatabase = \"`\" + database + \"`\"\n\t\t}\n\n\t\tif strings.Contains(table, \"%\") {\n\t\t\ttable = \"`\" + table + \"`\"\n\t\t}\n\n\t\tquery := \"GRANT \" + strings.Join(privileges, \", \") + \" ON \" + database + \".\" + table + \" TO '\" +\n\t\tu.Username + \"'@'\" + u.Network + \"' IDENTIFIED BY PASSWORD '\" + u.HashedPassword + \"'\"\n\n\t\tif u.GrantOption {\n\t\t\tquery += \" WITH GRANT OPTION\"\n\t\t}\n\n\t\tif execute {\n\t\t\t_, err := tx.Exec(query)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(query)\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc getUserFromDatabase(username, network string, db *sql.DB) (User, error) {\n\n\tvar grantPriv string\n\tvar user User\n\tquery := \"SELECT User, Host, Password, Grant_priv FROM mysql.user WHERE User='\" + username + \"' and Host='\" + network + \"'\"\n\terr := db.QueryRow(query).Scan(&user.Username, &user.Network, &user.HashedPassword, &grantPriv)\n\tif err != nil {\n\t\tfmt.Println(\"Error querying \" + query + \": \", err.Error())\n\t\treturn user, err\n\t} else {\n\t\tif grantPriv == \"Y\" {\n\t\t\tuser.GrantOption = true\n\t\t}\n\t\trows, err := db.Query(\"SHOW GRANTS FOR '\" + username + \"'@'\" + network + \"'\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn user, err\n\t\t} else {\n\t\t\tdefer rows.Close()\n\t\t\tvar grantLines []string\n\t\t\tfor rows.Next() {\n\t\t\t\tvar grantLine string\n\t\t\t\tif err := rows.Scan(&grantLine); err != nil {\n\t\t\t\t\treturn user, err\n\t\t\t\t}\n\t\t\t\tgrantLines = append(grantLines, grantLine)\n\t\t\t}\n\t\t\tvar permissions []Permission\n\t\t\tif len(grantLines) == 1 {\n\t\t\t\tvar tmpPerm Permission\n\t\t\t\ttmpPerm.parseUserFromGrantLine(grantLines[0])\n\t\t\t\tpermissions = append(permissions, tmpPerm)\n\n\t\t\t} else {\n\t\t\t\tfor _, grantLine := range grantLines[1:] {\n\t\t\t\t\tvar tmpPerm Permission\n\t\t\t\t\ttmpPerm.parseUserFromGrantLine(grantLine)\n\t\t\t\t\tpermissions = append(permissions, tmpPerm)\n\t\t\t\t}\n\t\t\t}\n\t\t\tuser.Permissions = permissions\n\t\t}\n\t}\n\treturn user, nil\n}\n\nfunc getAllUsersFromDB(db *sql.DB) ([]User, error) {\n\tvar users []User\n\trows, err := db.Query(selectAllUsers)\n\tif err != nil {\n\t\treturn users, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar username, host string\n\t\tif err := rows.Scan(&username, &host); err != nil {\n\t\t\treturn users, err\n\t\t}\n\t\tuser, err := getUserFromDatabase(username, host, db)\n\t\tif err != nil {\n\t\t\treturn users, err\n\t\t} else {\n\t\t\tusers = append(users, user)\n\t\t}\n\t}\n\tif err := rows.Err(); err != nil {\n\t\treturn users, err\n\t}\n\n\treturn users, nil\n}\n\nfunc validateUsers(users []User) ([]User, error) {\n\tvar resultUsers []User\n\tfor _, u := range users {\n\t\tif u.Username != \"\" && u.Network != \"\" && len(u.Permissions) > 0 && (u.Password != \"\" || u.HashedPassword != \"\") {\n\t\t\tfor _, permission := range u.Permissions {\n\t\t\t\tif permission.Database == \"\" || permission.Database == \"\" || len(permission.Privileges) == 0 {\n\t\t\t\t\treturn resultUsers, errors.New(\"Permissions for user set incorrectly\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif u.Password != \"\" {\n\t\t\t\tu.calcUserHashPassword()\n\t\t\t}\n\t\t\tresultUsers = append(resultUsers, u)\n\t\t} else {\n\t\t\terrorDescription := \"Username, Network, Permissions, Passowrd or HashedPassword must be set set\"\n\t\t\treturn resultUsers, errors.New(errorDescription)\n\t\t}\n\t}\n\treturn resultUsers, nil\n}\n\nfunc getUsersToRemove(usersFromConf, usersFromDB []User) []User {\n\tvar usersToRemove []User\n\tfor _, userDB := range usersFromDB {\n\t\tvar found bool\n\t\tfor _, userConf := range usersFromConf {\n\t\t\tif userConf.compare(&userDB) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tusersToRemove = append(usersToRemove, userDB)\n\t\t}\n\t}\n\n\treturn usersToRemove\n}\n\nfunc getUsersToAdd(usersFromConf, usersFromDB []User) []User {\n\tvar usersToAdd []User\n\tfor _, userConf := range usersFromConf {\n\t\tvar found bool\n\t\tfor _, userDB := range usersFromDB {\n\t\t\tif userConf.compare(&userDB) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tusersToAdd = append(usersToAdd, userConf)\n\t\t}\n\t}\n\n\treturn usersToAdd\n}<commit_msg>Fixed typo in error message<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"errors\"\n)\n\ntype User struct {\n\tUsername string\n\tNetwork string\n\tPassword string\n\tHashedPassword string\n\tPermissions []Permission\n\tGrantOption bool\n}\n\nfunc (u *User) calcUserHashPassword() {\n\th := sha1.New()\n\tio.WriteString(h, u.Password)\n\th2 := sha1.New()\n\th2.Write(h.Sum(nil))\n\n\tu.HashedPassword = strings.ToUpper(strings.Replace(fmt.Sprintf(\"*% x\", h2.Sum(nil)), \" \", \"\", -1))\n}\n\nfunc (u *User) compare(usr *User) bool {\n\tif u.Username == usr.Username && u.Network == usr.Network && u.HashedPassword == usr.HashedPassword &&\n\t\tlen(u.Permissions) == len(usr.Permissions) {\n\t\tfor _, up := range u.Permissions {\n\t\t\tvar found bool\n\t\t\tfor _, uperm := range usr.Permissions {\n\t\t\t\tif reflect.DeepEqual(up, uperm) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n\n}\n\nfunc (u *User) dropUser(tx *sql.Tx, execute bool) bool {\n\tquery := \"DROP USER '\" + u.Username + \"'@'\" + u.Network + \"'\"\n\tif execute {\n\t\t_, err := tx.Exec(query)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tfmt.Println(query)\n\t}\n\n\treturn true\n}\n\nfunc (u *User) addUser(tx *sql.Tx, execute bool) bool {\n\n\tfor _, permission := range u.Permissions {\n\t\tdatabase := permission.Database\n\t\ttable := permission.Table\n\t\tprivileges := permission.Privileges\n\n\t\tif strings.Contains(database, \"%\") {\n\t\t\tdatabase = \"`\" + database + \"`\"\n\t\t}\n\n\t\tif strings.Contains(table, \"%\") {\n\t\t\ttable = \"`\" + table + \"`\"\n\t\t}\n\n\t\tquery := \"GRANT \" + strings.Join(privileges, \", \") + \" ON \" + database + \".\" + table + \" TO '\" +\n\t\tu.Username + \"'@'\" + u.Network + \"' IDENTIFIED BY PASSWORD '\" + u.HashedPassword + \"'\"\n\n\t\tif u.GrantOption {\n\t\t\tquery += \" WITH GRANT OPTION\"\n\t\t}\n\n\t\tif execute {\n\t\t\t_, err := tx.Exec(query)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(query)\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc getUserFromDatabase(username, network string, db *sql.DB) (User, error) {\n\n\tvar grantPriv string\n\tvar user User\n\tquery := \"SELECT User, Host, Password, Grant_priv FROM mysql.user WHERE User='\" + username + \"' and Host='\" + network + \"'\"\n\terr := db.QueryRow(query).Scan(&user.Username, &user.Network, &user.HashedPassword, &grantPriv)\n\tif err != nil {\n\t\tfmt.Println(\"Error querying \" + query + \": \", err.Error())\n\t\treturn user, err\n\t} else {\n\t\tif grantPriv == \"Y\" {\n\t\t\tuser.GrantOption = true\n\t\t}\n\t\trows, err := db.Query(\"SHOW GRANTS FOR '\" + username + \"'@'\" + network + \"'\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn user, err\n\t\t} else {\n\t\t\tdefer rows.Close()\n\t\t\tvar grantLines []string\n\t\t\tfor rows.Next() {\n\t\t\t\tvar grantLine string\n\t\t\t\tif err := rows.Scan(&grantLine); err != nil {\n\t\t\t\t\treturn user, err\n\t\t\t\t}\n\t\t\t\tgrantLines = append(grantLines, grantLine)\n\t\t\t}\n\t\t\tvar permissions []Permission\n\t\t\tif len(grantLines) == 1 {\n\t\t\t\tvar tmpPerm Permission\n\t\t\t\ttmpPerm.parseUserFromGrantLine(grantLines[0])\n\t\t\t\tpermissions = append(permissions, tmpPerm)\n\n\t\t\t} else {\n\t\t\t\tfor _, grantLine := range grantLines[1:] {\n\t\t\t\t\tvar tmpPerm Permission\n\t\t\t\t\ttmpPerm.parseUserFromGrantLine(grantLine)\n\t\t\t\t\tpermissions = append(permissions, tmpPerm)\n\t\t\t\t}\n\t\t\t}\n\t\t\tuser.Permissions = permissions\n\t\t}\n\t}\n\treturn user, nil\n}\n\nfunc getAllUsersFromDB(db *sql.DB) ([]User, error) {\n\tvar users []User\n\trows, err := db.Query(selectAllUsers)\n\tif err != nil {\n\t\treturn users, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar username, host string\n\t\tif err := rows.Scan(&username, &host); err != nil {\n\t\t\treturn users, err\n\t\t}\n\t\tuser, err := getUserFromDatabase(username, host, db)\n\t\tif err != nil {\n\t\t\treturn users, err\n\t\t} else {\n\t\t\tusers = append(users, user)\n\t\t}\n\t}\n\tif err := rows.Err(); err != nil {\n\t\treturn users, err\n\t}\n\n\treturn users, nil\n}\n\nfunc validateUsers(users []User) ([]User, error) {\n\tvar resultUsers []User\n\tfor _, u := range users {\n\t\tif u.Username != \"\" && u.Network != \"\" && len(u.Permissions) > 0 && (u.Password != \"\" || u.HashedPassword != \"\") {\n\t\t\tfor _, permission := range u.Permissions {\n\t\t\t\tif permission.Database == \"\" || permission.Database == \"\" || len(permission.Privileges) == 0 {\n\t\t\t\t\treturn resultUsers, errors.New(\"Permissions for user set incorrectly\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif u.Password != \"\" {\n\t\t\t\tu.calcUserHashPassword()\n\t\t\t}\n\t\t\tresultUsers = append(resultUsers, u)\n\t\t} else {\n\t\t\terrorDescription := \"Username, Network, Permissions, Passowrd or HashedPassword must be set\"\n\t\t\treturn resultUsers, errors.New(errorDescription)\n\t\t}\n\t}\n\treturn resultUsers, nil\n}\n\nfunc getUsersToRemove(usersFromConf, usersFromDB []User) []User {\n\tvar usersToRemove []User\n\tfor _, userDB := range usersFromDB {\n\t\tvar found bool\n\t\tfor _, userConf := range usersFromConf {\n\t\t\tif userConf.compare(&userDB) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tusersToRemove = append(usersToRemove, userDB)\n\t\t}\n\t}\n\n\treturn usersToRemove\n}\n\nfunc getUsersToAdd(usersFromConf, usersFromDB []User) []User {\n\tvar usersToAdd []User\n\tfor _, userConf := range usersFromConf {\n\t\tvar found bool\n\t\tfor _, userDB := range usersFromDB {\n\t\t\tif userConf.compare(&userDB) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tusersToAdd = append(usersToAdd, userConf)\n\t\t}\n\t}\n\n\treturn usersToAdd\n}<|endoftext|>"} {"text":"<commit_before>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nfunc Format(value interface{}) string {\n\tvar (\n\t\tf = \"%+v\"\n\t\targs = []interface{}{value}\n\t)\n\tswitch v := value.(type) {\n\tcase time.Time:\n\t\treturn v.Format(time.StampNano)\n\tcase *Quote:\n\t\tif v.Ask > 0 {\n\t\t\tf = \"%s Venue: %s Symbol: %s Ask: %6d Quantity: %6d Depth: %6d Last: (%6d,%6d,%s)\"\n\t\t\targs = []interface{}{Format(v.QuoteTime), v.Venue, v.Symbol, v.Ask, v.AskSize, v.AskDepth, v.Last, v.LastSize, Format(v.LastTrade)}\n\t\t} else {\n\t\t\tf = \"%s Venue: %s Symbol: %s Bid: %6d Quantity: %6d Depth: %6d Last: (%6d,%6d,%s)\"\n\t\t\targs = []interface{}{Format(v.QuoteTime), v.Venue, v.Symbol, v.Bid, v.BidSize, v.BidDepth, v.Last, v.LastSize, Format(v.LastTrade)}\n\t\t}\n\tcase *Execution:\n\t\tf = \"%s Venue: %s Symbol: %s Price: %6d Quantity: %6d Account: %s Id: %6d Incoming: %t Standing: %t\"\n\t\targs = []interface{}{Format(v.FilledAt), v.Venue, v.Symbol, v.Price, v.Filled, v.Account, v.StandingId, v.IncomingComplete, v.StandingComplete}\n\t}\n\treturn fmt.Sprintf(f, args...)\n}\n<commit_msg>Fix Quote formatting and add more Format types<commit_after>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc Depth(orders []StandingOrder) uint64 {\n\tvar depth uint64\n\tfor _, o := range orders {\n\t\tdepth += o.Quantity\n\t}\n\treturn depth\n}\n\nfunc Format(value interface{}) string {\n\tvar (\n\t\tf = \"%+v\"\n\t\targs = []interface{}{value}\n\t)\n\tswitch v := value.(type) {\n\tcase Fill:\n\t\tf = \"%s Price: %6d Quantity: %6d\"\n\t\targs = []interface{}{Format(v.TimeStamp), v.Price, v.Quantity}\n\tcase []Fill:\n\t\tvar s []string\n\t\tfor _, fill := range v {\n\t\t\ts = append(s, Format(fill))\n\t\t}\n\t\treturn strings.Join(s, \"\\n\")\n\tcase StandingOrder:\n\t\tf = \"(%d,%d)\"\n\t\targs = []interface{}{v.Price, v.Quantity}\n\tcase []StandingOrder:\n\t\tvar s []string\n\t\tfor _, fill := range v {\n\t\t\ts = append(s, Format(fill))\n\t\t}\n\t\treturn strings.Join(s, \",\")\n\tcase *OrderBook:\n\t\tf = \"%s Venue: %s Symbol: %s Asks: [%s] AskDepth: %d Bids: [%s] BidDepth: %d\"\n\t\targs = []interface{}{Format(v.TimeStamp), v.Venue, v.Symbol, Format(v.Asks), Depth(v.Asks), Format(v.Bids), Depth(v.Bids)}\n\tcase *OrderState:\n\t\tf = \"%s Venue: %s Symbol: %s Price: %6d Quantity: %6d Account: %s Id: %6d Direction: %s Type: %s\\n%s\"\n\t\targs = []interface{}{Format(v.Timestamp), v.Venue, v.Symbol, v.Price, v.Quantity, v.Account, v.Id, v.Direction, v.Type, Format(v.Fills)}\n\tcase *Quote:\n\t\tf = \"%s Venue: %s Symbol: %s Ask: %6d AskSize: %6d AskDepth: %6d Bid: %6d BidSize %6d BidDepth: %6d Last: (%6d,%6d,%s)\"\n\t\targs = []interface{}{Format(v.QuoteTime), v.Venue, v.Symbol, v.Ask, v.AskSize, v.AskDepth, v.Bid, v.BidSize, v.BidDepth, v.Last, v.LastSize, Format(v.LastTrade)}\n\tcase *Execution:\n\t\tf = \"%s Venue: %s Symbol: %s Price: %6d Quantity: %6d Account: %s Id: %6d Incoming: %t Standing: %t\\n%s\"\n\t\targs = []interface{}{Format(v.FilledAt), v.Venue, v.Symbol, v.Price, v.Filled, v.Account, v.StandingId, v.IncomingComplete, v.StandingComplete, Format(&v.Order)}\n\tcase time.Time:\n\t\treturn v.Format(time.StampNano)\n\t}\n\treturn fmt.Sprintf(f, args...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Walk Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage walk\n\nimport (\n\t\"bytes\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nimport (\n\t. \"github.com\/lxn\/go-winapi\"\n)\n\nvar (\n\tdecimalSepB byte\n\tdecimalSepUint16 uint16\n\tdecimalSepS string\n\tgroupSepB byte\n\tgroupSepUint16 uint16\n\tgroupSepS string\n)\n\nfunc init() {\n\tsPtr := syscall.StringToUTF16Ptr(\"1000.00\")\n\n\tvar buf [9]uint16\n\n\tif 0 == GetNumberFormat(\n\t\tLOCALE_USER_DEFAULT,\n\t\t0,\n\t\tsPtr,\n\t\tnil,\n\t\t&buf[0],\n\t\tint32(len(buf))) {\n\n\t\tpanic(\"GetNumberFormat\")\n\t}\n\n\ts := syscall.UTF16ToString(buf[:])\n\n\tdecimalSepB = s[5]\n\tdecimalSepUint16 = buf[5]\n\tdecimalSepS = s[5:6]\n\tgroupSepB = s[1]\n\tgroupSepUint16 = buf[1]\n\tgroupSepS = s[1:2]\n}\n\nfunc maxi(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\nfunc mini(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\nfunc boolToInt(value bool) int {\n\tif value {\n\t\treturn 1\n\t}\n\n\treturn 0\n}\n\nfunc uint16IndexUint16(s []uint16, v uint16) int {\n\tfor i, u := range s {\n\t\tif u == v {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}\n\nfunc uint16ContainsUint16(s []uint16, v uint16) bool {\n\treturn uint16IndexUint16(s, v) != -1\n}\n\nfunc uint16CountUint16(s []uint16, v uint16) int {\n\tvar count int\n\n\tfor _, u := range s {\n\t\tif u == v {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n\nfunc uint16RemoveUint16(s []uint16, v uint16) []uint16 {\n\tcount := uint16CountUint16(s, v)\n\tif count == 0 {\n\t\treturn s\n\t}\n\n\tret := make([]uint16, 0, len(s)-count)\n\n\tfor _, u := range s {\n\t\tif u != v {\n\t\t\tret = append(ret, u)\n\t\t}\n\t}\n\n\treturn ret\n}\n\nfunc ParseFloat(s string) (float64, error) {\n\ts = strings.TrimSpace(s)\n\n\tt := FormatFloatGrouped(1000, 2)\n\n\treplaceSep := func(new string, index func(string, func(rune) bool) int) {\n\t\ti := index(t, func(r rune) bool {\n\t\t\treturn r < '0' || r > '9'\n\t\t})\n\n\t\tvar sep string\n\t\tif i > -1 {\n\t\t\tsep = string(t[i])\n\t\t}\n\t\tif sep != \"\" {\n\t\t\ts = strings.Replace(s, string(sep), new, -1)\n\t\t}\n\t}\n\n\treplaceSep(\"\", strings.IndexFunc)\n\treplaceSep(\".\", strings.LastIndexFunc)\n\n\treturn strconv.ParseFloat(s, 64)\n}\n\nfunc FormatFloat(f float64, prec int) string {\n\treturn formatFloatString(strconv.FormatFloat(f, 'f', prec, 64), prec, false)\n}\n\nfunc FormatFloatGrouped(f float64, prec int) string {\n\treturn formatFloatString(strconv.FormatFloat(f, 'f', prec, 64), prec, true)\n}\n\nfunc formatBigRat(r *big.Rat, prec int) string {\n\treturn formatFloatString(r.FloatString(prec), prec, false)\n}\n\nfunc formatBigRatGrouped(r *big.Rat, prec int) string {\n\treturn formatFloatString(r.FloatString(prec), prec, true)\n}\n\nfunc formatFloatString(s string, prec int, grouped bool) string {\n\tswitch s {\n\tcase \"NaN\", \"-Inf\", \"+Inf\":\n\t\treturn s\n\t}\n\n\ts = strings.Replace(s, \".\", decimalSepS, 1)\n\tif !grouped {\n\t\treturn s\n\t}\n\n\tb := new(bytes.Buffer)\n\n\tvar firstDigit int\n\tif len(s) > 0 && s[0] == '-' {\n\t\tfirstDigit = 1\n\t\tb.WriteByte('-')\n\t\ts = s[1:]\n\t}\n\n\tintLen := len(s) - prec - 1\n\n\tn := intLen % 3\n\tif n != 0 {\n\t\tb.WriteString(s[:n])\n\t}\n\tfor i := n; i < intLen; i += 3 {\n\t\tif b.Len() > firstDigit {\n\t\t\tb.WriteByte(groupSepB)\n\t\t}\n\t\tb.WriteString(s[i : i+3])\n\t}\n\n\tb.WriteString(s[intLen:])\n\n\treturn b.String()\n}\n\nfunc setDescendantsEnabled(widget Widget, enabled bool) {\n\twb := widget.BaseWidget()\n\twb.SetEnabled(enabled)\n\n\twalkDescendants(widget, func(w Widget) bool {\n\t\tif w.Handle() == wb.hWnd {\n\t\t\treturn true\n\t\t}\n\n\t\tEnableWindow(w.Handle(), enabled && w.BaseWidget().enabled)\n\n\t\treturn true\n\t})\n}\n\nfunc setDescendantsFont(widget Widget, f *Font) {\n\twb := widget.BaseWidget()\n\twb.SetFont(f)\n\n\twalkDescendants(widget, func(w Widget) bool {\n\t\tif w.Handle() == wb.hWnd {\n\t\t\treturn true\n\t\t}\n\n\t\tif w.BaseWidget().font != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tsetWidgetFont(w.Handle(), f)\n\n\t\treturn true\n\t})\n}\n\nfunc walkDescendants(widget Widget, f func(w Widget) bool) {\n\tif widget == nil || !f(widget) {\n\t\treturn\n\t}\n\n\tvar children []Widget\n\n\tswitch w := widget.(type) {\n\tcase *NumberEdit:\n\t\tchildren = append(children, w.edit)\n\n\tcase *TabWidget:\n\t\tfor _, p := range w.Pages().items {\n\t\t\tchildren = append(children, p)\n\t\t}\n\n\tcase Container:\n\t\tif c := w.Children(); c != nil {\n\t\t\tchildren = c.items\n\t\t} else {\n\t\t\tchildren = nil\n\t\t}\n\t}\n\n\tfor _, w := range children {\n\t\twalkDescendants(w, f)\n\t}\n}\n\nfunc less(a, b interface{}, order SortOrder) bool {\n\tc := func(ls bool) bool {\n\t\tif order == SortAscending {\n\t\t\treturn ls\n\t\t}\n\n\t\treturn !ls\n\t}\n\n\tif _, ok := a.(error); ok {\n\t\t_, bIsErr := b.(error)\n\n\t\treturn c(!bIsErr)\n\t}\n\tif _, ok := b.(error); ok {\n\t\treturn c(false)\n\t}\n\n\tif a == nil {\n\t\treturn c(b != nil)\n\t}\n\tif b == nil {\n\t\treturn c(false)\n\t}\n\n\tswitch av := a.(type) {\n\tcase string:\n\t\tif bv, ok := b.(string); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int:\n\t\tif bv, ok := b.(int); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase float64:\n\t\tif bv, ok := b.(float64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase float32:\n\t\tif bv, ok := b.(float32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int64:\n\t\tif bv, ok := b.(int64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int32:\n\t\tif bv, ok := b.(int32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int16:\n\t\tif bv, ok := b.(int16); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int8:\n\t\tif bv, ok := b.(int8); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint:\n\t\tif bv, ok := b.(uint); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint64:\n\t\tif bv, ok := b.(uint64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint32:\n\t\tif bv, ok := b.(uint32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint16:\n\t\tif bv, ok := b.(uint16); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint8:\n\t\tif bv, ok := b.(uint8); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase time.Time:\n\t\tif bv, ok := b.(time.Time); ok {\n\t\t\treturn c(av.Before(bv))\n\t\t}\n\n\tcase bool:\n\t\tif bv, ok := b.(bool); ok {\n\t\t\treturn c(!av && bv)\n\t\t}\n\t}\n\n\treturn false\n}\n<commit_msg>Figure out number format chars more sanely, fixes #57 - thanks fur-q<commit_after>\/\/ Copyright 2010 The Walk Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage walk\n\nimport (\n\t\"bytes\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nimport (\n\t. \"github.com\/lxn\/go-winapi\"\n)\n\nvar (\n\tdecimalSepB byte\n\tdecimalSepUint16 uint16\n\tdecimalSepS string\n\tgroupSepB byte\n\tgroupSepUint16 uint16\n\tgroupSepS string\n)\n\nfunc init() {\n\tvar buf [4]uint16\n\n\tGetLocaleInfo(LOCALE_USER_DEFAULT, LOCALE_SDECIMAL, &buf[0], int32(len(buf)))\n\tdecimalSepB = byte(buf[0])\n\tdecimalSepS = syscall.UTF16ToString(buf[0:1])\n\tdecimalSepUint16 = buf[0]\n\n\tGetLocaleInfo(LOCALE_USER_DEFAULT, LOCALE_STHOUSAND, &buf[0], int32(len(buf)))\n\tgroupSepB = byte(buf[0])\n\tgroupSepS = syscall.UTF16ToString(buf[0:1])\n\tgroupSepUint16 = buf[0]\n}\n\nfunc maxi(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\nfunc mini(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\nfunc boolToInt(value bool) int {\n\tif value {\n\t\treturn 1\n\t}\n\n\treturn 0\n}\n\nfunc uint16IndexUint16(s []uint16, v uint16) int {\n\tfor i, u := range s {\n\t\tif u == v {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}\n\nfunc uint16ContainsUint16(s []uint16, v uint16) bool {\n\treturn uint16IndexUint16(s, v) != -1\n}\n\nfunc uint16CountUint16(s []uint16, v uint16) int {\n\tvar count int\n\n\tfor _, u := range s {\n\t\tif u == v {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n\nfunc uint16RemoveUint16(s []uint16, v uint16) []uint16 {\n\tcount := uint16CountUint16(s, v)\n\tif count == 0 {\n\t\treturn s\n\t}\n\n\tret := make([]uint16, 0, len(s)-count)\n\n\tfor _, u := range s {\n\t\tif u != v {\n\t\t\tret = append(ret, u)\n\t\t}\n\t}\n\n\treturn ret\n}\n\nfunc ParseFloat(s string) (float64, error) {\n\ts = strings.TrimSpace(s)\n\n\tt := FormatFloatGrouped(1000, 2)\n\n\treplaceSep := func(new string, index func(string, func(rune) bool) int) {\n\t\ti := index(t, func(r rune) bool {\n\t\t\treturn r < '0' || r > '9'\n\t\t})\n\n\t\tvar sep string\n\t\tif i > -1 {\n\t\t\tsep = string(t[i])\n\t\t}\n\t\tif sep != \"\" {\n\t\t\ts = strings.Replace(s, string(sep), new, -1)\n\t\t}\n\t}\n\n\treplaceSep(\"\", strings.IndexFunc)\n\treplaceSep(\".\", strings.LastIndexFunc)\n\n\treturn strconv.ParseFloat(s, 64)\n}\n\nfunc FormatFloat(f float64, prec int) string {\n\treturn formatFloatString(strconv.FormatFloat(f, 'f', prec, 64), prec, false)\n}\n\nfunc FormatFloatGrouped(f float64, prec int) string {\n\treturn formatFloatString(strconv.FormatFloat(f, 'f', prec, 64), prec, true)\n}\n\nfunc formatBigRat(r *big.Rat, prec int) string {\n\treturn formatFloatString(r.FloatString(prec), prec, false)\n}\n\nfunc formatBigRatGrouped(r *big.Rat, prec int) string {\n\treturn formatFloatString(r.FloatString(prec), prec, true)\n}\n\nfunc formatFloatString(s string, prec int, grouped bool) string {\n\tswitch s {\n\tcase \"NaN\", \"-Inf\", \"+Inf\":\n\t\treturn s\n\t}\n\n\ts = strings.Replace(s, \".\", decimalSepS, 1)\n\tif !grouped {\n\t\treturn s\n\t}\n\n\tb := new(bytes.Buffer)\n\n\tvar firstDigit int\n\tif len(s) > 0 && s[0] == '-' {\n\t\tfirstDigit = 1\n\t\tb.WriteByte('-')\n\t\ts = s[1:]\n\t}\n\n\tintLen := len(s) - prec - 1\n\n\tn := intLen % 3\n\tif n != 0 {\n\t\tb.WriteString(s[:n])\n\t}\n\tfor i := n; i < intLen; i += 3 {\n\t\tif b.Len() > firstDigit {\n\t\t\tb.WriteByte(groupSepB)\n\t\t}\n\t\tb.WriteString(s[i : i+3])\n\t}\n\n\tb.WriteString(s[intLen:])\n\n\treturn b.String()\n}\n\nfunc setDescendantsEnabled(widget Widget, enabled bool) {\n\twb := widget.BaseWidget()\n\twb.SetEnabled(enabled)\n\n\twalkDescendants(widget, func(w Widget) bool {\n\t\tif w.Handle() == wb.hWnd {\n\t\t\treturn true\n\t\t}\n\n\t\tEnableWindow(w.Handle(), enabled && w.BaseWidget().enabled)\n\n\t\treturn true\n\t})\n}\n\nfunc setDescendantsFont(widget Widget, f *Font) {\n\twb := widget.BaseWidget()\n\twb.SetFont(f)\n\n\twalkDescendants(widget, func(w Widget) bool {\n\t\tif w.Handle() == wb.hWnd {\n\t\t\treturn true\n\t\t}\n\n\t\tif w.BaseWidget().font != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tsetWidgetFont(w.Handle(), f)\n\n\t\treturn true\n\t})\n}\n\nfunc walkDescendants(widget Widget, f func(w Widget) bool) {\n\tif widget == nil || !f(widget) {\n\t\treturn\n\t}\n\n\tvar children []Widget\n\n\tswitch w := widget.(type) {\n\tcase *NumberEdit:\n\t\tchildren = append(children, w.edit)\n\n\tcase *TabWidget:\n\t\tfor _, p := range w.Pages().items {\n\t\t\tchildren = append(children, p)\n\t\t}\n\n\tcase Container:\n\t\tif c := w.Children(); c != nil {\n\t\t\tchildren = c.items\n\t\t} else {\n\t\t\tchildren = nil\n\t\t}\n\t}\n\n\tfor _, w := range children {\n\t\twalkDescendants(w, f)\n\t}\n}\n\nfunc less(a, b interface{}, order SortOrder) bool {\n\tc := func(ls bool) bool {\n\t\tif order == SortAscending {\n\t\t\treturn ls\n\t\t}\n\n\t\treturn !ls\n\t}\n\n\tif _, ok := a.(error); ok {\n\t\t_, bIsErr := b.(error)\n\n\t\treturn c(!bIsErr)\n\t}\n\tif _, ok := b.(error); ok {\n\t\treturn c(false)\n\t}\n\n\tif a == nil {\n\t\treturn c(b != nil)\n\t}\n\tif b == nil {\n\t\treturn c(false)\n\t}\n\n\tswitch av := a.(type) {\n\tcase string:\n\t\tif bv, ok := b.(string); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int:\n\t\tif bv, ok := b.(int); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase float64:\n\t\tif bv, ok := b.(float64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase float32:\n\t\tif bv, ok := b.(float32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int64:\n\t\tif bv, ok := b.(int64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int32:\n\t\tif bv, ok := b.(int32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int16:\n\t\tif bv, ok := b.(int16); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase int8:\n\t\tif bv, ok := b.(int8); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint:\n\t\tif bv, ok := b.(uint); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint64:\n\t\tif bv, ok := b.(uint64); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint32:\n\t\tif bv, ok := b.(uint32); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint16:\n\t\tif bv, ok := b.(uint16); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase uint8:\n\t\tif bv, ok := b.(uint8); ok {\n\t\t\treturn c(av < bv)\n\t\t}\n\n\tcase time.Time:\n\t\tif bv, ok := b.(time.Time); ok {\n\t\t\treturn c(av.Before(bv))\n\t\t}\n\n\tcase bool:\n\t\tif bv, ok := b.(bool); ok {\n\t\t\treturn c(!av && bv)\n\t\t}\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/coreos\/etcd\/web\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/--------------------------------------\n\/\/ Web Helper\n\/\/--------------------------------------\n\nfunc webHelper() {\n\tstoreMsg = make(chan string)\n\tfor {\n\t\tweb.Hub().Send(<-storeMsg)\n\t}\n}\n\n\/\/--------------------------------------\n\/\/ HTTP Utilities\n\/\/--------------------------------------\n\nfunc decodeJsonRequest(req *http.Request, data interface{}) error {\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(&data); err != nil && err != io.EOF {\n\t\tlogger.Println(\"Malformed json request: %v\", err)\n\t\treturn fmt.Errorf(\"Malformed json request: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc encodeJsonResponse(w http.ResponseWriter, status int, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(status)\n\n\tif data != nil {\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(data)\n\t}\n}\n\nfunc Post(t *transHandler, path string, body io.Reader) (*http.Response, error) {\n\n\tif t.client != nil {\n\t\tresp, err := t.client.Post(\"https:\/\/\"+path, \"application\/json\", body)\n\t\treturn resp, err\n\t} else {\n\t\tresp, err := http.Post(\"http:\/\/\"+path, \"application\/json\", body)\n\t\treturn resp, err\n\t}\n}\n\nfunc Get(t *transHandler, path string) (*http.Response, error) {\n\tif t.client != nil {\n\t\tresp, err := t.client.Get(\"https:\/\/\" + path)\n\t\treturn resp, err\n\t} else {\n\t\tresp, err := http.Get(\"http:\/\/\" + path)\n\t\treturn resp, err\n\t}\n}\n\nfunc leaderClient() string {\n\tresp, _ := Get(&serverTransHandler, server.Leader()+\"\/client\")\n\tif resp != nil {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\treturn string(body)\n\t}\n\treturn \"\"\n}\n\n\/\/--------------------------------------\n\/\/ Log\n\/\/--------------------------------------\n\nfunc debug(msg string, v ...interface{}) {\n\tif verbose {\n\t\tlogger.Printf(\"DEBUG \"+msg+\"\\n\", v...)\n\t}\n}\n\nfunc info(msg string, v ...interface{}) {\n\tlogger.Printf(\"INFO \"+msg+\"\\n\", v...)\n}\n\nfunc warn(msg string, v ...interface{}) {\n\tlogger.Printf(\"Alpaca Server: WARN \"+msg+\"\\n\", v...)\n}\n\nfunc fatal(msg string, v ...interface{}) {\n\tlogger.Printf(\"FATAL \"+msg+\"\\n\", v...)\n\tos.Exit(1)\n}\n<commit_msg>add [etcd] as the prefix of the logging info<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/coreos\/etcd\/web\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/--------------------------------------\n\/\/ Web Helper\n\/\/--------------------------------------\nvar storeMsg chan string\n\n\/\/ Help to send msg from stroe to webHub\nfunc webHelper() {\n\tstoreMsg = make(chan string)\n\tfor {\n\t\t\/\/ transfere the new msg to webHub\n\t\tweb.Hub().Send(<-storeMsg)\n\t}\n}\n\n\/\/--------------------------------------\n\/\/ HTTP Utilities\n\/\/--------------------------------------\n\nfunc decodeJsonRequest(req *http.Request, data interface{}) error {\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(&data); err != nil && err != io.EOF {\n\t\twarn(\"Malformed json request: %v\", err)\n\t\treturn fmt.Errorf(\"Malformed json request: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc encodeJsonResponse(w http.ResponseWriter, status int, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(status)\n\n\tif data != nil {\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(data)\n\t}\n}\n\nfunc Post(t *transHandler, path string, body io.Reader) (*http.Response, error) {\n\n\tif t.client != nil {\n\t\tresp, err := t.client.Post(\"https:\/\/\"+path, \"application\/json\", body)\n\t\treturn resp, err\n\t} else {\n\t\tresp, err := http.Post(\"http:\/\/\"+path, \"application\/json\", body)\n\t\treturn resp, err\n\t}\n}\n\nfunc Get(t *transHandler, path string) (*http.Response, error) {\n\tif t.client != nil {\n\t\tresp, err := t.client.Get(\"https:\/\/\" + path)\n\t\treturn resp, err\n\t} else {\n\t\tresp, err := http.Get(\"http:\/\/\" + path)\n\t\treturn resp, err\n\t}\n}\n\nfunc leaderClient() string {\n\tresp, _ := Get(&serverTransHandler, server.Leader()+\"\/client\")\n\tif resp != nil {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\treturn string(body)\n\t}\n\treturn \"\"\n}\n\n\/\/--------------------------------------\n\/\/ Log\n\/\/--------------------------------------\n\nvar logger *log.Logger\n\nfunc init() {\n\tlogger = log.New(os.Stdout, \"[etcd] \", log.Lmicroseconds)\n}\n\nfunc debug(msg string, v ...interface{}) {\n\tif verbose {\n\t\tlogger.Printf(\"DEBUG \"+msg+\"\\n\", v...)\n\t}\n}\n\nfunc info(msg string, v ...interface{}) {\n\tlogger.Printf(\"INFO \"+msg+\"\\n\", v...)\n}\n\nfunc warn(msg string, v ...interface{}) {\n\tlogger.Printf(\"WARN \"+msg+\"\\n\", v...)\n}\n\nfunc fatal(msg string, v ...interface{}) {\n\tlogger.Printf(\"FATAL \"+msg+\"\\n\", v...)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package rtmp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/jcoene\/gologger\"\n\t\"time\"\n)\n\nvar log logger.Logger = *logger.NewLogger(logger.LOG_LEVEL_WARN, \"rtmp\")\n\nfunc DumpBytes(label string, buf []byte, size int) {\n\tfmt.Printf(\"Dumping %s (%d bytes):\\n\", label, size)\n\tfor i := 0; i < size; i++ {\n\t\tfmt.Printf(\"0x%02x \", buf[i])\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\nfunc Dump(label string, val interface{}) error {\n\tjson, err := json.MarshalIndent(val, \"\", \" \")\n\tif err != nil {\n\t\treturn Error(\"Error dumping %s: %s\", label, err)\n\t}\n\n\tfmt.Printf(\"Dumping %s:\\n%s\\n\", label, json)\n\treturn nil\n}\n\nfunc Error(f string, v ...interface{}) error {\n\treturn errors.New(fmt.Sprintf(f, v...))\n}\n\nfunc GetCurrentTimestamp() uint32 {\n\treturn uint32(time.Now().UnixNano()\/int64(1000000)) % TIMESTAMP_MAX\n}\n<commit_msg>Reduce log level<commit_after>package rtmp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/jcoene\/gologger\"\n\t\"time\"\n)\n\nvar log logger.Logger = *logger.NewLogger(logger.LOG_LEVEL_INFO, \"rtmp\")\n\nfunc DumpBytes(label string, buf []byte, size int) {\n\tfmt.Printf(\"Dumping %s (%d bytes):\\n\", label, size)\n\tfor i := 0; i < size; i++ {\n\t\tfmt.Printf(\"0x%02x \", buf[i])\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\nfunc Dump(label string, val interface{}) error {\n\tjson, err := json.MarshalIndent(val, \"\", \" \")\n\tif err != nil {\n\t\treturn Error(\"Error dumping %s: %s\", label, err)\n\t}\n\n\tfmt.Printf(\"Dumping %s:\\n%s\\n\", label, json)\n\treturn nil\n}\n\nfunc Error(f string, v ...interface{}) error {\n\treturn errors.New(fmt.Sprintf(f, v...))\n}\n\nfunc GetCurrentTimestamp() uint32 {\n\treturn uint32(time.Now().UnixNano()\/int64(1000000)) % TIMESTAMP_MAX\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\n\/\/ ----------------------------------------------------------------------------------------------------------\n\/\/ Atlantis Common Types\n\/\/ ----------------------------------------------------------------------------------------------------------\n\nconst (\n\tStatusOk = \"OK\"\n\tStatusMaintenance = \"MAINTENANCE\"\n\tStatusError = \"ERROR\"\n\tStatusDegraded = \"DEGRADED\"\n\tStatusUnknown = \"UNKNOWN\"\n\tStatusDone = \"DONE\"\n\tStatusInit = \"INIT\"\n\tStatusFull = \"FULL\" \/\/ Supervisor Health Check status when no more containers are available\n\tManifestFile = \"manifest.toml\"\n\tDefaultLDAPPort = uint16(636)\n\tDefaultRegion = \"dev\"\n\tDefaultZone = \"dev\"\n)\n\n\/\/ ------------ Version -----------\n\/\/ used to check manager or supervisor version\ntype VersionArg struct {\n}\n\ntype VersionReply struct {\n\tRPCVersion string\n\tAPIVersion string\n}\n\n\/\/ ------------ Async -----------\n\/\/ used to for async requests\ntype AsyncReply struct {\n\tID string\n}\n\n\/\/ ----------------------------------------------------------------------------------------------------------\n\/\/ Utility Functions\n\/\/ ----------------------------------------------------------------------------------------------------------\n\nfunc DiffSlices(s1, s2 []string) (onlyInS1, onlyInS2 []string) {\n\tonlyInS1 = []string{}\n\tonlyInS2 = []string{}\n\tif s1 == nil && s2 == nil {\n\t\treturn\n\t} else if s1 == nil {\n\t\treturn onlyInS1, s2\n\t} else if s2 == nil {\n\t\treturn s1, onlyInS2\n\t}\n\tcounts := map[string]int{}\n\tfor _, s1str := range s1 {\n\t\tcounts[s1str]++\n\t}\n\tfor _, s2str := range s2 {\n\t\tif count, present := counts[s2str]; !present || count == 0 {\n\t\t\tonlyInS2 = append(onlyInS2, s2str)\n\t\t} else {\n\t\t\tcounts[s2str]--\n\t\t}\n\t}\n\tfor s1str, count := range counts {\n\t\tfor i := count; i > 0; i-- {\n\t\t\tonlyInS1 = append(onlyInS1, s1str)\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>add Default proxy stuff<commit_after>package common\n\n\/\/ ----------------------------------------------------------------------------------------------------------\n\/\/ Atlantis Common Types\n\/\/ ----------------------------------------------------------------------------------------------------------\n\nconst (\n\tStatusOk = \"OK\"\n\tStatusMaintenance = \"MAINTENANCE\"\n\tStatusError = \"ERROR\"\n\tStatusDegraded = \"DEGRADED\"\n\tStatusUnknown = \"UNKNOWN\"\n\tStatusDone = \"DONE\"\n\tStatusInit = \"INIT\"\n\tStatusFull = \"FULL\" \/\/ Supervisor Health Check status when no more containers are available\n\tManifestFile = \"manifest.toml\"\n\tDefaultLDAPPort = uint16(636)\n\tDefaultRegion = \"dev\"\n\tDefaultZone = \"dev\"\n\tDefaultMinProxyPort = uint16(40000)\n\tDefaultMaxProxyPort = uint16(65535)\n\tDefaultProxyIP = \"172.31.254.254\"\n)\n\n\/\/ ------------ Version -----------\n\/\/ used to check manager or supervisor version\ntype VersionArg struct {\n}\n\ntype VersionReply struct {\n\tRPCVersion string\n\tAPIVersion string\n}\n\n\/\/ ------------ Async -----------\n\/\/ used to for async requests\ntype AsyncReply struct {\n\tID string\n}\n\n\/\/ ----------------------------------------------------------------------------------------------------------\n\/\/ Utility Functions\n\/\/ ----------------------------------------------------------------------------------------------------------\n\nfunc DiffSlices(s1, s2 []string) (onlyInS1, onlyInS2 []string) {\n\tonlyInS1 = []string{}\n\tonlyInS2 = []string{}\n\tif s1 == nil && s2 == nil {\n\t\treturn\n\t} else if s1 == nil {\n\t\treturn onlyInS1, s2\n\t} else if s2 == nil {\n\t\treturn s1, onlyInS2\n\t}\n\tcounts := map[string]int{}\n\tfor _, s1str := range s1 {\n\t\tcounts[s1str]++\n\t}\n\tfor _, s2str := range s2 {\n\t\tif count, present := counts[s2str]; !present || count == 0 {\n\t\t\tonlyInS2 = append(onlyInS2, s2str)\n\t\t} else {\n\t\t\tcounts[s2str]--\n\t\t}\n\t}\n\tfor s1str, count := range counts {\n\t\tfor i := count; i > 0; i-- {\n\t\t\tonlyInS1 = append(onlyInS1, s1str)\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package activation\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"models\/presentation\"\n\t\"time\"\n)\n\ntype Activation struct {\n\tTime time.Time\n\tPresentation presentation.Presentation\n\n\tKey string `datastore:\"-\"`\n}\n\nfunc New(t time.Time, p presentation.Presentation) (a *Activation) {\n\ta = new(Activation)\n\ta.Time = t\n\ta.Presentation = p\n\treturn\n}\n\nfunc Make(t time.Time, p presentation.Presentation, c appengine.Context) (a *Activation, err error) {\n\ta = New(t, p)\n\terr = a.Save(c)\n\treturn\n}\n\nfunc GetByKey(k string, c appengine.Context) (a *Activation, err error) {\n\tdk, err := datastore.DecodeKey(k)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = datastore.Get(c, dk, a)\n\treturn\n\n}\n\nfunc GetForPresentation(p presentation.Presentation, c appengine.Context) (as []*Activation, err error) {\n\tas = make([]*Activation, 0)\n\tpk, err := datastore.DecodeKey(p.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tkeys, err := datastore.NewQuery(\"Activation\").Ancestor(pk).GetAll(c, as)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i, k := range keys {\n\t\tas[i].Key = k.Encode()\n\t}\n\treturn\n}\n\nfunc GetBeforeTime(t time.Time, c appengine.Context) ([]*Activation, error) {\n\treturn timeQuery(t, \"<\", c)\n}\n\nfunc GetAfterTime(t time.Time, c appengine.Context) ([]*Activation, error) {\n\treturn timeQuery(t, \">\", c)\n}\n\nfunc (a *Activation) Save(c appengine.Context) (err error) {\n\tvar k *datastore.Key\n\tif a.Key == \"\" {\n\t\tvar pKey *datastore.Key\n\t\tpKey, err = datastore.DecodeKey(a.Presentation.Key)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tk, err = datastore.Put(c, datastore.NewIncompleteKey(c, \"Activation\", pKey), a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\ta.Key = k.Encode()\n\t} else {\n\t\tk, err = datastore.DecodeKey(a.Key)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t_, err = datastore.Put(c, k, a)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc timeQuery(t time.Time, sign string, c appengine.Context) (as []*Activation, err error) {\n\tas = make([]*Activation, 0)\n\tkeys, err := datastore.NewQuery(\"Activation\").Filter(\"Time \"+sign, t).GetAll(c, as)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i, k := range keys {\n\t\tas[i].Key = k.Encode()\n\t}\n\treturn\n}\n<commit_msg>Improved Activation model<commit_after>package activation\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"models\/presentation\"\n\t\"time\"\n)\n\ntype Activation struct {\n\tTime time.Time\n\tPresentation *datastore.Key\n\n\tKey string `datastore:\"-\"`\n}\n\nfunc New(t time.Time, p *datastore.Key) (a *Activation) {\n\ta = new(Activation)\n\ta.Time = t\n\ta.Presentation = p\n\treturn\n}\n\nfunc Make(t time.Time, p *datastore.Key, c appengine.Context) (a *Activation, err error) {\n\ta = New(t, p)\n\terr = a.Save(c)\n\treturn\n}\n\nfunc GetByKey(k string, c appengine.Context) (a *Activation, err error) {\n\tdk, err := datastore.DecodeKey(k)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = datastore.Get(c, dk, a)\n\treturn\n\n}\n\nfunc GetForPresentation(p presentation.Presentation, c appengine.Context) (as []*Activation, err error) {\n\tas = make([]*Activation, 0)\n\tpk, err := datastore.DecodeKey(p.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tkeys, err := datastore.NewQuery(\"Activation\").Ancestor(pk).GetAll(c, as)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i, k := range keys {\n\t\tas[i].Key = k.Encode()\n\t}\n\treturn\n}\n\nfunc GetBeforeTime(t time.Time, c appengine.Context) ([]*Activation, error) {\n\treturn timeQuery(t, \"<\", c)\n}\n\nfunc GetAfterTime(t time.Time, c appengine.Context) ([]*Activation, error) {\n\treturn timeQuery(t, \">\", c)\n}\n\nfunc (a *Activation) Save(c appengine.Context) (err error) {\n\tvar k *datastore.Key\n\tif a.Key == \"\" {\n\t\tvar pKey *datastore.Key\n\t\tpKey, err = datastore.DecodeKey(a.Presentation.Encode())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tk, err = datastore.Put(c, datastore.NewIncompleteKey(c, \"Activation\", pKey), a)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\ta.Key = k.Encode()\n\t} else {\n\t\tk, err = datastore.DecodeKey(a.Key)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t_, err = datastore.Put(c, k, a)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (a *Activation) Delete(c appengine.Context) (err error) {\n\tk, err := datastore.DecodeKey(a.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = datastore.Delete(c, k)\n\tif err != nil {\n\t\treturn\n\t}\n\ta.Key = \"\"\n\n\treturn\n}\n\nfunc timeQuery(t time.Time, sign string, c appengine.Context) (as []*Activation, err error) {\n\tas = make([]*Activation, 0)\n\tkeys, err := datastore.NewQuery(\"Activation\").Filter(\"Time \"+sign, t).Order(\"Time\").GetAll(c, &as)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i, k := range keys {\n\t\tas[i].Key = k.Encode()\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package ntw\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc init() {\n\t\/\/ register the language\n\tLanguages[\"ir-ir\"] = Language{\n\t\tName: \"Iranian\",\n\t\tAliases: []string{\"ir\", \"ir-ir\", \"ir_IR\", \"iranian\"},\n\t\tFlag: \"🇮🇷\",\n\n\t\tIntegerToWords: IntegerToIrIr,\n\t}\n}\n\nfunc IntegerToEnUs(input int) string {\n\tvar iranianMegas = []string{\"\", \"هزار\", \"میلیون\", \"میلیارد\", \"بیلیون\", \"بیلیارد\", \"تریلیون\", \"تریلیارد\"}\n\tvar iranianUnits = []string{\"\", \"یک\", \"دو\", \"سه\", \"چهار\", \"پنج\", \"شش\", \"هفت\", \"هشت\", \"نه\"}\n\tvar iranianTens = []string{\"\", \"ده\", \"بیست\", \"سی\", \"چهل\", \"پنجاه\", \"شصت\", \"هفتاد\", \"هشتاد\", \"نود\"}\n\tvar iranianTeens = []string{\"ده\", \"یازده\", \"دوازده\", \"سیزده\", \"چهارده\", \"پانزده\", \"شانزده\", \"هفده\", \"هجده\", \"نوزده\"}\n\tvar iranianHundreds = []string{\"\", \"صد\", \"دویست\", \"سیصد\", \"چهارصد\", \"پانصد\", \"ششصد\", \"هفتصد\", \"هشتصد\", \"نهصد\"}\n\n\t\/\/log.Printf(\"Input: %d\\n\", input)\n\twords := []string{}\n\n\tif input < 0 {\n\t\twords = append(words, \"منفی\")\n\t\tinput *= -1\n\t}\n\n\t\/\/ split integer in triplets\n\ttriplets := integerToTriplets(input)\n\n\t\/\/ zero is a special case\n\tif len(triplets) == 0 {\n\t\treturn \"صفر\"\n\t}\n\n\t\/\/ iterate over triplets\n\tfor idx := len(triplets) - 1; idx >= 0; idx-- {\n\t\ttriplet := triplets[idx]\n\t\t\/\/log.Printf(\"Triplet: %d (idx=%d)\\n\", triplet, idx)\n\n\t\tif triplet == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ three-digits\n\t\thundreds := triplet \/ 100 % 10\n\t\ttens := triplet \/ 10 % 10\n\t\tunits := triplet % 10\n\n\t\tif hundreds > 0 {\n\t\t\twords = append(words, iranianHundreds[hundreds])\n\t\t}\n\n\t\tif tens == 0 && units == 0 {\n\t\t\tgoto tripletEnd\n\t\t}\n\n\t\tswitch tens {\n\t\tcase 0:\n\t\t\twords = append(words, iranianUnits[units])\n\t\tcase 1:\n\t\t\twords = append(words, iranianTeens[units])\n\t\t\tbreak\n\t\tdefault:\n\t\t\tif units > 0 {\n\t\t\t\tword := fmt.Sprintf(\"%s و %s\", iranianTens[tens], iranianUnits[units])\n\t\t\t\twords = append(words, word)\n\t\t\t} else {\n\t\t\t\twords = append(words, iranianTens[tens])\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\ttripletEnd:\n\t\tif mega := iranianMegas[idx]; mega != \"\" {\n\t\t\twords = append(words, mega)\n\t\t}\n\t}\n\n\treturn strings.TrimSpace(strings.Join(words, \" \"))\n}\n<commit_msg>update Iranian function name<commit_after>package ntw\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc init() {\n\t\/\/ register the language\n\tLanguages[\"ir-ir\"] = Language{\n\t\tName: \"Iranian\",\n\t\tAliases: []string{\"ir\", \"ir-ir\", \"ir_IR\", \"iranian\"},\n\t\tFlag: \"🇮🇷\",\n\n\t\tIntegerToWords: IntegerToIrIr,\n\t}\n}\n\nfunc IntegerToIrIr(input int) string {\n\tvar iranianMegas = []string{\"\", \"هزار\", \"میلیون\", \"میلیارد\", \"بیلیون\", \"بیلیارد\", \"تریلیون\", \"تریلیارد\"}\n\tvar iranianUnits = []string{\"\", \"یک\", \"دو\", \"سه\", \"چهار\", \"پنج\", \"شش\", \"هفت\", \"هشت\", \"نه\"}\n\tvar iranianTens = []string{\"\", \"ده\", \"بیست\", \"سی\", \"چهل\", \"پنجاه\", \"شصت\", \"هفتاد\", \"هشتاد\", \"نود\"}\n\tvar iranianTeens = []string{\"ده\", \"یازده\", \"دوازده\", \"سیزده\", \"چهارده\", \"پانزده\", \"شانزده\", \"هفده\", \"هجده\", \"نوزده\"}\n\tvar iranianHundreds = []string{\"\", \"صد\", \"دویست\", \"سیصد\", \"چهارصد\", \"پانصد\", \"ششصد\", \"هفتصد\", \"هشتصد\", \"نهصد\"}\n\n\t\/\/log.Printf(\"Input: %d\\n\", input)\n\twords := []string{}\n\n\tif input < 0 {\n\t\twords = append(words, \"منفی\")\n\t\tinput *= -1\n\t}\n\n\t\/\/ split integer in triplets\n\ttriplets := integerToTriplets(input)\n\n\t\/\/ zero is a special case\n\tif len(triplets) == 0 {\n\t\treturn \"صفر\"\n\t}\n\n\t\/\/ iterate over triplets\n\tfor idx := len(triplets) - 1; idx >= 0; idx-- {\n\t\ttriplet := triplets[idx]\n\t\t\/\/log.Printf(\"Triplet: %d (idx=%d)\\n\", triplet, idx)\n\n\t\tif triplet == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ three-digits\n\t\thundreds := triplet \/ 100 % 10\n\t\ttens := triplet \/ 10 % 10\n\t\tunits := triplet % 10\n\n\t\tif hundreds > 0 {\n\t\t\twords = append(words, iranianHundreds[hundreds])\n\t\t}\n\n\t\tif tens == 0 && units == 0 {\n\t\t\tgoto tripletEnd\n\t\t}\n\n\t\tswitch tens {\n\t\tcase 0:\n\t\t\twords = append(words, iranianUnits[units])\n\t\tcase 1:\n\t\t\twords = append(words, iranianTeens[units])\n\t\t\tbreak\n\t\tdefault:\n\t\t\tif units > 0 {\n\t\t\t\tword := fmt.Sprintf(\"%s و %s\", iranianTens[tens], iranianUnits[units])\n\t\t\t\twords = append(words, word)\n\t\t\t} else {\n\t\t\t\twords = append(words, iranianTens[tens])\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\ttripletEnd:\n\t\tif mega := iranianMegas[idx]; mega != \"\" {\n\t\t\twords = append(words, mega)\n\t\t}\n\t}\n\n\treturn strings.TrimSpace(strings.Join(words, \" \"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package jq provides go bindings for libjq providing a streaming filter of\n\/\/ JSON documents.\n\/\/\n\/\/ This package provides a thin layer on top of stedolan's libjq -- it would\n\/\/ likely be helpful to read through the wiki pages about it:\n\/\/\n\/\/ jv: the JSON value type https:\/\/github.com\/stedolan\/jq\/wiki\/C-API:-jv\n\/\/\n\/\/ libjq: https:\/\/github.com\/stedolan\/jq\/wiki\/C-API:-libjq\npackage jq\n\n\/*\nTo install\n$ .\/configure --disable-maintainer-mode --prefix=$PWD\/BUILD\n$ make install-libLTLIBRARIES install-includeHEADERS\n*\/\n\n\/*\n#cgo CFLAGS: -I ${SRCDIR}\/..\/jq-1.5\/BUILD\/include\n#cgo LDFLAGS: ${SRCDIR}\/..\/jq-1.5\/BUILD\/lib\/libjq.a\n\n#include <jq.h>\n#include <jv.h>\n\n#include <stdlib.h>\n\nvoid install_jq_error_cb(jq_state *jq, unsigned long long id);\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ Jq encapsulates the state needed to interface with the libjq C library\ntype Jq struct {\n\t_state *C.struct_jq_state\n\terrorStoreId uint64\n}\n\n\/\/ New initializes a new JQ object and the underlying C library.\nfunc New() (*Jq, error) {\n\tjq := new(Jq)\n\n\tvar err error\n\tjq._state, err = C.jq_init()\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if jq == nil {\n\t\treturn nil, errors.New(\"jq_init returned nil -- out of memory?\")\n\t}\n\n\treturn jq, nil\n}\n\n\/\/ Close the handle to libjq and free C resources\nfunc (jq *Jq) Close() {\n\tif jq._state != nil {\n\t\tC.jq_teardown(&jq._state)\n\t\tjq._state = nil\n\t}\n\tif jq.errorStoreId != 0 {\n\t\tglobalErrorChannels.Delete(jq.errorStoreId)\n\t\tjq.errorStoreId = 0\n\t}\n}\n\n\/\/ We cant pass many things over the Go\/C boundary, so instead of passing the error channel we pass an opaque indentifier (a 64bit int as it turns out) and use that to look up in a global variable\ntype errorLookupState struct {\n\tsync.RWMutex\n\tidCounter uint64\n\tchannels map[uint64]chan<- error\n}\n\nfunc (e *errorLookupState) Add(c chan<- error) uint64 {\n\tnewID := atomic.AddUint64(&e.idCounter, 1)\n\te.RWMutex.Lock()\n\tdefer e.RWMutex.Unlock()\n\te.channels[newID] = c\n\treturn newID\n}\n\nfunc (e *errorLookupState) Get(id uint64) chan<- error {\n\te.RWMutex.RLock()\n\tdefer e.RWMutex.RUnlock()\n\tc, ok := e.channels[id]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Tried to get error channel #%d out of store but it wasn't there!\", id))\n\t}\n\treturn c\n}\n\nfunc (e *errorLookupState) Delete(id uint64) {\n\te.RWMutex.Lock()\n\tdefer e.RWMutex.Unlock()\n\tdelete(e.channels, id)\n}\n\n\/\/ The global state - this also serves to keep the channel in scope by keeping\n\/\/ a reference to it that the GC can see\nvar globalErrorChannels = errorLookupState{\n\tchannels: make(map[uint64]chan<- error),\n}\n\n\/\/export goLibjqErrorHandler\nfunc goLibjqErrorHandler(id uint64, jv C.jv) {\n\tch := globalErrorChannels.Get(id)\n\n\terr := _ConvertError(jv)\n\tch <- err\n}\n\n\/\/ Start will compile `program` and return a three channels: input, output and\n\/\/ error. Sending a jq.Jv* to input cause the program to be run to it and\n\/\/ one-or-more results returned as jq.Jv* on the output channel, or one or more\n\/\/ error values sent to the error channel. When you are done sending values\n\/\/ close the input channel.\n\/\/\n\/\/ args is a list of key\/value pairs to bind as variables into the program, and\n\/\/ must be an array type even if empty. Each element of the array should be an\n\/\/ object with a \"name\" and \"value\" properties. Name should exclude the \"$\"\n\/\/ sign. For example this is `[ {\"name\": \"n\", \"value\": 1 } ]` would then be\n\/\/ `$n` in the programm.\n\/\/\n\/\/ This function is not reentereant -- in that you cannot and should not call\n\/\/ Start again until you have closed the previous input channel.\n\/\/\n\/\/ If there is a problem compiling the JQ program then the errors will be\n\/\/ reported on error channel before any input is read so makle sure you account\n\/\/ for this case.\n\/\/\n\/\/ Any jq.Jv* values passed to the input channel will be owned by the channel.\n\/\/ If you want to keep them afterwards ensure you Copy() them before passing to\n\/\/ the channel\nfunc (jq *Jq) Start(program string, args *Jv) (in chan<- *Jv, out <-chan *Jv, errs <-chan error) {\n\t\/\/ Create out two way copy of the channels. We need to be able to recv from\n\t\/\/ input, so need to store the original channel\n\tcIn := make(chan *Jv)\n\tcOut := make(chan *Jv)\n\tcErr := make(chan error)\n\n\t\/\/ And assign the read\/write only versions to the output fars\n\tin = cIn\n\tout = cOut\n\terrs = cErr\n\n\t\/\/ Before setting up any of the global error handling state, lets check that\n\t\/\/ args is of the right type!\n\tif args.Kind() != JV_KIND_ARRAY {\n\t\tgo func() {\n\t\t\t\/\/ Take ownership of the inputs\n\t\t\tfor jv := range cIn {\n\t\t\t\tjv.Free()\n\t\t\t}\n\t\t\tcErr <- fmt.Errorf(\"`args` parameter is of type %s not array!\", args.Kind().String())\n\t\t\targs.Free()\n\t\t\tclose(cOut)\n\t\t\tclose(cErr)\n\t\t}()\n\t\treturn\n\t}\n\n\tjq.errorStoreId = globalErrorChannels.Add(cErr)\n\n\t\/\/ Because we can't pass a function pointer to an exported Go func we have to\n\t\/\/ call a C function which uses the exported fund for us.\n\t\/\/ https:\/\/github.com\/golang\/go\/wiki\/cgo#function-variables\n\tC.install_jq_error_cb(jq._state, C.ulonglong(jq.errorStoreId))\n\n\tgo func() {\n\n\t\tif jq._Compile(program, args) == false {\n\t\t\t\/\/ Even if compile failed follow the contract. Read any inputs and take\n\t\t\t\/\/ ownership of them (aka free them)\n\t\t\t\/\/\n\t\t\t\/\/ Errors from compile will be sent to the error channel\n\t\t\tfor jv := range cIn {\n\t\t\t\tjv.Free()\n\t\t\t}\n\t\t} else {\n\t\t\tfor jv := range cIn {\n\t\t\t\tjq._Execute(jv, cOut, cErr)\n\t\t\t}\n\t\t}\n\t\t\/\/ Once we've read all the inputs close the output to signal to caller that\n\t\t\/\/ we are done.\n\t\tclose(cOut)\n\t\tclose(cErr)\n\t\tC.install_jq_error_cb(jq._state, 0)\n\t}()\n\n\treturn\n}\n\n\/\/ Process a single input and send the results on `out`\nfunc (jq *Jq) _Execute(jv *Jv, out chan<- *Jv, err chan<- error) {\n\tflags := C.int(0)\n\n\tC.jq_start(jq._state, jv.jv, flags)\n\tresult := &Jv{C.jq_next(jq._state)}\n\tfor result.IsValid() {\n\t\tout <- result\n\t\tresult = &Jv{C.jq_next(jq._state)}\n\t}\n\tmsg, ok := result.GetInvalidMessageAsString()\n\tif ok {\n\t\t\/\/ Uncaught jq exception\n\t\t\/\/ TODO: get file:line position in input somehow.\n\t\terr <- errors.New(msg)\n\t}\n}\n\nfunc (jq *Jq) _Compile(prog string, args *Jv) bool {\n\tcs := C.CString(prog)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\tcompiled := C.jq_compile_args(jq._state, cs, args.jv) != 0\n\t\/\/ If there was an error it will have been sent to errorChannel\n\treturn compiled\n}\n<commit_msg>Link libm on linux<commit_after>\/\/ Package jq provides go bindings for libjq providing a streaming filter of\n\/\/ JSON documents.\n\/\/\n\/\/ This package provides a thin layer on top of stedolan's libjq -- it would\n\/\/ likely be helpful to read through the wiki pages about it:\n\/\/\n\/\/ jv: the JSON value type https:\/\/github.com\/stedolan\/jq\/wiki\/C-API:-jv\n\/\/\n\/\/ libjq: https:\/\/github.com\/stedolan\/jq\/wiki\/C-API:-libjq\npackage jq\n\n\/*\nTo install\n$ .\/configure --disable-maintainer-mode --prefix=$PWD\/BUILD\n$ make install-libLTLIBRARIES install-includeHEADERS\n*\/\n\n\/*\n#cgo CFLAGS: -I ${SRCDIR}\/..\/jq-1.5\/BUILD\/include\n#cgo LDFLAGS: ${SRCDIR}\/..\/jq-1.5\/BUILD\/lib\/libjq.a\n#cgo linux LDFLAGS: -lm\n\n\n#include <jq.h>\n#include <jv.h>\n\n#include <stdlib.h>\n\nvoid install_jq_error_cb(jq_state *jq, unsigned long long id);\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ Jq encapsulates the state needed to interface with the libjq C library\ntype Jq struct {\n\t_state *C.struct_jq_state\n\terrorStoreId uint64\n}\n\n\/\/ New initializes a new JQ object and the underlying C library.\nfunc New() (*Jq, error) {\n\tjq := new(Jq)\n\n\tvar err error\n\tjq._state, err = C.jq_init()\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if jq == nil {\n\t\treturn nil, errors.New(\"jq_init returned nil -- out of memory?\")\n\t}\n\n\treturn jq, nil\n}\n\n\/\/ Close the handle to libjq and free C resources\nfunc (jq *Jq) Close() {\n\tif jq._state != nil {\n\t\tC.jq_teardown(&jq._state)\n\t\tjq._state = nil\n\t}\n\tif jq.errorStoreId != 0 {\n\t\tglobalErrorChannels.Delete(jq.errorStoreId)\n\t\tjq.errorStoreId = 0\n\t}\n}\n\n\/\/ We cant pass many things over the Go\/C boundary, so instead of passing the error channel we pass an opaque indentifier (a 64bit int as it turns out) and use that to look up in a global variable\ntype errorLookupState struct {\n\tsync.RWMutex\n\tidCounter uint64\n\tchannels map[uint64]chan<- error\n}\n\nfunc (e *errorLookupState) Add(c chan<- error) uint64 {\n\tnewID := atomic.AddUint64(&e.idCounter, 1)\n\te.RWMutex.Lock()\n\tdefer e.RWMutex.Unlock()\n\te.channels[newID] = c\n\treturn newID\n}\n\nfunc (e *errorLookupState) Get(id uint64) chan<- error {\n\te.RWMutex.RLock()\n\tdefer e.RWMutex.RUnlock()\n\tc, ok := e.channels[id]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Tried to get error channel #%d out of store but it wasn't there!\", id))\n\t}\n\treturn c\n}\n\nfunc (e *errorLookupState) Delete(id uint64) {\n\te.RWMutex.Lock()\n\tdefer e.RWMutex.Unlock()\n\tdelete(e.channels, id)\n}\n\n\/\/ The global state - this also serves to keep the channel in scope by keeping\n\/\/ a reference to it that the GC can see\nvar globalErrorChannels = errorLookupState{\n\tchannels: make(map[uint64]chan<- error),\n}\n\n\/\/export goLibjqErrorHandler\nfunc goLibjqErrorHandler(id uint64, jv C.jv) {\n\tch := globalErrorChannels.Get(id)\n\n\terr := _ConvertError(jv)\n\tch <- err\n}\n\n\/\/ Start will compile `program` and return a three channels: input, output and\n\/\/ error. Sending a jq.Jv* to input cause the program to be run to it and\n\/\/ one-or-more results returned as jq.Jv* on the output channel, or one or more\n\/\/ error values sent to the error channel. When you are done sending values\n\/\/ close the input channel.\n\/\/\n\/\/ args is a list of key\/value pairs to bind as variables into the program, and\n\/\/ must be an array type even if empty. Each element of the array should be an\n\/\/ object with a \"name\" and \"value\" properties. Name should exclude the \"$\"\n\/\/ sign. For example this is `[ {\"name\": \"n\", \"value\": 1 } ]` would then be\n\/\/ `$n` in the programm.\n\/\/\n\/\/ This function is not reentereant -- in that you cannot and should not call\n\/\/ Start again until you have closed the previous input channel.\n\/\/\n\/\/ If there is a problem compiling the JQ program then the errors will be\n\/\/ reported on error channel before any input is read so makle sure you account\n\/\/ for this case.\n\/\/\n\/\/ Any jq.Jv* values passed to the input channel will be owned by the channel.\n\/\/ If you want to keep them afterwards ensure you Copy() them before passing to\n\/\/ the channel\nfunc (jq *Jq) Start(program string, args *Jv) (in chan<- *Jv, out <-chan *Jv, errs <-chan error) {\n\t\/\/ Create out two way copy of the channels. We need to be able to recv from\n\t\/\/ input, so need to store the original channel\n\tcIn := make(chan *Jv)\n\tcOut := make(chan *Jv)\n\tcErr := make(chan error)\n\n\t\/\/ And assign the read\/write only versions to the output fars\n\tin = cIn\n\tout = cOut\n\terrs = cErr\n\n\t\/\/ Before setting up any of the global error handling state, lets check that\n\t\/\/ args is of the right type!\n\tif args.Kind() != JV_KIND_ARRAY {\n\t\tgo func() {\n\t\t\t\/\/ Take ownership of the inputs\n\t\t\tfor jv := range cIn {\n\t\t\t\tjv.Free()\n\t\t\t}\n\t\t\tcErr <- fmt.Errorf(\"`args` parameter is of type %s not array!\", args.Kind().String())\n\t\t\targs.Free()\n\t\t\tclose(cOut)\n\t\t\tclose(cErr)\n\t\t}()\n\t\treturn\n\t}\n\n\tjq.errorStoreId = globalErrorChannels.Add(cErr)\n\n\t\/\/ Because we can't pass a function pointer to an exported Go func we have to\n\t\/\/ call a C function which uses the exported fund for us.\n\t\/\/ https:\/\/github.com\/golang\/go\/wiki\/cgo#function-variables\n\tC.install_jq_error_cb(jq._state, C.ulonglong(jq.errorStoreId))\n\n\tgo func() {\n\n\t\tif jq._Compile(program, args) == false {\n\t\t\t\/\/ Even if compile failed follow the contract. Read any inputs and take\n\t\t\t\/\/ ownership of them (aka free them)\n\t\t\t\/\/\n\t\t\t\/\/ Errors from compile will be sent to the error channel\n\t\t\tfor jv := range cIn {\n\t\t\t\tjv.Free()\n\t\t\t}\n\t\t} else {\n\t\t\tfor jv := range cIn {\n\t\t\t\tjq._Execute(jv, cOut, cErr)\n\t\t\t}\n\t\t}\n\t\t\/\/ Once we've read all the inputs close the output to signal to caller that\n\t\t\/\/ we are done.\n\t\tclose(cOut)\n\t\tclose(cErr)\n\t\tC.install_jq_error_cb(jq._state, 0)\n\t}()\n\n\treturn\n}\n\n\/\/ Process a single input and send the results on `out`\nfunc (jq *Jq) _Execute(jv *Jv, out chan<- *Jv, err chan<- error) {\n\tflags := C.int(0)\n\n\tC.jq_start(jq._state, jv.jv, flags)\n\tresult := &Jv{C.jq_next(jq._state)}\n\tfor result.IsValid() {\n\t\tout <- result\n\t\tresult = &Jv{C.jq_next(jq._state)}\n\t}\n\tmsg, ok := result.GetInvalidMessageAsString()\n\tif ok {\n\t\t\/\/ Uncaught jq exception\n\t\t\/\/ TODO: get file:line position in input somehow.\n\t\terr <- errors.New(msg)\n\t}\n}\n\nfunc (jq *Jq) _Compile(prog string, args *Jv) bool {\n\tcs := C.CString(prog)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\tcompiled := C.jq_compile_args(jq._state, cs, args.jv) != 0\n\t\/\/ If there was an error it will have been sent to errorChannel\n\treturn compiled\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/robertkrimen\/otto\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar (\n\tcodeListPtr = ArgsStrList(kingpin.Arg(\"code\", \"code\").Required())\n\tlineSeqPtr = kingpin.Flag(\"line-seq\", \"code\").Short('l').Default(\"\\n\").String()\n\tcolumnSeqPtr = kingpin.Flag(\"column-seq\", \"code\").Short('c').Default(\" \").String()\n\tfuncListPtr = ArgsStrList(kingpin.Flag(\"funcion\", \"function\").Short('f'))\n\tpathListPtr = ArgsStrList(kingpin.Flag(\"path\", \"command search path\").Short('p'))\n)\n\ntype argsStrList []string\n\nfunc (i *argsStrList) Set(value string) error {\n\t*i = append(*i, value)\n\treturn nil\n}\n\nfunc (i *argsStrList) String() string {\n\treturn \"\"\n}\n\nfunc (i *argsStrList) IsCumulative() bool {\n\treturn true\n}\n\nfunc ArgsStrList(s kingpin.Settings) (target *[]string) {\n\ttarget = new([]string)\n\ts.SetValue((*argsStrList)(target))\n\treturn\n}\n\nfunc readAll() string {\n\tbytes, err := ioutil.ReadAll(os.Stdin)\n\tif err == nil {\n\t\treturn string(bytes)\n\t} else {\n\t\tpanic(\"Failed to read stdin\")\n\t}\n}\n\ntype Matrix [][]string\n\nfunc setPrintArrayFunc(vm *otto.Otto) {\n\tvm.Run(`\n\tprintArray = function(arr) {\n\t\tfor(i=0;i<arr.length;i+=1){\n\t\t\tconsole.log(i)\n\t\t}\n\t}`)\n\n}\n\nfunc callExternalFunc(cmd string, args []string) string {\n\tout, err := exec.Command(cmd, args...).Output()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(out)\n\n}\n\nfunc addPATHEnv(path string) {\n\toldEnv := os.Getenv(\"PATH\")\n\tnewEnv := path + \":\" + oldEnv\n\tos.Setenv(\"PATH\", newEnv)\n}\n\nfunc getWd() string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cwd\n}\n\nfunc initExternelFunc(vm *otto.Otto, cmdPath string) {\n\tfuncName := cmdPath[:len(cmdPath)-len(filepath.Ext(cmdPath))]\n\tvm.Set(funcName, func(call otto.FunctionCall) otto.Value {\n\t\targs := make([]string, len(call.ArgumentList))\n\t\tfor i := 0; i < len(call.ArgumentList); i += 1 {\n\t\t\tret, err := call.Argument(i).ToString()\n\t\t\tif err == nil {\n\t\t\t\targs[i] = ret\n\t\t\t} else {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t}\n\t\toutput := callExternalFunc(cmdPath, args)\n\t\tresult, err := vm.ToValue(output)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn result\n\t})\n\n}\n\nfunc main() {\n\taddPATHEnv(getWd())\n\tfmt.Printf(\"\")\n\tkingpin.Parse()\n\n\tstdin := readAll()\n\tlines := strings.Split(stdin, *lineSeqPtr)\n\tmatrixPtr := new(Matrix)\n\n\tfor i := 0; i < len(lines); i += 1 {\n\t\t*matrixPtr = append(*matrixPtr, strings.Split(lines[i], *columnSeqPtr))\n\t}\n\n\tvm := otto.New()\n\n\tvm.Set(\"stdin\", stdin)\n\tvm.Set(\"stdout\", \"\")\n\tvm.Set(\"lines\", lines)\n\tvm.Set(\"matrix\", *matrixPtr)\n\tvm.Run(\"print = console.log\")\n\tsetPrintArrayFunc(vm)\n\n\tfor i := 0; i < len(*funcListPtr); i += 1 {\n\t\tinitExternelFunc(vm, (*funcListPtr)[i])\n\t}\n\n\tfor i := 0; i < len(*pathListPtr); i += 1 {\n\t\taddPATHEnv((*pathListPtr)[i])\n\t}\n\n\tfor i := 0; i < len(*codeListPtr); i += 1 {\n\t\tvm.Run((*codeListPtr)[i])\n\t}\n\n}\n<commit_msg>allow to read js from file<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/robertkrimen\/otto\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar (\n\tcodeListPtr = ArgsStrList(kingpin.Arg(\"code\", \"code\").Required())\n\tlineSeqPtr = kingpin.Flag(\"line-seq\", \"code\").Short('l').Default(\"\\n\").String()\n\tcolumnSeqPtr = kingpin.Flag(\"column-seq\", \"code\").Short('c').Default(\" \").String()\n\tfuncListPtr = ArgsStrList(kingpin.Flag(\"funcion\", \"function\").Short('f'))\n\tpathListPtr = ArgsStrList(kingpin.Flag(\"path\", \"command search path\").Short('p'))\n\tjsListPtr = ArgsStrList(kingpin.Flag(\"js\", \"Javascript file\").Short('j'))\n)\n\ntype argsStrList []string\n\nfunc (i *argsStrList) Set(value string) error {\n\t*i = append(*i, value)\n\treturn nil\n}\n\nfunc (i *argsStrList) String() string {\n\treturn \"\"\n}\n\nfunc (i *argsStrList) IsCumulative() bool {\n\treturn true\n}\n\nfunc ArgsStrList(s kingpin.Settings) (target *[]string) {\n\ttarget = new([]string)\n\ts.SetValue((*argsStrList)(target))\n\treturn\n}\n\nfunc readAll() string {\n\tbytes, err := ioutil.ReadAll(os.Stdin)\n\tif err == nil {\n\t\treturn string(bytes)\n\t} else {\n\t\tpanic(\"Failed to read stdin\")\n\t}\n}\n\nfunc readJSFile(vm *otto.Otto, path string) {\n\tfile, err := os.Open(path)\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbytes, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvm.Run(string(bytes))\n}\n\ntype Matrix [][]string\n\nfunc setPrintArrayFunc(vm *otto.Otto) {\n\tvm.Run(`\n\tprintArray = function(arr) {\n\t\tfor(i=0;i<arr.length;i+=1){\n\t\t\tconsole.log(i)\n\t\t}\n\t}`)\n\n}\n\nfunc callExternalFunc(cmd string, args []string) string {\n\tout, err := exec.Command(cmd, args...).Output()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(out)\n\n}\n\nfunc addPATHEnv(path string) {\n\toldEnv := os.Getenv(\"PATH\")\n\tnewEnv := path + \":\" + oldEnv\n\tos.Setenv(\"PATH\", newEnv)\n}\n\nfunc getWd() string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cwd\n}\n\nfunc initExternelFunc(vm *otto.Otto, cmdPath string) {\n\tfuncName := cmdPath[:len(cmdPath)-len(filepath.Ext(cmdPath))]\n\tvm.Set(funcName, func(call otto.FunctionCall) otto.Value {\n\t\targs := make([]string, len(call.ArgumentList))\n\t\tfor i := 0; i < len(call.ArgumentList); i += 1 {\n\t\t\tret, err := call.Argument(i).ToString()\n\t\t\tif err == nil {\n\t\t\t\targs[i] = ret\n\t\t\t} else {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t}\n\t\toutput := callExternalFunc(cmdPath, args)\n\t\tresult, err := vm.ToValue(output)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn result\n\t})\n\n}\n\nfunc main() {\n\taddPATHEnv(getWd())\n\tfmt.Printf(\"\")\n\tkingpin.Parse()\n\n\tstdin := readAll()\n\tlines := strings.Split(stdin, *lineSeqPtr)\n\tmatrixPtr := new(Matrix)\n\n\tfor i := 0; i < len(lines); i += 1 {\n\t\t*matrixPtr = append(*matrixPtr, strings.Split(lines[i], *columnSeqPtr))\n\t}\n\n\tvm := otto.New()\n\n\tvm.Set(\"stdin\", stdin)\n\tvm.Set(\"stdout\", \"\")\n\tvm.Set(\"lines\", lines)\n\tvm.Set(\"matrix\", *matrixPtr)\n\tvm.Run(\"print = console.log\")\n\tsetPrintArrayFunc(vm)\n\n\tfor i := 0; i < len(*funcListPtr); i += 1 {\n\t\tinitExternelFunc(vm, (*funcListPtr)[i])\n\t}\n\n\tfor i := 0; i < len(*pathListPtr); i += 1 {\n\t\taddPATHEnv((*pathListPtr)[i])\n\t}\n\n\tfor i := 0; i < len(*jsListPtr); i += 1 {\n\t\tfmt.Printf(\"%v\", i)\n\t\treadJSFile(vm, (*jsListPtr)[i])\n\t}\n\n\tfor i := 0; i < len(*codeListPtr); i += 1 {\n\t\tvm.Run((*codeListPtr)[i])\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package junos\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Juniper\/go-netconf\/netconf\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n)\n\n\/\/ Junos holds the connection information to our Junos device.\ntype Junos struct {\n\t*netconf.Session\n}\n\n\/\/ CommandXML parses our operational command responses.\ntype commandXML struct {\n\tConfig string `xml:\",innerxml\"`\n}\n\n\/\/ commitError parses any errors during the commit process.\ntype commitError struct {\n\tPath string `xml:\"error-path\"`\n\tElement string `xml:\"error-info>bad-element\"`\n\tMessage string `xml:\"error-message\"`\n}\n\n\/\/ commitResults stores our errors if we have any.\ntype commitResults struct {\n\tXMLName xml.Name `xml:\"commit-results\"`\n\tErrors []commitError `xml:\"rpc-error\"`\n}\n\n\/\/ rollbackXML parses our rollback diff configuration.\ntype diffXML struct {\n\tXMLName xml.Name `xml:\"rollback-information\"`\n\tConfig string `xml:\"configuration-information>configuration-output\"`\n}\n\n\/\/ Close disconnects our session to the device.\nfunc (j *Junos) Close() {\n\tj.Transport.Close()\n}\n\n\/\/ Command runs any operational mode command, such as \"show\" or \"request.\"\n\/\/ Format is either \"text\" or \"xml\".\nfunc (j *Junos) Command(cmd, format string) (string, error) {\n\tc := &commandXML{}\n\tvar command string\n\terrMessage := \"No output available. Please check the syntax of your command.\"\n\n\tswitch format {\n\tcase \"xml\":\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command-xml\"], cmd)\n\tdefault:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command\"], cmd)\n\t}\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn errMessage, err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errMessage, errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &c)\n\tif err != nil {\n\t\treturn errMessage, err\n\t}\n\n\tif c.Config == \"\" {\n\t\treturn errMessage, nil\n\t}\n\n\treturn c.Config, nil\n}\n\n\/\/ Commit commits the configuration.\nfunc (j *Junos) Commit() error {\n\terrs := &commitResults{}\n\treply, err := j.Exec(rpcCommand[\"commit\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitAt commits the configuration at the specified <time>.\nfunc (j *Junos) CommitAt(time string) error {\n\terrs := &commitResults{}\n\tcommand := fmt.Sprintf(rpcCommand[\"commit-at\"], time)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitCheck checks the configuration for syntax errors.\nfunc (j *Junos) CommitCheck() error {\n\terrs := &commitResults{}\n\treply, err := j.Exec(rpcCommand[\"commit-check\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitConfirm rolls back the configuration after <delay> minutes.\nfunc (j *Junos) CommitConfirm(delay int) error {\n\terrs := &commitResults{}\n\tcommand := fmt.Sprintf(rpcCommand[\"commit-confirm\"], delay)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ ConfigDiff compares the current active configuration to a given rollback configuration.\nfunc (j *Junos) ConfigDiff(compare int) (string, error) {\n\trb := &diffXML{}\n\tcommand := fmt.Sprintf(rpcCommand[\"get-rollback-information-compare\"], compare)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), rb)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn rb.Config, nil\n}\n\n\/\/ LoadConfig loads a given configuration file locally or from\n\/\/ an FTP or HTTP server. Format is either \"set\" \"text\" or \"xml.\"\nfunc (j *Junos) LoadConfig(path, format string, commit bool) error {\n\tvar command string\n\tswitch format {\n\tcase \"set\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-set\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-set\"], string(data))\n\t\t}\n\tcase \"text\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-text\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-text\"], string(data))\n\t\t}\n\tcase \"xml\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-xml\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-xml\"], string(data))\n\t\t}\n\t}\n\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif commit {\n\t\terr = j.Commit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Lock locks the candidate configuration.\nfunc (j *Junos) Lock() error {\n\treply, err := j.Exec(rpcCommand[\"lock\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ NewSession establishes a new connection to a Junos device that we will use\n\/\/ to run our commands against.\nfunc NewSession(host, user, password string) *Junos {\n\ts, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &Junos{\n\t\ts,\n\t}\n}\n\n\/\/ Rescue will create or delete the rescue configuration given \"save\" or \"delete.\"\nfunc (j *Junos) Rescue(action string) error {\n\tcommand := fmt.Sprintf(\"rescue-%s\", action)\n\treply, err := j.Exec(rpcCommand[command])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackConfig loads and commits the configuration of a given rollback or rescue state.\nfunc (j *Junos) RollbackConfig(option interface{}) error {\n\tvar command string\n\tswitch option.(type) {\n\tcase int:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rollback-config\"], option)\n\tcase string:\n\t\tif option == \"rescue\" {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"rescue-config\"])\n\t\t}\n\t}\n\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = j.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Unlock unlocks the candidate configuration.\nfunc (j *Junos) Unlock() error {\n\tresp, err := j.Exec(rpcCommand[\"unlock\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Ok == false {\n\t\tfor _, m := range resp.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Added GetConfig() function<commit_after>package junos\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Juniper\/go-netconf\/netconf\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n)\n\n\/\/ Junos holds the connection information to our Junos device.\ntype Junos struct {\n\t*netconf.Session\n}\n\n\/\/ CommandXML parses our operational command responses.\ntype commandXML struct {\n\tConfig string `xml:\",innerxml\"`\n}\n\n\/\/ commitError parses any errors during the commit process.\ntype commitError struct {\n\tPath string `xml:\"error-path\"`\n\tElement string `xml:\"error-info>bad-element\"`\n\tMessage string `xml:\"error-message\"`\n}\n\n\/\/ commitResults stores our errors if we have any.\ntype commitResults struct {\n\tXMLName xml.Name `xml:\"commit-results\"`\n\tErrors []commitError `xml:\"rpc-error\"`\n}\n\n\/\/ rollbackXML parses our rollback diff configuration.\ntype diffXML struct {\n\tXMLName xml.Name `xml:\"rollback-information\"`\n\tConfig string `xml:\"configuration-information>configuration-output\"`\n}\n\n\/\/ Close disconnects our session to the device.\nfunc (j *Junos) Close() {\n\tj.Transport.Close()\n}\n\n\/\/ Command runs any operational mode command, such as \"show\" or \"request.\"\n\/\/ Format is either \"text\" or \"xml\".\nfunc (j *Junos) Command(cmd, format string) (string, error) {\n\tc := &commandXML{}\n\tvar command string\n\terrMessage := \"No output available. Please check the syntax of your command.\"\n\n\tswitch format {\n\tcase \"xml\":\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command-xml\"], cmd)\n\tdefault:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command\"], cmd)\n\t}\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn errMessage, err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errMessage, errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &c)\n\tif err != nil {\n\t\treturn errMessage, err\n\t}\n\n\tif c.Config == \"\" {\n\t\treturn errMessage, nil\n\t}\n\n\treturn c.Config, nil\n}\n\n\/\/ Commit commits the configuration.\nfunc (j *Junos) Commit() error {\n\terrs := &commitResults{}\n\treply, err := j.Exec(rpcCommand[\"commit\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitAt commits the configuration at the specified <time>.\nfunc (j *Junos) CommitAt(time string) error {\n\terrs := &commitResults{}\n\tcommand := fmt.Sprintf(rpcCommand[\"commit-at\"], time)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitCheck checks the configuration for syntax errors.\nfunc (j *Junos) CommitCheck() error {\n\terrs := &commitResults{}\n\treply, err := j.Exec(rpcCommand[\"commit-check\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitConfirm rolls back the configuration after <delay> minutes.\nfunc (j *Junos) CommitConfirm(delay int) error {\n\terrs := &commitResults{}\n\tcommand := fmt.Sprintf(rpcCommand[\"commit-confirm\"], delay)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif errs.Errors != nil {\n\t\tfor _, m := range errs.Errors {\n\t\t\tmessage := fmt.Sprintf(\"[%s]\\n %s\\nError: %s\", strings.Trim(m.Path, \"[\\r\\n]\"), strings.Trim(m.Element, \"[\\r\\n]\"), strings.Trim(m.Message, \"[\\r\\n]\"))\n\t\t\treturn errors.New(message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ ConfigDiff compares the current active configuration to a given rollback configuration.\nfunc (j *Junos) ConfigDiff(compare int) (string, error) {\n\trb := &diffXML{}\n\tcommand := fmt.Sprintf(rpcCommand[\"get-rollback-information-compare\"], compare)\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), rb)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn rb.Config, nil\n}\n\n\/\/ GetConfig returns the full configuration, or starting a given <section>.\n\/\/ Format can either be \"text\" or \"xml.\"\nfunc (j *Junos) GetConfig(format, section string) (string, error) {\n\tcommand := fmt.Sprintf(\"<rpc><get-configuration format=\\\"%s\\\"><configuration>\", format)\n\n\tif section == \"full\" {\n\t\tcommand += \"<\/configuration><\/get-configuration><\/rpc>\"\n\t} else {\n\t\tcommand += fmt.Sprintf(\"<%s\/><\/configuration><\/get-configuration><\/rpc>\", section)\n\t}\n\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\tif format == \"text\" {\n\t\toutput := &commandXML{}\n\t\terr = xml.Unmarshal([]byte(reply.Data), output)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn output.Config, nil\n\t}\n\n\treturn reply.Data, nil\n}\n\n\/\/ LoadConfig loads a given configuration file locally or from\n\/\/ an FTP or HTTP server. Format is either \"set\" \"text\" or \"xml.\"\nfunc (j *Junos) LoadConfig(path, format string, commit bool) error {\n\tvar command string\n\tswitch format {\n\tcase \"set\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-set\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-set\"], string(data))\n\t\t}\n\tcase \"text\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-text\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-text\"], string(data))\n\t\t}\n\tcase \"xml\":\n\t\tif strings.Contains(path, \"tp:\/\/\") {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"load-config-url-xml\"], path)\n\t\t} else {\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"config-file-xml\"], string(data))\n\t\t}\n\t}\n\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif commit {\n\t\terr = j.Commit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Lock locks the candidate configuration.\nfunc (j *Junos) Lock() error {\n\treply, err := j.Exec(rpcCommand[\"lock\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ NewSession establishes a new connection to a Junos device that we will use\n\/\/ to run our commands against.\nfunc NewSession(host, user, password string) *Junos {\n\ts, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &Junos{\n\t\ts,\n\t}\n}\n\n\/\/ Rescue will create or delete the rescue configuration given \"save\" or \"delete.\"\nfunc (j *Junos) Rescue(action string) error {\n\tcommand := fmt.Sprintf(\"rescue-%s\", action)\n\treply, err := j.Exec(rpcCommand[command])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackConfig loads and commits the configuration of a given rollback or rescue state.\nfunc (j *Junos) RollbackConfig(option interface{}) error {\n\tvar command string\n\tswitch option.(type) {\n\tcase int:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rollback-config\"], option)\n\tcase string:\n\t\tif option == \"rescue\" {\n\t\t\tcommand = fmt.Sprintf(rpcCommand[\"rescue-config\"])\n\t\t}\n\t}\n\n\treply, err := j.Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = j.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Errors != nil {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Unlock unlocks the candidate configuration.\nfunc (j *Junos) Unlock() error {\n\tresp, err := j.Exec(rpcCommand[\"unlock\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Ok == false {\n\t\tfor _, m := range resp.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/redBorder\/rbforwarder\"\n\t\"gopkg.in\/Shopify\/sarama.v1\"\n\t\"gopkg.in\/bsm\/sarama-cluster.v2\"\n)\n\n\/\/ KafkaConsumer get messages from multiple kafka topics\ntype KafkaConsumer struct {\n\tbackend *rbforwarder.RBForwarder \/\/ The backend to send messages\n\tconsumer *cluster.Consumer\n\tclosed bool\n\tConfig KafkaConfig \/\/ Cofiguration after the parsing\n}\n\n\/\/ KafkaConfig stores the configuration for the Kafka source\ntype KafkaConfig struct {\n\ttopics []string \/\/ Topics where listen for messages\n\tbrokers []string \/\/ Brokers to connect\n\tconsumergroup string \/\/ ID for the consumer\n\tconsumerGroupConfig *cluster.Config\n}\n\n\/\/ Start starts reading messages from kafka and pushing them to the pipeline\nfunc (k *KafkaConsumer) Start() {\n\tvar err error\n\n\tvar offset uint64\n\tvar eventsReported uint64\n\tvar eventsSent uint64\n\n\tlogger = Logger.WithFields(logrus.Fields{\n\t\t\"prefix\": \"k2http\",\n\t})\n\n\t\/\/ Start processing reports\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor report := range k.backend.GetOrderedReports() {\n\t\t\tmessage := report.Metadata[\"sarama_message\"].(*sarama.ConsumerMessage)\n\n\t\t\tif offset != report.ID {\n\t\t\t\tlogger.Fatalf(\"Unexpected offset. Expected %d, found %d.\",\n\t\t\t\t\toffset, report.ID)\n\t\t\t}\n\n\t\t\tif report.StatusCode != 0 {\n\t\t\t\tlogger.\n\t\t\t\t\tWithField(\"ID\", report.ID).\n\t\t\t\t\tWithField(\"STATUS\", report.Status).\n\t\t\t\t\tWithField(\"OFFSET\", message.Offset).\n\t\t\t\t\tErrorf(\"REPORT\")\n\t\t\t}\n\n\t\t\tk.consumer.MarkOffset(message, \"\")\n\t\t\toffset++\n\t\t\teventsReported++\n\t\t}\n\n\t\tdone <- struct{}{}\n\t}()\n\n\t\/\/ Init consumer, consume errors & messages\nmainLoop:\n\tfor {\n\t\tk.consumer, err = cluster.NewConsumer(\n\t\t\tk.Config.brokers,\n\t\t\tk.Config.consumergroup,\n\t\t\tk.Config.topics,\n\t\t\tk.Config.consumerGroupConfig,\n\t\t)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"Failed to start consumer: \", err)\n\t\t\tbreak\n\t\t}\n\n\t\tlogger.\n\t\t\tWithField(\"brokers\", k.Config.brokers).\n\t\t\tWithField(\"consumergroup\", k.Config.consumergroup).\n\t\t\tWithField(\"topics\", k.Config.topics).\n\t\t\tInfo(\"Started consumer\")\n\n\t\t\/\/ Start consuming messages\n\t\tfor message := range k.consumer.Messages() {\n\t\t\tif message == nil {\n\t\t\t\tk.Config.consumerGroupConfig.Consumer.Offsets.Initial = sarama.OffsetOldest\n\t\t\t\tif err := k.consumer.Close(); err != nil {\n\t\t\t\t\tlogger.Error(\"Failed to close consumer: \", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tmsg, err := k.backend.TakeMessage()\n\t\t\tif err != nil {\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\n\t\t\tmsg.InputBuffer = bytes.NewBuffer(message.Value)\n\t\t\tmsg.Metadata[\"sarama_message\"] = message\n\t\t\tmsg.Metadata[\"topic\"] = message.Topic\n\t\t\tif err := msg.Produce(); err != nil {\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\n\t\t\teventsSent++\n\t\t}\n\n\t\tif k.closed {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlogger.Info(\"Consumer terminated\")\n\n\t<-done\n\tlogger.Infof(\"TOTAL SENT MESSAGES: %d\", eventsSent)\n\tlogger.Infof(\"TOTAL REPORTS: %d\", eventsReported)\n\n\treturn\n}\n\n\/\/ Close closes the connection with Kafka\nfunc (k *KafkaConsumer) Close() {\n\tk.closed = true\n\tif err := k.consumer.Close(); err != nil {\n\t\tlogger.Println(\"Failed to close consumer: \", err)\n\t}\n}\n\n\/\/ ParseKafkaConfig reads the configuration from the YAML config file and store it\n\/\/ on the instance\nfunc (k *KafkaConsumer) ParseKafkaConfig(config map[string]interface{}) {\n\n\t\/\/ Create the config\n\tk.Config.consumerGroupConfig = cluster.NewConfig()\n\tk.Config.consumerGroupConfig.Config.Consumer.Offsets.CommitInterval = 1 * time.Second\n\tk.Config.consumerGroupConfig.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tk.Config.consumerGroupConfig.Consumer.MaxProcessingTime = 3 * time.Second\n\n\tif *debug {\n\t\tsaramaLogger := Logger.WithField(\"prefix\", \"kafka-consumer\")\n\t\tsarama.Logger = saramaLogger\n\t}\n\n\t\/\/ Parse the brokers addresses\n\tif config[\"broker\"] != nil {\n\t\tk.Config.brokers = strings.Split(config[\"broker\"].(string), \",\")\n\t}\n\n\t\/\/ Parse topics\n\tif config[\"topics\"] != nil {\n\t\ttopics := config[\"topics\"].([]interface{})\n\n\t\tfor _, topic := range topics {\n\t\t\tk.Config.topics = append(k.Config.topics, topic.(string))\n\t\t}\n\t}\n\n\t\/\/ Parse consumergroup\n\tif config[\"consumergroup\"] != nil {\n\t\tk.Config.consumergroup = config[\"consumergroup\"].(string)\n\t}\n}\n<commit_msg>Fix dependency fail<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/redBorder\/rbforwarder\"\n\t\"gopkg.in\/bsm\/sarama-cluster.v2\"\n)\n\n\/\/ KafkaConsumer get messages from multiple kafka topics\ntype KafkaConsumer struct {\n\tbackend *rbforwarder.RBForwarder \/\/ The backend to send messages\n\tconsumer *cluster.Consumer\n\tclosed bool\n\tConfig KafkaConfig \/\/ Cofiguration after the parsing\n}\n\n\/\/ KafkaConfig stores the configuration for the Kafka source\ntype KafkaConfig struct {\n\ttopics []string \/\/ Topics where listen for messages\n\tbrokers []string \/\/ Brokers to connect\n\tconsumergroup string \/\/ ID for the consumer\n\tconsumerGroupConfig *cluster.Config\n}\n\n\/\/ Start starts reading messages from kafka and pushing them to the pipeline\nfunc (k *KafkaConsumer) Start() {\n\tvar err error\n\n\tvar offset uint64\n\tvar eventsReported uint64\n\tvar eventsSent uint64\n\n\tlogger = Logger.WithFields(logrus.Fields{\n\t\t\"prefix\": \"k2http\",\n\t})\n\n\t\/\/ Start processing reports\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor report := range k.backend.GetOrderedReports() {\n\t\t\tmessage := report.Metadata[\"sarama_message\"].(*sarama.ConsumerMessage)\n\n\t\t\tif offset != report.ID {\n\t\t\t\tlogger.Fatalf(\"Unexpected offset. Expected %d, found %d.\",\n\t\t\t\t\toffset, report.ID)\n\t\t\t}\n\n\t\t\tif report.StatusCode != 0 {\n\t\t\t\tlogger.\n\t\t\t\t\tWithField(\"ID\", report.ID).\n\t\t\t\t\tWithField(\"STATUS\", report.Status).\n\t\t\t\t\tWithField(\"OFFSET\", message.Offset).\n\t\t\t\t\tErrorf(\"REPORT\")\n\t\t\t}\n\n\t\t\tk.consumer.MarkOffset(message, \"\")\n\t\t\toffset++\n\t\t\teventsReported++\n\t\t}\n\n\t\tdone <- struct{}{}\n\t}()\n\n\t\/\/ Init consumer, consume errors & messages\nmainLoop:\n\tfor {\n\t\tk.consumer, err = cluster.NewConsumer(\n\t\t\tk.Config.brokers,\n\t\t\tk.Config.consumergroup,\n\t\t\tk.Config.topics,\n\t\t\tk.Config.consumerGroupConfig,\n\t\t)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"Failed to start consumer: \", err)\n\t\t\tbreak\n\t\t}\n\n\t\tlogger.\n\t\t\tWithField(\"brokers\", k.Config.brokers).\n\t\t\tWithField(\"consumergroup\", k.Config.consumergroup).\n\t\t\tWithField(\"topics\", k.Config.topics).\n\t\t\tInfo(\"Started consumer\")\n\n\t\t\/\/ Start consuming messages\n\t\tfor message := range k.consumer.Messages() {\n\t\t\tif message == nil {\n\t\t\t\tk.Config.consumerGroupConfig.Consumer.Offsets.Initial = sarama.OffsetOldest\n\t\t\t\tif err := k.consumer.Close(); err != nil {\n\t\t\t\t\tlogger.Error(\"Failed to close consumer: \", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tmsg, err := k.backend.TakeMessage()\n\t\t\tif err != nil {\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\n\t\t\tmsg.InputBuffer = bytes.NewBuffer(message.Value)\n\t\t\tmsg.Metadata[\"sarama_message\"] = message\n\t\t\tmsg.Metadata[\"topic\"] = message.Topic\n\t\t\tif err := msg.Produce(); err != nil {\n\t\t\t\tbreak mainLoop\n\t\t\t}\n\n\t\t\teventsSent++\n\t\t}\n\n\t\tif k.closed {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlogger.Info(\"Consumer terminated\")\n\n\t<-done\n\tlogger.Infof(\"TOTAL SENT MESSAGES: %d\", eventsSent)\n\tlogger.Infof(\"TOTAL REPORTS: %d\", eventsReported)\n\n\treturn\n}\n\n\/\/ Close closes the connection with Kafka\nfunc (k *KafkaConsumer) Close() {\n\tk.closed = true\n\tif err := k.consumer.Close(); err != nil {\n\t\tlogger.Println(\"Failed to close consumer: \", err)\n\t}\n}\n\n\/\/ ParseKafkaConfig reads the configuration from the YAML config file and store it\n\/\/ on the instance\nfunc (k *KafkaConsumer) ParseKafkaConfig(config map[string]interface{}) {\n\n\t\/\/ Create the config\n\tk.Config.consumerGroupConfig = cluster.NewConfig()\n\tk.Config.consumerGroupConfig.Config.Consumer.Offsets.CommitInterval = 1 * time.Second\n\tk.Config.consumerGroupConfig.Consumer.Offsets.Initial = sarama.OffsetNewest\n\tk.Config.consumerGroupConfig.Consumer.MaxProcessingTime = 3 * time.Second\n\n\tif *debug {\n\t\tsaramaLogger := Logger.WithField(\"prefix\", \"kafka-consumer\")\n\t\tsarama.Logger = saramaLogger\n\t}\n\n\t\/\/ Parse the brokers addresses\n\tif config[\"broker\"] != nil {\n\t\tk.Config.brokers = strings.Split(config[\"broker\"].(string), \",\")\n\t}\n\n\t\/\/ Parse topics\n\tif config[\"topics\"] != nil {\n\t\ttopics := config[\"topics\"].([]interface{})\n\n\t\tfor _, topic := range topics {\n\t\t\tk.Config.topics = append(k.Config.topics, topic.(string))\n\t\t}\n\t}\n\n\t\/\/ Parse consumergroup\n\tif config[\"consumergroup\"] != nil {\n\t\tk.Config.consumergroup = config[\"consumergroup\"].(string)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package overlay\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst ovPeerTable = \"overlay_peer_table\"\n\ntype peerKey struct {\n\tpeerIP net.IP\n\tpeerMac net.HardwareAddr\n}\n\ntype peerEntry struct {\n\teid string\n\tvtep net.IP\n\tpeerIPMask net.IPMask\n\tinSandbox bool\n\tisLocal bool\n}\n\ntype peerMap struct {\n\tmp map[string]peerEntry\n\tsync.Mutex\n}\n\ntype peerNetworkMap struct {\n\tmp map[string]*peerMap\n\tsync.Mutex\n}\n\nfunc (pKey peerKey) String() string {\n\treturn fmt.Sprintf(\"%s %s\", pKey.peerIP, pKey.peerMac)\n}\n\nfunc (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {\n\tipB, err := state.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpKey.peerIP = net.ParseIP(string(ipB))\n\n\tmacB, err := state.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpKey.peerMac, err = net.ParseMAC(string(macB))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar peerDbWg sync.WaitGroup\n\nfunc (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {\n\td.peerDb.Lock()\n\tnids := []string{}\n\tfor nid := range d.peerDb.mp {\n\t\tnids = append(nids, nid)\n\t}\n\td.peerDb.Unlock()\n\n\tfor _, nid := range nids {\n\t\td.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {\n\t\t\treturn f(nid, pKey, pEntry)\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn nil\n\t}\n\td.peerDb.Unlock()\n\n\tpMap.Lock()\n\tfor pKeyStr, pEntry := range pMap.mp {\n\t\tvar pKey peerKey\n\t\tif _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {\n\t\t\tlog.Warnf(\"Peer key scan on network %s failed: %v\", nid, err)\n\t\t}\n\n\t\tif f(&pKey, &pEntry) {\n\t\t\tpMap.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n\tpMap.Unlock()\n\n\treturn nil\n}\n\nfunc (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {\n\tvar (\n\t\tpeerMac net.HardwareAddr\n\t\tvtep net.IP\n\t\tpeerIPMask net.IPMask\n\t\tfound bool\n\t)\n\n\terr := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {\n\t\tif pKey.peerIP.Equal(peerIP) {\n\t\t\tpeerMac = pKey.peerMac\n\t\t\tpeerIPMask = pEntry.peerIPMask\n\t\t\tvtep = pEntry.vtep\n\t\t\tfound = true\n\t\t\treturn found\n\t\t}\n\n\t\treturn found\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"peerdb search for peer ip %q failed: %v\", peerIP, err)\n\t}\n\n\tif !found {\n\t\treturn nil, nil, nil, fmt.Errorf(\"peer ip %q not found in peerdb\", peerIP)\n\t}\n\n\treturn peerMac, peerIPMask, vtep, nil\n}\n\nfunc (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, isLocal bool) {\n\n\tpeerDbWg.Wait()\n\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.mp[nid] = &peerMap{\n\t\t\tmp: make(map[string]peerEntry),\n\t\t}\n\n\t\tpMap = d.peerDb.mp[nid]\n\t}\n\td.peerDb.Unlock()\n\n\tpKey := peerKey{\n\t\tpeerIP: peerIP,\n\t\tpeerMac: peerMac,\n\t}\n\n\tpEntry := peerEntry{\n\t\teid: eid,\n\t\tvtep: vtep,\n\t\tpeerIPMask: peerIPMask,\n\t\tisLocal: isLocal,\n\t}\n\n\tpMap.Lock()\n\tpMap.mp[pKey.String()] = pEntry\n\tpMap.Unlock()\n}\n\nfunc (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP) {\n\tpeerDbWg.Wait()\n\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn\n\t}\n\td.peerDb.Unlock()\n\n\tpKey := peerKey{\n\t\tpeerIP: peerIP,\n\t\tpeerMac: peerMac,\n\t}\n\n\tpMap.Lock()\n\tdelete(pMap.mp, pKey.String())\n\tpMap.Unlock()\n}\n\nfunc (d *driver) peerDbUpdateSandbox(nid string) {\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn\n\t}\n\td.peerDb.Unlock()\n\n\tpeerDbWg.Add(1)\n\n\tvar peerOps []func()\n\tpMap.Lock()\n\tfor pKeyStr, pEntry := range pMap.mp {\n\t\tvar pKey peerKey\n\t\tif _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {\n\t\t\tfmt.Printf(\"peer key scan failed: %v\", err)\n\t\t}\n\n\t\tif pEntry.isLocal {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Go captures variables by reference. The pEntry could be\n\t\t\/\/ pointing to the same memory location for every iteration. Make\n\t\t\/\/ a copy of pEntry before capturing it in the following closure.\n\t\tentry := pEntry\n\t\top := func() {\n\t\t\tif err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,\n\t\t\t\tpKey.peerMac, entry.vtep,\n\t\t\t\tfalse); err != nil {\n\t\t\t\tfmt.Printf(\"peerdbupdate in sandbox failed for ip %s and mac %s: %v\",\n\t\t\t\t\tpKey.peerIP, pKey.peerMac, err)\n\t\t\t}\n\t\t}\n\n\t\tpeerOps = append(peerOps, op)\n\t}\n\tpMap.Unlock()\n\n\tfor _, op := range peerOps {\n\t\top()\n\t}\n\n\tpeerDbWg.Done()\n}\n\nfunc (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {\n\n\tif err := validateID(nid, eid); err != nil {\n\t\treturn err\n\t}\n\n\tif updateDb {\n\t\td.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\tsbox := n.sandbox()\n\tif sbox == nil {\n\t\treturn nil\n\t}\n\n\tIP := &net.IPNet{\n\t\tIP: peerIP,\n\t\tMask: peerIPMask,\n\t}\n\n\ts := n.getSubnetforIP(IP)\n\tif s == nil {\n\t\treturn fmt.Errorf(\"couldn't find the subnet %q in network %q\\n\", IP.String(), n.id)\n\t}\n\n\tif err := n.obtainVxlanID(s); err != nil {\n\t\treturn fmt.Errorf(\"couldn't get vxlan id for %q: %v\", s.subnetIP.String(), err)\n\t}\n\n\tif err := n.joinSubnetSandbox(s, false); err != nil {\n\t\treturn fmt.Errorf(\"subnet sandbox join failed for %q: %v\", s.subnetIP.String(), err)\n\t}\n\n\tif err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\t\/\/ Add neighbor entry for the peer IP\n\tif err := sbox.AddNeighbor(peerIP, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {\n\t\treturn fmt.Errorf(\"could not add neigbor entry into the sandbox: %v\", err)\n\t}\n\n\t\/\/ Add fdb entry to the bridge for the peer mac\n\tif err := sbox.AddNeighbor(vtep, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName),\n\t\tsbox.NeighborOptions().Family(syscall.AF_BRIDGE)); err != nil {\n\t\treturn fmt.Errorf(\"could not add fdb entry into the sandbox: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {\n\n\tif err := validateID(nid, eid); err != nil {\n\t\treturn err\n\t}\n\n\tif updateDb {\n\t\td.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\tsbox := n.sandbox()\n\tif sbox == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Delete fdb entry to the bridge for the peer mac\n\tif err := sbox.DeleteNeighbor(vtep, peerMac); err != nil {\n\t\treturn fmt.Errorf(\"could not delete fdb entry into the sandbox: %v\", err)\n\t}\n\n\t\/\/ Delete neighbor entry for the peer IP\n\tif err := sbox.DeleteNeighbor(peerIP, peerMac); err != nil {\n\t\treturn fmt.Errorf(\"could not delete neigbor entry into the sandbox: %v\", err)\n\t}\n\n\tif err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) pushLocalDb() {\n\td.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {\n\t\tif pEntry.isLocal {\n\t\t\td.pushLocalEndpointEvent(\"join\", nid, pEntry.eid)\n\t\t}\n\t\treturn false\n\t})\n}\n<commit_msg>Fix typo in error message<commit_after>package overlay\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst ovPeerTable = \"overlay_peer_table\"\n\ntype peerKey struct {\n\tpeerIP net.IP\n\tpeerMac net.HardwareAddr\n}\n\ntype peerEntry struct {\n\teid string\n\tvtep net.IP\n\tpeerIPMask net.IPMask\n\tinSandbox bool\n\tisLocal bool\n}\n\ntype peerMap struct {\n\tmp map[string]peerEntry\n\tsync.Mutex\n}\n\ntype peerNetworkMap struct {\n\tmp map[string]*peerMap\n\tsync.Mutex\n}\n\nfunc (pKey peerKey) String() string {\n\treturn fmt.Sprintf(\"%s %s\", pKey.peerIP, pKey.peerMac)\n}\n\nfunc (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {\n\tipB, err := state.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpKey.peerIP = net.ParseIP(string(ipB))\n\n\tmacB, err := state.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpKey.peerMac, err = net.ParseMAC(string(macB))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar peerDbWg sync.WaitGroup\n\nfunc (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {\n\td.peerDb.Lock()\n\tnids := []string{}\n\tfor nid := range d.peerDb.mp {\n\t\tnids = append(nids, nid)\n\t}\n\td.peerDb.Unlock()\n\n\tfor _, nid := range nids {\n\t\td.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {\n\t\t\treturn f(nid, pKey, pEntry)\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn nil\n\t}\n\td.peerDb.Unlock()\n\n\tpMap.Lock()\n\tfor pKeyStr, pEntry := range pMap.mp {\n\t\tvar pKey peerKey\n\t\tif _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {\n\t\t\tlog.Warnf(\"Peer key scan on network %s failed: %v\", nid, err)\n\t\t}\n\n\t\tif f(&pKey, &pEntry) {\n\t\t\tpMap.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n\tpMap.Unlock()\n\n\treturn nil\n}\n\nfunc (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {\n\tvar (\n\t\tpeerMac net.HardwareAddr\n\t\tvtep net.IP\n\t\tpeerIPMask net.IPMask\n\t\tfound bool\n\t)\n\n\terr := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {\n\t\tif pKey.peerIP.Equal(peerIP) {\n\t\t\tpeerMac = pKey.peerMac\n\t\t\tpeerIPMask = pEntry.peerIPMask\n\t\t\tvtep = pEntry.vtep\n\t\t\tfound = true\n\t\t\treturn found\n\t\t}\n\n\t\treturn found\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"peerdb search for peer ip %q failed: %v\", peerIP, err)\n\t}\n\n\tif !found {\n\t\treturn nil, nil, nil, fmt.Errorf(\"peer ip %q not found in peerdb\", peerIP)\n\t}\n\n\treturn peerMac, peerIPMask, vtep, nil\n}\n\nfunc (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, isLocal bool) {\n\n\tpeerDbWg.Wait()\n\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.mp[nid] = &peerMap{\n\t\t\tmp: make(map[string]peerEntry),\n\t\t}\n\n\t\tpMap = d.peerDb.mp[nid]\n\t}\n\td.peerDb.Unlock()\n\n\tpKey := peerKey{\n\t\tpeerIP: peerIP,\n\t\tpeerMac: peerMac,\n\t}\n\n\tpEntry := peerEntry{\n\t\teid: eid,\n\t\tvtep: vtep,\n\t\tpeerIPMask: peerIPMask,\n\t\tisLocal: isLocal,\n\t}\n\n\tpMap.Lock()\n\tpMap.mp[pKey.String()] = pEntry\n\tpMap.Unlock()\n}\n\nfunc (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP) {\n\tpeerDbWg.Wait()\n\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn\n\t}\n\td.peerDb.Unlock()\n\n\tpKey := peerKey{\n\t\tpeerIP: peerIP,\n\t\tpeerMac: peerMac,\n\t}\n\n\tpMap.Lock()\n\tdelete(pMap.mp, pKey.String())\n\tpMap.Unlock()\n}\n\nfunc (d *driver) peerDbUpdateSandbox(nid string) {\n\td.peerDb.Lock()\n\tpMap, ok := d.peerDb.mp[nid]\n\tif !ok {\n\t\td.peerDb.Unlock()\n\t\treturn\n\t}\n\td.peerDb.Unlock()\n\n\tpeerDbWg.Add(1)\n\n\tvar peerOps []func()\n\tpMap.Lock()\n\tfor pKeyStr, pEntry := range pMap.mp {\n\t\tvar pKey peerKey\n\t\tif _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {\n\t\t\tfmt.Printf(\"peer key scan failed: %v\", err)\n\t\t}\n\n\t\tif pEntry.isLocal {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Go captures variables by reference. The pEntry could be\n\t\t\/\/ pointing to the same memory location for every iteration. Make\n\t\t\/\/ a copy of pEntry before capturing it in the following closure.\n\t\tentry := pEntry\n\t\top := func() {\n\t\t\tif err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,\n\t\t\t\tpKey.peerMac, entry.vtep,\n\t\t\t\tfalse); err != nil {\n\t\t\t\tfmt.Printf(\"peerdbupdate in sandbox failed for ip %s and mac %s: %v\",\n\t\t\t\t\tpKey.peerIP, pKey.peerMac, err)\n\t\t\t}\n\t\t}\n\n\t\tpeerOps = append(peerOps, op)\n\t}\n\tpMap.Unlock()\n\n\tfor _, op := range peerOps {\n\t\top()\n\t}\n\n\tpeerDbWg.Done()\n}\n\nfunc (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {\n\n\tif err := validateID(nid, eid); err != nil {\n\t\treturn err\n\t}\n\n\tif updateDb {\n\t\td.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\tsbox := n.sandbox()\n\tif sbox == nil {\n\t\treturn nil\n\t}\n\n\tIP := &net.IPNet{\n\t\tIP: peerIP,\n\t\tMask: peerIPMask,\n\t}\n\n\ts := n.getSubnetforIP(IP)\n\tif s == nil {\n\t\treturn fmt.Errorf(\"couldn't find the subnet %q in network %q\\n\", IP.String(), n.id)\n\t}\n\n\tif err := n.obtainVxlanID(s); err != nil {\n\t\treturn fmt.Errorf(\"couldn't get vxlan id for %q: %v\", s.subnetIP.String(), err)\n\t}\n\n\tif err := n.joinSubnetSandbox(s, false); err != nil {\n\t\treturn fmt.Errorf(\"subnet sandbox join failed for %q: %v\", s.subnetIP.String(), err)\n\t}\n\n\tif err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\t\/\/ Add neighbor entry for the peer IP\n\tif err := sbox.AddNeighbor(peerIP, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {\n\t\treturn fmt.Errorf(\"could not add neighbor entry into the sandbox: %v\", err)\n\t}\n\n\t\/\/ Add fdb entry to the bridge for the peer mac\n\tif err := sbox.AddNeighbor(vtep, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName),\n\t\tsbox.NeighborOptions().Family(syscall.AF_BRIDGE)); err != nil {\n\t\treturn fmt.Errorf(\"could not add fdb entry into the sandbox: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,\n\tpeerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {\n\n\tif err := validateID(nid, eid); err != nil {\n\t\treturn err\n\t}\n\n\tif updateDb {\n\t\td.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\tsbox := n.sandbox()\n\tif sbox == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Delete fdb entry to the bridge for the peer mac\n\tif err := sbox.DeleteNeighbor(vtep, peerMac); err != nil {\n\t\treturn fmt.Errorf(\"could not delete fdb entry into the sandbox: %v\", err)\n\t}\n\n\t\/\/ Delete neighbor entry for the peer IP\n\tif err := sbox.DeleteNeighbor(peerIP, peerMac); err != nil {\n\t\treturn fmt.Errorf(\"could not delete neighbor entry into the sandbox: %v\", err)\n\t}\n\n\tif err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) pushLocalDb() {\n\td.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {\n\t\tif pEntry.isLocal {\n\t\t\td.pushLocalEndpointEvent(\"join\", nid, pEntry.eid)\n\t\t}\n\t\treturn false\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"bytes\"\n)\n\n\/\/ AllLevels is an array of all log levels, for easier registering of all levels to a handler\nvar AllLevels = []Level{\n\tDebugLevel,\n\tInfoLevel,\n\tNoticeLevel,\n\tWarnLevel,\n\tErrorLevel,\n\tPanicLevel,\n\tAlertLevel,\n\tFatalLevel,\n}\n\n\/\/ Level of the log\ntype Level uint8\n\n\/\/ Log levels.\nconst (\n\tDebugLevel Level = iota\n\tInfoLevel\n\tNoticeLevel\n\tWarnLevel\n\tErrorLevel\n\tPanicLevel\n\tAlertLevel\n\tFatalLevel \/\/ same as syslog CRITICAL\n)\n\nfunc (l Level) String() string {\n\tswitch l {\n\tcase DebugLevel:\n\t\treturn \"DEBUG\"\n\tcase InfoLevel:\n\t\treturn \"INFO\"\n\tcase NoticeLevel:\n\t\treturn \"NOTICE\"\n\tcase WarnLevel:\n\t\treturn \"WARN\"\n\tcase ErrorLevel:\n\t\treturn \"ERROR\"\n\tcase PanicLevel:\n\t\treturn \"PANIC\"\n\tcase AlertLevel:\n\t\treturn \"ALERT\"\n\tcase FatalLevel:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"Unknown Level\"\n\t}\n}\n\nfunc level(s string) Level {\n\tswitch s {\n\tcase \"DEBUG\":\n\t\treturn DebugLevel\n\tcase \"INFO\":\n\t\treturn InfoLevel\n\tcase \"NOTICE\":\n\t\treturn NoticeLevel\n\tcase \"WARN\":\n\t\treturn WarnLevel\n\tcase \"ERROR\":\n\t\treturn ErrorLevel\n\tcase \"PANIC\":\n\t\treturn PanicLevel\n\tcase \"ALERT\":\n\t\treturn AlertLevel\n\tcase \"FATAL\":\n\t\treturn FatalLevel\n\tdefault:\n\t\treturn 255\n\t}\n}\n\n\/\/ MarshalJSON implementation.\nfunc (l Level) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + l.String() + `\"`), nil\n}\n\n\/\/ UnmarshalJSON implementation.\nfunc (l *Level) UnmarshalJSON(b []byte) error {\n\t*l = level(string(bytes.Trim(b, `\"`)))\n\treturn nil\n}\n<commit_msg>add log level parser helper (#44)<commit_after>package log\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n)\n\n\/\/ AllLevels is an array of all log levels, for easier registering of all levels to a handler\nvar AllLevels = []Level{\n\tDebugLevel,\n\tInfoLevel,\n\tNoticeLevel,\n\tWarnLevel,\n\tErrorLevel,\n\tPanicLevel,\n\tAlertLevel,\n\tFatalLevel,\n}\n\n\/\/ Level of the log\ntype Level uint8\n\n\/\/ Log levels.\nconst (\n\tDebugLevel Level = iota\n\tInfoLevel\n\tNoticeLevel\n\tWarnLevel\n\tErrorLevel\n\tPanicLevel\n\tAlertLevel\n\tFatalLevel \/\/ same as syslog CRITICAL\n)\n\nfunc (l Level) String() string {\n\tswitch l {\n\tcase DebugLevel:\n\t\treturn \"DEBUG\"\n\tcase InfoLevel:\n\t\treturn \"INFO\"\n\tcase NoticeLevel:\n\t\treturn \"NOTICE\"\n\tcase WarnLevel:\n\t\treturn \"WARN\"\n\tcase ErrorLevel:\n\t\treturn \"ERROR\"\n\tcase PanicLevel:\n\t\treturn \"PANIC\"\n\tcase AlertLevel:\n\t\treturn \"ALERT\"\n\tcase FatalLevel:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"Unknown Level\"\n\t}\n}\n\n\/\/ ParseLevel parses the provided strings log level or if not supported return 255\nfunc ParseLevel(s string) Level {\n\tswitch strings.ToUpper(s) {\n\tcase \"DEBUG\":\n\t\treturn DebugLevel\n\tcase \"INFO\":\n\t\treturn InfoLevel\n\tcase \"NOTICE\":\n\t\treturn NoticeLevel\n\tcase \"WARN\":\n\t\treturn WarnLevel\n\tcase \"ERROR\":\n\t\treturn ErrorLevel\n\tcase \"PANIC\":\n\t\treturn PanicLevel\n\tcase \"ALERT\":\n\t\treturn AlertLevel\n\tcase \"FATAL\":\n\t\treturn FatalLevel\n\tdefault:\n\t\treturn 255\n\t}\n}\n\n\/\/ MarshalJSON implementation.\nfunc (l Level) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + l.String() + `\"`), nil\n}\n\n\/\/ UnmarshalJSON implementation.\nfunc (l *Level) UnmarshalJSON(b []byte) error {\n\t*l = ParseLevel(string(bytes.Trim(b, `\"`)))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, Bryan Matsuo. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/* Filename: lexer.go\n * Author: Bryan Matsuo <bryan.matsuo [at] gmail.com>\n * Created: 2012-11-02 22:10:59.782356 -0700 PDT\n * Description: Main source file in go-lexer\n *\/\n\n\/*\nPackage lexer provides a simple scanner and types for handrolling lexers.\nThe implementation is based on Rob Pike's talk.\n\n\thttp:\/\/www.youtube.com\/watch?v=HxaD_trXwRE\n\nTwo APIs\n\nThe Lexer type has two APIs, one is used byte StateFn types. The other is\ncalled by the parser. These APIs are called the scanner and the parser APIs\nhere.\n\nThe parser API\n\nThe only function the parser calls on the lexer is Next to retreive the next\ntoken from the input stream. Eventually an item with type ItemEOF is returned\nat which point there are no more tokens in the stream.\n\nThe scanner API\n\nThe lexer uses Emit to construct complete lexemes to return from\nfuture\/concurrent calls to Next by the parser. The scanner uses a combination\nof methods to manipulate its position and and prepare lexemes to be emitted.\nLexer errors are emitted to the parser using the Errorf method which keeps the\nscanner-parser interface uniform.\n\nCommon lexer methods used in a scanner are the Accept[Run][Range] family of\nmethods. Accept* methods take a set and advance the lexer if incoming runes\nare in the set. The AcceptRun* subfamily advance the lexer as far as possible.\n\nFor scanning known sequences of bytes (e.g. keywords) the AcceptString method\navoids a lot of branching that would be incurred using methods that match\ncharacter classes.\n\nThe remaining methods provide low level functionality that can be combined to\naddress corner cases.\n*\/\npackage lexer\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\nconst EOF rune = 0x04\n\n\/\/ IsEOF returns true if n is zero.\nfunc IsEOF(c rune, n int) bool {\n\treturn n == 0\n}\n\n\/\/ IsInvalid returns true if c is utf8.RuneError and n is 1.\nfunc IsInvalid(c rune, n int) bool {\n\treturn c == utf8.RuneError && n == 1\n}\n\n\/\/ StateFn functions scan runes from the lexer's input and emit items. A StateFn\n\/\/ is responsible for emitting ItemEOF after input has been consumed.\ntype StateFn func(*Lexer) StateFn\n\n\/\/ Lexer contains an input string and state associate with the lexing the\n\/\/ input.\ntype Lexer struct {\n\tinput string \/\/ string being scanned\n\tstart int \/\/ start position for the current lexeme\n\tpos int \/\/ current position\n\twidth int \/\/ length of the last rune read\n\tlast rune \/\/ the last rune read\n\tstate StateFn \/\/ the current state\n\titems *list.List \/\/ Buffer of lexed items\n}\n\n\/\/ Create a new lexer. Must be given a non-nil state.\nfunc New(start StateFn, input string) *Lexer {\n\tif start == nil {\n\t\tpanic(\"nil start state\")\n\t}\n\treturn &Lexer{\n\t\tstate: start,\n\t\tinput: input,\n\t\titems: list.New(),\n\t}\n}\n\n\/\/ Input returns the input string being lexed by the l.\nfunc (l *Lexer) Input() string {\n\treturn l.input\n}\n\n\/\/ Start marks the first byte of item currently being lexed.\nfunc (l *Lexer) Start() int {\n\treturn l.start\n}\n\n\/\/ Pos marks the next byte to be read in the input string. The behavior of Pos\n\/\/ is unspecified if an error previously occurred or if all input has been\n\/\/ consumed.\nfunc (l *Lexer) Pos() int {\n\treturn l.pos\n}\n\n\/\/ Current returns the contents of the item currently being lexed.\nfunc (l *Lexer) Current() string {\n\treturn l.input[l.start:l.pos]\n}\n\n\/\/ Last return the last rune read from the input stream.\nfunc (l *Lexer) Last() (r rune, width int) {\n\treturn l.last, l.width\n}\n\n\/\/ Advance adds one rune of input to the current lexeme, increments the lexer's\n\/\/ position, and returns the input rune with its size in bytes (encoded as\n\/\/ UTF-8). Invalid UTF-8 codepoints cause the current call and all subsequent\n\/\/ calls to return (utf8.RuneError, 1). If there is no input the returned size\n\/\/ is zero.\nfunc (l *Lexer) Advance() (rune, int) {\n\tif l.pos >= len(l.input) {\n\t\tl.width = 0\n\t\treturn EOF, l.width\n\t}\n\tl.last, l.width = utf8.DecodeRuneInString(l.input[l.pos:])\n\tif l.last == utf8.RuneError && l.width == 1 {\n\t\treturn l.last, l.width\n\t}\n\tl.pos += l.width\n\treturn l.last, l.width\n}\n\n\/\/ Backup removes the last rune from the current lexeme and moves l's position\n\/\/ back in the input string accordingly. Backup should only be called after a\n\/\/ call to Advance.\nfunc (l *Lexer) Backup() {\n\tl.pos -= l.width\n}\n\n\/\/ Peek returns the next rune in the input stream without adding it to the\n\/\/ current lexeme.\nfunc (l *Lexer) Peek() (rune, int) {\n\tc, n := l.Advance()\n\tl.Backup()\n\treturn c, n\n}\n\n\/\/ Ignore throws away the current lexeme.\nfunc (l *Lexer) Ignore() {\n\tl.start = l.pos\n}\n\n\/\/ Accept advances the lexer if the next rune is in valid.\nfunc (l *Lexer) Accept(valid string) (ok bool) {\n\tr, _ := l.Advance()\n\tok = strings.IndexRune(valid, r) >= 0\n\tif !ok {\n\t\tl.Backup()\n\t}\n\treturn\n}\n\n\/\/ AcceptFunc advances the lexer if fn return true for the next rune.\nfunc (l *Lexer) AcceptFunc(fn func(rune) bool) (ok bool) {\n\tswitch r, n := l.Advance(); {\n\tcase IsEOF(r, n):\n\t\treturn false\n\tcase IsInvalid(r, n):\n\t\treturn false\n\tcase fn(r):\n\t\treturn true\n\tdefault:\n\t\tl.Backup()\n\t\treturn false\n\t}\n}\n\n\/\/ AcceptRange advances l's position if the current rune is in tab.\nfunc (l *Lexer) AcceptRange(tab *unicode.RangeTable) (ok bool) {\n\tr, _ := l.Advance()\n\tok = unicode.Is(tab, r)\n\tif !ok {\n\t\tl.Backup()\n\t}\n\treturn\n}\n\n\/\/ AcceptRun advances l's position as long as the current rune is in valid.\nfunc (l *Lexer) AcceptRun(valid string) (n int) {\n\tfor l.Accept(valid) {\n\t\tn++\n\t}\n\treturn\n}\n\n\/\/ AcceptRunFunc advances l's position as long as fn returns true for the next\n\/\/ input rune.\nfunc (l *Lexer) AcceptRunFunc(fn func(rune) bool) int {\n\tvar n int\n\tfor l.AcceptFunc(fn) {\n\t\tn++\n\t}\n\treturn n\n}\n\n\/\/ AcceptRunRange advances l's possition as long as the current rune is in tab.\nfunc (l *Lexer) AcceptRunRange(tab *unicode.RangeTable) (n int) {\n\tfor l.AcceptRange(tab) {\n\t\tn++\n\t}\n\treturn\n}\n\n\/\/ AcceptString advances the lexer len(s) bytes if the next len(s) bytes equal\n\/\/ s. AcceptString returns true if l advanced.\nfunc (l *Lexer) AcceptString(s string) (ok bool) {\n\tif strings.HasPrefix(l.input[l.pos:], s) {\n\t\tl.pos += len(s)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Errorf causes an error item to be emitted from l.Next(). The item's value\n\/\/ (and its error message) are the result of evaluating format and vs with\n\/\/ fmt.Sprintf.\nfunc (l *Lexer) Errorf(format string, vs ...interface{}) StateFn {\n\tl.enqueue(&Item{\n\t\tItemError,\n\t\tl.start,\n\t\tfmt.Sprintf(format, vs...),\n\t})\n\treturn nil\n}\n\n\/\/ Emit the current value as an Item with the specified type.\nfunc (l *Lexer) Emit(t ItemType) {\n\tl.enqueue(&Item{\n\t\tt,\n\t\tl.start,\n\t\tl.input[l.start:l.pos],\n\t})\n\tl.start = l.pos\n}\n\n\/\/ The method by which items are extracted from the input.\n\/\/ Returns nil if the lexer has entered a nil state.\nfunc (l *Lexer) Next() (i *Item) {\n\tfor {\n\t\tif head := l.dequeue(); head != nil {\n\t\t\treturn head\n\t\t}\n\t\tif l.state == nil {\n\t\t\treturn &Item{ItemEOF, l.start, \"\"}\n\t\t}\n\t\tl.state = l.state(l)\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (l *Lexer) enqueue(i *Item) {\n\tl.items.PushBack(i)\n}\n\nfunc (l *Lexer) dequeue() *Item {\n\thead := l.items.Front()\n\tif head == nil {\n\t\treturn nil\n\t}\n\treturn l.items.Remove(head).(*Item)\n}\n\n\/\/ A type for all the types of items in the language being lexed.\ntype ItemType uint16\n\n\/\/ Special item types.\nconst (\n\tItemEOF ItemType = math.MaxUint16 - iota\n\tItemError\n)\n\n\/\/ An individual scanned item (a lexeme).\ntype Item struct {\n\tType ItemType\n\tPos int\n\tValue string\n}\n\n\/\/ Err returns the error corresponding to i, if one exists.\nfunc (i *Item) Err() error {\n\tif i.Type == ItemError {\n\t\treturn (*Error)(i)\n\t}\n\treturn nil\n}\n\n\/\/ String returns the raw lexeme of i.\nfunc (i *Item) String() string {\n\tswitch i.Type {\n\tcase ItemError:\n\t\treturn i.Value\n\tcase ItemEOF:\n\t\treturn \"EOF\"\n\t}\n\tif len(i.Value) > 10 {\n\t\treturn fmt.Sprintf(\"%.10q...\", i.Value)\n\t}\n\treturn i.Value\n}\n\n\/\/ Error is an item of type ItemError\ntype Error Item\n\nfunc (err *Error) Error() string {\n\treturn (*Item)(err).String()\n}\n<commit_msg>godoc<commit_after>\/\/ Copyright 2012, Bryan Matsuo. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/* Filename: lexer.go\n * Author: Bryan Matsuo <bryan.matsuo [at] gmail.com>\n * Created: 2012-11-02 22:10:59.782356 -0700 PDT\n * Description: Main source file in go-lexer\n *\/\n\n\/*\nPackage lexer provides a simple scanner and types for handrolling lexers.\nThe implementation is based on Rob Pike's talk.\n\n\thttp:\/\/www.youtube.com\/watch?v=HxaD_trXwRE\n\nThere are some key differences to Pike's presented code. Next has been renamed\nAdvance to be more idiomatic with Backup. Next is used by the parser to\nretrieve items from the lexer.\n\nTwo APIs\n\nThe Lexer type has two APIs, one is used byte StateFn types. The other is\ncalled by the parser. These APIs are called the scanner and the parser APIs\nhere.\n\nThe parser API\n\nThe only function the parser calls on the lexer is Next to retreive the next\ntoken from the input stream. Eventually an item with type ItemEOF is returned\nat which point there are no more tokens in the stream.\n\nThe scanner API\n\nThe lexer uses Emit to construct complete lexemes to return from\nfuture\/concurrent calls to Next by the parser. The scanner uses a combination\nof methods to manipulate its position and and prepare lexemes to be emitted.\nLexer errors are emitted to the parser using the Errorf method which keeps the\nscanner-parser interface uniform.\n\nCommon lexer methods used in a scanner are the Accept[Run][Range] family of\nmethods. Accept* methods take a set and advance the lexer if incoming runes\nare in the set. The AcceptRun* subfamily advance the lexer as far as possible.\n\nFor scanning known sequences of bytes (e.g. keywords) the AcceptString method\navoids a lot of branching that would be incurred using methods that match\ncharacter classes.\n\nThe remaining methods provide low level functionality that can be combined to\naddress corner cases.\n*\/\npackage lexer\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\nconst EOF rune = 0x04\n\n\/\/ IsEOF returns true if n is zero.\nfunc IsEOF(c rune, n int) bool {\n\treturn n == 0\n}\n\n\/\/ IsInvalid returns true if c is utf8.RuneError and n is 1.\nfunc IsInvalid(c rune, n int) bool {\n\treturn c == utf8.RuneError && n == 1\n}\n\n\/\/ StateFn functions scan runes from the lexer's input and emit items. A StateFn\n\/\/ is responsible for emitting ItemEOF after input has been consumed.\ntype StateFn func(*Lexer) StateFn\n\n\/\/ Lexer contains an input string and state associate with the lexing the\n\/\/ input.\ntype Lexer struct {\n\tinput string \/\/ string being scanned\n\tstart int \/\/ start position for the current lexeme\n\tpos int \/\/ current position\n\twidth int \/\/ length of the last rune read\n\tlast rune \/\/ the last rune read\n\tstate StateFn \/\/ the current state\n\titems *list.List \/\/ Buffer of lexed items\n}\n\n\/\/ Create a new lexer. Must be given a non-nil state.\nfunc New(start StateFn, input string) *Lexer {\n\tif start == nil {\n\t\tpanic(\"nil start state\")\n\t}\n\treturn &Lexer{\n\t\tstate: start,\n\t\tinput: input,\n\t\titems: list.New(),\n\t}\n}\n\n\/\/ Input returns the input string being lexed by the l.\nfunc (l *Lexer) Input() string {\n\treturn l.input\n}\n\n\/\/ Start marks the first byte of item currently being lexed.\nfunc (l *Lexer) Start() int {\n\treturn l.start\n}\n\n\/\/ Pos marks the next byte to be read in the input string. The behavior of Pos\n\/\/ is unspecified if an error previously occurred or if all input has been\n\/\/ consumed.\nfunc (l *Lexer) Pos() int {\n\treturn l.pos\n}\n\n\/\/ Current returns the contents of the item currently being lexed.\nfunc (l *Lexer) Current() string {\n\treturn l.input[l.start:l.pos]\n}\n\n\/\/ Last return the last rune read from the input stream.\nfunc (l *Lexer) Last() (r rune, width int) {\n\treturn l.last, l.width\n}\n\n\/\/ Advance adds one rune of input to the current lexeme, increments the lexer's\n\/\/ position, and returns the input rune with its size in bytes (encoded as\n\/\/ UTF-8). Invalid UTF-8 codepoints cause the current call and all subsequent\n\/\/ calls to return (utf8.RuneError, 1). If there is no input the returned size\n\/\/ is zero.\nfunc (l *Lexer) Advance() (rune, int) {\n\tif l.pos >= len(l.input) {\n\t\tl.width = 0\n\t\treturn EOF, l.width\n\t}\n\tl.last, l.width = utf8.DecodeRuneInString(l.input[l.pos:])\n\tif l.last == utf8.RuneError && l.width == 1 {\n\t\treturn l.last, l.width\n\t}\n\tl.pos += l.width\n\treturn l.last, l.width\n}\n\n\/\/ Backup removes the last rune from the current lexeme and moves l's position\n\/\/ back in the input string accordingly. Backup should only be called after a\n\/\/ call to Advance.\nfunc (l *Lexer) Backup() {\n\tl.pos -= l.width\n}\n\n\/\/ Peek returns the next rune in the input stream without adding it to the\n\/\/ current lexeme.\nfunc (l *Lexer) Peek() (rune, int) {\n\tc, n := l.Advance()\n\tl.Backup()\n\treturn c, n\n}\n\n\/\/ Ignore throws away the current lexeme.\nfunc (l *Lexer) Ignore() {\n\tl.start = l.pos\n}\n\n\/\/ Accept advances the lexer if the next rune is in valid.\nfunc (l *Lexer) Accept(valid string) (ok bool) {\n\tr, _ := l.Advance()\n\tok = strings.IndexRune(valid, r) >= 0\n\tif !ok {\n\t\tl.Backup()\n\t}\n\treturn\n}\n\n\/\/ AcceptFunc advances the lexer if fn return true for the next rune.\nfunc (l *Lexer) AcceptFunc(fn func(rune) bool) (ok bool) {\n\tswitch r, n := l.Advance(); {\n\tcase IsEOF(r, n):\n\t\treturn false\n\tcase IsInvalid(r, n):\n\t\treturn false\n\tcase fn(r):\n\t\treturn true\n\tdefault:\n\t\tl.Backup()\n\t\treturn false\n\t}\n}\n\n\/\/ AcceptRange advances l's position if the current rune is in tab.\nfunc (l *Lexer) AcceptRange(tab *unicode.RangeTable) (ok bool) {\n\tr, _ := l.Advance()\n\tok = unicode.Is(tab, r)\n\tif !ok {\n\t\tl.Backup()\n\t}\n\treturn\n}\n\n\/\/ AcceptRun advances l's position as long as the current rune is in valid.\nfunc (l *Lexer) AcceptRun(valid string) (n int) {\n\tfor l.Accept(valid) {\n\t\tn++\n\t}\n\treturn\n}\n\n\/\/ AcceptRunFunc advances l's position as long as fn returns true for the next\n\/\/ input rune.\nfunc (l *Lexer) AcceptRunFunc(fn func(rune) bool) int {\n\tvar n int\n\tfor l.AcceptFunc(fn) {\n\t\tn++\n\t}\n\treturn n\n}\n\n\/\/ AcceptRunRange advances l's possition as long as the current rune is in tab.\nfunc (l *Lexer) AcceptRunRange(tab *unicode.RangeTable) (n int) {\n\tfor l.AcceptRange(tab) {\n\t\tn++\n\t}\n\treturn\n}\n\n\/\/ AcceptString advances the lexer len(s) bytes if the next len(s) bytes equal\n\/\/ s. AcceptString returns true if l advanced.\nfunc (l *Lexer) AcceptString(s string) (ok bool) {\n\tif strings.HasPrefix(l.input[l.pos:], s) {\n\t\tl.pos += len(s)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Errorf causes an error item to be emitted from l.Next(). The item's value\n\/\/ (and its error message) are the result of evaluating format and vs with\n\/\/ fmt.Sprintf.\nfunc (l *Lexer) Errorf(format string, vs ...interface{}) StateFn {\n\tl.enqueue(&Item{\n\t\tItemError,\n\t\tl.start,\n\t\tfmt.Sprintf(format, vs...),\n\t})\n\treturn nil\n}\n\n\/\/ Emit the current value as an Item with the specified type.\nfunc (l *Lexer) Emit(t ItemType) {\n\tl.enqueue(&Item{\n\t\tt,\n\t\tl.start,\n\t\tl.input[l.start:l.pos],\n\t})\n\tl.start = l.pos\n}\n\n\/\/ The method by which items are extracted from the input.\n\/\/ Returns nil if the lexer has entered a nil state.\nfunc (l *Lexer) Next() (i *Item) {\n\tfor {\n\t\tif head := l.dequeue(); head != nil {\n\t\t\treturn head\n\t\t}\n\t\tif l.state == nil {\n\t\t\treturn &Item{ItemEOF, l.start, \"\"}\n\t\t}\n\t\tl.state = l.state(l)\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (l *Lexer) enqueue(i *Item) {\n\tl.items.PushBack(i)\n}\n\nfunc (l *Lexer) dequeue() *Item {\n\thead := l.items.Front()\n\tif head == nil {\n\t\treturn nil\n\t}\n\treturn l.items.Remove(head).(*Item)\n}\n\n\/\/ A type for all the types of items in the language being lexed.\ntype ItemType uint16\n\n\/\/ Special item types.\nconst (\n\tItemEOF ItemType = math.MaxUint16 - iota\n\tItemError\n)\n\n\/\/ An individual scanned item (a lexeme).\ntype Item struct {\n\tType ItemType\n\tPos int\n\tValue string\n}\n\n\/\/ Err returns the error corresponding to i, if one exists.\nfunc (i *Item) Err() error {\n\tif i.Type == ItemError {\n\t\treturn (*Error)(i)\n\t}\n\treturn nil\n}\n\n\/\/ String returns the raw lexeme of i.\nfunc (i *Item) String() string {\n\tswitch i.Type {\n\tcase ItemError:\n\t\treturn i.Value\n\tcase ItemEOF:\n\t\treturn \"EOF\"\n\t}\n\tif len(i.Value) > 10 {\n\t\treturn fmt.Sprintf(\"%.10q...\", i.Value)\n\t}\n\treturn i.Value\n}\n\n\/\/ Error is an item of type ItemError\ntype Error Item\n\nfunc (err *Error) Error() string {\n\treturn (*Item)(err).String()\n}\n<|endoftext|>"} {"text":"<commit_before>package drouter\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tdockerTypes \"github.com\/docker\/engine-api\/types\"\n\tdockerCTypes \"github.com\/docker\/engine-api\/types\/container\"\n\tdockerNTypes \"github.com\/docker\/engine-api\/types\/network\"\n)\n\nconst (\n\tContName = \"drntest_c%v\"\n\tContImage = \"alpine\"\n)\n\nfunc testContainerBug(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\trequire := require.New(t)\n\n\ts := 0\n\tfts := 0\n\truns := 100\n\tfor i := 0; i < runs; i++ {\n\t\trequire.NoError(cleanup(), \"Failed to cleanup test %v\", i)\n\t\tn0r, err := createNetwork(0, true)\n\t\trequire.NoError(err, \"Failed to create n0 test %v\", i)\n\t\tc, err := createContainer(0, n0r.ID)\n\t\tif err == nil {\n\t\t\tt.Logf(\"test %v succeeded.\\n\", i)\n\t\t\tc.remove()\n\t\t\ts++\n\t\t}\n\t\tif c != nil && err != nil {\n\t\t\tt.Logf(\"test %v failed to start.\\n\", i)\n\t\t\tfts++\n\t\t\tc.remove()\n\t\t}\n\t\trequire.NoError(dc.NetworkRemove(bg, n0r.ID), \"Failed to remove n0. test %v\", i)\n\t}\n\tt.Logf(\"%v\/%v succeeded.\\n\", s, runs)\n\tt.Logf(\"%v\/%v failed to start.\\n\", fts, runs)\n\trequire.Equal(runs, s, \"Some failed\")\n}\n\nfunc createContainer(cn int, n string) (*container, error) {\n\tr, err := dc.ContainerCreate(bg,\n\t\t&dockerCTypes.Config{\n\t\t\tImage: ContImage,\n\t\t\tEntrypoint: []string{\"\/bin\/sleep\", \"600\"},\n\t\t},\n\t\t&dockerCTypes.HostConfig{},\n\t\t&dockerNTypes.NetworkingConfig{}, fmt.Sprintf(ContName, cn))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dc.NetworkConnect(bg, n, r.ID, &dockerNTypes.EndpointSettings{})\n\tif err != nil {\n\t\tc, err2 := newContainerFromID(r.ID)\n\t\tif err2 != nil {\n\t\t\tc.log.WithFields(log.Fields{\"Error\": err2}).Error(\"Failed to inspect container after failed network connect.\")\n\t\t}\n\t\treturn c, err\n\t}\n\n\terr = dc.ContainerStart(bg, r.ID, dockerTypes.ContainerStartOptions{})\n\tif err != nil {\n\t\tc, err2 := newContainerFromID(r.ID)\n\t\tif err2 != nil {\n\t\t\tc.log.WithFields(log.Fields{\"Error\": err2}).Error(\"Failed to inspect container after failed container start.\")\n\t\t}\n\t\treturn c, err\n\t}\n\n\treturn newContainerFromID(r.ID)\n}\n\nfunc (c *container) remove() error {\n\terr := dc.ContainerKill(bg, c.id, \"\")\n\tif err != nil && !strings.Contains(err.Error(), \"is not running\") {\n\t\treturn err\n\t}\n\treturn dc.ContainerRemove(bg, c.id, dockerTypes.ContainerRemoveOptions{})\n}\n\nfunc TestInvalidContainer(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\n\tassert := assert.New(t)\n\t_, err := newContainerFromID(\"gibberish_nonsense\")\n\tassert.NotEqual(err, nil, \"Inspect invalid container should fail\")\n}\n\nfunc TestNonRunningContainer(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\n\trequire := require.New(t)\n\n\tn0r, err := createNetwork(0, true)\n\trequire.NoError(err, \"Failed to create n0.\")\n\t\/\/defer func() { require.NoError(dc.NetworkRemove(bg, n0r.ID), \"Failed to remove n0.\") }()\n\n\tc, err := createContainer(0, n0r.ID)\n\trequire.NoError(err, \"Failed to create c0.\")\n\t\/\/defer func() { require.NoError(c.remove(), \"Failed to remove c0.\") }()\n\n\terr = dc.ContainerKill(bg, c.id, \"\")\n\trequire.NoError(err, \"Error stopping container\")\n\ttime.Sleep(1 * time.Second)\n\n\tcStopped, err := newContainerFromID(c.id)\n\trequire.NoError(err, \"Inspect stopped container should succeed\")\n\n\trequire.Nil(cStopped.handle, \"Inspect on stopped container should return nil handle\")\n}\n<commit_msg>pass net name<commit_after>package drouter\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tdockerTypes \"github.com\/docker\/engine-api\/types\"\n\tdockerCTypes \"github.com\/docker\/engine-api\/types\/container\"\n\tdockerNTypes \"github.com\/docker\/engine-api\/types\/network\"\n)\n\nconst (\n\tContName = \"drntest_c%v\"\n\tContImage = \"alpine\"\n)\n\nfunc TestContainerBug(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\trequire := require.New(t)\n\n\ts := 0\n\tfts := 0\n\truns := 100\n\tfor i := 0; i < runs; i++ {\n\t\trequire.NoError(cleanup(), \"Failed to cleanup test %v\", i)\n\t\tn0r, err := createNetwork(0, true)\n\t\trequire.NoError(err, \"Failed to create n0 test %v\", i)\n\t\tc, err := createContainer(0, n0r.Name)\n\t\tif err == nil {\n\t\t\tt.Logf(\"test %v succeeded.\\n\", i)\n\t\t\tc.remove()\n\t\t\ts++\n\t\t}\n\t\tif c != nil && err != nil {\n\t\t\tt.Logf(\"test %v failed to start.\\n\", i)\n\t\t\tfts++\n\t\t\tc.remove()\n\t\t}\n\t\trequire.NoError(dc.NetworkRemove(bg, n0r.ID), \"Failed to remove n0. test %v\", i)\n\t}\n\tt.Logf(\"%v\/%v succeeded.\\n\", s, runs)\n\tt.Logf(\"%v\/%v failed to start.\\n\", fts, runs)\n\trequire.Equal(runs, s, \"Some failed\")\n}\n\nfunc createContainer(cn int, n string) (*container, error) {\n\tr, err := dc.ContainerCreate(bg,\n\t\t&dockerCTypes.Config{\n\t\t\tImage: ContImage,\n\t\t\tEntrypoint: []string{\"\/bin\/sleep\", \"600\"},\n\t\t},\n\t\t&dockerCTypes.HostConfig{},\n\t\t&dockerNTypes.NetworkingConfig{\n\t\t\tEndpointsConfig: map[string]*dockerNTypes.EndpointSettings{\n\t\t\t\tn: {},\n\t\t\t},\n\t\t}, fmt.Sprintf(ContName, cn))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dc.ContainerStart(bg, r.ID, dockerTypes.ContainerStartOptions{})\n\tif err != nil {\n\t\tc, err2 := newContainerFromID(r.ID)\n\t\tif err2 != nil {\n\t\t\tc.log.WithFields(log.Fields{\"Error\": err2}).Error(\"Failed to inspect container after failed container start.\")\n\t\t}\n\t\treturn c, err\n\t}\n\n\treturn newContainerFromID(r.ID)\n}\n\nfunc (c *container) remove() error {\n\terr := dc.ContainerKill(bg, c.id, \"\")\n\tif err != nil && !strings.Contains(err.Error(), \"is not running\") {\n\t\treturn err\n\t}\n\treturn dc.ContainerRemove(bg, c.id, dockerTypes.ContainerRemoveOptions{})\n}\n\nfunc TestInvalidContainer(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\n\tassert := assert.New(t)\n\t_, err := newContainerFromID(\"gibberish_nonsense\")\n\tassert.NotEqual(err, nil, \"Inspect invalid container should fail\")\n}\n\nfunc TestNonRunningContainer(t *testing.T) {\n\trequire.NoError(t, cleanup(), \"Failed to cleanup()\")\n\n\trequire := require.New(t)\n\n\tn0r, err := createNetwork(0, true)\n\trequire.NoError(err, \"Failed to create n0.\")\n\t\/\/defer func() { require.NoError(dc.NetworkRemove(bg, n0r.ID), \"Failed to remove n0.\") }()\n\n\tc, err := createContainer(0, n0r.ID)\n\trequire.NoError(err, \"Failed to create c0.\")\n\t\/\/defer func() { require.NoError(c.remove(), \"Failed to remove c0.\") }()\n\n\terr = dc.ContainerKill(bg, c.id, \"\")\n\trequire.NoError(err, \"Error stopping container\")\n\ttime.Sleep(1 * time.Second)\n\n\tcStopped, err := newContainerFromID(c.id)\n\trequire.NoError(err, \"Inspect stopped container should succeed\")\n\n\trequire.Nil(cStopped.handle, \"Inspect on stopped container should return nil handle\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Circonus, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage checkmgr\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-gometrics\/api\"\n)\n\n\/\/ UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.)\nfunc (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric) {\n\t\/\/ only if check manager is enabled\n\tif !cm.enabled {\n\t\treturn\n\t}\n\n\t\/\/ only if checkBundle has been populated\n\tif cm.checkBundle == nil {\n\t\treturn\n\t}\n\n\t\/\/ only if there is *something* to update\n\tif !cm.forceCheckUpdate && len(newMetrics) == 0 && len(cm.metricTags) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ refresh check bundle (in case there were changes made by other apps or in UI)\n\tcheckBundle, err := cm.apih.FetchCheckBundleByCID(api.CIDType(cm.checkBundle.CID))\n\tif err != nil {\n\t\tcm.Log.Printf(\"[ERROR] unable to fetch up-to-date check bundle %v\", err)\n\t\treturn\n\t}\n\tcm.cbmu.Lock()\n\tcm.checkBundle = checkBundle\n\tcm.cbmu.Unlock()\n\n\tcm.addNewMetrics(newMetrics)\n\n\tif len(cm.metricTags) > 0 {\n\t\t\/\/ note: if a tag has been added (queued) for a metric which never gets sent\n\t\t\/\/ the tags will be discarded. (setting tags does not *create* metrics.)\n\t\tfor metricName, metricTags := range cm.metricTags {\n\t\t\tfor metricIdx, metric := range cm.checkBundle.Metrics {\n\t\t\t\tif metric.Name == metricName {\n\t\t\t\t\tcm.checkBundle.Metrics[metricIdx].Tags = metricTags\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tcm.mtmu.Lock()\n\t\t\tdelete(cm.metricTags, metricName)\n\t\t\tcm.mtmu.Unlock()\n\t\t}\n\t\tcm.forceCheckUpdate = true\n\t}\n\n\tif cm.forceCheckUpdate {\n\t\tnewCheckBundle, err := cm.apih.UpdateCheckBundle(cm.checkBundle)\n\t\tif err != nil {\n\t\t\tcm.Log.Printf(\"[ERROR] updating check bundle %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcm.forceCheckUpdate = false\n\t\tcm.cbmu.Lock()\n\t\tcm.checkBundle = newCheckBundle\n\t\tcm.cbmu.Unlock()\n\t\tcm.inventoryMetrics()\n\t}\n\n}\n\n\/\/ Initialize CirconusMetrics instance. Attempt to find a check otherwise create one.\n\/\/ use cases:\n\/\/\n\/\/ check [bundle] by submission url\n\/\/ check [bundle] by *check* id (note, not check_bundle id)\n\/\/ check [bundle] by search\n\/\/ create check [bundle]\nfunc (cm *CheckManager) initializeTrapURL() error {\n\tif cm.trapURL != \"\" {\n\t\treturn nil\n\t}\n\n\tcm.trapmu.Lock()\n\tdefer cm.trapmu.Unlock()\n\n\t\/\/ special case short-circuit: just send to a url, no check management\n\t\/\/ up to user to ensure that if url is https that it will work (e.g. not self-signed)\n\tif cm.checkSubmissionURL != \"\" {\n\t\tif !cm.enabled {\n\t\t\tcm.trapURL = cm.checkSubmissionURL\n\t\t\tcm.trapLastUpdate = time.Now()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !cm.enabled {\n\t\treturn errors.New(\"unable to initialize trap, check manager is disabled\")\n\t}\n\n\tvar err error\n\tvar check *api.Check\n\tvar checkBundle *api.CheckBundle\n\tvar broker *api.Broker\n\n\tif cm.checkSubmissionURL != \"\" {\n\t\tcheck, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !check.Active {\n\t\t\treturn fmt.Errorf(\"[ERROR] Check ID %v is not active\", check.CID)\n\t\t}\n\t\t\/\/ extract check id from check object returned from looking up using submission url\n\t\t\/\/ set m.CheckId to the id\n\t\t\/\/ set m.SubmissionUrl to \"\" to prevent trying to search on it going forward\n\t\t\/\/ use case: if the broker is changed in the UI metrics would stop flowing\n\t\t\/\/ unless the new submission url can be fetched with the API (which is no\n\t\t\/\/ longer possible using the original submission url)\n\t\tvar id int\n\t\tid, err = strconv.Atoi(strings.Replace(check.CID, \"\/check\/\", \"\", -1))\n\t\tif err == nil {\n\t\t\tcm.checkID = api.IDType(id)\n\t\t\tcm.checkSubmissionURL = \"\"\n\t\t} else {\n\t\t\tcm.Log.Printf(\n\t\t\t\t\"[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\\n\",\n\t\t\t\tcheck.CID, err)\n\t\t}\n\t} else if cm.checkID > 0 {\n\t\tcheck, err = cm.apih.FetchCheckByID(cm.checkID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !check.Active {\n\t\t\treturn fmt.Errorf(\"[ERROR] Check ID %v is not active\", check.CID)\n\t\t}\n\t} else {\n\t\tsearchCriteria := fmt.Sprintf(\n\t\t\t\"(active:1)(host:\\\"%s\\\")(type:\\\"%s\\\")(tags:%s)\",\n\t\t\tcm.checkTarget,\n\t\t\tcm.checkType,\n\t\t\tstrings.Join(cm.checkSearchTag, \",\"))\n\t\tfilterCriteria := map[string]string{\"f_notes\": cm.getNotes()}\n\t\tcheckBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif checkBundle == nil {\n\t\t\t\/\/ err==nil && checkBundle==nil is \"no check bundles matched\"\n\t\t\t\/\/ an error *should* be returned for any other invalid scenario\n\t\t\tcheckBundle, broker, err = cm.createNewCheck()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif checkBundle == nil {\n\t\tif check != nil {\n\t\t\tcheckBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCID))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"[ERROR] Unable to retrieve, find, or create check\")\n\t\t}\n\t}\n\n\tif broker == nil {\n\t\tbroker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0]))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ retain to facilitate metric management (adding new metrics specifically)\n\tcm.checkBundle = checkBundle\n\tcm.inventoryMetrics()\n\n\t\/\/ determine the trap url to which metrics should be PUT\n\tif checkBundle.Type == \"httptrap\" {\n\t\tcm.trapURL = api.URLType(checkBundle.Config.SubmissionURL)\n\t} else {\n\t\t\/\/ build a submission_url for non-httptrap checks out of mtev_reverse url\n\t\tif len(checkBundle.ReverseConnectURLs) == 0 {\n\t\t\treturn fmt.Errorf(\"%s is not an HTTPTRAP check and no reverse connection urls found\", checkBundle.Checks[0])\n\t\t}\n\t\tmtevURL := checkBundle.ReverseConnectURLs[0]\n\t\tmtevURL = strings.Replace(mtevURL, \"mtev_reverse\", \"https\", 1)\n\t\tmtevURL = strings.Replace(mtevURL, \"check\", \"module\/httptrap\", 1)\n\t\tcm.trapURL = api.URLType(fmt.Sprintf(\"%s\/%s\", mtevURL, checkBundle.Config.ReverseSecret))\n\t}\n\n\t\/\/ used when sending as \"ServerName\" get around certs not having IP SANS\n\t\/\/ (cert created with server name as CN but IP used in trap url)\n\tcn, err := cm.getBrokerCN(broker, cm.trapURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcm.trapCN = BrokerCNType(cn)\n\n\tcm.trapLastUpdate = time.Now()\n\n\treturn nil\n}\n\n\/\/ Search for a check bundle given a predetermined set of criteria\nfunc (cm *CheckManager) checkBundleSearch(criteria string, filter map[string]string) (*api.CheckBundle, error) {\n\tcheckBundles, err := cm.apih.CheckBundleFilterSearch(api.SearchQueryType(criteria), filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(checkBundles) == 0 {\n\t\treturn nil, nil \/\/ trigger creation of a new check\n\t}\n\n\tnumActive := 0\n\tcheckID := -1\n\n\tfor idx, check := range checkBundles {\n\t\tif check.Status == statusActive {\n\t\t\tnumActive++\n\t\t\tcheckID = idx\n\t\t}\n\t}\n\n\tif numActive > 1 {\n\t\treturn nil, fmt.Errorf(\"[ERROR] multiple check bundles match criteria %s\", criteria)\n\t}\n\n\treturn &checkBundles[checkID], nil\n}\n\n\/\/ Create a new check to receive metrics\nfunc (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) {\n\tcheckSecret := string(cm.checkSecret)\n\tif checkSecret == \"\" {\n\t\tsecret, err := cm.makeSecret()\n\t\tif err != nil {\n\t\t\tsecret = \"myS3cr3t\"\n\t\t}\n\t\tcheckSecret = secret\n\t}\n\n\tbroker, err := cm.getBroker()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tconfig := &api.CheckBundle{\n\t\tBrokers: []string{broker.CID},\n\t\tConfig: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},\n\t\tDisplayName: string(cm.checkDisplayName),\n\t\tMetrics: []api.CheckBundleMetric{},\n\t\tMetricLimit: 0,\n\t\tNotes: cm.getNotes(),\n\t\tPeriod: 60,\n\t\tStatus: statusActive,\n\t\tTags: append(cm.checkSearchTag, cm.checkTags...),\n\t\tTarget: string(cm.checkTarget),\n\t\tTimeout: 10,\n\t\tType: string(cm.checkType),\n\t}\n\n\tcheckBundle, err := cm.apih.CreateCheckBundle(config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn checkBundle, broker, nil\n}\n\n\/\/ Create a dynamic secret to use with a new check\nfunc (cm *CheckManager) makeSecret() (string, error) {\n\thash := sha256.New()\n\tx := make([]byte, 2048)\n\tif _, err := rand.Read(x); err != nil {\n\t\treturn \"\", err\n\t}\n\thash.Write(x)\n\treturn hex.EncodeToString(hash.Sum(nil))[0:16], nil\n}\n\nfunc (cm *CheckManager) getNotes() string {\n\treturn fmt.Sprintf(\"cgm_instanceid|%s\", cm.checkInstanceID)\n}\n<commit_msg>upd: remove host from search criteria. active, type, search tag and filter on instance id<commit_after>\/\/ Copyright 2016 Circonus, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage checkmgr\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-gometrics\/api\"\n)\n\n\/\/ UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.)\nfunc (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric) {\n\t\/\/ only if check manager is enabled\n\tif !cm.enabled {\n\t\treturn\n\t}\n\n\t\/\/ only if checkBundle has been populated\n\tif cm.checkBundle == nil {\n\t\treturn\n\t}\n\n\t\/\/ only if there is *something* to update\n\tif !cm.forceCheckUpdate && len(newMetrics) == 0 && len(cm.metricTags) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ refresh check bundle (in case there were changes made by other apps or in UI)\n\tcheckBundle, err := cm.apih.FetchCheckBundleByCID(api.CIDType(cm.checkBundle.CID))\n\tif err != nil {\n\t\tcm.Log.Printf(\"[ERROR] unable to fetch up-to-date check bundle %v\", err)\n\t\treturn\n\t}\n\tcm.cbmu.Lock()\n\tcm.checkBundle = checkBundle\n\tcm.cbmu.Unlock()\n\n\tcm.addNewMetrics(newMetrics)\n\n\tif len(cm.metricTags) > 0 {\n\t\t\/\/ note: if a tag has been added (queued) for a metric which never gets sent\n\t\t\/\/ the tags will be discarded. (setting tags does not *create* metrics.)\n\t\tfor metricName, metricTags := range cm.metricTags {\n\t\t\tfor metricIdx, metric := range cm.checkBundle.Metrics {\n\t\t\t\tif metric.Name == metricName {\n\t\t\t\t\tcm.checkBundle.Metrics[metricIdx].Tags = metricTags\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tcm.mtmu.Lock()\n\t\t\tdelete(cm.metricTags, metricName)\n\t\t\tcm.mtmu.Unlock()\n\t\t}\n\t\tcm.forceCheckUpdate = true\n\t}\n\n\tif cm.forceCheckUpdate {\n\t\tnewCheckBundle, err := cm.apih.UpdateCheckBundle(cm.checkBundle)\n\t\tif err != nil {\n\t\t\tcm.Log.Printf(\"[ERROR] updating check bundle %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcm.forceCheckUpdate = false\n\t\tcm.cbmu.Lock()\n\t\tcm.checkBundle = newCheckBundle\n\t\tcm.cbmu.Unlock()\n\t\tcm.inventoryMetrics()\n\t}\n\n}\n\n\/\/ Initialize CirconusMetrics instance. Attempt to find a check otherwise create one.\n\/\/ use cases:\n\/\/\n\/\/ check [bundle] by submission url\n\/\/ check [bundle] by *check* id (note, not check_bundle id)\n\/\/ check [bundle] by search\n\/\/ create check [bundle]\nfunc (cm *CheckManager) initializeTrapURL() error {\n\tif cm.trapURL != \"\" {\n\t\treturn nil\n\t}\n\n\tcm.trapmu.Lock()\n\tdefer cm.trapmu.Unlock()\n\n\t\/\/ special case short-circuit: just send to a url, no check management\n\t\/\/ up to user to ensure that if url is https that it will work (e.g. not self-signed)\n\tif cm.checkSubmissionURL != \"\" {\n\t\tif !cm.enabled {\n\t\t\tcm.trapURL = cm.checkSubmissionURL\n\t\t\tcm.trapLastUpdate = time.Now()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !cm.enabled {\n\t\treturn errors.New(\"unable to initialize trap, check manager is disabled\")\n\t}\n\n\tvar err error\n\tvar check *api.Check\n\tvar checkBundle *api.CheckBundle\n\tvar broker *api.Broker\n\n\tif cm.checkSubmissionURL != \"\" {\n\t\tcheck, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !check.Active {\n\t\t\treturn fmt.Errorf(\"[ERROR] Check ID %v is not active\", check.CID)\n\t\t}\n\t\t\/\/ extract check id from check object returned from looking up using submission url\n\t\t\/\/ set m.CheckId to the id\n\t\t\/\/ set m.SubmissionUrl to \"\" to prevent trying to search on it going forward\n\t\t\/\/ use case: if the broker is changed in the UI metrics would stop flowing\n\t\t\/\/ unless the new submission url can be fetched with the API (which is no\n\t\t\/\/ longer possible using the original submission url)\n\t\tvar id int\n\t\tid, err = strconv.Atoi(strings.Replace(check.CID, \"\/check\/\", \"\", -1))\n\t\tif err == nil {\n\t\t\tcm.checkID = api.IDType(id)\n\t\t\tcm.checkSubmissionURL = \"\"\n\t\t} else {\n\t\t\tcm.Log.Printf(\n\t\t\t\t\"[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\\n\",\n\t\t\t\tcheck.CID, err)\n\t\t}\n\t} else if cm.checkID > 0 {\n\t\tcheck, err = cm.apih.FetchCheckByID(cm.checkID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !check.Active {\n\t\t\treturn fmt.Errorf(\"[ERROR] Check ID %v is not active\", check.CID)\n\t\t}\n\t} else {\n\t\tsearchCriteria := fmt.Sprintf(\n\t\t\t\"(active:1)(type:\\\"%s\\\")(tags:%s)\", cm.checkType, strings.Join(cm.checkSearchTag, \",\"))\n\t\tfilterCriteria := map[string]string{\"f_notes\": cm.getNotes()}\n\t\tcheckBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif checkBundle == nil {\n\t\t\t\/\/ err==nil && checkBundle==nil is \"no check bundles matched\"\n\t\t\t\/\/ an error *should* be returned for any other invalid scenario\n\t\t\tcheckBundle, broker, err = cm.createNewCheck()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif checkBundle == nil {\n\t\tif check != nil {\n\t\t\tcheckBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCID))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"[ERROR] Unable to retrieve, find, or create check\")\n\t\t}\n\t}\n\n\tif broker == nil {\n\t\tbroker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0]))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ retain to facilitate metric management (adding new metrics specifically)\n\tcm.checkBundle = checkBundle\n\tcm.inventoryMetrics()\n\n\t\/\/ determine the trap url to which metrics should be PUT\n\tif checkBundle.Type == \"httptrap\" {\n\t\tcm.trapURL = api.URLType(checkBundle.Config.SubmissionURL)\n\t} else {\n\t\t\/\/ build a submission_url for non-httptrap checks out of mtev_reverse url\n\t\tif len(checkBundle.ReverseConnectURLs) == 0 {\n\t\t\treturn fmt.Errorf(\"%s is not an HTTPTRAP check and no reverse connection urls found\", checkBundle.Checks[0])\n\t\t}\n\t\tmtevURL := checkBundle.ReverseConnectURLs[0]\n\t\tmtevURL = strings.Replace(mtevURL, \"mtev_reverse\", \"https\", 1)\n\t\tmtevURL = strings.Replace(mtevURL, \"check\", \"module\/httptrap\", 1)\n\t\tcm.trapURL = api.URLType(fmt.Sprintf(\"%s\/%s\", mtevURL, checkBundle.Config.ReverseSecret))\n\t}\n\n\t\/\/ used when sending as \"ServerName\" get around certs not having IP SANS\n\t\/\/ (cert created with server name as CN but IP used in trap url)\n\tcn, err := cm.getBrokerCN(broker, cm.trapURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcm.trapCN = BrokerCNType(cn)\n\n\tcm.trapLastUpdate = time.Now()\n\n\treturn nil\n}\n\n\/\/ Search for a check bundle given a predetermined set of criteria\nfunc (cm *CheckManager) checkBundleSearch(criteria string, filter map[string]string) (*api.CheckBundle, error) {\n\tcheckBundles, err := cm.apih.CheckBundleFilterSearch(api.SearchQueryType(criteria), filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(checkBundles) == 0 {\n\t\treturn nil, nil \/\/ trigger creation of a new check\n\t}\n\n\tnumActive := 0\n\tcheckID := -1\n\n\tfor idx, check := range checkBundles {\n\t\tif check.Status == statusActive {\n\t\t\tnumActive++\n\t\t\tcheckID = idx\n\t\t}\n\t}\n\n\tif numActive > 1 {\n\t\treturn nil, fmt.Errorf(\"[ERROR] multiple check bundles match criteria %s\", criteria)\n\t}\n\n\treturn &checkBundles[checkID], nil\n}\n\n\/\/ Create a new check to receive metrics\nfunc (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) {\n\tcheckSecret := string(cm.checkSecret)\n\tif checkSecret == \"\" {\n\t\tsecret, err := cm.makeSecret()\n\t\tif err != nil {\n\t\t\tsecret = \"myS3cr3t\"\n\t\t}\n\t\tcheckSecret = secret\n\t}\n\n\tbroker, err := cm.getBroker()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tconfig := &api.CheckBundle{\n\t\tBrokers: []string{broker.CID},\n\t\tConfig: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},\n\t\tDisplayName: string(cm.checkDisplayName),\n\t\tMetrics: []api.CheckBundleMetric{},\n\t\tMetricLimit: 0,\n\t\tNotes: cm.getNotes(),\n\t\tPeriod: 60,\n\t\tStatus: statusActive,\n\t\tTags: append(cm.checkSearchTag, cm.checkTags...),\n\t\tTarget: string(cm.checkTarget),\n\t\tTimeout: 10,\n\t\tType: string(cm.checkType),\n\t}\n\n\tcheckBundle, err := cm.apih.CreateCheckBundle(config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn checkBundle, broker, nil\n}\n\n\/\/ Create a dynamic secret to use with a new check\nfunc (cm *CheckManager) makeSecret() (string, error) {\n\thash := sha256.New()\n\tx := make([]byte, 2048)\n\tif _, err := rand.Read(x); err != nil {\n\t\treturn \"\", err\n\t}\n\thash.Write(x)\n\treturn hex.EncodeToString(hash.Sum(nil))[0:16], nil\n}\n\nfunc (cm *CheckManager) getNotes() string {\n\treturn fmt.Sprintf(\"cgm_instanceid|%s\", cm.checkInstanceID)\n}\n<|endoftext|>"} {"text":"<commit_before>package bitly\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/url\"\n)\n\n\/\/ Links handles communication with the link related Bitly API endpoints.\ntype Links struct {\n\t*Client\n}\n\n\/\/ Link represents the data returned from link endpoints.\ntype Link struct {\n\tShortURL string `json:\"short_url\"`\n\tLongURL string `json:\"long_url\"`\n\tGlobalHash string `json:\"global_hash\"`\n\tUserHash string `json:\"user_hash\"`\n\tHash string `json:\"hash\"`\n\tNewHash int `json:\"new_hash\"`\n\tTitle string `json:\"title\"`\n\tURL string `json:\"url\"`\n\tAggregateLink string `json:\"aggregate_link\"`\n\tCreatedAt int `json:\"created_at\"`\n\tCreatedBy string `json:\"created_by\"`\n}\n\n\/\/ req wraps Client#get and unpacks the response specifically for Links methods.\nfunc (client *Links) req(path string, params url.Values, key string) (links []Link, err error) {\n\treq, err := client.get(path, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres := map[string][]Link{}\n\terr = json.Unmarshal(req.Data, &res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn res[key], err\n}\n\n\/\/ Expand returns the long urls for a given set short urls.\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_expand\nfunc (client *Links) Expand(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/expand\", url.Values{\"shortUrl\": urls}, \"expand\")\n}\n\n\/\/ Info returns the page title and other metadata for a given set of short urls.\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_info\nfunc (client *Links) Info(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/info\", url.Values{\"shortUrl\": urls}, \"info\")\n}\n\n\/\/ Lookup queries for bitlink(s) mapping to the given url(s).\n\/\/\n\/\/ Bitly API docs: https:\/\/dev.bitly.com\/links.html#v3_link_lookup\nfunc (client *Links) Lookup(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/link\/lookup\", url.Values{\"url\": urls}, \"link_lookup\")\n}\n\n\/\/ Shorten returns a short url from a given long url.\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_shorten\nfunc (client *Links) Shorten(longURL string) (link Link, err error) {\n\treq, err := client.get(\"\/shorten\", url.Values{\n\t\t\"longUrl\": []string{longURL},\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(req.Data, &link)\n\n\treturn\n}\n<commit_msg>[skip ci] Documnet return values of Links methods.<commit_after>package bitly\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/url\"\n)\n\n\/\/ Links handles communication with the link related Bitly API endpoints.\ntype Links struct {\n\t*Client\n}\n\n\/\/ Link represents the data returned from link endpoints.\ntype Link struct {\n\tShortURL string `json:\"short_url\"`\n\tLongURL string `json:\"long_url\"`\n\tGlobalHash string `json:\"global_hash\"`\n\tUserHash string `json:\"user_hash\"`\n\tHash string `json:\"hash\"`\n\tNewHash int `json:\"new_hash\"`\n\tTitle string `json:\"title\"`\n\tURL string `json:\"url\"`\n\tAggregateLink string `json:\"aggregate_link\"`\n\tCreatedAt int `json:\"created_at\"`\n\tCreatedBy string `json:\"created_by\"`\n}\n\n\/\/ req wraps Client#get and unpacks the response specifically for Links methods.\nfunc (client *Links) req(path string, params url.Values, key string) (links []Link, err error) {\n\treq, err := client.get(path, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres := map[string][]Link{}\n\terr = json.Unmarshal(req.Data, &res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn res[key], err\n}\n\n\/\/ Expand returns the long urls for a given set short urls.\n\/\/ Returns slice of:\n\/\/\n\/\/\tbitly.Link{\n\/\/\t\tGlobalHash: \"1RmnUT\",\n\/\/\t\tLongURL: \"http:\/\/google.com\",\n\/\/\t\tShortURL: \"http:\/\/bit.ly\/1RmnUT\",\n\/\/\t\tUserHash: \"1RmnUT\",\n\/\/\t}\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_expand\nfunc (client *Links) Expand(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/expand\", url.Values{\"shortUrl\": urls}, \"expand\")\n}\n\n\/\/ Info returns the page title and other metadata for a given set of short urls.\n\/\/ Returns slice of:\n\/\/\n\/\/\tbitly.Link{\n\/\/\t\tGlobalHash: \"1RmnUT\",\n\/\/\t\tShortURL: \"http:\/\/bit.ly\/1RmnUT\",\n\/\/\t\tTitle: \"Google\",\n\/\/\t\tUserHash: \"1RmnUT\",\n\/\/\t\tCreatedAt: 1212926400,\n\/\/\t}\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_info\nfunc (client *Links) Info(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/info\", url.Values{\"shortUrl\": urls}, \"info\")\n}\n\n\/\/ Lookup queries for bitlink(s) mapping to the given url(s).\n\/\/ Returns slice of:\n\/\/\n\/\/\tbitly.Link{\n\/\/\t\tURL: \"http:\/\/www.google.com\/\",\n\/\/\t\tAggregateLink: \"http:\/\/www.google.com\/\",\n\/\/\t}\n\/\/\n\/\/ Bitly API docs: https:\/\/dev.bitly.com\/links.html#v3_link_lookup\nfunc (client *Links) Lookup(urls ...string) (links []Link, err error) {\n\treturn client.req(\"\/link\/lookup\", url.Values{\"url\": urls}, \"link_lookup\")\n}\n\n\/\/ Shorten returns a short url from a given long url.\n\/\/ Returns:\n\/\/\n\/\/\tbitly.Link{\n\/\/\t\tLongURL: \"http:\/\/google.com\/\",\n\/\/\t\tGlobalHash: \"900913\",\n\/\/\t\tHash: \"ze6poY\",\n\/\/\t\tNewHash: 0,\n\/\/\t\tURL: \"http:\/\/bit.ly\/ze6poY\",\n\/\/\t}\n\/\/\n\/\/ Bitly API docs: http:\/\/dev.bitly.com\/links.html#v3_shorten\nfunc (client *Links) Shorten(longURL string) (link Link, err error) {\n\treq, err := client.get(\"\/shorten\", url.Values{\n\t\t\"longUrl\": []string{longURL},\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(req.Data, &link)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"database\/sql\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/crypto\/scrypt\"\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\nvar daemonConfigLock sync.Mutex\nvar daemonConfig map[string]*daemonConfigKey\n\ntype daemonConfigKey struct {\n\tvalueType string\n\tdefaultValue string\n\tvalidValues []string\n\tcurrentValue string\n\thiddenValue bool\n\n\tvalidator func(d *Daemon, key string, value string) error\n\tsetter func(d *Daemon, key string, value string) (string, error)\n\ttrigger func(d *Daemon, key string, value string)\n}\n\nfunc (k *daemonConfigKey) name() string {\n\tname := \"\"\n\n\t\/\/ Look for a matching entry in daemonConfig\n\tdaemonConfigLock.Lock()\n\tfor key, value := range daemonConfig {\n\t\tif value == k {\n\t\t\tname = key\n\t\t\tbreak\n\t\t}\n\t}\n\tdaemonConfigLock.Unlock()\n\n\treturn name\n}\n\nfunc (k *daemonConfigKey) Validate(d *Daemon, value string) error {\n\t\/\/ No need to validate when unsetting\n\tif value == \"\" {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate booleans\n\tif k.valueType == \"bool\" && !shared.StringInSlice(strings.ToLower(value), []string{\"true\", \"false\", \"1\", \"0\", \"yes\", \"no\"}) {\n\t\treturn fmt.Errorf(\"Invalid value for a boolean: %s\", value)\n\t}\n\n\t\/\/ Validate integers\n\tif k.valueType == \"int\" {\n\t\t_, err := strconv.ParseInt(value, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Check against valid values\n\tif k.validValues != nil && !shared.StringInSlice(value, k.validValues) {\n\t\treturn fmt.Errorf(\"Invalid value, only the following values are allowed: %s\", k.validValues)\n\t}\n\n\t\/\/ Run external validation function\n\tif k.validator != nil {\n\t\terr := k.validator(d, k.name(), value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (k *daemonConfigKey) Set(d *Daemon, value string) error {\n\tvar name string\n\n\t\/\/ Check if we are actually changing things\n\toldValue := k.currentValue\n\tif oldValue == value {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate the new value\n\terr := k.Validate(d, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Run external setting function\n\tif k.setter != nil {\n\t\tvalue, err = k.setter(d, k.name(), value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Get the configuration key and make sure daemonConfig is sane\n\tname = k.name()\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"Corrupted configuration cache\")\n\t}\n\n\t\/\/ Actually apply the change\n\tdaemonConfigLock.Lock()\n\tk.currentValue = value\n\tdaemonConfigLock.Unlock()\n\n\terr = dbConfigValueSet(d.db, name, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (k *daemonConfigKey) Get() string {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\treturn value\n}\n\nfunc (k *daemonConfigKey) GetBool() bool {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\t\/\/ Convert to boolean\n\tif shared.StringInSlice(strings.ToLower(value), []string{\"true\", \"1\", \"yes\"}) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (k *daemonConfigKey) GetInt64() int64 {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\t\/\/ Convert to int64\n\tret, _ := strconv.ParseInt(value, 10, 64)\n\treturn ret\n}\n\nfunc daemonConfigInit(db *sql.DB) error {\n\t\/\/ Set all the keys\n\tdaemonConfig = map[string]*daemonConfigKey{\n\t\t\"core.https_address\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetAddress},\n\t\t\"core.https_allowed_headers\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.https_allowed_methods\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.https_allowed_origin\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.proxy_http\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.proxy_https\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.proxy_ignore_hosts\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.trust_password\": &daemonConfigKey{valueType: \"string\", hiddenValue: true, setter: daemonConfigSetPassword},\n\n\t\t\"images.auto_update_cached\": &daemonConfigKey{valueType: \"bool\", defaultValue: \"true\"},\n\t\t\"images.auto_update_interval\": &daemonConfigKey{valueType: \"int\", defaultValue: \"6\"},\n\t\t\"images.compression_algorithm\": &daemonConfigKey{valueType: \"string\", validator: daemonConfigValidateCommand, defaultValue: \"gzip\"},\n\t\t\"images.remote_cache_expiry\": &daemonConfigKey{valueType: \"int\", defaultValue: \"10\", trigger: daemonConfigTriggerExpiry},\n\n\t\t\"storage.lvm_fstype\": &daemonConfigKey{valueType: \"string\", defaultValue: \"ext4\", validValues: []string{\"ext4\", \"xfs\"}},\n\t\t\"storage.lvm_thinpool_name\": &daemonConfigKey{valueType: \"string\", defaultValue: \"LXDPool\", validator: storageLVMValidateThinPoolName},\n\t\t\"storage.lvm_vg_name\": &daemonConfigKey{valueType: \"string\", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage},\n\t\t\"storage.lvm_volume_size\": &daemonConfigKey{valueType: \"string\", defaultValue: \"10GiB\"},\n\t\t\"storage.zfs_pool_name\": &daemonConfigKey{valueType: \"string\", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},\n\t\t\"storage.zfs_remove_snapshots\": &daemonConfigKey{valueType: \"bool\"},\n\t}\n\n\t\/\/ Load the values from the DB\n\tdbValues, err := dbConfigValuesGet(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdaemonConfigLock.Lock()\n\tfor k, v := range dbValues {\n\t\t_, ok := daemonConfig[k]\n\t\tif !ok {\n\t\t\tshared.Log.Error(\"Found invalid configuration key in database\", log.Ctx{\"key\": k})\n\t\t}\n\n\t\tdaemonConfig[k].currentValue = v\n\t}\n\tdaemonConfigLock.Unlock()\n\n\treturn nil\n}\n\nfunc daemonConfigRender() map[string]interface{} {\n\tconfig := map[string]interface{}{}\n\n\t\/\/ Turn the config into a JSON-compatible map\n\tfor k, v := range daemonConfig {\n\t\tvalue := v.Get()\n\t\tif value != v.defaultValue {\n\t\t\tif v.hiddenValue {\n\t\t\t\tconfig[k] = true\n\t\t\t} else {\n\t\t\t\tconfig[k] = value\n\t\t\t}\n\t\t}\n\t}\n\n\treturn config\n}\n\nfunc daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Nothing to do on unset\n\tif value == \"\" {\n\t\treturn value, nil\n\t}\n\n\t\/\/ Hash the password\n\tbuf := make([]byte, 32)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf = append(buf, hash...)\n\tvalue = hex.EncodeToString(buf)\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetStorage(d *Daemon, key string, value string) (string, error) {\n\t\/\/ The storage driver looks at daemonConfig so just set it temporarily\n\tdaemonConfigLock.Lock()\n\toldValue := daemonConfig[key].Get()\n\tdaemonConfig[key].currentValue = value\n\tdaemonConfigLock.Unlock()\n\n\tdefer func() {\n\t\tdaemonConfigLock.Lock()\n\t\tdaemonConfig[key].currentValue = oldValue\n\t\tdaemonConfigLock.Unlock()\n\t}()\n\n\t\/\/ Update the current storage driver\n\terr := d.SetupStorageDriver()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Update the current https address\n\terr := d.UpdateHTTPsPort(value)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Get the current config\n\tconfig := map[string]string{}\n\tconfig[\"core.proxy_https\"] = daemonConfig[\"core.proxy_https\"].Get()\n\tconfig[\"core.proxy_http\"] = daemonConfig[\"core.proxy_http\"].Get()\n\tconfig[\"core.proxy_ignore_hosts\"] = daemonConfig[\"core.proxy_ignore_hosts\"].Get()\n\n\t\/\/ Apply the change\n\tconfig[key] = value\n\n\t\/\/ Update the cached proxy function\n\td.proxy = shared.ProxyFromConfig(\n\t\tconfig[\"core.proxy_https\"],\n\t\tconfig[\"core.proxy_http\"],\n\t\tconfig[\"core.proxy_ignore_hosts\"],\n\t)\n\n\t\/\/ Clear the simplestreams cache as it's tied to the old proxy config\n\timageStreamCacheLock.Lock()\n\tfor k, _ := range imageStreamCache {\n\t\tdelete(imageStreamCache, k)\n\t}\n\timageStreamCacheLock.Unlock()\n\n\treturn value, nil\n}\n\nfunc daemonConfigTriggerExpiry(d *Daemon, key string, value string) {\n\t\/\/ Trigger an image pruning run\n\td.pruneChan <- true\n}\n\nfunc daemonConfigValidateCommand(d *Daemon, key string, value string) error {\n\t_, err := exec.LookPath(value)\n\treturn err\n}\n<commit_msg>Allow on\/off as boolean strings<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"database\/sql\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/crypto\/scrypt\"\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\nvar daemonConfigLock sync.Mutex\nvar daemonConfig map[string]*daemonConfigKey\n\ntype daemonConfigKey struct {\n\tvalueType string\n\tdefaultValue string\n\tvalidValues []string\n\tcurrentValue string\n\thiddenValue bool\n\n\tvalidator func(d *Daemon, key string, value string) error\n\tsetter func(d *Daemon, key string, value string) (string, error)\n\ttrigger func(d *Daemon, key string, value string)\n}\n\nfunc (k *daemonConfigKey) name() string {\n\tname := \"\"\n\n\t\/\/ Look for a matching entry in daemonConfig\n\tdaemonConfigLock.Lock()\n\tfor key, value := range daemonConfig {\n\t\tif value == k {\n\t\t\tname = key\n\t\t\tbreak\n\t\t}\n\t}\n\tdaemonConfigLock.Unlock()\n\n\treturn name\n}\n\nfunc (k *daemonConfigKey) Validate(d *Daemon, value string) error {\n\t\/\/ No need to validate when unsetting\n\tif value == \"\" {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate booleans\n\tif k.valueType == \"bool\" && !shared.StringInSlice(strings.ToLower(value), []string{\"true\", \"false\", \"1\", \"0\", \"yes\", \"no\", \"on\", \"off\"}) {\n\t\treturn fmt.Errorf(\"Invalid value for a boolean: %s\", value)\n\t}\n\n\t\/\/ Validate integers\n\tif k.valueType == \"int\" {\n\t\t_, err := strconv.ParseInt(value, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Check against valid values\n\tif k.validValues != nil && !shared.StringInSlice(value, k.validValues) {\n\t\treturn fmt.Errorf(\"Invalid value, only the following values are allowed: %s\", k.validValues)\n\t}\n\n\t\/\/ Run external validation function\n\tif k.validator != nil {\n\t\terr := k.validator(d, k.name(), value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (k *daemonConfigKey) Set(d *Daemon, value string) error {\n\tvar name string\n\n\t\/\/ Check if we are actually changing things\n\toldValue := k.currentValue\n\tif oldValue == value {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate the new value\n\terr := k.Validate(d, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Run external setting function\n\tif k.setter != nil {\n\t\tvalue, err = k.setter(d, k.name(), value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Get the configuration key and make sure daemonConfig is sane\n\tname = k.name()\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"Corrupted configuration cache\")\n\t}\n\n\t\/\/ Actually apply the change\n\tdaemonConfigLock.Lock()\n\tk.currentValue = value\n\tdaemonConfigLock.Unlock()\n\n\terr = dbConfigValueSet(d.db, name, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (k *daemonConfigKey) Get() string {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\treturn value\n}\n\nfunc (k *daemonConfigKey) GetBool() bool {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\t\/\/ Convert to boolean\n\tif shared.StringInSlice(strings.ToLower(value), []string{\"true\", \"1\", \"yes\", \"on\"}) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (k *daemonConfigKey) GetInt64() int64 {\n\tvalue := k.currentValue\n\n\t\/\/ Get the default value if not set\n\tif value == \"\" {\n\t\tvalue = k.defaultValue\n\t}\n\n\t\/\/ Convert to int64\n\tret, _ := strconv.ParseInt(value, 10, 64)\n\treturn ret\n}\n\nfunc daemonConfigInit(db *sql.DB) error {\n\t\/\/ Set all the keys\n\tdaemonConfig = map[string]*daemonConfigKey{\n\t\t\"core.https_address\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetAddress},\n\t\t\"core.https_allowed_headers\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.https_allowed_methods\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.https_allowed_origin\": &daemonConfigKey{valueType: \"string\"},\n\t\t\"core.proxy_http\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.proxy_https\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.proxy_ignore_hosts\": &daemonConfigKey{valueType: \"string\", setter: daemonConfigSetProxy},\n\t\t\"core.trust_password\": &daemonConfigKey{valueType: \"string\", hiddenValue: true, setter: daemonConfigSetPassword},\n\n\t\t\"images.auto_update_cached\": &daemonConfigKey{valueType: \"bool\", defaultValue: \"true\"},\n\t\t\"images.auto_update_interval\": &daemonConfigKey{valueType: \"int\", defaultValue: \"6\"},\n\t\t\"images.compression_algorithm\": &daemonConfigKey{valueType: \"string\", validator: daemonConfigValidateCommand, defaultValue: \"gzip\"},\n\t\t\"images.remote_cache_expiry\": &daemonConfigKey{valueType: \"int\", defaultValue: \"10\", trigger: daemonConfigTriggerExpiry},\n\n\t\t\"storage.lvm_fstype\": &daemonConfigKey{valueType: \"string\", defaultValue: \"ext4\", validValues: []string{\"ext4\", \"xfs\"}},\n\t\t\"storage.lvm_thinpool_name\": &daemonConfigKey{valueType: \"string\", defaultValue: \"LXDPool\", validator: storageLVMValidateThinPoolName},\n\t\t\"storage.lvm_vg_name\": &daemonConfigKey{valueType: \"string\", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage},\n\t\t\"storage.lvm_volume_size\": &daemonConfigKey{valueType: \"string\", defaultValue: \"10GiB\"},\n\t\t\"storage.zfs_pool_name\": &daemonConfigKey{valueType: \"string\", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage},\n\t\t\"storage.zfs_remove_snapshots\": &daemonConfigKey{valueType: \"bool\"},\n\t}\n\n\t\/\/ Load the values from the DB\n\tdbValues, err := dbConfigValuesGet(db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdaemonConfigLock.Lock()\n\tfor k, v := range dbValues {\n\t\t_, ok := daemonConfig[k]\n\t\tif !ok {\n\t\t\tshared.Log.Error(\"Found invalid configuration key in database\", log.Ctx{\"key\": k})\n\t\t}\n\n\t\tdaemonConfig[k].currentValue = v\n\t}\n\tdaemonConfigLock.Unlock()\n\n\treturn nil\n}\n\nfunc daemonConfigRender() map[string]interface{} {\n\tconfig := map[string]interface{}{}\n\n\t\/\/ Turn the config into a JSON-compatible map\n\tfor k, v := range daemonConfig {\n\t\tvalue := v.Get()\n\t\tif value != v.defaultValue {\n\t\t\tif v.hiddenValue {\n\t\t\t\tconfig[k] = true\n\t\t\t} else {\n\t\t\t\tconfig[k] = value\n\t\t\t}\n\t\t}\n\t}\n\n\treturn config\n}\n\nfunc daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Nothing to do on unset\n\tif value == \"\" {\n\t\treturn value, nil\n\t}\n\n\t\/\/ Hash the password\n\tbuf := make([]byte, 32)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf = append(buf, hash...)\n\tvalue = hex.EncodeToString(buf)\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetStorage(d *Daemon, key string, value string) (string, error) {\n\t\/\/ The storage driver looks at daemonConfig so just set it temporarily\n\tdaemonConfigLock.Lock()\n\toldValue := daemonConfig[key].Get()\n\tdaemonConfig[key].currentValue = value\n\tdaemonConfigLock.Unlock()\n\n\tdefer func() {\n\t\tdaemonConfigLock.Lock()\n\t\tdaemonConfig[key].currentValue = oldValue\n\t\tdaemonConfigLock.Unlock()\n\t}()\n\n\t\/\/ Update the current storage driver\n\terr := d.SetupStorageDriver()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Update the current https address\n\terr := d.UpdateHTTPsPort(value)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn value, nil\n}\n\nfunc daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) {\n\t\/\/ Get the current config\n\tconfig := map[string]string{}\n\tconfig[\"core.proxy_https\"] = daemonConfig[\"core.proxy_https\"].Get()\n\tconfig[\"core.proxy_http\"] = daemonConfig[\"core.proxy_http\"].Get()\n\tconfig[\"core.proxy_ignore_hosts\"] = daemonConfig[\"core.proxy_ignore_hosts\"].Get()\n\n\t\/\/ Apply the change\n\tconfig[key] = value\n\n\t\/\/ Update the cached proxy function\n\td.proxy = shared.ProxyFromConfig(\n\t\tconfig[\"core.proxy_https\"],\n\t\tconfig[\"core.proxy_http\"],\n\t\tconfig[\"core.proxy_ignore_hosts\"],\n\t)\n\n\t\/\/ Clear the simplestreams cache as it's tied to the old proxy config\n\timageStreamCacheLock.Lock()\n\tfor k, _ := range imageStreamCache {\n\t\tdelete(imageStreamCache, k)\n\t}\n\timageStreamCacheLock.Unlock()\n\n\treturn value, nil\n}\n\nfunc daemonConfigTriggerExpiry(d *Daemon, key string, value string) {\n\t\/\/ Trigger an image pruning run\n\td.pruneChan <- true\n}\n\nfunc daemonConfigValidateCommand(d *Daemon, key string, value string) error {\n\t_, err := exec.LookPath(value)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build linux && cgo && !agent\n\/\/ +build linux,cgo,!agent\n\npackage state\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/firewall\"\n\t\"github.com\/lxc\/lxd\/lxd\/sys\"\n)\n\n\/\/ NewTestState returns a State object initialized with testable instances of\n\/\/ the node\/cluster databases and of the OS facade.\n\/\/\n\/\/ Return the newly created State object, along with a function that can be\n\/\/ used for cleaning it up.\nfunc NewTestState(t *testing.T) (*State, func()) {\n\tnode, nodeCleanup := db.NewTestNode(t)\n\tcluster, clusterCleanup := db.NewTestCluster(t)\n\tos, osCleanup := sys.NewTestOS(t)\n\n\tcleanup := func() {\n\t\tnodeCleanup()\n\t\tclusterCleanup()\n\t\tosCleanup()\n\t}\n\n\tstate := NewState(context.TODO(), node, cluster, nil, os, nil, nil, nil, firewall.New(), nil)\n\n\treturn state, cleanup\n}\n<commit_msg>lxd\/state: Update tests with NewState usage<commit_after>\/\/go:build linux && cgo && !agent\n\/\/ +build linux,cgo,!agent\n\npackage state\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/firewall\"\n\t\"github.com\/lxc\/lxd\/lxd\/sys\"\n)\n\n\/\/ NewTestState returns a State object initialized with testable instances of\n\/\/ the node\/cluster databases and of the OS facade.\n\/\/\n\/\/ Return the newly created State object, along with a function that can be\n\/\/ used for cleaning it up.\nfunc NewTestState(t *testing.T) (*State, func()) {\n\tnode, nodeCleanup := db.NewTestNode(t)\n\tcluster, clusterCleanup := db.NewTestCluster(t)\n\tos, osCleanup := sys.NewTestOS(t)\n\n\tcleanup := func() {\n\t\tnodeCleanup()\n\t\tclusterCleanup()\n\t\tosCleanup()\n\t}\n\n\tstate := NewState(context.TODO(), node, cluster, nil, os, nil, nil, nil, firewall.New(), nil, nil, func() {})\n\n\treturn state, cleanup\n}\n<|endoftext|>"} {"text":"<commit_before>package locus\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/dpup\/locus\/tmpl\"\n)\n\n\/\/ OverrideQueryParam param that when specified in the URL overrides the request\n\/\/ in the URL.\n\/\/ e.g. http:\/\/localhost:5555\/?locus_override=http:\/\/sample.locus.xyz\nconst OverrideQueryParam = \"locus_override\"\n\n\/\/ Locus wraps a fork of golang's httputil.ReverseProxy to provide multi-host\n\/\/ routing.\ntype Locus struct {\n\n\t\/\/ VerboseLogging specifies that additional request details should be logged.\n\tVerboseLogging bool\n\n\t\/\/ AccessLog specifies an optional logger for request details. If nil,\n\t\/\/ logging goes to os.Stderr via the log package's standard logger.\n\tAccessLog *log.Logger\n\n\t\/\/ ErrorLog specifies an optional logger for exceptional occurances. If nil,\n\t\/\/ logging goes to os.Stderr via the log package's standard logger.\n\tErrorLog *log.Logger\n\n\t\/\/ Port specifies the port for incoming connections.\n\tPort uint16\n\n\t\/\/ ReadTimeout is the maximum duration before timing out read of the request.\n\tReadTimeout time.Duration\n\n\t\/\/ WriteTimeout is the maximum duration before timing out write of the\n\t\/\/ response.\n\tWriteTimeout time.Duration\n\n\t\/\/ Configs is a list of sites that locus will forward for.\n\tConfigs []*Config\n\n\tproxy *reverseProxy\n}\n\n\/\/ New returns an instance of a Locus server with the following defaults set:\n\/\/ Port = 5555\n\/\/ ReadTimeout = 30s\n\/\/ WriteTimeout = 30s\nfunc New() *Locus {\n\treturn &Locus{\n\t\tproxy: &reverseProxy{},\n\t\tConfigs: []*Config{},\n\t\tPort: 5555,\n\t\tReadTimeout: time.Second * 30,\n\t\tWriteTimeout: time.Second * 30,\n\t}\n}\n\n\/\/ FromConfig creates a new locus server from YAML config.\n\/\/ See SampleYAMLConfig.\nfunc FromConfig(data []byte) (*Locus, error) {\n\tcfgs, globals, err := loadConfigFromYAML(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocus := New()\n\n\tif globals.Port != 0 {\n\t\tlocus.Port = globals.Port\n\t}\n\tif globals.ReadTimeout != 0 {\n\t\tlocus.ReadTimeout = globals.ReadTimeout\n\t}\n\tif globals.WriteTimeout != 0 {\n\t\tlocus.WriteTimeout = globals.WriteTimeout\n\t}\n\n\tlocus.VerboseLogging = globals.VerboseLogging\n\n\tif globals.AccessLog != \"\" {\n\t\tlocus.AccessLog, err = newLogger(globals.AccessLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif globals.ErrorLog != \"\" {\n\t\tlocus.ErrorLog, err = newLogger(globals.ErrorLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, cfg := range cfgs {\n\t\tlocus.AddConfig(cfg)\n\t}\n\n\treturn locus, nil\n}\n\n\/\/ FromConfigFile creates a new locus server from a YAML config file.\nfunc FromConfigFile(filename string) (*Locus, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn FromConfig(data)\n}\n\n\/\/ NewConfig creates an empty config, registers it, then returns it.\nfunc (locus *Locus) NewConfig() *Config {\n\tcfg := &Config{Name: fmt.Sprintf(\"cfg%d\", len(locus.Configs))}\n\tlocus.AddConfig(cfg)\n\treturn cfg\n}\n\n\/\/ AddConfig adds config to the reverse proxy. Configs will be checked in the\n\/\/ order they were added, the first matching config being used to route the\n\/\/ request.\nfunc (locus *Locus) AddConfig(cfg *Config) {\n\tlocus.Configs = append(locus.Configs, cfg)\n}\n\n\/\/ ListenAndServe listens on locus.Port for incoming connections.\nfunc (locus *Locus) ListenAndServe() error {\n\ts := http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", locus.Port),\n\t\tHandler: locus,\n\t\tReadTimeout: locus.ReadTimeout,\n\t\tWriteTimeout: locus.WriteTimeout,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tlocus.elogf(\"Starting Locus on port %d\", locus.Port)\n\treturn s.ListenAndServe()\n}\n\nfunc (locus *Locus) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\toverrideParam := req.URL.Query().Get(OverrideQueryParam)\n\tif overrideParam != \"\" {\n\t\toverrideURL, err := url.Parse(overrideParam)\n\t\tif err != nil {\n\t\t\tlocus.elogf(\"error parsing override URL, ignoring: \", err)\n\t\t} else {\n\t\t\treq.URL = overrideURL\n\t\t}\n\t}\n\n\tc := locus.findConfig(req)\n\tif c != nil {\n\t\t\/\/ Found matching config so copy req, transform it, and forward it.\n\t\tproxyreq := copyRequest(req)\n\n\t\tif err := c.Transform(proxyreq); err != nil { \/\/ TODO: Render local error page.\n\t\t\tlocus.elogf(\"error transforming request: %v\", err)\n\t\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlocus.logDefaultReq(http.StatusInternalServerError, req)\n\t\t\treturn\n\t\t}\n\n\t\tstatus := http.StatusOK \/\/ TODO: extract status code from rw.\n\n\t\tif err := locus.proxy.Proxy(rw, proxyreq); err != nil { \/\/ TODO: Render local error page.\n\t\t\tlocus.elogf(\"error proxying request: %v\", err)\n\t\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\t\tstatus = http.StatusInternalServerError\n\t\t}\n\n\t\tvar d []byte\n\t\tif locus.VerboseLogging {\n\t\t\td, _ = httputil.DumpRequestOut(proxyreq, false)\n\t\t}\n\t\tlocus.alogf(\"locus[%s] %d %s %s => %s (%s \\\"%s\\\") %s\",\n\t\t\tc.Name, status, req.Method, req.URL, proxyreq.URL, req.RemoteAddr,\n\t\t\treq.Header.Get(\"User-Agent\"), string(d))\n\n\t} else if req.URL.Path == \"\/debug\/configs\" {\n\t\ttmpl.DebugTemplate.ExecuteTemplate(rw, \"configs\", locus)\n\t\tlocus.logDefaultReq(http.StatusOK, req)\n\t} else {\n\t\trw.WriteHeader(http.StatusNotImplemented)\n\t\tlocus.logDefaultReq(http.StatusNotImplemented, req)\n\t}\n}\n\nfunc (locus *Locus) logDefaultReq(status int, req *http.Request) {\n\tlocus.alogf(\"locus[-] %d %s %s (%s \\\"%s\\\")\",\n\t\tstatus, req.Method, req.URL, req.RemoteAddr, req.Header.Get(\"User-Agent\"))\n}\n\nfunc (locus *Locus) findConfig(req *http.Request) *Config {\n\tfor _, c := range locus.Configs {\n\t\tif c.Matches(req) {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (locus *Locus) alogf(format string, args ...interface{}) {\n\tif locus.AccessLog != nil {\n\t\tlocus.AccessLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\nfunc (locus *Locus) elogf(format string, args ...interface{}) {\n\tif locus.ErrorLog != nil {\n\t\tlocus.ErrorLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n<commit_msg>Add package docs<commit_after>\/\/ Package locus provides a multi-host reverse proxy.\npackage locus\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/dpup\/locus\/tmpl\"\n)\n\n\/\/ OverrideQueryParam param that when specified in the URL overrides the request\n\/\/ in the URL.\n\/\/ e.g. http:\/\/localhost:5555\/?locus_override=http:\/\/sample.locus.xyz\nconst OverrideQueryParam = \"locus_override\"\n\n\/\/ Locus wraps a fork of golang's httputil.ReverseProxy to provide multi-host\n\/\/ routing.\ntype Locus struct {\n\n\t\/\/ VerboseLogging specifies that additional request details should be logged.\n\tVerboseLogging bool\n\n\t\/\/ AccessLog specifies an optional logger for request details. If nil,\n\t\/\/ logging goes to os.Stderr via the log package's standard logger.\n\tAccessLog *log.Logger\n\n\t\/\/ ErrorLog specifies an optional logger for exceptional occurances. If nil,\n\t\/\/ logging goes to os.Stderr via the log package's standard logger.\n\tErrorLog *log.Logger\n\n\t\/\/ Port specifies the port for incoming connections.\n\tPort uint16\n\n\t\/\/ ReadTimeout is the maximum duration before timing out read of the request.\n\tReadTimeout time.Duration\n\n\t\/\/ WriteTimeout is the maximum duration before timing out write of the\n\t\/\/ response.\n\tWriteTimeout time.Duration\n\n\t\/\/ Configs is a list of sites that locus will forward for.\n\tConfigs []*Config\n\n\tproxy *reverseProxy\n}\n\n\/\/ New returns an instance of a Locus server with the following defaults set:\n\/\/ Port = 5555\n\/\/ ReadTimeout = 30s\n\/\/ WriteTimeout = 30s\nfunc New() *Locus {\n\treturn &Locus{\n\t\tproxy: &reverseProxy{},\n\t\tConfigs: []*Config{},\n\t\tPort: 5555,\n\t\tReadTimeout: time.Second * 30,\n\t\tWriteTimeout: time.Second * 30,\n\t}\n}\n\n\/\/ FromConfig creates a new locus server from YAML config.\n\/\/ See SampleYAMLConfig.\nfunc FromConfig(data []byte) (*Locus, error) {\n\tcfgs, globals, err := loadConfigFromYAML(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocus := New()\n\n\tif globals.Port != 0 {\n\t\tlocus.Port = globals.Port\n\t}\n\tif globals.ReadTimeout != 0 {\n\t\tlocus.ReadTimeout = globals.ReadTimeout\n\t}\n\tif globals.WriteTimeout != 0 {\n\t\tlocus.WriteTimeout = globals.WriteTimeout\n\t}\n\n\tlocus.VerboseLogging = globals.VerboseLogging\n\n\tif globals.AccessLog != \"\" {\n\t\tlocus.AccessLog, err = newLogger(globals.AccessLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif globals.ErrorLog != \"\" {\n\t\tlocus.ErrorLog, err = newLogger(globals.ErrorLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, cfg := range cfgs {\n\t\tlocus.AddConfig(cfg)\n\t}\n\n\treturn locus, nil\n}\n\n\/\/ FromConfigFile creates a new locus server from a YAML config file.\nfunc FromConfigFile(filename string) (*Locus, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn FromConfig(data)\n}\n\n\/\/ NewConfig creates an empty config, registers it, then returns it.\nfunc (locus *Locus) NewConfig() *Config {\n\tcfg := &Config{Name: fmt.Sprintf(\"cfg%d\", len(locus.Configs))}\n\tlocus.AddConfig(cfg)\n\treturn cfg\n}\n\n\/\/ AddConfig adds config to the reverse proxy. Configs will be checked in the\n\/\/ order they were added, the first matching config being used to route the\n\/\/ request.\nfunc (locus *Locus) AddConfig(cfg *Config) {\n\tlocus.Configs = append(locus.Configs, cfg)\n}\n\n\/\/ ListenAndServe listens on locus.Port for incoming connections.\nfunc (locus *Locus) ListenAndServe() error {\n\ts := http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", locus.Port),\n\t\tHandler: locus,\n\t\tReadTimeout: locus.ReadTimeout,\n\t\tWriteTimeout: locus.WriteTimeout,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tlocus.elogf(\"Starting Locus on port %d\", locus.Port)\n\treturn s.ListenAndServe()\n}\n\nfunc (locus *Locus) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\toverrideParam := req.URL.Query().Get(OverrideQueryParam)\n\tif overrideParam != \"\" {\n\t\toverrideURL, err := url.Parse(overrideParam)\n\t\tif err != nil {\n\t\t\tlocus.elogf(\"error parsing override URL, ignoring: \", err)\n\t\t} else {\n\t\t\treq.URL = overrideURL\n\t\t}\n\t}\n\n\tc := locus.findConfig(req)\n\tif c != nil {\n\t\t\/\/ Found matching config so copy req, transform it, and forward it.\n\t\tproxyreq := copyRequest(req)\n\n\t\tif err := c.Transform(proxyreq); err != nil { \/\/ TODO: Render local error page.\n\t\t\tlocus.elogf(\"error transforming request: %v\", err)\n\t\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlocus.logDefaultReq(http.StatusInternalServerError, req)\n\t\t\treturn\n\t\t}\n\n\t\tstatus := http.StatusOK \/\/ TODO: extract status code from rw.\n\n\t\tif err := locus.proxy.Proxy(rw, proxyreq); err != nil { \/\/ TODO: Render local error page.\n\t\t\tlocus.elogf(\"error proxying request: %v\", err)\n\t\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\t\tstatus = http.StatusInternalServerError\n\t\t}\n\n\t\tvar d []byte\n\t\tif locus.VerboseLogging {\n\t\t\td, _ = httputil.DumpRequestOut(proxyreq, false)\n\t\t}\n\t\tlocus.alogf(\"locus[%s] %d %s %s => %s (%s \\\"%s\\\") %s\",\n\t\t\tc.Name, status, req.Method, req.URL, proxyreq.URL, req.RemoteAddr,\n\t\t\treq.Header.Get(\"User-Agent\"), string(d))\n\n\t} else if req.URL.Path == \"\/debug\/configs\" {\n\t\ttmpl.DebugTemplate.ExecuteTemplate(rw, \"configs\", locus)\n\t\tlocus.logDefaultReq(http.StatusOK, req)\n\t} else {\n\t\trw.WriteHeader(http.StatusNotImplemented)\n\t\tlocus.logDefaultReq(http.StatusNotImplemented, req)\n\t}\n}\n\nfunc (locus *Locus) logDefaultReq(status int, req *http.Request) {\n\tlocus.alogf(\"locus[-] %d %s %s (%s \\\"%s\\\")\",\n\t\tstatus, req.Method, req.URL, req.RemoteAddr, req.Header.Get(\"User-Agent\"))\n}\n\nfunc (locus *Locus) findConfig(req *http.Request) *Config {\n\tfor _, c := range locus.Configs {\n\t\tif c.Matches(req) {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (locus *Locus) alogf(format string, args ...interface{}) {\n\tif locus.AccessLog != nil {\n\t\tlocus.AccessLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\nfunc (locus *Locus) elogf(format string, args ...interface{}) {\n\tif locus.ErrorLog != nil {\n\t\tlocus.ErrorLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/* The output of this utility is JSON, but not intended to be easily human-readable.\n At this moment it exists to test the rtnetlink\/route subsystem *\/\n\nimport \"os\"\nimport \"netlink\/rtnetlink\/route\"\nimport \"netlink\/rtnetlink\"\nimport \"log\"\nimport \"netlink\"\n\nfunc main(){\n rtmsg := route.NewMessage(0,0,0,0,0,0,0,0,0)\n nlmsg, err := netlink.NewMessage2(rtnetlink.RTM_GETROUTE, netlink.NLM_F_DUMP|netlink.NLM_F_REQUEST, rtmsg)\n if err != nil {\n log.Exitf(\"Couldn't construct message: %v\", err)\n }\n nlsock, err := netlink.Dial(netlink.NETLINK_ROUTE)\n if err != nil {\n log.Exitf(\"Couldn't dial netlink: %v\", err)\n }\n h := netlink.NewHandler(nlsock)\n ec := make(chan os.Error)\n go h.Start(ec)\n c := make(chan netlink.NetlinkMessage)\n err = h.SendQuery(nlmsg, c)\n if err != nil {\n log.Exitf(\"Couldn't write netlink: %v\", err)\n }\n for i := range( c) {\n switch i.MessageType() {\n case rtnetlink.RTM_NEWROUTE:\n msg := &route.RoutingMessage{}\n err = msg.UnmarshalNetlink(i.Body(), 4)\n if err == nil {\n log.Printf(\"NLMsg: %v\", msg)\n } else {\n log.Printf(\"Unmarshal error: %v\", err)\n }\n default:\n log.Printf(\"Unknown type: %v\", i)\n }\n }\n}\n<commit_msg>Update syntax<commit_after>package main\n\n\/* The output of this utility is JSON, but not intended to be easily human-readable.\n At this moment it exists to test the rtnetlink\/route subsystem *\/\n\nimport \"os\"\nimport \"netlink\/rtnetlink\/route\"\nimport \"netlink\/rtnetlink\"\nimport \"log\"\nimport \"netlink\"\n\nfunc main(){\n rtmsg := route.NewMessage(0,0,0,0,0,0,0,0,0)\n nlmsg, err := netlink.NewMessage2(rtnetlink.RTM_GETROUTE, netlink.NLM_F_DUMP|netlink.NLM_F_REQUEST, rtmsg)\n if err != nil {\n log.Exitf(\"Couldn't construct message: %v\", err)\n }\n nlsock, err := netlink.Dial(netlink.NETLINK_ROUTE)\n if err != nil {\n log.Exitf(\"Couldn't dial netlink: %v\", err)\n }\n h := netlink.NewHandler(nlsock)\n ec := make(chan os.Error)\n go h.Start(ec)\n c := make(chan netlink.NetlinkMessage)\n err = h.SendQuery(nlmsg, c)\n if err != nil {\n log.Exitf(\"Couldn't write netlink: %v\", err)\n }\n for i := range( c) {\n switch i.MessageType() {\n case rtnetlink.RTM_NEWROUTE:\n msg := rtnetlink.NewMessage(&route.Header{}, nil)\n err = msg.UnmarshalNetlink(i.Body(), 4)\n if err == nil {\n log.Printf(\"NLMsg: %v\", msg)\n } else {\n log.Printf(\"Unmarshal error: %v\", err)\n }\n default:\n log.Printf(\"Unknown type: %v\", i)\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package vjoy\n\nimport (\n\t\"errors\"\n\t\"github.com\/tajtiattila\/vjoy\/dll\"\n)\n\nvar (\n\tErrDeviceAlreadyOwned = errors.New(\"vJoy Device already open by this application\")\n\tErrDeviceBusy = errors.New(\"vJoy Device is owned by another application\")\n\tErrDeviceMissing = errors.New(\"vJoy Device is missing: either does not exist or the driver is down\")\n\tErrDeviceUnknown = errors.New(\"Unknown vJoy Device error\")\n\tErrReset = errors.New(\"Reset failed\")\n\tErrUpdate = errors.New(\"UpdateVJD failed\")\n\tErrUnknownName = errors.New(\"Unknown name (axis, button or pov)\")\n)\n\n\/\/ Available check if the vjoy.dll was successfully loaded and is enabled.\n\/\/ Other functions in this library will likely panic if Available returns\n\/\/ false.\nfunc Available() bool {\n\terr := dll.Load()\n\treturn err == nil && dll.VJoyEnabled()\n}\n\n\/\/ Version returns the version number of the installed vJoy.\nfunc Version() uint {\n\treturn uint(dll.GetvJoyVersion())\n}\n\nfunc ProductString() string {\n\treturn dll.GetvJoyProductString()\n}\n\nfunc ManufacturerString() string {\n\treturn dll.GetvJoyManufacturerString()\n}\n\nfunc SerialNumberString() string {\n\treturn dll.GetvJoySerialNumberString()\n}\n\n\/\/ ResetAll resets all VJD devices\nfunc ResetAll() error {\n\tif dll.ResetAll() {\n\t\treturn nil\n\t}\n\treturn ErrReset\n}\n\ntype AxisName int\n\nconst (\n\tAxisX AxisName = iota\n\tAxisY\n\tAxisZ\n\tAxisRX\n\tAxisRY\n\tAxisRZ\n\tSlider0\n\tSlider1\n\tMaxAxis\n)\n\nfunc axisNumber(a AxisName) uint {\n\tswitch a {\n\tcase AxisX:\n\t\treturn dll.HID_USAGE_X\n\tcase AxisY:\n\t\treturn dll.HID_USAGE_Y\n\tcase AxisZ:\n\t\treturn dll.HID_USAGE_Z\n\tcase AxisRX:\n\t\treturn dll.HID_USAGE_RX\n\tcase AxisRY:\n\t\treturn dll.HID_USAGE_RY\n\tcase AxisRZ:\n\t\treturn dll.HID_USAGE_RZ\n\tcase Slider0:\n\t\treturn dll.HID_USAGE_SL0\n\tcase Slider1:\n\t\treturn dll.HID_USAGE_SL1\n\t}\n\treturn 0\n}\n\ntype Axis struct {\n\tp *int32\n\texists bool\n\tmin, max int32\n}\n\nfunc (a *Axis) Exists() bool { return a.exists }\n\nfunc (a *Axis) Setu(val int) { *(a.p) = int32(val & 0x7fff) } \/\/ 0..0x7fff\nfunc (a *Axis) Seti(val int) { a.Setu(val - 0x4000) } \/\/ -0x4000..0x3fff\n\n\/\/ Setc is same as Setu, but truncates values to permitted bounds\nfunc (a *Axis) Setc(val int) {\n\tswitch {\n\tcase val < 0:\n\t\tval = 0\n\tcase 0x7fff < val:\n\t\tval = 0x7fff\n\t}\n\ta.Setu(val)\n}\n\nfunc (a *Axis) Setf(val float32) {\n\tvar v int\n\tif val < 0 {\n\t\tval += 1\n\t\tif val < 0 {\n\t\t\tval = 0\n\t\t}\n\t\tv = int(val * 0x4000)\n\t} else {\n\t\tif val > 1 {\n\t\t\tval = 1\n\t\t}\n\t\tv = 0x4000 + int(val*0x3fff)\n\t}\n\ta.Setu(v)\n}\n\nfunc (a *Axis) Setuf(val float32) { a.Setc(int(val * 0x7fff)) } \/\/ 0..1\n\ntype Button struct {\n\tp *int32\n\tmask int32\n\texists bool\n}\n\nfunc (b *Button) Exists() bool {\n\treturn b.exists\n}\n\nfunc (b *Button) Set(val bool) {\n\tif val {\n\t\t*(b.p) |= b.mask\n\t} else {\n\t\t*(b.p) &= ^b.mask\n\t}\n}\n\ntype HatState int\n\nconst (\n\tHatN HatState = 0\n\tHatE HatState = 1\n\tHatS HatState = 2\n\tHatW HatState = 3\n\tHatOff HatState = -1\n)\n\ntype Hat interface {\n\tExists() bool\n\tSetDiscrete(HatState)\n\tSetDegp(int) \/\/ set value in degree-percents (-1: off, 0-360000: direction)\n}\n\ntype continuousHat struct {\n\tp *uint32\n\tshift uint\n\texists bool\n}\n\nfunc (h *continuousHat) Exists() bool {\n\treturn h.exists\n}\n\nfunc (h *continuousHat) SetDiscrete(s HatState) {\n\tmask := uint32(0xffff) << h.shift\n\tval := int(s)\n\tif val > 0 {\n\t\tval *= 9000\n\t}\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\nfunc (h *continuousHat) SetDegp(val int) {\n\tmask := uint32(0xffff) << h.shift\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\ntype discreteHat struct {\n\tp *uint32\n\tshift uint\n\texists bool\n}\n\nfunc (h *discreteHat) Exists() bool {\n\treturn h.exists\n}\n\nfunc (h *discreteHat) SetDiscrete(val HatState) {\n\tmask := uint32(0xf) << h.shift\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\nfunc (h *discreteHat) SetDegp(val int) {\n\tif val > 0 {\n\t\tval = ((val + 4500) \/ 9000) % 4\n\t}\n\tmask := uint32(0xffff) << h.shift\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\n\/\/ Device represents an open vJoy device\ntype Device struct {\n\trid uint \/\/ rid Device opened with\n\tst dll.JOYSTICK_POSITION \/\/ state for Update()\n\tnoi int32\n\tnou uint32\n\taxes []*Axis\n\tbuttons []*Button\n\thats []Hat\n\tinvalidaxis *Axis\n\tinvalidbutton *Button\n\tinvalidhat Hat\n}\n\n\/\/ Acquire opens a Device for use in the application.\nfunc Acquire(rid uint) (*Device, error) {\n\td := &Device{rid: rid}\n\terr := d.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.Reset()\n\td.Update()\n\treturn d, nil\n}\n\n\/\/ Relinquish closes an acquired device\nfunc (d *Device) Relinquish() {\n\tdll.RelinquishVJD(d.rid)\n}\n\n\/\/ reset Device: Axes centered, Buttons and Hats off.\nfunc (d *Device) Reset() {\n\tconst center = 0x4000\n\td.st.Throttle = center\n\td.st.Rudder = center\n\td.st.Aileron = center\n\td.st.AxisX = center\n\td.st.AxisY = center\n\td.st.AxisZ = center\n\td.st.AxisXRot = center\n\td.st.AxisYRot = center\n\td.st.AxisZRot = center\n\td.st.Slider = center\n\td.st.Dial = center\n\td.st.Wheel = center\n\td.st.AxisVX = center\n\td.st.AxisVY = center\n\td.st.AxisVZ = center\n\td.st.AxisVBRX = center\n\td.st.AxisVBRY = center\n\td.st.AxisVBRZ = center\n\td.st.Buttons = 0\n\tconst hatoff = 0xffffffff\n\td.st.Hats = hatoff\n\td.st.HatsEx1 = hatoff\n\td.st.HatsEx2 = hatoff\n\td.st.HatsEx3 = hatoff\n}\n\n\/\/ Axis returns the given Axis to update\n\/\/ cached values for this device, to be submitted\n\/\/ by Update()\nfunc (d *Device) Axis(n AxisName) *Axis {\n\tif 0 <= n && int(n) < len(d.axes) {\n\t\treturn d.axes[n]\n\t}\n\treturn d.invalidaxis\n}\n\n\/\/ return Button number n\nfunc (d *Device) Button(n int) *Button {\n\tif 0 <= n && n < len(d.buttons) {\n\t\treturn d.buttons[n]\n\t}\n\treturn d.invalidbutton\n}\n\n\/\/ return Hat number n\n\/\/ hats aren't yet supported by vjoy\nfunc (d *Device) not_yet_suppoerted_Hat(n int) Hat {\n\tif 0 <= n && n < len(d.hats) {\n\t\treturn d.hats[n]\n\t}\n\treturn d.invalidhat\n}\n\nfunc (d *Device) Axes() []*Axis { return d.axes }\nfunc (d *Device) Buttons() []*Button { return d.buttons }\nfunc (d *Device) not_yet_suppoerted_Hats() []Hat { return d.hats }\n\n\/\/ Update VJD with values changed with in\n\/\/ Button, Hat and Axis objects.\nfunc (d *Device) Update() error {\n\tif dll.UpdateVJD(d.rid, &d.st) {\n\t\treturn nil\n\t}\n\treturn ErrUpdate\n}\n\nfunc (d *Device) init() error {\n\td.st.Device = byte(d.rid)\n\tvar err error\n\tswitch dll.GetVJDStatus(d.rid) {\n\tcase dll.VJD_STAT_OWN:\n\t\treturn ErrDeviceAlreadyOwned\n\tcase dll.VJD_STAT_FREE:\n\t\t\/\/ no op\n\tcase dll.VJD_STAT_BUSY:\n\t\terr = ErrDeviceBusy\n\tcase dll.VJD_STAT_MISS:\n\t\terr = ErrDeviceMissing\n\tdefault:\n\t\t\/\/case VJD_STAT_UNKN:\n\t\terr = ErrDeviceUnknown\n\t}\n\tif !dll.AcquireVJD(d.rid) {\n\t\treturn err\n\t}\n\n\td.invalidaxis = &Axis{p: &d.noi, exists: false}\n\td.invalidbutton = &Button{p: &d.noi, exists: false}\n\td.invalidhat = &discreteHat{p: &d.nou, exists: false}\n\n\td.axes = make([]*Axis, MaxAxis)\n\tfor i := AxisName(0); i < MaxAxis; i++ {\n\t\taxn := axisNumber(i)\n\t\ta := &Axis{exists: dll.GetVJDAxisExist(d.rid, axn)}\n\t\tswitch i {\n\t\tcase AxisX:\n\t\t\ta.p = &d.st.AxisX\n\t\tcase AxisY:\n\t\t\ta.p = &d.st.AxisY\n\t\tcase AxisZ:\n\t\t\ta.p = &d.st.AxisZ\n\t\tcase AxisRX:\n\t\t\ta.p = &d.st.AxisXRot\n\t\tcase AxisRY:\n\t\t\ta.p = &d.st.AxisYRot\n\t\tcase AxisRZ:\n\t\t\ta.p = &d.st.AxisZRot\n\t\tcase Slider0:\n\t\t\ta.p = &d.st.Slider\n\t\tcase Slider1:\n\t\t\ta.p = &d.st.Dial\n\t\tdefault:\n\t\t\ta.p = &d.noi \/\/ unused\n\t\t}\n\t\tif a.exists {\n\t\t\tdll.GetVJDAxisMin(d.rid, axn, &a.min)\n\t\t\tdll.GetVJDAxisMax(d.rid, axn, &a.max)\n\t\t}\n\t\td.axes[i] = a\n\t}\n\tn := dll.GetVJDButtonNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, mask := d.button(i)\n\t\tif p != nil {\n\t\t\td.buttons = append(d.buttons, &Button{p: p, exists: true, mask: mask})\n\t\t} else {\n\t\t\td.buttons = append(d.buttons, d.invalidbutton)\n\t\t}\n\t}\n\n\tn = dll.GetVJDDiscPovNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, shift := d.hat(i)\n\t\tif p != nil {\n\t\t\td.hats = append(d.hats, &discreteHat{p: p, exists: true, shift: shift})\n\t\t}\n\t}\n\n\tn = dll.GetVJDContPovNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, shift := d.hat(i)\n\t\tif p != nil {\n\t\t\td.hats = append(d.hats, &continuousHat{p: p, exists: true, shift: shift})\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Device) button(i int) (p *int32, mask int32) {\n\tswitch i \/ 32 {\n\tcase 0:\n\t\tp = &d.st.Buttons\n\tcase 1:\n\t\tp = &d.st.ButtonsEx1\n\tcase 2:\n\t\tp = &d.st.ButtonsEx2\n\tcase 3:\n\t\tp = &d.st.ButtonsEx3\n\t}\n\tmask = 1 << uint(i%32)\n\treturn\n}\n\nfunc (d *Device) hat(i int) (p *uint32, shift uint) {\n\tshift = uint(i%2) * 16\n\tswitch i \/ 2 {\n\tcase 0:\n\t\tp = &d.st.Hats\n\tcase 1:\n\t\tp = &d.st.HatsEx1\n\tcase 2:\n\t\tp = &d.st.HatsEx2\n\tcase 3:\n\t\tp = &d.st.HatsEx3\n\t}\n\treturn\n}\n<commit_msg>fix hat support<commit_after>package vjoy\n\nimport (\n\t\"errors\"\n\t\"github.com\/tajtiattila\/vjoy\/dll\"\n)\n\nvar (\n\tErrDeviceAlreadyOwned = errors.New(\"vJoy Device already open by this application\")\n\tErrDeviceBusy = errors.New(\"vJoy Device is owned by another application\")\n\tErrDeviceMissing = errors.New(\"vJoy Device is missing: either does not exist or the driver is down\")\n\tErrDeviceUnknown = errors.New(\"Unknown vJoy Device error\")\n\tErrReset = errors.New(\"Reset failed\")\n\tErrUpdate = errors.New(\"UpdateVJD failed\")\n\tErrUnknownName = errors.New(\"Unknown name (axis, button or pov)\")\n)\n\n\/\/ Available check if the vjoy.dll was successfully loaded and is enabled.\n\/\/ Other functions in this library will likely panic if Available returns\n\/\/ false.\nfunc Available() bool {\n\terr := dll.Load()\n\treturn err == nil && dll.VJoyEnabled()\n}\n\n\/\/ Version returns the version number of the installed vJoy.\nfunc Version() uint {\n\treturn uint(dll.GetvJoyVersion())\n}\n\nfunc ProductString() string {\n\treturn dll.GetvJoyProductString()\n}\n\nfunc ManufacturerString() string {\n\treturn dll.GetvJoyManufacturerString()\n}\n\nfunc SerialNumberString() string {\n\treturn dll.GetvJoySerialNumberString()\n}\n\n\/\/ ResetAll resets all VJD devices\nfunc ResetAll() error {\n\tif dll.ResetAll() {\n\t\treturn nil\n\t}\n\treturn ErrReset\n}\n\ntype AxisName uint\n\nconst (\n\tAxisX AxisName = iota\n\tAxisY\n\tAxisZ\n\tAxisRX\n\tAxisRY\n\tAxisRZ\n\tSlider0\n\tSlider1\n\tMaxAxis\n\n\tMaxButton = 128\n)\n\nfunc axisNumber(a AxisName) uint {\n\tswitch a {\n\tcase AxisX:\n\t\treturn dll.HID_USAGE_X\n\tcase AxisY:\n\t\treturn dll.HID_USAGE_Y\n\tcase AxisZ:\n\t\treturn dll.HID_USAGE_Z\n\tcase AxisRX:\n\t\treturn dll.HID_USAGE_RX\n\tcase AxisRY:\n\t\treturn dll.HID_USAGE_RY\n\tcase AxisRZ:\n\t\treturn dll.HID_USAGE_RZ\n\tcase Slider0:\n\t\treturn dll.HID_USAGE_SL0\n\tcase Slider1:\n\t\treturn dll.HID_USAGE_SL1\n\t}\n\treturn 0\n}\n\ntype Axis struct {\n\tp *int32\n\texists bool\n\tmin, max int32\n}\n\nfunc (a *Axis) Exists() bool { return a.exists }\n\nfunc (a *Axis) Setu(val int) { *(a.p) = int32(val & 0x7fff) } \/\/ 0..0x7fff\nfunc (a *Axis) Seti(val int) { a.Setu(val - 0x4000) } \/\/ -0x4000..0x3fff\n\n\/\/ Setc is same as Setu, but truncates values to permitted bounds\nfunc (a *Axis) Setc(val int) {\n\tswitch {\n\tcase val < 0:\n\t\tval = 0\n\tcase 0x7fff < val:\n\t\tval = 0x7fff\n\t}\n\ta.Setu(val)\n}\n\nfunc (a *Axis) Setf(val float32) {\n\tvar v int\n\tif val < 0 {\n\t\tval += 1\n\t\tif val < 0 {\n\t\t\tval = 0\n\t\t}\n\t\tv = int(val * 0x4000)\n\t} else {\n\t\tif val > 1 {\n\t\t\tval = 1\n\t\t}\n\t\tv = 0x4000 + int(val*0x3fff)\n\t}\n\ta.Setu(v)\n}\n\nfunc (a *Axis) Setuf(val float32) { a.Setc(int(val * 0x7fff)) } \/\/ 0..1\n\ntype Button struct {\n\tp *int32\n\tmask int32\n\texists bool\n}\n\nfunc (b *Button) Exists() bool {\n\treturn b.exists\n}\n\nfunc (b *Button) Set(val bool) {\n\tif val {\n\t\t*(b.p) |= b.mask\n\t} else {\n\t\t*(b.p) &= ^b.mask\n\t}\n}\n\ntype HatState int\n\nconst (\n\tHatN HatState = 0\n\tHatE HatState = 1\n\tHatS HatState = 2\n\tHatW HatState = 3\n\tHatOff HatState = -1\n)\n\ntype Hat interface {\n\tExists() bool\n\tSetDiscrete(HatState)\n\tSetDegp(int) \/\/ set value in degree-percents (-1: off, 0-360000: direction)\n}\n\ntype continuousHat struct {\n\tp *uint32\n\tshift uint\n\texists bool\n}\n\nfunc (h *continuousHat) Exists() bool {\n\treturn h.exists\n}\n\nfunc (h *continuousHat) SetDiscrete(s HatState) {\n\tmask := uint32(0xffff) << h.shift\n\tval := int(s)\n\tif val > 0 {\n\t\tval *= 9000\n\t}\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\nfunc (h *continuousHat) SetDegp(val int) {\n\tmask := uint32(0xffff) << h.shift\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\ntype discreteHat struct {\n\tp *uint32\n\tshift uint\n\texists bool\n}\n\nfunc (h *discreteHat) Exists() bool {\n\treturn h.exists\n}\n\nfunc (h *discreteHat) SetDiscrete(val HatState) {\n\tmask := uint32(0xf) << h.shift\n\tvsh := (uint32(val) & 0xf) << h.shift\n\t*(h.p) = (*(h.p) & ^mask) | vsh\n}\n\nfunc (h *discreteHat) SetDegp(val int) {\n\tif val > 0 {\n\t\tval = ((val + 4500) \/ 9000) % 4\n\t}\n\tmask := uint32(0xffff) << h.shift\n\t*(h.p) = (*(h.p) & mask) | (uint32(val) << h.shift)\n}\n\n\/\/ Device represents an open vJoy device\ntype Device struct {\n\trid uint \/\/ rid Device opened with\n\tst dll.JOYSTICK_POSITION \/\/ state for Update()\n\tnoi int32\n\tnou uint32\n\taxes []*Axis\n\tbuttons []*Button\n\thats []Hat\n\tinvalidaxis *Axis\n\tinvalidbutton *Button\n\tinvalidhat Hat\n}\n\n\/\/ Acquire opens a Device for use in the application.\nfunc Acquire(rid uint) (*Device, error) {\n\td := &Device{rid: rid}\n\terr := d.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.Reset()\n\td.Update()\n\treturn d, nil\n}\n\n\/\/ Relinquish closes an acquired device\nfunc (d *Device) Relinquish() {\n\tdll.RelinquishVJD(d.rid)\n}\n\n\/\/ reset Device: Axes centered, Buttons and Hats off.\nfunc (d *Device) Reset() {\n\tconst center = 0x4000\n\td.st.Throttle = center\n\td.st.Rudder = center\n\td.st.Aileron = center\n\td.st.AxisX = center\n\td.st.AxisY = center\n\td.st.AxisZ = center\n\td.st.AxisXRot = center\n\td.st.AxisYRot = center\n\td.st.AxisZRot = center\n\td.st.Slider = center\n\td.st.Dial = center\n\td.st.Wheel = center\n\td.st.AxisVX = center\n\td.st.AxisVY = center\n\td.st.AxisVZ = center\n\td.st.AxisVBRX = center\n\td.st.AxisVBRY = center\n\td.st.AxisVBRZ = center\n\td.st.Buttons = 0\n\tconst hatoff = 0xffffffff\n\td.st.Hats = hatoff\n\td.st.HatsEx1 = hatoff\n\td.st.HatsEx2 = hatoff\n\td.st.HatsEx3 = hatoff\n}\n\n\/\/ Axis returns the given Axis to update\n\/\/ cached values for this device, to be submitted\n\/\/ by Update()\nfunc (d *Device) Axis(n AxisName) *Axis {\n\tif int(n) < len(d.axes) {\n\t\treturn d.axes[n]\n\t}\n\treturn d.invalidaxis\n}\n\n\/\/ return Button number n\nfunc (d *Device) Button(n uint) *Button {\n\tif int(n) < len(d.buttons) {\n\t\treturn d.buttons[n]\n\t}\n\treturn d.invalidbutton\n}\n\n\/\/ return Hat number n\n\/\/ hats aren't yet supported by vjoy\nfunc (d *Device) Hat(n int) Hat {\n\tif 0 <= n && n < len(d.hats) {\n\t\treturn d.hats[n]\n\t}\n\treturn d.invalidhat\n}\n\nfunc (d *Device) Axes() []*Axis { return d.axes }\nfunc (d *Device) Buttons() []*Button { return d.buttons }\nfunc (d *Device) Hats() []Hat { return d.hats }\n\n\/\/ Update VJD with values changed with in\n\/\/ Button, Hat and Axis objects.\nfunc (d *Device) Update() error {\n\tif dll.UpdateVJD(d.rid, &d.st) {\n\t\treturn nil\n\t}\n\treturn ErrUpdate\n}\n\nfunc (d *Device) init() error {\n\td.st.Device = byte(d.rid)\n\tvar err error\n\tswitch dll.GetVJDStatus(d.rid) {\n\tcase dll.VJD_STAT_OWN:\n\t\treturn ErrDeviceAlreadyOwned\n\tcase dll.VJD_STAT_FREE:\n\t\t\/\/ no op\n\tcase dll.VJD_STAT_BUSY:\n\t\terr = ErrDeviceBusy\n\tcase dll.VJD_STAT_MISS:\n\t\terr = ErrDeviceMissing\n\tdefault:\n\t\t\/\/case VJD_STAT_UNKN:\n\t\terr = ErrDeviceUnknown\n\t}\n\tif !dll.AcquireVJD(d.rid) {\n\t\treturn err\n\t}\n\n\td.invalidaxis = &Axis{p: &d.noi, exists: false}\n\td.invalidbutton = &Button{p: &d.noi, exists: false}\n\td.invalidhat = &discreteHat{p: &d.nou, exists: false}\n\n\td.axes = make([]*Axis, MaxAxis)\n\tfor i := AxisName(0); i < MaxAxis; i++ {\n\t\taxn := axisNumber(i)\n\t\ta := &Axis{exists: dll.GetVJDAxisExist(d.rid, axn)}\n\t\tswitch i {\n\t\tcase AxisX:\n\t\t\ta.p = &d.st.AxisX\n\t\tcase AxisY:\n\t\t\ta.p = &d.st.AxisY\n\t\tcase AxisZ:\n\t\t\ta.p = &d.st.AxisZ\n\t\tcase AxisRX:\n\t\t\ta.p = &d.st.AxisXRot\n\t\tcase AxisRY:\n\t\t\ta.p = &d.st.AxisYRot\n\t\tcase AxisRZ:\n\t\t\ta.p = &d.st.AxisZRot\n\t\tcase Slider0:\n\t\t\ta.p = &d.st.Slider\n\t\tcase Slider1:\n\t\t\ta.p = &d.st.Dial\n\t\tdefault:\n\t\t\ta.p = &d.noi \/\/ unused\n\t\t}\n\t\tif a.exists {\n\t\t\tdll.GetVJDAxisMin(d.rid, axn, &a.min)\n\t\t\tdll.GetVJDAxisMax(d.rid, axn, &a.max)\n\t\t}\n\t\td.axes[i] = a\n\t}\n\tn := dll.GetVJDButtonNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, mask := d.button(i)\n\t\tif p != nil {\n\t\t\td.buttons = append(d.buttons, &Button{p: p, exists: true, mask: mask})\n\t\t} else {\n\t\t\td.buttons = append(d.buttons, d.invalidbutton)\n\t\t}\n\t}\n\n\tn = dll.GetVJDDiscPovNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, shift := d.dhat(i)\n\t\tif p != nil {\n\t\t\td.hats = append(d.hats, &discreteHat{p: p, exists: true, shift: shift})\n\t\t}\n\t}\n\n\tn = dll.GetVJDContPovNumber(d.rid)\n\tfor i := 0; i < n; i++ {\n\t\tp, shift := d.chat(i)\n\t\tif p != nil {\n\t\t\td.hats = append(d.hats, &continuousHat{p: p, exists: true, shift: shift})\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Device) button(i int) (p *int32, mask int32) {\n\tswitch i \/ 32 {\n\tcase 0:\n\t\tp = &d.st.Buttons\n\tcase 1:\n\t\tp = &d.st.ButtonsEx1\n\tcase 2:\n\t\tp = &d.st.ButtonsEx2\n\tcase 3:\n\t\tp = &d.st.ButtonsEx3\n\t}\n\tmask = 1 << uint(i%32)\n\treturn\n}\n\nfunc (d *Device) dhat(i int) (p *uint32, shift uint) {\n\tp = &d.st.Hats\n\tshift = uint(i) * 4\n\treturn\n}\n\nfunc (d *Device) chat(i int) (p *uint32, shift uint) {\n\tshift = uint(i%2) * 16\n\tswitch i \/ 2 {\n\tcase 0:\n\t\tp = &d.st.Hats\n\tcase 1:\n\t\tp = &d.st.HatsEx1\n\tcase 2:\n\t\tp = &d.st.HatsEx2\n\tcase 3:\n\t\tp = &d.st.HatsEx3\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package transaction\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\/\/ Vendor\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcutil\/base58\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/ripemd160\"\n)\n\n\/\/Encoder structure for the converter\ntype Encoder struct {\n\tw io.Writer\n}\n\n\/\/NewEncoder initializing a new converter\nfunc NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}\n\n\/\/EncodeVarint converting int64 to byte\nfunc (encoder *Encoder) EncodeVarint(i int64) error {\n\tif i >= 0 {\n\t\treturn encoder.EncodeUVarint(uint64(i))\n\t}\n\n\tb := make([]byte, binary.MaxVarintLen64)\n\tn := binary.PutVarint(b, i)\n\treturn encoder.writeBytes(b[:n])\n}\n\n\/\/EncodeUVarint converting uint64 to byte\nfunc (encoder *Encoder) EncodeUVarint(i uint64) error {\n\tb := make([]byte, binary.MaxVarintLen64)\n\tn := binary.PutUvarint(b, i)\n\treturn encoder.writeBytes(b[:n])\n}\n\n\/\/EncodeNumber converting number to byte\nfunc (encoder *Encoder) EncodeNumber(v interface{}) error {\n\tif err := binary.Write(encoder.w, binary.LittleEndian, v); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", v)\n\t}\n\treturn nil\n}\n\n\/\/EncodeArrString converting []string to byte\nfunc (encoder *Encoder) EncodeArrString(v []string) error {\n\tif err := encoder.EncodeUVarint(uint64(len(v))); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", v)\n\t}\n\tfor _, val := range v {\n\t\tif err := encoder.EncodeUVarint(uint64(len(val))); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", val)\n\t\t}\n\t\tif _, err := io.Copy(encoder.w, strings.NewReader(val)); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", val)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/Encode function that determines the input values of which converter to use\nfunc (encoder *Encoder) Encode(v interface{}) error {\n\tif marshaller, ok := v.(TransactionMarshaller); ok {\n\t\treturn marshaller.MarshalTransaction(encoder)\n\t}\n\n\tswitch v := v.(type) {\n\tcase int:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int8:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int16:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int32:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int64:\n\t\treturn encoder.EncodeNumber(v)\n\n\tcase uint:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint8:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint16:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint32:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint64:\n\t\treturn encoder.EncodeNumber(v)\n\n\tcase string:\n\t\treturn encoder.EncodeString(v)\n\tcase []byte:\n\t\treturn encoder.writeBytes(v)\n\tdefault:\n\t\treturn errors.Errorf(\"encoder: unsupported type encountered\")\n\t}\n}\n\n\/\/EncodeString converting string to byte\nfunc (encoder *Encoder) EncodeString(v string) error {\n\tif err := encoder.EncodeUVarint(uint64(len(v))); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", v)\n\t}\n\n\treturn encoder.writeString(v)\n}\n\nfunc (encoder *Encoder) writeBytes(bs []byte) error {\n\tif _, err := encoder.w.Write(bs); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write bytes: %v\", bs)\n\t}\n\treturn nil\n}\n\nfunc (encoder *Encoder) writeString(s string) error {\n\tif _, err := io.Copy(encoder.w, strings.NewReader(s)); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", s)\n\t}\n\treturn nil\n}\n\n\/\/EncodeBool converting bool to byte\nfunc (encoder *Encoder) EncodeBool(b bool) error {\n\tif b {\n\t\treturn encoder.EncodeNumber(byte(1))\n\t}\n\treturn encoder.EncodeNumber(byte(0))\n}\n\n\/\/EncodeMoney converting Asset to byte\nfunc (encoder *Encoder) EncodeMoney(s string) error {\n\tr, _ := regexp.Compile(`^[0-9]+\\\\.?[0-9]* [A-Za-z0-9]+$`)\n\tif r.MatchString(s) {\n\t\tasset := strings.Split(s, \" \")\n\t\tamm, errParsInt := strconv.ParseInt(strings.Replace(asset[0], \".\", \"\", -1), 10, 64)\n\t\tif errParsInt != nil {\n\t\t\treturn errParsInt\n\t\t}\n\t\tind := strings.Index(asset[0], \".\")\n\t\tvar perc int\n\t\tif ind == -1 {\n\t\t\tperc = 0\n\t\t} else {\n\t\t\tperc = len(asset[0]) - ind - 1\n\t\t}\n\t\tif err := binary.Write(encoder.w, binary.LittleEndian, amm); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", amm)\n\t\t}\n\t\tif err := binary.Write(encoder.w, binary.LittleEndian, byte(perc)); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", perc)\n\t\t}\n\n\t\tif _, err := io.Copy(encoder.w, strings.NewReader(asset[1])); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", asset[1])\n\t\t}\n\n\t\tfor i := byte(len(asset[1])); i < 7; i++ {\n\t\t\tif err := binary.Write(encoder.w, binary.LittleEndian, byte(0)); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", 0)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.New(\"Expecting amount like '99.000 SYMBOL'\")\n}\n\n\/\/EncodePubKey converting PubKey to byte\nfunc (encoder *Encoder) EncodePubKey(s string) error {\n\tpkn1 := strings.Join(strings.Split(s, \"\")[3:], \"\")\n\tb58 := base58.Decode(pkn1)\n\tchs := b58[len(b58)-4:]\n\tpkn2 := b58[:len(b58)-4]\n\tchHash := ripemd160.New()\n\t_, errHash := chHash.Write(pkn2)\n\tif errHash != nil {\n\t\treturn errHash\n\t}\n\tnchs := chHash.Sum(nil)[:4]\n\tif bytes.Equal(chs, nchs) {\n\t\tif string(pkn2) == string(make([]byte, 33)) {\n\t\t\treturn encoder.writeBytes(pkn2)\n\t\t}\n\t\tpkn3, _ := btcec.ParsePubKey(pkn2, btcec.S256())\n\t\tif _, err := encoder.w.Write(pkn3.SerializeCompressed()); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write bytes: %v\", pkn3.SerializeCompressed())\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.New(\"Public key is incorrect\")\n}\n<commit_msg>Update encoder.go<commit_after>package transaction\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\/\/ Vendor\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcutil\/base58\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/ripemd160\"\n)\n\n\/\/Encoder structure for the converter\ntype Encoder struct {\n\tw io.Writer\n}\n\n\/\/NewEncoder initializing a new converter\nfunc NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}\n\n\/\/EncodeVarint converting int64 to byte\nfunc (encoder *Encoder) EncodeVarint(i int64) error {\n\tif i >= 0 {\n\t\treturn encoder.EncodeUVarint(uint64(i))\n\t}\n\n\tb := make([]byte, binary.MaxVarintLen64)\n\tn := binary.PutVarint(b, i)\n\treturn encoder.writeBytes(b[:n])\n}\n\n\/\/EncodeUVarint converting uint64 to byte\nfunc (encoder *Encoder) EncodeUVarint(i uint64) error {\n\tb := make([]byte, binary.MaxVarintLen64)\n\tn := binary.PutUvarint(b, i)\n\treturn encoder.writeBytes(b[:n])\n}\n\n\/\/EncodeNumber converting number to byte\nfunc (encoder *Encoder) EncodeNumber(v interface{}) error {\n\tif err := binary.Write(encoder.w, binary.LittleEndian, v); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", v)\n\t}\n\treturn nil\n}\n\n\/\/EncodeArrString converting []string to byte\nfunc (encoder *Encoder) EncodeArrString(v []string) error {\n\tif err := encoder.EncodeUVarint(uint64(len(v))); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", v)\n\t}\n\tfor _, val := range v {\n\t\tif err := encoder.EncodeUVarint(uint64(len(val))); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", val)\n\t\t}\n\t\tif _, err := io.Copy(encoder.w, strings.NewReader(val)); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", val)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/Encode function that determines the input values of which converter to use\nfunc (encoder *Encoder) Encode(v interface{}) error {\n\tif marshaller, ok := v.(TransactionMarshaller); ok {\n\t\treturn marshaller.MarshalTransaction(encoder)\n\t}\n\n\tswitch v := v.(type) {\n\tcase int:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int8:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int16:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int32:\n\t\treturn encoder.EncodeNumber(v)\n\tcase int64:\n\t\treturn encoder.EncodeNumber(v)\n\n\tcase uint:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint8:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint16:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint32:\n\t\treturn encoder.EncodeNumber(v)\n\tcase uint64:\n\t\treturn encoder.EncodeNumber(v)\n\n\tcase string:\n\t\treturn encoder.EncodeString(v)\n\tcase []byte:\n\t\treturn encoder.writeBytes(v)\n\tdefault:\n\t\treturn errors.Errorf(\"encoder: unsupported type encountered\")\n\t}\n}\n\n\/\/EncodeString converting string to byte\nfunc (encoder *Encoder) EncodeString(v string) error {\n\tif err := encoder.EncodeUVarint(uint64(len(v))); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", v)\n\t}\n\n\treturn encoder.writeString(v)\n}\n\nfunc (encoder *Encoder) writeBytes(bs []byte) error {\n\tif _, err := encoder.w.Write(bs); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write bytes: %v\", bs)\n\t}\n\treturn nil\n}\n\nfunc (encoder *Encoder) writeString(s string) error {\n\tif _, err := io.Copy(encoder.w, strings.NewReader(s)); err != nil {\n\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", s)\n\t}\n\treturn nil\n}\n\n\/\/EncodeBool converting bool to byte\nfunc (encoder *Encoder) EncodeBool(b bool) error {\n\tif b {\n\t\treturn encoder.EncodeNumber(byte(1))\n\t}\n\treturn encoder.EncodeNumber(byte(0))\n}\n\n\/\/EncodeMoney converting Asset to byte\nfunc (encoder *Encoder) EncodeMoney(s string) error {\n\tr, _ := regexp.Compile(\"^[0-9]+\\\\.?[0-9]* [A-Za-z0-9]+$\")\n\tif r.MatchString(s) {\n\t\tasset := strings.Split(s, \" \")\n\t\tamm, errParsInt := strconv.ParseInt(strings.Replace(asset[0], \".\", \"\", -1), 10, 64)\n\t\tif errParsInt != nil {\n\t\t\treturn errParsInt\n\t\t}\n\t\tind := strings.Index(asset[0], \".\")\n\t\tvar perc int\n\t\tif ind == -1 {\n\t\t\tperc = 0\n\t\t} else {\n\t\t\tperc = len(asset[0]) - ind - 1\n\t\t}\n\t\tif err := binary.Write(encoder.w, binary.LittleEndian, amm); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", amm)\n\t\t}\n\t\tif err := binary.Write(encoder.w, binary.LittleEndian, byte(perc)); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", perc)\n\t\t}\n\n\t\tif _, err := io.Copy(encoder.w, strings.NewReader(asset[1])); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write string: %v\", asset[1])\n\t\t}\n\n\t\tfor i := byte(len(asset[1])); i < 7; i++ {\n\t\t\tif err := binary.Write(encoder.w, binary.LittleEndian, byte(0)); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"encoder: failed to write number: %v\", 0)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.New(\"Expecting amount like '99.000 SYMBOL'\")\n}\n\n\/\/EncodePubKey converting PubKey to byte\nfunc (encoder *Encoder) EncodePubKey(s string) error {\n\tpkn1 := strings.Join(strings.Split(s, \"\")[3:], \"\")\n\tb58 := base58.Decode(pkn1)\n\tchs := b58[len(b58)-4:]\n\tpkn2 := b58[:len(b58)-4]\n\tchHash := ripemd160.New()\n\t_, errHash := chHash.Write(pkn2)\n\tif errHash != nil {\n\t\treturn errHash\n\t}\n\tnchs := chHash.Sum(nil)[:4]\n\tif bytes.Equal(chs, nchs) {\n\t\tif string(pkn2) == string(make([]byte, 33)) {\n\t\t\treturn encoder.writeBytes(pkn2)\n\t\t}\n\t\tpkn3, _ := btcec.ParsePubKey(pkn2, btcec.S256())\n\t\tif _, err := encoder.w.Write(pkn3.SerializeCompressed()); err != nil {\n\t\t\treturn errors.Wrapf(err, \"encoder: failed to write bytes: %v\", pkn3.SerializeCompressed())\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.New(\"Public key is incorrect\")\n}\n<|endoftext|>"} {"text":"<commit_before>package mirror\n\nimport (\n\t\"reflect\"\n)\n\n\/\/ VisitType processes type information.\n\/\/ It is called by Walk.\n\/\/ - typ is:\n\/\/ - a wrapped type (if len(typ.Index) == 0)\n\/\/ - an interface method (type.Index[0] < 0; ignore first value); this is clumsy...\n\/\/ - a field of a struct\n\/\/ - typeIndex is the unique index of this type in the set types enountered during a walk\n\/\/ - depth counts the number of indirections used to get to this type from the root type\n\/\/\n\/\/ - for a map, the key and the element types are visited\n\/\/ - for a function, the input argument types and output argument types are visited\n\/\/ - for an interface, all contained functions are visited\ntype VisitType func(typ *reflect.StructField, typeIndex, depth int) error\n\ntype typeWalker map[reflect.Type]int\n\nfunc (walker typeWalker) index(t reflect.Type) (int, bool) {\n\tidx, known := walker[t]\n\tif !known {\n\t\tidx = len(walker)\n\t\twalker[t] = idx\n\t}\n\treturn idx, known\n}\n\nfunc (walker typeWalker) walk(t *reflect.StructField, visit VisitType) error {\n\tif t == nil || visit == nil {\n\t\treturn nil\n\t}\n\ttype stackNode struct {\n\t\tfield *reflect.StructField\n\t\tcurrentIdx int\n\t\tdepth int\n\t\tknown bool\n\t}\n\tidx, known := walker.index(t.Type)\n\tstack := []stackNode{{t, idx, 0, known}}\n\tvar node stackNode\n\tfor lastIdx := 0; lastIdx >= 0; lastIdx = len(stack) - 1 {\n\t\tstack, node = stack[:lastIdx], stack[lastIdx]\n\t\terr := visit(node.field, node.currentIdx, node.depth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif node.known {\n\t\t\t\/\/ to prevent endless loops, don't follow known types\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ follow container types\n\t\tdepth := node.depth + 1\n\t\tswitch t := node.field.Type; t.Kind() {\n\t\tcase reflect.Struct:\n\t\t\tfields := t.NumField()\n\t\t\tfor i := fields - 1; i >= 0; i-- {\n\t\t\t\t\/\/ add each field from struct to stack. As this is LIFO, start with the last field.\n\t\t\t\tfield := t.Field(i)\n\t\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\t\tstack = append(stack, stackNode{&field, typeIdx, depth, known})\n\t\t\t}\n\t\tcase reflect.Ptr, reflect.Array, reflect.Slice, reflect.Chan, reflect.Map:\n\t\t\t\/\/ add element to stack\n\t\t\tfield := &reflect.StructField{Type: t.Elem()}\n\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\tstack = append(stack, stackNode{field, typeIdx, depth, known})\n\t\t\tif t.Kind() == reflect.Map {\n\t\t\t\t\/\/ add key to stack\n\t\t\t\tfield := &reflect.StructField{Type: t.Key()}\n\t\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\t\tstack = append(stack, stackNode{field, typeIdx, depth, known})\n\t\t\t}\n\t\tcase reflect.Interface:\n\t\t\tfor i := t.NumMethod() - 1; i >= 0; i-- {\n\t\t\t\tm := t.Method(i)\n\t\t\t\ttypeIdx, known := walker.index(m.Type)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{\n\t\t\t\t\t\tName: m.Name,\n\t\t\t\t\t\tPkgPath: m.PkgPath,\n\t\t\t\t\t\tType: m.Type,\n\t\t\t\t\t\tIndex: []int{-1, m.Index},\n\t\t\t\t\t},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\tcase reflect.Func:\n\t\t\tfor i := t.NumOut() - 1; i >= 0; i-- {\n\t\t\t\tr := t.Out(i)\n\t\t\t\ttypeIdx, known := walker.index(r)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{Type: r},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor i := t.NumIn() - 1; i >= 0; i-- {\n\t\t\t\ta := t.In(i)\n\t\t\t\ttypeIdx, known := walker.index(a)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{Type: a},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Walk will call visit on t and each type in t.\n\/\/ It will follow struct fields, pointers, arrays, slices, maps and chans\n\/\/ and call visit on each element.\n\/\/ Walk will only follow into types at their very first occurence during this walk.\nfunc Walk(t reflect.Type, visit VisitType) error {\n\twalker := make(typeWalker)\n\treturn walker.walk(&reflect.StructField{Type: t}, visit)\n}\n<commit_msg>slightly better variable names<commit_after>package mirror\n\nimport (\n\t\"reflect\"\n)\n\n\/\/ VisitType processes type information.\n\/\/ It is called by Walk.\n\/\/ - typ is:\n\/\/ - a wrapped type (if len(typ.Index) == 0)\n\/\/ - an interface method (type.Index[0] < 0; ignore first value); this is clumsy...\n\/\/ - a field of a struct\n\/\/ - typeIndex is the unique index of this type in the set types enountered during a walk\n\/\/ - depth counts the number of indirections used to get to this type from the root type\n\/\/\n\/\/ - for a map, the key and the element types are visited\n\/\/ - for a function, the input argument types and output argument types are visited\n\/\/ - for an interface, all contained functions are visited\ntype VisitType func(typ *reflect.StructField, typeIndex, depth int) error\n\ntype typeWalker map[reflect.Type]int\n\nfunc (walker typeWalker) index(t reflect.Type) (int, bool) {\n\tidx, known := walker[t]\n\tif !known {\n\t\tidx = len(walker)\n\t\twalker[t] = idx\n\t}\n\treturn idx, known\n}\n\nfunc (walker typeWalker) walk(t *reflect.StructField, visit VisitType) error {\n\tif t == nil || visit == nil {\n\t\treturn nil\n\t}\n\ttype stackNode struct {\n\t\tfield *reflect.StructField\n\t\tcurrentIdx int\n\t\tdepth int\n\t\tknown bool\n\t}\n\tidx, known := walker.index(t.Type)\n\tstack := []stackNode{{t, idx, 0, known}}\n\tvar node stackNode\n\tfor lastIdx := 0; lastIdx >= 0; lastIdx = len(stack) - 1 {\n\t\tstack, node = stack[:lastIdx], stack[lastIdx]\n\t\terr := visit(node.field, node.currentIdx, node.depth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif node.known {\n\t\t\t\/\/ to prevent endless loops, don't follow known types\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ follow container types\n\t\tdepth := node.depth + 1\n\t\tswitch t := node.field.Type; t.Kind() {\n\t\tcase reflect.Struct:\n\t\t\tfields := t.NumField()\n\t\t\tfor i := fields - 1; i >= 0; i-- {\n\t\t\t\t\/\/ add each field from struct to stack. As this is LIFO, start with the last field.\n\t\t\t\tfield := t.Field(i)\n\t\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\t\tstack = append(stack, stackNode{&field, typeIdx, depth, known})\n\t\t\t}\n\t\tcase reflect.Ptr, reflect.Array, reflect.Slice, reflect.Chan, reflect.Map:\n\t\t\t\/\/ add element to stack\n\t\t\tfield := &reflect.StructField{Type: t.Elem()}\n\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\tstack = append(stack, stackNode{field, typeIdx, depth, known})\n\t\t\tif t.Kind() == reflect.Map {\n\t\t\t\t\/\/ add key to stack\n\t\t\t\tfield := &reflect.StructField{Type: t.Key()}\n\t\t\t\ttypeIdx, known := walker.index(field.Type)\n\t\t\t\tstack = append(stack, stackNode{field, typeIdx, depth, known})\n\t\t\t}\n\t\tcase reflect.Interface:\n\t\t\tfor i := t.NumMethod() - 1; i >= 0; i-- {\n\t\t\t\tm := t.Method(i)\n\t\t\t\ttypeIdx, known := walker.index(m.Type)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{\n\t\t\t\t\t\tName: m.Name,\n\t\t\t\t\t\tPkgPath: m.PkgPath,\n\t\t\t\t\t\tType: m.Type,\n\t\t\t\t\t\tIndex: []int{-1, m.Index},\n\t\t\t\t\t},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\tcase reflect.Func:\n\t\t\tfor i := t.NumOut() - 1; i >= 0; i-- {\n\t\t\t\tret := t.Out(i)\n\t\t\t\ttypeIdx, known := walker.index(ret)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{Type: ret},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor i := t.NumIn() - 1; i >= 0; i-- {\n\t\t\t\targ := t.In(i)\n\t\t\t\ttypeIdx, known := walker.index(arg)\n\t\t\t\tstack = append(stack, stackNode{\n\t\t\t\t\t&reflect.StructField{Type: arg},\n\t\t\t\t\ttypeIdx,\n\t\t\t\t\tdepth,\n\t\t\t\t\tknown,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Walk will call visit on t and each type in t.\n\/\/ It will follow struct fields, pointers, arrays, slices, maps and chans\n\/\/ and call visit on each element.\n\/\/ Walk will only follow into types at their very first occurence during this walk.\nfunc Walk(t reflect.Type, visit VisitType) error {\n\twalker := make(typeWalker)\n\treturn walker.walk(&reflect.StructField{Type: t}, visit)\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudinit\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"launchpad.net\/goyaml\"\n\t\"launchpad.net\/juju-core\/cloudinit\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/environs\/config\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/upstart\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ TODO(dfc) duplicated from environs\/ec2\n\nconst zkPort = 37017\n\nvar zkPortSuffix = fmt.Sprintf(\":%d\", zkPort)\n\n\/\/ MachineConfig represents initialization information for a new juju machine.\n\/\/ Creation of cloudinit data from this struct is largely provider-independent,\n\/\/ but we'll keep it internal until we need to factor it out.\ntype MachineConfig struct {\n\t\/\/ Provisioner specifies whether the new machine will run a provisioning agent.\n\tProvisioner bool\n\n\t\/\/ StateServer specifies whether the new machine will run a ZooKeeper \n\t\/\/ or MongoDB instance.\n\tStateServer bool\n\n\t\/\/ InstanceIdAccessor holds bash code that evaluates to the current instance id.\n\tInstanceIdAccessor string\n\n\t\/\/ ProviderType identifies the provider type so the host\n\t\/\/ knows which kind of provider to use.\n\tProviderType string\n\n\t\/\/ StateInfo holds the means for the new instance to communicate with the\n\t\/\/ juju state. Unless the new machine is running a state server (StateServer is\n\t\/\/ set), there must be at least one state server address supplied.\n\tStateInfo *state.Info\n\n\t\/\/ Tools is juju tools to be used on the new machine.\n\tTools *state.Tools\n\n\t\/\/ DataDir holds the directory that juju state will be put in the new\n\t\/\/ machine.\n\tDataDir string\n\n\t\/\/ MachineId identifies the new machine. It must be non-negative.\n\tMachineId int\n\n\t\/\/ AuthorizedKeys specifies the keys that are allowed to\n\t\/\/ connect to the machine (see cloudinit.SSHAddAuthorizedKeys)\n\t\/\/ If no keys are supplied, there can be no ssh access to the node.\n\t\/\/ On a bootstrap machine, that is fatal. On other\n\t\/\/ machines it will mean that the ssh, scp and debug-hooks\n\t\/\/ commands cannot work.\n\tAuthorizedKeys string\n\n\t\/\/ Config holds the initial environment configuration.\n\tConfig *config.Config\n}\n\ntype requiresError string\n\nfunc (e requiresError) Error() string {\n\treturn \"invalid machine configuration: missing \" + string(e)\n}\n\nfunc addScripts(c *cloudinit.Config, scripts ...string) {\n\tfor _, s := range scripts {\n\t\tc.AddRunCmd(s)\n\t}\n}\n\nfunc base64yaml(m *config.Config) string {\n\tdata, err := goyaml.Marshal(m.AllAttrs())\n\tif err != nil {\n\t\t\/\/ can't happen, these values have been validated a number of times\n\t\tpanic(err)\n\t}\n\treturn base64.StdEncoding.EncodeToString(data)\n}\n\nfunc New(cfg *MachineConfig) (*cloudinit.Config, error) {\n\tif err := verifyConfig(cfg); err != nil {\n\t\treturn nil, err\n\t}\n\tc := cloudinit.New()\n\n\tc.AddSSHAuthorizedKeys(cfg.AuthorizedKeys)\n\tc.AddPackage(\"git\")\n\tif cfg.StateServer {\n\t}\n\n\taddScripts(c,\n\t\tfmt.Sprintf(\"sudo mkdir -p %s\", cfg.DataDir),\n\t\t\"sudo mkdir -p \/var\/log\/juju\")\n\n\t\/\/ Make a directory for the tools to live in, then fetch the\n\t\/\/ tools and unarchive them into it.\n\taddScripts(c,\n\t\t\"bin=\"+shquote(cfg.jujuTools()),\n\t\t\"mkdir -p $bin\",\n\t\tfmt.Sprintf(\"wget -O - %s | tar xz -C $bin\", shquote(cfg.Tools.URL)),\n\t\tfmt.Sprintf(\"echo -n %s > $bin\/downloaded-url.txt\", shquote(cfg.Tools.URL)),\n\t)\n\n\tdebugFlag := \"\"\n\t\/\/ TODO: disable debug mode by default when the system is stable.\n\tif true || log.Debug {\n\t\tdebugFlag = \" --debug\"\n\t}\n\n\tif cfg.StateServer {\n\t\t\/\/ TODO The public bucket must come from the environment configuration.\n\t\tb := cfg.Tools.Binary\n\t\turl := fmt.Sprintf(\"http:\/\/juju-dist.s3.amazonaws.com\/tools\/mongo-2.2.0-%s-%s.tgz\", b.Series, b.Arch)\n\t\taddScripts(c,\n\t\t\t\"mkdir -p \/opt\",\n\t\t\tfmt.Sprintf(\"wget -O - %s | tar xz -C \/opt\", shquote(url)),\n\t\t\tcfg.jujuTools()+\"\/jujud bootstrap-state\"+\n\t\t\t\t\" --instance-id \"+cfg.InstanceIdAccessor+\n\t\t\t\t\" --env-config \"+shquote(base64yaml(cfg.Config))+\n\t\t\t\t\" --state-servers localhost\"+zkPortSuffix+\n\t\t\t\tdebugFlag,\n\t\t)\n\t\tif err := addMongoToBoot(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taddScripts(c,\n\t\t\tcfg.jujuTools()+\"\/jujud bootstrap-state\"+\n\t\t\t\t\" --instance-id \"+cfg.InstanceIdAccessor+\n\t\t\t\t\" --env-config \"+shquote(base64yaml(cfg.Config))+\n\t\t\t\t\" --state-servers localhost\"+zkPortSuffix+\n\t\t\t\tdebugFlag,\n\t\t)\n\t}\n\n\tif err := addAgentToBoot(c, cfg, \"machine\", fmt.Sprintf(\"--machine-id %d \"+debugFlag, cfg.MachineId)); err != nil {\n\t\treturn nil, err\n\t}\n\tif cfg.Provisioner {\n\t\tif err := addAgentToBoot(c, cfg, \"provisioning\", debugFlag); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ general options\n\tc.SetAptUpgrade(true)\n\tc.SetAptUpdate(true)\n\tc.SetOutput(cloudinit.OutAll, \"| tee -a \/var\/log\/cloud-init-output.log\", \"\")\n\treturn c, nil\n}\n\nfunc addAgentToBoot(c *cloudinit.Config, cfg *MachineConfig, name, args string) error {\n\t\/\/ Make the agent run via a symbolic link to the actual tools\n\t\/\/ directory, so it can upgrade itself without needing to change\n\t\/\/ the upstart script.\n\ttoolsDir := environs.AgentToolsDir(cfg.DataDir, name)\n\t\/\/ TODO(dfc) ln -nfs, so it doesn't fail if for some reason that the target already exists\n\taddScripts(c, fmt.Sprintf(\"ln -s %v %s\", cfg.Tools.Binary, toolsDir))\n\tsvc := upstart.NewService(\"jujud-\" + name)\n\tcmd := fmt.Sprintf(\n\t\t\"%s\/jujud %s\"+\n\t\t\t\" --state-servers '%s'\"+\n\t\t\t\" --log-file \/var\/log\/juju\/%s-agent.log\"+\n\t\t\t\" --data-dir '%s'\"+\n\t\t\t\" %s\",\n\t\ttoolsDir, name,\n\t\tcfg.zookeeperHostAddrs(),\n\t\tname,\n\t\tcfg.DataDir,\n\t\targs,\n\t)\n\tconf := &upstart.Conf{\n\t\tService: *svc,\n\t\tDesc: fmt.Sprintf(\"juju %s agent\", name),\n\t\tCmd: cmd,\n\t}\n\tcmds, err := conf.InstallCommands()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot make cloud-init upstart script for the %s agent: %v\", name, err)\n\t}\n\taddScripts(c, cmds...)\n\treturn nil\n}\n\nfunc addMongoToBoot(c *cloudinit.Config) error {\n\taddScripts(c, fmt.Sprintf(\"mkdir -p \/var\/lib\/juju\/db\"))\n\tsvc := upstart.NewService(\"juju-db\")\n\tconf := &upstart.Conf{\n\t\tService: *svc,\n\t\tDesc: \"juju state database\",\n\t\tCmd: \"\/opt\/mongo\/bin\/mongod --port 37017 --bind_ip 0.0.0.0 --dbpath=\/var\/lib\/juju\/db\",\n\t}\n\tcmds, err := conf.InstallCommands()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot make cloud-init upstart script for the state database: %v\", err)\n\t}\n\taddScripts(c, cmds...)\n\treturn nil\n}\n\n\/\/ versionDir converts a tools URL into a name\n\/\/ to use as a directory for storing the tools executables in\n\/\/ by using the last element stripped of its extension.\nfunc versionDir(toolsURL string) string {\n\tname := path.Base(toolsURL)\n\text := path.Ext(name)\n\treturn name[:len(name)-len(ext)]\n}\n\nfunc (cfg *MachineConfig) jujuTools() string {\n\treturn environs.ToolsDir(cfg.DataDir, cfg.Tools.Binary)\n}\n\nfunc (cfg *MachineConfig) zookeeperHostAddrs() string {\n\tvar hosts []string\n\tif cfg.StateServer {\n\t\thosts = append(hosts, \"localhost\"+zkPortSuffix)\n\t}\n\tif cfg.StateInfo != nil {\n\t\thosts = append(hosts, cfg.StateInfo.Addrs...)\n\t}\n\treturn strings.Join(hosts, \",\")\n}\n\n\/\/ shquote quotes s so that when read by bash, no metacharacters\n\/\/ within s will be interpreted as such.\nfunc shquote(s string) string {\n\t\/\/ single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote\n\treturn `'` + strings.Replace(s, `'`, `'\"'\"'`, -1) + `'`\n}\n\nfunc verifyConfig(cfg *MachineConfig) error {\n\tif cfg.MachineId < 0 {\n\t\treturn fmt.Errorf(\"invalid machine configuration: negative machine id\")\n\t}\n\tif cfg.ProviderType == \"\" {\n\t\treturn requiresError(\"provider type\")\n\t}\n\tif cfg.DataDir == \"\" {\n\t\treturn requiresError(\"var directory\")\n\t}\n\tif cfg.Tools == nil {\n\t\treturn requiresError(\"tools\")\n\t}\n\tif cfg.Tools.URL == \"\" {\n\t\treturn requiresError(\"tools URL\")\n\t}\n\tif cfg.StateServer {\n\t\tif cfg.InstanceIdAccessor == \"\" {\n\t\t\treturn requiresError(\"instance id accessor\")\n\t\t}\n\t\tif cfg.Config == nil {\n\t\t\treturn requiresError(\"environment configuration\")\n\t\t}\n\t} else {\n\t\tif cfg.StateInfo == nil || len(cfg.StateInfo.Addrs) == 0 {\n\t\t\treturn requiresError(\"zookeeper hosts\")\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Removed empty block.<commit_after>package cloudinit\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"launchpad.net\/goyaml\"\n\t\"launchpad.net\/juju-core\/cloudinit\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/environs\/config\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/upstart\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ TODO(dfc) duplicated from environs\/ec2\n\nconst zkPort = 37017\n\nvar zkPortSuffix = fmt.Sprintf(\":%d\", zkPort)\n\n\/\/ MachineConfig represents initialization information for a new juju machine.\n\/\/ Creation of cloudinit data from this struct is largely provider-independent,\n\/\/ but we'll keep it internal until we need to factor it out.\ntype MachineConfig struct {\n\t\/\/ Provisioner specifies whether the new machine will run a provisioning agent.\n\tProvisioner bool\n\n\t\/\/ StateServer specifies whether the new machine will run a ZooKeeper \n\t\/\/ or MongoDB instance.\n\tStateServer bool\n\n\t\/\/ InstanceIdAccessor holds bash code that evaluates to the current instance id.\n\tInstanceIdAccessor string\n\n\t\/\/ ProviderType identifies the provider type so the host\n\t\/\/ knows which kind of provider to use.\n\tProviderType string\n\n\t\/\/ StateInfo holds the means for the new instance to communicate with the\n\t\/\/ juju state. Unless the new machine is running a state server (StateServer is\n\t\/\/ set), there must be at least one state server address supplied.\n\tStateInfo *state.Info\n\n\t\/\/ Tools is juju tools to be used on the new machine.\n\tTools *state.Tools\n\n\t\/\/ DataDir holds the directory that juju state will be put in the new\n\t\/\/ machine.\n\tDataDir string\n\n\t\/\/ MachineId identifies the new machine. It must be non-negative.\n\tMachineId int\n\n\t\/\/ AuthorizedKeys specifies the keys that are allowed to\n\t\/\/ connect to the machine (see cloudinit.SSHAddAuthorizedKeys)\n\t\/\/ If no keys are supplied, there can be no ssh access to the node.\n\t\/\/ On a bootstrap machine, that is fatal. On other\n\t\/\/ machines it will mean that the ssh, scp and debug-hooks\n\t\/\/ commands cannot work.\n\tAuthorizedKeys string\n\n\t\/\/ Config holds the initial environment configuration.\n\tConfig *config.Config\n}\n\ntype requiresError string\n\nfunc (e requiresError) Error() string {\n\treturn \"invalid machine configuration: missing \" + string(e)\n}\n\nfunc addScripts(c *cloudinit.Config, scripts ...string) {\n\tfor _, s := range scripts {\n\t\tc.AddRunCmd(s)\n\t}\n}\n\nfunc base64yaml(m *config.Config) string {\n\tdata, err := goyaml.Marshal(m.AllAttrs())\n\tif err != nil {\n\t\t\/\/ can't happen, these values have been validated a number of times\n\t\tpanic(err)\n\t}\n\treturn base64.StdEncoding.EncodeToString(data)\n}\n\nfunc New(cfg *MachineConfig) (*cloudinit.Config, error) {\n\tif err := verifyConfig(cfg); err != nil {\n\t\treturn nil, err\n\t}\n\tc := cloudinit.New()\n\n\tc.AddSSHAuthorizedKeys(cfg.AuthorizedKeys)\n\tc.AddPackage(\"git\")\n\n\taddScripts(c,\n\t\tfmt.Sprintf(\"sudo mkdir -p %s\", cfg.DataDir),\n\t\t\"sudo mkdir -p \/var\/log\/juju\")\n\n\t\/\/ Make a directory for the tools to live in, then fetch the\n\t\/\/ tools and unarchive them into it.\n\taddScripts(c,\n\t\t\"bin=\"+shquote(cfg.jujuTools()),\n\t\t\"mkdir -p $bin\",\n\t\tfmt.Sprintf(\"wget -O - %s | tar xz -C $bin\", shquote(cfg.Tools.URL)),\n\t\tfmt.Sprintf(\"echo -n %s > $bin\/downloaded-url.txt\", shquote(cfg.Tools.URL)),\n\t)\n\n\tdebugFlag := \"\"\n\t\/\/ TODO: disable debug mode by default when the system is stable.\n\tif true || log.Debug {\n\t\tdebugFlag = \" --debug\"\n\t}\n\n\tif cfg.StateServer {\n\t\t\/\/ TODO The public bucket must come from the environment configuration.\n\t\tb := cfg.Tools.Binary\n\t\turl := fmt.Sprintf(\"http:\/\/juju-dist.s3.amazonaws.com\/tools\/mongo-2.2.0-%s-%s.tgz\", b.Series, b.Arch)\n\t\taddScripts(c,\n\t\t\t\"mkdir -p \/opt\",\n\t\t\tfmt.Sprintf(\"wget -O - %s | tar xz -C \/opt\", shquote(url)),\n\t\t\tcfg.jujuTools()+\"\/jujud bootstrap-state\"+\n\t\t\t\t\" --instance-id \"+cfg.InstanceIdAccessor+\n\t\t\t\t\" --env-config \"+shquote(base64yaml(cfg.Config))+\n\t\t\t\t\" --state-servers localhost\"+zkPortSuffix+\n\t\t\t\tdebugFlag,\n\t\t)\n\t\tif err := addMongoToBoot(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taddScripts(c,\n\t\t\tcfg.jujuTools()+\"\/jujud bootstrap-state\"+\n\t\t\t\t\" --instance-id \"+cfg.InstanceIdAccessor+\n\t\t\t\t\" --env-config \"+shquote(base64yaml(cfg.Config))+\n\t\t\t\t\" --state-servers localhost\"+zkPortSuffix+\n\t\t\t\tdebugFlag,\n\t\t)\n\t}\n\n\tif err := addAgentToBoot(c, cfg, \"machine\", fmt.Sprintf(\"--machine-id %d \"+debugFlag, cfg.MachineId)); err != nil {\n\t\treturn nil, err\n\t}\n\tif cfg.Provisioner {\n\t\tif err := addAgentToBoot(c, cfg, \"provisioning\", debugFlag); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ general options\n\tc.SetAptUpgrade(true)\n\tc.SetAptUpdate(true)\n\tc.SetOutput(cloudinit.OutAll, \"| tee -a \/var\/log\/cloud-init-output.log\", \"\")\n\treturn c, nil\n}\n\nfunc addAgentToBoot(c *cloudinit.Config, cfg *MachineConfig, name, args string) error {\n\t\/\/ Make the agent run via a symbolic link to the actual tools\n\t\/\/ directory, so it can upgrade itself without needing to change\n\t\/\/ the upstart script.\n\ttoolsDir := environs.AgentToolsDir(cfg.DataDir, name)\n\t\/\/ TODO(dfc) ln -nfs, so it doesn't fail if for some reason that the target already exists\n\taddScripts(c, fmt.Sprintf(\"ln -s %v %s\", cfg.Tools.Binary, toolsDir))\n\tsvc := upstart.NewService(\"jujud-\" + name)\n\tcmd := fmt.Sprintf(\n\t\t\"%s\/jujud %s\"+\n\t\t\t\" --state-servers '%s'\"+\n\t\t\t\" --log-file \/var\/log\/juju\/%s-agent.log\"+\n\t\t\t\" --data-dir '%s'\"+\n\t\t\t\" %s\",\n\t\ttoolsDir, name,\n\t\tcfg.zookeeperHostAddrs(),\n\t\tname,\n\t\tcfg.DataDir,\n\t\targs,\n\t)\n\tconf := &upstart.Conf{\n\t\tService: *svc,\n\t\tDesc: fmt.Sprintf(\"juju %s agent\", name),\n\t\tCmd: cmd,\n\t}\n\tcmds, err := conf.InstallCommands()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot make cloud-init upstart script for the %s agent: %v\", name, err)\n\t}\n\taddScripts(c, cmds...)\n\treturn nil\n}\n\nfunc addMongoToBoot(c *cloudinit.Config) error {\n\taddScripts(c, fmt.Sprintf(\"mkdir -p \/var\/lib\/juju\/db\"))\n\tsvc := upstart.NewService(\"juju-db\")\n\tconf := &upstart.Conf{\n\t\tService: *svc,\n\t\tDesc: \"juju state database\",\n\t\tCmd: \"\/opt\/mongo\/bin\/mongod --port 37017 --bind_ip 0.0.0.0 --dbpath=\/var\/lib\/juju\/db\",\n\t}\n\tcmds, err := conf.InstallCommands()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot make cloud-init upstart script for the state database: %v\", err)\n\t}\n\taddScripts(c, cmds...)\n\treturn nil\n}\n\n\/\/ versionDir converts a tools URL into a name\n\/\/ to use as a directory for storing the tools executables in\n\/\/ by using the last element stripped of its extension.\nfunc versionDir(toolsURL string) string {\n\tname := path.Base(toolsURL)\n\text := path.Ext(name)\n\treturn name[:len(name)-len(ext)]\n}\n\nfunc (cfg *MachineConfig) jujuTools() string {\n\treturn environs.ToolsDir(cfg.DataDir, cfg.Tools.Binary)\n}\n\nfunc (cfg *MachineConfig) zookeeperHostAddrs() string {\n\tvar hosts []string\n\tif cfg.StateServer {\n\t\thosts = append(hosts, \"localhost\"+zkPortSuffix)\n\t}\n\tif cfg.StateInfo != nil {\n\t\thosts = append(hosts, cfg.StateInfo.Addrs...)\n\t}\n\treturn strings.Join(hosts, \",\")\n}\n\n\/\/ shquote quotes s so that when read by bash, no metacharacters\n\/\/ within s will be interpreted as such.\nfunc shquote(s string) string {\n\t\/\/ single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote\n\treturn `'` + strings.Replace(s, `'`, `'\"'\"'`, -1) + `'`\n}\n\nfunc verifyConfig(cfg *MachineConfig) error {\n\tif cfg.MachineId < 0 {\n\t\treturn fmt.Errorf(\"invalid machine configuration: negative machine id\")\n\t}\n\tif cfg.ProviderType == \"\" {\n\t\treturn requiresError(\"provider type\")\n\t}\n\tif cfg.DataDir == \"\" {\n\t\treturn requiresError(\"var directory\")\n\t}\n\tif cfg.Tools == nil {\n\t\treturn requiresError(\"tools\")\n\t}\n\tif cfg.Tools.URL == \"\" {\n\t\treturn requiresError(\"tools URL\")\n\t}\n\tif cfg.StateServer {\n\t\tif cfg.InstanceIdAccessor == \"\" {\n\t\t\treturn requiresError(\"instance id accessor\")\n\t\t}\n\t\tif cfg.Config == nil {\n\t\t\treturn requiresError(\"environment configuration\")\n\t\t}\n\t} else {\n\t\tif cfg.StateInfo == nil || len(cfg.StateInfo.Addrs) == 0 {\n\t\t\treturn requiresError(\"zookeeper hosts\")\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package wini\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\/\/\"fmt\"\n\t\"log\"\n)\n\n\/\/ Suppress error if they are not otherwise used.\nvar _ = log.Printf\n\ntype Kvmap map[string]string\ntype SectionMap map[string]Kvmap\n\ntype INI struct {\n\tsections SectionMap\n\tlinesep string\n\tkvsep string\n}\n\nfunc New() *INI {\n\tini := &INI{\n\t\tsections: make(SectionMap),\n\t\tlinesep: \"\\n\",\n\t\tkvsep: \"=\",\n\t}\n\treturn ini\n}\n\n\/\/ ParseFile reads the INI file named by filename and parse the contents to store the data in the INI\n\/\/ A successful call returns err == nil\nfunc (ini *INI) ParseFile(filename string) error {\n\tcontents, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ini.parseINI(contents, \"\\n\", \"=\")\n}\n\n\/\/ Parse parse the data to store the data in the INI\n\/\/ A successful call returns err == nil\nfunc (ini *INI) Parse(data []byte, linesep, kvsep string) error {\n\treturn ini.parseINI(data, linesep, kvsep)\n}\n\n\/\/ Get looks up a value for a key in the default section\n\/\/ and returns that value, along with a boolean result similar to a map lookup.\nfunc (i INI) Get(key string) (value string, ok bool) {\n\treturn i.SectionGet(\"\", key)\n}\n\n\/\/ Get looks up a value for a key in a section\n\/\/ and returns that value, along with a boolean result similar to a map lookup.\nfunc (i INI) SectionGet(section, key string) (value string, ok bool) {\n\tif s := i.sections[section]; s != nil {\n\t\tvalue, ok = (s)[key]\n\t}\n\treturn\n}\n\nfunc (i INI) GetKvmap(section string) (kvmap Kvmap, ok bool) {\n\tkvmap, ok = i.sections[section]\n\treturn kvmap, ok\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (ini *INI) parseINI(data []byte, linesep, kvsep string) error {\n\tini.linesep = linesep\n\tini.kvsep = kvsep\n\n\t\/\/ Insert the default section\n\tvar section string\n\tkvmap := make(Kvmap)\n\tini.sections[section] = kvmap\n\n\tlines := bytes.Split(data, []byte(linesep))\n\tfor _, line := range lines {\n\t\tline = bytes.TrimSpace(line)\n\t\tsize := len(line)\n\t\tif size == 0 {\n\t\t\t\/\/ Skip blank lines\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] == ';' || line[0] == '#' {\n\t\t\t\/\/ Skip comments\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] == '[' && line[size-1] == ']' {\n\t\t\t\/\/ Try to parse INI-Section\n\t\t\tsection = string(line[1 : size-1])\n\t\t\t\/\/log.Printf(\"Got a sction [%v]\\n\", section)\n\t\t\tkvmap = make(Kvmap)\n\t\t\tini.sections[section] = kvmap\n\t\t\tcontinue\n\t\t}\n\n\t\tpos := bytes.Index(line, []byte(kvsep))\n\t\tif pos < 0 {\n\t\t\t\/\/ ERROR happened when passing\n\t\t\terr := errors.New(\"Came accross an error : \" + string(line) + \" is NOT a valid key\/value pair\")\n\t\t\treturn err\n\t\t}\n\n\t\tk := bytes.TrimSpace(line[0:pos])\n\t\tv := bytes.TrimSpace(line[pos+len(kvsep):])\n\t\tkvmap[string(k)] = string(v)\n\t\t\/\/log.Printf(\"Got a key\/value pair [%v\/%v] for section [%v]\\n\", string(k), string(v), section)\n\t}\n\treturn nil\n}\n<commit_msg>code format. extract some const the variables<commit_after>package wini\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\n\/\/ Suppress error if they are not otherwise used.\nvar _ = log.Printf\n\ntype Kvmap map[string]string\ntype SectionMap map[string]Kvmap\n\nconst (\n\tDefaultSection = \"\"\n\tDefaultLineSeperator = \"\\n\"\n\tDefaultKeyValueSeperator = \"=\"\n)\n\ntype INI struct {\n\tsections SectionMap\n\tlinesep string\n\tkvsep string\n}\n\nfunc New() *INI {\n\tini := &INI{\n\t\tsections: make(SectionMap),\n\t\tlinesep: DefaultLineSeperator,\n\t\tkvsep: DefaultKeyValueSeperator,\n\t}\n\treturn ini\n}\n\n\/\/ ParseFile reads the INI file named by filename and parse the contents to store the data in the INI\n\/\/ A successful call returns err == nil\nfunc (ini *INI) ParseFile(filename string) error {\n\tcontents, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ini.parseINI(contents, DefaultLineSeperator, DefaultKeyValueSeperator)\n}\n\n\/\/ Parse parse the data to store the data in the INI\n\/\/ A successful call returns err == nil\nfunc (ini *INI) Parse(data []byte, linesep, kvsep string) error {\n\treturn ini.parseINI(data, linesep, kvsep)\n}\n\n\/\/ Get looks up a value for a key in the default section\n\/\/ and returns that value, along with a boolean result similar to a map lookup.\nfunc (ini *INI) Get(key string) (value string, ok bool) {\n\n\treturn ini.SectionGet(DefaultSection, key)\n}\n\n\/\/ Get looks up a value for a key in a section\n\/\/ and returns that value, along with a boolean result similar to a map lookup.\nfunc (ini *INI) SectionGet(section, key string) (value string, ok bool) {\n\tif s := ini.sections[section]; s != nil {\n\t\tvalue, ok = s[key]\n\t}\n\treturn\n}\n\nfunc (ini *INI) GetKvmap(section string) (kvmap Kvmap, ok bool) {\n\tkvmap, ok = ini.sections[section]\n\treturn kvmap, ok\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (ini *INI) parseINI(data []byte, linesep, kvsep string) error {\n\tini.linesep = linesep\n\tini.kvsep = kvsep\n\n\t\/\/ Insert the default section\n\tvar section string\n\tkvmap := make(Kvmap)\n\tini.sections[section] = kvmap\n\n\tlines := bytes.Split(data, []byte(linesep))\n\tfor _, line := range lines {\n\t\tline = bytes.TrimSpace(line)\n\t\tsize := len(line)\n\t\tif size == 0 {\n\t\t\t\/\/ Skip blank lines\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] == ';' || line[0] == '#' {\n\t\t\t\/\/ Skip comments\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] == '[' && line[size-1] == ']' {\n\t\t\t\/\/ Parse INI-Section\n\t\t\tsection = string(line[1 : size-1])\n\t\t\tkvmap = make(Kvmap)\n\t\t\tini.sections[section] = kvmap\n\t\t\tcontinue\n\t\t}\n\n\t\tpos := bytes.Index(line, []byte(kvsep))\n\t\tif pos < 0 {\n\t\t\t\/\/ ERROR happened when passing\n\t\t\terr := errors.New(\"Came accross an error : \" + string(line) + \" is NOT a valid key\/value pair\")\n\t\t\treturn err\n\t\t}\n\n\t\tk := bytes.TrimSpace(line[0:pos])\n\t\tv := bytes.TrimSpace(line[pos+len(kvsep):])\n\t\tkvmap[string(k)] = string(v)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package linenoise\n\n\/\/ -windows\n\n\/\/ #include <stdlib.h>\n\/\/ #include \"linenoise.h\"\n\/\/ #include \"linenoiseCompletionCallbackHook.h\"\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"unsafe\"\n)\n\n\/\/ KillSignalError is returned returned by Line() when a user quits from prompt.\n\/\/ This occurs when the user enters ctrl+C or ctrl+D.\nvar KillSignalError = errors.New(\"prompt was quited with a killsignal\")\n\nfunc init() {\n\tC.linenoiseSetupCompletionCallbackHook()\n}\n\n\/\/ Line displays given string and returns line from user input.\nfunc Line(prompt string) (string, error) { \/\/ char *linenoise(const char *prompt);\n\tpromptCString := C.CString(prompt)\n\tresultCString := C.linenoise(promptCString)\n\tC.free(unsafe.Pointer(promptCString))\n\tdefer C.free(unsafe.Pointer(resultCString))\n\n\tif resultCString == nil {\n\t\treturn \"\", KillSignalError\n\t}\n\n\tresult := C.GoString(resultCString)\n\n\treturn result, nil\n}\n\n\/\/ AddHistory adds a line to history. Returns non-nil error on fail.\nfunc AddHistory(line string) error { \/\/ int linenoiseHistoryAdd(const char *line);\n\tlineCString := C.CString(line)\n\tres := C.linenoiseHistoryAdd(lineCString)\n\tC.free(unsafe.Pointer(lineCString))\n\tif res != 1 {\n\t\treturn errors.New(\"Could not add line to history.\")\n\t}\n\treturn nil\n}\n\n\/\/ SetHistoryCapacity changes the maximum length of history. Returns non-nil error on fail.\nfunc SetHistoryCapacity(capacity int) error { \/\/ int linenoiseHistorySetMaxLen(int len);\n\tres := C.linenoiseHistorySetMaxLen(C.int(capacity))\n\tif res != 1 {\n\t\treturn errors.New(\"Could not set history max len.\")\n\t}\n\treturn nil\n}\n\n\/\/ SaveHistory saves from file with given filename. Returns non-nil error on fail.\nfunc SaveHistory(filename string) error { \/\/ int linenoiseHistorySave(char *filename);\n\tfilenameCString := C.CString(filename)\n\tres := C.linenoiseHistorySave(filenameCString)\n\tC.free(unsafe.Pointer(filenameCString))\n\tif res != 0 {\n\t\treturn errors.New(\"Could not save history to file.\")\n\t}\n\treturn nil\n}\n\n\/\/ LoadHistory loads from file with given filename. Returns non-nil error on fail.\nfunc LoadHistory(filename string) error { \/\/ int linenoiseHistoryLoad(char *filename);\n\tfilenameCString := C.CString(filename)\n\tres := C.linenoiseHistoryLoad(filenameCString)\n\tC.free(unsafe.Pointer(filenameCString))\n\tif res != 0 {\n\t\treturn errors.New(\"Could not load history from file.\")\n\t}\n\treturn nil\n}\n\n\/\/ Clear clears the screen.\nfunc Clear() { \/\/ void linenoiseClearScreen(void);\n\tC.linenoiseClearScreen()\n}\n\n\/\/ SetMultiline sets linenoise to multiline or single line.\n\/\/ In multiline mode the user input will be wrapped to a new line when the length exceeds the amount of available rows in the terminal.\nfunc SetMultiline(ml bool) { \/\/ void linenoiseSetMultiLine(int ml);\n\tif ml {\n\t\tC.linenoiseSetMultiLine(1)\n\t} else {\n\t\tC.linenoiseSetMultiLine(0)\n\t}\n}\n\n\/\/ CompletionHandler provides possible completions for given input\ntype CompletionHandler func(input string) []string\n\n\/\/ DefaultCompletionHandler simply returns an empty slice.\nvar DefaultCompletionHandler = func(input string) []string {\n\treturn make([]string, 0)\n}\n\nvar complHandler = DefaultCompletionHandler\n\n\/\/ SetCompletionHandler sets the CompletionHandler to be used for completion\nfunc SetCompletionHandler(c CompletionHandler) {\n\tcomplHandler = c\n}\n\n\/\/ typedef struct linenoiseCompletions {\n\/\/ size_t len;\n\/\/ char **cvec;\n\/\/ } linenoiseCompletions;\n\/\/ typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *);\n\/\/ void linenoiseSetCompletionCallback(linenoiseCompletionCallback *);\n\/\/ void linenoiseAddCompletion(linenoiseCompletions *, char *);\n\n\/\/export linenoiseGoCompletionCallbackHook\nfunc linenoiseGoCompletionCallbackHook(input *C.char, completions *C.linenoiseCompletions) {\n\tcompletionsSlice := complHandler(C.GoString(input))\n\n\tcompletionsLen := len(completionsSlice)\n\tcompletions.len = C.size_t(completionsLen)\n\n\tif completionsLen > 0 {\n\t\tcvec := C.malloc(C.size_t(int(unsafe.Sizeof(*(**C.char)(nil))) * completionsLen))\n\t\tcvecSlice := (*(*[999999]*C.char)(cvec))[:completionsLen]\n\n\t\tfor i, str := range completionsSlice {\n\t\t\tcvecSlice[i] = C.CString(str)\n\t\t}\n\t\tcompletions.cvec = (**C.char)(cvec)\n\t}\n}\n\n\/\/ void linenoisePrintKeyCodes(void);\n\n\/\/ PrintKeyCodes puts linenoise in key codes debugging mode.\n\/\/ Press keys to see scan codes. Type 'quit' at any time to exit.\n\/\/ PrintKeyCodes blocks until user enters 'quit'.\nfunc PrintKeyCodes() {\n\tC.linenoisePrintKeyCodes()\n}\n<commit_msg>Move C header comment<commit_after>package linenoise\n\n\/\/ -windows\n\n\/\/ #include <stdlib.h>\n\/\/ #include \"linenoise.h\"\n\/\/ #include \"linenoiseCompletionCallbackHook.h\"\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"unsafe\"\n)\n\n\/\/ KillSignalError is returned returned by Line() when a user quits from prompt.\n\/\/ This occurs when the user enters ctrl+C or ctrl+D.\nvar KillSignalError = errors.New(\"prompt was quited with a killsignal\")\n\nfunc init() {\n\tC.linenoiseSetupCompletionCallbackHook()\n}\n\n\/\/ Line displays given string and returns line from user input.\nfunc Line(prompt string) (string, error) { \/\/ char *linenoise(const char *prompt);\n\tpromptCString := C.CString(prompt)\n\tresultCString := C.linenoise(promptCString)\n\tC.free(unsafe.Pointer(promptCString))\n\tdefer C.free(unsafe.Pointer(resultCString))\n\n\tif resultCString == nil {\n\t\treturn \"\", KillSignalError\n\t}\n\n\tresult := C.GoString(resultCString)\n\n\treturn result, nil\n}\n\n\/\/ AddHistory adds a line to history. Returns non-nil error on fail.\nfunc AddHistory(line string) error { \/\/ int linenoiseHistoryAdd(const char *line);\n\tlineCString := C.CString(line)\n\tres := C.linenoiseHistoryAdd(lineCString)\n\tC.free(unsafe.Pointer(lineCString))\n\tif res != 1 {\n\t\treturn errors.New(\"Could not add line to history.\")\n\t}\n\treturn nil\n}\n\n\/\/ SetHistoryCapacity changes the maximum length of history. Returns non-nil error on fail.\nfunc SetHistoryCapacity(capacity int) error { \/\/ int linenoiseHistorySetMaxLen(int len);\n\tres := C.linenoiseHistorySetMaxLen(C.int(capacity))\n\tif res != 1 {\n\t\treturn errors.New(\"Could not set history max len.\")\n\t}\n\treturn nil\n}\n\n\/\/ SaveHistory saves from file with given filename. Returns non-nil error on fail.\nfunc SaveHistory(filename string) error { \/\/ int linenoiseHistorySave(char *filename);\n\tfilenameCString := C.CString(filename)\n\tres := C.linenoiseHistorySave(filenameCString)\n\tC.free(unsafe.Pointer(filenameCString))\n\tif res != 0 {\n\t\treturn errors.New(\"Could not save history to file.\")\n\t}\n\treturn nil\n}\n\n\/\/ LoadHistory loads from file with given filename. Returns non-nil error on fail.\nfunc LoadHistory(filename string) error { \/\/ int linenoiseHistoryLoad(char *filename);\n\tfilenameCString := C.CString(filename)\n\tres := C.linenoiseHistoryLoad(filenameCString)\n\tC.free(unsafe.Pointer(filenameCString))\n\tif res != 0 {\n\t\treturn errors.New(\"Could not load history from file.\")\n\t}\n\treturn nil\n}\n\n\/\/ Clear clears the screen.\nfunc Clear() { \/\/ void linenoiseClearScreen(void);\n\tC.linenoiseClearScreen()\n}\n\n\/\/ SetMultiline sets linenoise to multiline or single line.\n\/\/ In multiline mode the user input will be wrapped to a new line when the length exceeds the amount of available rows in the terminal.\nfunc SetMultiline(ml bool) { \/\/ void linenoiseSetMultiLine(int ml);\n\tif ml {\n\t\tC.linenoiseSetMultiLine(1)\n\t} else {\n\t\tC.linenoiseSetMultiLine(0)\n\t}\n}\n\n\/\/ CompletionHandler provides possible completions for given input\ntype CompletionHandler func(input string) []string\n\n\/\/ DefaultCompletionHandler simply returns an empty slice.\nvar DefaultCompletionHandler = func(input string) []string {\n\treturn make([]string, 0)\n}\n\nvar complHandler = DefaultCompletionHandler\n\n\/\/ SetCompletionHandler sets the CompletionHandler to be used for completion\nfunc SetCompletionHandler(c CompletionHandler) {\n\tcomplHandler = c\n}\n\n\/\/ typedef struct linenoiseCompletions {\n\/\/ size_t len;\n\/\/ char **cvec;\n\/\/ } linenoiseCompletions;\n\/\/ typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *);\n\/\/ void linenoiseSetCompletionCallback(linenoiseCompletionCallback *);\n\/\/ void linenoiseAddCompletion(linenoiseCompletions *, char *);\n\n\/\/export linenoiseGoCompletionCallbackHook\nfunc linenoiseGoCompletionCallbackHook(input *C.char, completions *C.linenoiseCompletions) {\n\tcompletionsSlice := complHandler(C.GoString(input))\n\n\tcompletionsLen := len(completionsSlice)\n\tcompletions.len = C.size_t(completionsLen)\n\n\tif completionsLen > 0 {\n\t\tcvec := C.malloc(C.size_t(int(unsafe.Sizeof(*(**C.char)(nil))) * completionsLen))\n\t\tcvecSlice := (*(*[999999]*C.char)(cvec))[:completionsLen]\n\n\t\tfor i, str := range completionsSlice {\n\t\t\tcvecSlice[i] = C.CString(str)\n\t\t}\n\t\tcompletions.cvec = (**C.char)(cvec)\n\t}\n}\n\n\/\/ PrintKeyCodes puts linenoise in key codes debugging mode.\n\/\/ Press keys to see scan codes. Type 'quit' at any time to exit.\n\/\/ PrintKeyCodes blocks until user enters 'quit'.\nfunc PrintKeyCodes() { \/\/ void linenoisePrintKeyCodes(void);\n\tC.linenoisePrintKeyCodes()\n}\n<|endoftext|>"} {"text":"<commit_before>package towerfall\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\n\t\"github.com\/deckarep\/golang-set\"\n)\n\n\/\/ AllColors is a list of the available player colors\nvar AllColors = []interface{}{\n\t\"green\",\n\t\"blue\",\n\t\"pink\",\n\t\"orange\",\n\t\"white\",\n\t\"yellow\",\n\t\"cyan\",\n\t\"purple\",\n\t\"red\",\n}\n\n\/\/ Colors is the definitive set of all the colors\nvar Colors = mapset.NewSetFromSlice(AllColors)\n\nconst scoreMultiplier = 7\nconst scoreSweep = (97 * scoreMultiplier)\nconst scoreKill = (21 * scoreMultiplier)\nconst scoreSelf = (-35 * scoreMultiplier) \/\/ Negative 1.66 times of a kill\nconst scoreWinner = (350 * scoreMultiplier)\nconst scoreSecond = (150 * scoreMultiplier)\nconst scoreThird = (70 * scoreMultiplier)\nconst scoreFourth = (30 * scoreMultiplier)\nconst finalMultiplier = 2.5\n\n\/\/ ScoreData is a structured Key\/Value pair list for scores\ntype ScoreData struct {\n\tKey string\n\tValue int\n\tPlayer *Player\n}\n\n\/\/ Player is a representation of one player in a match\ntype Player struct {\n\tID uint `json:\"id\"`\n\tMatchID uint\n\tPersonID string `sql:\",pk\"`\n\tPerson *Person `json:\"person\" sql:\"-\"`\n\tColor string `json:\"color\"`\n\tPreferredColor string `json:\"preferred_color\"`\n\tArcherType int `json:\"archer_type\"`\n\tShots int `json:\"shots\"`\n\tSweeps int `json:\"sweeps\"`\n\tKills int `json:\"kills\"`\n\tSelf int `json:\"self\"`\n\tMatchScore int `json:\"match_score\"`\n\tTotalScore int `json:\"total_score\"`\n\tState PlayerState `json:\"state\" sql:\"-\"`\n\tMatch *Match `json:\"-\" sql:\"-\"`\n\tDisplayNames []string `sql:\",array\"`\n}\n\n\/\/ A PlayerSummary is a tournament-wide summary of the scores a player has\ntype PlayerSummary struct {\n\tID uint `json:\"id\"`\n\n\tTournamentID uint\n\tPersonID string `json:\"person_id\"`\n\tPerson *Person `json:\"person\" sql:\"-\"`\n\tShots int `json:\"shots\"`\n\tSweeps int `json:\"sweeps\"`\n\tKills int `json:\"kills\"`\n\tSelf int `json:\"self\"`\n\tMatches int `json:\"matches\"`\n\tTotalScore int `json:\"score\"`\n\tSkillScore int `json:\"skill_score\"`\n}\n\ntype PlayerState struct {\n\tArrows Arrows `json:\"arrows\"`\n\tShield bool `json:\"shield\"`\n\tWings bool `json:\"wings\"`\n\tHat bool `json:\"hat\"`\n\tInvisible bool `json:\"invisible\"`\n\tSpeed bool `json:\"speed\"`\n\tAlive bool `json:\"alive\"`\n\tLava bool `json:\"lava\"`\n\tKiller int `json:\"killer\"`\n}\n\n\/\/ NewPlayer returns a new instance of a player\nfunc NewPlayer(ps *Person) *Player {\n\tp := &Player{\n\t\tPersonID: ps.PersonID,\n\t\tPerson: ps,\n\t\tArcherType: ps.ArcherType,\n\t\tState: NewPlayerState(),\n\t}\n\tif len(ps.ColorPreference) > 0 {\n\t\tp.PreferredColor = ps.PreferredColor\n\t} else {\n\t\tp.PreferredColor = RandomColor(Colors)\n\t}\n\n\treturn p\n}\n\nfunc NewPlayerState() PlayerState {\n\tps := PlayerState{\n\t\tArrows: make(Arrows, 0),\n\t\tAlive: true,\n\t\tHat: true,\n\t\tKiller: -2,\n\t}\n\treturn ps\n}\n\n\/\/ NewPlayerSummary returns a new instance of a tournament player\nfunc NewPlayerSummary(ps *Person) *PlayerSummary {\n\tp := &PlayerSummary{\n\t\tPersonID: ps.PersonID,\n\t\tPerson: ps,\n\t}\n\treturn p\n}\n\nfunc (p *Player) String() string {\n\treturn fmt.Sprintf(\n\t\t\"<(%d) %s: %dsh %dsw %dk %ds>\",\n\t\tp.ID,\n\t\tp.Name(),\n\t\tp.Shots,\n\t\tp.Sweeps,\n\t\tp.Kills,\n\t\tp.Self,\n\t)\n}\n\n\/\/ Name returns the nickname\nfunc (p *Player) Name() string {\n\treturn p.Person.Nick\n}\n\n\/\/ NumericColor is the numeric representation of the color the player has\nfunc (p *Player) NumericColor() int {\n\tfor x, c := range AllColors {\n\t\tif p.Color == c {\n\t\t\treturn x\n\t\t}\n\t}\n\n\t\/\/ No color was found - this is a bug. Return default.\n\tlog.Printf(\"Player '%s' did not match a color for '%s'\", p.Name(), p.Color)\n\treturn 0\n}\n\n\/\/ Score calculates the score to determine runnerup positions.\nfunc (p *PlayerSummary) Score() (out int) {\n\tout += p.Sweeps * scoreSweep\n\tout += p.Kills * scoreKill\n\tout += p.Self * scoreSelf\n\n\t\/\/ Negative score is not allowed\n\tif out <= 0 {\n\t\tout = 0\n\t}\n\n\treturn\n}\n\n\/\/ Score calculates the score to determine runnerup positions.\nfunc (p *Player) Score() (out int) {\n\tout += p.Sweeps * scoreSweep\n\tout += p.Kills * scoreKill\n\tout += p.Self * scoreSelf\n\n\t\/\/ Negative score is not allowed\n\tif out <= 0 {\n\t\tout = 0\n\t}\n\n\t\/\/ Match score is added afterwards so that no one is stuck on 0.\n\tout += p.MatchScore\n\n\treturn\n}\n\n\/\/ Summary resturns a Summary{} object for the player\nfunc (p *Player) Summary() PlayerSummary {\n\treturn PlayerSummary{\n\t\tPersonID: p.PersonID,\n\t\tPerson: p.Person,\n\t\tShots: p.Shots,\n\t\tSweeps: p.Sweeps,\n\t\tKills: p.Kills,\n\t\tSelf: p.Self,\n\t}\n}\n\n\/\/ Player resturns a Player{} object from the summary\nfunc (p *PlayerSummary) Player() Player {\n\treturn *NewPlayer(p.Person)\n}\n\n\/\/ ScoreData returns this players set of ScoreData\nfunc (p *Player) ScoreData() []ScoreData {\n\tsd := []ScoreData{\n\t\t{Key: \"kills\", Value: p.Kills, Player: p},\n\t\t{Key: \"shots\", Value: p.Shots, Player: p},\n\t\t{Key: \"sweeps\", Value: p.Sweeps, Player: p},\n\t\t{Key: \"self\", Value: p.Self, Player: p},\n\t}\n\treturn sd\n}\n\n\/\/ URL returns the URL to this player\n\/\/\n\/\/ Used for action URLs\nfunc (p *Player) URL() string {\n\tout := fmt.Sprintf(\n\t\t\"%s\/%d\",\n\t\tp.Match.URL(),\n\t\tp.Index(),\n\t)\n\treturn out\n}\n\n\/\/ Classes returns the CSS color classes for the player\nfunc (p *Player) Classes() string {\n\tif p.Match != nil && p.Match.IsEnded() {\n\t\tps := ByScore(p.Match.Players)\n\t\tif ps[0].Name() == p.Name() {\n\t\t\t\/\/ Always gold for the winner\n\t\t\treturn \"gold\"\n\t\t} else if ps[1].Name() == p.Name() {\n\t\t\t\/\/ Silver for the second, unless there is a short amount of playoffs\n\t\t\tif p.Match.Kind != playoff || len(p.Match.Tournament.Matches)-3 <= 4 {\n\t\t\t\treturn \"silver\"\n\t\t\t}\n\t\t} else if ps[2].Name() == p.Name() && p.Match.Kind == final {\n\t\t\treturn \"bronze\"\n\t\t}\n\n\t\treturn \"out\"\n\t}\n\treturn p.PreferredColor\n}\n\n\/\/ Index returns the index in the current match\nfunc (p *Player) Index() int {\n\tif p.Match != nil {\n\t\tfor i, o := range p.Match.Players {\n\t\t\tif p.Name() == o.Name() {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ AddShot increases the shot count\nfunc (p *Player) AddShot() {\n\tlog.Printf(\"Adding shot to %s\", p.Name())\n\tp.Shots++\n}\n\n\/\/ RemoveShot decreases the shot count\n\/\/ Fails silently if shots are zero.\nfunc (p *Player) RemoveShot() {\n\n\tif p.Shots == 0 {\n\t\tlog.Printf(\"Not removing shot from %s; already at zero\", p.Name())\n\t\treturn\n\t}\n\tlog.Printf(\"Removing shot from %s\", p.Name())\n\tp.Shots--\n}\n\n\/\/ AddSweep increases the sweep count\nfunc (p *Player) AddSweep() {\n\tlog.Printf(\"Adding sweep to %s\", p.Name())\n\tp.Sweeps++\n}\n\n\/\/ AddKills increases the kill count and adds a sweep if necessary\nfunc (p *Player) AddKills(kills int) {\n\tlog.Printf(\"Adding %d kills to %s\", kills, p.Name())\n\tp.Kills += kills\n\tif kills == 3 {\n\t\tp.AddSweep()\n\t}\n}\n\n\/\/ RemoveKill decreases the kill count\n\/\/ Fails silently if kills are zero.\nfunc (p *Player) RemoveKill() {\n\tif p.Kills == 0 {\n\t\tlog.Printf(\"Not removing kill from %s; already at zero\", p.Name())\n\t\treturn\n\t}\n\tlog.Printf(\"Removing kill from %s\", p.Name())\n\tp.Kills--\n}\n\n\/\/ AddSelf increases the self count and decreases the kill\nfunc (p *Player) AddSelf() {\n\tlog.Printf(\"Adding self to %s\", p.Name())\n\tp.Self++\n\tp.RemoveKill()\n}\n\n\/\/ Reset resets the stats on a PlayerSummary to 0\n\/\/\n\/\/ It is to be run in Match.Start()\nfunc (p *Player) Reset() {\n\tp.Shots = 0\n\tp.Sweeps = 0\n\tp.Kills = 0\n\tp.Self = 0\n\tp.State = NewPlayerState()\n}\n\n\/\/ HTML renders the HTML of a player\nfunc (p *Player) HTML() (out string) {\n\treturn\n}\n\n\/\/ ByColorConflict is a sort.Interface that sorts players by their score\ntype ByColorConflict []PlayerSummary\n\nfunc (s ByColorConflict) Len() int { return len(s) }\n\nfunc (s ByColorConflict) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s ByColorConflict) Less(i, j int) bool {\n\tif s[i].Person.Userlevel != s[j].Person.Userlevel {\n\t\treturn s[i].Person.Userlevel > s[j].Person.Userlevel\n\t}\n\treturn s[i].TotalScore > s[j].TotalScore\n}\n\n\/\/ ByScore is a sort.Interface that sorts players by their score\ntype ByScore []Player\n\nfunc (s ByScore) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByScore) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByScore) Less(i, j int) bool {\n\t\/\/ Technically not Less, but we want biggest first...\n\treturn s[i].Score() > s[j].Score()\n}\n\n\/\/ SortByColorConflicts returns a list in an unspecified order,\n\/\/ Probably by User level and then score.\nfunc SortByColorConflicts(m *Match, ps []Person) (tmp []PlayerSummary, err error) {\n\tvar tp *PlayerSummary\n\ttmp = make([]PlayerSummary, len(ps))\n\tfor i, p := range ps {\n\t\t\/\/ TODO(thiderman): This is not very elegant and should be replaced.\n\t\ttp, err = m.Tournament.getTournamentPlayerObject(&p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\ttmp[i] = *tp\n\t}\n\tsort.Sort(ByColorConflict(tmp))\n\treturn\n}\n\n\/\/ ByKills is a sort.Interface that sorts players by their kills\ntype ByKills []Player\n\nfunc (s ByKills) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByKills) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByKills) Less(i, j int) bool {\n\t\/\/ Technically not Less, but we want biggest first...\n\tiKills := s[i].Kills\n\tjKills := s[j].Kills\n\tif iKills == jKills {\n\t\treturn s[i].Score() > s[j].Score()\n\t}\n\treturn iKills > jKills\n}\n\n\/\/ SortByKills returns a list in order of the kills the players have\nfunc SortByKills(ps []Player) []Player {\n\ttmp := make([]Player, len(ps))\n\tcopy(tmp, ps)\n\tsort.Sort(ByKills(tmp))\n\treturn tmp\n}\n\n\/\/ ByRunnerup is a sort.Interface that sorts players by their runnerup status\n\/\/\n\/\/ This is almost exactly the same as score, but the number of matches a player\n\/\/ has played factors in, and players that have played less matches are sorted\n\/\/ favorably.\ntype ByRunnerup []PlayerSummary\n\nfunc (s ByRunnerup) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByRunnerup) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByRunnerup) Less(i, j int) bool {\n\tif s[i].Matches == s[j].Matches {\n\t\t\/\/ Same as by kills\n\t\treturn s[i].Score() > s[j].Score()\n\t}\n\t\/\/ Lower is better - the ones that have not played should be at the top\n\treturn s[i].Matches < s[j].Matches\n}\n\n\/\/ SortByRunnerup returns a list in order of the kills the players have\nfunc SortByRunnerup(ps []PlayerSummary) []PlayerSummary {\n\tsort.Sort(ByRunnerup(ps))\n\treturn ps\n}\n\n\/\/ RandomColor returns a random color from the ColorList\nfunc RandomColor(s mapset.Set) string {\n\tcolors := s.ToSlice()\n\tx := len(colors)\n\treturn colors[rand.Intn(x)].(string)\n}\n\n\/\/ AvailableColors returns a ColorList with the colors not used in a match\nfunc AvailableColors(m *Match) mapset.Set {\n\tcolors := mapset.NewSetFromSlice(AllColors)\n\tret := colors.Difference(m.presentColors)\n\treturn ret\n}\n\n\/\/ Reset resets the stats on a PlayerSummary to 0\n\/\/\n\/\/ It is to be run in Match.Start()\nfunc (p *PlayerSummary) Reset() {\n\tp.Shots = 0\n\tp.Sweeps = 0\n\tp.Kills = 0\n\tp.Self = 0\n\tp.Matches = 0\n}\n\n\/\/ Update updates a player with the scores of another\n\/\/\n\/\/ This is primarily used by the tournament score calculator\nfunc (p *PlayerSummary) Update(other PlayerSummary) {\n\tp.Shots += other.Shots\n\tp.Sweeps += other.Sweeps\n\tp.Kills += other.Kills\n\tp.Self += other.Self\n\tp.TotalScore = p.Score()\n\n\t\/\/ Every call to this method is per match. Count every call\n\t\/\/ as if a match.\n\tp.Matches++\n\t\/\/ log.Printf(\"Updated player: %d, %d\", p.TotalScore, p.Matches)\n}\n<commit_msg>Add person fallback for PlayerSummary -> Player conversion<commit_after>package towerfall\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\n\t\"github.com\/deckarep\/golang-set\"\n)\n\n\/\/ AllColors is a list of the available player colors\nvar AllColors = []interface{}{\n\t\"green\",\n\t\"blue\",\n\t\"pink\",\n\t\"orange\",\n\t\"white\",\n\t\"yellow\",\n\t\"cyan\",\n\t\"purple\",\n\t\"red\",\n}\n\n\/\/ Colors is the definitive set of all the colors\nvar Colors = mapset.NewSetFromSlice(AllColors)\n\nconst scoreMultiplier = 7\nconst scoreSweep = (97 * scoreMultiplier)\nconst scoreKill = (21 * scoreMultiplier)\nconst scoreSelf = (-35 * scoreMultiplier) \/\/ Negative 1.66 times of a kill\nconst scoreWinner = (350 * scoreMultiplier)\nconst scoreSecond = (150 * scoreMultiplier)\nconst scoreThird = (70 * scoreMultiplier)\nconst scoreFourth = (30 * scoreMultiplier)\nconst finalMultiplier = 2.5\n\n\/\/ ScoreData is a structured Key\/Value pair list for scores\ntype ScoreData struct {\n\tKey string\n\tValue int\n\tPlayer *Player\n}\n\n\/\/ Player is a representation of one player in a match\ntype Player struct {\n\tID uint `json:\"id\"`\n\tMatchID uint\n\tPersonID string `sql:\",pk\"`\n\tPerson *Person `json:\"person\" sql:\"-\"`\n\tColor string `json:\"color\"`\n\tPreferredColor string `json:\"preferred_color\"`\n\tArcherType int `json:\"archer_type\"`\n\tShots int `json:\"shots\"`\n\tSweeps int `json:\"sweeps\"`\n\tKills int `json:\"kills\"`\n\tSelf int `json:\"self\"`\n\tMatchScore int `json:\"match_score\"`\n\tTotalScore int `json:\"total_score\"`\n\tState PlayerState `json:\"state\" sql:\"-\"`\n\tMatch *Match `json:\"-\" sql:\"-\"`\n\tDisplayNames []string `sql:\",array\"`\n}\n\n\/\/ A PlayerSummary is a tournament-wide summary of the scores a player has\ntype PlayerSummary struct {\n\tID uint `json:\"id\"`\n\n\tTournamentID uint\n\tPersonID string `json:\"person_id\"`\n\tPerson *Person `json:\"person\" sql:\"-\"`\n\tShots int `json:\"shots\"`\n\tSweeps int `json:\"sweeps\"`\n\tKills int `json:\"kills\"`\n\tSelf int `json:\"self\"`\n\tMatches int `json:\"matches\"`\n\tTotalScore int `json:\"score\"`\n\tSkillScore int `json:\"skill_score\"`\n}\n\ntype PlayerState struct {\n\tArrows Arrows `json:\"arrows\"`\n\tShield bool `json:\"shield\"`\n\tWings bool `json:\"wings\"`\n\tHat bool `json:\"hat\"`\n\tInvisible bool `json:\"invisible\"`\n\tSpeed bool `json:\"speed\"`\n\tAlive bool `json:\"alive\"`\n\tLava bool `json:\"lava\"`\n\tKiller int `json:\"killer\"`\n}\n\n\/\/ NewPlayer returns a new instance of a player\nfunc NewPlayer(ps *Person) *Player {\n\tp := &Player{\n\t\tPersonID: ps.PersonID,\n\t\tPerson: ps,\n\t\tArcherType: ps.ArcherType,\n\t\tState: NewPlayerState(),\n\t}\n\tif len(ps.ColorPreference) > 0 {\n\t\tp.PreferredColor = ps.PreferredColor\n\t} else {\n\t\tp.PreferredColor = RandomColor(Colors)\n\t}\n\n\treturn p\n}\n\nfunc NewPlayerState() PlayerState {\n\tps := PlayerState{\n\t\tArrows: make(Arrows, 0),\n\t\tAlive: true,\n\t\tHat: true,\n\t\tKiller: -2,\n\t}\n\treturn ps\n}\n\n\/\/ NewPlayerSummary returns a new instance of a tournament player\nfunc NewPlayerSummary(ps *Person) *PlayerSummary {\n\tp := &PlayerSummary{\n\t\tPersonID: ps.PersonID,\n\t\tPerson: ps,\n\t}\n\treturn p\n}\n\nfunc (p *Player) String() string {\n\treturn fmt.Sprintf(\n\t\t\"<(%d) %s: %dsh %dsw %dk %ds>\",\n\t\tp.ID,\n\t\tp.Name(),\n\t\tp.Shots,\n\t\tp.Sweeps,\n\t\tp.Kills,\n\t\tp.Self,\n\t)\n}\n\n\/\/ Name returns the nickname\nfunc (p *Player) Name() string {\n\treturn p.Person.Nick\n}\n\n\/\/ NumericColor is the numeric representation of the color the player has\nfunc (p *Player) NumericColor() int {\n\tfor x, c := range AllColors {\n\t\tif p.Color == c {\n\t\t\treturn x\n\t\t}\n\t}\n\n\t\/\/ No color was found - this is a bug. Return default.\n\tlog.Printf(\"Player '%s' did not match a color for '%s'\", p.Name(), p.Color)\n\treturn 0\n}\n\n\/\/ Score calculates the score to determine runnerup positions.\nfunc (p *PlayerSummary) Score() (out int) {\n\tout += p.Sweeps * scoreSweep\n\tout += p.Kills * scoreKill\n\tout += p.Self * scoreSelf\n\n\t\/\/ Negative score is not allowed\n\tif out <= 0 {\n\t\tout = 0\n\t}\n\n\treturn\n}\n\n\/\/ Score calculates the score to determine runnerup positions.\nfunc (p *Player) Score() (out int) {\n\tout += p.Sweeps * scoreSweep\n\tout += p.Kills * scoreKill\n\tout += p.Self * scoreSelf\n\n\t\/\/ Negative score is not allowed\n\tif out <= 0 {\n\t\tout = 0\n\t}\n\n\t\/\/ Match score is added afterwards so that no one is stuck on 0.\n\tout += p.MatchScore\n\n\treturn\n}\n\n\/\/ Summary resturns a Summary{} object for the player\nfunc (p *Player) Summary() PlayerSummary {\n\treturn PlayerSummary{\n\t\tPersonID: p.PersonID,\n\t\tPerson: p.Person,\n\t\tShots: p.Shots,\n\t\tSweeps: p.Sweeps,\n\t\tKills: p.Kills,\n\t\tSelf: p.Self,\n\t}\n}\n\n\/\/ Player returns a new Player{} object from the summary\nfunc (p *PlayerSummary) Player() Player {\n\t\/\/ TODO(thiderman): It would be better to always make sure that the\n\t\/\/ person object is set\n\tif p.Person == nil {\n\t\tvar err error\n\t\tp.Person, err = globalDB.GetPerson(p.PersonID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\treturn *NewPlayer(p.Person)\n}\n\n\/\/ ScoreData returns this players set of ScoreData\nfunc (p *Player) ScoreData() []ScoreData {\n\tsd := []ScoreData{\n\t\t{Key: \"kills\", Value: p.Kills, Player: p},\n\t\t{Key: \"shots\", Value: p.Shots, Player: p},\n\t\t{Key: \"sweeps\", Value: p.Sweeps, Player: p},\n\t\t{Key: \"self\", Value: p.Self, Player: p},\n\t}\n\treturn sd\n}\n\n\/\/ URL returns the URL to this player\n\/\/\n\/\/ Used for action URLs\nfunc (p *Player) URL() string {\n\tout := fmt.Sprintf(\n\t\t\"%s\/%d\",\n\t\tp.Match.URL(),\n\t\tp.Index(),\n\t)\n\treturn out\n}\n\n\/\/ Classes returns the CSS color classes for the player\nfunc (p *Player) Classes() string {\n\tif p.Match != nil && p.Match.IsEnded() {\n\t\tps := ByScore(p.Match.Players)\n\t\tif ps[0].Name() == p.Name() {\n\t\t\t\/\/ Always gold for the winner\n\t\t\treturn \"gold\"\n\t\t} else if ps[1].Name() == p.Name() {\n\t\t\t\/\/ Silver for the second, unless there is a short amount of playoffs\n\t\t\tif p.Match.Kind != playoff || len(p.Match.Tournament.Matches)-3 <= 4 {\n\t\t\t\treturn \"silver\"\n\t\t\t}\n\t\t} else if ps[2].Name() == p.Name() && p.Match.Kind == final {\n\t\t\treturn \"bronze\"\n\t\t}\n\n\t\treturn \"out\"\n\t}\n\treturn p.PreferredColor\n}\n\n\/\/ Index returns the index in the current match\nfunc (p *Player) Index() int {\n\tif p.Match != nil {\n\t\tfor i, o := range p.Match.Players {\n\t\t\tif p.Name() == o.Name() {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ AddShot increases the shot count\nfunc (p *Player) AddShot() {\n\tlog.Printf(\"Adding shot to %s\", p.Name())\n\tp.Shots++\n}\n\n\/\/ RemoveShot decreases the shot count\n\/\/ Fails silently if shots are zero.\nfunc (p *Player) RemoveShot() {\n\n\tif p.Shots == 0 {\n\t\tlog.Printf(\"Not removing shot from %s; already at zero\", p.Name())\n\t\treturn\n\t}\n\tlog.Printf(\"Removing shot from %s\", p.Name())\n\tp.Shots--\n}\n\n\/\/ AddSweep increases the sweep count\nfunc (p *Player) AddSweep() {\n\tlog.Printf(\"Adding sweep to %s\", p.Name())\n\tp.Sweeps++\n}\n\n\/\/ AddKills increases the kill count and adds a sweep if necessary\nfunc (p *Player) AddKills(kills int) {\n\tlog.Printf(\"Adding %d kills to %s\", kills, p.Name())\n\tp.Kills += kills\n\tif kills == 3 {\n\t\tp.AddSweep()\n\t}\n}\n\n\/\/ RemoveKill decreases the kill count\n\/\/ Fails silently if kills are zero.\nfunc (p *Player) RemoveKill() {\n\tif p.Kills == 0 {\n\t\tlog.Printf(\"Not removing kill from %s; already at zero\", p.Name())\n\t\treturn\n\t}\n\tlog.Printf(\"Removing kill from %s\", p.Name())\n\tp.Kills--\n}\n\n\/\/ AddSelf increases the self count and decreases the kill\nfunc (p *Player) AddSelf() {\n\tlog.Printf(\"Adding self to %s\", p.Name())\n\tp.Self++\n\tp.RemoveKill()\n}\n\n\/\/ Reset resets the stats on a PlayerSummary to 0\n\/\/\n\/\/ It is to be run in Match.Start()\nfunc (p *Player) Reset() {\n\tp.Shots = 0\n\tp.Sweeps = 0\n\tp.Kills = 0\n\tp.Self = 0\n\tp.State = NewPlayerState()\n}\n\n\/\/ HTML renders the HTML of a player\nfunc (p *Player) HTML() (out string) {\n\treturn\n}\n\n\/\/ ByColorConflict is a sort.Interface that sorts players by their score\ntype ByColorConflict []PlayerSummary\n\nfunc (s ByColorConflict) Len() int { return len(s) }\n\nfunc (s ByColorConflict) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s ByColorConflict) Less(i, j int) bool {\n\tif s[i].Person.Userlevel != s[j].Person.Userlevel {\n\t\treturn s[i].Person.Userlevel > s[j].Person.Userlevel\n\t}\n\treturn s[i].TotalScore > s[j].TotalScore\n}\n\n\/\/ ByScore is a sort.Interface that sorts players by their score\ntype ByScore []Player\n\nfunc (s ByScore) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByScore) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByScore) Less(i, j int) bool {\n\t\/\/ Technically not Less, but we want biggest first...\n\treturn s[i].Score() > s[j].Score()\n}\n\n\/\/ SortByColorConflicts returns a list in an unspecified order,\n\/\/ Probably by User level and then score.\nfunc SortByColorConflicts(m *Match, ps []Person) (tmp []PlayerSummary, err error) {\n\tvar tp *PlayerSummary\n\ttmp = make([]PlayerSummary, len(ps))\n\tfor i, p := range ps {\n\t\t\/\/ TODO(thiderman): This is not very elegant and should be replaced.\n\t\ttp, err = m.Tournament.getTournamentPlayerObject(&p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\ttmp[i] = *tp\n\t}\n\tsort.Sort(ByColorConflict(tmp))\n\treturn\n}\n\n\/\/ ByKills is a sort.Interface that sorts players by their kills\ntype ByKills []Player\n\nfunc (s ByKills) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByKills) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByKills) Less(i, j int) bool {\n\t\/\/ Technically not Less, but we want biggest first...\n\tiKills := s[i].Kills\n\tjKills := s[j].Kills\n\tif iKills == jKills {\n\t\treturn s[i].Score() > s[j].Score()\n\t}\n\treturn iKills > jKills\n}\n\n\/\/ SortByKills returns a list in order of the kills the players have\nfunc SortByKills(ps []Player) []Player {\n\ttmp := make([]Player, len(ps))\n\tcopy(tmp, ps)\n\tsort.Sort(ByKills(tmp))\n\treturn tmp\n}\n\n\/\/ ByRunnerup is a sort.Interface that sorts players by their runnerup status\n\/\/\n\/\/ This is almost exactly the same as score, but the number of matches a player\n\/\/ has played factors in, and players that have played less matches are sorted\n\/\/ favorably.\ntype ByRunnerup []PlayerSummary\n\nfunc (s ByRunnerup) Len() int {\n\treturn len(s)\n\n}\nfunc (s ByRunnerup) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n\n}\nfunc (s ByRunnerup) Less(i, j int) bool {\n\tif s[i].Matches == s[j].Matches {\n\t\t\/\/ Higher is better - if you have a high score you'll play again\n\t\treturn s[i].Score() > s[j].Score()\n\t}\n\t\/\/ Lower is better - the ones that have not played should be at the top\n\treturn s[i].Matches < s[j].Matches\n}\n\n\/\/ SortByRunnerup returns a list in order of the kills the players have\nfunc SortByRunnerup(ps []PlayerSummary) []PlayerSummary {\n\tsort.Sort(ByRunnerup(ps))\n\treturn ps\n}\n\n\/\/ RandomColor returns a random color from the ColorList\nfunc RandomColor(s mapset.Set) string {\n\tcolors := s.ToSlice()\n\tx := len(colors)\n\treturn colors[rand.Intn(x)].(string)\n}\n\n\/\/ AvailableColors returns a ColorList with the colors not used in a match\nfunc AvailableColors(m *Match) mapset.Set {\n\tcolors := mapset.NewSetFromSlice(AllColors)\n\tret := colors.Difference(m.presentColors)\n\treturn ret\n}\n\n\/\/ Reset resets the stats on a PlayerSummary to 0\n\/\/\n\/\/ It is to be run in Match.Start()\nfunc (p *PlayerSummary) Reset() {\n\tp.Shots = 0\n\tp.Sweeps = 0\n\tp.Kills = 0\n\tp.Self = 0\n\tp.Matches = 0\n}\n\n\/\/ Update updates a player with the scores of another\n\/\/\n\/\/ This is primarily used by the tournament score calculator\nfunc (p *PlayerSummary) Update(other PlayerSummary) {\n\tp.Shots += other.Shots\n\tp.Sweeps += other.Sweeps\n\tp.Kills += other.Kills\n\tp.Self += other.Self\n\tp.TotalScore = p.Score()\n\n\t\/\/ Every call to this method is per match. Count every call\n\t\/\/ as if a match.\n\tp.Matches++\n\t\/\/ log.Printf(\"Updated player: %d, %d\", p.TotalScore, p.Matches)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build go1.11\n\npackage trace\n\nimport (\n\t\"context\"\n\tt \"runtime\/trace\"\n)\n\nfunc startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {\n\treturn t.NewContext(ctx, name)\n}\n<commit_msg>Don't enable execution tracer tasks if tracer is not enabled (#725)<commit_after>\/\/ Copyright 2018, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build go1.11\n\npackage trace\n\nimport (\n\t\"context\"\n\tt \"runtime\/trace\"\n)\n\nfunc startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {\n\tif !t.IsEnabled() {\n\t\t\/\/ Avoid additional overhead if\n\t\t\/\/ runtime\/trace is not enabled.\n\t\treturn ctx, func() {}\n\t}\n\treturn t.NewContext(ctx, name)\n}\n<|endoftext|>"} {"text":"<commit_before>package goxlsx\n\nimport (\n\t\"archive\/zip\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Worksheet struct {\n\tName string\n\tfilename string\n\tid string\n\trows map[int]*row\n\tspreadsheet *Spreadsheet\n}\n\ntype cell struct {\n\tName string\n\tType string\n\tValue string\n}\n\ntype row struct {\n\tNum int\n\tCells map[int]*cell\n}\n\ntype Spreadsheet struct {\n\tfilepath string\n\tcompressedFiles []zip.File\n\tWorksheets []*Worksheet\n\tsharedStrings []string\n}\n\nfunc readWorkbook(d *xml.Decoder, s *Spreadsheet) []*Worksheet {\n\tworksheets := make([]*Worksheet, 0, 5)\n\tvar (\n\t\terr error\n\t\ttoken xml.Token\n\t)\n\n\tfor {\n\t\ttoken, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"sheet\":\n\t\t\t\tws := new(Worksheet)\n\t\t\t\tws.spreadsheet = s\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tif a.Name.Local == \"name\" {\n\t\t\t\t\t\tws.Name = a.Value\n\t\t\t\t\t}\n\t\t\t\t\tif a.Name.Local == \"sheetId\" {\n\t\t\t\t\t\tws.id = a.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tworksheets = append(worksheets, ws)\n\t\t\t}\n\t\t}\n\t}\n\treturn worksheets\n}\n\nfunc readStrings(d *xml.Decoder, s *Spreadsheet) {\n\tvar (\n\t\terr error\n\t\tdata []byte\n\t\ttoken xml.Token\n\t)\n\tfor {\n\t\ttoken, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"sst\":\n\t\t\t\t\/\/ root element\n\t\t\t\tfor i := 0; i < len(x.Attr); i++ {\n\t\t\t\t\tif x.Attr[i].Name.Local == \"uniqueCount\" {\n\t\t\t\t\t\tcount, err := strconv.Atoi(x.Attr[i].Value)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ts.sharedStrings = make([]string, 0, count)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t\/\/ log.Println(x.Name.Local)\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tdata = x.Copy()\n\t\tcase xml.EndElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"t\":\n\t\t\t\ts.sharedStrings = append(s.sharedStrings, string(data))\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc OpenFile(path string) (*Spreadsheet, error) {\n\txlsx := new(Spreadsheet)\n\txlsx.filepath = path\n\n\tr, err := zip.OpenReader(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\tfor _, f := range r.File {\n\t\tif f.Name == \"xl\/workbook.xml\" {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\txlsx.Worksheets = readWorkbook(xml.NewDecoder(rc), xlsx)\n\t\t\trc.Close()\n\t\t}\n\t\tif f.Name == \"xl\/sharedStrings.xml\" {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treadStrings(xml.NewDecoder(rc), xlsx)\n\t\t\trc.Close()\n\t\t}\n\t}\n\treturn xlsx, nil\n}\n\nfunc readWorksheetXML(dec *xml.Decoder, s *Spreadsheet) (map[int]*row, error) {\n\trows := make(map[int]*row)\n\tvar (\n\t\terr error\n\t\ttoken xml.Token\n\t\trownum int\n\t\tcurrentCell *cell\n\t\tcurrentRow *row\n\t)\n\tfor {\n\t\ttoken, err = dec.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"row\":\n\t\t\t\tcurrentRow = &row{}\n\t\t\t\tcurrentRow.Cells = make(map[int]*cell)\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tif a.Name.Local == \"r\" {\n\t\t\t\t\t\trownum, err = strconv.Atoi(a.Value)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcurrentRow.Num = rownum\n\t\t\t\trows[rownum] = currentRow\n\t\t\tcase \"c\":\n\t\t\t\tcurrentCell = &cell{}\n\t\t\t\tvar cellnumber rune\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tswitch a.Name.Local {\n\t\t\t\t\tcase \"r\":\n\t\t\t\t\t\tfor _, v := range a.Value {\n\t\t\t\t\t\t\tif v >= 'A' && v <= 'Z' {\n\t\t\t\t\t\t\t\tcellnumber = cellnumber*26 + v - 'A' + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"t\":\n\t\t\t\t\t\tif a.Value == \"s\" {\n\t\t\t\t\t\t\tcurrentCell.Type = \"s\"\n\t\t\t\t\t\t} else if a.Value == \"n\" {\n\t\t\t\t\t\t\tcurrentCell.Type = \"n\"\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tcurrentRow.Cells[int(cellnumber)] = currentCell\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"c\":\n\t\t\t\tcurrentCell = nil\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tif currentCell != nil {\n\t\t\t\tval := string(x.Copy())\n\t\t\t\tif currentCell.Type == \"s\" {\n\t\t\t\t\tvalInt, _ := strconv.Atoi(val)\n\t\t\t\t\tcurrentCell.Value = s.sharedStrings[valInt]\n\t\t\t\t} else if currentCell.Type == \"n\" {\n\t\t\t\t\tcurrentCell.Value = strings.TrimSuffix(val, \".0\")\n\t\t\t\t} else {\n\t\t\t\t\tcurrentCell.Value = val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn rows, nil\n}\n\nfunc (ws *Worksheet) readWorksheetZIP() error {\n\tr, err := zip.OpenReader(ws.spreadsheet.filepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Close()\n\n\tfor _, f := range r.File {\n\t\tif f.Name == ws.filename {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rc.Close()\n\t\t\trows, err := readWorksheetXML(xml.NewDecoder(rc), ws.spreadsheet)\n\t\t\tws.rows = rows\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ws *Worksheet) Cell(row, column int) string {\n\txrow := ws.rows[row]\n\tif xrow == nil {\n\t\treturn \"\"\n\t}\n\tif xrow.Cells[column] == nil {\n\t\treturn \"\"\n\t}\n\treturn xrow.Cells[column].Value\n}\n\nfunc (s *Spreadsheet) GetWorksheet(number int) (*Worksheet, error) {\n\tif number >= len(s.Worksheets) || number < 0 {\n\t\treturn nil, errors.New(\"Index out of range\")\n\t}\n\tws := s.Worksheets[number]\n\tws.filename = \"xl\/worksheets\" + \"\/\" + fmt.Sprintf(\"sheet%s.xml\", ws.id)\n\terr := ws.readWorksheetZIP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ws, nil\n}\n<commit_msg>Documentation<commit_after>package goxlsx\n\nimport (\n\t\"archive\/zip\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Worksheet struct {\n\tName string\n\tfilename string\n\tid string\n\trows map[int]*row\n\tspreadsheet *Spreadsheet\n}\n\ntype cell struct {\n\tName string\n\tType string\n\tValue string\n}\n\ntype row struct {\n\tNum int\n\tCells map[int]*cell\n}\n\ntype Spreadsheet struct {\n\tfilepath string\n\tcompressedFiles []zip.File\n\tworksheets []*Worksheet\n\tsharedStrings []string\n}\n\nfunc readWorkbook(d *xml.Decoder, s *Spreadsheet) []*Worksheet {\n\tworksheets := make([]*Worksheet, 0, 5)\n\tvar (\n\t\terr error\n\t\ttoken xml.Token\n\t)\n\n\tfor {\n\t\ttoken, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"sheet\":\n\t\t\t\tws := new(Worksheet)\n\t\t\t\tws.spreadsheet = s\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tif a.Name.Local == \"name\" {\n\t\t\t\t\t\tws.Name = a.Value\n\t\t\t\t\t}\n\t\t\t\t\tif a.Name.Local == \"sheetId\" {\n\t\t\t\t\t\tws.id = a.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tworksheets = append(worksheets, ws)\n\t\t\t}\n\t\t}\n\t}\n\treturn worksheets\n}\n\nfunc readStrings(d *xml.Decoder, s *Spreadsheet) {\n\tvar (\n\t\terr error\n\t\tdata []byte\n\t\ttoken xml.Token\n\t)\n\tfor {\n\t\ttoken, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"sst\":\n\t\t\t\t\/\/ root element\n\t\t\t\tfor i := 0; i < len(x.Attr); i++ {\n\t\t\t\t\tif x.Attr[i].Name.Local == \"uniqueCount\" {\n\t\t\t\t\t\tcount, err := strconv.Atoi(x.Attr[i].Value)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ts.sharedStrings = make([]string, 0, count)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t\/\/ log.Println(x.Name.Local)\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tdata = x.Copy()\n\t\tcase xml.EndElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"t\":\n\t\t\t\ts.sharedStrings = append(s.sharedStrings, string(data))\n\t\t\t}\n\t\t}\n\n\t}\n}\n\n\/\/ Read the Excel file located at the given path.\nfunc OpenFile(path string) (*Spreadsheet, error) {\n\txlsx := new(Spreadsheet)\n\txlsx.filepath = path\n\n\tr, err := zip.OpenReader(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\tfor _, f := range r.File {\n\t\tif f.Name == \"xl\/workbook.xml\" {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\txlsx.worksheets = readWorkbook(xml.NewDecoder(rc), xlsx)\n\t\t\trc.Close()\n\t\t}\n\t\tif f.Name == \"xl\/sharedStrings.xml\" {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treadStrings(xml.NewDecoder(rc), xlsx)\n\t\t\trc.Close()\n\t\t}\n\t}\n\treturn xlsx, nil\n}\n\nfunc readWorksheetXML(dec *xml.Decoder, s *Spreadsheet) (map[int]*row, error) {\n\trows := make(map[int]*row)\n\tvar (\n\t\terr error\n\t\ttoken xml.Token\n\t\trownum int\n\t\tcurrentCell *cell\n\t\tcurrentRow *row\n\t)\n\tfor {\n\t\ttoken, err = dec.Token()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tswitch x := token.(type) {\n\t\tcase xml.StartElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"row\":\n\t\t\t\tcurrentRow = &row{}\n\t\t\t\tcurrentRow.Cells = make(map[int]*cell)\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tif a.Name.Local == \"r\" {\n\t\t\t\t\t\trownum, err = strconv.Atoi(a.Value)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcurrentRow.Num = rownum\n\t\t\t\trows[rownum] = currentRow\n\t\t\tcase \"c\":\n\t\t\t\tcurrentCell = &cell{}\n\t\t\t\tvar cellnumber rune\n\t\t\t\tfor _, a := range x.Attr {\n\t\t\t\t\tswitch a.Name.Local {\n\t\t\t\t\tcase \"r\":\n\t\t\t\t\t\tfor _, v := range a.Value {\n\t\t\t\t\t\t\tif v >= 'A' && v <= 'Z' {\n\t\t\t\t\t\t\t\tcellnumber = cellnumber*26 + v - 'A' + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"t\":\n\t\t\t\t\t\tif a.Value == \"s\" {\n\t\t\t\t\t\t\tcurrentCell.Type = \"s\"\n\t\t\t\t\t\t} else if a.Value == \"n\" {\n\t\t\t\t\t\t\tcurrentCell.Type = \"n\"\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tcurrentRow.Cells[int(cellnumber)] = currentCell\n\t\t\t}\n\t\tcase xml.EndElement:\n\t\t\tswitch x.Name.Local {\n\t\t\tcase \"c\":\n\t\t\t\tcurrentCell = nil\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tif currentCell != nil {\n\t\t\t\tval := string(x.Copy())\n\t\t\t\tif currentCell.Type == \"s\" {\n\t\t\t\t\tvalInt, _ := strconv.Atoi(val)\n\t\t\t\t\tcurrentCell.Value = s.sharedStrings[valInt]\n\t\t\t\t} else if currentCell.Type == \"n\" {\n\t\t\t\t\tcurrentCell.Value = strings.TrimSuffix(val, \".0\")\n\t\t\t\t} else {\n\t\t\t\t\tcurrentCell.Value = val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn rows, nil\n}\n\nfunc (ws *Worksheet) readWorksheetZIP() error {\n\tr, err := zip.OpenReader(ws.spreadsheet.filepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Close()\n\n\tfor _, f := range r.File {\n\t\tif f.Name == ws.filename {\n\t\t\trc, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rc.Close()\n\t\t\trows, err := readWorksheetXML(xml.NewDecoder(rc), ws.spreadsheet)\n\t\t\tws.rows = rows\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Get the contents of cell at row \/ column, where 1,1 is the top left corner. The return value is always a string.\n\/\/ The user is in charge to convert this value to a number, if necessary. Formulae are not returned.\nfunc (ws *Worksheet) Cell(row, column int) string {\n\txrow := ws.rows[row]\n\tif xrow == nil {\n\t\treturn \"\"\n\t}\n\tif xrow.Cells[column] == nil {\n\t\treturn \"\"\n\t}\n\treturn xrow.Cells[column].Value\n}\n\n\/\/ Get the worksheet with the given number, starting at 0.\nfunc (s *Spreadsheet) GetWorksheet(number int) (*Worksheet, error) {\n\tif number >= len(s.worksheets) || number < 0 {\n\t\treturn nil, errors.New(\"Index out of range\")\n\t}\n\tws := s.worksheets[number]\n\tws.filename = path.Join(\"xl\", \"worksheets\", fmt.Sprintf(\"sheet%s.xml\", ws.id))\n\terr := ws.readWorksheetZIP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ws, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Autoscaling\", func() {\n\tf := NewFramework(\"autoscaling\")\n\tvar nodeCount int\n\tvar coresPerNode int\n\tvar memCapacityMb int\n\n\tBeforeEach(func() {\n\t\tSkipUnlessProviderIs(\"gce\")\n\n\t\tnodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())\n\t\texpectNoError(err)\n\t\tnodeCount = len(nodes.Items)\n\t\tExpect(nodeCount).NotTo(BeZero())\n\t\tcpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]\n\t\tmem := nodes.Items[0].Status.Capacity[api.ResourceMemory]\n\t\tcoresPerNode = int((&cpu).MilliValue() \/ 1000)\n\t\tmemCapacityMb = int((&mem).Value() \/ 1024 \/ 1024)\n\t})\n\n\tAfterEach(func() {\n\t\tcleanUpAutoscaler()\n\t})\n\n\tIt(\"[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization\", func() {\n\t\tsetUpAutoscaler(\"cpu\/node_utilization\", 0.4, nodeCount, nodeCount+1)\n\n\t\t\/\/ Consume 50% CPU\n\t\tmillicoresPerReplica := 500\n\t\trc := NewStaticResourceConsumer(\"cpu-utilization\", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\trc.CleanUp()\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped] should scale cluster size based on cpu reservation\", func() {\n\t\tsetUpAutoscaler(\"cpu\/node_reservation\", 0.5, nodeCount, nodeCount+1)\n\n\t\tReserveCpu(f, \"cpu-reservation\", 600*nodeCount*coresPerNode)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\texpectNoError(DeleteRC(f.Client, f.Namespace.Name, \"cpu-reservation\"))\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization\", func() {\n\t\tsetUpAutoscaler(\"memory\/node_utilization\", 0.5, nodeCount, nodeCount+1)\n\n\t\t\/\/ Consume 60% of total memory capacity\n\t\tmegabytesPerReplica := int(memCapacityMb * 6 \/ 10 \/ coresPerNode)\n\t\trc := NewStaticResourceConsumer(\"mem-utilization\", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\trc.CleanUp()\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped] should scale cluster size based on memory reservation\", func() {\n\t\tsetUpAutoscaler(\"memory\/node_reservation\", 0.5, nodeCount, nodeCount+1)\n\n\t\tReserveMemory(f, \"memory-reservation\", nodeCount*memCapacityMb*6\/10)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\texpectNoError(DeleteRC(f.Client, f.Namespace.Name, \"memory-reservation\"))\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n})\n\nfunc setUpAutoscaler(metric string, target float64, min, max int) {\n\t\/\/ TODO integrate with kube-up.sh script once it will support autoscaler setup.\n\tBy(\"Setting up autoscaler to scale based on \" + metric)\n\tout, err := exec.Command(\"gcloud\", \"compute\", \"instance-groups\", \"managed\", \"set-autoscaling\",\n\t\ttestContext.CloudConfig.NodeInstanceGroup,\n\t\t\"--project=\"+testContext.CloudConfig.ProjectID,\n\t\t\"--zone=\"+testContext.CloudConfig.Zone,\n\t\t\"--custom-metric-utilization=metric=custom.cloudmonitoring.googleapis.com\/kubernetes.io\/\"+metric+fmt.Sprintf(\",utilization-target=%v\", target)+\",utilization-target-type=GAUGE\",\n\t\tfmt.Sprintf(\"--min-num-replicas=%v\", min),\n\t\tfmt.Sprintf(\"--max-num-replicas=%v\", max),\n\t).CombinedOutput()\n\texpectNoError(err, \"Output: \"+string(out))\n}\n\nfunc cleanUpAutoscaler() {\n\tBy(\"Removing autoscaler\")\n\tout, err := exec.Command(\"gcloud\", \"compute\", \"instance-groups\", \"managed\", \"stop-autoscaling\",\n\t\ttestContext.CloudConfig.NodeInstanceGroup,\n\t\t\"--project=\"+testContext.CloudConfig.ProjectID,\n\t\t\"--zone=\"+testContext.CloudConfig.Zone,\n\t).CombinedOutput()\n\texpectNoError(err, \"Output: \"+string(out))\n}\n\nfunc ReserveCpu(f *Framework, id string, millicores int) {\n\tBy(fmt.Sprintf(\"Running RC which reserves %v millicores\", millicores))\n\tconfig := &RCConfig{\n\t\tClient: f.Client,\n\t\tName: id,\n\t\tNamespace: f.Namespace.Name,\n\t\tTimeout: 10 * time.Minute,\n\t\tImage: \"gcr.io\/google_containers\/pause\",\n\t\tReplicas: millicores \/ 100,\n\t\tCpuRequest: 100,\n\t}\n\texpectNoError(RunRC(*config))\n}\n\nfunc ReserveMemory(f *Framework, id string, megabytes int) {\n\tBy(fmt.Sprintf(\"Running RC which reserves %v MB of memory\", megabytes))\n\tconfig := &RCConfig{\n\t\tClient: f.Client,\n\t\tName: id,\n\t\tNamespace: f.Namespace.Name,\n\t\tTimeout: 10 * time.Minute,\n\t\tImage: \"gcr.io\/google_containers\/pause\",\n\t\tReplicas: megabytes \/ 500,\n\t\tMemRequest: 500 * 1024 * 1024,\n\t}\n\texpectNoError(RunRC(*config))\n}\n<commit_msg>Another try to fix flaky autoscaling test.<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Autoscaling\", func() {\n\tf := NewFramework(\"autoscaling\")\n\tvar nodeCount int\n\tvar coresPerNode int\n\tvar memCapacityMb int\n\n\tBeforeEach(func() {\n\t\tSkipUnlessProviderIs(\"gce\")\n\n\t\tnodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())\n\t\texpectNoError(err)\n\t\tnodeCount = len(nodes.Items)\n\t\tExpect(nodeCount).NotTo(BeZero())\n\t\tcpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]\n\t\tmem := nodes.Items[0].Status.Capacity[api.ResourceMemory]\n\t\tcoresPerNode = int((&cpu).MilliValue() \/ 1000)\n\t\tmemCapacityMb = int((&mem).Value() \/ 1024 \/ 1024)\n\t})\n\n\tAfterEach(func() {\n\t\tcleanUpAutoscaler()\n\t})\n\n\tIt(\"[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization\", func() {\n\t\tsetUpAutoscaler(\"cpu\/node_utilization\", 0.4, nodeCount, nodeCount+1)\n\n\t\t\/\/ Consume 50% CPU\n\t\tmillicoresPerReplica := 500\n\t\trc := NewStaticResourceConsumer(\"cpu-utilization\", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\trc.CleanUp()\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped] should scale cluster size based on cpu reservation\", func() {\n\t\tsetUpAutoscaler(\"cpu\/node_reservation\", 0.5, nodeCount, nodeCount+1)\n\n\t\tReserveCpu(f, \"cpu-reservation\", 600*nodeCount*coresPerNode)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\texpectNoError(DeleteRC(f.Client, f.Namespace.Name, \"cpu-reservation\"))\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization\", func() {\n\t\tsetUpAutoscaler(\"memory\/node_utilization\", 0.6, nodeCount, nodeCount+1)\n\n\t\t\/\/ Consume 60% of total memory capacity\n\t\tmegabytesPerReplica := int(memCapacityMb * 6 \/ 10 \/ coresPerNode)\n\t\trc := NewStaticResourceConsumer(\"mem-utilization\", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\trc.CleanUp()\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n\n\tIt(\"[Skipped] should scale cluster size based on memory reservation\", func() {\n\t\tsetUpAutoscaler(\"memory\/node_reservation\", 0.5, nodeCount, nodeCount+1)\n\n\t\tReserveMemory(f, \"memory-reservation\", nodeCount*memCapacityMb*6\/10)\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))\n\n\t\texpectNoError(DeleteRC(f.Client, f.Namespace.Name, \"memory-reservation\"))\n\t\texpectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))\n\t})\n})\n\nfunc setUpAutoscaler(metric string, target float64, min, max int) {\n\t\/\/ TODO integrate with kube-up.sh script once it will support autoscaler setup.\n\tBy(\"Setting up autoscaler to scale based on \" + metric)\n\tout, err := exec.Command(\"gcloud\", \"compute\", \"instance-groups\", \"managed\", \"set-autoscaling\",\n\t\ttestContext.CloudConfig.NodeInstanceGroup,\n\t\t\"--project=\"+testContext.CloudConfig.ProjectID,\n\t\t\"--zone=\"+testContext.CloudConfig.Zone,\n\t\t\"--custom-metric-utilization=metric=custom.cloudmonitoring.googleapis.com\/kubernetes.io\/\"+metric+fmt.Sprintf(\",utilization-target=%v\", target)+\",utilization-target-type=GAUGE\",\n\t\tfmt.Sprintf(\"--min-num-replicas=%v\", min),\n\t\tfmt.Sprintf(\"--max-num-replicas=%v\", max),\n\t).CombinedOutput()\n\texpectNoError(err, \"Output: \"+string(out))\n}\n\nfunc cleanUpAutoscaler() {\n\tBy(\"Removing autoscaler\")\n\tout, err := exec.Command(\"gcloud\", \"compute\", \"instance-groups\", \"managed\", \"stop-autoscaling\",\n\t\ttestContext.CloudConfig.NodeInstanceGroup,\n\t\t\"--project=\"+testContext.CloudConfig.ProjectID,\n\t\t\"--zone=\"+testContext.CloudConfig.Zone,\n\t).CombinedOutput()\n\texpectNoError(err, \"Output: \"+string(out))\n}\n\nfunc ReserveCpu(f *Framework, id string, millicores int) {\n\tBy(fmt.Sprintf(\"Running RC which reserves %v millicores\", millicores))\n\tconfig := &RCConfig{\n\t\tClient: f.Client,\n\t\tName: id,\n\t\tNamespace: f.Namespace.Name,\n\t\tTimeout: 10 * time.Minute,\n\t\tImage: \"gcr.io\/google_containers\/pause\",\n\t\tReplicas: millicores \/ 100,\n\t\tCpuRequest: 100,\n\t}\n\texpectNoError(RunRC(*config))\n}\n\nfunc ReserveMemory(f *Framework, id string, megabytes int) {\n\tBy(fmt.Sprintf(\"Running RC which reserves %v MB of memory\", megabytes))\n\tconfig := &RCConfig{\n\t\tClient: f.Client,\n\t\tName: id,\n\t\tNamespace: f.Namespace.Name,\n\t\tTimeout: 10 * time.Minute,\n\t\tImage: \"gcr.io\/google_containers\/pause\",\n\t\tReplicas: megabytes \/ 500,\n\t\tMemRequest: 500 * 1024 * 1024,\n\t}\n\texpectNoError(RunRC(*config))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Program yang parses YANG files, displays errors, and possibly writes\n\/\/ something related to the input on output.\n\/\/\n\/\/ Usage: yang [--path DIR] [--format FORMAT] [FORMAT OPTIONS] [MODULE] [FILE ...]\n\/\/\n\/\/ If MODULE is specified (an argument that does not end in .yang), it is taken\n\/\/ as the name of the module to display. Any FILEs specified are read, and the\n\/\/ tree for MODULE is displayed. If MODULE was not defined in FILEs (or no\n\/\/ files were specified), then the file MODULES.yang is read as well. An error\n\/\/ is displayed if no definition for MODULE was found.\n\/\/\n\/\/ If MODULE is missing, then all base modules read from the FILEs are\n\/\/ displayed. If there are no arguments then standard input is parsed.\n\/\/\n\/\/ If DIR is specified, it is considered a comma separated list of paths\n\/\/ to append to the search directory.\n\/\/\n\/\/ FORMAT, which defaults to \"tree\", specifes the format of output to produce.\n\/\/ Use \"goyang --help\" for a list of available formats.\n\/\/\n\/\/ FORMAT OPTIONS are flags that apply to a specific format. They must follow\n\/\/ --format.\n\/\/\n\/\/ THIS PROGRAM IS STILL JUST A DEVELOPMENT TOOL.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\/trace\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/openconfig\/goyang\/pkg\/yang\"\n\t\"github.com\/openconfig\/goyang\/pkg\/indent\"\n\t\"github.com\/pborman\/getopt\"\n)\n\n\/\/ Each format must register a formatter with register. The function f will\n\/\/ be called once with the set of yang Entry trees generated.\ntype formatter struct {\n\tname string\n\tf func(io.Writer, []*yang.Entry)\n\thelp string\n\tflags *getopt.Set\n}\n\nvar formatters = map[string]*formatter{}\n\nfunc register(f *formatter) {\n\tformatters[f.name] = f\n}\n\n\/\/ exitIfError writes errs to standard error and exits with an exit status of 1.\n\/\/ If errs is empty then exitIfError does nothing and simply returns.\nfunc exitIfError(errs []error) {\n\tif len(errs) > 0 {\n\t\tfor _, err := range errs {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tstop(1)\n\t}\n}\n\nvar stop = os.Exit\n\nfunc main() {\n\tvar format string\n\tformats := make([]string, 0, len(formatters))\n\tfor k := range formatters {\n\t\tformats = append(formats, k)\n\t}\n\tsort.Strings(formats)\n\n\tvar traceP string\n\tvar help bool\n\tgetopt.ListVarLong(&yang.Path, \"path\", 0, \"comma separated list of directories to add to search path\", \"DIR[,DIR...]\")\n\tgetopt.StringVarLong(&format, \"format\", 0, \"format to display: \"+strings.Join(formats, \", \"), \"FORMAT\")\n\tgetopt.StringVarLong(&traceP, \"trace\", 0, \"write trace into to TRACEFILE\", \"TRACEFILE\")\n\tgetopt.BoolVarLong(&help, \"help\", '?', \"display help\")\n\tgetopt.SetParameters(\"[FORMAT OPTIONS] [SOURCE] [...]\")\n\n\tif err := getopt.Getopt(func(o getopt.Option) bool {\n\t\tif o.Name() == \"--format\" {\n\t\t\tf, ok := formatters[format]\n\t\t\tif !ok {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s: invalid format. Choices are %s\\n\", format, strings.Join(formats, \", \"))\n\t\t\t\tstop(1)\n\t\t\t}\n\t\t\tif f.flags != nil {\n\t\t\t\tf.flags.VisitAll(func(o getopt.Option) {\n\t\t\t\t\tgetopt.AddOption(o)\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tgetopt.PrintUsage(os.Stderr)\n\t}\n\n\tif traceP != \"\" {\n\t\tfp, err := os.Create(traceP)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\ttrace.Start(fp)\n\t\tstop = func(c int) { trace.Stop(); os.Exit(c) }\n\t\tdefer func() { trace.Stop() }()\n\t}\n\n\tif help {\n\t\tgetopt.CommandLine.PrintUsage(os.Stderr)\n\t\tfmt.Fprintf(os.Stderr, `\nSOURCE may be a module name or a .yang file.\n\nFormats:\n`)\n\t\tfor _, fn := range formats {\n\t\t\tf := formatters[fn]\n\t\t\tfmt.Fprintf(os.Stderr, \" %s - %s\\n\", f.name, f.help)\n\t\t\tif f.flags != nil {\n\t\t\t\tf.flags.PrintOptions(indent.NewWriter(os.Stderr, \" \"))\n\t\t\t}\n\t\t\tfmt.Fprintln(os.Stderr)\n\t\t}\n\t\tstop(0)\n\t}\n\n\tif format == \"\" {\n\t\tformat = \"tree\"\n\t}\n\tif _, ok := formatters[format]; !ok {\n\t\tfmt.Fprintf(os.Stderr, \"%s: invalid format. Choices are %s\\n\", format, strings.Join(formats, \", \"))\n\t\tstop(1)\n\n\t}\n\n\tfiles := getopt.Args()\n\n\tif len(files) > 0 && !strings.HasSuffix(files[0], \".yang\") {\n\t\te, errs := yang.GetModule(files[0], files[1:]...)\n\t\texitIfError(errs)\n\t\tWrite(os.Stdout, e)\n\t\treturn\n\t}\n\n\t\/\/ Okay, either there are no arguments and we read stdin, or there\n\t\/\/ is one or more file names listed. Read them in and display them.\n\n\tms := yang.NewModules()\n\n\tif len(files) == 0 {\n\t\tdata, err := ioutil.ReadAll(os.Stdin)\n\t\tif err == nil {\n\t\t\terr = ms.Parse(string(data), \"<STDIN>\")\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tstop(1)\n\t\t}\n\t}\n\n\tfor _, name := range files {\n\t\tif err := ms.Read(name); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Process the read files, exiting if any errors were found.\n\texitIfError(ms.Process())\n\n\t\/\/ Keep track of the top level modules we read in.\n\t\/\/ Those are the only modules we want to print below.\n\tmods := map[string]*yang.Module{}\n\tvar names []string\n\n\tfor _, m := range ms.Modules {\n\t\tif mods[m.Name] == nil {\n\t\t\tmods[m.Name] = m\n\t\t\tnames = append(names, m.Name)\n\t\t}\n\t}\n\tsort.Strings(names)\n\tentries := make([]*yang.Entry, len(names))\n\tfor x, n := range names {\n\t\tentries[x] = yang.ToEntry(mods[n])\n\t}\n\n\tformatters[format].f(os.Stdout, entries)\n}\n<commit_msg>More doc on --path DIR\/...<commit_after>\/\/ Copyright 2015 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Program yang parses YANG files, displays errors, and possibly writes\n\/\/ something related to the input on output.\n\/\/\n\/\/ Usage: yang [--path DIR] [--format FORMAT] [FORMAT OPTIONS] [MODULE] [FILE ...]\n\/\/\n\/\/ If MODULE is specified (an argument that does not end in .yang), it is taken\n\/\/ as the name of the module to display. Any FILEs specified are read, and the\n\/\/ tree for MODULE is displayed. If MODULE was not defined in FILEs (or no\n\/\/ files were specified), then the file MODULES.yang is read as well. An error\n\/\/ is displayed if no definition for MODULE was found.\n\/\/\n\/\/ If MODULE is missing, then all base modules read from the FILEs are\n\/\/ displayed. If there are no arguments then standard input is parsed.\n\/\/\n\/\/ If DIR is specified, it is considered a comma separated list of paths\n\/\/ to append to the search directory. If DIR appears as DIR\/... then\n\/\/ DIR and all direct and indirect subdirectories are checked.\n\/\/\n\/\/ FORMAT, which defaults to \"tree\", specifes the format of output to produce.\n\/\/ Use \"goyang --help\" for a list of available formats.\n\/\/\n\/\/ FORMAT OPTIONS are flags that apply to a specific format. They must follow\n\/\/ --format.\n\/\/\n\/\/ THIS PROGRAM IS STILL JUST A DEVELOPMENT TOOL.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\/trace\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/openconfig\/goyang\/pkg\/yang\"\n\t\"github.com\/openconfig\/goyang\/pkg\/indent\"\n\t\"github.com\/pborman\/getopt\"\n)\n\n\/\/ Each format must register a formatter with register. The function f will\n\/\/ be called once with the set of yang Entry trees generated.\ntype formatter struct {\n\tname string\n\tf func(io.Writer, []*yang.Entry)\n\thelp string\n\tflags *getopt.Set\n}\n\nvar formatters = map[string]*formatter{}\n\nfunc register(f *formatter) {\n\tformatters[f.name] = f\n}\n\n\/\/ exitIfError writes errs to standard error and exits with an exit status of 1.\n\/\/ If errs is empty then exitIfError does nothing and simply returns.\nfunc exitIfError(errs []error) {\n\tif len(errs) > 0 {\n\t\tfor _, err := range errs {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tstop(1)\n\t}\n}\n\nvar stop = os.Exit\n\nfunc main() {\n\tvar format string\n\tformats := make([]string, 0, len(formatters))\n\tfor k := range formatters {\n\t\tformats = append(formats, k)\n\t}\n\tsort.Strings(formats)\n\n\tvar traceP string\n\tvar help bool\n\tgetopt.ListVarLong(&yang.Path, \"path\", 0, \"comma separated list of directories to add to search path\", \"DIR[,DIR...]\")\n\tgetopt.StringVarLong(&format, \"format\", 0, \"format to display: \"+strings.Join(formats, \", \"), \"FORMAT\")\n\tgetopt.StringVarLong(&traceP, \"trace\", 0, \"write trace into to TRACEFILE\", \"TRACEFILE\")\n\tgetopt.BoolVarLong(&help, \"help\", '?', \"display help\")\n\tgetopt.SetParameters(\"[FORMAT OPTIONS] [SOURCE] [...]\")\n\n\tif err := getopt.Getopt(func(o getopt.Option) bool {\n\t\tif o.Name() == \"--format\" {\n\t\t\tf, ok := formatters[format]\n\t\t\tif !ok {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s: invalid format. Choices are %s\\n\", format, strings.Join(formats, \", \"))\n\t\t\t\tstop(1)\n\t\t\t}\n\t\t\tif f.flags != nil {\n\t\t\t\tf.flags.VisitAll(func(o getopt.Option) {\n\t\t\t\t\tgetopt.AddOption(o)\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tgetopt.PrintUsage(os.Stderr)\n\t}\n\n\tif traceP != \"\" {\n\t\tfp, err := os.Create(traceP)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\ttrace.Start(fp)\n\t\tstop = func(c int) { trace.Stop(); os.Exit(c) }\n\t\tdefer func() { trace.Stop() }()\n\t}\n\n\tif help {\n\t\tgetopt.CommandLine.PrintUsage(os.Stderr)\n\t\tfmt.Fprintf(os.Stderr, `\nSOURCE may be a module name or a .yang file.\n\nFormats:\n`)\n\t\tfor _, fn := range formats {\n\t\t\tf := formatters[fn]\n\t\t\tfmt.Fprintf(os.Stderr, \" %s - %s\\n\", f.name, f.help)\n\t\t\tif f.flags != nil {\n\t\t\t\tf.flags.PrintOptions(indent.NewWriter(os.Stderr, \" \"))\n\t\t\t}\n\t\t\tfmt.Fprintln(os.Stderr)\n\t\t}\n\t\tstop(0)\n\t}\n\n\tif format == \"\" {\n\t\tformat = \"tree\"\n\t}\n\tif _, ok := formatters[format]; !ok {\n\t\tfmt.Fprintf(os.Stderr, \"%s: invalid format. Choices are %s\\n\", format, strings.Join(formats, \", \"))\n\t\tstop(1)\n\n\t}\n\n\tfiles := getopt.Args()\n\n\tif len(files) > 0 && !strings.HasSuffix(files[0], \".yang\") {\n\t\te, errs := yang.GetModule(files[0], files[1:]...)\n\t\texitIfError(errs)\n\t\tWrite(os.Stdout, e)\n\t\treturn\n\t}\n\n\t\/\/ Okay, either there are no arguments and we read stdin, or there\n\t\/\/ is one or more file names listed. Read them in and display them.\n\n\tms := yang.NewModules()\n\n\tif len(files) == 0 {\n\t\tdata, err := ioutil.ReadAll(os.Stdin)\n\t\tif err == nil {\n\t\t\terr = ms.Parse(string(data), \"<STDIN>\")\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tstop(1)\n\t\t}\n\t}\n\n\tfor _, name := range files {\n\t\tif err := ms.Read(name); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Process the read files, exiting if any errors were found.\n\texitIfError(ms.Process())\n\n\t\/\/ Keep track of the top level modules we read in.\n\t\/\/ Those are the only modules we want to print below.\n\tmods := map[string]*yang.Module{}\n\tvar names []string\n\n\tfor _, m := range ms.Modules {\n\t\tif mods[m.Name] == nil {\n\t\t\tmods[m.Name] = m\n\t\t\tnames = append(names, m.Name)\n\t\t}\n\t}\n\tsort.Strings(names)\n\tentries := make([]*yang.Entry, len(names))\n\tfor x, n := range names {\n\t\tentries[x] = yang.ToEntry(mods[n])\n\t}\n\n\tformatters[format].f(os.Stdout, entries)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\/ioutil\"\r\n\t\"log\"\r\n\t\"math\/rand\"\r\n\t\"net\/http\"\r\n\t\"net\/url\"\r\n\t\"os\"\r\n\t\"strconv\"\r\n\t\"strings\"\r\n\t\"time\"\r\n\r\n\t\"github.com\/JustinBeckwith\/go-yelp\/yelp\"\r\n\t\"github.com\/guregu\/null\"\r\n\t\"github.com\/line\/line-bot-sdk-go\/linebot\"\r\n)\r\n\r\nvar o *yelp.AuthOptions\r\n\r\ntype UrlShortener struct {\r\n\tShortUrl string\r\n\tOriginalUrl string\r\n}\r\n\r\nfunc yelp_init() {\r\n\trand.Seed(time.Now().UnixNano())\r\n\r\n\t\/\/ check environment variables\r\n\to = &yelp.AuthOptions{\r\n\t\tConsumerKey: os.Getenv(\"CONSUMER_KEY\"),\r\n\t\tConsumerSecret: os.Getenv(\"CONSUMER_SECRET\"),\r\n\t\tAccessToken: os.Getenv(\"ACCESS_TOKEN\"),\r\n\t\tAccessTokenSecret: os.Getenv(\"ACCESS_TOKEN_SECRET\"),\r\n\t}\r\n\r\n\tif o.ConsumerKey == \"\" || o.ConsumerSecret == \"\" || o.AccessToken == \"\" || o.AccessTokenSecret == \"\" {\r\n\t\tlog.Fatal(\"Wrong environment setting about yelp-api-keys\")\r\n\t}\r\n}\r\n\r\nfunc yelp_parse(bot *linebot.Client, token string, loc *linebot.LocationMessage, food string) {\r\n\tvar err error\r\n\tvar results yelp.SearchResult\r\n\/\/\tvar msgs []linebot.Message\r\n\t\r\n\t\/\/ create a new yelp client with the auth keys\r\n\tclient := yelp.New(o, nil)\r\n\t\r\n\tif loc == nil {\r\n\t\t\/\/ make a simple query for food and location\r\n\t\tresults, err := client.DoSimpleSearch(food, \"台北市通化街\")\r\n\t} else {\r\n\t\t\/\/ Build an advanced set of search criteria that include\r\n\t\t\/\/ general options, and coordinate options.\r\n\t\ts := yelp.SearchOptions{\r\n\t\t\tGeneralOptions: &yelp.GeneralOptions{\r\n\t\t\t\tTerm: food,\r\n\t\t\t},\r\n\t\t\tCoordinateOptions: &yelp.CoordinateOptions{\r\n\t\t\t\tLatitude: null.FloatFrom(loc.Latitude),\r\n\t\t\t\tLongitude: null.FloatFrom(loc.Longitude),\r\n\t\t\t},\r\n\t\t}\r\n\r\n\t\t\/\/ Perform the search using the search options\r\n\t\tresults, err := client.DoSearch(s)\r\n\t}\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"查無資料!\\n請重新輸入\\n\\n吃吃 牛肉麵;台北市通化街\")).Do()\r\n\t}\r\n\r\n\tfor j := 0; j < 3; j++ {\r\n\t\ti := 0\r\n\t\tif results.Total >= 20 {\r\n\t\t\ti = rand.Intn(20)\r\n\t\t} else if results.Total >= 10 {\r\n\t\t\ti = rand.Intn(10)\r\n\t\t} else if results.Total > j {\r\n\t\t\ti = j\r\n\t\t} else if results.Total <= j && results.Total != 0 {\r\n\t\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"已無更多資料!\")).Do()\r\n\t\t\tbreak\r\n\t\t}\r\n\t\turlOrig := UrlShortener{}\r\n\t\turlOrig.short(results.Businesses[i].MobileURL)\r\n\t\taddress := strings.Join(results.Businesses[i].Location.DisplayAddress, \",\")\r\n\t\tvar largeImageURL = strings.Replace(results.Businesses[i].ImageURL, \"ms.jpg\", \"l.jpg\", 1)\r\n\r\n\t\t_, err = bot.ReplyMessage(token, linebot.NewImageMessage(largeImageURL, largeImageURL), linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl), linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude)) ).Do()\r\n\/\/\t\tmsgs = append(msgs, linebot.NewImageMessage(largeImageURL, largeImageURL))\r\n\/\/\t\tmsgs = append(msgs, linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl))\r\n\/\/\t\tmsgs = append(msgs, linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude)))\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewImageMessage(largeImageURL, largeImageURL)).Do()\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl)).Do()\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude))).Do()\r\n\t}\r\n\t\r\n\/\/\tif len(msgs) > 0 {\r\n\/\/\t\t_, err = bot.ReplyMessage(token, msgs).Do()\r\n\/\/\t}\r\n}\r\n\r\nfunc getResponseData(urlOrig string) string {\r\n\tresponse, err := http.Get(urlOrig)\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t}\r\n\tdefer response.Body.Close()\r\n\tcontents, err := ioutil.ReadAll(response.Body)\r\n\treturn string(contents)\r\n}\r\n\r\nfunc isGdShortener(urlOrig string) (string, string) {\r\n\tescapedUrl := url.QueryEscape(urlOrig)\r\n\tisGdUrl := fmt.Sprintf(\"http:\/\/is.gd\/create.php?url=%s&format=simple\", escapedUrl)\r\n\treturn getResponseData(isGdUrl), urlOrig\r\n}\r\n\r\nfunc (u *UrlShortener) short(urlOrig string) *UrlShortener {\r\n\tshortUrl, originalUrl := isGdShortener(urlOrig)\r\n\tu.ShortUrl = shortUrl\r\n\tu.OriginalUrl = originalUrl\r\n\treturn u\r\n}\r\n<commit_msg>test location<commit_after>package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\/ioutil\"\r\n\t\"log\"\r\n\t\"math\/rand\"\r\n\t\"net\/http\"\r\n\t\"net\/url\"\r\n\t\"os\"\r\n\t\"strconv\"\r\n\t\"strings\"\r\n\t\"time\"\r\n\r\n\t\"github.com\/JustinBeckwith\/go-yelp\/yelp\"\r\n\t\"github.com\/guregu\/null\"\r\n\t\"github.com\/line\/line-bot-sdk-go\/linebot\"\r\n)\r\n\r\nvar o *yelp.AuthOptions\r\n\r\ntype UrlShortener struct {\r\n\tShortUrl string\r\n\tOriginalUrl string\r\n}\r\n\r\nfunc yelp_init() {\r\n\trand.Seed(time.Now().UnixNano())\r\n\r\n\t\/\/ check environment variables\r\n\to = &yelp.AuthOptions{\r\n\t\tConsumerKey: os.Getenv(\"CONSUMER_KEY\"),\r\n\t\tConsumerSecret: os.Getenv(\"CONSUMER_SECRET\"),\r\n\t\tAccessToken: os.Getenv(\"ACCESS_TOKEN\"),\r\n\t\tAccessTokenSecret: os.Getenv(\"ACCESS_TOKEN_SECRET\"),\r\n\t}\r\n\r\n\tif o.ConsumerKey == \"\" || o.ConsumerSecret == \"\" || o.AccessToken == \"\" || o.AccessTokenSecret == \"\" {\r\n\t\tlog.Fatal(\"Wrong environment setting about yelp-api-keys\")\r\n\t}\r\n}\r\n\r\nfunc yelp_parse(bot *linebot.Client, token string, loc *linebot.LocationMessage, food string) {\r\n\tvar err error\r\n\/\/\tvar msgs []linebot.Message\r\n\t\r\n\t\/\/ create a new yelp client with the auth keys\r\n\tclient := yelp.New(o, nil)\r\n\t\r\n\tif loc == nil {\r\n\t\t\/\/ make a simple query for food and location\r\n\t\tresults, err := client.DoSimpleSearch(food, \"台北市通化街\")\r\n\t} else {\r\n\t\t\/\/ Build an advanced set of search criteria that include\r\n\t\t\/\/ general options, and coordinate options.\r\n\t\ts := yelp.SearchOptions{\r\n\t\t\tGeneralOptions: &yelp.GeneralOptions{\r\n\t\t\t\tTerm: food,\r\n\t\t\t},\r\n\t\t\tCoordinateOptions: &yelp.CoordinateOptions{\r\n\t\t\t\tLatitude: null.FloatFrom(loc.Latitude),\r\n\t\t\t\tLongitude: null.FloatFrom(loc.Longitude),\r\n\t\t\t},\r\n\t\t}\r\n\r\n\t\t\/\/ Perform the search using the search options\r\n\t\tresults, err := client.DoSearch(s)\r\n\t}\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"查無資料!\\n請重新輸入\\n\\n吃吃 牛肉麵;台北市通化街\")).Do()\r\n\t}\r\n\r\n\tfor j := 0; j < 3; j++ {\r\n\t\ti := 0\r\n\t\tif results.Total >= 20 {\r\n\t\t\ti = rand.Intn(20)\r\n\t\t} else if results.Total >= 10 {\r\n\t\t\ti = rand.Intn(10)\r\n\t\t} else if results.Total > j {\r\n\t\t\ti = j\r\n\t\t} else if results.Total <= j && results.Total != 0 {\r\n\t\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"已無更多資料!\")).Do()\r\n\t\t\tbreak\r\n\t\t}\r\n\t\turlOrig := UrlShortener{}\r\n\t\turlOrig.short(results.Businesses[i].MobileURL)\r\n\t\taddress := strings.Join(results.Businesses[i].Location.DisplayAddress, \",\")\r\n\t\tvar largeImageURL = strings.Replace(results.Businesses[i].ImageURL, \"ms.jpg\", \"l.jpg\", 1)\r\n\r\n\t\t_, err = bot.ReplyMessage(token, linebot.NewImageMessage(largeImageURL, largeImageURL), linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl), linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude)) ).Do()\r\n\/\/\t\tmsgs = append(msgs, linebot.NewImageMessage(largeImageURL, largeImageURL))\r\n\/\/\t\tmsgs = append(msgs, linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl))\r\n\/\/\t\tmsgs = append(msgs, linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude)))\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewImageMessage(largeImageURL, largeImageURL)).Do()\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewTextMessage(\"店名:\"+results.Businesses[i].Name+\"\\n電話:\"+results.Businesses[i].Phone+\"\\n評比:\"+strconv.FormatFloat(float64(results.Businesses[i].Rating), 'f', 1, 64)+\"\\n更多資訊:\"+urlOrig.ShortUrl)).Do()\r\n\/\/\t\t_, err = bot.ReplyMessage(token, linebot.NewLocationMessage(results.Businesses[i].Name+\"\\n\", address, float64(results.Businesses[i].Location.Coordinate.Latitude), float64(results.Businesses[i].Location.Coordinate.Longitude))).Do()\r\n\t}\r\n\t\r\n\/\/\tif len(msgs) > 0 {\r\n\/\/\t\t_, err = bot.ReplyMessage(token, msgs).Do()\r\n\/\/\t}\r\n}\r\n\r\nfunc getResponseData(urlOrig string) string {\r\n\tresponse, err := http.Get(urlOrig)\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t}\r\n\tdefer response.Body.Close()\r\n\tcontents, err := ioutil.ReadAll(response.Body)\r\n\treturn string(contents)\r\n}\r\n\r\nfunc isGdShortener(urlOrig string) (string, string) {\r\n\tescapedUrl := url.QueryEscape(urlOrig)\r\n\tisGdUrl := fmt.Sprintf(\"http:\/\/is.gd\/create.php?url=%s&format=simple\", escapedUrl)\r\n\treturn getResponseData(isGdUrl), urlOrig\r\n}\r\n\r\nfunc (u *UrlShortener) short(urlOrig string) *UrlShortener {\r\n\tshortUrl, originalUrl := isGdShortener(urlOrig)\r\n\tu.ShortUrl = shortUrl\r\n\tu.OriginalUrl = originalUrl\r\n\treturn u\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package lbrelease\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lhcb-org\/lbx\/lbx\"\n)\n\ntype GetPack struct {\n\tReqPkg string \/\/ requested package\n\tReqPkgVers string\n\n\tpkgs map[string][]lbx.RepoInfo\n\tprojs []string\n\trepos lbx.RepoInfos\n\n\tsel_repo string \/\/ selected repository\n\tsel_hat string \/\/ selected repository hat\n\n\tproj_name string\n\tproj_vers string\n\n\tinit bool\n}\n\nfunc (gp *GetPack) setup() error {\n\tvar err error\n\tif gp.init {\n\t\treturn err\n\t}\n\n\terr = gp.initRepos(nil, \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gp.initPkgs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgp.init = true\n\treturn err\n}\n\nfunc (gp *GetPack) initRepos(excludes []string, user, protocol string) error {\n\tvar err error\n\tif gp.repos != nil {\n\t\treturn err\n\t}\n\n\texcl := map[string]struct{}{}\n\tfor _, v := range excludes {\n\t\texcl[v] = struct{}{}\n\t}\n\n\tgp.repos = make(lbx.RepoInfos, 3)\n\n\t\/\/ prepare repositories urls\n\t\/\/ filter the requested protocols for the known repositories\n\tfor k, v := range lbx.Repositories(user, protocol) {\n\t\tif _, dup := excl[k]; dup {\n\t\t\tcontinue\n\t\t}\n\t\tgp.repos[k] = v\n\t}\n\n\tif len(gp.repos) <= 0 {\n\t\treturn fmt.Errorf(\"getpack: unable to find a repository for the specified protocol\")\n\t}\n\n\treturn err\n}\n\nfunc (gp *GetPack) initPkgs() error {\n\tvar err error\n\tif gp.pkgs != nil {\n\t\treturn err\n\t}\n\n\tgp.pkgs = make(map[string][]lbx.RepoInfo)\n\n\tfor _, repo := range gp.repos {\n\t\tfor _, p := range repo.ListPackages(gp.sel_hat) {\n\t\t\tif _, ok := gp.pkgs[p]; !ok {\n\t\t\t\tgp.pkgs[p] = make([]lbx.RepoInfo, 0, 1)\n\t\t\t}\n\t\t\tgp.pkgs[p] = append(gp.pkgs[p], repo)\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (gp *GetPack) Run() error {\n\tvar err error\n\terr = gp.setup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n<commit_msg>getpack: typos<commit_after>package lbrelease\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lhcb-org\/lbx\/lbx\"\n)\n\ntype GetPack struct {\n\tReqPkg string \/\/ requested package\n\tReqPkgVers string\n\n\tpkgs map[string][]lbx.RepoInfo\n\tprojs []string\n\trepos lbx.RepoInfos\n\n\tsel_repo string \/\/ selected repository\n\tsel_hat string \/\/ selected repository hat\n\n\tproj_name string\n\tproj_vers string\n\n\tinit bool\n}\n\nfunc (gp *GetPack) setup() error {\n\tvar err error\n\tif gp.init {\n\t\treturn err\n\t}\n\n\terr = gp.initRepos(nil, \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gp.initPkgs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgp.init = true\n\treturn err\n}\n\nfunc (gp *GetPack) initRepos(excludes []string, user, protocol string) error {\n\tvar err error\n\tif gp.repos != nil {\n\t\treturn err\n\t}\n\n\texcl := map[string]struct{}{}\n\tfor _, v := range excludes {\n\t\texcl[v] = struct{}{}\n\t}\n\n\tgp.repos = make(lbx.RepoInfos, 3)\n\n\t\/\/ prepare repositories urls\n\t\/\/ filter the requested protocols for the known repositories\n\tfor k, v := range lbx.Repositories(user, protocol) {\n\t\tif _, dup := excl[k]; dup {\n\t\t\tcontinue\n\t\t}\n\t\tgp.repos[k] = v\n\t}\n\n\tif len(gp.repos) <= 0 {\n\t\treturn fmt.Errorf(\"getpack: unable to find a repository for the specified protocol\")\n\t}\n\n\treturn err\n}\n\nfunc (gp *GetPack) initPkgs() error {\n\tvar err error\n\tif gp.pkgs != nil {\n\t\treturn err\n\t}\n\n\tgp.pkgs = make(map[string][]lbx.RepoInfo)\n\n\tfor _, repo := range gp.repos {\n\t\tfor _, p := range repo[0].ListPackages(gp.sel_hat) {\n\t\t\tif _, ok := gp.pkgs[p]; !ok {\n\t\t\t\tgp.pkgs[p] = make([]lbx.RepoInfo, 0, 1)\n\t\t\t}\n\t\t\tgp.pkgs[p] = append(gp.pkgs[p], repo[0])\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (gp *GetPack) Run() error {\n\tvar err error\n\terr = gp.setup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"strings\"\n\n\tsys \"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/derekparker\/delve\/command\"\n\t\"github.com\/derekparker\/delve\/proctl\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nconst historyFile string = \".dbg_history\"\n\nfunc Run(run bool, pid int, args []string) {\n\tvar (\n\t\tdbp *proctl.DebuggedProcess\n\t\terr error\n\t\tline = liner.NewLiner()\n\t)\n\tdefer line.Close()\n\n\tswitch {\n\tcase run:\n\t\tconst debugname = \"debug\"\n\t\tcmd := exec.Command(\"go\", \"build\", \"-o\", debugname, \"-gcflags\", \"-N -l\")\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not compile program:\", err)\n\t\t}\n\t\tdefer os.Remove(debugname)\n\n\t\tdbp, err = proctl.Launch(append([]string{\".\/\" + debugname}, args...))\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not launch program:\", err)\n\t\t}\n\tcase pid != 0:\n\t\tdbp, err = proctl.Attach(pid)\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not attach to process:\", err)\n\t\t}\n\tdefault:\n\t\tdbp, err = proctl.Launch(args)\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not launch program:\", err)\n\t\t}\n\t}\n\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, sys.SIGINT)\n\tgo func() {\n\t\tfor _ = range ch {\n\t\t\tif dbp.Running() {\n\t\t\t\tdbp.RequestManualStop()\n\t\t\t}\n\t\t}\n\t}()\n\n\tcmds := command.DebugCommands()\n\tif f, err := os.Open(historyFile); err == nil {\n\t\tline.ReadHistory(f)\n\t\tf.Close()\n\t}\n\tfmt.Println(\"Type 'help' for list of commands.\")\n\n\tfor {\n\t\tcmdstr, err := promptForInput(line)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\thandleExit(dbp, line, 0)\n\t\t\t}\n\t\t\tdie(1, \"Prompt for input failed.\\n\")\n\t\t}\n\n\t\tcmdstr, args := parseCommand(cmdstr)\n\n\t\tif cmdstr == \"exit\" {\n\t\t\thandleExit(dbp, line, 0)\n\t\t}\n\n\t\tcmd := cmds.Find(cmdstr)\n\t\terr = cmd(dbp, args...)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Command failed: %s\\n\", err)\n\t\t}\n\t}\n}\n\nfunc handleExit(dbp *proctl.DebuggedProcess, line *liner.State, status int) {\n\tif f, err := os.Open(historyFile); err == nil {\n\t\tline.WriteHistory(f)\n\t\tf.Close()\n\t}\n\n\tanswer, err := line.Prompt(\"Would you like to kill the process? [y\/n]\")\n\tif err != nil {\n\t\tdie(2, io.EOF)\n\t}\n\tanswer = strings.TrimSuffix(answer, \"\\n\")\n\n\tfor _, bp := range dbp.HWBreakPoints {\n\t\tif bp == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := dbp.Clear(bp.Addr); err != nil {\n\t\t\tfmt.Printf(\"Can't clear breakpoint @%x: %s\\n\", bp.Addr, err)\n\t\t}\n\t}\n\n\tfor pc := range dbp.BreakPoints {\n\t\tif _, err := dbp.Clear(pc); err != nil {\n\t\t\tfmt.Printf(\"Can't clear breakpoint @%x: %s\\n\", pc, err)\n\t\t}\n\t}\n\n\tfmt.Println(\"Detaching from process...\")\n\terr = sys.PtraceDetach(dbp.Process.Pid)\n\tif err != nil {\n\t\tdie(2, \"Could not detach\", err)\n\t}\n\n\tif answer == \"y\" {\n\t\tfmt.Println(\"Killing process\", dbp.Process.Pid)\n\n\t\terr := dbp.Process.Kill()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Could not kill process\", err)\n\t\t}\n\t}\n\n\tdie(status, \"Hope I was of service hunting your bug!\")\n}\n\nfunc die(status int, args ...interface{}) {\n\tfmt.Fprint(os.Stderr, args)\n\tfmt.Fprint(os.Stderr, \"\\n\")\n\tos.Exit(status)\n}\n\nfunc parseCommand(cmdstr string) (string, []string) {\n\tvals := strings.Split(cmdstr, \" \")\n\treturn vals[0], vals[1:]\n}\n\nfunc promptForInput(line *liner.State) (string, error) {\n\tl, err := line.Prompt(\"(dlv) \")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tl = strings.TrimSuffix(l, \"\\n\")\n\tif l != \"\" {\n\t\tline.AppendHistory(l)\n\t}\n\n\treturn l, nil\n}\n<commit_msg>Fix readline history<commit_after>package cli\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"strings\"\n\n\tsys \"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/derekparker\/delve\/command\"\n\t\"github.com\/derekparker\/delve\/proctl\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nconst historyFile string = \".dbg_history\"\n\nfunc Run(run bool, pid int, args []string) {\n\tvar (\n\t\tdbp *proctl.DebuggedProcess\n\t\terr error\n\t\tline = liner.NewLiner()\n\t)\n\tdefer line.Close()\n\n\tswitch {\n\tcase run:\n\t\tconst debugname = \"debug\"\n\t\tcmd := exec.Command(\"go\", \"build\", \"-o\", debugname, \"-gcflags\", \"-N -l\")\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not compile program:\", err)\n\t\t}\n\t\tdefer os.Remove(debugname)\n\n\t\tdbp, err = proctl.Launch(append([]string{\".\/\" + debugname}, args...))\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not launch program:\", err)\n\t\t}\n\tcase pid != 0:\n\t\tdbp, err = proctl.Attach(pid)\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not attach to process:\", err)\n\t\t}\n\tdefault:\n\t\tdbp, err = proctl.Launch(args)\n\t\tif err != nil {\n\t\t\tdie(1, \"Could not launch program:\", err)\n\t\t}\n\t}\n\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, sys.SIGINT)\n\tgo func() {\n\t\tfor _ = range ch {\n\t\t\tif dbp.Running() {\n\t\t\t\tdbp.RequestManualStop()\n\t\t\t}\n\t\t}\n\t}()\n\n\tcmds := command.DebugCommands()\n\tf, err := os.Open(historyFile)\n\tif err != nil {\n\t\tf, _ = os.Create(historyFile)\n\t}\n\tline.ReadHistory(f)\n\tf.Close()\n\tfmt.Println(\"Type 'help' for list of commands.\")\n\n\tfor {\n\t\tcmdstr, err := promptForInput(line)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\thandleExit(dbp, line, 0)\n\t\t\t}\n\t\t\tdie(1, \"Prompt for input failed.\\n\")\n\t\t}\n\n\t\tcmdstr, args := parseCommand(cmdstr)\n\n\t\tif cmdstr == \"exit\" {\n\t\t\thandleExit(dbp, line, 0)\n\t\t}\n\n\t\tcmd := cmds.Find(cmdstr)\n\t\terr = cmd(dbp, args...)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Command failed: %s\\n\", err)\n\t\t}\n\t}\n}\n\nfunc handleExit(dbp *proctl.DebuggedProcess, line *liner.State, status int) {\n\tif f, err := os.OpenFile(historyFile, os.O_RDWR, 0666); err == nil {\n\t\t_, err := line.WriteHistory(f)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"readline histroy: \", err)\n\t\t}\n\t\tf.Close()\n\t}\n\n\tanswer, err := line.Prompt(\"Would you like to kill the process? [y\/n]\")\n\tif err != nil {\n\t\tdie(2, io.EOF)\n\t}\n\tanswer = strings.TrimSuffix(answer, \"\\n\")\n\n\tfor _, bp := range dbp.HWBreakPoints {\n\t\tif bp == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := dbp.Clear(bp.Addr); err != nil {\n\t\t\tfmt.Printf(\"Can't clear breakpoint @%x: %s\\n\", bp.Addr, err)\n\t\t}\n\t}\n\n\tfor pc := range dbp.BreakPoints {\n\t\tif _, err := dbp.Clear(pc); err != nil {\n\t\t\tfmt.Printf(\"Can't clear breakpoint @%x: %s\\n\", pc, err)\n\t\t}\n\t}\n\n\tfmt.Println(\"Detaching from process...\")\n\terr = sys.PtraceDetach(dbp.Process.Pid)\n\tif err != nil {\n\t\tdie(2, \"Could not detach\", err)\n\t}\n\n\tif answer == \"y\" {\n\t\tfmt.Println(\"Killing process\", dbp.Process.Pid)\n\n\t\terr := dbp.Process.Kill()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Could not kill process\", err)\n\t\t}\n\t}\n\n\tdie(status, \"Hope I was of service hunting your bug!\")\n}\n\nfunc die(status int, args ...interface{}) {\n\tfmt.Fprint(os.Stderr, args)\n\tfmt.Fprint(os.Stderr, \"\\n\")\n\tos.Exit(status)\n}\n\nfunc parseCommand(cmdstr string) (string, []string) {\n\tvals := strings.Split(cmdstr, \" \")\n\treturn vals[0], vals[1:]\n}\n\nfunc promptForInput(line *liner.State) (string, error) {\n\tl, err := line.Prompt(\"(dlv) \")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tl = strings.TrimSuffix(l, \"\\n\")\n\tif l != \"\" {\n\t\tline.AppendHistory(l)\n\t}\n\n\treturn l, nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>kubetcl create --validate crashes when no apiVersion or kind is provided<commit_after><|endoftext|>"} {"text":"<commit_before>package bootconfig\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/systemboot\/systemboot\/pkg\/crypto\"\n\t\"github.com\/u-root\/u-root\/pkg\/kexec\"\n\t\"github.com\/u-root\/u-root\/pkg\/kexecbin\"\n)\n\n\/\/ BootConfig is a general-purpose boot configuration. It draws some\n\/\/ characteristics from FIT but it's not compatible with it. It uses\n\/\/ JSON for interoperability.\ntype BootConfig struct {\n\tName string `json:\"name,omitempty\"`\n\tKernel string `json:\"kernel\"`\n\tInitramfs string `json:\"initramfs,omitempty\"`\n\tKernelArgs string `json:\"kernel_args,omitempty\"`\n\tDeviceTree string `json:\"devicetree,omitempty\"`\n}\n\n\/\/ IsValid returns true if a BootConfig object has valid content, and false\n\/\/ otherwise\nfunc (bc *BootConfig) IsValid() bool {\n\treturn bc.Kernel != \"\"\n}\n\n\/\/ Boot tries to boot the kernel with optional initramfs and command line\n\/\/ options. If a device-tree is specified, that will be used too\nfunc (bc *BootConfig) Boot() error {\n\t\/\/ kexec: try the kexecbin executable first, and if it fails, use the native\n\t\/\/ Go implementation of kexec from u-root\n\tlog.Printf(\"Trying KexecBin on %+v\", bc)\n\tcrypto.TryMeasureBootConfig(bc.Name, bc.Kernel, bc.Initramfs, bc.KernelArgs, bc.DeviceTree)\n\tif err := kexecbin.KexecBin(bc.Kernel, bc.KernelArgs, bc.Initramfs, bc.DeviceTree); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tlog.Printf(\"BootConfig: KexecBin failed, trying pure-Go kexec. Error: %v\", err)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\tkernel, err := os.Open(bc.Kernel)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar initramfs *os.File\n\tif bc.Initramfs != \"\" {\n\t\tinitramfs, err = os.Open(bc.Initramfs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer func() {\n\t\t\/\/ clean up\n\t\tif kernel != nil {\n\t\t\tif err := kernel.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Error closing kernel file descriptor: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif initramfs != nil {\n\t\t\tif err := initramfs.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Error closing initramfs file descriptor: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn kexec.FileLoad(kernel, initramfs, bc.KernelArgs)\n}\n\n\/\/ NewBootConfig parses a boot configuration in JSON format and returns a\n\/\/ BootConfig object.\nfunc NewBootConfig(data []byte) (*BootConfig, error) {\n\tvar bootconfig BootConfig\n\tif err := json.Unmarshal(data, &bootconfig); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &bootconfig, nil\n}\n<commit_msg>Bootconfig fixes\/improvements (#79)<commit_after>package bootconfig\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/systemboot\/systemboot\/pkg\/crypto\"\n\t\"github.com\/u-root\/u-root\/pkg\/kexec\"\n\t\"github.com\/u-root\/u-root\/pkg\/kexecbin\"\n)\n\n\/\/ BootConfig is a general-purpose boot configuration. It draws some\n\/\/ characteristics from FIT but it's not compatible with it. It uses\n\/\/ JSON for interoperability.\ntype BootConfig struct {\n\tName string `json:\"name,omitempty\"`\n\tKernel string `json:\"kernel\"`\n\tInitramfs string `json:\"initramfs,omitempty\"`\n\tKernelArgs string `json:\"kernel_args,omitempty\"`\n\tDeviceTree string `json:\"devicetree,omitempty\"`\n}\n\n\/\/ IsValid returns true if a BootConfig object has valid content, and false\n\/\/ otherwise\nfunc (bc *BootConfig) IsValid() bool {\n\treturn bc.Kernel != \"\"\n}\n\n\/\/ Boot tries to boot the kernel with optional initramfs and command line\n\/\/ options. If a device-tree is specified, that will be used too\nfunc (bc *BootConfig) Boot() error {\n\tcrypto.TryMeasureBootConfig(bc.Name, bc.Kernel, bc.Initramfs, bc.KernelArgs, bc.DeviceTree)\n\n\t\/\/ kexec: try the kexecbin executable first\n\t\/\/ if it is not available fallback to the Go implementation of kexec from u-root\n\tlog.Printf(\"Trying KexecBin on %+v\", bc)\n\tif err := kexecbin.KexecBin(bc.Kernel, bc.KernelArgs, bc.Initramfs, bc.DeviceTree); err != nil {\n\t\t\/\/ If it was found nowhere in PATH it will be exec.Error{exec.ErrNotFound}, which we have to unpack\n\t\texecErr, ok := err.(*exec.Error)\n\t\tif (ok && execErr.Err == exec.ErrNotFound) || os.IsNotExist(err) {\n\t\t\tlog.Printf(\"BootConfig: KexecBin is not available, trying pure-Go kexec. Error: %v\", err)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkernel, err := os.Open(bc.Kernel)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar initramfs *os.File\n\tif bc.Initramfs != \"\" {\n\t\tinitramfs, err = os.Open(bc.Initramfs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer func() {\n\t\t\/\/ clean up\n\t\tif kernel != nil {\n\t\t\tif err := kernel.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Error closing kernel file descriptor: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif initramfs != nil {\n\t\t\tif err := initramfs.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Error closing initramfs file descriptor: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tif err := kexec.FileLoad(kernel, initramfs, bc.KernelArgs); err != nil {\n\t\treturn err\n\t}\n\n\terr = kexec.Reboot()\n\tif err == nil {\n\t\treturn errors.New(\"Unexpectedly returned from Reboot() without error. The system did not reboot\")\n\t}\n\treturn err\n\n}\n\n\/\/ NewBootConfig parses a boot configuration in JSON format and returns a\n\/\/ BootConfig object.\nfunc NewBootConfig(data []byte) (*BootConfig, error) {\n\tvar bootconfig BootConfig\n\tif err := json.Unmarshal(data, &bootconfig); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &bootconfig, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package byteutil\n\nimport \"bytes\"\n\n\/\/ ReverseByteSlice reverses a byte slice\nfunc ReverseByteSlice(s []byte) []byte {\n\t\/\/ make a copy of s\n\tl := len(s)\n\tt := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tt[i] = s[i]\n\t}\n\n\t\/\/ reverse\n\tfor i, j := 0, len(t)-1; i < j; i, j = i+1, j-1 {\n\t\tt[i], t[j] = t[j], t[i]\n\t}\n\treturn t\n}\n\n\/\/ WrapByteSlice wraps byte slice\nfunc WrapByteSlice(s []byte, width int) []byte {\n\tif width < 1 {\n\t\treturn s\n\t}\n\tvar buffer bytes.Buffer\n\tl := len(s)\n\tvar lines int\n\tif l%width == 0 {\n\t\tlines = l\/width - 1\n\t} else {\n\t\tlines = int(l \/ width)\n\t}\n\tvar start, end int\n\tfor i := 0; i <= lines; i++ {\n\t\tstart = i * width\n\t\tend = (i + 1) * width\n\t\tif end > l {\n\t\t\tend = l\n\t\t}\n\n\t\tbuffer.Write(s[start:end])\n\t\tif i < lines {\n\t\t\tbuffer.WriteString(\"\\n\")\n\t\t}\n\t}\n\treturn buffer.Bytes()\n}\n\n\/\/ SubSlice provides similar slice indexing as python with one exception\n\/\/ that end could be equal to 0.\n\/\/ So we could get the last element by SubSlice(s, -1, 0)\n\/\/ or get the whole element by SubSlice(s, 0, 0)\nfunc SubSlice(slice []byte, start int, end int) []byte {\n\tif start == 0 && end == 0 {\n\t\treturn slice\n\t}\n\tif start == end || (start < 0 && end > 0) {\n\t\treturn []byte{}\n\t}\n\tl := len(slice)\n\ts, e := start, end\n\n\tif s < 0 {\n\t\ts = l + s\n\t\tif s < 1 {\n\t\t\ts = 0\n\t\t}\n\t}\n\tif e < 0 {\n\t\te = l + e\n\t\tif e < 0 {\n\t\t\te = 0\n\t\t}\n\t}\n\tif e == 0 || e > l {\n\t\te = l\n\t}\n\treturn slice[s:e]\n}\n\n\/\/ ByteToLower lowers a byte\nfunc ByteToLower(b byte) byte {\n\tif b <= '\\u007F' {\n\t\tif 'A' <= b && b <= 'Z' {\n\t\t\tb += 'a' - 'A'\n\t\t}\n\t\treturn b\n\t}\n\treturn b\n}\n\n\/\/ ByteToUpper upper a byte\nfunc ByteToUpper(b byte) byte {\n\tif b <= '\\u007F' {\n\t\tif 'a' <= b && b <= 'z' {\n\t\t\tb -= 'a' - 'A'\n\t\t}\n\t\treturn b\n\t}\n\treturn b\n}\n\n\/\/ MakeQuerySlice is used to replace map.\n\/\/ see: http:\/\/blog.shenwei.me\/map-is-not-the-fastest-in-go\/\nfunc MakeQuerySlice(letters []byte) []byte {\n\tmax := -1\n\tfor i := 0; i < len(letters); i++ {\n\t\tj := int(letters[i])\n\t\tif max < j {\n\t\t\tmax = j\n\t\t}\n\t}\n\tquerySlice := make([]byte, max+1)\n\tfor i := 0; i < len(letters); i++ {\n\t\tquerySlice[int(letters[i])] = letters[i]\n\t}\n\treturn querySlice\n}\n\n\/\/ Split splits a byte slice by giveen letters.\n\/\/ It's much faster than regexp.Split\nfunc Split(slice []byte, letters []byte) [][]byte {\n\tquerySlice := MakeQuerySlice(letters)\n\tresults := [][]byte{}\n\ttmp := []byte{}\n\n\tvar j int\n\tvar value byte\n\tvar sliceSize = len(querySlice)\n\tfor _, b := range slice {\n\t\tj = int(b)\n\t\tif j >= sliceSize { \/\/ not delimiter byte\n\t\t\ttmp = append(tmp, b)\n\t\t\tcontinue\n\t\t}\n\t\tvalue = querySlice[j]\n\t\tif value == 0 { \/\/ not delimiter byte\n\t\t\ttmp = append(tmp, b)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif len(tmp) > 0 {\n\t\t\t\tresults = append(results, tmp)\n\t\t\t\ttmp = []byte{}\n\t\t\t}\n\t\t}\n\t}\n\tif len(tmp) > 0 {\n\t\tresults = append(results, tmp)\n\t}\n\treturn results\n}\n<commit_msg>add inplace replace reverse function<commit_after>package byteutil\n\nimport \"bytes\"\n\n\/\/ ReverseByteSlice reverses a byte slice\nfunc ReverseByteSlice(s []byte) []byte {\n\t\/\/ make a copy of s\n\tl := len(s)\n\tt := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tt[i] = s[i]\n\t}\n\n\t\/\/ reverse\n\tfor i, j := 0, l-1; i < j; i, j = i+1, j-1 {\n\t\tt[i], t[j] = t[j], t[i]\n\t}\n\treturn t\n}\n\n\/\/ ReverseByteSliceInplace reverses a byte slice\nfunc ReverseByteSliceInplace(s []byte) {\n\t\/\/ reverse\n\tfor i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {\n\t\ts[i], s[j] = s[j], s[i]\n\t}\n}\n\n\/\/ WrapByteSlice wraps byte slice\nfunc WrapByteSlice(s []byte, width int) []byte {\n\tif width < 1 {\n\t\treturn s\n\t}\n\tvar buffer bytes.Buffer\n\tl := len(s)\n\tvar lines int\n\tif l%width == 0 {\n\t\tlines = l\/width - 1\n\t} else {\n\t\tlines = int(l \/ width)\n\t}\n\tvar start, end int\n\tfor i := 0; i <= lines; i++ {\n\t\tstart = i * width\n\t\tend = (i + 1) * width\n\t\tif end > l {\n\t\t\tend = l\n\t\t}\n\n\t\tbuffer.Write(s[start:end])\n\t\tif i < lines {\n\t\t\tbuffer.WriteString(\"\\n\")\n\t\t}\n\t}\n\treturn buffer.Bytes()\n}\n\n\/\/ SubSlice provides similar slice indexing as python with one exception\n\/\/ that end could be equal to 0.\n\/\/ So we could get the last element by SubSlice(s, -1, 0)\n\/\/ or get the whole element by SubSlice(s, 0, 0)\nfunc SubSlice(slice []byte, start int, end int) []byte {\n\tif start == 0 && end == 0 {\n\t\treturn slice\n\t}\n\tif start == end || (start < 0 && end > 0) {\n\t\treturn []byte{}\n\t}\n\tl := len(slice)\n\ts, e := start, end\n\n\tif s < 0 {\n\t\ts = l + s\n\t\tif s < 1 {\n\t\t\ts = 0\n\t\t}\n\t}\n\tif e < 0 {\n\t\te = l + e\n\t\tif e < 0 {\n\t\t\te = 0\n\t\t}\n\t}\n\tif e == 0 || e > l {\n\t\te = l\n\t}\n\treturn slice[s:e]\n}\n\n\/\/ ByteToLower lowers a byte\nfunc ByteToLower(b byte) byte {\n\tif b <= '\\u007F' {\n\t\tif 'A' <= b && b <= 'Z' {\n\t\t\tb += 'a' - 'A'\n\t\t}\n\t\treturn b\n\t}\n\treturn b\n}\n\n\/\/ ByteToUpper upper a byte\nfunc ByteToUpper(b byte) byte {\n\tif b <= '\\u007F' {\n\t\tif 'a' <= b && b <= 'z' {\n\t\t\tb -= 'a' - 'A'\n\t\t}\n\t\treturn b\n\t}\n\treturn b\n}\n\n\/\/ MakeQuerySlice is used to replace map.\n\/\/ see: http:\/\/blog.shenwei.me\/map-is-not-the-fastest-in-go\/\nfunc MakeQuerySlice(letters []byte) []byte {\n\tmax := -1\n\tfor i := 0; i < len(letters); i++ {\n\t\tj := int(letters[i])\n\t\tif max < j {\n\t\t\tmax = j\n\t\t}\n\t}\n\tquerySlice := make([]byte, max+1)\n\tfor i := 0; i < len(letters); i++ {\n\t\tquerySlice[int(letters[i])] = letters[i]\n\t}\n\treturn querySlice\n}\n\n\/\/ Split splits a byte slice by giveen letters.\n\/\/ It's much faster than regexp.Split\nfunc Split(slice []byte, letters []byte) [][]byte {\n\tquerySlice := MakeQuerySlice(letters)\n\tresults := [][]byte{}\n\ttmp := []byte{}\n\n\tvar j int\n\tvar value byte\n\tvar sliceSize = len(querySlice)\n\tfor _, b := range slice {\n\t\tj = int(b)\n\t\tif j >= sliceSize { \/\/ not delimiter byte\n\t\t\ttmp = append(tmp, b)\n\t\t\tcontinue\n\t\t}\n\t\tvalue = querySlice[j]\n\t\tif value == 0 { \/\/ not delimiter byte\n\t\t\ttmp = append(tmp, b)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif len(tmp) > 0 {\n\t\t\t\tresults = append(results, tmp)\n\t\t\t\ttmp = []byte{}\n\t\t\t}\n\t\t}\n\t}\n\tif len(tmp) > 0 {\n\t\tresults = append(results, tmp)\n\t}\n\treturn results\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/fuse\/fsutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n)\n\n\/\/ A type that manages read and read\/write leases for anonymous temporary files.\n\/\/\n\/\/ Safe for concurrent access.\ntype FileLeaser interface {\n\t\/\/ Create a new anonymous file, and return a read\/write lease for it. The\n\t\/\/ read\/write lease will pin resources until rwl.Downgrade is called. It need\n\t\/\/ not be called if the process is exiting.\n\tNewFile() (rwl ReadWriteLease, err error)\n\n\t\/\/ Revoke all read leases that have been issued. For testing use only.\n\tRevokeReadLeases()\n}\n\n\/\/ Create a new file leaser that uses the supplied directory for temporary\n\/\/ files (before unlinking them) and attempts to keep usage in bytes below the\n\/\/ given limit. If dir is empty, the system default will be used.\n\/\/\n\/\/ Usage may exceed the given limit if there are read\/write leases whose total\n\/\/ size exceeds the limit, since such leases cannot be revoked.\nfunc NewFileLeaser(\n\tdir string,\n\tlimitBytes int64) (fl FileLeaser) {\n\ttyped := &fileLeaser{\n\t\tdir: dir,\n\t\tlimit: limitBytes,\n\t\treadLeasesIndex: make(map[*readLease]*list.Element),\n\t}\n\n\ttyped.mu = syncutil.NewInvariantMutex(typed.checkInvariants)\n\n\tfl = typed\n\treturn\n}\n\ntype fileLeaser struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tdir string\n\tlimit int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ A lock that guards the mutable state in this struct. Usually this is used\n\t\/\/ only for light weight operations, but while evicting it may require\n\t\/\/ waiting on a goroutine that is holding a read lease lock while reading\n\t\/\/ from a file.\n\t\/\/\n\t\/\/ Lock ordering\n\t\/\/ -------------\n\t\/\/\n\t\/\/ Define < to be the minimum strict partial order satisfying:\n\t\/\/\n\t\/\/ 1. For any read\/write lease W, W < leaser.\n\t\/\/ 2. For any read lease R, leaser < R.\n\t\/\/\n\t\/\/ In other words: read\/write before leaser before read, and never hold two\n\t\/\/ locks from the same category together.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current estimated total size of outstanding read\/write leases. This is\n\t\/\/ only an estimate because we can't synchronize its update with a call to\n\t\/\/ the wrapped file to e.g. write or truncate.\n\treadWriteOutstanding int64\n\n\t\/\/ All outstanding read leases, ordered by recency of use.\n\t\/\/\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\treadLeases list.List\n\n\t\/\/ The sum of all outstanding read lease sizes.\n\t\/\/\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\treadOutstanding int64\n\n\t\/\/ Index of read leases by pointer.\n\t\/\/\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\treadLeasesIndex map[*readLease]*list.Element\n}\n\nfunc (fl *fileLeaser) NewFile() (rwl ReadWriteLease, err error) {\n\t\/\/ Create an anonymous file.\n\tf, err := fsutil.AnonymousFile(fl.dir)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"AnonymousFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Wrap a lease around it.\n\trwl = newReadWriteLease(fl, 0, f)\n\n\treturn\n}\n\nfunc (fl *fileLeaser) RevokeReadLeases() {\n\tpanic(\"TODO\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc maxInt64(a int64, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) checkInvariants() {\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tfunc() {\n\t\t\trl.Mu.Lock()\n\t\t\tdefer rl.Mu.Unlock()\n\n\t\t\tif rl.revoked() {\n\t\t\t\tpanic(\"Found revoked read lease\")\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\tvar sum int64\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tsum += rl.Size()\n\t}\n\n\tif fl.readOutstanding != sum {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readOutstanding mismatch: %v vs. %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tsum))\n\t}\n\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\tif !(0 <= fl.readOutstanding) {\n\t\tpanic(fmt.Sprintf(\"Unexpected readOutstanding: %v\", fl.readOutstanding))\n\t}\n\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\tif !(fl.readOutstanding <= maxInt64(0, fl.limit-fl.readWriteOutstanding)) {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Unexpected readOutstanding: %v. limit: %v, readWriteOutstanding: %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tfl.limit,\n\t\t\tfl.readWriteOutstanding))\n\t}\n\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\tif len(fl.readLeasesIndex) != fl.readLeases.Len() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readLeasesIndex length mismatch: %v vs. %v\",\n\t\t\tlen(fl.readLeasesIndex),\n\t\t\tfl.readLeases.Len()))\n\t}\n\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\tif fl.readLeasesIndex[e.Value.(*readLease)] != e {\n\t\t\tpanic(\"Mismatch in readLeasesIndex\")\n\t\t}\n\t}\n}\n\n\/\/ Add the supplied delta to the leaser's view of outstanding read\/write lease\n\/\/ bytes, then revoke read leases until we're under limit or we run out of\n\/\/ leases to revoke.\n\/\/\n\/\/ Called by readWriteLease while holding its lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) addReadWriteByteDelta(delta int64) {\n\tfl.readWriteOutstanding += delta\n\tfl.evict()\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) overLimit() bool {\n\treturn fl.readOutstanding+fl.readWriteOutstanding > fl.limit\n}\n\n\/\/ Revoke read leases until we're under limit or we run out of things to revoke.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) evict() {\n\tfor fl.overLimit() {\n\t\t\/\/ Do we have anything to revoke?\n\t\tlru := fl.readLeases.Back()\n\t\tif lru == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Revoke it.\n\t\trl := lru.Value.(*readLease)\n\t\tfunc() {\n\t\t\trl.Mu.Lock()\n\t\t\tdefer rl.Mu.Unlock()\n\n\t\t\tfl.revoke(rl)\n\t\t}()\n\t}\n}\n\n\/\/ Note that a read\/write lease of the given size is destroying itself, and\n\/\/ turn it into a read lease of the supplied size wrapped around the given\n\/\/ file.\n\/\/\n\/\/ Called by readWriteLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) downgrade(\n\tsize int64,\n\tfile *os.File) (rl ReadLease) {\n\t\/\/ Create the read lease.\n\trlTyped := newReadLease(size, fl, file)\n\trl = rlTyped\n\n\t\/\/ Update the leaser's state, noting the new read lease and that the\n\t\/\/ read\/write lease has gone away.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\tfl.readWriteOutstanding -= size\n\tfl.readOutstanding += size\n\n\te := fl.readLeases.PushFront(rl)\n\tfl.readLeasesIndex[rlTyped] = e\n\n\t\/\/ Ensure that we're not now over capacity.\n\tfl.evict()\n\n\treturn\n}\n\n\/\/ Upgrade the supplied read lease.\n\/\/\n\/\/ Called by readLease with no lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu, rl.Mu)\nfunc (fl *fileLeaser) upgrade(rl *readLease) (rwl ReadWriteLease, err error) {\n\t\/\/ Grab each lock in turn.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\n\t\/\/ Has the lease already been revoked?\n\tif rl.revoked() {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\tsize := rl.Size()\n\n\t\/\/ Update leaser state.\n\tfl.readWriteOutstanding += size\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Extract the interesting information from the read lease, leaving it an\n\t\/\/ empty husk.\n\tfile := rl.release()\n\n\t\/\/ Create the read\/write lease, telling it that we already know its initial\n\t\/\/ size.\n\trwl = newReadWriteLease(fl, size, file)\n\n\treturn\n}\n\n\/\/ Promote the given read lease to most recently used, if we still know it.\n\/\/ Because our lock order forbids us from acquiring the leaser lock while\n\/\/ holding a read lease lock, this of course races with other promotions.\n\/\/\n\/\/ Called by readLease without holding a lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) promoteToMostRecent(rl *readLease) {\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\te := fl.readLeasesIndex[rl]\n\tif e != nil {\n\t\tfl.readLeases.MoveToFront(e)\n\t}\n}\n\n\/\/ Forcibly revoke the supplied read lease.\n\/\/\n\/\/ REQUIRES: !rl.revoked()\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\n\/\/ LOCKS_REQUIRED(rl.Mu)\nfunc (fl *fileLeaser) revoke(rl *readLease) {\n\tif rl.revoked() {\n\t\tpanic(\"Already revoked\")\n\t}\n\n\tsize := rl.Size()\n\n\t\/\/ Update leaser state.\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Kill the lease and close its file.\n\tfile := rl.release()\n\tif err := file.Close(); err != nil {\n\t\tlog.Println(\"Error closing file for revoked lease:\", err)\n\t}\n}\n\n\/\/ Called by the read lease when the user wants to manually revoke it.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\n\/\/ LOCKS_EXCLUDED(rl.Mu)\nfunc (fl *fileLeaser) revokeVoluntarily(rl *readLease) {\n\t\/\/ Grab each lock in turn.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\n\t\/\/ Has the lease already been revoked?\n\tif rl.revoked() {\n\t\treturn\n\t}\n\n\t\/\/ Revoke it.\n\tfl.revoke(rl)\n}\n<commit_msg>Implemented RevokeReadLeases.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/fuse\/fsutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n)\n\n\/\/ A type that manages read and read\/write leases for anonymous temporary files.\n\/\/\n\/\/ Safe for concurrent access.\ntype FileLeaser interface {\n\t\/\/ Create a new anonymous file, and return a read\/write lease for it. The\n\t\/\/ read\/write lease will pin resources until rwl.Downgrade is called. It need\n\t\/\/ not be called if the process is exiting.\n\tNewFile() (rwl ReadWriteLease, err error)\n\n\t\/\/ Revoke all read leases that have been issued. For testing use only.\n\tRevokeReadLeases()\n}\n\n\/\/ Create a new file leaser that uses the supplied directory for temporary\n\/\/ files (before unlinking them) and attempts to keep usage in bytes below the\n\/\/ given limit. If dir is empty, the system default will be used.\n\/\/\n\/\/ Usage may exceed the given limit if there are read\/write leases whose total\n\/\/ size exceeds the limit, since such leases cannot be revoked.\nfunc NewFileLeaser(\n\tdir string,\n\tlimitBytes int64) (fl FileLeaser) {\n\ttyped := &fileLeaser{\n\t\tdir: dir,\n\t\tlimit: limitBytes,\n\t\treadLeasesIndex: make(map[*readLease]*list.Element),\n\t}\n\n\ttyped.mu = syncutil.NewInvariantMutex(typed.checkInvariants)\n\n\tfl = typed\n\treturn\n}\n\ntype fileLeaser struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tdir string\n\tlimit int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ A lock that guards the mutable state in this struct. Usually this is used\n\t\/\/ only for light weight operations, but while evicting it may require\n\t\/\/ waiting on a goroutine that is holding a read lease lock while reading\n\t\/\/ from a file.\n\t\/\/\n\t\/\/ Lock ordering\n\t\/\/ -------------\n\t\/\/\n\t\/\/ Define < to be the minimum strict partial order satisfying:\n\t\/\/\n\t\/\/ 1. For any read\/write lease W, W < leaser.\n\t\/\/ 2. For any read lease R, leaser < R.\n\t\/\/\n\t\/\/ In other words: read\/write before leaser before read, and never hold two\n\t\/\/ locks from the same category together.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current estimated total size of outstanding read\/write leases. This is\n\t\/\/ only an estimate because we can't synchronize its update with a call to\n\t\/\/ the wrapped file to e.g. write or truncate.\n\treadWriteOutstanding int64\n\n\t\/\/ All outstanding read leases, ordered by recency of use.\n\t\/\/\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\treadLeases list.List\n\n\t\/\/ The sum of all outstanding read lease sizes.\n\t\/\/\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\treadOutstanding int64\n\n\t\/\/ Index of read leases by pointer.\n\t\/\/\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\treadLeasesIndex map[*readLease]*list.Element\n}\n\nfunc (fl *fileLeaser) NewFile() (rwl ReadWriteLease, err error) {\n\t\/\/ Create an anonymous file.\n\tf, err := fsutil.AnonymousFile(fl.dir)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"AnonymousFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Wrap a lease around it.\n\trwl = newReadWriteLease(fl, 0, f)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) RevokeReadLeases() {\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\tfl.evict(0)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc maxInt64(a int64, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) checkInvariants() {\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tfunc() {\n\t\t\trl.Mu.Lock()\n\t\t\tdefer rl.Mu.Unlock()\n\n\t\t\tif rl.revoked() {\n\t\t\t\tpanic(\"Found revoked read lease\")\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\tvar sum int64\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tsum += rl.Size()\n\t}\n\n\tif fl.readOutstanding != sum {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readOutstanding mismatch: %v vs. %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tsum))\n\t}\n\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\tif !(0 <= fl.readOutstanding) {\n\t\tpanic(fmt.Sprintf(\"Unexpected readOutstanding: %v\", fl.readOutstanding))\n\t}\n\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\tif !(fl.readOutstanding <= maxInt64(0, fl.limit-fl.readWriteOutstanding)) {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Unexpected readOutstanding: %v. limit: %v, readWriteOutstanding: %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tfl.limit,\n\t\t\tfl.readWriteOutstanding))\n\t}\n\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\tif len(fl.readLeasesIndex) != fl.readLeases.Len() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readLeasesIndex length mismatch: %v vs. %v\",\n\t\t\tlen(fl.readLeasesIndex),\n\t\t\tfl.readLeases.Len()))\n\t}\n\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\tif fl.readLeasesIndex[e.Value.(*readLease)] != e {\n\t\t\tpanic(\"Mismatch in readLeasesIndex\")\n\t\t}\n\t}\n}\n\n\/\/ Add the supplied delta to the leaser's view of outstanding read\/write lease\n\/\/ bytes, then revoke read leases until we're under limit or we run out of\n\/\/ leases to revoke.\n\/\/\n\/\/ Called by readWriteLease while holding its lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) addReadWriteByteDelta(delta int64) {\n\tfl.readWriteOutstanding += delta\n\tfl.evict(fl.limit)\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) overLimit(limit int64) bool {\n\treturn fl.readOutstanding+fl.readWriteOutstanding > limit\n}\n\n\/\/ Revoke read leases until we're within the given limit or we run out of\n\/\/ things to revoke.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *fileLeaser) evict(limit int64) {\n\tfor fl.overLimit(limit) {\n\t\t\/\/ Do we have anything to revoke?\n\t\tlru := fl.readLeases.Back()\n\t\tif lru == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Revoke it.\n\t\trl := lru.Value.(*readLease)\n\t\tfunc() {\n\t\t\trl.Mu.Lock()\n\t\t\tdefer rl.Mu.Unlock()\n\n\t\t\tfl.revoke(rl)\n\t\t}()\n\t}\n}\n\n\/\/ Note that a read\/write lease of the given size is destroying itself, and\n\/\/ turn it into a read lease of the supplied size wrapped around the given\n\/\/ file.\n\/\/\n\/\/ Called by readWriteLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) downgrade(\n\tsize int64,\n\tfile *os.File) (rl ReadLease) {\n\t\/\/ Create the read lease.\n\trlTyped := newReadLease(size, fl, file)\n\trl = rlTyped\n\n\t\/\/ Update the leaser's state, noting the new read lease and that the\n\t\/\/ read\/write lease has gone away.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\tfl.readWriteOutstanding -= size\n\tfl.readOutstanding += size\n\n\te := fl.readLeases.PushFront(rl)\n\tfl.readLeasesIndex[rlTyped] = e\n\n\t\/\/ Ensure that we're not now over capacity.\n\tfl.evict(fl.limit)\n\n\treturn\n}\n\n\/\/ Upgrade the supplied read lease.\n\/\/\n\/\/ Called by readLease with no lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu, rl.Mu)\nfunc (fl *fileLeaser) upgrade(rl *readLease) (rwl ReadWriteLease, err error) {\n\t\/\/ Grab each lock in turn.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\n\t\/\/ Has the lease already been revoked?\n\tif rl.revoked() {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\tsize := rl.Size()\n\n\t\/\/ Update leaser state.\n\tfl.readWriteOutstanding += size\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Extract the interesting information from the read lease, leaving it an\n\t\/\/ empty husk.\n\tfile := rl.release()\n\n\t\/\/ Create the read\/write lease, telling it that we already know its initial\n\t\/\/ size.\n\trwl = newReadWriteLease(fl, size, file)\n\n\treturn\n}\n\n\/\/ Promote the given read lease to most recently used, if we still know it.\n\/\/ Because our lock order forbids us from acquiring the leaser lock while\n\/\/ holding a read lease lock, this of course races with other promotions.\n\/\/\n\/\/ Called by readLease without holding a lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *fileLeaser) promoteToMostRecent(rl *readLease) {\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\te := fl.readLeasesIndex[rl]\n\tif e != nil {\n\t\tfl.readLeases.MoveToFront(e)\n\t}\n}\n\n\/\/ Forcibly revoke the supplied read lease.\n\/\/\n\/\/ REQUIRES: !rl.revoked()\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\n\/\/ LOCKS_REQUIRED(rl.Mu)\nfunc (fl *fileLeaser) revoke(rl *readLease) {\n\tif rl.revoked() {\n\t\tpanic(\"Already revoked\")\n\t}\n\n\tsize := rl.Size()\n\n\t\/\/ Update leaser state.\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Kill the lease and close its file.\n\tfile := rl.release()\n\tif err := file.Close(); err != nil {\n\t\tlog.Println(\"Error closing file for revoked lease:\", err)\n\t}\n}\n\n\/\/ Called by the read lease when the user wants to manually revoke it.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\n\/\/ LOCKS_EXCLUDED(rl.Mu)\nfunc (fl *fileLeaser) revokeVoluntarily(rl *readLease) {\n\t\/\/ Grab each lock in turn.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\n\t\/\/ Has the lease already been revoked?\n\tif rl.revoked() {\n\t\treturn\n\t}\n\n\t\/\/ Revoke it.\n\tfl.revoke(rl)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package maintner mirrors, searches, syncs, and serves Git, Github,\n\/\/ and Gerrit metadata.\n\/\/\n\/\/ Maintner is short for \"Maintainer\". This package is intended for\n\/\/ use by many tools. The name of the daemon that serves the maintner\n\/\/ data to other tools is \"maintnerd\".\npackage maintner\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/build\/maintner\/maintpb\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ Corpus holds all of a project's metadata.\n\/\/\n\/\/ There are two main phases to the Corpus: the catch-up phase, when the Corpus\n\/\/ is populated from a MutationSource (disk, database), and the polling phase,\n\/\/ when the Corpus polls for new events and stores\/writes them to disk. Call\n\/\/ StartLogging between the catch-up phase and the polling phase.\ntype Corpus struct {\n\t\/\/ ... TODO\n\n\tmu sync.RWMutex\n\tgithubIssues map[githubRepo]map[int32]*githubIssue \/\/ repo -> num -> issue\n\tgithubUsers map[int64]*githubUser\n\tgithubRepos []repoObj\n\t\/\/ If true, log new commits\n\tshouldLog bool\n\tMutationLogger MutationLogger\n}\n\ntype repoObj struct {\n\tname githubRepo\n\ttokenFile string\n}\n\nfunc NewCorpus(logger MutationLogger) *Corpus {\n\treturn &Corpus{\n\t\tgithubIssues: make(map[githubRepo]map[int32]*githubIssue),\n\t\tgithubUsers: make(map[int64]*githubUser),\n\t\tgithubRepos: []repoObj{},\n\t\tMutationLogger: logger,\n\t}\n}\n\n\/\/ StartLogging indicates that further changes should be written to the log.\nfunc (c *Corpus) StartLogging() {\n\tc.mu.Lock()\n\tc.shouldLog = true\n\tc.mu.Unlock()\n}\n\nfunc (c *Corpus) AddGithub(owner, repo, tokenFile string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.githubRepos = append(c.githubRepos, repoObj{\n\t\tname: githubRepo(owner + \"\/\" + repo),\n\t\ttokenFile: tokenFile,\n\t})\n}\n\n\/\/ githubRepo is a github org & repo, lowercase, joined by a '\/',\n\/\/ such as \"golang\/go\".\ntype githubRepo string\n\n\/\/ Org finds \"golang\" in the githubRepo string \"golang\/go\", or returns an empty\n\/\/ string if it is malformed.\nfunc (gr githubRepo) Org() string {\n\tsep := strings.IndexByte(string(gr), '\/')\n\tif sep == -1 {\n\t\treturn \"\"\n\t}\n\treturn string(gr[:sep])\n}\n\nfunc (gr githubRepo) Repo() string {\n\tsep := strings.IndexByte(string(gr), '\/')\n\tif sep == -1 || sep == len(gr)-1 {\n\t\treturn \"\"\n\t}\n\treturn string(gr[sep+1:])\n}\n\n\/\/ githubUser represents a github user.\n\/\/ It is a subset of https:\/\/developer.github.com\/v3\/users\/#get-a-single-user\ntype githubUser struct {\n\tID int64\n\tLogin string\n}\n\n\/\/ githubIssue represents a github issue.\n\/\/ See https:\/\/developer.github.com\/v3\/issues\/#get-a-single-issue\ntype githubIssue struct {\n\tID int64\n\tNumber int32\n\tClosed bool\n\tUser *githubUser\n\tCreated time.Time\n\tUpdated time.Time\n\tBody string\n\t\/\/ TODO Comments ...\n}\n\n\/\/ A MutationSource yields a log of mutations that will catch a corpus\n\/\/ back up to the present.\ntype MutationSource interface {\n\t\/\/ GetMutations returns a channel of mutations.\n\t\/\/ The channel should be closed at the end.\n\t\/\/ All sends on the returned channel should select\n\t\/\/ on the provided context.\n\tGetMutations(context.Context) <-chan *maintpb.Mutation\n}\n\n\/\/ Initialize populates the Corpus using the data from the MutationSource.\nfunc (c *Corpus) Initialize(ctx context.Context, src MutationSource) error {\n\treturn c.processMutations(ctx, src)\n}\n\nfunc (c *Corpus) processMutations(ctx context.Context, src MutationSource) error {\n\tch := src.GetMutations(ctx)\n\tdone := ctx.Done()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn ctx.Err()\n\t\tcase m, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tc.processMutationLocked(m)\n\t\t}\n\t}\n}\n\nfunc (c *Corpus) processMutation(m *maintpb.Mutation) {\n\tc.mu.Lock()\n\tc.processMutationLocked(m)\n\tc.mu.Unlock()\n\tif c.MutationLogger != nil && c.shouldLog {\n\t\terr := c.MutationLogger.Log(m)\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle errors better\n\t\t\tfmt.Printf(\"could not log mutation %v: %v\\n\", m, err)\n\t\t}\n\t}\n}\n\n\/\/ c.mu must be held.\nfunc (c *Corpus) processMutationLocked(m *maintpb.Mutation) {\n\tif im := m.GithubIssue; im != nil {\n\t\tc.processGithubIssueMutation(im)\n\t}\n\t\/\/ TODO: more...\n}\n\nfunc (c *Corpus) repoKey(owner, repo string) githubRepo {\n\tif owner == \"\" || repo == \"\" {\n\t\treturn \"\"\n\t}\n\t\/\/ TODO: avoid garbage, use interned strings? profile later\n\t\/\/ once we have gigabytes of mutation logs to slurp at\n\t\/\/ start-up. (The same thing mattered for Camlistore start-up\n\t\/\/ time at least)\n\treturn githubRepo(owner + \"\/\" + repo)\n}\n\nfunc (c *Corpus) getGithubUser(pu *maintpb.GithubUser) *githubUser {\n\tif pu == nil {\n\t\treturn nil\n\t}\n\tif u := c.githubUsers[pu.Id]; u != nil {\n\t\tif pu.Login != \"\" && pu.Login != u.Login {\n\t\t\tu.Login = pu.Login\n\t\t}\n\t\treturn u\n\t}\n\tif c.githubUsers == nil {\n\t\tc.githubUsers = make(map[int64]*githubUser)\n\t}\n\tu := &githubUser{\n\t\tID: pu.Id,\n\t\tLogin: pu.Login,\n\t}\n\tc.githubUsers[pu.Id] = u\n\treturn u\n}\n\nvar errNoChanges = errors.New(\"No changes in this github.Issue\")\n\n\/\/ newMutationFromIssue generates a GithubIssueMutation using the smallest\n\/\/ possible diff between ci (a corpus Issue) and gi (an external github issue).\n\/\/\n\/\/ If newMutationFromIssue returns nil, the provided github.Issue is no newer\n\/\/ than the data we have in the corpus. ci may be nil.\nfunc newMutationFromIssue(ci *githubIssue, gi *github.Issue, rp githubRepo) *maintpb.Mutation {\n\tif gi == nil || gi.Number == nil {\n\t\tpanic(fmt.Sprintf(\"github issue with nil number: %#v\", gi))\n\t}\n\towner, repo := rp.Org(), rp.Repo()\n\t\/\/ always need these fields to figure out which key to write to\n\tm := &maintpb.GithubIssueMutation{\n\t\tOwner: owner,\n\t\tRepo: repo,\n\t\tNumber: int32(*gi.Number),\n\t}\n\tif ci == nil {\n\t\t\/\/ We don't know about this github issue, so populate all fields in one\n\t\t\/\/ mutation.\n\t\tif gi.CreatedAt != nil {\n\t\t\ttproto, err := ptypes.TimestampProto(*gi.CreatedAt)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tm.Created = tproto\n\t\t}\n\t\tif gi.UpdatedAt != nil {\n\t\t\ttproto, err := ptypes.TimestampProto(*gi.UpdatedAt)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tm.Updated = tproto\n\t\t}\n\t\tif gi.Body != nil {\n\t\t\tm.Body = *gi.Body\n\t\t}\n\t\treturn &maintpb.Mutation{GithubIssue: m}\n\t}\n\tif gi.UpdatedAt != nil {\n\t\tif !gi.UpdatedAt.After(ci.Updated) {\n\t\t\t\/\/ This data is stale, ignore it.\n\t\t\treturn nil\n\t\t}\n\t\ttproto, err := ptypes.TimestampProto(*gi.UpdatedAt)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tm.Updated = tproto\n\t}\n\tif gi.Body != nil && *gi.Body != ci.Body {\n\t\tm.Body = *gi.Body\n\t}\n\treturn &maintpb.Mutation{GithubIssue: m}\n}\n\n\/\/ getIssue finds an issue in the Corpus or returns nil, false if it is not\n\/\/ present.\nfunc (c *Corpus) getIssue(rp githubRepo, number int32) (*githubIssue, bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tissueMap, ok := c.githubIssues[rp]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tgi, ok := issueMap[number]\n\treturn gi, ok\n}\n\n\/\/ processGithubIssueMutation updates the corpus with the information in m, and\n\/\/ returns true if the Corpus was modified.\nfunc (c *Corpus) processGithubIssueMutation(m *maintpb.GithubIssueMutation) (changed bool) {\n\tif c == nil {\n\t\tpanic(\"nil corpus\")\n\t}\n\tk := c.repoKey(m.Owner, m.Repo)\n\tif k == \"\" {\n\t\t\/\/ TODO: errors? return false? skip for now.\n\t\treturn\n\t}\n\tif m.Number == 0 {\n\t\treturn\n\t}\n\tissueMap, ok := c.githubIssues[k]\n\tif !ok {\n\t\tif c.githubIssues == nil {\n\t\t\tc.githubIssues = make(map[githubRepo]map[int32]*githubIssue)\n\t\t}\n\t\tissueMap = make(map[int32]*githubIssue)\n\t\tc.githubIssues[k] = issueMap\n\t}\n\tgi, ok := issueMap[m.Number]\n\tif !ok {\n\t\tcreated, err := ptypes.Timestamp(m.Created)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tgi = &githubIssue{\n\t\t\tNumber: m.Number,\n\t\t\tUser: c.getGithubUser(m.User),\n\t\t\tCreated: created,\n\t\t}\n\t\tissueMap[m.Number] = gi\n\t\tchanged = true\n\t}\n\t\/\/ Check Updated before all other fields so they don't update if this\n\t\/\/ Mutation is stale\n\tif m.Updated != nil {\n\t\tupdated, err := ptypes.Timestamp(m.Updated)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !updated.IsZero() && updated.Before(gi.Updated) {\n\t\t\t\/\/ this mutation represents data older than the data we have in\n\t\t\t\/\/ the corpus; ignore it.\n\t\t\treturn false\n\t\t}\n\t\tgi.Updated = updated\n\t\tchanged = changed || updated.After(gi.Updated)\n\t}\n\tif m.Body != \"\" {\n\t\tgi.Body = m.Body\n\t\tchanged = changed || m.Body != gi.Body\n\t}\n\t\/\/ ignoring Created since it *should* never update\n\treturn changed\n}\n\n\/\/ PopulateFromServer populates the corpus from a maintnerd server.\nfunc (c *Corpus) PopulateFromServer(ctx context.Context, serverURL string) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ PopulateFromDisk populates the corpus from a set of mutation logs\n\/\/ in a local directory.\nfunc (c *Corpus) PopulateFromDisk(ctx context.Context, dir string) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ PopulateFromAPIs populates the corpus using API calls to\n\/\/ the upstream Git, Github, and\/or Gerrit servers.\nfunc (c *Corpus) PopulateFromAPIs(ctx context.Context) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ Poll checks for new changes on all repositories being tracked by the Corpus.\nfunc (c *Corpus) Poll(ctx context.Context) error {\n\tgroup, ctx := errgroup.WithContext(ctx)\n\tfor _, rp := range c.githubRepos {\n\t\trp := rp\n\t\tgroup.Go(func() error {\n\t\t\treturn c.PollGithubLoop(ctx, rp.name, rp.tokenFile)\n\t\t})\n\t}\n\treturn group.Wait()\n}\n\n\/\/ PollGithubLoop checks for new changes on a single Github repository and\n\/\/ updates the Corpus with any changes.\nfunc (c *Corpus) PollGithubLoop(ctx context.Context, rp githubRepo, tokenFile string) error {\n\tslurp, err := ioutil.ReadFile(tokenFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf := strings.SplitN(strings.TrimSpace(string(slurp)), \":\", 2)\n\tif len(f) != 2 || f[0] == \"\" || f[1] == \"\" {\n\t\treturn fmt.Errorf(\"Expected token file %s to be of form <username>:<token>\", tokenFile)\n\t}\n\tts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: f[1]})\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\tghc := github.NewClient(tc)\n\tfor {\n\t\terr := c.pollGithub(ctx, rp, ghc)\n\t\tif err == context.Canceled {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Polled github for %s; err = %v. Sleeping.\", rp, err)\n\t\t\/\/ TODO: select and listen for context errors\n\t\ttime.Sleep(30 * time.Second)\n\t}\n}\n\nfunc (c *Corpus) pollGithub(ctx context.Context, rp githubRepo, ghc *github.Client) error {\n\tlog.Printf(\"Polling github for %s ...\", rp)\n\tpage := 1\n\tkeepGoing := true\n\towner, repo := rp.Org(), rp.Repo()\n\tfor keepGoing {\n\t\t\/\/ TODO: use https:\/\/godoc.org\/github.com\/google\/go-github\/github#ActivityService.ListIssueEventsForRepository probably\n\t\tissues, _, err := ghc.Issues.ListByRepo(ctx, owner, repo, &github.IssueListByRepoOptions{\n\t\t\tState: \"all\",\n\t\t\tSort: \"updated\",\n\t\t\tDirection: \"desc\",\n\t\t\t\/\/ TODO: if an issue gets updated while we are paging, we might\n\t\t\t\/\/ process the same issue twice - as item 100 on page 1 and then\n\t\t\t\/\/ again as item 1 on page 2.\n\t\t\tListOptions: github.ListOptions{\n\t\t\t\tPage: page,\n\t\t\t\tPerPage: 100,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"github %s\/%s: page %d, num issues %d\", owner, repo, page, len(issues))\n\t\tif len(issues) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tfor _, is := range issues {\n\t\t\tgi, _ := c.getIssue(rp, int32(*is.Number))\n\t\t\tmp := newMutationFromIssue(gi, is, rp)\n\t\t\tif mp == nil {\n\t\t\t\tkeepGoing = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Printf(\"modifying %s, issue %d: %s\\n\", rp, *is.Number, *is.Title)\n\t\t\tc.processMutation(mp)\n\t\t}\n\t\tpage++\n\t}\n\treturn nil\n}\n<commit_msg>maintner: fix context usage<commit_after>\/\/ Copyright 2017 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package maintner mirrors, searches, syncs, and serves Git, Github,\n\/\/ and Gerrit metadata.\n\/\/\n\/\/ Maintner is short for \"Maintainer\". This package is intended for\n\/\/ use by many tools. The name of the daemon that serves the maintner\n\/\/ data to other tools is \"maintnerd\".\npackage maintner\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/build\/maintner\/maintpb\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ Corpus holds all of a project's metadata.\n\/\/\n\/\/ There are two main phases to the Corpus: the catch-up phase, when the Corpus\n\/\/ is populated from a MutationSource (disk, database), and the polling phase,\n\/\/ when the Corpus polls for new events and stores\/writes them to disk. Call\n\/\/ StartLogging between the catch-up phase and the polling phase.\ntype Corpus struct {\n\t\/\/ ... TODO\n\n\tmu sync.RWMutex\n\tgithubIssues map[githubRepo]map[int32]*githubIssue \/\/ repo -> num -> issue\n\tgithubUsers map[int64]*githubUser\n\tgithubRepos []repoObj\n\t\/\/ If true, log new commits\n\tshouldLog bool\n\tMutationLogger MutationLogger\n}\n\ntype repoObj struct {\n\tname githubRepo\n\ttokenFile string\n}\n\nfunc NewCorpus(logger MutationLogger) *Corpus {\n\treturn &Corpus{\n\t\tgithubIssues: make(map[githubRepo]map[int32]*githubIssue),\n\t\tgithubUsers: make(map[int64]*githubUser),\n\t\tgithubRepos: []repoObj{},\n\t\tMutationLogger: logger,\n\t}\n}\n\n\/\/ StartLogging indicates that further changes should be written to the log.\nfunc (c *Corpus) StartLogging() {\n\tc.mu.Lock()\n\tc.shouldLog = true\n\tc.mu.Unlock()\n}\n\nfunc (c *Corpus) AddGithub(owner, repo, tokenFile string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.githubRepos = append(c.githubRepos, repoObj{\n\t\tname: githubRepo(owner + \"\/\" + repo),\n\t\ttokenFile: tokenFile,\n\t})\n}\n\n\/\/ githubRepo is a github org & repo, lowercase, joined by a '\/',\n\/\/ such as \"golang\/go\".\ntype githubRepo string\n\n\/\/ Org finds \"golang\" in the githubRepo string \"golang\/go\", or returns an empty\n\/\/ string if it is malformed.\nfunc (gr githubRepo) Org() string {\n\tsep := strings.IndexByte(string(gr), '\/')\n\tif sep == -1 {\n\t\treturn \"\"\n\t}\n\treturn string(gr[:sep])\n}\n\nfunc (gr githubRepo) Repo() string {\n\tsep := strings.IndexByte(string(gr), '\/')\n\tif sep == -1 || sep == len(gr)-1 {\n\t\treturn \"\"\n\t}\n\treturn string(gr[sep+1:])\n}\n\n\/\/ githubUser represents a github user.\n\/\/ It is a subset of https:\/\/developer.github.com\/v3\/users\/#get-a-single-user\ntype githubUser struct {\n\tID int64\n\tLogin string\n}\n\n\/\/ githubIssue represents a github issue.\n\/\/ See https:\/\/developer.github.com\/v3\/issues\/#get-a-single-issue\ntype githubIssue struct {\n\tID int64\n\tNumber int32\n\tClosed bool\n\tUser *githubUser\n\tCreated time.Time\n\tUpdated time.Time\n\tBody string\n\t\/\/ TODO Comments ...\n}\n\n\/\/ A MutationSource yields a log of mutations that will catch a corpus\n\/\/ back up to the present.\ntype MutationSource interface {\n\t\/\/ GetMutations returns a channel of mutations.\n\t\/\/ The channel should be closed at the end.\n\t\/\/ All sends on the returned channel should select\n\t\/\/ on the provided context.\n\tGetMutations(context.Context) <-chan *maintpb.Mutation\n}\n\n\/\/ Initialize populates the Corpus using the data from the MutationSource.\nfunc (c *Corpus) Initialize(ctx context.Context, src MutationSource) error {\n\treturn c.processMutations(ctx, src)\n}\n\nfunc (c *Corpus) processMutations(ctx context.Context, src MutationSource) error {\n\tch := src.GetMutations(ctx)\n\tdone := ctx.Done()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn ctx.Err()\n\t\tcase m, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tc.processMutationLocked(m)\n\t\t}\n\t}\n}\n\nfunc (c *Corpus) processMutation(m *maintpb.Mutation) {\n\tc.mu.Lock()\n\tc.processMutationLocked(m)\n\tc.mu.Unlock()\n\tif c.MutationLogger != nil && c.shouldLog {\n\t\terr := c.MutationLogger.Log(m)\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle errors better\n\t\t\tfmt.Printf(\"could not log mutation %v: %v\\n\", m, err)\n\t\t}\n\t}\n}\n\n\/\/ c.mu must be held.\nfunc (c *Corpus) processMutationLocked(m *maintpb.Mutation) {\n\tif im := m.GithubIssue; im != nil {\n\t\tc.processGithubIssueMutation(im)\n\t}\n\t\/\/ TODO: more...\n}\n\nfunc (c *Corpus) repoKey(owner, repo string) githubRepo {\n\tif owner == \"\" || repo == \"\" {\n\t\treturn \"\"\n\t}\n\t\/\/ TODO: avoid garbage, use interned strings? profile later\n\t\/\/ once we have gigabytes of mutation logs to slurp at\n\t\/\/ start-up. (The same thing mattered for Camlistore start-up\n\t\/\/ time at least)\n\treturn githubRepo(owner + \"\/\" + repo)\n}\n\nfunc (c *Corpus) getGithubUser(pu *maintpb.GithubUser) *githubUser {\n\tif pu == nil {\n\t\treturn nil\n\t}\n\tif u := c.githubUsers[pu.Id]; u != nil {\n\t\tif pu.Login != \"\" && pu.Login != u.Login {\n\t\t\tu.Login = pu.Login\n\t\t}\n\t\treturn u\n\t}\n\tif c.githubUsers == nil {\n\t\tc.githubUsers = make(map[int64]*githubUser)\n\t}\n\tu := &githubUser{\n\t\tID: pu.Id,\n\t\tLogin: pu.Login,\n\t}\n\tc.githubUsers[pu.Id] = u\n\treturn u\n}\n\nvar errNoChanges = errors.New(\"No changes in this github.Issue\")\n\n\/\/ newMutationFromIssue generates a GithubIssueMutation using the smallest\n\/\/ possible diff between ci (a corpus Issue) and gi (an external github issue).\n\/\/\n\/\/ If newMutationFromIssue returns nil, the provided github.Issue is no newer\n\/\/ than the data we have in the corpus. ci may be nil.\nfunc newMutationFromIssue(ci *githubIssue, gi *github.Issue, rp githubRepo) *maintpb.Mutation {\n\tif gi == nil || gi.Number == nil {\n\t\tpanic(fmt.Sprintf(\"github issue with nil number: %#v\", gi))\n\t}\n\towner, repo := rp.Org(), rp.Repo()\n\t\/\/ always need these fields to figure out which key to write to\n\tm := &maintpb.GithubIssueMutation{\n\t\tOwner: owner,\n\t\tRepo: repo,\n\t\tNumber: int32(*gi.Number),\n\t}\n\tif ci == nil {\n\t\t\/\/ We don't know about this github issue, so populate all fields in one\n\t\t\/\/ mutation.\n\t\tif gi.CreatedAt != nil {\n\t\t\ttproto, err := ptypes.TimestampProto(*gi.CreatedAt)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tm.Created = tproto\n\t\t}\n\t\tif gi.UpdatedAt != nil {\n\t\t\ttproto, err := ptypes.TimestampProto(*gi.UpdatedAt)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tm.Updated = tproto\n\t\t}\n\t\tif gi.Body != nil {\n\t\t\tm.Body = *gi.Body\n\t\t}\n\t\treturn &maintpb.Mutation{GithubIssue: m}\n\t}\n\tif gi.UpdatedAt != nil {\n\t\tif !gi.UpdatedAt.After(ci.Updated) {\n\t\t\t\/\/ This data is stale, ignore it.\n\t\t\treturn nil\n\t\t}\n\t\ttproto, err := ptypes.TimestampProto(*gi.UpdatedAt)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tm.Updated = tproto\n\t}\n\tif gi.Body != nil && *gi.Body != ci.Body {\n\t\tm.Body = *gi.Body\n\t}\n\treturn &maintpb.Mutation{GithubIssue: m}\n}\n\n\/\/ getIssue finds an issue in the Corpus or returns nil, false if it is not\n\/\/ present.\nfunc (c *Corpus) getIssue(rp githubRepo, number int32) (*githubIssue, bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tissueMap, ok := c.githubIssues[rp]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tgi, ok := issueMap[number]\n\treturn gi, ok\n}\n\n\/\/ processGithubIssueMutation updates the corpus with the information in m, and\n\/\/ returns true if the Corpus was modified.\nfunc (c *Corpus) processGithubIssueMutation(m *maintpb.GithubIssueMutation) (changed bool) {\n\tif c == nil {\n\t\tpanic(\"nil corpus\")\n\t}\n\tk := c.repoKey(m.Owner, m.Repo)\n\tif k == \"\" {\n\t\t\/\/ TODO: errors? return false? skip for now.\n\t\treturn\n\t}\n\tif m.Number == 0 {\n\t\treturn\n\t}\n\tissueMap, ok := c.githubIssues[k]\n\tif !ok {\n\t\tif c.githubIssues == nil {\n\t\t\tc.githubIssues = make(map[githubRepo]map[int32]*githubIssue)\n\t\t}\n\t\tissueMap = make(map[int32]*githubIssue)\n\t\tc.githubIssues[k] = issueMap\n\t}\n\tgi, ok := issueMap[m.Number]\n\tif !ok {\n\t\tcreated, err := ptypes.Timestamp(m.Created)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tgi = &githubIssue{\n\t\t\tNumber: m.Number,\n\t\t\tUser: c.getGithubUser(m.User),\n\t\t\tCreated: created,\n\t\t}\n\t\tissueMap[m.Number] = gi\n\t\tchanged = true\n\t}\n\t\/\/ Check Updated before all other fields so they don't update if this\n\t\/\/ Mutation is stale\n\tif m.Updated != nil {\n\t\tupdated, err := ptypes.Timestamp(m.Updated)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !updated.IsZero() && updated.Before(gi.Updated) {\n\t\t\t\/\/ this mutation represents data older than the data we have in\n\t\t\t\/\/ the corpus; ignore it.\n\t\t\treturn false\n\t\t}\n\t\tgi.Updated = updated\n\t\tchanged = changed || updated.After(gi.Updated)\n\t}\n\tif m.Body != \"\" {\n\t\tgi.Body = m.Body\n\t\tchanged = changed || m.Body != gi.Body\n\t}\n\t\/\/ ignoring Created since it *should* never update\n\treturn changed\n}\n\n\/\/ PopulateFromServer populates the corpus from a maintnerd server.\nfunc (c *Corpus) PopulateFromServer(ctx context.Context, serverURL string) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ PopulateFromDisk populates the corpus from a set of mutation logs\n\/\/ in a local directory.\nfunc (c *Corpus) PopulateFromDisk(ctx context.Context, dir string) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ PopulateFromAPIs populates the corpus using API calls to\n\/\/ the upstream Git, Github, and\/or Gerrit servers.\nfunc (c *Corpus) PopulateFromAPIs(ctx context.Context) error {\n\tpanic(\"TODO\")\n}\n\n\/\/ Poll checks for new changes on all repositories being tracked by the Corpus.\nfunc (c *Corpus) Poll(ctx context.Context) error {\n\tgroup, ctx := errgroup.WithContext(ctx)\n\tfor _, rp := range c.githubRepos {\n\t\trp := rp\n\t\tgroup.Go(func() error {\n\t\t\treturn c.PollGithubLoop(ctx, rp.name, rp.tokenFile)\n\t\t})\n\t}\n\treturn group.Wait()\n}\n\n\/\/ PollGithubLoop checks for new changes on a single Github repository and\n\/\/ updates the Corpus with any changes.\nfunc (c *Corpus) PollGithubLoop(ctx context.Context, rp githubRepo, tokenFile string) error {\n\tslurp, err := ioutil.ReadFile(tokenFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf := strings.SplitN(strings.TrimSpace(string(slurp)), \":\", 2)\n\tif len(f) != 2 || f[0] == \"\" || f[1] == \"\" {\n\t\treturn fmt.Errorf(\"Expected token file %s to be of form <username>:<token>\", tokenFile)\n\t}\n\tts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: f[1]})\n\ttc := oauth2.NewClient(ctx, ts)\n\tghc := github.NewClient(tc)\n\tfor {\n\t\terr := c.pollGithub(ctx, rp, ghc)\n\t\tif err == context.Canceled {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Polled github for %s; err = %v. Sleeping.\", rp, err)\n\t\t\/\/ TODO: select and listen for context errors\n\t\tselect {\n\t\tcase <-time.After(30 * time.Second):\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\nfunc (c *Corpus) pollGithub(ctx context.Context, rp githubRepo, ghc *github.Client) error {\n\tlog.Printf(\"Polling github for %s ...\", rp)\n\tpage := 1\n\tkeepGoing := true\n\towner, repo := rp.Org(), rp.Repo()\n\tfor keepGoing {\n\t\t\/\/ TODO: use https:\/\/godoc.org\/github.com\/google\/go-github\/github#ActivityService.ListIssueEventsForRepository probably\n\t\tissues, _, err := ghc.Issues.ListByRepo(ctx, owner, repo, &github.IssueListByRepoOptions{\n\t\t\tState: \"all\",\n\t\t\tSort: \"updated\",\n\t\t\tDirection: \"desc\",\n\t\t\t\/\/ TODO: if an issue gets updated while we are paging, we might\n\t\t\t\/\/ process the same issue twice - as item 100 on page 1 and then\n\t\t\t\/\/ again as item 1 on page 2.\n\t\t\tListOptions: github.ListOptions{\n\t\t\t\tPage: page,\n\t\t\t\tPerPage: 100,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"github %s\/%s: page %d, num issues %d\", owner, repo, page, len(issues))\n\t\tif len(issues) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tfor _, is := range issues {\n\t\t\tgi, _ := c.getIssue(rp, int32(*is.Number))\n\t\t\tmp := newMutationFromIssue(gi, is, rp)\n\t\t\tif mp == nil {\n\t\t\t\tkeepGoing = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Printf(\"modifying %s, issue %d: %s\\n\", rp, *is.Number, *is.Title)\n\t\t\tc.processMutation(mp)\n\t\t}\n\t\tpage++\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package caddy\n\nimport (\n\t\"github.com\/mholt\/caddy\/caddyhttp\/httpserver\"\n\t\"github.com\/tarent\/loginsrv\/login\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ CaddyHandler is the loginsrv handler wrapper for caddy\ntype CaddyHandler struct {\n\tnext httpserver.Handler\n\tconfig *login.Config\n\tloginHandler *login.Handler\n}\n\n\/\/ NewCaddyHandler create the handler\nfunc NewCaddyHandler(next httpserver.Handler, loginHandler *login.Handler, config *login.Config) *CaddyHandler {\n\th := &CaddyHandler{\n\t\tnext: next,\n\t\tconfig: config,\n\t\tloginHandler: loginHandler,\n\t}\n\treturn h\n}\n\nfunc (h *CaddyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {\n\t\/\/Fetch jwt token. If valid set a Caddy replacer for {user}\n\tuserInfo, valid := h.loginHandler.GetToken(r)\n\tif valid {\n\t\trepl := httpserver.NewReplacer(r, nil, \"-\")\n\t\trepl.Set(\"user\", userInfo.Sub)\n\t}\n\n\tif strings.HasPrefix(r.URL.Path, h.config.LoginPath) {\n\t\th.loginHandler.ServeHTTP(w, r)\n\t\treturn 0, nil\n\t}\n\n\treturn h.next.ServeHTTP(w, r)\n}\n<commit_msg>let upstream middleware (e.g. fastcgi and cgi) know about authenticated user<commit_after>package caddy\n\nimport (\n\t\"github.com\/mholt\/caddy\/caddyhttp\/httpserver\"\n\t\"github.com\/tarent\/loginsrv\/login\"\n\t\"context\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ CaddyHandler is the loginsrv handler wrapper for caddy\ntype CaddyHandler struct {\n\tnext httpserver.Handler\n\tconfig *login.Config\n\tloginHandler *login.Handler\n}\n\n\/\/ NewCaddyHandler create the handler\nfunc NewCaddyHandler(next httpserver.Handler, loginHandler *login.Handler, config *login.Config) *CaddyHandler {\n\th := &CaddyHandler{\n\t\tnext: next,\n\t\tconfig: config,\n\t\tloginHandler: loginHandler,\n\t}\n\treturn h\n}\n\nfunc (h *CaddyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {\n\t\/\/Fetch jwt token. If valid set a Caddy replacer for {user}\n\tuserInfo, valid := h.loginHandler.GetToken(r)\n\tif valid {\n\t\t\/\/ let upstream middleware (e.g. fastcgi and cgi) know about authenticated\n\t\t\/\/ user; this replaces the request with a wrapped instance\n\t\tr = r.WithContext(context.WithValue(r.Context(),\n\t\thttpserver.RemoteUserCtxKey, userInfo.Sub))\n\t\n\t\t\/\/ Provide username to be used in log by replacer\n\t\trepl := httpserver.NewReplacer(r, nil, \"-\")\n\t\trepl.Set(\"user\", userInfo.Sub)\n\t}\n\n\tif strings.HasPrefix(r.URL.Path, h.config.LoginPath) {\n\t\th.loginHandler.ServeHTTP(w, r)\n\t\treturn 0, nil\n\t}\n\n\treturn h.next.ServeHTTP(w, r)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>profiler: ensure VMs will be shut down in e2e test<commit_after><|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"bytes\"\n\t\"strconv\"\n\t\"errors\"\n\t\"io\"\n)\n\ntype basicAuth struct {\n\tusername string\n\tpassword string\n}\n\ntype ResponseBodyHandler func(responseBody interface{}) error\n\ntype Request struct {\n\tclient *Client\n\tapiCall string\n\tmethod string\n\theaders http.Header\n\tbody interface{}\n\tresponseBody interface{}\n\thandler ResponseBodyHandler\n\tparameters url.Values\n\tauth *basicAuth\n\texpectedStatusCode *int\n}\n\nfunc NewRequest(client *Client, method string, apiCall string) *Request {\n\n\tbuilder := Request{\n\t\tclient: client,\n\t\tmethod: method,\n\t\tapiCall: apiCall,\n\t\theaders: make(http.Header),\n\t\tparameters: make(url.Values),\n\t}\n\n\treturn &builder\n}\n\nfunc (r *Request) Param(name string, value string) *Request {\n\tr.parameters.Add(name, value)\n\treturn r\n}\n\nfunc (r *Request) ParamBool(name string, value bool) *Request {\n\tr.parameters.Add(name, strconv.FormatBool(value))\n\treturn r\n}\n\nfunc (r *Request) ParamInt(name string, value int) *Request {\n\tr.parameters.Add(name, strconv.Itoa(value))\n\treturn r\n}\n\nfunc (r *Request) ParamInt64(name string, value int64) *Request {\n\tr.parameters.Add(name, strconv.FormatInt(value, 10))\n\treturn r\n}\n\nfunc (r *Request) ParamUint(name string, value uint) *Request {\n\treturn r.ParamUint64(name, uint64(value))\n}\n\nfunc (r *Request) ParamUint64(name string, value uint64) *Request {\n\tr.parameters.Add(name, strconv.FormatUint(value, 10))\n\treturn r\n}\n\nfunc (r *Request) Body(content interface{}) *Request {\n\tr.body = content\n\treturn r\n}\n\nfunc (r *Request) BasicAuth(username string, password string) *Request {\n\tr.auth = &basicAuth{\n\t\tusername: username,\n\t\tpassword: password,\n\t}\n\treturn r\n}\n\nfunc (r *Request) Expect(statusCode int) *Request {\n\tr.expectedStatusCode = &statusCode\n\treturn r\n}\n\nfunc (r *Request) ResponseBody(content interface{}) *Request {\n\tr.responseBody = content\n\treturn r\n}\n\nfunc (r *Request) ResponseBodyHandler(handler ResponseBodyHandler) *Request {\n\tr.handler = handler\n\treturn r\n}\n\nfunc (r *Request) Execute() (*Response, error) {\n\n\tvar reader io.Reader = nil\n\t\n\tif r.body != nil {\n\t\tbody, err := json.Marshal(r.body)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treader = bytes.NewReader(body)\n\t} \n\t\t\t\n\treq, err := http.NewRequest(r.method,\n\t\t*r.client.url + r.apiCall, reader)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.URL.RawQuery = r.parameters.Encode()\n\treq.Header = r.headers\n\treq.Header.Add(\"User-Agent\", r.client.userAgent)\n\n\tif r.body != nil {\n\t\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\t}\n\n\tif r.auth != nil {\n\t\treq.SetBasicAuth(r.auth.username, r.auth.password)\n\t} else {\n\t\tauthToken, err := ReadToken()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(\"Authorization\", \"Bearer \" + authToken.Token)\n\t}\n\t\n\thttpRsp, err := r.client.httpClient.Do(req)\n\n\trsp := NewResponse(httpRsp)\n\t\n\tif err != nil {\n\t\treturn rsp, err\n\t}\n\n\t\/\/ Check if we got the status code we expected\n\tif r.expectedStatusCode != nil &&\n\t\t*r.expectedStatusCode != httpRsp.StatusCode {\n\n\t\t\/\/ Read error message if any\n\t\terrorMsg, err := rsp.ReadError()\n\t\t\n\t\tif err != nil {\n\t\t\terr = errors.New(\"Unexpected status code \" + httpRsp.Status)\n\t\t} else {\n\t\t\terr = errors.New(\"Error: \" + errorMsg.Errors[0].Message)\n\t\t}\n\t\treturn rsp, err\n\t}\n\n\tif r.responseBody != nil {\n\t\terr = rsp.Read(r.responseBody)\n\n\t\tif err != nil {\n\t\t\treturn rsp, err\n\t\t}\n\n\t\tif r.handler != nil {\n\t\t\terr = r.handler(r.responseBody)\n\t\t}\n\t}\n\n\treturn rsp, err\n}\n<commit_msg>If there is no saved token, try request without auth<commit_after>package client\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"bytes\"\n\t\"strconv\"\n\t\"errors\"\n\t\"io\"\n)\n\ntype basicAuth struct {\n\tusername string\n\tpassword string\n}\n\ntype ResponseBodyHandler func(responseBody interface{}) error\n\ntype Request struct {\n\tclient *Client\n\tapiCall string\n\tmethod string\n\theaders http.Header\n\tbody interface{}\n\tresponseBody interface{}\n\thandler ResponseBodyHandler\n\tparameters url.Values\n\tauth *basicAuth\n\texpectedStatusCode *int\n}\n\nfunc NewRequest(client *Client, method string, apiCall string) *Request {\n\n\tbuilder := Request{\n\t\tclient: client,\n\t\tmethod: method,\n\t\tapiCall: apiCall,\n\t\theaders: make(http.Header),\n\t\tparameters: make(url.Values),\n\t}\n\n\treturn &builder\n}\n\nfunc (r *Request) Param(name string, value string) *Request {\n\tr.parameters.Add(name, value)\n\treturn r\n}\n\nfunc (r *Request) ParamBool(name string, value bool) *Request {\n\tr.parameters.Add(name, strconv.FormatBool(value))\n\treturn r\n}\n\nfunc (r *Request) ParamInt(name string, value int) *Request {\n\tr.parameters.Add(name, strconv.Itoa(value))\n\treturn r\n}\n\nfunc (r *Request) ParamInt64(name string, value int64) *Request {\n\tr.parameters.Add(name, strconv.FormatInt(value, 10))\n\treturn r\n}\n\nfunc (r *Request) ParamUint(name string, value uint) *Request {\n\treturn r.ParamUint64(name, uint64(value))\n}\n\nfunc (r *Request) ParamUint64(name string, value uint64) *Request {\n\tr.parameters.Add(name, strconv.FormatUint(value, 10))\n\treturn r\n}\n\nfunc (r *Request) Body(content interface{}) *Request {\n\tr.body = content\n\treturn r\n}\n\nfunc (r *Request) BasicAuth(username string, password string) *Request {\n\tr.auth = &basicAuth{\n\t\tusername: username,\n\t\tpassword: password,\n\t}\n\treturn r\n}\n\nfunc (r *Request) Expect(statusCode int) *Request {\n\tr.expectedStatusCode = &statusCode\n\treturn r\n}\n\nfunc (r *Request) ResponseBody(content interface{}) *Request {\n\tr.responseBody = content\n\treturn r\n}\n\nfunc (r *Request) ResponseBodyHandler(handler ResponseBodyHandler) *Request {\n\tr.handler = handler\n\treturn r\n}\n\nfunc (r *Request) Execute() (*Response, error) {\n\n\tvar reader io.Reader = nil\n\t\n\tif r.body != nil {\n\t\tbody, err := json.Marshal(r.body)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treader = bytes.NewReader(body)\n\t} \n\t\t\t\n\treq, err := http.NewRequest(r.method,\n\t\t*r.client.url + r.apiCall, reader)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.URL.RawQuery = r.parameters.Encode()\n\treq.Header = r.headers\n\treq.Header.Add(\"User-Agent\", r.client.userAgent)\n\n\tif r.body != nil {\n\t\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\t}\n\n\tif r.auth != nil {\n\t\treq.SetBasicAuth(r.auth.username, r.auth.password)\n\t} else {\n\t\tauthToken, err := ReadToken()\n\n\t\t\/\/ If we didn't have a token, just try anyway and let\n\t\t\/\/ the API return error if we are not requesting an auth-less\n\t\t\/\/ API endpoint\n\t\tif err == nil {\n\t\t\treq.Header.Add(\"Authorization\", \"Bearer \" + authToken.Token)\n\t\t}\n\t}\n\t\n\thttpRsp, err := r.client.httpClient.Do(req)\n\n\trsp := NewResponse(httpRsp)\n\t\n\tif err != nil {\n\t\treturn rsp, err\n\t}\n\n\t\/\/ Check if we got the status code we expected\n\tif r.expectedStatusCode != nil &&\n\t\t*r.expectedStatusCode != httpRsp.StatusCode {\n\n\t\t\/\/ Read error message if any\n\t\terrorMsg, err := rsp.ReadError()\n\t\t\n\t\tif err != nil {\n\t\t\terr = errors.New(\"Unexpected status code \" + httpRsp.Status)\n\t\t} else if len(errorMsg.Errors) > 0 {\n\t\t\terr = errors.New(\"Error: \" + errorMsg.Errors[0].Message)\n\t\t}\n\t\treturn rsp, err\n\t}\n\n\tif r.responseBody != nil {\n\t\terr = rsp.Read(r.responseBody)\n\n\t\tif err != nil {\n\t\t\treturn rsp, err\n\t\t}\n\n\t\tif r.handler != nil {\n\t\t\terr = r.handler(r.responseBody)\n\t\t}\n\t}\n\n\treturn rsp, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 go-swagger maintainers\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/go-swagger\/go-swagger\/client\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\"\n\t\"github.com\/go-swagger\/go-swagger\/spec\"\n\t\"github.com\/go-swagger\/go-swagger\/strfmt\"\n)\n\n\/\/ Runtime represents an API client that uses the transport\n\/\/ to make http requests based on a swagger specification.\ntype Runtime struct {\n\tDefaultMediaType string\n\tDefaultAuthentication client.AuthInfoWriter\n\tConsumers map[string]httpkit.Consumer\n\tProducers map[string]httpkit.Producer\n\n\tTransport http.RoundTripper\n\tSpec *spec.Document\n\tHost string\n\tBasePath string\n\tFormats strfmt.Registry\n\tDebug bool\n\n\tclient *http.Client\n\tmethodsAndPaths map[string]methodAndPath\n}\n\n\/\/ New creates a new default runtime for a swagger api client.\nfunc New(swaggerSpec *spec.Document) *Runtime {\n\tvar rt Runtime\n\trt.DefaultMediaType = httpkit.JSONMime\n\n\t\/\/ TODO: actually infer this stuff from the spec\n\trt.Consumers = map[string]httpkit.Consumer{\n\t\thttpkit.JSONMime: httpkit.JSONConsumer(),\n\t}\n\trt.Producers = map[string]httpkit.Producer{\n\t\thttpkit.JSONMime: httpkit.JSONProducer(),\n\t}\n\trt.Spec = swaggerSpec\n\trt.Transport = http.DefaultTransport\n\trt.client = http.DefaultClient\n\trt.client.Transport = rt.Transport\n\trt.Host = swaggerSpec.Host()\n\trt.BasePath = swaggerSpec.BasePath()\n\tif !strings.HasPrefix(rt.BasePath, \"\/\") {\n\t\trt.BasePath = \"\/\" + rt.BasePath\n\t}\n\trt.Debug = os.Getenv(\"DEBUG\") == \"1\"\n\tschemes := swaggerSpec.Spec().Schemes\n\tif len(schemes) == 0 {\n\t\tschemes = append(schemes, \"http\")\n\t}\n\trt.methodsAndPaths = make(map[string]methodAndPath)\n\tfor mth, pathItem := range rt.Spec.Operations() {\n\t\tfor pth, op := range pathItem {\n\t\t\tif len(op.Schemes) > 0 {\n\t\t\t\trt.methodsAndPaths[op.ID] = methodAndPath{mth, pth, op.Schemes}\n\t\t\t} else {\n\t\t\t\trt.methodsAndPaths[op.ID] = methodAndPath{mth, pth, schemes}\n\t\t\t}\n\t\t}\n\t}\n\treturn &rt\n}\n\n\/\/ Submit a request and when there is a body on success it will turn that into the result\n\/\/ all other things are turned into an api error for swagger which retains the status code\nfunc (r *Runtime) Submit(context *client.Operation) (interface{}, error) {\n\toperationID, params, readResponse, auth := context.ID, context.Params, context.Reader, context.AuthInfo\n\tmthPth, ok := r.methodsAndPaths[operationID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown operation: %q\", operationID)\n\t}\n\trequest, err := newRequest(mthPth.Method, mthPth.PathPattern, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO: infer most appropriate content type\n\trequest.SetHeaderParam(httpkit.HeaderContentType, r.DefaultMediaType)\n\tvar accept []string\n\tfor k := range r.Consumers {\n\t\taccept = append(accept, k)\n\t}\n\n\trequest.SetHeaderParam(httpkit.HeaderAccept, accept...)\n\n\tif auth == nil && r.DefaultAuthentication != nil {\n\t\tauth = r.DefaultAuthentication\n\t}\n\tif auth != nil {\n\t\tif err := auth.AuthenticateRequest(request, r.Formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := request.BuildHTTP(r.Producers[r.DefaultMediaType], r.Formats)\n\n\t\/\/ set the scheme\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\tschLen := len(mthPth.Schemes)\n\tif schLen > 0 {\n\t\tscheme := mthPth.Schemes[0]\n\t\t\/\/ prefer https, but skip when not possible\n\t\tif scheme != \"https\" && schLen > 1 {\n\t\t\tfor _, sch := range mthPth.Schemes {\n\t\t\t\tif sch == \"https\" {\n\t\t\t\t\tscheme = sch\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treq.URL.Scheme = scheme\n\t}\n\n\treq.URL.Host = r.Host\n\treq.URL.Path = filepath.Join(r.BasePath, req.URL.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.client.Transport = r.Transport\n\tif r.Debug {\n\t\tb, err := httputil.DumpRequestOut(req, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(string(b))\n\t}\n\tres, err := r.client.Do(req) \/\/ make requests, by default follows 10 redirects before failing\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.Debug {\n\t\tb, err := httputil.DumpResponse(res, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(string(b))\n\t}\n\tct := res.Header.Get(httpkit.HeaderContentType)\n\tif ct == \"\" { \/\/ this should really really never occur\n\t\tct = r.DefaultMediaType\n\t}\n\n\t\/\/ TODO: normalize this (ct) and only match on media type,\n\t\/\/ skip the params like charset unless a tie breaker is needed\n\tmt, _, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse content type: %s\", err)\n\t}\n\tcons, ok := r.Consumers[mt]\n\tif !ok {\n\t\t\/\/ scream about not knowing what to do\n\t\treturn nil, fmt.Errorf(\"no consumer: %q\", ct)\n\t}\n\treturn readResponse.ReadResponse(response{res}, cons)\n}\n<commit_msg>fixes #147 add brackets to json unmarshaller<commit_after>\/\/ Copyright 2015 go-swagger maintainers\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/go-swagger\/go-swagger\/client\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\"\n\t\"github.com\/go-swagger\/go-swagger\/spec\"\n\t\"github.com\/go-swagger\/go-swagger\/strfmt\"\n)\n\n\/\/ Runtime represents an API client that uses the transport\n\/\/ to make http requests based on a swagger specification.\ntype Runtime struct {\n\tDefaultMediaType string\n\tDefaultAuthentication client.AuthInfoWriter\n\tConsumers map[string]httpkit.Consumer\n\tProducers map[string]httpkit.Producer\n\n\tTransport http.RoundTripper\n\tSpec *spec.Document\n\tHost string\n\tBasePath string\n\tFormats strfmt.Registry\n\tDebug bool\n\n\tclient *http.Client\n\tmethodsAndPaths map[string]methodAndPath\n}\n\n\/\/ New creates a new default runtime for a swagger api client.\nfunc New(swaggerSpec *spec.Document) *Runtime {\n\tvar rt Runtime\n\trt.DefaultMediaType = httpkit.JSONMime\n\n\t\/\/ TODO: actually infer this stuff from the spec\n\trt.Consumers = map[string]httpkit.Consumer{\n\t\thttpkit.JSONMime: httpkit.JSONConsumer(),\n\t}\n\trt.Producers = map[string]httpkit.Producer{\n\t\thttpkit.JSONMime: httpkit.JSONProducer(),\n\t}\n\trt.Spec = swaggerSpec\n\trt.Transport = http.DefaultTransport\n\trt.client = http.DefaultClient\n\trt.client.Transport = rt.Transport\n\trt.Host = swaggerSpec.Host()\n\trt.BasePath = swaggerSpec.BasePath()\n\tif !strings.HasPrefix(rt.BasePath, \"\/\") {\n\t\trt.BasePath = \"\/\" + rt.BasePath\n\t}\n\trt.Debug = os.Getenv(\"DEBUG\") == \"1\"\n\tschemes := swaggerSpec.Spec().Schemes\n\tif len(schemes) == 0 {\n\t\tschemes = append(schemes, \"http\")\n\t}\n\trt.methodsAndPaths = make(map[string]methodAndPath)\n\tfor mth, pathItem := range rt.Spec.Operations() {\n\t\tfor pth, op := range pathItem {\n\t\t\tif len(op.Schemes) > 0 {\n\t\t\t\trt.methodsAndPaths[op.ID] = methodAndPath{mth, pth, op.Schemes}\n\t\t\t} else {\n\t\t\t\trt.methodsAndPaths[op.ID] = methodAndPath{mth, pth, schemes}\n\t\t\t}\n\t\t}\n\t}\n\treturn &rt\n}\n\n\/\/ Submit a request and when there is a body on success it will turn that into the result\n\/\/ all other things are turned into an api error for swagger which retains the status code\nfunc (r *Runtime) Submit(context *client.Operation) (interface{}, error) {\n\toperationID, params, readResponse, auth := context.ID, context.Params, context.Reader, context.AuthInfo\n\tmthPth, ok := r.methodsAndPaths[operationID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown operation: %q\", operationID)\n\t}\n\trequest, err := newRequest(mthPth.Method, mthPth.PathPattern, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO: infer most appropriate content type\n\trequest.SetHeaderParam(httpkit.HeaderContentType, r.DefaultMediaType)\n\tvar accept []string\n\tfor k := range r.Consumers {\n\t\taccept = append(accept, k)\n\t}\n\n\trequest.SetHeaderParam(httpkit.HeaderAccept, accept...)\n\n\tif auth == nil && r.DefaultAuthentication != nil {\n\t\tauth = r.DefaultAuthentication\n\t}\n\tif auth != nil {\n\t\tif err := auth.AuthenticateRequest(request, r.Formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := request.BuildHTTP(r.Producers[r.DefaultMediaType], r.Formats)\n\n\t\/\/ set the scheme\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\tschLen := len(mthPth.Schemes)\n\tif schLen > 0 {\n\t\tscheme := mthPth.Schemes[0]\n\t\t\/\/ prefer https, but skip when not possible\n\t\tif scheme != \"https\" && schLen > 1 {\n\t\t\tfor _, sch := range mthPth.Schemes {\n\t\t\t\tif sch == \"https\" {\n\t\t\t\t\tscheme = sch\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treq.URL.Scheme = scheme\n\t}\n\n\treq.URL.Host = r.Host\n\treq.URL.Path = filepath.Join(r.BasePath, req.URL.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.client.Transport = r.Transport\n\tif r.Debug {\n\t\tb, err := httputil.DumpRequestOut(req, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(string(b))\n\t}\n\tres, err := r.client.Do(req) \/\/ make requests, by default follows 10 redirects before failing\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tif r.Debug {\n\t\tb, err := httputil.DumpResponse(res, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(string(b))\n\t}\n\tct := res.Header.Get(httpkit.HeaderContentType)\n\tif ct == \"\" { \/\/ this should really really never occur\n\t\tct = r.DefaultMediaType\n\t}\n\n\tmt, _, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse content type: %s\", err)\n\t}\n\tcons, ok := r.Consumers[mt]\n\tif !ok {\n\t\t\/\/ scream about not knowing what to do\n\t\treturn nil, fmt.Errorf(\"no consumer: %q\", ct)\n\t}\n\treturn readResponse.ReadResponse(response{res}, cons)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage github\n\nimport (\n\t\"github.com\/DataDrake\/cuppa\/results\"\n\t\"regexp\"\n)\n\nconst (\n\t\/\/ SourceFormat is the format string for Github release tarballs\n\tSourceFormat = \"https:\/\/github.com\/%s\/archive\/%s.tar.gz\"\n)\n\n\/\/ SourceRegex is the regex for Github sources\nvar SourceRegex = regexp.MustCompile(\"github.com\/([^\/]+\/[^\/.]+)\")\n\n\/\/ VersionRegex is used to parse Github version numbers\nvar VersionRegex = regexp.MustCompile(\"(?:\\\\d+\\\\.)*\\\\d+\\\\w*\")\n\n\/\/ Provider is the upstream provider interface for github\ntype Provider struct{}\n\n\/\/ Latest finds the newest release for a github package\nfunc (c Provider) Latest(name string) (r *results.Result, s results.Status) {\n\trs, s := c.GetReleases(name, 1)\n\tif s != results.OK {\n\t\treturn\n\t}\n\tr = rs.Last()\n\tif r == nil {\n\t\ts = results.NotFound\n\t}\n\treturn\n}\n\n\/\/ Match checks to see if this provider can handle this kind of query\nfunc (c Provider) Match(query string) string {\n\tsm := SourceRegex.FindStringSubmatch(query)\n\tif len(sm) != 2 {\n\t\treturn \"\"\n\t}\n\treturn sm[1]\n}\n\n\/\/ Name gives the name of this provider\nfunc (c Provider) Name() string {\n\treturn \"GitHub\"\n}\n\n\/\/ Releases finds all matching releases for a github package\nfunc (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\trs, s = c.GetReleases(name, 50)\n\treturn\n}\n<commit_msg>Up limit of retrieved objects for github<commit_after>\/\/\n\/\/ Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage github\n\nimport (\n\t\"github.com\/DataDrake\/cuppa\/results\"\n\t\"regexp\"\n)\n\nconst (\n\t\/\/ SourceFormat is the format string for Github release tarballs\n\tSourceFormat = \"https:\/\/github.com\/%s\/archive\/%s.tar.gz\"\n)\n\n\/\/ SourceRegex is the regex for Github sources\nvar SourceRegex = regexp.MustCompile(\"github.com\/([^\/]+\/[^\/.]+)\")\n\n\/\/ VersionRegex is used to parse Github version numbers\nvar VersionRegex = regexp.MustCompile(\"(?:\\\\d+\\\\.)*\\\\d+\\\\w*\")\n\n\/\/ Provider is the upstream provider interface for github\ntype Provider struct{}\n\n\/\/ Latest finds the newest release for a github package\nfunc (c Provider) Latest(name string) (r *results.Result, s results.Status) {\n\trs, s := c.GetReleases(name, 1)\n\tif s != results.OK {\n\t\treturn\n\t}\n\tr = rs.Last()\n\tif r == nil {\n\t\ts = results.NotFound\n\t}\n\treturn\n}\n\n\/\/ Match checks to see if this provider can handle this kind of query\nfunc (c Provider) Match(query string) string {\n\tsm := SourceRegex.FindStringSubmatch(query)\n\tif len(sm) != 2 {\n\t\treturn \"\"\n\t}\n\treturn sm[1]\n}\n\n\/\/ Name gives the name of this provider\nfunc (c Provider) Name() string {\n\treturn \"GitHub\"\n}\n\n\/\/ Releases finds all matching releases for a github package\nfunc (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\trs, s = c.GetReleases(name, 100)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package config for config files.\npackage client\n\n\/\/ LibraryVersion specifies the current version of twilio-go.\nconst LibraryVersion = \"0.25.0\"\n<commit_msg>Release v0.26.0<commit_after>\/\/ Package config for config files.\npackage client\n\n\/\/ LibraryVersion specifies the current version of twilio-go.\nconst LibraryVersion = \"0.26.0\"\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/rqlite\/rqlite\/command\"\n\t\"github.com\/rqlite\/rqlite\/tcp\/pool\"\n)\n\nconst (\n\tinitialPoolSize = 4\n\tmaxPoolCapacity = 64\n)\n\n\/\/ Client allows communicating with a remote node.\ntype Client struct {\n\tdialer Dialer\n\ttimeout time.Duration\n\n\tmu sync.RWMutex\n\tpools map[string]pool.Pool\n}\n\n\/\/ NewClient returns a client instance for talking to a remote node.\nfunc NewClient(dl Dialer) *Client {\n\treturn &Client{\n\t\tdialer: dl,\n\t\ttimeout: 30 * time.Second,\n\t\tpools: make(map[string]pool.Pool),\n\t}\n}\n\n\/\/ GetNodeAPIAddr retrieves the API Address for the node at nodeAddr\nfunc (c *Client) GetNodeAPIAddr(nodeAddr string) (string, error) {\n\tconn, err := c.dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Send the request\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_GET_NODE_API_URL,\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"write protobuf length: %s\", err)\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"write protobuf: %s\", err)\n\t}\n\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"read protobuf bytes: %s\", err)\n\t}\n\n\ta := &Address{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"protobuf unmarshal: %s\", err)\n\t}\n\n\treturn a.Url, nil\n}\n\n\/\/ Execute performs an Execute on a remote node.\nfunc (c *Client) Execute(er *command.ExecuteRequest, nodeAddr string, timeout time.Duration) ([]*command.ExecuteResult, error) {\n\tconn, err := c.dialer.Dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dial connection: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Create the request.\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_EXECUTE,\n\t\tRequest: &Command_ExecuteRequest{\n\t\t\tExecuteRequest: er,\n\t\t},\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta := &CommandExecuteResponse{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Error != \"\" {\n\t\treturn nil, errors.New(a.Error)\n\t}\n\treturn a.Results, nil\n}\n\n\/\/ Query performs an Query on a remote node.\nfunc (c *Client) Query(qr *command.QueryRequest, nodeAddr string, timeout time.Duration) ([]*command.QueryRows, error) {\n\tconn, err := c.dialer.Dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dial connection: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Create the request.\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_QUERY,\n\t\tRequest: &Command_QueryRequest{\n\t\t\tQueryRequest: qr,\n\t\t},\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf, the Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta := &CommandQueryResponse{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Error != \"\" {\n\t\treturn nil, errors.New(a.Error)\n\t}\n\treturn a.Rows, nil\n}\n\n\/\/ Stats returns stats on the Client instance\nfunc (c *Client) Stats() (map[string]interface{}, error) {\n\treturn map[string]interface{}{\n\t\t\"timeout\": c.timeout,\n\t}, nil\n}\n\nfunc (c *Client) dial(nodeAddr string, timeout time.Duration) (net.Conn, error) {\n\tvar pl pool.Pool\n\tvar ok bool\n\n\tc.mu.RLock()\n\tpl, ok = c.pools[nodeAddr]\n\tc.mu.RUnlock()\n\n\t\/\/ Do we need a new pool for the given address?\n\tif !ok {\n\t\tif err := func() error {\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tpl, ok = c.pools[nodeAddr]\n\t\t\tif ok {\n\t\t\t\treturn nil \/\/ Pool was inserted just after we checked.\n\t\t\t}\n\n\t\t\t\/\/ New pool is needed for given address.\n\t\t\tfactory := func() (net.Conn, error) { return c.dialer.Dial(nodeAddr, c.timeout) }\n\t\t\tp, err := pool.NewChannelPool(initialPoolSize, maxPoolCapacity, factory)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.pools[nodeAddr] = p\n\t\t\tpl = p\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Got pool, now get a connection.\n\tconn, err := pl.Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"pool get: %s\", err)\n\t}\n\treturn conn, nil\n}\n\nfunc handleConnError(conn net.Conn) {\n\tif pc, ok := conn.(*pool.PoolConn); ok {\n\t\tpc.MarkUnusable()\n\t}\n}\n<commit_msg>Use connection pool with Cluster Execute and Query<commit_after>package cluster\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/rqlite\/rqlite\/command\"\n\t\"github.com\/rqlite\/rqlite\/tcp\/pool\"\n)\n\nconst (\n\tinitialPoolSize = 4\n\tmaxPoolCapacity = 64\n)\n\n\/\/ Client allows communicating with a remote node.\ntype Client struct {\n\tdialer Dialer\n\ttimeout time.Duration\n\n\tmu sync.RWMutex\n\tpools map[string]pool.Pool\n}\n\n\/\/ NewClient returns a client instance for talking to a remote node.\nfunc NewClient(dl Dialer) *Client {\n\treturn &Client{\n\t\tdialer: dl,\n\t\ttimeout: 30 * time.Second,\n\t\tpools: make(map[string]pool.Pool),\n\t}\n}\n\n\/\/ GetNodeAPIAddr retrieves the API Address for the node at nodeAddr\nfunc (c *Client) GetNodeAPIAddr(nodeAddr string) (string, error) {\n\tconn, err := c.dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Send the request\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_GET_NODE_API_URL,\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"write protobuf length: %s\", err)\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"write protobuf: %s\", err)\n\t}\n\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn \"\", fmt.Errorf(\"read protobuf bytes: %s\", err)\n\t}\n\n\ta := &Address{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"protobuf unmarshal: %s\", err)\n\t}\n\n\treturn a.Url, nil\n}\n\n\/\/ Execute performs an Execute on a remote node.\nfunc (c *Client) Execute(er *command.ExecuteRequest, nodeAddr string, timeout time.Duration) ([]*command.ExecuteResult, error) {\n\tconn, err := c.dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Create the request.\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_EXECUTE,\n\t\tRequest: &Command_ExecuteRequest{\n\t\t\tExecuteRequest: er,\n\t\t},\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\n\ta := &CommandExecuteResponse{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Error != \"\" {\n\t\treturn nil, errors.New(a.Error)\n\t}\n\treturn a.Results, nil\n}\n\n\/\/ Query performs an Query on a remote node.\nfunc (c *Client) Query(qr *command.QueryRequest, nodeAddr string, timeout time.Duration) ([]*command.QueryRows, error) {\n\tconn, err := c.dial(nodeAddr, c.timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Create the request.\n\tcommand := &Command{\n\t\tType: Command_COMMAND_TYPE_QUERY,\n\t\tRequest: &Command_QueryRequest{\n\t\t\tQueryRequest: qr,\n\t\t},\n\t}\n\tp, err := proto.Marshal(command)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"command marshal: %s\", err)\n\t}\n\n\t\/\/ Write length of Protobuf, the Protobuf\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(b[0:], uint16(len(p)))\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\t_, err = conn.Write(p)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\n\tif err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\tb, err = ioutil.ReadAll(conn)\n\tif err != nil {\n\t\thandleConnError(conn)\n\t\treturn nil, err\n\t}\n\n\ta := &CommandQueryResponse{}\n\terr = proto.Unmarshal(b, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Error != \"\" {\n\t\treturn nil, errors.New(a.Error)\n\t}\n\treturn a.Rows, nil\n}\n\n\/\/ Stats returns stats on the Client instance\nfunc (c *Client) Stats() (map[string]interface{}, error) {\n\treturn map[string]interface{}{\n\t\t\"timeout\": c.timeout,\n\t}, nil\n}\n\nfunc (c *Client) dial(nodeAddr string, timeout time.Duration) (net.Conn, error) {\n\tvar pl pool.Pool\n\tvar ok bool\n\n\tc.mu.RLock()\n\tpl, ok = c.pools[nodeAddr]\n\tc.mu.RUnlock()\n\n\t\/\/ Do we need a new pool for the given address?\n\tif !ok {\n\t\tif err := func() error {\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tpl, ok = c.pools[nodeAddr]\n\t\t\tif ok {\n\t\t\t\treturn nil \/\/ Pool was inserted just after we checked.\n\t\t\t}\n\n\t\t\t\/\/ New pool is needed for given address.\n\t\t\tfactory := func() (net.Conn, error) { return c.dialer.Dial(nodeAddr, c.timeout) }\n\t\t\tp, err := pool.NewChannelPool(initialPoolSize, maxPoolCapacity, factory)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.pools[nodeAddr] = p\n\t\t\tpl = p\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Got pool, now get a connection.\n\tconn, err := pl.Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"pool get: %s\", err)\n\t}\n\treturn conn, nil\n}\n\nfunc handleConnError(conn net.Conn) {\n\tif pc, ok := conn.(*pool.PoolConn); ok {\n\t\tpc.MarkUnusable()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cihub\/seelog\"\n\t\"github.com\/pivotal-golang\/lager\"\n\n\tecr \"github.com\/awslabs\/amazon-ecr-credential-helper\/ecr-login\"\n\tecrapi \"github.com\/awslabs\/amazon-ecr-credential-helper\/ecr-login\/api\"\n\t\"github.com\/concourse\/retryhttp\"\n\t\"github.com\/docker\/distribution\/digest\"\n\t_ \"github.com\/docker\/distribution\/manifest\/schema1\"\n\t_ \"github.com\/docker\/distribution\/manifest\/schema2\"\n\t\"github.com\/docker\/distribution\/reference\"\n\tv2 \"github.com\/docker\/distribution\/registry\/api\/v2\"\n\t\"github.com\/docker\/distribution\/registry\/client\/auth\"\n\t\"github.com\/docker\/distribution\/registry\/client\/transport\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/pivotal-golang\/clock\"\n)\n\nfunc main() {\n\tlogger := lager.NewLogger(\"http\")\n\trECRRepo, err := regexp.Compile(`[a-zA-Z0-9][a-zA-Z0-9_-]*\\.dkr\\.ecr\\.[a-zA-Z0-9][a-zA-Z0-9_-]*\\.amazonaws\\.com(\\.cn)?[^ ]*`)\n\tfatalIf(\"failed to compile ECR regex\", err)\n\n\tvar request CheckRequest\n\terr = json.NewDecoder(os.Stdin).Decode(&request)\n\tfatalIf(\"failed to read request\", err)\n\n\tos.Setenv(\"AWS_ACCESS_KEY_ID\", request.Source.AWSAccessKeyID)\n\tos.Setenv(\"AWS_SECRET_ACCESS_KEY\", request.Source.AWSSecretAccessKey)\n\tos.Setenv(\"AWS_SESSION_TOKEN\", request.Source.AWSSessionToken)\n\n\t\/\/ silence benign ecr-login errors\/warnings\n\tseelog.UseLogger(seelog.Disabled)\n\n\tif rECRRepo.MatchString(request.Source.Repository) == true {\n\t\tecrUser, ecrPass, err := ecr.ECRHelper{\n\t\t\tClientFactory: ecrapi.DefaultClientFactory{},\n\t\t}.Get(request.Source.Repository)\n\t\tfatalIf(\"failed to get ECR credentials\", err)\n\t\trequest.Source.Username = ecrUser\n\t\trequest.Source.Password = ecrPass\n\t}\n\n\tregistryHost, repo := parseRepository(request.Source.Repository)\n\n\tif len(request.Source.RegistryMirror) > 0 {\n\t\tregistryMirrorUrl, err := url.Parse(request.Source.RegistryMirror)\n\t\tfatalIf(\"failed to parse registry mirror URL\", err)\n\t\tregistryHost = registryMirrorUrl.Host\n\t}\n\n\ttag := string(request.Source.Tag)\n\tif tag == \"\" {\n\t\ttag = \"latest\"\n\t}\n\n\ttransport, registryURL := makeTransport(logger, request, registryHost, repo)\n\n\tclient := &http.Client{\n\t\tTransport: retryRoundTripper(logger, transport),\n\t}\n\n\tub, err := v2.NewURLBuilderFromString(registryURL, false)\n\tfatalIf(\"failed to construct registry URL builder\", err)\n\n\tnamedRef, err := reference.WithName(repo)\n\tfatalIf(\"failed to construct named reference\", err)\n\n\tvar response CheckResponse\n\n\ttaggedRef, err := reference.WithTag(namedRef, tag)\n\tfatalIf(\"failed to construct tagged reference\", err)\n\n\tlatestManifestURL, err := ub.BuildManifestURL(taggedRef)\n\tfatalIf(\"failed to build latest manifest URL\", err)\n\n\tlatestDigest, foundLatest := fetchDigest(client, latestManifestURL, request.Source.Repository, tag)\n\n\tif request.Version.Digest != \"\" {\n\t\tdigestRef, err := reference.WithDigest(namedRef, digest.Digest(request.Version.Digest))\n\t\tfatalIf(\"failed to build cursor manifest URL\", err)\n\n\t\tcursorManifestURL, err := ub.BuildManifestURL(digestRef)\n\t\tfatalIf(\"failed to build manifest URL\", err)\n\n\t\tcursorDigest, foundCursor := fetchDigest(client, cursorManifestURL, request.Source.Repository, tag)\n\n\t\tif foundCursor && cursorDigest != latestDigest {\n\t\t\tresponse = append(response, Version{cursorDigest})\n\t\t}\n\t}\n\n\tif foundLatest {\n\t\tresponse = append(response, Version{latestDigest})\n\t}\n\n\tjson.NewEncoder(os.Stdout).Encode(response)\n}\n\nfunc fetchDigest(client *http.Client, manifestURL, repository, tag string) (string, bool) {\n\tmanifestRequest, err := http.NewRequest(\"HEAD\", manifestURL, nil)\n\tfatalIf(\"failed to build manifest request\", err)\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/vnd.docker.distribution.manifest.v2+json\")\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/json\")\n\n\tmanifestResponse, err := client.Do(manifestRequest)\n\tfatalIf(\"failed to fetch manifest\", err)\n\n\tdefer manifestResponse.Body.Close()\n\tif manifestResponse.StatusCode == http.StatusNotFound {\n\t\treturn \"\", false\n\t}\n\n\tif manifestResponse.StatusCode != http.StatusOK {\n\t\tfatal(fmt.Sprintf(\"failed to fetch digest for image '%s:%s': %s\\ndoes the image exist?\", repository, tag, manifestResponse.Status))\n\t}\n\n\tdigest := manifestResponse.Header.Get(\"Docker-Content-Digest\")\n\tif digest == \"\" {\n\t\tfatal(\"no digest header returned\")\n\t}\n\n\treturn digest, true\n}\n\nfunc makeTransport(logger lager.Logger, request CheckRequest, registryHost string, repository string) (http.RoundTripper, string) {\n\t\/\/ for non self-signed registries, caCertPool must be nil in order to use the system certs\n\tvar caCertPool *x509.CertPool\n\tif len(request.Source.DomainCerts) > 0 {\n\t\tcaCertPool = x509.NewCertPool()\n\t\tfor _, domainCert := range request.Source.DomainCerts {\n\t\t\tok := caCertPool.AppendCertsFromPEM([]byte(domainCert.Cert))\n\t\t\tif !ok {\n\t\t\t\tfatal(fmt.Sprintf(\"failed to parse CA certificate for \\\"%s\\\"\", domainCert.Domain))\n\t\t\t}\n\t\t}\n\t}\n\n\tbaseTransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t\tDualStack: true,\n\t\t}).Dial,\n\t\tDisableKeepAlives: true,\n\t\tTLSClientConfig: &tls.Config{RootCAs: caCertPool},\n\t}\n\n\tvar insecure bool\n\tfor _, hostOrCIDR := range request.Source.InsecureRegistries {\n\t\tif isInsecure(hostOrCIDR, registryHost) {\n\t\t\tinsecure = true\n\t\t}\n\t}\n\n\tif insecure {\n\t\tbaseTransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tif len(request.Source.ClientCerts) > 0 {\n\t\tbaseTransport.TLSClientConfig = &tls.Config{\n\t\t\tRootCAs: caCertPool,\n\t\t\tCertificates: setClientCert(registryHost, request.Source.ClientCerts),\n\t\t}\n\t}\n\n\tauthTransport := transport.NewTransport(baseTransport)\n\n\tpingClient := &http.Client{\n\t\tTransport: retryRoundTripper(logger, authTransport),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\n\tchallengeManager := auth.NewSimpleChallengeManager()\n\n\tvar registryURL string\n\n\tvar pingResp *http.Response\n\tvar pingErr error\n\tvar pingErrs error\n\tfor _, scheme := range []string{\"https\", \"http\"} {\n\t\tregistryURL = scheme + \":\/\/\" + registryHost\n\n\t\treq, err := http.NewRequest(\"GET\", registryURL+\"\/v2\/\", nil)\n\t\tfatalIf(\"failed to create ping request\", err)\n\n\t\tpingResp, pingErr = pingClient.Do(req)\n\t\tif pingErr == nil {\n\t\t\t\/\/ clear out previous attempts' failures\n\t\t\tpingErrs = nil\n\t\t\tbreak\n\t\t}\n\n\t\tpingErrs = multierror.Append(\n\t\t\tpingErrs,\n\t\t\tfmt.Errorf(\"ping %s: %s\", scheme, pingErr),\n\t\t)\n\t}\n\tfatalIf(\"failed to ping registry\", pingErrs)\n\n\tdefer pingResp.Body.Close()\n\n\terr := challengeManager.AddResponse(pingResp)\n\tfatalIf(\"failed to add response to challenge manager\", err)\n\n\tcredentialStore := dumbCredentialStore{request.Source.Username, request.Source.Password}\n\ttokenHandler := auth.NewTokenHandler(authTransport, credentialStore, repository, \"pull\")\n\tbasicHandler := auth.NewBasicHandler(credentialStore)\n\tauthorizer := auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)\n\n\treturn transport.NewTransport(baseTransport, authorizer), registryURL\n}\n\ntype dumbCredentialStore struct {\n\tusername string\n\tpassword string\n}\n\nfunc (dcs dumbCredentialStore) Basic(*url.URL) (string, string) {\n\treturn dcs.username, dcs.password\n}\n\nfunc (dumbCredentialStore) RefreshToken(u *url.URL, service string) string {\n\treturn \"\"\n}\n\nfunc (dumbCredentialStore) SetRefreshToken(u *url.URL, service, token string) {\n}\n\nfunc fatalIf(doing string, err error) {\n\tif err != nil {\n\t\tfatal(doing + \": \" + err.Error())\n\t}\n}\n\nfunc fatal(message string) {\n\tprintln(message)\n\tos.Exit(1)\n}\n\nconst officialRegistry = \"registry-1.docker.io\"\n\nfunc parseRepository(repository string) (string, string) {\n\tsegs := strings.Split(repository, \"\/\")\n\n\tif len(segs) > 1 && (strings.Contains(segs[0], \":\") || strings.Contains(segs[0], \".\")) {\n\t\t\/\/ In a private regsitry pretty much anything is valid.\n\t\treturn segs[0], strings.Join(segs[1:], \"\/\")\n\t}\n\tswitch len(segs) {\n\tcase 3:\n\t\treturn segs[0], segs[1] + \"\/\" + segs[2]\n\tcase 2:\n\t\treturn officialRegistry, segs[0] + \"\/\" + segs[1]\n\tcase 1:\n\t\treturn officialRegistry, \"library\/\" + segs[0]\n\t}\n\n\tfatal(\"malformed repository url\")\n\tpanic(\"unreachable\")\n}\n\nfunc isInsecure(hostOrCIDR string, hostPort string) bool {\n\thost, _, err := net.SplitHostPort(hostPort)\n\tif err != nil {\n\t\treturn hostOrCIDR == hostPort\n\t}\n\n\t_, cidr, err := net.ParseCIDR(hostOrCIDR)\n\tif err == nil {\n\t\tip := net.ParseIP(host)\n\t\tif ip != nil {\n\t\t\treturn cidr.Contains(ip)\n\t\t}\n\t}\n\n\treturn hostOrCIDR == hostPort\n}\n\nfunc retryRoundTripper(logger lager.Logger, rt http.RoundTripper) http.RoundTripper {\n\treturn &retryhttp.RetryRoundTripper{\n\t\tLogger: logger,\n\t\tSleeper: clock.NewClock(),\n\t\tRetryPolicy: retryhttp.ExponentialRetryPolicy{\n\t\t\tTimeout: 5 * time.Minute,\n\t\t},\n\t\tRoundTripper: rt,\n\t}\n}\n\nfunc setClientCert(registry string, list []ClientCertKey) []tls.Certificate {\n\tvar clientCert []tls.Certificate\n\tfor _, r := range list {\n\t\tif r.Domain == registry {\n\t\t\tcertKey, err := tls.X509KeyPair([]byte(r.Cert), []byte(r.Key))\n\t\t\tif err != nil {\n\t\t\t\tfatal(fmt.Sprintf(\"failed to parse client certificate and\/or key for \\\"%s\\\"\", r.Domain))\n\t\t\t}\n\t\t\tclientCert = append(clientCert, certKey)\n\t\t}\n\t}\n\treturn clientCert\n}\n<commit_msg>check: use GET if if HEAD doesnt return digest<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cihub\/seelog\"\n\t\"github.com\/pivotal-golang\/lager\"\n\n\tecr \"github.com\/awslabs\/amazon-ecr-credential-helper\/ecr-login\"\n\tecrapi \"github.com\/awslabs\/amazon-ecr-credential-helper\/ecr-login\/api\"\n\t\"github.com\/concourse\/retryhttp\"\n\t\"github.com\/docker\/distribution\"\n\t\"github.com\/docker\/distribution\/digest\"\n\t_ \"github.com\/docker\/distribution\/manifest\/schema1\"\n\t_ \"github.com\/docker\/distribution\/manifest\/schema2\"\n\t\"github.com\/docker\/distribution\/reference\"\n\tv2 \"github.com\/docker\/distribution\/registry\/api\/v2\"\n\t\"github.com\/docker\/distribution\/registry\/client\/auth\"\n\t\"github.com\/docker\/distribution\/registry\/client\/transport\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/pivotal-golang\/clock\"\n)\n\nfunc main() {\n\tlogger := lager.NewLogger(\"http\")\n\trECRRepo, err := regexp.Compile(`[a-zA-Z0-9][a-zA-Z0-9_-]*\\.dkr\\.ecr\\.[a-zA-Z0-9][a-zA-Z0-9_-]*\\.amazonaws\\.com(\\.cn)?[^ ]*`)\n\tfatalIf(\"failed to compile ECR regex\", err)\n\n\tvar request CheckRequest\n\terr = json.NewDecoder(os.Stdin).Decode(&request)\n\tfatalIf(\"failed to read request\", err)\n\n\tos.Setenv(\"AWS_ACCESS_KEY_ID\", request.Source.AWSAccessKeyID)\n\tos.Setenv(\"AWS_SECRET_ACCESS_KEY\", request.Source.AWSSecretAccessKey)\n\tos.Setenv(\"AWS_SESSION_TOKEN\", request.Source.AWSSessionToken)\n\n\t\/\/ silence benign ecr-login errors\/warnings\n\tseelog.UseLogger(seelog.Disabled)\n\n\tif rECRRepo.MatchString(request.Source.Repository) == true {\n\t\tecrUser, ecrPass, err := ecr.ECRHelper{\n\t\t\tClientFactory: ecrapi.DefaultClientFactory{},\n\t\t}.Get(request.Source.Repository)\n\t\tfatalIf(\"failed to get ECR credentials\", err)\n\t\trequest.Source.Username = ecrUser\n\t\trequest.Source.Password = ecrPass\n\t}\n\n\tregistryHost, repo := parseRepository(request.Source.Repository)\n\n\tif len(request.Source.RegistryMirror) > 0 {\n\t\tregistryMirrorUrl, err := url.Parse(request.Source.RegistryMirror)\n\t\tfatalIf(\"failed to parse registry mirror URL\", err)\n\t\tregistryHost = registryMirrorUrl.Host\n\t}\n\n\ttag := string(request.Source.Tag)\n\tif tag == \"\" {\n\t\ttag = \"latest\"\n\t}\n\n\ttransport, registryURL := makeTransport(logger, request, registryHost, repo)\n\n\tclient := &http.Client{\n\t\tTransport: retryRoundTripper(logger, transport),\n\t}\n\n\tub, err := v2.NewURLBuilderFromString(registryURL, false)\n\tfatalIf(\"failed to construct registry URL builder\", err)\n\n\tnamedRef, err := reference.WithName(repo)\n\tfatalIf(\"failed to construct named reference\", err)\n\n\tvar response CheckResponse\n\n\ttaggedRef, err := reference.WithTag(namedRef, tag)\n\tfatalIf(\"failed to construct tagged reference\", err)\n\n\tlatestManifestURL, err := ub.BuildManifestURL(taggedRef)\n\tfatalIf(\"failed to build latest manifest URL\", err)\n\n\tlatestDigest, foundLatest := fetchDigest(client, latestManifestURL, request.Source.Repository, tag)\n\n\tif request.Version.Digest != \"\" {\n\t\tdigestRef, err := reference.WithDigest(namedRef, digest.Digest(request.Version.Digest))\n\t\tfatalIf(\"failed to build cursor manifest URL\", err)\n\n\t\tcursorManifestURL, err := ub.BuildManifestURL(digestRef)\n\t\tfatalIf(\"failed to build manifest URL\", err)\n\n\t\tcursorDigest, foundCursor := fetchDigest(client, cursorManifestURL, request.Source.Repository, tag)\n\n\t\tif foundCursor && cursorDigest != latestDigest {\n\t\t\tresponse = append(response, Version{cursorDigest})\n\t\t}\n\t}\n\n\tif foundLatest {\n\t\tresponse = append(response, Version{latestDigest})\n\t}\n\n\tjson.NewEncoder(os.Stdout).Encode(response)\n}\n\nfunc headDigest(client *http.Client, manifestURL, repository, tag string) (string, bool) {\n\tmanifestRequest, err := http.NewRequest(\"HEAD\", manifestURL, nil)\n\tfatalIf(\"failed to build manifest request\", err)\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/vnd.docker.distribution.manifest.v2+json\")\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/json\")\n\n\tmanifestResponse, err := client.Do(manifestRequest)\n\tfatalIf(\"failed to fetch manifest\", err)\n\n\tdefer manifestResponse.Body.Close()\n\n\tif manifestResponse.StatusCode == http.StatusNotFound {\n\t\treturn \"\", false\n\t}\n\n\tif manifestResponse.StatusCode != http.StatusOK {\n\t\tfatal(fmt.Sprintf(\"failed to fetch digest for image '%s:%s': %s\\ndoes the image exist?\", repository, tag, manifestResponse.Status))\n\t}\n\n\tdigest := manifestResponse.Header.Get(\"Docker-Content-Digest\")\n\tif digest == \"\" {\n\t\treturn fetchDigest(client, manifestURL, repository, tag)\n\t}\n\n\treturn digest, true\n}\n\nfunc fetchDigest(client *http.Client, manifestURL, repository, tag string) (string, bool) {\n\tmanifestRequest, err := http.NewRequest(\"GET\", manifestURL, nil)\n\tfatalIf(\"failed to build manifest request\", err)\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/vnd.docker.distribution.manifest.v2+json\")\n\tmanifestRequest.Header.Add(\"Accept\", \"application\/json\")\n\n\tmanifestResponse, err := client.Do(manifestRequest)\n\tfatalIf(\"failed to fetch manifest\", err)\n\n\tdefer manifestResponse.Body.Close()\n\n\tif manifestResponse.StatusCode == http.StatusNotFound {\n\t\treturn \"\", false\n\t}\n\n\tif manifestResponse.StatusCode != http.StatusOK {\n\t\tfatal(fmt.Sprintf(\"failed to fetch digest for image '%s:%s': %s\\ndoes the image exist?\", repository, tag, manifestResponse.Status))\n\t}\n\n\tctHeader := manifestResponse.Header.Get(\"Content-Type\")\n\n\tbytes, err := ioutil.ReadAll(manifestResponse.Body)\n\tfatalIf(\"failed to read response body\", err)\n\n\t_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)\n\tfatalIf(\"failed to unmarshal manifest\", err)\n\n\treturn string(desc.Digest), true\n}\n\nfunc makeTransport(logger lager.Logger, request CheckRequest, registryHost string, repository string) (http.RoundTripper, string) {\n\t\/\/ for non self-signed registries, caCertPool must be nil in order to use the system certs\n\tvar caCertPool *x509.CertPool\n\tif len(request.Source.DomainCerts) > 0 {\n\t\tcaCertPool = x509.NewCertPool()\n\t\tfor _, domainCert := range request.Source.DomainCerts {\n\t\t\tok := caCertPool.AppendCertsFromPEM([]byte(domainCert.Cert))\n\t\t\tif !ok {\n\t\t\t\tfatal(fmt.Sprintf(\"failed to parse CA certificate for \\\"%s\\\"\", domainCert.Domain))\n\t\t\t}\n\t\t}\n\t}\n\n\tbaseTransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t\tDualStack: true,\n\t\t}).Dial,\n\t\tDisableKeepAlives: true,\n\t\tTLSClientConfig: &tls.Config{RootCAs: caCertPool},\n\t}\n\n\tvar insecure bool\n\tfor _, hostOrCIDR := range request.Source.InsecureRegistries {\n\t\tif isInsecure(hostOrCIDR, registryHost) {\n\t\t\tinsecure = true\n\t\t}\n\t}\n\n\tif insecure {\n\t\tbaseTransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tif len(request.Source.ClientCerts) > 0 {\n\t\tbaseTransport.TLSClientConfig = &tls.Config{\n\t\t\tRootCAs: caCertPool,\n\t\t\tCertificates: setClientCert(registryHost, request.Source.ClientCerts),\n\t\t}\n\t}\n\n\tauthTransport := transport.NewTransport(baseTransport)\n\n\tpingClient := &http.Client{\n\t\tTransport: retryRoundTripper(logger, authTransport),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\n\tchallengeManager := auth.NewSimpleChallengeManager()\n\n\tvar registryURL string\n\n\tvar pingResp *http.Response\n\tvar pingErr error\n\tvar pingErrs error\n\tfor _, scheme := range []string{\"https\", \"http\"} {\n\t\tregistryURL = scheme + \":\/\/\" + registryHost\n\n\t\treq, err := http.NewRequest(\"GET\", registryURL+\"\/v2\/\", nil)\n\t\tfatalIf(\"failed to create ping request\", err)\n\n\t\tpingResp, pingErr = pingClient.Do(req)\n\t\tif pingErr == nil {\n\t\t\t\/\/ clear out previous attempts' failures\n\t\t\tpingErrs = nil\n\t\t\tbreak\n\t\t}\n\n\t\tpingErrs = multierror.Append(\n\t\t\tpingErrs,\n\t\t\tfmt.Errorf(\"ping %s: %s\", scheme, pingErr),\n\t\t)\n\t}\n\tfatalIf(\"failed to ping registry\", pingErrs)\n\n\tdefer pingResp.Body.Close()\n\n\terr := challengeManager.AddResponse(pingResp)\n\tfatalIf(\"failed to add response to challenge manager\", err)\n\n\tcredentialStore := dumbCredentialStore{request.Source.Username, request.Source.Password}\n\ttokenHandler := auth.NewTokenHandler(authTransport, credentialStore, repository, \"pull\")\n\tbasicHandler := auth.NewBasicHandler(credentialStore)\n\tauthorizer := auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)\n\n\treturn transport.NewTransport(baseTransport, authorizer), registryURL\n}\n\ntype dumbCredentialStore struct {\n\tusername string\n\tpassword string\n}\n\nfunc (dcs dumbCredentialStore) Basic(*url.URL) (string, string) {\n\treturn dcs.username, dcs.password\n}\n\nfunc (dumbCredentialStore) RefreshToken(u *url.URL, service string) string {\n\treturn \"\"\n}\n\nfunc (dumbCredentialStore) SetRefreshToken(u *url.URL, service, token string) {\n}\n\nfunc fatalIf(doing string, err error) {\n\tif err != nil {\n\t\tfatal(doing + \": \" + err.Error())\n\t}\n}\n\nfunc fatal(message string) {\n\tprintln(message)\n\tos.Exit(1)\n}\n\nconst officialRegistry = \"registry-1.docker.io\"\n\nfunc parseRepository(repository string) (string, string) {\n\tsegs := strings.Split(repository, \"\/\")\n\n\tif len(segs) > 1 && (strings.Contains(segs[0], \":\") || strings.Contains(segs[0], \".\")) {\n\t\t\/\/ In a private regsitry pretty much anything is valid.\n\t\treturn segs[0], strings.Join(segs[1:], \"\/\")\n\t}\n\tswitch len(segs) {\n\tcase 3:\n\t\treturn segs[0], segs[1] + \"\/\" + segs[2]\n\tcase 2:\n\t\treturn officialRegistry, segs[0] + \"\/\" + segs[1]\n\tcase 1:\n\t\treturn officialRegistry, \"library\/\" + segs[0]\n\t}\n\n\tfatal(\"malformed repository url\")\n\tpanic(\"unreachable\")\n}\n\nfunc isInsecure(hostOrCIDR string, hostPort string) bool {\n\thost, _, err := net.SplitHostPort(hostPort)\n\tif err != nil {\n\t\treturn hostOrCIDR == hostPort\n\t}\n\n\t_, cidr, err := net.ParseCIDR(hostOrCIDR)\n\tif err == nil {\n\t\tip := net.ParseIP(host)\n\t\tif ip != nil {\n\t\t\treturn cidr.Contains(ip)\n\t\t}\n\t}\n\n\treturn hostOrCIDR == hostPort\n}\n\nfunc retryRoundTripper(logger lager.Logger, rt http.RoundTripper) http.RoundTripper {\n\treturn &retryhttp.RetryRoundTripper{\n\t\tLogger: logger,\n\t\tSleeper: clock.NewClock(),\n\t\tRetryPolicy: retryhttp.ExponentialRetryPolicy{\n\t\t\tTimeout: 5 * time.Minute,\n\t\t},\n\t\tRoundTripper: rt,\n\t}\n}\n\nfunc setClientCert(registry string, list []ClientCertKey) []tls.Certificate {\n\tvar clientCert []tls.Certificate\n\tfor _, r := range list {\n\t\tif r.Domain == registry {\n\t\t\tcertKey, err := tls.X509KeyPair([]byte(r.Cert), []byte(r.Key))\n\t\t\tif err != nil {\n\t\t\t\tfatal(fmt.Sprintf(\"failed to parse client certificate and\/or key for \\\"%s\\\"\", r.Domain))\n\t\t\t}\n\t\t\tclientCert = append(clientCert, certKey)\n\t\t}\n\t}\n\treturn clientCert\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/KyleBanks\/depth\"\n)\n\nconst (\n\toutputPadding = \" \"\n\toutputPrefix = \"├ \"\n\toutputPrefixLast = \"└ \"\n)\n\nvar (\n\toutputJSON bool\n)\n\nfunc main() {\n\tvar t depth.Tree\n\n\tflag.BoolVar(&t.ResolveInternal, \"internal\", false, \"If set, resolves dependencies of internal (stdlib) packages.\")\n\tflag.BoolVar(&t.ResolveTest, \"test\", false, \"If set, resolves dependencies used for testing.\")\n\tflag.IntVar(&t.MaxDepth, \"max\", 0, \"Sets the maximum depth of dependencies to resolve.\")\n\tflag.BoolVar(&outputJSON, \"json\", false, \"If set, outputs the depencies in JSON format.\")\n\tflag.Parse()\n\n\tfor _, arg := range flag.Args() {\n\t\terr := t.Resolve(arg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif outputJSON {\n\t\t\twritePkgJSON(os.Stdout, *t.Root)\n\t\t\tcontinue\n\t\t}\n\n\t\twritePkg(os.Stdout, *t.Root, 0, false)\n\t}\n}\n\nfunc writePkgJSON(w io.Writer, p depth.Pkg) {\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \" \")\n\te.Encode(p)\n}\n\n\/\/ writePkg recursively prints a Pkg and its dependencies to the Writer provided.\nfunc writePkg(w io.Writer, p depth.Pkg, indent int, isLast bool) {\n\tvar prefix string\n\tif indent > 0 {\n\t\tprefix = outputPrefix\n\n\t\tif isLast {\n\t\t\tprefix = outputPrefixLast\n\t\t}\n\t}\n\n\tout := fmt.Sprintf(\"%v%v%v\\n\", strings.Repeat(outputPadding, indent), prefix, p.Name)\n\tw.Write([]byte(out))\n\n\tfor idx, d := range p.Deps {\n\t\twritePkg(w, d, indent+1, idx == len(p.Deps)-1)\n\t}\n}\n<commit_msg>Minor depth cmd code cleanup<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/KyleBanks\/depth\"\n)\n\nconst (\n\toutputPadding = \" \"\n\toutputPrefix = \"├ \"\n\toutputPrefixLast = \"└ \"\n)\n\nfunc main() {\n\tvar t depth.Tree\n\tvar outputJSON bool\n\n\tflag.BoolVar(&t.ResolveInternal, \"internal\", false, \"If set, resolves dependencies of internal (stdlib) packages.\")\n\tflag.BoolVar(&t.ResolveTest, \"test\", false, \"If set, resolves dependencies used for testing.\")\n\tflag.IntVar(&t.MaxDepth, \"max\", 0, \"Sets the maximum depth of dependencies to resolve.\")\n\tflag.BoolVar(&outputJSON, \"json\", false, \"If set, outputs the depencies in JSON format.\")\n\tflag.Parse()\n\n\tfor _, arg := range flag.Args() {\n\t\terr := t.Resolve(arg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif outputJSON {\n\t\t\twritePkgJSON(os.Stdout, *t.Root)\n\t\t\tcontinue\n\t\t}\n\n\t\twritePkg(os.Stdout, *t.Root, 0, false)\n\t}\n}\n\n\/\/ writePkgJSON writes the full Pkg as JSON to the provided Writer.\nfunc writePkgJSON(w io.Writer, p depth.Pkg) {\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \" \")\n\te.Encode(p)\n}\n\n\/\/ writePkg recursively prints a Pkg and its dependencies to the Writer provided.\nfunc writePkg(w io.Writer, p depth.Pkg, indent int, isLast bool) {\n\tvar prefix string\n\tif indent > 0 {\n\t\tprefix = outputPrefix\n\n\t\tif isLast {\n\t\t\tprefix = outputPrefixLast\n\t\t}\n\t}\n\n\tout := fmt.Sprintf(\"%v%v%v\\n\", strings.Repeat(outputPadding, indent), prefix, p.Name)\n\tw.Write([]byte(out))\n\n\tfor idx, d := range p.Deps {\n\t\twritePkg(w, d, indent+1, idx == len(p.Deps)-1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bryanl\/docli\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/digitalocean\/godo\"\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype tokenSource struct {\n\tAccessToken string\n}\n\nfunc (t *tokenSource) Token() (*oauth2.Token, error) {\n\treturn &oauth2.Token{\n\t\tAccessToken: t.AccessToken,\n\t}, nil\n}\n\nfunc init() {\n\tlogrus.SetOutput(os.Stderr)\n\tlogrus.SetLevel(logrus.WarnLevel)\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"docli\"\n\tapp.Usage = \"DigitalOcean API CLI\"\n\tapp.Version = \"0.1.0\"\n\tapp.Flags = []cli.Flag{\n\t\ttokenFlag(),\n\t\tdebugFlag(),\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\taccountCommands(),\n\t\tactionCommands(),\n\t\tdomainCommands(),\n\t\tdropletCommands(),\n\t\tdropletActionCommands(),\n\t\timageActionCommands(),\n\t\timageCommands(),\n\t\tregionCommands(),\n\t\tsizeCommands(),\n\t\tsshKeyCommands(),\n\t}\n\n\tapp.RunAndExitOnError()\n}\n\nfunc tokenFlag() cli.Flag {\n\treturn cli.StringFlag{\n\t\tName: \"token\",\n\t\tUsage: \"DigitalOcean API V2 Token\",\n\t\tEnvVar: \"DO_TOKEN\",\n\t}\n}\n\nfunc debugFlag() cli.Flag {\n\treturn cli.BoolFlag{\n\t\tName: \"debug\",\n\t\tUsage: \"Debug\",\n\t}\n}\n\nfunc toJSON(item interface{}) (string, error) {\n\tb, err := json.MarshalIndent(item, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(b), nil\n}\n\nfunc newClient(c *cli.Context) *godo.Client {\n\tpat := c.GlobalString(\"token\")\n\ttokenSource := &tokenSource{\n\t\tAccessToken: pat,\n\t}\n\n\toauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)\n\treturn godo.NewClient(oauthClient)\n}\n\nfunc loadOpts(c *cli.Context) *docli.Opts {\n\treturn &docli.Opts{\n\t\tDebug: c.GlobalBool(\"debug\"),\n\t}\n}\n<commit_msg>removing unused code<commit_after>package main\n\nimport (\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype tokenSource struct {\n\tAccessToken string\n}\n\nfunc (t *tokenSource) Token() (*oauth2.Token, error) {\n\treturn &oauth2.Token{\n\t\tAccessToken: t.AccessToken,\n\t}, nil\n}\n\nfunc init() {\n\tlogrus.SetOutput(os.Stderr)\n\tlogrus.SetLevel(logrus.WarnLevel)\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"docli\"\n\tapp.Usage = \"DigitalOcean API CLI\"\n\tapp.Version = \"0.1.0\"\n\tapp.Flags = []cli.Flag{\n\t\ttokenFlag(),\n\t\tdebugFlag(),\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\taccountCommands(),\n\t\tactionCommands(),\n\t\tdomainCommands(),\n\t\tdropletCommands(),\n\t\tdropletActionCommands(),\n\t\timageActionCommands(),\n\t\timageCommands(),\n\t\tregionCommands(),\n\t\tsizeCommands(),\n\t\tsshKeyCommands(),\n\t}\n\n\tapp.RunAndExitOnError()\n}\n\nfunc tokenFlag() cli.Flag {\n\treturn cli.StringFlag{\n\t\tName: \"token\",\n\t\tUsage: \"DigitalOcean API V2 Token\",\n\t\tEnvVar: \"DIGITAL_OCEAN_TOKEN\",\n\t}\n}\n\nfunc debugFlag() cli.Flag {\n\treturn cli.BoolFlag{\n\t\tName: \"debug\",\n\t\tUsage: \"Debug\",\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/foolusion\/choices\"\n\t\"github.com\/foolusion\/choices\/elwin\"\n\t\"google.golang.org\/grpc\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", rootHandler)\n}\n\nfunc main() {\n\tlog.Println(\"Starting elwin...\")\n\tt1 := choices.NewNamespace(\"t1\", \"test\")\n\tt1.Addexp(\n\t\t\"uniform\",\n\t\t[]choices.Param{{Name: \"a\", Value: &choices.Uniform{Choices: []string{\"b\", \"c\"}}}},\n\t\t128,\n\t)\n\tif err := choices.Addns(t1); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt2 := choices.NewNamespace(\"t2\", \"test\")\n\tt2.Addexp(\n\t\t\"weighted\",\n\t\t[]choices.Param{{Name: \"b\", Value: &choices.Weighted{Choices: []string{\"on\", \"off\"}, Weights: []float64{2, 1}}}},\n\t\t128,\n\t)\n\tif err := choices.Addns(t2); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt3 := choices.NewNamespace(\"t3\", \"test\")\n\tt3.Addexp(\n\t\t\"halfSegments\",\n\t\t[]choices.Param{{Name: \"b\", Value: &choices.Uniform{Choices: []string{\"on\"}}}},\n\t\t64,\n\t)\n\tif err := choices.Addns(t3); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt4 := choices.NewNamespace(\"t4\", \"test\")\n\tt4.Addexp(\n\t\t\"multi\",\n\t\t[]choices.Param{\n\t\t\t{Name: \"a\", Value: &choices.Uniform{Choices: []string{\"on\", \"off\"}}},\n\t\t\t{Name: \"b\", Value: &choices.Weighted{Choices: []string{\"up\", \"down\"}, Weights: []float64{1, 2}}},\n\t\t},\n\t\t128,\n\t)\n\tif err := choices.Addns(t4); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tgo func() {\n\t\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n\t}()\n\n\tlis, err := net.Listen(\"tcp\", \":8080\")\n\tif err != nil {\n\t\tlog.Fatalf(\"main: failed to listen: %v\", err)\n\t}\n\tgrpcServer := grpc.NewServer()\n\telwin.RegisterElwinServer(grpcServer, &elwinServer{})\n\tgrpcServer.Serve(lis)\n}\n\ntype elwinServer struct {\n}\n\nfunc (e *elwinServer) GetNamespaces(ctx context.Context, id *elwin.Identifier) (*elwin.Experiments, error) {\n\tif id == nil {\n\t\treturn nil, fmt.Errorf(\"GetNamespaces: no Identifier recieved\")\n\t}\n\n\tresp, err := choices.Namespaces(id.TeamID, id.UserID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error resolving namespaces for %s, %s: %v\", id.TeamID, id.UserID, err)\n\t}\n\treturn resp, nil\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tresp, err := choices.Namespaces(\"test\", r.Form.Get(\"userID\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%v\", err)\n\t\treturn\n\t}\n\tenc := json.NewEncoder(w)\n\tenc.Encode(*resp)\n}\n<commit_msg>elwin: update server for ExperimentResponse<commit_after>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/foolusion\/choices\"\n\t\"github.com\/foolusion\/choices\/elwin\"\n\t\"google.golang.org\/grpc\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", rootHandler)\n}\n\nfunc main() {\n\tlog.Println(\"Starting elwin...\")\n\tt1 := choices.NewNamespace(\"t1\", \"test\")\n\tt1.Addexp(\n\t\t\"uniform\",\n\t\t[]choices.Param{{Name: \"a\", Value: &choices.Uniform{Choices: []string{\"b\", \"c\"}}}},\n\t\t128,\n\t)\n\tif err := choices.Addns(t1); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt2 := choices.NewNamespace(\"t2\", \"test\")\n\tt2.Addexp(\n\t\t\"weighted\",\n\t\t[]choices.Param{{Name: \"b\", Value: &choices.Weighted{Choices: []string{\"on\", \"off\"}, Weights: []float64{2, 1}}}},\n\t\t128,\n\t)\n\tif err := choices.Addns(t2); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt3 := choices.NewNamespace(\"t3\", \"test\")\n\tt3.Addexp(\n\t\t\"halfSegments\",\n\t\t[]choices.Param{{Name: \"b\", Value: &choices.Uniform{Choices: []string{\"on\"}}}},\n\t\t64,\n\t)\n\tif err := choices.Addns(t3); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\tt4 := choices.NewNamespace(\"t4\", \"test\")\n\tt4.Addexp(\n\t\t\"multi\",\n\t\t[]choices.Param{\n\t\t\t{Name: \"a\", Value: &choices.Uniform{Choices: []string{\"on\", \"off\"}}},\n\t\t\t{Name: \"b\", Value: &choices.Weighted{Choices: []string{\"up\", \"down\"}, Weights: []float64{1, 2}}},\n\t\t},\n\t\t128,\n\t)\n\tif err := choices.Addns(t4); err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tgo func() {\n\t\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n\t}()\n\n\tlis, err := net.Listen(\"tcp\", \":8080\")\n\tif err != nil {\n\t\tlog.Fatalf(\"main: failed to listen: %v\", err)\n\t}\n\tgrpcServer := grpc.NewServer()\n\telwin.RegisterElwinServer(grpcServer, &elwinServer{})\n\tgrpcServer.Serve(lis)\n}\n\ntype elwinServer struct {\n}\n\nfunc (e *elwinServer) GetNamespaces(ctx context.Context, id *elwin.Identifier) (*elwin.Experiments, error) {\n\tif id == nil {\n\t\treturn nil, fmt.Errorf(\"GetNamespaces: no Identifier recieved\")\n\t}\n\n\tresp, err := choices.Namespaces(id.TeamID, id.UserID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error resolving namespaces for %s, %s: %v\", id.TeamID, id.UserID, err)\n\t}\n\n\texp := &elwin.Experiments{\n\t\tExperiments: make(map[string]*elwin.Experiment, len(resp)),\n\t}\n\n\tfor _, v := range resp {\n\t\texp.Experiments[v.Name] = &elwin.Experiment{\n\t\t\tParams: make([]*elwin.Param, len(v.Params)),\n\t\t}\n\n\t\tfor i, p := range v.Params {\n\t\t\texp.Experiments[v.Name].Params[i] = &elwin.Param{\n\t\t\t\tKey: p.Name,\n\t\t\t\tValue: p.Value,\n\t\t\t}\n\t\t}\n\t}\n\treturn exp, nil\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tresp, err := choices.Namespaces(\"test\", r.Form.Get(\"userID\"))\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"%v\", err)\n\t\treturn\n\t}\n\tenc := json.NewEncoder(w)\n\tenc.Encode(resp)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Volker Dobler. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nvar cmdVersion = &Command{\n\tRunArgs: runVersion,\n\tUsage: \"version\",\n\tDescription: \"print version information\",\n\tFlag: flag.NewFlagSet(\"version\", flag.ContinueOnError),\n\tHelp: `\nVersion prints version information about ht.\n\t`,\n}\n\nvar (\n\tversion = \"4.3.0-beta\"\n)\n\nfunc runVersion(cmd *Command, _ []string) {\n\tfmt.Printf(\"ht version %s\\n\", version)\n}\n<commit_msg>all: bump version to 4.3.0<commit_after>\/\/ Copyright 2014 Volker Dobler. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nvar cmdVersion = &Command{\n\tRunArgs: runVersion,\n\tUsage: \"version\",\n\tDescription: \"print version information\",\n\tFlag: flag.NewFlagSet(\"version\", flag.ContinueOnError),\n\tHelp: `\nVersion prints version information about ht.\n\t`,\n}\n\nvar (\n\tversion = \"4.3.0\"\n)\n\nfunc runVersion(cmd *Command, _ []string) {\n\tfmt.Printf(\"ht version %s\\n\", version)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tconfig \"github.com\/jbenet\/go-ipfs\/config\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tci \"github.com\/jbenet\/go-ipfs\/crypto\"\n\timp \"github.com\/jbenet\/go-ipfs\/importer\"\n\tchunk \"github.com\/jbenet\/go-ipfs\/importer\/chunk\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/peer\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\nvar initCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Initializes IPFS config file\",\n\t\tShortDescription: \"Initializes IPFS configuration files and generates a new keypair.\",\n\t},\n\n\tOptions: []cmds.Option{\n\t\tcmds.IntOption(\"bits\", \"b\", \"Number of bits to use in the generated RSA private key (defaults to 4096)\"),\n\t\tcmds.StringOption(\"passphrase\", \"p\", \"Passphrase for encrypting the private key\"),\n\t\tcmds.BoolOption(\"force\", \"f\", \"Overwrite existing config (if it exists)\"),\n\t\tcmds.StringOption(\"datastore\", \"d\", \"Location for the IPFS data store\"),\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\n\t\tdspathOverride, _, err := req.Option(\"d\").String() \/\/ if !found it's okay. Let == \"\"\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tforce, _, err := req.Option(\"f\").Bool() \/\/ if !found, it's okay force == false\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnBitsForKeypair, bitsOptFound, err := req.Option(\"b\").Int()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !bitsOptFound {\n\t\t\tnBitsForKeypair = 4096\n\t\t}\n\n\t\treturn nil, doInit(req.Context().ConfigRoot, dspathOverride, force, nBitsForKeypair)\n\t},\n}\n\n\/\/ TODO add default welcome hash: eaa68bedae247ed1e5bd0eb4385a3c0959b976e4\n\/\/ NB: if dspath is not provided, it will be retrieved from the config\nfunc doInit(configRoot string, dspathOverride string, force bool, nBitsForKeypair int) error {\n\n\tu.POut(\"initializing ipfs node at %s\\n\", configRoot)\n\n\tconfigFilename, err := config.Filename(configRoot)\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't get home directory path\")\n\t}\n\n\tfi, err := os.Lstat(configFilename)\n\tif fi != nil || (err != nil && !os.IsNotExist(err)) {\n\t\tif !force {\n\t\t\t\/\/ TODO multi-line string\n\t\t\treturn errors.New(\"ipfs configuration file already exists!\\nReinitializing would overwrite your keys.\\n(use -f to force overwrite)\")\n\t\t}\n\t}\n\n\tds, err := datastoreConfig(dspathOverride)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidentity, err := identityConfig(nBitsForKeypair)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconf := config.Config{\n\n\t\t\/\/ setup the node addresses.\n\t\tAddresses: config.Addresses{\n\t\t\tSwarm: \"\/ip4\/0.0.0.0\/tcp\/4001\",\n\t\t\tAPI: \"\/ip4\/127.0.0.1\/tcp\/5001\",\n\t\t},\n\n\t\tBootstrap: []*config.BootstrapPeer{\n\t\t\t&config.BootstrapPeer{ \/\/ Use these hardcoded bootstrap peers for now.\n\t\t\t\t\/\/ mars.i.ipfs.io\n\t\t\t\tPeerID: \"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ\",\n\t\t\t\tAddress: \"\/ip4\/104.131.131.82\/tcp\/4001\",\n\t\t\t},\n\t\t},\n\n\t\tDatastore: ds,\n\n\t\tIdentity: identity,\n\n\t\t\/\/ setup the node mount points.\n\t\tMounts: config.Mounts{\n\t\t\tIPFS: \"\/ipfs\",\n\t\t\tIPNS: \"\/ipns\",\n\t\t},\n\n\t\t\/\/ tracking ipfs version used to generate the init folder and adding\n\t\t\/\/ update checker default setting.\n\t\tVersion: config.VersionDefaultValue(),\n\t}\n\n\terr = config.WriteConfigFile(configFilename, conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnd, err := core.NewIpfsNode(&conf, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer nd.Close()\n\n\t\/\/ Set up default file\n\tmsg := `Hello and Welcome to IPFS!\nIf you're seeing this, that means that you have successfully\ninstalled ipfs and are now interfacing with the wonderful\nworld of DAGs and hashes!\n`\n\treader := bytes.NewBufferString(msg)\n\n\tdefnd, err := imp.BuildDagFromReader(reader, nd.DAG, nd.Pinning.GetManual(), chunk.DefaultSplitter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk, _ := defnd.Key()\n\tfmt.Printf(\"Default file key: %s\\n\", k)\n\n\treturn nil\n}\n\nfunc datastoreConfig(dspath string) (config.Datastore, error) {\n\tds := config.Datastore{}\n\tif len(dspath) == 0 {\n\t\tvar err error\n\t\tdspath, err = config.DataStorePath(\"\")\n\t\tif err != nil {\n\t\t\treturn ds, err\n\t\t}\n\t}\n\tds.Path = dspath\n\tds.Type = \"leveldb\"\n\n\t\/\/ Construct the data store if missing\n\tif err := os.MkdirAll(dspath, os.ModePerm); err != nil {\n\t\treturn ds, err\n\t}\n\n\t\/\/ Check the directory is writeable\n\tif f, err := os.Create(filepath.Join(dspath, \"._check_writeable\")); err == nil {\n\t\tos.Remove(f.Name())\n\t} else {\n\t\treturn ds, errors.New(\"Datastore '\" + dspath + \"' is not writeable\")\n\t}\n\n\treturn ds, nil\n}\n\nfunc identityConfig(nbits int) (config.Identity, error) {\n\t\/\/ TODO guard higher up\n\tident := config.Identity{}\n\tif nbits < 1024 {\n\t\treturn ident, errors.New(\"Bitsize less than 1024 is considered unsafe.\")\n\t}\n\n\tfmt.Println(\"generating key pair...\")\n\tsk, pk, err := ci.GenerateKeyPair(ci.RSA, nbits)\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\n\t\/\/ currently storing key unencrypted. in the future we need to encrypt it.\n\t\/\/ TODO(security)\n\tskbytes, err := sk.Bytes()\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\tident.PrivKey = base64.StdEncoding.EncodeToString(skbytes)\n\n\tid, err := peer.IDFromPubKey(pk)\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\tident.PeerID = id.Pretty()\n\n\treturn ident, nil\n}\n<commit_msg>cleaned up ipfs init<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tconfig \"github.com\/jbenet\/go-ipfs\/config\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tci \"github.com\/jbenet\/go-ipfs\/crypto\"\n\timp \"github.com\/jbenet\/go-ipfs\/importer\"\n\tchunk \"github.com\/jbenet\/go-ipfs\/importer\/chunk\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/peer\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\nvar initCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Initializes IPFS config file\",\n\t\tShortDescription: \"Initializes IPFS configuration files and generates a new keypair.\",\n\t},\n\n\tOptions: []cmds.Option{\n\t\tcmds.IntOption(\"bits\", \"b\", \"Number of bits to use in the generated RSA private key (defaults to 4096)\"),\n\t\tcmds.StringOption(\"passphrase\", \"p\", \"Passphrase for encrypting the private key\"),\n\t\tcmds.BoolOption(\"force\", \"f\", \"Overwrite existing config (if it exists)\"),\n\t\tcmds.StringOption(\"datastore\", \"d\", \"Location for the IPFS data store\"),\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\n\t\tdspathOverride, _, err := req.Option(\"d\").String() \/\/ if !found it's okay. Let == \"\"\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tforce, _, err := req.Option(\"f\").Bool() \/\/ if !found, it's okay force == false\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnBitsForKeypair, bitsOptFound, err := req.Option(\"b\").Int()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !bitsOptFound {\n\t\t\tnBitsForKeypair = 4096\n\t\t}\n\n\t\treturn doInit(req.Context().ConfigRoot, dspathOverride, force, nBitsForKeypair)\n\t},\n}\n\nvar errCannotInitConfigExists = errors.New(`ipfs configuration file already exists!\nReinitializing would overwrite your keys.\n(use -f to force overwrite)\n`)\n\nvar welcomeMsg = `Hello and Welcome to IPFS!\n\n██╗██████╗ ███████╗███████╗\n██║██╔══██╗██╔════╝██╔════╝\n██║██████╔╝█████╗ ███████╗\n██║██╔═══╝ ██╔══╝ ╚════██║\n██║██║ ██║ ███████║\n╚═╝╚═╝ ╚═╝ ╚══════╝\n\nIf you're seeing this, you have successfully installed\nIPFS and are now interfacing with the ipfs merkledag!\n\nFor a short demo of what you can do, enter 'ipfs tour'\n`\n\n\/\/ TODO add default welcome hash: eaa68bedae247ed1e5bd0eb4385a3c0959b976e4\n\/\/ NB: if dspath is not provided, it will be retrieved from the config\nfunc doInit(configRoot string, dspathOverride string, force bool, nBitsForKeypair int) (interface{}, error) {\n\n\tu.POut(\"initializing ipfs node at %s\\n\", configRoot)\n\n\tconfigFilename, err := config.Filename(configRoot)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't get home directory path\")\n\t}\n\n\tfi, err := os.Lstat(configFilename)\n\tif fi != nil || (err != nil && !os.IsNotExist(err)) {\n\t\tif !force {\n\t\t\t\/\/ TODO multi-line string\n\t\t\treturn nil, errCannotInitConfigExists\n\t\t}\n\t}\n\n\tconf, err := initConfig(configFilename, dspathOverride, nBitsForKeypair)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnd, err := core.NewIpfsNode(conf, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer nd.Close()\n\n\t\/\/ Set up default file\n\treader := bytes.NewBufferString(welcomeMsg)\n\n\tdefnd, err := imp.BuildDagFromReader(reader, nd.DAG, nd.Pinning.GetManual(), chunk.DefaultSplitter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tk, _ := defnd.Key()\n\tfmt.Printf(\"done.\\nto test, enter: ipfs cat %s\\n\", k)\n\treturn nil, nil\n}\n\nfunc datastoreConfig(dspath string) (config.Datastore, error) {\n\tds := config.Datastore{}\n\tif len(dspath) == 0 {\n\t\tvar err error\n\t\tdspath, err = config.DataStorePath(\"\")\n\t\tif err != nil {\n\t\t\treturn ds, err\n\t\t}\n\t}\n\tds.Path = dspath\n\tds.Type = \"leveldb\"\n\n\t\/\/ Construct the data store if missing\n\tif err := os.MkdirAll(dspath, os.ModePerm); err != nil {\n\t\treturn ds, err\n\t}\n\n\t\/\/ Check the directory is writeable\n\tif f, err := os.Create(filepath.Join(dspath, \"._check_writeable\")); err == nil {\n\t\tos.Remove(f.Name())\n\t} else {\n\t\treturn ds, errors.New(\"Datastore '\" + dspath + \"' is not writeable\")\n\t}\n\n\treturn ds, nil\n}\n\nfunc initConfig(configFilename string, dspathOverride string, nBitsForKeypair int) (*config.Config, error) {\n\tds, err := datastoreConfig(dspathOverride)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tidentity, err := identityConfig(nBitsForKeypair)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf := &config.Config{\n\n\t\t\/\/ setup the node addresses.\n\t\tAddresses: config.Addresses{\n\t\t\tSwarm: \"\/ip4\/0.0.0.0\/tcp\/4001\",\n\t\t\tAPI: \"\/ip4\/127.0.0.1\/tcp\/5001\",\n\t\t},\n\n\t\tBootstrap: []*config.BootstrapPeer{\n\t\t\t&config.BootstrapPeer{ \/\/ Use these hardcoded bootstrap peers for now.\n\t\t\t\t\/\/ mars.i.ipfs.io\n\t\t\t\tPeerID: \"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ\",\n\t\t\t\tAddress: \"\/ip4\/104.131.131.82\/tcp\/4001\",\n\t\t\t},\n\t\t},\n\n\t\tDatastore: ds,\n\n\t\tIdentity: identity,\n\n\t\t\/\/ setup the node mount points.\n\t\tMounts: config.Mounts{\n\t\t\tIPFS: \"\/ipfs\",\n\t\t\tIPNS: \"\/ipns\",\n\t\t},\n\n\t\t\/\/ tracking ipfs version used to generate the init folder and adding\n\t\t\/\/ update checker default setting.\n\t\tVersion: config.VersionDefaultValue(),\n\t}\n\n\tif err := config.WriteConfigFile(configFilename, conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc identityConfig(nbits int) (config.Identity, error) {\n\t\/\/ TODO guard higher up\n\tident := config.Identity{}\n\tif nbits < 1024 {\n\t\treturn ident, errors.New(\"Bitsize less than 1024 is considered unsafe.\")\n\t}\n\n\tfmt.Printf(\"generating key pair...\")\n\tsk, pk, err := ci.GenerateKeyPair(ci.RSA, nbits)\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\n\t\/\/ currently storing key unencrypted. in the future we need to encrypt it.\n\t\/\/ TODO(security)\n\tskbytes, err := sk.Bytes()\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\tident.PrivKey = base64.StdEncoding.EncodeToString(skbytes)\n\n\tid, err := peer.IDFromPubKey(pk)\n\tif err != nil {\n\t\treturn ident, err\n\t}\n\tident.PeerID = id.Pretty()\n\n\treturn ident, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/pprof\"\n\n\tlogging \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-logging\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\tmanet \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\/net\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tcmdsCli \"github.com\/jbenet\/go-ipfs\/commands\/cli\"\n\tcmdsHttp \"github.com\/jbenet\/go-ipfs\/commands\/http\"\n\t\"github.com\/jbenet\/go-ipfs\/config\"\n\t\"github.com\/jbenet\/go-ipfs\/core\"\n\tcommands \"github.com\/jbenet\/go-ipfs\/core\/commands2\"\n\tdaemon \"github.com\/jbenet\/go-ipfs\/daemon2\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\n\/\/ log is the command logger\nvar log = u.Logger(\"cmd\/ipfs\")\n\nconst (\n\theapProfile = \"ipfs.mprof\"\n\terrorFormat = \"ERROR: %v\\n\\n\"\n)\n\nvar ofi io.WriteCloser\n\nfunc main() {\n\thandleInterrupt()\n\n\targs := os.Args[1:]\n\treq, root, err := createRequest(args)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n\thandleOptions(req, root)\n\n\t\/\/ if debugging, setup profiling.\n\tif u.Debug {\n\t\tvar err error\n\t\tofi, err = os.Create(\"cpu.prof\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tpprof.StartCPUProfile(ofi)\n\t}\n\n\tres := callCommand(req, root)\n\toutputResponse(res, root)\n\n\texit(0)\n}\n\nfunc createRequest(args []string) (cmds.Request, *cmds.Command, error) {\n\treq, root, cmd, path, err := cmdsCli.Parse(args, Root, commands.Root)\n\n\t\/\/ handle parse error (which means the commandline input was wrong,\n\t\/\/ e.g. incorrect number of args, or nonexistent subcommand)\n\tif err != nil {\n\t\t\/\/ if the -help flag wasn't specified, show the error message\n\t\t\/\/ or if a path was returned (user specified a valid subcommand), show the error message\n\t\t\/\/ (this means there was an option or argument error)\n\t\tif path != nil && len(path) > 0 {\n\t\t\thelp, _ := req.Option(\"help\").Bool()\n\t\t\tif !help {\n\t\t\t\tfmt.Printf(errorFormat, err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ when generating help for the root command, we don't want the autogenerated subcommand text\n\t\t\/\/ (since we have better hand-made subcommand list in the root Help field)\n\t\tif cmd == nil {\n\t\t\troot = &*commands.Root\n\t\t\troot.Subcommands = nil\n\t\t}\n\n\t\t\/\/ generate the help text for the command the user was trying to call (or root)\n\t\thelpText, htErr := cmdsCli.HelpText(\"ipfs\", root, path)\n\t\tif htErr != nil {\n\t\t\tfmt.Println(htErr)\n\t\t} else {\n\t\t\tfmt.Println(helpText)\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tconfigPath, err := getConfigRoot(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tconf, err := getConfig(configPath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tctx := req.Context()\n\tctx.ConfigRoot = configPath\n\tctx.Config = conf\n\n\tif !req.Option(\"encoding\").Found() {\n\t\tif req.Command().Marshallers != nil && req.Command().Marshallers[cmds.Text] != nil {\n\t\t\treq.SetOption(\"encoding\", cmds.Text)\n\t\t} else {\n\t\t\treq.SetOption(\"encoding\", cmds.JSON)\n\t\t}\n\t}\n\n\treturn req, root, nil\n}\n\nfunc handleOptions(req cmds.Request, root *cmds.Command) {\n\thelp, err := req.Option(\"help\").Bool()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n\n\tif help {\n\t\thelpText, err := cmdsCli.HelpText(\"ipfs\", root, req.Path())\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t} else {\n\t\t\tfmt.Println(helpText)\n\t\t}\n\t\texit(0)\n\t}\n\n\tif debug, err := req.Option(\"debug\").Bool(); debug && err == nil {\n\t\tu.Debug = true\n\t\tu.SetAllLoggers(logging.DEBUG)\n\t} else if err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n}\n\nfunc callCommand(req cmds.Request, root *cmds.Command) cmds.Response {\n\tvar res cmds.Response\n\n\tif root == Root {\n\t\tres = root.Call(req)\n\n\t} else {\n\t\tlocal, err := req.Option(\"local\").Bool()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\texit(1)\n\t\t}\n\n\t\tif (!req.Option(\"local\").Found() || !local) && daemon.Locked(req.Context().ConfigRoot) {\n\t\t\taddr, err := ma.NewMultiaddr(req.Context().Config.Addresses.API)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t\t_, host, err := manet.DialArgs(addr)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t\tclient := cmdsHttp.NewClient(host)\n\n\t\t\tres, err = client.Send(req)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t} else {\n\t\t\tnode, err := core.NewIpfsNode(req.Context().Config, false)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\t\t\tdefer node.Close()\n\t\t\treq.Context().Node = node\n\n\t\t\tres = root.Call(req)\n\t\t}\n\t}\n\n\treturn res\n}\n\nfunc outputResponse(res cmds.Response, root *cmds.Command) {\n\tif res.Error() != nil {\n\t\tfmt.Printf(errorFormat, res.Error().Error())\n\n\t\tif res.Error().Code == cmds.ErrClient {\n\t\t\thelpText, err := cmdsCli.HelpText(\"ipfs\", root, res.Request().Path())\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tfmt.Println(helpText)\n\t\t\t}\n\t\t}\n\n\t\texit(1)\n\t}\n\n\tout, err := res.Reader()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\n\tio.Copy(os.Stdout, out)\n}\n\nfunc getConfigRoot(req cmds.Request) (string, error) {\n\tconfigOpt, err := req.Option(\"config\").String()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif configOpt != \"\" {\n\t\treturn configOpt, nil\n\t}\n\n\tconfigPath, err := config.PathRoot()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn configPath, nil\n}\n\nfunc getConfig(path string) (*config.Config, error) {\n\tconfigFile, err := config.Filename(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn config.Load(configFile)\n}\n\nfunc writeHeapProfileToFile() error {\n\tmprof, err := os.Create(heapProfile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer mprof.Close()\n\treturn pprof.WriteHeapProfile(mprof)\n}\n\n\/\/ listen for and handle SIGTERM\nfunc handleInterrupt() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\tfor _ = range c {\n\t\t\tlog.Info(\"Received interrupt signal, terminating...\")\n\t\t\texit(0)\n\t\t}\n\t}()\n}\n\nfunc exit(code int) {\n\tif u.Debug {\n\t\tpprof.StopCPUProfile()\n\t\tofi.Close()\n\n\t\terr := writeHeapProfileToFile()\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t}\n\t}\n\n\tos.Exit(code)\n}\n<commit_msg>refactor(ipfs2\/main) same for debug<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/pprof\"\n\n\tlogging \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-logging\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\tmanet \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\/net\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tcmdsCli \"github.com\/jbenet\/go-ipfs\/commands\/cli\"\n\tcmdsHttp \"github.com\/jbenet\/go-ipfs\/commands\/http\"\n\t\"github.com\/jbenet\/go-ipfs\/config\"\n\t\"github.com\/jbenet\/go-ipfs\/core\"\n\tcommands \"github.com\/jbenet\/go-ipfs\/core\/commands2\"\n\tdaemon \"github.com\/jbenet\/go-ipfs\/daemon2\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\n\/\/ log is the command logger\nvar log = u.Logger(\"cmd\/ipfs\")\n\nconst (\n\theapProfile = \"ipfs.mprof\"\n\terrorFormat = \"ERROR: %v\\n\\n\"\n)\n\nvar ofi io.WriteCloser\n\nfunc main() {\n\thandleInterrupt()\n\n\targs := os.Args[1:]\n\treq, root, err := createRequest(args)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n\thandleOptions(req, root)\n\n\t\/\/ if debugging, setup profiling.\n\tif u.Debug {\n\t\tvar err error\n\t\tofi, err = os.Create(\"cpu.prof\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tpprof.StartCPUProfile(ofi)\n\t}\n\n\tres := callCommand(req, root)\n\toutputResponse(res, root)\n\n\texit(0)\n}\n\nfunc createRequest(args []string) (cmds.Request, *cmds.Command, error) {\n\treq, root, cmd, path, err := cmdsCli.Parse(args, Root, commands.Root)\n\n\t\/\/ handle parse error (which means the commandline input was wrong,\n\t\/\/ e.g. incorrect number of args, or nonexistent subcommand)\n\tif err != nil {\n\t\t\/\/ if the -help flag wasn't specified, show the error message\n\t\t\/\/ or if a path was returned (user specified a valid subcommand), show the error message\n\t\t\/\/ (this means there was an option or argument error)\n\t\tif path != nil && len(path) > 0 {\n\t\t\thelp, _ := req.Option(\"help\").Bool()\n\t\t\tif !help {\n\t\t\t\tfmt.Printf(errorFormat, err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ when generating help for the root command, we don't want the autogenerated subcommand text\n\t\t\/\/ (since we have better hand-made subcommand list in the root Help field)\n\t\tif cmd == nil {\n\t\t\troot = &*commands.Root\n\t\t\troot.Subcommands = nil\n\t\t}\n\n\t\t\/\/ generate the help text for the command the user was trying to call (or root)\n\t\thelpText, htErr := cmdsCli.HelpText(\"ipfs\", root, path)\n\t\tif htErr != nil {\n\t\t\tfmt.Println(htErr)\n\t\t} else {\n\t\t\tfmt.Println(helpText)\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tconfigPath, err := getConfigRoot(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tconf, err := getConfig(configPath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tctx := req.Context()\n\tctx.ConfigRoot = configPath\n\tctx.Config = conf\n\n\tif !req.Option(\"encoding\").Found() {\n\t\tif req.Command().Marshallers != nil && req.Command().Marshallers[cmds.Text] != nil {\n\t\t\treq.SetOption(\"encoding\", cmds.Text)\n\t\t} else {\n\t\t\treq.SetOption(\"encoding\", cmds.JSON)\n\t\t}\n\t}\n\n\treturn req, root, nil\n}\n\nfunc handleOptions(req cmds.Request, root *cmds.Command) {\n\thelp, err := req.Option(\"help\").Bool()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n\n\tif help {\n\t\thelpText, err := cmdsCli.HelpText(\"ipfs\", root, req.Path())\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t} else {\n\t\t\tfmt.Println(helpText)\n\t\t}\n\t\texit(0)\n\t}\n\n\tdebug, err := req.Option(\"debug\").Bool()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\texit(1)\n\t}\n\tif debug {\n\t\tu.Debug = true\n\t\tu.SetAllLoggers(logging.DEBUG)\n\t}\n}\n\nfunc callCommand(req cmds.Request, root *cmds.Command) cmds.Response {\n\tvar res cmds.Response\n\n\tif root == Root {\n\t\tres = root.Call(req)\n\n\t} else {\n\t\tlocal, err := req.Option(\"local\").Bool()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\texit(1)\n\t\t}\n\n\t\tif (!req.Option(\"local\").Found() || !local) && daemon.Locked(req.Context().ConfigRoot) {\n\t\t\taddr, err := ma.NewMultiaddr(req.Context().Config.Addresses.API)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t\t_, host, err := manet.DialArgs(addr)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t\tclient := cmdsHttp.NewClient(host)\n\n\t\t\tres, err = client.Send(req)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\n\t\t} else {\n\t\t\tnode, err := core.NewIpfsNode(req.Context().Config, false)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\texit(1)\n\t\t\t}\n\t\t\tdefer node.Close()\n\t\t\treq.Context().Node = node\n\n\t\t\tres = root.Call(req)\n\t\t}\n\t}\n\n\treturn res\n}\n\nfunc outputResponse(res cmds.Response, root *cmds.Command) {\n\tif res.Error() != nil {\n\t\tfmt.Printf(errorFormat, res.Error().Error())\n\n\t\tif res.Error().Code == cmds.ErrClient {\n\t\t\thelpText, err := cmdsCli.HelpText(\"ipfs\", root, res.Request().Path())\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tfmt.Println(helpText)\n\t\t\t}\n\t\t}\n\n\t\texit(1)\n\t}\n\n\tout, err := res.Reader()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\n\tio.Copy(os.Stdout, out)\n}\n\nfunc getConfigRoot(req cmds.Request) (string, error) {\n\tconfigOpt, err := req.Option(\"config\").String()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif configOpt != \"\" {\n\t\treturn configOpt, nil\n\t}\n\n\tconfigPath, err := config.PathRoot()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn configPath, nil\n}\n\nfunc getConfig(path string) (*config.Config, error) {\n\tconfigFile, err := config.Filename(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn config.Load(configFile)\n}\n\nfunc writeHeapProfileToFile() error {\n\tmprof, err := os.Create(heapProfile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer mprof.Close()\n\treturn pprof.WriteHeapProfile(mprof)\n}\n\n\/\/ listen for and handle SIGTERM\nfunc handleInterrupt() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\tfor _ = range c {\n\t\t\tlog.Info(\"Received interrupt signal, terminating...\")\n\t\t\texit(0)\n\t\t}\n\t}()\n}\n\nfunc exit(code int) {\n\tif u.Debug {\n\t\tpprof.StopCPUProfile()\n\t\tofi.Close()\n\n\t\terr := writeHeapProfileToFile()\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t}\n\t}\n\n\tos.Exit(code)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/loggo\"\n\t\"launchpad.net\/tomb\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/names\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/worker\"\n\t\"launchpad.net\/juju-core\/worker\/uniter\"\n\t\"launchpad.net\/juju-core\/worker\/upgrader\"\n)\n\nvar agentLogger = loggo.GetLogger(\"juju.jujud\")\n\n\/\/ UnitAgent is a cmd.Command responsible for running a unit agent.\ntype UnitAgent struct {\n\tcmd.CommandBase\n\ttomb tomb.Tomb\n\tConf AgentConf\n\tUnitName string\n\trunner *worker.Runner\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *UnitAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"unit\",\n\t\tPurpose: \"run a juju unit agent\",\n\t}\n}\n\nfunc (a *UnitAgent) SetFlags(f *gnuflag.FlagSet) {\n\ta.Conf.addFlags(f)\n\tf.StringVar(&a.UnitName, \"unit-name\", \"\", \"name of the unit to run\")\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *UnitAgent) Init(args []string) error {\n\tif a.UnitName == \"\" {\n\t\treturn requiredError(\"unit-name\")\n\t}\n\tif !names.IsUnit(a.UnitName) {\n\t\treturn fmt.Errorf(`--unit-name option expects \"<service>\/<n>\" argument`)\n\t}\n\tif err := a.Conf.checkArgs(args); err != nil {\n\t\treturn err\n\t}\n\ta.runner = worker.NewRunner(isFatal, moreImportant)\n\treturn nil\n}\n\n\/\/ Stop stops the unit agent.\nfunc (a *UnitAgent) Stop() error {\n\ta.runner.Kill()\n\treturn a.tomb.Wait()\n}\n\n\/\/ Run runs a unit agent.\nfunc (a *UnitAgent) Run(ctx *cmd.Context) error {\n\tdefer a.tomb.Done()\n\tif err := a.Conf.read(a.Tag()); err != nil {\n\t\treturn err\n\t}\n\tagentLogger.Infof(\"unit agent %v start\", a.Tag())\n\ta.runner.StartWorker(\"state\", a.StateWorkers)\n\ta.runner.StartWorker(\"api\", a.APIWorkers)\n\terr := agentDone(a.runner.Wait())\n\ta.tomb.Kill(err)\n\treturn err\n}\n\n\/\/ StateWorkers returns a worker that runs the unit agent workers.\nfunc (a *UnitAgent) StateWorkers() (worker.Worker, error) {\n\tst, entity, err := openState(a.Conf.config, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunit := entity.(*state.Unit)\n\tdataDir := a.Conf.dataDir\n\trunner := worker.NewRunner(connectionIsFatal(st), moreImportant)\n\trunner.StartWorker(\"uniter\", func() (worker.Worker, error) {\n\t\treturn uniter.NewUniter(st, unit.Name(), dataDir), nil\n\t})\n\treturn newCloseWorker(runner, st), nil\n}\n\nfunc (a *UnitAgent) APIWorkers() (worker.Worker, error) {\n\tagentConfig := a.Conf.config\n\tst, _, err := openAPIState(agentConfig, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trunner := worker.NewRunner(connectionIsFatal(st), moreImportant)\n\trunner.StartWorker(\"upgrader\", func() (worker.Worker, error) {\n\t\treturn upgrader.NewUpgrader(st.Upgrader(), agentConfig), nil\n\t})\n\treturn newCloseWorker(runner, st), nil\n}\n\nfunc (a *UnitAgent) Entity(st *state.State) (AgentState, error) {\n\treturn st.Unit(a.UnitName)\n}\n\nfunc (a *UnitAgent) Tag() string {\n\treturn names.UnitTag(a.UnitName)\n}\n<commit_msg>The unit agent watches too.<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/loggo\"\n\t\"launchpad.net\/tomb\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/names\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/worker\"\n\t\"launchpad.net\/juju-core\/worker\/logger\"\n\t\"launchpad.net\/juju-core\/worker\/uniter\"\n\t\"launchpad.net\/juju-core\/worker\/upgrader\"\n)\n\nvar agentLogger = loggo.GetLogger(\"juju.jujud\")\n\n\/\/ UnitAgent is a cmd.Command responsible for running a unit agent.\ntype UnitAgent struct {\n\tcmd.CommandBase\n\ttomb tomb.Tomb\n\tConf AgentConf\n\tUnitName string\n\trunner *worker.Runner\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *UnitAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"unit\",\n\t\tPurpose: \"run a juju unit agent\",\n\t}\n}\n\nfunc (a *UnitAgent) SetFlags(f *gnuflag.FlagSet) {\n\ta.Conf.addFlags(f)\n\tf.StringVar(&a.UnitName, \"unit-name\", \"\", \"name of the unit to run\")\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *UnitAgent) Init(args []string) error {\n\tif a.UnitName == \"\" {\n\t\treturn requiredError(\"unit-name\")\n\t}\n\tif !names.IsUnit(a.UnitName) {\n\t\treturn fmt.Errorf(`--unit-name option expects \"<service>\/<n>\" argument`)\n\t}\n\tif err := a.Conf.checkArgs(args); err != nil {\n\t\treturn err\n\t}\n\ta.runner = worker.NewRunner(isFatal, moreImportant)\n\treturn nil\n}\n\n\/\/ Stop stops the unit agent.\nfunc (a *UnitAgent) Stop() error {\n\ta.runner.Kill()\n\treturn a.tomb.Wait()\n}\n\n\/\/ Run runs a unit agent.\nfunc (a *UnitAgent) Run(ctx *cmd.Context) error {\n\tdefer a.tomb.Done()\n\tif err := a.Conf.read(a.Tag()); err != nil {\n\t\treturn err\n\t}\n\tagentLogger.Infof(\"unit agent %v start\", a.Tag())\n\ta.runner.StartWorker(\"state\", a.StateWorkers)\n\ta.runner.StartWorker(\"api\", a.APIWorkers)\n\terr := agentDone(a.runner.Wait())\n\ta.tomb.Kill(err)\n\treturn err\n}\n\n\/\/ StateWorkers returns a worker that runs the unit agent workers.\nfunc (a *UnitAgent) StateWorkers() (worker.Worker, error) {\n\tst, entity, err := openState(a.Conf.config, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunit := entity.(*state.Unit)\n\tdataDir := a.Conf.dataDir\n\trunner := worker.NewRunner(connectionIsFatal(st), moreImportant)\n\trunner.StartWorker(\"uniter\", func() (worker.Worker, error) {\n\t\treturn uniter.NewUniter(st, unit.Name(), dataDir), nil\n\t})\n\treturn newCloseWorker(runner, st), nil\n}\n\nfunc (a *UnitAgent) APIWorkers() (worker.Worker, error) {\n\tagentConfig := a.Conf.config\n\tst, _, err := openAPIState(agentConfig, a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trunner := worker.NewRunner(connectionIsFatal(st), moreImportant)\n\trunner.StartWorker(\"upgrader\", func() (worker.Worker, error) {\n\t\treturn upgrader.NewUpgrader(st.Upgrader(), agentConfig), nil\n\t})\n\trunner.StartWorker(\"logger\", func() (worker.Worker, error) {\n\t\treturn logger.NewLogger(st.Logger(), agentConfig), nil\n\t})\n\treturn newCloseWorker(runner, st), nil\n}\n\nfunc (a *UnitAgent) Entity(st *state.State) (AgentState, error) {\n\treturn st.Unit(a.UnitName)\n}\n\nfunc (a *UnitAgent) Tag() string {\n\treturn names.UnitTag(a.UnitName)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage org\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/apigee\/apigeecli\/apiclient\"\n\t\"github.com\/apigee\/apigeecli\/client\/apis\"\n\t\"github.com\/apigee\/apigeecli\/client\/apps\"\n\t\"github.com\/apigee\/apigeecli\/client\/datacollectors\"\n\t\"github.com\/apigee\/apigeecli\/client\/developers\"\n\t\"github.com\/apigee\/apigeecli\/client\/env\"\n\t\"github.com\/apigee\/apigeecli\/client\/envgroups\"\n\t\"github.com\/apigee\/apigeecli\/client\/keystores\"\n\t\"github.com\/apigee\/apigeecli\/client\/kvm\"\n\t\"github.com\/apigee\/apigeecli\/client\/orgs\"\n\t\"github.com\/apigee\/apigeecli\/client\/products\"\n\t\"github.com\/apigee\/apigeecli\/client\/references\"\n\t\"github.com\/apigee\/apigeecli\/client\/sharedflows\"\n\t\"github.com\/apigee\/apigeecli\/client\/sync\"\n\t\"github.com\/apigee\/apigeecli\/client\/targetservers\"\n\t\"github.com\/apigee\/apigeecli\/clilog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ ExportCmd to get org details\nvar ExportCmd = &cobra.Command{\n\tUse: \"export\",\n\tShort: \"Export Apigee Configuration\",\n\tLong: \"Export Apigee Configuration\",\n\tArgs: func(cmd *cobra.Command, args []string) (err error) {\n\t\treturn apiclient.SetApigeeOrg(org)\n\t},\n\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\n\t\tvar productResponse, appsResponse, targetServerResponse, referencesResponse [][]byte\n\t\tvar respBody, listKVMBytes []byte\n\n\t\truntimeType, _ := orgs.GetOrgField(\"runtimeType\")\n\n\t\tif err = createFolders(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclilog.Warning.Println(\"Calls to Apigee APIs have a quota of 6000 per min. Running this tool against large list of entities can exhaust that quota and impact the usage of the platform.\")\n\n\t\tfmt.Println(\"Exporting API Proxies...\")\n\t\tif err = apis.ExportProxies(conn, proxiesFolderName, allRevisions); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Sharedflows...\")\n\t\tif err = sharedflows.Export(conn, sharedFlowsFolderName, allRevisions); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting API Products...\")\n\t\tif productResponse, err = products.Export(conn); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteArrayByteArrayToFile(productsFileName, false, productResponse); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"\\tExporting KV Map names for org %s\\n\", org)\n\t\tif listKVMBytes, err = kvm.List(\"\"); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(org+\"_\"+kVMFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = exportKVMEntries(\"org\", \"\", listKVMBytes); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Developers...\")\n\t\tif respBody, err = developers.Export(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(developersFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Developer Apps...\")\n\t\tif appsResponse, err = apps.Export(conn); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteArrayByteArrayToFile(appsFileName, false, appsResponse); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Environment Group Configuration...\")\n\t\tapiclient.SetPrintOutput(false)\n\t\tif respBody, err = envgroups.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(envGroupsFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Data collectors Configuration...\")\n\t\tif respBody, err = datacollectors.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(dataCollFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif runtimeType == \"HYBRID\" {\n\t\t\tfmt.Println(\"Exporting Sync Authorization Identities...\")\n\t\t\tif respBody, err = sync.Get(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(syncAuthFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar envRespBody []byte\n\t\tif envRespBody, err = env.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tenvironments := []string{}\n\t\tif err = json.Unmarshal(envRespBody, &environments); proceedOnError(err) != nil {\n\t\t\treturn err\n\n\t\t}\n\n\t\tfor _, environment := range environments {\n\t\t\tfmt.Println(\"Exporting configuration for environment \" + environment)\n\t\t\tapiclient.SetApigeeEnv(environment)\n\t\t\tfmt.Println(\"\\tExporting Target servers...\")\n\t\t\tif targetServerResponse, err = targetservers.Export(conn); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteArrayByteArrayToFile(environment+\"_\"+targetServerFileName, false, targetServerResponse); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\tExporting KV Map names for environment %s...\\n\", environment)\n\t\t\tif listKVMBytes, err = kvm.List(\"\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+\"_\"+kVMFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = exportKVMEntries(\"env\", environment, listKVMBytes); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting Key store names...\")\n\t\t\tif respBody, err = keystores.List(); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+\"_\"+keyStoresFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting debugmask configuration...\")\n\t\t\tif respBody, err = env.GetDebug(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+debugmaskFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting traceconfig...\")\n\t\t\tif respBody, err = env.GetTraceConfig(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+tracecfgFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting references...\")\n\t\t\tif referencesResponse, err = references.Export(conn); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteArrayByteArrayToFile(environment+\"_\"+referencesFileName, false, referencesResponse); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\treturn\n\t},\n}\n\nvar allRevisions, continueOnErr bool\n\nfunc init() {\n\n\tExportCmd.Flags().StringVarP(&org, \"org\", \"o\",\n\t\t\"\", \"Apigee organization name\")\n\tExportCmd.Flags().IntVarP(&conn, \"conn\", \"c\",\n\t\t4, \"Number of connections\")\n\n\tExportCmd.Flags().BoolVarP(&allRevisions, \"all\", \"\",\n\t\tfalse, \"Export all revisions, default=false. Exports the latest revision\")\n\tExportCmd.Flags().BoolVarP(&continueOnErr, \"continueOnError\", \"\",\n\t\tfalse, \"Ignore errors and continue exporting data\")\n}\n\nfunc createFolders() (err error) {\n\tif err = os.Mkdir(proxiesFolderName, 0755); err != nil {\n\t\treturn err\n\t}\n\tif err = os.Mkdir(sharedFlowsFolderName, 0755); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc exportKVMEntries(scope string, env string, listKVMBytes []byte) (err error) {\n\n\tvar kvmEntries [][]byte\n\tvar listKVM []string\n\tvar fileName string\n\n\tif err = json.Unmarshal(listKVMBytes, &listKVM); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, mapName := range listKVM {\n\n\t\tfmt.Printf(\"\\tExporting KVM entries for %s in org %s\\n\", org, mapName)\n\t\tif kvmEntries, err = kvm.ExportEntries(\"\", mapName); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif scope == \"org\" {\n\t\t\tfileName = strings.Join([]string{scope, mapName, \"kvmfile\"}, \"_\")\n\t\t} else if scope == \"env\" {\n\t\t\tfileName = strings.Join([]string{scope, env, mapName, \"kvmfile\"}, \"_\")\n\t\t}\n\n\t\tif len(kvmEntries) > 0 {\n\t\t\tfor i := range kvmEntries {\n\t\t\t\tif err = apiclient.WriteByteArrayToFile(fileName+\"_\"+strconv.Itoa(i)+\".json\", false, kvmEntries[i]); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc proceedOnError(e error) error {\n\tif continueOnErr {\n\t\treturn nil\n\t}\n\treturn e\n}\n<commit_msg>fix for issue #85<commit_after>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage org\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/apigee\/apigeecli\/apiclient\"\n\t\"github.com\/apigee\/apigeecli\/client\/apis\"\n\t\"github.com\/apigee\/apigeecli\/client\/apps\"\n\t\"github.com\/apigee\/apigeecli\/client\/datacollectors\"\n\t\"github.com\/apigee\/apigeecli\/client\/developers\"\n\t\"github.com\/apigee\/apigeecli\/client\/env\"\n\t\"github.com\/apigee\/apigeecli\/client\/envgroups\"\n\t\"github.com\/apigee\/apigeecli\/client\/keystores\"\n\t\"github.com\/apigee\/apigeecli\/client\/kvm\"\n\t\"github.com\/apigee\/apigeecli\/client\/orgs\"\n\t\"github.com\/apigee\/apigeecli\/client\/products\"\n\t\"github.com\/apigee\/apigeecli\/client\/references\"\n\t\"github.com\/apigee\/apigeecli\/client\/sharedflows\"\n\t\"github.com\/apigee\/apigeecli\/client\/sync\"\n\t\"github.com\/apigee\/apigeecli\/client\/targetservers\"\n\t\"github.com\/apigee\/apigeecli\/clilog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ ExportCmd to get org details\nvar ExportCmd = &cobra.Command{\n\tUse: \"export\",\n\tShort: \"Export Apigee Configuration\",\n\tLong: \"Export Apigee Configuration\",\n\tArgs: func(cmd *cobra.Command, args []string) (err error) {\n\t\treturn apiclient.SetApigeeOrg(org)\n\t},\n\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\n\t\tvar productResponse, appsResponse, targetServerResponse, referencesResponse [][]byte\n\t\tvar respBody, listKVMBytes []byte\n\n\t\truntimeType, _ := orgs.GetOrgField(\"runtimeType\")\n\n\t\tif err = createFolders(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclilog.Warning.Println(\"Calls to Apigee APIs have a quota of 6000 per min. Running this tool against large list of entities can exhaust that quota and impact the usage of the platform.\")\n\n\t\tfmt.Println(\"Exporting API Proxies...\")\n\t\tif err = apis.ExportProxies(conn, proxiesFolderName, allRevisions); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Sharedflows...\")\n\t\tif err = sharedflows.Export(conn, sharedFlowsFolderName, allRevisions); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting API Products...\")\n\t\tif productResponse, err = products.Export(conn); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteArrayByteArrayToFile(productsFileName, false, productResponse); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"\\tExporting KV Map names for org %s\\n\", org)\n\t\tif listKVMBytes, err = kvm.List(\"\"); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(org+\"_\"+kVMFileName, false, listKVMBytes); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = exportKVMEntries(\"org\", \"\", listKVMBytes); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Developers...\")\n\t\tif respBody, err = developers.Export(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(developersFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Developer Apps...\")\n\t\tif appsResponse, err = apps.Export(conn); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteArrayByteArrayToFile(appsFileName, false, appsResponse); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Environment Group Configuration...\")\n\t\tapiclient.SetPrintOutput(false)\n\t\tif respBody, err = envgroups.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(envGroupsFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(\"Exporting Data collectors Configuration...\")\n\t\tif respBody, err = datacollectors.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = apiclient.WriteByteArrayToFile(dataCollFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif runtimeType == \"HYBRID\" {\n\t\t\tfmt.Println(\"Exporting Sync Authorization Identities...\")\n\t\t\tif respBody, err = sync.Get(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(syncAuthFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar envRespBody []byte\n\t\tif envRespBody, err = env.List(); proceedOnError(err) != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tenvironments := []string{}\n\t\tif err = json.Unmarshal(envRespBody, &environments); proceedOnError(err) != nil {\n\t\t\treturn err\n\n\t\t}\n\n\t\tfor _, environment := range environments {\n\t\t\tfmt.Println(\"Exporting configuration for environment \" + environment)\n\t\t\tapiclient.SetApigeeEnv(environment)\n\t\t\tapiclient.SetPrintOutput(false)\n\t\t\tfmt.Println(\"\\tExporting Target servers...\")\n\t\t\tif targetServerResponse, err = targetservers.Export(conn); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteArrayByteArrayToFile(environment+\"_\"+targetServerFileName, false, targetServerResponse); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\tExporting KV Map names for environment %s...\\n\", environment)\n\t\t\tif listKVMBytes, err = kvm.List(\"\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+\"_\"+kVMFileName, false, listKVMBytes); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = exportKVMEntries(\"env\", environment, listKVMBytes); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting Key store names...\")\n\t\t\tif respBody, err = keystores.List(); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+\"_\"+keyStoresFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting debugmask configuration...\")\n\t\t\tif respBody, err = env.GetDebug(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+debugmaskFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting traceconfig...\")\n\t\t\tif respBody, err = env.GetTraceConfig(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteByteArrayToFile(environment+tracecfgFileName, false, respBody); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfmt.Println(\"\\tExporting references...\")\n\t\t\tif referencesResponse, err = references.Export(conn); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = apiclient.WriteArrayByteArrayToFile(environment+\"_\"+referencesFileName, false, referencesResponse); proceedOnError(err) != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\treturn\n\t},\n}\n\nvar allRevisions, continueOnErr bool\n\nfunc init() {\n\n\tExportCmd.Flags().StringVarP(&org, \"org\", \"o\",\n\t\t\"\", \"Apigee organization name\")\n\tExportCmd.Flags().IntVarP(&conn, \"conn\", \"c\",\n\t\t4, \"Number of connections\")\n\n\tExportCmd.Flags().BoolVarP(&allRevisions, \"all\", \"\",\n\t\tfalse, \"Export all revisions, default=false. Exports the latest revision\")\n\tExportCmd.Flags().BoolVarP(&continueOnErr, \"continueOnError\", \"\",\n\t\tfalse, \"Ignore errors and continue exporting data\")\n}\n\nfunc createFolders() (err error) {\n\tif err = os.Mkdir(proxiesFolderName, 0755); err != nil {\n\t\treturn err\n\t}\n\tif err = os.Mkdir(sharedFlowsFolderName, 0755); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc exportKVMEntries(scope string, env string, listKVMBytes []byte) (err error) {\n\n\tvar kvmEntries [][]byte\n\tvar listKVM []string\n\tvar fileName string\n\n\tif err = json.Unmarshal(listKVMBytes, &listKVM); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, mapName := range listKVM {\n\n\t\tfmt.Printf(\"\\tExporting KVM entries for %s in org %s\\n\", org, mapName)\n\t\tif kvmEntries, err = kvm.ExportEntries(\"\", mapName); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif scope == \"org\" {\n\t\t\tfileName = strings.Join([]string{scope, mapName, \"kvmfile\"}, \"_\")\n\t\t} else if scope == \"env\" {\n\t\t\tfileName = strings.Join([]string{scope, env, mapName, \"kvmfile\"}, \"_\")\n\t\t}\n\n\t\tif len(kvmEntries) > 0 {\n\t\t\tfor i := range kvmEntries {\n\t\t\t\tif err = apiclient.WriteByteArrayToFile(fileName+\"_\"+strconv.Itoa(i)+\".json\", false, kvmEntries[i]); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc proceedOnError(e error) error {\n\tif continueOnErr {\n\t\treturn nil\n\t}\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/system\/admin\"\n\t\"github.com\/bosssauce\/ponzu\/system\/api\"\n\t\"github.com\/bosssauce\/ponzu\/system\/db\"\n)\n\nvar usage = `\n$ ponzu option <params> [specifiers]\n\nOptions \n\nnew <directory>:\n\n\tCreates a 'ponzu' directorty, or one by the name supplied as a parameter \n\timmediately following the 'new' option in the $GOPATH\/src directory. Note: \n\t'new' depends on the program 'git' and possibly a network connection. If \n\tthere is no local repository to clone from at the local machine's $GOPATH, \n\t'new' will attempt to clone the 'github.com\/bosssauce\/ponzu' package from \n\tover the network.\n\n\tExample:\n\t$ ponzu new myProject\n\t> New ponzu project created at $GOPATH\/src\/myProject\n\n\n\ngenerate, gen, g <type>:\n\n\tGenerate a content type file with boilerplate code to implement\n\tthe editor.Editable interface. Must be given one (1) parameter of\n\tthe name of the type for the new content.\n\n\tExample:\n\t$ ponzu gen review\n\n\n\n[[--port=8080] [--tls]] run <service(,service)>:\n\n\tStarts the 'ponzu' HTTP server for the JSON API, Admin System, or both.\n\tThe segments, separated by a comma, describe which services to start, either \n\t'admin' (Admin System \/ CMS backend) or 'api' (JSON API), and, optionally, \n\tif the server(s) should utilize TLS encryption (served over HTTPS), which is\n\tautomatically managed using Let's Encrypt (https:\/\/letsencrypt.org) \n\n\tExample: \n\t$ ponzu --port=8080 --tls run admin,api\n\t(or) \n\t$ ponzu run admin\n\t(or)\n\t$ ponzu --port=8888 run api\n\n\tDefaults to '--port=8080 run admin,api' (running Admin & API on port 8080, without TLS)\n\n\tNote: \n\tAdmin and API cannot run on separate processes unless you use a copy of the\n\tdatabase, since the first process to open it recieves a lock. If you intend\n\tto run the Admin and API on separate processes, you must call them with the\n\t'ponzu' command independently.\n\n`\n\nvar (\n\tport int\n\ttls bool\n\n\t\/\/ for ponzu internal \/ core development\n\tdev bool\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Println(usage)\n\t}\n}\n\nfunc main() {\n\tflag.IntVar(&port, \"port\", 8080, \"port for ponzu to bind its listener\")\n\tflag.BoolVar(&tls, \"tls\", false, \"enable automatic TLS\/SSL certificate management\")\n\tflag.BoolVar(&dev, \"dev\", false, \"modify environment for Ponzu core development\")\n\tflag.Parse()\n\n\targs := flag.Args()\n\n\tif len(args) < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tswitch args[0] {\n\tcase \"new\":\n\t\tif len(args) < 2 {\n\t\t\tflag.PrintDefaults()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\terr := newProjectInDir(args[1])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"generate\", \"gen\", \"g\":\n\t\tif len(args) < 2 {\n\t\t\tflag.PrintDefaults()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\terr := generateContentType(args[1], \"\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\tcase \"build\":\n\t\terr := buildPonzuServer(args)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"run\":\n\t\tvar addTLS string\n\t\tif tls {\n\t\t\taddTLS = \"--tls\"\n\t\t} else {\n\t\t\taddTLS = \"--tls=false\"\n\t\t}\n\n\t\tvar services string\n\t\tif len(args) > 1 {\n\t\t\tservices = args[1]\n\t\t} else {\n\t\t\tservices = \"admin,api\"\n\t\t}\n\n\t\tserve := exec.Command(\".\/ponzu-server\",\n\t\t\tfmt.Sprintf(\"--port=%d\", port),\n\t\t\taddTLS,\n\t\t\t\"serve\",\n\t\t\tservices,\n\t\t)\n\t\tserve.Stderr = os.Stderr\n\t\tserve.Stdout = os.Stdout\n\n\t\terr := serve.Start()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr = serve.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"serve\", \"s\":\n\t\tdb.Init()\n\t\tif len(args) > 1 {\n\t\t\tservices := strings.Split(args[1], \",\")\n\n\t\t\tfor i := range services {\n\t\t\t\tif services[i] == \"api\" {\n\t\t\t\t\tapi.Run()\n\t\t\t\t} else if services[i] == \"admin\" {\n\t\t\t\t\tadmin.Run()\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"To execute 'ponzu serve', you must specify which service to run.\")\n\t\t\t\t\tfmt.Println(\"$ ponzu --help\")\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif tls {\n\t\t\tfmt.Println(\"TLS through Let's Encrypt is not implemented yet.\")\n\t\t\tfmt.Println(\"Please run 'ponzu serve' without the --tls flag for now.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n\tcase \"\":\n\t\tflag.PrintDefaults()\n\tdefault:\n\t\tflag.PrintDefaults()\n\t}\n}\n<commit_msg>fixing cli usage to match new order of args<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/system\/admin\"\n\t\"github.com\/bosssauce\/ponzu\/system\/api\"\n\t\"github.com\/bosssauce\/ponzu\/system\/db\"\n)\n\nvar usage = `\n$ ponzu [specifiers] option <params>\n\nOptions \n\nnew <directory>:\n\n\tCreates a 'ponzu' directorty, or one by the name supplied as a parameter \n\timmediately following the 'new' option in the $GOPATH\/src directory. Note: \n\t'new' depends on the program 'git' and possibly a network connection. If \n\tthere is no local repository to clone from at the local machine's $GOPATH, \n\t'new' will attempt to clone the 'github.com\/bosssauce\/ponzu' package from \n\tover the network.\n\n\tExample:\n\t$ ponzu new myProject\n\t> New ponzu project created at $GOPATH\/src\/myProject\n\n\n\ngenerate, gen, g <type>:\n\n\tGenerate a content type file with boilerplate code to implement\n\tthe editor.Editable interface. Must be given one (1) parameter of\n\tthe name of the type for the new content.\n\n\tExample:\n\t$ ponzu gen review\n\n\n\n[[--port=8080] [--tls]] run <service(,service)>:\n\n\tStarts the 'ponzu' HTTP server for the JSON API, Admin System, or both.\n\tThe segments, separated by a comma, describe which services to start, either \n\t'admin' (Admin System \/ CMS backend) or 'api' (JSON API), and, optionally, \n\tif the server(s) should utilize TLS encryption (served over HTTPS), which is\n\tautomatically managed using Let's Encrypt (https:\/\/letsencrypt.org) \n\n\tExample: \n\t$ ponzu --port=8080 --tls run admin,api\n\t(or) \n\t$ ponzu run admin\n\t(or)\n\t$ ponzu --port=8888 run api\n\n\tDefaults to '--port=8080 run admin,api' (running Admin & API on port 8080, without TLS)\n\n\tNote: \n\tAdmin and API cannot run on separate processes unless you use a copy of the\n\tdatabase, since the first process to open it recieves a lock. If you intend\n\tto run the Admin and API on separate processes, you must call them with the\n\t'ponzu' command independently.\n\n`\n\nvar (\n\tport int\n\ttls bool\n\n\t\/\/ for ponzu internal \/ core development\n\tdev bool\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Println(usage)\n\t}\n}\n\nfunc main() {\n\tflag.IntVar(&port, \"port\", 8080, \"port for ponzu to bind its listener\")\n\tflag.BoolVar(&tls, \"tls\", false, \"enable automatic TLS\/SSL certificate management\")\n\tflag.BoolVar(&dev, \"dev\", false, \"modify environment for Ponzu core development\")\n\tflag.Parse()\n\n\targs := flag.Args()\n\n\tif len(args) < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tswitch args[0] {\n\tcase \"new\":\n\t\tif len(args) < 2 {\n\t\t\tflag.PrintDefaults()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\terr := newProjectInDir(args[1])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"generate\", \"gen\", \"g\":\n\t\tif len(args) < 2 {\n\t\t\tflag.PrintDefaults()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\terr := generateContentType(args[1], \"\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\tcase \"build\":\n\t\terr := buildPonzuServer(args)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"run\":\n\t\tvar addTLS string\n\t\tif tls {\n\t\t\taddTLS = \"--tls\"\n\t\t} else {\n\t\t\taddTLS = \"--tls=false\"\n\t\t}\n\n\t\tvar services string\n\t\tif len(args) > 1 {\n\t\t\tservices = args[1]\n\t\t} else {\n\t\t\tservices = \"admin,api\"\n\t\t}\n\n\t\tserve := exec.Command(\".\/ponzu-server\",\n\t\t\tfmt.Sprintf(\"--port=%d\", port),\n\t\t\taddTLS,\n\t\t\t\"serve\",\n\t\t\tservices,\n\t\t)\n\t\tserve.Stderr = os.Stderr\n\t\tserve.Stdout = os.Stdout\n\n\t\terr := serve.Start()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\terr = serve.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\tcase \"serve\", \"s\":\n\t\tdb.Init()\n\t\tif len(args) > 1 {\n\t\t\tservices := strings.Split(args[1], \",\")\n\n\t\t\tfor i := range services {\n\t\t\t\tif services[i] == \"api\" {\n\t\t\t\t\tapi.Run()\n\t\t\t\t} else if services[i] == \"admin\" {\n\t\t\t\t\tadmin.Run()\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"To execute 'ponzu serve', you must specify which service to run.\")\n\t\t\t\t\tfmt.Println(\"$ ponzu --help\")\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif tls {\n\t\t\tfmt.Println(\"TLS through Let's Encrypt is not implemented yet.\")\n\t\t\tfmt.Println(\"Please run 'ponzu serve' without the --tls flag for now.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n\tcase \"\":\n\t\tflag.PrintDefaults()\n\tdefault:\n\t\tflag.PrintDefaults()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"mvdan.cc\/sh\/v3\/fileutil\"\n\t\"mvdan.cc\/sh\/v3\/syntax\"\n)\n\nvar (\n\tshowVersion = flag.Bool(\"version\", false, \"\")\n\n\tlist = flag.Bool(\"l\", false, \"\")\n\twrite = flag.Bool(\"w\", false, \"\")\n\tsimple = flag.Bool(\"s\", false, \"\")\n\tfind = flag.Bool(\"f\", false, \"\")\n\tdiff = flag.Bool(\"d\", false, \"\")\n\n\tlangStr = flag.String(\"ln\", \"\", \"\")\n\tposix = flag.Bool(\"p\", false, \"\")\n\n\tindent = flag.Uint(\"i\", 0, \"\")\n\tbinNext = flag.Bool(\"bn\", false, \"\")\n\tcaseIndent = flag.Bool(\"ci\", false, \"\")\n\tspaceRedirs = flag.Bool(\"sr\", false, \"\")\n\tkeepPadding = flag.Bool(\"kp\", false, \"\")\n\tminify = flag.Bool(\"mn\", false, \"\")\n\n\ttoJSON = flag.Bool(\"tojson\", false, \"\")\n\n\tparser *syntax.Parser\n\tprinter *syntax.Printer\n\treadBuf, writeBuf bytes.Buffer\n\n\tcopyBuf = make([]byte, 32*1024)\n\n\tin io.Reader = os.Stdin\n\tout io.Writer = os.Stdout\n\n\tcolor bool\n\tansiFgRed = \"\\u001b[31m\"\n\tansiFgGreen = \"\\u001b[32m\"\n\tansiReset = \"\\u001b[0m\"\n\n\tversion = \"v3.0.0-alpha1\"\n)\n\nfunc init() {\n\tif term, ok := os.LookupEnv(\"TERM\"); ok && term == \"dumb\" {\n\t\tcolor = false\n\t\treturn\n\t}\n\tif f, ok := out.(*os.File); ok && terminal.IsTerminal(int(f.Fd())) {\n\t\tcolor = true\n\t\treturn\n\t}\n\tcolor = false\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, `usage: shfmt [flags] [path ...]\n\nIf no arguments are given, standard input will be used. If a given path\nis a directory, it will be recursively searched for shell files - both\nby filename extension and by shebang.\n\n -version show version and exit\n\n -l list files whose formatting differs from shfmt's\n -w write result to file instead of stdout\n -d error with a diff when the formatting differs\n -s simplify the code\n\nParser options:\n\n -ln str language variant to parse (bash\/posix\/mksh, default \"bash\")\n -p shorthand for -ln=posix\n\nPrinter options:\n\n -i uint indent: 0 for tabs (default), >0 for number of spaces\n -bn binary ops like && and | may start a line\n -ci switch cases will be indented\n -sr redirect operators will be followed by a space\n -kp keep column alignment paddings\n -mn minify program to reduce its size (implies -s)\n\nUtilities:\n\n -f recursively find all shell files and print the paths\n -tojson print syntax tree to stdout as a typed JSON\n`)\n\t}\n\tflag.Parse()\n\n\tif *showVersion {\n\t\tfmt.Println(version)\n\t\treturn\n\t}\n\tif *posix && *langStr != \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"-p and -ln=lang cannot coexist\\n\")\n\t\tos.Exit(1)\n\t}\n\tlang := syntax.LangBash\n\tswitch *langStr {\n\tcase \"bash\", \"\":\n\tcase \"posix\":\n\t\tlang = syntax.LangPOSIX\n\tcase \"mksh\":\n\t\tlang = syntax.LangMirBSDKorn\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"unknown shell language: %s\\n\", *langStr)\n\t\tos.Exit(1)\n\t}\n\tif *posix {\n\t\tlang = syntax.LangPOSIX\n\t}\n\tif *minify {\n\t\t*simple = true\n\t}\n\tparser = syntax.NewParser(syntax.KeepComments, syntax.Variant(lang))\n\tprinter = syntax.NewPrinter(func(p *syntax.Printer) {\n\t\tsyntax.Indent(*indent)(p)\n\t\tif *binNext {\n\t\t\tsyntax.BinaryNextLine(p)\n\t\t}\n\t\tif *caseIndent {\n\t\t\tsyntax.SwitchCaseIndent(p)\n\t\t}\n\t\tif *spaceRedirs {\n\t\t\tsyntax.SpaceRedirects(p)\n\t\t}\n\t\tif *keepPadding {\n\t\t\tsyntax.KeepPadding(p)\n\t\t}\n\t\tif *minify {\n\t\t\tsyntax.Minify(p)\n\t\t}\n\t})\n\tif flag.NArg() == 0 {\n\t\tif err := formatStdin(); err != nil {\n\t\t\tif err != errChangedWithDiff {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn\n\t}\n\tif *toJSON {\n\t\tfmt.Fprintln(os.Stderr, \"-tojson can only be used with stdin\/out\")\n\t\tos.Exit(1)\n\t}\n\tanyErr := false\n\tfor _, path := range flag.Args() {\n\t\twalk(path, func(err error) {\n\t\t\tif err != errChangedWithDiff {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\t\t\tanyErr = true\n\t\t})\n\t}\n\tif anyErr {\n\t\tos.Exit(1)\n\t}\n}\n\nvar errChangedWithDiff = fmt.Errorf(\"\")\n\nfunc formatStdin() error {\n\tif *write {\n\t\treturn fmt.Errorf(\"-w cannot be used on standard input\")\n\t}\n\tsrc, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn formatBytes(src, \"<standard input>\")\n}\n\nvar vcsDir = regexp.MustCompile(`^\\.(git|svn|hg)$`)\n\nfunc walk(path string, onError func(error)) {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\tonError(err)\n\t\treturn\n\t}\n\tif !info.IsDir() {\n\t\tif err := formatPath(path, false); err != nil {\n\t\t\tonError(err)\n\t\t}\n\t\treturn\n\t}\n\tfilepath.Walk(path, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() && vcsDir.MatchString(info.Name()) {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif err != nil {\n\t\t\tonError(err)\n\t\t\treturn nil\n\t\t}\n\t\tconf := fileutil.CouldBeScript(info)\n\t\tif conf == fileutil.ConfNotScript {\n\t\t\treturn nil\n\t\t}\n\t\terr = formatPath(path, conf == fileutil.ConfIfShebang)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tonError(err)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc formatPath(path string, checkShebang bool) error {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treadBuf.Reset()\n\tif checkShebang {\n\t\tn, err := f.Read(copyBuf[:32])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !fileutil.HasShebang(copyBuf[:n]) {\n\t\t\treturn nil\n\t\t}\n\t\treadBuf.Write(copyBuf[:n])\n\t}\n\tif *find {\n\t\tfmt.Fprintln(out, path)\n\t\treturn nil\n\t}\n\tif _, err := io.CopyBuffer(&readBuf, f, copyBuf); err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\treturn formatBytes(readBuf.Bytes(), path)\n}\n\nfunc formatBytes(src []byte, path string) error {\n\tprog, err := parser.Parse(bytes.NewReader(src), path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif *simple {\n\t\tsyntax.Simplify(prog)\n\t}\n\tif *toJSON {\n\t\t\/\/ must be standard input; fine to return\n\t\treturn writeJSON(out, prog, true)\n\t}\n\twriteBuf.Reset()\n\tprinter.Print(&writeBuf, prog)\n\tres := writeBuf.Bytes()\n\tif !bytes.Equal(src, res) {\n\t\tif *list {\n\t\t\tif _, err := fmt.Fprintln(out, path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif *write {\n\t\t\tf, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := f.Write(res); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif *diff {\n\t\t\tdata, err := diffBytes(src, res, path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"computing diff: %s\", err)\n\t\t\t}\n\t\t\tout.Write(data)\n\t\t\treturn errChangedWithDiff\n\t\t}\n\t}\n\tif !*list && !*write && !*diff {\n\t\tif _, err := out.Write(res); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeTempFile(dir, prefix string, data []byte) (string, error) {\n\tfile, err := ioutil.TempFile(dir, prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err = file.Write(data)\n\tif err1 := file.Close(); err == nil {\n\t\terr = err1\n\t}\n\tif err != nil {\n\t\tos.Remove(file.Name())\n\t\treturn \"\", err\n\t}\n\treturn file.Name(), nil\n}\n\nfunc diffBytes(b1, b2 []byte, path string) ([]byte, error) {\n\tfmt.Fprintf(out, \"diff -u %s %s\\n\",\n\t\tfilepath.ToSlash(path+\".orig\"),\n\t\tfilepath.ToSlash(path))\n\tf1, err := writeTempFile(\"\", \"shfmt\", b1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(f1)\n\n\tf2, err := writeTempFile(\"\", \"shfmt\", b2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(f2)\n\n\tdata, err := exec.Command(\"diff\", \"-u\", f1, f2).Output()\n\tif len(data) == 0 {\n\t\t\/\/ No diff, or something went wrong; don't check for err\n\t\t\/\/ as diff will return non-zero if the files differ.\n\t\treturn nil, err\n\t}\n\n\toutput := new(bytes.Buffer)\n\t\/\/ We already print the filename, so remove the\n\t\/\/ temporary filenames printed by diff.\n\tlines := bytes.Split(data, []byte(\"\\n\"))\n\tcount := len(lines)\n\tfor i, line := range lines {\n\t\tswitch {\n\t\tcase bytes.HasPrefix(line, []byte(\"---\")):\n\t\tcase bytes.HasPrefix(line, []byte(\"+++\")):\n\t\tcase bytes.HasPrefix(line, []byte(\"-\")):\n\t\t\tif color {\n\t\t\t\tfmt.Fprintf(output, \"%s%s%s\\n\", ansiFgRed, string(line), ansiReset)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t}\n\t\tcase bytes.HasPrefix(line, []byte(\"+\")):\n\t\t\tif color {\n\t\t\t\tfmt.Fprintf(output, \"%s%s%s\\n\", ansiFgGreen, string(line), ansiReset)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t}\n\t\tdefault:\n\t\t\tif i < count-1 {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(output, string(line))\n\t\t\t}\n\t\t}\n\t}\n\treturn output.Bytes(), nil\n}\n<commit_msg>cmd\/shfmt: fix the tests on a terminal<commit_after>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"mvdan.cc\/sh\/v3\/fileutil\"\n\t\"mvdan.cc\/sh\/v3\/syntax\"\n)\n\nvar (\n\tshowVersion = flag.Bool(\"version\", false, \"\")\n\n\tlist = flag.Bool(\"l\", false, \"\")\n\twrite = flag.Bool(\"w\", false, \"\")\n\tsimple = flag.Bool(\"s\", false, \"\")\n\tfind = flag.Bool(\"f\", false, \"\")\n\tdiff = flag.Bool(\"d\", false, \"\")\n\n\tlangStr = flag.String(\"ln\", \"\", \"\")\n\tposix = flag.Bool(\"p\", false, \"\")\n\n\tindent = flag.Uint(\"i\", 0, \"\")\n\tbinNext = flag.Bool(\"bn\", false, \"\")\n\tcaseIndent = flag.Bool(\"ci\", false, \"\")\n\tspaceRedirs = flag.Bool(\"sr\", false, \"\")\n\tkeepPadding = flag.Bool(\"kp\", false, \"\")\n\tminify = flag.Bool(\"mn\", false, \"\")\n\n\ttoJSON = flag.Bool(\"tojson\", false, \"\")\n\n\tparser *syntax.Parser\n\tprinter *syntax.Printer\n\treadBuf, writeBuf bytes.Buffer\n\n\tcopyBuf = make([]byte, 32*1024)\n\n\tin io.Reader = os.Stdin\n\tout io.Writer = os.Stdout\n\n\tcolor bool\n\tansiFgRed = \"\\u001b[31m\"\n\tansiFgGreen = \"\\u001b[32m\"\n\tansiReset = \"\\u001b[0m\"\n\n\tversion = \"v3.0.0-alpha1\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, `usage: shfmt [flags] [path ...]\n\nIf no arguments are given, standard input will be used. If a given path\nis a directory, it will be recursively searched for shell files - both\nby filename extension and by shebang.\n\n -version show version and exit\n\n -l list files whose formatting differs from shfmt's\n -w write result to file instead of stdout\n -d error with a diff when the formatting differs\n -s simplify the code\n\nParser options:\n\n -ln str language variant to parse (bash\/posix\/mksh, default \"bash\")\n -p shorthand for -ln=posix\n\nPrinter options:\n\n -i uint indent: 0 for tabs (default), >0 for number of spaces\n -bn binary ops like && and | may start a line\n -ci switch cases will be indented\n -sr redirect operators will be followed by a space\n -kp keep column alignment paddings\n -mn minify program to reduce its size (implies -s)\n\nUtilities:\n\n -f recursively find all shell files and print the paths\n -tojson print syntax tree to stdout as a typed JSON\n`)\n\t}\n\tflag.Parse()\n\n\tif *showVersion {\n\t\tfmt.Println(version)\n\t\treturn\n\t}\n\tif *posix && *langStr != \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"-p and -ln=lang cannot coexist\\n\")\n\t\tos.Exit(1)\n\t}\n\tlang := syntax.LangBash\n\tswitch *langStr {\n\tcase \"bash\", \"\":\n\tcase \"posix\":\n\t\tlang = syntax.LangPOSIX\n\tcase \"mksh\":\n\t\tlang = syntax.LangMirBSDKorn\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"unknown shell language: %s\\n\", *langStr)\n\t\tos.Exit(1)\n\t}\n\tif *posix {\n\t\tlang = syntax.LangPOSIX\n\t}\n\tif *minify {\n\t\t*simple = true\n\t}\n\tparser = syntax.NewParser(syntax.KeepComments, syntax.Variant(lang))\n\tprinter = syntax.NewPrinter(func(p *syntax.Printer) {\n\t\tsyntax.Indent(*indent)(p)\n\t\tif *binNext {\n\t\t\tsyntax.BinaryNextLine(p)\n\t\t}\n\t\tif *caseIndent {\n\t\t\tsyntax.SwitchCaseIndent(p)\n\t\t}\n\t\tif *spaceRedirs {\n\t\t\tsyntax.SpaceRedirects(p)\n\t\t}\n\t\tif *keepPadding {\n\t\t\tsyntax.KeepPadding(p)\n\t\t}\n\t\tif *minify {\n\t\t\tsyntax.Minify(p)\n\t\t}\n\t})\n\tif f, ok := out.(*os.File); ok && terminal.IsTerminal(int(f.Fd())) &&\n\t\tos.Getenv(\"TERM\") != \"dumb\" {\n\t\tcolor = true\n\t}\n\tif flag.NArg() == 0 {\n\t\tif err := formatStdin(); err != nil {\n\t\t\tif err != errChangedWithDiff {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn\n\t}\n\tif *toJSON {\n\t\tfmt.Fprintln(os.Stderr, \"-tojson can only be used with stdin\/out\")\n\t\tos.Exit(1)\n\t}\n\tanyErr := false\n\tfor _, path := range flag.Args() {\n\t\twalk(path, func(err error) {\n\t\t\tif err != errChangedWithDiff {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\t\t\tanyErr = true\n\t\t})\n\t}\n\tif anyErr {\n\t\tos.Exit(1)\n\t}\n}\n\nvar errChangedWithDiff = fmt.Errorf(\"\")\n\nfunc formatStdin() error {\n\tif *write {\n\t\treturn fmt.Errorf(\"-w cannot be used on standard input\")\n\t}\n\tsrc, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn formatBytes(src, \"<standard input>\")\n}\n\nvar vcsDir = regexp.MustCompile(`^\\.(git|svn|hg)$`)\n\nfunc walk(path string, onError func(error)) {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\tonError(err)\n\t\treturn\n\t}\n\tif !info.IsDir() {\n\t\tif err := formatPath(path, false); err != nil {\n\t\t\tonError(err)\n\t\t}\n\t\treturn\n\t}\n\tfilepath.Walk(path, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() && vcsDir.MatchString(info.Name()) {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif err != nil {\n\t\t\tonError(err)\n\t\t\treturn nil\n\t\t}\n\t\tconf := fileutil.CouldBeScript(info)\n\t\tif conf == fileutil.ConfNotScript {\n\t\t\treturn nil\n\t\t}\n\t\terr = formatPath(path, conf == fileutil.ConfIfShebang)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tonError(err)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc formatPath(path string, checkShebang bool) error {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treadBuf.Reset()\n\tif checkShebang {\n\t\tn, err := f.Read(copyBuf[:32])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !fileutil.HasShebang(copyBuf[:n]) {\n\t\t\treturn nil\n\t\t}\n\t\treadBuf.Write(copyBuf[:n])\n\t}\n\tif *find {\n\t\tfmt.Fprintln(out, path)\n\t\treturn nil\n\t}\n\tif _, err := io.CopyBuffer(&readBuf, f, copyBuf); err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\treturn formatBytes(readBuf.Bytes(), path)\n}\n\nfunc formatBytes(src []byte, path string) error {\n\tprog, err := parser.Parse(bytes.NewReader(src), path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif *simple {\n\t\tsyntax.Simplify(prog)\n\t}\n\tif *toJSON {\n\t\t\/\/ must be standard input; fine to return\n\t\treturn writeJSON(out, prog, true)\n\t}\n\twriteBuf.Reset()\n\tprinter.Print(&writeBuf, prog)\n\tres := writeBuf.Bytes()\n\tif !bytes.Equal(src, res) {\n\t\tif *list {\n\t\t\tif _, err := fmt.Fprintln(out, path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif *write {\n\t\t\tf, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := f.Write(res); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif *diff {\n\t\t\tdata, err := diffBytes(src, res, path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"computing diff: %s\", err)\n\t\t\t}\n\t\t\tout.Write(data)\n\t\t\treturn errChangedWithDiff\n\t\t}\n\t}\n\tif !*list && !*write && !*diff {\n\t\tif _, err := out.Write(res); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeTempFile(dir, prefix string, data []byte) (string, error) {\n\tfile, err := ioutil.TempFile(dir, prefix)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err = file.Write(data)\n\tif err1 := file.Close(); err == nil {\n\t\terr = err1\n\t}\n\tif err != nil {\n\t\tos.Remove(file.Name())\n\t\treturn \"\", err\n\t}\n\treturn file.Name(), nil\n}\n\nfunc diffBytes(b1, b2 []byte, path string) ([]byte, error) {\n\tfmt.Fprintf(out, \"diff -u %s %s\\n\",\n\t\tfilepath.ToSlash(path+\".orig\"),\n\t\tfilepath.ToSlash(path))\n\tf1, err := writeTempFile(\"\", \"shfmt\", b1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(f1)\n\n\tf2, err := writeTempFile(\"\", \"shfmt\", b2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(f2)\n\n\tdata, err := exec.Command(\"diff\", \"-u\", f1, f2).Output()\n\tif len(data) == 0 {\n\t\t\/\/ No diff, or something went wrong; don't check for err\n\t\t\/\/ as diff will return non-zero if the files differ.\n\t\treturn nil, err\n\t}\n\n\toutput := new(bytes.Buffer)\n\t\/\/ We already print the filename, so remove the\n\t\/\/ temporary filenames printed by diff.\n\tlines := bytes.Split(data, []byte(\"\\n\"))\n\tcount := len(lines)\n\tfor i, line := range lines {\n\t\tswitch {\n\t\tcase bytes.HasPrefix(line, []byte(\"---\")):\n\t\tcase bytes.HasPrefix(line, []byte(\"+++\")):\n\t\tcase bytes.HasPrefix(line, []byte(\"-\")):\n\t\t\tif color {\n\t\t\t\tfmt.Fprintf(output, \"%s%s%s\\n\", ansiFgRed, string(line), ansiReset)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t}\n\t\tcase bytes.HasPrefix(line, []byte(\"+\")):\n\t\t\tif color {\n\t\t\t\tfmt.Fprintf(output, \"%s%s%s\\n\", ansiFgGreen, string(line), ansiReset)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t}\n\t\tdefault:\n\t\t\tif i < count-1 {\n\t\t\t\tfmt.Fprintln(output, string(line))\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(output, string(line))\n\t\t\t}\n\t\t}\n\t}\n\treturn output.Bytes(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport \"launchpad.net\/gocheck\"\n\nfunc (s *S) TestAddOneRow(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"Three\", \"foo\"})\n\tc.Assert(table.String(), gocheck.Equals, \"+-------+-----+\\n| Three | foo |\\n+-------+-----+\\n\")\n}\n\nfunc (s *S) TestAddRows(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+---+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+---+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestColumnsSize(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\tc.Assert(table.columnsSize(), gocheck.DeepEquals, []int{5, 1})\n}\n\nfunc (s *S) TestSeparator(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := \"+-------+---+\\n\"\n\tc.Assert(table.separator(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestHeadings(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.Headers = Row{\"Word\", \"Number\"}\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+--------+\n| Word | Number |\n+-------+--------+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+--------+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestString(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+---+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+---+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestRenderNoRows(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.Headers = Row{\"Word\", \"Number\"}\n\texpected := `+------+--------+\n| Word | Number |\n+------+--------+\n+------+--------+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestRenderEmpty(c *gocheck.C) {\n\ttable := NewTable()\n\tc.Assert(table.String(), gocheck.Equals, \"\")\n}\n<commit_msg>Revert \"cmd: remove non-sense test\"<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport \"launchpad.net\/gocheck\"\n\nfunc (s *S) TestAddOneRow(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"Three\", \"foo\"})\n\tc.Assert(table.String(), gocheck.Equals, \"+-------+-----+\\n| Three | foo |\\n+-------+-----+\\n\")\n}\n\nfunc (s *S) TestAddRows(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+---+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+---+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestColumnsSize(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\tc.Assert(table.columnsSize(), gocheck.DeepEquals, []int{5, 1})\n}\n\nfunc (s *S) TestSeparator(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := \"+-------+---+\\n\"\n\tc.Assert(table.separator(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestHeadings(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.Headers = Row{\"Word\", \"Number\"}\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+--------+\n| Word | Number |\n+-------+--------+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+--------+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestString(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\texpected := `+-------+---+\n| One | 1 |\n| Two | 2 |\n| Three | 3 |\n+-------+---+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestRenderNoRows(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.Headers = Row{\"Word\", \"Number\"}\n\texpected := `+------+--------+\n| Word | Number |\n+------+--------+\n+------+--------+\n`\n\tc.Assert(table.String(), gocheck.Equals, expected)\n}\n\nfunc (s *S) TestRenderEmpty(c *gocheck.C) {\n\ttable := NewTable()\n\tc.Assert(table.String(), gocheck.Equals, \"\")\n}\n\nfunc (s *S) TestBytes(c *gocheck.C) {\n\ttable := NewTable()\n\ttable.AddRow(Row{\"One\", \"1\"})\n\ttable.AddRow(Row{\"Two\", \"2\"})\n\ttable.AddRow(Row{\"Three\", \"3\"})\n\tc.Assert(table.Bytes(), gocheck.DeepEquals, []byte(table.String()))\n}\n<|endoftext|>"} {"text":"<commit_before>package cmdline\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n\tcolorlog \"github.com\/disorganizer\/brig\/util\/log\"\n)\n\nfunc init() {\n\tlog.SetOutput(os.Stderr)\n\n\t\/\/ Only log the warning severity or above.\n\tlog.SetLevel(log.DebugLevel)\n\n\t\/\/ Log pretty text\n\tlog.SetFormatter(&colorlog.ColorfulLogFormatter{})\n}\n\nfunc formatGroup(category string) string {\n\treturn strings.ToUpper(category) + \" COMMANDS\"\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Commandline definition \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ RunCmdline starts a brig commandline tool.\nfunc RunCmdline() int {\n\n\tapp := cli.NewApp()\n\tapp.Name = \"brig\"\n\tapp.Usage = \"Secure and dezentralized file synchronization\"\n\n\t\/\/groups\n\trepoGroup := formatGroup(\"repository\")\n\tidntGroup := formatGroup(\"id helper\")\n\twdirGroup := formatGroup(\"working\")\n\tadvnGroup := formatGroup(\"advanced\")\n\tmiscGroup := formatGroup(\"misc\")\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"nodaemon,n\",\n\t\t\tUsage: \"Don't run the daemon\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, x\",\n\t\t\tUsage: \"Supply user password\",\n\t\t\tValue: \"\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Path of the repository\",\n\t\t\tValue: \".\",\n\t\t\tEnvVar: \"BRIG_PATH\",\n\t\t},\n\t}\n\n\t\/\/ Commands.\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"init\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Initialize an empty repository\",\n\t\t\tArgsUsage: \"<brig-id>\",\n\t\t\tDescription: \"Creates a new brig repository folder and unlocks it.\\n The name of the folder is derivated from the given brig-id.\\n brig-id example: yourname@optionaldomain\/ressource\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withExit(handleInit)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"open\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Open an encrypted repository\",\n\t\t\tArgsUsage: \"[--password]\",\n\t\t\tDescription: \"Open a closed (encrypted) brig repository by providing a password\",\n\t\t\tAction: withDaemon(handleOpen, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"close\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Close an encrypted repository\",\n\t\t\tDescription: \"Encrypt all metadata in the repository and go offline\",\n\t\t\tAction: withDaemon(handleClose, false),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"history\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Show the history of the given brig file\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleHistory, true)),\n\t\t\tDescription: \"history lists all modifications of a given file\",\n\t\t\tArgsUsage: \"<filename>\",\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"net\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Query and modify network status\",\n\t\t\tArgsUsage: \"[offline | online | status]\",\n\t\t\tDescription: \"Query and modify the connection state to the ipfs network\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"offline\",\n\t\t\t\t\tUsage: \"Disconnect from the outside world. The daemon will continue running\",\n\t\t\t\t\tAction: withDaemon(handleOffline, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"online\",\n\t\t\t\t\tUsage: \"Connect the daemon to the outside world\",\n\t\t\t\t\tAction: withDaemon(handleOnline, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"status\",\n\t\t\t\t\tUsage: \"Check if the daemon is online\",\n\t\t\t\t\tAction: withDaemon(handleIsOnline, true),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\tcli.Command{\n\t\t\tName: \"remote\",\n\t\t\tCategory: idntGroup,\n\t\t\tUsage: \"Manage remotes\",\n\t\t\tAction: withDaemon(handleRemoteList, true),\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"add\",\n\t\t\t\t\tUsage: \"Add a specific remote\",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(2), withDaemon(handleRemoteAdd, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"remove\",\n\t\t\t\t\tUsage: \"Remove a specifc remote\",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRemoteRemove, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"List remote status\",\n\t\t\t\t\tAction: withDaemon(handleRemoteList, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"locate\",\n\t\t\t\t\tUsage: \"Search a specific remote\",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRemoteLocate, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"self\",\n\t\t\t\t\tUsage: \"Print own identity hash\",\n\t\t\t\t\tAction: withDaemon(handleRemoteSelf, true),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"tree\",\n\t\t\tUsage: \"List files in a tree\",\n\t\t\tCategory: wdirGroup,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"depth, d\",\n\t\t\t\t\tUsage: \"Max depth to traverse\",\n\t\t\t\t\tValue: -1,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withDaemon(handleTree, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"ls\",\n\t\t\tUsage: \"List files\",\n\t\t\tCategory: wdirGroup,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"depth, d\",\n\t\t\t\t\tUsage: \"Max depth to traverse\",\n\t\t\t\t\tValue: -1,\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"recursive,r\",\n\t\t\t\t\tUsage: \"Allow recursive traverse\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withDaemon(handleList, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mkdir\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Create an empty directory\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleMkdir, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"add\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Transer file into brig's control\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleAdd, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"rm\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Remove the file and optionally old versions of it\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRm, true)),\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"recursive,r\",\n\t\t\t\t\tUsage: \"Remove directories recursively\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mv\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Move a file from SOURCE to DEST\",\n\t\t\tAction: withArgCheck(needAtLeast(2), withDaemon(handleMv, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"cat\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Write \",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleCat, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"daemon\",\n\t\t\tCategory: advnGroup,\n\t\t\tUsage: \"Manually run the daemon process\",\n\t\t\t\/\/ Flags\n\t\t\tAction: withExit(handleDaemon),\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"quit\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"Manually kill the daemon process\",\n\t\t\t\t\tAction: withDaemon(handleDaemonQuit, false),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"ping\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"See if the daemon responds in a timely fashion\",\n\t\t\t\t\tAction: withDaemon(handleDaemonPing, false),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"wait\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"Block until the daemon is available\",\n\t\t\t\t\tAction: withExit(handleDaemonWait),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"config\",\n\t\t\tCategory: miscGroup,\n\t\t\tUsage: \"Access, list and modify configuration values\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"Show current config values\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigList)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"get\",\n\t\t\t\t\tUsage: \"Connect the daemon to the outside world\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigGet)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"set\",\n\t\t\t\t\tUsage: \"Check if the daemon is online\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigSet)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mount\",\n\t\t\tCategory: miscGroup,\n\t\t\tUsage: \"Handle FUSE mountpoints\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"unmount,u\",\n\t\t\t\t\tUsage: \"Unmount the specified directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleMount, true)),\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n\treturn 0\n}\n<commit_msg>cmdline\/parser.go: Remaining commands cleanup.<commit_after>package cmdline\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n\tcolorlog \"github.com\/disorganizer\/brig\/util\/log\"\n)\n\nfunc init() {\n\tlog.SetOutput(os.Stderr)\n\n\t\/\/ Only log the warning severity or above.\n\tlog.SetLevel(log.DebugLevel)\n\n\t\/\/ Log pretty text\n\tlog.SetFormatter(&colorlog.ColorfulLogFormatter{})\n}\n\nfunc formatGroup(category string) string {\n\treturn strings.ToUpper(category) + \" COMMANDS\"\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Commandline definition \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ RunCmdline starts a brig commandline tool.\nfunc RunCmdline() int {\n\n\tapp := cli.NewApp()\n\tapp.Name = \"brig\"\n\tapp.Usage = \"Secure and dezentralized file synchronization\"\n\n\t\/\/groups\n\trepoGroup := formatGroup(\"repository\")\n\twdirGroup := formatGroup(\"working\")\n\tadvnGroup := formatGroup(\"advanced\")\n\tmiscGroup := formatGroup(\"misc\")\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"nodaemon,n\",\n\t\t\tUsage: \"Don't run the daemon\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, x\",\n\t\t\tUsage: \"Supply user password\",\n\t\t\tValue: \"\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Path of the repository\",\n\t\t\tValue: \".\",\n\t\t\tEnvVar: \"BRIG_PATH\",\n\t\t},\n\t}\n\n\t\/\/ Commands.\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"init\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Initialize an empty repository\",\n\t\t\tArgsUsage: \"<brig-id>\",\n\t\t\tDescription: \"Creates a new brig repository folder and unlocks it.\\n The name of the folder is derivated from the given brig-id.\\n brig-id example: yourname@optionaldomain\/ressource\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withExit(handleInit)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"open\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Open an encrypted repository\",\n\t\t\tArgsUsage: \"[--password]\",\n\t\t\tDescription: \"Open a closed (encrypted) brig repository by providing a password\",\n\t\t\tAction: withDaemon(handleOpen, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"close\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Close an encrypted repository\",\n\t\t\tDescription: \"Encrypt all metadata in the repository and go offline\",\n\t\t\tAction: withDaemon(handleClose, false),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"history\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Show the history of the given brig file\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleHistory, true)),\n\t\t\tDescription: \"history lists all modifications of a given file\",\n\t\t\tArgsUsage: \"<filename>\",\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"net\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Query and modify network status\",\n\t\t\tArgsUsage: \"[offline | online | status]\",\n\t\t\tDescription: \"Query and modify the connection state to the ipfs network\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"offline\",\n\t\t\t\t\tUsage: \"Disconnect from the outside world. The daemon will continue running\",\n\t\t\t\t\tAction: withDaemon(handleOffline, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"online\",\n\t\t\t\t\tUsage: \"Connect the daemon to the outside world\",\n\t\t\t\t\tAction: withDaemon(handleOnline, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"status\",\n\t\t\t\t\tUsage: \"Check if the daemon is online\",\n\t\t\t\t\tAction: withDaemon(handleIsOnline, true),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\tcli.Command{\n\t\t\tName: \"remote\",\n\t\t\tCategory: repoGroup,\n\t\t\tUsage: \"Remote management.\",\n\t\t\tArgsUsage: \"[add | remove | list | locate | self]\",\n\t\t\tDescription: \"Add, remove, list, locate remotes and print own identity\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"add\",\n\t\t\t\t\tUsage: \"Add a specific remote\",\n\t\t\t\t\tArgsUsage: \"<brig-id> <ipfs-hash>\",\n\t\t\t\t\tDescription: \"Adds a specific user (brig-remote-id) with a specific identity (ipfs-hash) to remotes\",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(2), withDaemon(handleRemoteAdd, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"remove\",\n\t\t\t\t\tUsage: \"Remove a specifc remote\",\n\t\t\t\t\tArgsUsage: \"<brig-remote-id>\",\n\t\t\t\t\tDescription: \"Removes a specific remote from remotes.\",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRemoteRemove, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"List status of known remotes\",\n\t\t\t\t\tDescription: \"Lists all known remotes and their status\",\n\t\t\t\t\tAction: withDaemon(handleRemoteList, true),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"locate\",\n\t\t\t\t\tUsage: \"Search a specific remote\",\n\t\t\t\t\tArgsUsage: \"<brig-remote-id>\",\n\t\t\t\t\tDescription: \"Locates all remotes with the given brig-remote-id \",\n\t\t\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRemoteLocate, true)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"self\",\n\t\t\t\t\tUsage: \"Print identity\",\n\t\t\t\t\tDescription: \"Prints the users identity and online status\",\n\t\t\t\t\tAction: withDaemon(handleRemoteSelf, true),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"tree\",\n\t\t\tUsage: \"List files in a tree\",\n\t\t\tArgsUsage: \"[\/brig-path] [--depth|-d]\",\n\t\t\tDescription: \"Lists all files of a specific brig path in a tree like-manner\",\n\t\t\tCategory: wdirGroup,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"depth, d\",\n\t\t\t\t\tUsage: \"Max depth to traverse\",\n\t\t\t\t\tValue: -1,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withDaemon(handleTree, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"ls\",\n\t\t\tUsage: \"List files\",\n\t\t\tArgsUsage: \"[\/brig-path] [--depth|-d] [--recursive|-r]\",\n\t\t\tDescription: \"Lists all files of a specific brig path in a ls-like manner\",\n\t\t\tCategory: wdirGroup,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"depth, d\",\n\t\t\t\t\tUsage: \"Max depth to traverse\",\n\t\t\t\t\tValue: -1,\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"recursive,r\",\n\t\t\t\t\tUsage: \"Allow recursive traverse\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withDaemon(handleList, true),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mkdir\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Create an empty directory\",\n\t\t\tArgsUsage: \"<\/dirname>\",\n\t\t\tDescription: \"Create a empty directory\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleMkdir, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"add\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Transer file into brig's control\",\n\t\t\tArgsUsage: \"<\/file>\",\n\t\t\tDescription: \"Add a specific file to the brig repository\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleAdd, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"rm\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Remove the file and optionally old versions of it\",\n\t\t\tArgsUsage: \"<\/file> [--recursive|-r]\",\n\t\t\tDescription: \"Remove a spcific file or directory\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleRm, true)),\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"recursive,r\",\n\t\t\t\t\tUsage: \"Remove directories recursively\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mv\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Move a specific file\",\n\t\t\tArgsUsage: \"<\/sourcefile> <\/destinationfile>\",\n\t\t\tDescription: \"Move a file from SOURCE to DEST\",\n\t\t\tAction: withArgCheck(needAtLeast(2), withDaemon(handleMv, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"cat\",\n\t\t\tCategory: wdirGroup,\n\t\t\tUsage: \"Concatenates a file\",\n\t\t\tArgsUsage: \"<\/file>\",\n\t\t\tDescription: \"Concatenates files and print them on stdout\",\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleCat, true)),\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"daemon\",\n\t\t\tCategory: advnGroup,\n\t\t\tUsage: \"Manually run the daemon process\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"launch\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"Start the daemon process\",\n\t\t\t\t\tDescription: \"Start the brig daemon process, unlock the repository and go online\",\n\t\t\t\t\tAction: withExit(handleDaemon),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"quit\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"Manually kill the daemon process\",\n\t\t\t\t\tDescription: \"Disconnect from ipfs network, shutdown the daemon and lock the repository\",\n\t\t\t\t\tAction: withDaemon(handleDaemonQuit, false),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"ping\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"See if the daemon responds in a timely fashion\",\n\t\t\t\t\tDescription: \"Checks if deamon is running and reports the response time\",\n\t\t\t\t\tAction: withDaemon(handleDaemonPing, false),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"wait\",\n\t\t\t\t\tCategory: advnGroup,\n\t\t\t\t\tUsage: \"Block until the daemon is available\",\n\t\t\t\t\tDescription: \"Wait blocks until the daemon is available\",\n\t\t\t\t\tAction: withExit(handleDaemonWait),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"config\",\n\t\t\tCategory: miscGroup,\n\t\t\tUsage: \"Access, list and modify configuration values\",\n\t\t\tSubcommands: []cli.Command{\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"Show current config values\",\n\t\t\t\t\tDescription: \"Show the current brig configuration\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigList)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"get\",\n\t\t\t\t\tUsage: \"Get a specific config value\",\n\t\t\t\t\tDescription: \"Get a specific config value and print it to stdout\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigGet)),\n\t\t\t\t},\n\t\t\t\tcli.Command{\n\t\t\t\t\tName: \"set\",\n\t\t\t\t\tUsage: \"Set a specific config value\",\n\t\t\t\t\tDescription: \"Set a given config option to the given value\",\n\t\t\t\t\tAction: withExit(withConfig(handleConfigSet)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcli.Command{\n\t\t\tName: \"mount\",\n\t\t\tCategory: miscGroup,\n\t\t\tUsage: \"Mount a brig repository\",\n\t\t\tArgsUsage: \"[--umount|-u] <mountpath>\",\n\t\t\tDescription: \"Mount a brig repository as FUSE filesystem\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"umount,u\",\n\t\t\t\t\tUsage: \"Unmount the specified directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: withArgCheck(needAtLeast(1), withDaemon(handleMount, true)),\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"time\"\n\n\t\"github.com\/go-stack\/stack\"\n)\n\n\/\/ A Valuer generates a log value. When passed to Context.With in a value\n\/\/ element (odd indexes), it represents a dynamic value which is re-evaluated\n\/\/ with each log event.\ntype Valuer func() interface{}\n\n\/\/ bindValues replaces all value elements (odd indexes) containing a Valuer\n\/\/ with their generated value.\nfunc bindValues(keyvals []interface{}) {\n\tfor i := 1; i < len(keyvals); i += 2 {\n\t\tif v, ok := keyvals[i].(Valuer); ok {\n\t\t\tkeyvals[i] = v()\n\t\t}\n\t}\n}\n\n\/\/ containsValuer returns true if any of the value elements (odd indexes)\n\/\/ contain a Valuer.\nfunc containsValuer(keyvals []interface{}) bool {\n\tfor i := 1; i < len(keyvals); i += 2 {\n\t\tif _, ok := keyvals[i].(Valuer); ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Timestamp returns a Valuer that invokes the underlying function when bound,\n\/\/ returning a time.Time. Users will probably want to use DefaultTimestamp or\n\/\/ DefaultTimestampUTC.\nfunc Timestamp(t func() time.Time) Valuer {\n\treturn func() interface{} { return t() }\n}\n\nvar (\n\t\/\/ DefaultTimestamp is a Valuer that returns the current wallclock time,\n\t\/\/ respecting time zones, when bound.\n\tDefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) }\n\n\t\/\/ DefaultTimestampUTC is a Valuer that returns the current time in UTC\n\t\/\/ when bound.\n\tDefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) }\n)\n\n\/\/ Caller returns a Valuer that returns a file and line from a specified depth\n\/\/ in the callstack. Users will probably want to use DefaultCaller.\nfunc Caller(depth int) Valuer {\n\treturn func() interface{} { return stack.Caller(depth) }\n}\n\nvar (\n\t\/\/ DefaultCaller is a Valuer that returns the file and line where the Log\n\t\/\/ method was invoked. It can only be used with log.With.\n\tDefaultCaller = Caller(3)\n)\n<commit_msg>Change timestamp value format to nano precision<commit_after>package log\n\nimport (\n\t\"time\"\n\n\t\"github.com\/go-stack\/stack\"\n)\n\n\/\/ A Valuer generates a log value. When passed to Context.With in a value\n\/\/ element (odd indexes), it represents a dynamic value which is re-evaluated\n\/\/ with each log event.\ntype Valuer func() interface{}\n\n\/\/ bindValues replaces all value elements (odd indexes) containing a Valuer\n\/\/ with their generated value.\nfunc bindValues(keyvals []interface{}) {\n\tfor i := 1; i < len(keyvals); i += 2 {\n\t\tif v, ok := keyvals[i].(Valuer); ok {\n\t\t\tkeyvals[i] = v()\n\t\t}\n\t}\n}\n\n\/\/ containsValuer returns true if any of the value elements (odd indexes)\n\/\/ contain a Valuer.\nfunc containsValuer(keyvals []interface{}) bool {\n\tfor i := 1; i < len(keyvals); i += 2 {\n\t\tif _, ok := keyvals[i].(Valuer); ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Timestamp returns a Valuer that invokes the underlying function when bound,\n\/\/ returning a time.Time. Users will probably want to use DefaultTimestamp or\n\/\/ DefaultTimestampUTC.\nfunc Timestamp(t func() time.Time) Valuer {\n\treturn func() interface{} { return t() }\n}\n\nvar (\n\t\/\/ DefaultTimestamp is a Valuer that returns the current wallclock time,\n\t\/\/ respecting time zones, when bound.\n\tDefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339Nano) }\n\n\t\/\/ DefaultTimestampUTC is a Valuer that returns the current time in UTC\n\t\/\/ when bound.\n\tDefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339Nano) }\n)\n\n\/\/ Caller returns a Valuer that returns a file and line from a specified depth\n\/\/ in the callstack. Users will probably want to use DefaultCaller.\nfunc Caller(depth int) Valuer {\n\treturn func() interface{} { return stack.Caller(depth) }\n}\n\nvar (\n\t\/\/ DefaultCaller is a Valuer that returns the file and line where the Log\n\t\/\/ method was invoked. It can only be used with log.With.\n\tDefaultCaller = Caller(3)\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage auth\n\nimport (\n\t\"context\"\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/teleport\"\n\t\"github.com\/gravitational\/teleport\/lib\"\n\t\"github.com\/gravitational\/teleport\/lib\/defaults\"\n\t\"github.com\/gravitational\/teleport\/lib\/services\"\n\t\"github.com\/gravitational\/teleport\/lib\/tlsca\"\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ LocalRegister is used to generate host keys when a node or proxy is running\n\/\/ within the same process as the Auth Server and as such, does not need to\n\/\/ use provisioning tokens.\nfunc LocalRegister(id IdentityID, authServer *AuthServer, additionalPrincipals, dnsNames []string, remoteAddr string) (*Identity, error) {\n\t\/\/ If local registration is happening and no remote address was passed in\n\t\/\/ (which means no advertise IP was set), use localhost.\n\tif remoteAddr == \"\" {\n\t\tremoteAddr = defaults.Localhost\n\t}\n\tkeys, err := authServer.GenerateServerKeys(GenerateServerKeysRequest{\n\t\tHostID: id.HostUUID,\n\t\tNodeName: id.NodeName,\n\t\tRoles: teleport.Roles{id.Role},\n\t\tAdditionalPrincipals: additionalPrincipals,\n\t\tRemoteAddr: remoteAddr,\n\t\tDNSNames: dnsNames,\n\t\tNoCache: true,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tidentity, err := ReadIdentityFromKeyPair(keys)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn identity, nil\n}\n\n\/\/ RegisterParams specifies parameters\n\/\/ for first time register operation with auth server\ntype RegisterParams struct {\n\t\/\/ DataDir is the data directory\n\t\/\/ storing CA certificate\n\tDataDir string\n\t\/\/ Token is a secure token to join the cluster\n\tToken string\n\t\/\/ ID is identity ID\n\tID IdentityID\n\t\/\/ Servers is a list of auth servers to dial\n\tServers []utils.NetAddr\n\t\/\/ AdditionalPrincipals is a list of additional principals to dial\n\tAdditionalPrincipals []string\n\t\/\/ DNSNames is a list of DNS names to add to x509 certificate\n\tDNSNames []string\n\t\/\/ PrivateKey is a PEM encoded private key (not passed to auth servers)\n\tPrivateKey []byte\n\t\/\/ PublicTLSKey is a server's public key to sign\n\tPublicTLSKey []byte\n\t\/\/ PublicSSHKey is a server's public SSH key to sign\n\tPublicSSHKey []byte\n\t\/\/ CipherSuites is a list of cipher suites to use for TLS client connection\n\tCipherSuites []uint16\n\t\/\/ CAPin is the SKPI hash of the CA used to verify the Auth Server.\n\tCAPin string\n\t\/\/ CAPath is the path to the CA file.\n\tCAPath string\n\t\/\/ GetHostCredentials is a client that can fetch host credentials.\n\tGetHostCredentials HostCredentials\n}\n\n\/\/ CredGetter is an interface for a client that can be used to get host\n\/\/ credentials. This interface is needed because lib\/client can not be imported\n\/\/ in lib\/auth due to circular imports.\ntype HostCredentials func(context.Context, string, bool, RegisterUsingTokenRequest) (*PackedKeys, error)\n\n\/\/ Register is used to generate host keys when a node or proxy are running on\n\/\/ different hosts than the auth server. This method requires provisioning\n\/\/ tokens to prove a valid auth server was used to issue the joining request\n\/\/ as well as a method for the node to validate the auth server.\nfunc Register(params RegisterParams) (*Identity, error) {\n\t\/\/ Read in the token. The token can either be passed in or come from a file\n\t\/\/ on disk.\n\ttoken, err := readToken(params.Token)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ Attempt to register through the auth server, if it fails, try and\n\t\/\/ register through the proxy server.\n\tident, err := registerThroughAuth(token, params)\n\tif err != nil {\n\t\t\/\/ If no params client was set this is a proxy and fail right away.\n\t\tif params.GetHostCredentials == nil {\n\t\t\tlog.Debugf(\"Missing client, failing with error from Auth Server: %v.\", err)\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tident, er := registerThroughProxy(token, params)\n\t\tif er != nil {\n\t\t\treturn nil, trace.NewAggregate(err, er)\n\t\t}\n\n\t\tlog.Debugf(\"Successfully registered through proxy server.\")\n\t\treturn ident, nil\n\t}\n\n\tlog.Debugf(\"Successfully registered through auth server.\")\n\treturn ident, nil\n}\n\n\/\/ registerThroughProxy is used to register through the proxy server.\nfunc registerThroughProxy(token string, params RegisterParams) (*Identity, error) {\n\tlog.Debugf(\"Attempting to register through proxy server.\")\n\n\tif len(params.Servers) == 0 {\n\t\treturn nil, trace.BadParameter(\"no auth servers set\")\n\t}\n\n\tkeys, err := params.GetHostCredentials(context.Background(),\n\t\tparams.Servers[0].String(),\n\t\tlib.IsInsecureDevMode(),\n\t\tRegisterUsingTokenRequest{\n\t\t\tToken: token,\n\t\t\tHostID: params.ID.HostUUID,\n\t\t\tNodeName: params.ID.NodeName,\n\t\t\tRole: params.ID.Role,\n\t\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\t\tDNSNames: params.DNSNames,\n\t\t\tPublicTLSKey: params.PublicTLSKey,\n\t\t\tPublicSSHKey: params.PublicSSHKey,\n\t\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\n\/\/ registerThroughAuth is used to register through the auth server.\nfunc registerThroughAuth(token string, params RegisterParams) (*Identity, error) {\n\tlog.Debugf(\"Attempting to register through auth server.\")\n\n\tvar client *Client\n\tvar err error\n\n\t\/\/ Build a client to the Auth Server. If a CA pin is specified require the\n\t\/\/ Auth Server is validated. Otherwise attempt to use the CA file on disk\n\t\/\/ but if it's not available connect without validating the Auth Server CA.\n\tswitch {\n\tcase params.CAPin != \"\":\n\t\tclient, err = pinRegisterClient(params)\n\tdefault:\n\t\tclient, err = insecureRegisterClient(params)\n\t}\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tdefer client.Close()\n\n\t\/\/ Get the SSH and X509 certificates for a node.\n\tkeys, err := client.RegisterUsingToken(RegisterUsingTokenRequest{\n\t\tToken: token,\n\t\tHostID: params.ID.HostUUID,\n\t\tNodeName: params.ID.NodeName,\n\t\tRole: params.ID.Role,\n\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\tDNSNames: params.DNSNames,\n\t\tPublicTLSKey: params.PublicTLSKey,\n\t\tPublicSSHKey: params.PublicSSHKey,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\n\/\/ insecureRegisterClient attempts to connects to the Auth Server using the\n\/\/ CA on disk. If no CA is found on disk, Teleport will not verify the Auth\n\/\/ Server it is connecting to.\nfunc insecureRegisterClient(params RegisterParams) (*Client, error) {\n\ttlsConfig := utils.TLSConfig(params.CipherSuites)\n\n\tcert, err := readCA(params)\n\tif err != nil && !trace.IsNotFound(err) {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ If no CA was found, then create a insecure connection to the Auth Server,\n\t\/\/ otherwise use the CA on disk to validate the Auth Server.\n\tif trace.IsNotFound(err) {\n\t\ttlsConfig.InsecureSkipVerify = true\n\n\t\tlog.Warnf(\"Joining cluster without validating the identity of the Auth \" +\n\t\t\t\"Server. This may open you up to a Man-In-The-Middle (MITM) attack if an \" +\n\t\t\t\"attacker can gain privileged network access. To remedy this, use the CA pin \" +\n\t\t\t\"value provided when join token was generated to validate the identity of \" +\n\t\t\t\"the Auth Server.\")\n\t} else {\n\t\tcertPool := x509.NewCertPool()\n\t\tcertPool.AddCert(cert)\n\t\ttlsConfig.RootCAs = certPool\n\n\t\tlog.Infof(\"Joining remote cluster %v, validating connection with certificate on disk.\", cert.Subject.CommonName)\n\t}\n\n\tclient, err := NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn client, nil\n}\n\n\/\/ readCA will read in CA that will be used to validate the certificate that\n\/\/ the Auth Server presents.\nfunc readCA(params RegisterParams) (*x509.Certificate, error) {\n\tcertBytes, err := utils.ReadPath(params.CAPath)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tcert, err := tlsca.ParseCertificatePEM(certBytes)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err, \"failed to parse certificate at %v\", params.CAPath)\n\t}\n\treturn cert, nil\n}\n\n\/\/ pinRegisterClient first connects to the Auth Server using a insecure\n\/\/ connection to fetch the root CA. If the root CA matches the provided CA\n\/\/ pin, a connection will be re-established and the root CA will be used to\n\/\/ validate the certificate presented. If both conditions hold true, then we\n\/\/ know we are connecting to the expected Auth Server.\nfunc pinRegisterClient(params RegisterParams) (*Client, error) {\n\t\/\/ Build a insecure client to the Auth Server. This is safe because even if\n\t\/\/ an attacker were to MITM this connection the CA pin will not match below.\n\ttlsConfig := utils.TLSConfig(params.CipherSuites)\n\ttlsConfig.InsecureSkipVerify = true\n\tclient, err := NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tdefer client.Close()\n\n\t\/\/ Fetch the root CA from the Auth Server. The NOP role has access to the\n\t\/\/ GetClusterCACert endpoint.\n\tlocalCA, err := client.GetClusterCACert()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\ttlsCA, err := tlsca.ParseCertificatePEM(localCA.TLSCA)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ Check that the SPKI pin matches the CA we fetched over a insecure\n\t\/\/ connection. This makes sure the CA fetched over a insecure connection is\n\t\/\/ in-fact the expected CA.\n\terr = utils.CheckSPKI(params.CAPin, tlsCA)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tlog.Infof(\"Joining remote cluster %v with CA pin.\", tlsCA.Subject.CommonName)\n\n\t\/\/ Create another client, but this time with the CA provided to validate\n\t\/\/ that the Auth Server was issued a certificate by the same CA.\n\ttlsConfig = utils.TLSConfig(params.CipherSuites)\n\tcertPool := x509.NewCertPool()\n\tcertPool.AddCert(tlsCA)\n\ttlsConfig.RootCAs = certPool\n\n\tclient, err = NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn client, nil\n}\n\n\/\/ ReRegisterParams specifies parameters for re-registering\n\/\/ in the cluster (rotating certificates for existing members)\ntype ReRegisterParams struct {\n\t\/\/ Client is an authenticated client using old credentials\n\tClient ClientI\n\t\/\/ ID is identity ID\n\tID IdentityID\n\t\/\/ AdditionalPrincipals is a list of additional principals to dial\n\tAdditionalPrincipals []string\n\t\/\/ DNSNames is a list of DNS Names to add to the x509 client certificate\n\tDNSNames []string\n\t\/\/ PrivateKey is a PEM encoded private key (not passed to auth servers)\n\tPrivateKey []byte\n\t\/\/ PublicTLSKey is a server's public key to sign\n\tPublicTLSKey []byte\n\t\/\/ PublicSSHKey is a server's public SSH key to sign\n\tPublicSSHKey []byte\n\t\/\/ Rotation is the rotation state of the certificate authority\n\tRotation services.Rotation\n}\n\n\/\/ ReRegister renews the certificates and private keys based on the client's existing identity.\nfunc ReRegister(params ReRegisterParams) (*Identity, error) {\n\thostID, err := params.ID.HostID()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys, err := params.Client.GenerateServerKeys(GenerateServerKeysRequest{\n\t\tHostID: hostID,\n\t\tNodeName: params.ID.NodeName,\n\t\tRoles: teleport.Roles{params.ID.Role},\n\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\tDNSNames: params.DNSNames,\n\t\tPublicTLSKey: params.PublicTLSKey,\n\t\tPublicSSHKey: params.PublicSSHKey,\n\t\tRotation: ¶ms.Rotation,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\nfunc readToken(token string) (string, error) {\n\tif !strings.HasPrefix(token, \"\/\") {\n\t\treturn token, nil\n\t}\n\t\/\/ treat it as a file\n\tout, err := ioutil.ReadFile(token)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\t\/\/ trim newlines as tokens in files tend to have newlines\n\treturn strings.TrimSpace(string(out)), nil\n}\n\n\/\/ PackedKeys is a collection of private key, SSH host certificate\n\/\/ and TLS certificate and certificate authority issued the certificate\ntype PackedKeys struct {\n\t\/\/ Key is a private key\n\tKey []byte `json:\"key\"`\n\t\/\/ Cert is an SSH host cert\n\tCert []byte `json:\"cert\"`\n\t\/\/ TLSCert is an X509 certificate\n\tTLSCert []byte `json:\"tls_cert\"`\n\t\/\/ TLSCACerts is a list of TLS certificate authorities.\n\tTLSCACerts [][]byte `json:\"tls_ca_certs\"`\n\t\/\/ SSHCACerts is a list of SSH certificate authorities.\n\tSSHCACerts [][]byte `json:\"ssh_ca_certs\"`\n}\n<commit_msg>Clearer logging when node fails to join cluster.<commit_after>\/*\nCopyright 2015 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage auth\n\nimport (\n\t\"context\"\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/teleport\"\n\t\"github.com\/gravitational\/teleport\/lib\"\n\t\"github.com\/gravitational\/teleport\/lib\/defaults\"\n\t\"github.com\/gravitational\/teleport\/lib\/services\"\n\t\"github.com\/gravitational\/teleport\/lib\/tlsca\"\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ LocalRegister is used to generate host keys when a node or proxy is running\n\/\/ within the same process as the Auth Server and as such, does not need to\n\/\/ use provisioning tokens.\nfunc LocalRegister(id IdentityID, authServer *AuthServer, additionalPrincipals, dnsNames []string, remoteAddr string) (*Identity, error) {\n\t\/\/ If local registration is happening and no remote address was passed in\n\t\/\/ (which means no advertise IP was set), use localhost.\n\tif remoteAddr == \"\" {\n\t\tremoteAddr = defaults.Localhost\n\t}\n\tkeys, err := authServer.GenerateServerKeys(GenerateServerKeysRequest{\n\t\tHostID: id.HostUUID,\n\t\tNodeName: id.NodeName,\n\t\tRoles: teleport.Roles{id.Role},\n\t\tAdditionalPrincipals: additionalPrincipals,\n\t\tRemoteAddr: remoteAddr,\n\t\tDNSNames: dnsNames,\n\t\tNoCache: true,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tidentity, err := ReadIdentityFromKeyPair(keys)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn identity, nil\n}\n\n\/\/ RegisterParams specifies parameters\n\/\/ for first time register operation with auth server\ntype RegisterParams struct {\n\t\/\/ DataDir is the data directory\n\t\/\/ storing CA certificate\n\tDataDir string\n\t\/\/ Token is a secure token to join the cluster\n\tToken string\n\t\/\/ ID is identity ID\n\tID IdentityID\n\t\/\/ Servers is a list of auth servers to dial\n\tServers []utils.NetAddr\n\t\/\/ AdditionalPrincipals is a list of additional principals to dial\n\tAdditionalPrincipals []string\n\t\/\/ DNSNames is a list of DNS names to add to x509 certificate\n\tDNSNames []string\n\t\/\/ PrivateKey is a PEM encoded private key (not passed to auth servers)\n\tPrivateKey []byte\n\t\/\/ PublicTLSKey is a server's public key to sign\n\tPublicTLSKey []byte\n\t\/\/ PublicSSHKey is a server's public SSH key to sign\n\tPublicSSHKey []byte\n\t\/\/ CipherSuites is a list of cipher suites to use for TLS client connection\n\tCipherSuites []uint16\n\t\/\/ CAPin is the SKPI hash of the CA used to verify the Auth Server.\n\tCAPin string\n\t\/\/ CAPath is the path to the CA file.\n\tCAPath string\n\t\/\/ GetHostCredentials is a client that can fetch host credentials.\n\tGetHostCredentials HostCredentials\n}\n\n\/\/ CredGetter is an interface for a client that can be used to get host\n\/\/ credentials. This interface is needed because lib\/client can not be imported\n\/\/ in lib\/auth due to circular imports.\ntype HostCredentials func(context.Context, string, bool, RegisterUsingTokenRequest) (*PackedKeys, error)\n\n\/\/ Register is used to generate host keys when a node or proxy are running on\n\/\/ different hosts than the auth server. This method requires provisioning\n\/\/ tokens to prove a valid auth server was used to issue the joining request\n\/\/ as well as a method for the node to validate the auth server.\nfunc Register(params RegisterParams) (*Identity, error) {\n\t\/\/ Read in the token. The token can either be passed in or come from a file\n\t\/\/ on disk.\n\ttoken, err := readToken(params.Token)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ Attempt to register through the auth server, if it fails, try and\n\t\/\/ register through the proxy server.\n\tident, err := registerThroughAuth(token, params)\n\tif err != nil {\n\t\t\/\/ If no params client was set this is a proxy and fail right away.\n\t\tif params.GetHostCredentials == nil {\n\t\t\tlog.Debugf(\"Missing client, failing with error from Auth Server: %v.\", err)\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tident, err = registerThroughProxy(token, params)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tlog.Debugf(\"Successfully registered through proxy server.\")\n\t\treturn ident, nil\n\t}\n\n\tlog.Debugf(\"Successfully registered through auth server.\")\n\treturn ident, nil\n}\n\n\/\/ registerThroughProxy is used to register through the proxy server.\nfunc registerThroughProxy(token string, params RegisterParams) (*Identity, error) {\n\tlog.Debugf(\"Attempting to register through proxy server.\")\n\n\tif len(params.Servers) == 0 {\n\t\treturn nil, trace.BadParameter(\"no auth servers set\")\n\t}\n\n\tkeys, err := params.GetHostCredentials(context.Background(),\n\t\tparams.Servers[0].String(),\n\t\tlib.IsInsecureDevMode(),\n\t\tRegisterUsingTokenRequest{\n\t\t\tToken: token,\n\t\t\tHostID: params.ID.HostUUID,\n\t\t\tNodeName: params.ID.NodeName,\n\t\t\tRole: params.ID.Role,\n\t\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\t\tDNSNames: params.DNSNames,\n\t\t\tPublicTLSKey: params.PublicTLSKey,\n\t\t\tPublicSSHKey: params.PublicSSHKey,\n\t\t})\n\tif err != nil {\n\t\treturn nil, trace.Unwrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\n\/\/ registerThroughAuth is used to register through the auth server.\nfunc registerThroughAuth(token string, params RegisterParams) (*Identity, error) {\n\tlog.Debugf(\"Attempting to register through auth server.\")\n\n\tvar client *Client\n\tvar err error\n\n\t\/\/ Build a client to the Auth Server. If a CA pin is specified require the\n\t\/\/ Auth Server is validated. Otherwise attempt to use the CA file on disk\n\t\/\/ but if it's not available connect without validating the Auth Server CA.\n\tswitch {\n\tcase params.CAPin != \"\":\n\t\tclient, err = pinRegisterClient(params)\n\tdefault:\n\t\tclient, err = insecureRegisterClient(params)\n\t}\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tdefer client.Close()\n\n\t\/\/ Get the SSH and X509 certificates for a node.\n\tkeys, err := client.RegisterUsingToken(RegisterUsingTokenRequest{\n\t\tToken: token,\n\t\tHostID: params.ID.HostUUID,\n\t\tNodeName: params.ID.NodeName,\n\t\tRole: params.ID.Role,\n\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\tDNSNames: params.DNSNames,\n\t\tPublicTLSKey: params.PublicTLSKey,\n\t\tPublicSSHKey: params.PublicSSHKey,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\n\/\/ insecureRegisterClient attempts to connects to the Auth Server using the\n\/\/ CA on disk. If no CA is found on disk, Teleport will not verify the Auth\n\/\/ Server it is connecting to.\nfunc insecureRegisterClient(params RegisterParams) (*Client, error) {\n\ttlsConfig := utils.TLSConfig(params.CipherSuites)\n\n\tcert, err := readCA(params)\n\tif err != nil && !trace.IsNotFound(err) {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ If no CA was found, then create a insecure connection to the Auth Server,\n\t\/\/ otherwise use the CA on disk to validate the Auth Server.\n\tif trace.IsNotFound(err) {\n\t\ttlsConfig.InsecureSkipVerify = true\n\n\t\tlog.Warnf(\"Joining cluster without validating the identity of the Auth \" +\n\t\t\t\"Server. This may open you up to a Man-In-The-Middle (MITM) attack if an \" +\n\t\t\t\"attacker can gain privileged network access. To remedy this, use the CA pin \" +\n\t\t\t\"value provided when join token was generated to validate the identity of \" +\n\t\t\t\"the Auth Server.\")\n\t} else {\n\t\tcertPool := x509.NewCertPool()\n\t\tcertPool.AddCert(cert)\n\t\ttlsConfig.RootCAs = certPool\n\n\t\tlog.Infof(\"Joining remote cluster %v, validating connection with certificate on disk.\", cert.Subject.CommonName)\n\t}\n\n\tclient, err := NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn client, nil\n}\n\n\/\/ readCA will read in CA that will be used to validate the certificate that\n\/\/ the Auth Server presents.\nfunc readCA(params RegisterParams) (*x509.Certificate, error) {\n\tcertBytes, err := utils.ReadPath(params.CAPath)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tcert, err := tlsca.ParseCertificatePEM(certBytes)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err, \"failed to parse certificate at %v\", params.CAPath)\n\t}\n\treturn cert, nil\n}\n\n\/\/ pinRegisterClient first connects to the Auth Server using a insecure\n\/\/ connection to fetch the root CA. If the root CA matches the provided CA\n\/\/ pin, a connection will be re-established and the root CA will be used to\n\/\/ validate the certificate presented. If both conditions hold true, then we\n\/\/ know we are connecting to the expected Auth Server.\nfunc pinRegisterClient(params RegisterParams) (*Client, error) {\n\t\/\/ Build a insecure client to the Auth Server. This is safe because even if\n\t\/\/ an attacker were to MITM this connection the CA pin will not match below.\n\ttlsConfig := utils.TLSConfig(params.CipherSuites)\n\ttlsConfig.InsecureSkipVerify = true\n\tclient, err := NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tdefer client.Close()\n\n\t\/\/ Fetch the root CA from the Auth Server. The NOP role has access to the\n\t\/\/ GetClusterCACert endpoint.\n\tlocalCA, err := client.GetClusterCACert()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\ttlsCA, err := tlsca.ParseCertificatePEM(localCA.TLSCA)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t\/\/ Check that the SPKI pin matches the CA we fetched over a insecure\n\t\/\/ connection. This makes sure the CA fetched over a insecure connection is\n\t\/\/ in-fact the expected CA.\n\terr = utils.CheckSPKI(params.CAPin, tlsCA)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tlog.Infof(\"Joining remote cluster %v with CA pin.\", tlsCA.Subject.CommonName)\n\n\t\/\/ Create another client, but this time with the CA provided to validate\n\t\/\/ that the Auth Server was issued a certificate by the same CA.\n\ttlsConfig = utils.TLSConfig(params.CipherSuites)\n\tcertPool := x509.NewCertPool()\n\tcertPool.AddCert(tlsCA)\n\ttlsConfig.RootCAs = certPool\n\n\tclient, err = NewTLSClient(ClientConfig{Addrs: params.Servers, TLS: tlsConfig})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn client, nil\n}\n\n\/\/ ReRegisterParams specifies parameters for re-registering\n\/\/ in the cluster (rotating certificates for existing members)\ntype ReRegisterParams struct {\n\t\/\/ Client is an authenticated client using old credentials\n\tClient ClientI\n\t\/\/ ID is identity ID\n\tID IdentityID\n\t\/\/ AdditionalPrincipals is a list of additional principals to dial\n\tAdditionalPrincipals []string\n\t\/\/ DNSNames is a list of DNS Names to add to the x509 client certificate\n\tDNSNames []string\n\t\/\/ PrivateKey is a PEM encoded private key (not passed to auth servers)\n\tPrivateKey []byte\n\t\/\/ PublicTLSKey is a server's public key to sign\n\tPublicTLSKey []byte\n\t\/\/ PublicSSHKey is a server's public SSH key to sign\n\tPublicSSHKey []byte\n\t\/\/ Rotation is the rotation state of the certificate authority\n\tRotation services.Rotation\n}\n\n\/\/ ReRegister renews the certificates and private keys based on the client's existing identity.\nfunc ReRegister(params ReRegisterParams) (*Identity, error) {\n\thostID, err := params.ID.HostID()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys, err := params.Client.GenerateServerKeys(GenerateServerKeysRequest{\n\t\tHostID: hostID,\n\t\tNodeName: params.ID.NodeName,\n\t\tRoles: teleport.Roles{params.ID.Role},\n\t\tAdditionalPrincipals: params.AdditionalPrincipals,\n\t\tDNSNames: params.DNSNames,\n\t\tPublicTLSKey: params.PublicTLSKey,\n\t\tPublicSSHKey: params.PublicSSHKey,\n\t\tRotation: ¶ms.Rotation,\n\t})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tkeys.Key = params.PrivateKey\n\n\treturn ReadIdentityFromKeyPair(keys)\n}\n\nfunc readToken(token string) (string, error) {\n\tif !strings.HasPrefix(token, \"\/\") {\n\t\treturn token, nil\n\t}\n\t\/\/ treat it as a file\n\tout, err := ioutil.ReadFile(token)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\t\/\/ trim newlines as tokens in files tend to have newlines\n\treturn strings.TrimSpace(string(out)), nil\n}\n\n\/\/ PackedKeys is a collection of private key, SSH host certificate\n\/\/ and TLS certificate and certificate authority issued the certificate\ntype PackedKeys struct {\n\t\/\/ Key is a private key\n\tKey []byte `json:\"key\"`\n\t\/\/ Cert is an SSH host cert\n\tCert []byte `json:\"cert\"`\n\t\/\/ TLSCert is an X509 certificate\n\tTLSCert []byte `json:\"tls_cert\"`\n\t\/\/ TLSCACerts is a list of TLS certificate authorities.\n\tTLSCACerts [][]byte `json:\"tls_ca_certs\"`\n\t\/\/ SSHCACerts is a list of SSH certificate authorities.\n\tSSHCACerts [][]byte `json:\"ssh_ca_certs\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 John Welsh <john.welsh@gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license that can\n\/\/ be found in the LICENSE file.\n\n\/\/ Package logviewer provides a terminal UI for viewing log streams from multiple sources\npackage logviewer\n\nimport (\n\tui \"github.com\/gizak\/termui\"\n\t\"bytes\"\n\t\"strconv\"\n\t\"github.com\/gizak\/termui\/extra\"\n\t\"errors\"\n\t\"time\"\n)\n\nconst HelpText string = \"[h] and [l] to navigate, [t] to show timestamps, [q] to exit\"\n\n\/\/ A Line represents a single line of log content\ntype Line struct {\n\tLog string\n\tTimestamp time.Time\n}\n\n\/\/ A Viewer is used to display log data from one or more sources\ntype Viewer struct {\n\tfeeds []*Feed\n\ttabpane *extra.Tabpane\n\ttitle *ui.Par\n\tinitialized bool\n\tshowTimestamp bool\n}\n\n\/\/ A Feed represents an independent stream of log data\ntype Feed struct {\n\tname string\n\tlogs []*Line\n\tmaxHistory int\n\tpar *ui.Par\n\tviewer *Viewer\n}\n\n\/\/ NewViewer initializes a Viewer with a given name\nfunc NewViewer(name string) *Viewer {\n\tviewer := &Viewer{\n\t\tfeeds: []*Feed{},\n\t\ttabpane: extra.NewTabpane(),\n\t\ttitle: ui.NewPar(\"[\" + name + \"](fg-green,fg-bold) \" + HelpText),\n\t\tinitialized: false,\n\t\tshowTimestamp: false,\n\t}\n\tviewer.tabpane.Border = true\n\tviewer.title.Height = 1\n\tviewer.title.Border = false\n\n\treturn viewer\n}\n\nfunc newLogFeed(name string, maxHistory int) *Feed {\n\tfeed := Feed{\n\t\tname: name,\n\t\tlogs: []*Line{},\n\t\tmaxHistory: maxHistory,\n\t}\n\treturn &feed\n}\n\n\/\/ AddLogFeed adds a new Feed to the Viewer, returning the index of the Feed for use in\n\/\/ future calls to AddLogLine\nfunc (v*Viewer) AddLogFeed(name string, maxHistory int) *Feed {\n\tfeed := newLogFeed(name, maxHistory)\n\tfeed.viewer = v\n\tv.feeds = append(v.feeds, feed)\n\n\tfeed.par = ui.NewPar(\" \")\n\tfeed.par.Border = false\n\n\ttab := extra.NewTab(feed.name)\n\ttab.AddBlocks(feed.par)\n\n\tv.tabpane.SetTabs(append(v.tabpane.Tabs, *tab)...)\n\tv.render()\n\treturn feed\n}\n\n\/\/ RemoveLogFeed removes a given Feed from the Viewer\nfunc (v*Viewer) RemoveLogFeed(f *Feed) error {\n\tfor i := range v.feeds {\n\t\tif v.feeds[i] == f {\n\t\t\t\/\/ TODO this probably leaks\n\t\t\ttabs := v.tabpane.Tabs\n\t\t\tv.tabpane.SetTabs(append(tabs[:i], tabs[i+1:]...)...)\n\t\t\tv.render()\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(\"Feed not found\")\n}\n\n\/\/ AddLogLine adds a Line to a Feed and renders the result in the user's terminal\nfunc (f*Feed) AddLogLine(l *Line) {\n\tf.logs = append(f.logs, l)\n\t\/\/ TODO circular queue with size of maxHistory\n\tif len(f.logs) > f.maxHistory {\n\t\tover := len(f.logs) - f.maxHistory\n\t\tf.logs = f.logs[over:]\n\t}\n\tf.updatePar()\n\tf.viewer.render()\n}\n\nfunc (f*Feed) updatePar() {\n\th := f.par.GetHeight()\n\n\tvar text bytes.Buffer\n\tlogs := f.logs\n\tif h < len(f.logs) {\n\t\tlogs = logs[len(f.logs)-h:]\n\t}\n\tfor i, line := range logs {\n\t\tif f.viewer.showTimestamp {\n\t\t\ttext.WriteString(\"[\")\n\t\t\ttext.WriteString(line.Timestamp.Format(\"2006-01-02T15:04:05-0700\"))\n\t\t\ttext.WriteString(\"] \")\n\t\t}\n\t\ttext.WriteString(line.Log)\n\t\tif i != len(logs)-1 {\n\t\t\ttext.WriteString(\"\\n\")\n\t\t}\n\t}\n\tf.par.Text = text.String()\n}\n\nfunc (v*Viewer) titleString(width int) string {\n\tvar title bytes.Buffer\n\tfor i, feed := range v.feeds {\n\t\ttitle.WriteString(\"[\")\n\t\ttitle.WriteString(strconv.Itoa(i))\n\t\ttitle.WriteString(\"]\")\n\t\tif len(feed.name)+title.Len() > width-3 {\n\n\t\t}\n\t\ttitle.WriteString(feed.name)\n\t\ttitle.WriteString(\" \")\n\t\tif title.Len() > width-5 {\n\t\t\ttitle.WriteString(\"...\")\n\t\t\tbreak\n\t\t}\n\t}\n\treturn title.String()\n}\n\nfunc (v*Viewer) render() {\n\tif v.initialized {\n\t\tv.updateDimensions()\n\t\tui.Body.Align()\n\t\tui.Render(ui.Body)\n\t}\n}\n\nfunc (v*Viewer) updateFeedDimensions(w, h int) {\n\tfor _, f := range v.feeds {\n\t\tf.par.Width = w\n\t\tf.par.Height = h\n\t}\n}\n\n\/\/ Display starts the viewer. This call will block until the log display is exited by the user\nfunc (v*Viewer) Display() {\n\terr := ui.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer ui.Close()\n\tv.initialized = true\n\n\tui.Body.AddRows(\n\t\tui.NewRow(\n\t\t\tui.NewCol(12, 0, v.title)),\n\t\tui.NewRow(\n\t\t\tui.NewCol(12, 0, v.tabpane)))\n\n\tui.Handle(\"\/sys\/wnd\/resize\", func(ui.Event) {\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/l\", func(ui.Event) {\n\t\tv.tabpane.SetActiveRight()\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/h\", func(ui.Event) {\n\t\tv.tabpane.SetActiveLeft()\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/t\", func(ui.Event) {\n\t\tv.showTimestamp = !v.showTimestamp\n\t\tfor _, f := range v.feeds {\n\t\t\tf.updatePar()\n\t\t}\n\t\tv.render()\n\t})\n\n\tv.render()\n\tui.Loop()\n}\n\nfunc (v*Viewer) updateDimensions() {\n\tw := ui.TermWidth()\n\th := ui.TermHeight() - v.tabpane.Height\n\tui.Body.Width = w\n\tv.updateFeedDimensions(w, h)\n}\n<commit_msg>Finished documenting public API<commit_after>\/\/ Copyright 2017 John Welsh <john.welsh@gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license that can\n\/\/ be found in the LICENSE file.\n\n\/\/ Package logviewer provides a terminal based UI for viewing log streams from multiple sources. Implemented using\n\/\/ @gizak's awesome termui lib\npackage logviewer\n\nimport (\n\tui \"github.com\/gizak\/termui\"\n\t\"bytes\"\n\t\"strconv\"\n\t\"github.com\/gizak\/termui\/extra\"\n\t\"errors\"\n\t\"time\"\n)\n\nconst HelpText string = \"[h] and [l] to navigate, [t] to show timestamps, [q] to exit\"\n\n\/\/ A Line represents a single line of log content\ntype Line struct {\n\tLog string\n\tTimestamp time.Time\n}\n\n\/\/ A Viewer is used to display log data from one or more sources\ntype Viewer struct {\n\tfeeds []*Feed\n\ttabpane *extra.Tabpane\n\ttitle *ui.Par\n\tinitialized bool\n\tshowTimestamp bool\n}\n\n\/\/ A Feed represents an independent stream of log data\ntype Feed struct {\n\tname string\n\tlogs []*Line\n\tmaxHistory int\n\tpar *ui.Par\n\tviewer *Viewer\n}\n\n\/\/ NewViewer initializes a Viewer with a given name\nfunc NewViewer(name string) *Viewer {\n\tviewer := &Viewer{\n\t\tfeeds: []*Feed{},\n\t\ttabpane: extra.NewTabpane(),\n\t\ttitle: ui.NewPar(\"[\" + name + \"](fg-green,fg-bold) \" + HelpText),\n\t\tinitialized: false,\n\t\tshowTimestamp: false,\n\t}\n\tviewer.tabpane.Border = true\n\tviewer.title.Height = 1\n\tviewer.title.Border = false\n\n\treturn viewer\n}\n\nfunc newLogFeed(name string, maxHistory int) *Feed {\n\tfeed := Feed{\n\t\tname: name,\n\t\tlogs: []*Line{},\n\t\tmaxHistory: maxHistory,\n\t}\n\treturn &feed\n}\n\n\/\/ AddLogFeed adds a new Feed to the Viewer, returning the index of the Feed for use in\n\/\/ future calls to AddLogLine\nfunc (v*Viewer) AddLogFeed(name string, maxHistory int) *Feed {\n\tfeed := newLogFeed(name, maxHistory)\n\tfeed.viewer = v\n\tv.feeds = append(v.feeds, feed)\n\n\tfeed.par = ui.NewPar(\" \")\n\tfeed.par.Border = false\n\n\ttab := extra.NewTab(feed.name)\n\ttab.AddBlocks(feed.par)\n\n\tv.tabpane.SetTabs(append(v.tabpane.Tabs, *tab)...)\n\tv.render()\n\treturn feed\n}\n\n\/\/ RemoveLogFeed removes a given Feed from the Viewer\nfunc (v*Viewer) RemoveLogFeed(f *Feed) error {\n\tfor i := range v.feeds {\n\t\tif v.feeds[i] == f {\n\t\t\t\/\/ TODO this probably leaks\n\t\t\ttabs := v.tabpane.Tabs\n\t\t\tv.tabpane.SetTabs(append(tabs[:i], tabs[i+1:]...)...)\n\t\t\tv.render()\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(\"Feed not found\")\n}\n\n\/\/ AddLogLine adds a Line to a Feed and renders the result in the user's terminal\nfunc (f*Feed) AddLogLine(l *Line) {\n\tf.logs = append(f.logs, l)\n\t\/\/ TODO circular queue with size of maxHistory\n\tif len(f.logs) > f.maxHistory {\n\t\tover := len(f.logs) - f.maxHistory\n\t\tf.logs = f.logs[over:]\n\t}\n\tf.updatePar()\n\tf.viewer.render()\n}\n\nfunc (f*Feed) updatePar() {\n\th := f.par.GetHeight()\n\n\tvar text bytes.Buffer\n\tlogs := f.logs\n\tif h < len(f.logs) {\n\t\tlogs = logs[len(f.logs)-h:]\n\t}\n\tfor i, line := range logs {\n\t\tif f.viewer.showTimestamp {\n\t\t\ttext.WriteString(\"[\")\n\t\t\ttext.WriteString(line.Timestamp.Format(\"2006-01-02T15:04:05-0700\"))\n\t\t\ttext.WriteString(\"] \")\n\t\t}\n\t\ttext.WriteString(line.Log)\n\t\tif i != len(logs)-1 {\n\t\t\ttext.WriteString(\"\\n\")\n\t\t}\n\t}\n\tf.par.Text = text.String()\n}\n\nfunc (v*Viewer) titleString(width int) string {\n\tvar title bytes.Buffer\n\tfor i, feed := range v.feeds {\n\t\ttitle.WriteString(\"[\")\n\t\ttitle.WriteString(strconv.Itoa(i))\n\t\ttitle.WriteString(\"]\")\n\t\tif len(feed.name)+title.Len() > width-3 {\n\n\t\t}\n\t\ttitle.WriteString(feed.name)\n\t\ttitle.WriteString(\" \")\n\t\tif title.Len() > width-5 {\n\t\t\ttitle.WriteString(\"...\")\n\t\t\tbreak\n\t\t}\n\t}\n\treturn title.String()\n}\n\nfunc (v*Viewer) render() {\n\tif v.initialized {\n\t\tv.updateDimensions()\n\t\tui.Body.Align()\n\t\tui.Render(ui.Body)\n\t}\n}\n\nfunc (v*Viewer) updateFeedDimensions(w, h int) {\n\tfor _, f := range v.feeds {\n\t\tf.par.Width = w\n\t\tf.par.Height = h\n\t}\n}\n\n\/\/ Display starts the viewer. This call will block until the log display is exited by the user\nfunc (v*Viewer) Display() {\n\terr := ui.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer ui.Close()\n\tv.initialized = true\n\n\tui.Body.AddRows(\n\t\tui.NewRow(\n\t\t\tui.NewCol(12, 0, v.title)),\n\t\tui.NewRow(\n\t\t\tui.NewCol(12, 0, v.tabpane)))\n\n\tui.Handle(\"\/sys\/wnd\/resize\", func(ui.Event) {\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/q\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/l\", func(ui.Event) {\n\t\tv.tabpane.SetActiveRight()\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/h\", func(ui.Event) {\n\t\tv.tabpane.SetActiveLeft()\n\t\tv.render()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/t\", func(ui.Event) {\n\t\tv.showTimestamp = !v.showTimestamp\n\t\tfor _, f := range v.feeds {\n\t\t\tf.updatePar()\n\t\t}\n\t\tv.render()\n\t})\n\n\tv.render()\n\tui.Loop()\n}\n\nfunc (v*Viewer) updateDimensions() {\n\tw := ui.TermWidth()\n\th := ui.TermHeight() - v.tabpane.Height\n\tui.Body.Width = w\n\tv.updateFeedDimensions(w, h)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ rsync transport for netbackup\n\/\/\n\/\/ This file is part of netbackup (http:\/\/github.com\/marcopaganini\/netbackup)\n\/\/ See instructions in the README.md file that accompanies this program.\n\/\/ rsync transport for netbackup\n\npackage transports\n\nimport (\n\t\"fmt\"\n\t\"github.com\/marcopaganini\/logger\"\n\t\"github.com\/marcopaganini\/netbackup\/config\"\n\t\"github.com\/marcopaganini\/netbackup\/execute\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\trsyncCmd = \"rsync\"\n)\n\n\/\/ RsyncTransport is the main structure for the rsync transport.\ntype RsyncTransport struct {\n\tTransport\n}\n\n\/\/ NewRsyncTransport creates a new Transport object for rsync.\nfunc NewRsyncTransport(config *config.Config, ex execute.Executor, log *logger.Logger, dryRun bool) (*RsyncTransport, error) {\n\tt := &RsyncTransport{}\n\tt.config = config\n\tt.log = log\n\tt.dryRun = dryRun\n\n\t\/\/ If execute object is nil, create a new one\n\tt.execute = ex\n\tif t.execute == nil {\n\t\tt.execute = execute.New()\n\t}\n\n\t\/\/ Basic config checking\n\tif err := t.checkConfig(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}\n\n\/\/ checkConfig performs rsync specific checks in the configuration.\nfunc (r *RsyncTransport) checkConfig() error {\n\t\/\/ Source and dest directories must be set.\n\t\/\/ Either source host or destination host can be set, not both.\n\tswitch {\n\tcase r.config.SourceDir == \"\":\n\t\treturn fmt.Errorf(\"Config error: SourceDir is empty\")\n\tcase r.config.DestDir == \"\":\n\t\treturn fmt.Errorf(\"Config error: DestDir is empty\")\n\tcase r.config.SourceHost != \"\" && r.config.DestHost != \"\":\n\t\treturn fmt.Errorf(\"Config error: Cannot have source & dest host set\")\n\t}\n\treturn nil\n}\n\n\/\/ Run builds the command name and executes it, saving the output to the log\n\/\/ file requested in the configuration or a default one if none is specified.\n\/\/ Temporary files with exclusion and inclusion paths are generated, if needed,\n\/\/ and removed at the end of execution. If dryRun is set, just output the\n\/\/ command to be executed and the contents of the exclusion and inclusion lists\n\/\/ to stderr.\nfunc (r *RsyncTransport) Run() error {\n\t\/\/ Create exclude\/include lists, if needed\n\terr := r.createExcludeFile(r.config.Exclude)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(r.excludeFile)\n\n\terr = r.createIncludeFile(r.config.Include)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(r.includeFile)\n\n\t\/\/ Build the full rsync command line\n\tcmd := []string{rsyncCmd, \"-avXH\", \"--delete\", \"--numeric-ids\"}\n\n\tif r.excludeFile != \"\" {\n\t\tcmd = append(cmd, fmt.Sprintf(\"--exclude-from=%s\", r.excludeFile))\n\t\tcmd = append(cmd, \"--delete-excluded\")\n\t}\n\tif r.includeFile != \"\" {\n\t\tcmd = append(cmd, fmt.Sprintf(\"--include-from=%s\", r.includeFile))\n\t}\n\tif len(r.config.ExtraArgs) != 0 {\n\t\tfor _, v := range r.config.ExtraArgs {\n\t\t\tcmd = append(cmd, v)\n\t\t}\n\t}\n\t\/\/ In rsync, the source needs to ends with a slash or the\n\t\/\/ source directory will be created inside the destination.\n\t\/\/ The exception are the cases where the source already ends\n\t\/\/ in a slash (ex: \/)\n\tsrc := r.buildSource()\n\tif !strings.HasSuffix(src, \"\/\") {\n\t\tsrc = src + \"\/\"\n\t}\n\tcmd = append(cmd, src)\n\tcmd = append(cmd, r.buildDest())\n\n\tr.log.Verbosef(1, \"Command: %s\\n\", strings.Join(cmd, \" \"))\n\n\t\/\/ Execute the command\n\tif !r.dryRun {\n\t\treturn execute.RunCommand(\"RSYNC\", cmd, r.log, r.execute, nil, nil)\n\t}\n\treturn nil\n}\n<commit_msg>Ignore rsync error 24 (some files disappeared during copy).<commit_after>\/\/ rsync transport for netbackup\n\/\/\n\/\/ This file is part of netbackup (http:\/\/github.com\/marcopaganini\/netbackup)\n\/\/ See instructions in the README.md file that accompanies this program.\n\/\/ rsync transport for netbackup\n\npackage transports\n\nimport (\n\t\"fmt\"\n\t\"github.com\/marcopaganini\/logger\"\n\t\"github.com\/marcopaganini\/netbackup\/config\"\n\t\"github.com\/marcopaganini\/netbackup\/execute\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\trsyncCmd = \"rsync\"\n)\n\n\/\/ RsyncTransport is the main structure for the rsync transport.\ntype RsyncTransport struct {\n\tTransport\n}\n\n\/\/ NewRsyncTransport creates a new Transport object for rsync.\nfunc NewRsyncTransport(config *config.Config, ex execute.Executor, log *logger.Logger, dryRun bool) (*RsyncTransport, error) {\n\tt := &RsyncTransport{}\n\tt.config = config\n\tt.log = log\n\tt.dryRun = dryRun\n\n\t\/\/ If execute object is nil, create a new one\n\tt.execute = ex\n\tif t.execute == nil {\n\t\tt.execute = execute.New()\n\t}\n\n\t\/\/ Basic config checking\n\tif err := t.checkConfig(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}\n\n\/\/ checkConfig performs rsync specific checks in the configuration.\nfunc (r *RsyncTransport) checkConfig() error {\n\t\/\/ Source and dest directories must be set.\n\t\/\/ Either source host or destination host can be set, not both.\n\tswitch {\n\tcase r.config.SourceDir == \"\":\n\t\treturn fmt.Errorf(\"Config error: SourceDir is empty\")\n\tcase r.config.DestDir == \"\":\n\t\treturn fmt.Errorf(\"Config error: DestDir is empty\")\n\tcase r.config.SourceHost != \"\" && r.config.DestHost != \"\":\n\t\treturn fmt.Errorf(\"Config error: Cannot have source & dest host set\")\n\t}\n\treturn nil\n}\n\n\/\/ Run builds the command name and executes it, saving the output to the log\n\/\/ file requested in the configuration or a default one if none is specified.\n\/\/ Temporary files with exclusion and inclusion paths are generated, if needed,\n\/\/ and removed at the end of execution. If dryRun is set, just output the\n\/\/ command to be executed and the contents of the exclusion and inclusion lists\n\/\/ to stderr.\nfunc (r *RsyncTransport) Run() error {\n\t\/\/ Create exclude\/include lists, if needed\n\terr := r.createExcludeFile(r.config.Exclude)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(r.excludeFile)\n\n\terr = r.createIncludeFile(r.config.Include)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(r.includeFile)\n\n\t\/\/ Build the full rsync command line\n\tcmd := []string{rsyncCmd, \"-avXH\", \"--delete\", \"--numeric-ids\"}\n\n\tif r.excludeFile != \"\" {\n\t\tcmd = append(cmd, fmt.Sprintf(\"--exclude-from=%s\", r.excludeFile))\n\t\tcmd = append(cmd, \"--delete-excluded\")\n\t}\n\tif r.includeFile != \"\" {\n\t\tcmd = append(cmd, fmt.Sprintf(\"--include-from=%s\", r.includeFile))\n\t}\n\tif len(r.config.ExtraArgs) != 0 {\n\t\tfor _, v := range r.config.ExtraArgs {\n\t\t\tcmd = append(cmd, v)\n\t\t}\n\t}\n\t\/\/ In rsync, the source needs to ends with a slash or the\n\t\/\/ source directory will be created inside the destination.\n\t\/\/ The exception are the cases where the source already ends\n\t\/\/ in a slash (ex: \/)\n\tsrc := r.buildSource()\n\tif !strings.HasSuffix(src, \"\/\") {\n\t\tsrc = src + \"\/\"\n\t}\n\tcmd = append(cmd, src)\n\tcmd = append(cmd, r.buildDest())\n\n\tr.log.Verbosef(1, \"Command: %s\\n\", strings.Join(cmd, \" \"))\n\n\t\/\/ Execute the command\n\terr = nil\n\tif !r.dryRun {\n\t\terr := execute.RunCommand(\"RSYNC\", cmd, r.log, r.execute, nil, nil)\n\t\tif err != nil {\n\t\t\trc := execute.ExitCode(err)\n\n\t\t\t\/\/ Rsync uses retcode 24 to indicate \"some files disappeared during\n\t\t\t\/\/ the transfer\" which is immaterial for our purposes. Ignore those\n\t\t\t\/\/ cases.\n\t\t\tif rc == 24 {\n\t\t\t\tr.log.Println(\"Note: rsync returned error 24 (some files disappeared during copy). Ignoring.\")\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"image\/png\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/peterstace\/grayt\"\n)\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nfunc main() {\n\n\tvar (\n\t\tout string\n\t\tspp int\n\t\tcv float64\n\t)\n\n\t\/\/ Set up flags.\n\tflag.StringVar(&out, \"out\", \"\",\n\t\t\"output file (must end in .png)\")\n\tflag.IntVar(&spp, \"spp-limit\", 0,\n\t\t\"samples per pixel stopping point\")\n\tflag.Float64Var(&cv, \"cv-limit\", 0.0,\n\t\t\"neighbourhood CV (coefficient of variation) stopping point\")\n\tflag.Parse()\n\n\t\/\/ Validate and interpret flags.\n\tif !strings.HasSuffix(out, \".png\") {\n\t\tlog.Fatalf(`%q does not end in \".png\"`, out)\n\t}\n\tif (spp == 0 && cv == 0) || (spp != 0 && cv != 0) {\n\t\tlog.Fatalf(`exactly 1 of s and d must be set`)\n\t}\n\tvar mode mode\n\tif spp != 0 {\n\t\tmode = &fixedSamplesPerPixel{required: spp}\n\t} else {\n\t\tmode = &untilRelativeStdDevBelowThreshold{threshold: cv}\n\t}\n\n\t\/\/ Load scene. TODO: load from file.\n\tscene := CornellBox()\n\n\t\/\/ TODO: these should come from command line args.\n\tconst (\n\t\tpxWide = 300\n\t\tpxHigh = 300\n\t\ttotalPx = pxWide * pxHigh\n\t)\n\tacc := grayt.NewAccumulator(pxWide, pxHigh)\n\n\trun(mode, scene, acc)\n\n\t\/\/ TODO: image exposure should come from the scene description.\n\timg := acc.ToImage(1.0)\n\n\t\/\/ Write image out to file.\n\tf, err := os.Create(out)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tif err := png.Encode(f, img); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc run(mode mode, scene grayt.Scene, acc grayt.Accumulator) {\n\n\twide, high := acc.Dimensions()\n\ttotalPx := wide * high\n\n\tstartTime := time.Now()\n\titeration := 0\n\n\tfor !mode.stop() {\n\n\t\tgrayt.TracerImage(scene, acc)\n\t\titeration++\n\t\tcv := acc.NeighbourCoefficientOfVariation()\n\t\tmode.finishSample(cv)\n\n\t\tsamplesPerSecond := float64(iteration*totalPx) \/ time.Now().Sub(startTime).Seconds()\n\n\t\ttotalSamples := mode.estSamplesPerPixelRequired()\n\t\ttotalSamplesStr := \"??\"\n\t\teta := \"??\"\n\t\tif totalSamples >= 0 {\n\t\t\ttotalSamplesStr = fmt.Sprintf(\"%d\", totalSamples)\n\t\t\tetaSeconds := float64((totalSamples-iteration)*totalPx) \/ samplesPerSecond\n\t\t\teta = fmt.Sprintf(\"%v\", time.Duration(etaSeconds)*time.Second)\n\t\t}\n\n\t\tlog.Printf(\"Sample=%d\/%s, Samples\/sec=%.2e CV=%.4f ETA=%s\\n\",\n\t\t\titeration, totalSamplesStr, samplesPerSecond, cv, eta)\n\t}\n\n\tlog.Printf(\"TotalTime=%s\", time.Now().Sub(startTime))\n}\n\ntype mode interface {\n\t\/\/ estSamplesPerPixelRequired is an estimation of the number of total\n\t\/\/ samples per pixel that will be required before the render is completed.\n\testSamplesPerPixelRequired() int\n\n\t\/\/ finishSample signals to the mode that a sample has been finished. The\n\t\/\/ new coefficient of variation should be supplied.\n\tfinishSample(cv float64)\n\n\t\/\/ stop indicates if the render is complete.\n\tstop() bool\n}\n\ntype fixedSamplesPerPixel struct {\n\trequired int\n\tcompleted int\n}\n\nfunc (f *fixedSamplesPerPixel) estSamplesPerPixelRequired() int {\n\treturn f.required\n}\n\nfunc (f *fixedSamplesPerPixel) finishSample(float64) {\n\tf.completed++\n}\n\nfunc (f *fixedSamplesPerPixel) stop() bool {\n\treturn f.required == f.completed\n}\n\ntype untilRelativeStdDevBelowThreshold struct {\n\tthreshold float64\n\tcurrentCV float64\n\tpreviousCV float64\n\treducedCVCount int\n\tcompleted int\n\tcvDeltaPerSample float64\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) estSamplesPerPixelRequired() int {\n\tif u.reducedCVCount < 5 {\n\t\treturn -1\n\t}\n\tmore := (u.currentCV - u.threshold) \/ u.cvDeltaPerSample\n\treturn u.completed + int(more)\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) finishSample(relStdDev float64) {\n\tu.currentCV, u.previousCV = relStdDev, u.currentCV\n\tif u.currentCV < u.previousCV {\n\t\tu.reducedCVCount++\n\t} else {\n\t\tu.reducedCVCount = 0\n\t}\n\tu.completed++\n\tu.cvDeltaPerSample = 0.9*u.cvDeltaPerSample + 0.1*(u.previousCV-u.currentCV)\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) stop() bool {\n\treturn u.reducedCVCount >= 5 && u.currentCV < u.threshold\n}\n<commit_msg>Width and Height (pixels) comes from cmdline args<commit_after>package main\n\nimport (\n\t\"image\/png\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/peterstace\/grayt\"\n)\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nfunc main() {\n\n\tvar (\n\t\tout string\n\t\tspp int\n\t\tcv float64\n\t\tpxWide, pxHigh int\n\t)\n\n\t\/\/ Set up flags.\n\tflag.StringVar(&out, \"o\", \"\",\n\t\t\"output file (must end in .png)\")\n\tflag.IntVar(&spp, \"spp\", 0,\n\t\t\"samples per pixel stopping point\")\n\tflag.Float64Var(&cv, \"cv\", 0.0,\n\t\t\"neighbourhood CV (coefficient of variation) stopping point\")\n\tflag.IntVar(&pxWide, \"w\", 0, \"width in pixels\")\n\tflag.IntVar(&pxHigh, \"h\", 0, \"height in pixels\")\n\tflag.Parse()\n\n\t\/\/ Validate and interpret flags.\n\tif !strings.HasSuffix(out, \".png\") {\n\t\tflag.Usage()\n\t\tlog.Fatalf(`%q does not end in \".png\"`, out)\n\t}\n\tif (spp == 0 && cv == 0) || (spp != 0 && cv != 0) {\n\t\tlog.Fatalf(`exactly 1 of s and d must be set`)\n\t}\n\tvar mode mode\n\tif spp != 0 {\n\t\tmode = &fixedSamplesPerPixel{required: spp}\n\t} else {\n\t\tmode = &untilRelativeStdDevBelowThreshold{threshold: cv}\n\t}\n\tif pxWide == 0 || pxHigh == 0 {\n\t\tflag.Usage()\n\t\tlog.Fatal(\"width and height must be set\")\n\t}\n\n\t\/\/ Load scene. TODO: load from file.\n\tscene := CornellBox()\n\n\t\/\/ TODO: these should come from command line args.\n\tacc := grayt.NewAccumulator(pxWide, pxHigh)\n\n\trun(mode, scene, acc)\n\n\t\/\/ TODO: image exposure should come from the scene description.\n\timg := acc.ToImage(1.0)\n\n\t\/\/ Write image out to file.\n\tf, err := os.Create(out)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tif err := png.Encode(f, img); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc run(mode mode, scene grayt.Scene, acc grayt.Accumulator) {\n\n\twide, high := acc.Dimensions()\n\ttotalPx := wide * high\n\n\tstartTime := time.Now()\n\titeration := 0\n\n\tfor !mode.stop() {\n\n\t\tgrayt.TracerImage(scene, acc)\n\t\titeration++\n\t\tcv := acc.NeighbourCoefficientOfVariation()\n\t\tmode.finishSample(cv)\n\n\t\tsamplesPerSecond := float64(iteration*totalPx) \/ time.Now().Sub(startTime).Seconds()\n\n\t\ttotalSamples := mode.estSamplesPerPixelRequired()\n\t\ttotalSamplesStr := \"??\"\n\t\teta := \"??\"\n\t\tif totalSamples >= 0 {\n\t\t\ttotalSamplesStr = fmt.Sprintf(\"%d\", totalSamples)\n\t\t\tetaSeconds := float64((totalSamples-iteration)*totalPx) \/ samplesPerSecond\n\t\t\teta = fmt.Sprintf(\"%v\", time.Duration(etaSeconds)*time.Second)\n\t\t}\n\n\t\tlog.Printf(\"Sample=%d\/%s, Samples\/sec=%.2e CV=%.4f ETA=%s\\n\",\n\t\t\titeration, totalSamplesStr, samplesPerSecond, cv, eta)\n\t}\n\n\tlog.Printf(\"TotalTime=%s\", time.Now().Sub(startTime))\n}\n\ntype mode interface {\n\t\/\/ estSamplesPerPixelRequired is an estimation of the number of total\n\t\/\/ samples per pixel that will be required before the render is completed.\n\testSamplesPerPixelRequired() int\n\n\t\/\/ finishSample signals to the mode that a sample has been finished. The\n\t\/\/ new coefficient of variation should be supplied.\n\tfinishSample(cv float64)\n\n\t\/\/ stop indicates if the render is complete.\n\tstop() bool\n}\n\ntype fixedSamplesPerPixel struct {\n\trequired int\n\tcompleted int\n}\n\nfunc (f *fixedSamplesPerPixel) estSamplesPerPixelRequired() int {\n\treturn f.required\n}\n\nfunc (f *fixedSamplesPerPixel) finishSample(float64) {\n\tf.completed++\n}\n\nfunc (f *fixedSamplesPerPixel) stop() bool {\n\treturn f.required == f.completed\n}\n\ntype untilRelativeStdDevBelowThreshold struct {\n\tthreshold float64\n\tcurrentCV float64\n\tpreviousCV float64\n\treducedCVCount int\n\tcompleted int\n\tcvDeltaPerSample float64\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) estSamplesPerPixelRequired() int {\n\tif u.reducedCVCount < 5 {\n\t\treturn -1\n\t}\n\tmore := (u.currentCV - u.threshold) \/ u.cvDeltaPerSample\n\treturn u.completed + int(more)\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) finishSample(relStdDev float64) {\n\tu.currentCV, u.previousCV = relStdDev, u.currentCV\n\tif u.currentCV < u.previousCV {\n\t\tu.reducedCVCount++\n\t} else {\n\t\tu.reducedCVCount = 0\n\t}\n\tu.completed++\n\tu.cvDeltaPerSample = 0.9*u.cvDeltaPerSample + 0.1*(u.previousCV-u.currentCV)\n}\n\nfunc (u *untilRelativeStdDevBelowThreshold) stop() bool {\n\treturn u.reducedCVCount >= 5 && u.currentCV < u.threshold\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"testing\"\n\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestMustHaveTokenEnv(t *testing.T) {\n\tcurrentToken := os.Getenv(\"CLAIMR_TOKEN\")\n\tos.Unsetenv(\"CLAIMR_TOKEN\")\n\tdefer func() { os.Setenv(\"CLAIMR_TOKEN\", currentToken) }()\n\n\tcmd := exec.Command(\"make\", \"run\")\n\t\/\/cmd.Env = append(os.Environ(), \"TEST_MUST_ENV=1\")\n\terr := cmd.Run()\n\tfmt.Print(err.(*exec.ExitError))\n\t\/\/if e, ok := err.(*exec.ExitError); ok && !e.Success() {\n\t\/\/\treturn\n\t\/\/}\n\tassert.EqualError(t, err, \"Claimr slack bot token unset. Set CLAIMR_TOKEN to continue.\")\n}\n<commit_msg>Adding tests to main package<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/bouk\/monkey\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestEnvironmentToken(t *testing.T) {\n\tcurrentEnv := os.Getenv(\"CLAIMR_TOKEN\")\n\tos.Unsetenv(\"CLAIMR_TOKEN\")\n\tdefer func() { os.Setenv(\"CLAIMR_TOKEN\", currentEnv) }()\n\n\twantMsg := \"Claimr slack bot token unset. Set CLAIMR_TOKEN to continue.\"\n\n\tmockLogFatal := func(msg ...interface{}) {\n\t\tassert.Equal(t, wantMsg, msg[0])\n\t\tpanic(\"log.Fatal called\")\n\t}\n\tpatchLog := monkey.Patch(log.Fatal, mockLogFatal)\n\tdefer patchLog.Unpatch()\n\tassert.PanicsWithValue(t, \"log.Fatal called\", main, \"log.Fatal was not called\")\n}\n<|endoftext|>"} {"text":"<commit_before>package ghw\n\nimport (\n \"testing\"\n)\n\nfunc TestHost(t *testing.T) {\n host, err := Host()\n\n if err != nil {\n t.Fatalf(\"Expected nil error but got %v\", err)\n }\n if host == nil {\n t.Fatalf(\"Expected non-nil host but got nil.\")\n }\n\n mem := host.Memory\n if mem == nil {\n t.Fatalf(\"Expected non-nil Memory but got nil.\")\n }\n\n tpb := mem.TotalPhysicalBytes\n if tpb < 1 {\n t.Fatalf(\"Expected >0 total physical memory, but got %d\", tpb)\n }\n\n tub := mem.TotalUsableBytes\n if tub < 1 {\n t.Fatalf(\"Expected >0 total usable memory, but got %d\", tub)\n }\n\n cpu := host.CPU\n if cpu == nil {\n t.Fatalf(\"Expected non-nil CPU, but got nil\")\n }\n\n cores := cpu.TotalCores\n if cores < 1 }\n t.Fatalf(\"Expected >0 total cores, but got %d\", cores)\n }\n\n threads := cpu.TotalThreads\n if threads < 1 }\n t.Fatalf(\"Expected >0 total threads, but got %d\", threads)\n }\n\n block := host.Block\n if block == nil {\n t.Fatalf(\"Expected non-nil Block but got nil.\")\n }\n\n blockTpb := block.TotalPhysicalBytes\n if blockTpb < 1 {\n t.Fatalf(\"Expected >0 total physical block bytes, but got %d\", blockTpb)\n }\n}\n<commit_msg>Should actually run the damn tests :(<commit_after>package ghw\n\nimport (\n \"testing\"\n)\n\nfunc TestHost(t *testing.T) {\n host, err := Host()\n\n if err != nil {\n t.Fatalf(\"Expected nil error but got %v\", err)\n }\n if host == nil {\n t.Fatalf(\"Expected non-nil host but got nil.\")\n }\n\n mem := host.Memory\n if mem == nil {\n t.Fatalf(\"Expected non-nil Memory but got nil.\")\n }\n\n tpb := mem.TotalPhysicalBytes\n if tpb < 1 {\n t.Fatalf(\"Expected >0 total physical memory, but got %d\", tpb)\n }\n\n tub := mem.TotalUsableBytes\n if tub < 1 {\n t.Fatalf(\"Expected >0 total usable memory, but got %d\", tub)\n }\n\n cpu := host.CPU\n if cpu == nil {\n t.Fatalf(\"Expected non-nil CPU, but got nil\")\n }\n\n cores := cpu.TotalCores\n if cores < 1 {\n t.Fatalf(\"Expected >0 total cores, but got %d\", cores)\n }\n\n threads := cpu.TotalThreads\n if threads < 1 {\n t.Fatalf(\"Expected >0 total threads, but got %d\", threads)\n }\n\n block := host.Block\n if block == nil {\n t.Fatalf(\"Expected non-nil Block but got nil.\")\n }\n\n blockTpb := block.TotalPhysicalBytes\n if blockTpb < 1 {\n t.Fatalf(\"Expected >0 total physical block bytes, but got %d\", blockTpb)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/verdverm\/frisby\"\n)\n\nfunc TestSomething(t *testing.T) {\n\terrs := frisby.Create(\"Test ping\").\n\t\tGet(\"http:\/\/localhost:5025\/ping\").\n\t\tSend().\n\t\tExpectStatus(http.StatusOK).\n\t\tErrors()\n\n\tfor _, err := range errs {\n\t\tassert.Nil(t, err)\n\t}\n}\n<commit_msg>update<commit_after>package main_test\n\nimport (\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/verdverm\/frisby\"\n)\n\nfunc TestPing(t *testing.T) {\n\terrs := frisby.Create(\"Test ping\").\n\t\tGet(\"http:\/\/localhost:5025\/ping\").\n\t\tSend().\n\t\tExpectStatus(http.StatusOK).\n\t\tExpectContent(\"pong\").\n\t\tErrors()\n\n\tfor _, err := range errs {\n\t\tassert.Nil(t, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"regexp\"\n\n\t\"github.com\/elliotchance\/c2go\/util\"\n)\n\nvar (\n\tcPath = \"build\/a.out\"\n\tgoPath = \"build\/go.out\"\n\tstdin = \"7\"\n\targs = []string{\"some\", \"args\"}\n)\n\ntype programOut struct {\n\tstdout bytes.Buffer\n\tstderr bytes.Buffer\n\tisZero bool\n}\n\n\/\/ TestIntegrationScripts tests all programs in the tests directory.\n\/\/\n\/\/ Integration tests are not run by default (only unit tests). These are\n\/\/ indicated by the build flags at the top of the file. To include integration\n\/\/ tests use:\n\/\/\n\/\/ go test -tags=integration\n\/\/\n\/\/ You can also run a single file with:\n\/\/\n\/\/ go test -tags=integration -run=TestIntegrationScripts\/tests\/ctype\/isalnum.c\n\/\/\nfunc TestIntegrationScripts(t *testing.T) {\n\ttestFiles, err := filepath.Glob(\"tests\/*.c\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texampleFiles, err := filepath.Glob(\"examples\/*.c\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfiles := append(testFiles, exampleFiles...)\n\n\tisVerbose := flag.CommandLine.Lookup(\"test.v\").Value.String() == \"true\"\n\n\ttotalTapTests := 0\n\n\tfor _, file := range files {\n\t\t\/\/ Create build folder\n\t\tos.Mkdir(\"build\/\", os.ModePerm)\n\n\t\tt.Run(file, func(t *testing.T) {\n\t\t\tcProgram := programOut{}\n\t\t\tgoProgram := programOut{}\n\n\t\t\t\/\/ Compile C.\n\t\t\tout, err := exec.Command(\"clang\", \"-lm\", \"-o\", cPath, file).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error: %s\\n%s\", err, out)\n\t\t\t}\n\n\t\t\t\/\/ Run C program\n\t\t\tcmd := exec.Command(cPath, args...)\n\t\t\tcmd.Stdin = strings.NewReader(stdin)\n\t\t\tcmd.Stdout = &cProgram.stdout\n\t\t\tcmd.Stderr = &cProgram.stderr\n\t\t\terr = cmd.Run()\n\t\t\tcProgram.isZero = err == nil\n\n\t\t\tprogramArgs := ProgramArgs{\n\t\t\t\tinputFile: file,\n\t\t\t\toutputFile: \"build\/main.go\",\n\t\t\t\tpackageName: \"main\",\n\t\t\t}\n\n\t\t\t\/\/ Compile Go\n\t\t\terr = Start(programArgs)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error: %s\\n%s\", err, out)\n\t\t\t}\n\n\t\t\tbuildErr, err := exec.Command(\"go\", \"build\", \"-o\", goPath, \"build\/main.go\").CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(string(buildErr), err)\n\t\t\t}\n\n\t\t\t\/\/ Run Go program\n\t\t\tcmd = exec.Command(goPath, args...)\n\t\t\tcmd.Stdin = strings.NewReader(stdin)\n\t\t\tcmd.Stdout = &goProgram.stdout\n\t\t\tcmd.Stderr = &goProgram.stderr\n\t\t\terr = cmd.Run()\n\t\t\tgoProgram.isZero = err == nil\n\n\t\t\t\/\/ Check for special exit codes that signal that tests have failed.\n\t\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\t\texitStatus := exitError.Sys().(syscall.WaitStatus).ExitStatus()\n\t\t\t\tif exitStatus == 101 || exitStatus == 102 {\n\t\t\t\t\tt.Fatal(goProgram.stdout.String())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check if both exit codes are zero (or non-zero)\n\t\t\tif cProgram.isZero != goProgram.isZero {\n\t\t\t\tt.Fatalf(\"Exit statuses did not match.\\n\" +\n\t\t\t\t\tutil.ShowDiff(cProgram.stdout.String(),\n\t\t\t\t\t\tgoProgram.stdout.String()),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\t\/\/ Check stderr\n\t\t\tif cProgram.stderr.String() != goProgram.stderr.String() {\n\t\t\t\tt.Fatalf(\"Expected %q, Got: %q\",\n\t\t\t\t\tcProgram.stderr.String(),\n\t\t\t\t\tgoProgram.stderr.String())\n\t\t\t}\n\n\t\t\t\/\/ Check stdout\n\t\t\tif cProgram.stdout.String() != goProgram.stdout.String() {\n\t\t\t\tt.Fatalf(util.ShowDiff(cProgram.stdout.String(),\n\t\t\t\t\tgoProgram.stdout.String()))\n\t\t\t}\n\n\t\t\t\/\/ If this is not an example we will extact the number of tests run.\n\t\t\tif strings.Index(file, \"examples\/\") == -1 && isVerbose {\n\t\t\t\tfirstLine := strings.Split(goProgram.stdout.String(), \"\\n\")[0]\n\n\t\t\t\tmatches := regexp.MustCompile(`1\\.\\.(\\d+)`).\n\t\t\t\t\tFindStringSubmatch(firstLine)\n\t\t\t\tif len(matches) == 0 {\n\t\t\t\t\tt.Fatalf(\"Test did not output tap: %s\", file)\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"TAP: # %s: %s tests\\n\", file, matches[1])\n\t\t\t\ttotalTapTests += util.Atoi(matches[1])\n\t\t\t}\n\t\t})\n\t}\n\n\tif isVerbose {\n\t\tfmt.Printf(\"TAP: # Total tests: %d\\n\", totalTapTests)\n\t}\n}\n\nfunc TestStartPreprocess(t *testing.T) {\n\t\/\/ temp dir\n\ttempDir := os.TempDir()\n\n\t\/\/ create temp file with garantee\n\t\/\/ wrong file body\n\ttempFile, err := newTempFile(tempDir, \"c2go\", \"preprocess.c\")\n\tif err != nil {\n\t\tt.Errorf(\"Cannot create temp file for execute test\")\n\t}\n\tdefer os.Remove(tempFile.Name())\n\n\tfmt.Fprintf(tempFile, \"#include <AbsoluteWrongInclude.h>\\nint main(void){\\nwrong();\\n}\")\n\n\terr = tempFile.Close()\n\tif err != nil {\n\t\tt.Errorf(\"Cannot close the temp file\")\n\t}\n\n\tvar args ProgramArgs\n\targs.inputFile = tempFile.Name()\n\n\terr = Start(args)\n\tif err == nil {\n\t\tt.Errorf(\"Cannot test preprocess of application\")\n\t}\n}\n\nfunc TestGoPath(t *testing.T) {\n\tgopath := \"GOPATH\"\n\n\texistEnv := os.Getenv(gopath)\n\tif existEnv == \"\" {\n\t\tt.Errorf(\"$GOPATH is not set\")\n\t}\n\n\t\/\/ return env.var.\n\tdefer func() {\n\t\terr := os.Setenv(gopath, existEnv)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Cannot restore the value of $GOPATH\")\n\t\t}\n\t}()\n\n\t\/\/ reset value of env.var.\n\terr := os.Setenv(gopath, \"\")\n\tif err != nil {\n\t\tt.Errorf(\"Cannot set value of $GOPATH\")\n\t}\n\n\t\/\/ testing\n\terr = Start(ProgramArgs{})\n\tif err == nil {\n\t\tt.Errorf(err.Error())\n\t}\n}\n<commit_msg>Remove build folder afer integration test<commit_after>\/\/ +build integration\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"regexp\"\n\n\t\"github.com\/elliotchance\/c2go\/util\"\n)\n\nvar (\n\tcPath = \"build\/a.out\"\n\tgoPath = \"build\/go.out\"\n\tstdin = \"7\"\n\targs = []string{\"some\", \"args\"}\n)\n\ntype programOut struct {\n\tstdout bytes.Buffer\n\tstderr bytes.Buffer\n\tisZero bool\n}\n\n\/\/ TestIntegrationScripts tests all programs in the tests directory.\n\/\/\n\/\/ Integration tests are not run by default (only unit tests). These are\n\/\/ indicated by the build flags at the top of the file. To include integration\n\/\/ tests use:\n\/\/\n\/\/ go test -tags=integration\n\/\/\n\/\/ You can also run a single file with:\n\/\/\n\/\/ go test -tags=integration -run=TestIntegrationScripts\/tests\/ctype\/isalnum.c\n\/\/\nfunc TestIntegrationScripts(t *testing.T) {\n\ttestFiles, err := filepath.Glob(\"tests\/*.c\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texampleFiles, err := filepath.Glob(\"examples\/*.c\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfiles := append(testFiles, exampleFiles...)\n\n\tisVerbose := flag.CommandLine.Lookup(\"test.v\").Value.String() == \"true\"\n\n\ttotalTapTests := 0\n\n\t\/\/ Create build folder\n\tos.Mkdir(\"build\/\", os.ModePerm)\n\tdefer os.RemoveAll(\"build\")\n\n\tfor _, file := range files {\n\t\tt.Run(file, func(t *testing.T) {\n\t\t\tcProgram := programOut{}\n\t\t\tgoProgram := programOut{}\n\n\t\t\t\/\/ Compile C.\n\t\t\tout, err := exec.Command(\"clang\", \"-lm\", \"-o\", cPath, file).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error: %s\\n%s\", err, out)\n\t\t\t}\n\n\t\t\t\/\/ Run C program\n\t\t\tcmd := exec.Command(cPath, args...)\n\t\t\tcmd.Stdin = strings.NewReader(stdin)\n\t\t\tcmd.Stdout = &cProgram.stdout\n\t\t\tcmd.Stderr = &cProgram.stderr\n\t\t\terr = cmd.Run()\n\t\t\tcProgram.isZero = err == nil\n\n\t\t\tprogramArgs := ProgramArgs{\n\t\t\t\tinputFile: file,\n\t\t\t\toutputFile: \"build\/main.go\",\n\t\t\t\tpackageName: \"main\",\n\t\t\t}\n\n\t\t\t\/\/ Compile Go\n\t\t\terr = Start(programArgs)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error: %s\\n%s\", err, out)\n\t\t\t}\n\n\t\t\tbuildErr, err := exec.Command(\"go\", \"build\", \"-o\", goPath, \"build\/main.go\").CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(string(buildErr), err)\n\t\t\t}\n\n\t\t\t\/\/ Run Go program\n\t\t\tcmd = exec.Command(goPath, args...)\n\t\t\tcmd.Stdin = strings.NewReader(stdin)\n\t\t\tcmd.Stdout = &goProgram.stdout\n\t\t\tcmd.Stderr = &goProgram.stderr\n\t\t\terr = cmd.Run()\n\t\t\tgoProgram.isZero = err == nil\n\n\t\t\t\/\/ Check for special exit codes that signal that tests have failed.\n\t\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\t\texitStatus := exitError.Sys().(syscall.WaitStatus).ExitStatus()\n\t\t\t\tif exitStatus == 101 || exitStatus == 102 {\n\t\t\t\t\tt.Fatal(goProgram.stdout.String())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check if both exit codes are zero (or non-zero)\n\t\t\tif cProgram.isZero != goProgram.isZero {\n\t\t\t\tt.Fatalf(\"Exit statuses did not match.\\n\" +\n\t\t\t\t\tutil.ShowDiff(cProgram.stdout.String(),\n\t\t\t\t\t\tgoProgram.stdout.String()),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\t\/\/ Check stderr\n\t\t\tif cProgram.stderr.String() != goProgram.stderr.String() {\n\t\t\t\tt.Fatalf(\"Expected %q, Got: %q\",\n\t\t\t\t\tcProgram.stderr.String(),\n\t\t\t\t\tgoProgram.stderr.String())\n\t\t\t}\n\n\t\t\t\/\/ Check stdout\n\t\t\tif cProgram.stdout.String() != goProgram.stdout.String() {\n\t\t\t\tt.Fatalf(util.ShowDiff(cProgram.stdout.String(),\n\t\t\t\t\tgoProgram.stdout.String()))\n\t\t\t}\n\n\t\t\t\/\/ If this is not an example we will extact the number of tests run.\n\t\t\tif strings.Index(file, \"examples\/\") == -1 && isVerbose {\n\t\t\t\tfirstLine := strings.Split(goProgram.stdout.String(), \"\\n\")[0]\n\n\t\t\t\tmatches := regexp.MustCompile(`1\\.\\.(\\d+)`).\n\t\t\t\t\tFindStringSubmatch(firstLine)\n\t\t\t\tif len(matches) == 0 {\n\t\t\t\t\tt.Fatalf(\"Test did not output tap: %s\", file)\n\t\t\t\t}\n\n\t\t\t\tfmt.Printf(\"TAP: # %s: %s tests\\n\", file, matches[1])\n\t\t\t\ttotalTapTests += util.Atoi(matches[1])\n\t\t\t}\n\t\t})\n\t}\n\n\tif isVerbose {\n\t\tfmt.Printf(\"TAP: # Total tests: %d\\n\", totalTapTests)\n\t}\n}\n\nfunc TestStartPreprocess(t *testing.T) {\n\t\/\/ temp dir\n\ttempDir := os.TempDir()\n\n\t\/\/ create temp file with garantee\n\t\/\/ wrong file body\n\ttempFile, err := newTempFile(tempDir, \"c2go\", \"preprocess.c\")\n\tif err != nil {\n\t\tt.Errorf(\"Cannot create temp file for execute test\")\n\t}\n\tdefer os.Remove(tempFile.Name())\n\n\tfmt.Fprintf(tempFile, \"#include <AbsoluteWrongInclude.h>\\nint main(void){\\nwrong();\\n}\")\n\n\terr = tempFile.Close()\n\tif err != nil {\n\t\tt.Errorf(\"Cannot close the temp file\")\n\t}\n\n\tvar args ProgramArgs\n\targs.inputFile = tempFile.Name()\n\n\terr = Start(args)\n\tif err == nil {\n\t\tt.Errorf(\"Cannot test preprocess of application\")\n\t}\n}\n\nfunc TestGoPath(t *testing.T) {\n\tgopath := \"GOPATH\"\n\n\texistEnv := os.Getenv(gopath)\n\tif existEnv == \"\" {\n\t\tt.Errorf(\"$GOPATH is not set\")\n\t}\n\n\t\/\/ return env.var.\n\tdefer func() {\n\t\terr := os.Setenv(gopath, existEnv)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Cannot restore the value of $GOPATH\")\n\t\t}\n\t}()\n\n\t\/\/ reset value of env.var.\n\terr := os.Setenv(gopath, \"\")\n\tif err != nil {\n\t\tt.Errorf(\"Cannot set value of $GOPATH\")\n\t}\n\n\t\/\/ testing\n\terr = Start(ProgramArgs{})\n\tif err == nil {\n\t\tt.Errorf(err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nconst testImagesFolder = \".\/test\/images\"\n\ntype FakeExternalServerTestSuite struct {\n\tsuite.Suite\n\tfakeServer *http.Server\n\tsubjectServer *httptest.Server\n}\n\nfunc (suite *FakeExternalServerTestSuite) SetupSuite() {\n\tsuite.fakeServer = &http.Server{Addr: \":8081\", Handler: http.FileServer(http.Dir(testImagesFolder))}\n\tgo func() {\n\t\tsuite.fakeServer.ListenAndServe()\n\t\t\/\/ http.ListenAndServe(\":8081\", suite.fakeServer)\n\t}()\n}\n\nfunc TestFakeExternalServerTestSuite(t *testing.T) {\n\tsuite.Run(t, new(FakeExternalServerTestSuite))\n}\n\nfunc (suite *FakeExternalServerTestSuite) SetupTest() {\n\tsuite.subjectServer = httptest.NewServer(GetApp())\n\t\/\/ defer suite.subjectServer.Close()\n}\n\nfunc (suite *FakeExternalServerTestSuite) TestExample() {\n\tres, err := http.Get(fmt.Sprintf(\"%s\/%s\",\n\t\tsuite.subjectServer.URL, \"http:\/\/localhost:8081\/oi.txt\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsuite.Equal(404, res.StatusCode, \"status code should be 404\")\n\tsuite.Equal(string(body), \"\\n\", \"they should be equal\")\n}\n\nfunc (suite *FakeExternalServerTestSuite) TearDownTest() {\n\tsuite.subjectServer.Close()\n}\n\nfunc (suite *FakeExternalServerTestSuite) TearDownSuite() {\n\tif err := suite.fakeServer.Shutdown(nil); err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Use a png file instead of txt on tests<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nconst testImagesFolder = \".\/test\/images\"\n\ntype FakeExternalServerTestSuite struct {\n\tsuite.Suite\n\tfakeServer *http.Server\n\tsubjectServer *httptest.Server\n}\n\nfunc (suite *FakeExternalServerTestSuite) SetupSuite() {\n\tsuite.fakeServer = &http.Server{Addr: \":8081\", Handler: http.FileServer(http.Dir(testImagesFolder))}\n\tgo func() {\n\t\tsuite.fakeServer.ListenAndServe()\n\t}()\n}\n\nfunc TestFakeExternalServerTestSuite(t *testing.T) {\n\tsuite.Run(t, new(FakeExternalServerTestSuite))\n}\n\nfunc (suite *FakeExternalServerTestSuite) SetupTest() {\n\tsuite.subjectServer = httptest.NewServer(GetApp())\n}\n\nfunc (suite *FakeExternalServerTestSuite) TestExample() {\n\tres, err := http.Get(fmt.Sprintf(\"%s\/%s\",\n\t\tsuite.subjectServer.URL, \"http:\/\/localhost:8081\/oi.png\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsuite.Equal(404, res.StatusCode, \"status code should be 404\")\n\tsuite.Equal(string(body), \"\\n\", \"they should be equal\")\n}\n\nfunc (suite *FakeExternalServerTestSuite) TearDownTest() {\n\tsuite.subjectServer.Close()\n}\n\nfunc (suite *FakeExternalServerTestSuite) TearDownSuite() {\n\tif err := suite.fakeServer.Shutdown(nil); err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"testing\"\n)\n\nfunc TestClientOptions(t *testing.T) {\n\topts := clientOptions{\n cmdOptions: &cmdOptions{\n\t OptEndpoint: \"http:\/\/www.example.com\/mt\/mt-data-api.cgi\",\n\t OptApiVersion: \"1\",\n\t OptClientId: \"go-test\",\n\t OptUsername: \"Melody\",\n },\n PasswordData: \"password\",\n\t}\n\n\tif opts.Endpoint() != \"http:\/\/www.example.com\/mt\/mt-data-api.cgi\" {\n\t\tt.Errorf(\"got %q\", opts.Endpoint())\n\t}\n\n\tif opts.ApiVersion() != \"1\" {\n\t\tt.Errorf(\"got %q\", opts.ApiVersion())\n\t}\n\n\tif opts.ClientId() != \"go-test\" {\n\t\tt.Errorf(\"got %q\", opts.ClientId())\n\t}\n\n\tif opts.Username() != \"Melody\" {\n\t\tt.Errorf(\"got %q\", opts.Username())\n\t}\n\n\tif opts.Password() != \"password\" {\n\t\tt.Errorf(\"got %q\", opts.Password())\n\t}\n}\n<commit_msg>Fix format.<commit_after>package main\n\nimport (\n\t\"testing\"\n)\n\nfunc TestClientOptions(t *testing.T) {\n\topts := clientOptions{\n\t\tcmdOptions: &cmdOptions{\n\t\t\tOptEndpoint: \"http:\/\/www.example.com\/mt\/mt-data-api.cgi\",\n\t\t\tOptApiVersion: \"1\",\n\t\t\tOptClientId: \"go-test\",\n\t\t\tOptUsername: \"Melody\",\n\t\t},\n\t\tPasswordData: \"password\",\n\t}\n\n\tif opts.Endpoint() != \"http:\/\/www.example.com\/mt\/mt-data-api.cgi\" {\n\t\tt.Errorf(\"got %q\", opts.Endpoint())\n\t}\n\n\tif opts.ApiVersion() != \"1\" {\n\t\tt.Errorf(\"got %q\", opts.ApiVersion())\n\t}\n\n\tif opts.ClientId() != \"go-test\" {\n\t\tt.Errorf(\"got %q\", opts.ClientId())\n\t}\n\n\tif opts.Username() != \"Melody\" {\n\t\tt.Errorf(\"got %q\", opts.Username())\n\t}\n\n\tif opts.Password() != \"password\" {\n\t\tt.Errorf(\"got %q\", opts.Password())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clientv3\n\nimport (\n\t\"context\"\n\n\t\"fmt\"\n\n\tapiv3 \"github.com\/projectcalico\/libcalico-go\/lib\/apis\/v3\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/errors\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/names\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n\tcnet \"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/options\"\n\tvalidator \"github.com\/projectcalico\/libcalico-go\/lib\/validator\/v3\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/watch\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ NodeInterface has methods to work with Node resources.\ntype NodeInterface interface {\n\tCreate(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)\n\tUpdate(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)\n\tDelete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error)\n\tGet(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error)\n\tList(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error)\n\tWatch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)\n}\n\n\/\/ nodes implements NodeInterface\ntype nodes struct {\n\tclient client\n}\n\n\/\/ Create takes the representation of a Node and creates it. Returns the stored\n\/\/ representation of the Node, and an error, if there is any.\nfunc (r nodes) Create(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {\n\tif err := validator.Validate(res); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ For host-protection only clusters, we instruct the user to create a Node as the first\n\t\/\/ operation. Piggy-back the datastore initialisation on that to ensure the Ready flag gets\n\t\/\/ set. Since we're likely being called from calicoctl, we don't know the Calico version.\n\terr := r.client.EnsureInitialized(ctx, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := r.client.resources.Create(ctx, opts, apiv3.KindNode, res)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Update takes the representation of a Node and updates it. Returns the stored\n\/\/ representation of the Node, and an error, if there is any.\nfunc (r nodes) Update(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {\n\tif err := validator.Validate(res); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout, err := r.client.resources.Update(ctx, opts, apiv3.KindNode, res)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Delete takes name of the Node and deletes it. Returns an error if one occurs.\nfunc (r nodes) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error) {\n\tpname, err := names.WorkloadEndpointIdentifiers{Node: name}.CalculateWorkloadEndpointName(true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get all weps belonging to the node\n\tweps, err := r.client.WorkloadEndpoints().List(ctx, options.ListOptions{\n\t\tPrefix: true,\n\t\tName: pname,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Collate all IPs across all endpoints, and then release those IPs.\n\tips := []net.IP{}\n\tfor _, wep := range weps.Items {\n\t\t\/\/ The prefix match is unfortunately not a perfect match on the Node (since it is theoretically possible for\n\t\t\/\/ another node to match the prefix (e.g. a node name of the format <thisnode>-foobar would also match a prefix\n\t\t\/\/ search of the node <thisnode>). Therefore, we will also need to check that the Spec.Node field matches the Node.\n\t\tif wep.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ip := range wep.Spec.IPNetworks {\n\t\t\tipAddr, _, err := cnet.ParseCIDROrIP(ip)\n\t\t\tif err == nil {\n\t\t\t\tips = append(ips, *ipAddr)\n\t\t\t} else {\n\t\t\t\t\/\/ Validation for wep insists upon CIDR, so we should always succeed\n\t\t\t\tlog.WithError(err).Warnf(\"Failed to parse CIDR: %s\", ip)\n\t\t\t}\n\t\t}\n\t}\n\t_, err = r.client.IPAM().ReleaseIPs(context.Background(), ips)\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete the weps.\n\tfor _, wep := range weps.Items {\n\t\tif wep.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = r.client.WorkloadEndpoints().Delete(ctx, wep.Namespace, wep.Name, options.DeleteOptions{})\n\t\tswitch err.(type) {\n\t\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Remove the node from the IPAM data if it exists.\n\terr = r.client.IPAM().RemoveIPAMHost(ctx, name)\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove BGPPeers.\n\tbgpPeers, err := r.client.BGPPeers().List(ctx, options.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, peer := range bgpPeers.Items {\n\t\tif peer.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\t\t_, err = r.client.BGPPeers().Delete(ctx, peer.Name, options.DeleteOptions{})\n\t\tswitch err.(type) {\n\t\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Delete felix configuration\n\tnodeConfName := fmt.Sprintf(\"node.%s\", name)\n\t_, err = r.client.FelixConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete bgp configuration\n\t_, err = r.client.BGPConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete the node.\n\tout, err := r.client.resources.Delete(ctx, opts, apiv3.KindNode, noNamespace, name)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Get takes name of the Node, and returns the corresponding Node object,\n\/\/ and an error if there is any.\nfunc (r nodes) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error) {\n\tout, err := r.client.resources.Get(ctx, opts, apiv3.KindNode, noNamespace, name)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ List returns the list of Node objects that match the supplied options.\nfunc (r nodes) List(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error) {\n\tres := &apiv3.NodeList{}\n\tif err := r.client.resources.List(ctx, opts, apiv3.KindNode, apiv3.KindNodeList, res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ Watch returns a watch.Interface that watches the Nodes that match the\n\/\/ supplied options.\nfunc (r nodes) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {\n\treturn r.client.resources.Watch(ctx, opts, apiv3.KindNode, nil)\n}\n<commit_msg>Release IPIP and VXLAN addresses on node deletion<commit_after>\/\/ Copyright (c) 2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clientv3\n\nimport (\n\t\"context\"\n\n\t\"fmt\"\n\n\tapiv3 \"github.com\/projectcalico\/libcalico-go\/lib\/apis\/v3\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/errors\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/names\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n\tcnet \"github.com\/projectcalico\/libcalico-go\/lib\/net\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/options\"\n\tvalidator \"github.com\/projectcalico\/libcalico-go\/lib\/validator\/v3\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/watch\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ NodeInterface has methods to work with Node resources.\ntype NodeInterface interface {\n\tCreate(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)\n\tUpdate(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)\n\tDelete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error)\n\tGet(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error)\n\tList(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error)\n\tWatch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)\n}\n\n\/\/ nodes implements NodeInterface\ntype nodes struct {\n\tclient client\n}\n\n\/\/ Create takes the representation of a Node and creates it. Returns the stored\n\/\/ representation of the Node, and an error, if there is any.\nfunc (r nodes) Create(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {\n\tif err := validator.Validate(res); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ For host-protection only clusters, we instruct the user to create a Node as the first\n\t\/\/ operation. Piggy-back the datastore initialisation on that to ensure the Ready flag gets\n\t\/\/ set. Since we're likely being called from calicoctl, we don't know the Calico version.\n\terr := r.client.EnsureInitialized(ctx, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := r.client.resources.Create(ctx, opts, apiv3.KindNode, res)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Update takes the representation of a Node and updates it. Returns the stored\n\/\/ representation of the Node, and an error, if there is any.\nfunc (r nodes) Update(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {\n\tif err := validator.Validate(res); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout, err := r.client.resources.Update(ctx, opts, apiv3.KindNode, res)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Delete takes name of the Node and deletes it. Returns an error if one occurs.\nfunc (r nodes) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error) {\n\tpname, err := names.WorkloadEndpointIdentifiers{Node: name}.CalculateWorkloadEndpointName(true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get all weps belonging to the node\n\tweps, err := r.client.WorkloadEndpoints().List(ctx, options.ListOptions{\n\t\tPrefix: true,\n\t\tName: pname,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Collate all IPs across all endpoints, and then release those IPs.\n\tips := []net.IP{}\n\tfor _, wep := range weps.Items {\n\t\t\/\/ The prefix match is unfortunately not a perfect match on the Node (since it is theoretically possible for\n\t\t\/\/ another node to match the prefix (e.g. a node name of the format <thisnode>-foobar would also match a prefix\n\t\t\/\/ search of the node <thisnode>). Therefore, we will also need to check that the Spec.Node field matches the Node.\n\t\tif wep.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ip := range wep.Spec.IPNetworks {\n\t\t\tipAddr, _, err := cnet.ParseCIDROrIP(ip)\n\t\t\tif err == nil {\n\t\t\t\tips = append(ips, *ipAddr)\n\t\t\t} else {\n\t\t\t\t\/\/ Validation for wep insists upon CIDR, so we should always succeed\n\t\t\t\tlog.WithError(err).Warnf(\"Failed to parse CIDR: %s\", ip)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Add in tunnel addresses if they exist for the node.\n\tif n, err := r.client.Nodes().Get(ctx, name, options.GetOptions{}); err != nil {\n\t\tif _, ok := err.(errors.ErrorResourceDoesNotExist); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Resource does not exist, carry on and clean up as much as we can.\n\t} else {\n\t\tif n.Spec.BGP != nil && n.Spec.BGP.IPv4IPIPTunnelAddr != \"\" {\n\t\t\tipAddr, _, err := cnet.ParseCIDROrIP(n.Spec.BGP.IPv4IPIPTunnelAddr)\n\t\t\tif err == nil {\n\t\t\t\tips = append(ips, *ipAddr)\n\t\t\t} else {\n\t\t\t\tlog.WithError(err).Warnf(\"Failed to parse IPIP tunnel address CIDR: %s\", n.Spec.BGP.IPv4IPIPTunnelAddr)\n\t\t\t}\n\t\t}\n\t\tif n.Spec.IPv4VXLANTunnelAddr != \"\" {\n\t\t\tipAddr, _, err := cnet.ParseCIDROrIP(n.Spec.IPv4VXLANTunnelAddr)\n\t\t\tif err == nil {\n\t\t\t\tips = append(ips, *ipAddr)\n\t\t\t} else {\n\t\t\t\tlog.WithError(err).Warnf(\"Failed to parse VXLAN tunnel address CIDR: %s\", n.Spec.IPv4VXLANTunnelAddr)\n\t\t\t}\n\t\t}\n\t}\n\n\t_, err = r.client.IPAM().ReleaseIPs(context.Background(), ips)\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete the weps.\n\tfor _, wep := range weps.Items {\n\t\tif wep.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = r.client.WorkloadEndpoints().Delete(ctx, wep.Namespace, wep.Name, options.DeleteOptions{})\n\t\tswitch err.(type) {\n\t\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Remove the node from the IPAM data if it exists.\n\terr = r.client.IPAM().RemoveIPAMHost(ctx, name)\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove BGPPeers.\n\tbgpPeers, err := r.client.BGPPeers().List(ctx, options.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, peer := range bgpPeers.Items {\n\t\tif peer.Spec.Node != name {\n\t\t\tcontinue\n\t\t}\n\t\t_, err = r.client.BGPPeers().Delete(ctx, peer.Name, options.DeleteOptions{})\n\t\tswitch err.(type) {\n\t\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Delete felix configuration\n\tnodeConfName := fmt.Sprintf(\"node.%s\", name)\n\t_, err = r.client.FelixConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete bgp configuration\n\t_, err = r.client.BGPConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})\n\tswitch err.(type) {\n\tcase nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t\/\/ Delete the node.\n\tout, err := r.client.resources.Delete(ctx, opts, apiv3.KindNode, noNamespace, name)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ Get takes name of the Node, and returns the corresponding Node object,\n\/\/ and an error if there is any.\nfunc (r nodes) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error) {\n\tout, err := r.client.resources.Get(ctx, opts, apiv3.KindNode, noNamespace, name)\n\tif out != nil {\n\t\treturn out.(*apiv3.Node), err\n\t}\n\treturn nil, err\n}\n\n\/\/ List returns the list of Node objects that match the supplied options.\nfunc (r nodes) List(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error) {\n\tres := &apiv3.NodeList{}\n\tif err := r.client.resources.List(ctx, opts, apiv3.KindNode, apiv3.KindNodeList, res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ Watch returns a watch.Interface that watches the Nodes that match the\n\/\/ supplied options.\nfunc (r nodes) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {\n\treturn r.client.resources.Watch(ctx, opts, apiv3.KindNode, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package uatparse\n\n\/\/ AIRMET = AIRMET\/SIGMET\/ (TFR?)\n\nconst (\n\tUATMSG_TEXT = 1\n\tUATMSG_NEXRAD = 2\n\tUATMSG_AIRMET = 3 \/\/ AIRMET. Decoded.\n\n\t\/\/ How the coordinates should be used in a graphical AIRMET.\n\tAIRMET_POLYGON = 1\n\tAIRMET_ELLIPSE = 2\n\tAIRMET_PRISM = 3\n\tAIRMET_3D = 4\n)\n\n\/\/ Points can be in 3D - take care that altitude is used correctly.\ntype GeoPoints struct {\n\tLat float64\n\tLon float64\n\tAlt int32\n}\n\ntype UATAirmet struct {\n\tPoints []GeoPoints \/\/ Points\n}\n\ntype UATMsgDecoded struct {\n\tType int\n}\n<commit_msg>Revert \"Beginning.\"<commit_after><|endoftext|>"} {"text":"<commit_before>package goose\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t_ \"github.com\/lib\/pq\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t_ \"github.com\/ziutek\/mymysql\/godrv\"\n)\n\nvar (\n\tErrTableDoesNotExist = errors.New(\"table does not exist\")\n\tErrNoPreviousVersion = errors.New(\"no previous version found\")\n)\n\ntype MigrationRecord struct {\n\tVersionId int64\n\tTStamp time.Time\n\tIsApplied bool \/\/ was this a result of up() or down()\n}\n\ntype Migration struct {\n\tVersion int64\n\tNext int64 \/\/ next version, or -1 if none\n\tPrevious int64 \/\/ previous version, -1 if none\n\tSource string \/\/ path to .go or .sql script\n}\n\ntype migrationSorter []*Migration\n\n\/\/ helpers so we can use pkg sort\nfunc (ms migrationSorter) Len() int { return len(ms) }\nfunc (ms migrationSorter) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }\nfunc (ms migrationSorter) Less(i, j int) bool { return ms[i].Version < ms[j].Version }\n\nfunc newMigration(v int64, src string) *Migration {\n\treturn &Migration{v, -1, -1, src}\n}\n\nfunc RunMigrations(conf *DBConf, migrationsDir string, target int64) (err error) {\n\n\tdb, err := OpenDBFromDBConf(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\treturn RunMigrationsOnDb(conf, migrationsDir, target, db)\n}\n\n\/\/ Runs migration on a specific database instance.\nfunc RunMigrationsOnDb(conf *DBConf, migrationsDir string, target int64, db *sql.DB) (err error) {\n\tcurrent, err := EnsureDBVersion(conf, db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmigrations, err := CollectMigrations(migrationsDir, current, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(migrations) == 0 {\n\t\tfmt.Printf(\"goose: no migrations to run. current version: %d\\n\", current)\n\t\treturn nil\n\t}\n\n\tms := migrationSorter(migrations)\n\tdirection := current < target\n\tms.Sort(direction)\n\n\tfmt.Printf(\"goose: migrating db environment '%v', current version: %d, target: %d\\n\",\n\t\tconf.Env, current, target)\n\n\tfor _, m := range ms {\n\n\t\tswitch filepath.Ext(m.Source) {\n\t\tcase \".sql\":\n\t\t\terr = runSQLMigration(conf, db, m.Source, m.Version, direction)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"FAIL %v, quitting migration\", err))\n\t\t}\n\n\t\tfmt.Println(\"OK \", filepath.Base(m.Source))\n\t}\n\n\treturn nil\n}\n\n\/\/ collect all the valid looking migration scripts in the\n\/\/ migrations folder, and key them by version\nfunc CollectMigrations(dirpath string, current, target int64) (m []*Migration, err error) {\n\n\t\/\/ extract the numeric component of each migration,\n\t\/\/ filter out any uninteresting files,\n\t\/\/ and ensure we only have one file per migration version.\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, err error) error {\n\n\t\tif v, e := NumericComponent(name); e == nil {\n\n\t\t\tfor _, g := range m {\n\t\t\t\tif v == g.Version {\n\t\t\t\t\tlog.Fatalf(\"more than one file specifies the migration for version %d (%s and %s)\",\n\t\t\t\t\t\tv, g.Source, filepath.Join(dirpath, name))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif versionFilter(v, current, target) {\n\t\t\t\tm = append(m, newMigration(v, name))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn m, nil\n}\n\nfunc versionFilter(v, current, target int64) bool {\n\n\tif target > current {\n\t\treturn v > current && v <= target\n\t}\n\n\tif target < current {\n\t\treturn v <= current && v > target\n\t}\n\n\treturn false\n}\n\nfunc (ms migrationSorter) Sort(direction bool) {\n\n\t\/\/ sort ascending or descending by version\n\tif direction {\n\t\tsort.Sort(ms)\n\t} else {\n\t\tsort.Sort(sort.Reverse(ms))\n\t}\n\n\t\/\/ now that we're sorted in the appropriate direction,\n\t\/\/ populate next and previous for each migration\n\tfor i, m := range ms {\n\t\tprev := int64(-1)\n\t\tif i > 0 {\n\t\t\tprev = ms[i-1].Version\n\t\t\tms[i-1].Next = m.Version\n\t\t}\n\t\tms[i].Previous = prev\n\t}\n}\n\n\/\/ look for migration scripts with names in the form:\n\/\/ XXX_descriptivename.ext\n\/\/ where XXX specifies the version number\n\/\/ and ext specifies the type of migration\nfunc NumericComponent(name string) (int64, error) {\n\n\tbase := filepath.Base(name)\n\n\tif ext := filepath.Ext(base); ext != \".sql\" {\n\t\treturn 0, errors.New(\"not a recognized migration file type\")\n\t}\n\n\tidx := strings.Index(base, \"_\")\n\tif idx < 0 {\n\t\treturn 0, errors.New(\"no separator found\")\n\t}\n\n\tn, e := strconv.ParseInt(base[:idx], 10, 64)\n\tif e == nil && n <= 0 {\n\t\treturn 0, errors.New(\"migration IDs must be greater than zero\")\n\t}\n\n\treturn n, e\n}\n\n\/\/ retrieve the current version for this DB.\n\/\/ Create and initialize the DB version table if it doesn't exist.\nfunc EnsureDBVersion(conf *DBConf, db *sql.DB) (int64, error) {\n\n\trows, err := conf.Driver.Dialect.dbVersionQuery(db)\n\tif err != nil {\n\t\tif err == ErrTableDoesNotExist {\n\t\t\treturn 0, createVersionTable(conf, db)\n\t\t}\n\t\treturn 0, err\n\t}\n\tdefer rows.Close()\n\n\t\/\/ The most recent record for each migration specifies\n\t\/\/ whether it has been applied or rolled back.\n\t\/\/ The first version we find that has been applied is the current version.\n\n\ttoSkip := make([]int64, 0)\n\n\tfor rows.Next() {\n\t\tvar row MigrationRecord\n\t\tif err = rows.Scan(&row.VersionId, &row.IsApplied); err != nil {\n\t\t\tlog.Fatal(\"error scanning rows:\", err)\n\t\t}\n\n\t\t\/\/ have we already marked this version to be skipped?\n\t\tskip := false\n\t\tfor _, v := range toSkip {\n\t\t\tif v == row.VersionId {\n\t\t\t\tskip = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if version has been applied we're done\n\t\tif row.IsApplied {\n\t\t\treturn row.VersionId, nil\n\t\t}\n\n\t\t\/\/ latest version of migration has not been applied.\n\t\ttoSkip = append(toSkip, row.VersionId)\n\t}\n\n\tpanic(\"failure in EnsureDBVersion()\")\n}\n\n\/\/ Create the goose_db_version table\n\/\/ and insert the initial 0 value into it\nfunc createVersionTable(conf *DBConf, db *sql.DB) error {\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td := conf.Driver.Dialect\n\n\tif _, err := txn.Exec(d.createVersionTableSql()); err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\tversion := 0\n\tapplied := true\n\tif _, err := txn.Exec(d.insertVersionSql(), version, applied); err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\treturn txn.Commit()\n}\n\n\/\/ wrapper for EnsureDBVersion for callers that don't already have\n\/\/ their own DB instance\nfunc GetDBVersion(conf *DBConf) (version int64, err error) {\n\n\tdb, err := OpenDBFromDBConf(conf)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer db.Close()\n\n\tversion, err = EnsureDBVersion(conf, db)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn version, nil\n}\n\nfunc GetPreviousDBVersion(dirpath string, version int64) (previous int64, err error) {\n\n\tprevious = -1\n\tsawGivenVersion := false\n\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {\n\n\t\tif !info.IsDir() {\n\t\t\tif v, e := NumericComponent(name); e == nil {\n\t\t\t\tif v > previous && v < version {\n\t\t\t\t\tprevious = v\n\t\t\t\t}\n\t\t\t\tif v == version {\n\t\t\t\t\tsawGivenVersion = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif previous == -1 {\n\t\tif sawGivenVersion {\n\t\t\t\/\/ the given version is (likely) valid but we didn't find\n\t\t\t\/\/ anything before it.\n\t\t\t\/\/ 'previous' must reflect that no migrations have been applied.\n\t\t\tprevious = 0\n\t\t} else {\n\t\t\terr = ErrNoPreviousVersion\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ helper to identify the most recent possible version\n\/\/ within a folder of migration scripts\nfunc GetMostRecentDBVersion(dirpath string) (version int64, err error) {\n\n\tversion = -1\n\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {\n\t\tif walkerr != nil {\n\t\t\treturn walkerr\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tif v, e := NumericComponent(name); e == nil {\n\t\t\t\tif v > version {\n\t\t\t\t\tversion = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif version == -1 {\n\t\terr = errors.New(\"no valid version found\")\n\t}\n\n\treturn\n}\n\nfunc CreateMigration(name, migrationType, dir string, t time.Time) (path string, err error) {\n\n\tif migrationType != \"sql\" {\n\t\treturn \"\", errors.New(\"migration type must be 'sql'\")\n\t}\n\n\ttimestamp := t.Format(\"20060102150405\")\n\tfilename := fmt.Sprintf(\"%v_%v.%v\", timestamp, name, migrationType)\n\n\tfpath := filepath.Join(dir, filename)\n\n\tvar tmpl *template.Template\n\tif migrationType == \"sql\" {\n\t\ttmpl = sqlMigrationTemplate\n\t}\n\n\tpath, err = writeTemplateToFile(fpath, tmpl, timestamp)\n\n\treturn\n}\n\nvar sqlMigrationTemplate = template.Must(template.New(\"goose.sql-migration\").Parse(`\n-- +goose Up\n-- SQL in section 'Up' is executed when this migration is applied\n\n\n-- +goose Down\n-- SQL section 'Down' is executed when this migration is rolled back\n`))\n<commit_msg>Remove whitespace at top of goose create template<commit_after>package goose\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t_ \"github.com\/lib\/pq\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t_ \"github.com\/ziutek\/mymysql\/godrv\"\n)\n\nvar (\n\tErrTableDoesNotExist = errors.New(\"table does not exist\")\n\tErrNoPreviousVersion = errors.New(\"no previous version found\")\n)\n\ntype MigrationRecord struct {\n\tVersionId int64\n\tTStamp time.Time\n\tIsApplied bool \/\/ was this a result of up() or down()\n}\n\ntype Migration struct {\n\tVersion int64\n\tNext int64 \/\/ next version, or -1 if none\n\tPrevious int64 \/\/ previous version, -1 if none\n\tSource string \/\/ path to .go or .sql script\n}\n\ntype migrationSorter []*Migration\n\n\/\/ helpers so we can use pkg sort\nfunc (ms migrationSorter) Len() int { return len(ms) }\nfunc (ms migrationSorter) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }\nfunc (ms migrationSorter) Less(i, j int) bool { return ms[i].Version < ms[j].Version }\n\nfunc newMigration(v int64, src string) *Migration {\n\treturn &Migration{v, -1, -1, src}\n}\n\nfunc RunMigrations(conf *DBConf, migrationsDir string, target int64) (err error) {\n\n\tdb, err := OpenDBFromDBConf(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\treturn RunMigrationsOnDb(conf, migrationsDir, target, db)\n}\n\n\/\/ Runs migration on a specific database instance.\nfunc RunMigrationsOnDb(conf *DBConf, migrationsDir string, target int64, db *sql.DB) (err error) {\n\tcurrent, err := EnsureDBVersion(conf, db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmigrations, err := CollectMigrations(migrationsDir, current, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(migrations) == 0 {\n\t\tfmt.Printf(\"goose: no migrations to run. current version: %d\\n\", current)\n\t\treturn nil\n\t}\n\n\tms := migrationSorter(migrations)\n\tdirection := current < target\n\tms.Sort(direction)\n\n\tfmt.Printf(\"goose: migrating db environment '%v', current version: %d, target: %d\\n\",\n\t\tconf.Env, current, target)\n\n\tfor _, m := range ms {\n\n\t\tswitch filepath.Ext(m.Source) {\n\t\tcase \".sql\":\n\t\t\terr = runSQLMigration(conf, db, m.Source, m.Version, direction)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"FAIL %v, quitting migration\", err))\n\t\t}\n\n\t\tfmt.Println(\"OK \", filepath.Base(m.Source))\n\t}\n\n\treturn nil\n}\n\n\/\/ collect all the valid looking migration scripts in the\n\/\/ migrations folder, and key them by version\nfunc CollectMigrations(dirpath string, current, target int64) (m []*Migration, err error) {\n\n\t\/\/ extract the numeric component of each migration,\n\t\/\/ filter out any uninteresting files,\n\t\/\/ and ensure we only have one file per migration version.\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, err error) error {\n\n\t\tif v, e := NumericComponent(name); e == nil {\n\n\t\t\tfor _, g := range m {\n\t\t\t\tif v == g.Version {\n\t\t\t\t\tlog.Fatalf(\"more than one file specifies the migration for version %d (%s and %s)\",\n\t\t\t\t\t\tv, g.Source, filepath.Join(dirpath, name))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif versionFilter(v, current, target) {\n\t\t\t\tm = append(m, newMigration(v, name))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn m, nil\n}\n\nfunc versionFilter(v, current, target int64) bool {\n\n\tif target > current {\n\t\treturn v > current && v <= target\n\t}\n\n\tif target < current {\n\t\treturn v <= current && v > target\n\t}\n\n\treturn false\n}\n\nfunc (ms migrationSorter) Sort(direction bool) {\n\n\t\/\/ sort ascending or descending by version\n\tif direction {\n\t\tsort.Sort(ms)\n\t} else {\n\t\tsort.Sort(sort.Reverse(ms))\n\t}\n\n\t\/\/ now that we're sorted in the appropriate direction,\n\t\/\/ populate next and previous for each migration\n\tfor i, m := range ms {\n\t\tprev := int64(-1)\n\t\tif i > 0 {\n\t\t\tprev = ms[i-1].Version\n\t\t\tms[i-1].Next = m.Version\n\t\t}\n\t\tms[i].Previous = prev\n\t}\n}\n\n\/\/ look for migration scripts with names in the form:\n\/\/ XXX_descriptivename.ext\n\/\/ where XXX specifies the version number\n\/\/ and ext specifies the type of migration\nfunc NumericComponent(name string) (int64, error) {\n\n\tbase := filepath.Base(name)\n\n\tif ext := filepath.Ext(base); ext != \".sql\" {\n\t\treturn 0, errors.New(\"not a recognized migration file type\")\n\t}\n\n\tidx := strings.Index(base, \"_\")\n\tif idx < 0 {\n\t\treturn 0, errors.New(\"no separator found\")\n\t}\n\n\tn, e := strconv.ParseInt(base[:idx], 10, 64)\n\tif e == nil && n <= 0 {\n\t\treturn 0, errors.New(\"migration IDs must be greater than zero\")\n\t}\n\n\treturn n, e\n}\n\n\/\/ retrieve the current version for this DB.\n\/\/ Create and initialize the DB version table if it doesn't exist.\nfunc EnsureDBVersion(conf *DBConf, db *sql.DB) (int64, error) {\n\n\trows, err := conf.Driver.Dialect.dbVersionQuery(db)\n\tif err != nil {\n\t\tif err == ErrTableDoesNotExist {\n\t\t\treturn 0, createVersionTable(conf, db)\n\t\t}\n\t\treturn 0, err\n\t}\n\tdefer rows.Close()\n\n\t\/\/ The most recent record for each migration specifies\n\t\/\/ whether it has been applied or rolled back.\n\t\/\/ The first version we find that has been applied is the current version.\n\n\ttoSkip := make([]int64, 0)\n\n\tfor rows.Next() {\n\t\tvar row MigrationRecord\n\t\tif err = rows.Scan(&row.VersionId, &row.IsApplied); err != nil {\n\t\t\tlog.Fatal(\"error scanning rows:\", err)\n\t\t}\n\n\t\t\/\/ have we already marked this version to be skipped?\n\t\tskip := false\n\t\tfor _, v := range toSkip {\n\t\t\tif v == row.VersionId {\n\t\t\t\tskip = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if version has been applied we're done\n\t\tif row.IsApplied {\n\t\t\treturn row.VersionId, nil\n\t\t}\n\n\t\t\/\/ latest version of migration has not been applied.\n\t\ttoSkip = append(toSkip, row.VersionId)\n\t}\n\n\tpanic(\"failure in EnsureDBVersion()\")\n}\n\n\/\/ Create the goose_db_version table\n\/\/ and insert the initial 0 value into it\nfunc createVersionTable(conf *DBConf, db *sql.DB) error {\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td := conf.Driver.Dialect\n\n\tif _, err := txn.Exec(d.createVersionTableSql()); err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\tversion := 0\n\tapplied := true\n\tif _, err := txn.Exec(d.insertVersionSql(), version, applied); err != nil {\n\t\ttxn.Rollback()\n\t\treturn err\n\t}\n\n\treturn txn.Commit()\n}\n\n\/\/ wrapper for EnsureDBVersion for callers that don't already have\n\/\/ their own DB instance\nfunc GetDBVersion(conf *DBConf) (version int64, err error) {\n\n\tdb, err := OpenDBFromDBConf(conf)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer db.Close()\n\n\tversion, err = EnsureDBVersion(conf, db)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn version, nil\n}\n\nfunc GetPreviousDBVersion(dirpath string, version int64) (previous int64, err error) {\n\n\tprevious = -1\n\tsawGivenVersion := false\n\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {\n\n\t\tif !info.IsDir() {\n\t\t\tif v, e := NumericComponent(name); e == nil {\n\t\t\t\tif v > previous && v < version {\n\t\t\t\t\tprevious = v\n\t\t\t\t}\n\t\t\t\tif v == version {\n\t\t\t\t\tsawGivenVersion = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif previous == -1 {\n\t\tif sawGivenVersion {\n\t\t\t\/\/ the given version is (likely) valid but we didn't find\n\t\t\t\/\/ anything before it.\n\t\t\t\/\/ 'previous' must reflect that no migrations have been applied.\n\t\t\tprevious = 0\n\t\t} else {\n\t\t\terr = ErrNoPreviousVersion\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ helper to identify the most recent possible version\n\/\/ within a folder of migration scripts\nfunc GetMostRecentDBVersion(dirpath string) (version int64, err error) {\n\n\tversion = -1\n\n\tfilepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {\n\t\tif walkerr != nil {\n\t\t\treturn walkerr\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tif v, e := NumericComponent(name); e == nil {\n\t\t\t\tif v > version {\n\t\t\t\t\tversion = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif version == -1 {\n\t\terr = errors.New(\"no valid version found\")\n\t}\n\n\treturn\n}\n\nfunc CreateMigration(name, migrationType, dir string, t time.Time) (path string, err error) {\n\n\tif migrationType != \"sql\" {\n\t\treturn \"\", errors.New(\"migration type must be 'sql'\")\n\t}\n\n\ttimestamp := t.Format(\"20060102150405\")\n\tfilename := fmt.Sprintf(\"%v_%v.%v\", timestamp, name, migrationType)\n\n\tfpath := filepath.Join(dir, filename)\n\n\tvar tmpl *template.Template\n\tif migrationType == \"sql\" {\n\t\ttmpl = sqlMigrationTemplate\n\t}\n\n\tpath, err = writeTemplateToFile(fpath, tmpl, timestamp)\n\n\treturn\n}\n\nvar sqlMigrationTemplate = template.Must(template.New(\"goose.sql-migration\").Parse(\n\t`-- +goose Up\n-- SQL in section 'Up' is executed when this migration is applied\n\n\n-- +goose Down\n-- SQL section 'Down' is executed when this migration is rolled back\n`))\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"math\"\n\t\".\/nbt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tout *bufio.Writer\n\tyMin int\n\tblockFaces bool\n\thideBottom bool\n\tnoColor bool\n\n\tfaceCount int\n\tfaceLimit int\n\n\tchunkCount int\n\tchunkLimit int\n\n\tchunkMask ChunkMask\n)\n\nfunc main() {\n\tvar bx, bz float64\n\tvar cx, cz int\n\tvar square int\n\tvar rectx, rectz int\n\tvar maxProcs = runtime.GOMAXPROCS(0)\n\tvar prt bool\n\tvar solidSides bool\n\tvar mtlNumber bool\n\n\tvar defaultObjOutFilename = \"a.obj\"\n\tvar defaultPrtOutFilename = \"a.prt\"\n\n\tvar outFilename string\n\tflag.IntVar(&maxProcs, \"cpu\", maxProcs, \"Number of cores to use\")\n\tflag.StringVar(&outFilename, \"o\", defaultObjOutFilename, \"Name for output file\")\n\tflag.IntVar(&yMin, \"y\", 0, \"Omit all blocks below this height. 63 is sea level\")\n\tflag.BoolVar(&solidSides, \"sides\", false, \"Solid sides, rather than showing underground\")\n\tflag.BoolVar(&blockFaces, \"bf\", false, \"Don't combine adjacent faces of the same block within a column\")\n\tflag.BoolVar(&hideBottom, \"hb\", false, \"Hide bottom of world\")\n\tflag.BoolVar(&noColor, \"g\", false, \"Omit materials\")\n\tflag.Float64Var(&bx, \"x\", 0, \"Center x coordinate in blocks\")\n\tflag.Float64Var(&bz, \"z\", 0, \"Center z coordinate in blocks\")\n\tflag.IntVar(&cx, \"cx\", 0, \"Center x coordinate in chunks\")\n\tflag.IntVar(&cz, \"cz\", 0, \"Center z coordinate in chunks\")\n\tflag.IntVar(&square, \"s\", math.MaxInt32, \"Chunk square size\")\n\tflag.IntVar(&rectx, \"rx\", math.MaxInt32, \"Width(x) of rectangle size\")\n\tflag.IntVar(&rectz, \"rz\", math.MaxInt32, \"Height(z) of rectangle size\")\n\tflag.IntVar(&faceLimit, \"fk\", math.MaxInt32, \"Face limit (thousands of faces)\")\n\tflag.BoolVar(&prt, \"prt\", false, \"Write out PRT file instead of Obj file\")\n\tflag.BoolVar(&mtlNumber, \"mtlnum\", false, \"Number materials instead of using names\")\n\tvar showHelp = flag.Bool(\"h\", false, \"Show Help\")\n\tflag.Parse()\n\n\truntime.GOMAXPROCS(maxProcs)\n\tfmt.Printf(\"mcobj %v (cpu: %d) Copyright (c) 2011 Jonathan Wright\\n\", version, runtime.GOMAXPROCS(0))\n\n\tif *showHelp || flag.NArg() == 0 {\n\t\tfmt.Fprintln(os.Stderr)\n\t\tfmt.Fprintln(os.Stderr, \"Usage: mcobj -cpu 4 -s 20 -o world1.obj\", exampleWorldPath)\n\t\tfmt.Fprintln(os.Stderr)\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tif faceLimit != math.MaxInt32 {\n\t\tfaceLimit *= 1000\n\t}\n\n\tif mtlNumber {\n\t\tMaterialNamer = new(NumberBlockIdNamer)\n\t} else {\n\t\tMaterialNamer = new(NameBlockIdNamer)\n\t}\n\n\tswitch {\n\tcase bx == 0 && cx == 0:\n\t\tcx = 0\n\tcase cx == 0:\n\t\tcx = int(bx \/ 16)\n\t}\n\n\tswitch {\n\tcase bz == 0 && cz == 0:\n\t\tcz = 0\n\tcase cz == 0:\n\t\tcz = int(bz \/ 16)\n\t}\n\n\tif square != math.MaxInt32 {\n\t\tchunkLimit = square * square\n\t\tvar h = square \/ 2\n\t\tchunkMask = &RectangeChunkMask{cx - h, cz - h, cx - h + square, cz - h + square}\n\t} else if rectx != math.MaxInt32 || rectz != math.MaxInt32 {\n\t\tswitch {\n\t\tcase rectx != math.MaxInt32 && rectz != math.MaxInt32:\n\t\t\tchunkLimit = rectx * rectz\n\t\t\tvar (\n\t\t\t\thx = rectx \/ 2\n\t\t\t\thz = rectz \/ 2\n\t\t\t)\n\t\t\tchunkMask = &RectangeChunkMask{cx - hx, cz - hz, cx - hx + rectx, cz - hz + rectz}\n\t\tcase rectx != math.MaxInt32:\n\t\t\tchunkLimit = math.MaxInt32\n\t\t\tvar hx = rectx \/ 2\n\t\t\tchunkMask = &RectangeChunkMask{cx - hx, math.MinInt32, cx - hx + rectx, math.MaxInt32}\n\t\tcase rectz != math.MaxInt32:\n\t\t\tchunkLimit = math.MaxInt32\n\t\t\tvar hz = rectz \/ 2\n\t\t\tchunkMask = &RectangeChunkMask{math.MinInt32, cz - hz, math.MaxInt32, cz - hz + rectz}\n\t\t}\n\t} else {\n\t\tchunkLimit = math.MaxInt32\n\t\tchunkMask = &AllChunksMask{}\n\t}\n\n\tif prt && outFilename == defaultObjOutFilename {\n\t\toutFilename = defaultPrtOutFilename\n\t}\n\n\tif solidSides {\n\t\tdefaultSide = &emptySide\n\t}\n\n\t{\n\t\tvar dir, _ = filepath.Split(strings.Replace(os.Args[0], \"\\\\\", \"\/\", -1))\n\t\tvar jsonError = loadBlockTypesJson(filepath.Join(dir, \"blocks.json\"))\n\t\tif jsonError != nil {\n\t\t\tfmt.Fprintln(os.Stderr, jsonError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i := 0; i < flag.NArg(); i++ {\n\t\tvar dirpath = flag.Arg(i)\n\t\tvar fi, err = os.Stat(dirpath)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tcontinue\n\t\t} else if !fi.IsDirectory() {\n\t\t\tfmt.Fprintln(os.Stderr, dirpath, \"is not a directory\")\n\t\t}\n\n\t\tvar world = OpenWorld(dirpath, chunkMask)\n\t\tvar pool, poolErr = world.ChunkPool()\n\t\tif poolErr != nil {\n\t\t\tfmt.Println(poolErr)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar generator OutputGenerator\n\t\tif prt {\n\t\t\tgenerator = new(PrtGenerator)\n\t\t} else {\n\t\t\tgenerator = new(ObjGenerator)\n\t\t}\n\t\tvar boundary = new(BoundaryLocator)\n\t\tboundary.Init()\n\t\tgenerator.Start(outFilename, pool.Remaining(), maxProcs, boundary)\n\n\t\tif walkEnclosedChunks(pool, world, cx, cz, generator.GetEnclosedJobsChan()) {\n\t\t\t<-generator.GetCompleteChan()\n\t\t}\n\n\t\tgenerator.Close()\n\t}\n}\n\ntype OutputGenerator interface {\n\tStart(outFilename string, total int, maxProcs int, boundary *BoundaryLocator)\n\tGetEnclosedJobsChan() chan *EnclosedChunkJob\n\tGetCompleteChan() chan bool\n\tClose()\n}\n\ntype EnclosedChunkJob struct {\n\tlast bool\n\tenclosed *EnclosedChunk\n}\n\nfunc walkEnclosedChunks(pool ChunkPool, opener ChunkOpener, cx, cz int, enclosedsChan chan *EnclosedChunkJob) bool {\n\tvar (\n\t\tsideCache = new(SideCache)\n\t\tstarted = false\n\t)\n\n\tfor i := 0; moreChunks(pool.Remaining()); i++ {\n\t\tfor x := 0; x < i && moreChunks(pool.Remaining()); x++ {\n\t\t\tfor z := 0; z < i && moreChunks(pool.Remaining()); z++ {\n\t\t\t\tvar (\n\t\t\t\t\tax = cx + unzigzag(x)\n\t\t\t\t\taz = cz + unzigzag(z)\n\t\t\t\t)\n\n\t\t\t\tif pool.Pop(ax, az) {\n\t\t\t\t\tloadSide(sideCache, opener, ax-1, az)\n\t\t\t\t\tloadSide(sideCache, opener, ax+1, az)\n\t\t\t\t\tloadSide(sideCache, opener, ax, az-1)\n\t\t\t\t\tloadSide(sideCache, opener, ax, az+1)\n\n\t\t\t\t\tvar chunk, loadErr = loadChunk2(opener, ax, az)\n\t\t\t\t\tif loadErr != nil {\n\t\t\t\t\t\tfmt.Println(loadErr)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar enclosed = sideCache.EncloseChunk(chunk)\n\t\t\t\t\t\tsideCache.AddChunk(chunk)\n\t\t\t\t\t\tchunkCount++\n\t\t\t\t\t\tenclosedsChan <- &EnclosedChunkJob{!moreChunks(pool.Remaining()), enclosed}\n\t\t\t\t\t\tstarted = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn started\n}\n\ntype Blocks []uint16\n\ntype BlockColumn []uint16\n\nfunc (b *Blocks) Get(x, y, z int) uint16 {\n\treturn (*b)[y+(z*128+(x*128*16))]\n}\n\nfunc (b *Blocks) Column(x, z int) BlockColumn {\n\tvar i = 128 * (z + x*16)\n\treturn BlockColumn((*b)[i : i+128])\n}\n\nfunc zigzag(n int) int {\n\treturn (n << 1) ^ (n >> 31)\n}\n\nfunc unzigzag(n int) int {\n\treturn (n >> 1) ^ (-(n & 1))\n}\n\nfunc moreChunks(unprocessedCount int) bool {\n\treturn unprocessedCount > 0 && faceCount < faceLimit && chunkCount < chunkLimit\n}\n\nfunc loadChunk(filename string) (*nbt.Chunk, os.Error) {\n\tvar file, fileErr = os.Open(filename)\n\tdefer file.Close()\n\tif fileErr != nil {\n\t\treturn nil, fileErr\n\t}\n\tvar chunk, err = nbt.ReadDat(file)\n\tif err == os.EOF {\n\t\terr = nil\n\t}\n\treturn chunk, err\n}\n\nfunc loadChunk2(opener ChunkOpener, x, z int) (*nbt.Chunk, os.Error) {\n\tvar r, openErr = opener.OpenChunk(x, z)\n\tif openErr != nil {\n\t\treturn nil, openErr\n\t}\n\tdefer r.Close()\n\n\tvar chunk, nbtErr = nbt.ReadNbt(r)\n\tif nbtErr != nil {\n\t\treturn nil, nbtErr\n\t}\n\treturn chunk, nil\n}\n\nfunc loadSide(sideCache *SideCache, opener ChunkOpener, x, z int) {\n\tif !sideCache.HasSide(x, z) && !chunkMask.IsMasked(x, z) {\n\t\tvar chunk, loadErr = loadChunk2(opener, x, z)\n\t\tif loadErr != nil {\n\t\t\tfmt.Println(loadErr)\n\t\t} else {\n\t\t\tsideCache.AddChunk(chunk)\n\t\t}\n\t}\n}\n\nfunc loadBlockTypesJson(filename string) os.Error {\n\tvar jsonBytes, jsonIoError = ioutil.ReadFile(filename)\n\n\tif jsonIoError != nil {\n\t\treturn jsonIoError\n\t}\n\n\tvar f interface{}\n\tvar unmarshalError = json.Unmarshal(jsonBytes, &f)\n\tif unmarshalError != nil {\n\t\treturn unmarshalError\n\t}\n\n\tvar lines, linesOk = f.([]interface{})\n\tif linesOk {\n\t\tfor _, line := range lines {\n\t\t\tvar fields, fieldsOk = line.(map[string]interface{})\n\t\t\tif fieldsOk {\n\t\t\t\tvar (\n\t\t\t\t\tblockId byte\n\t\t\t\t\tdata byte = 255\n\t\t\t\t\tdataArray []byte\n\t\t\t\t\tname string\n\t\t\t\t\tmass SingularOrAggregate = Mass\n\t\t\t\t\ttransparency Transparency = Opaque\n\t\t\t\t\tempty bool = false\n\t\t\t\t\tcolor uint32\n\t\t\t\t)\n\t\t\t\tfor k, v := range fields {\n\t\t\t\t\tswitch k {\n\t\t\t\t\tcase \"name\":\n\t\t\t\t\t\tname = v.(string)\n\t\t\t\t\tcase \"color\":\n\t\t\t\t\t\tswitch len(v.(string)) {\n\t\t\t\t\t\tcase 7:\n\t\t\t\t\t\t\tvar n, numErr = strconv.Btoui64(v.(string)[1:], 16)\n\t\t\t\t\t\t\tif numErr == nil {\n\t\t\t\t\t\t\t\tcolor = uint32(n*0x100 + 0xff)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase 9:\n\t\t\t\t\t\t\tvar n, numErr = strconv.Btoui64(v.(string)[1:], 16)\n\t\t\t\t\t\t\tif numErr == nil {\n\t\t\t\t\t\t\t\tcolor = uint32(n)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"blockId\":\n\t\t\t\t\t\tblockId = byte(v.(float64))\n\t\t\t\t\tcase \"data\":\n\t\t\t\t\t\tswitch d := v.(type) {\n\t\t\t\t\t\tcase float64:\n\t\t\t\t\t\t\tdata = byte(d)\n\t\t\t\t\t\tcase []interface{}:\n\t\t\t\t\t\t\tdataArray = make([]byte, len(d))\n\t\t\t\t\t\t\tfor i, value := range d {\n\t\t\t\t\t\t\t\tdataArray[i] = byte(value.(float64))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"item\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tmass = Item\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmass = Mass\n\t\t\t\t\t\t\ttransparency = Opaque\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"transparent\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttransparency = Opaque\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"empty\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tempty = true\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t\tmass = Mass\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tempty = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tblockTypeMap[blockId] = &BlockType{blockId, mass, transparency, empty}\n\t\t\t\tif dataArray == nil {\n\t\t\t\t\tif data != 255 {\n\t\t\t\t\t\textraData[blockId] = true\n\t\t\t\t\t\tcolors = append(colors, MTL{blockId, data, color, name})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcolors[blockId] = MTL{blockId, data, color, name}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\textraData[blockId] = true\n\t\t\t\t\tfor _, data = range dataArray {\n\t\t\t\t\t\tcolors = append(colors, MTL{blockId, data, color, fmt.Sprintf(\"%s_%d\", name, data)})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix off-by-one bug when -x or -z are negative<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"math\"\n\t\".\/nbt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tout *bufio.Writer\n\tyMin int\n\tblockFaces bool\n\thideBottom bool\n\tnoColor bool\n\n\tfaceCount int\n\tfaceLimit int\n\n\tchunkCount int\n\tchunkLimit int\n\n\tchunkMask ChunkMask\n)\n\nfunc main() {\n\tvar bx, bz float64\n\tvar cx, cz int\n\tvar square int\n\tvar rectx, rectz int\n\tvar maxProcs = runtime.GOMAXPROCS(0)\n\tvar prt bool\n\tvar solidSides bool\n\tvar mtlNumber bool\n\n\tvar defaultObjOutFilename = \"a.obj\"\n\tvar defaultPrtOutFilename = \"a.prt\"\n\n\tvar outFilename string\n\tflag.IntVar(&maxProcs, \"cpu\", maxProcs, \"Number of cores to use\")\n\tflag.StringVar(&outFilename, \"o\", defaultObjOutFilename, \"Name for output file\")\n\tflag.IntVar(&yMin, \"y\", 0, \"Omit all blocks below this height. 63 is sea level\")\n\tflag.BoolVar(&solidSides, \"sides\", false, \"Solid sides, rather than showing underground\")\n\tflag.BoolVar(&blockFaces, \"bf\", false, \"Don't combine adjacent faces of the same block within a column\")\n\tflag.BoolVar(&hideBottom, \"hb\", false, \"Hide bottom of world\")\n\tflag.BoolVar(&noColor, \"g\", false, \"Omit materials\")\n\tflag.Float64Var(&bx, \"x\", 0, \"Center x coordinate in blocks\")\n\tflag.Float64Var(&bz, \"z\", 0, \"Center z coordinate in blocks\")\n\tflag.IntVar(&cx, \"cx\", 0, \"Center x coordinate in chunks\")\n\tflag.IntVar(&cz, \"cz\", 0, \"Center z coordinate in chunks\")\n\tflag.IntVar(&square, \"s\", math.MaxInt32, \"Chunk square size\")\n\tflag.IntVar(&rectx, \"rx\", math.MaxInt32, \"Width(x) of rectangle size\")\n\tflag.IntVar(&rectz, \"rz\", math.MaxInt32, \"Height(z) of rectangle size\")\n\tflag.IntVar(&faceLimit, \"fk\", math.MaxInt32, \"Face limit (thousands of faces)\")\n\tflag.BoolVar(&prt, \"prt\", false, \"Write out PRT file instead of Obj file\")\n\tflag.BoolVar(&mtlNumber, \"mtlnum\", false, \"Number materials instead of using names\")\n\tvar showHelp = flag.Bool(\"h\", false, \"Show Help\")\n\tflag.Parse()\n\n\truntime.GOMAXPROCS(maxProcs)\n\tfmt.Printf(\"mcobj %v (cpu: %d) Copyright (c) 2011 Jonathan Wright\\n\", version, runtime.GOMAXPROCS(0))\n\n\tif *showHelp || flag.NArg() == 0 {\n\t\tfmt.Fprintln(os.Stderr)\n\t\tfmt.Fprintln(os.Stderr, \"Usage: mcobj -cpu 4 -s 20 -o world1.obj\", exampleWorldPath)\n\t\tfmt.Fprintln(os.Stderr)\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tif faceLimit != math.MaxInt32 {\n\t\tfaceLimit *= 1000\n\t}\n\n\tif mtlNumber {\n\t\tMaterialNamer = new(NumberBlockIdNamer)\n\t} else {\n\t\tMaterialNamer = new(NameBlockIdNamer)\n\t}\n\n\tswitch {\n\tcase bx == 0 && cx == 0:\n\t\tcx = 0\n\tcase cx == 0:\n\t\tcx = int(math.Floor(bx \/ 16))\n\t}\n\n\tswitch {\n\tcase bz == 0 && cz == 0:\n\t\tcz = 0\n\tcase cz == 0:\n\t\tcz = int(math.Floor(bz \/ 16))\n\t}\n\n\tif square != math.MaxInt32 {\n\t\tchunkLimit = square * square\n\t\tvar h = square \/ 2\n\t\tchunkMask = &RectangeChunkMask{cx - h, cz - h, cx - h + square, cz - h + square}\n\t} else if rectx != math.MaxInt32 || rectz != math.MaxInt32 {\n\t\tswitch {\n\t\tcase rectx != math.MaxInt32 && rectz != math.MaxInt32:\n\t\t\tchunkLimit = rectx * rectz\n\t\t\tvar (\n\t\t\t\thx = rectx \/ 2\n\t\t\t\thz = rectz \/ 2\n\t\t\t)\n\t\t\tchunkMask = &RectangeChunkMask{cx - hx, cz - hz, cx - hx + rectx, cz - hz + rectz}\n\t\tcase rectx != math.MaxInt32:\n\t\t\tchunkLimit = math.MaxInt32\n\t\t\tvar hx = rectx \/ 2\n\t\t\tchunkMask = &RectangeChunkMask{cx - hx, math.MinInt32, cx - hx + rectx, math.MaxInt32}\n\t\tcase rectz != math.MaxInt32:\n\t\t\tchunkLimit = math.MaxInt32\n\t\t\tvar hz = rectz \/ 2\n\t\t\tchunkMask = &RectangeChunkMask{math.MinInt32, cz - hz, math.MaxInt32, cz - hz + rectz}\n\t\t}\n\t} else {\n\t\tchunkLimit = math.MaxInt32\n\t\tchunkMask = &AllChunksMask{}\n\t}\n\n\tif prt && outFilename == defaultObjOutFilename {\n\t\toutFilename = defaultPrtOutFilename\n\t}\n\n\tif solidSides {\n\t\tdefaultSide = &emptySide\n\t}\n\n\t{\n\t\tvar dir, _ = filepath.Split(strings.Replace(os.Args[0], \"\\\\\", \"\/\", -1))\n\t\tvar jsonError = loadBlockTypesJson(filepath.Join(dir, \"blocks.json\"))\n\t\tif jsonError != nil {\n\t\t\tfmt.Fprintln(os.Stderr, jsonError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i := 0; i < flag.NArg(); i++ {\n\t\tvar dirpath = flag.Arg(i)\n\t\tvar fi, err = os.Stat(dirpath)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tcontinue\n\t\t} else if !fi.IsDirectory() {\n\t\t\tfmt.Fprintln(os.Stderr, dirpath, \"is not a directory\")\n\t\t}\n\n\t\tvar world = OpenWorld(dirpath, chunkMask)\n\t\tvar pool, poolErr = world.ChunkPool()\n\t\tif poolErr != nil {\n\t\t\tfmt.Println(poolErr)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar generator OutputGenerator\n\t\tif prt {\n\t\t\tgenerator = new(PrtGenerator)\n\t\t} else {\n\t\t\tgenerator = new(ObjGenerator)\n\t\t}\n\t\tvar boundary = new(BoundaryLocator)\n\t\tboundary.Init()\n\t\tgenerator.Start(outFilename, pool.Remaining(), maxProcs, boundary)\n\n\t\tif walkEnclosedChunks(pool, world, cx, cz, generator.GetEnclosedJobsChan()) {\n\t\t\t<-generator.GetCompleteChan()\n\t\t}\n\n\t\tgenerator.Close()\n\t}\n}\n\ntype OutputGenerator interface {\n\tStart(outFilename string, total int, maxProcs int, boundary *BoundaryLocator)\n\tGetEnclosedJobsChan() chan *EnclosedChunkJob\n\tGetCompleteChan() chan bool\n\tClose()\n}\n\ntype EnclosedChunkJob struct {\n\tlast bool\n\tenclosed *EnclosedChunk\n}\n\nfunc walkEnclosedChunks(pool ChunkPool, opener ChunkOpener, cx, cz int, enclosedsChan chan *EnclosedChunkJob) bool {\n\tvar (\n\t\tsideCache = new(SideCache)\n\t\tstarted = false\n\t)\n\n\tfor i := 0; moreChunks(pool.Remaining()); i++ {\n\t\tfor x := 0; x < i && moreChunks(pool.Remaining()); x++ {\n\t\t\tfor z := 0; z < i && moreChunks(pool.Remaining()); z++ {\n\t\t\t\tvar (\n\t\t\t\t\tax = cx + unzigzag(x)\n\t\t\t\t\taz = cz + unzigzag(z)\n\t\t\t\t)\n\n\t\t\t\tif pool.Pop(ax, az) {\n\t\t\t\t\tloadSide(sideCache, opener, ax-1, az)\n\t\t\t\t\tloadSide(sideCache, opener, ax+1, az)\n\t\t\t\t\tloadSide(sideCache, opener, ax, az-1)\n\t\t\t\t\tloadSide(sideCache, opener, ax, az+1)\n\n\t\t\t\t\tvar chunk, loadErr = loadChunk2(opener, ax, az)\n\t\t\t\t\tif loadErr != nil {\n\t\t\t\t\t\tfmt.Println(loadErr)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar enclosed = sideCache.EncloseChunk(chunk)\n\t\t\t\t\t\tsideCache.AddChunk(chunk)\n\t\t\t\t\t\tchunkCount++\n\t\t\t\t\t\tenclosedsChan <- &EnclosedChunkJob{!moreChunks(pool.Remaining()), enclosed}\n\t\t\t\t\t\tstarted = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn started\n}\n\ntype Blocks []uint16\n\ntype BlockColumn []uint16\n\nfunc (b *Blocks) Get(x, y, z int) uint16 {\n\treturn (*b)[y+(z*128+(x*128*16))]\n}\n\nfunc (b *Blocks) Column(x, z int) BlockColumn {\n\tvar i = 128 * (z + x*16)\n\treturn BlockColumn((*b)[i : i+128])\n}\n\nfunc zigzag(n int) int {\n\treturn (n << 1) ^ (n >> 31)\n}\n\nfunc unzigzag(n int) int {\n\treturn (n >> 1) ^ (-(n & 1))\n}\n\nfunc moreChunks(unprocessedCount int) bool {\n\treturn unprocessedCount > 0 && faceCount < faceLimit && chunkCount < chunkLimit\n}\n\nfunc loadChunk(filename string) (*nbt.Chunk, os.Error) {\n\tvar file, fileErr = os.Open(filename)\n\tdefer file.Close()\n\tif fileErr != nil {\n\t\treturn nil, fileErr\n\t}\n\tvar chunk, err = nbt.ReadDat(file)\n\tif err == os.EOF {\n\t\terr = nil\n\t}\n\treturn chunk, err\n}\n\nfunc loadChunk2(opener ChunkOpener, x, z int) (*nbt.Chunk, os.Error) {\n\tvar r, openErr = opener.OpenChunk(x, z)\n\tif openErr != nil {\n\t\treturn nil, openErr\n\t}\n\tdefer r.Close()\n\n\tvar chunk, nbtErr = nbt.ReadNbt(r)\n\tif nbtErr != nil {\n\t\treturn nil, nbtErr\n\t}\n\treturn chunk, nil\n}\n\nfunc loadSide(sideCache *SideCache, opener ChunkOpener, x, z int) {\n\tif !sideCache.HasSide(x, z) && !chunkMask.IsMasked(x, z) {\n\t\tvar chunk, loadErr = loadChunk2(opener, x, z)\n\t\tif loadErr != nil {\n\t\t\tfmt.Println(loadErr)\n\t\t} else {\n\t\t\tsideCache.AddChunk(chunk)\n\t\t}\n\t}\n}\n\nfunc loadBlockTypesJson(filename string) os.Error {\n\tvar jsonBytes, jsonIoError = ioutil.ReadFile(filename)\n\n\tif jsonIoError != nil {\n\t\treturn jsonIoError\n\t}\n\n\tvar f interface{}\n\tvar unmarshalError = json.Unmarshal(jsonBytes, &f)\n\tif unmarshalError != nil {\n\t\treturn unmarshalError\n\t}\n\n\tvar lines, linesOk = f.([]interface{})\n\tif linesOk {\n\t\tfor _, line := range lines {\n\t\t\tvar fields, fieldsOk = line.(map[string]interface{})\n\t\t\tif fieldsOk {\n\t\t\t\tvar (\n\t\t\t\t\tblockId byte\n\t\t\t\t\tdata byte = 255\n\t\t\t\t\tdataArray []byte\n\t\t\t\t\tname string\n\t\t\t\t\tmass SingularOrAggregate = Mass\n\t\t\t\t\ttransparency Transparency = Opaque\n\t\t\t\t\tempty bool = false\n\t\t\t\t\tcolor uint32\n\t\t\t\t)\n\t\t\t\tfor k, v := range fields {\n\t\t\t\t\tswitch k {\n\t\t\t\t\tcase \"name\":\n\t\t\t\t\t\tname = v.(string)\n\t\t\t\t\tcase \"color\":\n\t\t\t\t\t\tswitch len(v.(string)) {\n\t\t\t\t\t\tcase 7:\n\t\t\t\t\t\t\tvar n, numErr = strconv.Btoui64(v.(string)[1:], 16)\n\t\t\t\t\t\t\tif numErr == nil {\n\t\t\t\t\t\t\t\tcolor = uint32(n*0x100 + 0xff)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase 9:\n\t\t\t\t\t\t\tvar n, numErr = strconv.Btoui64(v.(string)[1:], 16)\n\t\t\t\t\t\t\tif numErr == nil {\n\t\t\t\t\t\t\t\tcolor = uint32(n)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"blockId\":\n\t\t\t\t\t\tblockId = byte(v.(float64))\n\t\t\t\t\tcase \"data\":\n\t\t\t\t\t\tswitch d := v.(type) {\n\t\t\t\t\t\tcase float64:\n\t\t\t\t\t\t\tdata = byte(d)\n\t\t\t\t\t\tcase []interface{}:\n\t\t\t\t\t\t\tdataArray = make([]byte, len(d))\n\t\t\t\t\t\t\tfor i, value := range d {\n\t\t\t\t\t\t\t\tdataArray[i] = byte(value.(float64))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"item\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tmass = Item\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmass = Mass\n\t\t\t\t\t\t\ttransparency = Opaque\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"transparent\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttransparency = Opaque\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"empty\":\n\t\t\t\t\t\tif v.(bool) {\n\t\t\t\t\t\t\tempty = true\n\t\t\t\t\t\t\ttransparency = Transparent\n\t\t\t\t\t\t\tmass = Mass\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tempty = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tblockTypeMap[blockId] = &BlockType{blockId, mass, transparency, empty}\n\t\t\t\tif dataArray == nil {\n\t\t\t\t\tif data != 255 {\n\t\t\t\t\t\textraData[blockId] = true\n\t\t\t\t\t\tcolors = append(colors, MTL{blockId, data, color, name})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcolors[blockId] = MTL{blockId, data, color, name}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\textraData[blockId] = true\n\t\t\t\t\tfor _, data = range dataArray {\n\t\t\t\t\t\tcolors = append(colors, MTL{blockId, data, color, fmt.Sprintf(\"%s_%d\", name, data)})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package fscache\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in\/djherbis\/stream.v1\"\n)\n\ntype memFS struct {\n\tmu sync.RWMutex\n\tfiles map[string]*memFile\n}\n\n\/\/ NewMemFs creates an in-memory FileSystem.\n\/\/ It does not support persistence (Reload is a nop).\nfunc NewMemFs() FileSystem {\n\treturn &memFS{\n\t\tfiles: make(map[string]*memFile),\n\t}\n}\n\nfunc (fs *memFS) Stat(name string) (FileInfo, error) {\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\tf, ok := fs.files[name]\n\tif !ok {\n\t\treturn FileInfo{}, errors.New(\"file has not been read\")\n\t}\n\n\tsize, err := fs.size(name)\n\tif err != nil {\n\t\treturn FileInfo{}, err\n\t}\n\n\treturn FileInfo{\n\t\tname: name,\n\t\tsize: size,\n\t\tfileMode: os.ModeIrregular,\n\t\tisDir: false,\n\t\tsys: nil,\n\t\trt: f.rt,\n\t\twt: f.wt,\n\t}, nil\n}\n\nfunc (fs *memFS) size(name string) (int64, error) {\n\tf, ok := fs.files[name]\n\tif ok {\n\t\treturn int64(len(f.Bytes())), nil\n\t}\n\treturn 0, errors.New(\"file has not been read\")\n}\n\nfunc (fs *memFS) Reload(add func(key, name string)) error {\n\treturn nil\n}\n\nfunc (fs *memFS) Create(key string) (stream.File, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tif _, ok := fs.files[key]; ok {\n\t\treturn nil, errors.New(\"file exists\")\n\t}\n\tfile := &memFile{\n\t\tname: key,\n\t\tr: bytes.NewBuffer(nil),\n\t\twt: time.Now(),\n\t}\n\tfile.memReader.memFile = file\n\tfs.files[key] = file\n\treturn file, nil\n}\n\nfunc (fs *memFS) Open(name string) (stream.File, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tif f, ok := fs.files[name]; ok {\n\t\tf.rt = time.Now()\n\t\treturn &memReader{memFile: f}, nil\n\t}\n\treturn nil, errors.New(\"file does not exist\")\n}\n\nfunc (fs *memFS) Remove(key string) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tdelete(fs.files, key)\n\treturn nil\n}\n\nfunc (fs *memFS) RemoveAll() error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tfs.files = make(map[string]*memFile)\n\treturn nil\n}\n\ntype memFile struct {\n\tmu sync.RWMutex\n\tname string\n\tr *bytes.Buffer\n\tmemReader\n\trt, wt time.Time\n}\n\nfunc (f *memFile) Name() string {\n\treturn f.name\n}\n\nfunc (f *memFile) Write(p []byte) (int, error) {\n\tif len(p) > 0 {\n\t\tf.mu.Lock()\n\t\tdefer f.mu.Unlock()\n\t\treturn f.r.Write(p)\n\t}\n\treturn len(p), nil\n}\n\nfunc (f *memFile) Bytes() []byte {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.r.Bytes()\n}\n\nfunc (f *memFile) Close() error {\n\treturn nil\n}\n\ntype memReader struct {\n\t*memFile\n\tn int\n}\n\nfunc (r *memReader) ReadAt(p []byte, off int64) (n int, err error) {\n\tdata := r.Bytes()\n\tif int64(len(data)) < off {\n\t\treturn 0, io.EOF\n\t}\n\tn, err = bytes.NewReader(data[off:]).ReadAt(p, 0)\n\treturn n, err\n}\n\nfunc (r *memReader) Read(p []byte) (n int, err error) {\n\tn, err = bytes.NewReader(r.Bytes()[r.n:]).Read(p)\n\tr.n += n\n\treturn n, err\n}\n\nfunc (r *memReader) Close() error {\n\treturn nil\n}\n<commit_msg>Remove redudant `memFs.size` method<commit_after>package fscache\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in\/djherbis\/stream.v1\"\n)\n\ntype memFS struct {\n\tmu sync.RWMutex\n\tfiles map[string]*memFile\n}\n\n\/\/ NewMemFs creates an in-memory FileSystem.\n\/\/ It does not support persistence (Reload is a nop).\nfunc NewMemFs() FileSystem {\n\treturn &memFS{\n\t\tfiles: make(map[string]*memFile),\n\t}\n}\n\nfunc (fs *memFS) Stat(name string) (FileInfo, error) {\n\tfs.mu.RLock()\n\tdefer fs.mu.RUnlock()\n\tf, ok := fs.files[name]\n\tif !ok {\n\t\treturn FileInfo{}, errors.New(\"file has not been read\")\n\t}\n\n\tsize := int64(len(f.Bytes()))\n\n\treturn FileInfo{\n\t\tname: name,\n\t\tsize: size,\n\t\tfileMode: os.ModeIrregular,\n\t\tisDir: false,\n\t\tsys: nil,\n\t\trt: f.rt,\n\t\twt: f.wt,\n\t}, nil\n}\n\nfunc (fs *memFS) Reload(add func(key, name string)) error {\n\treturn nil\n}\n\nfunc (fs *memFS) Create(key string) (stream.File, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tif _, ok := fs.files[key]; ok {\n\t\treturn nil, errors.New(\"file exists\")\n\t}\n\tfile := &memFile{\n\t\tname: key,\n\t\tr: bytes.NewBuffer(nil),\n\t\twt: time.Now(),\n\t}\n\tfile.memReader.memFile = file\n\tfs.files[key] = file\n\treturn file, nil\n}\n\nfunc (fs *memFS) Open(name string) (stream.File, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tif f, ok := fs.files[name]; ok {\n\t\tf.rt = time.Now()\n\t\treturn &memReader{memFile: f}, nil\n\t}\n\treturn nil, errors.New(\"file does not exist\")\n}\n\nfunc (fs *memFS) Remove(key string) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tdelete(fs.files, key)\n\treturn nil\n}\n\nfunc (fs *memFS) RemoveAll() error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tfs.files = make(map[string]*memFile)\n\treturn nil\n}\n\ntype memFile struct {\n\tmu sync.RWMutex\n\tname string\n\tr *bytes.Buffer\n\tmemReader\n\trt, wt time.Time\n}\n\nfunc (f *memFile) Name() string {\n\treturn f.name\n}\n\nfunc (f *memFile) Write(p []byte) (int, error) {\n\tif len(p) > 0 {\n\t\tf.mu.Lock()\n\t\tdefer f.mu.Unlock()\n\t\treturn f.r.Write(p)\n\t}\n\treturn len(p), nil\n}\n\nfunc (f *memFile) Bytes() []byte {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.r.Bytes()\n}\n\nfunc (f *memFile) Close() error {\n\treturn nil\n}\n\ntype memReader struct {\n\t*memFile\n\tn int\n}\n\nfunc (r *memReader) ReadAt(p []byte, off int64) (n int, err error) {\n\tdata := r.Bytes()\n\tif int64(len(data)) < off {\n\t\treturn 0, io.EOF\n\t}\n\tn, err = bytes.NewReader(data[off:]).ReadAt(p, 0)\n\treturn n, err\n}\n\nfunc (r *memReader) Read(p []byte) (n int, err error) {\n\tn, err = bytes.NewReader(r.Bytes()[r.n:]).Read(p)\n\tr.n += n\n\treturn n, err\n}\n\nfunc (r *memReader) Close() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package grpsink\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/DataDog\/datadog-go\/statsd\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stripe\/veneur\/protocol\"\n\t\"github.com\/stripe\/veneur\/sinks\"\n\t\"github.com\/stripe\/veneur\/ssf\"\n\t\"github.com\/stripe\/veneur\/trace\"\n\t\"github.com\/stripe\/veneur\/trace\/metrics\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/connectivity\"\n\t\"google.golang.org\/grpc\/status\"\n)\n\n\/\/ GRPCStreamingSpanSink is a sink that streams spans to a configurable target\n\/\/ service over gRPC. The sink is only tied to the grpc_sink.proto definition of\n\/\/ a SpanSink service, and thus is generic with respect to the specific server\n\/\/ it is connecting to.\ntype GRPCStreamingSpanSink struct {\n\tname string\n\ttarget string\n\tgrpcConn *grpc.ClientConn\n\tsinkClient SpanSinkClient\n\tstream SpanSink_SendSpansClient\n\tstreamMut sync.Mutex\n\tstats *statsd.Client\n\tcommonTags map[string]string\n\ttraceClient *trace.Client\n\tlog *logrus.Logger\n\tsentCount, dropCount uint32 \/\/ Total counts of sent and errors, respectively\n\tbad uint32 \/\/ If 1, indicates that the current stream is dead (has seen an EOF)\n}\n\nvar _ sinks.SpanSink = &GRPCStreamingSpanSink{}\n\n\/\/ NewGRPCStreamingSpanSink creates a sinks.SpanSink that can write to any\n\/\/ compliant gRPC server.\n\/\/\n\/\/ The target parameter should be of the \"host:port\"; the name parameter is\n\/\/ prepended with \"grpc-\", and is used when reporting logs in order to permit\n\/\/ differentiation between various services.\n\/\/\n\/\/ Any grpc.CallOpts that are provided will be used while first establishing the\n\/\/ connection to the target server (in grpc.DialContext()).\nfunc NewGRPCStreamingSpanSink(ctx context.Context, target, name string, commonTags map[string]string, log *logrus.Logger, opts ...grpc.DialOption) (*GRPCStreamingSpanSink, error) {\n\tname = \"grpc-\" + name\n\t\/\/ We want the stream in fail-fast mode. This is the default, but it's\n\t\/\/ important to the design, so it's worth being explicit: if the streams\n\t\/\/ weren't in fail-fast mode, then Ingest() calls will block while connections\n\t\/\/ get re-established in the background, resulting in undesirable backpressure.\n\t\/\/ For a use case like this sink, unequivocally preferred to fail fast instead,\n\t\/\/ resulting in dropped spans.\n\tdco := grpc.WithDefaultCallOptions(grpc.FailFast(true))\n\tconn, err := grpc.DialContext(ctx, target, append(opts, dco)...)\n\tif err != nil {\n\t\tlog.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"target\": target,\n\t\t}).Error(\"Error establishing connection to gRPC server\")\n\t\treturn nil, err\n\t}\n\n\treturn &GRPCStreamingSpanSink{\n\t\tgrpcConn: conn,\n\t\tname: name,\n\t\ttarget: target,\n\t\tsinkClient: NewSpanSinkClient(conn),\n\t\tcommonTags: commonTags,\n\t\tlog: log,\n\t}, nil\n}\n\n\/\/ Start performs final preparations on the sink before it is\n\/\/ ready to begin ingesting spans.\nfunc (gs *GRPCStreamingSpanSink) Start(cl *trace.Client) error {\n\tgs.traceClient = cl\n\n\tvar err error\n\tgs.stream, err = gs.sinkClient.SendSpans(context.TODO())\n\tif err != nil {\n\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\"name\": gs.name,\n\t\t\t\"target\": gs.target,\n\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\tlogrus.ErrorKey: err,\n\t\t}).Error(\"Failed to set up a stream over gRPC channel to sink target\")\n\t\treturn err\n\t}\n\n\tgo gs.maintainStream(context.TODO())\n\treturn nil\n}\n\n\/\/ maintainStream is intended to be run in a background goroutine. It is kicked\n\/\/ off by Start(), and is only kept as a standalone method for testing purposes.\n\/\/\n\/\/ This method runs an endless loop that reacts to state changes in the\n\/\/ underlying channel by automatically attempting to re-establish the stream\n\/\/ connection when it moves back into the 'READY' state. See\n\/\/ https:\/\/github.com\/grpc\/grpc\/blob\/master\/doc\/connectivity-semantics-and-api.mdA\n\/\/ for details about gRPC's connectivity state machine.\nfunc (gs *GRPCStreamingSpanSink) maintainStream(ctx context.Context) {\n\tfor {\n\t\t\/\/ This call will block on a channel receive until the gRPC connection\n\t\t\/\/ becomes unhealthy. Then, spring into action!\n\t\tstate := gs.grpcConn.GetState()\n\t\tswitch state {\n\t\tcase connectivity.Idle:\n\t\t\t\/\/ gRPC considers an open stream to be an active RPC, and channels\n\t\t\t\/\/ normally only go idle if there are no active RPC within the\n\t\t\t\/\/ channel's timeout window. It should be nigh-impossible for that\n\t\t\t\/\/ to happen here, so we record entering the Idle state as an error.\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t\tlogrus.ErrorKey: fmt.Errorf(\"gRPC sink became idle; should be nearly impossible\"),\n\t\t\t}).Error(\"gRPC channel transitioned to idle\")\n\n\t\t\t\/\/ With the error sent, now fall through to recreating the stream\n\t\t\t\/\/ as if the channel were ready, as stream creation should force\n\t\t\t\/\/ the channel back out of idle. This isn't ideal - it'll end up\n\t\t\t\/\/ recreating the stream again after re-reaching READY - but it's\n\t\t\t\/\/ not a huge problem, as this case is essentially unreachable.\n\t\t\tfallthrough\n\t\tcase connectivity.Ready:\n\t\t\tgs.streamMut.Lock()\n\t\t\tstream, err := gs.sinkClient.SendSpans(ctx)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ This is a weird case and probably shouldn't be reachable, but\n\t\t\t\t\/\/ if stream setup fails after recovery, then just wait for a\n\t\t\t\t\/\/ second and try again.\n\t\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\t\"name\": gs.name,\n\t\t\t\t\t\"target\": gs.target,\n\t\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t}).Error(\"Failed to set up a new stream after gRPC channel recovered\")\n\t\t\t\tgs.streamMut.Unlock()\n\t\t\t\t\/\/ Wait for 1s before re-attempting to set up the stream.\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ CAS inside the mutex so that we can't ever get a double-send of the\n\t\t\t\/\/ \"first time channel degradation\" log error.\n\t\t\tatomic.CompareAndSwapUint32(&gs.bad, 1, 0)\n\t\t\tgs.stream = stream\n\t\t\tgs.streamMut.Unlock()\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t}).Info(\"gRPC channel and stream re-established\")\n\t\tcase connectivity.Connecting:\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": state,\n\t\t\t}).Info(\"Attempting to re-establish gRPC channel connection\")\n\t\t\t\/\/ Nothing to do here except wait.\n\t\tcase connectivity.TransientFailure:\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": state,\n\t\t\t}).Warn(\"gRPC channel now in transient failure\")\n\t\tcase connectivity.Shutdown:\n\t\t\t\/\/ The current design has no path to actually shutting down the\n\t\t\t\/\/ connection, so this should be unreachable.\n\t\t\treturn\n\t\t}\n\t\tgs.grpcConn.WaitForStateChange(context.Background(), state)\n\t}\n}\n\n\/\/ Name returns this sink's name. As the gRPC sink is generic, it's expected\n\/\/ that this is set via configuration and injected.\nfunc (gs *GRPCStreamingSpanSink) Name() string {\n\treturn gs.name\n}\n\n\/\/ Ingest takes in a span and streams it over gRPC to the connected server.\nfunc (gs *GRPCStreamingSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error {\n\tif err := protocol.ValidateTrace(ssfSpan); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO(sdboyer) validation of span, e.g. time bounds like in datadog sink?\n\n\t\/\/ Apply any common tags, superseding defaults passed in at sink creation\n\t\/\/ in the event of overlap.\n\tfor k, v := range gs.commonTags {\n\t\tssfSpan.Tags[k] = v\n\t}\n\n\t\/\/ gRPC indicates that it is safe to send and receive on a stream at the\n\t\/\/ same time, but it is not safe to simultaneously send. Guard against that\n\t\/\/ with a mutex.\n\tgs.streamMut.Lock()\n\terr := gs.stream.Send(ssfSpan)\n\tgs.streamMut.Unlock()\n\n\tif err != nil {\n\t\tatomic.AddUint32(&gs.dropCount, 1)\n\n\t\t\/\/ gRPC guarantees that an error returned from an RPC call will be of\n\t\t\/\/ type status.Status. In the unexpected event that they're not, this\n\t\t\/\/ call creates a dummy type, so there's no risk of panic.\n\t\tserr := status.Convert(err)\n\t\tstate := gs.grpcConn.GetState()\n\n\t\tif err == io.EOF {\n\t\t\t\/\/ EOF should be a terminal state for the stream; we only need to\n\t\t\t\/\/ log that we've seen it once.\n\t\t\tif atomic.CompareAndSwapUint32(&gs.bad, 0, 1) {\n\t\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t\t\"target\": gs.target,\n\t\t\t\t\t\"name\": gs.name,\n\t\t\t\t\t\"chanstate\": state.String(),\n\t\t\t\t}).Warn(\"Target server closed the span stream\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Errors that are not io.EOF are more interesting and, presumably,\n\t\t\t\/\/ ephemeral. It's worth logging each one of them.\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"chanstate\": state.String(),\n\t\t\t\t\"code\": serr.Code(),\n\t\t\t\t\"details\": serr.Details(),\n\t\t\t\t\"message\": serr.Message(),\n\t\t\t}).Warn(\"Error while streaming trace to gRPC target server\")\n\t\t}\n\t} else {\n\t\tatomic.AddUint32(&gs.sentCount, 1)\n\t}\n\n\treturn err\n}\n\n\/\/ Flush reports metrics about the number of failed and successful\n\/\/\n\/\/ No data is sent to the target sink on Flush(); this sink operates\n\/\/ entirely over a stream on Ingest().\nfunc (gs *GRPCStreamingSpanSink) Flush() {\n\tsamples := &ssf.Samples{}\n\tsamples.Add(\n\t\tssf.Count(\n\t\t\tsinks.MetricKeyTotalSpansFlushed,\n\t\t\tfloat32(atomic.LoadUint32(&gs.sentCount)),\n\t\t\tmap[string]string{\"sink\": gs.Name()}),\n\t\tssf.Count(\n\t\t\tsinks.MetricKeyTotalSpansDropped,\n\t\t\tfloat32(atomic.LoadUint32(&gs.dropCount)),\n\t\t\tmap[string]string{\"sink\": gs.Name()},\n\t\t),\n\t)\n\n\tmetrics.Report(gs.traceClient, samples)\n\treturn\n}\n<commit_msg>Comment improvements<commit_after>package grpsink\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/DataDog\/datadog-go\/statsd\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stripe\/veneur\/protocol\"\n\t\"github.com\/stripe\/veneur\/sinks\"\n\t\"github.com\/stripe\/veneur\/ssf\"\n\t\"github.com\/stripe\/veneur\/trace\"\n\t\"github.com\/stripe\/veneur\/trace\/metrics\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/connectivity\"\n\t\"google.golang.org\/grpc\/status\"\n)\n\n\/\/ GRPCStreamingSpanSink is a sink that streams spans to a configurable target\n\/\/ service over gRPC. The sink is only tied to the grpc_sink.proto definition of\n\/\/ a SpanSink service, and thus is generic with respect to the specific server\n\/\/ it is connecting to.\ntype GRPCStreamingSpanSink struct {\n\tname string\n\ttarget string\n\t\/\/ The underlying gRPC connection (\"channel\") to the target server.\n\tgrpcConn *grpc.ClientConn\n\t\/\/ The gRPC client that can generate stream connections.\n\tsinkClient SpanSinkClient\n\tstream SpanSink_SendSpansClient\n\t\/\/ streamMut guards two classes of interaction with the stream: actually\n\t\/\/ sending over the stream (gRPC requires that only one goroutine send on\n\t\/\/ a stream at a time), and automatically recreating the stream after a\n\t\/\/ connection drops.\n\tstreamMut sync.Mutex\n\t\/\/ Total counts of sent and dropped spans, respectively\n\tsentCount, dropCount uint32\n\t\/\/ If 1, indicates that the current stream is dead (has seen an EOF)\n\tbad uint32\n\tstats *statsd.Client\n\tcommonTags map[string]string\n\ttraceClient *trace.Client\n\tlog *logrus.Logger\n}\n\nvar _ sinks.SpanSink = &GRPCStreamingSpanSink{}\n\n\/\/ NewGRPCStreamingSpanSink creates a sinks.SpanSink that can write to any\n\/\/ compliant gRPC server.\n\/\/\n\/\/ The target parameter should be of the \"host:port\"; the name parameter is\n\/\/ prepended with \"grpc-\", and is used when reporting logs in order to permit\n\/\/ differentiation between various services.\n\/\/\n\/\/ Any grpc.CallOpts that are provided will be used while first establishing the\n\/\/ connection to the target server (in grpc.DialContext()).\nfunc NewGRPCStreamingSpanSink(ctx context.Context, target, name string, commonTags map[string]string, log *logrus.Logger, opts ...grpc.DialOption) (*GRPCStreamingSpanSink, error) {\n\tname = \"grpc-\" + name\n\t\/\/ We want the stream in fail-fast mode. This is the default, but it's\n\t\/\/ important to the design, so it's worth being explicit: if the streams\n\t\/\/ weren't in fail-fast mode, then Ingest() calls will block while connections\n\t\/\/ get re-established in the background, resulting in undesirable backpressure.\n\t\/\/ For a use case like this sink, unequivocally preferred to fail fast instead,\n\t\/\/ resulting in dropped spans.\n\tdco := grpc.WithDefaultCallOptions(grpc.FailFast(true))\n\tconn, err := grpc.DialContext(ctx, target, append(opts, dco)...)\n\tif err != nil {\n\t\tlog.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"target\": target,\n\t\t}).Error(\"Error establishing connection to gRPC server\")\n\t\treturn nil, err\n\t}\n\n\treturn &GRPCStreamingSpanSink{\n\t\tgrpcConn: conn,\n\t\tname: name,\n\t\ttarget: target,\n\t\tsinkClient: NewSpanSinkClient(conn),\n\t\tcommonTags: commonTags,\n\t\tlog: log,\n\t}, nil\n}\n\n\/\/ Start performs final preparations on the sink before it is\n\/\/ ready to begin ingesting spans.\nfunc (gs *GRPCStreamingSpanSink) Start(cl *trace.Client) error {\n\tgs.traceClient = cl\n\n\tvar err error\n\tgs.stream, err = gs.sinkClient.SendSpans(context.TODO())\n\tif err != nil {\n\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\"name\": gs.name,\n\t\t\t\"target\": gs.target,\n\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\tlogrus.ErrorKey: err,\n\t\t}).Error(\"Failed to set up a stream over gRPC channel to sink target\")\n\t\treturn err\n\t}\n\n\tgo gs.maintainStream(context.TODO())\n\treturn nil\n}\n\n\/\/ maintainStream is intended to be run in a background goroutine. It is kicked\n\/\/ off by Start(), and is only kept as a standalone method for testing purposes.\n\/\/\n\/\/ This method runs an endless loop that reacts to state changes in the\n\/\/ underlying channel by automatically attempting to re-establish the stream\n\/\/ connection when it moves back into the 'READY' state. See\n\/\/ https:\/\/github.com\/grpc\/grpc\/blob\/master\/doc\/connectivity-semantics-and-api.mdA\n\/\/ for details about gRPC's connectivity state machine.\nfunc (gs *GRPCStreamingSpanSink) maintainStream(ctx context.Context) {\n\tfor {\n\t\t\/\/ This call will block on a channel receive until the gRPC connection\n\t\t\/\/ becomes unhealthy. Then, spring into action!\n\t\tstate := gs.grpcConn.GetState()\n\t\tswitch state {\n\t\tcase connectivity.Idle:\n\t\t\t\/\/ gRPC considers an open stream to be an active RPC, and channels\n\t\t\t\/\/ normally only go idle if there are no active RPC within the\n\t\t\t\/\/ channel's timeout window. It should be nigh-impossible for that\n\t\t\t\/\/ to happen here, so we record entering the Idle state as an error.\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t\tlogrus.ErrorKey: fmt.Errorf(\"gRPC sink became idle; should be nearly impossible\"),\n\t\t\t}).Error(\"gRPC channel transitioned to idle\")\n\n\t\t\t\/\/ With the error sent, now fall through to recreating the stream\n\t\t\t\/\/ as if the channel were ready, as stream creation should force\n\t\t\t\/\/ the channel back out of idle. This isn't ideal - it'll end up\n\t\t\t\/\/ recreating the stream again after re-reaching READY - but it's\n\t\t\t\/\/ not a huge problem, as this case is essentially unreachable.\n\t\t\tfallthrough\n\t\tcase connectivity.Ready:\n\t\t\tgs.streamMut.Lock()\n\t\t\tstream, err := gs.sinkClient.SendSpans(ctx)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ This is a weird case and probably shouldn't be reachable, but\n\t\t\t\t\/\/ if stream setup fails after recovery, then just wait and retry.\n\t\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\t\"name\": gs.name,\n\t\t\t\t\t\"target\": gs.target,\n\t\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t}).Error(\"Failed to set up a new stream after gRPC channel recovered\")\n\t\t\t\tgs.streamMut.Unlock()\n\t\t\t\t\/\/ Wait for 1s before re-attempting to set up the stream.\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ CAS inside the mutex means that it's impossible for for the same\n\t\t\t\/\/ stream instance to log an io.EOF twice.\n\t\t\tatomic.CompareAndSwapUint32(&gs.bad, 1, 0)\n\t\t\tgs.stream = stream\n\t\t\tgs.streamMut.Unlock()\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": gs.grpcConn.GetState().String(),\n\t\t\t}).Info(\"gRPC channel and stream re-established\")\n\t\tcase connectivity.Connecting:\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": state,\n\t\t\t}).Info(\"Attempting to re-establish gRPC channel connection\")\n\t\t\t\/\/ Nothing to do here except wait.\n\t\tcase connectivity.TransientFailure:\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"chanstate\": state,\n\t\t\t}).Warn(\"gRPC channel now in transient failure\")\n\t\tcase connectivity.Shutdown:\n\t\t\t\/\/ The current design has no path to actually shutting down the\n\t\t\t\/\/ connection, so this should be unreachable.\n\t\t\treturn\n\t\t}\n\t\tgs.grpcConn.WaitForStateChange(context.Background(), state)\n\t}\n}\n\n\/\/ Name returns this sink's name. As the gRPC sink is generic, it's expected\n\/\/ that this is set via configuration and injected.\nfunc (gs *GRPCStreamingSpanSink) Name() string {\n\treturn gs.name\n}\n\n\/\/ Ingest takes in a span and streams it over gRPC to the connected server.\nfunc (gs *GRPCStreamingSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error {\n\tif err := protocol.ValidateTrace(ssfSpan); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO(sdboyer) validation of span, e.g. time bounds like in datadog sink?\n\n\t\/\/ Apply any common tags, superseding defaults passed in at sink creation\n\t\/\/ in the event of overlap.\n\tfor k, v := range gs.commonTags {\n\t\tssfSpan.Tags[k] = v\n\t}\n\n\t\/\/ gRPC indicates that it is safe to send and receive on a stream at the\n\t\/\/ same time, but it is not safe to simultaneously send. Guard against that\n\t\/\/ with a mutex.\n\tgs.streamMut.Lock()\n\terr := gs.stream.Send(ssfSpan)\n\tgs.streamMut.Unlock()\n\n\tif err != nil {\n\t\tatomic.AddUint32(&gs.dropCount, 1)\n\n\t\t\/\/ gRPC guarantees that an error returned from an RPC call will be of\n\t\t\/\/ type status.Status. In the unexpected event that they're not, this\n\t\t\/\/ call creates a dummy type, so there's no risk of panic.\n\t\tserr := status.Convert(err)\n\t\tstate := gs.grpcConn.GetState()\n\n\t\tif err == io.EOF {\n\t\t\t\/\/ EOF should be a terminal state for the stream; we only need to\n\t\t\t\/\/ log that we've seen it once.\n\t\t\tif atomic.CompareAndSwapUint32(&gs.bad, 0, 1) {\n\t\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t\t\"target\": gs.target,\n\t\t\t\t\t\"name\": gs.name,\n\t\t\t\t\t\"chanstate\": state.String(),\n\t\t\t\t}).Warn(\"Target server closed the span stream\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Errors that are not io.EOF are more interesting and, presumably,\n\t\t\t\/\/ ephemeral. It's worth logging each one of them.\n\t\t\tgs.log.WithFields(logrus.Fields{\n\t\t\t\tlogrus.ErrorKey: err,\n\t\t\t\t\"target\": gs.target,\n\t\t\t\t\"name\": gs.name,\n\t\t\t\t\"chanstate\": state.String(),\n\t\t\t\t\"code\": serr.Code(),\n\t\t\t\t\"details\": serr.Details(),\n\t\t\t\t\"message\": serr.Message(),\n\t\t\t}).Warn(\"Error while streaming trace to gRPC target server\")\n\t\t}\n\t} else {\n\t\tatomic.AddUint32(&gs.sentCount, 1)\n\t}\n\n\treturn err\n}\n\n\/\/ Flush reports total counts of the number of sent and dropped spans over its\n\/\/ lifetime.\n\/\/\n\/\/ No data is sent to the target sink on Flush(), as this sink operates entirely\n\/\/ over a stream on Ingest().\nfunc (gs *GRPCStreamingSpanSink) Flush() {\n\tsamples := &ssf.Samples{}\n\tsamples.Add(\n\t\tssf.Count(\n\t\t\tsinks.MetricKeyTotalSpansFlushed,\n\t\t\tfloat32(atomic.LoadUint32(&gs.sentCount)),\n\t\t\tmap[string]string{\"sink\": gs.Name()}),\n\t\tssf.Count(\n\t\t\tsinks.MetricKeyTotalSpansDropped,\n\t\t\tfloat32(atomic.LoadUint32(&gs.dropCount)),\n\t\t\tmap[string]string{\"sink\": gs.Name()},\n\t\t),\n\t)\n\n\tmetrics.Report(gs.traceClient, samples)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/events\"\n\truntimeclasstest \"k8s.io\/kubernetes\/pkg\/kubelet\/runtimeclass\/testing\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\tutilpointer \"k8s.io\/utils\/pointer\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nconst (\n\t\/\/ PreconfiguredRuntimeHandler is the name of the runtime handler that is expected to be\n\t\/\/ preconfigured in the test environment.\n\tPreconfiguredRuntimeHandler = \"test-handler\"\n\t\/\/ DockerRuntimeHandler is a hardcoded runtime handler that is accepted by dockershim, and\n\t\/\/ treated equivalently to a nil runtime handler.\n\tDockerRuntimeHandler = \"docker\"\n)\n\nvar _ = ginkgo.Describe(\"[sig-node] RuntimeClass\", func() {\n\tf := framework.NewDefaultFramework(\"runtimeclass\")\n\n\tginkgo.It(\"should reject a Pod requesting a non-existent RuntimeClass\", func() {\n\t\trcName := f.Namespace.Name + \"-nonexistent\"\n\t\tpod := createRuntimeClassPod(f, rcName)\n\t\texpectSandboxFailureEvent(f, pod, fmt.Sprintf(\"\\\"%s\\\" not found\", rcName))\n\t})\n\n\tginkgo.It(\"should reject a Pod requesting a RuntimeClass with an unconfigured handler\", func() {\n\t\thandler := f.Namespace.Name + \"-handler\"\n\t\trcName := createRuntimeClass(f, \"unconfigured-handler\", handler)\n\t\tpod := createRuntimeClassPod(f, rcName)\n\t\texpectSandboxFailureEvent(f, pod, handler)\n\t})\n\n\t\/\/ This test requires that the PreconfiguredRuntimeHandler has already been set up on nodes.\n\tginkgo.It(\"should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]\", func() {\n\t\t\/\/ The built-in docker runtime does not support configuring runtime handlers.\n\t\thandler := PreconfiguredRuntimeHandler\n\t\tif framework.TestContext.ContainerRuntime == \"docker\" {\n\t\t\thandler = DockerRuntimeHandler\n\t\t}\n\n\t\trcName := createRuntimeClass(f, \"preconfigured-handler\", handler)\n\t\tpod := createRuntimeClassPod(f, rcName)\n\t\texpectPodSuccess(f, pod)\n\t})\n\n\tginkgo.It(\"should reject a Pod requesting a deleted RuntimeClass\", func() {\n\t\trcName := createRuntimeClass(f, \"delete-me\", \"runc\")\n\t\trcClient := f.ClientSet.NodeV1beta1().RuntimeClasses()\n\n\t\tginkgo.By(\"Deleting RuntimeClass \"+rcName, func() {\n\t\t\terr := rcClient.Delete(rcName, nil)\n\t\t\tframework.ExpectNoError(err, \"failed to delete RuntimeClass %s\", rcName)\n\n\t\t\tginkgo.By(\"Waiting for the RuntimeClass to disappear\")\n\t\t\tframework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {\n\t\t\t\t_, err := rcClient.Get(rcName, metav1.GetOptions{})\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn true, nil \/\/ done\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn true, err \/\/ stop wait with error\n\t\t\t\t}\n\t\t\t\treturn false, nil\n\t\t\t}))\n\t\t})\n\n\t\tpod := createRuntimeClassPod(f, rcName)\n\t\texpectSandboxFailureEvent(f, pod, fmt.Sprintf(\"\\\"%s\\\" not found\", rcName))\n\t})\n})\n\n\/\/ createRuntimeClass generates a RuntimeClass with the desired handler and a \"namespaced\" name,\n\/\/ synchronously creates it, and returns the generated name.\nfunc createRuntimeClass(f *framework.Framework, name, handler string) string {\n\tuniqueName := fmt.Sprintf(\"%s-%s\", f.Namespace.Name, name)\n\trc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)\n\trc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(rc)\n\tframework.ExpectNoError(err, \"failed to create RuntimeClass resource\")\n\treturn rc.GetName()\n}\n\n\/\/ createRuntimeClass creates a test pod with the given runtimeClassName.\nfunc createRuntimeClassPod(f *framework.Framework, runtimeClassName string) *v1.Pod {\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: fmt.Sprintf(\"test-runtimeclass-%s-\", runtimeClassName),\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tRuntimeClassName: &runtimeClassName,\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tName: \"test\",\n\t\t\t\tImage: imageutils.GetE2EImage(imageutils.BusyBox),\n\t\t\t\tCommand: []string{\"true\"},\n\t\t\t}},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tAutomountServiceAccountToken: utilpointer.BoolPtr(false),\n\t\t},\n\t}\n\treturn f.PodClient().Create(pod)\n}\n\n\/\/ expectPodSuccess waits for the given pod to terminate successfully.\nfunc expectPodSuccess(f *framework.Framework, pod *v1.Pod) {\n\tframework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(\n\t\tf.ClientSet, pod.Name, f.Namespace.Name))\n}\n\n\/\/ expectSandboxFailureEvent polls for an event with reason \"FailedCreatePodSandBox\" containing the\n\/\/ expected message string.\nfunc expectSandboxFailureEvent(f *framework.Framework, pod *v1.Pod, msg string) {\n\teventSelector := fields.Set{\n\t\t\"involvedObject.kind\": \"Pod\",\n\t\t\"involvedObject.name\": pod.Name,\n\t\t\"involvedObject.namespace\": f.Namespace.Name,\n\t\t\"reason\": events.FailedCreatePodSandBox,\n\t}.AsSelector().String()\n\tframework.ExpectNoError(WaitTimeoutForEvent(\n\t\tf.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodEventTimeout))\n}\n<commit_msg>Update RuntimeClass E2E expectations<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tapierrs \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/events\"\n\truntimeclasstest \"k8s.io\/kubernetes\/pkg\/kubelet\/runtimeclass\/testing\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\tutilpointer \"k8s.io\/utils\/pointer\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ PreconfiguredRuntimeHandler is the name of the runtime handler that is expected to be\n\t\/\/ preconfigured in the test environment.\n\tPreconfiguredRuntimeHandler = \"test-handler\"\n\t\/\/ DockerRuntimeHandler is a hardcoded runtime handler that is accepted by dockershim, and\n\t\/\/ treated equivalently to a nil runtime handler.\n\tDockerRuntimeHandler = \"docker\"\n)\n\nvar _ = ginkgo.Describe(\"[sig-node] RuntimeClass\", func() {\n\tf := framework.NewDefaultFramework(\"runtimeclass\")\n\n\tginkgo.It(\"should reject a Pod requesting a non-existent RuntimeClass\", func() {\n\t\trcName := f.Namespace.Name + \"-nonexistent\"\n\t\texpectPodRejection(f, newRuntimeClassPod(rcName))\n\t})\n\n\tginkgo.It(\"should reject a Pod requesting a RuntimeClass with an unconfigured handler\", func() {\n\t\thandler := f.Namespace.Name + \"-handler\"\n\t\trcName := createRuntimeClass(f, \"unconfigured-handler\", handler)\n\t\tpod := f.PodClient().Create(newRuntimeClassPod(rcName))\n\t\texpectSandboxFailureEvent(f, pod, handler)\n\t})\n\n\t\/\/ This test requires that the PreconfiguredRuntimeHandler has already been set up on nodes.\n\tginkgo.It(\"should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]\", func() {\n\t\t\/\/ The built-in docker runtime does not support configuring runtime handlers.\n\t\thandler := PreconfiguredRuntimeHandler\n\t\tif framework.TestContext.ContainerRuntime == \"docker\" {\n\t\t\thandler = DockerRuntimeHandler\n\t\t}\n\n\t\trcName := createRuntimeClass(f, \"preconfigured-handler\", handler)\n\t\tpod := f.PodClient().Create(newRuntimeClassPod(rcName))\n\t\texpectPodSuccess(f, pod)\n\t})\n\n\tginkgo.It(\"should reject a Pod requesting a deleted RuntimeClass\", func() {\n\t\trcName := createRuntimeClass(f, \"delete-me\", \"runc\")\n\t\trcClient := f.ClientSet.NodeV1beta1().RuntimeClasses()\n\n\t\tginkgo.By(\"Deleting RuntimeClass \"+rcName, func() {\n\t\t\terr := rcClient.Delete(rcName, nil)\n\t\t\tframework.ExpectNoError(err, \"failed to delete RuntimeClass %s\", rcName)\n\n\t\t\tginkgo.By(\"Waiting for the RuntimeClass to disappear\")\n\t\t\tframework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {\n\t\t\t\t_, err := rcClient.Get(rcName, metav1.GetOptions{})\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn true, nil \/\/ done\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn true, err \/\/ stop wait with error\n\t\t\t\t}\n\t\t\t\treturn false, nil\n\t\t\t}))\n\t\t})\n\n\t\texpectPodRejection(f, newRuntimeClassPod(rcName))\n\t})\n})\n\n\/\/ createRuntimeClass generates a RuntimeClass with the desired handler and a \"namespaced\" name,\n\/\/ synchronously creates it, and returns the generated name.\nfunc createRuntimeClass(f *framework.Framework, name, handler string) string {\n\tuniqueName := fmt.Sprintf(\"%s-%s\", f.Namespace.Name, name)\n\trc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)\n\trc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(rc)\n\tframework.ExpectNoError(err, \"failed to create RuntimeClass resource\")\n\treturn rc.GetName()\n}\n\n\/\/ newRuntimeClassPod generates a test pod with the given runtimeClassName.\nfunc newRuntimeClassPod(runtimeClassName string) *v1.Pod {\n\treturn &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: fmt.Sprintf(\"test-runtimeclass-%s-\", runtimeClassName),\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tRuntimeClassName: &runtimeClassName,\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tName: \"test\",\n\t\t\t\tImage: imageutils.GetE2EImage(imageutils.BusyBox),\n\t\t\t\tCommand: []string{\"true\"},\n\t\t\t}},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tAutomountServiceAccountToken: utilpointer.BoolPtr(false),\n\t\t},\n\t}\n}\n\nfunc expectPodRejection(f *framework.Framework, pod *v1.Pod) {\n\t\/\/ The Node E2E doesn't run the RuntimeClass admission controller, so we expect the rejection to\n\t\/\/ happen by the Kubelet.\n\tif framework.TestContext.NodeE2E {\n\t\tpod = f.PodClient().Create(pod)\n\t\texpectSandboxFailureEvent(f, pod, fmt.Sprintf(\"\\\"%s\\\" not found\", *pod.Spec.RuntimeClassName))\n\t} else {\n\t\t_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)\n\t\tframework.ExpectError(err, \"should be forbidden\")\n\t\tgomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), \"should be forbidden error\")\n\t}\n}\n\n\/\/ expectPodSuccess waits for the given pod to terminate successfully.\nfunc expectPodSuccess(f *framework.Framework, pod *v1.Pod) {\n\tframework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(\n\t\tf.ClientSet, pod.Name, f.Namespace.Name))\n}\n\n\/\/ expectSandboxFailureEvent polls for an event with reason \"FailedCreatePodSandBox\" containing the\n\/\/ expected message string.\nfunc expectSandboxFailureEvent(f *framework.Framework, pod *v1.Pod, msg string) {\n\teventSelector := fields.Set{\n\t\t\"involvedObject.kind\": \"Pod\",\n\t\t\"involvedObject.name\": pod.Name,\n\t\t\"involvedObject.namespace\": f.Namespace.Name,\n\t\t\"reason\": events.FailedCreatePodSandBox,\n\t}.AsSelector().String()\n\tframework.ExpectNoError(WaitTimeoutForEvent(\n\t\tf.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodEventTimeout))\n}\n<|endoftext|>"} {"text":"<commit_before>package main \/\/for test\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n)\n\ntype Header struct {\n\tName, Value string\n}\n\nfunc ParseIntRepresentation(buf []byte, N byte) (I int, cursor byte) {\n\tI = int(buf[0] & ((1 << N) - 1)) \/\/ byte could be used as byte\n\tcursor = 1\n\tif I < ((1 << N) - 1) {\n\t\treturn I, cursor\n\t} else {\n\t\tvar M byte = 0\n\t\tfor (buf[cursor] & 0x80) > 0 {\n\t\t\tI += int(buf[cursor]&0x7f) * (1 << M)\n\t\t\tM += 7\n\t\t\tcursor += 1\n\t\t}\n\t\tI += int(buf[cursor]&0x7f) * (1 << M)\n\t\treturn I, cursor + 1\n\t}\n}\n\nfunc ExtractContent(buf []byte, length int, isHuffman bool) (content string) {\n\tif isHuffman {\n\t\treturn content\n\t} else {\n\t\tcontent = string(buf[:length])\n\t\treturn content\n\t}\n}\n\nfunc ParseFromByte(buf []byte) (content string, cursor byte) {\n\tisHuffman := false\n\tif buf[0]&0x80 > 0 {\n\t\tisHuffman = true\n\t}\n\tlength, cursor := ParseIntRepresentation(buf, 7)\n\tcontent = ExtractContent(buf[cursor:], length, isHuffman)\n\tcursor += byte(length)\n\treturn content, cursor\n}\n\nfunc ParseHeader(index int, table int, buf []byte, isIndexed bool) (name, value string, cursor byte) {\n\tif c := byte(0); !isIndexed {\n\t\tif index == 0 {\n\t\t\tname, c = ParseFromByte(buf[cursor:])\n\t\t\tcursor += c\n\t\t}\n\t\tvalue, c = ParseFromByte(buf[cursor:])\n\t\tcursor += c\n\t}\n\n\tif index > 0 {\n\t\t\/\/get header from table\n\t}\n\n\treturn name, value, cursor\n}\n\nfunc decode(wire string) []Header {\n\tvar Headers []Header\n\tvar buf *[]byte\n\tnums, err := hex.DecodeString(string(wire))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf = &nums\n\n\tvar cursor byte = 0\n\tfor cursor < byte(len(nums)) {\n\t\tisIndexed := false\n\t\tisIncremental := false\n\t\tvar index int\n\t\tvar c byte\n\t\tif (*buf)[cursor]&0xe0 == 0x20 {\n\t\t\t\/\/ 7.3 Header Table Size Update\n\t\t\tcursor = 1\n\t\t} else if ((*buf)[cursor] & 0x80) > 0 {\n\t\t\t\/\/ 7.1 Indexed Header Field\n\t\t\tif ((*buf)[cursor] & 0x7f) == 0 {\n\t\t\t\tpanic('a')\n\t\t\t}\n\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 7)\n\t\t\tisIndexed = true\n\t\t} else {\n\t\t\tif (*buf)[cursor]&0xc0 == 0x40 {\n\t\t\t\t\/\/ 7.2.1 Literal Header Field with Incremental Indexing\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 6)\n\t\t\t\tisIncremental = true\n\t\t\t} else if (*buf)[cursor]&0xf0 == 0xf0 {\n\t\t\t\t\/\/ 7.2.3 Literal Header Field never Indexed\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 4)\n\t\t\t} else {\n\t\t\t\t\/\/ 7.2.2 Literal Header Field without Indexing\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 4)\n\t\t\t}\n\t\t}\n\t\tcursor += c\n\n\t\ttable := 1 \/\/for test\n\t\tname, value, c := ParseHeader(index, table, (*buf)[cursor:], isIndexed)\n\t\tcursor += c\n\n\t\tif isIncremental {\n\t\t\t\/\/add to table\n\t\t}\n\t\tHeaders = append(Headers, []Header{{name, value}}...)\n\t}\n\n\t\/\/d := hex.EncodeToString(nums)\n\t\/\/fmt.Println(d)\n\treturn Headers\n}\n\nfunc main() {\n\t\/\/nums, _ := hex.DecodeString(string(\"1FA18DB701\"))\n\t\/\/fmt.Println(nums)\n\t\/\/fmt.Println(ParseIntRepresentation(nums, 5))\n\t\/\/decode(\"ff80000111\")\n\tfmt.Println(decode(\"00073a6d6574686f640347455400073a736368656d650468747470000a3a617574686f726974790f7777772e7961686f6f2e636f2e6a7000053a70617468012f\"))\n\tfmt.Println()\n}\n<commit_msg>change return value []Header to []map[string]string<commit_after>\/\/package main \/\/for test\npackage hpack\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"huffman\"\n)\n\ntype Header struct {\n\tName, Value string\n}\n\nfunc ParseIntRepresentation(buf []byte, N byte) (I int, cursor byte) {\n\tI = int(buf[0] & ((1 << N) - 1)) \/\/ byte could be used as byte\n\tcursor = 1\n\tif I < ((1 << N) - 1) {\n\t\treturn I, cursor\n\t} else {\n\t\tvar M byte = 0\n\t\tfor (buf[cursor] & 0x80) > 0 {\n\t\t\tI += int(buf[cursor]&0x7f) * (1 << M)\n\t\t\tM += 7\n\t\t\tcursor += 1\n\t\t}\n\t\tI += int(buf[cursor]&0x7f) * (1 << M)\n\t\treturn I, cursor + 1\n\t}\n}\n\nfunc ExtractContent(buf []byte, length int, isHuffman bool) (content string) {\n\tif isHuffman {\n\t\treturn content\n\t} else {\n\t\tcontent = string(buf[:length])\n\t\treturn content\n\t}\n}\n\nfunc ParseFromByte(buf []byte) (content string, cursor byte) {\n\tisHuffman := false\n\tif buf[0]&0x80 > 0 {\n\t\tisHuffman = true\n\t}\n\tlength, cursor := ParseIntRepresentation(buf, 7)\n\tcontent = ExtractContent(buf[cursor:], length, isHuffman)\n\tcursor += byte(length)\n\treturn content, cursor\n}\n\nfunc ParseHeader(index int, table int, buf []byte, isIndexed bool) (name, value string, cursor byte) {\n\tif c := byte(0); !isIndexed {\n\t\tif index == 0 {\n\t\t\tname, c = ParseFromByte(buf[cursor:])\n\t\t\tcursor += c\n\t\t}\n\t\tvalue, c = ParseFromByte(buf[cursor:])\n\t\tcursor += c\n\t}\n\n\tif index > 0 {\n\t\t\/\/get header from table\n\t}\n\n\treturn name, value, cursor\n}\n\nfunc Decode(wire string) (Headers []map[string]string) {\n\tvar buf *[]byte\n\tnums, err := hex.DecodeString(string(wire))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf = &nums\n\n\tvar cursor byte = 0\n\tfor cursor < byte(len(nums)) {\n\t\tisIndexed := false\n\t\tisIncremental := false\n\t\tvar index int\n\t\tvar c byte\n\t\tif (*buf)[cursor]&0xe0 == 0x20 {\n\t\t\t\/\/ 7.3 Header Table Size Update\n\t\t\tcursor = 1\n\t\t} else if ((*buf)[cursor] & 0x80) > 0 {\n\t\t\t\/\/ 7.1 Indexed Header Field\n\t\t\tif ((*buf)[cursor] & 0x7f) == 0 {\n\t\t\t\tpanic('a')\n\t\t\t}\n\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 7)\n\t\t\tisIndexed = true\n\t\t} else {\n\t\t\tif (*buf)[cursor]&0xc0 == 0x40 {\n\t\t\t\t\/\/ 7.2.1 Literal Header Field with Incremental Indexing\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 6)\n\t\t\t\tisIncremental = true\n\t\t\t} else if (*buf)[cursor]&0xf0 == 0xf0 {\n\t\t\t\t\/\/ 7.2.3 Literal Header Field never Indexed\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 4)\n\t\t\t} else {\n\t\t\t\t\/\/ 7.2.2 Literal Header Field without Indexing\n\t\t\t\tindex, c = ParseIntRepresentation((*buf)[cursor:], 4)\n\t\t\t}\n\t\t}\n\t\tcursor += c\n\n\t\ttable := 1 \/\/for test\n\t\tname, value, c := ParseHeader(index, table, (*buf)[cursor:], isIndexed)\n\t\tcursor += c\n\n\t\tif isIncremental {\n\t\t\t\/\/add to table\n\t\t}\n\t\theader := map[string]string{name: value}\n\t\tHeaders = append(Headers, header)\n\t}\n\n\t\/\/d := hex.EncodeToString(nums)\n\t\/\/fmt.Println(d)\n\treturn Headers\n}\n\nfunc main() {\n\t\/\/nums, _ := hex.DecodeString(string(\"1FA18DB701\"))\n\t\/\/fmt.Println(nums)\n\t\/\/fmt.Println(ParseIntRepresentation(nums, 5))\n\t\/\/decode(\"ff80000111\")\n\tfmt.Println(Decode(\"00073a6d6574686f640347455400073a736368656d650468747470000a3a617574686f726974790f7777772e7961686f6f2e636f2e6a7000053a70617468012f\"))\n\tfmt.Println(huffman.HUFFMAN_TABLE)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2012 Dan Kortschak <dan.kortschak@adelaide.edu.au>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage llrb_test\n\nimport (\n\t\"code.google.com\/p\/biogo.llrb\"\n\t\"fmt\"\n)\n\ntype (\n\tInt int\n\tIntUpperBound int\n)\n\nfunc (c Int) Compare(b llrb.Comparable) int {\n\tswitch i := b.(type) {\n\tcase Int:\n\t\treturn int(c) - int(i)\n\tcase IntUpperBound:\n\t\treturn int(c) - int(i)\n\t}\n\tpanic(\"unknown type\")\n}\n\nfunc (c IntUpperBound) Compare(b llrb.Comparable) int {\n\tvar d int\n\tswitch i := b.(type) {\n\tcase Int:\n\t\td = int(c) - int(i)\n\tcase IntUpperBound:\n\t\td = int(c) - int(i)\n\t}\n\tif d == 0 {\n\t\treturn 1\n\t}\n\treturn d\n}\n\nfunc Example() {\n\tvalues := []int{0, 1, 2, 3, 4, 2, 3, 5, 5, 65, 32, 3, 23}\n\n\t\/\/ Insert using a type that reports equality:\n\t{\n\t\tt := &llrb.Tree{}\n\t\tfor _, v := range values {\n\t\t\tt.Insert(Int(v)) \/\/ Insert with replacement.\n\t\t}\n\n\t\tresults := []int(nil)\n\t\t\/\/ More efficiently retrieved using Get(Int(3))...\n\t\tt.DoMatching(func(c llrb.Comparable) (done bool) {\n\t\t\tresults = append(results, int(c.(Int)))\n\t\t\treturn\n\t\t}, Int(3))\n\n\t\tfmt.Println(\"With replacement: \", results)\n\t}\n\n\t\/\/ Insert using a type that does not report equality:\n\t{\n\t\tt := &llrb.Tree{}\n\t\tfor _, v := range values {\n\t\t\tt.Insert(IntUpperBound(v)) \/\/ Insert without replacement.\n\t\t}\n\n\t\tresults := []int(nil)\n\t\tt.DoMatching(func(c llrb.Comparable) (done bool) {\n\t\t\tresults = append(results, int(c.(IntUpperBound)))\n\t\t\treturn\n\t\t}, Int(3))\n\n\t\tfmt.Println(\"Without replacement:\", results)\n\t}\n\n\t\/\/ Output:\n\t\/\/ With replacement: [3]\n\t\/\/ Without replacement: [3 3 3]\n}\n\n\/*func Example_2() {\n\tvalues := []int{0, 1, 2, 3, 4, 2, 3, 5, 5, 65, 32, 3, 23}\n\tt := &Tree{}\n\tfor _, v := range values {\n\t\tt.Insert(Int(v)) \/\/ Insert with replacement.\n\t}\n\n\tresults := []int(nil)\n\tt.DoMatching(func(c Comparable) (done bool) {\n\t\tresults = append(results, int(c.(Int)))\n\t\treturn\n\t}, Int(3))\n\n\tfmt.Println(results)\n\t\/\/ Output:\n\t\/\/ [3]\n}\n*\/\n<commit_msg>Lagging deletion<commit_after>\/\/ Copyright ©2012 Dan Kortschak <dan.kortschak@adelaide.edu.au>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage llrb_test\n\nimport (\n\t\"code.google.com\/p\/biogo.llrb\"\n\t\"fmt\"\n)\n\ntype (\n\tInt int\n\tIntUpperBound int\n)\n\nfunc (c Int) Compare(b llrb.Comparable) int {\n\tswitch i := b.(type) {\n\tcase Int:\n\t\treturn int(c) - int(i)\n\tcase IntUpperBound:\n\t\treturn int(c) - int(i)\n\t}\n\tpanic(\"unknown type\")\n}\n\nfunc (c IntUpperBound) Compare(b llrb.Comparable) int {\n\tvar d int\n\tswitch i := b.(type) {\n\tcase Int:\n\t\td = int(c) - int(i)\n\tcase IntUpperBound:\n\t\td = int(c) - int(i)\n\t}\n\tif d == 0 {\n\t\treturn 1\n\t}\n\treturn d\n}\n\nfunc Example() {\n\tvalues := []int{0, 1, 2, 3, 4, 2, 3, 5, 5, 65, 32, 3, 23}\n\n\t\/\/ Insert using a type that reports equality:\n\t{\n\t\tt := &llrb.Tree{}\n\t\tfor _, v := range values {\n\t\t\tt.Insert(Int(v)) \/\/ Insert with replacement.\n\t\t}\n\n\t\tresults := []int(nil)\n\t\t\/\/ More efficiently retrieved using Get(Int(3))...\n\t\tt.DoMatching(func(c llrb.Comparable) (done bool) {\n\t\t\tresults = append(results, int(c.(Int)))\n\t\t\treturn\n\t\t}, Int(3))\n\n\t\tfmt.Println(\"With replacement: \", results)\n\t}\n\n\t\/\/ Insert using a type that does not report equality:\n\t{\n\t\tt := &llrb.Tree{}\n\t\tfor _, v := range values {\n\t\t\tt.Insert(IntUpperBound(v)) \/\/ Insert without replacement.\n\t\t}\n\n\t\tresults := []int(nil)\n\t\tt.DoMatching(func(c llrb.Comparable) (done bool) {\n\t\t\tresults = append(results, int(c.(IntUpperBound)))\n\t\t\treturn\n\t\t}, Int(3))\n\n\t\tfmt.Println(\"Without replacement:\", results)\n\t}\n\n\t\/\/ Output:\n\t\/\/ With replacement: [3]\n\t\/\/ Without replacement: [3 3 3]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/cernops\/golbd\/lbcluster\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestLoadClusters(t *testing.T) {\n\tlg := lbcluster.Log{Syslog: false, Stdout: true, Debugflag: false}\n\n\tconfig := Config{Master: \"lbdxyz.cern.ch\",\n\t\tHeartbeatFile: \"heartbeat\",\n\t\tHeartbeatPath: \"\/work\/go\/src\/github.com\/cernops\/golbd\",\n\t\t\/\/HeartbeatMu: sync.Mutex{0, 0},\n\t\tTsigKeyPrefix: \"abcd-\",\n\t\tTsigInternalKey: \"xxx123==\",\n\t\tTsigExternalKey: \"yyy123==\",\n\t\tSnmpPassword: \"zzz123\",\n\t\tDnsManager: \"111.111.0.111\",\n\t\tClusters: map[string][]string{\"test01.cern.ch\": []string{\"lxplus142.cern.ch\", \"lxplus177.cern.ch\"},\n\t\t\t\"test02.cern.ch\": []string{\"lxplus013.cern.ch\", \"lxplus038.cern.ch\", \"lxplus025.cern.ch\"}},\n\t\tParameters: map[string]lbcluster.Params{\"test01.cern.ch\": lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 2, External: true, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\"test02.cern.ch\": lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 10, External: false, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"}}}\n\texpected := []lbcluster.LBCluster{\n\t\tlbcluster.LBCluster{Cluster_name: \"test01.cern.ch\",\n\t\t\tLoadbalancing_username: \"loadbalancing\",\n\t\t\tLoadbalancing_password: \"zzz123\",\n\t\t\tHost_metric_table: map[string]int{\"lxplus142.cern.ch\": 100000, \"lxplus177.cern.ch\": 100000},\n\t\t\tParameters: lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 2, External: true, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\/\/Time_of_last_evaluation time.Time\n\t\t\tCurrent_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts_dns: []string{\"unknown\"},\n\t\t\tStatistics_filename: \"\/var\/log\/lb\/lbstatistics.test01.cern.ch\",\n\t\t\tPer_cluster_filename: \".\/test01.cern.ch.log\",\n\t\t\t\/\/Slog: Log\n\t\t\tCurrent_index: 0},\n\t\tlbcluster.LBCluster{Cluster_name: \"test02.cern.ch\",\n\t\t\tLoadbalancing_username: \"loadbalancing\",\n\t\t\tLoadbalancing_password: \"zzz123\",\n\t\t\tHost_metric_table: map[string]int{\"lxplus013.cern.ch\": 100000, \"lxplus038.cern.ch\": 100000, \"lxplus025.cern.ch\": 100000},\n\t\t\tParameters: lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 10, External: false, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\/\/Time_of_last_evaluation time.Time\n\t\t\tCurrent_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts_dns: []string{\"unknown\"},\n\t\t\tStatistics_filename: \"\/var\/log\/lb\/lbstatistics.test02.cern.ch\",\n\t\t\tPer_cluster_filename: \".\/test02.cern.ch.log\",\n\t\t\t\/\/Slog: Log\n\t\t\tCurrent_index: 0}}\n\n\tlbclusters := loadClusters(config, lg)\n\t\/\/ reflect.DeepEqual(lbclusters, expected) occassionally fails as the array order is not always the same\n\t\/\/ so comparing element par element\n\ti := 0\n\tfor _, e := range expected {\n\t\tfor _, c := range lbclusters {\n\t\t\tif c.Cluster_name == e.Cluster_name {\n\t\t\t\tif !reflect.DeepEqual(c, e) {\n\t\t\t\t\tt.Errorf(\"loadClusters: got\\n%v\\nexpected\\n%v\", lbclusters, expected)\n\t\t\t\t} else {\n\t\t\t\t\ti = i + 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\tif (i != len(expected)) || (i != len(lbclusters)) {\n\t\tt.Errorf(\"loadClusters: wrong number of clusters, got\\n%v\\nexpected\\n%v\", lbclusters, expected)\n\n\t}\n}\n<commit_msg>fix test for new behaviour with statistics log<commit_after>package main\n\nimport (\n\t\"github.com\/cernops\/golbd\/lbcluster\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestLoadClusters(t *testing.T) {\n\tlg := lbcluster.Log{Syslog: false, Stdout: true, Debugflag: false}\n\n\tconfig := Config{Master: \"lbdxyz.cern.ch\",\n\t\tHeartbeatFile: \"heartbeat\",\n\t\tHeartbeatPath: \"\/work\/go\/src\/github.com\/cernops\/golbd\",\n\t\t\/\/HeartbeatMu: sync.Mutex{0, 0},\n\t\tTsigKeyPrefix: \"abcd-\",\n\t\tTsigInternalKey: \"xxx123==\",\n\t\tTsigExternalKey: \"yyy123==\",\n\t\tSnmpPassword: \"zzz123\",\n\t\tDnsManager: \"111.111.0.111\",\n\t\tClusters: map[string][]string{\"test01.cern.ch\": []string{\"lxplus142.cern.ch\", \"lxplus177.cern.ch\"},\n\t\t\t\"test02.cern.ch\": []string{\"lxplus013.cern.ch\", \"lxplus038.cern.ch\", \"lxplus025.cern.ch\"}},\n\t\tParameters: map[string]lbcluster.Params{\"test01.cern.ch\": lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 2, External: true, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\"test02.cern.ch\": lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 10, External: false, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"}}}\n\texpected := []lbcluster.LBCluster{\n\t\tlbcluster.LBCluster{Cluster_name: \"test01.cern.ch\",\n\t\t\tLoadbalancing_username: \"loadbalancing\",\n\t\t\tLoadbalancing_password: \"zzz123\",\n\t\t\tHost_metric_table: map[string]int{\"lxplus142.cern.ch\": 100000, \"lxplus177.cern.ch\": 100000},\n\t\t\tParameters: lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 2, External: true, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\/\/Time_of_last_evaluation time.Time\n\t\t\tCurrent_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts_dns: []string{\"unknown\"},\n\t\t\tStatistics_filename: \".\/lbstatistics.test01.cern.ch\",\n\t\t\tPer_cluster_filename: \".\/test01.cern.ch.log\",\n\t\t\t\/\/Slog: Log\n\t\t\tCurrent_index: 0},\n\t\tlbcluster.LBCluster{Cluster_name: \"test02.cern.ch\",\n\t\t\tLoadbalancing_username: \"loadbalancing\",\n\t\t\tLoadbalancing_password: \"zzz123\",\n\t\t\tHost_metric_table: map[string]int{\"lxplus013.cern.ch\": 100000, \"lxplus038.cern.ch\": 100000, \"lxplus025.cern.ch\": 100000},\n\t\t\tParameters: lbcluster.Params{Behaviour: \"mindless\", Best_hosts: 10, External: false, Metric: \"cmsfrontier\", Polling_interval: 6, Statistics: \"long\"},\n\t\t\t\/\/Time_of_last_evaluation time.Time\n\t\t\tCurrent_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts: []string{\"unknown\"},\n\t\t\tPrevious_best_hosts_dns: []string{\"unknown\"},\n\t\t\tStatistics_filename: \".\/lbstatistics.test02.cern.ch\",\n\t\t\tPer_cluster_filename: \".\/test02.cern.ch.log\",\n\t\t\t\/\/Slog: Log\n\t\t\tCurrent_index: 0}}\n\n\tlbclusters := loadClusters(config, lg)\n\t\/\/ reflect.DeepEqual(lbclusters, expected) occassionally fails as the array order is not always the same\n\t\/\/ so comparing element par element\n\ti := 0\n\tfor _, e := range expected {\n\t\tfor _, c := range lbclusters {\n\t\t\tif c.Cluster_name == e.Cluster_name {\n\t\t\t\tif !reflect.DeepEqual(c, e) {\n\t\t\t\t\tt.Errorf(\"loadClusters: got\\n%v\\nexpected\\n%v\", lbclusters, expected)\n\t\t\t\t} else {\n\t\t\t\t\ti = i + 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\tif (i != len(expected)) || (i != len(lbclusters)) {\n\t\tt.Errorf(\"loadClusters: wrong number of clusters, got\\n%v\\nexpected\\n%v\", lbclusters, expected)\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package lockfile defines a flock(2)-based implementation of the file lock.\npackage lockfile\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ File* constants family defines flags which are used to open lock files.\nconst (\n\tFileOpenFlags = os.O_WRONLY\n\tFileCreateFlags = os.O_CREATE | os.O_EXCL | FileOpenFlags\n)\n\n\/\/ FileMode defines default permission for file lock open.\nconst FileMode = os.FileMode(0666)\n\n\/\/ Lock file is a thin wrapper around os.File to give user a possibility\n\/\/ to use file as a lock (using flock(2))\ntype Lock struct {\n\tname string\n\tfileWasCreated bool\n\topenLock *sync.Mutex\n\tfile *os.File\n}\n\nfunc (l *Lock) String() string {\n\treturn fmt.Sprintf(\"<Lock=(filename='%s', fileWasCreated=%t, file='%v')>\",\n\t\tl.name,\n\t\tl.fileWasCreated,\n\t\tl.file)\n}\n\n\/\/ Acquire file lock. Returns error if acquiring failed, nil otherwise.\nfunc (l *Lock) Acquire() (err error) {\n\tif l.file != nil {\n\t\treturn fmt.Errorf(\"File %v is already acquired\", l.file)\n\t}\n\n\tif err = l.open(); err != nil {\n\t\tlog.WithField(\"filename\", l.name).Info(\"Cannot open file.\")\n\t\tl.finish()\n\t\treturn\n\t}\n\n\terr = syscall.Flock(int(l.file.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)\n\tif err != nil {\n\t\tlog.WithField(\"lock\", l).Info(\"Cannot acquire lock.\")\n\t\tl.finish()\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Release file lock. Returns error if something went wrong, nil otherwise.\nfunc (l *Lock) Release() error {\n\tdefer l.finish()\n\n\terr := syscall.Flock(int(l.file.Fd()), syscall.LOCK_UN)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": l.name,\n\t\t\t\"descriptor\": int(l.file.Fd()),\n\t\t}).Error(\"Cannot release lock.\")\n\t}\n\n\treturn err\n}\n\nfunc (l *Lock) open() (err error) {\n\tl.openLock.Lock()\n\tdefer l.openLock.Unlock()\n\n\tl.fileWasCreated = false\n\n\tflags := FileOpenFlags\n\tif _, err := os.Stat(l.name); os.IsNotExist(err) {\n\t\tflags = FileCreateFlags\n\t\tl.fileWasCreated = true\n\t}\n\n\tfile, err := os.OpenFile(l.name, flags, FileMode)\n\tif err != nil && flags == FileCreateFlags {\n\t\tfile, err = os.OpenFile(l.name, FileOpenFlags, FileMode)\n\t\tl.fileWasCreated = false\n\t}\n\n\tif err == nil {\n\t\tl.file = file\n\t}\n\n\treturn\n}\n\nfunc (l *Lock) finish() {\n\tif l.file != nil {\n\t\tl.file.Close()\n\t\tif l.fileWasCreated {\n\t\t\tos.Remove(l.name)\n\t\t}\n\t}\n}\n\n\/\/ NewLock returns new lock instance. Argument is a path to the lock file.\n\/\/ Please remember that file would be truncated so it should be file path\n\/\/ for absent file or something which has insensitive content.\nfunc NewLock(filename string) *Lock {\n\treturn &Lock{name: filename, fileWasCreated: false, openLock: new(sync.Mutex)}\n}\n<commit_msg>Simplify internal logic a bit<commit_after>\/\/ Package lockfile defines a flock(2)-based implementation of the file lock.\npackage lockfile\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ File* constants family defines flags which are used to open lock files.\nconst (\n\tFileOpenFlags = os.O_WRONLY\n\tFileCreateFlags = os.O_CREATE | os.O_EXCL | FileOpenFlags\n)\n\n\/\/ FileMode defines default permission for file lock open.\nconst FileMode = os.FileMode(0666)\n\n\/\/ Lock file is a thin wrapper around os.File to give user a possibility\n\/\/ to use file as a lock (using flock(2))\ntype Lock struct {\n\tname string\n\tfileWasCreated bool\n\topenLock *sync.Mutex\n\tfile *os.File\n}\n\nfunc (l *Lock) String() string {\n\treturn fmt.Sprintf(\"<Lock=(filename='%s', fileWasCreated=%t, file='%v')>\",\n\t\tl.name,\n\t\tl.fileWasCreated,\n\t\tl.file)\n}\n\n\/\/ Acquire file lock. Returns error if acquiring failed, nil otherwise.\nfunc (l *Lock) Acquire() (err error) {\n\tif l.file != nil {\n\t\treturn fmt.Errorf(\"File %v is already acquired\", l.file)\n\t}\n\n\tif err = l.open(); err != nil {\n\t\tlog.WithField(\"filename\", l.name).Info(\"Cannot open file.\")\n\t\tl.finish()\n\t\treturn\n\t}\n\n\terr = syscall.Flock(int(l.file.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)\n\tif err != nil {\n\t\tlog.WithField(\"lock\", l).Info(\"Cannot acquire lock.\")\n\t\tl.finish()\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Release file lock. Returns error if something went wrong, nil otherwise.\nfunc (l *Lock) Release() error {\n\tdefer l.finish()\n\n\terr := syscall.Flock(int(l.file.Fd()), syscall.LOCK_UN)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": l.name,\n\t\t\t\"descriptor\": int(l.file.Fd()),\n\t\t}).Error(\"Cannot release lock.\")\n\t}\n\n\treturn err\n}\n\n\/\/ open correctly opens file with different modes.\nfunc (l *Lock) open() (err error) {\n\tl.openLock.Lock()\n\tdefer l.openLock.Unlock()\n\n\tl.fileWasCreated = false\n\n\tflags := FileOpenFlags\n\tif _, err := os.Stat(l.name); os.IsNotExist(err) {\n\t\tflags = FileCreateFlags\n\t}\n\n\tfile, err := os.OpenFile(l.name, flags, FileMode)\n\tif err != nil && flags == FileCreateFlags {\n\t\tflags = FileOpenFlags\n\t\tfile, err = os.OpenFile(l.name, flags, FileMode)\n\t}\n\n\tif err == nil {\n\t\tl.file = file\n\t\tl.fileWasCreated = flags == FileCreateFlags\n\t}\n\n\treturn\n}\n\n\/\/ finish just cleans up lock file.\nfunc (l *Lock) finish() {\n\tif l.file != nil {\n\t\tl.file.Close()\n\t\tif l.fileWasCreated {\n\t\t\tos.Remove(l.name)\n\t\t}\n\t}\n}\n\n\/\/ NewLock returns new lock instance. Argument is a path to the lock file.\n\/\/ Please remember that file would be truncated so it should be file path\n\/\/ for absent file or something which has insensitive content.\nfunc NewLock(filename string) *Lock {\n\treturn &Lock{name: filename, fileWasCreated: false, openLock: new(sync.Mutex)}\n}\n<|endoftext|>"} {"text":"<commit_before>package logrus\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n)\n\n\/\/ smallFields is a small size data set for benchmarking\nvar loggerFields = Fields{\n\t\"foo\": \"bar\",\n\t\"baz\": \"qux\",\n\t\"one\": \"two\",\n\t\"three\": \"four\",\n}\n\nfunc BenchmarkDummyLogger(b *testing.B) {\n\tnullf, err := os.OpenFile(\"\/dev\/null\", os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tb.Fatalf(\"%v\", err)\n\t}\n\tdefer nullf.Close()\n\tdoLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)\n}\n\nfunc BenchmarkDummyLoggerNoLock(b *testing.B) {\n\tnullf, err := os.OpenFile(\"\/dev\/null\", os.O_WRONLY|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tb.Fatalf(\"%v\", err)\n\t}\n\tdefer nullf.Close()\n\tdoLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)\n}\n\nfunc doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {\n\tlogger := Logger{\n\t\tOut: out,\n\t\tLevel: InfoLevel,\n\t\tFormatter: formatter,\n\t}\n\tentry := logger.WithFields(fields)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tentry.Info(\"aaa\")\n\t\t}\n\t})\n}\n\nfunc doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {\n\tlogger := Logger{\n\t\tOut: out,\n\t\tLevel: InfoLevel,\n\t\tFormatter: formatter,\n\t}\n\tlogger.SetNoLock()\n\tentry := logger.WithFields(fields)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tentry.Info(\"aaa\")\n\t\t}\n\t})\n}\n\nfunc BenchmarkLoggerJSONFormatter(b *testing.B) {\n\tdoLoggerBenchmarkWithFormatter(b, &JSONFormatter{})\n}\n\nfunc BenchmarkLoggerTextFormatter(b *testing.B) {\n\tdoLoggerBenchmarkWithFormatter(b, &TextFormatter{})\n}\n\nfunc doLoggerBenchmarkWithFormatter(b *testing.B, f Formatter) {\n\tb.SetParallelism(100)\n\tlog := New()\n\tlog.Formatter = f\n\tlog.Out = ioutil.Discard\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tlog.\n\t\t\t\tWithField(\"foo1\", \"bar1\").\n\t\t\t\tWithField(\"foo2\", \"bar2\").\n\t\t\t\tInfo(\"this is a dummy log\")\n\t\t}\n\t})\n}\n<commit_msg>deadcode<commit_after>package logrus\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc BenchmarkDummyLogger(b *testing.B) {\n\tnullf, err := os.OpenFile(\"\/dev\/null\", os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tb.Fatalf(\"%v\", err)\n\t}\n\tdefer nullf.Close()\n\tdoLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)\n}\n\nfunc BenchmarkDummyLoggerNoLock(b *testing.B) {\n\tnullf, err := os.OpenFile(\"\/dev\/null\", os.O_WRONLY|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tb.Fatalf(\"%v\", err)\n\t}\n\tdefer nullf.Close()\n\tdoLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)\n}\n\nfunc doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {\n\tlogger := Logger{\n\t\tOut: out,\n\t\tLevel: InfoLevel,\n\t\tFormatter: formatter,\n\t}\n\tentry := logger.WithFields(fields)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tentry.Info(\"aaa\")\n\t\t}\n\t})\n}\n\nfunc doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {\n\tlogger := Logger{\n\t\tOut: out,\n\t\tLevel: InfoLevel,\n\t\tFormatter: formatter,\n\t}\n\tlogger.SetNoLock()\n\tentry := logger.WithFields(fields)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tentry.Info(\"aaa\")\n\t\t}\n\t})\n}\n\nfunc BenchmarkLoggerJSONFormatter(b *testing.B) {\n\tdoLoggerBenchmarkWithFormatter(b, &JSONFormatter{})\n}\n\nfunc BenchmarkLoggerTextFormatter(b *testing.B) {\n\tdoLoggerBenchmarkWithFormatter(b, &TextFormatter{})\n}\n\nfunc doLoggerBenchmarkWithFormatter(b *testing.B, f Formatter) {\n\tb.SetParallelism(100)\n\tlog := New()\n\tlog.Formatter = f\n\tlog.Out = ioutil.Discard\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tlog.\n\t\t\t\tWithField(\"foo1\", \"bar1\").\n\t\t\t\tWithField(\"foo2\", \"bar2\").\n\t\t\t\tInfo(\"this is a dummy log\")\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package templates\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/jelmersnoeck\/dogen\/renderer\/documents\"\n)\n\n\/\/ TextBox takes a string of text and puts it on the given position on the page.\ntype TextBox struct {\n\tText string `mapstructure:\"text\"`\n\tHTML string `mapstructure:\"html\"`\n\tFont FontType `mapstructure:\"font\"`\n\tWidth float64 `mapstructure:\"width\"`\n\tFill bool `mapstructure:\"fill\"`\n\tAlign string `mapstructure:\"align\"`\n\tPosition Position `mapstructure:\"position\"`\n\tRotation float64\n\tHeight float64\n}\n\n\/\/ Parse puts the text on a specific position on the page.\nfunc (b *TextBox) Parse(doc documents.Document) {\n\tb.Font.Register(doc)\n\n\tif b.Rotation != 0 {\n\t\tdoc.TransformBegin()\n\n\t\trotationX, rotationY := b.getRotation(doc)\n\t\tdoc.TransformRotate(b.Rotation, rotationX, rotationY)\n\t}\n\n\tif b.Text != \"\" {\n\t\tb.setPosition(doc)\n\t\tdoc.MultiCell(b.Width, doc.PointConvert(b.Font.LineHeight), b.Text, \"\", b.Align, b.Fill)\n\t}\n\n\tif b.HTML != \"\" {\n\t\tleftMargin, topMargin, rightMargin, _ := doc.GetMargins()\n\t\tdoc.SetLeftMargin(leftMargin + b.Position.X)\n\t\tdoc.SetY(b.Position.Y + topMargin)\n\n\t\tif b.Width > 0 {\n\t\t\tpageWidth, _ := doc.GetPageSize()\n\t\t\trm := pageWidth - rightMargin - (b.Position.X + b.Width)\n\t\t\tdoc.SetRightMargin(rm)\n\t\t}\n\n\t\thtml := doc.HTMLBasicNew()\n\t\thtml.Write(doc.PointConvert(b.Font.LineHeight), b.HTML)\n\n\t\tdoc.SetLeftMargin(leftMargin)\n\t\tdoc.SetRightMargin(rightMargin)\n\t}\n\n\tif b.Rotation != 0 {\n\t\tdoc.TransformEnd()\n\t}\n}\n\n\/\/ Load sets all the options for the text block like transformation, font,\n\/\/ color, etc.\nfunc (b *TextBox) Load(t Template, block_data, user_input map[string]interface{}) {\n\tif block_data[\"align\"] == nil {\n\t\tb.Align = \"CM\"\n\t}\n\n\tif block_data[\"lineheight\"] == nil {\n\t\tb.Font.LineHeight = b.Font.Size + 2\n\t}\n\n\tif block_data[\"replace\"] != nil {\n\t\tfor _, value := range block_data[\"replace\"].([]interface{}) {\n\t\t\tkey := string(value.(string))\n\t\t\treplacer := strings.NewReplacer(\"%{\"+key+\"}%\", user_input[key].(string))\n\t\t\tb.HTML = replacer.Replace(b.HTML)\n\t\t}\n\t}\n}\n\nfunc (b *TextBox) setPosition(doc documents.Document) {\n\tif b.Position.Y >= 0 {\n\t\tdoc.SetY(b.Position.Y)\n\t}\n\n\tif b.Position.X >= 0 {\n\t\tdoc.SetX(b.Position.X)\n\t}\n}\n\nfunc (b *TextBox) getRotation(doc documents.Document) (rotationX float64, rotationY float64) {\n\tif b.Width == 0 {\n\t\tpageWidth, _ := doc.GetPageSize()\n\t\tleftMargin, _, rightMargin, _ := doc.GetMargins()\n\n\t\trotationX = (pageWidth - leftMargin - rightMargin - b.Position.X) \/ 2\n\t} else {\n\t\trotationX = b.Position.X + b.Width\/2\n\t}\n\n\tif b.Height == 0 {\n\t\trotationY = b.Position.Y + doc.PointConvert(b.Font.LineHeight)\/2\n\t} else {\n\t\trotationY = b.Position.Y + b.Height\/2\n\t}\n\n\treturn\n}\n<commit_msg>TextBox: get replacement from user_input.<commit_after>package templates\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/jelmersnoeck\/dogen\/renderer\/documents\"\n)\n\n\/\/ TextBox takes a string of text and puts it on the given position on the page.\ntype TextBox struct {\n\tText string `mapstructure:\"text\"`\n\tHTML string `mapstructure:\"html\"`\n\tFont FontType `mapstructure:\"font\"`\n\tWidth float64 `mapstructure:\"width\"`\n\tFill bool `mapstructure:\"fill\"`\n\tAlign string `mapstructure:\"align\"`\n\tPosition Position `mapstructure:\"position\"`\n\tRotation float64\n\tHeight float64\n}\n\n\/\/ Parse puts the text on a specific position on the page.\nfunc (b *TextBox) Parse(doc documents.Document) {\n\tb.Font.Register(doc)\n\n\tif b.Rotation != 0 {\n\t\tdoc.TransformBegin()\n\n\t\trotationX, rotationY := b.getRotation(doc)\n\t\tdoc.TransformRotate(b.Rotation, rotationX, rotationY)\n\t}\n\n\tif b.Text != \"\" {\n\t\tb.setPosition(doc)\n\t\tdoc.MultiCell(b.Width, doc.PointConvert(b.Font.LineHeight), b.Text, \"\", b.Align, b.Fill)\n\t}\n\n\tif b.HTML != \"\" {\n\t\tleftMargin, topMargin, rightMargin, _ := doc.GetMargins()\n\t\tdoc.SetLeftMargin(leftMargin + b.Position.X)\n\t\tdoc.SetY(b.Position.Y + topMargin)\n\n\t\tif b.Width > 0 {\n\t\t\tpageWidth, _ := doc.GetPageSize()\n\t\t\trm := pageWidth - rightMargin - (b.Position.X + b.Width)\n\t\t\tdoc.SetRightMargin(rm)\n\t\t}\n\n\t\thtml := doc.HTMLBasicNew()\n\t\thtml.Write(doc.PointConvert(b.Font.LineHeight), b.HTML)\n\n\t\tdoc.SetLeftMargin(leftMargin)\n\t\tdoc.SetRightMargin(rightMargin)\n\t}\n\n\tif b.Rotation != 0 {\n\t\tdoc.TransformEnd()\n\t}\n}\n\n\/\/ Load sets all the options for the text block like transformation, font,\n\/\/ color, etc.\nfunc (b *TextBox) Load(t Template, block_data, user_input map[string]interface{}) {\n\tif block_data[\"align\"] == nil {\n\t\tb.Align = \"CM\"\n\t}\n\n\tif block_data[\"lineheight\"] == nil {\n\t\tb.Font.LineHeight = b.Font.Size + 2\n\t}\n\n\tif block_data[\"replace\"] != nil {\n\t\tfor _, value := range block_data[\"replace\"].([]interface{}) {\n\t\t\tkey := string(value.(string))\n\t\t\treplacer := strings.NewReplacer(\"%{\"+key+\"}%\", user_input[key].(string))\n\t\t\tb.HTML = replacer.Replace(b.HTML)\n\t\t}\n\t}\n\n\tif text, ok := user_input[\"text\"]; ok {\n\t\tb.Text = text.(string)\n\t}\n}\n\nfunc (b *TextBox) setPosition(doc documents.Document) {\n\tif b.Position.Y >= 0 {\n\t\tdoc.SetY(b.Position.Y)\n\t}\n\n\tif b.Position.X >= 0 {\n\t\tdoc.SetX(b.Position.X)\n\t}\n}\n\nfunc (b *TextBox) getRotation(doc documents.Document) (rotationX float64, rotationY float64) {\n\tif b.Width == 0 {\n\t\tpageWidth, _ := doc.GetPageSize()\n\t\tleftMargin, _, rightMargin, _ := doc.GetMargins()\n\n\t\trotationX = (pageWidth - leftMargin - rightMargin - b.Position.X) \/ 2\n\t} else {\n\t\trotationX = b.Position.X + b.Width\/2\n\t}\n\n\tif b.Height == 0 {\n\t\trotationY = b.Position.Y + doc.PointConvert(b.Font.LineHeight)\/2\n\t} else {\n\t\trotationY = b.Position.Y + b.Height\/2\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package logmunch\n\nimport (\n\t\"time\"\n\t\"strings\"\n)\n\n\/\/ QueryGroup\ntype QueryGroup struct {\n Name string\n Keys []string\n}\n\nfunc NewQueryGroup(name string, keys []string) QueryGroup {\n return QueryGroup{\n Name: name,\n Keys: keys,\n }\n}\n\n\/\/ Create a new key in l by name and composed of the given keys.\nfunc (q *QueryGroup) Group(l *LogLine) {\n values := make([]string, len(q.Keys))\n\n for i, key := range q.Keys {\n if value, ok := l.Entries[key]; ok {\n values[i] = value\n } else {\n values[i] = \"-\"\n }\n }\n\n l.Entries[q.Name] = strings.Join(values, \"×\")\n}\n\ntype Query struct {\n\t\/\/ A prefix to group things by\n\tFilter string\n\n\t\/\/ When the log start and end\n\tStart time.Time\n\tEnd time.Time\n\n\t\/\/ How many lines to fetch\n\tLimit int\n\n\t\/\/ TODO: Filters?\n\t\/\/ TODO: Keep\/discard some key\/value pairs?\n\t\/\/ TODO: Some display niceness?\n\n GroupBy []QueryGroup\n}\n<commit_msg>Query: gofmt.<commit_after>package logmunch\n\nimport (\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ QueryGroup\ntype QueryGroup struct {\n\tName string\n\tKeys []string\n}\n\nfunc NewQueryGroup(name string, keys []string) QueryGroup {\n\treturn QueryGroup{\n\t\tName: name,\n\t\tKeys: keys,\n\t}\n}\n\n\/\/ Create a new key in l by name and composed of the given keys.\nfunc (q *QueryGroup) Group(l *LogLine) {\n\tvalues := make([]string, len(q.Keys))\n\n\tfor i, key := range q.Keys {\n\t\tif value, ok := l.Entries[key]; ok {\n\t\t\tvalues[i] = value\n\t\t} else {\n\t\t\tvalues[i] = \"-\"\n\t\t}\n\t}\n\n\tl.Entries[q.Name] = strings.Join(values, \"×\")\n}\n\ntype Query struct {\n\t\/\/ A prefix to group things by\n\tFilter string\n\n\t\/\/ When the log start and end\n\tStart time.Time\n\tEnd time.Time\n\n\t\/\/ How many lines to fetch\n\tLimit int\n\n\t\/\/ TODO: Filters?\n\t\/\/ TODO: Keep\/discard some key\/value pairs?\n\t\/\/ TODO: Some display niceness?\n\n\tGroupBy []QueryGroup\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"os\/exec\"\nimport \"strings\"\nimport \"runtime\"\nimport \"github.com\/fatih\/color\"\nimport \"os\"\n\n\n\n\n\nfunc main() {\n\n\n\n Green := color.New(color.FgGreen)\n BoldGreen := Green.Add(color.Bold)\n Yellow := color.New(color.FgYellow)\n BoldYellow := Yellow.Add(color.Bold)\n Red := color.New(color.FgRed)\n BoldRed := Red.Add(color.Bold)\n White := color.New(color.FgWhite)\n BoldWhite := White.Add(color.Bold)\n\n color.Red(\" ██░ ██ ▓█████ ██▀███ ▄████▄ █ ██ ██▓ ▓█████ ██████ \")\n color.Red(\"▓██░ ██▒▓█ ▀ ▓██ ▒ ██▒▒██▀ ▀█ ██ ▓██▒▓██▒ ▓█ ▀ ▒██ ▒ \")\n color.Red(\"▒██▀▀██░▒███ ▓██ ░▄█ ▒▒▓█ ▄ ▓██ ▒██░▒██░ ▒███ ░ ▓██▄ \")\n color.Red(\"░▓█ ░██ ▒▓█ ▄ ▒██▀▀█▄ ▒▓▓▄ ▄██▒▓▓█ ░██░▒██░ ▒▓█ ▄ ▒ ██▒\")\n color.Red(\"░▓█▒░██▓░▒████▒░██▓ ▒██▒▒ ▓███▀ ░▒▒█████▓ ░██████▒░▒████▒▒██████▒▒\")\n color.Red(\" ▒ ░░▒░▒░░ ▒░ ░░ ▒▓ ░▒▓░░ ░▒ ▒ ░░▒▓▒ ▒ ▒ ░ ▒░▓ ░░░ ▒░ ░▒ ▒▓▒ ▒ ░\")\n color.Red(\" ▒ ░▒░ ░ ░ ░ ░ ░▒ ░ ▒░ ░ ▒ ░░▒░ ░ ░ ░ ░ ▒ ░ ░ ░ ░░ ░▒ ░ ░\")\n color.Red(\" ░ ░░ ░ ░ ░░ ░ ░ ░░░ ░ ░ ░ ░ ░ ░ ░ ░ \")\n color.Red(\" ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \")\n color.Red(\" ░ \")\n\n color.Green(\"\\n+ -- --=[ HERCULES FRAMEWORK ]\")\n BoldGreen.Println(\"+ -- --=[ Ege Balcı ]\")\n\n\n\n\n\n Priv := CheckSUDO()\n\n BoldWhite.Println(\"\\n\\n[*] STARTING HERCULES SETUP \\n\")\n\n\n BoldYellow.Println(\"[*] Detecting OS...\")\n\n if runtime.GOOS == \"linux\" {\n\n\n OsVersion, _ := exec.Command(\"sh\", \"-c\", \"uname -a\").Output()\n BoldYellow.Println(\"[*] OS Detected : \" + string(OsVersion))\n BoldYellow.Println(\"[*] Setting HERCULES path...\")\n\n\n Path, _ := exec.Command(\"sh\", \"-c\", \"pwd\").Output()\n\tBoldYellow.Println(\"[*] HERCULES_PATH=\"+string(Path))\n\t_Path := strings.Trim(string(Path), \"\\n\")\n var HERCULES_PATH string = string(\"echo 'export HERCULES_PATH=\"+_Path+\"' >> ~\/.bashrc\")\n exec.Command(\"sh\", \"-c\", HERCULES_PATH).Run()\n exec.Command(\"sh\", \"-c\", string(\"export HERCULES_PATH=\"+string(Path))).Run()\n if strings.Contains(string(OsVersion), \"Ubuntu\") || strings.Contains(string(OsVersion), \"kali\") {\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \t if Priv == false {\n \t\t\tBoldRed.Println(\"[!] ERROR : Setup needs root privileges\")\n \t\t}\n \tGo := exec.Command(\"sh\", \"-c\", \"sudo apt-get install golang\")\n \tGo.Stdout = os.Stdout\n \tGo.Stderr = os.Stderr\n \tGo.Stdin = os.Stdin\n \tGo.Run()\n \tBoldYellow.Println(\"[*] Installing upx...\")\n \tUPX := exec.Command(\"sh\", \"-c\", \"sudo apt-get install upx\")\n \tUPX.Stdout = os.Stdout\n \tUPX.Stderr = os.Stderr\n \tUPX.Stdin = os.Stdin\n \tUPX.Run()\n \tBoldYellow.Println(\"[*] Installing git...\")\n \tGit := exec.Command(\"sh\", \"-c\", \"sudo apt-get install git\")\n \tGit.Stdout = os.Stdout\n \tGit.Stderr = os.Stderr\n \tGit.Stdin = os.Stdin\n \tGit.Run()\n\n \tBoldYellow.Println(\"[*] Cloning EGESPLOIT Library...\")\n \texec.Command(\"sh\", \"-c\", \"cd SOURCE && git clone https:\/\/github.com\/EgeBalci\/EGESPLOIT.git\").Run()\n \texec.Command(\"sh\", \"-c\", \"export GOPATH=$HERCULES_PATH\").Run()\n \tBoldYellow.Println(\"[*] Cloning color Library...\")\n \texec.Command(\"sh\", \"-c\", \"go get github.com\/fatih\/color\").Run()\n \t\n \texec.Command(\"sh\", \"-c\", \"cd SOURCE && go build HERCULES.go\").Run()\n\n \tBoldYellow.Println(\"[*] Createing shoutcut...\")\n \texec.Command(\"sh\", \"-c\", \"sudo cp HERCULES \/bin\/\").Run()\n \texec.Command(\"sh\", \"-c\", \"sudo chmod 777 \/bin\/HERCULES\").Run()\n\n }else if strings.Contains(string(OsVersion), \"ARCH\") || strings.Contains(string(OsVersion), \"MANJARO\") {\n \t\/\/pacman -S package_name1\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \t if Priv == false {\n \t\t\tBoldRed.Println(\"[!] ERROR : Setup needs root privileges\")\n \t\t}\n \tGo := exec.Command(\"sh\", \"-c\", \"pacman -S go\")\n \tGo.Stdout = os.Stdout\n Go.Stderr = os.Stderr\n \tGo.Stdin = os.Stdin\n \tGo.Run()\n \tBoldYellow.Println(\"[*] Installing upx...\")\n \tUPX := exec.Command(\"sh\", \"-c\", \"pacman -S upx\")\n \tUPX.Stdout = os.Stdout\n UPX.Stderr = os.Stderr\n \tUPX.Stdin = os.Stdin\n \tUPX.Run()\n \tBoldYellow.Println(\"[*] Installing git...\")\n \tGit := exec.Command(\"sh\", \"-c\", \"pacman -S git\")\n \tGit.Stdout = os.Stdout\n Git.Stderr = os.Stderr\n \tGit.Stdin = os.Stdin\n \tGit.Run()\n\n \tBoldYellow.Println(\"[*] Cloning EGESPLOIT Library...\")\n \texec.Command(\"sh\", \"-c\", \"cd SOURCE && git clone https:\/\/github.com\/EgeBalci\/EGESPLOIT.git\").Run()\n \texec.Command(\"sh\", \"-c\", \"export GOPATH=$HERCULES_PATH\").Run()\n \tBoldYellow.Println(\"[*] Cloning color Library...\")\n \texec.Command(\"sh\", \"-c\", \"go get github.com\/fatih\/color\").Run()\n\n\t\texec.Command(\"sh\", \"-c\", \"cd SOURCE && go build HERCULES.go\").Run()\n\n \tBoldYellow.Println(\"[*] Createing shoutcut...\")\n \texec.Command(\"sh\", \"-c\", \"sudo cp HERCULES \/bin\/\").Run()\n \texec.Command(\"sh\", \"-c\", \"sudo chmod 777 \/bin\/HERCULES\").Run()\n\n }else{\n \tBoldRed.Println(\"[!] ERROR : HERCULES does not support this OS\")\n }\n\n\n Stat, Err := CheckValid()\n\n if Stat == false {\n BoldYellow.Println(\"\\n\")\n BoldRed.Println(Err)\n }else{\n BoldGreen.Println(\"\\n\\n[+] Setup completed successfully\")\n exec.Command(\"sh\", \"-c\", \"gnome-terminal\").Run()\n exec.Command(\"sh\", \"-c\", \"exit\").Run()\n }\n\n\n \t}else if runtime.GOOS != \"linux\" {\n \tBoldRed.Println(\"[!] ERROR : HERCULES only supports linux distributions\")\n \t}\n\n}\n\n\nfunc CheckValid() (bool, string){\n\n OutUPX, _ := exec.Command(\"sh\", \"-c\", \"upx\").Output()\n if (!strings.Contains(string(OutUPX), \"Copyright\")) {\n return false, \"[!] ERROR : upx is not installed\"\n }\n\n OutGO, _ := exec.Command(\"sh\", \"-c\", \"go version\").Output()\n if (!strings.Contains(string(OutGO), \"version\")) {\n return false, \"[!] ERROR : golang is not installed\"\n }\n\n OutBin, _ := exec.Command(\"sh\", \"-c\", \"cd \/bin\/ && ls\").Output()\n if (!strings.Contains(string(OutBin), \"HERCULES\")) {\n return false, \"[!] ERROR : Unable to create shoutcut \"\n }\n\n return true, \"\"\n\n}\n\nfunc CheckSUDO() (bool){\n\tUser, _ := exec.Command(\"sh\", \"-c\", \"whoami\").Output()\n\tif strings.Contains(string(User), \"root\") {\n\t\treturn true\n\t}else {\n\t\treturn false\n\t}\n\t\n}\n<commit_msg>Fixed library issues<commit_after>package main\n\nimport \"os\/exec\"\nimport \"strings\"\nimport \"runtime\"\nimport \"github.com\/fatih\/color\"\nimport \"os\"\n\n\n\n\n\nfunc main() {\n\n\n\n Green := color.New(color.FgGreen)\n BoldGreen := Green.Add(color.Bold)\n Yellow := color.New(color.FgYellow)\n BoldYellow := Yellow.Add(color.Bold)\n Red := color.New(color.FgRed)\n BoldRed := Red.Add(color.Bold)\n White := color.New(color.FgWhite)\n BoldWhite := White.Add(color.Bold)\n\n color.Red(\" ██░ ██ ▓█████ ██▀███ ▄████▄ █ ██ ██▓ ▓█████ ██████ \")\n color.Red(\"▓██░ ██▒▓█ ▀ ▓██ ▒ ██▒▒██▀ ▀█ ██ ▓██▒▓██▒ ▓█ ▀ ▒██ ▒ \")\n color.Red(\"▒██▀▀██░▒███ ▓██ ░▄█ ▒▒▓█ ▄ ▓██ ▒██░▒██░ ▒███ ░ ▓██▄ \")\n color.Red(\"░▓█ ░██ ▒▓█ ▄ ▒██▀▀█▄ ▒▓▓▄ ▄██▒▓▓█ ░██░▒██░ ▒▓█ ▄ ▒ ██▒\")\n color.Red(\"░▓█▒░██▓░▒████▒░██▓ ▒██▒▒ ▓███▀ ░▒▒█████▓ ░██████▒░▒████▒▒██████▒▒\")\n color.Red(\" ▒ ░░▒░▒░░ ▒░ ░░ ▒▓ ░▒▓░░ ░▒ ▒ ░░▒▓▒ ▒ ▒ ░ ▒░▓ ░░░ ▒░ ░▒ ▒▓▒ ▒ ░\")\n color.Red(\" ▒ ░▒░ ░ ░ ░ ░ ░▒ ░ ▒░ ░ ▒ ░░▒░ ░ ░ ░ ░ ▒ ░ ░ ░ ░░ ░▒ ░ ░\")\n color.Red(\" ░ ░░ ░ ░ ░░ ░ ░ ░░░ ░ ░ ░ ░ ░ ░ ░ ░ \")\n color.Red(\" ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \")\n color.Red(\" ░ \")\n\n color.Green(\"\\n+ -- --=[ HERCULES FRAMEWORK ]\")\n BoldGreen.Println(\"+ -- --=[ Ege Balcı ]\")\n\n\n\n\n\n Priv := CheckSUDO()\n\n BoldWhite.Println(\"\\n\\n[*] STARTING HERCULES SETUP \\n\")\n\n\n BoldYellow.Println(\"[*] Detecting OS...\")\n\n if runtime.GOOS == \"linux\" {\n\n\n OsVersion, _ := exec.Command(\"sh\", \"-c\", \"uname -a\").Output()\n BoldYellow.Println(\"[*] OS Detected : \" + string(OsVersion))\n BoldYellow.Println(\"[*] Setting HERCULES path...\")\n\n\n Path, _ := exec.Command(\"sh\", \"-c\", \"pwd\").Output()\n\tBoldYellow.Println(\"[*] HERCULES_PATH=\"+string(Path))\n\t_Path := strings.Trim(string(Path), \"\\n\")\n var HERCULES_PATH string = string(\"echo 'export HERCULES_PATH=\"+_Path+\"' >> ~\/.bashrc\")\n exec.Command(\"sh\", \"-c\", HERCULES_PATH).Run()\n exec.Command(\"sh\", \"-c\", string(\"export HERCULES_PATH=\"+string(Path))).Run()\n if strings.Contains(string(OsVersion), \"Ubuntu\") || strings.Contains(string(OsVersion), \"kali\") {\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \t if Priv == false {\n \t\t\tBoldRed.Println(\"[!] ERROR : Setup needs root privileges\")\n \t\t}\n \tGo := exec.Command(\"sh\", \"-c\", \"sudo apt-get install golang\")\n \tGo.Stdout = os.Stdout\n \tGo.Stderr = os.Stderr\n \tGo.Stdin = os.Stdin\n \tGo.Run()\n \tBoldYellow.Println(\"[*] Installing upx...\")\n \tUPX := exec.Command(\"sh\", \"-c\", \"sudo apt-get install upx\")\n \tUPX.Stdout = os.Stdout\n \tUPX.Stderr = os.Stderr\n \tUPX.Stdin = os.Stdin\n \tUPX.Run()\n \tBoldYellow.Println(\"[*] Installing git...\")\n \tGit := exec.Command(\"sh\", \"-c\", \"sudo apt-get install git\")\n \tGit.Stdout = os.Stdout\n \tGit.Stderr = os.Stderr\n \tGit.Stdin = os.Stdin\n \tGit.Run()\n\n \tBoldYellow.Println(\"[*] Cloning EGESPLOIT Library...\")\n \texec.Command(\"sh\", \"-c\", \"cd src\/EGESPLOIT && git clone https:\/\/github.com\/EgeBalci\/EGESPLOIT.git\").Run()\n \texec.Command(\"sh\", \"-c\", \"export GOPATH=$HERCULES_PATH\").Run()\n \tBoldYellow.Println(\"[*] Cloning color Library...\")\n \texec.Command(\"sh\", \"-c\", \"go get github.com\/fatih\/color\").Run()\n \t\n \texec.Command(\"sh\", \"-c\", \"cd SOURCE && go build HERCULES.go\").Run()\n\n \tBoldYellow.Println(\"[*] Createing shoutcut...\")\n \texec.Command(\"sh\", \"-c\", \"sudo cp HERCULES \/bin\/\").Run()\n \texec.Command(\"sh\", \"-c\", \"sudo chmod 777 \/bin\/HERCULES\").Run()\n\n }else if strings.Contains(string(OsVersion), \"ARCH\") || strings.Contains(string(OsVersion), \"MANJARO\") {\n \t\/\/pacman -S package_name1\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \tBoldYellow.Println(\"[*] Installing golang...\")\n \t if Priv == false {\n \t\t\tBoldRed.Println(\"[!] ERROR : Setup needs root privileges\")\n \t\t}\n \tGo := exec.Command(\"sh\", \"-c\", \"pacman -S go\")\n \tGo.Stdout = os.Stdout\n Go.Stderr = os.Stderr\n \tGo.Stdin = os.Stdin\n \tGo.Run()\n \tBoldYellow.Println(\"[*] Installing upx...\")\n \tUPX := exec.Command(\"sh\", \"-c\", \"pacman -S upx\")\n \tUPX.Stdout = os.Stdout\n UPX.Stderr = os.Stderr\n \tUPX.Stdin = os.Stdin\n \tUPX.Run()\n \tBoldYellow.Println(\"[*] Installing git...\")\n \tGit := exec.Command(\"sh\", \"-c\", \"pacman -S git\")\n \tGit.Stdout = os.Stdout\n Git.Stderr = os.Stderr\n \tGit.Stdin = os.Stdin\n \tGit.Run()\n\n \tBoldYellow.Println(\"[*] Cloning EGESPLOIT Library...\")\n \texec.Command(\"sh\", \"-c\", \"cd SOURCE && git clone https:\/\/github.com\/EgeBalci\/EGESPLOIT.git\").Run()\n \texec.Command(\"sh\", \"-c\", \"export GOPATH=$HERCULES_PATH\").Run()\n \tBoldYellow.Println(\"[*] Cloning color Library...\")\n \texec.Command(\"sh\", \"-c\", \"go get github.com\/fatih\/color\").Run()\n\n\t\texec.Command(\"sh\", \"-c\", \"cd SOURCE && go build HERCULES.go\").Run()\n\n \tBoldYellow.Println(\"[*] Createing shoutcut...\")\n \texec.Command(\"sh\", \"-c\", \"sudo cp HERCULES \/bin\/\").Run()\n \texec.Command(\"sh\", \"-c\", \"sudo chmod 777 \/bin\/HERCULES\").Run()\n\n }else{\n \tBoldRed.Println(\"[!] ERROR : HERCULES does not support this OS\")\n }\n\n\n Stat, Err := CheckValid()\n\n if Stat == false {\n BoldYellow.Println(\"\\n\")\n BoldRed.Println(Err)\n }else{\n BoldGreen.Println(\"\\n\\n[+] Setup completed successfully\")\n exec.Command(\"sh\", \"-c\", \"gnome-terminal\").Run()\n exec.Command(\"sh\", \"-c\", \"exit\").Run()\n }\n\n\n \t}else if runtime.GOOS != \"linux\" {\n \tBoldRed.Println(\"[!] ERROR : HERCULES only supports linux distributions\")\n \t}\n\n}\n\n\nfunc CheckValid() (bool, string){\n\n OutUPX, _ := exec.Command(\"sh\", \"-c\", \"upx\").Output()\n if (!strings.Contains(string(OutUPX), \"Copyright\")) {\n return false, \"[!] ERROR : upx is not installed\"\n }\n\n OutGO, _ := exec.Command(\"sh\", \"-c\", \"go version\").Output()\n if (!strings.Contains(string(OutGO), \"version\")) {\n return false, \"[!] ERROR : golang is not installed\"\n }\n\n OutBin, _ := exec.Command(\"sh\", \"-c\", \"cd \/bin\/ && ls\").Output()\n if (!strings.Contains(string(OutBin), \"HERCULES\")) {\n return false, \"[!] ERROR : Unable to create shoutcut \"\n }\n\n return true, \"\"\n\n}\n\nfunc CheckSUDO() (bool){\n\tUser, _ := exec.Command(\"sh\", \"-c\", \"whoami\").Output()\n\tif strings.Contains(string(User), \"root\") {\n\t\treturn true\n\t}else {\n\t\treturn false\n\t}\n\t\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype animeList []anime\n\ntype anime struct {\n\tID string `bson:\"id\"`\n\tName string `bson:\"name\"`\n\tHref string `bson:\"href\"`\n\tEpisode int `bson:\"ep\"`\n\tSubs []string `bson:\"subs\"`\n\tLastUpdate time.Time `bson:\"lastUpdate\"`\n}\n\n\/\/LIMIT is a time constant for 22 Days (for removal of outdated db entries)\nconst LIMIT = 22 * 24 * time.Hour\n\n\/\/Gets every anime in animeList db and returns it as AnimeList type\nfunc getAnimeList() (result animeList) {\n\terr := DBanimeList.Find(nil).Sort(\"lastUpdate\").All(&result)\n\tif err != nil {\n\t\tlog.Println(\"getAnimeList() => Find() error:\\t\", err)\n\t}\n\treturn\n}\n\n\/\/db maintanance process calls animeMaintanance function once on startup\n\/\/and after every interval time.Duration\nfunc maintainAnimeListProcess(interval time.Duration) {\n\tmaintainAnimeList()\n\tlog.Println(\"Next maintanance in \", interval.String())\n\n\tfor _ = range time.Tick(interval) {\n\t\tmaintainAnimeList()\n\t\tlog.Println(\"Next maintanance in \", interval.String())\n\t}\n}\n\n\/\/db maintanance function called on app startup and after interval duration\n\/\/deletes entries over LIMIT days old\n\/\/gets urls for entries that don't have them\nfunc maintainAnimeList() {\n\tremovals, updates := 0, 0\n\tnewAnimeList := getAnimeList()\n\tnow := time.Now()\n\tfor _, a := range newAnimeList {\n\t\tif len(a.Href) < 5 {\n\t\t\tupdates += a.GetHref()\n\t\t}\n\t\tif now.Sub(a.LastUpdate) > LIMIT {\n\t\t\ta.Remove()\n\t\t\tremovals++\n\t\t}\n\t}\n\tlog.Printf(\"AUTO-MAINTANANCE: animeList updated! (removed: %d | updated: %d)\\n\",\n\t\tremovals, updates)\n}\n\n\/\/Inserts a new anime entry to db\n\/\/Generates unique id if it doesent exist\nfunc (a *anime) Insert() {\n\tif a.ID == \"\" {\n\t\ta.GenID()\n\t}\n\ta.LastUpdate = time.Now()\n\tDBanimeList.Insert(a)\n}\n\n\/\/Remove anime from db by Anime.Name or Anime.Id\nfunc (a anime) Remove() (success int) {\n\tsuccess = 0\n\tif a.Name != \"\" {\n\t\tDBanimeList.Remove(bson.M{\"name\": a.Name})\n\t\tsuccess = 1\n\t} else if a.ID != \"\" {\n\t\tDBanimeList.Remove(bson.M{\"id\": a.ID})\n\t\tsuccess = 1\n\t}\n\treturn\n}\n\n\/\/Updates the db entry with up-to-date episode number\nfunc (a *anime) UpdateEp() {\n\tupdateQuery := bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"ep\": a.Episode,\n\t\t\t\"lastUpdate\": time.Now(),\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"name\": a.Name}).Apply(change, a)\n}\n\n\/\/Adds new sub Name to the db entry of Anime.Id\nfunc (a *anime) AddSub(sub string) {\n\tupdateQuery := bson.M{\n\t\t\"$addToSet\": bson.M{\n\t\t\t\"subs\": sub,\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"id\": a.ID}).Apply(change, a)\n}\n\n\/\/Removes the sub from the db entry of Anime.Id\nfunc (a *anime) RemoveSub(sub string) {\n\tupdateQuery := bson.M{\n\t\t\"$pull\": bson.M{\n\t\t\t\"subs\": sub,\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"id\": a.ID}).Apply(change, a)\n}\n\n\/\/Generates a unique 3char alphanumeric ID\n\/\/Not case sensitive\nfunc (a *anime) GenID() {\n\tvar id, byteList string\n\tbyteList = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\tfor i := 0; i < 3; i++ {\n\t\tid += string(byteList[rand.Intn(len(byteList))])\n\t}\n\n\tif n, _ := DBanimeList.Find(bson.M{\"id\": id}).Count(); n == 0 {\n\t\ta.ID = id\n\t} else {\n\t\ta.GenID()\n\t}\n}\n\n\/\/Gets href for Anime.Name\nfunc (a *anime) GetHref() (success int) {\n\tsuccess = 0\n\tscrapper, target :=\n\t\t\"http:\/\/scraper-422.rhcloud.com\/?href=\",\n\t\t\"http:\/\/horriblesubs.info\/current-season\/\"\n\tdoc, err := goquery.NewDocument(scrapper + target)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tdoc.Find(\".ind-show.linkful\").Each(func(i int, s *goquery.Selection) {\n\t\t\tname, _ := s.Find(\"a\").Attr(\"title\")\n\t\t\turl, _ := s.Find(\"a\").Attr(\"href\")\n\t\t\tif strings.ToLower(name) == strings.ToLower(a.Name) {\n\t\t\t\tnewHref := fmt.Sprintf(\"http:\/\/horriblesubs.info%s\", url)\n\t\t\t\tupdateQuery := bson.M{\n\t\t\t\t\t\"$set\": bson.M{\n\t\t\t\t\t\t\"href\": newHref,\n\t\t\t\t\t\t\"lastUpdate\": time.Now(),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tDBanimeList.Update(bson.M{\"name\": a.Name}, updateQuery)\n\t\t\t\tsuccess = 1\n\t\t\t}\n\t\t})\n\t}\n\treturn\n}\n\n\/\/Checks if there is already an entry in db\n\/\/with the same id OR the same name\n\/\/returns true if it already exists\nfunc (a anime) Exists() bool {\n\tquery := bson.M{\n\t\t\"$or\": []interface{}{\n\t\t\tbson.M{\"id\": a.ID},\n\t\t\tbson.M{\"name\": a.Name},\n\t\t},\n\t}\n\tif n, _ := DBanimeList.Find(query).Count(); n == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/Checks if episode # already exists in db\n\/\/Returns true if episode in db is outdated and\n\/\/needs to be updated and false if db is already\n\/\/up to date\nfunc (a anime) NewEpisode() bool {\n\tquery := bson.M{\n\t\t\"name\": a.Name,\n\t\t\"ep\": bson.M{\n\t\t\t\"$lt\": a.Episode,\n\t\t},\n\t}\n\n\tif n, _ := DBanimeList.Find(query).Count(); n == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/returns length of AnimeList\n\/\/used for sort interface\nfunc (a animeList) Len() int {\n\treturn len(a)\n}\n\n\/\/Checks if index i should sort before index j\n\/\/used for sort interface\nfunc (a animeList) Less(i, j int) bool {\n\tif len(a[i].Subs) > len(a[j].Subs) {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/Swaps the values of i and j indexes\n\/\/used for sort interface\nfunc (a animeList) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n<commit_msg>Updated error message<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype animeList []anime\n\ntype anime struct {\n\tID string `bson:\"id\"`\n\tName string `bson:\"name\"`\n\tHref string `bson:\"href\"`\n\tEpisode int `bson:\"ep\"`\n\tSubs []string `bson:\"subs\"`\n\tLastUpdate time.Time `bson:\"lastUpdate\"`\n}\n\n\/\/LIMIT is a time constant for 22 Days (for removal of outdated db entries)\nconst LIMIT = 22 * 24 * time.Hour\n\n\/\/Gets every anime in animeList db and returns it as AnimeList type\nfunc getAnimeList() (result animeList) {\n\terr := DBanimeList.Find(nil).Sort(\"lastUpdate\").All(&result)\n\tif err != nil {\n\t\tlog.Println(\"MongoDB error: \", err)\n\t}\n\treturn\n}\n\n\/\/db maintanance process calls animeMaintanance function once on startup\n\/\/and after every interval time.Duration\nfunc maintainAnimeListProcess(interval time.Duration) {\n\tmaintainAnimeList()\n\tlog.Println(\"Next maintanance in \", interval.String())\n\n\tfor _ = range time.Tick(interval) {\n\t\tmaintainAnimeList()\n\t\tlog.Println(\"Next maintanance in \", interval.String())\n\t}\n}\n\n\/\/db maintanance function called on app startup and after interval duration\n\/\/deletes entries over LIMIT days old\n\/\/gets urls for entries that don't have them\nfunc maintainAnimeList() {\n\tremovals, updates := 0, 0\n\tnewAnimeList := getAnimeList()\n\tnow := time.Now()\n\tfor _, a := range newAnimeList {\n\t\tif len(a.Href) < 5 {\n\t\t\tupdates += a.GetHref()\n\t\t}\n\t\tif now.Sub(a.LastUpdate) > LIMIT {\n\t\t\ta.Remove()\n\t\t\tremovals++\n\t\t}\n\t}\n\tlog.Printf(\"AUTO-MAINTANANCE: animeList updated! (removed: %d | updated: %d)\\n\",\n\t\tremovals, updates)\n}\n\n\/\/Inserts a new anime entry to db\n\/\/Generates unique id if it doesent exist\nfunc (a *anime) Insert() {\n\tif a.ID == \"\" {\n\t\ta.GenID()\n\t}\n\ta.LastUpdate = time.Now()\n\tDBanimeList.Insert(a)\n}\n\n\/\/Remove anime from db by Anime.Name or Anime.Id\nfunc (a anime) Remove() (success int) {\n\tsuccess = 0\n\tif a.Name != \"\" {\n\t\tDBanimeList.Remove(bson.M{\"name\": a.Name})\n\t\tsuccess = 1\n\t} else if a.ID != \"\" {\n\t\tDBanimeList.Remove(bson.M{\"id\": a.ID})\n\t\tsuccess = 1\n\t}\n\treturn\n}\n\n\/\/Updates the db entry with up-to-date episode number\nfunc (a *anime) UpdateEp() {\n\tupdateQuery := bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"ep\": a.Episode,\n\t\t\t\"lastUpdate\": time.Now(),\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"name\": a.Name}).Apply(change, a)\n}\n\n\/\/Adds new sub Name to the db entry of Anime.Id\nfunc (a *anime) AddSub(sub string) {\n\tupdateQuery := bson.M{\n\t\t\"$addToSet\": bson.M{\n\t\t\t\"subs\": sub,\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"id\": a.ID}).Apply(change, a)\n}\n\n\/\/Removes the sub from the db entry of Anime.Id\nfunc (a *anime) RemoveSub(sub string) {\n\tupdateQuery := bson.M{\n\t\t\"$pull\": bson.M{\n\t\t\t\"subs\": sub,\n\t\t},\n\t}\n\tchange := mgo.Change{\n\t\tUpdate: updateQuery,\n\t\tUpsert: false,\n\t\tRemove: false,\n\t\tReturnNew: true,\n\t}\n\tDBanimeList.Find(bson.M{\"id\": a.ID}).Apply(change, a)\n}\n\n\/\/Generates a unique 3char alphanumeric ID\n\/\/Not case sensitive\nfunc (a *anime) GenID() {\n\tvar id, byteList string\n\tbyteList = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\tfor i := 0; i < 3; i++ {\n\t\tid += string(byteList[rand.Intn(len(byteList))])\n\t}\n\n\tif n, _ := DBanimeList.Find(bson.M{\"id\": id}).Count(); n == 0 {\n\t\ta.ID = id\n\t} else {\n\t\ta.GenID()\n\t}\n}\n\n\/\/Gets href for Anime.Name\nfunc (a *anime) GetHref() (success int) {\n\tsuccess = 0\n\tscrapper, target :=\n\t\t\"http:\/\/scraper-422.rhcloud.com\/?href=\",\n\t\t\"http:\/\/horriblesubs.info\/current-season\/\"\n\tdoc, err := goquery.NewDocument(scrapper + target)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tdoc.Find(\".ind-show.linkful\").Each(func(i int, s *goquery.Selection) {\n\t\t\tname, _ := s.Find(\"a\").Attr(\"title\")\n\t\t\turl, _ := s.Find(\"a\").Attr(\"href\")\n\t\t\tif strings.ToLower(name) == strings.ToLower(a.Name) {\n\t\t\t\tnewHref := fmt.Sprintf(\"http:\/\/horriblesubs.info%s\", url)\n\t\t\t\tupdateQuery := bson.M{\n\t\t\t\t\t\"$set\": bson.M{\n\t\t\t\t\t\t\"href\": newHref,\n\t\t\t\t\t\t\"lastUpdate\": time.Now(),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tDBanimeList.Update(bson.M{\"name\": a.Name}, updateQuery)\n\t\t\t\tsuccess = 1\n\t\t\t}\n\t\t})\n\t}\n\treturn\n}\n\n\/\/Checks if there is already an entry in db\n\/\/with the same id OR the same name\n\/\/returns true if it already exists\nfunc (a anime) Exists() bool {\n\tquery := bson.M{\n\t\t\"$or\": []interface{}{\n\t\t\tbson.M{\"id\": a.ID},\n\t\t\tbson.M{\"name\": a.Name},\n\t\t},\n\t}\n\tif n, _ := DBanimeList.Find(query).Count(); n == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/Checks if episode # already exists in db\n\/\/Returns true if episode in db is outdated and\n\/\/needs to be updated and false if db is already\n\/\/up to date\nfunc (a anime) NewEpisode() bool {\n\tquery := bson.M{\n\t\t\"name\": a.Name,\n\t\t\"ep\": bson.M{\n\t\t\t\"$lt\": a.Episode,\n\t\t},\n\t}\n\n\tif n, _ := DBanimeList.Find(query).Count(); n == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/returns length of AnimeList\n\/\/used for sort interface\nfunc (a animeList) Len() int {\n\treturn len(a)\n}\n\n\/\/Checks if index i should sort before index j\n\/\/used for sort interface\nfunc (a animeList) Less(i, j int) bool {\n\tif len(a[i].Subs) > len(a[j].Subs) {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/Swaps the values of i and j indexes\n\/\/used for sort interface\nfunc (a animeList) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package connectors\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/projectjane\/jane\/models\"\n)\n\n\/\/ Webhook Struct for manipulating the webhook connector\ntype Webhook struct {\n\tCommandMsgs chan<- models.Message\n\tConnector models.Connector\n}\n\nvar webhook Webhook\n\nfunc webhookHandler(w http.ResponseWriter, r *http.Request) {\n\tvar segs []string\n\n\t\/\/ get everything past \/webhook\/\n\twebhookString := r.URL.Path[9:]\n\n\tsegs = strings.Split(webhookString, \"+\")\n\n\tif len(segs) < 2 || segs[1] == \"\" {\n\t\tsegs = strings.Split(webhookString, \"\/\")\n\n\t\tif len(segs) < 1 {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tlog.Println(\"Route not found\")\n\t\t\tfmt.Fprintf(w, \"Route not found\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif segs[0] == \"\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tlog.Println(\"Empty webhook data\")\n\t\tfmt.Fprintf(w, \"Empty webhook data\")\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tdefer r.Body.Close()\n\n\tcommand := strings.Join(segs[0:], \" \")\n\n\tvar m models.Message\n\tm.In.Source = webhook.Connector.ID\n\tm.In.Text = command\n\tm.In.Process = true\n\tm.Out.Detail = string(body)\n\twebhook.CommandMsgs <- m\n\n\tif webhook.Connector.Debug {\n\t\tlog.Printf(\"Command: %s\", command)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Request received.\"))\n}\n\n\/\/ Listen Webhook listener\nfunc (x Webhook) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\n\tx.CommandMsgs = commandMsgs\n\tx.Connector = connector\n\twebhook = x\n\n\tif connector.Debug {\n\t\tlog.Println(\"Starting Webhook connector...\")\n\t}\n\n\tport, _ := strconv.Atoi(connector.Port)\n\n\tserver := &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\tHandler: nil,\n\t}\n\n\tlog.Println(server.Addr)\n\n\thttp.HandleFunc(\"\/webhook\/\", webhookHandler)\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Command Webhook command parser\nfunc (x Webhook) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tif connector.Debug {\n\t\tlog.Println(\"Processing command...\")\n\t\tlog.Println(message.In.Text)\n\t}\n\n\tif message.In.Process {\n\t\tfor _, c := range connector.Commands {\n\t\t\tif strings.HasPrefix(strings.ToLower(message.In.Text), strings.ToLower(c.Match)) {\n\t\t\t\tmsg := strings.TrimSpace(strings.Replace(message.In.Text, c.Match, \"\", 1))\n\n\t\t\t\tif connector.Debug {\n\t\t\t\t\tlog.Printf(\"Publishing... %s\", msg)\n\t\t\t\t}\n\n\t\t\t\tmessage.Out.Text = msg\n\t\t\t\tpublishMsgs <- message\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Publish Webhook does not publish\nfunc (x Webhook) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/ Help Webhook help information\nfunc (x Webhook) Help(connector models.Connector) (help string) {\n\thelp += fmt.Sprintf(\"Webhooks enabled at %s:%s\/webhook\/\\n\", connector.Server, connector.Port)\n\treturn help\n}\n<commit_msg>Cleaning up log<commit_after>package connectors\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/projectjane\/jane\/models\"\n)\n\n\/\/ Webhook Struct for manipulating the webhook connector\ntype Webhook struct {\n\tCommandMsgs chan<- models.Message\n\tConnector models.Connector\n}\n\nvar webhook Webhook\n\nfunc webhookHandler(w http.ResponseWriter, r *http.Request) {\n\tvar segs []string\n\n\t\/\/ get everything past \/webhook\/\n\twebhookString := r.URL.Path[9:]\n\n\tsegs = strings.Split(webhookString, \"+\")\n\n\tif len(segs) < 2 || segs[1] == \"\" {\n\t\tsegs = strings.Split(webhookString, \"\/\")\n\n\t\tif len(segs) < 1 {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tlog.Println(\"Route not found\")\n\t\t\tfmt.Fprintf(w, \"Route not found\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif segs[0] == \"\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tlog.Println(\"Empty webhook data\")\n\t\tfmt.Fprintf(w, \"Empty webhook data\")\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tdefer r.Body.Close()\n\n\tcommand := strings.Join(segs[0:], \" \")\n\n\tvar m models.Message\n\tm.In.Source = webhook.Connector.ID\n\tm.In.Text = command\n\tm.In.Process = true\n\tm.Out.Detail = string(body)\n\twebhook.CommandMsgs <- m\n\n\tif webhook.Connector.Debug {\n\t\tlog.Printf(\"Command: %s\", command)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Request received.\"))\n}\n\n\/\/ Listen Webhook listener\nfunc (x Webhook) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\n\tx.CommandMsgs = commandMsgs\n\tx.Connector = connector\n\twebhook = x\n\n\tif connector.Debug {\n\t\tlog.Println(\"Starting Webhook connector...\")\n\t}\n\n\tport, _ := strconv.Atoi(connector.Port)\n\n\tserver := &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\tHandler: nil,\n\t}\n\n\tif connector.Debug {\n\t\tlog.Println(server.Addr)\n\t}\n\n\thttp.HandleFunc(\"\/webhook\/\", webhookHandler)\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Command Webhook command parser\nfunc (x Webhook) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tif connector.Debug {\n\t\tlog.Println(\"Processing command...\")\n\t\tlog.Println(message.In.Text)\n\t}\n\n\tif message.In.Process {\n\t\tfor _, c := range connector.Commands {\n\t\t\tif strings.HasPrefix(strings.ToLower(message.In.Text), strings.ToLower(c.Match)) {\n\t\t\t\tmsg := strings.TrimSpace(strings.Replace(message.In.Text, c.Match, \"\", 1))\n\n\t\t\t\tif connector.Debug {\n\t\t\t\t\tlog.Printf(\"Publishing... %s\", msg)\n\t\t\t\t}\n\n\t\t\t\tmessage.Out.Text = msg\n\t\t\t\tpublishMsgs <- message\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Publish Webhook does not publish\nfunc (x Webhook) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/ Help Webhook help information\nfunc (x Webhook) Help(connector models.Connector) (help string) {\n\thelp += fmt.Sprintf(\"Webhooks enabled at %s:%s\/webhook\/\\n\", connector.Server, connector.Port)\n\treturn help\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"compute-api\/compute\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"testing\"\n)\n\nfunc TestAccNetworkDomainCreate(t *testing.T) {\n\tresource.Test(t, resource.TestCase{\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testCheckDDComputeNetworkDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccDDCloudNetworkDomainBasic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestCheckDDCloudNetworkDomainExists(\"ddcloud_networkdomain.acc_test_domain\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testCheckDDCloudNetworkDomainExists(name string) resource.TestCheckFunc {\n\treturn func(state *terraform.State) error {\n\t\tres, ok := state.RootModule().Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tnetworkDomainID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*compute.Client)\n\t\tnetworkDomain, err := client.GetNetworkDomain(networkDomainID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Bad: Get network domain: %s\", err)\n\t\t}\n\t\tif networkDomain == nil {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain not found with Id '%s'.\", networkDomainID)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testCheckDDComputeNetworkDomainDestroy(state *terraform.State) error {\n\tfor _, res := range state.RootModule().Resources {\n\t\tif res.Type != \"ddcloud_networkdomain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tnetworkDomainID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*compute.Client)\n\t\tnetworkDomain, err := client.GetNetworkDomain(networkDomainID)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif networkDomain != nil {\n\t\t\treturn fmt.Errorf(\"Network domain '%s' still exists.\", networkDomainID)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nconst testAccDDCloudNetworkDomainBasic = `\nresource \"ddcloud_networkdomain\" \"acc_test_domain\" {\n\tname\t\t= \"acc-test-domain\"\n\tdescription\t= \"Network domain for Terraform acceptance test.\"\n\tdatacenter\t= \"AU9\"\n}\n`\n<commit_msg>Improve acceptance test for ddcloud_networkdomain resource type.<commit_after>package main\n\nimport (\n\t\"compute-api\/compute\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"testing\"\n)\n\nconst testAccDDCloudNetworkDomainBasic = `\nresource \"ddcloud_networkdomain\" \"acc_test_domain\" {\n\tname\t\t= \"acc-test-domain\"\n\tdescription\t= \"Network domain for Terraform acceptance test.\"\n\tdatacenter\t= \"AU9\"\n}\n`\n\n\/\/ Acceptance test for ddcloud_networkdomain (basic):\n\/\/\n\/\/ Create a network domain and verify that it gets created with the correct configuration.\nfunc TestAccNetworkDomainBasicCreate(t *testing.T) {\n\tresource.Test(t, resource.TestCase{\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testCheckDDComputeNetworkDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccDDCloudNetworkDomainBasic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestCheckDDCloudNetworkDomainExists(\"ddcloud_networkdomain.acc_test_domain\", true),\n\t\t\t\t\ttestCheckDDCloudNetworkDomainMatches(\"ddcloud_networkdomain.acc_test_domain\", compute.NetworkDomain{\n\t\t\t\t\t\tName: \"acc-test-domain\",\n\t\t\t\t\t\tDescription: \"Network domain for Terraform acceptance test.\",\n\t\t\t\t\t\tDatacenterID: \"AU9\",\n\t\t\t\t\t}),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Acceptance test check for ddcloud_networkdomain:\n\/\/\n\/\/ Check if the network domain exists.\nfunc testCheckDDCloudNetworkDomainExists(name string, exists bool) resource.TestCheckFunc {\n\treturn func(state *terraform.State) error {\n\t\tres, ok := state.RootModule().Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tnetworkDomainID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*compute.Client)\n\t\tnetworkDomain, err := client.GetNetworkDomain(networkDomainID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Bad: Get network domain: %s\", err)\n\t\t}\n\t\tif exists && networkDomain == nil {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain not found with Id '%s'.\", networkDomainID)\n\t\t} else if !exists && networkDomain != nil {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain still exists with Id '%s'.\", networkDomainID)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ Acceptance test check for ddcloud_networkdomain:\n\/\/\n\/\/ Check if the network domain's configuration matches the expected configuration.\nfunc testCheckDDCloudNetworkDomainMatches(name string, expected compute.NetworkDomain) resource.TestCheckFunc {\n\treturn func(state *terraform.State) error {\n\t\tres, ok := state.RootModule().Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tnetworkDomainID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*compute.Client)\n\t\tnetworkDomain, err := client.GetNetworkDomain(networkDomainID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Bad: Get network domain: %s\", err)\n\t\t}\n\t\tif networkDomain == nil {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain not found with Id '%s'.\", networkDomainID)\n\t\t}\n\n\t\tif networkDomain.Name != expected.Name {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain '%s' has name '%s' (expected '%s').\", networkDomainID, networkDomain.Name, expected.Name)\n\t\t}\n\n\t\tif networkDomain.Description != expected.Description {\n\t\t\treturn fmt.Errorf(\"Bad: Network domain '%s' has name '%s' (expected '%s').\", networkDomainID, networkDomain.Description, expected.Description)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ Acceptance test check for ddcloud_networkdomain:\n\/\/\n\/\/ Check all network domains specified in the configuration have been destroyed.\nfunc testCheckDDComputeNetworkDomainDestroy(state *terraform.State) error {\n\tfor _, res := range state.RootModule().Resources {\n\t\tif res.Type != \"ddcloud_networkdomain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tnetworkDomainID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*compute.Client)\n\t\tnetworkDomain, err := client.GetNetworkDomain(networkDomainID)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif networkDomain != nil {\n\t\t\treturn fmt.Errorf(\"Network domain '%s' still exists.\", networkDomainID)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package connectors\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Website struct {\n}\n\nfunc (x Website) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\tvar state = make(map[string]string)\n\tfor _, chk := range connector.Checks {\n\t\tstate[chk.Name] = \"OK\"\n\t}\n\tfor {\n\t\talerts := callWebsite(&state, connector)\n\t\treportWebsite(alerts, &state, commandMsgs, connector)\n\t\ttime.Sleep(60 * time.Second)\n\t}\n}\n\nfunc (x Website) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\treturn\n}\n\nfunc (x Website) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\nfunc (x Website) Help(connector models.Connector) (help string) {\n\treturn\n}\n\nfunc callWebsite(state *map[string]string, connector models.Connector) (alerts []string) {\n\tfor _, chk := range connector.Checks {\n\t\tout := \"OK\"\n\t\tif connector.Debug {\n\t\t\tlog.Print(\"Starting website call to \" + chk.Check)\n\t\t}\n\t\ttran := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tclient := &http.Client{Transport: tran}\n\t\tres, err := client.Get(chk.Check)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Error call to \" + chk.Check + \" with \" + err.Error())\n\t\t\t}\n\t\t\tout = \"CRITICAL \" + err.Error()\n\t\t} else {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Completed website call to \" + chk.Check + \" with \" + res.Status)\n\t\t\t}\n\t\t\tif res.StatusCode == 200 {\n\t\t\t\tout = \"OK \" + chk.Check + \" \" + res.Status\n\t\t\t} else {\n\t\t\t\tout = \"CRITICAL \" + chk.Check + \" \" + res.Status\n\t\t\t}\n\t\t}\n\t\tif (*state)[chk.Name] != out && (*state)[chk.Name] != \"OK\" {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Reporting alert for \" + chk.Name)\n\t\t\t}\n\t\t\talerts = append(alerts, chk.Name)\n\t\t}\n\t\t(*state)[chk.Name] = out\n\t}\n\treturn alerts\n}\n\nfunc reportWebsite(alerts []string, state *map[string]string, commandMsgs chan<- models.Message, connector models.Connector) {\n\tif connector.Debug {\n\t\tlog.Print(\"Starting reporting on website results for \" + connector.Server)\n\t}\n\tfor _, a := range alerts {\n\t\tout := (*state)[a]\n\t\tvar color = \"NONE\"\n\t\tif strings.Contains(out, \"OK\") {\n\t\t\tcolor = \"SUCCESS\"\n\t\t} else if strings.Contains(out, \"CRITICAL\") {\n\t\t\tcolor = \"FAIL\"\n\t\t} else {\n\t\t\tcolor = \"NONE\"\n\t\t}\n\t\tvar m models.Message\n\t\tm.Routes = connector.Routes\n\t\tm.In.Process = false\n\t\tm.Out.Text = connector.ID + \" \" + a\n\t\tm.Out.Detail = out\n\t\tm.Out.Status = color\n\t\tcommandMsgs <- m\n\t}\n}\n<commit_msg>Fixed website check panic fixes #96<commit_after>package connectors\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Website struct {\n}\n\nfunc (x Website) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\tvar state = make(map[string]string)\n\tfor _, chk := range connector.Checks {\n\t\tstate[chk.Name] = \"OK \" + chk.Check\n\t}\n\tfor {\n\t\talerts := callWebsite(&state, connector)\n\t\treportWebsite(alerts, &state, commandMsgs, connector)\n\t\ttime.Sleep(60 * time.Second)\n\t}\n}\n\nfunc (x Website) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\treturn\n}\n\nfunc (x Website) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\nfunc (x Website) Help(connector models.Connector) (help string) {\n\treturn\n}\n\nfunc callWebsite(state *map[string]string, connector models.Connector) (alerts []string) {\n\tfor _, chk := range connector.Checks {\n\t\tout := \"OK\"\n\t\tif connector.Debug {\n\t\t\tlog.Print(\"Starting website call to \" + chk.Check)\n\t\t}\n\t\ttran := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tclient := &http.Client{Transport: tran}\n\t\tres, err := client.Get(chk.Check)\n\t\tif err != nil {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Error call to \" + chk.Check + \" with \" + err.Error())\n\t\t\t}\n\t\t\tout = \"CRITICAL \" + err.Error()\n\t\t} else {\n\t\t\tdefer res.Body.Close()\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Completed website call to \" + chk.Check + \" with \" + res.Status)\n\t\t\t}\n\t\t\tif res.StatusCode == 200 {\n\t\t\t\tout = \"OK \" + chk.Check\n\t\t\t} else {\n\t\t\t\tout = \"CRITICAL \" + chk.Check + \" \" + res.Status\n\t\t\t}\n\t\t}\n\t\tif (*state)[chk.Name] != out {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Reporting alert for \" + chk.Name)\n\t\t\t}\n\t\t\talerts = append(alerts, chk.Name)\n\t\t}\n\t\t(*state)[chk.Name] = out\n\t}\n\treturn alerts\n}\n\nfunc reportWebsite(alerts []string, state *map[string]string, commandMsgs chan<- models.Message, connector models.Connector) {\n\tif connector.Debug {\n\t\tlog.Print(\"Starting reporting on website results for \" + connector.Server)\n\t}\n\tfor _, a := range alerts {\n\t\tout := (*state)[a]\n\t\tvar color = \"NONE\"\n\t\tif strings.Contains(out, \"OK\") {\n\t\t\tcolor = \"SUCCESS\"\n\t\t} else if strings.Contains(out, \"CRITICAL\") {\n\t\t\tcolor = \"FAIL\"\n\t\t} else {\n\t\t\tcolor = \"NONE\"\n\t\t}\n\t\tvar m models.Message\n\t\tm.Routes = connector.Routes\n\t\tm.In.Process = false\n\t\tm.Out.Text = connector.ID + \" \" + a\n\t\tm.Out.Detail = out\n\t\tm.Out.Status = color\n\t\tcommandMsgs <- m\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package matchers_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/matchers\"\n)\n\nvar _ = Describe(\"NotMatcher\", func() {\n\tContext(\"basic examples\", func() {\n\t\tIt(\"works\", func() {\n\t\t\tExpect(input).To(Not(false1))\n\t\t\tExpect(input).To(Not(Not(true2)))\n\t\t\tExpect(input).ToNot(Not(true3))\n\t\t\tExpect(input).ToNot(Not(Not(false1)))\n\t\t\tExpect(input).To(Not(Not(Not(false2))))\n\t\t})\n\t})\n\n\tContext(\"De Morgan's laws\", func() {\n\t\tIt(\"~(A && B) == ~A || ~B\", func() {\n\t\t\tExpect(input).To(Not(And(false1, false2)))\n\t\t\tExpect(input).To(Or(Not(false1), Not(false2)))\n\t\t})\n\t\tIt(\"~(A || B) == ~A && ~B\", func() {\n\t\t\tExpect(input).To(Not(Or(false1, false2)))\n\t\t\tExpect(input).To(And(Not(false1), Not(false2)))\n\t\t})\n\t})\n\n\tContext(\"failure messages are opposite of original matchers' failure messages\", func() {\n\t\tContext(\"when match fails\", func() {\n\t\t\tIt(\"gives a descriptive message\", func() {\n\t\t\t\tverifyFailureMessage(Not(HaveLen(2)), input, \"not to have length 2\")\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when match succeeds, but expected it to fail\", func() {\n\t\t\tIt(\"gives a descriptive message\", func() {\n\t\t\t\tverifyFailureMessage(Not(Not(HaveLen(3))), input, \"to have length 3\")\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"MatchMayChangeInTheFuture()\", func() {\n\t\tIt(\"Propagates value from wrapped matcher\", func() {\n\t\t\tm := Not(Or()) \/\/ an empty Or() always returns false, and indicates it cannot change\n\t\t\tExpect(m.Match(\"anything\")).To(BeTrue())\n\t\t\tExpect(m.(*NotMatcher).MatchMayChangeInTheFuture(\"anything\")).To(BeFalse())\n\t\t})\n\t\tIt(\"Defaults to true\", func() {\n\t\t\tm := Not(Equal(1)) \/\/ Equal does not have this method\n\t\t\tExpect(m.Match(2)).To(BeTrue())\n\t\t\tExpect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) \/\/ defaults to true\n\t\t})\n\t})\n})\n<commit_msg>add test for not matcher with errors<commit_after>package matchers_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/matchers\"\n)\n\nvar _ = Describe(\"NotMatcher\", func() {\n\tContext(\"basic examples\", func() {\n\t\tIt(\"works\", func() {\n\t\t\tExpect(input).To(Not(false1))\n\t\t\tExpect(input).To(Not(Not(true2)))\n\t\t\tExpect(input).ToNot(Not(true3))\n\t\t\tExpect(input).ToNot(Not(Not(false1)))\n\t\t\tExpect(input).To(Not(Not(Not(false2))))\n\t\t})\n\n\t\tIt(\"fails on error\", func() {\n\t\t\tfailuresMessages := InterceptGomegaFailures(func() {\n\t\t\t\tExpect(input).To(Not(Panic()))\n\t\t\t})\n\t\t\tExpect(failuresMessages).To(Equal([]string{\"PanicMatcher expects a function. Got:\\n <string>: hi\"}))\n\t\t})\n\t})\n\n\tContext(\"De Morgan's laws\", func() {\n\t\tIt(\"~(A && B) == ~A || ~B\", func() {\n\t\t\tExpect(input).To(Not(And(false1, false2)))\n\t\t\tExpect(input).To(Or(Not(false1), Not(false2)))\n\t\t})\n\t\tIt(\"~(A || B) == ~A && ~B\", func() {\n\t\t\tExpect(input).To(Not(Or(false1, false2)))\n\t\t\tExpect(input).To(And(Not(false1), Not(false2)))\n\t\t})\n\t})\n\n\tContext(\"failure messages are opposite of original matchers' failure messages\", func() {\n\t\tContext(\"when match fails\", func() {\n\t\t\tIt(\"gives a descriptive message\", func() {\n\t\t\t\tverifyFailureMessage(Not(HaveLen(2)), input, \"not to have length 2\")\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when match succeeds, but expected it to fail\", func() {\n\t\t\tIt(\"gives a descriptive message\", func() {\n\t\t\t\tverifyFailureMessage(Not(Not(HaveLen(3))), input, \"to have length 3\")\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"MatchMayChangeInTheFuture()\", func() {\n\t\tIt(\"Propagates value from wrapped matcher\", func() {\n\t\t\tm := Not(Or()) \/\/ an empty Or() always returns false, and indicates it cannot change\n\t\t\tExpect(m.Match(\"anything\")).To(BeTrue())\n\t\t\tExpect(m.(*NotMatcher).MatchMayChangeInTheFuture(\"anything\")).To(BeFalse())\n\t\t})\n\t\tIt(\"Defaults to true\", func() {\n\t\t\tm := Not(Equal(1)) \/\/ Equal does not have this method\n\t\t\tExpect(m.Match(2)).To(BeTrue())\n\t\t\tExpect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) \/\/ defaults to true\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package code39\n\nimport (\n\t\"errors\"\n\t\"github.com\/boombuler\/barcode\"\n\t\"strings\"\n)\n\ntype encodeInfo struct {\n\tvalue int\n\tdata []bool\n}\n\nvar encodeTable map[rune]encodeInfo = map[rune]encodeInfo{\n\t'0': encodeInfo{0, []bool{true, false, true, false, false, true, true, false, true, true, false, true}},\n\t'1': encodeInfo{1, []bool{true, true, false, true, false, false, true, false, true, false, true, true}},\n\t'2': encodeInfo{2, []bool{true, false, true, true, false, false, true, false, true, false, true, true}},\n\t'3': encodeInfo{3, []bool{true, true, false, true, true, false, false, true, false, true, false, true}},\n\t'4': encodeInfo{4, []bool{true, false, true, false, false, true, true, false, true, false, true, true}},\n\t'5': encodeInfo{5, []bool{true, true, false, true, false, false, true, true, false, true, false, true}},\n\t'6': encodeInfo{6, []bool{true, false, true, true, false, false, true, true, false, true, false, true}},\n\t'7': encodeInfo{7, []bool{true, false, true, false, false, true, false, true, true, false, true, true}},\n\t'8': encodeInfo{8, []bool{true, true, false, true, false, false, true, false, true, true, false, true}},\n\t'9': encodeInfo{9, []bool{true, false, true, true, false, false, true, false, true, true, false, true}},\n\t'A': encodeInfo{10, []bool{true, true, false, true, false, true, false, false, true, false, true, true}},\n\t'B': encodeInfo{11, []bool{true, false, true, true, false, true, false, false, true, false, true, true}},\n\t'C': encodeInfo{12, []bool{true, true, false, true, true, false, true, false, false, true, false, true}},\n\t'D': encodeInfo{13, []bool{true, false, true, false, true, true, false, false, true, false, true, true}},\n\t'E': encodeInfo{14, []bool{true, true, false, true, false, true, true, false, false, true, false, true}},\n\t'F': encodeInfo{15, []bool{true, false, true, true, false, true, true, false, false, true, false, true}},\n\t'G': encodeInfo{16, []bool{true, false, true, false, true, false, false, true, true, false, true, true}},\n\t'H': encodeInfo{17, []bool{true, true, false, true, false, true, false, false, true, true, false, true}},\n\t'I': encodeInfo{18, []bool{true, false, true, true, false, true, false, false, true, true, false, true}},\n\t'J': encodeInfo{19, []bool{true, false, true, false, true, true, false, false, true, true, false, true}},\n\t'K': encodeInfo{20, []bool{true, true, false, true, false, true, false, true, false, false, true, true}},\n\t'L': encodeInfo{21, []bool{true, false, true, true, false, true, false, true, false, false, true, true}},\n\t'M': encodeInfo{22, []bool{true, true, false, true, true, false, true, false, true, false, false, true}},\n\t'N': encodeInfo{23, []bool{true, false, true, false, true, true, false, true, false, false, true, true}},\n\t'O': encodeInfo{24, []bool{true, true, false, true, false, true, true, false, true, false, false, true}},\n\t'P': encodeInfo{25, []bool{true, false, true, true, false, true, true, false, true, false, false, true}},\n\t'Q': encodeInfo{26, []bool{true, false, true, false, true, false, true, true, false, false, true, true}},\n\t'R': encodeInfo{27, []bool{true, true, false, true, false, true, false, true, true, false, false, true}},\n\t'S': encodeInfo{28, []bool{true, false, true, true, false, true, false, true, true, false, false, true}},\n\t'T': encodeInfo{29, []bool{true, false, true, false, true, true, false, true, true, false, false, true}},\n\t'U': encodeInfo{30, []bool{true, true, false, false, true, false, true, false, true, false, true, true}},\n\t'V': encodeInfo{31, []bool{true, false, false, true, true, false, true, false, true, false, true, true}},\n\t'W': encodeInfo{32, []bool{true, true, false, false, true, true, false, true, false, true, false, true}},\n\t'X': encodeInfo{33, []bool{true, false, false, true, false, true, true, false, true, false, true, true}},\n\t'Y': encodeInfo{34, []bool{true, true, false, false, true, false, true, true, false, true, false, true}},\n\t'Z': encodeInfo{35, []bool{true, false, false, true, true, false, true, true, false, true, false, true}},\n\t'-': encodeInfo{36, []bool{true, false, false, true, false, true, false, true, true, false, true, true}},\n\t'.': encodeInfo{37, []bool{true, true, false, false, true, false, true, false, true, true, false, true}},\n\t' ': encodeInfo{38, []bool{true, false, false, true, true, false, true, false, true, true, false, true}},\n\t'$': encodeInfo{39, []bool{true, false, false, true, false, false, true, false, false, true, false, true}},\n\t'\/': encodeInfo{40, []bool{true, false, false, true, false, false, true, false, true, false, false, true}},\n\t'+': encodeInfo{41, []bool{true, false, false, true, false, true, false, false, true, false, false, true}},\n\t'%': encodeInfo{42, []bool{true, false, true, false, false, true, false, false, true, false, false, true}},\n\t'*': encodeInfo{-1, []bool{true, false, false, true, false, true, true, false, true, true, false, true}},\n}\n\nfunc getChecksum(content string) string {\n\tsum := 0\n\tfor _, r := range content {\n\t\tinfo, ok := encodeTable[r]\n\t\tif !ok || info.value < 0 {\n\t\t\treturn \"#\"\n\t\t}\n\n\t\tsum += info.value\n\t}\n\n\tsum = sum % 43\n\tfor r, v := range encodeTable {\n\t\tif v.value == sum {\n\t\t\treturn string(r)\n\t\t}\n\t}\n\treturn \"#\"\n}\n\n\/\/ encodes the given string as a code39 barcode\n\/\/ if includeChecksum is set to true, a checksum character is calculated and added to the content\nfunc Encode(content string, includeChecksum bool) (barcode.Barcode, error) {\n\tif strings.ContainsRune(content, '*') {\n\t\treturn nil, errors.New(\"invalid data\")\n\t}\n\n\tdata := \"*\" + content\n\tif includeChecksum {\n\t\tdata += getChecksum(content)\n\t}\n\tdata += \"*\"\n\n\tcd := newCode()\n\n\tfor i, r := range data {\n\t\tif i != 0 {\n\t\t\tcd.AddBit(false)\n\t\t}\n\n\t\tinfo, ok := encodeTable[r]\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid data\")\n\t\t}\n\t\tfor _, bit := range info.data {\n\t\t\tcd.AddBit(bit)\n\t\t}\n\t}\n\n\treturn cd, nil\n}\n<commit_msg>fixed code39 content field<commit_after>package code39\n\nimport (\n\t\"errors\"\n\t\"github.com\/boombuler\/barcode\"\n\t\"strings\"\n)\n\ntype encodeInfo struct {\n\tvalue int\n\tdata []bool\n}\n\nvar encodeTable map[rune]encodeInfo = map[rune]encodeInfo{\n\t'0': encodeInfo{0, []bool{true, false, true, false, false, true, true, false, true, true, false, true}},\n\t'1': encodeInfo{1, []bool{true, true, false, true, false, false, true, false, true, false, true, true}},\n\t'2': encodeInfo{2, []bool{true, false, true, true, false, false, true, false, true, false, true, true}},\n\t'3': encodeInfo{3, []bool{true, true, false, true, true, false, false, true, false, true, false, true}},\n\t'4': encodeInfo{4, []bool{true, false, true, false, false, true, true, false, true, false, true, true}},\n\t'5': encodeInfo{5, []bool{true, true, false, true, false, false, true, true, false, true, false, true}},\n\t'6': encodeInfo{6, []bool{true, false, true, true, false, false, true, true, false, true, false, true}},\n\t'7': encodeInfo{7, []bool{true, false, true, false, false, true, false, true, true, false, true, true}},\n\t'8': encodeInfo{8, []bool{true, true, false, true, false, false, true, false, true, true, false, true}},\n\t'9': encodeInfo{9, []bool{true, false, true, true, false, false, true, false, true, true, false, true}},\n\t'A': encodeInfo{10, []bool{true, true, false, true, false, true, false, false, true, false, true, true}},\n\t'B': encodeInfo{11, []bool{true, false, true, true, false, true, false, false, true, false, true, true}},\n\t'C': encodeInfo{12, []bool{true, true, false, true, true, false, true, false, false, true, false, true}},\n\t'D': encodeInfo{13, []bool{true, false, true, false, true, true, false, false, true, false, true, true}},\n\t'E': encodeInfo{14, []bool{true, true, false, true, false, true, true, false, false, true, false, true}},\n\t'F': encodeInfo{15, []bool{true, false, true, true, false, true, true, false, false, true, false, true}},\n\t'G': encodeInfo{16, []bool{true, false, true, false, true, false, false, true, true, false, true, true}},\n\t'H': encodeInfo{17, []bool{true, true, false, true, false, true, false, false, true, true, false, true}},\n\t'I': encodeInfo{18, []bool{true, false, true, true, false, true, false, false, true, true, false, true}},\n\t'J': encodeInfo{19, []bool{true, false, true, false, true, true, false, false, true, true, false, true}},\n\t'K': encodeInfo{20, []bool{true, true, false, true, false, true, false, true, false, false, true, true}},\n\t'L': encodeInfo{21, []bool{true, false, true, true, false, true, false, true, false, false, true, true}},\n\t'M': encodeInfo{22, []bool{true, true, false, true, true, false, true, false, true, false, false, true}},\n\t'N': encodeInfo{23, []bool{true, false, true, false, true, true, false, true, false, false, true, true}},\n\t'O': encodeInfo{24, []bool{true, true, false, true, false, true, true, false, true, false, false, true}},\n\t'P': encodeInfo{25, []bool{true, false, true, true, false, true, true, false, true, false, false, true}},\n\t'Q': encodeInfo{26, []bool{true, false, true, false, true, false, true, true, false, false, true, true}},\n\t'R': encodeInfo{27, []bool{true, true, false, true, false, true, false, true, true, false, false, true}},\n\t'S': encodeInfo{28, []bool{true, false, true, true, false, true, false, true, true, false, false, true}},\n\t'T': encodeInfo{29, []bool{true, false, true, false, true, true, false, true, true, false, false, true}},\n\t'U': encodeInfo{30, []bool{true, true, false, false, true, false, true, false, true, false, true, true}},\n\t'V': encodeInfo{31, []bool{true, false, false, true, true, false, true, false, true, false, true, true}},\n\t'W': encodeInfo{32, []bool{true, true, false, false, true, true, false, true, false, true, false, true}},\n\t'X': encodeInfo{33, []bool{true, false, false, true, false, true, true, false, true, false, true, true}},\n\t'Y': encodeInfo{34, []bool{true, true, false, false, true, false, true, true, false, true, false, true}},\n\t'Z': encodeInfo{35, []bool{true, false, false, true, true, false, true, true, false, true, false, true}},\n\t'-': encodeInfo{36, []bool{true, false, false, true, false, true, false, true, true, false, true, true}},\n\t'.': encodeInfo{37, []bool{true, true, false, false, true, false, true, false, true, true, false, true}},\n\t' ': encodeInfo{38, []bool{true, false, false, true, true, false, true, false, true, true, false, true}},\n\t'$': encodeInfo{39, []bool{true, false, false, true, false, false, true, false, false, true, false, true}},\n\t'\/': encodeInfo{40, []bool{true, false, false, true, false, false, true, false, true, false, false, true}},\n\t'+': encodeInfo{41, []bool{true, false, false, true, false, true, false, false, true, false, false, true}},\n\t'%': encodeInfo{42, []bool{true, false, true, false, false, true, false, false, true, false, false, true}},\n\t'*': encodeInfo{-1, []bool{true, false, false, true, false, true, true, false, true, true, false, true}},\n}\n\nfunc getChecksum(content string) string {\n\tsum := 0\n\tfor _, r := range content {\n\t\tinfo, ok := encodeTable[r]\n\t\tif !ok || info.value < 0 {\n\t\t\treturn \"#\"\n\t\t}\n\n\t\tsum += info.value\n\t}\n\n\tsum = sum % 43\n\tfor r, v := range encodeTable {\n\t\tif v.value == sum {\n\t\t\treturn string(r)\n\t\t}\n\t}\n\treturn \"#\"\n}\n\n\/\/ encodes the given string as a code39 barcode\n\/\/ if includeChecksum is set to true, a checksum character is calculated and added to the content\nfunc Encode(content string, includeChecksum bool) (barcode.Barcode, error) {\n\tif strings.ContainsRune(content, '*') {\n\t\treturn nil, errors.New(\"invalid data\")\n\t}\n\n\tdata := \"*\" + content\n\tif includeChecksum {\n\t\tdata += getChecksum(content)\n\t}\n\tdata += \"*\"\n\n\tcd := newCode()\n\tcd.content = content\n\n\tfor i, r := range data {\n\t\tif i != 0 {\n\t\t\tcd.AddBit(false)\n\t\t}\n\n\t\tinfo, ok := encodeTable[r]\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"invalid data\")\n\t\t}\n\t\tfor _, bit := range info.data {\n\t\t\tcd.AddBit(bit)\n\t\t}\n\t}\n\n\treturn cd, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package restapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\terrors \"github.com\/go-openapi\/errors\"\n\truntime \"github.com\/go-openapi\/runtime\"\n\tmiddleware \"github.com\/go-openapi\/runtime\/middleware\"\n\t\"github.com\/go-openapi\/swag\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/influxdata\/mrfusion\"\n\t\"github.com\/influxdata\/mrfusion\/bolt\"\n\t\"github.com\/influxdata\/mrfusion\/canned\"\n\t\"github.com\/influxdata\/mrfusion\/dist\"\n\t\"github.com\/influxdata\/mrfusion\/handlers\"\n\t\"github.com\/influxdata\/mrfusion\/influx\"\n\t\"github.com\/influxdata\/mrfusion\/kapacitor\"\n\t\"github.com\/influxdata\/mrfusion\/layouts\"\n\tfusionlog \"github.com\/influxdata\/mrfusion\/log\"\n\t\"github.com\/influxdata\/mrfusion\/mock\"\n\top \"github.com\/influxdata\/mrfusion\/restapi\/operations\"\n\t\"github.com\/influxdata\/mrfusion\/uuid\"\n)\n\n\/\/ This file is safe to edit. Once it exists it will not be overwritten\n\n\/\/go:generate swagger generate server --target .. --name --spec ..\/swagger.yaml --with-context\n\nvar logger = fusionlog.New()\n\nvar devFlags = struct {\n\tDevelop bool `short:\"d\" long:\"develop\" description:\"Run server in develop mode.\"`\n}{}\n\nvar storeFlags = struct {\n\tBoltPath string `short:\"b\" long:\"bolt-path\" description:\"Full path to boltDB file (\/Users\/somebody\/mrfusion.db)\" env:\"BOLT_PATH\" default:\"chronograf.db\"`\n}{}\n\nvar cannedFlags = struct {\n\tCannedPath string `short:\"c\" long:\"canned-path\" description:\"Path to directory of pre-canned application layouts\" env:\"CANNED_PATH\" default:\"canned\"`\n}{}\n\nfunc configureFlags(api *op.MrFusionAPI) {\n\tapi.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Develop Mode server\",\n\t\t\tLongDescription: \"Server will use the ui\/build directory directly.\",\n\t\t\tOptions: &devFlags,\n\t\t},\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Default Store Backend\",\n\t\t\tLongDescription: \"Specify the path to a BoltDB file\",\n\t\t\tOptions: &storeFlags,\n\t\t},\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Directory of pre-canned layouts\",\n\t\t\tLongDescription: \"Specify the path to a directory of pre-canned application layout files.\",\n\t\t\tOptions: &cannedFlags,\n\t\t},\n\t}\n}\n\nfunc assets() mrfusion.Assets {\n\tif devFlags.Develop {\n\t\treturn &dist.DebugAssets{\n\t\t\tDir: \"ui\/build\",\n\t\t\tDefault: \"ui\/build\/index.html\",\n\t\t}\n\t}\n\treturn &dist.BindataAssets{\n\t\tPrefix: \"ui\/build\",\n\t\tDefault: \"ui\/build\/index.html\",\n\t}\n}\n\nfunc configureAPI(api *op.MrFusionAPI) http.Handler {\n\t\/\/ configure the api here\n\tapi.ServeError = errors.ServeError\n\n\t\/\/ Set your custom logger if needed. Default one is log.Printf\n\t\/\/ Expected interface func(string, ...interface{})\n\t\/\/\n\t\/\/ Example:\n\tapi.Logger = func(msg string, args ...interface{}) {\n\t\tlogger.\n\t\t\tWithField(\"component\", \"api\").\n\t\t\tInfo(fmt.Sprintf(msg, args))\n\t}\n\n\tapi.JSONConsumer = runtime.JSONConsumer()\n\n\tapi.JSONProducer = runtime.JSONProducer()\n\n\tmockHandler := mock.NewHandler()\n\n\tapi.GetHandler = op.GetHandlerFunc(mockHandler.AllRoutes)\n\n\tif len(storeFlags.BoltPath) > 0 {\n\t\tc := bolt.NewClient()\n\t\tc.Path = storeFlags.BoltPath\n\t\tif err := c.Open(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tapps := canned.NewApps(cannedFlags.CannedPath, &uuid.V4{})\n\n\t\t\/\/ allLayouts acts as a front-end to both the bolt layouts and the filesystem layouts.\n\t\tallLayouts := &layouts.MultiLayoutStore{\n\t\t\tStores: []mrfusion.LayoutStore{\n\t\t\t\tc.LayoutStore,\n\t\t\t\tapps,\n\t\t\t},\n\t\t}\n\t\th := handlers.Store{\n\t\t\tExplorationStore: c.ExplorationStore,\n\t\t\tSourcesStore: c.SourcesStore,\n\t\t\tServersStore: c.ServersStore,\n\t\t\tLayoutStore: allLayouts,\n\t\t}\n\n\t\tapi.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.DeleteExploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.GetSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.Exploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsHandler = op.GetSourcesIDUsersUserIDExplorationsHandlerFunc(h.Explorations)\n\t\tapi.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.UpdateExploration)\n\t\tapi.PostSourcesIDUsersUserIDExplorationsHandler = op.PostSourcesIDUsersUserIDExplorationsHandlerFunc(h.NewExploration)\n\n\t\tapi.DeleteSourcesIDHandler = op.DeleteSourcesIDHandlerFunc(h.RemoveSource)\n\t\tapi.PatchSourcesIDHandler = op.PatchSourcesIDHandlerFunc(h.UpdateSource)\n\n\t\tapi.GetSourcesHandler = op.GetSourcesHandlerFunc(h.Sources)\n\t\tapi.GetSourcesIDHandler = op.GetSourcesIDHandlerFunc(h.SourcesID)\n\t\tapi.PostSourcesHandler = op.PostSourcesHandlerFunc(h.NewSource)\n\n\t\tapi.GetSourcesIDKapacitorsHandler = op.GetSourcesIDKapacitorsHandlerFunc(h.Kapacitors)\n\t\tapi.PostSourcesIDKapacitorsHandler = op.PostSourcesIDKapacitorsHandlerFunc(h.NewKapacitor)\n\n\t\tapi.GetSourcesIDKapacitorsKapaIDHandler = op.GetSourcesIDKapacitorsKapaIDHandlerFunc(h.KapacitorsID)\n\t\tapi.DeleteSourcesIDKapacitorsKapaIDHandler = op.DeleteSourcesIDKapacitorsKapaIDHandlerFunc(h.RemoveKapacitor)\n\t\tapi.PatchSourcesIDKapacitorsKapaIDHandler = op.PatchSourcesIDKapacitorsKapaIDHandlerFunc(h.UpdateKapacitor)\n\n\t\tp := handlers.InfluxProxy{\n\t\t\tSrcs: c.SourcesStore,\n\t\t\tTimeSeries: &influx.Client{},\n\t\t\tKapacitorProxy: &kapacitor.Proxy{},\n\t\t\tServersStore: c.ServersStore,\n\t\t}\n\t\tapi.PostSourcesIDProxyHandler = op.PostSourcesIDProxyHandlerFunc(p.Proxy)\n\n\t\tapi.PostSourcesIDKapacitorsKapaIDProxyHandler = op.PostSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyPost)\n\t\tapi.PatchSourcesIDKapacitorsKapaIDProxyHandler = op.PatchSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyPatch)\n\t\tapi.GetSourcesIDKapacitorsKapaIDProxyHandler = op.GetSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyGet)\n\t\tapi.DeleteSourcesIDKapacitorsKapaIDProxyHandler = op.DeleteSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyDelete)\n\n\t\tapi.DeleteLayoutsIDHandler = op.DeleteLayoutsIDHandlerFunc(h.RemoveLayout)\n\t\tapi.GetLayoutsHandler = op.GetLayoutsHandlerFunc(h.Layouts)\n\t\tapi.GetLayoutsIDHandler = op.GetLayoutsIDHandlerFunc(h.LayoutsID)\n\t\tapi.PostLayoutsHandler = op.PostLayoutsHandlerFunc(h.NewLayout)\n\t\tapi.PutLayoutsIDHandler = op.PutLayoutsIDHandlerFunc(h.UpdateLayout)\n\t} else {\n\t\tapi.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.DeleteExploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.GetSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.Exploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsHandler = op.GetSourcesIDUsersUserIDExplorationsHandlerFunc(mockHandler.Explorations)\n\t\tapi.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.UpdateExploration)\n\t\tapi.PostSourcesIDUsersUserIDExplorationsHandler = op.PostSourcesIDUsersUserIDExplorationsHandlerFunc(mockHandler.NewExploration)\n\n\t\tapi.DeleteSourcesIDHandler = op.DeleteSourcesIDHandlerFunc(mockHandler.RemoveSource)\n\t\tapi.PatchSourcesIDHandler = op.PatchSourcesIDHandlerFunc(mockHandler.UpdateSource)\n\n\t\tapi.GetSourcesHandler = op.GetSourcesHandlerFunc(mockHandler.Sources)\n\t\tapi.GetSourcesIDHandler = op.GetSourcesIDHandlerFunc(mockHandler.SourcesID)\n\t\tapi.PostSourcesHandler = op.PostSourcesHandlerFunc(mockHandler.NewSource)\n\t\tapi.PostSourcesIDProxyHandler = op.PostSourcesIDProxyHandlerFunc(mockHandler.Proxy)\n\t}\n\n\tapi.DeleteSourcesIDRolesRoleIDHandler = op.DeleteSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.DeleteSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .DeleteSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.DeleteSourcesIDUsersUserIDHandler = op.DeleteSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.DeleteSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .DeleteSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\n\tapi.GetSourcesIDPermissionsHandler = op.GetSourcesIDPermissionsHandlerFunc(func(ctx context.Context, params op.GetSourcesIDPermissionsParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDPermissions has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDRolesHandler = op.GetSourcesIDRolesHandlerFunc(func(ctx context.Context, params op.GetSourcesIDRolesParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDRoles has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDRolesRoleIDHandler = op.GetSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.GetSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.GetSourcesIDUsersHandler = op.GetSourcesIDUsersHandlerFunc(func(ctx context.Context, params op.GetSourcesIDUsersParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDUsers has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDUsersUserIDHandler = op.GetSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.GetSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\n\tapi.PatchSourcesIDRolesRoleIDHandler = op.PatchSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.PatchSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PatchSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.PatchSourcesIDUsersUserIDHandler = op.PatchSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.PatchSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PatchSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\tapi.PostSourcesIDRolesHandler = op.PostSourcesIDRolesHandlerFunc(func(ctx context.Context, params op.PostSourcesIDRolesParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PostSourcesIDRoles has not yet been implemented\")\n\t})\n\tapi.PostSourcesIDUsersHandler = op.PostSourcesIDUsersHandlerFunc(func(ctx context.Context, params op.PostSourcesIDUsersParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PostSourcesIDUsers has not yet been implemented\")\n\t})\n\n\tapi.GetMappingsHandler = op.GetMappingsHandlerFunc(mockHandler.GetMappings)\n\tapi.GetSourcesIDMonitoredHandler = op.GetSourcesIDMonitoredHandlerFunc(mockHandler.MonitoredServices)\n\n\tapi.ServerShutdown = func() {}\n\n\thandler := setupGlobalMiddleware(api.Serve(setupMiddlewares))\n\treturn handler\n}\n\n\/\/ The TLS configuration before HTTPS server starts.\nfunc configureTLS(tlsConfig *tls.Config) {\n\t\/\/ Make all necessary changes to the TLS configuration here.\n}\n\n\/\/ The middleware configuration is for the handler executors. These do not apply to the swagger.json document.\n\/\/ The middleware executes after routing but before authentication, binding and validation\nfunc setupMiddlewares(handler http.Handler) http.Handler {\n\treturn handler\n}\n\n\/\/ The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.\n\/\/ So this is a good place to plug in a panic handling middleware, logging and metrics\nfunc setupGlobalMiddleware(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.\n\t\t\tWithField(\"component\", \"server\").\n\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"url\", r.URL).\n\t\t\tInfo(\"Serving request\")\n\n\t\tif strings.Contains(r.URL.Path, \"\/chronograf\/v1\") {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t} else if r.URL.Path == \"\/\/\" {\n\t\t\thttp.Redirect(w, r, \"\/index.html\", http.StatusFound)\n\t\t} else {\n\t\t\tassets().Handler().ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t})\n}\n<commit_msg>Finer-grained logging<commit_after>package restapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\terrors \"github.com\/go-openapi\/errors\"\n\truntime \"github.com\/go-openapi\/runtime\"\n\tmiddleware \"github.com\/go-openapi\/runtime\/middleware\"\n\t\"github.com\/go-openapi\/swag\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/influxdata\/mrfusion\"\n\t\"github.com\/influxdata\/mrfusion\/bolt\"\n\t\"github.com\/influxdata\/mrfusion\/canned\"\n\t\"github.com\/influxdata\/mrfusion\/dist\"\n\t\"github.com\/influxdata\/mrfusion\/handlers\"\n\t\"github.com\/influxdata\/mrfusion\/influx\"\n\t\"github.com\/influxdata\/mrfusion\/kapacitor\"\n\t\"github.com\/influxdata\/mrfusion\/layouts\"\n\tfusionlog \"github.com\/influxdata\/mrfusion\/log\"\n\t\"github.com\/influxdata\/mrfusion\/mock\"\n\top \"github.com\/influxdata\/mrfusion\/restapi\/operations\"\n\t\"github.com\/influxdata\/mrfusion\/uuid\"\n)\n\n\/\/ This file is safe to edit. Once it exists it will not be overwritten\n\n\/\/go:generate swagger generate server --target .. --name --spec ..\/swagger.yaml --with-context\n\nvar logger = fusionlog.New()\n\nvar devFlags = struct {\n\tDevelop bool `short:\"d\" long:\"develop\" description:\"Run server in develop mode.\"`\n}{}\n\nvar storeFlags = struct {\n\tBoltPath string `short:\"b\" long:\"bolt-path\" description:\"Full path to boltDB file (\/Users\/somebody\/mrfusion.db)\" env:\"BOLT_PATH\" default:\"chronograf.db\"`\n}{}\n\nvar cannedFlags = struct {\n\tCannedPath string `short:\"c\" long:\"canned-path\" description:\"Path to directory of pre-canned application layouts\" env:\"CANNED_PATH\" default:\"canned\"`\n}{}\n\nfunc configureFlags(api *op.MrFusionAPI) {\n\tapi.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Develop Mode server\",\n\t\t\tLongDescription: \"Server will use the ui\/build directory directly.\",\n\t\t\tOptions: &devFlags,\n\t\t},\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Default Store Backend\",\n\t\t\tLongDescription: \"Specify the path to a BoltDB file\",\n\t\t\tOptions: &storeFlags,\n\t\t},\n\t\tswag.CommandLineOptionsGroup{\n\t\t\tShortDescription: \"Directory of pre-canned layouts\",\n\t\t\tLongDescription: \"Specify the path to a directory of pre-canned application layout files.\",\n\t\t\tOptions: &cannedFlags,\n\t\t},\n\t}\n}\n\nfunc assets() mrfusion.Assets {\n\tif devFlags.Develop {\n\t\treturn &dist.DebugAssets{\n\t\t\tDir: \"ui\/build\",\n\t\t\tDefault: \"ui\/build\/index.html\",\n\t\t}\n\t}\n\treturn &dist.BindataAssets{\n\t\tPrefix: \"ui\/build\",\n\t\tDefault: \"ui\/build\/index.html\",\n\t}\n}\n\nfunc configureAPI(api *op.MrFusionAPI) http.Handler {\n\t\/\/ configure the api here\n\tapi.ServeError = errors.ServeError\n\n\t\/\/ Set your custom logger if needed. Default one is log.Printf\n\t\/\/ Expected interface func(string, ...interface{})\n\t\/\/\n\t\/\/ Example:\n\tapi.Logger = func(msg string, args ...interface{}) {\n\t\tlogger.\n\t\t\tWithField(\"component\", \"api\").\n\t\t\tInfo(fmt.Sprintf(msg, args))\n\t}\n\n\tapi.JSONConsumer = runtime.JSONConsumer()\n\n\tapi.JSONProducer = runtime.JSONProducer()\n\n\tmockHandler := mock.NewHandler()\n\n\tapi.GetHandler = op.GetHandlerFunc(mockHandler.AllRoutes)\n\n\tif len(storeFlags.BoltPath) > 0 {\n\t\tc := bolt.NewClient()\n\t\tc.Path = storeFlags.BoltPath\n\t\tif err := c.Open(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tapps := canned.NewApps(cannedFlags.CannedPath, &uuid.V4{})\n\n\t\t\/\/ allLayouts acts as a front-end to both the bolt layouts and the filesystem layouts.\n\t\tallLayouts := &layouts.MultiLayoutStore{\n\t\t\tStores: []mrfusion.LayoutStore{\n\t\t\t\tc.LayoutStore,\n\t\t\t\tapps,\n\t\t\t},\n\t\t}\n\t\th := handlers.Store{\n\t\t\tExplorationStore: c.ExplorationStore,\n\t\t\tSourcesStore: c.SourcesStore,\n\t\t\tServersStore: c.ServersStore,\n\t\t\tLayoutStore: allLayouts,\n\t\t}\n\n\t\tapi.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.DeleteExploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.GetSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.Exploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsHandler = op.GetSourcesIDUsersUserIDExplorationsHandlerFunc(h.Explorations)\n\t\tapi.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(h.UpdateExploration)\n\t\tapi.PostSourcesIDUsersUserIDExplorationsHandler = op.PostSourcesIDUsersUserIDExplorationsHandlerFunc(h.NewExploration)\n\n\t\tapi.DeleteSourcesIDHandler = op.DeleteSourcesIDHandlerFunc(h.RemoveSource)\n\t\tapi.PatchSourcesIDHandler = op.PatchSourcesIDHandlerFunc(h.UpdateSource)\n\n\t\tapi.GetSourcesHandler = op.GetSourcesHandlerFunc(h.Sources)\n\t\tapi.GetSourcesIDHandler = op.GetSourcesIDHandlerFunc(h.SourcesID)\n\t\tapi.PostSourcesHandler = op.PostSourcesHandlerFunc(h.NewSource)\n\n\t\tapi.GetSourcesIDKapacitorsHandler = op.GetSourcesIDKapacitorsHandlerFunc(h.Kapacitors)\n\t\tapi.PostSourcesIDKapacitorsHandler = op.PostSourcesIDKapacitorsHandlerFunc(h.NewKapacitor)\n\n\t\tapi.GetSourcesIDKapacitorsKapaIDHandler = op.GetSourcesIDKapacitorsKapaIDHandlerFunc(h.KapacitorsID)\n\t\tapi.DeleteSourcesIDKapacitorsKapaIDHandler = op.DeleteSourcesIDKapacitorsKapaIDHandlerFunc(h.RemoveKapacitor)\n\t\tapi.PatchSourcesIDKapacitorsKapaIDHandler = op.PatchSourcesIDKapacitorsKapaIDHandlerFunc(h.UpdateKapacitor)\n\n\t\tp := handlers.InfluxProxy{\n\t\t\tSrcs: c.SourcesStore,\n\t\t\tTimeSeries: &influx.Client{},\n\t\t\tKapacitorProxy: &kapacitor.Proxy{},\n\t\t\tServersStore: c.ServersStore,\n\t\t}\n\t\tapi.PostSourcesIDProxyHandler = op.PostSourcesIDProxyHandlerFunc(p.Proxy)\n\n\t\tapi.PostSourcesIDKapacitorsKapaIDProxyHandler = op.PostSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyPost)\n\t\tapi.PatchSourcesIDKapacitorsKapaIDProxyHandler = op.PatchSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyPatch)\n\t\tapi.GetSourcesIDKapacitorsKapaIDProxyHandler = op.GetSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyGet)\n\t\tapi.DeleteSourcesIDKapacitorsKapaIDProxyHandler = op.DeleteSourcesIDKapacitorsKapaIDProxyHandlerFunc(p.KapacitorProxyDelete)\n\n\t\tapi.DeleteLayoutsIDHandler = op.DeleteLayoutsIDHandlerFunc(h.RemoveLayout)\n\t\tapi.GetLayoutsHandler = op.GetLayoutsHandlerFunc(h.Layouts)\n\t\tapi.GetLayoutsIDHandler = op.GetLayoutsIDHandlerFunc(h.LayoutsID)\n\t\tapi.PostLayoutsHandler = op.PostLayoutsHandlerFunc(h.NewLayout)\n\t\tapi.PutLayoutsIDHandler = op.PutLayoutsIDHandlerFunc(h.UpdateLayout)\n\t} else {\n\t\tapi.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.DeleteSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.DeleteExploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.GetSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.Exploration)\n\t\tapi.GetSourcesIDUsersUserIDExplorationsHandler = op.GetSourcesIDUsersUserIDExplorationsHandlerFunc(mockHandler.Explorations)\n\t\tapi.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandler = op.PatchSourcesIDUsersUserIDExplorationsExplorationIDHandlerFunc(mockHandler.UpdateExploration)\n\t\tapi.PostSourcesIDUsersUserIDExplorationsHandler = op.PostSourcesIDUsersUserIDExplorationsHandlerFunc(mockHandler.NewExploration)\n\n\t\tapi.DeleteSourcesIDHandler = op.DeleteSourcesIDHandlerFunc(mockHandler.RemoveSource)\n\t\tapi.PatchSourcesIDHandler = op.PatchSourcesIDHandlerFunc(mockHandler.UpdateSource)\n\n\t\tapi.GetSourcesHandler = op.GetSourcesHandlerFunc(mockHandler.Sources)\n\t\tapi.GetSourcesIDHandler = op.GetSourcesIDHandlerFunc(mockHandler.SourcesID)\n\t\tapi.PostSourcesHandler = op.PostSourcesHandlerFunc(mockHandler.NewSource)\n\t\tapi.PostSourcesIDProxyHandler = op.PostSourcesIDProxyHandlerFunc(mockHandler.Proxy)\n\t}\n\n\tapi.DeleteSourcesIDRolesRoleIDHandler = op.DeleteSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.DeleteSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .DeleteSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.DeleteSourcesIDUsersUserIDHandler = op.DeleteSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.DeleteSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .DeleteSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\n\tapi.GetSourcesIDPermissionsHandler = op.GetSourcesIDPermissionsHandlerFunc(func(ctx context.Context, params op.GetSourcesIDPermissionsParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDPermissions has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDRolesHandler = op.GetSourcesIDRolesHandlerFunc(func(ctx context.Context, params op.GetSourcesIDRolesParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDRoles has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDRolesRoleIDHandler = op.GetSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.GetSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.GetSourcesIDUsersHandler = op.GetSourcesIDUsersHandlerFunc(func(ctx context.Context, params op.GetSourcesIDUsersParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDUsers has not yet been implemented\")\n\t})\n\tapi.GetSourcesIDUsersUserIDHandler = op.GetSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.GetSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .GetSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\n\tapi.PatchSourcesIDRolesRoleIDHandler = op.PatchSourcesIDRolesRoleIDHandlerFunc(func(ctx context.Context, params op.PatchSourcesIDRolesRoleIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PatchSourcesIDRolesRoleID has not yet been implemented\")\n\t})\n\n\tapi.PatchSourcesIDUsersUserIDHandler = op.PatchSourcesIDUsersUserIDHandlerFunc(func(ctx context.Context, params op.PatchSourcesIDUsersUserIDParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PatchSourcesIDUsersUserID has not yet been implemented\")\n\t})\n\tapi.PostSourcesIDRolesHandler = op.PostSourcesIDRolesHandlerFunc(func(ctx context.Context, params op.PostSourcesIDRolesParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PostSourcesIDRoles has not yet been implemented\")\n\t})\n\tapi.PostSourcesIDUsersHandler = op.PostSourcesIDUsersHandlerFunc(func(ctx context.Context, params op.PostSourcesIDUsersParams) middleware.Responder {\n\t\treturn middleware.NotImplemented(\"operation .PostSourcesIDUsers has not yet been implemented\")\n\t})\n\n\tapi.GetMappingsHandler = op.GetMappingsHandlerFunc(mockHandler.GetMappings)\n\tapi.GetSourcesIDMonitoredHandler = op.GetSourcesIDMonitoredHandlerFunc(mockHandler.MonitoredServices)\n\n\tapi.ServerShutdown = func() {}\n\n\thandler := setupGlobalMiddleware(api.Serve(setupMiddlewares))\n\treturn handler\n}\n\n\/\/ The TLS configuration before HTTPS server starts.\nfunc configureTLS(tlsConfig *tls.Config) {\n\t\/\/ Make all necessary changes to the TLS configuration here.\n}\n\n\/\/ The middleware configuration is for the handler executors. These do not apply to the swagger.json document.\n\/\/ The middleware executes after routing but before authentication, binding and validation\nfunc setupMiddlewares(handler http.Handler) http.Handler {\n\treturn handler\n}\n\n\/\/ The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.\n\/\/ So this is a good place to plug in a panic handling middleware, logging and metrics\nfunc setupGlobalMiddleware(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tl := logger.\n\t\t\tWithField(\"component\", \"server\").\n\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"url\", r.URL)\n\n\t\tif strings.Contains(r.URL.Path, \"\/chronograf\/v1\") {\n\t\t\tl.Info(\"Serving API Request\")\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t} else if r.URL.Path == \"\/\/\" {\n\t\t\tl.Info(\"Serving root redirect\")\n\t\t\thttp.Redirect(w, r, \"\/index.html\", http.StatusFound)\n\t\t} else {\n\t\t\tl.Info(\"Serving assets\")\n\t\t\tassets().Handler().ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ chris 090115\n\n\/\/ TODO Test on Linux and Windows.\n\n\/\/ Package lockfile implements convenient lock file utilities for\n\/\/ Unix-based systems.\n\/\/\n\/\/ Removable Lock Files - The Difficulty\n\/\/\n\/\/ Removable lock files are notoriously difficult to get right. On BSD\n\/\/ systems, doing this in a race-free manner is trivial, via O_EXLOCK in\n\/\/ an open(2) call, but this is generally unavailable on Unix-like\n\/\/ systems due to Linux's lack of support for this option.\n\/\/\n\/\/ Consider several processes' hypothetical sequences of open, lock,\n\/\/ close (thus implicitly removing the kernel advisory lock), and\n\/\/ unlink.\n\/\/\n\/\/\tA B C\n\/\/\topen\n\/\/\t open\n\/\/\tlock\n\/\/\tclose\n\/\/\t lock\n\/\/\tunlink\n\/\/\t open\n\/\/\t lock\n\/\/\n\/\/ Now B thinks it's got a lock, but its lock file has been removed. C\n\/\/ has opened the same lock file name, thus creating a new file, and\n\/\/ locked it. So now B and C both think they've got the lock. Game\n\/\/ over.\n\/\/\n\/\/ You might attempt re-arranging the close and unlink calls, but the\n\/\/ problem remains. In general, if B opens the same file as A, and B\n\/\/ locks after A closes, then B can have a lock on a dangling file\n\/\/ descriptor.\n\/\/\n\/\/ The general problem is that the close and unlink steps are not\n\/\/ atomic.\n\/\/\n\/\/ Removable Lock Files - A Solution\n\/\/\n\/\/ One solution is to guard the two halves of the removable lock file\n\/\/ operations, open and lock, and close and unlink, with another lock\n\/\/ file that is itself not removed. This is the approach that this\n\/\/ package takes with LockRm. Using this approach, removable lock files\n\/\/ may be implemented with in a race-free manner.\npackage lockfile\n\n\/\/ Close calls' errors are not handled explicitly. The error conditions\n\/\/ all end up with the file descriptor being closed anyway, so there is\n\/\/ nothing special to handle.\n\nimport (\n\t\"os\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst mode = 0666\n\n\/\/ Obtain a LockContext by calling Lock or LockNb. It represents a\n\/\/ locked file.\ntype LockContext struct {\n\tf *os.File\n}\n\n\/\/ Obtain a LockRmContext by calling LockRm. It represents a locked\n\/\/ file that can be removed on Unlock.\ntype LockRmContext struct {\n\tglobalname string\n\n\tlocal *LockContext\n}\n\n\/\/ lock is the internal implementation for Lock and LockNb. You merely\n\/\/ specify whether or not you want the flock call to block by passing\n\/\/ the block boolean.\nfunc lock(filename string, block bool) (*LockContext, error) {\n\tf, err := os.OpenFile(filename, os.O_CREATE, mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thow := unix.LOCK_EX\n\tif !block {\n\t\thow = how | unix.LOCK_NB\n\t}\n\tif err := unix.Flock(int(f.Fd()), how); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\n\treturn &LockContext{f}, nil\n}\n\n\/\/ Lock locks on the filename given and returns a new LockContext, on\n\/\/ which you can later call Unlock. This implementation blocks, and\n\/\/ does not clean up the lock file on Unlock.\nfunc Lock(filename string) (*LockContext, error) {\n\treturn lock(filename, true)\n}\n\n\/\/ LockNb locks on the filename given and returns a new LockContext, on\n\/\/ which you can later call Unlock. This implementation does not block,\n\/\/ and does not clean up the lock file on Unlock.\nfunc LockNb(filename string) (*LockContext, error) {\n\treturn lock(filename, false)\n}\n\n\/\/ Unlock unlocks the lock file represented by the LockContext.\nfunc (lc *LockContext) Unlock() {\n\t\/\/ Close implicitly releases any kernel advisory locks.\n\tlc.f.Close()\n}\n\n\/\/ globalCtx wraps an inner function with a blocking Lock on a global\n\/\/ lock file. This is race-free since the global lock file is not\n\/\/ removed.\nfunc globalCtx(globalname string, inner func() error) error {\n\tglc, err := Lock(globalname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer glc.Unlock()\n\treturn inner()\n}\n\n\/\/ LockRm implements a removable lock file, specified by localname.\n\/\/ This implementation does not block, and removes the lock file on\n\/\/ Unlock.\n\/\/\n\/\/ On BSD systems, doing this in a race-free manner is trivial, via\n\/\/ O_EXLOCK in an open(2) call, but this is generally unavailable on\n\/\/ Unix-like systems due to Linux's lack of support for this option.\n\/\/\n\/\/ With the normal facilities provided, removing a lock file on unlock\n\/\/ creates race conditions. However, if the \"local\" lock file\n\/\/ operations are secured by use of a \"global\" lock file, which is\n\/\/ itself not removed, this can be implemented in a race-free manner.\nfunc LockRm(globalname, localname string) (*LockRmContext, error) {\n\tvar lrc *LockRmContext\n\terr := globalCtx(globalname, func() error {\n\t\tllc, err := LockNb(localname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlrc = &LockRmContext{\n\t\t\tglobalname: globalname,\n\t\t\tlocal: llc,\n\t\t}\n\t\treturn nil\n\t})\n\treturn lrc, err\n}\n\n\/\/ Unlock unlocks and removes the lock file represented by the\n\/\/ LockRmContext.\nfunc (lrc *LockRmContext) Unlock() error {\n\treturn globalCtx(lrc.globalname, func() error {\n\t\tlrc.local.Unlock()\n\t\treturn os.Remove(lrc.local.f.Name())\n\t})\n}\n<commit_msg>lockfile: Removes unnecessary mode constant.<commit_after>\/\/ chris 090115\n\n\/\/ TODO Test on Linux and Windows.\n\n\/\/ Package lockfile implements convenient lock file utilities for\n\/\/ Unix-based systems.\n\/\/\n\/\/ Removable Lock Files - The Difficulty\n\/\/\n\/\/ Removable lock files are notoriously difficult to get right. On BSD\n\/\/ systems, doing this in a race-free manner is trivial, via O_EXLOCK in\n\/\/ an open(2) call, but this is generally unavailable on Unix-like\n\/\/ systems due to Linux's lack of support for this option.\n\/\/\n\/\/ Consider several processes' hypothetical sequences of open, lock,\n\/\/ close (thus implicitly removing the kernel advisory lock), and\n\/\/ unlink.\n\/\/\n\/\/\tA B C\n\/\/\topen\n\/\/\t open\n\/\/\tlock\n\/\/\tclose\n\/\/\t lock\n\/\/\tunlink\n\/\/\t open\n\/\/\t lock\n\/\/\n\/\/ Now B thinks it's got a lock, but its lock file has been removed. C\n\/\/ has opened the same lock file name, thus creating a new file, and\n\/\/ locked it. So now B and C both think they've got the lock. Game\n\/\/ over.\n\/\/\n\/\/ You might attempt re-arranging the close and unlink calls, but the\n\/\/ problem remains. In general, if B opens the same file as A, and B\n\/\/ locks after A closes, then B can have a lock on a dangling file\n\/\/ descriptor.\n\/\/\n\/\/ The general problem is that the close and unlink steps are not\n\/\/ atomic.\n\/\/\n\/\/ Removable Lock Files - A Solution\n\/\/\n\/\/ One solution is to guard the two halves of the removable lock file\n\/\/ operations, open and lock, and close and unlink, with another lock\n\/\/ file that is itself not removed. This is the approach that this\n\/\/ package takes with LockRm. Using this approach, removable lock files\n\/\/ may be implemented with in a race-free manner.\npackage lockfile\n\n\/\/ Close calls' errors are not handled explicitly. The error conditions\n\/\/ all end up with the file descriptor being closed anyway, so there is\n\/\/ nothing special to handle.\n\nimport (\n\t\"os\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\n\/\/ Obtain a LockContext by calling Lock or LockNb. It represents a\n\/\/ locked file.\ntype LockContext struct {\n\tf *os.File\n}\n\n\/\/ Obtain a LockRmContext by calling LockRm. It represents a locked\n\/\/ file that can be removed on Unlock.\ntype LockRmContext struct {\n\tglobalname string\n\n\tlocal *LockContext\n}\n\n\/\/ lock is the internal implementation for Lock and LockNb. You merely\n\/\/ specify whether or not you want the flock call to block by passing\n\/\/ the block boolean.\nfunc lock(filename string, block bool) (*LockContext, error) {\n\tf, err := os.OpenFile(filename, os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thow := unix.LOCK_EX\n\tif !block {\n\t\thow = how | unix.LOCK_NB\n\t}\n\tif err := unix.Flock(int(f.Fd()), how); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\n\treturn &LockContext{f}, nil\n}\n\n\/\/ Lock locks on the filename given and returns a new LockContext, on\n\/\/ which you can later call Unlock. This implementation blocks, and\n\/\/ does not clean up the lock file on Unlock.\nfunc Lock(filename string) (*LockContext, error) {\n\treturn lock(filename, true)\n}\n\n\/\/ LockNb locks on the filename given and returns a new LockContext, on\n\/\/ which you can later call Unlock. This implementation does not block,\n\/\/ and does not clean up the lock file on Unlock.\nfunc LockNb(filename string) (*LockContext, error) {\n\treturn lock(filename, false)\n}\n\n\/\/ Unlock unlocks the lock file represented by the LockContext.\nfunc (lc *LockContext) Unlock() {\n\t\/\/ Close implicitly releases any kernel advisory locks.\n\tlc.f.Close()\n}\n\n\/\/ globalCtx wraps an inner function with a blocking Lock on a global\n\/\/ lock file. This is race-free since the global lock file is not\n\/\/ removed.\nfunc globalCtx(globalname string, inner func() error) error {\n\tglc, err := Lock(globalname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer glc.Unlock()\n\treturn inner()\n}\n\n\/\/ LockRm implements a removable lock file, specified by localname.\n\/\/ This implementation does not block, and removes the lock file on\n\/\/ Unlock.\n\/\/\n\/\/ On BSD systems, doing this in a race-free manner is trivial, via\n\/\/ O_EXLOCK in an open(2) call, but this is generally unavailable on\n\/\/ Unix-like systems due to Linux's lack of support for this option.\n\/\/\n\/\/ With the normal facilities provided, removing a lock file on unlock\n\/\/ creates race conditions. However, if the \"local\" lock file\n\/\/ operations are secured by use of a \"global\" lock file, which is\n\/\/ itself not removed, this can be implemented in a race-free manner.\nfunc LockRm(globalname, localname string) (*LockRmContext, error) {\n\tvar lrc *LockRmContext\n\terr := globalCtx(globalname, func() error {\n\t\tllc, err := LockNb(localname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlrc = &LockRmContext{\n\t\t\tglobalname: globalname,\n\t\t\tlocal: llc,\n\t\t}\n\t\treturn nil\n\t})\n\treturn lrc, err\n}\n\n\/\/ Unlock unlocks and removes the lock file represented by the\n\/\/ LockRmContext.\nfunc (lrc *LockRmContext) Unlock() error {\n\treturn globalCtx(lrc.globalname, func() error {\n\t\tlrc.local.Unlock()\n\t\treturn os.Remove(lrc.local.f.Name())\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package memstore is an in-memory implementation of the UpdateStore interface.\npackage memstore\n\nimport (\n\t\"partisci\/version\"\n\t\"time\"\n)\n\n\/\/ MemoryStore is an in-memory implementation of the UpdateStore interface.\ntype MemoryStore struct {\n\tversion map[string]version.Version\n\tapp map[string]version.AppSummary\n\thost map[string]version.HostSummary\n\tthreshold time.Time\n}\n\nfunc initMemoryStore(m *MemoryStore) {\n\tm.version = make(map[string]version.Version)\n\tm.app = make(map[string]version.AppSummary)\n\tm.host = make(map[string]version.HostSummary)\n\tm.threshold = time.Now()\n}\n\n\/\/ NewMemoryStore returns a empty MemoryStore.\nfunc NewMemoryStore() (m *MemoryStore) {\n\tm = new(MemoryStore)\n\tinitMemoryStore(m)\n\treturn\n}\n\n\/\/ Apps returns summary information about each application, based on the known Versions.\nfunc (s *MemoryStore) Apps() []version.AppSummary {\n\tvs := make([]version.AppSummary, 0)\n\tfor _, v := range s.app {\n\t\tvs = append(vs, v)\n\t}\n\treturn vs\n}\n\n\/\/ Hosts returns summary information about each host, based on the known Versions.\nfunc (s *MemoryStore) Hosts() []version.HostSummary {\n\tvs := make([]version.HostSummary, 0)\n\tfor _, v := range s.host {\n\t\tvs = append(vs, v)\n\t}\n\treturn vs\n}\n\n\/\/ Versions returns full Version structs where their values match app_id, host\n\/\/ and ver. Zero length strings are considered a match for all Versions.\nfunc (s *MemoryStore) Versions(app_id string,\n\thost string, ver string) []version.Version {\n\tvs := make([]version.Version, 0)\n\tfor _, v := range s.version {\n\t\tif (len(app_id) == 0 || app_id == v.AppId) &&\n\t\t\t(len(host) == 0 || host == v.Host) &&\n\t\t\t(len(ver) == 0 || ver == v.Ver) {\n\t\t\tvs = append(vs, v)\n\t\t}\n\t}\n\treturn vs\n}\n\n\/\/ Update stores a Version and updates app and host summaries.\nfunc (s *MemoryStore) Update(v version.Version) (err error) {\n\tkey := v.Key()\n\t_, vpresent := s.version[key]\n\tif v.ExactUpdate.After(s.threshold) {\n\t\ts.version[key] = v\n\t}\n\n\t\/\/ app map\n\tas, present := s.app[v.AppId]\n\tif present {\n\t\tas.LastUpdate = v.LastUpdate\n\t\tif !vpresent {\n\t\t\tas.HostCount++\n\t\t}\n\t} else {\n\t\tappv := version.AppSummary{\n\t\t\tApp: v.App,\n\t\t\tAppId: v.AppId,\n\t\t\tLastUpdate: v.LastUpdate,\n\t\t\tHostCount: 1,\n\t\t}\n\t\ts.app[v.AppId] = appv\n\t}\n\n\t\/\/ host map\n\thostv := version.HostSummary{\n\t\tHost: v.Host,\n\t\tLastUpdate: v.LastUpdate,\n\t}\n\ts.host[v.Host] = hostv\n\treturn\n}\n\n\/\/ Clear empties the MemoryStore.\nfunc (s *MemoryStore) Clear() {\n\tinitMemoryStore(s)\n}\n<commit_msg>added godoc comments<commit_after>\/\/ Package memstore is an in-memory implementation of the UpdateStore interface.\npackage memstore\n\nimport (\n\t\"partisci\/version\"\n\t\"time\"\n)\n\n\/\/ MemoryStore is an in-memory implementation of the UpdateStore interface.\ntype MemoryStore struct {\n\tversion map[string]version.Version\n\tapp map[string]version.AppSummary\n\thost map[string]version.HostSummary\n\tthreshold time.Time\n}\n\nfunc initMemoryStore(m *MemoryStore) {\n\tm.version = make(map[string]version.Version)\n\tm.app = make(map[string]version.AppSummary)\n\tm.host = make(map[string]version.HostSummary)\n\tm.threshold = time.Now()\n}\n\n\/\/ NewMemoryStore returns a new, initialized MemoryStore.\nfunc NewMemoryStore() (m *MemoryStore) {\n\tm = new(MemoryStore)\n\tinitMemoryStore(m)\n\treturn\n}\n\n\/\/ Apps returns summary information about each application, based on the known Versions.\nfunc (s *MemoryStore) Apps() []version.AppSummary {\n\tvs := make([]version.AppSummary, 0)\n\tfor _, v := range s.app {\n\t\tvs = append(vs, v)\n\t}\n\treturn vs\n}\n\n\/\/ Hosts returns summary information about each host, based on the known Versions.\nfunc (s *MemoryStore) Hosts() []version.HostSummary {\n\tvs := make([]version.HostSummary, 0)\n\tfor _, v := range s.host {\n\t\tvs = append(vs, v)\n\t}\n\treturn vs\n}\n\n\/\/ Versions returns full Version structs where their values match app_id, host\n\/\/ and ver. Zero length strings are considered a match for all Versions.\nfunc (s *MemoryStore) Versions(app_id string,\n\thost string, ver string) []version.Version {\n\tvs := make([]version.Version, 0)\n\tfor _, v := range s.version {\n\t\tif (len(app_id) == 0 || app_id == v.AppId) &&\n\t\t\t(len(host) == 0 || host == v.Host) &&\n\t\t\t(len(ver) == 0 || ver == v.Ver) {\n\t\t\tvs = append(vs, v)\n\t\t}\n\t}\n\treturn vs\n}\n\n\/\/ Update stores a Version and updates app and host summaries.\nfunc (s *MemoryStore) Update(v version.Version) (err error) {\n\tkey := v.Key()\n\t_, vpresent := s.version[key]\n\tif v.ExactUpdate.After(s.threshold) {\n\t\ts.version[key] = v\n\t}\n\n\t\/\/ app map\n\tas, present := s.app[v.AppId]\n\tif present {\n\t\tas.LastUpdate = v.LastUpdate\n\t\tif !vpresent {\n\t\t\tas.HostCount++\n\t\t}\n\t} else {\n\t\tappv := version.AppSummary{\n\t\t\tApp: v.App,\n\t\t\tAppId: v.AppId,\n\t\t\tLastUpdate: v.LastUpdate,\n\t\t\tHostCount: 1,\n\t\t}\n\t\ts.app[v.AppId] = appv\n\t}\n\n\t\/\/ host map\n\thostv := version.HostSummary{\n\t\tHost: v.Host,\n\t\tLastUpdate: v.LastUpdate,\n\t}\n\ts.host[v.Host] = hostv\n\treturn\n}\n\n\/\/ Clear empties the MemoryStore.\nfunc (s *MemoryStore) Clear() {\n\tinitMemoryStore(s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (c) 2016 Dennis Chen\n\/\/\n\/\/ This file is part of Clashr.\n\/\/\n\/\/ Clashr is free software: you can redistribute it and\/or modify it under the\n\/\/ terms of the GNU Affero General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ Clashr is distributed in the hope that it will be useful, but WITHOUT ANY\n\/\/ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n\/\/ FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with Clashr. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\/\/ Package cgroup provides a large amount of functionality for properly\n\/\/ implementing cgroups in the judging and tracking of programs in Clashr.\n\/\/ Most of this work is inspired by the DOMJudge implementation runguard.c and\n\/\/ attempts to mimic a majoritiy of it's behavior\n\/\/\npackage cgroup\n\n\/\/ #cgo pkg-config: libcgroup\n\/\/ #include <libcgroup.h>\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ A uniform interface for the cgroup actions.\ntype Cgroup struct {\n\tname string\n\tcpus string\n\tmems int64\n}\n\n\/\/ OutputStats returns the memory used (in bytes) by a process controlled by a\n\/\/ cgroup.\nfunc (g *Cgroup) OutputStats() (int64, error) {\n\tvar param *C.char\n\n\tparam = C.CString(g.name)\n\tcg := C.cgroup_new_cgroup(param)\n\tif cg == nil {\n\t\treturn 0, errors.New(\"cgroup_new_cgroup\")\n\t}\n\tdefer C.cgroup_free(&cg)\n\n\tif ret := C.cgroup_get_cgroup(cg); ret != 0 {\n\t\terr := fmt.Errorf(\"Get cgroup information: %s(d)\",\n\t\t\tC.cgroup_strerror(ret), ret)\n\t\treturn 0, err\n\t}\n\tparam = C.CString(\"memory\")\n\tcgController := C.cgroup_get_controller(cg, param)\n\n\tvar maxUsage C.int64_t\n\tparam = C.CString(\"memory.memsw.max_usage_in_bytes\")\n\tif ret := C.cgroup_get_value_int64(cgController, param, &maxUsage); ret != 0 {\n\t\terr := fmt.Errorf(\"Get cgroup value: %s(%d)\",\n\t\t\tC.cgroup_strerror(ret), ret)\n\t\treturn 0, err\n\t}\n\n\treturn int64(maxUsage), nil\n}\n\n\/\/ Create makes a new instance of the cgroup interface. It sets the memory\n\/\/ limit of the cgroup to memsize, and the set of CPUs to use to cpuset. If\n\/\/ memory does not need to be limited, set memsize to syscall.RLIMIT_INFINITY.\n\/\/ Similarly, set cpuset to the empty string if CPUs do not need to be set.\nfunc Create(cgroupname, cpuset string, memsize int64) (Cgroup, error) {\n\tvar param *C.char\n\n\tparam = C.CString(cgroupname)\n\tcg := C.cgroup_new_cgroup(param)\n\tif cg == nil {\n\t\treturn Cgroup{}, errors.New(\"cgroup_new_cgroup\")\n\t}\n\tdefer C.cgroup_free(&cg)\n\n\t\/\/ Set memory restrictions. We limit both ram and ram+swap to the same\n\t\/\/ amount to prevent swapping.\n\tparam = C.CString(\"memory\")\n\tcgController := C.cgroup_add_controller(cg, param)\n\n\tmem := C.int64_t(memsize)\n\tparam = C.CString(\"memory.limit_in_bytes\")\n\tC.cgroup_add_value_int64(cgController, param, mem)\n\tparam = C.CString(\"memory.memsw.limit_in_bytes\")\n\tC.cgroup_add_value_int64(cgController, param, mem)\n\n\tif len(cpuset) > 0 {\n\t\tparam = C.CString(\"cpuset\")\n\t\tcgController = C.cgroup_add_controller(cg, param)\n\n\t\tvar arg *C.char\n\t\tparam = C.CString(\"cpuset.mems\")\n\t\targ = C.CString(\"0\")\n\t\tC.cgroup_add_value_string(cgController, param, arg)\n\t\tparam = C.CString(\"cpuset.cpus\")\n\t\targ = C.CString(cpuset)\n\t\tC.cgroup_add_value_string(cgController, param, arg)\n\t}\n\treturn Cgroup{cgroupname, cpuset, memsize}, nil\n}\n<commit_msg>cgroup: actually create a new cgroup instead of break<commit_after>\/\/\n\/\/ Copyright (c) 2016 Dennis Chen\n\/\/\n\/\/ This file is part of Clashr.\n\/\/\n\/\/ Clashr is free software: you can redistribute it and\/or modify it under the\n\/\/ terms of the GNU Affero General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ Clashr is distributed in the hope that it will be useful, but WITHOUT ANY\n\/\/ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n\/\/ FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with Clashr. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\/\/ Package cgroup provides a large amount of functionality for properly\n\/\/ implementing cgroups in the judging and tracking of programs in Clashr.\n\/\/ Most of this work is inspired by the DOMJudge implementation runguard.c and\n\/\/ attempts to mimic a majoritiy of it's behavior\n\/\/\npackage cgroup\n\n\/\/ #cgo pkg-config: libcgroup\n\/\/ #include <libcgroup.h>\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ A uniform interface for the cgroup actions.\ntype Cgroup struct {\n\tname string\n\tcpus string\n\tmems int64\n}\n\n\/\/ OutputStats returns the memory used (in bytes) by a process controlled by a\n\/\/ cgroup.\nfunc (g *Cgroup) OutputStats() (int64, error) {\n\tvar param *C.char\n\n\tparam = C.CString(g.name)\n\tcg := C.cgroup_new_cgroup(param)\n\tif cg == nil {\n\t\treturn 0, errors.New(\"cgroup_new_cgroup\")\n\t}\n\tdefer C.cgroup_free(&cg)\n\n\tif ret := C.cgroup_get_cgroup(cg); ret != 0 {\n\t\terr := fmt.Errorf(\"Get cgroup information: %s(d)\",\n\t\t\tC.cgroup_strerror(ret), ret)\n\t\treturn 0, err\n\t}\n\tparam = C.CString(\"memory\")\n\tcgController := C.cgroup_get_controller(cg, param)\n\n\tvar maxUsage C.int64_t\n\tparam = C.CString(\"memory.memsw.max_usage_in_bytes\")\n\tif ret := C.cgroup_get_value_int64(cgController, param, &maxUsage); ret != 0 {\n\t\terr := fmt.Errorf(\"Get cgroup value: %s(%d)\",\n\t\t\tC.cgroup_strerror(ret), ret)\n\t\treturn 0, err\n\t}\n\n\treturn int64(maxUsage), nil\n}\n\n\/\/ Create makes a new instance of the cgroup interface. It sets the memory\n\/\/ limit of the cgroup to memsize, and the set of CPUs to use to cpuset. If\n\/\/ memory does not need to be limited, set memsize to syscall.RLIMIT_INFINITY.\n\/\/ Similarly, set cpuset to the empty string if CPUs do not need to be set.\nfunc Create(cgroupname, cpuset string, memsize int64) (Cgroup, error) {\n\tvar param *C.char\n\n\tparam = C.CString(cgroupname)\n\tcg := C.cgroup_new_cgroup(param)\n\tif cg == nil {\n\t\treturn Cgroup{}, errors.New(\"cgroup_new_cgroup\")\n\t}\n\tdefer C.cgroup_free(&cg)\n\n\t\/\/ Set memory restrictions. We limit both ram and ram+swap to the same\n\t\/\/ amount to prevent swapping.\n\tparam = C.CString(\"memory\")\n\tcgController := C.cgroup_add_controller(cg, param)\n\n\tmem := C.int64_t(memsize)\n\tparam = C.CString(\"memory.limit_in_bytes\")\n\tC.cgroup_add_value_int64(cgController, param, mem)\n\tparam = C.CString(\"memory.memsw.limit_in_bytes\")\n\tC.cgroup_add_value_int64(cgController, param, mem)\n\n\t\/\/ Setup CPU restrictions. The task is pinned to a specific set of CPUs\n\t\/\/ if a non-empty string is passed.\n\tif len(cpuset) > 0 {\n\t\tparam = C.CString(\"cpuset\")\n\t\tcgController = C.cgroup_add_controller(cg, param)\n\n\t\tvar arg *C.char\n\t\tparam = C.CString(\"cpuset.mems\")\n\t\targ = C.CString(\"0\")\n\t\tC.cgroup_add_value_string(cgController, param, arg)\n\t\tparam = C.CString(\"cpuset.cpus\")\n\t\targ = C.CString(cpuset)\n\t\tC.cgroup_add_value_string(cgController, param, arg)\n\t}\n\n\t\/\/ Perform the creation of the cgroup.\n\tif ret := C.cgroup_create_cgroup(cg, 1); ret != 0 {\n\t\terr := fmt.Errorf(\"Creating cgroup: %s(%d)\",\n\t\t\tC.cgroup_stderror(ret), ret)\n\t\treturn Cgroup{}, err\n\t}\n\treturn Cgroup{cgroupname, cpuset, memsize}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011, 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage charm\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"reflect\"\n\n\t\"github.com\/xeipuuv\/gojsonschema\"\n\t\"launchpad.net\/goyaml\"\n)\n\n\/\/ Actions defines the available actions for the charm.\ntype Actions struct {\n\tActionSpecs map[string]ActionSpec\n}\n\n\/\/ ActionSpec is a definition of the parameters and traits of an Action.\ntype ActionSpec struct {\n\tDescription string\n\tParams map[string]interface{}\n}\n\n\/\/ NewActions returns a new Actions object without any defined content.\nfunc NewActions() *Actions {\n\treturn &Actions{map[string]ActionSpec{}}\n}\n\n\/\/ ReadActions builds an Actions spec from a charm's actions.yaml.\nfunc ReadActionsYaml(r io.Reader) (*Actions, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar actionsSpec *Actions\n\tif err := goyaml.Unmarshal(data, &actionsSpec); err != nil {\n\t\treturn nil, err\n\t}\n\tif actionsSpec == nil {\n\t\treturn nil, fmt.Errorf(\"Empty actions definition\")\n\t}\n\tif reflect.DeepEqual(actionsSpec, &Actions{}) {\n\t\treturn nil, fmt.Errorf(\"actions.yaml failed to unmarshal -- key mismatch\")\n\t}\n\tfor _, actionSpec := range actionsSpec.ActionSpecs {\n\t\t_, err := gojsonschema.NewJsonSchemaDocument(actionSpec.Params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error loading Charm actions.yaml -- nonconformant params: %v\", err)\n\t\t}\n\t}\n\treturn actionsSpec, nil\n}\n<commit_msg>Applied requested changes to charm\/actions.go.<commit_after>\/\/ Copyright 2011, 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage charm\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"github.com\/xeipuuv\/gojsonschema\"\n\t\"launchpad.net\/goyaml\"\n)\n\n\/\/ Actions defines the available actions for the charm.\ntype Actions struct {\n\tActionSpecs map[string]ActionSpec\n}\n\n\/\/ ActionSpec is a definition of the parameters and traits of an Action.\ntype ActionSpec struct {\n\tDescription string\n\tParams map[string]interface{}\n}\n\n\/\/ NewActions returns a new Actions object without any defined content.\nfunc NewActions() *Actions {\n\treturn &Actions{map[string]ActionSpec{}}\n}\n\n\/\/ ReadActions builds an Actions spec from a charm's actions.yaml.\nfunc ReadActionsYaml(r io.Reader) (*Actions, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar actionsSpec *Actions\n\tif err := goyaml.Unmarshal(data, &actionsSpec); err != nil {\n\t\treturn nil, err\n\t}\n\tif actionsSpec == nil {\n\t\treturn nil, fmt.Errorf(\"empty actions definition\")\n\t}\n\tif data != []byte{} && reflect.DeepEqual(actionsSpec, &Actions{}) {\n\t\treturn nil, fmt.Errorf(\"actions.yaml failed to unmarshal -- key mismatch\")\n\t}\n\n\tnameRule := regexp.MustCompile(\"^[^-][a-z-]+[^-]$\")\n\n\tfor name, actionSpec := range actionsSpec.ActionSpecs {\n\t\tbadName := !nameRule.MatchString(name)\n\t\tif badName {\n\t\t\treturn nil, fmt.Errorf(\"bad action name %s\", name)\n\t\t}\n\t\t_, err := gojsonschema.NewJsonSchemaDocument(actionSpec.Params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid params schema for action %q: %v\", err)\n\t\t}\n\t}\n\treturn actionsSpec, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package untrusted\n\nimport (\n\t\"fmt\"\n\n\t\"bazil.org\/bazil\/cas\"\n\t\"bazil.org\/bazil\/kv\"\n\t\"bazil.org\/bazil\/tokens\"\n\t\"code.google.com\/p\/go.crypto\/nacl\/secretbox\"\n\t\"github.com\/dchest\/blake2b\"\n)\n\ntype Convergent struct {\n\t\/\/ not a CAS because key derives from the plaintext version, but\n\t\/\/ used like a Fixed Content Storage (FCS)\n\tuntrusted kv.KV\n\tsecret *[32]byte\n}\n\nvar _ = kv.KV(&Convergent{})\n\nvar personalizeKey = []byte(tokens.Blake2bPersonalizationConvergentKey)\n\nfunc (s *Convergent) computeBoxedKey(key []byte) []byte {\n\tconf := blake2b.Config{\n\t\tSize: cas.KeySize,\n\t\tKey: s.secret[:],\n\t\tPerson: personalizeKey,\n\t}\n\th, err := blake2b.New(&conf)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"blake2 config failure: %v\", err))\n\t}\n\t\/\/ hash.Hash docs say it never fails\n\t_, _ = h.Write(key)\n\treturn h.Sum(nil)\n}\n\nconst nonceSize = 24\n\nvar personalizeNonce = []byte(tokens.Blake2bPersonalizationConvergentNonce)\n\n\/\/ Nonce summarizes key, type and level so mismatch of e.g. type can\n\/\/ be detected.\nfunc (s *Convergent) makeNonce(key []byte) *[nonceSize]byte {\n\tconf := blake2b.Config{\n\t\tSize: nonceSize,\n\t\tPerson: personalizeNonce,\n\t}\n\th, err := blake2b.New(&conf)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"blake2 config failure: %v\", err))\n\t}\n\t\/\/ hash.Hash docs say it never fails\n\t_, _ = h.Write(key)\n\n\tvar ret [nonceSize]byte\n\th.Sum(ret[:0])\n\treturn &ret\n}\n\nfunc (s *Convergent) Get(key []byte) ([]byte, error) {\n\tboxedkey := s.computeBoxedKey(key)\n\tbox, err := s.untrusted.Get(boxedkey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := s.makeNonce(key)\n\tplain, ok := secretbox.Open(nil, box, nonce, s.secret)\n\tif !ok {\n\t\treturn nil, Corrupt{Key: key}\n\t}\n\treturn plain, nil\n}\n\nfunc (s *Convergent) Put(key []byte, value []byte) error {\n\tnonce := s.makeNonce(key)\n\tbox := secretbox.Seal(nil, value, nonce, s.secret)\n\n\tboxedkey := s.computeBoxedKey(key)\n\terr := s.untrusted.Put(boxedkey, box)\n\treturn err\n}\n\nfunc New(store kv.KV, secret *[32]byte) *Convergent {\n\treturn &Convergent{\n\t\tuntrusted: store,\n\t\tsecret: secret,\n\t}\n}\n\ntype Corrupt struct {\n\tKey []byte\n}\n\nfunc (c Corrupt) Error() string {\n\treturn fmt.Sprintf(\"corrupt encrypted chunk: %x\", c.Key)\n}\n\nvar _ = error(Corrupt{})\n<commit_msg>kv\/untrusted: Use cgo blake2b for a 5.5x speedup in raw hashing<commit_after>package untrusted\n\nimport (\n\t\"fmt\"\n\n\t\"bazil.org\/bazil\/cas\"\n\t\"bazil.org\/bazil\/kv\"\n\t\"bazil.org\/bazil\/tokens\"\n\t\"code.google.com\/p\/go.crypto\/nacl\/secretbox\"\n\t\"github.com\/codahale\/blake2\"\n)\n\ntype Convergent struct {\n\t\/\/ not a CAS because key derives from the plaintext version, but\n\t\/\/ used like a Fixed Content Storage (FCS)\n\tuntrusted kv.KV\n\tsecret *[32]byte\n}\n\nvar _ = kv.KV(&Convergent{})\n\nvar personalizeKey = []byte(tokens.Blake2bPersonalizationConvergentKey)\n\nfunc (s *Convergent) computeBoxedKey(key []byte) []byte {\n\tconf := blake2.Config{\n\t\tSize: cas.KeySize,\n\t\tKey: s.secret[:],\n\t\tPersonal: personalizeKey,\n\t}\n\th := blake2.New(&conf)\n\t\/\/ hash.Hash docs say it never fails\n\t_, _ = h.Write(key)\n\treturn h.Sum(nil)\n}\n\nconst nonceSize = 24\n\nvar personalizeNonce = []byte(tokens.Blake2bPersonalizationConvergentNonce)\n\n\/\/ Nonce summarizes key, type and level so mismatch of e.g. type can\n\/\/ be detected.\nfunc (s *Convergent) makeNonce(key []byte) *[nonceSize]byte {\n\tconf := blake2.Config{\n\t\tSize: nonceSize,\n\t\tPersonal: personalizeNonce,\n\t}\n\th := blake2.New(&conf)\n\t\/\/ hash.Hash docs say it never fails\n\t_, _ = h.Write(key)\n\n\tvar ret [nonceSize]byte\n\th.Sum(ret[:0])\n\treturn &ret\n}\n\nfunc (s *Convergent) Get(key []byte) ([]byte, error) {\n\tboxedkey := s.computeBoxedKey(key)\n\tbox, err := s.untrusted.Get(boxedkey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := s.makeNonce(key)\n\tplain, ok := secretbox.Open(nil, box, nonce, s.secret)\n\tif !ok {\n\t\treturn nil, Corrupt{Key: key}\n\t}\n\treturn plain, nil\n}\n\nfunc (s *Convergent) Put(key []byte, value []byte) error {\n\tnonce := s.makeNonce(key)\n\tbox := secretbox.Seal(nil, value, nonce, s.secret)\n\n\tboxedkey := s.computeBoxedKey(key)\n\terr := s.untrusted.Put(boxedkey, box)\n\treturn err\n}\n\nfunc New(store kv.KV, secret *[32]byte) *Convergent {\n\treturn &Convergent{\n\t\tuntrusted: store,\n\t\tsecret: secret,\n\t}\n}\n\ntype Corrupt struct {\n\tKey []byte\n}\n\nfunc (c Corrupt) Error() string {\n\treturn fmt.Sprintf(\"corrupt encrypted chunk: %x\", c.Key)\n}\n\nvar _ = error(Corrupt{})\n<|endoftext|>"} {"text":"<commit_before>package urlvalues\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/go-pg\/pg\/internal\/structfilter\"\n)\n\n\/\/ Decode decodes url values into the struct.\nfunc Decode(strct interface{}, values Values) error {\n\tv := reflect.Indirect(reflect.ValueOf(strct))\n\tmeta := structfilter.GetStruct(v.Type())\n\n\tfor name, values := range values {\n\t\tif strings.HasPrefix(name, \":\") {\n\t\t\tname = name[1:]\n\t\t}\n\t\tif strings.HasSuffix(name, \"[]\") {\n\t\t\tname = name[:len(name)-2]\n\t\t}\n\n\t\tfield := meta.Field(name)\n\t\tif field != nil && !field.NoDecode() {\n\t\t\terr := field.Scan(field.Value(v), values)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>urlvalues: use TrimPrefix and TrimSuffix<commit_after>package urlvalues\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/go-pg\/pg\/internal\/structfilter\"\n)\n\n\/\/ Decode decodes url values into the struct.\nfunc Decode(strct interface{}, values Values) error {\n\tv := reflect.Indirect(reflect.ValueOf(strct))\n\tmeta := structfilter.GetStruct(v.Type())\n\n\tfor name, values := range values {\n\t\tname = strings.TrimPrefix(name, \":\")\n\t\tname = strings.TrimSuffix(name, \"[]\")\n\n\t\tfield := meta.Field(name)\n\t\tif field != nil && !field.NoDecode() {\n\t\t\terr := field.Scan(field.Value(v), values)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/armon\/go-metrics\"\n\t\"github.com\/hashicorp\/logutils\"\n\t\"github.com\/hashicorp\/vault\/audit\"\n\t\"github.com\/hashicorp\/vault\/command\/server\"\n\t\"github.com\/hashicorp\/vault\/helper\/flag-slice\"\n\t\"github.com\/hashicorp\/vault\/helper\/gated-writer\"\n\t\"github.com\/hashicorp\/vault\/helper\/mlock\"\n\tvaulthttp \"github.com\/hashicorp\/vault\/http\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/physical\"\n\t\"github.com\/hashicorp\/vault\/vault\"\n)\n\n\/\/ ServerCommand is a Command that starts the Vault server.\ntype ServerCommand struct {\n\tAuditBackends map[string]audit.Factory\n\tCredentialBackends map[string]logical.Factory\n\tLogicalBackends map[string]logical.Factory\n\n\tMeta\n}\n\nfunc (c *ServerCommand) Run(args []string) int {\n\tvar dev bool\n\tvar configPath []string\n\tvar logLevel string\n\tflags := c.Meta.FlagSet(\"server\", FlagSetDefault)\n\tflags.BoolVar(&dev, \"dev\", false, \"\")\n\tflags.StringVar(&logLevel, \"log-level\", \"info\", \"\")\n\tflags.Usage = func() { c.Ui.Error(c.Help()) }\n\tflags.Var((*sliceflag.StringFlag)(&configPath), \"config\", \"config\")\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Validation\n\tif !dev && len(configPath) == 0 {\n\t\tc.Ui.Error(\"At least one config path must be specified with -config\")\n\t\tflags.Usage()\n\t\treturn 1\n\t}\n\n\t\/\/ Load the configuration\n\tvar config *server.Config\n\tif dev {\n\t\tconfig = server.DevConfig()\n\t}\n\tfor _, path := range configPath {\n\t\tcurrent, err := server.LoadConfig(path)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error loading configuration from %s: %s\", path, err))\n\t\t\treturn 1\n\t\t}\n\n\t\tif config == nil {\n\t\t\tconfig = current\n\t\t} else {\n\t\t\tconfig = config.Merge(current)\n\t\t}\n\t}\n\n\t\/\/ If mlock isn't supported, show a warning. We disable this in\n\t\/\/ dev because it is quite scary to see when first using Vault.\n\tif !dev && !mlock.Supported() {\n\t\tc.Ui.Output(\"==> WARNING: mlock not supported on this system!\\n\")\n\t\tc.Ui.Output(\" The `mlock` syscall to prevent memory from being swapped to\")\n\t\tc.Ui.Output(\" disk is not supported on this system. Enabling mlock or\")\n\t\tc.Ui.Output(\" running Vault on a system with mlock is much more secure.\\n\")\n\t}\n\n\t\/\/ Create a logger. We wrap it in a gated writer so that it doesn't\n\t\/\/ start logging too early.\n\tlogGate := &gatedwriter.Writer{Writer: os.Stderr}\n\tlogger := log.New(&logutils.LevelFilter{\n\t\tLevels: []logutils.LogLevel{\n\t\t\t\"TRACE\", \"DEBUG\", \"INFO\", \"WARN\", \"ERR\"},\n\t\tMinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),\n\t\tWriter: logGate,\n\t}, \"\", log.LstdFlags)\n\n\t\/\/ Initialize the backend\n\tbackend, err := physical.NewBackend(\n\t\tconfig.Backend.Type, config.Backend.Config)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\"Error initializing backend of type %s: %s\",\n\t\t\tconfig.Backend.Type, err))\n\t\treturn 1\n\t}\n\n\t\/\/ Attempt to detect the advertise address possible\n\tif detect, ok := backend.(physical.AdvertiseDetect); ok && config.Backend.AdvertiseAddr == \"\" {\n\t\tadvertise, err := c.detectAdvertise(detect, config)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Error detecting advertise address: %s\", err))\n\t\t} else if advertise == \"\" {\n\t\t\tc.Ui.Error(\"Failed to detect advertise address.\")\n\t\t} else {\n\t\t\tconfig.Backend.AdvertiseAddr = advertise\n\t\t}\n\t}\n\n\t\/\/ Initialize the core\n\tcore, err := vault.NewCore(&vault.CoreConfig{\n\t\tAdvertiseAddr: config.Backend.AdvertiseAddr,\n\t\tPhysical: backend,\n\t\tAuditBackends: c.AuditBackends,\n\t\tCredentialBackends: c.CredentialBackends,\n\t\tLogicalBackends: c.LogicalBackends,\n\t\tLogger: logger,\n\t\tDisableMlock: config.DisableMlock,\n\t})\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing core: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ If we're in dev mode, then initialize the core\n\tif dev {\n\t\tinit, err := c.enableDev(core)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error initializing dev mode: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\tc.Ui.Output(fmt.Sprintf(\n\t\t\t\"==> WARNING: Dev mode is enabled!\\n\\n\"+\n\t\t\t\t\"In this mode, Vault is completely in-memory and unsealed.\\n\"+\n\t\t\t\t\"Vault is configured to only have a single unseal key. The root\\n\"+\n\t\t\t\t\"token has already been authenticated with the CLI, so you can\\n\"+\n\t\t\t\t\"immediately begin using the Vault CLI.\\n\\n\"+\n\t\t\t\t\"The only step you need to take is to set the following\\n\"+\n\t\t\t\t\"environment variables:\\n\\n\"+\n\t\t\t\t\" export VAULT_ADDR='http:\/\/127.0.0.1:8200'\\n\"+\n\t\t\t\t\" export VAULT_TOKEN='%s'\\n\\n\"+\n\t\t\t\t\"The unseal key and root token are reproduced below in case you\\n\"+\n\t\t\t\t\"want to seal\/unseal the Vault or play with authentication.\\n\\n\"+\n\t\t\t\t\"Unseal Key: %s\\nRoot Token: %s\\n\",\n\t\t\tinit.RootToken,\n\t\t\thex.EncodeToString(init.SecretShares[0]),\n\t\t\tinit.RootToken,\n\t\t))\n\t}\n\n\t\/\/ Compile server information for output later\n\tinfoKeys := make([]string, 0, 10)\n\tinfo := make(map[string]string)\n\tinfo[\"backend\"] = config.Backend.Type\n\tinfo[\"log level\"] = logLevel\n\tinfo[\"mlock\"] = fmt.Sprintf(\n\t\t\"supported: %v, enabled: %v\",\n\t\tmlock.Supported(), !config.DisableMlock)\n\tinfoKeys = append(infoKeys, \"log level\", \"mlock\", \"backend\")\n\n\t\/\/ If the backend supports HA, then note it\n\tif _, ok := backend.(physical.HABackend); ok {\n\t\tinfo[\"backend\"] += \" (HA available)\"\n\t\tinfo[\"advertise address\"] = config.Backend.AdvertiseAddr\n\t\tinfoKeys = append(infoKeys, \"advertise address\")\n\t}\n\n\t\/\/ Initialize the telemetry\n\tif err := c.setupTelementry(config); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing telemetry: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Initialize the listeners\n\tlns := make([]net.Listener, 0, len(config.Listeners))\n\tfor i, lnConfig := range config.Listeners {\n\t\tln, props, err := server.NewListener(lnConfig.Type, lnConfig.Config)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error initializing listener of type %s: %s\",\n\t\t\t\tlnConfig.Type, err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Store the listener props for output later\n\t\tkey := fmt.Sprintf(\"listener %d\", i+1)\n\t\tpropsList := make([]string, 0, len(props))\n\t\tfor k, v := range props {\n\t\t\tpropsList = append(propsList, fmt.Sprintf(\n\t\t\t\t\"%s: %q\", k, v))\n\t\t}\n\t\tsort.Strings(propsList)\n\t\tinfoKeys = append(infoKeys, key)\n\t\tinfo[key] = fmt.Sprintf(\n\t\t\t\"%s (%s)\", lnConfig.Type, strings.Join(propsList, \", \"))\n\n\t\tlns = append(lns, ln)\n\t}\n\n\t\/\/ Initialize the HTTP server\n\tserver := &http.Server{}\n\tserver.Handler = vaulthttp.Handler(core)\n\tfor _, ln := range lns {\n\t\tgo server.Serve(ln)\n\t}\n\n\t\/\/ Server configuration output\n\tpadding := 18\n\tc.Ui.Output(\"==> Vault server configuration:\\n\")\n\tfor _, k := range infoKeys {\n\t\tc.Ui.Output(fmt.Sprintf(\n\t\t\t\"%s%s: %s\",\n\t\t\tstrings.Repeat(\" \", padding-len(k)),\n\t\t\tstrings.Title(k),\n\t\t\tinfo[k]))\n\t}\n\tc.Ui.Output(\"\")\n\n\t\/\/ Output the header that the server has started\n\tc.Ui.Output(\"==> Vault server started! Log data will stream in below:\\n\")\n\n\t\/\/ Release the log gate.\n\tlogGate.Flush()\n\n\t<-make(chan struct{})\n\treturn 0\n}\n\nfunc (c *ServerCommand) enableDev(core *vault.Core) (*vault.InitResult, error) {\n\t\/\/ Initialize it with a basic single key\n\tinit, err := core.Initialize(&vault.SealConfig{\n\t\tSecretShares: 1,\n\t\tSecretThreshold: 1,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Copy the key so that it can be zeroed\n\tkey := make([]byte, len(init.SecretShares[0]))\n\tcopy(key, init.SecretShares[0])\n\n\t\/\/ Unseal the core\n\tunsealed, err := core.Unseal(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !unsealed {\n\t\treturn nil, fmt.Errorf(\"failed to unseal Vault for dev mode\")\n\t}\n\n\t\/\/ Set the token\n\ttokenHelper, err := c.TokenHelper()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tokenHelper.Store(init.RootToken); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn init, nil\n}\n\n\/\/ detectAdvertise is used to attempt advertise address detection\nfunc (c *ServerCommand) detectAdvertise(detect physical.AdvertiseDetect,\n\tconfig *server.Config) (string, error) {\n\t\/\/ Get the hostname\n\thost, err := detect.DetectHostAddr()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Default the port and scheme\n\tscheme := \"https\"\n\tport := 8200\n\n\t\/\/ Attempt to detect overrides\n\tfor _, list := range config.Listeners {\n\t\t\/\/ Only attempt TCP\n\t\tif list.Type != \"tcp\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check if TLS is disabled\n\t\tif val, ok := list.Config[\"tls_disable\"]; ok && val == \"1\" {\n\t\t\tscheme = \"http\"\n\t\t}\n\n\t\t\/\/ Check for address override\n\t\taddr, ok := list.Config[\"address\"]\n\t\tif !ok {\n\t\t\taddr = \"127.0.0.1:8200\"\n\t\t}\n\n\t\t\/\/ Check for localhost\n\t\thostStr, portStr, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif hostStr == \"127.0.0.1\" {\n\t\t\thost = hostStr\n\t\t}\n\n\t\t\/\/ Check for custom port\n\t\tlistPort, err := strconv.Atoi(portStr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tport = listPort\n\t}\n\n\t\/\/ Build a URL\n\turl := &url.URL{\n\t\tScheme: scheme,\n\t\tHost: fmt.Sprintf(\"%s:%d\", host, port),\n\t}\n\n\t\/\/ Return the URL string\n\treturn url.String(), nil\n}\n\n\/\/ setupTelementry is used ot setup the telemetry sub-systems\nfunc (c *ServerCommand) setupTelementry(config *server.Config) error {\n\t\/* Setup telemetry\n\tAggregate on 10 second intervals for 1 minute. Expose the\n\tmetrics over stderr when there is a SIGUSR1 received.\n\t*\/\n\tinm := metrics.NewInmemSink(10*time.Second, time.Minute)\n\tmetrics.DefaultInmemSignal(inm)\n\tmetricsConf := metrics.DefaultConfig(\"vault\")\n\n\t\/\/ Configure the statsite sink\n\tvar fanout metrics.FanoutSink\n\tif config.StatsiteAddr != \"\" {\n\t\tsink, err := metrics.NewStatsiteSink(config.StatsiteAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfanout = append(fanout, sink)\n\t}\n\n\t\/\/ Configure the statsd sink\n\tif config.StatsdAddr != \"\" {\n\t\tsink, err := metrics.NewStatsdSink(config.StatsdAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfanout = append(fanout, sink)\n\t}\n\n\t\/\/ Initialize the global sink\n\tif len(fanout) > 0 {\n\t\tfanout = append(fanout, inm)\n\t\tmetrics.NewGlobal(metricsConf, fanout)\n\t} else {\n\t\tmetricsConf.EnableHostname = false\n\t\tmetrics.NewGlobal(metricsConf, inm)\n\t}\n\treturn nil\n}\n\nfunc (c *ServerCommand) Synopsis() string {\n\treturn \"Start a Vault server\"\n}\n\nfunc (c *ServerCommand) Help() string {\n\thelpText := `\nUsage: vault server [options]\n\n Start a Vault server.\n\n This command starts a Vault server that responds to API requests.\n Vault will start in a \"sealed\" state. The Vault must be unsealed\n with \"vault unseal\" or the API before this server can respond to requests.\n This must be done for every server.\n\n If the server is being started against a storage backend that has\n brand new (no existing Vault data in it), it must be initialized with\n \"vault init\" or the API first.\n\n\nGeneral Options:\n\n -config=<path> Path to the configuration file or directory. This can be\n specified multiple times. If it is a directory, all\n files with a \".hcl\" or \".json\" suffix will be loaded.\n\n -log-level=info Log verbosity. Defaults to \"info\", will be outputted\n to stderr.\n\n`\n\treturn strings.TrimSpace(helpText)\n}\n<commit_msg>Use strconv.ParseBool<commit_after>package command\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/armon\/go-metrics\"\n\t\"github.com\/hashicorp\/logutils\"\n\t\"github.com\/hashicorp\/vault\/audit\"\n\t\"github.com\/hashicorp\/vault\/command\/server\"\n\t\"github.com\/hashicorp\/vault\/helper\/flag-slice\"\n\t\"github.com\/hashicorp\/vault\/helper\/gated-writer\"\n\t\"github.com\/hashicorp\/vault\/helper\/mlock\"\n\tvaulthttp \"github.com\/hashicorp\/vault\/http\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/physical\"\n\t\"github.com\/hashicorp\/vault\/vault\"\n)\n\n\/\/ ServerCommand is a Command that starts the Vault server.\ntype ServerCommand struct {\n\tAuditBackends map[string]audit.Factory\n\tCredentialBackends map[string]logical.Factory\n\tLogicalBackends map[string]logical.Factory\n\n\tMeta\n}\n\nfunc (c *ServerCommand) Run(args []string) int {\n\tvar dev bool\n\tvar configPath []string\n\tvar logLevel string\n\tflags := c.Meta.FlagSet(\"server\", FlagSetDefault)\n\tflags.BoolVar(&dev, \"dev\", false, \"\")\n\tflags.StringVar(&logLevel, \"log-level\", \"info\", \"\")\n\tflags.Usage = func() { c.Ui.Error(c.Help()) }\n\tflags.Var((*sliceflag.StringFlag)(&configPath), \"config\", \"config\")\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Validation\n\tif !dev && len(configPath) == 0 {\n\t\tc.Ui.Error(\"At least one config path must be specified with -config\")\n\t\tflags.Usage()\n\t\treturn 1\n\t}\n\n\t\/\/ Load the configuration\n\tvar config *server.Config\n\tif dev {\n\t\tconfig = server.DevConfig()\n\t}\n\tfor _, path := range configPath {\n\t\tcurrent, err := server.LoadConfig(path)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error loading configuration from %s: %s\", path, err))\n\t\t\treturn 1\n\t\t}\n\n\t\tif config == nil {\n\t\t\tconfig = current\n\t\t} else {\n\t\t\tconfig = config.Merge(current)\n\t\t}\n\t}\n\n\t\/\/ If mlock isn't supported, show a warning. We disable this in\n\t\/\/ dev because it is quite scary to see when first using Vault.\n\tif !dev && !mlock.Supported() {\n\t\tc.Ui.Output(\"==> WARNING: mlock not supported on this system!\\n\")\n\t\tc.Ui.Output(\" The `mlock` syscall to prevent memory from being swapped to\")\n\t\tc.Ui.Output(\" disk is not supported on this system. Enabling mlock or\")\n\t\tc.Ui.Output(\" running Vault on a system with mlock is much more secure.\\n\")\n\t}\n\n\t\/\/ Create a logger. We wrap it in a gated writer so that it doesn't\n\t\/\/ start logging too early.\n\tlogGate := &gatedwriter.Writer{Writer: os.Stderr}\n\tlogger := log.New(&logutils.LevelFilter{\n\t\tLevels: []logutils.LogLevel{\n\t\t\t\"TRACE\", \"DEBUG\", \"INFO\", \"WARN\", \"ERR\"},\n\t\tMinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),\n\t\tWriter: logGate,\n\t}, \"\", log.LstdFlags)\n\n\t\/\/ Initialize the backend\n\tbackend, err := physical.NewBackend(\n\t\tconfig.Backend.Type, config.Backend.Config)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\"Error initializing backend of type %s: %s\",\n\t\t\tconfig.Backend.Type, err))\n\t\treturn 1\n\t}\n\n\t\/\/ Attempt to detect the advertise address possible\n\tif detect, ok := backend.(physical.AdvertiseDetect); ok && config.Backend.AdvertiseAddr == \"\" {\n\t\tadvertise, err := c.detectAdvertise(detect, config)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Error detecting advertise address: %s\", err))\n\t\t} else if advertise == \"\" {\n\t\t\tc.Ui.Error(\"Failed to detect advertise address.\")\n\t\t} else {\n\t\t\tconfig.Backend.AdvertiseAddr = advertise\n\t\t}\n\t}\n\n\t\/\/ Initialize the core\n\tcore, err := vault.NewCore(&vault.CoreConfig{\n\t\tAdvertiseAddr: config.Backend.AdvertiseAddr,\n\t\tPhysical: backend,\n\t\tAuditBackends: c.AuditBackends,\n\t\tCredentialBackends: c.CredentialBackends,\n\t\tLogicalBackends: c.LogicalBackends,\n\t\tLogger: logger,\n\t\tDisableMlock: config.DisableMlock,\n\t})\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing core: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ If we're in dev mode, then initialize the core\n\tif dev {\n\t\tinit, err := c.enableDev(core)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error initializing dev mode: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\tc.Ui.Output(fmt.Sprintf(\n\t\t\t\"==> WARNING: Dev mode is enabled!\\n\\n\"+\n\t\t\t\t\"In this mode, Vault is completely in-memory and unsealed.\\n\"+\n\t\t\t\t\"Vault is configured to only have a single unseal key. The root\\n\"+\n\t\t\t\t\"token has already been authenticated with the CLI, so you can\\n\"+\n\t\t\t\t\"immediately begin using the Vault CLI.\\n\\n\"+\n\t\t\t\t\"The only step you need to take is to set the following\\n\"+\n\t\t\t\t\"environment variables:\\n\\n\"+\n\t\t\t\t\" export VAULT_ADDR='http:\/\/127.0.0.1:8200'\\n\"+\n\t\t\t\t\" export VAULT_TOKEN='%s'\\n\\n\"+\n\t\t\t\t\"The unseal key and root token are reproduced below in case you\\n\"+\n\t\t\t\t\"want to seal\/unseal the Vault or play with authentication.\\n\\n\"+\n\t\t\t\t\"Unseal Key: %s\\nRoot Token: %s\\n\",\n\t\t\tinit.RootToken,\n\t\t\thex.EncodeToString(init.SecretShares[0]),\n\t\t\tinit.RootToken,\n\t\t))\n\t}\n\n\t\/\/ Compile server information for output later\n\tinfoKeys := make([]string, 0, 10)\n\tinfo := make(map[string]string)\n\tinfo[\"backend\"] = config.Backend.Type\n\tinfo[\"log level\"] = logLevel\n\tinfo[\"mlock\"] = fmt.Sprintf(\n\t\t\"supported: %v, enabled: %v\",\n\t\tmlock.Supported(), !config.DisableMlock)\n\tinfoKeys = append(infoKeys, \"log level\", \"mlock\", \"backend\")\n\n\t\/\/ If the backend supports HA, then note it\n\tif _, ok := backend.(physical.HABackend); ok {\n\t\tinfo[\"backend\"] += \" (HA available)\"\n\t\tinfo[\"advertise address\"] = config.Backend.AdvertiseAddr\n\t\tinfoKeys = append(infoKeys, \"advertise address\")\n\t}\n\n\t\/\/ Initialize the telemetry\n\tif err := c.setupTelementry(config); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing telemetry: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Initialize the listeners\n\tlns := make([]net.Listener, 0, len(config.Listeners))\n\tfor i, lnConfig := range config.Listeners {\n\t\tln, props, err := server.NewListener(lnConfig.Type, lnConfig.Config)\n\t\tif err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\n\t\t\t\t\"Error initializing listener of type %s: %s\",\n\t\t\t\tlnConfig.Type, err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Store the listener props for output later\n\t\tkey := fmt.Sprintf(\"listener %d\", i+1)\n\t\tpropsList := make([]string, 0, len(props))\n\t\tfor k, v := range props {\n\t\t\tpropsList = append(propsList, fmt.Sprintf(\n\t\t\t\t\"%s: %q\", k, v))\n\t\t}\n\t\tsort.Strings(propsList)\n\t\tinfoKeys = append(infoKeys, key)\n\t\tinfo[key] = fmt.Sprintf(\n\t\t\t\"%s (%s)\", lnConfig.Type, strings.Join(propsList, \", \"))\n\n\t\tlns = append(lns, ln)\n\t}\n\n\t\/\/ Initialize the HTTP server\n\tserver := &http.Server{}\n\tserver.Handler = vaulthttp.Handler(core)\n\tfor _, ln := range lns {\n\t\tgo server.Serve(ln)\n\t}\n\n\t\/\/ Server configuration output\n\tpadding := 18\n\tc.Ui.Output(\"==> Vault server configuration:\\n\")\n\tfor _, k := range infoKeys {\n\t\tc.Ui.Output(fmt.Sprintf(\n\t\t\t\"%s%s: %s\",\n\t\t\tstrings.Repeat(\" \", padding-len(k)),\n\t\t\tstrings.Title(k),\n\t\t\tinfo[k]))\n\t}\n\tc.Ui.Output(\"\")\n\n\t\/\/ Output the header that the server has started\n\tc.Ui.Output(\"==> Vault server started! Log data will stream in below:\\n\")\n\n\t\/\/ Release the log gate.\n\tlogGate.Flush()\n\n\t<-make(chan struct{})\n\treturn 0\n}\n\nfunc (c *ServerCommand) enableDev(core *vault.Core) (*vault.InitResult, error) {\n\t\/\/ Initialize it with a basic single key\n\tinit, err := core.Initialize(&vault.SealConfig{\n\t\tSecretShares: 1,\n\t\tSecretThreshold: 1,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Copy the key so that it can be zeroed\n\tkey := make([]byte, len(init.SecretShares[0]))\n\tcopy(key, init.SecretShares[0])\n\n\t\/\/ Unseal the core\n\tunsealed, err := core.Unseal(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !unsealed {\n\t\treturn nil, fmt.Errorf(\"failed to unseal Vault for dev mode\")\n\t}\n\n\t\/\/ Set the token\n\ttokenHelper, err := c.TokenHelper()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tokenHelper.Store(init.RootToken); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn init, nil\n}\n\n\/\/ detectAdvertise is used to attempt advertise address detection\nfunc (c *ServerCommand) detectAdvertise(detect physical.AdvertiseDetect,\n\tconfig *server.Config) (string, error) {\n\t\/\/ Get the hostname\n\thost, err := detect.DetectHostAddr()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Default the port and scheme\n\tscheme := \"https\"\n\tport := 8200\n\n\t\/\/ Attempt to detect overrides\n\tfor _, list := range config.Listeners {\n\t\t\/\/ Only attempt TCP\n\t\tif list.Type != \"tcp\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check if TLS is disabled\n\t\tif val, ok := list.Config[\"tls_disable\"]; ok {\n\t\t\tdisable, err := strconv.ParseBool(val)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"tls_disable: %s\", err)\n\t\t\t}\n\n\t\t\tif disable {\n\t\t\t\tscheme = \"http\"\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for address override\n\t\taddr, ok := list.Config[\"address\"]\n\t\tif !ok {\n\t\t\taddr = \"127.0.0.1:8200\"\n\t\t}\n\n\t\t\/\/ Check for localhost\n\t\thostStr, portStr, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif hostStr == \"127.0.0.1\" {\n\t\t\thost = hostStr\n\t\t}\n\n\t\t\/\/ Check for custom port\n\t\tlistPort, err := strconv.Atoi(portStr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tport = listPort\n\t}\n\n\t\/\/ Build a URL\n\turl := &url.URL{\n\t\tScheme: scheme,\n\t\tHost: fmt.Sprintf(\"%s:%d\", host, port),\n\t}\n\n\t\/\/ Return the URL string\n\treturn url.String(), nil\n}\n\n\/\/ setupTelementry is used ot setup the telemetry sub-systems\nfunc (c *ServerCommand) setupTelementry(config *server.Config) error {\n\t\/* Setup telemetry\n\tAggregate on 10 second intervals for 1 minute. Expose the\n\tmetrics over stderr when there is a SIGUSR1 received.\n\t*\/\n\tinm := metrics.NewInmemSink(10*time.Second, time.Minute)\n\tmetrics.DefaultInmemSignal(inm)\n\tmetricsConf := metrics.DefaultConfig(\"vault\")\n\n\t\/\/ Configure the statsite sink\n\tvar fanout metrics.FanoutSink\n\tif config.StatsiteAddr != \"\" {\n\t\tsink, err := metrics.NewStatsiteSink(config.StatsiteAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfanout = append(fanout, sink)\n\t}\n\n\t\/\/ Configure the statsd sink\n\tif config.StatsdAddr != \"\" {\n\t\tsink, err := metrics.NewStatsdSink(config.StatsdAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfanout = append(fanout, sink)\n\t}\n\n\t\/\/ Initialize the global sink\n\tif len(fanout) > 0 {\n\t\tfanout = append(fanout, inm)\n\t\tmetrics.NewGlobal(metricsConf, fanout)\n\t} else {\n\t\tmetricsConf.EnableHostname = false\n\t\tmetrics.NewGlobal(metricsConf, inm)\n\t}\n\treturn nil\n}\n\nfunc (c *ServerCommand) Synopsis() string {\n\treturn \"Start a Vault server\"\n}\n\nfunc (c *ServerCommand) Help() string {\n\thelpText := `\nUsage: vault server [options]\n\n Start a Vault server.\n\n This command starts a Vault server that responds to API requests.\n Vault will start in a \"sealed\" state. The Vault must be unsealed\n with \"vault unseal\" or the API before this server can respond to requests.\n This must be done for every server.\n\n If the server is being started against a storage backend that has\n brand new (no existing Vault data in it), it must be initialized with\n \"vault init\" or the API first.\n\n\nGeneral Options:\n\n -config=<path> Path to the configuration file or directory. This can be\n specified multiple times. If it is a directory, all\n files with a \".hcl\" or \".json\" suffix will be loaded.\n\n -log-level=info Log verbosity. Defaults to \"info\", will be outputted\n to stderr.\n\n`\n\treturn strings.TrimSpace(helpText)\n}\n<|endoftext|>"} {"text":"<commit_before>package statemachine\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\ntype Handler func() string\n\ntype Machine struct {\n\tHandlers map[string]Handler\n\tLogger *log.Logger\n}\n\ntype StateMachineError struct {\n\tState string\n}\n\nfunc (sme StateMachineError) Error() string {\n\treturn \"statemachine: No handler function registered for state: \" + sme.State\n}\n\nfunc NewMachine() Machine {\n\treturn Machine{\n\t\tHandlers: map[string]Handler{},\n\t\tLogger: log.New(os.Stdout, \"statemachine: \", 0),\n\t}\n}\n\nfunc (machine Machine) AddState(stateName string, handlerFn Handler) {\n\tmachine.Handlers[stateName] = handlerFn\n}\n\nfunc (machine Machine) Run() (success bool, error error) {\n\tstate := \"INIT\"\n\tmachine.Logger.Println(\"Starting in state: INIT\")\n\tfor {\n\t\tif handler, present := machine.Handlers[state]; present {\n\t\t\toldstate := state\n\t\t\tstate = handler()\n\t\t\tmachine.Logger.Printf(\"State transition: %s -> %s\\n\", oldstate, state)\n\t\t\tif state == \"END\" {\n\t\t\t\tmachine.Logger.Println(\"Terminating\")\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t} else {\n\t\t\treturn false, StateMachineError{state}\n\t\t}\n\t}\n}\n<commit_msg>Add levels to log output<commit_after>package statemachine\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\ntype Handler func() string\n\ntype Machine struct {\n\tHandlers map[string]Handler\n\tLogger *log.Logger\n}\n\ntype StateMachineError struct {\n\tState string\n}\n\nfunc (sme StateMachineError) Error() string {\n\treturn \"ERROR: No handler function registered for state: \" + sme.State\n}\n\nfunc NewMachine() Machine {\n\treturn Machine{\n\t\tHandlers: map[string]Handler{},\n\t\tLogger: log.New(os.Stdout, \"statemachine: \", 0),\n\t}\n}\n\nfunc (machine Machine) AddState(stateName string, handlerFn Handler) {\n\tmachine.Handlers[stateName] = handlerFn\n}\n\nfunc (machine Machine) Run() (success bool, error error) {\n\tstate := \"INIT\"\n\tmachine.Logger.Println(\"INFO: Starting in state: INIT\")\n\tfor {\n\t\tif handler, present := machine.Handlers[state]; present {\n\t\t\toldstate := state\n\t\t\tstate = handler()\n\t\t\tmachine.Logger.Printf(\"INFO: State transition: %s -> %s\\n\", oldstate, state)\n\t\t\tif state == \"END\" {\n\t\t\t\tmachine.Logger.Println(\"INFO: Terminating\")\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t} else {\n\t\t\terr := StateMachineError{state}\n\t\t\tmachine.Logger.Print(err)\n\t\t\treturn false, err\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\n\tgolden is a package designed to make it possible to compare a game to a\n\tgolden run for testing purposes. It takes a record saved in\n\tstorage\/filesystem format and compares it.\n\n*\/\npackage golden\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/filesystem\/record\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/memory\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/Compare is the primary method in the package. It takes a game delegate and a\n\/\/filename denoting a record to compare against. delegate shiould be a fresh\n\/\/delegate not yet affiliated with a manager.\nfunc Compare(delegate boardgame.GameDelegate, recFilename string) error {\n\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\trec, err := record.New(recFilename)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create record: \" + err.Error())\n\t}\n\n\treturn compare(manager, rec)\n\n}\n\n\/\/CompareFolder is like Compare, except it will iterate through any file in\n\/\/recFolder that ends in .json. Errors if any of those files cannot be parsed\n\/\/into recs, or if no files match.\nfunc CompareFolder(delegate boardgame.GameDelegate, recFolder string) error {\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\tinfos, err := ioutil.ReadDir(recFolder)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't read folder: \" + err.Error())\n\t}\n\n\tprocessedRecs := 0\n\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) != \".json\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trec, err := record.New(filepath.Join(recFolder, info.Name()))\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"File with name \" + info.Name() + \" couldn't be loaded into rec: \" + err.Error())\n\t\t}\n\n\t\tif err := compare(manager, rec); err != nil {\n\t\t\treturn errors.New(\"File named \" + info.Name() + \" had compare error: \" + err.Error())\n\t\t}\n\n\t\tprocessedRecs++\n\t}\n\n\tif processedRecs < 1 {\n\t\treturn errors.New(\"Processed 0 recs in folder\")\n\t}\n\n\treturn nil\n}\n\nfunc compare(manager *boardgame.GameManager, rec *record.Record) error {\n\tgame, err := manager.RecreateGame(rec.Game())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create game: \" + err.Error())\n\t}\n\n\tlastVerifiedVersion := 0\n\n\tfor !game.Finished() {\n\t\t\/\/Verify all new moves that have happened since the last time we\n\t\t\/\/checked (often, fix-up moves).\n\t\tfor lastVerifiedVersion < game.Version() {\n\t\t\tstateToCompare, err := rec.State(lastVerifiedVersion)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Couldn't get \" + strconv.Itoa(lastVerifiedVersion) + \" state: \" + err.Error())\n\t\t\t}\n\n\t\t\tif err := compareJsonBlobs(game.State(lastVerifiedVersion).StorageRecord(), stateToCompare); err != nil {\n\t\t\t\treturn errors.New(\"State \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + err.Error())\n\t\t\t}\n\n\t\t\tif lastVerifiedVersion > 0 {\n\n\t\t\t\t\/\/Version 0 has no associated move\n\n\t\t\t\trecMove, err := rec.Move(lastVerifiedVersion)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.New(\"Couldn't get move \" + strconv.Itoa(lastVerifiedVersion) + \" from record\")\n\t\t\t\t}\n\n\t\t\t\tmoves := game.MoveRecords(lastVerifiedVersion)\n\n\t\t\t\tif len(moves) < 1 {\n\t\t\t\t\treturn errors.New(\"Didn't fetch historical move records for \" + strconv.Itoa(lastVerifiedVersion))\n\t\t\t\t}\n\n\t\t\t\t\/\/Warning: records are modified by this method\n\t\t\t\tif err := compareMoveStorageRecords(moves[len(moves)-1], recMove); err != nil {\n\t\t\t\t\treturn errors.New(\"Move \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + err.Error())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlastVerifiedVersion++\n\t\t}\n\n\t\tnextMoveRec, err := rec.Move(lastVerifiedVersion + 1)\n\n\t\tif err != nil {\n\t\t\t\/\/We'll assume that menas that's all of the moves there are to make.\n\t\t\tbreak\n\t\t}\n\n\t\tif nextMoveRec.Proposer < 0 {\n\t\t\treturn errors.New(\"At version \" + strconv.Itoa(lastVerifiedVersion) + \" the next player move to apply was not applied by a player\")\n\t\t}\n\n\t\tnextMove, err := nextMoveRec.Inflate(game)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't inflate move: \" + err.Error())\n\t\t}\n\n\t\tif err := <-game.ProposeMove(nextMove, nextMoveRec.Proposer); err != nil {\n\t\t\treturn errors.New(\"Couldn't propose next move in chain: \" + err.Error())\n\t\t}\n\n\t}\n\n\tif game.Finished() != rec.Game().Finished {\n\t\treturn errors.New(\"Game finished did not match rec\")\n\t}\n\n\tif !reflect.DeepEqual(game.Winners(), rec.Game().Winners) {\n\t\treturn errors.New(\"Game winners did not match\")\n\t}\n\n\treturn nil\n}\n\nvar differ = gojsondiff.New()\n\nvar diffformatter = formatter.NewDeltaFormatter()\n\nfunc compareJsonBlobs(one, two []byte) error {\n\n\tdiff, err := differ.Compare(one, two)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't diff: \" + err.Error())\n\t}\n\n\tif diff.Modified() {\n\n\t\tstr, err := diffformatter.Format(diff)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't format diff: \" + err.Error())\n\t\t}\n\n\t\treturn errors.New(\"Diff: \" + str)\n\t}\n\n\treturn nil\n\n}\n\n\/\/warning: modifies the records\nfunc compareMoveStorageRecords(one, two *boardgame.MoveStorageRecord) error {\n\n\tif one == nil {\n\t\treturn errors.New(\"One was nil\")\n\t}\n\n\tif two == nil {\n\t\treturn errors.New(\"Two was nil\")\n\t}\n\n\toneBlob := one.Blob\n\ttwoBlob := two.Blob\n\n\t\/\/Set the fields we know might differ to known values\n\tone.Blob = nil\n\ttwo.Blob = nil\n\n\ttwo.Timestamp = one.Timestamp\n\n\tif !reflect.DeepEqual(one, two) {\n\t\treturn errors.New(\"Move storage records differed in base fields\")\n\t}\n\n\treturn compareJsonBlobs(oneBlob, twoBlob)\n\n}\n<commit_msg>Better documentation of what golden.Compare does. Part of #648.<commit_after>\/*\n\n\tgolden is a package designed to make it possible to compare a game to a\n\tgolden run for testing purposes. It takes a record saved in\n\tstorage\/filesystem format and compares it.\n\n*\/\npackage golden\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/filesystem\/record\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/memory\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/Compare is the primary method in the package. It takes a game delegate and a\n\/\/filename denoting a record to compare against. delegate shiould be a fresh\n\/\/delegate not yet affiliated with a manager. It compares every version and\n\/\/move in the history (ignoring things that shouldn't be the same, like\n\/\/timestamps) and reports the first place they divrge. Any time it finds a\n\/\/move not proposed by AdminPlayerIndex it will propose that move. As long as\n\/\/your game uses state.Rand() for all randomness and is otherwise\n\/\/deterministic then everything should work.\nfunc Compare(delegate boardgame.GameDelegate, recFilename string) error {\n\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\trec, err := record.New(recFilename)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create record: \" + err.Error())\n\t}\n\n\treturn compare(manager, rec)\n\n}\n\n\/\/CompareFolder is like Compare, except it will iterate through any file in\n\/\/recFolder that ends in .json. Errors if any of those files cannot be parsed\n\/\/into recs, or if no files match.\nfunc CompareFolder(delegate boardgame.GameDelegate, recFolder string) error {\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\tinfos, err := ioutil.ReadDir(recFolder)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't read folder: \" + err.Error())\n\t}\n\n\tprocessedRecs := 0\n\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) != \".json\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trec, err := record.New(filepath.Join(recFolder, info.Name()))\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"File with name \" + info.Name() + \" couldn't be loaded into rec: \" + err.Error())\n\t\t}\n\n\t\tif err := compare(manager, rec); err != nil {\n\t\t\treturn errors.New(\"File named \" + info.Name() + \" had compare error: \" + err.Error())\n\t\t}\n\n\t\tprocessedRecs++\n\t}\n\n\tif processedRecs < 1 {\n\t\treturn errors.New(\"Processed 0 recs in folder\")\n\t}\n\n\treturn nil\n}\n\nfunc compare(manager *boardgame.GameManager, rec *record.Record) error {\n\tgame, err := manager.RecreateGame(rec.Game())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create game: \" + err.Error())\n\t}\n\n\tlastVerifiedVersion := 0\n\n\tfor !game.Finished() {\n\t\t\/\/Verify all new moves that have happened since the last time we\n\t\t\/\/checked (often, fix-up moves).\n\t\tfor lastVerifiedVersion < game.Version() {\n\t\t\tstateToCompare, err := rec.State(lastVerifiedVersion)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Couldn't get \" + strconv.Itoa(lastVerifiedVersion) + \" state: \" + err.Error())\n\t\t\t}\n\n\t\t\tif err := compareJsonBlobs(game.State(lastVerifiedVersion).StorageRecord(), stateToCompare); err != nil {\n\t\t\t\treturn errors.New(\"State \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + err.Error())\n\t\t\t}\n\n\t\t\tif lastVerifiedVersion > 0 {\n\n\t\t\t\t\/\/Version 0 has no associated move\n\n\t\t\t\trecMove, err := rec.Move(lastVerifiedVersion)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.New(\"Couldn't get move \" + strconv.Itoa(lastVerifiedVersion) + \" from record\")\n\t\t\t\t}\n\n\t\t\t\tmoves := game.MoveRecords(lastVerifiedVersion)\n\n\t\t\t\tif len(moves) < 1 {\n\t\t\t\t\treturn errors.New(\"Didn't fetch historical move records for \" + strconv.Itoa(lastVerifiedVersion))\n\t\t\t\t}\n\n\t\t\t\t\/\/Warning: records are modified by this method\n\t\t\t\tif err := compareMoveStorageRecords(moves[len(moves)-1], recMove); err != nil {\n\t\t\t\t\treturn errors.New(\"Move \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + err.Error())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlastVerifiedVersion++\n\t\t}\n\n\t\tnextMoveRec, err := rec.Move(lastVerifiedVersion + 1)\n\n\t\tif err != nil {\n\t\t\t\/\/We'll assume that menas that's all of the moves there are to make.\n\t\t\tbreak\n\t\t}\n\n\t\tif nextMoveRec.Proposer < 0 {\n\t\t\treturn errors.New(\"At version \" + strconv.Itoa(lastVerifiedVersion) + \" the next player move to apply was not applied by a player\")\n\t\t}\n\n\t\tnextMove, err := nextMoveRec.Inflate(game)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't inflate move: \" + err.Error())\n\t\t}\n\n\t\tif err := <-game.ProposeMove(nextMove, nextMoveRec.Proposer); err != nil {\n\t\t\treturn errors.New(\"Couldn't propose next move in chain: \" + err.Error())\n\t\t}\n\n\t}\n\n\tif game.Finished() != rec.Game().Finished {\n\t\treturn errors.New(\"Game finished did not match rec\")\n\t}\n\n\tif !reflect.DeepEqual(game.Winners(), rec.Game().Winners) {\n\t\treturn errors.New(\"Game winners did not match\")\n\t}\n\n\treturn nil\n}\n\nvar differ = gojsondiff.New()\n\nvar diffformatter = formatter.NewDeltaFormatter()\n\nfunc compareJsonBlobs(one, two []byte) error {\n\n\tdiff, err := differ.Compare(one, two)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't diff: \" + err.Error())\n\t}\n\n\tif diff.Modified() {\n\n\t\tstr, err := diffformatter.Format(diff)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't format diff: \" + err.Error())\n\t\t}\n\n\t\treturn errors.New(\"Diff: \" + str)\n\t}\n\n\treturn nil\n\n}\n\n\/\/warning: modifies the records\nfunc compareMoveStorageRecords(one, two *boardgame.MoveStorageRecord) error {\n\n\tif one == nil {\n\t\treturn errors.New(\"One was nil\")\n\t}\n\n\tif two == nil {\n\t\treturn errors.New(\"Two was nil\")\n\t}\n\n\toneBlob := one.Blob\n\ttwoBlob := two.Blob\n\n\t\/\/Set the fields we know might differ to known values\n\tone.Blob = nil\n\ttwo.Blob = nil\n\n\ttwo.Timestamp = one.Timestamp\n\n\tif !reflect.DeepEqual(one, two) {\n\t\treturn errors.New(\"Move storage records differed in base fields\")\n\t}\n\n\treturn compareJsonBlobs(oneBlob, twoBlob)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/p2p\/peer\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n\n\tcontext \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/code.google.com\/p\/go.net\/context\"\n)\n\nconst kPingTimeout = 10 * time.Second\n\ntype PingResult struct {\n\tSuccess bool\n\tTime time.Duration\n\tText string\n}\n\nvar PingCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"send echo request packets to IPFS hosts\",\n\t\tSynopsis: `\nSend pings to a peer using the routing system to discover its address\n\t\t`,\n\t\tShortDescription: `\n\t\tipfs ping is a tool to find a node (in the routing system),\n\t\tsend pings, wait for pongs, and print out round-trip latency information.\n\t\t`,\n\t},\n\tArguments: []cmds.Argument{\n\t\tcmds.StringArg(\"peer ID\", true, true, \"ID of peer to be pinged\"),\n\t},\n\tOptions: []cmds.Option{\n\t\tcmds.IntOption(\"count\", \"n\", \"number of ping messages to send\"),\n\t},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\toutChan, ok := res.Output().(chan interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tmarshal := func(v interface{}) (io.Reader, error) {\n\t\t\t\tobj, ok := v.(*PingResult)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, u.ErrCast()\n\t\t\t\t}\n\n\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\tif len(obj.Text) > 0 {\n\t\t\t\t\tbuf = bytes.NewBufferString(obj.Text + \"\\n\")\n\t\t\t\t} else if obj.Success {\n\t\t\t\t\tfmt.Fprintf(buf, \"Pong took %.2fms\\n\", obj.Time.Seconds()*1000)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(buf, \"Pong failed\\n\")\n\t\t\t\t}\n\t\t\t\treturn buf, nil\n\t\t\t}\n\n\t\t\treturn &cmds.ChannelMarshaler{\n\t\t\t\tChannel: outChan,\n\t\t\t\tMarshaler: marshal,\n\t\t\t}, nil\n\t\t},\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\t\tn, err := req.Context().GetNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Must be online!\n\t\tif !n.OnlineMode() {\n\t\t\treturn nil, errNotOnline\n\t\t}\n\n\t\tpeerID, err := peer.IDB58Decode(req.Arguments()[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Set up number of pings\n\t\tnumPings := 10\n\t\tval, found, err := req.Option(\"count\").Int()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif found {\n\t\t\tnumPings = val\n\t\t}\n\n\t\toutChan := make(chan interface{})\n\n\t\tgo pingPeer(n, peerID, numPings, outChan)\n\n\t\treturn outChan, nil\n\t},\n\tType: PingResult{},\n}\n\nfunc pingPeer(n *core.IpfsNode, pid peer.ID, numPings int, outChan chan interface{}) {\n\tdefer close(outChan)\n\n\t\/\/ Make sure we can find the node in question\n\toutChan <- &PingResult{\n\t\tText: fmt.Sprintf(\"Looking up peer %s\", pid.Pretty()),\n\t}\n\n\t\/\/ TODO: get master context passed in\n\tctx, _ := context.WithTimeout(context.TODO(), kPingTimeout)\n\tp, err := n.Routing.FindPeer(ctx, pid)\n\tif err != nil {\n\t\toutChan <- &PingResult{Text: fmt.Sprintf(\"Peer lookup error: %s\", err)}\n\t\treturn\n\t}\n\tn.Peerstore.AddPeerInfo(p)\n\n\toutChan <- &PingResult{Text: fmt.Sprintf(\"Peer found, starting pings.\")}\n\n\tvar total time.Duration\n\tfor i := 0; i < numPings; i++ {\n\t\tctx, _ = context.WithTimeout(context.TODO(), kPingTimeout)\n\t\ttook, err := n.Routing.Ping(ctx, p.ID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Ping error: %s\", err)\n\t\t\toutChan <- &PingResult{Text: fmt.Sprintf(\"Ping error: %s\", err)}\n\t\t\tbreak\n\t\t}\n\t\toutChan <- &PingResult{\n\t\t\tSuccess: true,\n\t\t\tTime: took,\n\t\t}\n\t\ttotal += took\n\t\ttime.Sleep(time.Second)\n\t}\n\taveragems := total.Seconds() * 1000 \/ float64(numPings)\n\toutChan <- &PingResult{\n\t\tText: fmt.Sprintf(\"Average latency: %.2fms\", averagems),\n\t}\n}\n<commit_msg>improve UI of ping<commit_after>package commands\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\tconfig \"github.com\/jbenet\/go-ipfs\/config\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/p2p\/peer\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n\n\tcontext \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/code.google.com\/p\/go.net\/context\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n)\n\nconst kPingTimeout = 10 * time.Second\n\ntype PingResult struct {\n\tSuccess bool\n\tTime time.Duration\n\tText string\n}\n\nvar PingCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"send echo request packets to IPFS hosts\",\n\t\tSynopsis: `\nSend pings to a peer using the routing system to discover its address\n\t\t`,\n\t\tShortDescription: `\n\t\tipfs ping is a tool to find a node (in the routing system),\n\t\tsend pings, wait for pongs, and print out round-trip latency information.\n\t\t`,\n\t},\n\tArguments: []cmds.Argument{\n\t\tcmds.StringArg(\"peer ID\", true, true, \"ID of peer to be pinged\"),\n\t},\n\tOptions: []cmds.Option{\n\t\tcmds.IntOption(\"count\", \"n\", \"number of ping messages to send\"),\n\t},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\toutChan, ok := res.Output().(chan interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tmarshal := func(v interface{}) (io.Reader, error) {\n\t\t\t\tobj, ok := v.(*PingResult)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, u.ErrCast()\n\t\t\t\t}\n\n\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\tif len(obj.Text) > 0 {\n\t\t\t\t\tbuf = bytes.NewBufferString(obj.Text + \"\\n\")\n\t\t\t\t} else if obj.Success {\n\t\t\t\t\tfmt.Fprintf(buf, \"Pong received: time=%.2f ms\\n\", obj.Time.Seconds()*1000)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(buf, \"Pong failed\\n\")\n\t\t\t\t}\n\t\t\t\treturn buf, nil\n\t\t\t}\n\n\t\t\treturn &cmds.ChannelMarshaler{\n\t\t\t\tChannel: outChan,\n\t\t\t\tMarshaler: marshal,\n\t\t\t}, nil\n\t\t},\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\t\tn, err := req.Context().GetNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Must be online!\n\t\tif !n.OnlineMode() {\n\t\t\treturn nil, errNotOnline\n\t\t}\n\n\t\tbsp, err := config.ParseBootstrapPeer(req.Arguments()[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpeerID, err := peer.IDB58Decode(bsp.PeerID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(bsp.Address) > 0 {\n\t\t\taddr, err := ma.NewMultiaddr(bsp.Address)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tn.Peerstore.AddAddress(peerID, addr)\n\t\t}\n\n\t\t\/\/ Set up number of pings\n\t\tnumPings := 10\n\t\tval, found, err := req.Option(\"count\").Int()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif found {\n\t\t\tnumPings = val\n\t\t}\n\n\t\toutChan := make(chan interface{})\n\n\t\tgo pingPeer(n, peerID, numPings, outChan)\n\n\t\treturn outChan, nil\n\t},\n\tType: PingResult{},\n}\n\nfunc pingPeer(n *core.IpfsNode, pid peer.ID, numPings int, outChan chan interface{}) {\n\tdefer close(outChan)\n\n\tif len(n.Peerstore.Addresses(pid)) == 0 {\n\t\t\/\/ Make sure we can find the node in question\n\t\toutChan <- &PingResult{\n\t\t\tText: fmt.Sprintf(\"Looking up peer %s\", pid.Pretty()),\n\t\t}\n\n\t\t\/\/ TODO: get master context passed in\n\t\tctx, _ := context.WithTimeout(context.TODO(), kPingTimeout)\n\t\tp, err := n.Routing.FindPeer(ctx, pid)\n\t\tif err != nil {\n\t\t\toutChan <- &PingResult{Text: fmt.Sprintf(\"Peer lookup error: %s\", err)}\n\t\t\treturn\n\t\t}\n\t\tn.Peerstore.AddPeerInfo(p)\n\t}\n\n\toutChan <- &PingResult{Text: fmt.Sprintf(\"PING %s.\", pid.Pretty())}\n\n\tvar total time.Duration\n\tfor i := 0; i < numPings; i++ {\n\t\tctx, _ := context.WithTimeout(context.TODO(), kPingTimeout)\n\t\ttook, err := n.Routing.Ping(ctx, pid)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Ping error: %s\", err)\n\t\t\toutChan <- &PingResult{Text: fmt.Sprintf(\"Ping error: %s\", err)}\n\t\t\tbreak\n\t\t}\n\t\toutChan <- &PingResult{\n\t\t\tSuccess: true,\n\t\t\tTime: took,\n\t\t}\n\t\ttotal += took\n\t\ttime.Sleep(time.Second)\n\t}\n\taveragems := total.Seconds() * 1000 \/ float64(numPings)\n\toutChan <- &PingResult{\n\t\tText: fmt.Sprintf(\"Average latency: %.2fms\", averagems),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage discovery\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"v.io\/v23\/discovery\"\n)\n\nconst (\n\tmaxAdvertisementSize = 512\n\tmaxAttachmentSize = 4096\n\tmaxNumAttributes = 32\n\tmaxNumAttachments = 32\n)\n\n\/\/ validateKey returns an error if the key is not suitable for advertising.\nfunc validateKey(key string) error {\n\tif len(key) == 0 {\n\t\treturn errors.New(\"empty key\")\n\t}\n\tif strings.HasPrefix(key, \"_\") {\n\t\treturn errors.New(\"key starts with '_'\")\n\t}\n\tfor _, c := range key {\n\t\tif c < 0x20 || c > 0x7e {\n\t\t\treturn errors.New(\"key is not printable US-ASCII\")\n\t\t}\n\t\tif c == '=' {\n\t\t\treturn errors.New(\"key includes '='\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ validateAttributes returns an error if the attributes are not suitable for advertising.\nfunc validateAttributes(attributes discovery.Attributes) error {\n\tif len(attributes) > maxNumAttributes {\n\t\treturn errors.New(\"too many\")\n\t}\n\tfor k, v := range attributes {\n\t\tif err := validateKey(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !utf8.ValidString(v) {\n\t\t\treturn errors.New(\"value is not valid UTF-8 string\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ validateAttachments returns an error if the attachments are not suitable for advertising.\nfunc validateAttachments(attachments discovery.Attachments) error {\n\tif len(attachments) > maxNumAttachments {\n\t\treturn errors.New(\"too many\")\n\t}\n\tfor k, v := range attachments {\n\t\tif err := validateKey(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(v) > maxAttachmentSize {\n\t\t\treturn errors.New(\"too large\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ sizeOfAd returns the size of ad excluding id and attachments.\nfunc sizeOfAd(ad *discovery.Advertisement) int {\n\tsize := len(ad.InterfaceName)\n\tfor _, a := range ad.Addresses {\n\t\tsize += len(a)\n\t}\n\tfor k, v := range ad.Attributes {\n\t\tsize += len(k) + len(v)\n\t}\n\treturn size\n}\n\n\/\/ validateAd returns an error if ad is not suitable for advertising.\nfunc validateAd(ad *discovery.Advertisement) error {\n\tif !ad.Id.IsValid() {\n\t\treturn errors.New(\"id not valid\")\n\t}\n\tif len(ad.InterfaceName) == 0 {\n\t\treturn errors.New(\"interface name not provided\")\n\t}\n\tif len(ad.Addresses) == 0 {\n\t\treturn errors.New(\"address not provided\")\n\t}\n\tif err := validateAttributes(ad.Attributes); err != nil {\n\t\treturn fmt.Errorf(\"attributes not valid: %v\", err)\n\t}\n\tif err := validateAttachments(ad.Attachments); err != nil {\n\t\treturn fmt.Errorf(\"attachments not valid: %v\", err)\n\t}\n\tif sizeOfAd(ad) > maxAdvertisementSize {\n\t\treturn errors.New(\"advertisement too large\")\n\t}\n\treturn nil\n}\n<commit_msg>discovery: make validation error messages more descriptive<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage discovery\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"v.io\/v23\/discovery\"\n)\n\nconst (\n\tmaxAdvertisementSize = 512\n\tmaxAttachmentSize = 4096\n\tmaxNumAttributes = 32\n\tmaxNumAttachments = 32\n)\n\n\/\/ validateKey returns an error if the key is not suitable for advertising.\nfunc validateKey(key string) error {\n\tif len(key) == 0 {\n\t\treturn errors.New(\"empty key\")\n\t}\n\tif strings.HasPrefix(key, \"_\") {\n\t\treturn fmt.Errorf(\"key starts with '_': %q\", key)\n\t}\n\tfor _, c := range key {\n\t\tif c < 0x20 || c > 0x7e {\n\t\t\treturn fmt.Errorf(\"key is not printable US-ASCII: %q\", key)\n\t\t}\n\t\tif c == '=' {\n\t\t\treturn fmt.Errorf(\"key includes '=': %q\", key)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ validateAttributes returns an error if the attributes are not suitable for advertising.\nfunc validateAttributes(attributes discovery.Attributes) error {\n\tif len(attributes) > maxNumAttributes {\n\t\treturn fmt.Errorf(\"too many: %d > %d\", len(attributes), maxNumAttributes)\n\t}\n\tfor k, v := range attributes {\n\t\tif err := validateKey(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !utf8.ValidString(v) {\n\t\t\treturn fmt.Errorf(\"value is not valid UTF-8 string: %q\", v)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ validateAttachments returns an error if the attachments are not suitable for advertising.\nfunc validateAttachments(attachments discovery.Attachments) error {\n\tif len(attachments) > maxNumAttachments {\n\t\treturn fmt.Errorf(\"too many: %d > %d\", len(attachments), maxNumAttachments)\n\t}\n\tfor k, v := range attachments {\n\t\tif err := validateKey(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(v) > maxAttachmentSize {\n\t\t\treturn fmt.Errorf(\"too large value for %q: %d > %d\", k, len(v), maxAttachmentSize)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ sizeOfAd returns the size of ad excluding id and attachments.\nfunc sizeOfAd(ad *discovery.Advertisement) int {\n\tsize := len(ad.InterfaceName)\n\tfor _, a := range ad.Addresses {\n\t\tsize += len(a)\n\t}\n\tfor k, v := range ad.Attributes {\n\t\tsize += len(k) + len(v)\n\t}\n\treturn size\n}\n\n\/\/ validateAd returns an error if ad is not suitable for advertising.\nfunc validateAd(ad *discovery.Advertisement) error {\n\tif !ad.Id.IsValid() {\n\t\treturn errors.New(\"id not valid\")\n\t}\n\tif len(ad.InterfaceName) == 0 {\n\t\treturn errors.New(\"interface name not provided\")\n\t}\n\tif len(ad.Addresses) == 0 {\n\t\treturn errors.New(\"address not provided\")\n\t}\n\tif err := validateAttributes(ad.Attributes); err != nil {\n\t\treturn fmt.Errorf(\"attributes not valid: %v\", err)\n\t}\n\tif err := validateAttachments(ad.Attachments); err != nil {\n\t\treturn fmt.Errorf(\"attachments not valid: %v\", err)\n\t}\n\tif sizeOfAd(ad) > maxAdvertisementSize {\n\t\treturn fmt.Errorf(\"advertisement too large: %d > %d\", sizeOfAd(ad), maxAdvertisementSize)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\/\/ zmq \"github.com\/alecthomas\/gozmq\"\n\ttrisclient \"github.com\/fvbock\/tris\/client\"\n\ttrisserver \"github.com\/fvbock\/tris\/server\"\n\t\"github.com\/sbinet\/liner\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ TODO: detect connection loss and handle reconnect + currentDB\n\nvar (\n\tterm *liner.State = nil\n\tdsnString = flag.String(\"d\", \"tcp:localhost:6000\", \"dsn to connect to\")\n)\n\nfunc init() {\n\tfmt.Printf(\"TriS cli %s\\n\", trisclient.VERSION)\n\tterm = liner.NewLiner()\n\n\tfname := path.Join(os.Getenv(\"HOME\"), \".tris.history\")\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not access history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\tdefer f.Close()\n\t_, err = term.ReadHistory(f)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not read history file [%s]\\n\", fname)\n\t\treturn\n\t}\n}\n\nfunc atexit() {\n\tfmt.Println(\"CLI atexit\")\n\tfname := path.Join(os.Getenv(\"HOME\"), \".tris.history\")\n\tf, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not access history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\tdefer f.Close()\n\t_, err = term.WriteHistory(f)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not write history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\n\terr = term.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: problem closing term: %v\\n\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"\\n\")\n}\n\nfunc main() {\n\tdefer atexit()\n\tvar ierr error = nil \/\/ previous interpreter error\n\tps1 := \"[not connected]> \"\n\t\/\/ ps2 := \"... \"\n\tprompt := &ps1\n\tcommand := \"\"\n\n\t\/\/ TRIS conn\n\tflag.Parse()\n\tdsnParts := strings.Split(*dsnString, \":\")\n\tport, _ := strconv.ParseInt(dsnParts[2], 10, 32)\n\tdsn := &trisclient.DSN{\n\t\tProtocol: dsnParts[0],\n\t\tHost: dsnParts[1],\n\t\tPort: int(port),\n\t}\n\tfmt.Printf(\"Connecting to %s:\/\/%s:%v\\n\", dsn.Protocol, dsn.Host, dsn.Port)\n\n\tclient, err := trisclient.NewClient(dsn)\n\terr = client.Dial()\n\tif err != nil {\n\t\tfmt.Println(command, ierr)\n\t} else {\n\t\tps1 = fmt.Sprintf(\"%s:%v\/[%s]> \", dsnParts[1], dsnParts[2], client.ActiveDb)\n\t}\n\tdefer client.Close()\n\n\t\/\/ check conn\n\t\/\/ r, err := client.Send(\"PING\")\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Println(\"Error:\", err)\n\t\/\/ }\n\tresponse, err := client.Ping()\n\t\/\/ response := trisserver.Unserialize(r)\n\tif response.ReturnCode != trisserver.COMMAND_OK {\n\t\tfmt.Printf(\"Initial PING failed:\\n%v\\n\", response)\n\t} else {\n\t\tfor {\n\t\t\tps1 = fmt.Sprintf(\"%s:%v\/[%s]> \", dsnParts[1], dsnParts[2], client.ActiveDb)\n\t\t\t\/\/ fmt.Println(\">>>\", client.ActiveDb)\n\t\t\tline, err := term.Prompt(*prompt)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tierr = err\n\t\t\t\t} else {\n\t\t\t\t\tierr = nil\n\t\t\t\t}\n\t\t\t\tbreak \/\/os.Exit(0)\n\t\t\t}\n\t\t\tif line == \"\" || line == \";\" {\n\t\t\t\t\/\/ no more input\n\t\t\t\tprompt = &ps1\n\t\t\t}\n\n\t\t\tcommand += line\n\t\t\tif command != \"\" {\n\t\t\t\tfor _, ll := range strings.Split(command, \"\\n\") {\n\t\t\t\t\tterm.AppendHistory(ll)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ \/\/ nama\n\t\t\t\/\/ r, err := client.Send(command)\n\t\t\t\/\/ if err != nil {\n\t\t\t\/\/ \tfmt.Println(\"Error:\", err)\n\t\t\t\/\/ }\n\n\t\t\t\/\/ response := trisserver.Unserialize(r)\n\t\t\t\/\/ \/\/ fmt.Println(response)\n\t\t\t\/\/ response.Print()\n\n\t\t\t\/\/ use lib\n\t\t\tvar cmds []string\n\t\t\tvar args [][]string\n\t\t\tmsgs := strings.Split(strings.Trim(command, \" \"), \"\\n\")\n\t\t\tfmt.Println(\"msgs\", msgs)\n\t\t\tfor n, msg := range msgs {\n\t\t\t\tparts := strings.Split(strings.Trim(msg, \" \"), \" \")\n\t\t\t\tfor i, p := range parts {\n\t\t\t\t\tif len(p) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif i == 0 {\n\t\t\t\t\t\tcmds = append(cmds, strings.ToUpper(p))\n\t\t\t\t\t\targs = append(args, make([]string, 0))\n\t\t\t\t\t} else {\n\t\t\t\t\t\targs[n] = append(args[n], p)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(\"cmds, args\", cmds, args)\n\n\t\tcmdexec:\n\t\t\tfor i, cmdname := range cmds {\n\t\t\t\tvar response *trisserver.Reply\n\t\t\t\tvar err error\n\t\t\t\tswitch cmdname {\n\t\t\t\tcase \"PING\":\n\t\t\t\t\tresponse, err = client.Ping()\n\t\t\t\tcase \"SELECT\":\n\t\t\t\t\tresponse, err = client.Select(args[i][0])\n\t\t\t\tcase \"DBINFO\":\n\t\t\t\t\tresponse, err = client.DbInfo()\n\t\t\t\tcase \"INFO\":\n\t\t\t\t\tresponse, err = client.Info()\n\t\t\t\tcase \"SAVE\":\n\t\t\t\t\tresponse, err = client.Save()\n\t\t\t\tcase \"IMPORT\":\n\t\t\t\t\tresponse, err = client.ImportDb(args[i][0], args[i][1])\n\t\t\t\tcase \"MERGE\":\n\t\t\t\t\tresponse, err = client.MergeDb(args[i][0])\n\t\t\t\tcase \"CREATE\":\n\t\t\t\t\tresponse, err = client.Create(args[i][0])\n\t\t\t\tcase \"ADD\":\n\t\t\t\t\tresponse, err = client.Add(args[i][0])\n\t\t\t\tcase \"DEL\":\n\t\t\t\t\tresponse, err = client.Del(args[i][0])\n\t\t\t\tcase \"HAS\":\n\t\t\t\t\tresponse, err = client.Has(args[i][0])\n\t\t\t\tcase \"HASCOUNT\":\n\t\t\t\t\tresponse, err = client.HasCount(args[i][0])\n\t\t\t\tcase \"HASPREFIX\":\n\t\t\t\t\tresponse, err = client.HasPrefix(args[i][0])\n\t\t\t\tcase \"MEMBERS\":\n\t\t\t\t\tresponse, err = client.Members()\n\t\t\t\tcase \"PREFIXMEMBERS\":\n\t\t\t\t\tresponse, err = client.PrefixMembers(args[i][0])\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Println(\"Unknown command.\")\n\t\t\t\t\tbreak cmdexec\n\t\t\t\t}\n\t\t\t\tif response.ReturnCode != trisserver.COMMAND_OK || err != nil {\n\t\t\t\t\t\/\/ TODO\n\t\t\t\t}\n\t\t\t\tresponse.Print()\n\t\t\t}\n\n\t\t\t\/\/ reset state\n\t\t\tcommand = \"\"\n\t\t\tierr = nil\n\t\t}\n\t}\n}\n<commit_msg>add basic script file support<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\/\/ zmq \"github.com\/alecthomas\/gozmq\"\n\ttrisclient \"github.com\/fvbock\/tris\/client\"\n\ttrisserver \"github.com\/fvbock\/tris\/server\"\n\t\"github.com\/sbinet\/liner\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ TODO: detect connection loss and handle reconnect + currentDB\n\nvar (\n\tterm *liner.State = nil\n\tdsnString = flag.String(\"d\", \"tcp:localhost:6000\", \"dsn to connect to\")\n)\n\nfunc init() {\n\tfmt.Printf(\"TriS cli %s\\n\", trisclient.VERSION)\n\tterm = liner.NewLiner()\n\n\tfname := path.Join(os.Getenv(\"HOME\"), \".tris.history\")\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not access history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\tdefer f.Close()\n\t_, err = term.ReadHistory(f)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not read history file [%s]\\n\", fname)\n\t\treturn\n\t}\n}\n\nfunc atexit() {\n\tfmt.Println(\"CLI atexit\")\n\tfname := path.Join(os.Getenv(\"HOME\"), \".tris.history\")\n\tf, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not access history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\tdefer f.Close()\n\t_, err = term.WriteHistory(f)\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: could not write history file [%s]\\n\", fname)\n\t\treturn\n\t}\n\n\terr = term.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"**warning: problem closing term: %v\\n\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"\\n\")\n}\n\nfunc main() {\n\tdefer atexit()\n\tvar ierr error = nil \/\/ previous interpreter error\n\tps1 := \"[not connected]> \"\n\t\/\/ ps2 := \"... \"\n\tprompt := &ps1\n\tcommand := \"\"\n\n\t\/\/ TRIS conn\n\tflag.Parse()\n\tdsnParts := strings.Split(*dsnString, \":\")\n\tport, _ := strconv.ParseInt(dsnParts[2], 10, 32)\n\tdsn := &trisclient.DSN{\n\t\tProtocol: dsnParts[0],\n\t\tHost: dsnParts[1],\n\t\tPort: int(port),\n\t}\n\tfmt.Printf(\"Connecting to %s:\/\/%s:%v\\n\", dsn.Protocol, dsn.Host, dsn.Port)\n\n\tclient, err := trisclient.NewClient(dsn)\n\terr = client.Dial()\n\tif err != nil {\n\t\tfmt.Println(command, ierr)\n\t} else {\n\t\tps1 = fmt.Sprintf(\"%s:%v\/[%s]> \", dsnParts[1], dsnParts[2], client.ActiveDb)\n\t}\n\tdefer client.Close()\n\n\t\/\/ check conn\n\t\/\/ r, err := client.Send(\"PING\")\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Println(\"Error:\", err)\n\t\/\/ }\n\tresponse, err := client.Ping()\n\t\/\/ response := trisserver.Unserialize(r)\n\tif response.ReturnCode != trisserver.COMMAND_OK {\n\t\tfmt.Printf(\"Initial PING failed:\\n%v\\n\", response)\n\t} else {\n\t\tfor {\n\t\t\tps1 = fmt.Sprintf(\"%s:%v\/[%s]> \", dsnParts[1], dsnParts[2], client.ActiveDb)\n\t\t\t\/\/ fmt.Println(\">>>\", client.ActiveDb)\n\t\t\tline, err := term.Prompt(*prompt)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tierr = err\n\t\t\t\t} else {\n\t\t\t\t\tierr = nil\n\t\t\t\t}\n\t\t\t\tbreak \/\/os.Exit(0)\n\t\t\t}\n\t\t\tif line == \"\" || line == \";\" {\n\t\t\t\t\/\/ no more input\n\t\t\t\tprompt = &ps1\n\t\t\t}\n\n\t\t\tcommand += line\n\t\t\tif command != \"\" {\n\t\t\t\tfor _, ll := range strings.Split(command, \"\\n\") {\n\t\t\t\t\tterm.AppendHistory(ll)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ SOURCE cmd. read script file\n\t\t\tif strings.ToUpper(command[:6]) == \"SOURCE\" {\n\t\t\t\tfname := strings.Trim(command[6:], \" \\n\")\n\t\t\t\tfmt.Println(\"OPEN source file:\", fname)\n\t\t\t\tsourcedFile, err := ioutil.ReadFile(fname)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERR opening source file:\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcommand = string(sourcedFile)\n\t\t\t}\n\t\t\t\/\/ \/\/ nama\n\t\t\t\/\/ r, err := client.Send(command)\n\t\t\t\/\/ if err != nil {\n\t\t\t\/\/ \tfmt.Println(\"Error:\", err)\n\t\t\t\/\/ }\n\n\t\t\t\/\/ response := trisserver.Unserialize(r)\n\t\t\t\/\/ \/\/ fmt.Println(response)\n\t\t\t\/\/ response.Print()\n\n\t\t\t\/\/ use lib\n\t\t\tvar cmds []string\n\t\t\tvar args [][]string\n\t\t\tmsgs := strings.Split(strings.Trim(command, \" \"), \"\\n\")\n\t\t\tfmt.Println(\"msgs\", msgs)\n\t\t\tfor n, msg := range msgs {\n\t\t\t\tparts := strings.Split(strings.Trim(msg, \" \"), \" \")\n\t\t\t\tfor i, p := range parts {\n\t\t\t\t\tif len(p) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif i == 0 {\n\t\t\t\t\t\tcmds = append(cmds, strings.ToUpper(p))\n\t\t\t\t\t\targs = append(args, make([]string, 0))\n\t\t\t\t\t} else {\n\t\t\t\t\t\targs[n] = append(args[n], p)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(\"cmds, args\", cmds, args)\n\n\t\tcmdexec:\n\t\t\tfor i, cmdname := range cmds {\n\t\t\t\tvar response *trisserver.Reply\n\t\t\t\tvar err error\n\t\t\t\tswitch cmdname {\n\t\t\t\tcase \"PING\":\n\t\t\t\t\tresponse, err = client.Ping()\n\t\t\t\tcase \"SELECT\":\n\t\t\t\t\tresponse, err = client.Select(args[i][0])\n\t\t\t\tcase \"DBINFO\":\n\t\t\t\t\tresponse, err = client.DbInfo()\n\t\t\t\tcase \"INFO\":\n\t\t\t\t\tresponse, err = client.Info()\n\t\t\t\tcase \"SAVE\":\n\t\t\t\t\tresponse, err = client.Save()\n\t\t\t\tcase \"IMPORT\":\n\t\t\t\t\tresponse, err = client.ImportDb(args[i][0], args[i][1])\n\t\t\t\tcase \"MERGE\":\n\t\t\t\t\tresponse, err = client.MergeDb(args[i][0])\n\t\t\t\tcase \"CREATE\":\n\t\t\t\t\tresponse, err = client.Create(args[i][0])\n\t\t\t\tcase \"ADD\":\n\t\t\t\t\tresponse, err = client.Add(args[i][0])\n\t\t\t\tcase \"DEL\":\n\t\t\t\t\tresponse, err = client.Del(args[i][0])\n\t\t\t\tcase \"HAS\":\n\t\t\t\t\tresponse, err = client.Has(args[i][0])\n\t\t\t\tcase \"HASCOUNT\":\n\t\t\t\t\tresponse, err = client.HasCount(args[i][0])\n\t\t\t\tcase \"HASPREFIX\":\n\t\t\t\t\tresponse, err = client.HasPrefix(args[i][0])\n\t\t\t\tcase \"MEMBERS\":\n\t\t\t\t\tresponse, err = client.Members()\n\t\t\t\tcase \"PREFIXMEMBERS\":\n\t\t\t\t\tresponse, err = client.PrefixMembers(args[i][0])\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Println(\"Unknown command.\")\n\t\t\t\t\tbreak cmdexec\n\t\t\t\t}\n\t\t\t\tif response.ReturnCode != trisserver.COMMAND_OK || err != nil {\n\t\t\t\t\t\/\/ TODO\n\t\t\t\t}\n\t\t\t\tresponse.Print()\n\t\t\t}\n\n\t\t\t\/\/ reset state\n\t\t\tcommand = \"\"\n\t\t\tierr = nil\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage main\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestListUsers(t *testing.T) {\n\tconvey.Convey(\"Given I get all users\", t, func() {\n\t\tserver := mockRequest(\"\/api\/users\/\", \"GET\", 200, `[]`)\n\t\tm := Manager{URL: server.URL}\n\t\t_, err := m.ListUsers(\"token\")\n\t\tconvey.Convey(\"Then It does not fail\", func() {\n\t\t\tconvey.So(err, convey.ShouldBeNil)\n\t\t})\n\t})\n}\n\nfunc TestCreateUser(t *testing.T) {\n\tt.Skip()\n\tconvey.Convey(\"Given I create a client\", t, func() {\n\t\tserver := mockRequest(\"\/api\/groups\/\", \"POST\", 200, `{}`)\n\t\tm := Manager{URL: server.URL}\n\t\terr := m.CreateUser(\"name\", \"email\", \"user\", \"password\", \"adminuser\", \"adminpassword\")\n\t\tconvey.Convey(\"Then It does not fail\", func() {\n\t\t\tconvey.So(err, convey.ShouldBeNil)\n\t\t})\n\t})\n}\n<commit_msg>Fix CI<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage main\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestListUsers(t *testing.T) {\n\tconvey.Convey(\"Given I get all users\", t, func() {\n\t\tserver := mockRequest(\"\/api\/users\/\", \"GET\", 200, `[]`)\n\t\tm := Manager{URL: server.URL}\n\t\t_, err := m.ListUsers(\"token\")\n\t\tconvey.Convey(\"Then It does not fail\", func() {\n\t\t\tconvey.So(err, convey.ShouldBeNil)\n\t\t})\n\t})\n}\n\nfunc TestCreateUser(t *testing.T) {\n\tt.Skip()\n\tconvey.Convey(\"Given I create a client\", t, func() {\n\t\tserver := mockRequest(\"\/api\/groups\/\", \"POST\", 200, `{}`)\n\t\tm := Manager{URL: server.URL}\n\t\terr := m.CreateUser(\"token\", \"name\", \"email\", \"user\", \"password\")\n\t\tconvey.Convey(\"Then It does not fail\", func() {\n\t\t\tconvey.So(err, convey.ShouldBeNil)\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\n*\/\npackage mandrake\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"encoding\/json\"\n\n\t\"golang.org\/x\/exp\/inotify\"\n\t\"github.com\/hosom\/gomandrake\/config\"\n\t\"github.com\/hosom\/gomandrake\/filemeta\"\n\t\"github.com\/hosom\/gomandrake\/plugin\"\n)\n\n\/\/ Mandrake is a wrapper struct for the bulk of the application logic\ntype Mandrake struct {\n\tAnalysisPipeline\tchan string\n\tMonitoredDirectory\tstring\n\tAnalyzers\t\t\t[]plugin.AnalyzerCaller\n}\n\n\/\/ NewMandrake creates and returns a Mandrake struct utilizing a passed \n\/\/ parsed configuration file to create the correct fields.\nfunc NewMandrake(c config.Config) Mandrake {\n\tlog.Println(c.Analyzers)\n\tanalyzers := []plugin.AnalyzerCaller{}\n\tfor _, plug := range c.Analyzers {\n\t\tlog.Println(plug)\n\t\tanalyzer := plugin.NewAnalyzerCaller(plug)\n\t\tanalyzers = append(analyzers, analyzer)\n\t}\n\n\treturn Mandrake{make(chan string), c.MonitoredDirectory, analyzers}\n}\n\n\/\/ ListenAndServe starts the goroutines that perform all of the heavy lifting\n\/\/ including Monitor() and DispatchAnalysis(). \nfunc (m Mandrake) ListenAndServe() {\n\tlog.SetPrefix(\"[mandrake] \")\n\tfmt.Println(\"HELLO\")\n\tlog.Println(m.Analyzers[0])\n\tgo m.DispatchAnalysis()\n\tm.Monitor()\n}\n\n\/\/ DispatchAnalysis intelligently sends a new file to registered plugins so\n\/\/ that it can be analyzed.\nfunc (m Mandrake) DispatchAnalysis() {\t\n\tfor fpath := range m.AnalysisPipeline {\n\t\tfmeta, err := filemeta.NewFileMeta(fpath)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tfs, err := json.Marshal(fmeta)\n\n\t\tlog.Println(string(fs))\n\t\tlog.Printf(\"%s\", fpath)\n\t}\n}\n\n\/\/ Monitor uses inotify to monitor the MonitoredDirectory for IN_CLOSE_WRITE\n\/\/ events. Files written to the MonitoredDirectory will be sent to the \n\/\/ analysis pipeline to be analyzed.\nfunc (m Mandrake) Monitor() {\n\tlog.Println(\"starting inotify watcher\")\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"adding watcher to %s directory\", m.MonitoredDirectory)\n\terr = watcher.AddWatch(m.MonitoredDirectory, inotify.IN_CLOSE_WRITE)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <- watcher.Event:\n\t\t\tm.AnalysisPipeline <- ev.Name\n\t\tcase err := <- watcher.Error:\n\t\t\tlog.Printf(\"inotify error: %s\", err)\n\t\t}\n\t}\n}<commit_msg>add Analyze call for plugins<commit_after>\/*\n\n*\/\npackage mandrake\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"encoding\/json\"\n\n\t\"golang.org\/x\/exp\/inotify\"\n\t\"github.com\/hosom\/gomandrake\/config\"\n\t\"github.com\/hosom\/gomandrake\/filemeta\"\n\t\"github.com\/hosom\/gomandrake\/plugin\"\n)\n\n\/\/ Mandrake is a wrapper struct for the bulk of the application logic\ntype Mandrake struct {\n\tAnalysisPipeline\tchan string\n\tMonitoredDirectory\tstring\n\tAnalyzers\t\t\t[]plugin.AnalyzerCaller\n}\n\n\/\/ NewMandrake creates and returns a Mandrake struct utilizing a passed \n\/\/ parsed configuration file to create the correct fields.\nfunc NewMandrake(c config.Config) Mandrake {\n\tanalyzers := []plugin.AnalyzerCaller{}\n\tfor _, plug := range c.Analyzers {\n\t\tanalyzer := plugin.NewAnalyzerCaller(plug)\n\t\tanalyzers = append(analyzers, analyzer)\n\t}\n\n\treturn Mandrake{make(chan string), c.MonitoredDirectory, analyzers}\n}\n\n\/\/ ListenAndServe starts the goroutines that perform all of the heavy lifting\n\/\/ including Monitor() and DispatchAnalysis(). \nfunc (m Mandrake) ListenAndServe() {\n\tlog.SetPrefix(\"[mandrake] \")\n\tlog.Println(m.Analyzers[0])\n\tgo m.DispatchAnalysis()\n\tm.Monitor()\n}\n\n\/\/ DispatchAnalysis intelligently sends a new file to registered plugins so\n\/\/ that it can be analyzed.\nfunc (m Mandrake) DispatchAnalysis() {\t\n\tfor fpath := range m.AnalysisPipeline {\n\t\tfmeta, err := filemeta.NewFileMeta(fpath)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tfs, err := json.Marshal(fmeta)\n\n\t\tfor _, analyzer := range m.Analyzers {\n\t\t\tresult, err := analyzer.Analyze(fmeta)\n\t\t\tlog.Print(result)\n\t\t}\n\n\t\tlog.Println(string(fs))\n\t\tlog.Printf(\"%s\", fpath)\n\t}\n}\n\n\/\/ Monitor uses inotify to monitor the MonitoredDirectory for IN_CLOSE_WRITE\n\/\/ events. Files written to the MonitoredDirectory will be sent to the \n\/\/ analysis pipeline to be analyzed.\nfunc (m Mandrake) Monitor() {\n\tlog.Println(\"starting inotify watcher\")\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"adding watcher to %s directory\", m.MonitoredDirectory)\n\terr = watcher.AddWatch(m.MonitoredDirectory, inotify.IN_CLOSE_WRITE)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <- watcher.Event:\n\t\t\tm.AnalysisPipeline <- ev.Name\n\t\tcase err := <- watcher.Error:\n\t\t\tlog.Printf(\"inotify error: %s\", err)\n\t\t}\n\t}\n}<|endoftext|>"} {"text":"<commit_before>\/*\n\n*\/\npackage mandrake\n\nimport (\n\t\"log\"\n\t\"encoding\/json\"\n\n\t\"golang.org\/x\/exp\/inotify\"\n\t\"github.com\/hosom\/gomandrake\/config\"\n\t\"github.com\/hosom\/gomandrake\/filemeta\"\n\t\"github.com\/hosom\/gomandrake\/plugin\"\n)\n\n\/\/ Mandrake is a wrapper struct for the bulk of the application logic\ntype Mandrake struct {\n\tAnalysisPipeline\tchan string\n\tMonitoredDirectory\tstring\n\tAnalyzers\t\t\t[]plugin.AnalyzerCaller\n}\n\n\/\/ NewMandrake creates and returns a Mandrake struct utilizing a passed \n\/\/ parsed configuration file to create the correct fields.\nfunc NewMandrake(c config.Config) Mandrake {\n\tanalyzers := []plugin.AnalyzerCaller{}\n\tfor _, plug := range c.Analyzers {\n\t\tanalyzer := plugin.NewAnalyzerCaller(plug)\n\t\tanalyzers = append(analyzers, analyzer)\n\t}\n\n\treturn Mandrake{make(chan string), c.MonitoredDirectory, analyzers}\n}\n\n\/\/ ListenAndServe starts the goroutines that perform all of the heavy lifting\n\/\/ including Monitor() and DispatchAnalysis(). \nfunc (m Mandrake) ListenAndServe() {\n\tlog.SetPrefix(\"[mandrake] \")\n\tgo m.DispatchAnalysis()\n\tm.Monitor()\n}\n\n\/\/ DispatchAnalysis intelligently sends a new file to registered plugins so\n\/\/ that it can be analyzed.\nfunc (m Mandrake) DispatchAnalysis() {\t\n\tfor fpath := range m.AnalysisPipeline {\n\t\tfmeta, err := filemeta.NewFileMeta(fpath)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tfs, err := json.Marshal(fmeta)\n\n\t\tlog.Println(string(fs))\n\t\tlog.Printf(\"%s\", fpath)\n\t}\n}\n\n\/\/ Monitor uses inotify to monitor the MonitoredDirectory for IN_CLOSE_WRITE\n\/\/ events. Files written to the MonitoredDirectory will be sent to the \n\/\/ analysis pipeline to be analyzed.\nfunc (m Mandrake) Monitor() {\n\tlog.Println(\"starting inotify watcher\")\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"adding watcher to %s directory\", m.MonitoredDirectory)\n\terr = watcher.AddWatch(m.MonitoredDirectory, inotify.IN_CLOSE_WRITE)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <- watcher.Event:\n\t\t\tm.AnalysisPipeline <- ev.Name\n\t\tcase err := <- watcher.Error:\n\t\t\tlog.Printf(\"inotify error: %s\", err)\n\t\t}\n\t}\n}<commit_msg>updates<commit_after>\/*\n\n*\/\npackage mandrake\n\nimport (\n\t\"log\"\n\t\"encoding\/json\"\n\n\t\"golang.org\/x\/exp\/inotify\"\n\t\"github.com\/hosom\/gomandrake\/config\"\n\t\"github.com\/hosom\/gomandrake\/filemeta\"\n\t\"github.com\/hosom\/gomandrake\/plugin\"\n)\n\n\/\/ Mandrake is a wrapper struct for the bulk of the application logic\ntype Mandrake struct {\n\tAnalysisPipeline\tchan string\n\tMonitoredDirectory\tstring\n\tAnalyzers\t\t\t[]plugin.AnalyzerCaller\n}\n\n\/\/ NewMandrake creates and returns a Mandrake struct utilizing a passed \n\/\/ parsed configuration file to create the correct fields.\nfunc NewMandrake(c config.Config) Mandrake {\n\tlog.Println(c.Analyzers)\n\tanalyzers := []plugin.AnalyzerCaller{}\n\tfor _, plug := range c.Analyzers {\n\t\tlog.Println(plug)\n\t\tanalyzer := plugin.NewAnalyzerCaller(plug)\n\t\tanalyzers = append(analyzers, analyzer)\n\t}\n\n\treturn Mandrake{make(chan string), c.MonitoredDirectory, analyzers}\n}\n\n\/\/ ListenAndServe starts the goroutines that perform all of the heavy lifting\n\/\/ including Monitor() and DispatchAnalysis(). \nfunc (m Mandrake) ListenAndServe() {\n\tlog.SetPrefix(\"[mandrake] \")\n\tlog.Println(m.Analyzers)\n\tgo m.DispatchAnalysis()\n\tm.Monitor()\n}\n\n\/\/ DispatchAnalysis intelligently sends a new file to registered plugins so\n\/\/ that it can be analyzed.\nfunc (m Mandrake) DispatchAnalysis() {\t\n\tfor fpath := range m.AnalysisPipeline {\n\t\tfmeta, err := filemeta.NewFileMeta(fpath)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tfs, err := json.Marshal(fmeta)\n\n\t\tlog.Println(string(fs))\n\t\tlog.Printf(\"%s\", fpath)\n\t}\n}\n\n\/\/ Monitor uses inotify to monitor the MonitoredDirectory for IN_CLOSE_WRITE\n\/\/ events. Files written to the MonitoredDirectory will be sent to the \n\/\/ analysis pipeline to be analyzed.\nfunc (m Mandrake) Monitor() {\n\tlog.Println(\"starting inotify watcher\")\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"adding watcher to %s directory\", m.MonitoredDirectory)\n\terr = watcher.AddWatch(m.MonitoredDirectory, inotify.IN_CLOSE_WRITE)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <- watcher.Event:\n\t\t\tm.AnalysisPipeline <- ev.Name\n\t\tcase err := <- watcher.Error:\n\t\t\tlog.Printf(\"inotify error: %s\", err)\n\t\t}\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package manifest\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n)\n\ntype Manifest struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tModTime time.Time `json:\"lastModified\"`\n\tMode os.FileMode `json:\"fileMode\"`\n\tIsDir bool `json:\"isDir\"`\n\tFiles map[string]*Manifest `json:\"files,omitempty\"`\n}\n\nfunc NewManifest(name string, size int64, modtime time.Time, mode os.FileMode, isDir bool) *Manifest {\n\treturn &Manifest{name, size, modtime, mode, isDir, nil}\n}\n\nfunc GetManifest(dirPath string) (map[string]*Manifest, error) {\n\tif dirPath == \"\/\" || dirPath == \"..\/\" || dirPath == \"..\" {\n\t\tdirPath = \".\"\n\t}\n\tlist, err := ioutil.ReadDir(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmanifest := make(map[string]*Manifest)\n\tfor _, child := range list {\n\t\tchildPath := path.Join(dirPath, child.Name())\n\t\tmanifest[childPath] = NewManifest(childPath, child.Size(), child.ModTime(), child.Mode(), child.IsDir())\n\t\tif child.IsDir() == true {\n\t\t\tchildContent, err := GetManifest(childPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmanifest[childPath].Files = childContent\n\t\t}\n\t}\n\treturn manifest, nil\n}\n<commit_msg>Updated manifest\/manifest.go to use .\/ instead of \".\" when trying to specify current working directory<commit_after>package manifest\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n)\n\ntype Manifest struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tModTime time.Time `json:\"lastModified\"`\n\tMode os.FileMode `json:\"fileMode\"`\n\tIsDir bool `json:\"isDir\"`\n\tFiles map[string]*Manifest `json:\"files,omitempty\"`\n}\n\nfunc NewManifest(name string, size int64, modtime time.Time, mode os.FileMode, isDir bool) *Manifest {\n\treturn &Manifest{name, size, modtime, mode, isDir, nil}\n}\n\nfunc GetManifest(dirPath string) (map[string]*Manifest, error) {\n\tif dirPath == \"\/\" || dirPath == \"..\/\" || dirPath == \"..\" {\n\t\tdirPath = \".\/\"\n\t}\n\tlist, err := ioutil.ReadDir(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmanifest := make(map[string]*Manifest)\n\tfor _, child := range list {\n\t\tchildPath := path.Join(dirPath, child.Name())\n\t\tmanifest[childPath] = NewManifest(childPath, child.Size(), child.ModTime(), child.Mode(), child.IsDir())\n\t\tif child.IsDir() == true {\n\t\t\tchildContent, err := GetManifest(childPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmanifest[childPath].Files = childContent\n\t\t}\n\t}\n\treturn manifest, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) Alex Ellis 2017. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/alexellis\/faas-cli\/builder\"\n\t\"github.com\/alexellis\/faas-cli\/stack\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Flags that are to be added to commands.\nvar (\n\tnocache bool\n\tsquash bool\n\tparallel int\n)\n\nfunc init() {\n\t\/\/ Setup flags that are used by multiple commands (variables defined in faas.go)\n\tbuildCmd.Flags().StringVar(&image, \"image\", \"\", \"Docker image name to build\")\n\tbuildCmd.Flags().StringVar(&handler, \"handler\", \"\", \"Directory with handler for function, e.g. handler.js\")\n\tbuildCmd.Flags().StringVar(&functionName, \"name\", \"\", \"Name of the deployed function\")\n\tbuildCmd.Flags().StringVar(&language, \"lang\", \"node\", \"Programming language template\")\n\n\t\/\/ Setup flags that are used only by this command (variables defined above)\n\tbuildCmd.Flags().BoolVar(&nocache, \"no-cache\", false, \"Do not use Docker's build cache\")\n\tbuildCmd.Flags().BoolVar(&squash, \"squash\", false, `Use Docker's squash flag for smaller images\n\t\t\t\t\t\t [experimental] `)\n\tbuildCmd.Flags().IntVar(¶llel, \"parallel\", 1, \"Build in parallel to depth specified.\")\n\n\t\/\/ Set bash-completion.\n\t_ = buildCmd.Flags().SetAnnotation(\"handler\", cobra.BashCompSubdirsInDir, []string{})\n\n\tfaasCmd.AddCommand(buildCmd)\n}\n\n\/\/ buildCmd allows the user to build an OpenFaaS function container\nvar buildCmd = &cobra.Command{\n\tUse: `build -f YAML_FILE [--no-cache] [--squash]\n faas-cli build --image IMAGE_NAME\n --handler HANDLER_DIR\n --name FUNCTION_NAME\n [--lang <ruby|python|python3|node|csharp|Dockerfile>]\n [--no-cache] [--squash]\n [--regex \"REGEX\"]\n\t\t\t\t [--filter \"WILDCARD\"]\n\t\t\t\t [--parallel PARALLEL_DEPTH]`,\n\tShort: \"Builds OpenFaaS function containers\",\n\tLong: `Builds OpenFaaS function containers either via the supplied YAML config using\nthe \"--yaml\" flag (which may contain multiple function definitions), or directly\nvia flags.`,\n\tExample: ` faas-cli build -f https:\/\/domain\/path\/myfunctions.yml\n faas-cli build -f .\/samples.yml --no-cache\n faas-cli build -f .\/samples.yml --filter \"*gif*\"\n faas-cli build -f .\/samples.yml --regex \"fn[0-9]_.*\"\n faas-cli build --image=my_image --lang=python --handler=\/path\/to\/fn\/ \n --name=my_fn --squash`,\n\tRun: runBuild,\n}\n\nfunc runBuild(cmd *cobra.Command, args []string) {\n\n\tvar services stack.Services\n\tif len(yamlFile) > 0 {\n\t\tparsedServices, err := stack.ParseYAMLFile(yamlFile, regex, filter)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif parsedServices != nil {\n\t\t\tservices = *parsedServices\n\t\t}\n\t}\n\n\tif pullErr := PullTemplates(\"\"); pullErr != nil {\n\t\tlog.Fatalln(\"Could not pull templates for OpenFaaS.\", pullErr)\n\t}\n\n\tif len(services.Functions) > 0 {\n\t\tbuild(&services, parallel)\n\t} else {\n\t\tif len(image) == 0 {\n\t\t\tfmt.Println(\"Please provide a valid -image name for your Docker image.\")\n\t\t\treturn\n\t\t}\n\t\tif len(handler) == 0 {\n\t\t\tfmt.Println(\"Please provide the full path to your function's handler.\")\n\t\t\treturn\n\t\t}\n\t\tif len(functionName) == 0 {\n\t\t\tfmt.Println(\"Please provide the deployed -name of your function.\")\n\t\t\treturn\n\t\t}\n\t\tbuilder.BuildImage(image, handler, functionName, language, nocache, squash)\n\t}\n\n}\n\n\/\/ PullTemplates pulls templates from Github from the master zip download file.\nfunc PullTemplates(templateUrl string) error {\n\tvar err error\n\texists, err := os.Stat(\".\/template\")\n\tif err != nil || exists == nil {\n\t\tlog.Println(\"No templates found in current directory.\")\n\n\t\terr = fetchTemplates(templateUrl)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Unable to download templates from Github.\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}\n\nfunc build(services *stack.Services, queueDepth int) {\n\twg := sync.WaitGroup{}\n\n\tworkChannel := make(chan stack.Function)\n\n\tfor i := 0; i < queueDepth; i++ {\n\n\t\tgo func(index int) {\n\t\t\twg.Add(1)\n\t\t\tfor function := range workChannel {\n\t\t\t\tfmt.Printf(\"Building: %s.\\n\", function.Name)\n\t\t\t\tif len(function.Language) == 0 {\n\t\t\t\t\tfmt.Println(\"Please provide a valid -lang or 'Dockerfile' for your function.\")\n\n\t\t\t\t} else {\n\t\t\t\t\tbuilder.BuildImage(function.Image, function.Handler, function.Name, function.Language, nocache, squash)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"worker done.\\n\")\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\tfor k, function := range services.Functions {\n\t\tif function.SkipBuild {\n\t\t\tfmt.Printf(\"Skipping build of: %s.\\n\", function.Name)\n\t\t} else {\n\t\t\tfunction.Name = k\n\t\t\tworkChannel <- function\n\t\t}\n\t}\n\tclose(workChannel)\n\n\twg.Wait()\n\n}\n<commit_msg>Add builder number<commit_after>\/\/ Copyright (c) Alex Ellis 2017. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/alexellis\/faas-cli\/builder\"\n\t\"github.com\/alexellis\/faas-cli\/stack\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Flags that are to be added to commands.\nvar (\n\tnocache bool\n\tsquash bool\n\tparallel int\n)\n\nfunc init() {\n\t\/\/ Setup flags that are used by multiple commands (variables defined in faas.go)\n\tbuildCmd.Flags().StringVar(&image, \"image\", \"\", \"Docker image name to build\")\n\tbuildCmd.Flags().StringVar(&handler, \"handler\", \"\", \"Directory with handler for function, e.g. handler.js\")\n\tbuildCmd.Flags().StringVar(&functionName, \"name\", \"\", \"Name of the deployed function\")\n\tbuildCmd.Flags().StringVar(&language, \"lang\", \"node\", \"Programming language template\")\n\n\t\/\/ Setup flags that are used only by this command (variables defined above)\n\tbuildCmd.Flags().BoolVar(&nocache, \"no-cache\", false, \"Do not use Docker's build cache\")\n\tbuildCmd.Flags().BoolVar(&squash, \"squash\", false, `Use Docker's squash flag for smaller images\n\t\t\t\t\t\t [experimental] `)\n\tbuildCmd.Flags().IntVar(¶llel, \"parallel\", 1, \"Build in parallel to depth specified.\")\n\n\t\/\/ Set bash-completion.\n\t_ = buildCmd.Flags().SetAnnotation(\"handler\", cobra.BashCompSubdirsInDir, []string{})\n\n\tfaasCmd.AddCommand(buildCmd)\n}\n\n\/\/ buildCmd allows the user to build an OpenFaaS function container\nvar buildCmd = &cobra.Command{\n\tUse: `build -f YAML_FILE [--no-cache] [--squash]\n faas-cli build --image IMAGE_NAME\n --handler HANDLER_DIR\n --name FUNCTION_NAME\n [--lang <ruby|python|python3|node|csharp|Dockerfile>]\n [--no-cache] [--squash]\n [--regex \"REGEX\"]\n\t\t\t\t [--filter \"WILDCARD\"]\n\t\t\t\t [--parallel PARALLEL_DEPTH]`,\n\tShort: \"Builds OpenFaaS function containers\",\n\tLong: `Builds OpenFaaS function containers either via the supplied YAML config using\nthe \"--yaml\" flag (which may contain multiple function definitions), or directly\nvia flags.`,\n\tExample: ` faas-cli build -f https:\/\/domain\/path\/myfunctions.yml\n faas-cli build -f .\/samples.yml --no-cache\n faas-cli build -f .\/samples.yml --filter \"*gif*\"\n faas-cli build -f .\/samples.yml --regex \"fn[0-9]_.*\"\n faas-cli build --image=my_image --lang=python --handler=\/path\/to\/fn\/ \n --name=my_fn --squash`,\n\tRun: runBuild,\n}\n\nfunc runBuild(cmd *cobra.Command, args []string) {\n\n\tvar services stack.Services\n\tif len(yamlFile) > 0 {\n\t\tparsedServices, err := stack.ParseYAMLFile(yamlFile, regex, filter)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif parsedServices != nil {\n\t\t\tservices = *parsedServices\n\t\t}\n\t}\n\n\tif pullErr := PullTemplates(\"\"); pullErr != nil {\n\t\tlog.Fatalln(\"Could not pull templates for OpenFaaS.\", pullErr)\n\t}\n\n\tif len(services.Functions) > 0 {\n\t\tbuild(&services, parallel)\n\t} else {\n\t\tif len(image) == 0 {\n\t\t\tfmt.Println(\"Please provide a valid -image name for your Docker image.\")\n\t\t\treturn\n\t\t}\n\t\tif len(handler) == 0 {\n\t\t\tfmt.Println(\"Please provide the full path to your function's handler.\")\n\t\t\treturn\n\t\t}\n\t\tif len(functionName) == 0 {\n\t\t\tfmt.Println(\"Please provide the deployed -name of your function.\")\n\t\t\treturn\n\t\t}\n\t\tbuilder.BuildImage(image, handler, functionName, language, nocache, squash)\n\t}\n\n}\n\n\/\/ PullTemplates pulls templates from Github from the master zip download file.\nfunc PullTemplates(templateUrl string) error {\n\tvar err error\n\texists, err := os.Stat(\".\/template\")\n\tif err != nil || exists == nil {\n\t\tlog.Println(\"No templates found in current directory.\")\n\n\t\terr = fetchTemplates(templateUrl)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Unable to download templates from Github.\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}\n\nfunc build(services *stack.Services, queueDepth int) {\n\twg := sync.WaitGroup{}\n\n\tworkChannel := make(chan stack.Function)\n\n\tfor i := 0; i < queueDepth; i++ {\n\n\t\tgo func(index int) {\n\t\t\twg.Add(1)\n\t\t\tfor function := range workChannel {\n\t\t\t\tfmt.Printf(\"[%d] > Building: %s.\\n\", index, function.Name)\n\t\t\t\tif len(function.Language) == 0 {\n\t\t\t\t\tfmt.Println(\"Please provide a valid -lang or 'Dockerfile' for your function.\")\n\n\t\t\t\t} else {\n\t\t\t\t\tbuilder.BuildImage(function.Image, function.Handler, function.Name, function.Language, nocache, squash)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[%d] < Builder done.\\n\", index)\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\tfor k, function := range services.Functions {\n\t\tif function.SkipBuild {\n\t\t\tfmt.Printf(\"Skipping build of: %s.\\n\", function.Name)\n\t\t} else {\n\t\t\tfunction.Name = k\n\t\t\tworkChannel <- function\n\t\t}\n\t}\n\tclose(workChannel)\n\n\twg.Wait()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"DNA\/config\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tBlue = \"0;34\"\n\tRed = \"0;31\"\n\tGreen = \"0;32\"\n\tYellow = \"0;33\"\n\tCyan = \"0;36\"\n\tPink = \"1;35\"\n)\n\nfunc Color(code, msg string) string {\n\treturn fmt.Sprintf(\"\\033[%sm%s\\033[m\", code, msg)\n}\n\nconst (\n\ttraceLog = iota\n\tdebugLog\n\tinfoLog\n\twarnLog\n\terrorLog\n\tfatalLog\n\tprintLog\n\tmaxLevelLog\n)\n\nvar (\n\tlevels = map[int]string{\n\t\ttraceLog: Color(Pink, \"[TRACE]\"),\n\t\tdebugLog: Color(Green, \"[DEBUG]\"),\n\t\tinfoLog: Color(Green, \"[INFO ]\"),\n\t\twarnLog: Color(Yellow, \"[WARN ]\"),\n\t\terrorLog: Color(Red, \"[ERROR]\"),\n\t\tfatalLog: Color(Red, \"[FATAL]\"),\n\t\tprintLog: Color(Cyan, \"[ForcePrint]\"),\n\t}\n)\n\nconst (\n\tnamePrefix = \"LEVEL\"\n\tcallDepth = 2\n)\n\nfunc GetGID() uint64 {\n\tb := make([]byte, 64)\n\tb = b[:runtime.Stack(b, false)]\n\tb = bytes.TrimPrefix(b, []byte(\"goroutine \"))\n\tb = b[:bytes.IndexByte(b, ' ')]\n\tn, _ := strconv.ParseUint(string(b), 10, 64)\n\treturn n\n}\n\nvar Log *Logger\n\nfunc LevelName(level int) string {\n\tif name, ok := levels[level]; ok {\n\t\treturn name\n\t}\n\treturn namePrefix + strconv.Itoa(level)\n}\n\nfunc NameLevel(name string) int {\n\tfor k, v := range levels {\n\t\tif v == name {\n\t\t\treturn k\n\t\t}\n\t}\n\tvar level int\n\tif strings.HasPrefix(name, namePrefix) {\n\t\tlevel, _ = strconv.Atoi(name[len(namePrefix):])\n\t}\n\treturn level\n}\n\ntype Logger struct {\n\tsync.Mutex\n\tlevel int\n\tlogger *log.Logger\n}\n\nfunc New(out io.Writer, prefix string, flag, level int) *Logger {\n\treturn &Logger{\n\t\tlevel: level,\n\t\tlogger: log.New(out, prefix, flag),\n\t}\n}\n\nfunc (l *Logger) SetDebugLevel(level int) error {\n\tl.Lock()\n\tdefer l.Unlock()\n\tif level >= maxLevelLog || level < 0 {\n\t\treturn errors.New(\"Invalid Debug Level\")\n\t}\n\tl.level = level\n\treturn nil\n}\n\nfunc (l *Logger) output(level int, s string) error {\n\t\/\/ FIXME enable print GID for all log, should be disable as it effect performance\n\tif (level == 0) || (level == 1) || (level == 2) || (level == 3) || (level == 4) {\n\t\tgid := GetGID()\n\t\tgidStr := strconv.FormatUint(gid, 10)\n\n\t\treturn l.logger.Output(callDepth, LevelName(level)+\" \"+\"GID\"+\n\t\t\t\" \"+gidStr+\", \"+s)\n\t} else {\n\t\treturn l.logger.Output(callDepth, LevelName(level)+\" \"+s)\n\t}\n}\n\nfunc (l *Logger) Output(level int, a ...interface{}) error {\n\tif level >= l.level {\n\t\treturn l.output(level, fmt.Sprintln(a...))\n\t}\n\treturn nil\n}\n\nfunc (l *Logger) Trace(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(traceLog, a...)\n}\n\nfunc (l *Logger) Print(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(printLog, a...)\n}\n\nfunc (l *Logger) Debug(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(debugLog, a...)\n}\n\nfunc (l *Logger) Info(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(infoLog, a...)\n}\n\nfunc (l *Logger) Warn(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(warnLog, a...)\n}\n\nfunc (l *Logger) Error(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(errorLog, a...)\n}\n\nfunc (l *Logger) Fatal(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(fatalLog, a...)\n}\n\nfunc Trace(a ...interface{}) {\n\tpc := make([]uintptr, 10)\n\truntime.Callers(2, pc)\n\tf := runtime.FuncForPC(pc[0])\n\tfile, line := f.FileLine(pc[0])\n\tfileName := filepath.Base(file)\n\n\tLog.Trace(fmt.Sprint(f.Name(), \" \", fileName, \":\", line))\n\n}\n\nfunc Debug(a ...interface{}) {\n\tLog.Debug(fmt.Sprint(a...))\n}\n\nfunc Info(a ...interface{}) {\n\tLog.Info(fmt.Sprint(a...))\n}\n\nfunc Warn(a ...interface{}) {\n\tLog.Warn(fmt.Sprint(a...))\n}\n\nfunc Error(a ...interface{}) {\n\tLog.Error(fmt.Sprint(a...))\n}\n\nfunc Fatal(a ...interface{}) {\n\tLog.Fatal(fmt.Sprint(a...))\n}\n\nfunc Print(a ...interface{}) {\n\tLog.Print(fmt.Sprint(a...))\n}\n\nfunc FileOpen(path string) (*os.File, error) {\n\tif fi, err := os.Stat(path); err == nil {\n\t\tif !fi.IsDir() {\n\t\t\treturn nil, fmt.Errorf(\"open %s: not a directory\", path)\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(path, 0766); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tvar currenttime string = time.Now().Format(\"2006-01-02\")\n\n\tlogfile, err := os.OpenFile(path+currenttime+\"_LOG.log\", os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\/\/os.Exit(-1)\n\t}\n\n\t\/\/defer logfile.Close()\n\n\treturn logfile, nil\n}\n\nfunc CreatePrintLog(path string) {\n\tlogfile, err := FileOpen(path)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err.Error)\n\t}\n\tvar printlevel int = config.Parameters.PrintLevel\n\twriters := []io.Writer{\n\t\tlogfile,\n\t\tos.Stdout,\n\t}\n\tfileAndStdoutWrite := io.MultiWriter(writers...)\n\n\tLog = New(fileAndStdoutWrite, \"\", log.Lmicroseconds, printlevel)\n}\n\nfunc ClosePrintLog() {\n\t\/\/TODO\n}\n<commit_msg>Use log.trace instead of force print<commit_after>package log\n\nimport (\n\t\"DNA\/config\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tBlue = \"0;34\"\n\tRed = \"0;31\"\n\tGreen = \"0;32\"\n\tYellow = \"0;33\"\n\tCyan = \"0;36\"\n\tPink = \"1;35\"\n)\n\nfunc Color(code, msg string) string {\n\treturn fmt.Sprintf(\"\\033[%sm%s\\033[m\", code, msg)\n}\n\nconst (\n\tdebugLog = iota\n\tinfoLog\n\twarnLog\n\terrorLog\n\tfatalLog\n\ttraceLog\n\tmaxLevelLog\n)\n\nvar (\n\tlevels = map[int]string{\n\t\ttraceLog: Color(Pink, \"[TRACE]\"),\n\t\tdebugLog: Color(Green, \"[DEBUG]\"),\n\t\tinfoLog: Color(Green, \"[INFO ]\"),\n\t\twarnLog: Color(Yellow, \"[WARN ]\"),\n\t\terrorLog: Color(Red, \"[ERROR]\"),\n\t\tfatalLog: Color(Red, \"[FATAL]\"),\n\t}\n)\n\nconst (\n\tnamePrefix = \"LEVEL\"\n\tcallDepth = 2\n)\n\nfunc GetGID() uint64 {\n\tb := make([]byte, 64)\n\tb = b[:runtime.Stack(b, false)]\n\tb = bytes.TrimPrefix(b, []byte(\"goroutine \"))\n\tb = b[:bytes.IndexByte(b, ' ')]\n\tn, _ := strconv.ParseUint(string(b), 10, 64)\n\treturn n\n}\n\nvar Log *Logger\n\nfunc LevelName(level int) string {\n\tif name, ok := levels[level]; ok {\n\t\treturn name\n\t}\n\treturn namePrefix + strconv.Itoa(level)\n}\n\nfunc NameLevel(name string) int {\n\tfor k, v := range levels {\n\t\tif v == name {\n\t\t\treturn k\n\t\t}\n\t}\n\tvar level int\n\tif strings.HasPrefix(name, namePrefix) {\n\t\tlevel, _ = strconv.Atoi(name[len(namePrefix):])\n\t}\n\treturn level\n}\n\ntype Logger struct {\n\tsync.Mutex\n\tlevel int\n\tlogger *log.Logger\n}\n\nfunc New(out io.Writer, prefix string, flag, level int) *Logger {\n\treturn &Logger{\n\t\tlevel: level,\n\t\tlogger: log.New(out, prefix, flag),\n\t}\n}\n\nfunc (l *Logger) SetDebugLevel(level int) error {\n\tl.Lock()\n\tdefer l.Unlock()\n\tif level >= maxLevelLog || level < 0 {\n\t\treturn errors.New(\"Invalid Debug Level\")\n\t}\n\tl.level = level\n\treturn nil\n}\n\nfunc (l *Logger) output(level int, s string) error {\n\t\/\/ FIXME enable print GID for all log, should be disable as it effect performance\n\tif (level == 0) || (level == 1) || (level == 2) || (level == 3) || (level == 4) {\n\t\tgid := GetGID()\n\t\tgidStr := strconv.FormatUint(gid, 10)\n\n\t\treturn l.logger.Output(callDepth, LevelName(level)+\" \"+\"GID\"+\n\t\t\t\" \"+gidStr+\", \"+s)\n\t} else {\n\t\treturn l.logger.Output(callDepth, LevelName(level)+\" \"+s)\n\t}\n}\n\nfunc (l *Logger) Output(level int, a ...interface{}) error {\n\tif level >= l.level {\n\t\treturn l.output(level, fmt.Sprintln(a...))\n\t}\n\treturn nil\n}\n\nfunc (l *Logger) Trace(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(traceLog, a...)\n}\n\nfunc (l *Logger) Debug(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(debugLog, a...)\n}\n\nfunc (l *Logger) Info(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(infoLog, a...)\n}\n\nfunc (l *Logger) Warn(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(warnLog, a...)\n}\n\nfunc (l *Logger) Error(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(errorLog, a...)\n}\n\nfunc (l *Logger) Fatal(a ...interface{}) {\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.Output(fatalLog, a...)\n}\n\nfunc Trace(a ...interface{}) {\n\tpc := make([]uintptr, 10)\n\truntime.Callers(2, pc)\n\tf := runtime.FuncForPC(pc[0])\n\tfile, line := f.FileLine(pc[0])\n\tfileName := filepath.Base(file)\n\tLog.Trace(fmt.Sprint(f.Name(), \" \", fileName, \":\", line, \" \", fmt.Sprint(a...)))\n\n}\n\nfunc Debug(a ...interface{}) {\n\tLog.Debug(fmt.Sprint(a...))\n}\n\nfunc Info(a ...interface{}) {\n\tLog.Info(fmt.Sprint(a...))\n}\n\nfunc Warn(a ...interface{}) {\n\tLog.Warn(fmt.Sprint(a...))\n}\n\nfunc Error(a ...interface{}) {\n\tLog.Error(fmt.Sprint(a...))\n}\n\nfunc Fatal(a ...interface{}) {\n\tLog.Fatal(fmt.Sprint(a...))\n}\n\nfunc FileOpen(path string) (*os.File, error) {\n\tif fi, err := os.Stat(path); err == nil {\n\t\tif !fi.IsDir() {\n\t\t\treturn nil, fmt.Errorf(\"open %s: not a directory\", path)\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(path, 0766); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tvar currenttime string = time.Now().Format(\"2006-01-02\")\n\n\tlogfile, err := os.OpenFile(path+currenttime+\"_LOG.log\", os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\/\/os.Exit(-1)\n\t}\n\n\t\/\/defer logfile.Close()\n\n\treturn logfile, nil\n}\n\nfunc CreatePrintLog(path string) {\n\tlogfile, err := FileOpen(path)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err.Error)\n\t}\n\n\tvar printlevel int = config.Parameters.PrintLevel\n\twriters := []io.Writer{\n\t\tlogfile,\n\t\tos.Stdout,\n\t}\n\tfileAndStdoutWrite := io.MultiWriter(writers...)\n\n\tLog = New(fileAndStdoutWrite, \"\", log.Lmicroseconds, printlevel)\n}\n\nfunc ClosePrintLog() {\n\t\/\/TODO\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/v2ray\/v2ray-core\/common\/serial\"\n)\n\nconst (\n\tDebugLevel = LogLevel(0)\n\tInfoLevel = LogLevel(1)\n\tWarningLevel = LogLevel(2)\n\tErrorLevel = LogLevel(3)\n)\n\ntype errorLog struct {\n\tprefix string\n\tvalues []interface{}\n}\n\nfunc (this *errorLog) String() string {\n\tdata := \"\"\n\tfor _, value := range this.values {\n\t\tswitch typedVal := value.(type) {\n\t\tcase string:\n\t\t\tdata += typedVal\n\t\tcase *string:\n\t\t\tdata += *typedVal\n\t\tcase serial.String:\n\t\t\tdata += typedVal.String()\n\t\tcase error:\n\t\t\tdata += typedVal.Error()\n\t\tdefault:\n\t\t\tdata += fmt.Sprintf(\"%v\", value)\n\t\t}\n\t}\n\treturn this.prefix + data\n}\n\nvar (\n\tnoOpLoggerInstance logWriter = &noOpLogWriter{}\n\tstreamLoggerInstance logWriter = newStdOutLogWriter()\n\n\tdebugLogger = streamLoggerInstance\n\tinfoLogger = streamLoggerInstance\n\twarningLogger = streamLoggerInstance\n\terrorLogger = streamLoggerInstance\n)\n\ntype LogLevel int\n\nfunc SetLogLevel(level LogLevel) {\n\tdebugLogger = noOpLoggerInstance\n\tif level <= DebugLevel {\n\t\tdebugLogger = streamLoggerInstance\n\t}\n\n\tinfoLogger = noOpLoggerInstance\n\tif level <= InfoLevel {\n\t\tinfoLogger = streamLoggerInstance\n\t}\n\n\twarningLogger = noOpLoggerInstance\n\tif level <= WarningLevel {\n\t\twarningLogger = streamLoggerInstance\n\t}\n\n\terrorLogger = noOpLoggerInstance\n\tif level <= ErrorLevel {\n\t\terrorLogger = streamLoggerInstance\n\t}\n}\n\nfunc InitErrorLogger(file string) error {\n\tlogger, err := newFileLogWriter(file)\n\tif err != nil {\n\t\tError(\"Failed to create error logger on file (\", file, \"): \", err)\n\t\treturn err\n\t}\n\tstreamLoggerInstance = logger\n\treturn nil\n}\n\n\/\/ Debug outputs a debug log with given format and optional arguments.\nfunc Debug(v ...interface{}) {\n\tdebugLogger.Log(&errorLog{\n\t\tprefix: \"[Debug]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Info outputs an info log with given format and optional arguments.\nfunc Info(v ...interface{}) {\n\tinfoLogger.Log(&errorLog{\n\t\tprefix: \"[Info]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Warning outputs a warning log with given format and optional arguments.\nfunc Warning(v ...interface{}) {\n\twarningLogger.Log(&errorLog{\n\t\tprefix: \"[Warning]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Error outputs an error log with given format and optional arguments.\nfunc Error(v ...interface{}) {\n\terrorLogger.Log(&errorLog{\n\t\tprefix: \"[Error]\",\n\t\tvalues: v,\n\t})\n}\n<commit_msg>revert unintentional change<commit_after>package log\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/v2ray\/v2ray-core\/common\/serial\"\n)\n\nconst (\n\tDebugLevel = LogLevel(0)\n\tInfoLevel = LogLevel(1)\n\tWarningLevel = LogLevel(2)\n\tErrorLevel = LogLevel(3)\n)\n\ntype errorLog struct {\n\tprefix string\n\tvalues []interface{}\n}\n\nfunc (this *errorLog) String() string {\n\tdata := \"\"\n\tfor _, value := range this.values {\n\t\tswitch typedVal := value.(type) {\n\t\tcase string:\n\t\t\tdata += typedVal\n\t\tcase *string:\n\t\t\tdata += *typedVal\n\t\tcase serial.String:\n\t\t\tdata += typedVal.String()\n\t\tcase error:\n\t\t\tdata += typedVal.Error()\n\t\tdefault:\n\t\t\tdata += fmt.Sprintf(\"%v\", value)\n\t\t}\n\t}\n\treturn this.prefix + data\n}\n\nvar (\n\tnoOpLoggerInstance logWriter = &noOpLogWriter{}\n\tstreamLoggerInstance logWriter = newStdOutLogWriter()\n\n\tdebugLogger = noOpLoggerInstance\n\tinfoLogger = noOpLoggerInstance\n\twarningLogger = streamLoggerInstance\n\terrorLogger = streamLoggerInstance\n)\n\ntype LogLevel int\n\nfunc SetLogLevel(level LogLevel) {\n\tdebugLogger = noOpLoggerInstance\n\tif level <= DebugLevel {\n\t\tdebugLogger = streamLoggerInstance\n\t}\n\n\tinfoLogger = noOpLoggerInstance\n\tif level <= InfoLevel {\n\t\tinfoLogger = streamLoggerInstance\n\t}\n\n\twarningLogger = noOpLoggerInstance\n\tif level <= WarningLevel {\n\t\twarningLogger = streamLoggerInstance\n\t}\n\n\terrorLogger = noOpLoggerInstance\n\tif level <= ErrorLevel {\n\t\terrorLogger = streamLoggerInstance\n\t}\n}\n\nfunc InitErrorLogger(file string) error {\n\tlogger, err := newFileLogWriter(file)\n\tif err != nil {\n\t\tError(\"Failed to create error logger on file (\", file, \"): \", err)\n\t\treturn err\n\t}\n\tstreamLoggerInstance = logger\n\treturn nil\n}\n\n\/\/ Debug outputs a debug log with given format and optional arguments.\nfunc Debug(v ...interface{}) {\n\tdebugLogger.Log(&errorLog{\n\t\tprefix: \"[Debug]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Info outputs an info log with given format and optional arguments.\nfunc Info(v ...interface{}) {\n\tinfoLogger.Log(&errorLog{\n\t\tprefix: \"[Info]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Warning outputs a warning log with given format and optional arguments.\nfunc Warning(v ...interface{}) {\n\twarningLogger.Log(&errorLog{\n\t\tprefix: \"[Warning]\",\n\t\tvalues: v,\n\t})\n}\n\n\/\/ Error outputs an error log with given format and optional arguments.\nfunc Error(v ...interface{}) {\n\terrorLogger.Log(&errorLog{\n\t\tprefix: \"[Error]\",\n\t\tvalues: v,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package slack\n\nimport (\n\t\"time\"\n\n\t\"github.com\/danryan\/hal\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nfunc (a *adapter) startConnection() {\n\tchReceiver := make(chan slack.SlackEvent)\n\tapi := slack.New(a.token)\n\t\/\/ api.SetDebug(true)\n\n\tusers, err := api.GetUsers()\n\tif err != nil {\n\t\thal.Logger.Debugf(\"%s\\n\", err)\n\t}\n\n\tfor _, user := range users {\n\t\t\/\/ retrieve the name and mention name of our bot from the server\n\t\t\/\/ if user.Id == api.Id {\n\t\t\/\/ \ta.name = user.Name\n\t\t\/\/ \t\/\/ skip adding the bot to the users map\n\t\t\/\/ \tcontinue\n\t\t\/\/ }\n\t\t\/\/ Initialize a newUser object in case we need it.\n\t\tnewUser := hal.User{\n\t\t\tID: user.Id,\n\t\t\tName: user.Name,\n\t\t}\n\t\t\/\/ Prepopulate our users map because we can easily do so.\n\t\t\/\/ If a user doesn't exist, set it.\n\t\tu, err := a.Robot.Users.Get(user.Id)\n\t\tif err != nil {\n\t\t\ta.Robot.Users.Set(user.Id, newUser)\n\t\t}\n\n\t\t\/\/ If the user doesn't match completely (say, if someone changes their name),\n\t\t\/\/ then adjust what we have stored.\n\t\tif u.Name != user.Name {\n\t\t\ta.Robot.Users.Set(user.Id, newUser)\n\t\t}\n\t}\n\thal.Logger.Debugf(\"Stored users: %s\\n\", a.Robot.Users.All())\n\n\ta.wsAPI, err = api.StartRTM(\"\", \"http:\/\/\"+a.team+\".slack.com\")\n\tif err != nil {\n\t\thal.Logger.Debugf(\"%s\\n\", err)\n\t}\n\n\tgo a.wsAPI.HandleIncomingEvents(chReceiver)\n\tgo a.wsAPI.Keepalive(20 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase msg := <-chReceiver:\n\t\t\thal.Logger.Debug(\"Event Received: \")\n\t\t\tswitch msg.Data.(type) {\n\t\t\tcase slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tm := msg.Data.(*slack.MessageEvent)\n\t\t\t\thal.Logger.Debugf(\"Message: %v\\n\", m)\n\t\t\t\tmsg := a.newMessage(m)\n\t\t\t\ta.Receive(msg)\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tm := msg.Data.(*slack.PresenceChangeEvent)\n\t\t\t\thal.Logger.Debugf(\"Presence Change: %v\\n\", m)\n\t\t\tcase slack.LatencyReport:\n\t\t\t\tm := msg.Data.(slack.LatencyReport)\n\t\t\t\thal.Logger.Debugf(\"Current latency: %v\\n\", m.Value)\n\t\t\tdefault:\n\t\t\t\thal.Logger.Debugf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *adapter) newMessage(msg *slack.MessageEvent) *hal.Message {\n\tuser, _ := a.Robot.Users.Get(msg.UserId)\n\treturn &hal.Message{\n\t\tUser: user,\n\t\tRoom: msg.ChannelId,\n\t\tText: msg.Text,\n\t}\n}\n<commit_msg>Handle new member joining the team<commit_after>package slack\n\nimport (\n\t\"time\"\n\n\t\"github.com\/danryan\/hal\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nfunc (a *adapter) startConnection() {\n\tchReceiver := make(chan slack.SlackEvent)\n\tapi := slack.New(a.token)\n\t\/\/ api.SetDebug(true)\n\n\tusers, err := api.GetUsers()\n\tif err != nil {\n\t\thal.Logger.Debugf(\"%s\\n\", err)\n\t}\n\n\tfor _, user := range users {\n\t\t\/\/ retrieve the name and mention name of our bot from the server\n\t\t\/\/ if user.Id == api.Id {\n\t\t\/\/ \ta.name = user.Name\n\t\t\/\/ \t\/\/ skip adding the bot to the users map\n\t\t\/\/ \tcontinue\n\t\t\/\/ }\n\t\t\/\/ Initialize a newUser object in case we need it.\n\t\tnewUser := hal.User{\n\t\t\tID: user.Id,\n\t\t\tName: user.Name,\n\t\t}\n\t\t\/\/ Prepopulate our users map because we can easily do so.\n\t\t\/\/ If a user doesn't exist, set it.\n\t\tu, err := a.Robot.Users.Get(user.Id)\n\t\tif err != nil {\n\t\t\ta.Robot.Users.Set(user.Id, newUser)\n\t\t}\n\n\t\t\/\/ If the user doesn't match completely (say, if someone changes their name),\n\t\t\/\/ then adjust what we have stored.\n\t\tif u.Name != user.Name {\n\t\t\ta.Robot.Users.Set(user.Id, newUser)\n\t\t}\n\t}\n\thal.Logger.Debugf(\"Stored users: %s\\n\", a.Robot.Users.All())\n\n\ta.wsAPI, err = api.StartRTM(\"\", \"http:\/\/\"+a.team+\".slack.com\")\n\tif err != nil {\n\t\thal.Logger.Debugf(\"%s\\n\", err)\n\t}\n\n\tgo a.wsAPI.HandleIncomingEvents(chReceiver)\n\tgo a.wsAPI.Keepalive(20 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase msg := <-chReceiver:\n\t\t\thal.Logger.Debug(\"Event Received: \")\n\t\t\tswitch msg.Data.(type) {\n\t\t\tcase slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tm := msg.Data.(*slack.MessageEvent)\n\t\t\t\thal.Logger.Debugf(\"Message: %v\\n\", m)\n\t\t\t\tmsg := a.newMessage(m)\n\t\t\t\ta.Receive(msg)\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tm := msg.Data.(*slack.PresenceChangeEvent)\n\t\t\t\thal.Logger.Debugf(\"Presence Change: %v\\n\", m)\n\t\t\tcase slack.LatencyReport:\n\t\t\t\tm := msg.Data.(slack.LatencyReport)\n\t\t\t\thal.Logger.Debugf(\"Current latency: %v\\n\", m.Value)\n\t\t\tcase slack.TeamJoinEvent:\n\t\t\t\tm := msg.Data.(slack.TeamJoinEvent)\n\t\t\t\thal.Logger.Debugf(\"New member joined the team: %v\\n\", m.User)\n\t\t\t\t\/\/ Add the new member to the user list\n\t\t\t\tif _, err := a.Robot.Users.Get(m.User.Id); err != nil {\n\t\t\t\t\ta.Robot.Users.Set(m.User.Id, hal.User{ID: m.User.Id, Name: m.User.Name})\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\thal.Logger.Debugf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *adapter) newMessage(msg *slack.MessageEvent) *hal.Message {\n\tuser, _ := a.Robot.Users.Get(msg.UserId)\n\treturn &hal.Message{\n\t\tUser: user,\n\t\tRoom: msg.ChannelId,\n\t\tText: msg.Text,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n\/\/ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar workingDir = \"\/\"\n\nfunc init() {\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tworkingDir = filepath.ToSlash(wd) + \"\/\"\n\t}\n}\n\n\/\/ Represents runtime caller context.\ntype LogContextInterface interface {\n\t\/\/ Caller's function name.\n\tFunc() string\n\t\/\/ Caller's line number.\n\tLine() int\n\t\/\/ Caller's file short path (in slashed form).\n\tShortPath() string\n\t\/\/ Caller's file full path (in slashed form).\n\tFullPath() string\n\t\/\/ Caller's file name (without path).\n\tFileName() string\n\t\/\/ True if the context is correct and may be used.\n\t\/\/ If false, then an error in context evaluation occurred and\n\t\/\/ all its other data may be corrupted.\n\tIsValid() bool\n\t\/\/ Time when log function was called.\n\tCallTime() time.Time\n}\n\n\/\/ Returns context of the caller\nfunc currentContext() (LogContextInterface, error) {\n\treturn specifyContext(1)\n}\n\nfunc extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) {\n\tpc, fp, ln, ok := runtime.Caller(skip)\n\tif !ok {\n\t\terr = fmt.Errorf(\"error during runtime.Caller\")\n\t\treturn\n\t}\n\tline = ln\n\tfullPath = fp\n\tif strings.HasPrefix(fp, workingDir) {\n\t\tshortPath = fp[len(workingDir):]\n\t} else {\n\t\tshortPath = fp\n\t}\n\tfuncName = runtime.FuncForPC(pc).Name()\n\tif strings.HasPrefix(funcName, workingDir) {\n\t\tfuncName = funcName[len(workingDir):]\n\t}\n\treturn\n}\n\n\/\/ Returns context of the function with placed \"skip\" stack frames of the caller\n\/\/ If skip == 0 then behaves like currentContext\n\/\/ Context is returned in any situation, even if error occurs. But, if an error\n\/\/ occurs, the returned context is an error context, which contains no paths\n\/\/ or names, but states that they can't be extracted.\nfunc specifyContext(skip int) (ctx LogContextInterface, err error) {\n\tcallTime := time.Now()\n\tif skip < 0 {\n\t\terr = fmt.Errorf(\"can not skip negative stack frames\")\n\t\tctx = &errorContext{callTime, err}\n\t\treturn\n\t}\n\tvar (\n\t\tfullPath, shortPath, funcName string\n\t\tline int\n\t)\n\tfullPath, shortPath, funcName, line, err = extractCallerInfo(skip + 2)\n\tif err != nil {\n\t\tctx = &errorContext{callTime, err}\n\t\treturn\n\t}\n\t_, fileName := filepath.Split(fullPath)\n\tctx = &logContext{funcName, line, shortPath, fullPath, fileName, callTime}\n\treturn\n}\n\n\/\/ Represents a normal runtime caller context.\ntype logContext struct {\n\tfuncName string\n\tline int\n\tshortPath string\n\tfullPath string\n\tfileName string\n\tcallTime time.Time\n}\n\nfunc (context *logContext) IsValid() bool {\n\treturn true\n}\n\nfunc (context *logContext) Func() string {\n\treturn context.funcName\n}\n\nfunc (context *logContext) Line() int {\n\treturn context.line\n}\n\nfunc (context *logContext) ShortPath() string {\n\treturn context.shortPath\n}\n\nfunc (context *logContext) FullPath() string {\n\treturn context.fullPath\n}\n\nfunc (context *logContext) FileName() string {\n\treturn context.fileName\n}\n\nfunc (context *logContext) CallTime() time.Time {\n\treturn context.callTime\n}\n\nconst (\n\terrorContextFunc = \"Func() error: \"\n\terrorContextShortPath = \"ShortPath() error: \"\n\terrorContextFullPath = \"FullPath() error: \"\n\terrorContextFileName = \"FileName() error: \"\n)\n\n\/\/ Represents an error context\ntype errorContext struct {\n\terrorTime time.Time\n\terr error\n}\n\nfunc (errContext *errorContext) IsValid() bool {\n\treturn false\n}\n\nfunc (errContext *errorContext) Line() int {\n\treturn -1\n}\n\nfunc (errContext *errorContext) Func() string {\n\treturn errorContextFunc + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) ShortPath() string {\n\treturn errorContextShortPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FullPath() string {\n\treturn errorContextFullPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FileName() string {\n\treturn errorContextFileName + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) CallTime() time.Time {\n\treturn errContext.errorTime\n}\n<commit_msg>Refactor specifyContext function<commit_after>\/\/ Copyright (c) 2012 - Cloud Instruments Co., Ltd.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n\/\/ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage seelog\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar workingDir = \"\/\"\n\nfunc init() {\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tworkingDir = filepath.ToSlash(wd) + \"\/\"\n\t}\n}\n\n\/\/ Represents runtime caller context.\ntype LogContextInterface interface {\n\t\/\/ Caller's function name.\n\tFunc() string\n\t\/\/ Caller's line number.\n\tLine() int\n\t\/\/ Caller's file short path (in slashed form).\n\tShortPath() string\n\t\/\/ Caller's file full path (in slashed form).\n\tFullPath() string\n\t\/\/ Caller's file name (without path).\n\tFileName() string\n\t\/\/ True if the context is correct and may be used.\n\t\/\/ If false, then an error in context evaluation occurred and\n\t\/\/ all its other data may be corrupted.\n\tIsValid() bool\n\t\/\/ Time when log function was called.\n\tCallTime() time.Time\n}\n\n\/\/ Returns context of the caller\nfunc currentContext() (LogContextInterface, error) {\n\treturn specifyContext(1)\n}\n\nfunc extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) {\n\tpc, fp, ln, ok := runtime.Caller(skip)\n\tif !ok {\n\t\terr = fmt.Errorf(\"error during runtime.Caller\")\n\t\treturn\n\t}\n\tline = ln\n\tfullPath = fp\n\tif strings.HasPrefix(fp, workingDir) {\n\t\tshortPath = fp[len(workingDir):]\n\t} else {\n\t\tshortPath = fp\n\t}\n\tfuncName = runtime.FuncForPC(pc).Name()\n\tif strings.HasPrefix(funcName, workingDir) {\n\t\tfuncName = funcName[len(workingDir):]\n\t}\n\treturn\n}\n\n\/\/ Returns context of the function with placed \"skip\" stack frames of the caller\n\/\/ If skip == 0 then behaves like currentContext\n\/\/ Context is returned in any situation, even if error occurs. But, if an error\n\/\/ occurs, the returned context is an error context, which contains no paths\n\/\/ or names, but states that they can't be extracted.\nfunc specifyContext(skip int) (LogContextInterface, error) {\n\tcallTime := time.Now()\n\tif skip < 0 {\n\t\terr := fmt.Errorf(\"can not skip negative stack frames\")\n\t\treturn &errorContext{callTime, err}, err\n\t}\n\tfullPath, shortPath, funcName, line, err := extractCallerInfo(skip + 2)\n\tif err != nil {\n\t\treturn &errorContext{callTime, err}, err\n\t}\n\t_, fileName := filepath.Split(fullPath)\n\treturn &logContext{funcName, line, shortPath, fullPath, fileName, callTime}, nil\n}\n\n\/\/ Represents a normal runtime caller context.\ntype logContext struct {\n\tfuncName string\n\tline int\n\tshortPath string\n\tfullPath string\n\tfileName string\n\tcallTime time.Time\n}\n\nfunc (context *logContext) IsValid() bool {\n\treturn true\n}\n\nfunc (context *logContext) Func() string {\n\treturn context.funcName\n}\n\nfunc (context *logContext) Line() int {\n\treturn context.line\n}\n\nfunc (context *logContext) ShortPath() string {\n\treturn context.shortPath\n}\n\nfunc (context *logContext) FullPath() string {\n\treturn context.fullPath\n}\n\nfunc (context *logContext) FileName() string {\n\treturn context.fileName\n}\n\nfunc (context *logContext) CallTime() time.Time {\n\treturn context.callTime\n}\n\nconst (\n\terrorContextFunc = \"Func() error: \"\n\terrorContextShortPath = \"ShortPath() error: \"\n\terrorContextFullPath = \"FullPath() error: \"\n\terrorContextFileName = \"FileName() error: \"\n)\n\n\/\/ Represents an error context\ntype errorContext struct {\n\terrorTime time.Time\n\terr error\n}\n\nfunc (errContext *errorContext) IsValid() bool {\n\treturn false\n}\n\nfunc (errContext *errorContext) Line() int {\n\treturn -1\n}\n\nfunc (errContext *errorContext) Func() string {\n\treturn errorContextFunc + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) ShortPath() string {\n\treturn errorContextShortPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FullPath() string {\n\treturn errorContextFullPath + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) FileName() string {\n\treturn errorContextFileName + errContext.err.Error()\n}\n\nfunc (errContext *errorContext) CallTime() time.Time {\n\treturn errContext.errorTime\n}\n<|endoftext|>"} {"text":"<commit_before>package daemon\n\nimport (\n\t\"github.com\/Clever\/sphinx\/config\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n)\n\nfunc TestConfigReload(t *testing.T) {\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\tt.Fatal(\"Error loading config: \" + err.Error())\n\t}\n\td := daemon{}\n\td.LoadConfig(conf)\n\tif d.handler == nil {\n\t\tt.Fatal(\"Didn't assign handler\")\n\t}\n}\n\nfunc TestFailedReload(t *testing.T) {\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\tt.Fatal(\"Error loading config: \" + err.Error())\n\t}\n\tdaemon, err := New(conf)\n\tif err != nil {\n\t\tt.Fatal(\"Error creating new daemon: \" + err.Error())\n\t}\n\tconf2 := config.Config{}\n\terr = daemon.LoadConfig(conf2)\n\tif err == nil {\n\t\tt.Fatal(\"Should have errored on empty configuration\")\n\t}\n\n\tconf.Proxy.Listen = \":1000\"\n\terr = daemon.LoadConfig(conf)\n\tif err == nil {\n\t\tt.Fatalf(\"Should have errored on changed listen port\")\n\t}\n}\n\nvar localServerPort = \":8081\"\nvar localServerHost = \"http:\/\/localhost\" + localServerPort\nvar localProxyHost = \"http:\/\/localhost:6634\"\nvar healthCheckURL = \"http:\/\/localhost:60002\/health\/check\"\n\nfunc setUpDaemonWithLocalServer() error {\n\t\/\/ Set up a local server that 404s everywhere except route '\/healthyroute'.\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"\/healthyroute\", func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.Write([]byte(\"healthy\"))\n\t})\n\tmux.HandleFunc(\"\/\", func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.WriteHeader(http.StatusNotFound)\n\t\trw.Write([]byte(\"404\"))\n\t})\n\tgo http.ListenAndServe(localServerPort, mux)\n\n\t\/\/ Set up the daemon to proxy to the local server.\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf.Proxy.Host = localServerHost\n\n\tdaemon, err := New(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo daemon.Start()\n\treturn nil\n}\n\n\/\/ testProxyRequest calls the proxy server at the given path and verifies that\n\/\/ the request returns the given HTTP status and body content.\nfunc testProxyRequest(t *testing.T, url string, expectedStatus int, expectedBody string) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tt.Fatalf(\"Proxy request failed: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != expectedStatus {\n\t\tt.Fatalf(\"Response status with url %s does not match expected value. Actual: %d. Expected: %d.\",\n\t\t\turl, resp.StatusCode, expectedStatus)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read response body: %s\", err.Error())\n\t}\n\n\tbodyStr := string(body)\n\tif bodyStr != expectedBody {\n\t\tt.Fatalf(\"Response body does not match expected value. Actual: \\\"%s\\\" Expected: \\\"%s\\\"\",\n\t\t\tbodyStr, expectedBody)\n\t}\n}\n\nfunc TestHealthCheck(t *testing.T) {\n\terr := setUpDaemonWithLocalServer()\n\tif err != nil {\n\t\tt.Fatalf(\"Test daemon setup failed: %s\", err.Error())\n\t}\n\n\t\/\/ Test a route that should be proxied to 404.\n\ttestProxyRequest(t, localProxyHost+\"\/helloworld\", http.StatusNotFound, \"404\")\n\n\t\/\/ Test a route that should be proxied to a valid response.\n\ttestProxyRequest(t, localProxyHost+\"\/healthyroute\", http.StatusOK, \"healthy\")\n\n\t\/\/ Test the health check.\n\ttestProxyRequest(t, healthCheckURL, http.StatusOK, \"\")\n}\n<commit_msg>daemon: Add daemon test for no health check.<commit_after>package daemon\n\nimport (\n\t\"github.com\/Clever\/sphinx\/config\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestConfigReload(t *testing.T) {\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\tt.Fatal(\"Error loading config: \" + err.Error())\n\t}\n\td := daemon{}\n\td.LoadConfig(conf)\n\tif d.handler == nil {\n\t\tt.Fatal(\"Didn't assign handler\")\n\t}\n}\n\nfunc TestFailedReload(t *testing.T) {\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\tt.Fatal(\"Error loading config: \" + err.Error())\n\t}\n\tdaemon, err := New(conf)\n\tif err != nil {\n\t\tt.Fatal(\"Error creating new daemon: \" + err.Error())\n\t}\n\tconf2 := config.Config{}\n\terr = daemon.LoadConfig(conf2)\n\tif err == nil {\n\t\tt.Fatal(\"Should have errored on empty configuration\")\n\t}\n\n\tconf.Proxy.Listen = \":1000\"\n\terr = daemon.LoadConfig(conf)\n\tif err == nil {\n\t\tt.Fatalf(\"Should have errored on changed listen port\")\n\t}\n}\n\nfunc setUpDaemonWithLocalServer(localServerPort, localProxyListen, healthCheckPort string) error {\n\t\/\/ Set up a local server that 404s everywhere except route '\/healthyroute'.\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"\/healthyroute\", func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.Write([]byte(\"healthy\"))\n\t})\n\tmux.HandleFunc(\"\/\", func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.WriteHeader(http.StatusNotFound)\n\t\trw.Write([]byte(\"404\"))\n\t})\n\tgo http.ListenAndServe(\":\"+localServerPort, mux)\n\n\t\/\/ Set up the daemon to proxy to the local server.\n\tconf, err := config.New(\"..\/example.yaml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf.Proxy.Host = \"http:\/\/localhost:\" + localServerPort\n\tconf.Proxy.Listen = localProxyListen\n\tconf.HealthCheck.Port = healthCheckPort\n\n\tdaemon, err := New(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo daemon.Start()\n\treturn nil\n}\n\n\/\/ testProxyRequest calls the proxy server at the given path and verifies that\n\/\/ the request returns the given HTTP status and body content.\nfunc testProxyRequest(t *testing.T, url string, expectedStatus int, expectedBody string) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tt.Fatalf(\"Proxy request failed: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != expectedStatus {\n\t\tt.Fatalf(\"Response status with url %s does not match expected value. Actual: %d. Expected: %d.\",\n\t\t\turl, resp.StatusCode, expectedStatus)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read response body: %s\", err.Error())\n\t}\n\n\tbodyStr := string(body)\n\tif bodyStr != expectedBody {\n\t\tt.Fatalf(\"Response body does not match expected value. Actual: \\\"%s\\\" Expected: \\\"%s\\\"\",\n\t\t\tbodyStr, expectedBody)\n\t}\n}\n\nfunc getHealthCheckURLFromPort(port string) string {\n\tif len(port) > 0 {\n\t\treturn \"http:\/\/localhost:\" + port + \"\/health\/check\"\n\t}\n\treturn \"http:\/\/localhost\/health\/check\"\n}\n\nfunc TestHealthCheck(t *testing.T) {\n\tlocalProxyListen := \":6634\"\n\thealthCheckPort := \"60002\"\n\n\terr := setUpDaemonWithLocalServer(\"8000\", localProxyListen, healthCheckPort)\n\tif err != nil {\n\t\tt.Fatalf(\"Test daemon setup failed: %s\", err.Error())\n\t}\n\n\tlocalProxyURL := \"http:\/\/localhost\" + localProxyListen\n\n\t\/\/ Test a route that should be proxied to 404.\n\ttestProxyRequest(t, localProxyURL+\"\/helloworld\", http.StatusNotFound, \"404\")\n\n\t\/\/ Test a route that should be proxied to a valid response.\n\ttestProxyRequest(t, localProxyURL+\"\/healthyroute\", http.StatusOK, \"healthy\")\n\n\t\/\/ Test the health check.\n\ttestProxyRequest(t, getHealthCheckURLFromPort(healthCheckPort), http.StatusOK, \"\")\n}\n\nfunc TestDaemonWithNoHealthCheck(t *testing.T) {\n\tlocalProxyListen := \":6635\"\n\thealthCheckPort := \"\"\n\n\terr := setUpDaemonWithLocalServer(\"8001\", localProxyListen, healthCheckPort)\n\tif err != nil {\n\t\tt.Fatalf(\"Test daemon setup failed: %s\", err.Error())\n\t}\n\n\t\/\/ Because so many servers are starting, sleep for a second to make sure\n\t\/\/ they start.\n\ttime.Sleep(time.Second)\n\n\tlocalProxyURL := \"http:\/\/localhost\" + localProxyListen\n\n\t\/\/ Test a route that should be proxied to 404.\n\ttestProxyRequest(t, localProxyURL+\"\/helloworld\", http.StatusNotFound, \"404\")\n\n\t\/\/ Test a route that should be proxied to a valid response.\n\ttestProxyRequest(t, localProxyURL+\"\/healthyroute\", http.StatusOK, \"healthy\")\n\n\t\/\/ Health check request should fail.\n\tif _, err := http.Get(getHealthCheckURLFromPort(healthCheckPort)); err == nil {\n\t\tt.Fatalf(\"Health check request should have failed, but it did not.\")\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/common\"\n\t\"github.com\/cilium\/cilium\/common\/addressing\"\n\t\"github.com\/cilium\/cilium\/daemon\/options\"\n\t\"github.com\/cilium\/cilium\/pkg\/apierror\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/cilium\/cilium\/pkg\/policy\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar (\n\tlbls = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", common.CiliumLabelSource),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", common.CiliumLabelSource),\n\t\t\"key\": labels.NewLabel(\"key\", \"\", common.CiliumLabelSource),\n\t\t\"foo==\": labels.NewLabel(\"foo==\", \"==\", common.CiliumLabelSource),\n\t\t`foo\\\\=`: labels.NewLabel(`foo\\\\=`, `\\=`, common.CiliumLabelSource),\n\t\t`\/\/=\/`: labels.NewLabel(`\/\/=\/`, \"\", common.CiliumLabelSource),\n\t\t`%`: labels.NewLabel(`%`, `%ed`, common.CiliumLabelSource),\n\t}\n\tlbls2 = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", common.CiliumLabelSource),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", common.CiliumLabelSource),\n\t}\n\twantSecCtxLbls = policy.Identity{\n\t\tID: 123,\n\t\tEndpoints: map[string]time.Time{\n\t\t\t\"cc08ff400e355f736dce1c291a6a4007ab9f2d56d42e1f3630ba87b861d45307\": time.Now(),\n\t\t},\n\t\tLabels: lbls,\n\t}\n\tnilAPIError *apierror.APIError\n)\n\nfunc (ds *DaemonSuite) SetUpTest(c *C) {\n\ttime.Local = time.UTC\n\ttempRunDir, err := ioutil.TempDir(\"\", \"cilium-test-run\")\n\tc.Assert(err, IsNil)\n\terr = os.Mkdir(filepath.Join(tempRunDir, \"globals\"), 0777)\n\tc.Assert(err, IsNil)\n\n\tnodeAddress, err := addressing.NewNodeAddress(\"beef:beef:beef:beef:aaaa:aaaa:1111:0\", \"10.1.0.1\", \"\")\n\tc.Assert(err, IsNil)\n\n\tdaemonConf := &Config{\n\t\tDryMode: true,\n\t\tOpts: option.NewBoolOptions(&options.Library),\n\t}\n\tdaemonConf.RunDir = tempRunDir\n\tdaemonConf.StateDir = tempRunDir\n\tdaemonConf.LXCMap = nil\n\tdaemonConf.NodeAddress = nodeAddress\n\tdaemonConf.DockerEndpoint = \"tcp:\/\/127.0.0.1\"\n\tdaemonConf.K8sEndpoint = \"tcp:\/\/127.0.0.1\"\n\tdaemonConf.ValidLabelPrefixes = nil\n\tdaemonConf.Opts.Set(endpoint.OptionDropNotify, true)\n\tdaemonConf.Device = \"undefined\"\n\n\terr = daemonConf.SetKVBackend()\n\tc.Assert(err, IsNil)\n\n\td, err := NewDaemon(daemonConf)\n\tc.Assert(err, IsNil)\n\tds.d = d\n\td.kvClient.DeleteTree(common.OperationalPath)\n\t\/\/ Needs to be less than 1 second otherwise GetCachedMaxLabelID might\n\t\/\/ not work properly\n\td.EnableKVStoreWatcher(time.Nanosecond)\n}\n\nfunc (ds *DaemonSuite) TearDownTest(c *C) {\n\tos.RemoveAll(ds.d.conf.RunDir)\n}\n\nfunc (ds *DaemonSuite) TestLabels(c *C) {\n\t\/\/Set up last free ID with zero\n\tid, err := ds.d.GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, policy.MinimalNumericIdentity)\n\n\tsecCtxLbl, new, err := ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\t\/\/Get labels from ID\n\tgotSecCtxLbl, err := ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 3)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity + 1)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity + 1\n\twantSecCtxLbls.Labels = lbls2\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 1)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tvar emptySecCtxLblPtr *policy.Identity\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\terr = ds.d.kvClient.SetMaxID(common.LastFreeLabelIDKeyPath, policy.MinimalNumericIdentity.Uint32(), policy.MinimalNumericIdentity.Uint32())\n\tc.Assert(err, IsNil)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-non-existent\")\n\tc.Assert(err, DeepEquals, errors.New(\"identity not found\"))\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\tsha256sum := lbls2.SHA256Sum()\n\tgotSecCtxLbl, err = ds.d.LookupIdentityBySHA256(sha256sum)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, DeepEquals, secCtxLbl)\n\n\terr = ds.d.DeleteIdentityBySHA256(sha256sum, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n}\n\nfunc (ds *DaemonSuite) TestGetMaxID(c *C) {\n\tlastID := policy.NumericIdentity(common.MaxSetOfLabels - 1)\n\terr := ds.d.kvClient.SetValue(common.LastFreeLabelIDKeyPath, lastID)\n\tc.Assert(err, IsNil)\n\n\tid, err := ds.d.GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, lastID)\n}\n<commit_msg>ineffassign: check unused error<commit_after>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/common\"\n\t\"github.com\/cilium\/cilium\/common\/addressing\"\n\t\"github.com\/cilium\/cilium\/daemon\/options\"\n\t\"github.com\/cilium\/cilium\/pkg\/apierror\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/cilium\/cilium\/pkg\/policy\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar (\n\tlbls = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", common.CiliumLabelSource),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", common.CiliumLabelSource),\n\t\t\"key\": labels.NewLabel(\"key\", \"\", common.CiliumLabelSource),\n\t\t\"foo==\": labels.NewLabel(\"foo==\", \"==\", common.CiliumLabelSource),\n\t\t`foo\\\\=`: labels.NewLabel(`foo\\\\=`, `\\=`, common.CiliumLabelSource),\n\t\t`\/\/=\/`: labels.NewLabel(`\/\/=\/`, \"\", common.CiliumLabelSource),\n\t\t`%`: labels.NewLabel(`%`, `%ed`, common.CiliumLabelSource),\n\t}\n\tlbls2 = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", common.CiliumLabelSource),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", common.CiliumLabelSource),\n\t}\n\twantSecCtxLbls = policy.Identity{\n\t\tID: 123,\n\t\tEndpoints: map[string]time.Time{\n\t\t\t\"cc08ff400e355f736dce1c291a6a4007ab9f2d56d42e1f3630ba87b861d45307\": time.Now(),\n\t\t},\n\t\tLabels: lbls,\n\t}\n\tnilAPIError *apierror.APIError\n)\n\nfunc (ds *DaemonSuite) SetUpTest(c *C) {\n\ttime.Local = time.UTC\n\ttempRunDir, err := ioutil.TempDir(\"\", \"cilium-test-run\")\n\tc.Assert(err, IsNil)\n\terr = os.Mkdir(filepath.Join(tempRunDir, \"globals\"), 0777)\n\tc.Assert(err, IsNil)\n\n\tnodeAddress, err := addressing.NewNodeAddress(\"beef:beef:beef:beef:aaaa:aaaa:1111:0\", \"10.1.0.1\", \"\")\n\tc.Assert(err, IsNil)\n\n\tdaemonConf := &Config{\n\t\tDryMode: true,\n\t\tOpts: option.NewBoolOptions(&options.Library),\n\t}\n\tdaemonConf.RunDir = tempRunDir\n\tdaemonConf.StateDir = tempRunDir\n\tdaemonConf.LXCMap = nil\n\tdaemonConf.NodeAddress = nodeAddress\n\tdaemonConf.DockerEndpoint = \"tcp:\/\/127.0.0.1\"\n\tdaemonConf.K8sEndpoint = \"tcp:\/\/127.0.0.1\"\n\tdaemonConf.ValidLabelPrefixes = nil\n\tdaemonConf.Opts.Set(endpoint.OptionDropNotify, true)\n\tdaemonConf.Device = \"undefined\"\n\n\terr = daemonConf.SetKVBackend()\n\tc.Assert(err, IsNil)\n\n\td, err := NewDaemon(daemonConf)\n\tc.Assert(err, IsNil)\n\tds.d = d\n\td.kvClient.DeleteTree(common.OperationalPath)\n\t\/\/ Needs to be less than 1 second otherwise GetCachedMaxLabelID might\n\t\/\/ not work properly\n\td.EnableKVStoreWatcher(time.Nanosecond)\n}\n\nfunc (ds *DaemonSuite) TearDownTest(c *C) {\n\tos.RemoveAll(ds.d.conf.RunDir)\n}\n\nfunc (ds *DaemonSuite) TestLabels(c *C) {\n\t\/\/Set up last free ID with zero\n\tid, err := ds.d.GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, policy.MinimalNumericIdentity)\n\n\tsecCtxLbl, new, err := ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\t\/\/Get labels from ID\n\tgotSecCtxLbl, err := ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 3)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity + 1)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity + 1\n\twantSecCtxLbls.Labels = lbls2\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 1)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tvar emptySecCtxLblPtr *policy.Identity\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\terr = ds.d.kvClient.SetMaxID(common.LastFreeLabelIDKeyPath, policy.MinimalNumericIdentity.Uint32(), policy.MinimalNumericIdentity.Uint32())\n\tc.Assert(err, IsNil)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-non-existent\")\n\tc.Assert(err, DeepEquals, errors.New(\"identity not found\"))\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\tsha256sum := lbls2.SHA256Sum()\n\tgotSecCtxLbl, err = ds.d.LookupIdentityBySHA256(sha256sum)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, DeepEquals, secCtxLbl)\n\n\terr = ds.d.DeleteIdentityBySHA256(sha256sum, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n}\n\nfunc (ds *DaemonSuite) TestGetMaxID(c *C) {\n\tlastID := policy.NumericIdentity(common.MaxSetOfLabels - 1)\n\terr := ds.d.kvClient.SetValue(common.LastFreeLabelIDKeyPath, lastID)\n\tc.Assert(err, IsNil)\n\n\tid, err := ds.d.GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, lastID)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/common\"\n\t\"github.com\/cilium\/cilium\/daemon\/options\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/cilium\/cilium\/pkg\/policy\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar (\n\tlbls = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", labels.LabelSourceContainer),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", labels.LabelSourceContainer),\n\t\t\"key\": labels.NewLabel(\"key\", \"\", labels.LabelSourceContainer),\n\t\t\"foo==\": labels.NewLabel(\"foo==\", \"==\", labels.LabelSourceContainer),\n\t\t`foo\\\\=`: labels.NewLabel(`foo\\\\=`, `\\=`, labels.LabelSourceContainer),\n\t\t`\/\/=\/`: labels.NewLabel(`\/\/=\/`, \"\", labels.LabelSourceContainer),\n\t\t`%`: labels.NewLabel(`%`, `%ed`, labels.LabelSourceContainer),\n\t}\n\tlbls2 = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", labels.LabelSourceContainer),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", labels.LabelSourceContainer),\n\t}\n\twantSecCtxLbls = policy.Identity{\n\t\tID: 123,\n\t\tEndpoints: map[string]time.Time{\n\t\t\t\"cc08ff400e355f736dce1c291a6a4007ab9f2d56d42e1f3630ba87b861d45307\": time.Now().UTC(),\n\t\t},\n\t\tLabels: lbls,\n\t}\n)\n\nfunc (ds *DaemonSuite) SetUpTest(c *C) {\n\ttime.Local = time.UTC\n\ttempRunDir, err := ioutil.TempDir(\"\", \"cilium-test-run\")\n\tc.Assert(err, IsNil)\n\terr = os.Mkdir(filepath.Join(tempRunDir, \"globals\"), 0777)\n\tc.Assert(err, IsNil)\n\n\tdaemonConf := &Config{\n\t\tDryMode: true,\n\t\tOpts: option.NewBoolOptions(&options.Library),\n\t}\n\tdaemonConf.RunDir = tempRunDir\n\tdaemonConf.StateDir = tempRunDir\n\t\/\/ Get the default labels prefix filter\n\terr = labels.ParseLabelPrefixCfg(nil, \"\")\n\tc.Assert(err, IsNil)\n\tdaemonConf.Opts.Set(endpoint.OptionDropNotify, true)\n\tdaemonConf.Opts.Set(endpoint.OptionTraceNotify, true)\n\tdaemonConf.Device = \"undefined\"\n\n\tkvstore.SetupDummy()\n\n\td, err := NewDaemon(daemonConf)\n\tc.Assert(err, IsNil)\n\tds.d = d\n\tkvstore.Client().DeleteTree(common.OperationalPath)\n\t\/\/ Needs to be less than 1 second otherwise GetCachedMaxLabelID might\n\t\/\/ not work properly\n\td.EnableKVStoreWatcher(time.Nanosecond)\n}\n\nfunc (ds *DaemonSuite) TearDownTest(c *C) {\n\tos.RemoveAll(ds.d.conf.RunDir)\n}\n\nfunc (ds *DaemonSuite) TestLabels(c *C) {\n\t\/\/Set up last free ID with zero\n\tid, err := GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, policy.MinimalNumericIdentity)\n\n\tsecCtxLbl, new, err := ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\t\/\/Get labels from ID\n\tgotSecCtxLbl, err := ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 3)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity + 1)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity + 1\n\twantSecCtxLbls.Labels = lbls2\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 1)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tvar emptySecCtxLblPtr *policy.Identity\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\terr = kvstore.Client().SetMaxID(common.LastFreeLabelIDKeyPath, policy.MinimalNumericIdentity.Uint32(), policy.MinimalNumericIdentity.Uint32())\n\tc.Assert(err, IsNil)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-non-existent\")\n\tc.Assert(err, DeepEquals, errors.New(\"identity not found\"))\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\tsha256sum := lbls2.SHA256Sum()\n\tgotSecCtxLbl, err = LookupIdentityBySHA256(sha256sum)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, DeepEquals, secCtxLbl)\n\n\terr = ds.d.DeleteIdentityBySHA256(sha256sum, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n}\n\nfunc (ds *DaemonSuite) TestGetMaxID(c *C) {\n\tlastID := policy.NumericIdentity(common.MaxSetOfLabels - 1)\n\terr := kvstore.Client().SetValue(common.LastFreeLabelIDKeyPath, lastID)\n\tc.Assert(err, IsNil)\n\n\tid, err := GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, lastID)\n}\n<commit_msg>daemon: Fix unit test panic in daemon init failure<commit_after>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/common\"\n\t\"github.com\/cilium\/cilium\/daemon\/options\"\n\t\"github.com\/cilium\/cilium\/pkg\/endpoint\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\t\"github.com\/cilium\/cilium\/pkg\/option\"\n\t\"github.com\/cilium\/cilium\/pkg\/policy\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar (\n\tlbls = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", labels.LabelSourceContainer),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", labels.LabelSourceContainer),\n\t\t\"key\": labels.NewLabel(\"key\", \"\", labels.LabelSourceContainer),\n\t\t\"foo==\": labels.NewLabel(\"foo==\", \"==\", labels.LabelSourceContainer),\n\t\t`foo\\\\=`: labels.NewLabel(`foo\\\\=`, `\\=`, labels.LabelSourceContainer),\n\t\t`\/\/=\/`: labels.NewLabel(`\/\/=\/`, \"\", labels.LabelSourceContainer),\n\t\t`%`: labels.NewLabel(`%`, `%ed`, labels.LabelSourceContainer),\n\t}\n\tlbls2 = labels.Labels{\n\t\t\"foo\": labels.NewLabel(\"foo\", \"bar\", labels.LabelSourceContainer),\n\t\t\"foo2\": labels.NewLabel(\"foo2\", \"=bar2\", labels.LabelSourceContainer),\n\t}\n\twantSecCtxLbls = policy.Identity{\n\t\tID: 123,\n\t\tEndpoints: map[string]time.Time{\n\t\t\t\"cc08ff400e355f736dce1c291a6a4007ab9f2d56d42e1f3630ba87b861d45307\": time.Now().UTC(),\n\t\t},\n\t\tLabels: lbls,\n\t}\n)\n\nfunc (ds *DaemonSuite) SetUpTest(c *C) {\n\ttime.Local = time.UTC\n\ttempRunDir, err := ioutil.TempDir(\"\", \"cilium-test-run\")\n\tc.Assert(err, IsNil)\n\terr = os.Mkdir(filepath.Join(tempRunDir, \"globals\"), 0777)\n\tc.Assert(err, IsNil)\n\n\tdaemonConf := &Config{\n\t\tDryMode: true,\n\t\tOpts: option.NewBoolOptions(&options.Library),\n\t}\n\tdaemonConf.RunDir = tempRunDir\n\tdaemonConf.StateDir = tempRunDir\n\t\/\/ Get the default labels prefix filter\n\terr = labels.ParseLabelPrefixCfg(nil, \"\")\n\tc.Assert(err, IsNil)\n\tdaemonConf.Opts.Set(endpoint.OptionDropNotify, true)\n\tdaemonConf.Opts.Set(endpoint.OptionTraceNotify, true)\n\tdaemonConf.Device = \"undefined\"\n\n\tkvstore.SetupDummy()\n\n\td, err := NewDaemon(daemonConf)\n\tc.Assert(err, IsNil)\n\tds.d = d\n\tkvstore.Client().DeleteTree(common.OperationalPath)\n\t\/\/ Needs to be less than 1 second otherwise GetCachedMaxLabelID might\n\t\/\/ not work properly\n\td.EnableKVStoreWatcher(time.Nanosecond)\n}\n\nfunc (ds *DaemonSuite) TearDownTest(c *C) {\n\tif ds.d != nil {\n\t\tos.RemoveAll(ds.d.conf.RunDir)\n\t}\n}\n\nfunc (ds *DaemonSuite) TestLabels(c *C) {\n\t\/\/Set up last free ID with zero\n\tid, err := GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, policy.MinimalNumericIdentity)\n\n\tsecCtxLbl, new, err := ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 2)\n\tc.Assert(new, Equals, false)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\t\/\/Get labels from ID\n\tgotSecCtxLbl, err := ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 3)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-1\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity + 1)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity + 1\n\twantSecCtxLbls.Labels = lbls2\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 2)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-2\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\twantSecCtxLbls.ID = policy.MinimalNumericIdentity\n\twantSecCtxLbls.Labels = lbls\n\tc.Assert(gotSecCtxLbl.ID, Equals, wantSecCtxLbls.ID)\n\tc.Assert(gotSecCtxLbl.Labels, DeepEquals, wantSecCtxLbls.Labels)\n\tc.Assert(gotSecCtxLbl.RefCount(), Equals, 1)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-3\")\n\tc.Assert(err, IsNil)\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tvar emptySecCtxLblPtr *policy.Identity\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\terr = kvstore.Client().SetMaxID(common.LastFreeLabelIDKeyPath, policy.MinimalNumericIdentity.Uint32(), policy.MinimalNumericIdentity.Uint32())\n\tc.Assert(err, IsNil)\n\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity, \"containerLabel1-non-existent\")\n\tc.Assert(err, DeepEquals, errors.New(\"identity not found\"))\n\tgotSecCtxLbl, err = ds.d.LookupIdentity(policy.MinimalNumericIdentity)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, Equals, emptySecCtxLblPtr)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 3)\n\tc.Assert(new, Equals, false)\n\n\tsha256sum := lbls2.SHA256Sum()\n\tgotSecCtxLbl, err = LookupIdentityBySHA256(sha256sum)\n\tc.Assert(err, IsNil)\n\tc.Assert(gotSecCtxLbl, DeepEquals, secCtxLbl)\n\n\terr = ds.d.DeleteIdentityBySHA256(sha256sum, \"containerLabel2-1\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-2\")\n\tc.Assert(err, IsNil)\n\terr = ds.d.DeleteIdentity(policy.MinimalNumericIdentity+1, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls2, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n\n\tsecCtxLbl, new, err = ds.d.CreateOrUpdateIdentity(lbls, \"containerLabel2-3\")\n\tc.Assert(err, IsNil)\n\tc.Assert(secCtxLbl.ID, Equals, policy.MinimalNumericIdentity+1)\n\tc.Assert(secCtxLbl.RefCount(), Equals, 1)\n\tc.Assert(new, Equals, true)\n}\n\nfunc (ds *DaemonSuite) TestGetMaxID(c *C) {\n\tlastID := policy.NumericIdentity(common.MaxSetOfLabels - 1)\n\terr := kvstore.Client().SetValue(common.LastFreeLabelIDKeyPath, lastID)\n\tc.Assert(err, IsNil)\n\n\tid, err := GetMaxLabelID()\n\tc.Assert(err, IsNil)\n\tc.Assert(id, Equals, lastID)\n}\n<|endoftext|>"} {"text":"<commit_before>package middleware\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/labstack\/echo\"\n)\n\ntype (\n\t\/\/ StaticConfig defines the config for Static middleware.\n\tStaticConfig struct {\n\t\t\/\/ Skipper defines a function to skip middleware.\n\t\tSkipper Skipper\n\n\t\t\/\/ Root directory from where the static content is served.\n\t\t\/\/ Required.\n\t\tRoot string `json:\"root\"`\n\n\t\t\/\/ Index file for serving a directory.\n\t\t\/\/ Optional. Default value \"index.html\".\n\t\tIndex string `json:\"index\"`\n\n\t\t\/\/ Enable HTML5 mode by forwarding all not-found requests to root so that\n\t\t\/\/ SPA (single-page application) can handle the routing.\n\t\t\/\/ Optional. Default value false.\n\t\tHTML5 bool `json:\"html5\"`\n\n\t\t\/\/ Enable directory browsing.\n\t\t\/\/ Optional. Default value false.\n\t\tBrowse bool `json:\"browse\"`\n\t}\n)\n\nvar (\n\t\/\/ DefaultStaticConfig is the default Static middleware config.\n\tDefaultStaticConfig = StaticConfig{\n\t\tSkipper: DefaultSkipper,\n\t\tIndex: \"index.html\",\n\t}\n)\n\n\/\/ Static returns a Static middleware to serves static content from the provided\n\/\/ root directory.\nfunc Static(root string) echo.MiddlewareFunc {\n\tc := DefaultStaticConfig\n\tc.Root = root\n\treturn StaticWithConfig(c)\n}\n\n\/\/ StaticWithConfig returns a Static middleware with config.\n\/\/ See `Static()`.\nfunc StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {\n\t\/\/ Defaults\n\tif config.Root == \"\" {\n\t\tconfig.Root = \".\" \/\/ For security we want to restrict to CWD.\n\t}\n\tif config.Skipper == nil {\n\t\tconfig.Skipper = DefaultStaticConfig.Skipper\n\t}\n\tif config.Index == \"\" {\n\t\tconfig.Index = DefaultStaticConfig.Index\n\t}\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tp := c.Request().URL.Path\n\t\t\tif strings.HasSuffix(c.Path(), \"*\") { \/\/ When serving from a group, e.g. `\/static*`.\n\t\t\t\tp = c.Param(\"*\")\n\t\t\t}\n\t\t\tname := filepath.Join(config.Root, path.Clean(\"\/\"+p)) \/\/ \"\/\"+ for security\n\n\t\t\tfi, err := os.Stat(name)\n\t\t\tif err != nil {\n\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\tif config.HTML5 {\n\t\t\t\t\t\treturn c.File(filepath.Join(config.Root, config.Index))\n\t\t\t\t\t}\n\t\t\t\t\treturn next(c)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif fi.IsDir() {\n\t\t\t\tindex := filepath.Join(name, config.Index)\n\t\t\t\tfi, err = os.Stat(index)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif config.Browse {\n\t\t\t\t\t\treturn listDir(name, c.Response())\n\t\t\t\t\t}\n\t\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\t\treturn next(c)\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn c.File(index)\n\t\t\t}\n\n\t\t\treturn c.File(name)\n\t\t}\n\t}\n}\n\nfunc listDir(name string, res *echo.Response) error {\n\tdir, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdirs, err := dir.Readdir(-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create a directory index\n\tres.Header().Set(echo.HeaderContentType, echo.MIMETextHTMLCharsetUTF8)\n\tif _, err = fmt.Fprintf(res, \"<pre>\\n\"); err != nil {\n\t\treturn err\n\t}\n\tfor _, d := range dirs {\n\t\tname := d.Name()\n\t\tcolor := \"#212121\"\n\t\tif d.IsDir() {\n\t\t\tcolor = \"#e91e63\"\n\t\t\tname += \"\/\"\n\t\t}\n\t\tif _, err = fmt.Fprintf(res, \"<a href=\\\"%s\\\" style=\\\"color: %s;\\\">%s<\/a>\\n\", name, color, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(res, \"<\/pre>\\n\")\n\treturn err\n}\n<commit_msg>Fixed skipper for static middleware (#896)<commit_after>package middleware\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/labstack\/echo\"\n)\n\ntype (\n\t\/\/ StaticConfig defines the config for Static middleware.\n\tStaticConfig struct {\n\t\t\/\/ Skipper defines a function to skip middleware.\n\t\tSkipper Skipper\n\n\t\t\/\/ Root directory from where the static content is served.\n\t\t\/\/ Required.\n\t\tRoot string `json:\"root\"`\n\n\t\t\/\/ Index file for serving a directory.\n\t\t\/\/ Optional. Default value \"index.html\".\n\t\tIndex string `json:\"index\"`\n\n\t\t\/\/ Enable HTML5 mode by forwarding all not-found requests to root so that\n\t\t\/\/ SPA (single-page application) can handle the routing.\n\t\t\/\/ Optional. Default value false.\n\t\tHTML5 bool `json:\"html5\"`\n\n\t\t\/\/ Enable directory browsing.\n\t\t\/\/ Optional. Default value false.\n\t\tBrowse bool `json:\"browse\"`\n\t}\n)\n\nvar (\n\t\/\/ DefaultStaticConfig is the default Static middleware config.\n\tDefaultStaticConfig = StaticConfig{\n\t\tSkipper: DefaultSkipper,\n\t\tIndex: \"index.html\",\n\t}\n)\n\n\/\/ Static returns a Static middleware to serves static content from the provided\n\/\/ root directory.\nfunc Static(root string) echo.MiddlewareFunc {\n\tc := DefaultStaticConfig\n\tc.Root = root\n\treturn StaticWithConfig(c)\n}\n\n\/\/ StaticWithConfig returns a Static middleware with config.\n\/\/ See `Static()`.\nfunc StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {\n\t\/\/ Defaults\n\tif config.Root == \"\" {\n\t\tconfig.Root = \".\" \/\/ For security we want to restrict to CWD.\n\t}\n\tif config.Skipper == nil {\n\t\tconfig.Skipper = DefaultStaticConfig.Skipper\n\t}\n\tif config.Index == \"\" {\n\t\tconfig.Index = DefaultStaticConfig.Index\n\t}\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tif config.Skipper(c) {\n\t\t\t\treturn next(c)\n\t\t\t}\n\t\t\t\n\t\t\tp := c.Request().URL.Path\n\t\t\tif strings.HasSuffix(c.Path(), \"*\") { \/\/ When serving from a group, e.g. `\/static*`.\n\t\t\t\tp = c.Param(\"*\")\n\t\t\t}\n\t\t\tname := filepath.Join(config.Root, path.Clean(\"\/\"+p)) \/\/ \"\/\"+ for security\n\n\t\t\tfi, err := os.Stat(name)\n\t\t\tif err != nil {\n\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\tif config.HTML5 {\n\t\t\t\t\t\treturn c.File(filepath.Join(config.Root, config.Index))\n\t\t\t\t\t}\n\t\t\t\t\treturn next(c)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif fi.IsDir() {\n\t\t\t\tindex := filepath.Join(name, config.Index)\n\t\t\t\tfi, err = os.Stat(index)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif config.Browse {\n\t\t\t\t\t\treturn listDir(name, c.Response())\n\t\t\t\t\t}\n\t\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\t\treturn next(c)\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn c.File(index)\n\t\t\t}\n\n\t\t\treturn c.File(name)\n\t\t}\n\t}\n}\n\nfunc listDir(name string, res *echo.Response) error {\n\tdir, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdirs, err := dir.Readdir(-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create a directory index\n\tres.Header().Set(echo.HeaderContentType, echo.MIMETextHTMLCharsetUTF8)\n\tif _, err = fmt.Fprintf(res, \"<pre>\\n\"); err != nil {\n\t\treturn err\n\t}\n\tfor _, d := range dirs {\n\t\tname := d.Name()\n\t\tcolor := \"#212121\"\n\t\tif d.IsDir() {\n\t\t\tcolor = \"#e91e63\"\n\t\t\tname += \"\/\"\n\t\t}\n\t\tif _, err = fmt.Fprintf(res, \"<a href=\\\"%s\\\" style=\\\"color: %s;\\\">%s<\/a>\\n\", name, color, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err = fmt.Fprintf(res, \"<\/pre>\\n\")\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2020 Red Hat, Inc.\n *\n *\/\n\npackage libvmi\n\nimport (\n\tkvirtv1 \"kubevirt.io\/client-go\/api\/v1\"\n\n\tcd \"kubevirt.io\/kubevirt\/tests\/containerdisk\"\n)\n\n\/\/ Default VMI values\nconst (\n\tDefaultTestGracePeriod int64 = 0\n\tDefaultVmiName = \"testvmi\"\n)\n\n\/\/ NewFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options.\nfunc NewFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\treturn newFedora(cd.ContainerDiskFedora, opts...)\n}\n\n\/\/ NewTestToolingFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options.\n\/\/ This image has tooling for the guest agent, stress, and more\nfunc NewTestToolingFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\treturn newFedora(cd.ContainerDiskFedoraTestTooling, opts...)\n}\n\n\/\/ NewSriovFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options, the\n\/\/ image used include Guest Agent and some moduled needed by SRIOV.\nfunc NewSriovFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\treturn newFedora(cd.ContainerDiskFedoraSRIOVLane, opts...)\n}\n\n\/\/ NewFedora instantiates a new Fedora based VMI configuration with specified\n\/\/ containerDisk, building its extra properties based on the specified With*\n\/\/ options.\nfunc newFedora(containerDisk cd.ContainerDisk, opts ...Option) *kvirtv1.VirtualMachineInstance {\n\tconfigurePassword := `#!\/bin\/bash\n\techo \"fedora\" |passwd fedora --stdin\n\techo `\n\n\tfedoraOptions := []Option{\n\t\tWithTerminationGracePeriod(DefaultTestGracePeriod),\n\t\tWithResourceMemory(\"512M\"),\n\t\tWithRng(),\n\t\tWithContainerImage(cd.ContainerDiskFor(containerDisk)),\n\t\tWithCloudInitNoCloudUserData(configurePassword, false),\n\t}\n\topts = append(fedoraOptions, opts...)\n\treturn New(RandName(DefaultVmiName), opts...)\n}\n\n\/\/ NewCirros instantiates a new CirrOS based VMI configuration\nfunc NewCirros(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\tcirrosOpts := []Option{\n\t\tWithContainerImage(cd.ContainerDiskFor(cd.ContainerDiskCirros)),\n\t\tWithCloudInitNoCloudUserData(\"#!\/bin\/bash\\necho 'hello'\\n\", true),\n\t\tWithResourceMemory(\"128Mi\"),\n\t\tWithTerminationGracePeriod(DefaultTestGracePeriod),\n\t}\n\tcirrosOpts = append(cirrosOpts, opts...)\n\treturn New(RandName(DefaultVmiName), cirrosOpts...)\n}\n<commit_msg>sriov, Remove setting of the password as part of UserData<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2020 Red Hat, Inc.\n *\n *\/\n\npackage libvmi\n\nimport (\n\tkvirtv1 \"kubevirt.io\/client-go\/api\/v1\"\n\n\tcd \"kubevirt.io\/kubevirt\/tests\/containerdisk\"\n)\n\n\/\/ Default VMI values\nconst (\n\tDefaultTestGracePeriod int64 = 0\n\tDefaultVmiName = \"testvmi\"\n\n\tuserDataSetPassword = `#!\/bin\/bash\n\techo \"fedora\" | passwd fedora --stdin\n\techo `\n)\n\n\/\/ NewFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options.\nfunc NewFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\topts = append([]Option{WithCloudInitNoCloudUserData(userDataSetPassword, false)}, opts...)\n\treturn newFedora(cd.ContainerDiskFedora, opts...)\n}\n\n\/\/ NewTestToolingFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options.\n\/\/ This image has tooling for the guest agent, stress, and more\nfunc NewTestToolingFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\topts = append([]Option{WithCloudInitNoCloudUserData(userDataSetPassword, false)}, opts...)\n\treturn newFedora(cd.ContainerDiskFedoraTestTooling, opts...)\n}\n\n\/\/ NewSriovFedora instantiates a new Fedora based VMI configuration,\n\/\/ building its extra properties based on the specified With* options, the\n\/\/ image used include Guest Agent and some moduled needed by SRIOV.\nfunc NewSriovFedora(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\treturn newFedora(cd.ContainerDiskFedoraSRIOVLane, opts...)\n}\n\n\/\/ NewFedora instantiates a new Fedora based VMI configuration with specified\n\/\/ containerDisk, building its extra properties based on the specified With*\n\/\/ options.\nfunc newFedora(containerDisk cd.ContainerDisk, opts ...Option) *kvirtv1.VirtualMachineInstance {\n\tfedoraOptions := []Option{\n\t\tWithTerminationGracePeriod(DefaultTestGracePeriod),\n\t\tWithResourceMemory(\"512M\"),\n\t\tWithRng(),\n\t\tWithContainerImage(cd.ContainerDiskFor(containerDisk)),\n\t}\n\topts = append(fedoraOptions, opts...)\n\treturn New(RandName(DefaultVmiName), opts...)\n}\n\n\/\/ NewCirros instantiates a new CirrOS based VMI configuration\nfunc NewCirros(opts ...Option) *kvirtv1.VirtualMachineInstance {\n\tcirrosOpts := []Option{\n\t\tWithContainerImage(cd.ContainerDiskFor(cd.ContainerDiskCirros)),\n\t\tWithCloudInitNoCloudUserData(\"#!\/bin\/bash\\necho 'hello'\\n\", true),\n\t\tWithResourceMemory(\"128Mi\"),\n\t\tWithTerminationGracePeriod(DefaultTestGracePeriod),\n\t}\n\tcirrosOpts = append(cirrosOpts, opts...)\n\treturn New(RandName(DefaultVmiName), cirrosOpts...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/rand\"\n\n\tv1 \"kubevirt.io\/kubevirt\/pkg\/api\/v1\"\n\t\"kubevirt.io\/kubevirt\/pkg\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n)\n\nvar _ = Describe(\"Migrations\", func() {\n\tflag.Parse()\n\n\tvirtClient, err := kubecli.GetKubevirtClient()\n\ttests.PanicOnError(err)\n\n\tBeforeEach(func() {\n\t\ttests.BeforeTestCleanup()\n\n\t\tnodes, err := virtClient.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: v1.NodeSchedulable + \"=\" + \"true\"})\n\t\tExpect(err).To(BeNil())\n\n\t\tif len(nodes.Items) < 2 {\n\t\t\tSkip(\"Migration tests require at least 2 nodes\")\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t})\n\n\trunVMIAndExpectLaunch := func(vmi *v1.VirtualMachineInstance, timeout int) *v1.VirtualMachineInstance {\n\t\tBy(\"Starting a VirtualMachineInstance\")\n\t\tvar obj *v1.VirtualMachineInstance\n\t\tvar err error\n\t\tEventually(func() error {\n\t\t\tobj, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)\n\t\t\treturn err\n\t\t}, timeout, 1*time.Second).ShouldNot(HaveOccurred())\n\t\tBy(\"Waiting until the VirtualMachineInstance starts\")\n\t\ttests.WaitForSuccessfulVMIStartWithTimeout(obj, timeout)\n\t\treturn obj\n\t}\n\n\tconfirmVMIPostMigration := func(vmi *v1.VirtualMachineInstance, migrationUID string) {\n\t\tBy(\"Retrieving the VMI post migration\")\n\t\tvmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})\n\t\tExpect(err).To(BeNil())\n\n\t\tBy(\"Verifying the VMI's migration state\")\n\t\tExpect(vmi.Status.MigrationState).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.StartTimestamp).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.EndTimestamp).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.TargetNode).To(Equal(vmi.Status.NodeName))\n\t\tExpect(vmi.Status.MigrationState.TargetNode).ToNot(Equal(vmi.Status.MigrationState.SourceNode))\n\t\tExpect(vmi.Status.MigrationState.Completed).To(Equal(true))\n\t\tExpect(vmi.Status.MigrationState.Failed).To(Equal(false))\n\t\tExpect(vmi.Status.MigrationState.TargetNodeAddress).ToNot(Equal(\"\"))\n\t\tExpect(string(vmi.Status.MigrationState.MigrationUID)).To(Equal(migrationUID))\n\n\t\tBy(\"Verifying the VMI's is in the running state\")\n\t\tExpect(vmi.Status.Phase).To(Equal(v1.Running))\n\t}\n\n\trunMigrationAndExpectCompletion := func(migration *v1.VirtualMachineInstanceMigration, timeout int) string {\n\t\tBy(\"Starting a Migration\")\n\t\tEventually(func() error {\n\t\t\t_, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration)\n\t\t\treturn err\n\t\t}, timeout, 1*time.Second).ShouldNot(HaveOccurred())\n\t\tBy(\"Waiting until the Migration Completes\")\n\n\t\tuid := \"\"\n\t\tEventually(func() bool {\n\t\t\tmigration, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Get(migration.Name, &metav1.GetOptions{})\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(migration.Status.Phase).ToNot(Equal(v1.MigrationFailed))\n\n\t\t\tuid = string(migration.UID)\n\t\t\tif migration.Status.Phase == v1.MigrationSucceeded {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\n\t\t}, timeout, 1*time.Second).Should(Equal(true))\n\t\treturn uid\n\t}\n\n\tDescribe(\"Starting a VirtualMachineInstance \", func() {\n\t\tContext(\"with an Alpine read only disk\", func() {\n\t\t\tIt(\"should be successfully migrated multiple times\", func() {\n\n\t\t\t\tvmi := tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))\n\t\t\t\tvmi.Spec.Domain.Devices.Disks[0].DiskDevice.Disk.ReadOnly = true\n\n\t\t\t\tBy(\"Starting the VirtualMachineInstance\")\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 240)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tnum := 2\n\n\t\t\t\tfor i := 0; i < num; i++ {\n\t\t\t\t\t\/\/ execute a migration, wait for finalized state\n\t\t\t\t\tBy(fmt.Sprintf(\"Starting the Migration for iteration %d\", i))\n\t\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\t\t\t\t}\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 240)\n\n\t\t\t})\n\t\t})\n\t\tContext(\"with an Alpine shared ISCSI PVC\", func() {\n\t\t\tvar pvName string\n\t\t\tBeforeEach(func() {\n\t\t\t\tpvName = \"test-iscsi-lun\" + rand.String(48)\n\t\t\t\t\/\/ Start a ISCSI POD and service\n\t\t\t\tBy(\"Starting an iSCSI POD\")\n\t\t\t\tiscsiIP := tests.CreateISCSITargetPOD()\n\t\t\t\t\/\/ create a new PV and PVC (PVs can't be reused)\n\t\t\t\tBy(\"create a new iSCSI PV and PVC\")\n\t\t\t\ttests.CreateISCSIPvAndPvc(pvName, \"1Gi\", iscsiIP)\n\t\t\t}, 60)\n\n\t\t\tAfterEach(func() {\n\t\t\t\t\/\/ create a new PV and PVC (PVs can't be reused)\n\t\t\t\ttests.DeletePvAndPvc(pvName)\n\t\t\t}, 60)\n\t\t\tIt(\"should reject migration specs with shared and non-shared disks\", func() {\n\t\t\t\t\/\/ Start the VirtualMachineInstance with PVC and Ephemeral Disks\n\t\t\t\tvmi := tests.NewRandomVMIWithPVC(pvName)\n\t\t\t\timage := tests.ContainerDiskFor(tests.ContainerDiskAlpine)\n\t\t\t\ttests.AddEphemeralDisk(vmi, \"myephemeral\", \"virtio\", image)\n\n\t\t\t\tBy(\"Starting the VirtualMachineInstance\")\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 120)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tBy(\"Starting a Migration and expecting it to be rejected\")\n\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\tEventually(func() error {\n\t\t\t\t\t_, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration)\n\t\t\t\t\treturn err\n\t\t\t\t}, 120, 1*time.Second).Should(HaveOccurred())\n\t\t\t})\n\t\t\tIt(\"should be successfully migrated multiple times\", func() {\n\t\t\t\t\/\/ Start the VirtualMachineInstance with the PVC attached\n\t\t\t\tvmi := tests.NewRandomVMIWithPVC(pvName)\n\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 180)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tnum := 2\n\n\t\t\t\tfor i := 0; i < num; i++ {\n\t\t\t\t\t\/\/ execute a migration, wait for finalized state\n\t\t\t\t\tBy(fmt.Sprintf(\"Starting the Migration for iteration %d\", i))\n\t\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\t\t\t\t}\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)\n\n\t\t\t})\n\t\t})\n\t})\n\n})\n<commit_msg>add a functioal test to verify live migration with mixed vloumes<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/rand\"\n\n\tv1 \"kubevirt.io\/kubevirt\/pkg\/api\/v1\"\n\t\"kubevirt.io\/kubevirt\/pkg\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n)\n\nvar _ = Describe(\"Migrations\", func() {\n\tflag.Parse()\n\n\tvirtClient, err := kubecli.GetKubevirtClient()\n\ttests.PanicOnError(err)\n\n\tBeforeEach(func() {\n\t\ttests.BeforeTestCleanup()\n\n\t\tnodes, err := virtClient.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: v1.NodeSchedulable + \"=\" + \"true\"})\n\t\tExpect(err).To(BeNil())\n\n\t\tif len(nodes.Items) < 2 {\n\t\t\tSkip(\"Migration tests require at least 2 nodes\")\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t})\n\n\trunVMIAndExpectLaunch := func(vmi *v1.VirtualMachineInstance, timeout int) *v1.VirtualMachineInstance {\n\t\tBy(\"Starting a VirtualMachineInstance\")\n\t\tvar obj *v1.VirtualMachineInstance\n\t\tvar err error\n\t\tEventually(func() error {\n\t\t\tobj, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)\n\t\t\treturn err\n\t\t}, timeout, 1*time.Second).ShouldNot(HaveOccurred())\n\t\tBy(\"Waiting until the VirtualMachineInstance starts\")\n\t\ttests.WaitForSuccessfulVMIStartWithTimeout(obj, timeout)\n\t\treturn obj\n\t}\n\n\tconfirmVMIPostMigration := func(vmi *v1.VirtualMachineInstance, migrationUID string) {\n\t\tBy(\"Retrieving the VMI post migration\")\n\t\tvmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})\n\t\tExpect(err).To(BeNil())\n\n\t\tBy(\"Verifying the VMI's migration state\")\n\t\tExpect(vmi.Status.MigrationState).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.StartTimestamp).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.EndTimestamp).ToNot(BeNil())\n\t\tExpect(vmi.Status.MigrationState.TargetNode).To(Equal(vmi.Status.NodeName))\n\t\tExpect(vmi.Status.MigrationState.TargetNode).ToNot(Equal(vmi.Status.MigrationState.SourceNode))\n\t\tExpect(vmi.Status.MigrationState.Completed).To(Equal(true))\n\t\tExpect(vmi.Status.MigrationState.Failed).To(Equal(false))\n\t\tExpect(vmi.Status.MigrationState.TargetNodeAddress).ToNot(Equal(\"\"))\n\t\tExpect(string(vmi.Status.MigrationState.MigrationUID)).To(Equal(migrationUID))\n\n\t\tBy(\"Verifying the VMI's is in the running state\")\n\t\tExpect(vmi.Status.Phase).To(Equal(v1.Running))\n\t}\n\n\trunMigrationAndExpectCompletion := func(migration *v1.VirtualMachineInstanceMigration, timeout int) string {\n\t\tBy(\"Starting a Migration\")\n\t\tEventually(func() error {\n\t\t\t_, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration)\n\t\t\treturn err\n\t\t}, timeout, 1*time.Second).ShouldNot(HaveOccurred())\n\t\tBy(\"Waiting until the Migration Completes\")\n\n\t\tuid := \"\"\n\t\tEventually(func() bool {\n\t\t\tmigration, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Get(migration.Name, &metav1.GetOptions{})\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(migration.Status.Phase).ToNot(Equal(v1.MigrationFailed))\n\n\t\t\tuid = string(migration.UID)\n\t\t\tif migration.Status.Phase == v1.MigrationSucceeded {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\n\t\t}, timeout, 1*time.Second).Should(Equal(true))\n\t\treturn uid\n\t}\n\n\tDescribe(\"Starting a VirtualMachineInstance \", func() {\n\t\tContext(\"with an Alpine read only disk\", func() {\n\t\t\tIt(\"should be successfully migrated multiple times\", func() {\n\n\t\t\t\tvmi := tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))\n\t\t\t\tvmi.Spec.Domain.Devices.Disks[0].DiskDevice.Disk.ReadOnly = true\n\n\t\t\t\tBy(\"Starting the VirtualMachineInstance\")\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 240)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tnum := 2\n\n\t\t\t\tfor i := 0; i < num; i++ {\n\t\t\t\t\t\/\/ execute a migration, wait for finalized state\n\t\t\t\t\tBy(fmt.Sprintf(\"Starting the Migration for iteration %d\", i))\n\t\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\t\t\t\t}\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 240)\n\n\t\t\t})\n\t\t})\n\t\tContext(\"with an Alpine shared ISCSI PVC\", func() {\n\t\t\tvar pvName string\n\t\t\tBeforeEach(func() {\n\t\t\t\tpvName = \"test-iscsi-lun\" + rand.String(48)\n\t\t\t\t\/\/ Start a ISCSI POD and service\n\t\t\t\tBy(\"Starting an iSCSI POD\")\n\t\t\t\tiscsiIP := tests.CreateISCSITargetPOD()\n\t\t\t\t\/\/ create a new PV and PVC (PVs can't be reused)\n\t\t\t\tBy(\"create a new iSCSI PV and PVC\")\n\t\t\t\ttests.CreateISCSIPvAndPvc(pvName, \"1Gi\", iscsiIP)\n\t\t\t}, 60)\n\n\t\t\tAfterEach(func() {\n\t\t\t\t\/\/ create a new PV and PVC (PVs can't be reused)\n\t\t\t\ttests.DeletePvAndPvc(pvName)\n\t\t\t}, 60)\n\t\t\tIt(\"should migrate a VMI with shared and non-shared disks\", func() {\n\t\t\t\t\/\/ Start the VirtualMachineInstance with PVC and Ephemeral Disks\n\t\t\t\tvmi := tests.NewRandomVMIWithPVC(pvName)\n\t\t\t\timage := tests.ContainerDiskFor(tests.ContainerDiskAlpine)\n\t\t\t\ttests.AddEphemeralDisk(vmi, \"myephemeral\", \"virtio\", image)\n\n\t\t\t\tBy(\"Starting the VirtualMachineInstance\")\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 120)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tBy(\"Starting a Migration\")\n\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)\n\t\t\t})\n\t\t\tIt(\"should be successfully migrated multiple times\", func() {\n\t\t\t\t\/\/ Start the VirtualMachineInstance with the PVC attached\n\t\t\t\tvmi := tests.NewRandomVMIWithPVC(pvName)\n\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 180)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\tnum := 2\n\n\t\t\t\tfor i := 0; i < num; i++ {\n\t\t\t\t\t\/\/ execute a migration, wait for finalized state\n\t\t\t\t\tBy(fmt.Sprintf(\"Starting the Migration for iteration %d\", i))\n\t\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\t\t\t\t}\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)\n\n\t\t\t})\n\t\t\tIt(\"should be successfully with a cloud init\", func() {\n\t\t\t\t\/\/ Start the VirtualMachineInstance with the PVC attached\n\t\t\t\tvmi := tests.NewRandomVMIWithPVC(pvName)\n\t\t\t\ttests.AddUserData(vmi, \"cloud-init\", \"#!\/bin\/bash\\necho 'hello'\\n\")\n\n\t\t\t\tvmi = runVMIAndExpectLaunch(vmi, 180)\n\n\t\t\t\tBy(\"Checking that the VirtualMachineInstance console has expected output\")\n\t\t\t\texpecter, err := tests.LoggedInAlpineExpecter(vmi)\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\texpecter.Close()\n\n\t\t\t\t\/\/ execute a migration, wait for finalized state\n\t\t\t\tBy(\"Starting the Migration for iteration\")\n\t\t\t\tmigration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)\n\t\t\t\tmigrationUID := runMigrationAndExpectCompletion(migration, 180)\n\n\t\t\t\t\/\/ check VMI, confirm migration state\n\t\t\t\tconfirmVMIPostMigration(vmi, migrationUID)\n\n\t\t\t\t\/\/ delete VMI\n\t\t\t\tBy(\"Deleting the VMI\")\n\t\t\t\terr = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})\n\t\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\tBy(\"Waiting for VMI to disappear\")\n\t\t\t\ttests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)\n\t\t\t})\n\t\t})\n\t})\n\n})\n<|endoftext|>"} {"text":"<commit_before>package agent\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/subutai-io\/base\/agent\/agent\/alert\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/connect\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/executer\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/utils\"\n\t\"github.com\/subutai-io\/base\/agent\/cli\"\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/lib\/gpg\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n)\n\ntype Response struct {\n\tBeat Heartbeat `json:\"response\"`\n}\n\ntype Heartbeat struct {\n\tType string `json:\"type\"`\n\tHostname string `json:\"hostname\"`\n\tId string `json:\"id\"`\n\tArch string `json:\"arch\"`\n\tInstance string `json:\"instance\"`\n\tInterfaces []utils.Iface `json:\"interfaces,omitempty\"`\n\tContainers []container.Container `json:\"containers,omitempty\"`\n\tAlert []alert.Load `json:\"alert,omitempty\"`\n}\n\nvar (\n\tlastHeartbeat []byte\n\tmutex sync.Mutex\n\tfingerprint string\n\thostname string\n\tclient *http.Client\n\tinstanceType string\n\tinstanceArch string\n\tlastHeartbeatTime time.Time\n\tpool []container.Container\n)\n\nfunc initAgent() {\n\t\/\/ move .gnupg dir to app home\n\tos.Setenv(\"GNUPGHOME\", config.Agent.DataPrefix+\".gnupg\")\n\n\tinstanceType = utils.InstanceType()\n\tinstanceArch = strings.ToUpper(runtime.GOARCH)\n\tclient = tlsConfig()\n}\n\nfunc Start(c *cli.Context) {\n\thttp.HandleFunc(\"\/trigger\", trigger)\n\thttp.HandleFunc(\"\/ping\", ping)\n\thttp.HandleFunc(\"\/heartbeat\", heartbeatCall)\n\tgo http.ListenAndServe(\":7070\", nil)\n\n\tinitAgent()\n\tgo lib.Collect()\n\tgo connectionMonitor()\n\tgo alert.AlertProcessing()\n\n\tfor {\n\t\tif heartbeat() {\n\t\t\ttime.Sleep(30 * time.Second)\n\t\t} else {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t\tcontainer.ContainersRestoreState(pool)\n\t}\n}\n\nfunc connectionMonitor() {\n\tfor {\n\t\thostname, _ = os.Hostname()\n\t\tif fingerprint == \"\" || config.Management.GpgUser == \"\" {\n\t\t\tfingerprint = gpg.GetFingerprint(hostname + \"@subutai.io\")\n\t\t\tconnect.Connect(config.Management.Host, config.Management.Port, config.Agent.GpgUser, config.Management.Secret)\n\t\t} else {\n\t\t\tresp, err := client.Get(\"https:\/\/\" + config.Management.Host + \":8444\/rest\/v1\/agent\/check\/\" + fingerprint)\n\t\t\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tlog.Debug(\"Connection monitor check - success\")\n\t\t\t} else {\n\t\t\t\tlog.Debug(\"Connection monitor check - failed\")\n\t\t\t\tconnect.Connect(config.Management.Host, config.Management.Port, config.Agent.GpgUser, config.Management.Secret)\n\t\t\t\tlastHeartbeat = []byte{}\n\t\t\t\tgo heartbeat()\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Second * 10)\n\t}\n}\n\nfunc heartbeat() bool {\n\tif time.Since(lastHeartbeatTime) < time.Second*3 {\n\t\treturn false\n\t}\n\tlastHeartbeatTime = time.Now()\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tpool = container.GetActiveContainers(false)\n\tbeat := Heartbeat{\n\t\tType: \"HEARTBEAT\",\n\t\tHostname: hostname,\n\t\tId: fingerprint,\n\t\tArch: instanceArch,\n\t\tInstance: instanceType,\n\t\tContainers: pool,\n\t\tInterfaces: utils.GetInterfaces(),\n\t\tAlert: alert.CurrentAlerts(pool),\n\t}\n\tres := Response{Beat: beat}\n\tjbeat, _ := json.Marshal(&res)\n\n\tif string(jbeat) == string(lastHeartbeat) {\n\t\treturn true\n\t}\n\tlastHeartbeat = jbeat\n\n\tmessage, err := json.Marshal(map[string]string{\n\t\t\"hostId\": fingerprint,\n\t\t\"response\": gpg.EncryptWrapper(config.Agent.GpgUser, config.Management.GpgUser, string(jbeat)),\n\t})\n\tlog.Check(log.WarnLevel, \"Marshal response json\", err)\n\n\tresp, err := client.PostForm(\"https:\/\/\"+config.Management.Host+\":8444\/rest\/v1\/agent\/heartbeat\", url.Values{\"heartbeat\": {string(message)}})\n\tif !log.Check(log.WarnLevel, \"Sending heartbeat: \"+string(jbeat), err) {\n\t\tlog.Debug(resp.Status)\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode == http.StatusAccepted {\n\t\t\treturn true\n\t\t}\n\t}\n\tlastHeartbeat = []byte{}\n\treturn false\n}\n\nfunc execute(rsp executer.EncRequest) {\n\tvar req executer.Request\n\tvar md, contName, pub, keyring, payload string\n\tvar err error\n\n\tif rsp.HostId == fingerprint {\n\t\tmd = gpg.DecryptWrapper(rsp.Request)\n\t} else {\n\t\tcontName = nameById(rsp.HostId)\n\t\tif contName == \"\" {\n\t\t\tlastHeartbeat = []byte{}\n\t\t\theartbeat()\n\t\t\tcontName = nameById(rsp.HostId)\n\t\t\tif contName == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpub = config.Agent.LxcPrefix + contName + \"\/public.pub\"\n\t\tkeyring = config.Agent.LxcPrefix + contName + \"\/secret.sec\"\n\t\tlog.Info(\"Getting public keyring\", \"keyring\", keyring)\n\t\tmd = gpg.DecryptNoDefaultKeyring(rsp.Request, keyring, pub)\n\t}\n\ti := strings.Index(md, \"{\")\n\tj := strings.LastIndex(md, \"}\") + 1\n\tif i > j && i > 0 {\n\t\tlog.Warn(\"Error getting JSON request\")\n\t\treturn\n\t}\n\trequest := md[i:j]\n\n\terr = json.Unmarshal([]byte(request), &req.Request)\n\tlog.Check(log.WarnLevel, \"Decrypting request\", err)\n\n\t\/\/create channels for stdout and stderr\n\tsOut := make(chan executer.ResponseOptions)\n\tif rsp.HostId == fingerprint {\n\t\tgo executer.ExecHost(req.Request, sOut)\n\t} else {\n\t\tgo executer.AttachContainer(contName, req.Request, sOut)\n\t}\n\n\tfor sOut != nil {\n\t\tselect {\n\t\tcase elem, ok := <-sOut:\n\t\t\tif ok {\n\t\t\t\tresp := executer.Response{ResponseOpts: elem}\n\t\t\t\tjsonR, err := json.Marshal(resp)\n\t\t\t\tlog.Check(log.WarnLevel, \"Marshal response\", err)\n\t\t\t\tif rsp.HostId == fingerprint {\n\t\t\t\t\tpayload = gpg.EncryptWrapper(config.Agent.GpgUser, config.Management.GpgUser, string(jsonR))\n\t\t\t\t} else {\n\t\t\t\t\tpayload = gpg.EncryptWrapperNoDefaultKeyring(contName, config.Management.GpgUser, string(jsonR), pub, keyring)\n\t\t\t\t}\n\t\t\t\tmessage, err := json.Marshal(map[string]string{\n\t\t\t\t\t\"hostId\": elem.Id,\n\t\t\t\t\t\"response\": payload,\n\t\t\t\t})\n\t\t\t\tlog.Check(log.WarnLevel, \"Marshal response json \"+elem.CommandId, err)\n\t\t\t\tgo response(message)\n\t\t\t} else {\n\t\t\t\tsOut = nil\n\t\t\t}\n\t\t}\n\t}\n\tgo heartbeat()\n}\n\nfunc tlsConfig() *http.Client {\n\ttlsconfig := newTLSConfig()\n\tfor tlsconfig == nil || len(tlsconfig.Certificates[0].Certificate) == 0 {\n\t\ttime.Sleep(time.Second * 2)\n\t\tfor utils.PublicCert() == \"\" {\n\t\t\tx509generate()\n\t\t}\n\t\ttlsconfig = newTLSConfig()\n\t}\n\n\ttransport := &http.Transport{TLSClientConfig: tlsconfig}\n\treturn &http.Client{Transport: transport, Timeout: time.Second * 30}\n}\n\nfunc response(msg []byte) {\n\tresp, err := client.PostForm(\"https:\/\/\"+config.Management.Host+\":8444\/rest\/v1\/agent\/response\", url.Values{\"response\": {string(msg)}})\n\tif !log.Check(log.WarnLevel, \"Sending response \"+string(msg), err) {\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode == http.StatusAccepted {\n\t\t\treturn\n\t\t}\n\t}\n\ttime.Sleep(time.Second * 5)\n\tgo response(msg)\n\n}\n\nfunc command() {\n\tvar rsp []executer.EncRequest\n\n\tresp, err := client.Get(\"https:\/\/\" + config.Management.Host + \":8444\/rest\/v1\/agent\/requests\/\" + fingerprint)\n\tif log.Check(log.WarnLevel, \"Getting requests\", err) {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif !log.Check(log.WarnLevel, \"Reading body\", err) {\n\t\tlog.Check(log.WarnLevel, \"Unmarshal payload\", json.Unmarshal(data, &rsp))\n\t\tfor _, request := range rsp {\n\t\t\tgo execute(request)\n\t\t}\n\t}\n}\n\nfunc ping(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodGet && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusOK)\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc trigger(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodPost && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusAccepted)\n\t\tgo command()\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc heartbeatCall(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodGet && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusOK)\n\t\tlastHeartbeat = []byte{}\n\t\theartbeat()\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc nameById(id string) string {\n\tfor _, c := range pool {\n\t\tif c.Id == id {\n\t\t\treturn c.Name\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>Test commit<commit_after>package agent\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/subutai-io\/base\/agent\/agent\/alert\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/connect\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/executer\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/utils\"\n\t\"github.com\/subutai-io\/base\/agent\/cli\"\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/lib\/gpg\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n)\n\ntype Response struct {\n\tBeat Heartbeat `json:\"response\"`\n}\n\ntype Heartbeat struct {\n\tType string `json:\"type\"`\n\tHostname string `json:\"hostname\"`\n\tId string `json:\"id\"`\n\tArch string `json:\"arch\"`\n\tInstance string `json:\"instance\"`\n\tInterfaces []utils.Iface `json:\"interfaces,omitempty\"`\n\tContainers []container.Container `json:\"containers,omitempty\"`\n\tAlert []alert.Load `json:\"alert,omitempty\"`\n}\n\nvar (\n\tlastHeartbeat []byte\n\tmutex sync.Mutex\n\tfingerprint string\n\thostname string\n\tclient *http.Client\n\tinstanceType string\n\tinstanceArch string\n\tlastHeartbeatTime time.Time\n\tpool []container.Container\n)\n\nfunc initAgent() {\n\t\/\/ move .gnupg dir to app home\n\tos.Setenv(\"GNUPGHOME\", config.Agent.DataPrefix+\".gnupg\")\n\n\tinstanceType = utils.InstanceType()\n\tinstanceArch = strings.ToUpper(runtime.GOARCH)\n\tclient = tlsConfig()\n}\n\nfunc Start(c *cli.Context) {\n\thttp.HandleFunc(\"\/trigger\", trigger)\n\thttp.HandleFunc(\"\/ping\", ping)\n\thttp.HandleFunc(\"\/heartbeat\", heartbeatCall)\n\tgo http.ListenAndServe(\":7070\", nil)\n\n\tinitAgent()\n\tgo lib.Collect()\n\tgo connectionMonitor()\n\tgo alert.AlertProcessing()\n\n\tfor {\n\t\tif heartbeat() {\n\t\t\ttime.Sleep(30 * time.Second)\n\t\t} else {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t\tcontainer.ContainersRestoreState(pool)\n\t}\n}\n\nfunc connectionMonitor() {\n\tfor {\n\t\thostname, _ = os.Hostname()\n\t\tif fingerprint == \"\" || config.Management.GpgUser == \"\" {\n\t\t\tfingerprint = gpg.GetFingerprint(hostname + \"@subutai.io\")\n\t\t\tconnect.Connect(config.Management.Host, config.Management.Port, config.Agent.GpgUser, config.Management.Secret)\n\t\t} else {\n\t\t\tresp, err := client.Get(\"https:\/\/\" + config.Management.Host + \":8444\/rest\/v1\/agent\/check\/\" + fingerprint)\n\t\t\tif err == nil && resp.StatusCode == http.StatusOK {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tlog.Debug(\"Connection monitor check - success\")\n\t\t\t} else {\n\t\t\t\tlog.Debug(\"Connection monitor check - failed\")\n\t\t\t\tconnect.Connect(config.Management.Host, config.Management.Port, config.Agent.GpgUser, config.Management.Secret)\n\t\t\t\tlastHeartbeat = []byte{}\n\t\t\t\tgo heartbeat()\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Second * 10)\n\t}\n}\n\nfunc heartbeat() bool {\n\tif time.Since(lastHeartbeatTime) < time.Second*3 {\n\t\treturn false\n\t}\n\tlastHeartbeatTime = time.Now()\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tpool = container.GetActiveContainers(false)\n\tbeat := Heartbeat{\n\t\tType: \"HEARTBEAT\",\n\t\tHostname: hostname,\n\t\tId: fingerprint,\n\t\tArch: instanceArch,\n\t\tInstance: instanceType,\n\t\tContainers: pool,\n\t\tInterfaces: utils.GetInterfaces(),\n\t\tAlert: alert.CurrentAlerts(pool),\n\t}\n\tres := Response{Beat: beat}\n\tjbeat, _ := json.Marshal(&res)\n\n\tif string(jbeat) == string(lastHeartbeat) {\n\t\treturn true\n\t}\n\tlastHeartbeat = jbeat\n\n\tmessage, err := json.Marshal(map[string]string{\n\t\t\"hostId\": fingerprint,\n\t\t\"response\": gpg.EncryptWrapper(config.Agent.GpgUser, config.Management.GpgUser, string(jbeat)),\n\t})\n\tlog.Check(log.WarnLevel, \"Marshal response json\", err)\n\n\tresp, err := client.PostForm(\"https:\/\/\"+config.Management.Host+\":8444\/rest\/v1\/agent\/heartbeat\", url.Values{\"heartbeat\": {string(message)}})\n\tif !log.Check(log.WarnLevel, \"Sending heartbeat: \"+string(jbeat), err) {\n\t\tlog.Debug(resp.Status)\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode == http.StatusAccepted {\n\t\t\treturn true\n\t\t}\n\t}\n\tlastHeartbeat = []byte{}\n\treturn false\n}\n\nfunc execute(rsp executer.EncRequest) {\n\tvar req executer.Request\n\tvar md, contName, pub, keyring, payload string\n\tvar err error\n\n\tif rsp.HostId == fingerprint {\n\t\tmd = gpg.DecryptWrapper(rsp.Request)\n\t} else {\n\t\tcontName = nameById(rsp.HostId)\n\t\tif contName == \"\" {\n\t\t\tlastHeartbeat = []byte{}\n\t\t\theartbeat()\n\t\t\tcontName = nameById(rsp.HostId)\n\t\t\tif contName == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tpub = config.Agent.LxcPrefix + contName + \"\/public.pub\"\n\t\tkeyring = config.Agent.LxcPrefix + contName + \"\/secret.sec\"\n\t\tlog.Info(\"Getting public keyring\", \"keyring\", keyring)\n\t\tmd = gpg.DecryptNoDefaultKeyring(rsp.Request, keyring, pub)\n\t}\n\ti := strings.Index(md, \"{\")\n\tj := strings.LastIndex(md, \"}\") + 1\n\tif i > j && i > 0 {\n\t\tlog.Warn(\"Error getting JSON request\")\n\t\treturn\n\t}\n\trequest := md[i:j]\n\n\terr = json.Unmarshal([]byte(request), &req.Request)\n\tlog.Check(log.WarnLevel, \"Decrypting request\", err)\n\n\t\/\/create channels for stdout and stderr\n\tsOut := make(chan executer.ResponseOptions)\n\tif rsp.HostId == fingerprint {\n\t\tgo executer.ExecHost(req.Request, sOut)\n\t} else {\n\t\tgo executer.AttachContainer(contName, req.Request, sOut)\n\t}\n\n\tfor sOut != nil {\n\t\tselect {\n\t\tcase elem, ok := <-sOut:\n\t\t\tif ok {\n\t\t\t\tresp := executer.Response{ResponseOpts: elem}\n\t\t\t\tjsonR, err := json.Marshal(resp)\n\t\t\t\tlog.Check(log.WarnLevel, \"Marshal response\", err)\n\t\t\t\tif rsp.HostId == fingerprint {\n\t\t\t\t\tpayload = gpg.EncryptWrapper(config.Agent.GpgUser, config.Management.GpgUser, string(jsonR))\n\t\t\t\t} else {\n\t\t\t\t\tpayload = gpg.EncryptWrapperNoDefaultKeyring(contName, config.Management.GpgUser, string(jsonR), pub, keyring)\n\t\t\t\t}\n\t\t\t\tmessage, err := json.Marshal(map[string]string{\n\t\t\t\t\t\"hostId\": elem.Id,\n\t\t\t\t\t\"response\": payload,\n\t\t\t\t})\n\t\t\t\tlog.Check(log.WarnLevel, \"Marshal response json \"+elem.CommandId, err)\n\t\t\t\tgo response(message)\n\t\t\t} else {\n\t\t\t\tsOut = nil\n\t\t\t}\n\t\t}\n\t}\n\tgo heartbeat()\n}\n\nfunc tlsConfig() *http.Client {\n\ttlsconfig := newTLSConfig()\n\tfor tlsconfig == nil || len(tlsconfig.Certificates[0].Certificate) == 0 {\n\t\ttime.Sleep(time.Second * 2)\n\t\tfor utils.PublicCert() == \"\" {\n\t\t\tx509generate()\n\t\t}\n\t\ttlsconfig = newTLSConfig()\n\t}\n\ttransport := &http.Transport{TLSClientConfig: tlsconfig}\n\treturn &http.Client{Transport: transport, Timeout: time.Second * 30}\n}\n\nfunc response(msg []byte) {\n\tresp, err := client.PostForm(\"https:\/\/\"+config.Management.Host+\":8444\/rest\/v1\/agent\/response\", url.Values{\"response\": {string(msg)}})\n\tif !log.Check(log.WarnLevel, \"Sending response \"+string(msg), err) {\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode == http.StatusAccepted {\n\t\t\treturn\n\t\t}\n\t}\n\ttime.Sleep(time.Second * 5)\n\tgo response(msg)\n\n}\n\nfunc command() {\n\tvar rsp []executer.EncRequest\n\n\tresp, err := client.Get(\"https:\/\/\" + config.Management.Host + \":8444\/rest\/v1\/agent\/requests\/\" + fingerprint)\n\tif log.Check(log.WarnLevel, \"Getting requests\", err) {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif !log.Check(log.WarnLevel, \"Reading body\", err) {\n\t\tlog.Check(log.WarnLevel, \"Unmarshal payload\", json.Unmarshal(data, &rsp))\n\t\tfor _, request := range rsp {\n\t\t\tgo execute(request)\n\t\t}\n\t}\n}\n\nfunc ping(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodGet && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusOK)\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc trigger(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodPost && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusAccepted)\n\t\tgo command()\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc heartbeatCall(rw http.ResponseWriter, request *http.Request) {\n\tif request.Method == http.MethodGet && strings.Split(request.RemoteAddr, \":\")[0] == config.Management.Host {\n\t\trw.WriteHeader(http.StatusOK)\n\t\tlastHeartbeat = []byte{}\n\t\theartbeat()\n\t} else {\n\t\trw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\nfunc nameById(id string) string {\n\tfor _, c := range pool {\n\t\tif c.Id == id {\n\t\t\treturn c.Name\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 OpenConfigd Project.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nfunc Compare() string {\n\tconfigActive.WriteTo(\"\/tmp\/config.1\")\n\tconfigCandidate.WriteTo(\"\/tmp\/config.2\")\n\n\tvar config string\n\tout, err := exec.Command(\"diff\", \"-U\", \"-1\", \"\/tmp\/config.1\", \"\/tmp\/config.2\").Output()\n\tif err != nil {\n\t\tlines := bytes.Split(out, []byte{'\\n'})\n\t\tif len(lines) > 3 {\n\t\t\tlines = lines[3:]\n\t\t}\n\t\tconfig = \"\"\n\t\tfor _, s := range lines {\n\t\t\tconfig = config + string(s) + \"\\n\"\n\t\t}\n\t} else {\n\t\tconfig = configCandidate.String()\n\t}\n\n\tos.Remove(\"\/tmp\/config.1\")\n\tos.Remove(\"\/tmp\/config.2\")\n\n\treturn config\n}\n\nfunc JsonMarshal() string {\n\treturn configCandidate.JsonMarshal()\n}\n\nfunc CompareCommand() string {\n\tconfigActive.WriteCommandTo(\"\/tmp\/config.1\")\n\tconfigCandidate.WriteCommandTo(\"\/tmp\/config.2\")\n\n\tvar config string\n\tout, err := exec.Command(\"diff\", \"-U\", \"-1\", \"\/tmp\/config.1\", \"\/tmp\/config.2\").Output()\n\tif err != nil {\n\t\tlines := bytes.Split(out, []byte{'\\n'})\n\t\tif len(lines) > 3 {\n\t\t\tlines = lines[3:]\n\t\t}\n\t\tconfig = \"\"\n\t\tfor _, s := range lines {\n\t\t\tconfig = config + string(s) + \"\\n\"\n\t\t}\n\t}\n\n\tos.Remove(\"\/tmp\/config.1\")\n\tos.Remove(\"\/tmp\/config.2\")\n\n\treturn config\n}\n\nfunc Commands() string {\n\treturn configActive.CommandString()\n}\n<commit_msg>Fix diff -U arugemnt for diff 3.8.<commit_after>\/\/ Copyright 2016 OpenConfigd Project.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nfunc Compare() string {\n\tconfigActive.WriteTo(\"\/tmp\/config.1\")\n\tconfigCandidate.WriteTo(\"\/tmp\/config.2\")\n\n\tvar config string\n\tout, err := exec.Command(\"diff\", \"-U\", \"65535\", \"\/tmp\/config.1\", \"\/tmp\/config.2\").Output()\n\tif err != nil {\n\t\tlines := bytes.Split(out, []byte{'\\n'})\n\t\tif len(lines) > 3 {\n\t\t\tlines = lines[3:]\n\t\t}\n\t\tconfig = \"\"\n\t\tfor _, s := range lines {\n\t\t\tconfig = config + string(s) + \"\\n\"\n\t\t}\n\t} else {\n\t\tconfig = configCandidate.String()\n\t}\n\n\tos.Remove(\"\/tmp\/config.1\")\n\tos.Remove(\"\/tmp\/config.2\")\n\n\treturn config\n}\n\nfunc JsonMarshal() string {\n\treturn configCandidate.JsonMarshal()\n}\n\nfunc CompareCommand() string {\n\tconfigActive.WriteCommandTo(\"\/tmp\/config.1\")\n\tconfigCandidate.WriteCommandTo(\"\/tmp\/config.2\")\n\n\tvar config string\n\tout, err := exec.Command(\"diff\", \"-U\", \"65535\", \"\/tmp\/config.1\", \"\/tmp\/config.2\").Output()\n\tif err != nil {\n\t\tlines := bytes.Split(out, []byte{'\\n'})\n\t\tif len(lines) > 3 {\n\t\t\tlines = lines[3:]\n\t\t}\n\t\tconfig = \"\"\n\t\tfor _, s := range lines {\n\t\t\tconfig = config + string(s) + \"\\n\"\n\t\t}\n\t}\n\n\tos.Remove(\"\/tmp\/config.1\")\n\tos.Remove(\"\/tmp\/config.2\")\n\n\treturn config\n}\n\nfunc Commands() string {\n\treturn configActive.CommandString()\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Copyright (c) 2016 Magicshui\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\/\n\/**\n * Copyright (c) 2017 yunify\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\/\n\npackage qingcloud\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\tqc \"github.com\/yunify\/qingcloud-sdk-go\/service\"\n)\n\nconst (\n\tresourceVpcType = \"type\"\n\tresourceVpcNetwork = \"vpc_network\"\n\tresourceVpcEipID = \"eip_id\"\n\tresourceVpcSecurityGroupID = \"security_group_id\"\n\tresourceVpcPrivateIP = \"private_ip\"\n\tresourceVpcPublicIP = \"public_ip\"\n)\n\nfunc resourceQingcloudVpc() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceQingcloudVpcCreate,\n\t\tRead: resourceQingcloudVpcRead,\n\t\tUpdate: resourceQingcloudVpcUpdate,\n\t\tDelete: resourceQingcloudVpcDelete,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\tresourceName: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcType: &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tDefault: 1,\n\t\t\t\tValidateFunc: withinArrayInt(0, 1, 2, 3),\n\t\t\t},\n\t\t\tresourceVpcNetwork: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: withinArrayString(\"192.168.0.0\/16\", \"172.16.0.0\/16\", \"172.17.0.0\/16\",\n\t\t\t\t\t\"172.18.0.0\/16\", \"172.19.0.0\/16\", \"172.20.0.0\/16\", \"172.21.0.0\/16\", \"172.22.0.0\/16\",\n\t\t\t\t\t\"172.23.0.0\/16\", \"172.24.0.0\/16\", \"172.25.0.0\/16\"),\n\t\t\t},\n\t\t\tresourceTagIds: tagIdsSchema(),\n\t\t\tresourceTagNames: tagNamesSchema(),\n\t\t\tresourceVpcEipID: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcSecurityGroupID: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\tresourceDescription: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcPrivateIP: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\tresourceVpcPublicIP: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ resourceQingcloudRouterCreate\nfunc resourceQingcloudVpcCreate(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tinput := new(qc.CreateRoutersInput)\n\tinput.RouterName, _ = getNamePointer(d)\n\tinput.VpcNetwork = getSetStringPointer(d, resourceVpcNetwork)\n\tinput.SecurityGroup = getSetStringPointer(d, resourceVpcSecurityGroupID)\n\tinput.RouterType = qc.Int(d.Get(resourceVpcType).(int))\n\tinput.Count = qc.Int(1)\n\tvar output *qc.CreateRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.CreateRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\td.SetId(qc.StringValue(output.Routers[0]))\n\tif _, err = RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for router (%s) to start: %s\", d.Id(), err.Error())\n\t}\n\treturn resourceQingcloudVpcUpdate(d, meta)\n}\n\nfunc resourceQingcloudVpcRead(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tinput := new(qc.DescribeRoutersInput)\n\tinput.Routers = []*string{qc.String(d.Id())}\n\tinput.Verbose = qc.Int(1)\n\tvar output *qc.DescribeRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.DescribeRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif isRouterDeleted(output.RouterSet) {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\trtr := output.RouterSet[0]\n\td.Set(resourceName, qc.StringValue(rtr.RouterName))\n\td.Set(resourceVpcType, qc.IntValue(rtr.RouterType))\n\td.Set(resourceVpcSecurityGroupID, qc.StringValue(rtr.SecurityGroupID))\n\td.Set(resourceDescription, qc.StringValue(rtr.Description))\n\td.Set(resourceVpcPrivateIP, qc.StringValue(rtr.PrivateIP))\n\td.Set(resourceVpcEipID, qc.StringValue(rtr.EIP.EIPID))\n\td.Set(resourceVpcPublicIP, qc.StringValue(rtr.EIP.EIPAddr))\n\tresourceSetTag(d, rtr.Tags)\n\treturn nil\n}\n\nfunc resourceQingcloudVpcUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\tif err := waitRouterLease(d, meta); err != nil {\n\t\treturn err\n\t}\n\td.Partial(true)\n\tif err := modifyRouterAttributes(d, meta); err != nil {\n\t\treturn err\n\t}\n\td.SetPartial(resourceName)\n\td.SetPartial(resourceDescription)\n\tif d.HasChange(resourceVpcEipID) {\n\t\tif err := applyRouterUpdate(qc.String(d.Id()), meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.SetPartial(resourceVpcEipID)\n\tif d.HasChange(resourceVpcSecurityGroupID) && !d.IsNewResource() {\n\t\tif err := applySecurityGroupRule(qc.String(resourceVpcSecurityGroupID), meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.SetPartial(resourceVpcSecurityGroupID)\n\tif err := resourceUpdateTag(d, meta, qingcloudResourceTypeRouter); err != nil {\n\t\treturn err\n\t}\n\td.SetPartial(resourceTagIds)\n\td.Partial(false)\n\treturn resourceQingcloudVpcRead(d, meta)\n}\n\nfunc resourceQingcloudVpcDelete(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\tif err := waitRouterLease(d, meta); err != nil {\n\t\treturn err\n\t}\n\tinput := new(qc.DeleteRoutersInput)\n\tinput.Routers = []*string{qc.String(d.Id())}\n\tvar output *qc.DeleteRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.DeleteRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\td.SetId(\"\")\n\treturn nil\n}\n<commit_msg>add new ip range<commit_after>\/**\n * Copyright (c) 2016 Magicshui\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\/\n\/**\n * Copyright (c) 2017 yunify\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\/\n\npackage qingcloud\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\tqc \"github.com\/yunify\/qingcloud-sdk-go\/service\"\n)\n\nconst (\n\tresourceVpcType = \"type\"\n\tresourceVpcNetwork = \"vpc_network\"\n\tresourceVpcEipID = \"eip_id\"\n\tresourceVpcSecurityGroupID = \"security_group_id\"\n\tresourceVpcPrivateIP = \"private_ip\"\n\tresourceVpcPublicIP = \"public_ip\"\n)\n\nfunc resourceQingcloudVpc() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceQingcloudVpcCreate,\n\t\tRead: resourceQingcloudVpcRead,\n\t\tUpdate: resourceQingcloudVpcUpdate,\n\t\tDelete: resourceQingcloudVpcDelete,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\tresourceName: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcType: &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tDefault: 1,\n\t\t\t\tValidateFunc: withinArrayInt(0, 1, 2, 3),\n\t\t\t},\n\t\t\tresourceVpcNetwork: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: withinArrayString(\"192.168.0.0\/16\", \"172.16.0.0\/16\", \"172.17.0.0\/16\",\n\t\t\t\t\t\"172.18.0.0\/16\", \"172.19.0.0\/16\", \"172.20.0.0\/16\", \"172.21.0.0\/16\", \"172.22.0.0\/16\",\n\t\t\t\t\t\"172.23.0.0\/16\", \"172.24.0.0\/16\", \"172.25.0.0\/16\", \"172.26.0.0\/16\", \"172.27.0.0\/16\",\n\t\t\t\t\t\"172.28.0.0\/16\", \"172.29.0.0\/16\", \"172.30.0.0\/16\", \"172.31.0.0\/16\"),\n\t\t\t},\n\t\t\tresourceTagIds: tagIdsSchema(),\n\t\t\tresourceTagNames: tagNamesSchema(),\n\t\t\tresourceVpcEipID: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcSecurityGroupID: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\tresourceDescription: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\tresourceVpcPrivateIP: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\tresourceVpcPublicIP: &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ resourceQingcloudRouterCreate\nfunc resourceQingcloudVpcCreate(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tinput := new(qc.CreateRoutersInput)\n\tinput.RouterName, _ = getNamePointer(d)\n\tinput.VpcNetwork = getSetStringPointer(d, resourceVpcNetwork)\n\tinput.SecurityGroup = getSetStringPointer(d, resourceVpcSecurityGroupID)\n\tinput.RouterType = qc.Int(d.Get(resourceVpcType).(int))\n\tinput.Count = qc.Int(1)\n\tvar output *qc.CreateRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.CreateRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\td.SetId(qc.StringValue(output.Routers[0]))\n\tif _, err = RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for router (%s) to start: %s\", d.Id(), err.Error())\n\t}\n\treturn resourceQingcloudVpcUpdate(d, meta)\n}\n\nfunc resourceQingcloudVpcRead(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tinput := new(qc.DescribeRoutersInput)\n\tinput.Routers = []*string{qc.String(d.Id())}\n\tinput.Verbose = qc.Int(1)\n\tvar output *qc.DescribeRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.DescribeRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif isRouterDeleted(output.RouterSet) {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\trtr := output.RouterSet[0]\n\td.Set(resourceName, qc.StringValue(rtr.RouterName))\n\td.Set(resourceVpcType, qc.IntValue(rtr.RouterType))\n\td.Set(resourceVpcSecurityGroupID, qc.StringValue(rtr.SecurityGroupID))\n\td.Set(resourceDescription, qc.StringValue(rtr.Description))\n\td.Set(resourceVpcPrivateIP, qc.StringValue(rtr.PrivateIP))\n\td.Set(resourceVpcEipID, qc.StringValue(rtr.EIP.EIPID))\n\td.Set(resourceVpcPublicIP, qc.StringValue(rtr.EIP.EIPAddr))\n\tresourceSetTag(d, rtr.Tags)\n\treturn nil\n}\n\nfunc resourceQingcloudVpcUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\tif err := waitRouterLease(d, meta); err != nil {\n\t\treturn err\n\t}\n\td.Partial(true)\n\tif err := modifyRouterAttributes(d, meta); err != nil {\n\t\treturn err\n\t}\n\td.SetPartial(resourceName)\n\td.SetPartial(resourceDescription)\n\tif d.HasChange(resourceVpcEipID) {\n\t\tif err := applyRouterUpdate(qc.String(d.Id()), meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.SetPartial(resourceVpcEipID)\n\tif d.HasChange(resourceVpcSecurityGroupID) && !d.IsNewResource() {\n\t\tif err := applySecurityGroupRule(qc.String(resourceVpcSecurityGroupID), meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.SetPartial(resourceVpcSecurityGroupID)\n\tif err := resourceUpdateTag(d, meta, qingcloudResourceTypeRouter); err != nil {\n\t\treturn err\n\t}\n\td.SetPartial(resourceTagIds)\n\td.Partial(false)\n\treturn resourceQingcloudVpcRead(d, meta)\n}\n\nfunc resourceQingcloudVpcDelete(d *schema.ResourceData, meta interface{}) error {\n\tclt := meta.(*QingCloudClient).router\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\tif err := waitRouterLease(d, meta); err != nil {\n\t\treturn err\n\t}\n\tinput := new(qc.DeleteRoutersInput)\n\tinput.Routers = []*string{qc.String(d.Id())}\n\tvar output *qc.DeleteRoutersOutput\n\tvar err error\n\tsimpleRetry(func() error {\n\t\toutput, err = clt.DeleteRouters(input)\n\t\treturn isServerBusy(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := RouterTransitionStateRefresh(clt, d.Id()); err != nil {\n\t\treturn err\n\t}\n\td.SetId(\"\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package metadata\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/socialengine\/rancher-cron\/model\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/rancher\/go-rancher-metadata\/metadata\"\n)\n\nconst (\n\tmetadataURL = \"http:\/\/rancher-metadata\/latest\"\n)\n\n\/\/ Client is a Struct that holds all metadata-specific data\ntype Client struct {\n\tMetadataClient *metadata.Client\n\tEnvironmentName string\n\tCronLabelName string\n\tSchedules *model.Schedules\n}\n\n\/\/ NewClient creates a new metadata client\nfunc NewClient(cronLabelName string) (*Client, error) {\n\tm, err := metadata.NewClientAndWait(metadataURL)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Failed to configure rancher-metadata: %v\", err)\n\t}\n\n\tenvName, err := getEnvironmentName(m)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Error reading stack info: %v\", err)\n\t}\n\n\tschedules := make(model.Schedules)\n\n\treturn &Client{\n\t\tMetadataClient: m,\n\t\tEnvironmentName: envName,\n\t\tCronLabelName: cronLabelName,\n\t\tSchedules: &schedules,\n\t}, nil\n}\n\n\/\/ GetVersion grabs the version of metadata client we're using\nfunc (m *Client) GetVersion() (string, error) {\n\treturn m.MetadataClient.GetVersion()\n}\n\n\/\/ GetCronSchedules returns a map of schedules with ContainerUUID as a key\nfunc (m *Client) GetCronSchedules() (*model.Schedules, error) {\n\tschedules := *m.Schedules\n\tservices, err := m.MetadataClient.GetServices()\n\n\tif err != nil {\n\t\tlogrus.Infof(\"Error reading services %v\", err)\n\t\treturn &schedules, err\n\t}\n\n\tmarkScheduleForCleanup(m.Schedules)\n\n\tfor _, service := range services {\n\t\tcronExpression, ok := service.Labels[m.CronLabelName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcontainerUUID, err := m.getCronContainer(service)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\texistingSchedule, ok := schedules[containerUUID]\n\t\t\/\/ we already have schedule for this container\n\t\tif ok {\n\t\t\t\/\/ do not cleanup\n\t\t\texistingSchedule.ToCleanup = false\n\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"uuid\": containerUUID,\n\t\t\t\t\"cronExpression\": cronExpression,\n\t\t\t}).Debugf(\"already have container\")\n\n\t\t\tcontinue\n\t\t}\n\t\t\/\/label exists, configure schedule\n\t\tschedule := model.NewSchedule()\n\n\t\tschedule.CronExpression = cronExpression\n\t\tschedule.ContainerUUID = containerUUID\n\t\tschedules[containerUUID] = schedule\n\t}\n\n\treturn &schedules, nil\n}\n\nfunc (m *Client) getCronContainer(service metadata.Service) (string, error) {\n\tcontainers := service.Containers\n\n\tfor _, container := range containers {\n\t\tif len(container.ServiceName) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(service.Name) != 0 {\n\t\t\tif service.Name != container.ServiceName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif service.StackName != container.StackName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn container.UUID, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"could not find container UUID with %s\", m.CronLabelName)\n}\n\nfunc markScheduleForCleanup(schedules *model.Schedules) {\n\tfor _, schedule := range *schedules {\n\t\tschedule.ToCleanup = true\n\t}\n}\n\nfunc getEnvironmentName(m *metadata.Client) (string, error) {\n\ttimeout := 30 * time.Second\n\tvar err error\n\tvar stack metadata.Stack\n\tfor i := 1 * time.Second; i < timeout; i *= time.Duration(2) {\n\t\tstack, err = m.GetSelfStack()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error reading stack info: %v...will retry\", err)\n\t\t\ttime.Sleep(i)\n\t\t} else {\n\t\t\treturn stack.EnvironmentName, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Error reading stack info: %v\", err)\n}\n<commit_msg>Updates metadata to allow multiple containers to be registered per service. (#4)<commit_after>package metadata\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/socialengine\/rancher-cron\/model\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/rancher\/go-rancher-metadata\/metadata\"\n)\n\nconst (\n\tmetadataURL = \"http:\/\/rancher-metadata\/latest\"\n)\n\n\/\/ Client is a Struct that holds all metadata-specific data\ntype Client struct {\n\tMetadataClient *metadata.Client\n\tEnvironmentName string\n\tCronLabelName string\n\tSchedules *model.Schedules\n}\n\n\/\/ NewClient creates a new metadata client\nfunc NewClient(cronLabelName string) (*Client, error) {\n\tm, err := metadata.NewClientAndWait(metadataURL)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Failed to configure rancher-metadata: %v\", err)\n\t}\n\n\tenvName, err := getEnvironmentName(m)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Error reading stack info: %v\", err)\n\t}\n\n\tschedules := make(model.Schedules)\n\n\treturn &Client{\n\t\tMetadataClient: m,\n\t\tEnvironmentName: envName,\n\t\tCronLabelName: cronLabelName,\n\t\tSchedules: &schedules,\n\t}, nil\n}\n\n\/\/ GetVersion grabs the version of metadata client we're using\nfunc (m *Client) GetVersion() (string, error) {\n\treturn m.MetadataClient.GetVersion()\n}\n\n\/\/ GetCronSchedules returns a map of schedules with ContainerUUID as a key\nfunc (m *Client) GetCronSchedules() (*model.Schedules, error) {\n\tschedules := *m.Schedules\n\tservices, err := m.MetadataClient.GetServices()\n\n\tif err != nil {\n\t\tlogrus.Infof(\"Error reading services %v\", err)\n\t\treturn &schedules, err\n\t}\n\n\tmarkScheduleForCleanup(m.Schedules)\n\n\tfor _, service := range services {\n\t\tcronExpression, ok := service.Labels[m.CronLabelName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcontainerUUIDs, err := m.getCronContainers(service)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, containerUUID := range containerUUIDs {\n\t\t\texistingSchedule, ok := schedules[containerUUID]\n\t\t\t\/\/ we already have schedule for this container\n\t\t\tif ok {\n\t\t\t\t\/\/ do not cleanup\n\t\t\t\texistingSchedule.ToCleanup = false\n\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"uuid\": containerUUID,\n\t\t\t\t\t\"cronExpression\": cronExpression,\n\t\t\t\t}).Debugf(\"already have container\")\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/label exists, configure schedule\n\t\t\tschedule := model.NewSchedule()\n\n\t\t\tschedule.CronExpression = cronExpression\n\t\t\tschedule.ContainerUUID = containerUUID\n\t\t\tschedules[containerUUID] = schedule\n\t\t}\n\t}\n\n\treturn &schedules, nil\n}\n\nfunc (m *Client) getCronContainers(service metadata.Service) ([]string, error) {\n\tcontainers := service.Containers\n\tvar uuids []string\n\n\tfor _, container := range containers {\n\t\tif len(container.ServiceName) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(service.Name) != 0 {\n\t\t\tif service.Name != container.ServiceName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif service.StackName != container.StackName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tuuids = append(uuids, container.UUID)\n\t}\n\n\tif len(uuids) > 0 {\n\t\treturn uuids, nil\n\t}\n\n\treturn uuids, fmt.Errorf(\"could not find container UUID with %s\", m.CronLabelName)\n}\n\nfunc markScheduleForCleanup(schedules *model.Schedules) {\n\tfor _, schedule := range *schedules {\n\t\tschedule.ToCleanup = true\n\t}\n}\n\nfunc getEnvironmentName(m *metadata.Client) (string, error) {\n\ttimeout := 30 * time.Second\n\tvar err error\n\tvar stack metadata.Stack\n\tfor i := 1 * time.Second; i < timeout; i *= time.Duration(2) {\n\t\tstack, err = m.GetSelfStack()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error reading stack info: %v...will retry\", err)\n\t\t\ttime.Sleep(i)\n\t\t} else {\n\t\t\treturn stack.EnvironmentName, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Error reading stack info: %v\", err)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\n\t\/\/\"github.com\/dgryski\/go-tsz\"\n\t\"github.com\/raintank\/go-tsz\"\n)\n\n\/\/ Chunk is a chunk of data. not concurrency safe.\ntype Chunk struct {\n\t*tsz.Series\n\tT0 uint32\n\tLastTs uint32\n\tNumPoints uint32\n\tSaved bool\n}\n\nfunc NewChunk(t0 uint32) *Chunk {\n\treturn &Chunk{tsz.New(t0), t0, 0, 0, false}\n}\n\nfunc (c *Chunk) String() string {\n\treturn fmt.Sprintf(\"<chunk t0 at %s, %d points>\", TS(c.T0), c.NumPoints)\n\n}\nfunc (c *Chunk) Push(t uint32, v float64) error {\n\tif t <= c.LastTs {\n\t\treturn fmt.Errorf(\"Can't push points that are older then points already added. t:%d lastTs: %d\", t.c.LastTs)\n\t}\n\tc.Series.Push(t, v)\n\tc.NumPoints += 1\n\tc.LastTs = t\n\treturn nil\n}\n\ntype chunkOnDisk struct {\n\tSeries *tsz.Series\n\tT0 uint32\n\tLastTs uint32\n\tNumPoints uint32\n\tSaved bool\n}\n\nfunc (c *Chunk) GobEncode() ([]byte, error) {\n\t\/\/ create an OnDisk format of our data.\n\tcOnDisk := chunkOnDisk{\n\t\tSeries: c.Series,\n\t\tT0: c.T0,\n\t\tLastTs: c.LastTs,\n\t\tNumPoints: c.NumPoints,\n\t\tSaved: c.Saved,\n\t}\n\tvar b bytes.Buffer\n\tenc := gob.NewEncoder(&b)\n\terr := enc.Encode(cOnDisk)\n\treturn b.Bytes(), err\n}\n\nfunc (c *Chunk) GobDecode(data []byte) error {\n\t\/\/decode our data bytes into our onDisk struct\n\tr := bytes.NewReader(data)\n\tdec := gob.NewDecoder(r)\n\tcOnDisk := &chunkOnDisk{}\n\terr := dec.Decode(cOnDisk)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fill in the fields of the passed Chunk with the data from our OnDisk format.\n\tc.Series = cOnDisk.Series\n\tc.T0 = cOnDisk.T0\n\tc.LastTs = cOnDisk.LastTs\n\tc.NumPoints = cOnDisk.NumPoints\n\tc.Saved = cOnDisk.Saved\n\n\treturn nil\n}\n<commit_msg>fix typo in chunk.go<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\n\t\/\/\"github.com\/dgryski\/go-tsz\"\n\t\"github.com\/raintank\/go-tsz\"\n)\n\n\/\/ Chunk is a chunk of data. not concurrency safe.\ntype Chunk struct {\n\t*tsz.Series\n\tT0 uint32\n\tLastTs uint32\n\tNumPoints uint32\n\tSaved bool\n}\n\nfunc NewChunk(t0 uint32) *Chunk {\n\treturn &Chunk{tsz.New(t0), t0, 0, 0, false}\n}\n\nfunc (c *Chunk) String() string {\n\treturn fmt.Sprintf(\"<chunk t0 at %s, %d points>\", TS(c.T0), c.NumPoints)\n\n}\nfunc (c *Chunk) Push(t uint32, v float64) error {\n\tif t <= c.LastTs {\n\t\treturn fmt.Errorf(\"Can't push points that are older then points already added. t:%d lastTs: %d\", t, c.LastTs)\n\t}\n\tc.Series.Push(t, v)\n\tc.NumPoints += 1\n\tc.LastTs = t\n\treturn nil\n}\n\ntype chunkOnDisk struct {\n\tSeries *tsz.Series\n\tT0 uint32\n\tLastTs uint32\n\tNumPoints uint32\n\tSaved bool\n}\n\nfunc (c *Chunk) GobEncode() ([]byte, error) {\n\t\/\/ create an OnDisk format of our data.\n\tcOnDisk := chunkOnDisk{\n\t\tSeries: c.Series,\n\t\tT0: c.T0,\n\t\tLastTs: c.LastTs,\n\t\tNumPoints: c.NumPoints,\n\t\tSaved: c.Saved,\n\t}\n\tvar b bytes.Buffer\n\tenc := gob.NewEncoder(&b)\n\terr := enc.Encode(cOnDisk)\n\treturn b.Bytes(), err\n}\n\nfunc (c *Chunk) GobDecode(data []byte) error {\n\t\/\/decode our data bytes into our onDisk struct\n\tr := bytes.NewReader(data)\n\tdec := gob.NewDecoder(r)\n\tcOnDisk := &chunkOnDisk{}\n\terr := dec.Decode(cOnDisk)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fill in the fields of the passed Chunk with the data from our OnDisk format.\n\tc.Series = cOnDisk.Series\n\tc.T0 = cOnDisk.T0\n\tc.LastTs = cOnDisk.LastTs\n\tc.NumPoints = cOnDisk.NumPoints\n\tc.Saved = cOnDisk.Saved\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage analyzer\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/containerd\/console\"\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cio\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\/tasks\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/log\"\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/snapshots\"\n\t\"github.com\/containerd\/stargz-snapshotter\/analyzer\/fanotify\"\n\t\"github.com\/containerd\/stargz-snapshotter\/analyzer\/recorder\"\n\t\"github.com\/opencontainers\/go-digest\"\n\t\"github.com\/opencontainers\/image-spec\/identity\"\n\truntimespec \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/xid\"\n)\n\nvar defaultPeriod = 10 * time.Second\n\n\/\/ Analyze analyzes the passed image then store the record of prioritized files into\n\/\/ containerd's content store. This function returns the digest of that record file.\n\/\/ This digest can be used to read record from the content store.\nfunc Analyze(ctx context.Context, client *containerd.Client, ref string, opts ...Option) (digest.Digest, error) {\n\tvar aOpts analyzerOpts\n\tfor _, o := range opts {\n\t\to(&aOpts)\n\t}\n\tif aOpts.terminal && aOpts.waitOnSignal {\n\t\treturn \"\", fmt.Errorf(\"wait-on-signal option cannot be used with terminal option\")\n\t}\n\n\ttarget, err := ioutil.TempDir(\"\", \"target\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer os.RemoveAll(target)\n\n\tcs := client.ContentStore()\n\tis := client.ImageService()\n\tss := client.SnapshotService(aOpts.snapshotter)\n\n\timg, err := is.Get(ctx, ref)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tplatformImg := containerd.NewImageWithPlatform(client, img, platforms.Default())\n\n\t\/\/ Mount the target image\n\t\/\/ NOTE: We cannot let containerd prepare the rootfs. We create mount namespace *before*\n\t\/\/ creating the container so containerd's bundle preparation (mounting snapshots to\n\t\/\/ the bundle directory, etc.) is invisible inside the pre-unshared mount namespace.\n\t\/\/ This leads to runc using empty directory as the rootfs.\n\tif unpacked, err := platformImg.IsUnpacked(ctx, aOpts.snapshotter); err != nil {\n\t\treturn \"\", err\n\t} else if !unpacked {\n\t\tif err := platformImg.Unpack(ctx, aOpts.snapshotter); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tcleanup, err := mountImage(ctx, ss, platformImg, target)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer cleanup()\n\n\t\/\/ Spawn a fanotifier process in a new mount namespace and setup recorder.\n\tfanotifier, err := fanotify.SpawnFanotifier(\"\/proc\/self\/exe\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to spawn fanotifier\")\n\t}\n\tdefer func() {\n\t\tif err := fanotifier.Close(); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to close fanotifier\")\n\t\t}\n\t}()\n\n\t\/\/ Prepare the spec based on the specified image and runtime options.\n\tvar sOpts []oci.SpecOpts\n\tif aOpts.specOpts != nil {\n\t\tgotOpts, done, err := aOpts.specOpts(platformImg, target)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := done(); err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup container\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\tsOpts = append(sOpts, gotOpts...)\n\t} else {\n\t\tsOpts = append(sOpts,\n\t\t\toci.WithDefaultSpec(),\n\t\t\toci.WithDefaultUnixDevices,\n\t\t\toci.WithRootFSPath(target),\n\t\t\toci.WithImageConfig(platformImg),\n\t\t)\n\t}\n\tsOpts = append(sOpts, oci.WithLinuxNamespace(runtimespec.LinuxNamespace{\n\t\tType: runtimespec.MountNamespace,\n\t\tPath: fanotifier.MountNamespacePath(), \/\/ use mount namespace that the fanotifier created\n\t}))\n\n\t\/\/ Create the container and the task\n\tvar container containerd.Container\n\tfor i := 0; i < 3; i++ {\n\t\tid := xid.New().String()\n\t\tvar s runtimespec.Spec\n\t\tcontainer, err = client.NewContainer(ctx, id,\n\t\t\tcontainerd.WithImage(platformImg),\n\t\t\tcontainerd.WithSnapshotter(aOpts.snapshotter),\n\t\t\tcontainerd.WithImageStopSignal(platformImg, \"SIGKILL\"),\n\n\t\t\t\/\/ WithImageConfig depends on WithImage and WithSnapshotter for resolving\n\t\t\t\/\/ username (accesses to \/etc\/{passwd,group} files on the rootfs)\n\t\t\tcontainerd.WithSpec(&s, sOpts...),\n\t\t)\n\t\tif err != nil {\n\t\t\tif errdefs.IsAlreadyExists(err) {\n\t\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to create container\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tbreak\n\t}\n\tif container == nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create container\")\n\t}\n\tdefer container.Delete(ctx, containerd.WithSnapshotCleanup)\n\tvar ioCreator cio.Creator\n\tvar con console.Console\n\tstdinC := newLazyReadCloser(os.Stdin)\n\tif aOpts.terminal {\n\t\tif !aOpts.stdin {\n\t\t\treturn \"\", fmt.Errorf(\"terminal cannot be used if stdin isn't enabled\")\n\t\t}\n\t\tcon = console.Current()\n\t\tdefer con.Reset()\n\t\tif err := con.SetRaw(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\t\/\/ On terminal mode, the \"stderr\" field is unused.\n\t\tioCreator = cio.NewCreator(cio.WithStreams(con, con, nil), cio.WithTerminal)\n\t} else if aOpts.stdin {\n\t\tioCreator = cio.NewCreator(cio.WithStreams(stdinC, os.Stdout, os.Stderr))\n\t} else {\n\t\tioCreator = cio.NewCreator(cio.WithStreams(nil, os.Stdout, os.Stderr))\n\t}\n\ttask, err := container.NewTask(ctx, ioCreator)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstdinC.registerCloser(func() { \/\/ Ensure to close IO when stdin get EOF\n\t\ttask.CloseIO(ctx, containerd.WithStdinCloser)\n\t})\n\n\t\/\/ Start to monitor \"\/\" and run the task.\n\trc, err := recorder.NewImageRecorder(ctx, cs, img, platforms.Default())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer rc.Close()\n\tif err := fanotifier.Start(); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to start fanotifier\")\n\t}\n\tvar fanotifierClosed bool\n\tvar fanotifierClosedMu sync.Mutex\n\tgo func() {\n\t\tfor {\n\t\t\tpath, err := fanotifier.GetPath()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tfanotifierClosedMu.Lock()\n\t\t\t\t\tisFanotifierClosed := fanotifierClosed\n\t\t\t\t\tfanotifierClosedMu.Unlock()\n\t\t\t\t\tif isFanotifierClosed {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.G(ctx).WithError(err).Error(\"failed to get notified path\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err := rc.Record(path); err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Debugf(\"failed to record %q\", path)\n\t\t\t}\n\t\t}\n\t}()\n\tif aOpts.terminal {\n\t\tif err := tasks.HandleConsoleResize(ctx, task, con); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Error(\"failed to resize console\")\n\t\t}\n\t} else {\n\t\tsigc := commands.ForwardAllSignals(ctx, task)\n\t\tdefer commands.StopCatch(sigc)\n\t}\n\tif err := task.Start(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Wait until the task exit\n\tvar status containerd.ExitStatus\n\tvar killOk bool\n\tif aOpts.waitOnSignal { \/\/ NOTE: not functional with `terminal` option\n\t\tlog.G(ctx).Infof(\"press Ctrl+C to terminate the container\")\n\t\tstatus, killOk, err = waitOnSignal(ctx, container, task)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tif aOpts.period <= 0 {\n\t\t\taOpts.period = defaultPeriod\n\t\t}\n\t\tlog.G(ctx).Infof(\"waiting for %v ...\", aOpts.period)\n\t\tstatus, killOk, err = waitOnTimeout(ctx, container, task, aOpts.period)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tif !killOk {\n\t\tlog.G(ctx).Warnf(\"failed to exit task %v; manually kill it\", task.ID())\n\t} else {\n\t\tcode, _, err := status.Result()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.G(ctx).Infof(\"container exit with code %v\", code)\n\t\tif _, err := task.Delete(ctx); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/ ensure no record comes in\n\tfanotifierClosedMu.Lock()\n\tfanotifierClosed = true\n\tfanotifierClosedMu.Unlock()\n\tif err := fanotifier.Close(); err != nil {\n\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup fanotifier\")\n\t}\n\n\t\/\/ Finish recording\n\treturn rc.Commit(ctx)\n}\n\nfunc mountImage(ctx context.Context, ss snapshots.Snapshotter, image containerd.Image, mountpoint string) (func(), error) {\n\tdiffIDs, err := image.RootFS(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmounts, err := ss.Prepare(ctx, mountpoint, identity.ChainID(diffIDs).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mount.All(mounts, mountpoint); err != nil {\n\t\tif err := ss.Remove(ctx, mountpoint); err != nil && !errdefs.IsNotFound(err) {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup snapshot after mount error\")\n\t\t}\n\t\treturn nil, errors.Wrapf(err, \"failed to mount rootfs at %q\", mountpoint)\n\t}\n\treturn func() {\n\t\tif err := mount.UnmountAll(mountpoint, 0); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to unmount snapshot\")\n\t\t}\n\t\tif err := ss.Remove(ctx, mountpoint); err != nil && !errdefs.IsNotFound(err) {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup snapshot\")\n\t\t}\n\t}, nil\n}\n\nfunc waitOnSignal(ctx context.Context, container containerd.Container, task containerd.Task) (containerd.ExitStatus, bool, error) {\n\tstatusC, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, false, err\n\t}\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc, syscall.SIGINT)\n\tdefer signal.Stop(sc)\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, true, nil\n\tcase <-sc:\n\t\tlog.G(ctx).Info(\"signal detected\")\n\t\tstatus, err := killTask(ctx, container, task, statusC)\n\t\tif err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to kill container\")\n\t\t\treturn containerd.ExitStatus{}, false, nil\n\t\t}\n\t\treturn status, true, nil\n\t}\n}\n\nfunc waitOnTimeout(ctx context.Context, container containerd.Container, task containerd.Task, period time.Duration) (containerd.ExitStatus, bool, error) {\n\tstatusC, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, false, err\n\t}\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, true, nil\n\tcase <-time.After(period):\n\t\tlog.G(ctx).Warnf(\"killing task. the time period to monitor access log (%s) has timed out\", period.String())\n\t\tstatus, err := killTask(ctx, container, task, statusC)\n\t\tif err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to kill container\")\n\t\t\treturn containerd.ExitStatus{}, false, nil\n\t\t}\n\t\treturn status, true, nil\n\t}\n}\n\nfunc killTask(ctx context.Context, container containerd.Container, task containerd.Task, statusC <-chan containerd.ExitStatus) (containerd.ExitStatus, error) {\n\tsig, err := containerd.GetStopSignal(ctx, container, syscall.SIGKILL)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, err\n\t}\n\tif err := task.Kill(ctx, sig, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {\n\t\treturn containerd.ExitStatus{}, errors.Wrapf(err, \"forward SIGKILL\")\n\t}\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, nil\n\tcase <-time.After(5 * time.Second):\n\t\treturn containerd.ExitStatus{}, fmt.Errorf(\"timeout\")\n\t}\n}\n\ntype lazyReadCloser struct {\n\treader io.Reader\n\tcloser func()\n\tcloserMu sync.Mutex\n\tinitCond *sync.Cond\n\tinitialized int64\n}\n\nfunc newLazyReadCloser(r io.Reader) *lazyReadCloser {\n\trc := &lazyReadCloser{reader: r, initCond: sync.NewCond(&sync.Mutex{})}\n\treturn rc\n}\n\nfunc (s *lazyReadCloser) registerCloser(closer func()) {\n\ts.closerMu.Lock()\n\ts.closer = closer\n\ts.closerMu.Unlock()\n\tatomic.AddInt64(&s.initialized, 1)\n\ts.initCond.Broadcast()\n}\n\nfunc (s *lazyReadCloser) Read(p []byte) (int, error) {\n\tif atomic.LoadInt64(&s.initialized) <= 0 {\n\t\t\/\/ wait until initialized\n\t\ts.initCond.L.Lock()\n\t\tif atomic.LoadInt64(&s.initialized) <= 0 {\n\t\t\ts.initCond.Wait()\n\t\t}\n\t\ts.initCond.L.Unlock()\n\t}\n\n\tn, err := s.reader.Read(p)\n\tif err == io.EOF {\n\t\ts.closerMu.Lock()\n\t\ts.closer()\n\t\ts.closerMu.Unlock()\n\t}\n\treturn n, err\n}\n<commit_msg>feat: log success record path count<commit_after>\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage analyzer\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/containerd\/console\"\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cio\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\/tasks\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/log\"\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/snapshots\"\n\t\"github.com\/containerd\/stargz-snapshotter\/analyzer\/fanotify\"\n\t\"github.com\/containerd\/stargz-snapshotter\/analyzer\/recorder\"\n\t\"github.com\/opencontainers\/go-digest\"\n\t\"github.com\/opencontainers\/image-spec\/identity\"\n\truntimespec \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/xid\"\n)\n\nvar defaultPeriod = 10 * time.Second\n\n\/\/ Analyze analyzes the passed image then store the record of prioritized files into\n\/\/ containerd's content store. This function returns the digest of that record file.\n\/\/ This digest can be used to read record from the content store.\nfunc Analyze(ctx context.Context, client *containerd.Client, ref string, opts ...Option) (digest.Digest, error) {\n\tvar aOpts analyzerOpts\n\tfor _, o := range opts {\n\t\to(&aOpts)\n\t}\n\tif aOpts.terminal && aOpts.waitOnSignal {\n\t\treturn \"\", fmt.Errorf(\"wait-on-signal option cannot be used with terminal option\")\n\t}\n\n\ttarget, err := ioutil.TempDir(\"\", \"target\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer os.RemoveAll(target)\n\n\tcs := client.ContentStore()\n\tis := client.ImageService()\n\tss := client.SnapshotService(aOpts.snapshotter)\n\n\timg, err := is.Get(ctx, ref)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tplatformImg := containerd.NewImageWithPlatform(client, img, platforms.Default())\n\n\t\/\/ Mount the target image\n\t\/\/ NOTE: We cannot let containerd prepare the rootfs. We create mount namespace *before*\n\t\/\/ creating the container so containerd's bundle preparation (mounting snapshots to\n\t\/\/ the bundle directory, etc.) is invisible inside the pre-unshared mount namespace.\n\t\/\/ This leads to runc using empty directory as the rootfs.\n\tif unpacked, err := platformImg.IsUnpacked(ctx, aOpts.snapshotter); err != nil {\n\t\treturn \"\", err\n\t} else if !unpacked {\n\t\tif err := platformImg.Unpack(ctx, aOpts.snapshotter); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tcleanup, err := mountImage(ctx, ss, platformImg, target)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer cleanup()\n\n\t\/\/ Spawn a fanotifier process in a new mount namespace and setup recorder.\n\tfanotifier, err := fanotify.SpawnFanotifier(\"\/proc\/self\/exe\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to spawn fanotifier\")\n\t}\n\tdefer func() {\n\t\tif err := fanotifier.Close(); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to close fanotifier\")\n\t\t}\n\t}()\n\n\t\/\/ Prepare the spec based on the specified image and runtime options.\n\tvar sOpts []oci.SpecOpts\n\tif aOpts.specOpts != nil {\n\t\tgotOpts, done, err := aOpts.specOpts(platformImg, target)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := done(); err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup container\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\tsOpts = append(sOpts, gotOpts...)\n\t} else {\n\t\tsOpts = append(sOpts,\n\t\t\toci.WithDefaultSpec(),\n\t\t\toci.WithDefaultUnixDevices,\n\t\t\toci.WithRootFSPath(target),\n\t\t\toci.WithImageConfig(platformImg),\n\t\t)\n\t}\n\tsOpts = append(sOpts, oci.WithLinuxNamespace(runtimespec.LinuxNamespace{\n\t\tType: runtimespec.MountNamespace,\n\t\tPath: fanotifier.MountNamespacePath(), \/\/ use mount namespace that the fanotifier created\n\t}))\n\n\t\/\/ Create the container and the task\n\tvar container containerd.Container\n\tfor i := 0; i < 3; i++ {\n\t\tid := xid.New().String()\n\t\tvar s runtimespec.Spec\n\t\tcontainer, err = client.NewContainer(ctx, id,\n\t\t\tcontainerd.WithImage(platformImg),\n\t\t\tcontainerd.WithSnapshotter(aOpts.snapshotter),\n\t\t\tcontainerd.WithImageStopSignal(platformImg, \"SIGKILL\"),\n\n\t\t\t\/\/ WithImageConfig depends on WithImage and WithSnapshotter for resolving\n\t\t\t\/\/ username (accesses to \/etc\/{passwd,group} files on the rootfs)\n\t\t\tcontainerd.WithSpec(&s, sOpts...),\n\t\t)\n\t\tif err != nil {\n\t\t\tif errdefs.IsAlreadyExists(err) {\n\t\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to create container\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tbreak\n\t}\n\tif container == nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create container\")\n\t}\n\tdefer container.Delete(ctx, containerd.WithSnapshotCleanup)\n\tvar ioCreator cio.Creator\n\tvar con console.Console\n\tstdinC := newLazyReadCloser(os.Stdin)\n\tif aOpts.terminal {\n\t\tif !aOpts.stdin {\n\t\t\treturn \"\", fmt.Errorf(\"terminal cannot be used if stdin isn't enabled\")\n\t\t}\n\t\tcon = console.Current()\n\t\tdefer con.Reset()\n\t\tif err := con.SetRaw(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\t\/\/ On terminal mode, the \"stderr\" field is unused.\n\t\tioCreator = cio.NewCreator(cio.WithStreams(con, con, nil), cio.WithTerminal)\n\t} else if aOpts.stdin {\n\t\tioCreator = cio.NewCreator(cio.WithStreams(stdinC, os.Stdout, os.Stderr))\n\t} else {\n\t\tioCreator = cio.NewCreator(cio.WithStreams(nil, os.Stdout, os.Stderr))\n\t}\n\ttask, err := container.NewTask(ctx, ioCreator)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstdinC.registerCloser(func() { \/\/ Ensure to close IO when stdin get EOF\n\t\ttask.CloseIO(ctx, containerd.WithStdinCloser)\n\t})\n\n\t\/\/ Start to monitor \"\/\" and run the task.\n\trc, err := recorder.NewImageRecorder(ctx, cs, img, platforms.Default())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer rc.Close()\n\tif err := fanotifier.Start(); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to start fanotifier\")\n\t}\n\tvar fanotifierClosed bool\n\tvar fanotifierClosedMu sync.Mutex\n\tgo func() {\n\t\tvar successCount int\n\t\tdefer func() {\n\t\t\tlog.G(ctx).Debugf(\"success record %d path\", successCount)\n\t\t}()\n\t\tfor {\n\t\t\tpath, err := fanotifier.GetPath()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tfanotifierClosedMu.Lock()\n\t\t\t\t\tisFanotifierClosed := fanotifierClosed\n\t\t\t\t\tfanotifierClosedMu.Unlock()\n\t\t\t\t\tif isFanotifierClosed {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.G(ctx).WithError(err).Error(\"failed to get notified path\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err := rc.Record(path); err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Debugf(\"failed to record %q\", path)\n\t\t\t}\n\t\t\tsuccessCount++\n\t\t}\n\t}()\n\tif aOpts.terminal {\n\t\tif err := tasks.HandleConsoleResize(ctx, task, con); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Error(\"failed to resize console\")\n\t\t}\n\t} else {\n\t\tsigc := commands.ForwardAllSignals(ctx, task)\n\t\tdefer commands.StopCatch(sigc)\n\t}\n\tif err := task.Start(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Wait until the task exit\n\tvar status containerd.ExitStatus\n\tvar killOk bool\n\tif aOpts.waitOnSignal { \/\/ NOTE: not functional with `terminal` option\n\t\tlog.G(ctx).Infof(\"press Ctrl+C to terminate the container\")\n\t\tstatus, killOk, err = waitOnSignal(ctx, container, task)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tif aOpts.period <= 0 {\n\t\t\taOpts.period = defaultPeriod\n\t\t}\n\t\tlog.G(ctx).Infof(\"waiting for %v ...\", aOpts.period)\n\t\tstatus, killOk, err = waitOnTimeout(ctx, container, task, aOpts.period)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tif !killOk {\n\t\tlog.G(ctx).Warnf(\"failed to exit task %v; manually kill it\", task.ID())\n\t} else {\n\t\tcode, _, err := status.Result()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.G(ctx).Infof(\"container exit with code %v\", code)\n\t\tif _, err := task.Delete(ctx); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/ ensure no record comes in\n\tfanotifierClosedMu.Lock()\n\tfanotifierClosed = true\n\tfanotifierClosedMu.Unlock()\n\tif err := fanotifier.Close(); err != nil {\n\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup fanotifier\")\n\t}\n\n\t\/\/ Finish recording\n\treturn rc.Commit(ctx)\n}\n\nfunc mountImage(ctx context.Context, ss snapshots.Snapshotter, image containerd.Image, mountpoint string) (func(), error) {\n\tdiffIDs, err := image.RootFS(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmounts, err := ss.Prepare(ctx, mountpoint, identity.ChainID(diffIDs).String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mount.All(mounts, mountpoint); err != nil {\n\t\tif err := ss.Remove(ctx, mountpoint); err != nil && !errdefs.IsNotFound(err) {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup snapshot after mount error\")\n\t\t}\n\t\treturn nil, errors.Wrapf(err, \"failed to mount rootfs at %q\", mountpoint)\n\t}\n\treturn func() {\n\t\tif err := mount.UnmountAll(mountpoint, 0); err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to unmount snapshot\")\n\t\t}\n\t\tif err := ss.Remove(ctx, mountpoint); err != nil && !errdefs.IsNotFound(err) {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to cleanup snapshot\")\n\t\t}\n\t}, nil\n}\n\nfunc waitOnSignal(ctx context.Context, container containerd.Container, task containerd.Task) (containerd.ExitStatus, bool, error) {\n\tstatusC, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, false, err\n\t}\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc, syscall.SIGINT)\n\tdefer signal.Stop(sc)\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, true, nil\n\tcase <-sc:\n\t\tlog.G(ctx).Info(\"signal detected\")\n\t\tstatus, err := killTask(ctx, container, task, statusC)\n\t\tif err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to kill container\")\n\t\t\treturn containerd.ExitStatus{}, false, nil\n\t\t}\n\t\treturn status, true, nil\n\t}\n}\n\nfunc waitOnTimeout(ctx context.Context, container containerd.Container, task containerd.Task, period time.Duration) (containerd.ExitStatus, bool, error) {\n\tstatusC, err := task.Wait(ctx)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, false, err\n\t}\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, true, nil\n\tcase <-time.After(period):\n\t\tlog.G(ctx).Warnf(\"killing task. the time period to monitor access log (%s) has timed out\", period.String())\n\t\tstatus, err := killTask(ctx, container, task, statusC)\n\t\tif err != nil {\n\t\t\tlog.G(ctx).WithError(err).Warnf(\"failed to kill container\")\n\t\t\treturn containerd.ExitStatus{}, false, nil\n\t\t}\n\t\treturn status, true, nil\n\t}\n}\n\nfunc killTask(ctx context.Context, container containerd.Container, task containerd.Task, statusC <-chan containerd.ExitStatus) (containerd.ExitStatus, error) {\n\tsig, err := containerd.GetStopSignal(ctx, container, syscall.SIGKILL)\n\tif err != nil {\n\t\treturn containerd.ExitStatus{}, err\n\t}\n\tif err := task.Kill(ctx, sig, containerd.WithKillAll); err != nil && !errdefs.IsNotFound(err) {\n\t\treturn containerd.ExitStatus{}, errors.Wrapf(err, \"forward SIGKILL\")\n\t}\n\tselect {\n\tcase status := <-statusC:\n\t\treturn status, nil\n\tcase <-time.After(5 * time.Second):\n\t\treturn containerd.ExitStatus{}, fmt.Errorf(\"timeout\")\n\t}\n}\n\ntype lazyReadCloser struct {\n\treader io.Reader\n\tcloser func()\n\tcloserMu sync.Mutex\n\tinitCond *sync.Cond\n\tinitialized int64\n}\n\nfunc newLazyReadCloser(r io.Reader) *lazyReadCloser {\n\trc := &lazyReadCloser{reader: r, initCond: sync.NewCond(&sync.Mutex{})}\n\treturn rc\n}\n\nfunc (s *lazyReadCloser) registerCloser(closer func()) {\n\ts.closerMu.Lock()\n\ts.closer = closer\n\ts.closerMu.Unlock()\n\tatomic.AddInt64(&s.initialized, 1)\n\ts.initCond.Broadcast()\n}\n\nfunc (s *lazyReadCloser) Read(p []byte) (int, error) {\n\tif atomic.LoadInt64(&s.initialized) <= 0 {\n\t\t\/\/ wait until initialized\n\t\ts.initCond.L.Lock()\n\t\tif atomic.LoadInt64(&s.initialized) <= 0 {\n\t\t\ts.initCond.Wait()\n\t\t}\n\t\ts.initCond.L.Unlock()\n\t}\n\n\tn, err := s.reader.Read(p)\n\tif err == io.EOF {\n\t\ts.closerMu.Lock()\n\t\ts.closer()\n\t\ts.closerMu.Unlock()\n\t}\n\treturn n, err\n}\n<|endoftext|>"} {"text":"<commit_before>package middleware\n\nimport (\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\ntype Logger struct {\n\tlogger lager.Logger\n}\n\nfunc NewLogger(logger lager.Logger) Middleware {\n\treturn Logger{\n\t\tlogger: logger,\n\t}\n}\n\nfunc (l Logger) Wrap(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\tloggingResponseWriter := responseWriter{\n\t\t\trw,\n\t\t\t[]byte{},\n\t\t\t0,\n\t\t}\n\t\tnext.ServeHTTP(&loggingResponseWriter, req)\n\n\t\trequestCopy := *req\n\t\trequestCopy.Header[\"Authorization\"] = nil\n\n\t\tresponse := map[string]interface{}{\n\t\t\t\"Header\": loggingResponseWriter.Header(),\n\t\t\t\"Body\": string(loggingResponseWriter.body),\n\t\t\t\"StatusCode\": loggingResponseWriter.statusCode,\n\t\t}\n\n\t\tl.logger.Debug(\"\", lager.Data{\n\t\t\t\"request\": fromHTTPRequest(requestCopy),\n\t\t\t\"response\": response,\n\t\t})\n\t})\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tbody []byte\n\tstatusCode int\n}\n\nfunc (rw *responseWriter) Write(b []byte) (int, error) {\n\trw.Header().Set(\"Content-Length\", strconv.Itoa(len(b)))\n\n\tif rw.statusCode == 0 {\n\t\trw.WriteHeader(http.StatusOK)\n\t}\n\n\tsize, err := rw.ResponseWriter.Write(b)\n\trw.body = b\n\treturn size, err\n}\n\nfunc (rw *responseWriter) WriteHeader(s int) {\n\trw.statusCode = s\n\trw.ResponseWriter.WriteHeader(s)\n}\n\n\/\/ Golang 1.5 introduces a http.Request.Cancel field,\n\/\/ of type <-chan struct{} which the lager library fails to deal with.\n\/\/ We introduced loggableHTTPRequest as a way of handling this\n\/\/ until such time as lager can deal with it.\n\/\/\n\/\/ Once lager can handle the request directly, remove this struct.\n\/\/ #101259402\ntype LoggableHTTPRequest struct {\n\tMethod string\n\tURL *url.URL\n\tProto string\n\tProtoMajor int\n\tProtoMinor int\n\tHeader http.Header\n\tBody io.ReadCloser\n\tContentLength int64\n\tTransferEncoding []string\n\tClose bool\n\tHost string\n\tForm url.Values\n\tPostForm url.Values\n\tMultipartForm *multipart.Form\n\tTrailer http.Header\n\tRemoteAddr string\n\tRequestURI string\n\tTLS *tls.ConnectionState\n}\n\nfunc fromHTTPRequest(req http.Request) LoggableHTTPRequest {\n\treturn LoggableHTTPRequest{\n\t\tMethod: req.Method,\n\t\tURL: req.URL,\n\t\tProto: req.Proto,\n\t\tProtoMajor: req.ProtoMajor,\n\t\tProtoMinor: req.ProtoMinor,\n\t\tHeader: req.Header,\n\t\tBody: req.Body,\n\t\tContentLength: req.ContentLength,\n\t\tTransferEncoding: req.TransferEncoding,\n\t\tClose: req.Close,\n\t\tHost: req.Host,\n\t\tForm: req.Form,\n\t\tPostForm: req.PostForm,\n\t\tMultipartForm: req.MultipartForm,\n\t\tTrailer: req.Trailer,\n\t\tRemoteAddr: req.RemoteAddr,\n\t\tRequestURI: req.RequestURI,\n\t\tTLS: req.TLS,\n\t}\n}\n<commit_msg>Do not log http response body.<commit_after>package middleware\n\nimport (\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\ntype Logger struct {\n\tlogger lager.Logger\n}\n\nfunc NewLogger(logger lager.Logger) Middleware {\n\treturn Logger{\n\t\tlogger: logger,\n\t}\n}\n\nfunc (l Logger) Wrap(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\tloggingResponseWriter := responseWriter{\n\t\t\trw,\n\t\t\t[]byte{},\n\t\t\t0,\n\t\t}\n\t\tnext.ServeHTTP(&loggingResponseWriter, req)\n\n\t\trequestCopy := *req\n\t\trequestCopy.Header[\"Authorization\"] = nil\n\n\t\tresponse := map[string]interface{}{\n\t\t\t\"Header\": loggingResponseWriter.Header(),\n\t\t\t\"StatusCode\": loggingResponseWriter.statusCode,\n\t\t}\n\n\t\tl.logger.Debug(\"\", lager.Data{\n\t\t\t\"request\": fromHTTPRequest(requestCopy),\n\t\t\t\"response\": response,\n\t\t})\n\t})\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tbody []byte\n\tstatusCode int\n}\n\nfunc (rw *responseWriter) Write(b []byte) (int, error) {\n\trw.Header().Set(\"Content-Length\", strconv.Itoa(len(b)))\n\n\tif rw.statusCode == 0 {\n\t\trw.WriteHeader(http.StatusOK)\n\t}\n\n\tsize, err := rw.ResponseWriter.Write(b)\n\trw.body = b\n\treturn size, err\n}\n\nfunc (rw *responseWriter) WriteHeader(s int) {\n\trw.statusCode = s\n\trw.ResponseWriter.WriteHeader(s)\n}\n\n\/\/ Golang 1.5 introduces a http.Request.Cancel field,\n\/\/ of type <-chan struct{} which the lager library fails to deal with.\n\/\/ We introduced loggableHTTPRequest as a way of handling this\n\/\/ until such time as lager can deal with it.\n\/\/\n\/\/ Once lager can handle the request directly, remove this struct.\n\/\/ #101259402\ntype LoggableHTTPRequest struct {\n\tMethod string\n\tURL *url.URL\n\tProto string\n\tProtoMajor int\n\tProtoMinor int\n\tHeader http.Header\n\tBody io.ReadCloser\n\tContentLength int64\n\tTransferEncoding []string\n\tClose bool\n\tHost string\n\tForm url.Values\n\tPostForm url.Values\n\tMultipartForm *multipart.Form\n\tTrailer http.Header\n\tRemoteAddr string\n\tRequestURI string\n\tTLS *tls.ConnectionState\n}\n\nfunc fromHTTPRequest(req http.Request) LoggableHTTPRequest {\n\treturn LoggableHTTPRequest{\n\t\tMethod: req.Method,\n\t\tURL: req.URL,\n\t\tProto: req.Proto,\n\t\tProtoMajor: req.ProtoMajor,\n\t\tProtoMinor: req.ProtoMinor,\n\t\tHeader: req.Header,\n\t\tBody: req.Body,\n\t\tContentLength: req.ContentLength,\n\t\tTransferEncoding: req.TransferEncoding,\n\t\tClose: req.Close,\n\t\tHost: req.Host,\n\t\tForm: req.Form,\n\t\tPostForm: req.PostForm,\n\t\tMultipartForm: req.MultipartForm,\n\t\tTrailer: req.Trailer,\n\t\tRemoteAddr: req.RemoteAddr,\n\t\tRequestURI: req.RequestURI,\n\t\tTLS: req.TLS,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Aaron Meihm ameihm@mozilla.com [:alm]\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"mig.ninja\/mig\"\n\t\"os\/exec\"\n\t\"path\"\n)\n\nvar pluginList []plugin\n\ntype plugin struct {\n\tname string\n\tpath string\n}\n\nfunc runPlugin(r mig.RunnerResult) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"runPlugin() -> %v\", e)\n\t\t}\n\t}()\n\n\tvar pent *plugin\n\tfor i := range pluginList {\n\t\tif pluginList[i].name == r.UsePlugin {\n\t\t\tpent = &pluginList[i]\n\t\t\tbreak\n\t\t}\n\t}\n\tif pent == nil {\n\t\tpanic(\"unable to locate plugin\")\n\t}\n\n\tgo func() {\n\t\tbuf, err := json.Marshal(r)\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\tc := exec.Command(pent.path)\n\t\tstdin, err := c.StdinPipe()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\terr = c.Start()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\twb, err := stdin.Write(buf)\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t}\n\t\tstdin.Close()\n\t\terr = c.Wait()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t}\n\t\tmlog(\"%v: wrote %v bytes to plugin\", pent.name, wb)\n\t}()\n\n\treturn nil\n}\n\nfunc loadPlugins() (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"loadPlugins() -> %v\", e)\n\t\t}\n\t}()\n\n\tpluginList = make([]plugin, 0)\n\n\t\/\/ Identify any available output plugins\n\tdirents, err := ioutil.ReadDir(ctx.Runner.PluginDirectory)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, x := range dirents {\n\t\tn := x.Name()\n\t\tm := x.Mode()\n\t\tif (m & 0111) == 0 {\n\t\t\tmlog(\"plugins: skipping %v (not executable)\", n)\n\t\t\tcontinue\n\t\t}\n\t\tppath := path.Join(ctx.Runner.PluginDirectory, n)\n\t\tmlog(\"plugins: registering %v\", n)\n\t\tnp := plugin{}\n\t\tnp.name = n\n\t\tnp.path = ppath\n\t\tpluginList = append(pluginList, np)\n\t}\n\n\treturn nil\n}\n<commit_msg>[minor] remove unneccessary make() call, more explicit variable names<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Aaron Meihm ameihm@mozilla.com [:alm]\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"mig.ninja\/mig\"\n\t\"os\/exec\"\n\t\"path\"\n)\n\nvar pluginList []plugin\n\ntype plugin struct {\n\tname string\n\tpath string\n}\n\nfunc runPlugin(r mig.RunnerResult) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"runPlugin() -> %v\", e)\n\t\t}\n\t}()\n\n\tvar pent *plugin\n\tfor i := range pluginList {\n\t\tif pluginList[i].name == r.UsePlugin {\n\t\t\tpent = &pluginList[i]\n\t\t\tbreak\n\t\t}\n\t}\n\tif pent == nil {\n\t\tpanic(\"unable to locate plugin\")\n\t}\n\n\tgo func() {\n\t\tbuf, err := json.Marshal(r)\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\tc := exec.Command(pent.path)\n\t\tstdin, err := c.StdinPipe()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\terr = c.Start()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t\treturn\n\t\t}\n\t\twb, err := stdin.Write(buf)\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t}\n\t\tstdin.Close()\n\t\terr = c.Wait()\n\t\tif err != nil {\n\t\t\tmlog(\"%v: %v\", pent.name, err)\n\t\t}\n\t\tmlog(\"%v: wrote %v bytes to plugin\", pent.name, wb)\n\t}()\n\n\treturn nil\n}\n\nfunc loadPlugins() (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"loadPlugins() -> %v\", e)\n\t\t}\n\t}()\n\n\t\/\/ Identify any available output plugins\n\tdirents, err := ioutil.ReadDir(ctx.Runner.PluginDirectory)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, pluginEnt := range dirents {\n\t\tpluginName := pluginEnt.Name()\n\t\tpluginMode := pluginEnt.Mode()\n\t\tif (pluginMode & 0111) == 0 {\n\t\t\tmlog(\"plugins: skipping %v (not executable)\", pluginName)\n\t\t\tcontinue\n\t\t}\n\t\tppath := path.Join(ctx.Runner.PluginDirectory, pluginName)\n\t\tmlog(\"plugins: registering %v\", pluginName)\n\t\tnp := plugin{\n\t\t\tname: pluginName,\n\t\t\tpath: ppath,\n\t\t}\n\t\tpluginList = append(pluginList, np)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"encoding\/hex\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/bytom\/consensus\"\n\t\"github.com\/bytom\/protocol\/bc\"\n\t\"github.com\/bytom\/protocol\/bc\/types\"\n)\n\nfunc genesisTx() *types.Tx {\n\tcontract, err := hex.DecodeString(\"00148c9d063ff74ee6d9ffa88d83aeb038068366c4c4\")\n\tif err != nil {\n\t\tlog.Panicf(\"fail on decode genesis tx output control program\")\n\t}\n\n\ttxData := types.TxData{\n\t\tVersion: 1,\n\t\tInputs: []*types.TxInput{\n\t\t\ttypes.NewCoinbaseInput([]byte(\"Information is power. -- Jan\/11\/2013. Computing is power. -- Apr\/24\/2018.\")),\n\t\t},\n\t\tOutputs: []*types.TxOutput{\n\t\t\ttypes.NewTxOutput(*consensus.BTMAssetID, consensus.InitialBlockSubsidy, contract),\n\t\t},\n\t}\n\treturn types.NewTx(txData)\n}\n\n\/\/ GenesisBlock will return genesis block\nfunc GenesisBlock() *types.Block {\n\ttx := genesisTx()\n\ttxStatus := bc.NewTransactionStatus()\n\ttxStatus.SetStatus(0, false)\n\ttxStatusHash, err := bc.TxStatusMerkleRoot(txStatus.VerifyStatus)\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx status merkle root\")\n\t}\n\n\tmerkleRoot, err := bc.TxMerkleRoot([]*bc.Tx{tx.Tx})\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx merkel root\")\n\t}\n\n\tblock := &types.Block{\n\t\tBlockHeader: types.BlockHeader{\n\t\t\tVersion: 1,\n\t\t\tHeight: 0,\n\t\t\tNonce: 9253507043297,\n\t\t\tTimestamp: 1524549600,\n\t\t\tBits: 2161727821137910632,\n\t\t\tBlockCommitment: types.BlockCommitment{\n\t\t\t\tTransactionsMerkleRoot: merkleRoot,\n\t\t\t\tTransactionStatusHash: txStatusHash,\n\t\t\t},\n\t\t},\n\t\tTransactions: []*types.Tx{tx},\n\t}\n\treturn block\n}\n<commit_msg>genesis block support mutil net (#1036)<commit_after>package config\n\nimport (\n\t\"encoding\/hex\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/bytom\/consensus\"\n\t\"github.com\/bytom\/protocol\/bc\"\n\t\"github.com\/bytom\/protocol\/bc\/types\"\n)\n\nfunc genesisTx() *types.Tx {\n\tcontract, err := hex.DecodeString(\"00148c9d063ff74ee6d9ffa88d83aeb038068366c4c4\")\n\tif err != nil {\n\t\tlog.Panicf(\"fail on decode genesis tx output control program\")\n\t}\n\n\ttxData := types.TxData{\n\t\tVersion: 1,\n\t\tInputs: []*types.TxInput{\n\t\t\ttypes.NewCoinbaseInput([]byte(\"Information is power. -- Jan\/11\/2013. Computing is power. -- Apr\/24\/2018.\")),\n\t\t},\n\t\tOutputs: []*types.TxOutput{\n\t\t\ttypes.NewTxOutput(*consensus.BTMAssetID, consensus.InitialBlockSubsidy, contract),\n\t\t},\n\t}\n\treturn types.NewTx(txData)\n}\n\nfunc mainNetGenesisBlock() *types.Block {\n\ttx := genesisTx()\n\ttxStatus := bc.NewTransactionStatus()\n\ttxStatus.SetStatus(0, false)\n\ttxStatusHash, err := bc.TxStatusMerkleRoot(txStatus.VerifyStatus)\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx status merkle root\")\n\t}\n\n\tmerkleRoot, err := bc.TxMerkleRoot([]*bc.Tx{tx.Tx})\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx merkel root\")\n\t}\n\n\tblock := &types.Block{\n\t\tBlockHeader: types.BlockHeader{\n\t\t\tVersion: 1,\n\t\t\tHeight: 0,\n\t\t\tNonce: 9253507043297,\n\t\t\tTimestamp: 1524549600,\n\t\t\tBits: 2161727821137910632,\n\t\t\tBlockCommitment: types.BlockCommitment{\n\t\t\t\tTransactionsMerkleRoot: merkleRoot,\n\t\t\t\tTransactionStatusHash: txStatusHash,\n\t\t\t},\n\t\t},\n\t\tTransactions: []*types.Tx{tx},\n\t}\n\treturn block\n}\n\nfunc testNetGenesisBlock() *types.Block {\n\ttx := genesisTx()\n\ttxStatus := bc.NewTransactionStatus()\n\ttxStatus.SetStatus(0, false)\n\ttxStatusHash, err := bc.TxStatusMerkleRoot(txStatus.VerifyStatus)\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx status merkle root\")\n\t}\n\n\tmerkleRoot, err := bc.TxMerkleRoot([]*bc.Tx{tx.Tx})\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx merkel root\")\n\t}\n\n\tblock := &types.Block{\n\t\tBlockHeader: types.BlockHeader{\n\t\t\tVersion: 1,\n\t\t\tHeight: 0,\n\t\t\tNonce: 9253507043297,\n\t\t\tTimestamp: 1528455800,\n\t\t\tBits: 2233785415178221890,\n\t\t\tBlockCommitment: types.BlockCommitment{\n\t\t\t\tTransactionsMerkleRoot: merkleRoot,\n\t\t\t\tTransactionStatusHash: txStatusHash,\n\t\t\t},\n\t\t},\n\t\tTransactions: []*types.Tx{tx},\n\t}\n\treturn block\n}\n\nfunc soloNetGenesisBlock() *types.Block {\n\ttx := genesisTx()\n\ttxStatus := bc.NewTransactionStatus()\n\ttxStatus.SetStatus(0, false)\n\ttxStatusHash, err := bc.TxStatusMerkleRoot(txStatus.VerifyStatus)\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx status merkle root\")\n\t}\n\n\tmerkleRoot, err := bc.TxMerkleRoot([]*bc.Tx{tx.Tx})\n\tif err != nil {\n\t\tlog.Panicf(\"fail on calc genesis tx merkel root\")\n\t}\n\n\tblock := &types.Block{\n\t\tBlockHeader: types.BlockHeader{\n\t\t\tVersion: 1,\n\t\t\tHeight: 0,\n\t\t\tNonce: 9253507043297,\n\t\t\tTimestamp: uint64(time.Now().Unix()),\n\t\t\tBits: 2305843009214532812,\n\t\t\tBlockCommitment: types.BlockCommitment{\n\t\t\t\tTransactionsMerkleRoot: merkleRoot,\n\t\t\t\tTransactionStatusHash: txStatusHash,\n\t\t\t},\n\t\t},\n\t\tTransactions: []*types.Tx{tx},\n\t}\n\treturn block\n}\n\n\/\/ GenesisBlock will return genesis block\nfunc GenesisBlock() *types.Block {\n\treturn map[string]func() *types.Block{\n\t\t\"main\": mainNetGenesisBlock,\n\t\t\"test\": testNetGenesisBlock,\n\t\t\"solo\": soloNetGenesisBlock,\n\t}[consensus.ActiveNetParams.Name]()\n}\n<|endoftext|>"} {"text":"<commit_before>package mixpanel\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ The official base URL\nconst MixpanelBaseURL = \"https:\/\/data.mixpanel.com\/api\/2.0\/export\"\n\n\/\/ Key into the EventData map that contains the UUID of this event. Name is\n\/\/ chosen to make collisions with actual keys very unlikely.\nconst EventIDKey = \"$__$$event_id\"\n\n\/\/ Mixpanel struct represents a set of credentials used to access the Mixpanel\n\/\/ API for a particular product.\ntype Mixpanel struct {\n\tProduct string\n\tKey string\n\tSecret string\n\tBaseURL string\n}\n\n\/\/ EventData is a representation of each individual JSON record spit out of the\n\/\/ export process.\ntype EventData map[string]interface{}\n\n\/\/ New creates a Mixpanel object with the given API credentials and uses the\n\/\/ official API URL.\nfunc New(product, key, secret string) *Mixpanel {\n\treturn NewWithURL(product, key, secret, MixpanelBaseURL)\n}\n\n\/\/ NewWithURL creates a Mixpanel object with the given API credentials and a\n\/\/ custom Mixpanel API URL.\n\/\/\n\/\/ I doubt this will ever be useful but there you go.\nfunc NewWithURL(product, key, secret, baseURL string) *Mixpanel {\n\tm := new(Mixpanel)\n\tm.Product = product\n\tm.Key = key\n\tm.Secret = secret\n\tm.BaseURL = baseURL\n\treturn m\n}\n\n\/\/ Add the cryptographic signature that Mixpanel API requests require.\n\/\/\n\/\/ FIXME: Pretty sure sorting the pairs isn't necessary. Double check.\n\/\/\n\/\/ Algorithm:\n\/\/ - join key=value pairs\n\/\/ - sort the pairs alphabetically\n\/\/ - appending a secret\n\/\/ - take MD5 hex digest.\nfunc (m *Mixpanel) addSignature(args *url.Values) {\n\thash := md5.New()\n\n\tvar params []string\n\tfor k, vs := range *args {\n\t\tfor _, v := range vs {\n\t\t\tparams = append(params, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t}\n\n\tsort.StringSlice(params).Sort()\n\n\tio.WriteString(hash, strings.Join(params, \"\")+m.Secret)\n\n\targs.Set(\"sig\", fmt.Sprintf(\"%x\", hash.Sum(nil)))\n}\n\n\/\/ Generate the initial, base arguments that should be common to all Mixpanel\n\/\/ API requests being created here.\nfunc (m *Mixpanel) makeArgs(date time.Time) url.Values {\n\targs := url.Values{}\n\n\targs.Set(\"format\", \"json\")\n\targs.Set(\"api_key\", m.Key)\n\targs.Set(\"expire\", fmt.Sprintf(\"%d\", time.Now().Unix()+10000))\n\n\tday := date.Format(\"2006-01-02\")\n\n\targs.Set(\"from_date\", day)\n\targs.Set(\"to_date\", day)\n\n\treturn args\n}\n\n\/\/ ExportDate downloads event data for the given day and streams the resulting\n\/\/ transformed JSON blobs as byte strings over the send-only channel passed\n\/\/ to the function.\n\/\/\n\/\/ The optional `moreArgs` parameter can be given to add additional URL\n\/\/ parameters to the API request.\nfunc (m *Mixpanel) ExportDate(date time.Time, output chan<- EventData, moreArgs *url.Values) error {\n\targs := m.makeArgs(date)\n\n\tif moreArgs != nil {\n\t\tfor k, vs := range *moreArgs {\n\t\t\tfor _, v := range vs {\n\t\t\t\targs.Add(k, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tm.addSignature(&args)\n\n\tresp, err := http.Get(fmt.Sprintf(\"%s?%s\", m.BaseURL, args.Encode()))\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: download failed: %s\", m.Product, err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn m.TransformEventData(resp.Body, output)\n}\n\n\/\/ TransformEventData reads JSON objects line by line from `input`, performs a\n\/\/ simple translation, and pipes the result back out through the `output` chan.\n\/\/\n\/\/ The transformation effectively folds the properties map into the top level\n\/\/ and attaches product information.\n\/\/\n\/\/ Input : `{\"event\": \"...\", \"properties\": {\"k\": \"v\"}}`\n\/\/ Output: `{\"event\": \"...\", \"product: \"...\", \"k\": \"v\", ...}`\nfunc (m *Mixpanel) TransformEventData(input io.Reader, output chan<- EventData) error {\n\tdecoder := json.NewDecoder(input)\n\n\t\/\/ Don't default all numeric values to float\n\tdecoder.UseNumber()\n\n\tfor {\n\t\tvar ev struct {\n\t\t\tError *string\n\t\t\tEvent string\n\t\t\tProperties map[string]interface{}\n\t\t}\n\n\t\tif err := decoder.Decode(&ev); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn fmt.Errorf(\"%s: Failed to parse JSON: %s\", m.Product, err)\n\t\t} else if ev.Error != nil {\n\t\t\treturn fmt.Errorf(\"%s: API error: %s\", m.Product, *ev.Error)\n\t\t}\n\n\t\tif id, err := uuid.NewV4(); err == nil {\n\t\t\tev.Properties[EventIDKey] = id.String()\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"%s: generating UUID failed: %s\", m.Product, err)\n\t\t}\n\n\t\tev.Properties[\"product\"] = m.Product\n\t\tev.Properties[\"event\"] = ev.Event\n\n\t\toutput <- ev.Properties\n\t}\n\n\treturn nil\n}\n<commit_msg>Sorting params is definitely necessary for signature<commit_after>package mixpanel\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ The official base URL\nconst MixpanelBaseURL = \"https:\/\/data.mixpanel.com\/api\/2.0\/export\"\n\n\/\/ Key into the EventData map that contains the UUID of this event. Name is\n\/\/ chosen to make collisions with actual keys very unlikely.\nconst EventIDKey = \"$__$$event_id\"\n\n\/\/ Mixpanel struct represents a set of credentials used to access the Mixpanel\n\/\/ API for a particular product.\ntype Mixpanel struct {\n\tProduct string\n\tKey string\n\tSecret string\n\tBaseURL string\n}\n\n\/\/ EventData is a representation of each individual JSON record spit out of the\n\/\/ export process.\ntype EventData map[string]interface{}\n\n\/\/ New creates a Mixpanel object with the given API credentials and uses the\n\/\/ official API URL.\nfunc New(product, key, secret string) *Mixpanel {\n\treturn NewWithURL(product, key, secret, MixpanelBaseURL)\n}\n\n\/\/ NewWithURL creates a Mixpanel object with the given API credentials and a\n\/\/ custom Mixpanel API URL.\n\/\/\n\/\/ I doubt this will ever be useful but there you go.\nfunc NewWithURL(product, key, secret, baseURL string) *Mixpanel {\n\tm := new(Mixpanel)\n\tm.Product = product\n\tm.Key = key\n\tm.Secret = secret\n\tm.BaseURL = baseURL\n\treturn m\n}\n\n\/\/ Add the cryptographic signature that Mixpanel API requests require.\n\/\/\n\/\/ Algorithm:\n\/\/ - join key=value pairs\n\/\/ - sort the pairs alphabetically\n\/\/ - appending a secret\n\/\/ - take MD5 hex digest.\nfunc (m *Mixpanel) addSignature(args *url.Values) {\n\thash := md5.New()\n\n\tvar params []string\n\tfor k, vs := range *args {\n\t\tfor _, v := range vs {\n\t\t\tparams = append(params, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t}\n\n\tsort.StringSlice(params).Sort()\n\n\tio.WriteString(hash, strings.Join(params, \"\")+m.Secret)\n\targs.Set(\"sig\", fmt.Sprintf(\"%x\", hash.Sum(nil)))\n}\n\n\/\/ Generate the initial, base arguments that should be common to all Mixpanel\n\/\/ API requests being created here.\nfunc (m *Mixpanel) makeArgs(date time.Time) url.Values {\n\targs := url.Values{}\n\n\targs.Set(\"format\", \"json\")\n\targs.Set(\"api_key\", m.Key)\n\targs.Set(\"expire\", fmt.Sprintf(\"%d\", time.Now().Unix()+10000))\n\n\tday := date.Format(\"2006-01-02\")\n\n\targs.Set(\"from_date\", day)\n\targs.Set(\"to_date\", day)\n\n\treturn args\n}\n\n\/\/ ExportDate downloads event data for the given day and streams the resulting\n\/\/ transformed JSON blobs as byte strings over the send-only channel passed\n\/\/ to the function.\n\/\/\n\/\/ The optional `moreArgs` parameter can be given to add additional URL\n\/\/ parameters to the API request.\nfunc (m *Mixpanel) ExportDate(date time.Time, output chan<- EventData, moreArgs *url.Values) error {\n\targs := m.makeArgs(date)\n\n\tif moreArgs != nil {\n\t\tfor k, vs := range *moreArgs {\n\t\t\tfor _, v := range vs {\n\t\t\t\targs.Add(k, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tm.addSignature(&args)\n\n\tresp, err := http.Get(fmt.Sprintf(\"%s?%s\", m.BaseURL, args.Encode()))\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: download failed: %s\", m.Product, err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn m.TransformEventData(resp.Body, output)\n}\n\n\/\/ TransformEventData reads JSON objects line by line from `input`, performs a\n\/\/ simple translation, and pipes the result back out through the `output` chan.\n\/\/\n\/\/ The transformation effectively folds the properties map into the top level\n\/\/ and attaches product information.\n\/\/\n\/\/ Input : `{\"event\": \"...\", \"properties\": {\"k\": \"v\"}}`\n\/\/ Output: `{\"event\": \"...\", \"product: \"...\", \"k\": \"v\", ...}`\nfunc (m *Mixpanel) TransformEventData(input io.Reader, output chan<- EventData) error {\n\tdecoder := json.NewDecoder(input)\n\n\t\/\/ Don't default all numeric values to float\n\tdecoder.UseNumber()\n\n\tfor {\n\t\tvar ev struct {\n\t\t\tError *string\n\t\t\tEvent string\n\t\t\tProperties map[string]interface{}\n\t\t}\n\n\t\tif err := decoder.Decode(&ev); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn fmt.Errorf(\"%s: Failed to parse JSON: %s\", m.Product, err)\n\t\t} else if ev.Error != nil {\n\t\t\treturn fmt.Errorf(\"%s: API error: %s\", m.Product, *ev.Error)\n\t\t}\n\n\t\tif id, err := uuid.NewV4(); err == nil {\n\t\t\tev.Properties[EventIDKey] = id.String()\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"%s: generating UUID failed: %s\", m.Product, err)\n\t\t}\n\n\t\tev.Properties[\"product\"] = m.Product\n\t\tev.Properties[\"event\"] = ev.Event\n\n\t\toutput <- ev.Properties\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mod_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/expfmt\"\n\t\"github.com\/robustirc\/bridge\/robustsession\"\n\t\"github.com\/robustirc\/internal\/health\"\n\t\"github.com\/robustirc\/robustirc\/internal\/localnet\"\n\t\"github.com\/robustirc\/robustirc\/internal\/robust\"\n)\n\nfunc TestMessageOfDeath(t *testing.T) {\n\ttempdir, err := ioutil.TempDir(\"\", \"robustirc-message-of-death-\")\n\tif err != nil {\n\t\tt.Fatalf(\"Could not create tempdir: %v\", err)\n\t}\n\n\tl, err := localnet.NewLocalnet(-1, tempdir)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not start local RobustIRC network: %v\", err)\n\t}\n\tdefer l.Kill(true)\n\n\tl.EnablePanicCommand = \"1\"\n\n\t\/\/ For each of the nodes, start a goroutine that verifies that the node crashes, then start it again\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < 3; i++ {\n\t\tcmd, tempdir, addr := l.StartIRCServer(i == 0)\n\t\twg.Add(1)\n\t\tgo func(cmd *exec.Cmd, tempdir string, addr string) {\n\t\t\tdefer wg.Done()\n\n\t\t\tterminated := make(chan error)\n\t\t\tskipped := make(chan bool)\n\n\t\t\tgo func() {\n\t\t\t\tterminated <- cmd.Wait()\n\t\t\t}()\n\n\t\t\tgo func() {\n\t\t\t\t\/\/ Poll messages of death counter.\n\t\t\t\tparser := &expfmt.TextParser{}\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\t\t\treq, err := http.NewRequest(\"GET\", addr+\"metrics\", nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tresp, err := l.Httpclient.Do(req)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tmetrics, err := parser.TextToMetricFamilies(resp.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tapplied, ok := metrics[\"applied_messages\"]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, m := range applied.GetMetric() {\n\t\t\t\t\t\tfor _, labelpair := range m.GetLabel() {\n\t\t\t\t\t\t\tif labelpair.GetName() == \"type\" &&\n\t\t\t\t\t\t\t\tlabelpair.GetValue() == robust.Type(robust.MessageOfDeath).String() {\n\t\t\t\t\t\t\t\tif m.GetCounter().GetValue() > 0 {\n\t\t\t\t\t\t\t\t\tskipped <- true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t\/\/ Wait for the server to either crash or skip a message of death.\n\t\t\tselect {\n\t\t\tcase <-terminated:\n\t\t\t\tt.Logf(\"Node terminated (as expected)\")\n\t\t\tcase <-skipped:\n\t\t\t\tt.Logf(\"Node skipped message of death\")\n\t\t\t}\n\n\t\t\t\/\/ Run restart.sh for that node.\n\t\t\trcmd := exec.Command(filepath.Join(tempdir, \"restart.sh\"))\n\t\t\tif err := rcmd.Start(); err != nil {\n\t\t\t\tt.Errorf(\"Cannot restart node: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tl.RecordResource(\"pid\", strconv.Itoa(cmd.Process.Pid))\n\n\t\t\t\/\/ Ensure the node comes back up.\n\t\t\tstarted := time.Now()\n\t\t\tfor time.Since(started) < 10*time.Second {\n\t\t\t\tif _, err := health.GetServerStatus(addr, l.NetworkPassword); err != nil {\n\t\t\t\t\tt.Logf(\"Node %s unhealthy: %v\", addr, err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Logf(\"Node %s became healthy\", addr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Errorf(\"Node did not become healthy within 10s\")\n\t\t}(cmd, tempdir, addr)\n\t}\n\n\t\/\/ Connect and send the PANIC message.\n\tsession, err := robustsession.Create(strings.Join(l.Servers(), \",\"), filepath.Join(tempdir, \"cert.pem\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Could not create robustsession: %v\", err)\n\t}\n\tfoundjoin := make(chan bool)\n\tgo func() {\n\t\tfor msg := range session.Messages {\n\t\t\tif !strings.HasPrefix(msg, \":mod!1@\") ||\n\t\t\t\t!strings.HasSuffix(msg, \" JOIN #mod\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase foundjoin <- true:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"Found JOIN too early or too late (channel write did not block)\")\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor err := range session.Errors {\n\t\t\tt.Errorf(\"RobustSession error: %v\", err)\n\t\t}\n\t}()\n\n\tsession.PostMessage(\"NICK mod\")\n\tsession.PostMessage(\"USER 1 2 3 4\")\n\tsession.PostMessage(\"PANIC\")\n\n\tt.Logf(\"Message of death sent\")\n\n\twg.Wait()\n\n\thealthy := false\n\tfor try := 0; try < 5; try++ {\n\t\tif l.Healthy() {\n\t\t\thealthy = true\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\tif !healthy {\n\t\tt.Fatalf(\"Expected recovery, but not all nodes are healthy\")\n\t}\n\n\t\/\/ Verify sending a JOIN now results in an output message.\n\tsession.PostMessage(\"JOIN #mod\")\n\tselect {\n\tcase <-foundjoin:\n\t\tt.Logf(\"JOIN reply received, network progressing\")\n\tcase <-time.After(10 * time.Second):\n\t\tt.Errorf(\"Timeout waiting for JOIN message\")\n\t}\n}\n<commit_msg>mod_test: record correct PID for teardown<commit_after>package mod_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/expfmt\"\n\t\"github.com\/robustirc\/bridge\/robustsession\"\n\t\"github.com\/robustirc\/internal\/health\"\n\t\"github.com\/robustirc\/robustirc\/internal\/localnet\"\n\t\"github.com\/robustirc\/robustirc\/internal\/robust\"\n)\n\nfunc TestMessageOfDeath(t *testing.T) {\n\ttempdir, err := ioutil.TempDir(\"\", \"robustirc-message-of-death-\")\n\tif err != nil {\n\t\tt.Fatalf(\"Could not create tempdir: %v\", err)\n\t}\n\n\tl, err := localnet.NewLocalnet(-1, tempdir)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not start local RobustIRC network: %v\", err)\n\t}\n\tdefer l.Kill(true)\n\n\tl.EnablePanicCommand = \"1\"\n\n\t\/\/ For each of the nodes, start a goroutine that verifies that the node crashes, then start it again\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < 3; i++ {\n\t\tcmd, tempdir, addr := l.StartIRCServer(i == 0)\n\t\twg.Add(1)\n\t\tgo func(cmd *exec.Cmd, tempdir string, addr string) {\n\t\t\tdefer wg.Done()\n\n\t\t\tterminated := make(chan error)\n\t\t\tskipped := make(chan bool)\n\n\t\t\tgo func() {\n\t\t\t\tterminated <- cmd.Wait()\n\t\t\t}()\n\n\t\t\tgo func() {\n\t\t\t\t\/\/ Poll messages of death counter.\n\t\t\t\tparser := &expfmt.TextParser{}\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\t\t\treq, err := http.NewRequest(\"GET\", addr+\"metrics\", nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tresp, err := l.Httpclient.Do(req)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tmetrics, err := parser.TextToMetricFamilies(resp.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tapplied, ok := metrics[\"applied_messages\"]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, m := range applied.GetMetric() {\n\t\t\t\t\t\tfor _, labelpair := range m.GetLabel() {\n\t\t\t\t\t\t\tif labelpair.GetName() == \"type\" &&\n\t\t\t\t\t\t\t\tlabelpair.GetValue() == robust.Type(robust.MessageOfDeath).String() {\n\t\t\t\t\t\t\t\tif m.GetCounter().GetValue() > 0 {\n\t\t\t\t\t\t\t\t\tskipped <- true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t\/\/ Wait for the server to either crash or skip a message of death.\n\t\t\tselect {\n\t\t\tcase <-terminated:\n\t\t\t\tt.Logf(\"Node terminated (as expected)\")\n\t\t\tcase <-skipped:\n\t\t\t\tt.Logf(\"Node skipped message of death\")\n\t\t\t}\n\n\t\t\t\/\/ Run restart.sh for that node.\n\t\t\trcmd := exec.Command(filepath.Join(tempdir, \"restart.sh\"))\n\t\t\tif err := rcmd.Start(); err != nil {\n\t\t\t\tt.Errorf(\"Cannot restart node: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tl.RecordResource(\"pid\", strconv.Itoa(rcmd.Process.Pid))\n\n\t\t\t\/\/ Ensure the node comes back up.\n\t\t\tstarted := time.Now()\n\t\t\tfor time.Since(started) < 10*time.Second {\n\t\t\t\tif _, err := health.GetServerStatus(addr, l.NetworkPassword); err != nil {\n\t\t\t\t\tt.Logf(\"Node %s unhealthy: %v\", addr, err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Logf(\"Node %s became healthy\", addr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Errorf(\"Node did not become healthy within 10s\")\n\t\t}(cmd, tempdir, addr)\n\t}\n\n\t\/\/ Connect and send the PANIC message.\n\tsession, err := robustsession.Create(strings.Join(l.Servers(), \",\"), filepath.Join(tempdir, \"cert.pem\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Could not create robustsession: %v\", err)\n\t}\n\tfoundjoin := make(chan bool)\n\tgo func() {\n\t\tfor msg := range session.Messages {\n\t\t\tif !strings.HasPrefix(msg, \":mod!1@\") ||\n\t\t\t\t!strings.HasSuffix(msg, \" JOIN #mod\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase foundjoin <- true:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"Found JOIN too early or too late (channel write did not block)\")\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor err := range session.Errors {\n\t\t\tt.Errorf(\"RobustSession error: %v\", err)\n\t\t}\n\t}()\n\n\tsession.PostMessage(\"NICK mod\")\n\tsession.PostMessage(\"USER 1 2 3 4\")\n\tsession.PostMessage(\"PANIC\")\n\n\tt.Logf(\"Message of death sent\")\n\n\twg.Wait()\n\n\thealthy := false\n\tfor try := 0; try < 5; try++ {\n\t\tif l.Healthy() {\n\t\t\thealthy = true\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\tif !healthy {\n\t\tt.Fatalf(\"Expected recovery, but not all nodes are healthy\")\n\t}\n\n\t\/\/ Verify sending a JOIN now results in an output message.\n\tsession.PostMessage(\"JOIN #mod\")\n\tselect {\n\tcase <-foundjoin:\n\t\tt.Logf(\"JOIN reply received, network progressing\")\n\tcase <-time.After(10 * time.Second):\n\t\tt.Errorf(\"Timeout waiting for JOIN message\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport \"encoding\/json\"\n\ntype ReferentialId string\ntype ReferentialSlug string\n\ntype Referential struct {\n\tid ReferentialId\n\tslug ReferentialSlug\n\n\tmanager Referentials\n\tmodel Model\n}\n\ntype Referentials interface {\n\tNew(slug ReferentialSlug) Referential\n\tFind(id ReferentialId) (Referential, bool)\n\tFindBySlug(slug ReferentialSlug) (Referential, bool)\n\tSave(stopArea *Referential) bool\n\tDelete(stopArea *Referential) bool\n\tLoad() error\n}\n\nvar referentials = NewMemoryReferentials()\n\nfunc (referential *Referential) Id() ReferentialId {\n\treturn referential.id\n}\n\nfunc (referential *Referential) Slug() ReferentialSlug {\n\treturn referential.slug\n}\n\nfunc (referential *Referential) Model() Model {\n\treturn referential.model\n}\n\nfunc (referential *Referential) Save() (ok bool) {\n\tok = referential.manager.Save(referential)\n\treturn\n}\n\nfunc (referential *Referential) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\n\t\t\"Id\": referential.id,\n\t\t\"Slug\": referential.slug,\n\t})\n}\n\ntype MemoryReferentials struct {\n\tUUIDConsumer\n\n\tbyId map[ReferentialId]*Referential\n}\n\nfunc NewMemoryReferentials() *MemoryReferentials {\n\treturn &MemoryReferentials{\n\t\tbyId: make(map[ReferentialId]*Referential),\n\t}\n}\n\nfunc CurrentReferentials() Referentials {\n\treturn referentials\n}\n\nfunc (manager *MemoryReferentials) New(slug ReferentialSlug) Referential {\n\tmodel := NewMemoryModel()\n\treturn Referential{slug: slug, manager: manager, model: model}\n}\n\nfunc (manager *MemoryReferentials) NewWithId(slug ReferentialSlug, id ReferentialId) Referential {\n\tmodel := NewMemoryModel()\n\treturn Referential{id: id, slug: slug, manager: manager, model: model}\n}\n\nfunc (manager *MemoryReferentials) Find(id ReferentialId) (Referential, bool) {\n\treferential, ok := manager.byId[id]\n\tif ok {\n\t\treturn *referential, true\n\t} else {\n\t\treturn Referential{}, false\n\t}\n}\n\nfunc (manager *MemoryReferentials) FindBySlug(slug ReferentialSlug) (Referential, bool) {\n\tfor _, referential := range manager.byId {\n\t\tif referential.slug == slug {\n\t\t\treturn *referential, true\n\t\t}\n\t}\n\treturn Referential{}, false\n}\n\nfunc (manager *MemoryReferentials) Save(referential *Referential) bool {\n\tif referential.Id() == \"\" {\n\t\treferential.id = ReferentialId(manager.NewUUID())\n\t}\n\treferential.manager = manager\n\tmanager.byId[referential.Id()] = referential\n\treturn true\n}\n\nfunc (manager *MemoryReferentials) Delete(referential *Referential) bool {\n\tdelete(manager.byId, referential.Id())\n\treturn true\n}\n\nfunc (manager *MemoryReferentials) Load() error {\n\tvar selectReferentials []struct {\n\t\tId string `db:\"referential_id\"`\n\t\tSlug string\n\t}\n\t_, err := Database.Select(&selectReferentials, \"select * from referentials\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range selectReferentials {\n\t\treferential := manager.NewWithId(ReferentialSlug(r.Slug), ReferentialId(r.Id))\n\t\tmanager.Save(&referential)\n\t}\n\n\treturn nil\n}\n<commit_msg>small change on referentials Load<commit_after>package model\n\nimport \"encoding\/json\"\n\ntype ReferentialId string\ntype ReferentialSlug string\n\ntype Referential struct {\n\tid ReferentialId\n\tslug ReferentialSlug\n\n\tmanager Referentials\n\tmodel Model\n}\n\ntype Referentials interface {\n\tNew(slug ReferentialSlug) Referential\n\tFind(id ReferentialId) (Referential, bool)\n\tFindBySlug(slug ReferentialSlug) (Referential, bool)\n\tSave(stopArea *Referential) bool\n\tDelete(stopArea *Referential) bool\n\tLoad() error\n}\n\nvar referentials = NewMemoryReferentials()\n\nfunc (referential *Referential) Id() ReferentialId {\n\treturn referential.id\n}\n\nfunc (referential *Referential) Slug() ReferentialSlug {\n\treturn referential.slug\n}\n\nfunc (referential *Referential) Model() Model {\n\treturn referential.model\n}\n\nfunc (referential *Referential) Save() (ok bool) {\n\tok = referential.manager.Save(referential)\n\treturn\n}\n\nfunc (referential *Referential) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\n\t\t\"Id\": referential.id,\n\t\t\"Slug\": referential.slug,\n\t})\n}\n\ntype MemoryReferentials struct {\n\tUUIDConsumer\n\n\tbyId map[ReferentialId]*Referential\n}\n\nfunc NewMemoryReferentials() *MemoryReferentials {\n\treturn &MemoryReferentials{\n\t\tbyId: make(map[ReferentialId]*Referential),\n\t}\n}\n\nfunc CurrentReferentials() Referentials {\n\treturn referentials\n}\n\nfunc (manager *MemoryReferentials) New(slug ReferentialSlug) Referential {\n\tmodel := NewMemoryModel()\n\treturn Referential{slug: slug, manager: manager, model: model}\n}\n\nfunc (manager *MemoryReferentials) NewWithId(slug ReferentialSlug, id ReferentialId) Referential {\n\tmodel := NewMemoryModel()\n\treturn Referential{id: id, slug: slug, manager: manager, model: model}\n}\n\nfunc (manager *MemoryReferentials) Find(id ReferentialId) (Referential, bool) {\n\treferential, ok := manager.byId[id]\n\tif ok {\n\t\treturn *referential, true\n\t} else {\n\t\treturn Referential{}, false\n\t}\n}\n\nfunc (manager *MemoryReferentials) FindBySlug(slug ReferentialSlug) (Referential, bool) {\n\tfor _, referential := range manager.byId {\n\t\tif referential.slug == slug {\n\t\t\treturn *referential, true\n\t\t}\n\t}\n\treturn Referential{}, false\n}\n\nfunc (manager *MemoryReferentials) Save(referential *Referential) bool {\n\tif referential.Id() == \"\" {\n\t\treferential.id = ReferentialId(manager.NewUUID())\n\t}\n\treferential.manager = manager\n\tmanager.byId[referential.Id()] = referential\n\treturn true\n}\n\nfunc (manager *MemoryReferentials) Delete(referential *Referential) bool {\n\tdelete(manager.byId, referential.Id())\n\treturn true\n}\n\nfunc (manager *MemoryReferentials) Load() error {\n\tvar selectReferentials []struct {\n\t\tReferential_id string\n\t\tSlug string\n\t}\n\t_, err := Database.Select(&selectReferentials, \"select * from referentials\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range selectReferentials {\n\t\treferential := manager.NewWithId(ReferentialSlug(r.Slug), ReferentialId(r.Referential_id))\n\t\tmanager.Save(&referential)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\/\/\"errors\"\n\t\"database\/sql\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"gopkg.in\/DATA-DOG\/go-sqlmock.v1\"\n\t\"testing\"\n\n\t\"github.com\/eirka\/eirka-libs\/db\"\n\te \"github.com\/eirka\/eirka-libs\/errors\"\n)\n\nfunc TestEmailIsValid(t *testing.T) {\n\n\temail := EmailModel{\n\t\tUid: 1,\n\t\tName: \"test\",\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\tassert.False(t, email.IsValid(), \"Should be false\")\n\n}\n\nfunc TestEmailValidate(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\trows := sqlmock.NewRows([]string{\"name\", \"email\"}).AddRow(\"test\", \"old@test.com\")\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnRows(rows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, email.Name, \"test\", \"Should match\")\n\t\tassert.Equal(t, email.CurrentEmail, \"old@test.com\", \"Should match\")\n\t}\n\n}\n\nfunc TestEmailValidateBadEmails(t *testing.T) {\n\n\tvar err error\n\n\tfirst := EmailModel{\n\t\tEmail: \"notanemail\",\n\t}\n\n\terr = first.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrInvalidEmail, \"Error should match\")\n\t}\n\n\tsecond := EmailModel{\n\t\tEmail: \"not@anemail\",\n\t}\n\n\terr = second.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrInvalidEmail, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailValidateNoUser(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnError(sql.ErrNoRows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrUserNotExist, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailValidateSameEmail(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\trows := sqlmock.NewRows([]string{\"name\", \"email\"}).AddRow(\"test\", \"cool@test.com\")\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnRows(rows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrEmailSame, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailUpdate(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tmock.ExpectExec(\"UPDATE users SET user_email\").\n\t\tWithArgs(\"cool@test.com\", 2).\n\t\tWillReturnResult(sqlmock.NewResult(1, 1))\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n}\n<commit_msg>add model tests<commit_after>package models\n\nimport (\n\t\/\/\"errors\"\n\t\"database\/sql\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"gopkg.in\/DATA-DOG\/go-sqlmock.v1\"\n\t\"testing\"\n\n\t\"github.com\/eirka\/eirka-libs\/db\"\n\te \"github.com\/eirka\/eirka-libs\/errors\"\n)\n\nfunc TestEmailIsValid(t *testing.T) {\n\n\temail := EmailModel{\n\t\tUid: 1,\n\t\tName: \"test\",\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\tassert.False(t, email.IsValid(), \"Should be false\")\n\n}\n\nfunc TestEmailValidate(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\trows := sqlmock.NewRows([]string{\"name\", \"email\"}).AddRow(\"test\", \"old@test.com\")\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnRows(rows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, email.Name, \"test\", \"Should match\")\n\t\tassert.Equal(t, email.CurrentEmail, \"old@test.com\", \"Should match\")\n\t}\n\n}\n\nfunc TestEmailValidateBadEmails(t *testing.T) {\n\n\tvar err error\n\n\tfirst := EmailModel{\n\t\tEmail: \"notanemail\",\n\t}\n\n\terr = first.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrInvalidEmail, \"Error should match\")\n\t}\n\n\tsecond := EmailModel{\n\t\tEmail: \"not@anemail\",\n\t}\n\n\terr = second.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrInvalidEmail, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailValidateNoUser(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnError(sql.ErrNoRows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrUserNotExist, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailValidateSameEmail(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\trows := sqlmock.NewRows([]string{\"name\", \"email\"}).AddRow(\"test\", \"cool@test.com\")\n\tmock.ExpectQuery(`SELECT user_name,user_email FROM users WHERE user_id`).WillReturnRows(rows)\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Validate()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, e.ErrEmailSame, \"Error should match\")\n\t}\n\n}\n\nfunc TestEmailUpdate(t *testing.T) {\n\n\tvar err error\n\n\tmock, err := db.NewTestDb()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tmock.ExpectExec(\"UPDATE users SET user_email\").\n\t\tWithArgs(\"cool@test.com\", 2).\n\t\tWillReturnResult(sqlmock.NewResult(1, 1))\n\n\temail := EmailModel{\n\t\tUid: 2,\n\t\tEmail: \"cool@test.com\",\n\t}\n\n\terr = email.Update()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n<|endoftext|>"} {"text":"<commit_before>package preparable\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/struCoder\/pmgo\/lib\/process\"\n)\n\n\/\/ ProcPreparable is a preparable with all the necessary informations to run\n\/\/ a process. To actually run a process, call the Start() method.\ntype ProcPreparable interface {\n\tPrepareBin(bool) ([]byte, error)\n\tStart() (process.ProcContainer, error)\n\tgetPath() string\n\tIdentifier() string\n\tgetBinPath() string\n\tgetPidPath() string\n\tgetOutPath() string\n\tgetErrPath() string\n}\n\ntype Preparable struct {\n\tName string\n\tSourcePath string\n\tCmd string\n\tSysFolder string\n\tLanguage string\n\tKeepAlive bool\n\tArgs []string\n}\n\n\/\/ PrepareBin will compile the Golang project from SourcePath and populate Cmd with the proper\n\/\/ command for the process to be executed.\n\/\/ Returns the compile command output.\nfunc (preparable *Preparable) PrepareBin(fromBinFile bool) ([]byte, error) {\n\t\/\/ Remove the last character '\/' if present\n\tif preparable.SourcePath[len(preparable.SourcePath)-1] == '\/' {\n\t\tpreparable.SourcePath = strings.TrimSuffix(preparable.SourcePath, \"\/\")\n\t}\n\tcmd := \"\"\n\tcmdArgs := []string{}\n\tbinPath := preparable.getBinPath()\n\n\tif preparable.Language == \"go\" {\n\t\tif fromBinFile {\n\t\t\tos.MkdirAll(filepath.Dir(binPath), 0755)\n\t\t\tcmd = \"cp\"\n\t\t\tcmdArgs = []string{preparable.SourcePath, binPath}\n\t\t\tlog.Info(\"copy file \", preparable.SourcePath, \" to \", filepath.Dir(binPath))\n\t\t} else {\n\t\t\tcmd = \"go\"\n\t\t\tcmdArgs = []string{\"build\", \"-o\", binPath, preparable.SourcePath + \"\/.\"}\n\t\t}\n\t}\n\n\tpreparable.Cmd = preparable.getBinPath()\n\treturn exec.Command(cmd, cmdArgs...).Output()\n}\n\n\/\/ Start will execute the process based on the information presented on the preparable.\n\/\/ This function should be called from inside the master to make sure\n\/\/ all the watchers and process handling are done correctly.\n\/\/ Returns a tuple with the process and an error in case there's any.\nfunc (preparable *Preparable) Start() (process.ProcContainer, error) {\n\tproc := &process.Proc{\n\t\tName: preparable.Name,\n\t\tCmd: preparable.Cmd,\n\t\tArgs: preparable.Args,\n\t\tPath: preparable.getPath(),\n\t\tPidfile: preparable.getPidPath(),\n\t\tOutfile: preparable.getOutPath(),\n\t\tErrfile: preparable.getErrPath(),\n\t\tKeepAlive: preparable.KeepAlive,\n\t\tStatus: &process.ProcStatus{},\n\t}\n\n\terr := proc.Start()\n\treturn proc, err\n}\n\n\/\/ Identifier is a function that get proc name\nfunc (preparable *Preparable) Identifier() string {\n\treturn preparable.Name\n}\n\nfunc (preparable *Preparable) getPath() string {\n\tif preparable.SysFolder[len(preparable.SysFolder)-1] == '\/' {\n\t\tpreparable.SysFolder = strings.TrimSuffix(preparable.SysFolder, \"\/\")\n\t}\n\treturn preparable.SysFolder + \"\/\" + preparable.Name\n}\n\nfunc (preparable *Preparable) getBinPath() string {\n\treturn preparable.getPath() + \"\/\" + preparable.Name\n}\n\nfunc (preparable *Preparable) getPidPath() string {\n\treturn preparable.getBinPath() + \".pid\"\n}\n\nfunc (preparable *Preparable) getOutPath() string {\n\treturn preparable.getBinPath() + \".out\"\n}\n\nfunc (preparable *Preparable) getErrPath() string {\n\treturn preparable.getBinPath() + \".err\"\n}\n<commit_msg>[StartBinary] (In Progress) Remove copying binary file<commit_after>package preparable\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/struCoder\/pmgo\/lib\/process\"\n)\n\n\/\/ ProcPreparable is a preparable with all the necessary informations to run\n\/\/ a process. To actually run a process, call the Start() method.\ntype ProcPreparable interface {\n\tPrepareBin(bool) ([]byte, error)\n\tStart() (process.ProcContainer, error)\n\tgetPath() string\n\tIdentifier() string\n\tgetBinPath() string\n\tgetPidPath() string\n\tgetOutPath() string\n\tgetErrPath() string\n}\n\ntype Preparable struct {\n\tName string\n\tSourcePath string\n\tCmd string\n\tSysFolder string\n\tLanguage string\n\tKeepAlive bool\n\tArgs []string\n}\n\n\/\/ PrepareBin will compile the Golang project from SourcePath and populate Cmd with the proper\n\/\/ command for the process to be executed.\n\/\/ Returns the compile command output.\nfunc (preparable *Preparable) PrepareBin(fromBinFile bool) ([]byte, error) {\n\t\/\/ Remove the last character '\/' if present\n\tif preparable.SourcePath[len(preparable.SourcePath)-1] == '\/' {\n\t\tpreparable.SourcePath = strings.TrimSuffix(preparable.SourcePath, \"\/\")\n\t}\n\tbinPath := preparable.getBinPath()\n\n\tvar output []byte\n\tvar err error\n\tif preparable.Language == \"go\" && !fromBinFile {\n\t\tcmd := \"go\"\n\t\tcmdArgs := []string{\"build\", \"-o\", binPath, preparable.SourcePath + \"\/.\"}\n\n\t\toutput, err = exec.Command(cmd, cmdArgs...).Output()\n\t} else {\n\t\tinfo, err := os.Stat(preparable.SourcePath)\n\t\tif err != nil {\n\t\t\treturn make([]byte, 0), err\n\t\t}\n\t\tif info.Mode()&0111 == 0 {\n\t\t\treturn make([]byte, 0), fmt.Errorf(\"The given source path(%s) is not executable, neither a Go source to compile\", preparable.SourcePath)\n\t\t}\n\n\t\tbinPath = preparable.SourcePath\n\t}\n\n\tpreparable.Cmd = binPath\n\treturn output, err\n}\n\n\/\/ Start will execute the process based on the information presented on the preparable.\n\/\/ This function should be called from inside the master to make sure\n\/\/ all the watchers and process handling are done correctly.\n\/\/ Returns a tuple with the process and an error in case there's any.\nfunc (preparable *Preparable) Start() (process.ProcContainer, error) {\n\tproc := &process.Proc{\n\t\tName: preparable.Name,\n\t\tCmd: preparable.Cmd,\n\t\tArgs: preparable.Args,\n\t\tPath: preparable.getPath(),\n\t\tPidfile: preparable.getPidPath(),\n\t\tOutfile: preparable.getOutPath(),\n\t\tErrfile: preparable.getErrPath(),\n\t\tKeepAlive: preparable.KeepAlive,\n\t\tStatus: &process.ProcStatus{},\n\t}\n\n\terr := proc.Start()\n\treturn proc, err\n}\n\n\/\/ Identifier is a function that get proc name\nfunc (preparable *Preparable) Identifier() string {\n\treturn preparable.Name\n}\n\nfunc (preparable *Preparable) getPath() string {\n\tif preparable.SysFolder[len(preparable.SysFolder)-1] == '\/' {\n\t\tpreparable.SysFolder = strings.TrimSuffix(preparable.SysFolder, \"\/\")\n\t}\n\treturn preparable.SysFolder + \"\/\" + preparable.Name\n}\n\nfunc (preparable *Preparable) getBinPath() string {\n\treturn preparable.getPath() + \"\/\" + preparable.Name\n}\n\nfunc (preparable *Preparable) getPidPath() string {\n\treturn preparable.getBinPath() + \".pid\"\n}\n\nfunc (preparable *Preparable) getOutPath() string {\n\treturn preparable.getBinPath() + \".out\"\n}\n\nfunc (preparable *Preparable) getErrPath() string {\n\treturn preparable.getBinPath() + \".err\"\n}\n<|endoftext|>"} {"text":"<commit_before>package chain\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/btcboost\/copernicus\/model\/blockindex\"\n\t\"github.com\/btcboost\/copernicus\/util\"\n)\n\n\/\/ Chain An in-memory blIndexed chain of blocks.\ntype Chain struct {\n\tactive \t\t[]* blockindex.BlockIndex\n\tbranch \t\t[]* blockindex.BlockIndex\n\twaitForTx \tmap[util.Hash]* blockindex.BlockIndex\n\torphan \t[]* blockindex.BlockIndex\n\tblockIndexMap \tmap[util.Hash]* blockindex.BlockIndex\n\tnewestBlock \t*blockindex.BlockIndex\n\treceiveID \tuint64\n}\n\n\n\/\/ Genesis Returns the blIndex entry for the genesis block of this chain,\n\/\/ or nullptr if none.\nfunc (c *Chain) Genesis() *blockindex.BlockIndex {\n\tif len(c.active) > 0 {\n\t\treturn c.active[0]\n\t}\n\n\treturn nil\n}\n\n\/\/ Tip Returns the blIndex entry for the tip of this chain, or nullptr if none.\nfunc (c *Chain) Tip() *blockindex.BlockIndex {\n\tif len(c.active) > 0 {\n\t\treturn c.active[len(c.active)-1]\n\t}\n\n\treturn nil\n}\n\n\/\/ GetSpecIndex Returns the blIndex entry at a particular height in this chain, or nullptr\n\/\/ if no such height exists.\nfunc (c *Chain) GetIndex(height int) *blockindex.BlockIndex {\n\tif height < 0 || height >= len(c.active) {\n\t\treturn nil\n\t}\n\n\treturn c.active[height]\n}\n\n\/\/ Equal Compare two chains efficiently.\nfunc (c *Chain) Equal(dst *Chain) bool {\n\treturn len(c.active) == len(dst.active) &&\n\t\tc.active[len(c.active)-1] == dst.active[len(dst.active)-1]\n}\n\n\/\/ Contains \/** Efficiently check whether a block is present in this chain\nfunc (c *Chain) Contains(index *blockindex.BlockIndex) bool {\n\treturn c.GetIndex(index.Height) == index\n}\n\n\/\/ Next Find the successor of a block in this chain, or nullptr if the given\n\/\/ index is not found or is the tip.\nfunc (c *Chain) Next(index *blockindex.BlockIndex) *blockindex.BlockIndex {\n\tif c.Contains(index) {\n\t\treturn c.GetIndex(index.Height + 1)\n\t}\n\treturn nil\n}\n\n\/\/ Height Return the maximal height in the chain. Is equal to chain.Tip() ?\n\/\/ chain.Tip()->nHeight : -1.\nfunc (c *Chain) Height() int {\n\treturn len(c.active) - 1\n}\n\n\/\/ SetTip Set\/initialize a chain with a given tip.\nfunc (c *Chain) SetTip(index *BlockIndex) {\n\tif index == nil {\n\t\tc.active = []*BlockIndex{}\n\t\treturn\n\t}\n\n\ttmp := make([]*BlockIndex, index.Height+1)\n\tcopy(tmp, c.active)\n\tc.active = tmp\n\tfor index != nil && c.active[index.Height] != index {\n\t\tc.active[index.Height] = index\n\t\tindex = index.Prev\n\t}\n}\n\n\/\/ FindFork Find the last common block between this chain and a block blIndex entry.\nfunc (chain *Chain) FindFork(blIndex *BlockIndex) *BlockIndex {\n\tif blIndex == nil {\n\t\treturn nil\n\t}\n\n\tif blIndex.Height > chain.Height() {\n\t\tblIndex = blIndex.GetAncestor(chain.Height())\n\t}\n\n\tfor blIndex != nil && !chain.Contains(blIndex) {\n\t\tblIndex = blIndex.Prev\n\t}\n\treturn blIndex\n}\n\n\/\/ FindEarliestAtLeast Find the earliest block with timestamp equal or greater than the given.\nfunc (chain *Chain) FindEarliestAtLeast(time int64) *BlockIndex {\n\ti := sort.Search(len(chain.Chain), func(i int) bool {\n\t\treturn int64(chain.Chain[i].GetBlockTimeMax()) > time\n\t})\n\tif i == len(chain.Chain) {\n\t\treturn nil\n\t}\n\n\treturn chain.Chain[i]\n}\n\nfunc ActiveBestChain(bi *BlockIndex) bool {\n\tforkBlock := ActiveChain.FindFork(bi)\n\tif forkBlock == nil {\n\t\treturn false\n\t}\n\n\t\/\/ maintain global variable NewestBlock\n\tNewestBlock = bi\n\n\tsubHeight := bi.Height - forkBlock.Height\n\ttmpBi := make([]*BlockIndex, subHeight)\n\ttmpBi[subHeight-1] = bi\n\tfor i := 0; i < subHeight; i++ {\n\t\tbi = bi.Prev\n\t\ttmpBi[subHeight-i-2] = bi\n\t}\n\n\t\/\/ maintain the global variable ActiveChain\n\t\/\/ todo should be locked\n\tActiveChain.Chain = append(ActiveChain.Chain[:bi.Height+1], tmpBi...)\n\n\t\/\/ maintain global variable BranchChain\n\tremoveBlockIndexFromBranchChain(tmpBi)\n\taddBlockIndexToBranchChain(tmpBi)\n\n\treturn true\n}\n\n\/\/ should be before addBlockIndexToBranchChain()\nfunc removeBlockIndexFromBranchChain(bis []*BlockIndex) {\n\tfor i := 0; i < len(bis); i++ {\n\t\tfor j := 0; j < len(BranchChain); {\n\t\t\tif BranchChain[j].BlockHash == bis[i].BlockHash {\n\t\t\t\tBranchChain = append(BranchChain[:j], BranchChain[j+1:]...)\n\t\t\t} else {\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ should be after removeBlockIndexFromBranchChain()\nfunc addBlockIndexToBranchChain(bis []*BlockIndex) {\n\tBranchChain = append(BranchChain, bis...)\n}\n\nfunc FindMostWorkChain() *BlockIndex {\n\t\/\/ todo complete\n\treturn nil\n}<commit_msg>add function TipHeight(),GetMedianTimePast(),GetVersionState(),GetTipTime()<commit_after>package chain\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/btcboost\/copernicus\/model\/blockindex\"\n\t\"github.com\/btcboost\/copernicus\/util\"\n)\n\n\/\/ Chain An in-memory blIndexed chain of blocks.\ntype Chain struct {\n\tactive \t\t[]* blockindex.BlockIndex\n\tbranch \t\t[]* blockindex.BlockIndex\n\twaitForTx \tmap[util.Hash]* blockindex.BlockIndex\n\torphan \t[]* blockindex.BlockIndex\n\tblockIndexMap \tmap[util.Hash]* blockindex.BlockIndex\n\tnewestBlock \t*blockindex.BlockIndex\n\treceiveID \tuint64\n}\n\n\n\/\/ Genesis Returns the blIndex entry for the genesis block of this chain,\n\/\/ or nullptr if none.\nfunc (c *Chain) Genesis() *blockindex.BlockIndex {\n\tif len(c.active) > 0 {\n\t\treturn c.active[0]\n\t}\n\n\treturn nil\n}\n\n\/\/ Tip Returns the blIndex entry for the tip of this chain, or nullptr if none.\nfunc (c *Chain) Tip() *blockindex.BlockIndex {\n\tif len(c.active) > 0 {\n\t\treturn c.active[len(c.active)-1]\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chain) TipHeight() int {\n\tif len(c.active) > 0 {\n\t\treturn c.active[len(c.active)-1].Height\n\t}\n\n\treturn 0\n}\n\nfunc (c *Chain) GetMedianTimePast() int64 {\n\n\treturn 0\n}\n\nfunc (c *Chain) GetVersionState() int {\n\n\treturn 0\n}\n\nfunc (c *Chain) GetTipTime() int64 {\n\n\treturn 0\n}\n\n\n\/\/ GetSpecIndex Returns the blIndex entry at a particular height in this chain, or nullptr\n\/\/ if no such height exists.\nfunc (c *Chain) GetIndex(height int) *blockindex.BlockIndex {\n\tif height < 0 || height >= len(c.active) {\n\t\treturn nil\n\t}\n\n\treturn c.active[height]\n}\n\n\/\/ Equal Compare two chains efficiently.\nfunc (c *Chain) Equal(dst *Chain) bool {\n\treturn len(c.active) == len(dst.active) &&\n\t\tc.active[len(c.active)-1] == dst.active[len(dst.active)-1]\n}\n\n\/\/ Contains \/** Efficiently check whether a block is present in this chain\nfunc (c *Chain) Contains(index *blockindex.BlockIndex) bool {\n\treturn c.GetIndex(index.Height) == index\n}\n\n\/\/ Next Find the successor of a block in this chain, or nullptr if the given\n\/\/ index is not found or is the tip.\nfunc (c *Chain) Next(index *blockindex.BlockIndex) *blockindex.BlockIndex {\n\tif c.Contains(index) {\n\t\treturn c.GetIndex(index.Height + 1)\n\t}\n\treturn nil\n}\n\n\/\/ Height Return the maximal height in the chain. Is equal to chain.Tip() ?\n\/\/ chain.Tip()->nHeight : -1.\nfunc (c *Chain) Height() int {\n\treturn len(c.active) - 1\n}\n\n\/\/ SetTip Set\/initialize a chain with a given tip.\nfunc (c *Chain) SetTip(index *BlockIndex) {\n\tif index == nil {\n\t\tc.active = []*BlockIndex{}\n\t\treturn\n\t}\n\n\ttmp := make([]*BlockIndex, index.Height+1)\n\tcopy(tmp, c.active)\n\tc.active = tmp\n\tfor index != nil && c.active[index.Height] != index {\n\t\tc.active[index.Height] = index\n\t\tindex = index.Prev\n\t}\n}\n\n\/\/ FindFork Find the last common block between this chain and a block blIndex entry.\nfunc (chain *Chain) FindFork(blIndex *BlockIndex) *BlockIndex {\n\tif blIndex == nil {\n\t\treturn nil\n\t}\n\n\tif blIndex.Height > chain.Height() {\n\t\tblIndex = blIndex.GetAncestor(chain.Height())\n\t}\n\n\tfor blIndex != nil && !chain.Contains(blIndex) {\n\t\tblIndex = blIndex.Prev\n\t}\n\treturn blIndex\n}\n\n\/\/ FindEarliestAtLeast Find the earliest block with timestamp equal or greater than the given.\nfunc (chain *Chain) FindEarliestAtLeast(time int64) *BlockIndex {\n\ti := sort.Search(len(chain.Chain), func(i int) bool {\n\t\treturn int64(chain.Chain[i].GetBlockTimeMax()) > time\n\t})\n\tif i == len(chain.Chain) {\n\t\treturn nil\n\t}\n\n\treturn chain.Chain[i]\n}\n\nfunc ActiveBestChain(bi *BlockIndex) bool {\n\tforkBlock := ActiveChain.FindFork(bi)\n\tif forkBlock == nil {\n\t\treturn false\n\t}\n\n\t\/\/ maintain global variable NewestBlock\n\tNewestBlock = bi\n\n\tsubHeight := bi.Height - forkBlock.Height\n\ttmpBi := make([]*BlockIndex, subHeight)\n\ttmpBi[subHeight-1] = bi\n\tfor i := 0; i < subHeight; i++ {\n\t\tbi = bi.Prev\n\t\ttmpBi[subHeight-i-2] = bi\n\t}\n\n\t\/\/ maintain the global variable ActiveChain\n\t\/\/ todo should be locked\n\tActiveChain.Chain = append(ActiveChain.Chain[:bi.Height+1], tmpBi...)\n\n\t\/\/ maintain global variable BranchChain\n\tremoveBlockIndexFromBranchChain(tmpBi)\n\taddBlockIndexToBranchChain(tmpBi)\n\n\treturn true\n}\n\n\/\/ should be before addBlockIndexToBranchChain()\nfunc removeBlockIndexFromBranchChain(bis []*BlockIndex) {\n\tfor i := 0; i < len(bis); i++ {\n\t\tfor j := 0; j < len(BranchChain); {\n\t\t\tif BranchChain[j].BlockHash == bis[i].BlockHash {\n\t\t\t\tBranchChain = append(BranchChain[:j], BranchChain[j+1:]...)\n\t\t\t} else {\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ should be after removeBlockIndexFromBranchChain()\nfunc addBlockIndexToBranchChain(bis []*BlockIndex) {\n\tBranchChain = append(BranchChain, bis...)\n}\n\nfunc FindMostWorkChain() *BlockIndex {\n\t\/\/ todo complete\n\treturn nil\n}<|endoftext|>"} {"text":"<commit_before>package msgpack\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestPack(t *testing.T) {\n\tt.Parallel()\n\n\tpackTests := map[string]struct {\n\t\t\/\/ Expected value\n\t\tv interface{}\n\t\t\/\/ Hex encodings of typ, v\n\t\ths string\n\t}{\n\t\t\"Int64\/0x0\": {\n\t\t\tv: int64(0x0),\n\t\t\ths: \"00\",\n\t\t},\n\t\t\"Int64\/0x1\": {\n\t\t\tv: int64(0x1),\n\t\t\ths: \"01\",\n\t\t},\n\t\t\"Int64\/0x7f\": {\n\t\t\tv: int64(0x7f),\n\t\t\ths: \"7f\",\n\t\t},\n\t\t\"Int64\/0x80\": {\n\t\t\tv: int64(0x80),\n\t\t\ths: \"cc80\",\n\t\t},\n\t\t\"Int64\/0x7fff\": {\n\t\t\tv: int64(0x7fff),\n\t\t\ths: \"cd7fff\",\n\t\t},\n\t\t\"Int64\/0x8000\": {\n\t\t\tv: int64(0x8000),\n\t\t\ths: \"cd8000\",\n\t\t},\n\t\t\"Int64\/0x7fffffff\": {\n\t\t\tv: int64(0x7fffffff),\n\t\t\ths: \"ce7fffffff\",\n\t\t},\n\t\t\"Int64\/0x80000000\": {\n\t\t\tv: int64(0x80000000),\n\t\t\ths: \"ce80000000\",\n\t\t},\n\t\t\"Int64\/0x7fffffffffffffff\": {\n\t\t\tv: int64(0x7fffffffffffffff),\n\t\t\ths: \"cf7fffffffffffffff\",\n\t\t},\n\t\t\"Int64\/-0x1\": {\n\t\t\tv: int64(-0x1),\n\t\t\ths: \"ff\",\n\t\t},\n\t\t\"Int64\/-0x20\": {\n\t\t\tv: int64(-0x20),\n\t\t\ths: \"e0\",\n\t\t},\n\t\t\"Int64\/-0x21\": {\n\t\t\tv: int64(-0x21),\n\t\t\ths: \"d0df\",\n\t\t},\n\t\t\"Int64\/-0x80\": {\n\t\t\tv: int64(-0x80),\n\t\t\ths: \"d080\",\n\t\t},\n\t\t\"Int64\/-0x81\": {\n\t\t\tv: int64(-0x81),\n\t\t\ths: \"d1ff7f\",\n\t\t},\n\t\t\"Int64\/-0x8000\": {\n\t\t\tv: int64(-0x8000),\n\t\t\ths: \"d18000\",\n\t\t},\n\t\t\"Int64\/-0x8001\": {\n\t\t\tv: int64(-0x8001),\n\t\t\ths: \"d2ffff7fff\",\n\t\t},\n\t\t\"Int64\/-0x80000000\": {\n\t\t\tv: int64(-0x80000000),\n\t\t\ths: \"d280000000\",\n\t\t},\n\t\t\"Int64\/-0x80000001\": {\n\t\t\tv: int64(-0x80000001),\n\t\t\ths: \"d3ffffffff7fffffff\",\n\t\t},\n\t\t\"Int64\/-0x8000000000000000\": {\n\t\t\tv: int64(-0x8000000000000000),\n\t\t\ths: \"d38000000000000000\",\n\t\t},\n\t\t\"Uint64\/0x0\": {\n\t\t\tv: uint64(0x0),\n\t\t\ths: \"00\",\n\t\t},\n\t\t\"Uint64\/0x1\": {\n\t\t\tv: uint64(0x1),\n\t\t\ths: \"01\",\n\t\t},\n\t\t\"Uint64\/0x7f\": {\n\t\t\tv: uint64(0x7f),\n\t\t\ths: \"7f\",\n\t\t},\n\t\t\"Uint64\/0xff\": {\n\t\t\tv: uint64(0xff),\n\t\t\ths: \"ccff\",\n\t\t},\n\t\t\"Uint64\/0x100\": {\n\t\t\tv: uint64(0x100),\n\t\t\ths: \"cd0100\",\n\t\t},\n\t\t\"Uint64\/0xffff\": {\n\t\t\tv: uint64(0xffff),\n\t\t\ths: \"cdffff\",\n\t\t},\n\t\t\"Uint64\/0x10000\": {\n\t\t\tv: uint64(0x10000),\n\t\t\ths: \"ce00010000\",\n\t\t},\n\t\t\"Uint64\/0xffffffff\": {\n\t\t\tv: uint64(0xffffffff),\n\t\t\ths: \"ceffffffff\",\n\t\t},\n\t\t\"Uint64\/0x100000000\": {\n\t\t\tv: uint64(0x100000000),\n\t\t\ths: \"cf0000000100000000\",\n\t\t},\n\t\t\"Uint64\/0xffffffffffffffff\": {\n\t\t\tv: uint64(0xffffffffffffffff),\n\t\t\ths: \"cfffffffffffffffff\",\n\t\t},\n\t\t\"Nil\": {\n\t\t\tv: nil,\n\t\t\ths: \"c0\",\n\t\t},\n\t\t\"True\": {\n\t\t\tv: true,\n\t\t\ths: \"c3\",\n\t\t},\n\t\t\"False\": {\n\t\t\tv: false,\n\t\t\ths: \"c2\",\n\t\t},\n\t\t\"Float64\/1.23456\": {\n\t\t\tv: float64(1.23456),\n\t\t\ths: \"cb3ff3c0c1fc8f3238\",\n\t\t},\n\t\t\"MapLen\/0x0\": {\n\t\t\tv: mapLen(0x0),\n\t\t\ths: \"80\",\n\t\t},\n\t\t\"MapLen\/0x1\": {\n\t\t\tv: mapLen(0x1),\n\t\t\ths: \"81\",\n\t\t},\n\t\t\"MapLen\/0xf\": {\n\t\t\tv: mapLen(0xf),\n\t\t\ths: \"8f\",\n\t\t},\n\t\t\"MapLen\/0x10\": {\n\t\t\tv: mapLen(0x10),\n\t\t\ths: \"de0010\",\n\t\t},\n\t\t\"MapLen\/0xffff\": {\n\t\t\tv: mapLen(0xffff),\n\t\t\ths: \"deffff\",\n\t\t},\n\t\t\"MapLen\/0x10000\": {\n\t\t\tv: mapLen(0x10000),\n\t\t\ths: \"df00010000\",\n\t\t},\n\t\t\"MapLen\/0xffffffff\": {\n\t\t\tv: mapLen(0xffffffff),\n\t\t\ths: \"dfffffffff\",\n\t\t},\n\t\t\"ArrayLen\/0x0\": {\n\t\t\tv: arrayLen(0x0),\n\t\t\ths: \"90\",\n\t\t},\n\t\t\"ArrayLen\/0x1\": {\n\t\t\tv: arrayLen(0x1),\n\t\t\ths: \"91\",\n\t\t},\n\t\t\"ArrayLen\/0xf\": {\n\t\t\tv: arrayLen(0xf),\n\t\t\ths: \"9f\",\n\t\t},\n\t\t\"ArrayLen\/0x10\": {\n\t\t\tv: arrayLen(0x10),\n\t\t\ths: \"dc0010\",\n\t\t},\n\t\t\"ArrayLen\/0xffff\": {\n\t\t\tv: arrayLen(0xffff),\n\t\t\ths: \"dcffff\",\n\t\t},\n\t\t\"ArrayLen\/0x10000\": {\n\t\t\tv: arrayLen(0x10000),\n\t\t\ths: \"dd00010000\",\n\t\t},\n\t\t\"ArrayLen\/0xffffffff\": {\n\t\t\tv: arrayLen(0xffffffff),\n\t\t\ths: \"ddffffffff\",\n\t\t},\n\t\t\"String\/Empty\": {\n\t\t\tv: \"\",\n\t\t\ths: \"a0\",\n\t\t},\n\t\t\"String\/1\": {\n\t\t\tv: \"1\",\n\t\t\ths: \"a131\",\n\t\t},\n\t\t\"String\/1234567890123456789012345678901\": {\n\t\t\tv: \"1234567890123456789012345678901\",\n\t\t\ths: \"bf31323334353637383930313233343536373839303132333435363738393031\",\n\t\t},\n\t\t\"String\/12345678901234567890123456789012\": {\n\t\t\tv: \"12345678901234567890123456789012\",\n\t\t\ths: \"d9203132333435363738393031323334353637383930313233343536373839303132\",\n\t\t},\n\t\t\"Bytes\/Empty\": {\n\t\t\tv: []byte(\"\"),\n\t\t\ths: \"c400\",\n\t\t},\n\t\t\"Bytes\/1\": {\n\t\t\tv: []byte(\"1\"),\n\t\t\ths: \"c40131\",\n\t\t},\n\t\t\"Extension\/1\/Empty\": {\n\t\t\tv: extension{1, \"\"},\n\t\t\ths: \"c70001\",\n\t\t},\n\t\t\"Extension\/2\/1\": {\n\t\t\tv: extension{2, \"1\"},\n\t\t\ths: \"d40231\",\n\t\t},\n\t\t\"Extension\/3\/12\": {\n\t\t\tv: extension{3, \"12\"},\n\t\t\ths: \"d5033132\",\n\t\t},\n\t\t\"Extension\/4\/1234\": {\n\t\t\tv: extension{4, \"1234\"},\n\t\t\ths: \"d60431323334\",\n\t\t},\n\t\t\"Extension\/5\/12345678\": {\n\t\t\tv: extension{5, \"12345678\"},\n\t\t\ths: \"d7053132333435363738\",\n\t\t},\n\t\t\"Extension\/6\/1234567890123456\": {\n\t\t\tv: extension{6, \"1234567890123456\"},\n\t\t\ths: \"d80631323334353637383930313233343536\",\n\t\t},\n\t\t\"Extension\/7\/12345678901234567\": {\n\t\t\tv: extension{7, \"12345678901234567\"},\n\t\t\ths: \"c711073132333435363738393031323334353637\",\n\t\t},\n\t}\n\tfor name, tt := range packTests {\n\t\ttt := tt\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tvar arg string\n\t\t\tswitch reflect.ValueOf(tt.v).Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\targ = fmt.Sprintf(\"%T %x\", tt.v, tt.v)\n\t\t\tdefault:\n\t\t\t\targ = fmt.Sprintf(\"%T %v\", tt.v, tt.v)\n\t\t\t}\n\n\t\t\tp, err := pack(tt.v)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"pack %s returned error %v\", arg, err)\n\t\t\t}\n\t\t\th := hex.EncodeToString(p)\n\t\t\tif h != tt.hs {\n\t\t\t\tt.Fatalf(\"pack %s returned %s, want %s\", arg, h, tt.hs)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>msgpack: format and reorder packTests<commit_after>package msgpack\n\nimport (\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestPack(t *testing.T) {\n\tt.Parallel()\n\n\tpackTests := map[string]struct {\n\t\t\/\/ Expected value\n\t\tv interface{}\n\t\t\/\/ Hex encodings of typ, v\n\t\ths string\n\t}{\n\t\t\"Bool\/True\": {\n\t\t\tv: true,\n\t\t\ths: \"c3\",\n\t\t},\n\t\t\"Bool\/False\": {\n\t\t\tv: false,\n\t\t\ths: \"c2\",\n\t\t},\n\t\t\"Int64\/0x0\": {\n\t\t\tv: int64(0x0),\n\t\t\ths: \"00\",\n\t\t},\n\t\t\"Int64\/0x1\": {\n\t\t\tv: int64(0x1),\n\t\t\ths: \"01\",\n\t\t},\n\t\t\"Int64\/0x7f\": {\n\t\t\tv: int64(0x7f),\n\t\t\ths: \"7f\",\n\t\t},\n\t\t\"Int64\/0x80\": {\n\t\t\tv: int64(0x80),\n\t\t\ths: \"cc80\",\n\t\t},\n\t\t\"Int64\/0x7fff\": {\n\t\t\tv: int64(0x7fff),\n\t\t\ths: \"cd7fff\",\n\t\t},\n\t\t\"Int64\/0x8000\": {\n\t\t\tv: int64(0x8000),\n\t\t\ths: \"cd8000\",\n\t\t},\n\t\t\"Int64\/0x7fffffff\": {\n\t\t\tv: int64(0x7fffffff),\n\t\t\ths: \"ce7fffffff\",\n\t\t},\n\t\t\"Int64\/0x80000000\": {\n\t\t\tv: int64(0x80000000),\n\t\t\ths: \"ce80000000\",\n\t\t},\n\t\t\"Int64\/0x7fffffffffffffff\": {\n\t\t\tv: int64(0x7fffffffffffffff),\n\t\t\ths: \"cf7fffffffffffffff\",\n\t\t},\n\t\t\"Int64\/-0x1\": {\n\t\t\tv: int64(-0x1),\n\t\t\ths: \"ff\",\n\t\t},\n\t\t\"Int64\/-0x20\": {\n\t\t\tv: int64(-0x20),\n\t\t\ths: \"e0\",\n\t\t},\n\t\t\"Int64\/-0x21\": {\n\t\t\tv: int64(-0x21),\n\t\t\ths: \"d0df\",\n\t\t},\n\t\t\"Int64\/-0x80\": {\n\t\t\tv: int64(-0x80),\n\t\t\ths: \"d080\",\n\t\t},\n\t\t\"Int64\/-0x81\": {\n\t\t\tv: int64(-0x81),\n\t\t\ths: \"d1ff7f\",\n\t\t},\n\t\t\"Int64\/-0x8000\": {\n\t\t\tv: int64(-0x8000),\n\t\t\ths: \"d18000\",\n\t\t},\n\t\t\"Int64\/-0x8001\": {\n\t\t\tv: int64(-0x8001),\n\t\t\ths: \"d2ffff7fff\",\n\t\t},\n\t\t\"Int64\/-0x80000000\": {\n\t\t\tv: int64(-0x80000000),\n\t\t\ths: \"d280000000\",\n\t\t},\n\t\t\"Int64\/-0x80000001\": {\n\t\t\tv: int64(-0x80000001),\n\t\t\ths: \"d3ffffffff7fffffff\",\n\t\t},\n\t\t\"Int64\/-0x8000000000000000\": {\n\t\t\tv: int64(-0x8000000000000000),\n\t\t\ths: \"d38000000000000000\",\n\t\t},\n\t\t\"Uint64\/0x0\": {\n\t\t\tv: uint64(0x0),\n\t\t\ths: \"00\",\n\t\t},\n\t\t\"Uint64\/0x1\": {\n\t\t\tv: uint64(0x1),\n\t\t\ths: \"01\",\n\t\t},\n\t\t\"Uint64\/0x7f\": {\n\t\t\tv: uint64(0x7f),\n\t\t\ths: \"7f\",\n\t\t},\n\t\t\"Uint64\/0xff\": {\n\t\t\tv: uint64(0xff),\n\t\t\ths: \"ccff\",\n\t\t},\n\t\t\"Uint64\/0x100\": {\n\t\t\tv: uint64(0x100),\n\t\t\ths: \"cd0100\",\n\t\t},\n\t\t\"Uint64\/0xffff\": {\n\t\t\tv: uint64(0xffff),\n\t\t\ths: \"cdffff\",\n\t\t},\n\t\t\"Uint64\/0x10000\": {\n\t\t\tv: uint64(0x10000),\n\t\t\ths: \"ce00010000\",\n\t\t},\n\t\t\"Uint64\/0xffffffff\": {\n\t\t\tv: uint64(0xffffffff),\n\t\t\ths: \"ceffffffff\",\n\t\t},\n\t\t\"Uint64\/0x100000000\": {\n\t\t\tv: uint64(0x100000000),\n\t\t\ths: \"cf0000000100000000\",\n\t\t},\n\t\t\"Uint64\/0xffffffffffffffff\": {\n\t\t\tv: uint64(0xffffffffffffffff),\n\t\t\ths: \"cfffffffffffffffff\",\n\t\t},\n\t\t\"Float64\/1.23456\": {\n\t\t\tv: float64(1.23456),\n\t\t\ths: \"cb3ff3c0c1fc8f3238\",\n\t\t},\n\t\t\"String\/Empty\": {\n\t\t\tv: string(\"\"),\n\t\t\ths: \"a0\",\n\t\t},\n\t\t\"String\/1\": {\n\t\t\tv: string(\"1\"),\n\t\t\ths: \"a131\",\n\t\t},\n\t\t\"String\/1234567890123456789012345678901\": {\n\t\t\tv: string(\"1234567890123456789012345678901\"),\n\t\t\ths: \"bf31323334353637383930313233343536373839303132333435363738393031\",\n\t\t},\n\t\t\"String\/12345678901234567890123456789012\": {\n\t\t\tv: string(\"12345678901234567890123456789012\"),\n\t\t\ths: \"d9203132333435363738393031323334353637383930313233343536373839303132\",\n\t\t},\n\t\t\"Binary\/Empty\": {\n\t\t\tv: []byte(\"\"),\n\t\t\ths: \"c400\",\n\t\t},\n\t\t\"Binary\/1\": {\n\t\t\tv: []byte(\"1\"),\n\t\t\ths: \"c40131\",\n\t\t},\n\t\t\"MapLen\/0x0\": {\n\t\t\tv: mapLen(0x0),\n\t\t\ths: \"80\",\n\t\t},\n\t\t\"MapLen\/0x1\": {\n\t\t\tv: mapLen(0x1),\n\t\t\ths: \"81\",\n\t\t},\n\t\t\"MapLen\/0xf\": {\n\t\t\tv: mapLen(0xf),\n\t\t\ths: \"8f\",\n\t\t},\n\t\t\"MapLen\/0x10\": {\n\t\t\tv: mapLen(0x10),\n\t\t\ths: \"de0010\",\n\t\t},\n\t\t\"MapLen\/0xffff\": {\n\t\t\tv: mapLen(0xffff),\n\t\t\ths: \"deffff\",\n\t\t},\n\t\t\"MapLen\/0x10000\": {\n\t\t\tv: mapLen(0x10000),\n\t\t\ths: \"df00010000\",\n\t\t},\n\t\t\"MapLen\/0xffffffff\": {\n\t\t\tv: mapLen(0xffffffff),\n\t\t\ths: \"dfffffffff\",\n\t\t},\n\t\t\"ArrayLen\/0x0\": {\n\t\t\tv: arrayLen(0x0),\n\t\t\ths: \"90\",\n\t\t},\n\t\t\"ArrayLen\/0x1\": {\n\t\t\tv: arrayLen(0x1),\n\t\t\ths: \"91\",\n\t\t},\n\t\t\"ArrayLen\/0xf\": {\n\t\t\tv: arrayLen(0xf),\n\t\t\ths: \"9f\",\n\t\t},\n\t\t\"ArrayLen\/0x10\": {\n\t\t\tv: arrayLen(0x10),\n\t\t\ths: \"dc0010\",\n\t\t},\n\t\t\"ArrayLen\/0xffff\": {\n\t\t\tv: arrayLen(0xffff),\n\t\t\ths: \"dcffff\",\n\t\t},\n\t\t\"ArrayLen\/0x10000\": {\n\t\t\tv: arrayLen(0x10000),\n\t\t\ths: \"dd00010000\",\n\t\t},\n\t\t\"ArrayLen\/0xffffffff\": {\n\t\t\tv: arrayLen(0xffffffff),\n\t\t\ths: \"ddffffffff\",\n\t\t},\n\t\t\"Extension\/1\/Empty\": {\n\t\t\tv: extension{1, \"\"},\n\t\t\ths: \"c70001\",\n\t\t},\n\t\t\"Extension\/2\/1\": {\n\t\t\tv: extension{2, \"1\"},\n\t\t\ths: \"d40231\",\n\t\t},\n\t\t\"Extension\/3\/12\": {\n\t\t\tv: extension{3, \"12\"},\n\t\t\ths: \"d5033132\",\n\t\t},\n\t\t\"Extension\/4\/1234\": {\n\t\t\tv: extension{4, \"1234\"},\n\t\t\ths: \"d60431323334\",\n\t\t},\n\t\t\"Extension\/5\/12345678\": {\n\t\t\tv: extension{5, \"12345678\"},\n\t\t\ths: \"d7053132333435363738\",\n\t\t},\n\t\t\"Extension\/6\/1234567890123456\": {\n\t\t\tv: extension{6, \"1234567890123456\"},\n\t\t\ths: \"d80631323334353637383930313233343536\",\n\t\t},\n\t\t\"Extension\/7\/12345678901234567\": {\n\t\t\tv: extension{7, \"12345678901234567\"},\n\t\t\ths: \"c711073132333435363738393031323334353637\",\n\t\t},\n\t\t\"Nil\": {\n\t\t\tv: nil,\n\t\t\ths: \"c0\",\n\t\t},\n\t}\n\tfor name, tt := range packTests {\n\t\ttt := tt\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tvar arg string\n\t\t\tswitch reflect.ValueOf(tt.v).Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\targ = fmt.Sprintf(\"%T %x\", tt.v, tt.v)\n\t\t\tdefault:\n\t\t\t\targ = fmt.Sprintf(\"%T %v\", tt.v, tt.v)\n\t\t\t}\n\n\t\t\tp, err := pack(tt.v)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"pack %s returned error %v\", arg, err)\n\t\t\t}\n\n\t\t\th := hex.EncodeToString(p)\n\t\t\tif h != tt.hs {\n\t\t\t\tt.Fatalf(\"pack %s returned %s, want %s\", arg, h, tt.hs)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package textrazor\n\nconst (\n\tEntitiesExtractor = \"entities\"\n\tWordsExtractor = \"words\"\n\tRelationsExtractor = \"relations\"\n\n\t\/\/ for now just these three\n\tEntitiesWordsRelations = \"entities,words,relations\"\n)\n\n\/\/ JSON response structs for the \"entities,words\" extractors;\n\/\/ with the help from https:\/\/mholt.github.io\/json-to-go\/.\n\ntype Word struct {\n\tPosition int `json:\"position\"`\n\tStartingPos int `json:\"startingPos\"`\n\tEndingPos int `json:\"endingPos\"`\n\tStem string `json:\"stem\"`\n\tLemma string `json:\"lemma\"`\n\tToken string `json:\"token\"`\n\tPartOfSpeech string `json:\"partOfSpeech\"`\n\tParentPosition int `json:\"parentPosition,omitempty\"`\n\tRelationToParent string `json:\"relationToParent,omitempty\"`\n}\n\ntype Sentense struct {\n\tPosition int `json:\"position\"`\n\tWords []Word `json:\"words\"`\n}\n\ntype Entity struct {\n\tID int `json:\"id\"`\n\tType []string `json:\"type,omitempty\"`\n\tMatchingTokens []int `json:\"matchingTokens\"`\n\tEntityID string `json:\"entityId\"`\n\tFreebaseTypes []string `json:\"freebaseTypes,omitempty\"`\n\tConfidenceScore float64 `json:\"confidenceScore\"`\n\tWikiLink string `json:\"wikiLink\"`\n\tMatchedText string `json:\"matchedText\"`\n\tFreebaseID string `json:\"freebaseId,omitempty\"`\n\tRelevanceScore int `json:\"relevanceScore\"`\n\tEntityEnglishID string `json:\"entityEnglishId\"`\n\tStartingPos int `json:\"startingPos\"`\n\tEndingPos int `json:\"endingPos\"`\n\tWikidataID string `json:\"wikidataId,omitempty\"`\n}\n\ntype Param struct {\n\tRelation string `json:\"relation\"`\n\tWordPositions []int `json:\"wordPositions\"`\n}\n\ntype Relation struct {\n\tID int `json:\"id\"`\n\tWordPositions []int `json:\"wordPositions\"`\n\tParams []Param `json:\"params\"`\n}\n\ntype Property struct {\n\tID int `json:\"id\"`\n\tWordPositions []int `json:\"wordPositions\"`\n\tPropertyPositions []int `json:\"propertyPositions\"`\n}\n\ntype Response struct {\n\tSentences []Sentense `json:\"sentences\"`\n\tLanguage string `json:\"language\"`\n\tLanguageIsReliable bool `json:\"languageIsReliable\"`\n\tEntities []Entity `json:\"entities\"`\n\tRelations []Relation `json:\"relations\"`\n\tProperties []Property `json:\"properties\"`\n}\n\ntype Result struct {\n\tResp Response `json:\"response\"`\n\tTime float64 `json:\"time\"`\n\tOk bool `json:\"ok\"`\n}\n\n\/\/ OneLine gives a pretty one-line output of the sentence\n\/\/ formatted as Stanford Parser tagging\n\/\/ (http:\/\/nlp.stanford.edu:8080\/parser\/index.jsp).\nfunc (s *Sentense) OneLine() string {\n\tstr := \"$\"\n\tfor _, w := range s.Words {\n\t\tstr += (\" \" + w.Token + \"\/\" + w.PartOfSpeech)\n\t}\n\treturn str\n}\n\n\/\/ Body is the JSON request payload.\ntype Body struct {\n\tExtractors string `json:\"extractors\"`\n\tText string `json:\"text\"`\n}\n<commit_msg>fix (nlp): sentence typo<commit_after>package textrazor\n\nconst (\n\tEntitiesExtractor = \"entities\"\n\tWordsExtractor = \"words\"\n\tRelationsExtractor = \"relations\"\n\n\t\/\/ for now just these three\n\tEntitiesWordsRelations = \"entities,words,relations\"\n)\n\n\/\/ JSON response structs for the \"entities,words\" extractors;\n\/\/ with the help from https:\/\/mholt.github.io\/json-to-go\/.\n\ntype Word struct {\n\tPosition int `json:\"position\"`\n\tStartingPos int `json:\"startingPos\"`\n\tEndingPos int `json:\"endingPos\"`\n\tStem string `json:\"stem\"`\n\tLemma string `json:\"lemma\"`\n\tToken string `json:\"token\"`\n\tPartOfSpeech string `json:\"partOfSpeech\"`\n\tParentPosition int `json:\"parentPosition,omitempty\"`\n\tRelationToParent string `json:\"relationToParent,omitempty\"`\n}\n\ntype Sentence struct {\n\tPosition int `json:\"position\"`\n\tWords []Word `json:\"words\"`\n}\n\ntype Entity struct {\n\tID int `json:\"id\"`\n\tType []string `json:\"type,omitempty\"`\n\tMatchingTokens []int `json:\"matchingTokens\"`\n\tEntityID string `json:\"entityId\"`\n\tFreebaseTypes []string `json:\"freebaseTypes,omitempty\"`\n\tConfidenceScore float64 `json:\"confidenceScore\"`\n\tWikiLink string `json:\"wikiLink\"`\n\tMatchedText string `json:\"matchedText\"`\n\tFreebaseID string `json:\"freebaseId,omitempty\"`\n\tRelevanceScore int `json:\"relevanceScore\"`\n\tEntityEnglishID string `json:\"entityEnglishId\"`\n\tStartingPos int `json:\"startingPos\"`\n\tEndingPos int `json:\"endingPos\"`\n\tWikidataID string `json:\"wikidataId,omitempty\"`\n}\n\ntype Param struct {\n\tRelation string `json:\"relation\"`\n\tWordPositions []int `json:\"wordPositions\"`\n}\n\ntype Relation struct {\n\tID int `json:\"id\"`\n\tWordPositions []int `json:\"wordPositions\"`\n\tParams []Param `json:\"params\"`\n}\n\ntype Property struct {\n\tID int `json:\"id\"`\n\tWordPositions []int `json:\"wordPositions\"`\n\tPropertyPositions []int `json:\"propertyPositions\"`\n}\n\ntype Response struct {\n\tSentences []Sentence `json:\"sentences\"`\n\tLanguage string `json:\"language\"`\n\tLanguageIsReliable bool `json:\"languageIsReliable\"`\n\tEntities []Entity `json:\"entities\"`\n\tRelations []Relation `json:\"relations\"`\n\tProperties []Property `json:\"properties\"`\n}\n\ntype Result struct {\n\tResp Response `json:\"response\"`\n\tTime float64 `json:\"time\"`\n\tOk bool `json:\"ok\"`\n}\n\n\/\/ OneLine gives a pretty one-line output of the sentence\n\/\/ formatted as Stanford Parser tagging\n\/\/ (http:\/\/nlp.stanford.edu:8080\/parser\/index.jsp).\nfunc (s *Sentence) OneLine() string {\n\tstr := \"$\"\n\tfor _, w := range s.Words {\n\t\tstr += (\" \" + w.Token + \"\/\" + w.PartOfSpeech)\n\t}\n\treturn str\n}\n\n\/\/ Body is the JSON request payload.\ntype Body struct {\n\tExtractors string `json:\"extractors\"`\n\tText string `json:\"text\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package abstract_sql\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\nfunc (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\n\tres, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)\n\tif err != nil {\n\t\tif !strings.Contains(strings.ToLower(err.Error()), \"duplicate\") {\n\t\t\treturn fmt.Errorf(\"kv insert: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ now the insert failed possibly due to duplication constraints\n\tglog.V(1).Infof(\"kv insert falls back to update: %s\", err)\n\n\tres, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv upsert: %s\", err)\n\t}\n\n\t_, err = res.RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv upsert no rows affected: %s\", err)\n\t}\n\treturn nil\n\n}\n\nfunc (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\trow := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)\n\n\terr = row.Scan(&value)\n\n\tif err == sql.ErrNoRows {\n\t\treturn nil, filer.ErrKvNotFound\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kv get: %v\", err)\n\t}\n\n\treturn\n}\n\nfunc (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\n\tres, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv delete: %s\", err)\n\t}\n\n\t_, err = res.RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv delete no rows affected: %s\", err)\n\t}\n\n\treturn nil\n\n}\n\nfunc genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {\n\tfor len(key) < 8 {\n\t\tkey = append(key, 0)\n\t}\n\n\tdirHash = int64(util.BytesToUint64(key[:8]))\n\tdirStr = base64.StdEncoding.EncodeToString(key[:8])\n\tname = base64.StdEncoding.EncodeToString(key[8:])\n\n\treturn\n}\n<commit_msg>sql put kv: avoid unnecessary update<commit_after>package abstract_sql\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n\t\"strings\"\n)\n\nfunc (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\n\tres, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tif !strings.Contains(strings.ToLower(err.Error()), \"duplicate\") {\n\t\treturn fmt.Errorf(\"kv insert: %s\", err)\n\t}\n\n\t\/\/ now the insert failed possibly due to duplication constraints\n\tglog.V(1).Infof(\"kv insert falls back to update: %s\", err)\n\n\tres, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv upsert: %s\", err)\n\t}\n\n\t_, err = res.RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv upsert no rows affected: %s\", err)\n\t}\n\treturn nil\n\n}\n\nfunc (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\trow := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)\n\n\terr = row.Scan(&value)\n\n\tif err == sql.ErrNoRows {\n\t\treturn nil, filer.ErrKvNotFound\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kv get: %v\", err)\n\t}\n\n\treturn\n}\n\nfunc (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {\n\n\tdirStr, dirHash, name := genDirAndName(key)\n\n\tres, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv delete: %s\", err)\n\t}\n\n\t_, err = res.RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kv delete no rows affected: %s\", err)\n\t}\n\n\treturn nil\n\n}\n\nfunc genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {\n\tfor len(key) < 8 {\n\t\tkey = append(key, 0)\n\t}\n\n\tdirHash = int64(util.BytesToUint64(key[:8]))\n\tdirStr = base64.StdEncoding.EncodeToString(key[:8])\n\tname = base64.StdEncoding.EncodeToString(key[8:])\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package controller\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\tct \"github.com\/flynn\/flynn-controller\/types\"\n\t\"github.com\/flynn\/flynn-controller\/utils\"\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-discoverd\/dialer\"\n\t\"github.com\/flynn\/go-flynn\/pinned\"\n\t\"github.com\/flynn\/rpcplus\"\n\t\"github.com\/flynn\/strowger\/types\"\n)\n\nfunc NewClient(uri, key string) (*Client, error) {\n\tif uri == \"\" {\n\t\turi = \"discoverd+http:\/\/flynn-controller\"\n\t}\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\turl: uri,\n\t\taddr: u.Host,\n\t\thttp: http.DefaultClient,\n\t\tkey: key,\n\t}\n\tif u.Scheme == \"discoverd+http\" {\n\t\tif err := discoverd.Connect(\"\"); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdialer := dialer.New(discoverd.DefaultClient, nil)\n\t\tc.dial = dialer.Dial\n\t\tc.dialClose = dialer\n\t\tc.http = &http.Client{Transport: &http.Transport{Dial: c.dial}}\n\t\tu.Scheme = \"http\"\n\t\tc.url = u.String()\n\t}\n\treturn c, nil\n}\n\nfunc NewClientWithPin(uri, key string, pin []byte) (*Client, error) {\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\tdial: (&pinned.Config{Pin: pin}).Dial,\n\t\tkey: key,\n\t}\n\tif _, port, _ := net.SplitHostPort(u.Host); port == \"\" {\n\t\tu.Host += \":443\"\n\t}\n\tc.addr = u.Host\n\tu.Scheme = \"http\"\n\tc.url = u.String()\n\tc.http = &http.Client{Transport: &http.Transport{Dial: c.dial}}\n\treturn c, nil\n}\n\ntype Client struct {\n\turl string\n\tkey string\n\taddr string\n\thttp *http.Client\n\n\tdial rpcplus.DialFunc\n\tdialClose io.Closer\n}\n\nfunc (c *Client) Close() error {\n\tif c.dialClose != nil {\n\t\tc.dialClose.Close()\n\t}\n\treturn nil\n}\n\nvar ErrNotFound = errors.New(\"controller: not found\")\n\nfunc toJSON(v interface{}) (io.Reader, error) {\n\tdata, err := json.Marshal(v)\n\treturn bytes.NewBuffer(data), err\n}\n\nfunc (c *Client) rawReq(method, path string, contentType string, in, out interface{}) (*http.Response, error) {\n\tvar payload io.Reader\n\tswitch v := in.(type) {\n\tcase io.Reader:\n\t\tpayload = v\n\tcase nil:\n\tdefault:\n\t\tvar err error\n\t\tpayload, err = toJSON(in)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, c.url+path, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif contentType == \"\" {\n\t\tcontentType = \"application\/json\"\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treq.SetBasicAuth(\"\", c.key)\n\tres, err := c.http.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == 404 {\n\t\tres.Body.Close()\n\t\treturn res, ErrNotFound\n\t}\n\tif res.StatusCode != 200 {\n\t\tres.Body.Close()\n\t\treturn res, &url.Error{\n\t\t\tOp: req.Method,\n\t\t\tURL: req.URL.String(),\n\t\t\tErr: fmt.Errorf(\"controller: unexpected status %d\", res.StatusCode),\n\t\t}\n\t}\n\tif out != nil {\n\t\tdefer res.Body.Close()\n\t\treturn res, json.NewDecoder(res.Body).Decode(out)\n\t}\n\treturn res, nil\n}\n\nfunc (c *Client) send(method, path string, in, out interface{}) error {\n\t_, err := c.rawReq(method, path, \"\", in, out)\n\treturn err\n}\n\nfunc (c *Client) put(path string, in, out interface{}) error {\n\treturn c.send(\"PUT\", path, in, out)\n}\n\nfunc (c *Client) post(path string, in, out interface{}) error {\n\treturn c.send(\"POST\", path, in, out)\n}\n\nfunc (c *Client) get(path string, out interface{}) error {\n\t_, err := c.rawReq(\"GET\", path, \"\", nil, out)\n\treturn err\n}\n\nfunc (c *Client) delete(path string) error {\n\tres, err := c.rawReq(\"DELETE\", path, \"\", nil, nil)\n\tif err == nil {\n\t\tres.Body.Close()\n\t}\n\treturn err\n}\n\nfunc (c *Client) StreamFormations(since *time.Time) (<-chan *ct.ExpandedFormation, *error) {\n\tif since == nil {\n\t\ts := time.Unix(0, 0)\n\t\tsince = &s\n\t}\n\tdial := c.dial\n\tif dial == nil {\n\t\tdial = net.Dial\n\t}\n\tch := make(chan *ct.ExpandedFormation)\n\tconn, err := dial(\"tcp\", c.addr)\n\tif err != nil {\n\t\tclose(ch)\n\t\treturn ch, &err\n\t}\n\theader := make(http.Header)\n\theader.Set(\"Authorization\", \"Basic \"+base64.StdEncoding.EncodeToString([]byte(\":\"+c.key)))\n\tclient, err := rpcplus.NewHTTPClient(conn, rpcplus.DefaultRPCPath, header)\n\tif err != nil {\n\t\tclose(ch)\n\t\treturn ch, &err\n\t}\n\treturn ch, &client.StreamGo(\"Controller.StreamFormations\", since, ch).Error\n}\n\nfunc (c *Client) CreateArtifact(artifact *ct.Artifact) error {\n\treturn c.post(\"\/artifacts\", artifact, artifact)\n}\n\nfunc (c *Client) CreateRelease(release *ct.Release) error {\n\treturn c.post(\"\/releases\", release, release)\n}\n\nfunc (c *Client) CreateApp(app *ct.App) error {\n\treturn c.post(\"\/apps\", app, app)\n}\n\nfunc (c *Client) CreateProvider(provider *ct.Provider) error {\n\treturn c.post(\"\/providers\", provider, provider)\n}\n\nfunc (c *Client) ProvisionResource(req *ct.ResourceReq) (*ct.Resource, error) {\n\tif req.ProviderID == \"\" {\n\t\treturn nil, errors.New(\"controller: missing provider id\")\n\t}\n\tres := &ct.Resource{}\n\terr := c.post(fmt.Sprintf(\"\/providers\/%s\/resources\", req.ProviderID), req, res)\n\treturn res, err\n}\n\nfunc (c *Client) PutResource(resource *ct.Resource) error {\n\tif resource.ID == \"\" || resource.ProviderID == \"\" {\n\t\treturn errors.New(\"controller: missing id and\/or provider id\")\n\t}\n\treturn c.put(fmt.Sprintf(\"\/providers\/%s\/resources\/%s\", resource.ProviderID, resource.ID), resource, resource)\n}\n\nfunc (c *Client) PutFormation(formation *ct.Formation) error {\n\tif formation.AppID == \"\" || formation.ReleaseID == \"\" {\n\t\treturn errors.New(\"controller: missing app id and\/or release id\")\n\t}\n\treturn c.put(fmt.Sprintf(\"\/apps\/%s\/formations\/%s\", formation.AppID, formation.ReleaseID), formation, formation)\n}\n\nfunc (c *Client) SetAppRelease(appID, releaseID string) error {\n\treturn c.put(fmt.Sprintf(\"\/apps\/%s\/release\", appID), &ct.Release{ID: releaseID}, nil)\n}\n\nfunc (c *Client) GetAppRelease(appID string) (*ct.Release, error) {\n\trelease := &ct.Release{}\n\treturn release, c.get(fmt.Sprintf(\"\/apps\/%s\/release\", appID), release)\n}\n\nfunc (c *Client) CreateRoute(appID string, route *strowger.Route) error {\n\treturn c.post(fmt.Sprintf(\"\/apps\/%s\/routes\", appID), route, route)\n}\n\nfunc (c *Client) GetFormation(appID, releaseID string) (*ct.Formation, error) {\n\tformation := &ct.Formation{}\n\treturn formation, c.get(fmt.Sprintf(\"\/apps\/%s\/formations\/%s\", appID, releaseID), formation)\n}\n\nfunc (c *Client) GetRelease(releaseID string) (*ct.Release, error) {\n\trelease := &ct.Release{}\n\treturn release, c.get(fmt.Sprintf(\"\/releases\/%s\", releaseID), release)\n}\n\nfunc (c *Client) GetArtifact(artifactID string) (*ct.Artifact, error) {\n\tartifact := &ct.Artifact{}\n\treturn artifact, c.get(fmt.Sprintf(\"\/artifacts\/%s\", artifactID), artifact)\n}\n\nfunc (c *Client) GetApp(appID string) (*ct.App, error) {\n\tapp := &ct.App{}\n\treturn app, c.get(fmt.Sprintf(\"\/apps\/%s\", appID), app)\n}\n\nfunc (c *Client) GetJobLog(appID, jobID string) (io.ReadCloser, error) {\n\tres, err := c.rawReq(\"GET\", fmt.Sprintf(\"\/apps\/%s\/jobs\/%s\/log\", appID, jobID), \"\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Body, nil\n}\n\nfunc (c *Client) RunJobAttached(appID string, job *ct.NewJob) (utils.ReadWriteCloser, error) {\n\tdata, err := toJSON(job)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s\/apps\/%s\/jobs\", c.url, appID), data)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\treq.Header.Set(\"Accept\", \"application\/vnd.flynn.attach\")\n\treq.SetBasicAuth(\"\", c.key)\n\tvar dial rpcplus.DialFunc\n\tif c.dial != nil {\n\t\tdial = c.dial\n\t}\n\tres, rwc, err := utils.HijackRequest(req, dial)\n\tif err != nil {\n\t\tres.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn rwc, nil\n}\n\nfunc (c *Client) RunJobDetached(appID string, req *ct.NewJob) (*ct.Job, error) {\n\tjob := &ct.Job{}\n\treturn job, c.post(fmt.Sprintf(\"\/apps\/%s\/jobs\", appID), req, job)\n}\n\nfunc (c *Client) JobList(appID string) ([]*ct.Job, error) {\n\tvar jobs []*ct.Job\n\treturn jobs, c.get(fmt.Sprintf(\"\/apps\/%s\/jobs\", appID), &jobs)\n}\n\nfunc (c *Client) KeyList() ([]*ct.Key, error) {\n\tvar keys []*ct.Key\n\treturn keys, c.get(\"\/keys\", &keys)\n}\n\nfunc (c *Client) CreateKey(pubKey string) (*ct.Key, error) {\n\tkey := &ct.Key{}\n\treturn key, c.post(\"\/keys\", &ct.Key{Key: pubKey}, key)\n}\n\nfunc (c *Client) DeleteKey(id string) error {\n\treturn c.delete(\"\/keys\/\" + strings.Replace(id, \":\", \"\", -1))\n}\n\nfunc (c *Client) ProviderList() ([]*ct.Provider, error) {\n\tvar providers []*ct.Provider\n\treturn providers, c.get(\"\/providers\", &providers)\n}\n<commit_msg>controller: Add client method to get route list from controller<commit_after>package controller\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\tct \"github.com\/flynn\/flynn-controller\/types\"\n\t\"github.com\/flynn\/flynn-controller\/utils\"\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-discoverd\/dialer\"\n\t\"github.com\/flynn\/go-flynn\/pinned\"\n\t\"github.com\/flynn\/rpcplus\"\n\t\"github.com\/flynn\/strowger\/types\"\n)\n\nfunc NewClient(uri, key string) (*Client, error) {\n\tif uri == \"\" {\n\t\turi = \"discoverd+http:\/\/flynn-controller\"\n\t}\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\turl: uri,\n\t\taddr: u.Host,\n\t\thttp: http.DefaultClient,\n\t\tkey: key,\n\t}\n\tif u.Scheme == \"discoverd+http\" {\n\t\tif err := discoverd.Connect(\"\"); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdialer := dialer.New(discoverd.DefaultClient, nil)\n\t\tc.dial = dialer.Dial\n\t\tc.dialClose = dialer\n\t\tc.http = &http.Client{Transport: &http.Transport{Dial: c.dial}}\n\t\tu.Scheme = \"http\"\n\t\tc.url = u.String()\n\t}\n\treturn c, nil\n}\n\nfunc NewClientWithPin(uri, key string, pin []byte) (*Client, error) {\n\tu, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\tdial: (&pinned.Config{Pin: pin}).Dial,\n\t\tkey: key,\n\t}\n\tif _, port, _ := net.SplitHostPort(u.Host); port == \"\" {\n\t\tu.Host += \":443\"\n\t}\n\tc.addr = u.Host\n\tu.Scheme = \"http\"\n\tc.url = u.String()\n\tc.http = &http.Client{Transport: &http.Transport{Dial: c.dial}}\n\treturn c, nil\n}\n\ntype Client struct {\n\turl string\n\tkey string\n\taddr string\n\thttp *http.Client\n\n\tdial rpcplus.DialFunc\n\tdialClose io.Closer\n}\n\nfunc (c *Client) Close() error {\n\tif c.dialClose != nil {\n\t\tc.dialClose.Close()\n\t}\n\treturn nil\n}\n\nvar ErrNotFound = errors.New(\"controller: not found\")\n\nfunc toJSON(v interface{}) (io.Reader, error) {\n\tdata, err := json.Marshal(v)\n\treturn bytes.NewBuffer(data), err\n}\n\nfunc (c *Client) rawReq(method, path string, contentType string, in, out interface{}) (*http.Response, error) {\n\tvar payload io.Reader\n\tswitch v := in.(type) {\n\tcase io.Reader:\n\t\tpayload = v\n\tcase nil:\n\tdefault:\n\t\tvar err error\n\t\tpayload, err = toJSON(in)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, c.url+path, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif contentType == \"\" {\n\t\tcontentType = \"application\/json\"\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treq.SetBasicAuth(\"\", c.key)\n\tres, err := c.http.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == 404 {\n\t\tres.Body.Close()\n\t\treturn res, ErrNotFound\n\t}\n\tif res.StatusCode != 200 {\n\t\tres.Body.Close()\n\t\treturn res, &url.Error{\n\t\t\tOp: req.Method,\n\t\t\tURL: req.URL.String(),\n\t\t\tErr: fmt.Errorf(\"controller: unexpected status %d\", res.StatusCode),\n\t\t}\n\t}\n\tif out != nil {\n\t\tdefer res.Body.Close()\n\t\treturn res, json.NewDecoder(res.Body).Decode(out)\n\t}\n\treturn res, nil\n}\n\nfunc (c *Client) send(method, path string, in, out interface{}) error {\n\t_, err := c.rawReq(method, path, \"\", in, out)\n\treturn err\n}\n\nfunc (c *Client) put(path string, in, out interface{}) error {\n\treturn c.send(\"PUT\", path, in, out)\n}\n\nfunc (c *Client) post(path string, in, out interface{}) error {\n\treturn c.send(\"POST\", path, in, out)\n}\n\nfunc (c *Client) get(path string, out interface{}) error {\n\t_, err := c.rawReq(\"GET\", path, \"\", nil, out)\n\treturn err\n}\n\nfunc (c *Client) delete(path string) error {\n\tres, err := c.rawReq(\"DELETE\", path, \"\", nil, nil)\n\tif err == nil {\n\t\tres.Body.Close()\n\t}\n\treturn err\n}\n\nfunc (c *Client) StreamFormations(since *time.Time) (<-chan *ct.ExpandedFormation, *error) {\n\tif since == nil {\n\t\ts := time.Unix(0, 0)\n\t\tsince = &s\n\t}\n\tdial := c.dial\n\tif dial == nil {\n\t\tdial = net.Dial\n\t}\n\tch := make(chan *ct.ExpandedFormation)\n\tconn, err := dial(\"tcp\", c.addr)\n\tif err != nil {\n\t\tclose(ch)\n\t\treturn ch, &err\n\t}\n\theader := make(http.Header)\n\theader.Set(\"Authorization\", \"Basic \"+base64.StdEncoding.EncodeToString([]byte(\":\"+c.key)))\n\tclient, err := rpcplus.NewHTTPClient(conn, rpcplus.DefaultRPCPath, header)\n\tif err != nil {\n\t\tclose(ch)\n\t\treturn ch, &err\n\t}\n\treturn ch, &client.StreamGo(\"Controller.StreamFormations\", since, ch).Error\n}\n\nfunc (c *Client) CreateArtifact(artifact *ct.Artifact) error {\n\treturn c.post(\"\/artifacts\", artifact, artifact)\n}\n\nfunc (c *Client) CreateRelease(release *ct.Release) error {\n\treturn c.post(\"\/releases\", release, release)\n}\n\nfunc (c *Client) CreateApp(app *ct.App) error {\n\treturn c.post(\"\/apps\", app, app)\n}\n\nfunc (c *Client) CreateProvider(provider *ct.Provider) error {\n\treturn c.post(\"\/providers\", provider, provider)\n}\n\nfunc (c *Client) ProvisionResource(req *ct.ResourceReq) (*ct.Resource, error) {\n\tif req.ProviderID == \"\" {\n\t\treturn nil, errors.New(\"controller: missing provider id\")\n\t}\n\tres := &ct.Resource{}\n\terr := c.post(fmt.Sprintf(\"\/providers\/%s\/resources\", req.ProviderID), req, res)\n\treturn res, err\n}\n\nfunc (c *Client) PutResource(resource *ct.Resource) error {\n\tif resource.ID == \"\" || resource.ProviderID == \"\" {\n\t\treturn errors.New(\"controller: missing id and\/or provider id\")\n\t}\n\treturn c.put(fmt.Sprintf(\"\/providers\/%s\/resources\/%s\", resource.ProviderID, resource.ID), resource, resource)\n}\n\nfunc (c *Client) PutFormation(formation *ct.Formation) error {\n\tif formation.AppID == \"\" || formation.ReleaseID == \"\" {\n\t\treturn errors.New(\"controller: missing app id and\/or release id\")\n\t}\n\treturn c.put(fmt.Sprintf(\"\/apps\/%s\/formations\/%s\", formation.AppID, formation.ReleaseID), formation, formation)\n}\n\nfunc (c *Client) SetAppRelease(appID, releaseID string) error {\n\treturn c.put(fmt.Sprintf(\"\/apps\/%s\/release\", appID), &ct.Release{ID: releaseID}, nil)\n}\n\nfunc (c *Client) GetAppRelease(appID string) (*ct.Release, error) {\n\trelease := &ct.Release{}\n\treturn release, c.get(fmt.Sprintf(\"\/apps\/%s\/release\", appID), release)\n}\n\nfunc (c *Client) RouteList(appID string) ([]*strowger.Route, error) {\n\tvar routes []*strowger.Route\n\treturn routes, c.get(fmt.Sprintf(\"\/apps\/%s\/routes\", appID), &routes)\n}\n\nfunc (c *Client) CreateRoute(appID string, route *strowger.Route) error {\n\treturn c.post(fmt.Sprintf(\"\/apps\/%s\/routes\", appID), route, route)\n}\n\nfunc (c *Client) GetFormation(appID, releaseID string) (*ct.Formation, error) {\n\tformation := &ct.Formation{}\n\treturn formation, c.get(fmt.Sprintf(\"\/apps\/%s\/formations\/%s\", appID, releaseID), formation)\n}\n\nfunc (c *Client) GetRelease(releaseID string) (*ct.Release, error) {\n\trelease := &ct.Release{}\n\treturn release, c.get(fmt.Sprintf(\"\/releases\/%s\", releaseID), release)\n}\n\nfunc (c *Client) GetArtifact(artifactID string) (*ct.Artifact, error) {\n\tartifact := &ct.Artifact{}\n\treturn artifact, c.get(fmt.Sprintf(\"\/artifacts\/%s\", artifactID), artifact)\n}\n\nfunc (c *Client) GetApp(appID string) (*ct.App, error) {\n\tapp := &ct.App{}\n\treturn app, c.get(fmt.Sprintf(\"\/apps\/%s\", appID), app)\n}\n\nfunc (c *Client) GetJobLog(appID, jobID string) (io.ReadCloser, error) {\n\tres, err := c.rawReq(\"GET\", fmt.Sprintf(\"\/apps\/%s\/jobs\/%s\/log\", appID, jobID), \"\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Body, nil\n}\n\nfunc (c *Client) RunJobAttached(appID string, job *ct.NewJob) (utils.ReadWriteCloser, error) {\n\tdata, err := toJSON(job)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s\/apps\/%s\/jobs\", c.url, appID), data)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\treq.Header.Set(\"Accept\", \"application\/vnd.flynn.attach\")\n\treq.SetBasicAuth(\"\", c.key)\n\tvar dial rpcplus.DialFunc\n\tif c.dial != nil {\n\t\tdial = c.dial\n\t}\n\tres, rwc, err := utils.HijackRequest(req, dial)\n\tif err != nil {\n\t\tres.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn rwc, nil\n}\n\nfunc (c *Client) RunJobDetached(appID string, req *ct.NewJob) (*ct.Job, error) {\n\tjob := &ct.Job{}\n\treturn job, c.post(fmt.Sprintf(\"\/apps\/%s\/jobs\", appID), req, job)\n}\n\nfunc (c *Client) JobList(appID string) ([]*ct.Job, error) {\n\tvar jobs []*ct.Job\n\treturn jobs, c.get(fmt.Sprintf(\"\/apps\/%s\/jobs\", appID), &jobs)\n}\n\nfunc (c *Client) KeyList() ([]*ct.Key, error) {\n\tvar keys []*ct.Key\n\treturn keys, c.get(\"\/keys\", &keys)\n}\n\nfunc (c *Client) CreateKey(pubKey string) (*ct.Key, error) {\n\tkey := &ct.Key{}\n\treturn key, c.post(\"\/keys\", &ct.Key{Key: pubKey}, key)\n}\n\nfunc (c *Client) DeleteKey(id string) error {\n\treturn c.delete(\"\/keys\/\" + strings.Replace(id, \":\", \"\", -1))\n}\n\nfunc (c *Client) ProviderList() ([]*ct.Provider, error) {\n\tvar providers []*ct.Provider\n\treturn providers, c.get(\"\/providers\", &providers)\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/flynn\/go-tuf\/data\"\n\t\"github.com\/flynn\/go-tuf\/keys\"\n\t\"github.com\/flynn\/go-tuf\/signed\"\n\t\"github.com\/flynn\/go-tuf\/util\"\n)\n\nvar (\n\tErrNotFound = errors.New(\"tuf: file not found\")\n\tErrLatest = errors.New(\"tuf: the current version is the latest\")\n\tErrWrongSize = errors.New(\"tuf: unexpected file size\")\n\tErrNoRootKeys = errors.New(\"tuf: no root keys found in local meta store\")\n\tErrChecksumFailed = errors.New(\"tuf: checksum failed\")\n)\n\n\/\/ LocalStore is local storage for downloaded top-level metadata.\ntype LocalStore interface {\n\t\/\/ GetMeta returns top-level metadata from local storage. The keys are\n\t\/\/ in the form `ROLE.json`, with ROLE being a valid top-level role.\n\tGetMeta() (map[string]json.RawMessage, error)\n\n\t\/\/ SetMeta persists the given top-level metadata in local storage, the\n\t\/\/ name taking the same format as the keys returned by GetMeta.\n\tSetMeta(name string, meta json.RawMessage) error\n}\n\n\/\/ RemoteStore downloads top-level metadata and target files from a remote\n\/\/ repository.\ntype RemoteStore interface {\n\t\/\/ Get downloads the given file from remote storage.\n\t\/\/\n\t\/\/ `file` should be relative to the root of remote repository (e.g.\n\t\/\/ \"root.json\" or \"targets\/path\/to\/target\/file.txt\")\n\tGet(file string, size int64) (io.ReadCloser, error)\n}\n\n\/\/ Client provides methods for fetching updates from a remote repository and\n\/\/ downloading remote target files.\ntype Client struct {\n\tlocal LocalStore\n\tremote RemoteStore\n\n\t\/\/ The following four fields represent decoded metatdata from\n\t\/\/ local storage\n\troot *data.Root\n\ttargets *data.Targets\n\tsnapshot *data.Snapshot\n\ttimestamp *data.Timestamp\n\n\t\/\/ localMeta is the raw metadata from local storage and is used to\n\t\/\/ check whether remote metadata is present locally\n\tlocalMeta map[string]json.RawMessage\n\n\t\/\/ db is a key DB used for verifying metadata\n\tdb *keys.DB\n}\n\nfunc NewClient(local LocalStore, remote RemoteStore) *Client {\n\treturn &Client{\n\t\tlocal: local,\n\t\tremote: remote,\n\t}\n}\n\n\/\/ Init initializes a local repository.\n\/\/\n\/\/ The latest root.json is fetched from remote storage, verified using rootKeys\n\/\/ and threshold, and then saved in local storage. It is expected that rootKeys\n\/\/ were securely distributed with the software being updated.\nfunc (c *Client) Init(rootKeys []*data.Key, threshold int) error {\n\trootJSON, err := c.downloadMeta(\"root.json\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.db = keys.NewDB()\n\trootKeyIDs := make([]string, len(rootKeys))\n\tfor i, key := range rootKeys {\n\t\tid := key.ID()\n\t\trootKeyIDs[i] = id\n\t\tif err := c.db.AddKey(id, key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trole := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs}\n\tif err := c.db.AddRole(\"root\", role); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.verifyRoot(rootJSON); err != nil {\n\t\treturn err\n\t}\n\n\treturn c.local.SetMeta(\"root.json\", rootJSON)\n}\n\n\/\/ Update downloads and verifies remote metadata and returns updated targets.\n\/\/\n\/\/ It performs the update part of \"The client application\" workflow from\n\/\/ section 5.1 of the TUF spec:\n\/\/\n\/\/ https:\/\/github.com\/theupdateframework\/tuf\/blob\/v0.9.9\/docs\/tuf-spec.txt#L714\nfunc (c *Client) Update() (data.Files, error) {\n\t\/\/ Always start the update using local metadata\n\tif err := c.getLocalMeta(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO: If we get an invalid signature downloading timestamp.json\n\t\/\/ or snapshot.json, download root.json and start again.\n\n\t\/\/ Get timestamp.json, extract snapshot.json file meta and save the\n\t\/\/ timestamp.json locally\n\ttimestampJSON, err := c.downloadMeta(\"timestamp.json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsnapshotMeta, err := c.decodeTimestamp(timestampJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := c.local.SetMeta(\"timestamp.json\", timestampJSON); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return ErrLatest if we already have the latest snapshot.json\n\tif c.hasMeta(\"snapshot.json\", snapshotMeta) {\n\t\treturn nil, ErrLatest\n\t}\n\n\t\/\/ Get snapshot.json, then extract root.json and targets.json file meta.\n\t\/\/\n\t\/\/ The snapshot.json is only saved locally after checking root.json and\n\t\/\/ targets.json so that it will be re-downloaded on subsequent updates\n\t\/\/ if this update fails.\n\tsnapshotJSON, err := c.downloadMeta(\"snapshot.json\", &snapshotMeta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trootMeta, targetsMeta, err := c.decodeSnapshot(snapshotJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If we don't have the root.json, download it, save it in local\n\t\/\/ storage and restart the update\n\tif !c.hasMeta(\"root.json\", rootMeta) {\n\t\trootJSON, err := c.downloadMeta(\"root.json\", &rootMeta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.verifyRoot(rootJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.local.SetMeta(\"root.json\", rootJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.Update()\n\t}\n\n\t\/\/ If we don't have the targets.json, download it, determine updated\n\t\/\/ targets and save targets.json in local storage\n\tvar updatedTargets data.Files\n\tif !c.hasMeta(\"targets.json\", targetsMeta) {\n\t\ttargetsJSON, err := c.downloadMeta(\"targets.json\", &targetsMeta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tupdatedTargets, err = c.decodeTargets(targetsJSON)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.local.SetMeta(\"targets.json\", targetsJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Save the snapshot.json now it has been processed successfully\n\tif err := c.local.SetMeta(\"snapshot.json\", snapshotJSON); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn updatedTargets, nil\n}\n\n\/\/ getLocalMeta decodes and verifies metadata from local storage.\n\/\/\n\/\/ The verification of local files is purely for consistency, if an attacker\n\/\/ has compromised the local storage, there is no guarantee it can be trusted.\nfunc (c *Client) getLocalMeta() error {\n\tmeta, err := c.local.GetMeta()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif rootJSON, ok := meta[\"root.json\"]; ok {\n\t\t\/\/ unmarshal root.json without verifying as we need the root\n\t\t\/\/ keys first\n\t\ts := &data.Signed{}\n\t\tif err := json.Unmarshal(rootJSON, s); err != nil {\n\t\t\treturn err\n\t\t}\n\t\troot := &data.Root{}\n\t\tif err := json.Unmarshal(s.Signed, root); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdb := keys.NewDB()\n\t\tfor id, k := range root.Keys {\n\t\t\tif err := db.AddKey(id, k); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor name, role := range root.Roles {\n\t\t\tif err := db.AddRole(name, role); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := signed.Verify(s, \"root\", 0, db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.root = root\n\t\tc.db = db\n\t} else {\n\t\treturn ErrNoRootKeys\n\t}\n\n\tif snapshotJSON, ok := meta[\"snapshot.json\"]; ok {\n\t\tsnapshot := &data.Snapshot{}\n\t\tif err := signed.Unmarshal(snapshotJSON, snapshot, \"snapshot\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.snapshot = snapshot\n\t}\n\n\tif targetsJSON, ok := meta[\"targets.json\"]; ok {\n\t\ttargets := &data.Targets{}\n\t\tif err := signed.Unmarshal(targetsJSON, targets, \"targets\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.targets = targets\n\t}\n\n\tif timestampJSON, ok := meta[\"timestamp.json\"]; ok {\n\t\ttimestamp := &data.Timestamp{}\n\t\tif err := signed.Unmarshal(timestampJSON, timestamp, \"timestamp\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.timestamp = timestamp\n\t}\n\n\tc.localMeta = meta\n\treturn nil\n}\n\n\/\/ maxMetaSize is the maximum number of bytes that will be downloaded when\n\/\/ getting remote metadata without knowing it's length.\nconst maxMetaSize = 50 * 1024\n\n\/\/ downloadMeta downloads top-level metadata from remote storage and verifies\n\/\/ it using the given file metadata.\nfunc (c *Client) downloadMeta(name string, m *data.FileMeta) ([]byte, error) {\n\tvar size int64\n\tif m != nil {\n\t\tsize = m.Length\n\t}\n\tr, err := c.remote.Get(name, size)\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\treturn nil, ErrMissingRemoteMetadata{name}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\t\/\/ if m is nil (e.g. when downloading timestamp.json, which has unknown\n\t\/\/ size), just read the stream (up to a sane maximum) and return it\n\tif m == nil {\n\t\tb, err := ioutil.ReadAll(io.LimitReader(r, maxMetaSize))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\t\/\/ read exactly m.Length bytes from the stream\n\tbuf := make([]byte, m.Length)\n\tif _, err := io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ verify buf matches given metadata\n\tmeta, err := util.GenerateFileMeta(bytes.NewReader(buf))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := util.FileMetaEqual(meta, *m); err != nil {\n\t\treturn nil, ErrDownloadFailed{name, err}\n\t}\n\treturn buf, nil\n}\n\n\/\/ verifyRoot verifies root metadata.\nfunc (c *Client) verifyRoot(b json.RawMessage) error {\n\tvar minVer int\n\tif c.root != nil {\n\t\tminVer = c.root.Version\n\t}\n\ts := &data.Signed{}\n\tif err := json.Unmarshal(b, s); err != nil {\n\t\treturn err\n\t}\n\treturn signed.Verify(s, \"root\", minVer, c.db)\n}\n\n\/\/ decodeSnapshot decodes and verifies snapshot metadata, and returns the new\n\/\/ root and targets file meta.\nfunc (c *Client) decodeSnapshot(b json.RawMessage) (data.FileMeta, data.FileMeta, error) {\n\tvar minVer int\n\tif c.snapshot != nil {\n\t\tminVer = c.snapshot.Version\n\t}\n\tsnapshot := &data.Snapshot{}\n\tif err := signed.Unmarshal(b, snapshot, \"snapshot\", minVer, c.db); err != nil {\n\t\treturn data.FileMeta{}, data.FileMeta{}, err\n\t}\n\treturn snapshot.Meta[\"root.json\"], snapshot.Meta[\"targets.json\"], nil\n}\n\n\/\/ decodeTargets decodes and verifies targets metadata, and returns updated\n\/\/ targets.\nfunc (c *Client) decodeTargets(b json.RawMessage) (data.Files, error) {\n\tvar minVer int\n\tvar currTargets data.Files\n\tif c.targets != nil {\n\t\tminVer = c.targets.Version\n\t\tcurrTargets = c.targets.Targets\n\t}\n\ttargets := &data.Targets{}\n\tif err := signed.Unmarshal(b, targets, \"targets\", minVer, c.db); err != nil {\n\t\treturn nil, err\n\t}\n\tupdatedTargets := make(data.Files)\n\tfor path, meta := range targets.Targets {\n\t\tif curr, ok := currTargets[path]; ok {\n\t\t\tif err := util.FileMetaEqual(curr, meta); err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tupdatedTargets[path] = meta\n\t}\n\treturn updatedTargets, nil\n}\n\n\/\/ decodeTimestamp decodes and verifies timestamp metadata, and returns the\n\/\/ new snapshot file meta.\nfunc (c *Client) decodeTimestamp(b json.RawMessage) (data.FileMeta, error) {\n\tvar minVer int\n\tif c.timestamp != nil {\n\t\tminVer = c.timestamp.Version\n\t}\n\ttimestamp := &data.Timestamp{}\n\tif err := signed.Unmarshal(b, timestamp, \"timestamp\", minVer, c.db); err != nil {\n\t\treturn data.FileMeta{}, err\n\t}\n\treturn timestamp.Meta[\"snapshot.json\"], nil\n}\n\n\/\/ hasMeta checks whether local metadata has the given file meta\nfunc (c *Client) hasMeta(name string, m data.FileMeta) bool {\n\tb, ok := c.localMeta[name]\n\tif !ok {\n\t\treturn false\n\t}\n\tmeta, err := util.GenerateFileMeta(bytes.NewReader(b))\n\tif err != nil {\n\t\treturn false\n\t}\n\terr = util.FileMetaEqual(meta, m)\n\treturn err == nil\n}\n\nfunc (c *Client) Expires() time.Time {\n\treturn time.Time{}\n}\n\nfunc (c *Client) Version() int {\n\treturn 0\n}\n\nfunc (c *Client) Files() data.Files {\n\treturn nil\n}\n\ntype Destination interface {\n\tio.Writer\n\tSize() (int, error)\n\tDelete() error\n}\n\nfunc (c *Client) Download(name string, dest Destination) error {\n\t\/*\n\t\tThe software update system instructs TUF to download a specific target file.\n\n\t\tTUF downloads and verifies the file and then makes the file available to the\n\t\tsoftware update system.\n\t*\/\n\treturn nil\n}\n<commit_msg>Only store local versions and targets in the Client struct<commit_after>package client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/flynn\/go-tuf\/data\"\n\t\"github.com\/flynn\/go-tuf\/keys\"\n\t\"github.com\/flynn\/go-tuf\/signed\"\n\t\"github.com\/flynn\/go-tuf\/util\"\n)\n\nvar (\n\tErrNotFound = errors.New(\"tuf: file not found\")\n\tErrLatest = errors.New(\"tuf: the current version is the latest\")\n\tErrWrongSize = errors.New(\"tuf: unexpected file size\")\n\tErrNoRootKeys = errors.New(\"tuf: no root keys found in local meta store\")\n\tErrChecksumFailed = errors.New(\"tuf: checksum failed\")\n)\n\n\/\/ LocalStore is local storage for downloaded top-level metadata.\ntype LocalStore interface {\n\t\/\/ GetMeta returns top-level metadata from local storage. The keys are\n\t\/\/ in the form `ROLE.json`, with ROLE being a valid top-level role.\n\tGetMeta() (map[string]json.RawMessage, error)\n\n\t\/\/ SetMeta persists the given top-level metadata in local storage, the\n\t\/\/ name taking the same format as the keys returned by GetMeta.\n\tSetMeta(name string, meta json.RawMessage) error\n}\n\n\/\/ RemoteStore downloads top-level metadata and target files from a remote\n\/\/ repository.\ntype RemoteStore interface {\n\t\/\/ Get downloads the given file from remote storage.\n\t\/\/\n\t\/\/ `file` should be relative to the root of remote repository (e.g.\n\t\/\/ \"root.json\" or \"targets\/path\/to\/target\/file.txt\")\n\tGet(file string, size int64) (io.ReadCloser, error)\n}\n\n\/\/ Client provides methods for fetching updates from a remote repository and\n\/\/ downloading remote target files.\ntype Client struct {\n\tlocal LocalStore\n\tremote RemoteStore\n\n\t\/\/ The following four fields represent the versions of metatdata from\n\t\/\/ local storage\n\tlocalRootVer int\n\tlocalTargetsVer int\n\tlocalSnapshotVer int\n\tlocalTimestampVer int\n\n\t\/\/ localTargets is the list of targets from local storage\n\tlocalTargets data.Files\n\n\t\/\/ localMeta is the raw metadata from local storage and is used to\n\t\/\/ check whether remote metadata is present locally\n\tlocalMeta map[string]json.RawMessage\n\n\t\/\/ db is a key DB used for verifying metadata\n\tdb *keys.DB\n}\n\nfunc NewClient(local LocalStore, remote RemoteStore) *Client {\n\treturn &Client{\n\t\tlocal: local,\n\t\tremote: remote,\n\t}\n}\n\n\/\/ Init initializes a local repository.\n\/\/\n\/\/ The latest root.json is fetched from remote storage, verified using rootKeys\n\/\/ and threshold, and then saved in local storage. It is expected that rootKeys\n\/\/ were securely distributed with the software being updated.\nfunc (c *Client) Init(rootKeys []*data.Key, threshold int) error {\n\trootJSON, err := c.downloadMeta(\"root.json\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.db = keys.NewDB()\n\trootKeyIDs := make([]string, len(rootKeys))\n\tfor i, key := range rootKeys {\n\t\tid := key.ID()\n\t\trootKeyIDs[i] = id\n\t\tif err := c.db.AddKey(id, key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trole := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs}\n\tif err := c.db.AddRole(\"root\", role); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.verifyRoot(rootJSON); err != nil {\n\t\treturn err\n\t}\n\n\treturn c.local.SetMeta(\"root.json\", rootJSON)\n}\n\n\/\/ Update downloads and verifies remote metadata and returns updated targets.\n\/\/\n\/\/ It performs the update part of \"The client application\" workflow from\n\/\/ section 5.1 of the TUF spec:\n\/\/\n\/\/ https:\/\/github.com\/theupdateframework\/tuf\/blob\/v0.9.9\/docs\/tuf-spec.txt#L714\nfunc (c *Client) Update() (data.Files, error) {\n\t\/\/ Always start the update using local metadata\n\tif err := c.getLocalMeta(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO: If we get an invalid signature downloading timestamp.json\n\t\/\/ or snapshot.json, download root.json and start again.\n\n\t\/\/ Get timestamp.json, extract snapshot.json file meta and save the\n\t\/\/ timestamp.json locally\n\ttimestampJSON, err := c.downloadMeta(\"timestamp.json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsnapshotMeta, err := c.decodeTimestamp(timestampJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := c.local.SetMeta(\"timestamp.json\", timestampJSON); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return ErrLatest if we already have the latest snapshot.json\n\tif c.hasMeta(\"snapshot.json\", snapshotMeta) {\n\t\treturn nil, ErrLatest\n\t}\n\n\t\/\/ Get snapshot.json, then extract root.json and targets.json file meta.\n\t\/\/\n\t\/\/ The snapshot.json is only saved locally after checking root.json and\n\t\/\/ targets.json so that it will be re-downloaded on subsequent updates\n\t\/\/ if this update fails.\n\tsnapshotJSON, err := c.downloadMeta(\"snapshot.json\", &snapshotMeta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trootMeta, targetsMeta, err := c.decodeSnapshot(snapshotJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If we don't have the root.json, download it, save it in local\n\t\/\/ storage and restart the update\n\tif !c.hasMeta(\"root.json\", rootMeta) {\n\t\trootJSON, err := c.downloadMeta(\"root.json\", &rootMeta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.verifyRoot(rootJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.local.SetMeta(\"root.json\", rootJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.Update()\n\t}\n\n\t\/\/ If we don't have the targets.json, download it, determine updated\n\t\/\/ targets and save targets.json in local storage\n\tvar updatedTargets data.Files\n\tif !c.hasMeta(\"targets.json\", targetsMeta) {\n\t\ttargetsJSON, err := c.downloadMeta(\"targets.json\", &targetsMeta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tupdatedTargets, err = c.decodeTargets(targetsJSON)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.local.SetMeta(\"targets.json\", targetsJSON); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Save the snapshot.json now it has been processed successfully\n\tif err := c.local.SetMeta(\"snapshot.json\", snapshotJSON); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn updatedTargets, nil\n}\n\n\/\/ getLocalMeta decodes and verifies metadata from local storage.\n\/\/\n\/\/ The verification of local files is purely for consistency, if an attacker\n\/\/ has compromised the local storage, there is no guarantee it can be trusted.\nfunc (c *Client) getLocalMeta() error {\n\tmeta, err := c.local.GetMeta()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif rootJSON, ok := meta[\"root.json\"]; ok {\n\t\t\/\/ unmarshal root.json without verifying as we need the root\n\t\t\/\/ keys first\n\t\ts := &data.Signed{}\n\t\tif err := json.Unmarshal(rootJSON, s); err != nil {\n\t\t\treturn err\n\t\t}\n\t\troot := &data.Root{}\n\t\tif err := json.Unmarshal(s.Signed, root); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdb := keys.NewDB()\n\t\tfor id, k := range root.Keys {\n\t\t\tif err := db.AddKey(id, k); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor name, role := range root.Roles {\n\t\t\tif err := db.AddRole(name, role); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := signed.Verify(s, \"root\", 0, db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.localRootVer = root.Version\n\t\tc.db = db\n\t} else {\n\t\treturn ErrNoRootKeys\n\t}\n\n\tif snapshotJSON, ok := meta[\"snapshot.json\"]; ok {\n\t\tsnapshot := &data.Snapshot{}\n\t\tif err := signed.Unmarshal(snapshotJSON, snapshot, \"snapshot\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.localSnapshotVer = snapshot.Version\n\t}\n\n\tif targetsJSON, ok := meta[\"targets.json\"]; ok {\n\t\ttargets := &data.Targets{}\n\t\tif err := signed.Unmarshal(targetsJSON, targets, \"targets\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.localTargetsVer = targets.Version\n\t\tc.localTargets = targets.Targets\n\t}\n\n\tif timestampJSON, ok := meta[\"timestamp.json\"]; ok {\n\t\ttimestamp := &data.Timestamp{}\n\t\tif err := signed.Unmarshal(timestampJSON, timestamp, \"timestamp\", 0, c.db); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.localTimestampVer = timestamp.Version\n\t}\n\n\tc.localMeta = meta\n\treturn nil\n}\n\n\/\/ maxMetaSize is the maximum number of bytes that will be downloaded when\n\/\/ getting remote metadata without knowing it's length.\nconst maxMetaSize = 50 * 1024\n\n\/\/ downloadMeta downloads top-level metadata from remote storage and verifies\n\/\/ it using the given file metadata.\nfunc (c *Client) downloadMeta(name string, m *data.FileMeta) ([]byte, error) {\n\tvar size int64\n\tif m != nil {\n\t\tsize = m.Length\n\t}\n\tr, err := c.remote.Get(name, size)\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\treturn nil, ErrMissingRemoteMetadata{name}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\t\/\/ if m is nil (e.g. when downloading timestamp.json, which has unknown\n\t\/\/ size), just read the stream (up to a sane maximum) and return it\n\tif m == nil {\n\t\tb, err := ioutil.ReadAll(io.LimitReader(r, maxMetaSize))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\t\/\/ read exactly m.Length bytes from the stream\n\tbuf := make([]byte, m.Length)\n\tif _, err := io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ verify buf matches given metadata\n\tmeta, err := util.GenerateFileMeta(bytes.NewReader(buf))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := util.FileMetaEqual(meta, *m); err != nil {\n\t\treturn nil, ErrDownloadFailed{name, err}\n\t}\n\treturn buf, nil\n}\n\n\/\/ verifyRoot verifies root metadata.\nfunc (c *Client) verifyRoot(b json.RawMessage) error {\n\ts := &data.Signed{}\n\tif err := json.Unmarshal(b, s); err != nil {\n\t\treturn err\n\t}\n\treturn signed.Verify(s, \"root\", c.localRootVer, c.db)\n}\n\n\/\/ decodeSnapshot decodes and verifies snapshot metadata, and returns the new\n\/\/ root and targets file meta.\nfunc (c *Client) decodeSnapshot(b json.RawMessage) (data.FileMeta, data.FileMeta, error) {\n\tsnapshot := &data.Snapshot{}\n\tif err := signed.Unmarshal(b, snapshot, \"snapshot\", c.localSnapshotVer, c.db); err != nil {\n\t\treturn data.FileMeta{}, data.FileMeta{}, err\n\t}\n\treturn snapshot.Meta[\"root.json\"], snapshot.Meta[\"targets.json\"], nil\n}\n\n\/\/ decodeTargets decodes and verifies targets metadata, and returns updated\n\/\/ targets.\nfunc (c *Client) decodeTargets(b json.RawMessage) (data.Files, error) {\n\ttargets := &data.Targets{}\n\tif err := signed.Unmarshal(b, targets, \"targets\", c.localTargetsVer, c.db); err != nil {\n\t\treturn nil, err\n\t}\n\tupdatedTargets := make(data.Files)\n\tfor path, meta := range targets.Targets {\n\t\tif local, ok := c.localTargets[path]; ok {\n\t\t\tif err := util.FileMetaEqual(local, meta); err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tupdatedTargets[path] = meta\n\t}\n\treturn updatedTargets, nil\n}\n\n\/\/ decodeTimestamp decodes and verifies timestamp metadata, and returns the\n\/\/ new snapshot file meta.\nfunc (c *Client) decodeTimestamp(b json.RawMessage) (data.FileMeta, error) {\n\ttimestamp := &data.Timestamp{}\n\tif err := signed.Unmarshal(b, timestamp, \"timestamp\", c.localTimestampVer, c.db); err != nil {\n\t\treturn data.FileMeta{}, err\n\t}\n\treturn timestamp.Meta[\"snapshot.json\"], nil\n}\n\n\/\/ hasMeta checks whether local metadata has the given file meta\nfunc (c *Client) hasMeta(name string, m data.FileMeta) bool {\n\tb, ok := c.localMeta[name]\n\tif !ok {\n\t\treturn false\n\t}\n\tmeta, err := util.GenerateFileMeta(bytes.NewReader(b))\n\tif err != nil {\n\t\treturn false\n\t}\n\terr = util.FileMetaEqual(meta, m)\n\treturn err == nil\n}\n\nfunc (c *Client) Expires() time.Time {\n\treturn time.Time{}\n}\n\nfunc (c *Client) Version() int {\n\treturn 0\n}\n\nfunc (c *Client) Files() data.Files {\n\treturn nil\n}\n\ntype Destination interface {\n\tio.Writer\n\tSize() (int, error)\n\tDelete() error\n}\n\nfunc (c *Client) Download(name string, dest Destination) error {\n\t\/*\n\t\tThe software update system instructs TUF to download a specific target file.\n\n\t\tTUF downloads and verifies the file and then makes the file available to the\n\t\tsoftware update system.\n\t*\/\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package brats_test\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\/bratshelper\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Nodejs buildpack\", func() {\n\tbratshelper.UnbuiltBuildpack(\"node\", CopyBrats)\n\tbratshelper.DeployingAnAppWithAnUpdatedVersionOfTheSameBuildpack(CopyBrats)\n\tbratshelper.StagingWithBuildpackThatSetsEOL(\"node\", CopyBrats)\n\tbratshelper.StagingWithCustomBuildpackWithCredentialsInDependencies(CopyBrats)\n\tbratshelper.DeployAppWithExecutableProfileScript(\"node\", CopyBrats)\n\tbratshelper.DeployAnAppWithSensitiveEnvironmentVariables(CopyBrats)\n\tbratshelper.ForAllSupportedVersions(\"node\", CopyBrats, func(nodeVersion string, app *cutlass.App) {\n\t\tPushApp(app)\n\n\t\tBy(\"runs a simple webserver\", func() {\n\t\t\tExpect(app.GetBody(\"\/\")).To(ContainSubstring(\"Hello World!\"))\n\t\t})\n\t\tBy(\"supports bcrypt\", func() {\n\t\t\tExpect(app.GetBody(\"\/bcrypt\")).To(ContainSubstring(\"Hello Bcrypt!\"))\n\t\t})\n\t\tBy(\"supports bson-ext\", func() {\n\t\t\tif strings.HasPrefix(nodeVersion, \"12\") {\n\t\t\t\t\/\/ TODO: Bson-ext doesn't work with NodeJS 12 yet. When it does work (this fails), this can be removed.\n\t\t\t\tExpect(app.GetBody(\"\/bson-ext\")).To(ContainSubstring(\"502 Bad Gateway: Registered endpoint failed to handle the request.\"))\n\t\t\t} else {\n\t\t\t\tExpect(app.GetBody(\"\/bson-ext\")).To(ContainSubstring(\"Hello Bson-ext!\"))\n\t\t\t}\n\t\t})\n\t\tBy(\"installs the correct version\", func() {\n\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing node \" + nodeVersion))\n\t\t})\n\t})\n})\n<commit_msg>Remove bson-ext exclusion from brats<commit_after>package brats_test\n\nimport (\n\t\"github.com\/cloudfoundry\/libbuildpack\/bratshelper\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Nodejs buildpack\", func() {\n\tbratshelper.UnbuiltBuildpack(\"node\", CopyBrats)\n\tbratshelper.DeployingAnAppWithAnUpdatedVersionOfTheSameBuildpack(CopyBrats)\n\tbratshelper.StagingWithBuildpackThatSetsEOL(\"node\", CopyBrats)\n\tbratshelper.StagingWithCustomBuildpackWithCredentialsInDependencies(CopyBrats)\n\tbratshelper.DeployAppWithExecutableProfileScript(\"node\", CopyBrats)\n\tbratshelper.DeployAnAppWithSensitiveEnvironmentVariables(CopyBrats)\n\tbratshelper.ForAllSupportedVersions(\"node\", CopyBrats, func(nodeVersion string, app *cutlass.App) {\n\t\tPushApp(app)\n\n\t\tBy(\"runs a simple webserver\", func() {\n\t\t\tExpect(app.GetBody(\"\/\")).To(ContainSubstring(\"Hello World!\"))\n\t\t})\n\t\tBy(\"supports bcrypt\", func() {\n\t\t\tExpect(app.GetBody(\"\/bcrypt\")).To(ContainSubstring(\"Hello Bcrypt!\"))\n\t\t})\n\t\tBy(\"supports bson-ext\", func() {\n\t\t\tExpect(app.GetBody(\"\/bson-ext\")).To(ContainSubstring(\"Hello Bson-ext!\"))\n\t\t})\n\t\tBy(\"installs the correct version\", func() {\n\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing node \" + nodeVersion))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Koichi Shiraishi. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"nvim-go\/config\"\n\t\"nvim-go\/nvim\"\n\t\"nvim-go\/nvim\/profile\"\n\t\"nvim-go\/nvim\/quickfix\"\n\n\t\"github.com\/neovim-go\/vim\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/tools\/refactor\/rename\"\n)\n\nconst pkgRename = \"GoRename\"\n\ntype cmdRenameEval struct {\n\tCwd string `msgpack:\",array\"`\n\tFile string\n\tRenameFrom string\n}\n\nfunc (c *Commands) cmdRename(args []string, bang bool, eval *cmdRenameEval) {\n\tgo c.Rename(args, bang, eval)\n}\n\n\/\/ Rename rename the current cursor word use golang.org\/x\/tools\/refactor\/rename.\nfunc (c *Commands) Rename(args []string, bang bool, eval *cmdRenameEval) error {\n\tdefer profile.Start(time.Now(), \"GoRename\")\n\tdir := filepath.Dir(eval.File)\n\tdefer c.ctxt.SetContext(dir)()\n\n\tvar (\n\t\tb vim.Buffer\n\t\tw vim.Window\n\t)\n\tif c.p == nil {\n\t\tc.p = c.v.NewPipeline()\n\t}\n\tc.p.CurrentBuffer(&b)\n\tc.p.CurrentWindow(&w)\n\tif err := c.p.Wait(); err != nil {\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\n\toffset, err := nvim.ByteOffset(c.v, b, w)\n\tif err != nil {\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\tpos := fmt.Sprintf(\"%s:#%d\", eval.File, offset)\n\n\tvar renameTo string\n\tif len(args) > 0 {\n\t\trenameTo = args[0]\n\t} else {\n\t\taskMessage := fmt.Sprintf(\"%s: Rename '%s' to: \", pkgRename, eval.RenameFrom)\n\t\tvar toResult interface{}\n\t\tif config.RenamePrefill {\n\t\t\terr := c.v.Call(\"input\", &toResult, askMessage, eval.RenameFrom)\n\t\t\tif err != nil {\n\t\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Keyboard interrupt\")\n\t\t\t}\n\t\t} else {\n\t\t\terr := c.v.Call(\"input\", &toResult, askMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Keyboard interrupt\")\n\t\t\t}\n\t\t}\n\t\tif toResult.(string) == \"\" {\n\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Not enough arguments for rename destination name\")\n\t\t}\n\t\trenameTo = fmt.Sprintf(\"%s\", toResult)\n\t}\n\n\tc.v.Command(fmt.Sprintf(\"echo '%s: Renaming ' | echohl Identifier | echon '%s' | echohl None | echon ' to ' | echohl Identifier | echon '%s' | echohl None | echon ' ...'\", pkgRename, eval.RenameFrom, renameTo))\n\n\tif bang {\n\t\trename.Force = true\n\t}\n\n\t\/\/ TODO(zchee): More elegant way\n\tsaveStdout, saveStderr := os.Stdout, os.Stderr\n\tos.Stderr = os.Stdout\n\tread, write, _ := os.Pipe()\n\tos.Stdout, os.Stderr = write, write\n\tdefer func() {\n\t\tos.Stderr = saveStdout\n\t\tos.Stderr = saveStderr\n\t}()\n\n\tif err := rename.Main(&build.Default, pos, \"\", renameTo); err != nil {\n\t\twrite.Close()\n\t\ter, _ := ioutil.ReadAll(read)\n\t\tlog.Printf(\"er: %+v\\n\", string(er))\n\t\tgo func() {\n\t\t\tloclist, _ := quickfix.ParseError(er, eval.Cwd, &c.ctxt.Build)\n\t\t\tquickfix.SetLoclist(c.v, loclist)\n\t\t\tquickfix.OpenLoclist(c.v, w, loclist, true)\n\t\t}()\n\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\n\twrite.Close()\n\tout, _ := ioutil.ReadAll(read)\n\tdefer nvim.EchoSuccess(c.v, pkgRename, fmt.Sprintf(\"%s\", out))\n\n\t\/\/ TODO(zchee): 'edit' command is ugly.\n\t\/\/ Should create tempfile and use SetBufferLines.\n\treturn c.v.Command(\"silent edit\")\n}\n<commit_msg>cmds\/rename: fix save stdio to more better way and add error handling<commit_after>\/\/ Copyright 2016 Koichi Shiraishi. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"nvim-go\/config\"\n\t\"nvim-go\/nvim\"\n\t\"nvim-go\/nvim\/profile\"\n\t\"nvim-go\/nvim\/quickfix\"\n\n\t\"github.com\/neovim-go\/vim\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/tools\/refactor\/rename\"\n)\n\nconst pkgRename = \"GoRename\"\n\ntype cmdRenameEval struct {\n\tCwd string `msgpack:\",array\"`\n\tFile string\n\tRenameFrom string\n}\n\nfunc (c *Commands) cmdRename(args []string, bang bool, eval *cmdRenameEval) {\n\tgo c.Rename(args, bang, eval)\n}\n\n\/\/ Rename rename the current cursor word use golang.org\/x\/tools\/refactor\/rename.\nfunc (c *Commands) Rename(args []string, bang bool, eval *cmdRenameEval) error {\n\tdefer profile.Start(time.Now(), \"GoRename\")\n\tdir := filepath.Dir(eval.File)\n\tdefer c.ctxt.SetContext(dir)()\n\n\tvar (\n\t\tb vim.Buffer\n\t\tw vim.Window\n\t)\n\tif c.p == nil {\n\t\tc.p = c.v.NewPipeline()\n\t}\n\tc.p.CurrentBuffer(&b)\n\tc.p.CurrentWindow(&w)\n\tif err := c.p.Wait(); err != nil {\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\n\toffset, err := nvim.ByteOffset(c.v, b, w)\n\tif err != nil {\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\tpos := fmt.Sprintf(\"%s:#%d\", eval.File, offset)\n\n\tvar renameTo string\n\tif len(args) > 0 {\n\t\trenameTo = args[0]\n\t} else {\n\t\taskMessage := fmt.Sprintf(\"%s: Rename '%s' to: \", pkgRename, eval.RenameFrom)\n\t\tvar toResult interface{}\n\t\tif config.RenamePrefill {\n\t\t\terr := c.v.Call(\"input\", &toResult, askMessage, eval.RenameFrom)\n\t\t\tif err != nil {\n\t\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Keyboard interrupt\")\n\t\t\t}\n\t\t} else {\n\t\t\terr := c.v.Call(\"input\", &toResult, askMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Keyboard interrupt\")\n\t\t\t}\n\t\t}\n\t\tif toResult.(string) == \"\" {\n\t\t\treturn nvim.EchohlErr(c.v, pkgRename, \"Not enough arguments for rename destination name\")\n\t\t}\n\t\trenameTo = fmt.Sprintf(\"%s\", toResult)\n\t}\n\n\tc.v.Command(fmt.Sprintf(\"echo '%s: Renaming ' | echohl Identifier | echon '%s' | echohl None | echon ' to ' | echohl Identifier | echon '%s' | echohl None | echon ' ...'\", pkgRename, eval.RenameFrom, renameTo))\n\n\tif bang {\n\t\trename.Force = true\n\t}\n\n\t\/\/ TODO(zchee): More elegant way\n\t\/\/ save original stdout and stderr\n\tsaveStdout, saveStderr := os.Stdout, os.Stderr\n\tread, write, _ := os.Pipe()\n\t\/\/ migrate stderr and stdout\n\tos.Stderr = os.Stdout\n\tos.Stderr = write\n\tdefer func() {\n\t\tos.Stderr = saveStdout\n\t\tos.Stderr = saveStderr\n\t}()\n\n\tif err := rename.Main(&build.Default, pos, \"\", renameTo); err != nil {\n\t\twrite.Close()\n\t\trenameErr, err := ioutil.ReadAll(read)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, pkgRename)\n\t\t\treturn nvim.ErrorWrap(c.v, err)\n\t\t}\n\n\t\tlog.Printf(\"er: %+v\\n\", string(renameErr))\n\t\tgo func() {\n\t\t\tloclist, _ := quickfix.ParseError(renameErr, eval.Cwd, &c.ctxt.Build)\n\t\t\tquickfix.SetLoclist(c.v, loclist)\n\t\t\tquickfix.OpenLoclist(c.v, w, loclist, true)\n\t\t}()\n\n\t\terr = errors.Wrap(err, pkgRename)\n\t\treturn nvim.ErrorWrap(c.v, err)\n\t}\n\n\twrite.Close()\n\tout, _ := ioutil.ReadAll(read)\n\tdefer nvim.EchoSuccess(c.v, pkgRename, fmt.Sprintf(\"%s\", out))\n\n\t\/\/ TODO(zchee): 'edit' command is ugly.\n\t\/\/ Should create tempfile and use SetBufferLines.\n\treturn c.v.Command(\"silent edit\")\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\"\n\t\"nvim-go\/gb\"\n\t\"nvim-go\/nvim\"\n\t\"os\"\n\t\"strings\"\n\n\t\"golang.org\/x\/tools\/refactor\/rename\"\n\n\t\"github.com\/garyburd\/neovim-go\/vim\"\n\t\"github.com\/garyburd\/neovim-go\/vim\/plugin\"\n)\n\nfunc init() {\n\tplugin.HandleCommand(\"Gorename\",\n\t\t&plugin.CommandOptions{\n\t\t\tNArgs: \"?\", Eval: \"[expand('%:p:h'), expand('%:p'), line2byte(line('.'))+(col('.')-2)]\"},\n\t\tcmdRename)\n}\n\nvar (\n\trenamePrefill = \"go#rename#prefill\"\n\tvRenamePrefill interface{}\n)\n\ntype onRenameEval struct {\n\tDir string `msgpack:\",array\"`\n\tFile string\n\tOffset int\n}\n\nfunc cmdRename(v *vim.Vim, args []string, eval *onRenameEval) error {\n\tgo Rename(v, args, eval)\n\treturn nil\n}\n\n\/\/ Rename rename the current cursor word use golang.org\/x\/tools\/refactor\/rename.\nfunc Rename(v *vim.Vim, args []string, eval *onRenameEval) error {\n\tdefer gb.WithGoBuildForPath(eval.Dir)()\n\n\tv.Var(renamePrefill, &vRenamePrefill)\n\tfrom, err := v.CommandOutput(fmt.Sprintf(\"silent! echo expand('<cword>')\"))\n\tif err != nil {\n\t\tnvim.Echomsg(v, \"%s\", err)\n\t}\n\n\tvar (\n\t\tb vim.Buffer\n\t\tw vim.Window\n\t)\n\tp := v.NewPipeline()\n\tp.CurrentBuffer(&b)\n\tp.CurrentWindow(&w)\n\tif err := p.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\toffset := fmt.Sprintf(\"%s:#%d\", eval.File, eval.Offset)\n\tfmt.Printf(offset)\n\n\taskMessage := fmt.Sprintf(\"%s: Rename '%s' to: \", \"nvim-go\", from[1:])\n\tvar to interface{}\n\tif vRenamePrefill.(int64) == int64(1) {\n\t\tp.Call(\"input\", &to, askMessage, from[1:])\n\t\tif err := p.Wait(); err != nil {\n\t\t\treturn nvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t} else {\n\t\tp.Call(\"input\", &to, askMessage)\n\t\tif err := p.Wait(); err != nil {\n\t\t\treturn nvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t}\n\n\t\/\/ out, err := ParseStdout(fmt.Sprintln(eval.Offset))\n\t\/\/ if err != nil {\n\t\/\/ \treturn nvim.Echomsg(v, \"%s\", err)\n\t\/\/ }\n\n\tif err := rename.Main(&build.Default, offset, \"\", fmt.Sprint(to)); err != nil {\n\t\tif err != rename.ConflictError {\n\t\t\tnvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t}\n\tp.Command(\"edit!\")\n\t\/\/ nvim.Echohl(v, nvim.PackageName, \"Function\", out)\n\n\treturn p.Wait()\n}\n\nfunc ParseStdout(trim string) (string, error) {\n\tstdout := os.Stdout\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tos.Stdout = w\n\toutC := make(chan string)\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, r)\n\tr.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\toutC <- buf.String()\n\n\tw.Close()\n\tos.Stdout = stdout\n\tout := <-outC\n\n\tntext := strings.SplitN(out, \"#\", 2)\n\treturn strings.Trim(ntext[1], trim), nil\n}\n<commit_msg>Remove unused code<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"nvim-go\/gb\"\n\t\"nvim-go\/nvim\"\n\n\t\"golang.org\/x\/tools\/refactor\/rename\"\n\n\t\"github.com\/garyburd\/neovim-go\/vim\"\n\t\"github.com\/garyburd\/neovim-go\/vim\/plugin\"\n)\n\nfunc init() {\n\tplugin.HandleCommand(\"Gorename\",\n\t\t&plugin.CommandOptions{\n\t\t\tNArgs: \"?\", Eval: \"[expand('%:p:h'), expand('%:p'), line2byte(line('.'))+(col('.')-2)]\"},\n\t\tcmdRename)\n}\n\nvar (\n\trenamePrefill = \"go#rename#prefill\"\n\tvRenamePrefill interface{}\n)\n\ntype onRenameEval struct {\n\tDir string `msgpack:\",array\"`\n\tFile string\n\tOffset int\n}\n\nfunc cmdRename(v *vim.Vim, args []string, eval *onRenameEval) error {\n\tgo Rename(v, args, eval)\n\treturn nil\n}\n\n\/\/ Rename rename the current cursor word use golang.org\/x\/tools\/refactor\/rename.\nfunc Rename(v *vim.Vim, args []string, eval *onRenameEval) error {\n\tdefer gb.WithGoBuildForPath(eval.Dir)()\n\n\tv.Var(renamePrefill, &vRenamePrefill)\n\tfrom, err := v.CommandOutput(fmt.Sprintf(\"silent! echo expand('<cword>')\"))\n\tif err != nil {\n\t\tnvim.Echomsg(v, \"%s\", err)\n\t}\n\n\tvar (\n\t\tb vim.Buffer\n\t\tw vim.Window\n\t)\n\tp := v.NewPipeline()\n\tp.CurrentBuffer(&b)\n\tp.CurrentWindow(&w)\n\tif err := p.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\toffset := fmt.Sprintf(\"%s:#%d\", eval.File, eval.Offset)\n\tfmt.Printf(offset)\n\n\taskMessage := fmt.Sprintf(\"%s: Rename '%s' to: \", \"nvim-go\", from[1:])\n\tvar to interface{}\n\tif vRenamePrefill.(int64) == int64(1) {\n\t\tp.Call(\"input\", &to, askMessage, from[1:])\n\t\tif err := p.Wait(); err != nil {\n\t\t\treturn nvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t} else {\n\t\tp.Call(\"input\", &to, askMessage)\n\t\tif err := p.Wait(); err != nil {\n\t\t\treturn nvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t}\n\n\tif err := rename.Main(&build.Default, offset, \"\", fmt.Sprint(to)); err != nil {\n\t\tif err != rename.ConflictError {\n\t\t\tnvim.Echomsg(v, \"%s\", err)\n\t\t}\n\t}\n\tp.Command(\"edit!\")\n\n\treturn p.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package end2end_test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/apoydence\/talaria\/pb\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"End2end\", func() {\n\tContext(\"Data has been written\", func() {\n\t\tvar (\n\t\t\tfileInfo *pb.File\n\t\t)\n\n\t\tvar writeTo = func(name string, data []byte, writer pb.Talaria_WriteClient) {\n\t\t\tpacket := &pb.WriteDataPacket{\n\t\t\t\tFileName: name,\n\t\t\t\tMessage: data,\n\t\t\t}\n\t\t\tExpect(writer.Send(packet)).To(Succeed())\n\t\t}\n\n\t\tvar fetchReaderWithIndex = func(name string, index uint64, client pb.TalariaClient) (chan []byte, chan uint64) {\n\t\t\tc := make(chan []byte, 100)\n\t\t\tidx := make(chan uint64, 100)\n\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: name,\n\t\t\t\tStartIndex: index,\n\t\t\t}\n\n\t\t\treader, err := client.Read(context.Background(), fileInfo)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tpacket, err := reader.Recv()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tc <- packet.Message\n\t\t\t\t\tidx <- packet.Index\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn c, idx\n\t\t}\n\n\t\tvar fetchReaderLastIndex = func(name string, client pb.TalariaClient) (chan []byte, chan uint64) {\n\t\t\tc := make(chan []byte, 100)\n\t\t\tidx := make(chan uint64, 100)\n\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: name,\n\t\t\t\tStartIndex: 1,\n\t\t\t\tStartFromEnd: true,\n\t\t\t}\n\n\t\t\treader, err := client.Read(context.Background(), fileInfo)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tpacket, err := reader.Recv()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tc <- packet.Message\n\t\t\t\t\tidx <- packet.Index\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn c, idx\n\t\t}\n\n\t\tvar writeSlowly = func(count int, fileInfo *pb.File, writer pb.Talaria_WriteClient) *sync.WaitGroup {\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < count; i++ {\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(fmt.Sprintf(\"some-data-%d\", i)), writer)\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn &wg\n\t\t}\n\n\t\tBeforeEach(func() {\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: createFileName(),\n\t\t\t}\n\t\t})\n\n\t\tContext(\"file has been created\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\ttalariaClient.Create(context.Background(), fileInfo)\n\t\t\t})\n\n\t\t\tContext(\"start tailing from beginning\", func() {\n\t\t\t\tIt(\"writes data to a subscriber\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\n\t\t\t\t\tdata, indexes := fetchReaderWithIndex(fileInfo.FileName, 0, talariaClient)\n\t\t\t\t\tEventually(data).Should(Receive(Equal([]byte(\"some-data-1\"))))\n\t\t\t\t\tEventually(indexes).Should(Receive(BeEquivalentTo(0)))\n\t\t\t\t\tEventually(data).Should(Receive(Equal([]byte(\"some-data-2\"))))\n\t\t\t\t\tEventually(indexes).Should(Receive(BeEquivalentTo(1)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"tails via Read()\", func() {\n\t\t\t\t\tdata, _ := fetchReaderWithIndex(fileInfo.FileName, 0, talariaClient)\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\twg := writeSlowly(10, fileInfo, writer)\n\t\t\t\t\tdefer wg.Wait()\n\n\t\t\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\t\texpectedData := []byte(fmt.Sprintf(\"some-data-%d\", i))\n\t\t\t\t\t\tEventually(data).Should(Receive(Equal(expectedData)))\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"tail from middle\", func() {\n\t\t\t\tIt(\"reads from the given index\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-3\"), writer)\n\n\t\t\t\t\tdata, indexes := fetchReaderWithIndex(fileInfo.FileName, 1, talariaClient)\n\n\t\t\t\t\tvar idx uint64\n\t\t\t\t\tEventually(indexes).Should(Receive(&idx))\n\t\t\t\t\tExpect(idx).To(BeEquivalentTo(1))\n\t\t\t\t\tExpect(data).To(Receive(Equal([]byte(\"some-data-2\"))))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"tail from end\", func() {\n\t\t\t\tIt(\"reads from the given index\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-3\"), writer)\n\n\t\t\t\t\tdata, indexes := fetchReaderLastIndex(fileInfo.FileName, talariaClient)\n\n\t\t\t\t\tvar idx uint64\n\t\t\t\t\tEventually(indexes).Should(Receive(&idx))\n\t\t\t\t\tExpect(idx).To(BeEquivalentTo(2))\n\t\t\t\t\tExpect(data).To(Receive(Equal([]byte(\"some-data-3\"))))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"file has not been created\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t_, err = writer.CloseAndRecv()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc createFileName() string {\n\treturn fmt.Sprintf(\"some-file-%d\", rand.Int63())\n}\n<commit_msg>Fix flaky end2end tetst<commit_after>package end2end_test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/apoydence\/talaria\/pb\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"End2end\", func() {\n\tContext(\"Data has been written\", func() {\n\t\tvar (\n\t\t\tfileInfo *pb.File\n\t\t)\n\n\t\tvar writeTo = func(name string, data []byte, writer pb.Talaria_WriteClient) {\n\t\t\tpacket := &pb.WriteDataPacket{\n\t\t\t\tFileName: name,\n\t\t\t\tMessage: data,\n\t\t\t}\n\t\t\tExpect(writer.Send(packet)).To(Succeed())\n\t\t}\n\n\t\tvar fetchReaderWithIndex = func(name string, index uint64, client pb.TalariaClient) (chan []byte, chan uint64) {\n\t\t\tc := make(chan []byte, 100)\n\t\t\tidx := make(chan uint64, 100)\n\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: name,\n\t\t\t\tStartIndex: index,\n\t\t\t}\n\n\t\t\treader, err := client.Read(context.Background(), fileInfo)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tpacket, err := reader.Recv()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tc <- packet.Message\n\t\t\t\t\tidx <- packet.Index\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn c, idx\n\t\t}\n\n\t\tvar fetchReaderLastIndex = func(name string, client pb.TalariaClient) (chan []byte, chan uint64) {\n\t\t\tc := make(chan []byte, 100)\n\t\t\tidx := make(chan uint64, 100)\n\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: name,\n\t\t\t\tStartIndex: 1,\n\t\t\t\tStartFromEnd: true,\n\t\t\t}\n\n\t\t\treader, err := client.Read(context.Background(), fileInfo)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tpacket, err := reader.Recv()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tc <- packet.Message\n\t\t\t\t\tidx <- packet.Index\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn c, idx\n\t\t}\n\n\t\tvar writeSlowly = func(count int, fileInfo *pb.File, writer pb.Talaria_WriteClient) *sync.WaitGroup {\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < count; i++ {\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(fmt.Sprintf(\"some-data-%d\", i)), writer)\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn &wg\n\t\t}\n\n\t\tBeforeEach(func() {\n\t\t\tfileInfo = &pb.File{\n\t\t\t\tFileName: createFileName(),\n\t\t\t}\n\t\t})\n\n\t\tContext(\"file has been created\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\ttalariaClient.Create(context.Background(), fileInfo)\n\t\t\t})\n\n\t\t\tContext(\"start tailing from beginning\", func() {\n\t\t\t\tIt(\"writes data to a subscriber\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\n\t\t\t\t\tdata, indexes := fetchReaderWithIndex(fileInfo.FileName, 0, talariaClient)\n\t\t\t\t\tEventually(data).Should(Receive(Equal([]byte(\"some-data-1\"))))\n\t\t\t\t\tEventually(indexes).Should(Receive(BeEquivalentTo(0)))\n\t\t\t\t\tEventually(data).Should(Receive(Equal([]byte(\"some-data-2\"))))\n\t\t\t\t\tEventually(indexes).Should(Receive(BeEquivalentTo(1)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"tails via Read()\", func() {\n\t\t\t\t\tdata, _ := fetchReaderWithIndex(fileInfo.FileName, 0, talariaClient)\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\twg := writeSlowly(10, fileInfo, writer)\n\t\t\t\t\tdefer wg.Wait()\n\n\t\t\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\t\texpectedData := []byte(fmt.Sprintf(\"some-data-%d\", i))\n\t\t\t\t\t\tEventually(data).Should(Receive(Equal(expectedData)))\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"tail from middle\", func() {\n\t\t\t\tIt(\"reads from the given index\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-3\"), writer)\n\n\t\t\t\t\tdata, indexes := fetchReaderWithIndex(fileInfo.FileName, 1, talariaClient)\n\n\t\t\t\t\tvar idx uint64\n\t\t\t\t\tEventually(indexes).Should(Receive(&idx))\n\t\t\t\t\tExpect(idx).To(BeEquivalentTo(1))\n\t\t\t\t\tExpect(data).To(Receive(Equal([]byte(\"some-data-2\"))))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"tail from end\", func() {\n\t\t\t\tvar waitForData = func() {\n\t\t\t\t\tdata, _ := fetchReaderWithIndex(fileInfo.FileName, 0, talariaClient)\n\t\t\t\t\tEventually(data).Should(HaveLen(3))\n\t\t\t\t}\n\n\t\t\t\tIt(\"reads from the given index\", func() {\n\t\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-1\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-2\"), writer)\n\t\t\t\t\twriteTo(fileInfo.FileName, []byte(\"some-data-3\"), writer)\n\t\t\t\t\twaitForData()\n\n\t\t\t\t\tdata, indexes := fetchReaderLastIndex(fileInfo.FileName, talariaClient)\n\n\t\t\t\t\tvar idx uint64\n\t\t\t\t\tEventually(indexes).Should(Receive(&idx))\n\t\t\t\t\tExpect(idx).To(BeEquivalentTo(2))\n\t\t\t\t\tExpect(data).To(Receive(Equal([]byte(\"some-data-3\"))))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"file has not been created\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\twriter, err := talariaClient.Write(context.Background())\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t_, err = writer.CloseAndRecv()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc createFileName() string {\n\treturn fmt.Sprintf(\"some-file-%d\", rand.Int63())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"testing\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\/mock\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestBlobStore(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype VisitorTest struct {\n\tctx context.Context\n\tblobStore blob.Store\n\n\tnode fsNode\n}\n\nfunc init() { RegisterTestSuite(&VisitorTest{}) }\n\nfunc (t *VisitorTest) SetUp(ti *TestInfo) {\n\tt.ctx = ti.Ctx\n\tt.blobStore = mock_blob.NewMockStore(ti.MockController, \"blobStore\")\n}\n\nfunc (t *VisitorTest) call() (err error) {\n\tvisitor := newVisitor(t.blobStore, make(chan *fsNode, 1))\n\terr = visitor.Visit(t.ctx, &t.node)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_Empty() {\n\tscores := []blob.Score{}\n\tt.node.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_NonEmpty() {\n\tscores := []blob.Score{blob.ComputeScore([]byte(\"taco\"))}\n\tt.node.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) Symlink() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) Directory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_Empty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_LastChunkIsFull() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_LastChunkIsPartial() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>VisitorTest.Symlink<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\/mock\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestBlobStore(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype VisitorTest struct {\n\tctx context.Context\n\tblobStore blob.Store\n\n\tnode fsNode\n\n\t\/\/ A temporary directory removed at the end of the test.\n\tdir string\n}\n\nfunc init() { RegisterTestSuite(&VisitorTest{}) }\n\nvar _ SetUpInterface = &VisitorTest{}\nvar _ TearDownInterface = &VisitorTest{}\n\nfunc (t *VisitorTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\tt.ctx = ti.Ctx\n\tt.blobStore = mock_blob.NewMockStore(ti.MockController, \"blobStore\")\n\n\t\/\/ Set up the directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"score_map_test\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) TearDown() {\n\tvar err error\n\n\terr = os.RemoveAll(t.dir)\n\tAssertEq(nil, err)\n}\n\nfunc (t *VisitorTest) call() (err error) {\n\tvisitor := newVisitor(t.blobStore, make(chan *fsNode, 1))\n\terr = visitor.Visit(t.ctx, &t.node)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_Empty() {\n\tscores := []blob.Score{}\n\tt.node.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) ScoresAlreadyPresent_NonEmpty() {\n\tscores := []blob.Score{blob.ComputeScore([]byte(\"taco\"))}\n\tt.node.Scores = scores\n\n\terr := t.call()\n\tAssertEq(nil, err)\n\tExpectThat(t.node.Scores, DeepEquals(scores))\n}\n\nfunc (t *VisitorTest) Symlink() {\n\tvar err error\n\n\t\/\/ Node setup\n\tp := path.Join(t.dir, \"foo\")\n\n\terr = os.Symlink(\"blah\", p)\n\tAssertEq(nil, err)\n\n\tt.node.Info, err = os.Lstat(p)\n\tAssertEq(nil, err)\n\tAssertEq(os.ModeSymlink, t.node.Info.Mode()&os.ModeType)\n\n\t\/\/ Call\n\terr = t.call()\n\tAssertEq(nil, err)\n\n\tExpectEq(nil, t.node.Scores)\n}\n\nfunc (t *VisitorTest) Directory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_Empty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_LastChunkIsFull() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *VisitorTest) File_LastChunkIsPartial() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage setters\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/setters\"\n)\n\nfunc PerformSetters(path string) error {\n\trw := &kio.LocalPackageReadWriter{\n\t\tPackagePath: path,\n\t\tKeepReaderAnnotations: false,\n\t\tIncludeSubpackages: true,\n\t}\n\n\t\/\/ auto-fill setters from the environment\n\tvar fltrs []kio.Filter\n\tfor i := range os.Environ() {\n\t\te := os.Environ()[i]\n\t\tif !strings.HasPrefix(e, \"KPT_SET_\") {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(e, \"=\", 2)\n\t\tif len(parts) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tk, v := strings.TrimPrefix(parts[0], \"KPT_SET_\"), parts[1]\n\t\tk = strings.ToLower(k)\n\t\tfltrs = append(fltrs, &setters.PerformSetters{Name: k, Value: v, SetBy: \"kpt\"})\n\t}\n\n\t\/\/ auto-fill setters from gcloud\n\tgcloudConfig := []string{\"compute.region\", \"compute.zone\", \"core.project\"}\n\tfor _, c := range gcloudConfig {\n\t\tgcloudCmd := exec.Command(\"gcloud\",\n\t\t\t\"config\", \"list\", \"--format\", fmt.Sprintf(\"value(%s)\", c))\n\t\tb, err := gcloudCmd.Output()\n\t\tif err != nil {\n\t\t\t\/\/ don't fail if gcloud fails -- it may not be installed or have this config property\n\t\t\tcontinue\n\t\t}\n\t\tv := strings.TrimSpace(string(b))\n\t\tif v == \"\" {\n\t\t\t\/\/ don't replace values that aren't set - stick with the defaults as defined in the manifest\n\t\t\tcontinue\n\t\t}\n\t\tfltrs = append(fltrs, &setters.PerformSetters{Name: fmt.Sprintf(\"gcloud.%s\", c), Value: v, SetBy: \"kpt\"})\n\t}\n\n\tif len(fltrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn kio.Pipeline{Inputs: []kio.Reader{rw}, Filters: fltrs, Outputs: []kio.Writer{rw}}.\n\t\tExecute()\n}\n<commit_msg>add an auto setter for gcloud.project.projectNumber<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage setters\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/setters\"\n)\n\nfunc PerformSetters(path string) error {\n\trw := &kio.LocalPackageReadWriter{\n\t\tPackagePath: path,\n\t\tKeepReaderAnnotations: false,\n\t\tIncludeSubpackages: true,\n\t}\n\n\t\/\/ auto-fill setters from the environment\n\tvar fltrs []kio.Filter\n\tfor i := range os.Environ() {\n\t\te := os.Environ()[i]\n\t\tif !strings.HasPrefix(e, \"KPT_SET_\") {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(e, \"=\", 2)\n\t\tif len(parts) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tk, v := strings.TrimPrefix(parts[0], \"KPT_SET_\"), parts[1]\n\t\tk = strings.ToLower(k)\n\t\tfltrs = append(fltrs, &setters.PerformSetters{Name: k, Value: v, SetBy: \"kpt\"})\n\t}\n\n\t\/\/ auto-fill setters from gcloud\n\tgcloudConfig := []string{\"compute.region\", \"compute.zone\", \"core.project\"}\n\tprojectID := \"\"\n\tfor _, c := range gcloudConfig {\n\t\tgcloudCmd := exec.Command(\"gcloud\",\n\t\t\t\"config\", \"list\", \"--format\", fmt.Sprintf(\"value(%s)\", c))\n\t\tb, err := gcloudCmd.Output()\n\t\tif err != nil {\n\t\t\t\/\/ don't fail if gcloud fails -- it may not be installed or have this config property\n\t\t\tcontinue\n\t\t}\n\t\tv := strings.TrimSpace(string(b))\n\t\tif v == \"\" {\n\t\t\t\/\/ don't replace values that aren't set - stick with the defaults as defined in the manifest\n\t\t\tcontinue\n\t\t}\n\t\tif c == \"core.project\" {\n\t\t\tprojectID = v\n\t\t}\n\t\tfltrs = append(fltrs, &setters.PerformSetters{Name: fmt.Sprintf(\"gcloud.%s\", c), Value: v, SetBy: \"kpt\"})\n\t}\n\n\tif projectID != \"\" {\n\t\tprojectNumber, err := GetProjectNumberFromProjectID(projectID)\n\t\tif err == nil && projectNumber != \"\" {\n\t\t\tfltrs = append(fltrs, &setters.PerformSetters{Name: \"gcloud.project.projectNumber\", Value: projectNumber, SetBy: \"kpt\"})\n\t\t}\n\t}\n\n\tif len(fltrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn kio.Pipeline{Inputs: []kio.Reader{rw}, Filters: fltrs, Outputs: []kio.Writer{rw}}.\n\t\tExecute()\n}\n\nfunc GetProjectNumberFromProjectID(projectID string) (string, error) {\n\tgcloudCmd := exec.Command(\"gcloud\",\n\t\t\"projects\", \"describe\", projectID, \"--format\", \"value(projectNumber)\")\n\tb, err := gcloudCmd.Output()\n\tif err != nil {\n\t\tfmt.Printf(\"err: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tfmt.Println(strings.TrimSpace(string(b)))\n\treturn strings.TrimSpace(string(b)), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package lzw implements the Lempel-Ziv-Welch compressed data format,\n\/\/ described in T. A. Welch, ``A Technique for High-Performance Data\n\/\/ Compression'', Computer, 17(6) (June 1984), pp 8-19.\n\/\/\n\/\/ In particular, it implements LZW as used by the GIF, TIFF and PDF file\n\/\/ formats, which means variable-width codes up to 12 bits and the first\n\/\/ two non-literal codes are a clear code and an EOF code.\npackage lzw\n\n\/\/ TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF,\n\/\/ modulo LSB\/MSB packing order.\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ Order specifies the bit ordering in an LZW data stream.\ntype Order int\n\nconst (\n\t\/\/ LSB means Least Significant Bits first, as used in the GIF file format.\n\tLSB Order = iota\n\t\/\/ MSB means Most Significant Bits first, as used in the TIFF and PDF\n\t\/\/ file formats.\n\tMSB\n)\n\n\/\/ decoder is the state from which the readXxx method converts a byte\n\/\/ stream into a code stream.\ntype decoder struct {\n\tr io.ByteReader\n\tbits uint32\n\tnBits uint\n\twidth uint\n}\n\n\/\/ readLSB returns the next code for \"Least Significant Bits first\" data.\nfunc (d *decoder) readLSB() (uint16, os.Error) {\n\tfor d.nBits < d.width {\n\t\tx, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(x) << d.nBits\n\t\td.nBits += 8\n\t}\n\tcode := uint16(d.bits & (1<<d.width - 1))\n\td.bits >>= d.width\n\td.nBits -= d.width\n\treturn code, nil\n}\n\n\/\/ readMSB returns the next code for \"Most Significant Bits first\" data.\nfunc (d *decoder) readMSB() (uint16, os.Error) {\n\tfor d.nBits < d.width {\n\t\tx, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(x) << (24 - d.nBits)\n\t\td.nBits += 8\n\t}\n\tcode := uint16(d.bits >> (32 - d.width))\n\td.bits <<= d.width\n\td.nBits -= d.width\n\treturn code, nil\n}\n\n\/\/ decode decompresses bytes from r and writes them to pw.\n\/\/ read specifies how to decode bytes into codes.\n\/\/ litWidth is the width in bits of literal codes.\nfunc decode(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int, pw *io.PipeWriter) {\n\tbr, ok := r.(io.ByteReader)\n\tif !ok {\n\t\tbr = bufio.NewReader(r)\n\t}\n\tpw.CloseWithError(decode1(pw, br, read, uint(litWidth)))\n}\n\nfunc decode1(pw *io.PipeWriter, r io.ByteReader, read func(*decoder) (uint16, os.Error), litWidth uint) os.Error {\n\tconst (\n\t\tmaxWidth = 12\n\t\tinvalidCode = 0xffff\n\t)\n\td := decoder{r, 0, 0, 1 + litWidth}\n\tw := bufio.NewWriter(pw)\n\t\/\/ The first 1<<litWidth codes are literal codes.\n\t\/\/ The next two codes mean clear and EOF.\n\t\/\/ Other valid codes are in the range [lo, hi] where lo := clear + 2,\n\t\/\/ with the upper bound incrementing on each code seen.\n\tclear := uint16(1) << litWidth\n\teof, hi := clear+1, clear+1\n\t\/\/ overflow is the code at which hi overflows the code width.\n\toverflow := uint16(1) << d.width\n\tvar (\n\t\t\/\/ Each code c in [lo, hi] expands to two or more bytes. For c != hi:\n\t\t\/\/ suffix[c] is the last of these bytes.\n\t\t\/\/ prefix[c] is the code for all but the last byte.\n\t\t\/\/ This code can either be a literal code or another code in [lo, c).\n\t\t\/\/ The c == hi case is a special case.\n\t\tsuffix [1 << maxWidth]uint8\n\t\tprefix [1 << maxWidth]uint16\n\t\t\/\/ buf is a scratch buffer for reconstituting the bytes that a code expands to.\n\t\t\/\/ Code suffixes are written right-to-left from the end of the buffer.\n\t\tbuf [1 << maxWidth]byte\n\t)\n\n\t\/\/ Loop over the code stream, converting codes into decompressed bytes.\n\tlast := uint16(invalidCode)\n\tfor {\n\t\tcode, err := read(&d)\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tswitch {\n\t\tcase code < clear:\n\t\t\t\/\/ We have a literal code.\n\t\t\tif err := w.WriteByte(uint8(code)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif last != invalidCode {\n\t\t\t\t\/\/ Save what the hi code expands to.\n\t\t\t\tsuffix[hi] = uint8(code)\n\t\t\t\tprefix[hi] = last\n\t\t\t}\n\t\tcase code == clear:\n\t\t\td.width = 1 + litWidth\n\t\t\thi = eof\n\t\t\toverflow = 1 << d.width\n\t\t\tlast = invalidCode\n\t\t\tcontinue\n\t\tcase code == eof:\n\t\t\treturn w.Flush()\n\t\tcase code <= hi:\n\t\t\tc, i := code, len(buf)-1\n\t\t\tif code == hi {\n\t\t\t\t\/\/ code == hi is a special case which expands to the last expansion\n\t\t\t\t\/\/ followed by the head of the last expansion. To find the head, we walk\n\t\t\t\t\/\/ the prefix chain until we find a literal code.\n\t\t\t\tc = last\n\t\t\t\tfor c >= clear {\n\t\t\t\t\tc = prefix[c]\n\t\t\t\t}\n\t\t\t\tbuf[i] = uint8(c)\n\t\t\t\ti--\n\t\t\t\tc = last\n\t\t\t}\n\t\t\t\/\/ Copy the suffix chain into buf and then write that to w.\n\t\t\tfor c >= clear {\n\t\t\t\tbuf[i] = suffix[c]\n\t\t\t\ti--\n\t\t\t\tc = prefix[c]\n\t\t\t}\n\t\t\tbuf[i] = uint8(c)\n\t\t\tif _, err := w.Write(buf[i:]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ Save what the hi code expands to.\n\t\t\tsuffix[hi] = uint8(c)\n\t\t\tprefix[hi] = last\n\t\tdefault:\n\t\t\treturn os.NewError(\"lzw: invalid code\")\n\t\t}\n\t\tlast, hi = code, hi+1\n\t\tif hi == overflow {\n\t\t\tif d.width == maxWidth {\n\t\t\t\treturn os.NewError(\"lzw: missing clear code\")\n\t\t\t}\n\t\t\td.width++\n\t\t\toverflow <<= 1\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n\/\/ NewReader creates a new io.ReadCloser that satisfies reads by decompressing\n\/\/ the data read from r.\n\/\/ It is the caller's responsibility to call Close on the ReadCloser when\n\/\/ finished reading.\n\/\/ The number of bits to use for literal codes, litWidth, must be in the\n\/\/ range [2,8] and is typically 8.\nfunc NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {\n\tpr, pw := io.Pipe()\n\tvar read func(*decoder) (uint16, os.Error)\n\tswitch order {\n\tcase LSB:\n\t\tread = (*decoder).readLSB\n\tcase MSB:\n\t\tread = (*decoder).readMSB\n\tdefault:\n\t\tpw.CloseWithError(os.NewError(\"lzw: unknown order\"))\n\t\treturn pr\n\t}\n\tif litWidth < 2 || 8 < litWidth {\n\t\tpw.CloseWithError(fmt.Errorf(\"lzw: litWidth %d out of range\", litWidth))\n\t\treturn pr\n\t}\n\tgo decode(r, read, litWidth, pw)\n\treturn pr\n}\n<commit_msg>compress\/lzw: silently drop implied codes that are too large, instead of returning an error.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package lzw implements the Lempel-Ziv-Welch compressed data format,\n\/\/ described in T. A. Welch, ``A Technique for High-Performance Data\n\/\/ Compression'', Computer, 17(6) (June 1984), pp 8-19.\n\/\/\n\/\/ In particular, it implements LZW as used by the GIF, TIFF and PDF file\n\/\/ formats, which means variable-width codes up to 12 bits and the first\n\/\/ two non-literal codes are a clear code and an EOF code.\npackage lzw\n\n\/\/ TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF,\n\/\/ modulo LSB\/MSB packing order.\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ Order specifies the bit ordering in an LZW data stream.\ntype Order int\n\nconst (\n\t\/\/ LSB means Least Significant Bits first, as used in the GIF file format.\n\tLSB Order = iota\n\t\/\/ MSB means Most Significant Bits first, as used in the TIFF and PDF\n\t\/\/ file formats.\n\tMSB\n)\n\n\/\/ decoder is the state from which the readXxx method converts a byte\n\/\/ stream into a code stream.\ntype decoder struct {\n\tr io.ByteReader\n\tbits uint32\n\tnBits uint\n\twidth uint\n}\n\n\/\/ readLSB returns the next code for \"Least Significant Bits first\" data.\nfunc (d *decoder) readLSB() (uint16, os.Error) {\n\tfor d.nBits < d.width {\n\t\tx, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(x) << d.nBits\n\t\td.nBits += 8\n\t}\n\tcode := uint16(d.bits & (1<<d.width - 1))\n\td.bits >>= d.width\n\td.nBits -= d.width\n\treturn code, nil\n}\n\n\/\/ readMSB returns the next code for \"Most Significant Bits first\" data.\nfunc (d *decoder) readMSB() (uint16, os.Error) {\n\tfor d.nBits < d.width {\n\t\tx, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(x) << (24 - d.nBits)\n\t\td.nBits += 8\n\t}\n\tcode := uint16(d.bits >> (32 - d.width))\n\td.bits <<= d.width\n\td.nBits -= d.width\n\treturn code, nil\n}\n\n\/\/ decode decompresses bytes from r and writes them to pw.\n\/\/ read specifies how to decode bytes into codes.\n\/\/ litWidth is the width in bits of literal codes.\nfunc decode(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int, pw *io.PipeWriter) {\n\tbr, ok := r.(io.ByteReader)\n\tif !ok {\n\t\tbr = bufio.NewReader(r)\n\t}\n\tpw.CloseWithError(decode1(pw, br, read, uint(litWidth)))\n}\n\nfunc decode1(pw *io.PipeWriter, r io.ByteReader, read func(*decoder) (uint16, os.Error), litWidth uint) os.Error {\n\tconst (\n\t\tmaxWidth = 12\n\t\tinvalidCode = 0xffff\n\t)\n\td := decoder{r, 0, 0, 1 + litWidth}\n\tw := bufio.NewWriter(pw)\n\t\/\/ The first 1<<litWidth codes are literal codes.\n\t\/\/ The next two codes mean clear and EOF.\n\t\/\/ Other valid codes are in the range [lo, hi] where lo := clear + 2,\n\t\/\/ with the upper bound incrementing on each code seen.\n\tclear := uint16(1) << litWidth\n\teof, hi := clear+1, clear+1\n\t\/\/ overflow is the code at which hi overflows the code width.\n\toverflow := uint16(1) << d.width\n\tvar (\n\t\t\/\/ Each code c in [lo, hi] expands to two or more bytes. For c != hi:\n\t\t\/\/ suffix[c] is the last of these bytes.\n\t\t\/\/ prefix[c] is the code for all but the last byte.\n\t\t\/\/ This code can either be a literal code or another code in [lo, c).\n\t\t\/\/ The c == hi case is a special case.\n\t\tsuffix [1 << maxWidth]uint8\n\t\tprefix [1 << maxWidth]uint16\n\t\t\/\/ buf is a scratch buffer for reconstituting the bytes that a code expands to.\n\t\t\/\/ Code suffixes are written right-to-left from the end of the buffer.\n\t\tbuf [1 << maxWidth]byte\n\t)\n\n\t\/\/ Loop over the code stream, converting codes into decompressed bytes.\n\tlast := uint16(invalidCode)\n\tfor {\n\t\tcode, err := read(&d)\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tswitch {\n\t\tcase code < clear:\n\t\t\t\/\/ We have a literal code.\n\t\t\tif err := w.WriteByte(uint8(code)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif last != invalidCode {\n\t\t\t\t\/\/ Save what the hi code expands to.\n\t\t\t\tsuffix[hi] = uint8(code)\n\t\t\t\tprefix[hi] = last\n\t\t\t}\n\t\tcase code == clear:\n\t\t\td.width = 1 + litWidth\n\t\t\thi = eof\n\t\t\toverflow = 1 << d.width\n\t\t\tlast = invalidCode\n\t\t\tcontinue\n\t\tcase code == eof:\n\t\t\treturn w.Flush()\n\t\tcase code <= hi:\n\t\t\tc, i := code, len(buf)-1\n\t\t\tif code == hi {\n\t\t\t\t\/\/ code == hi is a special case which expands to the last expansion\n\t\t\t\t\/\/ followed by the head of the last expansion. To find the head, we walk\n\t\t\t\t\/\/ the prefix chain until we find a literal code.\n\t\t\t\tc = last\n\t\t\t\tfor c >= clear {\n\t\t\t\t\tc = prefix[c]\n\t\t\t\t}\n\t\t\t\tbuf[i] = uint8(c)\n\t\t\t\ti--\n\t\t\t\tc = last\n\t\t\t}\n\t\t\t\/\/ Copy the suffix chain into buf and then write that to w.\n\t\t\tfor c >= clear {\n\t\t\t\tbuf[i] = suffix[c]\n\t\t\t\ti--\n\t\t\t\tc = prefix[c]\n\t\t\t}\n\t\t\tbuf[i] = uint8(c)\n\t\t\tif _, err := w.Write(buf[i:]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif last != invalidCode {\n\t\t\t\t\/\/ Save what the hi code expands to.\n\t\t\t\tsuffix[hi] = uint8(c)\n\t\t\t\tprefix[hi] = last\n\t\t\t}\n\t\tdefault:\n\t\t\treturn os.NewError(\"lzw: invalid code\")\n\t\t}\n\t\tlast, hi = code, hi+1\n\t\tif hi >= overflow {\n\t\t\tif d.width == maxWidth {\n\t\t\t\tlast = invalidCode\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.width++\n\t\t\toverflow <<= 1\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n\/\/ NewReader creates a new io.ReadCloser that satisfies reads by decompressing\n\/\/ the data read from r.\n\/\/ It is the caller's responsibility to call Close on the ReadCloser when\n\/\/ finished reading.\n\/\/ The number of bits to use for literal codes, litWidth, must be in the\n\/\/ range [2,8] and is typically 8.\nfunc NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {\n\tpr, pw := io.Pipe()\n\tvar read func(*decoder) (uint16, os.Error)\n\tswitch order {\n\tcase LSB:\n\t\tread = (*decoder).readLSB\n\tcase MSB:\n\t\tread = (*decoder).readMSB\n\tdefault:\n\t\tpw.CloseWithError(os.NewError(\"lzw: unknown order\"))\n\t\treturn pr\n\t}\n\tif litWidth < 2 || 8 < litWidth {\n\t\tpw.CloseWithError(fmt.Errorf(\"lzw: litWidth %d out of range\", litWidth))\n\t\treturn pr\n\t}\n\tgo decode(r, read, litWidth, pw)\n\treturn pr\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Tools to convert a TomlTree to different representations\npackage toml\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ encodes a string to a TOML-compliant string value\nfunc encodeTomlString(value string) string {\n\tresult := \"\"\n\tfor _, rr := range value {\n\t\tintRr := uint16(rr)\n\t\tswitch rr {\n\t\tcase '\\b':\n\t\t\tresult += \"\\\\b\"\n\t\tcase '\\t':\n\t\t\tresult += \"\\\\t\"\n\t\tcase '\\n':\n\t\t\tresult += \"\\\\n\"\n\t\tcase '\\f':\n\t\t\tresult += \"\\\\f\"\n\t\tcase '\\r':\n\t\t\tresult += \"\\\\r\"\n\t\tcase '\"':\n\t\t\tresult += \"\\\\\\\"\"\n\t\tcase '\\\\':\n\t\t\tresult += \"\\\\\\\\\"\n\t\tdefault:\n\t\t\tif intRr < 0x001F {\n\t\t\t\tresult += fmt.Sprintf(\"\\\\u%0.4X\", intRr)\n\t\t\t} else {\n\t\t\t\tresult += string(rr)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Value print support function for ToString()\n\/\/ Outputs the TOML compliant string representation of a value\nfunc toTomlValue(item interface{}, indent int) string {\n\ttab := strings.Repeat(\" \", indent)\n\tswitch value := item.(type) {\n\tcase int64:\n\t\treturn tab + strconv.FormatInt(value, 10)\n\tcase float64:\n\t\treturn tab + strconv.FormatFloat(value, 'f', -1, 64)\n\tcase string:\n\t\treturn tab + \"\\\"\" + encodeTomlString(value) + \"\\\"\"\n\tcase bool:\n\t\tif value {\n\t\t\treturn \"true\"\n\t\t}\n\t\treturn \"false\"\n\tcase time.Time:\n\t\treturn tab + value.Format(time.RFC3339)\n\tcase []interface{}:\n\t\tresult := tab + \"[\\n\"\n\t\tfor _, item := range value {\n\t\t\tresult += toTomlValue(item, indent+2) + \",\\n\"\n\t\t}\n\t\treturn result + tab + \"]\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported value type: %v\", value))\n\t}\n}\n\n\/\/ Recursive support function for ToString()\n\/\/ Outputs a tree, using the provided keyspace to prefix group names\nfunc (t *TomlTree) toToml(indent, keyspace string) string {\n\tresult := \"\"\n\tfor k, v := range t.values {\n\t\t\/\/ figure out the keyspace\n\t\tcombinedKey := k\n\t\tif keyspace != \"\" {\n\t\t\tcombinedKey = keyspace + \".\" + combinedKey\n\t\t}\n\t\t\/\/ output based on type\n\t\tswitch node := v.(type) {\n\t\tcase []*TomlTree:\n\t\t\tfor _, item := range node {\n\t\t\t\tif len(item.Keys()) > 0 {\n\t\t\t\t\tresult += fmt.Sprintf(\"\\n%s[[%s]]\\n\", indent, combinedKey)\n\t\t\t\t}\n\t\t\t\tresult += item.toToml(indent+\" \", combinedKey)\n\t\t\t}\n\t\tcase *TomlTree:\n\t\t\tif len(node.Keys()) > 0 {\n\t\t\t\tresult += fmt.Sprintf(\"\\n%s[%s]\\n\", indent, combinedKey)\n\t\t\t}\n\t\t\tresult += node.toToml(indent+\" \", combinedKey)\n\t\tcase map[string]interface{}:\n\t\t\tsub := TreeFromMap(node)\n\n\t\t\tif len(sub.Keys()) > 0 {\n\t\t\t\tresult += fmt.Sprintf(\"\\n%s[%s]\\n\", indent, combinedKey)\n\t\t\t}\n\t\t\tresult += sub.toToml(indent+\" \", combinedKey)\n\t\tcase *tomlValue:\n\t\t\tresult += fmt.Sprintf(\"%s%s = %s\\n\", indent, k, toTomlValue(node.value, 0))\n\t\tdefault:\n\t\t\tresult += fmt.Sprintf(\"%s%s = %s\\n\", indent, k, toTomlValue(v, 0))\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ ToString generates a human-readable representation of the current tree.\n\/\/ Output spans multiple lines, and is suitable for ingest by a TOML parser\nfunc (t *TomlTree) ToString() string {\n\treturn t.toToml(\"\", \"\")\n}\n\n\/\/ ToMap recursively generates a representation of the current tree using map[string]interface{}.\nfunc (t *TomlTree) ToMap() map[string]interface{} {\n\tresult := map[string]interface{}{}\n\n\tfor k, v := range t.values {\n\t\tswitch node := v.(type) {\n\t\tcase []*TomlTree:\n\t\t\tresult[k] = make([]interface{}, 0)\n\t\t\tfor _, item := range node {\n\t\t\t\tresult[k] = item.ToMap()\n\t\t\t}\n\t\tcase *TomlTree:\n\t\t\tresult[k] = node.ToMap()\n\t\tcase map[string]interface{}:\n\t\t\tsub := TreeFromMap(node)\n\t\t\tresult[k] = sub.ToMap()\n\t\tcase *tomlValue:\n\t\t\tresult[k] = node.value\n\t\t}\n\t}\n\n\treturn result\n}\n<commit_msg>Implement fmt.Stringer and alias ToString (#73)<commit_after>\/\/ Tools to convert a TomlTree to different representations\npackage toml\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ encodes a string to a TOML-compliant string value\nfunc encodeTomlString(value string) string {\n\tresult := \"\"\n\tfor _, rr := range value {\n\t\tintRr := uint16(rr)\n\t\tswitch rr {\n\t\tcase '\\b':\n\t\t\tresult += \"\\\\b\"\n\t\tcase '\\t':\n\t\t\tresult += \"\\\\t\"\n\t\tcase '\\n':\n\t\t\tresult += \"\\\\n\"\n\t\tcase '\\f':\n\t\t\tresult += \"\\\\f\"\n\t\tcase '\\r':\n\t\t\tresult += \"\\\\r\"\n\t\tcase '\"':\n\t\t\tresult += \"\\\\\\\"\"\n\t\tcase '\\\\':\n\t\t\tresult += \"\\\\\\\\\"\n\t\tdefault:\n\t\t\tif intRr < 0x001F {\n\t\t\t\tresult += fmt.Sprintf(\"\\\\u%0.4X\", intRr)\n\t\t\t} else {\n\t\t\t\tresult += string(rr)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Value print support function for ToString()\n\/\/ Outputs the TOML compliant string representation of a value\nfunc toTomlValue(item interface{}, indent int) string {\n\ttab := strings.Repeat(\" \", indent)\n\tswitch value := item.(type) {\n\tcase int64:\n\t\treturn tab + strconv.FormatInt(value, 10)\n\tcase float64:\n\t\treturn tab + strconv.FormatFloat(value, 'f', -1, 64)\n\tcase string:\n\t\treturn tab + \"\\\"\" + encodeTomlString(value) + \"\\\"\"\n\tcase bool:\n\t\tif value {\n\t\t\treturn \"true\"\n\t\t}\n\t\treturn \"false\"\n\tcase time.Time:\n\t\treturn tab + value.Format(time.RFC3339)\n\tcase []interface{}:\n\t\tresult := tab + \"[\\n\"\n\t\tfor _, item := range value {\n\t\t\tresult += toTomlValue(item, indent+2) + \",\\n\"\n\t\t}\n\t\treturn result + tab + \"]\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported value type: %v\", value))\n\t}\n}\n\n\/\/ Recursive support function for ToString()\n\/\/ Outputs a tree, using the provided keyspace to prefix group names\nfunc (t *TomlTree) toToml(indent, keyspace string) string {\n\tresult := \"\"\n\tfor k, v := range t.values {\n\t\t\/\/ figure out the keyspace\n\t\tcombinedKey := k\n\t\tif keyspace != \"\" {\n\t\t\tcombinedKey = keyspace + \".\" + combinedKey\n\t\t}\n\t\t\/\/ output based on type\n\t\tswitch node := v.(type) {\n\t\tcase []*TomlTree:\n\t\t\tfor _, item := range node {\n\t\t\t\tif len(item.Keys()) > 0 {\n\t\t\t\t\tresult += fmt.Sprintf(\"\\n%s[[%s]]\\n\", indent, combinedKey)\n\t\t\t\t}\n\t\t\t\tresult += item.toToml(indent+\" \", combinedKey)\n\t\t\t}\n\t\tcase *TomlTree:\n\t\t\tif len(node.Keys()) > 0 {\n\t\t\t\tresult += fmt.Sprintf(\"\\n%s[%s]\\n\", indent, combinedKey)\n\t\t\t}\n\t\t\tresult += node.toToml(indent+\" \", combinedKey)\n\t\tcase map[string]interface{}:\n\t\t\tsub := TreeFromMap(node)\n\n\t\t\tif len(sub.Keys()) > 0 {\n\t\t\t\tresult += fmt.Sprintf(\"\\n%s[%s]\\n\", indent, combinedKey)\n\t\t\t}\n\t\t\tresult += sub.toToml(indent+\" \", combinedKey)\n\t\tcase *tomlValue:\n\t\t\tresult += fmt.Sprintf(\"%s%s = %s\\n\", indent, k, toTomlValue(node.value, 0))\n\t\tdefault:\n\t\t\tresult += fmt.Sprintf(\"%s%s = %s\\n\", indent, k, toTomlValue(v, 0))\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ ToString is an alias for String\nfunc (t *TomlTree) ToString() string {\n\treturn t.String()\n}\n\n\/\/ ToString generates a human-readable representation of the current tree.\n\/\/ Output spans multiple lines, and is suitable for ingest by a TOML parser\nfunc (t *TomlTree) String() string {\n\treturn t.toToml(\"\", \"\")\n}\n\n\/\/ ToMap recursively generates a representation of the current tree using map[string]interface{}.\nfunc (t *TomlTree) ToMap() map[string]interface{} {\n\tresult := map[string]interface{}{}\n\n\tfor k, v := range t.values {\n\t\tswitch node := v.(type) {\n\t\tcase []*TomlTree:\n\t\t\tresult[k] = make([]interface{}, 0)\n\t\t\tfor _, item := range node {\n\t\t\t\tresult[k] = item.ToMap()\n\t\t\t}\n\t\tcase *TomlTree:\n\t\t\tresult[k] = node.ToMap()\n\t\tcase map[string]interface{}:\n\t\t\tsub := TreeFromMap(node)\n\t\t\tresult[k] = sub.ToMap()\n\t\tcase *tomlValue:\n\t\t\tresult[k] = node.value\n\t\t}\n\t}\n\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage fuseutil\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\t\"bazil.org\/fuse\"\n\tfusefs \"bazil.org\/fuse\/fs\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ A struct representing the status of a mount operation, with methods for\n\/\/ waiting on the mount to complete, waiting for unmounting, and causing\n\/\/ unmounting.\ntype MountedFileSystem struct {\n\tdir string\n\n\t\/\/ The result to return from WaitForReady. Not valid until the channel is\n\t\/\/ closed.\n\treadyStatus error\n\treadyStatusAvailable chan struct{}\n\n\t\/\/ The result to return from Join. Not valid until the channel is closed.\n\tjoinStatus error\n\tjoinStatusAvailable chan struct{}\n}\n\n\/\/ Wait until the mount point is ready to be used. After a successful return\n\/\/ from this function, the contents of the mounted file system should be\n\/\/ visible in the directory supplied to NewMountPoint. May be called multiple\n\/\/ times.\nfunc (mfs *MountedFileSystem) WaitForReady(ctx context.Context) error {\n\tselect {\n\tcase <-mfs.readyStatusAvailable:\n\t\treturn mfs.readyStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Block until a mounted file system has been unmounted. The return value will\n\/\/ be non-nil if anything unexpected happened while serving. May be called\n\/\/ multiple times. Must not be called unless WaitForReady has returned nil.\nfunc (mfs *MountedFileSystem) Join(ctx context.Context) error {\n\tselect {\n\tcase <-mfs.joinStatusAvailable:\n\t\treturn mfs.joinStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Attempt to unmount the file system. Use Join to wait for it to actually be\n\/\/ unmounted. You must first call WaitForReady to ensure there is no race with\n\/\/ mounting.\nfunc (mfs *MountedFileSystem) Unmount() error {\n\treturn fuse.Unmount(mfs.dir)\n}\n\n\/\/ Runs in the background.\nfunc (mfs *MountedFileSystem) mountAndServe(\n\tfs fusefs.FS,\n\toptions []fuse.MountOption) {\n\t\/\/ Open a FUSE connection.\n\tlog.Println(\"Opening a FUSE connection.\")\n\tc, err := fuse.Mount(mfs.dir, options...)\n\tif err != nil {\n\t\tmfs.readyStatus = errors.New(\"fuse.Mount: \" + err.Error())\n\t\tclose(mfs.readyStatusAvailable)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\t\/\/ Start a goroutine that will notify the MountedFileSystem object when the\n\t\/\/ connection says it is ready (or it fails to become ready).\n\tgo func() {\n\t\t<-c.Ready\n\t\tmfs.readyStatus = c.MountError\n\t\tclose(mfs.readyStatusAvailable)\n\t}()\n\n\t\/\/ Serve the connection using the file system object.\n\tif err := fusefs.Serve(c, fs); err != nil {\n\t\tmfs.joinStatus = errors.New(\"fusefs.Serve: \" + err.Error())\n\t\tclose(mfs.joinStatusAvailable)\n\t\treturn\n\t}\n\n\t\/\/ Signal that everything is okay.\n\tclose(mfs.joinStatusAvailable)\n}\n\n\/\/ Attempt to mount the supplied file system on the given directory.\n\/\/ mfs.WaitForReady() must be called to find out whether the mount was\n\/\/ successful.\nfunc MountFileSystem(\n\tdir string,\n\tfs fusefs.FS,\n\toptions ...fuse.MountOption) (mfs *MountedFileSystem) {\n\t\/\/ Initialize the struct.\n\tmfs = &MountedFileSystem{\n\t\tdir: dir,\n\t\treadyStatusAvailable: make(chan struct{}),\n\t}\n\n\t\/\/ Mount in the background.\n\tgo mfs.mountAndServe(fs, options)\n\n\treturn mfs\n}\n<commit_msg>Fixed a nil channel bug.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage fuseutil\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\t\"bazil.org\/fuse\"\n\tfusefs \"bazil.org\/fuse\/fs\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ A struct representing the status of a mount operation, with methods for\n\/\/ waiting on the mount to complete, waiting for unmounting, and causing\n\/\/ unmounting.\ntype MountedFileSystem struct {\n\tdir string\n\n\t\/\/ The result to return from WaitForReady. Not valid until the channel is\n\t\/\/ closed.\n\treadyStatus error\n\treadyStatusAvailable chan struct{}\n\n\t\/\/ The result to return from Join. Not valid until the channel is closed.\n\tjoinStatus error\n\tjoinStatusAvailable chan struct{}\n}\n\n\/\/ Wait until the mount point is ready to be used. After a successful return\n\/\/ from this function, the contents of the mounted file system should be\n\/\/ visible in the directory supplied to NewMountPoint. May be called multiple\n\/\/ times.\nfunc (mfs *MountedFileSystem) WaitForReady(ctx context.Context) error {\n\tselect {\n\tcase <-mfs.readyStatusAvailable:\n\t\treturn mfs.readyStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Block until a mounted file system has been unmounted. The return value will\n\/\/ be non-nil if anything unexpected happened while serving. May be called\n\/\/ multiple times. Must not be called unless WaitForReady has returned nil.\nfunc (mfs *MountedFileSystem) Join(ctx context.Context) error {\n\tselect {\n\tcase <-mfs.joinStatusAvailable:\n\t\treturn mfs.joinStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ Attempt to unmount the file system. Use Join to wait for it to actually be\n\/\/ unmounted. You must first call WaitForReady to ensure there is no race with\n\/\/ mounting.\nfunc (mfs *MountedFileSystem) Unmount() error {\n\treturn fuse.Unmount(mfs.dir)\n}\n\n\/\/ Runs in the background.\nfunc (mfs *MountedFileSystem) mountAndServe(\n\tfs fusefs.FS,\n\toptions []fuse.MountOption) {\n\t\/\/ Open a FUSE connection.\n\tlog.Println(\"Opening a FUSE connection.\")\n\tc, err := fuse.Mount(mfs.dir, options...)\n\tif err != nil {\n\t\tmfs.readyStatus = errors.New(\"fuse.Mount: \" + err.Error())\n\t\tclose(mfs.readyStatusAvailable)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\t\/\/ Start a goroutine that will notify the MountedFileSystem object when the\n\t\/\/ connection says it is ready (or it fails to become ready).\n\tgo func() {\n\t\t<-c.Ready\n\t\tmfs.readyStatus = c.MountError\n\t\tclose(mfs.readyStatusAvailable)\n\t}()\n\n\t\/\/ Serve the connection using the file system object.\n\tif err := fusefs.Serve(c, fs); err != nil {\n\t\tmfs.joinStatus = errors.New(\"fusefs.Serve: \" + err.Error())\n\t\tclose(mfs.joinStatusAvailable)\n\t\treturn\n\t}\n\n\t\/\/ Signal that everything is okay.\n\tclose(mfs.joinStatusAvailable)\n}\n\n\/\/ Attempt to mount the supplied file system on the given directory.\n\/\/ mfs.WaitForReady() must be called to find out whether the mount was\n\/\/ successful.\nfunc MountFileSystem(\n\tdir string,\n\tfs fusefs.FS,\n\toptions ...fuse.MountOption) (mfs *MountedFileSystem) {\n\t\/\/ Initialize the struct.\n\tmfs = &MountedFileSystem{\n\t\tdir: dir,\n\t\treadyStatusAvailable: make(chan struct{}),\n\t\tjoinStatusAvailable: make(chan struct{}),\n\t}\n\n\t\/\/ Mount in the background.\n\tgo mfs.mountAndServe(fs, options)\n\n\treturn mfs\n}\n<|endoftext|>"} {"text":"<commit_before>package gridserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"strings\"\n\n\t\"github.com\/golang\/freetype\"\n\t\"github.com\/golang\/freetype\/truetype\"\n\tlog \"github.com\/golang\/glog\"\n\tgeojson \"github.com\/paulmach\/go.geojson\"\n\t\"golang.org\/x\/image\/font\/gofont\/goregular\"\n)\n\nconst (\n\ttileSize = 256 \/\/ size of tiles in pixels. Accessed by other functions in this package.\n\tfontSize = 80\n)\n\nvar (\n\timageTileFont *truetype.Font\n\n\twhite = color.RGBA{255, 255, 255, 255}\n\tblack = color.RGBA{0, 0, 0, 255}\n\tgrey = color.RGBA{0, 0, 0, 128}\n\tlineColor = black\n\tlabelColor = grey\n)\n\n\/\/ Image returns the tile as a 256x256 pixel PNG image.\nfunc (t *TileRef) Image() ([]byte, error) {\n\tlog.Infof(\"Producing image for tile z\/x\/y %v\/%v\/%v (%s)\", t.Z, t.X, t.Y, t.Path())\n\tgj, err := t.GeoJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If the font hasn't been set, fallback to the default.\n\tif imageTileFont == nil {\n\t\tif err := SetImageFont(goregular.TTF); err != nil {\n\t\t\treturn []byte{}, fmt.Errorf(\"Failed reading font: %v\", err)\n\t\t}\n\t}\n\timg := image.NewRGBA(image.Rect(0, 0, tileSize, tileSize))\n\t\/\/ Create a context used for adding text to the image.\n\tctx := freetype.NewContext()\n\tctx.SetDst(img)\n\tctx.SetClip(image.Rect(-tileSize, -tileSize, tileSize, tileSize))\n\tctx.SetSrc(image.NewUniform(t.Options.LabelColor))\n\tctx.SetFont(imageTileFont)\n\n\t\/\/ Create a colour for the sub-grid, using half-alpha of the label colour.\n\tr, g, b, a := t.Options.LabelColor.RGBA()\n\ta = a \/ 2\n\tgridCol := color.RGBA{\n\t\tuint8(min(r, a)),\n\t\tuint8(min(g, a)),\n\t\tuint8(min(b, a)),\n\t\tuint8(a),\n\t}\n\t\/\/ Draw and label each OLC grid cell that is returned in the geojson (i.e. feature).\n\tfor _, ft := range gj.Features {\n\t\tcell := makeGridCell(ctx, t, ft)\n\t\t\/\/ Decide if we want to draw the sub-grid, depending on the code length and the pixel width.\n\t\tif len(ft.Properties[\"global_code\"].(string)) <= 10 && cell.width > 200 {\n\t\t\tcell.drawGrid(t, img, gridCol, 20, 20)\n\t\t} else if len(ft.Properties[\"global_code\"].(string)) > 10 && cell.width > 100 {\n\t\t\tcell.drawGrid(t, img, gridCol, 4, 5)\n\t\t}\n\t\t\/\/ Draw the cell outline.\n\t\tcell.drawRect(img, t.Options.LineColor)\n\t\t\/\/ Draw the label.\n\t\tcell.label(featureLabel(ft))\n\t}\n\tbuf := new(bytes.Buffer)\n\tpng.Encode(buf, img)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ gridCell represents an OLC grid cell.\ntype gridCell struct {\n\tctx *freetype.Context\n\t\/\/ Latlng coordinates.\n\tlatlo, lnglo, lathi, lnghi float64\n\t\/\/ Pixel coordinates.\n\tx1, y1, x2, y2 float64\n\tcx, cy, width float64\n}\n\n\/\/ makeGridCell creates a gridCell structure.\nfunc makeGridCell(ctx *freetype.Context, t *TileRef, f *geojson.Feature) *gridCell {\n\to := &gridCell{ctx: ctx}\n\t\/\/ Get the bounds and create the pixel coordinates.\n\to.latlo, o.lnglo, o.lathi, o.lnghi = bounds(f)\n\t\/\/ The pixels go from top to bottom so the y-coordinates are swapped.\n\to.x1, o.y2 = t.LatLngToPixel(o.latlo, o.lnglo, tileSize)\n\to.x2, o.y1 = t.LatLngToPixel(o.lathi, o.lnghi, tileSize)\n\t\/\/ Get the pixel center and the width.\n\to.cx = (o.x1 + o.x2) \/ 2\n\to.cy = (o.y1 + o.y2) \/ 2\n\to.width = o.x2 - o.x1\n\treturn o\n}\n\n\/\/ drawRect draws a rectangle utilizing HLine() and VLine()\nfunc (c *gridCell) drawRect(img *image.RGBA, col color.Color) {\n\tc.drawHoriz(img, col, c.y1)\n\tc.drawHoriz(img, col, c.y2)\n\tc.drawVert(img, col, c.x1)\n\tc.drawVert(img, col, c.x2)\n}\n\n\/\/ drawGrid draws a grid within the cell of xdiv horizontal and ydiv vertical divisions.\nfunc (c *gridCell) drawGrid(t *TileRef, img *image.RGBA, col color.Color, xdiv, ydiv float64) {\n\t\/\/ Draw the horizontal sub grid. We need to use the lat\/lng coordinates for the horizontal lines\n\t\/\/ because the divisions are regular in degrees but not pixels.\n\ts := (c.lathi - c.latlo) \/ ydiv\n\tfor i := 1; i <= 19; i++ {\n\t\t_, y := t.LatLngToPixel(c.latlo+float64(i)*s, c.lnglo, tileSize)\n\t\tc.drawHoriz(img, col, y)\n\t}\n\t\/\/ Draw the vertical sub grid.\n\ts = (c.x2 - c.x1) \/ xdiv\n\tfor i := 1; i <= 19; i++ {\n\t\tc.drawVert(img, col, c.x1+float64(i)*s)\n\t}\n}\n\n\/\/ drawHoriz draws a horizontal line across the cell.\nfunc (c *gridCell) drawHoriz(img *image.RGBA, col color.Color, y float64) {\n\tfor x := c.x1; x <= c.x2; x++ {\n\t\timg.Set(int(x), int(y), col)\n\t}\n}\n\n\/\/ drawVert draws a vertical line across the cell.\nfunc (c *gridCell) drawVert(img *image.RGBA, col color.Color, x float64) {\n\tfor y := c.y1; y <= c.y2; y++ {\n\t\timg.Set(int(x), int(y), col)\n\t}\n}\n\n\/\/ label draws a multi-line label in the center of the cell - not tile, but grid cell.\n\/\/ The font size of each line is scaled to fit.\nfunc (c *gridCell) label(label string) {\n\t\/\/ Split the label into it's lines and get the font sizes for each line.\n\tlines := strings.Split(label, \"\\n\")\n\tfontSizes := make([]float64, len(lines))\n\tvar total float64\n\tfor n, l := range lines {\n\t\t\/\/ Get the font size for the label.\n\t\tvar fs float64\n\t\t\/\/ If it's the first line, and there are more, then the font size is reduced.\n\t\tif n == 0 && len(lines) > 1 {\n\t\t\tfs = scaleFontSize(c.ctx, c.width, strings.Repeat(\"W\", 5)) * 0.9\n\t\t} else {\n\t\t\tfs = scaleFontSize(c.ctx, c.width, strings.Repeat(\"W\", len(l)))\n\t\t}\n\t\tfontSizes[n] = fs\n\t\ttotal += fs\n\t}\n\t\/\/ Work out the y coordinate for the _last_ line. The y coordinate is the bottom of the line,\n\t\/\/ measured from the top of the cell.\n\ty := c.cy + total\/2\n\t\/\/ Draw the last line, and work backwards to the first line.\n\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\tc.ctx.SetFontSize(fontSizes[i])\n\t\tw := getStringWidth(c.ctx, lines[i]) \/ 2\n\t\tif _, err := c.ctx.DrawString(lines[i], freetype.Pt(int(c.cx-w), int(y))); err != nil {\n\t\t\tlog.Errorf(\"Error drawing label: %v\", err)\n\t\t}\n\t\t\/\/ Reduce the y coordinate by the font size.\n\t\ty -= fontSizes[i]\n\t}\n}\n\n\/\/ scaleFontSize returns the scaled font size so the label fits within the cell width.\nfunc scaleFontSize(ctx *freetype.Context, cw float64, label string) float64 {\n\tif len(label) == 0 {\n\t\treturn 1000\n\t}\n\t\/\/ Start with the default font size.\n\tctx.SetFontSize(fontSize)\n\tlw := getStringWidth(ctx, label)\n\t\/\/ Scale the font to make the label fit in the cell width.\n\treturn (cw \/ lw) * fontSize\n}\n\n\/\/ SetImageFont parses a TTF font and uses it for the image labels.\nfunc SetImageFont(ttf []byte) error {\n\t\/\/ Parse the truetype file.\n\tfont, err := truetype.Parse(ttf)\n\timageTileFont = font\n\treturn err\n}\n\n\/\/ getStringWidth returns the width of the string in the current font and font size.\nfunc getStringWidth(ctx *freetype.Context, s string) float64 {\n\tif len(s) == 0 {\n\t\treturn 0\n\t}\n\t\/\/ Draw it somewhere off the tile.\n\tst := freetype.Pt(-1000, -1000)\n\tval, err := ctx.DrawString(s, st)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed drawing string to compute width: %v\", err)\n\t\treturn 0\n\t}\n\tw := float64(val.X.Round() - st.X.Round())\n\treturn w\n}\n\nfunc min(a, b uint32) uint32 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n<commit_msg>Correct comments<commit_after>package gridserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"strings\"\n\n\t\"github.com\/golang\/freetype\"\n\t\"github.com\/golang\/freetype\/truetype\"\n\tlog \"github.com\/golang\/glog\"\n\tgeojson \"github.com\/paulmach\/go.geojson\"\n\t\"golang.org\/x\/image\/font\/gofont\/goregular\"\n)\n\nconst (\n\ttileSize = 256 \/\/ size of tiles in pixels. Accessed by other functions in this package.\n\tfontSize = 80\n)\n\nvar (\n\timageTileFont *truetype.Font\n\n\twhite = color.RGBA{255, 255, 255, 255}\n\tblack = color.RGBA{0, 0, 0, 255}\n\tgrey = color.RGBA{0, 0, 0, 128}\n\tlineColor = black\n\tlabelColor = grey\n)\n\n\/\/ Image returns the tile as a 256x256 pixel PNG image.\nfunc (t *TileRef) Image() ([]byte, error) {\n\tlog.Infof(\"Producing image for tile z\/x\/y %v\/%v\/%v (%s)\", t.Z, t.X, t.Y, t.Path())\n\tgj, err := t.GeoJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ If the font hasn't been set, fallback to the default.\n\tif imageTileFont == nil {\n\t\tif err := SetImageFont(goregular.TTF); err != nil {\n\t\t\treturn []byte{}, fmt.Errorf(\"Failed reading font: %v\", err)\n\t\t}\n\t}\n\timg := image.NewRGBA(image.Rect(0, 0, tileSize, tileSize))\n\t\/\/ Create a context used for adding text to the image.\n\tctx := freetype.NewContext()\n\tctx.SetDst(img)\n\tctx.SetClip(image.Rect(-tileSize, -tileSize, tileSize, tileSize))\n\tctx.SetSrc(image.NewUniform(t.Options.LabelColor))\n\tctx.SetFont(imageTileFont)\n\n\t\/\/ Create a colour for the sub-grid, using half-alpha of the label colour.\n\tr, g, b, a := t.Options.LabelColor.RGBA()\n\ta = a \/ 2\n\tgridCol := color.RGBA{\n\t\tuint8(min(r, a)),\n\t\tuint8(min(g, a)),\n\t\tuint8(min(b, a)),\n\t\tuint8(a),\n\t}\n\t\/\/ Draw and label each OLC grid cell that is returned in the geojson (i.e. feature).\n\tfor _, ft := range gj.Features {\n\t\tcell := makeGridCell(ctx, t, ft)\n\t\t\/\/ Decide if we want to draw the sub-grid, depending on the code length and the pixel width.\n\t\tif len(ft.Properties[\"global_code\"].(string)) <= 10 && cell.width > 200 {\n\t\t\tcell.drawGrid(t, img, gridCol, 20, 20)\n\t\t} else if len(ft.Properties[\"global_code\"].(string)) > 10 && cell.width > 100 {\n\t\t\tcell.drawGrid(t, img, gridCol, 4, 5)\n\t\t}\n\t\t\/\/ Draw the cell outline.\n\t\tcell.drawRect(img, t.Options.LineColor)\n\t\t\/\/ Draw the label.\n\t\tcell.label(featureLabel(ft))\n\t}\n\tbuf := new(bytes.Buffer)\n\tpng.Encode(buf, img)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ gridCell represents an OLC grid cell.\ntype gridCell struct {\n\tctx *freetype.Context\n\t\/\/ Latlng coordinates.\n\tlatlo, lnglo, lathi, lnghi float64\n\t\/\/ Pixel coordinates.\n\tx1, y1, x2, y2 float64\n\tcx, cy, width float64\n}\n\n\/\/ makeGridCell creates a gridCell structure.\nfunc makeGridCell(ctx *freetype.Context, t *TileRef, f *geojson.Feature) *gridCell {\n\to := &gridCell{ctx: ctx}\n\t\/\/ Get the bounds and create the pixel coordinates.\n\to.latlo, o.lnglo, o.lathi, o.lnghi = bounds(f)\n\t\/\/ The pixels go from top to bottom so the y-coordinates are swapped.\n\to.x1, o.y2 = t.LatLngToPixel(o.latlo, o.lnglo, tileSize)\n\to.x2, o.y1 = t.LatLngToPixel(o.lathi, o.lnghi, tileSize)\n\t\/\/ Get the pixel center and the width.\n\to.cx = (o.x1 + o.x2) \/ 2\n\to.cy = (o.y1 + o.y2) \/ 2\n\to.width = o.x2 - o.x1\n\treturn o\n}\n\n\/\/ drawRect draws a rectangle around the cell.\nfunc (c *gridCell) drawRect(img *image.RGBA, col color.Color) {\n\tc.drawHoriz(img, col, c.y1)\n\tc.drawHoriz(img, col, c.y2)\n\tc.drawVert(img, col, c.x1)\n\tc.drawVert(img, col, c.x2)\n}\n\n\/\/ drawGrid draws a grid within the cell of xdiv horizontal and ydiv vertical divisions.\nfunc (c *gridCell) drawGrid(t *TileRef, img *image.RGBA, col color.Color, xdiv, ydiv float64) {\n\t\/\/ Draw the horizontal sub grid. We need to use the lat\/lng coordinates for the horizontal lines\n\t\/\/ because the divisions are regular in degrees but not pixels.\n\ts := (c.lathi - c.latlo) \/ ydiv\n\tfor i := 1; i <= 19; i++ {\n\t\t_, y := t.LatLngToPixel(c.latlo+float64(i)*s, c.lnglo, tileSize)\n\t\tc.drawHoriz(img, col, y)\n\t}\n\t\/\/ Draw the vertical sub grid.\n\ts = (c.x2 - c.x1) \/ xdiv\n\tfor i := 1; i <= 19; i++ {\n\t\tc.drawVert(img, col, c.x1+float64(i)*s)\n\t}\n}\n\n\/\/ drawHoriz draws a horizontal line across the cell.\nfunc (c *gridCell) drawHoriz(img *image.RGBA, col color.Color, y float64) {\n\tfor x := c.x1; x <= c.x2; x++ {\n\t\timg.Set(int(x), int(y), col)\n\t}\n}\n\n\/\/ drawVert draws a vertical line across the cell.\nfunc (c *gridCell) drawVert(img *image.RGBA, col color.Color, x float64) {\n\tfor y := c.y1; y <= c.y2; y++ {\n\t\timg.Set(int(x), int(y), col)\n\t}\n}\n\n\/\/ label draws a multi-line label in the center of the cell - not tile, but grid cell.\n\/\/ The font size of each line is scaled to fit the cell width.\nfunc (c *gridCell) label(label string) {\n\t\/\/ Split the label into it's lines and get the font sizes for each line.\n\tlines := strings.Split(label, \"\\n\")\n\tfontSizes := make([]float64, len(lines))\n\tvar total float64\n\tfor n, l := range lines {\n\t\t\/\/ Get the font size for the label.\n\t\tvar fs float64\n\t\t\/\/ If it's the first line, and there are more, then the font size is reduced.\n\t\tif n == 0 && len(lines) > 1 {\n\t\t\tfs = scaleFontSize(c.ctx, c.width, strings.Repeat(\"W\", 5)) * 0.9\n\t\t} else {\n\t\t\tfs = scaleFontSize(c.ctx, c.width, strings.Repeat(\"W\", len(l)))\n\t\t}\n\t\tfontSizes[n] = fs\n\t\ttotal += fs\n\t}\n\t\/\/ Work out the y coordinate for the _last_ line. The y coordinate is the bottom of the line,\n\t\/\/ measured from the top of the cell.\n\ty := c.cy + total\/2\n\t\/\/ Draw the last line, and work backwards to the first line.\n\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\tc.ctx.SetFontSize(fontSizes[i])\n\t\tw := getStringWidth(c.ctx, lines[i]) \/ 2\n\t\tif _, err := c.ctx.DrawString(lines[i], freetype.Pt(int(c.cx-w), int(y))); err != nil {\n\t\t\tlog.Errorf(\"Error drawing label: %v\", err)\n\t\t}\n\t\t\/\/ Reduce the y coordinate by the font size.\n\t\ty -= fontSizes[i]\n\t}\n}\n\n\/\/ scaleFontSize returns the scaled font size to fit the label within the available width.\nfunc scaleFontSize(ctx *freetype.Context, cw float64, label string) float64 {\n\tif len(label) == 0 {\n\t\treturn 1000\n\t}\n\t\/\/ Start with the default font size.\n\tctx.SetFontSize(fontSize)\n\tlw := getStringWidth(ctx, label)\n\t\/\/ Scale the font to make the label fit in the cell width.\n\treturn (cw \/ lw) * fontSize\n}\n\n\/\/ SetImageFont parses a TTF font and uses it for the image labels.\nfunc SetImageFont(ttf []byte) error {\n\t\/\/ Parse the truetype file.\n\tfont, err := truetype.Parse(ttf)\n\timageTileFont = font\n\treturn err\n}\n\n\/\/ getStringWidth returns the width of the string in the current font and font size.\nfunc getStringWidth(ctx *freetype.Context, s string) float64 {\n\tif len(s) == 0 {\n\t\treturn 0\n\t}\n\t\/\/ Draw it somewhere off the tile.\n\tst := freetype.Pt(-1000, -1000)\n\tval, err := ctx.DrawString(s, st)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed drawing string to compute width: %v\", err)\n\t\treturn 0\n\t}\n\tw := float64(val.X.Round() - st.X.Round())\n\treturn w\n}\n\nfunc min(a, b uint32) uint32 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n<|endoftext|>"} {"text":"<commit_before>package netascii\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n)\n\nfunc WriteTo(b []byte, bw *bufio.Writer) (n int, err error) {\n\tb = bytes.Replace(b, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tb = bytes.Replace(b, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\tn, err = bw.Write(b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn n, nil\n}\n\nfunc Convert(b []byte) []byte {\n\tb = bytes.Replace(b, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tb = bytes.Replace(b, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\treturn b\n}\n\nfunc ReadFull(r io.Reader, buf []byte) (n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\tn, err = r.Read(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tbuf = bytes.Replace(buf, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tbuf = bytes.Replace(buf, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\n\treturn n, nil\n}\n<commit_msg>Return the actual size written to bw, even when an error occurs.<commit_after>package netascii\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n)\n\nfunc WriteTo(b []byte, bw *bufio.Writer) (n int, err error) {\n\tb = bytes.Replace(b, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tb = bytes.Replace(b, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\tn, err = bw.Write(b)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}\n\nfunc Convert(b []byte) []byte {\n\tb = bytes.Replace(b, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tb = bytes.Replace(b, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\treturn b\n}\n\nfunc ReadFull(r io.Reader, buf []byte) (n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\tn, err = r.Read(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tbuf = bytes.Replace(buf, []byte{'\\r', 0}, []byte{'\\r'}, -1)\n\tbuf = bytes.Replace(buf, []byte{'\\r', '\\n'}, []byte{'\\n'}, -1)\n\n\treturn n, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package network\n\nimport (\n\t\"context\"\n\t\"crypto\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/networking\/benchlist\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/validators\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/hashing\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/logging\"\n\t\"github.com\/ava-labs\/avalanchego\/version\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\ntype TestMsg struct {\n\top Op\n\tbytes []byte\n}\n\nfunc newTestMsg(op Op, bits []byte) *TestMsg {\n\treturn &TestMsg{op: op, bytes: bits}\n}\n\nfunc (m *TestMsg) Op() Op {\n\treturn m.op\n}\n\nfunc (*TestMsg) Get(Field) interface{} {\n\treturn nil\n}\n\nfunc (m *TestMsg) Bytes() []byte {\n\treturn m.bytes\n}\n\nfunc TestPeer_Close(t *testing.T) {\n\tlog := logging.NoLog{}\n\tip := utils.NewDynamicIPDesc(\n\t\tnet.IPv6loopback,\n\t\t0,\n\t)\n\tid := ids.ShortID(hashing.ComputeHash160Array([]byte(ip.IP().String())))\n\tnetworkID := uint32(0)\n\tappVersion := version.NewDefaultApplication(\"app\", 0, 1, 0)\n\tversionParser := version.NewDefaultApplicationParser()\n\n\tlistener := &testListener{\n\t\taddr: &net.TCPAddr{\n\t\t\tIP: net.IPv6loopback,\n\t\t\tPort: 0,\n\t\t},\n\t\tinbound: make(chan net.Conn, 1<<10),\n\t\tclosed: make(chan struct{}),\n\t}\n\tcaller := &testDialer{\n\t\taddr: &net.TCPAddr{\n\t\t\tIP: net.IPv6loopback,\n\t\t\tPort: 0,\n\t\t},\n\t\toutbounds: make(map[string]*testListener),\n\t}\n\tserverUpgrader0 := NewTLSServerUpgrader(tlsConfig0)\n\tclientUpgrader0 := NewTLSClientUpgrader(tlsConfig0)\n\n\tvdrs := validators.NewSet()\n\thandler := &testHandler{}\n\n\tversionManager := version.NewCompatibility(\n\t\tappVersion,\n\t\tappVersion,\n\t\ttime.Now(),\n\t\tappVersion,\n\t\tappVersion,\n\t\ttime.Now(),\n\t\tappVersion,\n\t)\n\n\tnetwrk := NewDefaultNetwork(\n\t\tprometheus.NewRegistry(),\n\t\tlog,\n\t\tid,\n\t\tip,\n\t\tnetworkID,\n\t\tversionManager,\n\t\tversionParser,\n\t\tlistener,\n\t\tcaller,\n\t\tserverUpgrader0,\n\t\tclientUpgrader0,\n\t\tvdrs,\n\t\tvdrs,\n\t\thandler,\n\t\ttime.Duration(0),\n\t\t0,\n\t\tdefaultSendQueueSize,\n\t\tHealthConfig{},\n\t\tbenchlist.NewManager(&benchlist.Config{}),\n\t\tdefaultAliasTimeout,\n\t\tcert0.PrivateKey.(crypto.Signer),\n\t\tdefaultPeerListSize,\n\t\tdefaultGossipPeerListTo,\n\t\tdefaultGossipPeerListFreq,\n\t\tNewDialerConfig(0, 30*time.Second),\n\t\tfalse,\n\t\tdefaultGossipAcceptedFrontierSize,\n\t\tdefaultGossipOnAcceptSize,\n\t)\n\tassert.NotNil(t, netwrk)\n\n\tip1 := utils.NewDynamicIPDesc(\n\t\tnet.IPv6loopback,\n\t\t1,\n\t)\n\tcaller.outbounds[ip1.IP().String()] = listener\n\tconn, err := caller.Dial(context.Background(), ip1.IP())\n\tassert.NoError(t, err)\n\n\tbasenetwork := netwrk.(*network)\n\n\tnewmsgbytes := []byte(\"hello\")\n\n\t\/\/ fake a peer, and write a message\n\tpeer := newPeer(basenetwork, conn, ip1.IP())\n\tpeer.sender = make(chan []byte, 10)\n\ttestMsg := newTestMsg(GetVersion, newmsgbytes)\n\tpeer.Send(testMsg, true)\n\n\t\/\/ make sure the net pending and peer pending bytes updated\n\tif basenetwork.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\tif peer.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\n\tgo func() {\n\t\terr := netwrk.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\tpeer.Close()\n\n\t\/\/ The network pending bytes should be reduced back to zero on close.\n\tif basenetwork.pendingBytes != int64(0) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\tif peer.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n}\n<commit_msg>add test<commit_after>package network\n\nimport (\n\t\"context\"\n\t\"crypto\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/networking\/benchlist\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/validators\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/hashing\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/logging\"\n\t\"github.com\/ava-labs\/avalanchego\/version\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\ntype TestMsg struct {\n\top Op\n\tbytes []byte\n}\n\nfunc newTestMsg(op Op, bits []byte) *TestMsg {\n\treturn &TestMsg{op: op, bytes: bits}\n}\n\nfunc (m *TestMsg) Op() Op {\n\treturn m.op\n}\n\nfunc (*TestMsg) Get(Field) interface{} {\n\treturn nil\n}\n\nfunc (m *TestMsg) Bytes() []byte {\n\treturn m.bytes\n}\n\nfunc TestPeer_Close(t *testing.T) {\n\tlog := logging.NoLog{}\n\tip := utils.NewDynamicIPDesc(\n\t\tnet.IPv6loopback,\n\t\t0,\n\t)\n\tid := ids.ShortID(hashing.ComputeHash160Array([]byte(ip.IP().String())))\n\tnetworkID := uint32(0)\n\tappVersion := version.NewDefaultApplication(\"app\", 0, 1, 0)\n\tversionParser := version.NewDefaultApplicationParser()\n\n\tlistener := &testListener{\n\t\taddr: &net.TCPAddr{\n\t\t\tIP: net.IPv6loopback,\n\t\t\tPort: 0,\n\t\t},\n\t\tinbound: make(chan net.Conn, 1<<10),\n\t\tclosed: make(chan struct{}),\n\t}\n\tcaller := &testDialer{\n\t\taddr: &net.TCPAddr{\n\t\t\tIP: net.IPv6loopback,\n\t\t\tPort: 0,\n\t\t},\n\t\toutbounds: make(map[string]*testListener),\n\t}\n\tserverUpgrader0 := NewTLSServerUpgrader(tlsConfig0)\n\tclientUpgrader0 := NewTLSClientUpgrader(tlsConfig0)\n\n\tvdrs := validators.NewSet()\n\thandler := &testHandler{}\n\n\tversionManager := version.NewCompatibility(\n\t\tappVersion,\n\t\tappVersion,\n\t\ttime.Now(),\n\t\tappVersion,\n\t\tappVersion,\n\t\ttime.Now(),\n\t\tappVersion,\n\t)\n\n\tnetwrk := NewDefaultNetwork(\n\t\tprometheus.NewRegistry(),\n\t\tlog,\n\t\tid,\n\t\tip,\n\t\tnetworkID,\n\t\tversionManager,\n\t\tversionParser,\n\t\tlistener,\n\t\tcaller,\n\t\tserverUpgrader0,\n\t\tclientUpgrader0,\n\t\tvdrs,\n\t\tvdrs,\n\t\thandler,\n\t\ttime.Duration(0),\n\t\t0,\n\t\tdefaultSendQueueSize,\n\t\tHealthConfig{},\n\t\tbenchlist.NewManager(&benchlist.Config{}),\n\t\tdefaultAliasTimeout,\n\t\tcert0.PrivateKey.(crypto.Signer),\n\t\tdefaultPeerListSize,\n\t\tdefaultGossipPeerListTo,\n\t\tdefaultGossipPeerListFreq,\n\t\tNewDialerConfig(0, 30*time.Second),\n\t\tfalse,\n\t\tdefaultGossipAcceptedFrontierSize,\n\t\tdefaultGossipOnAcceptSize,\n\t)\n\tassert.NotNil(t, netwrk)\n\n\tip1 := utils.NewDynamicIPDesc(\n\t\tnet.IPv6loopback,\n\t\t1,\n\t)\n\tcaller.outbounds[ip1.IP().String()] = listener\n\tconn, err := caller.Dial(context.Background(), ip1.IP())\n\tassert.NoError(t, err)\n\n\tbasenetwork := netwrk.(*network)\n\n\tnewmsgbytes := []byte(\"hello\")\n\n\t\/\/ fake a peer, and write a message\n\tpeer := newPeer(basenetwork, conn, ip1.IP())\n\tpeer.sender = make(chan []byte, 10)\n\ttestMsg := newTestMsg(GetVersion, newmsgbytes)\n\tpeer.Send(testMsg, true)\n\n\t\/\/ make sure the net pending and peer pending bytes updated\n\tif basenetwork.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\tif peer.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\n\tgo func() {\n\t\terr := netwrk.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\tpeer.Close()\n\n\t\/\/ The network pending bytes should be reduced back to zero on close.\n\tif basenetwork.pendingBytes != int64(0) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n\tif peer.pendingBytes != int64(len(newmsgbytes)) {\n\t\tt.Fatalf(\"pending bytes invalid\")\n\t}\n}\n\nfunc TestShouldCompress(t *testing.T) {\n\tp := newPeer(nil, nil, utils.IPDesc{}) \/\/ Dummy values are OK here\n\tshouldNotCompressOps := []Op{Version, GetVersion, PeerList, GetPeerList, Ping, Pong}\n\tshouldCompressOps := []Op{Put, Get, MultiPut, GetAncestors, Accepted, AcceptedFrontier, GetAccepted, GetAcceptedFrontier, PushQuery, PullQuery, Chits}\n\tfor _, op := range shouldCompressOps {\n\t\t\/\/ Shouldn't compress because [p.canHandleCompressed] == false\n\t\tassert.False(t, p.shouldCompress(op))\n\t}\n\tfor _, op := range shouldNotCompressOps {\n\t\tassert.False(t, p.shouldCompress(op))\n\t}\n\n\tp.canHandleCompressed = true\n\tfor _, op := range shouldCompressOps {\n\t\tassert.True(t, p.shouldCompress(op))\n\t}\n\tfor _, op := range shouldNotCompressOps {\n\t\tassert.False(t, p.shouldCompress(op))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package torpedo_lastfm_plugin\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\n\tcommon \"github.com\/tb0hdan\/torpedo_common\"\n\t\"github.com\/tb0hdan\/torpedo_registry\"\n\t\n\t\"github.com\/shkh\/lastfm-go\/lastfm\"\n)\n\nvar (\n\tLastFmKey *string\n\tLastFmSecret *string\n)\n\nfunc ConfigureLastFmPlugin(cfg *torpedo_registry.ConfigStruct) {\n\tLastFmKey = flag.String(\"lastfm_key\", \"\", \"Last.FM API Key\")\n\tLastFmSecret = flag.String(\"lastfm_secret\", \"\", \"Last.FM API Secret\")\n\n}\n\nfunc ParseLastFmPlugin(cfg *torpedo_registry.ConfigStruct) {\n\tcfg.SetConfig(\"lastfmkey\", *LastFmKey)\n\tcfg.SetConfig(\"lastfmsecret\", *LastFmSecret)\n\tif cfg.GetConfig()[\"lastfmkey\"] == \"\" {\n\t\tcfg.SetConfig(\"lastfmkey\", common.GetStripEnv(\"LASTFM_KEY\"))\n\t}\n\tif cfg.GetConfig()[\"lastfmsecret\"] == \"\" {\n\t\tcfg.SetConfig(\"lastfmsecret\", common.GetStripEnv(\"LASTFM_SECRET\"))\n\t}\n}\n\n\ntype LastFmWrapper struct {\n\tLastFmKey string\n\tLastFmSecret string\n}\n\nfunc (lfw *LastFmWrapper) LastfmArtist(artist string) (summary, artist_url, artist_corrected, image_url string) {\n\tvar tags string\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.Artist.GetInfo(lastfm.P{\"artist\": artist})\n\tsummary = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tfor idx, tag := range r.Tags {\n\t\t\tif idx == 0 {\n\t\t\t\ttags = tag.Name\n\t\t\t} else {\n\t\t\t\ttags += fmt.Sprintf(\", %s\", tag.Name)\n\t\t\t}\n\t\t}\n\n\t\tfor _, img := range r.Images {\n\t\t\tif img.Size == \"large\" {\n\t\t\t\timage_url = img.Url\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tsummary = fmt.Sprintf(\"%s\\n\\nTags: %s\\n\", r.Bio.Summary, tags)\n\t\tartist_url = r.Url\n\t\tr, err := lastfm_api.Artist.GetCorrection(lastfm.P{\"artist\": artist})\n\t\tartist_corrected = artist\n\t\tif err == nil {\n\t\t\tartist_corrected = r.Correction.Artist.Name\n\t\t}\n\t}\n\treturn\n}\n\nfunc (lfw *LastFmWrapper) LastfmTag(tag string) (result string) {\n\tvar artists string\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.Tag.GetTopArtists(lastfm.P{\"tag\": tag})\n\tresult = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tfor idx, artist := range r.Artists {\n\t\t\tif idx == 0 {\n\t\t\t\tartists = artist.Name\n\t\t\t} else {\n\t\t\t\tartists += fmt.Sprintf(\", %s\", artist.Name)\n\t\t\t}\n\t\t}\n\t\tif artists != \"\" {\n\t\t\tresult = fmt.Sprintf(\"Artists: %s\\n\", artists)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (lfw *LastFmWrapper) LastfmUser(user string) (result string) {\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.User.GetInfo(lastfm.P{\"user\": user})\n\tresult = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tresult = fmt.Sprintf(\"Profile information for: %s\\n\", r.Url)\n\t\tresult += fmt.Sprintf(\"Play count: %+v track(s)\\n\", r.PlayCount)\n\t\tresult += fmt.Sprintf(\"\\nTop artists:\\n\")\n\t\tr2, _ := lastfm_api.User.GetTopArtists(lastfm.P{\"user\": user, \"limit\": 10})\n\t\tfor idx, artist := range r2.Artists {\n\t\t\tresult += fmt.Sprintf(\"%+v - %s - %s play(s)\\n\", idx+1, artist.Name, artist.PlayCount)\n\t\t}\n\t\tresult += fmt.Sprintf(\"\\nTop tracks:\\n\")\n\t\tr3, _ := lastfm_api.User.GetTopTracks(lastfm.P{\"user\": user, \"limit\": 10})\n\t\tfor idx, track := range r3.Tracks {\n\t\t\tresult += fmt.Sprintf(\"%+v - %s - %s - %s play(s)\\n\", idx+1, track.Artist.Name, track.Name, track.PlayCount)\n\t\t}\n\t}\n\treturn\n}\n\nfunc LastFmProcessMessage(api *torpedo_registry.BotAPI, channel interface{}, incoming_message string) {\n\tvar message string\n\tvar richmsg torpedo_registry.RichMessage\n\tlfm := &LastFmWrapper{LastFmKey: torpedo_registry.Config.GetConfig()[\"lastfmkey\"],\n\t\t\t\t\t LastFmSecret: torpedo_registry.Config.GetConfig()[\"lastfmsecret\"]}\n\thelp := fmt.Sprintf(\"Usage: %slastfm command\\nAvailable commands: artist, tag, user\", api.CommandPrefix)\n\tcommand := strings.Split(strings.TrimSpace(strings.TrimLeft(incoming_message, fmt.Sprintf(\"%slastfm\", api.CommandPrefix))), \" \")[0]\n\n\tswitch command {\n\tcase \"artist\":\n\t\tartist := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif artist != \"\" {\n\t\t\tsummary, artist_url, artist_corrected, image_url := lfm.LastfmArtist(artist)\n\t\t\trichmsg = torpedo_registry.RichMessage{BarColor: \"#36a64f\",\n\t\t\t\tText: summary,\n\t\t\t\tTitle: artist_corrected,\n\t\t\t\tTitleLink: artist_url,\n\t\t\t\tImageURL: image_url}\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply artist: %slastfm artist artist_name\", api.CommandPrefix)\n\t\t}\n\tcase \"tag\":\n\t\ttag := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif tag != \"\" {\n\t\t\tmessage = lfm.LastfmTag(tag)\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply tag: %slastfm tag tag_name\", api.CommandPrefix)\n\t\t}\n\tcase \"user\":\n\t\tuser := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif user != \"\" {\n\t\t\tmessage = lfm.LastfmUser(user)\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply user name: %slastfm user user_name\", api.CommandPrefix)\n\t\t}\n\tdefault:\n\t\tmessage = help\n\t}\n\n\tapi.Bot.PostMessage(channel, message, api, richmsg)\n}\n\nfunc init() {\n\ttorpedo_registry.Config.RegisterHelpAndHandler(\"lastfm\", \"Query Last.FM for artist, tag, user\", LastFmProcessMessage)\n\ttorpedo_registry.Config.RegisterParser(\"lastfm\", ConfigureLastFmPlugin, ParseLastFmPlugin)\n}\n<commit_msg>forked last.fm repo, until issue is fixed by author<commit_after>package torpedo_lastfm_plugin\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\n\tcommon \"github.com\/tb0hdan\/torpedo_common\"\n\t\"github.com\/tb0hdan\/torpedo_registry\"\n\t\n\t\/\/ \"github.com\/shkh\/lastfm-go\/lastfm\"\n\t\"github.com\/tb0hdan\/lastfm-go\/lastfm\"\n)\n\nvar (\n\tLastFmKey *string\n\tLastFmSecret *string\n)\n\nfunc ConfigureLastFmPlugin(cfg *torpedo_registry.ConfigStruct) {\n\tLastFmKey = flag.String(\"lastfm_key\", \"\", \"Last.FM API Key\")\n\tLastFmSecret = flag.String(\"lastfm_secret\", \"\", \"Last.FM API Secret\")\n\n}\n\nfunc ParseLastFmPlugin(cfg *torpedo_registry.ConfigStruct) {\n\tcfg.SetConfig(\"lastfmkey\", *LastFmKey)\n\tcfg.SetConfig(\"lastfmsecret\", *LastFmSecret)\n\tif cfg.GetConfig()[\"lastfmkey\"] == \"\" {\n\t\tcfg.SetConfig(\"lastfmkey\", common.GetStripEnv(\"LASTFM_KEY\"))\n\t}\n\tif cfg.GetConfig()[\"lastfmsecret\"] == \"\" {\n\t\tcfg.SetConfig(\"lastfmsecret\", common.GetStripEnv(\"LASTFM_SECRET\"))\n\t}\n}\n\n\ntype LastFmWrapper struct {\n\tLastFmKey string\n\tLastFmSecret string\n}\n\nfunc (lfw *LastFmWrapper) LastfmArtist(artist string) (summary, artist_url, artist_corrected, image_url string) {\n\tvar tags string\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.Artist.GetInfo(lastfm.P{\"artist\": artist})\n\tsummary = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tfor idx, tag := range r.Tags {\n\t\t\tif idx == 0 {\n\t\t\t\ttags = tag.Name\n\t\t\t} else {\n\t\t\t\ttags += fmt.Sprintf(\", %s\", tag.Name)\n\t\t\t}\n\t\t}\n\n\t\tfor _, img := range r.Images {\n\t\t\tif img.Size == \"large\" {\n\t\t\t\timage_url = img.Url\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tsummary = fmt.Sprintf(\"%s\\n\\nTags: %s\\n\", r.Bio.Summary, tags)\n\t\tartist_url = r.Url\n\t\tr, err := lastfm_api.Artist.GetCorrection(lastfm.P{\"artist\": artist})\n\t\tartist_corrected = artist\n\t\tif err == nil {\n\t\t\tartist_corrected = r.Correction.Artist.Name\n\t\t}\n\t}\n\treturn\n}\n\nfunc (lfw *LastFmWrapper) LastfmTag(tag string) (result string) {\n\tvar artists string\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.Tag.GetTopArtists(lastfm.P{\"tag\": tag})\n\tresult = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tfor idx, artist := range r.Artists {\n\t\t\tif idx == 0 {\n\t\t\t\tartists = artist.Name\n\t\t\t} else {\n\t\t\t\tartists += fmt.Sprintf(\", %s\", artist.Name)\n\t\t\t}\n\t\t}\n\t\tif artists != \"\" {\n\t\t\tresult = fmt.Sprintf(\"Artists: %s\\n\", artists)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (lfw *LastFmWrapper) LastfmUser(user string) (result string) {\n\tlastfm_api := lastfm.New(lfw.LastFmKey, lfw.LastFmSecret)\n\tr, err := lastfm_api.User.GetInfo(lastfm.P{\"user\": user})\n\tresult = \"An error occured while processing your request\"\n\tif err == nil {\n\t\tresult = fmt.Sprintf(\"Profile information for: %s\\n\", r.Url)\n\t\tresult += fmt.Sprintf(\"Play count: %+v track(s)\\n\", r.PlayCount)\n\t\tresult += fmt.Sprintf(\"\\nTop artists:\\n\")\n\t\tr2, _ := lastfm_api.User.GetTopArtists(lastfm.P{\"user\": user, \"limit\": 10})\n\t\tfor idx, artist := range r2.Artists {\n\t\t\tresult += fmt.Sprintf(\"%+v - %s - %s play(s)\\n\", idx+1, artist.Name, artist.PlayCount)\n\t\t}\n\t\tresult += fmt.Sprintf(\"\\nTop tracks:\\n\")\n\t\tr3, _ := lastfm_api.User.GetTopTracks(lastfm.P{\"user\": user, \"limit\": 10})\n\t\tfor idx, track := range r3.Tracks {\n\t\t\tresult += fmt.Sprintf(\"%+v - %s - %s - %s play(s)\\n\", idx+1, track.Artist.Name, track.Name, track.PlayCount)\n\t\t}\n\t}\n\treturn\n}\n\nfunc LastFmProcessMessage(api *torpedo_registry.BotAPI, channel interface{}, incoming_message string) {\n\tvar message string\n\tvar richmsg torpedo_registry.RichMessage\n\tlfm := &LastFmWrapper{LastFmKey: torpedo_registry.Config.GetConfig()[\"lastfmkey\"],\n\t\t\t\t\t LastFmSecret: torpedo_registry.Config.GetConfig()[\"lastfmsecret\"]}\n\thelp := fmt.Sprintf(\"Usage: %slastfm command\\nAvailable commands: artist, tag, user\", api.CommandPrefix)\n\tcommand := strings.Split(strings.TrimSpace(strings.TrimLeft(incoming_message, fmt.Sprintf(\"%slastfm\", api.CommandPrefix))), \" \")[0]\n\n\tswitch command {\n\tcase \"artist\":\n\t\tartist := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif artist != \"\" {\n\t\t\tsummary, artist_url, artist_corrected, image_url := lfm.LastfmArtist(artist)\n\t\t\trichmsg = torpedo_registry.RichMessage{BarColor: \"#36a64f\",\n\t\t\t\tText: summary,\n\t\t\t\tTitle: artist_corrected,\n\t\t\t\tTitleLink: artist_url,\n\t\t\t\tImageURL: image_url}\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply artist: %slastfm artist artist_name\", api.CommandPrefix)\n\t\t}\n\tcase \"tag\":\n\t\ttag := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif tag != \"\" {\n\t\t\tmessage = lfm.LastfmTag(tag)\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply tag: %slastfm tag tag_name\", api.CommandPrefix)\n\t\t}\n\tcase \"user\":\n\t\tuser := strings.TrimSpace(strings.TrimPrefix(incoming_message, fmt.Sprintf(\"%slastfm %s\", api.CommandPrefix, command)))\n\t\tif user != \"\" {\n\t\t\tmessage = lfm.LastfmUser(user)\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Please supply user name: %slastfm user user_name\", api.CommandPrefix)\n\t\t}\n\tdefault:\n\t\tmessage = help\n\t}\n\n\tapi.Bot.PostMessage(channel, message, api, richmsg)\n}\n\nfunc init() {\n\ttorpedo_registry.Config.RegisterHelpAndHandler(\"lastfm\", \"Query Last.FM for artist, tag, user\", LastFmProcessMessage)\n\ttorpedo_registry.Config.RegisterParser(\"lastfm\", ConfigureLastFmPlugin, ParseLastFmPlugin)\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage cli\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"mynewt.apache.org\/newt\/newt\/interfaces\"\n\t\"mynewt.apache.org\/newt\/newt\/newtutil\"\n\t\"mynewt.apache.org\/newt\/newt\/project\"\n\t\"mynewt.apache.org\/newt\/util\"\n)\n\nvar NewTypeStr = \"pkg\"\n\nfunc pkgNewCmd(cmd *cobra.Command, args []string) {\n\tNewTypeStr = strings.ToUpper(NewTypeStr)\n\n\tpw := project.NewPackageWriter()\n\tif err := pw.ConfigurePackage(NewTypeStr, args[0]); err != nil {\n\t\tNewtUsage(cmd, err)\n\t}\n\tif err := pw.WritePackage(); err != nil {\n\t\tNewtUsage(cmd, err)\n\t}\n}\n\nfunc pkgMoveCmd(cmd *cobra.Command, args []string) {\n\tif len(args) != 2 {\n\t\tNewtUsage(cmd, util.NewNewtError(\"Exactly two arguments required to pkg move\"))\n\t}\n\n\tsrcLoc := args[0]\n\tdstLoc := args[1]\n\n\tproj := TryGetProject()\n\tinterfaces.SetProject(proj)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tNewtUsage(cmd, util.NewNewtError(err.Error()))\n\t}\n\n\tif err := os.Chdir(proj.Path() + \"\/\"); err != nil {\n\t\tNewtUsage(cmd, util.NewNewtError(err.Error()))\n\t}\n\n\t\/* Find source package, defaulting search to the local project if no\n\t * repository descriptor is found.\n\t *\/\n\tsrcRepoName, srcName, err := newtutil.ParsePackageString(srcLoc)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tsrcRepo := proj.LocalRepo()\n\tif srcRepoName != \"\" {\n\t\tsrcRepo = proj.FindRepo(srcRepoName)\n\t}\n\n\tsrcPkg, err := proj.ResolvePackage(srcRepo, srcName)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\t\/* Resolve the destination package to a physical location, and then\n\t * move the source package to that location.\n\t * dstLoc is assumed to be in the format \"@repo\/pkg\/loc\"\n\t *\/\n\trepoName, pkgName, err := newtutil.ParsePackageString(dstLoc)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tdstPath := proj.Path() + \"\/\"\n\trepo := proj.LocalRepo()\n\tif repoName != \"\" {\n\t\tdstPath += \"repos\/\" + repoName + \"\/\"\n\t\trepo = proj.FindRepo(repoName)\n\t\tif repo == nil {\n\t\t\tos.Chdir(wd)\n\t\t\tNewtUsage(cmd, util.NewNewtError(\"Destination repo \"+\n\t\t\t\trepoName+\" does not exist\"))\n\t\t}\n\t}\n\tdstPath += pkgName + \"\/\"\n\n\tif util.NodeExist(dstPath) {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, util.NewNewtError(\"Cannot overwrite existing package, \"+\n\t\t\t\"use pkg delete first\"))\n\t}\n\n\tutil.StatusMessage(util.VERBOSITY_DEFAULT, \"Moving package %s to %s\\n\",\n\t\tsrcLoc, dstLoc)\n\n\tif err := util.MoveDir(srcPkg.BasePath(), dstPath); err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\t\/* Replace the package name in the pkg.yml file *\/\n\tpkgData, err := ioutil.ReadFile(dstPath + \"\/pkg.yml\")\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tre := regexp.MustCompile(regexp.QuoteMeta(srcName))\n\tres := re.ReplaceAllString(string(pkgData), pkgName)\n\n\tif err := ioutil.WriteFile(dstPath+\"\/pkg.yml\", []byte(res), 0666); err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\t\/* If the last element of the package path changes, rename the include\n\t * directory.\n\t *\/\n\tif path.Base(pkgName) != path.Base(srcPkg.Name()) {\n\t\tutil.MoveDir(dstPath+\"\/include\/\"+path.Base(srcPkg.Name()),\n\t\t\tdstPath+\"\/include\/\"+path.Base(pkgName))\n\t}\n\n\tos.Chdir(wd)\n}\n\nfunc AddPackageCommands(cmd *cobra.Command) {\n\t\/* Add the base package command, on top of which other commands are\n\t * keyed\n\t *\/\n\tpkgHelpText := \"Commands for creating and manipulating packages\"\n\tpkgHelpEx := \"newt pkg new --type=pkg libs\/mylib\"\n\n\tpkgCmd := &cobra.Command{\n\t\tUse: \"pkg\",\n\t\tShort: \"Create and manage packages in the current workspace\",\n\t\tLong: pkgHelpText,\n\t\tExample: pkgHelpEx,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmd.Help()\n\t\t},\n\t}\n\n\tcmd.AddCommand(pkgCmd)\n\n\t\/* Package new command, create a new package *\/\n\tnewCmdHelpText := \"\"\n\tnewCmdHelpEx := \"\"\n\n\tnewCmd := &cobra.Command{\n\t\tUse: \"new\",\n\t\tShort: \"Create a new package, from a template\",\n\t\tLong: newCmdHelpText,\n\t\tExample: newCmdHelpEx,\n\t\tRun: pkgNewCmd,\n\t}\n\n\tnewCmd.PersistentFlags().StringVarP(&NewTypeStr, \"type\", \"t\",\n\t\t\"pkg\", \"Type of package to create: pkg, bsp, sdk. Default pkg.\")\n\n\tpkgCmd.AddCommand(newCmd)\n\n\tmoveCmdHelpText := \"\"\n\tmoveCmdHelpEx := \"\"\n\n\tmoveCmd := &cobra.Command{\n\t\tUse: \"move\",\n\t\tShort: \"Move a package from one location to another\",\n\t\tLong: moveCmdHelpText,\n\t\tExample: moveCmdHelpEx,\n\t\tRun: pkgMoveCmd,\n\t}\n\n\tpkgCmd.AddCommand(moveCmd)\n}\n<commit_msg>add the pkg remove command to newt<commit_after>\/**\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\/\n\npackage cli\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"mynewt.apache.org\/newt\/newt\/interfaces\"\n\t\"mynewt.apache.org\/newt\/newt\/newtutil\"\n\t\"mynewt.apache.org\/newt\/newt\/pkg\"\n\t\"mynewt.apache.org\/newt\/newt\/project\"\n\t\"mynewt.apache.org\/newt\/util\"\n)\n\nvar NewTypeStr = \"pkg\"\n\nfunc pkgNewCmd(cmd *cobra.Command, args []string) {\n\tNewTypeStr = strings.ToUpper(NewTypeStr)\n\n\tpw := project.NewPackageWriter()\n\tif err := pw.ConfigurePackage(NewTypeStr, args[0]); err != nil {\n\t\tNewtUsage(cmd, err)\n\t}\n\tif err := pw.WritePackage(); err != nil {\n\t\tNewtUsage(cmd, err)\n\t}\n}\n\nfunc pkgMoveCmd(cmd *cobra.Command, args []string) {\n\tif len(args) != 2 {\n\t\tNewtUsage(cmd, util.NewNewtError(\"Exactly two arguments required to pkg move\"))\n\t}\n\n\tsrcLoc := args[0]\n\tdstLoc := args[1]\n\n\tproj := TryGetProject()\n\tinterfaces.SetProject(proj)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\tif err := os.Chdir(proj.Path() + \"\/\"); err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\t\/* Find source package, defaulting search to the local project if no\n\t * repository descriptor is found.\n\t *\/\n\tsrcRepoName, srcName, err := newtutil.ParsePackageString(srcLoc)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tsrcRepo := proj.LocalRepo()\n\tif srcRepoName != \"\" {\n\t\tsrcRepo = proj.FindRepo(srcRepoName)\n\t}\n\n\tsrcPkg, err := proj.ResolvePackage(srcRepo, srcName)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\t\/* Resolve the destination package to a physical location, and then\n\t * move the source package to that location.\n\t * dstLoc is assumed to be in the format \"@repo\/pkg\/loc\"\n\t *\/\n\trepoName, pkgName, err := newtutil.ParsePackageString(dstLoc)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tdstPath := proj.Path() + \"\/\"\n\trepo := proj.LocalRepo()\n\tif repoName != \"\" {\n\t\tdstPath += \"repos\/\" + repoName + \"\/\"\n\t\trepo = proj.FindRepo(repoName)\n\t\tif repo == nil {\n\t\t\tos.Chdir(wd)\n\t\t\tNewtUsage(cmd, util.NewNewtError(\"Destination repo \"+\n\t\t\t\trepoName+\" does not exist\"))\n\t\t}\n\t}\n\tdstPath += pkgName + \"\/\"\n\n\tif util.NodeExist(dstPath) {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, util.NewNewtError(\"Cannot overwrite existing package, \"+\n\t\t\t\"use pkg delete first\"))\n\t}\n\n\tutil.StatusMessage(util.VERBOSITY_DEFAULT, \"Moving package %s to %s\\n\",\n\t\tsrcLoc, dstLoc)\n\n\tif err := util.MoveDir(srcPkg.BasePath(), dstPath); err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\t\/* Replace the package name in the pkg.yml file *\/\n\tpkgData, err := ioutil.ReadFile(dstPath + \"\/pkg.yml\")\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tre := regexp.MustCompile(regexp.QuoteMeta(srcName))\n\tres := re.ReplaceAllString(string(pkgData), pkgName)\n\n\tif err := ioutil.WriteFile(dstPath+\"\/pkg.yml\", []byte(res), 0666); err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\t\/* If the last element of the package path changes, rename the include\n\t * directory.\n\t *\/\n\tif path.Base(pkgName) != path.Base(srcPkg.Name()) {\n\t\tutil.MoveDir(dstPath+\"\/include\/\"+path.Base(srcPkg.Name()),\n\t\t\tdstPath+\"\/include\/\"+path.Base(pkgName))\n\t}\n\n\tos.Chdir(wd)\n}\n\nfunc pkgRemoveCmd(cmd *cobra.Command, args []string) {\n\tif len(args) != 1 {\n\t\tNewtUsage(cmd, util.NewNewtError(\"Must specify a package name to delete\"))\n\t}\n\n\tproj := TryGetProject()\n\tinterfaces.SetProject(proj)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\tif err := os.Chdir(proj.Path() + \"\/\"); err != nil {\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\t\/* Resolve package, and get path from package to ensure we're being asked\n\t * to remove a valid path.\n\t *\/\n\trepoName, pkgName, err := newtutil.ParsePackageString(args[0])\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\trepo := proj.LocalRepo()\n\tif repoName != \"\" {\n\t\trepo = proj.FindRepo(repoName)\n\t\tif repo == nil {\n\t\t\tos.Chdir(wd)\n\t\t\tNewtUsage(cmd, util.NewNewtError(\"Destination repo \"+\n\t\t\t\trepoName+\" does not exist\"))\n\t\t}\n\t}\n\n\tpkg, err := pkg.LoadLocalPackage(repo, pkgName)\n\tif err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, err)\n\t}\n\n\tutil.StatusMessage(util.VERBOSITY_DEFAULT, \"Removing package %s\\n\",\n\t\targs[0])\n\n\tif err := os.RemoveAll(pkg.BasePath()); err != nil {\n\t\tos.Chdir(wd)\n\t\tNewtUsage(cmd, util.ChildNewtError(err))\n\t}\n\n\tos.Chdir(wd)\n}\n\nfunc AddPackageCommands(cmd *cobra.Command) {\n\t\/* Add the base package command, on top of which other commands are\n\t * keyed\n\t *\/\n\tpkgHelpText := \"Commands for creating and manipulating packages\"\n\tpkgHelpEx := \" newt pkg new --type=pkg libs\/mylib\"\n\n\tpkgCmd := &cobra.Command{\n\t\tUse: \"pkg\",\n\t\tShort: \"Create and manage packages in the current workspace\",\n\t\tLong: pkgHelpText,\n\t\tExample: pkgHelpEx,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmd.Help()\n\t\t},\n\t}\n\n\tcmd.AddCommand(pkgCmd)\n\n\t\/* Package new command, create a new package *\/\n\tnewCmdHelpText := \"\"\n\tnewCmdHelpEx := \"\"\n\n\tnewCmd := &cobra.Command{\n\t\tUse: \"new\",\n\t\tShort: \"Create a new package, from a template\",\n\t\tLong: newCmdHelpText,\n\t\tExample: newCmdHelpEx,\n\t\tRun: pkgNewCmd,\n\t}\n\n\tnewCmd.PersistentFlags().StringVarP(&NewTypeStr, \"type\", \"t\",\n\t\t\"pkg\", \"Type of package to create: pkg, bsp, sdk. Default pkg.\")\n\n\tpkgCmd.AddCommand(newCmd)\n\n\tmoveCmdHelpText := \"\"\n\tmoveCmdHelpEx := \"\"\n\n\tmoveCmd := &cobra.Command{\n\t\tUse: \"move\",\n\t\tShort: \"Move a package from one location to another\",\n\t\tLong: moveCmdHelpText,\n\t\tExample: moveCmdHelpEx,\n\t\tRun: pkgMoveCmd,\n\t}\n\n\tpkgCmd.AddCommand(moveCmd)\n\n\tremoveCmdHelpText := \"\"\n\tremoveCmdHelpEx := \"\"\n\n\tremoveCmd := &cobra.Command{\n\t\tUse: \"remove\",\n\t\tShort: \"Remove a package\",\n\t\tLong: removeCmdHelpText,\n\t\tExample: removeCmdHelpEx,\n\t\tRun: pkgRemoveCmd,\n\t}\n\n\tpkgCmd.AddCommand(removeCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/mackerelio\/mackerel-agent\/config\"\n\t\"github.com\/mackerelio\/mackerel-agent\/logging\"\n)\n\nvar logger = logging.GetLogger(\"metadata\")\n\n\/\/ Generator generates metadata\ntype Generator struct {\n\tName string\n\tConfig *config.MetadataPlugin\n\tTempfile string\n\tPrevMetadata interface{}\n}\n\n\/\/ Fetch invokes the command and returns the result\nfunc (g *Generator) Fetch() (interface{}, error) {\n\tmessage, stderr, exitCode, err := g.Config.Run()\n\n\tif err != nil {\n\t\tlogger.Warningf(\"Error occurred while executing a metadata plugin %q: %s\", g.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tif stderr != \"\" {\n\t\tlogger.Warningf(\"metadata plugin %q outputs stderr: %s\", g.Name, stderr)\n\t}\n\n\tif exitCode != 0 {\n\t\treturn nil, fmt.Errorf(\"exits with: %d\", exitCode)\n\t}\n\n\tvar metadata interface{}\n\tif err := json.Unmarshal([]byte(message), &metadata); err != nil {\n\t\treturn nil, fmt.Errorf(\"outputs invalid JSON: %v\", message)\n\t}\n\n\treturn metadata, nil\n}\n\n\/\/ Differs returns whether the metadata has been changed or not\nfunc (g *Generator) Differs(metadata interface{}) bool {\n\tif g.PrevMetadata == nil {\n\t\tg.LoadFromFile()\n\t}\n\treturn !reflect.DeepEqual(g.PrevMetadata, metadata)\n}\n\n\/\/ Load loads the previous metadata from file\nfunc (g *Generator) LoadFromFile() {\n\tdata, err := ioutil.ReadFile(g.Tempfile)\n\tif err != nil { \/\/ maybe initial state\n\t\treturn\n\t}\n\tvar metadata interface{}\n\tif err := json.Unmarshal(data, &metadata); err != nil {\n\t\tlogger.Warningf(\"metadata plugin %q detected a invalid json in temporary file: %s\", g.Name, string(data))\n\t\treturn\n\t}\n\tg.PrevMetadata = metadata\n}\n\n\/\/ Save stores the metadata locally\nfunc (g *Generator) Save(metadata interface{}) error {\n\tg.PrevMetadata = metadata\n\tdata, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal the metadata to json: %v %s\", metadata, err.Error())\n\t}\n\tif err = writeFileAtomically(g.Tempfile, data); err != nil {\n\t\treturn fmt.Errorf(\"failed to write the metadata to temporary file: %v %s\", metadata, err.Error())\n\t}\n\treturn nil\n}\n\nfunc writeFileAtomically(f string, contents []byte) error {\n\ttmpf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(tmpf.Name())\n\t_, err = tmpf.Write(contents)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpf.Close()\n\treturn os.Rename(tmpf.Name(), f)\n}\n\nconst defaultExecutionInterval = 10 * time.Minute\n\n\/\/ Interval calculates the time interval of command execution\nfunc (g *Generator) Interval() time.Duration {\n\tif g.Config.ExecutionInterval == nil {\n\t\treturn defaultExecutionInterval\n\t}\n\tinterval := time.Duration(*g.Config.ExecutionInterval) * time.Minute\n\tif interval < 1*time.Minute {\n\t\treturn 1 * time.Minute\n\t}\n\treturn interval\n}\n<commit_msg>improve comment<commit_after>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/mackerelio\/mackerel-agent\/config\"\n\t\"github.com\/mackerelio\/mackerel-agent\/logging\"\n)\n\nvar logger = logging.GetLogger(\"metadata\")\n\n\/\/ Generator generates metadata\ntype Generator struct {\n\tName string\n\tConfig *config.MetadataPlugin\n\tTempfile string\n\tPrevMetadata interface{}\n}\n\n\/\/ Fetch invokes the command and returns the result\nfunc (g *Generator) Fetch() (interface{}, error) {\n\tmessage, stderr, exitCode, err := g.Config.Run()\n\n\tif err != nil {\n\t\tlogger.Warningf(\"Error occurred while executing a metadata plugin %q: %s\", g.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tif stderr != \"\" {\n\t\tlogger.Warningf(\"metadata plugin %q outputs stderr: %s\", g.Name, stderr)\n\t}\n\n\tif exitCode != 0 {\n\t\treturn nil, fmt.Errorf(\"exits with: %d\", exitCode)\n\t}\n\n\tvar metadata interface{}\n\tif err := json.Unmarshal([]byte(message), &metadata); err != nil {\n\t\treturn nil, fmt.Errorf(\"outputs invalid JSON: %v\", message)\n\t}\n\n\treturn metadata, nil\n}\n\n\/\/ Differs returns whether the metadata has been changed or not\nfunc (g *Generator) Differs(metadata interface{}) bool {\n\tif g.PrevMetadata == nil {\n\t\tg.LoadFromFile()\n\t}\n\treturn !reflect.DeepEqual(g.PrevMetadata, metadata)\n}\n\n\/\/ LoadFromFile loads the previous metadata from file\nfunc (g *Generator) LoadFromFile() {\n\tdata, err := ioutil.ReadFile(g.Tempfile)\n\tif err != nil { \/\/ maybe initial state\n\t\treturn\n\t}\n\tvar metadata interface{}\n\tif err := json.Unmarshal(data, &metadata); err != nil {\n\t\tlogger.Warningf(\"metadata plugin %q detected a invalid json in temporary file: %s\", g.Name, string(data))\n\t\treturn\n\t}\n\tg.PrevMetadata = metadata\n}\n\n\/\/ Save stores the metadata locally\nfunc (g *Generator) Save(metadata interface{}) error {\n\tg.PrevMetadata = metadata\n\tdata, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal the metadata to json: %v %s\", metadata, err.Error())\n\t}\n\tif err = writeFileAtomically(g.Tempfile, data); err != nil {\n\t\treturn fmt.Errorf(\"failed to write the metadata to temporary file: %v %s\", metadata, err.Error())\n\t}\n\treturn nil\n}\n\nfunc writeFileAtomically(f string, contents []byte) error {\n\ttmpf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(tmpf.Name())\n\t_, err = tmpf.Write(contents)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpf.Close()\n\treturn os.Rename(tmpf.Name(), f)\n}\n\nconst defaultExecutionInterval = 10 * time.Minute\n\n\/\/ Interval calculates the time interval of command execution\nfunc (g *Generator) Interval() time.Duration {\n\tif g.Config.ExecutionInterval == nil {\n\t\treturn defaultExecutionInterval\n\t}\n\tinterval := time.Duration(*g.Config.ExecutionInterval) * time.Minute\n\tif interval < 1*time.Minute {\n\t\treturn 1 * time.Minute\n\t}\n\treturn interval\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package astcontext provides context aware utilities to be used within\n\/\/ editors.\npackage astcontext\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n)\n\n\/\/ Func represents a declared (*ast.FuncDecl) or an anonymous (*ast.FuncLit) Go\n\/\/ function\ntype Func struct {\n\tFuncPos token.Position `json:\"funcPos\"` \/\/ position of the \"func\" keyword\n\tLbrace token.Position `json:\"lbrace\"` \/\/ position of \"{\"\n\tRbrace token.Position `json:\"rbrace\"` \/\/ position of \"}\"\n\n\tnode ast.Node \/\/ either *ast.FuncDecl or *ast.FuncLit\n}\n\nfunc (f *Func) String() string {\n\treturn fmt.Sprintf(\"{'func': {'line': '%d', 'col':'%d'}, 'lbrace': {'line': '%d', 'col':'%d'}, 'rbrace': {'line': '%d', 'col':'%d'}}\",\n\t\tf.FuncPos.Line, f.FuncPos.Column, f.Lbrace.Line, f.Lbrace.Column, f.Rbrace.Line, f.Rbrace.Column)\n\n}\n\n\/\/ EnclosingFunc returns the enclosing Func for the given offset from the src.\n\/\/ Src needs to be a valid Go source file content.\nfunc EnclosingFunc(src []byte, offset int) (*Func, error) {\n\tfuncs, err := ParseFuncs(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn enclosingFunc(funcs, offset)\n}\n\n\/\/ EnclosingFuncFile returns the enclosing *Func for the given offset from the\n\/\/ filename. File needs to be a valid Go source file.\nfunc EnclosingFuncFile(filename string, offset int) (*Func, error) {\n\tfuncs, err := ParseFuncsFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn enclosingFunc(funcs, offset)\n}\n\n\/\/ ParseFuncsFile returns a list of Func's from the given filename. The list of\n\/\/ Func's are sorted according to the order of Go functions in the given\n\/\/ filename.\nfunc ParseFuncsFile(filename string) ([]*Func, error) {\n\tfset := token.NewFileSet() \/\/ positions are relative to fset\n\tf, err := parser.ParseFile(fset, filename, nil, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseFuncs(fset, f), nil\n}\n\n\/\/ ParseFuncs returns a list of Func's from the given src. The list of Func's\n\/\/ are sorted according to the order of Go functions in the given src.\nfunc ParseFuncs(src []byte) ([]*Func, error) {\n\tfset := token.NewFileSet() \/\/ positions are relative to fset\n\tf, err := parser.ParseFile(fset, \"src.go\", src, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseFuncs(fset, f), nil\n}\n\nfunc parseFuncs(fset *token.FileSet, f ast.Node) []*Func {\n\tvar funcs []*Func\n\n\t\/\/ Inspect the AST and find all function declarements and literals\n\tast.Inspect(f, func(n ast.Node) bool {\n\t\tswitch x := n.(type) {\n\t\tcase *ast.FuncDecl:\n\t\t\tfuncs = append(funcs, &Func{\n\t\t\t\tLbrace: fset.Position(x.Body.Lbrace),\n\t\t\t\tRbrace: fset.Position(x.Body.Rbrace),\n\t\t\t\tFuncPos: fset.Position(x.Type.Func),\n\t\t\t\tnode: x,\n\t\t\t})\n\t\tcase *ast.FuncLit:\n\t\t\tfuncs = append(funcs, &Func{\n\t\t\t\tLbrace: fset.Position(x.Body.Lbrace),\n\t\t\t\tRbrace: fset.Position(x.Body.Rbrace),\n\t\t\t\tFuncPos: fset.Position(x.Type.Func),\n\t\t\t\tnode: x,\n\t\t\t})\n\t\t}\n\t\treturn true\n\t})\n\n\treturn funcs\n}\n\nfunc enclosingFunc(funcs []*Func, offset int) (*Func, error) {\n\tvar encFunc *Func\n\tfor _, fn := range funcs {\n\t\tstart := fn.Lbrace.Offset\n\t\tend := fn.Rbrace.Offset\n\t\tif start <= offset && offset <= end {\n\t\t\tencFunc = fn\n\t\t}\n\t}\n\n\tif encFunc == nil {\n\t\treturn &Func{}, errors.New(\"no enclosing functions found\")\n\t}\n\n\treturn encFunc, nil\n}\n<commit_msg>Remvoe whitespace<commit_after>\/\/ Package astcontext provides context aware utilities to be used within\n\/\/ editors.\npackage astcontext\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n)\n\n\/\/ Func represents a declared (*ast.FuncDecl) or an anonymous (*ast.FuncLit) Go\n\/\/ function\ntype Func struct {\n\tFuncPos token.Position `json:\"funcPos\"` \/\/ position of the \"func\" keyword\n\tLbrace token.Position `json:\"lbrace\"` \/\/ position of \"{\"\n\tRbrace token.Position `json:\"rbrace\"` \/\/ position of \"}\"\n\n\tnode ast.Node \/\/ either *ast.FuncDecl or *ast.FuncLit\n}\n\nfunc (f *Func) String() string {\n\treturn fmt.Sprintf(\"{'func': {'line': '%d', 'col':'%d'}, 'lbrace': {'line': '%d', 'col':'%d'}, 'rbrace': {'line': '%d', 'col':'%d'}}\",\n\t\tf.FuncPos.Line, f.FuncPos.Column, f.Lbrace.Line, f.Lbrace.Column, f.Rbrace.Line, f.Rbrace.Column)\n}\n\n\/\/ EnclosingFunc returns the enclosing Func for the given offset from the src.\n\/\/ Src needs to be a valid Go source file content.\nfunc EnclosingFunc(src []byte, offset int) (*Func, error) {\n\tfuncs, err := ParseFuncs(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn enclosingFunc(funcs, offset)\n}\n\n\/\/ EnclosingFuncFile returns the enclosing *Func for the given offset from the\n\/\/ filename. File needs to be a valid Go source file.\nfunc EnclosingFuncFile(filename string, offset int) (*Func, error) {\n\tfuncs, err := ParseFuncsFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn enclosingFunc(funcs, offset)\n}\n\n\/\/ ParseFuncsFile returns a list of Func's from the given filename. The list of\n\/\/ Func's are sorted according to the order of Go functions in the given\n\/\/ filename.\nfunc ParseFuncsFile(filename string) ([]*Func, error) {\n\tfset := token.NewFileSet() \/\/ positions are relative to fset\n\tf, err := parser.ParseFile(fset, filename, nil, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseFuncs(fset, f), nil\n}\n\n\/\/ ParseFuncs returns a list of Func's from the given src. The list of Func's\n\/\/ are sorted according to the order of Go functions in the given src.\nfunc ParseFuncs(src []byte) ([]*Func, error) {\n\tfset := token.NewFileSet() \/\/ positions are relative to fset\n\tf, err := parser.ParseFile(fset, \"src.go\", src, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseFuncs(fset, f), nil\n}\n\nfunc parseFuncs(fset *token.FileSet, f ast.Node) []*Func {\n\tvar funcs []*Func\n\n\t\/\/ Inspect the AST and find all function declarements and literals\n\tast.Inspect(f, func(n ast.Node) bool {\n\t\tswitch x := n.(type) {\n\t\tcase *ast.FuncDecl:\n\t\t\tfuncs = append(funcs, &Func{\n\t\t\t\tLbrace: fset.Position(x.Body.Lbrace),\n\t\t\t\tRbrace: fset.Position(x.Body.Rbrace),\n\t\t\t\tFuncPos: fset.Position(x.Type.Func),\n\t\t\t\tnode: x,\n\t\t\t})\n\t\tcase *ast.FuncLit:\n\t\t\tfuncs = append(funcs, &Func{\n\t\t\t\tLbrace: fset.Position(x.Body.Lbrace),\n\t\t\t\tRbrace: fset.Position(x.Body.Rbrace),\n\t\t\t\tFuncPos: fset.Position(x.Type.Func),\n\t\t\t\tnode: x,\n\t\t\t})\n\t\t}\n\t\treturn true\n\t})\n\n\treturn funcs\n}\n\nfunc enclosingFunc(funcs []*Func, offset int) (*Func, error) {\n\tvar encFunc *Func\n\tfor _, fn := range funcs {\n\t\tstart := fn.Lbrace.Offset\n\t\tend := fn.Rbrace.Offset\n\t\tif start <= offset && offset <= end {\n\t\t\tencFunc = fn\n\t\t}\n\t}\n\n\tif encFunc == nil {\n\t\treturn &Func{}, errors.New(\"no enclosing functions found\")\n\t}\n\n\treturn encFunc, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package atomas\n\nfunc CreateElementGenerator(rand func() int) func(int) int {\n\treturn func(round int) int {\n\t\treturn rand() % 3 + 1\n\t}\n}<commit_msg>random element generator with small logic<commit_after>package atomas\n\nfunc CreateElementGenerator(rand func() int) func(int) int {\n\treturn func(round int) int {\n\t\tif (round == 0) {\n\t\t\treturn rand() % 3 + 1\n\t\t}else {\n\t\t\treturn rand() % 4\n\t\t}\n\t}\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/ Copyright (c) 2016 Andriy Pylypenko. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage sippy\n\nimport (\n \"crypto\/rand\"\n \"errors\"\n \"fmt\"\n \"math\/big\"\n \"runtime\"\n \"sync\"\n\n \"sippy\/conf\"\n \"sippy\/net\"\n \"sippy\/sdp\"\n \"sippy\/types\"\n)\n\ntype Rtp_proxy_session struct {\n caller_session_exists bool\n call_id string\n from_tag string\n to_tag string\n _rtp_proxy_client sippy_types.RtpProxyClient\n max_index int\n l4r *local4remote\n notify_socket string\n notify_tag string\n insert_nortpp bool\n caller _rtpps_side\n callee _rtpps_side\n session_lock sync.Locker\n config sippy_conf.Config\n inflight_lock sync.Mutex\n inflight_cmd *rtpp_cmd\n rtpp_wi chan *rtpp_cmd\n}\n\ntype rtpproxy_update_result struct {\n rtpproxy_address string\n rtpproxy_port string\n family string\n sendonly bool\n}\n\ntype rtpp_cmd struct {\n cmd string\n cb func(string)\n rtp_proxy_client sippy_types.RtpProxyClient\n}\n\nfunc (self *rtpproxy_update_result) Address() string {\n return self.rtpproxy_address\n}\n\nfunc NewRtp_proxy_session(config sippy_conf.Config, rtp_proxy_clients []sippy_types.RtpProxyClient, call_id, from_tag, to_tag, notify_socket, notify_tag string, session_lock sync.Locker, callee_origin *sippy_sdp.SdpOrigin) (*Rtp_proxy_session, error) {\n self := &Rtp_proxy_session{\n notify_socket : notify_socket,\n notify_tag : notify_tag,\n call_id : call_id,\n from_tag : from_tag,\n to_tag : to_tag,\n insert_nortpp : false,\n max_index : -1,\n session_lock : session_lock,\n config : config,\n rtpp_wi : make(chan *rtpp_cmd, 50),\n }\n self.caller.otherside = &self.callee\n self.callee.otherside = &self.caller\n self.caller.owner = self\n self.callee.owner = self\n self.caller.session_exists = false\n self.callee.session_exists = false\n \/\/ RFC4566\n \/\/ *******\n \/\/ For privacy reasons, it is sometimes desirable to obfuscate the\n \/\/ username and IP address of the session originator. If this is a\n \/\/ concern, an arbitrary <username> and private <unicast-address> MAY be\n \/\/ chosen to populate the \"o=\" field, provided that these are selected\n \/\/ in a manner that does not affect the global uniqueness of the field.\n \/\/ *******\n addr := \"192.0.2.1\" \/\/ 192.0.2.0\/24 (TEST-NET-1)\n self.caller.origin, _ = sippy_sdp.NewSdpOrigin(addr)\n if callee_origin != nil {\n self.callee.origin = callee_origin.GetCopy()\n \/\/ New session means new RTP port so the SDP is now different and the SDP\n \/\/ version must be increased.\n self.callee.origin.IncVersion()\n } else {\n self.callee.origin, _ = sippy_sdp.NewSdpOrigin(addr)\n }\n online_clients := []sippy_types.RtpProxyClient{}\n for _, cl := range rtp_proxy_clients {\n if cl.IsOnline() {\n online_clients = append(online_clients, cl)\n }\n }\n n := len(online_clients)\n if n == 0 {\n return nil, fmt.Errorf(\"No online RTP proxy client has been found\")\n }\n idx, err := rand.Int(rand.Reader, big.NewInt(int64(n)))\n if err != nil {\n self._rtp_proxy_client = online_clients[0]\n } else {\n self._rtp_proxy_client = online_clients[idx.Int64()]\n }\n if self.call_id == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.call_id = fmt.Sprintf(\"%x\", buf)\n }\n if from_tag == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.from_tag = fmt.Sprintf(\"%x\", buf)\n }\n if to_tag == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.to_tag = fmt.Sprintf(\"%x\", buf)\n }\n runtime.SetFinalizer(self, rtp_proxy_session_destructor)\n return self, nil\n}\n\/*\n def version(self, result_callback):\n self.send_command(\"V\", self.version_result, result_callback)\n\n def version_result(self, result, result_callback):\n result_callback(result)\n*\/\nfunc (self *Rtp_proxy_session) PlayCaller(prompt_name string, times int\/*= 1*\/, result_callback func(string)\/*= nil*\/, index int \/*= 0*\/) {\n self.caller._play(prompt_name, times, result_callback, index)\n}\n\nfunc (self *Rtp_proxy_session) send_command(cmd string, cb func(string)) {\n if rtp_proxy_client := self._rtp_proxy_client; rtp_proxy_client != nil {\n self.inflight_lock.Lock()\n defer self.inflight_lock.Unlock()\n new_cmd := &rtpp_cmd{ cmd, cb, rtp_proxy_client }\n if self.inflight_cmd == nil {\n self.inflight_cmd = new_cmd\n rtp_proxy_client.SendCommand(cmd, self.cmd_done, nil)\n } else {\n self.rtpp_wi <- new_cmd\n }\n }\n}\n\nfunc (self *Rtp_proxy_session) cmd_done(res string) {\n self.inflight_lock.Lock()\n done_cmd := self.inflight_cmd\n select {\n case self.inflight_cmd = <-self.rtpp_wi:\n self.inflight_cmd.rtp_proxy_client.SendCommand(self.inflight_cmd.cmd, self.cmd_done, nil)\n default:\n self.inflight_cmd = nil\n }\n self.inflight_lock.Unlock()\n if done_cmd != nil && done_cmd.cb != nil {\n self.session_lock.Lock()\n done_cmd.cb(res)\n self.session_lock.Unlock()\n }\n}\n\nfunc (self *Rtp_proxy_session) StopPlayCaller(result_callback func(string)\/*= nil*\/, index int\/*= 0*\/) {\n if ! self.caller_session_exists {\n return\n }\n command := fmt.Sprintf(\"S %s-%d %s %s\", self.call_id, index, self.from_tag, self.to_tag)\n self.send_command(command, func(r string) { self.command_result(r, result_callback) })\n}\n\nfunc (self *Rtp_proxy_session) StartRecording(rname\/*= nil*\/ string, result_callback func(string)\/*= nil*\/, index int\/*= 0*\/) {\n if ! self.caller.session_exists {\n self.caller.update(\"0.0.0.0\", \"0\", func(*rtpproxy_update_result) { self._start_recording(rname, result_callback, index) }, \"\", index, \"IP4\")\n return\n }\n self._start_recording(rname, result_callback, index)\n}\n\nfunc (self *Rtp_proxy_session) _start_recording(rname string, result_callback func(string), index int) {\n if rname == \"\" {\n command := fmt.Sprintf(\"R %s-%d %s %s\", self.call_id, index, self.from_tag, self.to_tag)\n self.send_command(command, func (r string) { self.command_result(r, result_callback) })\n return\n }\n command := fmt.Sprintf(\"C %s-%d %s.a %s %s\", self.call_id, index, rname, self.from_tag, self.to_tag)\n self.send_command(command, func(string) { self._start_recording1(rname, result_callback, index) })\n}\n\nfunc (self *Rtp_proxy_session) _start_recording1(rname string, result_callback func(string), index int) {\n command := fmt.Sprintf(\"C %s-%d %s.o %s %s\", self.call_id, index, rname, self.to_tag, self.from_tag)\n self.send_command(command, func (r string) { self.command_result(r, result_callback) })\n}\n\nfunc (self *Rtp_proxy_session) command_result(result string, result_callback func(string)) {\n \/\/print \"%s.command_result(%s)\" % (id(self), result)\n if result_callback != nil {\n result_callback(result)\n }\n}\n\nfunc (self *Rtp_proxy_session) Delete() {\n if self._rtp_proxy_client == nil {\n return\n }\n for self.max_index >= 0 {\n command := fmt.Sprintf(\"D %s-%d %s %s\", self.call_id, self.max_index, self.from_tag, self.to_tag)\n self.send_command(command, nil)\n self.max_index--\n }\n self._rtp_proxy_client = nil\n}\n\nfunc (self *Rtp_proxy_session) OnCallerSdpChange(sdp_body sippy_types.MsgBody, cc_event sippy_types.CCEvent, result_callback func(sippy_types.MsgBody)) error {\n return self.caller._on_sdp_change(sdp_body, result_callback)\n}\n\nfunc (self *Rtp_proxy_session) OnCalleeSdpChange(sdp_body sippy_types.MsgBody, msg sippy_types.SipMsg, result_callback func(sippy_types.MsgBody)) error {\n return self.callee._on_sdp_change(sdp_body, result_callback)\n}\n\nfunc rtp_proxy_session_destructor(self *Rtp_proxy_session) {\n self.Delete()\n}\n\nfunc (self *Rtp_proxy_session) CallerSessionExists() bool { return self.caller_session_exists }\n\nfunc (self *Rtp_proxy_session) SetCallerLaddress(addr string) {\n self.caller.laddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCallerRaddress(addr *sippy_net.HostPort) {\n self.caller.raddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCalleeLaddress(addr string) {\n self.callee.laddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCalleeRaddress(addr *sippy_net.HostPort) {\n self.callee.raddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetInsertNortpp(v bool) {\n self.insert_nortpp = v\n}\n\nfunc (self *Rtp_proxy_session) SetAfterCallerSdpChange(cb func(sippy_types.RtpProxyUpdateResult)) {\n self.caller.after_sdp_change = cb\n}\n\nfunc (self *Rtp_proxy_session) CalleeOrigin() *sippy_sdp.SdpOrigin {\n if self == nil {\n return nil\n }\n return self.callee.origin\n}\n\nfunc (self Rtp_proxy_session) SBindSupported() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.SBindSupported(), nil\n}\n\nfunc (self Rtp_proxy_session) IsLocal() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.IsLocal(), nil\n}\n\nfunc (self Rtp_proxy_session) TNotSupported() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.TNotSupported(), nil\n}\n\nfunc (self Rtp_proxy_session) GetProxyAddress() (string, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return \"\", errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.GetProxyAddress(), nil\n}\n<commit_msg>Fix typo.<commit_after>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/ Copyright (c) 2016 Andriy Pylypenko. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage sippy\n\nimport (\n \"crypto\/rand\"\n \"errors\"\n \"fmt\"\n \"math\/big\"\n \"runtime\"\n \"sync\"\n\n \"sippy\/conf\"\n \"sippy\/net\"\n \"sippy\/sdp\"\n \"sippy\/types\"\n)\n\ntype Rtp_proxy_session struct {\n call_id string\n from_tag string\n to_tag string\n _rtp_proxy_client sippy_types.RtpProxyClient\n max_index int\n l4r *local4remote\n notify_socket string\n notify_tag string\n insert_nortpp bool\n caller _rtpps_side\n callee _rtpps_side\n session_lock sync.Locker\n config sippy_conf.Config\n inflight_lock sync.Mutex\n inflight_cmd *rtpp_cmd\n rtpp_wi chan *rtpp_cmd\n}\n\ntype rtpproxy_update_result struct {\n rtpproxy_address string\n rtpproxy_port string\n family string\n sendonly bool\n}\n\ntype rtpp_cmd struct {\n cmd string\n cb func(string)\n rtp_proxy_client sippy_types.RtpProxyClient\n}\n\nfunc (self *rtpproxy_update_result) Address() string {\n return self.rtpproxy_address\n}\n\nfunc NewRtp_proxy_session(config sippy_conf.Config, rtp_proxy_clients []sippy_types.RtpProxyClient, call_id, from_tag, to_tag, notify_socket, notify_tag string, session_lock sync.Locker, callee_origin *sippy_sdp.SdpOrigin) (*Rtp_proxy_session, error) {\n self := &Rtp_proxy_session{\n notify_socket : notify_socket,\n notify_tag : notify_tag,\n call_id : call_id,\n from_tag : from_tag,\n to_tag : to_tag,\n insert_nortpp : false,\n max_index : -1,\n session_lock : session_lock,\n config : config,\n rtpp_wi : make(chan *rtpp_cmd, 50),\n }\n self.caller.otherside = &self.callee\n self.callee.otherside = &self.caller\n self.caller.owner = self\n self.callee.owner = self\n self.caller.session_exists = false\n self.callee.session_exists = false\n \/\/ RFC4566\n \/\/ *******\n \/\/ For privacy reasons, it is sometimes desirable to obfuscate the\n \/\/ username and IP address of the session originator. If this is a\n \/\/ concern, an arbitrary <username> and private <unicast-address> MAY be\n \/\/ chosen to populate the \"o=\" field, provided that these are selected\n \/\/ in a manner that does not affect the global uniqueness of the field.\n \/\/ *******\n addr := \"192.0.2.1\" \/\/ 192.0.2.0\/24 (TEST-NET-1)\n self.caller.origin, _ = sippy_sdp.NewSdpOrigin(addr)\n if callee_origin != nil {\n self.callee.origin = callee_origin.GetCopy()\n \/\/ New session means new RTP port so the SDP is now different and the SDP\n \/\/ version must be increased.\n self.callee.origin.IncVersion()\n } else {\n self.callee.origin, _ = sippy_sdp.NewSdpOrigin(addr)\n }\n online_clients := []sippy_types.RtpProxyClient{}\n for _, cl := range rtp_proxy_clients {\n if cl.IsOnline() {\n online_clients = append(online_clients, cl)\n }\n }\n n := len(online_clients)\n if n == 0 {\n return nil, fmt.Errorf(\"No online RTP proxy client has been found\")\n }\n idx, err := rand.Int(rand.Reader, big.NewInt(int64(n)))\n if err != nil {\n self._rtp_proxy_client = online_clients[0]\n } else {\n self._rtp_proxy_client = online_clients[idx.Int64()]\n }\n if self.call_id == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.call_id = fmt.Sprintf(\"%x\", buf)\n }\n if from_tag == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.from_tag = fmt.Sprintf(\"%x\", buf)\n }\n if to_tag == \"\" {\n buf := make([]byte, 16)\n rand.Read(buf)\n self.to_tag = fmt.Sprintf(\"%x\", buf)\n }\n runtime.SetFinalizer(self, rtp_proxy_session_destructor)\n return self, nil\n}\n\/*\n def version(self, result_callback):\n self.send_command(\"V\", self.version_result, result_callback)\n\n def version_result(self, result, result_callback):\n result_callback(result)\n*\/\nfunc (self *Rtp_proxy_session) PlayCaller(prompt_name string, times int\/*= 1*\/, result_callback func(string)\/*= nil*\/, index int \/*= 0*\/) {\n self.caller._play(prompt_name, times, result_callback, index)\n}\n\nfunc (self *Rtp_proxy_session) send_command(cmd string, cb func(string)) {\n if rtp_proxy_client := self._rtp_proxy_client; rtp_proxy_client != nil {\n self.inflight_lock.Lock()\n defer self.inflight_lock.Unlock()\n new_cmd := &rtpp_cmd{ cmd, cb, rtp_proxy_client }\n if self.inflight_cmd == nil {\n self.inflight_cmd = new_cmd\n rtp_proxy_client.SendCommand(cmd, self.cmd_done, nil)\n } else {\n self.rtpp_wi <- new_cmd\n }\n }\n}\n\nfunc (self *Rtp_proxy_session) cmd_done(res string) {\n self.inflight_lock.Lock()\n done_cmd := self.inflight_cmd\n select {\n case self.inflight_cmd = <-self.rtpp_wi:\n self.inflight_cmd.rtp_proxy_client.SendCommand(self.inflight_cmd.cmd, self.cmd_done, nil)\n default:\n self.inflight_cmd = nil\n }\n self.inflight_lock.Unlock()\n if done_cmd != nil && done_cmd.cb != nil {\n self.session_lock.Lock()\n done_cmd.cb(res)\n self.session_lock.Unlock()\n }\n}\n\nfunc (self *Rtp_proxy_session) StopPlayCaller(result_callback func(string)\/*= nil*\/, index int\/*= 0*\/) {\n if ! self.caller.session_exists {\n return\n }\n command := fmt.Sprintf(\"S %s-%d %s %s\", self.call_id, index, self.from_tag, self.to_tag)\n self.send_command(command, func(r string) { self.command_result(r, result_callback) })\n}\n\nfunc (self *Rtp_proxy_session) StartRecording(rname\/*= nil*\/ string, result_callback func(string)\/*= nil*\/, index int\/*= 0*\/) {\n if ! self.caller.session_exists {\n self.caller.update(\"0.0.0.0\", \"0\", func(*rtpproxy_update_result) { self._start_recording(rname, result_callback, index) }, \"\", index, \"IP4\")\n return\n }\n self._start_recording(rname, result_callback, index)\n}\n\nfunc (self *Rtp_proxy_session) _start_recording(rname string, result_callback func(string), index int) {\n if rname == \"\" {\n command := fmt.Sprintf(\"R %s-%d %s %s\", self.call_id, index, self.from_tag, self.to_tag)\n self.send_command(command, func (r string) { self.command_result(r, result_callback) })\n return\n }\n command := fmt.Sprintf(\"C %s-%d %s.a %s %s\", self.call_id, index, rname, self.from_tag, self.to_tag)\n self.send_command(command, func(string) { self._start_recording1(rname, result_callback, index) })\n}\n\nfunc (self *Rtp_proxy_session) _start_recording1(rname string, result_callback func(string), index int) {\n command := fmt.Sprintf(\"C %s-%d %s.o %s %s\", self.call_id, index, rname, self.to_tag, self.from_tag)\n self.send_command(command, func (r string) { self.command_result(r, result_callback) })\n}\n\nfunc (self *Rtp_proxy_session) command_result(result string, result_callback func(string)) {\n \/\/print \"%s.command_result(%s)\" % (id(self), result)\n if result_callback != nil {\n result_callback(result)\n }\n}\n\nfunc (self *Rtp_proxy_session) Delete() {\n if self._rtp_proxy_client == nil {\n return\n }\n for self.max_index >= 0 {\n command := fmt.Sprintf(\"D %s-%d %s %s\", self.call_id, self.max_index, self.from_tag, self.to_tag)\n self.send_command(command, nil)\n self.max_index--\n }\n self._rtp_proxy_client = nil\n}\n\nfunc (self *Rtp_proxy_session) OnCallerSdpChange(sdp_body sippy_types.MsgBody, cc_event sippy_types.CCEvent, result_callback func(sippy_types.MsgBody)) error {\n return self.caller._on_sdp_change(sdp_body, result_callback)\n}\n\nfunc (self *Rtp_proxy_session) OnCalleeSdpChange(sdp_body sippy_types.MsgBody, msg sippy_types.SipMsg, result_callback func(sippy_types.MsgBody)) error {\n return self.callee._on_sdp_change(sdp_body, result_callback)\n}\n\nfunc rtp_proxy_session_destructor(self *Rtp_proxy_session) {\n self.Delete()\n}\n\nfunc (self *Rtp_proxy_session) CallerSessionExists() bool { return self.caller.session_exists }\n\nfunc (self *Rtp_proxy_session) SetCallerLaddress(addr string) {\n self.caller.laddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCallerRaddress(addr *sippy_net.HostPort) {\n self.caller.raddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCalleeLaddress(addr string) {\n self.callee.laddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetCalleeRaddress(addr *sippy_net.HostPort) {\n self.callee.raddress = addr\n}\n\nfunc (self *Rtp_proxy_session) SetInsertNortpp(v bool) {\n self.insert_nortpp = v\n}\n\nfunc (self *Rtp_proxy_session) SetAfterCallerSdpChange(cb func(sippy_types.RtpProxyUpdateResult)) {\n self.caller.after_sdp_change = cb\n}\n\nfunc (self *Rtp_proxy_session) CalleeOrigin() *sippy_sdp.SdpOrigin {\n if self == nil {\n return nil\n }\n return self.callee.origin\n}\n\nfunc (self Rtp_proxy_session) SBindSupported() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.SBindSupported(), nil\n}\n\nfunc (self Rtp_proxy_session) IsLocal() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.IsLocal(), nil\n}\n\nfunc (self Rtp_proxy_session) TNotSupported() (bool, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return true, errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.TNotSupported(), nil\n}\n\nfunc (self Rtp_proxy_session) GetProxyAddress() (string, error) {\n rtp_proxy_client := self._rtp_proxy_client\n if rtp_proxy_client == nil {\n return \"\", errors.New(\"the session already deleted\")\n }\n return rtp_proxy_client.GetProxyAddress(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lrucache\n\nimport (\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestBasicExpiry(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tnow := time.Now()\n\tb.Set(\"b\", \"vb\", now.Add(time.Duration(2*time.Second)))\n\tb.Set(\"a\", \"va\", now.Add(time.Duration(1*time.Second)))\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"\")\n\t}\n\tif v, _ := b.Get(\"b\"); v != \"vb\" {\n\t\tt.Error(\"\")\n\t}\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"\")\n\t}\n\n\tb.Set(\"d\", \"vd\", now.Add(time.Duration(4*time.Second)))\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expecting element A to be evicted\")\n\t}\n\n\tb.Set(\"e\", \"ve\", now.Add(time.Duration(-4*time.Second)))\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"Expecting element B to be evicted\")\n\t}\n\n\tb.Set(\"f\", \"vf\", now.Add(time.Duration(5*time.Second)))\n\tif _, ok := b.Get(\"e\"); ok {\n\t\tt.Error(\"Expecting element E to be evicted\")\n\t}\n\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"Expecting element C to not be evicted\")\n\t}\n\tn := now.Add(time.Duration(10 * time.Second))\n\tb.SetNow(\"g\", \"vg\", now.Add(time.Duration(5*time.Second)), n)\n\tif _, ok := b.Get(\"c\"); ok {\n\t\tt.Error(\"Expecting element C to be evicted\")\n\t}\n\n\tif b.Len() != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tb.Del(\"miss\")\n\tb.Del(\"g\")\n\tif b.Len() != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tb.Clear()\n\tif b.Len() != 0 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tnow = time.Now()\n\tb.Set(\"b\", \"vb\", now.Add(time.Duration(2*time.Second)))\n\tb.Set(\"a\", \"va\", now.Add(time.Duration(1*time.Second)))\n\tb.Set(\"d\", \"vd\", now.Add(time.Duration(4*time.Second)))\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"Expecting miss\")\n\t}\n\n\tb.GetQuiet(\"miss\")\n\tif v, _ := b.GetQuiet(\"a\"); v != \"va\" {\n\t\tt.Error(\"Expecting hit\")\n\t}\n\n\tb.Set(\"e\", \"ve\", now.Add(time.Duration(5*time.Second)))\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expecting miss\")\n\t}\n\n\tif b.Capacity() != 3 {\n\t\tt.Error(\"Expecting different capacity\")\n\t}\n}\n\nfunc TestBasicNoExpiry(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tb.Set(\"b\", \"vb\", time.Time{})\n\tb.Set(\"a\", \"va\", time.Time{})\n\tb.Set(\"c\", \"vc\", time.Time{})\n\tb.Set(\"d\", \"vd\", time.Time{})\n\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\tif v, _ := b.Get(\"d\"); v != \"vd\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\tpast := time.Now().Add(time.Duration(-10 * time.Second))\n\n\tb.Set(\"e\", \"ve\", past)\n\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\tif v, _ := b.Get(\"e\"); v != \"ve\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\t\/\/ Make sure expired items get evicted before items without expiry\n\tb.Set(\"f\", \"vf\", time.Time{})\n\tif _, ok := b.Get(\"e\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tr := b.Clear()\n\tif b.Len() != 0 || r != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tb.Set(\"c\", \"vc\", time.Time{})\n\tb.Set(\"d\", \"vd\", time.Time{})\n\tb.Set(\"e\", \"ve\", past)\n\n\tif b.Len() != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tr = b.Expire()\n\tif b.Len() != 2 || r != 1 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tr = b.Clear()\n\tif b.Len() != 0 || r != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n}\n\nfunc TestNil(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\n\t\/\/ value nil\n\tif v, ok := b.Get(\"a\"); v != nil || ok != false {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tb.Set(\"a\", nil, time.Time{})\n\n\tif v, ok := b.Get(\"a\"); v != nil || ok != true {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\t\/\/ value not nil (sanity check)\n\tif v, ok := b.Get(\"b\"); v != nil || ok != false {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tb.Set(\"b\", \"vb\", time.Time{})\n\n\tif v, ok := b.Get(\"b\"); v != \"vb\" || ok != true {\n\t\tt.Error(\"expecting miss\")\n\t}\n}\n\nfunc rec(foo func()) (recovered int) {\n\trecovered = 0\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\trecovered += 1\n\t\t}\n\t}()\n\tfoo()\n\treturn recovered\n}\n\nfunc TestPanicByValue(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\n\tb.Set(\"a\", \"a\", time.Time{})\n\n\tc := *b\n\tr := rec(func() {\n\t\tc.Del(\"a\")\n\t})\n\tif r != 1 {\n\t\tt.Error(\"Expecting panic\")\n\t}\n\n\tb.Del(\"a\")\n\n\tr = rec(func() {\n\t\tc.Set(\"a\", \"A\", time.Time{})\n\t})\n\tif r != 1 {\n\t\tt.Error(\"Expecting panic\")\n\t}\n}\n\nfunc TestZeroLength(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(0)\n\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expected miss\")\n\t}\n\n\tb.Set(\"a\", \"va\", time.Time{})\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expected miss\")\n\t}\n\n\tb.Clear()\n}\n\nfunc TestExtra(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tnow := time.Now()\n\tb.Set(\"b\", \"vb\", now)\n\tb.Set(\"a\", \"va\", now)\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"expecting value\")\n\t}\n\n\tif _, ok := b.GetNotStale(\"a\"); ok {\n\t\tt.Error(\"not expecting value\")\n\t}\n\tif _, ok := b.GetNotStale(\"miss\"); ok {\n\t\tt.Error(\"not expecting value\")\n\t}\n\tif v, _ := b.GetNotStale(\"c\"); v != \"vc\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\tif b.Len() != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tif b.Expire() != 1 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n}\n\nfunc randomString(l int) string {\n\tbytes := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(90-65))\n\t}\n\treturn string(bytes)\n}\n\nfunc createFilledBucket(expire time.Time) *LRUCache {\n\tb := NewLRUCache(1000)\n\tfor i := 0; i < 1000; i++ {\n\t\tb.Set(randomString(2), \"value\", expire)\n\t}\n\treturn b\n}\n\nfunc TestConcurrentGet(t *testing.T) {\n\tt.Parallel()\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tdone := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < 10000; i++ {\n\t\t\tb.Get(randomString(2))\n\t\t}\n\t\tdone <- true\n\t}\n\tworkers := 4\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < workers; i++ {\n\t\t_ = <-done\n\t}\n}\n\nfunc TestConcurrentSet(t *testing.T) {\n\tt.Parallel()\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tdone := make(chan bool)\n\tworker := func() {\n\t\texpire := time.Now().Add(time.Duration(4 * time.Second))\n\t\tfor i := 0; i < 10000; i++ {\n\t\t\tb.Set(randomString(2), \"value\", expire)\n\t\t}\n\t\tdone <- true\n\t}\n\tworkers := 4\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < workers; i++ {\n\t\t_ = <-done\n\t}\n}\n\nfunc BenchmarkConcurrentGetLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\tb.Get(randomString(2))\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n\nfunc BenchmarkConcurrentSetLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\texpire := time.Now().Add(time.Duration(4 * time.Second))\n\t\t\tb.Set(randomString(2), \"v\", expire)\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n\n\/\/ No expiry\nfunc BenchmarkConcurrentSetNXLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Time{})\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\tb.Set(randomString(2), \"v\", time.Time{})\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n<commit_msg>lrucache: fix race condition in tests<commit_after>package lrucache\n\nimport (\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestBasicExpiry(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tnow := time.Now()\n\tb.Set(\"b\", \"vb\", now.Add(time.Duration(2*time.Second)))\n\tb.Set(\"a\", \"va\", now.Add(time.Duration(1*time.Second)))\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"\")\n\t}\n\tif v, _ := b.Get(\"b\"); v != \"vb\" {\n\t\tt.Error(\"\")\n\t}\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"\")\n\t}\n\n\tb.Set(\"d\", \"vd\", now.Add(time.Duration(4*time.Second)))\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expecting element A to be evicted\")\n\t}\n\n\tb.Set(\"e\", \"ve\", now.Add(time.Duration(-4*time.Second)))\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"Expecting element B to be evicted\")\n\t}\n\n\tb.Set(\"f\", \"vf\", now.Add(time.Duration(5*time.Second)))\n\tif _, ok := b.Get(\"e\"); ok {\n\t\tt.Error(\"Expecting element E to be evicted\")\n\t}\n\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"Expecting element C to not be evicted\")\n\t}\n\tn := now.Add(time.Duration(10 * time.Second))\n\tb.SetNow(\"g\", \"vg\", now.Add(time.Duration(5*time.Second)), n)\n\tif _, ok := b.Get(\"c\"); ok {\n\t\tt.Error(\"Expecting element C to be evicted\")\n\t}\n\n\tif b.Len() != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tb.Del(\"miss\")\n\tb.Del(\"g\")\n\tif b.Len() != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tb.Clear()\n\tif b.Len() != 0 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tnow = time.Now()\n\tb.Set(\"b\", \"vb\", now.Add(time.Duration(2*time.Second)))\n\tb.Set(\"a\", \"va\", now.Add(time.Duration(1*time.Second)))\n\tb.Set(\"d\", \"vd\", now.Add(time.Duration(4*time.Second)))\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"Expecting miss\")\n\t}\n\n\tb.GetQuiet(\"miss\")\n\tif v, _ := b.GetQuiet(\"a\"); v != \"va\" {\n\t\tt.Error(\"Expecting hit\")\n\t}\n\n\tb.Set(\"e\", \"ve\", now.Add(time.Duration(5*time.Second)))\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expecting miss\")\n\t}\n\n\tif b.Capacity() != 3 {\n\t\tt.Error(\"Expecting different capacity\")\n\t}\n}\n\nfunc TestBasicNoExpiry(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tb.Set(\"b\", \"vb\", time.Time{})\n\tb.Set(\"a\", \"va\", time.Time{})\n\tb.Set(\"c\", \"vc\", time.Time{})\n\tb.Set(\"d\", \"vd\", time.Time{})\n\n\tif _, ok := b.Get(\"b\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\tif v, _ := b.Get(\"c\"); v != \"vc\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\tif v, _ := b.Get(\"d\"); v != \"vd\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\tpast := time.Now().Add(time.Duration(-10 * time.Second))\n\n\tb.Set(\"e\", \"ve\", past)\n\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\tif v, _ := b.Get(\"e\"); v != \"ve\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\t\/\/ Make sure expired items get evicted before items without expiry\n\tb.Set(\"f\", \"vf\", time.Time{})\n\tif _, ok := b.Get(\"e\"); ok {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tr := b.Clear()\n\tif b.Len() != 0 || r != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\n\tb.Set(\"c\", \"vc\", time.Time{})\n\tb.Set(\"d\", \"vd\", time.Time{})\n\tb.Set(\"e\", \"ve\", past)\n\n\tif b.Len() != 3 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tr = b.Expire()\n\tif b.Len() != 2 || r != 1 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tr = b.Clear()\n\tif b.Len() != 0 || r != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n}\n\nfunc TestNil(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\n\t\/\/ value nil\n\tif v, ok := b.Get(\"a\"); v != nil || ok != false {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tb.Set(\"a\", nil, time.Time{})\n\n\tif v, ok := b.Get(\"a\"); v != nil || ok != true {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\t\/\/ value not nil (sanity check)\n\tif v, ok := b.Get(\"b\"); v != nil || ok != false {\n\t\tt.Error(\"expecting miss\")\n\t}\n\n\tb.Set(\"b\", \"vb\", time.Time{})\n\n\tif v, ok := b.Get(\"b\"); v != \"vb\" || ok != true {\n\t\tt.Error(\"expecting miss\")\n\t}\n}\n\nfunc rec(foo func()) (recovered int) {\n\trecovered = 0\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\trecovered += 1\n\t\t}\n\t}()\n\tfoo()\n\treturn recovered\n}\n\nfunc TestPanicByValue(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\n\tb.Set(\"a\", \"a\", time.Time{})\n\n\tc := *b\n\tr := rec(func() {\n\t\tc.Del(\"a\")\n\t})\n\tif r != 1 {\n\t\tt.Error(\"Expecting panic\")\n\t}\n\n\tb.Del(\"a\")\n\n\tr = rec(func() {\n\t\tc.Set(\"a\", \"A\", time.Time{})\n\t})\n\tif r != 1 {\n\t\tt.Error(\"Expecting panic\")\n\t}\n}\n\nfunc TestZeroLength(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(0)\n\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expected miss\")\n\t}\n\n\tb.Set(\"a\", \"va\", time.Time{})\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"Expected miss\")\n\t}\n\n\tb.Clear()\n}\n\nfunc TestExtra(t *testing.T) {\n\tt.Parallel()\n\tb := NewLRUCache(3)\n\tif _, ok := b.Get(\"a\"); ok {\n\t\tt.Error(\"\")\n\t}\n\n\tnow := time.Now()\n\tb.Set(\"b\", \"vb\", now.Add(time.Duration(-2*time.Second)))\n\tb.Set(\"a\", \"va\", now.Add(time.Duration(-1*time.Second)))\n\tb.Set(\"c\", \"vc\", now.Add(time.Duration(3*time.Second)))\n\n\tif v, _ := b.Get(\"a\"); v != \"va\" {\n\t\tt.Error(\"expecting value\")\n\t}\n\n\tif _, ok := b.GetNotStale(\"a\"); ok {\n\t\tt.Error(\"not expecting value\")\n\t}\n\tif _, ok := b.GetNotStale(\"miss\"); ok {\n\t\tt.Error(\"not expecting value\")\n\t}\n\tif v, _ := b.GetNotStale(\"c\"); v != \"vc\" {\n\t\tt.Error(\"expecting hit\")\n\t}\n\n\tif b.Len() != 2 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n\tif b.Expire() != 1 {\n\t\tt.Error(\"Expecting different length\")\n\t}\n}\n\nfunc randomString(l int) string {\n\tbytes := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(90-65))\n\t}\n\treturn string(bytes)\n}\n\nfunc createFilledBucket(expire time.Time) *LRUCache {\n\tb := NewLRUCache(1000)\n\tfor i := 0; i < 1000; i++ {\n\t\tb.Set(randomString(2), \"value\", expire)\n\t}\n\treturn b\n}\n\nfunc TestConcurrentGet(t *testing.T) {\n\tt.Parallel()\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tdone := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < 10000; i++ {\n\t\t\tb.Get(randomString(2))\n\t\t}\n\t\tdone <- true\n\t}\n\tworkers := 4\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < workers; i++ {\n\t\t_ = <-done\n\t}\n}\n\nfunc TestConcurrentSet(t *testing.T) {\n\tt.Parallel()\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tdone := make(chan bool)\n\tworker := func() {\n\t\texpire := time.Now().Add(time.Duration(4 * time.Second))\n\t\tfor i := 0; i < 10000; i++ {\n\t\t\tb.Set(randomString(2), \"value\", expire)\n\t\t}\n\t\tdone <- true\n\t}\n\tworkers := 4\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < workers; i++ {\n\t\t_ = <-done\n\t}\n}\n\nfunc BenchmarkConcurrentGetLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\tb.Get(randomString(2))\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n\nfunc BenchmarkConcurrentSetLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Now().Add(time.Duration(4)))\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\texpire := time.Now().Add(time.Duration(4 * time.Second))\n\t\t\tb.Set(randomString(2), \"v\", expire)\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n\n\/\/ No expiry\nfunc BenchmarkConcurrentSetNXLRUCache(bb *testing.B) {\n\tb := createFilledBucket(time.Time{})\n\n\tcpu := runtime.GOMAXPROCS(0)\n\tch := make(chan bool)\n\tworker := func() {\n\t\tfor i := 0; i < bb.N\/cpu; i++ {\n\t\t\tb.Set(randomString(2), \"v\", time.Time{})\n\t\t}\n\t\tch <- true\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\tgo worker()\n\t}\n\tfor i := 0; i < cpu; i++ {\n\t\t_ = <-ch\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cespare\/goclj\/format\"\n\t\"github.com\/cespare\/goclj\/parse\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `usage: %s [flags] [paths...]\nAny directories given will be recursively walked. If no paths are provided,\ncljfmt reads from standard input.\n\nFlags:`, os.Args[0])\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tvar (\n\t\tlist = flag.Bool(\"l\", false, \"print files whose formatting differs from cljfmt's\")\n\t\twrite = flag.Bool(\"w\", false, \"write result to (source) file instead of stdout\")\n\t)\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tif *write {\n\t\t\tfatal(\"cannot use -w with standard input\")\n\t\t}\n\t\tif err := processFile(\"<stdin>\", os.Stdin, false, false); err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\tfor _, path := range flag.Args() {\n\t\tstat, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tif stat.IsDir() {\n\t\t\twalkDir(path, *list, *write)\n\t\t\tcontinue\n\t\t}\n\t\tif err := processFile(path, nil, *list, *write); err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t}\n}\n\nvar (\n\tbuf1 bytes.Buffer\n\tbuf2 bytes.Buffer\n)\n\n\/\/ processFile formats the given file.\n\/\/ If in == nil, the input is the file of the given name.\nfunc processFile(filename string, in io.Reader, list, write bool) error {\n\tif in == nil {\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tin = f\n\t}\n\n\tbuf1.Reset()\n\tbuf2.Reset()\n\n\tif _, err := io.Copy(&buf1, in); err != nil {\n\t\treturn err\n\t}\n\tt, err := parse.Reader(bytes.NewReader(buf1.Bytes()), filename, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := format.NewPrinter(&buf2)\n\tp.IndentChar = ' '\n\tif err := p.PrintTree(t); err != nil {\n\t\treturn err\n\t}\n\tif bytes.Equal(buf1.Bytes(), buf2.Bytes()) {\n\t\treturn nil\n\t}\n\tif list {\n\t\tfmt.Println(filename)\n\t}\n\tif write {\n\t\tif err := ioutil.WriteFile(filename, buf2.Bytes(), 0644); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !list && !write {\n\t\tio.Copy(os.Stdout, &buf2)\n\t}\n\treturn nil\n}\n\nfunc walkDir(path string, list, write bool) {\n\twalk := func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(path, f.IsDir())\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif name := f.Name(); strings.HasPrefix(name, \".\") || !strings.HasSuffix(name, \".clj\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn processFile(path, nil, list, write)\n\t}\n\tif err := filepath.Walk(path, walk); err != nil {\n\t\tfatal(err)\n\t}\n}\n\nfunc fatal(args ...interface{}) {\n\tfmt.Fprintln(os.Stderr, args...)\n\tos.Exit(1)\n}\n\nfunc fatalf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n<commit_msg>[cljfmt] Keep original permissions<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cespare\/goclj\/format\"\n\t\"github.com\/cespare\/goclj\/parse\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `usage: %s [flags] [paths...]\nAny directories given will be recursively walked. If no paths are provided,\ncljfmt reads from standard input.\n\nFlags:`, os.Args[0])\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tvar (\n\t\tlist = flag.Bool(\"l\", false, \"print files whose formatting differs from cljfmt's\")\n\t\twrite = flag.Bool(\"w\", false, \"write result to (source) file instead of stdout\")\n\t)\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tif *write {\n\t\t\tfatal(\"cannot use -w with standard input\")\n\t\t}\n\t\tif err := processFile(\"<stdin>\", os.Stdin, false, false); err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\tfor _, path := range flag.Args() {\n\t\tstat, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tif stat.IsDir() {\n\t\t\twalkDir(path, *list, *write)\n\t\t\tcontinue\n\t\t}\n\t\tif err := processFile(path, nil, *list, *write); err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t}\n}\n\nvar (\n\tbuf1 bytes.Buffer\n\tbuf2 bytes.Buffer\n)\n\n\/\/ processFile formats the given file.\n\/\/ If in == nil, the input is the file of the given name.\nfunc processFile(filename string, in io.Reader, list, write bool) error {\n\tvar perm os.FileMode = 0644\n\tif in == nil {\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tstat, err := f.Stat()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tperm = stat.Mode().Perm()\n\t\tin = f\n\t}\n\n\tbuf1.Reset()\n\tbuf2.Reset()\n\n\tif _, err := io.Copy(&buf1, in); err != nil {\n\t\treturn err\n\t}\n\tt, err := parse.Reader(bytes.NewReader(buf1.Bytes()), filename, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := format.NewPrinter(&buf2)\n\tp.IndentChar = ' '\n\tif err := p.PrintTree(t); err != nil {\n\t\treturn err\n\t}\n\tif bytes.Equal(buf1.Bytes(), buf2.Bytes()) {\n\t\treturn nil\n\t}\n\tif list {\n\t\tfmt.Println(filename)\n\t}\n\tif write {\n\t\tif err := ioutil.WriteFile(filename, buf2.Bytes(), perm); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !list && !write {\n\t\tio.Copy(os.Stdout, &buf2)\n\t}\n\treturn nil\n}\n\nfunc walkDir(path string, list, write bool) {\n\twalk := func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif name := f.Name(); strings.HasPrefix(name, \".\") || !strings.HasSuffix(name, \".clj\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn processFile(path, nil, list, write)\n\t}\n\tif err := filepath.Walk(path, walk); err != nil {\n\t\tfatal(err)\n\t}\n}\n\nfunc fatal(args ...interface{}) {\n\tfmt.Fprintln(os.Stderr, args...)\n\tos.Exit(1)\n}\n\nfunc fatalf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package migrator\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/twitchscience\/aws_utils\/logger\"\n\t\"github.com\/twitchscience\/rs_ingester\/backend\"\n\t\"github.com\/twitchscience\/rs_ingester\/blueprint\"\n\t\"github.com\/twitchscience\/rs_ingester\/metadata\"\n\t\"github.com\/twitchscience\/rs_ingester\/versions\"\n)\n\ntype tableVersion struct {\n\ttable string\n\tversion int\n}\n\n\/\/ VersionIncrement is used to send a request to increment a table's version w\/o running a migration.\ntype VersionIncrement struct {\n\tTable string\n\tVersion int\n\tResponse chan error\n}\n\n\/\/ Migrator manages the migration of Ace as new versioned tsvs come in.\ntype Migrator struct {\n\tversions versions.GetterSetter\n\taceBackend backend.Backend\n\tmetaBackend metadata.Reader\n\tbpClient blueprint.Client\n\tcloser chan bool\n\toldVersionWaitClose chan bool\n\tversionIncrement chan VersionIncrement\n\twg sync.WaitGroup\n\tpollPeriod time.Duration\n\twaitProcessorPeriod time.Duration\n\tmigrationStarted map[tableVersion]time.Time\n\toffpeakStartHour int\n\toffpeakDurationHours int\n}\n\n\/\/ New returns a new Migrator for migrating schemas\nfunc New(aceBack backend.Backend,\n\tmetaBack metadata.Reader,\n\tblueprintClient blueprint.Client,\n\tversions versions.GetterSetter,\n\tpollPeriod time.Duration,\n\twaitProcessorPeriod time.Duration,\n\toffpeakStartHour int,\n\toffpeakDurationHours int,\n\tversionIncrement chan VersionIncrement) *Migrator {\n\tm := Migrator{\n\t\tversions: versions,\n\t\taceBackend: aceBack,\n\t\tmetaBackend: metaBack,\n\t\tbpClient: blueprintClient,\n\t\tcloser: make(chan bool),\n\t\toldVersionWaitClose: make(chan bool),\n\t\tversionIncrement: versionIncrement,\n\t\tpollPeriod: pollPeriod,\n\t\twaitProcessorPeriod: waitProcessorPeriod,\n\t\tmigrationStarted: make(map[tableVersion]time.Time),\n\t\toffpeakStartHour: offpeakStartHour,\n\t\toffpeakDurationHours: offpeakDurationHours,\n\t}\n\n\tm.wg.Add(1)\n\tlogger.Go(func() {\n\t\tdefer m.wg.Done()\n\t\tm.loop()\n\t})\n\treturn &m\n}\n\n\/\/ findTablesToMigrate inspects tsvs waiting to be loaded and compares their versions\n\/\/ with the current versions, returning table names to be migrated up\nfunc (m *Migrator) findTablesToMigrate() ([]string, error) {\n\ttsvVersions, err := m.metaBackend.Versions()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error finding versions from unloaded tsvs: %v\", err)\n\t}\n\tvar tables []string\n\tfor tsvTable, tsvVersion := range tsvVersions {\n\t\taceVersion, existant := m.versions.Get(tsvTable)\n\t\tif !existant || tsvVersion > aceVersion {\n\t\t\ttables = append(tables, tsvTable)\n\t\t}\n\t}\n\treturn tables, nil\n}\n\n\/\/isOldVersionCleared checks to see if there are any tsvs for the given table and\n\/\/version still in queue to be loaded. If there are, it prioritizes those tsvs\n\/\/to be loaded.\nfunc (m *Migrator) isOldVersionCleared(table string, version int) (bool, error) {\n\texists, err := m.metaBackend.TSVVersionExists(table, version)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !exists {\n\t\treturn true, nil\n\t}\n\treturn false, m.metaBackend.PrioritizeTSVVersion(table, version-1)\n}\n\nfunc (m *Migrator) migrate(table string, to int) error {\n\tops, err := m.bpClient.GetMigration(table, to)\n\tif err != nil {\n\t\treturn err\n\t}\n\texists, err := m.aceBackend.TableExists(table)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\terr = m.aceBackend.CreateTable(table, ops, to)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ to migrate, first we wait until processor finishes the old version...\n\t\ttimeMigrationStarted, started := m.migrationStarted[tableVersion{table, to}]\n\t\tif !started {\n\t\t\tnow := time.Now()\n\t\t\tm.migrationStarted[tableVersion{table, to}] = now\n\t\t\tlogger.WithField(\"table\", table).\n\t\t\t\tWithField(\"version\", to).\n\t\t\t\tWithField(\"until\", now.Add(m.waitProcessorPeriod)).\n\t\t\t\tInfo(\"Starting to wait for processor before migrating\")\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ don't do anything if we haven't waited long enough for processor\n\t\tif time.Since(timeMigrationStarted) < m.waitProcessorPeriod {\n\t\t\tlogger.WithField(\"table\", table).\n\t\t\t\tWithField(\"version\", to).\n\t\t\t\tWithField(\"until\", timeMigrationStarted.Add(m.waitProcessorPeriod)).\n\t\t\t\tInfo(\"Waiting for processor before migrating\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ wait for all the old version TSVs to ingest before proceeding\n\t\tcleared, err := m.isOldVersionCleared(table, to-1)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error waiting for old version to clear: %v\", err)\n\t\t}\n\t\tif !cleared {\n\t\t\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Waiting for old version to clear.\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ everything is ready, now actually do the migration\n\t\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Beginning to migrate\")\n\t\terr = m.aceBackend.ApplyOperations(table, ops, to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error applying operations to %s: %v\", table, err)\n\t\t}\n\t}\n\tm.versions.Set(table, to)\n\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Migrated table successfully\")\n\n\treturn nil\n}\n\nfunc (m *Migrator) isOffPeakHours() bool {\n\tcurrentHour := time.Now().Hour()\n\tif m.offpeakStartHour+m.offpeakDurationHours <= 24 {\n\t\tif (m.offpeakStartHour <= currentHour) &&\n\t\t\t(currentHour < m.offpeakStartHour+m.offpeakDurationHours) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\t\/\/ if duration bleeds into the new day, check the two segments before and after midnight\n\tif (m.offpeakStartHour <= currentHour) &&\n\t\t(currentHour < 24) {\n\t\treturn true\n\t}\n\tif (0 <= currentHour) &&\n\t\t(currentHour < (m.offpeakStartHour+m.offpeakDurationHours)%24) {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (m *Migrator) incrementVersion(verInc VersionIncrement) {\n\texists, err := m.aceBackend.TableExists(verInc.Table)\n\tswitch {\n\tcase err != nil:\n\t\tverInc.Response <- fmt.Errorf(\n\t\t\t\"error determining if table %s exists: %v\", verInc.Table, err)\n\tcase exists:\n\t\tverInc.Response <- fmt.Errorf(\n\t\t\t\"attempted to increment version of table that exists: %s\", verInc.Table)\n\tdefault:\n\t\terr = m.aceBackend.ApplyOperations(verInc.Table, nil, verInc.Version)\n\t\tif err == nil {\n\t\t\tlogger.Infof(\"Incremented table %s to version %d\",\n\t\t\t\tverInc.Table, verInc.Version)\n\t\t\tm.versions.Set(verInc.Table, verInc.Version)\n\t\t}\n\t\tverInc.Response <- err\n\t}\n}\n\nfunc (m *Migrator) findAndApplyMigrations() {\n\toutdatedTables, err := m.findTablesToMigrate()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error finding migrations to apply\")\n\t}\n\tif len(outdatedTables) == 0 {\n\t\tlogger.Infof(\"Migrator didn't find any tables to migrate.\")\n\t} else {\n\t\tlogger.WithField(\"numTables\", len(outdatedTables)).Infof(\"Migrator found tables to migrate.\")\n\t}\n\tfor _, table := range outdatedTables {\n\t\tvar newVersion int\n\t\tcurrentVersion, exists := m.versions.Get(table)\n\t\tif !exists { \/\/ table doesn't exist yet, create it by 'migrating' to version 0\n\t\t\tnewVersion = 0\n\t\t} else {\n\t\t\tnewVersion = currentVersion + 1\n\t\t}\n\t\t\/\/ if not offpeak, don't migrate table, but still allow table creation\n\t\tif newVersion > 0 && !m.isOffPeakHours() {\n\t\t\tlogger.WithField(\"table\", table).WithField(\"version\", newVersion).Infof(\"Not migrating; waiting until offpeak at %dh UTC\", m.offpeakStartHour)\n\t\t\tcontinue\n\t\t}\n\t\terr := m.migrate(table, newVersion)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"table\", table).WithField(\"version\", newVersion).Error(\"Error migrating table\")\n\t\t}\n\t}\n}\n\nfunc (m *Migrator) loop() {\n\tlogger.Info(\"Migrator started.\")\n\tdefer logger.Info(\"Migrator stopped.\")\n\ttick := time.NewTicker(m.pollPeriod)\n\tfor {\n\t\tselect {\n\t\tcase verInc := <-m.versionIncrement:\n\t\t\tm.incrementVersion(verInc)\n\t\tcase <-tick.C:\n\t\t\tm.findAndApplyMigrations()\n\t\tcase <-m.closer:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Close signals the migrator to stop looking for new migrations and waits until\n\/\/ it's finished any migrations.\nfunc (m *Migrator) Close() {\n\tm.closer <- true\n\tm.wg.Wait()\n}\n<commit_msg>Fix TSV prioritization for old versions<commit_after>package migrator\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/twitchscience\/aws_utils\/logger\"\n\t\"github.com\/twitchscience\/rs_ingester\/backend\"\n\t\"github.com\/twitchscience\/rs_ingester\/blueprint\"\n\t\"github.com\/twitchscience\/rs_ingester\/metadata\"\n\t\"github.com\/twitchscience\/rs_ingester\/versions\"\n)\n\ntype tableVersion struct {\n\ttable string\n\tversion int\n}\n\n\/\/ VersionIncrement is used to send a request to increment a table's version w\/o running a migration.\ntype VersionIncrement struct {\n\tTable string\n\tVersion int\n\tResponse chan error\n}\n\n\/\/ Migrator manages the migration of Ace as new versioned tsvs come in.\ntype Migrator struct {\n\tversions versions.GetterSetter\n\taceBackend backend.Backend\n\tmetaBackend metadata.Reader\n\tbpClient blueprint.Client\n\tcloser chan bool\n\toldVersionWaitClose chan bool\n\tversionIncrement chan VersionIncrement\n\twg sync.WaitGroup\n\tpollPeriod time.Duration\n\twaitProcessorPeriod time.Duration\n\tmigrationStarted map[tableVersion]time.Time\n\toffpeakStartHour int\n\toffpeakDurationHours int\n}\n\n\/\/ New returns a new Migrator for migrating schemas\nfunc New(aceBack backend.Backend,\n\tmetaBack metadata.Reader,\n\tblueprintClient blueprint.Client,\n\tversions versions.GetterSetter,\n\tpollPeriod time.Duration,\n\twaitProcessorPeriod time.Duration,\n\toffpeakStartHour int,\n\toffpeakDurationHours int,\n\tversionIncrement chan VersionIncrement) *Migrator {\n\tm := Migrator{\n\t\tversions: versions,\n\t\taceBackend: aceBack,\n\t\tmetaBackend: metaBack,\n\t\tbpClient: blueprintClient,\n\t\tcloser: make(chan bool),\n\t\toldVersionWaitClose: make(chan bool),\n\t\tversionIncrement: versionIncrement,\n\t\tpollPeriod: pollPeriod,\n\t\twaitProcessorPeriod: waitProcessorPeriod,\n\t\tmigrationStarted: make(map[tableVersion]time.Time),\n\t\toffpeakStartHour: offpeakStartHour,\n\t\toffpeakDurationHours: offpeakDurationHours,\n\t}\n\n\tm.wg.Add(1)\n\tlogger.Go(func() {\n\t\tdefer m.wg.Done()\n\t\tm.loop()\n\t})\n\treturn &m\n}\n\n\/\/ findTablesToMigrate inspects tsvs waiting to be loaded and compares their versions\n\/\/ with the current versions, returning table names to be migrated up\nfunc (m *Migrator) findTablesToMigrate() ([]string, error) {\n\ttsvVersions, err := m.metaBackend.Versions()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error finding versions from unloaded tsvs: %v\", err)\n\t}\n\tvar tables []string\n\tfor tsvTable, tsvVersion := range tsvVersions {\n\t\taceVersion, existant := m.versions.Get(tsvTable)\n\t\tif !existant || tsvVersion > aceVersion {\n\t\t\ttables = append(tables, tsvTable)\n\t\t}\n\t}\n\treturn tables, nil\n}\n\n\/\/isOldVersionCleared checks to see if there are any tsvs for the given table and\n\/\/version still in queue to be loaded. If there are, it prioritizes those tsvs\n\/\/to be loaded.\nfunc (m *Migrator) isOldVersionCleared(table string, version int) (bool, error) {\n\texists, err := m.metaBackend.TSVVersionExists(table, version)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !exists {\n\t\treturn true, nil\n\t}\n\treturn false, m.metaBackend.PrioritizeTSVVersion(table, version)\n}\n\nfunc (m *Migrator) migrate(table string, to int) error {\n\tops, err := m.bpClient.GetMigration(table, to)\n\tif err != nil {\n\t\treturn err\n\t}\n\texists, err := m.aceBackend.TableExists(table)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\terr = m.aceBackend.CreateTable(table, ops, to)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ to migrate, first we wait until processor finishes the old version...\n\t\ttimeMigrationStarted, started := m.migrationStarted[tableVersion{table, to}]\n\t\tif !started {\n\t\t\tnow := time.Now()\n\t\t\tm.migrationStarted[tableVersion{table, to}] = now\n\t\t\tlogger.WithField(\"table\", table).\n\t\t\t\tWithField(\"version\", to).\n\t\t\t\tWithField(\"until\", now.Add(m.waitProcessorPeriod)).\n\t\t\t\tInfo(\"Starting to wait for processor before migrating\")\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ don't do anything if we haven't waited long enough for processor\n\t\tif time.Since(timeMigrationStarted) < m.waitProcessorPeriod {\n\t\t\tlogger.WithField(\"table\", table).\n\t\t\t\tWithField(\"version\", to).\n\t\t\t\tWithField(\"until\", timeMigrationStarted.Add(m.waitProcessorPeriod)).\n\t\t\t\tInfo(\"Waiting for processor before migrating\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ wait for all the old version TSVs to ingest before proceeding\n\t\tcleared, err := m.isOldVersionCleared(table, to-1)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error waiting for old version to clear: %v\", err)\n\t\t}\n\t\tif !cleared {\n\t\t\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Waiting for old version to clear.\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ everything is ready, now actually do the migration\n\t\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Beginning to migrate\")\n\t\terr = m.aceBackend.ApplyOperations(table, ops, to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error applying operations to %s: %v\", table, err)\n\t\t}\n\t}\n\tm.versions.Set(table, to)\n\tlogger.WithField(\"table\", table).WithField(\"version\", to).Info(\"Migrated table successfully\")\n\n\treturn nil\n}\n\nfunc (m *Migrator) isOffPeakHours() bool {\n\tcurrentHour := time.Now().Hour()\n\tif m.offpeakStartHour+m.offpeakDurationHours <= 24 {\n\t\tif (m.offpeakStartHour <= currentHour) &&\n\t\t\t(currentHour < m.offpeakStartHour+m.offpeakDurationHours) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\t\/\/ if duration bleeds into the new day, check the two segments before and after midnight\n\tif (m.offpeakStartHour <= currentHour) &&\n\t\t(currentHour < 24) {\n\t\treturn true\n\t}\n\tif (0 <= currentHour) &&\n\t\t(currentHour < (m.offpeakStartHour+m.offpeakDurationHours)%24) {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (m *Migrator) incrementVersion(verInc VersionIncrement) {\n\texists, err := m.aceBackend.TableExists(verInc.Table)\n\tswitch {\n\tcase err != nil:\n\t\tverInc.Response <- fmt.Errorf(\n\t\t\t\"error determining if table %s exists: %v\", verInc.Table, err)\n\tcase exists:\n\t\tverInc.Response <- fmt.Errorf(\n\t\t\t\"attempted to increment version of table that exists: %s\", verInc.Table)\n\tdefault:\n\t\terr = m.aceBackend.ApplyOperations(verInc.Table, nil, verInc.Version)\n\t\tif err == nil {\n\t\t\tlogger.Infof(\"Incremented table %s to version %d\",\n\t\t\t\tverInc.Table, verInc.Version)\n\t\t\tm.versions.Set(verInc.Table, verInc.Version)\n\t\t}\n\t\tverInc.Response <- err\n\t}\n}\n\nfunc (m *Migrator) findAndApplyMigrations() {\n\toutdatedTables, err := m.findTablesToMigrate()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Error finding migrations to apply\")\n\t}\n\tif len(outdatedTables) == 0 {\n\t\tlogger.Infof(\"Migrator didn't find any tables to migrate.\")\n\t} else {\n\t\tlogger.WithField(\"numTables\", len(outdatedTables)).Infof(\"Migrator found tables to migrate.\")\n\t}\n\tfor _, table := range outdatedTables {\n\t\tvar newVersion int\n\t\tcurrentVersion, exists := m.versions.Get(table)\n\t\tif !exists { \/\/ table doesn't exist yet, create it by 'migrating' to version 0\n\t\t\tnewVersion = 0\n\t\t} else {\n\t\t\tnewVersion = currentVersion + 1\n\t\t}\n\t\t\/\/ if not offpeak, don't migrate table, but still allow table creation\n\t\tif newVersion > 0 && !m.isOffPeakHours() {\n\t\t\tlogger.WithField(\"table\", table).WithField(\"version\", newVersion).Infof(\"Not migrating; waiting until offpeak at %dh UTC\", m.offpeakStartHour)\n\t\t\tcontinue\n\t\t}\n\t\terr := m.migrate(table, newVersion)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"table\", table).WithField(\"version\", newVersion).Error(\"Error migrating table\")\n\t\t}\n\t}\n}\n\nfunc (m *Migrator) loop() {\n\tlogger.Info(\"Migrator started.\")\n\tdefer logger.Info(\"Migrator stopped.\")\n\ttick := time.NewTicker(m.pollPeriod)\n\tfor {\n\t\tselect {\n\t\tcase verInc := <-m.versionIncrement:\n\t\t\tm.incrementVersion(verInc)\n\t\tcase <-tick.C:\n\t\t\tm.findAndApplyMigrations()\n\t\tcase <-m.closer:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Close signals the migrator to stop looking for new migrations and waits until\n\/\/ it's finished any migrations.\nfunc (m *Migrator) Close() {\n\tm.closer <- true\n\tm.wg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build evmjit\n\npackage vm\n\n\/*\n#include <stdint.h>\n#include <stdlib.h>\n\nstruct evmjit_result\n{\n\tint32_t returnCode;\n\tuint64_t returnDataSize;\n\tvoid* returnData;\n};\n\nstruct evmjit_result evmjit_run(void* _data, void* _env);\n\n\/\/ Shared library evmjit (e.g. libevmjit.so) is expected to be installed in \/usr\/local\/lib\n\/\/ More: https:\/\/github.com\/ethereum\/evmjit\n#cgo LDFLAGS: -levmjit\n*\/\nimport \"C\"\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\"\n\t\"github.com\/ethereum\/go-ethereum\/state\"\n\t\"math\/big\"\n\t\"unsafe\"\n)\n\ntype JitVm struct {\n\tenv Environment\n\tme ContextRef\n\tcallerAddr []byte\n\tprice *big.Int\n\tdata RuntimeData\n}\n\ntype i256 [32]byte\n\ntype RuntimeData struct {\n\tgas int64\n\tgasPrice int64\n\tcallData *byte\n\tcallDataSize uint64\n\taddress i256\n\tcaller i256\n\torigin i256\n\tcallValue i256\n\tcoinBase i256\n\tdifficulty i256\n\tgasLimit i256\n\tnumber uint64\n\ttimestamp int64\n\tcode *byte\n\tcodeSize uint64\n}\n\nfunc hash2llvm(h []byte) i256 {\n\tvar m i256\n\tcopy(m[len(m)-len(h):], h) \/\/ right aligned copy\n\treturn m\n}\n\nfunc llvm2hash(m *i256) []byte {\n\treturn C.GoBytes(unsafe.Pointer(m), C.int(len(m)))\n}\n\nfunc llvm2hashRef(m *i256) []byte {\n\treturn (*[1 << 30]byte)(unsafe.Pointer(m))[:len(m):len(m)]\n}\n\nfunc address2llvm(addr []byte) i256 {\n\tn := hash2llvm(addr)\n\tbswap(&n)\n\treturn n\n}\n\n\/\/ bswap swap bytes of the 256-bit integer on LLVM side\n\/\/ TODO: Do not change memory on LLVM side, that can conflict with memory access optimizations\nfunc bswap(m *i256) *i256 {\n\tfor i, l := 0, len(m); i < l\/2; i++ {\n\t\tm[i], m[l-i-1] = m[l-i-1], m[i]\n\t}\n\treturn m\n}\n\nfunc trim(m []byte) []byte {\n\tskip := 0\n\tfor i := 0; i < len(m); i++ {\n\t\tif m[i] == 0 {\n\t\t\tskip++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn m[skip:]\n}\n\nfunc getDataPtr(m []byte) *byte {\n\tvar p *byte\n\tif len(m) > 0 {\n\t\tp = &m[0]\n\t}\n\treturn p\n}\n\nfunc big2llvm(n *big.Int) i256 {\n\tm := hash2llvm(n.Bytes())\n\tbswap(&m)\n\treturn m\n}\n\nfunc llvm2big(m *i256) *big.Int {\n\tn := big.NewInt(0)\n\tfor i := 0; i < len(m); i++ {\n\t\tb := big.NewInt(int64(m[i]))\n\t\tb.Lsh(b, uint(i)*8)\n\t\tn.Add(n, b)\n\t}\n\treturn n\n}\n\n\/\/ llvm2bytesRef creates a []byte slice that references byte buffer on LLVM side (as of that not controller by GC)\n\/\/ User must asure that referenced memory is available to Go until the data is copied or not needed any more\nfunc llvm2bytesRef(data *byte, length uint64) []byte {\n\tif length == 0 {\n\t\treturn nil\n\t}\n\tif data == nil {\n\t\tpanic(\"Unexpected nil data pointer\")\n\t}\n\treturn (*[1 << 30]byte)(unsafe.Pointer(data))[:length:length]\n}\n\nfunc untested(condition bool, message string) {\n\tif condition {\n\t\tpanic(\"Condition `\" + message + \"` tested. Remove assert.\")\n\t}\n}\n\nfunc assert(condition bool, message string) {\n\tif !condition {\n\t\tpanic(\"Assert `\" + message + \"` failed!\")\n\t}\n}\n\nfunc NewJitVm(env Environment) *JitVm {\n\treturn &JitVm{env: env}\n}\n\nfunc (self *JitVm) Run(me, caller ContextRef, code []byte, value, gas, price *big.Int, callData []byte) (ret []byte, err error) {\n\t\/\/ TODO: depth is increased but never checked by VM. VM should not know about it at all.\n\tself.env.SetDepth(self.env.Depth() + 1)\n\n\t\/\/ TODO: Move it to Env.Call() or sth\n\tif Precompiled[string(me.Address())] != nil {\n\t\t\/\/ if it's address of precopiled contract\n\t\t\/\/ fallback to standard VM\n\t\tstdVm := New(self.env)\n\t\treturn stdVm.Run(me, caller, code, value, gas, price, callData)\n\t}\n\n\tif self.me != nil {\n\t\tpanic(\"JitVm.Run() can be called only once per JitVm instance\")\n\t}\n\n\tself.me = me\n\tself.callerAddr = caller.Address()\n\tself.price = price\n\n\tself.data.gas = gas.Int64()\n\tself.data.gasPrice = price.Int64()\n\tself.data.callData = getDataPtr(callData)\n\tself.data.callDataSize = uint64(len(callData))\n\tself.data.address = address2llvm(self.me.Address())\n\tself.data.caller = address2llvm(caller.Address())\n\tself.data.origin = address2llvm(self.env.Origin())\n\tself.data.callValue = big2llvm(value)\n\tself.data.coinBase = address2llvm(self.env.Coinbase())\n\tself.data.difficulty = big2llvm(self.env.Difficulty())\n\tself.data.gasLimit = big2llvm(self.env.GasLimit())\n\tself.data.number = self.env.BlockNumber().Uint64()\n\tself.data.timestamp = self.env.Time()\n\tself.data.code = getDataPtr(code)\n\tself.data.codeSize = uint64(len(code))\n\n\tresult := C.evmjit_run(unsafe.Pointer(&self.data), unsafe.Pointer(self))\n\n\tif result.returnCode >= 100 {\n\t\terr = errors.New(\"OOG from JIT\")\n\t\tgas.SetInt64(0) \/\/ Set gas to 0, JIT does not bother\n\t} else {\n\t\tgas.SetInt64(self.data.gas)\n\t\tif result.returnCode == 1 { \/\/ RETURN\n\t\t\tret = C.GoBytes(result.returnData, C.int(result.returnDataSize))\n\t\t\tC.free(result.returnData)\n\t\t} else if result.returnCode == 2 { \/\/ SUICIDE\n\t\t\t\/\/ TODO: Suicide support logic should be moved to Env to be shared by VM implementations\n\t\t\tstate := self.Env().State()\n\t\t\treceiverAddr := llvm2hashRef(bswap(&self.data.address))\n\t\t\treceiver := state.GetOrNewStateObject(receiverAddr)\n\t\t\tbalance := state.GetBalance(me.Address())\n\t\t\treceiver.AddAmount(balance)\n\t\t\tstate.Delete(me.Address())\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (self *JitVm) Printf(format string, v ...interface{}) VirtualMachine {\n\treturn self\n}\n\nfunc (self *JitVm) Endl() VirtualMachine {\n\treturn self\n}\n\nfunc (self *JitVm) Env() Environment {\n\treturn self.env\n}\n\n\/\/export env_sha3\nfunc env_sha3(dataPtr *byte, length uint64, resultPtr unsafe.Pointer) {\n\tdata := llvm2bytesRef(dataPtr, length)\n\thash := crypto.Sha3(data)\n\tresult := (*i256)(resultPtr)\n\t*result = hash2llvm(hash)\n}\n\n\/\/export env_sstore\nfunc env_sstore(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, valuePtr unsafe.Pointer) {\n\tvm := (*JitVm)(vmPtr)\n\tindex := llvm2hash(bswap((*i256)(indexPtr)))\n\tvalue := llvm2hash(bswap((*i256)(valuePtr)))\n\tvalue = trim(value)\n\tif len(value) == 0 {\n\t\tprevValue := vm.env.State().GetState(vm.me.Address(), index)\n\t\tif len(prevValue) != 0 {\n\t\t\tvm.Env().State().Refund(vm.callerAddr, GasSStoreRefund)\n\t\t}\n\t}\n\n\tvm.env.State().SetState(vm.me.Address(), index, value)\n}\n\n\/\/export env_sload\nfunc env_sload(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, resultPtr unsafe.Pointer) {\n\tvm := (*JitVm)(vmPtr)\n\tindex := llvm2hash(bswap((*i256)(indexPtr)))\n\tvalue := vm.env.State().GetState(vm.me.Address(), index)\n\tresult := (*i256)(resultPtr)\n\t*result = hash2llvm(value)\n\tbswap(result)\n}\n\n\/\/export env_balance\nfunc env_balance(_vm unsafe.Pointer, _addr unsafe.Pointer, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\taddr := llvm2hash((*i256)(_addr))\n\tbalance := vm.Env().State().GetBalance(addr)\n\tresult := (*i256)(_result)\n\t*result = big2llvm(balance)\n}\n\n\/\/export env_blockhash\nfunc env_blockhash(_vm unsafe.Pointer, _number unsafe.Pointer, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\tnumber := llvm2big((*i256)(_number))\n\tresult := (*i256)(_result)\n\n\tcurrNumber := vm.Env().BlockNumber()\n\tlimit := big.NewInt(0).Sub(currNumber, big.NewInt(256))\n\tif number.Cmp(limit) >= 0 && number.Cmp(currNumber) < 0 {\n\t\thash := vm.Env().GetHash(uint64(number.Int64()))\n\t\t*result = hash2llvm(hash)\n\t} else {\n\t\t*result = i256{}\n\t}\n}\n\n\/\/export env_call\nfunc env_call(_vm unsafe.Pointer, _gas unsafe.Pointer, _receiveAddr unsafe.Pointer, _value unsafe.Pointer, inDataPtr unsafe.Pointer, inDataLen uint64, outDataPtr *byte, outDataLen uint64, _codeAddr unsafe.Pointer) bool {\n\tvm := (*JitVm)(_vm)\n\n\t\/\/fmt.Printf(\"env_call (depth %d)\\n\", vm.Env().Depth())\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Printf(\"Recovered in env_call (depth %d, out %p %d): %s\\n\", vm.Env().Depth(), outDataPtr, outDataLen, r)\n\t\t}\n\t}()\n\n\tbalance := vm.Env().State().GetBalance(vm.me.Address())\n\tvalue := llvm2big((*i256)(_value))\n\n\tif balance.Cmp(value) >= 0 {\n\t\treceiveAddr := llvm2hash((*i256)(_receiveAddr))\n\t\tinData := C.GoBytes(inDataPtr, C.int(inDataLen))\n\t\toutData := llvm2bytesRef(outDataPtr, outDataLen)\n\t\tcodeAddr := llvm2hash((*i256)(_codeAddr))\n\t\tllvmGas := (*i256)(_gas)\n\t\tgas := llvm2big(llvmGas)\n\t\tvar out []byte\n\t\tvar err error\n\t\tif bytes.Equal(codeAddr, receiveAddr) {\n\t\t\tout, err = vm.env.Call(vm.me, codeAddr, inData, gas, vm.price, value)\n\t\t} else {\n\t\t\tout, err = vm.env.CallCode(vm.me, codeAddr, inData, gas, vm.price, value)\n\t\t}\n\t\t*llvmGas = big2llvm(gas)\n\t\tif err == nil {\n\t\t\tcopy(outData, out)\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/export env_create\nfunc env_create(_vm unsafe.Pointer, _gas unsafe.Pointer, _value unsafe.Pointer, initDataPtr unsafe.Pointer, initDataLen uint64, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\n\tvalue := llvm2big((*i256)(_value))\n\tinitData := C.GoBytes(initDataPtr, C.int(initDataLen)) \/\/ TODO: Unnecessary if low balance\n\tresult := (*i256)(_result)\n\t*result = i256{}\n\n\tllvmGas := (*i256)(_gas)\n\tgas := llvm2big(llvmGas)\n\n\tret, suberr, ref := vm.env.Create(vm.me, nil, initData, gas, vm.price, value)\n\tif suberr == nil {\n\t\tdataGas := big.NewInt(int64(len(ret))) \/\/ TODO: Nto the best design. env.Create can do it, it has the reference to gas counter\n\t\tdataGas.Mul(dataGas, GasCreateByte)\n\t\tgas.Sub(gas, dataGas)\n\t\t*result = hash2llvm(ref.Address())\n\t}\n\t*llvmGas = big2llvm(gas)\n}\n\n\/\/export env_log\nfunc env_log(_vm unsafe.Pointer, dataPtr unsafe.Pointer, dataLen uint64, _topic1 unsafe.Pointer, _topic2 unsafe.Pointer, _topic3 unsafe.Pointer, _topic4 unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\n\tdata := C.GoBytes(dataPtr, C.int(dataLen))\n\n\ttopics := make([][]byte, 0, 4)\n\tif _topic1 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic1)))\n\t}\n\tif _topic2 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic2)))\n\t}\n\tif _topic3 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic3)))\n\t}\n\tif _topic4 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic4)))\n\t}\n\n\tvm.Env().AddLog(state.NewLog(vm.me.Address(), topics, data))\n}\n\n\/\/export env_extcode\nfunc env_extcode(_vm unsafe.Pointer, _addr unsafe.Pointer, o_size *uint64) *byte {\n\tvm := (*JitVm)(_vm)\n\taddr := llvm2hash((*i256)(_addr))\n\tcode := vm.Env().State().GetCode(addr)\n\t*o_size = uint64(len(code))\n\treturn getDataPtr(code)\n}\n<commit_msg>Update JitVm to new EVM JIT ABI (C interface)<commit_after>\/\/ +build evmjit\n\npackage vm\n\n\/*\n\nvoid* evmjit_create();\nint evmjit_run(void* _jit, void* _data, void* _env);\nvoid evmjit_destroy(void* _jit);\n\n\/\/ Shared library evmjit (e.g. libevmjit.so) is expected to be installed in \/usr\/local\/lib\n\/\/ More: https:\/\/github.com\/ethereum\/evmjit\n#cgo LDFLAGS: -levmjit\n*\/\nimport \"C\"\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\"\n\t\"github.com\/ethereum\/go-ethereum\/state\"\n\t\"math\/big\"\n\t\"unsafe\"\n)\n\ntype JitVm struct {\n\tenv Environment\n\tme ContextRef\n\tcallerAddr []byte\n\tprice *big.Int\n\tdata RuntimeData\n}\n\ntype i256 [32]byte\n\ntype RuntimeData struct {\n\tgas int64\n\tgasPrice int64\n\tcallData *byte\n\tcallDataSize uint64\n\taddress i256\n\tcaller i256\n\torigin i256\n\tcallValue i256\n\tcoinBase i256\n\tdifficulty i256\n\tgasLimit i256\n\tnumber uint64\n\ttimestamp int64\n\tcode *byte\n\tcodeSize uint64\n}\n\nfunc hash2llvm(h []byte) i256 {\n\tvar m i256\n\tcopy(m[len(m)-len(h):], h) \/\/ right aligned copy\n\treturn m\n}\n\nfunc llvm2hash(m *i256) []byte {\n\treturn C.GoBytes(unsafe.Pointer(m), C.int(len(m)))\n}\n\nfunc llvm2hashRef(m *i256) []byte {\n\treturn (*[1 << 30]byte)(unsafe.Pointer(m))[:len(m):len(m)]\n}\n\nfunc address2llvm(addr []byte) i256 {\n\tn := hash2llvm(addr)\n\tbswap(&n)\n\treturn n\n}\n\n\/\/ bswap swap bytes of the 256-bit integer on LLVM side\n\/\/ TODO: Do not change memory on LLVM side, that can conflict with memory access optimizations\nfunc bswap(m *i256) *i256 {\n\tfor i, l := 0, len(m); i < l\/2; i++ {\n\t\tm[i], m[l-i-1] = m[l-i-1], m[i]\n\t}\n\treturn m\n}\n\nfunc trim(m []byte) []byte {\n\tskip := 0\n\tfor i := 0; i < len(m); i++ {\n\t\tif m[i] == 0 {\n\t\t\tskip++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn m[skip:]\n}\n\nfunc getDataPtr(m []byte) *byte {\n\tvar p *byte\n\tif len(m) > 0 {\n\t\tp = &m[0]\n\t}\n\treturn p\n}\n\nfunc big2llvm(n *big.Int) i256 {\n\tm := hash2llvm(n.Bytes())\n\tbswap(&m)\n\treturn m\n}\n\nfunc llvm2big(m *i256) *big.Int {\n\tn := big.NewInt(0)\n\tfor i := 0; i < len(m); i++ {\n\t\tb := big.NewInt(int64(m[i]))\n\t\tb.Lsh(b, uint(i)*8)\n\t\tn.Add(n, b)\n\t}\n\treturn n\n}\n\n\/\/ llvm2bytesRef creates a []byte slice that references byte buffer on LLVM side (as of that not controller by GC)\n\/\/ User must asure that referenced memory is available to Go until the data is copied or not needed any more\nfunc llvm2bytesRef(data *byte, length uint64) []byte {\n\tif length == 0 {\n\t\treturn nil\n\t}\n\tif data == nil {\n\t\tpanic(\"Unexpected nil data pointer\")\n\t}\n\treturn (*[1 << 30]byte)(unsafe.Pointer(data))[:length:length]\n}\n\nfunc untested(condition bool, message string) {\n\tif condition {\n\t\tpanic(\"Condition `\" + message + \"` tested. Remove assert.\")\n\t}\n}\n\nfunc assert(condition bool, message string) {\n\tif !condition {\n\t\tpanic(\"Assert `\" + message + \"` failed!\")\n\t}\n}\n\nfunc NewJitVm(env Environment) *JitVm {\n\treturn &JitVm{env: env}\n}\n\nfunc (self *JitVm) Run(me, caller ContextRef, code []byte, value, gas, price *big.Int, callData []byte) (ret []byte, err error) {\n\t\/\/ TODO: depth is increased but never checked by VM. VM should not know about it at all.\n\tself.env.SetDepth(self.env.Depth() + 1)\n\n\t\/\/ TODO: Move it to Env.Call() or sth\n\tif Precompiled[string(me.Address())] != nil {\n\t\t\/\/ if it's address of precopiled contract\n\t\t\/\/ fallback to standard VM\n\t\tstdVm := New(self.env)\n\t\treturn stdVm.Run(me, caller, code, value, gas, price, callData)\n\t}\n\n\tif self.me != nil {\n\t\tpanic(\"JitVm.Run() can be called only once per JitVm instance\")\n\t}\n\n\tself.me = me\n\tself.callerAddr = caller.Address()\n\tself.price = price\n\n\tself.data.gas = gas.Int64()\n\tself.data.gasPrice = price.Int64()\n\tself.data.callData = getDataPtr(callData)\n\tself.data.callDataSize = uint64(len(callData))\n\tself.data.address = address2llvm(self.me.Address())\n\tself.data.caller = address2llvm(caller.Address())\n\tself.data.origin = address2llvm(self.env.Origin())\n\tself.data.callValue = big2llvm(value)\n\tself.data.coinBase = address2llvm(self.env.Coinbase())\n\tself.data.difficulty = big2llvm(self.env.Difficulty())\n\tself.data.gasLimit = big2llvm(self.env.GasLimit())\n\tself.data.number = self.env.BlockNumber().Uint64()\n\tself.data.timestamp = self.env.Time()\n\tself.data.code = getDataPtr(code)\n\tself.data.codeSize = uint64(len(code))\n\n\tjit := C.evmjit_create()\n\tretCode := C.evmjit_run(jit, unsafe.Pointer(&self.data), unsafe.Pointer(self))\n\n\tif retCode < 0 {\n\t\terr = errors.New(\"OOG from JIT\")\n\t\tgas.SetInt64(0) \/\/ Set gas to 0, JIT does not bother\n\t} else {\n\t\tgas.SetInt64(self.data.gas)\n\t\tif retCode == 1 { \/\/ RETURN\n\t\t\tret = C.GoBytes(unsafe.Pointer(self.data.callData), C.int(self.data.callDataSize))\n\t\t} else if retCode == 2 { \/\/ SUICIDE\n\t\t\t\/\/ TODO: Suicide support logic should be moved to Env to be shared by VM implementations\n\t\t\tstate := self.Env().State()\n\t\t\treceiverAddr := llvm2hashRef(bswap(&self.data.address))\n\t\t\treceiver := state.GetOrNewStateObject(receiverAddr)\n\t\t\tbalance := state.GetBalance(me.Address())\n\t\t\treceiver.AddAmount(balance)\n\t\t\tstate.Delete(me.Address())\n\t\t}\n\t}\n\t\n\tC.evmjit_destroy(jit);\n\treturn\n}\n\nfunc (self *JitVm) Printf(format string, v ...interface{}) VirtualMachine {\n\treturn self\n}\n\nfunc (self *JitVm) Endl() VirtualMachine {\n\treturn self\n}\n\nfunc (self *JitVm) Env() Environment {\n\treturn self.env\n}\n\n\/\/export env_sha3\nfunc env_sha3(dataPtr *byte, length uint64, resultPtr unsafe.Pointer) {\n\tdata := llvm2bytesRef(dataPtr, length)\n\thash := crypto.Sha3(data)\n\tresult := (*i256)(resultPtr)\n\t*result = hash2llvm(hash)\n}\n\n\/\/export env_sstore\nfunc env_sstore(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, valuePtr unsafe.Pointer) {\n\tvm := (*JitVm)(vmPtr)\n\tindex := llvm2hash(bswap((*i256)(indexPtr)))\n\tvalue := llvm2hash(bswap((*i256)(valuePtr)))\n\tvalue = trim(value)\n\tif len(value) == 0 {\n\t\tprevValue := vm.env.State().GetState(vm.me.Address(), index)\n\t\tif len(prevValue) != 0 {\n\t\t\tvm.Env().State().Refund(vm.callerAddr, GasSStoreRefund)\n\t\t}\n\t}\n\n\tvm.env.State().SetState(vm.me.Address(), index, value)\n}\n\n\/\/export env_sload\nfunc env_sload(vmPtr unsafe.Pointer, indexPtr unsafe.Pointer, resultPtr unsafe.Pointer) {\n\tvm := (*JitVm)(vmPtr)\n\tindex := llvm2hash(bswap((*i256)(indexPtr)))\n\tvalue := vm.env.State().GetState(vm.me.Address(), index)\n\tresult := (*i256)(resultPtr)\n\t*result = hash2llvm(value)\n\tbswap(result)\n}\n\n\/\/export env_balance\nfunc env_balance(_vm unsafe.Pointer, _addr unsafe.Pointer, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\taddr := llvm2hash((*i256)(_addr))\n\tbalance := vm.Env().State().GetBalance(addr)\n\tresult := (*i256)(_result)\n\t*result = big2llvm(balance)\n}\n\n\/\/export env_blockhash\nfunc env_blockhash(_vm unsafe.Pointer, _number unsafe.Pointer, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\tnumber := llvm2big((*i256)(_number))\n\tresult := (*i256)(_result)\n\n\tcurrNumber := vm.Env().BlockNumber()\n\tlimit := big.NewInt(0).Sub(currNumber, big.NewInt(256))\n\tif number.Cmp(limit) >= 0 && number.Cmp(currNumber) < 0 {\n\t\thash := vm.Env().GetHash(uint64(number.Int64()))\n\t\t*result = hash2llvm(hash)\n\t} else {\n\t\t*result = i256{}\n\t}\n}\n\n\/\/export env_call\nfunc env_call(_vm unsafe.Pointer, _gas unsafe.Pointer, _receiveAddr unsafe.Pointer, _value unsafe.Pointer, inDataPtr unsafe.Pointer, inDataLen uint64, outDataPtr *byte, outDataLen uint64, _codeAddr unsafe.Pointer) bool {\n\tvm := (*JitVm)(_vm)\n\n\t\/\/fmt.Printf(\"env_call (depth %d)\\n\", vm.Env().Depth())\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Printf(\"Recovered in env_call (depth %d, out %p %d): %s\\n\", vm.Env().Depth(), outDataPtr, outDataLen, r)\n\t\t}\n\t}()\n\n\tbalance := vm.Env().State().GetBalance(vm.me.Address())\n\tvalue := llvm2big((*i256)(_value))\n\n\tif balance.Cmp(value) >= 0 {\n\t\treceiveAddr := llvm2hash((*i256)(_receiveAddr))\n\t\tinData := C.GoBytes(inDataPtr, C.int(inDataLen))\n\t\toutData := llvm2bytesRef(outDataPtr, outDataLen)\n\t\tcodeAddr := llvm2hash((*i256)(_codeAddr))\n\t\tllvmGas := (*i256)(_gas)\n\t\tgas := llvm2big(llvmGas)\n\t\tvar out []byte\n\t\tvar err error\n\t\tif bytes.Equal(codeAddr, receiveAddr) {\n\t\t\tout, err = vm.env.Call(vm.me, codeAddr, inData, gas, vm.price, value)\n\t\t} else {\n\t\t\tout, err = vm.env.CallCode(vm.me, codeAddr, inData, gas, vm.price, value)\n\t\t}\n\t\t*llvmGas = big2llvm(gas)\n\t\tif err == nil {\n\t\t\tcopy(outData, out)\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/export env_create\nfunc env_create(_vm unsafe.Pointer, _gas unsafe.Pointer, _value unsafe.Pointer, initDataPtr unsafe.Pointer, initDataLen uint64, _result unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\n\tvalue := llvm2big((*i256)(_value))\n\tinitData := C.GoBytes(initDataPtr, C.int(initDataLen)) \/\/ TODO: Unnecessary if low balance\n\tresult := (*i256)(_result)\n\t*result = i256{}\n\n\tllvmGas := (*i256)(_gas)\n\tgas := llvm2big(llvmGas)\n\n\tret, suberr, ref := vm.env.Create(vm.me, nil, initData, gas, vm.price, value)\n\tif suberr == nil {\n\t\tdataGas := big.NewInt(int64(len(ret))) \/\/ TODO: Nto the best design. env.Create can do it, it has the reference to gas counter\n\t\tdataGas.Mul(dataGas, GasCreateByte)\n\t\tgas.Sub(gas, dataGas)\n\t\t*result = hash2llvm(ref.Address())\n\t}\n\t*llvmGas = big2llvm(gas)\n}\n\n\/\/export env_log\nfunc env_log(_vm unsafe.Pointer, dataPtr unsafe.Pointer, dataLen uint64, _topic1 unsafe.Pointer, _topic2 unsafe.Pointer, _topic3 unsafe.Pointer, _topic4 unsafe.Pointer) {\n\tvm := (*JitVm)(_vm)\n\n\tdata := C.GoBytes(dataPtr, C.int(dataLen))\n\n\ttopics := make([][]byte, 0, 4)\n\tif _topic1 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic1)))\n\t}\n\tif _topic2 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic2)))\n\t}\n\tif _topic3 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic3)))\n\t}\n\tif _topic4 != nil {\n\t\ttopics = append(topics, llvm2hash((*i256)(_topic4)))\n\t}\n\n\tvm.Env().AddLog(state.NewLog(vm.me.Address(), topics, data))\n}\n\n\/\/export env_extcode\nfunc env_extcode(_vm unsafe.Pointer, _addr unsafe.Pointer, o_size *uint64) *byte {\n\tvm := (*JitVm)(_vm)\n\taddr := llvm2hash((*i256)(_addr))\n\tcode := vm.Env().State().GetCode(addr)\n\t*o_size = uint64(len(code))\n\treturn getDataPtr(code)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Precisely AB.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage hellosign\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"mime\/multipart\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc fieldTagName(tag string) string {\n\tsArr := strings.Split(tag, \",\")\n\tif len(sArr) > 0 {\n\t\treturn sArr[0]\n\t}\n\treturn \"\"\n}\n\nfunc omitEmpty(tag string) bool {\n\tsArr := strings.Split(tag, \",\")\n\tif len(sArr) == 2 {\n\t\treturn sArr[1] == \"omitempty\"\n\t}\n\treturn false\n}\n\nfunc (c *hellosign) marshalMultipart(obj interface{}) (*bytes.Buffer, *multipart.Writer, error) {\n\tvar b bytes.Buffer\n\tw := multipart.NewWriter(&b)\n\tif err := marshalObj(w, \"\", obj); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &b, w, nil\n}\n\nfunc marshalObj(w *multipart.Writer, prefix string, obj interface{}) error {\n\tstructType := reflect.TypeOf(obj)\n\tval := reflect.ValueOf(obj)\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\treturn fmt.Errorf(\"cannot marshal nil ptr\")\n\t\t}\n\t\tval = val.Elem()\n\t\tstructType = reflect.TypeOf(val.Interface())\n\t}\n\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tvalField := val.Field(i)\n\t\tf := valField.Interface()\n\t\tval := reflect.ValueOf(f)\n\n\t\tfield := structType.Field(i)\n\t\ttag := field.Tag.Get(\"form\")\n\t\ttagName := fieldTagName(tag)\n\t\toe := omitEmpty(tag)\n\n\t\tif tagName == \"\" || tagName == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch val.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif val.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvInter := val.Interface()\n\t\t\tif err := marshalObj(w, fmt.Sprintf(\"%s%s\", prefix, tagName), vInter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tif oe && len(val.MapKeys()) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, k := range val.MapKeys() {\n\t\t\t\tif err := writeString(w, fmt.Sprintf(\"%s[%s]\", prefix+tagName, k.String()), val.MapIndex(k).String()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tif oe && val.Len() == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfIndexVal := val.Index(0)\n\t\t\tswitch fIndexVal.Kind() {\n\t\t\tcase reflect.Slice:\n\t\t\t\tif tagName == \"file\" {\n\t\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\t\tinter := val.Index(i).Interface()\n\t\t\t\t\t\tbArr, ok := inter.([]byte)\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"%s is not a byte slice\", key)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tff, err := w.CreateFormFile(key, fmt.Sprintf(\"Document %d\", i))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tff.Write(bArr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ No else case as we don't really have any other kinds of slices in slices.\n\t\t\tcase reflect.String:\n\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\tif err := writeString(w, key, val.Index(i).String()); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase reflect.Struct:\n\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\tsObj := val.Index(i).Interface()\n\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\tif err := marshalObj(w, key, sObj); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\tdefault:\n\t\t\tkey := fmt.Sprintf(\"%s%s\", prefix, tagName)\n\t\t\tif prefix != \"\" {\n\t\t\t\tkey = fmt.Sprintf(\"%s[%s]\", prefix, tagName)\n\t\t\t}\n\t\t\tif err := marshalPrimitive(w, oe, key, val.Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc marshalPrimitive(w *multipart.Writer, oe bool, tagName string, v interface{}) error {\n\tval := reflect.ValueOf(v)\n\tswitch val.Kind() {\n\tcase reflect.Bool:\n\t\tif err := marshalBool(w, tagName, oe, val.Bool()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.String:\n\t\tif oe && val.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif err := writeString(w, tagName, val.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Int8:\n\t\tfallthrough\n\tcase reflect.Int16:\n\t\tfallthrough\n\tcase reflect.Int32:\n\t\tfallthrough\n\tcase reflect.Int64:\n\t\tfallthrough\n\tcase reflect.Int:\n\t\tif err := marshalInt(w, tagName, oe, val.Int()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Uint8:\n\t\tfallthrough\n\tcase reflect.Uint16:\n\t\tfallthrough\n\tcase reflect.Uint32:\n\t\tfallthrough\n\tcase reflect.Uint64:\n\t\tfallthrough\n\tcase reflect.Uint:\n\t\tif err := marshalUint(w, tagName, oe, val.Uint()); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tif oe && val.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif err := writeString(w, tagName, val.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeString(w *multipart.Writer, name, val string) error {\n\tff, err := w.CreateFormField(name)\n\tif err != nil {\n\t\treturn nil\n\t}\n\t_, err = ff.Write([]byte(val))\n\treturn err\n}\n\nfunc marshalUint(w *multipart.Writer, tagName string, oe bool, val uint64) error {\n\tif oe && val == 0 {\n\t\treturn nil\n\t}\n\tstrUint := strconv.FormatUint(val, 10)\n\treturn writeString(w, tagName, strUint)\n}\n\nfunc marshalInt(w *multipart.Writer, tagName string, oe bool, val int64) error {\n\tif oe && val == 0 {\n\t\treturn nil\n\t}\n\tstrInt := strconv.FormatInt(val, 10)\n\treturn writeString(w, tagName, strInt)\n}\n\nfunc marshalBool(w *multipart.Writer, tagName string, oe bool, val bool) error {\n\tif oe && val == false {\n\t\treturn nil\n\t}\n\tstrBool := strconv.FormatInt(int64(BoolToInt(val)), 10)\n\treturn writeString(w, tagName, strBool)\n}\n<commit_msg>marshaler: make sure to close the multipart writer<commit_after>\/\/ Copyright 2016 Precisely AB.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage hellosign\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"mime\/multipart\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc fieldTagName(tag string) string {\n\tsArr := strings.Split(tag, \",\")\n\tif len(sArr) > 0 {\n\t\treturn sArr[0]\n\t}\n\treturn \"\"\n}\n\nfunc omitEmpty(tag string) bool {\n\tsArr := strings.Split(tag, \",\")\n\tif len(sArr) == 2 {\n\t\treturn sArr[1] == \"omitempty\"\n\t}\n\treturn false\n}\n\nfunc (c *hellosign) marshalMultipart(obj interface{}) (*bytes.Buffer, *multipart.Writer, error) {\n\tvar b bytes.Buffer\n\tw := multipart.NewWriter(&b)\n\tif err := marshalObj(w, \"\", obj); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tw.Close()\n\treturn &b, w, nil\n}\n\nfunc marshalObj(w *multipart.Writer, prefix string, obj interface{}) error {\n\tstructType := reflect.TypeOf(obj)\n\tval := reflect.ValueOf(obj)\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\treturn fmt.Errorf(\"cannot marshal nil ptr\")\n\t\t}\n\t\tval = val.Elem()\n\t\tstructType = reflect.TypeOf(val.Interface())\n\t}\n\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tvalField := val.Field(i)\n\t\tf := valField.Interface()\n\t\tval := reflect.ValueOf(f)\n\n\t\tfield := structType.Field(i)\n\t\ttag := field.Tag.Get(\"form\")\n\t\ttagName := fieldTagName(tag)\n\t\toe := omitEmpty(tag)\n\n\t\tif tagName == \"\" || tagName == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch val.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif val.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvInter := val.Interface()\n\t\t\tif err := marshalObj(w, fmt.Sprintf(\"%s%s\", prefix, tagName), vInter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tif oe && len(val.MapKeys()) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, k := range val.MapKeys() {\n\t\t\t\tif err := writeString(w, fmt.Sprintf(\"%s[%s]\", prefix+tagName, k.String()), val.MapIndex(k).String()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tif oe && val.Len() == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfIndexVal := val.Index(0)\n\t\t\tswitch fIndexVal.Kind() {\n\t\t\tcase reflect.Slice:\n\t\t\t\tif tagName == \"file\" {\n\t\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\t\tinter := val.Index(i).Interface()\n\t\t\t\t\t\tbArr, ok := inter.([]byte)\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"%s is not a byte slice\", key)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tff, err := w.CreateFormFile(key, fmt.Sprintf(\"Document %d\", i))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tff.Write(bArr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ No else case as we don't really have any other kinds of slices in slices.\n\t\t\tcase reflect.String:\n\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\tif err := writeString(w, key, val.Index(i).String()); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase reflect.Struct:\n\t\t\t\tfor i := 0; i < val.Len(); i++ {\n\t\t\t\t\tsObj := val.Index(i).Interface()\n\t\t\t\t\tkey := fmt.Sprintf(\"%s%s[%d]\", prefix, tagName, i)\n\t\t\t\t\tif err := marshalObj(w, key, sObj); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\tdefault:\n\t\t\tkey := fmt.Sprintf(\"%s%s\", prefix, tagName)\n\t\t\tif prefix != \"\" {\n\t\t\t\tkey = fmt.Sprintf(\"%s[%s]\", prefix, tagName)\n\t\t\t}\n\t\t\tif err := marshalPrimitive(w, oe, key, val.Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc marshalPrimitive(w *multipart.Writer, oe bool, tagName string, v interface{}) error {\n\tval := reflect.ValueOf(v)\n\tswitch val.Kind() {\n\tcase reflect.Bool:\n\t\tif err := marshalBool(w, tagName, oe, val.Bool()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.String:\n\t\tif oe && val.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif err := writeString(w, tagName, val.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Int8:\n\t\tfallthrough\n\tcase reflect.Int16:\n\t\tfallthrough\n\tcase reflect.Int32:\n\t\tfallthrough\n\tcase reflect.Int64:\n\t\tfallthrough\n\tcase reflect.Int:\n\t\tif err := marshalInt(w, tagName, oe, val.Int()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Uint8:\n\t\tfallthrough\n\tcase reflect.Uint16:\n\t\tfallthrough\n\tcase reflect.Uint32:\n\t\tfallthrough\n\tcase reflect.Uint64:\n\t\tfallthrough\n\tcase reflect.Uint:\n\t\tif err := marshalUint(w, tagName, oe, val.Uint()); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tif oe && val.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif err := writeString(w, tagName, val.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeString(w *multipart.Writer, name, val string) error {\n\tff, err := w.CreateFormField(name)\n\tif err != nil {\n\t\treturn nil\n\t}\n\t_, err = ff.Write([]byte(val))\n\treturn err\n}\n\nfunc marshalUint(w *multipart.Writer, tagName string, oe bool, val uint64) error {\n\tif oe && val == 0 {\n\t\treturn nil\n\t}\n\tstrUint := strconv.FormatUint(val, 10)\n\treturn writeString(w, tagName, strUint)\n}\n\nfunc marshalInt(w *multipart.Writer, tagName string, oe bool, val int64) error {\n\tif oe && val == 0 {\n\t\treturn nil\n\t}\n\tstrInt := strconv.FormatInt(val, 10)\n\treturn writeString(w, tagName, strInt)\n}\n\nfunc marshalBool(w *multipart.Writer, tagName string, oe bool, val bool) error {\n\tif oe && val == false {\n\t\treturn nil\n\t}\n\tstrBool := strconv.FormatInt(int64(BoolToInt(val)), 10)\n\treturn writeString(w, tagName, strBool)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\ntype ImageDirective struct {\n\tTag string\n\tParameters map[string]string\n}\n\ntype Directive struct {\n\tRepository string\n\tImageDirectives []*ImageDirective\n}\n\ntype unmarshalledYaml struct {\n\tRepository string\n\tImages []struct {\n\t\tTag string\n\t\tParameters []struct {\n\t\t\tName string\n\t\t\tValue string\n\t\t}\n\t}\n}\n\nfunc main() {\n\tbuf, err := ioutil.ReadFile(\"voicepipe.yml\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tuy := &unmarshalledYaml{}\n\terr = yaml.Unmarshal(buf, uy)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\td := &Directive{\n\t\tRepository: uy.Repository,\n\t\tImageDirectives: make([]*ImageDirective, 0),\n\t}\n\tfor _, i := range uy.Images {\n\t\tparams := make(map[string]string)\n\t\tfor _, p := range i.Parameters {\n\t\t\tparams[p.Name] = params[p.Value]\n\t\t}\n\t\td.ImageDirectives = append(\n\t\t\td.ImageDirectives,\n\t\t\t&ImageDirective{Tag: i.Tag, Parameters: params},\n\t\t)\n\t}\n\tfmt.Println(d)\n}\n<commit_msg>Rename a structure<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\ntype ImageDirective struct {\n\tTag string\n\tParameters map[string]string\n}\n\ntype Directive struct {\n\tRepository string\n\tImageDirectives []*ImageDirective\n}\n\ntype intermediateDirective struct {\n\tRepository string\n\tImages []struct {\n\t\tTag string\n\t\tParameters []struct {\n\t\t\tName string\n\t\t\tValue string\n\t\t}\n\t}\n}\n\nfunc main() {\n\tbuf, err := ioutil.ReadFile(\"voicepipe.yml\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tin := &intermediateDirective{}\n\terr = yaml.Unmarshal(buf, in)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\td := &Directive{\n\t\tRepository: in.Repository,\n\t\tImageDirectives: make([]*ImageDirective, 0),\n\t}\n\tfor _, i := range in.Images {\n\t\tparams := make(map[string]string)\n\t\tfor _, p := range i.Parameters {\n\t\t\tparams[p.Name] = params[p.Value]\n\t\t}\n\t\td.ImageDirectives = append(\n\t\t\td.ImageDirectives,\n\t\t\t&ImageDirective{Tag: i.Tag, Parameters: params},\n\t\t)\n\t}\n\tfmt.Println(d)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/brotherlogic\/goserver\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpbd \"github.com\/brotherlogic\/discovery\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildmaster\/proto\"\n\tpbs \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\tpbg \"github.com\/brotherlogic\/goserver\/proto\"\n\t\"github.com\/brotherlogic\/goserver\/utils\"\n)\n\nconst (\n\tintentWait = time.Second\n)\n\n\/\/ Server the main server type\ntype Server struct {\n\t*goserver.GoServer\n\tconfig *pb.Config\n\tserving bool\n\tLastIntent time.Time\n\tLastMaster time.Time\n\tworldMutex *sync.Mutex\n\tworld map[string]map[string]struct{}\n\tgetter getter\n}\n\ntype prodGetter struct{}\n\nfunc (g *prodGetter) getJobs(server *pbd.RegistryEntry) (*pbs.JobList, error) {\n\tlist := &pbs.JobList{}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tconn, err := grpc.Dial(server.GetIp()+\":\"+strconv.Itoa(int(server.GetPort())), grpc.WithInsecure())\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn list, err\n\t}\n\n\tslave := pbs.NewGoBuildSlaveClient(conn)\n\tr, err := slave.List(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\treturn r, err\n}\n\nfunc (g *prodGetter) getSlaves() (*pbd.ServiceList, error) {\n\tret := &pbd.ServiceList{}\n\n\tconn, err := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tfor _, s := range r.Services {\n\t\tif s.GetName() == \"gobuildslave\" {\n\t\t\tret.Services = append(ret.Services, s)\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\ntype mainChecker struct {\n\tprev []string\n\tlogger func(string)\n}\n\nfunc getIP(servertype, servername string) (string, int) {\n\tconn, _ := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn \"\", -1\n\t}\n\tfor _, s := range r.Services {\n\t\tif s.Name == servertype && s.Identifier == servername {\n\t\t\treturn s.Ip, int(s.Port)\n\t\t}\n\t}\n\n\treturn \"\", -1\n}\n\nfunc (t *mainChecker) getprev() []string {\n\treturn t.prev\n}\nfunc (t *mainChecker) setprev(v []string) {\n\tt.prev = v\n}\n\nfunc (t *mainChecker) assess(server string) (*pbs.JobList, *pbs.Config) {\n\tlist := &pbs.JobList{}\n\tconf := &pbs.Config{}\n\n\tip, port := getIP(\"gobuildslave\", server)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tconn, err := grpc.Dial(ip+\":\"+strconv.Itoa(port), grpc.WithInsecure())\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\tslave := pbs.NewGoBuildSlaveClient(conn)\n\tr, err := slave.List(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\tr2, err := slave.GetConfig(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\treturn r, r2\n}\n\nfunc (t *mainChecker) master(entry *pbd.RegistryEntry, master bool) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\tconn, _ := grpc.Dial(entry.GetIp()+\":\"+strconv.Itoa(int(entry.GetPort())), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tserver := pbg.NewGoserverServiceClient(conn)\n\t_, err := server.Mote(ctx, &pbg.MoteRequest{Master: master}, grpc.FailFast(false))\n\tif err != nil {\n\t\tt.logger(fmt.Sprintf(\"Master REJECT(%v): %v\", entry, err))\n\t}\n\n\treturn err == nil\n}\n\nfunc runJob(job *pbs.JobSpec, server string) {\n\tif server != \"\" {\n\t\tip, port := getIP(\"gobuildslave\", server)\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\tdefer cancel()\n\t\tconn, _ := grpc.Dial(ip+\":\"+strconv.Itoa(port), grpc.WithInsecure())\n\t\tdefer conn.Close()\n\n\t\tslave := pbs.NewGoBuildSlaveClient(conn)\n\t\tjob.Server = server\n\t\tslave.Run(ctx, job, grpc.FailFast(false))\n\t}\n}\n\nfunc (t *mainChecker) discover() *pbd.ServiceList {\n\tret := &pbd.ServiceList{}\n\n\tconn, _ := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err == nil {\n\t\tfor _, s := range r.Services {\n\t\t\tret.Services = append(ret.Services, s)\n\t\t}\n\t}\n\n\treturn ret\n}\n\n\/\/ DoRegister Registers this server\nfunc (s Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterGoBuildMasterServer(server, &s)\n}\n\n\/\/ ReportHealth determines if the server is healthy\nfunc (s Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/ Mote promotes\/demotes this server\nfunc (s Server) Mote(master bool) error {\n\treturn nil\n}\n\n\/\/GetState gets the state of the server\nfunc (s Server) GetState() []*pbg.State {\n\treturn []*pbg.State{&pbg.State{Key: \"last_intent\", TimeValue: s.LastIntent.Unix()},\n\t\t&pbg.State{Key: \"last_master\", TimeValue: s.LastMaster.Unix()},\n\t\t&pbg.State{Key: \"world\", Text: fmt.Sprintf(\"%v\", s.world)}}\n}\n\n\/\/Compare compares current state to desired state\nfunc (s Server) Compare(ctx context.Context, in *pb.Empty) (*pb.CompareResponse, error) {\n\tresp := &pb.CompareResponse{}\n\tlist, _ := getFleetStatus(&mainChecker{logger: s.Log})\n\tcc := &pb.Config{}\n\tfor _, jlist := range list {\n\t\tfor _, job := range jlist.GetDetails() {\n\t\t\tcc.Intents = append(cc.Intents, &pb.Intent{Spec: job.GetSpec()})\n\t\t}\n\t}\n\tresp.Current = cc\n\tresp.Desired = s.config\n\n\treturn resp, nil\n}\n\nfunc getConfig(c checker) *pb.Config {\n\tlist, _ := getFleetStatus(c)\n\tconfig := &pb.Config{}\n\n\tfor _, jlist := range list {\n\t\tfor _, job := range jlist.Details {\n\t\t\tfound := false\n\t\t\tfor _, ij := range config.Intents {\n\t\t\t\tif job.Spec.Name == ij.Spec.Name {\n\t\t\t\t\tij.Count++\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\tconfig.Intents = append(config.Intents, &pb.Intent{Spec: &pbs.JobSpec{Name: job.Spec.Name}, Count: 1})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn config\n}\n\n\/\/ MatchIntent tries to match the intent with the state of production\nfunc (s *Server) MatchIntent() {\n\tchecker := &mainChecker{logger: s.Log}\n\tfor s.serving {\n\t\ttime.Sleep(intentWait)\n\t\ts.LastIntent = time.Now()\n\n\t\tstate := getConfig(checker)\n\t\tdiff := configDiff(s.config, state)\n\t\tjoblist := runJobs(diff)\n\t\tfor _, job := range joblist {\n\t\t\trunJob(job, chooseServer(job, checker))\n\t\t}\n\t}\n}\n\n\/\/ SetMaster sets up the master settings\nfunc (s *Server) SetMaster() {\n\tchecker := &mainChecker{logger: s.Log}\n\tfor s.serving {\n\t\ttime.Sleep(intentWait)\n\t\ts.LastMaster = time.Now()\n\n\t\tfleet := checker.discover()\n\t\tmatcher := make(map[string][]*pbd.RegistryEntry)\n\t\thasMaster := make(map[string]int)\n\t\tfor _, entry := range fleet.GetServices() {\n\t\t\tif !entry.GetIgnoresMaster() {\n\t\t\t\tif _, ok := matcher[entry.GetName()]; !ok {\n\t\t\t\t\tif entry.GetMaster() {\n\t\t\t\t\t\thasMaster[entry.GetName()]++\n\t\t\t\t\t}\n\t\t\t\t\tmatcher[entry.GetName()] = []*pbd.RegistryEntry{entry}\n\t\t\t\t} else {\n\t\t\t\t\tif entry.GetMaster() {\n\t\t\t\t\t\thasMaster[entry.GetName()] = 1\n\t\t\t\t\t\tmatcher[entry.GetName()] = append(matcher[entry.GetName()], entry)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor key, entries := range matcher {\n\t\t\tif hasMaster[key] > 1 {\n\t\t\t\thasMaster[key] = 1\n\t\t\t\tseen := false\n\t\t\t\tfor _, entry := range entries {\n\t\t\t\t\tif seen && entry.GetMaster() {\n\t\t\t\t\t\tchecker.master(entry, false)\n\t\t\t\t\t} else if entry.GetMaster() {\n\t\t\t\t\t\tseen = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif hasMaster[key] == 0 {\n\t\t\t\tfor _, entry := range entries {\n\t\t\t\t\tif checker.master(entry, true) {\n\t\t\t\t\t\tentry.Master = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Init builds up the server\nfunc Init(config *pb.Config) *Server {\n\ts := &Server{\n\t\t&goserver.GoServer{},\n\t\tconfig,\n\t\ttrue,\n\t\ttime.Now(),\n\t\ttime.Now(),\n\t\t&sync.Mutex{},\n\t\tmake(map[string]map[string]struct{}),\n\t\t&prodGetter{},\n\t}\n\treturn s\n}\n\nfunc main() {\n\tconfig, err := loadConfig(\"config.pb\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Fatal loading of config: %v\", err)\n\t}\n\n\ts := Init(config)\n\n\tvar quiet = flag.Bool(\"quiet\", false, \"Show all output\")\n\tflag.Parse()\n\n\tif *quiet {\n\t\tlog.SetFlags(0)\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\ts.Register = s\n\ts.PrepServer()\n\ts.GoServer.Killme = false\n\ts.RegisterServer(\"gobuildmaster\", false)\n\ts.RegisterServingTask(s.MatchIntent)\n\ts.RegisterServingTask(s.SetMaster)\n\ts.RegisterRepeatingTask(s.buildWorld, time.Minute)\n\n\terr = s.Serve()\n\tif err != nil {\n\t\tlog.Fatalf(\"Serve error: %v\", err)\n\t}\n}\n<commit_msg>Converts master loop to serving task. This closes #85<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/brotherlogic\/goserver\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpbd \"github.com\/brotherlogic\/discovery\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildmaster\/proto\"\n\tpbs \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\tpbg \"github.com\/brotherlogic\/goserver\/proto\"\n\t\"github.com\/brotherlogic\/goserver\/utils\"\n)\n\nconst (\n\tintentWait = time.Second\n)\n\n\/\/ Server the main server type\ntype Server struct {\n\t*goserver.GoServer\n\tconfig *pb.Config\n\tserving bool\n\tLastIntent time.Time\n\tLastMaster time.Time\n\tworldMutex *sync.Mutex\n\tworld map[string]map[string]struct{}\n\tgetter getter\n}\n\ntype prodGetter struct{}\n\nfunc (g *prodGetter) getJobs(server *pbd.RegistryEntry) (*pbs.JobList, error) {\n\tlist := &pbs.JobList{}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tconn, err := grpc.Dial(server.GetIp()+\":\"+strconv.Itoa(int(server.GetPort())), grpc.WithInsecure())\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn list, err\n\t}\n\n\tslave := pbs.NewGoBuildSlaveClient(conn)\n\tr, err := slave.List(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\treturn r, err\n}\n\nfunc (g *prodGetter) getSlaves() (*pbd.ServiceList, error) {\n\tret := &pbd.ServiceList{}\n\n\tconn, err := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tfor _, s := range r.Services {\n\t\tif s.GetName() == \"gobuildslave\" {\n\t\t\tret.Services = append(ret.Services, s)\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\ntype mainChecker struct {\n\tprev []string\n\tlogger func(string)\n}\n\nfunc getIP(servertype, servername string) (string, int) {\n\tconn, _ := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn \"\", -1\n\t}\n\tfor _, s := range r.Services {\n\t\tif s.Name == servertype && s.Identifier == servername {\n\t\t\treturn s.Ip, int(s.Port)\n\t\t}\n\t}\n\n\treturn \"\", -1\n}\n\nfunc (t *mainChecker) getprev() []string {\n\treturn t.prev\n}\nfunc (t *mainChecker) setprev(v []string) {\n\tt.prev = v\n}\n\nfunc (t *mainChecker) assess(server string) (*pbs.JobList, *pbs.Config) {\n\tlist := &pbs.JobList{}\n\tconf := &pbs.Config{}\n\n\tip, port := getIP(\"gobuildslave\", server)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tconn, err := grpc.Dial(ip+\":\"+strconv.Itoa(port), grpc.WithInsecure())\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\tslave := pbs.NewGoBuildSlaveClient(conn)\n\tr, err := slave.List(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\tr2, err := slave.GetConfig(ctx, &pbs.Empty{}, grpc.FailFast(false))\n\tif err != nil {\n\t\treturn list, conf\n\t}\n\n\treturn r, r2\n}\n\nfunc (t *mainChecker) master(entry *pbd.RegistryEntry, master bool) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\tconn, _ := grpc.Dial(entry.GetIp()+\":\"+strconv.Itoa(int(entry.GetPort())), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tserver := pbg.NewGoserverServiceClient(conn)\n\t_, err := server.Mote(ctx, &pbg.MoteRequest{Master: master}, grpc.FailFast(false))\n\tif err != nil {\n\t\tt.logger(fmt.Sprintf(\"Master REJECT(%v): %v\", entry, err))\n\t}\n\n\treturn err == nil\n}\n\nfunc runJob(job *pbs.JobSpec, server string) {\n\tif server != \"\" {\n\t\tip, port := getIP(\"gobuildslave\", server)\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\tdefer cancel()\n\t\tconn, _ := grpc.Dial(ip+\":\"+strconv.Itoa(port), grpc.WithInsecure())\n\t\tdefer conn.Close()\n\n\t\tslave := pbs.NewGoBuildSlaveClient(conn)\n\t\tjob.Server = server\n\t\tslave.Run(ctx, job, grpc.FailFast(false))\n\t}\n}\n\nfunc (t *mainChecker) discover() *pbd.ServiceList {\n\tret := &pbd.ServiceList{}\n\n\tconn, _ := grpc.Dial(utils.RegistryIP+\":\"+strconv.Itoa(utils.RegistryPort), grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tr, err := registry.ListAllServices(ctx, &pbd.Empty{}, grpc.FailFast(false))\n\tif err == nil {\n\t\tfor _, s := range r.Services {\n\t\t\tret.Services = append(ret.Services, s)\n\t\t}\n\t}\n\n\treturn ret\n}\n\n\/\/ DoRegister Registers this server\nfunc (s Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterGoBuildMasterServer(server, &s)\n}\n\n\/\/ ReportHealth determines if the server is healthy\nfunc (s Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/ Mote promotes\/demotes this server\nfunc (s Server) Mote(master bool) error {\n\treturn nil\n}\n\n\/\/GetState gets the state of the server\nfunc (s Server) GetState() []*pbg.State {\n\treturn []*pbg.State{&pbg.State{Key: \"last_intent\", TimeValue: s.LastIntent.Unix()},\n\t\t&pbg.State{Key: \"last_master\", TimeValue: s.LastMaster.Unix()},\n\t\t&pbg.State{Key: \"world\", Text: fmt.Sprintf(\"%v\", s.world)}}\n}\n\n\/\/Compare compares current state to desired state\nfunc (s Server) Compare(ctx context.Context, in *pb.Empty) (*pb.CompareResponse, error) {\n\tresp := &pb.CompareResponse{}\n\tlist, _ := getFleetStatus(&mainChecker{logger: s.Log})\n\tcc := &pb.Config{}\n\tfor _, jlist := range list {\n\t\tfor _, job := range jlist.GetDetails() {\n\t\t\tcc.Intents = append(cc.Intents, &pb.Intent{Spec: job.GetSpec()})\n\t\t}\n\t}\n\tresp.Current = cc\n\tresp.Desired = s.config\n\n\treturn resp, nil\n}\n\nfunc getConfig(c checker) *pb.Config {\n\tlist, _ := getFleetStatus(c)\n\tconfig := &pb.Config{}\n\n\tfor _, jlist := range list {\n\t\tfor _, job := range jlist.Details {\n\t\t\tfound := false\n\t\t\tfor _, ij := range config.Intents {\n\t\t\t\tif job.Spec.Name == ij.Spec.Name {\n\t\t\t\t\tij.Count++\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\tconfig.Intents = append(config.Intents, &pb.Intent{Spec: &pbs.JobSpec{Name: job.Spec.Name}, Count: 1})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn config\n}\n\n\/\/ MatchIntent tries to match the intent with the state of production\nfunc (s *Server) MatchIntent() {\n\tchecker := &mainChecker{logger: s.Log}\n\tfor s.serving {\n\t\ttime.Sleep(intentWait)\n\t\ts.LastIntent = time.Now()\n\n\t\tstate := getConfig(checker)\n\t\tdiff := configDiff(s.config, state)\n\t\tjoblist := runJobs(diff)\n\t\tfor _, job := range joblist {\n\t\t\trunJob(job, chooseServer(job, checker))\n\t\t}\n\t}\n}\n\n\/\/ SetMaster sets up the master settings\nfunc (s *Server) SetMaster() {\n\tt := time.Now()\n\tchecker := &mainChecker{logger: s.Log}\n\ts.LastMaster = time.Now()\n\n\tfleet := checker.discover()\n\tmatcher := make(map[string][]*pbd.RegistryEntry)\n\thasMaster := make(map[string]int)\n\tfor _, entry := range fleet.GetServices() {\n\t\tif !entry.GetIgnoresMaster() {\n\t\t\tif _, ok := matcher[entry.GetName()]; !ok {\n\t\t\t\tif entry.GetMaster() {\n\t\t\t\t\thasMaster[entry.GetName()]++\n\t\t\t\t}\n\t\t\t\tmatcher[entry.GetName()] = []*pbd.RegistryEntry{entry}\n\t\t\t} else {\n\t\t\t\tif entry.GetMaster() {\n\t\t\t\t\thasMaster[entry.GetName()] = 1\n\t\t\t\t\tmatcher[entry.GetName()] = append(matcher[entry.GetName()], entry)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor key, entries := range matcher {\n\t\tif hasMaster[key] > 1 {\n\t\t\thasMaster[key] = 1\n\t\t\tseen := false\n\t\t\tfor _, entry := range entries {\n\t\t\t\tif seen && entry.GetMaster() {\n\t\t\t\t\tchecker.master(entry, false)\n\t\t\t\t} else if entry.GetMaster() {\n\t\t\t\t\tseen = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif hasMaster[key] == 0 {\n\t\t\tfor _, entry := range entries {\n\t\t\t\tif checker.master(entry, true) {\n\t\t\t\t\tentry.Master = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ts.LogFunction(\"SetMasterRun\", t)\n}\n\n\/\/Init builds up the server\nfunc Init(config *pb.Config) *Server {\n\ts := &Server{\n\t\t&goserver.GoServer{},\n\t\tconfig,\n\t\ttrue,\n\t\ttime.Now(),\n\t\ttime.Now(),\n\t\t&sync.Mutex{},\n\t\tmake(map[string]map[string]struct{}),\n\t\t&prodGetter{},\n\t}\n\treturn s\n}\n\nfunc main() {\n\tconfig, err := loadConfig(\"config.pb\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Fatal loading of config: %v\", err)\n\t}\n\n\ts := Init(config)\n\n\tvar quiet = flag.Bool(\"quiet\", false, \"Show all output\")\n\tflag.Parse()\n\n\tif *quiet {\n\t\tlog.SetFlags(0)\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\ts.Register = s\n\ts.PrepServer()\n\ts.GoServer.Killme = false\n\ts.RegisterServer(\"gobuildmaster\", false)\n\ts.RegisterServingTask(s.MatchIntent)\n\ts.RegisterRepeatingTask(s.SetMaster, time.Second)\n\ts.RegisterRepeatingTask(s.buildWorld, time.Minute)\n\n\terr = s.Serve()\n\tif err != nil {\n\t\tlog.Fatalf(\"Serve error: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package device\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\ntype nicMACVLAN struct {\n\tdeviceCommon\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicMACVLAN) validateConfig() error {\n\tif d.instance.Type() != instance.TypeContainer {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\trequiredFields := []string{\"parent\"}\n\toptionalFields := []string{\"name\", \"mtu\", \"hwaddr\", \"vlan\", \"maas.subnet.ipv4\", \"maas.subnet.ipv6\"}\n\terr := config.ValidateDevice(nicValidationRules(requiredFields, optionalFields), d.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicMACVLAN) validateEnvironment() error {\n\tif d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\tif !shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"parent\"])) {\n\t\treturn fmt.Errorf(\"Parent device '%s' doesn't exist\", d.config[\"parent\"])\n\t}\n\n\treturn nil\n}\n\n\/\/ Start is run when the device is added to the container.\nfunc (d *nicMACVLAN) Start() (*RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsaveData := make(map[string]string)\n\n\t\/\/ Decide which parent we should use based on VLAN setting.\n\tparentName := NetworkGetHostDevice(d.config[\"parent\"], d.config[\"vlan\"])\n\n\t\/\/ Record the temporary device name used for deletion later.\n\tsaveData[\"host_name\"] = NetworkRandomDevName(\"mac\")\n\n\t\/\/ Create VLAN parent device if needed.\n\tcreatedDev, err := NetworkCreateVlanDeviceIfNeeded(d.config[\"parent\"], parentName, d.config[\"vlan\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Record whether we created the parent device or not so it can be removed on stop.\n\tsaveData[\"last_state.created\"] = fmt.Sprintf(\"%t\", createdDev)\n\n\t\/\/ Create MACVLAN interface.\n\t_, err = shared.RunCommand(\"ip\", \"link\", \"add\", \"dev\", saveData[\"host_name\"], \"link\", parentName, \"type\", \"macvlan\", \"mode\", \"bridge\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set the MAC address.\n\tif d.config[\"hwaddr\"] != \"\" {\n\t\t_, err := shared.RunCommand(\"ip\", \"link\", \"set\", \"dev\", saveData[\"host_name\"], \"address\", d.config[\"hwaddr\"])\n\t\tif err != nil {\n\t\t\tif createdDev {\n\t\t\t\tNetworkRemoveInterface(saveData[\"host_name\"])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Failed to set the MAC address: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ Set the MTU.\n\tif d.config[\"mtu\"] != \"\" {\n\t\t_, err := shared.RunCommand(\"ip\", \"link\", \"set\", \"dev\", saveData[\"host_name\"], \"mtu\", d.config[\"mtu\"])\n\t\tif err != nil {\n\t\t\tif createdDev {\n\t\t\t\tNetworkRemoveInterface(saveData[\"host_name\"])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Failed to set the MTU: %s\", err)\n\t\t}\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := RunConfig{}\n\trunConf.NetworkInterface = []RunConfigItem{\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: saveData[\"host_name\"]},\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ Stop is run when the device is removed from the container.\nfunc (d *nicMACVLAN) Stop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t\t\"last_state.hwaddr\": \"\",\n\t\t\"last_state.mtu\": \"\",\n\t\t\"last_state.created\": \"\",\n\t})\n\n\terrs := []error{}\n\tv := d.volatileGet()\n\n\t\/\/ Delete the detached device.\n\tif v[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", v[\"host_name\"])) {\n\t\terr := NetworkRemoveInterface(v[\"host_name\"])\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\t\/\/ This will delete the parent interface if we created it for VLAN parent.\n\tif shared.IsTrue(v[\"last_state.created\"]) {\n\t\tparentName := NetworkGetHostDevice(d.config[\"parent\"], d.config[\"vlan\"])\n\t\terr := NetworkRemoveInterface(parentName)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"%v\", errs)\n\t}\n\n\treturn nil\n}\n<commit_msg>device\/nic\/macvlan: Updates for post stop hooks<commit_after>package device\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\ntype nicMACVLAN struct {\n\tdeviceCommon\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicMACVLAN) validateConfig() error {\n\tif d.instance.Type() != instance.TypeContainer {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\trequiredFields := []string{\"parent\"}\n\toptionalFields := []string{\"name\", \"mtu\", \"hwaddr\", \"vlan\", \"maas.subnet.ipv4\", \"maas.subnet.ipv6\"}\n\terr := config.ValidateDevice(nicValidationRules(requiredFields, optionalFields), d.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicMACVLAN) validateEnvironment() error {\n\tif d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\tif !shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"parent\"])) {\n\t\treturn fmt.Errorf(\"Parent device '%s' doesn't exist\", d.config[\"parent\"])\n\t}\n\n\treturn nil\n}\n\n\/\/ Start is run when the device is added to the container.\nfunc (d *nicMACVLAN) Start() (*RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsaveData := make(map[string]string)\n\n\t\/\/ Decide which parent we should use based on VLAN setting.\n\tparentName := NetworkGetHostDevice(d.config[\"parent\"], d.config[\"vlan\"])\n\n\t\/\/ Record the temporary device name used for deletion later.\n\tsaveData[\"host_name\"] = NetworkRandomDevName(\"mac\")\n\n\t\/\/ Create VLAN parent device if needed.\n\tcreatedDev, err := NetworkCreateVlanDeviceIfNeeded(d.config[\"parent\"], parentName, d.config[\"vlan\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Record whether we created the parent device or not so it can be removed on stop.\n\tsaveData[\"last_state.created\"] = fmt.Sprintf(\"%t\", createdDev)\n\n\t\/\/ Create MACVLAN interface.\n\t_, err = shared.RunCommand(\"ip\", \"link\", \"add\", \"dev\", saveData[\"host_name\"], \"link\", parentName, \"type\", \"macvlan\", \"mode\", \"bridge\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set the MAC address.\n\tif d.config[\"hwaddr\"] != \"\" {\n\t\t_, err := shared.RunCommand(\"ip\", \"link\", \"set\", \"dev\", saveData[\"host_name\"], \"address\", d.config[\"hwaddr\"])\n\t\tif err != nil {\n\t\t\tif createdDev {\n\t\t\t\tNetworkRemoveInterface(saveData[\"host_name\"])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Failed to set the MAC address: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ Set the MTU.\n\tif d.config[\"mtu\"] != \"\" {\n\t\t_, err := shared.RunCommand(\"ip\", \"link\", \"set\", \"dev\", saveData[\"host_name\"], \"mtu\", d.config[\"mtu\"])\n\t\tif err != nil {\n\t\t\tif createdDev {\n\t\t\t\tNetworkRemoveInterface(saveData[\"host_name\"])\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Failed to set the MTU: %s\", err)\n\t\t}\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := RunConfig{}\n\trunConf.NetworkInterface = []RunConfigItem{\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: saveData[\"host_name\"]},\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ Stop is run when the device is removed from the instance.\nfunc (d *nicMACVLAN) Stop() (*RunConfig, error) {\n\tv := d.volatileGet()\n\trunConfig := RunConfig{\n\t\tPostHooks: []func() error{d.postStop},\n\t\tNetworkInterface: []RunConfigItem{\n\t\t\t{Key: \"link\", Value: v[\"host_name\"]},\n\t\t},\n\t}\n\n\treturn &runConfig, nil\n}\n\n\/\/ postStop is run after the device is removed from the instance.\nfunc (d *nicMACVLAN) postStop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t\t\"last_state.hwaddr\": \"\",\n\t\t\"last_state.mtu\": \"\",\n\t\t\"last_state.created\": \"\",\n\t})\n\n\terrs := []error{}\n\tv := d.volatileGet()\n\n\t\/\/ Delete the detached device.\n\tif v[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", v[\"host_name\"])) {\n\t\terr := NetworkRemoveInterface(v[\"host_name\"])\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\t\/\/ This will delete the parent interface if we created it for VLAN parent.\n\tif shared.IsTrue(v[\"last_state.created\"]) {\n\t\tparentName := NetworkGetHostDevice(d.config[\"parent\"], d.config[\"vlan\"])\n\t\terr := NetworkRemoveInterface(parentName)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"%v\", errs)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\n\/\/ Key key for CheckBundleConfig\ntype Key string\n\n\/\/ Constants per type as defined in\n\/\/ https:\/\/login.circonus.com\/resources\/api\/calls\/check_bundle\nconst (\n\tDefaultCheckBundleMetricLimit = -1 \/\/ unlimited\n\tDefaultCheckBundleStatus = \"active\"\n\tDefaultCheckBundlePeriod = 60\n\tDefaultCheckBundleTimeout = 10\n\n\tAsyncMetrics = Key(\"async_metrics\")\n\tReverseSecretKey = Key(\"reverse:secret_key\")\n\tSecretKey = Key(\"secret\")\n\tSubmissionURL = Key(\"submission_url\")\n\n\t\/\/ \"http\"\n\tAuthMethod = Key(\"auth_method\")\n\tAuthPassword = Key(\"auth_password\")\n\tAuthUser = Key(\"auth_user\")\n\tBody = Key(\"body\")\n\tCAChain = Key(\"ca_chain\")\n\tCertFile = Key(\"certificate_file\")\n\tCiphers = Key(\"ciphers\")\n\tCode = Key(\"code\")\n\tExtract = Key(\"extract\")\n\t\/\/ HeaderPrefix is special because the actual key is dynamic and matches:\n\t\/\/ `header_(\\S+)`\n\tHeaderPrefix = Key(\"header_\")\n\tHTTPVersion = Key(\"http_version\")\n\tKeyFile = Key(\"key_file\")\n\tMethod = Key(\"method\")\n\tPayload = Key(\"payload\")\n\tReadLimit = Key(\"read_limit\")\n\tRedirects = Key(\"redirects\")\n\tURL = Key(\"url\")\n)\n<commit_msg>upd: commeting<commit_after>package config\n\n\/\/ Key for CheckBundleConfig options\ntype Key string\n\n\/\/ Constants per type as defined in\n\/\/ https:\/\/login.circonus.com\/resources\/api\/calls\/check_bundle\nconst (\n\t\/\/\n\t\/\/ default settings for api.NewCheckBundle()\n\t\/\/\n\tDefaultCheckBundleMetricLimit = -1 \/\/ unlimited\n\tDefaultCheckBundleStatus = \"active\"\n\tDefaultCheckBundlePeriod = 60\n\tDefaultCheckBundleTimeout = 10\n\n\t\/\/\n\t\/\/ common (apply to more than one check type)\n\t\/\/\n\tAsyncMetrics = Key(\"async_metrics\")\n\n\t\/\/\n\t\/\/ httptrap\n\t\/\/\n\tSecretKey = Key(\"secret\")\n\n\t\/\/\n\t\/\/ \"http\"\n\t\/\/\n\tAuthMethod = Key(\"auth_method\")\n\tAuthPassword = Key(\"auth_password\")\n\tAuthUser = Key(\"auth_user\")\n\tBody = Key(\"body\")\n\tCAChain = Key(\"ca_chain\")\n\tCertFile = Key(\"certificate_file\")\n\tCiphers = Key(\"ciphers\")\n\tCode = Key(\"code\")\n\tExtract = Key(\"extract\")\n\t\/\/ HeaderPrefix is special because the actual key is dynamic and matches:\n\t\/\/ `header_(\\S+)`\n\tHeaderPrefix = Key(\"header_\")\n\tHTTPVersion = Key(\"http_version\")\n\tKeyFile = Key(\"key_file\")\n\tMethod = Key(\"method\")\n\tPayload = Key(\"payload\")\n\tReadLimit = Key(\"read_limit\")\n\tRedirects = Key(\"redirects\")\n\tURL = Key(\"url\")\n\n\t\/\/\n\t\/\/ reserved - config option(s) can't actually be set - here for r\/o access\n\t\/\/\n\tReverseSecretKey = Key(\"reverse:secret_key\")\n\tSubmissionURL = Key(\"submission_url\")\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Circonus, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Contact Group API support - Fetch, Create, Update, Delete, and Search\n\/\/ See: https:\/\/login.circonus.com\/resources\/api\/calls\/contact_group\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\n\t\"github.com\/circonus-labs\/circonus-gometrics\/api\/config\"\n)\n\n\/\/ ContactGroupAlertFormats define alert formats\ntype ContactGroupAlertFormats struct {\n\tLongMessage *string `json:\"long_message\"`\n\tLongSubject *string `json:\"long_subject\"`\n\tLongSummary *string `json:\"long_summary\"`\n\tShortMessage *string `json:\"short_message\"`\n\tShortSummary *string `json:\"short_summary\"`\n}\n\n\/\/ ContactGroupContactsExternal external contacts\ntype ContactGroupContactsExternal struct {\n\tInfo string `json:\"contact_info\"`\n\tMethod string `json:\"method\"`\n}\n\n\/\/ ContactGroupContactsUser user contacts\ntype ContactGroupContactsUser struct {\n\tInfo string `json:\"_contact_info,omitempty\"`\n\tMethod string `json:\"method\"`\n\tUserCID string `json:\"user\"`\n}\n\n\/\/ ContactGroupContacts list of contacts\ntype ContactGroupContacts struct {\n\tExternal []ContactGroupContactsExternal `json:\"external\"`\n\tUsers []ContactGroupContactsUser `json:\"users\"`\n}\n\n\/\/ ContactGroupEscalation defines escalations for severity levels\ntype ContactGroupEscalation struct {\n\tAfter uint `json:\"after\"`\n\tContactGroupCID string `json:\"contact_group\"`\n}\n\n\/\/ ContactGroup defines a contact group. See https:\/\/login.circonus.com\/resources\/api\/calls\/contact_group for more information.\ntype ContactGroup struct {\n\tCID string `json:\"_cid,omitempty\"`\n\tLastModified uint `json:\"_last_modified,omitempty\"`\n\tLastModifiedBy string `json:\"_last_modified_by,omitempty\"`\n\tAggregationWindow uint `json:\"aggregation_window,omitempty\"`\n\tAlertFormats ContactGroupAlertFormats `json:\"alert_formats,omitempty\"`\n\tContacts ContactGroupContacts `json:\"contacts,omitempty\"`\n\tEscalations []*ContactGroupEscalation `json:\"escalations,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tReminders []uint `json:\"reminders,omitempty\"`\n\tTags []string `json:\"tags,omitempty\"`\n}\n\n\/\/ NewContactGroup returns a ContactGroup (with defaults, if applicable)\nfunc NewContactGroup() *ContactGroup {\n\treturn &ContactGroup{\n\t\tEscalations: make([]*ContactGroupEscalation, config.NumSeverityLevels),\n\t\tReminders: make([]uint, config.NumSeverityLevels),\n\t\tContacts: ContactGroupContacts{\n\t\t\tExternal: []ContactGroupContactsExternal{},\n\t\t\tUsers: []ContactGroupContactsUser{},\n\t\t},\n\t}\n}\n\n\/\/ FetchContactGroup retrieves contact group with passed cid.\nfunc (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) {\n\tif cid == nil || *cid == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [none]\")\n\t}\n\n\tgroupCID := string(*cid)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !matched {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\tresult, err := a.Get(groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] fetch contact group, received JSON: %s\", string(result))\n\t}\n\n\tgroup := new(ContactGroup)\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ FetchContactGroups retrieves all contact groups available to the API Token.\nfunc (a *API) FetchContactGroups() (*[]ContactGroup, error) {\n\tresult, err := a.Get(config.ContactGroupPrefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar groups []ContactGroup\n\tif err := json.Unmarshal(result, &groups); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &groups, nil\n}\n\n\/\/ UpdateContactGroup updates passed contact group.\nfunc (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\n\tgroupCID := string(cfg.CID)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !matched {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] update contact group, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Put(groupCID, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup := &ContactGroup{}\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ CreateContactGroup creates a new contact group.\nfunc (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] create contact group, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Post(config.ContactGroupPrefix, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup := &ContactGroup{}\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ DeleteContactGroup deletes passed contact group.\nfunc (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) {\n\tif cfg == nil {\n\t\treturn false, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\treturn a.DeleteContactGroupByCID(CIDType(&cfg.CID))\n}\n\n\/\/ DeleteContactGroupByCID deletes contact group with passed cid.\nfunc (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) {\n\tif cid == nil || *cid == \"\" {\n\t\treturn false, fmt.Errorf(\"Invalid contact group CID [none]\")\n\t}\n\n\tgroupCID := string(*cid)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !matched {\n\t\treturn false, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\t_, err = a.Delete(groupCID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ SearchContactGroups returns contact groups matching the specified\n\/\/ search query and\/or filter. If nil is passed for both parameters\n\/\/ all contact groups will be returned.\nfunc (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) {\n\tq := url.Values{}\n\n\tif searchCriteria != nil && *searchCriteria != \"\" {\n\t\tq.Set(\"search\", string(*searchCriteria))\n\t}\n\n\tif filterCriteria != nil && len(*filterCriteria) > 0 {\n\t\tfor filter, criteria := range *filterCriteria {\n\t\t\tfor _, val := range criteria {\n\t\t\t\tq.Add(filter, val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif q.Encode() == \"\" {\n\t\treturn a.FetchContactGroups()\n\t}\n\n\treqURL := url.URL{\n\t\tPath: config.ContactGroupPrefix,\n\t\tRawQuery: q.Encode(),\n\t}\n\n\tresult, err := a.Get(reqURL.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"[ERROR] API call error %+v\", err)\n\t}\n\n\tvar groups []ContactGroup\n\tif err := json.Unmarshal(result, &groups); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &groups, nil\n}\n<commit_msg>upd: document\/update struct member types to reflect what is received from api<commit_after>\/\/ Copyright 2016 Circonus, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Contact Group API support - Fetch, Create, Update, Delete, and Search\n\/\/ See: https:\/\/login.circonus.com\/resources\/api\/calls\/contact_group\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\n\t\"github.com\/circonus-labs\/circonus-gometrics\/api\/config\"\n)\n\n\/\/ ContactGroupAlertFormats define alert formats\ntype ContactGroupAlertFormats struct {\n\tLongMessage *string `json:\"long_message\"` \/\/ string or null\n\tLongSubject *string `json:\"long_subject\"` \/\/ string or null\n\tLongSummary *string `json:\"long_summary\"` \/\/ string or null\n\tShortMessage *string `json:\"short_message\"` \/\/ string or null\n\tShortSummary *string `json:\"short_summary\"` \/\/ string or null\n}\n\n\/\/ ContactGroupContactsExternal external contacts\ntype ContactGroupContactsExternal struct {\n\tInfo string `json:\"contact_info\"` \/\/ string\n\tMethod string `json:\"method\"` \/\/ string\n}\n\n\/\/ ContactGroupContactsUser user contacts\ntype ContactGroupContactsUser struct {\n\tInfo string `json:\"_contact_info,omitempty\"` \/\/ string\n\tMethod string `json:\"method\"` \/\/ string\n\tUserCID string `json:\"user\"` \/\/ string\n}\n\n\/\/ ContactGroupContacts list of contacts\ntype ContactGroupContacts struct {\n\tExternal []ContactGroupContactsExternal `json:\"external\"` \/\/ [] len >= 0\n\tUsers []ContactGroupContactsUser `json:\"users\"` \/\/ [] len >= 0\n}\n\n\/\/ ContactGroupEscalation defines escalations for severity levels\ntype ContactGroupEscalation struct {\n\tAfter uint `json:\"after\"` \/\/ uint\n\tContactGroupCID string `json:\"contact_group\"` \/\/ string\n}\n\n\/\/ ContactGroup defines a contact group. See https:\/\/login.circonus.com\/resources\/api\/calls\/contact_group for more information.\ntype ContactGroup struct {\n\tCID string `json:\"_cid,omitempty\"` \/\/ string\n\tLastModified uint `json:\"_last_modified,omitempty\"` \/\/ uint\n\tLastModifiedBy string `json:\"_last_modified_by,omitempty\"` \/\/ string\n\tAggregationWindow uint `json:\"aggregation_window,omitempty\"` \/\/ uint\n\tAlertFormats ContactGroupAlertFormats `json:\"alert_formats,omitempty\"` \/\/ ContactGroupAlertFormats\n\tContacts ContactGroupContacts `json:\"contacts,omitempty\"` \/\/ ContactGroupContacts\n\tEscalations []*ContactGroupEscalation `json:\"escalations,omitempty\"` \/\/ [] len == 5, elements: ContactGroupEscalation or null\n\tName string `json:\"name,omitempty\"` \/\/ string\n\tReminders []uint `json:\"reminders,omitempty\"` \/\/ [] len == 5\n\tTags []string `json:\"tags,omitempty\"` \/\/ [] len >= 0\n}\n\n\/\/ NewContactGroup returns a ContactGroup (with defaults, if applicable)\nfunc NewContactGroup() *ContactGroup {\n\treturn &ContactGroup{\n\t\tEscalations: make([]*ContactGroupEscalation, config.NumSeverityLevels),\n\t\tReminders: make([]uint, config.NumSeverityLevels),\n\t\tContacts: ContactGroupContacts{\n\t\t\tExternal: []ContactGroupContactsExternal{},\n\t\t\tUsers: []ContactGroupContactsUser{},\n\t\t},\n\t}\n}\n\n\/\/ FetchContactGroup retrieves contact group with passed cid.\nfunc (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) {\n\tif cid == nil || *cid == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [none]\")\n\t}\n\n\tgroupCID := string(*cid)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !matched {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\tresult, err := a.Get(groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] fetch contact group, received JSON: %s\", string(result))\n\t}\n\n\tgroup := new(ContactGroup)\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ FetchContactGroups retrieves all contact groups available to the API Token.\nfunc (a *API) FetchContactGroups() (*[]ContactGroup, error) {\n\tresult, err := a.Get(config.ContactGroupPrefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar groups []ContactGroup\n\tif err := json.Unmarshal(result, &groups); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &groups, nil\n}\n\n\/\/ UpdateContactGroup updates passed contact group.\nfunc (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\n\tgroupCID := string(cfg.CID)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !matched {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] update contact group, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Put(groupCID, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup := &ContactGroup{}\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ CreateContactGroup creates a new contact group.\nfunc (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\n\tjsonCfg, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.Debug {\n\t\ta.Log.Printf(\"[DEBUG] create contact group, sending JSON: %s\", string(jsonCfg))\n\t}\n\n\tresult, err := a.Post(config.ContactGroupPrefix, jsonCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup := &ContactGroup{}\n\tif err := json.Unmarshal(result, group); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn group, nil\n}\n\n\/\/ DeleteContactGroup deletes passed contact group.\nfunc (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) {\n\tif cfg == nil {\n\t\treturn false, fmt.Errorf(\"Invalid contact group config [nil]\")\n\t}\n\treturn a.DeleteContactGroupByCID(CIDType(&cfg.CID))\n}\n\n\/\/ DeleteContactGroupByCID deletes contact group with passed cid.\nfunc (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) {\n\tif cid == nil || *cid == \"\" {\n\t\treturn false, fmt.Errorf(\"Invalid contact group CID [none]\")\n\t}\n\n\tgroupCID := string(*cid)\n\n\tmatched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !matched {\n\t\treturn false, fmt.Errorf(\"Invalid contact group CID [%s]\", groupCID)\n\t}\n\n\t_, err = a.Delete(groupCID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ SearchContactGroups returns contact groups matching the specified\n\/\/ search query and\/or filter. If nil is passed for both parameters\n\/\/ all contact groups will be returned.\nfunc (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) {\n\tq := url.Values{}\n\n\tif searchCriteria != nil && *searchCriteria != \"\" {\n\t\tq.Set(\"search\", string(*searchCriteria))\n\t}\n\n\tif filterCriteria != nil && len(*filterCriteria) > 0 {\n\t\tfor filter, criteria := range *filterCriteria {\n\t\t\tfor _, val := range criteria {\n\t\t\t\tq.Add(filter, val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif q.Encode() == \"\" {\n\t\treturn a.FetchContactGroups()\n\t}\n\n\treqURL := url.URL{\n\t\tPath: config.ContactGroupPrefix,\n\t\tRawQuery: q.Encode(),\n\t}\n\n\tresult, err := a.Get(reqURL.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"[ERROR] API call error %+v\", err)\n\t}\n\n\tvar groups []ContactGroup\n\tif err := json.Unmarshal(result, &groups); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &groups, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/containernetworking\/cni\/pkg\/types\"\n)\n\n\/\/ OVNNetConf is the Go structure representing configuration for the\n\/\/ ovn-kubernetes CNI plugin\ntype OVNNetConf struct {\n\ttypes.NetConf\n\n\tConfigFilePath string `json:\"configFilePath,omitempty\"`\n}\n\n\/\/ WriteCNIConfig writes a CNI JSON config file to directory given by global config\nfunc WriteCNIConfig(configFilePath string) error {\n\tbytes, err := json.Marshal(&OVNNetConf{\n\t\tNetConf: types.NetConf{\n\t\t\tCNIVersion: \"0.3.1\",\n\t\t\tName: \"ovn-kubernetes\",\n\t\t\tType: CNI.Plugin,\n\t\t},\n\t\tConfigFilePath: configFilePath,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal CNI config JSON: %v\", err)\n\t}\n\n\t\/\/ Install the CNI config file after all initialization is done\n\t\/\/ MkdirAll() returns no error if the path already exists\n\terr = os.MkdirAll(CNI.ConfDir, os.ModeDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Always create the CNI config for consistency.\n\tconfFile := filepath.Join(CNI.ConfDir, \"10-ovn-kubernetes.conf\")\n\n\tvar f *os.File\n\tf, err = os.OpenFile(confFile, os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(bytes)\n\treturn err\n}\n\n\/\/ ReadCNIConfig unmarshals a CNI JSON config into an OVNNetConf structure\nfunc ReadCNIConfig(bytes []byte) (*OVNNetConf, error) {\n\tconf := &OVNNetConf{}\n\tif err := json.Unmarshal(bytes, conf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn conf, nil\n}\n<commit_msg>config: Use temporary file and rename to write CNI config atomically<commit_after>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/containernetworking\/cni\/pkg\/types\"\n)\n\n\/\/ OVNNetConf is the Go structure representing configuration for the\n\/\/ ovn-kubernetes CNI plugin\ntype OVNNetConf struct {\n\ttypes.NetConf\n\n\tConfigFilePath string `json:\"configFilePath,omitempty\"`\n}\n\n\/\/ WriteCNIConfig writes a CNI JSON config file to directory given by global config\nfunc WriteCNIConfig(configFilePath string) error {\n\tbytes, err := json.Marshal(&OVNNetConf{\n\t\tNetConf: types.NetConf{\n\t\t\tCNIVersion: \"0.3.1\",\n\t\t\tName: \"ovn-kubernetes\",\n\t\t\tType: CNI.Plugin,\n\t\t},\n\t\tConfigFilePath: configFilePath,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal CNI config JSON: %v\", err)\n\t}\n\n\t\/\/ Install the CNI config file after all initialization is done\n\t\/\/ MkdirAll() returns no error if the path already exists\n\terr = os.MkdirAll(CNI.ConfDir, os.ModeDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Always create the CNI config for consistency.\n\tconfFile := filepath.Join(CNI.ConfDir, \"10-ovn-kubernetes.conf\")\n\n\tvar f *os.File\n\tf, err = ioutil.TempFile(CNI.ConfDir, \"ovnkube-\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Rename(f.Name(), confFile)\n}\n\n\/\/ ReadCNIConfig unmarshals a CNI JSON config into an OVNNetConf structure\nfunc ReadCNIConfig(bytes []byte) (*OVNNetConf, error) {\n\tconf := &OVNNetConf{}\n\tif err := json.Unmarshal(bytes, conf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn conf, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\n\tbinlogdatapb \"vitess.io\/vitess\/go\/vt\/proto\/binlogdata\"\n\t\"vitess.io\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\n\/\/ binlogEvent wraps a raw packet buffer and provides methods to examine it\n\/\/ by partially implementing BinlogEvent. These methods can be composed\n\/\/ into flavor-specific event types to pull in common parsing code.\n\/\/\n\/\/ The default v4 header format is:\n\/\/ offset : size\n\/\/ +============================+\n\/\/ | timestamp 0 : 4 |\n\/\/ +----------------------------+\n\/\/ | type_code 4 : 1 |\n\/\/ +----------------------------+\n\/\/ | server_id 5 : 4 |\n\/\/ +----------------------------+\n\/\/ | event_length 9 : 4 |\n\/\/ +----------------------------+\n\/\/ | next_position 13 : 4 |\n\/\/ +----------------------------+\n\/\/ | flags 17 : 2 |\n\/\/ +----------------------------+\n\/\/ | extra_headers 19 : x-19 |\n\/\/ +============================+\n\/\/ http:\/\/dev.mysql.com\/doc\/internals\/en\/event-header-fields.html\ntype binlogEvent []byte\n\nconst (\n\tsemiSyncIndicator byte = 0xef\n\tsemiSyncAckRequested byte = 0x01\n)\n\n\/\/ isSemiSyncAckRequested examines the given buffer. If semi sync is enables, then the event is preceded by two bytes,\n\/\/ the 1st is a semi-sync indicator, the 2nd specifies whether the source expects an ACK for this event\n\/\/ see https:\/\/dev.mysql.com\/doc\/internals\/en\/semi-sync-binlog-event.html\nfunc isSemiSyncAckRequested(buf []byte) ([]byte, bool) {\n\tackRequested := false\n\tif buf[0] == semiSyncIndicator {\n\t\tackRequested = (buf[1] == semiSyncAckRequested)\n\t\treturn buf[2:], ackRequested\n\t}\n\treturn buf, false\n}\n\n\/\/ dataBytes returns the event bytes without header prefix and without checksum suffix\nfunc (ev binlogEvent) dataBytes(f BinlogFormat) []byte {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\tdata = data[0 : len(data)-4]\n\treturn data\n}\n\n\/\/ IsValid implements BinlogEvent.IsValid().\nfunc (ev binlogEvent) IsValid() bool {\n\tbufLen := len(ev.Bytes())\n\n\t\/\/ The buffer must be at least 19 bytes to contain a valid header.\n\tif bufLen < 19 {\n\t\treturn false\n\t}\n\n\t\/\/ It's now safe to use methods that examine header fields.\n\t\/\/ Let's see if the event is right about its own size.\n\tevLen := ev.Length()\n\tif evLen < 19 || evLen != uint32(bufLen) {\n\t\treturn false\n\t}\n\n\t\/\/ Everything's there, so we shouldn't have any out-of-bounds issues while\n\t\/\/ reading header fields or constant-offset data fields. We should still check\n\t\/\/ bounds any time we compute an offset based on values in the buffer itself.\n\treturn true\n}\n\n\/\/ Bytes returns the underlying byte buffer.\nfunc (ev binlogEvent) Bytes() []byte {\n\treturn []byte(ev)\n}\n\n\/\/ Type returns the type_code field from the header.\nfunc (ev binlogEvent) Type() byte {\n\treturn ev.Bytes()[4]\n}\n\n\/\/ Flags returns the flags field from the header.\nfunc (ev binlogEvent) Flags() uint16 {\n\treturn binary.LittleEndian.Uint16(ev.Bytes()[17 : 17+2])\n}\n\n\/\/ Timestamp returns the timestamp field from the header.\nfunc (ev binlogEvent) Timestamp() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[:4])\n}\n\n\/\/ ServerID returns the server_id field from the header.\nfunc (ev binlogEvent) ServerID() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[5 : 5+4])\n}\n\n\/\/ Length returns the event_length field from the header.\nfunc (ev binlogEvent) Length() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[9 : 9+4])\n}\n\n\/\/ nextPosition the nextPosition field from the header\nfunc (ev binlogEvent) NextPosition() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[13 : 13+4])\n}\n\n\/\/ IsFormatDescription implements BinlogEvent.IsFormatDescription().\nfunc (ev binlogEvent) IsFormatDescription() bool {\n\treturn ev.Type() == eFormatDescriptionEvent\n}\n\n\/\/ IsQuery implements BinlogEvent.IsQuery().\nfunc (ev binlogEvent) IsQuery() bool {\n\treturn ev.Type() == eQueryEvent\n}\n\n\/\/ IsRotate implements BinlogEvent.IsRotate().\nfunc (ev binlogEvent) IsRotate() bool {\n\treturn ev.Type() == eRotateEvent\n}\n\n\/\/ IsXID implements BinlogEvent.IsXID().\nfunc (ev binlogEvent) IsXID() bool {\n\treturn ev.Type() == eXIDEvent\n}\n\n\/\/ IsStop implements BinlogEvent.IsStop().\nfunc (ev binlogEvent) IsStop() bool {\n\treturn ev.Type() == eStopEvent\n}\n\n\/\/ IsIntVar implements BinlogEvent.IsIntVar().\nfunc (ev binlogEvent) IsIntVar() bool {\n\treturn ev.Type() == eIntVarEvent\n}\n\n\/\/ IsRand implements BinlogEvent.IsRand().\nfunc (ev binlogEvent) IsRand() bool {\n\treturn ev.Type() == eRandEvent\n}\n\n\/\/ IsPreviousGTIDs implements BinlogEvent.IsPreviousGTIDs().\nfunc (ev binlogEvent) IsPreviousGTIDs() bool {\n\treturn ev.Type() == ePreviousGTIDsEvent\n}\n\n\/\/ IsTableMap implements BinlogEvent.IsTableMap().\nfunc (ev binlogEvent) IsTableMap() bool {\n\treturn ev.Type() == eTableMapEvent\n}\n\n\/\/ IsWriteRows implements BinlogEvent.IsWriteRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsWriteRows() bool {\n\treturn ev.Type() == eWriteRowsEventV1 ||\n\t\tev.Type() == eWriteRowsEventV2\n}\n\n\/\/ IsUpdateRows implements BinlogEvent.IsUpdateRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsUpdateRows() bool {\n\treturn ev.Type() == eUpdateRowsEventV1 ||\n\t\tev.Type() == eUpdateRowsEventV2\n}\n\n\/\/ IsDeleteRows implements BinlogEvent.IsDeleteRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsDeleteRows() bool {\n\treturn ev.Type() == eDeleteRowsEventV1 ||\n\t\tev.Type() == eDeleteRowsEventV2\n}\n\n\/\/ IsPseudo is always false for a native binlogEvent.\nfunc (ev binlogEvent) IsPseudo() bool {\n\treturn false\n}\n\n\/\/ IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON)\nfunc (ev binlogEvent) IsCompressed() bool {\n\treturn ev.Type() == eCompressedEvent\n}\n\n\/\/ Format implements BinlogEvent.Format().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 2 format version\n\/\/ 50 server version string, 0-padded but not necessarily 0-terminated\n\/\/ 4 timestamp (same as timestamp header field)\n\/\/ 1 header length\n\/\/ p (one byte per packet type) event type header lengths\n\/\/ Rest was inferred from reading source code:\n\/\/ 1 checksum algorithm\n\/\/ 4 checksum\nfunc (ev binlogEvent) Format() (f BinlogFormat, err error) {\n\t\/\/ FORMAT_DESCRIPTION_EVENT has a fixed header size of 19\n\t\/\/ because we have to read it before we know the header_length.\n\tdata := ev.Bytes()[19:]\n\n\tf.FormatVersion = binary.LittleEndian.Uint16(data[:2])\n\tif f.FormatVersion != 4 {\n\t\treturn f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, \"format version = %d, we only support version 4\", f.FormatVersion)\n\t}\n\tf.ServerVersion = string(bytes.TrimRight(data[2:2+50], \"\\x00\"))\n\tf.HeaderLength = data[2+50+4]\n\tif f.HeaderLength < 19 {\n\t\treturn f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, \"header length = %d, should be >= 19\", f.HeaderLength)\n\t}\n\n\t\/\/ MySQL\/MariaDB 5.6.1+ always adds a 4-byte checksum to the end of a\n\t\/\/ FORMAT_DESCRIPTION_EVENT, regardless of the server setting. The byte\n\t\/\/ immediately before that checksum tells us which checksum algorithm\n\t\/\/ (if any) is used for the rest of the events.\n\tf.ChecksumAlgorithm = data[len(data)-5]\n\n\tf.HeaderSizes = data[2+50+4+1 : len(data)-5]\n\treturn f, nil\n}\n\n\/\/ Query implements BinlogEvent.Query().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 4 thread_id\n\/\/ 4 execution time\n\/\/ 1 length of db_name, not including NULL terminator (X)\n\/\/ 2 error code\n\/\/ 2 length of status vars block (Y)\n\/\/ Y status vars block\n\/\/ X+1 db_name + NULL terminator\n\/\/ L-X-1-Y SQL statement (no NULL terminator)\nfunc (ev binlogEvent) Query(f BinlogFormat) (query Query, err error) {\n\tconst varsPos = 4 + 4 + 1 + 2 + 2\n\n\tdata := ev.Bytes()[f.HeaderLength:]\n\n\t\/\/ length of database name\n\tdbLen := int(data[4+4])\n\t\/\/ length of status variables block\n\tvarsLen := int(binary.LittleEndian.Uint16(data[4+4+1+2 : 4+4+1+2+2]))\n\n\t\/\/ position of database name\n\tdbPos := varsPos + varsLen\n\t\/\/ position of SQL query\n\tsqlPos := dbPos + dbLen + 1 \/\/ +1 for NULL terminator\n\tif sqlPos > len(data) {\n\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"SQL query position overflows buffer (%v > %v)\", sqlPos, len(data))\n\t}\n\n\t\/\/ We've checked that the buffer is big enough for sql, so everything before\n\t\/\/ it (db and vars) is in-bounds too.\n\tquery.Database = string(data[dbPos : dbPos+dbLen])\n\tquery.SQL = string(data[sqlPos:])\n\n\t\/\/ Scan the status vars for ones we care about. This requires us to know the\n\t\/\/ size of every var that comes before the ones we're interested in.\n\tvars := data[varsPos : varsPos+varsLen]\n\nvarsLoop:\n\tfor pos := 0; pos < len(vars); {\n\t\tcode := vars[pos]\n\t\tpos++\n\n\t\t\/\/ All codes are optional, but if present they must occur in numerically\n\t\t\/\/ increasing order (except for 6 which occurs in the place of 2) to allow\n\t\t\/\/ for backward compatibility.\n\t\tswitch code {\n\t\tcase QFlags2Code, QAutoIncrement:\n\t\t\tpos += 4\n\t\tcase QSQLModeCode:\n\t\t\tpos += 8\n\t\tcase QCatalog: \/\/ Used in MySQL 5.0.0 - 5.0.3\n\t\t\tif pos+1 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CATALOG status var overflows buffer (%v + 1 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tpos += 1 + int(vars[pos]) + 1\n\t\tcase QCatalogNZCode: \/\/ Used in MySQL > 5.0.3 to replace QCatalog\n\t\t\tif pos+1 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CATALOG_NZ_CODE status var overflows buffer (%v + 1 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tpos += 1 + int(vars[pos])\n\t\tcase QCharsetCode:\n\t\t\tif pos+6 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CHARSET_CODE status var overflows buffer (%v + 6 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tquery.Charset = &binlogdatapb.Charset{\n\t\t\t\tClient: int32(binary.LittleEndian.Uint16(vars[pos : pos+2])),\n\t\t\t\tConn: int32(binary.LittleEndian.Uint16(vars[pos+2 : pos+4])),\n\t\t\t\tServer: int32(binary.LittleEndian.Uint16(vars[pos+4 : pos+6])),\n\t\t\t}\n\t\t\tpos += 6\n\t\tdefault:\n\t\t\t\/\/ If we see something higher than what we're interested in, we can stop.\n\t\t\tbreak varsLoop\n\t\t}\n\t}\n\n\treturn query, nil\n}\n\n\/\/ IntVar implements BinlogEvent.IntVar().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 1 variable ID\n\/\/ 8 variable value\nfunc (ev binlogEvent) IntVar(f BinlogFormat) (byte, uint64, error) {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\n\ttyp := data[0]\n\tif typ != IntVarLastInsertID && typ != IntVarInsertID {\n\t\treturn 0, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, \"invalid IntVar ID: %v\", data[0])\n\t}\n\n\tvalue := binary.LittleEndian.Uint64(data[1 : 1+8])\n\treturn typ, value, nil\n}\n\n\/\/ Rand implements BinlogEvent.Rand().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 8 seed 1\n\/\/ 8 seed 2\nfunc (ev binlogEvent) Rand(f BinlogFormat) (seed1 uint64, seed2 uint64, err error) {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\tseed1 = binary.LittleEndian.Uint64(data[0:8])\n\tseed2 = binary.LittleEndian.Uint64(data[8 : 8+8])\n\treturn seed1, seed2, nil\n}\n\nfunc (ev binlogEvent) TableID(f BinlogFormat) uint64 {\n\ttyp := ev.Type()\n\tpos := f.HeaderLength\n\tif f.HeaderSize(typ) == 6 {\n\t\t\/\/ Encoded in 4 bytes.\n\t\treturn uint64(binary.LittleEndian.Uint32(ev[pos : pos+4]))\n\t}\n\n\t\/\/ Encoded in 6 bytes.\n\treturn uint64(ev[pos]) |\n\t\tuint64(ev[pos+1])<<8 |\n\t\tuint64(ev[pos+2])<<16 |\n\t\tuint64(ev[pos+3])<<24 |\n\t\tuint64(ev[pos+4])<<32 |\n\t\tuint64(ev[pos+5])<<40\n}\n\nfunc (ev binlogEvent) NextLogFile(f BinlogFormat) (string, uint64, error) {\n\tdata := ev.dataBytes(f)\n\tpos := 0\n\tlogPos, pos, _ := readUint64(data, pos)\n\tlogFile := string(data[pos:])\n\treturn logFile, logPos, nil\n}\n<commit_msg>simplify code<commit_after>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\n\tbinlogdatapb \"vitess.io\/vitess\/go\/vt\/proto\/binlogdata\"\n\t\"vitess.io\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\n\/\/ binlogEvent wraps a raw packet buffer and provides methods to examine it\n\/\/ by partially implementing BinlogEvent. These methods can be composed\n\/\/ into flavor-specific event types to pull in common parsing code.\n\/\/\n\/\/ The default v4 header format is:\n\/\/ offset : size\n\/\/ +============================+\n\/\/ | timestamp 0 : 4 |\n\/\/ +----------------------------+\n\/\/ | type_code 4 : 1 |\n\/\/ +----------------------------+\n\/\/ | server_id 5 : 4 |\n\/\/ +----------------------------+\n\/\/ | event_length 9 : 4 |\n\/\/ +----------------------------+\n\/\/ | next_position 13 : 4 |\n\/\/ +----------------------------+\n\/\/ | flags 17 : 2 |\n\/\/ +----------------------------+\n\/\/ | extra_headers 19 : x-19 |\n\/\/ +============================+\n\/\/ http:\/\/dev.mysql.com\/doc\/internals\/en\/event-header-fields.html\ntype binlogEvent []byte\n\nconst (\n\tsemiSyncIndicator byte = 0xef\n\tsemiSyncAckRequested byte = 0x01\n)\n\n\/\/ isSemiSyncAckRequested examines the given buffer. If semi sync is enables, then the event is preceded by two bytes,\n\/\/ the 1st is a semi-sync indicator, the 2nd specifies whether the source expects an ACK for this event\n\/\/ see https:\/\/dev.mysql.com\/doc\/internals\/en\/semi-sync-binlog-event.html\nfunc isSemiSyncAckRequested(buf []byte) ([]byte, bool) {\n\tif len(buf) < 2 {\n\t\treturn buf, false\n\t}\n\tif buf[0] == semiSyncIndicator {\n\t\tackRequested := (buf[1] == semiSyncAckRequested)\n\t\treturn buf[2:], ackRequested\n\t}\n\treturn buf, false\n}\n\n\/\/ dataBytes returns the event bytes without header prefix and without checksum suffix\nfunc (ev binlogEvent) dataBytes(f BinlogFormat) []byte {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\tdata = data[0 : len(data)-4]\n\treturn data\n}\n\n\/\/ IsValid implements BinlogEvent.IsValid().\nfunc (ev binlogEvent) IsValid() bool {\n\tbufLen := len(ev.Bytes())\n\n\t\/\/ The buffer must be at least 19 bytes to contain a valid header.\n\tif bufLen < 19 {\n\t\treturn false\n\t}\n\n\t\/\/ It's now safe to use methods that examine header fields.\n\t\/\/ Let's see if the event is right about its own size.\n\tevLen := ev.Length()\n\tif evLen < 19 || evLen != uint32(bufLen) {\n\t\treturn false\n\t}\n\n\t\/\/ Everything's there, so we shouldn't have any out-of-bounds issues while\n\t\/\/ reading header fields or constant-offset data fields. We should still check\n\t\/\/ bounds any time we compute an offset based on values in the buffer itself.\n\treturn true\n}\n\n\/\/ Bytes returns the underlying byte buffer.\nfunc (ev binlogEvent) Bytes() []byte {\n\treturn []byte(ev)\n}\n\n\/\/ Type returns the type_code field from the header.\nfunc (ev binlogEvent) Type() byte {\n\treturn ev.Bytes()[4]\n}\n\n\/\/ Flags returns the flags field from the header.\nfunc (ev binlogEvent) Flags() uint16 {\n\treturn binary.LittleEndian.Uint16(ev.Bytes()[17 : 17+2])\n}\n\n\/\/ Timestamp returns the timestamp field from the header.\nfunc (ev binlogEvent) Timestamp() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[:4])\n}\n\n\/\/ ServerID returns the server_id field from the header.\nfunc (ev binlogEvent) ServerID() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[5 : 5+4])\n}\n\n\/\/ Length returns the event_length field from the header.\nfunc (ev binlogEvent) Length() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[9 : 9+4])\n}\n\n\/\/ nextPosition the nextPosition field from the header\nfunc (ev binlogEvent) NextPosition() uint32 {\n\treturn binary.LittleEndian.Uint32(ev.Bytes()[13 : 13+4])\n}\n\n\/\/ IsFormatDescription implements BinlogEvent.IsFormatDescription().\nfunc (ev binlogEvent) IsFormatDescription() bool {\n\treturn ev.Type() == eFormatDescriptionEvent\n}\n\n\/\/ IsQuery implements BinlogEvent.IsQuery().\nfunc (ev binlogEvent) IsQuery() bool {\n\treturn ev.Type() == eQueryEvent\n}\n\n\/\/ IsRotate implements BinlogEvent.IsRotate().\nfunc (ev binlogEvent) IsRotate() bool {\n\treturn ev.Type() == eRotateEvent\n}\n\n\/\/ IsXID implements BinlogEvent.IsXID().\nfunc (ev binlogEvent) IsXID() bool {\n\treturn ev.Type() == eXIDEvent\n}\n\n\/\/ IsStop implements BinlogEvent.IsStop().\nfunc (ev binlogEvent) IsStop() bool {\n\treturn ev.Type() == eStopEvent\n}\n\n\/\/ IsIntVar implements BinlogEvent.IsIntVar().\nfunc (ev binlogEvent) IsIntVar() bool {\n\treturn ev.Type() == eIntVarEvent\n}\n\n\/\/ IsRand implements BinlogEvent.IsRand().\nfunc (ev binlogEvent) IsRand() bool {\n\treturn ev.Type() == eRandEvent\n}\n\n\/\/ IsPreviousGTIDs implements BinlogEvent.IsPreviousGTIDs().\nfunc (ev binlogEvent) IsPreviousGTIDs() bool {\n\treturn ev.Type() == ePreviousGTIDsEvent\n}\n\n\/\/ IsTableMap implements BinlogEvent.IsTableMap().\nfunc (ev binlogEvent) IsTableMap() bool {\n\treturn ev.Type() == eTableMapEvent\n}\n\n\/\/ IsWriteRows implements BinlogEvent.IsWriteRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsWriteRows() bool {\n\treturn ev.Type() == eWriteRowsEventV1 ||\n\t\tev.Type() == eWriteRowsEventV2\n}\n\n\/\/ IsUpdateRows implements BinlogEvent.IsUpdateRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsUpdateRows() bool {\n\treturn ev.Type() == eUpdateRowsEventV1 ||\n\t\tev.Type() == eUpdateRowsEventV2\n}\n\n\/\/ IsDeleteRows implements BinlogEvent.IsDeleteRows().\n\/\/ We do not support v0.\nfunc (ev binlogEvent) IsDeleteRows() bool {\n\treturn ev.Type() == eDeleteRowsEventV1 ||\n\t\tev.Type() == eDeleteRowsEventV2\n}\n\n\/\/ IsPseudo is always false for a native binlogEvent.\nfunc (ev binlogEvent) IsPseudo() bool {\n\treturn false\n}\n\n\/\/ IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON)\nfunc (ev binlogEvent) IsCompressed() bool {\n\treturn ev.Type() == eCompressedEvent\n}\n\n\/\/ Format implements BinlogEvent.Format().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 2 format version\n\/\/ 50 server version string, 0-padded but not necessarily 0-terminated\n\/\/ 4 timestamp (same as timestamp header field)\n\/\/ 1 header length\n\/\/ p (one byte per packet type) event type header lengths\n\/\/ Rest was inferred from reading source code:\n\/\/ 1 checksum algorithm\n\/\/ 4 checksum\nfunc (ev binlogEvent) Format() (f BinlogFormat, err error) {\n\t\/\/ FORMAT_DESCRIPTION_EVENT has a fixed header size of 19\n\t\/\/ because we have to read it before we know the header_length.\n\tdata := ev.Bytes()[19:]\n\n\tf.FormatVersion = binary.LittleEndian.Uint16(data[:2])\n\tif f.FormatVersion != 4 {\n\t\treturn f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, \"format version = %d, we only support version 4\", f.FormatVersion)\n\t}\n\tf.ServerVersion = string(bytes.TrimRight(data[2:2+50], \"\\x00\"))\n\tf.HeaderLength = data[2+50+4]\n\tif f.HeaderLength < 19 {\n\t\treturn f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, \"header length = %d, should be >= 19\", f.HeaderLength)\n\t}\n\n\t\/\/ MySQL\/MariaDB 5.6.1+ always adds a 4-byte checksum to the end of a\n\t\/\/ FORMAT_DESCRIPTION_EVENT, regardless of the server setting. The byte\n\t\/\/ immediately before that checksum tells us which checksum algorithm\n\t\/\/ (if any) is used for the rest of the events.\n\tf.ChecksumAlgorithm = data[len(data)-5]\n\n\tf.HeaderSizes = data[2+50+4+1 : len(data)-5]\n\treturn f, nil\n}\n\n\/\/ Query implements BinlogEvent.Query().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 4 thread_id\n\/\/ 4 execution time\n\/\/ 1 length of db_name, not including NULL terminator (X)\n\/\/ 2 error code\n\/\/ 2 length of status vars block (Y)\n\/\/ Y status vars block\n\/\/ X+1 db_name + NULL terminator\n\/\/ L-X-1-Y SQL statement (no NULL terminator)\nfunc (ev binlogEvent) Query(f BinlogFormat) (query Query, err error) {\n\tconst varsPos = 4 + 4 + 1 + 2 + 2\n\n\tdata := ev.Bytes()[f.HeaderLength:]\n\n\t\/\/ length of database name\n\tdbLen := int(data[4+4])\n\t\/\/ length of status variables block\n\tvarsLen := int(binary.LittleEndian.Uint16(data[4+4+1+2 : 4+4+1+2+2]))\n\n\t\/\/ position of database name\n\tdbPos := varsPos + varsLen\n\t\/\/ position of SQL query\n\tsqlPos := dbPos + dbLen + 1 \/\/ +1 for NULL terminator\n\tif sqlPos > len(data) {\n\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"SQL query position overflows buffer (%v > %v)\", sqlPos, len(data))\n\t}\n\n\t\/\/ We've checked that the buffer is big enough for sql, so everything before\n\t\/\/ it (db and vars) is in-bounds too.\n\tquery.Database = string(data[dbPos : dbPos+dbLen])\n\tquery.SQL = string(data[sqlPos:])\n\n\t\/\/ Scan the status vars for ones we care about. This requires us to know the\n\t\/\/ size of every var that comes before the ones we're interested in.\n\tvars := data[varsPos : varsPos+varsLen]\n\nvarsLoop:\n\tfor pos := 0; pos < len(vars); {\n\t\tcode := vars[pos]\n\t\tpos++\n\n\t\t\/\/ All codes are optional, but if present they must occur in numerically\n\t\t\/\/ increasing order (except for 6 which occurs in the place of 2) to allow\n\t\t\/\/ for backward compatibility.\n\t\tswitch code {\n\t\tcase QFlags2Code, QAutoIncrement:\n\t\t\tpos += 4\n\t\tcase QSQLModeCode:\n\t\t\tpos += 8\n\t\tcase QCatalog: \/\/ Used in MySQL 5.0.0 - 5.0.3\n\t\t\tif pos+1 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CATALOG status var overflows buffer (%v + 1 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tpos += 1 + int(vars[pos]) + 1\n\t\tcase QCatalogNZCode: \/\/ Used in MySQL > 5.0.3 to replace QCatalog\n\t\t\tif pos+1 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CATALOG_NZ_CODE status var overflows buffer (%v + 1 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tpos += 1 + int(vars[pos])\n\t\tcase QCharsetCode:\n\t\t\tif pos+6 > len(vars) {\n\t\t\t\treturn query, vterrors.Errorf(vtrpc.Code_INTERNAL, \"Q_CHARSET_CODE status var overflows buffer (%v + 6 > %v)\", pos, len(vars))\n\t\t\t}\n\t\t\tquery.Charset = &binlogdatapb.Charset{\n\t\t\t\tClient: int32(binary.LittleEndian.Uint16(vars[pos : pos+2])),\n\t\t\t\tConn: int32(binary.LittleEndian.Uint16(vars[pos+2 : pos+4])),\n\t\t\t\tServer: int32(binary.LittleEndian.Uint16(vars[pos+4 : pos+6])),\n\t\t\t}\n\t\t\tpos += 6\n\t\tdefault:\n\t\t\t\/\/ If we see something higher than what we're interested in, we can stop.\n\t\t\tbreak varsLoop\n\t\t}\n\t}\n\n\treturn query, nil\n}\n\n\/\/ IntVar implements BinlogEvent.IntVar().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 1 variable ID\n\/\/ 8 variable value\nfunc (ev binlogEvent) IntVar(f BinlogFormat) (byte, uint64, error) {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\n\ttyp := data[0]\n\tif typ != IntVarLastInsertID && typ != IntVarInsertID {\n\t\treturn 0, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, \"invalid IntVar ID: %v\", data[0])\n\t}\n\n\tvalue := binary.LittleEndian.Uint64(data[1 : 1+8])\n\treturn typ, value, nil\n}\n\n\/\/ Rand implements BinlogEvent.Rand().\n\/\/\n\/\/ Expected format (L = total length of event data):\n\/\/ # bytes field\n\/\/ 8 seed 1\n\/\/ 8 seed 2\nfunc (ev binlogEvent) Rand(f BinlogFormat) (seed1 uint64, seed2 uint64, err error) {\n\tdata := ev.Bytes()[f.HeaderLength:]\n\tseed1 = binary.LittleEndian.Uint64(data[0:8])\n\tseed2 = binary.LittleEndian.Uint64(data[8 : 8+8])\n\treturn seed1, seed2, nil\n}\n\nfunc (ev binlogEvent) TableID(f BinlogFormat) uint64 {\n\ttyp := ev.Type()\n\tpos := f.HeaderLength\n\tif f.HeaderSize(typ) == 6 {\n\t\t\/\/ Encoded in 4 bytes.\n\t\treturn uint64(binary.LittleEndian.Uint32(ev[pos : pos+4]))\n\t}\n\n\t\/\/ Encoded in 6 bytes.\n\treturn uint64(ev[pos]) |\n\t\tuint64(ev[pos+1])<<8 |\n\t\tuint64(ev[pos+2])<<16 |\n\t\tuint64(ev[pos+3])<<24 |\n\t\tuint64(ev[pos+4])<<32 |\n\t\tuint64(ev[pos+5])<<40\n}\n\nfunc (ev binlogEvent) NextLogFile(f BinlogFormat) (string, uint64, error) {\n\tdata := ev.dataBytes(f)\n\tpos := 0\n\tlogPos, pos, _ := readUint64(data, pos)\n\tlogFile := string(data[pos:])\n\treturn logFile, logPos, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage network\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\tcloudprovider \"k8s.io\/cloud-provider\"\n\t\"k8s.io\/kubernetes\/pkg\/cluster\/ports\"\n\tkubeschedulerconfig \"k8s.io\/kubernetes\/pkg\/scheduler\/apis\/config\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2enetwork \"k8s.io\/kubernetes\/test\/e2e\/framework\/network\"\n\te2enode \"k8s.io\/kubernetes\/test\/e2e\/framework\/node\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\/providers\/gce\"\n\te2eservice \"k8s.io\/kubernetes\/test\/e2e\/framework\/service\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/network\/common\"\n\tgcecloud \"k8s.io\/legacy-cloud-providers\/gce\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nconst (\n\tfirewallTestTCPTimeout = time.Duration(1 * time.Second)\n\t\/\/ Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster\n\tfirewallTestHTTPPort = int32(29999)\n\tfirewallTestUDPPort = int32(29998)\n)\n\nvar _ = common.SIGDescribe(\"Firewall rule\", func() {\n\tvar firewallTestName = \"firewall-test\"\n\tf := framework.NewDefaultFramework(firewallTestName)\n\n\tvar cs clientset.Interface\n\tvar cloudConfig framework.CloudConfig\n\tvar gceCloud *gcecloud.Cloud\n\n\tginkgo.BeforeEach(func() {\n\t\te2eskipper.SkipUnlessProviderIs(\"gce\")\n\n\t\tvar err error\n\t\tcs = f.ClientSet\n\t\tcloudConfig = framework.TestContext.CloudConfig\n\t\tgceCloud, err = gce.GetGCECloud()\n\t\tframework.ExpectNoError(err)\n\t})\n\n\t\/\/ This test takes around 6 minutes to run\n\tginkgo.It(\"[Slow] [Serial] should create valid firewall rules for LoadBalancer type service\", func() {\n\t\tns := f.Namespace.Name\n\t\t\/\/ This source ranges is just used to examine we have exact same things on LB firewall rules\n\t\tfirewallTestSourceRanges := []string{\"0.0.0.0\/1\", \"128.0.0.0\/1\"}\n\t\tserviceName := \"firewall-test-loadbalancer\"\n\n\t\tginkgo.By(\"Getting cluster ID\")\n\t\tclusterID, err := gce.GetClusterID(cs)\n\t\tframework.ExpectNoError(err)\n\t\tframework.Logf(\"Got cluster ID: %v\", clusterID)\n\n\t\tjig := e2eservice.NewTestJig(cs, ns, serviceName)\n\t\tnodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)\n\t\tframework.ExpectNoError(err)\n\n\t\tnodesNames := []string{}\n\t\tfor _, node := range nodeList.Items {\n\t\t\tnodesNames = append(nodesNames, node.Name)\n\t\t}\n\t\tnodesSet := sets.NewString(nodesNames...)\n\n\t\tginkgo.By(\"Creating a LoadBalancer type service with ExternalTrafficPolicy=Global\")\n\t\tsvc, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(cs), func(svc *v1.Service) {\n\t\t\tsvc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}\n\t\t\tsvc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges\n\t\t})\n\t\tframework.ExpectNoError(err)\n\t\tdefer func() {\n\t\t\t_, err = jig.UpdateService(func(svc *v1.Service) {\n\t\t\t\tsvc.Spec.Type = v1.ServiceTypeNodePort\n\t\t\t\tsvc.Spec.LoadBalancerSourceRanges = nil\n\t\t\t})\n\t\t\tframework.ExpectNoError(err)\n\t\t\terr = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})\n\t\t\tframework.ExpectNoError(err)\n\t\t\tginkgo.By(\"Waiting for the local traffic health check firewall rule to be deleted\")\n\t\t\tlocalHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)\n\t\t\t_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout)\n\t\t\tframework.ExpectNoError(err)\n\t\t}()\n\t\tsvcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tginkgo.By(\"Checking if service's firewall rule is correct\")\n\t\tlbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)\n\t\tfw, err := gceCloud.GetFirewall(lbFw.Name)\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Checking if service's nodes health check firewall rule is correct\")\n\t\tnodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)\n\t\tfw, err = gceCloud.GetFirewall(nodesHCFw.Name)\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\t\/\/ OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE\n\t\tginkgo.By(\"Updating LoadBalancer service to ExternalTrafficPolicy=Local\")\n\t\tsvc, err = jig.UpdateService(func(svc *v1.Service) {\n\t\t\tsvc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal\n\t\t})\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Waiting for the nodes health check firewall rule to be deleted\")\n\t\t_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Waiting for the correct local traffic health check firewall rule to be created\")\n\t\tlocalHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)\n\t\tfw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(fmt.Sprintf(\"Creating netexec pods on at most %v nodes\", e2eservice.MaxNodesForEndpointsTests))\n\t\tfor i, nodeName := range nodesNames {\n\t\t\tpodName := fmt.Sprintf(\"netexec%v\", i)\n\n\t\t\tframework.Logf(\"Creating netexec pod %q on node %v in namespace %q\", podName, nodeName, ns)\n\t\t\tpod := e2epod.NewAgnhostPod(ns, podName, nil, nil, nil,\n\t\t\t\t\"netexec\",\n\t\t\t\tfmt.Sprintf(\"--http-port=%d\", firewallTestHTTPPort),\n\t\t\t\tfmt.Sprintf(\"--udp-port=%d\", firewallTestUDPPort))\n\t\t\tpod.ObjectMeta.Labels = jig.Labels\n\t\t\tpod.Spec.NodeName = nodeName\n\t\t\tpod.Spec.HostNetwork = true\n\t\t\t_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})\n\t\t\tframework.ExpectNoError(err)\n\t\t\tframework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, framework.PodStartTimeout))\n\t\t\tframework.Logf(\"Netexec pod %q in namespace %q running\", podName, ns)\n\n\t\t\tdefer func() {\n\t\t\t\tframework.Logf(\"Cleaning up the netexec pod: %v\", podName)\n\t\t\t\terr = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t}()\n\t\t}\n\n\t\t\/\/ Send requests from outside of the cluster because internal traffic is whitelisted\n\t\tginkgo.By(\"Accessing the external service ip from outside, all non-master nodes should be reached\")\n\t\terr = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)\n\t\tframework.ExpectNoError(err)\n\n\t\t\/\/ Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster\n\t\t\/\/ by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect\n\t\t\/\/ simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but\n\t\t\/\/ that's much harder to do in the current e2e framework.\n\t\tginkgo.By(fmt.Sprintf(\"Removing tags from one of the nodes: %v\", nodesNames[0]))\n\t\tnodesSet.Delete(nodesNames[0])\n\t\t\/\/ Instance could run in a different zone in multi-zone test. Figure out which zone\n\t\t\/\/ it is in before proceeding.\n\t\tzone := cloudConfig.Zone\n\t\tif zoneInLabel, ok := nodeList.Items[0].Labels[v1.LabelFailureDomainBetaZone]; ok {\n\t\t\tzone = zoneInLabel\n\t\t} else if zoneInLabel, ok := nodeList.Items[0].Labels[v1.LabelTopologyZone]; ok {\n\t\t\tzone = zoneInLabel\n\t\t}\n\t\tremovedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})\n\t\tdefer func() {\n\t\t\tginkgo.By(\"Adding tags back to the node and wait till the traffic is recovered\")\n\t\t\tnodesSet.Insert(nodesNames[0])\n\t\t\tgce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)\n\t\t\t\/\/ Make sure traffic is recovered before exit\n\t\t\terr = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)\n\t\t\tframework.ExpectNoError(err)\n\t\t}()\n\n\t\tginkgo.By(\"Accessing serivce through the external ip and examine got no response from the node without tags\")\n\t\terr = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15)\n\t\tframework.ExpectNoError(err)\n\t})\n\n\tginkgo.It(\"should have correct firewall rules for e2e cluster\", func() {\n\t\tginkgo.By(\"Checking if e2e firewall rules are correct\")\n\t\tfor _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {\n\t\t\tfw, err := gceCloud.GetFirewall(expFw.Name)\n\t\t\tframework.ExpectNoError(err)\n\t\t\terr = gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t})\n\n\tginkgo.It(\"control plane should not expose well-known ports\", func() {\n\t\tnodes, err := e2enode.GetReadySchedulableNodes(cs)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Checking well known ports on master and nodes are not exposed externally\")\n\t\tnodeAddr := e2enode.FirstAddress(nodes, v1.NodeExternalIP)\n\t\tif nodeAddr == \"\" {\n\t\t\tframework.Failf(\"did not find any node addresses\")\n\t\t}\n\n\t\tcontrolPlaneAddresses := framework.GetControlPlaneAddresses(cs)\n\t\tfor _, instanceAddress := range controlPlaneAddresses {\n\t\t\tassertNotReachableHTTPTimeout(instanceAddress, \"\/healthz\", ports.KubeControllerManagerPort, firewallTestTCPTimeout, true)\n\t\t\tassertNotReachableHTTPTimeout(instanceAddress, \"\/healthz\", kubeschedulerconfig.DefaultKubeSchedulerPort, firewallTestTCPTimeout, true)\n\t\t}\n\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.KubeletPort, firewallTestTCPTimeout, false)\n\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.KubeletReadOnlyPort, firewallTestTCPTimeout, false)\n\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.ProxyStatusPort, firewallTestTCPTimeout, false)\n\t})\n})\n\nfunc assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Duration, enableHTTPS bool) {\n\tresult := e2enetwork.PokeHTTP(ip, port, path, &e2enetwork.HTTPPokeParams{Timeout: timeout, EnableHTTPS: enableHTTPS})\n\tif result.Status == e2enetwork.HTTPError {\n\t\tframework.Failf(\"Unexpected error checking for reachability of %s:%d: %v\", ip, port, result.Error)\n\t}\n\tif result.Code != 0 {\n\t\tframework.Failf(\"Was unexpectedly able to reach %s:%d\", ip, port)\n\t}\n}\n\n\/\/ testHitNodesFromOutside checkes HTTP connectivity from outside.\nfunc testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {\n\treturn testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)\n}\n\n\/\/ testHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.\nfunc testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,\n\tcountToSucceed int) error {\n\tframework.Logf(\"Waiting up to %v for satisfying expectedHosts for %v times\", timeout, countToSucceed)\n\thittedHosts := sets.NewString()\n\tcount := 0\n\tcondition := func() (bool, error) {\n\t\tresult := e2enetwork.PokeHTTP(externalIP, int(httpPort), \"\/hostname\", &e2enetwork.HTTPPokeParams{Timeout: 1 * time.Second})\n\t\tif result.Status != e2enetwork.HTTPSuccess {\n\t\t\treturn false, nil\n\t\t}\n\n\t\thittedHost := strings.TrimSpace(string(result.Body))\n\t\tif !expectedHosts.Has(hittedHost) {\n\t\t\tframework.Logf(\"Error hitting unexpected host: %v, reset counter: %v\", hittedHost, count)\n\t\t\tcount = 0\n\t\t\treturn false, nil\n\t\t}\n\t\tif !hittedHosts.Has(hittedHost) {\n\t\t\thittedHosts.Insert(hittedHost)\n\t\t\tframework.Logf(\"Missing %+v, got %+v\", expectedHosts.Difference(hittedHosts), hittedHosts)\n\t\t}\n\t\tif hittedHosts.Equal(expectedHosts) {\n\t\t\tcount++\n\t\t\tif count >= countToSucceed {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}\n\n\tif err := wait.Poll(time.Second, timeout, condition); err != nil {\n\t\treturn fmt.Errorf(\"error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v\",\n\t\t\texpectedHosts, hittedHosts, count, countToSucceed)\n\t}\n\treturn nil\n}\n<commit_msg>UPSTREAM: 101488: e2e\/network\/firewall: don't assume nodes are exposed externally<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage network\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\tcloudprovider \"k8s.io\/cloud-provider\"\n\t\"k8s.io\/kubernetes\/pkg\/cluster\/ports\"\n\tkubeschedulerconfig \"k8s.io\/kubernetes\/pkg\/scheduler\/apis\/config\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2enetwork \"k8s.io\/kubernetes\/test\/e2e\/framework\/network\"\n\te2enode \"k8s.io\/kubernetes\/test\/e2e\/framework\/node\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\/providers\/gce\"\n\te2eservice \"k8s.io\/kubernetes\/test\/e2e\/framework\/service\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/network\/common\"\n\tgcecloud \"k8s.io\/legacy-cloud-providers\/gce\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nconst (\n\tfirewallTestTCPTimeout = time.Duration(1 * time.Second)\n\t\/\/ Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster\n\tfirewallTestHTTPPort = int32(29999)\n\tfirewallTestUDPPort = int32(29998)\n)\n\nvar _ = common.SIGDescribe(\"Firewall rule\", func() {\n\tvar firewallTestName = \"firewall-test\"\n\tf := framework.NewDefaultFramework(firewallTestName)\n\n\tvar cs clientset.Interface\n\tvar cloudConfig framework.CloudConfig\n\tvar gceCloud *gcecloud.Cloud\n\n\tginkgo.BeforeEach(func() {\n\t\te2eskipper.SkipUnlessProviderIs(\"gce\")\n\n\t\tvar err error\n\t\tcs = f.ClientSet\n\t\tcloudConfig = framework.TestContext.CloudConfig\n\t\tgceCloud, err = gce.GetGCECloud()\n\t\tframework.ExpectNoError(err)\n\t})\n\n\t\/\/ This test takes around 6 minutes to run\n\tginkgo.It(\"[Slow] [Serial] should create valid firewall rules for LoadBalancer type service\", func() {\n\t\tns := f.Namespace.Name\n\t\t\/\/ This source ranges is just used to examine we have exact same things on LB firewall rules\n\t\tfirewallTestSourceRanges := []string{\"0.0.0.0\/1\", \"128.0.0.0\/1\"}\n\t\tserviceName := \"firewall-test-loadbalancer\"\n\n\t\tginkgo.By(\"Getting cluster ID\")\n\t\tclusterID, err := gce.GetClusterID(cs)\n\t\tframework.ExpectNoError(err)\n\t\tframework.Logf(\"Got cluster ID: %v\", clusterID)\n\n\t\tjig := e2eservice.NewTestJig(cs, ns, serviceName)\n\t\tnodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)\n\t\tframework.ExpectNoError(err)\n\n\t\tnodesNames := []string{}\n\t\tfor _, node := range nodeList.Items {\n\t\t\tnodesNames = append(nodesNames, node.Name)\n\t\t}\n\t\tnodesSet := sets.NewString(nodesNames...)\n\n\t\tginkgo.By(\"Creating a LoadBalancer type service with ExternalTrafficPolicy=Global\")\n\t\tsvc, err := jig.CreateLoadBalancerService(e2eservice.GetServiceLoadBalancerCreationTimeout(cs), func(svc *v1.Service) {\n\t\t\tsvc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}\n\t\t\tsvc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges\n\t\t})\n\t\tframework.ExpectNoError(err)\n\t\tdefer func() {\n\t\t\t_, err = jig.UpdateService(func(svc *v1.Service) {\n\t\t\t\tsvc.Spec.Type = v1.ServiceTypeNodePort\n\t\t\t\tsvc.Spec.LoadBalancerSourceRanges = nil\n\t\t\t})\n\t\t\tframework.ExpectNoError(err)\n\t\t\terr = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})\n\t\t\tframework.ExpectNoError(err)\n\t\t\tginkgo.By(\"Waiting for the local traffic health check firewall rule to be deleted\")\n\t\t\tlocalHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)\n\t\t\t_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout)\n\t\t\tframework.ExpectNoError(err)\n\t\t}()\n\t\tsvcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tginkgo.By(\"Checking if service's firewall rule is correct\")\n\t\tlbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)\n\t\tfw, err := gceCloud.GetFirewall(lbFw.Name)\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Checking if service's nodes health check firewall rule is correct\")\n\t\tnodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)\n\t\tfw, err = gceCloud.GetFirewall(nodesHCFw.Name)\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\t\/\/ OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE\n\t\tginkgo.By(\"Updating LoadBalancer service to ExternalTrafficPolicy=Local\")\n\t\tsvc, err = jig.UpdateService(func(svc *v1.Service) {\n\t\t\tsvc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal\n\t\t})\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Waiting for the nodes health check firewall rule to be deleted\")\n\t\t_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Waiting for the correct local traffic health check firewall rule to be created\")\n\t\tlocalHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)\n\t\tfw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, e2eservice.GetServiceLoadBalancerCreationTimeout(cs))\n\t\tframework.ExpectNoError(err)\n\t\terr = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(fmt.Sprintf(\"Creating netexec pods on at most %v nodes\", e2eservice.MaxNodesForEndpointsTests))\n\t\tfor i, nodeName := range nodesNames {\n\t\t\tpodName := fmt.Sprintf(\"netexec%v\", i)\n\n\t\t\tframework.Logf(\"Creating netexec pod %q on node %v in namespace %q\", podName, nodeName, ns)\n\t\t\tpod := e2epod.NewAgnhostPod(ns, podName, nil, nil, nil,\n\t\t\t\t\"netexec\",\n\t\t\t\tfmt.Sprintf(\"--http-port=%d\", firewallTestHTTPPort),\n\t\t\t\tfmt.Sprintf(\"--udp-port=%d\", firewallTestUDPPort))\n\t\t\tpod.ObjectMeta.Labels = jig.Labels\n\t\t\tpod.Spec.NodeName = nodeName\n\t\t\tpod.Spec.HostNetwork = true\n\t\t\t_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})\n\t\t\tframework.ExpectNoError(err)\n\t\t\tframework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, framework.PodStartTimeout))\n\t\t\tframework.Logf(\"Netexec pod %q in namespace %q running\", podName, ns)\n\n\t\t\tdefer func() {\n\t\t\t\tframework.Logf(\"Cleaning up the netexec pod: %v\", podName)\n\t\t\t\terr = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t}()\n\t\t}\n\n\t\t\/\/ Send requests from outside of the cluster because internal traffic is whitelisted\n\t\tginkgo.By(\"Accessing the external service ip from outside, all non-master nodes should be reached\")\n\t\terr = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)\n\t\tframework.ExpectNoError(err)\n\n\t\t\/\/ Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster\n\t\t\/\/ by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect\n\t\t\/\/ simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but\n\t\t\/\/ that's much harder to do in the current e2e framework.\n\t\tginkgo.By(fmt.Sprintf(\"Removing tags from one of the nodes: %v\", nodesNames[0]))\n\t\tnodesSet.Delete(nodesNames[0])\n\t\t\/\/ Instance could run in a different zone in multi-zone test. Figure out which zone\n\t\t\/\/ it is in before proceeding.\n\t\tzone := cloudConfig.Zone\n\t\tif zoneInLabel, ok := nodeList.Items[0].Labels[v1.LabelFailureDomainBetaZone]; ok {\n\t\t\tzone = zoneInLabel\n\t\t} else if zoneInLabel, ok := nodeList.Items[0].Labels[v1.LabelTopologyZone]; ok {\n\t\t\tzone = zoneInLabel\n\t\t}\n\t\tremovedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})\n\t\tdefer func() {\n\t\t\tginkgo.By(\"Adding tags back to the node and wait till the traffic is recovered\")\n\t\t\tnodesSet.Insert(nodesNames[0])\n\t\t\tgce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)\n\t\t\t\/\/ Make sure traffic is recovered before exit\n\t\t\terr = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)\n\t\t\tframework.ExpectNoError(err)\n\t\t}()\n\n\t\tginkgo.By(\"Accessing serivce through the external ip and examine got no response from the node without tags\")\n\t\terr = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15)\n\t\tframework.ExpectNoError(err)\n\t})\n\n\tginkgo.It(\"should have correct firewall rules for e2e cluster\", func() {\n\t\tginkgo.By(\"Checking if e2e firewall rules are correct\")\n\t\tfor _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {\n\t\t\tfw, err := gceCloud.GetFirewall(expFw.Name)\n\t\t\tframework.ExpectNoError(err)\n\t\t\terr = gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t})\n\n\tginkgo.It(\"control plane should not expose well-known ports\", func() {\n\t\tnodes, err := e2enode.GetReadySchedulableNodes(cs)\n\t\tframework.ExpectNoError(err)\n\n\t\tginkgo.By(\"Checking well known ports on master and nodes are not exposed externally\")\n\t\tnodeAddr := e2enode.FirstAddress(nodes, v1.NodeExternalIP)\n\t\tif nodeAddr != \"\" {\n\t\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.KubeletPort, firewallTestTCPTimeout, false)\n\t\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.KubeletReadOnlyPort, firewallTestTCPTimeout, false)\n\t\t\tassertNotReachableHTTPTimeout(nodeAddr, \"\/\", ports.ProxyStatusPort, firewallTestTCPTimeout, false)\n\t\t}\n\n\t\tcontrolPlaneAddresses := framework.GetControlPlaneAddresses(cs)\n\t\tfor _, instanceAddress := range controlPlaneAddresses {\n\t\t\tassertNotReachableHTTPTimeout(instanceAddress, \"\/healthz\", ports.KubeControllerManagerPort, firewallTestTCPTimeout, true)\n\t\t\tassertNotReachableHTTPTimeout(instanceAddress, \"\/healthz\", kubeschedulerconfig.DefaultKubeSchedulerPort, firewallTestTCPTimeout, true)\n\t\t}\n\t})\n})\n\nfunc assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Duration, enableHTTPS bool) {\n\tresult := e2enetwork.PokeHTTP(ip, port, path, &e2enetwork.HTTPPokeParams{Timeout: timeout, EnableHTTPS: enableHTTPS})\n\tif result.Status == e2enetwork.HTTPError {\n\t\tframework.Failf(\"Unexpected error checking for reachability of %s:%d: %v\", ip, port, result.Error)\n\t}\n\tif result.Code != 0 {\n\t\tframework.Failf(\"Was unexpectedly able to reach %s:%d\", ip, port)\n\t}\n}\n\n\/\/ testHitNodesFromOutside checkes HTTP connectivity from outside.\nfunc testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {\n\treturn testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)\n}\n\n\/\/ testHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.\nfunc testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,\n\tcountToSucceed int) error {\n\tframework.Logf(\"Waiting up to %v for satisfying expectedHosts for %v times\", timeout, countToSucceed)\n\thittedHosts := sets.NewString()\n\tcount := 0\n\tcondition := func() (bool, error) {\n\t\tresult := e2enetwork.PokeHTTP(externalIP, int(httpPort), \"\/hostname\", &e2enetwork.HTTPPokeParams{Timeout: 1 * time.Second})\n\t\tif result.Status != e2enetwork.HTTPSuccess {\n\t\t\treturn false, nil\n\t\t}\n\n\t\thittedHost := strings.TrimSpace(string(result.Body))\n\t\tif !expectedHosts.Has(hittedHost) {\n\t\t\tframework.Logf(\"Error hitting unexpected host: %v, reset counter: %v\", hittedHost, count)\n\t\t\tcount = 0\n\t\t\treturn false, nil\n\t\t}\n\t\tif !hittedHosts.Has(hittedHost) {\n\t\t\thittedHosts.Insert(hittedHost)\n\t\t\tframework.Logf(\"Missing %+v, got %+v\", expectedHosts.Difference(hittedHosts), hittedHosts)\n\t\t}\n\t\tif hittedHosts.Equal(expectedHosts) {\n\t\t\tcount++\n\t\t\tif count >= countToSucceed {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}\n\n\tif err := wait.Poll(time.Second, timeout, condition); err != nil {\n\t\treturn fmt.Errorf(\"error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v\",\n\t\t\texpectedHosts, hittedHosts, count, countToSucceed)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage vtgate\n\nimport (\n\t\"flag\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n)\n\nvar (\n\tsrvTopoCacheTTL = flag.Duration(\"-srv_topo_cache_ttl\", 1*time.Second, \"how long to use cached entries for topology\")\n)\n\n\/\/ SrvTopoServer is a subset of topo.Server that only contains the serving\n\/\/ graph read-only calls used by clients to resolve serving addresses.\ntype SrvTopoServer interface {\n\tGetSrvKeyspaceNames(cell string) ([]string, error)\n\n\tGetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error)\n\n\tGetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error)\n}\n\n\/\/ ResilientSrvTopoServer is an implementation of SrvTopoServer based\n\/\/ on another SrvTopoServer that uses a cache for two purposes:\n\/\/ - limit the QPS to the underlying SrvTopoServer\n\/\/ - return the last known value of the data if there is an error\ntype ResilientSrvTopoServer struct {\n\tToposerv SrvTopoServer\n\n\t\/\/ mutex protects the cache\n\tmu sync.Mutex\n\tsvrKeyspaceNamesCache map[string]svrKeyspaceNamesEntry\n\tsrvKeyspaceCache map[string]svrKeyspaceEntry\n\tendPointsCache map[string]endPointsEntry\n}\n\ntype svrKeyspaceNamesEntry struct {\n\tinsertionTime time.Time\n\tvalue []string\n}\n\ntype svrKeyspaceEntry struct {\n\tinsertionTime time.Time\n\tvalue *topo.SrvKeyspace\n}\n\ntype endPointsEntry struct {\n\tinsertionTime time.Time\n\tvalue *topo.EndPoints\n}\n\n\/\/ NewResilientSrvTopoServer creates a new ResilientSrvTopoServer\n\/\/ based on the provided SrvTopoServer.\nfunc NewResilientSrvTopoServer(base SrvTopoServer) *ResilientSrvTopoServer {\n\treturn &ResilientSrvTopoServer{\n\t\tToposerv: base,\n\t\tsvrKeyspaceNamesCache: make(map[string]svrKeyspaceNamesEntry),\n\t\tsrvKeyspaceCache: make(map[string]svrKeyspaceEntry),\n\t\tendPointsCache: make(map[string]endPointsEntry),\n\t}\n}\n\nfunc (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(cell string) ([]string, error) {\n\t\/\/ try the cache first\n\tkey := cell\n\tnow := time.Now()\n\tserver.mu.Lock()\n\tentry, ok := server.svrKeyspaceNamesCache[key]\n\tserver.mu.Unlock()\n\tif ok && now.Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetSrvKeyspaceNames(cell)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetSrvKeyspaceNames(%v) failed: %v (returning cached value)\", cell, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetSrvKeyspaceNames(%v) failed: %v (no cached value, returning error)\", cell, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.svrKeyspaceNamesCache[key] = svrKeyspaceNamesEntry{\n\t\tinsertionTime: now,\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n\nfunc (server *ResilientSrvTopoServer) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) {\n\t\/\/ try the cache first\n\tkey := cell + \":\" + keyspace\n\tnow := time.Now()\n\tserver.mu.Lock()\n\tentry, ok := server.srvKeyspaceCache[key]\n\tserver.mu.Unlock()\n\tif ok && now.Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetSrvKeyspace(cell, keyspace)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetSrvKeyspace(%v, %v) failed: %v (returning cached value)\", cell, keyspace, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetSrvKeyspace(%v, %v) failed: %v (no cached value, returning error)\", cell, keyspace, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.srvKeyspaceCache[key] = svrKeyspaceEntry{\n\t\tinsertionTime: time.Now(),\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n\nfunc (server *ResilientSrvTopoServer) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {\n\t\/\/ try the cache first\n\tkey := cell + \":\" + keyspace + \":\" + shard + \":\" + string(tabletType)\n\tnow := time.Now()\n\tserver.mu.Lock()\n\tentry, ok := server.endPointsCache[key]\n\tserver.mu.Unlock()\n\tif ok && now.Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetEndPoints(cell, keyspace, shard, tabletType)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetEndPoints(%v, %v%, v, %v) failed: %v (returning cached value)\", cell, keyspace, shard, tabletType, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetEndPoints(%v, %v, %v, %v) failed: %v (no cached value, returning error)\", cell, keyspace, shard, tabletType, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.endPointsCache[key] = endPointsEntry{\n\t\tinsertionTime: time.Now(),\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n<commit_msg>Small fix to use time.Now() all the time.<commit_after>\/\/ Copyright 2013, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage vtgate\n\nimport (\n\t\"flag\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n)\n\nvar (\n\tsrvTopoCacheTTL = flag.Duration(\"-srv_topo_cache_ttl\", 1*time.Second, \"how long to use cached entries for topology\")\n)\n\n\/\/ SrvTopoServer is a subset of topo.Server that only contains the serving\n\/\/ graph read-only calls used by clients to resolve serving addresses.\ntype SrvTopoServer interface {\n\tGetSrvKeyspaceNames(cell string) ([]string, error)\n\n\tGetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error)\n\n\tGetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error)\n}\n\n\/\/ ResilientSrvTopoServer is an implementation of SrvTopoServer based\n\/\/ on another SrvTopoServer that uses a cache for two purposes:\n\/\/ - limit the QPS to the underlying SrvTopoServer\n\/\/ - return the last known value of the data if there is an error\ntype ResilientSrvTopoServer struct {\n\tToposerv SrvTopoServer\n\n\t\/\/ mutex protects the cache\n\tmu sync.Mutex\n\tsvrKeyspaceNamesCache map[string]svrKeyspaceNamesEntry\n\tsrvKeyspaceCache map[string]svrKeyspaceEntry\n\tendPointsCache map[string]endPointsEntry\n}\n\ntype svrKeyspaceNamesEntry struct {\n\tinsertionTime time.Time\n\tvalue []string\n}\n\ntype svrKeyspaceEntry struct {\n\tinsertionTime time.Time\n\tvalue *topo.SrvKeyspace\n}\n\ntype endPointsEntry struct {\n\tinsertionTime time.Time\n\tvalue *topo.EndPoints\n}\n\n\/\/ NewResilientSrvTopoServer creates a new ResilientSrvTopoServer\n\/\/ based on the provided SrvTopoServer.\nfunc NewResilientSrvTopoServer(base SrvTopoServer) *ResilientSrvTopoServer {\n\treturn &ResilientSrvTopoServer{\n\t\tToposerv: base,\n\t\tsvrKeyspaceNamesCache: make(map[string]svrKeyspaceNamesEntry),\n\t\tsrvKeyspaceCache: make(map[string]svrKeyspaceEntry),\n\t\tendPointsCache: make(map[string]endPointsEntry),\n\t}\n}\n\nfunc (server *ResilientSrvTopoServer) GetSrvKeyspaceNames(cell string) ([]string, error) {\n\t\/\/ try the cache first\n\tkey := cell\n\tserver.mu.Lock()\n\tentry, ok := server.svrKeyspaceNamesCache[key]\n\tserver.mu.Unlock()\n\tif ok && time.Now().Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetSrvKeyspaceNames(cell)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetSrvKeyspaceNames(%v) failed: %v (returning cached value)\", cell, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetSrvKeyspaceNames(%v) failed: %v (no cached value, returning error)\", cell, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.svrKeyspaceNamesCache[key] = svrKeyspaceNamesEntry{\n\t\tinsertionTime: time.Now(),\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n\nfunc (server *ResilientSrvTopoServer) GetSrvKeyspace(cell, keyspace string) (*topo.SrvKeyspace, error) {\n\t\/\/ try the cache first\n\tkey := cell + \":\" + keyspace\n\tserver.mu.Lock()\n\tentry, ok := server.srvKeyspaceCache[key]\n\tserver.mu.Unlock()\n\tif ok && time.Now().Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetSrvKeyspace(cell, keyspace)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetSrvKeyspace(%v, %v) failed: %v (returning cached value)\", cell, keyspace, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetSrvKeyspace(%v, %v) failed: %v (no cached value, returning error)\", cell, keyspace, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.srvKeyspaceCache[key] = svrKeyspaceEntry{\n\t\tinsertionTime: time.Now(),\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n\nfunc (server *ResilientSrvTopoServer) GetEndPoints(cell, keyspace, shard string, tabletType topo.TabletType) (*topo.EndPoints, error) {\n\t\/\/ try the cache first\n\tkey := cell + \":\" + keyspace + \":\" + shard + \":\" + string(tabletType)\n\tserver.mu.Lock()\n\tentry, ok := server.endPointsCache[key]\n\tserver.mu.Unlock()\n\tif ok && time.Now().Sub(entry.insertionTime) < *srvTopoCacheTTL {\n\t\treturn entry.value, nil\n\t}\n\n\t\/\/ not in cache or too old, get the real value\n\tresult, err := server.Toposerv.GetEndPoints(cell, keyspace, shard, tabletType)\n\tif err != nil {\n\t\tif ok {\n\t\t\tlog.Warningf(\"GetEndPoints(%v, %v%, v, %v) failed: %v (returning cached value)\", cell, keyspace, shard, tabletType, err)\n\t\t\treturn entry.value, nil\n\t\t} else {\n\t\t\tlog.Warningf(\"GetEndPoints(%v, %v, %v, %v) failed: %v (no cached value, returning error)\", cell, keyspace, shard, tabletType, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ save the last value in the cache\n\tserver.mu.Lock()\n\tserver.endPointsCache[key] = endPointsEntry{\n\t\tinsertionTime: time.Now(),\n\t\tvalue: result,\n\t}\n\tserver.mu.Unlock()\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ errorcheck -0 -race\n\npackage foo\n\nconst benchmarkNumNodes = 10000\n\nfunc BenchmarkUpdateNodeTransaction(b B) {\n\ts, nodeIDs := setupNodes(benchmarkNumNodes)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N(); i++ {\n\t\t_ = s.Update(func(tx1 Tx) error {\n\t\t\t_ = UpdateNode(tx1, &Node{\n\t\t\t\tID: nodeIDs[i%benchmarkNumNodes],\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n}\n\ntype B interface {\n\tResetTimer()\n\tN() int\n}\n\ntype Tx interface {\n}\n\ntype Node struct {\n\tID string\n}\n\ntype MemoryStore struct {\n}\n\n\/\/ go:noinline\nfunc setupNodes(n int) (s *MemoryStore, nodeIDs []string) {\n\treturn\n}\n\n\/\/go:noinline\nfunc (s *MemoryStore) Update(cb func(Tx) error) error {\n\treturn nil\n}\n\nvar sink interface{}\n\n\/\/go:noinline\nfunc UpdateNode(tx Tx, n *Node) error {\n\tsink = tx\n\tsink = n\n\treturn nil\n}\n<commit_msg>test: add missing copyright notice<commit_after>\/\/ errorcheck -0 -race\n\n\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage foo\n\nconst benchmarkNumNodes = 10000\n\nfunc BenchmarkUpdateNodeTransaction(b B) {\n\ts, nodeIDs := setupNodes(benchmarkNumNodes)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N(); i++ {\n\t\t_ = s.Update(func(tx1 Tx) error {\n\t\t\t_ = UpdateNode(tx1, &Node{\n\t\t\t\tID: nodeIDs[i%benchmarkNumNodes],\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n}\n\ntype B interface {\n\tResetTimer()\n\tN() int\n}\n\ntype Tx interface {\n}\n\ntype Node struct {\n\tID string\n}\n\ntype MemoryStore struct {\n}\n\n\/\/ go:noinline\nfunc setupNodes(n int) (s *MemoryStore, nodeIDs []string) {\n\treturn\n}\n\n\/\/go:noinline\nfunc (s *MemoryStore) Update(cb func(Tx) error) error {\n\treturn nil\n}\n\nvar sink interface{}\n\n\/\/go:noinline\nfunc UpdateNode(tx Tx, n *Node) error {\n\tsink = tx\n\tsink = n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ buildrun -t 30\n\n\/\/ +build !js\n\n\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Ensure that runtime traceback does not infinite loop for\n\/\/ the testcase below.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\nconst prog = `\n\npackage main\n\nimport \"context\"\n\nvar gpi *int\n\ntype nAO struct {\n\teE bool\n}\n\ntype NAO func(*nAO)\n\nfunc WEA() NAO {\n\treturn func(o *nAO) { o.eE = true }\n}\n\ntype R struct {\n\tcM *CM\n}\n\ntype CM int\n\ntype A string\n\nfunc (m *CM) NewA(ctx context.Context, cN string, nn *nAO, opts ...NAO) (*A, error) {\n\tfor _, o := range opts {\n\t\to(nn)\n\t}\n\ts := A(\"foo\")\n\treturn &s, nil\n}\n\nfunc (r *R) CA(ctx context.Context, cN string, nn *nAO) (*int, error) {\n\tcA, err := r.cM.NewA(ctx, cN, nn, WEA(), WEA())\n\tif err == nil {\n\t\treturn nil, err\n\t}\n\tprintln(cA)\n\tx := int(42)\n\treturn &x, nil\n}\n\nfunc main() {\n\tc := CM(1)\n\tr := R{cM: &c}\n\tvar ctx context.Context\n\tnnr := nAO{}\n\tpi, err := r.CA(ctx, \"foo\", nil)\n\tif err != nil {\n\t\tpanic(\"bad\")\n\t}\n\tprintln(nnr.eE)\n\tgpi = pi\n}\n`\n\nfunc main() {\n\tdir, err := ioutil.TempDir(\"\", \"46234\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tfile := filepath.Join(dir, \"main.go\")\n\tif err := ioutil.WriteFile(file, []byte(prog), 0655); err != nil {\n\t\tlog.Fatalf(\"Write error %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"run\", file)\n\toutput, err := cmd.CombinedOutput()\n\tif err == nil {\n\t\tlog.Fatalf(\"Passed, expected an error\")\n\t}\n\n\twant := []byte(\"segmentation violation\")\n\tif !bytes.Contains(output, want) {\n\t\tlog.Fatalf(\"Unmatched error message %q:\\nin\\n%s\\nError: %v\", want, output, err)\n\t}\n}\n<commit_msg>test: check portable error message on issue46234.go<commit_after>\/\/ buildrun -t 30\n\n\/\/ +build !js\n\n\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Ensure that runtime traceback does not infinite loop for\n\/\/ the testcase below.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\nconst prog = `\n\npackage main\n\nimport \"context\"\n\nvar gpi *int\n\ntype nAO struct {\n\teE bool\n}\n\ntype NAO func(*nAO)\n\nfunc WEA() NAO {\n\treturn func(o *nAO) { o.eE = true }\n}\n\ntype R struct {\n\tcM *CM\n}\n\ntype CM int\n\ntype A string\n\nfunc (m *CM) NewA(ctx context.Context, cN string, nn *nAO, opts ...NAO) (*A, error) {\n\tfor _, o := range opts {\n\t\to(nn)\n\t}\n\ts := A(\"foo\")\n\treturn &s, nil\n}\n\nfunc (r *R) CA(ctx context.Context, cN string, nn *nAO) (*int, error) {\n\tcA, err := r.cM.NewA(ctx, cN, nn, WEA(), WEA())\n\tif err == nil {\n\t\treturn nil, err\n\t}\n\tprintln(cA)\n\tx := int(42)\n\treturn &x, nil\n}\n\nfunc main() {\n\tc := CM(1)\n\tr := R{cM: &c}\n\tvar ctx context.Context\n\tnnr := nAO{}\n\tpi, err := r.CA(ctx, \"foo\", nil)\n\tif err != nil {\n\t\tpanic(\"bad\")\n\t}\n\tprintln(nnr.eE)\n\tgpi = pi\n}\n`\n\nfunc main() {\n\tdir, err := ioutil.TempDir(\"\", \"46234\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tfile := filepath.Join(dir, \"main.go\")\n\tif err := ioutil.WriteFile(file, []byte(prog), 0655); err != nil {\n\t\tlog.Fatalf(\"Write error %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"run\", file)\n\toutput, err := cmd.CombinedOutput()\n\tif err == nil {\n\t\tlog.Fatalf(\"Passed, expected an error\")\n\t}\n\n\twant := []byte(\"nil pointer dereference\")\n\tif !bytes.Contains(output, want) {\n\t\tlog.Fatalf(\"Unmatched error message %q:\\nin\\n%s\\nError: %v\", want, output, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/jmartin82\/mmock\/console\"\n\t\"github.com\/jmartin82\/mmock\/definition\"\n\t\"github.com\/jmartin82\/mmock\/definition\/parser\"\n\t\"github.com\/jmartin82\/mmock\/match\"\n\t\"github.com\/jmartin82\/mmock\/scenario\"\n\t\"github.com\/jmartin82\/mmock\/server\"\n\t\"github.com\/jmartin82\/mmock\/statistics\"\n\t\"github.com\/jmartin82\/mmock\/translate\"\n\t\"github.com\/jmartin82\/mmock\/vars\"\n\t\"github.com\/jmartin82\/mmock\/vars\/fakedata\"\n)\n\n\/\/VERSION of the application\nconst VERSION string = \"2.1.0\"\n\n\/\/ErrNotFoundPath error from missing or configuration path\nvar ErrNotFoundPath = errors.New(\"Configuration path not found\")\n\n\/\/ErrNotFoundDefaultPath if we can't resolve the current path\nvar ErrNotFoundDefaultPath = errors.New(\"We can't determinate the current path\")\n\n\/\/ErrNotFoundAnyMock when we don't found any valid mock definition to load\nvar ErrNotFoundAnyMock = errors.New(\"No valid mock definition found\")\n\nfunc banner() {\n\tfmt.Printf(\"MMock v %s\", VERSION)\n\tfmt.Println(\"\")\n\n\tfmt.Print(\n\t\t`\t\t.---. .---.\n : : o : me want request!\n _..-: o : :-.._ \/\n .-'' ' ` + \"`\" + `---' ` + \"`\" + `---' \" ` + \"`\" + `` + \"`\" + `-.\n .' \" ' \" . \" . ' \" ` + \"`\" + `.\n : '.---.,,.,...,.,.,.,..---. ' ;\n ` + \"`\" + `. \" ` + \"`\" + `. .' \" .'\n ` + \"`\" + `. '` + \"`\" + `. .' ' .'\n ` + \"`\" + `. ` + \"`\" + `-._ _.-' \" .' .----.\n ` + \"`\" + `. \" '\"--...--\"' . ' .' .' o ` + \"`\" + `.\n .'` + \"`\" + `-._' \" . \" _.-'` + \"`\" + `. : o :\n .' ` + \"`\" + `` + \"`\" + `` + \"`\" + `--.....--''' ' ` + \"`\" + `:_ o :\n .' \" ' \" \" ; ` + \"`\" + `.;\";\";\";'\n ; ' \" ' . ; .' ; ; ;\n ; ' ' ' \" .' .-'\n ' \" \" ' \" \" _.-'\n `)\n\tfmt.Println(\"\")\n}\n\n\/\/ Get preferred outbound ip of this machine\nfunc getOutboundIP() string {\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tif err != nil {\n\t\treturn \"127.0.0.1\"\n\t}\n\tdefer conn.Close()\n\n\tlog.Println(\"Getting external IP\")\n\tlocalAddr := conn.LocalAddr().String()\n\tidx := strings.LastIndex(localAddr, \":\")\n\n\treturn localAddr[0:idx]\n}\n\nfunc getMatchSpy(checker match.Checker, matchStore match.Store) match.Spier {\n\treturn match.NewSpy(checker, matchStore)\n}\n\nfunc existsConfigPath(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc getMapping(path string) definition.Mapping {\n\tpath, _ = filepath.Abs(path)\n\tif !existsConfigPath(path) {\n\t\tlog.Fatalf(ErrNotFoundPath.Error())\n\t}\n\n\tconfigMapper := definition.NewConfigMapper()\n\n\tconfigMapper.AddConfigParser(parser.JSONReader{})\n\tconfigMapper.AddConfigParser(parser.YAMLReader{})\n\n\tfsUpdate := make(chan struct{})\n\twatcher := definition.NewFileWatcher(path, fsUpdate)\n\twatcher.Bind()\n\n\treturn definition.NewConfigMapping(path, configMapper, fsUpdate)\n}\n\nfunc getRouter(mapping definition.Mapping, checker match.Checker) *server.Router {\n\trouter := server.NewRouter(mapping, checker)\n\treturn router\n}\n\nfunc getVarsProcessor() vars.Processor {\n\n\treturn vars.Processor{FillerFactory: vars.MockFillerFactory{FakeAdapter: fakedata.FakeAdapter{}}}\n}\n\nfunc startServer(ip string, port, portTLS int, configTLS string, done chan bool, router server.Resolver, mLog chan definition.Match, scenario scenario.Director, varsProcessor vars.Processor, spier match.Spier) {\n\tdispatcher := server.Dispatcher{\n\t\tIP: ip,\n\t\tPort: port,\n\t\tPortTLS: portTLS,\n\t\tConfigTLS: configTLS,\n\t\tResolver: router,\n\t\tTranslator: translate.HTTP{},\n\t\tProcessor: varsProcessor,\n\t\tScenario: scenario,\n\t\tSpier: spier,\n\t\tMlog: mLog,\n\t}\n\tdispatcher.Start()\n\tdone <- true\n}\nfunc startConsole(ip string, port int, spy match.Spier, scenario scenario.Director, mapping definition.Mapping, done chan bool, mLog chan definition.Match) {\n\tdispatcher := console.Dispatcher{\n\t\tIP: ip,\n\t\tPort: port,\n\t\tMatchSpy: spy,\n\t\tScenario: scenario,\n\t\tMapping: mapping,\n\t\tMlog: mLog}\n\tdispatcher.Start()\n\tdone <- true\n}\n\nfunc main() {\n\tbanner()\n\toutIP := getOutboundIP()\n\tpath, err := filepath.Abs(\".\/config\")\n\tTLS, err := filepath.Abs(\".\/tls\")\n\tif err != nil {\n\t\tpanic(ErrNotFoundDefaultPath)\n\t}\n\n\tsIP := flag.String(\"server-ip\", outIP, \"Mock server IP\")\n\tsPort := flag.Int(\"server-port\", 8083, \"Mock server Port\")\n\tsPortTLS := flag.Int(\"server-tls-port\", 8084, \"Mock server TLS Port\")\n\tsStatistics := flag.Bool(\"server-statistics\", true, \"Mock server sends anonymous statistics\")\n\tcIP := flag.String(\"console-ip\", outIP, \"Console server IP\")\n\tcPort := flag.Int(\"console-port\", 8082, \"Console server Port\")\n\tconsole := flag.Bool(\"console\", true, \"Console enabled (true\/false)\")\n\tcPath := flag.String(\"config-path\", path, \"Mocks definition folder\")\n\tcTLS := flag.String(\"tls-path\", TLS, \"TLS config folder (server.crt and server.key should be inside)\")\n\n\tflag.Parse()\n\n\t\/\/chanels\n\tmLog := make(chan definition.Match)\n\tdone := make(chan bool)\n\n\t\/\/shared structs\n\tscenario := scenario.NewMemoryStore()\n\tchecker := match.NewTester(scenario)\n\tmatchStore := match.NewMemoryStore()\n\n\tmapping := getMapping(*cPath)\n\tspy := getMatchSpy(checker, matchStore)\n\trouter := getRouter(mapping, checker)\n\tvarsProcessor := getVarsProcessor()\n\n\tif !(*sStatistics) {\n\t\tstatistics.SetMonitor(statistics.NewNullableMonitor())\n\t\tlog.Printf(\"Not sending statistics\\n\")\n\t}\n\tdefer statistics.Stop()\n\n\tgo startServer(*sIP, *sPort, *sPortTLS, *cTLS, done, router, mLog, scenario, varsProcessor, spy)\n\tlog.Printf(\"HTTP Server running at %s:%d\\n\", *sIP, *sPort)\n\tlog.Printf(\"HTTPS Server running at %s:%d\\n\", *sIP, *sPortTLS)\n\tif *console {\n\t\tgo startConsole(*cIP, *cPort, spy, scenario, mapping, done, mLog)\n\t\tlog.Printf(\"Console running at %s:%d\\n\", *cIP, *cPort)\n\t}\n\n\t<-done\n\n}\n<commit_msg>Increase the version.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/jmartin82\/mmock\/console\"\n\t\"github.com\/jmartin82\/mmock\/definition\"\n\t\"github.com\/jmartin82\/mmock\/definition\/parser\"\n\t\"github.com\/jmartin82\/mmock\/match\"\n\t\"github.com\/jmartin82\/mmock\/scenario\"\n\t\"github.com\/jmartin82\/mmock\/server\"\n\t\"github.com\/jmartin82\/mmock\/statistics\"\n\t\"github.com\/jmartin82\/mmock\/translate\"\n\t\"github.com\/jmartin82\/mmock\/vars\"\n\t\"github.com\/jmartin82\/mmock\/vars\/fakedata\"\n)\n\n\/\/VERSION of the application\nconst VERSION string = \"2.2.0\"\n\n\/\/ErrNotFoundPath error from missing or configuration path\nvar ErrNotFoundPath = errors.New(\"Configuration path not found\")\n\n\/\/ErrNotFoundDefaultPath if we can't resolve the current path\nvar ErrNotFoundDefaultPath = errors.New(\"We can't determinate the current path\")\n\n\/\/ErrNotFoundAnyMock when we don't found any valid mock definition to load\nvar ErrNotFoundAnyMock = errors.New(\"No valid mock definition found\")\n\nfunc banner() {\n\tfmt.Printf(\"MMock v %s\", VERSION)\n\tfmt.Println(\"\")\n\n\tfmt.Print(\n\t\t`\t\t.---. .---.\n : : o : me want request!\n _..-: o : :-.._ \/\n .-'' ' ` + \"`\" + `---' ` + \"`\" + `---' \" ` + \"`\" + `` + \"`\" + `-.\n .' \" ' \" . \" . ' \" ` + \"`\" + `.\n : '.---.,,.,...,.,.,.,..---. ' ;\n ` + \"`\" + `. \" ` + \"`\" + `. .' \" .'\n ` + \"`\" + `. '` + \"`\" + `. .' ' .'\n ` + \"`\" + `. ` + \"`\" + `-._ _.-' \" .' .----.\n ` + \"`\" + `. \" '\"--...--\"' . ' .' .' o ` + \"`\" + `.\n .'` + \"`\" + `-._' \" . \" _.-'` + \"`\" + `. : o :\n .' ` + \"`\" + `` + \"`\" + `` + \"`\" + `--.....--''' ' ` + \"`\" + `:_ o :\n .' \" ' \" \" ; ` + \"`\" + `.;\";\";\";'\n ; ' \" ' . ; .' ; ; ;\n ; ' ' ' \" .' .-'\n ' \" \" ' \" \" _.-'\n `)\n\tfmt.Println(\"\")\n}\n\n\/\/ Get preferred outbound ip of this machine\nfunc getOutboundIP() string {\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tif err != nil {\n\t\treturn \"127.0.0.1\"\n\t}\n\tdefer conn.Close()\n\n\tlog.Println(\"Getting external IP\")\n\tlocalAddr := conn.LocalAddr().String()\n\tidx := strings.LastIndex(localAddr, \":\")\n\n\treturn localAddr[0:idx]\n}\n\nfunc getMatchSpy(checker match.Checker, matchStore match.Store) match.Spier {\n\treturn match.NewSpy(checker, matchStore)\n}\n\nfunc existsConfigPath(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc getMapping(path string) definition.Mapping {\n\tpath, _ = filepath.Abs(path)\n\tif !existsConfigPath(path) {\n\t\tlog.Fatalf(ErrNotFoundPath.Error())\n\t}\n\n\tconfigMapper := definition.NewConfigMapper()\n\n\tconfigMapper.AddConfigParser(parser.JSONReader{})\n\tconfigMapper.AddConfigParser(parser.YAMLReader{})\n\n\tfsUpdate := make(chan struct{})\n\twatcher := definition.NewFileWatcher(path, fsUpdate)\n\twatcher.Bind()\n\n\treturn definition.NewConfigMapping(path, configMapper, fsUpdate)\n}\n\nfunc getRouter(mapping definition.Mapping, checker match.Checker) *server.Router {\n\trouter := server.NewRouter(mapping, checker)\n\treturn router\n}\n\nfunc getVarsProcessor() vars.Processor {\n\n\treturn vars.Processor{FillerFactory: vars.MockFillerFactory{FakeAdapter: fakedata.FakeAdapter{}}}\n}\n\nfunc startServer(ip string, port, portTLS int, configTLS string, done chan bool, router server.Resolver, mLog chan definition.Match, scenario scenario.Director, varsProcessor vars.Processor, spier match.Spier) {\n\tdispatcher := server.Dispatcher{\n\t\tIP: ip,\n\t\tPort: port,\n\t\tPortTLS: portTLS,\n\t\tConfigTLS: configTLS,\n\t\tResolver: router,\n\t\tTranslator: translate.HTTP{},\n\t\tProcessor: varsProcessor,\n\t\tScenario: scenario,\n\t\tSpier: spier,\n\t\tMlog: mLog,\n\t}\n\tdispatcher.Start()\n\tdone <- true\n}\nfunc startConsole(ip string, port int, spy match.Spier, scenario scenario.Director, mapping definition.Mapping, done chan bool, mLog chan definition.Match) {\n\tdispatcher := console.Dispatcher{\n\t\tIP: ip,\n\t\tPort: port,\n\t\tMatchSpy: spy,\n\t\tScenario: scenario,\n\t\tMapping: mapping,\n\t\tMlog: mLog}\n\tdispatcher.Start()\n\tdone <- true\n}\n\nfunc main() {\n\tbanner()\n\toutIP := getOutboundIP()\n\tpath, err := filepath.Abs(\".\/config\")\n\tTLS, err := filepath.Abs(\".\/tls\")\n\tif err != nil {\n\t\tpanic(ErrNotFoundDefaultPath)\n\t}\n\n\tsIP := flag.String(\"server-ip\", outIP, \"Mock server IP\")\n\tsPort := flag.Int(\"server-port\", 8083, \"Mock server Port\")\n\tsPortTLS := flag.Int(\"server-tls-port\", 8084, \"Mock server TLS Port\")\n\tsStatistics := flag.Bool(\"server-statistics\", true, \"Mock server sends anonymous statistics\")\n\tcIP := flag.String(\"console-ip\", outIP, \"Console server IP\")\n\tcPort := flag.Int(\"console-port\", 8082, \"Console server Port\")\n\tconsole := flag.Bool(\"console\", true, \"Console enabled (true\/false)\")\n\tcPath := flag.String(\"config-path\", path, \"Mocks definition folder\")\n\tcTLS := flag.String(\"tls-path\", TLS, \"TLS config folder (server.crt and server.key should be inside)\")\n\n\tflag.Parse()\n\n\t\/\/chanels\n\tmLog := make(chan definition.Match)\n\tdone := make(chan bool)\n\n\t\/\/shared structs\n\tscenario := scenario.NewMemoryStore()\n\tchecker := match.NewTester(scenario)\n\tmatchStore := match.NewMemoryStore()\n\n\tmapping := getMapping(*cPath)\n\tspy := getMatchSpy(checker, matchStore)\n\trouter := getRouter(mapping, checker)\n\tvarsProcessor := getVarsProcessor()\n\n\tif !(*sStatistics) {\n\t\tstatistics.SetMonitor(statistics.NewNullableMonitor())\n\t\tlog.Printf(\"Not sending statistics\\n\")\n\t}\n\tdefer statistics.Stop()\n\n\tgo startServer(*sIP, *sPort, *sPortTLS, *cTLS, done, router, mLog, scenario, varsProcessor, spy)\n\tlog.Printf(\"HTTP Server running at %s:%d\\n\", *sIP, *sPort)\n\tlog.Printf(\"HTTPS Server running at %s:%d\\n\", *sIP, *sPortTLS)\n\tif *console {\n\t\tgo startConsole(*cIP, *cPort, spy, scenario, mapping, done, mLog)\n\t\tlog.Printf(\"Console running at %s:%d\\n\", *cIP, *cPort)\n\t}\n\n\t<-done\n\n}\n<|endoftext|>"} {"text":"<commit_before>package clarifai\n\nimport (\n\t\"net\/http\"\n)\n\nconst (\n\t\/\/ Public pre-defined models.\n\t\/\/ Source: https:\/\/developer-preview.clarifai.com\/guide\/publicmodels#public-models\n\n\t\/\/ PublicModelGeneral is a public model \"general\".\n\tPublicModelGeneral = \"aaa03c23b3724a16a56b629203edc62c\"\n\n\t\/\/ PublicModelFood is a public model \"food\".\n\tPublicModelFood = \"bd367be194cf45149e75f01d59f77ba7\"\n\n\t\/\/ PublicModelTravel is a public model \"travel\".\n\tPublicModelTravel = \"eee28c313d69466f836ab83287a54ed9\"\n\n\t\/\/ PublicModelNSFW is a public model \"NSFW\" (Not Safe For Work).\n\tPublicModelNSFW = \"e9576d86d2004ed1a38ba0cf39ecb4b1\"\n\n\t\/\/ PublicModelWeddings is a public model \"weddings\".\n\tPublicModelWeddings = \"c386b7a870114f4a87477c0824499348\"\n\n\t\/\/ PublicModelColor is a public model \"color\".\n\tPublicModelColor = \"eeed0b6733a644cea07cf4c60f87ebb7\"\n)\n\ntype ModelRequest struct {\n\tModel Model `json:\"model\"`\n}\n\ntype Model struct {\n\tName *string `json:\"name,omitempty\"`\n\tID *string `json:\"id,omitempty\"`\n\tCreatedAt *string `json:\"created_at,omitempty\"`\n\tAppID *string `json:\"app_id,omitempty\"`\n\tOutputInfo *OutputInfo `json:\"output_info,omitempty\"`\n\tModelVersion *ModelVersion `json:\"model_version,omitempty\"`\n}\n\ntype OutputInfo struct {\n\tMessage string `json:\"message,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tOutputConfig *OutputConfig `json:\"output_config,omitempty\"`\n\tOutputData *OutputData `json:\"data,omitempty\"`\n}\n\ntype ModelVersion struct {\n\tID string `json:\"id\"`\n\tCreatedAt string `json:\"created_at\"`\n\tStatus *ServiceStatus `json:\"status\"`\n}\n\ntype OutputConfig struct {\n\tConceptsMutuallyExclusive bool `json:\"concepts_mutually_exclusive\"`\n\tClosedEnvironment bool `json:\"closed_environment\"`\n}\n\n\/\/ modelOptions is a model configuration object used to set optional settings for a new model.\ntype modelOptions struct {\n\tID string \/\/ Model ID. If not set, wil be generated automatically.\n\tConcepts []string \/\/ Optional concepts to associated with this model\n\tConceptsMutuallyExclusive bool \/\/ True or False, whether concepts are mutually exclusive\n\tClosedEnvironment bool \/\/ True or False, whether use negatives for prediction\n}\n\n\/\/ ModelQuery is a model search payload.\ntype ModelQuery struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n}\n\n\/\/ NewModelOptions returns default model options.\nfunc NewModelOptions() *modelOptions {\n\treturn &modelOptions{}\n}\n\n\/\/ Predict fetches prediction info for a provided asset from a given model.\nfunc (s *Session) Predict(i *Inputs) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/\"+i.modelID+\"\/outputs\")\n\tr.SetPayload(i)\n\n\treturn r\n}\n\n\/\/ CreateModel creates a new model. If ID is empty, it will be created automatically by Clarifai API.\nfunc (s *Session) CreateModel(name string, opt *modelOptions) *Request {\n\n\tp := ModelRequest{\n\t\tModel: Model{\n\t\t\tName: &name,\n\t\t},\n\t}\n\n\tif p.Model.OutputInfo == nil {\n\t\tp.Model.OutputInfo = &OutputInfo{\n\t\t\tOutputData: &OutputData{},\n\t\t\tOutputConfig: &OutputConfig{},\n\t\t}\n\t}\n\n\tif opt.ID != \"\" {\n\t\tp.Model.ID = &opt.ID\n\t}\n\n\tif len(opt.Concepts) > 0 {\n\t\tvar concepts []*OutputConcept\n\n\t\tfor _, id := range opt.Concepts {\n\t\t\tconcepts = append(concepts, &OutputConcept{\n\t\t\t\tID: id,\n\t\t\t})\n\t\t}\n\n\t\tp.Model.OutputInfo.OutputData.Concepts = concepts\n\t}\n\n\tif opt.Concepts != nil {\n\t\tp.Model.OutputInfo.OutputConfig.ConceptsMutuallyExclusive = opt.ConceptsMutuallyExclusive\n\t}\n\n\tif opt.Concepts != nil {\n\t\tp.Model.OutputInfo.OutputConfig.ClosedEnvironment = opt.ClosedEnvironment\n\t}\n\n\tr := NewRequest(s, http.MethodPost, \"models\")\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ AddModelConcepts adds new concepts to an existing model.\nfunc (s *Session) AddModelConcepts(ID string, c []string) *Request {\n\n\tr := NewRequest(s, http.MethodPatch, \"models\/\"+ID+\"\/output_info\/data\/concepts\")\n\n\tp := struct {\n\t\tConcepts []*OutputConcept `json:\"concepts\"`\n\t\tAction string `json:\"action\"`\n\t}{\n\t\tConcepts: sliceToConcepts(c),\n\t\tAction: \"merge_concepts\",\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ GetModels fetches a list of all models, including custom and public.\nfunc (s *Session) GetModels() *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\")\n}\n\n\/\/ GetModel fetches a single model by its ID.\nfunc (s *Session) GetModel(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID)\n}\n\n\/\/ GetModelOutput fetches a single model by its ID with output_info data.\nfunc (s *Session) GetModelOutput(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/output_info\")\n}\n\n\/\/ GetModelVersion fetches version data of a single model .\nfunc (s *Session) GetModelVersion(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+m+\"\/versions\/\"+v)\n}\n\n\/\/ GetModelVersionInputs fetches inputs used to train a specific model version.\nfunc (s *Session) GetModelVersionInputs(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+m+\"\/versions\/\"+v+\"\/inputs\")\n}\n\n\/\/ GetModelVersions fetches a single model by its ID with versions data.\nfunc (s *Session) GetModelVersions(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/versions\")\n}\n\n\/\/ GetModelInputs fetches inputs of a single model by its ID.\nfunc (s *Session) GetModelInputs(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/inputs\")\n}\n\n\/\/ DeleteModelVersion deletes a specific version of a model.\nfunc (s *Session) DeleteModelVersion(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\/\"+m+\"\/versions\/\"+v)\n}\n\n\/\/ DeleteModel deletes a single model by ID.\nfunc (s *Session) DeleteModel(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\/\"+ID)\n}\n\n\/\/ DeleteAllModels deletes all models associated with your application.\nfunc (s *Session) DeleteAllModels() *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\")\n}\n\n\/\/ TrainModel starts a model training operation.\n\/\/ When you train a model, you are telling the system to look at all the images with concepts you've provided and learn from them.\n\/\/ This train operation is asynchronous. It may take a few seconds for your model to be fully trained and ready.\nfunc (s *Session) TrainModel(ID string) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/\"+ID+\"\/versions\")\n\n\treturn r\n}\n\n\/\/ DeleteModelConcepts removes concepts from a model.\nfunc (s *Session) DeleteModelConcepts(ID string, c []string) *Request {\n\n\tr := NewRequest(s, http.MethodPatch, \"models\/\"+ID+\"\/output_info\/data\/concepts\")\n\n\tp := struct {\n\t\tConcepts []*OutputConcept `json:\"concepts\"`\n\t\tAction string `json:\"action\"`\n\t}{\n\t\tConcepts: sliceToConcepts(c),\n\t\tAction: \"delete_concepts\",\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ SearchModel searches models by name and\/or type.\nfunc (s *Session) SearchModel(n, t string) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/searches\")\n\n\tmq := ModelQuery{}\n\tif n != \"\" {\n\t\tmq.Name = n\n\t}\n\tif t != \"\" {\n\t\tmq.Type = t\n\t}\n\n\tp := struct {\n\t\tModelQuery ModelQuery `json:\"model_query\"`\n\t}{\n\t\tModelQuery: mq,\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ Convert a slice of concepts into a request format.\nfunc sliceToConcepts(c []string) []*OutputConcept {\n\tvar concepts []*OutputConcept\n\n\tfor _, id := range c {\n\t\tconcepts = append(concepts, &OutputConcept{\n\t\t\tID: id,\n\t\t})\n\t}\n\n\treturn concepts\n}\n<commit_msg>updated add models<commit_after>package clarifai\n\nimport (\n\t\"net\/http\"\n)\n\nconst (\n\t\/\/ Public pre-defined models.\n\t\/\/ Source: https:\/\/developer-preview.clarifai.com\/guide\/publicmodels#public-models\n\n\t\/\/ PublicModelGeneral is a public model \"general\".\n\tPublicModelGeneral = \"aaa03c23b3724a16a56b629203edc62c\"\n\n\t\/\/ PublicModelFood is a public model \"food\".\n\tPublicModelFood = \"bd367be194cf45149e75f01d59f77ba7\"\n\n\t\/\/ PublicModelTravel is a public model \"travel\".\n\tPublicModelTravel = \"eee28c313d69466f836ab83287a54ed9\"\n\n\t\/\/ PublicModelNSFW is a public model \"NSFW\" (Not Safe For Work).\n\tPublicModelNSFW = \"e9576d86d2004ed1a38ba0cf39ecb4b1\"\n\n\t\/\/ PublicModelWeddings is a public model \"weddings\".\n\tPublicModelWeddings = \"c386b7a870114f4a87477c0824499348\"\n\n\t\/\/ PublicModelColor is a public model \"color\".\n\tPublicModelColor = \"eeed0b6733a644cea07cf4c60f87ebb7\"\n)\n\ntype ModelRequest struct {\n\tModel Model `json:\"model\"`\n}\n\ntype Model struct {\n\tName *string `json:\"name,omitempty\"`\n\tID *string `json:\"id,omitempty\"`\n\tCreatedAt *string `json:\"created_at,omitempty\"`\n\tAppID *string `json:\"app_id,omitempty\"`\n\tOutputInfo *OutputInfo `json:\"output_info,omitempty\"`\n\tModelVersion *ModelVersion `json:\"model_version,omitempty\"`\n}\n\ntype OutputInfo struct {\n\tMessage string `json:\"message,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tOutputConfig *OutputConfig `json:\"output_config,omitempty\"`\n\tOutputData *OutputData `json:\"data,omitempty\"`\n}\n\ntype ModelVersion struct {\n\tID string `json:\"id\"`\n\tCreatedAt string `json:\"created_at\"`\n\tStatus *ServiceStatus `json:\"status\"`\n}\n\ntype OutputConfig struct {\n\tConceptsMutuallyExclusive bool `json:\"concepts_mutually_exclusive\"`\n\tClosedEnvironment bool `json:\"closed_environment\"`\n}\n\n\/\/ modelOptions is a model configuration object used to set optional settings for a new model.\ntype modelOptions struct {\n\tID string \/\/ Model ID. If not set, wil be generated automatically.\n\tConcepts []string \/\/ Optional concepts to associated with this model\n\tConceptsMutuallyExclusive bool \/\/ True or False, whether concepts are mutually exclusive\n\tClosedEnvironment bool \/\/ True or False, whether use negatives for prediction\n}\n\n\/\/ ModelQuery is a model search payload.\ntype ModelQuery struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n}\n\n\/\/ NewModelOptions returns default model options.\nfunc NewModelOptions() *modelOptions {\n\treturn &modelOptions{}\n}\n\n\/\/ Predict fetches prediction info for a provided asset from a given model.\nfunc (s *Session) Predict(i *Inputs) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/\"+i.modelID+\"\/outputs\")\n\tr.SetPayload(i)\n\n\treturn r\n}\n\n\/\/ CreateModel creates a new model. If ID is empty, it will be created automatically by Clarifai API.\nfunc (s *Session) CreateModel(name string, opt *modelOptions) *Request {\n\n\tp := ModelRequest{\n\t\tModel: Model{\n\t\t\tName: &name,\n\t\t},\n\t}\n\n\tif p.Model.OutputInfo == nil {\n\t\tp.Model.OutputInfo = &OutputInfo{\n\t\t\tOutputData: &OutputData{},\n\t\t\tOutputConfig: &OutputConfig{},\n\t\t}\n\t}\n\n\tif opt.ID != \"\" {\n\t\tp.Model.ID = &opt.ID\n\t}\n\n\tif len(opt.Concepts) > 0 {\n\t\tvar concepts []*OutputConcept\n\n\t\tfor _, id := range opt.Concepts {\n\t\t\tconcepts = append(concepts, &OutputConcept{\n\t\t\t\tID: id,\n\t\t\t})\n\t\t}\n\n\t\tp.Model.OutputInfo.OutputData.Concepts = concepts\n\t}\n\n\tif opt.Concepts != nil {\n\t\tp.Model.OutputInfo.OutputConfig.ConceptsMutuallyExclusive = opt.ConceptsMutuallyExclusive\n\t}\n\n\tif opt.Concepts != nil {\n\t\tp.Model.OutputInfo.OutputConfig.ClosedEnvironment = opt.ClosedEnvironment\n\t}\n\n\tr := NewRequest(s, http.MethodPost, \"models\")\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ AddModelConcepts adds new concepts to an existing model.\nfunc (s *Session) AddModelConcepts(ID string, c []string) *Request {\n\n\tr := NewRequest(s, http.MethodPatch, \"models\/\")\n\n\tp := struct {\n\t\tModels []Model `json:\"models\"`\n\t\tAction string `json:\"action\"`\n\t}{\n\t\tModels: []Model{\n\t\t\t{\n\t\t\t\tID: &ID,\n\t\t\t\tOutputInfo: &OutputInfo{\n\t\t\t\t\tOutputData: &OutputData{\n\t\t\t\t\t\tConcepts: sliceToConcepts(c),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tAction: \"merge\",\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ GetModels fetches a list of all models, including custom and public.\nfunc (s *Session) GetModels() *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\")\n}\n\n\/\/ GetModel fetches a single model by its ID.\nfunc (s *Session) GetModel(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID)\n}\n\n\/\/ GetModelOutput fetches a single model by its ID with output_info data.\nfunc (s *Session) GetModelOutput(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/output_info\")\n}\n\n\/\/ GetModelVersion fetches version data of a single model .\nfunc (s *Session) GetModelVersion(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+m+\"\/versions\/\"+v)\n}\n\n\/\/ GetModelVersionInputs fetches inputs used to train a specific model version.\nfunc (s *Session) GetModelVersionInputs(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+m+\"\/versions\/\"+v+\"\/inputs\")\n}\n\n\/\/ GetModelVersions fetches a single model by its ID with versions data.\nfunc (s *Session) GetModelVersions(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/versions\")\n}\n\n\/\/ GetModelInputs fetches inputs of a single model by its ID.\nfunc (s *Session) GetModelInputs(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodGet, \"models\/\"+ID+\"\/inputs\")\n}\n\n\/\/ DeleteModelVersion deletes a specific version of a model.\nfunc (s *Session) DeleteModelVersion(m, v string) *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\/\"+m+\"\/versions\/\"+v)\n}\n\n\/\/ DeleteModel deletes a single model by ID.\nfunc (s *Session) DeleteModel(ID string) *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\/\"+ID)\n}\n\n\/\/ DeleteAllModels deletes all models associated with your application.\nfunc (s *Session) DeleteAllModels() *Request {\n\n\treturn NewRequest(s, http.MethodDelete, \"models\")\n}\n\n\/\/ TrainModel starts a model training operation.\n\/\/ When you train a model, you are telling the system to look at all the images with concepts you've provided and learn from them.\n\/\/ This train operation is asynchronous. It may take a few seconds for your model to be fully trained and ready.\nfunc (s *Session) TrainModel(ID string) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/\"+ID+\"\/versions\")\n\n\treturn r\n}\n\n\/\/ DeleteModelConcepts removes concepts from a model.\nfunc (s *Session) DeleteModelConcepts(ID string, c []string) *Request {\n\n\tr := NewRequest(s, http.MethodPatch, \"models\/\"+ID+\"\/output_info\/data\/concepts\")\n\n\tp := struct {\n\t\tConcepts []*OutputConcept `json:\"concepts\"`\n\t\tAction string `json:\"action\"`\n\t}{\n\t\tConcepts: sliceToConcepts(c),\n\t\tAction: \"delete_concepts\",\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ SearchModel searches models by name and\/or type.\nfunc (s *Session) SearchModel(n, t string) *Request {\n\n\tr := NewRequest(s, http.MethodPost, \"models\/searches\")\n\n\tmq := ModelQuery{}\n\tif n != \"\" {\n\t\tmq.Name = n\n\t}\n\tif t != \"\" {\n\t\tmq.Type = t\n\t}\n\n\tp := struct {\n\t\tModelQuery ModelQuery `json:\"model_query\"`\n\t}{\n\t\tModelQuery: mq,\n\t}\n\n\tr.SetPayload(p)\n\n\treturn r\n}\n\n\/\/ Convert a slice of concepts into a request format.\nfunc sliceToConcepts(c []string) []*OutputConcept {\n\tvar concepts []*OutputConcept\n\n\tfor _, id := range c {\n\t\tconcepts = append(concepts, &OutputConcept{\n\t\t\tID: id,\n\t\t})\n\t}\n\n\treturn concepts\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package usl provides functionality to build Universal Scalability Law models\n\/\/ from sets of observed measurements.\npackage usl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com\/maorshutman\/lm\"\n)\n\nvar (\n\t\/\/ ErrInsufficientMeasurements is returned when fewer than 6 measurements were provided.\n\tErrInsufficientMeasurements = errors.New(\"usl: need at least 6 measurements\")\n)\n\n\/\/ Model is a Universal Scalability Law model.\ntype Model struct {\n\tSigma float64 \/\/ The model's coefficient of contention, σ.\n\tKappa float64 \/\/ The model's coefficient of crosstalk\/coherency, κ.\n\tLambda float64 \/\/ The model's coefficient of performance, λ.\n}\n\nfunc (m *Model) String() string {\n\treturn fmt.Sprintf(\"Model{σ=%v,κ=%v,λ=%v}\", m.Sigma, m.Kappa, m.Lambda)\n}\n\n\/\/ ThroughputAtConcurrency returns the expected throughput given a number of concurrent events,\n\/\/ X(N).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 3\".\nfunc (m *Model) ThroughputAtConcurrency(n float64) float64 {\n\treturn (m.Lambda * n) \/ (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1)))\n}\n\n\/\/ LatencyAtConcurrency returns the expected mean latency given a number of concurrent events,\n\/\/ R(N).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 6\".\nfunc (m *Model) LatencyAtConcurrency(n float64) float64 {\n\treturn (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1))) \/ m.Lambda\n}\n\n\/\/ MaxConcurrency returns the maximum expected number of concurrent events the system can handle,\n\/\/ Nmax.\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 4\".\nfunc (m *Model) MaxConcurrency() float64 {\n\treturn math.Floor(math.Sqrt((1 - m.Sigma) \/ m.Kappa))\n}\n\n\/\/ MaxThroughput returns the maximum expected throughput the system can handle, Xmax.\nfunc (m Model) MaxThroughput() float64 {\n\treturn m.ThroughputAtConcurrency(m.MaxConcurrency())\n}\n\n\/\/ LatencyAtThroughput returns the expected mean latency given a throughput, R(X).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 8\".\nfunc (m *Model) LatencyAtThroughput(x float64) float64 {\n\treturn (m.Sigma - 1) \/ (m.Sigma*x - m.Lambda)\n}\n\n\/\/ ThroughputAtLatency returns the expected throughput given a mean latency, X(R).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 9\".\nfunc (m *Model) ThroughputAtLatency(r float64) float64 {\n\treturn (math.Sqrt(math.Pow(m.Sigma, 2)+math.Pow(m.Kappa, 2)+\n\t\t2*m.Kappa*(2*m.Lambda*r+m.Sigma-2)) - m.Kappa + m.Sigma) \/ (2.0 * m.Kappa * r)\n}\n\n\/\/ ConcurrencyAtLatency returns the expected number of concurrent events at a particular mean\n\/\/ latency, N(R).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 10\".\nfunc (m *Model) ConcurrencyAtLatency(r float64) float64 {\n\treturn (m.Kappa - m.Sigma +\n\t\tmath.Sqrt(math.Pow(m.Sigma, 2)+\n\t\t\tmath.Pow(m.Kappa, 2)+\n\t\t\t2*m.Kappa*((2*m.Lambda*r)+m.Sigma-2))) \/ (2 * m.Kappa)\n}\n\n\/\/ ConcurrencyAtThroughput returns the expected number of concurrent events at a particular\n\/\/ throughput, N(X).\nfunc (m *Model) ConcurrencyAtThroughput(x float64) float64 {\n\treturn m.LatencyAtThroughput(x) * x\n}\n\n\/\/ ContentionConstrained returns true if the system is constrained by contention.\nfunc (m *Model) ContentionConstrained() bool {\n\treturn m.Sigma > m.Kappa\n}\n\n\/\/ CoherencyConstrained returns true if the system is constrained by coherency costs.\nfunc (m *Model) CoherencyConstrained() bool {\n\treturn m.Sigma < m.Kappa\n}\n\n\/\/ Limitless returns true if the system is linearly scalable.\nfunc (m *Model) Limitless() bool {\n\treturn m.Kappa == 0\n}\n\nconst minMeasurements = 6\n\n\/\/ Build returns a model whose parameters are generated from the given measurements.\n\/\/\n\/\/ Finds a set of coefficients for the equation y = λx\/(1+σ(x-1)+κx(x-1)) which best fit the\n\/\/ observed values using unconstrained least-squares regression. The resulting values for λ, κ, and\n\/\/ σ are the parameters of the returned model.\nfunc Build(measurements []Measurement) (m *Model, err error) {\n\tif len(measurements) < minMeasurements {\n\t\treturn nil, ErrInsufficientMeasurements\n\t}\n\n\t\/\/ Calculate an initial guess at the model parameters.\n\tinit := []float64{0.1, 0.01, 0}\n\n\t\/\/ Use max(x\/n) as initial lambda.\n\tfor _, m := range measurements {\n\t\tv := m.Throughput \/ m.Concurrency\n\t\tif v > init[2] {\n\t\t\tinit[2] = v\n\t\t}\n\t}\n\n\t\/\/ Calculate the residuals of a possible model.\n\tf := func(dst, x []float64) {\n\t\tmodel := Model{Sigma: x[0], Kappa: x[1], Lambda: x[2]}\n\n\t\tfor i, v := range measurements {\n\t\t\tdst[i] = v.Throughput - model.ThroughputAtConcurrency(v.Concurrency)\n\t\t}\n\t}\n\tj := lm.NumJac{Func: f}\n\n\t\/\/ Formulate an LM problem.\n\tp := lm.LMProblem{\n\t\tDim: 3, \/\/ Three parameters in the model.\n\t\tSize: len(measurements), \/\/ Use all measurements to calculate residuals.\n\t\tFunc: f, \/\/ Reduce the residuals of model predictions to observations.\n\t\tJac: j.Jac, \/\/ Approximate the Jacobian by finite differences.\n\t\tInitParams: init, \/\/ Use our initial guesses at parameters.\n\t\tTau: 1e-6, \/\/ Need a non-zero initial damping factor.\n\t\tEps1: 1e-8, \/\/ Small but non-zero values here prevent singular matrices.\n\t\tEps2: 1e-8,\n\t}\n\n\t\/\/ Calculate the model parameters.\n\tresults, err := lm.LM(p, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to build model: %w\", err)\n\t}\n\n\t\/\/ Return the model.\n\treturn &Model{\n\t\tSigma: results.X[0],\n\t\tKappa: results.X[1],\n\t\tLambda: results.X[2],\n\t}, nil\n}\n<commit_msg>fmtfix<commit_after>\/\/ Package usl provides functionality to build Universal Scalability Law models\n\/\/ from sets of observed measurements.\npackage usl\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com\/maorshutman\/lm\"\n)\n\n\/\/ Model is a Universal Scalability Law model.\ntype Model struct {\n\tSigma float64 \/\/ The model's coefficient of contention, σ.\n\tKappa float64 \/\/ The model's coefficient of crosstalk\/coherency, κ.\n\tLambda float64 \/\/ The model's coefficient of performance, λ.\n}\n\nfunc (m *Model) String() string {\n\treturn fmt.Sprintf(\"Model{σ=%v,κ=%v,λ=%v}\", m.Sigma, m.Kappa, m.Lambda)\n}\n\n\/\/ ThroughputAtConcurrency returns the expected throughput given a number of concurrent events,\n\/\/ X(N).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 3\".\nfunc (m *Model) ThroughputAtConcurrency(n float64) float64 {\n\treturn (m.Lambda * n) \/ (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1)))\n}\n\n\/\/ LatencyAtConcurrency returns the expected mean latency given a number of concurrent events,\n\/\/ R(N).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 6\".\nfunc (m *Model) LatencyAtConcurrency(n float64) float64 {\n\treturn (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1))) \/ m.Lambda\n}\n\n\/\/ MaxConcurrency returns the maximum expected number of concurrent events the system can handle,\n\/\/ Nmax.\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 4\".\nfunc (m *Model) MaxConcurrency() float64 {\n\treturn math.Floor(math.Sqrt((1 - m.Sigma) \/ m.Kappa))\n}\n\n\/\/ MaxThroughput returns the maximum expected throughput the system can handle, Xmax.\nfunc (m Model) MaxThroughput() float64 {\n\treturn m.ThroughputAtConcurrency(m.MaxConcurrency())\n}\n\n\/\/ LatencyAtThroughput returns the expected mean latency given a throughput, R(X).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 8\".\nfunc (m *Model) LatencyAtThroughput(x float64) float64 {\n\treturn (m.Sigma - 1) \/ (m.Sigma*x - m.Lambda)\n}\n\n\/\/ ThroughputAtLatency returns the expected throughput given a mean latency, X(R).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 9\".\nfunc (m *Model) ThroughputAtLatency(r float64) float64 {\n\treturn (math.Sqrt(math.Pow(m.Sigma, 2)+math.Pow(m.Kappa, 2)+\n\t\t2*m.Kappa*(2*m.Lambda*r+m.Sigma-2)) - m.Kappa + m.Sigma) \/ (2.0 * m.Kappa * r)\n}\n\n\/\/ ConcurrencyAtLatency returns the expected number of concurrent events at a particular mean\n\/\/ latency, N(R).\n\/\/\n\/\/ See \"Practical Scalability Analysis with the Universal Scalability Law, Equation 10\".\nfunc (m *Model) ConcurrencyAtLatency(r float64) float64 {\n\treturn (m.Kappa - m.Sigma +\n\t\tmath.Sqrt(math.Pow(m.Sigma, 2)+\n\t\t\tmath.Pow(m.Kappa, 2)+\n\t\t\t2*m.Kappa*((2*m.Lambda*r)+m.Sigma-2))) \/ (2 * m.Kappa)\n}\n\n\/\/ ConcurrencyAtThroughput returns the expected number of concurrent events at a particular\n\/\/ throughput, N(X).\nfunc (m *Model) ConcurrencyAtThroughput(x float64) float64 {\n\treturn m.LatencyAtThroughput(x) * x\n}\n\n\/\/ ContentionConstrained returns true if the system is constrained by contention.\nfunc (m *Model) ContentionConstrained() bool {\n\treturn m.Sigma > m.Kappa\n}\n\n\/\/ CoherencyConstrained returns true if the system is constrained by coherency costs.\nfunc (m *Model) CoherencyConstrained() bool {\n\treturn m.Sigma < m.Kappa\n}\n\n\/\/ Limitless returns true if the system is linearly scalable.\nfunc (m *Model) Limitless() bool {\n\treturn m.Kappa == 0\n}\n\n\/\/ Build returns a model whose parameters are generated from the given measurements.\n\/\/\n\/\/ Finds a set of coefficients for the equation y = λx\/(1+σ(x-1)+κx(x-1)) which best fit the\n\/\/ observed values using unconstrained least-squares regression. The resulting values for λ, κ, and\n\/\/ σ are the parameters of the returned model.\nfunc Build(measurements []Measurement) (m *Model, err error) {\n\tif len(measurements) < minMeasurements {\n\t\treturn nil, ErrInsufficientMeasurements\n\t}\n\n\t\/\/ Calculate an initial guess at the model parameters.\n\tinit := []float64{0.1, 0.01, 0}\n\n\t\/\/ Use max(x\/n) as initial lambda.\n\tfor _, m := range measurements {\n\t\tv := m.Throughput \/ m.Concurrency\n\t\tif v > init[2] {\n\t\t\tinit[2] = v\n\t\t}\n\t}\n\n\t\/\/ Calculate the residuals of a possible model.\n\tf := func(dst, x []float64) {\n\t\tmodel := Model{Sigma: x[0], Kappa: x[1], Lambda: x[2]}\n\n\t\tfor i, v := range measurements {\n\t\t\tdst[i] = v.Throughput - model.ThroughputAtConcurrency(v.Concurrency)\n\t\t}\n\t}\n\tj := lm.NumJac{Func: f}\n\n\t\/\/ Formulate an LM problem.\n\tp := lm.LMProblem{\n\t\tDim: 3, \/\/ Three parameters in the model.\n\t\tSize: len(measurements), \/\/ Use all measurements to calculate residuals.\n\t\tFunc: f, \/\/ Reduce the residuals of model predictions to observations.\n\t\tJac: j.Jac, \/\/ Approximate the Jacobian by finite differences.\n\t\tInitParams: init, \/\/ Use our initial guesses at parameters.\n\t\tTau: 1e-6, \/\/ Need a non-zero initial damping factor.\n\t\tEps1: 1e-8, \/\/ Small but non-zero values here prevent singular matrices.\n\t\tEps2: 1e-8,\n\t}\n\n\t\/\/ Calculate the model parameters.\n\tresults, err := lm.LM(p, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to build model: %w\", err)\n\t}\n\n\t\/\/ Return the model.\n\treturn &Model{\n\t\tSigma: results.X[0],\n\t\tKappa: results.X[1],\n\t\tLambda: results.X[2],\n\t}, nil\n}\n\nconst (\n\t\/\/ minMeasurement is the smallest number of measurements from which a useful model can be\n\t\/\/ created.\n\tminMeasurements = 6\n)\n\nvar (\n\t\/\/ ErrInsufficientMeasurements is returned when fewer than 6 measurements were provided.\n\tErrInsufficientMeasurements = fmt.Errorf(\"usl: need at least %d measurements\", minMeasurements)\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/llgcode\/draw2d\/draw2dimg\"\n\t\"github.com\/llgcode\/draw2d\"\n\t\"github.com\/llgcode\/draw2d\/draw2dpdf\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"strconv\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/png\", handler)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/static\")))\n}\n\ntype pt struct {\n\tw float64\n\td float64\n\td_in float64\n}\n\nfunc computeD(r float64, R float64, D float64, phi_deg int, theta float64) float64 {\n\tphi := float64(phi_deg) \/ 360.0 * 2*math.Pi\n\tx_disp := (r*math.Sin(theta))\n\td := D\n\tif math.Abs(x_disp) < R {\n\t\td -= (math.Sqrt(R*R - x_disp*x_disp))\n\t}\n\t\n\tif phi_deg != 90 {\n\t\td += (r - r*math.Cos(theta)) \/ math.Tan(phi)\n\t}\n\treturn d\n}\n\nfunc computeIntersection(r float64, t float64, R float64, D float64, phi_deg int) (pts []pt) {\t\n\tfor w := float64(0.0); w < r*2*math.Pi; w += 0.025 {\n\t\ttheta := float64(w\/r);\n\t\td_out := computeD(r, R, D, phi_deg, theta);\n\t\td_in := computeD(r - t, R, D, phi_deg, theta);\n\t\tpts = append(pts, pt{w, d_out, d_in})\n\t}\n\treturn pts\n}\n\nfunc dumpText(writer http.ResponseWriter, pts []pt) {\n\tfor _, p := range pts {\n\t\tio.WriteString(writer, fmt.Sprintf(\"%f %f\\n\", p.w, p.d));\n\t}\n}\n\nfunc DrawPattern(gc draw2d.GraphicContext,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt,\n\tscale float64) {\n\tgc.SetStrokeColor(color.Gray{0x00})\n\tgc.SetLineWidth(0.02 * scale)\n\n\t\/\/ outside profile:\n\tgc.MoveTo(0, 0)\n\tfor _,p := range pts {\n\t\tgc.LineTo(p.d * scale, p.w * scale)\n\t}\n\tgc.Stroke()\n\n\t\/\/ inside profile:\n\tgc.SetStrokeColor(color.Gray{0xaa})\n\tgc.MoveTo(0, 0)\n\tfor _,p := range pts {\n\t\tgc.LineTo(p.d_in * scale, p.w * scale)\n\t}\n\tgc.Stroke()\n\n\n\t\/\/ quarter rotation lines:\n\tgc.SetStrokeColor(color.Gray{0x00})\n\tfor i := float64(1.0); i <= 4; i++ {\n\t\ttheta := float64(math.Pi*i\/2.0)\n\t\tquarter_d := computeD(r, R, D, int(phi_deg), theta)\n\t\tquarter_w := theta*r\n\t\tgc.MoveTo(0, quarter_w * scale)\n\t\tgc.LineTo(quarter_d * scale, quarter_w * scale)\n\t\t\n\t}\n\tgc.Stroke()\n}\n\nfunc dumpPng(\n\twriter http.ResponseWriter,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt) {\n\n\tdest := image.NewRGBA(image.Rect(0, 0, 1000, 1000))\n\tgc := draw2dimg.NewGraphicContext(dest)\n\n\tDrawPattern(gc,r,R,D,phi_deg,pts, 100.0)\n\n\t\/\/ text output does not appear to be portable across PNG\/PDF\n\tdraw2d.SetFontFolder(\".\")\n\tgc.SetFontData(draw2d.FontData{\"go\", draw2d.FontFamilyMono, draw2d.FontStyleBold})\n\tgc.SetFontSize(12)\n\tgc.SetFillColor(color.Gray{0x00})\n\tgc.FillStringAt(fmt.Sprintf(\"Edge of pattern is %0.2fin from center of tube\", D),\n\t\t10, 30)\n\tgc.Fill()\n\n\tpng.Encode(writer, dest)\n}\n\nfunc dumpPdf(\n\twriter http.ResponseWriter,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt) {\n\t\n\tdest := draw2dpdf.NewPdf(\"L\", \"in\", \"Letter\")\n\tgc := draw2dpdf.NewGraphicContext(dest)\n\n\tdest.SetFont(\"Courier\", \"B\", 12)\n\tdest.Cell(0.5, 0.5, fmt.Sprintf(\"Edge of pattern is %0.2fin from center of tube\", D))\n\n\tDrawPattern(gc,r,R,D,phi_deg,pts,1.0)\n\te := dest.Output(writer)\n\tif e != nil {\n\t\tfmt.Fprint(writer, \"Error: \", e);\n\t}\n}\n\nfunc handler(writer http.ResponseWriter, req *http.Request) {\n\tif (req.ParseForm() != nil) {\n\t\tfmt.Fprint(writer, \"Error parsing request\")\n\t\treturn\n\t}\n\n\tR,_ := strconv.ParseFloat(req.FormValue(\"R\"), 64)\n\tr,_ := strconv.ParseFloat(req.FormValue(\"r\"), 64)\n\tphi_deg,_ := strconv.ParseFloat(req.FormValue(\"phi\"), 64)\n\tthick,_ := strconv.ParseFloat(req.FormValue(\"t\"), 64);\n\tformat := req.FormValue(\"f\");\n\n\tR = R\/2;\n\tr = r\/2;\n\t\n\tD := math.Trunc(R + 4)\n\tpts := computeIntersection(r, thick, R, D, int(phi_deg))\n\n\tif format == \"text\" {\n\t\tdumpText(writer, pts)\n\t} else if format == \"png\" {\n\t\tdumpPng(writer, r, R, D, int(phi_deg), pts)\n\t} else if format == \"pdf\" {\n\t\tdumpPdf(writer, r, R, D, int(phi_deg), pts)\n\t}\n\n}\n<commit_msg>Mime types<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/llgcode\/draw2d\/draw2dimg\"\n\t\"github.com\/llgcode\/draw2d\"\n\t\"github.com\/llgcode\/draw2d\/draw2dpdf\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"strconv\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/png\", handler)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/static\")))\n}\n\ntype pt struct {\n\tw float64\n\td float64\n\td_in float64\n}\n\nfunc computeD(r float64, R float64, D float64, phi_deg int, theta float64) float64 {\n\tphi := float64(phi_deg) \/ 360.0 * 2*math.Pi\n\tx_disp := (r*math.Sin(theta))\n\td := D\n\tif math.Abs(x_disp) < R {\n\t\td -= (math.Sqrt(R*R - x_disp*x_disp))\n\t}\n\t\n\tif phi_deg != 90 {\n\t\td += (r - r*math.Cos(theta)) \/ math.Tan(phi)\n\t}\n\treturn d\n}\n\nfunc computeIntersection(r float64, t float64, R float64, D float64, phi_deg int) (pts []pt) {\t\n\tfor w := float64(0.0); w < r*2*math.Pi; w += 0.025 {\n\t\ttheta := float64(w\/r);\n\t\td_out := computeD(r, R, D, phi_deg, theta);\n\t\td_in := computeD(r - t, R, D, phi_deg, theta);\n\t\tpts = append(pts, pt{w, d_out, d_in})\n\t}\n\treturn pts\n}\n\nfunc dumpText(writer http.ResponseWriter, pts []pt) {\n\tfor _, p := range pts {\n\t\tio.WriteString(writer, fmt.Sprintf(\"%f %f\\n\", p.w, p.d));\n\t}\n}\n\nfunc DrawPattern(gc draw2d.GraphicContext,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt,\n\tscale float64) {\n\tgc.SetStrokeColor(color.Gray{0x00})\n\tgc.SetLineWidth(0.02 * scale)\n\n\t\/\/ outside profile:\n\tgc.MoveTo(0, 0)\n\tfor _,p := range pts {\n\t\tgc.LineTo(p.d * scale, p.w * scale)\n\t}\n\tgc.Stroke()\n\n\t\/\/ inside profile:\n\tgc.SetStrokeColor(color.Gray{0xaa})\n\tgc.MoveTo(0, 0)\n\tfor _,p := range pts {\n\t\tgc.LineTo(p.d_in * scale, p.w * scale)\n\t}\n\tgc.Stroke()\n\n\n\t\/\/ quarter rotation lines:\n\tgc.SetStrokeColor(color.Gray{0x00})\n\tfor i := float64(1.0); i <= 4; i++ {\n\t\ttheta := float64(math.Pi*i\/2.0)\n\t\tquarter_d := computeD(r, R, D, int(phi_deg), theta)\n\t\tquarter_w := theta*r\n\t\tgc.MoveTo(0, quarter_w * scale)\n\t\tgc.LineTo(quarter_d * scale, quarter_w * scale)\n\t\t\n\t}\n\tgc.Stroke()\n}\n\nfunc dumpPng(\n\twriter http.ResponseWriter,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt) {\n\n\tdest := image.NewRGBA(image.Rect(0, 0, 1000, 1000))\n\tgc := draw2dimg.NewGraphicContext(dest)\n\n\tDrawPattern(gc,r,R,D,phi_deg,pts, 100.0)\n\n\t\/\/ text output does not appear to be portable across PNG\/PDF\n\tdraw2d.SetFontFolder(\".\")\n\tgc.SetFontData(draw2d.FontData{\"go\", draw2d.FontFamilyMono, draw2d.FontStyleBold})\n\tgc.SetFontSize(12)\n\tgc.SetFillColor(color.Gray{0x00})\n\tgc.FillStringAt(fmt.Sprintf(\"Edge of pattern is %0.2fin from center of tube\", D),\n\t\t10, 30)\n\tgc.Fill()\n\n\tpng.Encode(writer, dest)\n}\n\nfunc dumpPdf(\n\twriter http.ResponseWriter,\n\tr float64,\n\tR float64,\n\tD float64,\n\tphi_deg int,\n\tpts []pt) {\n\t\n\tdest := draw2dpdf.NewPdf(\"L\", \"in\", \"Letter\")\n\tgc := draw2dpdf.NewGraphicContext(dest)\n\n\tdest.SetFont(\"Courier\", \"B\", 12)\n\tdest.Cell(0.5, 0.5, fmt.Sprintf(\"Edge of pattern is %0.2fin from center of tube\", D))\n\n\tDrawPattern(gc,r,R,D,phi_deg,pts,1.0)\n\te := dest.Output(writer)\n\tif e != nil {\n\t\tfmt.Fprint(writer, \"Error: \", e);\n\t}\n}\n\nfunc handler(writer http.ResponseWriter, req *http.Request) {\n\tif (req.ParseForm() != nil) {\n\t\tfmt.Fprint(writer, \"Error parsing request\")\n\t\treturn\n\t}\n\n\tR,_ := strconv.ParseFloat(req.FormValue(\"R\"), 64)\n\tr,_ := strconv.ParseFloat(req.FormValue(\"r\"), 64)\n\tphi_deg,_ := strconv.ParseFloat(req.FormValue(\"phi\"), 64)\n\tthick,_ := strconv.ParseFloat(req.FormValue(\"t\"), 64);\n\tformat := req.FormValue(\"f\");\n\n\tR = R\/2;\n\tr = r\/2;\n\t\n\tD := math.Trunc(R + 4)\n\tpts := computeIntersection(r, thick, R, D, int(phi_deg))\n\n\tif format == \"text\" {\n\t\twriter.Header().Set(\"Content-type\", \"text\/plain\")\n\t\tdumpText(writer, pts)\n\t} else if format == \"png\" {\n\t\twriter.Header().Set(\"Content-type\", \"image\/png\")\n\t\tdumpPng(writer, r, R, D, int(phi_deg), pts)\n\t} else if format == \"pdf\" {\n\t\twriter.Header().Set(\"Content-type\", \"application\/pdf\")\n\t\tdumpPdf(writer, r, R, D, int(phi_deg), pts)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage backup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/fs\"\n\t\"github.com\/jacobsa\/comeback\/repr\"\n\t\"log\"\n\t\"path\"\n\t\"regexp\"\n)\n\n\/\/ An object that knows how to save directories to some underlying storage.\ntype DirectorySaver interface {\n\t\/\/ Recursively save the contents of the supplied directory (defined by a base\n\t\/\/ path of a backup and a relative path within the backup) to the underlying\n\t\/\/ storage, returning the score of a blob representing the directory's\n\t\/\/ listing in a format that can be recovered with repr.Unmarshal.\n\t\/\/\n\t\/\/ Recursively exclude from backup relative paths that match any of the\n\t\/\/ supplied exclusion regexps. This is *not* tested against the initial\n\t\/\/ relative path.\n\tSave(basePath, relPath string, exclusions []*regexp.Regexp) (blob.Score, error)\n}\n\n\/\/ A directory saver that creates a new directory saver for each call to Save.\n\/\/ This breaks a self-dependency that would be needed to make use of\n\/\/ NewNonRecursiveDirectorySaver.\ntype onDemandDirSaver struct {\n\tcreateSaver func(wrapped DirectorySaver) DirectorySaver\n}\n\nfunc (s *onDemandDirSaver) Save(\n\tbasePath,\n\trelPath string,\n\texclusions []*regexp.Regexp) (\n\tscore blob.Score,\n\terr error) {\n\treturn s.createSaver(s).Save(basePath, relPath, exclusions)\n}\n\n\/\/ Return a directory saver that makes use of the supplied dependencies.\nfunc NewDirectorySaver(\n\tblobStore blob.Store,\n\tfileSystem fs.FileSystem,\n\tfileSaver FileSaver) (DirectorySaver, error) {\n\tlinkResolver := NewLinkResolver()\n\tcreateSaver := func(wrapped DirectorySaver) DirectorySaver {\n\t\tsaver, err := NewNonRecursiveDirectorySaver(\n\t\t\tblobStore,\n\t\t\tfileSystem,\n\t\t\tfileSaver,\n\t\t\twrapped,\n\t\t\tlinkResolver)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn saver\n\t}\n\n\treturn &onDemandDirSaver{createSaver}, nil\n}\n\n\/\/ Equivalent to NewDirectorySaver, but with an injectable wrapped directory\n\/\/ saver and link resolver to aid with testability. You should not use this\n\/\/ function.\nfunc NewNonRecursiveDirectorySaver(\n\tstore blob.Store,\n\tfileSystem fs.FileSystem,\n\tfileSaver FileSaver,\n\twrapped DirectorySaver,\n\tlinkResolver LinkResolver) (DirectorySaver, error) {\n\treturn &dirSaver{\n\t\tblobStore: store,\n\t\tfileSystem: fileSystem,\n\t\tfileSaver: fileSaver,\n\t\twrapped: wrapped,\n\t\tlinkResolver: linkResolver,\n\t}, nil\n}\n\ntype dirSaver struct {\n\tblobStore blob.Store\n\tfileSystem fs.FileSystem\n\tfileSaver FileSaver\n\twrapped DirectorySaver\n\tlinkResolver LinkResolver\n}\n\nfunc (s *dirSaver) saveDir(\n\tbasePath string,\n\trelPath string,\n\texclusions []*regexp.Regexp,\n\tentry *fs.DirectoryEntry) (\n\t[]blob.Score,\n\terror) {\n\t\/\/ Recurse.\n\tscore, err := s.wrapped.Save(\n\t\tbasePath,\n\t\tpath.Join(relPath, entry.Name),\n\t\texclusions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn []blob.Score{score}, nil\n}\n\nfunc (s *dirSaver) saveFile(parent string, entry *fs.DirectoryEntry) ([]blob.Score, error) {\n\t\/\/ Defer to the file saver.\n\treturn s.fileSaver.Save(path.Join(parent, entry.Name))\n}\n\nfunc shouldExclude(exclusions []*regexp.Regexp, relPath string) bool {\n\tfor _, re := range exclusions {\n\t\tif re.MatchString(relPath) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *dirSaver) Save(\n\tbasePath,\n\trelPath string,\n\texclusions []*regexp.Regexp) (\n\tscore blob.Score,\n\terr error) {\n\tdirpath := path.Join(basePath, relPath)\n\n\t\/\/ Grab a listing for the directory.\n\tentries, err := s.fileSystem.ReadDir(dirpath)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Listing directory: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Filter the entries according to the list of exclusions.\n\tvar tmp []*fs.DirectoryEntry\n\tfor _, entry := range entries {\n\t\tif !shouldExclude(exclusions, path.Join(relPath, entry.Name)) {\n\t\t\ttmp = append(tmp, entry)\n\t\t}\n\t}\n\n\tentries = tmp\n\n\t\/\/ Save the data for each entry.\n\tfor _, entry := range entries {\n\t\tlog.Println(\"Processing:\", path.Join(relPath, entry.Name))\n\n\t\t\/\/ Call the appropriate method based on this entry's type.\n\t\tswitch entry.Type {\n\t\tcase fs.TypeFile:\n\t\t\t\/\/ Make sure this isn't a hard link to something we've already saved.\n\t\t\tentry.HardLinkTarget = s.linkResolver.Register(\n\t\t\t\tentry.ContainingDevice,\n\t\t\t\tentry.Inode,\n\t\t\t\tpath.Join(relPath, entry.Name))\n\n\t\t\tif entry.HardLinkTarget == nil {\n\t\t\t\tentry.Scores, err = s.saveFile(dirpath, entry)\n\t\t\t}\n\n\t\tcase fs.TypeDirectory:\n\t\t\tentry.Scores, err = s.saveDir(basePath, relPath, exclusions, entry)\n\t\tcase fs.TypeSymlink:\n\t\tcase fs.TypeBlockDevice:\n\t\tcase fs.TypeCharDevice:\n\t\tcase fs.TypeNamedPipe:\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unhandled type: %v\", entry.Type)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Create a serialized version of this information.\n\tdata, err := repr.Marshal(entries)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Marshaling: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Store that serialized version.\n\tscore, err = s.blobStore.Store(data)\n\tif err != nil {\n\t\terr = errors.New(\"Storing dir blob: \" + err.Error())\n\t\treturn\n\t}\n\n\treturn\n}\n<commit_msg>Fixed a long line.<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage backup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"regexp\"\n\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/fs\"\n\t\"github.com\/jacobsa\/comeback\/repr\"\n)\n\n\/\/ An object that knows how to save directories to some underlying storage.\ntype DirectorySaver interface {\n\t\/\/ Recursively save the contents of the supplied directory (defined by a base\n\t\/\/ path of a backup and a relative path within the backup) to the underlying\n\t\/\/ storage, returning the score of a blob representing the directory's\n\t\/\/ listing in a format that can be recovered with repr.Unmarshal.\n\t\/\/\n\t\/\/ Recursively exclude from backup relative paths that match any of the\n\t\/\/ supplied exclusion regexps. This is *not* tested against the initial\n\t\/\/ relative path.\n\tSave(basePath, relPath string, exclusions []*regexp.Regexp) (blob.Score, error)\n}\n\n\/\/ A directory saver that creates a new directory saver for each call to Save.\n\/\/ This breaks a self-dependency that would be needed to make use of\n\/\/ NewNonRecursiveDirectorySaver.\ntype onDemandDirSaver struct {\n\tcreateSaver func(wrapped DirectorySaver) DirectorySaver\n}\n\nfunc (s *onDemandDirSaver) Save(\n\tbasePath,\n\trelPath string,\n\texclusions []*regexp.Regexp) (\n\tscore blob.Score,\n\terr error) {\n\treturn s.createSaver(s).Save(basePath, relPath, exclusions)\n}\n\n\/\/ Return a directory saver that makes use of the supplied dependencies.\nfunc NewDirectorySaver(\n\tblobStore blob.Store,\n\tfileSystem fs.FileSystem,\n\tfileSaver FileSaver) (DirectorySaver, error) {\n\tlinkResolver := NewLinkResolver()\n\tcreateSaver := func(wrapped DirectorySaver) DirectorySaver {\n\t\tsaver, err := NewNonRecursiveDirectorySaver(\n\t\t\tblobStore,\n\t\t\tfileSystem,\n\t\t\tfileSaver,\n\t\t\twrapped,\n\t\t\tlinkResolver)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn saver\n\t}\n\n\treturn &onDemandDirSaver{createSaver}, nil\n}\n\n\/\/ Equivalent to NewDirectorySaver, but with an injectable wrapped directory\n\/\/ saver and link resolver to aid with testability. You should not use this\n\/\/ function.\nfunc NewNonRecursiveDirectorySaver(\n\tstore blob.Store,\n\tfileSystem fs.FileSystem,\n\tfileSaver FileSaver,\n\twrapped DirectorySaver,\n\tlinkResolver LinkResolver) (DirectorySaver, error) {\n\treturn &dirSaver{\n\t\tblobStore: store,\n\t\tfileSystem: fileSystem,\n\t\tfileSaver: fileSaver,\n\t\twrapped: wrapped,\n\t\tlinkResolver: linkResolver,\n\t}, nil\n}\n\ntype dirSaver struct {\n\tblobStore blob.Store\n\tfileSystem fs.FileSystem\n\tfileSaver FileSaver\n\twrapped DirectorySaver\n\tlinkResolver LinkResolver\n}\n\nfunc (s *dirSaver) saveDir(\n\tbasePath string,\n\trelPath string,\n\texclusions []*regexp.Regexp,\n\tentry *fs.DirectoryEntry) (\n\t[]blob.Score,\n\terror) {\n\t\/\/ Recurse.\n\tscore, err := s.wrapped.Save(\n\t\tbasePath,\n\t\tpath.Join(relPath, entry.Name),\n\t\texclusions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn []blob.Score{score}, nil\n}\n\nfunc (s *dirSaver) saveFile(\n\tparent string,\n\tentry *fs.DirectoryEntry) ([]blob.Score, error) {\n\t\/\/ Defer to the file saver.\n\treturn s.fileSaver.Save(path.Join(parent, entry.Name))\n}\n\nfunc shouldExclude(exclusions []*regexp.Regexp, relPath string) bool {\n\tfor _, re := range exclusions {\n\t\tif re.MatchString(relPath) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *dirSaver) Save(\n\tbasePath,\n\trelPath string,\n\texclusions []*regexp.Regexp) (\n\tscore blob.Score,\n\terr error) {\n\tdirpath := path.Join(basePath, relPath)\n\n\t\/\/ Grab a listing for the directory.\n\tentries, err := s.fileSystem.ReadDir(dirpath)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Listing directory: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Filter the entries according to the list of exclusions.\n\tvar tmp []*fs.DirectoryEntry\n\tfor _, entry := range entries {\n\t\tif !shouldExclude(exclusions, path.Join(relPath, entry.Name)) {\n\t\t\ttmp = append(tmp, entry)\n\t\t}\n\t}\n\n\tentries = tmp\n\n\t\/\/ Save the data for each entry.\n\tfor _, entry := range entries {\n\t\tlog.Println(\"Processing:\", path.Join(relPath, entry.Name))\n\n\t\t\/\/ Call the appropriate method based on this entry's type.\n\t\tswitch entry.Type {\n\t\tcase fs.TypeFile:\n\t\t\t\/\/ Make sure this isn't a hard link to something we've already saved.\n\t\t\tentry.HardLinkTarget = s.linkResolver.Register(\n\t\t\t\tentry.ContainingDevice,\n\t\t\t\tentry.Inode,\n\t\t\t\tpath.Join(relPath, entry.Name))\n\n\t\t\tif entry.HardLinkTarget == nil {\n\t\t\t\tentry.Scores, err = s.saveFile(dirpath, entry)\n\t\t\t}\n\n\t\tcase fs.TypeDirectory:\n\t\t\tentry.Scores, err = s.saveDir(basePath, relPath, exclusions, entry)\n\t\tcase fs.TypeSymlink:\n\t\tcase fs.TypeBlockDevice:\n\t\tcase fs.TypeCharDevice:\n\t\tcase fs.TypeNamedPipe:\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unhandled type: %v\", entry.Type)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Create a serialized version of this information.\n\tdata, err := repr.Marshal(entries)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Marshaling: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Store that serialized version.\n\tscore, err = s.blobStore.Store(data)\n\tif err != nil {\n\t\terr = errors.New(\"Storing dir blob: \" + err.Error())\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage main\n\nimport (\n\t\"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"github.com\/blevesearch\/bleve\"\n\tbleveHttp \"github.com\/blevesearch\/bleve\/http\"\n\tbleveRegistry \"github.com\/blevesearch\/bleve\/registry\"\n\n\tlog \"github.com\/couchbase\/clog\"\n\t\"github.com\/couchbase\/go-couchbase\"\n\t\"github.com\/couchbaselabs\/cbft\"\n)\n\nvar VERSION = \"0.0.0\"\n\nvar bindAddr = flag.String(\"addr\", \"localhost:8095\",\n\t\"\\n\\thttp listen address:port\")\nvar dataDir = flag.String(\"dataDir\", \"data\",\n\t\"\\n\\tdirectory path where index data and\\n\"+\n\t\t\"\\tlocal configuration files will be stored\")\nvar logFlags = flag.String(\"logFlags\", \"\",\n\t\"\\n\\tcomma-separated logging control flags\")\nvar staticDir = flag.String(\"staticDir\", \"static\",\n\t\"\\n\\tdirectory for static web UI content\")\nvar staticETag = flag.String(\"staticETag\", \"\",\n\t\"\\n\\tstatic etag value\")\nvar server = flag.String(\"server\", \"\",\n\t\"\\n\\turl to datasource server;\\n\"+\n\t\t\"\\texample for couchbase: http:\/\/localhost:8091\")\nvar tags = flag.String(\"tags\", \"\",\n\t\"\\n\\tcomma-separated list of tags (or roles) for this node\")\nvar container = flag.String(\"container\", \"\",\n\t\"\\n\\tslash separated path of parent containers for this node,\\n\"+\n\t\t\"\\tfor shelf\/rack\/row\/zone awareness\")\nvar weight = flag.Int(\"weight\", 1,\n\t\"\\n\\tweight of this node (a more capable node has higher weight)\")\nvar register = flag.String(\"register\", \"wanted\",\n\t\"\\n\\tregister this node as wanted, wantedForce,\\n\\t\"+\n\t\t\" known, knownForce,\"+\n\t\t\" unwanted, unknown or unchanged\")\nvar cfgConnect = flag.String(\"cfgConnect\", \"simple\",\n\t\"\\n\\tconnection string\/info to configuration provider\")\n\nvar expvars = expvar.NewMap(\"stats\")\n\nfunc init() {\n\texpvars.Set(\"indexes\", bleveHttp.IndexStats())\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr,\n\t\t\t\"%s: couchbase full-text server\\n\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"more information is available at:\\n\"+\n\t\t\t\" http:\/\/github.com\/couchbaselabs\/cbft\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"usage:\\n %s [flags]\\n\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"flags:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tlog.Printf(\"main: %s started (%s\/%s)\", os.Args[0], VERSION, cbft.VERSION)\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tgo dumpOnSignalForPlatform()\n\n\tmr, err := cbft.NewMsgRing(os.Stderr, 1000)\n\tif err != nil {\n\t\tlog.Fatalf(\"main: could not create MsgRing, err: %v\", err)\n\t}\n\tlog.SetOutput(mr)\n\n\tMainWelcome()\n\n\t\/\/ TODO: If cfg goes down, should we stop? How do we reconnect?\n\t\/\/\n\tcfg, err := MainCfg(*cfgConnect, *dataDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"main: could not start cfg, cfgConnect: %s, err: %v\",\n\t\t\t*cfgConnect, err)\n\t\treturn\n\t}\n\n\tuuid, err := MainUUID(*dataDir)\n\tif err != nil {\n\t\tlog.Fatalf(fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\tvar tagsArr []string\n\tif *tags != \"\" {\n\t\ttagsArr = strings.Split(*tags, \",\")\n\t}\n\n\trouter, err := MainStart(cfg, uuid, tagsArr, *container, *weight,\n\t\t*bindAddr, *dataDir, *staticDir, *staticETag, *server, *register, mr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttp.Handle(\"\/\", router)\n\tlog.Printf(\"main: listening on: %v\", *bindAddr)\n\tlog.Fatal(http.ListenAndServe(*bindAddr, nil))\n}\n\nfunc MainWelcome() {\n\tif *logFlags != \"\" {\n\t\tlog.ParseLogFlag(*logFlags)\n\t}\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tlog.Printf(\" -%s=%s\\n\", f.Name, f.Value)\n\t})\n\tlog.Printf(\" GOMAXPROCS=%d\", runtime.GOMAXPROCS(-1))\n\n\tlog.Printf(\"main: registered bleve stores\")\n\ttypes, instances := bleveRegistry.KVStoreTypesAndInstances()\n\tfor _, s := range types {\n\t\tlog.Printf(\" %s\", s)\n\t}\n\tfor _, s := range instances {\n\t\tlog.Printf(\" %s\", s)\n\t}\n}\n\nfunc MainUUID(dataDir string) (string, error) {\n\tuuid := cbft.NewUUID()\n\tuuidPath := dataDir + string(os.PathSeparator) + \"cbft.uuid\"\n\tuuidBuf, err := ioutil.ReadFile(uuidPath)\n\tif err == nil {\n\t\tuuid = strings.TrimSpace(string(uuidBuf))\n\t\tif uuid == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"error: could not parse uuidPath: %s\",\n\t\t\t\tuuidPath)\n\t\t}\n\t\tlog.Printf(\"main: manager uuid: %s\", uuid)\n\t\tlog.Printf(\"main: manager uuid was reloaded\")\n\t} else {\n\t\tlog.Printf(\"main: manager uuid: %s\", uuid)\n\t\tlog.Printf(\"main: manager uuid was generated\")\n\t}\n\terr = ioutil.WriteFile(uuidPath, []byte(uuid), 0600)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error: could not write uuidPath: %s\", uuidPath)\n\t}\n\treturn uuid, nil\n}\n\nfunc MainStart(cfg cbft.Cfg, uuid string, tags []string, container string,\n\tweight int, bindAddr, dataDir, staticDir, staticETag, server string,\n\tregister string, mr *cbft.MsgRing) (\n\t*mux.Router, error) {\n\tif server == \"\" {\n\t\treturn nil, fmt.Errorf(\"error: server URL required (-server)\")\n\t}\n\n\t_, err := couchbase.Connect(server)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error: could not connect to server,\"+\n\t\t\t\" URL: %s, err: %v\",\n\t\t\tserver, err)\n\t}\n\n\tmgr := cbft.NewManager(cbft.VERSION, cfg, uuid, tags, container, weight,\n\t\tbindAddr, dataDir, server, &MainHandlers{})\n\terr = mgr.Start(register)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trouter, _, err :=\n\t\tcbft.NewManagerRESTRouter(VERSION, mgr, staticDir, staticETag, mr)\n\n\treturn router, err\n}\n\ntype MainHandlers struct{}\n\nfunc (meh *MainHandlers) OnRegisterPIndex(pindex *cbft.PIndex) {\n\tbindex, ok := pindex.Impl.(bleve.Index)\n\tif ok {\n\t\tbleveHttp.RegisterIndexName(pindex.Name, bindex)\n\t}\n}\n\nfunc (meh *MainHandlers) OnUnregisterPIndex(pindex *cbft.PIndex) {\n\tbleveHttp.UnregisterIndexByName(pindex.Name)\n}\n\nfunc dumpOnSignal(signals ...os.Signal) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, signals...)\n\tfor _ = range c {\n\t\tlog.Printf(\"dump: goroutine...\")\n\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stderr, 1)\n\t\tlog.Printf(\"dump: heap...\")\n\t\tpprof.Lookup(\"heap\").WriteTo(os.Stderr, 1)\n\t}\n}\n<commit_msg>use spaces instead of tabs in usage\/help<commit_after>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage main\n\nimport (\n\t\"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"github.com\/blevesearch\/bleve\"\n\tbleveHttp \"github.com\/blevesearch\/bleve\/http\"\n\tbleveRegistry \"github.com\/blevesearch\/bleve\/registry\"\n\n\tlog \"github.com\/couchbase\/clog\"\n\t\"github.com\/couchbase\/go-couchbase\"\n\t\"github.com\/couchbaselabs\/cbft\"\n)\n\nvar VERSION = \"0.0.0\"\n\nvar bindAddr = flag.String(\"addr\", \"localhost:8095\",\n\tadesc(\"http listen address:port\"))\nvar dataDir = flag.String(\"dataDir\", \"data\",\n\tadesc(\"directory path where index data and\"+\n\t\t\"\\nlocal configuration files will be stored\"))\nvar logFlags = flag.String(\"logFlags\", \"\",\n\tadesc(\"comma-separated logging control flags\"))\nvar staticDir = flag.String(\"staticDir\", \"static\",\n\tadesc(\"directory for static web UI content\"))\nvar staticETag = flag.String(\"staticETag\", \"\",\n\tadesc(\"static etag value\"))\nvar server = flag.String(\"server\", \"\",\n\tadesc(\"url to datasource server;\"+\n\t\t\"\\nexample for couchbase: http:\/\/localhost:8091\"))\nvar tags = flag.String(\"tags\", \"\",\n\tadesc(\"comma-separated list of tags (or roles) for this node\"))\nvar container = flag.String(\"container\", \"\",\n\tadesc(\"slash separated path of parent containers for this node,\"+\n\t\t\"\\nfor shelf\/rack\/row\/zone awareness\"))\nvar weight = flag.Int(\"weight\", 1,\n\tadesc(\"weight of this node (a more capable node has higher weight)\"))\nvar register = flag.String(\"register\", \"wanted\",\n\tadesc(\"register this node as wanted, wantedForce,\"+\n\t\t\"\\nknown, knownForce, unwanted, unknown or unchanged\"))\nvar cfgConnect = flag.String(\"cfgConnect\", \"simple\",\n\tadesc(\"connection string\/info to configuration provider\"))\n\nvar expvars = expvar.NewMap(\"stats\")\n\nfunc init() {\n\texpvars.Set(\"indexes\", bleveHttp.IndexStats())\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr,\n\t\t\t\"%s: couchbase full-text server\\n\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"more information is available at:\\n\"+\n\t\t\t\" http:\/\/github.com\/couchbaselabs\/cbft\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"usage:\\n %s [flags]\\n\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"flags:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tlog.Printf(\"main: %s started (%s\/%s)\", os.Args[0], VERSION, cbft.VERSION)\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tgo dumpOnSignalForPlatform()\n\n\tmr, err := cbft.NewMsgRing(os.Stderr, 1000)\n\tif err != nil {\n\t\tlog.Fatalf(\"main: could not create MsgRing, err: %v\", err)\n\t}\n\tlog.SetOutput(mr)\n\n\tMainWelcome()\n\n\t\/\/ TODO: If cfg goes down, should we stop? How do we reconnect?\n\t\/\/\n\tcfg, err := MainCfg(*cfgConnect, *dataDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"main: could not start cfg, cfgConnect: %s, err: %v\",\n\t\t\t*cfgConnect, err)\n\t\treturn\n\t}\n\n\tuuid, err := MainUUID(*dataDir)\n\tif err != nil {\n\t\tlog.Fatalf(fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\tvar tagsArr []string\n\tif *tags != \"\" {\n\t\ttagsArr = strings.Split(*tags, \",\")\n\t}\n\n\trouter, err := MainStart(cfg, uuid, tagsArr, *container, *weight,\n\t\t*bindAddr, *dataDir, *staticDir, *staticETag, *server, *register, mr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttp.Handle(\"\/\", router)\n\tlog.Printf(\"main: listening on: %v\", *bindAddr)\n\tlog.Fatal(http.ListenAndServe(*bindAddr, nil))\n}\n\nfunc MainWelcome() {\n\tif *logFlags != \"\" {\n\t\tlog.ParseLogFlag(*logFlags)\n\t}\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tlog.Printf(\" -%s=%s\\n\", f.Name, f.Value)\n\t})\n\tlog.Printf(\" GOMAXPROCS=%d\", runtime.GOMAXPROCS(-1))\n\n\tlog.Printf(\"main: registered bleve stores\")\n\ttypes, instances := bleveRegistry.KVStoreTypesAndInstances()\n\tfor _, s := range types {\n\t\tlog.Printf(\" %s\", s)\n\t}\n\tfor _, s := range instances {\n\t\tlog.Printf(\" %s\", s)\n\t}\n}\n\nfunc MainUUID(dataDir string) (string, error) {\n\tuuid := cbft.NewUUID()\n\tuuidPath := dataDir + string(os.PathSeparator) + \"cbft.uuid\"\n\tuuidBuf, err := ioutil.ReadFile(uuidPath)\n\tif err == nil {\n\t\tuuid = strings.TrimSpace(string(uuidBuf))\n\t\tif uuid == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"error: could not parse uuidPath: %s\",\n\t\t\t\tuuidPath)\n\t\t}\n\t\tlog.Printf(\"main: manager uuid: %s\", uuid)\n\t\tlog.Printf(\"main: manager uuid was reloaded\")\n\t} else {\n\t\tlog.Printf(\"main: manager uuid: %s\", uuid)\n\t\tlog.Printf(\"main: manager uuid was generated\")\n\t}\n\terr = ioutil.WriteFile(uuidPath, []byte(uuid), 0600)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error: could not write uuidPath: %s\", uuidPath)\n\t}\n\treturn uuid, nil\n}\n\nfunc MainStart(cfg cbft.Cfg, uuid string, tags []string, container string,\n\tweight int, bindAddr, dataDir, staticDir, staticETag, server string,\n\tregister string, mr *cbft.MsgRing) (\n\t*mux.Router, error) {\n\tif server == \"\" {\n\t\treturn nil, fmt.Errorf(\"error: server URL required (-server)\")\n\t}\n\n\t_, err := couchbase.Connect(server)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error: could not connect to server,\"+\n\t\t\t\" URL: %s, err: %v\",\n\t\t\tserver, err)\n\t}\n\n\tmgr := cbft.NewManager(cbft.VERSION, cfg, uuid, tags, container, weight,\n\t\tbindAddr, dataDir, server, &MainHandlers{})\n\terr = mgr.Start(register)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trouter, _, err :=\n\t\tcbft.NewManagerRESTRouter(VERSION, mgr, staticDir, staticETag, mr)\n\n\treturn router, err\n}\n\ntype MainHandlers struct{}\n\nfunc (meh *MainHandlers) OnRegisterPIndex(pindex *cbft.PIndex) {\n\tbindex, ok := pindex.Impl.(bleve.Index)\n\tif ok {\n\t\tbleveHttp.RegisterIndexName(pindex.Name, bindex)\n\t}\n}\n\nfunc (meh *MainHandlers) OnUnregisterPIndex(pindex *cbft.PIndex) {\n\tbleveHttp.UnregisterIndexByName(pindex.Name)\n}\n\nfunc dumpOnSignal(signals ...os.Signal) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, signals...)\n\tfor _ = range c {\n\t\tlog.Printf(\"dump: goroutine...\")\n\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stderr, 1)\n\t\tlog.Printf(\"dump: heap...\")\n\t\tpprof.Lookup(\"heap\").WriteTo(os.Stderr, 1)\n\t}\n}\n\nfunc adesc(s string) string {\n\treturn \"\\n \" + strings.Replace(s, \"\\n\", \"\\n \", -1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Vulcan Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/digitalocean\/vulcan\/forwarder\"\n\t\"github.com\/digitalocean\/vulcan\/kafka\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Forwarder handles parsing the command line options, initializes, and starts the\n\/\/ forwarder service accordingling. It is the entry point for the forwarder\n\/\/ service.\nfunc Forwarder() *cobra.Command {\n\tf := &cobra.Command{\n\t\tUse: \"forwarder\",\n\t\tShort: \"forwards metric received from prometheus to message bus\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tvar (\n\t\t\t\treg = prometheus.NewRegistry()\n\t\t\t\tsignals = make(chan os.Signal, 1)\n\t\t\t)\n\n\t\t\tsignal.Notify(signals, os.Interrupt, syscall.SIGTERM)\n\n\t\t\t\/\/ create upstream kafka writer to receive data\n\t\t\tw, err := kafka.NewWriter(&kafka.WriterConfig{\n\t\t\t\tClientID: viper.GetString(flagKafkaClientID),\n\t\t\t\tTopic: viper.GetString(flagKafkaTopic),\n\t\t\t\tAddrs: strings.Split(viper.GetString(flagKafkaAddrs), \",\"),\n\t\t\t\tTrackWrites: viper.GetBool(flagKafkaTrackWrites),\n\t\t\t\tBatchSize: viper.GetInt(flagKafkaBatchSize),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = reg.Register(w)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"kafka_client_id\": viper.GetString(flagKafkaClientID),\n\t\t\t\t\"kafka_topic\": viper.GetString(flagKafkaTopic),\n\t\t\t\t\"kafka_addresses\": viper.GetString(flagKafkaAddrs),\n\t\t\t}).Info(\"registered as kafka producer\")\n\n\t\t\tfwd := forwarder.NewForwarder(&forwarder.Config{\n\t\t\t\tWriter: w,\n\t\t\t})\n\n\t\t\t\/\/ listen for signals so can gracefully stop kakfa producer\n\t\t\tgo func() {\n\t\t\t\t<-signals\n\t\t\t\t\/\/ tell forwarder to stop handling in incoming requests\n\t\t\t\tfwd.Stop()\n\t\t\t\t\/\/ tell kafka writer to stop sending out producer messages\n\t\t\t\tw.Stop()\n\t\t\t\tos.Exit(0)\n\t\t\t}()\n\n\t\t\thttp.Handle(\"\/\", forwarder.WriteHandler(fwd, \"snappy\"))\n\t\t\thttp.Handle(viper.GetString(flagTelemetryPath), promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"listening_address\": viper.GetString(flagAddress),\n\t\t\t}).Info(\"starting vulcan forwarder http service\")\n\n\t\t\treturn http.ListenAndServe(viper.GetString(flagAddress), nil)\n\t\t},\n\t}\n\n\tf.Flags().String(flagAddress, \":8888\", \"server listening address\")\n\tf.Flags().String(flagKafkaTopic, \"vulcan\", \"kafka topic to write to\")\n\tf.Flags().String(flagKafkaAddrs, \"\", \"one.example.com:9092,two.example.com:9092\")\n\tf.Flags().String(flagKafkaClientID, \"vulcan-forwarder\", \"set the kafka client id\")\n\tf.Flags().String(flagTelemetryPath, \"\/metrics\", \"path under which to expose metrics\")\n\tf.Flags().Bool(flagKafkaTrackWrites, false, \"track kafka writes for metric scraping and logging\")\n\tf.Flags().Int(flagKafkaBatchSize, 65536, \"batch size of each send of kafka producer messages\")\n\n\treturn f\n}\n<commit_msg>forwarder: register forwarder to prometheus (#76)<commit_after>\/\/ Copyright 2016 The Vulcan Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/digitalocean\/vulcan\/forwarder\"\n\t\"github.com\/digitalocean\/vulcan\/kafka\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Forwarder handles parsing the command line options, initializes, and starts the\n\/\/ forwarder service accordingling. It is the entry point for the forwarder\n\/\/ service.\nfunc Forwarder() *cobra.Command {\n\tf := &cobra.Command{\n\t\tUse: \"forwarder\",\n\t\tShort: \"forwards metric received from prometheus to message bus\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tvar (\n\t\t\t\treg = prometheus.NewRegistry()\n\t\t\t\tsignals = make(chan os.Signal, 1)\n\t\t\t)\n\n\t\t\tsignal.Notify(signals, os.Interrupt, syscall.SIGTERM)\n\n\t\t\t\/\/ create upstream kafka writer to receive data\n\t\t\tw, err := kafka.NewWriter(&kafka.WriterConfig{\n\t\t\t\tClientID: viper.GetString(flagKafkaClientID),\n\t\t\t\tTopic: viper.GetString(flagKafkaTopic),\n\t\t\t\tAddrs: strings.Split(viper.GetString(flagKafkaAddrs), \",\"),\n\t\t\t\tTrackWrites: viper.GetBool(flagKafkaTrackWrites),\n\t\t\t\tBatchSize: viper.GetInt(flagKafkaBatchSize),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treg.MustRegister(w)\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"kafka_client_id\": viper.GetString(flagKafkaClientID),\n\t\t\t\t\"kafka_topic\": viper.GetString(flagKafkaTopic),\n\t\t\t\t\"kafka_addresses\": viper.GetString(flagKafkaAddrs),\n\t\t\t}).Info(\"registered as kafka producer\")\n\n\t\t\tfwd := forwarder.NewForwarder(&forwarder.Config{\n\t\t\t\tWriter: w,\n\t\t\t})\n\t\t\treg.MustRegister(fwd)\n\n\t\t\t\/\/ listen for signals so can gracefully stop kakfa producer\n\t\t\tgo func() {\n\t\t\t\t<-signals\n\t\t\t\t\/\/ tell forwarder to stop handling in incoming requests\n\t\t\t\tfwd.Stop()\n\t\t\t\t\/\/ tell kafka writer to stop sending out producer messages\n\t\t\t\tw.Stop()\n\t\t\t\tos.Exit(0)\n\t\t\t}()\n\n\t\t\thttp.Handle(\"\/\", forwarder.WriteHandler(fwd, \"snappy\"))\n\t\t\thttp.Handle(viper.GetString(flagTelemetryPath), promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"listening_address\": viper.GetString(flagAddress),\n\t\t\t}).Info(\"starting vulcan forwarder http service\")\n\n\t\t\treturn http.ListenAndServe(viper.GetString(flagAddress), nil)\n\t\t},\n\t}\n\n\tf.Flags().String(flagAddress, \":8888\", \"server listening address\")\n\tf.Flags().String(flagKafkaTopic, \"vulcan\", \"kafka topic to write to\")\n\tf.Flags().String(flagKafkaAddrs, \"\", \"one.example.com:9092,two.example.com:9092\")\n\tf.Flags().String(flagKafkaClientID, \"vulcan-forwarder\", \"set the kafka client id\")\n\tf.Flags().String(flagTelemetryPath, \"\/metrics\", \"path under which to expose metrics\")\n\tf.Flags().Bool(flagKafkaTrackWrites, false, \"track kafka writes for metric scraping and logging\")\n\tf.Flags().Int(flagKafkaBatchSize, 65536, \"batch size of each send of kafka producer messages\")\n\n\treturn f\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ lissajous2 solution for task 1.5\npackage main\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n)\n\nvar palette = []color.Color{color.Black, color.RGBA{0, 128, 0, 255}}\n\nconst (\n\tblackIndex = 0\n\tgreenIndex = 1\n)\n\nfunc main() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tlissajous(os.Stdout)\n}\n\nfunc lissajous(out io.Writer) {\n\tconst (\n\t\tcycles = 5\n\t\tres = 0.001\n\t\tsize = 200\n\t\tnframes = 64\n\t\tdelay = 8\n\t)\n\tfreq := rand.Float64() * 3.0\n\tanim := gif.GIF{LoopCount: nframes}\n\tphase := 0.0\n\tfor i := 0; i < nframes; i++ {\n\t\trect := image.Rect(0, 0, 2*size+1, 2*size+1)\n\t\timg := image.NewPaletted(rect, palette)\n\t\tfor t := 0.0; t < cycles*2*math.Pi; t += res {\n\t\t\tx := math.Sin(t)\n\t\t\ty := math.Sin(t*freq + phase)\n\t\t\timg.SetColorIndex(size+int(x*size+0.5), size+int(y*size+0.5),\n\t\t\t\tgreenIndex)\n\t\t}\n\t\tphase += 0.1\n\t\tanim.Delay = append(anim.Delay, delay)\n\t\tanim.Image = append(anim.Image, img)\n\t}\n\terr := gif.EncodeAll(out, &anim)\n\tif err != nil {\n\t\tlog.Fatalf(\"error encoding gif image: %v\", err)\n\t}\n}\n<commit_msg>Updated soluton for gif for task 1.6.<commit_after>\/\/ lissajous2 solution for task 1.6\npackage main\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tlissajous(os.Stdout)\n}\n\nfunc lissajous(out io.Writer) {\n\tconst (\n\t\tcolours = 128\n\t\tcycles = 5\n\t\tres = 0.001\n\t\tsize = 200\n\t\tnframes = 64\n\t\tdelay = 8\n\t)\n\n\tfreq := rand.Float64() * 3.0\n\tanim := gif.GIF{LoopCount: nframes}\n\tphase := 0.0\n\tfor i := 0; i < nframes; i++ {\n\t\tpalette := []color.Color{color.Black}\n\t\trect := image.Rect(0, 0, 2*size+1, 2*size+1)\n\t\timg := image.NewPaletted(rect, append(palette, rainbow()))\n\n\t\tfor t := 0.0; t < cycles*2*math.Pi; t += res {\n\t\t\tx := math.Sin(t)\n\t\t\ty := math.Sin(t*freq + phase)\n\t\t\timg.SetColorIndex(size+int(x*size+0.5), size+int(y*size+0.5), 1)\n\t\t}\n\t\tphase += 0.1\n\t\tanim.Delay = append(anim.Delay, delay)\n\t\tanim.Image = append(anim.Image, img)\n\t}\n\terr := gif.EncodeAll(out, &anim)\n\tif err != nil {\n\t\tlog.Fatalf(\"error encoding gif image: %v\", err)\n\t}\n}\n\nfunc rainbow() color.Color {\n\n\tvar vibgyor = []color.Color{\n\t\tcolor.RGBA{0x94, 0x00, 0xD3, 0xFF}, \/\/ violet\n\t\tcolor.RGBA{0x4B, 0x00, 0x82, 0xFF}, \/\/ indigo\n\t\tcolor.RGBA{0x00, 0x00, 0xFF, 0xFF}, \/\/ blue\n\t\tcolor.RGBA{0x00, 0xFF, 0x00, 0xFF}, \/\/ green\n\t\tcolor.RGBA{0xFF, 0xFF, 0x00, 0xFF}, \/\/ yellow\n\t\tcolor.RGBA{0xFF, 0x7F, 0x00, 0xFF}, \/\/ orange\n\t\tcolor.RGBA{0xFF, 0x00, 0x00, 0xFF}, \/\/ red\n\t}\n\treturn vibgyor[rand.Intn(len(vibgyor))]\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Reposeed Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype config struct {\n\tProject struct {\n\t\tName string `yaml:\"name\"`\n\t\tDescription string `yaml:\"description\"`\n\t\tState string `yaml:\"state,omitempty\"`\n\t\tOneLiner string `yaml:\"oneLiner,omitempty\"`\n\t\tImage string `yaml:\"image,omitempty\"`\n\t\tWebsite string `yaml:\"website,omitempty\"`\n\t} `yaml:\"project\"`\n\tVision struct {\n\t\tType string `yaml:\"type\"`\n\t\tItems []string `yaml:\"items\"`\n\t\tConcept string `yaml:\"concept\"`\n\t\tOverview string `yaml:\"overview\"`\n\t\tAim string `yaml:\"aim\"`\n\t} `yaml:\"vision\"`\n\tRepo struct {\n\t\tType string `yaml:\"type\"`\n\t\tLink string `yaml:\"link\"`\n\t} `yaml:\"repo\"`\n\tCopyright struct {\n\t\tOwner string `yaml:\"owner\"`\n\t\tYear string `yaml:\"year\"`\n\t} `yaml:\"copyright\"`\n\tCla struct {\n\t\tCopyrightHolder string `yaml:\"copyrightHolder\"`\n\t} `yaml:\"cla\"`\n\tMaintainers []struct {\n\t\tName string `yaml:\"name\"`\n\t\tNick string `yaml:\"nick,omitempty\"`\n\t} `yaml:\"maintainers,omitempty\"`\n\tEmails struct {\n\t\tCommercialSupport string `yaml:\"commercialSupport,omitempty\"`\n\t\tSecurity string `yaml:\"security\"`\n\t\tCoc string `yaml:\"coc\"`\n\t} `yaml:\"emails\"`\n\tBadges []struct {\n\t\tImage string `yaml:\"image,omitempty\"`\n\t\tLink string `yaml:\"link,omitempty\"`\n\t\tAlt string `yaml:\"alt,omitempty\"`\n\t} `yaml:\"badges\"`\n\tSupportLinks struct {\n\t\tDocumentation string `yaml:\"documentation,omitempty\"`\n\t\tExamples string `yaml:\"examples,omitempty\"`\n\t\tTroubleshooting string `yaml:\"troubleshooting,omitempty\"`\n\t} `yaml:\"supportLinks,omitempty\"`\n\tReadme struct {\n\t\tUsageExample string `yaml:\"usageExample,omitempty\"`\n\t} `yaml:\"readme,omitempty\"`\n\tSupportPlatforms []struct {\n\t\tService string `yaml:\"service\"`\n\t\tLink string `yaml:\"link\"`\n\t} `yaml:\"supportPlatforms,omitempty\"`\n\tIssueTemplate struct {\n\t\tQuestions []string `yaml:\"questions\"`\n\t} `yaml:\"issueTemplate\"`\n\tContributionLinks struct {\n\t\tIssueTemplate string `yaml:\"issueTemplate,omitempty\"`\n\t\tStarterIssues string `yaml:\"starterIssues,omitempty\"`\n\t} `yaml:\"contributionLinks\"`\n}\n\nfunc parseConfig(path string) config {\n\tvar conf config\n\n\tfilename, _ := filepath.Abs(path)\n\tfile, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = yaml.Unmarshal(file, &conf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn conf\n}\n\nfunc generateFile(config config, path string, newPath string, overwrite bool) error {\n\ttemp, err := template.ParseFiles(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse file: %s\", err)\n\t}\n\n\tif _, e := os.Stat(newPath); os.IsNotExist(e) {\n\t\tos.MkdirAll(filepath.Dir(newPath), os.ModePerm)\n\t}\n\n\tif !overwrite {\n\t\tif _, e := os.Stat(newPath); !os.IsNotExist(e) {\n\t\t\treturn fmt.Errorf(\"file %s not overwritten\", newPath)\n\t\t}\n\t}\n\n\tfile, err := os.Create(newPath)\n\tdefer file.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create file: %s\", err)\n\t}\n\n\terr = temp.Execute(file, config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse template: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tvar tempDir, conf string\n\tvar overwrite bool\n\n\tflag.StringVar(&tempDir, \"input\", \"templates\", \"Template directory\")\n\tflag.StringVar(&conf, \"conf\", \".seed-config.yaml\", \"Config file\")\n\tflag.BoolVar(&overwrite, \"overwrite\", false, \"Force overwrite files\")\n\n\tflag.Parse()\n\n\tconfig := parseConfig(conf)\n\ttempDir, _ = filepath.Abs(tempDir)\n\n\tbl := make(map[string]bool)\n\tbl[\"seed-config.example.yaml\"] = true\n\n\tfilepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {\n\t\tif bl[info.Name()] {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tnewPath, _ := filepath.Rel(tempDir, path)\n\t\t\terr := generateFile(config, path, newPath, overwrite)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n}\n<commit_msg>Add output directory option<commit_after>\/*\nCopyright 2017 The Reposeed Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype config struct {\n\tProject struct {\n\t\tName string `yaml:\"name\"`\n\t\tDescription string `yaml:\"description\"`\n\t\tState string `yaml:\"state,omitempty\"`\n\t\tOneLiner string `yaml:\"oneLiner,omitempty\"`\n\t\tImage string `yaml:\"image,omitempty\"`\n\t\tWebsite string `yaml:\"website,omitempty\"`\n\t} `yaml:\"project\"`\n\tVision struct {\n\t\tType string `yaml:\"type\"`\n\t\tItems []string `yaml:\"items\"`\n\t\tConcept string `yaml:\"concept\"`\n\t\tOverview string `yaml:\"overview\"`\n\t\tAim string `yaml:\"aim\"`\n\t} `yaml:\"vision\"`\n\tRepo struct {\n\t\tType string `yaml:\"type\"`\n\t\tLink string `yaml:\"link\"`\n\t} `yaml:\"repo\"`\n\tCopyright struct {\n\t\tOwner string `yaml:\"owner\"`\n\t\tYear string `yaml:\"year\"`\n\t} `yaml:\"copyright\"`\n\tCla struct {\n\t\tCopyrightHolder string `yaml:\"copyrightHolder\"`\n\t} `yaml:\"cla\"`\n\tMaintainers []struct {\n\t\tName string `yaml:\"name\"`\n\t\tNick string `yaml:\"nick,omitempty\"`\n\t} `yaml:\"maintainers,omitempty\"`\n\tEmails struct {\n\t\tCommercialSupport string `yaml:\"commercialSupport,omitempty\"`\n\t\tSecurity string `yaml:\"security\"`\n\t\tCoc string `yaml:\"coc\"`\n\t} `yaml:\"emails\"`\n\tBadges []struct {\n\t\tImage string `yaml:\"image,omitempty\"`\n\t\tLink string `yaml:\"link,omitempty\"`\n\t\tAlt string `yaml:\"alt,omitempty\"`\n\t} `yaml:\"badges\"`\n\tSupportLinks struct {\n\t\tDocumentation string `yaml:\"documentation,omitempty\"`\n\t\tExamples string `yaml:\"examples,omitempty\"`\n\t\tTroubleshooting string `yaml:\"troubleshooting,omitempty\"`\n\t} `yaml:\"supportLinks,omitempty\"`\n\tReadme struct {\n\t\tUsageExample string `yaml:\"usageExample,omitempty\"`\n\t} `yaml:\"readme,omitempty\"`\n\tSupportPlatforms []struct {\n\t\tService string `yaml:\"service\"`\n\t\tLink string `yaml:\"link\"`\n\t} `yaml:\"supportPlatforms,omitempty\"`\n\tIssueTemplate struct {\n\t\tQuestions []string `yaml:\"questions\"`\n\t} `yaml:\"issueTemplate\"`\n\tContributionLinks struct {\n\t\tIssueTemplate string `yaml:\"issueTemplate,omitempty\"`\n\t\tStarterIssues string `yaml:\"starterIssues,omitempty\"`\n\t} `yaml:\"contributionLinks\"`\n}\n\nfunc parseConfig(path string) config {\n\tvar conf config\n\n\tfilename, _ := filepath.Abs(path)\n\tfile, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = yaml.Unmarshal(file, &conf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn conf\n}\n\nfunc generateFile(config config, path string, newPath string, overwrite bool) error {\n\ttemp, err := template.ParseFiles(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse file: %s\", err)\n\t}\n\n\tif _, e := os.Stat(newPath); os.IsNotExist(e) {\n\t\tos.MkdirAll(filepath.Dir(newPath), os.ModePerm)\n\t}\n\n\tif !overwrite {\n\t\tif _, e := os.Stat(newPath); !os.IsNotExist(e) {\n\t\t\treturn fmt.Errorf(\"file %s not overwritten\", newPath)\n\t\t}\n\t}\n\n\tfile, err := os.Create(newPath)\n\tdefer file.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create file: %s\", err)\n\t}\n\n\terr = temp.Execute(file, config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse template: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tvar tempDir, outputDir, conf string\n\tvar overwrite bool\n\n\tflag.StringVar(&tempDir, \"input\", \"templates\", \"Template directory\")\n\tflag.StringVar(&outputDir, \"output\", \"\", \"Output directory\")\n\tflag.StringVar(&conf, \"conf\", \".seed-config.yaml\", \"Config file\")\n\tflag.BoolVar(&overwrite, \"overwrite\", false, \"Force overwrite files\")\n\n\tflag.Parse()\n\n\tconfig := parseConfig(conf)\n\ttempDir, _ = filepath.Abs(tempDir)\n\n\tbl := make(map[string]bool)\n\tbl[\"seed-config.example.yaml\"] = true\n\n\tfilepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {\n\t\tif bl[info.Name()] {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\trelPath, _ := filepath.Rel(tempDir, path)\n\t\t\tnewPath := relPath\n\t\t\tif outputDir != \"\" {\n\t\t\t\tnewPath = filepath.Join(outputDir, relPath)\n\t\t\t}\n\t\t\terr := generateFile(config, path, newPath, overwrite)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar helmHome string\n\n\/\/ flagVerbose is a signal that the user wants additional output.\nvar flagVerbose bool\n\nvar globalUsage = `The Kubernetes package manager\n\nTo begin working with Helm, run the 'helm init' command:\n\n$ helm init\n\nThis will install Tiller to your running Kubernetes cluster.\nIt will also set up any necessary local configuration.\n\nCommond actions from this point on include:\n\n- helm search: search for charts\n- helm fetch: download a chart to your local directory to view\n- helm install: upload the chart to Kubernetes\n- helm list: list releases of charts\n\nENVIRONMENT:\n$HELM_HOME: Set an alternative location for Helm files.\n By default, these are stored in ~\/.helm\n`\n\n\/\/ RootCommand is the top-level command for Helm.\nvar RootCommand = &cobra.Command{\n\tUse: \"helm\",\n\tShort: \"The Helm package manager for Kubernetes.\",\n\tLong: globalUsage,\n}\n\nfunc init() {\n\tRootCommand.PersistentFlags().StringVar(&helmHome, \"home\", \"$HOME\/.helm\", \"location of you Helm files [$HELM_HOME]\")\n\tRootCommand.PersistentFlags().BoolVarP(&flagVerbose, \"verbose\", \"v\", false, \"enable verbose output\")\n}\n\nfunc main() {\n\tif err := RootCommand.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc checkArgsLength(expectedNum, actualNum int, requiredArgs ...string) error {\n\tif actualNum != expectedNum {\n\t\targ := \"arguments\"\n\t\tif expectedNum == 1 {\n\t\t\targ = \"argument\"\n\t\t}\n\t\treturn fmt.Errorf(\"This command needs %v %s: %s\", expectedNum, arg, strings.Join(requiredArgs, \", \"))\n\t}\n\treturn nil\n}\n\n\/\/ prettyError unwraps or rewrites certain errors to make them more user-friendly.\nfunc prettyError(err error) error {\n\t\/\/ This is ridiculous. Why is 'grpc.rpcError' not exported? The least they\n\t\/\/ could do is throw an interface on the lib that would let us get back\n\t\/\/ the desc. Instead, we have to pass ALL errors through this.\n\treturn errors.New(grpc.ErrorDesc(err))\n}\n<commit_msg>fix(helm): update main help text<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar helmHome string\n\n\/\/ flagVerbose is a signal that the user wants additional output.\nvar flagVerbose bool\n\nvar globalUsage = `The Kubernetes package manager\n\nTo begin working with Helm, run the 'helm init' command:\n\n\t$ helm init\n\nThis will install Tiller to your running Kubernetes cluster.\nIt will also set up any necessary local configuration.\n\nCommon actions from this point include:\n\n- helm search: search for charts\n- helm fetch: download a chart to your local directory to view\n- helm install: upload the chart to Kubernetes\n- helm list: list releases of charts\n\nEnvironment:\n $HELM_HOME Set an alternative location for Helm files. By default, these are stored in ~\/.helm\n`\n\n\/\/ RootCommand is the top-level command for Helm.\nvar RootCommand = &cobra.Command{\n\tUse: \"helm\",\n\tShort: \"The Helm package manager for Kubernetes.\",\n\tLong: globalUsage,\n}\n\nfunc init() {\n\tRootCommand.PersistentFlags().StringVar(&helmHome, \"home\", \"$HOME\/.helm\", \"location of you Helm files [$HELM_HOME]\")\n\tRootCommand.PersistentFlags().BoolVarP(&flagVerbose, \"verbose\", \"v\", false, \"enable verbose output\")\n}\n\nfunc main() {\n\tif err := RootCommand.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc checkArgsLength(expectedNum, actualNum int, requiredArgs ...string) error {\n\tif actualNum != expectedNum {\n\t\targ := \"arguments\"\n\t\tif expectedNum == 1 {\n\t\t\targ = \"argument\"\n\t\t}\n\t\treturn fmt.Errorf(\"This command needs %v %s: %s\", expectedNum, arg, strings.Join(requiredArgs, \", \"))\n\t}\n\treturn nil\n}\n\n\/\/ prettyError unwraps or rewrites certain errors to make them more user-friendly.\nfunc prettyError(err error) error {\n\t\/\/ This is ridiculous. Why is 'grpc.rpcError' not exported? The least they\n\t\/\/ could do is throw an interface on the lib that would let us get back\n\t\/\/ the desc. Instead, we have to pass ALL errors through this.\n\treturn errors.New(grpc.ErrorDesc(err))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCommand lark executes project tasks defined with lua scripts. The Lua\ninterpreter included in lark in embedded and is not affected by an existing Lua\ninstallation on the host system. Furthermore, Lark isolates the lua modules\navailable to scripts to modules in the relative directory path .\/lark_modules\/\nto ensure portability of project tasks across developer machines.\n\nAs of the current release the embedded Lua interpreter is compliant with\nLua5.1.\n\nThe lark command locates tasks defined in the .\/lark.lua file or otherwise\ndirectly under the directory .\/lark_tasks\/. Tasks can have names (either\nexplicitly given or otherwise inferred) or patterns. Pattern matching tasks\nwill match a set of names defined by a regular expression.\n\nNames are matched against available tasks with a strict precedence. Explicitly\nnamed tasks will match the same name with the highest priority. Any task with\nan inferred name will match the same name with the second highest priority.\nPattern matching tasks have the lowest priority and will match names in the\norder they were defined.\n\nTasks can be executed by calling the lua function lark.run() in a script, using\nthe lark subcommand \"run\". When given no arguments, run will execute the first\nnamed task that was defined, or a task specified by setting the \"default\"\nvariable in the \"lark.task\" lua module.\n\n\tlocal task = require('lark.task')\n\ttask1 = task .. function() print('task1') end\n\ttask2 = task .. function() print('task2') end\n\tlark.run()\n\ttask.default = 'task2'\n\tlark.run()\n\tlark.run('task1')\n\nThe above script will print a line containing text \"task1\" followed by a line\ncontaining \"task2\" and finally a line containing \"task1\" again.\n\n\nCommand Reference\n\nCommand reference documentation is available through the \"help\" subcommand.\n\n\tlark help\n\nThe documentation for a specific subcommand is available through the help\ncommand or by passing the subcommand the -h (or --help) flag.\n\n\tlark run -h\n\tlark help run\n\n\nLua Reference\n\nLua API documentation is available through the help() function in the embedded REPL.\n\n\tlark repl\n\t> help()\n\t> help(lark)\n*\/\npackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bmatsuo\/lark\/larkmeta\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mattn\/go-isatty\"\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\n\/\/ IsTTY is true if standard error is connected to a terminal. This is taken to\n\/\/ mean that lark was executed from the command line and is not being logged to\n\/\/ a file.\n\/\/\n\/\/ BUG:\n\/\/ The assumptions made due to IsTTY cannot be overridden (e.g. by a command\n\/\/ line flag).\nvar (\n\tIsTTYStderr = isatty.IsTerminal(os.Stderr.Fd())\n\tIsTTYStdout = isatty.IsTerminal(os.Stdout.Fd())\n\tIsTTYStdin = isatty.IsTerminal(os.Stdin.Fd())\n)\n\n\/\/ MainHelp is the top-level hop documentation.\nvar MainHelp = `\n\n If no builtin command is provided the \"run\" command is executed with any\n task arguments provided. For more information see the command\n documentation\n\n lark run -h\n`\n\nfunc main() {\n\tif IsTTYStderr {\n\t\tlogflags := log.Flags()\n\t\tlogflags &^= log.Ldate | log.Ltime\n\t\tlog.SetFlags(logflags)\n\t}\n\n\t\/\/ Set search path for lua modules. The search path must be completely\n\t\/\/ contained by the working directory to help ensure repeatable builds\n\t\/\/ across machines.\n\tlua.LuaPathDefault = \".\/lark_modules\/?.lua;.\/lark_modules\/?\/init.lua\"\n\tos.Setenv(lua.LuaPath, \"\")\n\n\tcli.VersionFlag.Name = \"version\"\n\n\tapp := cli.NewApp()\n\tapp.Name = \"lark\"\n\tapp.Usage = \"Run repeated project tasks\"\n\tapp.ArgsUsage = MainHelp\n\tapp.Version = larkmeta.Version\n\tapp.Authors = larkmeta.Authors\n\tapp.Action = func(c *cli.Context) {\n\t\targs := []string{os.Args[0], \"run\"}\n\t\targs = append(args, c.Args()...)\n\t\tapp.Run(args)\n\t}\n\tapp.Commands = Commands\n\n\tapp.Run(os.Args)\n}\n<commit_msg>docs: replace reference to `lark repl` with `lark lua`<commit_after>\/*\nCommand lark executes project tasks defined with lua scripts. The Lua\ninterpreter included in lark in embedded and is not affected by an existing Lua\ninstallation on the host system. Furthermore, Lark isolates the lua modules\navailable to scripts to modules in the relative directory path .\/lark_modules\/\nto ensure portability of project tasks across developer machines.\n\nAs of the current release the embedded Lua interpreter is compliant with\nLua5.1.\n\nThe lark command locates tasks defined in the .\/lark.lua file or otherwise\ndirectly under the directory .\/lark_tasks\/. Tasks can have names (either\nexplicitly given or otherwise inferred) or patterns. Pattern matching tasks\nwill match a set of names defined by a regular expression.\n\nNames are matched against available tasks with a strict precedence. Explicitly\nnamed tasks will match the same name with the highest priority. Any task with\nan inferred name will match the same name with the second highest priority.\nPattern matching tasks have the lowest priority and will match names in the\norder they were defined.\n\nTasks can be executed by calling the lua function lark.run() in a script, using\nthe lark subcommand \"run\". When given no arguments, run will execute the first\nnamed task that was defined, or a task specified by setting the \"default\"\nvariable in the \"lark.task\" lua module.\n\n\tlocal task = require('lark.task')\n\ttask1 = task .. function() print('task1') end\n\ttask2 = task .. function() print('task2') end\n\tlark.run()\n\ttask.default = 'task2'\n\tlark.run()\n\tlark.run('task1')\n\nThe above script will print a line containing text \"task1\" followed by a line\ncontaining \"task2\" and finally a line containing \"task1\" again.\n\n\nCommand Reference\n\nCommand reference documentation is available through the \"help\" subcommand.\n\n\tlark help\n\nThe documentation for a specific subcommand is available through the help\ncommand or by passing the subcommand the -h (or --help) flag.\n\n\tlark run -h\n\tlark help run\n\n\nLua Reference\n\nLua API documentation is available through the help() function in the embedded REPL.\n\n\tlark lua\n\t> help()\n\t> help(lark)\n*\/\npackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bmatsuo\/lark\/larkmeta\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mattn\/go-isatty\"\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\n\/\/ IsTTY is true if standard error is connected to a terminal. This is taken to\n\/\/ mean that lark was executed from the command line and is not being logged to\n\/\/ a file.\n\/\/\n\/\/ BUG:\n\/\/ The assumptions made due to IsTTY cannot be overridden (e.g. by a command\n\/\/ line flag).\nvar (\n\tIsTTYStderr = isatty.IsTerminal(os.Stderr.Fd())\n\tIsTTYStdout = isatty.IsTerminal(os.Stdout.Fd())\n\tIsTTYStdin = isatty.IsTerminal(os.Stdin.Fd())\n)\n\n\/\/ MainHelp is the top-level hop documentation.\nvar MainHelp = `\n\n If no builtin command is provided the \"run\" command is executed with any\n task arguments provided. For more information see the command\n documentation\n\n lark run -h\n`\n\nfunc main() {\n\tif IsTTYStderr {\n\t\tlogflags := log.Flags()\n\t\tlogflags &^= log.Ldate | log.Ltime\n\t\tlog.SetFlags(logflags)\n\t}\n\n\t\/\/ Set search path for lua modules. The search path must be completely\n\t\/\/ contained by the working directory to help ensure repeatable builds\n\t\/\/ across machines.\n\tlua.LuaPathDefault = \".\/lark_modules\/?.lua;.\/lark_modules\/?\/init.lua\"\n\tos.Setenv(lua.LuaPath, \"\")\n\n\tcli.VersionFlag.Name = \"version\"\n\n\tapp := cli.NewApp()\n\tapp.Name = \"lark\"\n\tapp.Usage = \"Run repeated project tasks\"\n\tapp.ArgsUsage = MainHelp\n\tapp.Version = larkmeta.Version\n\tapp.Authors = larkmeta.Authors\n\tapp.Action = func(c *cli.Context) {\n\t\targs := []string{os.Args[0], \"run\"}\n\t\targs = append(args, c.Args()...)\n\t\tapp.Run(args)\n\t}\n\tapp.Commands = Commands\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\n\/*\nCommand mdot turns an mtail program AST into a graphviz graph on standard output.\n\nTo use, run it like (assuming your shell is in the same directory as this file)\n\n go run github.com\/google\/mtail\/cmd\/mdot --prog ..\/..\/examples\/dhcpd.mtail | xdot -\n\nor\n\n go run github.com\/google\/mtail\/cmd\/mdot --prog ..\/..\/examples\/dhcpd.mtail --http_port 8080\n\nto view the dot output visit http:\/\/localhost:8080\n\nYou'll need the graphviz `dot' command installed.\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/mtail\/internal\/mtail\"\n\t\"github.com\/google\/mtail\/internal\/vm\/ast\"\n\t\"github.com\/google\/mtail\/internal\/vm\/checker\"\n\t\"github.com\/google\/mtail\/internal\/vm\/parser\"\n)\n\nvar (\n\tprog = flag.String(\"prog\", \"\", \"Name of the program source to parse.\")\n\thttpPort = flag.String(\"http_port\", \"\", \"Port number to run HTTP server on.\")\n)\n\ntype dotter struct {\n\tw io.Writer\n\tid int\n\tparentID []int \/\/ id of the parent node\n}\n\nfunc (d *dotter) nextID() int {\n\td.id++\n\treturn d.id\n}\n\nfunc (d *dotter) emitNode(id int, node ast.Node) {\n\tattrs := map[string]string{\n\t\t\"label\": strings.Split(fmt.Sprintf(\"%T\", node), \".\")[1] + \"\\n\",\n\t\t\"shape\": \"box\",\n\t\t\"style\": \"filled\",\n\t\t\"tooltip\": node.Type().String(),\n\t}\n\tswitch n := node.(type) {\n\tcase *ast.VarDecl, *ast.DecoDecl:\n\t\tattrs[\"fillcolor\"] = \"green\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.VarDecl:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%s %s\", n.Kind, n.Name)\n\t\tcase *ast.DecoDecl:\n\t\t\tattrs[\"label\"] += n.Name\n\t\t}\n\tcase *ast.IdTerm, *ast.CaprefTerm:\n\t\tattrs[\"fillcolor\"] = \"pink\"\n\t\tattrs[\"shape\"] = \"ellipse\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.IdTerm:\n\t\t\tattrs[\"label\"] += n.Name\n\t\tcase *ast.CaprefTerm:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"$%s\", n.Name)\n\t\t}\n\tcase *ast.IntLit, *ast.FloatLit, *ast.PatternLit, *ast.StringLit:\n\t\tattrs[\"fillcolor\"] = \"pink\"\n\t\tattrs[\"shape\"] = \"ellipse\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.IntLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%d\", n.I)\n\t\tcase *ast.FloatLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%g\", n.F)\n\t\tcase *ast.PatternLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"\/%s\/\", n.Pattern)\n\t\tcase *ast.StringLit:\n\t\t\tattrs[\"label\"] += n.Text\n\t\t}\n\tcase *ast.IndexedExpr, *ast.BinaryExpr, *ast.UnaryExpr, *ast.PatternExpr, *ast.BuiltinExpr:\n\t\tattrs[\"fillcolor\"] = \"lightblue\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.BinaryExpr:\n\t\t\tattrs[\"label\"] += parser.Kind(n.Op).String()\n\t\tcase *ast.UnaryExpr:\n\t\t\tattrs[\"label\"] += parser.Kind(n.Op).String()\n\t\tcase *ast.BuiltinExpr:\n\t\t\tattrs[\"label\"] += n.Name\n\t\t}\n\t}\n\tpos := node.Pos()\n\tif pos != nil {\n\t\tattrs[\"xlabel\"] = pos.String()\n\t}\n\tfmt.Fprintf(d.w, \"n%d [\", id)\n\tfor k, v := range attrs {\n\t\tfmt.Fprintf(d.w, \"%s=\\\"%s\\\" \", k, v)\n\t}\n\tfmt.Fprintf(d.w, \"]\\n\")\n}\n\nfunc (d *dotter) emitLine(src, dst int) {\n\tfmt.Fprintf(d.w, \"n%d -> n%d\\n\", src, dst)\n}\n\nfunc (d *dotter) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) {\n\tid := d.nextID()\n\td.emitNode(id, node)\n\tif len(d.parentID) > 0 {\n\t\tparentID := d.parentID[len(d.parentID)-1]\n\t\td.emitLine(parentID, id)\n\t}\n\td.parentID = append(d.parentID, id)\n\treturn d, node\n}\n\nfunc (d *dotter) VisitAfter(node ast.Node) ast.Node {\n\td.parentID = d.parentID[:len(d.parentID)-1]\n\treturn node\n}\n\nfunc makeDot(n ast.Node, w io.Writer) error {\n\tfmt.Fprintf(w, \"digraph \\\"%s\\\" {\\n\", *prog)\n\tdot := &dotter{w: w}\n\tast.Walk(dot, n)\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *prog == \"\" {\n\t\tglog.Exitf(\"No -prog given\")\n\t}\n\n\tf, err := os.Open(*prog)\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\tn, err := parser.Parse(*prog, f)\n\tif err != nil {\n\t\tglog.Exit(err)\n\t}\n\tn, err = checker.Check(n)\n\tif err != nil {\n\t\tglog.Exit(err)\n\t}\n\tif *httpPort == \"\" {\n\t\tmakeDot(n, os.Stdout)\n\t\treturn\n\t}\n\n\thttp.HandleFunc(\"\/\",\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tdot := exec.Command(\"dot\", \"-Tsvg\")\n\t\t\tin, err := dot.StdinPipe()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tout, err := dot.StdoutPipe()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = dot.Start()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = makeDot(n, in)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = in.Close()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Add(\"Content-type\", \"image\/svg+xml\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, err = io.Copy(w, out)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t\terr = dot.Wait()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t})\n\thttp.HandleFunc(\"\/favicon.ico\", mtail.FaviconHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%s\", *httpPort), nil)\n}\n<commit_msg>Let a page reload also regenerate the graph so you can edit the source code but leave mdot running.<commit_after>\/\/ Copyright 2018 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\n\/*\nCommand mdot turns an mtail program AST into a graphviz graph on standard output.\n\nTo use, run it like (assuming your shell is in the same directory as this file)\n\n go run github.com\/google\/mtail\/cmd\/mdot --prog ..\/..\/examples\/dhcpd.mtail | xdot -\n\nor\n\n go run github.com\/google\/mtail\/cmd\/mdot --prog ..\/..\/examples\/dhcpd.mtail --http_port 8080\n\nto view the dot output visit http:\/\/localhost:8080\n\nYou'll need the graphviz `dot' command installed.\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/mtail\/internal\/mtail\"\n\t\"github.com\/google\/mtail\/internal\/vm\/ast\"\n\t\"github.com\/google\/mtail\/internal\/vm\/checker\"\n\t\"github.com\/google\/mtail\/internal\/vm\/parser\"\n)\n\nvar (\n\tprog = flag.String(\"prog\", \"\", \"Name of the program source to parse.\")\n\thttpPort = flag.String(\"http_port\", \"\", \"Port number to run HTTP server on.\")\n)\n\ntype dotter struct {\n\tw io.Writer\n\tid int\n\tparentID []int \/\/ id of the parent node\n}\n\nfunc (d *dotter) nextID() int {\n\td.id++\n\treturn d.id\n}\n\nfunc (d *dotter) emitNode(id int, node ast.Node) {\n\tattrs := map[string]string{\n\t\t\"label\": strings.Split(fmt.Sprintf(\"%T\", node), \".\")[1] + \"\\n\",\n\t\t\"shape\": \"box\",\n\t\t\"style\": \"filled\",\n\t\t\"tooltip\": node.Type().String(),\n\t}\n\tswitch n := node.(type) {\n\tcase *ast.VarDecl, *ast.DecoDecl:\n\t\tattrs[\"fillcolor\"] = \"green\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.VarDecl:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%s %s\", n.Kind, n.Name)\n\t\tcase *ast.DecoDecl:\n\t\t\tattrs[\"label\"] += n.Name\n\t\t}\n\tcase *ast.IdTerm, *ast.CaprefTerm:\n\t\tattrs[\"fillcolor\"] = \"pink\"\n\t\tattrs[\"shape\"] = \"ellipse\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.IdTerm:\n\t\t\tattrs[\"label\"] += n.Name\n\t\tcase *ast.CaprefTerm:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"$%s\", n.Name)\n\t\t}\n\tcase *ast.IntLit, *ast.FloatLit, *ast.PatternLit, *ast.StringLit:\n\t\tattrs[\"fillcolor\"] = \"pink\"\n\t\tattrs[\"shape\"] = \"ellipse\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.IntLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%d\", n.I)\n\t\tcase *ast.FloatLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"%g\", n.F)\n\t\tcase *ast.PatternLit:\n\t\t\tattrs[\"label\"] += fmt.Sprintf(\"\/%s\/\", n.Pattern)\n\t\tcase *ast.StringLit:\n\t\t\tattrs[\"label\"] += n.Text\n\t\t}\n\tcase *ast.IndexedExpr, *ast.BinaryExpr, *ast.UnaryExpr, *ast.PatternExpr, *ast.BuiltinExpr:\n\t\tattrs[\"fillcolor\"] = \"lightblue\"\n\t\tswitch n := n.(type) {\n\t\tcase *ast.BinaryExpr:\n\t\t\tattrs[\"label\"] += parser.Kind(n.Op).String()\n\t\tcase *ast.UnaryExpr:\n\t\t\tattrs[\"label\"] += parser.Kind(n.Op).String()\n\t\tcase *ast.BuiltinExpr:\n\t\t\tattrs[\"label\"] += n.Name\n\t\t}\n\t}\n\tpos := node.Pos()\n\tif pos != nil {\n\t\tattrs[\"xlabel\"] = pos.String()\n\t}\n\tfmt.Fprintf(d.w, \"n%d [\", id)\n\tfor k, v := range attrs {\n\t\tfmt.Fprintf(d.w, \"%s=\\\"%s\\\" \", k, v)\n\t}\n\tfmt.Fprintf(d.w, \"]\\n\")\n}\n\nfunc (d *dotter) emitLine(src, dst int) {\n\tfmt.Fprintf(d.w, \"n%d -> n%d\\n\", src, dst)\n}\n\nfunc (d *dotter) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) {\n\tid := d.nextID()\n\td.emitNode(id, node)\n\tif len(d.parentID) > 0 {\n\t\tparentID := d.parentID[len(d.parentID)-1]\n\t\td.emitLine(parentID, id)\n\t}\n\td.parentID = append(d.parentID, id)\n\treturn d, node\n}\n\nfunc (d *dotter) VisitAfter(node ast.Node) ast.Node {\n\td.parentID = d.parentID[:len(d.parentID)-1]\n\treturn node\n}\n\nfunc makeDot(name string, w io.Writer) error {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := parser.Parse(name, f)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err = checker.Check(n)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(w, \"digraph \\\"%s\\\" {\\n\", *prog)\n\tdot := &dotter{w: w}\n\tast.Walk(dot, n)\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *prog == \"\" {\n\t\tglog.Exitf(\"No -prog given\")\n\t}\n\n\tif *httpPort == \"\" {\n\t\tglog.Exit(makeDot(*prog, os.Stdout))\n\t}\n\n\thttp.HandleFunc(\"\/\",\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tdot := exec.Command(\"dot\", \"-Tsvg\")\n\t\t\tin, err := dot.StdinPipe()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tout, err := dot.StdoutPipe()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = dot.Start()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = makeDot(*prog, in)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = in.Close()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Add(\"Content-type\", \"image\/svg+xml\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, err = io.Copy(w, out)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t\terr = dot.Wait()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t})\n\thttp.HandleFunc(\"\/favicon.ico\", mtail.FaviconHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%s\", *httpPort), nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/cortesi\/modd\"\n\t\"github.com\/cortesi\/termlog\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nconst lullTime = time.Millisecond * 300\n\nfunc main() {\n\tpaths := kingpin.Arg(\n\t\t\"path\",\n\t\t\"Paths to monitor for changes.\",\n\t).Required().Strings()\n\n\tbeep := kingpin.Flag(\"beep\", \"Beep if any command returned an error\").\n\t\tShort('b').\n\t\tBool()\n\n\tnocommon := kingpin.Flag(\"nocommon\", \"Don't exclude commonly ignored files\").\n\t\tShort('c').\n\t\tBool()\n\n\tdaemons := kingpin.Flag(\"daemon\", \"Daemon to keep running\").\n\t\tPlaceHolder(\"CMD\").\n\t\tShort('d').\n\t\tStrings()\n\n\tprep := kingpin.Flag(\"prep\", \"Prep command to run before daemons are restarted\").\n\t\tPlaceHolder(\"CMD\").\n\t\tShort('p').\n\t\tStrings()\n\n\tcmdstats := kingpin.Flag(\"cmdstats\", \"Show stats on command execution\").\n\t\tShort('s').\n\t\tDefault(\"false\").\n\t\tBool()\n\n\texcludes := kingpin.Flag(\"exclude\", \"Glob pattern for files to exclude from monitoring\").\n\t\tPlaceHolder(\"PATTERN\").\n\t\tShort('x').\n\t\tStrings()\n\n\tdebug := kingpin.Flag(\"debug\", \"Debugging for devd development\").\n\t\tDefault(\"false\").\n\t\tBool()\n\n\tkingpin.Version(modd.Version)\n\tkingpin.Parse()\n\tlog := termlog.NewLog()\n\tlog.Notice(\"modd v%s\", modd.Version)\n\n\tif *debug {\n\t\tlog.Enable(\"debug\")\n\t\tmodd.Logger = log\n\t}\n\tif *cmdstats {\n\t\tlog.Enable(\"cmdstats\")\n\t}\n\n\tmodchan := make(chan modd.Mod)\n\texc := *excludes\n\tif !*nocommon {\n\t\texc = append(*excludes, modd.CommonExcludes...)\n\t}\n\terr := modd.Watch(*paths, exc, lullTime, modchan)\n\tif err != nil {\n\t\tkingpin.Fatalf(\"Fatal error: %s\", err)\n\t}\n\terr = modd.RunProcs(*prep, log)\n\tif err != nil {\n\t\tif *beep {\n\t\t\tfmt.Print(\"\\a\")\n\t\t}\n\t}\n\td := modd.DaemonPen{}\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, os.Kill)\n\tgo func() {\n\t\td.Shutdown(<-c)\n\t\tos.Exit(0)\n\t}()\n\td.Start(*daemons, log)\n\tfor mod := range modchan {\n\t\tlog.SayAs(\"debug\", \"Delta: \\n%s\", mod.String())\n\t\terr := modd.RunProcs(*prep, log)\n\t\tif err != nil {\n\t\t\tif *beep {\n\t\t\t\tfmt.Print(\"\\a\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\td.Restart()\n\t}\n}\n<commit_msg>We don't need to print the version on startup<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/cortesi\/modd\"\n\t\"github.com\/cortesi\/termlog\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nconst lullTime = time.Millisecond * 300\n\nfunc main() {\n\tpaths := kingpin.Arg(\n\t\t\"path\",\n\t\t\"Paths to monitor for changes.\",\n\t).Required().Strings()\n\n\tbeep := kingpin.Flag(\"beep\", \"Beep if any command returned an error\").\n\t\tShort('b').\n\t\tBool()\n\n\tnocommon := kingpin.Flag(\"nocommon\", \"Don't exclude commonly ignored files\").\n\t\tShort('c').\n\t\tBool()\n\n\tdaemons := kingpin.Flag(\"daemon\", \"Daemon to keep running\").\n\t\tPlaceHolder(\"CMD\").\n\t\tShort('d').\n\t\tStrings()\n\n\tprep := kingpin.Flag(\"prep\", \"Prep command to run before daemons are restarted\").\n\t\tPlaceHolder(\"CMD\").\n\t\tShort('p').\n\t\tStrings()\n\n\tcmdstats := kingpin.Flag(\"cmdstats\", \"Show stats on command execution\").\n\t\tShort('s').\n\t\tDefault(\"false\").\n\t\tBool()\n\n\texcludes := kingpin.Flag(\"exclude\", \"Glob pattern for files to exclude from monitoring\").\n\t\tPlaceHolder(\"PATTERN\").\n\t\tShort('x').\n\t\tStrings()\n\n\tdebug := kingpin.Flag(\"debug\", \"Debugging for devd development\").\n\t\tDefault(\"false\").\n\t\tBool()\n\n\tkingpin.Version(modd.Version)\n\tkingpin.Parse()\n\tlog := termlog.NewLog()\n\n\tif *debug {\n\t\tlog.Enable(\"debug\")\n\t\tmodd.Logger = log\n\t}\n\tif *cmdstats {\n\t\tlog.Enable(\"cmdstats\")\n\t}\n\n\tmodchan := make(chan modd.Mod)\n\texc := *excludes\n\tif !*nocommon {\n\t\texc = append(*excludes, modd.CommonExcludes...)\n\t}\n\terr := modd.Watch(*paths, exc, lullTime, modchan)\n\tif err != nil {\n\t\tkingpin.Fatalf(\"Fatal error: %s\", err)\n\t}\n\terr = modd.RunProcs(*prep, log)\n\tif err != nil {\n\t\tif *beep {\n\t\t\tfmt.Print(\"\\a\")\n\t\t}\n\t}\n\td := modd.DaemonPen{}\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, os.Kill)\n\tgo func() {\n\t\td.Shutdown(<-c)\n\t\tos.Exit(0)\n\t}()\n\td.Start(*daemons, log)\n\tfor mod := range modchan {\n\t\tlog.SayAs(\"debug\", \"Delta: \\n%s\", mod.String())\n\t\terr := modd.RunProcs(*prep, log)\n\t\tif err != nil {\n\t\t\tif *beep {\n\t\t\t\tfmt.Print(\"\\a\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\td.Restart()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCommand line utility for the Moon configuration language.\n\nPurpose\n\nThe moon utility (moon) is a command line tool for working with moon files. It\ncan be used to read values from moon files, evaluate moon files to remove\nvariables, convert moon files to json where possible, and verify that a given\nmoon file is syntactically valid.\n\nThe following is taken to be the contents of a file named ex.moon:\n\n # the whole document is implicitly a namespace, so you can set key value pairs\n # at the top level.\n first_name: \"jordan\"\n last_name: \"orelli\"\n\n # lists of things should be supported\n items: [\n \"one\"\n 2\n 3.4\n [\"five\" 6 7.8]\n ]\n\n # objects should be supported\n hash: {key: \"value\" other_key: \"other_value\"}\n\n other_hash: {\n key_1: \"one\"\n key_2: 2\n key_3: 3.4\n key_4: [\"five\" 6 7.8]\n }\n\n # we may reference an item that was defined earlier\n repeat_hash: hash\n\n # items can be hidden. i.e., they're only valid in the parse and eval stage as\n # intermediate values internal to the config file; they are *not* visible to\n # the host program. This is generally useful for composing larger, more\n # complicated things.\n .hidden_item: \"it has a value\"\n\n visible_item: .hidden_item\n\n .person_one: {\n name: \"the first name here\"\n age: 28\n hometown: \"crooklyn\"\n }\n\n .person_two: {\n name: \"the second name here\"\n age: 30\n hometown: \"tha bronx\"\n }\n\n people: [.person_one .person_two]\n\n\nSubcommands\n\ncheck: used to syntax check a given file. To check a given file, in this case ex.moon, to see if it is syntactically valid, one would invoke the following command:\n\n moon check ex.moon\n\nIf the file is syntactically valid, moon will print nothing and exit with a\nstatus of 0. If the file is invalid, moon will print a cryptic error message\nthat won't help you fix the file and exit with a status of 1.\n\neval: evaluates a given moon file. The file is parsed and evaluated, and its result is printed on stdout, itself in the moon format. Invoking the following command:\n\n moon eval ex.moon\n\nwould produce the following output:\n\n first_name: \"jordan\"\n items: [\"one\" 2 3.4 [\"five\" 6 7.8]]\n other_hash: {key_3: 3.4 key_4: [\"five\" 6 7.8] key_1: \"one\" key_2: 2}\n repeat_hash: {key: \"value\" other_key: \"other_value\"}\n people: [{age: 28 hometown: \"crooklyn\" name: \"the first name here\"} {hometown: \"tha bronx\" name: \"the second name here\" age: 30}]\n last_name: \"orelli\"\n hash: {key: \"value\" other_key: \"other_value\"}\n visible_item: \"it has a value\"\n\nto: used to convert moon files to other formats. Right now, the only\nsupported format is json. To convert a given moon file to json, one would invoke the following command:\n\n moon to json ex.moon\n\nIf the file is valid and can be converted to json (i.e., only involves types\nthat are also supported by json or readily convertible to json types), the\nfile's json representation will be printed on stdout.\n\nThe \"to\" subcommand can also take its input from stdin by omitting a file name, as in one of the following invocations:\n\n cat ex.moon | moon to json\n moon to json < ex.moon\n\nEither invocation would have the same effect.\n\nget: evaluates a moon file and retrieves a value, printing it on stdout out.\nThe file is parsed and evaluated, meaning that get will fail if executed\nagainst an invalid moon file; this is a search on an evaluated moon document,\nnot a textual search. The general format of invocation is as follows:\n\n moon get $search_term $file\n\nWhere $search_term refers to a key or a path, and $file refers to the name of a\nfile to be searched. Given the moon file ex.moon:\n\n > moon get first_name ex.moon\n \"jordan\"\n\n > moon get visible_item ex.moon\n \"it has a value\"\n\n > moon get people ex.moon\n [{hometown: \"crooklyn\" name: \"the first name here\" age: 28} {name: \"the second name here\" age: 30 hometown: \"tha bronx\"}]\n\nThe search term may involve a path, allowing one to reach into an Object or List and retrieve individual items:\n\n > moon get hash\/other_key ex.moon\n \"other_value\"\n\n > moon get people\/1\/name ex.moon\n \"the second name here\"\n\n*\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/jordanorelli\/moon\/lib\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc input(n int) io.ReadCloser {\n\tif flag.Arg(n) == \"\" {\n\t\treturn os.Stdin\n\t} else {\n\t\tf, err := os.Open(flag.Arg(n))\n\t\tif err != nil {\n\t\t\tbail(1, \"input error: %s\", err)\n\t\t}\n\t\treturn f\n\t}\n}\n\nfunc check() {\n\tr := input(1)\n\tdefer r.Close()\n\n\t_, err := moon.Read(r)\n\tif err != nil {\n\t\tbail(1, \"parse error: %s\", err)\n\t}\n}\n\nfunc to() {\n\tswitch flag.Arg(1) {\n\tcase \"json\":\n\t\tto_json(2)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"%s is not a valid output format\\n\", flag.Arg(1))\n\t\tfmt.Fprintln(os.Stderr, \"valid output formats: json\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc to_json(n int) {\n\tin := input(n)\n\tdefer in.Close()\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tb, err := json.MarshalIndent(doc, \"\", \" \")\n\tif err != nil {\n\t\tbail(1, \"encode error: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc get() {\n\tdocpath := flag.Arg(1)\n\tin := input(2)\n\tdefer in.Close()\n\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tvar v interface{}\n\tif err := doc.Get(docpath, &v); err != nil {\n\t\tbail(1, \"error reading value at path %s: %s\", docpath, err)\n\t}\n\tb, err := moon.Encode(v)\n\tif err != nil {\n\t\tbail(1, \"error encoding value: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc eval() {\n\tin := input(1)\n\tdefer in.Close()\n\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tb, err := moon.Encode(doc)\n\tif err != nil {\n\t\tbail(1, \"output error: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc bail(status int, t string, args ...interface{}) {\n\tvar w io.Writer\n\tif status == 0 {\n\t\tw = os.Stdout\n\t} else {\n\t\tw = os.Stderr\n\t}\n\tfmt.Fprintf(w, t+\"\\n\", args...)\n\tos.Exit(status)\n}\n\nfunc main() {\n\tflag.Parse()\n\tswitch flag.Arg(0) {\n\tcase \"check\":\n\t\tcheck()\n\tcase \"to\":\n\t\tto()\n\tcase \"get\":\n\t\tget()\n\tcase \"eval\":\n\t\teval()\n\tcase \"\":\n\t\tbail(1, \"must specify an action.\\nvalid actions: check to get eval\")\n\tdefault:\n\t\tbail(1, \"no such action:%s\", flag.Arg(0))\n\t}\n}\n<commit_msg>add documentation info<commit_after>\/*\nCommand line utility for the Moon configuration language.\n\nPurpose\n\nThe moon utility (moon) is a command line tool for working with moon files. It\ncan be used to read values from moon files, evaluate moon files to remove\nvariables, convert moon files to json where possible, and verify that a given\nmoon file is syntactically valid.\n\nThe following is taken to be the contents of a file named ex.moon:\n\n # ------------------------------------------------------------------------------\n # example config format\n #\n # this is a working draft of things that are valid in a new config language to\n # replace json as a config language for Go projects.\n #\n # comments are a thing now!\n # ------------------------------------------------------------------------------\n\n # the whole document is implicitly a namespace, so you can set key value pairs\n # at the top level.\n first_name: jordan\n last_name: orelli\n\n # lists of things should be supported\n items: [\n one\n 2\n 3.4\n [five; 6 7.8]\n ]\n\n # objects should be supported\n hash: {key: value; other_key: other_value}\n\n other_hash: {\n key_1: one\n key_2: 2\n key_3: 3.4\n key_4: [five; 6 7.8]\n }\n\n # we may reference an item that was defined earlier using a sigil\n repeat_hash: @hash\n\n # items can be hidden. i.e., they're only valid in the parse and eval stage as\n # intermediate values internal to the config file; they are *not* visible to\n # the host program. This is generally useful for composing larger, more\n # complicated things.\n @hidden_item: it has a value\n visible_item: @hidden_item\n\n @person_one: {\n name: the first name here\n age: 28\n hometown: crooklyn\n }\n\n @person_two: {\n name: the second name here\n age: 30\n hometown: tha bronx\n }\n\n people: [@person_one @person_two]\n\n\nSubcommands\n\ncheck: used to syntax check a given file. To check a given file, in this case ex.moon, to see if it is syntactically valid, one would invoke the following command:\n\n moon check ex.moon\n\nIf the file is syntactically valid, moon will print nothing and exit with a\nstatus of 0. If the file is invalid, moon will print a cryptic error message\nthat won't help you fix the file and exit with a status of 1.\n\neval: evaluates a given moon file. The file is parsed and evaluated, and its result is printed on stdout, itself in the moon format. Invoking the following command:\n\n moon eval ex.moon\n\nwould produce the following output:\n\n first_name: \"jordan\"\n items: [\"one\" 2 3.4 [\"five\" 6 7.8]]\n other_hash: {key_3: 3.4 key_4: [\"five\" 6 7.8] key_1: \"one\" key_2: 2}\n repeat_hash: {key: \"value\" other_key: \"other_value\"}\n people: [{age: 28 hometown: \"crooklyn\" name: \"the first name here\"} {hometown: \"tha bronx\" name: \"the second name here\" age: 30}]\n last_name: \"orelli\"\n hash: {key: \"value\" other_key: \"other_value\"}\n visible_item: \"it has a value\"\n\nto: used to convert moon files to other formats. Right now, the only\nsupported format is json. To convert a given moon file to json, one would invoke the following command:\n\n moon to json ex.moon\n\nIf the file is valid and can be converted to json (i.e., only involves types\nthat are also supported by json or readily convertible to json types), the\nfile's json representation will be printed on stdout.\n\nThe \"to\" subcommand can also take its input from stdin by omitting a file name, as in one of the following invocations:\n\n cat ex.moon | moon to json\n moon to json < ex.moon\n\nEither invocation would have the same effect.\n\nget: evaluates a moon file and retrieves a value, printing it on stdout out.\nThe file is parsed and evaluated, meaning that get will fail if executed\nagainst an invalid moon file; this is a search on an evaluated moon document,\nnot a textual search. The general format of invocation is as follows:\n\n moon get $search_term $file\n\nWhere $search_term refers to a key or a path, and $file refers to the name of a\nfile to be searched. Given the moon file ex.moon:\n\n > moon get first_name ex.moon\n \"jordan\"\n\n > moon get visible_item ex.moon\n \"it has a value\"\n\n > moon get people ex.moon\n [{hometown: \"crooklyn\" name: \"the first name here\" age: 28} {name: \"the second name here\" age: 30 hometown: \"tha bronx\"}]\n\nThe search term may involve a path, allowing one to reach into an Object or List and retrieve individual items:\n\n > moon get hash\/other_key ex.moon\n \"other_value\"\n\n > moon get people\/1\/name ex.moon\n \"the second name here\"\n\n*\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/jordanorelli\/moon\/lib\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc input(n int) io.ReadCloser {\n\tif flag.Arg(n) == \"\" {\n\t\treturn os.Stdin\n\t} else {\n\t\tf, err := os.Open(flag.Arg(n))\n\t\tif err != nil {\n\t\t\tbail(1, \"input error: %s\", err)\n\t\t}\n\t\treturn f\n\t}\n}\n\nfunc check() {\n\tr := input(1)\n\tdefer r.Close()\n\n\t_, err := moon.Read(r)\n\tif err != nil {\n\t\tbail(1, \"parse error: %s\", err)\n\t}\n}\n\nfunc to() {\n\tswitch flag.Arg(1) {\n\tcase \"json\":\n\t\tto_json(2)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"%s is not a valid output format\\n\", flag.Arg(1))\n\t\tfmt.Fprintln(os.Stderr, \"valid output formats: json\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc to_json(n int) {\n\tin := input(n)\n\tdefer in.Close()\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tb, err := json.MarshalIndent(doc, \"\", \" \")\n\tif err != nil {\n\t\tbail(1, \"encode error: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc get() {\n\tdocpath := flag.Arg(1)\n\tin := input(2)\n\tdefer in.Close()\n\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tvar v interface{}\n\tif err := doc.Get(docpath, &v); err != nil {\n\t\tbail(1, \"error reading value at path %s: %s\", docpath, err)\n\t}\n\tb, err := moon.Encode(v)\n\tif err != nil {\n\t\tbail(1, \"error encoding value: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc eval() {\n\tin := input(1)\n\tdefer in.Close()\n\n\tdoc, err := moon.Read(in)\n\tif err != nil {\n\t\tbail(1, \"input error: %s\", err)\n\t}\n\tb, err := moon.Encode(doc)\n\tif err != nil {\n\t\tbail(1, \"output error: %s\", err)\n\t}\n\tos.Stdout.Write(b)\n}\n\nfunc bail(status int, t string, args ...interface{}) {\n\tvar w io.Writer\n\tif status == 0 {\n\t\tw = os.Stdout\n\t} else {\n\t\tw = os.Stderr\n\t}\n\tfmt.Fprintf(w, t+\"\\n\", args...)\n\tos.Exit(status)\n}\n\nfunc main() {\n\tflag.Parse()\n\tswitch flag.Arg(0) {\n\tcase \"check\":\n\t\tcheck()\n\tcase \"to\":\n\t\tto()\n\tcase \"get\":\n\t\tget()\n\tcase \"eval\":\n\t\teval()\n\tcase \"\":\n\t\tbail(1, \"must specify an action.\\nvalid actions: check to get eval\")\n\tdefault:\n\t\tbail(1, \"no such action:%s\", flag.Arg(0))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage apiserver_test\n\nimport (\n\t\"time\"\n\n\t\"github.com\/juju\/loggo\"\n\tgitjujutesting \"github.com\/juju\/testing\"\n\t\"github.com\/juju\/utils\"\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"github.com\/juju\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/rpc\"\n\t\"github.com\/juju\/juju\/state\/api\"\n\t\"github.com\/juju\/juju\/state\/apiserver\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\ntype stateSuite struct {\n\ttesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&stateSuite{})\n\nvar testPingPeriod = 100 * time.Millisecond\n\nfunc (s *stateSuite) TestConnectionBrokenDetection(c *gc.C) {\n\ts.PatchValue(&api.PingPeriod, testPingPeriod)\n\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\n\t\/\/ Connection still alive\n\tselect {\n\tcase <-time.After(testPingPeriod):\n\tcase <-st.Broken():\n\t\tc.Fatalf(\"connection should be alive still\")\n\t}\n\n\t\/\/ Close the connection and see if we detect this\n\tgo st.Close()\n\n\t\/\/ Check it's detected\n\tselect {\n\tcase <-time.After(testPingPeriod + time.Second):\n\t\tc.Fatalf(\"connection not closed as expected\")\n\tcase <-st.Broken():\n\t\treturn\n\t}\n}\n\nfunc (s *stateSuite) TestPing(c *gc.C) {\n\ttw := &loggo.TestWriter{}\n\tc.Assert(loggo.RegisterWriter(\"ping-tester\", tw, loggo.DEBUG), gc.IsNil)\n\tdefer loggo.RemoveWriter(\"ping-tester\")\n\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\terr = st.Close()\n\tc.Assert(err, gc.IsNil)\n\terr = st.Ping()\n\tc.Assert(err, gc.Equals, rpc.ErrShutdown)\n\n\t\/\/ Make sure that ping messages have not been logged.\n\tfor _, m := range tw.Log {\n\t\tc.Logf(\"checking %q\", m.Message)\n\t\tc.Check(m.Message, gc.Not(gc.Matches), `.*\"Request\":\"Ping\".*`)\n\t}\n}\n\nfunc (s *stateSuite) TestClientNoNeedToPing(c *gc.C) {\n\ts.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0))\n\tst, err := api.Open(s.APIInfo(c), api.DefaultDialOpts())\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\ttime.Sleep(coretesting.ShortWait)\n\terr = st.Ping()\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *stateSuite) TestAgentConnectionShutsDownWithNoPing(c *gc.C) {\n\ts.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0))\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\ttime.Sleep(coretesting.ShortWait)\n\terr := st.Ping()\n\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n}\n\nfunc (s *stateSuite) TestAgentConnectionDelaysShutdownWithPing(c *gc.C) {\n\t\/\/ We have to be careful, because Login can take 25ms, so we ping\n\t\/\/ immediately after connecting.\n\ts.PatchValue(apiserver.MaxClientPingInterval, 50*time.Millisecond)\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\t\/\/ As long as we don't wait too long, the connection stays open\n\tfor i := 0; i < 10; i++ {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\terr = st.Ping()\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\t\/\/ However, once we stop pinging for too long, the connection dies\n\ttime.Sleep(75 * time.Millisecond)\n\terr = st.Ping()\n\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n}\n\ntype mongoPingerSuite struct {\n\ttesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&mongoPingerSuite{})\n\nfunc (s *mongoPingerSuite) SetUpTest(c *gc.C) {\n\t\/\/ We need to set the ping interval before the server is started.\n\ts.PatchValue(apiserver.MongoPingInterval, coretesting.ShortWait)\n\ts.JujuConnSuite.SetUpTest(c)\n}\n\nfunc (s *mongoPingerSuite) TestAgentConnectionsShutDownWhenStateDies(c *gc.C) {\n\ts.PatchValue(apiserver.MongoPingInterval, coretesting.ShortWait)\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\tgitjujutesting.MgoServer.Destroy()\n\n\tattempt := utils.AttemptStrategy{\n\t\tTotal: coretesting.LongWait,\n\t\tDelay: coretesting.ShortWait,\n\t}\n\tfor a := attempt.Start(); a.Next(); {\n\t\tif err := st.Ping(); err != nil {\n\t\t\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n\t\t\treturn\n\t\t}\n\t}\n\tc.Fatalf(\"timed out waiting for API server to die\")\n}\n<commit_msg>Patching values twice is bad, m'kay<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage apiserver_test\n\nimport (\n\t\"time\"\n\n\t\"github.com\/juju\/loggo\"\n\tgitjujutesting \"github.com\/juju\/testing\"\n\t\"github.com\/juju\/utils\"\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"github.com\/juju\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/rpc\"\n\t\"github.com\/juju\/juju\/state\/api\"\n\t\"github.com\/juju\/juju\/state\/apiserver\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\ntype stateSuite struct {\n\ttesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&stateSuite{})\n\nvar testPingPeriod = 100 * time.Millisecond\n\nfunc (s *stateSuite) TestConnectionBrokenDetection(c *gc.C) {\n\ts.PatchValue(&api.PingPeriod, testPingPeriod)\n\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\n\t\/\/ Connection still alive\n\tselect {\n\tcase <-time.After(testPingPeriod):\n\tcase <-st.Broken():\n\t\tc.Fatalf(\"connection should be alive still\")\n\t}\n\n\t\/\/ Close the connection and see if we detect this\n\tgo st.Close()\n\n\t\/\/ Check it's detected\n\tselect {\n\tcase <-time.After(testPingPeriod + time.Second):\n\t\tc.Fatalf(\"connection not closed as expected\")\n\tcase <-st.Broken():\n\t\treturn\n\t}\n}\n\nfunc (s *stateSuite) TestPing(c *gc.C) {\n\ttw := &loggo.TestWriter{}\n\tc.Assert(loggo.RegisterWriter(\"ping-tester\", tw, loggo.DEBUG), gc.IsNil)\n\tdefer loggo.RemoveWriter(\"ping-tester\")\n\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\terr = st.Close()\n\tc.Assert(err, gc.IsNil)\n\terr = st.Ping()\n\tc.Assert(err, gc.Equals, rpc.ErrShutdown)\n\n\t\/\/ Make sure that ping messages have not been logged.\n\tfor _, m := range tw.Log {\n\t\tc.Logf(\"checking %q\", m.Message)\n\t\tc.Check(m.Message, gc.Not(gc.Matches), `.*\"Request\":\"Ping\".*`)\n\t}\n}\n\nfunc (s *stateSuite) TestClientNoNeedToPing(c *gc.C) {\n\ts.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0))\n\tst, err := api.Open(s.APIInfo(c), api.DefaultDialOpts())\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\ttime.Sleep(coretesting.ShortWait)\n\terr = st.Ping()\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *stateSuite) TestAgentConnectionShutsDownWithNoPing(c *gc.C) {\n\ts.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0))\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\ttime.Sleep(coretesting.ShortWait)\n\terr := st.Ping()\n\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n}\n\nfunc (s *stateSuite) TestAgentConnectionDelaysShutdownWithPing(c *gc.C) {\n\t\/\/ We have to be careful, because Login can take 25ms, so we ping\n\t\/\/ immediately after connecting.\n\ts.PatchValue(apiserver.MaxClientPingInterval, 50*time.Millisecond)\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\t\/\/ As long as we don't wait too long, the connection stays open\n\tfor i := 0; i < 10; i++ {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\terr = st.Ping()\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\t\/\/ However, once we stop pinging for too long, the connection dies\n\ttime.Sleep(75 * time.Millisecond)\n\terr = st.Ping()\n\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n}\n\ntype mongoPingerSuite struct {\n\ttesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&mongoPingerSuite{})\n\nfunc (s *mongoPingerSuite) SetUpTest(c *gc.C) {\n\t\/\/ We need to set the ping interval before the server is started.\n\ts.PatchValue(apiserver.MongoPingInterval, coretesting.ShortWait)\n\ts.JujuConnSuite.SetUpTest(c)\n}\n\nfunc (s *mongoPingerSuite) TestAgentConnectionsShutDownWhenStateDies(c *gc.C) {\n\tst, _ := s.OpenAPIAsNewMachine(c)\n\terr := st.Ping()\n\tc.Assert(err, gc.IsNil)\n\tgitjujutesting.MgoServer.Destroy()\n\n\tattempt := utils.AttemptStrategy{\n\t\tTotal: coretesting.LongWait,\n\t\tDelay: coretesting.ShortWait,\n\t}\n\tfor a := attempt.Start(); a.Next(); {\n\t\tif err := st.Ping(); err != nil {\n\t\t\tc.Assert(err, gc.ErrorMatches, \"connection is shut down\")\n\t\t\treturn\n\t\t}\n\t}\n\tc.Fatalf(\"timed out waiting for API server to die\")\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/aptly-dev\/aptly\/deb\"\n\t\"github.com\/smira\/commander\"\n)\n\nfunc aptlyRepoList(cmd *commander.Command, args []string) error {\n\tvar err error\n\tif len(args) != 0 {\n\t\tcmd.Usage()\n\t\treturn commander.ErrCommandError\n\t}\n\n\traw := cmd.Flag.Lookup(\"raw\").Value.Get().(bool)\n\tjsonFlag := cmd.Flag.Lookup(\"json\").Value.Get().(bool)\n\n\trepos := make([]string, context.CollectionFactory().LocalRepoCollection().Len())\n\tjsonRepos := make([]*deb.LocalRepo, context.CollectionFactory().LocalRepoCollection().Len())\n\ti := 0\n\tcontext.CollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {\n\t\tif raw {\n\t\t\trepos[i] = repo.Name\n\t\t} else {\n\t\t\te := context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\n\t\t\tif jsonFlag {\n\t\t\t\tjsonRepos[i] = repo\n\t\t\t} else {\n\t\t\t\trepos[i] = fmt.Sprintf(\" * %s (packages: %d)\", repo.String(), repo.NumPackages())\n\t\t\t}\n\t\t}\n\t\ti++\n\t\treturn nil\n\t})\n\n\tcontext.CloseDatabase()\n\n\tif raw {\n\t\tsort.Strings(repos)\n\t\tfor _, repo := range repos {\n\t\t\tfmt.Printf(\"%s\\n\", repo)\n\t\t}\n\t} else if jsonFlag {\n\t\tsort.Slice(jsonRepos, func(i, j int) bool {\n\t\t\treturn jsonRepos[i].Name < jsonRepos[j].Name\n\t\t})\n\t\tif output, e := json.MarshalIndent(jsonRepos, \"\", \" \"); e == nil {\n\t\t\tfmt.Println(string(output))\n\t\t} else {\n\t\t\terr = e\n\t\t}\n\t} else {\n\t\tsort.Strings(repos)\n\t\tif len(repos) > 0 {\n\t\t\tfmt.Printf(\"List of local repos:\\n\")\n\t\t\tfor _, repo := range repos {\n\t\t\t\tfmt.Println(repo)\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\nTo get more information about local repository, run `aptly repo show <name>`.\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"No local repositories found, create one with `aptly repo create ...`.\\n\")\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc makeCmdRepoList() *commander.Command {\n\tcmd := &commander.Command{\n\t\tRun: aptlyRepoList,\n\t\tUsageLine: \"list\",\n\t\tShort: \"list local repositories\",\n\t\tLong: `\nList command shows full list of local package repositories.\n\nExample:\n\n $ aptly repo list\n`,\n\t}\n\n\tcmd.Flag.Bool(\"json\", false, \"display list in JSON format\")\n\tcmd.Flag.Bool(\"raw\", false, \"display list in machine-readable format\")\n\n\treturn cmd\n}\n<commit_msg>Refactor repo list into json and txt output<commit_after>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/aptly-dev\/aptly\/deb\"\n\t\"github.com\/smira\/commander\"\n)\n\nfunc aptlyRepoList(cmd *commander.Command, args []string) error {\n\tif len(args) != 0 {\n\t\tcmd.Usage()\n\t\treturn commander.ErrCommandError\n\t}\n\n\tjsonFlag := cmd.Flag.Lookup(\"json\").Value.Get().(bool)\n\n\tif jsonFlag {\n\t\treturn aptlyRepoListJson(cmd, args)\n\t}\n\n\treturn aptlyRepoListTxt(cmd, args)\n}\n\nfunc aptlyRepoListTxt(cmd *commander.Command, args []string) error {\n\tvar err error\n\n\traw := cmd.Flag.Lookup(\"raw\").Value.Get().(bool)\n\n\trepos := make([]string, context.CollectionFactory().LocalRepoCollection().Len())\n\ti := 0\n\tcontext.CollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {\n\t\tif raw {\n\t\t\trepos[i] = repo.Name\n\t\t} else {\n\t\t\te := context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\n\t\t\trepos[i] = fmt.Sprintf(\" * %s (packages: %d)\", repo.String(), repo.NumPackages())\n\t\t}\n\t\ti++\n\t\treturn nil\n\t})\n\n\tcontext.CloseDatabase()\n\n\tif raw {\n\t\tsort.Strings(repos)\n\t\tfor _, repo := range repos {\n\t\t\tfmt.Printf(\"%s\\n\", repo)\n\t\t}\n\t} else {\n\t\tsort.Strings(repos)\n\t\tif len(repos) > 0 {\n\t\t\tfmt.Printf(\"List of local repos:\\n\")\n\t\t\tfor _, repo := range repos {\n\t\t\t\tfmt.Println(repo)\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\nTo get more information about local repository, run `aptly repo show <name>`.\\n\")\n\t\t} else {\n\t\t\tfmt.Printf(\"No local repositories found, create one with `aptly repo create ...`.\\n\")\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc aptlyRepoListJson(cmd *commander.Command, args []string) error {\n\tvar err error\n\n\tjsonRepos := make([]*deb.LocalRepo, context.CollectionFactory().LocalRepoCollection().Len())\n\ti := 0\n\tcontext.CollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {\n\t\te := context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\tjsonRepos[i] = repo\n\t\ti++\n\t\treturn nil\n\t})\n\n\tcontext.CloseDatabase()\n\n\tsort.Slice(jsonRepos, func(i, j int) bool {\n\t\treturn jsonRepos[i].Name < jsonRepos[j].Name\n\t})\n\tif output, e := json.MarshalIndent(jsonRepos, \"\", \" \"); e == nil {\n\t\tfmt.Println(string(output))\n\t} else {\n\t\terr = e\n\t}\n\n\treturn err\n}\n\nfunc makeCmdRepoList() *commander.Command {\n\tcmd := &commander.Command{\n\t\tRun: aptlyRepoList,\n\t\tUsageLine: \"list\",\n\t\tShort: \"list local repositories\",\n\t\tLong: `\nList command shows full list of local package repositories.\n\nExample:\n\n $ aptly repo list\n`,\n\t}\n\n\tcmd.Flag.Bool(\"json\", false, \"display list in JSON format\")\n\tcmd.Flag.Bool(\"raw\", false, \"display list in machine-readable format\")\n\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage installer\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/tsuru\/tsuru-client\/tsuru\/installer\/defaultconfig\"\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc (s *S) TestResolverConfig(c *check.C) {\n\ttt := []struct {\n\t\tDescription string\n\t\tBase string\n\t\tConfig map[string]string\n\t\tResult string\n\t\tErrMsg string\n\t}{\n\t\t{\"invalid path\", \"invalid\", nil, \"\", \"no such file or directory\"},\n\t\t{\"default configuration\", \"\", nil, defaultconfig.Compose, \"\"},\n\t\t{\"custom parameter\", \"\", map[string]string{\"TSURU_API_IMAGE\": \"INJECT_TEST\"}, \"INJECT_TEST\", \"\"},\n\t}\n\n\tfor _, tc := range tt {\n\t\tresult, err := resolveConfig(tc.Base, tc.Config)\n\t\tif len(tc.ErrMsg) > 0 {\n\t\t\tcontains := strings.Contains(err.Error(), tc.ErrMsg)\n\t\t\tc.Assert(contains, check.Equals, true)\n\t\t} else {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t\tc.Assert(strings.Contains(result, tc.Result), check.Equals, true)\n\t}\n}\n<commit_msg>installer: fix test error message check on windows<commit_after>\/\/ Copyright 2016 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage installer\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/tsuru\/tsuru-client\/tsuru\/installer\/defaultconfig\"\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc (s *S) TestResolverConfig(c *check.C) {\n\ttt := []struct {\n\t\tDescription string\n\t\tBase string\n\t\tConfig map[string]string\n\t\tResult string\n\t\tErrMsg string\n\t}{\n\t\t{\"invalid path\", \"invalid\", nil, \"\", \".*(cannot find the file|no such file).*\"},\n\t\t{\"default configuration\", \"\", nil, defaultconfig.Compose, \"\"},\n\t\t{\"custom parameter\", \"\", map[string]string{\"TSURU_API_IMAGE\": \"INJECT_TEST\"}, \"INJECT_TEST\", \"\"},\n\t}\n\n\tfor _, tc := range tt {\n\t\tresult, err := resolveConfig(tc.Base, tc.Config)\n\t\tif len(tc.ErrMsg) > 0 {\n\t\t\tc.Assert(err, check.ErrorMatches, tc.ErrMsg)\n\t\t} else {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t\tc.Assert(strings.Contains(result, tc.Result), check.Equals, true)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/JonLundy\/go-totp\/totp\"\n\t\"bufio\"\n\t\"os\"\n\t\"encoding\/base32\"\n\t\"time\"\n\t\"crypto\/sha1\"\n\t\"fmt\"\n)\n\nfunc main() {\n\treader := bufio.NewReader(os.Stdin)\n\tkey, _ := reader.ReadString('\\n')\n\n\tk, _ := base32.StdEncoding.DecodeString(key)\n\n\tcode, _ := totp.Totp(k, time.Now().Unix(), sha1.New, 6)\n\tfmt.Print(code)\n}<commit_msg>add padding handling<commit_after>package main\n\nimport (\n\t\"github.com\/JonLundy\/go-totp\/pkg\/totp\"\n\t\"bufio\"\n\t\"os\"\n\t\"encoding\/base32\"\n\t\"time\"\n\t\"crypto\/sha1\"\n\t\"fmt\"\n \"strings\"\n)\n\nfunc main() {\n\treader := bufio.NewReader(os.Stdin)\n\tkey, _ := reader.ReadString('\\n')\n\n\tkey = strings.Trim(key, \"=\\n\")\n\n if i := len(key) % 8; i != 0 {\n key += strings.Repeat(\"=\", 8-i)\n }\n\n k, err := base32.StdEncoding.DecodeString(key)\n\n\tcode, _ := totp.Totp(k, time.Now().Unix(), sha1.New, 6)\n\tfmt.Print(code)\n}\n\n<|endoftext|>"} {"text":"<commit_before>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Event struct {\n\tEventType string `json:eventType`\n\tTimestamp time.Time `json:timestamp`\n\tSlaveID string `json:slaveId`\n\tTaskID string `json:taskId`\n\tTaskStatus string `json:taskStatus`\n\tAppID string `json:appId`\n\tHost string `json:host`\n\tPorts []int `json:ports`\n\tVersion string `json:version`\n}\n\ntype Listener struct {\n\tevents chan Event\n\thost string\n\tinternalPort string \/\/ Internal\/external ports are relative\n\texternalPort string \/\/ to the container this process runs in.\n}\n\nfunc NewListener(host string, internalPort, externalPort string) *Listener {\n\tlistener := &Listener{\n\t\tevents: make(chan Event),\n\t\thost: host,\n\t\tinternalPort: internalPort,\n\t\texternalPort: externalPort,\n\t}\n\thttp.HandleFunc(\"\/push-listener\", listener.handler)\n\tgo http.ListenAndServe(\":\"+internalPort, nil)\n\n\treturn listener\n}\n\nfunc (l *Listener) handler(res http.ResponseWriter, req *http.Request) {\n\tdecoder := json.NewDecoder(req.Body)\n\n\tvar event Event\n\tif err := decoder.Decode(&event); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif event.EventType == \"status_update_event\" { \/\/ We only care about container change events\n\t\tl.events <- event\n\t}\n\n\tres.Write([]byte(\"Thanks.\")) \/\/ Marathon ignores replies. Just being polite.\n}\n\nfunc (l *Listener) Events() <-chan Event {\n\treturn l.events\n}\n\nfunc (l *Listener) Subscribe(marathonHost string) error {\n\tmarathonURL := url.URL{Scheme: \"http\", Host: marathonHost, Path: \"\/v2\/eventSubscriptions\"}\n\tq := marathonURL.Query()\n\tq.Set(\"callbackUrl\", fmt.Sprintf(\"http:\/\/%s:%s\/push-listener\", l.host, l.externalPort))\n\tmarathonURL.RawQuery = q.Encode()\n\n\tres, err := http.Post(marathonURL.String(), \"application\/json\", strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"Bad status code while subscribing to marathon events: \" + res.Status)\n\t}\n\n\tvar data map[string]interface{}\n\tdecoder := json.NewDecoder(res.Body)\n\tif err := decoder.Decode(&data); err != nil {\n\t\treturn err\n\t}\n\n\n\treturn nil\n}\n<commit_msg>Added quotes around json descriptions in Event struct.<commit_after>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Event struct {\n\tEventType string `json:\"eventType\"`\n\tTimestamp time.Time `json:\"timestamp\"`\n\tSlaveID string `json:\"slaveId\"`\n\tTaskID string `json:\"taskId\"`\n\tTaskStatus string `json:\"taskStatus\"`\n\tAppID string `json:\"appId\"`\n\tHost string `json:\"host\"`\n\tPorts []int `json:\"ports\"`\n\tVersion string `json:\"version\"`\n}\n\ntype Listener struct {\n\tevents chan Event\n\thost string\n\tinternalPort string \/\/ Internal\/external ports are relative\n\texternalPort string \/\/ to the container this process runs in.\n}\n\nfunc NewListener(host string, internalPort, externalPort string) *Listener {\n\tlistener := &Listener{\n\t\tevents: make(chan Event),\n\t\thost: host,\n\t\tinternalPort: internalPort,\n\t\texternalPort: externalPort,\n\t}\n\thttp.HandleFunc(\"\/push-listener\", listener.handler)\n\tgo http.ListenAndServe(\":\"+internalPort, nil)\n\n\treturn listener\n}\n\nfunc (l *Listener) handler(res http.ResponseWriter, req *http.Request) {\n\tdecoder := json.NewDecoder(req.Body)\n\n\tvar event Event\n\tif err := decoder.Decode(&event); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif event.EventType == \"status_update_event\" { \/\/ We only care about container change events\n\t\tl.events <- event\n\t}\n\n\tres.Write([]byte(\"Thanks.\")) \/\/ Marathon ignores replies. Just being polite.\n}\n\nfunc (l *Listener) Events() <-chan Event {\n\treturn l.events\n}\n\nfunc (l *Listener) Subscribe(marathonHost string) error {\n\tmarathonURL := url.URL{Scheme: \"http\", Host: marathonHost, Path: \"\/v2\/eventSubscriptions\"}\n\tq := marathonURL.Query()\n\tq.Set(\"callbackUrl\", fmt.Sprintf(\"http:\/\/%s:%s\/push-listener\", l.host, l.externalPort))\n\tmarathonURL.RawQuery = q.Encode()\n\n\tres, err := http.Post(marathonURL.String(), \"application\/json\", strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"Bad status code while subscribing to marathon events: \" + res.Status)\n\t}\n\n\tvar data map[string]interface{}\n\tdecoder := json.NewDecoder(res.Body)\n\tif err := decoder.Decode(&data); err != nil {\n\t\treturn err\n\t}\n\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage models\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/Unknwon\/com\"\n\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/markup\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n)\n\nfunc (issue *Issue) mailSubject() string {\n\treturn fmt.Sprintf(\"[%s] %s (#%d)\", issue.Repo.Name, issue.Title, issue.Index)\n}\n\n\/\/ mailIssueCommentToParticipants can be used for both new issue creation and comment.\n\/\/ This function sends two list of emails:\n\/\/ 1. Repository watchers and users who are participated in comments.\n\/\/ 2. Users who are not in 1. but get mentioned in current issue\/comment.\nfunc mailIssueCommentToParticipants(e Engine, issue *Issue, doer *User, content string, comment *Comment, mentions []string) error {\n\tif !setting.Service.EnableNotifyMail {\n\t\treturn nil\n\t}\n\n\twatchers, err := getWatchers(e, issue.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getWatchers [repo_id: %d]: %v\", issue.RepoID, err)\n\t}\n\tparticipants, err := getParticipantsByIssueID(e, issue.ID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getParticipantsByIssueID [issue_id: %d]: %v\", issue.ID, err)\n\t}\n\n\t\/\/ In case the issue poster is not watching the repository and is active,\n\t\/\/ even if we have duplicated in watchers, can be safely filtered out.\n\tposter, err := GetUserByID(issue.PosterID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetUserByID [%d]: %v\", issue.PosterID, err)\n\t}\n\tif issue.PosterID != doer.ID && poster.IsActive && !poster.ProhibitLogin {\n\t\tparticipants = append(participants, issue.Poster)\n\t}\n\n\t\/\/ Assignees must receive any communications\n\tassignees, err := GetAssigneesByIssue(issue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, assignee := range assignees {\n\t\tif assignee.ID != doer.ID {\n\t\t\tparticipants = append(participants, assignee)\n\t\t}\n\t}\n\n\ttos := make([]string, 0, len(watchers)) \/\/ List of email addresses.\n\tnames := make([]string, 0, len(watchers))\n\tfor i := range watchers {\n\t\tif watchers[i].UserID == doer.ID {\n\t\t\tcontinue\n\t\t}\n\n\t\tto, err := getUserByID(e, watchers[i].UserID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetUserByID [%d]: %v\", watchers[i].UserID, err)\n\t\t}\n\t\tif to.IsOrganization() {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, to.Email)\n\t\tnames = append(names, to.Name)\n\t}\n\tfor i := range participants {\n\t\tif participants[i].ID == doer.ID {\n\t\t\tcontinue\n\t\t} else if com.IsSliceContainsStr(names, participants[i].Name) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, participants[i].Email)\n\t\tnames = append(names, participants[i].Name)\n\t}\n\n\tSendIssueCommentMail(issue, doer, content, comment, tos)\n\n\t\/\/ Mail mentioned people and exclude watchers.\n\tnames = append(names, doer.Name)\n\ttos = make([]string, 0, len(mentions)) \/\/ list of user names.\n\tfor i := range mentions {\n\t\tif com.IsSliceContainsStr(names, mentions[i]) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, mentions[i])\n\t}\n\tSendIssueMentionMail(issue, doer, content, comment, getUserEmailsByNames(e, tos))\n\n\treturn nil\n}\n\n\/\/ MailParticipants sends new issue thread created emails to repository watchers\n\/\/ and mentioned people.\nfunc (issue *Issue) MailParticipants() (err error) {\n\treturn issue.mailParticipants(x)\n}\n\nfunc (issue *Issue) mailParticipants(e Engine) (err error) {\n\tmentions := markup.FindAllMentions(issue.Content)\n\tif err = UpdateIssueMentions(e, issue.ID, mentions); err != nil {\n\t\treturn fmt.Errorf(\"UpdateIssueMentions [%d]: %v\", issue.ID, err)\n\t}\n\n\tif err = mailIssueCommentToParticipants(e, issue, issue.Poster, issue.Content, nil, mentions); err != nil {\n\t\tlog.Error(4, \"mailIssueCommentToParticipants: %v\", err)\n\t}\n\n\treturn nil\n}\n<commit_msg>Don't disclose emails of all users when sending out emails (#4664)<commit_after>\/\/ Copyright 2016 The Gogs Authors. All rights reserved.\n\/\/ Copyright 2018 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage models\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/Unknwon\/com\"\n\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/markup\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n)\n\nfunc (issue *Issue) mailSubject() string {\n\treturn fmt.Sprintf(\"[%s] %s (#%d)\", issue.Repo.Name, issue.Title, issue.Index)\n}\n\n\/\/ mailIssueCommentToParticipants can be used for both new issue creation and comment.\n\/\/ This function sends two list of emails:\n\/\/ 1. Repository watchers and users who are participated in comments.\n\/\/ 2. Users who are not in 1. but get mentioned in current issue\/comment.\nfunc mailIssueCommentToParticipants(e Engine, issue *Issue, doer *User, content string, comment *Comment, mentions []string) error {\n\tif !setting.Service.EnableNotifyMail {\n\t\treturn nil\n\t}\n\n\twatchers, err := getWatchers(e, issue.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getWatchers [repo_id: %d]: %v\", issue.RepoID, err)\n\t}\n\tparticipants, err := getParticipantsByIssueID(e, issue.ID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getParticipantsByIssueID [issue_id: %d]: %v\", issue.ID, err)\n\t}\n\n\t\/\/ In case the issue poster is not watching the repository and is active,\n\t\/\/ even if we have duplicated in watchers, can be safely filtered out.\n\tposter, err := GetUserByID(issue.PosterID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetUserByID [%d]: %v\", issue.PosterID, err)\n\t}\n\tif issue.PosterID != doer.ID && poster.IsActive && !poster.ProhibitLogin {\n\t\tparticipants = append(participants, issue.Poster)\n\t}\n\n\t\/\/ Assignees must receive any communications\n\tassignees, err := GetAssigneesByIssue(issue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, assignee := range assignees {\n\t\tif assignee.ID != doer.ID {\n\t\t\tparticipants = append(participants, assignee)\n\t\t}\n\t}\n\n\ttos := make([]string, 0, len(watchers)) \/\/ List of email addresses.\n\tnames := make([]string, 0, len(watchers))\n\tfor i := range watchers {\n\t\tif watchers[i].UserID == doer.ID {\n\t\t\tcontinue\n\t\t}\n\n\t\tto, err := getUserByID(e, watchers[i].UserID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetUserByID [%d]: %v\", watchers[i].UserID, err)\n\t\t}\n\t\tif to.IsOrganization() {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, to.Email)\n\t\tnames = append(names, to.Name)\n\t}\n\tfor i := range participants {\n\t\tif participants[i].ID == doer.ID {\n\t\t\tcontinue\n\t\t} else if com.IsSliceContainsStr(names, participants[i].Name) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, participants[i].Email)\n\t\tnames = append(names, participants[i].Name)\n\t}\n\n\tfor _, to := range tos {\n\t\tSendIssueCommentMail(issue, doer, content, comment, []string{to})\n\t}\n\n\t\/\/ Mail mentioned people and exclude watchers.\n\tnames = append(names, doer.Name)\n\ttos = make([]string, 0, len(mentions)) \/\/ list of user names.\n\tfor i := range mentions {\n\t\tif com.IsSliceContainsStr(names, mentions[i]) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttos = append(tos, mentions[i])\n\t}\n\n\temails := getUserEmailsByNames(e, tos)\n\n\tfor _, to := range emails {\n\t\tSendIssueMentionMail(issue, doer, content, comment, []string{to})\n\t}\n\n\treturn nil\n}\n\n\/\/ MailParticipants sends new issue thread created emails to repository watchers\n\/\/ and mentioned people.\nfunc (issue *Issue) MailParticipants() (err error) {\n\treturn issue.mailParticipants(x)\n}\n\nfunc (issue *Issue) mailParticipants(e Engine) (err error) {\n\tmentions := markup.FindAllMentions(issue.Content)\n\tif err = UpdateIssueMentions(e, issue.ID, mentions); err != nil {\n\t\treturn fmt.Errorf(\"UpdateIssueMentions [%d]: %v\", issue.ID, err)\n\t}\n\n\tif err = mailIssueCommentToParticipants(e, issue, issue.Poster, issue.Content, nil, mentions); err != nil {\n\t\tlog.Error(4, \"mailIssueCommentToParticipants: %v\", err)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"testing\"\n\n\t\"bitbucket.org\/mundipagg\/boletoapi\/test\"\n)\n\nfunc TestShouldReturnValidCpfOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"12345678901\", Type: \"CPF\"}\n\tif document.IsCPF() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCpfOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"1234567890132\", Type: \"CNPJ\"}\n\tif document.IsCPF() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnValidCnpjOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"1234567890132\", Type: \"CnpJ\"}\n\tif document.IsCNPJ() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCnpjOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"12345678901\", Type: \"CPF\"}\n\tif document.IsCNPJ() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnValidCnpjOnDocumentNumber(t *testing.T) {\n\tdocument := Document{Number: \"12345678901564fas\", Type: \"CNPJ\"}\n\tif err := document.ValidateCNPJ(); err != nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCnpjOnDocumentNumber(t *testing.T) {\n\tdocument := Document{Number: \"12345678901564asdf22\", Type: \"CNPJ\"}\n\tif err := document.ValidateCNPJ(); err == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnBankNumberIsValid(t *testing.T) {\n\tvar b BankNumber = 237\n\n\tif b.IsBankNumberValid() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldAppendCollectionOfErrrors(t *testing.T) {\n\te := NewErrorCollection(ErrorResponse{Code: \"200\", Message: \"Hue2\"})\n\te.Append(\"100\", \"Hue\")\n\ttest.ExpectTrue(len(e) == 2, t)\n}\n\nfunc TestShouldCreateNewSingleErrorCollection(t *testing.T) {\n\te := NewSingleErrorCollection(\"200\", \"Hue2\")\n\ttest.ExpectTrue(len(e) == 1, t)\n}\n\nfunc TestIsAgencyValid(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"234-2a\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Agency == \"2342\", t)\n}\n\nfunc TestIsAgencyInValid(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"234-2222a\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectError(err, t)\n}\n\nfunc TestIsAgencyValidWithLessThan4Digits(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"321\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Agency == \"0321\", t)\n}\n\nfunc TestCalculateAgencyDigit(t *testing.T) {\n\ta := new(Agreement)\n\ta.AccountDigit = \"1sssss\"\n\tc := func(s string) string {\n\t\treturn \"1\"\n\t}\n\ta.CalculateAgencyDigit(c)\n\ttest.ExpectTrue(a.AgencyDigit == \"1\", t)\n}\n\nfunc WTestCalculateAgencyDigitWithInvalidDigit(t *testing.T) {\n\ta := Agreement{\n\t\tAgencyDigit: \"\",\n\t}\n\tc := func(s string) string {\n\t\treturn \"1\"\n\t}\n\ta.CalculateAgencyDigit(c)\n\ttest.ExpectTrue(a.AgencyDigit == \"1\", t)\n}\n\nfunc TestIsAccountValid(t *testing.T) {\n\ta := Agreement{\n\t\tAccount: \"1234fff\",\n\t}\n\terr := a.IsAccountValid(8)\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Account == \"00001234\", t)\n}\n\nfunc TestIsAccountInValid(t *testing.T) {\n\ta := Agreement{\n\t\tAccount: \"123456789\",\n\t}\n\terr := a.IsAccountValid(8)\n\ttest.ExpectError(err, t)\n}\n<commit_msg>:art: Muda nome de método no teste<commit_after>package models\n\nimport (\n\t\"testing\"\n\n\t\"bitbucket.org\/mundipagg\/boletoapi\/test\"\n)\n\nfunc TestShouldReturnValidCpfOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"12345678901\", Type: \"CPF\"}\n\tif document.IsCPF() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCpfOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"1234567890132\", Type: \"CNPJ\"}\n\tif document.IsCNPJ() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnValidCnpjOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"1234567890132\", Type: \"CnpJ\"}\n\tif document.IsCNPJ() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCnpjOnDocumentType(t *testing.T) {\n\tdocument := Document{Number: \"12345678901\", Type: \"CPF\"}\n\tif document.IsCNPJ() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnValidCnpjOnDocumentNumber(t *testing.T) {\n\tdocument := Document{Number: \"12345678901564fas\", Type: \"CNPJ\"}\n\tif err := document.ValidateCNPJ(); err != nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnInvalidCnpjOnDocumentNumber(t *testing.T) {\n\tdocument := Document{Number: \"12345678901564asdf22\", Type: \"CNPJ\"}\n\tif err := document.ValidateCNPJ(); err == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldReturnBankNumberIsValid(t *testing.T) {\n\tvar b BankNumber = 237\n\n\tif b.IsBankNumberValid() == false {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestShouldAppendCollectionOfErrrors(t *testing.T) {\n\te := NewErrorCollection(ErrorResponse{Code: \"200\", Message: \"Hue2\"})\n\te.Append(\"100\", \"Hue\")\n\ttest.ExpectTrue(len(e) == 2, t)\n}\n\nfunc TestShouldCreateNewSingleErrorCollection(t *testing.T) {\n\te := NewSingleErrorCollection(\"200\", \"Hue2\")\n\ttest.ExpectTrue(len(e) == 1, t)\n}\n\nfunc TestIsAgencyValid(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"234-2a\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Agency == \"2342\", t)\n}\n\nfunc TestIsAgencyInValid(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"234-2222a\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectError(err, t)\n}\n\nfunc TestIsAgencyValidWithLessThan4Digits(t *testing.T) {\n\ta := Agreement{\n\t\tAgency: \"321\",\n\t}\n\terr := a.IsAgencyValid()\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Agency == \"0321\", t)\n}\n\nfunc TestCalculateAgencyDigit(t *testing.T) {\n\ta := new(Agreement)\n\ta.AccountDigit = \"1sssss\"\n\tc := func(s string) string {\n\t\treturn \"1\"\n\t}\n\ta.CalculateAgencyDigit(c)\n\ttest.ExpectTrue(a.AgencyDigit == \"1\", t)\n}\n\nfunc WTestCalculateAgencyDigitWithInvalidDigit(t *testing.T) {\n\ta := Agreement{\n\t\tAgencyDigit: \"\",\n\t}\n\tc := func(s string) string {\n\t\treturn \"1\"\n\t}\n\ta.CalculateAgencyDigit(c)\n\ttest.ExpectTrue(a.AgencyDigit == \"1\", t)\n}\n\nfunc TestIsAccountValid(t *testing.T) {\n\ta := Agreement{\n\t\tAccount: \"1234fff\",\n\t}\n\terr := a.IsAccountValid(8)\n\ttest.ExpectNoError(err, t)\n\ttest.ExpectTrue(a.Account == \"00001234\", t)\n}\n\nfunc TestIsAccountInValid(t *testing.T) {\n\ta := Agreement{\n\t\tAccount: \"123456789\",\n\t}\n\terr := a.IsAccountValid(8)\n\ttest.ExpectError(err, t)\n}\n<|endoftext|>"} {"text":"<commit_before>package argot\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/xeipuuv\/gojsonschema\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype Step interface {\n\tGo() error\n}\n\ntype Steps []Step\n\n\/\/ t can be nil. If t is not nil and an error occurs, then t.Fatal\n\/\/ will be called. The steps returned represent the steps that were\n\/\/ executed, including the step that errored, if an error occurred.\nfunc (ss Steps) Go(t *testing.T) (results Steps, err error) {\n\tif t != nil {\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Achieved Steps: %v; Error: %v\", results, err)\n\t\t\t}\n\t\t}()\n\t}\n\tresults = make([]Step, 0, len(ss))\n\tfor _, step := range ss {\n\t\tresults = append(results, step)\n\t\terr = step.Go()\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t}\n\treturn results, nil\n}\n\ntype StepFunc func() error\n\nfunc (sf StepFunc) Go() error {\n\treturn sf()\n}\n\ntype HttpCall struct {\n\tClient *http.Client\n\tRequest *http.Request\n\tResponse *http.Response\n\tResponseBody []byte\n}\n\n\/\/ client can be nil. If it is nil, a new http.Client is used.\nfunc NewHttpCall(client *http.Client) *HttpCall {\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\treturn &HttpCall{\n\t\tClient: client,\n\t}\n}\n\nfunc (hc *HttpCall) AssertNoRequest() error {\n\tif hc.Request == nil {\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(\"Request already set\")\n\t}\n}\n\nfunc (hc *HttpCall) AssertRequest() error {\n\tif hc.Request == nil {\n\t\treturn errors.New(\"No Request set\")\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (hc *HttpCall) AssertNoRespose() error {\n\tif hc.Response == nil {\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(\"Response already set\")\n\t}\n}\n\nfunc (hc *HttpCall) EnsureResponse() error {\n\tif hc.Response != nil {\n\t\treturn nil\n\t} else if hc.Request == nil {\n\t\treturn errors.New(\"Cannot ensure response: no request.\")\n\t} else if response, err := hc.Client.Do(hc.Request); err != nil {\n\t\treturn fmt.Errorf(\"Error when making call of %v: %v\", hc.Request, err)\n\t} else {\n\t\thc.Response = response\n\t\treturn nil\n\t}\n}\n\n\/\/ Idempotent.\nfunc (hc *HttpCall) ReceiveBody() error {\n\tif err := hc.EnsureResponse(); err != nil {\n\t\treturn err\n\t} else if hc.ResponseBody != nil {\n\t\treturn nil\n\t} else {\n\t\tdefer hc.Response.Body.Close()\n\t\tbites := new(bytes.Buffer)\n\t\tif _, err = io.Copy(bites, hc.Response.Body); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.ResponseBody = bites.Bytes()\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Idempotent. You should ensure this is called at the end of life for\n\/\/ each HttpCall. It drains bodies and generally sweeps up the mess.\nfunc (hc *HttpCall) Reset() error {\n\thc.Request = nil\n\tif hc.Response != nil && hc.ResponseBody == nil {\n\t\tio.Copy(ioutil.Discard, hc.Response.Body)\n\t\thc.Response.Body.Close()\n\t\thc.Response = nil\n\t}\n\thc.ResponseBody = nil\n\treturn nil\n}\n\n\/\/ This will automatically call HttpCall.Reset to ensure it's safe to\n\/\/ create a new request.\nfunc NewRequest(hc *HttpCall, method, urlStr string, body io.Reader) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.Reset(); err != nil {\n\t\t\treturn err\n\t\t} else if req, err := http.NewRequest(method, urlStr, body); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.Request = req\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc RequestHeader(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := AnyError(hc.AssertRequest(), hc.AssertNoRespose()); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.Request.Header.Set(key, value)\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseStatusEquals(hc *HttpCall, status int) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if hc.Response.StatusCode != status {\n\t\t\treturn fmt.Errorf(\"Status: Expected %d; found %d.\", status, hc.Response.StatusCode)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseHeaderEquals(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if header := hc.Response.Header.Get(key); header != value {\n\t\t\treturn fmt.Errorf(\"Header '%s': Expected '%s'; found '%s'.\", key, value, header)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseHeaderContains(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if header := hc.Response.Header.Get(key); !strings.Contains(header, value) {\n\t\t\treturn fmt.Errorf(\"Header '%s': Expected '%s'; found '%s'.\", key, value, header)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyEquals(hc *HttpCall, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else if string(hc.ResponseBody) != value {\n\t\t\treturn fmt.Errorf(\"Body: Expected '%s'; found '%s'.\", value, string(hc.ResponseBody))\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyContains(hc *HttpCall, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else if !strings.Contains(string(hc.ResponseBody), value) {\n\t\t\treturn fmt.Errorf(\"Body: Expected '%s'; found '%s'.\", value, string(hc.ResponseBody))\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyJSONSchema(hc *HttpCall, schema string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tschemaLoader := gojsonschema.NewStringLoader(schema)\n\t\t\tbodyLoader := gojsonschema.NewStringLoader(string(hc.ResponseBody))\n\t\t\tif result, err := gojsonschema.Validate(schemaLoader, bodyLoader); err != nil {\n\t\t\t\treturn err\n\t\t\t} else if !result.Valid() {\n\t\t\t\tmsg := \"Validation failure:\\n\"\n\t\t\t\tfor _, err := range result.Errors() {\n\t\t\t\t\tmsg += fmt.Sprintf(\"\\t%v\\n\", err)\n\t\t\t\t}\n\t\t\t\treturn errors.New(msg[:len(msg)-1])\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc AnyError(errs ...error) error {\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Support using a chan as well as a slice for steps. Lazy+++.<commit_after>package argot\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/xeipuuv\/gojsonschema\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype Step interface {\n\tGo() error\n}\n\ntype Steps []Step\n\n\/\/ t can be nil. If t is not nil and an error occurs, then t.Fatal\n\/\/ will be called. The steps returned represent the steps that were\n\/\/ executed, including the step that errored, if an error occurred.\nfunc (ss Steps) Go(t *testing.T) (results Steps, err error) {\n\tif t != nil {\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Achieved Steps: %v; Error: %v\", results, err)\n\t\t\t}\n\t\t}()\n\t}\n\tresults = make([]Step, 0, len(ss))\n\tfor _, step := range ss {\n\t\tresults = append(results, step)\n\t\terr = step.Go()\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t}\n\treturn results, nil\n}\n\n\/\/ Use a chan to indicate the steps to carry out rather than a\n\/\/ slice. The advantage of a chan is that it allows more laziness:\n\/\/ steps can even be responsible for issuing their own subsequent\n\/\/ steps.\ntype StepsChan <-chan Step\n\n\/\/ t can be nil. If t is not nil and an error occurs, then t.Fatal\n\/\/ will be called. The steps returned represent the steps that were\n\/\/ executed, including the step that errored, if an error occurred.\nfunc (sc StepsChan) Go(t *testing.T) (results Steps, err error) {\n\tif t != nil {\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Achieved Steps: %v; Error: %v\", results, err)\n\t\t\t}\n\t\t}()\n\t}\n\tresults = make([]Step, 0, 16)\n\tfor step := range sc {\n\t\tresults = append(results, step)\n\t\terr = step.Go()\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t}\n\treturn results, nil\n}\n\ntype StepFunc func() error\n\nfunc (sf StepFunc) Go() error {\n\treturn sf()\n}\n\ntype HttpCall struct {\n\tClient *http.Client\n\tRequest *http.Request\n\tResponse *http.Response\n\tResponseBody []byte\n}\n\n\/\/ client can be nil. If it is nil, a new http.Client is used.\nfunc NewHttpCall(client *http.Client) *HttpCall {\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\treturn &HttpCall{\n\t\tClient: client,\n\t}\n}\n\nfunc (hc *HttpCall) AssertNoRequest() error {\n\tif hc.Request == nil {\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(\"Request already set\")\n\t}\n}\n\nfunc (hc *HttpCall) AssertRequest() error {\n\tif hc.Request == nil {\n\t\treturn errors.New(\"No Request set\")\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (hc *HttpCall) AssertNoRespose() error {\n\tif hc.Response == nil {\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(\"Response already set\")\n\t}\n}\n\nfunc (hc *HttpCall) EnsureResponse() error {\n\tif hc.Response != nil {\n\t\treturn nil\n\t} else if hc.Request == nil {\n\t\treturn errors.New(\"Cannot ensure response: no request.\")\n\t} else if response, err := hc.Client.Do(hc.Request); err != nil {\n\t\treturn fmt.Errorf(\"Error when making call of %v: %v\", hc.Request, err)\n\t} else {\n\t\thc.Response = response\n\t\treturn nil\n\t}\n}\n\n\/\/ Idempotent.\nfunc (hc *HttpCall) ReceiveBody() error {\n\tif err := hc.EnsureResponse(); err != nil {\n\t\treturn err\n\t} else if hc.ResponseBody != nil {\n\t\treturn nil\n\t} else {\n\t\tdefer hc.Response.Body.Close()\n\t\tbites := new(bytes.Buffer)\n\t\tif _, err = io.Copy(bites, hc.Response.Body); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.ResponseBody = bites.Bytes()\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Idempotent. You should ensure this is called at the end of life for\n\/\/ each HttpCall. It drains bodies and generally sweeps up the mess.\nfunc (hc *HttpCall) Reset() error {\n\thc.Request = nil\n\tif hc.Response != nil && hc.ResponseBody == nil {\n\t\tio.Copy(ioutil.Discard, hc.Response.Body)\n\t\thc.Response.Body.Close()\n\t\thc.Response = nil\n\t}\n\thc.ResponseBody = nil\n\treturn nil\n}\n\n\/\/ This will automatically call HttpCall.Reset to ensure it's safe to\n\/\/ create a new request.\nfunc NewRequest(hc *HttpCall, method, urlStr string, body io.Reader) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.Reset(); err != nil {\n\t\t\treturn err\n\t\t} else if req, err := http.NewRequest(method, urlStr, body); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.Request = req\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc RequestHeader(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := AnyError(hc.AssertRequest(), hc.AssertNoRespose()); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\thc.Request.Header.Set(key, value)\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseStatusEquals(hc *HttpCall, status int) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if hc.Response.StatusCode != status {\n\t\t\treturn fmt.Errorf(\"Status: Expected %d; found %d.\", status, hc.Response.StatusCode)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseHeaderEquals(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if header := hc.Response.Header.Get(key); header != value {\n\t\t\treturn fmt.Errorf(\"Header '%s': Expected '%s'; found '%s'.\", key, value, header)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseHeaderContains(hc *HttpCall, key, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.EnsureResponse(); err != nil {\n\t\t\treturn err\n\t\t} else if header := hc.Response.Header.Get(key); !strings.Contains(header, value) {\n\t\t\treturn fmt.Errorf(\"Header '%s': Expected '%s'; found '%s'.\", key, value, header)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyEquals(hc *HttpCall, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else if string(hc.ResponseBody) != value {\n\t\t\treturn fmt.Errorf(\"Body: Expected '%s'; found '%s'.\", value, string(hc.ResponseBody))\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyContains(hc *HttpCall, value string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else if !strings.Contains(string(hc.ResponseBody), value) {\n\t\t\treturn fmt.Errorf(\"Body: Expected '%s'; found '%s'.\", value, string(hc.ResponseBody))\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t})\n}\n\nfunc ResponseBodyJSONSchema(hc *HttpCall, schema string) Step {\n\treturn StepFunc(func() error {\n\t\tif err := hc.ReceiveBody(); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tschemaLoader := gojsonschema.NewStringLoader(schema)\n\t\t\tbodyLoader := gojsonschema.NewStringLoader(string(hc.ResponseBody))\n\t\t\tif result, err := gojsonschema.Validate(schemaLoader, bodyLoader); err != nil {\n\t\t\t\treturn err\n\t\t\t} else if !result.Valid() {\n\t\t\t\tmsg := \"Validation failure:\\n\"\n\t\t\t\tfor _, err := range result.Errors() {\n\t\t\t\t\tmsg += fmt.Sprintf(\"\\t%v\\n\", err)\n\t\t\t\t}\n\t\t\t\treturn errors.New(msg[:len(msg)-1])\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc AnyError(errs ...error) error {\n\tfor _, err := range errs {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package unsplash\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/joho\/godotenv\"\n)\n\nfunc readEnv() {\n\terr := godotenv.Load(\"..\/.env\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file\")\n\t}\n}\n\nvar imageIds = []struct {\n\tid string\n\tstatusCode int\n}{\n\t{\"bWI4Vd4vI3w\", 200},\n\t{\"xjQhTrxyVBw\", 200},\n\t{\"7NcGuPF5NU0\", 200},\n\t{\"oeks929jesj\", 422},\n\t{\"9jdwnduwwen\", 422},\n\t{\"aaaaa1aaaaa\", 422},\n}\n\nfunc TestDownloadPhoto(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range imageIds {\n\t\tpath := fmt.Sprintf(\"\/download-photo?id=%s\", value.id)\n\n\t\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trr := httptest.NewRecorder()\n\t\thandler := http.HandlerFunc(DownloadPhoto)\n\n\t\thandler.ServeHTTP(rr, req)\n\n\t\tif rr.Code != value.statusCode {\n\t\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t\t}\n\t}\n}\n\nfunc TestGetPhotoDownloadLocation(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range imageIds {\n\t\ts, err := GetPhotoDownloadLocation(value.id)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif value.statusCode == 200 && s.URL == \"\" {\n\t\t\tt.Errorf(\"Expected UnsplashResponse URL field to be non-empty, got %s\", s.URL)\n\t\t}\n\n\t\tif value.statusCode == 422 && s.Errors == nil {\n\t\t\tt.Errorf(\"Expected UnsplashResponse to contains Errors, got %v\", s.Errors)\n\t\t}\n\t}\n}\n\nfunc makeRequest(t *testing.T, path string, h func(http.ResponseWriter, *http.Request)) {\n\treadEnv()\n\n\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(h)\n\n\thandler.ServeHTTP(rr, req)\n\n\tif rr.Code != http.StatusOK {\n\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t}\n}\n\nvar searchTable = []struct {\n\tkey string\n\tpage int\n}{\n\t{\"cars\", 1},\n\t{\"house\", 6},\n\t{\"animal\", 10},\n}\n\nfunc TestSearchUnsplash(t *testing.T) {\n\tfor _, value := range searchTable {\n\t\tmakeRequest(t, fmt.Sprintf(\"\/search-unsplash?key=%s&page=%d\", value.key, value.page), SearchUnsplash)\n\t}\n}\n\nfunc TestGetRandomPhoto(t *testing.T) {\n\tmakeRequest(t, \"\/random-photo?collections=998309\", GetRandomPhoto)\n}\n\nvar collectionsTable = []struct {\n\tids string\n\tstatusCode int\n}{\n\t{\"998309,317099\", 200},\n\t{\"39843782,4993402\", 400},\n\t{\"151521,9829382\", 400},\n\t{\"175083,762960\", 200},\n}\n\nfunc TestValidateCollections(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range collectionsTable {\n\t\tpath := fmt.Sprintf(\"\/validate-collections?collections=%s\", value.ids)\n\t\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trr := httptest.NewRecorder()\n\t\thandler := http.HandlerFunc(ValidateCollections)\n\n\t\thandler.ServeHTTP(rr, req)\n\n\t\tif rr.Code != value.statusCode {\n\t\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t\t}\n\t}\n}\n<commit_msg>Fix failing tests<commit_after>package unsplash\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/joho\/godotenv\"\n)\n\nfunc readEnv() {\n\terr := godotenv.Load(\"..\/.env\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file\")\n\t}\n}\n\nvar imageIds = []struct {\n\tid string\n\tstatusCode int\n}{\n\t{\"bWI4Vd4vI3w\", 200},\n\t{\"xjQhTrxyVBw\", 200},\n\t{\"7NcGuPF5NU0\", 200},\n\t{\"oeks929jesj\", 500},\n\t{\"9jdwnduwwen\", 500},\n\t{\"aaaaa1aaaaa\", 500},\n}\n\nfunc TestDownloadPhoto(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range imageIds {\n\t\tpath := fmt.Sprintf(\"\/download-photo?id=%s\", value.id)\n\n\t\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trr := httptest.NewRecorder()\n\t\thandler := http.HandlerFunc(DownloadPhoto)\n\n\t\thandler.ServeHTTP(rr, req)\n\n\t\tif rr.Code != value.statusCode {\n\t\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t\t}\n\t}\n}\n\nfunc TestGetPhotoDownloadLocation(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range imageIds {\n\t\ts, err := GetPhotoDownloadLocation(value.id)\n\n\t\tif err != nil && value.statusCode == 200 {\n\t\t\tt.Errorf(\"Expect no errors from GetPhotoDownloadLocation for a valid photo id\")\n\t\t}\n\n\t\tif err == nil && value.statusCode == 500 {\n\t\t\tt.Errorf(\"Expected GetPhotoDownloadLocation to throw an error for an invalid photo id\")\n\t\t}\n\n\t\tif value.statusCode == 200 && s.URL == \"\" {\n\t\t\tt.Errorf(\"Expected UnsplashResponse URL field to be non-empty, got %s\", s.URL)\n\t\t}\n\t}\n}\n\nfunc makeRequest(t *testing.T, path string, h func(http.ResponseWriter, *http.Request)) {\n\treadEnv()\n\n\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(h)\n\n\thandler.ServeHTTP(rr, req)\n\n\tif rr.Code != http.StatusOK {\n\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t}\n}\n\nvar searchTable = []struct {\n\tkey string\n\tpage int\n}{\n\t{\"cars\", 1},\n\t{\"house\", 6},\n\t{\"animal\", 10},\n}\n\nfunc TestSearchUnsplash(t *testing.T) {\n\tfor _, value := range searchTable {\n\t\tmakeRequest(t, fmt.Sprintf(\"\/search-unsplash?key=%s&page=%d\", value.key, value.page), SearchUnsplash)\n\t}\n}\n\nfunc TestGetRandomPhoto(t *testing.T) {\n\tmakeRequest(t, \"\/random-photo?collections=998309\", GetRandomPhoto)\n}\n\nvar collectionsTable = []struct {\n\tids string\n\tstatusCode int\n}{\n\t{\"998309,317099\", 200},\n\t{\"39843782,4993402\", 500},\n\t{\"151521,9829382\", 500},\n\t{\"175083,762960\", 200},\n}\n\nfunc TestValidateCollections(t *testing.T) {\n\treadEnv()\n\n\tfor _, value := range collectionsTable {\n\t\tpath := fmt.Sprintf(\"\/validate-collections?collections=%s\", value.ids)\n\t\treq, err := http.NewRequest(\"GET\", path, nil)\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trr := httptest.NewRecorder()\n\t\thandler := http.HandlerFunc(ValidateCollections)\n\n\t\thandler.ServeHTTP(rr, req)\n\n\t\tif rr.Code != value.statusCode {\n\t\t\tt.Errorf(\"Status should be %d, got %d\", http.StatusOK, rr.Code)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mbotapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ This defines a bot\n\/\/ Set Debug to true for debugging\ntype BotAPI struct {\n\tToken string\n\tVerifyToken string\n\tDebug bool\n\tClient *http.Client\n}\n\n\/\/ This helps create a BotAPI instance with token, verify_token\n\/\/ By default Debug is set to false\nfunc NewBotAPI(token string, vtoken string) *BotAPI {\n\treturn &BotAPI{\n\t\tToken: token,\n\t\tVerifyToken: vtoken,\n\t\tDebug: false,\n\t\tClient: &http.Client{},\n\t}\n}\n\n\/\/ This helps send request (send messages to users)\n\/\/ It takes Request struct encoded into a buffer of json bytes\n\/\/ The APIResponse contains the error from FB if any\n\/\/ Should NOT be directly used, Use Send \/ SendFile\nfunc (bot *BotAPI) MakeRequest(b *bytes.Buffer) (APIResponse, error) {\n\turi := fmt.Sprintf(APIEndpoint, bot.Token)\n\n\treq, _ := http.NewRequest(\"POST\", uri, b)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tresp, err := bot.Client.Do(req)\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar rsp APIResponse\n\tdec := json.NewDecoder(resp.Body)\n\terr = dec.Decode(&rsp)\n\tif err != nil {\n\t\treturn APIResponse{}, nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn rsp, errors.New(http.StatusText(resp.StatusCode))\n\t}\n\treturn rsp, nil\n}\n\n\/\/ This function helps send messages to users\n\/\/ It takes Message \/ GenericTemplate \/ ButtonTemplate \/ ReceiptTemplate and\n\/\/ sends it to the user\nfunc (bot *BotAPI) Send(u User, c interface{}, notif string) (APIResponse, error) {\n\tvar r Request\n\n\tn := RegularNotif\n\tif notif != \"\" {\n\t\tn = notif\n\t}\n\n\tswitch c.(type) {\n\tcase Request:\n\t\treturn APIResponse{}, errors.New(\"Use MakeRequest to send Request!!\")\n\tcase Message:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tMessage: c.(Message),\n\t\t\tNotifType: n,\n\t\t}\n\n\tcase GenericTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(GenericTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tcase ButtonTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(ButtonTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tcase ReceiptTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(ReceiptTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tdefault:\n\t\treturn APIResponse{}, errors.New(\"Type is not supported\")\n\t}\n\n\tif r == (Request{}) {\n\t\treturn APIResponse{}, errors.New(\"Unknown Error\")\n\t}\n\tpayl, _ := json.Marshal(r)\n\tif bot.Debug {\n\t\tlog.Printf(\"[INFO] Payload: %s\", string(payl))\n\t}\n\treturn bot.MakeRequest(bytes.NewBuffer(payl))\n}\n\n\/\/ This helps to send local images (currently) to users\n\/\/ TODO: not tested yet!\nfunc (bot *BotAPI) SendFile(u User, path string) (APIResponse, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\tdefer file.Close()\n\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\tpart, err := writer.CreateFormFile(\"filedata\", filepath.Base(path))\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\t_, err = io.Copy(part, file)\n\n\tusr, _ := json.Marshal(u)\n\t_ = writer.WriteField(\"recipient\", string(usr))\n\timg := NewImageMessage(\"\")\n\tim, _ := json.Marshal(img)\n\t_ = writer.WriteField(\"message\", string(im))\n\n\terr = writer.Close()\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\n\treturn bot.MakeRequest(body)\n}\n\n\/\/ This function registers the handlers for\n\/\/ - webhook verification\n\/\/ - all callbacks made on the webhhoks\n\/\/ It loops over all entries in the callback and\n\/\/ pushes to the Callback channel\n\/\/ This also return a *http.ServeMux which can be used to listenAndServe\nfunc (bot *BotAPI) SetWebhook(pattern string) (<-chan Callback, *http.ServeMux) {\n\tcallbackChan := make(chan Callback, 100)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(pattern, func(w http.ResponseWriter, req *http.Request) {\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\tif req.FormValue(\"hub.verify_token\") == bot.VerifyToken {\n\t\t\t\tw.Write([]byte(req.FormValue(\"hub.challenge\")))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\n\t\tcase \"POST\":\n\t\t\tdefer req.Body.Close()\n\n\t\t\tvar rsp Response\n\t\t\tif bot.Debug {\n\t\t\t\tbody, _ := ioutil.ReadAll(response.Body)\n\t\t\t\tlog.Printf(\"[INFO]%s\", body)\n\t\t\t}\n\t\t\tdecoder := json.NewDecoder(req.Body)\n\t\t\tdecoder.Decode(&rsp)\n\n\t\t\tif rsp.Object == \"page\" {\n\t\t\t\tfor _, e := range rsp.Entries {\n\t\t\t\t\tfor _, c := range e.Messaging {\n\t\t\t\t\t\tcallbackChan <- c\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t})\n\n\treturn callbackChan, mux\n\n}\n<commit_msg>debug<commit_after>package mbotapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ This defines a bot\n\/\/ Set Debug to true for debugging\ntype BotAPI struct {\n\tToken string\n\tVerifyToken string\n\tDebug bool\n\tClient *http.Client\n}\n\n\/\/ This helps create a BotAPI instance with token, verify_token\n\/\/ By default Debug is set to false\nfunc NewBotAPI(token string, vtoken string) *BotAPI {\n\treturn &BotAPI{\n\t\tToken: token,\n\t\tVerifyToken: vtoken,\n\t\tDebug: false,\n\t\tClient: &http.Client{},\n\t}\n}\n\n\/\/ This helps send request (send messages to users)\n\/\/ It takes Request struct encoded into a buffer of json bytes\n\/\/ The APIResponse contains the error from FB if any\n\/\/ Should NOT be directly used, Use Send \/ SendFile\nfunc (bot *BotAPI) MakeRequest(b *bytes.Buffer) (APIResponse, error) {\n\turi := fmt.Sprintf(APIEndpoint, bot.Token)\n\n\treq, _ := http.NewRequest(\"POST\", uri, b)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tresp, err := bot.Client.Do(req)\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar rsp APIResponse\n\tdec := json.NewDecoder(resp.Body)\n\terr = dec.Decode(&rsp)\n\tif err != nil {\n\t\treturn APIResponse{}, nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn rsp, errors.New(http.StatusText(resp.StatusCode))\n\t}\n\treturn rsp, nil\n}\n\n\/\/ This function helps send messages to users\n\/\/ It takes Message \/ GenericTemplate \/ ButtonTemplate \/ ReceiptTemplate and\n\/\/ sends it to the user\nfunc (bot *BotAPI) Send(u User, c interface{}, notif string) (APIResponse, error) {\n\tvar r Request\n\n\tn := RegularNotif\n\tif notif != \"\" {\n\t\tn = notif\n\t}\n\n\tswitch c.(type) {\n\tcase Request:\n\t\treturn APIResponse{}, errors.New(\"Use MakeRequest to send Request!!\")\n\tcase Message:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tMessage: c.(Message),\n\t\t\tNotifType: n,\n\t\t}\n\n\tcase GenericTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(GenericTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tcase ButtonTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(ButtonTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tcase ReceiptTemplate:\n\t\tr = Request{\n\t\t\tRecipient: u,\n\t\t\tNotifType: n,\n\t\t\tMessage: Message{\n\t\t\t\tAttachment: &Attachment{\n\t\t\t\t\tType: \"template\",\n\t\t\t\t\tPayload: c.(ReceiptTemplate),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\tdefault:\n\t\treturn APIResponse{}, errors.New(\"Type is not supported\")\n\t}\n\n\tif r == (Request{}) {\n\t\treturn APIResponse{}, errors.New(\"Unknown Error\")\n\t}\n\tpayl, _ := json.Marshal(r)\n\tif bot.Debug {\n\t\tlog.Printf(\"[INFO] Payload: %s\", string(payl))\n\t}\n\treturn bot.MakeRequest(bytes.NewBuffer(payl))\n}\n\n\/\/ This helps to send local images (currently) to users\n\/\/ TODO: not tested yet!\nfunc (bot *BotAPI) SendFile(u User, path string) (APIResponse, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\tdefer file.Close()\n\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\tpart, err := writer.CreateFormFile(\"filedata\", filepath.Base(path))\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\t_, err = io.Copy(part, file)\n\n\tusr, _ := json.Marshal(u)\n\t_ = writer.WriteField(\"recipient\", string(usr))\n\timg := NewImageMessage(\"\")\n\tim, _ := json.Marshal(img)\n\t_ = writer.WriteField(\"message\", string(im))\n\n\terr = writer.Close()\n\tif err != nil {\n\t\treturn APIResponse{}, err\n\t}\n\n\treturn bot.MakeRequest(body)\n}\n\n\/\/ This function registers the handlers for\n\/\/ - webhook verification\n\/\/ - all callbacks made on the webhhoks\n\/\/ It loops over all entries in the callback and\n\/\/ pushes to the Callback channel\n\/\/ This also return a *http.ServeMux which can be used to listenAndServe\nfunc (bot *BotAPI) SetWebhook(pattern string) (<-chan Callback, *http.ServeMux) {\n\tcallbackChan := make(chan Callback, 100)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(pattern, func(w http.ResponseWriter, req *http.Request) {\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\tif req.FormValue(\"hub.verify_token\") == bot.VerifyToken {\n\t\t\t\tw.Write([]byte(req.FormValue(\"hub.challenge\")))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\n\t\tcase \"POST\":\n\t\t\tdefer req.Body.Close()\n\n\t\t\tvar rsp Response\n\t\t\tif bot.Debug {\n\t\t\t\tbody, _ := ioutil.ReadAll(req.Body)\n\t\t\t\tlog.Printf(\"[INFO]%s\", body)\n\t\t\t}\n\t\t\tdecoder := json.NewDecoder(req.Body)\n\t\t\tdecoder.Decode(&rsp)\n\n\t\t\tif rsp.Object == \"page\" {\n\t\t\t\tfor _, e := range rsp.Entries {\n\t\t\t\t\tfor _, c := range e.Messaging {\n\t\t\t\t\t\tcallbackChan <- c\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t})\n\n\treturn callbackChan, mux\n\n}\n<|endoftext|>"} {"text":"<commit_before>package jira\n\nimport (\n\t\"fmt\"\n\t\"github.com\/trivago\/tgo\/tcontainer\"\n\t\"strings\"\n)\n\n\/\/ CreateMeta contains information about fields and their attributed to create a ticket.\ntype CreateMetaInfo struct {\n\tExpand string `json:\"expand,omitempty\"`\n\tProjects []*MetaProject `json:\"projects,omitempty\"`\n}\n\n\/\/ MetaProject is the meta information about a project returned from createmeta api\ntype MetaProject struct {\n\tExpand string `json:\"expand,omitempty\"`\n\tSelf string `json:\"self, omitempty\"`\n\tId string `json:\"id,omitempty\"`\n\tKey string `json:\"key,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\t\/\/ omitted avatarUrls\n\tIssueTypes []*MetaIssueTypes `json:\"issuetypes,omitempty\"`\n}\n\n\/\/ metaIssueTypes represents the different issue types a project has.\n\/\/\n\/\/ Note: Fields is interface because this is an object which can\n\/\/ have arbitraty keys related to customfields. It is not possible to\n\/\/ expect these for a general way. This will be returning a map.\n\/\/ Further processing must be done depending on what is required.\ntype MetaIssueTypes struct {\n\tSelf string `json:\"expand,omitempty\"`\n\tId string `json:\"id,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tIconUrl string `json:\"iconurl,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tSubtasks bool `json:\"subtask,omitempty\"`\n\tExpand string `json:\"expand,omitempty\"`\n\tFields tcontainer.MarshalMap `json:\"fields,omitempty\"`\n}\n\n\/\/ GetCreateMeta makes the api call to get the meta information required to create a ticket\nfunc (s *IssueService) GetCreateMeta(projectkey string) (*CreateMetaInfo, *Response, error) {\n\n\tapiEndpoint := fmt.Sprintf(\"\/rest\/api\/2\/issue\/createmeta?projectKeys=%s&expand=projects.issuetypes.fields\", projectkey)\n\n\treq, err := s.client.NewRequest(\"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfmt.Println(req.URL)\n\tmeta := new(CreateMetaInfo)\n\tresp, err := s.client.Do(req, meta)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn meta, resp, nil\n}\n\n\/\/ GetProjectWithName returns a project with \"name\" from the meta information recieved. If not found, this returns nil.\n\/\/ The comparision of the name is case insensitive.\nfunc (m *CreateMetaInfo) GetProjectWithName(name string) *MetaProject {\n\tfor _, m := range m.Projects {\n\t\tif strings.ToLower(m.Name) == strings.ToLower(name) {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetIssueWithName returns an IssueType with name from a given MetaProject. If not found, this returns nil.\n\/\/ The comparision of the name is case insensitive\nfunc (p *MetaProject) GetIssueTypeWithName(name string) *MetaIssueTypes {\n\tfor _, m := range p.IssueTypes {\n\t\tif strings.ToLower(m.Name) == strings.ToLower(name) {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetMandatoryFields returns a map of all the required fields from the MetaIssueTypes.\n\/\/ if a frield returned by the api was:\n\/\/ \"customfield_10806\": {\n\/\/\t\t\t\t\t\"required\": true,\n\/\/\t\t\t\t\t\"schema\": {\n\/\/\t\t\t\t\t\t\"type\": \"any\",\n\/\/\t\t\t\t\t\t\"custom\": \"com.pyxis.greenhopper.jira:gh-epic-link\",\n\/\/\t\t\t\t\t\t\"customId\": 10806\n\/\/\t\t\t\t\t},\n\/\/\t\t\t\t\t\"name\": \"Epic Link\",\n\/\/\t\t\t\t\t\"hasDefaultValue\": false,\n\/\/\t\t\t\t\t\"operations\": [\n\/\/\t\t\t\t\t\t\"set\"\n\/\/\t\t\t\t\t]\n\/\/\t\t\t\t}\n\/\/ the returned map would have \"Epic Link\" as the key and \"customfield_10806\" as value.\n\/\/ This choice has been made so that the it is easier to generate the create api request later.\nfunc (t *MetaIssueTypes) GetMandatoryFields() (map[string]string, error) {\n\tret := make(map[string]string)\n\tfor key, _ := range t.Fields {\n\t\trequired, err := t.Fields.Bool(key + \"\/required\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif required {\n\t\t\tname, err := t.Fields.String(key + \"\/name\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret[name] = key\n\t\t}\n\t}\n\treturn ret, nil\n}\n\n\/\/ GetAllFields returns a map of all the fields for an IssueType. This includes all required and not required.\n\/\/ The key of the returned map is what you see in the form and the value is how it is representated in the jira schema.\nfunc (t *MetaIssueTypes) GetAllFields() (map[string]string, error) {\n\tret := make(map[string]string)\n\tfor key, _ := range t.Fields {\n\n\t\tname, err := t.Fields.String(key + \"\/name\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret[name] = key\n\t}\n\treturn ret, nil\n}\n<commit_msg>Rename MetaIssueTypes to MetaIssueType<commit_after>package jira\n\nimport (\n\t\"fmt\"\n\t\"github.com\/trivago\/tgo\/tcontainer\"\n\t\"strings\"\n)\n\n\/\/ CreateMeta contains information about fields and their attributed to create a ticket.\ntype CreateMetaInfo struct {\n\tExpand string `json:\"expand,omitempty\"`\n\tProjects []*MetaProject `json:\"projects,omitempty\"`\n}\n\n\/\/ MetaProject is the meta information about a project returned from createmeta api\ntype MetaProject struct {\n\tExpand string `json:\"expand,omitempty\"`\n\tSelf string `json:\"self, omitempty\"`\n\tId string `json:\"id,omitempty\"`\n\tKey string `json:\"key,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\t\/\/ omitted avatarUrls\n\tIssueTypes []*MetaIssueType `json:\"issuetypes,omitempty\"`\n}\n\n\/\/ metaIssueTypes represents the different issue types a project has.\n\/\/\n\/\/ Note: Fields is interface because this is an object which can\n\/\/ have arbitraty keys related to customfields. It is not possible to\n\/\/ expect these for a general way. This will be returning a map.\n\/\/ Further processing must be done depending on what is required.\ntype MetaIssueType struct {\n\tSelf string `json:\"expand,omitempty\"`\n\tId string `json:\"id,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tIconUrl string `json:\"iconurl,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tSubtasks bool `json:\"subtask,omitempty\"`\n\tExpand string `json:\"expand,omitempty\"`\n\tFields tcontainer.MarshalMap `json:\"fields,omitempty\"`\n}\n\n\/\/ GetCreateMeta makes the api call to get the meta information required to create a ticket\nfunc (s *IssueService) GetCreateMeta(projectkey string) (*CreateMetaInfo, *Response, error) {\n\n\tapiEndpoint := fmt.Sprintf(\"\/rest\/api\/2\/issue\/createmeta?projectKeys=%s&expand=projects.issuetypes.fields\", projectkey)\n\n\treq, err := s.client.NewRequest(\"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfmt.Println(req.URL)\n\tmeta := new(CreateMetaInfo)\n\tresp, err := s.client.Do(req, meta)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn meta, resp, nil\n}\n\n\/\/ GetProjectWithName returns a project with \"name\" from the meta information recieved. If not found, this returns nil.\n\/\/ The comparision of the name is case insensitive.\nfunc (m *CreateMetaInfo) GetProjectWithName(name string) *MetaProject {\n\tfor _, m := range m.Projects {\n\t\tif strings.ToLower(m.Name) == strings.ToLower(name) {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetIssueWithName returns an IssueType with name from a given MetaProject. If not found, this returns nil.\n\/\/ The comparision of the name is case insensitive\nfunc (p *MetaProject) GetIssueTypeWithName(name string) *MetaIssueType {\n\tfor _, m := range p.IssueTypes {\n\t\tif strings.ToLower(m.Name) == strings.ToLower(name) {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetMandatoryFields returns a map of all the required fields from the MetaIssueTypes.\n\/\/ if a frield returned by the api was:\n\/\/ \"customfield_10806\": {\n\/\/\t\t\t\t\t\"required\": true,\n\/\/\t\t\t\t\t\"schema\": {\n\/\/\t\t\t\t\t\t\"type\": \"any\",\n\/\/\t\t\t\t\t\t\"custom\": \"com.pyxis.greenhopper.jira:gh-epic-link\",\n\/\/\t\t\t\t\t\t\"customId\": 10806\n\/\/\t\t\t\t\t},\n\/\/\t\t\t\t\t\"name\": \"Epic Link\",\n\/\/\t\t\t\t\t\"hasDefaultValue\": false,\n\/\/\t\t\t\t\t\"operations\": [\n\/\/\t\t\t\t\t\t\"set\"\n\/\/\t\t\t\t\t]\n\/\/\t\t\t\t}\n\/\/ the returned map would have \"Epic Link\" as the key and \"customfield_10806\" as value.\n\/\/ This choice has been made so that the it is easier to generate the create api request later.\nfunc (t *MetaIssueType) GetMandatoryFields() (map[string]string, error) {\n\tret := make(map[string]string)\n\tfor key, _ := range t.Fields {\n\t\trequired, err := t.Fields.Bool(key + \"\/required\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif required {\n\t\t\tname, err := t.Fields.String(key + \"\/name\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret[name] = key\n\t\t}\n\t}\n\treturn ret, nil\n}\n\n\/\/ GetAllFields returns a map of all the fields for an IssueType. This includes all required and not required.\n\/\/ The key of the returned map is what you see in the form and the value is how it is representated in the jira schema.\nfunc (t *MetaIssueType) GetAllFields() (map[string]string, error) {\n\tret := make(map[string]string)\n\tfor key, _ := range t.Fields {\n\n\t\tname, err := t.Fields.String(key + \"\/name\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret[name] = key\n\t}\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/auth\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/gen\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/view\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar validPath = regexp.MustCompile(\"^\/blog\/(view|edit|save|del)\/([a-zA-Z0-9_-]+)$\")\n\n\/\/ BLOG HANDLERS\nfunc makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tm := validPath.FindStringSubmatch(r.URL.Path)\n\t\tif m == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, m[2])\n\t}\n}\n\n\/\/ show all posts\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/fmt.Fprintf(w, \"Hi there, I love you %s\\n\", r.URL.Path)\n\t\/*\n\t\tposts, err := LoadAllBlogPosts()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not load blog posts\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t*\/\n\tbundle := &view.TemplateBundleIndex{\n\t\tFooter: &view.FooterStruct{Year: time.Now().Year()},\n\t\tHeader: &view.HeaderStruct{Title: \"All posts\"},\n\t\t\/\/Posts: posts,\n\t\tPosts: nil,\n\t}\n\tview.Render(w, \"index\", bundle)\n}\n\nfunc main() {\n\tfmt.Println(\"Go Microblog service started!\")\n\n\t\/\/ use all the available cores\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t\/\/ Blog endpoints\n\thttp.HandleFunc(\"\/gen-site\", auth.BasicHandler(gen.GenerateHandler))\n\thttp.HandleFunc(\"\/\", rootHandler)\n\n\tfs := http.FileServer(http.Dir(\"static\"))\n\thttp.Handle(\"\/s\/\", http.StripPrefix(\"\/s\/\", fs))\n\n\thttp.ListenAndServe(\":40080\", nil)\n\n}\n<commit_msg>added logging for the server<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/auth\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/gen\"\n\t\"github.com\/lambrospetrou\/gomicroblog\/view\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar validPath = regexp.MustCompile(\"^\/blog\/(view|edit|save|del)\/([a-zA-Z0-9_-]+)$\")\n\n\/\/ BLOG HANDLERS\nfunc makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tm := validPath.FindStringSubmatch(r.URL.Path)\n\t\tif m == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, m[2])\n\t}\n}\n\n\/\/ show all posts\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/fmt.Fprintf(w, \"Hi there, I love you %s\\n\", r.URL.Path)\n\t\/*\n\t\tposts, err := LoadAllBlogPosts()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not load blog posts\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t*\/\n\tbundle := &view.TemplateBundleIndex{\n\t\tFooter: &view.FooterStruct{Year: time.Now().Year()},\n\t\tHeader: &view.HeaderStruct{Title: \"All posts\"},\n\t\t\/\/Posts: posts,\n\t\tPosts: nil,\n\t}\n\tview.Render(w, \"index\", bundle)\n}\n\nfunc main() {\n\tfmt.Println(\"Go Microblog service started!\")\n\n\t\/\/ use all the available cores\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t\/\/ Blog endpoints\n\thttp.HandleFunc(\"\/gen-site\", auth.BasicHandler(gen.GenerateHandler))\n\thttp.HandleFunc(\"\/\", rootHandler)\n\n\tfs := http.FileServer(http.Dir(\"static\"))\n\thttp.Handle(\"\/s\/\", http.StripPrefix(\"\/s\/\", fs))\n\n\tlog.Fatal(http.ListenAndServe(\":40080\", nil))\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The MIT License (MIT)\n\n\/\/ Copyright (c) 2016 Maciej Borzecki\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\npackage test\n\nimport (\n\t\"github.com\/bboozzoo\/q3stats\/store\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n\t\"github.com\/pkg\/errors\"\n\t\"testing\"\n)\n\ntype DB struct {\n\tdb *gorm.DB\n}\n\nfunc (d *DB) Open() error {\n\tdb, err := gorm.Open(\"sqlite3\", \":memory:\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open DB\")\n\t}\n\n\td.db = db\n\n\treturn nil\n}\n\nfunc (d *DB) Conn() *gorm.DB {\n\treturn d.db\n}\n\nfunc (d *DB) Close() {\n\tif d.db != nil {\n\t\td.db.Close()\n\t}\n}\n\nfunc GetStore(t *testing.T) store.DB {\n\tdb := &DB{}\n\n\tif err := db.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open DB: %s\", err)\n\t}\n\treturn db\n}\n<commit_msg>models\/test\/store: implement connectable and transactional interfaces<commit_after>\/\/ The MIT License (MIT)\n\n\/\/ Copyright (c) 2016 Maciej Borzecki\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\npackage test\n\nimport (\n\t\"github.com\/bboozzoo\/q3stats\/store\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n\t\"github.com\/pkg\/errors\"\n\t\"testing\"\n)\n\ntype DB struct {\n\tdb *gorm.DB\n}\n\nfunc (d *DB) Open() error {\n\tdb, err := gorm.Open(\"sqlite3\", \":memory:\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open DB\")\n\t}\n\n\td.db = db\n\n\treturn nil\n}\n\nfunc (d *DB) Conn() *gorm.DB {\n\treturn d.db\n}\n\nfunc (d *DB) Close() {\n\tif d.db != nil {\n\t\td.db.Close()\n\t}\n}\n\nfunc GetStore(t *testing.T) store.DB {\n\tdb := &DB{}\n\n\tif err := db.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open DB: %s\", err)\n\t}\n\treturn db\n}\n\nfunc (d *DB) Begin() store.DBTransaction {\n\treturn &dbTx{\n\t\tdb: d.db.Begin(),\n\t}\n}\n\ntype dbTx struct {\n\tdb *gorm.DB\n}\n\nfunc (dbt *dbTx) Conn() *gorm.DB {\n\treturn dbt.db\n}\n\nfunc (dbt *dbTx) Commit() {\n\tdbt.db.Commit()\n}\n<|endoftext|>"} {"text":"<commit_before>package elk\n\nimport (\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\/\/ \"log\/syslog\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\/\/ \"time\"\n\t\"encoding\/json\"\n\n\t\"github.com\/delectable\/logspout\/router\"\n)\n\nvar HOSTNAME string\n\nfunc init() {\n\trouter.AdapterFactories.Register(NewElkAdapter, \"elk\")\n\n\thostname_bytestring, _ := ioutil.ReadFile(\"\/etc\/hostname\")\n\tHOSTNAME = strings.TrimSpace(string(hostname_bytestring))\n\n}\n\nfunc getopt(name, dfault string) string {\n\tvalue := os.Getenv(name)\n\tif value == \"\" {\n\t\tvalue = dfault\n\t}\n\treturn value\n}\n\nfunc NewElkAdapter(route *router.Route) (router.LogAdapter, error) {\n\ttransport, found := router.AdapterTransports.Lookup(route.AdapterTransport(\"udp\"))\n\tif !found {\n\t\treturn nil, errors.New(\"unable to find adapter: \" + route.Adapter)\n\t}\n\n\tconn, err := transport.Dial(route.Address, route.Options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ElkAdapter{\n\t\troute: route,\n\t\tconn: conn,\n\t}, nil\n}\n\ntype ElkAdapter struct {\n\tconn net.Conn\n\troute *router.Route\n}\n\nfunc (adapter *ElkAdapter) Stream(logstream chan *router.Message) {\n\tfor message := range logstream {\n\t\telkMessage := NewElkMessage(message)\n\t\tio.WriteString(adapter.conn, elkMessage.ToString())\n\t}\n}\n\ntype ElkMessage struct {\n\trouterMessage *router.Message\n\tObject struct {\n\t\tTime int64 `json: \"time\"`\n\t\tMessage string `json: \"message\"`\n\t\tHostname string `json: \"hostname\"`\n\t\tImage string `json: \"image\"`\n\t\tApp string `json: \"app\"`\n\t\tEnv string `json: \"env\"`\n\t}\n}\n\nfunc NewElkMessage(routerMessage *router.Message) *ElkMessage {\n\telkMessage := &ElkMessage{\n\t\trouterMessage: routerMessage,\n\t}\n\n\telkMessage.Object.Time = routerMessage.Time.Unix()\n\telkMessage.Object.Message = routerMessage.Data\n\n\telkMessage.Object.Hostname = HOSTNAME\n\telkMessage.Object.Hostname = getopt(\"ENV\", \"development\")\n\n\telkMessage.Object.Image = routerMessage.Container.Config.Image\n\n\tenv_map := make(map[string]string)\n\tfor _, blob := range routerMessage.Container.Config.Env {\n\t\tsplit_blob := strings.Split(blob, \"=\")\n\t\tenv_map[split_blob[0]] = split_blob[1]\n\t}\n\n\telkMessage.Object.App = env_map[\"MARATHON_APP_ID\"][1:] \/\/ Marathon, for some reason, prepends MARATHON_APP_ID with a '\/'\n\n\treturn elkMessage\n}\n\nfunc (elkMessage *ElkMessage) ToString() string {\n\treturn_string, _ := json.Marshal(elkMessage.Object)\n\treturn string(return_string)\n}\n<commit_msg>Hostname -> Env<commit_after>package elk\n\nimport (\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\/\/ \"log\/syslog\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\/\/ \"time\"\n\t\"encoding\/json\"\n\n\t\"github.com\/delectable\/logspout\/router\"\n)\n\nvar HOSTNAME string\n\nfunc init() {\n\trouter.AdapterFactories.Register(NewElkAdapter, \"elk\")\n\n\thostname_bytestring, _ := ioutil.ReadFile(\"\/etc\/hostname\")\n\tHOSTNAME = strings.TrimSpace(string(hostname_bytestring))\n\n}\n\nfunc getopt(name, dfault string) string {\n\tvalue := os.Getenv(name)\n\tif value == \"\" {\n\t\tvalue = dfault\n\t}\n\treturn value\n}\n\nfunc NewElkAdapter(route *router.Route) (router.LogAdapter, error) {\n\ttransport, found := router.AdapterTransports.Lookup(route.AdapterTransport(\"udp\"))\n\tif !found {\n\t\treturn nil, errors.New(\"unable to find adapter: \" + route.Adapter)\n\t}\n\n\tconn, err := transport.Dial(route.Address, route.Options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ElkAdapter{\n\t\troute: route,\n\t\tconn: conn,\n\t}, nil\n}\n\ntype ElkAdapter struct {\n\tconn net.Conn\n\troute *router.Route\n}\n\nfunc (adapter *ElkAdapter) Stream(logstream chan *router.Message) {\n\tfor message := range logstream {\n\t\telkMessage := NewElkMessage(message)\n\t\tio.WriteString(adapter.conn, elkMessage.ToString())\n\t}\n}\n\ntype ElkMessage struct {\n\trouterMessage *router.Message\n\tObject struct {\n\t\tTime int64 `json: \"time\"`\n\t\tMessage string `json: \"message\"`\n\t\tHostname string `json: \"hostname\"`\n\t\tImage string `json: \"image\"`\n\t\tApp string `json: \"app\"`\n\t\tEnv string `json: \"env\"`\n\t}\n}\n\nfunc NewElkMessage(routerMessage *router.Message) *ElkMessage {\n\telkMessage := &ElkMessage{\n\t\trouterMessage: routerMessage,\n\t}\n\n\telkMessage.Object.Time = routerMessage.Time.Unix()\n\telkMessage.Object.Message = routerMessage.Data\n\n\telkMessage.Object.Hostname = HOSTNAME\n\telkMessage.Object.Env = getopt(\"ENV\", \"development\")\n\n\telkMessage.Object.Image = routerMessage.Container.Config.Image\n\n\tenv_map := make(map[string]string)\n\tfor _, blob := range routerMessage.Container.Config.Env {\n\t\tsplit_blob := strings.Split(blob, \"=\")\n\t\tenv_map[split_blob[0]] = split_blob[1]\n\t}\n\n\telkMessage.Object.App = env_map[\"MARATHON_APP_ID\"][1:] \/\/ Marathon, for some reason, prepends MARATHON_APP_ID with a '\/'\n\n\treturn elkMessage\n}\n\nfunc (elkMessage *ElkMessage) ToString() string {\n\treturn_string, _ := json.Marshal(elkMessage.Object)\n\treturn string(return_string)\n}\n<|endoftext|>"} {"text":"<commit_before>package domain\n\nimport (\n\t\"cf\"\n\t\"cf\/api\"\n\t\"cf\/configuration\"\n\t\"cf\/requirements\"\n\t\"cf\/terminal\"\n\t\"errors\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype CreateDomain struct {\n\tui terminal.UI\n\tconfig *configuration.Configuration\n\tdomainRepo api.DomainRepository\n\torgReq requirements.OrganizationRequirement\n}\n\nfunc NewCreateDomain(ui terminal.UI, config *configuration.Configuration, domainRepo api.DomainRepository) (cmd *CreateDomain) {\n\tcmd = new(CreateDomain)\n\tcmd.ui = ui\n\tcmd.config = config\n\tcmd.domainRepo = domainRepo\n\treturn\n}\n\nfunc (cmd *CreateDomain) GetRequirements(reqFactory requirements.Factory, c *cli.Context) (reqs []requirements.Requirement, err error) {\n\tif len(c.Args()) != 2 {\n\t\terr = errors.New(\"Incorrect Usage\")\n\t\tcmd.ui.FailWithUsage(c, \"create-domain\")\n\t\treturn\n\t}\n\n\tcmd.orgReq = reqFactory.NewOrganizationRequirement(c.Args()[0])\n\treqs = []requirements.Requirement{\n\t\treqFactory.NewLoginRequirement(),\n\t\tcmd.orgReq,\n\t}\n\treturn\n}\n\nfunc (cmd *CreateDomain) Run(c *cli.Context) {\n\tdomainName := c.Args()[1]\n\towningOrg := cmd.orgReq.GetOrganization()\n\n\tcmd.ui.Say(\"Creating domain %s for org %s as %s...\",\n\t\tterminal.EntityNameColor(domainName),\n\t\tterminal.EntityNameColor(owningOrg.Name),\n\t\tterminal.EntityNameColor(cmd.config.Username()),\n\t)\n\n\t_, apiResponse := cmd.domainRepo.Create(domainName, owningOrg.Guid)\n\tif apiResponse.IsNotSuccessful() {\n\t\tcmd.ui.Failed(apiResponse.Message)\n\t\treturn\n\t}\n\n\tcmd.ui.Ok()\n\n\tcmd.ui.Say(\"TIP: Use '%s' to assign it to a space\", terminal.CommandColor(cf.Name()+\" map-domain\"))\n}\n<commit_msg>Remove create-domain TIP referencing map-domain<commit_after>package domain\n\nimport (\n\t\"cf\/api\"\n\t\"cf\/configuration\"\n\t\"cf\/requirements\"\n\t\"cf\/terminal\"\n\t\"errors\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype CreateDomain struct {\n\tui terminal.UI\n\tconfig *configuration.Configuration\n\tdomainRepo api.DomainRepository\n\torgReq requirements.OrganizationRequirement\n}\n\nfunc NewCreateDomain(ui terminal.UI, config *configuration.Configuration, domainRepo api.DomainRepository) (cmd *CreateDomain) {\n\tcmd = new(CreateDomain)\n\tcmd.ui = ui\n\tcmd.config = config\n\tcmd.domainRepo = domainRepo\n\treturn\n}\n\nfunc (cmd *CreateDomain) GetRequirements(reqFactory requirements.Factory, c *cli.Context) (reqs []requirements.Requirement, err error) {\n\tif len(c.Args()) != 2 {\n\t\terr = errors.New(\"Incorrect Usage\")\n\t\tcmd.ui.FailWithUsage(c, \"create-domain\")\n\t\treturn\n\t}\n\n\tcmd.orgReq = reqFactory.NewOrganizationRequirement(c.Args()[0])\n\treqs = []requirements.Requirement{\n\t\treqFactory.NewLoginRequirement(),\n\t\tcmd.orgReq,\n\t}\n\treturn\n}\n\nfunc (cmd *CreateDomain) Run(c *cli.Context) {\n\tdomainName := c.Args()[1]\n\towningOrg := cmd.orgReq.GetOrganization()\n\n\tcmd.ui.Say(\"Creating domain %s for org %s as %s...\",\n\t\tterminal.EntityNameColor(domainName),\n\t\tterminal.EntityNameColor(owningOrg.Name),\n\t\tterminal.EntityNameColor(cmd.config.Username()),\n\t)\n\n\t_, apiResponse := cmd.domainRepo.Create(domainName, owningOrg.Guid)\n\tif apiResponse.IsNotSuccessful() {\n\t\tcmd.ui.Failed(apiResponse.Message)\n\t\treturn\n\t}\n\n\tcmd.ui.Ok()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015-2017, Cyrill @ Schumacher.fm and the CoreStore contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dbr\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"github.com\/corestoreio\/errors\"\n)\n\nfunc (a NullTime) toIFace(args *[]interface{}) {\n\tif a.Valid {\n\t\t*args = append(*args, a.Time)\n\t} else {\n\t\t*args = append(*args, nil)\n\t}\n}\n\nfunc (a NullTime) writeTo(w queryWriter, _ int) error {\n\tif a.Valid {\n\t\tdialect.EscapeTime(w, a.Time)\n\t\treturn nil\n\t}\n\t_, err := w.WriteString(\"NULL\")\n\treturn err\n}\n\nfunc (a NullTime) len() int { return 1 }\nfunc (a NullTime) Operator(opt byte) Argument {\n\ta.opt = opt\n\treturn a\n}\n\nfunc (a NullTime) operator() byte { return a.opt }\n\n\/\/ MakeNullTime creates a new NullTime. Setting the second optional argument to\n\/\/ false, the string will not be valid anymore, hence NULL. NullTime implements\n\/\/ interface Argument.\nfunc MakeNullTime(t time.Time, valid ...bool) NullTime {\n\tv := true\n\tif len(valid) == 1 {\n\t\tv = valid[0]\n\t}\n\treturn NullTime{\n\t\tTime: t,\n\t\tValid: v,\n\t}\n}\n\n\/\/ MarshalJSON implements json.Marshaler.\n\/\/ It will encode null if this time is null.\nfunc (t NullTime) MarshalJSON() ([]byte, error) {\n\tif !t.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn t.Time.MarshalJSON()\n}\n\n\/\/ UnmarshalJSON implements json.Unmarshaler.\n\/\/ It supports string, object (e.g. pq.NullTime and friends)\n\/\/ and null input.\nfunc (t *NullTime) UnmarshalJSON(data []byte) error {\n\tvar err error\n\tvar v interface{}\n\tif err = json.Unmarshal(data, &v); err != nil {\n\t\treturn err\n\t}\n\tswitch x := v.(type) {\n\tcase string:\n\t\terr = t.Time.UnmarshalJSON(data)\n\tcase map[string]interface{}:\n\t\tti, tiOK := x[\"Time\"].(string)\n\t\tvalid, validOK := x[\"Valid\"].(bool)\n\t\tif !tiOK || !validOK {\n\t\t\treturn errors.NewNotValidf(`[dbr] json: unmarshalling object into Go value of type dbr.NullTime requires key \"Time\" to be of type string and key \"Valid\" to be of type bool; found %T and %T, respectively`, x[\"Time\"], x[\"Valid\"])\n\t\t}\n\t\terr = t.Time.UnmarshalText([]byte(ti))\n\t\tt.Valid = valid\n\t\treturn err\n\tcase nil:\n\t\tt.Valid = false\n\t\treturn nil\n\tdefault:\n\t\terr = errors.NewNotValidf(\"[dbr] json: cannot unmarshal %#v into Go value of type dbr.NullTime\", v)\n\t}\n\tt.Valid = err == nil\n\treturn err\n}\n\nfunc (t NullTime) MarshalText() ([]byte, error) {\n\tif !t.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn t.Time.MarshalText()\n}\n\nfunc (t *NullTime) UnmarshalText(text []byte) error {\n\tstr := string(text)\n\tif str == \"\" || str == \"null\" {\n\t\tt.Valid = false\n\t\treturn nil\n\t}\n\tif err := t.Time.UnmarshalText(text); err != nil {\n\t\treturn err\n\t}\n\tt.Valid = true\n\treturn nil\n}\n\n\/\/ SetValid changes this Time's value and sets it to be non-null.\nfunc (t *NullTime) SetValid(v time.Time) {\n\tt.Time = v\n\tt.Valid = true\n}\n\n\/\/ Ptr returns a pointer to this Time's value, or a nil pointer if this Time is null.\nfunc (t NullTime) Ptr() *time.Time {\n\tif !t.Valid {\n\t\treturn nil\n\t}\n\treturn &t.Time\n}\n\ntype argNullTimes struct {\n\topt byte\n\tdata []NullTime\n}\n\nfunc (a argNullTimes) toIFace(args *[]interface{}) {\n\tfor _, s := range a.data {\n\t\tif s.Valid {\n\t\t\t*args = append(*args, s.Time)\n\t\t} else {\n\t\t\t*args = append(*args, nil)\n\t\t}\n\t}\n}\n\nfunc (a argNullTimes) writeTo(w queryWriter, pos int) error {\n\tif a.operator() != OperatorIn && a.operator() != OperatorNotIn {\n\t\tif s := a.data[pos]; s.Valid {\n\t\t\tdialect.EscapeTime(w, s.Time)\n\t\t\treturn nil\n\t\t}\n\t\t_, err := w.WriteString(\"NULL\")\n\t\treturn err\n\t}\n\tl := len(a.data) - 1\n\tw.WriteRune('(')\n\tfor i, v := range a.data {\n\t\tif v.Valid {\n\t\t\tdialect.EscapeTime(w, v.Time)\n\t\t} else {\n\t\t\tw.WriteString(\"NULL\")\n\t\t}\n\t\tif i < l {\n\t\t\tw.WriteRune(',')\n\t\t}\n\t}\n\t_, err := w.WriteRune(')')\n\treturn err\n}\n\nfunc (a argNullTimes) len() int {\n\tif isNotIn(a.operator()) {\n\t\treturn len(a.data)\n\t}\n\treturn 1\n}\n\nfunc (a argNullTimes) Operator(opt byte) Argument {\n\ta.opt = opt\n\treturn a\n}\n\nfunc (a argNullTimes) operator() byte { return a.opt }\n\n\/\/ ArgNullTime adds a nullable Time or a slice of nullable Timess to the\n\/\/ argument list. Providing no arguments returns a NULL type.\nfunc ArgNullTime(args ...NullTime) Argument {\n\tif len(args) == 1 {\n\t\treturn args[0]\n\t}\n\treturn argNullTimes{data: args}\n}\n<commit_msg>storage\/dbr: Remove encoding\/json dependency in NullTime<commit_after>\/\/ Copyright 2015-2017, Cyrill @ Schumacher.fm and the CoreStore contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dbr\n\nimport (\n\t\"time\"\n\n\t\"github.com\/corestoreio\/errors\"\n)\n\nfunc (a NullTime) toIFace(args *[]interface{}) {\n\tif a.Valid {\n\t\t*args = append(*args, a.Time)\n\t} else {\n\t\t*args = append(*args, nil)\n\t}\n}\n\nfunc (a NullTime) writeTo(w queryWriter, _ int) error {\n\tif a.Valid {\n\t\tdialect.EscapeTime(w, a.Time)\n\t\treturn nil\n\t}\n\t_, err := w.WriteString(\"NULL\")\n\treturn err\n}\n\nfunc (a NullTime) len() int { return 1 }\nfunc (a NullTime) Operator(opt byte) Argument {\n\ta.opt = opt\n\treturn a\n}\n\nfunc (a NullTime) operator() byte { return a.opt }\n\n\/\/ MakeNullTime creates a new NullTime. Setting the second optional argument to\n\/\/ false, the string will not be valid anymore, hence NULL. NullTime implements\n\/\/ interface Argument.\nfunc MakeNullTime(t time.Time, valid ...bool) NullTime {\n\tv := true\n\tif len(valid) == 1 {\n\t\tv = valid[0]\n\t}\n\treturn NullTime{\n\t\tTime: t,\n\t\tValid: v,\n\t}\n}\n\n\/\/ MarshalJSON implements json.Marshaler.\n\/\/ It will encode null if this time is null.\nfunc (t NullTime) MarshalJSON() ([]byte, error) {\n\tif !t.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn t.Time.MarshalJSON()\n}\n\n\/\/ UnmarshalJSON implements json.Unmarshaler.\n\/\/ It supports string, object (e.g. pq.NullTime and friends)\n\/\/ and null input.\nfunc (t *NullTime) UnmarshalJSON(data []byte) error {\n\tvar err error\n\tvar v interface{}\n\tif err = JSONUnMarshalFn(data, &v); err != nil {\n\t\treturn err\n\t}\n\tswitch x := v.(type) {\n\tcase string:\n\t\terr = t.Time.UnmarshalJSON(data)\n\tcase map[string]interface{}:\n\t\tti, tiOK := x[\"Time\"].(string)\n\t\tvalid, validOK := x[\"Valid\"].(bool)\n\t\tif !tiOK || !validOK {\n\t\t\treturn errors.NewNotValidf(`[dbr] json: unmarshalling object into Go value of type dbr.NullTime requires key \"Time\" to be of type string and key \"Valid\" to be of type bool; found %T and %T, respectively`, x[\"Time\"], x[\"Valid\"])\n\t\t}\n\t\terr = t.Time.UnmarshalText([]byte(ti))\n\t\tt.Valid = valid\n\t\treturn err\n\tcase nil:\n\t\tt.Valid = false\n\t\treturn nil\n\tdefault:\n\t\terr = errors.NewNotValidf(\"[dbr] json: cannot unmarshal %#v into Go value of type dbr.NullTime\", v)\n\t}\n\tt.Valid = err == nil\n\treturn err\n}\n\nfunc (t NullTime) MarshalText() ([]byte, error) {\n\tif !t.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn t.Time.MarshalText()\n}\n\nfunc (t *NullTime) UnmarshalText(text []byte) error {\n\tstr := string(text)\n\tif str == \"\" || str == \"null\" {\n\t\tt.Valid = false\n\t\treturn nil\n\t}\n\tif err := t.Time.UnmarshalText(text); err != nil {\n\t\treturn err\n\t}\n\tt.Valid = true\n\treturn nil\n}\n\n\/\/ SetValid changes this Time's value and sets it to be non-null.\nfunc (t *NullTime) SetValid(v time.Time) {\n\tt.Time = v\n\tt.Valid = true\n}\n\n\/\/ Ptr returns a pointer to this Time's value, or a nil pointer if this Time is null.\nfunc (t NullTime) Ptr() *time.Time {\n\tif !t.Valid {\n\t\treturn nil\n\t}\n\treturn &t.Time\n}\n\ntype argNullTimes struct {\n\topt byte\n\tdata []NullTime\n}\n\nfunc (a argNullTimes) toIFace(args *[]interface{}) {\n\tfor _, s := range a.data {\n\t\tif s.Valid {\n\t\t\t*args = append(*args, s.Time)\n\t\t} else {\n\t\t\t*args = append(*args, nil)\n\t\t}\n\t}\n}\n\nfunc (a argNullTimes) writeTo(w queryWriter, pos int) error {\n\tif a.operator() != OperatorIn && a.operator() != OperatorNotIn {\n\t\tif s := a.data[pos]; s.Valid {\n\t\t\tdialect.EscapeTime(w, s.Time)\n\t\t\treturn nil\n\t\t}\n\t\t_, err := w.WriteString(\"NULL\")\n\t\treturn err\n\t}\n\tl := len(a.data) - 1\n\tw.WriteRune('(')\n\tfor i, v := range a.data {\n\t\tif v.Valid {\n\t\t\tdialect.EscapeTime(w, v.Time)\n\t\t} else {\n\t\t\tw.WriteString(\"NULL\")\n\t\t}\n\t\tif i < l {\n\t\t\tw.WriteRune(',')\n\t\t}\n\t}\n\t_, err := w.WriteRune(')')\n\treturn err\n}\n\nfunc (a argNullTimes) len() int {\n\tif isNotIn(a.operator()) {\n\t\treturn len(a.data)\n\t}\n\treturn 1\n}\n\nfunc (a argNullTimes) Operator(opt byte) Argument {\n\ta.opt = opt\n\treturn a\n}\n\nfunc (a argNullTimes) operator() byte { return a.opt }\n\n\/\/ ArgNullTime adds a nullable Time or a slice of nullable Timess to the\n\/\/ argument list. Providing no arguments returns a NULL type.\nfunc ArgNullTime(args ...NullTime) Argument {\n\tif len(args) == 1 {\n\t\treturn args[0]\n\t}\n\treturn argNullTimes{data: args}\n}\n<|endoftext|>"} {"text":"<commit_before>package dests\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/stephane-martin\/skewer\/conf\"\n\t\"github.com\/stephane-martin\/skewer\/model\"\n\t\"github.com\/stephane-martin\/skewer\/model\/encoders\"\n)\n\nconst writeWait = 10 * time.Second\nconst pongWait = 60 * time.Second\nconst pingPeriod = (pongWait * 9) \/ 10\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n}\n\ntype WebsocketServerDestination struct {\n\t*baseDestination\n\tsendQueue chan *model.FullMessage\n\tserver *http.Server\n\tmessageType int\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\tconnections map[*websocket.Conn]bool\n\tstopchan <-chan struct{}\n}\n\nfunc NewWebsocketServerDestination(ctx context.Context, e *Env) (Destination, error) {\n\tconfig := e.config.WebsocketServerDest\n\n\td := &WebsocketServerDestination{\n\t\tbaseDestination: newBaseDestination(conf.HTTPServer, \"websocketserver\", e),\n\t\tconnections: make(map[*websocket.Conn]bool),\n\t\tstopchan: ctx.Done(),\n\t}\n\terr := d.setFormat(config.Format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch d.format {\n\tcase encoders.Protobuf:\n\t\td.messageType = websocket.BinaryMessage\n\tdefault:\n\t\td.messageType = websocket.TextMessage\n\t}\n\n\td.sendQueue = make(chan *model.FullMessage, 1024)\n\n\thostport := net.JoinHostPort(config.BindAddr, strconv.FormatInt(int64(config.Port), 10))\n\tlistener, err := d.binder.Listen(\"tcp\", hostport)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(config.WebEndPoint, d.serveRoot)\n\tmux.HandleFunc(config.LogEndPoint, d.serveLogs)\n\td.server = &http.Server{\n\t\tAddr: hostport,\n\t\tHandler: mux,\n\t}\n\td.wg.Add(1)\n\tgo d.serve(listener)\n\n\treturn d, nil\n}\n\nfunc reader(wsconn *websocket.Conn) {\n\tdefer wsconn.Close() \/\/ client is gone\n\n\twsconn.SetReadLimit(512)\n\twsconn.SetReadDeadline(time.Now().Add(pongWait))\n\twsconn.SetPongHandler(func(string) error {\n\t\twsconn.SetReadDeadline(time.Now().Add(pongWait))\n\t\treturn nil\n\t})\n\tfor {\n\t\t_, _, err := wsconn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *WebsocketServerDestination) serveLogs(w http.ResponseWriter, r *http.Request) {\n\t\/\/ a new websocket client is connected\n\td.logger.Debug(\"New websocket connection for logs\")\n\td.mu.Lock()\n\twsconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tif _, ok := err.(websocket.HandshakeError); !ok {\n\t\t\td.logger.Error(\"Websocket handshake error\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Error(\"Websocket upgrade error\", \"error\", err)\n\t\t}\n\t\td.mu.Unlock()\n\t\treturn\n\t}\n\td.logger.Debug(\"Connection upgraded to websocket\")\n\td.connections[wsconn] = true\n\td.mu.Unlock()\n\n\tdefer func() {\n\t\td.mu.Lock()\n\t\tdelete(d.connections, wsconn)\n\t\td.mu.Unlock()\n\t}()\n\n\td.wg.Add(1)\n\tgo d.writeLogs(wsconn)\n\treader(wsconn)\n}\n\nfunc (d *WebsocketServerDestination) writeLogs(wsconn *websocket.Conn) (err error) {\n\tpingTicker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tpingTicker.Stop()\n\t\tif err == nil {\n\t\t\terr = wsconn.WriteControl(\n\t\t\t\twebsocket.CloseMessage,\n\t\t\t\twebsocket.FormatCloseMessage(websocket.CloseNormalClosure, \"bye!\"),\n\t\t\t\ttime.Now().Add(time.Second),\n\t\t\t)\n\t\t}\n\t\twsconn.Close()\n\t\tif err != nil {\n\t\t\td.logger.Info(\"Websocket connection closed\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Info(\"Websocket connection closed\")\n\t\t}\n\t\td.wg.Done()\n\t}()\n\n\tvar message *model.FullMessage\n\tvar ok bool\n\tvar writer io.WriteCloser\n\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\twsconn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\terr = wsconn.WriteMessage(websocket.PingMessage, []byte{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-d.stopchan:\n\t\t\t\/\/ server is shutting down\n\t\t\treturn nil\n\t\tcase message, ok = <-d.sendQueue:\n\t\t\tif !ok {\n\t\t\t\t\/\/ no more messages to send\n\t\t\t\td.dofatal()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif writer == nil {\n\t\t\t\twriter, err = wsconn.NextWriter(d.messageType)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ client is gone\n\t\t\t\t\td.nack(message.Uid)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twsconn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\terr = d.encoder(message, writer)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ flush the ws buffer\n\t\t\t\terr = writer.Close()\n\t\t\t\twriter = nil\n\t\t\t\tif err == nil {\n\t\t\t\t\td.ack(message.Uid)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ error when flushing\n\t\t\t\t\td.nack(message.Uid)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else if encoders.IsEncodingError(err) {\n\t\t\t\t\/\/ message can not be encoded\n\t\t\t\td.permerr(message.Uid)\n\t\t\t} else {\n\t\t\t\t\/\/ error writing to client, must be gone\n\t\t\t\td.nack(message.Uid)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *WebsocketServerDestination) serveRoot(w http.ResponseWriter, r *http.Request) {\n\tio.Copy(ioutil.Discard, r.Body)\n\tr.Body.Close()\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (d *WebsocketServerDestination) serve(listener net.Listener) (err error) {\n\tdefer func() {\n\t\td.dofatal()\n\t\tif err != nil {\n\t\t\td.logger.Info(\"Websocket server has stopped\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Info(\"Websocket server has stopped\")\n\t\t}\n\t\tlistener.Close()\n\t\td.wg.Done()\n\t}()\n\treturn d.server.Serve(listener)\n}\n\nfunc (d *WebsocketServerDestination) Close() (err error) {\n\t\/\/ Send will not be called again, we can close the sendQueue\n\tclose(d.sendQueue)\n\t\/\/ close the HTTP server\n\terr = d.server.Close()\n\t\/\/ wait that the webserver and the websocket connections have finished\n\td.wg.Wait()\n\t\/\/ disconnect everything\n\td.mu.Lock()\n\tfor wsconn := range d.connections {\n\t\twsconn.Close()\n\t}\n\td.connections = make(map[*websocket.Conn]bool)\n\td.mu.Unlock()\n\t\/\/ nack remaining messages\n\tfor message := range d.sendQueue {\n\t\td.nack(message.Uid)\n\t}\n\treturn err\n}\n\nfunc (d *WebsocketServerDestination) Send(msg *model.FullMessage, partitionKey string, partitionNumber int32, topic string) (err error) {\n\tselect {\n\tcase d.sendQueue <- msg:\n\t\treturn nil\n\tcase <-d.stopchan:\n\t\treturn fmt.Errorf(\"Websocket server destination is shutting down\")\n\t}\n}\n<commit_msg>bug fix<commit_after>package dests\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/stephane-martin\/skewer\/conf\"\n\t\"github.com\/stephane-martin\/skewer\/model\"\n\t\"github.com\/stephane-martin\/skewer\/model\/encoders\"\n)\n\nconst writeWait = 10 * time.Second\nconst pongWait = 60 * time.Second\nconst pingPeriod = (pongWait * 9) \/ 10\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n}\n\ntype WebsocketServerDestination struct {\n\t*baseDestination\n\tsendQueue chan *model.FullMessage\n\tserver *http.Server\n\tmessageType int\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\tconnections map[*websocket.Conn]bool\n\tstopchan <-chan struct{}\n}\n\nfunc NewWebsocketServerDestination(ctx context.Context, e *Env) (Destination, error) {\n\tconfig := e.config.WebsocketServerDest\n\n\td := &WebsocketServerDestination{\n\t\tbaseDestination: newBaseDestination(conf.WebsocketServer, \"websocketserver\", e),\n\t\tconnections: make(map[*websocket.Conn]bool),\n\t\tstopchan: ctx.Done(),\n\t}\n\terr := d.setFormat(config.Format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch d.format {\n\tcase encoders.Protobuf:\n\t\td.messageType = websocket.BinaryMessage\n\tdefault:\n\t\td.messageType = websocket.TextMessage\n\t}\n\n\td.sendQueue = make(chan *model.FullMessage, 1024)\n\n\thostport := net.JoinHostPort(config.BindAddr, strconv.FormatInt(int64(config.Port), 10))\n\tlistener, err := d.binder.Listen(\"tcp\", hostport)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(config.WebEndPoint, d.serveRoot)\n\tmux.HandleFunc(config.LogEndPoint, d.serveLogs)\n\td.server = &http.Server{\n\t\tAddr: hostport,\n\t\tHandler: mux,\n\t}\n\td.wg.Add(1)\n\tgo d.serve(listener)\n\n\treturn d, nil\n}\n\nfunc reader(wsconn *websocket.Conn) {\n\tdefer wsconn.Close() \/\/ client is gone\n\n\twsconn.SetReadLimit(512)\n\twsconn.SetReadDeadline(time.Now().Add(pongWait))\n\twsconn.SetPongHandler(func(string) error {\n\t\twsconn.SetReadDeadline(time.Now().Add(pongWait))\n\t\treturn nil\n\t})\n\tfor {\n\t\t_, _, err := wsconn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *WebsocketServerDestination) serveLogs(w http.ResponseWriter, r *http.Request) {\n\t\/\/ a new websocket client is connected\n\td.logger.Debug(\"New websocket connection for logs\")\n\td.mu.Lock()\n\twsconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tif _, ok := err.(websocket.HandshakeError); !ok {\n\t\t\td.logger.Error(\"Websocket handshake error\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Error(\"Websocket upgrade error\", \"error\", err)\n\t\t}\n\t\td.mu.Unlock()\n\t\treturn\n\t}\n\td.logger.Debug(\"Connection upgraded to websocket\")\n\td.connections[wsconn] = true\n\td.mu.Unlock()\n\n\tdefer func() {\n\t\td.mu.Lock()\n\t\tdelete(d.connections, wsconn)\n\t\td.mu.Unlock()\n\t}()\n\n\td.wg.Add(1)\n\tgo d.writeLogs(wsconn)\n\treader(wsconn)\n}\n\nfunc (d *WebsocketServerDestination) writeLogs(wsconn *websocket.Conn) (err error) {\n\tpingTicker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tpingTicker.Stop()\n\t\tif err == nil {\n\t\t\terr = wsconn.WriteControl(\n\t\t\t\twebsocket.CloseMessage,\n\t\t\t\twebsocket.FormatCloseMessage(websocket.CloseNormalClosure, \"bye!\"),\n\t\t\t\ttime.Now().Add(time.Second),\n\t\t\t)\n\t\t}\n\t\twsconn.Close()\n\t\tif err != nil {\n\t\t\td.logger.Info(\"Websocket connection closed\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Info(\"Websocket connection closed\")\n\t\t}\n\t\td.wg.Done()\n\t}()\n\n\tvar message *model.FullMessage\n\tvar ok bool\n\tvar writer io.WriteCloser\n\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\twsconn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\terr = wsconn.WriteMessage(websocket.PingMessage, []byte{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-d.stopchan:\n\t\t\t\/\/ server is shutting down\n\t\t\treturn nil\n\t\tcase message, ok = <-d.sendQueue:\n\t\t\tif !ok {\n\t\t\t\t\/\/ no more messages to send\n\t\t\t\td.dofatal()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif writer == nil {\n\t\t\t\twriter, err = wsconn.NextWriter(d.messageType)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ client is gone\n\t\t\t\t\td.nack(message.Uid)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twsconn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\terr = d.encoder(message, writer)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ flush the ws buffer\n\t\t\t\terr = writer.Close()\n\t\t\t\twriter = nil\n\t\t\t\tif err == nil {\n\t\t\t\t\td.ack(message.Uid)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ error when flushing\n\t\t\t\t\td.nack(message.Uid)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else if encoders.IsEncodingError(err) {\n\t\t\t\t\/\/ message can not be encoded\n\t\t\t\td.permerr(message.Uid)\n\t\t\t} else {\n\t\t\t\t\/\/ error writing to client, must be gone\n\t\t\t\td.nack(message.Uid)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *WebsocketServerDestination) serveRoot(w http.ResponseWriter, r *http.Request) {\n\tio.Copy(ioutil.Discard, r.Body)\n\tr.Body.Close()\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (d *WebsocketServerDestination) serve(listener net.Listener) (err error) {\n\tdefer func() {\n\t\td.dofatal()\n\t\tif err != nil {\n\t\t\td.logger.Info(\"Websocket server has stopped\", \"error\", err)\n\t\t} else {\n\t\t\td.logger.Info(\"Websocket server has stopped\")\n\t\t}\n\t\tlistener.Close()\n\t\td.wg.Done()\n\t}()\n\treturn d.server.Serve(listener)\n}\n\nfunc (d *WebsocketServerDestination) Close() (err error) {\n\t\/\/ Send will not be called again, we can close the sendQueue\n\tclose(d.sendQueue)\n\t\/\/ close the HTTP server\n\terr = d.server.Close()\n\t\/\/ wait that the webserver and the websocket connections have finished\n\td.wg.Wait()\n\t\/\/ disconnect everything\n\td.mu.Lock()\n\tfor wsconn := range d.connections {\n\t\twsconn.Close()\n\t}\n\td.connections = make(map[*websocket.Conn]bool)\n\td.mu.Unlock()\n\t\/\/ nack remaining messages\n\tfor message := range d.sendQueue {\n\t\td.nack(message.Uid)\n\t}\n\treturn err\n}\n\nfunc (d *WebsocketServerDestination) Send(msg *model.FullMessage, partitionKey string, partitionNumber int32, topic string) (err error) {\n\tselect {\n\tcase d.sendQueue <- msg:\n\t\treturn nil\n\tcase <-d.stopchan:\n\t\treturn fmt.Errorf(\"Websocket server destination is shutting down\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/There is one rule here. \"thou shall not block\"\npackage main\n\nimport (\n\t_ \"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n)\n\nconst (\n\tMAX_AUDIT_MESSAGE_LENGTH = 8970\n)\n\nvar count int\n\nfunc genericPrinter(c <-chan string) {\n\tfor {\n\t\t\/\/Uncomment this bit for some rate messages when debugging\n\t\t\/\/if ping(&count, 500) == true {\n\t\t\/\/\tfmt.Println(count)\n\t\t\/\/}\n\t\tdata := <-c\n\t\tvar _ = data\n\t\tlogLine(data)\n\t\t\/\/fmt.Println(data)\n\t}\n}\n\nfunc ping(count *int, interval int) bool {\n\t*count++\n\treturn (*count % interval) == 0\n}\n\nfunc connect() (conn *NetlinkConnection) {\n\tconn, err := newNetlinkConnection()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n\n}\n\nfunc startFlow(conn *NetlinkConnection) {\n\t\/\/this mask starts the flow\n\tvar ret []byte\n\ta, err := newAuditStatusPayload()\n\ta.Mask = 4\n\ta.Enabled = 1\n\ta.Pid = uint32(syscall.Getpid())\n\n\tn := newNetlinkPacket(1001)\n\n\tret, _ = AuditRequestSerialize(n, a)\n\t\/\/PrettyPacketSplit(ret, []int{32, 48, 64, 96, 128, 160, 192, 224, 256, 288})\n\n\terr = conn.Send(&ret)\n\tif err != nil {\n\t\tfmt.Println(\"something broke\")\n\t}\n}\n\n\/\/Helper for profiling. Don't forget to \"pprof.StopCPUProfile()\" at some point or the file isn't written.\nfunc profile() {\n\tf, err := os.Create(\"\/tmp\/profile\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf2, err := os.Create(\"\/tmp\/profile2\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpprof.StartCPUProfile(f)\n\tpprof.WriteHeapProfile(f2)\n}\n\nfunc main() {\n\n\teventJsonChannel := make(chan string)\n\t\/\/This buffer holds partial events because they come as associated but separate lines from the kernel\n\teventBuffer := make(map[int]map[string]string)\n\n\tgo genericPrinter(eventJsonChannel)\n\n\tconn := connect()\n\tstartFlow(conn)\n\n\t\/\/Main loop. Get data from netlink and send it to the json lib for processing\n\tfor {\n\t\tdata, _ := conn.Receive()\n\t\tdstring := fmt.Sprintf(\"%s\", data[16:])\n\t\tmakeJsonString(eventBuffer, dstring, eventJsonChannel)\n\n\t}\n}\n<commit_msg>now supports a config. (named config.yaml)<commit_after>\/\/There is one rule here. \"thou shall not block\"\npackage main\n\nimport (\n\t_ \"bufio\"\n\t\"fmt\"\n\t\"github.com\/spf13\/viper\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nconst (\n\tMAX_AUDIT_MESSAGE_LENGTH = 8970\n)\n\nvar count int\n\nfunc genericPrinter(c <-chan string) {\n\tfor {\n\t\t\/\/Uncomment this bit for some rate messages when debugging\n\t\t\/\/if ping(&count, 500) == true {\n\t\t\/\/\tfmt.Println(count)\n\t\t\/\/}\n\t\tdata := <-c\n\t\tvar _ = data\n\t\tlogLine(data)\n\t\t\/\/fmt.Println(data)\n\t}\n}\n\nfunc ping(count *int, interval int) bool {\n\t*count++\n\treturn (*count % interval) == 0\n}\n\nfunc connect() (conn *NetlinkConnection) {\n\tconn, err := newNetlinkConnection()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n\n}\n\nfunc startFlow(conn *NetlinkConnection) {\n\t\/\/this mask starts the flow\n\tvar ret []byte\n\ta, err := newAuditStatusPayload()\n\ta.Mask = 4\n\ta.Enabled = 1\n\ta.Pid = uint32(syscall.Getpid())\n\n\tn := newNetlinkPacket(1001)\n\n\tret, _ = AuditRequestSerialize(n, a)\n\t\/\/PrettyPacketSplit(ret, []int{32, 48, 64, 96, 128, 160, 192, 224, 256, 288})\n\n\terr = conn.Send(&ret)\n\tif err != nil {\n\t\tfmt.Println(\"something broke\")\n\t}\n}\n\n\/\/Helper for profiling. Don't forget to \"pprof.StopCPUProfile()\" at some point or the file isn't written.\nfunc profile() {\n\tf, err := os.Create(\"\/tmp\/profile\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf2, err := os.Create(\"\/tmp\/profile2\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpprof.StartCPUProfile(f)\n\tpprof.WriteHeapProfile(f2)\n}\n\nfunc loadConfig() {\n\tviper.SetConfigName(\"config\")\n\tviper.AddConfigPath(\".\")\n\terr := viper.ReadInConfig() \/\/ Find and read the config file\n\tif err != nil { \/\/ Handle errors reading the config file\n\t\tfmt.Println(\"Log not found. Running in default mode. (forwarding all events to syslog)\")\n\t\treturn\n\t}\n\tif viper.GetBool(\"canary\") {\n\t\tgo canaryGo(viper.GetString(\"host\"), viper.GetString(\"port\"))\n\t}\n\tif rules := viper.GetStringSlice(\"rules\"); len(rules) != 0 {\n\t\tfor _, v := range rules {\n\t\t\tvar _ = v\n\t\t\tv := strings.Fields(v)\n\t\t\terr := exec.Command(\"auditctl\", v...).Start()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Println(\"No rules found. Running with existing ruleset (may be empty!)\")\n\t}\n}\n\nfunc main() {\n\n\tloadConfig()\n\n\teventJsonChannel := make(chan string)\n\t\/\/This buffer holds partial events because they come as associated but separate lines from the kernel\n\teventBuffer := make(map[int]map[string]string)\n\n\tgo genericPrinter(eventJsonChannel)\n\n\tconn := connect()\n\tstartFlow(conn)\n\n\t\/\/Main loop. Get data from netlink and send it to the json lib for processing\n\tfor {\n\t\tdata, _ := conn.Receive()\n\t\tdstring := fmt.Sprintf(\"%s\", data[16:])\n\t\tmakeJsonString(eventBuffer, dstring, eventJsonChannel)\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any\n\/\/ existing IDL tags on the Go struct.\npackage protobuf\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/args\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/generator\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/namer\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/parser\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/types\"\n\n\tflag \"github.com\/spf13\/pflag\"\n)\n\ntype Generator struct {\n\tCommon args.GeneratorArgs\n\tPackages string\n\tOutputBase string\n\tProtoImport []string\n\tConditional string\n\tClean bool\n\tOnlyIDL bool\n\tKeepGogoproto bool\n\tSkipGeneratedRewrite bool\n\tDropEmbeddedFields string\n}\n\nfunc New() *Generator {\n\tsourceTree := args.DefaultSourceTree()\n\tcommon := args.GeneratorArgs{\n\t\tOutputBase: sourceTree,\n\t\tGoHeaderFilePath: filepath.Join(sourceTree, \"k8s.io\/kubernetes\/hack\/boilerplate\/boilerplate.go.txt\"),\n\t}\n\tdefaultProtoImport := filepath.Join(sourceTree, \"k8s.io\", \"kubernetes\", \"vendor\", \"github.com\", \"gogo\", \"protobuf\", \"protobuf\")\n\treturn &Generator{\n\t\tCommon: common,\n\t\tOutputBase: sourceTree,\n\t\tProtoImport: []string{defaultProtoImport},\n\t\tPackages: strings.Join([]string{\n\t\t\t`+k8s.io\/kubernetes\/pkg\/util\/intstr`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/api\/resource`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/runtime`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/watch\/versioned`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/api\/unversioned`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/api\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/policy\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/extensions\/v1beta1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/autoscaling\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/batch\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/batch\/v2alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/apps\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/rbac\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/federation\/apis\/federation\/v1beta1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/certificates\/v1alpha1`,\n\t\t}, \",\"),\n\t\tDropEmbeddedFields: \"k8s.io\/kubernetes\/pkg\/api\/unversioned.TypeMeta\",\n\t}\n}\n\nfunc (g *Generator) BindFlags(flag *flag.FlagSet) {\n\tflag.StringVarP(&g.Common.GoHeaderFilePath, \"go-header-file\", \"h\", g.Common.GoHeaderFilePath, \"File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.\")\n\tflag.BoolVar(&g.Common.VerifyOnly, \"verify-only\", g.Common.VerifyOnly, \"If true, only verify existing output, do not write anything.\")\n\tflag.StringVarP(&g.Packages, \"packages\", \"p\", g.Packages, \"comma-separated list of directories to get input types from. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.\")\n\tflag.StringVarP(&g.OutputBase, \"output-base\", \"o\", g.OutputBase, \"Output base; defaults to $GOPATH\/src\/\")\n\tflag.StringSliceVar(&g.ProtoImport, \"proto-import\", g.ProtoImport, \"The search path for the core protobuf .protos, required, defaults to GODEPS on path.\")\n\tflag.StringVar(&g.Conditional, \"conditional\", g.Conditional, \"An optional Golang build tag condition to add to the generated Go code\")\n\tflag.BoolVar(&g.Clean, \"clean\", g.Clean, \"If true, remove all generated files for the specified Packages.\")\n\tflag.BoolVar(&g.OnlyIDL, \"only-idl\", g.OnlyIDL, \"If true, only generate the IDL for each package.\")\n\tflag.BoolVar(&g.KeepGogoproto, \"keep-gogoproto\", g.KeepGogoproto, \"If true, the generated IDL will contain gogoprotobuf extensions which are normally removed\")\n\tflag.BoolVar(&g.SkipGeneratedRewrite, \"skip-generated-rewrite\", g.SkipGeneratedRewrite, \"If true, skip fixing up the generated.pb.go file (debugging only).\")\n\tflag.StringVar(&g.DropEmbeddedFields, \"drop-embedded-fields\", g.DropEmbeddedFields, \"Comma-delimited list of embedded Go types to omit from generated protobufs\")\n}\n\nfunc Run(g *Generator) {\n\tif g.Common.VerifyOnly {\n\t\tg.OnlyIDL = true\n\t\tg.Clean = false\n\t}\n\n\tb := parser.New()\n\tb.AddBuildTags(\"proto\")\n\n\tomitTypes := map[types.Name]struct{}{}\n\tfor _, t := range strings.Split(g.DropEmbeddedFields, \",\") {\n\t\tname := types.Name{}\n\t\tif i := strings.LastIndex(t, \".\"); i != -1 {\n\t\t\tname.Package, name.Name = t[:i], t[i+1:]\n\t\t} else {\n\t\t\tname.Name = t\n\t\t}\n\t\tif len(name.Name) == 0 {\n\t\t\tlog.Fatalf(\"--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v\", t)\n\t\t}\n\t\tomitTypes[name] = struct{}{}\n\t}\n\n\tboilerplate, err := g.Common.LoadGoBoilerplate()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed loading boilerplate: %v\", err)\n\t}\n\n\tprotobufNames := NewProtobufNamer()\n\toutputPackages := generator.Packages{}\n\tfor _, d := range strings.Split(g.Packages, \",\") {\n\t\tgenerateAllTypes, outputPackage := true, true\n\t\tswitch {\n\t\tcase strings.HasPrefix(d, \"+\"):\n\t\t\td = d[1:]\n\t\t\tgenerateAllTypes = false\n\t\tcase strings.HasPrefix(d, \"-\"):\n\t\t\td = d[1:]\n\t\t\toutputPackage = false\n\t\t}\n\t\tif strings.Contains(d, \"-\") {\n\t\t\tlog.Fatalf(\"Package names must be valid protobuf package identifiers, which allow only [a-z0-9_]: %s\", d)\n\t\t}\n\t\tname := protoSafePackage(d)\n\t\tparts := strings.SplitN(d, \"=\", 2)\n\t\tif len(parts) > 1 {\n\t\t\td = parts[0]\n\t\t\tname = parts[1]\n\t\t}\n\t\tp := newProtobufPackage(d, name, generateAllTypes, omitTypes)\n\t\theader := append([]byte{}, boilerplate...)\n\t\theader = append(header, p.HeaderText...)\n\t\tp.HeaderText = header\n\t\tprotobufNames.Add(p)\n\t\tif outputPackage {\n\t\t\toutputPackages = append(outputPackages, p)\n\t\t}\n\t}\n\n\tif !g.Common.VerifyOnly {\n\t\tfor _, p := range outputPackages {\n\t\t\tif err := p.(*protobufPackage).Clean(g.OutputBase); err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to clean package %s: %v\", p.Name(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif g.Clean {\n\t\treturn\n\t}\n\n\tfor _, p := range protobufNames.List() {\n\t\tif err := b.AddDir(p.Path()); err != nil {\n\t\t\tlog.Fatalf(\"Unable to add directory %q: %v\", p.Path(), err)\n\t\t}\n\t}\n\n\tc, err := generator.NewContext(\n\t\tb,\n\t\tnamer.NameSystems{\n\t\t\t\"public\": namer.NewPublicNamer(3),\n\t\t\t\"proto\": protobufNames,\n\t\t},\n\t\t\"public\",\n\t)\n\tc.Verify = g.Common.VerifyOnly\n\tc.FileTypes[\"protoidl\"] = NewProtoFile()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed making a context: %v\", err)\n\t}\n\n\tif err := protobufNames.AssignTypesToPackages(c); err != nil {\n\t\tlog.Fatalf(\"Failed to identify Common types: %v\", err)\n\t}\n\n\tif err := c.ExecutePackages(g.OutputBase, outputPackages); err != nil {\n\t\tlog.Fatalf(\"Failed executing generator: %v\", err)\n\t}\n\n\tif g.OnlyIDL {\n\t\treturn\n\t}\n\n\tif _, err := exec.LookPath(\"protoc\"); err != nil {\n\t\tlog.Fatalf(\"Unable to find 'protoc': %v\", err)\n\t}\n\n\tsearchArgs := []string{\"-I\", \".\", \"-I\", g.OutputBase}\n\tif len(g.ProtoImport) != 0 {\n\t\tfor _, s := range g.ProtoImport {\n\t\t\tsearchArgs = append(searchArgs, \"-I\", s)\n\t\t}\n\t}\n\targs := append(searchArgs, fmt.Sprintf(\"--gogo_out=%s\", g.OutputBase))\n\n\tbuf := &bytes.Buffer{}\n\tif len(g.Conditional) > 0 {\n\t\tfmt.Fprintf(buf, \"\/\/ +build %s\\n\\n\", g.Conditional)\n\t}\n\tbuf.Write(boilerplate)\n\n\tfor _, outputPackage := range outputPackages {\n\t\tp := outputPackage.(*protobufPackage)\n\n\t\tpath := filepath.Join(g.OutputBase, p.ImportPath())\n\t\toutputPath := filepath.Join(g.OutputBase, p.OutputPath())\n\n\t\t\/\/ generate the gogoprotobuf protoc\n\t\tcmd := exec.Command(\"protoc\", append(args, path)...)\n\t\tout, err := cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to generate protoc on %s: %v\", p.PackageName, err)\n\t\t}\n\n\t\tif g.SkipGeneratedRewrite {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ alter the generated protobuf file to remove the generated types (but leave the serializers) and rewrite the\n\t\t\/\/ package statement to match the desired package name\n\t\tif err := RewriteGeneratedGogoProtobufFile(outputPath, p.ExtractGeneratedType, p.OptionalTypeName, buf.Bytes()); err != nil {\n\t\t\tlog.Fatalf(\"Unable to rewrite generated %s: %v\", outputPath, err)\n\t\t}\n\n\t\t\/\/ sort imports\n\t\tcmd = exec.Command(\"goimports\", \"-w\", outputPath)\n\t\tout, err = cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to rewrite imports for %s: %v\", p.PackageName, err)\n\t\t}\n\n\t\t\/\/ format and simplify the generated file\n\t\tcmd = exec.Command(\"gofmt\", \"-s\", \"-w\", outputPath)\n\t\tout, err = cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to apply gofmt for %s: %v\", p.PackageName, err)\n\t\t}\n\t}\n\n\tif g.SkipGeneratedRewrite {\n\t\treturn\n\t}\n\n\tif !g.KeepGogoproto {\n\t\t\/\/ generate, but do so without gogoprotobuf extensions\n\t\tfor _, outputPackage := range outputPackages {\n\t\t\tp := outputPackage.(*protobufPackage)\n\t\t\tp.OmitGogo = true\n\t\t}\n\t\tif err := c.ExecutePackages(g.OutputBase, outputPackages); err != nil {\n\t\t\tlog.Fatalf(\"Failed executing generator: %v\", err)\n\t\t}\n\t}\n\n\tfor _, outputPackage := range outputPackages {\n\t\tp := outputPackage.(*protobufPackage)\n\n\t\tif len(p.StructTags) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpattern := filepath.Join(g.OutputBase, p.PackagePath, \"*.go\")\n\t\tfiles, err := filepath.Glob(pattern)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't glob pattern %q: %v\", pattern, err)\n\t\t}\n\n\t\tfor _, s := range files {\n\t\t\tif strings.HasSuffix(s, \"_test.go\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := RewriteTypesWithProtobufStructTags(s, p.StructTags); err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to rewrite with struct tags %s: %v\", s, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fail correctly in go-to-protobuf<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any\n\/\/ existing IDL tags on the Go struct.\npackage protobuf\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/args\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/generator\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/namer\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/parser\"\n\t\"k8s.io\/kubernetes\/cmd\/libs\/go2idl\/types\"\n\n\tflag \"github.com\/spf13\/pflag\"\n)\n\ntype Generator struct {\n\tCommon args.GeneratorArgs\n\tPackages string\n\tOutputBase string\n\tProtoImport []string\n\tConditional string\n\tClean bool\n\tOnlyIDL bool\n\tKeepGogoproto bool\n\tSkipGeneratedRewrite bool\n\tDropEmbeddedFields string\n}\n\nfunc New() *Generator {\n\tsourceTree := args.DefaultSourceTree()\n\tcommon := args.GeneratorArgs{\n\t\tOutputBase: sourceTree,\n\t\tGoHeaderFilePath: filepath.Join(sourceTree, \"k8s.io\/kubernetes\/hack\/boilerplate\/boilerplate.go.txt\"),\n\t}\n\tdefaultProtoImport := filepath.Join(sourceTree, \"k8s.io\", \"kubernetes\", \"vendor\", \"github.com\", \"gogo\", \"protobuf\", \"protobuf\")\n\treturn &Generator{\n\t\tCommon: common,\n\t\tOutputBase: sourceTree,\n\t\tProtoImport: []string{defaultProtoImport},\n\t\tPackages: strings.Join([]string{\n\t\t\t`+k8s.io\/kubernetes\/pkg\/util\/intstr`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/api\/resource`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/runtime`,\n\t\t\t`+k8s.io\/kubernetes\/pkg\/watch\/versioned`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/api\/unversioned`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/api\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/policy\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/extensions\/v1beta1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/autoscaling\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/batch\/v1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/batch\/v2alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/apps\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/rbac\/v1alpha1`,\n\t\t\t`k8s.io\/kubernetes\/federation\/apis\/federation\/v1beta1`,\n\t\t\t`k8s.io\/kubernetes\/pkg\/apis\/certificates\/v1alpha1`,\n\t\t}, \",\"),\n\t\tDropEmbeddedFields: \"k8s.io\/kubernetes\/pkg\/api\/unversioned.TypeMeta\",\n\t}\n}\n\nfunc (g *Generator) BindFlags(flag *flag.FlagSet) {\n\tflag.StringVarP(&g.Common.GoHeaderFilePath, \"go-header-file\", \"h\", g.Common.GoHeaderFilePath, \"File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.\")\n\tflag.BoolVar(&g.Common.VerifyOnly, \"verify-only\", g.Common.VerifyOnly, \"If true, only verify existing output, do not write anything.\")\n\tflag.StringVarP(&g.Packages, \"packages\", \"p\", g.Packages, \"comma-separated list of directories to get input types from. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.\")\n\tflag.StringVarP(&g.OutputBase, \"output-base\", \"o\", g.OutputBase, \"Output base; defaults to $GOPATH\/src\/\")\n\tflag.StringSliceVar(&g.ProtoImport, \"proto-import\", g.ProtoImport, \"The search path for the core protobuf .protos, required, defaults to GODEPS on path.\")\n\tflag.StringVar(&g.Conditional, \"conditional\", g.Conditional, \"An optional Golang build tag condition to add to the generated Go code\")\n\tflag.BoolVar(&g.Clean, \"clean\", g.Clean, \"If true, remove all generated files for the specified Packages.\")\n\tflag.BoolVar(&g.OnlyIDL, \"only-idl\", g.OnlyIDL, \"If true, only generate the IDL for each package.\")\n\tflag.BoolVar(&g.KeepGogoproto, \"keep-gogoproto\", g.KeepGogoproto, \"If true, the generated IDL will contain gogoprotobuf extensions which are normally removed\")\n\tflag.BoolVar(&g.SkipGeneratedRewrite, \"skip-generated-rewrite\", g.SkipGeneratedRewrite, \"If true, skip fixing up the generated.pb.go file (debugging only).\")\n\tflag.StringVar(&g.DropEmbeddedFields, \"drop-embedded-fields\", g.DropEmbeddedFields, \"Comma-delimited list of embedded Go types to omit from generated protobufs\")\n}\n\nfunc Run(g *Generator) {\n\tif g.Common.VerifyOnly {\n\t\tg.OnlyIDL = true\n\t\tg.Clean = false\n\t}\n\n\tb := parser.New()\n\tb.AddBuildTags(\"proto\")\n\n\tomitTypes := map[types.Name]struct{}{}\n\tfor _, t := range strings.Split(g.DropEmbeddedFields, \",\") {\n\t\tname := types.Name{}\n\t\tif i := strings.LastIndex(t, \".\"); i != -1 {\n\t\t\tname.Package, name.Name = t[:i], t[i+1:]\n\t\t} else {\n\t\t\tname.Name = t\n\t\t}\n\t\tif len(name.Name) == 0 {\n\t\t\tlog.Fatalf(\"--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v\", t)\n\t\t}\n\t\tomitTypes[name] = struct{}{}\n\t}\n\n\tboilerplate, err := g.Common.LoadGoBoilerplate()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed loading boilerplate: %v\", err)\n\t}\n\n\tprotobufNames := NewProtobufNamer()\n\toutputPackages := generator.Packages{}\n\tfor _, d := range strings.Split(g.Packages, \",\") {\n\t\tgenerateAllTypes, outputPackage := true, true\n\t\tswitch {\n\t\tcase strings.HasPrefix(d, \"+\"):\n\t\t\td = d[1:]\n\t\t\tgenerateAllTypes = false\n\t\tcase strings.HasPrefix(d, \"-\"):\n\t\t\td = d[1:]\n\t\t\toutputPackage = false\n\t\t}\n\t\tif strings.Contains(d, \"-\") {\n\t\t\tlog.Fatalf(\"Package names must be valid protobuf package identifiers, which allow only [a-z0-9_]: %s\", d)\n\t\t}\n\t\tname := protoSafePackage(d)\n\t\tparts := strings.SplitN(d, \"=\", 2)\n\t\tif len(parts) > 1 {\n\t\t\td = parts[0]\n\t\t\tname = parts[1]\n\t\t}\n\t\tp := newProtobufPackage(d, name, generateAllTypes, omitTypes)\n\t\theader := append([]byte{}, boilerplate...)\n\t\theader = append(header, p.HeaderText...)\n\t\tp.HeaderText = header\n\t\tprotobufNames.Add(p)\n\t\tif outputPackage {\n\t\t\toutputPackages = append(outputPackages, p)\n\t\t}\n\t}\n\n\tif !g.Common.VerifyOnly {\n\t\tfor _, p := range outputPackages {\n\t\t\tif err := p.(*protobufPackage).Clean(g.OutputBase); err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to clean package %s: %v\", p.Name(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif g.Clean {\n\t\treturn\n\t}\n\n\tfor _, p := range protobufNames.List() {\n\t\tif err := b.AddDir(p.Path()); err != nil {\n\t\t\tlog.Fatalf(\"Unable to add directory %q: %v\", p.Path(), err)\n\t\t}\n\t}\n\n\tc, err := generator.NewContext(\n\t\tb,\n\t\tnamer.NameSystems{\n\t\t\t\"public\": namer.NewPublicNamer(3),\n\t\t\t\"proto\": protobufNames,\n\t\t},\n\t\t\"public\",\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed making a context: %v\", err)\n\t}\n\n\tc.Verify = g.Common.VerifyOnly\n\tc.FileTypes[\"protoidl\"] = NewProtoFile()\n\n\tif err := protobufNames.AssignTypesToPackages(c); err != nil {\n\t\tlog.Fatalf(\"Failed to identify Common types: %v\", err)\n\t}\n\n\tif err := c.ExecutePackages(g.OutputBase, outputPackages); err != nil {\n\t\tlog.Fatalf(\"Failed executing generator: %v\", err)\n\t}\n\n\tif g.OnlyIDL {\n\t\treturn\n\t}\n\n\tif _, err := exec.LookPath(\"protoc\"); err != nil {\n\t\tlog.Fatalf(\"Unable to find 'protoc': %v\", err)\n\t}\n\n\tsearchArgs := []string{\"-I\", \".\", \"-I\", g.OutputBase}\n\tif len(g.ProtoImport) != 0 {\n\t\tfor _, s := range g.ProtoImport {\n\t\t\tsearchArgs = append(searchArgs, \"-I\", s)\n\t\t}\n\t}\n\targs := append(searchArgs, fmt.Sprintf(\"--gogo_out=%s\", g.OutputBase))\n\n\tbuf := &bytes.Buffer{}\n\tif len(g.Conditional) > 0 {\n\t\tfmt.Fprintf(buf, \"\/\/ +build %s\\n\\n\", g.Conditional)\n\t}\n\tbuf.Write(boilerplate)\n\n\tfor _, outputPackage := range outputPackages {\n\t\tp := outputPackage.(*protobufPackage)\n\n\t\tpath := filepath.Join(g.OutputBase, p.ImportPath())\n\t\toutputPath := filepath.Join(g.OutputBase, p.OutputPath())\n\n\t\t\/\/ generate the gogoprotobuf protoc\n\t\tcmd := exec.Command(\"protoc\", append(args, path)...)\n\t\tout, err := cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to generate protoc on %s: %v\", p.PackageName, err)\n\t\t}\n\n\t\tif g.SkipGeneratedRewrite {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ alter the generated protobuf file to remove the generated types (but leave the serializers) and rewrite the\n\t\t\/\/ package statement to match the desired package name\n\t\tif err := RewriteGeneratedGogoProtobufFile(outputPath, p.ExtractGeneratedType, p.OptionalTypeName, buf.Bytes()); err != nil {\n\t\t\tlog.Fatalf(\"Unable to rewrite generated %s: %v\", outputPath, err)\n\t\t}\n\n\t\t\/\/ sort imports\n\t\tcmd = exec.Command(\"goimports\", \"-w\", outputPath)\n\t\tout, err = cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to rewrite imports for %s: %v\", p.PackageName, err)\n\t\t}\n\n\t\t\/\/ format and simplify the generated file\n\t\tcmd = exec.Command(\"gofmt\", \"-s\", \"-w\", outputPath)\n\t\tout, err = cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tlog.Printf(string(out))\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(strings.Join(cmd.Args, \" \"))\n\t\t\tlog.Fatalf(\"Unable to apply gofmt for %s: %v\", p.PackageName, err)\n\t\t}\n\t}\n\n\tif g.SkipGeneratedRewrite {\n\t\treturn\n\t}\n\n\tif !g.KeepGogoproto {\n\t\t\/\/ generate, but do so without gogoprotobuf extensions\n\t\tfor _, outputPackage := range outputPackages {\n\t\t\tp := outputPackage.(*protobufPackage)\n\t\t\tp.OmitGogo = true\n\t\t}\n\t\tif err := c.ExecutePackages(g.OutputBase, outputPackages); err != nil {\n\t\t\tlog.Fatalf(\"Failed executing generator: %v\", err)\n\t\t}\n\t}\n\n\tfor _, outputPackage := range outputPackages {\n\t\tp := outputPackage.(*protobufPackage)\n\n\t\tif len(p.StructTags) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpattern := filepath.Join(g.OutputBase, p.PackagePath, \"*.go\")\n\t\tfiles, err := filepath.Glob(pattern)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't glob pattern %q: %v\", pattern, err)\n\t\t}\n\n\t\tfor _, s := range files {\n\t\t\tif strings.HasSuffix(s, \"_test.go\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := RewriteTypesWithProtobufStructTags(s, p.StructTags); err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to rewrite with struct tags %s: %v\", s, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage schema\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"vitess.io\/vitess\/go\/vt\/callerid\"\n\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/queryservice\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\t\"vitess.io\/vitess\/go\/sqltypes\"\n\tquerypb \"vitess.io\/vitess\/go\/vt\/proto\/query\"\n\n\t\"vitess.io\/vitess\/go\/vt\/discovery\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/sqlparser\"\n\t\"vitess.io\/vitess\/go\/vt\/vtgate\/vindexes\"\n)\n\ntype (\n\tkeyspaceStr = string\n\ttableNameStr = string\n\n\t\/\/ Tracker contains the required fields to perform schema tracking.\n\tTracker struct {\n\t\tch chan *discovery.TabletHealth\n\t\tcancel context.CancelFunc\n\n\t\tmu sync.Mutex\n\t\ttables *tableMap\n\t\tctx context.Context\n\t\tsignal func() \/\/ a function that we'll call whenever we have new schema data\n\n\t\t\/\/ map of keyspace currently tracked\n\t\ttracked map[keyspaceStr]*updateController\n\t\tconsumeDelay time.Duration\n\t}\n)\n\n\/\/ defaultConsumeDelay is the default time, the updateController will wait before checking the schema fetch request queue.\nconst defaultConsumeDelay = 1 * time.Second\n\n\/\/ NewTracker creates the tracker object.\nfunc NewTracker(ch chan *discovery.TabletHealth, user *string) *Tracker {\n\tctx := context.Background()\n\t\/\/ Set the caller on the context if the user is provided.\n\t\/\/ This user that will be sent down to vttablet calls.\n\tif user != nil && *user != \"\" {\n\t\tctx = callerid.NewContext(ctx, nil, callerid.NewImmediateCallerID(*user))\n\t}\n\n\treturn &Tracker{\n\t\tctx: ctx,\n\t\tch: ch,\n\t\ttables: &tableMap{m: map[keyspaceStr]map[tableNameStr][]vindexes.Column{}},\n\t\ttracked: map[keyspaceStr]*updateController{},\n\t\tconsumeDelay: defaultConsumeDelay,\n\t}\n}\n\n\/\/ LoadKeyspace loads the keyspace schema.\nfunc (t *Tracker) LoadKeyspace(conn queryservice.QueryService, target *querypb.Target) error {\n\tres, err := conn.Execute(t.ctx, target, mysql.FetchTables, nil, 0, 0, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.updateTables(target.Keyspace, res)\n\tt.tracked[target.Keyspace].setLoaded(true)\n\tlog.Infof(\"finished loading schema for keyspace %s. Found %d tables\", target.Keyspace, len(res.Rows))\n\treturn nil\n}\n\n\/\/ Start starts the schema tracking.\nfunc (t *Tracker) Start() {\n\tlog.Info(\"Starting schema tracking\")\n\tctx, cancel := context.WithCancel(t.ctx)\n\tt.cancel = cancel\n\tgo func(ctx context.Context, t *Tracker) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase th := <-t.ch:\n\t\t\t\tksUpdater := t.getKeyspaceUpdateController(th)\n\t\t\t\tksUpdater.add(th)\n\t\t\tcase <-ctx.Done():\n\t\t\t\t\/\/ closing of the channel happens outside the scope of the tracker. It is the responsibility of the one who created this tracker.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx, t)\n}\n\n\/\/ getKeyspaceUpdateController returns the updateController for the given keyspace\n\/\/ the updateController will be created if there was none.\nfunc (t *Tracker) getKeyspaceUpdateController(th *discovery.TabletHealth) *updateController {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tksUpdater, exists := t.tracked[th.Target.Keyspace]\n\tif !exists {\n\t\tksUpdater = t.newUpdateController()\n\t\tt.tracked[th.Target.Keyspace] = ksUpdater\n\t}\n\treturn ksUpdater\n}\n\nfunc (t *Tracker) newUpdateController() *updateController {\n\treturn &updateController{update: t.updateSchema, reloadKeyspace: t.initKeyspace, signal: t.signal, consumeDelay: t.consumeDelay}\n}\n\nfunc (t *Tracker) initKeyspace(th *discovery.TabletHealth) bool {\n\terr := t.LoadKeyspace(th.Conn, th.Target)\n\tif err != nil {\n\t\tlog.Warningf(\"Unable to add keyspace to tracker: %v\", err)\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Stop stops the schema tracking\nfunc (t *Tracker) Stop() {\n\tlog.Info(\"Stopping schema tracking\")\n\tt.cancel()\n}\n\n\/\/ GetColumns returns the column list for table in the given keyspace.\nfunc (t *Tracker) GetColumns(ks string, tbl string) []vindexes.Column {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\treturn t.tables.get(ks, tbl)\n}\n\n\/\/ Tables returns a map with the columns for all known tables in the keyspace\nfunc (t *Tracker) Tables(ks string) map[string][]vindexes.Column {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tm := t.tables.m[ks]\n\tif m == nil {\n\t\treturn map[string][]vindexes.Column{} \/\/ we know nothing about this KS, so that is the info we can give out\n\t}\n\n\treturn m\n}\n\nfunc (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {\n\ttablesUpdated := th.Stats.TableSchemaChanged\n\ttables, err := sqltypes.BuildBindVariable(tablesUpdated)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to read updated tables from TabletHealth: %v\", err)\n\t\treturn false\n\t}\n\tbv := map[string]*querypb.BindVariable{\"tableNames\": tables}\n\tres, err := th.Conn.Execute(t.ctx, th.Target, mysql.FetchUpdatedTables, bv, 0, 0, nil)\n\tif err != nil {\n\t\tt.tracked[th.Target.Keyspace].setLoaded(false)\n\t\t\/\/ TODO: optimize for the tables that got errored out.\n\t\tlog.Warningf(\"error fetching new schema for %v, making them non-authoritative: %v\", tablesUpdated, err)\n\t\treturn false\n\t}\n\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\t\/\/ first we empty all prior schema. deleted tables will not show up in the result,\n\t\/\/ so this is the only chance to delete\n\tfor _, tbl := range tablesUpdated {\n\t\tt.tables.delete(th.Target.Keyspace, tbl)\n\t}\n\tt.updateTables(th.Target.Keyspace, res)\n\treturn true\n}\n\nfunc (t *Tracker) updateTables(keyspace string, res *sqltypes.Result) {\n\tfor _, row := range res.Rows {\n\t\ttbl := row[0].ToString()\n\t\tcolName := row[1].ToString()\n\t\tcolType := row[2].ToString()\n\n\t\tcType := sqlparser.ColumnType{Type: colType}\n\t\tcol := vindexes.Column{Name: sqlparser.NewColIdent(colName), Type: cType.SQLType()}\n\t\tcols := t.tables.get(keyspace, tbl)\n\n\t\tt.tables.set(keyspace, tbl, append(cols, col))\n\t}\n}\n\n\/\/ RegisterSignalReceiver allows a function to register to be called when new schema is available\nfunc (t *Tracker) RegisterSignalReceiver(f func()) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tfor _, controller := range t.tracked {\n\t\tcontroller.signal = f\n\t}\n\tt.signal = f\n}\n\n\/\/ AddNewKeyspace adds keyspace to the tracker.\nfunc (t *Tracker) AddNewKeyspace(conn queryservice.QueryService, target *querypb.Target) error {\n\tt.tracked[target.Keyspace] = t.newUpdateController()\n\treturn t.LoadKeyspace(conn, target)\n}\n\ntype tableMap struct {\n\tm map[keyspaceStr]map[tableNameStr][]vindexes.Column\n}\n\nfunc (tm *tableMap) set(ks, tbl string, cols []vindexes.Column) {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\tm = make(map[tableNameStr][]vindexes.Column)\n\t\ttm.m[ks] = m\n\t}\n\tm[tbl] = cols\n}\n\nfunc (tm *tableMap) get(ks, tbl string) []vindexes.Column {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\treturn nil\n\t}\n\treturn m[tbl]\n}\n\nfunc (tm *tableMap) delete(ks, tbl string) {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\treturn\n\t}\n\tdelete(m, tbl)\n}\n<commit_msg>minimize logging of errors when loading keyspace<commit_after>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage schema\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"vitess.io\/vitess\/go\/vt\/callerid\"\n\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/queryservice\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\t\"vitess.io\/vitess\/go\/sqltypes\"\n\tquerypb \"vitess.io\/vitess\/go\/vt\/proto\/query\"\n\n\t\"vitess.io\/vitess\/go\/vt\/discovery\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/sqlparser\"\n\t\"vitess.io\/vitess\/go\/vt\/vtgate\/vindexes\"\n)\n\ntype (\n\tkeyspaceStr = string\n\ttableNameStr = string\n\n\t\/\/ Tracker contains the required fields to perform schema tracking.\n\tTracker struct {\n\t\tch chan *discovery.TabletHealth\n\t\tcancel context.CancelFunc\n\n\t\tmu sync.Mutex\n\t\ttables *tableMap\n\t\tctx context.Context\n\t\tsignal func() \/\/ a function that we'll call whenever we have new schema data\n\n\t\t\/\/ map of keyspace currently tracked\n\t\ttracked map[keyspaceStr]*updateController\n\t\tconsumeDelay time.Duration\n\n\t\t\/\/ we'll only log a failed keyspace loading once\n\t\tfailedKeyspaces []string\n\t}\n)\n\n\/\/ defaultConsumeDelay is the default time, the updateController will wait before checking the schema fetch request queue.\nconst defaultConsumeDelay = 1 * time.Second\n\n\/\/ NewTracker creates the tracker object.\nfunc NewTracker(ch chan *discovery.TabletHealth, user *string) *Tracker {\n\tctx := context.Background()\n\t\/\/ Set the caller on the context if the user is provided.\n\t\/\/ This user that will be sent down to vttablet calls.\n\tif user != nil && *user != \"\" {\n\t\tctx = callerid.NewContext(ctx, nil, callerid.NewImmediateCallerID(*user))\n\t}\n\n\treturn &Tracker{\n\t\tctx: ctx,\n\t\tch: ch,\n\t\ttables: &tableMap{m: map[keyspaceStr]map[tableNameStr][]vindexes.Column{}},\n\t\ttracked: map[keyspaceStr]*updateController{},\n\t\tconsumeDelay: defaultConsumeDelay,\n\t}\n}\n\n\/\/ LoadKeyspace loads the keyspace schema.\nfunc (t *Tracker) LoadKeyspace(conn queryservice.QueryService, target *querypb.Target) error {\n\tres, err := conn.Execute(t.ctx, target, mysql.FetchTables, nil, 0, 0, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.updateTables(target.Keyspace, res)\n\tt.tracked[target.Keyspace].setLoaded(true)\n\tlog.Infof(\"finished loading schema for keyspace %s. Found %d tables\", target.Keyspace, len(res.Rows))\n\treturn nil\n}\n\n\/\/ Start starts the schema tracking.\nfunc (t *Tracker) Start() {\n\tlog.Info(\"Starting schema tracking\")\n\tctx, cancel := context.WithCancel(t.ctx)\n\tt.cancel = cancel\n\tgo func(ctx context.Context, t *Tracker) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase th := <-t.ch:\n\t\t\t\tksUpdater := t.getKeyspaceUpdateController(th)\n\t\t\t\tksUpdater.add(th)\n\t\t\tcase <-ctx.Done():\n\t\t\t\t\/\/ closing of the channel happens outside the scope of the tracker. It is the responsibility of the one who created this tracker.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx, t)\n}\n\n\/\/ getKeyspaceUpdateController returns the updateController for the given keyspace\n\/\/ the updateController will be created if there was none.\nfunc (t *Tracker) getKeyspaceUpdateController(th *discovery.TabletHealth) *updateController {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tksUpdater, exists := t.tracked[th.Target.Keyspace]\n\tif !exists {\n\t\tksUpdater = t.newUpdateController()\n\t\tt.tracked[th.Target.Keyspace] = ksUpdater\n\t}\n\treturn ksUpdater\n}\n\nfunc (t *Tracker) newUpdateController() *updateController {\n\treturn &updateController{update: t.updateSchema, reloadKeyspace: t.initKeyspace, signal: t.signal, consumeDelay: t.consumeDelay}\n}\n\nfunc (t *Tracker) initKeyspace(th *discovery.TabletHealth) bool {\n\tif t.isFailedKeyspace(th) {\n\t\treturn false\n\t}\n\n\terr := t.LoadKeyspace(th.Conn, th.Target)\n\tif err != nil {\n\t\tt.checkIfWeShouldFailKeyspace(th, err)\n\t\tlog.Warningf(\"Unable to add keyspace to tracker: %v\", err)\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ checkIfWeShouldFailKeyspace inspects an error and\n\/\/ will mark a keyspace as failed and won't try to load more information from it\nfunc (t *Tracker) checkIfWeShouldFailKeyspace(th *discovery.TabletHealth, err error) {\n\terrMessage := err.Error()\n\tif strings.Contains(errMessage, \"Unknown database '\") ||\n\t\tstrings.Contains(errMessage, \"Table '_vt.schemacopy' doesn't exist\") {\n\t\tt.failedKeyspaces = append(t.failedKeyspaces, th.Target.Keyspace)\n\t}\n}\n\nfunc (t *Tracker) isFailedKeyspace(th *discovery.TabletHealth) bool {\n\tfor _, keyspace := range t.failedKeyspaces {\n\t\tif th.Target.Keyspace == keyspace {\n\t\t\t\/\/ this keyspace is marked as failed. we are not going to reload\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Stop stops the schema tracking\nfunc (t *Tracker) Stop() {\n\tlog.Info(\"Stopping schema tracking\")\n\tt.cancel()\n}\n\n\/\/ GetColumns returns the column list for table in the given keyspace.\nfunc (t *Tracker) GetColumns(ks string, tbl string) []vindexes.Column {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\treturn t.tables.get(ks, tbl)\n}\n\n\/\/ Tables returns a map with the columns for all known tables in the keyspace\nfunc (t *Tracker) Tables(ks string) map[string][]vindexes.Column {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tm := t.tables.m[ks]\n\tif m == nil {\n\t\treturn map[string][]vindexes.Column{} \/\/ we know nothing about this KS, so that is the info we can give out\n\t}\n\n\treturn m\n}\n\nfunc (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {\n\ttablesUpdated := th.Stats.TableSchemaChanged\n\ttables, err := sqltypes.BuildBindVariable(tablesUpdated)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to read updated tables from TabletHealth: %v\", err)\n\t\treturn false\n\t}\n\tbv := map[string]*querypb.BindVariable{\"tableNames\": tables}\n\tres, err := th.Conn.Execute(t.ctx, th.Target, mysql.FetchUpdatedTables, bv, 0, 0, nil)\n\tif err != nil {\n\t\tt.tracked[th.Target.Keyspace].setLoaded(false)\n\t\t\/\/ TODO: optimize for the tables that got errored out.\n\t\tlog.Warningf(\"error fetching new schema for %v, making them non-authoritative: %v\", tablesUpdated, err)\n\t\treturn false\n\t}\n\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\t\/\/ first we empty all prior schema. deleted tables will not show up in the result,\n\t\/\/ so this is the only chance to delete\n\tfor _, tbl := range tablesUpdated {\n\t\tt.tables.delete(th.Target.Keyspace, tbl)\n\t}\n\tt.updateTables(th.Target.Keyspace, res)\n\treturn true\n}\n\nfunc (t *Tracker) updateTables(keyspace string, res *sqltypes.Result) {\n\tfor _, row := range res.Rows {\n\t\ttbl := row[0].ToString()\n\t\tcolName := row[1].ToString()\n\t\tcolType := row[2].ToString()\n\n\t\tcType := sqlparser.ColumnType{Type: colType}\n\t\tcol := vindexes.Column{Name: sqlparser.NewColIdent(colName), Type: cType.SQLType()}\n\t\tcols := t.tables.get(keyspace, tbl)\n\n\t\tt.tables.set(keyspace, tbl, append(cols, col))\n\t}\n}\n\n\/\/ RegisterSignalReceiver allows a function to register to be called when new schema is available\nfunc (t *Tracker) RegisterSignalReceiver(f func()) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tfor _, controller := range t.tracked {\n\t\tcontroller.signal = f\n\t}\n\tt.signal = f\n}\n\n\/\/ AddNewKeyspace adds keyspace to the tracker.\nfunc (t *Tracker) AddNewKeyspace(conn queryservice.QueryService, target *querypb.Target) error {\n\tt.tracked[target.Keyspace] = t.newUpdateController()\n\treturn t.LoadKeyspace(conn, target)\n}\n\ntype tableMap struct {\n\tm map[keyspaceStr]map[tableNameStr][]vindexes.Column\n}\n\nfunc (tm *tableMap) set(ks, tbl string, cols []vindexes.Column) {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\tm = make(map[tableNameStr][]vindexes.Column)\n\t\ttm.m[ks] = m\n\t}\n\tm[tbl] = cols\n}\n\nfunc (tm *tableMap) get(ks, tbl string) []vindexes.Column {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\treturn nil\n\t}\n\treturn m[tbl]\n}\n\nfunc (tm *tableMap) delete(ks, tbl string) {\n\tm := tm.m[ks]\n\tif m == nil {\n\t\treturn\n\t}\n\tdelete(m, tbl)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage piazza\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/--------------------------\n\ntype Thing struct {\n\tId string `json:\"id\"`\n\tValue string `json:\"value\"`\n}\n\ntype ThingService struct {\n\tassert *assert.Assertions\n\tData map[string]string `json:\"data\"`\n\tIdCount int\n}\n\nfunc (service *ThingService) GetThing(id string) *JsonResponse {\n\tval, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: Thing{Id: id, Value: val}}\n}\n\nfunc (service *ThingService) PostThing(thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\tresp := &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t\treturn resp\n\t}\n\tservice.IdCount++\n\tthing.Id = fmt.Sprintf(\"%d\", service.IdCount)\n\tservice.Data[thing.Id] = thing.Value\n\treturn &JsonResponse{StatusCode: http.StatusCreated, Data: thing}\n}\n\nfunc (service *ThingService) PutThing(id string, thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t}\n\tif thing.Id != id {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops - id mismatch\"}\n\t}\n\tservice.Data[thing.Id] = thing.Value\n\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: thing}\n}\n\nfunc (service *ThingService) DeleteThing(id string) *JsonResponse {\n\t_, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\tdelete(service.Data, id)\n\treturn &JsonResponse{StatusCode: http.StatusOK}\n}\n\n\/\/---------------------------------------------------------------\n\ntype ThingServer struct {\n\troutes []RouteData\n\tservice *ThingService\n}\n\nfunc (server *ThingServer) Init(service *ThingService) {\n\n\tserver.service = service\n\n\tserver.routes = []RouteData{\n\t\t{\"GET\", \"\/\", server.handleGetRoot},\n\t\t{\"GET\", \"\/:id\", server.handleGet},\n\t\t{\"POST\", \"\/\", server.handlePost},\n\t\t{\"PUT\", \"\/:id\", server.handlePut},\n\t\t{\"DELETE\", \"\/:id\", server.handleDelete},\n\t}\n}\n\nfunc (server *ThingServer) handleGetRoot(c *gin.Context) {\n\ttype T struct {\n\t\tMessage string\n\t}\n\tmessage := \"Hi.\"\n\tresp := JsonResponse{StatusCode: http.StatusOK, Data: message}\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleGet(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.GetThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePost(c *gin.Context) {\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tresp := server.service.PostThing(&thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePut(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tthing.Id = id\n\tresp := server.service.PutThing(id, &thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleDelete(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.DeleteThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\n\/\/------------------------------------------\n\nfunc Test07Server(t *testing.T) {\n\tassert := assert.New(t)\n\n\trequired := []ServiceName{}\n\tsys, err := NewSystemConfig(PzGoCommon, required)\n\tassert.NoError(err)\n\n\tgenericServer := GenericServer{Sys: sys}\n\n\tservice := &ThingService{\n\t\tassert: assert,\n\t\tIdCount: 0,\n\t\tData: make(map[string]string),\n\t}\n\n\tserver := &ThingServer{}\n\tserver.Init(service)\n\n\turl := \"\"\n\n\t{\n\t\terr = genericServer.Configure(server.routes)\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to configure: \" + err.Error())\n\t\t}\n\t\t_, err = genericServer.Start()\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to start: \" + err.Error())\n\t\t}\n\n\t\turl = \"http:\/\/\" + sys.BindTo\n\t}\n\n\t{\n\t\tvar input *Thing\n\t\tvar output Thing\n\t\tvar jresp *JsonResponse\n\n\t\t\/\/ GET bad\n\t\tjresp = HttpGetJson(url + \"\/mpg\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ POST 1\n\t\tinput = &Thing{Value: \"17\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"17\", output.Value)\n\n\t\t\/\/ POST bad\n\t\tinput = &Thing{Value: \"NULL\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(400, jresp.StatusCode)\n\n\t\t\/\/ POST 2\n\t\tinput = &Thing{Value: \"18\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ GET 2\n\t\tjresp = HttpGetJson(url + \"\/2\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ PUT 1\n\t\tinput = &Thing{Value: \"71\"}\n\t\tjresp = HttpPutJson(url+\"\/1\", input)\n\t\tassert.Equal(200, jresp.StatusCode)\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ DELETE 3\n\t\tjresp = HttpDeleteJson(url + \"\/3\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ DELETE 1\n\t\tjresp = HttpDeleteJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\t}\n\t{\n\t\terr = genericServer.Stop()\n\t\tassert.NoError(err)\n\n\t\t_, err := http.Get(url)\n\t\tassert.Error(err)\n\t}\n}\n<commit_msg>Comment failing test (for now)<commit_after>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage piazza\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/--------------------------\n\ntype Thing struct {\n\tId string `json:\"id\"`\n\tValue string `json:\"value\"`\n}\n\ntype ThingService struct {\n\tassert *assert.Assertions\n\tData map[string]string `json:\"data\"`\n\tIdCount int\n}\n\nfunc (service *ThingService) GetThing(id string) *JsonResponse {\n\tval, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: Thing{Id: id, Value: val}}\n}\n\nfunc (service *ThingService) PostThing(thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\tresp := &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t\treturn resp\n\t}\n\tservice.IdCount++\n\tthing.Id = fmt.Sprintf(\"%d\", service.IdCount)\n\tservice.Data[thing.Id] = thing.Value\n\treturn &JsonResponse{StatusCode: http.StatusCreated, Data: thing}\n}\n\nfunc (service *ThingService) PutThing(id string, thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t}\n\tif thing.Id != id {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops - id mismatch\"}\n\t}\n\tservice.Data[thing.Id] = thing.Value\n\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: thing}\n}\n\nfunc (service *ThingService) DeleteThing(id string) *JsonResponse {\n\t_, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\tdelete(service.Data, id)\n\treturn &JsonResponse{StatusCode: http.StatusOK}\n}\n\n\/\/---------------------------------------------------------------\n\ntype ThingServer struct {\n\troutes []RouteData\n\tservice *ThingService\n}\n\nfunc (server *ThingServer) Init(service *ThingService) {\n\n\tserver.service = service\n\n\tserver.routes = []RouteData{\n\t\t{\"GET\", \"\/\", server.handleGetRoot},\n\t\t{\"GET\", \"\/:id\", server.handleGet},\n\t\t{\"POST\", \"\/\", server.handlePost},\n\t\t{\"PUT\", \"\/:id\", server.handlePut},\n\t\t{\"DELETE\", \"\/:id\", server.handleDelete},\n\t}\n}\n\nfunc (server *ThingServer) handleGetRoot(c *gin.Context) {\n\ttype T struct {\n\t\tMessage string\n\t}\n\tmessage := \"Hi.\"\n\tresp := JsonResponse{StatusCode: http.StatusOK, Data: message}\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleGet(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.GetThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePost(c *gin.Context) {\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tresp := server.service.PostThing(&thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePut(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tthing.Id = id\n\tresp := server.service.PutThing(id, &thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleDelete(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.DeleteThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\n\/\/------------------------------------------\n\nfunc Test07Server(t *testing.T) {\n\tassert := assert.New(t)\n\n\trequired := []ServiceName{}\n\tsys, err := NewSystemConfig(PzGoCommon, required)\n\tassert.NoError(err)\n\n\tgenericServer := GenericServer{Sys: sys}\n\n\tservice := &ThingService{\n\t\tassert: assert,\n\t\tIdCount: 0,\n\t\tData: make(map[string]string),\n\t}\n\n\tserver := &ThingServer{}\n\tserver.Init(service)\n\n\turl := \"\"\n\n\t{\n\t\terr = genericServer.Configure(server.routes)\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to configure: \" + err.Error())\n\t\t}\n\t\t_, err = genericServer.Start()\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to start: \" + err.Error())\n\t\t}\n\n\t\turl = \"http:\/\/\" + sys.BindTo\n\t}\n\n\t{\n\t\tvar input *Thing\n\t\tvar output Thing\n\t\tvar jresp *JsonResponse\n\n\t\t\/\/ GET bad\n\t\tjresp = HttpGetJson(url + \"\/mpg\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ POST 1\n\t\tinput = &Thing{Value: \"17\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"17\", output.Value)\n\n\t\t\/\/ POST bad\n\t\tinput = &Thing{Value: \"NULL\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(400, jresp.StatusCode)\n\n\t\t\/\/ POST 2\n\t\tinput = &Thing{Value: \"18\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ GET 2\n\t\tjresp = HttpGetJson(url + \"\/2\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ PUT 1\n\t\tinput = &Thing{Value: \"71\"}\n\t\tjresp = HttpPutJson(url+\"\/1\", input)\n\t\tassert.Equal(200, jresp.StatusCode)\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ DELETE 3\n\t\tjresp = HttpDeleteJson(url + \"\/3\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ DELETE 1\n\t\tjresp = HttpDeleteJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\t}\n\t{\n\t\terr = genericServer.Stop()\n\t\t\/\/assert.NoError(err)\n\n\t\t_, err := http.Get(url)\n\t\tassert.Error(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package basex generates alpha id (alphanumeric id) for big integers. This\n\/\/ is particularly useful for shortening URLs.\npackage basex\n\nimport (\n\t\"errors\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"unicode\"\n)\n\nvar (\n\tdictionary = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}\n\tbase *big.Int\n)\n\nfunc init() {\n\tbase = big.NewInt(int64(len(dictionary)))\n}\n\n\/\/checks if given string is a valid numeric\nfunc isValidNumeric(s string) bool {\n\tfor _, r := range s {\n\t\tif !unicode.IsNumber(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ checks if s is ascii and printable, aka doesn't include tab, backspace, etc.\nfunc isAsciiPrintable(s string) bool {\n\tfor _, r := range s {\n\t\tif r > unicode.MaxASCII || !unicode.IsPrint(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Encode converts the big integer to alpha id (an alphanumeric id with mixed cases)\nfunc Encode(s string) (string, error) {\n\t\/\/numeric validation\n\tif !isValidNumeric(s) {\n\t\treturn \"\", errors.New(\"Encode string is not a valid numeric\")\n\t}\n\tvar result []byte\n\tvar index int\n\tvar strVal string\n\n\ta := big.NewInt(0)\n\tb := big.NewInt(0)\n\tc := big.NewInt(0)\n\td := big.NewInt(0)\n\n\texponent := 1\n\n\tremaining := big.NewInt(0)\n\tremaining.SetString(s, 10)\n\n\tfor remaining.Cmp(big.NewInt(0)) != 0 {\n\t\ta.Exp(base, big.NewInt(int64(exponent)), nil) \/\/16^1 = 16\n\t\tb = b.Mod(remaining, a) \/\/119 % 16 = 7 | 112 % 256 = 112\n\t\tc = c.Exp(base, big.NewInt(int64(exponent-1)), nil)\n\t\td = d.Div(b, c)\n\n\t\t\/\/if d > dictionary.length, we have a problem. but BigInteger doesnt have\n\t\t\/\/a greater than method :-( hope for the best. theoretically, d is always\n\t\t\/\/an index of the dictionary!\n\t\tstrVal = d.String()\n\t\tindex, _ = strconv.Atoi(strVal)\n\t\tresult = append(result, dictionary[index])\n\t\tremaining = remaining.Sub(remaining, b) \/\/119 - 7 = 112 | 112 - 112 = 0\n\t\texponent = exponent + 1\n\t}\n\n\t\/\/need to reverse it, since the start of the list contains the least significant values\n\treturn string(reverse(result)), nil\n}\n\n\/\/ Decode converts the alpha id to big integer\nfunc Decode(s string) (string, error) {\n\t\/\/Validate if given string is valid\n\tif !isAsciiPrintable(s) {\n\t\treturn \"\", errors.New(\"Decode string is not valid.[a-z, A_Z, 0-9] only allowed\")\n\t}\n\t\/\/reverse it, coz its already reversed!\n\tchars2 := reverse([]byte(s))\n\n\t\/\/for efficiency, make a map\n\tdictMap := make(map[byte]*big.Int)\n\n\tj := 0\n\tfor _, val := range dictionary {\n\t\tdictMap[val] = big.NewInt(int64(j))\n\t\tj = j + 1\n\t}\n\n\tbi := big.NewInt(0)\n\n\texponent := 0\n\ta := big.NewInt(0)\n\tb := big.NewInt(0)\n\tintermed := big.NewInt(0)\n\n\tfor _, c := range chars2 {\n\t\ta = dictMap[c]\n\t\tintermed = intermed.Exp(base, big.NewInt(int64(exponent)), nil)\n\t\tb = b.Mul(intermed, a)\n\t\tbi = bi.Add(bi, b)\n\t\texponent = exponent + 1\n\t}\n\treturn bi.String(), nil\n}\n\nfunc reverse(bs []byte) []byte {\n\tfor i, j := 0, len(bs)-1; i < j; i, j = i+1, j-1 {\n\t\tbs[i], bs[j] = bs[j], bs[i]\n\t}\n\treturn bs\n}\n<commit_msg>fix decode punctuation bug<commit_after>\/\/ Package basex generates alpha id (alphanumeric id) for big integers. This\n\/\/ is particularly useful for shortening URLs.\npackage basex\n\nimport (\n\t\"errors\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"unicode\"\n)\n\nvar (\n\tdictionary = []byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}\n\tbase *big.Int\n)\n\nfunc init() {\n\tbase = big.NewInt(int64(len(dictionary)))\n}\n\n\/\/checks if given string is a valid numeric\nfunc isValidNumeric(s string) bool {\n\tfor _, r := range s {\n\t\tif !unicode.IsNumber(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ checks if s is ascii and printable, aka doesn't include tab, backspace, etc.\nfunc isAsciiPrintable(s string) bool {\n\tfor _, r := range s {\n\t\tif r > unicode.MaxASCII || !unicode.IsPrint(r) || unicode.IsPunct(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Encode converts the big integer to alpha id (an alphanumeric id with mixed cases)\nfunc Encode(s string) (string, error) {\n\t\/\/numeric validation\n\tif !isValidNumeric(s) {\n\t\treturn \"\", errors.New(\"Encode string is not a valid numeric\")\n\t}\n\tvar result []byte\n\tvar index int\n\tvar strVal string\n\n\ta := big.NewInt(0)\n\tb := big.NewInt(0)\n\tc := big.NewInt(0)\n\td := big.NewInt(0)\n\n\texponent := 1\n\n\tremaining := big.NewInt(0)\n\tremaining.SetString(s, 10)\n\n\tfor remaining.Cmp(big.NewInt(0)) != 0 {\n\t\ta.Exp(base, big.NewInt(int64(exponent)), nil) \/\/16^1 = 16\n\t\tb = b.Mod(remaining, a) \/\/119 % 16 = 7 | 112 % 256 = 112\n\t\tc = c.Exp(base, big.NewInt(int64(exponent-1)), nil)\n\t\td = d.Div(b, c)\n\n\t\t\/\/if d > dictionary.length, we have a problem. but BigInteger doesnt have\n\t\t\/\/a greater than method :-( hope for the best. theoretically, d is always\n\t\t\/\/an index of the dictionary!\n\t\tstrVal = d.String()\n\t\tindex, _ = strconv.Atoi(strVal)\n\t\tresult = append(result, dictionary[index])\n\t\tremaining = remaining.Sub(remaining, b) \/\/119 - 7 = 112 | 112 - 112 = 0\n\t\texponent = exponent + 1\n\t}\n\n\t\/\/need to reverse it, since the start of the list contains the least significant values\n\treturn string(reverse(result)), nil\n}\n\n\/\/ Decode converts the alpha id to big integer\nfunc Decode(s string) (string, error) {\n\t\/\/Validate if given string is valid\n\tif !isAsciiPrintable(s) {\n\t\treturn \"\", errors.New(\"Decode string is not valid.[a-z, A_Z, 0-9] only allowed\")\n\t}\n\t\/\/reverse it, coz its already reversed!\n\tchars2 := reverse([]byte(s))\n\n\t\/\/for efficiency, make a map\n\tdictMap := make(map[byte]*big.Int)\n\n\tj := 0\n\tfor _, val := range dictionary {\n\t\tdictMap[val] = big.NewInt(int64(j))\n\t\tj = j + 1\n\t}\n\n\tbi := big.NewInt(0)\n\n\texponent := 0\n\ta := big.NewInt(0)\n\tb := big.NewInt(0)\n\tintermed := big.NewInt(0)\n\n\tfor _, c := range chars2 {\n\t\ta = dictMap[c]\n\t\tintermed = intermed.Exp(base, big.NewInt(int64(exponent)), nil)\n\t\tb = b.Mul(intermed, a)\n\t\tbi = bi.Add(bi, b)\n\t\texponent = exponent + 1\n\t}\n\treturn bi.String(), nil\n}\n\nfunc reverse(bs []byte) []byte {\n\tfor i, j := 0, len(bs)-1; i < j; i, j = i+1, j-1 {\n\t\tbs[i], bs[j] = bs[j], bs[i]\n\t}\n\treturn bs\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage piazza\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/--------------------------\n\ntype Thing struct {\n\tId string `json:\"id\"`\n\tValue string `json:\"value\"`\n}\n\ntype ThingService struct {\n\tassert *assert.Assertions\n\tData map[string]string `json:\"data\"`\n\tIdCount int\n}\n\nfunc (service *ThingService) GetThing(id string) *JsonResponse {\n\tval, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: Thing{Id: id, Value: val}}\n}\n\nfunc (service *ThingService) PostThing(thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\tresp := &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t\treturn resp\n\t}\n\tservice.IdCount++\n\tthing.Id = fmt.Sprintf(\"%d\", service.IdCount)\n\tservice.Data[thing.Id] = thing.Value\n\treturn &JsonResponse{StatusCode: http.StatusCreated, Data: thing}\n}\n\nfunc (service *ThingService) PutThing(id string, thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t}\n\tif thing.Id != id {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops - id mismatch\"}\n\t}\n\tservice.Data[thing.Id] = thing.Value\n\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: thing}\n}\n\nfunc (service *ThingService) DeleteThing(id string) *JsonResponse {\n\t_, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\tdelete(service.Data, id)\n\treturn &JsonResponse{StatusCode: http.StatusOK}\n}\n\n\/\/---------------------------------------------------------------\n\ntype ThingServer struct {\n\troutes []RouteData\n\tservice *ThingService\n}\n\nfunc (server *ThingServer) Init(service *ThingService) {\n\n\tserver.service = service\n\n\tserver.routes = []RouteData{\n\t\t{\"GET\", \"\/\", server.handleGetRoot},\n\t\t{\"GET\", \"\/:id\", server.handleGet},\n\t\t{\"POST\", \"\/\", server.handlePost},\n\t\t{\"PUT\", \"\/:id\", server.handlePut},\n\t\t{\"DELETE\", \"\/:id\", server.handleDelete},\n\t}\n}\n\nfunc (server *ThingServer) handleGetRoot(c *gin.Context) {\n\ttype T struct {\n\t\tMessage string\n\t}\n\tmessage := \"Hi.\"\n\tresp := JsonResponse{StatusCode: http.StatusOK, Data: message}\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleGet(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.GetThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePost(c *gin.Context) {\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tresp := server.service.PostThing(&thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePut(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tthing.Id = id\n\tresp := server.service.PutThing(id, &thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleDelete(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.DeleteThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\n\/\/------------------------------------------\n\nfunc Test07Server(t *testing.T) {\n\tassert := assert.New(t)\n\n\trequired := []ServiceName{}\n\tsys, err := NewSystemConfig(PzGoCommon, required)\n\tassert.NoError(err)\n\n\tgenericServer := GenericServer{Sys: sys}\n\n\tservice := &ThingService{\n\t\tassert: assert,\n\t\tIdCount: 0,\n\t\tData: make(map[string]string),\n\t}\n\n\tserver := &ThingServer{}\n\tserver.Init(service)\n\n\turl := \"\"\n\n\t{\n\t\terr = genericServer.Configure(server.routes)\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to configure: \" + err.Error())\n\t\t}\n\t\t_, err = genericServer.Start()\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to start: \" + err.Error())\n\t\t}\n\n\t\turl = \"http:\/\/\" + sys.BindTo\n\t}\n\n\t{\n\t\tvar input *Thing\n\t\tvar output *Thing\n\t\tvar jresp *JsonResponse\n\n\t\t\/\/ GET bad\n\t\tjresp = HttpGetJson(url + \"\/mpg\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ POST 1\n\t\tinput = &Thing{Value: \"17\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"17\", output.Value)\n\n\t\t\/\/ POST bad\n\t\tinput = &Thing{Value: \"NULL\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(400, jresp.StatusCode)\n\n\t\t\/\/ POST 2\n\t\tinput = &Thing{Value: \"18\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ GET 2\n\t\tjresp = HttpGetJson(url + \"\/2\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ PUT 1\n\t\tinput = &Thing{Value: \"71\"}\n\t\tjresp = HttpPutJson(url+\"\/1\", input)\n\t\tassert.Equal(200, jresp.StatusCode)\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ DELETE 3\n\t\tjresp = HttpDeleteJson(url + \"\/3\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ DELETE 1\n\t\tjresp = HttpDeleteJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\t}\n\t{\n\t\terr = genericServer.Stop()\n\t\tassert.NoError(err)\n\n\t\t_, err := http.Get(url)\n\t\tassert.Error(err)\n\t}\n}\n<commit_msg>Clearly this was wrong, even though it wasn't failing.<commit_after>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage piazza\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/--------------------------\n\ntype Thing struct {\n\tId string `json:\"id\"`\n\tValue string `json:\"value\"`\n}\n\ntype ThingService struct {\n\tassert *assert.Assertions\n\tData map[string]string `json:\"data\"`\n\tIdCount int\n}\n\nfunc (service *ThingService) GetThing(id string) *JsonResponse {\n\tval, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: Thing{Id: id, Value: val}}\n}\n\nfunc (service *ThingService) PostThing(thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\tresp := &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t\treturn resp\n\t}\n\tservice.IdCount++\n\tthing.Id = fmt.Sprintf(\"%d\", service.IdCount)\n\tservice.Data[thing.Id] = thing.Value\n\treturn &JsonResponse{StatusCode: http.StatusCreated, Data: thing}\n}\n\nfunc (service *ThingService) PutThing(id string, thing *Thing) *JsonResponse {\n\tif thing.Value == \"NULL\" {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops\"}\n\t}\n\tif thing.Id != id {\n\t\treturn &JsonResponse{StatusCode: http.StatusBadRequest, Message: \"oops - id mismatch\"}\n\t}\n\tservice.Data[thing.Id] = thing.Value\n\n\treturn &JsonResponse{StatusCode: http.StatusOK, Data: thing}\n}\n\nfunc (service *ThingService) DeleteThing(id string) *JsonResponse {\n\t_, ok := service.Data[id]\n\tif !ok {\n\t\treturn &JsonResponse{StatusCode: http.StatusNotFound}\n\t}\n\tdelete(service.Data, id)\n\treturn &JsonResponse{StatusCode: http.StatusOK}\n}\n\n\/\/---------------------------------------------------------------\n\ntype ThingServer struct {\n\troutes []RouteData\n\tservice *ThingService\n}\n\nfunc (server *ThingServer) Init(service *ThingService) {\n\n\tserver.service = service\n\n\tserver.routes = []RouteData{\n\t\t{\"GET\", \"\/\", server.handleGetRoot},\n\t\t{\"GET\", \"\/:id\", server.handleGet},\n\t\t{\"POST\", \"\/\", server.handlePost},\n\t\t{\"PUT\", \"\/:id\", server.handlePut},\n\t\t{\"DELETE\", \"\/:id\", server.handleDelete},\n\t}\n}\n\nfunc (server *ThingServer) handleGetRoot(c *gin.Context) {\n\ttype T struct {\n\t\tMessage string\n\t}\n\tmessage := \"Hi.\"\n\tresp := JsonResponse{StatusCode: http.StatusOK, Data: message}\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleGet(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.GetThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePost(c *gin.Context) {\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tresp := server.service.PostThing(&thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handlePut(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tvar thing Thing\n\terr := c.BindJSON(&thing)\n\tif err != nil {\n\t\tresp := &JsonResponse{StatusCode: http.StatusInternalServerError, Message: err.Error()}\n\t\tc.JSON(resp.StatusCode, resp)\n\t}\n\tthing.Id = id\n\tresp := server.service.PutThing(id, &thing)\n\tc.JSON(resp.StatusCode, resp)\n}\n\nfunc (server *ThingServer) handleDelete(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tresp := server.service.DeleteThing(id)\n\tc.JSON(resp.StatusCode, resp)\n}\n\n\/\/------------------------------------------\n\nfunc Test07Server(t *testing.T) {\n\tassert := assert.New(t)\n\n\trequired := []ServiceName{}\n\tsys, err := NewSystemConfig(PzGoCommon, required)\n\tassert.NoError(err)\n\n\tgenericServer := GenericServer{Sys: sys}\n\n\tservice := &ThingService{\n\t\tassert: assert,\n\t\tIdCount: 0,\n\t\tData: make(map[string]string),\n\t}\n\n\tserver := &ThingServer{}\n\tserver.Init(service)\n\n\turl := \"\"\n\n\t{\n\t\terr = genericServer.Configure(server.routes)\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to configure: \" + err.Error())\n\t\t}\n\t\t_, err = genericServer.Start()\n\t\tif err != nil {\n\t\t\tassert.FailNow(\"server failed to start: \" + err.Error())\n\t\t}\n\n\t\turl = \"http:\/\/\" + sys.BindTo\n\t}\n\n\t{\n\t\tvar input *Thing\n\t\tvar output Thing\n\t\tvar jresp *JsonResponse\n\n\t\t\/\/ GET bad\n\t\tjresp = HttpGetJson(url + \"\/mpg\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ POST 1\n\t\tinput = &Thing{Value: \"17\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"17\", output.Value)\n\n\t\t\/\/ POST bad\n\t\tinput = &Thing{Value: \"NULL\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(400, jresp.StatusCode)\n\n\t\t\/\/ POST 2\n\t\tinput = &Thing{Value: \"18\"}\n\t\tjresp = HttpPostJson(url, input)\n\t\tassert.Equal(201, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ GET 2\n\t\tjresp = HttpGetJson(url + \"\/2\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"2\", output.Id)\n\t\tassert.EqualValues(\"18\", output.Value)\n\n\t\t\/\/ PUT 1\n\t\tinput = &Thing{Value: \"71\"}\n\t\tjresp = HttpPutJson(url+\"\/1\", input)\n\t\tassert.Equal(200, jresp.StatusCode)\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\terr = SuperConverter(jresp.Data, &output)\n\t\tassert.NoError(err)\n\t\tassert.EqualValues(\"1\", output.Id)\n\t\tassert.EqualValues(\"71\", output.Value)\n\n\t\t\/\/ DELETE 3\n\t\tjresp = HttpDeleteJson(url + \"\/3\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\n\t\t\/\/ DELETE 1\n\t\tjresp = HttpDeleteJson(url + \"\/1\")\n\t\tassert.Equal(200, jresp.StatusCode)\n\n\t\t\/\/ GET 1\n\t\tjresp = HttpGetJson(url + \"\/1\")\n\t\tassert.Equal(404, jresp.StatusCode)\n\t}\n\t{\n\t\terr = genericServer.Stop()\n\t\tassert.NoError(err)\n\n\t\t_, err := http.Get(url)\n\t\tassert.Error(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ingestion\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.skia.org\/infra\/go\/fileutil\"\n\t\"go.skia.org\/infra\/go\/gcs\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n)\n\n\/\/ FileSearcher is an interface around the logic for polling for files that may have been\n\/\/ missed via the typical event-based ingestion.\ntype FileSearcher interface {\n\t\/\/ SearchForFiles returns a slice of files that appear in the given time range.\n\tSearchForFiles(ctx context.Context, start, end time.Time) []string\n}\n\n\/\/ Source represents a place that an ingester can get a file to process.\ntype Source interface {\n\t\/\/ GetReader returns a reader to the content. If there is a problem (e.g. file does not exist)\n\t\/\/ an error will be returned.\n\tGetReader(ctx context.Context, name string) (io.ReadCloser, error)\n\t\/\/ HandlesFile returns true if this file is handled by the given source.\n\tHandlesFile(name string) bool\n}\n\n\/\/ GCSSource represents a bucket and sublocation in Google Cloud Storage.\ntype GCSSource struct {\n\tClient *storage.Client\n\tBucket string\n\tPrefix string\n}\n\n\/\/ HandlesFile returns true if this file matches the prefix of the configured GCS source.\nfunc (s *GCSSource) HandlesFile(name string) bool {\n\treturn strings.HasPrefix(name, s.Prefix)\n}\n\n\/\/ SearchForFiles uses the standard pattern of named, hourly folders to search for all files\n\/\/ in the given time range.\nfunc (s *GCSSource) SearchForFiles(ctx context.Context, start, end time.Time) []string {\n\tctx, span := trace.StartSpan(ctx, \"ingestion_SearchForFiles\")\n\tdefer span.End()\n\tdirs := fileutil.GetHourlyDirs(s.Prefix, start, end)\n\n\tvar files []string\n\tfor _, dir := range dirs {\n\t\terr := gcs.AllFilesInDir(s.Client, s.Bucket, dir, func(item *storage.ObjectAttrs) {\n\t\t\tif strings.HasSuffix(item.Name, \".json\") && item.Updated.After(start) {\n\t\t\t\tfiles = append(files, item.Name)\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tsklog.Errorf(\"Error occurred while retrieving files from %s\/%s: %s\", s.Bucket, dir, err)\n\t\t}\n\t}\n\treturn files\n}\n\n\/\/ GetReader returns a ReadCloser with the data from this file or an error.\nfunc (s *GCSSource) GetReader(ctx context.Context, name string) (io.ReadCloser, error) {\n\treturn s.Client.Bucket(s.Bucket).Object(name).NewReader(ctx)\n}\n\nfunc (s *GCSSource) String() string {\n\treturn \"gs:\/\/\" + s.Bucket + \"\/\" + s.Prefix\n}\n\n\/\/ Validate returns true if all fields are filled in.\nfunc (s *GCSSource) Validate() bool {\n\treturn s.Client != nil && s.Bucket != \"\" && s.Prefix != \"\"\n}\n\n\/\/ Make sure GCSSource implements the Source interface.\nvar _ Source = (*GCSSource)(nil)\n<commit_msg>[gold] Add a few more ingestion logs<commit_after>package ingestion\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.skia.org\/infra\/go\/fileutil\"\n\t\"go.skia.org\/infra\/go\/gcs\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n)\n\n\/\/ FileSearcher is an interface around the logic for polling for files that may have been\n\/\/ missed via the typical event-based ingestion.\ntype FileSearcher interface {\n\t\/\/ SearchForFiles returns a slice of files that appear in the given time range.\n\tSearchForFiles(ctx context.Context, start, end time.Time) []string\n}\n\n\/\/ Source represents a place that an ingester can get a file to process.\ntype Source interface {\n\t\/\/ GetReader returns a reader to the content. If there is a problem (e.g. file does not exist)\n\t\/\/ an error will be returned.\n\tGetReader(ctx context.Context, name string) (io.ReadCloser, error)\n\t\/\/ HandlesFile returns true if this file is handled by the given source.\n\tHandlesFile(name string) bool\n}\n\n\/\/ GCSSource represents a bucket and sublocation in Google Cloud Storage.\ntype GCSSource struct {\n\tClient *storage.Client\n\tBucket string\n\tPrefix string\n}\n\n\/\/ HandlesFile returns true if this file matches the prefix of the configured GCS source.\nfunc (s *GCSSource) HandlesFile(name string) bool {\n\treturn strings.HasPrefix(name, s.Prefix)\n}\n\n\/\/ SearchForFiles uses the standard pattern of named, hourly folders to search for all files\n\/\/ in the given time range.\nfunc (s *GCSSource) SearchForFiles(ctx context.Context, start, end time.Time) []string {\n\tctx, span := trace.StartSpan(ctx, \"ingestion_SearchForFiles\")\n\tdefer span.End()\n\tdirs := fileutil.GetHourlyDirs(s.Prefix, start, end)\n\n\tvar files []string\n\tfor _, dir := range dirs {\n\t\terr := gcs.AllFilesInDir(s.Client, s.Bucket, dir, func(item *storage.ObjectAttrs) {\n\t\t\tif strings.HasSuffix(item.Name, \".json\") {\n\t\t\t\tfiles = append(files, item.Name)\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tsklog.Errorf(\"Error occurred while retrieving files from %s\/%s: %s\", s.Bucket, dir, err)\n\t\t}\n\t}\n\tif len(files) > 0 {\n\t\tsklog.Infof(\"First GCS file in backup range: %s\", files[0])\n\t\tsklog.Infof(\"Last GCS file in backup range: %s\", files[len(files)-1])\n\t}\n\treturn files\n}\n\n\/\/ GetReader returns a ReadCloser with the data from this file or an error.\nfunc (s *GCSSource) GetReader(ctx context.Context, name string) (io.ReadCloser, error) {\n\treturn s.Client.Bucket(s.Bucket).Object(name).NewReader(ctx)\n}\n\nfunc (s *GCSSource) String() string {\n\treturn \"gs:\/\/\" + s.Bucket + \"\/\" + s.Prefix\n}\n\n\/\/ Validate returns true if all fields are filled in.\nfunc (s *GCSSource) Validate() bool {\n\treturn s.Client != nil && s.Bucket != \"\" && s.Prefix != \"\"\n}\n\n\/\/ Make sure GCSSource implements the Source interface.\nvar _ Source = (*GCSSource)(nil)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\tboilerplatedir = flag.String(\"boilerplate-dir\", \".\", \"Boilerplate directory for boilerplate files\")\n\trootdir = flag.String(\"rootdir\", \"..\/..\/\", \"Root directory to examine\")\n\tverbose = flag.Bool(\"v\", false, \"Verbose\")\n)\n\nfunc main() {\n\tflag.Parse()\n\trefs, err := extensionToBoilerplate(*boilerplatedir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles, err := filesToCheck(*rootdir, refs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, file := range files {\n\t\tpass, err := filePasses(file, refs[filepath.Ext(file)])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tif !pass {\n\t\t\tpath, err := filepath.Abs(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tfmt.Println(path)\n\t\t}\n\t}\n\n}\n\n\/\/ extensionToBoilerplate returns a map of file extension to required boilerplate text\nfunc extensionToBoilerplate(dir string) (map[string][]byte, error) {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\tre := regexp.MustCompile(`\\.txt`)\n\t\textension := strings.ToLower(filepath.Ext(re.ReplaceAllString(filename, \"\")))\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tre = regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\tif *verbose {\n\t\tdir, err := filepath.Abs(dir)\n\t\tif err != nil {\n\t\t\treturn refs, err\n\t\t}\n\t\tfmt.Printf(\"Found %v boilerplates in %v for the following extensions:\", len(refs), dir)\n\t\tfor ext := range refs {\n\t\t\tfmt.Printf(\" %v\", ext)\n\t\t}\n\t\tfmt.Println()\n\t}\n\treturn refs, nil\n}\n\n\/\/ filePasses checks whether the processed file is valid. Returning false means that the file does not the proper boilerplate template.\nfunc filePasses(filename string, expectedBoilerplate []byte) (bool, error) {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := filepath.Ext(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \".go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \".sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(expectedBoilerplate) {\n\t\treturn false, nil\n\t}\n\n\tdata = data[:len(expectedBoilerplate)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, expectedBoilerplate), nil\n}\n\n\/\/ filesToCheck returns the list of the filers that will be checked for the boilerplate.\nfunc filesToCheck(rootDir string, extensions map[string][]byte) ([]string, error) {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\/\/ remove current workdir from the beginig of the path in case it matches the skipped path\n\t\tcwd, _ := os.Getwd()\n\t\t\/\/ replace \"\\\" with \"\\\\\" for windows style path\n\t\tre := regexp.MustCompile(`\\\\`)\n\t\tre = regexp.MustCompile(`^` + re.ReplaceAllString(cwd, `\\\\`))\n\t\tif !info.IsDir() && !skippedPaths.MatchString(re.ReplaceAllString(filepath.Dir(path), \"\")) {\n\t\t\tif extensions[strings.ToLower(filepath.Ext(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif *verbose {\n\t\trootDir, err = filepath.Abs(rootDir)\n\t\tif err != nil {\n\t\t\treturn outFiles, err\n\t\t}\n\t\tfmt.Printf(\"Found %v files to check in %v\\n\\n\", len(outFiles), rootDir)\n\t}\n\treturn outFiles, nil\n}\n<commit_msg>Fix comment for extensionToBoilerplate in boilerplate.go<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\tboilerplatedir = flag.String(\"boilerplate-dir\", \".\", \"Boilerplate directory for boilerplate files\")\n\trootdir = flag.String(\"rootdir\", \"..\/..\/\", \"Root directory to examine\")\n\tverbose = flag.Bool(\"v\", false, \"Verbose\")\n)\n\nfunc main() {\n\tflag.Parse()\n\trefs, err := extensionToBoilerplate(*boilerplatedir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles, err := filesToCheck(*rootdir, refs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, file := range files {\n\t\tpass, err := filePasses(file, refs[filepath.Ext(file)])\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tif !pass {\n\t\t\tpath, err := filepath.Abs(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tfmt.Println(path)\n\t\t}\n\t}\n\n}\n\n\/\/ extensionToBoilerplate returns a map of file extension to required boilerplate text.\nfunc extensionToBoilerplate(dir string) (map[string][]byte, error) {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\tre := regexp.MustCompile(`\\.txt`)\n\t\textension := strings.ToLower(filepath.Ext(re.ReplaceAllString(filename, \"\")))\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tre = regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\tif *verbose {\n\t\tdir, err := filepath.Abs(dir)\n\t\tif err != nil {\n\t\t\treturn refs, err\n\t\t}\n\t\tfmt.Printf(\"Found %v boilerplates in %v for the following extensions:\", len(refs), dir)\n\t\tfor ext := range refs {\n\t\t\tfmt.Printf(\" %v\", ext)\n\t\t}\n\t\tfmt.Println()\n\t}\n\treturn refs, nil\n}\n\n\/\/ filePasses checks whether the processed file is valid. Returning false means that the file does not the proper boilerplate template.\nfunc filePasses(filename string, expectedBoilerplate []byte) (bool, error) {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := filepath.Ext(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \".go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \".sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(expectedBoilerplate) {\n\t\treturn false, nil\n\t}\n\n\tdata = data[:len(expectedBoilerplate)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, expectedBoilerplate), nil\n}\n\n\/\/ filesToCheck returns the list of the filers that will be checked for the boilerplate.\nfunc filesToCheck(rootDir string, extensions map[string][]byte) ([]string, error) {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\/\/ remove current workdir from the beginig of the path in case it matches the skipped path\n\t\tcwd, _ := os.Getwd()\n\t\t\/\/ replace \"\\\" with \"\\\\\" for windows style path\n\t\tre := regexp.MustCompile(`\\\\`)\n\t\tre = regexp.MustCompile(`^` + re.ReplaceAllString(cwd, `\\\\`))\n\t\tif !info.IsDir() && !skippedPaths.MatchString(re.ReplaceAllString(filepath.Dir(path), \"\")) {\n\t\t\tif extensions[strings.ToLower(filepath.Ext(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif *verbose {\n\t\trootDir, err = filepath.Abs(rootDir)\n\t\tif err != nil {\n\t\t\treturn outFiles, err\n\t\t}\n\t\tfmt.Printf(\"Found %v files to check in %v\\n\\n\", len(outFiles), rootDir)\n\t}\n\treturn outFiles, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"testing\"\n\t\"encoding\/csv\"\n\t\"strconv\"\n\t\"io\"\n\t\"log\"\n\t\"runtime\"\n)\n\ntype NCollection struct {\n\tN int\n\tResults []testing.BenchmarkResult\n}\n\nfunc (c NCollection) ToStringSlice() []string {\n\tresult := []string{\n\t\tstrconv.Itoa(c.N),\n\t}\n\n\tfor _, nres := range c.Results {\n\t\tresult = append(result, strconv.Itoa(int(nres.NsPerOp())))\n\t}\n\n\treturn result\n}\n\ntype BenchFunc func(n int, b *testing.B)\n\ntype Benchem struct {\n\tF BenchFunc\n\tName string\n}\n\nfunc NewBenchem(name string, f BenchFunc) *Benchem {\n\treturn &Benchem{\n\t\tF: f,\n\t\tName: name,\n\t}\n}\n\nfunc (bm *Benchem) Run(n int) testing.BenchmarkResult {\n\treturn testing.Benchmark(func(b *testing.B) {\n\t\tbm.F(n, b)\n\t})\n}\n\ntype BenchCollection struct {\n\tBenches []*Benchem\n\tResults []NCollection\n}\n\nfunc NewBenchCollection() *BenchCollection {\n\treturn &BenchCollection {\n\t\tBenches: []*Benchem{},\n\t\tResults: []NCollection{},\n\t}\n}\n\nfunc (c *BenchCollection) AddFunc(name string, f BenchFunc) {\n\tc.Benches = append(c.Benches, NewBenchem(name, f))\n}\n\nfunc (c *BenchCollection) Run(start, end, step int) {\n\n\tc.Results = []NCollection{}\n\n\tfor iterations := start; iterations <= end; iterations += step {\n\n\t\tcollection := NCollection{\n\t\t\tN: iterations,\n\t\t\tResults: []testing.BenchmarkResult{},\n\t\t}\n\n\t\tfor _, bench := range c.Benches {\n\t\t\tcollection.Results = append(collection.Results, bench.Run(iterations))\n\n\t\t\truntime.GC()\n\t\t}\n\n\t\tlog.Printf(\"Completed run for %d iterations\", iterations)\n\n\t\tc.Results = append(c.Results, collection)\n\t}\n}\n\nfunc (c *BenchCollection) BenchNames() []string {\n\tresult := []string{}\n\n\tfor _, bench := range c.Benches {\n\t\tresult = append(result, bench.Name)\n\t}\n\n\treturn result\n}\n\nfunc (c *BenchCollection) Save(w io.Writer) {\n\twriter := csv.NewWriter(w)\n\n\tcsvTitles := []string{\"N\"}\n\tcsvTitles = append(csvTitles, c.BenchNames()...)\n\n\twriter.Write(csvTitles)\n\n\tfor _, result := range c.Results {\n\t\twriter.Write(result.ToStringSlice())\n\t}\n\n\twriter.Flush()\n}<commit_msg>Rearrange data collection Prevents adjacent benchmarks from affecting each other<commit_after>package main\n\nimport (\n\t\"testing\"\n\t\"encoding\/csv\"\n\t\"strconv\"\n\t\"io\"\n\t\"log\"\n\t\"runtime\"\n)\n\ntype NCollection struct {\n\tN int\n\tResults []testing.BenchmarkResult\n}\n\nfunc (c *NCollection) ToStringSlice() []string {\n\tresult := []string{\n\t\tstrconv.Itoa(c.N),\n\t}\n\n\tfor _, nres := range c.Results {\n\t\tresult = append(result, strconv.Itoa(int(nres.NsPerOp())))\n\t}\n\n\treturn result\n}\n\ntype BenchFunc func(n int, b *testing.B)\n\ntype Benchem struct {\n\tF BenchFunc\n\tName string\n}\n\nfunc NewBenchem(name string, f BenchFunc) *Benchem {\n\treturn &Benchem{\n\t\tF: f,\n\t\tName: name,\n\t}\n}\n\nfunc (bm *Benchem) Run(n int) testing.BenchmarkResult {\n\treturn testing.Benchmark(func(b *testing.B) {\n\t\tbm.F(n, b)\n\t})\n}\n\ntype BenchCollection struct {\n\tBenches []*Benchem\n\tResults []*NCollection\n}\n\nfunc NewBenchCollection() *BenchCollection {\n\treturn &BenchCollection {\n\t\tBenches: []*Benchem{},\n\t\tResults: []*NCollection{},\n\t}\n}\n\nfunc (c *BenchCollection) AddFunc(name string, f BenchFunc) {\n\tc.Benches = append(c.Benches, NewBenchem(name, f))\n}\n\nfunc (c *BenchCollection) Run(start, end, step int) {\n\n\tc.Results = []*NCollection{}\n\n\tfor iterations := start; iterations <= end; iterations += step {\n\n\t\tcollection := &NCollection{\n\t\t\tN: iterations,\n\t\t\tResults: []testing.BenchmarkResult{},\n\t\t}\n\t\tc.Results = append(c.Results, collection)\n\n\t}\n\n\tfor _, bench := range c.Benches {\n\t\tfor _, collection := range c.Results {\n\t\t\tcollection.Results = append(collection.Results, bench.Run(collection.N))\n\n\t\t\truntime.GC()\n\n\t\t\tlog.Printf(\"Completed run for %d iterations\", collection.N)\n\t\t}\n\n\t\tlog.Printf(\"Completed bench: %s\", bench.Name)\n\t}\n\n}\n\nfunc (c *BenchCollection) BenchNames() []string {\n\tresult := []string{}\n\n\tfor _, bench := range c.Benches {\n\t\tresult = append(result, bench.Name)\n\t}\n\n\treturn result\n}\n\nfunc (c *BenchCollection) Save(w io.Writer) {\n\twriter := csv.NewWriter(w)\n\n\tcsvTitles := []string{\"N\"}\n\tcsvTitles = append(csvTitles, c.BenchNames()...)\n\n\twriter.Write(csvTitles)\n\n\tfor _, result := range c.Results {\n\t\twriter.Write(result.ToStringSlice())\n\t}\n\n\twriter.Flush()\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc main() {\n\n\tvar num int64\n\tvar target []string\n\tvar lang string\n\n\tif len(os.Args) > 3 {\n\t\tnum, _ = strconv.ParseInt(os.Args[1], 10, 64)\n\t\tlang = os.Args[2]\n\t\ttarget = os.Args[3:]\n\t} else {\n\t\tlog.Fatal(\"number of iterations, lang, target program\")\n\t}\n\n\tvar i int64 \/\/ XXX\n\n\tstart := time.Now()\n\n\tfor i = 0; i < num; i++ {\n\n\t\tfmt.Printf(\"%d \", i)\n\n\t\tout, err := exec.Command(lang, target...).Output()\n\n\t\tif i == 0 {\n\t\t\tfmt.Printf(os.Stderr, \"%s\\n\", out)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t}\n\n\telapsed := time.Since(start).Seconds()\n\tavg := elapsed \/ float64(num)\n\tfmt.Printf(\"\\n\\\"%s %s\\\" %d run(s) wallclock=%0.2f avg=%0.2f sec(s)\\n\", lang, target, num, float64(elapsed), avg)\n\n}\n<commit_msg>checkpoint<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc main() {\n\n\tvar num int64\n\tvar target []string\n\tvar lang string\n\n\tif len(os.Args) > 3 {\n\t\tnum, _ = strconv.ParseInt(os.Args[1], 10, 64)\n\t\tlang = os.Args[2]\n\t\ttarget = os.Args[3:]\n\t} else {\n\t\tlog.Fatal(\"number of iterations, lang, target program\")\n\t}\n\n\tvar i int64 \/\/ XXX\n\n\tstart := time.Now()\n\n\tfor i = 0; i < num; i++ {\n\n\t\tfmt.Printf(\"%d \", i)\n\n\t\tout, err := exec.Command(lang, target...).Output()\n\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", out)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t}\n\n\telapsed := time.Since(start).Seconds()\n\tavg := elapsed \/ float64(num)\n\tfmt.Printf(\"\\n\\\"%s %s\\\" %d run(s) wallclock=%0.2f avg=%0.2f sec(s)\\n\", lang, target, num, float64(elapsed), avg)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/rusenask\/keel\/types\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ ErrVersionTagMissing - tag missing error\nvar ErrVersionTagMissing = errors.New(\"version tag is missing\")\n\n\/\/ GetVersion - parse version\nfunc GetVersion(version string) (*types.Version, error) {\n\n\tv, err := semver.NewVersion(version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ TODO: probably make it customazible\n\tprefix := \"\"\n\tif strings.HasPrefix(version, \"v\") {\n\t\tprefix = \"v\"\n\t}\n\n\treturn &types.Version{\n\t\tMajor: v.Major(),\n\t\tMinor: v.Minor(),\n\t\tPatch: v.Patch(),\n\t\tPreRelease: string(v.Prerelease()),\n\t\tMetadata: v.Metadata(),\n\t\tPrefix: prefix,\n\t}, nil\n}\n\n\/\/ GetVersionFromImageName - get version from image name\nfunc GetVersionFromImageName(name string) (*types.Version, error) {\n\tparts := strings.Split(name, \":\")\n\tif len(parts) > 1 {\n\t\treturn GetVersion(parts[1])\n\t}\n\n\treturn nil, ErrVersionTagMissing\n}\n\n\/\/ GetImageNameAndVersion - get name and version\nfunc GetImageNameAndVersion(name string) (string, *types.Version, error) {\n\tparts := strings.Split(name, \":\")\n\tif len(parts) > 0 {\n\t\tv, err := GetVersion(parts[1])\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\n\t\treturn parts[0], v, nil\n\t}\n\n\treturn \"\", nil, ErrVersionTagMissing\n}\n\n\/\/ NewAvailable - takes version and current tags. Checks whether there is a new version in the list of tags\n\/\/ and returns it as well as newAvailable bool\nfunc NewAvailable(current string, tags []string) (newVersion string, newAvailable bool, err error) {\n\n\tcurrentVersion, err := semver.NewVersion(current)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\n\tif len(tags) == 0 {\n\t\treturn \"\", false, nil\n\t}\n\n\tvar vs []*semver.Version\n\tfor _, r := range tags {\n\t\tv, err := semver.NewVersion(r)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"tag\": r,\n\t\t\t}).Error(\"failed to parse tag\")\n\t\t\tcontinue\n\n\t\t}\n\n\t\tvs = append(vs, v)\n\t}\n\n\tif len(vs) == 0 {\n\t\tlog.Error(\"no versions available\")\n\t\treturn \"\", false, nil\n\t}\n\n\tsort.Sort(sort.Reverse(semver.Collection(vs)))\n\n\tif currentVersion.LessThan(vs[0]) {\n\t\treturn vs[0].String(), true, nil\n\t}\n\treturn \"\", false, nil\n}\n\n\/\/ ShouldUpdate - checks whether update is needed\nfunc ShouldUpdate(current *types.Version, new *types.Version, policy types.PolicyType) (bool, error) {\n\tif policy == types.PolicyTypeForce {\n\t\treturn true, nil\n\t}\n\n\tcurrentVersion, err := semver.NewVersion(current.String())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse current version: %s\", err)\n\t}\n\tnewVersion, err := semver.NewVersion(new.String())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse new version: %s\", err)\n\t}\n\n\t\/\/ new version is not higher than current - do nothing\n\tif !currentVersion.LessThan(newVersion) {\n\t\treturn false, nil\n\t}\n\n\tswitch policy {\n\tcase types.PolicyTypeAll:\n\t\treturn true, nil\n\tcase types.PolicyTypeMajor:\n\t\treturn newVersion.Major() > currentVersion.Major(), nil\n\tcase types.PolicyTypeMinor:\n\t\treturn newVersion.Major() == currentVersion.Major() && newVersion.Minor() > currentVersion.Minor(), nil\n\tcase types.PolicyTypePatch:\n\t\treturn newVersion.Major() == currentVersion.Major() && newVersion.Minor() == currentVersion.Minor() && newVersion.Patch() > currentVersion.Patch(), nil\n\t}\n\treturn false, nil\n}\n<commit_msg>info > debug<commit_after>package version\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/rusenask\/keel\/types\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ ErrVersionTagMissing - tag missing error\nvar ErrVersionTagMissing = errors.New(\"version tag is missing\")\n\n\/\/ ErrInvalidSemVer is returned a version is found to be invalid when\n\/\/ being parsed.\nvar ErrInvalidSemVer = errors.New(\"invalid semantic version\")\n\n\/\/ GetVersion - parse version\nfunc GetVersion(version string) (*types.Version, error) {\n\n\tv, err := semver.NewVersion(version)\n\tif err != nil {\n\t\tif err == semver.ErrInvalidSemVer {\n\t\t\treturn nil, ErrInvalidSemVer\n\t\t}\n\t\treturn nil, err\n\t}\n\t\/\/ TODO: probably make it customazible\n\tprefix := \"\"\n\tif strings.HasPrefix(version, \"v\") {\n\t\tprefix = \"v\"\n\t}\n\n\treturn &types.Version{\n\t\tMajor: v.Major(),\n\t\tMinor: v.Minor(),\n\t\tPatch: v.Patch(),\n\t\tPreRelease: string(v.Prerelease()),\n\t\tMetadata: v.Metadata(),\n\t\tPrefix: prefix,\n\t}, nil\n}\n\n\/\/ GetVersionFromImageName - get version from image name\nfunc GetVersionFromImageName(name string) (*types.Version, error) {\n\tparts := strings.Split(name, \":\")\n\tif len(parts) > 1 {\n\t\treturn GetVersion(parts[1])\n\t}\n\n\treturn nil, ErrVersionTagMissing\n}\n\n\/\/ GetImageNameAndVersion - get name and version\nfunc GetImageNameAndVersion(name string) (string, *types.Version, error) {\n\tparts := strings.Split(name, \":\")\n\tif len(parts) > 0 {\n\t\tv, err := GetVersion(parts[1])\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\n\t\treturn parts[0], v, nil\n\t}\n\n\treturn \"\", nil, ErrVersionTagMissing\n}\n\n\/\/ NewAvailable - takes version and current tags. Checks whether there is a new version in the list of tags\n\/\/ and returns it as well as newAvailable bool\nfunc NewAvailable(current string, tags []string) (newVersion string, newAvailable bool, err error) {\n\n\tcurrentVersion, err := semver.NewVersion(current)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\n\tif len(tags) == 0 {\n\t\treturn \"\", false, nil\n\t}\n\n\tvar vs []*semver.Version\n\tfor _, r := range tags {\n\t\tv, err := semver.NewVersion(r)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"tag\": r,\n\t\t\t}).Debug(\"failed to parse tag\")\n\t\t\tcontinue\n\n\t\t}\n\n\t\tvs = append(vs, v)\n\t}\n\n\tif len(vs) == 0 {\n\t\tlog.Debug(\"no versions available\")\n\t\treturn \"\", false, nil\n\t}\n\n\tsort.Sort(sort.Reverse(semver.Collection(vs)))\n\n\tif currentVersion.LessThan(vs[0]) {\n\t\treturn vs[0].String(), true, nil\n\t}\n\treturn \"\", false, nil\n}\n\n\/\/ ShouldUpdate - checks whether update is needed\nfunc ShouldUpdate(current *types.Version, new *types.Version, policy types.PolicyType) (bool, error) {\n\tif policy == types.PolicyTypeForce {\n\t\treturn true, nil\n\t}\n\n\tcurrentVersion, err := semver.NewVersion(current.String())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse current version: %s\", err)\n\t}\n\tnewVersion, err := semver.NewVersion(new.String())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse new version: %s\", err)\n\t}\n\n\t\/\/ new version is not higher than current - do nothing\n\tif !currentVersion.LessThan(newVersion) {\n\t\treturn false, nil\n\t}\n\n\tswitch policy {\n\tcase types.PolicyTypeAll:\n\t\treturn true, nil\n\tcase types.PolicyTypeMajor:\n\t\treturn newVersion.Major() > currentVersion.Major(), nil\n\tcase types.PolicyTypeMinor:\n\t\treturn newVersion.Major() == currentVersion.Major() && newVersion.Minor() > currentVersion.Minor(), nil\n\tcase types.PolicyTypePatch:\n\t\treturn newVersion.Major() == currentVersion.Major() && newVersion.Minor() == currentVersion.Minor() && newVersion.Patch() > currentVersion.Patch(), nil\n\t}\n\treturn false, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Sonia Hamilton. All rights reserved. Use of this\n\/\/ source code is governed by a BSD-style license that can be found in\n\/\/ the LICENSE file.\n\npackage gosnmp\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar _ = fmt.Sprintf(\"dummy\") \/\/ dummy\n\n\/\/ Tests in alphabetical order of function being tested\n\n\/\/ -----------------------------------------------------------------------------\n\nvar testsMarshalLength = []struct {\n\tlength int\n\texpected []byte\n}{\n\t{1, []byte{0x01}},\n\t{129, []byte{0x81, 0x81}},\n}\n\nfunc TestMarshalLength(t *testing.T) {\n\tfor i, test := range testsMarshalLength {\n\t\ttest_bytes, err := marshalLength(test.length)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d: length %d got err %v\", i, test.length, err)\n\t\t}\n\t\tif !reflect.DeepEqual(test_bytes, test.expected) {\n\t\t\tt.Errorf(\"%d: length %d got |%x| expected |%x|\",\n\t\t\t\ti, test.length, test_bytes, test.expected)\n\t\t}\n\t}\n}\n\n\/\/ -----------------------------------------------------------------------------\n\nvar testsPartition = []struct {\n\tcurrent_position int\n\tpartition_size int\n\tsliceLength int\n\tok bool\n}{\n\t{-1, 3, 8, false}, \/\/ test out of range\n\t{8, 3, 8, false}, \/\/ test out of range\n\t{0, 3, 8, false}, \/\/ test 0-7\/3 per doco\n\t{1, 3, 8, false},\n\t{2, 3, 8, true},\n\t{3, 3, 8, false},\n\t{4, 3, 8, false},\n\t{5, 3, 8, true},\n\t{6, 3, 8, false},\n\t{7, 3, 8, true},\n\t{-1, 1, 3, false}, \/\/ partition size of one\n\t{0, 1, 3, true},\n\t{1, 1, 3, true},\n\t{2, 1, 3, true},\n\t{3, 1, 3, false},\n}\n\nfunc TestPartition(t *testing.T) {\n\tfor i, test := range testsPartition {\n\t\tok := Partition(test.current_position, test.partition_size, test.sliceLength)\n\t\tif ok != test.ok {\n\t\t\tt.Errorf(\"#%d: Bad result: %v (expected %v)\", i, ok, test.ok)\n\t\t}\n\t}\n}\n\n\/\/ ---------------------------------------------------------------------\n\nvar testsSnmpVersionString = []struct {\n\tin SnmpVersion\n\tout string\n}{\n\t{Version1, \"1\"},\n\t{Version2c, \"2c\"},\n}\n\nfunc TestSnmpVersionString(t *testing.T) {\n\tfor i, test := range testsSnmpVersionString {\n\t\tresult := test.in.String()\n\t\tif result != test.out {\n\t\t\tt.Errorf(\"#%d, got %v expected %v\", i, result, test.out)\n\t\t}\n\t}\n}\n\n\/\/ ---------------------------------------------------------------------\n<commit_msg>golint: misc_test.go<commit_after>\/\/ Copyright 2013 Sonia Hamilton. All rights reserved. Use of this\n\/\/ source code is governed by a BSD-style license that can be found in\n\/\/ the LICENSE file.\n\npackage gosnmp\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar _ = fmt.Sprintf(\"dummy\") \/\/ dummy\n\n\/\/ Tests in alphabetical order of function being tested\n\n\/\/ -----------------------------------------------------------------------------\n\nvar testsMarshalLength = []struct {\n\tlength int\n\texpected []byte\n}{\n\t{1, []byte{0x01}},\n\t{129, []byte{0x81, 0x81}},\n}\n\nfunc TestMarshalLength(t *testing.T) {\n\tfor i, test := range testsMarshalLength {\n\t\ttestBytes, err := marshalLength(test.length)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d: length %d got err %v\", i, test.length, err)\n\t\t}\n\t\tif !reflect.DeepEqual(testBytes, test.expected) {\n\t\t\tt.Errorf(\"%d: length %d got |%x| expected |%x|\",\n\t\t\t\ti, test.length, testBytes, test.expected)\n\t\t}\n\t}\n}\n\n\/\/ -----------------------------------------------------------------------------\n\nvar testsPartition = []struct {\n\tcurrentPosition int\n\tpartitionSize int\n\tsliceLength int\n\tok bool\n}{\n\t{-1, 3, 8, false}, \/\/ test out of range\n\t{8, 3, 8, false}, \/\/ test out of range\n\t{0, 3, 8, false}, \/\/ test 0-7\/3 per doco\n\t{1, 3, 8, false},\n\t{2, 3, 8, true},\n\t{3, 3, 8, false},\n\t{4, 3, 8, false},\n\t{5, 3, 8, true},\n\t{6, 3, 8, false},\n\t{7, 3, 8, true},\n\t{-1, 1, 3, false}, \/\/ partition size of one\n\t{0, 1, 3, true},\n\t{1, 1, 3, true},\n\t{2, 1, 3, true},\n\t{3, 1, 3, false},\n}\n\nfunc TestPartition(t *testing.T) {\n\tfor i, test := range testsPartition {\n\t\tok := Partition(test.currentPosition, test.partitionSize, test.sliceLength)\n\t\tif ok != test.ok {\n\t\t\tt.Errorf(\"#%d: Bad result: %v (expected %v)\", i, ok, test.ok)\n\t\t}\n\t}\n}\n\n\/\/ ---------------------------------------------------------------------\n\nvar testsSnmpVersionString = []struct {\n\tin SnmpVersion\n\tout string\n}{\n\t{Version1, \"1\"},\n\t{Version2c, \"2c\"},\n}\n\nfunc TestSnmpVersionString(t *testing.T) {\n\tfor i, test := range testsSnmpVersionString {\n\t\tresult := test.in.String()\n\t\tif result != test.out {\n\t\t\tt.Errorf(\"#%d, got %v expected %v\", i, result, test.out)\n\t\t}\n\t}\n}\n\n\/\/ ---------------------------------------------------------------------\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage influxdb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/monitoring-app\/watcher\/lib\/constants\"\n\t\"github.com\/gravitational\/monitoring-app\/watcher\/lib\/utils\"\n\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ Rollup is the rollup configuration\ntype Rollup struct {\n\t\/\/ Retention is the retention policy for this rollup\n\tRetention string `json:\"retention\"`\n\t\/\/ Measurement is the name of the measurement to run rollup on\n\tMeasurement string `json:\"measurement\"`\n\t\/\/ Name is both the name of the rollup query and the name of the\n\t\/\/ new measurement rollup data will be inserted into\n\tName string `json:\"name\"`\n\t\/\/ Functions is a list of functions for rollup calculation\n\tFunctions []Function `json:\"functions\"`\n}\n\n\/\/ Check verifies that rollup configuration is correct\nfunc (r Rollup) Check() error {\n\tif !utils.OneOf(r.Retention, constants.AllRetentions) {\n\t\treturn trace.BadParameter(\n\t\t\t\"invalid Retention, must be one of: %v\", constants.AllRetentions)\n\t}\n\tif r.Measurement == \"\" {\n\t\treturn trace.BadParameter(\"parameter Measurement is missing\")\n\t}\n\tif r.Name == \"\" {\n\t\treturn trace.BadParameter(\"parameter Name is missing\")\n\t}\n\tif len(r.Functions) == 0 {\n\t\treturn trace.BadParameter(\"parameter Functions is empty\")\n\t}\n\tfor _, rollup := range r.Functions {\n\t\terr := rollup.Check()\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Function defines a single rollup function\ntype Function struct {\n\t\/\/ Function is the function name (mean, max, etc.)\n\tFunction string `json:\"function\"`\n\t\/\/ Field is the name of the field to apply the function to\n\tField string `json:\"field\"`\n\t\/\/ Alias is the optional alias for the new field in the rollup table\n\tAlias string `json:\"alias,omitempty\"`\n}\n\n\/\/ Check verifies the function configuration is correct\nfunc (f Function) Check() error {\n\tif !utils.OneOf(f.Function, constants.AllFunctions) &&\n\t\t!strings.HasPrefix(f.Function, constants.FunctionPercentile) {\n\t\treturn trace.BadParameter(\n\t\t\t\"invalid Function, must be one of: %v, or start with %q\",\n\t\t\tconstants.AllFunctions, constants.FunctionPercentile)\n\t}\n\tif f.Field == \"\" {\n\t\treturn trace.BadParameter(\"parameter Field is missing\")\n\t}\n\treturn nil\n}\n\n\/\/ buildQuery returns a string with InfluxDB query based on the rollup configuration\nfunc buildQuery(r Rollup) (string, error) {\n\tvar functions []string\n\tfor _, fn := range r.Functions {\n\t\tfunction, err := buildFunction(fn)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\tfunctions = append(functions, function)\n\t}\n\n\tvar b bytes.Buffer\n\terr := queryTemplate.Execute(&b, map[string]string{\n\t\t\"name\": r.Name,\n\t\t\"database\": constants.InfluxDBDatabase,\n\t\t\"functions\": strings.Join(functions, \", \"),\n\t\t\"retention_into\": r.Retention,\n\t\t\"measurement_into\": r.Name,\n\t\t\"retention_from\": constants.InfluxDBRetentionPolicy,\n\t\t\"measurement_from\": r.Measurement,\n\t\t\"interval\": constants.RetentionToInterval[r.Retention],\n\t})\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\treturn b.String(), nil\n}\n\n\/\/ buildFunction returns a function string based on the provided function configuration\nfunc buildFunction(f Function) (string, error) {\n\talias := f.Alias\n\tif alias == \"\" {\n\t\talias = f.Field\n\t}\n\tif strings.HasPrefix(f.Function, constants.FunctionPercentile) {\n\t\tvalue, err := parseValueComposedFunc(f.Function)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"%v(%v, %v) as %v\", constants.FunctionPercentile, f.Field, value, alias), nil\n\t}\n\tif strings.HasPrefix(f.Function, constants.FunctionBottom) {\n\t\tvalue, err := parseValueComposedFunc(f.Function)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"%v(%v, %v) as %v\", constants.FunctionBottom, f.Field, value, alias), nil\n\t}\n\tif strings.HasPrefix(f.Function, constants.FunctionTop) {\n\t\tvalue, err := parseValueComposedFunc(f.Function)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"%v(%v, %v) as %v\", constants.FunctionTop, f.Field, value, alias), nil\n\t}\n\tif strings.HasPrefix(f.Function, constants.FunctionSample) {\n\t\tvalue, err := parseValueComposedFunc(f.Function)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"%v(%v, %v) as %v\", constants.FunctionSample, f.Field, value, alias), nil\n\t}\n\treturn fmt.Sprintf(\"%v(%v) as %v\", f.Function, f.Field, alias), nil\n}\n\n\/\/ parseValueComposedFunc parses the additional arg value from the composed functions strings like \"percentile_90\"\nfunc parseValueComposedFunc(data string) (string, error) {\n\tparts := strings.Split(data, \"_\")\n\tif len(parts) != 2 {\n\t\treturn \"\", trace.BadParameter(\n\t\t\t\"percentile function must have format like 'percentile_90', 'top_10', 'bottom_10' or 'sample_1000' \")\n\t}\n\tfuncName := parts[0]\n\n\tvalue, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tif funcName == \"percentile\" && value < 0 || value > 100 {\n\t\treturn \"\", trace.BadParameter(\n\t\t\t\"percentile value must be between 0 and 100 (inclusive)\")\n\t} else if (funcName == \"top\" || funcName == \"bottom\" || funcName == \"sample\") && value < 0 {\n\t\treturn \"\", trace.BadParameter(\n\t\t\t\"Top, Bottom and Sample value must be greater or equal to 0\")\n\t}\n\treturn parts[1], nil\n}\n\nvar (\n\t\/\/ queryTemplate is the template of the InfluxDB rollup query\n\tqueryTemplate = template.Must(template.New(\"query\").Parse(\n\t\t`create continuous query \"{{.name}}\" on {{.database}} begin select {{.functions}} into {{.database}}.\"{{.retention_into}}\".\"{{.measurement_into}}\" from {{.database}}.\"{{.retention_from}}\".\"{{.measurement_from}}\" group by *, time({{.interval}}) end`))\n)\n<commit_msg>Added functions simplifaction\/cleaning as suggested by Dmitry<commit_after>\/*\nCopyright 2017 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage influxdb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gravitational\/monitoring-app\/watcher\/lib\/constants\"\n\t\"github.com\/gravitational\/monitoring-app\/watcher\/lib\/utils\"\n\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ Rollup is the rollup configuration\ntype Rollup struct {\n\t\/\/ Retention is the retention policy for this rollup\n\tRetention string `json:\"retention\"`\n\t\/\/ Measurement is the name of the measurement to run rollup on\n\tMeasurement string `json:\"measurement\"`\n\t\/\/ Name is both the name of the rollup query and the name of the\n\t\/\/ new measurement rollup data will be inserted into\n\tName string `json:\"name\"`\n\t\/\/ Functions is a list of functions for rollup calculation\n\tFunctions []Function `json:\"functions\"`\n}\n\n\/\/ Check verifies that rollup configuration is correct\nfunc (r Rollup) Check() error {\n\tif !utils.OneOf(r.Retention, constants.AllRetentions) {\n\t\treturn trace.BadParameter(\n\t\t\t\"invalid Retention, must be one of: %v\", constants.AllRetentions)\n\t}\n\tif r.Measurement == \"\" {\n\t\treturn trace.BadParameter(\"parameter Measurement is missing\")\n\t}\n\tif r.Name == \"\" {\n\t\treturn trace.BadParameter(\"parameter Name is missing\")\n\t}\n\tif len(r.Functions) == 0 {\n\t\treturn trace.BadParameter(\"parameter Functions is empty\")\n\t}\n\tfor _, rollup := range r.Functions {\n\t\terr := rollup.Check()\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Function defines a single rollup function\ntype Function struct {\n\t\/\/ Function is the function name (mean, max, etc.)\n\tFunction string `json:\"function\"`\n\t\/\/ Field is the name of the field to apply the function to\n\tField string `json:\"field\"`\n\t\/\/ Alias is the optional alias for the new field in the rollup table\n\tAlias string `json:\"alias,omitempty\"`\n}\n\n\/\/ Check verifies the function configuration is correct\nfunc (f Function) Check() error {\n\tif !utils.OneOf(f.Function, constants.AllFunctions) &&\n\t\t!strings.HasPrefix(f.Function, constants.FunctionPercentile) {\n\t\treturn trace.BadParameter(\n\t\t\t\"invalid Function, must be one of: %v, or start with %q\",\n\t\t\tconstants.AllFunctions, constants.FunctionPercentile)\n\t}\n\tif f.Field == \"\" {\n\t\treturn trace.BadParameter(\"parameter Field is missing\")\n\t}\n\treturn nil\n}\n\n\/\/ buildQuery returns a string with InfluxDB query based on the rollup configuration\nfunc buildQuery(r Rollup) (string, error) {\n\tvar functions []string\n\tfor _, fn := range r.Functions {\n\t\tfunction, err := buildFunction(fn)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\tfunctions = append(functions, function)\n\t}\n\n\tvar b bytes.Buffer\n\terr := queryTemplate.Execute(&b, map[string]string{\n\t\t\"name\": r.Name,\n\t\t\"database\": constants.InfluxDBDatabase,\n\t\t\"functions\": strings.Join(functions, \", \"),\n\t\t\"retention_into\": r.Retention,\n\t\t\"measurement_into\": r.Name,\n\t\t\"retention_from\": constants.InfluxDBRetentionPolicy,\n\t\t\"measurement_from\": r.Measurement,\n\t\t\"interval\": constants.RetentionToInterval[r.Retention],\n\t})\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\treturn b.String(), nil\n}\n\n\/\/ define which functions needs an additional parameter\nvar funcsWithParams = []string{\n\tconstants.FunctionPercentile,\n\tconstants.FunctionBottom,\n\tconstants.FunctionTop,\n\tconstants.FunctionSample,\n}\n\n\/\/ buildFunction returns a function string based on the provided function configuration\nfunc buildFunction(f Function) (string, error) {\n\talias := f.Alias\n\tif alias == \"\" {\n\t\talias = f.Field\n\t}\n\n\t\/\/ split function name, based on the \"_\" separator (eg: percentile_99, top_10, ecc)\n\tfuncAndValue := strings.Split(f.Function, \"_\")\n\tif len(funcAndValue) == 2 && isFuncWithParams(funcAndValue[0]) {\n\t\tfuncName := funcAndValue[0]\n\t\tparam := funcAndValue[1]\n\t\terr := validateParam(funcName, param)\n\t\tif err != nil {\n\t\t\treturn \"\", trace.Wrap(err)\n\t\t}\n\t\treturn fmt.Sprintf(\"%v(%v, %v) as %v\", funcName, f.Field, param, alias), nil\n\t}\n\n\treturn fmt.Sprintf(\"%v(%v) as %v\", f.Function, f.Field, alias), nil\n}\n\n\/\/ validateParam checks the function parameter for validity.\nfunc validateParam(funcName, param string) error {\n\t\/\/ convert parameter value as it's always going to be an Integer\n\tvalue, err := strconv.Atoi(param)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tswitch funcName {\n\tcase constants.FunctionPercentile:\n\t\tif value < 0 || value > 100 {\n\t\t\treturn trace.BadParameter(\n\t\t\t\t\"Percentile value must be between 0 and 100 (inclusive)\")\n\t\t}\n\tcase constants.FunctionTop, constants.FunctionBottom, constants.FunctionSample:\n\t\tif value < 0 {\n\t\t\treturn trace.BadParameter(\n\t\t\t\t\"Top, Bottom and Sample value must be greater or equal to 0\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar (\n\t\/\/ queryTemplate is the template of the InfluxDB rollup query\n\tqueryTemplate = template.Must(template.New(\"query\").Parse(\n\t\t`create continuous query \"{{.name}}\" on {{.database}} begin select {{.functions}} into {{.database}}.\"{{.retention_into}}\".\"{{.measurement_into}}\" from {{.database}}.\"{{.retention_from}}\".\"{{.measurement_from}}\" group by *, time({{.interval}}) end`))\n)\n<|endoftext|>"} {"text":"<commit_before>package routes\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/boatilus\/peppercorn\/cookie\"\n\t\"github.com\/boatilus\/peppercorn\/db\"\n\t\"github.com\/boatilus\/peppercorn\/middleware\"\n\t\"github.com\/boatilus\/peppercorn\/paths\"\n\t\"github.com\/boatilus\/peppercorn\/posts\"\n\t\"github.com\/boatilus\/peppercorn\/pwreset\"\n\t\"github.com\/boatilus\/peppercorn\/session\"\n\t\"github.com\/boatilus\/peppercorn\/templates\"\n\t\"github.com\/boatilus\/peppercorn\/users\"\n\t\"github.com\/boatilus\/peppercorn\/utility\"\n\t\"github.com\/pressly\/chi\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ IndexGetHandler is called for the `\/` (index) route and directs the user either to the first\n\/\/ page, or to the last page the user viewed.\nfunc IndexGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif len(u.LastViewed) == 0 {\n\t\t\/\/ There's no value or we can't read from it, so we'll just sent the user to the first page.\n\t\thttp.Redirect(w, req, \"\/page\/1\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tn, err := posts.GetOffset(u.LastViewed)\n\tif err != nil {\n\t\t\/\/ There's no value or we can't read from it, so we'll just sent the user to the first page.\n\t\thttp.Redirect(w, req, \"\/page\/1\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tpn := utility.ComputePage(n, u.PPP)\n\turi := fmt.Sprintf(\"\/page\/%d#%s\", pn, u.LastViewed)\n\n\thttp.Redirect(w, req, uri, http.StatusSeeOther)\n}\n\nfunc SignInGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttemplates.SignIn.Execute(w, nil)\n}\n\nfunc SignOutGetHandler(w http.ResponseWriter, req *http.Request) {\n\t\/\/ Destroy session\n\tc, err := req.Cookie(session.GetKey())\n\tif err != nil {\n\n\t}\n\n\tsid, err := cookie.Decode(c)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Destroying session for SID \\\"%s\\\"\", sid)\n\n\t\/\/ Setting the Max-Age attribute to -1 effectively destroys the cookie, but we'll also null the\n\t\/\/ content if the client decides to ignore Max-Age\n\tc.MaxAge = -1\n\tc.Value = \"\"\n\n\thttp.SetCookie(w, c)\n\n\tif err = session.Destroy(sid); err != nil {\n\t\tlog.Printf(\"Error in deleting session with SID \\\"%s\\\"\", sid)\n\t}\n\n\thttp.Redirect(w, req, paths.Get.SignIn, http.StatusTemporaryRedirect)\n}\n\n\/\/ Of the format: \/page\/{num}\nfunc PageGetHandler(w http.ResponseWriter, req *http.Request) {\n\tvar data struct {\n\t\tCurrentUser *users.User\n\t\tPostCount db.CountType\n\t\tPosts []posts.Zip\n\t\tPageNum db.CountType\n\t\tTotalPages db.CountType\n\t}\n\n\tdata.CurrentUser = users.FromContext(req.Context())\n\tif data.CurrentUser == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar err error\n\n\t\/\/ TODO: We can run these following two queries in parallel.\n\tdata.PostCount, err = posts.Count()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdata.TotalPages = utility.ComputePage(data.PostCount, data.CurrentUser.PPP)\n\n\tnum := chi.URLParam(req, \"num\")\n\n\tif num == \"latest\" {\n\t\tdata.PageNum = data.TotalPages\n\t} else {\n\t\tpageNum, err := strconv.ParseInt(num, 10, 32)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tdata.PageNum = db.CountType(pageNum)\n\t}\n\n\t\/\/ To get the first post to load for this page, we must take into account the user's\n\t\/\/ posts-per-page setting.\n\tbegin := ((data.PageNum * data.CurrentUser.PPP) - data.CurrentUser.PPP) + 1\n\n\tps, err := posts.GetRange(begin, data.CurrentUser.PPP)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Load the user's timezone setting so we can provide correct post timestamps.\n\tloc, err := time.LoadLocation(data.CurrentUser.Timezone)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnow := time.Now()\n\n\tfor _, p := range ps {\n\t\tu := users.Users[p.Author]\n\n\t\tzip := posts.Zip{\n\t\t\tID: p.ID,\n\t\t\tAuthorID: p.Author,\n\t\t\tContent: p.Content,\n\t\t\tTime: p.Time,\n\t\t\tAvatar: u.Avatar,\n\t\t\tAuthorName: u.Name,\n\t\t\tTitle: u.Title,\n\t\t\tCount: begin,\n\t\t\tPrettyTime: utility.FormatTime(p.Time.In(loc), now),\n\t\t}\n\n\t\tdata.Posts = append(data.Posts, zip)\n\n\t\tbegin++\n\t}\n\n\t\/\/ Now that we've successfully gathered the data needed to render, we want to mark the most\n\t\/\/ recent post the user's seen. For now, we'll do this even if it's far back in time, but ideally,\n\t\/\/ we should only do so if it's newer than what the `LastViewed` property currently reflects.\n\tnumPosts := len(data.Posts)\n\tlast := data.Posts[numPosts-1]\n\n\tif data.CurrentUser.LastViewed != last.ID {\n\t\tdata.CurrentUser.LastViewed = last.ID\n\t\tif err := users.Update(data.CurrentUser); err != nil {\n\t\t\t\/\/ This is a non-essential task, so simply log the error.\n\t\t\tlog.Printf(\"Could not update property LastViewed [%s] on user %q [%s]: %s\", last.ID, data.CurrentUser.ID, data.CurrentUser.Name, err.Error())\n\t\t}\n\t}\n\n\ttemplates.Index.Execute(w, data)\n}\n\n\/\/ SingleHandler is called for GET requests for the `\/post\/{num}` route and renders a single post\n\/\/ by its computed post number.\nfunc SingleGetHandler(w http.ResponseWriter, req *http.Request) {\n\tnum := chi.URLParam(req, \"num\")\n\n\tn, err := strconv.ParseInt(num, 10, 32)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Bad request for route '\/post\/%v'. Expected '%v' to be a positive integer\", num, num)\n\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tp, err := posts.GetOne(db.CountType(n))\n\tif err != nil {\n\t\thttp.NotFound(w, req)\n\t\treturn\n\t}\n\n\tio.WriteString(w, p.Content)\n}\n\n\/\/ SingleRemoveGetHandler is called for GET requests for the `\/post\/{num}\/delete` route and removes\n\/\/ a single post, if the user is authorized to do so.\nfunc SingleRemoveGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ BUG: Due to some weirdness in Chi, the param here is \"num\", but we're actually getting supplied\n\t\/\/ a post ID. We can't change the route param name due to this.\n\t\/\/ See: https:\/\/github.com\/pressly\/chi\/issues\/78\n\tid := chi.URLParam(req, \"num\")\n\n\tif len(id) == 0 {\n\t\thttp.Error(w, \"routes: ID cannot be empty\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tp, err := posts.GetByID(id)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif p.Author != u.ID {\n\t\tmsg := fmt.Sprintf(\"routes: user %q cannot delete post of user %q\", u.ID, p.Author)\n\n\t\thttp.Error(w, msg, http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif err := posts.Deactivate(id); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, req, \"\/page\/latest\", http.StatusSeeOther)\n}\n\nfunc CountGetHandler(w http.ResponseWriter, _ *http.Request) {\n\tn, err := posts.Count()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tio.WriteString(w, strconv.Itoa(int(n)))\n}\n\n\/\/ MeGetHandler is the handler\nfunc MeGetHandler(w http.ResponseWriter, req *http.Request) {\n\tvid := middleware.GetVisitorID(req.Context())\n\tlog.Print(vid)\n\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tss, err := session.GetByUser(u.ID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Reduce the session data retrieved into something more easily-consumable.\n\ttype sessionData struct {\n\t\tDevice string\n\t\tIP string\n\t\tTimestamp string\n\t}\n\n\tvar sessions []sessionData\n\n\t\/\/ Load the user's timezone setting so we can provide correct post timestamps.\n\tloc, err := time.LoadLocation(u.Timezone)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnow := time.Now()\n\n\tfor i := range ss {\n\t\tdata := utility.ParseUserAgent(ss[i].UserAgent)\n\n\t\ts := sessionData{\n\t\t\tDevice: fmt.Sprintf(\"%s on %s\", data.Browser, data.OS),\n\t\t\tIP: ss[i].IP,\n\t\t\tTimestamp: utility.FormatTime(ss[i].Timestamp.In(loc), now),\n\t\t}\n\n\t\tsessions = append(sessions, s)\n\t}\n\n\tobEmail := utility.ObfuscateEmail(u.Email) \/\/ Obfuscate email\n\tpppOptions := viper.GetStringSlice(\"ppp_options\")\n\n\to := struct {\n\t\tFlash string\n\t\tObfuscatedEmail string\n\t\tName string\n\t\tTitle string\n\t\tAvatar string\n\t\tPPPOptions []string\n\t\tPPP string\n\t\tTimezones []string\n\t\tUserTimezone string\n\t\tSessions []sessionData\n\t}{\n\t\tsession.GetFlash(u.ID),\n\t\tobEmail,\n\t\tu.Name,\n\t\tu.Title,\n\t\tu.Avatar,\n\t\tpppOptions,\n\t\tstrconv.FormatInt(int64(u.PPP), 10),\n\t\tviper.GetStringSlice(\"timezones\"),\n\t\tu.Timezone,\n\t\tsessions,\n\t}\n\n\ttemplates.Me.Execute(w, &o)\n}\n\n\/\/ MeRevokeGetHandler is the handler called from the \/me route to destroy a single session by :num,\n\/\/ or all sessions with \"all\"\nfunc MeRevokeGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ti, err := strconv.ParseInt(chi.URLParam(req, \"num\"), 10, 32)\n\tif err != nil || i < 0 {\n\t\thttp.Error(w, \"\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := session.DestroyByIndex(u.ID, db.CountType(i)); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, req, paths.Get.Me, http.StatusSeeOther)\n}\n\n\/\/ ForgotGetHandler is the route called to send the user a password reset email.\nfunc ForgotGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttemplates.Forgot.Execute(w, nil)\n}\n\n\/\/ ResetPasswordGetHandler is the route called to reset a user's password.\nfunc ResetPasswordGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttype data struct {\n\t\tFlashMessage string\n\t\tToken string\n\t}\n\n\ttoken := req.FormValue(\"token\")\n\tif token == \"\" {\n\t\ttemplates.ResetPassword.Execute(w, data{FlashMessage: \"Invalid reset token.\"})\n\t\treturn\n\t}\n\n\tvalid, _ := pwreset.ValidateToken(token)\n\n\tif !valid {\n\t\ttemplates.ResetPassword.Execute(w, data{FlashMessage: \"Reset is expired or doesn't exist.\"})\n\t\treturn\n\t}\n\n\ttemplates.ResetPassword.Execute(w, data{Token: token})\n}\n<commit_msg>user settings: display session IPs without port<commit_after>package routes\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/boatilus\/peppercorn\/cookie\"\n\t\"github.com\/boatilus\/peppercorn\/db\"\n\t\"github.com\/boatilus\/peppercorn\/middleware\"\n\t\"github.com\/boatilus\/peppercorn\/paths\"\n\t\"github.com\/boatilus\/peppercorn\/posts\"\n\t\"github.com\/boatilus\/peppercorn\/pwreset\"\n\t\"github.com\/boatilus\/peppercorn\/session\"\n\t\"github.com\/boatilus\/peppercorn\/templates\"\n\t\"github.com\/boatilus\/peppercorn\/users\"\n\t\"github.com\/boatilus\/peppercorn\/utility\"\n\t\"github.com\/pressly\/chi\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ IndexGetHandler is called for the `\/` (index) route and directs the user either to the first\n\/\/ page, or to the last page the user viewed.\nfunc IndexGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif len(u.LastViewed) == 0 {\n\t\t\/\/ There's no value or we can't read from it, so we'll just sent the user to the first page.\n\t\thttp.Redirect(w, req, \"\/page\/1\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tn, err := posts.GetOffset(u.LastViewed)\n\tif err != nil {\n\t\t\/\/ There's no value or we can't read from it, so we'll just sent the user to the first page.\n\t\thttp.Redirect(w, req, \"\/page\/1\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tpn := utility.ComputePage(n, u.PPP)\n\turi := fmt.Sprintf(\"\/page\/%d#%s\", pn, u.LastViewed)\n\n\thttp.Redirect(w, req, uri, http.StatusSeeOther)\n}\n\nfunc SignInGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttemplates.SignIn.Execute(w, nil)\n}\n\nfunc SignOutGetHandler(w http.ResponseWriter, req *http.Request) {\n\t\/\/ Destroy session\n\tc, err := req.Cookie(session.GetKey())\n\tif err != nil {\n\n\t}\n\n\tsid, err := cookie.Decode(c)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Destroying session for SID \\\"%s\\\"\", sid)\n\n\t\/\/ Setting the Max-Age attribute to -1 effectively destroys the cookie, but we'll also null the\n\t\/\/ content if the client decides to ignore Max-Age\n\tc.MaxAge = -1\n\tc.Value = \"\"\n\n\thttp.SetCookie(w, c)\n\n\tif err = session.Destroy(sid); err != nil {\n\t\tlog.Printf(\"Error in deleting session with SID \\\"%s\\\"\", sid)\n\t}\n\n\thttp.Redirect(w, req, paths.Get.SignIn, http.StatusTemporaryRedirect)\n}\n\n\/\/ Of the format: \/page\/{num}\nfunc PageGetHandler(w http.ResponseWriter, req *http.Request) {\n\tvar data struct {\n\t\tCurrentUser *users.User\n\t\tPostCount db.CountType\n\t\tPosts []posts.Zip\n\t\tPageNum db.CountType\n\t\tTotalPages db.CountType\n\t}\n\n\tdata.CurrentUser = users.FromContext(req.Context())\n\tif data.CurrentUser == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar err error\n\n\t\/\/ TODO: We can run these following two queries in parallel.\n\tdata.PostCount, err = posts.Count()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdata.TotalPages = utility.ComputePage(data.PostCount, data.CurrentUser.PPP)\n\n\tnum := chi.URLParam(req, \"num\")\n\n\tif num == \"latest\" {\n\t\tdata.PageNum = data.TotalPages\n\t} else {\n\t\tpageNum, err := strconv.ParseInt(num, 10, 32)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tdata.PageNum = db.CountType(pageNum)\n\t}\n\n\t\/\/ To get the first post to load for this page, we must take into account the user's\n\t\/\/ posts-per-page setting.\n\tbegin := ((data.PageNum * data.CurrentUser.PPP) - data.CurrentUser.PPP) + 1\n\n\tps, err := posts.GetRange(begin, data.CurrentUser.PPP)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Load the user's timezone setting so we can provide correct post timestamps.\n\tloc, err := time.LoadLocation(data.CurrentUser.Timezone)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnow := time.Now()\n\n\tfor _, p := range ps {\n\t\tu := users.Users[p.Author]\n\n\t\tzip := posts.Zip{\n\t\t\tID: p.ID,\n\t\t\tAuthorID: p.Author,\n\t\t\tContent: p.Content,\n\t\t\tTime: p.Time,\n\t\t\tAvatar: u.Avatar,\n\t\t\tAuthorName: u.Name,\n\t\t\tTitle: u.Title,\n\t\t\tCount: begin,\n\t\t\tPrettyTime: utility.FormatTime(p.Time.In(loc), now),\n\t\t}\n\n\t\tdata.Posts = append(data.Posts, zip)\n\n\t\tbegin++\n\t}\n\n\t\/\/ Now that we've successfully gathered the data needed to render, we want to mark the most\n\t\/\/ recent post the user's seen. For now, we'll do this even if it's far back in time, but ideally,\n\t\/\/ we should only do so if it's newer than what the `LastViewed` property currently reflects.\n\tnumPosts := len(data.Posts)\n\tlast := data.Posts[numPosts-1]\n\n\tif data.CurrentUser.LastViewed != last.ID {\n\t\tdata.CurrentUser.LastViewed = last.ID\n\t\tif err := users.Update(data.CurrentUser); err != nil {\n\t\t\t\/\/ This is a non-essential task, so simply log the error.\n\t\t\tlog.Printf(\"Could not update property LastViewed [%s] on user %q [%s]: %s\", last.ID, data.CurrentUser.ID, data.CurrentUser.Name, err.Error())\n\t\t}\n\t}\n\n\ttemplates.Index.Execute(w, data)\n}\n\n\/\/ SingleHandler is called for GET requests for the `\/post\/{num}` route and renders a single post\n\/\/ by its computed post number.\nfunc SingleGetHandler(w http.ResponseWriter, req *http.Request) {\n\tnum := chi.URLParam(req, \"num\")\n\n\tn, err := strconv.ParseInt(num, 10, 32)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Bad request for route '\/post\/%v'. Expected '%v' to be a positive integer\", num, num)\n\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tp, err := posts.GetOne(db.CountType(n))\n\tif err != nil {\n\t\thttp.NotFound(w, req)\n\t\treturn\n\t}\n\n\tio.WriteString(w, p.Content)\n}\n\n\/\/ SingleRemoveGetHandler is called for GET requests for the `\/post\/{num}\/delete` route and removes\n\/\/ a single post, if the user is authorized to do so.\nfunc SingleRemoveGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ BUG: Due to some weirdness in Chi, the param here is \"num\", but we're actually getting supplied\n\t\/\/ a post ID. We can't change the route param name due to this.\n\t\/\/ See: https:\/\/github.com\/pressly\/chi\/issues\/78\n\tid := chi.URLParam(req, \"num\")\n\n\tif len(id) == 0 {\n\t\thttp.Error(w, \"routes: ID cannot be empty\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tp, err := posts.GetByID(id)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif p.Author != u.ID {\n\t\tmsg := fmt.Sprintf(\"routes: user %q cannot delete post of user %q\", u.ID, p.Author)\n\n\t\thttp.Error(w, msg, http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif err := posts.Deactivate(id); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, req, \"\/page\/latest\", http.StatusSeeOther)\n}\n\nfunc CountGetHandler(w http.ResponseWriter, _ *http.Request) {\n\tn, err := posts.Count()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tio.WriteString(w, strconv.Itoa(int(n)))\n}\n\n\/\/ MeGetHandler is the handler\nfunc MeGetHandler(w http.ResponseWriter, req *http.Request) {\n\tvid := middleware.GetVisitorID(req.Context())\n\tlog.Print(vid)\n\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tss, err := session.GetByUser(u.ID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Reduce the session data retrieved into something more easily-consumable.\n\ttype sessionData struct {\n\t\tDevice string\n\t\tIP string\n\t\tTimestamp string\n\t}\n\n\tvar sessions []sessionData\n\n\t\/\/ Load the user's timezone setting so we can provide correct post timestamps.\n\tloc, err := time.LoadLocation(u.Timezone)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnow := time.Now()\n\n\tfor i := range ss {\n\t\tdata := utility.ParseUserAgent(ss[i].UserAgent)\n\n\t\tvar ip string\n\n\t\t\/\/ Running locally, an IP is displayed like \"[::1]:57305\". Ergo, if we're running locally,\n\t\t\/\/ just pass the IP unchanged. Otherwise, split off the port from the IP address and only\n\t\t\/\/ display that to the user.\n\t\tif ss[i].IP[0] == '[' {\n\t\t\tip = ss[i].IP\n\t\t} else {\n\t\t\tip = strings.Split(ss[i].IP, \":\")[0]\n\t\t}\n\n\t\ts := sessionData{\n\t\t\tDevice: fmt.Sprintf(\"%s on %s\", data.Browser, data.OS),\n\t\t\tIP: ip,\n\t\t\tTimestamp: utility.FormatTime(ss[i].Timestamp.In(loc), now),\n\t\t}\n\n\t\tsessions = append(sessions, s)\n\t}\n\n\tobEmail := utility.ObfuscateEmail(u.Email) \/\/ Obfuscate email\n\tpppOptions := viper.GetStringSlice(\"ppp_options\")\n\n\to := struct {\n\t\tFlash string\n\t\tObfuscatedEmail string\n\t\tName string\n\t\tTitle string\n\t\tAvatar string\n\t\tPPPOptions []string\n\t\tPPP string\n\t\tTimezones []string\n\t\tUserTimezone string\n\t\tSessions []sessionData\n\t}{\n\t\tsession.GetFlash(u.ID),\n\t\tobEmail,\n\t\tu.Name,\n\t\tu.Title,\n\t\tu.Avatar,\n\t\tpppOptions,\n\t\tstrconv.FormatInt(int64(u.PPP), 10),\n\t\tviper.GetStringSlice(\"timezones\"),\n\t\tu.Timezone,\n\t\tsessions,\n\t}\n\n\ttemplates.Me.Execute(w, &o)\n}\n\n\/\/ MeRevokeGetHandler is the handler called from the \/me route to destroy a single session by :num,\n\/\/ or all sessions with \"all\"\nfunc MeRevokeGetHandler(w http.ResponseWriter, req *http.Request) {\n\tu := users.FromContext(req.Context())\n\tif u == nil {\n\t\thttp.Error(w, \"Could not read user data from request context\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ti, err := strconv.ParseInt(chi.URLParam(req, \"num\"), 10, 32)\n\tif err != nil || i < 0 {\n\t\thttp.Error(w, \"\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := session.DestroyByIndex(u.ID, db.CountType(i)); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, req, paths.Get.Me, http.StatusSeeOther)\n}\n\n\/\/ ForgotGetHandler is the route called to send the user a password reset email.\nfunc ForgotGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttemplates.Forgot.Execute(w, nil)\n}\n\n\/\/ ResetPasswordGetHandler is the route called to reset a user's password.\nfunc ResetPasswordGetHandler(w http.ResponseWriter, req *http.Request) {\n\ttype data struct {\n\t\tFlashMessage string\n\t\tToken string\n\t}\n\n\ttoken := req.FormValue(\"token\")\n\tif token == \"\" {\n\t\ttemplates.ResetPassword.Execute(w, data{FlashMessage: \"Invalid reset token.\"})\n\t\treturn\n\t}\n\n\tvalid, _ := pwreset.ValidateToken(token)\n\n\tif !valid {\n\t\ttemplates.ResetPassword.Execute(w, data{FlashMessage: \"Reset is expired or doesn't exist.\"})\n\t\treturn\n\t}\n\n\ttemplates.ResetPassword.Execute(w, data{Token: token})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tucdFileName = \"UnicodeData.txt\"\n\tucdBaseUrl = \"http:\/\/www.unicode.org\/Public\/UCD\/latest\/ucd\/\"\n\tindexFileName = \"runefinder-index.gob\"\n)\n\ntype RuneSlice []rune\n\nfunc (rs RuneSlice) Len() int { return len(rs) }\nfunc (rs RuneSlice) Less(i, j int) bool { return rs[i] < rs[j] }\nfunc (rs RuneSlice) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }\n\ntype RuneSet map[rune]struct{}\n\nfunc (rs RuneSet) Put(key rune) {\n\trs[key] = struct{}{} \/\/ zero-byte struct\n}\n\nfunc (rs RuneSet) Contains(key rune) bool {\n\t_, found := rs[key]\n\treturn found\n}\n\nfunc (rs RuneSet) Intersection(other RuneSet) RuneSet {\n\tresult := RuneSet{}\n\tfor k := range rs {\n\t\tif other.Contains(k) {\n\t\t\tresult.Put(k)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (rs RuneSet) ToRuneSlice() RuneSlice {\n\tresult := RuneSlice{}\n\tfor uchar := range rs {\n\t\tresult = append(result, uchar)\n\t}\n\treturn result\n}\n\nfunc (rs RuneSet) String() string {\n\tsl := rs.ToRuneSlice()\n\tsort.Sort(sl)\n\tstr := \"❮\"\n\tfor i, uchar := range sl {\n\t\tif i > 0 {\n\t\t\tstr += \" \"\n\t\t}\n\t\tstr += fmt.Sprintf(\"U+%04X\", uchar)\n\t}\n\treturn str + \"❯\"\n}\n\ntype RuneIndex struct {\n\tCharacters map[string]RuneSet\n\tNames map[rune]string\n}\n\nfunc progressDisplay(running <-chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase <-running:\n\t\t\tfmt.Println()\n\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\tfmt.Print(\".\")\n\t\t}\n\t}\n}\n\nfunc getUcdLines() []string {\n\turl := ucdBaseUrl + ucdFileName\n\tfmt.Printf(\"Index not found. Retrieving data from:\\n%s\\n\", url)\n\trunning := make(chan bool)\n\tgo progressDisplay(running)\n\tdefer func() {\n\t\trunning <- false\n\t}()\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatal(\"getUcdFile\/http.Get:\", err)\n\t}\n\tcontent, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"buildIndex\/ioutil.ReadAll:\", err)\n\t}\n\tdefer response.Body.Close()\n\treturn strings.Split(string(content), \"\\n\")\n}\n\nfunc buildIndex(indexDir string) RuneIndex {\n\tvar index RuneIndex\n\tindex.Characters = map[string]RuneSet{}\n\tindex.Names = map[rune]string{}\n\n\tfor _, line := range getUcdLines() {\n\t\tvar uchar rune\n\t\tfields := strings.Split(line, \";\")\n\t\tif len(fields) >= 2 {\n\t\t\tcode64, _ := strconv.ParseInt(fields[0], 16, 0)\n\t\t\tuchar = rune(code64)\n\t\t\tindex.Names[uchar] = fields[1]\n\t\t\tfor _, word := range strings.Split(fields[1], \" \") {\n\t\t\t\texisting, ok := index.Characters[word]\n\t\t\t\tif !ok {\n\t\t\t\t\texisting = RuneSet{}\n\t\t\t\t}\n\t\t\t\texisting.Put(uchar)\n\t\t\t\tindex.Characters[word] = existing\n\t\t\t}\n\t\t}\n\n\t}\n\tindexPath := path.Join(indexDir, indexFileName)\n\tindexFile, err := os.Create(indexPath)\n\tif err != nil {\n\t\tlog.Printf(\"WARNING: Unable to save index file.\")\n\t} else {\n\t\tdefer indexFile.Close()\n\t\tencoder := gob.NewEncoder(indexFile)\n\t\tencoder.Encode(index)\n\t}\n\treturn index\n}\n\nfunc getIndex() RuneIndex {\n\tindexDir, _ := os.Getwd()\n\tindexPath := path.Join(indexDir, indexFileName)\n\tif _, err := os.Stat(indexPath); os.IsNotExist(err) {\n\t\treturn buildIndex(indexDir)\n\t}\n\t\/\/ load existing index\n\tindexFile, err := os.Open(indexPath)\n\tif err != nil {\n\t\tlog.Fatal(\"getIndex\/os.Open:\", err)\n\t}\n\tdefer indexFile.Close()\n\n\tvar index RuneIndex\n\n\tdecoder := gob.NewDecoder(indexFile)\n\terr = decoder.Decode(&index)\n\tif err != nil {\n\t\tlog.Fatal(\"getIndex\/Decode:\", err)\n\t}\n\treturn index\n}\n\nfunc findRunes(query []string, index RuneIndex) RuneSlice {\n\tcommonRunes := RuneSet{}\n\tfor i, word := range query {\n\t\tword = strings.ToUpper(word)\n\t\tfound := index.Characters[word]\n\t\tif i == 0 {\n\t\t\tcommonRunes = found\n\t\t} else {\n\t\t\tcommonRunes = commonRunes.Intersection(found)\n\t\t}\n\t\tif len(commonRunes) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tresult := commonRunes.ToRuneSlice()\n\tsort.Sort(result)\n\treturn result\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Println(\"Usage: runefinder <word>\\texample: runefinder cat\")\n\t\tos.Exit(1)\n\t}\n\twords := os.Args[1:]\n\n\tindex := getIndex()\n\n\tcount := 0\n\tformat := \"U+%04X %c \\t%s\\n\"\n\tfor _, uchar := range findRunes(words, index) {\n\t\tif uchar > 0xFFFF {\n\t\t\tformat = \"U+%5X %c \\t%s\\n\"\n\t\t}\n\t\tfmt.Printf(format, uchar, uchar, index.Names[uchar])\n\t\tcount++\n\t}\n\tfmt.Printf(\"%d characters found\\n\", count)\n}\n<commit_msg>minor refactoring: removed duplicate path construction<commit_after>package main\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tucdFileName = \"UnicodeData.txt\"\n\tucdBaseUrl = \"http:\/\/www.unicode.org\/Public\/UCD\/latest\/ucd\/\"\n\tindexFileName = \"runefinder-index.gob\"\n)\n\ntype RuneSlice []rune\n\nfunc (rs RuneSlice) Len() int { return len(rs) }\nfunc (rs RuneSlice) Less(i, j int) bool { return rs[i] < rs[j] }\nfunc (rs RuneSlice) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }\n\ntype RuneSet map[rune]struct{}\n\nfunc (rs RuneSet) Put(key rune) {\n\trs[key] = struct{}{} \/\/ zero-byte struct\n}\n\nfunc (rs RuneSet) Contains(key rune) bool {\n\t_, found := rs[key]\n\treturn found\n}\n\nfunc (rs RuneSet) Intersection(other RuneSet) RuneSet {\n\tresult := RuneSet{}\n\tfor k := range rs {\n\t\tif other.Contains(k) {\n\t\t\tresult.Put(k)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (rs RuneSet) ToRuneSlice() RuneSlice {\n\tresult := RuneSlice{}\n\tfor uchar := range rs {\n\t\tresult = append(result, uchar)\n\t}\n\treturn result\n}\n\nfunc (rs RuneSet) String() string {\n\tsl := rs.ToRuneSlice()\n\tsort.Sort(sl)\n\tstr := \"❮\"\n\tfor i, uchar := range sl {\n\t\tif i > 0 {\n\t\t\tstr += \" \"\n\t\t}\n\t\tstr += fmt.Sprintf(\"U+%04X\", uchar)\n\t}\n\treturn str + \"❯\"\n}\n\ntype RuneIndex struct {\n\tCharacters map[string]RuneSet\n\tNames map[rune]string\n}\n\nfunc progressDisplay(running <-chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase <-running:\n\t\t\tfmt.Println()\n\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\tfmt.Print(\".\")\n\t\t}\n\t}\n}\n\nfunc getUcdLines() []string {\n\turl := ucdBaseUrl + ucdFileName\n\tfmt.Printf(\"Index not found. Retrieving data from:\\n%s\\n\", url)\n\trunning := make(chan bool)\n\tgo progressDisplay(running)\n\tdefer func() {\n\t\trunning <- false\n\t}()\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatal(\"getUcdFile\/http.Get:\", err)\n\t}\n\tcontent, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"buildIndex\/ioutil.ReadAll:\", err)\n\t}\n\tdefer response.Body.Close()\n\treturn strings.Split(string(content), \"\\n\")\n}\n\nfunc buildIndex(indexPath string) RuneIndex {\n\tvar index RuneIndex\n\tindex.Characters = map[string]RuneSet{}\n\tindex.Names = map[rune]string{}\n\n\tfor _, line := range getUcdLines() {\n\t\tvar uchar rune\n\t\tfields := strings.Split(line, \";\")\n\t\tif len(fields) >= 2 {\n\t\t\tcode64, _ := strconv.ParseInt(fields[0], 16, 0)\n\t\t\tuchar = rune(code64)\n\t\t\tindex.Names[uchar] = fields[1]\n\t\t\twords := strings.Split(strings.ToUpper(fields[1]), \" \")\n\t\t\tfor _, word := range words {\n\t\t\t\texisting, ok := index.Characters[word]\n\t\t\t\tif !ok {\n\t\t\t\t\texisting = RuneSet{}\n\t\t\t\t}\n\t\t\t\texisting.Put(uchar)\n\t\t\t\tindex.Characters[word] = existing\n\t\t\t}\n\t\t}\n\n\t}\n\tindexFile, err := os.Create(indexPath)\n\tif err != nil {\n\t\tlog.Printf(\"WARNING: Unable to save index file.\")\n\t} else {\n\t\tdefer indexFile.Close()\n\t\tencoder := gob.NewEncoder(indexFile)\n\t\tencoder.Encode(index)\n\t}\n\treturn index\n}\n\nfunc getIndex() RuneIndex {\n\tindexDir, _ := os.Getwd()\n\tindexPath := path.Join(indexDir, indexFileName)\n\tif _, err := os.Stat(indexPath); os.IsNotExist(err) {\n\t\treturn buildIndex(indexPath)\n\t}\n\t\/\/ load existing index\n\tindexFile, err := os.Open(indexPath)\n\tif err != nil {\n\t\tlog.Fatal(\"getIndex\/os.Open:\", err)\n\t}\n\tdefer indexFile.Close()\n\n\tvar index RuneIndex\n\n\tdecoder := gob.NewDecoder(indexFile)\n\terr = decoder.Decode(&index)\n\tif err != nil {\n\t\tlog.Fatal(\"getIndex\/Decode:\", err)\n\t}\n\treturn index\n}\n\nfunc findRunes(query []string, index RuneIndex) RuneSlice {\n\tcommonRunes := RuneSet{}\n\tfor i, word := range query {\n\t\tword = strings.ToUpper(word)\n\t\tfound := index.Characters[word]\n\t\tif i == 0 {\n\t\t\tcommonRunes = found\n\t\t} else {\n\t\t\tcommonRunes = commonRunes.Intersection(found)\n\t\t}\n\t\tif len(commonRunes) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tresult := commonRunes.ToRuneSlice()\n\tsort.Sort(result)\n\treturn result\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Println(\"Usage: runefinder <word>\\texample: runefinder cat\")\n\t\tos.Exit(1)\n\t}\n\twords := os.Args[1:]\n\n\tindex := getIndex()\n\n\tcount := 0\n\tformat := \"U+%04X %c \\t%s\\n\"\n\tfor _, uchar := range findRunes(words, index) {\n\t\tif uchar > 0xFFFF {\n\t\t\tformat = \"U+%5X %c \\t%s\\n\"\n\t\t}\n\t\tfmt.Printf(format, uchar, uchar, index.Names[uchar])\n\t\tcount++\n\t}\n\tfmt.Printf(\"%d characters found\\n\", count)\n}\n<|endoftext|>"} {"text":"<commit_before>package filetree \/\/ import \"a4.io\/blobstash\/pkg\/client\/filetree\"\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"a4.io\/blobstash\/pkg\/client\/clientutil\"\n)\n\n\/\/ Filetree client\ntype Filetree struct {\n\tclient *clientutil.ClientUtil\n}\n\n\/\/ serverAddr should't have a trailing space\nfunc New(client *clientutil.ClientUtil) *Filetree {\n\treturn &Filetree{\n\t\tclient: client,\n\t}\n}\n\ntype snapReq struct {\n\tFS string `json:\"fs\"`\n\tMessage string `json:\"message\"`\n\tHostname string `json:\"hostname\"`\n}\n\ntype snapResp struct {\n\tVersion int64 `json:\"version\"`\n\tRef string `json:\"ref\"`\n}\n\n\/\/ MakeSnaphot create a FS snapshot from a tree reference\nfunc (f *Filetree) MakeSnapshot(ref, fs, message string) (int64, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ts := &snapReq{FS: fs, Message: message, Hostname: h}\n\tresp, err := f.client.PostJSON(fmt.Sprintf(\"\/api\/filetree\/node\/%s\/_snapshot\", ref), s)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif err := clientutil.ExpectStatusCode(resp, http.StatusOK); err != nil {\n\t\treturn 0, err\n\t}\n\n\tsnap := &snapResp{}\n\tif err := clientutil.Unmarshal(resp, snap); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn snap.Version, nil\n}\n\n\/\/ GC performs a garbage collection to save the latest filetreee snapshot\nfunc (f *Filetree) GC(ns, name string, rev int64) error {\n\tgcScript := fmt.Sprintf(`\nlocal kvstore = require('kvstore')\n\nlocal key = \"_filetree:fs:%s\"\nlocal version = \"%d\"\nlocal _, ref, _ = kvstore.get(key, version)\n\n-- mark the actual KV entry\nmark_kv(key, version)\n\n-- mark the whole tree\nmark_filetree_node(ref)\n`, name, rev)\n\n\tresp, err := f.client.PostMsgpack(\n\t\tfmt.Sprintf(\"\/api\/stash\/%s\/_gc\", ns),\n\t\tmap[string]interface{}{\n\t\t\t\"script\": gcScript,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := clientutil.ExpectStatusCode(resp, http.StatusNoContent); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>client\/filetree: add UA support for snapshots<commit_after>package filetree \/\/ import \"a4.io\/blobstash\/pkg\/client\/filetree\"\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"a4.io\/blobstash\/pkg\/client\/clientutil\"\n)\n\n\/\/ Filetree client\ntype Filetree struct {\n\tclient *clientutil.ClientUtil\n}\n\n\/\/ serverAddr should't have a trailing space\nfunc New(client *clientutil.ClientUtil) *Filetree {\n\treturn &Filetree{\n\t\tclient: client,\n\t}\n}\n\ntype snapReq struct {\n\tFS string `json:\"fs\"`\n\tMessage string `json:\"message\"`\n\tHostname string `json:\"hostname\"`\n\tUserAgent string `json:\"user_agent\"`\n}\n\ntype snapResp struct {\n\tVersion int64 `json:\"version\"`\n\tRef string `json:\"ref\"`\n}\n\n\/\/ MakeSnaphot create a FS snapshot from a tree reference\nfunc (f *Filetree) MakeSnapshot(ref, fs, message, userAgent string) (int64, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ts := &snapReq{\n\t\tFS: fs,\n\t\tMessage: message,\n\t\tHostname: h,\n\t\tUserAgent: userAgent,\n\t}\n\tresp, err := f.client.PostJSON(fmt.Sprintf(\"\/api\/filetree\/node\/%s\/_snapshot\", ref), s)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif err := clientutil.ExpectStatusCode(resp, http.StatusOK); err != nil {\n\t\treturn 0, err\n\t}\n\n\tsnap := &snapResp{}\n\tif err := clientutil.Unmarshal(resp, snap); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn snap.Version, nil\n}\n\n\/\/ GC performs a garbage collection to save the latest filetreee snapshot\nfunc (f *Filetree) GC(ns, name string, rev int64) error {\n\tgcScript := fmt.Sprintf(`\nlocal kvstore = require('kvstore')\n\nlocal key = \"_filetree:fs:%s\"\nlocal version = \"%d\"\nlocal _, ref, _ = kvstore.get(key, version)\n\n-- mark the actual KV entry\nmark_kv(key, version)\n\n-- mark the whole tree\nmark_filetree_node(ref)\n`, name, rev)\n\n\tresp, err := f.client.PostMsgpack(\n\t\tfmt.Sprintf(\"\/api\/stash\/%s\/_gc\", ns),\n\t\tmap[string]interface{}{\n\t\t\t\"script\": gcScript,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := clientutil.ExpectStatusCode(resp, http.StatusNoContent); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\thabv1beta1 \"github.com\/kinvolk\/habitat-operator\/pkg\/apis\/habitat\/v1beta1\"\n\tappsv1beta1 \"k8s.io\/api\/apps\/v1beta1\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nconst persistentVolumeName = \"persistent\"\n\nfunc (hc *HabitatController) newStatefulSet(h *habv1beta1.Habitat) (*appsv1beta1.StatefulSet, error) {\n\t\/\/ This value needs to be passed as a *int32, so we convert it, assign it to a\n\t\/\/ variable and afterwards pass a pointer to it.\n\tcount := int32(h.Spec.Count)\n\n\t\/\/ Set the service arguments we send to Habitat.\n\tvar habArgs []string\n\tif h.Spec.Service.Group != \"\" {\n\t\t\/\/ When a service is started without explicitly naming the group,\n\t\t\/\/ it's assigned to the default group.\n\t\thabArgs = append(habArgs,\n\t\t\t\"--group\", h.Spec.Service.Group)\n\t}\n\n\t\/\/ As we want to label our pods with the\n\t\/\/ topology type we set standalone as the default one.\n\t\/\/ We do not need to pass this to habitat, as if no topology\n\t\/\/ is set, habitat by default sets standalone topology.\n\ttopology := habv1beta1.TopologyStandalone\n\n\tif h.Spec.Service.Topology == habv1beta1.TopologyLeader {\n\t\ttopology = habv1beta1.TopologyLeader\n\t}\n\n\tpath := fmt.Sprintf(\"%s\/%s\", configMapDir, peerFilename)\n\n\thabArgs = append(habArgs,\n\t\t\"--topology\", topology.String(),\n\t\t\"--peer-watch-file\", path,\n\t)\n\n\t\/\/ Runtime binding.\n\t\/\/ One Service connects to another forming a producer\/consumer relationship.\n\tfor _, bind := range h.Spec.Service.Bind {\n\t\t\/\/ Pass --bind flag.\n\t\tbindArg := fmt.Sprintf(\"%s:%s.%s\", bind.Name, bind.Service, bind.Group)\n\t\thabArgs = append(habArgs,\n\t\t\t\"--bind\", bindArg)\n\t}\n\n\tbase := &appsv1beta1.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: h.Name,\n\t\t},\n\t\tSpec: appsv1beta1.StatefulSetSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplicas: &count,\n\t\t\tPodManagementPolicy: appsv1beta1.ParallelPodManagement,\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\thabv1beta1.HabitatLabel: \"true\",\n\t\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t\t\thabv1beta1.TopologyLabel: topology.String(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"habitat-service\",\n\t\t\t\t\t\t\tImage: h.Spec.Image,\n\t\t\t\t\t\t\tArgs: habArgs,\n\t\t\t\t\t\t\tVolumeMounts: []apiv1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: configMapDir,\n\t\t\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: h.Spec.Env,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\/\/ Define the volume for the ConfigMap.\n\t\t\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &apiv1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: apiv1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: configMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tKey: peerFile,\n\t\t\t\t\t\t\t\t\t\t\tPath: peerFilename,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ If we have a secret name present we should mount that secret.\n\tif h.Spec.Service.ConfigSecretName != \"\" {\n\t\t\/\/ Let's make sure our secret is there before mounting it.\n\t\tsecret, err := hc.config.KubernetesClientset.CoreV1().Secrets(h.Namespace).Get(h.Spec.Service.ConfigSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsecretVolume := &apiv1.Volume{\n\t\t\tName: userConfigFilename,\n\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\tSecret: &apiv1.SecretVolumeSource{\n\t\t\t\t\tSecretName: secret.Name,\n\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: userTOMLFile,\n\t\t\t\t\t\t\tPath: userTOMLFile,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsecretVolumeMount := &apiv1.VolumeMount{\n\t\t\tName: userConfigFilename,\n\t\t\t\/\/ The Habitat supervisor creates a directory for each service under \/hab\/svc\/<servicename>.\n\t\t\t\/\/ We need to place the user.toml file in there in order for it to be detected.\n\t\t\tMountPath: fmt.Sprintf(\"\/hab\/user\/%s\/config\", h.Spec.Service.Name),\n\t\t\tReadOnly: false,\n\t\t}\n\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *secretVolumeMount)\n\t\tbase.Spec.Template.Spec.Volumes = append(base.Spec.Template.Spec.Volumes, *secretVolume)\n\t}\n\n\t\/\/ Mount Persistent Volume, if requested.\n\tif ps := h.Spec.PersistentStorage; ps != nil {\n\t\tvm := &apiv1.VolumeMount{\n\t\t\tName: persistentVolumeName,\n\t\t\tMountPath: ps.MountPath,\n\t\t}\n\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *vm)\n\n\t\tq, err := resource.ParseQuantity(ps.Size)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not parse PersistentStorage.Size: %v\", err)\n\t\t}\n\n\t\tbase.Spec.VolumeClaimTemplates = []apiv1.PersistentVolumeClaim{\n\t\t\tapiv1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: persistentVolumeName,\n\t\t\t\t\tNamespace: h.Namespace,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\thabv1beta1.HabitatLabel: \"true\",\n\t\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []apiv1.PersistentVolumeAccessMode{\n\t\t\t\t\t\tapiv1.ReadWriteOnce,\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &ps.StorageClassName,\n\t\t\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\t\t\tRequests: apiv1.ResourceList{\n\t\t\t\t\t\t\tapiv1.ResourceStorage: q,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t\/\/ Handle ring key, if one is specified.\n\tif ringSecretName := h.Spec.Service.RingSecretName; ringSecretName != \"\" {\n\t\ts, err := hc.config.KubernetesClientset.CoreV1().Secrets(apiv1.NamespaceDefault).Get(ringSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Secret containing ring key\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ The filename under which the ring key is saved.\n\t\tringKeyFile := fmt.Sprintf(\"%s.%s\", ringSecretName, ringKeyFileExt)\n\n\t\t\/\/ Extract the bare ring name, by removing the revision.\n\t\t\/\/ Validation has already been performed by this point.\n\t\tringName := ringRegexp.FindStringSubmatch(ringSecretName)[1]\n\n\t\tv := &apiv1.Volume{\n\t\t\tName: ringSecretName,\n\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\tSecret: &apiv1.SecretVolumeSource{\n\t\t\t\t\tSecretName: s.Name,\n\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: ringSecretKey,\n\t\t\t\t\t\t\tPath: ringKeyFile,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tvm := &apiv1.VolumeMount{\n\t\t\tName: ringSecretName,\n\t\t\tMountPath: \"\/hab\/cache\/keys\",\n\t\t\t\/\/ This directory cannot be made read-only, as the supervisor writes to\n\t\t\t\/\/ it during its operation.\n\t\t\tReadOnly: false,\n\t\t}\n\n\t\t\/\/ Mount ring key file.\n\t\tbase.Spec.Template.Spec.Volumes = append(base.Spec.Template.Spec.Volumes, *v)\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *vm)\n\n\t\t\/\/ Add --ring argument to supervisor invocation.\n\t\tbase.Spec.Template.Spec.Containers[0].Args = append(base.Spec.Template.Spec.Containers[0].Args, \"--ring\", ringName)\n\t}\n\n\treturn base, nil\n}\n\nfunc (hc *HabitatController) cacheStatefulSets() {\n\tsource := newListWatchFromClientWithLabels(\n\t\thc.config.KubernetesClientset.AppsV1beta1().RESTClient(),\n\t\t\"statefulsets\",\n\t\tapiv1.NamespaceAll,\n\t\tlabelListOptions())\n\n\thc.stsInformer = cache.NewSharedIndexInformer(\n\t\tsource,\n\t\t&appsv1beta1.StatefulSet{},\n\t\tresyncPeriod,\n\t\tcache.Indexers{},\n\t)\n\n\thc.stsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: hc.handleStsAdd,\n\t\tUpdateFunc: hc.handleStsUpdate,\n\t\tDeleteFunc: hc.handleStsDelete,\n\t})\n\n\thc.stsInformerSynced = hc.stsInformer.HasSynced\n}\n\nfunc (hc *HabitatController) handleStsAdd(obj interface{}) {\n\td, ok := obj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", obj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n\nfunc (hc *HabitatController) handleStsUpdate(oldObj, newObj interface{}) {\n\td, ok := newObj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", newObj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n\nfunc (hc *HabitatController) handleStsDelete(obj interface{}) {\n\td, ok := obj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", obj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\t\/\/ Could not find Habitat, it must have already been removed.\n\t\tlevel.Debug(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n<commit_msg>Add OwnerReference from StatefulSet to Habitat<commit_after>\/\/ Copyright (c) 2018 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\thabv1beta1 \"github.com\/kinvolk\/habitat-operator\/pkg\/apis\/habitat\/v1beta1\"\n\tappsv1beta1 \"k8s.io\/api\/apps\/v1beta1\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nconst persistentVolumeName = \"persistent\"\n\nfunc (hc *HabitatController) newStatefulSet(h *habv1beta1.Habitat) (*appsv1beta1.StatefulSet, error) {\n\t\/\/ This value needs to be passed as a *int32, so we convert it, assign it to a\n\t\/\/ variable and afterwards pass a pointer to it.\n\tcount := int32(h.Spec.Count)\n\n\t\/\/ Set the service arguments we send to Habitat.\n\tvar habArgs []string\n\tif h.Spec.Service.Group != \"\" {\n\t\t\/\/ When a service is started without explicitly naming the group,\n\t\t\/\/ it's assigned to the default group.\n\t\thabArgs = append(habArgs,\n\t\t\t\"--group\", h.Spec.Service.Group)\n\t}\n\n\t\/\/ As we want to label our pods with the\n\t\/\/ topology type we set standalone as the default one.\n\t\/\/ We do not need to pass this to habitat, as if no topology\n\t\/\/ is set, habitat by default sets standalone topology.\n\ttopology := habv1beta1.TopologyStandalone\n\n\tif h.Spec.Service.Topology == habv1beta1.TopologyLeader {\n\t\ttopology = habv1beta1.TopologyLeader\n\t}\n\n\tpath := fmt.Sprintf(\"%s\/%s\", configMapDir, peerFilename)\n\n\thabArgs = append(habArgs,\n\t\t\"--topology\", topology.String(),\n\t\t\"--peer-watch-file\", path,\n\t)\n\n\t\/\/ Runtime binding.\n\t\/\/ One Service connects to another forming a producer\/consumer relationship.\n\tfor _, bind := range h.Spec.Service.Bind {\n\t\t\/\/ Pass --bind flag.\n\t\tbindArg := fmt.Sprintf(\"%s:%s.%s\", bind.Name, bind.Service, bind.Group)\n\t\thabArgs = append(habArgs,\n\t\t\t\"--bind\", bindArg)\n\t}\n\n\tbase := &appsv1beta1.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: h.Name,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\tmetav1.OwnerReference{\n\t\t\t\t\tAPIVersion: h.APIVersion,\n\t\t\t\t\tKind: h.Kind,\n\t\t\t\t\tName: h.Name,\n\t\t\t\t\tUID: h.UID,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1beta1.StatefulSetSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplicas: &count,\n\t\t\tPodManagementPolicy: appsv1beta1.ParallelPodManagement,\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\thabv1beta1.HabitatLabel: \"true\",\n\t\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t\t\thabv1beta1.TopologyLabel: topology.String(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\tContainers: []apiv1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"habitat-service\",\n\t\t\t\t\t\t\tImage: h.Spec.Image,\n\t\t\t\t\t\t\tArgs: habArgs,\n\t\t\t\t\t\t\tVolumeMounts: []apiv1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: configMapDir,\n\t\t\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: h.Spec.Env,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\/\/ Define the volume for the ConfigMap.\n\t\t\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &apiv1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: apiv1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: configMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tKey: peerFile,\n\t\t\t\t\t\t\t\t\t\t\tPath: peerFilename,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ If we have a secret name present we should mount that secret.\n\tif h.Spec.Service.ConfigSecretName != \"\" {\n\t\t\/\/ Let's make sure our secret is there before mounting it.\n\t\tsecret, err := hc.config.KubernetesClientset.CoreV1().Secrets(h.Namespace).Get(h.Spec.Service.ConfigSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsecretVolume := &apiv1.Volume{\n\t\t\tName: userConfigFilename,\n\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\tSecret: &apiv1.SecretVolumeSource{\n\t\t\t\t\tSecretName: secret.Name,\n\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: userTOMLFile,\n\t\t\t\t\t\t\tPath: userTOMLFile,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tsecretVolumeMount := &apiv1.VolumeMount{\n\t\t\tName: userConfigFilename,\n\t\t\t\/\/ The Habitat supervisor creates a directory for each service under \/hab\/svc\/<servicename>.\n\t\t\t\/\/ We need to place the user.toml file in there in order for it to be detected.\n\t\t\tMountPath: fmt.Sprintf(\"\/hab\/user\/%s\/config\", h.Spec.Service.Name),\n\t\t\tReadOnly: false,\n\t\t}\n\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *secretVolumeMount)\n\t\tbase.Spec.Template.Spec.Volumes = append(base.Spec.Template.Spec.Volumes, *secretVolume)\n\t}\n\n\t\/\/ Mount Persistent Volume, if requested.\n\tif ps := h.Spec.PersistentStorage; ps != nil {\n\t\tvm := &apiv1.VolumeMount{\n\t\t\tName: persistentVolumeName,\n\t\t\tMountPath: ps.MountPath,\n\t\t}\n\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *vm)\n\n\t\tq, err := resource.ParseQuantity(ps.Size)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not parse PersistentStorage.Size: %v\", err)\n\t\t}\n\n\t\tbase.Spec.VolumeClaimTemplates = []apiv1.PersistentVolumeClaim{\n\t\t\tapiv1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: persistentVolumeName,\n\t\t\t\t\tNamespace: h.Namespace,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\thabv1beta1.HabitatLabel: \"true\",\n\t\t\t\t\t\thabv1beta1.HabitatNameLabel: h.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []apiv1.PersistentVolumeAccessMode{\n\t\t\t\t\t\tapiv1.ReadWriteOnce,\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &ps.StorageClassName,\n\t\t\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\t\t\tRequests: apiv1.ResourceList{\n\t\t\t\t\t\t\tapiv1.ResourceStorage: q,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t\/\/ Handle ring key, if one is specified.\n\tif ringSecretName := h.Spec.Service.RingSecretName; ringSecretName != \"\" {\n\t\ts, err := hc.config.KubernetesClientset.CoreV1().Secrets(apiv1.NamespaceDefault).Get(ringSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Secret containing ring key\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ The filename under which the ring key is saved.\n\t\tringKeyFile := fmt.Sprintf(\"%s.%s\", ringSecretName, ringKeyFileExt)\n\n\t\t\/\/ Extract the bare ring name, by removing the revision.\n\t\t\/\/ Validation has already been performed by this point.\n\t\tringName := ringRegexp.FindStringSubmatch(ringSecretName)[1]\n\n\t\tv := &apiv1.Volume{\n\t\t\tName: ringSecretName,\n\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\tSecret: &apiv1.SecretVolumeSource{\n\t\t\t\t\tSecretName: s.Name,\n\t\t\t\t\tItems: []apiv1.KeyToPath{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: ringSecretKey,\n\t\t\t\t\t\t\tPath: ringKeyFile,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tvm := &apiv1.VolumeMount{\n\t\t\tName: ringSecretName,\n\t\t\tMountPath: \"\/hab\/cache\/keys\",\n\t\t\t\/\/ This directory cannot be made read-only, as the supervisor writes to\n\t\t\t\/\/ it during its operation.\n\t\t\tReadOnly: false,\n\t\t}\n\n\t\t\/\/ Mount ring key file.\n\t\tbase.Spec.Template.Spec.Volumes = append(base.Spec.Template.Spec.Volumes, *v)\n\t\tbase.Spec.Template.Spec.Containers[0].VolumeMounts = append(base.Spec.Template.Spec.Containers[0].VolumeMounts, *vm)\n\n\t\t\/\/ Add --ring argument to supervisor invocation.\n\t\tbase.Spec.Template.Spec.Containers[0].Args = append(base.Spec.Template.Spec.Containers[0].Args, \"--ring\", ringName)\n\t}\n\n\treturn base, nil\n}\n\nfunc (hc *HabitatController) cacheStatefulSets() {\n\tsource := newListWatchFromClientWithLabels(\n\t\thc.config.KubernetesClientset.AppsV1beta1().RESTClient(),\n\t\t\"statefulsets\",\n\t\tapiv1.NamespaceAll,\n\t\tlabelListOptions())\n\n\thc.stsInformer = cache.NewSharedIndexInformer(\n\t\tsource,\n\t\t&appsv1beta1.StatefulSet{},\n\t\tresyncPeriod,\n\t\tcache.Indexers{},\n\t)\n\n\thc.stsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: hc.handleStsAdd,\n\t\tUpdateFunc: hc.handleStsUpdate,\n\t\tDeleteFunc: hc.handleStsDelete,\n\t})\n\n\thc.stsInformerSynced = hc.stsInformer.HasSynced\n}\n\nfunc (hc *HabitatController) handleStsAdd(obj interface{}) {\n\td, ok := obj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", obj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n\nfunc (hc *HabitatController) handleStsUpdate(oldObj, newObj interface{}) {\n\td, ok := newObj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", newObj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n\nfunc (hc *HabitatController) handleStsDelete(obj interface{}) {\n\td, ok := obj.(*appsv1beta1.StatefulSet)\n\tif !ok {\n\t\tlevel.Error(hc.logger).Log(\"msg\", \"Failed to type assert StatefulSet\", \"obj\", obj)\n\t\treturn\n\t}\n\n\th, err := hc.getHabitatFromLabeledResource(d)\n\tif err != nil {\n\t\t\/\/ Could not find Habitat, it must have already been removed.\n\t\tlevel.Debug(hc.logger).Log(\"msg\", \"Could not find Habitat for StatefulSet\", \"name\", d.Name)\n\t\treturn\n\t}\n\n\thc.enqueue(h)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage btcutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/conformal\/btcwire\"\n)\n\n\/\/ OutOfRangeError describes an error due to accessing an element that is out\n\/\/ of range.\ntype OutOfRangeError string\n\n\/\/ BlockHeightUnknown is the value returned for a block height that is unknown.\n\/\/ This is typically because the block has not been inserted into the main chain\n\/\/ yet.\nconst BlockHeightUnknown = int64(-1)\n\n\/\/ Error satisfies the error interface and prints human-readable errors.\nfunc (e OutOfRangeError) Error() string {\n\treturn string(e)\n}\n\n\/\/ Block defines a bitcoin block that provides easier and more efficient\n\/\/ manipulation of raw wire protocol blocks. It also memoizes hashes for the\n\/\/ block and its transactions on their first access so subsequent accesses don't\n\/\/ have to repeat the relatively expensive hashing operations.\ntype Block struct {\n\tmsgBlock *btcwire.MsgBlock \/\/ Underlying MsgBlock\n\trawBlock []byte \/\/ Raw wire encoded bytes for the block\n\tprotocolVersion uint32 \/\/ Protocol version used to encode rawBlock\n\tblockSha *btcwire.ShaHash \/\/ Cached block hash\n\tblockHeight int64 \/\/ Height in the main block chain\n\ttxShas []*btcwire.ShaHash \/\/ Cached transaction hashes\n\ttxShasGenerated bool \/\/ ALL transaction hashes generated\n}\n\n\/\/ MsgBlock returns the underlying btcwire.MsgBlock for the Block.\nfunc (b *Block) MsgBlock() *btcwire.MsgBlock {\n\t\/\/ Return the cached block.\n\treturn b.msgBlock\n}\n\n\/\/ Bytes returns the raw wire protocol encoded bytes for the Block and the\n\/\/ protocol version used to encode it. This is equivalent to calling BtcEncode\n\/\/ on the underlying btcwire.MsgBlock, however it caches the result so\n\/\/ subsequent calls are more efficient.\nfunc (b *Block) Bytes() ([]byte, uint32, error) {\n\t\/\/ Return the cached raw block bytes and associated protocol version if\n\t\/\/ it has already been generated.\n\tif len(b.rawBlock) != 0 {\n\t\treturn b.rawBlock, b.protocolVersion, nil\n\t}\n\n\t\/\/ Encode the MsgBlock into raw block bytes.\n\tvar w bytes.Buffer\n\terr := b.msgBlock.BtcEncode(&w, b.protocolVersion)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\trawBlock := w.Bytes()\n\n\t\/\/ Cache the encoded bytes and return them.\n\tb.rawBlock = rawBlock\n\treturn rawBlock, b.protocolVersion, nil\n}\n\n\/\/ Sha returns the block identifier hash for the Block. This is equivalent to\n\/\/ calling BlockSha on the underlying btcwire.MsgBlock, however it caches the\n\/\/ result so subsequent calls are more efficient.\nfunc (b *Block) Sha() (*btcwire.ShaHash, error) {\n\t\/\/ Return the cached block hash if it has already been generated.\n\tif b.blockSha != nil {\n\t\treturn b.blockSha, nil\n\t}\n\n\t\/\/ Generate the block hash. Ignore the error since BlockSha can't\n\t\/\/ currently fail.\n\tsha, _ := b.msgBlock.BlockSha(b.protocolVersion)\n\n\t\/\/ Cache the block hash and return it.\n\tb.blockSha = &sha\n\treturn &sha, nil\n}\n\n\/\/ TxSha returns the hash for the requested transaction number in the Block.\n\/\/ The supplied index is 0 based. That is to say, the first transaction is the\n\/\/ block is txNum 0. This is equivalent to calling TxSha on the underlying\n\/\/ btcwire.MsgTx, however it caches the result so subsequent calls are more\n\/\/ efficient.\nfunc (b *Block) TxSha(txNum int) (*btcwire.ShaHash, error) {\n\t\/\/ Ensure the requested transaction is in range.\n\tnumTx := b.msgBlock.Header.TxnCount\n\tif txNum < 0 || uint64(txNum) > numTx {\n\t\tstr := fmt.Sprintf(\"transaction index %d is out of range - max %d\",\n\t\t\ttxNum, numTx-1)\n\t\treturn nil, OutOfRangeError(str)\n\t}\n\n\t\/\/ Generate slice to hold all of the transaction hashes if needed.\n\tif len(b.txShas) == 0 {\n\t\tb.txShas = make([]*btcwire.ShaHash, numTx)\n\t}\n\n\t\/\/ Return the cached hash if it has already been generated.\n\tif b.txShas[txNum] != nil {\n\t\treturn b.txShas[txNum], nil\n\t}\n\n\t\/\/ Generate the hash for the transaction. Ignore the error since TxSha\n\t\/\/ can't currently fail.\n\tsha, _ := b.msgBlock.Transactions[txNum].TxSha(b.protocolVersion)\n\n\t\/\/ Cache the transaction hash and return it.\n\tb.txShas[txNum] = &sha\n\treturn &sha, nil\n}\n\n\/\/ TxShas returns a slice of hashes for all transactions in the Block. This is\n\/\/ equivalent to calling TxSha on each underlying btcwire.MsgTx, however it\n\/\/ caches the result so subsequent calls are more efficient.\nfunc (b *Block) TxShas() ([]*btcwire.ShaHash, error) {\n\t\/\/ Return cached hashes if they have ALL already been generated. This\n\t\/\/ flag is necessary because the transaction hashes are lazily generated\n\t\/\/ in a sparse fashion.\n\tif b.txShasGenerated {\n\t\treturn b.txShas, nil\n\t}\n\n\t\/\/ Generate slice to hold all of the transaction hashes if needed.\n\tif len(b.txShas) == 0 {\n\t\tb.txShas = make([]*btcwire.ShaHash, b.msgBlock.Header.TxnCount)\n\t}\n\n\t\/\/ Generate and cache the transaction hashes for all that haven't already\n\t\/\/ been done.\n\tfor i, hash := range b.txShas {\n\t\tif hash == nil {\n\t\t\t\/\/ Ignore the error since TxSha can't currently fail.\n\t\t\tsha, _ := b.msgBlock.Transactions[i].TxSha(b.protocolVersion)\n\t\t\tb.txShas[i] = &sha\n\t\t}\n\t}\n\n\tb.txShasGenerated = true\n\treturn b.txShas, nil\n}\n\n\/\/ ProtocolVersion returns the protocol version that was used to create the\n\/\/ underlying btcwire.MsgBlock.\nfunc (b *Block) ProtocolVersion() uint32 {\n\treturn b.protocolVersion\n}\n\n\/\/ TxLoc() returns the offsets and lengths of each transaction in a raw block.\n\/\/ It is used to allow fast indexing into transactions within the raw byte\n\/\/ stream.\nfunc (b *Block) TxLoc() ([]btcwire.TxLoc, error) {\n\trawMsg, pver, err := b.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trbuf := bytes.NewBuffer(rawMsg)\n\n\tvar mblock btcwire.MsgBlock\n\ttxLocs, err := mblock.BtcDecodeTxLoc(rbuf, pver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn txLocs, err\n}\n\n\/\/ Height returns the saved height of the block in the blockchain. This value\n\/\/ will be BlockHeightUnknown if it hasn't already explicitly been set.\nfunc (b *Block) Height() int64 {\n\treturn b.blockHeight\n}\n\n\/\/ SetHeight sets the height of the block in the blockchain.\nfunc (b *Block) SetHeight(height int64) {\n\tb.blockHeight = height\n}\n\n\/\/ NewBlock returns a new instance of a bitcoin block given an underlying\n\/\/ btcwire.MsgBlock and protocol version. See Block.\nfunc NewBlock(msgBlock *btcwire.MsgBlock, pver uint32) *Block {\n\treturn &Block{\n\t\tmsgBlock: msgBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n}\n\n\/\/ NewBlockFromBytes returns a new instance of a bitcoin block given the\n\/\/ raw wire encoded bytes and protocol version used to encode those bytes.\n\/\/ See Block.\nfunc NewBlockFromBytes(rawBlock []byte, pver uint32) (*Block, error) {\n\t\/\/ Decode the raw block bytes into a MsgBlock.\n\tvar msgBlock btcwire.MsgBlock\n\tbr := bytes.NewBuffer(rawBlock)\n\terr := msgBlock.BtcDecode(br, pver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := Block{\n\t\tmsgBlock: &msgBlock,\n\t\trawBlock: rawBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n\treturn &b, nil\n}\n\n\/\/ NewBlockFromBlockAndBytes returns a new instance of a bitcoin block given\n\/\/ an underlying btcwire.MsgBlock, protocol version and raw Block. See Block.\nfunc NewBlockFromBlockAndBytes(msgBlock *btcwire.MsgBlock, rawBlock []byte, pver uint32) *Block {\n\treturn &Block{\n\t\tmsgBlock: msgBlock,\n\t\trawBlock: rawBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n}\n<commit_msg>Make TxLoc comment consistent with others.<commit_after>\/\/ Copyright (c) 2013 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage btcutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/conformal\/btcwire\"\n)\n\n\/\/ OutOfRangeError describes an error due to accessing an element that is out\n\/\/ of range.\ntype OutOfRangeError string\n\n\/\/ BlockHeightUnknown is the value returned for a block height that is unknown.\n\/\/ This is typically because the block has not been inserted into the main chain\n\/\/ yet.\nconst BlockHeightUnknown = int64(-1)\n\n\/\/ Error satisfies the error interface and prints human-readable errors.\nfunc (e OutOfRangeError) Error() string {\n\treturn string(e)\n}\n\n\/\/ Block defines a bitcoin block that provides easier and more efficient\n\/\/ manipulation of raw wire protocol blocks. It also memoizes hashes for the\n\/\/ block and its transactions on their first access so subsequent accesses don't\n\/\/ have to repeat the relatively expensive hashing operations.\ntype Block struct {\n\tmsgBlock *btcwire.MsgBlock \/\/ Underlying MsgBlock\n\trawBlock []byte \/\/ Raw wire encoded bytes for the block\n\tprotocolVersion uint32 \/\/ Protocol version used to encode rawBlock\n\tblockSha *btcwire.ShaHash \/\/ Cached block hash\n\tblockHeight int64 \/\/ Height in the main block chain\n\ttxShas []*btcwire.ShaHash \/\/ Cached transaction hashes\n\ttxShasGenerated bool \/\/ ALL transaction hashes generated\n}\n\n\/\/ MsgBlock returns the underlying btcwire.MsgBlock for the Block.\nfunc (b *Block) MsgBlock() *btcwire.MsgBlock {\n\t\/\/ Return the cached block.\n\treturn b.msgBlock\n}\n\n\/\/ Bytes returns the raw wire protocol encoded bytes for the Block and the\n\/\/ protocol version used to encode it. This is equivalent to calling BtcEncode\n\/\/ on the underlying btcwire.MsgBlock, however it caches the result so\n\/\/ subsequent calls are more efficient.\nfunc (b *Block) Bytes() ([]byte, uint32, error) {\n\t\/\/ Return the cached raw block bytes and associated protocol version if\n\t\/\/ it has already been generated.\n\tif len(b.rawBlock) != 0 {\n\t\treturn b.rawBlock, b.protocolVersion, nil\n\t}\n\n\t\/\/ Encode the MsgBlock into raw block bytes.\n\tvar w bytes.Buffer\n\terr := b.msgBlock.BtcEncode(&w, b.protocolVersion)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\trawBlock := w.Bytes()\n\n\t\/\/ Cache the encoded bytes and return them.\n\tb.rawBlock = rawBlock\n\treturn rawBlock, b.protocolVersion, nil\n}\n\n\/\/ Sha returns the block identifier hash for the Block. This is equivalent to\n\/\/ calling BlockSha on the underlying btcwire.MsgBlock, however it caches the\n\/\/ result so subsequent calls are more efficient.\nfunc (b *Block) Sha() (*btcwire.ShaHash, error) {\n\t\/\/ Return the cached block hash if it has already been generated.\n\tif b.blockSha != nil {\n\t\treturn b.blockSha, nil\n\t}\n\n\t\/\/ Generate the block hash. Ignore the error since BlockSha can't\n\t\/\/ currently fail.\n\tsha, _ := b.msgBlock.BlockSha(b.protocolVersion)\n\n\t\/\/ Cache the block hash and return it.\n\tb.blockSha = &sha\n\treturn &sha, nil\n}\n\n\/\/ TxSha returns the hash for the requested transaction number in the Block.\n\/\/ The supplied index is 0 based. That is to say, the first transaction is the\n\/\/ block is txNum 0. This is equivalent to calling TxSha on the underlying\n\/\/ btcwire.MsgTx, however it caches the result so subsequent calls are more\n\/\/ efficient.\nfunc (b *Block) TxSha(txNum int) (*btcwire.ShaHash, error) {\n\t\/\/ Ensure the requested transaction is in range.\n\tnumTx := b.msgBlock.Header.TxnCount\n\tif txNum < 0 || uint64(txNum) > numTx {\n\t\tstr := fmt.Sprintf(\"transaction index %d is out of range - max %d\",\n\t\t\ttxNum, numTx-1)\n\t\treturn nil, OutOfRangeError(str)\n\t}\n\n\t\/\/ Generate slice to hold all of the transaction hashes if needed.\n\tif len(b.txShas) == 0 {\n\t\tb.txShas = make([]*btcwire.ShaHash, numTx)\n\t}\n\n\t\/\/ Return the cached hash if it has already been generated.\n\tif b.txShas[txNum] != nil {\n\t\treturn b.txShas[txNum], nil\n\t}\n\n\t\/\/ Generate the hash for the transaction. Ignore the error since TxSha\n\t\/\/ can't currently fail.\n\tsha, _ := b.msgBlock.Transactions[txNum].TxSha(b.protocolVersion)\n\n\t\/\/ Cache the transaction hash and return it.\n\tb.txShas[txNum] = &sha\n\treturn &sha, nil\n}\n\n\/\/ TxShas returns a slice of hashes for all transactions in the Block. This is\n\/\/ equivalent to calling TxSha on each underlying btcwire.MsgTx, however it\n\/\/ caches the result so subsequent calls are more efficient.\nfunc (b *Block) TxShas() ([]*btcwire.ShaHash, error) {\n\t\/\/ Return cached hashes if they have ALL already been generated. This\n\t\/\/ flag is necessary because the transaction hashes are lazily generated\n\t\/\/ in a sparse fashion.\n\tif b.txShasGenerated {\n\t\treturn b.txShas, nil\n\t}\n\n\t\/\/ Generate slice to hold all of the transaction hashes if needed.\n\tif len(b.txShas) == 0 {\n\t\tb.txShas = make([]*btcwire.ShaHash, b.msgBlock.Header.TxnCount)\n\t}\n\n\t\/\/ Generate and cache the transaction hashes for all that haven't already\n\t\/\/ been done.\n\tfor i, hash := range b.txShas {\n\t\tif hash == nil {\n\t\t\t\/\/ Ignore the error since TxSha can't currently fail.\n\t\t\tsha, _ := b.msgBlock.Transactions[i].TxSha(b.protocolVersion)\n\t\t\tb.txShas[i] = &sha\n\t\t}\n\t}\n\n\tb.txShasGenerated = true\n\treturn b.txShas, nil\n}\n\n\/\/ ProtocolVersion returns the protocol version that was used to create the\n\/\/ underlying btcwire.MsgBlock.\nfunc (b *Block) ProtocolVersion() uint32 {\n\treturn b.protocolVersion\n}\n\n\/\/ TxLoc returns the offsets and lengths of each transaction in a raw block.\n\/\/ It is used to allow fast indexing into transactions within the raw byte\n\/\/ stream.\nfunc (b *Block) TxLoc() ([]btcwire.TxLoc, error) {\n\trawMsg, pver, err := b.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trbuf := bytes.NewBuffer(rawMsg)\n\n\tvar mblock btcwire.MsgBlock\n\ttxLocs, err := mblock.BtcDecodeTxLoc(rbuf, pver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn txLocs, err\n}\n\n\/\/ Height returns the saved height of the block in the blockchain. This value\n\/\/ will be BlockHeightUnknown if it hasn't already explicitly been set.\nfunc (b *Block) Height() int64 {\n\treturn b.blockHeight\n}\n\n\/\/ SetHeight sets the height of the block in the blockchain.\nfunc (b *Block) SetHeight(height int64) {\n\tb.blockHeight = height\n}\n\n\/\/ NewBlock returns a new instance of a bitcoin block given an underlying\n\/\/ btcwire.MsgBlock and protocol version. See Block.\nfunc NewBlock(msgBlock *btcwire.MsgBlock, pver uint32) *Block {\n\treturn &Block{\n\t\tmsgBlock: msgBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n}\n\n\/\/ NewBlockFromBytes returns a new instance of a bitcoin block given the\n\/\/ raw wire encoded bytes and protocol version used to encode those bytes.\n\/\/ See Block.\nfunc NewBlockFromBytes(rawBlock []byte, pver uint32) (*Block, error) {\n\t\/\/ Decode the raw block bytes into a MsgBlock.\n\tvar msgBlock btcwire.MsgBlock\n\tbr := bytes.NewBuffer(rawBlock)\n\terr := msgBlock.BtcDecode(br, pver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := Block{\n\t\tmsgBlock: &msgBlock,\n\t\trawBlock: rawBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n\treturn &b, nil\n}\n\n\/\/ NewBlockFromBlockAndBytes returns a new instance of a bitcoin block given\n\/\/ an underlying btcwire.MsgBlock, protocol version and raw Block. See Block.\nfunc NewBlockFromBlockAndBytes(msgBlock *btcwire.MsgBlock, rawBlock []byte, pver uint32) *Block {\n\treturn &Block{\n\t\tmsgBlock: msgBlock,\n\t\trawBlock: rawBlock,\n\t\tprotocolVersion: pver,\n\t\tblockHeight: BlockHeightUnknown,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage http\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/selection\"\n\tutilerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\n\tcmacme \"github.com\/jetstack\/cert-manager\/pkg\/apis\/acme\/v1\"\n\tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n)\n\nfunc (s *Solver) ensureService(ctx context.Context, ch *cmacme.Challenge) (*corev1.Service, error) {\n\tlog := logf.FromContext(ctx).WithName(\"ensureService\")\n\n\tlog.V(logf.DebugLevel).Info(\"checking for existing HTTP01 solver services for challenge\")\n\texistingServices, err := s.getServicesForChallenge(ctx, ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(existingServices) == 1 {\n\t\tlogf.WithRelatedResource(log, existingServices[0]).Info(\"found one existing HTTP01 solver Service for challenge resource\")\n\t\treturn existingServices[0], nil\n\t}\n\tif len(existingServices) > 1 {\n\t\tlog.V(logf.DebugLevel).Info(\"multiple challenge solver services found for challenge. cleaning up all existing services.\")\n\t\terr := s.cleanupServices(ctx, ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, fmt.Errorf(\"multiple existing challenge solver services found and cleaned up. retrying challenge sync\")\n\t}\n\n\tlog.V(logf.DebugLevel).Info(\"creating HTTP01 challenge solver service\")\n\treturn s.createService(ctx, ch)\n}\n\n\/\/ getServicesForChallenge returns a list of services that were created to solve\n\/\/ http challenges for the given domain\nfunc (s *Solver) getServicesForChallenge(ctx context.Context, ch *cmacme.Challenge) ([]*corev1.Service, error) {\n\tlog := logf.FromContext(ctx).WithName(\"getServicesForChallenge\")\n\n\tpodLabels := podLabels(ch)\n\tselector := labels.NewSelector()\n\tfor key, val := range podLabels {\n\t\treq, err := labels.NewRequirement(key, selection.Equals, []string{val})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*req)\n\t}\n\n\tserviceList, err := s.serviceLister.Services(ch.Namespace).List(selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar relevantServices []*corev1.Service\n\tfor _, service := range serviceList {\n\t\tif !metav1.IsControlledBy(service, ch) {\n\t\t\tlogf.WithRelatedResource(log, service).Info(\"found existing solver pod for this challenge resource, however \" +\n\t\t\t\t\"it does not have an appropriate OwnerReference referencing this challenge. Skipping it altogether.\")\n\t\t\tcontinue\n\t\t}\n\t\trelevantServices = append(relevantServices, service)\n\t}\n\n\treturn relevantServices, nil\n}\n\n\/\/ createService will create the service required to solve this challenge\n\/\/ in the target API server.\nfunc (s *Solver) createService(ctx context.Context, ch *cmacme.Challenge) (*corev1.Service, error) {\n\tsvc, err := buildService(ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Client.CoreV1().Services(ch.Namespace).Create(ctx, svc, metav1.CreateOptions{})\n}\n\nfunc buildService(ch *cmacme.Challenge) (*corev1.Service, error) {\n\tpodLabels := podLabels(ch)\n\tservice := &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"cm-acme-http-solver-\",\n\t\t\tNamespace: ch.Namespace,\n\t\t\tLabels: podLabels,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"auth.istio.io\/8089\": \"NONE\",\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ch, challengeGvk)},\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeNodePort,\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: acmeSolverListenPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(acmeSolverListenPort),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: podLabels,\n\t\t},\n\t}\n\n\t\/\/ checking for presence of http01 config and if set serviceType is set, override our default (NodePort)\n\tserviceType, err := getServiceType(ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservice.Spec.Type = serviceType\n\n\treturn service, nil\n}\n\nfunc (s *Solver) cleanupServices(ctx context.Context, ch *cmacme.Challenge) error {\n\tlog := logf.FromContext(ctx, \"cleanupPods\")\n\n\tservices, err := s.getServicesForChallenge(ctx, ch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar errs []error\n\tfor _, service := range services {\n\t\tlog := logf.WithRelatedResource(log, service).V(logf.DebugLevel)\n\t\tlog.V(logf.DebugLevel).Info(\"deleting service resource\")\n\n\t\terr := s.Client.CoreV1().Services(service.Namespace).Delete(ctx, service.Name, metav1.DeleteOptions{})\n\t\tif err != nil {\n\t\t\tlog.V(logf.WarnLevel).Info(\"failed to delete pod resource\", \"error\", err)\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.V(logf.DebugLevel).Info(\"successfully deleted pod resource\")\n\t}\n\treturn utilerrors.NewAggregate(errs)\n}\n<commit_msg>Fix regression in solver service builder - default service type should be NodePort rather than empty<commit_after>\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage http\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/selection\"\n\tutilerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\n\tcmacme \"github.com\/jetstack\/cert-manager\/pkg\/apis\/acme\/v1\"\n\tlogf \"github.com\/jetstack\/cert-manager\/pkg\/logs\"\n)\n\nfunc (s *Solver) ensureService(ctx context.Context, ch *cmacme.Challenge) (*corev1.Service, error) {\n\tlog := logf.FromContext(ctx).WithName(\"ensureService\")\n\n\tlog.V(logf.DebugLevel).Info(\"checking for existing HTTP01 solver services for challenge\")\n\texistingServices, err := s.getServicesForChallenge(ctx, ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(existingServices) == 1 {\n\t\tlogf.WithRelatedResource(log, existingServices[0]).Info(\"found one existing HTTP01 solver Service for challenge resource\")\n\t\treturn existingServices[0], nil\n\t}\n\tif len(existingServices) > 1 {\n\t\tlog.V(logf.DebugLevel).Info(\"multiple challenge solver services found for challenge. cleaning up all existing services.\")\n\t\terr := s.cleanupServices(ctx, ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, fmt.Errorf(\"multiple existing challenge solver services found and cleaned up. retrying challenge sync\")\n\t}\n\n\tlog.V(logf.DebugLevel).Info(\"creating HTTP01 challenge solver service\")\n\treturn s.createService(ctx, ch)\n}\n\n\/\/ getServicesForChallenge returns a list of services that were created to solve\n\/\/ http challenges for the given domain\nfunc (s *Solver) getServicesForChallenge(ctx context.Context, ch *cmacme.Challenge) ([]*corev1.Service, error) {\n\tlog := logf.FromContext(ctx).WithName(\"getServicesForChallenge\")\n\n\tpodLabels := podLabels(ch)\n\tselector := labels.NewSelector()\n\tfor key, val := range podLabels {\n\t\treq, err := labels.NewRequirement(key, selection.Equals, []string{val})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*req)\n\t}\n\n\tserviceList, err := s.serviceLister.Services(ch.Namespace).List(selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar relevantServices []*corev1.Service\n\tfor _, service := range serviceList {\n\t\tif !metav1.IsControlledBy(service, ch) {\n\t\t\tlogf.WithRelatedResource(log, service).Info(\"found existing solver pod for this challenge resource, however \" +\n\t\t\t\t\"it does not have an appropriate OwnerReference referencing this challenge. Skipping it altogether.\")\n\t\t\tcontinue\n\t\t}\n\t\trelevantServices = append(relevantServices, service)\n\t}\n\n\treturn relevantServices, nil\n}\n\n\/\/ createService will create the service required to solve this challenge\n\/\/ in the target API server.\nfunc (s *Solver) createService(ctx context.Context, ch *cmacme.Challenge) (*corev1.Service, error) {\n\tsvc, err := buildService(ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Client.CoreV1().Services(ch.Namespace).Create(ctx, svc, metav1.CreateOptions{})\n}\n\nfunc buildService(ch *cmacme.Challenge) (*corev1.Service, error) {\n\tpodLabels := podLabels(ch)\n\tservice := &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"cm-acme-http-solver-\",\n\t\t\tNamespace: ch.Namespace,\n\t\t\tLabels: podLabels,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"auth.istio.io\/8089\": \"NONE\",\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ch, challengeGvk)},\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeNodePort,\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: acmeSolverListenPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(acmeSolverListenPort),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: podLabels,\n\t\t},\n\t}\n\n\t\/\/ checking for presence of http01 config and if set serviceType is set, override our default (NodePort)\n\tserviceType, err := getServiceType(ch)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif serviceType != \"\" {\n\t\tservice.Spec.Type = serviceType\n\t}\n\n\treturn service, nil\n}\n\nfunc (s *Solver) cleanupServices(ctx context.Context, ch *cmacme.Challenge) error {\n\tlog := logf.FromContext(ctx, \"cleanupPods\")\n\n\tservices, err := s.getServicesForChallenge(ctx, ch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar errs []error\n\tfor _, service := range services {\n\t\tlog := logf.WithRelatedResource(log, service).V(logf.DebugLevel)\n\t\tlog.V(logf.DebugLevel).Info(\"deleting service resource\")\n\n\t\terr := s.Client.CoreV1().Services(service.Namespace).Delete(ctx, service.Name, metav1.DeleteOptions{})\n\t\tif err != nil {\n\t\t\tlog.V(logf.WarnLevel).Info(\"failed to delete pod resource\", \"error\", err)\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.V(logf.DebugLevel).Info(\"successfully deleted pod resource\")\n\t}\n\treturn utilerrors.NewAggregate(errs)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage bloom provides data structures and methods for creating Bloom filters.\n\nA Bloom filter is a representation of a set of _n_ items, where the main\nrequirement is to make membership queries; _i.e._, whether an item is a\nmember of a set.\n\nA Bloom filter has two parameters: _m_, a maximum size (typically a reasonably large\nmultiple of the cardinality of the set to represent) and _k_, the number of hashing\nfunctions on elements of the set. (The actual hashing functions are important, too,\nbut this is not a parameter for this implementation). A Bloom filter is backed by\na BitSet; a key is represented in the filter by setting the bits at each value of the\nhashing functions (modulo _m_). Set membership is done by _testing_ whether the\nbits at each value of the hashing functions (again, modulo _m_) are set. If so,\nthe item is in the set. If the item is actually in the set, a Bloom filter will\nnever fail (the true positive rate is 1.0); but it is susceptible to false\npositives. The art is to choose _k_ and _m_ correctly.\n\nIn this implementation, the hashing functions used is a local version of FNV,\na non-cryptographic hashing function, seeded with the index number of\nthe kth hashing function.\n\nThis implementation accepts keys for setting as testing as []byte. Thus, to\nadd a string item, \"Love\":\n\n uint n = 1000\n filter := bloom.New(20*n, 5) \/\/ load of 20, 5 keys\n filter.Add([]byte(\"Love\"))\n\nSimilarly, to test if \"Love\" is in bloom:\n\n if filter.Test([]byte(\"Love\"))\n\nFor numeric data, I recommend that you look into the binary\/encoding library. But,\nfor example, to add a uint32 to the filter:\n\n i := uint32(100)\n n1 := make([]byte,4)\n binary.BigEndian.PutUint32(n1,i)\n f.Add(n1)\n\nFinally, there is a method to estimate the false positive rate of a particular\nBloom filter for a set of size _n_:\n\n if filter.EstimateFalsePositiveRate(1000) > 0.001\n\nGiven the particular hashing scheme, it's best to be empirical about this. Note\nthat estimating the FP rate will clear the Bloom filter.\n*\/\npackage bloom\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\n\t\"github.com\/spaolacci\/murmur3\"\n\t\"github.com\/willf\/bitset\"\n)\n\n\/\/ A BloomFilter is a representation of a set of _n_ items, where the main\n\/\/ requirement is to make membership queries; _i.e._, whether an item is a\n\/\/ member of a set.\ntype BloomFilter struct {\n\tm uint\n\tk uint\n\tb *bitset.BitSet\n}\n\n\/\/ New creates a new Bloom filter with _m_ bits and _k_ hashing functions\nfunc New(m uint, k uint) *BloomFilter {\n\treturn &BloomFilter{m, k, bitset.New(m)}\n}\n\n\/\/ From creates a new Bloom filter with len(_data_) * 64 bits and _k_ hashing\n\/\/ functions. The data slice is not going to be reset.\nfunc From(data []uint64, k uint) *BloomFilter {\n\tm := uint(len(data) * 64)\n\treturn &BloomFilter{m, k, bitset.From(data)}\n}\n\n\/\/ baseHashes returns the four hash values of data that are used to create k\n\/\/ hashes\nfunc baseHashes(data []byte) [4]uint64 {\n\ta1 := []byte{1} \/\/ to grab another bit of data\n\thasher := murmur3.New128()\n\thasher.Write(data)\n\tv1, v2 := hasher.Sum128()\n\thasher.Write(a1)\n\tv3, v4 := hasher.Sum128()\n\treturn [4]uint64{\n\t\tv1, v2, v3, v4,\n\t}\n}\n\n\/\/ location returns the ith hashed location using the four base hash values\nfunc (f *BloomFilter) location(h [4]uint64, i uint) (location uint) {\n\tii := uint64(i)\n\tlocation = uint((h[ii%2] + ii*h[2+(((ii+(ii%2))%4)\/2)]) % uint64(f.m))\n\treturn\n}\n\n\/\/ EstimateParameters estimates requirements for m and k.\n\/\/ Based on https:\/\/bitbucket.org\/ww\/bloom\/src\/829aa19d01d9\/bloom.go\n\/\/ used with permission.\nfunc EstimateParameters(n uint, p float64) (m uint, k uint) {\n\tm = uint(math.Ceil(-1 * float64(n) * math.Log(p) \/ math.Pow(math.Log(2), 2)))\n\tk = uint(math.Ceil(math.Log(2) * float64(m) \/ float64(n)))\n\treturn\n}\n\n\/\/ NewWithEstimates creates a new Bloom filter for about n items with fp\n\/\/ false positive rate\nfunc NewWithEstimates(n uint, fp float64) *BloomFilter {\n\tm, k := EstimateParameters(n, fp)\n\treturn New(m, k)\n}\n\n\/\/ Cap returns the capacity, _m_, of a Bloom filter\nfunc (f *BloomFilter) Cap() uint {\n\treturn f.m\n}\n\n\/\/ K returns the number of hash functions used in the BloomFilter\nfunc (f *BloomFilter) K() uint {\n\treturn f.k\n}\n\n\/\/ Add data to the Bloom Filter. Returns the filter (allows chaining)\nfunc (f *BloomFilter) Add(data []byte) *BloomFilter {\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tf.b.Set(f.location(h, i))\n\t}\n\treturn f\n}\n\n\/\/ Merge the data from two Bloom Filters.\nfunc (f *BloomFilter) Merge(g *BloomFilter) error {\n\t\/\/ Make sure the m's and k's are the same, otherwise merging has no real use.\n\tif f.m != g.m {\n\t\treturn fmt.Errorf(\"m's don't match: %d != %d\", f.m, g.m)\n\t}\n\n\tif f.k != g.k {\n\t\treturn fmt.Errorf(\"k's don't match: %d != %d\", f.m, g.m)\n\t}\n\n\tf.b.InPlaceUnion(g.b)\n\treturn nil\n}\n\n\/\/ Copy creates a copy of a Bloom filter.\nfunc (f *BloomFilter) Copy() *BloomFilter {\n\tfc := New(f.m, f.k)\n\tfc.Merge(f)\n\treturn fc\n}\n\n\/\/ AddString to the Bloom Filter. Returns the filter (allows chaining)\nfunc (f *BloomFilter) AddString(data string) *BloomFilter {\n\treturn f.Add([]byte(data))\n}\n\n\/\/ Test returns true if the data is in the BloomFilter, false otherwise.\n\/\/ If true, the result might be a false positive. If false, the data\n\/\/ is definitely not in the set.\nfunc (f *BloomFilter) Test(data []byte) bool {\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tif !f.b.Test(f.location(h, i)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ TestString returns true if the string is in the BloomFilter, false otherwise.\n\/\/ If true, the result might be a false positive. If false, the data\n\/\/ is definitely not in the set.\nfunc (f *BloomFilter) TestString(data string) bool {\n\treturn f.Test([]byte(data))\n}\n\n\/\/ TestAndAdd is the equivalent to calling Test(data) then Add(data).\n\/\/ Returns the result of Test.\nfunc (f *BloomFilter) TestAndAdd(data []byte) bool {\n\tpresent := true\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tl := f.location(h, i)\n\t\tif !f.b.Test(l) {\n\t\t\tpresent = false\n\t\t}\n\t\tf.b.Set(l)\n\t}\n\treturn present\n}\n\n\/\/ TestAndAddString is the equivalent to calling Test(string) then Add(string).\n\/\/ Returns the result of Test.\nfunc (f *BloomFilter) TestAndAddString(data string) bool {\n\treturn f.TestAndAdd([]byte(data))\n}\n\n\/\/ ClearAll clears all the data in a Bloom filter, removing all keys\nfunc (f *BloomFilter) ClearAll() *BloomFilter {\n\tf.b.ClearAll()\n\treturn f\n}\n\n\/\/ EstimateFalsePositiveRate returns, for a BloomFilter with a estimate of m bits\n\/\/ and k hash functions, what the false positive rate will be\n\/\/ while storing n entries; runs 100,000 tests. This is an empirical\n\/\/ test using integers as keys. As a side-effect, it clears the BloomFilter.\nfunc (f *BloomFilter) EstimateFalsePositiveRate(n uint) (fpRate float64) {\n\trounds := uint32(100000)\n\tf.ClearAll()\n\tn1 := make([]byte, 4)\n\tfor i := uint32(0); i < uint32(n); i++ {\n\t\tbinary.BigEndian.PutUint32(n1, i)\n\t\tf.Add(n1)\n\t}\n\tfp := 0\n\t\/\/ test for number of rounds\n\tfor i := uint32(0); i < rounds; i++ {\n\t\tbinary.BigEndian.PutUint32(n1, i+uint32(n)+1)\n\t\tif f.Test(n1) {\n\t\t\t\/\/fmt.Printf(\"%v failed.\\n\", i+uint32(n)+1)\n\t\t\tfp++\n\t\t}\n\t}\n\tfpRate = float64(fp) \/ (float64(rounds))\n\tf.ClearAll()\n\treturn\n}\n\n\/\/ bloomFilterJSON is an unexported type for marshaling\/unmarshaling BloomFilter struct.\ntype bloomFilterJSON struct {\n\tM uint `json:\"m\"`\n\tK uint `json:\"k\"`\n\tB *bitset.BitSet `json:\"b\"`\n}\n\n\/\/ MarshalJSON implements json.Marshaler interface.\nfunc (f *BloomFilter) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(bloomFilterJSON{f.m, f.k, f.b})\n}\n\n\/\/ UnmarshalJSON implements json.Unmarshaler interface.\nfunc (f *BloomFilter) UnmarshalJSON(data []byte) error {\n\tvar j bloomFilterJSON\n\terr := json.Unmarshal(data, &j)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.m = j.M\n\tf.k = j.K\n\tf.b = j.B\n\treturn nil\n}\n\n\/\/ WriteTo writes a binary representation of the BloomFilter to an i\/o stream.\n\/\/ It returns the number of bytes written.\nfunc (f *BloomFilter) WriteTo(stream io.Writer) (int64, error) {\n\terr := binary.Write(stream, binary.BigEndian, uint64(f.m))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = binary.Write(stream, binary.BigEndian, uint64(f.k))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tnumBytes, err := f.b.WriteTo(stream)\n\treturn numBytes + int64(2*binary.Size(uint64(0))), err\n}\n\n\/\/ ReadFrom reads a binary representation of the BloomFilter (such as might\n\/\/ have been written by WriteTo()) from an i\/o stream. It returns the number\n\/\/ of bytes read.\nfunc (f *BloomFilter) ReadFrom(stream io.Reader) (int64, error) {\n\tvar m, k uint64\n\terr := binary.Read(stream, binary.BigEndian, &m)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = binary.Read(stream, binary.BigEndian, &k)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tb := &bitset.BitSet{}\n\tnumBytes, err := b.ReadFrom(stream)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tf.m = uint(m)\n\tf.k = uint(k)\n\tf.b = b\n\treturn numBytes + int64(2*binary.Size(uint64(0))), nil\n}\n\n\/\/ GobEncode implements gob.GobEncoder interface.\nfunc (f *BloomFilter) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := f.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ GobDecode implements gob.GobDecoder interface.\nfunc (f *BloomFilter) GobDecode(data []byte) error {\n\tbuf := bytes.NewBuffer(data)\n\t_, err := f.ReadFrom(buf)\n\n\treturn err\n}\n\n\/\/ Equal tests for the equality of two Bloom filters\nfunc (f *BloomFilter) Equal(g *BloomFilter) bool {\n\treturn f.m == g.m && f.k == g.k && f.b.Equal(g.b)\n}\n<commit_msg>minor fix in the documentation<commit_after>\/*\nPackage bloom provides data structures and methods for creating Bloom filters.\n\nA Bloom filter is a representation of a set of _n_ items, where the main\nrequirement is to make membership queries; _i.e._, whether an item is a\nmember of a set.\n\nA Bloom filter has two parameters: _m_, a maximum size (typically a reasonably large\nmultiple of the cardinality of the set to represent) and _k_, the number of hashing\nfunctions on elements of the set. (The actual hashing functions are important, too,\nbut this is not a parameter for this implementation). A Bloom filter is backed by\na BitSet; a key is represented in the filter by setting the bits at each value of the\nhashing functions (modulo _m_). Set membership is done by _testing_ whether the\nbits at each value of the hashing functions (again, modulo _m_) are set. If so,\nthe item is in the set. If the item is actually in the set, a Bloom filter will\nnever fail (the true positive rate is 1.0); but it is susceptible to false\npositives. The art is to choose _k_ and _m_ correctly.\n\nIn this implementation, the hashing functions used is murmurhash,\na non-cryptographic hashing function.\n\nThis implementation accepts keys for setting as testing as []byte. Thus, to\nadd a string item, \"Love\":\n\n uint n = 1000\n filter := bloom.New(20*n, 5) \/\/ load of 20, 5 keys\n filter.Add([]byte(\"Love\"))\n\nSimilarly, to test if \"Love\" is in bloom:\n\n if filter.Test([]byte(\"Love\"))\n\nFor numeric data, I recommend that you look into the binary\/encoding library. But,\nfor example, to add a uint32 to the filter:\n\n i := uint32(100)\n n1 := make([]byte,4)\n binary.BigEndian.PutUint32(n1,i)\n f.Add(n1)\n\nFinally, there is a method to estimate the false positive rate of a particular\nBloom filter for a set of size _n_:\n\n if filter.EstimateFalsePositiveRate(1000) > 0.001\n\nGiven the particular hashing scheme, it's best to be empirical about this. Note\nthat estimating the FP rate will clear the Bloom filter.\n*\/\npackage bloom\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\n\t\"github.com\/spaolacci\/murmur3\"\n\t\"github.com\/willf\/bitset\"\n)\n\n\/\/ A BloomFilter is a representation of a set of _n_ items, where the main\n\/\/ requirement is to make membership queries; _i.e._, whether an item is a\n\/\/ member of a set.\ntype BloomFilter struct {\n\tm uint\n\tk uint\n\tb *bitset.BitSet\n}\n\n\/\/ New creates a new Bloom filter with _m_ bits and _k_ hashing functions\nfunc New(m uint, k uint) *BloomFilter {\n\treturn &BloomFilter{m, k, bitset.New(m)}\n}\n\n\/\/ From creates a new Bloom filter with len(_data_) * 64 bits and _k_ hashing\n\/\/ functions. The data slice is not going to be reset.\nfunc From(data []uint64, k uint) *BloomFilter {\n\tm := uint(len(data) * 64)\n\treturn &BloomFilter{m, k, bitset.From(data)}\n}\n\n\/\/ baseHashes returns the four hash values of data that are used to create k\n\/\/ hashes\nfunc baseHashes(data []byte) [4]uint64 {\n\ta1 := []byte{1} \/\/ to grab another bit of data\n\thasher := murmur3.New128()\n\thasher.Write(data)\n\tv1, v2 := hasher.Sum128()\n\thasher.Write(a1)\n\tv3, v4 := hasher.Sum128()\n\treturn [4]uint64{\n\t\tv1, v2, v3, v4,\n\t}\n}\n\n\/\/ location returns the ith hashed location using the four base hash values\nfunc (f *BloomFilter) location(h [4]uint64, i uint) (location uint) {\n\tii := uint64(i)\n\tlocation = uint((h[ii%2] + ii*h[2+(((ii+(ii%2))%4)\/2)]) % uint64(f.m))\n\treturn\n}\n\n\/\/ EstimateParameters estimates requirements for m and k.\n\/\/ Based on https:\/\/bitbucket.org\/ww\/bloom\/src\/829aa19d01d9\/bloom.go\n\/\/ used with permission.\nfunc EstimateParameters(n uint, p float64) (m uint, k uint) {\n\tm = uint(math.Ceil(-1 * float64(n) * math.Log(p) \/ math.Pow(math.Log(2), 2)))\n\tk = uint(math.Ceil(math.Log(2) * float64(m) \/ float64(n)))\n\treturn\n}\n\n\/\/ NewWithEstimates creates a new Bloom filter for about n items with fp\n\/\/ false positive rate\nfunc NewWithEstimates(n uint, fp float64) *BloomFilter {\n\tm, k := EstimateParameters(n, fp)\n\treturn New(m, k)\n}\n\n\/\/ Cap returns the capacity, _m_, of a Bloom filter\nfunc (f *BloomFilter) Cap() uint {\n\treturn f.m\n}\n\n\/\/ K returns the number of hash functions used in the BloomFilter\nfunc (f *BloomFilter) K() uint {\n\treturn f.k\n}\n\n\/\/ Add data to the Bloom Filter. Returns the filter (allows chaining)\nfunc (f *BloomFilter) Add(data []byte) *BloomFilter {\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tf.b.Set(f.location(h, i))\n\t}\n\treturn f\n}\n\n\/\/ Merge the data from two Bloom Filters.\nfunc (f *BloomFilter) Merge(g *BloomFilter) error {\n\t\/\/ Make sure the m's and k's are the same, otherwise merging has no real use.\n\tif f.m != g.m {\n\t\treturn fmt.Errorf(\"m's don't match: %d != %d\", f.m, g.m)\n\t}\n\n\tif f.k != g.k {\n\t\treturn fmt.Errorf(\"k's don't match: %d != %d\", f.m, g.m)\n\t}\n\n\tf.b.InPlaceUnion(g.b)\n\treturn nil\n}\n\n\/\/ Copy creates a copy of a Bloom filter.\nfunc (f *BloomFilter) Copy() *BloomFilter {\n\tfc := New(f.m, f.k)\n\tfc.Merge(f)\n\treturn fc\n}\n\n\/\/ AddString to the Bloom Filter. Returns the filter (allows chaining)\nfunc (f *BloomFilter) AddString(data string) *BloomFilter {\n\treturn f.Add([]byte(data))\n}\n\n\/\/ Test returns true if the data is in the BloomFilter, false otherwise.\n\/\/ If true, the result might be a false positive. If false, the data\n\/\/ is definitely not in the set.\nfunc (f *BloomFilter) Test(data []byte) bool {\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tif !f.b.Test(f.location(h, i)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ TestString returns true if the string is in the BloomFilter, false otherwise.\n\/\/ If true, the result might be a false positive. If false, the data\n\/\/ is definitely not in the set.\nfunc (f *BloomFilter) TestString(data string) bool {\n\treturn f.Test([]byte(data))\n}\n\n\/\/ TestAndAdd is the equivalent to calling Test(data) then Add(data).\n\/\/ Returns the result of Test.\nfunc (f *BloomFilter) TestAndAdd(data []byte) bool {\n\tpresent := true\n\th := baseHashes(data)\n\tfor i := uint(0); i < f.k; i++ {\n\t\tl := f.location(h, i)\n\t\tif !f.b.Test(l) {\n\t\t\tpresent = false\n\t\t}\n\t\tf.b.Set(l)\n\t}\n\treturn present\n}\n\n\/\/ TestAndAddString is the equivalent to calling Test(string) then Add(string).\n\/\/ Returns the result of Test.\nfunc (f *BloomFilter) TestAndAddString(data string) bool {\n\treturn f.TestAndAdd([]byte(data))\n}\n\n\/\/ ClearAll clears all the data in a Bloom filter, removing all keys\nfunc (f *BloomFilter) ClearAll() *BloomFilter {\n\tf.b.ClearAll()\n\treturn f\n}\n\n\/\/ EstimateFalsePositiveRate returns, for a BloomFilter with a estimate of m bits\n\/\/ and k hash functions, what the false positive rate will be\n\/\/ while storing n entries; runs 100,000 tests. This is an empirical\n\/\/ test using integers as keys. As a side-effect, it clears the BloomFilter.\nfunc (f *BloomFilter) EstimateFalsePositiveRate(n uint) (fpRate float64) {\n\trounds := uint32(100000)\n\tf.ClearAll()\n\tn1 := make([]byte, 4)\n\tfor i := uint32(0); i < uint32(n); i++ {\n\t\tbinary.BigEndian.PutUint32(n1, i)\n\t\tf.Add(n1)\n\t}\n\tfp := 0\n\t\/\/ test for number of rounds\n\tfor i := uint32(0); i < rounds; i++ {\n\t\tbinary.BigEndian.PutUint32(n1, i+uint32(n)+1)\n\t\tif f.Test(n1) {\n\t\t\t\/\/fmt.Printf(\"%v failed.\\n\", i+uint32(n)+1)\n\t\t\tfp++\n\t\t}\n\t}\n\tfpRate = float64(fp) \/ (float64(rounds))\n\tf.ClearAll()\n\treturn\n}\n\n\/\/ bloomFilterJSON is an unexported type for marshaling\/unmarshaling BloomFilter struct.\ntype bloomFilterJSON struct {\n\tM uint `json:\"m\"`\n\tK uint `json:\"k\"`\n\tB *bitset.BitSet `json:\"b\"`\n}\n\n\/\/ MarshalJSON implements json.Marshaler interface.\nfunc (f *BloomFilter) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(bloomFilterJSON{f.m, f.k, f.b})\n}\n\n\/\/ UnmarshalJSON implements json.Unmarshaler interface.\nfunc (f *BloomFilter) UnmarshalJSON(data []byte) error {\n\tvar j bloomFilterJSON\n\terr := json.Unmarshal(data, &j)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.m = j.M\n\tf.k = j.K\n\tf.b = j.B\n\treturn nil\n}\n\n\/\/ WriteTo writes a binary representation of the BloomFilter to an i\/o stream.\n\/\/ It returns the number of bytes written.\nfunc (f *BloomFilter) WriteTo(stream io.Writer) (int64, error) {\n\terr := binary.Write(stream, binary.BigEndian, uint64(f.m))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = binary.Write(stream, binary.BigEndian, uint64(f.k))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tnumBytes, err := f.b.WriteTo(stream)\n\treturn numBytes + int64(2*binary.Size(uint64(0))), err\n}\n\n\/\/ ReadFrom reads a binary representation of the BloomFilter (such as might\n\/\/ have been written by WriteTo()) from an i\/o stream. It returns the number\n\/\/ of bytes read.\nfunc (f *BloomFilter) ReadFrom(stream io.Reader) (int64, error) {\n\tvar m, k uint64\n\terr := binary.Read(stream, binary.BigEndian, &m)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = binary.Read(stream, binary.BigEndian, &k)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tb := &bitset.BitSet{}\n\tnumBytes, err := b.ReadFrom(stream)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tf.m = uint(m)\n\tf.k = uint(k)\n\tf.b = b\n\treturn numBytes + int64(2*binary.Size(uint64(0))), nil\n}\n\n\/\/ GobEncode implements gob.GobEncoder interface.\nfunc (f *BloomFilter) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := f.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ GobDecode implements gob.GobDecoder interface.\nfunc (f *BloomFilter) GobDecode(data []byte) error {\n\tbuf := bytes.NewBuffer(data)\n\t_, err := f.ReadFrom(buf)\n\n\treturn err\n}\n\n\/\/ Equal tests for the equality of two Bloom filters\nfunc (f *BloomFilter) Equal(g *BloomFilter) bool {\n\treturn f.m == g.m && f.k == g.k && f.b.Equal(g.b)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright AppsCode Inc. and Contributors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage tableconvertor\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"kmodules.xyz\/resource-metadata\/pkg\/tableconvertor\/printers\"\n\n\t\"github.com\/Masterminds\/sprig\/v3\"\n\tprom_op \"github.com\/prometheus-operator\/prometheus-operator\/pkg\/apis\/monitoring\/v1\"\n\t\"gomodules.xyz\/jsonpath\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\trbac \"k8s.io\/api\/rbac\/v1\"\n\tmetatable \"k8s.io\/apimachinery\/pkg\/api\/meta\/table\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/duration\"\n\tkubedb \"kubedb.dev\/apimachinery\/apis\/kubedb\/v1alpha2\"\n)\n\nvar templateFns = sprig.TxtFuncMap()\n\nfunc init() {\n\ttemplateFns[\"jp\"] = jsonpathFn\n\ttemplateFns[\"k8s_fmt_selector\"] = formatLabelSelectorFn\n\ttemplateFns[\"k8s_fmt_label\"] = formatLabelsFn\n\ttemplateFns[\"k8s_age\"] = ageFn\n\ttemplateFns[\"k8s_svc_ports\"] = servicePortsFn\n\ttemplateFns[\"k8s_container_ports\"] = containerPortFn\n\ttemplateFns[\"k8s_container_images\"] = containerImagesFn\n\ttemplateFns[\"k8s_volumes\"] = volumesFn\n\ttemplateFns[\"k8s_volumeMounts\"] = volumeMountsFn\n\ttemplateFns[\"k8s_duration\"] = durationFn\n\ttemplateFns[\"fmt_list\"] = fmtListFn\n\ttemplateFns[\"prom_ns_selector\"] = promNamespaceSelectorFn\n\ttemplateFns[\"map_key_count\"] = mapKeyCountFn\n\ttemplateFns[\"kubedb_db_mode\"] = kubedbDBModeFn\n\ttemplateFns[\"kubedb_db_replicas\"] = kubedbDBReplicasFn\n\ttemplateFns[\"kubedb_db_resources\"] = kubedbDBResourcesFn\n\ttemplateFns[\"rbac_subjects\"] = rbacSubjects\n\ttemplateFns[\"cert_validity\"] = certificateValidity\n}\n\nfunc jsonpathFn(expr string, data interface{}, jsonoutput ...bool) (interface{}, error) {\n\tenableJSONoutput := len(jsonoutput) > 0 && jsonoutput[0]\n\n\tjp := jsonpath.New(\"jp\")\n\tif err := jp.Parse(expr); err != nil {\n\t\treturn nil, fmt.Errorf(\"unrecognized column definition %q\", expr)\n\t}\n\tjp.AllowMissingKeys(true)\n\tjp.EnableJSONOutput(enableJSONoutput)\n\n\tvar buf bytes.Buffer\n\terr := jp.Execute(&buf, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif enableJSONoutput {\n\t\tvar v []interface{}\n\t\terr = json.Unmarshal(buf.Bytes(), &v)\n\t\treturn v, err\n\t}\n\treturn buf.String(), err\n}\n\nfunc formatLabelSelectorFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar sel metav1.LabelSelector\n\terr := json.Unmarshal([]byte(data), &sel)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn metav1.FormatLabelSelector(&sel), nil\n}\n\nfunc formatLabelsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar label map[string]string\n\terr := json.Unmarshal([]byte(data), &label)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn labels.FormatLabels(label), nil\n}\n\nfunc ageFn(data string) (string, error) {\n\tif data == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar timestamp metav1.Time\n\terr := timestamp.UnmarshalQueryParameter(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn metatable.ConvertToHumanReadableDateType(timestamp), nil\n}\n\nfunc servicePortsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar ports []core.ServicePort\n\terr := json.Unmarshal([]byte(data), &ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn printers.MakeServicePortString(ports), nil\n}\n\nfunc containerPortFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar ports []core.ContainerPort\n\terr := json.Unmarshal([]byte(data), &ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpieces := make([]string, len(ports))\n\tfor ix := range ports {\n\t\tport := &ports[ix]\n\t\tpieces[ix] = fmt.Sprintf(\"%d\/%s\", port.ContainerPort, port.Protocol)\n\t\tif port.HostPort > 0 {\n\t\t\tpieces[ix] = fmt.Sprintf(\"%d:%d\/%s\", port.ContainerPort, port.HostPort, port.Protocol)\n\t\t}\n\t}\n\treturn strings.Join(pieces, \",\"), nil\n}\n\nfunc volumesFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar volumes []core.Volume\n\terr := json.Unmarshal([]byte(data), &volumes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tss := \"[\"\n\tfor i := range volumes {\n\t\tss += describeVolume(volumes[i])\n\t\tif i < len(volumes)-1 {\n\t\t\tss += \",\"\n\t\t}\n\t}\n\tss += \"]\"\n\treturn ss, nil\n}\n\nfunc volumeMountsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar mounts []core.VolumeMount\n\tss := make([]string, 0)\n\terr := json.Unmarshal([]byte(data), &mounts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor i := range mounts {\n\t\tmnt := fmt.Sprintf(\"%s:%s\", mounts[i].Name, mounts[i].MountPath)\n\t\tif mounts[i].SubPath != \"\" {\n\t\t\tmnt = fmt.Sprintf(\"%s:%s:%s\", mounts[i].Name, mounts[i].MountPath, mounts[i].SubPath)\n\t\t}\n\t\tss = append(ss, mnt)\n\t}\n\treturn strings.Join(ss, \"\\n\"), nil\n}\n\nfunc fmtListFn(data string) (string, error) {\n\t\/\/ Return empty list if the data is empty. This helps to avoid object parsing error.\n\t\/\/ ref: https:\/\/stackoverflow.com\/a\/18419503\n\tif len(data) == 0 {\n\t\treturn \"[]\", nil\n\t}\n\treturn data, nil\n}\n\ntype promNamespaceSelector struct {\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec promNamespaceSelectorSpec `json:\"spec\"`\n}\ntype promNamespaceSelectorSpec struct {\n\tNamespaceSelector *prom_op.NamespaceSelector `json:\"namespaceSelector,omitempty\"`\n}\n\nfunc promNamespaceSelectorFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar selOpts promNamespaceSelector\n\terr := json.Unmarshal([]byte(data), &selOpts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ If selector field is empty, then all namespaces are the targets.\n\tif selOpts.Spec.NamespaceSelector == nil {\n\t\treturn \"All\", nil\n\t} else if len(selOpts.Spec.NamespaceSelector.MatchNames) != 0 {\n\t\t\/\/ If an array of namespace is provided, then those namespaces are the target\n\t\treturn strings.Join(selOpts.Spec.NamespaceSelector.MatchNames, \", \"), nil\n\t} else if !selOpts.Spec.NamespaceSelector.Any {\n\t\t\/\/ If \"any: false\" is set in the namespace selector field, only the object namespace is the target.\n\t\treturn selOpts.Namespace, nil\n\t}\n\treturn \"\", nil\n}\n\nfunc containerImagesFn(data string) (string, error) {\n\tvar imagesBuffer bytes.Buffer\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar containers []core.Container\n\terr := json.Unmarshal([]byte(data), &containers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor i, container := range containers {\n\t\timagesBuffer.WriteString(container.Image)\n\t\tif i != len(containers)-1 {\n\t\t\timagesBuffer.WriteString(\",\")\n\t\t}\n\t}\n\treturn imagesBuffer.String(), nil\n}\n\nfunc durationFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ make the data a valid json\n\tss := strings.Split(strings.TrimSuffix(data, \",\"), \",\")\n\tjsonData := \"[\"\n\tfor i := range ss {\n\t\tjsonData += fmt.Sprintf(\"%q\", ss[i])\n\t\tif i < len(ss)-1 {\n\t\t\tjsonData += \",\"\n\t\t}\n\t}\n\tjsonData += \"]\"\n\n\tvar tt []metav1.Time\n\terr := json.Unmarshal([]byte(jsonData), &tt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(tt) == 1 {\n\t\t\/\/ only start time is does exist\n\t\treturn duration.HumanDuration(time.Since(tt[0].Time)), nil\n\t} else {\n\t\treturn duration.HumanDuration(tt[1].Sub(tt[0].Time)), nil\n\t}\n}\n\nfunc mapKeyCountFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar m map[string]string\n\terr := json.Unmarshal([]byte(data), &m)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strconv.Itoa(len(m)), nil\n}\n\nfunc kubedbDBModeFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif db.Spec.ShardTopology != nil {\n\t\t\treturn \"Sharded\", nil\n\t\t} else if db.Spec.ReplicaSet != nil {\n\t\t\treturn \"ReplicaSet\", nil\n\t\t}\n\t\treturn \"Standalone\", nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to detectect database mode. Reason: Unknown database type\")\n}\n\nfunc kubedbDBReplicasFn(data string) (string, error) {\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif db.Spec.ShardTopology != nil {\n\t\t\tt := db.Spec.ShardTopology\n\t\t\treturn fmt.Sprintf(\"%d, %d, %d\", t.Shard.Replicas, t.ConfigServer.Replicas, t.Mongos.Replicas), nil\n\t\t} else if db.Spec.ReplicaSet != nil {\n\t\t\treturn fmt.Sprintf(\"%d\", *db.Spec.Replicas), nil\n\t\t} else {\n\t\t\treturn \"1\", nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to detect replica number. Reason: Unknown database type\")\n}\n\nfunc kubedbDBResourcesFn(data string) (string, error) {\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcpu, memory, storage := mongoDBResources(db.Spec)\n\t\treturn fmt.Sprintf(\"{%q:%q, %q:%q, %q:%q}\", core.ResourceCPU, cpu, core.ResourceMemory, memory, core.ResourceStorage, storage), nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to extract CPU information. Reason: Unknown database type\")\n}\n\nfunc rbacSubjects(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar subjects []rbac.Subject\n\terr := json.Unmarshal([]byte(data), &subjects)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar ss []string\n\tfor i := range subjects {\n\t\ts := fmt.Sprintf(\"%s %s\", subjects[i].Kind, subjects[i].Name)\n\t\tif subjects[i].Namespace != \"\" {\n\t\t\ts = fmt.Sprintf(\"%s %s\/%s\", subjects[i].Kind, subjects[i].Namespace, subjects[i].Name)\n\t\t}\n\t\tss = append(ss, s)\n\t}\n\treturn strings.Join(ss, \",\"), nil\n}\n\nfunc certificateValidity(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tcertStatus := struct {\n\t\tNotBefore metav1.Time `json:\"notBefore\"`\n\t\tNotAfter metav1.Time `json:\"notAfter\"`\n\t}{}\n\terr := json.Unmarshal([]byte(data), &certStatus)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif certStatus.NotBefore.After(time.Now()) {\n\t\treturn \"Not valid yet\", nil\n\t} else if time.Now().After(certStatus.NotAfter.Time) {\n\t\treturn \"Expired\", nil\n\t}\n\treturn duration.HumanDuration(time.Until(certStatus.NotAfter.Time)), nil\n}\n<commit_msg>Add TxtFuncMap (#124)<commit_after>\/*\nCopyright AppsCode Inc. and Contributors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage tableconvertor\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"kmodules.xyz\/resource-metadata\/pkg\/tableconvertor\/printers\"\n\n\t\"github.com\/Masterminds\/sprig\/v3\"\n\tprom_op \"github.com\/prometheus-operator\/prometheus-operator\/pkg\/apis\/monitoring\/v1\"\n\t\"gomodules.xyz\/jsonpath\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\trbac \"k8s.io\/api\/rbac\/v1\"\n\tmetatable \"k8s.io\/apimachinery\/pkg\/api\/meta\/table\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/duration\"\n\tkubedb \"kubedb.dev\/apimachinery\/apis\/kubedb\/v1alpha2\"\n)\n\nvar templateFns = sprig.TxtFuncMap()\n\nfunc init() {\n\ttemplateFns[\"jp\"] = jsonpathFn\n\ttemplateFns[\"k8s_fmt_selector\"] = formatLabelSelectorFn\n\ttemplateFns[\"k8s_fmt_label\"] = formatLabelsFn\n\ttemplateFns[\"k8s_age\"] = ageFn\n\ttemplateFns[\"k8s_svc_ports\"] = servicePortsFn\n\ttemplateFns[\"k8s_container_ports\"] = containerPortFn\n\ttemplateFns[\"k8s_container_images\"] = containerImagesFn\n\ttemplateFns[\"k8s_volumes\"] = volumesFn\n\ttemplateFns[\"k8s_volumeMounts\"] = volumeMountsFn\n\ttemplateFns[\"k8s_duration\"] = durationFn\n\ttemplateFns[\"fmt_list\"] = fmtListFn\n\ttemplateFns[\"prom_ns_selector\"] = promNamespaceSelectorFn\n\ttemplateFns[\"map_key_count\"] = mapKeyCountFn\n\ttemplateFns[\"kubedb_db_mode\"] = kubedbDBModeFn\n\ttemplateFns[\"kubedb_db_replicas\"] = kubedbDBReplicasFn\n\ttemplateFns[\"kubedb_db_resources\"] = kubedbDBResourcesFn\n\ttemplateFns[\"rbac_subjects\"] = rbacSubjects\n\ttemplateFns[\"cert_validity\"] = certificateValidity\n}\n\n\/\/ TxtFuncMap returns a 'text\/template'.FuncMap\nfunc TxtFuncMap() template.FuncMap {\n\tgfm := make(map[string]interface{}, len(templateFns))\n\tfor k, v := range templateFns {\n\t\tgfm[k] = v\n\t}\n\treturn gfm\n}\n\nfunc jsonpathFn(expr string, data interface{}, jsonoutput ...bool) (interface{}, error) {\n\tenableJSONoutput := len(jsonoutput) > 0 && jsonoutput[0]\n\n\tjp := jsonpath.New(\"jp\")\n\tif err := jp.Parse(expr); err != nil {\n\t\treturn nil, fmt.Errorf(\"unrecognized column definition %q\", expr)\n\t}\n\tjp.AllowMissingKeys(true)\n\tjp.EnableJSONOutput(enableJSONoutput)\n\n\tvar buf bytes.Buffer\n\terr := jp.Execute(&buf, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif enableJSONoutput {\n\t\tvar v []interface{}\n\t\terr = json.Unmarshal(buf.Bytes(), &v)\n\t\treturn v, err\n\t}\n\treturn buf.String(), err\n}\n\nfunc formatLabelSelectorFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar sel metav1.LabelSelector\n\terr := json.Unmarshal([]byte(data), &sel)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn metav1.FormatLabelSelector(&sel), nil\n}\n\nfunc formatLabelsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar label map[string]string\n\terr := json.Unmarshal([]byte(data), &label)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn labels.FormatLabels(label), nil\n}\n\nfunc ageFn(data string) (string, error) {\n\tif data == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar timestamp metav1.Time\n\terr := timestamp.UnmarshalQueryParameter(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn metatable.ConvertToHumanReadableDateType(timestamp), nil\n}\n\nfunc servicePortsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar ports []core.ServicePort\n\terr := json.Unmarshal([]byte(data), &ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn printers.MakeServicePortString(ports), nil\n}\n\nfunc containerPortFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar ports []core.ContainerPort\n\terr := json.Unmarshal([]byte(data), &ports)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpieces := make([]string, len(ports))\n\tfor ix := range ports {\n\t\tport := &ports[ix]\n\t\tpieces[ix] = fmt.Sprintf(\"%d\/%s\", port.ContainerPort, port.Protocol)\n\t\tif port.HostPort > 0 {\n\t\t\tpieces[ix] = fmt.Sprintf(\"%d:%d\/%s\", port.ContainerPort, port.HostPort, port.Protocol)\n\t\t}\n\t}\n\treturn strings.Join(pieces, \",\"), nil\n}\n\nfunc volumesFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar volumes []core.Volume\n\terr := json.Unmarshal([]byte(data), &volumes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tss := \"[\"\n\tfor i := range volumes {\n\t\tss += describeVolume(volumes[i])\n\t\tif i < len(volumes)-1 {\n\t\t\tss += \",\"\n\t\t}\n\t}\n\tss += \"]\"\n\treturn ss, nil\n}\n\nfunc volumeMountsFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar mounts []core.VolumeMount\n\tss := make([]string, 0)\n\terr := json.Unmarshal([]byte(data), &mounts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor i := range mounts {\n\t\tmnt := fmt.Sprintf(\"%s:%s\", mounts[i].Name, mounts[i].MountPath)\n\t\tif mounts[i].SubPath != \"\" {\n\t\t\tmnt = fmt.Sprintf(\"%s:%s:%s\", mounts[i].Name, mounts[i].MountPath, mounts[i].SubPath)\n\t\t}\n\t\tss = append(ss, mnt)\n\t}\n\treturn strings.Join(ss, \"\\n\"), nil\n}\n\nfunc fmtListFn(data string) (string, error) {\n\t\/\/ Return empty list if the data is empty. This helps to avoid object parsing error.\n\t\/\/ ref: https:\/\/stackoverflow.com\/a\/18419503\n\tif len(data) == 0 {\n\t\treturn \"[]\", nil\n\t}\n\treturn data, nil\n}\n\ntype promNamespaceSelector struct {\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec promNamespaceSelectorSpec `json:\"spec\"`\n}\ntype promNamespaceSelectorSpec struct {\n\tNamespaceSelector *prom_op.NamespaceSelector `json:\"namespaceSelector,omitempty\"`\n}\n\nfunc promNamespaceSelectorFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar selOpts promNamespaceSelector\n\terr := json.Unmarshal([]byte(data), &selOpts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ If selector field is empty, then all namespaces are the targets.\n\tif selOpts.Spec.NamespaceSelector == nil {\n\t\treturn \"All\", nil\n\t} else if len(selOpts.Spec.NamespaceSelector.MatchNames) != 0 {\n\t\t\/\/ If an array of namespace is provided, then those namespaces are the target\n\t\treturn strings.Join(selOpts.Spec.NamespaceSelector.MatchNames, \", \"), nil\n\t} else if !selOpts.Spec.NamespaceSelector.Any {\n\t\t\/\/ If \"any: false\" is set in the namespace selector field, only the object namespace is the target.\n\t\treturn selOpts.Namespace, nil\n\t}\n\treturn \"\", nil\n}\n\nfunc containerImagesFn(data string) (string, error) {\n\tvar imagesBuffer bytes.Buffer\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar containers []core.Container\n\terr := json.Unmarshal([]byte(data), &containers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor i, container := range containers {\n\t\timagesBuffer.WriteString(container.Image)\n\t\tif i != len(containers)-1 {\n\t\t\timagesBuffer.WriteString(\",\")\n\t\t}\n\t}\n\treturn imagesBuffer.String(), nil\n}\n\nfunc durationFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ make the data a valid json\n\tss := strings.Split(strings.TrimSuffix(data, \",\"), \",\")\n\tjsonData := \"[\"\n\tfor i := range ss {\n\t\tjsonData += fmt.Sprintf(\"%q\", ss[i])\n\t\tif i < len(ss)-1 {\n\t\t\tjsonData += \",\"\n\t\t}\n\t}\n\tjsonData += \"]\"\n\n\tvar tt []metav1.Time\n\terr := json.Unmarshal([]byte(jsonData), &tt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(tt) == 1 {\n\t\t\/\/ only start time is does exist\n\t\treturn duration.HumanDuration(time.Since(tt[0].Time)), nil\n\t} else {\n\t\treturn duration.HumanDuration(tt[1].Sub(tt[0].Time)), nil\n\t}\n}\n\nfunc mapKeyCountFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar m map[string]string\n\terr := json.Unmarshal([]byte(data), &m)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strconv.Itoa(len(m)), nil\n}\n\nfunc kubedbDBModeFn(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif db.Spec.ShardTopology != nil {\n\t\t\treturn \"Sharded\", nil\n\t\t} else if db.Spec.ReplicaSet != nil {\n\t\t\treturn \"ReplicaSet\", nil\n\t\t}\n\t\treturn \"Standalone\", nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to detectect database mode. Reason: Unknown database type\")\n}\n\nfunc kubedbDBReplicasFn(data string) (string, error) {\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif db.Spec.ShardTopology != nil {\n\t\t\tt := db.Spec.ShardTopology\n\t\t\treturn fmt.Sprintf(\"%d, %d, %d\", t.Shard.Replicas, t.ConfigServer.Replicas, t.Mongos.Replicas), nil\n\t\t} else if db.Spec.ReplicaSet != nil {\n\t\t\treturn fmt.Sprintf(\"%d\", *db.Spec.Replicas), nil\n\t\t} else {\n\t\t\treturn \"1\", nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to detect replica number. Reason: Unknown database type\")\n}\n\nfunc kubedbDBResourcesFn(data string) (string, error) {\n\tvar obj unstructured.Unstructured\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch obj.GetKind() {\n\tcase kubedb.ResourceKindMongoDB:\n\t\tdb := new(kubedb.MongoDB)\n\t\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), db)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcpu, memory, storage := mongoDBResources(db.Spec)\n\t\treturn fmt.Sprintf(\"{%q:%q, %q:%q, %q:%q}\", core.ResourceCPU, cpu, core.ResourceMemory, memory, core.ResourceStorage, storage), nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to extract CPU information. Reason: Unknown database type\")\n}\n\nfunc rbacSubjects(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tvar subjects []rbac.Subject\n\terr := json.Unmarshal([]byte(data), &subjects)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar ss []string\n\tfor i := range subjects {\n\t\ts := fmt.Sprintf(\"%s %s\", subjects[i].Kind, subjects[i].Name)\n\t\tif subjects[i].Namespace != \"\" {\n\t\t\ts = fmt.Sprintf(\"%s %s\/%s\", subjects[i].Kind, subjects[i].Namespace, subjects[i].Name)\n\t\t}\n\t\tss = append(ss, s)\n\t}\n\treturn strings.Join(ss, \",\"), nil\n}\n\nfunc certificateValidity(data string) (string, error) {\n\tif strings.TrimSpace(data) == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tcertStatus := struct {\n\t\tNotBefore metav1.Time `json:\"notBefore\"`\n\t\tNotAfter metav1.Time `json:\"notAfter\"`\n\t}{}\n\terr := json.Unmarshal([]byte(data), &certStatus)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif certStatus.NotBefore.After(time.Now()) {\n\t\treturn \"Not valid yet\", nil\n\t} else if time.Now().After(certStatus.NotAfter.Time) {\n\t\treturn \"Expired\", nil\n\t}\n\treturn duration.HumanDuration(time.Until(certStatus.NotAfter.Time)), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package amqp\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\/errs\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\/iface\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/common\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/log\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/tasks\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype AMQPConnection struct {\n\tqueueName string\n\tconnection *amqp.Connection\n\tchannel *amqp.Channel\n\tqueue amqp.Queue\n\tconfirmation <-chan amqp.Confirmation\n\terrorchan <-chan *amqp.Error\n\tcleanup chan struct{}\n}\n\n\/\/ Broker represents an AMQP broker\ntype Broker struct {\n\tcommon.Broker\n\tcommon.AMQPConnector\n\tprocessingWG sync.WaitGroup \/\/ use wait group to make sure task processing completes on interrupt signal\n\n\tconnections map[string]*AMQPConnection\n\tconnectionsMutex sync.RWMutex\n}\n\n\/\/ New creates new Broker instance\nfunc New(cnf *config.Config) iface.Broker {\n\treturn &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)}\n}\n\n\/\/ StartConsuming enters a loop and waits for incoming messages\nfunc (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {\n\tb.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)\n\n\tqueueName := taskProcessor.CustomQueue()\n\tif queueName == \"\" {\n\t\tqueueName = b.GetConfig().DefaultQueue\n\t}\n\n\tconn, channel, queue, _, amqpCloseChan, err := b.Connect(\n\t\tb.GetConfig().Broker,\n\t\tb.GetConfig().TLSConfig,\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\tqueueName, \/\/ queue name\n\t\ttrue, \/\/ queue durable\n\t\tfalse, \/\/ queue delete when unused\n\t\tb.GetConfig().AMQP.BindingKey, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\tb.GetRetryFunc()(b.GetRetryStopChan())\n\t\treturn b.GetRetry(), err\n\t}\n\tdefer b.Close(channel, conn)\n\n\tif err = channel.Qos(\n\t\tb.GetConfig().AMQP.PrefetchCount,\n\t\t0, \/\/ prefetch size\n\t\tfalse, \/\/ global\n\t); err != nil {\n\t\treturn b.GetRetry(), fmt.Errorf(\"Channel qos error: %s\", err)\n\t}\n\n\tdeliveries, err := channel.Consume(\n\t\tqueue.Name, \/\/ queue\n\t\tconsumerTag, \/\/ consumer tag\n\t\tfalse, \/\/ auto-ack\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-local\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn b.GetRetry(), fmt.Errorf(\"Queue consume error: %s\", err)\n\t}\n\n\tlog.INFO.Print(\"[*] Waiting for messages. To exit press CTRL+C\")\n\n\tif err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil {\n\t\treturn b.GetRetry(), err\n\t}\n\n\t\/\/ Waiting for any tasks being processed to finish\n\tb.processingWG.Wait()\n\n\treturn b.GetRetry(), nil\n}\n\n\/\/ StopConsuming quits the loop\nfunc (b *Broker) StopConsuming() {\n\tb.Broker.StopConsuming()\n\n\t\/\/ Waiting for any tasks being processed to finish\n\tb.processingWG.Wait()\n}\n\n\/\/ GetOrOpenConnection will return a connection on a particular queue name. Open connections\n\/\/ are saved to avoid having to reopen connection for multiple queues\nfunc (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) {\n\tvar err error\n\n\tb.connectionsMutex.Lock()\n\tdefer b.connectionsMutex.Unlock()\n\n\tconn, ok := b.connections[queueName]\n\tif !ok {\n\t\tconn = &AMQPConnection{\n\t\t\tqueueName: queueName,\n\t\t\tcleanup: make(chan struct{}),\n\t\t}\n\t\tconn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect(\n\t\t\tb.GetConfig().Broker,\n\t\t\tb.GetConfig().TLSConfig,\n\t\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\t\tqueueName, \/\/ queue name\n\t\t\ttrue, \/\/ queue durable\n\t\t\tfalse, \/\/ queue delete when unused\n\t\t\tqueueBindingKey, \/\/ queue binding key\n\t\t\texchangeDeclareArgs, \/\/ exchange declare args\n\t\t\tqueueDeclareArgs, \/\/ queue declare args\n\t\t\tqueueBindingArgs, \/\/ queue binding args\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed to connect to queue %s\", queueName)\n\t\t}\n\n\t\t\/\/ Reconnect to the channel if it disconnects\/errors out\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase err = <-conn.errorchan:\n\t\t\t\tlog.INFO.Printf(\"Error occured on queue: %s. Reconnecting\", queueName)\n\t\t\t\tb.connectionsMutex.Lock()\n\t\t\t\tdelete(b.connections, queueName)\n\t\t\t\tb.connectionsMutex.Unlock()\n\t\t\t\t_, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.ERROR.Printf(\"Failed to reopen queue: %s.\", queueName)\n\t\t\t\t}\n\t\t\tcase <-conn.cleanup:\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}()\n\t\tb.connections[queueName] = conn\n\t}\n\treturn conn, nil\n}\n\nfunc (b *Broker) CloseConnections() error {\n\tb.connectionsMutex.Lock()\n\tdefer b.connectionsMutex.Unlock()\n\n\tfor key, conn := range b.connections {\n\t\tif err := b.Close(conn.channel, conn.connection); err != nil {\n\t\t\tlog.ERROR.Print(\"Failed to close channel\")\n\t\t\treturn nil\n\t\t}\n\t\tclose(conn.cleanup)\n\t\tdelete(b.connections, key)\n\t}\n\treturn nil\n}\n\n\/\/ Publish places a new message on the default queue\nfunc (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {\n\t\/\/ Adjust routing key (this decides which queue the message will be published to)\n\tb.AdjustRoutingKey(signature)\n\n\tmsg, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON marshal error: %s\", err)\n\t}\n\n\t\/\/ Check the ETA signature field, if it is set and it is in the future,\n\t\/\/ delay the task\n\tif signature.ETA != nil {\n\t\tnow := time.Now().UTC()\n\n\t\tif signature.ETA.After(now) {\n\t\t\tdelayMs := int64(signature.ETA.Sub(now) \/ time.Millisecond)\n\n\t\t\treturn b.delay(signature, delayMs)\n\t\t}\n\t}\n\n\tqueue := b.GetConfig().DefaultQueue\n\tbindingKey := b.GetConfig().AMQP.BindingKey \/\/ queue binding key\n\tif b.isDirectExchange() {\n\t\tqueue = signature.RoutingKey\n\t\tbindingKey = signature.RoutingKey\n\t}\n\n\tconnection, err := b.GetOrOpenConnection(\n\t\tqueue,\n\t\tbindingKey, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to get a connection for queue %s\", queue)\n\t}\n\n\tchannel := connection.channel\n\tconfirmsChan := connection.confirmation\n\n\tif err := channel.Publish(\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tsignature.RoutingKey, \/\/ routing key\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table(signature.Headers),\n\t\t\tContentType: \"application\/json\",\n\t\t\tBody: msg,\n\t\t\tPriority: signature.Priority,\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t},\n\t); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to publish task\")\n\t}\n\n\tconfirmed := <-confirmsChan\n\n\tif confirmed.Ack {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"Failed delivery of delivery tag: %v\", confirmed.DeliveryTag)\n}\n\n\/\/ consume takes delivered messages from the channel and manages a worker pool\n\/\/ to process tasks concurrently\nfunc (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error {\n\tpool := make(chan struct{}, concurrency)\n\n\t\/\/ initialize worker pool with maxWorkers workers\n\tgo func() {\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tpool <- struct{}{}\n\t\t}\n\t}()\n\n\t\/\/ make channel with a capacity makes it become a buffered channel so that a worker which wants to\n\t\/\/ push an error to `errorsChan` doesn't need to be blocked while the for-loop is blocked waiting\n\t\/\/ a worker, that is, it avoids a possible deadlock\n\terrorsChan := make(chan error, 1)\n\n\tfor {\n\t\tselect {\n\t\tcase amqpErr := <-amqpCloseChan:\n\t\t\treturn amqpErr\n\t\tcase err := <-errorsChan:\n\t\t\treturn err\n\t\tcase d := <-deliveries:\n\t\t\tif concurrency > 0 {\n\t\t\t\t\/\/ get worker from pool (blocks until one is available)\n\t\t\t\t<-pool\n\t\t\t}\n\n\t\t\tb.processingWG.Add(1)\n\n\t\t\t\/\/ Consume the task inside a gotourine so multiple tasks\n\t\t\t\/\/ can be processed concurrently\n\t\t\tgo func() {\n\t\t\t\tif err := b.consumeOne(d, taskProcessor); err != nil {\n\t\t\t\t\terrorsChan <- err\n\t\t\t\t}\n\n\t\t\t\tb.processingWG.Done()\n\n\t\t\t\tif concurrency > 0 {\n\t\t\t\t\t\/\/ give worker back to pool\n\t\t\t\t\tpool <- struct{}{}\n\t\t\t\t}\n\t\t\t}()\n\t\tcase <-b.GetStopChan():\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ consumeOne processes a single message using TaskProcessor\nfunc (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor) error {\n\tif len(delivery.Body) == 0 {\n\t\tdelivery.Nack(true, false) \/\/ multiple, requeue\n\t\treturn errors.New(\"Received an empty message\") \/\/ RabbitMQ down?\n\t}\n\n\tvar multiple, requeue = false, false\n\n\t\/\/ Unmarshal message body into signature struct\n\tsignature := new(tasks.Signature)\n\tdecoder := json.NewDecoder(bytes.NewReader(delivery.Body))\n\tdecoder.UseNumber()\n\tif err := decoder.Decode(signature); err != nil {\n\t\tdelivery.Nack(multiple, requeue)\n\t\treturn errs.NewErrCouldNotUnmarshaTaskSignature(delivery.Body, err)\n\t}\n\n\t\/\/ If the task is not registered, we nack it and requeue,\n\t\/\/ there might be different workers for processing specific tasks\n\tif !b.IsTaskRegistered(signature.Name) {\n\t\tif !delivery.Redelivered {\n\t\t\trequeue = true\n\t\t\tlog.INFO.Printf(\"Task not registered with this worker. Requeing message: %s\", delivery.Body)\n\t\t}\n\t\tif !signature.IgnoreWhenTaskNotRegistered {\n\t\t\tdelivery.Nack(multiple, requeue)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlog.INFO.Printf(\"Received new message: %s\", delivery.Body)\n\n\terr := taskProcessor.Process(signature)\n\tdelivery.Ack(multiple)\n\treturn err\n}\n\n\/\/ delay a task by delayDuration miliseconds, the way it works is a new queue\n\/\/ is created without any consumers, the message is then published to this queue\n\/\/ with appropriate ttl expiration headers, after the expiration, it is sent to\n\/\/ the proper queue with consumers\nfunc (b *Broker) delay(signature *tasks.Signature, delayMs int64) error {\n\tif delayMs <= 0 {\n\t\treturn errors.New(\"Cannot delay task by 0ms\")\n\t}\n\n\tmessage, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON marshal error: %s\", err)\n\t}\n\n\t\/\/ It's necessary to redeclare the queue each time (to zero its TTL timer).\n\tqueueName := fmt.Sprintf(\n\t\t\"delay.%d.%s.%s\",\n\t\tdelayMs, \/\/ delay duration in mileseconds\n\t\tb.GetConfig().AMQP.Exchange,\n\t\tsignature.RoutingKey, \/\/ routing key\n\t)\n\tdeclareQueueArgs := amqp.Table{\n\t\t\/\/ Exchange where to send messages after TTL expiration.\n\t\t\"x-dead-letter-exchange\": b.GetConfig().AMQP.Exchange,\n\t\t\/\/ Routing key which use when resending expired messages.\n\t\t\"x-dead-letter-routing-key\": signature.RoutingKey,\n\t\t\/\/ Time in milliseconds\n\t\t\/\/ after that message will expire and be sent to destination.\n\t\t\"x-message-ttl\": delayMs,\n\t\t\/\/ Time after that the queue will be deleted.\n\t\t\"x-expires\": delayMs * 2,\n\t}\n\tconn, channel, _, _, _, err := b.Connect(\n\t\tb.GetConfig().Broker,\n\t\tb.GetConfig().TLSConfig,\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\tqueueName, \/\/ queue name\n\t\ttrue, \/\/ queue durable\n\t\tb.GetConfig().AMQP.AutoDelete, \/\/ queue delete when unused\n\t\tqueueName, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tdeclareQueueArgs, \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer b.Close(channel, conn)\n\n\tif err := channel.Publish(\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange\n\t\tqueueName, \/\/ routing key\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table(signature.Headers),\n\t\t\tContentType: \"application\/json\",\n\t\t\tBody: message,\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t},\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Broker) isDirectExchange() bool {\n\treturn b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == \"direct\"\n}\n\n\/\/ AdjustRoutingKey makes sure the routing key is correct.\n\/\/ If the routing key is an empty string:\n\/\/ a) set it to binding key for direct exchange type\n\/\/ b) set it to default queue name\nfunc (b *Broker) AdjustRoutingKey(s *tasks.Signature) {\n\tif s.RoutingKey != \"\" {\n\t\treturn\n\t}\n\n\tif b.isDirectExchange() {\n\t\t\/\/ The routing algorithm behind a direct exchange is simple - a message goes\n\t\t\/\/ to the queues whose binding key exactly matches the routing key of the message.\n\t\ts.RoutingKey = b.GetConfig().AMQP.BindingKey\n\t\treturn\n\t}\n\n\ts.RoutingKey = b.GetConfig().DefaultQueue\n}\n<commit_msg>Fix typo 'occured' to 'occurred'<commit_after>package amqp\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\/errs\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\/iface\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/common\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/log\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/tasks\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype AMQPConnection struct {\n\tqueueName string\n\tconnection *amqp.Connection\n\tchannel *amqp.Channel\n\tqueue amqp.Queue\n\tconfirmation <-chan amqp.Confirmation\n\terrorchan <-chan *amqp.Error\n\tcleanup chan struct{}\n}\n\n\/\/ Broker represents an AMQP broker\ntype Broker struct {\n\tcommon.Broker\n\tcommon.AMQPConnector\n\tprocessingWG sync.WaitGroup \/\/ use wait group to make sure task processing completes on interrupt signal\n\n\tconnections map[string]*AMQPConnection\n\tconnectionsMutex sync.RWMutex\n}\n\n\/\/ New creates new Broker instance\nfunc New(cnf *config.Config) iface.Broker {\n\treturn &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)}\n}\n\n\/\/ StartConsuming enters a loop and waits for incoming messages\nfunc (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {\n\tb.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)\n\n\tqueueName := taskProcessor.CustomQueue()\n\tif queueName == \"\" {\n\t\tqueueName = b.GetConfig().DefaultQueue\n\t}\n\n\tconn, channel, queue, _, amqpCloseChan, err := b.Connect(\n\t\tb.GetConfig().Broker,\n\t\tb.GetConfig().TLSConfig,\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\tqueueName, \/\/ queue name\n\t\ttrue, \/\/ queue durable\n\t\tfalse, \/\/ queue delete when unused\n\t\tb.GetConfig().AMQP.BindingKey, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\tb.GetRetryFunc()(b.GetRetryStopChan())\n\t\treturn b.GetRetry(), err\n\t}\n\tdefer b.Close(channel, conn)\n\n\tif err = channel.Qos(\n\t\tb.GetConfig().AMQP.PrefetchCount,\n\t\t0, \/\/ prefetch size\n\t\tfalse, \/\/ global\n\t); err != nil {\n\t\treturn b.GetRetry(), fmt.Errorf(\"Channel qos error: %s\", err)\n\t}\n\n\tdeliveries, err := channel.Consume(\n\t\tqueue.Name, \/\/ queue\n\t\tconsumerTag, \/\/ consumer tag\n\t\tfalse, \/\/ auto-ack\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-local\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn b.GetRetry(), fmt.Errorf(\"Queue consume error: %s\", err)\n\t}\n\n\tlog.INFO.Print(\"[*] Waiting for messages. To exit press CTRL+C\")\n\n\tif err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil {\n\t\treturn b.GetRetry(), err\n\t}\n\n\t\/\/ Waiting for any tasks being processed to finish\n\tb.processingWG.Wait()\n\n\treturn b.GetRetry(), nil\n}\n\n\/\/ StopConsuming quits the loop\nfunc (b *Broker) StopConsuming() {\n\tb.Broker.StopConsuming()\n\n\t\/\/ Waiting for any tasks being processed to finish\n\tb.processingWG.Wait()\n}\n\n\/\/ GetOrOpenConnection will return a connection on a particular queue name. Open connections\n\/\/ are saved to avoid having to reopen connection for multiple queues\nfunc (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) {\n\tvar err error\n\n\tb.connectionsMutex.Lock()\n\tdefer b.connectionsMutex.Unlock()\n\n\tconn, ok := b.connections[queueName]\n\tif !ok {\n\t\tconn = &AMQPConnection{\n\t\t\tqueueName: queueName,\n\t\t\tcleanup: make(chan struct{}),\n\t\t}\n\t\tconn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect(\n\t\t\tb.GetConfig().Broker,\n\t\t\tb.GetConfig().TLSConfig,\n\t\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\t\tqueueName, \/\/ queue name\n\t\t\ttrue, \/\/ queue durable\n\t\t\tfalse, \/\/ queue delete when unused\n\t\t\tqueueBindingKey, \/\/ queue binding key\n\t\t\texchangeDeclareArgs, \/\/ exchange declare args\n\t\t\tqueueDeclareArgs, \/\/ queue declare args\n\t\t\tqueueBindingArgs, \/\/ queue binding args\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed to connect to queue %s\", queueName)\n\t\t}\n\n\t\t\/\/ Reconnect to the channel if it disconnects\/errors out\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase err = <-conn.errorchan:\n\t\t\t\tlog.INFO.Printf(\"Error occurred on queue: %s. Reconnecting\", queueName)\n\t\t\t\tb.connectionsMutex.Lock()\n\t\t\t\tdelete(b.connections, queueName)\n\t\t\t\tb.connectionsMutex.Unlock()\n\t\t\t\t_, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.ERROR.Printf(\"Failed to reopen queue: %s.\", queueName)\n\t\t\t\t}\n\t\t\tcase <-conn.cleanup:\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}()\n\t\tb.connections[queueName] = conn\n\t}\n\treturn conn, nil\n}\n\nfunc (b *Broker) CloseConnections() error {\n\tb.connectionsMutex.Lock()\n\tdefer b.connectionsMutex.Unlock()\n\n\tfor key, conn := range b.connections {\n\t\tif err := b.Close(conn.channel, conn.connection); err != nil {\n\t\t\tlog.ERROR.Print(\"Failed to close channel\")\n\t\t\treturn nil\n\t\t}\n\t\tclose(conn.cleanup)\n\t\tdelete(b.connections, key)\n\t}\n\treturn nil\n}\n\n\/\/ Publish places a new message on the default queue\nfunc (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {\n\t\/\/ Adjust routing key (this decides which queue the message will be published to)\n\tb.AdjustRoutingKey(signature)\n\n\tmsg, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON marshal error: %s\", err)\n\t}\n\n\t\/\/ Check the ETA signature field, if it is set and it is in the future,\n\t\/\/ delay the task\n\tif signature.ETA != nil {\n\t\tnow := time.Now().UTC()\n\n\t\tif signature.ETA.After(now) {\n\t\t\tdelayMs := int64(signature.ETA.Sub(now) \/ time.Millisecond)\n\n\t\t\treturn b.delay(signature, delayMs)\n\t\t}\n\t}\n\n\tqueue := b.GetConfig().DefaultQueue\n\tbindingKey := b.GetConfig().AMQP.BindingKey \/\/ queue binding key\n\tif b.isDirectExchange() {\n\t\tqueue = signature.RoutingKey\n\t\tbindingKey = signature.RoutingKey\n\t}\n\n\tconnection, err := b.GetOrOpenConnection(\n\t\tqueue,\n\t\tbindingKey, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueDeclareArgs), \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to get a connection for queue %s\", queue)\n\t}\n\n\tchannel := connection.channel\n\tconfirmsChan := connection.confirmation\n\n\tif err := channel.Publish(\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tsignature.RoutingKey, \/\/ routing key\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table(signature.Headers),\n\t\t\tContentType: \"application\/json\",\n\t\t\tBody: msg,\n\t\t\tPriority: signature.Priority,\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t},\n\t); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to publish task\")\n\t}\n\n\tconfirmed := <-confirmsChan\n\n\tif confirmed.Ack {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"Failed delivery of delivery tag: %v\", confirmed.DeliveryTag)\n}\n\n\/\/ consume takes delivered messages from the channel and manages a worker pool\n\/\/ to process tasks concurrently\nfunc (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error {\n\tpool := make(chan struct{}, concurrency)\n\n\t\/\/ initialize worker pool with maxWorkers workers\n\tgo func() {\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tpool <- struct{}{}\n\t\t}\n\t}()\n\n\t\/\/ make channel with a capacity makes it become a buffered channel so that a worker which wants to\n\t\/\/ push an error to `errorsChan` doesn't need to be blocked while the for-loop is blocked waiting\n\t\/\/ a worker, that is, it avoids a possible deadlock\n\terrorsChan := make(chan error, 1)\n\n\tfor {\n\t\tselect {\n\t\tcase amqpErr := <-amqpCloseChan:\n\t\t\treturn amqpErr\n\t\tcase err := <-errorsChan:\n\t\t\treturn err\n\t\tcase d := <-deliveries:\n\t\t\tif concurrency > 0 {\n\t\t\t\t\/\/ get worker from pool (blocks until one is available)\n\t\t\t\t<-pool\n\t\t\t}\n\n\t\t\tb.processingWG.Add(1)\n\n\t\t\t\/\/ Consume the task inside a gotourine so multiple tasks\n\t\t\t\/\/ can be processed concurrently\n\t\t\tgo func() {\n\t\t\t\tif err := b.consumeOne(d, taskProcessor); err != nil {\n\t\t\t\t\terrorsChan <- err\n\t\t\t\t}\n\n\t\t\t\tb.processingWG.Done()\n\n\t\t\t\tif concurrency > 0 {\n\t\t\t\t\t\/\/ give worker back to pool\n\t\t\t\t\tpool <- struct{}{}\n\t\t\t\t}\n\t\t\t}()\n\t\tcase <-b.GetStopChan():\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ consumeOne processes a single message using TaskProcessor\nfunc (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor) error {\n\tif len(delivery.Body) == 0 {\n\t\tdelivery.Nack(true, false) \/\/ multiple, requeue\n\t\treturn errors.New(\"Received an empty message\") \/\/ RabbitMQ down?\n\t}\n\n\tvar multiple, requeue = false, false\n\n\t\/\/ Unmarshal message body into signature struct\n\tsignature := new(tasks.Signature)\n\tdecoder := json.NewDecoder(bytes.NewReader(delivery.Body))\n\tdecoder.UseNumber()\n\tif err := decoder.Decode(signature); err != nil {\n\t\tdelivery.Nack(multiple, requeue)\n\t\treturn errs.NewErrCouldNotUnmarshaTaskSignature(delivery.Body, err)\n\t}\n\n\t\/\/ If the task is not registered, we nack it and requeue,\n\t\/\/ there might be different workers for processing specific tasks\n\tif !b.IsTaskRegistered(signature.Name) {\n\t\tif !delivery.Redelivered {\n\t\t\trequeue = true\n\t\t\tlog.INFO.Printf(\"Task not registered with this worker. Requeing message: %s\", delivery.Body)\n\t\t}\n\t\tif !signature.IgnoreWhenTaskNotRegistered {\n\t\t\tdelivery.Nack(multiple, requeue)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlog.INFO.Printf(\"Received new message: %s\", delivery.Body)\n\n\terr := taskProcessor.Process(signature)\n\tdelivery.Ack(multiple)\n\treturn err\n}\n\n\/\/ delay a task by delayDuration miliseconds, the way it works is a new queue\n\/\/ is created without any consumers, the message is then published to this queue\n\/\/ with appropriate ttl expiration headers, after the expiration, it is sent to\n\/\/ the proper queue with consumers\nfunc (b *Broker) delay(signature *tasks.Signature, delayMs int64) error {\n\tif delayMs <= 0 {\n\t\treturn errors.New(\"Cannot delay task by 0ms\")\n\t}\n\n\tmessage, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON marshal error: %s\", err)\n\t}\n\n\t\/\/ It's necessary to redeclare the queue each time (to zero its TTL timer).\n\tqueueName := fmt.Sprintf(\n\t\t\"delay.%d.%s.%s\",\n\t\tdelayMs, \/\/ delay duration in mileseconds\n\t\tb.GetConfig().AMQP.Exchange,\n\t\tsignature.RoutingKey, \/\/ routing key\n\t)\n\tdeclareQueueArgs := amqp.Table{\n\t\t\/\/ Exchange where to send messages after TTL expiration.\n\t\t\"x-dead-letter-exchange\": b.GetConfig().AMQP.Exchange,\n\t\t\/\/ Routing key which use when resending expired messages.\n\t\t\"x-dead-letter-routing-key\": signature.RoutingKey,\n\t\t\/\/ Time in milliseconds\n\t\t\/\/ after that message will expire and be sent to destination.\n\t\t\"x-message-ttl\": delayMs,\n\t\t\/\/ Time after that the queue will be deleted.\n\t\t\"x-expires\": delayMs * 2,\n\t}\n\tconn, channel, _, _, _, err := b.Connect(\n\t\tb.GetConfig().Broker,\n\t\tb.GetConfig().TLSConfig,\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange name\n\t\tb.GetConfig().AMQP.ExchangeType, \/\/ exchange type\n\t\tqueueName, \/\/ queue name\n\t\ttrue, \/\/ queue durable\n\t\tb.GetConfig().AMQP.AutoDelete, \/\/ queue delete when unused\n\t\tqueueName, \/\/ queue binding key\n\t\tnil, \/\/ exchange declare args\n\t\tdeclareQueueArgs, \/\/ queue declare args\n\t\tamqp.Table(b.GetConfig().AMQP.QueueBindingArgs), \/\/ queue binding args\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer b.Close(channel, conn)\n\n\tif err := channel.Publish(\n\t\tb.GetConfig().AMQP.Exchange, \/\/ exchange\n\t\tqueueName, \/\/ routing key\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table(signature.Headers),\n\t\t\tContentType: \"application\/json\",\n\t\t\tBody: message,\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t},\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Broker) isDirectExchange() bool {\n\treturn b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == \"direct\"\n}\n\n\/\/ AdjustRoutingKey makes sure the routing key is correct.\n\/\/ If the routing key is an empty string:\n\/\/ a) set it to binding key for direct exchange type\n\/\/ b) set it to default queue name\nfunc (b *Broker) AdjustRoutingKey(s *tasks.Signature) {\n\tif s.RoutingKey != \"\" {\n\t\treturn\n\t}\n\n\tif b.isDirectExchange() {\n\t\t\/\/ The routing algorithm behind a direct exchange is simple - a message goes\n\t\t\/\/ to the queues whose binding key exactly matches the routing key of the message.\n\t\ts.RoutingKey = b.GetConfig().AMQP.BindingKey\n\t\treturn\n\t}\n\n\ts.RoutingKey = b.GetConfig().DefaultQueue\n}\n<|endoftext|>"} {"text":"<commit_before>package messages\n\ntype AliasApprovedMessage struct {\n\tMessageType uint8\n}\n\nfunc NewAliasApprovedMessage() AliasApprovedMessage {\n\treturn AliasApprovedMessage{\n\t\tMessageType: TypeAliasApproved,\n\t}\n}\n\nfunc NewAliasApprovedMessageFromBytes(messageBytes []byte) AliasApprovedMessage {\n\treturn AliasApprovedMessage{\n\t\tMessageType: uint8(messageBytes[0]),\n\t}\n}\n\nfunc (ms *AliasApprovedMessage) Bytes() []byte {\n\tb := make([]byte, 1)\n\tb[0] = byte(ms.MessageType)\n\treturn b\n}\n<commit_msg>Commented AliasApproved.go<commit_after>package messages\n\n\/\/ AliasApprovedMessage is the struct which describes an Alias approved message\ntype AliasApprovedMessage struct {\n\t\/\/MessageType is the message type\n\tMessageType uint8\n}\n\n\/\/ NewAliasApprovedMessage returns an instance of AliasApprovedMessage\nfunc NewAliasApprovedMessage() AliasApprovedMessage {\n\treturn AliasApprovedMessage{\n\t\tMessageType: TypeAliasApproved,\n\t}\n}\n\n\/\/ NewAliasApprovedMessageFromBytes returns an instance of AliasApprovedMessage\n\/\/ Based on a given slice of bytes\nfunc NewAliasApprovedMessageFromBytes(messageBytes []byte) AliasApprovedMessage {\n\treturn AliasApprovedMessage{\n\t\tMessageType: uint8(messageBytes[0]),\n\t}\n}\n\n\/\/ Bytes returns a slice of bytes representing an AliasApprovedMessage\n\/\/ which can be sent through a connection\nfunc (ms *AliasApprovedMessage) Bytes() []byte {\n\tb := make([]byte, 1)\n\tb[0] = byte(ms.MessageType)\n\treturn b\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage version_test\n\nimport (\n\t\"encoding\/json\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"strings\"\n\t\"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju-core\/version\"\n)\n\ntype suite struct{}\n\nvar _ = gc.Suite(suite{})\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n\n\/\/ N.B. The FORCE-VERSION logic is tested in the environs package.\n\nvar cmpTests = []struct {\n\tv1, v2 string\n\tcompare int\n}{\n\t{\"1.0.0\", \"1.0.0\", 0},\n\t{\"01.0.0\", \"1.0.0\", 0},\n\t{\"10.0.0\", \"9.0.0\", 1},\n\t{\"1.0.0\", \"1.0.1\", -1},\n\t{\"1.0.1\", \"1.0.0\", 1},\n\t{\"1.0.0\", \"1.1.0\", -1},\n\t{\"1.1.0\", \"1.0.0\", 1},\n\t{\"1.0.0\", \"2.0.0\", -1},\n\t{\"2.0.0\", \"1.0.0\", 1},\n\t{\"2.0.0.0\", \"2.0.0\", 0},\n\t{\"2.0.0.0\", \"2.0.0.0\", 0},\n\t{\"2.0.0.1\", \"2.0.0.0\", 1},\n\t{\"2.0.1.10\", \"2.0.0.0\", 1},\n}\n\nfunc (suite) TestCompare(c *gc.C) {\n\tfor i, test := range cmpTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tv1, err := version.Parse(test.v1)\n\t\tc.Assert(err, gc.IsNil)\n\t\tv2, err := version.Parse(test.v2)\n\t\tc.Assert(err, gc.IsNil)\n\t\tcompare := v1.Compare(v2)\n\t\tc.Check(compare, gc.Equals, test.compare)\n\t}\n}\n\nvar parseTests = []struct {\n\tv string\n\terr string\n\texpect version.Number\n\tdev bool\n}{{\n\tv: \"0.0.0\",\n}, {\n\tv: \"0.0.1\",\n\texpect: version.Number{0, 0, 1, 0},\n}, {\n\tv: \"0.0.2\",\n\texpect: version.Number{0, 0, 2, 0},\n}, {\n\tv: \"0.1.0\",\n\texpect: version.Number{0, 1, 0, 0},\n\tdev: true,\n}, {\n\tv: \"0.2.3\",\n\texpect: version.Number{0, 2, 3, 0},\n}, {\n\tv: \"1.0.0\",\n\texpect: version.Number{1, 0, 0, 0},\n}, {\n\tv: \"10.234.3456\",\n\texpect: version.Number{10, 234, 3456, 0},\n}, {\n\tv: \"10.234.3456.1\",\n\texpect: version.Number{10, 234, 3456, 1},\n\tdev: true,\n}, {\n\tv: \"10.234.3456.64\",\n\texpect: version.Number{10, 234, 3456, 64},\n\tdev: true,\n}, {\n\tv: \"10.235.3456\",\n\texpect: version.Number{10, 235, 3456, 0},\n\tdev: true,\n}, {\n\tv: \"1234567890.2.1\",\n\terr: \"invalid version.*\",\n}, {\n\tv: \"0.2..1\",\n\terr: \"invalid version.*\",\n}}\n\nfunc (suite) TestParse(c *gc.C) {\n\tfor i, test := range parseTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tgot, err := version.Parse(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, test.expect)\n\t\t\tc.Check(got.IsDev(), gc.Equals, test.dev)\n\t\t\tc.Check(got.String(), gc.Equals, test.v)\n\t\t}\n\t}\n}\n\nfunc binaryVersion(major, minor, patch, build int, series, arch string) version.Binary {\n\treturn version.Binary{\n\t\tNumber: version.Number{\n\t\t\tMajor: major,\n\t\t\tMinor: minor,\n\t\t\tPatch: patch,\n\t\t\tBuild: build,\n\t\t},\n\t\tSeries: series,\n\t\tArch: arch,\n\t}\n}\n\nvar parseBinaryTests = []struct {\n\tv string\n\terr string\n\texpect version.Binary\n}{{\n\tv: \"1.2.3-a-b\",\n\texpect: binaryVersion(1, 2, 3, 0, \"a\", \"b\"),\n}, {\n\tv: \"1.2.3.4-a-b\",\n\texpect: binaryVersion(1, 2, 3, 4, \"a\", \"b\"),\n}, {\n\tv: \"1.2.3--b\",\n\terr: \"invalid binary version.*\",\n}, {\n\tv: \"1.2.3-a-\",\n\terr: \"invalid binary version.*\",\n}}\n\nfunc (suite) TestParseBinary(c *gc.C) {\n\tfor i, test := range parseBinaryTests {\n\t\tc.Logf(\"test 1: %d\", i)\n\t\tgot, err := version.ParseBinary(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, test.expect)\n\t\t}\n\t}\n\n\tfor i, test := range parseTests {\n\t\tc.Logf(\"test 2: %d\", i)\n\t\tv := test.v + \"-a-b\"\n\t\tgot, err := version.ParseBinary(v)\n\t\texpect := version.Binary{\n\t\t\tNumber: test.expect,\n\t\t\tSeries: \"a\",\n\t\t\tArch: \"b\",\n\t\t}\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, strings.Replace(test.err, \"version\", \"binary version\", 1))\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, expect)\n\t\t\tc.Check(got.IsDev(), gc.Equals, test.dev)\n\t\t}\n\t}\n}\n\nvar marshallers = []struct {\n\tname string\n\tmarshal func(interface{}) ([]byte, error)\n\tunmarshal func([]byte, interface{}) error\n}{{\n\t\"json\",\n\tjson.Marshal,\n\tjson.Unmarshal,\n}, {\n\t\"bson\",\n\tbson.Marshal,\n\tbson.Unmarshal,\n}}\n\nfunc (suite) TestBinaryMarshalUnmarshal(c *gc.C) {\n\tfor _, m := range marshallers {\n\t\tc.Logf(\"encoding %v\", m.name)\n\t\ttype doc struct {\n\t\t\tVersion version.Binary\n\t\t}\n\t\tv := doc{version.MustParseBinary(\"1.2.3-foo-bar\")}\n\t\tdata, err := m.marshal(v)\n\t\tc.Assert(err, gc.IsNil)\n\t\tvar nv doc\n\t\terr = m.unmarshal(data, &nv)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(v, gc.Equals, nv)\n\t}\n}\n\nfunc (suite) TestNumberMarshalUnmarshal(c *gc.C) {\n\tfor _, m := range marshallers {\n\t\tc.Logf(\"encoding %v\", m.name)\n\t\ttype doc struct {\n\t\t\tVersion version.Number\n\t\t}\n\t\tv := doc{version.MustParse(\"1.2.3\")}\n\t\tdata, err := m.marshal(&v)\n\t\tc.Assert(err, gc.IsNil)\n\t\tvar nv doc\n\t\terr = m.unmarshal(data, &nv)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(v, gc.Equals, nv)\n\t}\n}\n\nvar parseMajorMinorTests = []struct {\n\tv string\n\terr string\n\texpectMajor int\n\texpectMinor int\n}{{\n\tv: \"1.2\",\n\texpectMajor: 1,\n\texpectMinor: 2,\n}, {\n\tv: \"1\",\n\texpectMajor: 1,\n\texpectMinor: -1,\n}, {\n\tv: \"1.2.3\",\n\terr: \"invalid major.minor version number 1.2.3\",\n}, {\n\tv: \"blah\",\n\terr: `invalid major version number blah: strconv.ParseInt: parsing \"blah\": invalid syntax`,\n}}\n\nfunc (suite) TestParseMajorMinor(c *gc.C) {\n\tfor i, test := range parseMajorMinorTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tmajor, minor, err := version.ParseMajorMinor(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(major, gc.Equals, test.expectMajor)\n\t\t\tc.Check(minor, gc.Equals, test.expectMinor)\n\t\t}\n\t}\n}\n<commit_msg>go fmt<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage version_test\n\nimport (\n\t\"encoding\/json\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"strings\"\n\t\"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju-core\/version\"\n)\n\ntype suite struct{}\n\nvar _ = gc.Suite(suite{})\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n\n\/\/ N.B. The FORCE-VERSION logic is tested in the environs package.\n\nvar cmpTests = []struct {\n\tv1, v2 string\n\tcompare int\n}{\n\t{\"1.0.0\", \"1.0.0\", 0},\n\t{\"01.0.0\", \"1.0.0\", 0},\n\t{\"10.0.0\", \"9.0.0\", 1},\n\t{\"1.0.0\", \"1.0.1\", -1},\n\t{\"1.0.1\", \"1.0.0\", 1},\n\t{\"1.0.0\", \"1.1.0\", -1},\n\t{\"1.1.0\", \"1.0.0\", 1},\n\t{\"1.0.0\", \"2.0.0\", -1},\n\t{\"2.0.0\", \"1.0.0\", 1},\n\t{\"2.0.0.0\", \"2.0.0\", 0},\n\t{\"2.0.0.0\", \"2.0.0.0\", 0},\n\t{\"2.0.0.1\", \"2.0.0.0\", 1},\n\t{\"2.0.1.10\", \"2.0.0.0\", 1},\n}\n\nfunc (suite) TestCompare(c *gc.C) {\n\tfor i, test := range cmpTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tv1, err := version.Parse(test.v1)\n\t\tc.Assert(err, gc.IsNil)\n\t\tv2, err := version.Parse(test.v2)\n\t\tc.Assert(err, gc.IsNil)\n\t\tcompare := v1.Compare(v2)\n\t\tc.Check(compare, gc.Equals, test.compare)\n\t}\n}\n\nvar parseTests = []struct {\n\tv string\n\terr string\n\texpect version.Number\n\tdev bool\n}{{\n\tv: \"0.0.0\",\n}, {\n\tv: \"0.0.1\",\n\texpect: version.Number{0, 0, 1, 0},\n}, {\n\tv: \"0.0.2\",\n\texpect: version.Number{0, 0, 2, 0},\n}, {\n\tv: \"0.1.0\",\n\texpect: version.Number{0, 1, 0, 0},\n\tdev: true,\n}, {\n\tv: \"0.2.3\",\n\texpect: version.Number{0, 2, 3, 0},\n}, {\n\tv: \"1.0.0\",\n\texpect: version.Number{1, 0, 0, 0},\n}, {\n\tv: \"10.234.3456\",\n\texpect: version.Number{10, 234, 3456, 0},\n}, {\n\tv: \"10.234.3456.1\",\n\texpect: version.Number{10, 234, 3456, 1},\n\tdev: true,\n}, {\n\tv: \"10.234.3456.64\",\n\texpect: version.Number{10, 234, 3456, 64},\n\tdev: true,\n}, {\n\tv: \"10.235.3456\",\n\texpect: version.Number{10, 235, 3456, 0},\n\tdev: true,\n}, {\n\tv: \"1234567890.2.1\",\n\terr: \"invalid version.*\",\n}, {\n\tv: \"0.2..1\",\n\terr: \"invalid version.*\",\n}}\n\nfunc (suite) TestParse(c *gc.C) {\n\tfor i, test := range parseTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tgot, err := version.Parse(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, test.expect)\n\t\t\tc.Check(got.IsDev(), gc.Equals, test.dev)\n\t\t\tc.Check(got.String(), gc.Equals, test.v)\n\t\t}\n\t}\n}\n\nfunc binaryVersion(major, minor, patch, build int, series, arch string) version.Binary {\n\treturn version.Binary{\n\t\tNumber: version.Number{\n\t\t\tMajor: major,\n\t\t\tMinor: minor,\n\t\t\tPatch: patch,\n\t\t\tBuild: build,\n\t\t},\n\t\tSeries: series,\n\t\tArch: arch,\n\t}\n}\n\nvar parseBinaryTests = []struct {\n\tv string\n\terr string\n\texpect version.Binary\n}{{\n\tv: \"1.2.3-a-b\",\n\texpect: binaryVersion(1, 2, 3, 0, \"a\", \"b\"),\n}, {\n\tv: \"1.2.3.4-a-b\",\n\texpect: binaryVersion(1, 2, 3, 4, \"a\", \"b\"),\n}, {\n\tv: \"1.2.3--b\",\n\terr: \"invalid binary version.*\",\n}, {\n\tv: \"1.2.3-a-\",\n\terr: \"invalid binary version.*\",\n}}\n\nfunc (suite) TestParseBinary(c *gc.C) {\n\tfor i, test := range parseBinaryTests {\n\t\tc.Logf(\"test 1: %d\", i)\n\t\tgot, err := version.ParseBinary(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, test.expect)\n\t\t}\n\t}\n\n\tfor i, test := range parseTests {\n\t\tc.Logf(\"test 2: %d\", i)\n\t\tv := test.v + \"-a-b\"\n\t\tgot, err := version.ParseBinary(v)\n\t\texpect := version.Binary{\n\t\t\tNumber: test.expect,\n\t\t\tSeries: \"a\",\n\t\t\tArch: \"b\",\n\t\t}\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, strings.Replace(test.err, \"version\", \"binary version\", 1))\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(got, gc.Equals, expect)\n\t\t\tc.Check(got.IsDev(), gc.Equals, test.dev)\n\t\t}\n\t}\n}\n\nvar marshallers = []struct {\n\tname string\n\tmarshal func(interface{}) ([]byte, error)\n\tunmarshal func([]byte, interface{}) error\n}{{\n\t\"json\",\n\tjson.Marshal,\n\tjson.Unmarshal,\n}, {\n\t\"bson\",\n\tbson.Marshal,\n\tbson.Unmarshal,\n}}\n\nfunc (suite) TestBinaryMarshalUnmarshal(c *gc.C) {\n\tfor _, m := range marshallers {\n\t\tc.Logf(\"encoding %v\", m.name)\n\t\ttype doc struct {\n\t\t\tVersion version.Binary\n\t\t}\n\t\tv := doc{version.MustParseBinary(\"1.2.3-foo-bar\")}\n\t\tdata, err := m.marshal(v)\n\t\tc.Assert(err, gc.IsNil)\n\t\tvar nv doc\n\t\terr = m.unmarshal(data, &nv)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(v, gc.Equals, nv)\n\t}\n}\n\nfunc (suite) TestNumberMarshalUnmarshal(c *gc.C) {\n\tfor _, m := range marshallers {\n\t\tc.Logf(\"encoding %v\", m.name)\n\t\ttype doc struct {\n\t\t\tVersion version.Number\n\t\t}\n\t\tv := doc{version.MustParse(\"1.2.3\")}\n\t\tdata, err := m.marshal(&v)\n\t\tc.Assert(err, gc.IsNil)\n\t\tvar nv doc\n\t\terr = m.unmarshal(data, &nv)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(v, gc.Equals, nv)\n\t}\n}\n\nvar parseMajorMinorTests = []struct {\n\tv string\n\terr string\n\texpectMajor int\n\texpectMinor int\n}{{\n\tv: \"1.2\",\n\texpectMajor: 1,\n\texpectMinor: 2,\n}, {\n\tv: \"1\",\n\texpectMajor: 1,\n\texpectMinor: -1,\n}, {\n\tv: \"1.2.3\",\n\terr: \"invalid major.minor version number 1.2.3\",\n}, {\n\tv: \"blah\",\n\terr: `invalid major version number blah: strconv.ParseInt: parsing \"blah\": invalid syntax`,\n}}\n\nfunc (suite) TestParseMajorMinor(c *gc.C) {\n\tfor i, test := range parseMajorMinorTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tmajor, minor, err := version.ParseMajorMinor(test.v)\n\t\tif test.err != \"\" {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(major, gc.Equals, test.expectMajor)\n\t\t\tc.Check(minor, gc.Equals, test.expectMinor)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either Version 3 of the License, or\n\/\/ (at your option) any later Version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype MySuite struct{}\n\nvar _ = Suite(&MySuite{})\n\nfunc (s *MySuite) TestParsingVersion(c *C) {\n\tVersion, err := ParseVersion(\"1.5.9\")\n\tc.Assert(err, Equals, nil)\n\tc.Assert(Version.Major, Equals, 1)\n\tc.Assert(Version.Minor, Equals, 5)\n\tc.Assert(Version.Patch, Equals, 9)\n}\n\nfunc (s *MySuite) TestParsingErrorForIncorrectNumberOfDotCharacters(c *C) {\n\t_, err := ParseVersion(\"1.5.9.9\")\n\tc.Assert(err, ErrorMatches, \"Incorrect Version format. Version should be in the form 1.5.7\")\n\n\t_, err = ParseVersion(\"0.\")\n\tc.Assert(err, ErrorMatches, \"Incorrect Version format. Version should be in the form 1.5.7\")\n}\n\nfunc (s *MySuite) TestParsingErrorForNonIntegerVersion(c *C) {\n\t_, err := ParseVersion(\"a.9.0\")\n\tc.Assert(err, ErrorMatches, `Error parsing major Version a to integer. strconv.ParseInt: parsing \"a\": invalid syntax`)\n\n\t_, err = ParseVersion(\"0.ffhj.78\")\n\tc.Assert(err, ErrorMatches, `Error parsing minor Version ffhj to integer. strconv.ParseInt: parsing \"ffhj\": invalid syntax`)\n\n\t_, err = ParseVersion(\"8.9.opl\")\n\tc.Assert(err, ErrorMatches, `Error parsing patch Version opl to integer. strconv.ParseInt: parsing \"opl\": invalid syntax`)\n}\n\nfunc (s *MySuite) TestVersionComparisonGreaterLesser(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"4.7.2\")\n\tversion2, _ := ParseVersion(\"4.7.2\")\n\tc.Assert(version1.IsEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionComparisonGreaterThanEqual(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(higherVersion.IsGreaterThanEqualTo(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"6.7.2\")\n\tversion2, _ := ParseVersion(\"6.7.2\")\n\tc.Assert(version1.IsGreaterThanEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionComparisonLesserThanEqual(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"5.8.2\")\n\tlowerVersion, _ = ParseVersion(\"2.9.7\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"6.7.2\")\n\tversion2, _ := ParseVersion(\"6.7.2\")\n\tc.Assert(version1.IsLesserThanEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionIsBetweenTwoVersions(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.9\")\n\tlowerVersion, _ := ParseVersion(\"0.0.7\")\n\tmiddleVersion, _ := ParseVersion(\"0.0.8\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tmiddleVersion, _ = ParseVersion(\"0.6.9\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tmiddleVersion, _ = ParseVersion(\"4.0.1\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"4.0.1\")\n\tmiddleVersion, _ = ParseVersion(\"4.0.1\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.0.2\")\n\tlowerVersion, _ = ParseVersion(\"0.0.1\")\n\tmiddleVersion, _ = ParseVersion(\"0.0.2\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n}\n\nfunc (s *MySuite) TestGetLatestVersion(c *C) {\n\thighestVersion := &Version{2, 2, 2}\n\tversions := []*Version{&Version{0, 0, 1}, &Version{1, 2, 2}, highestVersion, &Version{0, 0, 2}, &Version{0, 2, 2}, &Version{0, 0, 3}, &Version{0, 2, 1}, &Version{0, 1, 2}}\n\tlatestVersion := GetLatestVersion(versions)\n\n\tc.Assert(latestVersion, DeepEquals, highestVersion)\n}\n\nfunc (s *MySuite) TestCheckVersionCompatibilitySuccess(c *C) {\n\tversionSupported := &VersionSupport{\"0.6.5\", \"1.8.5\"}\n\tgaugeVersion := &Version{0, 6, 7}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{\"0.0.1\", \"0.0.1\"}\n\tgaugeVersion = &Version{0, 0, 1}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{Minimum: \"0.0.1\"}\n\tgaugeVersion = &Version{1, 5, 2}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{Minimum: \"0.5.1\"}\n\tgaugeVersion = &Version{0, 5, 1}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n}\n\nfunc (s *MySuite) TestCheckVersionCompatibilityFailure(c *C) {\n\tversionsSupported := &VersionSupport{\"0.6.5\", \"1.8.5\"}\n\tgaugeVersion := &Version{1, 9, 9}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{\"0.0.1\", \"0.0.1\"}\n\tgaugeVersion = &Version{0, 0, 2}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{Minimum: \"1.3.1\"}\n\tgaugeVersion = &Version{1, 3, 0}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{Minimum: \"0.5.1\"}\n\tgaugeVersion = &Version{0, 0, 9}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n}\n\nfunc (s *MySuite) TestFullVersionWithBuildMetadata(c *C) {\n\tc.Assert(FullVersion(), Equals, CurrentGaugeVersion.String())\n\tBuildMetadata = \"nightly-2016-02-21\"\n\tc.Assert(FullVersion(), Equals, fmt.Sprintf(\"%s.%s\", CurrentGaugeVersion.String(), BuildMetadata))\n}\n<commit_msg>Fix unit tests<commit_after>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either Version 3 of the License, or\n\/\/ (at your option) any later Version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype MySuite struct{}\n\nvar _ = Suite(&MySuite{})\n\nfunc (s *MySuite) TestParsingVersion(c *C) {\n\tVersion, err := ParseVersion(\"1.5.9\")\n\tc.Assert(err, Equals, nil)\n\tc.Assert(Version.Major, Equals, 1)\n\tc.Assert(Version.Minor, Equals, 5)\n\tc.Assert(Version.Patch, Equals, 9)\n}\n\nfunc (s *MySuite) TestParsingErrorForIncorrectNumberOfDotCharacters(c *C) {\n\t_, err := ParseVersion(\"1.5.9.9\")\n\tc.Assert(err, ErrorMatches, \"Incorrect Version format. Version should be in the form 1.5.7\")\n\n\t_, err = ParseVersion(\"0.\")\n\tc.Assert(err, ErrorMatches, \"Incorrect Version format. Version should be in the form 1.5.7\")\n}\n\nfunc (s *MySuite) TestParsingErrorForNonIntegerVersion(c *C) {\n\t_, err := ParseVersion(\"a.9.0\")\n\tc.Assert(err, ErrorMatches, `Error parsing major Version a to integer. strconv.Atoi: parsing \"a\": invalid syntax`)\n\n\t_, err = ParseVersion(\"0.ffhj.78\")\n\tc.Assert(err, ErrorMatches, `Error parsing minor Version ffhj to integer. strconv.Atoi: parsing \"ffhj\": invalid syntax`)\n\n\t_, err = ParseVersion(\"8.9.opl\")\n\tc.Assert(err, ErrorMatches, `Error parsing patch Version opl to integer. strconv.Atoi: parsing \"opl\": invalid syntax`)\n}\n\nfunc (s *MySuite) TestVersionComparisonGreaterLesser(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"4.7.2\")\n\tversion2, _ := ParseVersion(\"4.7.2\")\n\tc.Assert(version1.IsEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionComparisonGreaterThanEqual(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(higherVersion.IsGreaterThanEqualTo(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tc.Assert(lowerVersion.IsLesserThan(higherVersion), Equals, true)\n\tc.Assert(higherVersion.IsGreaterThan(lowerVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"6.7.2\")\n\tversion2, _ := ParseVersion(\"6.7.2\")\n\tc.Assert(version1.IsGreaterThanEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionComparisonLesserThanEqual(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.7\")\n\tlowerVersion, _ := ParseVersion(\"0.0.3\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"5.8.2\")\n\tlowerVersion, _ = ParseVersion(\"2.9.7\")\n\tc.Assert(lowerVersion.IsLesserThanEqualTo(higherVersion), Equals, true)\n\n\tversion1, _ := ParseVersion(\"6.7.2\")\n\tversion2, _ := ParseVersion(\"6.7.2\")\n\tc.Assert(version1.IsLesserThanEqualTo(version2), Equals, true)\n}\n\nfunc (s *MySuite) TestVersionIsBetweenTwoVersions(c *C) {\n\thigherVersion, _ := ParseVersion(\"0.0.9\")\n\tlowerVersion, _ := ParseVersion(\"0.0.7\")\n\tmiddleVersion, _ := ParseVersion(\"0.0.8\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.7.2\")\n\tlowerVersion, _ = ParseVersion(\"0.5.7\")\n\tmiddleVersion, _ = ParseVersion(\"0.6.9\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"3.8.7\")\n\tmiddleVersion, _ = ParseVersion(\"4.0.1\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"4.7.2\")\n\tlowerVersion, _ = ParseVersion(\"4.0.1\")\n\tmiddleVersion, _ = ParseVersion(\"4.0.1\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n\n\thigherVersion, _ = ParseVersion(\"0.0.2\")\n\tlowerVersion, _ = ParseVersion(\"0.0.1\")\n\tmiddleVersion, _ = ParseVersion(\"0.0.2\")\n\tc.Assert(middleVersion.IsBetween(lowerVersion, higherVersion), Equals, true)\n}\n\nfunc (s *MySuite) TestGetLatestVersion(c *C) {\n\thighestVersion := &Version{2, 2, 2}\n\tversions := []*Version{&Version{0, 0, 1}, &Version{1, 2, 2}, highestVersion, &Version{0, 0, 2}, &Version{0, 2, 2}, &Version{0, 0, 3}, &Version{0, 2, 1}, &Version{0, 1, 2}}\n\tlatestVersion := GetLatestVersion(versions)\n\n\tc.Assert(latestVersion, DeepEquals, highestVersion)\n}\n\nfunc (s *MySuite) TestCheckVersionCompatibilitySuccess(c *C) {\n\tversionSupported := &VersionSupport{\"0.6.5\", \"1.8.5\"}\n\tgaugeVersion := &Version{0, 6, 7}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{\"0.0.1\", \"0.0.1\"}\n\tgaugeVersion = &Version{0, 0, 1}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{Minimum: \"0.0.1\"}\n\tgaugeVersion = &Version{1, 5, 2}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n\tversionSupported = &VersionSupport{Minimum: \"0.5.1\"}\n\tgaugeVersion = &Version{0, 5, 1}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionSupported), Equals, nil)\n\n}\n\nfunc (s *MySuite) TestCheckVersionCompatibilityFailure(c *C) {\n\tversionsSupported := &VersionSupport{\"0.6.5\", \"1.8.5\"}\n\tgaugeVersion := &Version{1, 9, 9}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{\"0.0.1\", \"0.0.1\"}\n\tgaugeVersion = &Version{0, 0, 2}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{Minimum: \"1.3.1\"}\n\tgaugeVersion = &Version{1, 3, 0}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n\tversionsSupported = &VersionSupport{Minimum: \"0.5.1\"}\n\tgaugeVersion = &Version{0, 0, 9}\n\tc.Assert(CheckCompatibility(gaugeVersion, versionsSupported), NotNil)\n\n}\n\nfunc (s *MySuite) TestFullVersionWithBuildMetadata(c *C) {\n\tc.Assert(FullVersion(), Equals, CurrentGaugeVersion.String())\n\tBuildMetadata = \"nightly-2016-02-21\"\n\tc.Assert(FullVersion(), Equals, fmt.Sprintf(\"%s.%s\", CurrentGaugeVersion.String(), BuildMetadata))\n}\n<|endoftext|>"} {"text":"<commit_before>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/mackerelio\/mackerel-agent\/config\"\n)\n\nfunc TestMetadataGenerator(t *testing.T) {\n\ttests := []struct {\n\t\tcommand string\n\t\tmetadata string\n\t\terr bool\n\t}{\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata \"{}\"`,\n\t\t\tmetadata: `{}`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 1 -metadata \"{}\"`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '{\"example\": \"metadata\", \"foo\": [100, 200, {}, null]}'`,\n\t\t\tmetadata: `{\"example\":\"metadata\",\"foo\":[100,200,{},null]}`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '{\"example\": metadata\"}'`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '\"foobar\"'`,\n\t\t\tmetadata: `\"foobar\"`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata foobar`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata 262144`,\n\t\t\tmetadata: `262144`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata true`,\n\t\t\tmetadata: `true`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata null`,\n\t\t\tmetadata: `null`,\n\t\t\terr: false,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{\n\t\t\tConfig: &config.MetadataPlugin{\n\t\t\t\tCommand: test.command,\n\t\t\t},\n\t\t}\n\t\tmetadata, err := g.Fetch()\n\t\tif err != nil {\n\t\t\tif !test.err {\n\t\t\t\tt.Errorf(\"error occurred unexpectedly on command %q\", test.command)\n\t\t\t}\n\t\t} else {\n\t\t\tif test.err {\n\t\t\t\tt.Errorf(\"error did not occurr but error expected on command %q\", test.command)\n\t\t\t}\n\t\t\tmetadataStr, _ := json.Marshal(metadata)\n\t\t\tif string(metadataStr) != test.metadata {\n\t\t\t\tt.Errorf(\"metadata should be %q but got %q\", test.metadata, string(metadataStr))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMetadataGeneratorSaveDiffers(t *testing.T) {\n\ttests := []struct {\n\t\tprevmetadata string\n\t\tmetadata string\n\t\tdiffers bool\n\t}{\n\t\t{\n\t\t\tprevmetadata: `{}`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: false,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `{ \"foo\": [ 100, 200, null, {} ] }`,\n\t\t\tmetadata: `{\"foo\":[100,200,null,{}]}`,\n\t\t\tdiffers: false,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `null`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: true,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `[]`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{}\n\t\tvar prevmetadata interface{}\n\t\t_ = json.Unmarshal([]byte(test.prevmetadata), &prevmetadata)\n\t\tg.Save(prevmetadata)\n\n\t\tvar metadata interface{}\n\t\t_ = json.Unmarshal([]byte(test.metadata), &metadata)\n\n\t\tgot := g.Differs(metadata)\n\t\tif got != test.differs {\n\t\t\tt.Errorf(\"Differs() should return %t but got %t for %v, %v\", test.differs, got, prevmetadata, metadata)\n\t\t}\n\t}\n}\n\nfunc TestMetadataGeneratorInterval(t *testing.T) {\n\ttests := []struct {\n\t\tinterval *int32\n\t\texpected time.Duration\n\t}{\n\t\t{\n\t\t\tinterval: pint(0),\n\t\t\texpected: 1 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: pint(1),\n\t\t\texpected: 1 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: pint(30),\n\t\t\texpected: 30 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: nil,\n\t\t\texpected: 10 * time.Minute,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{\n\t\t\tConfig: &config.MetadataPlugin{\n\t\t\t\tExecutionInterval: test.interval,\n\t\t\t},\n\t\t}\n\t\tif g.Interval() != test.expected {\n\t\t\tt.Errorf(\"interval should be %v but got: %v\", test.expected, g.Interval())\n\t\t}\n\t}\n}\n\nfunc pint(i int32) *int32 {\n\treturn &i\n}\n<commit_msg>rename test name<commit_after>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/mackerelio\/mackerel-agent\/config\"\n)\n\nfunc TestMetadataGeneratorFetch(t *testing.T) {\n\ttests := []struct {\n\t\tcommand string\n\t\tmetadata string\n\t\terr bool\n\t}{\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata \"{}\"`,\n\t\t\tmetadata: `{}`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 1 -metadata \"{}\"`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '{\"example\": \"metadata\", \"foo\": [100, 200, {}, null]}'`,\n\t\t\tmetadata: `{\"example\":\"metadata\",\"foo\":[100,200,{},null]}`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '{\"example\": metadata\"}'`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata '\"foobar\"'`,\n\t\t\tmetadata: `\"foobar\"`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata foobar`,\n\t\t\tmetadata: ``,\n\t\t\terr: true,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata 262144`,\n\t\t\tmetadata: `262144`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata true`,\n\t\t\tmetadata: `true`,\n\t\t\terr: false,\n\t\t},\n\t\t{\n\t\t\tcommand: `go run testdata\/json.go -exit-code 0 -metadata null`,\n\t\t\tmetadata: `null`,\n\t\t\terr: false,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{\n\t\t\tConfig: &config.MetadataPlugin{\n\t\t\t\tCommand: test.command,\n\t\t\t},\n\t\t}\n\t\tmetadata, err := g.Fetch()\n\t\tif err != nil {\n\t\t\tif !test.err {\n\t\t\t\tt.Errorf(\"error occurred unexpectedly on command %q\", test.command)\n\t\t\t}\n\t\t} else {\n\t\t\tif test.err {\n\t\t\t\tt.Errorf(\"error did not occurr but error expected on command %q\", test.command)\n\t\t\t}\n\t\t\tmetadataStr, _ := json.Marshal(metadata)\n\t\t\tif string(metadataStr) != test.metadata {\n\t\t\t\tt.Errorf(\"metadata should be %q but got %q\", test.metadata, string(metadataStr))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMetadataGeneratorSaveDiffers(t *testing.T) {\n\ttests := []struct {\n\t\tprevmetadata string\n\t\tmetadata string\n\t\tdiffers bool\n\t}{\n\t\t{\n\t\t\tprevmetadata: `{}`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: false,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `{ \"foo\": [ 100, 200, null, {} ] }`,\n\t\t\tmetadata: `{\"foo\":[100,200,null,{}]}`,\n\t\t\tdiffers: false,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `null`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: true,\n\t\t},\n\t\t{\n\t\t\tprevmetadata: `[]`,\n\t\t\tmetadata: `{}`,\n\t\t\tdiffers: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{}\n\t\tvar prevmetadata interface{}\n\t\t_ = json.Unmarshal([]byte(test.prevmetadata), &prevmetadata)\n\t\tg.Save(prevmetadata)\n\n\t\tvar metadata interface{}\n\t\t_ = json.Unmarshal([]byte(test.metadata), &metadata)\n\n\t\tgot := g.Differs(metadata)\n\t\tif got != test.differs {\n\t\t\tt.Errorf(\"Differs() should return %t but got %t for %v, %v\", test.differs, got, prevmetadata, metadata)\n\t\t}\n\t}\n}\n\nfunc TestMetadataGeneratorInterval(t *testing.T) {\n\ttests := []struct {\n\t\tinterval *int32\n\t\texpected time.Duration\n\t}{\n\t\t{\n\t\t\tinterval: pint(0),\n\t\t\texpected: 1 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: pint(1),\n\t\t\texpected: 1 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: pint(30),\n\t\t\texpected: 30 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tinterval: nil,\n\t\t\texpected: 10 * time.Minute,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tg := Generator{\n\t\t\tConfig: &config.MetadataPlugin{\n\t\t\t\tExecutionInterval: test.interval,\n\t\t\t},\n\t\t}\n\t\tif g.Interval() != test.expected {\n\t\t\tt.Errorf(\"interval should be %v but got: %v\", test.expected, g.Interval())\n\t\t}\n\t}\n}\n\nfunc pint(i int32) *int32 {\n\treturn &i\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"go-daemon\"\n\t\"log\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tpidFileName = \"dmn.pid\"\n\tlogFileName = \"dmn.log\"\n\n\tfileMask = 0600\n)\nconst (\n\tret_OK = iota\n\tret_ALREADYRUN\n\tret_PIDFERROR\n\tret_REBORNERROR\n\tret_CONFERROR\n)\n\nvar (\n\tstatus = flag.Bool(\"status\", false,\n\t\t`Check status of the daemon. The program immediately exits after these \n\t\tchecks with either a return code of 0 (Daemon Stopped) or return code \n\t\tnot equal to 0 (Daemon Running)`)\n\n\tsilent = flag.Bool(\"silent\", false, \"Don't write in stdout\")\n\n\ttest = flag.Bool(\"t\", false,\n\t\t`Run syntax tests for configuration files only. The program \n\t\timmediately exits after these syntax parsing tests with either \n\t\ta return code of 0 (Syntax OK) or return code not equal to 0 \n\t\t(Syntax Error)`)\n\n\tconfigFileName = flag.String(\"f\", \"dmn.conf\",\n\t\t`Specifies the name of the configuration file. The default is dmn.conf. \n\t\tDaemon refuses to start if there is no configuration file.`)\n)\n\nvar confProv = make(chan Config, 8)\n\nfunc main() {\n\tflag.Parse()\n\n\tsetupLogging()\n\n\tconf, err := loadConfig(*configFileName)\n\tif err != nil {\n\t\tlog.Println(\"Config error:\", err)\n\t\tos.Exit(ret_CONFERROR)\n\t}\n\tif *test {\n\t\tos.Exit(ret_OK)\n\t}\n\n\tpidf := lockPidFile()\n\terr = daemon.Reborn(027, \".\/\")\n\tif err != nil {\n\t\tlog.Println(\"Reborn error:\", err)\n\t\tos.Exit(ret_REBORNERROR)\n\t}\n\n\tconfProv <- conf\n\tgo watchdog(confProv)\n\n\tserveSignals()\n\n\tpidf.Unlock()\n}\n\nfunc setupLogging() {\n\tif daemon.WasReborn() {\n\t\tfile, _ := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, fileMask)\n\t\tdaemon.RedirectStream(os.Stdout, file)\n\t\tdaemon.RedirectStream(os.Stderr, file)\n\t\tfile.Close()\n\t\tlog.Println(\"--- log ---\")\n\t} else {\n\t\tlog.SetFlags(0)\n\t\tif *silent {\n\t\t\tfile, _ := os.OpenFile(os.DevNull, os.O_WRONLY, fileMask)\n\t\t\tdaemon.RedirectStream(os.Stdout, file)\n\t\t\tdaemon.RedirectStream(os.Stderr, file)\n\t\t\tfile.Close()\n\t\t}\n\t}\n}\n\ntype Config []string\n\nfunc loadConfig(path string) (config Config, err error) {\n\tvar file *os.File\n\tfile, err = os.OpenFile(path, os.O_RDONLY, 0700)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tconfig = make([]string, 0)\n\terr = json.NewDecoder(file).Decode(&config)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, path = range config {\n\t\tif _, err = os.Stat(path); os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc lockPidFile() *daemon.PidFile {\n\tpidf, err := daemon.LockPidFile(pidFileName, fileMask)\n\tif err != nil {\n\t\tif err == daemon.ErrWouldBlock {\n\t\t\tlog.Println(\"daemon copy is already running\")\n\t\t\tos.Exit(ret_ALREADYRUN)\n\t\t} else {\n\t\t\tlog.Println(\"pid file creation error:\", err)\n\t\t\tos.Exit(ret_PIDFERROR)\n\t\t}\n\t}\n\n\tif !daemon.WasReborn() {\n\t\tpidf.Unlock()\n\t}\n\n\tif *status {\n\t\tos.Exit(ret_OK)\n\t}\n\n\treturn pidf\n}\n\nfunc watchdog(confProv <-chan Config) {\n\tstates := make(map[string]time.Time)\n\tconf := <-confProv\n\tfor {\n\t\tselect {\n\t\tcase conf = <-confProv:\n\t\tdefault:\n\t\t}\n\n\t\tfor _, path := range conf {\n\t\t\tfi, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcur := fi.ModTime()\n\t\t\tif pre, exists := states[path]; exists {\n\t\t\t\tif pre != cur {\n\t\t\t\t\tlog.Printf(\"file %s modified at %s\", path, cur)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstates[path] = cur\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc serveSignals() {\n\tdaemon.SetHandler(termHandler, syscall.SIGTERM, syscall.SIGKILL)\n\tdaemon.SetHandler(hupHandler, syscall.SIGHUP)\n\n\terr := daemon.ServeSignals()\n\tif err != nil {\n\t\tlog.Println(\"Error:\", err)\n\t}\n\n\tlog.Println(\"--- end ---\")\n}\n\nfunc termHandler(sig os.Signal) error {\n\tlog.Println(\"SIGTERM:\", sig)\n\treturn daemon.ErrStop\n}\n\nfunc hupHandler(sig os.Signal) error {\n\tlog.Println(\"SIGHUP:\", sig)\n\n\tconf, err := loadConfig(*configFileName)\n\tif err != nil {\n\t\tlog.Println(\"Config error:\", err)\n\t} else {\n\t\tconfProv <- conf\n\t}\n\n\treturn nil\n}\n<commit_msg>Update dmn.go<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"github.com\/sevlyar\/go-daemon\"\n\t\"log\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tpidFileName = \"dmn.pid\"\n\tlogFileName = \"dmn.log\"\n\n\tfileMask = 0600\n)\nconst (\n\tret_OK = iota\n\tret_ALREADYRUN\n\tret_PIDFERROR\n\tret_REBORNERROR\n\tret_CONFERROR\n)\n\nvar (\n\tstatus = flag.Bool(\"status\", false,\n\t\t`Check status of the daemon. The program immediately exits after these \n\t\tchecks with either a return code of 0 (Daemon Stopped) or return code \n\t\tnot equal to 0 (Daemon Running)`)\n\n\tsilent = flag.Bool(\"silent\", false, \"Don't write in stdout\")\n\n\ttest = flag.Bool(\"t\", false,\n\t\t`Run syntax tests for configuration files only. The program \n\t\timmediately exits after these syntax parsing tests with either \n\t\ta return code of 0 (Syntax OK) or return code not equal to 0 \n\t\t(Syntax Error)`)\n\n\tconfigFileName = flag.String(\"f\", \"dmn.conf\",\n\t\t`Specifies the name of the configuration file. The default is dmn.conf. \n\t\tDaemon refuses to start if there is no configuration file.`)\n)\n\nvar confProv = make(chan Config, 8)\n\nfunc main() {\n\tflag.Parse()\n\n\tsetupLogging()\n\n\tconf, err := loadConfig(*configFileName)\n\tif err != nil {\n\t\tlog.Println(\"Config error:\", err)\n\t\tos.Exit(ret_CONFERROR)\n\t}\n\tif *test {\n\t\tos.Exit(ret_OK)\n\t}\n\n\tpidf := lockPidFile()\n\terr = daemon.Reborn(027, \".\/\")\n\tif err != nil {\n\t\tlog.Println(\"Reborn error:\", err)\n\t\tos.Exit(ret_REBORNERROR)\n\t}\n\n\tconfProv <- conf\n\tgo watchdog(confProv)\n\n\tserveSignals()\n\n\tpidf.Unlock()\n}\n\nfunc setupLogging() {\n\tif daemon.WasReborn() {\n\t\tfile, _ := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, fileMask)\n\t\tdaemon.RedirectStream(os.Stdout, file)\n\t\tdaemon.RedirectStream(os.Stderr, file)\n\t\tfile.Close()\n\t\tlog.Println(\"--- log ---\")\n\t} else {\n\t\tlog.SetFlags(0)\n\t\tif *silent {\n\t\t\tfile, _ := os.OpenFile(os.DevNull, os.O_WRONLY, fileMask)\n\t\t\tdaemon.RedirectStream(os.Stdout, file)\n\t\t\tdaemon.RedirectStream(os.Stderr, file)\n\t\t\tfile.Close()\n\t\t}\n\t}\n}\n\ntype Config []string\n\nfunc loadConfig(path string) (config Config, err error) {\n\tvar file *os.File\n\tfile, err = os.OpenFile(path, os.O_RDONLY, 0700)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tconfig = make([]string, 0)\n\terr = json.NewDecoder(file).Decode(&config)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, path = range config {\n\t\tif _, err = os.Stat(path); os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc lockPidFile() *daemon.PidFile {\n\tpidf, err := daemon.LockPidFile(pidFileName, fileMask)\n\tif err != nil {\n\t\tif err == daemon.ErrWouldBlock {\n\t\t\tlog.Println(\"daemon copy is already running\")\n\t\t\tos.Exit(ret_ALREADYRUN)\n\t\t} else {\n\t\t\tlog.Println(\"pid file creation error:\", err)\n\t\t\tos.Exit(ret_PIDFERROR)\n\t\t}\n\t}\n\n\tif !daemon.WasReborn() {\n\t\tpidf.Unlock()\n\t}\n\n\tif *status {\n\t\tos.Exit(ret_OK)\n\t}\n\n\treturn pidf\n}\n\nfunc watchdog(confProv <-chan Config) {\n\tstates := make(map[string]time.Time)\n\tconf := <-confProv\n\tfor {\n\t\tselect {\n\t\tcase conf = <-confProv:\n\t\tdefault:\n\t\t}\n\n\t\tfor _, path := range conf {\n\t\t\tfi, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcur := fi.ModTime()\n\t\t\tif pre, exists := states[path]; exists {\n\t\t\t\tif pre != cur {\n\t\t\t\t\tlog.Printf(\"file %s modified at %s\", path, cur)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstates[path] = cur\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc serveSignals() {\n\tdaemon.SetHandler(termHandler, syscall.SIGTERM, syscall.SIGKILL)\n\tdaemon.SetHandler(hupHandler, syscall.SIGHUP)\n\n\terr := daemon.ServeSignals()\n\tif err != nil {\n\t\tlog.Println(\"Error:\", err)\n\t}\n\n\tlog.Println(\"--- end ---\")\n}\n\nfunc termHandler(sig os.Signal) error {\n\tlog.Println(\"SIGTERM:\", sig)\n\treturn daemon.ErrStop\n}\n\nfunc hupHandler(sig os.Signal) error {\n\tlog.Println(\"SIGHUP:\", sig)\n\n\tconf, err := loadConfig(*configFileName)\n\tif err != nil {\n\t\tlog.Println(\"Config error:\", err)\n\t} else {\n\t\tconfProv <- conf\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Lars Wiegman. All rights reserved. Use of this source code is\n\/\/ governed by a BSD-style license that can be found in the LICENSE file.\n\npackage mock\n\n\/\/ UserService represents a mock implementation of multipass.UserService.\ntype UserService struct {\n\tRegisterFn func(handle string) error\n\tRegisterInvoked bool\n\n\tListedFn func(handle string) error\n\tListedInvoked bool\n\n\tAuthorizedFn func(handle, rawurl string) bool\n\tAuthorizedInvoked bool\n\n\tNotify func(handle, loginurl string) error\n\tNotifyInvoked bool\n\n\tCloseFn func() error\n\tCloseInvoked bool\n}\n\n\/\/ Register invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Register(handle string) error {\n\ts.RegisterInvoked = true\n\treturn s.RegisterFn(handle)\n}\n\n\/\/ Listed invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Listed(handle string) bool {\n\ts.ListedInvoked = true\n\treturn s.ListedFn(handle)\n}\n\n\/\/ Notify invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Notify(handle, loginurl string) error {\n\ts.NotifyInvoked = true\n\treturn s.NotifyFn(handle)\n}\n\n\/\/ Authorized invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Authorized(handle, rawurl string) bool {\n\ts.AuthorizedInvoked = true\n\treturn s.AuthorizedFn(handle, rawurl)\n}\n\n\/\/ Close invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Close() error {\n\ts.CloseInvoked = true\n\treturn s.CloseFn(handle)\n}\n<commit_msg>Fix methods of mock.UserService<commit_after>\/\/ Copyright 2016 Lars Wiegman. All rights reserved. Use of this source code is\n\/\/ governed by a BSD-style license that can be found in the LICENSE file.\n\npackage mock\n\n\/\/ UserService represents a mock implementation of multipass.UserService.\ntype UserService struct {\n\tRegisterFn func(handle string) error\n\tRegisterInvoked bool\n\n\tListedFn func(handle string) bool\n\tListedInvoked bool\n\n\tAuthorizedFn func(handle, rawurl string) bool\n\tAuthorizedInvoked bool\n\n\tNotifyFn func(handle, loginurl string) error\n\tNotifyInvoked bool\n\n\tCloseFn func() error\n\tCloseInvoked bool\n}\n\n\/\/ Register invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Register(handle string) error {\n\ts.RegisterInvoked = true\n\treturn s.RegisterFn(handle)\n}\n\n\/\/ Listed invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Listed(handle string) bool {\n\ts.ListedInvoked = true\n\treturn s.ListedFn(handle)\n}\n\n\/\/ Notify invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Notify(handle, loginurl string) error {\n\ts.NotifyInvoked = true\n\treturn s.NotifyFn(handle, loginurl)\n}\n\n\/\/ Authorized invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Authorized(handle, rawurl string) bool {\n\ts.AuthorizedInvoked = true\n\treturn s.AuthorizedFn(handle, rawurl)\n}\n\n\/\/ Close invokes the mock implementation and marks the function as invoked.\nfunc (s *UserService) Close() error {\n\ts.CloseInvoked = true\n\treturn s.CloseFn()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Aaron Longwell\n\/\/\n\/\/ Use of this source code is governed by an MIT licese.\n\/\/ Details in the LICENSE file.\n\npackage trello\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ Board represents a Trello Board.\n\/\/ https:\/\/developers.trello.com\/reference\/#boardsid\ntype Board struct {\n\tclient *Client\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDesc string `json:\"desc\"`\n\tClosed bool `json:\"closed\"`\n\tIdOrganization string `json:\"idOrganization\"`\n\tPinned bool `json:\"pinned\"`\n\tUrl string `json:\"url\"`\n\tShortUrl string `json:\"shortUrl\"`\n\tPrefs struct {\n\t\tPermissionLevel string `json:\"permissionLevel\"`\n\t\tVoting string `json:\"voting\"`\n\t\tComments string `json:\"comments\"`\n\t\tInvitations string `json:\"invitations\"`\n\t\tSelfJoin bool `json:\"selfjoin\"`\n\t\tCardCovers bool `json:\"cardCovers\"`\n\t\tCardAging string `json:\"cardAging\"`\n\t\tCalendarFeedEnabled bool `json:\"calendarFeedEnabled\"`\n\t\tBackground string `json:\"background\"`\n\t\tBackgroundColor string `json:\"backgroundColor\"`\n\t\tBackgroundImage string `json:\"backgroundImage\"`\n\t\tBackgroundImageScaled []BackgroundImage `json:\"backgroundImageScaled\"`\n\t\tBackgroundTile bool `json:\"backgroundTile\"`\n\t\tBackgroundBrightness string `json:\"backgroundBrightness\"`\n\t\tCanBePublic bool `json:\"canBePublic\"`\n\t\tCanBeOrg bool `json:\"canBeOrg\"`\n\t\tCanBePrivate bool `json:\"canBePrivate\"`\n\t\tCanInvite bool `json:\"canInvite\"`\n\t} `json:\"prefs\"`\n\tLabelNames struct {\n\t\tBlack string `json:\"black,omitempty\"`\n\t\tBlue string `json:\"blue,omitempty\"`\n\t\tGreen string `json:\"green,omitempty\"`\n\t\tLime string `json:\"lime,omitempty\"`\n\t\tOrange string `json:\"orange,omitempty\"`\n\t\tPink string `json:\"pink,omitempty\"`\n\t\tPurple string `json:\"purple,omitempty\"`\n\t\tRed string `json:\"red,omitempty\"`\n\t\tSky string `json:\"sky,omitempty\"`\n\t\tYellow string `json:\"yellow,omitempty\"`\n\t} `json:\"labelNames\"`\n\tLists []*List `json:\"lists\"`\n\tActions []*Action `json:\"actions\"`\n\tOrganization Organization `json:\"organization\"`\n}\n\n\/\/ NewBoard is a constructor that sets the default values\n\/\/ for Prefs.SelfJoin and Prefs.CardCovers also set by the API.\nfunc NewBoard(name string) Board {\n\tb := Board{Name: name}\n\n\t\/\/ default values in line with API POST\n\tb.Prefs.SelfJoin = true\n\tb.Prefs.CardCovers = true\n\n\treturn b\n}\n\ntype BackgroundImage struct {\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tURL string `json:\"url\"`\n}\n\nfunc (b *Board) CreatedAt() time.Time {\n\tt, _ := IDToTime(b.ID)\n\treturn t\n}\n\n\/**\n * Board retrieves a Trello board by its ID.\n *\/\nfunc (c *Client) GetBoard(boardID string, args Arguments) (board *Board, err error) {\n\tpath := fmt.Sprintf(\"boards\/%s\", boardID)\n\terr = c.Get(path, args, &board)\n\tif board != nil {\n\t\tboard.client = c\n\t}\n\treturn\n}\n\nfunc (m *Member) GetBoards(args Arguments) (boards []*Board, err error) {\n\tpath := fmt.Sprintf(\"members\/%s\/boards\", m.ID)\n\terr = m.client.Get(path, args, &boards)\n\tfor i := range boards {\n\t\tboards[i].client = m.client\n\t}\n\treturn\n}\n<commit_msg>Add create method for Board for talking to POST endpoint<commit_after>\/\/ Copyright © 2016 Aaron Longwell\n\/\/\n\/\/ Use of this source code is governed by an MIT licese.\n\/\/ Details in the LICENSE file.\n\npackage trello\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ Board represents a Trello Board.\n\/\/ https:\/\/developers.trello.com\/reference\/#boardsid\ntype Board struct {\n\tclient *Client\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDesc string `json:\"desc\"`\n\tClosed bool `json:\"closed\"`\n\tIdOrganization string `json:\"idOrganization\"`\n\tPinned bool `json:\"pinned\"`\n\tUrl string `json:\"url\"`\n\tShortUrl string `json:\"shortUrl\"`\n\tPrefs struct {\n\t\tPermissionLevel string `json:\"permissionLevel\"`\n\t\tVoting string `json:\"voting\"`\n\t\tComments string `json:\"comments\"`\n\t\tInvitations string `json:\"invitations\"`\n\t\tSelfJoin bool `json:\"selfjoin\"`\n\t\tCardCovers bool `json:\"cardCovers\"`\n\t\tCardAging string `json:\"cardAging\"`\n\t\tCalendarFeedEnabled bool `json:\"calendarFeedEnabled\"`\n\t\tBackground string `json:\"background\"`\n\t\tBackgroundColor string `json:\"backgroundColor\"`\n\t\tBackgroundImage string `json:\"backgroundImage\"`\n\t\tBackgroundImageScaled []BackgroundImage `json:\"backgroundImageScaled\"`\n\t\tBackgroundTile bool `json:\"backgroundTile\"`\n\t\tBackgroundBrightness string `json:\"backgroundBrightness\"`\n\t\tCanBePublic bool `json:\"canBePublic\"`\n\t\tCanBeOrg bool `json:\"canBeOrg\"`\n\t\tCanBePrivate bool `json:\"canBePrivate\"`\n\t\tCanInvite bool `json:\"canInvite\"`\n\t} `json:\"prefs\"`\n\tLabelNames struct {\n\t\tBlack string `json:\"black,omitempty\"`\n\t\tBlue string `json:\"blue,omitempty\"`\n\t\tGreen string `json:\"green,omitempty\"`\n\t\tLime string `json:\"lime,omitempty\"`\n\t\tOrange string `json:\"orange,omitempty\"`\n\t\tPink string `json:\"pink,omitempty\"`\n\t\tPurple string `json:\"purple,omitempty\"`\n\t\tRed string `json:\"red,omitempty\"`\n\t\tSky string `json:\"sky,omitempty\"`\n\t\tYellow string `json:\"yellow,omitempty\"`\n\t} `json:\"labelNames\"`\n\tLists []*List `json:\"lists\"`\n\tActions []*Action `json:\"actions\"`\n\tOrganization Organization `json:\"organization\"`\n}\n\n\/\/ NewBoard is a constructor that sets the default values\n\/\/ for Prefs.SelfJoin and Prefs.CardCovers also set by the API.\nfunc NewBoard(name string) Board {\n\tb := Board{Name: name}\n\n\t\/\/ default values in line with API POST\n\tb.Prefs.SelfJoin = true\n\tb.Prefs.CardCovers = true\n\n\treturn b\n}\n\ntype BackgroundImage struct {\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tURL string `json:\"url\"`\n}\n\nfunc (b *Board) CreatedAt() time.Time {\n\tt, _ := IDToTime(b.ID)\n\treturn t\n}\n\n\/\/ CreateBoard creates a board remote.\n\/\/ Attribute currently supported as exra argument: powerUps.\n\/\/ Attributes currently known to be unsupported: idBoardSource, keepFromSource.\n\/\/\n\/\/ API Docs: https:\/\/developers.trello.com\/reference\/#boardsid\nfunc (c *Client) CreateBoard(board *Board, extraArgs Arguments) error {\n\tpath := \"boards\"\n\targs := Arguments{\n\t\t\"desc\": board.Desc,\n\t\t\"name\": board.Name,\n\t\t\"prefs_selfJoin\": fmt.Sprintf(\"%t\", board.Prefs.SelfJoin),\n\t\t\"prefs_cardCovers\": fmt.Sprintf(\"%t\", board.Prefs.CardCovers),\n\t\t\"idOrganization\": board.IdOrganization,\n\t}\n\n\tif board.Prefs.Voting != \"\" {\n\t\targs[\"prefs_voting\"] = board.Prefs.Voting\n\t}\n\tif board.Prefs.PermissionLevel != \"\" {\n\t\targs[\"prefs_permissionLevel\"] = board.Prefs.PermissionLevel\n\t}\n\tif board.Prefs.Comments != \"\" {\n\t\targs[\"prefs_comments\"] = board.Prefs.Comments\n\t}\n\tif board.Prefs.Invitations != \"\" {\n\t\targs[\"prefs_invitations\"] = board.Prefs.Invitations\n\t}\n\tif board.Prefs.Background != \"\" {\n\t\targs[\"prefs_background\"] = board.Prefs.Background\n\t}\n\tif board.Prefs.CardAging != \"\" {\n\t\targs[\"prefs_cardAging\"] = board.Prefs.CardAging\n\t}\n\n\t\/\/ Expects one of \"all\", \"calendar\", \"cardAging\", \"recap\", or \"voting\".\n\tif powerUps, ok := extraArgs[\"powerUps\"]; ok {\n\t\targs[\"powerUps\"] = powerUps\n\t}\n\n\terr := c.Post(path, args, &board)\n\tif err == nil {\n\t\tboard.client = c\n\t}\n\treturn err\n}\n\/**\n * Board retrieves a Trello board by its ID.\n *\/\nfunc (c *Client) GetBoard(boardID string, args Arguments) (board *Board, err error) {\n\tpath := fmt.Sprintf(\"boards\/%s\", boardID)\n\terr = c.Get(path, args, &board)\n\tif board != nil {\n\t\tboard.client = c\n\t}\n\treturn\n}\n\nfunc (m *Member) GetBoards(args Arguments) (boards []*Board, err error) {\n\tpath := fmt.Sprintf(\"members\/%s\/boards\", m.ID)\n\terr = m.client.Get(path, args, &boards)\n\tfor i := range boards {\n\t\tboards[i].client = m.client\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package filelist_test\n\nimport (\n\t\"..\/filelist\"\n\t\"log\"\n)\n\nfunc main() {\n\tlog.Fatalf(\"filelist_test.go not implemented\")\n\trootDir := \"..\/filelist\"\n\tdirContents, err := filelist.GetDirectoryListing(rootDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"GetDirectoryListing() threw err %v\\n\", err)\n\t}\n\tlog.Printf(\"dirContents: %v\\n\", dirContents)\n}\n<commit_msg>Initial Sketch<commit_after>package filelist_test\n\nimport (\n\t\"..\/filelist\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc indexOf(target string, arrayOfStrings []string) bool {\n\tfor _, str := range arrayOfStrings {\n\t\tif target == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc TestGetDirectoryListing(t *testing.T) {\n\texpectedDirContents := []string{\n\t\t\"..\/filelist\",\n\t\t\"..\/filelist\/filelist.go\",\n\t\t\"..\/filelist\/filelist_test.go\",\n\t}\n\trootDir := \"..\/filelist\"\n\tdirContents, err := filelist.GetDirectoryListing(rootDir)\n\tif err != nil {\n\t\tt.Error(\"GetDirectoryListing() threw err %v\\n\", err)\n\t}\n\tfmt.Printf(\"DEBUG %v\\n\", dirContents)\n\tif len(dirContents) != 3 {\n\t\tt.Error(\"unexpected directory results: %v\\n\", dirContents)\n\t}\n\n\tfor _, target := range expectedDirContents {\n\t\tif indexOf(target, dirContents) == false {\n\t\t\tt.Error(\"Could not find %s in %v\\n\", target, dirContents)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package importer defines the Importer, which loads, parses and\n\/\/ type-checks packages of Go code plus their transitive closure, and\n\/\/ retains both the ASTs and the derived facts.\n\/\/\n\/\/ TODO(adonovan): document and test this package better, with examples.\n\/\/ Currently it's covered by the ssa\/ tests.\n\/\/\npackage importer\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"os\"\n\n\t\"code.google.com\/p\/go.tools\/go\/exact\"\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\n\/\/ An Importer's methods are not thread-safe.\ntype Importer struct {\n\tconfig *Config \/\/ the client configuration\n\tFset *token.FileSet \/\/ position info for all files seen\n\tPackages map[string]*PackageInfo \/\/ keys are import paths\n\terrors map[string]error \/\/ cache of errors by import path\n}\n\n\/\/ Config specifies the configuration for the importer.\n\/\/\ntype Config struct {\n\t\/\/ TypeChecker contains options relating to the type checker.\n\t\/\/ The Importer will override any user-supplied values for its\n\t\/\/ Error and Import fields; other fields will be passed\n\t\/\/ through to the type checker.\n\tTypeChecker types.Config\n\n\t\/\/ If Loader is non-nil, it is used to satisfy imports.\n\t\/\/\n\t\/\/ If it is nil, binary object files produced by the gc\n\t\/\/ compiler will be loaded instead of source code for all\n\t\/\/ imported packages. Such files supply only the types of\n\t\/\/ package-level declarations and values of constants, but no\n\t\/\/ code, so this mode will not yield a whole program. It is\n\t\/\/ intended for analyses that perform intraprocedural analysis\n\t\/\/ of a single package.\n\tLoader SourceLoader\n}\n\n\/\/ SourceLoader is the signature of a function that locates, reads and\n\/\/ parses a set of source files given an import path.\n\/\/\n\/\/ fset is the fileset to which the ASTs should be added.\n\/\/ path is the imported path, e.g. \"sync\/atomic\".\n\/\/\n\/\/ On success, the function returns files, the set of ASTs produced,\n\/\/ or the first error encountered.\n\/\/\n\/\/ The MakeGoBuildLoader utility can be used to construct a\n\/\/ SourceLoader based on go\/build.\n\/\/\ntype SourceLoader func(fset *token.FileSet, path string) (files []*ast.File, err error)\n\n\/\/ New returns a new, empty Importer using configuration options\n\/\/ specified by config.\n\/\/\nfunc New(config *Config) *Importer {\n\timp := &Importer{\n\t\tconfig: config,\n\t\tFset: token.NewFileSet(),\n\t\tPackages: make(map[string]*PackageInfo),\n\t\terrors: make(map[string]error),\n\t}\n\timp.config.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }\n\timp.config.TypeChecker.Import = imp.doImport\n\treturn imp\n}\n\n\/\/ doImport loads the typechecker package identified by path\n\/\/ Implements the types.Importer prototype.\n\/\/\nfunc (imp *Importer) doImport(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\t\/\/ Package unsafe is handled specially, and has no PackageInfo.\n\tif path == \"unsafe\" {\n\t\treturn types.Unsafe, nil\n\t}\n\n\t\/\/ Load the source\/binary for 'path', type-check it, construct\n\t\/\/ a PackageInfo and update our map (imp.Packages) and the\n\t\/\/ type-checker's map (imports).\n\tvar info *PackageInfo\n\tif imp.config.Loader != nil {\n\t\tinfo, err = imp.LoadPackage(path)\n\t} else {\n\t\tif info, ok := imp.Packages[path]; ok {\n\t\t\timports[path] = info.Pkg\n\t\t\tpkg = info.Pkg\n\t\t\treturn \/\/ positive cache hit\n\t\t}\n\n\t\tif err = imp.errors[path]; err != nil {\n\t\t\treturn \/\/ negative cache hit\n\t\t}\n\n\t\tif pkg, err = types.GcImport(imports, path); err == nil {\n\t\t\tinfo = &PackageInfo{Pkg: pkg}\n\t\t\timp.Packages[path] = info\n\t\t}\n\t}\n\n\tif err == nil {\n\t\t\/\/ Cache success.\n\t\tpkg = info.Pkg\n\t\timports[path] = pkg\n\t\treturn pkg, nil\n\t}\n\n\t\/\/ Cache failure\n\timp.errors[path] = err\n\treturn nil, err\n}\n\n\/\/ LoadPackage loads the package of the specified import-path,\n\/\/ performs type-checking, and returns the corresponding\n\/\/ PackageInfo.\n\/\/\n\/\/ Not idempotent!\n\/\/ Precondition: Importer.config.Loader != nil.\n\/\/ TODO(adonovan): fix: violated in call from CreatePackageFromArgs!\n\/\/ Not thread-safe!\n\/\/ TODO(adonovan): rethink this API.\n\/\/\nfunc (imp *Importer) LoadPackage(importPath string) (*PackageInfo, error) {\n\tif info, ok := imp.Packages[importPath]; ok {\n\t\treturn info, nil \/\/ positive cache hit\n\t}\n\n\tif err := imp.errors[importPath]; err != nil {\n\t\treturn nil, err \/\/ negative cache hit\n\t}\n\n\tif imp.config.Loader == nil {\n\t\tpanic(\"Importer.LoadPackage without a SourceLoader\")\n\t}\n\tfiles, err := imp.config.Loader(imp.Fset, importPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo := imp.CreateSourcePackage(importPath, files)\n\tif info.Err != nil {\n\t\treturn nil, info.Err\n\t}\n\treturn info, nil\n}\n\n\/\/ CreateSourcePackage invokes the type-checker on files and returns a\n\/\/ PackageInfo containing the resulting type-checker package, the\n\/\/ ASTs, and other type information.\n\/\/\n\/\/ The order of files determines the package initialization order.\n\/\/\n\/\/ importPath is the full name under which this package is known, such\n\/\/ as appears in an import declaration. e.g. \"sync\/atomic\".\n\/\/\n\/\/ The ParseFiles utility may be helpful for parsing a set of Go\n\/\/ source files.\n\/\/\n\/\/ The result is always non-nil; the presence of errors is indicated\n\/\/ by the PackageInfo.Err field.\n\/\/\nfunc (imp *Importer) CreateSourcePackage(importPath string, files []*ast.File) *PackageInfo {\n\tpkgInfo := &PackageInfo{\n\t\tFiles: files,\n\t\tInfo: types.Info{\n\t\t\tTypes: make(map[ast.Expr]types.Type),\n\t\t\tValues: make(map[ast.Expr]exact.Value),\n\t\t\tObjects: make(map[*ast.Ident]types.Object),\n\t\t\tImplicits: make(map[ast.Node]types.Object),\n\t\t\tSelections: make(map[*ast.SelectorExpr]*types.Selection),\n\t\t},\n\t}\n\tpkgInfo.Pkg, pkgInfo.Err = imp.config.TypeChecker.Check(importPath, imp.Fset, files, &pkgInfo.Info)\n\timp.Packages[importPath] = pkgInfo\n\treturn pkgInfo\n}\n<commit_msg>go.tools\/importer: retain scope information.<commit_after>\/\/ Package importer defines the Importer, which loads, parses and\n\/\/ type-checks packages of Go code plus their transitive closure, and\n\/\/ retains both the ASTs and the derived facts.\n\/\/\n\/\/ TODO(adonovan): document and test this package better, with examples.\n\/\/ Currently it's covered by the ssa\/ tests.\n\/\/\npackage importer\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"os\"\n\n\t\"code.google.com\/p\/go.tools\/go\/exact\"\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\n\/\/ An Importer's methods are not thread-safe.\ntype Importer struct {\n\tconfig *Config \/\/ the client configuration\n\tFset *token.FileSet \/\/ position info for all files seen\n\tPackages map[string]*PackageInfo \/\/ keys are import paths\n\terrors map[string]error \/\/ cache of errors by import path\n}\n\n\/\/ Config specifies the configuration for the importer.\n\/\/\ntype Config struct {\n\t\/\/ TypeChecker contains options relating to the type checker.\n\t\/\/ The Importer will override any user-supplied values for its\n\t\/\/ Error and Import fields; other fields will be passed\n\t\/\/ through to the type checker.\n\tTypeChecker types.Config\n\n\t\/\/ If Loader is non-nil, it is used to satisfy imports.\n\t\/\/\n\t\/\/ If it is nil, binary object files produced by the gc\n\t\/\/ compiler will be loaded instead of source code for all\n\t\/\/ imported packages. Such files supply only the types of\n\t\/\/ package-level declarations and values of constants, but no\n\t\/\/ code, so this mode will not yield a whole program. It is\n\t\/\/ intended for analyses that perform intraprocedural analysis\n\t\/\/ of a single package.\n\tLoader SourceLoader\n}\n\n\/\/ SourceLoader is the signature of a function that locates, reads and\n\/\/ parses a set of source files given an import path.\n\/\/\n\/\/ fset is the fileset to which the ASTs should be added.\n\/\/ path is the imported path, e.g. \"sync\/atomic\".\n\/\/\n\/\/ On success, the function returns files, the set of ASTs produced,\n\/\/ or the first error encountered.\n\/\/\n\/\/ The MakeGoBuildLoader utility can be used to construct a\n\/\/ SourceLoader based on go\/build.\n\/\/\ntype SourceLoader func(fset *token.FileSet, path string) (files []*ast.File, err error)\n\n\/\/ New returns a new, empty Importer using configuration options\n\/\/ specified by config.\n\/\/\nfunc New(config *Config) *Importer {\n\timp := &Importer{\n\t\tconfig: config,\n\t\tFset: token.NewFileSet(),\n\t\tPackages: make(map[string]*PackageInfo),\n\t\terrors: make(map[string]error),\n\t}\n\timp.config.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }\n\timp.config.TypeChecker.Import = imp.doImport\n\treturn imp\n}\n\n\/\/ doImport loads the typechecker package identified by path\n\/\/ Implements the types.Importer prototype.\n\/\/\nfunc (imp *Importer) doImport(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\t\/\/ Package unsafe is handled specially, and has no PackageInfo.\n\tif path == \"unsafe\" {\n\t\treturn types.Unsafe, nil\n\t}\n\n\t\/\/ Load the source\/binary for 'path', type-check it, construct\n\t\/\/ a PackageInfo and update our map (imp.Packages) and the\n\t\/\/ type-checker's map (imports).\n\tvar info *PackageInfo\n\tif imp.config.Loader != nil {\n\t\tinfo, err = imp.LoadPackage(path)\n\t} else {\n\t\tif info, ok := imp.Packages[path]; ok {\n\t\t\timports[path] = info.Pkg\n\t\t\tpkg = info.Pkg\n\t\t\treturn \/\/ positive cache hit\n\t\t}\n\n\t\tif err = imp.errors[path]; err != nil {\n\t\t\treturn \/\/ negative cache hit\n\t\t}\n\n\t\tif pkg, err = types.GcImport(imports, path); err == nil {\n\t\t\tinfo = &PackageInfo{Pkg: pkg}\n\t\t\timp.Packages[path] = info\n\t\t}\n\t}\n\n\tif err == nil {\n\t\t\/\/ Cache success.\n\t\tpkg = info.Pkg\n\t\timports[path] = pkg\n\t\treturn pkg, nil\n\t}\n\n\t\/\/ Cache failure\n\timp.errors[path] = err\n\treturn nil, err\n}\n\n\/\/ LoadPackage loads the package of the specified import-path,\n\/\/ performs type-checking, and returns the corresponding\n\/\/ PackageInfo.\n\/\/\n\/\/ Not idempotent!\n\/\/ Precondition: Importer.config.Loader != nil.\n\/\/ TODO(adonovan): fix: violated in call from CreatePackageFromArgs!\n\/\/ Not thread-safe!\n\/\/ TODO(adonovan): rethink this API.\n\/\/\nfunc (imp *Importer) LoadPackage(importPath string) (*PackageInfo, error) {\n\tif info, ok := imp.Packages[importPath]; ok {\n\t\treturn info, nil \/\/ positive cache hit\n\t}\n\n\tif err := imp.errors[importPath]; err != nil {\n\t\treturn nil, err \/\/ negative cache hit\n\t}\n\n\tif imp.config.Loader == nil {\n\t\tpanic(\"Importer.LoadPackage without a SourceLoader\")\n\t}\n\tfiles, err := imp.config.Loader(imp.Fset, importPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo := imp.CreateSourcePackage(importPath, files)\n\tif info.Err != nil {\n\t\treturn nil, info.Err\n\t}\n\treturn info, nil\n}\n\n\/\/ CreateSourcePackage invokes the type-checker on files and returns a\n\/\/ PackageInfo containing the resulting type-checker package, the\n\/\/ ASTs, and other type information.\n\/\/\n\/\/ The order of files determines the package initialization order.\n\/\/\n\/\/ importPath is the full name under which this package is known, such\n\/\/ as appears in an import declaration. e.g. \"sync\/atomic\".\n\/\/\n\/\/ The ParseFiles utility may be helpful for parsing a set of Go\n\/\/ source files.\n\/\/\n\/\/ The result is always non-nil; the presence of errors is indicated\n\/\/ by the PackageInfo.Err field.\n\/\/\nfunc (imp *Importer) CreateSourcePackage(importPath string, files []*ast.File) *PackageInfo {\n\tpkgInfo := &PackageInfo{\n\t\tFiles: files,\n\t\tInfo: types.Info{\n\t\t\tTypes: make(map[ast.Expr]types.Type),\n\t\t\tValues: make(map[ast.Expr]exact.Value),\n\t\t\tObjects: make(map[*ast.Ident]types.Object),\n\t\t\tImplicits: make(map[ast.Node]types.Object),\n\t\t\tScopes: make(map[ast.Node]*types.Scope),\n\t\t\tSelections: make(map[*ast.SelectorExpr]*types.Selection),\n\t\t},\n\t}\n\tpkgInfo.Pkg, pkgInfo.Err = imp.config.TypeChecker.Check(importPath, imp.Fset, files, &pkgInfo.Info)\n\timp.Packages[importPath] = pkgInfo\n\treturn pkgInfo\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage windows\n\nimport (\n\t\"github.com\/mackerelio\/mackerel-agent\/logging\"\n\t\"github.com\/mackerelio\/mackerel-agent\/metrics\"\n\t\"github.com\/mackerelio\/mackerel-agent\/util\/windows\"\n\t\"unsafe\"\n)\n\n\/\/ MemoryGenerator XXX\ntype MemoryGenerator struct {\n}\n\nvar memoryLogger = logging.GetLogger(\"metrics.memory\")\n\n\/\/ NewMemoryGenerator XXX\nfunc NewMemoryGenerator() (*MemoryGenerator, error) {\n\treturn &MemoryGenerator{}, nil\n}\n\n\/\/ Generate XXX\nfunc (g *MemoryGenerator) Generate() (metrics.Values, error) {\n\tret := make(map[string]float64)\n\n\tvar memoryStatusEx windows.MEMORY_STATUS_EX\n\tmemoryStatusEx.Length = uint32(unsafe.Sizeof(memoryStatusEx))\n\tr, _, err := windows.GlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memoryStatusEx)))\n\tif r == 0 {\n\t\treturn nil, err\n\t}\n\n\tfree := float64(memoryStatusEx.AvailPhys) \/ 1024 * 1000\n\ttotal := float64(memoryStatusEx.TotalPhys) \/ 1024 * 1000\n\tret[\"memory.free\"] = free\n\tret[\"memory.total\"] = total\n\tret[\"memory.used\"] = total - free\n\tret[\"memory.swap_total\"] = float64(memoryStatusEx.TotalVirtual) \/ 1024\n\tret[\"memory.swap_free\"] = float64(memoryStatusEx.AvailVirtual) \/ 1024\n\n\tmemoryLogger.Debugf(\"memory : %s\", ret)\n\treturn metrics.Values(ret), nil\n}\n<commit_msg>missed unit conversion.<commit_after>\/\/ +build windows\n\npackage windows\n\nimport (\n\t\"github.com\/mackerelio\/mackerel-agent\/logging\"\n\t\"github.com\/mackerelio\/mackerel-agent\/metrics\"\n\t\"github.com\/mackerelio\/mackerel-agent\/util\/windows\"\n\t\"unsafe\"\n)\n\n\/\/ MemoryGenerator XXX\ntype MemoryGenerator struct {\n}\n\nvar memoryLogger = logging.GetLogger(\"metrics.memory\")\n\n\/\/ NewMemoryGenerator XXX\nfunc NewMemoryGenerator() (*MemoryGenerator, error) {\n\treturn &MemoryGenerator{}, nil\n}\n\n\/\/ Generate XXX\nfunc (g *MemoryGenerator) Generate() (metrics.Values, error) {\n\tret := make(map[string]float64)\n\n\tvar memoryStatusEx windows.MEMORY_STATUS_EX\n\tmemoryStatusEx.Length = uint32(unsafe.Sizeof(memoryStatusEx))\n\tr, _, err := windows.GlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memoryStatusEx)))\n\tif r == 0 {\n\t\treturn nil, err\n\t}\n\n\tfree := float64(memoryStatusEx.AvailPhys) \/ 1024 * 1024\n\ttotal := float64(memoryStatusEx.TotalPhys) \/ 1024 * 1024\n\tret[\"memory.free\"] = free\n\tret[\"memory.total\"] = total\n\tret[\"memory.used\"] = total - free\n\tret[\"memory.swap_total\"] = float64(memoryStatusEx.TotalVirtual) \/ 1024\n\tret[\"memory.swap_free\"] = float64(memoryStatusEx.AvailVirtual) \/ 1024\n\n\tmemoryLogger.Debugf(\"memory : %s\", ret)\n\treturn metrics.Values(ret), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage model\n\nimport (\n\t\"github.com\/syncthing\/syncthing\/lib\/protocol\"\n\t\"github.com\/syncthing\/syncthing\/lib\/sync\"\n)\n\n\/\/ deviceFolderFileDownloadState holds current download state of a file that\n\/\/ a remote device has advertised. blockIndexes represends indexes within\n\/\/ FileInfo.Blocks that the remote device already has, and version represents\n\/\/ the version of the file that the remote device is downloading.\ntype deviceFolderFileDownloadState struct {\n\tblockIndexes []int32\n\tversion protocol.Vector\n\tblockSize int\n}\n\n\/\/ deviceFolderDownloadState holds current download state of all files that\n\/\/ a remote device is currently downloading in a specific folder.\ntype deviceFolderDownloadState struct {\n\tmut sync.RWMutex\n\tfiles map[string]deviceFolderFileDownloadState\n}\n\n\/\/ Has returns whether a block at that specific index, and that specific version of the file\n\/\/ is currently available on the remote device for pulling from a temporary file.\nfunc (p *deviceFolderDownloadState) Has(file string, version protocol.Vector, index int32) bool {\n\tp.mut.RLock()\n\tdefer p.mut.RUnlock()\n\n\tlocal, ok := p.files[file]\n\n\tif !ok || !local.version.Equal(version) {\n\t\treturn false\n\t}\n\n\tfor _, existingIndex := range local.blockIndexes {\n\t\tif existingIndex == index {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Update updates internal state of what has been downloaded into the temporary\n\/\/ files by the remote device for this specific folder.\nfunc (p *deviceFolderDownloadState) Update(updates []protocol.FileDownloadProgressUpdate) {\n\tp.mut.Lock()\n\tdefer p.mut.Unlock()\n\n\tfor _, update := range updates {\n\t\tlocal, ok := p.files[update.Name]\n\t\tif update.UpdateType == protocol.UpdateTypeForget && ok && local.version.Equal(update.Version) {\n\t\t\tdelete(p.files, update.Name)\n\t\t} else if update.UpdateType == protocol.UpdateTypeAppend {\n\t\t\tif !ok {\n\t\t\t\tlocal = deviceFolderFileDownloadState{\n\t\t\t\t\tblockIndexes: update.BlockIndexes,\n\t\t\t\t\tversion: update.Version,\n\t\t\t\t\tblockSize: int(update.BlockSize),\n\t\t\t\t}\n\t\t\t} else if !local.version.Equal(update.Version) {\n\t\t\t\tlocal.blockIndexes = append(local.blockIndexes[:0], update.BlockIndexes...)\n\t\t\t\tlocal.version = update.Version\n\t\t\t\tlocal.blockSize = int(update.BlockSize)\n\t\t\t} else {\n\t\t\t\tlocal.blockIndexes = append(local.blockIndexes, update.BlockIndexes...)\n\t\t\t}\n\t\t\tp.files[update.Name] = local\n\t\t}\n\t}\n}\n\nfunc (p *deviceFolderDownloadState) BytesDownloaded() int64 {\n\tvar res int64\n\tfor _, state := range p.files {\n\t\t\/\/ BlockSize is a new field introduced in 1.4.1, thus a fallback\n\t\t\/\/ is required (will potentially underrepresent downloaded bytes).\n\t\tif state.blockSize != 0 {\n\t\t\tres += int64(len(state.blockIndexes) * state.blockSize)\n\t\t} else {\n\t\t\tres += int64(len(state.blockIndexes) * protocol.MinBlockSize)\n\t\t}\n\t}\n\treturn res\n}\n\n\/\/ GetBlockCounts returns a map filename -> number of blocks downloaded.\nfunc (p *deviceFolderDownloadState) GetBlockCounts() map[string]int {\n\tp.mut.RLock()\n\tres := make(map[string]int, len(p.files))\n\tfor name, state := range p.files {\n\t\tres[name] = len(state.blockIndexes)\n\t}\n\tp.mut.RUnlock()\n\treturn res\n}\n\n\/\/ deviceDownloadState represents the state of all in progress downloads\n\/\/ for all folders of a specific device.\ntype deviceDownloadState struct {\n\tmut sync.RWMutex\n\tfolders map[string]*deviceFolderDownloadState\n}\n\n\/\/ Update updates internal state of what has been downloaded into the temporary\n\/\/ files by the remote device for this specific folder.\nfunc (t *deviceDownloadState) Update(folder string, updates []protocol.FileDownloadProgressUpdate) {\n\tif t == nil {\n\t\treturn\n\t}\n\tt.mut.RLock()\n\tf, ok := t.folders[folder]\n\tt.mut.RUnlock()\n\n\tif !ok {\n\t\tf = &deviceFolderDownloadState{\n\t\t\tmut: sync.NewRWMutex(),\n\t\t\tfiles: make(map[string]deviceFolderFileDownloadState),\n\t\t}\n\t\tt.mut.Lock()\n\t\tt.folders[folder] = f\n\t\tt.mut.Unlock()\n\t}\n\n\tf.Update(updates)\n}\n\n\/\/ Has returns whether block at that specific index, and that specific version of the file\n\/\/ is currently available on the remote device for pulling from a temporary file.\nfunc (t *deviceDownloadState) Has(folder, file string, version protocol.Vector, index int32) bool {\n\tif t == nil {\n\t\treturn false\n\t}\n\tt.mut.RLock()\n\tf, ok := t.folders[folder]\n\tt.mut.RUnlock()\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn f.Has(file, version, index)\n}\n\n\/\/ GetBlockCounts returns a map filename -> number of blocks downloaded for the\n\/\/ given folder.\nfunc (t *deviceDownloadState) GetBlockCounts(folder string) map[string]int {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tfor name, state := range t.folders {\n\t\tif name == folder {\n\t\t\treturn state.GetBlockCounts()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (t *deviceDownloadState) BytesDownloaded(folder string) int64 {\n\tif t == nil {\n\t\treturn 0\n\t}\n\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tfor name, state := range t.folders {\n\t\tif name == folder {\n\t\t\treturn state.BytesDownloaded()\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc newDeviceDownloadState() *deviceDownloadState {\n\treturn &deviceDownloadState{\n\t\tmut: sync.NewRWMutex(),\n\t\tfolders: make(map[string]*deviceFolderDownloadState),\n\t}\n}\n<commit_msg>lib\/model: Add missing lock on download-state (fixes #6880) (#6945)<commit_after>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage model\n\nimport (\n\t\"github.com\/syncthing\/syncthing\/lib\/protocol\"\n\t\"github.com\/syncthing\/syncthing\/lib\/sync\"\n)\n\n\/\/ deviceFolderFileDownloadState holds current download state of a file that\n\/\/ a remote device has advertised. blockIndexes represends indexes within\n\/\/ FileInfo.Blocks that the remote device already has, and version represents\n\/\/ the version of the file that the remote device is downloading.\ntype deviceFolderFileDownloadState struct {\n\tblockIndexes []int32\n\tversion protocol.Vector\n\tblockSize int\n}\n\n\/\/ deviceFolderDownloadState holds current download state of all files that\n\/\/ a remote device is currently downloading in a specific folder.\ntype deviceFolderDownloadState struct {\n\tmut sync.RWMutex\n\tfiles map[string]deviceFolderFileDownloadState\n}\n\n\/\/ Has returns whether a block at that specific index, and that specific version of the file\n\/\/ is currently available on the remote device for pulling from a temporary file.\nfunc (p *deviceFolderDownloadState) Has(file string, version protocol.Vector, index int32) bool {\n\tp.mut.RLock()\n\tdefer p.mut.RUnlock()\n\n\tlocal, ok := p.files[file]\n\n\tif !ok || !local.version.Equal(version) {\n\t\treturn false\n\t}\n\n\tfor _, existingIndex := range local.blockIndexes {\n\t\tif existingIndex == index {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Update updates internal state of what has been downloaded into the temporary\n\/\/ files by the remote device for this specific folder.\nfunc (p *deviceFolderDownloadState) Update(updates []protocol.FileDownloadProgressUpdate) {\n\tp.mut.Lock()\n\tdefer p.mut.Unlock()\n\n\tfor _, update := range updates {\n\t\tlocal, ok := p.files[update.Name]\n\t\tif update.UpdateType == protocol.UpdateTypeForget && ok && local.version.Equal(update.Version) {\n\t\t\tdelete(p.files, update.Name)\n\t\t} else if update.UpdateType == protocol.UpdateTypeAppend {\n\t\t\tif !ok {\n\t\t\t\tlocal = deviceFolderFileDownloadState{\n\t\t\t\t\tblockIndexes: update.BlockIndexes,\n\t\t\t\t\tversion: update.Version,\n\t\t\t\t\tblockSize: int(update.BlockSize),\n\t\t\t\t}\n\t\t\t} else if !local.version.Equal(update.Version) {\n\t\t\t\tlocal.blockIndexes = append(local.blockIndexes[:0], update.BlockIndexes...)\n\t\t\t\tlocal.version = update.Version\n\t\t\t\tlocal.blockSize = int(update.BlockSize)\n\t\t\t} else {\n\t\t\t\tlocal.blockIndexes = append(local.blockIndexes, update.BlockIndexes...)\n\t\t\t}\n\t\t\tp.files[update.Name] = local\n\t\t}\n\t}\n}\n\nfunc (p *deviceFolderDownloadState) BytesDownloaded() int64 {\n\tp.mut.RLock()\n\tdefer p.mut.RUnlock()\n\n\tvar res int64\n\tfor _, state := range p.files {\n\t\t\/\/ BlockSize is a new field introduced in 1.4.1, thus a fallback\n\t\t\/\/ is required (will potentially underrepresent downloaded bytes).\n\t\tif state.blockSize != 0 {\n\t\t\tres += int64(len(state.blockIndexes) * state.blockSize)\n\t\t} else {\n\t\t\tres += int64(len(state.blockIndexes) * protocol.MinBlockSize)\n\t\t}\n\t}\n\treturn res\n}\n\n\/\/ GetBlockCounts returns a map filename -> number of blocks downloaded.\nfunc (p *deviceFolderDownloadState) GetBlockCounts() map[string]int {\n\tp.mut.RLock()\n\tres := make(map[string]int, len(p.files))\n\tfor name, state := range p.files {\n\t\tres[name] = len(state.blockIndexes)\n\t}\n\tp.mut.RUnlock()\n\treturn res\n}\n\n\/\/ deviceDownloadState represents the state of all in progress downloads\n\/\/ for all folders of a specific device.\ntype deviceDownloadState struct {\n\tmut sync.RWMutex\n\tfolders map[string]*deviceFolderDownloadState\n}\n\n\/\/ Update updates internal state of what has been downloaded into the temporary\n\/\/ files by the remote device for this specific folder.\nfunc (t *deviceDownloadState) Update(folder string, updates []protocol.FileDownloadProgressUpdate) {\n\tif t == nil {\n\t\treturn\n\t}\n\tt.mut.RLock()\n\tf, ok := t.folders[folder]\n\tt.mut.RUnlock()\n\n\tif !ok {\n\t\tf = &deviceFolderDownloadState{\n\t\t\tmut: sync.NewRWMutex(),\n\t\t\tfiles: make(map[string]deviceFolderFileDownloadState),\n\t\t}\n\t\tt.mut.Lock()\n\t\tt.folders[folder] = f\n\t\tt.mut.Unlock()\n\t}\n\n\tf.Update(updates)\n}\n\n\/\/ Has returns whether block at that specific index, and that specific version of the file\n\/\/ is currently available on the remote device for pulling from a temporary file.\nfunc (t *deviceDownloadState) Has(folder, file string, version protocol.Vector, index int32) bool {\n\tif t == nil {\n\t\treturn false\n\t}\n\tt.mut.RLock()\n\tf, ok := t.folders[folder]\n\tt.mut.RUnlock()\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn f.Has(file, version, index)\n}\n\n\/\/ GetBlockCounts returns a map filename -> number of blocks downloaded for the\n\/\/ given folder.\nfunc (t *deviceDownloadState) GetBlockCounts(folder string) map[string]int {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tfor name, state := range t.folders {\n\t\tif name == folder {\n\t\t\treturn state.GetBlockCounts()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (t *deviceDownloadState) BytesDownloaded(folder string) int64 {\n\tif t == nil {\n\t\treturn 0\n\t}\n\n\tt.mut.RLock()\n\tdefer t.mut.RUnlock()\n\n\tfor name, state := range t.folders {\n\t\tif name == folder {\n\t\t\treturn state.BytesDownloaded()\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc newDeviceDownloadState() *deviceDownloadState {\n\treturn &deviceDownloadState{\n\t\tmut: sync.NewRWMutex(),\n\t\tfolders: make(map[string]*deviceFolderDownloadState),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"http\"\n\t\"json\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"template\"\n\t\"time\"\n\t\"xml\"\n)\n\ntype Configuration struct {\n\tUpdateInterval int64\n\tFeeds []FeedInfo\n}\n\ntype FeedInfo struct {\n\tName string\n\tURL string\n\tType string\n}\n\ntype Feed struct {\n\tInfo FeedInfo\n\tItems map[string]Item\n}\n\ntype Item struct {\n\tTitle string\n\tGUID string\n\tURL string\n\tDate string \/\/ TODO this may need to be a struct\n\tDesc string\n\tContent string\n\tRead bool\n}\n\ntype RSSData struct {\n\tChannel Channel\n}\n\ntype Channel struct {\n\tTitle string\n\tLink string\n\tDescription string\n\tItem []RSSItemData\n}\n\ntype RSSItemData struct {\n\tTitle string\n\tLink string\n\tPubDate string\n\tGUID string\n\tDescription string\n\tContent string\n}\n\ntype TemplateData struct {\n\tConfig Configuration\n\tFeeds map[string]Feed\n}\n\nvar (\n\tConfig Configuration\n\tFeeds map[string]Feed\n\tTmplData TemplateData\n\n\tpage_template template.Template\n\tpage_content string\n\tclient http.Client\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tInitTemplate()\n\tReadConfig()\n\tInitCache()\n\tWriteCache()\n\tgo RunHTTPServer()\n\n\tticks := time.Tick(1e9 * Config.UpdateInterval)\n\tfor {\n\t\tReadFeeds()\n\t\t<-ticks\n\t}\n}\n\nfunc InitTemplate() {\n\tlog.Print(\"Initializing Page Template\")\n\tpage_template.Parse(page_template_string)\n}\n\nfunc ReadConfig() {\n\tlog.Print(\"Reading Config\")\n\t\/\/ Read config from ~\/.munchrc\n\tfile, err := os.Open(path.Join(os.Getenv(\"HOME\"),\".munchrc\"))\n\tif err != nil {\n\t\tlog.Fatal(err.String())\n\t}\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&Config)\n\tif err != nil {\n\t\tlog.Fatal(err.String())\n\t}\n}\n\nfunc InitCache() {\n\tFeeds = make(map[string]Feed)\n\t\/\/ Ensure the cache directory exists\n\tcachePath := path.Join(os.Getenv(\"HOME\"), \".munch.d\", \"cache\")\n\tos.MkdirAll(cachePath, 0700)\n\t\/\/ For each feed\n\tfor _, info := range Config.Feeds {\n\t\tname := info.Name\n\t\tfPath := path.Join(cachePath, name)\n\t\tfile, _ := os.Open(fPath)\n\t\tif file != nil {\n\t\t} else {\n\t\t\tlog.Print(\"New Feed: \", name)\n\t\t\tfeed := Feed{}\n\t\t\tfeed.Info = info\n\t\t\tfeed.Items = make(map[string]Item)\n\t\t\tFeeds[name] = feed\n\t\t}\n\t}\n}\n\nfunc WriteCache() {\n\t\/\/ TODO\n}\n\nfunc ReadFeeds() {\n\tlog.Print(\"Updating feeds\")\n\tfor _, feed := range Feeds {\n\t\tswitch (feed.Info.Type) {\n\t\tcase \"RSS\":\n\t\t\treadRSS(feed)\n\t\tdefault:\n\t\t\tlog.Print(\"Ignoring unknown feed of type \", feed.Info.Type)\n\t\t}\n\t}\n\tlog.Print(\"Done\")\n}\n\nfunc readRSS(feed Feed) {\n\turl := feed.Info.URL\n\tlog.Print(url)\n\tr, err := client.Get(url)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t}\n\treader := r.Body\n\tfeedData := RSSData{}\n\terr = xml.Unmarshal(reader, &feedData)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t}\n\t\/\/ now transform the XML into our internal data structure\n\tchanged := false\n\tfor _, itemData := range feedData.Channel.Item {\n\t\tguid := itemData.GUID\n\t\t_, ok := feed.Items[guid]\n\t\tif !ok {\n\t\t\t\/\/ GUID not found - add the item\n\t\t\tchanged = true\n\t\t\titem := Item {\n\t\t\t\tTitle: itemData.Title,\n\t\t\t\tGUID: guid,\n\t\t\t\tURL: itemData.Link,\n\t\t\t\tDate: itemData.PubDate,\n\t\t\t\tDesc: itemData.Description,\n\t\t\t\tContent: itemData.Content,\n\t\t\t\tRead: false,\n\t\t\t}\n\t\t\tfeed.Items[guid] = item\n\t\t}\n\t}\n\tif (changed) {\n\t\tUpdatePage()\n\t\t\/\/ TODO run some commands from Config?\n\t}\n}\n\nfunc UpdatePage() {\n\t\/\/ TODO create a write on page_content\n}\n\nfunc RunHTTPServer() {\n\tlog.Print(\"Spawning HTTP Server\")\n\thttp.HandleFunc(\"\/\", HTTPHandler)\n\terr := http.ListenAndServe(\"localhost:8090\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe: \", err.String())\n\t}\n}\n\nfunc HTTPHandler(w http.ResponseWriter, req *http.Request) {\n\tTmplData.Feeds = Feeds\n\tTmplData.Config = Config\n\terr := page_template.Execute(w, TmplData)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t}\n}\n\n<commit_msg>improved error checking<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"http\"\n\t\"json\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"template\"\n\t\"time\"\n\t\"xml\"\n)\n\ntype Configuration struct {\n\tUpdateInterval int64\n\tFeeds []FeedInfo\n}\n\ntype FeedInfo struct {\n\tName string\n\tURL string\n\tType string\n}\n\ntype Feed struct {\n\tInfo FeedInfo\n\tItems map[string]Item\n}\n\ntype Item struct {\n\tTitle string\n\tGUID string\n\tURL string\n\tDate string \/\/ TODO this may need to be a struct\n\tDesc string\n\tContent string\n\tRead bool\n}\n\ntype RSSData struct {\n\tChannel Channel\n}\n\ntype Channel struct {\n\tTitle string\n\tLink string\n\tDescription string\n\tItem []RSSItemData\n}\n\ntype RSSItemData struct {\n\tTitle string\n\tLink string\n\tPubDate string\n\tGUID string\n\tDescription string\n\tContent string\n}\n\ntype TemplateData struct {\n\tConfig Configuration\n\tFeeds map[string]Feed\n}\n\nvar (\n\tConfig Configuration\n\tFeeds map[string]Feed\n\tTmplData TemplateData\n\n\tpage_template template.Template\n\tpage_content string\n\tclient http.Client\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tInitTemplate()\n\tReadConfig()\n\tInitCache()\n\tWriteCache()\n\tgo RunHTTPServer()\n\n\tticks := time.Tick(1e9 * Config.UpdateInterval)\n\tfor {\n\t\tReadFeeds()\n\t\t<-ticks\n\t}\n}\n\nfunc InitTemplate() {\n\tlog.Print(\"Initializing Page Template\")\n\tpage_template.Parse(page_template_string)\n}\n\nfunc ReadConfig() {\n\tlog.Print(\"Reading Config\")\n\t\/\/ Read config from ~\/.munchrc\n\tfile, err := os.Open(path.Join(os.Getenv(\"HOME\"),\".munchrc\"))\n\tif err != nil {\n\t\tlog.Fatal(err.String())\n\t}\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&Config)\n\tif err != nil {\n\t\tlog.Fatal(err.String())\n\t}\n}\n\nfunc InitCache() {\n\tFeeds = make(map[string]Feed)\n\t\/\/ Ensure the cache directory exists\n\tcachePath := path.Join(os.Getenv(\"HOME\"), \".munch.d\", \"cache\")\n\tos.MkdirAll(cachePath, 0700)\n\t\/\/ For each feed\n\tfor _, info := range Config.Feeds {\n\t\tname := info.Name\n\t\tfPath := path.Join(cachePath, name)\n\t\tfile, _ := os.Open(fPath)\n\t\tif file != nil {\n\t\t} else {\n\t\t\tlog.Print(\"New Feed: \", name)\n\t\t\tfeed := Feed{}\n\t\t\tfeed.Info = info\n\t\t\tfeed.Items = make(map[string]Item)\n\t\t\tFeeds[name] = feed\n\t\t}\n\t}\n}\n\nfunc WriteCache() {\n\t\/\/ TODO\n}\n\nfunc ReadFeeds() {\n\tlog.Print(\"Updating feeds\")\n\tfor _, feed := range Feeds {\n\t\tswitch (feed.Info.Type) {\n\t\tcase \"RSS\":\n\t\t\treadRSS(feed)\n\t\tdefault:\n\t\t\tlog.Print(\"Ignoring unknown feed of type \", feed.Info.Type)\n\t\t}\n\t}\n\tlog.Print(\"Done\")\n}\n\nfunc readRSS(feed Feed) {\n\turl := feed.Info.URL\n\tlog.Print(url)\n\tr, err := client.Get(url)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t\treturn\n\t}\n\treader := r.Body\n\tfeedData := RSSData{}\n\terr = xml.Unmarshal(reader, &feedData)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t\treturn\n\t}\n\t\/\/ now transform the XML into our internal data structure\n\tchanged := false\n\tfor _, itemData := range feedData.Channel.Item {\n\t\tguid := itemData.GUID\n\t\t_, ok := feed.Items[guid]\n\t\tif !ok {\n\t\t\t\/\/ GUID not found - add the item\n\t\t\tchanged = true\n\t\t\titem := Item {\n\t\t\t\tTitle: itemData.Title,\n\t\t\t\tGUID: guid,\n\t\t\t\tURL: itemData.Link,\n\t\t\t\tDate: itemData.PubDate,\n\t\t\t\tDesc: itemData.Description,\n\t\t\t\tContent: itemData.Content,\n\t\t\t\tRead: false,\n\t\t\t}\n\t\t\tfeed.Items[guid] = item\n\t\t}\n\t}\n\tif (changed) {\n\t\tUpdatePage()\n\t\t\/\/ TODO run some commands from Config?\n\t}\n}\n\nfunc readAtom(feed Feed) {\n\n}\n\nfunc readRDF(feed Feed) {\n\n}\n\nfunc UpdatePage() {\n\t\/\/ TODO create a write on page_content\n}\n\nfunc RunHTTPServer() {\n\tlog.Print(\"Spawning HTTP Server\")\n\thttp.HandleFunc(\"\/\", HTTPHandler)\n\terr := http.ListenAndServe(\"localhost:8090\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe: \", err.String())\n\t}\n}\n\nfunc HTTPHandler(w http.ResponseWriter, req *http.Request) {\n\tTmplData.Feeds = Feeds\n\tTmplData.Config = Config\n\terr := page_template.Execute(w, TmplData)\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err.String())\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package gcslock is a scalable, distributed mutex that can be used\n\/\/ to serialize computations anywhere on the global internet.\npackage gcslock\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\nconst (\n\tdefaultStorageLockURL = \"https:\/\/www.googleapis.com\/upload\/storage\/v1\"\n\tdefaultStorageUnlockURL = \"https:\/\/www.googleapis.com\/storage\/v1\"\n)\n\nvar (\n\t\/\/ These vars are used in the requests below. Having separate default\n\t\/\/ values makes it easy to reset the standard config during testing.\n\tstorageLockURL = defaultStorageLockURL\n\tstorageUnlockURL = defaultStorageUnlockURL\n)\n\n\/\/ ContextLocker provides an extension of the sync.Locker interface.\ntype ContextLocker interface {\n\tsync.Locker\n\tContextLock(context.Context) error\n\tContextUnlock(context.Context) error\n}\n\ntype mutex struct {\n\tbucket string\n\tobject string\n\tclient *http.Client\n}\n\nvar _ ContextLocker = (*mutex)(nil)\n\n\/\/ Lock waits indefinitely to acquire a mutex.\nfunc (m *mutex) Lock() {\n\tm.ContextLock(context.Background())\n}\n\n\/\/ ContextLock waits indefinitely to acquire a mutex with timeout\n\/\/ governed by passed context.\nfunc (m *mutex) ContextLock(ctx context.Context) error {\n\tq := url.Values{\n\t\t\"name\": {m.object},\n\t\t\"uploadType\": {\"media\"},\n\t\t\"ifGenerationMatch\": {\"0\"},\n\t}\n\turl := fmt.Sprintf(\"%s\/b\/%s\/o?%s\", storageLockURL, m.bucket, q.Encode())\n\t\/\/ NOTE: ctx deadline\/timeout and backoff are independent. The former is\n\t\/\/ an aggregate timeout and the latter is a per loop iteration delay.\n\tbackoff := 10 * time.Millisecond\n\tfor {\n\t\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader([]byte(\"1\")))\n\t\tif err != nil {\n\t\t\t\/\/ Likely malformed URL - retry won't fix so return.\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"content-type\", \"text\/plain\")\n\t\treq = req.WithContext(ctx)\n\t\tres, err := m.client.Do(req)\n\t\tif err == nil {\n\t\t\tres.Body.Close()\n\t\t\tif res.StatusCode == 200 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(backoff):\n\t\t\tbackoff *= 2\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\n\/\/ Unlock waits indefinitely to release a mutex.\nfunc (m *mutex) Unlock() {\n\tm.ContextUnlock(context.Background())\n}\n\n\/\/ ContextUnlock waits indefinitely to release a mutex with timeout\n\/\/ governed by passed context.\nfunc (m *mutex) ContextUnlock(ctx context.Context) error {\n\turl := fmt.Sprintf(\"%s\/b\/%s\/o\/%s?\", storageUnlockURL, m.bucket, m.object)\n\t\/\/ NOTE: ctx deadline\/timeout and backoff are independent. The former is\n\t\/\/ an aggregate timeout and the latter is a per loop iteration delay.\n\tbackoff := 10 * time.Millisecond\n\tfor {\n\t\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\t\tif err != nil {\n\t\t\t\/\/ Likely malformed URL - retry won't fix so return.\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"content-type\", \"text\/plain\")\n\t\treq = req.WithContext(ctx)\n\t\tres, err := m.client.Do(req)\n\t\tif err == nil {\n\t\t\tres.Body.Close()\n\t\t\tif res.StatusCode == 204 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(backoff):\n\t\t\tbackoff *= 2\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\n\/\/ httpClient is overwritten in tests\nvar httpClient = func(ctx context.Context) (*http.Client, error) {\n\tconst scope = \"https:\/\/www.googleapis.com\/auth\/devstorage.full_control\"\n\treturn google.DefaultClient(ctx, scope)\n}\n\n\/\/ New creates a GCS-based sync.Locker.\n\/\/ It uses Application Default Credentials to make authenticated requests\n\/\/ to Google Cloud Storage. See the DefaultClient function of the\n\/\/ golang.org\/x\/oauth2\/google package for App Default Credentials details.\n\/\/\n\/\/ If ctx argument is nil, context.Background is used.\n\/\/\nfunc New(ctx context.Context, bucket, object string) (ContextLocker, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tclient, err := httpClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &mutex{\n\t\tbucket: bucket,\n\t\tobject: object,\n\t\tclient: client,\n\t}\n\treturn m, nil\n}\n<commit_msg>remove exptraneous content-type header setting<commit_after>\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package gcslock is a scalable, distributed mutex that can be used\n\/\/ to serialize computations anywhere on the global internet.\npackage gcslock\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\nconst (\n\tdefaultStorageLockURL = \"https:\/\/www.googleapis.com\/upload\/storage\/v1\"\n\tdefaultStorageUnlockURL = \"https:\/\/www.googleapis.com\/storage\/v1\"\n)\n\nvar (\n\t\/\/ These vars are used in the requests below. Having separate default\n\t\/\/ values makes it easy to reset the standard config during testing.\n\tstorageLockURL = defaultStorageLockURL\n\tstorageUnlockURL = defaultStorageUnlockURL\n)\n\n\/\/ ContextLocker provides an extension of the sync.Locker interface.\ntype ContextLocker interface {\n\tsync.Locker\n\tContextLock(context.Context) error\n\tContextUnlock(context.Context) error\n}\n\ntype mutex struct {\n\tbucket string\n\tobject string\n\tclient *http.Client\n}\n\nvar _ ContextLocker = (*mutex)(nil)\n\n\/\/ Lock waits indefinitely to acquire a mutex.\nfunc (m *mutex) Lock() {\n\tm.ContextLock(context.Background())\n}\n\n\/\/ ContextLock waits indefinitely to acquire a mutex with timeout\n\/\/ governed by passed context.\nfunc (m *mutex) ContextLock(ctx context.Context) error {\n\tq := url.Values{\n\t\t\"name\": {m.object},\n\t\t\"uploadType\": {\"media\"},\n\t\t\"ifGenerationMatch\": {\"0\"},\n\t}\n\turl := fmt.Sprintf(\"%s\/b\/%s\/o?%s\", storageLockURL, m.bucket, q.Encode())\n\t\/\/ NOTE: ctx deadline\/timeout and backoff are independent. The former is\n\t\/\/ an aggregate timeout and the latter is a per loop iteration delay.\n\tbackoff := 10 * time.Millisecond\n\tfor {\n\t\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader([]byte(\"1\")))\n\t\tif err != nil {\n\t\t\t\/\/ Likely malformed URL - retry won't fix so return.\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"content-type\", \"text\/plain\")\n\t\treq = req.WithContext(ctx)\n\t\tres, err := m.client.Do(req)\n\t\tif err == nil {\n\t\t\tres.Body.Close()\n\t\t\tif res.StatusCode == 200 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(backoff):\n\t\t\tbackoff *= 2\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\n\/\/ Unlock waits indefinitely to release a mutex.\nfunc (m *mutex) Unlock() {\n\tm.ContextUnlock(context.Background())\n}\n\n\/\/ ContextUnlock waits indefinitely to release a mutex with timeout\n\/\/ governed by passed context.\nfunc (m *mutex) ContextUnlock(ctx context.Context) error {\n\turl := fmt.Sprintf(\"%s\/b\/%s\/o\/%s?\", storageUnlockURL, m.bucket, m.object)\n\t\/\/ NOTE: ctx deadline\/timeout and backoff are independent. The former is\n\t\/\/ an aggregate timeout and the latter is a per loop iteration delay.\n\tbackoff := 10 * time.Millisecond\n\tfor {\n\t\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\t\tif err != nil {\n\t\t\t\/\/ Likely malformed URL - retry won't fix so return.\n\t\t\treturn err\n\t\t}\n\t\treq = req.WithContext(ctx)\n\t\tres, err := m.client.Do(req)\n\t\tif err == nil {\n\t\t\tres.Body.Close()\n\t\t\tif res.StatusCode == 204 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(backoff):\n\t\t\tbackoff *= 2\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\n\/\/ httpClient is overwritten in tests\nvar httpClient = func(ctx context.Context) (*http.Client, error) {\n\tconst scope = \"https:\/\/www.googleapis.com\/auth\/devstorage.full_control\"\n\treturn google.DefaultClient(ctx, scope)\n}\n\n\/\/ New creates a GCS-based sync.Locker.\n\/\/ It uses Application Default Credentials to make authenticated requests\n\/\/ to Google Cloud Storage. See the DefaultClient function of the\n\/\/ golang.org\/x\/oauth2\/google package for App Default Credentials details.\n\/\/\n\/\/ If ctx argument is nil, context.Background is used.\n\/\/\nfunc New(ctx context.Context, bucket, object string) (ContextLocker, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tclient, err := httpClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &mutex{\n\t\tbucket: bucket,\n\t\tobject: object,\n\t\tclient: client,\n\t}\n\treturn m, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sqlx\n\n\/\/ Named Query Support\n\/\/\n\/\/ * BindMap - bind query bindvars to map\/struct args\n\/\/\t* NamedExec, NamedQuery - named query w\/ struct or map\n\/\/ * NamedStmt - a pre-compiled named query which is a prepared statement\n\/\/\n\/\/ Internal Interfaces:\n\/\/\n\/\/ * compileNamedQuery - rebind a named query, returning a query and list of names\n\/\/ * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist\n\/\/\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"unicode\"\n\n\t\"github.com\/jmoiron\/sqlx\/reflectx\"\n)\n\n\/\/ NamedStmt is a prepared statement that executes named queries. Prepare it\n\/\/ how you would execute a NamedQuery, but pass in a struct or map when executing.\ntype NamedStmt struct {\n\tParams []string\n\tQueryString string\n\tStmt *Stmt\n}\n\n\/\/ Close closes the named statement.\nfunc (n *NamedStmt) Close() error {\n\treturn n.Stmt.Close()\n}\n\n\/\/ Exec executes a named statement using the struct passed.\nfunc (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn *new(sql.Result), err\n\t}\n\treturn n.Stmt.Exec(args...)\n}\n\n\/\/ Query executes a named statement using the struct argument, returning rows.\nfunc (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn n.Stmt.Query(args...)\n}\n\n\/\/ QueryRow executes a named statement against the database. Because sqlx cannot\n\/\/ create a *sql.Row with an error condition pre-set for binding errors, sqlx\n\/\/ returns a *sqlx.Row instead.\nfunc (n *NamedStmt) QueryRow(arg interface{}) *Row {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn &Row{err: err}\n\t}\n\treturn n.Stmt.QueryRowx(args...)\n}\n\n\/\/ MustExec execs a NamedStmt, panicing on error\nfunc (n *NamedStmt) MustExec(arg interface{}) sql.Result {\n\tres, err := n.Exec(arg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}\n\n\/\/ Queryx using this NamedStmt\nfunc (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {\n\tr, err := n.Query(arg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Rows{Rows: r, Mapper: n.Stmt.Mapper}, err\n}\n\n\/\/ QueryRowx this NamedStmt. Because of limitations with QueryRow, this is\n\/\/ an alias for QueryRow.\nfunc (n *NamedStmt) QueryRowx(arg interface{}) *Row {\n\treturn n.QueryRow(arg)\n}\n\n\/\/ Select using this NamedStmt\nfunc (n *NamedStmt) Select(dest interface{}, arg interface{}) error {\n\trows, err := n.Query(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ if something happens here, we want to make sure the rows are Closed\n\tdefer rows.Close()\n\treturn scanAll(rows, dest, false)\n}\n\n\/\/ Get using this NamedStmt\nfunc (n *NamedStmt) Get(dest interface{}, arg interface{}) error {\n\tr := n.QueryRowx(arg)\n\treturn r.scanAny(dest, false)\n}\n\n\/\/ A union interface of preparer and binder, required to be able to prepare\n\/\/ named statements (as the bindtype must be determined).\ntype namedPreparer interface {\n\tPreparer\n\tbinder\n}\n\nfunc prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {\n\tbindType := BindType(p.DriverName())\n\tq, args, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt, err := Preparex(p, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &NamedStmt{\n\t\tQueryString: q,\n\t\tParams: args,\n\t\tStmt: stmt,\n\t}, nil\n}\n\nfunc bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {\n\tif maparg, ok := arg.(map[string]interface{}); ok {\n\t\treturn bindMapArgs(names, maparg)\n\t}\n\treturn bindArgs(names, arg, m)\n}\n\n\/\/ private interface to generate a list of interfaces from a given struct\n\/\/ type, given a list of names to pull out of the struct. Used by public\n\/\/ BindStruct interface.\nfunc bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {\n\targlist := make([]interface{}, 0, len(names))\n\n\t\/\/ grab the indirected value of arg\n\tv := reflect.ValueOf(arg)\n\tfor v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {\n\t\tv = v.Elem()\n\t}\n\n\tfields := m.TraversalsByName(v.Type(), names)\n\tfor i, t := range fields {\n\t\tif len(t) == 0 {\n\t\t\treturn arglist, fmt.Errorf(\"could not find name %s in %#v\", names[i], arg)\n\t\t}\n\t\tval := reflectx.FieldByIndexesReadOnly(v, t)\n\t\targlist = append(arglist, val.Interface())\n\t}\n\n\treturn arglist, nil\n}\n\n\/\/ like bindArgs, but for maps.\nfunc bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {\n\targlist := make([]interface{}, 0, len(names))\n\n\tfor _, name := range names {\n\t\tval, ok := arg[name]\n\t\tif !ok {\n\t\t\treturn arglist, fmt.Errorf(\"could not find name %s in %#v\", name, arg)\n\t\t}\n\t\targlist = append(arglist, val)\n\t}\n\treturn arglist, nil\n}\n\n\/\/ bindStruct binds a named parameter query with fields from a struct argument.\n\/\/ The rules for binding field names to parameter names follow the same\n\/\/ conventions as for StructScan, including obeying the `db` struct tags.\nfunc bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {\n\tbound, names, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\targlist, err := bindArgs(names, arg, m)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\treturn bound, arglist, nil\n}\n\n\/\/ bindMap binds a named parameter query with a map of arguments.\nfunc bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {\n\tbound, names, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\targlist, err := bindMapArgs(names, args)\n\treturn bound, arglist, err\n}\n\n\/\/ -- Compilation of Named Queries\n\n\/\/ Allow digits and letters in bind params; additionally runes are\n\/\/ checked against underscores, meaning that bind params can have be\n\/\/ alphanumeric with underscores. Mind the difference between unicode\n\/\/ digits and numbers, where '5' is a digit but '五' is not.\nvar allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}\n\n\/\/ FIXME: this function isn't safe for unicode named params, as a failing test\n\/\/ can testify. This is not a regression but a failure of the original code\n\/\/ as well. It should be modified to range over runes in a string rather than\n\/\/ bytes, even though this is less convenient and slower. Hopefully the\n\/\/ addition of the prepared NamedStmt (which will only do this once) will make\n\/\/ up for the slightly slower ad-hoc NamedExec\/NamedQuery.\n\n\/\/ compile a NamedQuery into an unbound query (using the '?' bindvar) and\n\/\/ a list of names.\nfunc compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {\n\tnames = make([]string, 0, 10)\n\trebound := make([]byte, 0, len(qs))\n\n\tinName := false\n\tlast := len(qs) - 1\n\tcurrentVar := 1\n\tname := make([]byte, 0, 10)\n\n\tfor i, b := range qs {\n\t\t\/\/ a ':' while we're in a name is an error\n\t\tif b == ':' {\n\t\t\t\/\/ if this is the second ':' in a '::' escape sequence, append a ':'\n\t\t\tif inName && i > 0 && qs[i-1] == ':' {\n\t\t\t\trebound = append(rebound, ':')\n\t\t\t\tinName = false\n\t\t\t\tcontinue\n\t\t\t} else if inName {\n\t\t\t\terr = errors.New(\"unexpected `:` while reading named param at \" + strconv.Itoa(i))\n\t\t\t\treturn query, names, err\n\t\t\t}\n\t\t\tinName = true\n\t\t\tname = []byte{}\n\t\t\t\/\/ if we're in a name, and this is an allowed character, continue\n\t\t} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {\n\t\t\t\/\/ append the byte to the name if we are in a name and not on the last byte\n\t\t\tname = append(name, b)\n\t\t\t\/\/ if we're in a name and it's not an allowed character, the name is done\n\t\t} else if inName {\n\t\t\tinName = false\n\t\t\t\/\/ if this is the final byte of the string and it is part of the name, then\n\t\t\t\/\/ make sure to add it to the name\n\t\t\tif i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {\n\t\t\t\tname = append(name, b)\n\t\t\t}\n\t\t\t\/\/ add the string representation to the names list\n\t\t\tnames = append(names, string(name))\n\t\t\t\/\/ add a proper bindvar for the bindType\n\t\t\tswitch bindType {\n\t\t\t\/\/ oracle only supports named type bind vars even for positional\n\t\t\tcase NAMED:\n\t\t\t\trebound = append(rebound, ':')\n\t\t\t\trebound = append(rebound, name...)\n\t\t\tcase QUESTION, UNKNOWN:\n\t\t\t\trebound = append(rebound, '?')\n\t\t\tcase DOLLAR:\n\t\t\t\trebound = append(rebound, '$')\n\t\t\t\tfor _, b := range strconv.Itoa(currentVar) {\n\t\t\t\t\trebound = append(rebound, byte(b))\n\t\t\t\t}\n\t\t\t\tcurrentVar++\n\t\t\t}\n\t\t\t\/\/ add this byte to string unless it was not part of the name\n\t\t\tif i != last {\n\t\t\t\trebound = append(rebound, b)\n\t\t\t} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {\n\t\t\t\trebound = append(rebound, b)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ this is a normal byte and should just go onto the rebound query\n\t\t\trebound = append(rebound, b)\n\t\t}\n\t}\n\n\treturn string(rebound), names, err\n}\n\n\/\/ BindNamed binds a struct or a map to a query with named parameters.\n\/\/ DEPRECATED: use sqlx.Named` instead of this, it may be removed in future.\nfunc BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {\n\treturn bindNamedMapper(bindType, query, arg, mapper())\n}\n\n\/\/ Named takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by\n\/\/ a database. The return value uses the `?` bindvar.\nfunc Named(query string, arg interface{}) (string, []interface{}, error) {\n\treturn bindNamedMapper(QUESTION, query, arg, mapper())\n}\n\nfunc bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {\n\tif maparg, ok := arg.(map[string]interface{}); ok {\n\t\treturn bindMap(bindType, query, maparg)\n\t}\n\treturn bindStruct(bindType, query, arg, m)\n}\n\n\/\/ NamedQuery binds a named query and then runs Query on the result using the\n\/\/ provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with\n\/\/ map[string]interface{} types.\nfunc NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {\n\tq, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.Queryx(q, args...)\n}\n\n\/\/ NamedExec uses BindStruct to get a query executable by the driver and\n\/\/ then runs Exec on the result. Returns an error from the binding\n\/\/ or the query excution itself.\nfunc NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {\n\tq, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.Exec(q, args...)\n}\n<commit_msg>Add NamedStmt.Selectx<commit_after>package sqlx\n\n\/\/ Named Query Support\n\/\/\n\/\/ * BindMap - bind query bindvars to map\/struct args\n\/\/\t* NamedExec, NamedQuery - named query w\/ struct or map\n\/\/ * NamedStmt - a pre-compiled named query which is a prepared statement\n\/\/\n\/\/ Internal Interfaces:\n\/\/\n\/\/ * compileNamedQuery - rebind a named query, returning a query and list of names\n\/\/ * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist\n\/\/\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"unicode\"\n\n\t\"github.com\/jmoiron\/sqlx\/reflectx\"\n)\n\n\/\/ NamedStmt is a prepared statement that executes named queries. Prepare it\n\/\/ how you would execute a NamedQuery, but pass in a struct or map when executing.\ntype NamedStmt struct {\n\tParams []string\n\tQueryString string\n\tStmt *Stmt\n}\n\n\/\/ Close closes the named statement.\nfunc (n *NamedStmt) Close() error {\n\treturn n.Stmt.Close()\n}\n\n\/\/ Exec executes a named statement using the struct passed.\nfunc (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn *new(sql.Result), err\n\t}\n\treturn n.Stmt.Exec(args...)\n}\n\n\/\/ Query executes a named statement using the struct argument, returning rows.\nfunc (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn n.Stmt.Query(args...)\n}\n\n\/\/ QueryRow executes a named statement against the database. Because sqlx cannot\n\/\/ create a *sql.Row with an error condition pre-set for binding errors, sqlx\n\/\/ returns a *sqlx.Row instead.\nfunc (n *NamedStmt) QueryRow(arg interface{}) *Row {\n\targs, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)\n\tif err != nil {\n\t\treturn &Row{err: err}\n\t}\n\treturn n.Stmt.QueryRowx(args...)\n}\n\n\/\/ MustExec execs a NamedStmt, panicing on error\nfunc (n *NamedStmt) MustExec(arg interface{}) sql.Result {\n\tres, err := n.Exec(arg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}\n\n\/\/ Queryx using this NamedStmt\nfunc (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {\n\tr, err := n.Query(arg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Rows{Rows: r, Mapper: n.Stmt.Mapper}, err\n}\n\n\/\/ QueryRowx this NamedStmt. Because of limitations with QueryRow, this is\n\/\/ an alias for QueryRow.\nfunc (n *NamedStmt) QueryRowx(arg interface{}) *Row {\n\treturn n.QueryRow(arg)\n}\n\n\/\/ Select using this NamedStmt\nfunc (n *NamedStmt) Select(dest interface{}, arg interface{}) error {\n\trows, err := n.Query(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ if something happens here, we want to make sure the rows are Closed\n\tdefer rows.Close()\n\treturn scanAll(rows, dest, false)\n}\n\n\/\/ Selectx using this NamedStmt\nfunc (n *NamedStmt) Selectx(dest interface{}, arg interface{}) error {\n\trows, err := n.Queryx(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ if something happens here, we want to make sure the rows are Closed\n\tdefer rows.Close()\n\treturn scanAll(rows, dest, false)\n}\n\n\/\/ Get using this NamedStmt\nfunc (n *NamedStmt) Get(dest interface{}, arg interface{}) error {\n\tr := n.QueryRowx(arg)\n\treturn r.scanAny(dest, false)\n}\n\n\/\/ A union interface of preparer and binder, required to be able to prepare\n\/\/ named statements (as the bindtype must be determined).\ntype namedPreparer interface {\n\tPreparer\n\tbinder\n}\n\nfunc prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {\n\tbindType := BindType(p.DriverName())\n\tq, args, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt, err := Preparex(p, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &NamedStmt{\n\t\tQueryString: q,\n\t\tParams: args,\n\t\tStmt: stmt,\n\t}, nil\n}\n\nfunc bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {\n\tif maparg, ok := arg.(map[string]interface{}); ok {\n\t\treturn bindMapArgs(names, maparg)\n\t}\n\treturn bindArgs(names, arg, m)\n}\n\n\/\/ private interface to generate a list of interfaces from a given struct\n\/\/ type, given a list of names to pull out of the struct. Used by public\n\/\/ BindStruct interface.\nfunc bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {\n\targlist := make([]interface{}, 0, len(names))\n\n\t\/\/ grab the indirected value of arg\n\tv := reflect.ValueOf(arg)\n\tfor v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {\n\t\tv = v.Elem()\n\t}\n\n\tfields := m.TraversalsByName(v.Type(), names)\n\tfor i, t := range fields {\n\t\tif len(t) == 0 {\n\t\t\treturn arglist, fmt.Errorf(\"could not find name %s in %#v\", names[i], arg)\n\t\t}\n\t\tval := reflectx.FieldByIndexesReadOnly(v, t)\n\t\targlist = append(arglist, val.Interface())\n\t}\n\n\treturn arglist, nil\n}\n\n\/\/ like bindArgs, but for maps.\nfunc bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {\n\targlist := make([]interface{}, 0, len(names))\n\n\tfor _, name := range names {\n\t\tval, ok := arg[name]\n\t\tif !ok {\n\t\t\treturn arglist, fmt.Errorf(\"could not find name %s in %#v\", name, arg)\n\t\t}\n\t\targlist = append(arglist, val)\n\t}\n\treturn arglist, nil\n}\n\n\/\/ bindStruct binds a named parameter query with fields from a struct argument.\n\/\/ The rules for binding field names to parameter names follow the same\n\/\/ conventions as for StructScan, including obeying the `db` struct tags.\nfunc bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {\n\tbound, names, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\targlist, err := bindArgs(names, arg, m)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\treturn bound, arglist, nil\n}\n\n\/\/ bindMap binds a named parameter query with a map of arguments.\nfunc bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {\n\tbound, names, err := compileNamedQuery([]byte(query), bindType)\n\tif err != nil {\n\t\treturn \"\", []interface{}{}, err\n\t}\n\n\targlist, err := bindMapArgs(names, args)\n\treturn bound, arglist, err\n}\n\n\/\/ -- Compilation of Named Queries\n\n\/\/ Allow digits and letters in bind params; additionally runes are\n\/\/ checked against underscores, meaning that bind params can have be\n\/\/ alphanumeric with underscores. Mind the difference between unicode\n\/\/ digits and numbers, where '5' is a digit but '五' is not.\nvar allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}\n\n\/\/ FIXME: this function isn't safe for unicode named params, as a failing test\n\/\/ can testify. This is not a regression but a failure of the original code\n\/\/ as well. It should be modified to range over runes in a string rather than\n\/\/ bytes, even though this is less convenient and slower. Hopefully the\n\/\/ addition of the prepared NamedStmt (which will only do this once) will make\n\/\/ up for the slightly slower ad-hoc NamedExec\/NamedQuery.\n\n\/\/ compile a NamedQuery into an unbound query (using the '?' bindvar) and\n\/\/ a list of names.\nfunc compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {\n\tnames = make([]string, 0, 10)\n\trebound := make([]byte, 0, len(qs))\n\n\tinName := false\n\tlast := len(qs) - 1\n\tcurrentVar := 1\n\tname := make([]byte, 0, 10)\n\n\tfor i, b := range qs {\n\t\t\/\/ a ':' while we're in a name is an error\n\t\tif b == ':' {\n\t\t\t\/\/ if this is the second ':' in a '::' escape sequence, append a ':'\n\t\t\tif inName && i > 0 && qs[i-1] == ':' {\n\t\t\t\trebound = append(rebound, ':')\n\t\t\t\tinName = false\n\t\t\t\tcontinue\n\t\t\t} else if inName {\n\t\t\t\terr = errors.New(\"unexpected `:` while reading named param at \" + strconv.Itoa(i))\n\t\t\t\treturn query, names, err\n\t\t\t}\n\t\t\tinName = true\n\t\t\tname = []byte{}\n\t\t\t\/\/ if we're in a name, and this is an allowed character, continue\n\t\t} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {\n\t\t\t\/\/ append the byte to the name if we are in a name and not on the last byte\n\t\t\tname = append(name, b)\n\t\t\t\/\/ if we're in a name and it's not an allowed character, the name is done\n\t\t} else if inName {\n\t\t\tinName = false\n\t\t\t\/\/ if this is the final byte of the string and it is part of the name, then\n\t\t\t\/\/ make sure to add it to the name\n\t\t\tif i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {\n\t\t\t\tname = append(name, b)\n\t\t\t}\n\t\t\t\/\/ add the string representation to the names list\n\t\t\tnames = append(names, string(name))\n\t\t\t\/\/ add a proper bindvar for the bindType\n\t\t\tswitch bindType {\n\t\t\t\/\/ oracle only supports named type bind vars even for positional\n\t\t\tcase NAMED:\n\t\t\t\trebound = append(rebound, ':')\n\t\t\t\trebound = append(rebound, name...)\n\t\t\tcase QUESTION, UNKNOWN:\n\t\t\t\trebound = append(rebound, '?')\n\t\t\tcase DOLLAR:\n\t\t\t\trebound = append(rebound, '$')\n\t\t\t\tfor _, b := range strconv.Itoa(currentVar) {\n\t\t\t\t\trebound = append(rebound, byte(b))\n\t\t\t\t}\n\t\t\t\tcurrentVar++\n\t\t\t}\n\t\t\t\/\/ add this byte to string unless it was not part of the name\n\t\t\tif i != last {\n\t\t\t\trebound = append(rebound, b)\n\t\t\t} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {\n\t\t\t\trebound = append(rebound, b)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ this is a normal byte and should just go onto the rebound query\n\t\t\trebound = append(rebound, b)\n\t\t}\n\t}\n\n\treturn string(rebound), names, err\n}\n\n\/\/ BindNamed binds a struct or a map to a query with named parameters.\n\/\/ DEPRECATED: use sqlx.Named` instead of this, it may be removed in future.\nfunc BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {\n\treturn bindNamedMapper(bindType, query, arg, mapper())\n}\n\n\/\/ Named takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by\n\/\/ a database. The return value uses the `?` bindvar.\nfunc Named(query string, arg interface{}) (string, []interface{}, error) {\n\treturn bindNamedMapper(QUESTION, query, arg, mapper())\n}\n\nfunc bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {\n\tif maparg, ok := arg.(map[string]interface{}); ok {\n\t\treturn bindMap(bindType, query, maparg)\n\t}\n\treturn bindStruct(bindType, query, arg, m)\n}\n\n\/\/ NamedQuery binds a named query and then runs Query on the result using the\n\/\/ provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with\n\/\/ map[string]interface{} types.\nfunc NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {\n\tq, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.Queryx(q, args...)\n}\n\n\/\/ NamedExec uses BindStruct to get a query executable by the driver and\n\/\/ then runs Exec on the result. Returns an error from the binding\n\/\/ or the query excution itself.\nfunc NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {\n\tq, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.Exec(q, args...)\n}\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport \"net\/url\"\n\ntype Tag struct {\n\tName string\n\tUrl string\n}\n\nfunc NewTag(name string) Tag {\n\treturn Tag{\n\t\tName: name,\n\t\tUrl: \"\/tags\/\" + url.QueryEscape(name),\n\t}\n}\n<commit_msg>[model] fix tag url escaped<commit_after>package model\n\ntype Tag struct {\n\tName string\n\tUrl string\n}\n\nfunc NewTag(name string) Tag {\n\treturn Tag{\n\t\tName: name,\n\t\tUrl: \"\/tags\/\" + name,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2019 the Octant contributors. All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tVERSION = \"v0.9.1\"\n\tGOPATH = os.Getenv(\"GOPATH\")\n\tGIT_COMMIT = gitCommit()\n\tBUILD_TIME = time.Now().UTC().Format(time.RFC3339)\n\tLD_FLAGS = fmt.Sprintf(\"-X \\\"main.buildTime=%s\\\" -X main.gitCommit=%s\", BUILD_TIME, GIT_COMMIT)\n\tGO_FLAGS = fmt.Sprintf(\"-ldflags=%s\", LD_FLAGS)\n)\n\nfunc main() {\n\tflag.Parse()\n\tfor _, cmd := range flag.Args() {\n\t\tswitch cmd {\n\t\tcase \"ci\":\n\t\t\ttest()\n\t\t\tvet()\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\t\twebBuild()\n\t\t\tbuild()\n\t\tcase \"ci-quick\":\n\t\t\twebDeps()\n\t\t\twebBuild()\n\t\t\tbuild()\n\t\tcase \"web-deps\":\n\t\t\twebDeps()\n\t\tcase \"web-test\":\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\tcase \"web-build\":\n\t\t\twebDeps()\n\t\t\twebBuild()\n\t\tcase \"web\":\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\t\twebBuild()\n\t\tcase \"clean\":\n\t\t\tclean()\n\t\tcase \"generate\":\n\t\t\tgenerate()\n\t\tcase \"vet\":\n\t\t\tvet()\n\t\tcase \"test\":\n\t\t\tgenerate()\n\t\t\ttest()\n\t\tcase \"build\":\n\t\t\tbuild()\n\t\tcase \"run-dev\":\n\t\t\trunDev()\n\t\tcase \"go-install\":\n\t\t\tgoInstall()\n\t\tcase \"serve\":\n\t\t\tserve()\n\t\tcase \"install-test-plugin\":\n\t\t\tinstallTestPlugin()\n\t\tcase \"version\":\n\t\t\tversion()\n\t\tcase \"release\":\n\t\t\trelease()\n\t\tcase \"docker\":\n\t\t\tdocker()\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown command %q\", cmd)\n\t\t}\n\t}\n}\n\nfunc runCmd(command string, env map[string]string, args ...string) {\n\tcmd := newCmd(command, env, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tlog.Printf(\"Running: %s\\n\", cmd.String())\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc newCmd(command string, env map[string]string, args ...string) *exec.Cmd {\n\trealCommand, err := exec.LookPath(command)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to find command '%s'\", command)\n\t}\n\n\tcmd := exec.Command(realCommand, args...)\n\tcmd.Env = os.Environ()\n\t\/\/ Setting Stdout here complains about it already being set?\n\t\/\/ cmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tfor k, v := range env {\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\treturn cmd\n}\n\nfunc goInstall() {\n\tpkgs := []string{\n\t\t\"github.com\/GeertJohan\/go.rice\",\n\t\t\"github.com\/GeertJohan\/go.rice\/rice\",\n\t\t\"github.com\/golang\/mock\/gomock\",\n\t\t\"github.com\/golang\/mock\/mockgen\",\n\t\t\"github.com\/golang\/protobuf\/protoc-gen-go\",\n\t}\n\tfor _, pkg := range pkgs {\n\t\trunCmd(\"go\", map[string]string{\"GO11MODULE\": \"on\"}, \"install\", pkg)\n\t}\n}\n\nfunc clean() {\n\tremoveFakes()\n\tif err := os.Remove(\"pkg\/icon\/rice-box.go\"); err != nil {\n\t\tlog.Fatalf(\"clean: %s\", err)\n\t}\n}\n\nfunc generate() {\n\tremoveFakes()\n\trunCmd(\"go\", nil, \"generate\", \"-v\", \".\/pkg\/...\", \".\/internal\/...\")\n}\n\nfunc build() {\n\tnewpath := filepath.Join(\".\", \"build\")\n\tos.MkdirAll(newpath, 0755)\n\trunCmd(\"go\", nil, \"build\", \"-o\", \"build\/octant\", GO_FLAGS, \"-v\", \".\/cmd\/octant\")\n}\n\nfunc runDev() {\n\trunCmd(\"build\/octant\", nil, \"--enable-feature-applications=true\")\n}\n\nfunc test() {\n\trunCmd(\"go\", nil, \"test\", \"-v\", \".\/internal\/...\", \".\/pkg\/...\")\n}\n\nfunc vet() {\n\trunCmd(\"go\", nil, \"vet\", \".\/internal\/...\", \".\/pkg\/...\")\n}\n\nfunc webDeps() {\n\tcmd := newCmd(\"npm\", nil, \"ci\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-deps: %s\", err)\n\t}\n}\n\nfunc webTest() {\n\tcmd := newCmd(\"npm\", nil, \"run\", \"test:headless\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-test: %s\", err)\n\t}\n}\n\nfunc webBuild() {\n\tcmd := newCmd(\"npm\", nil, \"run\", \"build\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-build: %s\", err)\n\t}\n\trunCmd(\"go\", nil, \"generate\", \".\/web\")\n}\n\nfunc serve() {\n\tvar wg sync.WaitGroup\n\n\tuiVars := map[string]string{\"API_BASE\": \"http:\/\/localhost:7777\"}\n\tuiCmd := newCmd(\"npm\", uiVars, \"run\", \"start\")\n\tuiCmd.Stdout = os.Stdout\n\tuiCmd.Dir = \".\/web\"\n\tif err := uiCmd.Start(); err != nil {\n\t\tlog.Fatalf(\"uiCmd: start: %s\", err)\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := uiCmd.Wait(); err != nil {\n\t\t\tlog.Fatalf(\"serve: npm run: %s\", err)\n\t\t}\n\t}()\n\n\tserverVars := map[string]string{\n\t\t\"OCTANT_DISABLE_OPEN_BROWSER\": \"true\",\n\t\t\"OCTANT_LISTENER_ADDR\": \"localhost:7777\",\n\t\t\"OCTANT_PROXY_FRONTEND\": \"http:\/\/localhost:4200\",\n\t}\n\tserverCmd := newCmd(\"go\", serverVars, \"run\", \".\/cmd\/octant\/main.go\")\n\tserverCmd.Stdout = os.Stdout\n\tif err := serverCmd.Start(); err != nil {\n\t\tlog.Fatalf(\"serveCmd: start: %s\", err)\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := serverCmd.Wait(); err != nil {\n\t\t\tlog.Fatalf(\"serve: go run: %s\", err)\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n\nfunc installTestPlugin() {\n\tdir := pluginDir()\n\tlog.Printf(\"Plugin path: %s\", dir)\n\tos.MkdirAll(dir, 0755)\n\tpluginFile := fmt.Sprintf(\"%s\/octant-sample-plugin\", dir)\n\trunCmd(\"go\", nil, \"build\", \"-o\", pluginFile, \"github.com\/vmware-tanzu\/octant\/cmd\/octant-sample-plugin\")\n}\n\nfunc version() {\n\tfmt.Println(VERSION)\n}\n\nfunc release() {\n\trunCmd(\"git\", nil, \"tag\", \"-a\", VERSION, \"-m\", fmt.Sprintf(\"\\\"Release %s\\\"\", VERSION))\n\trunCmd(\"git\", nil, \"push\", \"--follow-tags\")\n}\n\nfunc docker() {\n\tdockerVars := map[string]string{\n\t\t\"CGO_ENABLED\": \"0\",\n\t\t\"GOOS\": \"linux\",\n\t\t\"GOARCH\": \"amd64\",\n\t}\n\trunCmd(\"go\", dockerVars, \"build\", \"-o\", \"octant\", GO_FLAGS, \"-v\", \".\/cmd\/octant\/main.go\")\n}\n\nfunc removeFakes() {\n\tcheckDirs := []string{\"pkg\", \"internal\"}\n\tfakePaths := []string{}\n\n\tfor _, dir := range checkDirs {\n\t\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif !info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif info.Name() == \"fake\" {\n\t\t\t\tfakePaths = append(fakePaths, filepath.Join(path, info.Name()))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"generate (%s): %s\", dir, err)\n\t\t}\n\t}\n\n\tlog.Print(\"Removing fakes from pkg\/ and internal\/\")\n\tfor _, p := range fakePaths {\n\t\tos.RemoveAll(p)\n\t}\n}\n\nfunc gitCommit() string {\n\tcmd := newCmd(\"git\", nil, \"rev-parse\", \"--short\", \"HEAD\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Printf(\"gitCommit: %s\", err)\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\", out)\n}\n\nfunc pluginDir() string {\n\txdgConfigHome := os.Getenv(\"XDG_CONFIG_HOME\")\n\tif xdgConfigHome != \"\" {\n\t\treturn filepath.Join(xdgConfigHome, \"octant\", \"plugins\")\n\t} else if runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(os.Getenv(\"LOCALAPPDATA\"), \"octant\", \"plugins\")\n\t} else {\n\t\treturn filepath.Join(os.Getenv(\"HOME\"), \".config\", \"octant\", \"plugins\")\n\t}\n}\n<commit_msg>Do not remove fakes when running \"build.go clean\"<commit_after>\/*\nCopyright (c) 2019 the Octant contributors. All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tVERSION = \"v0.9.1\"\n\tGOPATH = os.Getenv(\"GOPATH\")\n\tGIT_COMMIT = gitCommit()\n\tBUILD_TIME = time.Now().UTC().Format(time.RFC3339)\n\tLD_FLAGS = fmt.Sprintf(\"-X \\\"main.buildTime=%s\\\" -X main.gitCommit=%s\", BUILD_TIME, GIT_COMMIT)\n\tGO_FLAGS = fmt.Sprintf(\"-ldflags=%s\", LD_FLAGS)\n)\n\nfunc main() {\n\tflag.Parse()\n\tfor _, cmd := range flag.Args() {\n\t\tswitch cmd {\n\t\tcase \"ci\":\n\t\t\ttest()\n\t\t\tvet()\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\t\twebBuild()\n\t\t\tbuild()\n\t\tcase \"ci-quick\":\n\t\t\twebDeps()\n\t\t\twebBuild()\n\t\t\tbuild()\n\t\tcase \"web-deps\":\n\t\t\twebDeps()\n\t\tcase \"web-test\":\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\tcase \"web-build\":\n\t\t\twebDeps()\n\t\t\twebBuild()\n\t\tcase \"web\":\n\t\t\twebDeps()\n\t\t\twebTest()\n\t\t\twebBuild()\n\t\tcase \"clean\":\n\t\t\tclean()\n\t\tcase \"generate\":\n\t\t\tgenerate()\n\t\tcase \"vet\":\n\t\t\tvet()\n\t\tcase \"test\":\n\t\t\tgenerate()\n\t\t\ttest()\n\t\tcase \"build\":\n\t\t\tbuild()\n\t\tcase \"run-dev\":\n\t\t\trunDev()\n\t\tcase \"go-install\":\n\t\t\tgoInstall()\n\t\tcase \"serve\":\n\t\t\tserve()\n\t\tcase \"install-test-plugin\":\n\t\t\tinstallTestPlugin()\n\t\tcase \"version\":\n\t\t\tversion()\n\t\tcase \"release\":\n\t\t\trelease()\n\t\tcase \"docker\":\n\t\t\tdocker()\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown command %q\", cmd)\n\t\t}\n\t}\n}\n\nfunc runCmd(command string, env map[string]string, args ...string) {\n\tcmd := newCmd(command, env, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tlog.Printf(\"Running: %s\\n\", cmd.String())\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc newCmd(command string, env map[string]string, args ...string) *exec.Cmd {\n\trealCommand, err := exec.LookPath(command)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to find command '%s'\", command)\n\t}\n\n\tcmd := exec.Command(realCommand, args...)\n\tcmd.Env = os.Environ()\n\t\/\/ Setting Stdout here complains about it already being set?\n\t\/\/ cmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tfor k, v := range env {\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\treturn cmd\n}\n\nfunc goInstall() {\n\tpkgs := []string{\n\t\t\"github.com\/GeertJohan\/go.rice\",\n\t\t\"github.com\/GeertJohan\/go.rice\/rice\",\n\t\t\"github.com\/golang\/mock\/gomock\",\n\t\t\"github.com\/golang\/mock\/mockgen\",\n\t\t\"github.com\/golang\/protobuf\/protoc-gen-go\",\n\t}\n\tfor _, pkg := range pkgs {\n\t\trunCmd(\"go\", map[string]string{\"GO11MODULE\": \"on\"}, \"install\", pkg)\n\t}\n}\n\nfunc clean() {\n\tif err := os.Remove(\"pkg\/icon\/rice-box.go\"); err != nil {\n\t\tlog.Fatalf(\"clean: %s\", err)\n\t}\n}\n\nfunc generate() {\n\tremoveFakes()\n\trunCmd(\"go\", nil, \"generate\", \"-v\", \".\/pkg\/...\", \".\/internal\/...\")\n}\n\nfunc build() {\n\tnewpath := filepath.Join(\".\", \"build\")\n\tos.MkdirAll(newpath, 0755)\n\trunCmd(\"go\", nil, \"build\", \"-o\", \"build\/octant\", GO_FLAGS, \"-v\", \".\/cmd\/octant\")\n}\n\nfunc runDev() {\n\trunCmd(\"build\/octant\", nil, \"--enable-feature-applications=true\")\n}\n\nfunc test() {\n\trunCmd(\"go\", nil, \"test\", \"-v\", \".\/internal\/...\", \".\/pkg\/...\")\n}\n\nfunc vet() {\n\trunCmd(\"go\", nil, \"vet\", \".\/internal\/...\", \".\/pkg\/...\")\n}\n\nfunc webDeps() {\n\tcmd := newCmd(\"npm\", nil, \"ci\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-deps: %s\", err)\n\t}\n}\n\nfunc webTest() {\n\tcmd := newCmd(\"npm\", nil, \"run\", \"test:headless\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-test: %s\", err)\n\t}\n}\n\nfunc webBuild() {\n\tcmd := newCmd(\"npm\", nil, \"run\", \"build\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Dir = \".\/web\"\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"web-build: %s\", err)\n\t}\n\trunCmd(\"go\", nil, \"generate\", \".\/web\")\n}\n\nfunc serve() {\n\tvar wg sync.WaitGroup\n\n\tuiVars := map[string]string{\"API_BASE\": \"http:\/\/localhost:7777\"}\n\tuiCmd := newCmd(\"npm\", uiVars, \"run\", \"start\")\n\tuiCmd.Stdout = os.Stdout\n\tuiCmd.Dir = \".\/web\"\n\tif err := uiCmd.Start(); err != nil {\n\t\tlog.Fatalf(\"uiCmd: start: %s\", err)\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := uiCmd.Wait(); err != nil {\n\t\t\tlog.Fatalf(\"serve: npm run: %s\", err)\n\t\t}\n\t}()\n\n\tserverVars := map[string]string{\n\t\t\"OCTANT_DISABLE_OPEN_BROWSER\": \"true\",\n\t\t\"OCTANT_LISTENER_ADDR\": \"localhost:7777\",\n\t\t\"OCTANT_PROXY_FRONTEND\": \"http:\/\/localhost:4200\",\n\t}\n\tserverCmd := newCmd(\"go\", serverVars, \"run\", \".\/cmd\/octant\/main.go\")\n\tserverCmd.Stdout = os.Stdout\n\tif err := serverCmd.Start(); err != nil {\n\t\tlog.Fatalf(\"serveCmd: start: %s\", err)\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := serverCmd.Wait(); err != nil {\n\t\t\tlog.Fatalf(\"serve: go run: %s\", err)\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n\nfunc installTestPlugin() {\n\tdir := pluginDir()\n\tlog.Printf(\"Plugin path: %s\", dir)\n\tos.MkdirAll(dir, 0755)\n\tpluginFile := fmt.Sprintf(\"%s\/octant-sample-plugin\", dir)\n\trunCmd(\"go\", nil, \"build\", \"-o\", pluginFile, \"github.com\/vmware-tanzu\/octant\/cmd\/octant-sample-plugin\")\n}\n\nfunc version() {\n\tfmt.Println(VERSION)\n}\n\nfunc release() {\n\trunCmd(\"git\", nil, \"tag\", \"-a\", VERSION, \"-m\", fmt.Sprintf(\"\\\"Release %s\\\"\", VERSION))\n\trunCmd(\"git\", nil, \"push\", \"--follow-tags\")\n}\n\nfunc docker() {\n\tdockerVars := map[string]string{\n\t\t\"CGO_ENABLED\": \"0\",\n\t\t\"GOOS\": \"linux\",\n\t\t\"GOARCH\": \"amd64\",\n\t}\n\trunCmd(\"go\", dockerVars, \"build\", \"-o\", \"octant\", GO_FLAGS, \"-v\", \".\/cmd\/octant\/main.go\")\n}\n\nfunc removeFakes() {\n\tcheckDirs := []string{\"pkg\", \"internal\"}\n\tfakePaths := []string{}\n\n\tfor _, dir := range checkDirs {\n\t\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif !info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif info.Name() == \"fake\" {\n\t\t\t\tfakePaths = append(fakePaths, filepath.Join(path, info.Name()))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"generate (%s): %s\", dir, err)\n\t\t}\n\t}\n\n\tlog.Print(\"Removing fakes from pkg\/ and internal\/\")\n\tfor _, p := range fakePaths {\n\t\tos.RemoveAll(p)\n\t}\n}\n\nfunc gitCommit() string {\n\tcmd := newCmd(\"git\", nil, \"rev-parse\", \"--short\", \"HEAD\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Printf(\"gitCommit: %s\", err)\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\", out)\n}\n\nfunc pluginDir() string {\n\txdgConfigHome := os.Getenv(\"XDG_CONFIG_HOME\")\n\tif xdgConfigHome != \"\" {\n\t\treturn filepath.Join(xdgConfigHome, \"octant\", \"plugins\")\n\t} else if runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(os.Getenv(\"LOCALAPPDATA\"), \"octant\", \"plugins\")\n\t} else {\n\t\treturn filepath.Join(os.Getenv(\"HOME\"), \".config\", \"octant\", \"plugins\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build ignore\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar (\n\tverbose bool\n\tkeepGopath bool\n\trunTests bool\n\tenableCGO bool\n)\n\nvar config = struct {\n\tName string\n\tNamespace string\n\tMain string\n\tTests []string\n}{\n\tName: \"restic\", \/\/ name of the program executable and directory\n\tNamespace: \"\", \/\/ subdir of GOPATH, e.g. \"github.com\/foo\/bar\"\n\tMain: \"cmds\/restic\", \/\/ package name for the main package\n\tTests: []string{\"restic\/...\", \"cmds\/...\"}, \/\/ tests to run\n}\n\n\/\/ specialDir returns true if the file begins with a special character ('.' or '_').\nfunc specialDir(name string) bool {\n\tif name == \".\" {\n\t\treturn false\n\t}\n\n\tbase := filepath.Base(name)\n\tif base == \"vendor\" || base[0] == '_' || base[0] == '.' {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ excludePath returns true if the file should not be copied to the new GOPATH.\nfunc excludePath(name string) bool {\n\text := path.Ext(name)\n\tif ext == \".go\" || ext == \".s\" || ext == \".h\" {\n\t\treturn false\n\t}\n\n\tparentDir := filepath.Base(filepath.Dir(name))\n\tif parentDir == \"testdata\" {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ updateGopath builds a valid GOPATH at dst, with all Go files in src\/ copied\n\/\/ to dst\/prefix\/, so calling\n\/\/\n\/\/ updateGopath(\"\/tmp\/gopath\", \"\/home\/u\/restic\", \"github.com\/restic\/restic\")\n\/\/\n\/\/ with \"\/home\/u\/restic\" containing the file \"foo.go\" yields the following tree\n\/\/ at \"\/tmp\/gopath\":\n\/\/\n\/\/ \/tmp\/gopath\n\/\/ └── src\n\/\/ └── github.com\n\/\/ └── restic\n\/\/ └── restic\n\/\/ └── foo.go\nfunc updateGopath(dst, src, prefix string) error {\n\treturn filepath.Walk(src, func(name string, fi os.FileInfo, err error) error {\n\t\tif specialDir(name) {\n\t\t\tif fi.IsDir() {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif excludePath(name) {\n\t\t\treturn nil\n\t\t}\n\n\t\tintermediatePath, err := filepath.Rel(src, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfileSrc := filepath.Join(src, intermediatePath)\n\t\tfileDst := filepath.Join(dst, \"src\", prefix, intermediatePath)\n\n\t\treturn copyFile(fileDst, fileSrc)\n\t})\n}\n\nfunc directoryExists(dirname string) bool {\n\tstat, err := os.Stat(dirname)\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn stat.IsDir()\n}\n\n\/\/ copyFile creates dst from src, preserving file attributes and timestamps.\nfunc copyFile(dst, src string) error {\n\tfi, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfsrc, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fsrc.Close()\n\n\tif err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil {\n\t\tfmt.Printf(\"MkdirAll(%v)\\n\", filepath.Dir(dst))\n\t\treturn err\n\t}\n\n\tfdst, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fdst.Close()\n\n\t_, err = io.Copy(fdst, fsrc)\n\tif err == nil {\n\t\terr = os.Chmod(dst, fi.Mode())\n\t}\n\tif err == nil {\n\t\terr = os.Chtimes(dst, fi.ModTime(), fi.ModTime())\n\t}\n\n\treturn err\n}\n\n\/\/ die prints the message with fmt.Fprintf() to stderr and exits with an error\n\/\/ code.\nfunc die(message string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, message, args...)\n\tos.Exit(1)\n}\n\nfunc showUsage(output io.Writer) {\n\tfmt.Fprintf(output, \"USAGE: go run build.go OPTIONS\\n\")\n\tfmt.Fprintf(output, \"\\n\")\n\tfmt.Fprintf(output, \"OPTIONS:\\n\")\n\tfmt.Fprintf(output, \" -v --verbose output more messages\\n\")\n\tfmt.Fprintf(output, \" -t --tags specify additional build tags\\n\")\n\tfmt.Fprintf(output, \" -k --keep-gopath do not remove the GOPATH after build\\n\")\n\tfmt.Fprintf(output, \" -T --test run tests\\n\")\n\tfmt.Fprintf(output, \" -o --output set output file name\\n\")\n\tfmt.Fprintf(output, \" --enable-cgo use CGO to link against libc\\n\")\n\tfmt.Fprintf(output, \" --goos value set GOOS for cross-compilation\\n\")\n\tfmt.Fprintf(output, \" --goarch value set GOARCH for cross-compilation\\n\")\n}\n\nfunc verbosePrintf(message string, args ...interface{}) {\n\tif !verbose {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"build: \"+message, args...)\n}\n\n\/\/ cleanEnv returns a clean environment with GOPATH and GOBIN removed (if\n\/\/ present).\nfunc cleanEnv() (env []string) {\n\tfor _, v := range os.Environ() {\n\t\tif strings.HasPrefix(v, \"GOPATH=\") || strings.HasPrefix(v, \"GOBIN=\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tenv = append(env, v)\n\t}\n\n\treturn env\n}\n\n\/\/ build runs \"go build args...\" with GOPATH set to gopath.\nfunc build(cwd, goos, goarch, gopath string, args ...string) error {\n\ta := []string{\"build\"}\n\ta = append(a, \"-asmflags\", fmt.Sprintf(\"-trimpath=%s\", gopath))\n\ta = append(a, \"-gcflags\", fmt.Sprintf(\"-trimpath=%s\", gopath))\n\ta = append(a, args...)\n\tcmd := exec.Command(\"go\", a...)\n\tcmd.Env = append(cleanEnv(), \"GOPATH=\"+gopath, \"GOARCH=\"+goarch, \"GOOS=\"+goos)\n\tif !enableCGO {\n\t\tcmd.Env = append(cmd.Env, \"CGO_ENABLED=0\")\n\t}\n\n\tcmd.Dir = cwd\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tverbosePrintf(\"go %s\\n\", args)\n\n\treturn cmd.Run()\n}\n\n\/\/ test runs \"go test args...\" with GOPATH set to gopath.\nfunc test(cwd, gopath string, args ...string) error {\n\targs = append([]string{\"test\"}, args...)\n\tcmd := exec.Command(\"go\", args...)\n\tcmd.Env = append(cleanEnv(), \"GOPATH=\"+gopath)\n\tcmd.Dir = cwd\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tverbosePrintf(\"go %s\\n\", args)\n\n\treturn cmd.Run()\n}\n\n\/\/ getVersion returns the version string from the file VERSION in the current\n\/\/ directory.\nfunc getVersionFromFile() string {\n\tbuf, err := ioutil.ReadFile(\"VERSION\")\n\tif err != nil {\n\t\tverbosePrintf(\"error reading file VERSION: %v\\n\", err)\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimSpace(string(buf))\n}\n\n\/\/ getVersion returns a version string which is a combination of the contents\n\/\/ of the file VERSION in the current directory and the version from git (if\n\/\/ available).\nfunc getVersion() string {\n\tversionFile := getVersionFromFile()\n\tversionGit := getVersionFromGit()\n\n\tverbosePrintf(\"version from file 'VERSION' is %q, version from git %q\\n\",\n\t\tversionFile, versionGit)\n\n\tswitch {\n\tcase versionFile == \"\":\n\t\treturn versionGit\n\tcase versionGit == \"\":\n\t\treturn versionFile\n\t}\n\n\treturn fmt.Sprintf(\"%s (%s)\", versionFile, versionGit)\n}\n\n\/\/ getVersionFromGit returns a version string that identifies the currently\n\/\/ checked out git commit.\nfunc getVersionFromGit() string {\n\tcmd := exec.Command(\"git\", \"describe\",\n\t\t\"--long\", \"--tags\", \"--dirty\", \"--always\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tverbosePrintf(\"git describe returned error: %v\\n\", err)\n\t\treturn \"\"\n\t}\n\n\tversion := strings.TrimSpace(string(out))\n\tverbosePrintf(\"git version is %s\\n\", version)\n\treturn version\n}\n\n\/\/ Constants represents a set of constants that are set in the final binary to\n\/\/ the given value via compiler flags.\ntype Constants map[string]string\n\n\/\/ LDFlags returns the string that can be passed to go build's `-ldflags`.\nfunc (cs Constants) LDFlags() string {\n\tl := make([]string, 0, len(cs))\n\n\tfor k, v := range cs {\n\t\tl = append(l, fmt.Sprintf(`-X \"%s=%s\"`, k, v))\n\t}\n\n\treturn strings.Join(l, \" \")\n}\n\nfunc main() {\n\tver := runtime.Version()\n\tif strings.HasPrefix(ver, \"go1\") && ver < \"go1.7\" {\n\t\tfmt.Fprintf(os.Stderr, \"Go version %s detected, restic requires at least Go 1.7\\n\", ver)\n\t\tos.Exit(1)\n\t}\n\n\tbuildTags := []string{}\n\n\tskipNext := false\n\tparams := os.Args[1:]\n\n\ttargetGOOS := runtime.GOOS\n\ttargetGOARCH := runtime.GOARCH\n\n\tvar outputFilename string\n\n\tfor i, arg := range params {\n\t\tif skipNext {\n\t\t\tskipNext = false\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch arg {\n\t\tcase \"-v\", \"--verbose\":\n\t\t\tverbose = true\n\t\tcase \"-k\", \"--keep-gopath\":\n\t\t\tkeepGopath = true\n\t\tcase \"-t\", \"-tags\", \"--tags\":\n\t\t\tif i+1 >= len(params) {\n\t\t\t\tdie(\"-t given but no tag specified\")\n\t\t\t}\n\t\t\tskipNext = true\n\t\t\tbuildTags = strings.Split(params[i+1], \" \")\n\t\tcase \"-o\", \"--output\":\n\t\t\tskipNext = true\n\t\t\toutputFilename = params[i+1]\n\t\tcase \"-T\", \"--test\":\n\t\t\trunTests = true\n\t\tcase \"--enable-cgo\":\n\t\t\tenableCGO = true\n\t\tcase \"--goos\":\n\t\t\tskipNext = true\n\t\t\ttargetGOOS = params[i+1]\n\t\tcase \"--goarch\":\n\t\t\tskipNext = true\n\t\t\ttargetGOARCH = params[i+1]\n\t\tcase \"-h\":\n\t\t\tshowUsage(os.Stdout)\n\t\t\treturn\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: unknown option %q\\n\\n\", arg)\n\t\t\tshowUsage(os.Stderr)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif len(buildTags) == 0 {\n\t\tverbosePrintf(\"adding build-tag release\\n\")\n\t\tbuildTags = []string{\"release\"}\n\t}\n\n\tfor i := range buildTags {\n\t\tbuildTags[i] = strings.TrimSpace(buildTags[i])\n\t}\n\n\tverbosePrintf(\"build tags: %s\\n\", buildTags)\n\n\troot, err := os.Getwd()\n\tif err != nil {\n\t\tdie(\"Getwd(): %v\\n\", err)\n\t}\n\n\tgopath, err := ioutil.TempDir(\"\", fmt.Sprintf(\"%v-build-\", config.Name))\n\tif err != nil {\n\t\tdie(\"TempDir(): %v\\n\", err)\n\t}\n\n\tverbosePrintf(\"create GOPATH at %v\\n\", gopath)\n\tif err = updateGopath(gopath, filepath.Join(root, \"src\"), config.Namespace); err != nil {\n\t\tdie(\"copying files from %v\/src to %v\/src failed: %v\\n\", root, gopath, err)\n\t}\n\n\tvendor := filepath.Join(root, \"vendor\", \"src\")\n\tif directoryExists(vendor) {\n\t\tif err = updateGopath(gopath, vendor, \"\"); err != nil {\n\t\t\tdie(\"copying files from %v to %v failed: %v\\n\", root, gopath, err)\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tif !keepGopath {\n\t\t\tverbosePrintf(\"remove %v\\n\", gopath)\n\t\t\tif err = os.RemoveAll(gopath); err != nil {\n\t\t\t\tdie(\"remove GOPATH at %s failed: %v\\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tverbosePrintf(\"leaving temporary GOPATH at %v\\n\", gopath)\n\t\t}\n\t}()\n\n\tif outputFilename == \"\" {\n\t\toutputFilename = config.Name\n\t\tif targetGOOS == \"windows\" {\n\t\t\toutputFilename += \".exe\"\n\t\t}\n\t}\n\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tdie(\"Getwd() returned %v\\n\", err)\n\t}\n\toutput := outputFilename\n\tif !filepath.IsAbs(output) {\n\t\toutput = filepath.Join(cwd, output)\n\t}\n\n\tversion := getVersion()\n\tconstants := Constants{}\n\tif version != \"\" {\n\t\tconstants[\"main.version\"] = version\n\t}\n\tldflags := \"-s -w \" + constants.LDFlags()\n\tverbosePrintf(\"ldflags: %s\\n\", ldflags)\n\n\targs := []string{\n\t\t\"-tags\", strings.Join(buildTags, \" \"),\n\t\t\"-ldflags\", ldflags,\n\t\t\"-o\", output, config.Main,\n\t}\n\n\terr = build(filepath.Join(gopath, \"src\"), targetGOOS, targetGOARCH, gopath, args...)\n\tif err != nil {\n\t\tdie(\"build failed: %v\\n\", err)\n\t}\n\n\tif runTests {\n\t\tverbosePrintf(\"running tests\\n\")\n\n\t\terr = test(cwd, gopath, config.Tests...)\n\t\tif err != nil {\n\t\t\tdie(\"running tests failed: %v\\n\", err)\n\t\t}\n\t}\n}\n<commit_msg>Update build.go<commit_after>\/\/ +build ignore\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar (\n\tverbose bool\n\tkeepGopath bool\n\trunTests bool\n\tenableCGO bool\n)\n\nvar config = struct {\n\tName string\n\tNamespace string\n\tMain string\n\tTests []string\n}{\n\tName: \"restic\", \/\/ name of the program executable and directory\n\tNamespace: \"github.com\/restic\/restic\", \/\/ subdir of GOPATH, e.g. \"github.com\/foo\/bar\"\n\tMain: \"github.com\/restic\/restic\/cmd\/restic\", \/\/ package name for the main package\n\tTests: []string{\"internal\/...\", \"cmd\/...\"}, \/\/ tests to run\n}\n\n\/\/ specialDir returns true if the file begins with a special character ('.' or '_').\nfunc specialDir(name string) bool {\n\tif name == \".\" {\n\t\treturn false\n\t}\n\n\tbase := filepath.Base(name)\n\tif base == \"vendor\" || base[0] == '_' || base[0] == '.' {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ excludePath returns true if the file should not be copied to the new GOPATH.\nfunc excludePath(name string) bool {\n\text := path.Ext(name)\n\tif ext == \".go\" || ext == \".s\" || ext == \".h\" {\n\t\treturn false\n\t}\n\n\tparentDir := filepath.Base(filepath.Dir(name))\n\tif parentDir == \"testdata\" {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ updateGopath builds a valid GOPATH at dst, with all Go files in src\/ copied\n\/\/ to dst\/prefix\/, so calling\n\/\/\n\/\/ updateGopath(\"\/tmp\/gopath\", \"\/home\/u\/restic\", \"github.com\/restic\/restic\")\n\/\/\n\/\/ with \"\/home\/u\/restic\" containing the file \"foo.go\" yields the following tree\n\/\/ at \"\/tmp\/gopath\":\n\/\/\n\/\/ \/tmp\/gopath\n\/\/ └── src\n\/\/ └── github.com\n\/\/ └── restic\n\/\/ └── restic\n\/\/ └── foo.go\nfunc updateGopath(dst, src, prefix string) error {\n\tverbosePrintf(\"copy contents of %v to %v\\n\", src, filepath.Join(dst, prefix))\n\treturn filepath.Walk(src, func(name string, fi os.FileInfo, err error) error {\n\t\tif name == src {\n\t\t\treturn err\n\t\t}\n\n\t\tif specialDir(name) {\n\t\t\tif fi.IsDir() {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif excludePath(name) {\n\t\t\treturn nil\n\t\t}\n\n\t\tintermediatePath, err := filepath.Rel(src, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfileSrc := filepath.Join(src, intermediatePath)\n\t\tfileDst := filepath.Join(dst, \"src\", prefix, intermediatePath)\n\n\t\treturn copyFile(fileDst, fileSrc)\n\t})\n}\n\nfunc directoryExists(dirname string) bool {\n\tstat, err := os.Stat(dirname)\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn stat.IsDir()\n}\n\n\/\/ copyFile creates dst from src, preserving file attributes and timestamps.\nfunc copyFile(dst, src string) error {\n\tfi, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfsrc, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fsrc.Close()\n\n\tif err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil {\n\t\tfmt.Printf(\"MkdirAll(%v)\\n\", filepath.Dir(dst))\n\t\treturn err\n\t}\n\n\tfdst, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fdst.Close()\n\n\t_, err = io.Copy(fdst, fsrc)\n\tif err == nil {\n\t\terr = os.Chmod(dst, fi.Mode())\n\t}\n\tif err == nil {\n\t\terr = os.Chtimes(dst, fi.ModTime(), fi.ModTime())\n\t}\n\n\treturn err\n}\n\n\/\/ die prints the message with fmt.Fprintf() to stderr and exits with an error\n\/\/ code.\nfunc die(message string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, message, args...)\n\tos.Exit(1)\n}\n\nfunc showUsage(output io.Writer) {\n\tfmt.Fprintf(output, \"USAGE: go run build.go OPTIONS\\n\")\n\tfmt.Fprintf(output, \"\\n\")\n\tfmt.Fprintf(output, \"OPTIONS:\\n\")\n\tfmt.Fprintf(output, \" -v --verbose output more messages\\n\")\n\tfmt.Fprintf(output, \" -t --tags specify additional build tags\\n\")\n\tfmt.Fprintf(output, \" -k --keep-gopath do not remove the GOPATH after build\\n\")\n\tfmt.Fprintf(output, \" -T --test run tests\\n\")\n\tfmt.Fprintf(output, \" -o --output set output file name\\n\")\n\tfmt.Fprintf(output, \" --enable-cgo use CGO to link against libc\\n\")\n\tfmt.Fprintf(output, \" --goos value set GOOS for cross-compilation\\n\")\n\tfmt.Fprintf(output, \" --goarch value set GOARCH for cross-compilation\\n\")\n}\n\nfunc verbosePrintf(message string, args ...interface{}) {\n\tif !verbose {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"build: \"+message, args...)\n}\n\n\/\/ cleanEnv returns a clean environment with GOPATH and GOBIN removed (if\n\/\/ present).\nfunc cleanEnv() (env []string) {\n\tfor _, v := range os.Environ() {\n\t\tif strings.HasPrefix(v, \"GOPATH=\") || strings.HasPrefix(v, \"GOBIN=\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tenv = append(env, v)\n\t}\n\n\treturn env\n}\n\n\/\/ build runs \"go build args...\" with GOPATH set to gopath.\nfunc build(cwd, goos, goarch, gopath string, args ...string) error {\n\ta := []string{\"build\"}\n\ta = append(a, \"-asmflags\", fmt.Sprintf(\"-trimpath=%s\", gopath))\n\ta = append(a, \"-gcflags\", fmt.Sprintf(\"-trimpath=%s\", gopath))\n\ta = append(a, args...)\n\tcmd := exec.Command(\"go\", a...)\n\tcmd.Env = append(cleanEnv(), \"GOPATH=\"+gopath, \"GOARCH=\"+goarch, \"GOOS=\"+goos)\n\tif !enableCGO {\n\t\tcmd.Env = append(cmd.Env, \"CGO_ENABLED=0\")\n\t}\n\n\tcmd.Dir = cwd\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tverbosePrintf(\"go %s\\n\", args)\n\n\treturn cmd.Run()\n}\n\n\/\/ test runs \"go test args...\" with GOPATH set to gopath.\nfunc test(cwd, gopath string, args ...string) error {\n\targs = append([]string{\"test\"}, args...)\n\tcmd := exec.Command(\"go\", args...)\n\tcmd.Env = append(cleanEnv(), \"GOPATH=\"+gopath)\n\tcmd.Dir = cwd\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tverbosePrintf(\"go %s\\n\", args)\n\n\treturn cmd.Run()\n}\n\n\/\/ getVersion returns the version string from the file VERSION in the current\n\/\/ directory.\nfunc getVersionFromFile() string {\n\tbuf, err := ioutil.ReadFile(\"VERSION\")\n\tif err != nil {\n\t\tverbosePrintf(\"error reading file VERSION: %v\\n\", err)\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimSpace(string(buf))\n}\n\n\/\/ getVersion returns a version string which is a combination of the contents\n\/\/ of the file VERSION in the current directory and the version from git (if\n\/\/ available).\nfunc getVersion() string {\n\tversionFile := getVersionFromFile()\n\tversionGit := getVersionFromGit()\n\n\tverbosePrintf(\"version from file 'VERSION' is %q, version from git %q\\n\",\n\t\tversionFile, versionGit)\n\n\tswitch {\n\tcase versionFile == \"\":\n\t\treturn versionGit\n\tcase versionGit == \"\":\n\t\treturn versionFile\n\t}\n\n\treturn fmt.Sprintf(\"%s (%s)\", versionFile, versionGit)\n}\n\n\/\/ getVersionFromGit returns a version string that identifies the currently\n\/\/ checked out git commit.\nfunc getVersionFromGit() string {\n\tcmd := exec.Command(\"git\", \"describe\",\n\t\t\"--long\", \"--tags\", \"--dirty\", \"--always\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tverbosePrintf(\"git describe returned error: %v\\n\", err)\n\t\treturn \"\"\n\t}\n\n\tversion := strings.TrimSpace(string(out))\n\tverbosePrintf(\"git version is %s\\n\", version)\n\treturn version\n}\n\n\/\/ Constants represents a set of constants that are set in the final binary to\n\/\/ the given value via compiler flags.\ntype Constants map[string]string\n\n\/\/ LDFlags returns the string that can be passed to go build's `-ldflags`.\nfunc (cs Constants) LDFlags() string {\n\tl := make([]string, 0, len(cs))\n\n\tfor k, v := range cs {\n\t\tl = append(l, fmt.Sprintf(`-X \"%s=%s\"`, k, v))\n\t}\n\n\treturn strings.Join(l, \" \")\n}\n\nfunc main() {\n\tver := runtime.Version()\n\tif strings.HasPrefix(ver, \"go1\") && ver < \"go1.7\" {\n\t\tfmt.Fprintf(os.Stderr, \"Go version %s detected, restic requires at least Go 1.7\\n\", ver)\n\t\tos.Exit(1)\n\t}\n\n\tbuildTags := []string{}\n\n\tskipNext := false\n\tparams := os.Args[1:]\n\n\ttargetGOOS := runtime.GOOS\n\ttargetGOARCH := runtime.GOARCH\n\n\tvar outputFilename string\n\n\tfor i, arg := range params {\n\t\tif skipNext {\n\t\t\tskipNext = false\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch arg {\n\t\tcase \"-v\", \"--verbose\":\n\t\t\tverbose = true\n\t\tcase \"-k\", \"--keep-gopath\":\n\t\t\tkeepGopath = true\n\t\tcase \"-t\", \"-tags\", \"--tags\":\n\t\t\tif i+1 >= len(params) {\n\t\t\t\tdie(\"-t given but no tag specified\")\n\t\t\t}\n\t\t\tskipNext = true\n\t\t\tbuildTags = strings.Split(params[i+1], \" \")\n\t\tcase \"-o\", \"--output\":\n\t\t\tskipNext = true\n\t\t\toutputFilename = params[i+1]\n\t\tcase \"-T\", \"--test\":\n\t\t\trunTests = true\n\t\tcase \"--enable-cgo\":\n\t\t\tenableCGO = true\n\t\tcase \"--goos\":\n\t\t\tskipNext = true\n\t\t\ttargetGOOS = params[i+1]\n\t\tcase \"--goarch\":\n\t\t\tskipNext = true\n\t\t\ttargetGOARCH = params[i+1]\n\t\tcase \"-h\":\n\t\t\tshowUsage(os.Stdout)\n\t\t\treturn\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: unknown option %q\\n\\n\", arg)\n\t\t\tshowUsage(os.Stderr)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif len(buildTags) == 0 {\n\t\tverbosePrintf(\"adding build-tag release\\n\")\n\t\tbuildTags = []string{\"release\"}\n\t}\n\n\tfor i := range buildTags {\n\t\tbuildTags[i] = strings.TrimSpace(buildTags[i])\n\t}\n\n\tverbosePrintf(\"build tags: %s\\n\", buildTags)\n\n\troot, err := os.Getwd()\n\tif err != nil {\n\t\tdie(\"Getwd(): %v\\n\", err)\n\t}\n\n\tgopath, err := ioutil.TempDir(\"\", fmt.Sprintf(\"%v-build-\", config.Name))\n\tif err != nil {\n\t\tdie(\"TempDir(): %v\\n\", err)\n\t}\n\n\tverbosePrintf(\"create GOPATH at %v\\n\", gopath)\n\tif err = updateGopath(gopath, root, config.Namespace); err != nil {\n\t\tdie(\"copying files from %v\/src to %v\/src failed: %v\\n\", root, gopath, err)\n\t}\n\n\tvendor := filepath.Join(root, \"vendor\")\n\tif directoryExists(vendor) {\n\t\tif err = updateGopath(gopath, vendor, filepath.Join(config.Namespace, \"vendor\")); err != nil {\n\t\t\tdie(\"copying files from %v to %v failed: %v\\n\", root, gopath, err)\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tif !keepGopath {\n\t\t\tverbosePrintf(\"remove %v\\n\", gopath)\n\t\t\tif err = os.RemoveAll(gopath); err != nil {\n\t\t\t\tdie(\"remove GOPATH at %s failed: %v\\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tverbosePrintf(\"leaving temporary GOPATH at %v\\n\", gopath)\n\t\t}\n\t}()\n\n\tif outputFilename == \"\" {\n\t\toutputFilename = config.Name\n\t\tif targetGOOS == \"windows\" {\n\t\t\toutputFilename += \".exe\"\n\t\t}\n\t}\n\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tdie(\"Getwd() returned %v\\n\", err)\n\t}\n\toutput := outputFilename\n\tif !filepath.IsAbs(output) {\n\t\toutput = filepath.Join(cwd, output)\n\t}\n\n\tversion := getVersion()\n\tconstants := Constants{}\n\tif version != \"\" {\n\t\tconstants[\"main.version\"] = version\n\t}\n\tldflags := \"-s -w \" + constants.LDFlags()\n\tverbosePrintf(\"ldflags: %s\\n\", ldflags)\n\n\targs := []string{\n\t\t\"-tags\", strings.Join(buildTags, \" \"),\n\t\t\"-ldflags\", ldflags,\n\t\t\"-o\", output, config.Main,\n\t}\n\n\terr = build(filepath.Join(gopath, \"src\"), targetGOOS, targetGOARCH, gopath, args...)\n\tif err != nil {\n\t\tdie(\"build failed: %v\\n\", err)\n\t}\n\n\tif runTests {\n\t\tverbosePrintf(\"running tests\\n\")\n\n\t\terr = test(cwd, gopath, config.Tests...)\n\t\tif err != nil {\n\t\t\tdie(\"running tests failed: %v\\n\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage siv\n\n\/\/ Given strings A and B with len(A) >= len(B), xor B onto the right end of A.\n\/\/ This matches the xorend operator of RFC 5297.\nfunc xorend(a, b []byte) []byte {\n\treturn nil\n}\n<commit_msg>Implemented xorend.<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage siv\n\nimport (\n\t\"github.com\/jacobsa\/aes\/common\"\n)\n\n\/\/ Given strings A and B with len(A) >= len(B), xor B onto the right end of A.\n\/\/ This matches the xorend operator of RFC 5297.\nfunc xorend(a, b []byte) []byte {\n\taLen := len(a)\n\tbLen := len(b)\n\n\tif aLen < bLen {\n\t\tpanic(\"Invalid lengths.\")\n\t}\n\n\tdifference := aLen - bLen\n\treturn append(a[:difference], common.Xor(a[difference:], b)...)\n}\n<|endoftext|>"} {"text":"<commit_before>package gocb\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ CouchbaseList represents a list document.\ntype CouchbaseList struct {\n\tcollection *Collection\n\tid string\n}\n\n\/\/ List returns a new CouchbaseList for the document specified by id.\nfunc (c *Collection) List(id string) *CouchbaseList {\n\treturn &CouchbaseList{\n\t\tcollection: c,\n\t\tid: id,\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the list.\nfunc (cl *CouchbaseList) Iterator() ([]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar listContents []interface{}\n\terr = content.Content(&listContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn listContents, nil\n}\n\n\/\/ At retrieves the value specified at the given index from the list.\nfunc (cl *CouchbaseList) At(index int, valuePtr interface{}) error {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = GetSpec(fmt.Sprintf(\"[%d]\", index), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn result.ContentAt(0, valuePtr)\n}\n\n\/\/ RemoveAt removes the value specified at the given index from the list.\nfunc (cl *CouchbaseList) RemoveAt(index int) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = RemoveSpec(fmt.Sprintf(\"[%d]\", index), nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Append appends an item to the list.\nfunc (cl *CouchbaseList) Append(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayAppendSpec(\"\", val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Prepend prepends an item to the list.\nfunc (cl *CouchbaseList) Prepend(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayPrependSpec(\"\", val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IndexOf gets the index of the item in the list.\nfunc (cl *CouchbaseList) IndexOf(val interface{}) (int, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar listContents []interface{}\n\terr = content.Content(&listContents)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tfor i, item := range listContents {\n\t\tif item == val {\n\t\t\treturn i, nil\n\t\t}\n\t}\n\n\treturn -1, nil\n}\n\n\/\/ Size returns the size of the list.\nfunc (cl *CouchbaseList) Size() (int, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = CountSpec(\"\", nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar count int\n\terr = result.ContentAt(0, &count)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn count, nil\n}\n\n\/\/ Clear clears a list, also removing it.\nfunc (cl *CouchbaseList) Clear() error {\n\t_, err := cl.collection.Remove(cl.id, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseMap represents a map document.\ntype CouchbaseMap struct {\n\tcollection *Collection\n\tid string\n}\n\n\/\/ Map returns a new CouchbaseMap.\nfunc (c *Collection) Map(id string) *CouchbaseMap {\n\treturn &CouchbaseMap{\n\t\tcollection: c,\n\t\tid: id,\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the map.\nfunc (cl *CouchbaseMap) Iterator() (map[string]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mapContents, nil\n}\n\n\/\/ At retrieves the item for the given id from the map.\nfunc (cl *CouchbaseMap) At(id string, valuePtr interface{}) error {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = GetSpec(fmt.Sprintf(\"[%s]\", id), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn result.ContentAt(0, valuePtr)\n}\n\n\/\/ Add adds an item to the map.\nfunc (cl *CouchbaseMap) Add(id string, val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = UpsertSpec(id, val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove removes an item from the map.\nfunc (cl *CouchbaseMap) Remove(id string) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = RemoveSpec(id, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Exists verifies whether or a id exists in the map.\nfunc (cl *CouchbaseMap) Exists(id string) (bool, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = ExistsSpec(fmt.Sprintf(\"[%s]\", id), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn result.Exists(0), nil\n}\n\n\/\/ Size returns the size of the map.\nfunc (cl *CouchbaseMap) Size() (int, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = CountSpec(\"\", nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar count int\n\terr = result.ContentAt(0, &count)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn count, nil\n}\n\n\/\/ Keys returns all of the keys within the map.\nfunc (cl *CouchbaseMap) Keys() ([]string, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar keys []string\n\tfor id := range mapContents {\n\t\tkeys = append(keys, id)\n\t}\n\n\treturn keys, nil\n}\n\n\/\/ Values returns all of the values within the map.\nfunc (cl *CouchbaseMap) Values() ([]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar values []interface{}\n\tfor _, val := range mapContents {\n\t\tvalues = append(values, val)\n\t}\n\n\treturn values, nil\n}\n\n\/\/ Clear clears a map, also removing it.\nfunc (cl *CouchbaseMap) Clear() error {\n\t_, err := cl.collection.Remove(cl.id, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseSet represents a set document.\ntype CouchbaseSet struct {\n\tid string\n\tunderlying *CouchbaseList\n}\n\n\/\/ Set returns a new CouchbaseSet.\nfunc (c *Collection) Set(id string) *CouchbaseSet {\n\treturn &CouchbaseSet{\n\t\tid: id,\n\t\tunderlying: c.List(id),\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the set.\nfunc (cs *CouchbaseSet) Iterator() ([]interface{}, error) {\n\treturn cs.underlying.Iterator()\n}\n\n\/\/ Add adds a value to the set.\nfunc (cs *CouchbaseSet) Add(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayAddUniqueSpec(\"\", val, nil)\n\t_, err := cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove removes an value from the set.\nfunc (cs *CouchbaseSet) Remove(val string) error {\n\tfor {\n\t\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcas := content.Cas()\n\n\t\tvar setContents []interface{}\n\t\terr = content.Content(&setContents)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tindexToRemove := -1\n\t\tfor i, item := range setContents {\n\t\t\tif item == val {\n\t\t\t\tindexToRemove = i\n\t\t\t}\n\t\t}\n\n\t\tif indexToRemove > -1 {\n\t\t\tops := make([]MutateInSpec, 1)\n\t\t\tops[0] = RemoveSpec(fmt.Sprintf(\"[%d]\", indexToRemove), nil)\n\t\t\t_, err = cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{Cas: cas})\n\t\t\tif IsCasMismatchError(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Values returns all of the values within the set.\nfunc (cs *CouchbaseSet) Values() ([]interface{}, error) {\n\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar setContents []interface{}\n\terr = content.Content(&setContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn setContents, nil\n}\n\n\/\/ Contains verifies whether or not a value exists within the set.\nfunc (cs *CouchbaseSet) Contains(val string) (bool, error) {\n\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar setContents []interface{}\n\terr = content.Content(&setContents)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, item := range setContents {\n\t\tif item == val {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Size returns the size of the set\nfunc (cs *CouchbaseSet) Size() (int, error) {\n\treturn cs.underlying.Size()\n}\n\n\/\/ Clear clears a set, also removing it.\nfunc (cs *CouchbaseSet) Clear() error {\n\terr := cs.underlying.Clear()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseQueue represents a queue document.\ntype CouchbaseQueue struct {\n\tid string\n\tunderlying *CouchbaseList\n}\n\n\/\/ Queue returns a new CouchbaseQueue.\nfunc (c *Collection) Queue(id string) *CouchbaseQueue {\n\treturn &CouchbaseQueue{\n\t\tid: id,\n\t\tunderlying: c.List(id),\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the queue.\nfunc (cs *CouchbaseQueue) Iterator() ([]interface{}, error) {\n\treturn cs.underlying.Iterator()\n}\n\n\/\/ Push pushes a value onto the queue.\nfunc (cs *CouchbaseQueue) Push(val interface{}) error {\n\treturn cs.underlying.Prepend(val)\n}\n\n\/\/ Pop pops an items off of the queue.\nfunc (cs *CouchbaseQueue) Pop(valuePtr interface{}) error {\n\tfor {\n\t\tops := make([]LookupInSpec, 1)\n\t\tops[0] = GetSpec(\"[-1]\", nil)\n\t\tcontent, err := cs.underlying.collection.LookupIn(cs.id, ops, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcas := content.Cas()\n\t\terr = content.ContentAt(0, valuePtr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmutateOps := make([]MutateInSpec, 1)\n\t\tmutateOps[0] = RemoveSpec(\"[-1]\", nil)\n\t\t_, err = cs.underlying.collection.MutateIn(cs.id, mutateOps, &MutateInOptions{Cas: cas})\n\t\tif IsCasMismatchError(err) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\t}\n\n\treturn nil\n}\n\n\/\/ Size returns the size of the queue.\nfunc (cs *CouchbaseQueue) Size() (int, error) {\n\treturn cs.underlying.Size()\n}\n\n\/\/ Clear clears a queue, also removing it.\nfunc (cs *CouchbaseQueue) Clear() error {\n\terr := cs.underlying.Clear()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>GOCBC-601: Bound datastruct Cas retries to 16 attempts<commit_after>package gocb\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ CouchbaseList represents a list document.\ntype CouchbaseList struct {\n\tcollection *Collection\n\tid string\n}\n\n\/\/ List returns a new CouchbaseList for the document specified by id.\nfunc (c *Collection) List(id string) *CouchbaseList {\n\treturn &CouchbaseList{\n\t\tcollection: c,\n\t\tid: id,\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the list.\nfunc (cl *CouchbaseList) Iterator() ([]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar listContents []interface{}\n\terr = content.Content(&listContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn listContents, nil\n}\n\n\/\/ At retrieves the value specified at the given index from the list.\nfunc (cl *CouchbaseList) At(index int, valuePtr interface{}) error {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = GetSpec(fmt.Sprintf(\"[%d]\", index), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn result.ContentAt(0, valuePtr)\n}\n\n\/\/ RemoveAt removes the value specified at the given index from the list.\nfunc (cl *CouchbaseList) RemoveAt(index int) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = RemoveSpec(fmt.Sprintf(\"[%d]\", index), nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Append appends an item to the list.\nfunc (cl *CouchbaseList) Append(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayAppendSpec(\"\", val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Prepend prepends an item to the list.\nfunc (cl *CouchbaseList) Prepend(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayPrependSpec(\"\", val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IndexOf gets the index of the item in the list.\nfunc (cl *CouchbaseList) IndexOf(val interface{}) (int, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar listContents []interface{}\n\terr = content.Content(&listContents)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tfor i, item := range listContents {\n\t\tif item == val {\n\t\t\treturn i, nil\n\t\t}\n\t}\n\n\treturn -1, nil\n}\n\n\/\/ Size returns the size of the list.\nfunc (cl *CouchbaseList) Size() (int, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = CountSpec(\"\", nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar count int\n\terr = result.ContentAt(0, &count)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn count, nil\n}\n\n\/\/ Clear clears a list, also removing it.\nfunc (cl *CouchbaseList) Clear() error {\n\t_, err := cl.collection.Remove(cl.id, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseMap represents a map document.\ntype CouchbaseMap struct {\n\tcollection *Collection\n\tid string\n}\n\n\/\/ Map returns a new CouchbaseMap.\nfunc (c *Collection) Map(id string) *CouchbaseMap {\n\treturn &CouchbaseMap{\n\t\tcollection: c,\n\t\tid: id,\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the map.\nfunc (cl *CouchbaseMap) Iterator() (map[string]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mapContents, nil\n}\n\n\/\/ At retrieves the item for the given id from the map.\nfunc (cl *CouchbaseMap) At(id string, valuePtr interface{}) error {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = GetSpec(fmt.Sprintf(\"[%s]\", id), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn result.ContentAt(0, valuePtr)\n}\n\n\/\/ Add adds an item to the map.\nfunc (cl *CouchbaseMap) Add(id string, val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = UpsertSpec(id, val, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove removes an item from the map.\nfunc (cl *CouchbaseMap) Remove(id string) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = RemoveSpec(id, nil)\n\t_, err := cl.collection.MutateIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Exists verifies whether or a id exists in the map.\nfunc (cl *CouchbaseMap) Exists(id string) (bool, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = ExistsSpec(fmt.Sprintf(\"[%s]\", id), nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn result.Exists(0), nil\n}\n\n\/\/ Size returns the size of the map.\nfunc (cl *CouchbaseMap) Size() (int, error) {\n\tops := make([]LookupInSpec, 1)\n\tops[0] = CountSpec(\"\", nil)\n\tresult, err := cl.collection.LookupIn(cl.id, ops, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar count int\n\terr = result.ContentAt(0, &count)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn count, nil\n}\n\n\/\/ Keys returns all of the keys within the map.\nfunc (cl *CouchbaseMap) Keys() ([]string, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar keys []string\n\tfor id := range mapContents {\n\t\tkeys = append(keys, id)\n\t}\n\n\treturn keys, nil\n}\n\n\/\/ Values returns all of the values within the map.\nfunc (cl *CouchbaseMap) Values() ([]interface{}, error) {\n\tcontent, err := cl.collection.Get(cl.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapContents map[string]interface{}\n\terr = content.Content(&mapContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar values []interface{}\n\tfor _, val := range mapContents {\n\t\tvalues = append(values, val)\n\t}\n\n\treturn values, nil\n}\n\n\/\/ Clear clears a map, also removing it.\nfunc (cl *CouchbaseMap) Clear() error {\n\t_, err := cl.collection.Remove(cl.id, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseSet represents a set document.\ntype CouchbaseSet struct {\n\tid string\n\tunderlying *CouchbaseList\n}\n\n\/\/ Set returns a new CouchbaseSet.\nfunc (c *Collection) Set(id string) *CouchbaseSet {\n\treturn &CouchbaseSet{\n\t\tid: id,\n\t\tunderlying: c.List(id),\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the set.\nfunc (cs *CouchbaseSet) Iterator() ([]interface{}, error) {\n\treturn cs.underlying.Iterator()\n}\n\n\/\/ Add adds a value to the set.\nfunc (cs *CouchbaseSet) Add(val interface{}) error {\n\tops := make([]MutateInSpec, 1)\n\tops[0] = ArrayAddUniqueSpec(\"\", val, nil)\n\t_, err := cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{UpsertDocument: true})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove removes an value from the set.\nfunc (cs *CouchbaseSet) Remove(val string) error {\n\tfor i := 0; i < 16; i++ {\n\t\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcas := content.Cas()\n\n\t\tvar setContents []interface{}\n\t\terr = content.Content(&setContents)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tindexToRemove := -1\n\t\tfor i, item := range setContents {\n\t\t\tif item == val {\n\t\t\t\tindexToRemove = i\n\t\t\t}\n\t\t}\n\n\t\tif indexToRemove > -1 {\n\t\t\tops := make([]MutateInSpec, 1)\n\t\t\tops[0] = RemoveSpec(fmt.Sprintf(\"[%d]\", indexToRemove), nil)\n\t\t\t_, err = cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{Cas: cas})\n\t\t\tif IsCasMismatchError(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"failed to perform operation after 16 retries\")\n}\n\n\/\/ Values returns all of the values within the set.\nfunc (cs *CouchbaseSet) Values() ([]interface{}, error) {\n\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar setContents []interface{}\n\terr = content.Content(&setContents)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn setContents, nil\n}\n\n\/\/ Contains verifies whether or not a value exists within the set.\nfunc (cs *CouchbaseSet) Contains(val string) (bool, error) {\n\tcontent, err := cs.underlying.collection.Get(cs.id, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar setContents []interface{}\n\terr = content.Content(&setContents)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, item := range setContents {\n\t\tif item == val {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Size returns the size of the set\nfunc (cs *CouchbaseSet) Size() (int, error) {\n\treturn cs.underlying.Size()\n}\n\n\/\/ Clear clears a set, also removing it.\nfunc (cs *CouchbaseSet) Clear() error {\n\terr := cs.underlying.Clear()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CouchbaseQueue represents a queue document.\ntype CouchbaseQueue struct {\n\tid string\n\tunderlying *CouchbaseList\n}\n\n\/\/ Queue returns a new CouchbaseQueue.\nfunc (c *Collection) Queue(id string) *CouchbaseQueue {\n\treturn &CouchbaseQueue{\n\t\tid: id,\n\t\tunderlying: c.List(id),\n\t}\n}\n\n\/\/ Iterator returns an iterable for all items in the queue.\nfunc (cs *CouchbaseQueue) Iterator() ([]interface{}, error) {\n\treturn cs.underlying.Iterator()\n}\n\n\/\/ Push pushes a value onto the queue.\nfunc (cs *CouchbaseQueue) Push(val interface{}) error {\n\treturn cs.underlying.Prepend(val)\n}\n\n\/\/ Pop pops an items off of the queue.\nfunc (cs *CouchbaseQueue) Pop(valuePtr interface{}) error {\n\tfor i := 0; i < 16; i++ {\n\t\tops := make([]LookupInSpec, 1)\n\t\tops[0] = GetSpec(\"[-1]\", nil)\n\t\tcontent, err := cs.underlying.collection.LookupIn(cs.id, ops, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcas := content.Cas()\n\t\terr = content.ContentAt(0, valuePtr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmutateOps := make([]MutateInSpec, 1)\n\t\tmutateOps[0] = RemoveSpec(\"[-1]\", nil)\n\t\t_, err = cs.underlying.collection.MutateIn(cs.id, mutateOps, &MutateInOptions{Cas: cas})\n\t\tif IsCasMismatchError(err) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"failed to perform operation after 16 retries\")\n}\n\n\/\/ Size returns the size of the queue.\nfunc (cs *CouchbaseQueue) Size() (int, error) {\n\treturn cs.underlying.Size()\n}\n\n\/\/ Clear clears a queue, also removing it.\nfunc (cs *CouchbaseQueue) Clear() error {\n\terr := cs.underlying.Clear()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package parser\n\nimport (\n\t\"github.com\/Zac-Garby\/pluto\/ast\"\n\t\"github.com\/Zac-Garby\/pluto\/token\"\n)\n\nfunc (p *Parser) parseStatement() ast.Statement {\n\tvar stmt ast.Statement\n\n\tif p.curIs(token.SEMI) {\n\t\treturn nil\n\t} else if p.curIs(token.RETURN) {\n\t\tstmt = p.parseReturnStatement()\n\t} else if p.curIs(token.DEF) {\n\t\tstmt = p.parseDefStatement()\n\t} else if p.curIs(token.NEXT) {\n\t\tstmt = p.parseNextStatement()\n\t} else if p.curIs(token.BREAK) {\n\t\tstmt = p.parseBreakStatement()\n\t} else if p.curIs(token.CLASS) {\n\t\tstmt = p.parseClassDeclaration()\n\t} else {\n\t\tstmt = p.parseExpressionStatement()\n\t}\n\n\tif !p.expect(token.SEMI) {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseBlockStatement() ast.Statement {\n\tblock := &ast.BlockStatement{\n\t\tTok: p.cur,\n\t\tStatements: []ast.Statement{},\n\t}\n\n\tp.next()\n\n\tfor !p.curIs(token.RBRACE) && !p.curIs(token.EOF) {\n\t\tstmt := p.parseStatement()\n\n\t\tif stmt != nil {\n\t\t\tblock.Statements = append(block.Statements, stmt)\n\t\t}\n\n\t\tp.next()\n\t}\n\n\treturn block\n}\n\nfunc (p *Parser) parseExpressionStatement() ast.Statement {\n\tstmt := &ast.ExpressionStatement{\n\t\tTok: p.cur,\n\t\tExpr: p.parseExpression(LOWEST),\n\t}\n\n\tif stmt.Expr == nil {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseReturnStatement() ast.Statement {\n\tif p.peek.Type == token.SEMI {\n\t\treturn &ast.ReturnStatement{\n\t\t\tTok: p.cur,\n\t\t}\n\t}\n\n\tstmt := &ast.ReturnStatement{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Value = p.parseExpression(LOWEST)\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseNextStatement() ast.Statement {\n\treturn &ast.NextStatement{\n\t\tTok: p.cur,\n\t}\n}\n\nfunc (p *Parser) parseBreakStatement() ast.Statement {\n\treturn &ast.BreakStatement{\n\t\tTok: p.cur,\n\t}\n}\n\nfunc (p *Parser) parseDefStatement() ast.Statement {\n\tstmt := &ast.FunctionDefinition{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Pattern = p.parsePatternCall(token.LBRACE)\n\n\tif len(stmt.Pattern) == 0 {\n\t\tp.defaultErr(\"expected at least one item in a pattern\")\n\t\treturn nil\n\t}\n\n\tstmt.Body = p.parseBlockStatement()\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseInitStatement() ast.Statement {\n\tstmt := &ast.InitDefinition{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Pattern = p.parsePatternCall(token.LBRACE)\n\n\tif len(stmt.Pattern) == 0 {\n\t\tp.defaultErr(\"expected at least one item in a pattern\")\n\t\treturn nil\n\t}\n\n\tstmt.Body = p.parseBlockStatement()\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseClassDeclaration() ast.Statement {\n\tstmt := &ast.ClassStatement{\n\t\tTok: p.cur,\n\t}\n\n\tif !p.expect(token.ID) {\n\t\treturn nil\n\t}\n\n\tstmt.Name = p.parseNonFnID()\n\n\tif p.peekIs(token.EXTENDS) {\n\t\tp.next()\n\t\tp.next()\n\n\t\tstmt.Parent = p.parseNonFnID()\n\t}\n\n\tif !p.expect(token.LBRACE) {\n\t\treturn nil\n\t}\n\n\tp.next()\n\n\tif p.curIs(token.RBRACE) {\n\t\treturn stmt\n\t}\n\n\tfor p.curIs(token.INIT, token.DEF) {\n\t\tif p.curIs(token.INIT) {\n\t\t\tstmt.Methods = append(stmt.Methods, p.parseInitStatement())\n\t\t} else {\n\t\t\tstmt.Methods = append(stmt.Methods, p.parseDefStatement())\n\t\t}\n\n\t\tif !p.expect(token.SEMI) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif p.peekIs(token.INIT, token.DEF) {\n\t\t\tp.next()\n\t\t}\n\t}\n\n\tif !p.expect(token.RBRACE) {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n<commit_msg>Parse import and use statements<commit_after>package parser\n\nimport (\n\t\"github.com\/Zac-Garby\/pluto\/ast\"\n\t\"github.com\/Zac-Garby\/pluto\/token\"\n)\n\nfunc (p *Parser) parseStatement() ast.Statement {\n\tvar stmt ast.Statement\n\n\tif p.curIs(token.SEMI) {\n\t\treturn nil\n\t} else if p.curIs(token.RETURN) {\n\t\tstmt = p.parseReturnStatement()\n\t} else if p.curIs(token.DEF) {\n\t\tstmt = p.parseDefStatement()\n\t} else if p.curIs(token.NEXT) {\n\t\tstmt = p.parseNextStatement()\n\t} else if p.curIs(token.BREAK) {\n\t\tstmt = p.parseBreakStatement()\n\t} else if p.curIs(token.CLASS) {\n\t\tstmt = p.parseClassDeclaration()\n\t} else if p.curIs(token.IMPORT) {\n\t\tstmt = p.parseImportStatement()\n\t} else if p.curIs(token.USE) {\n\t\tstmt = p.parseUseStatement()\n\t} else {\n\t\tstmt = p.parseExpressionStatement()\n\t}\n\n\tif !p.expect(token.SEMI) {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseBlockStatement() ast.Statement {\n\tblock := &ast.BlockStatement{\n\t\tTok: p.cur,\n\t\tStatements: []ast.Statement{},\n\t}\n\n\tp.next()\n\n\tfor !p.curIs(token.RBRACE) && !p.curIs(token.EOF) {\n\t\tstmt := p.parseStatement()\n\n\t\tif stmt != nil {\n\t\t\tblock.Statements = append(block.Statements, stmt)\n\t\t}\n\n\t\tp.next()\n\t}\n\n\treturn block\n}\n\nfunc (p *Parser) parseExpressionStatement() ast.Statement {\n\tstmt := &ast.ExpressionStatement{\n\t\tTok: p.cur,\n\t\tExpr: p.parseExpression(LOWEST),\n\t}\n\n\tif stmt.Expr == nil {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseReturnStatement() ast.Statement {\n\tif p.peek.Type == token.SEMI {\n\t\treturn &ast.ReturnStatement{\n\t\t\tTok: p.cur,\n\t\t}\n\t}\n\n\tstmt := &ast.ReturnStatement{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Value = p.parseExpression(LOWEST)\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseNextStatement() ast.Statement {\n\treturn &ast.NextStatement{\n\t\tTok: p.cur,\n\t}\n}\n\nfunc (p *Parser) parseBreakStatement() ast.Statement {\n\treturn &ast.BreakStatement{\n\t\tTok: p.cur,\n\t}\n}\n\nfunc (p *Parser) parseDefStatement() ast.Statement {\n\tstmt := &ast.FunctionDefinition{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Pattern = p.parsePatternCall(token.LBRACE)\n\n\tif len(stmt.Pattern) == 0 {\n\t\tp.defaultErr(\"expected at least one item in a pattern\")\n\t\treturn nil\n\t}\n\n\tstmt.Body = p.parseBlockStatement()\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseInitStatement() ast.Statement {\n\tstmt := &ast.InitDefinition{\n\t\tTok: p.cur,\n\t}\n\n\tp.next()\n\tstmt.Pattern = p.parsePatternCall(token.LBRACE)\n\n\tif len(stmt.Pattern) == 0 {\n\t\tp.defaultErr(\"expected at least one item in a pattern\")\n\t\treturn nil\n\t}\n\n\tstmt.Body = p.parseBlockStatement()\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseClassDeclaration() ast.Statement {\n\tstmt := &ast.ClassStatement{\n\t\tTok: p.cur,\n\t}\n\n\tif !p.expect(token.ID) {\n\t\treturn nil\n\t}\n\n\tstmt.Name = p.parseNonFnID()\n\n\tif p.peekIs(token.EXTENDS) {\n\t\tp.next()\n\t\tp.next()\n\n\t\tstmt.Parent = p.parseNonFnID()\n\t}\n\n\tif !p.expect(token.LBRACE) {\n\t\treturn nil\n\t}\n\n\tp.next()\n\n\tif p.curIs(token.RBRACE) {\n\t\treturn stmt\n\t}\n\n\tfor p.curIs(token.INIT, token.DEF) {\n\t\tif p.curIs(token.INIT) {\n\t\t\tstmt.Methods = append(stmt.Methods, p.parseInitStatement())\n\t\t} else {\n\t\t\tstmt.Methods = append(stmt.Methods, p.parseDefStatement())\n\t\t}\n\n\t\tif !p.expect(token.SEMI) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif p.peekIs(token.INIT, token.DEF) {\n\t\t\tp.next()\n\t\t}\n\t}\n\n\tif !p.expect(token.RBRACE) {\n\t\treturn nil\n\t}\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseImportStatement() ast.Statement {\n\tstmt := &ast.ImportStatement{\n\t\tTok: p.cur,\n\t}\n\n\tif !p.expect(token.STR) {\n\t\treturn nil\n\t}\n\n\tstmt.Package = p.cur.Literal\n\n\treturn stmt\n}\n\nfunc (p *Parser) parseUseStatement() ast.Statement {\n\tstmt := &ast.UseStatement{\n\t\tTok: p.cur,\n\t}\n\n\tif !p.expect(token.STR) {\n\t\treturn nil\n\t}\n\n\tstmt.Package = p.cur.Literal\n\n\treturn stmt\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"github.com\/deweysasser\/locksmith\/connection\"\n\t\"github.com\/deweysasser\/locksmith\/data\"\n\t\"github.com\/deweysasser\/locksmith\/lib\"\n\t\"github.com\/deweysasser\/locksmith\/output\"\n\t\"github.com\/urfave\/cli\"\n\t\"reflect\"\n\t\"sync\"\n\t\"fmt\"\n)\n\nfunc CmdFetch(c *cli.Context) error {\n\toutputLevel(c)\n\tlibWG := sync.WaitGroup{}\n\tml := lib.MainLibrary{Path: datadir(c)}\n\n\tfKeys := data.NewFanInKey(nil)\n\tfAccounts := data.NewFanInAccount()\n\n\tlibWG.Add(1)\n\tgo ingestKeys(ml.Keys(), fKeys.Output(), &libWG)\n\n\tlibWG.Add(1)\n\tgo ingestAccounts(ml.Accounts(), fAccounts.Output(), &libWG)\n\n\tfilter := buildFilterFromContext(c)\n\n\tfor conn := range ml.Connections().List() {\n\t\tif filter(conn) {\n\t\t\toutput.Verbosef(\"Fetching from %s\\n\", conn)\n\t\t\tk, a := fetchFrom(conn)\n\t\t\tfKeys.Add(k)\n\t\t\tfAccounts.Add(a)\n\t\t}\n\t}\n\n\tfKeys.Wait()\n\tfAccounts.Wait()\n\tlibWG.Wait()\n\n\treturn nil\n}\n\nfunc fetchFrom(conn interface{}) (keys <- chan data.Key, accounts <- chan data.Account) {\n\tswitch conn.(type) {\n\tcase connection.Connection:\n\t\t\/\/fmt.Println(\"Fetching from \", conn)\n\t\treturn conn.(connection.Connection).Fetch()\n\tdefault:\n\t\tpanic(\"Unknown connection type \" + reflect.TypeOf(conn).Name())\n\t}\n}\n\nfunc ingestAccounts(alib lib.Library, accounts chan data.Account, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\ti := 0\n\tfor k := range accounts {\n\t\ti++\n\t\tid := alib.Id(k)\n\t\tif existing, err := alib.Fetch(id); err == nil {\n\t\t\tif existingacct, ok := existing.(data.Account); ok {\n\t\t\t\texistingacct.Merge(k)\n\t\t\t\tif e := alib.Store(existingacct); e != nil {\n\t\t\t\t\toutput.Error(e)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Sprint(\"type for\", id, \" was not Account\"))\n\t\t\t}\n\t\t} else {\n\t\t\tif e := alib.Store(k); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t}\n\t}\n\n\toutput.Normalf(\"Discovered %d accounts\\n\", i)\n}\n\nfunc ingestKeys(klib lib.Library, keys chan data.Key, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\ti := 0\n\tfor k := range keys {\n\t\ti++\n\t\tid := klib.Id(k)\n\t\tif existing, err := klib.Fetch(id); err == nil {\n\t\t\texisting.(data.Key).Merge(k)\n\t\t\tif e := klib.Store(existing); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t\t\/\/ It's possible for a key primary ID to change if we didn't before have a public key.\n\t\t\tif klib.Id(existing) != id {\n\t\t\t\toutput.Debug(\"Updating key id from\", id, \"to\", klib.Id(existing))\n\t\t\t\t\/\/ If so, delete the previous key file. This, however, takes they key out of the cache so we need to\n\t\t\t\t\/\/ re-cache it. Storing it again puts it back in the cache at the cost of a bit more disk I\/O (but code\n\t\t\t\t\/\/ simplicity)\n\t\t\t\tif e := klib.Delete(id); e == nil {\n\t\t\t\t\tif e := klib.Store(existing); e != nil {\n\t\t\t\t\t\toutput.Error(\"Error re-storing\", klib.Id(existing))\n\t\t\t\t\t}\n\t\t\t\t} \n\t\t\t}\n\n\t\t} else {\n\t\t\tif e := klib.Store(k); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t}\n\t}\n\n\toutput.Normalf(\"Discovered %d keys\\n\", i)\n}\n<commit_msg>Print number of unique keys\/accounts processed<commit_after>package command\n\nimport (\n\t\"github.com\/deweysasser\/locksmith\/connection\"\n\t\"github.com\/deweysasser\/locksmith\/data\"\n\t\"github.com\/deweysasser\/locksmith\/lib\"\n\t\"github.com\/deweysasser\/locksmith\/output\"\n\t\"github.com\/urfave\/cli\"\n\t\"reflect\"\n\t\"sync\"\n\t\"fmt\"\n)\n\nfunc CmdFetch(c *cli.Context) error {\n\toutputLevel(c)\n\tlibWG := sync.WaitGroup{}\n\tml := lib.MainLibrary{Path: datadir(c)}\n\n\tfKeys := data.NewFanInKey(nil)\n\tfAccounts := data.NewFanInAccount()\n\n\tlibWG.Add(1)\n\tgo ingestKeys(ml.Keys(), fKeys.Output(), &libWG)\n\n\tlibWG.Add(1)\n\tgo ingestAccounts(ml.Accounts(), fAccounts.Output(), &libWG)\n\n\tfilter := buildFilterFromContext(c)\n\n\tfor conn := range ml.Connections().List() {\n\t\tif filter(conn) {\n\t\t\toutput.Verbosef(\"Fetching from %s\\n\", conn)\n\t\t\tk, a := fetchFrom(conn)\n\t\t\tfKeys.Add(k)\n\t\t\tfAccounts.Add(a)\n\t\t}\n\t}\n\n\tfKeys.Wait()\n\tfAccounts.Wait()\n\tlibWG.Wait()\n\n\treturn nil\n}\n\nfunc fetchFrom(conn interface{}) (keys <- chan data.Key, accounts <- chan data.Account) {\n\tswitch conn.(type) {\n\tcase connection.Connection:\n\t\t\/\/fmt.Println(\"Fetching from \", conn)\n\t\treturn conn.(connection.Connection).Fetch()\n\tdefault:\n\t\tpanic(\"Unknown connection type \" + reflect.TypeOf(conn).Name())\n\t}\n}\n\nfunc ingestAccounts(alib lib.Library, accounts chan data.Account, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tidmap := make(map[string] bool)\n\ti := 0\n\tfor k := range accounts {\n\t\ti++\n\t\tid := alib.Id(k)\n\t\tidmap[id]=true\n\t\tif existing, err := alib.Fetch(id); err == nil {\n\t\t\tif existingacct, ok := existing.(data.Account); ok {\n\t\t\t\texistingacct.Merge(k)\n\t\t\t\tif e := alib.Store(existingacct); e != nil {\n\t\t\t\t\toutput.Error(e)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Sprint(\"type for\", id, \" was not Account\"))\n\t\t\t}\n\t\t} else {\n\t\t\tif e := alib.Store(k); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t}\n\t}\n\n\toutput.Normalf(\"Discovered %d accounts in %d references\\n\", len(idmap), i)\n}\n\nfunc ingestKeys(klib lib.Library, keys chan data.Key, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tidmap := make(map[string] bool)\n\ti := 0\n\tfor k := range keys {\n\t\ti++\n\t\tid := klib.Id(k)\n\t\tidmap[id]=true\n\t\tif existing, err := klib.Fetch(id); err == nil {\n\t\t\texisting.(data.Key).Merge(k)\n\t\t\tif e := klib.Store(existing); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t\t\/\/ It's possible for a key primary ID to change if we didn't before have a public key.\n\t\t\tif klib.Id(existing) != id {\n\t\t\t\toutput.Debug(\"Updating key id from\", id, \"to\", klib.Id(existing))\n\t\t\t\t\/\/ If so, delete the previous key file. This, however, takes they key out of the cache so we need to\n\t\t\t\t\/\/ re-cache it. Storing it again puts it back in the cache at the cost of a bit more disk I\/O (but code\n\t\t\t\t\/\/ simplicity)\n\t\t\t\tif e := klib.Delete(id); e == nil {\n\t\t\t\t\tif e := klib.Store(existing); e != nil {\n\t\t\t\t\t\toutput.Error(\"Error re-storing\", klib.Id(existing))\n\t\t\t\t\t}\n\t\t\t\t} \n\t\t\t}\n\n\t\t} else {\n\t\t\tif e := klib.Store(k); e != nil {\n\t\t\t\toutput.Error(e)\n\t\t\t}\n\t\t}\n\t}\n\n\toutput.Normalf(\"Discovered %d keys in %d locations\\n\", len(idmap), i)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"unicode\"\n\n\t\"github.com\/zetamatta\/go-getch\"\n\n\t\"..\/dos\"\n)\n\ntype copymove_t struct {\n\t*exec.Cmd\n\tAction func(src, dst string) error\n\tIsDirOk bool\n}\n\nfunc cmd_copy(cmd *exec.Cmd) (int, error) {\n\treturn copymove_t{\n\t\tCmd: cmd,\n\t\tAction: func(src, dst string) error {\n\t\t\treturn dos.Copy(src, dst, false)\n\t\t},\n\t}.Run()\n}\n\nfunc cmd_move(cmd *exec.Cmd) (int, error) {\n\treturn copymove_t{\n\t\tCmd: cmd,\n\t\tAction: dos.Move,\n\t\tIsDirOk: true,\n\t}.Run()\n}\n\nfunc cmd_ln(cmd *exec.Cmd) (int, error) {\n\tif len(cmd.Args) >= 2 && cmd.Args[1] == \"-s\" {\n\t\targs := make([]string, 0, len(cmd.Args)-1)\n\t\targs = append(args, cmd.Args[0])\n\t\targs = append(args, cmd.Args[2:]...)\n\t\tcmd.Args = args\n\t\treturn copymove_t{\n\t\t\tCmd: cmd,\n\t\t\tAction: os.Symlink,\n\t\t\tIsDirOk: true,\n\t\t}.Run()\n\t} else {\n\t\treturn copymove_t{\n\t\t\tCmd: cmd,\n\t\t\tAction: os.Link,\n\t\t}.Run()\n\t}\n}\n\nfunc (this copymove_t) Run() (int, error) {\n\tif len(this.Args) <= 2 {\n\t\tfmt.Fprintf(this.Stderr,\n\t\t\t\"Usage: %s [\/y] SOURCE-FILENAME DESITINATE-FILENAME\\n\"+\n\t\t\t\t\" %s [\/y] FILENAMES... DESINATE-DIRECTORY\\n\",\n\t\t\tthis.Args[0], this.Args[0])\n\t\treturn 0, nil\n\t}\n\tfi, err := os.Stat(this.Args[len(this.Args)-1])\n\tisDir := err == nil && fi.Mode().IsDir()\n\tall := false\n\tsrcs := this.Args[1 : len(this.Args)-1]\n\tfor i, src := range srcs {\n\t\tif getch.IsCtrlCPressed() {\n\t\t\tfmt.Fprintln(this.Stderr, \"^C\")\n\t\t\treturn 0, nil\n\t\t}\n\t\tif src == \"\/y\" {\n\t\t\tall = true\n\t\t\tcontinue\n\t\t}\n\t\tdst := this.Args[len(this.Args)-1]\n\t\tif isDir {\n\t\t\tdst = filepath.Join(dst, filepath.Base(src))\n\t\t}\n\t\tif !this.IsDirOk {\n\t\t\tfi, err := os.Stat(src)\n\t\t\tif err == nil && fi.Mode().IsDir() {\n\t\t\t\tfmt.Fprintf(this.Stderr, \"%s is directory and passed.\\n\", src)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(this.Stderr, \"%s -> %s\\n\", src, dst)\n\t\tif !all {\n\t\t\tfi, err := os.Stat(dst)\n\t\t\tif fi != nil && err == nil {\n\t\t\t\tfmt.Fprintf(this.Stderr,\n\t\t\t\t\t\"%s: override? [Yes\/No\/All\/Quit] \",\n\t\t\t\t\tdst)\n\t\t\t\tch := getch.Rune()\n\t\t\t\tif unicode.IsPrint(ch) {\n\t\t\t\t\tfmt.Fprintf(this.Stderr, \"%c\\n\", ch)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprint(this.Stderr, \"\\n\")\n\t\t\t\t}\n\t\t\t\tswitch ch {\n\t\t\t\tcase 'y', 'Y':\n\n\t\t\t\tcase 'a', 'A':\n\t\t\t\t\tall = true\n\t\t\t\tcase 'q', 'Q', rune(0x03):\n\t\t\t\t\treturn 0, nil\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\terr := this.Action(src, dst)\n\t\tif err != nil {\n\t\t\tif i >= len(srcs)-1 {\n\t\t\t\treturn 1, err\n\t\t\t}\n\t\t\tfmt.Fprintf(this.Stderr, \"%s\\nContinue? [Yes\/No] \", err.Error())\n\t\t\tch := getch.Rune()\n\t\t\tif unicode.IsPrint(ch) {\n\t\t\t\tfmt.Fprintf(this.Stderr, \"%c\\n\", ch)\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(this.Stderr, \"\\n\")\n\t\t\t}\n\t\t\tif ch != 'y' && ch != 'Y' {\n\t\t\t\treturn 0, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, nil\n}\n<commit_msg>copy & move regard the desitinate ending with \\ : or dots as a directory<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"unicode\"\n\n\t\"github.com\/zetamatta\/go-getch\"\n\n\t\"..\/dos\"\n)\n\ntype copymove_t struct {\n\t*exec.Cmd\n\tAction func(src, dst string) error\n\tIsDirOk bool\n}\n\nfunc cmd_copy(cmd *exec.Cmd) (int, error) {\n\treturn copymove_t{\n\t\tCmd: cmd,\n\t\tAction: func(src, dst string) error {\n\t\t\treturn dos.Copy(src, dst, false)\n\t\t},\n\t}.Run()\n}\n\nfunc cmd_move(cmd *exec.Cmd) (int, error) {\n\treturn copymove_t{\n\t\tCmd: cmd,\n\t\tAction: dos.Move,\n\t\tIsDirOk: true,\n\t}.Run()\n}\n\nfunc cmd_ln(cmd *exec.Cmd) (int, error) {\n\tif len(cmd.Args) >= 2 && cmd.Args[1] == \"-s\" {\n\t\targs := make([]string, 0, len(cmd.Args)-1)\n\t\targs = append(args, cmd.Args[0])\n\t\targs = append(args, cmd.Args[2:]...)\n\t\tcmd.Args = args\n\t\treturn copymove_t{\n\t\t\tCmd: cmd,\n\t\t\tAction: os.Symlink,\n\t\t\tIsDirOk: true,\n\t\t}.Run()\n\t} else {\n\t\treturn copymove_t{\n\t\t\tCmd: cmd,\n\t\t\tAction: os.Link,\n\t\t}.Run()\n\t}\n}\n\nvar rxDir = regexp.MustCompile(`[\\\\\/:].{1,2}$`)\n\nfunc judgeDir(path string) bool {\n\tif path == \".\" || path == \"..\" || rxDir.MatchString(path) {\n\t\treturn true\n\t}\n\tstat, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn stat.Mode().IsDir()\n}\n\nfunc (this copymove_t) Run() (int, error) {\n\tif len(this.Args) <= 2 {\n\t\tfmt.Fprintf(this.Stderr,\n\t\t\t\"Usage: %s [\/y] SOURCE-FILENAME DESITINATE-FILENAME\\n\"+\n\t\t\t\t\" %s [\/y] FILENAMES... DESINATE-DIRECTORY\\n\",\n\t\t\tthis.Args[0], this.Args[0])\n\t\treturn 0, nil\n\t}\n\tall := false\n\tisDir := judgeDir(this.Args[len(this.Args)-1])\n\tsrcs := this.Args[1 : len(this.Args)-1]\n\tfor i, src := range srcs {\n\t\tif getch.IsCtrlCPressed() {\n\t\t\tfmt.Fprintln(this.Stderr, \"^C\")\n\t\t\treturn 0, nil\n\t\t}\n\t\tif src == \"\/y\" {\n\t\t\tall = true\n\t\t\tcontinue\n\t\t}\n\t\tdst := this.Args[len(this.Args)-1]\n\t\tif isDir {\n\t\t\tdst = filepath.Join(dst, filepath.Base(src))\n\t\t}\n\t\tif !this.IsDirOk {\n\t\t\tfi, err := os.Stat(src)\n\t\t\tif err == nil && fi.Mode().IsDir() {\n\t\t\t\tfmt.Fprintf(this.Stderr, \"%s is directory and passed.\\n\", src)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(this.Stderr, \"%s -> %s\\n\", src, dst)\n\t\tif !all {\n\t\t\tfi, err := os.Stat(dst)\n\t\t\tif fi != nil && err == nil {\n\t\t\t\tfmt.Fprintf(this.Stderr,\n\t\t\t\t\t\"%s: override? [Yes\/No\/All\/Quit] \",\n\t\t\t\t\tdst)\n\t\t\t\tch := getch.Rune()\n\t\t\t\tif unicode.IsPrint(ch) {\n\t\t\t\t\tfmt.Fprintf(this.Stderr, \"%c\\n\", ch)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprint(this.Stderr, \"\\n\")\n\t\t\t\t}\n\t\t\t\tswitch ch {\n\t\t\t\tcase 'y', 'Y':\n\n\t\t\t\tcase 'a', 'A':\n\t\t\t\t\tall = true\n\t\t\t\tcase 'q', 'Q', rune(0x03):\n\t\t\t\t\treturn 0, nil\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\terr := this.Action(src, dst)\n\t\tif err != nil {\n\t\t\tif i >= len(srcs)-1 {\n\t\t\t\treturn 1, err\n\t\t\t}\n\t\t\tfmt.Fprintf(this.Stderr, \"%s\\nContinue? [Yes\/No] \", err.Error())\n\t\t\tch := getch.Rune()\n\t\t\tif unicode.IsPrint(ch) {\n\t\t\t\tfmt.Fprintf(this.Stderr, \"%c\\n\", ch)\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(this.Stderr, \"\\n\")\n\t\t\t}\n\t\t\tif ch != 'y' && ch != 'Y' {\n\t\t\t\treturn 0, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n\t\"io\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/erasure_coding\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandEcDecode{})\n}\n\ntype commandEcDecode struct {\n}\n\nfunc (c *commandEcDecode) Name() string {\n\treturn \"ec.decode\"\n}\n\nfunc (c *commandEcDecode) Help() string {\n\treturn `decode a erasure coded volume into a normal volume\n\n\tec.decode [-collection=\"\"] [-volumeId=<volume_id>]\n\n`\n}\n\nfunc (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\tencodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)\n\tvolumeId := encodeCommand.Int(\"volumeId\", 0, \"the volume id\")\n\tcollection := encodeCommand.String(\"collection\", \"\", \"the collection name\")\n\tapplyChanges := fixCommand.Bool(\"force\", false, \"force the encoding even if the cluster has less than recommended 4 nodes\")\n\tif err = encodeCommand.Parse(args); err != nil {\n\t\treturn nil\n\t}\n\n\tif err = commandEnv.confirmIsLocked(); err != nil {\n\t\treturn\n\t}\n\n\tvid := needle.VolumeId(*volumeId)\n\n\t\/\/ collect topology information\n\ttopologyInfo, _, err := collectTopologyInfo(commandEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !applyChanges {\n\t\tvar nodeCount int\n\t\teachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\t\tnodeCount++\n\t\t})\n\t\tif nodeCount < erasure_coding.ParityShardsCount {\n\t\t\tglog.V(0).Infof(\"skip erasure coding with %d nodes, less than recommended %d nodes\", nodeCount, erasure_coding.ParityShardsCount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ volumeId is provided\n\tif vid != 0 {\n\t\treturn doEcDecode(commandEnv, topologyInfo, *collection, vid)\n\t}\n\n\t\/\/ apply to all volumes in the collection\n\tvolumeIds := collectEcShardIds(topologyInfo, *collection)\n\tfmt.Printf(\"ec encode volumes: %v\\n\", volumeIds)\n\tfor _, vid := range volumeIds {\n\t\tif err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {\n\t\/\/ find volume location\n\tnodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)\n\n\tfmt.Printf(\"ec volume %d shard locations: %+v\\n\", vid, nodeToEcIndexBits)\n\n\t\/\/ collect ec shards to the server with most space\n\ttargetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"collectEcShards for volume %d: %v\", vid, err)\n\t}\n\n\t\/\/ generate a normal volume\n\terr = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"generate normal volume %d on %s: %v\", vid, targetNodeLocation, err)\n\t}\n\n\t\/\/ delete the previous ec shards\n\terr = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"delete ec shards for volume %d: %v\", vid, err)\n\t}\n\n\treturn nil\n}\n\nfunc mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {\n\n\t\/\/ mount volume\n\tif err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{\n\t\t\tVolumeId: uint32(vid),\n\t\t})\n\t\treturn mountErr\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards mount volume %d on %s: %v\", vid, targetNodeLocation, err)\n\t}\n\n\t\/\/ unmount ec shards\n\tfor location, ecIndexBits := range nodeToEcIndexBits {\n\t\tfmt.Printf(\"unmount ec volume %d on %s has shards: %+v\\n\", vid, location, ecIndexBits.ShardIds())\n\t\terr := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v\", vid, location, err)\n\t\t}\n\t}\n\t\/\/ delete ec shards\n\tfor location, ecIndexBits := range nodeToEcIndexBits {\n\t\tfmt.Printf(\"delete ec volume %d on %s has shards: %+v\\n\", vid, location, ecIndexBits.ShardIds())\n\t\terr := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v\", vid, location, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {\n\n\tfmt.Printf(\"generateNormalVolume from ec volume %d on %s\\n\", vid, sourceVolumeServer)\n\n\terr := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{\n\t\t\tVolumeId: uint32(vid),\n\t\t\tCollection: collection,\n\t\t})\n\t\treturn genErr\n\t})\n\n\treturn err\n\n}\n\nfunc collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {\n\n\tmaxShardCount := 0\n\tvar exisitngEcIndexBits erasure_coding.ShardBits\n\tfor loc, ecIndexBits := range nodeToEcIndexBits {\n\t\ttoBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()\n\t\tif toBeCopiedShardCount > maxShardCount {\n\t\t\tmaxShardCount = toBeCopiedShardCount\n\t\t\ttargetNodeLocation = loc\n\t\t\texisitngEcIndexBits = ecIndexBits\n\t\t}\n\t}\n\n\tfmt.Printf(\"collectEcShards: ec volume %d collect shards to %s from: %+v\\n\", vid, targetNodeLocation, nodeToEcIndexBits)\n\n\tvar copiedEcIndexBits erasure_coding.ShardBits\n\tfor loc, ecIndexBits := range nodeToEcIndexBits {\n\t\tif loc == targetNodeLocation {\n\t\t\tcontinue\n\t\t}\n\n\t\tneedToCopyEcIndexBits := ecIndexBits.Minus(exisitngEcIndexBits).MinusParityShards()\n\t\tif needToCopyEcIndexBits.ShardIdCount() == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\n\t\t\tfmt.Printf(\"copy %d.%v %s => %s\\n\", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)\n\n\t\t\t_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{\n\t\t\t\tVolumeId: uint32(vid),\n\t\t\t\tCollection: collection,\n\t\t\t\tShardIds: needToCopyEcIndexBits.ToUint32Slice(),\n\t\t\t\tCopyEcxFile: false,\n\t\t\t\tCopyEcjFile: true,\n\t\t\t\tCopyVifFile: true,\n\t\t\t\tSourceDataNode: string(loc),\n\t\t\t})\n\t\t\tif copyErr != nil {\n\t\t\t\treturn fmt.Errorf(\"copy %d.%v %s => %s : %v\\n\", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcopiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)\n\n\t}\n\n\tnodeToEcIndexBits[targetNodeLocation] = exisitngEcIndexBits.Plus(copiedEcIndexBits)\n\n\treturn targetNodeLocation, err\n\n}\n\nfunc lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {\n\tvar resp *master_pb.LookupVolumeResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.VolumeIdLocations, nil\n}\n\nfunc collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {\n\n\tvar resp *master_pb.VolumeListResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn resp.TopologyInfo, resp.VolumeSizeLimitMb, nil\n\n}\n\nfunc collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {\n\n\tvidMap := make(map[uint32]bool)\n\teachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {\n\t\t\tfor _, v := range diskInfo.EcShardInfos {\n\t\t\t\tif v.Collection == selectedCollection {\n\t\t\t\t\tvidMap[v.Id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tfor vid := range vidMap {\n\t\tvids = append(vids, needle.VolumeId(vid))\n\t}\n\n\treturn\n}\n\nfunc collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {\n\n\tnodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)\n\teachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {\n\t\t\tfor _, v := range diskInfo.EcShardInfos {\n\t\t\t\tif v.Id == uint32(vid) {\n\t\t\t\t\tnodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\treturn nodeToEcIndexBits\n}\n<commit_msg>fix compilation<commit_after>package shell\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n\t\"io\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/erasure_coding\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandEcDecode{})\n}\n\ntype commandEcDecode struct {\n}\n\nfunc (c *commandEcDecode) Name() string {\n\treturn \"ec.decode\"\n}\n\nfunc (c *commandEcDecode) Help() string {\n\treturn `decode a erasure coded volume into a normal volume\n\n\tec.decode [-collection=\"\"] [-volumeId=<volume_id>]\n\n`\n}\n\nfunc (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\tencodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)\n\tvolumeId := encodeCommand.Int(\"volumeId\", 0, \"the volume id\")\n\tcollection := encodeCommand.String(\"collection\", \"\", \"the collection name\")\n\tapplyChanges := encodeCommand.Bool(\"force\", false, \"force the encoding even if the cluster has less than recommended 4 nodes\")\n\tif err = encodeCommand.Parse(args); err != nil {\n\t\treturn nil\n\t}\n\n\tif err = commandEnv.confirmIsLocked(); err != nil {\n\t\treturn\n\t}\n\n\tvid := needle.VolumeId(*volumeId)\n\n\t\/\/ collect topology information\n\ttopologyInfo, _, err := collectTopologyInfo(commandEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !*applyChanges {\n\t\tvar nodeCount int\n\t\teachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\t\tnodeCount++\n\t\t})\n\t\tif nodeCount < erasure_coding.ParityShardsCount {\n\t\t\tglog.V(0).Infof(\"skip erasure coding with %d nodes, less than recommended %d nodes\", nodeCount, erasure_coding.ParityShardsCount)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ volumeId is provided\n\tif vid != 0 {\n\t\treturn doEcDecode(commandEnv, topologyInfo, *collection, vid)\n\t}\n\n\t\/\/ apply to all volumes in the collection\n\tvolumeIds := collectEcShardIds(topologyInfo, *collection)\n\tfmt.Printf(\"ec encode volumes: %v\\n\", volumeIds)\n\tfor _, vid := range volumeIds {\n\t\tif err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {\n\t\/\/ find volume location\n\tnodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)\n\n\tfmt.Printf(\"ec volume %d shard locations: %+v\\n\", vid, nodeToEcIndexBits)\n\n\t\/\/ collect ec shards to the server with most space\n\ttargetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"collectEcShards for volume %d: %v\", vid, err)\n\t}\n\n\t\/\/ generate a normal volume\n\terr = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"generate normal volume %d on %s: %v\", vid, targetNodeLocation, err)\n\t}\n\n\t\/\/ delete the previous ec shards\n\terr = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"delete ec shards for volume %d: %v\", vid, err)\n\t}\n\n\treturn nil\n}\n\nfunc mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {\n\n\t\/\/ mount volume\n\tif err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{\n\t\t\tVolumeId: uint32(vid),\n\t\t})\n\t\treturn mountErr\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards mount volume %d on %s: %v\", vid, targetNodeLocation, err)\n\t}\n\n\t\/\/ unmount ec shards\n\tfor location, ecIndexBits := range nodeToEcIndexBits {\n\t\tfmt.Printf(\"unmount ec volume %d on %s has shards: %+v\\n\", vid, location, ecIndexBits.ShardIds())\n\t\terr := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v\", vid, location, err)\n\t\t}\n\t}\n\t\/\/ delete ec shards\n\tfor location, ecIndexBits := range nodeToEcIndexBits {\n\t\tfmt.Printf(\"delete ec volume %d on %s has shards: %+v\\n\", vid, location, ecIndexBits.ShardIds())\n\t\terr := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v\", vid, location, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {\n\n\tfmt.Printf(\"generateNormalVolume from ec volume %d on %s\\n\", vid, sourceVolumeServer)\n\n\terr := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{\n\t\t\tVolumeId: uint32(vid),\n\t\t\tCollection: collection,\n\t\t})\n\t\treturn genErr\n\t})\n\n\treturn err\n\n}\n\nfunc collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {\n\n\tmaxShardCount := 0\n\tvar exisitngEcIndexBits erasure_coding.ShardBits\n\tfor loc, ecIndexBits := range nodeToEcIndexBits {\n\t\ttoBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()\n\t\tif toBeCopiedShardCount > maxShardCount {\n\t\t\tmaxShardCount = toBeCopiedShardCount\n\t\t\ttargetNodeLocation = loc\n\t\t\texisitngEcIndexBits = ecIndexBits\n\t\t}\n\t}\n\n\tfmt.Printf(\"collectEcShards: ec volume %d collect shards to %s from: %+v\\n\", vid, targetNodeLocation, nodeToEcIndexBits)\n\n\tvar copiedEcIndexBits erasure_coding.ShardBits\n\tfor loc, ecIndexBits := range nodeToEcIndexBits {\n\t\tif loc == targetNodeLocation {\n\t\t\tcontinue\n\t\t}\n\n\t\tneedToCopyEcIndexBits := ecIndexBits.Minus(exisitngEcIndexBits).MinusParityShards()\n\t\tif needToCopyEcIndexBits.ShardIdCount() == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\n\t\t\tfmt.Printf(\"copy %d.%v %s => %s\\n\", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)\n\n\t\t\t_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{\n\t\t\t\tVolumeId: uint32(vid),\n\t\t\t\tCollection: collection,\n\t\t\t\tShardIds: needToCopyEcIndexBits.ToUint32Slice(),\n\t\t\t\tCopyEcxFile: false,\n\t\t\t\tCopyEcjFile: true,\n\t\t\t\tCopyVifFile: true,\n\t\t\t\tSourceDataNode: string(loc),\n\t\t\t})\n\t\t\tif copyErr != nil {\n\t\t\t\treturn fmt.Errorf(\"copy %d.%v %s => %s : %v\\n\", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcopiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)\n\n\t}\n\n\tnodeToEcIndexBits[targetNodeLocation] = exisitngEcIndexBits.Plus(copiedEcIndexBits)\n\n\treturn targetNodeLocation, err\n\n}\n\nfunc lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {\n\tvar resp *master_pb.LookupVolumeResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.VolumeIdLocations, nil\n}\n\nfunc collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {\n\n\tvar resp *master_pb.VolumeListResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn resp.TopologyInfo, resp.VolumeSizeLimitMb, nil\n\n}\n\nfunc collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {\n\n\tvidMap := make(map[uint32]bool)\n\teachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {\n\t\t\tfor _, v := range diskInfo.EcShardInfos {\n\t\t\t\tif v.Collection == selectedCollection {\n\t\t\t\t\tvidMap[v.Id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tfor vid := range vidMap {\n\t\tvids = append(vids, needle.VolumeId(vid))\n\t}\n\n\treturn\n}\n\nfunc collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {\n\n\tnodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)\n\teachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {\n\t\t\tfor _, v := range diskInfo.EcShardInfos {\n\t\t\t\tif v.Id == uint32(vid) {\n\t\t\t\t\tnodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\treturn nodeToEcIndexBits\n}\n<|endoftext|>"} {"text":"<commit_before>package CloudForest\n\n\/*\nWRFTarget wraps a numerical feature as a target for us weigted random forest.\n*\/\ntype WRFTarget struct {\n\tCatFeature\n\tWeights []float64\n}\n\n\/*\nNewWRFTarget creates a weighted random forest target and initializes its weights.\n*\/\nfunc NewWRFTarget(f CatFeature, weights map[string]float64) (abt *WRFTarget) {\n\tabt = &WRFTarget{f, make([]float64, f.NCats())}\n\n\tfor i := range abt.Weights {\n\t\tabt.Weights[i] = weights[f.NumToCat(i)]\n\t}\n\n\treturn\n}\n\n\/*\nSplitImpurity is an weigtedRF version of SplitImpurity.\n*\/\nfunc (target *WRFTarget) SplitImpurity(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs) (impurityDecrease float64) {\n\tnl := float64(len(*l))\n\tnr := float64(len(*r))\n\tnm := 0.0\n\n\timpurityDecrease = nl * target.Impurity(l, allocs.LCounter)\n\timpurityDecrease += nr * target.Impurity(r, allocs.RCounter)\n\tif m != nil && len(*m) > 0 {\n\t\tnm = float64(len(*m))\n\t\timpurityDecrease += nm * target.Impurity(m, allocs.Counter)\n\t}\n\n\timpurityDecrease \/= nl + nr + nm\n\treturn\n}\n\n\/\/UpdateSImpFromAllocs willl be called when splits are being built by moving cases from r to l as in learning from numerical variables.\n\/\/Here it just wraps SplitImpurity but it can be implemented to provide further optimization.\nfunc (target *WRFTarget) UpdateSImpFromAllocs(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs, movedRtoL *[]int) (impurityDecrease float64) {\n\treturn target.SplitImpurity(l, r, m, allocs)\n}\n\n\/\/Impurity is Gini impurity that uses the weights specified in WRFTarget.weights.\nfunc (target *WRFTarget) Impurity(cases *[]int, counter *[]int) (e float64) {\n\ttotal := 0.0\n\tcounts := *counter\n\tfor i := range counts {\n\t\tcounts[i] = 0\n\t}\n\tfor _, i := range *cases {\n\t\tif !target.IsMissing(i) {\n\t\t\tcati := target.Geti(i)\n\t\t\tcounts[cati]++\n\t\t\ttotal += target.Weights[cati]\n\t\t}\n\t}\n\te = 1.0\n\tt := float64(total * total)\n\tfor i, v := range counts {\n\t\tw := target.Weights[i]\n\t\te -= float64(v*v) * w * w \/ t\n\t}\n\treturn\n}\n\n\/\/FindPredicted finds the predicted target as the weighted catagorical Mode.\nfunc (target *WRFTarget) FindPredicted(cases []int) (pred string) {\n\n\tcounts := make([]int, target.NCats())\n\tfor _, i := range cases {\n\t\tif !target.IsMissing(i) {\n\t\t\tcounts[target.Geti(i)] += 1\n\t\t}\n\n\t}\n\tm := 0\n\tmax := 0.0\n\tfor k, v := range counts {\n\t\tval := float64(v) * target.Weights[k]\n\t\tif val > max {\n\t\t\tm = k\n\t\t\tmax = val\n\t\t}\n\t}\n\n\tpred = target.NumToCat(m)\n\n\treturn\n\n}\n<commit_msg>Optimize weighted random forest<commit_after>package CloudForest\n\n\/*\nWRFTarget wraps a numerical feature as a target for us weigted random forest.\n*\/\ntype WRFTarget struct {\n\tCatFeature\n\tWeights []float64\n}\n\n\/*\nNewWRFTarget creates a weighted random forest target and initializes its weights.\n*\/\nfunc NewWRFTarget(f CatFeature, weights map[string]float64) (abt *WRFTarget) {\n\tabt = &WRFTarget{f, make([]float64, f.NCats())}\n\n\tfor i := range abt.Weights {\n\t\tabt.Weights[i] = weights[f.NumToCat(i)]\n\t}\n\n\treturn\n}\n\n\/*\nSplitImpurity is an weigtedRF version of SplitImpurity.\n*\/\nfunc (target *WRFTarget) SplitImpurity(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs) (impurityDecrease float64) {\n\tnl := float64(len(*l))\n\tnr := float64(len(*r))\n\tnm := 0.0\n\n\timpurityDecrease = nl * target.Impurity(l, allocs.LCounter)\n\timpurityDecrease += nr * target.Impurity(r, allocs.RCounter)\n\tif m != nil && len(*m) > 0 {\n\t\tnm = float64(len(*m))\n\t\timpurityDecrease += nm * target.Impurity(m, allocs.Counter)\n\t}\n\n\timpurityDecrease \/= nl + nr + nm\n\treturn\n}\n\n\/\/UpdateSImpFromAllocs willl be called when splits are being built by moving cases from r to l\n\/\/to avoid recalulatign the entire split impurity.\nfunc (target *WRFTarget) UpdateSImpFromAllocs(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs, movedRtoL *[]int) (impurityDecrease float64) {\n\tvar cat, i int\n\tlcounter := *allocs.LCounter\n\trcounter := *allocs.RCounter\n\tfor _, i = range *movedRtoL {\n\n\t\t\/\/most expensive statement:\n\t\tcat = target.Geti(i)\n\t\tlcounter[cat]++\n\t\trcounter[cat]--\n\t\t\/\/counter[target.Geti(i)]++\n\n\t}\n\tnl := float64(len(*l))\n\tnr := float64(len(*r))\n\tnm := 0.0\n\n\timpurityDecrease = nl * target.ImpFromCounts(allocs.LCounter)\n\timpurityDecrease += nr * target.ImpFromCounts(allocs.RCounter)\n\tif m != nil && len(*m) > 0 {\n\t\tnm = float64(len(*m))\n\t\timpurityDecrease += nm * target.ImpFromCounts(allocs.Counter)\n\t}\n\n\timpurityDecrease \/= nl + nr + nm\n\treturn\n}\n\n\/\/Impurity is Gini impurity that uses the weights specified in WRFTarget.weights.\nfunc (target *WRFTarget) Impurity(cases *[]int, counter *[]int) (e float64) {\n\n\ttarget.CountPerCat(cases, counter)\n\n\treturn target.ImpFromCounts(counter)\n}\n\n\/\/ImpFromCounts recalculates gini impurity from class counts for us in intertive updates.\nfunc (target *WRFTarget) ImpFromCounts(counter *[]int) (e float64) {\n\n\ttotal := 0.0\n\tfor i, v := range *counter {\n\t\tw := target.Weights[i]\n\t\ttotal += float64(v) * w\n\n\t\te -= float64(v*v) * w * w\n\t}\n\n\te \/= float64(total * total)\n\te++\n\n\treturn\n\n}\n\n\/\/FindPredicted finds the predicted target as the weighted catagorical Mode.\nfunc (target *WRFTarget) FindPredicted(cases []int) (pred string) {\n\n\tcounts := make([]int, target.NCats())\n\tfor _, i := range cases {\n\t\tif !target.IsMissing(i) {\n\t\t\tcounts[target.Geti(i)] += 1\n\t\t}\n\n\t}\n\tm := 0\n\tmax := 0.0\n\tfor k, v := range counts {\n\t\tval := float64(v) * target.Weights[k]\n\t\tif val > max {\n\t\t\tm = k\n\t\t\tmax = val\n\t\t}\n\t}\n\n\tpred = target.NumToCat(m)\n\n\treturn\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nRPC with a pool of providers each connected via a web-socket.\n*\/\npackage wsrpcpool\n\n\/\/ The pool server module\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nPoolServer is used to listen on a set of web-socket URLs for RPC\nproviders.\n*\/\ntype PoolServer struct {\n\tServer http.Server\n\t\/\/ Provider name to call channel map\n\tPoolMap map[string]chan *rpc.Call\n\t\/\/ Default call channel\n\tDefaultPool chan *rpc.Call\n\t\/\/ Used to signal the pool is listening for incoming connections\n\tListening <-chan struct{}\n\t\/\/ Used to signal the pool is listening for incoming connections (pool side)\n\tlistening chan struct{}\n\t\/\/ Path to call channel map\n\tpathMap map[string]chan *rpc.Call\n\t\/\/ Closer list used in Close()\n\tcList []io.Closer\n\t\/\/ Pool mutex\n\tlock *sync.RWMutex\n}\n\nvar (\n\t\/\/ ErrNoDefaultPool signals that provider isn't found and there is no default pool\n\tErrNoDefaultPool = errors.New(\"No default path is bound\")\n\t\/\/ ErrNoCertsParsed signals that no SSL certificates were found in the given file\n\tErrNoCertsParsed = errors.New(\"No certificates parsed\")\n)\n\n\/*\nNewPool returns a plain PoolServer instance.\n*\/\nfunc NewPool() *PoolServer {\n\tlistening := make(chan struct{}, 1)\n\treturn &PoolServer{\n\t\tListening: listening,\n\t\tlistening: listening,\n\t\tcList: make([]io.Closer, 0),\n\t\tlock: &sync.RWMutex{},\n\t}\n}\n\n\/*\nNewPoolTLS returns a PoolServer instance equipped with the given\nSSL certificate.\n*\/\nfunc NewPoolTLS(certfile, keyfile string) (*PoolServer, error) {\n\tpool := NewPool()\n\tif err := pool.AppendCertificate(certfile, keyfile); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, nil\n}\n\n\/*\nNewPoolTLSAuth returns a PoolServer instance equipped with the given\nSSL certificate and a root CA certificates for client authentication.\n*\/\nfunc NewPoolTLSAuth(certfile, keyfile string, clientCAs ...string) (*PoolServer, error) {\n\tpool, err := NewPoolTLS(certfile, keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = pool.AppendClientCAs(clientCAs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, err\n}\n\n\/*\nAppendCertificate appends an SSL certificate to the set of server\ncertificates loading it from the pair of public certificate and private\nkey files.\n*\/\nfunc (pool *PoolServer) AppendCertificate(certfile, keyfile string) error {\n\tif pool.Server.TLSConfig == nil {\n\t\tpool.Server.TLSConfig = &tls.Config{}\n\t}\n\treturn appendCertificate(pool.Server.TLSConfig, certfile, keyfile)\n}\n\n\/*\nappendCertificate appends an SSL certificate to the given tls.Config\nloading it from the pair of public certificate and private key files.\n*\/\nfunc appendCertificate(tlsConfig *tls.Config, certfile, keyfile string) error {\n\tcert, err := tls.LoadX509KeyPair(certfile, keyfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttlsConfig.Certificates = append(tlsConfig.Certificates, cert)\n\ttlsConfig.BuildNameToCertificate()\n\treturn nil\n}\n\n\/*\nAppendClientCAs appends the given SSL root CA certificate files to the\nset of client CAs to verify client connections against.\n*\/\nfunc (pool *PoolServer) AppendClientCAs(clientCAs ...string) error {\n\tif len(clientCAs) == 0 {\n\t\treturn nil\n\t}\n\tif pool.Server.TLSConfig == nil {\n\t\tpool.Server.TLSConfig = &tls.Config{}\n\t}\n\tif pool.Server.TLSConfig.ClientCAs == nil {\n\t\tpool.Server.TLSConfig.ClientCAs = x509.NewCertPool()\n\t}\n\terr := appendCAs(pool.Server.TLSConfig.ClientCAs, clientCAs...)\n\tif err == nil {\n\t\tpool.Server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\t}\n\treturn err\n}\n\n\/*\nappendCAs appends the given SSL root CA certificate files to the\ngiven CA pool.\n*\/\nfunc appendCAs(caPool *x509.CertPool, caCerts ...string) error {\n\tfor _, caFile := range caCerts {\n\t\tcaCert, err := ioutil.ReadFile(caFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !caPool.AppendCertsFromPEM(caCert) {\n\t\t\treturn ErrNoCertsParsed\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\ninvoke passes the given call to the client.\n*\/\nfunc invoke(client *rpc.Client, call *rpc.Call) *rpc.Call {\n\treturn client.Go(call.ServiceMethod, call.Args, call.Reply, call.Done)\n}\n\n\/*\nconnObserver used to observe I\/O errors in a websocket connection.\n*\/\ntype connObserver struct {\n\t*websocket.Conn\n\tioError chan error\n}\n\n\/*\nreportError sends the given error over the ioError channel\nif there is a free slot available and do nothing otherwise\n(i.e. non blocking).\n*\/\nfunc (conn *connObserver) reportError(err error) {\n\tselect {\n\tcase conn.ioError <- err:\n\tdefault:\n\t}\n}\n\n\/*\nRead implements io.Reader.\n*\/\nfunc (conn *connObserver) Read(p []byte) (n int, err error) {\n\tn, err = conn.Conn.Read(p)\n\tif err != nil {\n\t\tconn.reportError(err)\n\t}\n\treturn\n}\n\n\/*\nWrite implements io.Writer.\n*\/\nfunc (conn *connObserver) Write(p []byte) (n int, err error) {\n\tn, err = conn.Conn.Write(p)\n\tif err != nil {\n\t\tconn.reportError(err)\n\t}\n\treturn\n}\n\n\/*\nhandle returns and invoker() function casted to the\nwebsocket.Handler type in order to get the necessary websocket\nhandshake behavior. The invoker function is wrapped call\nto addCloser() to register the connection with the pool.\n*\/\nfunc (pool *PoolServer) handle(newClient func(conn io.ReadWriteCloser) *rpc.Client, callIn <-chan *rpc.Call) websocket.Handler {\n\t_invoker := invoker(newClient, callIn, nil)\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tpool.addCloser(ws)\n\t\t_invoker(ws)\n\t})\n}\n\n\/*\naddCloser adds the given connection or a listener to the\nset of opened objects.\n*\/\nfunc (pool *PoolServer) addCloser(c io.Closer) {\n\tpool.lock.Lock()\n\tpool.cList = append(pool.cList, c)\n\tpool.lock.Unlock()\n}\n\n\/*\ninvoker returns a function that the passes calls from the\ngiven channel over a websocket connection. In the case of I\/O error\nit is written to errOut channel if it is provided and the function\nreturns. The function also returns if callIn channel is closed.\nNo error is sent in that case. The errOut, if provied, is anyway\nclosed on return.\n*\/\nfunc invoker(newClient func(conn io.ReadWriteCloser) *rpc.Client, callIn <-chan *rpc.Call, errOut chan<- error) func(ws *websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\t\tconn := &connObserver{ws, make(chan error, 10)}\n\t\tclient := newClient(conn)\n\t\tdefer client.Close()\n\t\tdefer func() {\n\t\t\tif errOut != nil {\n\t\t\t\tclose(errOut)\n\t\t\t}\n\t\t}()\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase c, ok := <-callIn:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tinvoke(client, c)\n\t\t\tcase err := <-conn.ioError:\n\t\t\t\tif errOut != nil {\n\t\t\t\t\terrOut <- err\n\t\t\t\t}\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/*\nassertMux checks for pool.Server.Handler mux and makes\none if it doesn't yet exist.\n*\/\nfunc (pool *PoolServer) assertMux() *http.ServeMux {\n\tif pool.Server.Handler == nil {\n\t\tpool.Server.Handler = http.NewServeMux()\n\t}\n\treturn pool.Server.Handler.(*http.ServeMux)\n}\n\n\/*\nBind makes all the connections to the given path to be\nconsidered as means to make calls to the named providers.\nIf no providers are specified the connections at the path\nare considered the default providers. The expected RPC\nprotocol is what is used by rpc.NewClient().\n*\/\nfunc (pool *PoolServer) Bind(path string, providers ...string) {\n\tpool.BindProto(path, rpc.NewClient, providers...)\n}\n\n\/*\nBind associates the given path with the particular RPC protocol\nclient. If no providers are specified the connections at the path\nare considered the default providers.\n*\/\nfunc (pool *PoolServer) BindProto(path string, newClient func(conn io.ReadWriteCloser) *rpc.Client, providers ...string) {\n\tpool.lock.Lock()\n\tmux := pool.assertMux()\n\tif pool.pathMap == nil {\n\t\tpool.pathMap = make(map[string]chan *rpc.Call)\n\t}\n\tcallIn := pool.pathMap[path]\n\tif callIn == nil {\n\t\tcallIn = make(chan *rpc.Call)\n\t\tpool.pathMap[path] = callIn\n\t}\n\tif len(providers) > 0 {\n\t\tif pool.PoolMap == nil {\n\t\t\tpool.PoolMap = make(map[string]chan *rpc.Call)\n\t\t}\n\t\tfor _, name := range providers {\n\t\t\tpool.PoolMap[name] = callIn\n\t\t}\n\t} else {\n\t\tpool.DefaultPool = callIn\n\t}\n\tmux.Handle(path, pool.handle(newClient, callIn))\n\tpool.lock.Unlock()\n}\n\n\/*\nhandleIn returns the websocket.Handler that the serves\nincoming RPC calls over a websocket connection using\nthe given handler.\n*\/\nfunc (pool *PoolServer) handleIn(serveConn func(conn io.ReadWriteCloser)) websocket.Handler {\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tpool.addCloser(ws)\n\t\tserveConn(ws)\n\t})\n}\n\n\/*\nBindIn handles incoming RPC calls on the given path.\nThe expected RPC protocol is what is used by rpc.ServeConn().\n*\/\nfunc (pool *PoolServer) BindIn(path string) {\n\tpool.BindProtoIn(path, rpc.ServeConn)\n}\n\n\/*\nBindIn handles all the connections to the given path with the\ngiven RPC protocol handler.\n*\/\nfunc (pool *PoolServer) BindProtoIn(path string, serveConn func(conn io.ReadWriteCloser)) {\n\tpool.lock.Lock()\n\tmux := pool.assertMux()\n\tmux.Handle(path, pool.handleIn(serveConn))\n\tpool.lock.Unlock()\n}\n\n\/*\nlistnObserver wraps a net.Listener providing a special\nchannel to signal the server pool.Close() was called.\n*\/\ntype listnObserver struct {\n\tnet.Listener\n\tclosed chan struct{}\n\twg sync.WaitGroup\n}\n\n\/*\nClose calls Close() on the embedded Listener and aloso closes\nthe \"closed\" channel to signal Close() was called on the pool.\n*\/\nfunc (lo *listnObserver) Close() error {\n\tclose(lo.closed)\n\terr := lo.Listener.Close()\n\tlo.wg.Wait()\n\treturn err\n}\n\n\/*\nlisten returns the active listener for the current pool config\nand an error if any. It also send a signal over the \"listening\"\nchannel.\n*\/\nfunc (pool *PoolServer) listen(addr string, tlsConfig *tls.Config) (*listnObserver, error) {\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif tlsConfig != nil {\n\t\tl = tls.NewListener(l, tlsConfig)\n\t}\n\n\tlo := &listnObserver{Listener: l, closed: make(chan struct{})}\n\tpool.addCloser(lo)\n\n\tselect {\n\tcase pool.listening <- struct{}{}:\n\tdefault:\n\t}\n\n\treturn lo, nil\n}\n\n\/*\nuse uses the given listener waiting for a signal on\nthe \"stop\" channel.\n*\/\nfunc (pool *PoolServer) use(lo *listnObserver) error {\n\tlo.wg.Add(1)\n\terr := pool.Server.Serve(lo.Listener)\n\tlo.wg.Done()\n\tselect {\n\tcase _, opened := <-lo.closed:\n\t\tif !opened {\n\t\t\terr = nil \/\/ closed by pool.Close()\n\t\t}\n\tdefault:\n\t}\n\treturn err\n}\n\n\/*\nListenAndUse listens the given (or configured if \"\" is given) address\n([host]:port) with no SSL encryption.\n*\/\nfunc (pool *PoolServer) ListenAndUse(addr string) error {\n\tif addr == \"\" {\n\t\taddr = pool.Server.Addr\n\t}\n\tl, err := pool.listen(addr, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pool.use(l)\n}\n\n\/*\nListenAndUseTLS listens the listens the given (or configured if \"\"\nis given) address ([host]:port) with SSL encryption on.\n*\/\nfunc (pool *PoolServer) ListenAndUseTLS(addr string) error {\n\tif addr == \"\" {\n\t\taddr = pool.Server.Addr\n\t}\n\tl, err := pool.listen(addr, pool.Server.TLSConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pool.use(l)\n}\n\n\/*\nClose closes the pool listener.\n*\/\nfunc (pool *PoolServer) Close() error {\n\tvar err error\n\n\tpool.lock.Lock()\n\tfor i := range pool.cList {\n\t\tswitch c := pool.cList[i].(type) {\n\t\tcase *websocket.Conn:\n\t\t\tc.Close() \/\/ skip socket error\n\t\tdefault:\n\t\t\tif _err := c.Close(); _err != nil {\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = _err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpool.cList[i] = nil\n\t}\n\tpool.cList = pool.cList[:0]\n\tpool.lock.Unlock()\n\n\treturn err\n}\n\n\/*\nGo invokes the given remote function asynchronously. The name of the\nprovider (if given as the first part of serviceMethod, i.e. \"Provider.Function\")\nis first searched in the PoolMap and the DefaultPool is used if it isn't there\n(or isn't specified). If \"done\" is nil, a new channel is allocated and passed in\nthe return value. See net\/rpc package for details.\n*\/\nfunc (pool *PoolServer) Go(serviceMethod string, args interface{}, reply interface{}, done chan *rpc.Call) (*rpc.Call, error) {\n\tvar callIn chan *rpc.Call\n\tif split := strings.SplitN(serviceMethod, \".\", 2); len(split) > 1 {\n\t\tcallIn = pool.PoolMap[split[0]]\n\t}\n\tif callIn == nil {\n\t\tcallIn = pool.DefaultPool\n\t}\n\tif callIn == nil {\n\t\treturn nil, ErrNoDefaultPool\n\t}\n\n\tcall := &rpc.Call{\n\t\tServiceMethod: serviceMethod,\n\t\tArgs: args,\n\t\tReply: reply,\n\t}\n\tif done == nil {\n\t\tdone = make(chan *rpc.Call, 1)\n\t}\n\tcall.Done = done\n\n\tcallIn <- call\n\treturn call, nil\n}\n\n\/*\nCall invokes the given remote function and waits for it to complete,\nreturning its error status. If an I\/O error encountered, then the\nfunction re-queues the call.\n*\/\nfunc (pool *PoolServer) Call(serviceMethod string, args interface{}, reply interface{}) error {\n\tfor {\n\t\tif call, err := pool.Go(serviceMethod, args, reply, nil); err == nil {\n\t\t\tcall = <-call.Done\n\t\t\tswitch call.Error {\n\t\t\tcase rpc.ErrShutdown, io.ErrUnexpectedEOF:\n\t\t\tdefault:\n\t\t\t\treturn call.Error\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n}\n<commit_msg>BindProto => BindWith<commit_after>\/*\nRPC with a pool of providers each connected via a web-socket.\n*\/\npackage wsrpcpool\n\n\/\/ The pool server module\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nPoolServer is used to listen on a set of web-socket URLs for RPC\nproviders.\n*\/\ntype PoolServer struct {\n\tServer http.Server\n\t\/\/ Provider name to call channel map\n\tPoolMap map[string]chan *rpc.Call\n\t\/\/ Default call channel\n\tDefaultPool chan *rpc.Call\n\t\/\/ Used to signal the pool is listening for incoming connections\n\tListening <-chan struct{}\n\t\/\/ Used to signal the pool is listening for incoming connections (pool side)\n\tlistening chan struct{}\n\t\/\/ Path to call channel map\n\tpathMap map[string]chan *rpc.Call\n\t\/\/ Closer list used in Close()\n\tcList []io.Closer\n\t\/\/ Pool mutex\n\tlock *sync.RWMutex\n}\n\nvar (\n\t\/\/ ErrNoDefaultPool signals that provider isn't found and there is no default pool\n\tErrNoDefaultPool = errors.New(\"No default path is bound\")\n\t\/\/ ErrNoCertsParsed signals that no SSL certificates were found in the given file\n\tErrNoCertsParsed = errors.New(\"No certificates parsed\")\n)\n\n\/*\nNewPool returns a plain PoolServer instance.\n*\/\nfunc NewPool() *PoolServer {\n\tlistening := make(chan struct{}, 1)\n\treturn &PoolServer{\n\t\tListening: listening,\n\t\tlistening: listening,\n\t\tcList: make([]io.Closer, 0),\n\t\tlock: &sync.RWMutex{},\n\t}\n}\n\n\/*\nNewPoolTLS returns a PoolServer instance equipped with the given\nSSL certificate.\n*\/\nfunc NewPoolTLS(certfile, keyfile string) (*PoolServer, error) {\n\tpool := NewPool()\n\tif err := pool.AppendCertificate(certfile, keyfile); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, nil\n}\n\n\/*\nNewPoolTLSAuth returns a PoolServer instance equipped with the given\nSSL certificate and a root CA certificates for client authentication.\n*\/\nfunc NewPoolTLSAuth(certfile, keyfile string, clientCAs ...string) (*PoolServer, error) {\n\tpool, err := NewPoolTLS(certfile, keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = pool.AppendClientCAs(clientCAs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, err\n}\n\n\/*\nAppendCertificate appends an SSL certificate to the set of server\ncertificates loading it from the pair of public certificate and private\nkey files.\n*\/\nfunc (pool *PoolServer) AppendCertificate(certfile, keyfile string) error {\n\tif pool.Server.TLSConfig == nil {\n\t\tpool.Server.TLSConfig = &tls.Config{}\n\t}\n\treturn appendCertificate(pool.Server.TLSConfig, certfile, keyfile)\n}\n\n\/*\nappendCertificate appends an SSL certificate to the given tls.Config\nloading it from the pair of public certificate and private key files.\n*\/\nfunc appendCertificate(tlsConfig *tls.Config, certfile, keyfile string) error {\n\tcert, err := tls.LoadX509KeyPair(certfile, keyfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttlsConfig.Certificates = append(tlsConfig.Certificates, cert)\n\ttlsConfig.BuildNameToCertificate()\n\treturn nil\n}\n\n\/*\nAppendClientCAs appends the given SSL root CA certificate files to the\nset of client CAs to verify client connections against.\n*\/\nfunc (pool *PoolServer) AppendClientCAs(clientCAs ...string) error {\n\tif len(clientCAs) == 0 {\n\t\treturn nil\n\t}\n\tif pool.Server.TLSConfig == nil {\n\t\tpool.Server.TLSConfig = &tls.Config{}\n\t}\n\tif pool.Server.TLSConfig.ClientCAs == nil {\n\t\tpool.Server.TLSConfig.ClientCAs = x509.NewCertPool()\n\t}\n\terr := appendCAs(pool.Server.TLSConfig.ClientCAs, clientCAs...)\n\tif err == nil {\n\t\tpool.Server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\t}\n\treturn err\n}\n\n\/*\nappendCAs appends the given SSL root CA certificate files to the\ngiven CA pool.\n*\/\nfunc appendCAs(caPool *x509.CertPool, caCerts ...string) error {\n\tfor _, caFile := range caCerts {\n\t\tcaCert, err := ioutil.ReadFile(caFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !caPool.AppendCertsFromPEM(caCert) {\n\t\t\treturn ErrNoCertsParsed\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\ninvoke passes the given call to the client.\n*\/\nfunc invoke(client *rpc.Client, call *rpc.Call) *rpc.Call {\n\treturn client.Go(call.ServiceMethod, call.Args, call.Reply, call.Done)\n}\n\n\/*\nconnObserver used to observe I\/O errors in a websocket connection.\n*\/\ntype connObserver struct {\n\t*websocket.Conn\n\tioError chan error\n}\n\n\/*\nreportError sends the given error over the ioError channel\nif there is a free slot available and do nothing otherwise\n(i.e. non blocking).\n*\/\nfunc (conn *connObserver) reportError(err error) {\n\tselect {\n\tcase conn.ioError <- err:\n\tdefault:\n\t}\n}\n\n\/*\nRead implements io.Reader.\n*\/\nfunc (conn *connObserver) Read(p []byte) (n int, err error) {\n\tn, err = conn.Conn.Read(p)\n\tif err != nil {\n\t\tconn.reportError(err)\n\t}\n\treturn\n}\n\n\/*\nWrite implements io.Writer.\n*\/\nfunc (conn *connObserver) Write(p []byte) (n int, err error) {\n\tn, err = conn.Conn.Write(p)\n\tif err != nil {\n\t\tconn.reportError(err)\n\t}\n\treturn\n}\n\n\/*\nhandle returns and invoker() function casted to the\nwebsocket.Handler type in order to get the necessary websocket\nhandshake behavior. The invoker function is wrapped call\nto addCloser() to register the connection with the pool.\n*\/\nfunc (pool *PoolServer) handle(newClient func(conn io.ReadWriteCloser) *rpc.Client, callIn <-chan *rpc.Call) websocket.Handler {\n\t_invoker := invoker(newClient, callIn, nil)\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tpool.addCloser(ws)\n\t\t_invoker(ws)\n\t})\n}\n\n\/*\naddCloser adds the given connection or a listener to the\nset of opened objects.\n*\/\nfunc (pool *PoolServer) addCloser(c io.Closer) {\n\tpool.lock.Lock()\n\tpool.cList = append(pool.cList, c)\n\tpool.lock.Unlock()\n}\n\n\/*\ninvoker returns a function that the passes calls from the\ngiven channel over a websocket connection. In the case of I\/O error\nit is written to errOut channel if it is provided and the function\nreturns. The function also returns if callIn channel is closed.\nNo error is sent in that case. The errOut, if provied, is anyway\nclosed on return.\n*\/\nfunc invoker(newClient func(conn io.ReadWriteCloser) *rpc.Client, callIn <-chan *rpc.Call, errOut chan<- error) func(ws *websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\t\tconn := &connObserver{ws, make(chan error, 10)}\n\t\tclient := newClient(conn)\n\t\tdefer client.Close()\n\t\tdefer func() {\n\t\t\tif errOut != nil {\n\t\t\t\tclose(errOut)\n\t\t\t}\n\t\t}()\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase c, ok := <-callIn:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t\tinvoke(client, c)\n\t\t\tcase err := <-conn.ioError:\n\t\t\t\tif errOut != nil {\n\t\t\t\t\terrOut <- err\n\t\t\t\t}\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/*\nassertMux checks for pool.Server.Handler mux and makes\none if it doesn't yet exist.\n*\/\nfunc (pool *PoolServer) assertMux() *http.ServeMux {\n\tif pool.Server.Handler == nil {\n\t\tpool.Server.Handler = http.NewServeMux()\n\t}\n\treturn pool.Server.Handler.(*http.ServeMux)\n}\n\n\/*\nBind makes all the connections to the given path to be\nconsidered as means to make calls to the named providers.\nIf no providers are specified the connections at the path\nare considered the default providers. The expected RPC\nprotocol is what is used by rpc.NewClient().\n*\/\nfunc (pool *PoolServer) Bind(path string, providers ...string) {\n\tpool.BindWith(path, rpc.NewClient, providers...)\n}\n\n\/*\nBindWith associates the given path with the particular RPC protocol\nclient. If no providers are specified the connections at the path\nare considered the default providers.\n*\/\nfunc (pool *PoolServer) BindWith(path string, newClient func(conn io.ReadWriteCloser) *rpc.Client, providers ...string) {\n\tpool.lock.Lock()\n\tmux := pool.assertMux()\n\tif pool.pathMap == nil {\n\t\tpool.pathMap = make(map[string]chan *rpc.Call)\n\t}\n\tcallIn := pool.pathMap[path]\n\tif callIn == nil {\n\t\tcallIn = make(chan *rpc.Call)\n\t\tpool.pathMap[path] = callIn\n\t}\n\tif len(providers) > 0 {\n\t\tif pool.PoolMap == nil {\n\t\t\tpool.PoolMap = make(map[string]chan *rpc.Call)\n\t\t}\n\t\tfor _, name := range providers {\n\t\t\tpool.PoolMap[name] = callIn\n\t\t}\n\t} else {\n\t\tpool.DefaultPool = callIn\n\t}\n\tmux.Handle(path, pool.handle(newClient, callIn))\n\tpool.lock.Unlock()\n}\n\n\/*\nhandleIn returns the websocket.Handler that the serves\nincoming RPC calls over a websocket connection using\nthe given handler.\n*\/\nfunc (pool *PoolServer) handleIn(serveConn func(conn io.ReadWriteCloser)) websocket.Handler {\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tpool.addCloser(ws)\n\t\tserveConn(ws)\n\t})\n}\n\n\/*\nBindIn handles incoming RPC calls on the given path.\nThe expected RPC protocol is what is used by rpc.ServeConn().\n*\/\nfunc (pool *PoolServer) BindIn(path string) {\n\tpool.BindInWith(path, rpc.ServeConn)\n}\n\n\/*\nBindInWith handles all the connections to the given path with the\ngiven RPC protocol handler.\n*\/\nfunc (pool *PoolServer) BindInWith(path string, serveConn func(conn io.ReadWriteCloser)) {\n\tpool.lock.Lock()\n\tmux := pool.assertMux()\n\tmux.Handle(path, pool.handleIn(serveConn))\n\tpool.lock.Unlock()\n}\n\n\/*\nlistnObserver wraps a net.Listener providing a special\nchannel to signal the server pool.Close() was called.\n*\/\ntype listnObserver struct {\n\tnet.Listener\n\tclosed chan struct{}\n\twg sync.WaitGroup\n}\n\n\/*\nClose calls Close() on the embedded Listener and aloso closes\nthe \"closed\" channel to signal Close() was called on the pool.\n*\/\nfunc (lo *listnObserver) Close() error {\n\tclose(lo.closed)\n\terr := lo.Listener.Close()\n\tlo.wg.Wait()\n\treturn err\n}\n\n\/*\nlisten returns the active listener for the current pool config\nand an error if any. It also send a signal over the \"listening\"\nchannel.\n*\/\nfunc (pool *PoolServer) listen(addr string, tlsConfig *tls.Config) (*listnObserver, error) {\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif tlsConfig != nil {\n\t\tl = tls.NewListener(l, tlsConfig)\n\t}\n\n\tlo := &listnObserver{Listener: l, closed: make(chan struct{})}\n\tpool.addCloser(lo)\n\n\tselect {\n\tcase pool.listening <- struct{}{}:\n\tdefault:\n\t}\n\n\treturn lo, nil\n}\n\n\/*\nuse uses the given listener waiting for a signal on\nthe \"stop\" channel.\n*\/\nfunc (pool *PoolServer) use(lo *listnObserver) error {\n\tlo.wg.Add(1)\n\terr := pool.Server.Serve(lo.Listener)\n\tlo.wg.Done()\n\tselect {\n\tcase _, opened := <-lo.closed:\n\t\tif !opened {\n\t\t\terr = nil \/\/ closed by pool.Close()\n\t\t}\n\tdefault:\n\t}\n\treturn err\n}\n\n\/*\nListenAndUse listens the given (or configured if \"\" is given) address\n([host]:port) with no SSL encryption.\n*\/\nfunc (pool *PoolServer) ListenAndUse(addr string) error {\n\tif addr == \"\" {\n\t\taddr = pool.Server.Addr\n\t}\n\tl, err := pool.listen(addr, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pool.use(l)\n}\n\n\/*\nListenAndUseTLS listens the listens the given (or configured if \"\"\nis given) address ([host]:port) with SSL encryption on.\n*\/\nfunc (pool *PoolServer) ListenAndUseTLS(addr string) error {\n\tif addr == \"\" {\n\t\taddr = pool.Server.Addr\n\t}\n\tl, err := pool.listen(addr, pool.Server.TLSConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pool.use(l)\n}\n\n\/*\nClose closes the pool listener.\n*\/\nfunc (pool *PoolServer) Close() error {\n\tvar err error\n\n\tpool.lock.Lock()\n\tfor i := range pool.cList {\n\t\tswitch c := pool.cList[i].(type) {\n\t\tcase *websocket.Conn:\n\t\t\tc.Close() \/\/ skip socket error\n\t\tdefault:\n\t\t\tif _err := c.Close(); _err != nil {\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = _err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpool.cList[i] = nil\n\t}\n\tpool.cList = pool.cList[:0]\n\tpool.lock.Unlock()\n\n\treturn err\n}\n\n\/*\nGo invokes the given remote function asynchronously. The name of the\nprovider (if given as the first part of serviceMethod, i.e. \"Provider.Function\")\nis first searched in the PoolMap and the DefaultPool is used if it isn't there\n(or isn't specified). If \"done\" is nil, a new channel is allocated and passed in\nthe return value. See net\/rpc package for details.\n*\/\nfunc (pool *PoolServer) Go(serviceMethod string, args interface{}, reply interface{}, done chan *rpc.Call) (*rpc.Call, error) {\n\tvar callIn chan *rpc.Call\n\tif split := strings.SplitN(serviceMethod, \".\", 2); len(split) > 1 {\n\t\tcallIn = pool.PoolMap[split[0]]\n\t}\n\tif callIn == nil {\n\t\tcallIn = pool.DefaultPool\n\t}\n\tif callIn == nil {\n\t\treturn nil, ErrNoDefaultPool\n\t}\n\n\tcall := &rpc.Call{\n\t\tServiceMethod: serviceMethod,\n\t\tArgs: args,\n\t\tReply: reply,\n\t}\n\tif done == nil {\n\t\tdone = make(chan *rpc.Call, 1)\n\t}\n\tcall.Done = done\n\n\tcallIn <- call\n\treturn call, nil\n}\n\n\/*\nCall invokes the given remote function and waits for it to complete,\nreturning its error status. If an I\/O error encountered, then the\nfunction re-queues the call.\n*\/\nfunc (pool *PoolServer) Call(serviceMethod string, args interface{}, reply interface{}) error {\n\tfor {\n\t\tif call, err := pool.Go(serviceMethod, args, reply, nil); err == nil {\n\t\t\tcall = <-call.Done\n\t\t\tswitch call.Error {\n\t\t\tcase rpc.ErrShutdown, io.ErrUnexpectedEOF:\n\t\t\tdefault:\n\t\t\t\treturn call.Error\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/* RPC with a pool of providers each connected via web-socket. *\/\npackage wsrpcpool\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\/\/\"golang.org\/x\/net\/websocket\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\/\/\"net\/rpc\/jsonrpc\"\n)\n\n\/* A Web-socket connection endpoint for RPC provideres. *\/\ntype PoolServer struct {\n\thttp.Server\n}\n\n\/* Plain PoolServer instance. *\/\nfunc NewPool() *PoolServer {\n\treturn &PoolServer{}\n}\n\n\/* PoolServer instance equipped with the given SSL certificate. *\/\nfunc NewPoolTLS(certfile, keyfile string) (*PoolServer, error) {\n\tpool := NewPool()\n\tif err := pool.AppendCertificate(certfile, keyfile); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, nil\n}\n\n\/* Appends an SSL certificate to the set of server certificates loading\nit from the pair of public certificate and private key files. *\/\nfunc (pool *PoolServer) AppendCertificate(certfile, keyfile string) error {\n\tif pool.TLSConfig == nil {\n\t\tpool.TLSConfig = &tls.Config{}\n\t}\n\n\tcert, err := tls.LoadX509KeyPair(certfile, keyfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool.TLSConfig.Certificates = append(pool.TLSConfig.Certificates, cert)\n\tpool.TLSConfig.BuildNameToCertificate()\n\n\treturn nil\n}\n\n\/* Appends the given SSL root CA certificate files to the set of client\nCAs to verify client connections against. *\/\nfunc (pool *PoolServer) AppendClientCAs(clientCAs ...string) error {\n\tif pool.TLSConfig == nil {\n\t\tpool.TLSConfig = &tls.Config{}\n\t}\n\tif pool.TLSConfig.ClientCAs == nil {\n\t\tpool.TLSConfig.ClientCAs = x509.NewCertPool()\n\t}\n\n\tfor _, caFile := range clientCAs {\n\t\tcaCert, err := ioutil.ReadFile(caFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpool.TLSConfig.ClientCAs.AppendCertsFromPEM(caCert)\n\t}\n\n\tpool.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\treturn nil\n}\n\n\/* PoolServer instance equipped with the given SSL certificate\nand a root CA certificates for client authentication. *\/\nfunc NewPoolTLSAuth(certfile, keyfile string, clientCAs ...string) (*PoolServer, error) {\n\tpool, err := NewPoolTLS(certfile, keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = pool.AppendClientCAs(clientCAs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pool, err\n}\n<commit_msg>Implement the pool map<commit_after>\/* RPC with a pool of providers each connected via web-socket. *\/\npackage wsrpcpool\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/rpc\"\n\t\"net\/rpc\/jsonrpc\"\n\t\"sync\"\n)\n\n\/* A Web-socket connection endpoint for RPC provideres. *\/\ntype PoolServer struct {\n\thttp.Server\n\tPoolMap map[string]chan *rpc.Call\n\tDefaultPool chan *rpc.Call\n}\n\n\/* Plain PoolServer instance. *\/\nfunc NewPool() *PoolServer {\n\treturn &PoolServer{}\n}\n\n\/* PoolServer instance equipped with the given SSL certificate. *\/\nfunc NewPoolTLS(certfile, keyfile string) (*PoolServer, error) {\n\tpool := NewPool()\n\tif err := pool.AppendCertificate(certfile, keyfile); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pool, nil\n}\n\n\/* Appends an SSL certificate to the set of server certificates loading\nit from the pair of public certificate and private key files. *\/\nfunc (pool *PoolServer) AppendCertificate(certfile, keyfile string) error {\n\tif pool.TLSConfig == nil {\n\t\tpool.TLSConfig = &tls.Config{}\n\t}\n\n\tcert, err := tls.LoadX509KeyPair(certfile, keyfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool.TLSConfig.Certificates = append(pool.TLSConfig.Certificates, cert)\n\tpool.TLSConfig.BuildNameToCertificate()\n\n\treturn nil\n}\n\n\/* Appends the given SSL root CA certificate files to the set of client\nCAs to verify client connections against. *\/\nfunc (pool *PoolServer) AppendClientCAs(clientCAs ...string) error {\n\tif pool.TLSConfig == nil {\n\t\tpool.TLSConfig = &tls.Config{}\n\t}\n\tif pool.TLSConfig.ClientCAs == nil {\n\t\tpool.TLSConfig.ClientCAs = x509.NewCertPool()\n\t}\n\n\tfor _, caFile := range clientCAs {\n\t\tcaCert, err := ioutil.ReadFile(caFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpool.TLSConfig.ClientCAs.AppendCertsFromPEM(caCert)\n\t}\n\n\tpool.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\treturn nil\n}\n\n\/* PoolServer instance equipped with the given SSL certificate\nand a root CA certificates for client authentication. *\/\nfunc NewPoolTLSAuth(certfile, keyfile string, clientCAs ...string) (*PoolServer, error) {\n\tpool, err := NewPoolTLS(certfile, keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = pool.AppendClientCAs(clientCAs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pool, err\n}\n\n\/* Passes the given call to the client. *\/\nfunc invoke(client *rpc.Client, call *rpc.Call) *rpc.Call {\n\treturn client.Go(call.ServiceMethod, call.Args, call.Reply, call.Done)\n}\n\n\/* Associates the given path with the set of remote objects or make\nit the default path if no object names given. *\/\nfunc (pool *PoolServer) BindPath(path string, names ...string) {\n\tif pool.Handler == nil {\n\t\tpool.Handler = http.NewServeMux()\n\t}\n\n\tmux := pool.Handler.(*http.ServeMux)\n\n\tif len(names) > 0 {\n\t\tif pool.PoolMap == nil {\n\t\t\tpool.PoolMap = make(map[string]chan *rpc.Call)\n\t\t}\n\t\tfor _, name := range names {\n\t\t\tif pool.PoolMap[name] == nil {\n\t\t\t\tpool.PoolMap[name] = make(chan *rpc.Call)\n\t\t\t}\n\t\t}\n\t\tmux.Handle(path, websocket.Handler(func(ws *websocket.Conn) {\n\t\t\tvar wg sync.WaitGroup\n\t\t\tclient := jsonrpc.NewClient(ws)\n\t\t\tfor _, name := range names {\n\t\t\t\tcallIn := pool.PoolMap[name]\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tfor c := range callIn {\n\t\t\t\t\t\tinvoke(client, c)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t\twg.Add(len(names))\n\t\t\twg.Wait()\n\t\t\tclient.Close()\n\t\t}))\n\t} else {\n\t\tif pool.DefaultPool == nil {\n\t\t\tpool.DefaultPool = make(chan *rpc.Call)\n\t\t}\n\t\tchanIn := pool.DefaultPool\n\t\tmux.Handle(path, websocket.Handler(func(ws *websocket.Conn) {\n\t\t\tclient := jsonrpc.NewClient(ws)\n\t\t\tfor c := range chanIn {\n\t\t\t\tinvoke(client, c)\n\t\t\t}\n\t\t\tclient.Close()\n\t\t}))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/github\"\n\t\"github.com\/github\/hub\/ui\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar (\n\tcmdGist = &Command{\n\t\tRun: printGistHelp,\n\t\tUsage: `\ngist create [--public] [<FILES>...]\ngist show <ID> [<FILENAME>]\n`,\n\t\tLong: `Create and print GitHub Gists\n\n## Commands:\n\n\t* _create_:\n\t\tCreate a new gist. If no <FILES> are specified, the content is read from\n\t\tstandard input.\n\n * _show_:\n\t\tPrint the contents of a gist. If the gist contains multiple files, the\n\t\toperation will error out unless <FILENAME> is specified.\n\n## Options:\n\n --public\n Make the new gist public (default: false).\n\n\t-o, --browse\n\t\tOpen the new gist in a web browser.\n\n\t-c, --copy\n\t\tPut the URL of the new gist to clipboard instead of printing it.\n\n## Examples:\n\n\t$ echo hello | hub gist create --public\n\n\t$ hub gist create <file1> <file2>\n\n # print a specific file within a gist:\n $ hub gist show <ID> testfile1.txt\n\n## See also:\n\nhub(1), hub-api(1)\n`,\n\t}\n\n\tcmdShowGist = &Command{\n\t\tKey: \"show\",\n\t\tRun: showGist,\n\t}\n\n\tcmdCreateGist = &Command{\n\t\tKey: \"create\",\n\t\tRun: createGist,\n\t\tKnownFlags: `\n\t\t--public\n\t\t-o, --browse\n\t\t-c, --copy\n`,\n\t}\n)\n\nfunc init() {\n\tcmdGist.Use(cmdShowGist)\n\tcmdGist.Use(cmdCreateGist)\n\tCmdRunner.Use(cmdGist)\n}\n\nfunc getGist(gh *github.Client, id string, filename string) error {\n\tgist, err := gh.FetchGist(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(gist.Files) > 1 && filename == \"\" {\n\t\tfilenames := []string{}\n\t\tfor name := range gist.Files {\n\t\t\tfilenames = append(filenames, name)\n\t\t}\n\t\treturn fmt.Errorf(\"the gist contains multiple files, you must specify one:\\n%s\", strings.Join(filenames, \"\\n\"))\n\t}\n\n\tif filename != \"\" {\n\t\tif val, ok := gist.Files[filename]; ok {\n\t\t\tui.Println(val.Content)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"no such file in gist\")\n\t\t}\n\t} else {\n\t\tfor name := range gist.Files {\n\t\t\tfile := gist.Files[name]\n\t\t\tui.Println(file.Content)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc printGistHelp(command *Command, args *Args) {\n\tutils.Check(command.UsageError(\"\"))\n}\n\nfunc createGist(cmd *Command, args *Args) {\n\targs.NoForward()\n\n\thost, err := github.CurrentConfig().DefaultHostNoPrompt()\n\tutils.Check(err)\n\tgh := github.NewClient(host.Host)\n\n\tfilenames := []string{}\n\tif args.IsParamsEmpty() {\n\t\tfilenames = append(filenames, \"-\")\n\t} else {\n\t\tfilenames = args.Params\n\t}\n\n\tvar gist *github.Gist\n\tif args.Noop {\n\t\tui.Println(\"Would create gist\")\n\t\tgist = &github.Gist{\n\t\t\tHtmlUrl: fmt.Sprintf(\"https:\/\/gist.%s\/%s\", gh.Host.Host, \"ID\"),\n\t\t}\n\t} else {\n\t\tgist, err = gh.CreateGist(filenames, args.Flag.Bool(\"--public\"))\n\t\tutils.Check(err)\n\t}\n\n\tflagIssueBrowse := args.Flag.Bool(\"--browse\")\n\tflagIssueCopy := args.Flag.Bool(\"--copy\")\n\tprintBrowseOrCopy(args, gist.HtmlUrl, flagIssueBrowse, flagIssueCopy)\n}\n\nfunc showGist(cmd *Command, args *Args) {\n\targs.NoForward()\n\tif args.ParamsSize() < 1 {\n\t\tutils.Check(cmd.UsageError(\"you must specify a gist ID\"))\n\t}\n\tid := args.GetParam(0)\n\tfilename := \"\"\n\tif args.ParamsSize() > 1 {\n\t\tfilename = args.GetParam(1)\n\t}\n\n\thost, err := github.CurrentConfig().DefaultHostNoPrompt()\n\tutils.Check(err)\n\tgh := github.NewClient(host.Host)\n\terr = getGist(gh, id, filename)\n\tutils.Check(err)\n}\n<commit_msg>[gist] List shorthand flags in usage synopsis<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/github\"\n\t\"github.com\/github\/hub\/ui\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar (\n\tcmdGist = &Command{\n\t\tRun: printGistHelp,\n\t\tUsage: `\ngist create [-oc] [--public] [<FILES>...]\ngist show <ID> [<FILENAME>]\n`,\n\t\tLong: `Create and print GitHub Gists\n\n## Commands:\n\n\t* _create_:\n\t\tCreate a new gist. If no <FILES> are specified, the content is read from\n\t\tstandard input.\n\n * _show_:\n\t\tPrint the contents of a gist. If the gist contains multiple files, the\n\t\toperation will error out unless <FILENAME> is specified.\n\n## Options:\n\n --public\n Make the new gist public (default: false).\n\n\t-o, --browse\n\t\tOpen the new gist in a web browser.\n\n\t-c, --copy\n\t\tPut the URL of the new gist to clipboard instead of printing it.\n\n## Examples:\n\n\t$ echo hello | hub gist create --public\n\n\t$ hub gist create <file1> <file2>\n\n # print a specific file within a gist:\n $ hub gist show <ID> testfile1.txt\n\n## See also:\n\nhub(1), hub-api(1)\n`,\n\t}\n\n\tcmdShowGist = &Command{\n\t\tKey: \"show\",\n\t\tRun: showGist,\n\t}\n\n\tcmdCreateGist = &Command{\n\t\tKey: \"create\",\n\t\tRun: createGist,\n\t\tKnownFlags: `\n\t\t--public\n\t\t-o, --browse\n\t\t-c, --copy\n`,\n\t}\n)\n\nfunc init() {\n\tcmdGist.Use(cmdShowGist)\n\tcmdGist.Use(cmdCreateGist)\n\tCmdRunner.Use(cmdGist)\n}\n\nfunc getGist(gh *github.Client, id string, filename string) error {\n\tgist, err := gh.FetchGist(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(gist.Files) > 1 && filename == \"\" {\n\t\tfilenames := []string{}\n\t\tfor name := range gist.Files {\n\t\t\tfilenames = append(filenames, name)\n\t\t}\n\t\treturn fmt.Errorf(\"the gist contains multiple files, you must specify one:\\n%s\", strings.Join(filenames, \"\\n\"))\n\t}\n\n\tif filename != \"\" {\n\t\tif val, ok := gist.Files[filename]; ok {\n\t\t\tui.Println(val.Content)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"no such file in gist\")\n\t\t}\n\t} else {\n\t\tfor name := range gist.Files {\n\t\t\tfile := gist.Files[name]\n\t\t\tui.Println(file.Content)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc printGistHelp(command *Command, args *Args) {\n\tutils.Check(command.UsageError(\"\"))\n}\n\nfunc createGist(cmd *Command, args *Args) {\n\targs.NoForward()\n\n\thost, err := github.CurrentConfig().DefaultHostNoPrompt()\n\tutils.Check(err)\n\tgh := github.NewClient(host.Host)\n\n\tfilenames := []string{}\n\tif args.IsParamsEmpty() {\n\t\tfilenames = append(filenames, \"-\")\n\t} else {\n\t\tfilenames = args.Params\n\t}\n\n\tvar gist *github.Gist\n\tif args.Noop {\n\t\tui.Println(\"Would create gist\")\n\t\tgist = &github.Gist{\n\t\t\tHtmlUrl: fmt.Sprintf(\"https:\/\/gist.%s\/%s\", gh.Host.Host, \"ID\"),\n\t\t}\n\t} else {\n\t\tgist, err = gh.CreateGist(filenames, args.Flag.Bool(\"--public\"))\n\t\tutils.Check(err)\n\t}\n\n\tflagIssueBrowse := args.Flag.Bool(\"--browse\")\n\tflagIssueCopy := args.Flag.Bool(\"--copy\")\n\tprintBrowseOrCopy(args, gist.HtmlUrl, flagIssueBrowse, flagIssueCopy)\n}\n\nfunc showGist(cmd *Command, args *Args) {\n\targs.NoForward()\n\tif args.ParamsSize() < 1 {\n\t\tutils.Check(cmd.UsageError(\"you must specify a gist ID\"))\n\t}\n\tid := args.GetParam(0)\n\tfilename := \"\"\n\tif args.ParamsSize() > 1 {\n\t\tfilename = args.GetParam(1)\n\t}\n\n\thost, err := github.CurrentConfig().DefaultHostNoPrompt()\n\tutils.Check(err)\n\tgh := github.NewClient(host.Host)\n\terr = getGist(gh, id, filename)\n\tutils.Check(err)\n}\n<|endoftext|>"} {"text":"<commit_before>package beertasting\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/urlfetch\"\n\t\"appengine\/user\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/ant0ine\/go-json-rest\/rest\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"time\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", handler)\n\thttp.HandleFunc(\"\/search\", searchHandler)\n\thttp.HandleFunc(\"\/feed\", feedHandler)\n\thttp.HandleFunc(\"\/displayFeed\", displayFeedHandler)\n\thttp.HandleFunc(\"\/oauth\/untappd\", oauthUntappdHandler)\n\trestHandler := rest.ResourceHandler{}\n\trestHandler.SetRoutes(\n\t\t&rest.Route{\"GET\", \"\/admin\/config\", getAdminConfig},\n\t\t&rest.Route{\"PUT\", \"\/admin\/config\", putAdminConfig},\n\t)\n\thttp.Handle(\"\/admin\/config\", &restHandler)\n}\n\nvar (\n\tendpoint = url.URL{Scheme: \"http\", Host: \"api.untappd.com\", Path: \"v4\"}\n)\n\ntype Config struct {\n\tClientId string\n\tClientSecret string\n\tWhitelist []string\n}\n\nfunc configKey(c appengine.Context) *datastore.Key {\n\treturn datastore.NewKey(c, \"Config\", \"default\", 0, nil)\n}\n\nfunc getConfig(c appengine.Context) (Config, error) {\n\tvar cfg Config\n\terr := datastore.Get(c, configKey(c), &cfg)\n\treturn cfg, err\n}\n\nfunc httpCallback(c appengine.Context, path string) *url.URL {\n\treturn &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: appengine.DefaultVersionHostname(c),\n\t\tPath: path,\n\t}\n}\n\nfunc oauthCallback(c appengine.Context, svc string) *url.URL {\n\treturn httpCallback(c, fmt.Sprintf(\"oauth\/%s\", svc))\n}\n\nfunc userLoggedIn(c appengine.Context, curUrl *url.URL, w http.ResponseWriter) (*user.User, bool) {\n\tu := user.Current(c)\n\tif u != nil {\n\t\treturn u, true\n\t}\n\turl, err := user.LoginURL(c, curUrl.String())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn nil, false\n\t}\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusFound)\n\treturn nil, false\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"Welcome, %s\", user)\n}\n\nfunc searchHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tvar err error\n\tvar config Config\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif len(config.Whitelist) == 0 {\n\t\tc.Infof(\"whitelist not found, using test@example.com\")\n\t\tconfig.Whitelist = []string{\"test@example.com\"}\n\t}\n\tfound := false\n\tfor _, record := range config.Whitelist {\n\t\tif record == user.Email {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\thttp.Error(w, fmt.Sprintf(\"user %s not in whitelist\", user), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tt, err := template.ParseFiles(\"templates\/trial1.html\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\ts := struct{ Name, Endpoint, ClientId, ClientSecret string }{\n\t\tuser.String(),\n\t\tendpoint.String(),\n\t\tconfig.ClientId,\n\t\tconfig.ClientSecret,\n\t}\n\tif err := t.Execute(w, s); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc oauthUntappdHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t_, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tif len(r.FormValue(\"code\")) == 0 {\n\t\thttp.Error(w, \"missing code parameter\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar config Config\n\tvar err error\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tu := url.URL{Scheme: \"https\", Host: \"untappd.com\", Path: \"oauth\/authorize\/\"}\n\tq := u.Query()\n\tq.Add(\"client_id\", config.ClientId)\n\tq.Add(\"client_secret\", config.ClientSecret)\n\tq.Add(\"response_type\", \"code\")\n\tq.Add(\"code\", r.FormValue(\"code\"))\n\tq.Add(\"redirect_url\", oauthCallback(c, \"untappd\").String())\n\tu.RawQuery = q.Encode()\n\tc.Infof(\"authorize URL: %s\", u.String())\n\tresp, err := urlfetch.Client(c).Get(u.String())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tbuf, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\toauthResponse := struct {\n\t\tResponse struct {\n\t\t\tAccessToken string `json:\"access_token\"`\n\t\t}\n\t}{}\n\terr = json.Unmarshal(buf, &oauthResponse)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%s: %s\", err.Error(), string(buf))\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\texpire := time.Now().AddDate(0, 0, 1)\n\thostname := appengine.DefaultVersionHostname(c)\n\traw := fmt.Sprintf(\"access_token=%s\", oauthResponse.Response.AccessToken)\n\tcookie := http.Cookie{\n\t\tName: \"access_token\",\n\t\tValue: oauthResponse.Response.AccessToken,\n\t\tPath: \"\/\",\n\t\tDomain: hostname,\n\t\tExpires: expire,\n\t\tRawExpires: expire.Format(time.UnixDate),\n\t\tMaxAge: 86400,\n\t\tSecure: false,\n\t\tHttpOnly: false,\n\t\tRaw: raw,\n\t\tUnparsed: []string{raw},\n\t}\n\thttp.SetCookie(w, &cookie)\n\thttp.Redirect(w, r, \"\/displayFeed\", http.StatusFound)\n}\n\nfunc displayFeedHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tt, err := template.ParseFiles(\"templates\/feed.html\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tendpoint.Path = path.Join(endpoint.Path, \"checkin\/recent\")\n\ts := struct{ Name, FeedRequest string }{user.String(), endpoint.String()}\n\tif err := t.Execute(w, s); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc feedHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t_, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tvar config Config\n\tvar err error\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar untappdOath url.URL\n\tuntappdOath.Scheme = \"https\"\n\tuntappdOath.Host = \"untappd.com\"\n\tuntappdOath.Path = \"oauth\/authenticate\/\"\n\tq := untappdOath.Query()\n\tq.Add(\"client_id\", config.ClientId)\n\tq.Add(\"response_type\", \"code\")\n\tq.Add(\"redirect_url\", oauthCallback(c, \"untappd\").String())\n\tuntappdOath.RawQuery = q.Encode()\n\thttp.Redirect(w, r, untappdOath.String(), http.StatusFound)\n\treturn\n}\n\nfunc writeJson(w rest.ResponseWriter, v interface{}) {\n\tif err := w.WriteJson(v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc datastoreRestGet(c appengine.Context, k *datastore.Key, w rest.ResponseWriter, v interface{}) {\n\tif err := datastore.Get(c, k, v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\twriteJson(w, v)\n}\n\nfunc datastoreRestPut(c appengine.Context, k *datastore.Key, w rest.ResponseWriter, v interface{}) {\n\tif _, err := datastore.Put(c, k, v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\twriteJson(w, v)\n}\n\nfunc putAdminConfigCtx(c appengine.Context, w rest.ResponseWriter, r *rest.Request) {\n\tvar config Config\n\terr := r.DecodeJsonPayload(&config)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"DecodeJsonPayload(): %s\", err)\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdatastoreRestPut(c, configKey(c), w, &config)\n}\n\nfunc putAdminConfig(w rest.ResponseWriter, r *rest.Request) {\n\tc := appengine.NewContext(r.Request)\n\tputAdminConfigCtx(c, w, r)\n}\n\nfunc getAdminConfigCtx(c appengine.Context, w rest.ResponseWriter) {\n\tvar config Config\n\tdatastoreRestGet(c, configKey(c), w, &config)\n}\n\nfunc getAdminConfig(w rest.ResponseWriter, r *rest.Request) {\n\tc := appengine.NewContext(r.Request)\n\tgetAdminConfigCtx(c, w)\n}\n<commit_msg>add a basic RESTful API for users<commit_after>package beertasting\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/urlfetch\"\n\t\"appengine\/user\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/ant0ine\/go-json-rest\/rest\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", handler)\n\thttp.HandleFunc(\"\/search\", searchHandler)\n\thttp.HandleFunc(\"\/feed\", feedHandler)\n\thttp.HandleFunc(\"\/displayFeed\", displayFeedHandler)\n\thttp.HandleFunc(\"\/oauth\/untappd\", oauthUntappdHandler)\n\trestHandler := rest.ResourceHandler{}\n\trestHandler.SetRoutes(\n\t\t&rest.Route{\"GET\", \"\/admin\/config\", getAdminConfig},\n\t\t&rest.Route{\"PUT\", \"\/admin\/config\", putAdminConfig},\n\t\t&rest.Route{\"GET\", \"\/users\", getAllUsers},\n\t\t&rest.Route{\"POST\", \"\/users\", postUser},\n\t\t&rest.Route{\"GET\", \"\/users\/:id\", getUser},\n\t\t&rest.Route{\"DELETE\", \"\/users\/:id\", deleteUser},\n\t)\n\thttp.Handle(\"\/admin\/config\", &restHandler)\n\thttp.Handle(\"\/users\", &restHandler)\n\thttp.Handle(\"\/users\/\", &restHandler)\n}\n\nvar (\n\tendpoint = url.URL{Scheme: \"http\", Host: \"api.untappd.com\", Path: \"v4\"}\n)\n\ntype Config struct {\n\tClientId string\n\tClientSecret string\n\tWhitelist []string\n}\n\ntype User struct {\n\tID int64 `datastore:\"-\"`\n\tName string\n\tEmail string\n}\n\nfunc (user *User) DecodeJsonPayload(r *rest.Request) error {\n\terr := r.DecodeJsonPayload(user)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif user.Name == \"\" {\n\t\treturn fmt.Errorf(\"name required\")\n\t}\n\tif user.Email == \"\" {\n\t\treturn fmt.Errorf(\"email required\")\n\t}\n\treturn nil\n}\n\nfunc configKey(c appengine.Context) *datastore.Key {\n\treturn datastore.NewKey(c, \"Config\", \"default\", 0, nil)\n}\n\nfunc getConfig(c appengine.Context) (Config, error) {\n\tvar cfg Config\n\terr := datastore.Get(c, configKey(c), &cfg)\n\treturn cfg, err\n}\n\nfunc httpCallback(c appengine.Context, path string) *url.URL {\n\treturn &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: appengine.DefaultVersionHostname(c),\n\t\tPath: path,\n\t}\n}\n\nfunc oauthCallback(c appengine.Context, svc string) *url.URL {\n\treturn httpCallback(c, fmt.Sprintf(\"oauth\/%s\", svc))\n}\n\nfunc userLoggedIn(c appengine.Context, curUrl *url.URL, w http.ResponseWriter) (*user.User, bool) {\n\tu := user.Current(c)\n\tif u != nil {\n\t\treturn u, true\n\t}\n\turl, err := user.LoginURL(c, curUrl.String())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn nil, false\n\t}\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusFound)\n\treturn nil, false\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"Welcome, %s\", user)\n}\n\nfunc searchHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tvar err error\n\tvar config Config\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif len(config.Whitelist) == 0 {\n\t\tc.Infof(\"whitelist not found, using test@example.com\")\n\t\tconfig.Whitelist = []string{\"test@example.com\"}\n\t}\n\tfound := false\n\tfor _, record := range config.Whitelist {\n\t\tif record == user.Email {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\thttp.Error(w, fmt.Sprintf(\"user %s not in whitelist\", user), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tt, err := template.ParseFiles(\"templates\/trial1.html\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\ts := struct{ Name, Endpoint, ClientId, ClientSecret string }{\n\t\tuser.String(),\n\t\tendpoint.String(),\n\t\tconfig.ClientId,\n\t\tconfig.ClientSecret,\n\t}\n\tif err := t.Execute(w, s); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc oauthUntappdHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t_, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tif len(r.FormValue(\"code\")) == 0 {\n\t\thttp.Error(w, \"missing code parameter\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar config Config\n\tvar err error\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tu := url.URL{Scheme: \"https\", Host: \"untappd.com\", Path: \"oauth\/authorize\/\"}\n\tq := u.Query()\n\tq.Add(\"client_id\", config.ClientId)\n\tq.Add(\"client_secret\", config.ClientSecret)\n\tq.Add(\"response_type\", \"code\")\n\tq.Add(\"code\", r.FormValue(\"code\"))\n\tq.Add(\"redirect_url\", oauthCallback(c, \"untappd\").String())\n\tu.RawQuery = q.Encode()\n\tc.Infof(\"authorize URL: %s\", u.String())\n\tresp, err := urlfetch.Client(c).Get(u.String())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tbuf, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\toauthResponse := struct {\n\t\tResponse struct {\n\t\t\tAccessToken string `json:\"access_token\"`\n\t\t}\n\t}{}\n\terr = json.Unmarshal(buf, &oauthResponse)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%s: %s\", err.Error(), string(buf))\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\texpire := time.Now().AddDate(0, 0, 1)\n\thostname := appengine.DefaultVersionHostname(c)\n\traw := fmt.Sprintf(\"access_token=%s\", oauthResponse.Response.AccessToken)\n\tcookie := http.Cookie{\n\t\tName: \"access_token\",\n\t\tValue: oauthResponse.Response.AccessToken,\n\t\tPath: \"\/\",\n\t\tDomain: hostname,\n\t\tExpires: expire,\n\t\tRawExpires: expire.Format(time.UnixDate),\n\t\tMaxAge: 86400,\n\t\tSecure: false,\n\t\tHttpOnly: false,\n\t\tRaw: raw,\n\t\tUnparsed: []string{raw},\n\t}\n\thttp.SetCookie(w, &cookie)\n\thttp.Redirect(w, r, \"\/displayFeed\", http.StatusFound)\n}\n\nfunc displayFeedHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tuser, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tt, err := template.ParseFiles(\"templates\/feed.html\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tendpoint.Path = path.Join(endpoint.Path, \"checkin\/recent\")\n\ts := struct{ Name, FeedRequest string }{user.String(), endpoint.String()}\n\tif err := t.Execute(w, s); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc feedHandler(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t_, ok := userLoggedIn(c, r.URL, w)\n\tif !ok {\n\t\treturn\n\t}\n\tvar config Config\n\tvar err error\n\tif config, err = getConfig(c); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar untappdOath url.URL\n\tuntappdOath.Scheme = \"https\"\n\tuntappdOath.Host = \"untappd.com\"\n\tuntappdOath.Path = \"oauth\/authenticate\/\"\n\tq := untappdOath.Query()\n\tq.Add(\"client_id\", config.ClientId)\n\tq.Add(\"response_type\", \"code\")\n\tq.Add(\"redirect_url\", oauthCallback(c, \"untappd\").String())\n\tuntappdOath.RawQuery = q.Encode()\n\thttp.Redirect(w, r, untappdOath.String(), http.StatusFound)\n\treturn\n}\n\nfunc writeJson(w rest.ResponseWriter, v interface{}) {\n\tif err := w.WriteJson(v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc datastoreRestGet(c appengine.Context, k *datastore.Key, w rest.ResponseWriter, v interface{}) {\n\tif err := datastore.Get(c, k, v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\twriteJson(w, v)\n}\n\nfunc datastoreRestPut(c appengine.Context, k *datastore.Key, w rest.ResponseWriter, v interface{}) {\n\tif _, err := datastore.Put(c, k, v); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\twriteJson(w, v)\n}\n\nfunc putAdminConfigCtx(c appengine.Context, w rest.ResponseWriter, r *rest.Request) {\n\tvar config Config\n\terr := r.DecodeJsonPayload(&config)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"DecodeJsonPayload(): %s\", err)\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdatastoreRestPut(c, configKey(c), w, &config)\n}\n\nfunc putAdminConfig(w rest.ResponseWriter, r *rest.Request) {\n\tc := appengine.NewContext(r.Request)\n\tputAdminConfigCtx(c, w, r)\n}\n\nfunc getAdminConfigCtx(c appengine.Context, w rest.ResponseWriter) {\n\tvar config Config\n\tdatastoreRestGet(c, configKey(c), w, &config)\n}\n\nfunc getAdminConfig(w rest.ResponseWriter, r *rest.Request) {\n\tc := appengine.NewContext(r.Request)\n\tgetAdminConfigCtx(c, w)\n}\n\nfunc getUser(w rest.ResponseWriter, r *rest.Request) {\n\tid, err := strconv.Atoi(r.PathParam(\"id\"))\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r.Request)\n\tkey := datastore.NewKey(c, \"User\", \"\", int64(id), nil)\n\tvar user User\n\tdatastoreRestGet(c, key, w, &user)\n}\n\nfunc getAllUsers(w rest.ResponseWriter, r *rest.Request) {\n\tusers := []User{}\n\tc := appengine.NewContext(r.Request)\n\tq := datastore.NewQuery(\"User\")\n\tfor t := q.Run(c); ; {\n\t\tvar u User\n\t\tkey, err := t.Next(&u)\n\t\tif err == datastore.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tu.ID = key.IntID()\n\t\tusers = append(users, u)\n\t}\n\tw.WriteJson(&users)\n}\n\nfunc postUser(w rest.ResponseWriter, r *rest.Request) {\n\tuser := User{}\n\terr := user.DecodeJsonPayload(r)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r.Request)\n\tkey := datastore.NewIncompleteKey(c, \"User\", nil)\n\tnewKey, err := datastore.Put(c, key, &user)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.ID = newKey.IntID()\n\twriteJson(w, user)\n}\n\nfunc deleteUser(w rest.ResponseWriter, r *rest.Request) {\n\tc := appengine.NewContext(r.Request)\n\tid, err := strconv.Atoi(r.PathParam(\"id\"))\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tkey := datastore.NewKey(c, \"User\", \"\", int64(id), nil)\n\terr = datastore.Delete(c, key)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage charm\n\nimport (\n\t\"fmt\"\n\t\"launchpad.net\/juju-core\/charm\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\n\/\/ Deployer maintains a git repository tracking a series of charm versions,\n\/\/ and can install and upgrade charm deployments to the current version.\ntype Deployer struct {\n\tpath string\n\tcurrent *GitDir\n}\n\n\/\/ NewDeployer creates a new Deployer which stores its state in the supplied\n\/\/ directory.\nfunc NewDeployer(path string) *Deployer {\n\treturn &Deployer{\n\t\tpath: path,\n\t\tcurrent: NewGitDir(filepath.Join(path, \"current\")),\n\t}\n}\n\n\/\/ Stage causes subsequent calls to Deploy to deploy the supplied charm.\nfunc (d *Deployer) Stage(bun *charm.Bundle, url *charm.URL) error {\n\t\/\/ Read present state of current.\n\tif err := os.MkdirAll(d.path, 0755); err != nil {\n\t\treturn err\n\t}\n\tdefer d.collectOrphans()\n\tsrcExists, err := d.current.Exists()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif srcExists {\n\t\tprevURL, err := ReadCharmURL(d.current)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif *url == *prevURL {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Prepare a fresh repository for the update, using current's history\n\t\/\/ if it exists.\n\tupdatePath, err := d.newDir(\"update\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar repo *GitDir\n\tif srcExists {\n\t\trepo, err = d.current.Clone(updatePath)\n\t} else {\n\t\trepo = NewGitDir(updatePath)\n\t\terr = repo.Init()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write the desired new state and commit.\n\tif err = bun.ExpandTo(updatePath); err != nil {\n\t\treturn err\n\t}\n\tif err = WriteCharmURL(repo, url); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Snapshotf(\"Imported charm %q from %q.\", url, bun.Path); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Atomically rename fresh repository to current.\n\ttmplink := filepath.Join(updatePath, \"tmplink\")\n\tif err = os.Symlink(updatePath, tmplink); err != nil {\n\t\treturn err\n\t}\n\treturn os.Rename(tmplink, d.current.Path())\n}\n\n\/\/ Deploy deploys the current charm to the target directory.\nfunc (d *Deployer) Deploy(target *GitDir) (err error) {\n\tdefer func() {\n\t\tif err == ErrConflict {\n\t\t\tlog.Warningf(\"worker\/uniter\/charm: charm deployment completed with conflicts\")\n\t\t} else if err != nil {\n\t\t\terr = fmt.Errorf(\"charm deployment failed: %s\", err)\n\t\t\tlog.Errorf(\"worker\/uniter\/charm: %v\", err)\n\t\t} else {\n\t\t\tlog.Infof(\"worker\/uniter\/charm: charm deployment succeeded\")\n\t\t}\n\t}()\n\tif exists, err := d.current.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn fmt.Errorf(\"no charm set\")\n\t}\n\tif exists, err := target.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn d.install(target)\n\t}\n\treturn d.upgrade(target)\n}\n\n\/\/ install creates a new deployment of current, and atomically moves it to\n\/\/ target.\nfunc (d *Deployer) install(target *GitDir) error {\n\tdefer d.collectOrphans()\n\tlog.Infof(\"worker\/uniter\/charm: preparing new charm deployment\")\n\turl, err := ReadCharmURL(d.current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstallPath, err := d.newDir(\"install\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trepo := NewGitDir(installPath)\n\tif err = repo.Init(); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Pull(d.current); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Snapshotf(\"Deployed charm %q.\", url); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"worker\/uniter\/charm: deploying charm\")\n\treturn os.Rename(installPath, target.Path())\n}\n\n\/\/ upgrade pulls from current into target. If target has local changes, but\n\/\/ no conflicts, it will be snapshotted before any changes are made.\nfunc (d *Deployer) upgrade(target *GitDir) error {\n\tlog.Infof(\"worker\/uniter\/charm: preparing charm upgrade\")\n\turl, err := ReadCharmURL(d.current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := target.Init(); err != nil {\n\t\treturn err\n\t}\n\tif dirty, err := target.Dirty(); err != nil {\n\t\treturn err\n\t} else if dirty {\n\t\tif conflicted, err := target.Conflicted(); err != nil {\n\t\t\treturn err\n\t\t} else if !conflicted {\n\t\t\tlog.Infof(\"worker\/uniter\/charm: snapshotting dirty charm before upgrade\")\n\t\t\tif err = target.Snapshotf(\"Pre-upgrade snapshot.\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tlog.Infof(\"worker\/uniter\/charm: deploying charm\")\n\tif err := target.Pull(d.current); err != nil {\n\t\treturn err\n\t}\n\treturn target.Snapshotf(\"Upgraded charm to %q.\", url)\n}\n\n\/\/ collectOrphans deletes all repos in path except the one pointed to by current.\n\/\/ Errors are generally ignored; some are logged.\nfunc (d *Deployer) collectOrphans() {\n\tcurrent, err := os.Readlink(d.current.Path())\n\tif err != nil {\n\t\treturn\n\t}\n\tfilepath.Walk(d.path, func(repoPath string, fi os.FileInfo, err error) error {\n\t\tif err != nil && repoPath != d.path && repoPath != current {\n\t\t\tif err = os.RemoveAll(repoPath); err != nil {\n\t\t\t\tlog.Warningf(\"worker\/uniter\/charm: failed to remove orphan repo at %s: %s\", repoPath, err)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n}\n\n\/\/ newDir creates a new timestamped directory with the given prefix. It\n\/\/ assumes that the deployer will not need to create more than 10\n\/\/ directories in any given second.\nfunc (d *Deployer) newDir(prefix string) (string, error) {\n\tprefix = prefix + time.Now().Format(\"-%Y%m%d-%H%M%S\")\n\tvar err error\n\tvar prefixPath string\n\tfor i := 0; i < 10; i++ {\n\t\tprefixPath = path.Join(d.path, fmt.Sprintf(\"%s-%d\", prefix, i))\n\t\tif err = os.Mkdir(prefixPath, 0755); err == nil {\n\t\t\treturn prefixPath, nil\n\t\t} else if !os.IsExist(err) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to create %q: %v\", prefixPath, err)\n}\n<commit_msg>worker\/uniter\/charm: use ioutil.TempDir and correct Time.Format directive<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage charm\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/juju-core\/charm\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\n\/\/ Deployer maintains a git repository tracking a series of charm versions,\n\/\/ and can install and upgrade charm deployments to the current version.\ntype Deployer struct {\n\tpath string\n\tcurrent *GitDir\n}\n\n\/\/ NewDeployer creates a new Deployer which stores its state in the supplied\n\/\/ directory.\nfunc NewDeployer(path string) *Deployer {\n\treturn &Deployer{\n\t\tpath: path,\n\t\tcurrent: NewGitDir(filepath.Join(path, \"current\")),\n\t}\n}\n\n\/\/ Stage causes subsequent calls to Deploy to deploy the supplied charm.\nfunc (d *Deployer) Stage(bun *charm.Bundle, url *charm.URL) error {\n\t\/\/ Read present state of current.\n\tif err := os.MkdirAll(d.path, 0755); err != nil {\n\t\treturn err\n\t}\n\tdefer d.collectOrphans()\n\tsrcExists, err := d.current.Exists()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif srcExists {\n\t\tprevURL, err := ReadCharmURL(d.current)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif *url == *prevURL {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Prepare a fresh repository for the update, using current's history\n\t\/\/ if it exists.\n\tupdatePath, err := d.newDir(\"update\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar repo *GitDir\n\tif srcExists {\n\t\trepo, err = d.current.Clone(updatePath)\n\t} else {\n\t\trepo = NewGitDir(updatePath)\n\t\terr = repo.Init()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write the desired new state and commit.\n\tif err = bun.ExpandTo(updatePath); err != nil {\n\t\treturn err\n\t}\n\tif err = WriteCharmURL(repo, url); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Snapshotf(\"Imported charm %q from %q.\", url, bun.Path); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Atomically rename fresh repository to current.\n\ttmplink := filepath.Join(updatePath, \"tmplink\")\n\tif err = os.Symlink(updatePath, tmplink); err != nil {\n\t\treturn err\n\t}\n\treturn os.Rename(tmplink, d.current.Path())\n}\n\n\/\/ Deploy deploys the current charm to the target directory.\nfunc (d *Deployer) Deploy(target *GitDir) (err error) {\n\tdefer func() {\n\t\tif err == ErrConflict {\n\t\t\tlog.Warningf(\"worker\/uniter\/charm: charm deployment completed with conflicts\")\n\t\t} else if err != nil {\n\t\t\terr = fmt.Errorf(\"charm deployment failed: %s\", err)\n\t\t\tlog.Errorf(\"worker\/uniter\/charm: %v\", err)\n\t\t} else {\n\t\t\tlog.Infof(\"worker\/uniter\/charm: charm deployment succeeded\")\n\t\t}\n\t}()\n\tif exists, err := d.current.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn fmt.Errorf(\"no charm set\")\n\t}\n\tif exists, err := target.Exists(); err != nil {\n\t\treturn err\n\t} else if !exists {\n\t\treturn d.install(target)\n\t}\n\treturn d.upgrade(target)\n}\n\n\/\/ install creates a new deployment of current, and atomically moves it to\n\/\/ target.\nfunc (d *Deployer) install(target *GitDir) error {\n\tdefer d.collectOrphans()\n\tlog.Infof(\"worker\/uniter\/charm: preparing new charm deployment\")\n\turl, err := ReadCharmURL(d.current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstallPath, err := d.newDir(\"install\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trepo := NewGitDir(installPath)\n\tif err = repo.Init(); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Pull(d.current); err != nil {\n\t\treturn err\n\t}\n\tif err = repo.Snapshotf(\"Deployed charm %q.\", url); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"worker\/uniter\/charm: deploying charm\")\n\treturn os.Rename(installPath, target.Path())\n}\n\n\/\/ upgrade pulls from current into target. If target has local changes, but\n\/\/ no conflicts, it will be snapshotted before any changes are made.\nfunc (d *Deployer) upgrade(target *GitDir) error {\n\tlog.Infof(\"worker\/uniter\/charm: preparing charm upgrade\")\n\turl, err := ReadCharmURL(d.current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := target.Init(); err != nil {\n\t\treturn err\n\t}\n\tif dirty, err := target.Dirty(); err != nil {\n\t\treturn err\n\t} else if dirty {\n\t\tif conflicted, err := target.Conflicted(); err != nil {\n\t\t\treturn err\n\t\t} else if !conflicted {\n\t\t\tlog.Infof(\"worker\/uniter\/charm: snapshotting dirty charm before upgrade\")\n\t\t\tif err = target.Snapshotf(\"Pre-upgrade snapshot.\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tlog.Infof(\"worker\/uniter\/charm: deploying charm\")\n\tif err := target.Pull(d.current); err != nil {\n\t\treturn err\n\t}\n\treturn target.Snapshotf(\"Upgraded charm to %q.\", url)\n}\n\n\/\/ collectOrphans deletes all repos in path except the one pointed to by current.\n\/\/ Errors are generally ignored; some are logged.\nfunc (d *Deployer) collectOrphans() {\n\tcurrent, err := os.Readlink(d.current.Path())\n\tif err != nil {\n\t\treturn\n\t}\n\tfilepath.Walk(d.path, func(repoPath string, fi os.FileInfo, err error) error {\n\t\tif err != nil && repoPath != d.path && repoPath != current {\n\t\t\tif err = os.RemoveAll(repoPath); err != nil {\n\t\t\t\tlog.Warningf(\"worker\/uniter\/charm: failed to remove orphan repo at %s: %s\", repoPath, err)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n}\n\n\/\/ newDir creates a new timestamped directory with the given prefix. It\n\/\/ assumes that the deployer will not need to create more than 10\n\/\/ directories in any given second.\nfunc (d *Deployer) newDir(prefix string) (string, error) {\n\treturn ioutil.TempDir(d.path, prefix+time.Now().Format(\"-20060102-150405\"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-present Oliver Eilhard. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-license.\n\/\/ See http:\/\/olivere.mit-license.org\/license.txt for details.\n\npackage elastic\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n)\n\nfunc TestQueryStringQuery(t *testing.T) {\n\tq := NewQueryStringQuery(`this AND that OR thus`)\n\tq = q.DefaultField(\"content\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"query_string\":{\"default_field\":\"content\",\"query\":\"this AND that OR thus\"}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n\nfunc TestQueryStringQueryTimeZone(t *testing.T) {\n\tq := NewQueryStringQuery(`tweet_date:[2015-01-01 TO 2017-12-31]`)\n\tq = q.TimeZone(\"Europe\/Berlin\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"query_string\":{\"query\":\"tweet_date:[2015-01-01 TO 2017-12-31]\",\"time_zone\":\"Europe\/Berlin\"}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n<commit_msg>Add integration test for QueryStringQuery<commit_after>\/\/ Copyright 2012-present Oliver Eilhard. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-license.\n\/\/ See http:\/\/olivere.mit-license.org\/license.txt for details.\n\npackage elastic\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"testing\"\n)\n\nfunc TestQueryStringQuery(t *testing.T) {\n\tq := NewQueryStringQuery(`this AND that OR thus`)\n\tq = q.DefaultField(\"content\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"query_string\":{\"default_field\":\"content\",\"query\":\"this AND that OR thus\"}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n\nfunc TestQueryStringQueryTimeZone(t *testing.T) {\n\tq := NewQueryStringQuery(`tweet_date:[2015-01-01 TO 2017-12-31]`)\n\tq = q.TimeZone(\"Europe\/Berlin\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"query_string\":{\"query\":\"tweet_date:[2015-01-01 TO 2017-12-31]\",\"time_zone\":\"Europe\/Berlin\"}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n\nfunc TestQueryStringQueryIntegration(t *testing.T) {\n\tclient := setupTestClientAndCreateIndexAndAddDocs(t) \/\/, SetTraceLog(log.New(os.Stdout, \"\", log.LstdFlags)))\n\n\tq := NewQueryStringQuery(\"Golang\")\n\n\t\/\/ Match all should return all documents\n\tsearchResult, err := client.Search().\n\t\tIndex(testIndexName).\n\t\tQuery(q).\n\t\tPretty(true).\n\t\tDo(context.TODO())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif searchResult.Hits == nil {\n\t\tt.Errorf(\"expected SearchResult.Hits != nil; got nil\")\n\t}\n\tif got, want := searchResult.Hits.TotalHits, int64(1); got != want {\n\t\tt.Errorf(\"expected SearchResult.Hits.TotalHits = %d; got %d\", want, got)\n\t}\n\tif got, want := len(searchResult.Hits.Hits), 1; got != want {\n\t\tt.Errorf(\"expected len(SearchResult.Hits.Hits) = %d; got %d\", want, got)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package common contains common types, constants and functions used over different demoinfocs packages.\npackage common\n\nimport (\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/golang\/geo\/r3\"\n)\n\n\/\/ Team is the type for the various TeamXYZ constants.\ntype Team byte\n\n\/\/ Team constants give information about which team a player is on.\nconst (\n\tTeamUnassigned Team = iota\n\tTeamSpectators\n\tTeamTerrorists\n\tTeamCounterTerrorists\n)\n\n\/\/ DemoHeader contains information from a demo's header.\ntype DemoHeader struct {\n\tFilestamp string \/\/ aka. File-type, must be HL2DEMO\n\tProtocol int \/\/ Should be 4\n\tNetworkProtocol int \/\/ Not sure what this is for\n\tServerName string \/\/ Server's 'hostname' config value\n\tClientName string \/\/ Usually 'GOTV Demo'\n\tMapName string \/\/ E.g. de_cache, de_nuke, cs_office, etc.\n\tGameDirectory string \/\/ Usually 'csgo'\n\tPlaybackTime time.Duration \/\/ Demo duration in seconds (= PlaybackTicks \/ Server's tickrate)\n\tPlaybackTicks int \/\/ Game duration in ticks (= PlaybackTime * Server's tickrate)\n\tPlaybackFrames int \/\/ Amount of 'frames' aka demo-ticks recorded (= PlaybackTime * Demo's recording rate)\n\tSignonLength int \/\/ Length of the Signon package in bytes\n}\n\n\/\/ FrameRate returns the frame rate of the demo (frames \/ demo-ticks per second).\n\/\/ Not necessarily the tick-rate the server ran on during the game.\nfunc (h DemoHeader) FrameRate() float64 {\n\treturn float64(h.PlaybackFrames) \/ h.PlaybackTime.Seconds()\n}\n\n\/\/ FrameTime returns the time a frame \/ demo-tick takes in seconds.\n\/\/\n\/\/ Returns 0 if PlaybackTime or PlaybackFrames are 0 (corrupt demo headers).\nfunc (h DemoHeader) FrameTime() time.Duration {\n\tif h.PlaybackFrames == 0 {\n\t\treturn 0\n\t}\n\treturn time.Duration(h.PlaybackTime.Nanoseconds() \/ int64(h.PlaybackFrames))\n}\n\n\/\/ TickRate returns the tick-rate the server ran on during the game.\n\/\/ VolvoPlx128TixKTnxBye\nfunc (h DemoHeader) TickRate() float64 {\n\treturn float64(h.PlaybackTicks) \/ h.PlaybackTime.Seconds()\n}\n\n\/\/ TickTime returns the time a single tick takes in seconds.\nfunc (h DemoHeader) TickTime() time.Duration {\n\treturn time.Duration(h.PlaybackTime.Nanoseconds() \/ int64(h.PlaybackTicks))\n}\n\n\/\/ GrenadeProjectile is a grenade thrown intentionally by a player. It is used to track grenade projectile\n\/\/ positions between the time at which they are thrown and until they detonate.\ntype GrenadeProjectile struct {\n\tEntityID int\n\tWeapon EquipmentElement\n\tThrower *Player \/\/ Always seems to be the same as Owner, even if the grenade was picked up\n\tOwner *Player \/\/ Always seems to be the same as Thrower, even if the grenade was picked up\n\tPosition r3.Vector\n\tTrajectory []r3.Vector \/\/ List of all known locations of the grenade up to the current point\n\n\t\/\/ uniqueID is used to distinguish different grenades (which potentially have the same, reused entityID) from each other.\n\tuniqueID int64\n}\n\n\/\/ UniqueID returns the unique id of the grenade.\n\/\/ The unique id is a random int generated internally by this library and can be used to differentiate\n\/\/ grenades from each other. This is needed because demo-files reuse entity ids.\nfunc (g GrenadeProjectile) UniqueID() int64 {\n\treturn g.uniqueID\n}\n\n\/\/ NewGrenadeProjectile creates a grenade projectile and sets the Unique-ID.\n\/\/\n\/\/ Intended for internal use only.\nfunc NewGrenadeProjectile() *GrenadeProjectile {\n\treturn &GrenadeProjectile{uniqueID: rand.Int63()}\n}\n\n\/\/ Bomb tracks the bomb's position, and the player carrying it, if any.\ntype Bomb struct {\n\t\/\/ Intended for internal use only. Use Position() instead.\n\t\/\/ Contains the last location of the dropped or planted bomb.\n\tLastOnGroundPosition r3.Vector\n\tCarrier *Player\n}\n\n\/\/ Position returns the current position of the bomb.\n\/\/ This is either the position of the player holding it\n\/\/ or LastOnGroundPosition if it's dropped or planted.\nfunc (b Bomb) Position() r3.Vector {\n\tif b.Carrier != nil {\n\t\treturn b.Carrier.Position\n\t}\n\n\treturn b.LastOnGroundPosition\n}\n\n\/\/ TeamState contains a team's ID, score, clan name & country flag.\ntype TeamState struct {\n\tteam Team\n\tmembersCallback func(Team) []*Player\n\n\t\/\/ ID stays the same even after switching sides.\n\tID int\n\n\tScore int\n\tClanName string\n\n\t\/\/ Flag, e.g. DE, FR, etc.\n\t\/\/\n\t\/\/ Watch out, in some demos this is upper-case and in some lower-case.\n\tFlag string\n\n\t\/\/ Terrorist TeamState for CTs, CT TeamState for Terrorists\n\tOpponent *TeamState\n}\n\n\/\/ Team returns the team for which the TeamState contains data.\nfunc (ts TeamState) Team() Team {\n\treturn ts.team\n}\n\n\/\/ Members returns the members of the team.\nfunc (ts TeamState) Members() []*Player {\n\treturn ts.membersCallback(ts.team)\n}\n\n\/\/ CurrentEquipmentValue returns the cumulative value of all equipment currently owned by the members of the team.\nfunc (ts TeamState) CurrentEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.CurrentEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ RoundStartEquipmentValue returns the cumulative value of all equipment owned by the members of the team at the start of the current round.\nfunc (ts TeamState) RoundStartEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.RoundStartEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ FreezeTimeEndEquipmentValue returns the cumulative value of all equipment owned by the members of the team at the end of the freeze-time of the current round.\nfunc (ts TeamState) FreezeTimeEndEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.FreezetimeEndEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ CashSpentThisRound returns the total amount of cash spent by the whole team in the current round.\nfunc (ts TeamState) CashSpentThisRound() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.AdditionalPlayerInformation.CashSpentThisRound\n\t}\n\treturn\n}\n\n\/\/ CashSpentThisRound returns the total amount of cash spent by the whole team during the whole game up to the current point.\nfunc (ts TeamState) CashSpentTotal() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.AdditionalPlayerInformation.TotalCashSpent\n\t}\n\treturn\n}\n\n\/\/ NewTeamState creates a new TeamState with the given Team and members callback function.\nfunc NewTeamState(team Team, membersCallback func(Team) []*Player) TeamState {\n\treturn TeamState{\n\t\tteam: team,\n\t\tmembersCallback: membersCallback,\n\t}\n}\n<commit_msg>common: make godoc clearer by avoiding iota<commit_after>\/\/ Package common contains common types, constants and functions used over different demoinfocs packages.\npackage common\n\nimport (\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/golang\/geo\/r3\"\n)\n\n\/\/ Team is the type for the various TeamXYZ constants.\ntype Team byte\n\n\/\/ Team constants give information about which team a player is on.\nconst (\n\tTeamUnassigned Team = 0\n\tTeamSpectators Team = 1\n\tTeamTerrorists Team = 2\n\tTeamCounterTerrorists Team = 3\n)\n\n\/\/ DemoHeader contains information from a demo's header.\ntype DemoHeader struct {\n\tFilestamp string \/\/ aka. File-type, must be HL2DEMO\n\tProtocol int \/\/ Should be 4\n\tNetworkProtocol int \/\/ Not sure what this is for\n\tServerName string \/\/ Server's 'hostname' config value\n\tClientName string \/\/ Usually 'GOTV Demo'\n\tMapName string \/\/ E.g. de_cache, de_nuke, cs_office, etc.\n\tGameDirectory string \/\/ Usually 'csgo'\n\tPlaybackTime time.Duration \/\/ Demo duration in seconds (= PlaybackTicks \/ Server's tickrate)\n\tPlaybackTicks int \/\/ Game duration in ticks (= PlaybackTime * Server's tickrate)\n\tPlaybackFrames int \/\/ Amount of 'frames' aka demo-ticks recorded (= PlaybackTime * Demo's recording rate)\n\tSignonLength int \/\/ Length of the Signon package in bytes\n}\n\n\/\/ FrameRate returns the frame rate of the demo (frames \/ demo-ticks per second).\n\/\/ Not necessarily the tick-rate the server ran on during the game.\nfunc (h DemoHeader) FrameRate() float64 {\n\treturn float64(h.PlaybackFrames) \/ h.PlaybackTime.Seconds()\n}\n\n\/\/ FrameTime returns the time a frame \/ demo-tick takes in seconds.\n\/\/\n\/\/ Returns 0 if PlaybackTime or PlaybackFrames are 0 (corrupt demo headers).\nfunc (h DemoHeader) FrameTime() time.Duration {\n\tif h.PlaybackFrames == 0 {\n\t\treturn 0\n\t}\n\treturn time.Duration(h.PlaybackTime.Nanoseconds() \/ int64(h.PlaybackFrames))\n}\n\n\/\/ TickRate returns the tick-rate the server ran on during the game.\n\/\/ VolvoPlx128TixKTnxBye\nfunc (h DemoHeader) TickRate() float64 {\n\treturn float64(h.PlaybackTicks) \/ h.PlaybackTime.Seconds()\n}\n\n\/\/ TickTime returns the time a single tick takes in seconds.\nfunc (h DemoHeader) TickTime() time.Duration {\n\treturn time.Duration(h.PlaybackTime.Nanoseconds() \/ int64(h.PlaybackTicks))\n}\n\n\/\/ GrenadeProjectile is a grenade thrown intentionally by a player. It is used to track grenade projectile\n\/\/ positions between the time at which they are thrown and until they detonate.\ntype GrenadeProjectile struct {\n\tEntityID int\n\tWeapon EquipmentElement\n\tThrower *Player \/\/ Always seems to be the same as Owner, even if the grenade was picked up\n\tOwner *Player \/\/ Always seems to be the same as Thrower, even if the grenade was picked up\n\tPosition r3.Vector\n\tTrajectory []r3.Vector \/\/ List of all known locations of the grenade up to the current point\n\n\t\/\/ uniqueID is used to distinguish different grenades (which potentially have the same, reused entityID) from each other.\n\tuniqueID int64\n}\n\n\/\/ UniqueID returns the unique id of the grenade.\n\/\/ The unique id is a random int generated internally by this library and can be used to differentiate\n\/\/ grenades from each other. This is needed because demo-files reuse entity ids.\nfunc (g GrenadeProjectile) UniqueID() int64 {\n\treturn g.uniqueID\n}\n\n\/\/ NewGrenadeProjectile creates a grenade projectile and sets the Unique-ID.\n\/\/\n\/\/ Intended for internal use only.\nfunc NewGrenadeProjectile() *GrenadeProjectile {\n\treturn &GrenadeProjectile{uniqueID: rand.Int63()}\n}\n\n\/\/ Bomb tracks the bomb's position, and the player carrying it, if any.\ntype Bomb struct {\n\t\/\/ Intended for internal use only. Use Position() instead.\n\t\/\/ Contains the last location of the dropped or planted bomb.\n\tLastOnGroundPosition r3.Vector\n\tCarrier *Player\n}\n\n\/\/ Position returns the current position of the bomb.\n\/\/ This is either the position of the player holding it\n\/\/ or LastOnGroundPosition if it's dropped or planted.\nfunc (b Bomb) Position() r3.Vector {\n\tif b.Carrier != nil {\n\t\treturn b.Carrier.Position\n\t}\n\n\treturn b.LastOnGroundPosition\n}\n\n\/\/ TeamState contains a team's ID, score, clan name & country flag.\ntype TeamState struct {\n\tteam Team\n\tmembersCallback func(Team) []*Player\n\n\t\/\/ ID stays the same even after switching sides.\n\tID int\n\n\tScore int\n\tClanName string\n\n\t\/\/ Flag, e.g. DE, FR, etc.\n\t\/\/\n\t\/\/ Watch out, in some demos this is upper-case and in some lower-case.\n\tFlag string\n\n\t\/\/ Terrorist TeamState for CTs, CT TeamState for Terrorists\n\tOpponent *TeamState\n}\n\n\/\/ Team returns the team for which the TeamState contains data.\nfunc (ts TeamState) Team() Team {\n\treturn ts.team\n}\n\n\/\/ Members returns the members of the team.\nfunc (ts TeamState) Members() []*Player {\n\treturn ts.membersCallback(ts.team)\n}\n\n\/\/ CurrentEquipmentValue returns the cumulative value of all equipment currently owned by the members of the team.\nfunc (ts TeamState) CurrentEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.CurrentEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ RoundStartEquipmentValue returns the cumulative value of all equipment owned by the members of the team at the start of the current round.\nfunc (ts TeamState) RoundStartEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.RoundStartEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ FreezeTimeEndEquipmentValue returns the cumulative value of all equipment owned by the members of the team at the end of the freeze-time of the current round.\nfunc (ts TeamState) FreezeTimeEndEquipmentValue() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.FreezetimeEndEquipmentValue\n\t}\n\treturn\n}\n\n\/\/ CashSpentThisRound returns the total amount of cash spent by the whole team in the current round.\nfunc (ts TeamState) CashSpentThisRound() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.AdditionalPlayerInformation.CashSpentThisRound\n\t}\n\treturn\n}\n\n\/\/ CashSpentThisRound returns the total amount of cash spent by the whole team during the whole game up to the current point.\nfunc (ts TeamState) CashSpentTotal() (value int) {\n\tfor _, pl := range ts.Members() {\n\t\tvalue += pl.AdditionalPlayerInformation.TotalCashSpent\n\t}\n\treturn\n}\n\n\/\/ NewTeamState creates a new TeamState with the given Team and members callback function.\nfunc NewTeamState(team Team, membersCallback func(Team) []*Player) TeamState {\n\treturn TeamState{\n\t\tteam: team,\n\t\tmembersCallback: membersCallback,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Serge Gebhardt. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage acd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\n\t\"errors\"\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ NodesService provides access to the nodes in the Amazon Cloud Drive API.\n\/\/\n\/\/ See: https:\/\/developer.amazon.com\/public\/apis\/experience\/cloud-drive\/content\/nodes\ntype NodesService struct {\n\tclient *Client\n}\n\n\/\/ Gets the root folder of the Amazon Cloud Drive.\nfunc (s *NodesService) GetRoot() (*Folder, *http.Response, error) {\n\topts := &NodeListOptions{Filters: \"kind:FOLDER AND isRoot:true\"}\n\n\troots, resp, err := s.GetNodes(opts)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif len(roots) < 1 {\n\t\treturn nil, resp, errors.New(\"No root found\")\n\t}\n\n\treturn &Folder{roots[0]}, resp, nil\n}\n\n\/\/ Gets the list of all nodes.\nfunc (s *NodesService) GetAllNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\treturn s.listAllNodes(\"nodes\", opts)\n}\n\n\/\/ Gets a list of nodes, up until the limit (either default or the one set in opts).\nfunc (s *NodesService) GetNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\treturn s.listNodes(\"nodes\", opts)\n}\n\nfunc (s *NodesService) listAllNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\t\/\/ Need opts to maintain state (NodeListOptions.reachedEnd)\n\tif opts == nil {\n\t\topts = &NodeListOptions{}\n\t}\n\n\tresult := make([]*Node, 0, 200)\n\n\tfor {\n\t\tnodes, resp, err := s.listNodes(url, opts)\n\t\tif err != nil {\n\t\t\treturn result, resp, err\n\t\t}\n\t\tif nodes == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tresult = append(result, nodes...)\n\t}\n\n\treturn result, nil, nil\n}\n\nfunc (s *NodesService) listNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\tif opts.reachedEnd {\n\t\treturn nil, nil, nil\n\t}\n\n\turl, err := addOptions(url, opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnodeList := &nodeListInternal{}\n\tresp, err := s.client.Do(req, nodeList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif nodeList.NextToken != nil {\n\t\topts.StartToken = *nodeList.NextToken\n\t} else {\n\t\topts.reachedEnd = true\n\t}\n\n\tnodes := nodeList.Data\n\t\/\/ iterate over index since iterating over value would create a copy\n\tfor i := range nodes {\n\t\tnodes[i].service = s\n\t}\n\n\treturn nodes, resp, nil\n}\n\ntype nodeListInternal struct {\n\tCount *uint64 `json:\"count\"`\n\tNextToken *string `json:\"nextToken\"`\n\tData []*Node `json:\"data\"`\n}\n\n\/\/ Node represents a digital asset on the Amazon Cloud Drive, including files\n\/\/ and folders, in a parent-child relationship. A node contains only metadata\n\/\/ (e.g. folder) or it contains metadata and content (e.g. file).\ntype Node struct {\n\tId *string `json:\"id\"`\n\tName *string `json:\"name\"`\n\tKind *string `json:\"kind\"`\n\n\tservice *NodesService\n}\n\nfunc (n *Node) IsFile() bool {\n\treturn n.Kind != nil && *n.Kind == \"FILE\"\n}\n\nfunc (n *Node) IsFolder() bool {\n\treturn n.Kind != nil && *n.Kind == \"FOLDER\"\n}\n\nfunc (n *Node) Typed() interface{} {\n\tif n.IsFile() {\n\t\treturn &File{n}\n\t}\n\n\tif n.IsFolder() {\n\t\treturn &Folder{n}\n\t}\n\n\treturn n\n}\n\n\/\/ Represents a file and contains only metadata.\ntype File struct {\n\t*Node\n}\n\n\/\/ Represents a folder and contains only metadata.\ntype Folder struct {\n\t*Node\n}\n\n\/\/ Gets the list of all children.\nfunc (f *Folder) GetAllChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\turl := fmt.Sprintf(\"nodes\/%s\/children\", *f.Id)\n\treturn f.service.listAllNodes(url, opts)\n}\n\n\/\/ Gets a list of children, up until the limit (either default or the one set in opts).\nfunc (f *Folder) GetChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\turl := fmt.Sprintf(\"nodes\/%s\/children\", *f.Id)\n\treturn f.service.listNodes(url, opts)\n}\n\n\/\/ Gets the subfolder by name. It is an error if not exactly one subfolder is found.\nfunc (f *Folder) GetFolder(name string) (*Folder, *http.Response, error) {\n\tn, resp, err := f.GetNode(name)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tres, ok := n.Typed().(*Folder)\n\tif !ok {\n\t\terr := errors.New(fmt.Sprintf(\"Node '%s' is not a folder\", name))\n\t\treturn nil, resp, err\n\t}\n\n\treturn res, resp, nil\n}\n\n\/\/ Gets the file by name. It is an error if not exactly one file is found.\nfunc (f *Folder) GetFile(name string) (*File, *http.Response, error) {\n\tn, resp, err := f.GetNode(name)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tres, ok := n.Typed().(*File)\n\tif !ok {\n\t\terr := errors.New(fmt.Sprintf(\"Node '%s' is not a file\", name))\n\t\treturn nil, resp, err\n\t}\n\n\treturn res, resp, nil\n}\n\n\/\/ Gets the node by name. It is an error if not exactly one node is found.\nfunc (f *Folder) GetNode(name string) (*Node, *http.Response, error) {\n\tfilter := \"parents:\" + *f.Id + \" AND name:\" + name\n\topts := &NodeListOptions{Filters: filter}\n\n\tnodes, resp, err := f.service.GetNodes(opts)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif len(nodes) < 1 {\n\t\terr := errors.New(fmt.Sprintf(\"No node '%s' found\", name))\n\t\treturn nil, resp, err\n\t}\n\tif len(nodes) > 1 {\n\t\terr := errors.New(fmt.Sprintf(\"Too many nodes '%s' found (%v)\", name, len(nodes)))\n\t\treturn nil, resp, err\n\t}\n\n\treturn nodes[0], resp, nil\n}\n\n\/\/ WalkNodes walks the given node hierarchy, getting each node along the way, and returns\n\/\/ the deepest node. If an error occurs, returns the furthest successful node and the list\n\/\/ of HTTP responses.\nfunc (f *Folder) WalkNodes(names ...string) (*Node, []*http.Response, error) {\n\tresps := make([]*http.Response, 0, len(names))\n\n\tif len(names) == 0 {\n\t\treturn f.Node, resps, nil\n\t}\n\n\t\/\/ process each node except the last one\n\tfp := f\n\tfor _, name := range names[:len(names)-1] {\n\t\tfn, resp, err := fp.GetFolder(name)\n\t\tresps = append(resps, resp)\n\t\tif err != nil {\n\t\t\treturn fp.Node, resps, err\n\t\t}\n\n\t\tfp = fn\n\t}\n\n\t\/\/ process the last node\n\tnl, resp, err := fp.GetNode(names[len(names)-1])\n\tresps = append(resps, resp)\n\tif err != nil {\n\t\treturn fp.Node, resps, err\n\t}\n\n\treturn nl, resps, nil\n}\n\n\/\/ NodeListOptions holds the options when getting a list of nodes, such as the filter,\n\/\/ sorting and pagination.\ntype NodeListOptions struct {\n\tLimit uint `url:\"limit,omitempty\"`\n\tFilters string `url:\"filters,omitempty\"`\n\tSort string `url:\"sort,omitempty\"`\n\n\t\/\/ Token where to start for next page (internal)\n\tStartToken string `url:\"startToken,omitempty\"`\n\treachedEnd bool\n}\n\n\/\/ addOptions adds the parameters in opts as URL query parameters to s. opts\n\/\/ must be a struct whose fields may contain \"url\" tags.\nfunc addOptions(s string, opts interface{}) (string, error) {\n\tv := reflect.ValueOf(opts)\n\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\treturn s, nil\n\t}\n\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tqs, err := query.Values(opts)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tu.RawQuery = qs.Encode()\n\treturn u.String(), nil\n}\n<commit_msg>Parse size from JSON<commit_after>\/\/ Copyright (c) 2015 Serge Gebhardt. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage acd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\n\t\"errors\"\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ NodesService provides access to the nodes in the Amazon Cloud Drive API.\n\/\/\n\/\/ See: https:\/\/developer.amazon.com\/public\/apis\/experience\/cloud-drive\/content\/nodes\ntype NodesService struct {\n\tclient *Client\n}\n\n\/\/ Gets the root folder of the Amazon Cloud Drive.\nfunc (s *NodesService) GetRoot() (*Folder, *http.Response, error) {\n\topts := &NodeListOptions{Filters: \"kind:FOLDER AND isRoot:true\"}\n\n\troots, resp, err := s.GetNodes(opts)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif len(roots) < 1 {\n\t\treturn nil, resp, errors.New(\"No root found\")\n\t}\n\n\treturn &Folder{roots[0]}, resp, nil\n}\n\n\/\/ Gets the list of all nodes.\nfunc (s *NodesService) GetAllNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\treturn s.listAllNodes(\"nodes\", opts)\n}\n\n\/\/ Gets a list of nodes, up until the limit (either default or the one set in opts).\nfunc (s *NodesService) GetNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\treturn s.listNodes(\"nodes\", opts)\n}\n\nfunc (s *NodesService) listAllNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\t\/\/ Need opts to maintain state (NodeListOptions.reachedEnd)\n\tif opts == nil {\n\t\topts = &NodeListOptions{}\n\t}\n\n\tresult := make([]*Node, 0, 200)\n\n\tfor {\n\t\tnodes, resp, err := s.listNodes(url, opts)\n\t\tif err != nil {\n\t\t\treturn result, resp, err\n\t\t}\n\t\tif nodes == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tresult = append(result, nodes...)\n\t}\n\n\treturn result, nil, nil\n}\n\nfunc (s *NodesService) listNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\tif opts.reachedEnd {\n\t\treturn nil, nil, nil\n\t}\n\n\turl, err := addOptions(url, opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnodeList := &nodeListInternal{}\n\tresp, err := s.client.Do(req, nodeList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif nodeList.NextToken != nil {\n\t\topts.StartToken = *nodeList.NextToken\n\t} else {\n\t\topts.reachedEnd = true\n\t}\n\n\tnodes := nodeList.Data\n\t\/\/ iterate over index since iterating over value would create a copy\n\tfor i := range nodes {\n\t\tnodes[i].service = s\n\t}\n\n\treturn nodes, resp, nil\n}\n\ntype nodeListInternal struct {\n\tCount *uint64 `json:\"count\"`\n\tNextToken *string `json:\"nextToken\"`\n\tData []*Node `json:\"data\"`\n}\n\n\/\/ Node represents a digital asset on the Amazon Cloud Drive, including files\n\/\/ and folders, in a parent-child relationship. A node contains only metadata\n\/\/ (e.g. folder) or it contains metadata and content (e.g. file).\ntype Node struct {\n\tId *string `json:\"id\"`\n\tName *string `json:\"name\"`\n\tKind *string `json:\"kind\"`\n\tContentProperties *struct {\n\t\tSize *uint64 `json:\"size\"`\n\t} `json:\"contentProperties\"`\n\tservice *NodesService\n}\n\nfunc (n *Node) IsFile() bool {\n\treturn n.Kind != nil && *n.Kind == \"FILE\"\n}\n\nfunc (n *Node) IsFolder() bool {\n\treturn n.Kind != nil && *n.Kind == \"FOLDER\"\n}\n\nfunc (n *Node) Typed() interface{} {\n\tif n.IsFile() {\n\t\treturn &File{n}\n\t}\n\n\tif n.IsFolder() {\n\t\treturn &Folder{n}\n\t}\n\n\treturn n\n}\n\n\/\/ Represents a file and contains only metadata.\ntype File struct {\n\t*Node\n}\n\n\/\/ Represents a folder and contains only metadata.\ntype Folder struct {\n\t*Node\n}\n\n\/\/ Gets the list of all children.\nfunc (f *Folder) GetAllChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\turl := fmt.Sprintf(\"nodes\/%s\/children\", *f.Id)\n\treturn f.service.listAllNodes(url, opts)\n}\n\n\/\/ Gets a list of children, up until the limit (either default or the one set in opts).\nfunc (f *Folder) GetChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {\n\turl := fmt.Sprintf(\"nodes\/%s\/children\", *f.Id)\n\treturn f.service.listNodes(url, opts)\n}\n\n\/\/ Gets the subfolder by name. It is an error if not exactly one subfolder is found.\nfunc (f *Folder) GetFolder(name string) (*Folder, *http.Response, error) {\n\tn, resp, err := f.GetNode(name)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tres, ok := n.Typed().(*Folder)\n\tif !ok {\n\t\terr := errors.New(fmt.Sprintf(\"Node '%s' is not a folder\", name))\n\t\treturn nil, resp, err\n\t}\n\n\treturn res, resp, nil\n}\n\n\/\/ Gets the file by name. It is an error if not exactly one file is found.\nfunc (f *Folder) GetFile(name string) (*File, *http.Response, error) {\n\tn, resp, err := f.GetNode(name)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tres, ok := n.Typed().(*File)\n\tif !ok {\n\t\terr := errors.New(fmt.Sprintf(\"Node '%s' is not a file\", name))\n\t\treturn nil, resp, err\n\t}\n\n\treturn res, resp, nil\n}\n\n\/\/ Gets the node by name. It is an error if not exactly one node is found.\nfunc (f *Folder) GetNode(name string) (*Node, *http.Response, error) {\n\tfilter := \"parents:\" + *f.Id + \" AND name:\" + name\n\topts := &NodeListOptions{Filters: filter}\n\n\tnodes, resp, err := f.service.GetNodes(opts)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tif len(nodes) < 1 {\n\t\terr := errors.New(fmt.Sprintf(\"No node '%s' found\", name))\n\t\treturn nil, resp, err\n\t}\n\tif len(nodes) > 1 {\n\t\terr := errors.New(fmt.Sprintf(\"Too many nodes '%s' found (%v)\", name, len(nodes)))\n\t\treturn nil, resp, err\n\t}\n\n\treturn nodes[0], resp, nil\n}\n\n\/\/ WalkNodes walks the given node hierarchy, getting each node along the way, and returns\n\/\/ the deepest node. If an error occurs, returns the furthest successful node and the list\n\/\/ of HTTP responses.\nfunc (f *Folder) WalkNodes(names ...string) (*Node, []*http.Response, error) {\n\tresps := make([]*http.Response, 0, len(names))\n\n\tif len(names) == 0 {\n\t\treturn f.Node, resps, nil\n\t}\n\n\t\/\/ process each node except the last one\n\tfp := f\n\tfor _, name := range names[:len(names)-1] {\n\t\tfn, resp, err := fp.GetFolder(name)\n\t\tresps = append(resps, resp)\n\t\tif err != nil {\n\t\t\treturn fp.Node, resps, err\n\t\t}\n\n\t\tfp = fn\n\t}\n\n\t\/\/ process the last node\n\tnl, resp, err := fp.GetNode(names[len(names)-1])\n\tresps = append(resps, resp)\n\tif err != nil {\n\t\treturn fp.Node, resps, err\n\t}\n\n\treturn nl, resps, nil\n}\n\n\/\/ NodeListOptions holds the options when getting a list of nodes, such as the filter,\n\/\/ sorting and pagination.\ntype NodeListOptions struct {\n\tLimit uint `url:\"limit,omitempty\"`\n\tFilters string `url:\"filters,omitempty\"`\n\tSort string `url:\"sort,omitempty\"`\n\n\t\/\/ Token where to start for next page (internal)\n\tStartToken string `url:\"startToken,omitempty\"`\n\treachedEnd bool\n}\n\n\/\/ addOptions adds the parameters in opts as URL query parameters to s. opts\n\/\/ must be a struct whose fields may contain \"url\" tags.\nfunc addOptions(s string, opts interface{}) (string, error) {\n\tv := reflect.ValueOf(opts)\n\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\treturn s, nil\n\t}\n\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tqs, err := query.Values(opts)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tu.RawQuery = qs.Encode()\n\treturn u.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package signals\n\nimport (\n\t\"encoding\/gob\"\n\t\"math\/rand\"\n\t\"sync\"\n)\n\nfunc init() {\n\tgob.Register(Noise{})\n}\n\n\/\/ Noise is a deterministic random Signal, white noise.\n\/\/ it always produces the same y value for the same x value, (for the same Noise) but random otherwise.\n\/\/ determinism allows caching even for this type\ntype Noise struct {\n\tgenerator rand.Rand\n}\n\nfunc NewNoise() Noise {\n\treturn Noise{*rand.New(rand.NewSource(rand.Int63()))} \/\/ give each noise, very probably, a different generator source\n}\n\nvar noiseMutex = &sync.Mutex{}\n\nfunc (s Noise) property(p x) (v y) {\n\tnoiseMutex.Lock()\n\trand.Seed(int64(p)) \/\/ default generator set to the same seed for the same x\n\ts.generator.Seed(int64(rand.Int63())) \/\/ Noise sets its generator's seed to a random number from default generator, which is the same at a given x, so the same random numbers generated from it, for the same x, but different for different Noises.\n\tv += y(s.generator.Int63())\n\tv -= y(s.generator.Int63())\n\tnoiseMutex.Unlock()\n\treturn\n}\n<commit_msg>fix noise thread safety<commit_after>package signals\n\nimport (\n\t\"encoding\/gob\"\n\t\"math\/rand\"\n\t\"sync\"\n)\n\nfunc init() {\n\tgob.Register(Noise{})\n}\n\n\/\/ Noise is a deterministic random Signal, white noise.\n\/\/ it always produces the same y value for the same x value, (for the same Noise) but random otherwise.\n\/\/ determinism allows caching even for this type\ntype Noise struct {\n\tgenerator rand.Rand\n}\n\nfunc NewNoise() Noise {\n\treturn Noise{*rand.New(rand.NewSource(rand.Int63()))} \/\/ give each noise, very probably, a different generator source\n}\n\nvar noiseMutex = &sync.Mutex{}\n\nfunc (s Noise) property(p x) (v y) {\n\tnoiseMutex.Lock()\n\trand.Seed(int64(p)) \/\/ default generator set to the same seed for the same x\n\ts.generator.Seed(int64(rand.Int63())) \/\/ Noise sets its generator's seed to a random number from default generator, which is the same at a given x, so the same random numbers generated from it, for the same x, but different for different Noises.\n\tv = y(s.generator.Int63())\n\tv -= y(s.generator.Int63())\n\tnoiseMutex.Unlock()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package webrdb\n\nimport (\n\t\"github.com\/kirillDanshin\/fap\/lib\/templates\/webrdb\/qtpl\"\n\t\"github.com\/kirillDanshin\/fap\/lib\/types\"\n\t\"github.com\/kirillDanshin\/myutils\"\n)\n\n\/\/ Generate returns a map with generated\n\/\/ package structure\nfunc Generate(packageName string) (types.PackageStructure, error) {\n\tif packageName == \"\" {\n\t\tpackageName = \"main\"\n\t}\n\tpath := \"main.go\"\n\tif packageName != \"main\" {\n\t\tpath = myutils.Concat(packageName, \"\/web.go\")\n\t}\n\tstructure := types.PackageStructure{\n\t\t\"homeHandler.go\": qtpl.HomeHandler(packageName),\n\t\tpath: qtpl.FastHTTPRouter(packageName),\n\t}\n\treturn structure, nil\n}\n<commit_msg>fix generator<commit_after>package webrdb\n\nimport (\n\t\"github.com\/kirillDanshin\/fap\/lib\/templates\/webrdb\/qtpl\"\n\t\"github.com\/kirillDanshin\/fap\/lib\/types\"\n\t\"github.com\/kirillDanshin\/myutils\"\n)\n\n\/\/ Generate returns a map with generated\n\/\/ package structure\nfunc Generate(packageName string) (types.PackageStructure, error) {\n\tif packageName == \"\" {\n\t\tpackageName = \"main\"\n\t}\n\tpath := \"main.go\"\n\tif packageName != \"main\" {\n\t\tpath = myutils.Concat(packageName, \"\/web.go\")\n\t}\n\tstructure := types.PackageStructure{\n\t\t\"homeHandler.go\": qtpl.HomeHandler(packageName),\n\t\tpath: qtpl.FastHTTPRouter(packageName),\n\t\t\"getRouteChain\": qtpl.GetRouteChain(packageName),\n\t}\n\treturn structure, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/kyokomi\/emoji\"\n\tapns \"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"github.com\/sideshow\/apns2\/payload\"\n)\n\ntype AppleNotificationServer struct {\n\tApplePushSettings ApplePushSettings\n\tAppleClient *apns.Client\n}\n\nfunc NewAppleNotificationServer(settings ApplePushSettings) NotificationServer {\n\treturn &AppleNotificationServer{ApplePushSettings: settings}\n}\n\nfunc (me *AppleNotificationServer) Initialize() bool {\n\tLogInfo(fmt.Sprintf(\"Initializing apple notification server for type=%v\", me.ApplePushSettings.Type))\n\n\tif len(me.ApplePushSettings.ApplePushCertPrivate) > 0 {\n\t\tappleCert, appleCertErr := certificate.FromPemFile(me.ApplePushSettings.ApplePushCertPrivate, me.ApplePushSettings.ApplePushCertPassword)\n\t\tif appleCertErr != nil {\n\t\t\tLogCritical(fmt.Sprintf(\"Failed to load the apple pem cert err=%v for type=%v\", appleCertErr, me.ApplePushSettings.Type))\n\t\t\treturn false\n\t\t}\n\n\t\tif me.ApplePushSettings.ApplePushUseDevelopment {\n\t\t\tme.AppleClient = apns.NewClient(appleCert).Development()\n\t\t} else {\n\t\t\tme.AppleClient = apns.NewClient(appleCert).Production()\n\t\t}\n\n\t\treturn true\n\t} else {\n\t\tLogError(fmt.Sprintf(\"Apple push notifications not configured. Missing ApplePushCertPrivate. for type=%v\", me.ApplePushSettings.Type))\n\t\treturn false\n\t}\n}\n\nfunc (me *AppleNotificationServer) SendNotification(msg *PushNotification) PushResponse {\n\tnotification := &apns.Notification{}\n\tnotification.DeviceToken = msg.DeviceId\n\tpayload := payload.NewPayload()\n\tnotification.Payload = payload\n\tnotification.Topic = me.ApplePushSettings.ApplePushTopic\n\n\tpayload.Badge(msg.Badge)\n\n\tif msg.Type != PUSH_TYPE_CLEAR {\n\t\tpayload.Category(msg.Category)\n\t\tpayload.Sound(\"default\")\n\t\tpayload.Custom(\"version\", msg.Version)\n\n\t\tif len(msg.ChannelName) > 0 && msg.Version == \"v2\" {\n\t\t\tpayload.AlertTitle(msg.ChannelName)\n\t\t\tpayload.AlertBody(emoji.Sprint(msg.Message))\n\t\t\tpayload.Custom(\"channel_name\", msg.ChannelName)\n\t\t} else {\n\t\t\tpayload.Alert(emoji.Sprint(msg.Message))\n\n\t\t\tif len(msg.ChannelName) > 0 {\n\t\t\t\tpayload.Custom(\"channel_name\", msg.ChannelName)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpayload.ContentAvailable()\n\t}\n\n\tpayload.Custom(\"type\", msg.Type)\n\n\tif len(msg.ChannelId) > 0 {\n\t\tpayload.Custom(\"channel_id\", msg.ChannelId)\n\t\tpayload.ThreadID(msg.ChannelId)\n\t}\n\n\tif len(msg.TeamId) > 0 {\n\t\tpayload.Custom(\"team_id\", msg.TeamId)\n\t}\n\n\tif len(msg.SenderId) > 0 {\n\t\tpayload.Custom(\"sender_id\", msg.SenderId)\n\t}\n\n\tif len(msg.PostId) > 0 {\n\t\tpayload.Custom(\"post_id\", msg.PostId)\n\t}\n\n\tif len(msg.RootId) > 0 {\n\t\tpayload.Custom(\"root_id\", msg.RootId)\n\t}\n\n\tif len(msg.OverrideUsername) > 0 {\n\t\tpayload.Custom(\"override_username\", msg.OverrideUsername)\n\t}\n\n\tif len(msg.OverrideIconUrl) > 0 {\n\t\tpayload.Custom(\"override_icon_url\", msg.OverrideIconUrl)\n\t}\n\n\tif len(msg.FromWebhook) > 0 {\n\t\tpayload.Custom(\"from_webhook\", msg.FromWebhook)\n\t}\n\n\tif me.AppleClient != nil {\n\t\tLogInfo(fmt.Sprintf(\"Sending apple push notification for device=%v and type=%v\", me.ApplePushSettings.Type, msg.Type))\n\t\tstart := time.Now()\n\t\tres, err := me.AppleClient.Push(notification)\n\t\tobserveAPNSResponse(time.Since(start).Seconds())\n\t\tif err != nil {\n\t\t\tLogError(fmt.Sprintf(\"Failed to send apple push sid=%v did=%v err=%v type=%v\", msg.ServerId, msg.DeviceId, err, me.ApplePushSettings.Type))\n\t\t\tincrementFailure(me.ApplePushSettings.Type)\n\t\t\treturn NewErrorPushResponse(\"unknown transport error\")\n\t\t}\n\n\t\tif !res.Sent() {\n\t\t\tif res.Reason == \"BadDeviceToken\" || res.Reason == \"Unregistered\" || res.Reason == \"MissingDeviceToken\" || res.Reason == \"DeviceTokenNotForTopic\" {\n\t\t\t\tLogInfo(fmt.Sprintf(\"Failed to send apple push sending remove code res ApnsID=%v reason=%v code=%v type=%v\", res.ApnsID, res.Reason, res.StatusCode, me.ApplePushSettings.Type))\n\t\t\t\tincrementRemoval(me.ApplePushSettings.Type)\n\t\t\t\treturn NewRemovePushResponse()\n\t\t\t}\n\n\t\t\tLogError(fmt.Sprintf(\"Failed to send apple push with res ApnsID=%v reason=%v code=%v type=%v\", res.ApnsID, res.Reason, res.StatusCode, me.ApplePushSettings.Type))\n\t\t\tincrementFailure(me.ApplePushSettings.Type)\n\t\t\treturn NewErrorPushResponse(\"unknown send response error\")\n\t\t}\n\t}\n\n\tincrementSuccess(me.ApplePushSettings.Type)\n\treturn NewOkPushResponse()\n}\n<commit_msg>Set apple notification to allow mutations<commit_after>\/\/ Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/kyokomi\/emoji\"\n\tapns \"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"github.com\/sideshow\/apns2\/payload\"\n)\n\ntype AppleNotificationServer struct {\n\tApplePushSettings ApplePushSettings\n\tAppleClient *apns.Client\n}\n\nfunc NewAppleNotificationServer(settings ApplePushSettings) NotificationServer {\n\treturn &AppleNotificationServer{ApplePushSettings: settings}\n}\n\nfunc (me *AppleNotificationServer) Initialize() bool {\n\tLogInfo(fmt.Sprintf(\"Initializing apple notification server for type=%v\", me.ApplePushSettings.Type))\n\n\tif len(me.ApplePushSettings.ApplePushCertPrivate) > 0 {\n\t\tappleCert, appleCertErr := certificate.FromPemFile(me.ApplePushSettings.ApplePushCertPrivate, me.ApplePushSettings.ApplePushCertPassword)\n\t\tif appleCertErr != nil {\n\t\t\tLogCritical(fmt.Sprintf(\"Failed to load the apple pem cert err=%v for type=%v\", appleCertErr, me.ApplePushSettings.Type))\n\t\t\treturn false\n\t\t}\n\n\t\tif me.ApplePushSettings.ApplePushUseDevelopment {\n\t\t\tme.AppleClient = apns.NewClient(appleCert).Development()\n\t\t} else {\n\t\t\tme.AppleClient = apns.NewClient(appleCert).Production()\n\t\t}\n\n\t\treturn true\n\t} else {\n\t\tLogError(fmt.Sprintf(\"Apple push notifications not configured. Missing ApplePushCertPrivate. for type=%v\", me.ApplePushSettings.Type))\n\t\treturn false\n\t}\n}\n\nfunc (me *AppleNotificationServer) SendNotification(msg *PushNotification) PushResponse {\n\tnotification := &apns.Notification{}\n\tnotification.DeviceToken = msg.DeviceId\n\tpayload := payload.NewPayload()\n\tnotification.Payload = payload\n\tnotification.Topic = me.ApplePushSettings.ApplePushTopic\n\n\tpayload.Badge(msg.Badge)\n\n\tif msg.Type != PUSH_TYPE_CLEAR {\n\t\tpayload.Category(msg.Category)\n\t\tpayload.Sound(\"default\")\n\t\tpayload.Custom(\"version\", msg.Version)\n\t\tpayload.MutableContent()\n\n\t\tif len(msg.ChannelName) > 0 && msg.Version == \"v2\" {\n\t\t\tpayload.AlertTitle(msg.ChannelName)\n\t\t\tpayload.AlertBody(emoji.Sprint(msg.Message))\n\t\t\tpayload.Custom(\"channel_name\", msg.ChannelName)\n\t\t} else {\n\t\t\tpayload.Alert(emoji.Sprint(msg.Message))\n\n\t\t\tif len(msg.ChannelName) > 0 {\n\t\t\t\tpayload.Custom(\"channel_name\", msg.ChannelName)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpayload.ContentAvailable()\n\t}\n\n\tpayload.Custom(\"type\", msg.Type)\n\n\tif len(msg.ChannelId) > 0 {\n\t\tpayload.Custom(\"channel_id\", msg.ChannelId)\n\t\tpayload.ThreadID(msg.ChannelId)\n\t}\n\n\tif len(msg.TeamId) > 0 {\n\t\tpayload.Custom(\"team_id\", msg.TeamId)\n\t}\n\n\tif len(msg.SenderId) > 0 {\n\t\tpayload.Custom(\"sender_id\", msg.SenderId)\n\t}\n\n\tif len(msg.PostId) > 0 {\n\t\tpayload.Custom(\"post_id\", msg.PostId)\n\t}\n\n\tif len(msg.RootId) > 0 {\n\t\tpayload.Custom(\"root_id\", msg.RootId)\n\t}\n\n\tif len(msg.OverrideUsername) > 0 {\n\t\tpayload.Custom(\"override_username\", msg.OverrideUsername)\n\t}\n\n\tif len(msg.OverrideIconUrl) > 0 {\n\t\tpayload.Custom(\"override_icon_url\", msg.OverrideIconUrl)\n\t}\n\n\tif len(msg.FromWebhook) > 0 {\n\t\tpayload.Custom(\"from_webhook\", msg.FromWebhook)\n\t}\n\n\tif me.AppleClient != nil {\n\t\tLogInfo(fmt.Sprintf(\"Sending apple push notification for device=%v and type=%v\", me.ApplePushSettings.Type, msg.Type))\n\t\tstart := time.Now()\n\t\tres, err := me.AppleClient.Push(notification)\n\t\tobserveAPNSResponse(time.Since(start).Seconds())\n\t\tif err != nil {\n\t\t\tLogError(fmt.Sprintf(\"Failed to send apple push sid=%v did=%v err=%v type=%v\", msg.ServerId, msg.DeviceId, err, me.ApplePushSettings.Type))\n\t\t\tincrementFailure(me.ApplePushSettings.Type)\n\t\t\treturn NewErrorPushResponse(\"unknown transport error\")\n\t\t}\n\n\t\tif !res.Sent() {\n\t\t\tif res.Reason == \"BadDeviceToken\" || res.Reason == \"Unregistered\" || res.Reason == \"MissingDeviceToken\" || res.Reason == \"DeviceTokenNotForTopic\" {\n\t\t\t\tLogInfo(fmt.Sprintf(\"Failed to send apple push sending remove code res ApnsID=%v reason=%v code=%v type=%v\", res.ApnsID, res.Reason, res.StatusCode, me.ApplePushSettings.Type))\n\t\t\t\tincrementRemoval(me.ApplePushSettings.Type)\n\t\t\t\treturn NewRemovePushResponse()\n\t\t\t}\n\n\t\t\tLogError(fmt.Sprintf(\"Failed to send apple push with res ApnsID=%v reason=%v code=%v type=%v\", res.ApnsID, res.Reason, res.StatusCode, me.ApplePushSettings.Type))\n\t\t\tincrementFailure(me.ApplePushSettings.Type)\n\t\t\treturn NewErrorPushResponse(\"unknown send response error\")\n\t\t}\n\t}\n\n\tincrementSuccess(me.ApplePushSettings.Type)\n\treturn NewOkPushResponse()\n}\n<|endoftext|>"} {"text":"<commit_before>package event_logger\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jpillora\/go-ogle-analytics\"\n\t\"github.com\/oinume\/lekcije\/server\/context_data\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nconst (\n\tCategoryEmail = \"email\"\n\tCategoryUser = \"user\"\n\tCategoryFollowingTeacher = \"followingTeacher\"\n)\n\n\/\/ TODO: Optimize http.Client\nvar gaHTTPClient = &http.Client{\n\tTransport: &logger.LoggingHTTPTransport{DumpHeaderBody: true},\n\tTimeout: time.Second * 7,\n}\n\ntype GAMeasurementEventValues struct {\n\tUserAgentOverride string\n\tClientID string\n\tDocumentHostName string\n\tDocumentPath string\n\tDocumentTitle string\n\tDocumentReferrer string\n\tIPOverride string\n}\n\ntype gaMeasurementEventValuesKey struct{}\n\nfunc NewGAMeasurementEventValuesFromRequest(req *http.Request) *GAMeasurementEventValues {\n\treturn &GAMeasurementEventValues{\n\t\tUserAgentOverride: req.UserAgent(),\n\t\tClientID: context_data.MustTrackingID(req.Context()),\n\t\tDocumentHostName: req.Host,\n\t\tDocumentPath: req.URL.Path,\n\t\tDocumentTitle: req.URL.Path,\n\t\tDocumentReferrer: req.Referer(),\n\t\tIPOverride: getRemoteAddress(req),\n\t}\n}\n\nfunc WithGAMeasurementEventValues(ctx context.Context, v *GAMeasurementEventValues) context.Context {\n\treturn context.WithValue(ctx, gaMeasurementEventValuesKey{}, v)\n}\n\nfunc GetGAMeasurementEventValues(ctx context.Context) (*GAMeasurementEventValues, error) {\n\tv := ctx.Value(gaMeasurementEventValuesKey{})\n\tif value, ok := v.(*GAMeasurementEventValues); ok {\n\t\treturn value, nil\n\t} else {\n\t\treturn nil, errors.Internalf(\"failed get value from context\")\n\t}\n}\n\nfunc MustGAMeasurementEventValues(ctx context.Context) *GAMeasurementEventValues {\n\tv, err := GetGAMeasurementEventValues(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}\n\nfunc Log(userID uint32, category string, action string, fields ...zapcore.Field) {\n\tf := make([]zapcore.Field, 0, len(fields)+1)\n\tf = append(\n\t\tf,\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t\tzap.Uint(\"userID\", uint(userID)),\n\t)\n\tf = append(f, fields...)\n\tlogger.Access.Info(\"eventLog\", f...)\n}\n\nfunc SendGAMeasurementEvent(req *http.Request, category, action, label string, value int64, userID uint32) {\n\tgaClient, err := ga.NewClient(os.Getenv(\"GOOGLE_ANALYTICS_ID\"))\n\tif err != nil {\n\t\tlogger.App.Warn(\"ga.NewClient() failed\", zap.Error(err))\n\t}\n\tgaClient.HttpClient = gaHTTPClient\n\tgaClient.UserAgentOverride(req.UserAgent())\n\n\tgaClient.ClientID(context_data.MustTrackingID(req.Context()))\n\tgaClient.DocumentHostName(req.Host)\n\tgaClient.DocumentPath(req.URL.Path)\n\tgaClient.DocumentTitle(req.URL.Path)\n\tgaClient.DocumentReferrer(req.Referer())\n\tgaClient.IPOverride(getRemoteAddress(req))\n\n\tlogFields := []zapcore.Field{\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t}\n\tevent := ga.NewEvent(category, action)\n\tif label != \"\" {\n\t\tevent.Label(label)\n\t\tlogFields = append(logFields, zap.String(\"label\", label))\n\t}\n\tif value != 0 {\n\t\tevent.Value(value)\n\t\tlogFields = append(logFields, zap.Int64(\"value\", value))\n\t}\n\tif userID != 0 {\n\t\tgaClient.UserID(fmt.Sprint(userID))\n\t\tlogFields = append(logFields, zap.Uint(\"userID\", uint(userID)))\n\t}\n\tif err := gaClient.Send(event); err == nil {\n\t\tlogger.App.Debug(\"SendGAMeasurementEvent() success\", logFields...)\n\t\tLog(userID, category, action, zap.String(\"label\", label), zap.Int64(\"value\", value))\n\t} else {\n\t\tlogger.App.Warn(\"SendGAMeasurementEvent() failed\", zap.Error(err))\n\t}\n}\n\nfunc SendGAMeasurementEvent2(values *GAMeasurementEventValues, category, action, label string, value int64, userID uint32) {\n\tgaClient, err := ga.NewClient(os.Getenv(\"GOOGLE_ANALYTICS_ID\"))\n\tif err != nil {\n\t\tlogger.App.Warn(\"ga.NewClient() failed\", zap.Error(err))\n\t}\n\tgaClient.HttpClient = gaHTTPClient\n\tgaClient.UserAgentOverride(values.UserAgentOverride)\n\tgaClient.ClientID(values.ClientID)\n\tgaClient.DocumentHostName(values.DocumentHostName)\n\tgaClient.DocumentPath(values.DocumentPath)\n\tgaClient.DocumentTitle(values.DocumentTitle)\n\tgaClient.DocumentReferrer(values.DocumentReferrer)\n\tgaClient.IPOverride(values.IPOverride)\n\n\tlogFields := []zapcore.Field{\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t}\n\tevent := ga.NewEvent(category, action)\n\tif label != \"\" {\n\t\tevent.Label(label)\n\t\tlogFields = append(logFields, zap.String(\"label\", label))\n\t}\n\tif value != 0 {\n\t\tevent.Value(value)\n\t\tlogFields = append(logFields, zap.Int64(\"value\", value))\n\t}\n\tif userID != 0 {\n\t\tgaClient.UserID(fmt.Sprint(userID))\n\t\tlogFields = append(logFields, zap.Uint(\"userID\", uint(userID)))\n\t}\n\tif err := gaClient.Send(event); err == nil {\n\t\tlogger.App.Debug(\"SendGAMeasurementEvent() success\", logFields...)\n\t\tLog(userID, category, action, zap.String(\"label\", label), zap.Int64(\"value\", value))\n\t} else {\n\t\tlogger.App.Warn(\"SendGAMeasurementEvent() failed\", zap.Error(err))\n\t}\n}\n\nfunc getRemoteAddress(req *http.Request) string {\n\txForwardedFor := req.Header.Get(\"X-Forwarded-For\")\n\tif xForwardedFor == \"\" {\n\t\treturn (strings.Split(req.RemoteAddr, \":\"))[0]\n\t}\n\treturn strings.TrimSpace((strings.Split(xForwardedFor, \",\"))[0])\n}\n<commit_msg>Add TODO comment<commit_after>package event_logger\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jpillora\/go-ogle-analytics\"\n\t\"github.com\/oinume\/lekcije\/server\/context_data\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nconst (\n\tCategoryEmail = \"email\"\n\tCategoryUser = \"user\"\n\tCategoryFollowingTeacher = \"followingTeacher\"\n)\n\n\/\/ TODO: Optimize http.Client\nvar gaHTTPClient = &http.Client{\n\tTransport: &logger.LoggingHTTPTransport{DumpHeaderBody: true},\n\tTimeout: time.Second * 7,\n}\n\ntype GAMeasurementEventValues struct {\n\tUserAgentOverride string\n\tClientID string\n\tDocumentHostName string\n\tDocumentPath string\n\tDocumentTitle string\n\tDocumentReferrer string\n\tIPOverride string\n}\n\ntype gaMeasurementEventValuesKey struct{}\n\nfunc NewGAMeasurementEventValuesFromRequest(req *http.Request) *GAMeasurementEventValues {\n\treturn &GAMeasurementEventValues{\n\t\tUserAgentOverride: req.UserAgent(),\n\t\tClientID: context_data.MustTrackingID(req.Context()),\n\t\tDocumentHostName: req.Host,\n\t\tDocumentPath: req.URL.Path,\n\t\tDocumentTitle: req.URL.Path,\n\t\tDocumentReferrer: req.Referer(),\n\t\tIPOverride: getRemoteAddress(req),\n\t}\n}\n\nfunc WithGAMeasurementEventValues(ctx context.Context, v *GAMeasurementEventValues) context.Context {\n\treturn context.WithValue(ctx, gaMeasurementEventValuesKey{}, v)\n}\n\nfunc GetGAMeasurementEventValues(ctx context.Context) (*GAMeasurementEventValues, error) {\n\tv := ctx.Value(gaMeasurementEventValuesKey{})\n\tif value, ok := v.(*GAMeasurementEventValues); ok {\n\t\treturn value, nil\n\t} else {\n\t\treturn nil, errors.Internalf(\"failed get value from context\")\n\t}\n}\n\nfunc MustGAMeasurementEventValues(ctx context.Context) *GAMeasurementEventValues {\n\tv, err := GetGAMeasurementEventValues(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}\n\nfunc Log(userID uint32, category string, action string, fields ...zapcore.Field) {\n\tf := make([]zapcore.Field, 0, len(fields)+1)\n\tf = append(\n\t\tf,\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t\tzap.Uint(\"userID\", uint(userID)),\n\t)\n\tf = append(f, fields...)\n\tlogger.Access.Info(\"eventLog\", f...)\n}\n\n\/\/ TODO: remove\nfunc SendGAMeasurementEvent(req *http.Request, category, action, label string, value int64, userID uint32) {\n\tgaClient, err := ga.NewClient(os.Getenv(\"GOOGLE_ANALYTICS_ID\"))\n\tif err != nil {\n\t\tlogger.App.Warn(\"ga.NewClient() failed\", zap.Error(err))\n\t}\n\tgaClient.HttpClient = gaHTTPClient\n\tgaClient.UserAgentOverride(req.UserAgent())\n\n\tgaClient.ClientID(context_data.MustTrackingID(req.Context()))\n\tgaClient.DocumentHostName(req.Host)\n\tgaClient.DocumentPath(req.URL.Path)\n\tgaClient.DocumentTitle(req.URL.Path)\n\tgaClient.DocumentReferrer(req.Referer())\n\tgaClient.IPOverride(getRemoteAddress(req))\n\n\tlogFields := []zapcore.Field{\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t}\n\tevent := ga.NewEvent(category, action)\n\tif label != \"\" {\n\t\tevent.Label(label)\n\t\tlogFields = append(logFields, zap.String(\"label\", label))\n\t}\n\tif value != 0 {\n\t\tevent.Value(value)\n\t\tlogFields = append(logFields, zap.Int64(\"value\", value))\n\t}\n\tif userID != 0 {\n\t\tgaClient.UserID(fmt.Sprint(userID))\n\t\tlogFields = append(logFields, zap.Uint(\"userID\", uint(userID)))\n\t}\n\tif err := gaClient.Send(event); err == nil {\n\t\tlogger.App.Debug(\"SendGAMeasurementEvent() success\", logFields...)\n\t\tLog(userID, category, action, zap.String(\"label\", label), zap.Int64(\"value\", value))\n\t} else {\n\t\tlogger.App.Warn(\"SendGAMeasurementEvent() failed\", zap.Error(err))\n\t}\n}\n\nfunc SendGAMeasurementEvent2(values *GAMeasurementEventValues, category, action, label string, value int64, userID uint32) {\n\tgaClient, err := ga.NewClient(os.Getenv(\"GOOGLE_ANALYTICS_ID\"))\n\tif err != nil {\n\t\tlogger.App.Warn(\"ga.NewClient() failed\", zap.Error(err))\n\t}\n\tgaClient.HttpClient = gaHTTPClient\n\tgaClient.UserAgentOverride(values.UserAgentOverride)\n\tgaClient.ClientID(values.ClientID)\n\tgaClient.DocumentHostName(values.DocumentHostName)\n\tgaClient.DocumentPath(values.DocumentPath)\n\tgaClient.DocumentTitle(values.DocumentTitle)\n\tgaClient.DocumentReferrer(values.DocumentReferrer)\n\tgaClient.IPOverride(values.IPOverride)\n\n\tlogFields := []zapcore.Field{\n\t\tzap.String(\"category\", category),\n\t\tzap.String(\"action\", action),\n\t}\n\tevent := ga.NewEvent(category, action)\n\tif label != \"\" {\n\t\tevent.Label(label)\n\t\tlogFields = append(logFields, zap.String(\"label\", label))\n\t}\n\tif value != 0 {\n\t\tevent.Value(value)\n\t\tlogFields = append(logFields, zap.Int64(\"value\", value))\n\t}\n\tif userID != 0 {\n\t\tgaClient.UserID(fmt.Sprint(userID))\n\t\tlogFields = append(logFields, zap.Uint(\"userID\", uint(userID)))\n\t}\n\tif err := gaClient.Send(event); err == nil {\n\t\tlogger.App.Debug(\"SendGAMeasurementEvent() success\", logFields...)\n\t\tLog(userID, category, action, zap.String(\"label\", label), zap.Int64(\"value\", value))\n\t} else {\n\t\tlogger.App.Warn(\"SendGAMeasurementEvent() failed\", zap.Error(err))\n\t}\n}\n\nfunc getRemoteAddress(req *http.Request) string {\n\txForwardedFor := req.Header.Get(\"X-Forwarded-For\")\n\tif xForwardedFor == \"\" {\n\t\treturn (strings.Split(req.RemoteAddr, \":\"))[0]\n\t}\n\treturn strings.TrimSpace((strings.Split(xForwardedFor, \",\"))[0])\n}\n<|endoftext|>"} {"text":"<commit_before>package lang\n\nimport (\n\/\/ \"fmt\"\n . \"jvmgo\/any\"\n \"jvmgo\/jvm\/rtda\"\n rtc \"jvmgo\/jvm\/rtda\/class\"\n)\n\nfunc init() {\n _throwable(fillInStackTrace, \"fillInStackTrace\", \"(I)Ljava\/lang\/Throwable;\")\n _throwable(getStackTraceElement, \"getStackTraceElement\", \"(I)Ljava\/lang\/StackTraceElement;\")\n _throwable(getStackTraceDepth, \"getStackTraceDepth\", \"()I\")\n}\n\nfunc _throwable(method Any, name, desc string) {\n rtc.RegisterNativeMethod(\"java\/lang\/Throwable\", name, desc, method)\n}\n\n\/\/ private native Throwable fillInStackTrace(int dummy);\n\/\/ (I)Ljava\/lang\/Throwable;\nfunc fillInStackTrace(frame *rtda.Frame) {\n stack := frame.OperandStack()\n stack.PopInt() \/\/ dummy\n this := stack.PopRef() \/\/ this\n stack.PushRef(this)\n\n stes := createStackTraceElements(frame)\n this.SetExtra(stes)\n}\n\nfunc createStackTraceElements(frame *rtda.Frame) ([]*StackTraceElement) {\n thread := frame.Thread()\n depth := thread.StackDepth()\n\n stes := make([]*StackTraceElement, depth)\n for i := uint(0); i < depth; i++ {\n frameN := thread.TopFrameN(i)\n methodN := frameN.Method()\n classN := methodN.Class()\n\n stes[i] = &StackTraceElement{\n declaringClass: classN.Name(),\n methodName: methodN.Name(),\n fileName: classN.SourceFile(),\n lineNumber: int32(-1), \/\/ todo\n }\n }\n return stes\n}\n\n\/\/ native int getStackTraceDepth();\n\/\/ ()I\nfunc getStackTraceDepth(frame *rtda.Frame) {\n stack := frame.OperandStack()\n this := stack.PopRef()\n\n stes := this.Extra().([]*StackTraceElement)\n depth := int32(len(stes))\n stack.PushInt(depth)\n}\n\n\/\/ native StackTraceElement getStackTraceElement(int index);\n\/\/ (I)Ljava\/lang\/StackTraceElement;\nfunc getStackTraceElement(frame *rtda.Frame) {\n stack := frame.OperandStack()\n index := stack.PopInt()\n this := stack.PopRef()\n\n stes := this.Extra().([]*StackTraceElement)\n ste := stes[index]\n\n steObj := createStackTraceElementObj(ste, frame)\n stack.PushRef(steObj)\n}\n\nfunc createStackTraceElementObj(ste *StackTraceElement, frame *rtda.Frame) (*rtc.Obj) {\n declaringClass := rtda.NewJString(ste.declaringClass, frame)\n methodName := rtda.NewJString(ste.methodName, frame)\n fileName := rtda.NewJString(ste.fileName, frame)\n lineNumber := ste.lineNumber\n\n \/*\n public StackTraceElement(String declaringClass, String methodName,\n String fileName, int lineNumber)\n *\/\n steClass := frame.GetClassLoader().LoadClass(\"java\/lang\/StackTraceElement\")\n steObj := steClass.NewObj()\n \/\/ todo: call <init>\n steObj.SetFieldValue(\"declaringClass\", \"Ljava\/lang\/String;\", declaringClass)\n steObj.SetFieldValue(\"methodName\", \"Ljava\/lang\/String;\", methodName)\n steObj.SetFieldValue(\"fileName\", \"Ljava\/lang\/String;\", fileName)\n steObj.SetFieldValue(\"lineNumber\", \"I\", lineNumber)\n\n return steObj\n}\n<commit_msg>stack trace: skip unrelated frames<commit_after>package lang\n\nimport (\n\/\/ \"fmt\"\n . \"jvmgo\/any\"\n \"jvmgo\/jvm\/rtda\"\n rtc \"jvmgo\/jvm\/rtda\/class\"\n)\n\nfunc init() {\n _throwable(fillInStackTrace, \"fillInStackTrace\", \"(I)Ljava\/lang\/Throwable;\")\n _throwable(getStackTraceElement, \"getStackTraceElement\", \"(I)Ljava\/lang\/StackTraceElement;\")\n _throwable(getStackTraceDepth, \"getStackTraceDepth\", \"()I\")\n}\n\nfunc _throwable(method Any, name, desc string) {\n rtc.RegisterNativeMethod(\"java\/lang\/Throwable\", name, desc, method)\n}\n\n\/\/ private native Throwable fillInStackTrace(int dummy);\n\/\/ (I)Ljava\/lang\/Throwable;\nfunc fillInStackTrace(frame *rtda.Frame) {\n stack := frame.OperandStack()\n stack.PopInt() \/\/ dummy\n this := stack.PopRef() \/\/ this\n stack.PushRef(this)\n\n stes := createStackTraceElements(this, frame)\n this.SetExtra(stes)\n}\n\nfunc createStackTraceElements(tObj *rtc.Obj, frame *rtda.Frame) ([]*StackTraceElement) {\n thread := frame.Thread()\n depth := thread.StackDepth()\n\n \/\/ skip unrelated frames\n i := uint(0)\n for k := tObj.Class(); k != nil; k = k.SuperClass() {\n i++\n }\n\n stes := make([]*StackTraceElement, 0, depth)\n for ; i < depth; i++ {\n frameN := thread.TopFrameN(i)\n methodN := frameN.Method()\n classN := methodN.Class()\n\n ste := &StackTraceElement{\n declaringClass: classN.Name(),\n methodName: methodN.Name(),\n fileName: classN.SourceFile(),\n lineNumber: int32(-1), \/\/ todo\n }\n stes = append(stes, ste)\n }\n\n return stes\n}\n\n\/\/ native int getStackTraceDepth();\n\/\/ ()I\nfunc getStackTraceDepth(frame *rtda.Frame) {\n stack := frame.OperandStack()\n this := stack.PopRef()\n\n stes := this.Extra().([]*StackTraceElement)\n depth := int32(len(stes))\n stack.PushInt(depth)\n}\n\n\/\/ native StackTraceElement getStackTraceElement(int index);\n\/\/ (I)Ljava\/lang\/StackTraceElement;\nfunc getStackTraceElement(frame *rtda.Frame) {\n stack := frame.OperandStack()\n index := stack.PopInt()\n this := stack.PopRef()\n\n stes := this.Extra().([]*StackTraceElement)\n ste := stes[index]\n\n steObj := createStackTraceElementObj(ste, frame)\n stack.PushRef(steObj)\n}\n\nfunc createStackTraceElementObj(ste *StackTraceElement, frame *rtda.Frame) (*rtc.Obj) {\n declaringClass := rtda.NewJString(ste.declaringClass, frame)\n methodName := rtda.NewJString(ste.methodName, frame)\n fileName := rtda.NewJString(ste.fileName, frame)\n lineNumber := ste.lineNumber\n\n \/*\n public StackTraceElement(String declaringClass, String methodName,\n String fileName, int lineNumber)\n *\/\n steClass := frame.GetClassLoader().LoadClass(\"java\/lang\/StackTraceElement\")\n steObj := steClass.NewObj()\n \/\/ todo: call <init>\n steObj.SetFieldValue(\"declaringClass\", \"Ljava\/lang\/String;\", declaringClass)\n steObj.SetFieldValue(\"methodName\", \"Ljava\/lang\/String;\", methodName)\n steObj.SetFieldValue(\"fileName\", \"Ljava\/lang\/String;\", fileName)\n steObj.SetFieldValue(\"lineNumber\", \"I\", lineNumber)\n\n return steObj\n}\n<|endoftext|>"} {"text":"<commit_before>package xpra\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/subgraph\/oz\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\"\n\t\"strconv\"\n)\n\ntype Xpra struct {\n\t\/\/ Server working directory where unix socket is created\n\tWorkDir string\n\n\tConfig *oz.XServerConf\n\n\t\/\/ Running xpra process\n\tProcess *exec.Cmd\n\n\t\/\/ Display number\n\tDisplay uint64\n\n\t\/\/ Arguments passed to xpra command\n\txpraArgs []string\n}\n\nvar xpraDefaultArgs = []string{\n\t\"--no-daemon\",\n\t\"--mmap\",\n\t\"--no-sharing\",\n\t\"--bell\",\n\t\"--system-tray\",\n\t\"--xsettings\",\n\t\/\/\"--no-xsettings\",\n\t\"--notifications\",\n\t\"--cursors\",\n\t\"--encoding=rgb\",\n\t\"--no-pulseaudio\",\n}\n\nfunc getDefaultArgs(config *oz.XServerConf) []string {\n\targs := []string{}\n\targs = append(args, xpraDefaultArgs...)\n\tif config.DisableClipboard {\n\t\targs = append(args, \"--no-clipboard\")\n\t} else {\n\t\targs = append(args, \"--clipboard\")\n\t}\n\n\tif config.DisableAudio {\n\t\targs = append(args, \"--no-microphone\", \"--no-speaker\")\n\t} else {\n\t\targs = append(args, \"--microphone\", \"--speaker\")\n\t}\n\n\treturn args\n}\n\nfunc (x *Xpra) Stop() ([]byte, error) {\n\tcmd := exec.Command(\"\/usr\/bin\/xpra\",\n\t\t\"--socket-dir=\"+x.WorkDir,\n\t\t\"stop\",\n\t\tfmt.Sprintf(\":%d\", x.Display),\n\t)\n\tcmd.Env = []string{\"TMPDIR=\" + x.WorkDir}\n\treturn cmd.Output()\n}\n\nfunc GetPath(u *user.User, name string) string {\n\treturn path.Join(u.HomeDir, \".Xoz\", name)\n}\n\nfunc CreateDir(u *user.User, name string) (string, error) {\n\tuid, gid, err := userIds(u)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdir := GetPath(u, name)\n\tif err := createSubdirs(u.HomeDir, uid, gid, 0755, \".Xoz\", name); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create xpra directory (%s): %v\", dir, err)\n\t}\n\treturn dir, nil\n}\n\nfunc createSubdirs(base string, uid, gid int, mode os.FileMode, subdirs ...string) error {\n\tdir := base\n\tfor _, sd := range subdirs {\n\t\tdir = path.Join(dir, sd)\n\t\tif err := os.Mkdir(dir, mode); err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chown(dir, uid, gid); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc userIds(user *user.User) (int, int, error) {\n\tuid, err := strconv.Atoi(user.Uid)\n\tif err != nil {\n\t\treturn -1, -1, errors.New(\"failed to parse uid from user struct: \" + err.Error())\n\t}\n\tgid, err := strconv.Atoi(user.Gid)\n\tif err != nil {\n\t\treturn -1, -1, errors.New(\"failed to parse gid from user struct: \" + err.Error())\n\t}\n\treturn uid, gid, nil\n}\n<commit_msg>Pass proper creds to xpra.Stop()<commit_after>package xpra\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/subgraph\/oz\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\ntype Xpra struct {\n\t\/\/ Server working directory where unix socket is created\n\tWorkDir string\n\n\tConfig *oz.XServerConf\n\n\t\/\/ Running xpra process\n\tProcess *exec.Cmd\n\n\t\/\/ Display number\n\tDisplay uint64\n\n\t\/\/ Arguments passed to xpra command\n\txpraArgs []string\n}\n\nvar xpraDefaultArgs = []string{\n\t\"--no-daemon\",\n\t\"--mmap\",\n\t\"--no-sharing\",\n\t\"--bell\",\n\t\"--system-tray\",\n\t\"--xsettings\",\n\t\/\/\"--no-xsettings\",\n\t\"--notifications\",\n\t\"--cursors\",\n\t\"--encoding=rgb\",\n\t\"--no-pulseaudio\",\n}\n\nfunc getDefaultArgs(config *oz.XServerConf) []string {\n\targs := []string{}\n\targs = append(args, xpraDefaultArgs...)\n\tif config.DisableClipboard {\n\t\targs = append(args, \"--no-clipboard\")\n\t} else {\n\t\targs = append(args, \"--clipboard\")\n\t}\n\n\tif config.DisableAudio {\n\t\targs = append(args, \"--no-microphone\", \"--no-speaker\")\n\t} else {\n\t\targs = append(args, \"--microphone\", \"--speaker\")\n\t}\n\n\treturn args\n}\n\nfunc (x *Xpra) Stop(cred *syscall.Credential) ([]byte, error) {\n\tcmd := exec.Command(\"\/usr\/bin\/xpra\",\n\t\t\"--socket-dir=\"+x.WorkDir,\n\t\t\"stop\",\n\t\tfmt.Sprintf(\":%d\", x.Display),\n\t)\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCredential: cred,\n\t}\n\tcmd.Env = []string{\"TMPDIR=\" + x.WorkDir}\n\treturn cmd.Output()\n}\n\nfunc GetPath(u *user.User, name string) string {\n\treturn path.Join(u.HomeDir, \".Xoz\", name)\n}\n\nfunc CreateDir(u *user.User, name string) (string, error) {\n\tuid, gid, err := userIds(u)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdir := GetPath(u, name)\n\tif err := createSubdirs(u.HomeDir, uid, gid, 0755, \".Xoz\", name); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create xpra directory (%s): %v\", dir, err)\n\t}\n\treturn dir, nil\n}\n\nfunc createSubdirs(base string, uid, gid int, mode os.FileMode, subdirs ...string) error {\n\tdir := base\n\tfor _, sd := range subdirs {\n\t\tdir = path.Join(dir, sd)\n\t\tif err := os.Mkdir(dir, mode); err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chown(dir, uid, gid); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc userIds(user *user.User) (int, int, error) {\n\tuid, err := strconv.Atoi(user.Uid)\n\tif err != nil {\n\t\treturn -1, -1, errors.New(\"failed to parse uid from user struct: \" + err.Error())\n\t}\n\tgid, err := strconv.Atoi(user.Gid)\n\tif err != nil {\n\t\treturn -1, -1, errors.New(\"failed to parse gid from user struct: \" + err.Error())\n\t}\n\treturn uid, gid, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestWrapperCommandStructSingleArg(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": \"value\"}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tCredential{},\n\t}\n\n\tif wrapper.RequestType != expectedRequest {\n\t\tt.Errorf(\"expected request value incorrect\")\n\t}\n\n\tif !reflect.DeepEqual(wrapper.Args, expectedArgs) {\n\t\tt.Errorf(\"expected args are not correct\")\n\t}\n}\n\nfunc TestWrapperCommandStructManyArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []string{\"value1\", \"value2\"}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tCredential{},\n\t}\n\n\tif wrapper.RequestType != expectedRequest {\n\t\tt.Errorf(\"expected request value incorrect\")\n\t}\n\n\tif !reflect.DeepEqual(wrapper.Args, expectedArgs) {\n\t\tt.Errorf(\"expected args are not correct\")\n\t}\n}\n\nfunc TestInvalidTypeInArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []int{2, 3}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\t_, err := wrapper.Execute()\n\n\tif err.Error() != \"invalid type found in args\" {\n\t\tt.Errorf(\"invalid types not detected\")\n\t}\n}\n\nfunc TestValidTypeInArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []string{\"test\"}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\t_, err := combinedArgs(wrapper)\n\n\tif err != nil {\n\t\tt.Errorf(\"valid types not detected\")\n\t}\n}\n\nfunc TestJSONValidTypeInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"requesttype\": \"fetch\",\n \"args\": {\n \"--scope\": [\"cloud-platform\",\"userinfo.email\"]\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\t_, err := wrapper.Execute()\n\n\tif err != nil {\n\t\tt.Errorf(\"valid types not detected\")\n\t}\n}\n\nfunc TestJSONInvalidTypeInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"requesttype\": \"fetch\",\n \"args\": {\n \"--scope\": 2\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\t_, err := wrapper.Execute()\n\n\tif err.Error() != \"invalid type found in args\" {\n\t\tt.Errorf(\"invalid types not detected\")\n\t}\n}\n\nfunc TestJSONValueInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"requesttype\": \"fetch\",\n \"args\": {\n \"--scope\": [\"cloud-platform\", \"userinfo.email\"]\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\tflattenedArgs, _ := combinedArgs(wrapper)\n\n\tif !reflect.DeepEqual(flattenedArgs, []string{\"fetch\", \"--scope\", \"cloud-platform\", \"userinfo.email\"}) {\n\t\tt.Errorf(\"flattened values not correct\")\n\t}\n}\n\nfunc TestDummyOauth2lCommand(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"--token\": \"ya29.justkiddingmadethisoneup\"}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\toutput, _ := wrapper.Execute()\n\tif output != \"1\" {\n\t\tt.Errorf(\"error running dummy command\")\n\t}\n}\n\nfunc TestTokenCreationCredential(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"quota_project_id\": \"delays-or-traffi-1569131153704\",\n\t\t\"refresh_token\": \"1\/\/0dFSxxi4NOTl2CgYIARAAGA0SNwF-L9Ira5YTnnFer1GCZBToGkwmVMAounGai_x4CgHwMAFgFNBsPSK5hBwxfpGn88u3roPrRcQ\",\n\t\t\"type\": \"authorized_user\"\n\t }`}\n\n\twrapper := WrapperCommand{\n\t\t\"fetch\",\n\t\tArgs{\"--scope\": []string{\"cloud-platform\"}},\n\t\tcred,\n\t}\n\n\t_, err := wrapper.Execute()\n\n\tif err != nil {\n\t\tt.Errorf(\"error creating token\")\n\t}\n}\n\nfunc TestCredentialFileCreation(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\t_, err := allocateMemFile(cred)\n\n\tif err != nil {\n\t\tt.Errorf(\"error creating file\")\n\t}\n}\n\nfunc TestCredentialPath(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\tpath, err := getCredentialPath(cred)\n\n\tif err != nil || path == \"\" {\n\t\tt.Errorf(\"error generating path\")\n\t}\n}\n\nfunc TestCredentialFileContents(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\tpath, err := getCredentialPath(cred)\n\n\tfileBytes, err := ioutil.ReadFile(path)\n\t\n\tif err != nil {\n\t\tt.Errorf(\"error reading from file\")\n\t}\n\n\tfileStr := string(fileBytes)\n\n\tif fileStr != cred[\"credential\"].(string) {\n\t\tt.Errorf(\"file contents do not match\")\n\t}\n}\n<commit_msg>Update tests to match changes to wrapper struct<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestWrapperCommandStructSingleArg(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": \"value\"}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tCredential{},\n\t}\n\n\tif wrapper.CommandType != expectedRequest {\n\t\tt.Errorf(\"expected request value incorrect\")\n\t}\n\n\tif !reflect.DeepEqual(wrapper.Args, expectedArgs) {\n\t\tt.Errorf(\"expected args are not correct\")\n\t}\n}\n\nfunc TestWrapperCommandStructManyArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []string{\"value1\", \"value2\"}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tCredential{},\n\t}\n\n\tif wrapper.CommandType != expectedRequest {\n\t\tt.Errorf(\"expected request value incorrect\")\n\t}\n\n\tif !reflect.DeepEqual(wrapper.Args, expectedArgs) {\n\t\tt.Errorf(\"expected args are not correct\")\n\t}\n}\n\nfunc TestInvalidTypeInArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []int{2, 3}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\t_, err := wrapper.Execute()\n\n\tif err.Error() != \"invalid type found in args\" {\n\t\tt.Errorf(\"invalid types not detected\")\n\t}\n}\n\nfunc TestValidTypeInArgs(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"flag\": []string{\"test\"}}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\t_, err := combinedArgs(wrapper)\n\n\tif err != nil {\n\t\tt.Errorf(\"valid types not detected\")\n\t}\n}\n\nfunc TestJSONValidTypeInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"commandType\": \"fetch\",\n \"args\": {\n \"--scope\": [\"cloud-platform\",\"userinfo.email\"]\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\t_, err := wrapper.Execute()\n\n\tif err != nil {\n\t\tt.Errorf(\"valid types not detected\")\n\t}\n}\n\nfunc TestJSONInvalidTypeInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"commandType\": \"fetch\",\n \"args\": {\n \"--scope\": 2\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\t_, err := wrapper.Execute()\n\n\tif err.Error() != \"invalid type found in args\" {\n\t\tt.Errorf(\"invalid types not detected\")\n\t}\n}\n\nfunc TestJSONValueInArgs(t *testing.T) {\n\twrapper := WrapperCommand{}\n\n\tjsonString := []byte(`{\n \"commandType\": \"fetch\",\n \"args\": {\n \"--scope\": [\"cloud-platform\", \"userinfo.email\"]\n\t\t},\n\t\t\"body\": {}\n\t}`)\n\n\tjson.Unmarshal(jsonString, &wrapper)\n\n\tflattenedArgs, _ := combinedArgs(wrapper)\n\n\tif !reflect.DeepEqual(flattenedArgs, []string{\"fetch\", \"--scope\", \"cloud-platform\", \"userinfo.email\"}) {\n\t\tt.Errorf(\"flattened values not correct\")\n\t}\n}\n\nfunc TestDummyOauth2lCommand(t *testing.T) {\n\tconst expectedRequest = \"test\"\n\tvar expectedArgs = Args{\"--token\": \"ya29.justkiddingmadethisoneup\"}\n\n\twrapper := WrapperCommand{\n\t\texpectedRequest,\n\t\texpectedArgs,\n\t\tnil,\n\t}\n\n\toutput, _ := wrapper.Execute()\n\tif output != \"1\" {\n\t\tt.Errorf(\"error running dummy command\")\n\t}\n}\n\nfunc TestTokenCreationCredential(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"quota_project_id\": \"delays-or-traffi-1569131153704\",\n\t\t\"refresh_token\": \"1\/\/0dFSxxi4NOTl2CgYIARAAGA0SNwF-L9Ira5YTnnFer1GCZBToGkwmVMAounGai_x4CgHwMAFgFNBsPSK5hBwxfpGn88u3roPrRcQ\",\n\t\t\"type\": \"authorized_user\"\n\t }`}\n\n\twrapper := WrapperCommand{\n\t\t\"fetch\",\n\t\tArgs{\"--scope\": []string{\"cloud-platform\"}},\n\t\tcred,\n\t}\n\n\t_, err := wrapper.Execute()\n\n\tif err != nil {\n\t\tt.Errorf(\"error creating token\")\n\t}\n}\n\nfunc TestCredentialFileCreation(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\t_, err := allocateMemFile(cred)\n\n\tif err != nil {\n\t\tt.Errorf(\"error creating file\")\n\t}\n}\n\nfunc TestCredentialPath(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\tpath, err := getCredentialPath(cred)\n\n\tif err != nil || path == \"\" {\n\t\tt.Errorf(\"error generating path\")\n\t}\n}\n\nfunc TestCredentialFileContents(t *testing.T) {\n\tcred := Credential{\"credential\": `{\n\t\t\"client_id\": \"some\",\n\t\t\"client_secret\": \"random\",\n\t\t\"refresh_token\": \"test\",\n\t\t\"type\": \"code\"\n\t\t}`}\n\n\tpath, err := getCredentialPath(cred)\n\n\tfileBytes, err := ioutil.ReadFile(path)\n\t\n\tif err != nil {\n\t\tt.Errorf(\"error reading from file\")\n\t}\n\n\tfileStr := string(fileBytes)\n\n\tif fileStr != cred[\"credential\"].(string) {\n\t\tt.Errorf(\"file contents do not match\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Spinpunch, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage api\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/mattermost\/platform\/model\"\n\t\"github.com\/mattermost\/platform\/store\"\n)\n\nfunc TestSuggestRootCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tif _, err := Client.Command(\"\", \"\", true); err == nil {\n\t\tt.Fatal(\"Should fail\")\n\t}\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/\", true)).Data.(*model.Command)\n\n\thasLogout := false\n\tfor _, v := range rs1.Suggestions {\n\t\tif v.Suggestion == \"\/logout\" {\n\t\t\thasLogout = true\n\t\t}\n\t}\n\n\tif !hasLogout {\n\t\tt.Log(rs1.Suggestions)\n\t\tt.Fatal(\"should have logout cmd\")\n\t}\n\n\trs2 := Client.Must(Client.Command(\"\", \"\/log\", true)).Data.(*model.Command)\n\n\tif rs2.Suggestions[0].Suggestion != \"\/logout\" {\n\t\tt.Fatal(\"should have logout cmd\")\n\t}\n\n\trs3 := Client.Must(Client.Command(\"\", \"\/joi\", true)).Data.(*model.Command)\n\n\tif rs3.Suggestions[0].Suggestion != \"\/join\" {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs4 := Client.Must(Client.Command(\"\", \"\/ech\", true)).Data.(*model.Command)\n\n\tif rs4.Suggestions[0].Suggestion != \"\/echo\" {\n\t\tt.Fatal(\"should have echo cmd\")\n\t}\n}\n\nfunc TestLogoutCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/logout\", false)).Data.(*model.Command)\n\tif rs1.GotoLocation != \"\/logout\" {\n\t\tt.Fatal(\"failed to logout\")\n\t}\n}\n\nfunc TestJoinCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tchannel1 := &model.Channel{DisplayName: \"AA\", Name: \"aa\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel1 = Client.Must(Client.CreateChannel(channel1)).Data.(*model.Channel)\n\tClient.Must(Client.LeaveChannel(channel1.Id))\n\n\tchannel2 := &model.Channel{DisplayName: \"BB\", Name: \"bb\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel2 = Client.Must(Client.CreateChannel(channel2)).Data.(*model.Channel)\n\tClient.Must(Client.LeaveChannel(channel2.Id))\n\n\tuser2 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser2 = Client.Must(Client.CreateUser(user2, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tdata := make(map[string]string)\n\tdata[\"user_id\"] = user2.Id\n\tchannel3 := Client.Must(Client.CreateDirectChannel(data)).Data.(*model.Channel)\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/join aa\", true)).Data.(*model.Command)\n\tif rs1.Suggestions[0].Suggestion != \"\/join \"+channel1.Name {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs2 := Client.Must(Client.Command(\"\", \"\/join bb\", true)).Data.(*model.Command)\n\tif rs2.Suggestions[0].Suggestion != \"\/join \"+channel2.Name {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs3 := Client.Must(Client.Command(\"\", \"\/join\", true)).Data.(*model.Command)\n\tif len(rs3.Suggestions) != 2 {\n\t\tt.Fatal(\"should have 2 join cmd\")\n\t}\n\n\trs4 := Client.Must(Client.Command(\"\", \"\/join \", true)).Data.(*model.Command)\n\tif len(rs4.Suggestions) != 2 {\n\t\tt.Fatal(\"should have 2 join cmd\")\n\t}\n\n\trs5 := Client.Must(Client.Command(\"\", \"\/join \"+channel2.Name, false)).Data.(*model.Command)\n\tif !strings.HasSuffix(rs5.GotoLocation, \"\/\"+team.Name+\"\/channels\/\"+channel2.Name) {\n\t\tt.Fatal(\"failed to join channel\")\n\t}\n\n\trs6 := Client.Must(Client.Command(\"\", \"\/join \"+channel3.Name, false)).Data.(*model.Command)\n\tif strings.HasSuffix(rs6.GotoLocation, \"\/\"+team.Name+\"\/channels\/\"+channel3.Name) {\n\t\tt.Fatal(\"should not have joined direct message channel\")\n\t}\n\n\tc1 := Client.Must(Client.GetChannels(\"\")).Data.(*model.ChannelList)\n\n\tif len(c1.Channels) != 4 { \/\/ 4 because of town-square, off-topic and direct\n\t\tt.Fatal(\"didn't join channel\")\n\t}\n\n\tfound := false\n\tfor _, c := range c1.Channels {\n\t\tif c.Name == channel2.Name {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\tt.Fatal(\"didn't join channel\")\n\t}\n}\n\nfunc TestEchoCommand(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tchannel1 := &model.Channel{DisplayName: \"AA\", Name: \"aa\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel1 = Client.Must(Client.CreateChannel(channel1)).Data.(*model.Channel)\n\n\techoTestString := \"\/echo test\"\n\n\tr1 := Client.Must(Client.Command(channel1.Id, echoTestString, false)).Data.(*model.Command)\n\tif r1.Response != model.RESP_EXECUTED {\n\t\tt.Fatal(\"Echo command failed to execute\")\n\t}\n\n\tp1 := Client.Must(Client.GetPosts(channel1.Id, 0, 2, \"\")).Data.(*model.PostList)\n\tif len(p1.Order) != 1 {\n\t\tt.Fatal(\"Echo command failed to send\")\n\t}\n}\n<commit_msg>Added slight delay to TestEchoCommand to account for race condition<commit_after>\/\/ Copyright (c) 2015 Spinpunch, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage api\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/mattermost\/platform\/model\"\n\t\"github.com\/mattermost\/platform\/store\"\n)\n\nfunc TestSuggestRootCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tif _, err := Client.Command(\"\", \"\", true); err == nil {\n\t\tt.Fatal(\"Should fail\")\n\t}\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/\", true)).Data.(*model.Command)\n\n\thasLogout := false\n\tfor _, v := range rs1.Suggestions {\n\t\tif v.Suggestion == \"\/logout\" {\n\t\t\thasLogout = true\n\t\t}\n\t}\n\n\tif !hasLogout {\n\t\tt.Log(rs1.Suggestions)\n\t\tt.Fatal(\"should have logout cmd\")\n\t}\n\n\trs2 := Client.Must(Client.Command(\"\", \"\/log\", true)).Data.(*model.Command)\n\n\tif rs2.Suggestions[0].Suggestion != \"\/logout\" {\n\t\tt.Fatal(\"should have logout cmd\")\n\t}\n\n\trs3 := Client.Must(Client.Command(\"\", \"\/joi\", true)).Data.(*model.Command)\n\n\tif rs3.Suggestions[0].Suggestion != \"\/join\" {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs4 := Client.Must(Client.Command(\"\", \"\/ech\", true)).Data.(*model.Command)\n\n\tif rs4.Suggestions[0].Suggestion != \"\/echo\" {\n\t\tt.Fatal(\"should have echo cmd\")\n\t}\n}\n\nfunc TestLogoutCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/logout\", false)).Data.(*model.Command)\n\tif rs1.GotoLocation != \"\/logout\" {\n\t\tt.Fatal(\"failed to logout\")\n\t}\n}\n\nfunc TestJoinCommands(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tchannel1 := &model.Channel{DisplayName: \"AA\", Name: \"aa\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel1 = Client.Must(Client.CreateChannel(channel1)).Data.(*model.Channel)\n\tClient.Must(Client.LeaveChannel(channel1.Id))\n\n\tchannel2 := &model.Channel{DisplayName: \"BB\", Name: \"bb\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel2 = Client.Must(Client.CreateChannel(channel2)).Data.(*model.Channel)\n\tClient.Must(Client.LeaveChannel(channel2.Id))\n\n\tuser2 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser2 = Client.Must(Client.CreateUser(user2, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tdata := make(map[string]string)\n\tdata[\"user_id\"] = user2.Id\n\tchannel3 := Client.Must(Client.CreateDirectChannel(data)).Data.(*model.Channel)\n\n\trs1 := Client.Must(Client.Command(\"\", \"\/join aa\", true)).Data.(*model.Command)\n\tif rs1.Suggestions[0].Suggestion != \"\/join \"+channel1.Name {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs2 := Client.Must(Client.Command(\"\", \"\/join bb\", true)).Data.(*model.Command)\n\tif rs2.Suggestions[0].Suggestion != \"\/join \"+channel2.Name {\n\t\tt.Fatal(\"should have join cmd\")\n\t}\n\n\trs3 := Client.Must(Client.Command(\"\", \"\/join\", true)).Data.(*model.Command)\n\tif len(rs3.Suggestions) != 2 {\n\t\tt.Fatal(\"should have 2 join cmd\")\n\t}\n\n\trs4 := Client.Must(Client.Command(\"\", \"\/join \", true)).Data.(*model.Command)\n\tif len(rs4.Suggestions) != 2 {\n\t\tt.Fatal(\"should have 2 join cmd\")\n\t}\n\n\trs5 := Client.Must(Client.Command(\"\", \"\/join \"+channel2.Name, false)).Data.(*model.Command)\n\tif !strings.HasSuffix(rs5.GotoLocation, \"\/\"+team.Name+\"\/channels\/\"+channel2.Name) {\n\t\tt.Fatal(\"failed to join channel\")\n\t}\n\n\trs6 := Client.Must(Client.Command(\"\", \"\/join \"+channel3.Name, false)).Data.(*model.Command)\n\tif strings.HasSuffix(rs6.GotoLocation, \"\/\"+team.Name+\"\/channels\/\"+channel3.Name) {\n\t\tt.Fatal(\"should not have joined direct message channel\")\n\t}\n\n\tc1 := Client.Must(Client.GetChannels(\"\")).Data.(*model.ChannelList)\n\n\tif len(c1.Channels) != 4 { \/\/ 4 because of town-square, off-topic and direct\n\t\tt.Fatal(\"didn't join channel\")\n\t}\n\n\tfound := false\n\tfor _, c := range c1.Channels {\n\t\tif c.Name == channel2.Name {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\tt.Fatal(\"didn't join channel\")\n\t}\n}\n\nfunc TestEchoCommand(t *testing.T) {\n\tSetup()\n\n\tteam := &model.Team{DisplayName: \"Name\", Name: \"z-z-\" + model.NewId() + \"a\", Email: \"test@nowhere.com\", Type: model.TEAM_OPEN}\n\tteam = Client.Must(Client.CreateTeam(team)).Data.(*model.Team)\n\n\tuser1 := &model.User{TeamId: team.Id, Email: model.NewId() + \"corey@test.com\", Nickname: \"Corey Hulen\", Password: \"pwd\"}\n\tuser1 = Client.Must(Client.CreateUser(user1, \"\")).Data.(*model.User)\n\tstore.Must(Srv.Store.User().VerifyEmail(user1.Id))\n\n\tClient.LoginByEmail(team.Name, user1.Email, \"pwd\")\n\n\tchannel1 := &model.Channel{DisplayName: \"AA\", Name: \"aa\" + model.NewId() + \"a\", Type: model.CHANNEL_OPEN, TeamId: team.Id}\n\tchannel1 = Client.Must(Client.CreateChannel(channel1)).Data.(*model.Channel)\n\n\techoTestString := \"\/echo test\"\n\n\tr1 := Client.Must(Client.Command(channel1.Id, echoTestString, false)).Data.(*model.Command)\n\tif r1.Response != model.RESP_EXECUTED {\n\t\tt.Fatal(\"Echo command failed to execute\")\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tp1 := Client.Must(Client.GetPosts(channel1.Id, 0, 2, \"\")).Data.(*model.PostList)\n\tif len(p1.Order) != 1 {\n\t\tt.Fatal(\"Echo command failed to send\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ BaruwaAPI Golang bindings for Baruwa REST API\n\/\/ Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>\n\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\n\/\/ Package api Golang bindings for Baruwa REST API\npackage api\n\n\/\/ AliasDomain hold alias domain entries\ntype AliasDomain struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ DomainAlias holds domain aliases\ntype DomainAlias struct {\n\tID int `json:\"id,omitempty\"`\n\tAddress string `json:\"address\"`\n\tEnabled bool `json:\"enabled\"`\n\tDomain []AliasDomain `json:\"domain,omitempty\"`\n}\n\n\/\/ GetDomainAlias returns a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#retrieve-domain-alias\nfunc (c *Client) GetDomainAlias(id int) (alias *DomainAlias, err error) {\n\treturn\n}\n\n\/\/ CreateDomainAlias creates a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#create-a-domain-alias\nfunc (c *Client) CreateDomainAlias(domain *DomainAlias) (err error) {\n\treturn\n}\n\n\/\/ UpdateDomainAlias updates a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#update-a-domain-alias\nfunc (c *Client) UpdateDomainAlias(domain *DomainAlias) (err error) {\n\treturn\n}\n\n\/\/ DeleteDomainAlias deletes an domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#delete-a-domain-alias\nfunc (c *Client) DeleteDomainAlias(id int) (err error) {\n\treturn\n}\n<commit_msg>FET: Domain Alias implementation<commit_after>\/\/ BaruwaAPI Golang bindings for Baruwa REST API\n\/\/ Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>\n\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\n\/\/ Package api Golang bindings for Baruwa REST API\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ AliasDomain hold alias domain entries\ntype AliasDomain struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ DomainAlias holds domain aliases\ntype DomainAlias struct {\n\tID int `json:\"id,omitempty\"`\n\tAddress string `json:\"address\"`\n\tEnabled bool `json:\"enabled\"`\n\tDomain []AliasDomain `json:\"domain,omitempty\"`\n}\n\n\/\/ GetDomainAlias returns a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#retrieve-domain-alias\nfunc (c *Client) GetDomainAlias(domainid, aliasid int) (alias *DomainAlias, err error) {\n\tif domainid <= 0 {\n\t\terr = fmt.Errorf(\"The domainid param should be > 0\")\n\t\treturn\n\t}\n\n\tif aliasid <= 0 {\n\t\terr = fmt.Errorf(\"The aliasid param should be > 0\")\n\t\treturn\n\t}\n\n\terr = c.get(fmt.Sprintf(\"domainaliases\/%d\/%d\", domainid, aliasid), alias)\n\n\treturn\n}\n\n\/\/ CreateDomainAlias creates a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#create-a-domain-alias\nfunc (c *Client) CreateDomainAlias(domainid int, alias *DomainAlias) (err error) {\n\tvar v url.Values\n\n\tif domainid <= 0 {\n\t\terr = fmt.Errorf(\"The domainid param should be > 0\")\n\t\treturn\n\t}\n\n\tif alias == nil {\n\t\terr = fmt.Errorf(\"The alias param cannot be nil\")\n\t\treturn\n\t}\n\n\tif v, err = query.Values(alias); err != nil {\n\t\treturn\n\t}\n\n\terr = c.post(fmt.Sprintf(\"domainaliases\/%d\", domainid), v, alias)\n\n\treturn\n}\n\n\/\/ UpdateDomainAlias updates a domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#update-a-domain-alias\nfunc (c *Client) UpdateDomainAlias(domainid int, alias *DomainAlias) (err error) {\n\tvar v url.Values\n\n\tif domainid <= 0 {\n\t\terr = fmt.Errorf(\"The domainid param should be > 0\")\n\t\treturn\n\t}\n\n\tif alias == nil {\n\t\terr = fmt.Errorf(\"The alias param cannot be nil\")\n\t\treturn\n\t}\n\n\tif alias.ID <= 0 {\n\t\terr = fmt.Errorf(\"The alias.ID param should be > 0\")\n\t\treturn\n\t}\n\n\tif v, err = query.Values(alias); err != nil {\n\t\treturn\n\t}\n\n\terr = c.put(fmt.Sprintf(\"domainaliases\/%d\/%d\", domainid, alias.ID), v, alias)\n\n\treturn\n}\n\n\/\/ DeleteDomainAlias deletes an domain alias\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#delete-a-domain-alias\nfunc (c *Client) DeleteDomainAlias(domainid, aliasid int) (err error) {\n\tif domainid <= 0 {\n\t\terr = fmt.Errorf(\"The domainid param should be > 0\")\n\t\treturn\n\t}\n\n\tif aliasid <= 0 {\n\t\terr = fmt.Errorf(\"The aliasid param should be > 0\")\n\t\treturn\n\t}\n\n\terr = c.delete(fmt.Sprintf(\"domainaliases\/%d\/%d\", domainid, aliasid))\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/skeswa\/gophr\/common\/depot\"\n\t\"github.com\/skeswa\/gophr\/common\/errors\"\n)\n\nconst (\n\tblobHandlerURLVarAuthor = \"author\"\n\tblobHandlerURLVarRepo = \"repo\"\n\tblobHandlerURLVarSHA = \"sha\"\n\tblobHandlerURLVarPath = \"path\"\n)\n\ntype blobRequestArgs struct {\n\tauthor string\n\trepo string\n\tsha string\n\tpath string\n}\n\n\/\/ BlobHandler creates an HTTP request handler that responds to filepath lookups.\nfunc BlobHandler() func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\targs, err := extractBlobRequestArgs(r)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Request the filepath from depot gitweb.\n\t\thashedRepoName := depot.BuildHashedRepoName(args.author, args.repo, args.sha)\n\t\tdepotBlobURL := fmt.Sprintf(\"http:\/\/%s\/?p=%s;a=blob_plain;f=%s;hb=refs\/heads\/master\", depot.DepotInternalServiceAddress, hashedRepoName, args.path)\n\t\tdepotBlobResp, err := http.Get(depotBlobURL)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ If path was not found return 404.\n\t\tif depotBlobResp.StatusCode == 404 {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte{})\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(depotBlobResp.Body)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\t\tdepotBlobResp.Body.Close()\n\n\t\tif len(body) > 0 {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte(body))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte{})\n\t\t}\n\t}\n}\n\nfunc extractBlobRequestArgs(r *http.Request) (blobRequestArgs, error) {\n\tvars := mux.Vars(r)\n\targs := blobRequestArgs{}\n\n\targs.author = vars[blobHandlerURLVarAuthor]\n\tif len(args.author) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarAuthor, args.author)\n\t}\n\n\targs.repo = vars[blobHandlerURLVarRepo]\n\tif len(args.repo) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarRepo, args.repo)\n\t}\n\n\targs.sha = vars[blobHandlerURLVarSHA]\n\tif len(args.sha) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarSHA, args.sha)\n\t}\n\n\targs.path = vars[blobHandlerURLVarPath]\n\tif len(args.path) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarPath, args.path)\n\t}\n\n\treturn args, nil\n}\n<commit_msg>fix hander_blob to include .git<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/skeswa\/gophr\/common\/depot\"\n\t\"github.com\/skeswa\/gophr\/common\/errors\"\n)\n\nconst (\n\tblobHandlerURLVarAuthor = \"author\"\n\tblobHandlerURLVarRepo = \"repo\"\n\tblobHandlerURLVarSHA = \"sha\"\n\tblobHandlerURLVarPath = \"path\"\n)\n\ntype blobRequestArgs struct {\n\tauthor string\n\trepo string\n\tsha string\n\tpath string\n}\n\n\/\/ BlobHandler creates an HTTP request handler that responds to filepath lookups.\nfunc BlobHandler() func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\targs, err := extractBlobRequestArgs(r)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Request the filepath from depot gitweb.\n\t\thashedRepoName := depot.BuildHashedRepoName(args.author, args.repo, args.sha)\n\t\tdepotBlobURL := fmt.Sprintf(\"http:\/\/%s\/?p=%s.git;a=blob_plain;f=%s;hb=refs\/heads\/master\", depot.DepotInternalServiceAddress, hashedRepoName, args.path)\n\t\tdepotBlobResp, err := http.Get(depotBlobURL)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ If path was not found return 404.\n\t\tif depotBlobResp.StatusCode == 404 {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte{})\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(depotBlobResp.Body)\n\t\tif err != nil {\n\t\t\terrors.RespondWithError(w, err)\n\t\t\treturn\n\t\t}\n\t\tdepotBlobResp.Body.Close()\n\n\t\tif len(body) > 0 {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte(body))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte{})\n\t\t}\n\t}\n}\n\nfunc extractBlobRequestArgs(r *http.Request) (blobRequestArgs, error) {\n\tvars := mux.Vars(r)\n\targs := blobRequestArgs{}\n\n\targs.author = vars[blobHandlerURLVarAuthor]\n\tif len(args.author) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarAuthor, args.author)\n\t}\n\n\targs.repo = vars[blobHandlerURLVarRepo]\n\tif len(args.repo) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarRepo, args.repo)\n\t}\n\n\targs.sha = vars[blobHandlerURLVarSHA]\n\tif len(args.sha) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarSHA, args.sha)\n\t}\n\n\targs.path = vars[blobHandlerURLVarPath]\n\tif len(args.path) < 0 {\n\t\treturn args, NewInvalidURLParameterError(blobHandlerURLVarPath, args.path)\n\t}\n\n\treturn args, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gparselib\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\n\/\/ SubparserOp is a simple filter to the outside and gets the same data as the\n\/\/ parent parser.\ntype SubparserOp func(pd *ParseData, ctx interface{}) (*ParseData, interface{})\n\n\/\/ ParseMulti uses a subparser multiple times.\n\/\/ The minimum times the subparser has to match and the maximum times the\n\/\/ subparser can match have to be configured.\nfunc ParseMulti(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n\tcfgMin int,\n\tcfgMax int,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\trelPos := 0\n\tsubresults := make([]*ParseResult, 0, min(cfgMax, 128))\n\n\tfor i := 0; i < cfgMax && pd.Result == nil; i++ {\n\t\tpd, ctx = pluginSubparser(pd, ctx)\n\t\tif pd.Result.ErrPos < 0 {\n\t\t\tsubresults = append(subresults, pd.Result)\n\t\t\tpd.Result = nil\n\t\t}\n\t}\n\n\trelPos = pd.Source.pos - orgPos\n\tpd.Source.pos = orgPos\n\tif len(subresults) >= cfgMin {\n\t\tpd.Result = nil\n\t\tcreateMatchedResult(pd, relPos)\n\t\tsaveAllValuesFeedback(pd, subresults)\n\t} else {\n\t\tsubresult := pd.Result\n\t\tpd.Result = nil\n\t\tcreateUnmatchedResult(pd, relPos, fmt.Sprintf(\"At least %d matches expected but got only %d.\", cfgMin, len(subresults)), nil)\n\t\tpd.Result.Feedback = append(pd.Result.Feedback, subresult.Feedback...)\n\t}\n\tpd.SubResults = subresults\n\treturn handleSemantics(pluginSemantics, pd, ctx)\n}\n\n\/\/ ParseMulti0 uses its subparser at least one time without upper bound.\n\/\/ But the result is still positive even if the subparser didn't match a single\n\/\/ time.\nfunc ParseMulti0(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\treturn ParseMulti(pd, ctx, pluginSubparser, pluginSemantics, 0, math.MaxInt32)\n}\n\n\/\/ ParseMulti1 uses its subparser at least one time without upper bound.\n\/\/ The result is positive as long as the subparser matches at least one time.\nfunc ParseMulti1(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\treturn ParseMulti(pd, ctx, pluginSubparser, pluginSemantics, 1, math.MaxInt32)\n}\n\n\/\/ ParseOptional uses its subparser exaclty one time.\n\/\/ But the result is still positive even if the subparser didn't match.\nfunc ParseOptional(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\n\tpd, ctx = pluginSubparser(pd, ctx)\n\n\t\/\/ if error: reset to ignore\n\tif pd.Result.ErrPos >= 0 {\n\t\tpd.Result.ErrPos = -1\n\t\tpd.Result.Feedback = nil\n\t\tpd.Source.pos = orgPos\n\t}\n\n\treturn pd, ctx\n}\n\n\/\/ ParseAll calls multiple subparsers and all have to match for a successful result.\nfunc ParseAll(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparsers []SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\tsubresults := make([]*ParseResult, len(pluginSubparsers))\n\n\tfor i, subparser := range pluginSubparsers {\n\t\tpd, ctx = subparser(pd, ctx)\n\t\tif pd.Result.ErrPos >= 0 {\n\t\t\tpd.Source.pos = orgPos\n\t\t\tpd.Result.Pos = orgPos \/\/ make result 'our result'\n\t\t\treturn pd, ctx\n\t\t}\n\t\tsubresults[i] = pd.Result\n\t\tpd.Result = nil\n\t}\n\n\trelPos := pd.Source.pos - orgPos\n\tpd.Source.pos = orgPos\n\tcreateMatchedResult(pd, relPos)\n\tsaveAllValuesFeedback(pd, subresults)\n\tpd.SubResults = subresults\n\treturn handleSemantics(pluginSemantics, pd, ctx)\n}\n\n\/\/ ParseAny calls multiple subparsers until one matches.\n\/\/ The result is only unsuccessful if no subparser matches.\nfunc ParseAny(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparsers []SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\tallFeedback := make([]*FeedbackItem, 0, len(pluginSubparsers))\n\tlastPos := 0\n\n\tfor _, subparser := range pluginSubparsers {\n\t\tpd, ctx = subparser(pd, ctx)\n\t\tif pd.Result.ErrPos < 0 {\n\t\t\trelPos := pd.Source.pos - orgPos\n\t\t\tpd.Source.pos = orgPos\n\t\t\tcreateMatchedResult(pd, relPos)\n\t\t\treturn handleSemantics(pluginSemantics, pd, ctx)\n\t\t}\n\t\tlastPos = pd.Result.Pos\n\t\tpd.Source.pos = orgPos\n\t\tallFeedback = append(allFeedback, pd.Result.Feedback...)\n\t\tpd.Result = nil\n\t}\n\n\trelPos := lastPos - orgPos\n\tpd.Source.pos = orgPos\n\tpd.Result = nil\n\tcreateUnmatchedResult(pd, relPos, fmt.Sprintf(\"Any subparser should match. But all %d subparsers failed\", len(pluginSubparsers)), nil)\n\tpd.Result.Feedback = append(pd.Result.Feedback, allFeedback...)\n\treturn pd, ctx\n}\n\nfunc saveAllValuesFeedback(pd *ParseData, tmpSubresults []*ParseResult) {\n\ts := make([]interface{}, len(tmpSubresults))\n\tfor i, subres := range tmpSubresults {\n\t\ts[i] = subres.Value\n\t\tpd.Result.Feedback = append(pd.Result.Feedback, subres.Feedback...)\n\t}\n\tpd.Result.Value = s\n}\n<commit_msg>Fix result value bug with simplification<commit_after>package gparselib\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\n\/\/ SubparserOp is a simple filter to the outside and gets the same data as the\n\/\/ parent parser.\ntype SubparserOp func(pd *ParseData, ctx interface{}) (*ParseData, interface{})\n\n\/\/ ParseMulti uses a subparser multiple times.\n\/\/ The minimum times the subparser has to match and the maximum times the\n\/\/ subparser can match have to be configured.\nfunc ParseMulti(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n\tcfgMin int,\n\tcfgMax int,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\trelPos := 0\n\tsubresults := make([]*ParseResult, 0, min(cfgMax, 128))\n\n\tfor i := 0; i < cfgMax && pd.Result == nil; i++ {\n\t\tpd, ctx = pluginSubparser(pd, ctx)\n\t\tif pd.Result.ErrPos < 0 {\n\t\t\tsubresults = append(subresults, pd.Result)\n\t\t\tpd.Result = nil\n\t\t}\n\t}\n\n\trelPos = pd.Source.pos - orgPos\n\tpd.Source.pos = orgPos\n\tif len(subresults) >= cfgMin {\n\t\tpd.Result = nil\n\t\tcreateMatchedResult(pd, relPos)\n\t\tsaveAllValuesFeedback(pd, subresults)\n\t} else {\n\t\tsubresult := pd.Result\n\t\tpd.Result = nil\n\t\tcreateUnmatchedResult(pd, relPos, fmt.Sprintf(\"At least %d matches expected but got only %d.\", cfgMin, len(subresults)), nil)\n\t\tpd.Result.Feedback = append(pd.Result.Feedback, subresult.Feedback...)\n\t}\n\tpd.SubResults = subresults\n\treturn handleSemantics(pluginSemantics, pd, ctx)\n}\n\n\/\/ ParseMulti0 uses its subparser at least one time without upper bound.\n\/\/ But the result is still positive even if the subparser didn't match a single\n\/\/ time.\nfunc ParseMulti0(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\treturn ParseMulti(pd, ctx, pluginSubparser, pluginSemantics, 0, math.MaxInt32)\n}\n\n\/\/ ParseMulti1 uses its subparser at least one time without upper bound.\n\/\/ The result is positive as long as the subparser matches at least one time.\nfunc ParseMulti1(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\treturn ParseMulti(pd, ctx, pluginSubparser, pluginSemantics, 1, math.MaxInt32)\n}\n\n\/\/ ParseOptional uses its subparser exaclty one time.\n\/\/ But the result is still positive even if the subparser didn't match.\nfunc ParseOptional(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparser SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\n\tpd, ctx = pluginSubparser(pd, ctx)\n\n\t\/\/ if error: reset to ignore\n\tif pd.Result.ErrPos >= 0 {\n\t\tpd.Result.ErrPos = -1\n\t\tpd.Result.Feedback = nil\n\t\tpd.Source.pos = orgPos\n\t}\n\n\treturn pd, ctx\n}\n\n\/\/ ParseAll calls multiple subparsers and all have to match for a successful result.\nfunc ParseAll(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparsers []SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\tsubresults := make([]*ParseResult, len(pluginSubparsers))\n\n\tfor i, subparser := range pluginSubparsers {\n\t\tpd, ctx = subparser(pd, ctx)\n\t\tif pd.Result.ErrPos >= 0 {\n\t\t\tpd.Source.pos = orgPos\n\t\t\tpd.Result.Pos = orgPos \/\/ make result 'our result'\n\t\t\treturn pd, ctx\n\t\t}\n\t\tsubresults[i] = pd.Result\n\t\tpd.Result = nil\n\t}\n\n\trelPos := pd.Source.pos - orgPos\n\tpd.Source.pos = orgPos\n\tcreateMatchedResult(pd, relPos)\n\tsaveAllValuesFeedback(pd, subresults)\n\tpd.SubResults = subresults\n\treturn handleSemantics(pluginSemantics, pd, ctx)\n}\n\n\/\/ ParseAny calls multiple subparsers until one matches.\n\/\/ The result is only unsuccessful if no subparser matches.\nfunc ParseAny(\n\tpd *ParseData,\n\tctx interface{},\n\tpluginSubparsers []SubparserOp,\n\tpluginSemantics SemanticsOp,\n) (*ParseData, interface{}) {\n\torgPos := pd.Source.pos\n\tallFeedback := make([]*FeedbackItem, 0, len(pluginSubparsers))\n\tlastPos := 0\n\n\tfor _, subparser := range pluginSubparsers {\n\t\tpd, ctx = subparser(pd, ctx)\n\t\tif pd.Result.ErrPos < 0 {\n\t\t\treturn handleSemantics(pluginSemantics, pd, ctx)\n\t\t}\n\t\tlastPos = pd.Result.Pos\n\t\tpd.Source.pos = orgPos\n\t\tallFeedback = append(allFeedback, pd.Result.Feedback...)\n\t\tpd.Result = nil\n\t}\n\n\trelPos := lastPos - orgPos\n\tpd.Source.pos = orgPos\n\tpd.Result = nil\n\tcreateUnmatchedResult(pd, relPos, fmt.Sprintf(\"Any subparser should match. But all %d subparsers failed\", len(pluginSubparsers)), nil)\n\tpd.Result.Feedback = append(pd.Result.Feedback, allFeedback...)\n\treturn pd, ctx\n}\n\nfunc saveAllValuesFeedback(pd *ParseData, tmpSubresults []*ParseResult) {\n\ts := make([]interface{}, len(tmpSubresults))\n\tfor i, subres := range tmpSubresults {\n\t\ts[i] = subres.Value\n\t\tpd.Result.Feedback = append(pd.Result.Feedback, subres.Feedback...)\n\t}\n\tpd.Result.Value = s\n}\n<|endoftext|>"} {"text":"<commit_before>package framework\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/taskgraph\/taskgraph\/pkg\/etcdutil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\nvar (\n\tErrEpochMismatch = fmt.Errorf(\"server epoch mismatch\")\n)\n\nfunc (f *framework) CheckGRPCContext(ctx context.Context) error {\n\tmd, ok := metadata.FromContext(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Can't get grpc.Metadata from context: %v\", ctx)\n\t}\n\tepoch, err := strconv.ParseUint(md[\"epoch\"], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ send it to framework central select and check epoch.\n\tresChan := make(chan bool, 1)\n\tf.epochCheckChan <- &epochCheck{\n\t\tepoch: epoch,\n\t\tresChan: resChan,\n\t}\n\tok = <-resChan\n\tif ok {\n\t\treturn nil\n\t} else {\n\t\treturn ErrEpochMismatch\n\t}\n}\n\nfunc (f *framework) DataRequest(ctx context.Context, toID uint64, method string, input proto.Message) {\n\tepoch, ok := ctx.Value(epochKey).(uint64)\n\tif !ok {\n\t\tf.log.Fatalf(\"Can not find epochKey or cast is in DataRequest\")\n\t}\n\n\t\/\/ assumption here:\n\t\/\/ Event driven task will call this in a synchronous way so that\n\t\/\/ the epoch won't change at the time task sending this request.\n\t\/\/ Epoch may change, however, before the request is actually being sent.\n\tf.dataReqtoSendChan <- &dataRequest{\n\t\tctx: f.makeGRPCContext(ctx),\n\t\ttaskID: toID,\n\t\tepoch: epoch,\n\t\tinput: input,\n\t\tmethod: method,\n\t}\n}\n\n\/\/ encode metadata to context in grpc specific way\nfunc (f *framework) makeGRPCContext(ctx context.Context) context.Context {\n\tmd := metadata.MD{\n\t\t\"taskID\": strconv.FormatUint(f.taskID, 10),\n\t\t\"epoch\": strconv.FormatUint(f.epoch, 10),\n\t}\n\treturn metadata.NewContext(ctx, md)\n}\n\nfunc (f *framework) sendRequest(dr *dataRequest) {\n\taddr, err := etcdutil.GetAddress(f.etcdClient, f.name, dr.taskID)\n\tif err != nil {\n\t\tf.log.Printf(\"getAddress(%d) failed: %v\", dr.taskID, err)\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\t\/\/ TODO: save ClientConn creation steps.\n\tcc, err := grpc.Dial(addr, grpc.WithTimeout(heartbeatInterval))\n\t\/\/ we need to retry if some task failed and there is a temporary Get request failure.\n\tif err != nil {\n\t\tf.log.Printf(\"grpc.Dial from task %d (addr: %s) failed: %v\", dr.taskID, addr, err)\n\t\t\/\/ Should retry for other errors.\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\tdefer cc.Close()\n\tif dr.retry {\n\t\tf.log.Printf(\"retry data request %s to task %d, addr %s\", dr.method, dr.taskID, addr)\n\t} else {\n\t\tf.log.Printf(\"data request %s to task %d, addr %s\", dr.method, dr.taskID, addr)\n\t}\n\treply := f.task.CreateOutputMessage(dr.method)\n\terr = grpc.Invoke(dr.ctx, dr.method, dr.input, reply, cc)\n\tif err != nil {\n\t\tf.log.Printf(\"grpc.Invoke from task %d (addr: %s), method: %s, failed: %v\", dr.taskID, addr, dr.method, err)\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\n\tf.dataRespChan <- &dataResponse{\n\t\tepoch: dr.epoch,\n\t\ttaskID: dr.taskID,\n\t\tmethod: dr.method,\n\t\tinput: dr.input,\n\t\toutput: reply,\n\t}\n}\n\nfunc (f *framework) retrySendRequest(dr *dataRequest) {\n\t\/\/ we try again after the previous task key expires and hopefully another task\n\t\/\/ gets up and running.\n\ttime.Sleep(2 * heartbeatInterval)\n\tdr.retry = true\n\tf.dataReqtoSendChan <- dr\n}\n\n\/\/ Framework http server for data request.\n\/\/ Each request will be in the format: \"\/datareq?taskID=XXX&req=XXX\".\n\/\/ \"taskID\" indicates the requesting task. \"req\" is the meta data for this request.\n\/\/ On success, it should respond with requested data in http body.\nfunc (f *framework) startHTTP() {\n\tf.log.Printf(\"serving grpc on %s\\n\", f.ln.Addr())\n\tserver := f.task.CreateServer()\n\terr := server.Serve(f.ln)\n\tselect {\n\tcase <-f.httpStop:\n\t\tserver.Stop()\n\t\tf.log.Printf(\"grpc stops serving\")\n\tdefault:\n\t\tif err != nil {\n\t\t\tf.log.Fatalf(\"grpc.Serve returns error: %v\\n\", err)\n\t\t}\n\t}\n}\n\n\/\/ Close listener, stop HTTP server;\n\/\/ Write error message back to under-serving responses.\nfunc (f *framework) stopHTTP() {\n\tclose(f.httpStop)\n\tf.ln.Close()\n}\n\nfunc (f *framework) handleDataResp(ctx context.Context, resp *dataResponse) {\n\tf.task.DataReady(ctx, resp.taskID, resp.method, resp.output)\n}\n<commit_msg>grpc.WithTimeout comments<commit_after>package framework\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/taskgraph\/taskgraph\/pkg\/etcdutil\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\nvar (\n\tErrEpochMismatch = fmt.Errorf(\"server epoch mismatch\")\n)\n\nfunc (f *framework) CheckGRPCContext(ctx context.Context) error {\n\tmd, ok := metadata.FromContext(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"Can't get grpc.Metadata from context: %v\", ctx)\n\t}\n\tepoch, err := strconv.ParseUint(md[\"epoch\"], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ send it to framework central select and check epoch.\n\tresChan := make(chan bool, 1)\n\tf.epochCheckChan <- &epochCheck{\n\t\tepoch: epoch,\n\t\tresChan: resChan,\n\t}\n\tok = <-resChan\n\tif ok {\n\t\treturn nil\n\t} else {\n\t\treturn ErrEpochMismatch\n\t}\n}\n\nfunc (f *framework) DataRequest(ctx context.Context, toID uint64, method string, input proto.Message) {\n\tepoch, ok := ctx.Value(epochKey).(uint64)\n\tif !ok {\n\t\tf.log.Fatalf(\"Can not find epochKey or cast is in DataRequest\")\n\t}\n\n\t\/\/ assumption here:\n\t\/\/ Event driven task will call this in a synchronous way so that\n\t\/\/ the epoch won't change at the time task sending this request.\n\t\/\/ Epoch may change, however, before the request is actually being sent.\n\tf.dataReqtoSendChan <- &dataRequest{\n\t\tctx: f.makeGRPCContext(ctx),\n\t\ttaskID: toID,\n\t\tepoch: epoch,\n\t\tinput: input,\n\t\tmethod: method,\n\t}\n}\n\n\/\/ encode metadata to context in grpc specific way\nfunc (f *framework) makeGRPCContext(ctx context.Context) context.Context {\n\tmd := metadata.MD{\n\t\t\"taskID\": strconv.FormatUint(f.taskID, 10),\n\t\t\"epoch\": strconv.FormatUint(f.epoch, 10),\n\t}\n\treturn metadata.NewContext(ctx, md)\n}\n\nfunc (f *framework) sendRequest(dr *dataRequest) {\n\taddr, err := etcdutil.GetAddress(f.etcdClient, f.name, dr.taskID)\n\tif err != nil {\n\t\tf.log.Printf(\"getAddress(%d) failed: %v\", dr.taskID, err)\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\t\/\/ TODO: save ClientConn creation steps.\n\t\/\/ The grpc.WithTimeout would help detect any disconnection in failfast.\n\t\/\/ Otherwise grpc.Invoke will keep retrying.\n\tcc, err := grpc.Dial(addr, grpc.WithTimeout(heartbeatInterval))\n\t\/\/ we need to retry if some task failed and there is a temporary Get request failure.\n\tif err != nil {\n\t\tf.log.Printf(\"grpc.Dial from task %d (addr: %s) failed: %v\", dr.taskID, addr, err)\n\t\t\/\/ Should retry for other errors.\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\tdefer cc.Close()\n\tif dr.retry {\n\t\tf.log.Printf(\"retry data request %s to task %d, addr %s\", dr.method, dr.taskID, addr)\n\t} else {\n\t\tf.log.Printf(\"data request %s to task %d, addr %s\", dr.method, dr.taskID, addr)\n\t}\n\treply := f.task.CreateOutputMessage(dr.method)\n\terr = grpc.Invoke(dr.ctx, dr.method, dr.input, reply, cc)\n\tif err != nil {\n\t\tf.log.Printf(\"grpc.Invoke from task %d (addr: %s), method: %s, failed: %v\", dr.taskID, addr, dr.method, err)\n\t\tgo f.retrySendRequest(dr)\n\t\treturn\n\t}\n\n\tf.dataRespChan <- &dataResponse{\n\t\tepoch: dr.epoch,\n\t\ttaskID: dr.taskID,\n\t\tmethod: dr.method,\n\t\tinput: dr.input,\n\t\toutput: reply,\n\t}\n}\n\nfunc (f *framework) retrySendRequest(dr *dataRequest) {\n\t\/\/ we try again after the previous task key expires and hopefully another task\n\t\/\/ gets up and running.\n\ttime.Sleep(2 * heartbeatInterval)\n\tdr.retry = true\n\tf.dataReqtoSendChan <- dr\n}\n\n\/\/ Framework http server for data request.\n\/\/ Each request will be in the format: \"\/datareq?taskID=XXX&req=XXX\".\n\/\/ \"taskID\" indicates the requesting task. \"req\" is the meta data for this request.\n\/\/ On success, it should respond with requested data in http body.\nfunc (f *framework) startHTTP() {\n\tf.log.Printf(\"serving grpc on %s\\n\", f.ln.Addr())\n\tserver := f.task.CreateServer()\n\terr := server.Serve(f.ln)\n\tselect {\n\tcase <-f.httpStop:\n\t\tserver.Stop()\n\t\tf.log.Printf(\"grpc stops serving\")\n\tdefault:\n\t\tif err != nil {\n\t\t\tf.log.Fatalf(\"grpc.Serve returns error: %v\\n\", err)\n\t\t}\n\t}\n}\n\n\/\/ Close listener, stop HTTP server;\n\/\/ Write error message back to under-serving responses.\nfunc (f *framework) stopHTTP() {\n\tclose(f.httpStop)\n\tf.ln.Close()\n}\n\nfunc (f *framework) handleDataResp(ctx context.Context, resp *dataResponse) {\n\tf.task.DataReady(ctx, resp.taskID, resp.method, resp.output)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ns provides name server methods for selected name server(s)\npackage ns\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mehrdadrad\/mylg\/cli\"\n\t\"github.com\/mehrdadrad\/mylg\/data\"\n\t\"github.com\/miekg\/dns\"\n)\n\n\/\/ A Host represents a name server host\ntype Host struct {\n\tip string\n\talpha2 string\n\tcountry string\n\tcity string\n}\n\n\/\/ A Request represents a name server request\ntype Request struct {\n\tcountry string\n\thost string\n\thosts []Host\n}\n\n\/\/ NewRequest creates a new dns request object\nfunc NewRequest() *Request {\n\treturn &Request{host: \"\"}\n}\n\n\/\/ Init configure dns command and fetch name servers\nfunc (d *Request) Init() {\n\tif !d.cache(\"validate\") {\n\t\td.hosts = fetchNSHosts()\n\t\td.cache(\"write\")\n\t} else {\n\t\td.cache(\"read\")\n\t}\n}\n\n\/\/ SetCountryList init the connect contry items\nfunc (d *Request) SetCountryList(c *cli.Readline) {\n\tvar countries []string\n\tfor _, host := range d.hosts {\n\t\tcountries = append(countries, host.country)\n\t}\n\tcountries = uniqStrSlice(countries)\n\tsort.Strings(countries)\n\tc.UpdateCompleter(\"connect\", countries)\n}\n\n\/\/ SetArgs set requested country\nfunc (d *Request) SetArgs(args string) bool {\n\td.country = args\n\treturn true\n}\n\n\/\/ Look up name server\nfunc (d *Request) dnsLookup() {\n\t\/\/var list []DNSHost\n\n\tc := new(dns.Client)\n\tm := new(dns.Msg)\n\n\tm.SetQuestion(dns.Fqdn(\"yahoo.com\"), dns.TypeA)\n\tm.RecursionDesired = true\n\tr, _, err := c.Exchange(m, \"8.8.8.8:53\")\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\tfor _, a := range r.Answer {\n\t\tfmt.Printf(\"%#v\\n\", a)\n\t}\n}\n\n\/\/ cache provides caching for name servers\nfunc (d *Request) cache(r string) bool {\n\tswitch r {\n\tcase \"read\":\n\t\tb, err := ioutil.ReadFile(\"\/tmp\/mylg.ns\")\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\td.hosts = d.hosts[:0]\n\t\tr := bytes.NewBuffer(b)\n\t\ts := bufio.NewScanner(r)\n\t\tfor s.Scan() {\n\t\t\tcsv := strings.Split(s.Text(), \";\")\n\t\t\tif len(csv) != 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.hosts = append(d.hosts, Host{alpha2: csv[0], country: csv[1], city: csv[2], ip: csv[3]})\n\t\t}\n\tcase \"write\":\n\t\tvar data []string\n\t\tfor _, h := range d.hosts {\n\t\t\tdata = append(data, fmt.Sprintf(\"%s;%s;%s;%s\", h.alpha2, h.country, h.city, h.ip))\n\t\t}\n\t\terr := ioutil.WriteFile(\"\/tmp\/mylg.ns\", []byte(strings.Join(data, \"\\n\")), 0644)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tprintln(\"write done!\")\n\tcase \"validate\":\n\t\tf, err := os.Stat(\"\/tmp\/mylg.ns\")\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\td := time.Since(f.ModTime())\n\t\tif d.Hours() > 48 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Fetch name servers from public-dns.info\nfunc fetchNSHosts() []Host {\n\tvar (\n\t\thosts []Host\n\t\tcounter = make(map[string]int)\n\t)\n\tresp, err := http.Get(\"http:\/\/public-dns.info\/nameservers.csv\")\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tscanner := bufio.NewScanner(resp.Body)\n\tfor scanner.Scan() {\n\t\tcsv := strings.Split(scanner.Text(), \",\")\n\t\tif csv[3] != \"\\\"\\\"\" {\n\t\t\tif name, ok := data.Country[csv[2]]; ok && counter[csv[2]] < 5 {\n\t\t\t\thosts = append(hosts, Host{ip: csv[0], alpha2: csv[2], country: name, city: csv[3]})\n\t\t\t\tcounter[csv[2]]++\n\t\t\t}\n\t\t}\n\t}\n\treturn hosts\n}\n\n\/\/ uniqStrSlice return unique slice\nfunc uniqStrSlice(src []string) []string {\n\tvar rst []string\n\ttmp := make(map[string]struct{})\n\tfor _, s := range src {\n\t\ttmp[s] = struct{}{}\n\t}\n\tfor s := range tmp {\n\t\trst = append(rst, s)\n\t}\n\treturn rst\n}\n<commit_msg>added set node list<commit_after>\/\/ Package ns provides name server methods for selected name server(s)\npackage ns\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mehrdadrad\/mylg\/cli\"\n\t\"github.com\/mehrdadrad\/mylg\/data\"\n\t\"github.com\/miekg\/dns\"\n)\n\n\/\/ A Host represents a name server host\ntype Host struct {\n\tip string\n\talpha2 string\n\tcountry string\n\tcity string\n}\n\n\/\/ A Request represents a name server request\ntype Request struct {\n\tcountry string\n\thost string\n\thosts []Host\n}\n\n\/\/ NewRequest creates a new dns request object\nfunc NewRequest() *Request {\n\treturn &Request{host: \"\"}\n}\n\n\/\/ Init configure dns command and fetch name servers\nfunc (d *Request) Init() {\n\tif !d.cache(\"validate\") {\n\t\td.hosts = fetchNSHosts()\n\t\td.cache(\"write\")\n\t} else {\n\t\td.cache(\"read\")\n\t}\n}\n\n\/\/ SetCountryList init the connect contry items\nfunc (d *Request) SetCountryList(c *cli.Readline) {\n\tvar countries []string\n\tfor _, host := range d.hosts {\n\t\tcountries = append(countries, host.country)\n\t}\n\tcountries = uniqStrSlice(countries)\n\tsort.Strings(countries)\n\tc.UpdateCompleter(\"connect\", countries)\n}\n\n\/\/ SetNodeList init the node city items\nfunc (d *Request) SetNodeList(c *cli.Readline) {\n\tvar node []string\n\tfor _, host := range d.hosts {\n\t\tif host.country == d.country {\n\t\t\tnode = append(node, host.city)\n\t\t}\n\t}\n\tsort.Strings(node)\n\tc.UpdateCompleter(\"node\", node)\n}\n\n\/\/ ValidateCountry set requested country\nfunc (d *Request) ValidateCountry(args string) bool {\n\td.country = args\n\treturn true\n}\n\n\/\/ Look up name server\nfunc (d *Request) dnsLookup() {\n\t\/\/var list []DNSHost\n\n\tc := new(dns.Client)\n\tm := new(dns.Msg)\n\n\tm.SetQuestion(dns.Fqdn(\"yahoo.com\"), dns.TypeA)\n\tm.RecursionDesired = true\n\tr, _, err := c.Exchange(m, \"8.8.8.8:53\")\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\tfor _, a := range r.Answer {\n\t\tfmt.Printf(\"%#v\\n\", a)\n\t}\n}\n\n\/\/ cache provides caching for name servers\nfunc (d *Request) cache(r string) bool {\n\tswitch r {\n\tcase \"read\":\n\t\tb, err := ioutil.ReadFile(\"\/tmp\/mylg.ns\")\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\td.hosts = d.hosts[:0]\n\t\tr := bytes.NewBuffer(b)\n\t\ts := bufio.NewScanner(r)\n\t\tfor s.Scan() {\n\t\t\tcsv := strings.Split(s.Text(), \";\")\n\t\t\tif len(csv) != 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.hosts = append(d.hosts, Host{alpha2: csv[0], country: csv[1], city: csv[2], ip: csv[3]})\n\t\t}\n\tcase \"write\":\n\t\tvar data []string\n\t\tfor _, h := range d.hosts {\n\t\t\tdata = append(data, fmt.Sprintf(\"%s;%s;%s;%s\", h.alpha2, h.country, h.city, h.ip))\n\t\t}\n\t\terr := ioutil.WriteFile(\"\/tmp\/mylg.ns\", []byte(strings.Join(data, \"\\n\")), 0644)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tprintln(\"write done!\")\n\tcase \"validate\":\n\t\tf, err := os.Stat(\"\/tmp\/mylg.ns\")\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\td := time.Since(f.ModTime())\n\t\tif d.Hours() > 48 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Fetch name servers from public-dns.info\nfunc fetchNSHosts() []Host {\n\tvar (\n\t\thosts []Host\n\t\tcounter = make(map[string]int)\n\t)\n\tresp, err := http.Get(\"http:\/\/public-dns.info\/nameservers.csv\")\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tscanner := bufio.NewScanner(resp.Body)\n\tfor scanner.Scan() {\n\t\tcsv := strings.Split(scanner.Text(), \",\")\n\t\tif csv[3] != \"\\\"\\\"\" {\n\t\t\tif name, ok := data.Country[csv[2]]; ok && counter[csv[2]] < 5 {\n\t\t\t\thosts = append(hosts, Host{ip: csv[0], alpha2: csv[2], country: name, city: csv[3]})\n\t\t\t\tcounter[csv[2]]++\n\t\t\t}\n\t\t}\n\t}\n\treturn hosts\n}\n\n\/\/ uniqStrSlice return unique slice\nfunc uniqStrSlice(src []string) []string {\n\tvar rst []string\n\ttmp := make(map[string]struct{})\n\tfor _, s := range src {\n\t\ttmp[s] = struct{}{}\n\t}\n\tfor s := range tmp {\n\t\trst = append(rst, s)\n\t}\n\treturn rst\n}\n<|endoftext|>"} {"text":"<commit_before>package dns\n\nimport (\n\t\"crypto\/sha1\"\n\t\"hash\"\n\t\"io\"\n\t\"strings\"\n)\n\nconst (\n\t_ = iota\n\tNSEC3_NXDOMAIN\n\tNSEC3_NODATA\n)\n\ntype saltWireFmt struct {\n\tSalt string \"size-hex\"\n}\n\n\/\/ HashName hashes a string (label) according to RFC5155. It returns the hashed string.\nfunc HashName(label string, ha uint8, iter uint16, salt string) string {\n\tsaltwire := new(saltWireFmt)\n\tsaltwire.Salt = salt\n\twire := make([]byte, DefaultMsgSize)\n\tn, ok := packStruct(saltwire, wire, 0)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\twire = wire[:n]\n\tname := make([]byte, 255)\n\toff, ok1 := PackDomainName(strings.ToLower(label), name, 0, nil, false)\n\tif !ok1 {\n\t\treturn \"\"\n\t}\n\tname = name[:off]\n\tvar s hash.Hash\n\tswitch ha {\n\tcase SHA1:\n\t\ts = sha1.New()\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\t\/\/ k = 0\n\tname = append(name, wire...)\n\tio.WriteString(s, string(name))\n\tnsec3 := s.Sum(nil)\n\t\/\/ k > 0\n\tfor k := uint16(0); k < iter; k++ {\n\t\ts.Reset()\n\t\tnsec3 = append(nsec3, wire...)\n\t\tio.WriteString(s, string(nsec3))\n\t\tnsec3 = s.Sum(nil)\n\t}\n\treturn unpackBase32(nsec3)\n}\n\n\/\/ HashNames hashes the ownername and the next owner name in an NSEC3 record according to RFC 5155.\n\/\/ It uses the paramaters as set in the NSEC3 record. The string zone is appended to the hashed\n\/\/ ownername.\nfunc (nsec3 *RR_NSEC3) HashNames(zone string) {\n\tnsec3.Header().Name = strings.ToLower(HashName(nsec3.Header().Name, nsec3.Hash, nsec3.Iterations, nsec3.Salt)) + \".\" + zone\n\tnsec3.NextDomain = HashName(nsec3.NextDomain, nsec3.Hash, nsec3.Iterations, nsec3.Salt)\n}\n\n\/\/ Match checks if domain matches the first (hashed) owner name of the NSEC3 record, domain must be given\n\/\/ in plain text.\nfunc (nsec3 *RR_NSEC3) Match(domain string) bool {\n\treturn strings.ToUpper(SplitLabels(nsec3.Header().Name)[0]) == strings.ToUpper(HashName(domain, nsec3.Hash, nsec3.Iterations, nsec3.Salt))\n}\n\n\/\/ Cover checks if domain is covered by the NSEC3 record, domain must be given in plain text.\nfunc (nsec3 *RR_NSEC3) Cover(domain string) bool {\n\thashdom := strings.ToUpper(HashName(domain, nsec3.Hash, nsec3.Iterations, nsec3.Salt))\n\tnextdom := strings.ToUpper(nsec3.NextDomain)\n\towner := strings.ToUpper(SplitLabels(nsec3.Header().Name)[0]) \/\/ The hashed part\n\tapex := strings.ToUpper(HashName(strings.Join(SplitLabels(nsec3.Header().Name)[1:], \".\"), nsec3.Hash, nsec3.Iterations, nsec3.Salt)) + \".\" \/\/ The name of the zone\n\t\/\/ if nextdomain equals the apex, it is considered The End. So in that case hashdom is always less then nextdomain\n\tif hashdom > owner && nextdom == apex {\n\t\treturn true\n\t}\n\n\tif hashdom > owner && hashdom <= nextdom {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ NsecVerify verifies an denial of existence response with NSECs\n\/\/ NsecVerify returns nil when the NSECs in the message contain\n\/\/ the correct proof. This function does not validates the NSECs.\nfunc (m *Msg) NsecVerify(q Question) error {\n\n\treturn nil\n}\n\n\/\/ Nsec3Verify verifies an denial of existence response with NSEC3s.\n\/\/ This function does not validate the NSEC3s.\nfunc (m *Msg) Nsec3Verify(q Question) (int, error) {\n\tvar (\n\t\tnsec3 []*RR_NSEC3\n\t\tncdenied = false \/\/ next closer denied\n\t\tsodenied = false \/\/ source of synthesis denied\n\t\tce = \"\" \/\/ closest encloser\n\t\tnc = \"\" \/\/ next closer\n\t\tso = \"\" \/\/ source of synthesis\n\t)\n\tif len(m.Answer) > 0 && len(m.Ns) > 0 {\n\t\t\/\/ Wildcard expansion\n\t\t\/\/ Closest encloser inferred from SIG in authority and qname\n\t\t\/\/ println(\"EXPANDED WILDCARD PROOF or DNAME CNAME\")\n\t\t\/\/ println(\"NODATA\")\n\t\t\/\/ I need to check the type bitmap\n\t\t\/\/ wildcard bit not set?\n\t\t\/\/ MM: No need to check the wildcard bit here:\n\t\t\/\/ This response has only 1 NSEC4 and it does not match\n\t\t\/\/ the closest encloser (it covers next closer).\n\t}\n\tif len(m.Answer) == 0 && len(m.Ns) > 0 {\n\t\t\/\/ Maybe an NXDOMAIN or NODATA, we only know when we check\n\t\tfor _, n := range m.Ns {\n\t\t\tif n.Header().Rrtype == TypeNSEC3 {\n\t\t\t\tnsec3 = append(nsec3, n.(*RR_NSEC3))\n\t\t\t}\n\t\t}\n\t\tif len(nsec3) == 0 {\n\t\t\treturn 0, ErrDenialNsec3\n\t\t}\n\n\t\tlastchopped := \"\"\n\t\tlabels := SplitLabels(q.Name)\n\n\t\t\/\/ Find the closest encloser and create the next closer\n\t\tfor _, nsec := range nsec3 {\n\t\t\tcandidate := \"\"\n\t\t\tfor i := len(labels) - 1; i >= 0; i-- {\n\t\t\t\tcandidate = labels[i] + \".\" + candidate\n\t\t\t\tif nsec.Match(candidate) {\n\t\t\t\t\tce = candidate\n\t\t\t\t}\n\t\t\t\tlastchopped = labels[i]\n\t\t\t}\n\t\t}\n\t\tif ce == \"\" { \/\/ what about root label?\n\t\t\treturn 0, ErrDenialCe\n\t\t}\n\t\tnc = lastchopped + \".\" + ce\n\t\tso = \"*.\" + ce\n\t\t\/\/ Check if the next closer is covered and thus denied\n\t\tfor _, nsec := range nsec3 {\n\t\t\tif nsec.Cover(nc) {\n\t\t\t\tncdenied = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ncdenied {\n\t\t\tif m.MsgHdr.Rcode == RcodeNameError {\n\t\t\t\t\/\/ For NXDOMAIN this is a problem\n\t\t\t\treturn 0, ErrDenialNc \/\/ add next closer name here\n\t\t\t}\n\t\t\tgoto NoData\n\t\t}\n\n\t\t\/\/ Check if the source of synthesis is covered and thus also denied\n\t\tfor _, nsec := range nsec3 {\n\t\t\tif nsec.Cover(so) {\n\t\t\t\tsodenied = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sodenied {\n\t\t\treturn 0, ErrDenialSo\n\t\t}\n\t\t\/\/ The message headers claims something different!\n\t\tif m.MsgHdr.Rcode != RcodeNameError {\n\t\t\treturn 0, ErrDenialHdr\n\t\t}\n\n\t\treturn NSEC3_NXDOMAIN, nil\n\t}\n\treturn 0, nil\nNoData:\n\t\/\/ For NODATA we need to to check if the matching nsec3 has to correct type bit map\n\t\/\/ And we need to check that the wildcard does NOT exist\n\tfor _, nsec := range nsec3 {\n\t\tif nsec.Cover(so) {\n\t\t\tsodenied = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif sodenied {\n\t\t\/\/ Whoa, the closest encloser is denied, but there does exist\n\t\t\/\/ a wildcard a that level. That's not good\n\t\treturn 0, ErrDenialWc\n\t}\n\n\t\/\/ The closest encloser MUST be the query name\n\tfor _, nsec := range nsec3 {\n\t\tif nsec.Match(nc) {\n\t\t\t\/\/ This nsec3 must NOT have the type bitmap set of the qtype. If it does have it, return an error\n\t\t\tfor _, t := range nsec.TypeBitMap {\n\t\t\t\tif t == q.Qtype {\n\t\t\t\t\treturn 0, ErrDenialBit\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif m.MsgHdr.Rcode == RcodeNameError {\n\t\treturn 0, ErrDenialHdr\n\t}\n\treturn NSEC3_NODATA, nil\n}\n<commit_msg>prototype nsec cover method<commit_after>package dns\n\nimport (\n\t\"crypto\/sha1\"\n\t\"hash\"\n\t\"io\"\n\t\"strings\"\n)\n\nconst (\n\t_ = iota\n\tNSEC3_NXDOMAIN\n\tNSEC3_NODATA\n)\n\ntype saltWireFmt struct {\n\tSalt string \"size-hex\"\n}\n\n\/\/ HashName hashes a string (label) according to RFC5155. It returns the hashed string.\nfunc HashName(label string, ha uint8, iter uint16, salt string) string {\n\tsaltwire := new(saltWireFmt)\n\tsaltwire.Salt = salt\n\twire := make([]byte, DefaultMsgSize)\n\tn, ok := packStruct(saltwire, wire, 0)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\twire = wire[:n]\n\tname := make([]byte, 255)\n\toff, ok1 := PackDomainName(strings.ToLower(label), name, 0, nil, false)\n\tif !ok1 {\n\t\treturn \"\"\n\t}\n\tname = name[:off]\n\tvar s hash.Hash\n\tswitch ha {\n\tcase SHA1:\n\t\ts = sha1.New()\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\t\/\/ k = 0\n\tname = append(name, wire...)\n\tio.WriteString(s, string(name))\n\tnsec3 := s.Sum(nil)\n\t\/\/ k > 0\n\tfor k := uint16(0); k < iter; k++ {\n\t\ts.Reset()\n\t\tnsec3 = append(nsec3, wire...)\n\t\tio.WriteString(s, string(nsec3))\n\t\tnsec3 = s.Sum(nil)\n\t}\n\treturn unpackBase32(nsec3)\n}\n\n\/\/ HashNames hashes the ownername and the next owner name in an NSEC3 record according to RFC 5155.\n\/\/ It uses the paramaters as set in the NSEC3 record. The string zone is appended to the hashed\n\/\/ ownername.\nfunc (nsec3 *RR_NSEC3) HashNames(zone string) {\n\tnsec3.Header().Name = strings.ToLower(HashName(nsec3.Header().Name, nsec3.Hash, nsec3.Iterations, nsec3.Salt)) + \".\" + zone\n\tnsec3.NextDomain = HashName(nsec3.NextDomain, nsec3.Hash, nsec3.Iterations, nsec3.Salt)\n}\n\n\/\/ Match checks if domain matches the first (hashed) owner name of the NSEC3 record. Domain must be given\n\/\/ in plain text.\nfunc (nsec3 *RR_NSEC3) Match(domain string) bool {\n\treturn strings.ToUpper(SplitLabels(nsec3.Header().Name)[0]) == strings.ToUpper(HashName(domain, nsec3.Hash, nsec3.Iterations, nsec3.Salt))\n}\n\n\/\/ Cover checks if domain is covered by the NSEC3 record. Domain must be given in plain text.\nfunc (nsec3 *RR_NSEC3) Cover(domain string) bool {\n\thashdom := strings.ToUpper(HashName(domain, nsec3.Hash, nsec3.Iterations, nsec3.Salt))\n\tnextdom := strings.ToUpper(nsec3.NextDomain)\n\towner := strings.ToUpper(SplitLabels(nsec3.Header().Name)[0]) \/\/ The hashed part\n\tapex := strings.ToUpper(HashName(strings.Join(SplitLabels(nsec3.Header().Name)[1:], \".\"), nsec3.Hash, nsec3.Iterations, nsec3.Salt)) + \".\" \/\/ The name of the zone\n\t\/\/ if nextdomain equals the apex, it is considered The End. So in that case hashdom is always less then nextdomain\n\tif hashdom > owner && nextdom == apex {\n\t\treturn true\n\t}\n\n\tif hashdom > owner && hashdom <= nextdom {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ Cover checks if domain is covered by the NSEC record. Domain must be given in plain text.\nfunc (nsec *RR_NSEC) Cover(domain string) bool {\n\treturn false\n}\n\n\/\/ NsecVerify verifies an denial of existence response with NSECs\n\/\/ NsecVerify returns nil when the NSECs in the message contain\n\/\/ the correct proof. This function does not validates the NSECs.\nfunc (m *Msg) NsecVerify(q Question) error {\n\n\treturn nil\n}\n\n\/\/ Nsec3Verify verifies an denial of existence response with NSEC3s.\n\/\/ This function does not validate the NSEC3s.\nfunc (m *Msg) Nsec3Verify(q Question) (int, error) {\n\tvar (\n\t\tnsec3 []*RR_NSEC3\n\t\tncdenied = false \/\/ next closer denied\n\t\tsodenied = false \/\/ source of synthesis denied\n\t\tce = \"\" \/\/ closest encloser\n\t\tnc = \"\" \/\/ next closer\n\t\tso = \"\" \/\/ source of synthesis\n\t)\n\tif len(m.Answer) > 0 && len(m.Ns) > 0 {\n\t\t\/\/ Wildcard expansion\n\t\t\/\/ Closest encloser inferred from SIG in authority and qname\n\t\t\/\/ println(\"EXPANDED WILDCARD PROOF or DNAME CNAME\")\n\t\t\/\/ println(\"NODATA\")\n\t\t\/\/ I need to check the type bitmap\n\t\t\/\/ wildcard bit not set?\n\t\t\/\/ MM: No need to check the wildcard bit here:\n\t\t\/\/ This response has only 1 NSEC4 and it does not match\n\t\t\/\/ the closest encloser (it covers next closer).\n\t}\n\tif len(m.Answer) == 0 && len(m.Ns) > 0 {\n\t\t\/\/ Maybe an NXDOMAIN or NODATA, we only know when we check\n\t\tfor _, n := range m.Ns {\n\t\t\tif n.Header().Rrtype == TypeNSEC3 {\n\t\t\t\tnsec3 = append(nsec3, n.(*RR_NSEC3))\n\t\t\t}\n\t\t}\n\t\tif len(nsec3) == 0 {\n\t\t\treturn 0, ErrDenialNsec3\n\t\t}\n\n\t\tlastchopped := \"\"\n\t\tlabels := SplitLabels(q.Name)\n\n\t\t\/\/ Find the closest encloser and create the next closer\n\t\tfor _, nsec := range nsec3 {\n\t\t\tcandidate := \"\"\n\t\t\tfor i := len(labels) - 1; i >= 0; i-- {\n\t\t\t\tcandidate = labels[i] + \".\" + candidate\n\t\t\t\tif nsec.Match(candidate) {\n\t\t\t\t\tce = candidate\n\t\t\t\t}\n\t\t\t\tlastchopped = labels[i]\n\t\t\t}\n\t\t}\n\t\tif ce == \"\" { \/\/ what about root label?\n\t\t\treturn 0, ErrDenialCe\n\t\t}\n\t\tnc = lastchopped + \".\" + ce\n\t\tso = \"*.\" + ce\n\t\t\/\/ Check if the next closer is covered and thus denied\n\t\tfor _, nsec := range nsec3 {\n\t\t\tif nsec.Cover(nc) {\n\t\t\t\tncdenied = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ncdenied {\n\t\t\tif m.MsgHdr.Rcode == RcodeNameError {\n\t\t\t\t\/\/ For NXDOMAIN this is a problem\n\t\t\t\treturn 0, ErrDenialNc \/\/ add next closer name here\n\t\t\t}\n\t\t\tgoto NoData\n\t\t}\n\n\t\t\/\/ Check if the source of synthesis is covered and thus also denied\n\t\tfor _, nsec := range nsec3 {\n\t\t\tif nsec.Cover(so) {\n\t\t\t\tsodenied = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sodenied {\n\t\t\treturn 0, ErrDenialSo\n\t\t}\n\t\t\/\/ The message headers claims something different!\n\t\tif m.MsgHdr.Rcode != RcodeNameError {\n\t\t\treturn 0, ErrDenialHdr\n\t\t}\n\n\t\treturn NSEC3_NXDOMAIN, nil\n\t}\n\treturn 0, nil\nNoData:\n\t\/\/ For NODATA we need to to check if the matching nsec3 has to correct type bit map\n\t\/\/ And we need to check that the wildcard does NOT exist\n\tfor _, nsec := range nsec3 {\n\t\tif nsec.Cover(so) {\n\t\t\tsodenied = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif sodenied {\n\t\t\/\/ Whoa, the closest encloser is denied, but there does exist\n\t\t\/\/ a wildcard a that level. That's not good\n\t\treturn 0, ErrDenialWc\n\t}\n\n\t\/\/ The closest encloser MUST be the query name\n\tfor _, nsec := range nsec3 {\n\t\tif nsec.Match(nc) {\n\t\t\t\/\/ This nsec3 must NOT have the type bitmap set of the qtype. If it does have it, return an error\n\t\t\tfor _, t := range nsec.TypeBitMap {\n\t\t\t\tif t == q.Qtype {\n\t\t\t\t\treturn 0, ErrDenialBit\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif m.MsgHdr.Rcode == RcodeNameError {\n\t\treturn 0, ErrDenialHdr\n\t}\n\treturn NSEC3_NODATA, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package multiverse\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nfunc (r Rarity) Ok(c *Card) (bool, error) {\n\tfor _, printing := range c.Printings {\n\t\tif printing.Rarity == r {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (m ManaColor) Ok(c *Card) (bool, error) {\n\tif m == ManaColors.Colorless {\n\t\treturn c.Colors == 0, nil\n\t}\n\treturn c.Colors&m != 0, nil\n}\n\ntype Filter interface {\n\tOk(*Card) (bool, error)\n}\n\nfunc (m Multiverse) Search(f Filter) ([]*Card, error) {\n\tc := m.Cards\n\tcores := runtime.GOMAXPROCS(-1)\n\tsectionLen := c.Len() \/ cores\n\n\tcardChan := make(chan *Card, cores*16)\n\tdoneChan := make(chan bool)\n\terrChan := make(chan error)\n\tlist := make([]*Card, 0, 1)\n\n\tfor i := 0; i < cores; i++ {\n\t\tstart := sectionLen * i\n\t\tend := start + sectionLen\n\n\t\tif i == cores-1 {\n\t\t\tend = c.Len()\n\t\t}\n\n\t\tgo func(start, end int) {\n\t\t\tfor j := range c[start:end] {\n\t\t\t\tok, err := f.Ok(&c[j+start])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tcardChan <- &c[j+start]\n\t\t\t\t}\n\t\t\t}\n\t\t\tdoneChan <- true\n\t\t}(start, end)\n\t}\n\n\tfor cores > 0 {\n\t\tselect {\n\t\tcase <-doneChan:\n\t\t\tcores--\n\t\tcase c := <-cardChan:\n\t\t\tappendNonDuplicateToCardList(&list, c)\n\t\tcase err := <-errChan:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor len(cardChan) > 0 {\n\t\tc := <-cardChan\n\t\tappendNonDuplicateToCardList(&list, c)\n\n\t}\n\n\tshortList := make([]*Card, len(list))\n\n\tfor i, card := range list {\n\t\tshortList[i] = card\n\t}\n\n\treturn shortList, nil\n}\n\nfunc appendToCardList(list *[]*Card, c *Card) {\n\tfor _, card := range *list {\n\t\tif card == c {\n\t\t\treturn\n\t\t}\n\t}\n\tappendNonDuplicateToCardList(list, c)\n}\n\nfunc appendNonDuplicateToCardList(list *[]*Card, c *Card) {\n\tl := len(*list)\n\tif l < cap(*list) {\n\t\t*list = (*list)[:l+1]\n\t} else {\n\t\tnewList := make([]*Card, l+1, l*2)\n\t\tcopy(newList, *list)\n\t\t*list = newList\n\t}\n\t(*list)[l] = c\n}\n\ntype Not struct {\n\tFilter\n}\n\nfunc (n Not) Ok(c *Card) (bool, error) {\n\tok, err := n.Filter.Ok(c)\n\treturn !ok, err\n}\n\ntype And []Filter\n\nfunc (a And) Ok(c *Card) (bool, error) {\n\tfor _, f := range a {\n\t\tok, err := f.Ok(c)\n\t\tif !ok || err != nil {\n\t\t\treturn ok, err\n\t\t}\n\t}\n\treturn true, nil\n}\n\ntype Or []Filter\n\nfunc (o Or) Ok(c *Card) (bool, error) {\n\tfor _, f := range o {\n\t\tok, err := f.Ok(c)\n\t\tif ok || err != nil {\n\t\t\treturn ok, err\n\t\t}\n\t}\n\treturn false, nil\n}\n\ntype Cond map[string]interface{}\n\nfunc (c Cond) Ok(card *Card) (bool, error) {\n\tfor key, val := range c {\n\t\tswitch key {\n\t\tcase \"color\", \"colors\":\n\t\t\tsame, err := handleColorSearch(card.Colors, val)\n\t\t\tif err != nil || !same {\n\t\t\t\treturn same, err\n\t\t\t}\n\t\t}\n\t}\n\treturn true, nil\n}\n\nfunc handleColorSearch(cardColor ManaColor, val interface{}) (bool, error) {\n\tswitch val := val.(type) {\n\tcase ManaColor:\n\t\tif cardColor&val != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\treturn false, fmt.Errorf(\"unexpected color type %T\", val)\n}\n<commit_msg>Made sure we allow the search goroutines to exit.<commit_after>package multiverse\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nfunc (r Rarity) Ok(c *Card) (bool, error) {\n\tfor _, printing := range c.Printings {\n\t\tif printing.Rarity == r {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc (m ManaColor) Ok(c *Card) (bool, error) {\n\tif m == ManaColors.Colorless {\n\t\treturn c.Colors == 0, nil\n\t}\n\treturn c.Colors&m != 0, nil\n}\n\ntype Filter interface {\n\tOk(*Card) (bool, error)\n}\n\nfunc (m Multiverse) Search(f Filter) ([]*Card, error) {\n\tc := m.Cards\n\tcores := runtime.GOMAXPROCS(-1)\n\tsectionLen := c.Len() \/ cores\n\n\tcardChan := make(chan *Card, 16*cores)\n\tdoneChan := make(chan bool)\n\terrChan := make(chan error)\n\tlist := make([]*Card, 0, 1)\n\n\tfor i := 0; i < cores; i++ {\n\t\tstart := sectionLen * i\n\t\tend := start + sectionLen\n\n\t\tif i == cores-1 {\n\t\t\tend = c.Len()\n\t\t}\n\n\t\tgo func(start, end int) {\n\t\t\tfor j := range c[start:end] {\n\t\t\t\tok, err := f.Ok(&c[j+start])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tcardChan <- &c[j+start]\n\t\t\t\t}\n\t\t\t}\n\t\t\tdoneChan <- true\n\t\t}(start, end)\n\t}\n\n\tfinishChans := func() {\n\t\tfor cores > 0 {\n\t\t\tselect {\n\t\t\tcase <-cardChan:\n\t\t\tcase <-errChan:\n\t\t\t\tcores--\n\t\t\tcase <-doneChan:\n\t\t\t\tcores--\n\t\t\t}\n\t\t}\n\n\t\tclose(cardChan)\n\t\tclose(errChan)\n\t\tclose(doneChan)\n\t}\n\n\tfor cores > 0 {\n\t\tselect {\n\t\tcase <-doneChan:\n\t\t\tcores--\n\t\tcase c := <-cardChan:\n\t\t\tappendNonDuplicateToCardList(&list, c)\n\t\tcase err := <-errChan:\n\t\t\tcores--\n\t\t\tgo finishChans()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tclose(cardChan)\n\tclose(errChan)\n\tclose(doneChan)\n\n\tfor c := range cardChan {\n\t\tappendNonDuplicateToCardList(&list, c)\n\n\t}\n\n\tshortList := make([]*Card, len(list))\n\n\tcopy(shortList, list)\n\n\treturn shortList, nil\n}\n\nfunc appendToCardList(list *[]*Card, c *Card) {\n\tfor _, card := range *list {\n\t\tif card == c {\n\t\t\treturn\n\t\t}\n\t}\n\tappendNonDuplicateToCardList(list, c)\n}\n\nfunc appendNonDuplicateToCardList(list *[]*Card, c *Card) {\n\tl := len(*list)\n\tif l < cap(*list) {\n\t\t*list = (*list)[:l+1]\n\t} else {\n\t\tnewList := make([]*Card, l+1, l*2)\n\t\tcopy(newList, *list)\n\t\t*list = newList\n\t}\n\t(*list)[l] = c\n}\n\ntype Not struct {\n\tFilter\n}\n\nfunc (n Not) Ok(c *Card) (bool, error) {\n\tok, err := n.Filter.Ok(c)\n\treturn !ok, err\n}\n\ntype And []Filter\n\nfunc (a And) Ok(c *Card) (bool, error) {\n\tfor _, f := range a {\n\t\tok, err := f.Ok(c)\n\t\tif !ok || err != nil {\n\t\t\treturn ok, err\n\t\t}\n\t}\n\treturn true, nil\n}\n\ntype Or []Filter\n\nfunc (o Or) Ok(c *Card) (bool, error) {\n\tfor _, f := range o {\n\t\tok, err := f.Ok(c)\n\t\tif ok || err != nil {\n\t\t\treturn ok, err\n\t\t}\n\t}\n\treturn false, nil\n}\n\ntype Cond map[string]interface{}\n\nfunc (c Cond) Ok(card *Card) (bool, error) {\n\tfor key, val := range c {\n\t\tswitch key {\n\t\tcase \"color\", \"colors\":\n\t\t\tsame, err := handleColorSearch(card.Colors, val)\n\t\t\tif err != nil || !same {\n\t\t\t\treturn same, err\n\t\t\t}\n\t\t}\n\t}\n\treturn true, nil\n}\n\nfunc handleColorSearch(cardColor ManaColor, val interface{}) (bool, error) {\n\tswitch val := val.(type) {\n\tcase ManaColor:\n\t\tif cardColor&val != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\treturn false, fmt.Errorf(\"unexpected color type %T\", val)\n}\n<|endoftext|>"} {"text":"<commit_before>package mysql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"h12.me\/csv\"\n)\n\ntype Cmd struct {\n\tValue interface{}\n\tTagKey string\n\tExpandPath []string\n\tDB string\n\tEngine string\n\tTable string\n}\n\nfunc (cmd Cmd) CreateDB() string {\n\treturn fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET utf8;\\n\", cmd.DB)\n}\n\nfunc (cmd Cmd) CreateTable() (string, error) {\n\tw := new(bytes.Buffer)\n\tfmt.Fprintf(w, \"CREATE TABLE IF NOT EXISTS %s (\\n\", cmd.Table)\n\tfields, err := cmd.Fields()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar pks []string\n\tsort.Sort(fields)\n\tfor _, field := range fields {\n\t\tif field.Tag.Get(\"PK\") == \"yes\" {\n\t\t\tpks = append(pks, field.Name)\n\t\t\tfmt.Fprintf(w, \"\\t%s %s,\\n\", field.Name, field.Tag.Get(\"TYPE\"))\n\t\t}\n\t}\n\tfor _, field := range fields {\n\t\tif field.Tag.Get(\"PK\") != \"yes\" {\n\t\t\tfmt.Fprintf(w, \"\\t%s %s,\\n\", field.Name, field.Tag.Get(\"TYPE\"))\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"\\tPRIMARY KEY (%s)\\n\", strings.Join(pks, \",\"))\n\tfmt.Fprintf(w, \") ENGINE=%s DEFAULT CHARSET=UTF8;\\n\", cmd.Engine)\n\treturn w.String(), nil\n}\n\nfunc (cmd Cmd) LoadData() (string, error) {\n\tfields, err := cmd.Fields()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"LOAD DATA LOCAL INFILE 'Reader::%%[1]s' REPLACE INTO TABLE %%[1]s (%s);\\n\", strings.Join(fields.Names(), \", \")), nil\n}\n\nfunc (cmd Cmd) Fields() (csv.Fields, error) {\n\tenc := csv.NewEncoder(ioutil.Discard).SetTagKey(cmd.TagKey).SetExpandPath(cmd.ExpandPath...)\n\tif err := enc.Encode(cmd.Value); err != nil {\n\t\treturn nil, err\n\t}\n\treturn enc.Fields(), nil\n}\n<commit_msg>add full table name<commit_after>package mysql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"h12.me\/csv\"\n)\n\ntype Cmd struct {\n\tValue interface{}\n\tTagKey string\n\tExpandPath []string\n\tDB string\n\tEngine string\n\tTable string\n}\n\nfunc (cmd Cmd) CreateDB() string {\n\treturn fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET utf8;\\n\", cmd.DB)\n}\n\nfunc (cmd Cmd) CreateTable() (string, error) {\n\tw := new(bytes.Buffer)\n\tfmt.Fprintf(w, \"CREATE TABLE IF NOT EXISTS %s (\\n\", cmd.Table)\n\tfields, err := cmd.Fields()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar pks []string\n\tsort.Sort(fields)\n\tfor _, field := range fields {\n\t\tif field.Tag.Get(\"PK\") == \"yes\" {\n\t\t\tpks = append(pks, field.Name)\n\t\t\tfmt.Fprintf(w, \"\\t%s %s,\\n\", field.Name, field.Tag.Get(\"TYPE\"))\n\t\t}\n\t}\n\tfor _, field := range fields {\n\t\tif field.Tag.Get(\"PK\") != \"yes\" {\n\t\t\tfmt.Fprintf(w, \"\\t%s %s,\\n\", field.Name, field.Tag.Get(\"TYPE\"))\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"\\tPRIMARY KEY (%s)\\n\", strings.Join(pks, \",\"))\n\tfmt.Fprintf(w, \") ENGINE=%s DEFAULT CHARSET=UTF8;\\n\", cmd.Engine)\n\treturn w.String(), nil\n}\n\nfunc (cmd Cmd) LoadData() (string, error) {\n\tfields, err := cmd.Fields()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"LOAD DATA LOCAL INFILE 'Reader::%%[1]s' REPLACE INTO TABLE %%[1]s (%s);\\n\", strings.Join(fields.Names(), \", \")), nil\n}\n\nfunc (cmd Cmd) Fields() (csv.Fields, error) {\n\tenc := csv.NewEncoder(ioutil.Discard).SetTagKey(cmd.TagKey).SetExpandPath(cmd.ExpandPath...)\n\tif err := enc.Encode(cmd.Value); err != nil {\n\t\treturn nil, err\n\t}\n\treturn enc.Fields(), nil\n}\n\nfunc (cmd Cmd) FullTableName() string {\n\treturn cmd.DB + \".\" + cmd.Table\n}\n<|endoftext|>"} {"text":"<commit_before>package gonameparts\n\nimport (\n\t\"strings\"\n)\n\ntype NameParts struct {\n\tProvidedName string `json:\"provided_name\"`\n\tFullName string `json:\"full_name\"`\n\tSalutation string `json:\"salutation\"`\n\tFirstName string `json:\"first_name\"`\n\tMiddleName string `json:\"middle_name\"`\n\tLastName string `json:\"last_name\"`\n\tSuffix string `json:\"suffix\"`\n\tAliases []string `json:\"aliases\"`\n}\n\ntype nameString struct {\n\tFullName string\n}\n\nfunc (n *nameString) cleaned() []string {\n\tunwanted := []string{\",\", \".\"}\n\tcleaned := []string{}\n\tfor _, x := range n.split() {\n\t\tfor _, y := range unwanted {\n\t\t\tx = strings.Replace(x, y, \"\", -1)\n\t\t}\n\t\tcleaned = append(cleaned, strings.Trim(x))\n\t}\n\treturn cleaned\n}\n\nfunc (n *nameString) searchParts(parts *[]string) int {\n\tfor i, x := range n.cleaned() {\n\t\tfor _, y := range *parts {\n\t\t\tif strings.ToUpper(x) == strings.ToUpper(y) {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (n *nameString) looksCorporate() bool {\n\treturn n.searchParts(&corpEntity) > -1\n}\n\nfunc (n *nameString) find(part string) int {\n\tswitch part {\n\tcase \"salutation\":\n\t\treturn n.searchParts(&salutations)\n\tcase \"generation\":\n\t\treturn n.searchParts(&generations)\n\tcase \"suffix\":\n\t\treturn n.searchParts(&suffixes)\n\tcase \"lnprefix\":\n\t\treturn n.searchParts(&lnPrefixes)\n\tcase \"nonname\":\n\t\treturn n.searchParts(&nonName)\n\tcase \"supplemental\":\n\t\treturn n.searchParts(&supplementalInfo)\n\tdefault:\n\n\t}\n\treturn -1\n}\n\nfunc (n *nameString) split() []string {\n\treturn strings.Fields(n.FullName)\n}\n\nfunc Parse(name string) NameParts {\n\tn := nameString{FullName: name}\n\tp := NameParts{ProvidedName: name}\n\n\tpartMap = make(map[string]int)\n\tparts = []string{\"salutation\", \"generation\", \"suffix\", \"lnprefix\", \"nonname\", \"supplemental\"}\n\n\tfor _, part := range parts {\n\t\tpartMap[part] = n.find(part)\n\t}\n\n\treturn p\n}\n<commit_msg>Fix undefined<commit_after>package gonameparts\n\nimport (\n\t\"strings\"\n)\n\ntype NameParts struct {\n\tProvidedName string `json:\"provided_name\"`\n\tFullName string `json:\"full_name\"`\n\tSalutation string `json:\"salutation\"`\n\tFirstName string `json:\"first_name\"`\n\tMiddleName string `json:\"middle_name\"`\n\tLastName string `json:\"last_name\"`\n\tSuffix string `json:\"suffix\"`\n\tAliases []string `json:\"aliases\"`\n}\n\ntype nameString struct {\n\tFullName string\n}\n\nfunc (n *nameString) cleaned() []string {\n\tunwanted := []string{\",\", \".\"}\n\tcleaned := []string{}\n\tfor _, x := range n.split() {\n\t\tfor _, y := range unwanted {\n\t\t\tx = strings.Replace(x, y, \"\", -1)\n\t\t}\n\t\tcleaned = append(cleaned, strings.Trim(x, \" \"))\n\t}\n\treturn cleaned\n}\n\nfunc (n *nameString) searchParts(parts *[]string) int {\n\tfor i, x := range n.cleaned() {\n\t\tfor _, y := range *parts {\n\t\t\tif strings.ToUpper(x) == strings.ToUpper(y) {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (n *nameString) looksCorporate() bool {\n\treturn n.searchParts(&corpEntity) > -1\n}\n\nfunc (n *nameString) find(part string) int {\n\tswitch part {\n\tcase \"salutation\":\n\t\treturn n.searchParts(&salutations)\n\tcase \"generation\":\n\t\treturn n.searchParts(&generations)\n\tcase \"suffix\":\n\t\treturn n.searchParts(&suffixes)\n\tcase \"lnprefix\":\n\t\treturn n.searchParts(&lnPrefixes)\n\tcase \"nonname\":\n\t\treturn n.searchParts(&nonName)\n\tcase \"supplemental\":\n\t\treturn n.searchParts(&supplementalInfo)\n\tdefault:\n\n\t}\n\treturn -1\n}\n\nfunc (n *nameString) split() []string {\n\treturn strings.Fields(n.FullName)\n}\n\nfunc Parse(name string) NameParts {\n\tn := nameString{FullName: name}\n\tp := NameParts{ProvidedName: name}\n\n\tpartMap := make(map[string]int)\n\tparts := []string{\"salutation\", \"generation\", \"suffix\", \"lnprefix\", \"nonname\", \"supplemental\"}\n\n\tfor _, part := range parts {\n\t\tpartMap[part] = n.find(part)\n\t}\n\n\treturn p\n}\n<|endoftext|>"} {"text":"<commit_before>package nats\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Package scoped specific tests here..\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/gnatsd\/server\"\n\tgnatsd \"github.com\/nats-io\/gnatsd\/test\"\n)\n\n\/\/ Dumb wait program to sync on callbacks, etc... Will timeout\nfunc Wait(ch chan bool) error {\n\treturn WaitTime(ch, 5*time.Second)\n}\n\nfunc WaitTime(ch chan bool, timeout time.Duration) error {\n\tselect {\n\tcase <-ch:\n\t\treturn nil\n\tcase <-time.After(timeout):\n\t}\n\treturn errors.New(\"timeout\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Reconnect tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar reconnectOpts = Options{\n\tUrl: \"nats:\/\/localhost:22222\",\n\tAllowReconnect: true,\n\tMaxReconnect: 10,\n\tReconnectWait: 100 * time.Millisecond,\n\tTimeout: DefaultTimeout,\n}\n\nfunc RunServerOnPort(port int) *server.Server {\n\topts := gnatsd.DefaultTestOptions\n\topts.Port = port\n\treturn RunServerWithOptions(opts)\n}\n\nfunc RunServerWithOptions(opts server.Options) *server.Server {\n\treturn gnatsd.RunServer(&opts)\n}\n\nfunc TestReconnectServerStats(t *testing.T) {\n\tts := RunServerOnPort(22222)\n\n\topts := reconnectOpts\n\tnc, _ := opts.Connect()\n\tdefer nc.Close()\n\tnc.Flush()\n\n\tts.Shutdown()\n\t\/\/ server is stopped here...\n\n\tts = RunServerOnPort(22222)\n\tdefer ts.Shutdown()\n\n\tif err := nc.FlushTimeout(5 * time.Second); err != nil {\n\t\tt.Fatalf(\"Error on Flush: %v\", err)\n\t}\n\n\t\/\/ Make sure the server who is reconnected has the reconnects stats reset.\n\t_, cur := nc.currentServer()\n\tif cur.reconnects != 0 {\n\t\tt.Fatalf(\"Current Server's reconnects should be 0 vs %d\\n\", cur.reconnects)\n\t}\n}\n\nfunc TestParseStateReconnectFunctionality(t *testing.T) {\n\tts := RunServerOnPort(22222)\n\tch := make(chan bool)\n\n\topts := reconnectOpts\n\tdch := make(chan bool)\n\topts.DisconnectedCB = func(_ *Conn) {\n\t\tdch <- true\n\t}\n\n\tnc, errc := opts.Connect()\n\tif errc != nil {\n\t\tt.Fatalf(\"Failed to create a connection: %v\\n\", errc)\n\t}\n\tec, errec := NewEncodedConn(nc, DEFAULT_ENCODER)\n\tif errec != nil {\n\t\tnc.Close()\n\t\tt.Fatalf(\"Failed to create an encoded connection: %v\\n\", errec)\n\t}\n\tdefer ec.Close()\n\n\ttestString := \"bar\"\n\tec.Subscribe(\"foo\", func(s string) {\n\t\tif s != testString {\n\t\t\tt.Fatal(\"String doesn't match\")\n\t\t}\n\t\tch <- true\n\t})\n\tec.Flush()\n\n\t\/\/ Got a RACE condition with Travis build. The locking below does not\n\t\/\/ really help because the parser running in the readLoop accesses\n\t\/\/ nc.ps without the connection lock. Sleeping may help better since\n\t\/\/ it would make the memory write in parse.go (when processing the\n\t\/\/ pong) further away from the modification below.\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/ Simulate partialState, this needs to be cleared\n\tnc.mu.Lock()\n\tnc.ps.state = OP_PON\n\tnc.mu.Unlock()\n\n\tts.Shutdown()\n\t\/\/ server is stopped here...\n\n\tif err := Wait(dch); err != nil {\n\t\tt.Fatal(\"Did not get the DisconnectedCB\")\n\t}\n\n\tif err := ec.Publish(\"foo\", testString); err != nil {\n\t\tt.Fatalf(\"Failed to publish message: %v\\n\", err)\n\t}\n\n\tts = RunServerOnPort(22222)\n\tdefer ts.Shutdown()\n\n\tif err := ec.FlushTimeout(5 * time.Second); err != nil {\n\t\tt.Fatalf(\"Error on Flush: %v\", err)\n\t}\n\n\tif err := Wait(ch); err != nil {\n\t\tt.Fatal(\"Did not receive our message\")\n\t}\n\n\texpectedReconnectCount := uint64(1)\n\treconnectedCount := ec.Conn.Stats().Reconnects\n\n\tif reconnectedCount != expectedReconnectCount {\n\t\tt.Fatalf(\"Reconnect count incorrect: %d vs %d\\n\",\n\t\t\treconnectedCount, expectedReconnectCount)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ ServerPool tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar testServers = []string{\n\t\"nats:\/\/localhost:1222\",\n\t\"nats:\/\/localhost:1223\",\n\t\"nats:\/\/localhost:1224\",\n\t\"nats:\/\/localhost:1225\",\n\t\"nats:\/\/localhost:1226\",\n\t\"nats:\/\/localhost:1227\",\n\t\"nats:\/\/localhost:1228\",\n}\n\nfunc TestServersRandomize(t *testing.T) {\n\topts := DefaultOptions\n\topts.Servers = testServers\n\tnc := &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\t\/\/ Build []string from srvPool\n\tclientServers := []string{}\n\tfor _, s := range nc.srvPool {\n\t\tclientServers = append(clientServers, s.url.String())\n\t}\n\t\/\/ In theory this could happen..\n\tif reflect.DeepEqual(testServers, clientServers) {\n\t\tt.Fatalf(\"ServerPool list not randomized\\n\")\n\t}\n\n\t\/\/ Now test that we do not randomize if proper flag is set.\n\topts = DefaultOptions\n\topts.Servers = testServers\n\topts.NoRandomize = true\n\tnc = &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\t\/\/ Build []string from srvPool\n\tclientServers = []string{}\n\tfor _, s := range nc.srvPool {\n\t\tclientServers = append(clientServers, s.url.String())\n\t}\n\tif !reflect.DeepEqual(testServers, clientServers) {\n\t\tt.Fatalf(\"ServerPool list should not be randomized\\n\")\n\t}\n}\n\nfunc TestSelectNextServer(t *testing.T) {\n\topts := DefaultOptions\n\topts.Servers = testServers\n\topts.NoRandomize = true\n\tnc := &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\tif nc.url != nc.srvPool[0].url {\n\t\tt.Fatalf(\"Wrong default selection: %v\\n\", nc.url)\n\t}\n\n\tsel, err := nc.selectNextServer()\n\tif err != nil {\n\t\tt.Fatalf(\"Got an err: %v\\n\", err)\n\t}\n\t\/\/ Check that we are now looking at #2, and current is now last.\n\tif len(nc.srvPool) != len(testServers) {\n\t\tt.Fatalf(\"List is incorrect size: %d vs %d\\n\", len(nc.srvPool), len(testServers))\n\t}\n\tif nc.url.String() != testServers[1] {\n\t\tt.Fatalf(\"Selection incorrect: %v vs %v\\n\", nc.url, testServers[1])\n\t}\n\tif nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] {\n\t\tt.Fatalf(\"Did not push old to last position\\n\")\n\t}\n\tif sel != nc.srvPool[0] {\n\t\tt.Fatalf(\"Did not return correct server: %v vs %v\\n\", sel.url, nc.srvPool[0].url)\n\t}\n\n\t\/\/ Test that we do not keep servers where we have tried to reconnect past our limit.\n\tnc.srvPool[0].reconnects = int(opts.MaxReconnect)\n\tif _, err := nc.selectNextServer(); err != nil {\n\t\tt.Fatalf(\"Got an err: %v\\n\", err)\n\t}\n\t\/\/ Check that we are now looking at #3, and current is not in the list.\n\tif len(nc.srvPool) != len(testServers)-1 {\n\t\tt.Fatalf(\"List is incorrect size: %d vs %d\\n\", len(nc.srvPool), len(testServers)-1)\n\t}\n\tif nc.url.String() != testServers[2] {\n\t\tt.Fatalf(\"Selection incorrect: %v vs %v\\n\", nc.url, testServers[2])\n\t}\n\tif nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] {\n\t\tt.Fatalf(\"Did not throw away the last server correctly\\n\")\n\t}\n}\n\n\/\/ This will test that comma separated url strings work properly for\n\/\/ the Connect() command.\nfunc TestUrlArgument(t *testing.T) {\n\tcheck := func(url string, expected []string) {\n\t\tif !reflect.DeepEqual(processUrlString(url), expected) {\n\t\t\tt.Fatalf(\"Got wrong response processing URL: %q, RES: %#v\\n\", url, processUrlString(url))\n\t\t}\n\t}\n\t\/\/ This is normal case\n\toneExpected := []string{\"nats:\/\/localhost:1222\"}\n\n\tcheck(\"nats:\/\/localhost:1222\", oneExpected)\n\tcheck(\"nats:\/\/localhost:1222 \", oneExpected)\n\tcheck(\" nats:\/\/localhost:1222\", oneExpected)\n\tcheck(\" nats:\/\/localhost:1222 \", oneExpected)\n\n\tvar multiExpected = []string{\n\t\t\"nats:\/\/localhost:1222\",\n\t\t\"nats:\/\/localhost:1223\",\n\t\t\"nats:\/\/localhost:1224\",\n\t}\n\n\tcheck(\"nats:\/\/localhost:1222,nats:\/\/localhost:1223,nats:\/\/localhost:1224\", multiExpected)\n\tcheck(\"nats:\/\/localhost:1222, nats:\/\/localhost:1223, nats:\/\/localhost:1224\", multiExpected)\n\tcheck(\" nats:\/\/localhost:1222, nats:\/\/localhost:1223, nats:\/\/localhost:1224 \", multiExpected)\n\tcheck(\"nats:\/\/localhost:1222, nats:\/\/localhost:1223 ,nats:\/\/localhost:1224\", multiExpected)\n}\n<commit_msg>Fix for race<commit_after>package nats\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Package scoped specific tests here..\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/gnatsd\/server\"\n\tgnatsd \"github.com\/nats-io\/gnatsd\/test\"\n)\n\n\/\/ Dumb wait program to sync on callbacks, etc... Will timeout\nfunc Wait(ch chan bool) error {\n\treturn WaitTime(ch, 5*time.Second)\n}\n\nfunc WaitTime(ch chan bool, timeout time.Duration) error {\n\tselect {\n\tcase <-ch:\n\t\treturn nil\n\tcase <-time.After(timeout):\n\t}\n\treturn errors.New(\"timeout\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Reconnect tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar reconnectOpts = Options{\n\tUrl: \"nats:\/\/localhost:22222\",\n\tAllowReconnect: true,\n\tMaxReconnect: 10,\n\tReconnectWait: 100 * time.Millisecond,\n\tTimeout: DefaultTimeout,\n}\n\nfunc RunServerOnPort(port int) *server.Server {\n\topts := gnatsd.DefaultTestOptions\n\topts.Port = port\n\treturn RunServerWithOptions(opts)\n}\n\nfunc RunServerWithOptions(opts server.Options) *server.Server {\n\treturn gnatsd.RunServer(&opts)\n}\n\nfunc TestReconnectServerStats(t *testing.T) {\n\tts := RunServerOnPort(22222)\n\n\topts := reconnectOpts\n\tnc, _ := opts.Connect()\n\tdefer nc.Close()\n\tnc.Flush()\n\n\tts.Shutdown()\n\t\/\/ server is stopped here...\n\n\tts = RunServerOnPort(22222)\n\tdefer ts.Shutdown()\n\n\tif err := nc.FlushTimeout(5 * time.Second); err != nil {\n\t\tt.Fatalf(\"Error on Flush: %v\", err)\n\t}\n\n\t\/\/ Make sure the server who is reconnected has the reconnects stats reset.\n\tnc.mu.Lock()\n\t_, cur := nc.currentServer()\n\tnc.mu.Unlock()\n\n\tif cur.reconnects != 0 {\n\t\tt.Fatalf(\"Current Server's reconnects should be 0 vs %d\\n\", cur.reconnects)\n\t}\n}\n\nfunc TestParseStateReconnectFunctionality(t *testing.T) {\n\tts := RunServerOnPort(22222)\n\tch := make(chan bool)\n\n\topts := reconnectOpts\n\tdch := make(chan bool)\n\topts.DisconnectedCB = func(_ *Conn) {\n\t\tdch <- true\n\t}\n\n\tnc, errc := opts.Connect()\n\tif errc != nil {\n\t\tt.Fatalf(\"Failed to create a connection: %v\\n\", errc)\n\t}\n\tec, errec := NewEncodedConn(nc, DEFAULT_ENCODER)\n\tif errec != nil {\n\t\tnc.Close()\n\t\tt.Fatalf(\"Failed to create an encoded connection: %v\\n\", errec)\n\t}\n\tdefer ec.Close()\n\n\ttestString := \"bar\"\n\tec.Subscribe(\"foo\", func(s string) {\n\t\tif s != testString {\n\t\t\tt.Fatal(\"String doesn't match\")\n\t\t}\n\t\tch <- true\n\t})\n\tec.Flush()\n\n\t\/\/ Got a RACE condition with Travis build. The locking below does not\n\t\/\/ really help because the parser running in the readLoop accesses\n\t\/\/ nc.ps without the connection lock. Sleeping may help better since\n\t\/\/ it would make the memory write in parse.go (when processing the\n\t\/\/ pong) further away from the modification below.\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/ Simulate partialState, this needs to be cleared\n\tnc.mu.Lock()\n\tnc.ps.state = OP_PON\n\tnc.mu.Unlock()\n\n\tts.Shutdown()\n\t\/\/ server is stopped here...\n\n\tif err := Wait(dch); err != nil {\n\t\tt.Fatal(\"Did not get the DisconnectedCB\")\n\t}\n\n\tif err := ec.Publish(\"foo\", testString); err != nil {\n\t\tt.Fatalf(\"Failed to publish message: %v\\n\", err)\n\t}\n\n\tts = RunServerOnPort(22222)\n\tdefer ts.Shutdown()\n\n\tif err := ec.FlushTimeout(5 * time.Second); err != nil {\n\t\tt.Fatalf(\"Error on Flush: %v\", err)\n\t}\n\n\tif err := Wait(ch); err != nil {\n\t\tt.Fatal(\"Did not receive our message\")\n\t}\n\n\texpectedReconnectCount := uint64(1)\n\treconnectedCount := ec.Conn.Stats().Reconnects\n\n\tif reconnectedCount != expectedReconnectCount {\n\t\tt.Fatalf(\"Reconnect count incorrect: %d vs %d\\n\",\n\t\t\treconnectedCount, expectedReconnectCount)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ ServerPool tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar testServers = []string{\n\t\"nats:\/\/localhost:1222\",\n\t\"nats:\/\/localhost:1223\",\n\t\"nats:\/\/localhost:1224\",\n\t\"nats:\/\/localhost:1225\",\n\t\"nats:\/\/localhost:1226\",\n\t\"nats:\/\/localhost:1227\",\n\t\"nats:\/\/localhost:1228\",\n}\n\nfunc TestServersRandomize(t *testing.T) {\n\topts := DefaultOptions\n\topts.Servers = testServers\n\tnc := &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\t\/\/ Build []string from srvPool\n\tclientServers := []string{}\n\tfor _, s := range nc.srvPool {\n\t\tclientServers = append(clientServers, s.url.String())\n\t}\n\t\/\/ In theory this could happen..\n\tif reflect.DeepEqual(testServers, clientServers) {\n\t\tt.Fatalf(\"ServerPool list not randomized\\n\")\n\t}\n\n\t\/\/ Now test that we do not randomize if proper flag is set.\n\topts = DefaultOptions\n\topts.Servers = testServers\n\topts.NoRandomize = true\n\tnc = &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\t\/\/ Build []string from srvPool\n\tclientServers = []string{}\n\tfor _, s := range nc.srvPool {\n\t\tclientServers = append(clientServers, s.url.String())\n\t}\n\tif !reflect.DeepEqual(testServers, clientServers) {\n\t\tt.Fatalf(\"ServerPool list should not be randomized\\n\")\n\t}\n}\n\nfunc TestSelectNextServer(t *testing.T) {\n\topts := DefaultOptions\n\topts.Servers = testServers\n\topts.NoRandomize = true\n\tnc := &Conn{Opts: opts}\n\tif err := nc.setupServerPool(); err != nil {\n\t\tt.Fatalf(\"Problem setting up Server Pool: %v\\n\", err)\n\t}\n\tif nc.url != nc.srvPool[0].url {\n\t\tt.Fatalf(\"Wrong default selection: %v\\n\", nc.url)\n\t}\n\n\tsel, err := nc.selectNextServer()\n\tif err != nil {\n\t\tt.Fatalf(\"Got an err: %v\\n\", err)\n\t}\n\t\/\/ Check that we are now looking at #2, and current is now last.\n\tif len(nc.srvPool) != len(testServers) {\n\t\tt.Fatalf(\"List is incorrect size: %d vs %d\\n\", len(nc.srvPool), len(testServers))\n\t}\n\tif nc.url.String() != testServers[1] {\n\t\tt.Fatalf(\"Selection incorrect: %v vs %v\\n\", nc.url, testServers[1])\n\t}\n\tif nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] {\n\t\tt.Fatalf(\"Did not push old to last position\\n\")\n\t}\n\tif sel != nc.srvPool[0] {\n\t\tt.Fatalf(\"Did not return correct server: %v vs %v\\n\", sel.url, nc.srvPool[0].url)\n\t}\n\n\t\/\/ Test that we do not keep servers where we have tried to reconnect past our limit.\n\tnc.srvPool[0].reconnects = int(opts.MaxReconnect)\n\tif _, err := nc.selectNextServer(); err != nil {\n\t\tt.Fatalf(\"Got an err: %v\\n\", err)\n\t}\n\t\/\/ Check that we are now looking at #3, and current is not in the list.\n\tif len(nc.srvPool) != len(testServers)-1 {\n\t\tt.Fatalf(\"List is incorrect size: %d vs %d\\n\", len(nc.srvPool), len(testServers)-1)\n\t}\n\tif nc.url.String() != testServers[2] {\n\t\tt.Fatalf(\"Selection incorrect: %v vs %v\\n\", nc.url, testServers[2])\n\t}\n\tif nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] {\n\t\tt.Fatalf(\"Did not throw away the last server correctly\\n\")\n\t}\n}\n\n\/\/ This will test that comma separated url strings work properly for\n\/\/ the Connect() command.\nfunc TestUrlArgument(t *testing.T) {\n\tcheck := func(url string, expected []string) {\n\t\tif !reflect.DeepEqual(processUrlString(url), expected) {\n\t\t\tt.Fatalf(\"Got wrong response processing URL: %q, RES: %#v\\n\", url, processUrlString(url))\n\t\t}\n\t}\n\t\/\/ This is normal case\n\toneExpected := []string{\"nats:\/\/localhost:1222\"}\n\n\tcheck(\"nats:\/\/localhost:1222\", oneExpected)\n\tcheck(\"nats:\/\/localhost:1222 \", oneExpected)\n\tcheck(\" nats:\/\/localhost:1222\", oneExpected)\n\tcheck(\" nats:\/\/localhost:1222 \", oneExpected)\n\n\tvar multiExpected = []string{\n\t\t\"nats:\/\/localhost:1222\",\n\t\t\"nats:\/\/localhost:1223\",\n\t\t\"nats:\/\/localhost:1224\",\n\t}\n\n\tcheck(\"nats:\/\/localhost:1222,nats:\/\/localhost:1223,nats:\/\/localhost:1224\", multiExpected)\n\tcheck(\"nats:\/\/localhost:1222, nats:\/\/localhost:1223, nats:\/\/localhost:1224\", multiExpected)\n\tcheck(\" nats:\/\/localhost:1222, nats:\/\/localhost:1223, nats:\/\/localhost:1224 \", multiExpected)\n\tcheck(\"nats:\/\/localhost:1222, nats:\/\/localhost:1223 ,nats:\/\/localhost:1224\", multiExpected)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>update w\/ new changes<commit_after><|endoftext|>"} {"text":"<commit_before>package main \/\/ FIXME omapi\n\nimport (\n\t\"bytes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"time\"\n)\n\ntype Opcode int32\n\nconst (\n\tOpOpen Opcode = 1 + iota\n\tOpRefresh\n\tOpUpdate\n\tOpNotify\n\tOpStatus\n\tOpDelete\n)\n\nvar Ethernet = []byte{0, 0, 0, 1}\nvar TokenRing = []byte{0, 0, 0, 6}\nvar FDDI = []byte{0, 0, 0, 8}\n\nvar True = []byte{0, 0, 0, 1}\n\nfunc (opcode Opcode) String() (ret string) {\n\tswitch opcode {\n\tcase 1:\n\t\tret = \"open\"\n\tcase 2:\n\t\tret = \"refresh\"\n\tcase 3:\n\t\tret = \"update\"\n\tcase 4:\n\t\tret = \"notify\"\n\tcase 5:\n\t\tret = \"status\"\n\tcase 6:\n\t\tret = \"delete\"\n\t}\n\n\treturn\n}\n\n\/\/ TODO add size checks for all operations\ntype buffer struct {\n\tbuffer *bytes.Buffer\n}\n\nfunc newBuffer() *buffer {\n\treturn &buffer{new(bytes.Buffer)}\n}\n\nfunc (b *buffer) add_bytes(data []byte) {\n\tb.buffer.Write(data)\n}\n\nfunc (b *buffer) add(data interface{}) {\n\terr := binary.Write(b.buffer, binary.BigEndian, data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (b *buffer) add_map(data map[string][]byte) {\n\t\/\/ We need to add the map in a deterministic order for signing to\n\t\/\/ work, so we first sort the keys in alphabetical order, then use\n\t\/\/ that order to access the map entries.\n\n\tkeys := make(sort.StringSlice, 0, len(data))\n\n\tfor key := range data {\n\t\tkeys = append(keys, key)\n\t}\n\n\tsort.Sort(keys)\n\n\tfor _, key := range keys {\n\t\tvalue := data[key]\n\n\t\tb.add(int16(len(key)))\n\t\tb.add([]byte(key))\n\n\t\tb.add(int32(len(value)))\n\t\tb.add(value)\n\t}\n\n\tb.add([]byte(\"\\x00\\x00\"))\n}\n\nfunc (b *buffer) bytes() []byte {\n\treturn b.buffer.Bytes()\n}\n\ntype Message struct {\n\tAuthID int32\n\tOpcode Opcode\n\tHandle int32\n\tTid int32\n\tRid int32\n\tMessage map[string][]byte\n\tObject map[string][]byte\n\tSignature []byte\n}\n\nfunc NewMessage() *Message {\n\tmsg := &Message{\n\t\tTid: rand.Int31(),\n\t\tMessage: make(map[string][]byte),\n\t\tObject: make(map[string][]byte),\n\t}\n\n\treturn msg\n}\n\nfunc NewOpenMessage(typeName string) *Message {\n\tmessage := NewMessage()\n\tmessage.Opcode = OpOpen\n\tmessage.Message[\"type\"] = []byte(typeName)\n\n\treturn message\n}\n\nfunc NewDeleteMessage(handle int32) *Message {\n\tmessage := NewMessage()\n\tmessage.Opcode = OpDelete\n\tmessage.Handle = handle\n\n\treturn message\n}\n\nfunc (m *Message) Bytes(forSigning bool) []byte {\n\tret := newBuffer()\n\tif !forSigning {\n\t\tret.add(m.AuthID)\n\t}\n\n\tret.add(int32(len(m.Signature)))\n\tret.add(m.Opcode)\n\tret.add(m.Handle)\n\tret.add(m.Tid)\n\tret.add(m.Rid)\n\tret.add_map(m.Message)\n\tret.add_map(m.Object)\n\tif !forSigning {\n\t\tret.add(m.Signature)\n\t}\n\n\treturn ret.buffer.Bytes()\n}\n\nfunc (m *Message) Sign(auth Authenticator) {\n\tm.AuthID = auth.AuthID()\n\tm.Signature = auth.Sign(m)\n}\n\nfunc (m *Message) Verify(auth Authenticator) bool {\n\treturn bytes.Equal(auth.Sign(m), m.Signature)\n}\n\nfunc (m *Message) IsResponseTo(other *Message) bool {\n\treturn m.Rid == other.Tid\n}\n\ntype Connection struct {\n\tHostname string\n\tPort int\n\tUsername string\n\tKey string\n\tAuthenticator Authenticator\n\tconnection *net.TCPConn\n\tinBuffer *bytes.Buffer\n}\n\nfunc NewConnection(hostname string, port int, username string, key string) *Connection {\n\tcon := &Connection{\n\t\tHostname: hostname,\n\t\tPort: port,\n\t\tUsername: username,\n\t\tKey: key,\n\t\tAuthenticator: new(NullAuthenticator),\n\t\tinBuffer: new(bytes.Buffer),\n\t}\n\n\tvar newAuth Authenticator = new(NullAuthenticator)\n\n\tif len(username) > 0 && len(key) > 0 {\n\t\tdecodedKey, err := base64.StdEncoding.DecodeString(key)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tnewAuth = &HMACMD5Authenticator{username, decodedKey, -1}\n\t}\n\n\traddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%s:%d\", hostname, port))\n\tif err != nil {\n\t\t\/\/ TODO return the error instead\n\t\tpanic(err)\n\t}\n\ttcpConn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\t\/\/ TODO return the error instead\n\t\tpanic(err)\n\t}\n\n\tcon.connection = tcpConn\n\n\tcon.SendProtocolInitialization()\n\tcon.ReceiveProtocolInitialization()\n\tcon.initializeAuthenticator(newAuth)\n\n\treturn con\n}\n\nfunc (con *Connection) initializeAuthenticator(auth Authenticator) {\n\tif _, ok := auth.(*NullAuthenticator); ok {\n\t\treturn\n\t}\n\n\tmessage := NewOpenMessage(\"authenticator\")\n\tfor key, value := range auth.AuthObject() {\n\t\tmessage.Object[key] = value\n\t}\n\n\tresponse := con.queryServer(message)\n\n\tif response.Opcode != OpUpdate {\n\t\tpanic(\"received non-update response for open\")\n\t}\n\n\tif response.Handle == 0 {\n\t\tpanic(\"received invalid authid from server\")\n\t}\n\n\tauth.SetAuthID(response.Handle)\n\tcon.Authenticator = auth\n}\n\nfunc (con *Connection) queryServer(msg *Message) *Message {\n\tmsg.Sign(con.Authenticator)\n\tcon.Send(msg.Bytes(false))\n\tresponse := con.parseMessage()\n\tif !response.IsResponseTo(msg) {\n\t\tpanic(\"received message is not the desired response\")\n\t}\n\n\t\/\/ TODO check authid\n\n\treturn response\n}\n\nfunc (con *Connection) Send(data []byte) (n int, err error) {\n\treturn con.connection.Write(data)\n}\n\nfunc (con *Connection) SendProtocolInitialization() {\n\tbuf := newBuffer()\n\tbuf.add(int32(100)) \/\/ Protocol version\n\tbuf.add(int32(24)) \/\/ Header size\n\tcon.Send(buf.bytes())\n}\n\nfunc (con *Connection) Read() {\n\tbuf := make([]byte, 2048)\n\tn, err := con.connection.Read(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcon.inBuffer.Write(buf[0:n])\n}\n\nfunc (con *Connection) waitForN(n int) {\n\tfor con.inBuffer.Len() < n {\n\t\tcon.Read()\n\t}\n}\n\nfunc (con *Connection) parseStartupMessage() (version, headerSize int32) {\n\tcon.waitForN(8)\n\n\tbinary.Read(con.inBuffer, binary.BigEndian, &version)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &headerSize)\n\n\treturn\n}\n\nfunc (con *Connection) parseMap() map[string][]byte {\n\tdict := make(map[string][]byte)\n\n\tvar (\n\t\tkeyLength int16\n\t\tvalueLength int32\n\t\tkey []byte\n\t\tvalue []byte\n\t)\n\n\tfor {\n\t\tcon.waitForN(2)\n\t\tbinary.Read(con.inBuffer, binary.BigEndian, &keyLength)\n\t\tif keyLength == 0 {\n\t\t\t\/\/ end of map\n\t\t\tbreak\n\t\t}\n\n\t\tcon.waitForN(int(keyLength))\n\t\tkey = make([]byte, keyLength)\n\t\tcon.inBuffer.Read(key)\n\n\t\tcon.waitForN(4)\n\t\tbinary.Read(con.inBuffer, binary.BigEndian, &valueLength)\n\t\tcon.waitForN(int(valueLength))\n\t\tvalue = make([]byte, valueLength)\n\t\tcon.inBuffer.Read(value)\n\n\t\tdict[string(key)] = value\n\t}\n\n\treturn dict\n}\n\nfunc (con *Connection) parseMessage() *Message {\n\tmessage := new(Message)\n\tcon.waitForN(24) \/\/ authid + authlen + opcode + handle + tid + rid\n\n\tvar authlen int32\n\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.AuthID)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &authlen)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Opcode)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Handle)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Tid)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Rid)\n\n\tmessage.Message = con.parseMap()\n\tmessage.Object = con.parseMap()\n\n\tcon.waitForN(int(authlen))\n\tmessage.Signature = make([]byte, authlen)\n\tcon.inBuffer.Read(message.Signature)\n\n\treturn message\n}\n\nfunc (con *Connection) ReceiveProtocolInitialization() {\n\tversion, headerSize := con.parseStartupMessage()\n\tif version != 100 {\n\t\tpanic(\"version mismatch\")\n\t}\n\n\tif headerSize != 24 {\n\t\tpanic(\"header size mismatch\")\n\t}\n}\n\ntype Authenticator interface {\n\tSign(*Message) []byte\n\tAuthObject() map[string][]byte\n\tAuthLen() int32\n\tAuthID() int32\n\tSetAuthID(int32)\n}\n\ntype NullAuthenticator struct{}\n\nfunc (_ *NullAuthenticator) AuthObject() map[string][]byte {\n\treturn make(map[string][]byte)\n}\n\nfunc (_ *NullAuthenticator) Sign(_ *Message) []byte {\n\treturn []byte(\"\")\n}\n\nfunc (_ *NullAuthenticator) AuthLen() int32 {\n\treturn 0\n}\n\nfunc (_ *NullAuthenticator) AuthID() int32 {\n\treturn 0\n}\n\nfunc (_ *NullAuthenticator) SetAuthID(_ int32) {\n}\n\ntype HMACMD5Authenticator struct {\n\tUsername string\n\tKey []byte\n\t_AuthID int32\n}\n\nfunc (auth *HMACMD5Authenticator) AuthObject() map[string][]byte {\n\tret := make(map[string][]byte)\n\tret[\"name\"] = []byte(auth.Username)\n\tret[\"algorithm\"] = []byte(\"hmac-md5.SIG-ALG.REG.INT.\")\n\n\treturn ret\n}\n\nfunc (auth *HMACMD5Authenticator) Sign(m *Message) []byte {\n\thmac := hmac.New(md5.New, auth.Key)\n\n\t\/\/ The signature's length is part of the message that we are\n\t\/\/ signing, so initialize the signature with the correct length.\n\tm.Signature = bytes.Repeat([]byte(\"\\x00\"), int(auth.AuthLen()))\n\thmac.Write(m.Bytes(true))\n\n\treturn hmac.Sum(nil)\n}\n\nfunc (_ *HMACMD5Authenticator) AuthLen() int32 {\n\treturn 16\n}\n\nfunc (auth *HMACMD5Authenticator) AuthID() int32 {\n\treturn auth._AuthID\n}\n\nfunc (auth *HMACMD5Authenticator) SetAuthID(val int32) {\n\tauth._AuthID = val\n}\n\nfunc main() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tkey := os.Getenv(\"OMAPI_KEY\")\n\tconnection := NewConnection(\"192.168.1.1\", 7911, \"omapi_key\", key)\n\n\t\/\/ message := NewOpenMessage(\"lease\")\n\t\/\/ mac, _ := net.ParseMAC(\"bc:ae:c5:76:1d:5a\")\n\t\/\/ message.Object[\"hardware-address\"] = []byte(mac)\n\t\/\/ response := connection.queryServer(message)\n\t\/\/ fmt.Println(response)\n\n\t\/\/ message := NewOpenMessage(\"host\")\n\t\/\/ \/\/ message.Message[\"create\"] = Ethernet\n\t\/\/ mac, _ := net.ParseMAC(\"08:00:27:4f:72:21\")\n\t\/\/ message.Object[\"hardware-address\"] = []byte(mac)\n\n\t\/\/ \/\/ buf := new(bytes.Buffer)\n\t\/\/ \/\/ binary.Write(buf, binary.BigEndian, int32(1))\n\n\t\/\/ message.Object[\"hardware-type\"] = Ethernet\n\n\t\/\/ response := connection.queryServer(message)\n\t\/\/ fmt.Println(response)\n\t\/\/ response = connection.queryServer(NewDeleteMessage(response.Handle))\n\t\/\/ fmt.Println(response)\n\n\tmac, _ := net.ParseMAC(\"08:00:27:4f:72:21\")\n\tip := net.ParseIP(\"192.168.1.33\")\n\tmessage := NewOpenMessage(\"host\")\n\n\tmessage.Message[\"create\"] = True\n\tmessage.Message[\"exclusive\"] = True\n\tmessage.Object[\"hardware-address\"] = []byte(mac)\n\tmessage.Object[\"hardware-type\"] = Ethernet\n\tmessage.Object[\"ip-address\"] = []byte(ip[12:])\n\tmessage.Object[\"statements\"] = []byte(\"ddns-hostname=\\\"win7.vm\\\";\")\n\tmessage.Object[\"name\"] = []byte(\"win7.vm\")\n\n\tresponse := connection.queryServer(message)\n\tif response.Opcode != OpUpdate {\n\t\tfmt.Println(\"add failed:\", string(response.Message[\"message\"]))\n\t}\n\t\/\/ fmt.Println(response)\n}\n<commit_msg>more visibility changes<commit_after>package main \/\/ FIXME omapi\n\nimport (\n\t\"bytes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"time\"\n)\n\ntype Opcode int32\n\nconst (\n\tOpOpen Opcode = 1 + iota\n\tOpRefresh\n\tOpUpdate\n\tOpNotify\n\tOpStatus\n\tOpDelete\n)\n\nvar Ethernet = []byte{0, 0, 0, 1}\nvar TokenRing = []byte{0, 0, 0, 6}\nvar FDDI = []byte{0, 0, 0, 8}\n\nvar True = []byte{0, 0, 0, 1}\n\nfunc (opcode Opcode) String() (ret string) {\n\tswitch opcode {\n\tcase 1:\n\t\tret = \"open\"\n\tcase 2:\n\t\tret = \"refresh\"\n\tcase 3:\n\t\tret = \"update\"\n\tcase 4:\n\t\tret = \"notify\"\n\tcase 5:\n\t\tret = \"status\"\n\tcase 6:\n\t\tret = \"delete\"\n\t}\n\n\treturn\n}\n\n\/\/ TODO add size checks for all operations\ntype buffer struct {\n\tbuffer *bytes.Buffer\n}\n\nfunc newBuffer() *buffer {\n\treturn &buffer{new(bytes.Buffer)}\n}\n\nfunc (b *buffer) add_bytes(data []byte) {\n\tb.buffer.Write(data)\n}\n\nfunc (b *buffer) add(data interface{}) {\n\terr := binary.Write(b.buffer, binary.BigEndian, data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (b *buffer) add_map(data map[string][]byte) {\n\t\/\/ We need to add the map in a deterministic order for signing to\n\t\/\/ work, so we first sort the keys in alphabetical order, then use\n\t\/\/ that order to access the map entries.\n\n\tkeys := make(sort.StringSlice, 0, len(data))\n\n\tfor key := range data {\n\t\tkeys = append(keys, key)\n\t}\n\n\tsort.Sort(keys)\n\n\tfor _, key := range keys {\n\t\tvalue := data[key]\n\n\t\tb.add(int16(len(key)))\n\t\tb.add([]byte(key))\n\n\t\tb.add(int32(len(value)))\n\t\tb.add(value)\n\t}\n\n\tb.add([]byte(\"\\x00\\x00\"))\n}\n\nfunc (b *buffer) bytes() []byte {\n\treturn b.buffer.Bytes()\n}\n\ntype Message struct {\n\tAuthID int32\n\tOpcode Opcode\n\tHandle int32\n\tTid int32\n\tRid int32\n\tMessage map[string][]byte\n\tObject map[string][]byte\n\tSignature []byte\n}\n\nfunc NewMessage() *Message {\n\tmsg := &Message{\n\t\tTid: rand.Int31(),\n\t\tMessage: make(map[string][]byte),\n\t\tObject: make(map[string][]byte),\n\t}\n\n\treturn msg\n}\n\nfunc NewOpenMessage(typeName string) *Message {\n\tmessage := NewMessage()\n\tmessage.Opcode = OpOpen\n\tmessage.Message[\"type\"] = []byte(typeName)\n\n\treturn message\n}\n\nfunc NewDeleteMessage(handle int32) *Message {\n\tmessage := NewMessage()\n\tmessage.Opcode = OpDelete\n\tmessage.Handle = handle\n\n\treturn message\n}\n\nfunc (m *Message) Bytes(forSigning bool) []byte {\n\tret := newBuffer()\n\tif !forSigning {\n\t\tret.add(m.AuthID)\n\t}\n\n\tret.add(int32(len(m.Signature)))\n\tret.add(m.Opcode)\n\tret.add(m.Handle)\n\tret.add(m.Tid)\n\tret.add(m.Rid)\n\tret.add_map(m.Message)\n\tret.add_map(m.Object)\n\tif !forSigning {\n\t\tret.add(m.Signature)\n\t}\n\n\treturn ret.buffer.Bytes()\n}\n\nfunc (m *Message) Sign(auth Authenticator) {\n\tm.AuthID = auth.AuthID()\n\tm.Signature = auth.Sign(m)\n}\n\nfunc (m *Message) Verify(auth Authenticator) bool {\n\treturn bytes.Equal(auth.Sign(m), m.Signature)\n}\n\nfunc (m *Message) IsResponseTo(other *Message) bool {\n\treturn m.Rid == other.Tid\n}\n\ntype Connection struct {\n\tHostname string\n\tPort int\n\tUsername string\n\tKey string\n\tAuthenticator Authenticator\n\tconnection *net.TCPConn\n\tinBuffer *bytes.Buffer\n}\n\nfunc NewConnection(hostname string, port int, username string, key string) *Connection {\n\tcon := &Connection{\n\t\tHostname: hostname,\n\t\tPort: port,\n\t\tUsername: username,\n\t\tKey: key,\n\t\tAuthenticator: new(NullAuthenticator),\n\t\tinBuffer: new(bytes.Buffer),\n\t}\n\n\tvar newAuth Authenticator = new(NullAuthenticator)\n\n\tif len(username) > 0 && len(key) > 0 {\n\t\tdecodedKey, err := base64.StdEncoding.DecodeString(key)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tnewAuth = &HMACMD5Authenticator{username, decodedKey, -1}\n\t}\n\n\traddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%s:%d\", hostname, port))\n\tif err != nil {\n\t\t\/\/ TODO return the error instead\n\t\tpanic(err)\n\t}\n\ttcpConn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\t\/\/ TODO return the error instead\n\t\tpanic(err)\n\t}\n\n\tcon.connection = tcpConn\n\n\tcon.sendProtocolInitialization()\n\tcon.receiveProtocolInitialization()\n\tcon.initializeAuthenticator(newAuth)\n\n\treturn con\n}\n\nfunc (con *Connection) initializeAuthenticator(auth Authenticator) {\n\tif _, ok := auth.(*NullAuthenticator); ok {\n\t\treturn\n\t}\n\n\tmessage := NewOpenMessage(\"authenticator\")\n\tfor key, value := range auth.AuthObject() {\n\t\tmessage.Object[key] = value\n\t}\n\n\tresponse := con.queryServer(message)\n\n\tif response.Opcode != OpUpdate {\n\t\tpanic(\"received non-update response for open\")\n\t}\n\n\tif response.Handle == 0 {\n\t\tpanic(\"received invalid authid from server\")\n\t}\n\n\tauth.SetAuthID(response.Handle)\n\tcon.Authenticator = auth\n}\n\nfunc (con *Connection) queryServer(msg *Message) *Message {\n\tmsg.Sign(con.Authenticator)\n\tcon.send(msg.Bytes(false))\n\tresponse := con.parseMessage()\n\tif !response.IsResponseTo(msg) {\n\t\tpanic(\"received message is not the desired response\")\n\t}\n\n\t\/\/ TODO check authid\n\n\treturn response\n}\n\nfunc (con *Connection) send(data []byte) (n int, err error) {\n\treturn con.connection.Write(data)\n}\n\nfunc (con *Connection) sendProtocolInitialization() {\n\tbuf := newBuffer()\n\tbuf.add(int32(100)) \/\/ Protocol version\n\tbuf.add(int32(24)) \/\/ Header size\n\tcon.send(buf.bytes())\n}\n\nfunc (con *Connection) read() {\n\tbuf := make([]byte, 2048)\n\tn, err := con.connection.Read(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcon.inBuffer.Write(buf[0:n])\n}\n\nfunc (con *Connection) waitForN(n int) {\n\tfor con.inBuffer.Len() < n {\n\t\tcon.read()\n\t}\n}\n\nfunc (con *Connection) parseStartupMessage() (version, headerSize int32) {\n\tcon.waitForN(8)\n\n\tbinary.Read(con.inBuffer, binary.BigEndian, &version)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &headerSize)\n\n\treturn\n}\n\nfunc (con *Connection) parseMap() map[string][]byte {\n\tdict := make(map[string][]byte)\n\n\tvar (\n\t\tkeyLength int16\n\t\tvalueLength int32\n\t\tkey []byte\n\t\tvalue []byte\n\t)\n\n\tfor {\n\t\tcon.waitForN(2)\n\t\tbinary.Read(con.inBuffer, binary.BigEndian, &keyLength)\n\t\tif keyLength == 0 {\n\t\t\t\/\/ end of map\n\t\t\tbreak\n\t\t}\n\n\t\tcon.waitForN(int(keyLength))\n\t\tkey = make([]byte, keyLength)\n\t\tcon.inBuffer.Read(key)\n\n\t\tcon.waitForN(4)\n\t\tbinary.Read(con.inBuffer, binary.BigEndian, &valueLength)\n\t\tcon.waitForN(int(valueLength))\n\t\tvalue = make([]byte, valueLength)\n\t\tcon.inBuffer.Read(value)\n\n\t\tdict[string(key)] = value\n\t}\n\n\treturn dict\n}\n\nfunc (con *Connection) parseMessage() *Message {\n\tmessage := new(Message)\n\tcon.waitForN(24) \/\/ authid + authlen + opcode + handle + tid + rid\n\n\tvar authlen int32\n\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.AuthID)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &authlen)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Opcode)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Handle)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Tid)\n\tbinary.Read(con.inBuffer, binary.BigEndian, &message.Rid)\n\n\tmessage.Message = con.parseMap()\n\tmessage.Object = con.parseMap()\n\n\tcon.waitForN(int(authlen))\n\tmessage.Signature = make([]byte, authlen)\n\tcon.inBuffer.Read(message.Signature)\n\n\treturn message\n}\n\nfunc (con *Connection) receiveProtocolInitialization() {\n\tversion, headerSize := con.parseStartupMessage()\n\tif version != 100 {\n\t\tpanic(\"version mismatch\")\n\t}\n\n\tif headerSize != 24 {\n\t\tpanic(\"header size mismatch\")\n\t}\n}\n\ntype Authenticator interface {\n\tSign(*Message) []byte\n\tAuthObject() map[string][]byte\n\tAuthLen() int32\n\tAuthID() int32\n\tSetAuthID(int32)\n}\n\ntype NullAuthenticator struct{}\n\nfunc (_ *NullAuthenticator) AuthObject() map[string][]byte {\n\treturn make(map[string][]byte)\n}\n\nfunc (_ *NullAuthenticator) Sign(_ *Message) []byte {\n\treturn []byte(\"\")\n}\n\nfunc (_ *NullAuthenticator) AuthLen() int32 {\n\treturn 0\n}\n\nfunc (_ *NullAuthenticator) AuthID() int32 {\n\treturn 0\n}\n\nfunc (_ *NullAuthenticator) SetAuthID(_ int32) {\n}\n\ntype HMACMD5Authenticator struct {\n\tUsername string\n\tKey []byte\n\t_AuthID int32\n}\n\nfunc (auth *HMACMD5Authenticator) AuthObject() map[string][]byte {\n\tret := make(map[string][]byte)\n\tret[\"name\"] = []byte(auth.Username)\n\tret[\"algorithm\"] = []byte(\"hmac-md5.SIG-ALG.REG.INT.\")\n\n\treturn ret\n}\n\nfunc (auth *HMACMD5Authenticator) Sign(m *Message) []byte {\n\thmac := hmac.New(md5.New, auth.Key)\n\n\t\/\/ The signature's length is part of the message that we are\n\t\/\/ signing, so initialize the signature with the correct length.\n\tm.Signature = bytes.Repeat([]byte(\"\\x00\"), int(auth.AuthLen()))\n\thmac.Write(m.Bytes(true))\n\n\treturn hmac.Sum(nil)\n}\n\nfunc (_ *HMACMD5Authenticator) AuthLen() int32 {\n\treturn 16\n}\n\nfunc (auth *HMACMD5Authenticator) AuthID() int32 {\n\treturn auth._AuthID\n}\n\nfunc (auth *HMACMD5Authenticator) SetAuthID(val int32) {\n\tauth._AuthID = val\n}\n\nfunc main() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tkey := os.Getenv(\"OMAPI_KEY\")\n\tconnection := NewConnection(\"192.168.1.1\", 7911, \"omapi_key\", key)\n\n\t\/\/ message := NewOpenMessage(\"lease\")\n\t\/\/ mac, _ := net.ParseMAC(\"bc:ae:c5:76:1d:5a\")\n\t\/\/ message.Object[\"hardware-address\"] = []byte(mac)\n\t\/\/ response := connection.queryServer(message)\n\t\/\/ fmt.Println(response)\n\n\t\/\/ message := NewOpenMessage(\"host\")\n\t\/\/ \/\/ message.Message[\"create\"] = Ethernet\n\t\/\/ mac, _ := net.ParseMAC(\"08:00:27:4f:72:21\")\n\t\/\/ message.Object[\"hardware-address\"] = []byte(mac)\n\n\t\/\/ \/\/ buf := new(bytes.Buffer)\n\t\/\/ \/\/ binary.Write(buf, binary.BigEndian, int32(1))\n\n\t\/\/ message.Object[\"hardware-type\"] = Ethernet\n\n\t\/\/ response := connection.queryServer(message)\n\t\/\/ fmt.Println(response)\n\t\/\/ response = connection.queryServer(NewDeleteMessage(response.Handle))\n\t\/\/ fmt.Println(response)\n\n\tmac, _ := net.ParseMAC(\"08:00:27:4f:72:21\")\n\tip := net.ParseIP(\"192.168.1.33\")\n\tmessage := NewOpenMessage(\"host\")\n\n\tmessage.Message[\"create\"] = True\n\tmessage.Message[\"exclusive\"] = True\n\tmessage.Object[\"hardware-address\"] = []byte(mac)\n\tmessage.Object[\"hardware-type\"] = Ethernet\n\tmessage.Object[\"ip-address\"] = []byte(ip[12:])\n\tmessage.Object[\"statements\"] = []byte(\"ddns-hostname=\\\"win7.vm\\\";\")\n\tmessage.Object[\"name\"] = []byte(\"win7.vm\")\n\n\tresponse := connection.queryServer(message)\n\tif response.Opcode != OpUpdate {\n\t\tfmt.Println(\"add failed:\", string(response.Message[\"message\"]))\n\t}\n\t\/\/ fmt.Println(response)\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/fatih\/set\"\n\tzglob \"github.com\/mattn\/go-zglob\"\n\thomedir \"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/mohae\/deepcopy\"\n\t\"github.com\/synchro-food\/filelint\/lib\"\n)\n\ntype Config struct {\n\tFile File `yaml:\"files\"`\n\tTargets []Target `yaml:\"targets\"`\n}\n\nfunc NewConfig(configFile string) (*Config, error) {\n\tconf, err := NewDefaultConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserConfig := &Config{}\n\tsrc, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := yaml.Unmarshal(src, &userConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf.Merge(userConfig)\n\n\treturn conf, nil\n}\n\nfunc NewDefaultConfig() (*Config, error) {\n\tconf := &Config{}\n\tsrc, err := configDefaultYmlBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := yaml.Unmarshal(src, &conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc (src *Config) Merge(dst *Config) {\n\tif len(dst.File.Include) > 0 {\n\t\tsrc.File.Include = dst.File.Include\n\t}\n\tsrc.File.Exclude = append(src.File.Exclude, dst.File.Exclude...)\n\tsrc.Targets = append(src.Targets, dst.Targets...)\n}\n\nfunc (cfg *Config) MatchedRule(file string) RuleMap {\n\trm := make(RuleMap)\n\n\tfor _, t := range cfg.Targets {\n\t\tif match(file, t.Patterns) {\n\t\t\trm = rm.Merge(t.Rule)\n\t\t}\n\t}\n\n\treturn rm\n}\n\nfunc match(file string, patterns []string) bool {\n\tif len(patterns) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, pattern := range patterns {\n\t\tok, err := zglob.Match(pattern, file)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\ntype File struct {\n\tInclude []string `yaml:\"include\"`\n\tExclude []string `yaml:\"exclude\"`\n}\n\nfunc (f File) FindTargets() ([]string, error) {\n\tf.Include = addGlobSignIfDir(f.Include...)\n\tf.Exclude = addGlobSignIfDir(f.Exclude...)\n\n\tinSet, err := expandGlob(f.Include)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texSet, _ := expandGlob(f.Exclude)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileSet := set.Difference(inSet, exSet)\n\tfiles := set.StringSlice(fileSet)\n\tfiles = lib.FindTextFiles(files)\n\n\treturn files, nil\n}\n\nfunc expandGlob(files []string) (*set.Set, error) {\n\tfileSet := set.New()\n\n\tfor _, f := range files {\n\t\texpanded, err := zglob.Glob(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, ex := range expanded {\n\t\t\tfileSet.Add(ex)\n\t\t}\n\t}\n\n\treturn fileSet, nil\n}\n\nfunc addGlobSignIfDir(files ...string) (dst []string) {\n\tdst = make([]string, 0, len(files))\n\n\tfor _, f := range files {\n\t\tif !strings.Contains(f, \"*\") {\n\t\t\tinfo, err := os.Stat(f)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\tf = filepath.Join(f, \"**\", \"*\")\n\t\t\t}\n\t\t}\n\n\t\tdst = append(dst, f)\n\t}\n\n\treturn dst\n}\n\ntype Target struct {\n\tPatterns []string `yaml:\"patterns\"`\n\tRule RuleMap `yaml:\"rules\"`\n}\n\ntype RuleMap map[string]map[string]interface{}\n\nfunc (rm RuleMap) Merge(dst RuleMap) (ret RuleMap) {\n\tret = deepcopy.Copy(rm).(RuleMap)\n\n\tfor ruleName, options := range dst {\n\t\tif _, ok := ret[ruleName]; ok {\n\t\t\tfor optionKey, value := range options {\n\t\t\t\tret[ruleName][optionKey] = value\n\t\t\t}\n\t\t} else {\n\t\t\tret[ruleName] = options\n\t\t}\n\t}\n\n\treturn ret\n}\n\nvar (\n\tfileName = \".filelint.yml\"\n\tsearchPath = \".\"\n)\n\nfunc SearchConfigFile() (f string, ok bool, err error) {\n\tif f := filepath.Join(searchPath, fileName); lib.IsExist(f) {\n\t\treturn f, true, nil\n\t}\n\n\tif gitRoot, err := lib.FindGitRootPath(searchPath); err != nil {\n\t\tif err != lib.ErrNotGitRepository {\n\t\t\treturn \"\", false, err\n\t\t}\n\t} else {\n\t\tif f := filepath.Join(gitRoot, fileName); lib.IsExist(f) {\n\t\t\treturn f, true, nil\n\t\t}\n\t}\n\n\tif home, err := homedir.Dir(); err != nil {\n\t\treturn \"\", false, err\n\t} else {\n\t\tif f := filepath.Join(home, fileName); lib.IsExist(f) {\n\t\t\treturn f, true, nil\n\t\t}\n\t}\n\n\treturn \"\", false, nil\n}\n<commit_msg>Sort target files by name<commit_after>package config\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/fatih\/set\"\n\tzglob \"github.com\/mattn\/go-zglob\"\n\thomedir \"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/mohae\/deepcopy\"\n\t\"github.com\/synchro-food\/filelint\/lib\"\n)\n\ntype Config struct {\n\tFile File `yaml:\"files\"`\n\tTargets []Target `yaml:\"targets\"`\n}\n\nfunc NewConfig(configFile string) (*Config, error) {\n\tconf, err := NewDefaultConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserConfig := &Config{}\n\tsrc, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := yaml.Unmarshal(src, &userConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf.Merge(userConfig)\n\n\treturn conf, nil\n}\n\nfunc NewDefaultConfig() (*Config, error) {\n\tconf := &Config{}\n\tsrc, err := configDefaultYmlBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := yaml.Unmarshal(src, &conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc (src *Config) Merge(dst *Config) {\n\tif len(dst.File.Include) > 0 {\n\t\tsrc.File.Include = dst.File.Include\n\t}\n\tsrc.File.Exclude = append(src.File.Exclude, dst.File.Exclude...)\n\tsrc.Targets = append(src.Targets, dst.Targets...)\n}\n\nfunc (cfg *Config) MatchedRule(file string) RuleMap {\n\trm := make(RuleMap)\n\n\tfor _, t := range cfg.Targets {\n\t\tif match(file, t.Patterns) {\n\t\t\trm = rm.Merge(t.Rule)\n\t\t}\n\t}\n\n\treturn rm\n}\n\nfunc match(file string, patterns []string) bool {\n\tif len(patterns) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, pattern := range patterns {\n\t\tok, err := zglob.Match(pattern, file)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\ntype File struct {\n\tInclude []string `yaml:\"include\"`\n\tExclude []string `yaml:\"exclude\"`\n}\n\nfunc (f File) FindTargets() ([]string, error) {\n\tf.Include = addGlobSignIfDir(f.Include...)\n\tf.Exclude = addGlobSignIfDir(f.Exclude...)\n\n\tinSet, err := expandGlob(f.Include)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texSet, _ := expandGlob(f.Exclude)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileSet := set.Difference(inSet, exSet)\n\tfiles := set.StringSlice(fileSet)\n\tfiles = lib.FindTextFiles(files)\n\n\tsort.Strings(files)\n\n\treturn files, nil\n}\n\nfunc expandGlob(files []string) (*set.Set, error) {\n\tfileSet := set.New()\n\n\tfor _, f := range files {\n\t\texpanded, err := zglob.Glob(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, ex := range expanded {\n\t\t\tfileSet.Add(ex)\n\t\t}\n\t}\n\n\treturn fileSet, nil\n}\n\nfunc addGlobSignIfDir(files ...string) (dst []string) {\n\tdst = make([]string, 0, len(files))\n\n\tfor _, f := range files {\n\t\tif !strings.Contains(f, \"*\") {\n\t\t\tinfo, err := os.Stat(f)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\tf = filepath.Join(f, \"**\", \"*\")\n\t\t\t}\n\t\t}\n\n\t\tdst = append(dst, f)\n\t}\n\n\treturn dst\n}\n\ntype Target struct {\n\tPatterns []string `yaml:\"patterns\"`\n\tRule RuleMap `yaml:\"rules\"`\n}\n\ntype RuleMap map[string]map[string]interface{}\n\nfunc (rm RuleMap) Merge(dst RuleMap) (ret RuleMap) {\n\tret = deepcopy.Copy(rm).(RuleMap)\n\n\tfor ruleName, options := range dst {\n\t\tif _, ok := ret[ruleName]; ok {\n\t\t\tfor optionKey, value := range options {\n\t\t\t\tret[ruleName][optionKey] = value\n\t\t\t}\n\t\t} else {\n\t\t\tret[ruleName] = options\n\t\t}\n\t}\n\n\treturn ret\n}\n\nvar (\n\tfileName = \".filelint.yml\"\n\tsearchPath = \".\"\n)\n\nfunc SearchConfigFile() (f string, ok bool, err error) {\n\tif f := filepath.Join(searchPath, fileName); lib.IsExist(f) {\n\t\treturn f, true, nil\n\t}\n\n\tif gitRoot, err := lib.FindGitRootPath(searchPath); err != nil {\n\t\tif err != lib.ErrNotGitRepository {\n\t\t\treturn \"\", false, err\n\t\t}\n\t} else {\n\t\tif f := filepath.Join(gitRoot, fileName); lib.IsExist(f) {\n\t\t\treturn f, true, nil\n\t\t}\n\t}\n\n\tif home, err := homedir.Dir(); err != nil {\n\t\treturn \"\", false, err\n\t} else {\n\t\tif f := filepath.Join(home, fileName); lib.IsExist(f) {\n\t\t\treturn f, true, nil\n\t\t}\n\t}\n\n\treturn \"\", false, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/google\/syzkaller\/fileutil\"\n\t\"github.com\/google\/syzkaller\/sys\"\n\t\"github.com\/google\/syzkaller\/vm\"\n)\n\ntype Config struct {\n\tName string \/\/ Instance name (used for identification and as GCE instance prefix)\n\tHttp string \/\/ TCP address to serve HTTP stats page (e.g. \"localhost:50000\")\n\tRpc string \/\/ TCP address to serve RPC for fuzzer processes (optional, only useful for type \"none\")\n\tWorkdir string\n\tVmlinux string\n\tKernel string \/\/ e.g. arch\/x86\/boot\/bzImage\n\tTag string \/\/ arbitrary optional tag that is saved along with crash reports (e.g. kernel branch\/commit)\n\tCmdline string \/\/ kernel command line\n\tImage string \/\/ linux image for VMs\n\tInitrd string \/\/ linux initial ramdisk. (optional)\n\tCpu int \/\/ number of VM CPUs\n\tMem int \/\/ amount of VM memory in MBs\n\tSshkey string \/\/ root ssh key for the image\n\tBin string \/\/ qemu\/lkvm binary name\n\tBin_Args string \/\/ additional command line arguments for qemu\/lkvm binary\n\tDebug bool \/\/ dump all VM output to console\n\tOutput string \/\/ one of stdout\/dmesg\/file (useful only for local VM)\n\n\tHub_Addr string\n\tHub_Key string\n\n\tSyzkaller string \/\/ path to syzkaller checkout (syz-manager will look for binaries in bin subdir)\n\tType string \/\/ VM type (qemu, kvm, local)\n\tCount int \/\/ number of VMs (don't secify for adb, instead specify devices)\n\tDevices []string \/\/ device IDs for adb\n\tProcs int \/\/ number of parallel processes inside of every VM\n\n\tSandbox string \/\/ type of sandbox to use during fuzzing:\n\t\/\/ \"none\": don't do anything special (has false positives, e.g. due to killing init)\n\t\/\/ \"setuid\": impersonate into user nobody (65534), default\n\t\/\/ \"namespace\": create a new namespace for fuzzer using CLONE_NEWNS\/CLONE_NEWNET\/CLONE_NEWPID\/etc,\n\t\/\/\trequires building kernel with CONFIG_NAMESPACES, CONFIG_UTS_NS, CONFIG_USER_NS, CONFIG_PID_NS and CONFIG_NET_NS.\n\n\tMachine_Type string \/\/ GCE machine type (e.g. \"n1-highcpu-2\")\n\n\tCover bool \/\/ use kcov coverage (default: true)\n\tLeak bool \/\/ do memory leak checking\n\tReproduce bool \/\/ reproduce, localize and minimize crashers (on by default)\n\n\tEnable_Syscalls []string\n\tDisable_Syscalls []string\n\tSuppressions []string \/\/ don't save reports matching these regexps, but reboot VM after them\n\tIgnores []string \/\/ completely ignore reports matching these regexps (don't save nor reboot)\n\n\t\/\/ Implementation details beyond this point.\n\tParsedSuppressions []*regexp.Regexp `json:\"-\"`\n\tParsedIgnores []*regexp.Regexp `json:\"-\"`\n}\n\nfunc Parse(filename string) (*Config, map[int]bool, error) {\n\tif filename == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"supply config in -config flag\")\n\t}\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to read config file: %v\", err)\n\t}\n\treturn parse(data)\n}\n\nfunc parse(data []byte) (*Config, map[int]bool, error) {\n\tunknown, err := checkUnknownFields(data)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif unknown != \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"unknown field '%v' in config\", unknown)\n\t}\n\tcfg := new(Config)\n\tcfg.Cover = true\n\tcfg.Reproduce = true\n\tcfg.Sandbox = \"setuid\"\n\tif err := json.Unmarshal(data, cfg); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse config file: %v\", err)\n\t}\n\tif _, err := os.Stat(filepath.Join(cfg.Syzkaller, \"bin\/syz-fuzzer\")); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"bad config syzkaller param: can't find bin\/syz-fuzzer\")\n\t}\n\tif _, err := os.Stat(filepath.Join(cfg.Syzkaller, \"bin\/syz-executor\")); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"bad config syzkaller param: can't find bin\/syz-executor\")\n\t}\n\tif cfg.Http == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param http is empty\")\n\t}\n\tif cfg.Workdir == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param workdir is empty\")\n\t}\n\tif cfg.Vmlinux == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param vmlinux is empty\")\n\t}\n\tif cfg.Type == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param type is empty\")\n\t}\n\tswitch cfg.Type {\n\tcase \"none\":\n\t\tif cfg.Count != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid config param count: %v, type \\\"none\\\" does not support param count\", cfg.Count)\n\t\t}\n\t\tif cfg.Rpc == \"\" {\n\t\t\treturn nil, nil, fmt.Errorf(\"config param rpc is empty (required for type \\\"none\\\")\")\n\t\t}\n\t\tif len(cfg.Devices) != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"type %v does not support devices param\", cfg.Type)\n\t\t}\n\tcase \"adb\":\n\t\tif cfg.Count != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"don't specify count for adb, instead specify devices\")\n\t\t}\n\t\tif len(cfg.Devices) == 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"specify at least 1 adb device\")\n\t\t}\n\t\tcfg.Count = len(cfg.Devices)\n\tcase \"gce\":\n\t\tif cfg.Machine_Type == \"\" {\n\t\t\treturn nil, nil, fmt.Errorf(\"machine_type parameter is empty (required for gce)\")\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\tif cfg.Count <= 0 || cfg.Count > 1000 {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid config param count: %v, want (1, 1000]\", cfg.Count)\n\t\t}\n\t\tif len(cfg.Devices) != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"type %v does not support devices param\", cfg.Type)\n\t\t}\n\t}\n\tif cfg.Rpc == \"\" {\n\t\tcfg.Rpc = \"localhost:0\"\n\t}\n\tif cfg.Procs <= 0 {\n\t\tcfg.Procs = 1\n\t}\n\tif cfg.Procs > 32 {\n\t\treturn nil, nil, fmt.Errorf(\"config param procs has higher value '%v' then the max supported 32\", cfg.Procs)\n\t}\n\tif cfg.Output == \"\" {\n\t\tif cfg.Type == \"local\" {\n\t\t\tcfg.Output = \"none\"\n\t\t} else {\n\t\t\tcfg.Output = \"stdout\"\n\t\t}\n\t}\n\tswitch cfg.Output {\n\tcase \"none\", \"stdout\", \"dmesg\", \"file\":\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"config param output must contain one of none\/stdout\/dmesg\/file\")\n\t}\n\tswitch cfg.Sandbox {\n\tcase \"none\", \"setuid\", \"namespace\":\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"config param sandbox must contain one of none\/setuid\/namespace\")\n\t}\n\n\tsyscalls, err := parseSyscalls(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif err := parseSuppressions(cfg); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn cfg, syscalls, nil\n}\n\nfunc parseSyscalls(cfg *Config) (map[int]bool, error) {\n\tmatch := func(call *sys.Call, str string) bool {\n\t\tif str == call.CallName || str == call.Name {\n\t\t\treturn true\n\t\t}\n\t\tif len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tsyscalls := make(map[int]bool)\n\tif len(cfg.Enable_Syscalls) != 0 {\n\t\tfor _, c := range cfg.Enable_Syscalls {\n\t\t\tn := 0\n\t\t\tfor _, call := range sys.Calls {\n\t\t\t\tif match(call, c) {\n\t\t\t\t\tsyscalls[call.ID] = true\n\t\t\t\t\tn++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif n == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"unknown enabled syscall: %v\", c)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, call := range sys.Calls {\n\t\t\tsyscalls[call.ID] = true\n\t\t}\n\t}\n\tfor _, c := range cfg.Disable_Syscalls {\n\t\tn := 0\n\t\tfor _, call := range sys.Calls {\n\t\t\tif match(call, c) {\n\t\t\t\tdelete(syscalls, call.ID)\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn nil, fmt.Errorf(\"unknown disabled syscall: %v\", c)\n\t\t}\n\t}\n\t\/\/ mmap is used to allocate memory.\n\tsyscalls[sys.CallMap[\"mmap\"].ID] = true\n\n\treturn syscalls, nil\n}\n\nfunc parseSuppressions(cfg *Config) error {\n\t\/\/ Add some builtin suppressions.\n\tsupp := append(cfg.Suppressions, []string{\n\t\t\"panic: failed to start executor binary\",\n\t\t\"panic: executor failed: pthread_create failed\",\n\t\t\"panic: failed to create temp dir\",\n\t\t\"fatal error: runtime: out of memory\",\n\t\t\"fatal error: runtime: cannot allocate memory\",\n\t\t\"fatal error: unexpected signal during runtime execution\", \/\/ presubmably OOM turned into SIGBUS\n\t\t\"Out of memory: Kill process .* \\\\(syz-fuzzer\\\\)\",\n\t\t\"lowmemorykiller: Killing 'syz-fuzzer'\",\n\t\t\/\/\"INFO: lockdep is turned off\", \/\/ printed by some sysrq that dumps scheduler state, but also on all lockdep reports\n\t}...)\n\tfor _, s := range supp {\n\t\tre, err := regexp.Compile(s)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to compile suppression '%v': %v\", s, err)\n\t\t}\n\t\tcfg.ParsedSuppressions = append(cfg.ParsedSuppressions, re)\n\t}\n\tfor _, ignore := range cfg.Ignores {\n\t\tre, err := regexp.Compile(ignore)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to compile ignore '%v': %v\", ignore, err)\n\t\t}\n\t\tcfg.ParsedIgnores = append(cfg.ParsedIgnores, re)\n\t}\n\treturn nil\n}\n\nfunc CreateVMConfig(cfg *Config, index int) (*vm.Config, error) {\n\tif index < 0 || index >= cfg.Count {\n\t\treturn nil, fmt.Errorf(\"invalid VM index %v (count %v)\", index, cfg.Count)\n\t}\n\tworkdir, err := fileutil.ProcessTempDir(cfg.Workdir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create instance temp dir: %v\", err)\n\t}\n\tvmCfg := &vm.Config{\n\t\tName: fmt.Sprintf(\"%v-%v-%v\", cfg.Type, cfg.Name, index),\n\t\tIndex: index,\n\t\tWorkdir: workdir,\n\t\tBin: cfg.Bin,\n\t\tBinArgs: cfg.Bin_Args,\n\t\tKernel: cfg.Kernel,\n\t\tCmdline: cfg.Cmdline,\n\t\tImage: cfg.Image,\n\t\tInitrd: cfg.Initrd,\n\t\tSshkey: cfg.Sshkey,\n\t\tExecutor: filepath.Join(cfg.Syzkaller, \"bin\", \"syz-executor\"),\n\t\tCpu: cfg.Cpu,\n\t\tMem: cfg.Mem,\n\t\tDebug: cfg.Debug,\n\t\tMachineType: cfg.Machine_Type,\n\t}\n\tif len(cfg.Devices) != 0 {\n\t\tvmCfg.Device = cfg.Devices[index]\n\t}\n\treturn vmCfg, nil\n}\n\nfunc checkUnknownFields(data []byte) (string, error) {\n\t\/\/ While https:\/\/github.com\/golang\/go\/issues\/15314 is not resolved\n\t\/\/ we don't have a better way than to enumerate all known fields.\n\tvar fields = []string{\n\t\t\"Name\",\n\t\t\"Http\",\n\t\t\"Rpc\",\n\t\t\"Workdir\",\n\t\t\"Vmlinux\",\n\t\t\"Kernel\",\n\t\t\"Tag\",\n\t\t\"Cmdline\",\n\t\t\"Image\",\n\t\t\"Cpu\",\n\t\t\"Mem\",\n\t\t\"Sshkey\",\n\t\t\"Bin\",\n\t\t\"Bin_Args\",\n\t\t\"Debug\",\n\t\t\"Output\",\n\t\t\"Hub_Addr\",\n\t\t\"Hub_Key\",\n\t\t\"Syzkaller\",\n\t\t\"Type\",\n\t\t\"Count\",\n\t\t\"Devices\",\n\t\t\"Procs\",\n\t\t\"Cover\",\n\t\t\"Reproduce\",\n\t\t\"Sandbox\",\n\t\t\"Leak\",\n\t\t\"Enable_Syscalls\",\n\t\t\"Disable_Syscalls\",\n\t\t\"Suppressions\",\n\t\t\"Ignores\",\n\t\t\"Initrd\",\n\t\t\"Machine_Type\",\n\t}\n\tf := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &f); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse config file: %v\", err)\n\t}\n\tfor k := range f {\n\t\tok := false\n\t\tfor _, k1 := range fields {\n\t\t\tif strings.ToLower(k) == strings.ToLower(k1) {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn k, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}\n<commit_msg>config: allow relative paths in configs<commit_after>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/google\/syzkaller\/fileutil\"\n\t\"github.com\/google\/syzkaller\/sys\"\n\t\"github.com\/google\/syzkaller\/vm\"\n)\n\ntype Config struct {\n\tName string \/\/ Instance name (used for identification and as GCE instance prefix)\n\tHttp string \/\/ TCP address to serve HTTP stats page (e.g. \"localhost:50000\")\n\tRpc string \/\/ TCP address to serve RPC for fuzzer processes (optional, only useful for type \"none\")\n\tWorkdir string\n\tVmlinux string\n\tKernel string \/\/ e.g. arch\/x86\/boot\/bzImage\n\tTag string \/\/ arbitrary optional tag that is saved along with crash reports (e.g. kernel branch\/commit)\n\tCmdline string \/\/ kernel command line\n\tImage string \/\/ linux image for VMs\n\tInitrd string \/\/ linux initial ramdisk. (optional)\n\tCpu int \/\/ number of VM CPUs\n\tMem int \/\/ amount of VM memory in MBs\n\tSshkey string \/\/ root ssh key for the image\n\tBin string \/\/ qemu\/lkvm binary name\n\tBin_Args string \/\/ additional command line arguments for qemu\/lkvm binary\n\tDebug bool \/\/ dump all VM output to console\n\tOutput string \/\/ one of stdout\/dmesg\/file (useful only for local VM)\n\n\tHub_Addr string\n\tHub_Key string\n\n\tSyzkaller string \/\/ path to syzkaller checkout (syz-manager will look for binaries in bin subdir)\n\tType string \/\/ VM type (qemu, kvm, local)\n\tCount int \/\/ number of VMs (don't secify for adb, instead specify devices)\n\tDevices []string \/\/ device IDs for adb\n\tProcs int \/\/ number of parallel processes inside of every VM\n\n\tSandbox string \/\/ type of sandbox to use during fuzzing:\n\t\/\/ \"none\": don't do anything special (has false positives, e.g. due to killing init)\n\t\/\/ \"setuid\": impersonate into user nobody (65534), default\n\t\/\/ \"namespace\": create a new namespace for fuzzer using CLONE_NEWNS\/CLONE_NEWNET\/CLONE_NEWPID\/etc,\n\t\/\/\trequires building kernel with CONFIG_NAMESPACES, CONFIG_UTS_NS, CONFIG_USER_NS, CONFIG_PID_NS and CONFIG_NET_NS.\n\n\tMachine_Type string \/\/ GCE machine type (e.g. \"n1-highcpu-2\")\n\n\tCover bool \/\/ use kcov coverage (default: true)\n\tLeak bool \/\/ do memory leak checking\n\tReproduce bool \/\/ reproduce, localize and minimize crashers (on by default)\n\n\tEnable_Syscalls []string\n\tDisable_Syscalls []string\n\tSuppressions []string \/\/ don't save reports matching these regexps, but reboot VM after them\n\tIgnores []string \/\/ completely ignore reports matching these regexps (don't save nor reboot)\n\n\t\/\/ Implementation details beyond this point.\n\tParsedSuppressions []*regexp.Regexp `json:\"-\"`\n\tParsedIgnores []*regexp.Regexp `json:\"-\"`\n}\n\nfunc Parse(filename string) (*Config, map[int]bool, error) {\n\tif filename == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"supply config in -config flag\")\n\t}\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to read config file: %v\", err)\n\t}\n\treturn parse(data)\n}\n\nfunc parse(data []byte) (*Config, map[int]bool, error) {\n\tunknown, err := checkUnknownFields(data)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif unknown != \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"unknown field '%v' in config\", unknown)\n\t}\n\tcfg := new(Config)\n\tcfg.Cover = true\n\tcfg.Reproduce = true\n\tcfg.Sandbox = \"setuid\"\n\tif err := json.Unmarshal(data, cfg); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse config file: %v\", err)\n\t}\n\tif _, err := os.Stat(filepath.Join(cfg.Syzkaller, \"bin\/syz-fuzzer\")); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"bad config syzkaller param: can't find bin\/syz-fuzzer\")\n\t}\n\tif _, err := os.Stat(filepath.Join(cfg.Syzkaller, \"bin\/syz-executor\")); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"bad config syzkaller param: can't find bin\/syz-executor\")\n\t}\n\tif cfg.Http == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param http is empty\")\n\t}\n\tif cfg.Workdir == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param workdir is empty\")\n\t}\n\tif cfg.Vmlinux == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param vmlinux is empty\")\n\t}\n\tif cfg.Type == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"config param type is empty\")\n\t}\n\tswitch cfg.Type {\n\tcase \"none\":\n\t\tif cfg.Count != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid config param count: %v, type \\\"none\\\" does not support param count\", cfg.Count)\n\t\t}\n\t\tif cfg.Rpc == \"\" {\n\t\t\treturn nil, nil, fmt.Errorf(\"config param rpc is empty (required for type \\\"none\\\")\")\n\t\t}\n\t\tif len(cfg.Devices) != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"type %v does not support devices param\", cfg.Type)\n\t\t}\n\tcase \"adb\":\n\t\tif cfg.Count != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"don't specify count for adb, instead specify devices\")\n\t\t}\n\t\tif len(cfg.Devices) == 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"specify at least 1 adb device\")\n\t\t}\n\t\tcfg.Count = len(cfg.Devices)\n\tcase \"gce\":\n\t\tif cfg.Machine_Type == \"\" {\n\t\t\treturn nil, nil, fmt.Errorf(\"machine_type parameter is empty (required for gce)\")\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\tif cfg.Count <= 0 || cfg.Count > 1000 {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid config param count: %v, want (1, 1000]\", cfg.Count)\n\t\t}\n\t\tif len(cfg.Devices) != 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"type %v does not support devices param\", cfg.Type)\n\t\t}\n\t}\n\tif cfg.Rpc == \"\" {\n\t\tcfg.Rpc = \"localhost:0\"\n\t}\n\tif cfg.Procs <= 0 {\n\t\tcfg.Procs = 1\n\t}\n\tif cfg.Procs > 32 {\n\t\treturn nil, nil, fmt.Errorf(\"config param procs has higher value '%v' then the max supported 32\", cfg.Procs)\n\t}\n\tif cfg.Output == \"\" {\n\t\tif cfg.Type == \"local\" {\n\t\t\tcfg.Output = \"none\"\n\t\t} else {\n\t\t\tcfg.Output = \"stdout\"\n\t\t}\n\t}\n\tswitch cfg.Output {\n\tcase \"none\", \"stdout\", \"dmesg\", \"file\":\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"config param output must contain one of none\/stdout\/dmesg\/file\")\n\t}\n\tswitch cfg.Sandbox {\n\tcase \"none\", \"setuid\", \"namespace\":\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"config param sandbox must contain one of none\/setuid\/namespace\")\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to get wd: %v\", err)\n\t}\n\tabs := func(path string) string {\n\t\tif path != \"\" && !filepath.IsAbs(path) {\n\t\t\tpath = filepath.Join(wd, path)\n\t\t}\n\t\treturn path\n\t}\n\tif cfg.Image != \"9p\" {\n\t\tcfg.Image = abs(cfg.Image)\n\t}\n\tcfg.Workdir = abs(cfg.Workdir)\n\tcfg.Kernel = abs(cfg.Kernel)\n\tcfg.Vmlinux = abs(cfg.Vmlinux)\n\tcfg.Syzkaller = abs(cfg.Syzkaller)\n\tcfg.Initrd = abs(cfg.Initrd)\n\tcfg.Sshkey = abs(cfg.Sshkey)\n\tcfg.Bin = abs(cfg.Bin)\n\n\tsyscalls, err := parseSyscalls(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif err := parseSuppressions(cfg); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn cfg, syscalls, nil\n}\n\nfunc parseSyscalls(cfg *Config) (map[int]bool, error) {\n\tmatch := func(call *sys.Call, str string) bool {\n\t\tif str == call.CallName || str == call.Name {\n\t\t\treturn true\n\t\t}\n\t\tif len(str) > 1 && str[len(str)-1] == '*' && strings.HasPrefix(call.Name, str[:len(str)-1]) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tsyscalls := make(map[int]bool)\n\tif len(cfg.Enable_Syscalls) != 0 {\n\t\tfor _, c := range cfg.Enable_Syscalls {\n\t\t\tn := 0\n\t\t\tfor _, call := range sys.Calls {\n\t\t\t\tif match(call, c) {\n\t\t\t\t\tsyscalls[call.ID] = true\n\t\t\t\t\tn++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif n == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"unknown enabled syscall: %v\", c)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, call := range sys.Calls {\n\t\t\tsyscalls[call.ID] = true\n\t\t}\n\t}\n\tfor _, c := range cfg.Disable_Syscalls {\n\t\tn := 0\n\t\tfor _, call := range sys.Calls {\n\t\t\tif match(call, c) {\n\t\t\t\tdelete(syscalls, call.ID)\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn nil, fmt.Errorf(\"unknown disabled syscall: %v\", c)\n\t\t}\n\t}\n\t\/\/ mmap is used to allocate memory.\n\tsyscalls[sys.CallMap[\"mmap\"].ID] = true\n\n\treturn syscalls, nil\n}\n\nfunc parseSuppressions(cfg *Config) error {\n\t\/\/ Add some builtin suppressions.\n\tsupp := append(cfg.Suppressions, []string{\n\t\t\"panic: failed to start executor binary\",\n\t\t\"panic: executor failed: pthread_create failed\",\n\t\t\"panic: failed to create temp dir\",\n\t\t\"fatal error: runtime: out of memory\",\n\t\t\"fatal error: runtime: cannot allocate memory\",\n\t\t\"fatal error: unexpected signal during runtime execution\", \/\/ presubmably OOM turned into SIGBUS\n\t\t\"Out of memory: Kill process .* \\\\(syz-fuzzer\\\\)\",\n\t\t\"lowmemorykiller: Killing 'syz-fuzzer'\",\n\t\t\/\/\"INFO: lockdep is turned off\", \/\/ printed by some sysrq that dumps scheduler state, but also on all lockdep reports\n\t}...)\n\tfor _, s := range supp {\n\t\tre, err := regexp.Compile(s)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to compile suppression '%v': %v\", s, err)\n\t\t}\n\t\tcfg.ParsedSuppressions = append(cfg.ParsedSuppressions, re)\n\t}\n\tfor _, ignore := range cfg.Ignores {\n\t\tre, err := regexp.Compile(ignore)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to compile ignore '%v': %v\", ignore, err)\n\t\t}\n\t\tcfg.ParsedIgnores = append(cfg.ParsedIgnores, re)\n\t}\n\treturn nil\n}\n\nfunc CreateVMConfig(cfg *Config, index int) (*vm.Config, error) {\n\tif index < 0 || index >= cfg.Count {\n\t\treturn nil, fmt.Errorf(\"invalid VM index %v (count %v)\", index, cfg.Count)\n\t}\n\tworkdir, err := fileutil.ProcessTempDir(cfg.Workdir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create instance temp dir: %v\", err)\n\t}\n\tvmCfg := &vm.Config{\n\t\tName: fmt.Sprintf(\"%v-%v-%v\", cfg.Type, cfg.Name, index),\n\t\tIndex: index,\n\t\tWorkdir: workdir,\n\t\tBin: cfg.Bin,\n\t\tBinArgs: cfg.Bin_Args,\n\t\tKernel: cfg.Kernel,\n\t\tCmdline: cfg.Cmdline,\n\t\tImage: cfg.Image,\n\t\tInitrd: cfg.Initrd,\n\t\tSshkey: cfg.Sshkey,\n\t\tExecutor: filepath.Join(cfg.Syzkaller, \"bin\", \"syz-executor\"),\n\t\tCpu: cfg.Cpu,\n\t\tMem: cfg.Mem,\n\t\tDebug: cfg.Debug,\n\t\tMachineType: cfg.Machine_Type,\n\t}\n\tif len(cfg.Devices) != 0 {\n\t\tvmCfg.Device = cfg.Devices[index]\n\t}\n\treturn vmCfg, nil\n}\n\nfunc checkUnknownFields(data []byte) (string, error) {\n\t\/\/ While https:\/\/github.com\/golang\/go\/issues\/15314 is not resolved\n\t\/\/ we don't have a better way than to enumerate all known fields.\n\tvar fields = []string{\n\t\t\"Name\",\n\t\t\"Http\",\n\t\t\"Rpc\",\n\t\t\"Workdir\",\n\t\t\"Vmlinux\",\n\t\t\"Kernel\",\n\t\t\"Tag\",\n\t\t\"Cmdline\",\n\t\t\"Image\",\n\t\t\"Cpu\",\n\t\t\"Mem\",\n\t\t\"Sshkey\",\n\t\t\"Bin\",\n\t\t\"Bin_Args\",\n\t\t\"Debug\",\n\t\t\"Output\",\n\t\t\"Hub_Addr\",\n\t\t\"Hub_Key\",\n\t\t\"Syzkaller\",\n\t\t\"Type\",\n\t\t\"Count\",\n\t\t\"Devices\",\n\t\t\"Procs\",\n\t\t\"Cover\",\n\t\t\"Reproduce\",\n\t\t\"Sandbox\",\n\t\t\"Leak\",\n\t\t\"Enable_Syscalls\",\n\t\t\"Disable_Syscalls\",\n\t\t\"Suppressions\",\n\t\t\"Ignores\",\n\t\t\"Initrd\",\n\t\t\"Machine_Type\",\n\t}\n\tf := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &f); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse config file: %v\", err)\n\t}\n\tfor k := range f {\n\t\tok := false\n\t\tfor _, k1 := range fields {\n\t\t\tif strings.ToLower(k) == strings.ToLower(k1) {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn k, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"code.cloudfoundry.org\/bytefmt\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\ntype Config struct {\n\tBuildpacks BlobstoreConfig\n\tDroplets BlobstoreConfig\n\tPackages BlobstoreConfig\n\tAppStash BlobstoreConfig `yaml:\"app_stash\"`\n\n\t\/\/ BuildpackCache is a Pseudo blobstore, because in reality it is using the Droplets blobstore.\n\t\/\/ However, we want to be able to control its max_body_size.\n\tBuildpackCache BlobstoreConfig `yaml:\"buildpack_cache\"`\n\n\tLogging LoggingConfig\n\tPublicEndpoint string `yaml:\"public_endpoint\"`\n\tPrivateEndpoint string `yaml:\"private_endpoint\"`\n\tSecret string\n\tPort int\n\tSigningUsers []Credential `yaml:\"signing_users\"`\n\tMaxBodySize string `yaml:\"max_body_size\"`\n\tCertFile string `yaml:\"cert_file\"`\n\tKeyFile string `yaml:\"key_file\"`\n\n\tCCUpdater *CCUpdaterConfig `yaml:\"cc_updater\"`\n}\n\nfunc (config *Config) PublicEndpointUrl() *url.URL {\n\tu, e := url.Parse(config.PublicEndpoint)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn u\n}\n\nfunc (config *Config) PrivateEndpointUrl() *url.URL {\n\tu, e := url.Parse(config.PrivateEndpoint)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn u\n}\n\ntype BlobstoreConfig struct {\n\tBlobstoreType string `yaml:\"blobstore_type\"`\n\tLocalConfig *LocalBlobstoreConfig `yaml:\"local_config\"`\n\tS3Config *S3BlobstoreConfig `yaml:\"s3_config\"`\n\tGCPConfig *GCPBlobstoreConfig `yaml:\"gcp_config\"`\n\tAzureConfig *AzureBlobstoreConfig `yaml:\"azure_config\"`\n\tOpenstackConfig *OpenstackBlobstoreConfig `yaml:\"openstack_config\"`\n\tWebdavConfig *WebdavBlobstoreConfig `yaml:\"webdav_config\"`\n\tMaxBodySize string `yaml:\"max_body_size\"`\n\tGlobalMaxBodySize string \/\/ Not to be set by yaml\n}\n\nfunc (config *BlobstoreConfig) MaxBodySizeBytes() uint64 {\n\tif config.MaxBodySize == \"\" {\n\t\tif config.GlobalMaxBodySize == \"\" {\n\t\t\treturn 0\n\t\t}\n\t\tbytes, e := bytefmt.ToBytes(config.GlobalMaxBodySize)\n\t\tif e != nil {\n\t\t\tpanic(\"Unexpected error: \" + e.Error())\n\t\t}\n\t\treturn bytes\n\t}\n\tbytes, e := bytefmt.ToBytes(config.MaxBodySize)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn bytes\n}\n\ntype LocalBlobstoreConfig struct {\n\tPathPrefix string `yaml:\"path_prefix\"`\n}\n\ntype S3BlobstoreConfig struct {\n\tBucket string\n\tAccessKeyID string `yaml:\"access_key_id\"`\n\tSecretAccessKey string `yaml:\"secret_access_key\"`\n\tRegion string\n\tHost string\n}\n\ntype GCPBlobstoreConfig struct {\n\tBucket string\n\tPrivateKeyID string `yaml:\"private_key_id\"`\n\tPrivateKey string `yaml:\"private_key\"`\n\tEmail string\n\tTokenURL string `yaml:\"token_url\"`\n}\n\ntype AzureBlobstoreConfig struct {\n\tContainerName string `yaml:\"container_name\"`\n\tAccountName string `yaml:\"account_name\"`\n\tAccountKey string `yaml:\"account_key\"`\n}\n\ntype OpenstackBlobstoreConfig struct {\n\tContainerName string `yaml:\"container_name\"`\n\tDomainName string `yaml:\"domain_name\"`\n\tDomainId string `yaml:\"domain_id\"`\n\tUsername string\n\tApiKey string `yaml:\"api_key\"`\n\tAuthURL string `yaml:\"auth_url\"`\n\tRegion string\n\tAuthVersion int `yaml:\"auth_version\"`\n\tInternal bool \/\/ Set this to true to use the the internal \/ service network\n\tTenant string \/\/ Name of the tenant (v2,v3 auth only)\n\tTenantId string `yaml:\"tenant_id\"` \/\/ Id of the tenant (v2,v3 auth only)\n\tEndpointType string `yaml:\"endpoint_type\"` \/\/ Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)\n\tTenantDomain string `yaml:\"tenant_domain\"` \/\/ Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain\n\tTenantDomainId string `yaml:\"tenant_domain_id\"` \/\/ Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain\n\tTrustId string `yaml:\"trust_id\"` \/\/ Id of the trust (v3 auth only)\n\n\tAccountMetaTempURLKey string `yaml:\"account_meta_temp_url_key\"` \/\/ used as secret for signed URLs\n}\n\ntype WebdavBlobstoreConfig struct {\n\tPrivateEndpoint string `yaml:\"private_endpoint\"`\n\tPublicEndpoint string `yaml:\"public_endpoint\"`\n\tCACertPath string `yaml:\"ca_cert_path\"`\n\tSkipCertVerify bool `yaml:\"skip_cert_verify\"`\n\tUsername string\n\tPassword string\n}\n\nfunc (config WebdavBlobstoreConfig) CACert() string {\n\tcaCert, e := ioutil.ReadFile(config.CACertPath)\n\tif e != nil {\n\t\tpanic(errors.Wrapf(e, \"Error while reading CA cert file \\\"%v\\\"\", config.CACertPath))\n\t}\n\treturn string(caCert)\n}\n\ntype Credential struct {\n\tUsername string\n\tPassword string\n}\n\ntype LoggingConfig struct {\n\tLevel string\n}\n\ntype CCUpdaterConfig struct {\n\tEndpoint string\n\tMethod string\n\tClientCertFile string `yaml:\"client_cert_file\"`\n\tClientKeyFile string `yaml:\"client_key_file\"`\n\tCACertFile string `yaml:\"ca_cert_file\"`\n}\n\nfunc LoadConfig(filename string) (config Config, err error) {\n\tfile, e := os.Open(filename)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error opening config. Caused by: \" + e.Error())\n\t}\n\tdefer file.Close()\n\tcontent, e := ioutil.ReadAll(file)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error reading config. Caused by: \" + e.Error())\n\t}\n\te = yaml.Unmarshal(content, &config)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error parsing config. Caused by: \" + e.Error())\n\t}\n\tconfig.Droplets.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.Packages.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.AppStash.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.Buildpacks.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.BuildpackCache.GlobalMaxBodySize = config.MaxBodySize\n\n\tvar errs []string\n\n\tif config.BuildpackCache.AzureConfig != nil ||\n\t\tconfig.BuildpackCache.GCPConfig != nil ||\n\t\tconfig.BuildpackCache.LocalConfig != nil ||\n\t\tconfig.BuildpackCache.OpenstackConfig != nil ||\n\t\tconfig.BuildpackCache.S3Config != nil ||\n\t\tconfig.BuildpackCache.WebdavConfig != nil {\n\t\terrs = append(errs, \"buildpack_cache must not have a blobstore configured, as it only exists to allow to configure max_body_size. \"+\n\t\t\t\"As blobstore, the droplet blobstore is used.\")\n\t}\n\tif config.Port == 0 {\n\t\terrs = append(errs, \"port must be an integer > 0\")\n\t}\n\tif config.PublicEndpoint == \"\" {\n\t\terrs = append(errs, \"public_endpoint must not be empty\")\n\t} else {\n\t\tpublicEndpoint, e := url.Parse(config.PublicEndpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"public_endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else {\n\t\t\tif publicEndpoint.Host == \"\" {\n\t\t\t\terrs = append(errs, \"public_endpoint host must not be empty\")\n\t\t\t}\n\t\t\tif publicEndpoint.Scheme != \"https\" {\n\t\t\t\terrs = append(errs, \"public_endpoint must use https:\/\/\")\n\t\t\t}\n\t\t}\n\t}\n\tif config.PrivateEndpoint == \"\" {\n\t\terrs = append(errs, \"private_endpoint must not be empty\")\n\t} else {\n\t\tprivateEndpoint, e := url.Parse(config.PrivateEndpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"private_endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else {\n\t\t\tif privateEndpoint.Host == \"\" {\n\t\t\t\terrs = append(errs, \"private_endpoint host must not be empty\")\n\t\t\t}\n\t\t\tif privateEndpoint.Scheme != \"https\" {\n\t\t\t\terrs = append(errs, \"private_endpoint must use https:\/\/\")\n\t\t\t}\n\t\t}\n\t}\n\tif config.CertFile == \"\" {\n\t\terrs = append(errs, \"cert_file must not be empty\")\n\t}\n\tif config.KeyFile == \"\" {\n\t\terrs = append(errs, \"key_file must not be empty\")\n\t}\n\tif config.MaxBodySize != \"\" {\n\t\t_, e = bytefmt.ToBytes(config.MaxBodySize)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"max_body_size is invalid. Caused by: \"+e.Error())\n\t\t}\n\t}\n\n\tif config.CCUpdater != nil {\n\t\tconfig.CCUpdater.Method = \"PATCH\"\n\t\tu, e := url.Parse(config.CCUpdater.Endpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"cc_updater.endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else if u.Host == \"\" {\n\t\t\terrs = append(errs, \"cc_updater.endpoint host must not be empty\")\n\t\t}\n\t}\n\n\t\/\/ TODO validate CACertsPaths\n\tif len(errs) > 0 {\n\t\treturn Config{}, errors.New(\"error in config values: \" + strings.Join(errs, \"; \"))\n\t}\n\treturn\n}\n<commit_msg>Fix config unmarshalling<commit_after>package config\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"code.cloudfoundry.org\/bytefmt\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\ntype Config struct {\n\tBuildpacks BlobstoreConfig\n\tDroplets BlobstoreConfig\n\tPackages BlobstoreConfig\n\tAppStash BlobstoreConfig `yaml:\"app_stash\"`\n\n\t\/\/ BuildpackCache is a Pseudo blobstore, because in reality it is using the Droplets blobstore.\n\t\/\/ However, we want to be able to control its max_body_size.\n\tBuildpackCache BlobstoreConfig `yaml:\"buildpack_cache\"`\n\n\tLogging LoggingConfig\n\tPublicEndpoint string `yaml:\"public_endpoint\"`\n\tPrivateEndpoint string `yaml:\"private_endpoint\"`\n\tSecret string\n\tPort int\n\tSigningUsers []Credential `yaml:\"signing_users\"`\n\tMaxBodySize string `yaml:\"max_body_size\"`\n\tCertFile string `yaml:\"cert_file\"`\n\tKeyFile string `yaml:\"key_file\"`\n\n\tCCUpdater *CCUpdaterConfig `yaml:\"cc_updater\"`\n}\n\nfunc (config *Config) PublicEndpointUrl() *url.URL {\n\tu, e := url.Parse(config.PublicEndpoint)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn u\n}\n\nfunc (config *Config) PrivateEndpointUrl() *url.URL {\n\tu, e := url.Parse(config.PrivateEndpoint)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn u\n}\n\ntype BlobstoreConfig struct {\n\tBlobstoreType string `yaml:\"blobstore_type\"`\n\tLocalConfig *LocalBlobstoreConfig `yaml:\"local_config\"`\n\tS3Config *S3BlobstoreConfig `yaml:\"s3_config\"`\n\tGCPConfig *GCPBlobstoreConfig `yaml:\"gcp_config\"`\n\tAzureConfig *AzureBlobstoreConfig `yaml:\"azure_config\"`\n\tOpenstackConfig *OpenstackBlobstoreConfig `yaml:\"openstack_config\"`\n\tWebdavConfig *WebdavBlobstoreConfig `yaml:\"webdav_config\"`\n\tMaxBodySize string `yaml:\"max_body_size\"`\n\tGlobalMaxBodySize string \/\/ Not to be set by yaml\n}\n\nfunc (config *BlobstoreConfig) MaxBodySizeBytes() uint64 {\n\tif config.MaxBodySize == \"\" {\n\t\tif config.GlobalMaxBodySize == \"\" {\n\t\t\treturn 0\n\t\t}\n\t\tbytes, e := bytefmt.ToBytes(config.GlobalMaxBodySize)\n\t\tif e != nil {\n\t\t\tpanic(\"Unexpected error: \" + e.Error())\n\t\t}\n\t\treturn bytes\n\t}\n\tbytes, e := bytefmt.ToBytes(config.MaxBodySize)\n\tif e != nil {\n\t\tpanic(\"Unexpected error: \" + e.Error())\n\t}\n\treturn bytes\n}\n\ntype LocalBlobstoreConfig struct {\n\tPathPrefix string `yaml:\"path_prefix\"`\n}\n\ntype S3BlobstoreConfig struct {\n\tBucket string\n\tAccessKeyID string `yaml:\"access_key_id\"`\n\tSecretAccessKey string `yaml:\"secret_access_key\"`\n\tRegion string\n\tHost string `yaml:\",omitempty\"`\n}\n\ntype GCPBlobstoreConfig struct {\n\tBucket string\n\tPrivateKeyID string `yaml:\"private_key_id\"`\n\tPrivateKey string `yaml:\"private_key\"`\n\tEmail string\n\tTokenURL string `yaml:\"token_url\"`\n}\n\ntype AzureBlobstoreConfig struct {\n\tContainerName string `yaml:\"container_name\"`\n\tAccountName string `yaml:\"account_name\"`\n\tAccountKey string `yaml:\"account_key\"`\n}\n\ntype OpenstackBlobstoreConfig struct {\n\tContainerName string `yaml:\"container_name\"`\n\tDomainName string `yaml:\"domain_name\"`\n\tDomainId string `yaml:\"domain_id\"`\n\tUsername string\n\tApiKey string `yaml:\"api_key\"`\n\tAuthURL string `yaml:\"auth_url\"`\n\tRegion string\n\tAuthVersion int `yaml:\"auth_version\"`\n\tInternal bool \/\/ Set this to true to use the the internal \/ service network\n\tTenant string \/\/ Name of the tenant (v2,v3 auth only)\n\tTenantId string `yaml:\"tenant_id\"` \/\/ Id of the tenant (v2,v3 auth only)\n\tEndpointType string `yaml:\"endpoint_type\"` \/\/ Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)\n\tTenantDomain string `yaml:\"tenant_domain\"` \/\/ Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain\n\tTenantDomainId string `yaml:\"tenant_domain_id\"` \/\/ Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain\n\tTrustId string `yaml:\"trust_id\"` \/\/ Id of the trust (v3 auth only)\n\n\tAccountMetaTempURLKey string `yaml:\"account_meta_temp_url_key\"` \/\/ used as secret for signed URLs\n}\n\ntype WebdavBlobstoreConfig struct {\n\tPrivateEndpoint string `yaml:\"private_endpoint\"`\n\tPublicEndpoint string `yaml:\"public_endpoint\"`\n\tCACertPath string `yaml:\"ca_cert_path\"`\n\tSkipCertVerify bool `yaml:\"skip_cert_verify\"`\n\tUsername string\n\tPassword string\n}\n\nfunc (config WebdavBlobstoreConfig) CACert() string {\n\tcaCert, e := ioutil.ReadFile(config.CACertPath)\n\tif e != nil {\n\t\tpanic(errors.Wrapf(e, \"Error while reading CA cert file \\\"%v\\\"\", config.CACertPath))\n\t}\n\treturn string(caCert)\n}\n\ntype Credential struct {\n\tUsername string\n\tPassword string\n}\n\ntype LoggingConfig struct {\n\tLevel string\n}\n\ntype CCUpdaterConfig struct {\n\tEndpoint string\n\tMethod string\n\tClientCertFile string `yaml:\"client_cert_file\"`\n\tClientKeyFile string `yaml:\"client_key_file\"`\n\tCACertFile string `yaml:\"ca_cert_file\"`\n}\n\nfunc LoadConfig(filename string) (config Config, err error) {\n\tfile, e := os.Open(filename)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error opening config. Caused by: \" + e.Error())\n\t}\n\tdefer file.Close()\n\tcontent, e := ioutil.ReadAll(file)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error reading config. Caused by: \" + e.Error())\n\t}\n\te = yaml.Unmarshal(content, &config)\n\tif e != nil {\n\t\treturn Config{}, errors.New(\"error parsing config. Caused by: \" + e.Error())\n\t}\n\tconfig.Droplets.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.Packages.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.AppStash.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.Buildpacks.GlobalMaxBodySize = config.MaxBodySize\n\tconfig.BuildpackCache.GlobalMaxBodySize = config.MaxBodySize\n\n\tvar errs []string\n\n\tif config.BuildpackCache.AzureConfig != nil ||\n\t\tconfig.BuildpackCache.GCPConfig != nil ||\n\t\tconfig.BuildpackCache.LocalConfig != nil ||\n\t\tconfig.BuildpackCache.OpenstackConfig != nil ||\n\t\tconfig.BuildpackCache.S3Config != nil ||\n\t\tconfig.BuildpackCache.WebdavConfig != nil {\n\t\terrs = append(errs, \"buildpack_cache must not have a blobstore configured, as it only exists to allow to configure max_body_size. \"+\n\t\t\t\"As blobstore, the droplet blobstore is used.\")\n\t}\n\tif config.Port == 0 {\n\t\terrs = append(errs, \"port must be an integer > 0\")\n\t}\n\tif config.PublicEndpoint == \"\" {\n\t\terrs = append(errs, \"public_endpoint must not be empty\")\n\t} else {\n\t\tpublicEndpoint, e := url.Parse(config.PublicEndpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"public_endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else {\n\t\t\tif publicEndpoint.Host == \"\" {\n\t\t\t\terrs = append(errs, \"public_endpoint host must not be empty\")\n\t\t\t}\n\t\t\tif publicEndpoint.Scheme != \"https\" {\n\t\t\t\terrs = append(errs, \"public_endpoint must use https:\/\/\")\n\t\t\t}\n\t\t}\n\t}\n\tif config.PrivateEndpoint == \"\" {\n\t\terrs = append(errs, \"private_endpoint must not be empty\")\n\t} else {\n\t\tprivateEndpoint, e := url.Parse(config.PrivateEndpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"private_endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else {\n\t\t\tif privateEndpoint.Host == \"\" {\n\t\t\t\terrs = append(errs, \"private_endpoint host must not be empty\")\n\t\t\t}\n\t\t\tif privateEndpoint.Scheme != \"https\" {\n\t\t\t\terrs = append(errs, \"private_endpoint must use https:\/\/\")\n\t\t\t}\n\t\t}\n\t}\n\tif config.CertFile == \"\" {\n\t\terrs = append(errs, \"cert_file must not be empty\")\n\t}\n\tif config.KeyFile == \"\" {\n\t\terrs = append(errs, \"key_file must not be empty\")\n\t}\n\tif config.MaxBodySize != \"\" {\n\t\t_, e = bytefmt.ToBytes(config.MaxBodySize)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"max_body_size is invalid. Caused by: \"+e.Error())\n\t\t}\n\t}\n\n\tif config.CCUpdater != nil {\n\t\tconfig.CCUpdater.Method = \"PATCH\"\n\t\tu, e := url.Parse(config.CCUpdater.Endpoint)\n\t\tif e != nil {\n\t\t\terrs = append(errs, \"cc_updater.endpoint is invalid. Caused by:\"+e.Error())\n\t\t} else if u.Host == \"\" {\n\t\t\terrs = append(errs, \"cc_updater.endpoint host must not be empty\")\n\t\t}\n\t}\n\n\t\/\/ TODO validate CACertsPaths\n\tif len(errs) > 0 {\n\t\treturn Config{}, errors.New(\"error in config values: \" + strings.Join(errs, \"; \"))\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/UnnoTed\/fileb0x\/compression\"\n\t\"github.com\/UnnoTed\/fileb0x\/custom\"\n\t\"github.com\/UnnoTed\/fileb0x\/updater\"\n)\n\n\/\/ Config holds the json\/yaml\/toml data\ntype Config struct {\n\tDest string\n\tPkg string\n\tFmt bool \/\/ gofmt\n\tCompression *compression.Options\n\tTags string\n\n\tOutput string\n\n\tCustom []custom.Custom\n\n\tSpread bool\n\tUnexported bool\n\tClean bool\n\tDebug bool\n\tUpdater updater.Config\n}\n\n\/\/ Defaults set the default value for some variables\nfunc (cfg *Config) Defaults() error {\n\t\/\/ default destination\n\tif cfg.Dest == \"\" {\n\t\tcfg.Dest = \"\/\"\n\t}\n\n\t\/\/ insert \"\/\" at end of dest when it's not found\n\tif !strings.HasSuffix(cfg.Dest, \"\/\") {\n\t\tcfg.Dest += \"\/\"\n\t}\n\n\t\/\/ default file name\n\tif cfg.Output == \"\" {\n\t\tcfg.Output = \"b0x.go\"\n\t}\n\n\t\/\/ inserts .go at the end of file name\n\tif !strings.HasSuffix(cfg.Output, \".go\") {\n\t\tcfg.Output += \".go\"\n\t}\n\n\t\/\/ inserts an A before the output file's name so it can\n\t\/\/ run init() before b0xfile's\n\tif !strings.HasPrefix(cfg.Output, \"a\") {\n\t\tcfg.Output = \"a\" + cfg.Output\n\t}\n\n\t\/\/ default package\n\tif cfg.Pkg == \"\" {\n\t\tcfg.Pkg = \"main\"\n\t}\n\n\t\/\/ remove b0xfiles when [clean] is true\n\t\/\/ it doesn't clean destination's folders\n\tif cfg.Clean {\n\t\tmatches, err := filepath.Glob(cfg.Dest + \"b0xfile_*.go\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ remove matched file\n\t\tfor _, f := range matches {\n\t\t\terr = os.Remove(f)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>make sure cfg.Compression is populated<commit_after>package config\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/UnnoTed\/fileb0x\/compression\"\n\t\"github.com\/UnnoTed\/fileb0x\/custom\"\n\t\"github.com\/UnnoTed\/fileb0x\/updater\"\n)\n\n\/\/ Config holds the json\/yaml\/toml data\ntype Config struct {\n\tDest string\n\tPkg string\n\tFmt bool \/\/ gofmt\n\tCompression *compression.Options\n\tTags string\n\n\tOutput string\n\n\tCustom []custom.Custom\n\n\tSpread bool\n\tUnexported bool\n\tClean bool\n\tDebug bool\n\tUpdater updater.Config\n}\n\n\/\/ Defaults set the default value for some variables\nfunc (cfg *Config) Defaults() error {\n\t\/\/ default destination\n\tif cfg.Dest == \"\" {\n\t\tcfg.Dest = \"\/\"\n\t}\n\n\t\/\/ insert \"\/\" at end of dest when it's not found\n\tif !strings.HasSuffix(cfg.Dest, \"\/\") {\n\t\tcfg.Dest += \"\/\"\n\t}\n\n\t\/\/ default file name\n\tif cfg.Output == \"\" {\n\t\tcfg.Output = \"b0x.go\"\n\t}\n\n\t\/\/ inserts .go at the end of file name\n\tif !strings.HasSuffix(cfg.Output, \".go\") {\n\t\tcfg.Output += \".go\"\n\t}\n\n\t\/\/ inserts an A before the output file's name so it can\n\t\/\/ run init() before b0xfile's\n\tif !strings.HasPrefix(cfg.Output, \"a\") {\n\t\tcfg.Output = \"a\" + cfg.Output\n\t}\n\n\t\/\/ default package\n\tif cfg.Pkg == \"\" {\n\t\tcfg.Pkg = \"main\"\n\t}\n\n\t\/\/ remove b0xfiles when [clean] is true\n\t\/\/ it doesn't clean destination's folders\n\tif cfg.Clean {\n\t\tmatches, err := filepath.Glob(cfg.Dest + \"b0xfile_*.go\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ remove matched file\n\t\tfor _, f := range matches {\n\t\t\terr = os.Remove(f)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif cfg.Compression == nil {\n\t\tcfg.Compression = &compression.Options{\n\t\t\tCompress: false,\n\t\t\tMethod: \"DefaultCompression\",\n\t\t\tKeep: false,\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package infrastructure\n\nimport (\n\t\"database\/sql\"\n\n\t\"github.com\/GerardSoleCa\/PubKeyManager\/interfaces\"\n\t_ \"github.com\/xeodou\/go-sqlcipher\"\n)\n\ntype SqliteHandler struct {\n\tConn *sql.DB\n}\n\nfunc (handler *SqliteHandler) Execute(statement string, args ...interface{}) (interfaces.Result, error) {\n\tresult, err := handler.Conn.Exec(statement, args...)\n\tif err != nil {\n\t\treturn new(SqliteResult), err\n\t}\n\tr := new(SqliteResult)\n\tr.Result = result\n\treturn r, nil\n}\n\nfunc (handler *SqliteHandler) Query(statement string, args ...interface{}) (interfaces.Row, error) {\n\trows, err := handler.Conn.Query(statement, args...)\n\tif err != nil {\n\t\treturn new(SqliteRow), err\n\t}\n\trow := new(SqliteRow)\n\trow.Rows = rows\n\treturn row, nil\n}\n\ntype SqliteResult struct {\n\tResult sql.Result\n}\n\nfunc (r SqliteResult) LastInsertId() (int64, error) {\n\treturn r.Result.LastInsertId()\n}\n\nfunc (r SqliteResult) RowsAffected() (int64, error) {\n\treturn r.Result.RowsAffected()\n}\n\ntype SqliteRow struct {\n\tRows *sql.Rows\n}\n\nfunc (r SqliteRow) Scan(dest ...interface{}) {\n\tr.Rows.Scan(dest...)\n}\n\nfunc (r SqliteRow) Next() bool {\n\treturn r.Rows.Next()\n}\nfunc (r SqliteRow) Close() {\n\tr.Rows.Close()\n}\n\nfunc NewSqliteHandler(dbfileName string, password string) *SqliteHandler {\n\tconn, _ := sql.Open(\"sqlite3\", dbfileName)\n\n\tp := \"PRAGMA key = '\" + password + \"';\"\n\t_, err := conn.Exec(p)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsqliteHandler := new(SqliteHandler)\n\tsqliteHandler.Conn = conn\n\treturn sqliteHandler\n}\n<commit_msg>Adding last docs<commit_after>package infrastructure\n\nimport (\n\t\"database\/sql\"\n\n\t\"github.com\/GerardSoleCa\/PubKeyManager\/interfaces\"\n\t\/\/ Blank import for go-sqlcipher\n\t_ \"github.com\/xeodou\/go-sqlcipher\"\n)\n\n\/\/ SqliteHandler struct\ntype SqliteHandler struct {\n\tConn *sql.DB\n}\n\n\/\/ Execute function contained on SqliteHandler\nfunc (handler *SqliteHandler) Execute(statement string, args ...interface{}) (interfaces.Result, error) {\n\tresult, err := handler.Conn.Exec(statement, args...)\n\tif err != nil {\n\t\treturn new(SqliteResult), err\n\t}\n\tr := new(SqliteResult)\n\tr.Result = result\n\treturn r, nil\n}\n\n\/\/ Query function contained on SqliteHandler\nfunc (handler *SqliteHandler) Query(statement string, args ...interface{}) (interfaces.Row, error) {\n\trows, err := handler.Conn.Query(statement, args...)\n\tif err != nil {\n\t\treturn new(SqliteRow), err\n\t}\n\trow := new(SqliteRow)\n\trow.Rows = rows\n\treturn row, nil\n}\n\n\/\/ SqliteResult struct\ntype SqliteResult struct {\n\tResult sql.Result\n}\n\n\/\/ LastInsertId function contained on SqliteResult\nfunc (r SqliteResult) LastInsertId() (int64, error) {\n\treturn r.Result.LastInsertId()\n}\n\n\/\/ RowsAffected function contained on SqliteResult\nfunc (r SqliteResult) RowsAffected() (int64, error) {\n\treturn r.Result.RowsAffected()\n}\n\n\/\/ SqliteRow struct\ntype SqliteRow struct {\n\tRows *sql.Rows\n}\n\n\/\/ Scan function contained on SqliteResult\nfunc (r SqliteRow) Scan(dest ...interface{}) {\n\tr.Rows.Scan(dest...)\n}\n\n\/\/ Next function contained on SqliteRow\nfunc (r SqliteRow) Next() bool {\n\treturn r.Rows.Next()\n}\n\/\/ Close function contained on SqliteRow\nfunc (r SqliteRow) Close() {\n\tr.Rows.Close()\n}\n\n\/\/ NewSqliteHanlder creates a new Ciphered SqliteHandler\nfunc NewSqliteHandler(dbfileName string, password string) *SqliteHandler {\n\tconn, _ := sql.Open(\"sqlite3\", dbfileName)\n\n\tp := \"PRAGMA key = '\" + password + \"';\"\n\t_, err := conn.Exec(p)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsqliteHandler := new(SqliteHandler)\n\tsqliteHandler.Conn = conn\n\treturn sqliteHandler\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\npackage config\n\n\/\/ Configuration for banshee with default values:\n\/\/\n\/\/\tGlobal Options\n\/\/\t\tdebug\t\t\tif on debug mode. [default: false]\n\/\/\t\tinterval\t\tincomding metrics time interval (in sec). [default: 10]\n\/\/\t\tperiodicity\t\tmetrics periodicity (in sec), NumTimeSpans x TimeSpan.\n\/\/\t\t\t\t\t\t[default: [480, 180]]\n\/\/\tSQLite Options\n\/\/\t\tfile\t\t\tfile path for sqlite, which maintains admin rules etc.\n\/\/\t\t\t\t\t\t[default: \"rules.db\"]\n\/\/\tLevelDB Options\n\/\/\t\tfile\t\t\tfile path for leveldb, which maintains analyzation\n\/\/\t\t\t\t\t\tresults. [default: \"stats.db\"]\n\/\/\tDetector Options\n\/\/\t\tport\t\t\tdetector tcp port to listen. [default: 2015]\n\/\/\t\ttrendFactor\t\tthe factor to calculate trending value via weighted\n\/\/\t\t\t\t\t\tmoving average algorithm. [default: 0.07]\n\/\/\t\tstrict\t\t\tif this is set false, detector will passivate latest\n\/\/\t\t\t\t\t\tmetric. [default: true]\n\/\/\t\twhitelist\t\tmetrics whitelist, if set empty `[]`, rule patterns\n\/\/\t\t\t\t\t\tfrom sqlite will be used. [default: [\"*\"]]\n\/\/\t\tblacklist\t\tmetrics blacklist, detector will allow one metric to\n\/\/\t\t\t\t\t\tpass only if it matches one pattern in whitelist and\n\/\/\t\t\t\t\t\tdosent match any pattern in blacklist. [default:\n\/\/\t\t\t\t\t\t[\"statsd.*\"]]\n\/\/\t\tstartSize\t\tdetector won't start to detect until the data set is\n\/\/\t\t\t\t\t\tlarger than this size. [default: 32]\n\/\/\tWebApp Options\n\/\/\t\tport\t\t\twebapp http port to listen. [default: 2016]\n\/\/\t\tauth\t\t\tusername and password for admin basic auth. [default:\n\/\/\t\t\t\t\t\t[\"admin\", \"admin\"]]\n\/\/\tAlerter Options\n\/\/\t\tcommand\t\t\tshell command to execute on anomalies detected, leaving\n\/\/\t\t\t\t\t\tempty means do nothing. [default: \"\"]\n\/\/\n\/\/ See also exampleConfig.json please.\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n)\n\ntype Config struct {\n\tDebug bool `json:\"debug\"`\n\tInterval int `json:\"interval\"`\n\tPeriodicity [2]int `json:\"periodicity\"`\n\tSQLite ConfigSQLite `json:\"sqlite\"`\n\tLevelDB ConfigLevelDB `json:\"leveldb\"`\n\tDetector ConfigDetector `json:\"detector\"`\n\tWebapp ConfigWebapp `json:\"webapp\"`\n\tAlerter ConfigAlerter `json:\"alerter\"`\n}\n\ntype ConfigSQLite struct {\n\tfile string `json:\"file\"`\n}\n\ntype ConfigLevelDB struct {\n\tfile string `json:\"file\"`\n}\n\ntype ConfigDetector struct {\n\tPort int `json:\"port\"`\n\tTrendFactor float64 `json:\"trendFactor\"`\n\tStrict bool `json:\"strict\"`\n\tWhiteList []string `json:\"whitelist\"`\n\tBlackList []string `json:\"blackList\"`\n\tStartSize int `json:\"startSize\"`\n}\n\ntype ConfigWebapp struct {\n\tPort int `json:\"port\"`\n\tAuth [2]string `json:\"auth\"`\n}\n\ntype ConfigAlerter struct {\n\tCommand string `json:\"command\"`\n}\n\n\/\/ NewConfigWithDefaults creates a Config with default values.\nfunc NewConfigWithDefaults() *Config {\n\tconfig := new(Config)\n\tconfig.Debug = false\n\tconfig.Interval = 10\n\tconfig.Periodicity = [2]int{480, 180}\n\tconfig.SQLite.file = \"rules.db\"\n\tconfig.LevelDB.file = \"stats.db\"\n\tconfig.Detector.Port = 2015\n\tconfig.Detector.TrendFactor = 0.07\n\tconfig.Detector.Strict = true\n\tconfig.Detector.WhiteList = []string{\"*\"}\n\tconfig.Detector.BlackList = []string{\"statsd.*\"}\n\tconfig.Detector.StartSize = 32\n\tconfig.Webapp.Port = 2016\n\tconfig.Webapp.Auth = [2]string{\"admin\", \"admin\"}\n\tconfig.Alerter.Command = \"\"\n\treturn config\n}\n\n\/\/ NewConfigWithJsonBytes creates a Config with json literal bytes.\nfunc NewConfigWithJsonBytes(b []byte) (*Config, error) {\n\tconfig := NewConfigWithDefaults()\n\terr := json.Unmarshal(b, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n\n\/\/ NewConfigWithJsonFile creates a Config from a json file by fileName.\nfunc NewConfigWithJsonFile(fileName string) (*Config, error) {\n\tb, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewConfigWithJsonBytes(b)\n}\n<commit_msg>Fix godoc<commit_after>\/\/ Copyright 2015 Eleme Inc. All rights reserved.\n\n\/\/ Configuration for banshee with default values.\n\/\/ Global Options\n\/\/ debug if on debug mode. [default: false]\n\/\/ interval incomding metrics time interval (in sec). [default: 10]\n\/\/ periodicity metrics periodicity (in sec), NumTimeSpans x TimeSpan.\n\/\/ [default: [480, 180]]\n\/\/ SQLite Options\n\/\/ file file path for sqlite, which maintains admin rules etc.\n\/\/ [default: \"rules.db\"]\n\/\/ LevelDB Options\n\/\/ file file path for leveldb, which maintains analyzation\n\/\/ results. [default: \"stats.db\"]\n\/\/ Detector Options\n\/\/ port detector tcp port to listen. [default: 2015]\n\/\/ trendFactor the factor to calculate trending value via weighted\n\/\/ moving average algorithm. [default: 0.07]\n\/\/ strict if this is set false, detector will passivate latest\n\/\/ metric. [default: true]\n\/\/ whitelist metrics whitelist, if set empty `[]`, rule patterns from\n\/\/ sqlite will be used. [default: [\"*\"]]\n\/\/ blacklist metrics blacklist, detector will allow one metric to pass\n\/\/ only if it matches one pattern in whitelist and dosent\n\/\/ match any pattern in blacklist. [default: [\"statsd.*\"]]\n\/\/ startSize detector won't start to detect until the data set is\n\/\/ larger than this size. [default: 32]\n\/\/ WebApp Options\n\/\/ port webapp http port to listen. [default: 2016]\n\/\/ auth username and password for admin basic auth. [default:\n\/\/ [\"admin\", \"admin\"]]\n\/\/ Alerter Options\n\/\/ command shell command to execute on anomalies detected, leaving\n\/\/ empty means do nothing. [default: \"\"]\n\/\/ See also exampleConfig.json please.\n\/\/\npackage config\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n)\n\ntype Config struct {\n\tDebug bool `json:\"debug\"`\n\tInterval int `json:\"interval\"`\n\tPeriodicity [2]int `json:\"periodicity\"`\n\tSQLite ConfigSQLite `json:\"sqlite\"`\n\tLevelDB ConfigLevelDB `json:\"leveldb\"`\n\tDetector ConfigDetector `json:\"detector\"`\n\tWebapp ConfigWebapp `json:\"webapp\"`\n\tAlerter ConfigAlerter `json:\"alerter\"`\n}\n\ntype ConfigSQLite struct {\n\tfile string `json:\"file\"`\n}\n\ntype ConfigLevelDB struct {\n\tfile string `json:\"file\"`\n}\n\ntype ConfigDetector struct {\n\tPort int `json:\"port\"`\n\tTrendFactor float64 `json:\"trendFactor\"`\n\tStrict bool `json:\"strict\"`\n\tWhiteList []string `json:\"whitelist\"`\n\tBlackList []string `json:\"blackList\"`\n\tStartSize int `json:\"startSize\"`\n}\n\ntype ConfigWebapp struct {\n\tPort int `json:\"port\"`\n\tAuth [2]string `json:\"auth\"`\n}\n\ntype ConfigAlerter struct {\n\tCommand string `json:\"command\"`\n}\n\n\/\/ NewConfigWithDefaults creates a Config with default values.\nfunc NewConfigWithDefaults() *Config {\n\tconfig := new(Config)\n\tconfig.Debug = false\n\tconfig.Interval = 10\n\tconfig.Periodicity = [2]int{480, 180}\n\tconfig.SQLite.file = \"rules.db\"\n\tconfig.LevelDB.file = \"stats.db\"\n\tconfig.Detector.Port = 2015\n\tconfig.Detector.TrendFactor = 0.07\n\tconfig.Detector.Strict = true\n\tconfig.Detector.WhiteList = []string{\"*\"}\n\tconfig.Detector.BlackList = []string{\"statsd.*\"}\n\tconfig.Detector.StartSize = 32\n\tconfig.Webapp.Port = 2016\n\tconfig.Webapp.Auth = [2]string{\"admin\", \"admin\"}\n\tconfig.Alerter.Command = \"\"\n\treturn config\n}\n\n\/\/ NewConfigWithJsonBytes creates a Config with json literal bytes.\nfunc NewConfigWithJsonBytes(b []byte) (*Config, error) {\n\tconfig := NewConfigWithDefaults()\n\terr := json.Unmarshal(b, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n\n\/\/ NewConfigWithJsonFile creates a Config from a json file by fileName.\nfunc NewConfigWithJsonFile(fileName string) (*Config, error) {\n\tb, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewConfigWithJsonBytes(b)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013, Cong Ding. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ author: Cong Ding <dinggnu@gmail.com>\n\/\/\npackage config\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar commentPrefix = []string{\"\/\/\", \"#\", \";\"}\n\n\/\/ Config struct constructs a new configuration handler.\ntype Config struct {\n\tfilename string\n\tconfig map[string]map[string]string\n}\n\n\/\/ NewConfig function cnstructs a new Config struct with filename. You have to\n\/\/ call Read() function to let it read from the file. Otherwise you will get\n\/\/ empty string (i.e., \"\") when you are calling Get() function. Another usage\n\/\/ is that you call NewConfig() function and then call Add()\/Set() function to\n\/\/ add new key-values to the configuration. Finally you can call Write()\n\/\/ function to write the new configuration to the file.\nfunc NewConfig(filename string) *Config {\n\tc := new(Config)\n\tc.filename = filename\n\tc.config = make(map[string]map[string]string)\n\treturn c\n}\n\n\/\/ Filename function returns the filename of the configuration.\nfunc (c *Config) Filename() string {\n\treturn c.filename\n}\n\n\/\/ SetFilename function sets the filename of the configuration.\nfunc (c *Config) SetFilename(filename string) {\n\tc.filename = filename\n}\n\n\/\/ Reset function reset the map in the configuration.\nfunc (c *Config) Reset() {\n\tc.config = make(map[string]map[string]string)\n}\n\n\/\/ Read function reads configurations from the file defined in\n\/\/ Config.filename.\nfunc (c *Config) Read() error {\n\tin, err := os.Open(c.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tscanner := bufio.NewScanner(in)\n\tline := \"\"\n\tsection := \"\"\n\tfor scanner.Scan() {\n\t\tif scanner.Text() == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif line == \"\" {\n\t\t\tsec, ok := checkSection(scanner.Text())\n\t\t\tif ok {\n\t\t\t\tsection = sec\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif checkComment(scanner.Text()) {\n\t\t\tcontinue\n\t\t}\n\t\tline += scanner.Text()\n\t\tif strings.HasSuffix(line, \"\\\\\") {\n\t\t\tline = line[:len(line)-1]\n\t\t\tcontinue\n\t\t}\n\t\tkey, value, ok := checkLine(line)\n\t\tif !ok {\n\t\t\treturn errors.New(\"WRONG: \" + line)\n\t\t}\n\t\tc.Set(section, key, value)\n\t\tline = \"\"\n\t}\n\treturn nil\n}\n\n\/\/ Get function returns the value of a key in the configuration. If the key\n\/\/ does not exist, it returns empty string (i.e., \"\").\nfunc (c *Config) Get(section string, key string) string {\n\tvalue, ok := c.config[section][key]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn value\n}\n\n\/\/ Set function updates the value of a key in the configuration. Function\n\/\/ Set() is exactly the same as function Add().\nfunc (c *Config) Set(section string, key string, value string) {\n\t_, ok := c.config[section]\n\tif !ok {\n\t\tc.config[section] = make(map[string]string)\n\t}\n\tc.config[section][key] = value\n}\n\n\/\/ Add function adds a new key to the configuration. Function Add() is exactly\n\/\/ the same as function Set().\nfunc (c *Config) Add(section string, key string, value string) {\n\tc.Set(section, key, value)\n}\n\n\/\/ Del function deletes a key from the configuration.\nfunc (c *Config) Del(section string, key string) {\n\t_, ok := c.config[section]\n\tif ok {\n\t\tdelete(c.config[section], key)\n\t\tif len(c.config[section]) == 0 {\n\t\t\tdelete(c.config, section)\n\t\t}\n\t}\n}\n\n\/\/ Write function writes the updated configuration back.\nfunc (c *Config) Write() error {\n\treturn nil\n}\n\n\/\/ WriteTo function writes the configuration to a new file. This function\n\/\/ re-organizes the configuration and deletes all the comments.\nfunc (c *Config) WriteTo(filename string) error {\n\tcontent := \"\"\n\tfor k, v := range c.config {\n\t\tcontent += fmt.Sprintf(\"[%v]\\n\", k)\n\t\tfor key, value := range v {\n\t\t\tcontent += fmt.Sprintf(\"\\t%v = %v\\n\", key, value)\n\t\t}\n\t}\n\treturn ioutil.WriteFile(filename, []byte(content), 0644)\n}\n\n\/\/ To check this line if section or not. If it is not a section, it returns\n\/\/ \"\".\nfunc checkSection(line string) (string, bool) {\n\tline = strings.TrimSpace(line)\n\tlineLen := len(line)\n\tif lineLen < 2 {\n\t\treturn \"\", false\n\t}\n\tif line[0] == '[' && line[lineLen-1] == ']' {\n\t\treturn line[1 : lineLen-1], true\n\t}\n\treturn \"\", false\n}\n\n\/\/ To check this line is a valid key-value pair or not.\nfunc checkLine(line string) (string, string, bool) {\n\tkey := \"\"\n\tvalue := \"\"\n\tsp := strings.SplitN(line, \"=\", 2)\n\tif len(sp) != 2 {\n\t\treturn key, value, false\n\t}\n\tkey = strings.TrimSpace(sp[0])\n\tvalue = strings.TrimSpace(sp[1])\n\treturn key, value, true\n}\n\n\/\/ To check this line is a whole line comment or not.\nfunc checkComment(line string) bool {\n\tline = strings.TrimSpace(line)\n\tfor p := range commentPrefix {\n\t\tif strings.HasPrefix(line, commentPrefix[p]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>handle global configurations<commit_after>\/\/ Copyright 2013, Cong Ding. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ author: Cong Ding <dinggnu@gmail.com>\n\/\/\npackage config\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar commentPrefix = []string{\"\/\/\", \"#\", \";\"}\n\n\/\/ Config struct constructs a new configuration handler.\ntype Config struct {\n\tfilename string\n\tconfig map[string]map[string]string\n}\n\n\/\/ NewConfig function cnstructs a new Config struct with filename. You have to\n\/\/ call Read() function to let it read from the file. Otherwise you will get\n\/\/ empty string (i.e., \"\") when you are calling Get() function. Another usage\n\/\/ is that you call NewConfig() function and then call Add()\/Set() function to\n\/\/ add new key-values to the configuration. Finally you can call Write()\n\/\/ function to write the new configuration to the file.\nfunc NewConfig(filename string) *Config {\n\tc := new(Config)\n\tc.filename = filename\n\tc.config = make(map[string]map[string]string)\n\treturn c\n}\n\n\/\/ Filename function returns the filename of the configuration.\nfunc (c *Config) Filename() string {\n\treturn c.filename\n}\n\n\/\/ SetFilename function sets the filename of the configuration.\nfunc (c *Config) SetFilename(filename string) {\n\tc.filename = filename\n}\n\n\/\/ Reset function reset the map in the configuration.\nfunc (c *Config) Reset() {\n\tc.config = make(map[string]map[string]string)\n}\n\n\/\/ Read function reads configurations from the file defined in\n\/\/ Config.filename.\nfunc (c *Config) Read() error {\n\tin, err := os.Open(c.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tscanner := bufio.NewScanner(in)\n\tline := \"\"\n\tsection := \"\"\n\tfor scanner.Scan() {\n\t\tif scanner.Text() == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif line == \"\" {\n\t\t\tsec, ok := checkSection(scanner.Text())\n\t\t\tif ok {\n\t\t\t\tsection = sec\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif checkComment(scanner.Text()) {\n\t\t\tcontinue\n\t\t}\n\t\tline += scanner.Text()\n\t\tif strings.HasSuffix(line, \"\\\\\") {\n\t\t\tline = line[:len(line)-1]\n\t\t\tcontinue\n\t\t}\n\t\tkey, value, ok := checkLine(line)\n\t\tif !ok {\n\t\t\treturn errors.New(\"WRONG: \" + line)\n\t\t}\n\t\tc.Set(section, key, value)\n\t\tline = \"\"\n\t}\n\treturn nil\n}\n\n\/\/ Get function returns the value of a key in the configuration. If the key\n\/\/ does not exist, it returns empty string (i.e., \"\").\nfunc (c *Config) Get(section string, key string) string {\n\tvalue, ok := c.config[section][key]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn value\n}\n\n\/\/ Set function updates the value of a key in the configuration. Function\n\/\/ Set() is exactly the same as function Add().\nfunc (c *Config) Set(section string, key string, value string) {\n\t_, ok := c.config[section]\n\tif !ok {\n\t\tc.config[section] = make(map[string]string)\n\t}\n\tc.config[section][key] = value\n}\n\n\/\/ Add function adds a new key to the configuration. Function Add() is exactly\n\/\/ the same as function Set().\nfunc (c *Config) Add(section string, key string, value string) {\n\tc.Set(section, key, value)\n}\n\n\/\/ Del function deletes a key from the configuration.\nfunc (c *Config) Del(section string, key string) {\n\t_, ok := c.config[section]\n\tif ok {\n\t\tdelete(c.config[section], key)\n\t\tif len(c.config[section]) == 0 {\n\t\t\tdelete(c.config, section)\n\t\t}\n\t}\n}\n\n\/\/ Write function writes the updated configuration back.\nfunc (c *Config) Write() error {\n\treturn nil\n}\n\n\/\/ WriteTo function writes the configuration to a new file. This function\n\/\/ re-organizes the configuration and deletes all the comments.\nfunc (c *Config) WriteTo(filename string) error {\n\tcontent := \"\"\n\tfor k, v := range c.config {\n\t\tif k != \"\" {\n\t\t\tcontent += fmt.Sprintf(\"[%v]\\n\", k)\n\t\t}\n\t\tfor key, value := range v {\n\t\t\tif k != \"\" {\n\t\t\t\tcontent += fmt.Sprintf(\"\\t%v = %v\\n\", key, value)\n\t\t\t} else {\n\t\t\t\tcontent += fmt.Sprintf(\"%v = %v\\n\", key, value)\n\t\t\t}\n\t\t}\n\t}\n\treturn ioutil.WriteFile(filename, []byte(content), 0644)\n}\n\n\/\/ To check this line if section or not. If it is not a section, it returns\n\/\/ \"\".\nfunc checkSection(line string) (string, bool) {\n\tline = strings.TrimSpace(line)\n\tlineLen := len(line)\n\tif lineLen < 2 {\n\t\treturn \"\", false\n\t}\n\tif line[0] == '[' && line[lineLen-1] == ']' {\n\t\treturn line[1 : lineLen-1], true\n\t}\n\treturn \"\", false\n}\n\n\/\/ To check this line is a valid key-value pair or not.\nfunc checkLine(line string) (string, string, bool) {\n\tkey := \"\"\n\tvalue := \"\"\n\tsp := strings.SplitN(line, \"=\", 2)\n\tif len(sp) != 2 {\n\t\treturn key, value, false\n\t}\n\tkey = strings.TrimSpace(sp[0])\n\tvalue = strings.TrimSpace(sp[1])\n\treturn key, value, true\n}\n\n\/\/ To check this line is a whole line comment or not.\nfunc checkComment(line string) bool {\n\tline = strings.TrimSpace(line)\n\tfor p := range commentPrefix {\n\t\tif strings.HasPrefix(line, commentPrefix[p]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage config helps creating configuration structs.\n\nThe intended usage would be a simple struct that calls Value() on\na fields initialization. E.g.\n\n\tvar portProperty = config.NewEnvProperty(\"PORT\", \"8080\")\n\t\/\/ If the $DOMAIN env variable not set the configuration creation will fail with a fatal error\n\tvar domainProperty = config.NewRequiredEnvProperty(\"DOMAIN\")\n\n\ttype Config struct {\n\t\tPort string\n\t\tDomain string\n\t}\n\n\tfunc NewConfig() Config {\n\t\treturn Config{\n\t\t\tPort: portProperty.Value(),\n\t\t\tDomain: domainProperty.Value(),\n\t\t}\n\t}\n*\/\npackage config\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ Property can be used to fetch default values for configuration properties.\ntype Property interface {\n\tValue() string\n}\n\n\/\/ NewRequiredEnvProperty returns a Property that gets its value from\n\/\/ the specified environment variable. Panics if the variable is not set.\nfunc NewRequiredEnvProperty(envVariableName string) Property {\n\treturn requiredEnvProperty{\n\t\tenvVariableName: envVariableName,\n\t}\n}\n\n\/\/ NewEnvProperty returns a Property that gets its value from the\n\/\/ specified environment variable. If the environment vatiable is not set\n\/\/ the fallback value will be used instead\nfunc NewEnvProperty(envVariableName string, fallbackValue string) Property {\n\treturn envProperty{\n\t\tenvVariableName: envVariableName,\n\t\tfallbackValue: fallbackValue,\n\t}\n}\n\ntype envProperty struct {\n\tenvVariableName string\n\tfallbackValue string\n}\n\nfunc (prop envProperty) Value() (defaultValue string) {\n\tdefaultValue = os.Getenv(prop.envVariableName)\n\tif defaultValue == \"\" {\n\t\tdefaultValue = prop.fallbackValue\n\t}\n\treturn\n}\n\ntype requiredEnvProperty struct {\n\tenvVariableName string\n}\n\nfunc (prop requiredEnvProperty) Value() (defaultValue string) {\n\tdefaultValue = os.Getenv(prop.envVariableName)\n\tif defaultValue == \"\" {\n\t\tlog.Fatalf(\"Please set the %s environment variable\", prop.envVariableName)\n\t}\n\treturn\n}\n<commit_msg>move config package to a separate repo<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/kardianos\/osext\"\n)\n\nvar InitialPath string\nvar InitialGoPath string\n\nvar Verbose bool\n\nvar SpinnerCharSet = 14\nvar SpinnerInterval = 50 * time.Millisecond\n\nfunc main() {\n\tcurrentExecutable, _ := osext.Executable()\n\tvendoredBunchPath, err := path.Join(\".vendor\", \"bin\", \"bunch\")\n\n\tfi1, errStat1 := os.Stat(currentExecutable)\n\tfi2, errStat2 := os.Stat(vendoredBunchPath)\n\n\tif exists, _ := pathExists(vendoredBunchPath); err == nil && errStat1 == nil && errStat2 == nil && exists && os.SameFile(fi1, fi2) {\n\t\tcmd := exec.Command(vendoredBunchPath, os.Args[1:]...)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"running vendored bin\/bunch was unsuccessful\")\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tif cmd.ProcessState.Success() {\n\t\t\t\tos.Exit(0)\n\t\t\t} else {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tInitialPath = os.Getenv(\"PATH\")\n\tInitialGoPath = os.Getenv(\"GOPATH\")\n\n\tapp := cli.NewApp()\n\tapp.Name = \"bunch\"\n\tapp.Usage = \"npm-like tool for managing Go dependencies\"\n\tapp.Version = \"0.0.1\"\n\tapp.Authors = []cli.Author{cli.Author{Name: \"Daniil Kulchenko\", Email: \"daniil@kulchenko.com\"}}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose\",\n\t\t\tUsage: \"output more information\",\n\t\t},\n\t}\n\n\tapp.Before = func(context *cli.Context) error {\n\t\tVerbose = context.GlobalBool(\"verbose\")\n\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"install\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"install package(s)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"save\",\n\t\t\t\t\tUsage: \"save installed package to Bunchfile\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"g\",\n\t\t\t\t\tUsage: \"install package to global $GOPATH instead of vendored directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, false, true)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tAliases: []string{\"u\"},\n\t\t\tUsage: \"update package(s)\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, true, true)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"uninstall\",\n\t\t\tAliases: []string{\"r\"},\n\t\t\tUsage: \"uninstall package(s)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"save\",\n\t\t\t\t\tUsage: \"save uninstalled package to Bunchfile\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"g\",\n\t\t\t\t\tUsage: \"uninstall package from global $GOPATH instead of vendored directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tuninstallCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"prune\",\n\t\t\tUsage: \"remove packages not referenced in Bunchfile\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tpruneCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"outdated\",\n\t\t\tUsage: \"list outdated packages\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\toutdatedCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"lock\",\n\t\t\tUsage: \"generate a file locking down current versions of dependencies\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tlockCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"rebuild\",\n\t\t\tUsage: \"rebuild all dependencies\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, true, false)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"generate\",\n\t\t\tUsage: \"generate a Bunchfile based on package imports in current directory\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgenerateCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"go\",\n\t\t\tUsage: \"run a Go command within the vendor environment (e.g. bunch go fmt)\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgoCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"exec\",\n\t\t\tUsage: \"run any command within the vendor environment (e.g. bunch exec make)\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\texecCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"shell\",\n\t\t\tUsage: \"start a shell within the vendor environment\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tshellCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"shim\",\n\t\t\tUsage: \"sourced in .bash_profile to alias the 'go' tool\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tshimCommand(c)\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>syntax fixes<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/kardianos\/osext\"\n)\n\nvar InitialPath string\nvar InitialGoPath string\n\nvar Verbose bool\n\nvar SpinnerCharSet = 14\nvar SpinnerInterval = 50 * time.Millisecond\n\nfunc main() {\n\tcurrentExecutable, _ := osext.Executable()\n\tvendoredBunchPath := path.Join(\".vendor\", \"bin\", \"bunch\")\n\n\tfi1, errStat1 := os.Stat(currentExecutable)\n\tfi2, errStat2 := os.Stat(vendoredBunchPath)\n\n\tif exists, _ := pathExists(vendoredBunchPath); errStat1 == nil && errStat2 == nil && exists && os.SameFile(fi1, fi2) {\n\t\tcmd := exec.Command(vendoredBunchPath, os.Args[1:]...)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"running vendored bin\/bunch was unsuccessful\")\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tif cmd.ProcessState.Success() {\n\t\t\t\tos.Exit(0)\n\t\t\t} else {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tInitialPath = os.Getenv(\"PATH\")\n\tInitialGoPath = os.Getenv(\"GOPATH\")\n\n\tapp := cli.NewApp()\n\tapp.Name = \"bunch\"\n\tapp.Usage = \"npm-like tool for managing Go dependencies\"\n\tapp.Version = \"0.0.1\"\n\tapp.Authors = []cli.Author{cli.Author{Name: \"Daniil Kulchenko\", Email: \"daniil@kulchenko.com\"}}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose\",\n\t\t\tUsage: \"output more information\",\n\t\t},\n\t}\n\n\tapp.Before = func(context *cli.Context) error {\n\t\tVerbose = context.GlobalBool(\"verbose\")\n\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"install\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"install package(s)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"save\",\n\t\t\t\t\tUsage: \"save installed package to Bunchfile\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"g\",\n\t\t\t\t\tUsage: \"install package to global $GOPATH instead of vendored directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, false, true)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tAliases: []string{\"u\"},\n\t\t\tUsage: \"update package(s)\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, true, true)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"uninstall\",\n\t\t\tAliases: []string{\"r\"},\n\t\t\tUsage: \"uninstall package(s)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"save\",\n\t\t\t\t\tUsage: \"save uninstalled package to Bunchfile\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"g\",\n\t\t\t\t\tUsage: \"uninstall package from global $GOPATH instead of vendored directory\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tuninstallCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"prune\",\n\t\t\tUsage: \"remove packages not referenced in Bunchfile\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tpruneCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"outdated\",\n\t\t\tUsage: \"list outdated packages\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\toutdatedCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"lock\",\n\t\t\tUsage: \"generate a file locking down current versions of dependencies\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tlockCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"rebuild\",\n\t\t\tUsage: \"rebuild all dependencies\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinstallCommand(c, true, false)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"generate\",\n\t\t\tUsage: \"generate a Bunchfile based on package imports in current directory\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgenerateCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"go\",\n\t\t\tUsage: \"run a Go command within the vendor environment (e.g. bunch go fmt)\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgoCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"exec\",\n\t\t\tUsage: \"run any command within the vendor environment (e.g. bunch exec make)\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\texecCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"shell\",\n\t\t\tUsage: \"start a shell within the vendor environment\",\n\t\t\tSkipFlagParsing: true,\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tshellCommand(c)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"shim\",\n\t\t\tUsage: \"sourced in .bash_profile to alias the 'go' tool\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tshimCommand(c)\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\n\/\/ ResourceProvider is an interface that must be implemented by any\n\/\/ resource provider: the thing that creates and manages the resources in\n\/\/ a Terraform configuration.\n\/\/\n\/\/ Important implementation note: All returned pointers, such as\n\/\/ *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to\n\/\/ shared data. Terraform is highly parallel and assumes that this data is safe\n\/\/ to read\/write in parallel so it must be unique references. Note that it is\n\/\/ safe to return arguments as results, however.\ntype ResourceProvider interface {\n\t\/*********************************************************************\n\t* Functions related to the provider\n\t*********************************************************************\/\n\n\t\/\/ Input is called to ask the provider to ask the user for input\n\t\/\/ for completing the configuration if necesarry.\n\t\/\/\n\t\/\/ This may or may not be called, so resource provider writers shouldn't\n\t\/\/ rely on this being available to set some default values for validate\n\t\/\/ later. Example of a situation where this wouldn't be called is if\n\t\/\/ the user is not using a TTY.\n\tInput(UIInput, *ResourceConfig) (*ResourceConfig, error)\n\n\t\/\/ Validate is called once at the beginning with the raw configuration\n\t\/\/ (no interpolation done) and can return a list of warnings and\/or\n\t\/\/ errors.\n\t\/\/\n\t\/\/ This is called once with the provider configuration only. It may not\n\t\/\/ be called at all if no provider configuration is given.\n\t\/\/\n\t\/\/ This should not assume that any values of the configurations are valid.\n\t\/\/ The primary use case of this call is to check that required keys are\n\t\/\/ set.\n\tValidate(*ResourceConfig) ([]string, []error)\n\n\t\/\/ Configure configures the provider itself with the configuration\n\t\/\/ given. This is useful for setting things like access keys.\n\t\/\/\n\t\/\/ This won't be called at all if no provider configuration is given.\n\t\/\/\n\t\/\/ Configure returns an error if it occurred.\n\tConfigure(*ResourceConfig) error\n\n\t\/\/ Resources returns all the available resource types that this provider\n\t\/\/ knows how to manage.\n\tResources() []ResourceType\n\n\t\/\/ Stop is called when the provider should halt any in-flight actions.\n\t\/\/\n\t\/\/ This can be used to make a nicer Ctrl-C experience for Terraform.\n\t\/\/ Even if this isn't implemented to do anything (just returns nil),\n\t\/\/ Terraform will still cleanly stop after the currently executing\n\t\/\/ graph node is complete. However, this API can be used to make more\n\t\/\/ efficient halts.\n\t\/\/\n\t\/\/ Stop doesn't have to and shouldn't block waiting for in-flight actions\n\t\/\/ to complete. It should take any action it wants and return immediately\n\t\/\/ acknowledging it has received the stop request. Terraform core will\n\t\/\/ automatically not make any further API calls to the provider soon\n\t\/\/ after Stop is called (technically exactly once the currently executing\n\t\/\/ graph nodes are complete).\n\t\/\/\n\t\/\/ The error returned, if non-nil, is assumed to mean that signaling the\n\t\/\/ stop somehow failed and that the user should expect potentially waiting\n\t\/\/ a longer period of time.\n\tStop() error\n\n\t\/*********************************************************************\n\t* Functions related to individual resources\n\t*********************************************************************\/\n\n\t\/\/ ValidateResource is called once at the beginning with the raw\n\t\/\/ configuration (no interpolation done) and can return a list of warnings\n\t\/\/ and\/or errors.\n\t\/\/\n\t\/\/ This is called once per resource.\n\t\/\/\n\t\/\/ This should not assume any of the values in the resource configuration\n\t\/\/ are valid since it is possible they have to be interpolated still.\n\t\/\/ The primary use case of this call is to check that the required keys\n\t\/\/ are set and that the general structure is correct.\n\tValidateResource(string, *ResourceConfig) ([]string, []error)\n\n\t\/\/ Apply applies a diff to a specific resource and returns the new\n\t\/\/ resource state along with an error.\n\t\/\/\n\t\/\/ If the resource state given has an empty ID, then a new resource\n\t\/\/ is expected to be created.\n\tApply(\n\t\t*InstanceInfo,\n\t\t*InstanceState,\n\t\t*InstanceDiff) (*InstanceState, error)\n\n\t\/\/ Diff diffs a resource versus a desired state and returns\n\t\/\/ a diff.\n\tDiff(\n\t\t*InstanceInfo,\n\t\t*InstanceState,\n\t\t*ResourceConfig) (*InstanceDiff, error)\n\n\t\/\/ Refresh refreshes a resource and updates all of its attributes\n\t\/\/ with the latest information.\n\tRefresh(*InstanceInfo, *InstanceState) (*InstanceState, error)\n\n\t\/*********************************************************************\n\t* Functions related to importing\n\t*********************************************************************\/\n\n\t\/\/ ImportState requests that the given resource be imported.\n\t\/\/\n\t\/\/ The returned InstanceState only requires ID be set. Importing\n\t\/\/ will always call Refresh after the state to complete it.\n\t\/\/\n\t\/\/ IMPORTANT: InstanceState doesn't have the resource type attached\n\t\/\/ to it. A type must be specified on the state via the Ephemeral\n\t\/\/ field on the state.\n\t\/\/\n\t\/\/ This function can return multiple states. Normally, an import\n\t\/\/ will map 1:1 to a physical resource. However, some resources map\n\t\/\/ to multiple. For example, an AWS security group may contain many rules.\n\t\/\/ Each rule is represented by a separate resource in Terraform,\n\t\/\/ therefore multiple states are returned.\n\tImportState(*InstanceInfo, string) ([]*InstanceState, error)\n\n\t\/*********************************************************************\n\t* Functions related to data resources\n\t*********************************************************************\/\n\n\t\/\/ ValidateDataSource is called once at the beginning with the raw\n\t\/\/ configuration (no interpolation done) and can return a list of warnings\n\t\/\/ and\/or errors.\n\t\/\/\n\t\/\/ This is called once per data source instance.\n\t\/\/\n\t\/\/ This should not assume any of the values in the resource configuration\n\t\/\/ are valid since it is possible they have to be interpolated still.\n\t\/\/ The primary use case of this call is to check that the required keys\n\t\/\/ are set and that the general structure is correct.\n\tValidateDataSource(string, *ResourceConfig) ([]string, []error)\n\n\t\/\/ DataSources returns all of the available data sources that this\n\t\/\/ provider implements.\n\tDataSources() []DataSource\n\n\t\/\/ ReadDataDiff produces a diff that represents the state that will\n\t\/\/ be produced when the given data source is read using a later call\n\t\/\/ to ReadDataApply.\n\tReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)\n\n\t\/\/ ReadDataApply initializes a data instance using the configuration\n\t\/\/ in a diff produced by ReadDataDiff.\n\tReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)\n}\n\n\/\/ ResourceProviderCloser is an interface that providers that can close\n\/\/ connections that aren't needed anymore must implement.\ntype ResourceProviderCloser interface {\n\tClose() error\n}\n\n\/\/ ResourceType is a type of resource that a resource provider can manage.\ntype ResourceType struct {\n\tName string \/\/ Name of the resource, example \"instance\" (no provider prefix)\n\tImportable bool \/\/ Whether this resource supports importing\n}\n\n\/\/ DataSource is a data source that a resource provider implements.\ntype DataSource struct {\n\tName string\n}\n\n\/\/ ResourceProviderFactory is a function type that creates a new instance\n\/\/ of a resource provider.\ntype ResourceProviderFactory func() (ResourceProvider, error)\n\n\/\/ ResourceProviderFactoryFixed is a helper that creates a\n\/\/ ResourceProviderFactory that just returns some fixed provider.\nfunc ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {\n\treturn func() (ResourceProvider, error) {\n\t\treturn p, nil\n\t}\n}\n\nfunc ProviderHasResource(p ResourceProvider, n string) bool {\n\tfor _, rt := range p.Resources() {\n\t\tif rt.Name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc ProviderHasDataSource(p ResourceProvider, n string) bool {\n\tfor _, rt := range p.DataSources() {\n\t\tif rt.Name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n<commit_msg>core: ResourceProviderResolver interface<commit_after>package terraform\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/plugin\/discovery\"\n)\n\n\/\/ ResourceProvider is an interface that must be implemented by any\n\/\/ resource provider: the thing that creates and manages the resources in\n\/\/ a Terraform configuration.\n\/\/\n\/\/ Important implementation note: All returned pointers, such as\n\/\/ *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to\n\/\/ shared data. Terraform is highly parallel and assumes that this data is safe\n\/\/ to read\/write in parallel so it must be unique references. Note that it is\n\/\/ safe to return arguments as results, however.\ntype ResourceProvider interface {\n\t\/*********************************************************************\n\t* Functions related to the provider\n\t*********************************************************************\/\n\n\t\/\/ Input is called to ask the provider to ask the user for input\n\t\/\/ for completing the configuration if necesarry.\n\t\/\/\n\t\/\/ This may or may not be called, so resource provider writers shouldn't\n\t\/\/ rely on this being available to set some default values for validate\n\t\/\/ later. Example of a situation where this wouldn't be called is if\n\t\/\/ the user is not using a TTY.\n\tInput(UIInput, *ResourceConfig) (*ResourceConfig, error)\n\n\t\/\/ Validate is called once at the beginning with the raw configuration\n\t\/\/ (no interpolation done) and can return a list of warnings and\/or\n\t\/\/ errors.\n\t\/\/\n\t\/\/ This is called once with the provider configuration only. It may not\n\t\/\/ be called at all if no provider configuration is given.\n\t\/\/\n\t\/\/ This should not assume that any values of the configurations are valid.\n\t\/\/ The primary use case of this call is to check that required keys are\n\t\/\/ set.\n\tValidate(*ResourceConfig) ([]string, []error)\n\n\t\/\/ Configure configures the provider itself with the configuration\n\t\/\/ given. This is useful for setting things like access keys.\n\t\/\/\n\t\/\/ This won't be called at all if no provider configuration is given.\n\t\/\/\n\t\/\/ Configure returns an error if it occurred.\n\tConfigure(*ResourceConfig) error\n\n\t\/\/ Resources returns all the available resource types that this provider\n\t\/\/ knows how to manage.\n\tResources() []ResourceType\n\n\t\/\/ Stop is called when the provider should halt any in-flight actions.\n\t\/\/\n\t\/\/ This can be used to make a nicer Ctrl-C experience for Terraform.\n\t\/\/ Even if this isn't implemented to do anything (just returns nil),\n\t\/\/ Terraform will still cleanly stop after the currently executing\n\t\/\/ graph node is complete. However, this API can be used to make more\n\t\/\/ efficient halts.\n\t\/\/\n\t\/\/ Stop doesn't have to and shouldn't block waiting for in-flight actions\n\t\/\/ to complete. It should take any action it wants and return immediately\n\t\/\/ acknowledging it has received the stop request. Terraform core will\n\t\/\/ automatically not make any further API calls to the provider soon\n\t\/\/ after Stop is called (technically exactly once the currently executing\n\t\/\/ graph nodes are complete).\n\t\/\/\n\t\/\/ The error returned, if non-nil, is assumed to mean that signaling the\n\t\/\/ stop somehow failed and that the user should expect potentially waiting\n\t\/\/ a longer period of time.\n\tStop() error\n\n\t\/*********************************************************************\n\t* Functions related to individual resources\n\t*********************************************************************\/\n\n\t\/\/ ValidateResource is called once at the beginning with the raw\n\t\/\/ configuration (no interpolation done) and can return a list of warnings\n\t\/\/ and\/or errors.\n\t\/\/\n\t\/\/ This is called once per resource.\n\t\/\/\n\t\/\/ This should not assume any of the values in the resource configuration\n\t\/\/ are valid since it is possible they have to be interpolated still.\n\t\/\/ The primary use case of this call is to check that the required keys\n\t\/\/ are set and that the general structure is correct.\n\tValidateResource(string, *ResourceConfig) ([]string, []error)\n\n\t\/\/ Apply applies a diff to a specific resource and returns the new\n\t\/\/ resource state along with an error.\n\t\/\/\n\t\/\/ If the resource state given has an empty ID, then a new resource\n\t\/\/ is expected to be created.\n\tApply(\n\t\t*InstanceInfo,\n\t\t*InstanceState,\n\t\t*InstanceDiff) (*InstanceState, error)\n\n\t\/\/ Diff diffs a resource versus a desired state and returns\n\t\/\/ a diff.\n\tDiff(\n\t\t*InstanceInfo,\n\t\t*InstanceState,\n\t\t*ResourceConfig) (*InstanceDiff, error)\n\n\t\/\/ Refresh refreshes a resource and updates all of its attributes\n\t\/\/ with the latest information.\n\tRefresh(*InstanceInfo, *InstanceState) (*InstanceState, error)\n\n\t\/*********************************************************************\n\t* Functions related to importing\n\t*********************************************************************\/\n\n\t\/\/ ImportState requests that the given resource be imported.\n\t\/\/\n\t\/\/ The returned InstanceState only requires ID be set. Importing\n\t\/\/ will always call Refresh after the state to complete it.\n\t\/\/\n\t\/\/ IMPORTANT: InstanceState doesn't have the resource type attached\n\t\/\/ to it. A type must be specified on the state via the Ephemeral\n\t\/\/ field on the state.\n\t\/\/\n\t\/\/ This function can return multiple states. Normally, an import\n\t\/\/ will map 1:1 to a physical resource. However, some resources map\n\t\/\/ to multiple. For example, an AWS security group may contain many rules.\n\t\/\/ Each rule is represented by a separate resource in Terraform,\n\t\/\/ therefore multiple states are returned.\n\tImportState(*InstanceInfo, string) ([]*InstanceState, error)\n\n\t\/*********************************************************************\n\t* Functions related to data resources\n\t*********************************************************************\/\n\n\t\/\/ ValidateDataSource is called once at the beginning with the raw\n\t\/\/ configuration (no interpolation done) and can return a list of warnings\n\t\/\/ and\/or errors.\n\t\/\/\n\t\/\/ This is called once per data source instance.\n\t\/\/\n\t\/\/ This should not assume any of the values in the resource configuration\n\t\/\/ are valid since it is possible they have to be interpolated still.\n\t\/\/ The primary use case of this call is to check that the required keys\n\t\/\/ are set and that the general structure is correct.\n\tValidateDataSource(string, *ResourceConfig) ([]string, []error)\n\n\t\/\/ DataSources returns all of the available data sources that this\n\t\/\/ provider implements.\n\tDataSources() []DataSource\n\n\t\/\/ ReadDataDiff produces a diff that represents the state that will\n\t\/\/ be produced when the given data source is read using a later call\n\t\/\/ to ReadDataApply.\n\tReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)\n\n\t\/\/ ReadDataApply initializes a data instance using the configuration\n\t\/\/ in a diff produced by ReadDataDiff.\n\tReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)\n}\n\n\/\/ ResourceProviderCloser is an interface that providers that can close\n\/\/ connections that aren't needed anymore must implement.\ntype ResourceProviderCloser interface {\n\tClose() error\n}\n\n\/\/ ResourceType is a type of resource that a resource provider can manage.\ntype ResourceType struct {\n\tName string \/\/ Name of the resource, example \"instance\" (no provider prefix)\n\tImportable bool \/\/ Whether this resource supports importing\n}\n\n\/\/ DataSource is a data source that a resource provider implements.\ntype DataSource struct {\n\tName string\n}\n\n\/\/ ResourceProviderResolver is an interface implemented by objects that are\n\/\/ able to resolve a given set of resource provider version constraints\n\/\/ into ResourceProviderFactory callbacks.\ntype ResourceProviderResolver interface {\n\t\/\/ Given a constraint map, return a ResourceProviderFactory for each\n\t\/\/ requested provider. If some or all of the constraints cannot be\n\t\/\/ satisfied, return a non-nil slice of errors describing the problems.\n\tResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)\n}\n\n\/\/ ResourceProviderResolverFunc wraps a callback function and turns it into\n\/\/ a ResourceProviderResolver implementation, for convenience in situations\n\/\/ where a function and its associated closure are sufficient as a resolver\n\/\/ implementation.\ntype ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)\n\n\/\/ ResolveProviders implements ResourceProviderResolver by calling the\n\/\/ wrapped function.\nfunc (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {\n\treturn f(reqd)\n}\n\n\/\/ ResourceProviderResolverFixed returns a ResourceProviderResolver that\n\/\/ has a fixed set of provider factories provided by the caller. The returned\n\/\/ resolver ignores version constraints entirely and just returns the given\n\/\/ factory for each requested provider name.\n\/\/\n\/\/ This function is primarily used in tests, to provide mock providers or\n\/\/ in-process providers under test.\nfunc ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver {\n\treturn ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {\n\t\tret := make(map[string]ResourceProviderFactory, len(reqd))\n\t\tvar errs []error\n\t\tfor name := range reqd {\n\t\t\tif factory, exists := factories[name]; exists {\n\t\t\t\tret[name] = factory\n\t\t\t} else {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"provider %q is not available\", name))\n\t\t\t}\n\t\t}\n\t\treturn ret, errs\n\t})\n}\n\n\/\/ ResourceProviderFactory is a function type that creates a new instance\n\/\/ of a resource provider.\ntype ResourceProviderFactory func() (ResourceProvider, error)\n\n\/\/ ResourceProviderFactoryFixed is a helper that creates a\n\/\/ ResourceProviderFactory that just returns some fixed provider.\nfunc ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {\n\treturn func() (ResourceProvider, error) {\n\t\treturn p, nil\n\t}\n}\n\nfunc ProviderHasResource(p ResourceProvider, n string) bool {\n\tfor _, rt := range p.Resources() {\n\t\tif rt.Name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc ProviderHasDataSource(p ResourceProvider, n string) bool {\n\tfor _, rt := range p.DataSources() {\n\t\tif rt.Name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2016 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mathext\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestZeta(t *testing.T) {\n\tfor i, test := range []struct {\n\t\tx, q, want float64\n\t}{\n\t\t\/\/ Results computed using scipy.special.zeta\n\t\t{1, 1, math.Inf(1)},\n\t\t{10, 0.5, 1024.0174503557578},\n\t\t{5, 2.5, 0.013073166646113805},\n\t\t{1.5, 2, 1.6123753486854886},\n\t\t{1.5, 20, 0.45287361712938717},\n\t\t{2.5, 0.5, 6.2471106345688137},\n\t\t{10, 7.5, 2.5578265694201971e-9},\n\t\t{12, 2.5, 1.7089167198843551e-5},\n\t\t{20, 0.75, 315.3368689825316},\n\t\t{25, 0.25, 1125899906842624.0},\n\t} {\n\t\tif got := Zeta(test.x, test.q); math.Abs(got-test.want) > 1e-10 {\n\t\t\tt.Errorf(\"test %d Zeta(%g, %g) failed: got %g want %g\", i, test.x, test.q, got, test.want)\n\t\t}\n\t}\n}\n<commit_msg>Adding more zeta tests<commit_after>\/\/ Copyright ©2016 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mathext\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestZeta(t *testing.T) {\n\tfor i, test := range []struct {\n\t\tx, q, want float64\n\t}{\n\t\t\/\/ Results computed using scipy.special.zeta\n\t\t{1, 1, math.Inf(1)},\n\t\t{1.00001, 0.5, 100001.96352290553},\n\t\t{1.0001, 25, 9996.8017690244506},\n\t\t{1.001, 1, 1000.5772884760117},\n\t\t{1.01, 10, 97.773405639173305},\n\t\t{1.5, 2, 1.6123753486854886},\n\t\t{1.5, 20, 0.45287361712938717},\n\t\t{2, -0.7, 14.28618087263834},\n\t\t{2.5, 0.5, 6.2471106345688137},\n\t\t{5, 2.5, 0.013073166646113805},\n\t\t{7.5, 5, 7.9463377443314306e-06},\n\t\t{10, -0.5, 2048.0174503557578},\n\t\t{10, 0.5, 1024.0174503557578},\n\t\t{10, 7.5, 2.5578265694201971e-9},\n\t\t{12, 2.5, 1.7089167198843551e-5},\n\t\t{17, 0.5, 131072.00101513157},\n\t\t{20, -2.5, 2097152.0006014798},\n\t\t{20, 0.75, 315.3368689825316},\n\t\t{25, 0.25, 1125899906842624.0},\n\t\t{30, 1, 1.0000000009313275},\n\t} {\n\t\tif got := Zeta(test.x, test.q); math.Abs(got-test.want) > 1e-10 {\n\t\t\tt.Errorf(\"test %d Zeta(%g, %g) failed: got %g want %g\", i, test.x, test.q, got, test.want)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/erasure_coding\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n\t\"google.golang.org\/grpc\"\n)\n\nfunc moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {\n\n\tcopiedShardIds := []uint32{uint32(shardId)}\n\n\tif applyBalancing {\n\n\t\texistingServerAddress := pb.NewServerAddressFromDataNode(existingLocation.info)\n\n\t\t\/\/ ask destination node to copy shard and the ecx file from source node, and mount it\n\t\tcopiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingServerAddress)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ unmount the to be deleted shards\n\t\terr = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingServerAddress, copiedShardIds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ ask source node to delete the shard, and maybe the ecx file\n\t\terr = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingServerAddress, copiedShardIds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"moved ec shard %d.%d %s => %s\\n\", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id)\n\n\t}\n\n\tdestinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds)\n\texistingLocation.deleteEcVolumeShards(vid, copiedShardIds)\n\n\treturn nil\n\n}\n\nfunc oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,\n\ttargetServer *EcNode, shardIdsToCopy []uint32,\n\tvolumeId needle.VolumeId, collection string, existingLocation pb.ServerAddress) (copiedShardIds []uint32, err error) {\n\n\tfmt.Printf(\"allocate %d.%v %s => %s\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)\n\n\ttargetAddress := pb.NewServerAddressFromDataNode(targetServer.info)\n\terr = operation.WithVolumeServerClient(false, targetAddress, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\n\t\tif targetAddress != existingLocation {\n\n\t\t\tfmt.Printf(\"copy %d.%v %s => %s\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)\n\t\t\t_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{\n\t\t\t\tVolumeId: uint32(volumeId),\n\t\t\t\tCollection: collection,\n\t\t\t\tShardIds: shardIdsToCopy,\n\t\t\t\tCopyEcxFile: true,\n\t\t\t\tCopyEcjFile: true,\n\t\t\t\tCopyVifFile: true,\n\t\t\t\tSourceDataNode: string(existingLocation),\n\t\t\t})\n\t\t\tif copyErr != nil {\n\t\t\t\treturn fmt.Errorf(\"copy %d.%v %s => %s : %v\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr)\n\t\t\t}\n\t\t}\n\n\t\tfmt.Printf(\"mount %d.%v on %s\\n\", volumeId, shardIdsToCopy, targetServer.info.Id)\n\t\t_, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: shardIdsToCopy,\n\t\t})\n\t\tif mountErr != nil {\n\t\t\treturn fmt.Errorf(\"mount %d.%v on %s : %v\\n\", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr)\n\t\t}\n\n\t\tif targetAddress != existingLocation {\n\t\t\tcopiedShardIds = shardIdsToCopy\n\t\t\tglog.V(0).Infof(\"%s ec volume %d deletes shards %+v\", existingLocation, volumeId, copiedShardIds)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) {\n\tfor _, dc := range topo.DataCenterInfos {\n\t\tfor _, rack := range dc.RackInfos {\n\t\t\tfor _, dn := range rack.DataNodeInfos {\n\t\t\t\tfn(dc.Id, RackId(rack.Id), dn)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {\n\tsort.Slice(ecNodes, func(i, j int) bool {\n\t\treturn ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot\n\t})\n}\n\nfunc sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {\n\tsort.Slice(ecNodes, func(i, j int) bool {\n\t\treturn ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot\n\t})\n}\n\ntype CandidateEcNode struct {\n\tecNode *EcNode\n\tshardCount int\n}\n\n\/\/ if the index node changed the freeEcSlot, need to keep every EcNode still sorted\nfunc ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) {\n\tfor i := index - 1; i >= 0; i-- {\n\t\tif lessThan(i+1, i) {\n\t\t\tswap(data, i, i+1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor i := index + 1; i < len(data); i++ {\n\t\tif lessThan(i, i-1) {\n\t\t\tswap(data, i, i-1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc swap(data []*CandidateEcNode, i, j int) {\n\tt := data[i]\n\tdata[i] = data[j]\n\tdata[j] = t\n}\n\nfunc countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) {\n\tfor _, ecShardInfo := range ecShardInfos {\n\t\tshardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)\n\t\tcount += shardBits.ShardIdCount()\n\t}\n\treturn\n}\n\nfunc countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) {\n\tif dn.DiskInfos == nil {\n\t\treturn 0\n\t}\n\tdiskInfo := dn.DiskInfos[string(diskType)]\n\tif diskInfo == nil {\n\t\treturn 0\n\t}\n\treturn int(diskInfo.MaxVolumeCount-diskInfo.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)\n}\n\ntype RackId string\ntype EcNodeId string\n\ntype EcNode struct {\n\tinfo *master_pb.DataNodeInfo\n\tdc string\n\track RackId\n\tfreeEcSlot int\n}\n\nfunc (ecNode *EcNode) localShardIdCount(vid uint32) int {\n\tfor _, diskInfo := range ecNode.info.DiskInfos {\n\t\tfor _, ecShardInfo := range diskInfo.EcShardInfos {\n\t\t\tif vid == ecShardInfo.Id {\n\t\t\t\tshardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)\n\t\t\t\treturn shardBits.ShardIdCount()\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\ntype EcRack struct {\n\tecNodes map[EcNodeId]*EcNode\n\tfreeEcSlot int\n}\n\nfunc collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {\n\n\t\/\/ list all possible locations\n\t\/\/ collect topology information\n\ttopologyInfo, _, err := collectTopologyInfo(commandEnv, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ find out all volume servers with one slot left.\n\tecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)\n\n\tsortEcNodesByFreeslotsDecending(ecNodes)\n\n\treturn\n}\n\nfunc collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int) {\n\teachDataNode(topo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif selectedDataCenter != \"\" && selectedDataCenter != dc {\n\t\t\treturn\n\t\t}\n\n\t\tfreeEcSlots := countFreeShardSlots(dn, types.HardDriveType)\n\t\tecNodes = append(ecNodes, &EcNode{\n\t\t\tinfo: dn,\n\t\t\tdc: dc,\n\t\t\track: rack,\n\t\t\tfreeEcSlot: int(freeEcSlots),\n\t\t})\n\t\ttotalFreeEcSlots += freeEcSlots\n\t})\n\treturn\n}\n\nfunc sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeDeletedShardIds []uint32) error {\n\n\tfmt.Printf(\"delete %d.%v from %s\\n\", volumeId, toBeDeletedShardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: toBeDeletedShardIds,\n\t\t})\n\t\treturn deleteErr\n\t})\n\n}\n\nfunc unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeUnmountedhardIds []uint32) error {\n\n\tfmt.Printf(\"unmount %d.%v from %s\\n\", volumeId, toBeUnmountedhardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tShardIds: toBeUnmountedhardIds,\n\t\t})\n\t\treturn deleteErr\n\t})\n}\n\nfunc mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeMountedhardIds []uint32) error {\n\n\tfmt.Printf(\"mount %d.%v on %s\\n\", volumeId, toBeMountedhardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: toBeMountedhardIds,\n\t\t})\n\t\treturn mountErr\n\t})\n}\n\nfunc divide(total, n int) float64 {\n\treturn float64(total) \/ float64(n)\n}\n\nfunc ceilDivide(total, n int) int {\n\treturn int(math.Ceil(float64(total) \/ float64(n)))\n}\n\nfunc findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {\n\n\tif diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\treturn erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {\n\n\tfoundVolume := false\n\tdiskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]\n\tif found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\toldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t\tnewShardBits := oldShardBits\n\t\t\t\tfor _, shardId := range shardIds {\n\t\t\t\t\tnewShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))\n\t\t\t\t}\n\t\t\t\tshardInfo.EcIndexBits = uint32(newShardBits)\n\t\t\t\tecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()\n\t\t\t\tfoundVolume = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tdiskInfo = &master_pb.DiskInfo{\n\t\t\tType: string(types.HardDriveType),\n\t\t}\n\t\tecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo\n\t}\n\n\tif !foundVolume {\n\t\tvar newShardBits erasure_coding.ShardBits\n\t\tfor _, shardId := range shardIds {\n\t\t\tnewShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))\n\t\t}\n\t\tdiskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{\n\t\t\tId: uint32(vid),\n\t\t\tCollection: collection,\n\t\t\tEcIndexBits: uint32(newShardBits),\n\t\t\tDiskType: string(types.HardDriveType),\n\t\t})\n\t\tecNode.freeEcSlot -= len(shardIds)\n\t}\n\n\treturn ecNode\n}\n\nfunc (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {\n\n\tif diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\toldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t\tnewShardBits := oldShardBits\n\t\t\t\tfor _, shardId := range shardIds {\n\t\t\t\t\tnewShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))\n\t\t\t\t}\n\t\t\t\tshardInfo.EcIndexBits = uint32(newShardBits)\n\t\t\t\tecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ecNode\n}\n\nfunc groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int {\n\tcountMap := make(map[string]int)\n\tfor _, d := range data {\n\t\tid, count := identifierFn(d)\n\t\tcountMap[id] += count\n\t}\n\treturn countMap\n}\n\nfunc groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode {\n\tgroupMap := make(map[string][]*EcNode)\n\tfor _, d := range data {\n\t\tid := identifierFn(d)\n\t\tgroupMap[id] = append(groupMap[id], d)\n\t}\n\treturn groupMap\n}\n<commit_msg>ec.encode: calculate free ec slots based on (maxVolumeCount-volumeCount)<commit_after>package shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/types\"\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/erasure_coding\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n\t\"google.golang.org\/grpc\"\n)\n\nfunc moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {\n\n\tcopiedShardIds := []uint32{uint32(shardId)}\n\n\tif applyBalancing {\n\n\t\texistingServerAddress := pb.NewServerAddressFromDataNode(existingLocation.info)\n\n\t\t\/\/ ask destination node to copy shard and the ecx file from source node, and mount it\n\t\tcopiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingServerAddress)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ unmount the to be deleted shards\n\t\terr = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingServerAddress, copiedShardIds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ ask source node to delete the shard, and maybe the ecx file\n\t\terr = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingServerAddress, copiedShardIds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"moved ec shard %d.%d %s => %s\\n\", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id)\n\n\t}\n\n\tdestinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds)\n\texistingLocation.deleteEcVolumeShards(vid, copiedShardIds)\n\n\treturn nil\n\n}\n\nfunc oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,\n\ttargetServer *EcNode, shardIdsToCopy []uint32,\n\tvolumeId needle.VolumeId, collection string, existingLocation pb.ServerAddress) (copiedShardIds []uint32, err error) {\n\n\tfmt.Printf(\"allocate %d.%v %s => %s\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)\n\n\ttargetAddress := pb.NewServerAddressFromDataNode(targetServer.info)\n\terr = operation.WithVolumeServerClient(false, targetAddress, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\n\t\tif targetAddress != existingLocation {\n\n\t\t\tfmt.Printf(\"copy %d.%v %s => %s\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)\n\t\t\t_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{\n\t\t\t\tVolumeId: uint32(volumeId),\n\t\t\t\tCollection: collection,\n\t\t\t\tShardIds: shardIdsToCopy,\n\t\t\t\tCopyEcxFile: true,\n\t\t\t\tCopyEcjFile: true,\n\t\t\t\tCopyVifFile: true,\n\t\t\t\tSourceDataNode: string(existingLocation),\n\t\t\t})\n\t\t\tif copyErr != nil {\n\t\t\t\treturn fmt.Errorf(\"copy %d.%v %s => %s : %v\\n\", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr)\n\t\t\t}\n\t\t}\n\n\t\tfmt.Printf(\"mount %d.%v on %s\\n\", volumeId, shardIdsToCopy, targetServer.info.Id)\n\t\t_, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: shardIdsToCopy,\n\t\t})\n\t\tif mountErr != nil {\n\t\t\treturn fmt.Errorf(\"mount %d.%v on %s : %v\\n\", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr)\n\t\t}\n\n\t\tif targetAddress != existingLocation {\n\t\t\tcopiedShardIds = shardIdsToCopy\n\t\t\tglog.V(0).Infof(\"%s ec volume %d deletes shards %+v\", existingLocation, volumeId, copiedShardIds)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) {\n\tfor _, dc := range topo.DataCenterInfos {\n\t\tfor _, rack := range dc.RackInfos {\n\t\t\tfor _, dn := range rack.DataNodeInfos {\n\t\t\t\tfn(dc.Id, RackId(rack.Id), dn)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {\n\tsort.Slice(ecNodes, func(i, j int) bool {\n\t\treturn ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot\n\t})\n}\n\nfunc sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {\n\tsort.Slice(ecNodes, func(i, j int) bool {\n\t\treturn ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot\n\t})\n}\n\ntype CandidateEcNode struct {\n\tecNode *EcNode\n\tshardCount int\n}\n\n\/\/ if the index node changed the freeEcSlot, need to keep every EcNode still sorted\nfunc ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) {\n\tfor i := index - 1; i >= 0; i-- {\n\t\tif lessThan(i+1, i) {\n\t\t\tswap(data, i, i+1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor i := index + 1; i < len(data); i++ {\n\t\tif lessThan(i, i-1) {\n\t\t\tswap(data, i, i-1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc swap(data []*CandidateEcNode, i, j int) {\n\tt := data[i]\n\tdata[i] = data[j]\n\tdata[j] = t\n}\n\nfunc countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) {\n\tfor _, ecShardInfo := range ecShardInfos {\n\t\tshardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)\n\t\tcount += shardBits.ShardIdCount()\n\t}\n\treturn\n}\n\nfunc countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) {\n\tif dn.DiskInfos == nil {\n\t\treturn 0\n\t}\n\tdiskInfo := dn.DiskInfos[string(diskType)]\n\tif diskInfo == nil {\n\t\treturn 0\n\t}\n\treturn int(diskInfo.MaxVolumeCount-diskInfo.VolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)\n}\n\ntype RackId string\ntype EcNodeId string\n\ntype EcNode struct {\n\tinfo *master_pb.DataNodeInfo\n\tdc string\n\track RackId\n\tfreeEcSlot int\n}\n\nfunc (ecNode *EcNode) localShardIdCount(vid uint32) int {\n\tfor _, diskInfo := range ecNode.info.DiskInfos {\n\t\tfor _, ecShardInfo := range diskInfo.EcShardInfos {\n\t\t\tif vid == ecShardInfo.Id {\n\t\t\t\tshardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)\n\t\t\t\treturn shardBits.ShardIdCount()\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\ntype EcRack struct {\n\tecNodes map[EcNodeId]*EcNode\n\tfreeEcSlot int\n}\n\nfunc collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {\n\n\t\/\/ list all possible locations\n\t\/\/ collect topology information\n\ttopologyInfo, _, err := collectTopologyInfo(commandEnv, 0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ find out all volume servers with one slot left.\n\tecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)\n\n\tsortEcNodesByFreeslotsDecending(ecNodes)\n\n\treturn\n}\n\nfunc collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int) {\n\teachDataNode(topo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tif selectedDataCenter != \"\" && selectedDataCenter != dc {\n\t\t\treturn\n\t\t}\n\n\t\tfreeEcSlots := countFreeShardSlots(dn, types.HardDriveType)\n\t\tecNodes = append(ecNodes, &EcNode{\n\t\t\tinfo: dn,\n\t\t\tdc: dc,\n\t\t\track: rack,\n\t\t\tfreeEcSlot: int(freeEcSlots),\n\t\t})\n\t\ttotalFreeEcSlots += freeEcSlots\n\t})\n\treturn\n}\n\nfunc sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeDeletedShardIds []uint32) error {\n\n\tfmt.Printf(\"delete %d.%v from %s\\n\", volumeId, toBeDeletedShardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: toBeDeletedShardIds,\n\t\t})\n\t\treturn deleteErr\n\t})\n\n}\n\nfunc unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeUnmountedhardIds []uint32) error {\n\n\tfmt.Printf(\"unmount %d.%v from %s\\n\", volumeId, toBeUnmountedhardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tShardIds: toBeUnmountedhardIds,\n\t\t})\n\t\treturn deleteErr\n\t})\n}\n\nfunc mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress, toBeMountedhardIds []uint32) error {\n\n\tfmt.Printf(\"mount %d.%v on %s\\n\", volumeId, toBeMountedhardIds, sourceLocation)\n\n\treturn operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t_, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{\n\t\t\tVolumeId: uint32(volumeId),\n\t\t\tCollection: collection,\n\t\t\tShardIds: toBeMountedhardIds,\n\t\t})\n\t\treturn mountErr\n\t})\n}\n\nfunc divide(total, n int) float64 {\n\treturn float64(total) \/ float64(n)\n}\n\nfunc ceilDivide(total, n int) int {\n\treturn int(math.Ceil(float64(total) \/ float64(n)))\n}\n\nfunc findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {\n\n\tif diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\treturn erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {\n\n\tfoundVolume := false\n\tdiskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]\n\tif found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\toldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t\tnewShardBits := oldShardBits\n\t\t\t\tfor _, shardId := range shardIds {\n\t\t\t\t\tnewShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))\n\t\t\t\t}\n\t\t\t\tshardInfo.EcIndexBits = uint32(newShardBits)\n\t\t\t\tecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()\n\t\t\t\tfoundVolume = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tdiskInfo = &master_pb.DiskInfo{\n\t\t\tType: string(types.HardDriveType),\n\t\t}\n\t\tecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo\n\t}\n\n\tif !foundVolume {\n\t\tvar newShardBits erasure_coding.ShardBits\n\t\tfor _, shardId := range shardIds {\n\t\t\tnewShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))\n\t\t}\n\t\tdiskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{\n\t\t\tId: uint32(vid),\n\t\t\tCollection: collection,\n\t\t\tEcIndexBits: uint32(newShardBits),\n\t\t\tDiskType: string(types.HardDriveType),\n\t\t})\n\t\tecNode.freeEcSlot -= len(shardIds)\n\t}\n\n\treturn ecNode\n}\n\nfunc (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {\n\n\tif diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {\n\t\tfor _, shardInfo := range diskInfo.EcShardInfos {\n\t\t\tif needle.VolumeId(shardInfo.Id) == vid {\n\t\t\t\toldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)\n\t\t\t\tnewShardBits := oldShardBits\n\t\t\t\tfor _, shardId := range shardIds {\n\t\t\t\t\tnewShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))\n\t\t\t\t}\n\t\t\t\tshardInfo.EcIndexBits = uint32(newShardBits)\n\t\t\t\tecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ecNode\n}\n\nfunc groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int {\n\tcountMap := make(map[string]int)\n\tfor _, d := range data {\n\t\tid, count := identifierFn(d)\n\t\tcountMap[id] += count\n\t}\n\treturn countMap\n}\n\nfunc groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode {\n\tgroupMap := make(map[string][]*EcNode)\n\tfor _, d := range data {\n\t\tid := identifierFn(d)\n\t\tgroupMap[id] = append(groupMap[id], d)\n\t}\n\treturn groupMap\n}\n<|endoftext|>"} {"text":"<commit_before>package ot\n\ntype OpType int\n\nconst (\n\tOpRetain OpType = iota\n\tOpInsert\n\tOpDelete\n)\n\ntype Op interface {\n\tType() OpType\n\tSpan() int\n}\n\ntype RetainOp int\n\nfunc (p RetainOp) Type() OpType {\n\treturn OpRetain\n}\n\nfunc (p RetainOp) Span() int {\n\treturn int(p)\n}\n\nfunc (p RetainOp) ComposeRetain(q RetainOp) (Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn q, p - q, Noop\n\n\tdefault:\n\t\treturn p, Noop, q - p\n\t}\n}\n\nfunc (p RetainOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {\n\tswitch {\n\tcase int(p) < int(q):\n\t\treturn DeleteOp(p), Noop, q - DeleteOp(p)\n\n\tdefault:\n\t\treturn q, p - RetainOp(q), Noop\n\t}\n}\n\nfunc (p RetainOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn q, q, p - q, Noop\n\n\tdefault:\n\t\treturn p, p, Noop, q - p\n\t}\n}\n\nfunc (p RetainOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase int(p) > int(q):\n\t\treturn Noop, q, p - RetainOp(q), Noop\n\n\tcase int(p) < int(q):\n\t\treturn Noop, DeleteOp(p), Noop, q - DeleteOp(p)\n\n\tdefault:\n\t\treturn Noop, DeleteOp(p), Noop, Noop\n\t}\n}\n\nvar Noop = RetainOp(0)\n\ntype InsertOp string\n\nfunc (p InsertOp) Type() OpType {\n\treturn OpInsert\n}\n\nfunc (p InsertOp) Span() int {\n\treturn len(p)\n}\n\nfunc (p InsertOp) ComposeRetain(q RetainOp) (Op, Op, Op) {\n\tswitch {\n\tcase len(p) > int(q):\n\t\treturn p[:int(q)], p[int(q):], Noop\n\n\tdefault:\n\t\treturn p, Noop, q - RetainOp(len(p))\n\t}\n}\n\nfunc (p InsertOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {\n\tswitch {\n\tcase len(p) > int(q):\n\t\treturn Noop, p[int(q):], Noop\n\n\tcase len(p) < int(q):\n\t\treturn Noop, Noop, q - DeleteOp(len(p))\n\n\tdefault:\n\t\treturn Noop, Noop, Noop\n\t}\n}\n\ntype DeleteOp int\n\nfunc (p DeleteOp) Type() OpType {\n\treturn OpDelete\n}\n\nfunc (p DeleteOp) Span() int {\n\treturn int(p)\n}\n\nfunc (p DeleteOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase int(p) > int(q):\n\t\treturn DeleteOp(q), Noop, p - DeleteOp(q), Noop\n\n\tdefault:\n\t\treturn p, Noop, Noop, q - RetainOp(p)\n\t}\n}\n\nfunc (p DeleteOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn Noop, Noop, p - q, Noop\n\n\tdefault:\n\t\treturn Noop, Noop, Noop, q - p\n\t}\n}\n\ntype RetainComposer interface {\n\tComposeRetain(q RetainOp) (Op, Op, Op)\n}\n\ntype DeleteComposer interface {\n\tComposeDelete(q DeleteOp) (Op, Op, Op)\n}\n\ntype RetainTransformer interface {\n\tTransformRetain(q RetainOp) (Op, Op, Op, Op)\n}\n\ntype DeleteTransformer interface {\n\tTransformDelete(q DeleteOp) (Op, Op, Op, Op)\n}\n<commit_msg>Fix delete-delete transform bug<commit_after>package ot\n\ntype OpType int\n\nconst (\n\tOpRetain OpType = iota\n\tOpInsert\n\tOpDelete\n)\n\ntype Op interface {\n\tType() OpType\n\tSpan() int\n}\n\ntype RetainOp int\n\nfunc (p RetainOp) Type() OpType {\n\treturn OpRetain\n}\n\nfunc (p RetainOp) Span() int {\n\treturn int(p)\n}\n\nfunc (p RetainOp) ComposeRetain(q RetainOp) (Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn q, p - q, Noop\n\n\tdefault:\n\t\treturn p, Noop, q - p\n\t}\n}\n\nfunc (p RetainOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {\n\tswitch {\n\tcase int(p) < int(q):\n\t\treturn DeleteOp(p), Noop, q - DeleteOp(p)\n\n\tdefault:\n\t\treturn q, p - RetainOp(q), Noop\n\t}\n}\n\nfunc (p RetainOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn q, q, p - q, Noop\n\n\tdefault:\n\t\treturn p, p, Noop, q - p\n\t}\n}\n\nfunc (p RetainOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase int(p) > int(q):\n\t\treturn Noop, q, p - RetainOp(q), Noop\n\n\tcase int(p) < int(q):\n\t\treturn Noop, DeleteOp(p), Noop, q - DeleteOp(p)\n\n\tdefault:\n\t\treturn Noop, DeleteOp(p), Noop, Noop\n\t}\n}\n\nvar Noop = RetainOp(0)\n\ntype InsertOp string\n\nfunc (p InsertOp) Type() OpType {\n\treturn OpInsert\n}\n\nfunc (p InsertOp) Span() int {\n\treturn len(p)\n}\n\nfunc (p InsertOp) ComposeRetain(q RetainOp) (Op, Op, Op) {\n\tswitch {\n\tcase len(p) > int(q):\n\t\treturn p[:int(q)], p[int(q):], Noop\n\n\tdefault:\n\t\treturn p, Noop, q - RetainOp(len(p))\n\t}\n}\n\nfunc (p InsertOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {\n\tswitch {\n\tcase len(p) > int(q):\n\t\treturn Noop, p[int(q):], Noop\n\n\tcase len(p) < int(q):\n\t\treturn Noop, Noop, q - DeleteOp(len(p))\n\n\tdefault:\n\t\treturn Noop, Noop, Noop\n\t}\n}\n\ntype DeleteOp int\n\nfunc (p DeleteOp) Type() OpType {\n\treturn OpDelete\n}\n\nfunc (p DeleteOp) Span() int {\n\treturn int(p)\n}\n\nfunc (p DeleteOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase int(p) > int(q):\n\t\treturn DeleteOp(q), Noop, p - DeleteOp(q), Noop\n\n\tdefault:\n\t\treturn p, Noop, Noop, q - RetainOp(p)\n\t}\n}\n\nfunc (p DeleteOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {\n\tswitch {\n\tcase p > q:\n\t\treturn Noop, Noop, p - q, Noop\n\n\tcase p < q:\n\t\treturn Noop, Noop, Noop, q - p\n\n\tdefault:\n\t\treturn Noop, Noop, Noop, Noop\n\t}\n}\n\ntype RetainComposer interface {\n\tComposeRetain(q RetainOp) (Op, Op, Op)\n}\n\ntype DeleteComposer interface {\n\tComposeDelete(q DeleteOp) (Op, Op, Op)\n}\n\ntype RetainTransformer interface {\n\tTransformRetain(q RetainOp) (Op, Op, Op, Op)\n}\n\ntype DeleteTransformer interface {\n\tTransformDelete(q DeleteOp) (Op, Op, Op, Op)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014 The Syncthing Authors.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify it\n\/\/ under the terms of the GNU General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful, but WITHOUT\n\/\/ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n\/\/ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License along\n\/\/ with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage versioner\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/syncthing\/syncthing\/internal\/osutil\"\n)\n\nfunc init() {\n\t\/\/ Register the constructor for this type of versioner with the name \"staggered\"\n\tFactories[\"staggered\"] = NewStaggered\n}\n\ntype Interval struct {\n\tstep int64\n\tend int64\n}\n\n\/\/ The type holds our configuration\ntype Staggered struct {\n\tversionsPath string\n\tcleanInterval int64\n\tfolderPath string\n\tinterval [4]Interval\n\tmutex *sync.Mutex\n}\n\n\/\/ Rename versions with old version format\nfunc (v Staggered) renameOld() {\n\terr := filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif f.Mode().IsRegular() {\n\t\t\tversionUnix, err := strconv.ParseInt(strings.Replace(filepath.Ext(path), \".v\", \"\", 1), 10, 0)\n\t\t\tif err == nil {\n\t\t\t\tl.Infoln(\"Renaming file\", path, \"from old to new version format\")\n\t\t\t\tversiondate := time.Unix(versionUnix, 0)\n\t\t\t\tname := path[:len(path)-len(filepath.Ext(path))]\n\t\t\t\terr = osutil.Rename(path, taggedFilename(name, versiondate.Format(TimeFormat)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tl.Infoln(\"Error renaming to new format\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tl.Infoln(\"Versioner: error scanning versions dir\", err)\n\t\treturn\n\t}\n}\n\n\/\/ The constructor function takes a map of parameters and creates the type.\nfunc NewStaggered(folderID, folderPath string, params map[string]string) Versioner {\n\tmaxAge, err := strconv.ParseInt(params[\"maxAge\"], 10, 0)\n\tif err != nil {\n\t\tmaxAge = 31536000 \/\/ Default: ~1 year\n\t}\n\tcleanInterval, err := strconv.ParseInt(params[\"cleanInterval\"], 10, 0)\n\tif err != nil {\n\t\tcleanInterval = 3600 \/\/ Default: clean once per hour\n\t}\n\n\t\/\/ Use custom path if set, otherwise .stversions in folderPath\n\tvar versionsDir string\n\tif params[\"versionsPath\"] == \"\" {\n\t\tif debug {\n\t\t\tl.Debugln(\"using default dir .stversions\")\n\t\t}\n\t\tversionsDir = filepath.Join(folderPath, \".stversions\")\n\t} else {\n\t\tif debug {\n\t\t\tl.Debugln(\"using dir\", params[\"versionsPath\"])\n\t\t}\n\t\tversionsDir = params[\"versionsPath\"]\n\t}\n\n\tvar mutex sync.Mutex\n\ts := Staggered{\n\t\tversionsPath: versionsDir,\n\t\tcleanInterval: cleanInterval,\n\t\tfolderPath: folderPath,\n\t\tinterval: [4]Interval{\n\t\t\t{30, 3600}, \/\/ first hour -> 30 sec between versions\n\t\t\t{3600, 86400}, \/\/ next day -> 1 h between versions\n\t\t\t{86400, 592000}, \/\/ next 30 days -> 1 day between versions\n\t\t\t{604800, maxAge}, \/\/ next year -> 1 week between versions\n\t\t},\n\t\tmutex: &mutex,\n\t}\n\n\tif debug {\n\t\tl.Debugf(\"instantiated %#v\", s)\n\t}\n\n\t\/\/ Rename version with old version format\n\ts.renameOld()\n\n\tgo func() {\n\t\ts.clean()\n\t\tfor _ = range time.Tick(time.Duration(cleanInterval) * time.Second) {\n\t\t\ts.clean()\n\t\t}\n\t}()\n\n\treturn s\n}\n\nfunc (v Staggered) clean() {\n\tif debug {\n\t\tl.Debugln(\"Versioner clean: Waiting for lock on\", v.versionsPath)\n\t}\n\tv.mutex.Lock()\n\tdefer v.mutex.Unlock()\n\tif debug {\n\t\tl.Debugln(\"Versioner clean: Cleaning\", v.versionsPath)\n\t}\n\n\t_, err := os.Stat(v.versionsPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"creating versions dir\", v.versionsPath)\n\t\t\t}\n\t\t\tos.MkdirAll(v.versionsPath, 0755)\n\t\t\tosutil.HideFile(v.versionsPath)\n\t\t} else {\n\t\t\tl.Warnln(\"Versioner: can't create versions dir\", err)\n\t\t}\n\t}\n\n\tversionsPerFile := make(map[string][]string)\n\tfilesPerDir := make(map[string]int)\n\n\terr = filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif f.Mode().IsDir() && f.Mode()&os.ModeSymlink == 0 {\n\t\t\tfilesPerDir[path] = 0\n\t\t\tif path != v.versionsPath {\n\t\t\t\tdir := filepath.Dir(path)\n\t\t\t\tfilesPerDir[dir]++\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Regular file, or possibly a symlink.\n\n\t\t\textension := filenameTag(path)\n\t\t\tdir := filepath.Dir(path)\n\t\t\tname := path[:len(path)-len(extension)-1]\n\n\t\t\tfilesPerDir[dir]++\n\t\t\tversionsPerFile[name] = append(versionsPerFile[name], path)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tl.Warnln(\"Versioner: error scanning versions dir\", err)\n\t\treturn\n\t}\n\n\tfor _, versionList := range versionsPerFile {\n\t\t\/\/ List from filepath.Walk is sorted\n\t\tv.expire(versionList)\n\t}\n\n\tfor path, numFiles := range filesPerDir {\n\t\tif numFiles > 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif path == v.versionsPath {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"Cleaner: versions dir is empty, don't delete\", path)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif debug {\n\t\t\tl.Debugln(\"Cleaner: deleting empty directory\", path)\n\t\t}\n\t\terr = os.Remove(path)\n\t\tif err != nil {\n\t\t\tl.Warnln(\"Versioner: can't remove directory\", path, err)\n\t\t}\n\t}\n\tif debug {\n\t\tl.Debugln(\"Cleaner: Finished cleaning\", v.versionsPath)\n\t}\n}\n\nfunc (v Staggered) expire(versions []string) {\n\tif debug {\n\t\tl.Debugln(\"Versioner: Expiring versions\", versions)\n\t}\n\tvar prevAge int64\n\tfirstFile := true\n\tfor _, file := range versions {\n\t\tfi, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\tl.Warnln(\"versioner:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\tl.Infof(\"non-file %q is named like a file version\", file)\n\t\t\tcontinue\n\t\t}\n\n\t\tversionTime, err := time.Parse(TimeFormat, filenameTag(file))\n\t\tif err != nil {\n\t\t\tl.Infof(\"Versioner: file name %q is invalid: %v\", file, err)\n\t\t\tcontinue\n\t\t}\n\t\tage := int64(time.Since(versionTime).Seconds())\n\n\t\t\/\/ If the file is older than the max age of the last interval, remove it\n\t\tif lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"Versioner: File over maximum age -> delete \", file)\n\t\t\t}\n\t\t\terr = os.Remove(file)\n\t\t\tif err != nil {\n\t\t\t\tl.Warnf(\"Versioner: can't remove %q: %v\", file, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ If it's the first (oldest) file in the list we can skip the interval checks\n\t\tif firstFile {\n\t\t\tprevAge = age\n\t\t\tfirstFile = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Find the interval the file fits in\n\t\tvar usedInterval Interval\n\t\tfor _, usedInterval = range v.interval {\n\t\t\tif age < usedInterval.end {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif prevAge-age < usedInterval.step {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"too many files in step -> delete\", file)\n\t\t\t}\n\t\t\terr = os.Remove(file)\n\t\t\tif err != nil {\n\t\t\t\tl.Warnf(\"Versioner: can't remove %q: %v\", file, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tprevAge = age\n\t}\n}\n\n\/\/ Move away the named file to a version archive. If this function returns\n\/\/ nil, the named file does not exist any more (has been archived).\nfunc (v Staggered) Archive(filePath string) error {\n\tif debug {\n\t\tl.Debugln(\"Waiting for lock on \", v.versionsPath)\n\t}\n\tv.mutex.Lock()\n\tdefer v.mutex.Unlock()\n\n\tif _, err := os.Lstat(filePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"not archiving nonexistent file\", filePath)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif _, err := os.Stat(v.versionsPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"creating versions dir\", v.versionsPath)\n\t\t\t}\n\t\t\tos.MkdirAll(v.versionsPath, 0755)\n\t\t\tosutil.HideFile(v.versionsPath)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif debug {\n\t\tl.Debugln(\"archiving\", filePath)\n\t}\n\n\tfile := filepath.Base(filePath)\n\tinFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir := filepath.Join(v.versionsPath, inFolderPath)\n\terr = os.MkdirAll(dir, 0755)\n\tif err != nil && !os.IsExist(err) {\n\t\treturn err\n\t}\n\n\tver := taggedFilename(file, time.Now().Format(TimeFormat))\n\tdst := filepath.Join(dir, ver)\n\tif debug {\n\t\tl.Debugln(\"moving to\", dst)\n\t}\n\terr = osutil.Rename(filePath, dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Glob according to the new file~timestamp.ext pattern.\n\tnewVersions, err := filepath.Glob(filepath.Join(dir, taggedFilename(file, TimeGlob)))\n\tif err != nil {\n\t\tl.Warnln(\"globbing:\", err)\n\t\treturn nil\n\t}\n\n\t\/\/ Also according to the old file.ext~timestamp pattern.\n\toldVersions, err := filepath.Glob(filepath.Join(dir, file+\"~\"+TimeGlob))\n\tif err != nil {\n\t\tl.Warnln(\"globbing:\", err)\n\t\treturn nil\n\t}\n\n\t\/\/ Use all the found filenames.\n\tversions := append(oldVersions, newVersions...)\n\tv.expire(uniqueSortedStrings(versions))\n\n\treturn nil\n}\n<commit_msg>Silence versioner warnings for unmatched files (fixes #1117)<commit_after>\/\/ Copyright (C) 2014 The Syncthing Authors.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify it\n\/\/ under the terms of the GNU General Public License as published by the Free\n\/\/ Software Foundation, either version 3 of the License, or (at your option)\n\/\/ any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful, but WITHOUT\n\/\/ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n\/\/ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n\/\/ more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License along\n\/\/ with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage versioner\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/syncthing\/syncthing\/internal\/osutil\"\n)\n\nfunc init() {\n\t\/\/ Register the constructor for this type of versioner with the name \"staggered\"\n\tFactories[\"staggered\"] = NewStaggered\n}\n\ntype Interval struct {\n\tstep int64\n\tend int64\n}\n\n\/\/ The type holds our configuration\ntype Staggered struct {\n\tversionsPath string\n\tcleanInterval int64\n\tfolderPath string\n\tinterval [4]Interval\n\tmutex *sync.Mutex\n}\n\n\/\/ Rename versions with old version format\nfunc (v Staggered) renameOld() {\n\terr := filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif f.Mode().IsRegular() {\n\t\t\tversionUnix, err := strconv.ParseInt(strings.Replace(filepath.Ext(path), \".v\", \"\", 1), 10, 0)\n\t\t\tif err == nil {\n\t\t\t\tl.Infoln(\"Renaming file\", path, \"from old to new version format\")\n\t\t\t\tversiondate := time.Unix(versionUnix, 0)\n\t\t\t\tname := path[:len(path)-len(filepath.Ext(path))]\n\t\t\t\terr = osutil.Rename(path, taggedFilename(name, versiondate.Format(TimeFormat)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tl.Infoln(\"Error renaming to new format\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tl.Infoln(\"Versioner: error scanning versions dir\", err)\n\t\treturn\n\t}\n}\n\n\/\/ The constructor function takes a map of parameters and creates the type.\nfunc NewStaggered(folderID, folderPath string, params map[string]string) Versioner {\n\tmaxAge, err := strconv.ParseInt(params[\"maxAge\"], 10, 0)\n\tif err != nil {\n\t\tmaxAge = 31536000 \/\/ Default: ~1 year\n\t}\n\tcleanInterval, err := strconv.ParseInt(params[\"cleanInterval\"], 10, 0)\n\tif err != nil {\n\t\tcleanInterval = 3600 \/\/ Default: clean once per hour\n\t}\n\n\t\/\/ Use custom path if set, otherwise .stversions in folderPath\n\tvar versionsDir string\n\tif params[\"versionsPath\"] == \"\" {\n\t\tif debug {\n\t\t\tl.Debugln(\"using default dir .stversions\")\n\t\t}\n\t\tversionsDir = filepath.Join(folderPath, \".stversions\")\n\t} else {\n\t\tif debug {\n\t\t\tl.Debugln(\"using dir\", params[\"versionsPath\"])\n\t\t}\n\t\tversionsDir = params[\"versionsPath\"]\n\t}\n\n\tvar mutex sync.Mutex\n\ts := Staggered{\n\t\tversionsPath: versionsDir,\n\t\tcleanInterval: cleanInterval,\n\t\tfolderPath: folderPath,\n\t\tinterval: [4]Interval{\n\t\t\t{30, 3600}, \/\/ first hour -> 30 sec between versions\n\t\t\t{3600, 86400}, \/\/ next day -> 1 h between versions\n\t\t\t{86400, 592000}, \/\/ next 30 days -> 1 day between versions\n\t\t\t{604800, maxAge}, \/\/ next year -> 1 week between versions\n\t\t},\n\t\tmutex: &mutex,\n\t}\n\n\tif debug {\n\t\tl.Debugf(\"instantiated %#v\", s)\n\t}\n\n\t\/\/ Rename version with old version format\n\ts.renameOld()\n\n\tgo func() {\n\t\ts.clean()\n\t\tfor _ = range time.Tick(time.Duration(cleanInterval) * time.Second) {\n\t\t\ts.clean()\n\t\t}\n\t}()\n\n\treturn s\n}\n\nfunc (v Staggered) clean() {\n\tif debug {\n\t\tl.Debugln(\"Versioner clean: Waiting for lock on\", v.versionsPath)\n\t}\n\tv.mutex.Lock()\n\tdefer v.mutex.Unlock()\n\tif debug {\n\t\tl.Debugln(\"Versioner clean: Cleaning\", v.versionsPath)\n\t}\n\n\t_, err := os.Stat(v.versionsPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"creating versions dir\", v.versionsPath)\n\t\t\t}\n\t\t\tos.MkdirAll(v.versionsPath, 0755)\n\t\t\tosutil.HideFile(v.versionsPath)\n\t\t} else {\n\t\t\tl.Warnln(\"Versioner: can't create versions dir\", err)\n\t\t}\n\t}\n\n\tversionsPerFile := make(map[string][]string)\n\tfilesPerDir := make(map[string]int)\n\n\terr = filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif f.Mode().IsDir() && f.Mode()&os.ModeSymlink == 0 {\n\t\t\tfilesPerDir[path] = 0\n\t\t\tif path != v.versionsPath {\n\t\t\t\tdir := filepath.Dir(path)\n\t\t\t\tfilesPerDir[dir]++\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Regular file, or possibly a symlink.\n\n\t\t\textension := filenameTag(path)\n\t\t\tdir := filepath.Dir(path)\n\t\t\tname := path[:len(path)-len(extension)-1]\n\n\t\t\tfilesPerDir[dir]++\n\t\t\tversionsPerFile[name] = append(versionsPerFile[name], path)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tl.Warnln(\"Versioner: error scanning versions dir\", err)\n\t\treturn\n\t}\n\n\tfor _, versionList := range versionsPerFile {\n\t\t\/\/ List from filepath.Walk is sorted\n\t\tv.expire(versionList)\n\t}\n\n\tfor path, numFiles := range filesPerDir {\n\t\tif numFiles > 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif path == v.versionsPath {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"Cleaner: versions dir is empty, don't delete\", path)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif debug {\n\t\t\tl.Debugln(\"Cleaner: deleting empty directory\", path)\n\t\t}\n\t\terr = os.Remove(path)\n\t\tif err != nil {\n\t\t\tl.Warnln(\"Versioner: can't remove directory\", path, err)\n\t\t}\n\t}\n\tif debug {\n\t\tl.Debugln(\"Cleaner: Finished cleaning\", v.versionsPath)\n\t}\n}\n\nfunc (v Staggered) expire(versions []string) {\n\tif debug {\n\t\tl.Debugln(\"Versioner: Expiring versions\", versions)\n\t}\n\tvar prevAge int64\n\tfirstFile := true\n\tfor _, file := range versions {\n\t\tfi, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\tl.Warnln(\"versioner:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\tl.Infof(\"non-file %q is named like a file version\", file)\n\t\t\tcontinue\n\t\t}\n\n\t\tversionTime, err := time.Parse(TimeFormat, filenameTag(file))\n\t\tif err != nil {\n\t\t\tif debug {\n\t\t\t\tl.Debugf(\"Versioner: file name %q is invalid: %v\", file, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tage := int64(time.Since(versionTime).Seconds())\n\n\t\t\/\/ If the file is older than the max age of the last interval, remove it\n\t\tif lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"Versioner: File over maximum age -> delete \", file)\n\t\t\t}\n\t\t\terr = os.Remove(file)\n\t\t\tif err != nil {\n\t\t\t\tl.Warnf(\"Versioner: can't remove %q: %v\", file, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ If it's the first (oldest) file in the list we can skip the interval checks\n\t\tif firstFile {\n\t\t\tprevAge = age\n\t\t\tfirstFile = false\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Find the interval the file fits in\n\t\tvar usedInterval Interval\n\t\tfor _, usedInterval = range v.interval {\n\t\t\tif age < usedInterval.end {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif prevAge-age < usedInterval.step {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"too many files in step -> delete\", file)\n\t\t\t}\n\t\t\terr = os.Remove(file)\n\t\t\tif err != nil {\n\t\t\t\tl.Warnf(\"Versioner: can't remove %q: %v\", file, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tprevAge = age\n\t}\n}\n\n\/\/ Move away the named file to a version archive. If this function returns\n\/\/ nil, the named file does not exist any more (has been archived).\nfunc (v Staggered) Archive(filePath string) error {\n\tif debug {\n\t\tl.Debugln(\"Waiting for lock on \", v.versionsPath)\n\t}\n\tv.mutex.Lock()\n\tdefer v.mutex.Unlock()\n\n\tif _, err := os.Lstat(filePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"not archiving nonexistent file\", filePath)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif _, err := os.Stat(v.versionsPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif debug {\n\t\t\t\tl.Debugln(\"creating versions dir\", v.versionsPath)\n\t\t\t}\n\t\t\tos.MkdirAll(v.versionsPath, 0755)\n\t\t\tosutil.HideFile(v.versionsPath)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif debug {\n\t\tl.Debugln(\"archiving\", filePath)\n\t}\n\n\tfile := filepath.Base(filePath)\n\tinFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir := filepath.Join(v.versionsPath, inFolderPath)\n\terr = os.MkdirAll(dir, 0755)\n\tif err != nil && !os.IsExist(err) {\n\t\treturn err\n\t}\n\n\tver := taggedFilename(file, time.Now().Format(TimeFormat))\n\tdst := filepath.Join(dir, ver)\n\tif debug {\n\t\tl.Debugln(\"moving to\", dst)\n\t}\n\terr = osutil.Rename(filePath, dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Glob according to the new file~timestamp.ext pattern.\n\tnewVersions, err := filepath.Glob(filepath.Join(dir, taggedFilename(file, TimeGlob)))\n\tif err != nil {\n\t\tl.Warnln(\"globbing:\", err)\n\t\treturn nil\n\t}\n\n\t\/\/ Also according to the old file.ext~timestamp pattern.\n\toldVersions, err := filepath.Glob(filepath.Join(dir, file+\"~\"+TimeGlob))\n\tif err != nil {\n\t\tl.Warnln(\"globbing:\", err)\n\t\treturn nil\n\t}\n\n\t\/\/ Use all the found filenames.\n\tversions := append(oldVersions, newVersions...)\n\tv.expire(uniqueSortedStrings(versions))\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage watcher\n\nimport (\n\t\"context\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\terrorCount = expvar.NewInt(\"log_watcher_errors_total\")\n)\n\ntype watch struct {\n\tps []Processor\n\tfi os.FileInfo\n}\n\n\/\/ hasChanged indicates that a FileInfo has changed.\n\/\/ http:\/\/apenwarr.ca\/log\/20181113 suggests that comparing mtime is\n\/\/ insufficient for sub-second resolution on many platforms, and we can do\n\/\/ better by comparing a few fields in the FileInfo. This set of tests is less\n\/\/ than the ones suggested in the blog post, but seem sufficient for making\n\/\/ tests (notably, sub-millisecond accuracy) pass quickly. mtime-only diff has\n\/\/ caused race conditions in test and likely caused strange behaviour in\n\/\/ production environments.\nfunc hasChanged(a, b os.FileInfo) bool {\n\tif a == nil || b == nil {\n\t\tglog.V(2).Info(\"One or both FileInfos are nil\")\n\t\treturn true\n\t}\n\tif a.ModTime() != b.ModTime() {\n\t\tglog.V(2).Info(\"modtimes differ\")\n\t\treturn true\n\t}\n\tif a.Size() != b.Size() {\n\t\tglog.V(2).Info(\"sizes differ\")\n\t\treturn true\n\t}\n\tif a.Mode() != b.Mode() {\n\t\tglog.V(2).Info(\"modes differ\")\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ LogWatcher implements a Watcher for watching real filesystems.\ntype LogWatcher struct {\n\twatcher *fsnotify.Watcher\n\tpollTicker *time.Ticker\n\n\twatchedMu sync.RWMutex \/\/ protects `watched'\n\twatched map[string]*watch\n\n\tstopTicks chan struct{} \/\/ Channel to notify ticker to stop.\n\n\tticksDone chan struct{} \/\/ Channel to notify when the ticks handler is done.\n\teventsDone chan struct{} \/\/ Channel to notify when the events handler is done.\n\n\tpollMu sync.Mutex \/\/ protects `Poll()`\n\n\tcloseOnce sync.Once\n}\n\n\/\/ NewLogWatcher returns a new LogWatcher, or returns an error.\nfunc NewLogWatcher(pollInterval time.Duration, enableFsnotify bool) (*LogWatcher, error) {\n\tvar f *fsnotify.Watcher\n\tif enableFsnotify {\n\t\tvar err error\n\t\tf, err = fsnotify.NewWatcher()\n\t\tif err != nil {\n\t\t\tglog.Warning(err)\n\t\t}\n\t}\n\tif f == nil && pollInterval == 0 {\n\t\tglog.Infof(\"fsnotify disabled and no poll interval specified; defaulting to 250ms poll\")\n\t\tpollInterval = time.Millisecond * 250\n\t}\n\tw := &LogWatcher{\n\t\twatcher: f,\n\t\twatched: make(map[string]*watch),\n\t}\n\tif pollInterval > 0 {\n\t\tw.pollTicker = time.NewTicker(pollInterval)\n\t\tw.stopTicks = make(chan struct{})\n\t\tw.ticksDone = make(chan struct{})\n\t\tgo w.runTicks()\n\t\tglog.V(2).Infof(\"started ticker with %s interval\", pollInterval)\n\t}\n\tif f != nil {\n\t\tw.eventsDone = make(chan struct{})\n\t\tgo w.runEvents()\n\t}\n\treturn w, nil\n}\n\nfunc (w *LogWatcher) sendEvent(e Event) {\n\tw.watchedMu.RLock()\n\twatch, ok := w.watched[e.Pathname]\n\tw.watchedMu.RUnlock()\n\tif !ok {\n\t\td := filepath.Dir(e.Pathname)\n\t\tw.watchedMu.RLock()\n\t\twatch, ok = w.watched[d]\n\t\tw.watchedMu.RUnlock()\n\t\tif !ok {\n\t\t\tglog.V(2).Infof(\"No watch for path %q\", e.Pathname)\n\t\t\treturn\n\t\t}\n\t}\n\tw.sendWatchedEvent(watch, e)\n}\n\n\/\/ Send an event to a watch; all locks assumed to be held.\nfunc (w *LogWatcher) sendWatchedEvent(watch *watch, e Event) {\n\tfor _, p := range watch.ps {\n\t\tp.ProcessFileEvent(context.TODO(), e)\n\t}\n}\n\nfunc (w *LogWatcher) runTicks() {\n\tdefer close(w.ticksDone)\n\n\tif w.pollTicker == nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-w.pollTicker.C:\n\t\t\tw.Poll()\n\t\tcase <-w.stopTicks:\n\t\t\tw.pollTicker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Poll all watched objects for updates, dispatching events if required.\nfunc (w *LogWatcher) Poll() {\n\tw.pollMu.Lock()\n\tdefer w.pollMu.Unlock()\n\tglog.V(2).Info(\"Polling watched files.\")\n\tw.watchedMu.RLock()\n\tfor n, watch := range w.watched {\n\t\tw.watchedMu.RUnlock()\n\t\tw.pollWatchedPath(n, watch)\n\t\tw.watchedMu.RLock()\n\t}\n\tw.watchedMu.RUnlock()\n}\n\n\/\/ pollWatchedPathLocked polls an already-watched path for updates.\nfunc (w *LogWatcher) pollWatchedPath(pathname string, watched *watch) {\n\tglog.V(2).Infof(\"Stat %q\", pathname)\n\tfi, err := os.Stat(pathname)\n\tif err != nil {\n\t\tglog.V(1).Info(err)\n\t\treturn\n\t}\n\n\t\/\/ fsnotify does not send update events for the directory itself.\n\tif fi.IsDir() {\n\t\tw.pollDirectory(watched, pathname)\n\t} else if hasChanged(fi, watched.fi) {\n\t\tglog.V(2).Infof(\"sending update for %s\", pathname)\n\t\tw.sendWatchedEvent(watched, Event{Update, pathname})\n\t}\n\n\tw.watchedMu.Lock()\n\tif _, ok := w.watched[pathname]; ok {\n\t\tw.watched[pathname].fi = fi\n\t}\n\tw.watchedMu.Unlock()\n}\n\nfunc (w *LogWatcher) pollDirectory(parentWatch *watch, pathname string) {\n\tmatches, err := filepath.Glob(path.Join(pathname, \"*\"))\n\tif err != nil {\n\t\tglog.V(1).Info(err)\n\t\treturn\n\t}\n\t\/\/ TODO(jaq): how do we avoid duplicate notifies for things that are already in the watch list?\n\tfor _, match := range matches {\n\t\tfi, err := os.Stat(match)\n\t\tif err != nil {\n\t\t\tglog.V(1).Info(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tw.watchedMu.RLock()\n\t\twatched, ok := w.watched[match]\n\t\tw.watchedMu.RUnlock()\n\t\tswitch {\n\t\tcase !ok:\n\t\t\t\/\/ The object has no watch object so it must be new, but we can't\n\t\t\t\/\/ decide that -- wait for the Tailer to match pattern and instruct\n\t\t\t\/\/ us to Observe it directly.\n\t\t\tglog.V(2).Infof(\"sending create for %s\", match)\n\t\t\tw.sendWatchedEvent(parentWatch, Event{Create, match})\n\t\tcase hasChanged(fi, watched.fi):\n\t\t\tglog.V(2).Infof(\"sending update for %s\", match)\n\t\t\tw.sendWatchedEvent(watched, Event{Update, match})\n\t\t\tw.watchedMu.Lock()\n\t\t\tw.watched[match].fi = fi\n\t\t\tw.watchedMu.Unlock()\n\t\tdefault:\n\t\t\tglog.V(2).Infof(\"No change for %s, no send\", match)\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\tw.pollDirectory(parentWatch, match)\n\t\t}\n\t}\n}\n\n\/\/ runEvents assumes that w.watcher is not nil\nfunc (w *LogWatcher) runEvents() {\n\tdefer close(w.eventsDone)\n\n\t\/\/ Suck out errors and dump them to the error log.\n\tgo func() {\n\t\tfor err := range w.watcher.Errors {\n\t\t\terrorCount.Add(1)\n\t\t\tglog.Errorf(\"fsnotify error: %s\\n\", err)\n\t\t}\n\t}()\n\n\tfor e := range w.watcher.Events {\n\t\tglog.V(2).Infof(\"watcher event %v\", e)\n\t\tswitch {\n\t\tcase e.Op&fsnotify.Create == fsnotify.Create:\n\t\t\tw.sendEvent(Event{Create, e.Name})\n\t\tcase e.Op&fsnotify.Write == fsnotify.Write,\n\t\t\te.Op&fsnotify.Chmod == fsnotify.Chmod:\n\t\t\tw.sendEvent(Event{Update, e.Name})\n\t\tcase e.Op&fsnotify.Remove == fsnotify.Remove:\n\t\t\tw.sendEvent(Event{Delete, e.Name})\n\t\tcase e.Op&fsnotify.Rename == fsnotify.Rename:\n\t\t\t\/\/ Rename is only issued on the original file path; the new name receives a Create event\n\t\t\tw.sendEvent(Event{Delete, e.Name})\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown op type %v\", e.Op))\n\t\t}\n\t}\n\tglog.Infof(\"Shutting down log watcher.\")\n}\n\n\/\/ Close shuts down the LogWatcher. It is safe to call this from multiple clients.\nfunc (w *LogWatcher) Close() (err error) {\n\tw.closeOnce.Do(func() {\n\t\tif w.watcher != nil {\n\t\t\terr = w.watcher.Close()\n\t\t\t<-w.eventsDone\n\t\t}\n\t\tif w.pollTicker != nil {\n\t\t\tclose(w.stopTicks)\n\t\t\t<-w.ticksDone\n\t\t}\n\t})\n\treturn nil\n}\n\n\/\/ Observe adds a path to the list of watched items.\n\/\/ If this path has a new event, then the processor being registered will be sent the event.\nfunc (w *LogWatcher) Observe(path string, processor Processor) error {\n\tabsPath, err := w.addWatch(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.watchedMu.Lock()\n\tdefer w.watchedMu.Unlock()\n\twatched, ok := w.watched[absPath]\n\tif !ok {\n\t\tfi, err := os.Stat(absPath)\n\t\tif err != nil {\n\t\t\tglog.V(1).Info(err)\n\t\t}\n\t\tw.watched[absPath] = &watch{ps: []Processor{processor}, fi: fi}\n\t\tglog.Infof(\"No abspath in watched list, added new one for %s\", absPath)\n\t\treturn nil\n\t}\n\tfor _, p := range watched.ps {\n\t\tif p == processor {\n\t\t\tglog.Infof(\"Found this processor in watched list\")\n\t\t\treturn nil\n\t\t}\n\t}\n\twatched.ps = append(watched.ps, processor)\n\tglog.Infof(\"appended this processor\")\n\treturn nil\n}\n\nfunc (w *LogWatcher) addWatch(path string) (string, error) {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Failed to lookup absolutepath of %q\", path)\n\t}\n\tglog.V(2).Infof(\"Adding a watch on resolved path %q\", absPath)\n\tif w.watcher != nil {\n\t\terr = w.watcher.Add(absPath)\n\t\tif err != nil {\n\t\t\tif os.IsPermission(err) {\n\t\t\t\tglog.V(2).Infof(\"Skipping permission denied error on adding a watch.\")\n\t\t\t} else {\n\t\t\t\treturn \"\", errors.Wrapf(err, \"Failed to create a new watch on %q\", absPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn absPath, nil\n}\n\n\/\/ IsWatching indicates if the path is being watched. It includes both\n\/\/ filenames and directories.\nfunc (w *LogWatcher) IsWatching(path string) bool {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't resolve path %q: %s\", absPath, err)\n\t\treturn false\n\t}\n\tglog.V(2).Infof(\"Resolved path for lookup %q\", absPath)\n\tw.watchedMu.RLock()\n\t_, ok := w.watched[absPath]\n\tw.watchedMu.RUnlock()\n\treturn ok\n}\n\nfunc (w *LogWatcher) Unobserve(path string, processor Processor) error {\n\tw.watchedMu.Lock()\n\tdefer w.watchedMu.Unlock()\n\t_, ok := w.watched[path]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tfor i, p := range w.watched[path].ps {\n\t\tif p == processor {\n\t\t\tw.watched[path].ps = append(w.watched[path].ps[0:i], w.watched[path].ps[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(w.watched[path].ps) == 0 {\n\t\tdelete(w.watched, path)\n\t}\n\tif w.watcher != nil {\n\t\treturn w.watcher.Remove(path)\n\t}\n\treturn nil\n}\n<commit_msg>Move os.Stat until after we know we watch the file.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage watcher\n\nimport (\n\t\"context\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\terrorCount = expvar.NewInt(\"log_watcher_errors_total\")\n)\n\ntype watch struct {\n\tps []Processor\n\tfi os.FileInfo\n}\n\n\/\/ hasChanged indicates that a FileInfo has changed.\n\/\/ http:\/\/apenwarr.ca\/log\/20181113 suggests that comparing mtime is\n\/\/ insufficient for sub-second resolution on many platforms, and we can do\n\/\/ better by comparing a few fields in the FileInfo. This set of tests is less\n\/\/ than the ones suggested in the blog post, but seem sufficient for making\n\/\/ tests (notably, sub-millisecond accuracy) pass quickly. mtime-only diff has\n\/\/ caused race conditions in test and likely caused strange behaviour in\n\/\/ production environments.\nfunc hasChanged(a, b os.FileInfo) bool {\n\tif a == nil || b == nil {\n\t\tglog.V(2).Info(\"One or both FileInfos are nil\")\n\t\treturn true\n\t}\n\tif a.ModTime() != b.ModTime() {\n\t\tglog.V(2).Info(\"modtimes differ\")\n\t\treturn true\n\t}\n\tif a.Size() != b.Size() {\n\t\tglog.V(2).Info(\"sizes differ\")\n\t\treturn true\n\t}\n\tif a.Mode() != b.Mode() {\n\t\tglog.V(2).Info(\"modes differ\")\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ LogWatcher implements a Watcher for watching real filesystems.\ntype LogWatcher struct {\n\twatcher *fsnotify.Watcher\n\tpollTicker *time.Ticker\n\n\twatchedMu sync.RWMutex \/\/ protects `watched'\n\twatched map[string]*watch\n\n\tstopTicks chan struct{} \/\/ Channel to notify ticker to stop.\n\n\tticksDone chan struct{} \/\/ Channel to notify when the ticks handler is done.\n\teventsDone chan struct{} \/\/ Channel to notify when the events handler is done.\n\n\tpollMu sync.Mutex \/\/ protects `Poll()`\n\n\tcloseOnce sync.Once\n}\n\n\/\/ NewLogWatcher returns a new LogWatcher, or returns an error.\nfunc NewLogWatcher(pollInterval time.Duration, enableFsnotify bool) (*LogWatcher, error) {\n\tvar f *fsnotify.Watcher\n\tif enableFsnotify {\n\t\tvar err error\n\t\tf, err = fsnotify.NewWatcher()\n\t\tif err != nil {\n\t\t\tglog.Warning(err)\n\t\t}\n\t}\n\tif f == nil && pollInterval == 0 {\n\t\tglog.Infof(\"fsnotify disabled and no poll interval specified; defaulting to 250ms poll\")\n\t\tpollInterval = time.Millisecond * 250\n\t}\n\tw := &LogWatcher{\n\t\twatcher: f,\n\t\twatched: make(map[string]*watch),\n\t}\n\tif pollInterval > 0 {\n\t\tw.pollTicker = time.NewTicker(pollInterval)\n\t\tw.stopTicks = make(chan struct{})\n\t\tw.ticksDone = make(chan struct{})\n\t\tgo w.runTicks()\n\t\tglog.V(2).Infof(\"started ticker with %s interval\", pollInterval)\n\t}\n\tif f != nil {\n\t\tw.eventsDone = make(chan struct{})\n\t\tgo w.runEvents()\n\t}\n\treturn w, nil\n}\n\nfunc (w *LogWatcher) sendEvent(e Event) {\n\tw.watchedMu.RLock()\n\twatch, ok := w.watched[e.Pathname]\n\tw.watchedMu.RUnlock()\n\tif !ok {\n\t\td := filepath.Dir(e.Pathname)\n\t\tw.watchedMu.RLock()\n\t\twatch, ok = w.watched[d]\n\t\tw.watchedMu.RUnlock()\n\t\tif !ok {\n\t\t\tglog.V(2).Infof(\"No watch for path %q\", e.Pathname)\n\t\t\treturn\n\t\t}\n\t}\n\tw.sendWatchedEvent(watch, e)\n}\n\n\/\/ Send an event to a watch; all locks assumed to be held.\nfunc (w *LogWatcher) sendWatchedEvent(watch *watch, e Event) {\n\tfor _, p := range watch.ps {\n\t\tp.ProcessFileEvent(context.TODO(), e)\n\t}\n}\n\nfunc (w *LogWatcher) runTicks() {\n\tdefer close(w.ticksDone)\n\n\tif w.pollTicker == nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-w.pollTicker.C:\n\t\t\tw.Poll()\n\t\tcase <-w.stopTicks:\n\t\t\tw.pollTicker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Poll all watched objects for updates, dispatching events if required.\nfunc (w *LogWatcher) Poll() {\n\tw.pollMu.Lock()\n\tdefer w.pollMu.Unlock()\n\tglog.V(2).Info(\"Polling watched files.\")\n\tw.watchedMu.RLock()\n\tfor n, watch := range w.watched {\n\t\tw.watchedMu.RUnlock()\n\t\tw.pollWatchedPath(n, watch)\n\t\tw.watchedMu.RLock()\n\t}\n\tw.watchedMu.RUnlock()\n}\n\n\/\/ pollWatchedPathLocked polls an already-watched path for updates.\nfunc (w *LogWatcher) pollWatchedPath(pathname string, watched *watch) {\n\tglog.V(2).Infof(\"Stat %q\", pathname)\n\tfi, err := os.Stat(pathname)\n\tif err != nil {\n\t\tglog.V(1).Info(err)\n\t\treturn\n\t}\n\n\t\/\/ fsnotify does not send update events for the directory itself.\n\tif fi.IsDir() {\n\t\tw.pollDirectory(watched, pathname)\n\t} else if hasChanged(fi, watched.fi) {\n\t\tglog.V(2).Infof(\"sending update for %s\", pathname)\n\t\tw.sendWatchedEvent(watched, Event{Update, pathname})\n\t}\n\n\tw.watchedMu.Lock()\n\tif _, ok := w.watched[pathname]; ok {\n\t\tw.watched[pathname].fi = fi\n\t}\n\tw.watchedMu.Unlock()\n}\n\nfunc (w *LogWatcher) pollDirectory(parentWatch *watch, pathname string) {\n\tmatches, err := filepath.Glob(path.Join(pathname, \"*\"))\n\tif err != nil {\n\t\tglog.V(1).Info(err)\n\t\treturn\n\t}\n\t\/\/ TODO(jaq): how do we avoid duplicate notifies for things that are already in the watch list?\n\tfor _, match := range matches {\n\t\tw.watchedMu.RLock()\n\t\twatched, ok := w.watched[match]\n\t\tw.watchedMu.RUnlock()\n\t\tif !ok {\n\t\t\t\/\/ The object has no watch object so it must be new, but we can't\n\t\t\t\/\/ decide that -- wait for the Tailer to match pattern and instruct\n\t\t\t\/\/ us to Observe it directly.\n\t\t\tglog.V(2).Infof(\"sending create for %s\", match)\n\t\t\tw.sendWatchedEvent(parentWatch, Event{Create, match})\n\t\t}\n\t\tfi, err := os.Stat(match)\n\t\tif err != nil {\n\t\t\tglog.V(1).Info(err)\n\t\t\tcontinue\n\t\t}\n\t\tif ok && hasChanged(fi, watched.fi) {\n\t\t\tglog.V(2).Infof(\"sending update for %s\", match)\n\t\t\tw.sendWatchedEvent(watched, Event{Update, match})\n\t\t\tw.watchedMu.Lock()\n\t\t\tw.watched[match].fi = fi\n\t\t\tw.watchedMu.Unlock()\n\t\t} else {\n\t\t\tglog.V(2).Infof(\"No change for %s, no send\", match)\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\tw.pollDirectory(parentWatch, match)\n\t\t}\n\t}\n}\n\n\/\/ runEvents assumes that w.watcher is not nil\nfunc (w *LogWatcher) runEvents() {\n\tdefer close(w.eventsDone)\n\n\t\/\/ Suck out errors and dump them to the error log.\n\tgo func() {\n\t\tfor err := range w.watcher.Errors {\n\t\t\terrorCount.Add(1)\n\t\t\tglog.Errorf(\"fsnotify error: %s\\n\", err)\n\t\t}\n\t}()\n\n\tfor e := range w.watcher.Events {\n\t\tglog.V(2).Infof(\"watcher event %v\", e)\n\t\tswitch {\n\t\tcase e.Op&fsnotify.Create == fsnotify.Create:\n\t\t\tw.sendEvent(Event{Create, e.Name})\n\t\tcase e.Op&fsnotify.Write == fsnotify.Write,\n\t\t\te.Op&fsnotify.Chmod == fsnotify.Chmod:\n\t\t\tw.sendEvent(Event{Update, e.Name})\n\t\tcase e.Op&fsnotify.Remove == fsnotify.Remove:\n\t\t\tw.sendEvent(Event{Delete, e.Name})\n\t\tcase e.Op&fsnotify.Rename == fsnotify.Rename:\n\t\t\t\/\/ Rename is only issued on the original file path; the new name receives a Create event\n\t\t\tw.sendEvent(Event{Delete, e.Name})\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown op type %v\", e.Op))\n\t\t}\n\t}\n\tglog.Infof(\"Shutting down log watcher.\")\n}\n\n\/\/ Close shuts down the LogWatcher. It is safe to call this from multiple clients.\nfunc (w *LogWatcher) Close() (err error) {\n\tw.closeOnce.Do(func() {\n\t\tif w.watcher != nil {\n\t\t\terr = w.watcher.Close()\n\t\t\t<-w.eventsDone\n\t\t}\n\t\tif w.pollTicker != nil {\n\t\t\tclose(w.stopTicks)\n\t\t\t<-w.ticksDone\n\t\t}\n\t})\n\treturn nil\n}\n\n\/\/ Observe adds a path to the list of watched items.\n\/\/ If this path has a new event, then the processor being registered will be sent the event.\nfunc (w *LogWatcher) Observe(path string, processor Processor) error {\n\tabsPath, err := w.addWatch(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.watchedMu.Lock()\n\tdefer w.watchedMu.Unlock()\n\twatched, ok := w.watched[absPath]\n\tif !ok {\n\t\tfi, err := os.Stat(absPath)\n\t\tif err != nil {\n\t\t\tglog.V(1).Info(err)\n\t\t}\n\t\tw.watched[absPath] = &watch{ps: []Processor{processor}, fi: fi}\n\t\tglog.Infof(\"No abspath in watched list, added new one for %s\", absPath)\n\t\treturn nil\n\t}\n\tfor _, p := range watched.ps {\n\t\tif p == processor {\n\t\t\tglog.Infof(\"Found this processor in watched list\")\n\t\t\treturn nil\n\t\t}\n\t}\n\twatched.ps = append(watched.ps, processor)\n\tglog.Infof(\"appended this processor\")\n\treturn nil\n}\n\nfunc (w *LogWatcher) addWatch(path string) (string, error) {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Failed to lookup absolutepath of %q\", path)\n\t}\n\tglog.V(2).Infof(\"Adding a watch on resolved path %q\", absPath)\n\tif w.watcher != nil {\n\t\terr = w.watcher.Add(absPath)\n\t\tif err != nil {\n\t\t\tif os.IsPermission(err) {\n\t\t\t\tglog.V(2).Infof(\"Skipping permission denied error on adding a watch.\")\n\t\t\t} else {\n\t\t\t\treturn \"\", errors.Wrapf(err, \"Failed to create a new watch on %q\", absPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn absPath, nil\n}\n\n\/\/ IsWatching indicates if the path is being watched. It includes both\n\/\/ filenames and directories.\nfunc (w *LogWatcher) IsWatching(path string) bool {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't resolve path %q: %s\", absPath, err)\n\t\treturn false\n\t}\n\tglog.V(2).Infof(\"Resolved path for lookup %q\", absPath)\n\tw.watchedMu.RLock()\n\t_, ok := w.watched[absPath]\n\tw.watchedMu.RUnlock()\n\treturn ok\n}\n\nfunc (w *LogWatcher) Unobserve(path string, processor Processor) error {\n\tw.watchedMu.Lock()\n\tdefer w.watchedMu.Unlock()\n\t_, ok := w.watched[path]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tfor i, p := range w.watched[path].ps {\n\t\tif p == processor {\n\t\t\tw.watched[path].ps = append(w.watched[path].ps[0:i], w.watched[path].ps[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(w.watched[path].ps) == 0 {\n\t\tdelete(w.watched, path)\n\t}\n\tif w.watcher != nil {\n\t\treturn w.watcher.Remove(path)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package node\n\nimport (\n \"github.com\/nictuku\/dht\"\n \"net\"\n \"crypto\/rsa\"\n \"crypto\/x509\"\n \"sync\"\n \"fmt\"\n \"io\"\n \"encoding\/binary\"\n \"errors\"\n \"golang.org\/x\/crypto\/nacl\/box\"\n \"crypto\/rand\"\n \"crypto\/sha1\"\n \"bytes\"\n \"encoding\/hex\"\n \"time\"\n\n \"github.com\/danoctavian\/bluntly\/stream\"\n)\n\n\/* NODE *\/\ntype Node struct { \n dht *DHT\n config *Config\n}\n\n\/* CONFIG *\/\ntype FileConfig struct {\n dhtPort int\n ownKeyFile string\n id string\n configRoot string\n holePunch HolePunchConf\n}\n\ntype Config struct {\n dhtPort int\n ownKey rsa.PrivateKey\n id string\n configRoot string\n holePunch HolePunchConf\n contactList *ContactList\n}\n\ntype HolePunchConf struct {\n\trecvPort int\n}\n\ntype ContactList struct {\n contacts *map[rsa.PublicKey]string\n mut *sync.Mutex\n}\n\nfunc NewNode(conf *Config) (node *Node, err error) {\n \/\/ setup the DHT\n dhtConf := dht.DefaultConfig\n dhtConf.Port = conf.dhtPort\n dhtClient, err := dht.New(dhtConf)\n if err != nil { return }\n dht := newDHT(dhtClient)\n\n go dht.Run()\n\n node.dht = dht\n\n\treturn node, nil\n}\n\n\/* CLIENT *\/\nfunc (n *Node) Dial(peerPubKey *rsa.PublicKey) (conn Conn, err error) {\n\n}\n\n\/* LISTENER *\/\ntype Listener struct {\n connChan chan *Conn\n}\n\nfunc (n *Node) Listen(port int) (listener *Listener, err error) {\n connChan := make(chan *Conn)\n listener = &Listener{connChan: connChan}\n \/\/ setup TCP listener\n tcpListener, err := net.Listen(\"tcp\", fmt.Sprintf(\":\"))\n if err != nil {return}\n\n \/\/ loop accepting TCP connections\n go func() {\n for {\n tcpConn, tcpErr := tcpListener.Accept()\n if tcpErr != nil {\n Log(LOG_ERROR, \"%s\", tcpErr)\n }\n\n go func() {\n conn, handshakeError := handleClientConn(tcpConn, &n.config.ownKey, n.config.contactList)\n if err != nil {\n Log(LOG_INFO,\n \"handling client connection from address %s %s\",\n tcpConn.RemoteAddr().String(), handshakeError)\n } else {\n connChan <- conn\n }\n }() \n }\n }()\n\n \/\/ announce yourself to the DHT\n go n.announceSelf()\n\n return\n}\n\nfunc (n *Node) announceSelf() error {\n infoHash, err := pubKeyToInfoHash(&n.config.ownKey.PublicKey)\n if (err != nil) {return err}\n\n for {\n \/\/ the peers request is done only for the purpose of announcing\n n.dht.PeersRequest(infoHash, true) \n time.Sleep(10 * time.Second)\n }\n}\n\nfunc pubKeyToInfoHash(pub *rsa.PublicKey) (string, error) {\n pubKeyBytes, err := x509.MarshalPKIXPublicKey(pub)\n if err != nil { return \"\", err}\n sha1Bytes := sha1.Sum(pubKeyBytes)\n return hex.EncodeToString(sha1Bytes[:]), nil\n}\n\nfunc (l *Listener) Accept() (c net.Conn, err error) {\n conn := <- l.connChan\n return conn, nil\n}\n\nfunc (l *Listener) Close() error {\n return nil\n}\n\nfunc Addr() net.Addr {\n return nil\n}\n\nfunc handleClientConn(rawConn net.Conn,\n ownKey *rsa.PrivateKey,\n contacts *ContactList) (conn *Conn, err error) {\n\n var handshakeLen int64\n err = binary.Read(rawConn, binary.BigEndian, &handshakeLen)\n if (err != nil) { return }\n\n ciphertext := make([]byte, handshakeLen)\n _, err = io.ReadFull(rawConn, ciphertext)\n if (err != nil) { return }\n\n plain, err := rsa.DecryptPKCS1v15(nil, ownKey, ciphertext)\n if (err != nil) { return }\n\n connReq := ConnRequest{}\n err = connReq.UnmarshalBinary(plain)\n if (err != nil) { return }\n\n _, privKey, err := box.GenerateKey(rand.Reader)\n if (err != nil) { return }\n\n var sharedKey [sessionKeyLen]byte\n box.Precompute(&sharedKey, connReq.sessionKey, privKey) \n\n return &Conn{Conn: rawConn,\n sharedKey: &sharedKey,\n \/\/ TODO: circular buffer capacity may cause \n \/\/ streaming to fail. to avoid,\n \/\/ put a cap on the size of the encrypted chunks\n readBuf: stream.NewCircularBuf(2048),\n readerBufMutex: &sync.Mutex{}}, nil\n}\n\nconst sessionKeyLen = 32\n\n\/* connection request *\/\ntype ConnRequest struct {\n peerPub *rsa.PublicKey\n sessionKey *[sessionKeyLen]byte \n}\n\ntype ConnResponse struct {\n\n}\n\nfunc (r *ConnRequest) MarshalBinary() (data []byte, err error) {\n pubKeyBytes, err := x509.MarshalPKIXPublicKey(r.peerPub)\n if (err != nil) { return }\n\n return append((*r.sessionKey)[:], pubKeyBytes...), nil\n}\n\nfunc (r *ConnRequest) UnmarshalBinary(data []byte) (err error) {\n copiedBytes := copy(r.sessionKey[:], data[:sessionKeyLen])\n if (copiedBytes < sessionKeyLen) {\n return errors.New(\"session key too short.\") \n }\n\n someKey, err := x509.ParsePKIXPublicKey(data)\n pubKey := someKey.(*rsa.PublicKey)\n if (err != nil) { return }\n r.peerPub = pubKey\n\n return\n}\n\n\/* CONNECTION *\/\ntype Conn struct {\n net.Conn \/\/ underlying network connection \n sharedKey *[sessionKeyLen]byte\n\n \/\/ a buffer that is used to store in excess data, not yet read\n \/\/ but already decrypted\n readBuf *stream.CircularBuf\n readerBufMutex *sync.Mutex\n}\n\nfunc (c *Conn) Read(b []byte) (readBytes int, err error) {\n c.readerBufMutex.Lock()\n defer c.readerBufMutex.Unlock()\n\n readBytes, _ = c.readBuf.Read(b)\n if readBytes > 0 { return readBytes, nil } \/\/ just serve the data from the buffer\n\n \/\/ if the buffer is empty\n msg, err := c.readFromConn()\n readBytes = copy(b, msg)\n if readBytes < len(msg) { \n \/\/ there's data unread that needs to be buffered\n _, err = c.readBuf.Write(msg[readBytes:])\n if (err != nil) {return 0, err}\n }\n\n return\n}\n\n\/\/ reads data from the network connection\nfunc (c *Conn) readFromConn() (msg []byte, err error) {\n var msgLen uint64\n err = binary.Read(c.Conn, binary.BigEndian, &msgLen)\n if (err != nil) { return nil, err}\n\n cipherBuf := make([]byte, msgLen)\n _, err = io.ReadFull(c.Conn, cipherBuf)\n if (err != nil) {return nil, err}\n msg, err = Decrypt(cipherBuf, c.sharedKey)\n return\n}\n\nfunc (c *Conn) Write(msg []byte) (n int, err error) {\n ciphertext, err := Encrypt(msg, c.sharedKey)\n if err != nil {return 0, err}\n final := lenPrefix(ciphertext)\n\n return c.Conn.Write(final) \n}\n\nfunc (c Conn) Close() error {\n return nil\n}\n\nconst nonceLen = 24\n\nfunc CiphertextLength(msgLen int) int {\n return box.Overhead + nonceLen + msgLen\n}\n\nfunc MsgLength(cipherLen int) int {\n return cipherLen - box.Overhead - nonceLen\n}\n\n\nfunc Encrypt(msg []byte, sharedKey *[sessionKeyLen]byte) (ciphertext []byte, err error) {\n err = nil\n ciphertext = make([]byte, nonceLen)\n var nonce [nonceLen]byte\n _, err = rand.Read(ciphertext[:nonceLen])\n if err != nil { return }\n\n copy(nonce[:], ciphertext[:nonceLen])\n\n ciphertext = box.SealAfterPrecomputation(ciphertext, msg, &nonce, sharedKey)\n return\n}\n\nfunc Decrypt(ciphertext []byte, sharedKey *[sessionKeyLen]byte) (msg []byte, err error) {\n nonceSlice := ciphertext[:nonceLen]\n var nonce [nonceLen]byte\n copy(nonce[:], nonceSlice)\n\n msgLen := MsgLength(len(ciphertext))\n msg = make([]byte, msgLen) \n\n _, success := box.OpenAfterPrecomputation(msg, ciphertext[nonceLen:], &nonce, sharedKey)\n\n if !success {\n return nil, DecryptError{}\n } else {\n return msg, nil\n }\n}\n\n\/\/ prefix a buffer with its length\nfunc lenPrefix(b []byte) ([]byte) {\n buf := new(bytes.Buffer)\n binary.Write(buf, binary.BigEndian, uint64(len(b)))\n return append(buf.Bytes(), b...)\n}\n\ntype DecryptError struct {\n}\nfunc (e DecryptError) Error() string { return \"failed to decrypt message.\"}\n\ntype PeerChan (chan<- string)\n\/* DHT *\/\n\/\/ an enhanced version of the DHT that allows per infohash subscriptions\n\/\/ with notifications sent down channels\ntype DHT struct {\n *dht.DHT\n\n subscribers *map[dht.InfoHash](*ChanSet)\n subMutex *sync.Mutex\n}\n\n\n\/\/ holy mother of god... no set type and no generics\ntype ChanSet struct {\n set map[PeerChan]bool\n}\n\nfunc (set *ChanSet) Add(ch PeerChan) bool {\n _, found := set.set[ch]\n set.set[ch] = true\n return !found \n}\nfunc (set *ChanSet) Remove(ch PeerChan) {\n delete(set.set, ch)\n}\n\nfunc newDHT(dhtClient *dht.DHT) *DHT {\n mp := make(map[dht.InfoHash](*ChanSet))\n return &DHT{dhtClient, &mp, &sync.Mutex{}}\n}\n\nfunc (d *DHT) Run() {\n \/\/ launch the goroutine that deals with dispatching notifications about peers\n go d.drainResults()\n\n \/\/ run the underlying dht client normally\n d.DHT.Run()\n}\n\n\/\/ subscribe for peer notifications on that infohash\nfunc (d *DHT) subscribeToInfoHash(infoHash dht.InfoHash, notificationChan PeerChan) {\n d.subMutex.Lock()\n defer d.subMutex.Unlock()\n chanSet := (*d.subscribers)[infoHash]\n if (chanSet == nil) {\n chanSet = &ChanSet{}\n (*d.subscribers)[infoHash] = chanSet\n }\n chanSet.Add(notificationChan)\n}\n\nfunc (dht *DHT) unsubscribeToInfoHash(infoHash dht.InfoHash, notificationChan PeerChan) {\n dht.subMutex.Lock()\n defer dht.subMutex.Unlock()\n\n chanSet := (*dht.subscribers)[infoHash]\n if (chanSet != nil) {\n chanSet.Remove(notificationChan)\n }\n}\n\nfunc (dht *DHT) drainResults() {\n for batch := range dht.PeersRequestResults {\n dht.notifyOfPeers(batch)\n }\n}\n\nfunc (dht *DHT) notifyOfPeers(batch map[dht.InfoHash][]string) {\n dht.subMutex.Lock()\n defer dht.subMutex.Unlock()\n\n for infoHash, peers := range batch {\n for _, peer := range peers {\n peerChanSet := (*dht.subscribers)[infoHash]\n for peerChan, _ := range peerChanSet.set {\n \/\/ do something with e.Value\n peerChan <- peer\n }\n }\n }\n}\n<commit_msg>rethinking the use of locks<commit_after>package node\n\nimport (\n \"github.com\/nictuku\/dht\"\n \"net\"\n \"crypto\/rsa\"\n \"crypto\/x509\"\n \"sync\"\n \"fmt\"\n \"io\"\n \"encoding\/binary\"\n \"errors\"\n \"golang.org\/x\/crypto\/nacl\/box\"\n \"crypto\/rand\"\n \"crypto\/sha1\"\n \"bytes\"\n \"encoding\/hex\"\n \"time\"\n\n \"github.com\/danoctavian\/bluntly\/stream\"\n)\n\n\/* CONSTANTS *\/\n\nconst sessionKeyLen = 32\nconst nonceLen = 24\nconst readBufferCapacity = 2048\n\n\/\/ size of the buffer for peer notifications\nconst peerChanBufferCapacity = 100\n\n\/* NODE *\/\ntype Node struct { \n dht *DHT\n config *Config\n}\n\n\/* CONFIG *\/\ntype FileConfig struct {\n dhtPort int\n ownKeyFile string\n id string\n configRoot string\n holePunch HolePunchConf\n}\n\ntype Config struct {\n dhtPort int\n ownKey rsa.PrivateKey\n id string\n configRoot string\n holePunch HolePunchConf\n contactList *ContactList\n}\n\ntype HolePunchConf struct {\n\trecvPort int\n}\n\ntype ContactList struct {\n contacts *map[rsa.PublicKey]string\n mut *sync.Mutex\n}\n\nfunc NewNode(conf *Config) (node *Node, err error) {\n \/\/ setup the DHT\n dhtConf := dht.DefaultConfig\n dhtConf.Port = conf.dhtPort\n dhtClient, err := dht.New(dhtConf)\n if err != nil { return }\n dht := newDHT(dhtClient)\n\n go dht.Run()\n\n node.dht = dht\n\n\treturn node, nil\n}\n\n\/* CLIENT *\/\nfunc (n *Node) Dial(peerPubKey *rsa.PublicKey) (conn Conn, err error) {\n infoHash, err := pubKeyToInfoHash(peerPubKey)\n if (err != nil) {return }\n\n peerNotifications := make(chan string, peerChanBufferCapacity)\n n.dht.subscribeToInfoHash(dht.InfoHash(infoHash), peerNotifications)\n\n \/\/ ask for the infohash\n n.dht.PeersRequest(infoHash, false) \n\n for peerNotification := range peerNotifications {\n go func(peerNotification string) {\n\n }(peerNotification)\n }\n\n return\n}\n\n\/* LISTENER *\/\ntype Listener struct {\n connChan chan *Conn\n}\n\nfunc (n *Node) Listen(port int) (listener *Listener, err error) {\n connChan := make(chan *Conn)\n listener = &Listener{connChan: connChan}\n \/\/ setup TCP listener\n tcpListener, err := net.Listen(\"tcp\", fmt.Sprintf(\":\"))\n if err != nil {return}\n\n \/\/ loop accepting TCP connections\n go func() {\n for {\n tcpConn, tcpErr := tcpListener.Accept()\n if tcpErr != nil {\n Log(LOG_ERROR, \"%s\", tcpErr)\n }\n\n go func() {\n conn, handshakeError := handleClientConn(tcpConn, &n.config.ownKey, n.config.contactList)\n if err != nil {\n Log(LOG_INFO,\n \"handling client connection from address %s %s\",\n tcpConn.RemoteAddr().String(), handshakeError)\n } else {\n connChan <- conn\n }\n }() \n }\n }()\n\n \/\/ announce yourself to the DHT\n go n.announceSelf()\n\n return\n}\n\nfunc (n *Node) announceSelf() error {\n infoHash, err := pubKeyToInfoHash(&n.config.ownKey.PublicKey)\n if (err != nil) {return err}\n\n for {\n \/\/ the peers request is done only for the purpose of announcing\n n.dht.PeersRequest(infoHash, true) \n time.Sleep(10 * time.Second)\n }\n}\n\nfunc pubKeyToInfoHash(pub *rsa.PublicKey) (string, error) {\n pubKeyBytes, err := x509.MarshalPKIXPublicKey(pub)\n if err != nil { return \"\", err}\n sha1Bytes := sha1.Sum(pubKeyBytes)\n return hex.EncodeToString(sha1Bytes[:]), nil\n}\n\nfunc (l *Listener) Accept() (c net.Conn, err error) {\n conn := <- l.connChan\n return conn, nil\n}\n\nfunc (l *Listener) Close() error {\n return nil\n}\n\nfunc Addr() net.Addr {\n return nil\n}\n\nfunc handleClientConn(rawConn net.Conn,\n ownKey *rsa.PrivateKey,\n contacts *ContactList) (conn *Conn, err error) {\n\n var handshakeLen int64\n err = binary.Read(rawConn, binary.BigEndian, &handshakeLen)\n if (err != nil) { return }\n\n ciphertext := make([]byte, handshakeLen)\n _, err = io.ReadFull(rawConn, ciphertext)\n if (err != nil) { return }\n\n plain, err := rsa.DecryptPKCS1v15(nil, ownKey, ciphertext)\n if (err != nil) { return }\n\n connReq := ConnRequest{}\n err = connReq.UnmarshalBinary(plain)\n if (err != nil) { return }\n\n _, privKey, err := box.GenerateKey(rand.Reader)\n if (err != nil) { return }\n\n var sharedKey [sessionKeyLen]byte\n box.Precompute(&sharedKey, connReq.sessionKey, privKey) \n\n return &Conn{Conn: rawConn,\n sharedKey: &sharedKey,\n \/\/ TODO: circular buffer capacity may cause \n \/\/ streaming to fail. to avoid,\n \/\/ put a cap on the size of the encrypted chunks\n readBuf: stream.NewCircularBuf(readBufferCapacity),\n readerBufMutex: &sync.Mutex{}}, nil\n}\n\n\/* connection request *\/\ntype ConnRequest struct {\n peerPub *rsa.PublicKey\n sessionKey *[sessionKeyLen]byte \n}\n\ntype ConnResponse struct {\n\n}\n\nfunc (r *ConnRequest) MarshalBinary() (data []byte, err error) {\n pubKeyBytes, err := x509.MarshalPKIXPublicKey(r.peerPub)\n if (err != nil) { return }\n\n return append((*r.sessionKey)[:], pubKeyBytes...), nil\n}\n\nfunc (r *ConnRequest) UnmarshalBinary(data []byte) (err error) {\n copiedBytes := copy(r.sessionKey[:], data[:sessionKeyLen])\n if (copiedBytes < sessionKeyLen) {\n return errors.New(\"session key too short.\") \n }\n\n someKey, err := x509.ParsePKIXPublicKey(data)\n pubKey := someKey.(*rsa.PublicKey)\n if (err != nil) { return }\n r.peerPub = pubKey\n\n return\n}\n\n\/* CONNECTION *\/\ntype Conn struct {\n net.Conn \/\/ underlying network connection \n sharedKey *[sessionKeyLen]byte\n\n \/\/ a buffer that is used to store in excess data, not yet read\n \/\/ but already decrypted\n readBuf *stream.CircularBuf\n readerBufMutex *sync.Mutex\n}\n\nfunc (c *Conn) Read(b []byte) (readBytes int, err error) {\n c.readerBufMutex.Lock()\n defer c.readerBufMutex.Unlock()\n\n readBytes, _ = c.readBuf.Read(b)\n if readBytes > 0 { return readBytes, nil } \/\/ just serve the data from the buffer\n\n \/\/ if the buffer is empty\n msg, err := c.readFromConn()\n readBytes = copy(b, msg)\n if readBytes < len(msg) { \n \/\/ there's data unread that needs to be buffered\n _, err = c.readBuf.Write(msg[readBytes:])\n if (err != nil) {return 0, err}\n }\n\n return\n}\n\n\/\/ reads data from the network connection\nfunc (c *Conn) readFromConn() (msg []byte, err error) {\n var msgLen uint64\n err = binary.Read(c.Conn, binary.BigEndian, &msgLen)\n if (err != nil) { return nil, err}\n\n cipherBuf := make([]byte, msgLen)\n _, err = io.ReadFull(c.Conn, cipherBuf)\n if (err != nil) {return nil, err}\n msg, err = Decrypt(cipherBuf, c.sharedKey)\n return\n}\n\nfunc (c *Conn) Write(msg []byte) (n int, err error) {\n ciphertext, err := Encrypt(msg, c.sharedKey)\n if err != nil {return 0, err}\n final := lenPrefix(ciphertext)\n\n return c.Conn.Write(final) \n}\n\nfunc (c Conn) Close() error {\n return nil\n}\n\nfunc CiphertextLength(msgLen int) int {\n return box.Overhead + nonceLen + msgLen\n}\n\nfunc MsgLength(cipherLen int) int {\n return cipherLen - box.Overhead - nonceLen\n}\n\n\nfunc Encrypt(msg []byte, sharedKey *[sessionKeyLen]byte) (ciphertext []byte, err error) {\n err = nil\n ciphertext = make([]byte, nonceLen)\n var nonce [nonceLen]byte\n _, err = rand.Read(ciphertext[:nonceLen])\n if err != nil { return }\n\n copy(nonce[:], ciphertext[:nonceLen])\n\n ciphertext = box.SealAfterPrecomputation(ciphertext, msg, &nonce, sharedKey)\n return\n}\n\nfunc Decrypt(ciphertext []byte, sharedKey *[sessionKeyLen]byte) (msg []byte, err error) {\n nonceSlice := ciphertext[:nonceLen]\n var nonce [nonceLen]byte\n copy(nonce[:], nonceSlice)\n\n msgLen := MsgLength(len(ciphertext))\n msg = make([]byte, msgLen) \n\n _, success := box.OpenAfterPrecomputation(msg, ciphertext[nonceLen:], &nonce, sharedKey)\n\n if !success {\n return nil, DecryptError{}\n } else {\n return msg, nil\n }\n}\n\n\/\/ prefix a buffer with its length\nfunc lenPrefix(b []byte) ([]byte) {\n buf := new(bytes.Buffer)\n binary.Write(buf, binary.BigEndian, uint64(len(b)))\n return append(buf.Bytes(), b...)\n}\n\ntype DecryptError struct {\n}\nfunc (e DecryptError) Error() string { return \"failed to decrypt message.\"}\n\ntype PeerChan (chan<- string)\n\/* DHT *\/\n\/\/ an enhanced version of the DHT that allows per infohash subscriptions\n\/\/ with notifications sent down channels\ntype DHT struct {\n *dht.DHT\n\n subscribers *map[dht.InfoHash](*ChanSet)\n subMutex *sync.Mutex\n}\n\n\n\/\/ holy mother of god... no set type and no generics\ntype ChanSet struct {\n set map[PeerChan]bool\n}\n\nfunc (set *ChanSet) Add(ch PeerChan) bool {\n _, found := set.set[ch]\n set.set[ch] = true\n return !found \n}\nfunc (set *ChanSet) Remove(ch PeerChan) {\n delete(set.set, ch)\n}\n\nfunc newDHT(dhtClient *dht.DHT) *DHT {\n mp := make(map[dht.InfoHash](*ChanSet))\n return &DHT{dhtClient, &mp, &sync.Mutex{}}\n}\n\nfunc (d *DHT) Run() {\n \/\/ launch the goroutine that deals with dispatching notifications about peers\n go d.drainResults()\n\n \/\/ run the underlying dht client normally\n d.DHT.Run()\n}\n\n\/\/ subscribe for peer notifications on that infohash\nfunc (d *DHT) subscribeToInfoHash(infoHash dht.InfoHash, notificationChan PeerChan) {\n d.subMutex.Lock()\n defer d.subMutex.Unlock()\n chanSet := (*d.subscribers)[infoHash]\n if (chanSet == nil) {\n chanSet = &ChanSet{}\n (*d.subscribers)[infoHash] = chanSet\n }\n chanSet.Add(notificationChan)\n}\n\nfunc (dht *DHT) unsubscribeToInfoHash(infoHash dht.InfoHash, notificationChan PeerChan) {\n dht.subMutex.Lock()\n defer dht.subMutex.Unlock()\n\n chanSet := (*dht.subscribers)[infoHash]\n if (chanSet != nil) {\n chanSet.Remove(notificationChan)\n }\n}\n\nfunc (dht *DHT) drainResults() {\n for batch := range dht.PeersRequestResults {\n dht.notifyOfPeers(batch)\n }\n}\n\nfunc (dht *DHT) notifyOfPeers(batch map[dht.InfoHash][]string) {\n dht.subMutex.Lock()\n defer dht.subMutex.Unlock()\n\n for infoHash, peers := range batch {\n for _, peer := range peers {\n peerChanSet := (*dht.subscribers)[infoHash]\n for peerChan, _ := range peerChanSet.set {\n \/\/ do something with e.Value\n peerChan <- peer\n }\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage workflow\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\t\"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\ntype TestElasticsearchDB struct {\n\t*ResourceDB\n\tmapping string\n}\n\ntype TestElasticsearchBody struct {\n\tID piazza.Ident `json:\"id\"`\n\tValue int `json:\"value\"`\n}\n\nconst TestElasticsearchSettings = `{\n\t\"mappings\" : {\n\t\t\"Obj5\":{\n\t\t\t\"properties\":{\n\t\t\t\t\"id5\": {\n\t\t\t\t\t\"type\":\"integer\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t},\n\t\t\t\t\"data5\": {\n\t\t\t\t\t\"type\":\"string\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t},\n\t\t\t\t\"foo5\": {\n\t\t\t\t\t\"type\":\"boolean\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}`\n\nconst TestElasticsearchMapping = \"TestElasticsearch\"\n\nfunc NewTestElasticsearchDB(service *Service, esi elasticsearch.IIndex) (*TestElasticsearchDB, error) {\n\n\trdb, err := NewResourceDB(service, esi, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = esi.SetMapping(TestElasticsearchMapping, TestElasticsearchSettings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tetrdb := TestElasticsearchDB{ResourceDB: rdb, mapping: TestElasticsearchMapping}\n\n\t\/*time.Sleep(5 * time.Second)\n\n\tok, err := esi.IndexExists()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Index %s failes to exist after creation\", esi.IndexName())\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Index %s does not exist after creation\", esi.IndexName())\n\t}\n\n\tok, err = esi.TypeExists(TestElasticsearchDBMapping)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Type %s fails to exist in index %s after creation\", TestElasticsearchDBMapping, esi.IndexName())\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Index %s does not exist in index %s after creation\", TestElasticsearchDBMapping, esi.IndexName())\n\t}*\/\n\n\treturn &etrdb, nil\n}\n\nfunc (db *TestElasticsearchDB) PostData(obj interface{}, id piazza.Ident) (piazza.Ident, error) {\n\tvar p *TestElasticsearchBody\n\tok1 := false\n\ttemp1, ok1 := obj.(TestElasticsearchBody)\n\tif !ok1 {\n\t\ttemp2, ok2 := obj.(*TestElasticsearchBody)\n\t\tif !ok2 {\n\t\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: was not given an TestElasticsearchBody to post\")\n\t\t}\n\t\tp = temp2\n\t} else {\n\t\tp = &temp1\n\t}\n\n\tindexResult, err := db.Esi.PostData(db.mapping, id.String(), p)\n\tif err != nil {\n\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: %s\\n%#v\\n%#v\", err, db.mapping, p)\n\t}\n\tif !indexResult.Created {\n\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: not created\")\n\t}\n\n\treturn id, nil\n}\n\nfunc (db *TestElasticsearchDB) GetAll(format *piazza.JsonPagination) ([]TestElasticsearchBody, int64, error) {\n\tbodies := []TestElasticsearchBody{}\n\n\texists, err := db.Esi.TypeExists(db.mapping)\n\tif err != nil {\n\t\treturn bodies, 0, err\n\t}\n\tif !exists {\n\t\treturn bodies, 0, nil\n\t}\n\n\tsearchResult, err := db.Esi.FilterByMatchAll(db.mapping, format)\n\tif err != nil {\n\t\treturn nil, 0, LoggedError(\"TestElasticsearchDB.GetAll failed: %s\", err)\n\t}\n\tif searchResult == nil {\n\t\treturn nil, 0, LoggedError(\"TestElasticsearchDB.GetAll failed: no searchResult\")\n\t}\n\n\tif searchResult != nil && searchResult.GetHits() != nil {\n\t\tfor _, hit := range *searchResult.GetHits() {\n\t\t\tvar body TestElasticsearchBody\n\t\t\terr := json.Unmarshal(*hit.Source, &body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\tbodies = append(bodies, body)\n\t\t}\n\t}\n\n\treturn bodies, searchResult.TotalHits(), nil\n}\n\nfunc (db *TestElasticsearchDB) GetVersion() (string, error) {\n\tv := db.Esi.GetVersion()\n\treturn v, nil\n}\n<commit_msg>fix payload<commit_after>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage workflow\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\t\"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\ntype TestElasticsearchDB struct {\n\t*ResourceDB\n\tmapping string\n}\n\ntype TestElasticsearchBody struct {\n\tID piazza.Ident `json:\"id\"`\n\tValue int `json:\"value\"`\n}\n\nconst TestElasticsearchSettings = `{\n\t\t\"Obj5\":{\n\t\t\t\"properties\":{\n\t\t\t\t\"id5\": {\n\t\t\t\t\t\"type\":\"integer\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t},\n\t\t\t\t\"data5\": {\n\t\t\t\t\t\"type\":\"string\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t},\n\t\t\t\t\"foo5\": {\n\t\t\t\t\t\"type\":\"boolean\",\n\t\t\t\t\t\"index\": \"not_analyzed\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\nconst TestElasticsearchMapping = \"TestElasticsearch\"\n\nfunc NewTestElasticsearchDB(service *Service, esi elasticsearch.IIndex) (*TestElasticsearchDB, error) {\n\n\trdb, err := NewResourceDB(service, esi, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = esi.SetMapping(TestElasticsearchMapping, TestElasticsearchSettings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tetrdb := TestElasticsearchDB{ResourceDB: rdb, mapping: TestElasticsearchMapping}\n\n\t\/*time.Sleep(5 * time.Second)\n\n\tok, err := esi.IndexExists()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Index %s failes to exist after creation\", esi.IndexName())\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Index %s does not exist after creation\", esi.IndexName())\n\t}\n\n\tok, err = esi.TypeExists(TestElasticsearchDBMapping)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Type %s fails to exist in index %s after creation\", TestElasticsearchDBMapping, esi.IndexName())\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Index %s does not exist in index %s after creation\", TestElasticsearchDBMapping, esi.IndexName())\n\t}*\/\n\n\treturn &etrdb, nil\n}\n\nfunc (db *TestElasticsearchDB) PostData(obj interface{}, id piazza.Ident) (piazza.Ident, error) {\n\tvar p *TestElasticsearchBody\n\tok1 := false\n\ttemp1, ok1 := obj.(TestElasticsearchBody)\n\tif !ok1 {\n\t\ttemp2, ok2 := obj.(*TestElasticsearchBody)\n\t\tif !ok2 {\n\t\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: was not given an TestElasticsearchBody to post\")\n\t\t}\n\t\tp = temp2\n\t} else {\n\t\tp = &temp1\n\t}\n\n\tindexResult, err := db.Esi.PostData(db.mapping, id.String(), p)\n\tif err != nil {\n\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: %s\\n%#v\\n%#v\", err, db.mapping, p)\n\t}\n\tif !indexResult.Created {\n\t\treturn piazza.NoIdent, LoggedError(\"TestElasticsearchDB.PostData failed: not created\")\n\t}\n\n\treturn id, nil\n}\n\nfunc (db *TestElasticsearchDB) GetAll(format *piazza.JsonPagination) ([]TestElasticsearchBody, int64, error) {\n\tbodies := []TestElasticsearchBody{}\n\n\texists, err := db.Esi.TypeExists(db.mapping)\n\tif err != nil {\n\t\treturn bodies, 0, err\n\t}\n\tif !exists {\n\t\treturn bodies, 0, nil\n\t}\n\n\tsearchResult, err := db.Esi.FilterByMatchAll(db.mapping, format)\n\tif err != nil {\n\t\treturn nil, 0, LoggedError(\"TestElasticsearchDB.GetAll failed: %s\", err)\n\t}\n\tif searchResult == nil {\n\t\treturn nil, 0, LoggedError(\"TestElasticsearchDB.GetAll failed: no searchResult\")\n\t}\n\n\tif searchResult != nil && searchResult.GetHits() != nil {\n\t\tfor _, hit := range *searchResult.GetHits() {\n\t\t\tvar body TestElasticsearchBody\n\t\t\terr := json.Unmarshal(*hit.Source, &body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\tbodies = append(bodies, body)\n\t\t}\n\t}\n\n\treturn bodies, searchResult.TotalHits(), nil\n}\n\nfunc (db *TestElasticsearchDB) GetVersion() (string, error) {\n\tv := db.Esi.GetVersion()\n\treturn v, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package nycbcra provides non-alpha-premultiplied Y'CbCr-with-alpha image and\n\/\/ color types.\n\/\/\n\/\/ Deprecated: as of Go 1.6. Use the standard image and image\/color packages\n\/\/ instead.\npackage nycbcra \/\/ import \"golang.org\/x\/image\/webp\/nycbcra\"\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n)\n\nfunc init() {\n\tprintln(\"The golang.org\/x\/image\/webp\/nycbcra package is deprecated, as of Go 1.6. \" +\n\t\t\"Use the standard image and image\/color packages instead.\")\n}\n\n\/\/ TODO: move this to the standard image and image\/color packages, so that the\n\/\/ image\/draw package can have fast-path code. Moving would rename:\n\/\/\tnycbcra.Color to color.NYCbCrA\n\/\/\tnycbcra.ColorModel to color.NYCbCrAModel\n\/\/\tnycbcra.Image to image.NYCbCrA\n\n\/\/ Color represents a non-alpha-premultiplied Y'CbCr-with-alpha color, having\n\/\/ 8 bits each for one luma, two chroma and one alpha component.\ntype Color struct {\n\tcolor.YCbCr\n\tA uint8\n}\n\nfunc (c Color) RGBA() (r, g, b, a uint32) {\n\tr8, g8, b8 := color.YCbCrToRGB(c.Y, c.Cb, c.Cr)\n\ta = uint32(c.A) * 0x101\n\tr = uint32(r8) * 0x101 * a \/ 0xffff\n\tg = uint32(g8) * 0x101 * a \/ 0xffff\n\tb = uint32(b8) * 0x101 * a \/ 0xffff\n\treturn\n}\n\n\/\/ ColorModel is the Model for non-alpha-premultiplied Y'CbCr-with-alpha colors.\nvar ColorModel color.Model = color.ModelFunc(nYCbCrAModel)\n\nfunc nYCbCrAModel(c color.Color) color.Color {\n\tswitch c := c.(type) {\n\tcase Color:\n\t\treturn c\n\tcase color.YCbCr:\n\t\treturn Color{c, 0xff}\n\t}\n\tr, g, b, a := c.RGBA()\n\n\t\/\/ Convert from alpha-premultiplied to non-alpha-premultiplied.\n\tif a != 0 {\n\t\tr = (r * 0xffff) \/ a\n\t\tg = (g * 0xffff) \/ a\n\t\tb = (b * 0xffff) \/ a\n\t}\n\n\ty, u, v := color.RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))\n\treturn Color{color.YCbCr{Y: y, Cb: u, Cr: v}, uint8(a >> 8)}\n}\n\n\/\/ Image is an in-memory image of non-alpha-premultiplied Y'CbCr-with-alpha\n\/\/ colors. A and AStride are analogous to the Y and YStride fields of the\n\/\/ embedded YCbCr.\ntype Image struct {\n\timage.YCbCr\n\tA []uint8\n\tAStride int\n}\n\nfunc (p *Image) ColorModel() color.Model {\n\treturn ColorModel\n}\n\nfunc (p *Image) At(x, y int) color.Color {\n\treturn p.NYCbCrAAt(x, y)\n}\n\nfunc (p *Image) NYCbCrAAt(x, y int) Color {\n\tif !(image.Point{X: x, Y: y}.In(p.Rect)) {\n\t\treturn Color{}\n\t}\n\tyi := p.YOffset(x, y)\n\tci := p.COffset(x, y)\n\tai := p.AOffset(x, y)\n\treturn Color{\n\t\tcolor.YCbCr{\n\t\t\tY: p.Y[yi],\n\t\t\tCb: p.Cb[ci],\n\t\t\tCr: p.Cr[ci],\n\t\t},\n\t\tp.A[ai],\n\t}\n}\n\n\/\/ AOffset returns the index of the first element of A that corresponds to\n\/\/ the pixel at (x, y).\nfunc (p *Image) AOffset(x, y int) int {\n\treturn (y-p.Rect.Min.Y)*p.AStride + (x - p.Rect.Min.X)\n}\n\n\/\/ SubImage returns an image representing the portion of the image p visible\n\/\/ through r. The returned value shares pixels with the original image.\nfunc (p *Image) SubImage(r image.Rectangle) image.Image {\n\t\/\/ TODO: share code with image.NewYCbCr when this type moves into the\n\t\/\/ standard image package.\n\tr = r.Intersect(p.Rect)\n\t\/\/ If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside\n\t\/\/ either r1 or r2 if the intersection is empty. Without explicitly checking for\n\t\/\/ this, the Pix[i:] expression below can panic.\n\tif r.Empty() {\n\t\treturn &Image{\n\t\t\tYCbCr: image.YCbCr{\n\t\t\t\tSubsampleRatio: p.SubsampleRatio,\n\t\t\t},\n\t\t}\n\t}\n\tyi := p.YOffset(r.Min.X, r.Min.Y)\n\tci := p.COffset(r.Min.X, r.Min.Y)\n\tai := p.AOffset(r.Min.X, r.Min.Y)\n\treturn &Image{\n\t\tYCbCr: image.YCbCr{\n\t\t\tY: p.Y[yi:],\n\t\t\tCb: p.Cb[ci:],\n\t\t\tCr: p.Cr[ci:],\n\t\t\tSubsampleRatio: p.SubsampleRatio,\n\t\t\tYStride: p.YStride,\n\t\t\tCStride: p.CStride,\n\t\t\tRect: r,\n\t\t},\n\t\tA: p.A[ai:],\n\t\tAStride: p.AStride,\n\t}\n}\n\n\/\/ Opaque scans the entire image and reports whether it is fully opaque.\nfunc (p *Image) Opaque() bool {\n\tif p.Rect.Empty() {\n\t\treturn true\n\t}\n\ti0, i1 := 0, p.Rect.Dx()\n\tfor y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {\n\t\tfor _, a := range p.A[i0:i1] {\n\t\t\tif a != 0xff {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\ti0 += p.AStride\n\t\ti1 += p.AStride\n\t}\n\treturn true\n}\n\n\/\/ New returns a new Image with the given bounds and subsample ratio.\nfunc New(r image.Rectangle, subsampleRatio image.YCbCrSubsampleRatio) *Image {\n\t\/\/ TODO: share code with image.NewYCbCr when this type moves into the\n\t\/\/ standard image package.\n\tw, h, cw, ch := r.Dx(), r.Dy(), 0, 0\n\tswitch subsampleRatio {\n\tcase image.YCbCrSubsampleRatio422:\n\t\tcw = (r.Max.X+1)\/2 - r.Min.X\/2\n\t\tch = h\n\tcase image.YCbCrSubsampleRatio420:\n\t\tcw = (r.Max.X+1)\/2 - r.Min.X\/2\n\t\tch = (r.Max.Y+1)\/2 - r.Min.Y\/2\n\tcase image.YCbCrSubsampleRatio440:\n\t\tcw = w\n\t\tch = (r.Max.Y+1)\/2 - r.Min.Y\/2\n\tdefault:\n\t\t\/\/ Default to 4:4:4 subsampling.\n\t\tcw = w\n\t\tch = h\n\t}\n\tb := make([]byte, 2*w*h+2*cw*ch)\n\t\/\/ TODO: use s[i:j:k] notation to set the cap.\n\treturn &Image{\n\t\tYCbCr: image.YCbCr{\n\t\t\tY: b[:w*h],\n\t\t\tCb: b[w*h+0*cw*ch : w*h+1*cw*ch],\n\t\t\tCr: b[w*h+1*cw*ch : w*h+2*cw*ch],\n\t\t\tSubsampleRatio: subsampleRatio,\n\t\t\tYStride: w,\n\t\t\tCStride: cw,\n\t\t\tRect: r,\n\t\t},\n\t\tA: b[w*h+2*cw*ch:],\n\t\tAStride: w,\n\t}\n}\n<commit_msg>webp\/nycbcra: delete deprecated package<commit_after><|endoftext|>"} {"text":"<commit_before>package esi\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/evepraisal\/go-evepraisal\"\n\t\"github.com\/sethgrid\/pester\"\n)\n\ntype MarketOrder struct {\n\tID int64 `json:\"order_id\"`\n\tType int64 `json:\"type_id\"`\n\tStationID int64 `json:\"location_id\"`\n\tVolume int64 `json:\"volume_remain\"`\n\tMinVolume int64 `json:\"min_volume\"`\n\tPrice float64 `json:\"price\"`\n\tBuy bool `json:\"is_buy_order\"`\n\tDuration int64 `json:\"duration\"`\n\tIssued string `json:\"issued\"`\n\tVolumeEntered int64 `json:\"volumeEntered\"`\n\tRange string `json:\"range\"`\n}\n\nvar SpecialRegions = []struct {\n\tname string\n\tstations []int64\n}{\n\t{\n\t\t\/\/ 10000002\n\t\tname: \"jita\",\n\t\tstations: []int64{60003466, 60003760, 60003757, 60000361, 60000451, 60004423, 60002959, 60003460, 60003055, 60003469, 60000364, 60002953, 60000463, 60003463},\n\t}, {\n\t\t\/\/ 10000043\n\t\tname: \"amarr\",\n\t\tstations: []int64{60008950, 60002569, 60008494},\n\t}, {\n\t\t\/\/ 10000032\n\t\tname: \"dodixie\",\n\t\tstations: []int64{60011866, 60001867},\n\t}, {\n\t\t\/\/ 10000042\n\t\tname: \"hek\",\n\t\tstations: []int64{60005236, 60004516, 60015140, 60005686, 60011287, 60005236},\n\t},\n}\n\ntype PriceFetcher struct {\n\tdb evepraisal.PriceDB\n\tclient *pester.Client\n\tbaseURL string\n\n\tstop chan bool\n\twg *sync.WaitGroup\n}\n\nfunc NewPriceFetcher(priceDB evepraisal.PriceDB, baseURL string, client *pester.Client) (*PriceFetcher, error) {\n\n\tp := &PriceFetcher{\n\t\tdb: priceDB,\n\t\tclient: client,\n\t\tbaseURL: baseURL,\n\n\t\tstop: make(chan bool),\n\t\twg: &sync.WaitGroup{},\n\t}\n\n\tp.wg.Add(1)\n\tgo func() {\n\t\tdefer p.wg.Done()\n\t\tfor {\n\t\t\tstart := time.Now()\n\t\t\tp.runOnce()\n\t\t\tselect {\n\t\t\tcase <-time.After((5 * time.Minute) - time.Since(start)):\n\t\t\tcase <-p.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn p, nil\n}\n\nfunc (p *PriceFetcher) Close() error {\n\tclose(p.stop)\n\tp.wg.Wait()\n\treturn nil\n}\n\nfunc regionNames() []string {\n\tregions := make([]string, len(SpecialRegions)+1)\n\tregions[0] = \"universe\"\n\tfor i, region := range SpecialRegions {\n\t\tregions[i+1] = region.name\n\t}\n\treturn regions\n}\n\nfunc (p *PriceFetcher) runOnce() {\n\tlog.Println(\"Fetch market data\")\n\tpriceMap, err := p.FetchOrderData(p.client, p.baseURL, []int{10000002, 10000042, 10000027, 10000032, 10000043})\n\tif err != nil {\n\t\tlog.Println(\"ERROR: fetching market data: \", err)\n\t\treturn\n\t}\n\n\tpricesFromCCP, err := p.FetchPriceData(p.client, p.baseURL)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: fetching CCP price data: \", err)\n\t\treturn\n\t}\n\n\tfor _, regionName := range regionNames() {\n\t\t\/\/ Use CCP's price if our regional price is too low\n\t\tfor typeID, prices := range pricesFromCCP {\n\t\t\tp, ok := priceMap[regionName][typeID]\n\t\t\tif !ok || p.All.Volume < 50 {\n\t\t\t\tpriceMap[regionName][typeID] = prices\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Use the universe price if our regional price is too low (override CCP's price)\n\t\tfor typeID, p := range priceMap[regionName] {\n\t\t\tuniversePrice, ok := priceMap[\"universe\"][typeID]\n\t\t\tif ok && p.All.Volume < 50 {\n\t\t\t\tuniversePrice.Strategy = \"orders_universe\"\n\t\t\t\tpriceMap[regionName][typeID] = universePrice\n\t\t\t}\n\t\t}\n\t}\n\n\tfor market, pmap := range priceMap {\n\t\t\/\/ this takes awhile, so let's check to see if we should stop between markets\n\t\tselect {\n\t\tcase <-p.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tfor itemName, price := range pmap {\n\t\t\terr = p.db.UpdatePrice(market, itemName, price)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error when updating price: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"Done fetching market data\")\n}\n\nfunc (p *PriceFetcher) freshPriceMap() map[string]map[int64]evepraisal.Prices {\n\tpriceMap := make(map[string]map[int64]evepraisal.Prices)\n\tfor _, region := range SpecialRegions {\n\t\tpriceMap[region.name] = make(map[int64]evepraisal.Prices)\n\t}\n\tpriceMap[\"universe\"] = make(map[int64]evepraisal.Prices)\n\treturn priceMap\n}\n\nfunc (p *PriceFetcher) FetchPriceData(client *pester.Client, baseURL string) (map[int64]evepraisal.Prices, error) {\n\tstart := time.Now()\n\turl := fmt.Sprintf(\"%s\/markets\/prices\/?datasource=tranquility\", baseURL)\n\tesiPrices := make([]struct {\n\t\tTypeID int64 `json:\"type_id\"`\n\t\tAveragePrice float64 `json:\"average_price\"`\n\t\tAdjustedPrice float64 `json:\"adjusted_price\"`\n\t}, 0)\n\terr := fetchURL(client, url, &esiPrices)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallPrices := make(map[int64]evepraisal.Prices, len(esiPrices))\n\tfor _, p := range esiPrices {\n\t\tstats := evepraisal.PriceStats{\n\t\t\tAverage: p.AveragePrice,\n\t\t\tMax: p.AdjustedPrice,\n\t\t\tMedian: p.AdjustedPrice,\n\t\t\tMin: p.AdjustedPrice,\n\t\t\tPercentile: p.AdjustedPrice,\n\t\t}\n\t\tallPrices[p.TypeID] = evepraisal.Prices{\n\t\t\tAll: stats,\n\t\t\tBuy: stats,\n\t\t\tSell: stats,\n\t\t\tUpdated: start,\n\t\t\tStrategy: \"ccp\",\n\t\t}\n\t}\n\treturn allPrices, nil\n}\n\nfunc (p *PriceFetcher) FetchOrderData(client *pester.Client, baseURL string, regionIDs []int) (map[string]map[int64]evepraisal.Prices, error) {\n\tallOrdersByType := make(map[int64][]MarketOrder)\n\tfinished := make(chan bool, 1)\n\tworkerStop := make(chan bool, 1)\n\terrChannel := make(chan error, 1)\n\tfetchStart := time.Now()\n\n\tl := &sync.Mutex{}\n\trequestAndProcess := func(url string) (bool, error) {\n\t\tvar orders []MarketOrder\n\t\terr := fetchURL(client, url, &orders)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tl.Lock()\n\t\tfor _, order := range orders {\n\t\t\tallOrdersByType[order.Type] = append(allOrdersByType[order.Type], order)\n\t\t}\n\t\tl.Unlock()\n\t\tif len(orders) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}\n\n\twg := &sync.WaitGroup{}\n\tfor _, regionID := range regionIDs {\n\t\twg.Add(1)\n\t\tgo func(regionID int) {\n\t\t\tdefer wg.Done()\n\t\t\tpage := 1\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-workerStop:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\turl := fmt.Sprintf(\"%s\/markets\/%d\/orders\/?datasource=tranquility&order_type=all&page=%d\", baseURL, regionID, page)\n\t\t\t\thasMore, err := requestAndProcess(url)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChannel <- fmt.Errorf(\"Failed to fetch market orders: %s\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif !hasMore {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tpage++\n\t\t\t}\n\t\t}(regionID)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(finished)\n\t}()\n\n\tselect {\n\tcase <-finished:\n\tcase <-p.stop:\n\t\tclose(workerStop)\n\t\treturn nil, errors.New(\"Stopping during price fetch\")\n\tcase err := <-errChannel:\n\t\tif err != nil {\n\t\t\tclose(workerStop)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Println(\"Performing aggregates on order data\")\n\t\/\/ Calculate aggregates that we care about:\n\tnewPriceMap := p.freshPriceMap()\n\tfor k, orders := range allOrdersByType {\n\t\tfor _, region := range SpecialRegions {\n\t\t\tfilteredOrders := make([]MarketOrder, 0)\n\t\t\tordercount := 0\n\t\t\tfor _, order := range orders {\n\t\t\t\tmatched := false\n\t\t\t\tfor _, station := range region.stations {\n\t\t\t\t\tif station == order.StationID {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tordercount++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif matched {\n\t\t\t\t\tfilteredOrders = append(filteredOrders, order)\n\t\t\t\t}\n\t\t\t}\n\t\t\tagg := getPriceAggregatesForOrders(filteredOrders)\n\t\t\tagg.Updated = fetchStart\n\t\t\tagg.Strategy = \"orders\"\n\t\t\tnewPriceMap[region.name][k] = agg\n\t\t}\n\t\tagg := getPriceAggregatesForOrders(orders)\n\t\tagg.Updated = fetchStart\n\t\tnewPriceMap[\"universe\"][k] = agg\n\t}\n\n\tlog.Println(\"Finished performing aggregates on order data\")\n\n\treturn newPriceMap, nil\n}\n<commit_msg>Use sell volume (instead of buy\/sell) to determine pricing fallback<commit_after>package esi\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/evepraisal\/go-evepraisal\"\n\t\"github.com\/sethgrid\/pester\"\n)\n\ntype MarketOrder struct {\n\tID int64 `json:\"order_id\"`\n\tType int64 `json:\"type_id\"`\n\tStationID int64 `json:\"location_id\"`\n\tVolume int64 `json:\"volume_remain\"`\n\tMinVolume int64 `json:\"min_volume\"`\n\tPrice float64 `json:\"price\"`\n\tBuy bool `json:\"is_buy_order\"`\n\tDuration int64 `json:\"duration\"`\n\tIssued string `json:\"issued\"`\n\tVolumeEntered int64 `json:\"volumeEntered\"`\n\tRange string `json:\"range\"`\n}\n\nvar SpecialRegions = []struct {\n\tname string\n\tstations []int64\n}{\n\t{\n\t\t\/\/ 10000002\n\t\tname: \"jita\",\n\t\tstations: []int64{60003466, 60003760, 60003757, 60000361, 60000451, 60004423, 60002959, 60003460, 60003055, 60003469, 60000364, 60002953, 60000463, 60003463},\n\t}, {\n\t\t\/\/ 10000043\n\t\tname: \"amarr\",\n\t\tstations: []int64{60008950, 60002569, 60008494},\n\t}, {\n\t\t\/\/ 10000032\n\t\tname: \"dodixie\",\n\t\tstations: []int64{60011866, 60001867},\n\t}, {\n\t\t\/\/ 10000042\n\t\tname: \"hek\",\n\t\tstations: []int64{60005236, 60004516, 60015140, 60005686, 60011287, 60005236},\n\t},\n}\n\ntype PriceFetcher struct {\n\tdb evepraisal.PriceDB\n\tclient *pester.Client\n\tbaseURL string\n\n\tstop chan bool\n\twg *sync.WaitGroup\n}\n\nfunc NewPriceFetcher(priceDB evepraisal.PriceDB, baseURL string, client *pester.Client) (*PriceFetcher, error) {\n\n\tp := &PriceFetcher{\n\t\tdb: priceDB,\n\t\tclient: client,\n\t\tbaseURL: baseURL,\n\n\t\tstop: make(chan bool),\n\t\twg: &sync.WaitGroup{},\n\t}\n\n\tp.wg.Add(1)\n\tgo func() {\n\t\tdefer p.wg.Done()\n\t\tfor {\n\t\t\tstart := time.Now()\n\t\t\tp.runOnce()\n\t\t\tselect {\n\t\t\tcase <-time.After((5 * time.Minute) - time.Since(start)):\n\t\t\tcase <-p.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn p, nil\n}\n\nfunc (p *PriceFetcher) Close() error {\n\tclose(p.stop)\n\tp.wg.Wait()\n\treturn nil\n}\n\nfunc regionNames() []string {\n\tregions := make([]string, len(SpecialRegions)+1)\n\tregions[0] = \"universe\"\n\tfor i, region := range SpecialRegions {\n\t\tregions[i+1] = region.name\n\t}\n\treturn regions\n}\n\nfunc (p *PriceFetcher) runOnce() {\n\tlog.Println(\"Fetch market data\")\n\tpriceMap, err := p.FetchOrderData(p.client, p.baseURL, []int{10000002, 10000042, 10000027, 10000032, 10000043})\n\tif err != nil {\n\t\tlog.Println(\"ERROR: fetching market data: \", err)\n\t\treturn\n\t}\n\n\tpricesFromCCP, err := p.FetchPriceData(p.client, p.baseURL)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: fetching CCP price data: \", err)\n\t\treturn\n\t}\n\n\tfor _, regionName := range regionNames() {\n\t\t\/\/ Use CCP's price if our regional price is too low\n\t\tfor typeID, prices := range pricesFromCCP {\n\t\t\tp, ok := priceMap[regionName][typeID]\n\t\t\tif !ok || p.Sell.Volume < 20 {\n\t\t\t\tpriceMap[regionName][typeID] = prices\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Use the universe price if our regional price is too low (override CCP's price)\n\t\tfor typeID, p := range priceMap[regionName] {\n\t\t\tuniversePrice, ok := priceMap[\"universe\"][typeID]\n\t\t\tif ok && p.Sell.Volume < 20 {\n\t\t\t\tuniversePrice.Strategy = \"orders_universe\"\n\t\t\t\tpriceMap[regionName][typeID] = universePrice\n\t\t\t}\n\t\t}\n\t}\n\n\tfor market, pmap := range priceMap {\n\t\t\/\/ this takes awhile, so let's check to see if we should stop between markets\n\t\tselect {\n\t\tcase <-p.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tfor itemName, price := range pmap {\n\t\t\terr = p.db.UpdatePrice(market, itemName, price)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error when updating price: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"Done fetching market data\")\n}\n\nfunc (p *PriceFetcher) freshPriceMap() map[string]map[int64]evepraisal.Prices {\n\tpriceMap := make(map[string]map[int64]evepraisal.Prices)\n\tfor _, region := range SpecialRegions {\n\t\tpriceMap[region.name] = make(map[int64]evepraisal.Prices)\n\t}\n\tpriceMap[\"universe\"] = make(map[int64]evepraisal.Prices)\n\treturn priceMap\n}\n\nfunc (p *PriceFetcher) FetchPriceData(client *pester.Client, baseURL string) (map[int64]evepraisal.Prices, error) {\n\tstart := time.Now()\n\turl := fmt.Sprintf(\"%s\/markets\/prices\/?datasource=tranquility\", baseURL)\n\tesiPrices := make([]struct {\n\t\tTypeID int64 `json:\"type_id\"`\n\t\tAveragePrice float64 `json:\"average_price\"`\n\t\tAdjustedPrice float64 `json:\"adjusted_price\"`\n\t}, 0)\n\terr := fetchURL(client, url, &esiPrices)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallPrices := make(map[int64]evepraisal.Prices, len(esiPrices))\n\tfor _, p := range esiPrices {\n\t\tstats := evepraisal.PriceStats{\n\t\t\tAverage: p.AveragePrice,\n\t\t\tMax: p.AdjustedPrice,\n\t\t\tMedian: p.AdjustedPrice,\n\t\t\tMin: p.AdjustedPrice,\n\t\t\tPercentile: p.AdjustedPrice,\n\t\t}\n\t\tallPrices[p.TypeID] = evepraisal.Prices{\n\t\t\tAll: stats,\n\t\t\tBuy: stats,\n\t\t\tSell: stats,\n\t\t\tUpdated: start,\n\t\t\tStrategy: \"ccp\",\n\t\t}\n\t}\n\treturn allPrices, nil\n}\n\nfunc (p *PriceFetcher) FetchOrderData(client *pester.Client, baseURL string, regionIDs []int) (map[string]map[int64]evepraisal.Prices, error) {\n\tallOrdersByType := make(map[int64][]MarketOrder)\n\tfinished := make(chan bool, 1)\n\tworkerStop := make(chan bool, 1)\n\terrChannel := make(chan error, 1)\n\tfetchStart := time.Now()\n\n\tl := &sync.Mutex{}\n\trequestAndProcess := func(url string) (bool, error) {\n\t\tvar orders []MarketOrder\n\t\terr := fetchURL(client, url, &orders)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tl.Lock()\n\t\tfor _, order := range orders {\n\t\t\tallOrdersByType[order.Type] = append(allOrdersByType[order.Type], order)\n\t\t}\n\t\tl.Unlock()\n\t\tif len(orders) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}\n\n\twg := &sync.WaitGroup{}\n\tfor _, regionID := range regionIDs {\n\t\twg.Add(1)\n\t\tgo func(regionID int) {\n\t\t\tdefer wg.Done()\n\t\t\tpage := 1\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-workerStop:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\turl := fmt.Sprintf(\"%s\/markets\/%d\/orders\/?datasource=tranquility&order_type=all&page=%d\", baseURL, regionID, page)\n\t\t\t\thasMore, err := requestAndProcess(url)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChannel <- fmt.Errorf(\"Failed to fetch market orders: %s\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif !hasMore {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tpage++\n\t\t\t}\n\t\t}(regionID)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(finished)\n\t}()\n\n\tselect {\n\tcase <-finished:\n\tcase <-p.stop:\n\t\tclose(workerStop)\n\t\treturn nil, errors.New(\"Stopping during price fetch\")\n\tcase err := <-errChannel:\n\t\tif err != nil {\n\t\t\tclose(workerStop)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Println(\"Performing aggregates on order data\")\n\t\/\/ Calculate aggregates that we care about:\n\tnewPriceMap := p.freshPriceMap()\n\tfor k, orders := range allOrdersByType {\n\t\tfor _, region := range SpecialRegions {\n\t\t\tfilteredOrders := make([]MarketOrder, 0)\n\t\t\tordercount := 0\n\t\t\tfor _, order := range orders {\n\t\t\t\tmatched := false\n\t\t\t\tfor _, station := range region.stations {\n\t\t\t\t\tif station == order.StationID {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tordercount++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif matched {\n\t\t\t\t\tfilteredOrders = append(filteredOrders, order)\n\t\t\t\t}\n\t\t\t}\n\t\t\tagg := getPriceAggregatesForOrders(filteredOrders)\n\t\t\tagg.Updated = fetchStart\n\t\t\tagg.Strategy = \"orders\"\n\t\t\tnewPriceMap[region.name][k] = agg\n\t\t}\n\t\tagg := getPriceAggregatesForOrders(orders)\n\t\tagg.Updated = fetchStart\n\t\tnewPriceMap[\"universe\"][k] = agg\n\t}\n\n\tlog.Println(\"Finished performing aggregates on order data\")\n\n\treturn newPriceMap, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"encoding\/json\"\n\t\"compress\/gzip\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\/\/\"os\"\n)\n\n\/*\n\ntype FileStat struct {\n\tPath string\n\tHash string\n\tSize int64\n\tHostname string\n}\n*\/\n\n\/\/ Make the Hostnames an array,\n\/\/ Store every hostname that matches the hash\n\/\/ Fake the file for each to the stdout buffer\ntype Hash struct {\n\tHostnames []string\n\tContents []byte\n}\n\nfunc StringInArray(a string, list map[string]Hash) bool {\n\tfor q, _ := range list {\n\t\tif q == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\nfunc cater(path string) bytes.Buffer {\n\tvar response bytes.Buffer\n\tknownHashes := grab(path)\n\n\tfor _, hash := range knownHashes {\n\n\t\tvar b bytes.Buffer\n\t\tb.Write([]byte(hash.Contents))\n\n\t\tgr, _ := gzip.NewReader(&b)\n\t\tdefer gr.Close()\n\t\tplaintext, _ := ioutil.ReadAll(gr)\n\n\t\tfor _, a := range strings.Split(string(plaintext), \"\\n\") {\n\t\t\tif len(a) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, b := range hash.Hostnames {\n\t\t\t\tline := fmt.Sprintf(\"%s:%s\\n\", b, a)\n\t\t\t\tresponse.Write([]byte(line))\n\t\t\t}\n\t\t}\n\t}\n\treturn response\n\n}\n\n\nfunc grab(path string) map[string]Hash {\n\tvar file_contents_gzipped []byte\n\tknownHashes := make(map[string]Hash)\n\n\tfmt.Printf(\"search path: %s\\n\", path)\n\tconn, _ := redis.Dial(\"tcp\", \":6379\")\n\tdefer conn.Close()\n\tx, _ := redis.Values(conn.Do(\"KEYS\", path))\n\tvar fs FileStat\n\tfor _, z := range x {\n\t\tz = fmt.Sprintf(\"%s\", z)\n\t\treply, _ := redis.String(conn.Do(\"GET\", z))\n\t\ty := []byte(reply)\n\n\t\tjson.Unmarshal(y, &fs)\n\n\t\t\/\/ Check if we already have this hash, before wasting redis time\n\t\tif ! StringInArray(fs.Hash, knownHashes) {\n\t\t\tfile_contents_gzipped_string, _ := redis.String(conn.Do(\"GET\", fs.Hash))\n\t\t\tfile_contents_gzipped = []byte(file_contents_gzipped_string)\n\t\t\t\n\t\t\tfmt.Println(\"NEW HASH\")\n\t\t\tknownHashes[fs.Hash] = Hash{\n\t\t\t\tHostnames: []string{fs.Hostname},\n\t\t\t\tContents: []byte(file_contents_gzipped),\n\t\t\t}\n\t\t} else {\n\t\t\tfile_contents_gzipped = knownHashes[fs.Hash].Contents\n\t\t\told_hostnames := knownHashes[fs.Hash].Hostnames\n\t\t\tnew_hostnames := append(old_hostnames, fs.Hostname)\n\t\t\tknownHashes[fs.Hash] = Hash{\n\t\t\t\tHostnames: new_hostnames,\n\t\t\t\tContents: file_contents_gzipped,\n\t\t\t}\n\t\t}\n\t}\n\treturn knownHashes\n}\n\n\/*\nfunc main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Printf(\"usage: %s filepattern\\n\", os.Args[0])\n\t\treturn\n\t}\n\tcater_response := cater(os.Args[1])\n\tfmt.Printf(cater_response.String())\n}\n*\/\n<commit_msg>Added grep & grepcount functions<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"encoding\/json\"\n\t\"compress\/gzip\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"os\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype FileStat struct {\n\tPath string\n\tHash string\n\tSize int64\n\tHostname string\n}\n\n\/\/ Make the Hostnames an array,\n\/\/ Store every hostname that matches the hash\n\/\/ Fake the file for each to the stdout buffer\ntype Hash struct {\n\tHostnames []string\n\tContents []byte\n}\n\nfunc StringInArray(a string, list map[string]Hash) bool {\n\tfor q, _ := range list {\n\t\tif q == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc greper(path string, pattern string, countonly bool) {\n\tvar response bytes.Buffer\n\tvar count int\n\tcount = 0\n\tknownHashes := grab(path)\n\tfmt.Printf(\"searching for '%s' in %s\\n\", pattern, path)\n\n\tfor _, hash := range knownHashes {\n\n\t\tvar b bytes.Buffer\n\t\tb.Write([]byte(hash.Contents))\n\n\t\tgr, _ := gzip.NewReader(&b)\n\t\tdefer gr.Close()\n\t\tplaintext, _ := ioutil.ReadAll(gr)\n\n\t\tif countonly {\n\t\t\tif strings.Contains(string(plaintext), pattern) {\n\t\t\t\tcount += len(hash.Hostnames)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\n\t\tfor _, a := range strings.Split(string(plaintext), \"\\n\") {\n\t\t\tif len(a) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif ! strings.Contains(a, pattern) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, b := range hash.Hostnames {\n\n\t\t\t\tline := fmt.Sprintf(\"%s:%s\\n\", b, a)\n\t\t\t\tresponse.Write([]byte(line))\n\t\t\t}\n\t\t}\n\t}\n\tif countonly {\n\t\tfmt.Printf(\"Count: %d\\n\", count)\n\t}\n\tfmt.Printf(response.String())\n\n}\n\n\n\nfunc cater(path string) {\n\tvar response bytes.Buffer\n\tknownHashes := grab(path)\n\n\n\tfor _, hash := range knownHashes {\n\n\t\tvar b bytes.Buffer\n\t\tb.Write([]byte(hash.Contents))\n\n\t\tgr, _ := gzip.NewReader(&b)\n\t\tdefer gr.Close()\n\t\tplaintext, _ := ioutil.ReadAll(gr)\n\n\t\tfor _, a := range strings.Split(string(plaintext), \"\\n\") {\n\t\t\tif len(a) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, b := range hash.Hostnames {\n\t\t\t\tline := fmt.Sprintf(\"%s:%s\\n\", b, a)\n\t\t\t\tresponse.Write([]byte(line))\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ return response\n\tfmt.Printf(response.String())\n\n}\n\n\nfunc grab(path string) map[string]Hash {\n\tvar file_contents_gzipped []byte\n\tknownHashes := make(map[string]Hash)\n\n\tconn, _ := redis.Dial(\"tcp\", \":6379\")\n\tdefer conn.Close()\n\tx, _ := redis.Values(conn.Do(\"KEYS\", path))\n\tvar fs FileStat\n\tfor _, z := range x {\n\t\tz = fmt.Sprintf(\"%s\", z)\n\t\treply, _ := redis.String(conn.Do(\"GET\", z))\n\t\ty := []byte(reply)\n\n\t\tjson.Unmarshal(y, &fs)\n\n\t\t\/\/ Check if we already have this hash, before wasting redis time\n\t\tif ! StringInArray(fs.Hash, knownHashes) {\n\t\t\tfile_contents_gzipped_string, _ := redis.String(conn.Do(\"GET\", fs.Hash))\n\t\t\tfile_contents_gzipped = []byte(file_contents_gzipped_string)\n\t\t\t\n\t\t\tknownHashes[fs.Hash] = Hash{\n\t\t\t\tHostnames: []string{fs.Hostname},\n\t\t\t\tContents: []byte(file_contents_gzipped),\n\t\t\t}\n\t\t} else {\n\t\t\tfile_contents_gzipped = knownHashes[fs.Hash].Contents\n\t\t\told_hostnames := knownHashes[fs.Hash].Hostnames\n\t\t\tnew_hostnames := append(old_hostnames, fs.Hostname)\n\t\t\tknownHashes[fs.Hash] = Hash{\n\t\t\t\tHostnames: new_hostnames,\n\t\t\t\tContents: file_contents_gzipped,\n\t\t\t}\n\t\t}\n\t}\n\treturn knownHashes\n}\n\nfunc main() {\n\n\tapp := cli.NewApp()\n\tapp.Name = \"grab\"\n\tapp.Usage = \"Grab file data from redis server\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"cat\",\n\t\t\tUsage: \"add a task to the list\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif len(c.Args()) != 1 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpath := c.Args().First()\n\t\t\t\tcater(path)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"grep\",\n\t\t\tUsage: \"grep for pattern\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif len(c.Args()) != 2 {\n\t\t\t\t\tfmt.Printf(\"missing file and pattern\\n\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpath := c.Args().First()\n\t\t\t\tpattern := c.Args()[1]\n\t\t\t\tgreper(path, pattern, false)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"grepcount\",\n\t\t\tUsage: \"count pattern\",\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tif len(c.Args()) != 2 {\n\t\t\t\t\tfmt.Printf(\"missing file and pattern\\n\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpath := c.Args().First()\n\t\t\t\tpattern := c.Args()[1]\n\t\t\t\tgreper(path, pattern, true)\n\t\t\t},\n\t\t},\n\t}\n\t\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/kubernetes\/pkg\/features\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/kubeletconfig\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\tpluginapi \"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/deviceplugin\/v1beta1\"\n\tdm \"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/devicemanager\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ fake resource name\n\tresourceName = \"fake.com\/resource\"\n\tresourceNameWithProbeSupport = \"fake.com\/resource2\"\n)\n\n\/\/ Serial because the test restarts Kubelet\nvar _ = framework.KubeDescribe(\"Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]\", func() {\n\tf := framework.NewDefaultFramework(\"device-plugin-errors\")\n\ttestDevicePlugin(f, false, pluginapi.DevicePluginPath)\n})\n\nvar _ = framework.KubeDescribe(\"Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]\", func() {\n\tf := framework.NewDefaultFramework(\"device-plugin-errors\")\n\ttestDevicePlugin(f, true, \"\/var\/lib\/kubelet\/plugins\/\")\n})\n\nfunc testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {\n\tContext(\"DevicePlugin\", func() {\n\t\tBy(\"Enabling support for Kubelet Plugins Watcher\")\n\t\ttempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {\n\t\t\tinitialConfig.FeatureGates[string(features.KubeletPluginsWatcher)] = enablePluginWatcher\n\t\t})\n\t\t\/\/devicePluginSockPaths := []string{pluginapi.DevicePluginPath}\n\t\tIt(\"Verifies the Kubelet device plugin functionality.\", func() {\n\t\t\tBy(\"Start stub device plugin\")\n\t\t\t\/\/ fake devices for e2e test\n\t\t\tdevs := []*pluginapi.Device{\n\t\t\t\t{ID: \"Dev-1\", Health: pluginapi.Healthy},\n\t\t\t\t{ID: \"Dev-2\", Health: pluginapi.Healthy},\n\t\t\t}\n\n\t\t\tsocketPath := pluginSockDir + \"dp.\" + fmt.Sprintf(\"%d\", time.Now().Unix())\n\t\t\tframework.Logf(\"socketPath %v\", socketPath)\n\n\t\t\tdp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr := dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Register resources\")\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginapi.DevicePluginPath)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for the resource exported by the stub device plugin to become available on the local node\")\n\t\t\tdevsLen := int64(len(devs))\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) == devsLen &&\n\t\t\t\t\tnumberOfDevicesAllocatable(node, resourceName) == devsLen\n\t\t\t}, 30*time.Second, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Creating one pod on node with at least one fake-device\")\n\t\t\tpodRECMD := \"devs=$(ls \/tmp\/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs\"\n\t\t\tpod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))\n\t\t\tdeviceIDRE := \"stub devices: (Dev-[0-9]+)\"\n\t\t\tdevId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devId1).To(Not(Equal(\"\")))\n\n\t\t\tpod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\n\t\t\tBy(\"Confirming that device assignment persists even after container restart\")\n\t\t\tdevIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdAfterRestart).To(Equal(devId1))\n\n\t\t\tBy(\"Restarting Kubelet\")\n\t\t\trestartKubelet()\n\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\t\t\tBy(\"Confirming that after a kubelet restart, fake-device assignement is kept\")\n\t\t\tdevIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart1).To(Equal(devId1))\n\n\t\t\tBy(\"Wait for node is ready\")\n\t\t\tframework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)\n\n\t\t\tBy(\"Re-Register resources\")\n\t\t\tdp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr = dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for resource to become available on the local node after re-registration\")\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) == devsLen &&\n\t\t\t\t\tnumberOfDevicesAllocatable(node, resourceName) == devsLen\n\t\t\t}, 30*time.Second, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Creating another pod\")\n\t\t\tpod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))\n\n\t\t\tBy(\"Checking that pod got a different fake device\")\n\t\t\tdevId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)\n\n\t\t\tExpect(devId1).To(Not(Equal(devId2)))\n\n\t\t\tBy(\"Deleting device plugin.\")\n\t\t\terr = dp1.Stop()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for stub device plugin to become unhealthy on the local node\")\n\t\t\tEventually(func() int64 {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesAllocatable(node, resourceName)\n\t\t\t}, 30*time.Second, framework.Poll).Should(Equal(int64(0)))\n\n\t\t\tBy(\"Checking that scheduled pods can continue to run even after we delete device plugin.\")\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\t\t\tdevIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart1).To(Equal(devId1))\n\n\t\t\tensurePodContainerRestart(f, pod2.Name, pod2.Name)\n\t\t\tdevIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart2).To(Equal(devId2))\n\n\t\t\tBy(\"Re-register resources\")\n\t\t\tdp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr = dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for the resource exported by the stub device plugin to become healthy on the local node\")\n\t\t\tEventually(func() int64 {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesAllocatable(node, resourceName)\n\t\t\t}, 30*time.Second, framework.Poll).Should(Equal(devsLen))\n\n\t\t\tBy(\"Deleting device plugin again.\")\n\t\t\terr = dp1.Stop()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for stub device plugin to become unavailable on the local node\")\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) <= 0\n\t\t\t}, 10*time.Minute, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Restarting Kubelet second time.\")\n\t\t\trestartKubelet()\n\n\t\t\tBy(\"Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet Eventually.\")\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\t\t\tdevIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart1).To(Equal(devId1))\n\n\t\t\tensurePodContainerRestart(f, pod2.Name, pod2.Name)\n\t\t\tdevIdRestart2 = parseLog(f, pod2.Name, pod2.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart2).To(Equal(devId2))\n\n\t\t\t\/\/ Cleanup\n\t\t\tf.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)\n\t\t\tf.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)\n\t\t})\n\t})\n}\n\n\/\/ makeBusyboxPod returns a simple Pod spec with a busybox container\n\/\/ that requests resourceName and runs the specified command.\nfunc makeBusyboxPod(resourceName, cmd string) *v1.Pod {\n\tpodName := \"device-plugin-test-\" + string(uuid.NewUUID())\n\trl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)}\n\n\treturn &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{Name: podName},\n\t\tSpec: v1.PodSpec{\n\t\t\tRestartPolicy: v1.RestartPolicyAlways,\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tImage: busyboxImage,\n\t\t\t\tName: podName,\n\t\t\t\t\/\/ Runs the specified command in the test pod.\n\t\t\t\tCommand: []string{\"sh\", \"-c\", cmd},\n\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\tLimits: rl,\n\t\t\t\t\tRequests: rl,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n}\n\n\/\/ ensurePodContainerRestart confirms that pod container has restarted at least once\nfunc ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {\n\tvar initialCount int32\n\tvar currentCount int32\n\tp, err := f.PodClient().Get(podName, metav1.GetOptions{})\n\tif err != nil || len(p.Status.ContainerStatuses) < 1 {\n\t\tframework.Failf(\"ensurePodContainerRestart failed for pod %q: %v\", podName, err)\n\t}\n\tinitialCount = p.Status.ContainerStatuses[0].RestartCount\n\tEventually(func() bool {\n\t\tp, err = f.PodClient().Get(podName, metav1.GetOptions{})\n\t\tif err != nil || len(p.Status.ContainerStatuses) < 1 {\n\t\t\treturn false\n\t\t}\n\t\tcurrentCount = p.Status.ContainerStatuses[0].RestartCount\n\t\tframework.Logf(\"initial %v, current %v\", initialCount, currentCount)\n\t\treturn currentCount > initialCount\n\t}, 5*time.Minute, framework.Poll).Should(BeTrue())\n}\n\n\/\/ parseLog returns the matching string for the specified regular expression parsed from the container logs.\nfunc parseLog(f *framework.Framework, podName string, contName string, re string) string {\n\tlogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)\n\tif err != nil {\n\t\tframework.Failf(\"GetPodLogs for pod %q failed: %v\", podName, err)\n\t}\n\n\tframework.Logf(\"got pod logs: %v\", logs)\n\tregex := regexp.MustCompile(re)\n\tmatches := regex.FindStringSubmatch(logs)\n\tif len(matches) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn matches[1]\n}\n\n\/\/ numberOfDevicesCapacity returns the number of devices of resourceName advertised by a node capacity\nfunc numberOfDevicesCapacity(node *v1.Node, resourceName string) int64 {\n\tval, ok := node.Status.Capacity[v1.ResourceName(resourceName)]\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn val.Value()\n}\n\n\/\/ numberOfDevicesAllocatable returns the number of devices of resourceName advertised by a node allocatable\nfunc numberOfDevicesAllocatable(node *v1.Node, resourceName string) int64 {\n\tval, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn val.Value()\n}\n\n\/\/ stubAllocFunc will pass to stub device plugin\nfunc stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {\n\tvar responses pluginapi.AllocateResponse\n\tfor _, req := range r.ContainerRequests {\n\t\tresponse := &pluginapi.ContainerAllocateResponse{}\n\t\tfor _, requestID := range req.DevicesIDs {\n\t\t\tdev, ok := devs[requestID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid allocation request with non-existing device %s\", requestID)\n\t\t\t}\n\n\t\t\tif dev.Health != pluginapi.Healthy {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid allocation request with unhealthy device: %s\", requestID)\n\t\t\t}\n\n\t\t\t\/\/ create fake device file\n\t\t\tfpath := filepath.Join(\"\/tmp\", dev.ID)\n\n\t\t\t\/\/ clean first\n\t\t\tos.RemoveAll(fpath)\n\t\t\tf, err := os.Create(fpath)\n\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create fake device file: %s\", err)\n\t\t\t}\n\n\t\t\tf.Close()\n\n\t\t\tresponse.Mounts = append(response.Mounts, &pluginapi.Mount{\n\t\t\t\tContainerPath: fpath,\n\t\t\t\tHostPath: fpath,\n\t\t\t})\n\t\t}\n\t\tresponses.ContainerResponses = append(responses.ContainerResponses, response)\n\t}\n\n\treturn &responses, nil\n}\n<commit_msg>Updates test\/e2e_node\/device_plugin.go to cope with recent device manager change in commit 7b1ae66. Also changes the test to make sure node is indeed ready after Kubelet restart. The previous readiness check may use old API state but didn't run into the issue due to the delay of waiting for pod restart.<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/kubernetes\/pkg\/features\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/kubeletconfig\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\tpluginapi \"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/deviceplugin\/v1beta1\"\n\tdm \"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/devicemanager\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ fake resource name\n\tresourceName = \"fake.com\/resource\"\n\tresourceNameWithProbeSupport = \"fake.com\/resource2\"\n)\n\n\/\/ Serial because the test restarts Kubelet\nvar _ = framework.KubeDescribe(\"Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]\", func() {\n\tf := framework.NewDefaultFramework(\"device-plugin-errors\")\n\ttestDevicePlugin(f, false, pluginapi.DevicePluginPath)\n})\n\nvar _ = framework.KubeDescribe(\"Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]\", func() {\n\tf := framework.NewDefaultFramework(\"device-plugin-errors\")\n\ttestDevicePlugin(f, true, \"\/var\/lib\/kubelet\/plugins\/\")\n})\n\nfunc testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSockDir string) {\n\tContext(\"DevicePlugin\", func() {\n\t\tBy(\"Enabling support for Kubelet Plugins Watcher\")\n\t\ttempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {\n\t\t\tinitialConfig.FeatureGates[string(features.KubeletPluginsWatcher)] = enablePluginWatcher\n\t\t})\n\t\tIt(\"Verifies the Kubelet device plugin functionality.\", func() {\n\t\t\tBy(\"Start stub device plugin\")\n\t\t\t\/\/ fake devices for e2e test\n\t\t\tdevs := []*pluginapi.Device{\n\t\t\t\t{ID: \"Dev-1\", Health: pluginapi.Healthy},\n\t\t\t\t{ID: \"Dev-2\", Health: pluginapi.Healthy},\n\t\t\t}\n\n\t\t\tsocketPath := pluginSockDir + \"dp.\" + fmt.Sprintf(\"%d\", time.Now().Unix())\n\t\t\tframework.Logf(\"socketPath %v\", socketPath)\n\n\t\t\tdp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr := dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Register resources\")\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for the resource exported by the stub device plugin to become available on the local node\")\n\t\t\tdevsLen := int64(len(devs))\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) == devsLen &&\n\t\t\t\t\tnumberOfDevicesAllocatable(node, resourceName) == devsLen\n\t\t\t}, 30*time.Second, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Creating one pod on node with at least one fake-device\")\n\t\t\tpodRECMD := \"devs=$(ls \/tmp\/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs\"\n\t\t\tpod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))\n\t\t\tdeviceIDRE := \"stub devices: (Dev-[0-9]+)\"\n\t\t\tdevId1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devId1).To(Not(Equal(\"\")))\n\n\t\t\tpod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\n\t\t\tBy(\"Confirming that device assignment persists even after container restart\")\n\t\t\tdevIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdAfterRestart).To(Equal(devId1))\n\n\t\t\trestartTime := time.Now()\n\t\t\tBy(\"Restarting Kubelet\")\n\t\t\trestartKubelet()\n\n\t\t\t\/\/ We need to wait for node to be ready before re-registering stub device plugin.\n\t\t\t\/\/ Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.\n\t\t\tBy(\"Wait for node is ready\")\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\tfor _, cond := range node.Status.Conditions {\n\t\t\t\t\tif cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t}, 5*time.Minute, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Re-Register resources\")\n\t\t\tdp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr = dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\t\t\tBy(\"Confirming that after a kubelet restart, fake-device assignement is kept\")\n\t\t\tdevIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart1).To(Equal(devId1))\n\n\t\t\tBy(\"Waiting for resource to become available on the local node after re-registration\")\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) == devsLen &&\n\t\t\t\t\tnumberOfDevicesAllocatable(node, resourceName) == devsLen\n\t\t\t}, 30*time.Second, framework.Poll).Should(BeTrue())\n\n\t\t\tBy(\"Creating another pod\")\n\t\t\tpod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))\n\n\t\t\tBy(\"Checking that pod got a different fake device\")\n\t\t\tdevId2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)\n\n\t\t\tExpect(devId1).To(Not(Equal(devId2)))\n\n\t\t\tBy(\"Deleting device plugin.\")\n\t\t\terr = dp1.Stop()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for stub device plugin to become unhealthy on the local node\")\n\t\t\tEventually(func() int64 {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesAllocatable(node, resourceName)\n\t\t\t}, 30*time.Second, framework.Poll).Should(Equal(int64(0)))\n\n\t\t\tBy(\"Checking that scheduled pods can continue to run even after we delete device plugin.\")\n\t\t\tensurePodContainerRestart(f, pod1.Name, pod1.Name)\n\t\t\tdevIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart1).To(Equal(devId1))\n\n\t\t\tensurePodContainerRestart(f, pod2.Name, pod2.Name)\n\t\t\tdevIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)\n\t\t\tExpect(devIdRestart2).To(Equal(devId2))\n\n\t\t\tBy(\"Re-register resources\")\n\t\t\tdp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false)\n\t\t\tdp1.SetAllocFunc(stubAllocFunc)\n\t\t\terr = dp1.Start()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\terr = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for the resource exported by the stub device plugin to become healthy on the local node\")\n\t\t\tEventually(func() int64 {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesAllocatable(node, resourceName)\n\t\t\t}, 30*time.Second, framework.Poll).Should(Equal(devsLen))\n\n\t\t\tBy(\"Deleting device plugin again.\")\n\t\t\terr = dp1.Stop()\n\t\t\tframework.ExpectNoError(err)\n\n\t\t\tBy(\"Waiting for stub device plugin to become unavailable on the local node\")\n\t\t\tEventually(func() bool {\n\t\t\t\tnode, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})\n\t\t\t\tframework.ExpectNoError(err)\n\t\t\t\treturn numberOfDevicesCapacity(node, resourceName) <= 0\n\t\t\t}, 10*time.Minute, framework.Poll).Should(BeTrue())\n\n\t\t\t\/\/ Cleanup\n\t\t\tf.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)\n\t\t\tf.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)\n\t\t})\n\t})\n}\n\n\/\/ makeBusyboxPod returns a simple Pod spec with a busybox container\n\/\/ that requests resourceName and runs the specified command.\nfunc makeBusyboxPod(resourceName, cmd string) *v1.Pod {\n\tpodName := \"device-plugin-test-\" + string(uuid.NewUUID())\n\trl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)}\n\n\treturn &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{Name: podName},\n\t\tSpec: v1.PodSpec{\n\t\t\tRestartPolicy: v1.RestartPolicyAlways,\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tImage: busyboxImage,\n\t\t\t\tName: podName,\n\t\t\t\t\/\/ Runs the specified command in the test pod.\n\t\t\t\tCommand: []string{\"sh\", \"-c\", cmd},\n\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\tLimits: rl,\n\t\t\t\t\tRequests: rl,\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n}\n\n\/\/ ensurePodContainerRestart confirms that pod container has restarted at least once\nfunc ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {\n\tvar initialCount int32\n\tvar currentCount int32\n\tp, err := f.PodClient().Get(podName, metav1.GetOptions{})\n\tif err != nil || len(p.Status.ContainerStatuses) < 1 {\n\t\tframework.Failf(\"ensurePodContainerRestart failed for pod %q: %v\", podName, err)\n\t}\n\tinitialCount = p.Status.ContainerStatuses[0].RestartCount\n\tEventually(func() bool {\n\t\tp, err = f.PodClient().Get(podName, metav1.GetOptions{})\n\t\tif err != nil || len(p.Status.ContainerStatuses) < 1 {\n\t\t\treturn false\n\t\t}\n\t\tcurrentCount = p.Status.ContainerStatuses[0].RestartCount\n\t\tframework.Logf(\"initial %v, current %v\", initialCount, currentCount)\n\t\treturn currentCount > initialCount\n\t}, 5*time.Minute, framework.Poll).Should(BeTrue())\n}\n\n\/\/ parseLog returns the matching string for the specified regular expression parsed from the container logs.\nfunc parseLog(f *framework.Framework, podName string, contName string, re string) string {\n\tlogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)\n\tif err != nil {\n\t\tframework.Failf(\"GetPodLogs for pod %q failed: %v\", podName, err)\n\t}\n\n\tframework.Logf(\"got pod logs: %v\", logs)\n\tregex := regexp.MustCompile(re)\n\tmatches := regex.FindStringSubmatch(logs)\n\tif len(matches) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn matches[1]\n}\n\n\/\/ numberOfDevicesCapacity returns the number of devices of resourceName advertised by a node capacity\nfunc numberOfDevicesCapacity(node *v1.Node, resourceName string) int64 {\n\tval, ok := node.Status.Capacity[v1.ResourceName(resourceName)]\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn val.Value()\n}\n\n\/\/ numberOfDevicesAllocatable returns the number of devices of resourceName advertised by a node allocatable\nfunc numberOfDevicesAllocatable(node *v1.Node, resourceName string) int64 {\n\tval, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn val.Value()\n}\n\n\/\/ stubAllocFunc will pass to stub device plugin\nfunc stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {\n\tvar responses pluginapi.AllocateResponse\n\tfor _, req := range r.ContainerRequests {\n\t\tresponse := &pluginapi.ContainerAllocateResponse{}\n\t\tfor _, requestID := range req.DevicesIDs {\n\t\t\tdev, ok := devs[requestID]\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid allocation request with non-existing device %s\", requestID)\n\t\t\t}\n\n\t\t\tif dev.Health != pluginapi.Healthy {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid allocation request with unhealthy device: %s\", requestID)\n\t\t\t}\n\n\t\t\t\/\/ create fake device file\n\t\t\tfpath := filepath.Join(\"\/tmp\", dev.ID)\n\n\t\t\t\/\/ clean first\n\t\t\tos.RemoveAll(fpath)\n\t\t\tf, err := os.Create(fpath)\n\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create fake device file: %s\", err)\n\t\t\t}\n\n\t\t\tf.Close()\n\n\t\t\tresponse.Mounts = append(response.Mounts, &pluginapi.Mount{\n\t\t\t\tContainerPath: fpath,\n\t\t\t\tHostPath: fpath,\n\t\t\t})\n\t\t}\n\t\tresponses.ContainerResponses = append(responses.ContainerResponses, response)\n\t}\n\n\treturn &responses, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The prometheus-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/yaml\"\n\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/alertmanager\"\n\tmonitoringv1 \"github.com\/coreos\/prometheus-operator\/pkg\/client\/monitoring\/v1\"\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/client\/monitoring\/v1alpha1\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar ValidAlertmanagerConfig = `global:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'webhook'\nreceivers:\n- name: 'webhook'\n webhook_configs:\n - url: 'http:\/\/alertmanagerwh:30500\/'\n`\n\nfunc (f *Framework) MakeBasicAlertmanager(name string, replicas int32) *monitoringv1.Alertmanager {\n\treturn &monitoringv1.Alertmanager{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: monitoringv1.AlertmanagerSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n}\n\nfunc (f *Framework) MakeBasicAlertmanagerV1alpha1(name string, replicas int32) *v1alpha1.Alertmanager {\n\treturn &v1alpha1.Alertmanager{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: v1alpha1.AlertmanagerSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n}\n\nfunc (f *Framework) MakeAlertmanagerService(name, group string, serviceType v1.ServiceType) *v1.Service {\n\tservice := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: fmt.Sprintf(\"alertmanager-%s\", name),\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"group\": group,\n\t\t\t},\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tType: serviceType,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\tv1.ServicePort{\n\t\t\t\t\tName: \"web\",\n\t\t\t\t\tPort: 9093,\n\t\t\t\t\tTargetPort: intstr.FromString(\"web\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"alertmanager\": name,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn service\n}\n\nfunc (f *Framework) SecretFromYaml(filepath string) (*v1.Secret, error) {\n\tmanifest, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := v1.Secret{}\n\terr = yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &s, nil\n}\n\nfunc (f *Framework) AlertmanagerConfigSecret(name string) (*v1.Secret, error) {\n\ts, err := f.SecretFromYaml(\"..\/..\/contrib\/kube-prometheus\/manifests\/alertmanager\/alertmanager-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Name = name\n\treturn s, nil\n}\n\nfunc (f *Framework) CreateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {\n\tamConfigSecretName := fmt.Sprintf(\"alertmanager-%s\", a.Name)\n\ts, err := f.AlertmanagerConfigSecret(amConfigSecretName)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"making alertmanager config secret %v failed\", amConfigSecretName))\n\t}\n\t_, err = f.KubeClient.CoreV1().Secrets(ns).Create(s)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"creating alertmanager config secret %v failed\", s.Name))\n\t}\n\n\t_, err = f.MonClient.Alertmanagers(ns).Create(a)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"creating alertmanager %v failed\", a.Name))\n\t}\n\n\treturn f.WaitForAlertmanagerReady(ns, a.Name, int(*a.Spec.Replicas))\n}\n\nfunc (f *Framework) WaitForAlertmanagerReady(ns, name string, replicas int) error {\n\terr := WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\t5*time.Minute,\n\t\treplicas,\n\t\talertmanager.ListOptions(name),\n\t)\n\n\treturn errors.Wrap(err, fmt.Sprintf(\"failed to create an Alertmanager cluster (%s) with %d instances\", name, replicas))\n}\n\nfunc (f *Framework) UpdateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {\n\t_, err := f.MonClient.Alertmanagers(ns).Update(a)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\t5*time.Minute,\n\t\tint(*a.Spec.Replicas),\n\t\talertmanager.ListOptions(a.Name),\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update %d Alertmanager instances (%s): %v\", a.Spec.Replicas, a.Name, err)\n\t}\n\n\treturn nil\n}\n\nfunc (f *Framework) DeleteAlertmanagerAndWaitUntilGone(ns, name string) error {\n\t_, err := f.MonClient.Alertmanagers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"requesting Alertmanager tpr %v failed\", name))\n\t}\n\n\tif err := f.MonClient.Alertmanagers(ns).Delete(name, nil); err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"deleting Alertmanager tpr %v failed\", name))\n\t}\n\n\tif err := WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\tf.DefaultTimeout,\n\t\t0,\n\t\talertmanager.ListOptions(name),\n\t); err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"waiting for Alertmanager tpr (%s) to vanish timed out\", name))\n\t}\n\n\treturn f.KubeClient.CoreV1().Secrets(ns).Delete(fmt.Sprintf(\"alertmanager-%s\", name), nil)\n}\n\nfunc amImage(version string) string {\n\treturn fmt.Sprintf(\"quay.io\/prometheus\/alertmanager:%s\", version)\n}\n\nfunc (f *Framework) WaitForAlertmanagerInitializedMesh(ns, name string, amountPeers int) error {\n\treturn wait.Poll(time.Second, time.Second*20, func() (bool, error) {\n\t\tamStatus, err := f.GetAlertmanagerConfig(ns, name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(amStatus.Data.MeshStatus.Peers) == amountPeers {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n}\n\nfunc (f *Framework) GetAlertmanagerConfig(ns, n string) (alertmanagerStatus, error) {\n\tvar amStatus alertmanagerStatus\n\trequest := ProxyGetPod(f.KubeClient, ns, n, \"9093\", \"\/api\/v1\/status\")\n\tresp, err := request.DoRaw()\n\tif err != nil {\n\t\treturn amStatus, err\n\t}\n\n\tif err := json.Unmarshal(resp, &amStatus); err != nil {\n\t\treturn amStatus, err\n\t}\n\n\treturn amStatus, nil\n}\n\nfunc (f *Framework) WaitForSpecificAlertmanagerConfig(ns, amName string, expectedConfig string) error {\n\treturn wait.Poll(time.Second, time.Minute*5, func() (bool, error) {\n\t\tconfig, err := f.GetAlertmanagerConfig(ns, \"alertmanager-\"+amName+\"-0\")\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif config.Data.ConfigYAML == expectedConfig {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n}\n\ntype alertmanagerStatus struct {\n\tData alertmanagerStatusData `json:\"data\"`\n}\n\ntype alertmanagerStatusData struct {\n\tMeshStatus meshStatus `json:\"meshStatus\"`\n\tConfigYAML string `json:\"configYAML\"`\n}\n\ntype meshStatus struct {\n\tPeers []interface{} `json:\"peers\"`\n}\n<commit_msg>test\/alertmanager: print expected and found alertmanager config<commit_after>\/\/ Copyright 2016 The prometheus-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/yaml\"\n\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/alertmanager\"\n\tmonitoringv1 \"github.com\/coreos\/prometheus-operator\/pkg\/client\/monitoring\/v1\"\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/client\/monitoring\/v1alpha1\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar ValidAlertmanagerConfig = `global:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'webhook'\nreceivers:\n- name: 'webhook'\n webhook_configs:\n - url: 'http:\/\/alertmanagerwh:30500\/'\n`\n\nfunc (f *Framework) MakeBasicAlertmanager(name string, replicas int32) *monitoringv1.Alertmanager {\n\treturn &monitoringv1.Alertmanager{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: monitoringv1.AlertmanagerSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n}\n\nfunc (f *Framework) MakeBasicAlertmanagerV1alpha1(name string, replicas int32) *v1alpha1.Alertmanager {\n\treturn &v1alpha1.Alertmanager{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: v1alpha1.AlertmanagerSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n}\n\nfunc (f *Framework) MakeAlertmanagerService(name, group string, serviceType v1.ServiceType) *v1.Service {\n\tservice := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: fmt.Sprintf(\"alertmanager-%s\", name),\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"group\": group,\n\t\t\t},\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tType: serviceType,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\tv1.ServicePort{\n\t\t\t\t\tName: \"web\",\n\t\t\t\t\tPort: 9093,\n\t\t\t\t\tTargetPort: intstr.FromString(\"web\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"alertmanager\": name,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn service\n}\n\nfunc (f *Framework) SecretFromYaml(filepath string) (*v1.Secret, error) {\n\tmanifest, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := v1.Secret{}\n\terr = yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &s, nil\n}\n\nfunc (f *Framework) AlertmanagerConfigSecret(name string) (*v1.Secret, error) {\n\ts, err := f.SecretFromYaml(\"..\/..\/contrib\/kube-prometheus\/manifests\/alertmanager\/alertmanager-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Name = name\n\treturn s, nil\n}\n\nfunc (f *Framework) CreateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {\n\tamConfigSecretName := fmt.Sprintf(\"alertmanager-%s\", a.Name)\n\ts, err := f.AlertmanagerConfigSecret(amConfigSecretName)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"making alertmanager config secret %v failed\", amConfigSecretName))\n\t}\n\t_, err = f.KubeClient.CoreV1().Secrets(ns).Create(s)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"creating alertmanager config secret %v failed\", s.Name))\n\t}\n\n\t_, err = f.MonClient.Alertmanagers(ns).Create(a)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"creating alertmanager %v failed\", a.Name))\n\t}\n\n\treturn f.WaitForAlertmanagerReady(ns, a.Name, int(*a.Spec.Replicas))\n}\n\nfunc (f *Framework) WaitForAlertmanagerReady(ns, name string, replicas int) error {\n\terr := WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\t5*time.Minute,\n\t\treplicas,\n\t\talertmanager.ListOptions(name),\n\t)\n\n\treturn errors.Wrap(err, fmt.Sprintf(\"failed to create an Alertmanager cluster (%s) with %d instances\", name, replicas))\n}\n\nfunc (f *Framework) UpdateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {\n\t_, err := f.MonClient.Alertmanagers(ns).Update(a)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\t5*time.Minute,\n\t\tint(*a.Spec.Replicas),\n\t\talertmanager.ListOptions(a.Name),\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update %d Alertmanager instances (%s): %v\", a.Spec.Replicas, a.Name, err)\n\t}\n\n\treturn nil\n}\n\nfunc (f *Framework) DeleteAlertmanagerAndWaitUntilGone(ns, name string) error {\n\t_, err := f.MonClient.Alertmanagers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"requesting Alertmanager tpr %v failed\", name))\n\t}\n\n\tif err := f.MonClient.Alertmanagers(ns).Delete(name, nil); err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"deleting Alertmanager tpr %v failed\", name))\n\t}\n\n\tif err := WaitForPodsReady(\n\t\tf.KubeClient,\n\t\tns,\n\t\tf.DefaultTimeout,\n\t\t0,\n\t\talertmanager.ListOptions(name),\n\t); err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"waiting for Alertmanager tpr (%s) to vanish timed out\", name))\n\t}\n\n\treturn f.KubeClient.CoreV1().Secrets(ns).Delete(fmt.Sprintf(\"alertmanager-%s\", name), nil)\n}\n\nfunc amImage(version string) string {\n\treturn fmt.Sprintf(\"quay.io\/prometheus\/alertmanager:%s\", version)\n}\n\nfunc (f *Framework) WaitForAlertmanagerInitializedMesh(ns, name string, amountPeers int) error {\n\treturn wait.Poll(time.Second, time.Second*20, func() (bool, error) {\n\t\tamStatus, err := f.GetAlertmanagerConfig(ns, name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(amStatus.Data.MeshStatus.Peers) == amountPeers {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n}\n\nfunc (f *Framework) GetAlertmanagerConfig(ns, n string) (alertmanagerStatus, error) {\n\tvar amStatus alertmanagerStatus\n\trequest := ProxyGetPod(f.KubeClient, ns, n, \"9093\", \"\/api\/v1\/status\")\n\tresp, err := request.DoRaw()\n\tif err != nil {\n\t\treturn amStatus, err\n\t}\n\n\tif err := json.Unmarshal(resp, &amStatus); err != nil {\n\t\treturn amStatus, err\n\t}\n\n\treturn amStatus, nil\n}\n\nfunc (f *Framework) WaitForSpecificAlertmanagerConfig(ns, amName string, expectedConfig string) error {\n\treturn wait.Poll(10*time.Second, time.Minute*5, func() (bool, error) {\n\t\tconfig, err := f.GetAlertmanagerConfig(ns, \"alertmanager-\"+amName+\"-0\")\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif config.Data.ConfigYAML == expectedConfig {\n\t\t\treturn true, nil\n\t\t}\n\n\t\tlog.Printf(\"\\n\\nFound:\\n\\n%#+v\\n\\nExpected:\\n\\n%#+v\\n\\n\", config.Data.ConfigYAML, expectedConfig)\n\n\t\treturn false, nil\n\t})\n}\n\ntype alertmanagerStatus struct {\n\tData alertmanagerStatusData `json:\"data\"`\n}\n\ntype alertmanagerStatusData struct {\n\tMeshStatus meshStatus `json:\"meshStatus\"`\n\tConfigYAML string `json:\"configYAML\"`\n}\n\ntype meshStatus struct {\n\tPeers []interface{} `json:\"peers\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package file\n\nimport (\n\t\"os\"\n\n\t\"github.com\/antham\/doc-hunt\/util\"\n)\n\n\/\/go:generate stringer -type=ItemStatus\n\n\/\/ ItemStatus represents a source file status\ntype ItemStatus int\n\n\/\/ Source files status\nconst (\n\tINONE ItemStatus = iota\n\tIFAILED\n\tIADDED\n\tIUPDATED\n\tIDELETED\n)\n\nvar statusCache map[string]ItemStatus\n\n\/\/ BuildStatus retrieves sources file status\nfunc BuildStatus() (*[]Result, error) {\n\tresults := []Result{}\n\n\tconfigs, err := ListConfig()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, config := range *configs {\n\t\tfor _, source := range config.Sources {\n\t\t\tresult := Result{\n\t\t\t\tStatus: map[ItemStatus][]string{},\n\t\t\t}\n\n\t\t\tresult.Doc = config.Doc\n\t\t\tresult.Source = source\n\n\t\t\tswitch source.Category {\n\t\t\tcase SFILE:\n\t\t\t\tstatus, err := retrieveFileItem(&source)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tresult.Status[status] = append(result.Status[status], source.Identifier)\n\t\t\tcase SFOLDER:\n\t\t\t\tfolderItems, err := retrieveFolderItems(&source)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tfor identifier, status := range *folderItems {\n\t\t\t\t\tresult.Status[status] = append(result.Status[status], identifier)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresults = append(results, result)\n\t\t}\n\t}\n\n\treturn &results, nil\n}\n\nfunc retrieveFolderItems(source *Source) (*map[string]ItemStatus, error) {\n\titems := map[string]ItemStatus{}\n\tdbItems, err := getItems(source)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, item := range *dbItems {\n\t\titems[item.Identifier] = getFileStatus(item.Identifier, item.Fingerprint)\n\t}\n\n\tfiles, err := util.ExtractFolderFiles(source.Identifier)\n\n\tswitch err.(type) {\n\tcase nil:\n\tcase *os.PathError:\n\tdefault:\n\t\treturn &items, err\n\t}\n\n\tfor _, file := range *files {\n\t\tif _, ok := items[file]; !ok {\n\t\t\titems[file] = IADDED\n\t\t}\n\t}\n\n\treturn &items, nil\n}\n\nfunc retrieveFileItem(source *Source) (ItemStatus, error) {\n\titems, err := getItems(source)\n\n\tif err != nil {\n\t\treturn INONE, err\n\t}\n\n\treturn getFileStatus((*items)[0].Identifier, (*items)[0].Fingerprint), nil\n}\n\nfunc getFileStatus(path string, origFingerprint string) ItemStatus {\n\tfilename := util.GetAbsPath(path)\n\n\t_, err := os.Stat(filename)\n\n\tif err != nil {\n\t\treturn IDELETED\n\t}\n\n\tfingerprint, err := calculateFingerprint(path)\n\n\tif err != nil {\n\t\treturn IFAILED\n\t}\n\n\tif hasChanged(fingerprint, origFingerprint) {\n\t\treturn IUPDATED\n\t}\n\n\treturn INONE\n}\n<commit_msg>fix(file\/status) : remove useless stmt<commit_after>package file\n\nimport (\n\t\"os\"\n\n\t\"github.com\/antham\/doc-hunt\/util\"\n)\n\n\/\/go:generate stringer -type=ItemStatus\n\n\/\/ ItemStatus represents a source file status\ntype ItemStatus int\n\n\/\/ Source files status\nconst (\n\tINONE ItemStatus = iota\n\tIFAILED\n\tIADDED\n\tIUPDATED\n\tIDELETED\n)\n\n\/\/ BuildStatus retrieves sources file status\nfunc BuildStatus() (*[]Result, error) {\n\tresults := []Result{}\n\n\tconfigs, err := ListConfig()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, config := range *configs {\n\t\tfor _, source := range config.Sources {\n\t\t\tresult := Result{\n\t\t\t\tStatus: map[ItemStatus][]string{},\n\t\t\t}\n\n\t\t\tresult.Doc = config.Doc\n\t\t\tresult.Source = source\n\n\t\t\tswitch source.Category {\n\t\t\tcase SFILE:\n\t\t\t\tstatus, err := retrieveFileItem(&source)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tresult.Status[status] = append(result.Status[status], source.Identifier)\n\t\t\tcase SFOLDER:\n\t\t\t\tfolderItems, err := retrieveFolderItems(&source)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tfor identifier, status := range *folderItems {\n\t\t\t\t\tresult.Status[status] = append(result.Status[status], identifier)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresults = append(results, result)\n\t\t}\n\t}\n\n\treturn &results, nil\n}\n\nfunc retrieveFolderItems(source *Source) (*map[string]ItemStatus, error) {\n\titems := map[string]ItemStatus{}\n\tdbItems, err := getItems(source)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, item := range *dbItems {\n\t\titems[item.Identifier] = getFileStatus(item.Identifier, item.Fingerprint)\n\t}\n\n\tfiles, err := util.ExtractFolderFiles(source.Identifier)\n\n\tswitch err.(type) {\n\tcase nil:\n\tcase *os.PathError:\n\tdefault:\n\t\treturn &items, err\n\t}\n\n\tfor _, file := range *files {\n\t\tif _, ok := items[file]; !ok {\n\t\t\titems[file] = IADDED\n\t\t}\n\t}\n\n\treturn &items, nil\n}\n\nfunc retrieveFileItem(source *Source) (ItemStatus, error) {\n\titems, err := getItems(source)\n\n\tif err != nil {\n\t\treturn INONE, err\n\t}\n\n\treturn getFileStatus((*items)[0].Identifier, (*items)[0].Fingerprint), nil\n}\n\nfunc getFileStatus(path string, origFingerprint string) ItemStatus {\n\tfilename := util.GetAbsPath(path)\n\n\t_, err := os.Stat(filename)\n\n\tif err != nil {\n\t\treturn IDELETED\n\t}\n\n\tfingerprint, err := calculateFingerprint(path)\n\n\tif err != nil {\n\t\treturn IFAILED\n\t}\n\n\tif hasChanged(fingerprint, origFingerprint) {\n\t\treturn IUPDATED\n\t}\n\n\treturn INONE\n}\n<|endoftext|>"} {"text":"<commit_before>package ftime\n\n\/\/ A fast timing counter accessing the cheapest fastest\n\/\/ counter your cpu can provide. Guaranteed to increase monotonically\n\/\/ across successive calls on the same CPU core.\n\/\/ On AMD64 CPUs we use the RDTSC instruction\nfunc Counter() (count int64)\n\n\/\/ Indicates whether the results returned from a call to Counter()\n\/\/ increase at a uniform rate, independent of the actual clock speed\n\/\/ of the CPU it is running on.\n\/\/ On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID\nfunc IsCounterSteady() bool {\n\t_, _, ecx, _ := cpuid(0X80000007)\n\treturn ecx&(1<<8) != 0\n}\n\n\/\/ Indicates whether the results returned from a call to Counter()\n\/\/ is guaranteed to be monotonically increasing per CPU and across\n\/\/ multiple CPUs on the same socket. No guarantee is made across CPU\n\/\/ sockets\n\/\/ On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID\nfunc IsCounterSMPMonotonic() bool {\n\t_, _, ecx, _ := cpuid(0X80000007)\n\treturn ecx&(1<<8) != 0\n}\n\nfunc cpuid(eaxi uint32) (eax, ebx, ecx, edx uint32)\n<commit_msg>Now identifying TSC_Invariant in the edx register<commit_after>package ftime\n\n\/\/ A fast timing counter accessing the cheapest fastest\n\/\/ counter your cpu can provide. Guaranteed to increase monotonically\n\/\/ across successive calls on the same CPU core.\n\/\/ On AMD64 CPUs we use the RDTSC instruction\nfunc Counter() (count int64)\n\n\/\/ Indicates whether the results returned from a call to Counter()\n\/\/ increase at a uniform rate, independent of the actual clock speed\n\/\/ of the CPU it is running on.\n\/\/ On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID\nfunc IsCounterSteady() bool {\n\t_, _, _, edx := cpuid(0X80000007)\n\treturn edx&(1<<8) != 0\n}\n\n\/\/ Indicates whether the results returned from a call to Counter()\n\/\/ is guaranteed to be monotonically increasing per CPU and across\n\/\/ multiple CPUs on the same socket. No guarantee is made across CPU\n\/\/ sockets\n\/\/ On AMD64 CPUs we test for the 'Invariant TSC' property using CPUID\nfunc IsCounterSMPMonotonic() bool {\n\t_, _, _, edx := cpuid(0X80000007)\n\treturn edx&(1<<8) != 0\n}\n\nfunc cpuid(eaxi uint32) (eax, ebx, ecx, edx uint32)\n<|endoftext|>"} {"text":"<commit_before>\/\/ 29 june 2015\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"github.com\/andlabs\/nointrochk\/clrmamepro\"\n)\n\n\/\/ fields in dat file\nconst (\n\tfGame = \"game\"\n\tfROM = \"rom\"\n\tfFilename = \"name\"\n\tfSize = \"size\"\n\tfCRC32 = \"crc\"\n\tfMD5 = \"md5\"\n\tfSHA1 = \"sha1\"\n)\n\nvar nroms, ngood, nbad, nmiss uint\n\nfunc checksumsPass(romname string, rom *clrmamepro.Block, f io.Reader) bool {\n\t\/\/ TODO\n\treturn true\n}\n\nfunc check(b *clrmamepro.Block, folder string) {\n\tif b.Name != fGame {\n\t\treturn\n\t}\n\tnroms++\n\trom := b.Blocks[fROM]\n\tif rom == nil {\n\t\tdie(\"checking ROM: game block without rom block: %#v\", b)\n\t}\n\tromname := rom.Texts[fFilename]\n\tif !exists(romname) {\n\t\talert(\"MISSING\", romname)\n\t\tnmiss++\n\t\treturn\n\t}\n\tdefer markProcessed(romname)\n\n\tfilename := filepath.Join(folder, romname)\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\tdie(\"opening ROM %q to check it: %v\", filename, err)\n\t}\n\tdefer f.Close()\n\n\t\/\/ first check size\n\ts, err := f.Stat()\n\tif err != nil {\n\t\tdie(\"getting ROM %q stats to check size: %v\", romname, err)\n\t}\n\tif fmt.Sprint(s.Size()) != rom.Texts[fSize] {\n\t\talert(\"BAD SIZE\", romname)\n\t\tnbad++\n\t\treturn\n\t}\n\n\t\/\/ now check checksums\n\tif !checksumsPass(romname, rom, f) {\n\t\tnbad++\n\t\treturn\n\t}\n\n\t\/\/ otherwise it all checked out\n\talert(\"GOOD\", romname)\n\tngood++\n}\n<commit_msg>Added the actual checksumming.<commit_after>\/\/ 29 june 2015\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"hash\"\n\t\"hash\/crc32\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"strings\"\n\t\"github.com\/andlabs\/nointrochk\/clrmamepro\"\n)\n\n\/\/ fields in dat file\nconst (\n\tfGame = \"game\"\n\tfROM = \"rom\"\n\tfFilename = \"name\"\n\tfSize = \"size\"\n\tfCRC32 = \"crc\"\n\tfMD5 = \"md5\"\n\tfSHA1 = \"sha1\"\n)\n\nvar nroms, ngood, nbad, nmiss uint\n\nfunc checksumsPass(romname string, rom *clrmamepro.Block, f io.Reader) bool {\n\tcrc := crc32.NewIEEE()\n\tmd := md5.New()\n\tsha := sha1.New()\n\tchecksums := io.MultiWriter(crc, md, sha)\n\n\t_, err := io.Copy(checksums, f)\n\tif err != nil {\n\t\tdie(\"checksumming ROM %q: %v\", romname, err)\n\t}\n\n\tcompare := func(h hash.Hash, expected string) bool {\n\t\treturn strings.ToUpper(fmt.Sprintf(\"%x\", h.Sum(nil))) == strings.ToUpper(expected)\n\t}\n\tif !compare(crc, rom.Texts[fCRC32]) {\n\t\talert(\"BAD CRC32\", romname)\n\t\treturn false\n\t}\n\tif !compare(md, rom.Texts[fMD5]) {\n\t\talert(\"BAD MD5\", romname)\n\t\treturn false\n\t}\n\tif !compare(sha, rom.Texts[fSHA1]) {\n\t\talert(\"BAD SHA1\", romname)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc check(b *clrmamepro.Block, folder string) {\n\tif b.Name != fGame {\n\t\treturn\n\t}\n\tnroms++\n\trom := b.Blocks[fROM]\n\tif rom == nil {\n\t\tdie(\"checking ROM: game block without rom block: %#v\", b)\n\t}\n\tromname := rom.Texts[fFilename]\n\tif !exists(romname) {\n\t\talert(\"MISSING\", romname)\n\t\tnmiss++\n\t\treturn\n\t}\n\tdefer markProcessed(romname)\n\n\tfilename := filepath.Join(folder, romname)\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\tdie(\"opening ROM %q to check it: %v\", filename, err)\n\t}\n\tdefer f.Close()\n\n\t\/\/ first check size\n\ts, err := f.Stat()\n\tif err != nil {\n\t\tdie(\"getting ROM %q stats to check size: %v\", romname, err)\n\t}\n\tif fmt.Sprint(s.Size()) != rom.Texts[fSize] {\n\t\talert(\"BAD SIZE\", romname)\n\t\tnbad++\n\t\treturn\n\t}\n\n\t\/\/ now check checksums\n\tif !checksumsPass(romname, rom, f) {\n\t\tnbad++\n\t\treturn\n\t}\n\n\t\/\/ otherwise it all checked out\n\talert(\"GOOD\", romname)\n\tngood++\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"io\"\n\n\t\"github.com\/inconshreveable\/log15\"\n)\n\nvar _ io.Writer = Writer{}\n\ntype Writer struct {\n\tLog log15.Logger\n\tLevel log15.Lvl\n\tMsg string\n}\n\nfunc (lw Writer) Write(msg []byte) (int, error) {\n\t\/\/ goddamnit, expose your fucking `write` method\n\tswitch lw.Level {\n\tcase log15.LvlDebug:\n\t\tlw.Log.Debug(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlInfo:\n\t\tlw.Log.Info(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlWarn:\n\t\tlw.Log.Warn(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlError:\n\t\tlw.Log.Error(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlCrit:\n\t\tlw.Log.Crit(lw.Msg, \"chunk\", string(msg))\n\t}\n\treturn len(msg), nil\n}\n<commit_msg>Add log handler for print methods.<commit_after>package log\n\nimport (\n\t\"io\"\n\n\t\"github.com\/inconshreveable\/log15\"\n)\n\nvar _ io.Writer = Writer{}\n\n\/*\n\tExposes a logger as an `io.Writer`, so you can have any system output tagged logs.\n*\/\ntype Writer struct {\n\tLog log15.Logger\n\tLevel log15.Lvl\n\tMsg string\n}\n\nfunc (lw Writer) Write(msg []byte) (int, error) {\n\t\/\/ goddamnit, expose your fucking `write` method\n\tswitch lw.Level {\n\tcase log15.LvlDebug:\n\t\tlw.Log.Debug(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlInfo:\n\t\tlw.Log.Info(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlWarn:\n\t\tlw.Log.Warn(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlError:\n\t\tlw.Log.Error(lw.Msg, \"chunk\", string(msg))\n\tcase log15.LvlCrit:\n\t\tlw.Log.Crit(lw.Msg, \"chunk\", string(msg))\n\t}\n\treturn len(msg), nil\n}\n\ntype Printer func(a ...interface{}) (n int, err error)\n\n\/*\n\tDumps logs to a printer. Any regular `Printer` will do -- `fmt.Print`\n\tqualifies.\n*\/\nfunc PrinterHandler(printer Printer, fmtr log15.Format) log15.Handler {\n\treturn log15.FuncHandler(func(r *log15.Record) error {\n\t\tprinter(string(fmtr.Format(r)))\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package crypto\n\nimport (\n\t\"github.com\/dedis\/crypto\/abstract\"\n\t\"github.com\/lbarman\/prifi\/prifi-lib\/config\"\n\t\"testing\"\n\n\t\"fmt\"\n\t\"github.com\/dedis\/crypto\/random\"\n\t\"strconv\"\n)\n\nfunc TestSchnorr(t *testing.T) {\n\n\tpub, priv := NewKeyPair()\n\tpub2, priv2 := NewKeyPair()\n\n\t\/\/with empty data\n\tdata := make([]byte, 0)\n\tsig := SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr := SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err != nil {\n\t\tt.Error(\"Should validate with nil message, err is \" + err.Error())\n\t}\n\n\t\/\/with empty data\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err != nil {\n\t\tt.Error(\"Should validate with random message, err is \" + err.Error())\n\t}\n\n\t\/\/should trivially not validate with other keys\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv2)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err == nil {\n\t\tt.Error(\"Should not validate with wrong keys\")\n\t}\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub2, sig)\n\n\tif err == nil {\n\t\tt.Error(\"Should not validate with wrong keys\")\n\t}\n\n}\n\nfunc TestSchnorrHash(t *testing.T) {\n\n\tpub, _ := NewKeyPair()\n\tdata := random.Bits(100, false, random.Stream)\n\tsecret := hashSchnorr(config.CryptoSuite, data, pub)\n\n\tif secret == nil {\n\t\tt.Error(\"Secret should not be nil\")\n\t}\n}\n\nfunc TestNeffShuffle(t *testing.T) {\n\n\t\/\/output distribution testing.\n\tnClientsRange := []int{2, 3, 4, 5, 10}\n\trepetition := 100\n\tbase := config.CryptoSuite.Point().Base()\n\tmaxUnfairness := 30 \/\/30% of difference between the shuffle's homogeneity\n\n\tfor _, nClients := range nClientsRange {\n\t\tfmt.Println(\"Testing shuffle for \", nClients, \" clients.\")\n\n\t\t\/\/build the client's public key array\n\t\tclientPks := make([]abstract.Point, nClients)\n\t\tclientPrivKeys := make([]abstract.Scalar, nClients)\n\t\tfor i := 0; i < nClients; i++ {\n\t\t\tpub, priv := NewKeyPair()\n\t\t\tclientPks[i] = pub\n\t\t\tclientPrivKeys[i] = priv\n\t\t}\n\n\t\t\/\/shuffle\n\t\tshuffledKeys, newBase, secretCoeff, proof, err := NeffShuffle(clientPks, base, config.CryptoSuite, true)\n\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif shuffledKeys == nil {\n\t\t\tt.Error(\"ShuffledKeys is nil\")\n\t\t}\n\t\tif newBase == nil {\n\t\t\tt.Error(\"newBase is nil\")\n\t\t}\n\t\tif secretCoeff == nil {\n\t\t\tt.Error(\"secretCoeff is nil\")\n\t\t}\n\t\tif proof == nil {\n\t\t\tt.Error(\"proof is nil\")\n\t\t}\n\n\t\t\/\/now test that the shuffled keys are indeed the old keys in the new base\n\t\ttransformedKeys := make([]abstract.Point, nClients)\n\t\tfor i := 0; i < nClients; i++ {\n\t\t\ttransformedKeys[i] = config.CryptoSuite.Point().Mul(newBase, clientPrivKeys[i])\n\t\t}\n\n\t\t\/\/for every key, check that it exists in the remaining array\n\t\tfor _, v := range transformedKeys {\n\t\t\tfound := false\n\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\tif shuffledKeys[i].Equal(v) {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\tt.Error(\"Public key not found in outbound array\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/test the distribution\n\t\tmappingDistrib := make([][]float64, nClients)\n\t\tfor k := 0; k < nClients; k++ {\n\t\t\tmappingDistrib[k] = make([]float64, nClients)\n\t\t}\n\t\tfmt.Print(\"Testing distribution for \", nClients, \" clients.\")\n\t\tfor i := 0; i < repetition; i++ {\n\t\t\tshuffledKeys, newBase, secretCoeff, proof, err = NeffShuffle(clientPks, base, config.CryptoSuite, true)\n\n\t\t\tmapping := make([]int, nClients)\n\t\t\ttransformedKeys := make([]abstract.Point, nClients)\n\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\ttransformedKeys[i] = config.CryptoSuite.Point().Mul(newBase, clientPrivKeys[i])\n\t\t\t}\n\t\t\tfor k, v := range transformedKeys {\n\t\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\t\tif shuffledKeys[i].Equal(v) {\n\t\t\t\t\t\tmapping[k] = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor clientID, slot := range mapping {\n\t\t\t\tmappingDistrib[clientID][slot] += float64(1)\n\t\t\t}\n\t\t}\n\t\tmaxDeviation := float64(-1)\n\t\tfor clientID, _ := range mappingDistrib {\n\t\t\tfor slot := 0; slot < nClients; slot++ {\n\t\t\t\t\/\/compute deviation\n\t\t\t\texpectedValue := float64(100) \/ float64(len(mappingDistrib[clientID]))\n\t\t\t\tmappingDistrib[clientID][slot] -= expectedValue\n\t\t\t\tif mappingDistrib[clientID][slot] < 0 {\n\t\t\t\t\tmappingDistrib[clientID][slot] = -mappingDistrib[clientID][slot]\n\t\t\t\t}\n\n\t\t\t\t\/\/store max deviation\n\t\t\t\tif mappingDistrib[clientID][slot] > maxDeviation {\n\t\t\t\t\tmaxDeviation = mappingDistrib[clientID][slot]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"+-%d%%\\n\", int(maxDeviation))\n\t\tif int(maxDeviation) > maxUnfairness {\n\t\t\tt.Error(\"Max allowed distribution biais is \" + strconv.Itoa(maxUnfairness) + \" percent.\")\n\t\t}\n\n\t}\n\n}\n<commit_msg>Improves coverage of prifi-lib\/crypto<commit_after>package crypto\n\nimport (\n\t\"github.com\/dedis\/crypto\/abstract\"\n\t\"github.com\/lbarman\/prifi\/prifi-lib\/config\"\n\t\"testing\"\n\n\t\"fmt\"\n\t\"github.com\/dedis\/crypto\/random\"\n\t\"strconv\"\n)\n\nfunc TestSchnorr(t *testing.T) {\n\n\tpub, priv := NewKeyPair()\n\tpub2, priv2 := NewKeyPair()\n\n\t\/\/with empty data\n\tdata := make([]byte, 0)\n\tsig := SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr := SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err != nil {\n\t\tt.Error(\"Should validate with nil message, err is \" + err.Error())\n\t}\n\n\t\/\/with empty data\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err != nil {\n\t\tt.Error(\"Should validate with random message, err is \" + err.Error())\n\t}\n\n\t\/\/should trivially not validate with other keys\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv2)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub, sig)\n\n\tif err == nil {\n\t\tt.Error(\"Should not validate with wrong keys\")\n\t}\n\tdata = random.Bits(100, false, random.Stream)\n\tsig = SchnorrSign(config.CryptoSuite, random.Stream, data, priv)\n\terr = SchnorrVerify(config.CryptoSuite, data, pub2, sig)\n\n\tif err == nil {\n\t\tt.Error(\"Should not validate with wrong keys\")\n\t}\n\n}\n\nfunc TestNeffErrors(t *testing.T) {\n\n\tnClients := 2\n\tbase := config.CryptoSuite.Point().Base()\n\n\t\/\/build the client's public key array\n\tclientPks := make([]abstract.Point, nClients)\n\tclientPrivKeys := make([]abstract.Scalar, nClients)\n\tfor i := 0; i < nClients; i++ {\n\t\tpub, priv := NewKeyPair()\n\t\tclientPks[i] = pub\n\t\tclientPrivKeys[i] = priv\n\t}\n\n\t\/\/each of those call should fail\n\t_, _, _, _, err := NeffShuffle(nil, base, config.CryptoSuite, true)\n\tif err == nil {\n\t\tt.Error(\"NeffShuffle without a public key array should fail\")\n\t}\n\t_, _, _, _, err = NeffShuffle(clientPks, nil, config.CryptoSuite, true)\n\tif err == nil {\n\t\tt.Error(\"NeffShuffle without a base should fail\")\n\t}\n\t_, _, _, _, err = NeffShuffle(clientPks, base, nil, true)\n\tif err == nil {\n\t\tt.Error(\"NeffShuffle without a suite should fail\")\n\t}\n\t_, _, _, _, err = NeffShuffle(make([]abstract.Point, 0), base, config.CryptoSuite, true)\n\tif err == nil {\n\t\tt.Error(\"NeffShuffle with 0 public keys should fail\")\n\t}\n\n}\n\nfunc TestSchnorrHash(t *testing.T) {\n\n\tpub, _ := NewKeyPair()\n\tdata := random.Bits(100, false, random.Stream)\n\tsecret := hashSchnorr(config.CryptoSuite, data, pub)\n\n\tif secret == nil {\n\t\tt.Error(\"Secret should not be nil\")\n\t}\n}\n\nfunc TestNeffShuffle(t *testing.T) {\n\n\t\/\/output distribution testing.\n\tnClientsRange := []int{2, 3, 4, 5, 10}\n\trepetition := 100\n\tbase := config.CryptoSuite.Point().Base()\n\tmaxUnfairness := 30 \/\/30% of difference between the shuffle's homogeneity\n\n\tfor _, nClients := range nClientsRange {\n\t\tfmt.Println(\"Testing shuffle for \", nClients, \" clients.\")\n\n\t\t\/\/build the client's public key array\n\t\tclientPks := make([]abstract.Point, nClients)\n\t\tclientPrivKeys := make([]abstract.Scalar, nClients)\n\t\tfor i := 0; i < nClients; i++ {\n\t\t\tpub, priv := NewKeyPair()\n\t\t\tclientPks[i] = pub\n\t\t\tclientPrivKeys[i] = priv\n\t\t}\n\n\t\t\/\/shuffle\n\t\tshuffledKeys, newBase, secretCoeff, proof, err := NeffShuffle(clientPks, base, config.CryptoSuite, true)\n\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif shuffledKeys == nil {\n\t\t\tt.Error(\"ShuffledKeys is nil\")\n\t\t}\n\t\tif newBase == nil {\n\t\t\tt.Error(\"newBase is nil\")\n\t\t}\n\t\tif secretCoeff == nil {\n\t\t\tt.Error(\"secretCoeff is nil\")\n\t\t}\n\t\tif proof == nil {\n\t\t\tt.Error(\"proof is nil\")\n\t\t}\n\n\t\t\/\/now test that the shuffled keys are indeed the old keys in the new base\n\t\ttransformedKeys := make([]abstract.Point, nClients)\n\t\tfor i := 0; i < nClients; i++ {\n\t\t\ttransformedKeys[i] = config.CryptoSuite.Point().Mul(newBase, clientPrivKeys[i])\n\t\t}\n\n\t\t\/\/for every key, check that it exists in the remaining array\n\t\tfor _, v := range transformedKeys {\n\t\t\tfound := false\n\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\tif shuffledKeys[i].Equal(v) {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\tt.Error(\"Public key not found in outbound array\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/test the distribution\n\t\tmappingDistrib := make([][]float64, nClients)\n\t\tfor k := 0; k < nClients; k++ {\n\t\t\tmappingDistrib[k] = make([]float64, nClients)\n\t\t}\n\t\tfmt.Print(\"Testing distribution for \", nClients, \" clients.\")\n\t\tfor i := 0; i < repetition; i++ {\n\t\t\tshuffledKeys, newBase, secretCoeff, proof, err = NeffShuffle(clientPks, base, config.CryptoSuite, true)\n\n\t\t\tmapping := make([]int, nClients)\n\t\t\ttransformedKeys := make([]abstract.Point, nClients)\n\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\ttransformedKeys[i] = config.CryptoSuite.Point().Mul(newBase, clientPrivKeys[i])\n\t\t\t}\n\t\t\tfor k, v := range transformedKeys {\n\t\t\t\tfor i := 0; i < nClients; i++ {\n\t\t\t\t\tif shuffledKeys[i].Equal(v) {\n\t\t\t\t\t\tmapping[k] = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor clientID, slot := range mapping {\n\t\t\t\tmappingDistrib[clientID][slot] += float64(1)\n\t\t\t}\n\t\t}\n\t\tmaxDeviation := float64(-1)\n\t\tfor clientID, _ := range mappingDistrib {\n\t\t\tfor slot := 0; slot < nClients; slot++ {\n\t\t\t\t\/\/compute deviation\n\t\t\t\texpectedValue := float64(100) \/ float64(len(mappingDistrib[clientID]))\n\t\t\t\tmappingDistrib[clientID][slot] -= expectedValue\n\t\t\t\tif mappingDistrib[clientID][slot] < 0 {\n\t\t\t\t\tmappingDistrib[clientID][slot] = -mappingDistrib[clientID][slot]\n\t\t\t\t}\n\n\t\t\t\t\/\/store max deviation\n\t\t\t\tif mappingDistrib[clientID][slot] > maxDeviation {\n\t\t\t\t\tmaxDeviation = mappingDistrib[clientID][slot]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"+-%d%%\\n\", int(maxDeviation))\n\t\tif int(maxDeviation) > maxUnfairness {\n\t\t\tt.Error(\"Max allowed distribution biais is \" + strconv.Itoa(maxUnfairness) + \" percent.\")\n\t\t}\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package nzb\n\nimport (\n \"sync\"\n)\n\n\/\/ Chunk of data to download\ntype Chunk struct {\n Groups []string\n Segment *Segment\n}\n\n\/\/ Chunks list of all chunks within NZB\ntype Chunks struct {\n c []*Chunk\n mu *sync.Mutex\n Total int\n Marker int\n}\n\nfunc (c *Chunks) addChunks(groups []string, segments []*Segment) {\n for _, s := range segments {\n cnk := &Chunk {\n Groups: groups,\n Segment: s,\n }\n\n c.c = append(c.c, cnk)\n }\n}\n\n\/\/ GetChunks limited by max\nfunc (c *Chunks) GetChunks(max int) []*Chunk {\n c.mu.Lock()\n defer c.mu.Unlock()\n\n chunks := make([]*Chunk, max)\n\n if c.Marker < len(c.c) {\n for i := 0; i < max; i++ {\n chunks[i] = c.c[c.Marker]\n c.Marker++\n }\n }\n\n return chunks\n}\n\n\/\/ GetNext get next chunk from Chunks\nfunc (c *Chunks) GetNext() *Chunk {\n c.mu.Lock()\n defer c.mu.Unlock()\n\n if c.Marker < len(c.c) {\n cnk := c.c[c.Marker]\n c.Marker++\n return cnk\n }\n\n return nil\n}\n\n\/\/ EOF\n<commit_msg>added Remove(id string) to Chunks, allows to remove previous downloaded chunk based on article id<commit_after>package nzb\n\nimport (\n \"fmt\"\n \"sync\"\n)\n\n\/\/ Chunk of data to download\ntype Chunk struct {\n Groups []string\n Segment *Segment\n}\n\n\/\/ Chunks list of all chunks within NZB\ntype Chunks struct {\n c []*Chunk\n mu *sync.Mutex\n Total int\n Marker int\n}\n\nfunc (c *Chunks) addChunks(groups []string, segments []*Segment) {\n for _, s := range segments {\n cnk := &Chunk {\n Groups: groups,\n Segment: s,\n }\n\n c.c = append(c.c, cnk)\n }\n}\n\n\/\/ GetChunks limited by max\nfunc (c *Chunks) GetChunks(max int) []*Chunk {\n c.mu.Lock()\n defer c.mu.Unlock()\n\n chunks := make([]*Chunk, max)\n\n if c.Marker < len(c.c) {\n for i := 0; i < max; i++ {\n chunks[i] = c.c[c.Marker]\n c.Marker++\n }\n }\n\n return chunks\n}\n\n\/\/ GetNext get next chunk from Chunks\nfunc (c *Chunks) GetNext() *Chunk {\n c.mu.Lock()\n defer c.mu.Unlock()\n\n if c.Marker < len(c.c) {\n cnk := c.c[c.Marker]\n c.Marker++\n return cnk\n }\n\n return nil\n}\n\n\/\/ Remove chunk from chunks based on article id\n\/\/ This allows to re-continue a download\n\/\/ and remove already downloaded chunks from the chunk list\nfunc (c *Chunks) Remove(id string) error {\n\n c.mu.Lock()\n defer c.mu.Unlock()\n\n rm := -1\n for i, cnk := range c.c {\n if cnk.Segment.ID == id {\n \/\/ Found chunk to remove\n rm = i\n }\n }\n\n \/\/ Remove element\n if rm > -1 {\n c.c = append(c.c[:rm], c.c[rm+1:]...)\n \/\/ Update Total\n c.Total = len(c.c)\n\n return nil\n }\n\n return fmt.Errorf(\"id does not exists\")\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/LK4D4\/vndr\/build\"\n\t\"github.com\/LK4D4\/vndr\/godl\"\n)\n\nfunc isCDir(fis []os.FileInfo) bool {\n\tvar hFound bool\n\tfor _, fi := range fis {\n\t\text := filepath.Ext(fi.Name())\n\t\tif ext == \".cc\" || ext == \".cpp\" || ext == \".py\" {\n\t\t\treturn false\n\t\t}\n\t\tif ext == \".h\" {\n\t\t\thFound = true\n\t\t}\n\t}\n\treturn hFound\n}\n\nfunc isPBDir(fis []os.FileInfo) bool {\n\tvar pbFound bool\n\tfor _, fi := range fis {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\text := filepath.Ext(fi.Name())\n\t\tif ext != \".proto\" {\n\t\t\treturn false\n\t\t}\n\t\tpbFound = true\n\t}\n\treturn pbFound\n}\n\nfunc isInterestingDir(path string) bool {\n\tfis, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn isCDir(fis) || isPBDir(fis)\n}\n\nfunc isGoFile(path string) bool {\n\text := filepath.Ext(path)\n\treturn ext == \".go\" || ext == \".c\" || ext == \".h\" || ext == \".s\" || ext == \".proto\"\n}\n\nvar licenseFiles = map[string]bool{\n\t\"LICENSE\": true,\n\t\"COPYING\": true,\n\t\"PATENTS\": true,\n\t\"NOTICE\": true,\n}\n\n\/\/ cleanVendor removes files from unused pacakges and non-go files\nfunc cleanVendor(vendorDir string, realDeps []*build.Package) error {\n\trealPaths := make(map[string]bool)\n\tfor _, pkg := range realDeps {\n\t\trealPaths[pkg.Dir] = true\n\t}\n\tvar paths []string\n\terr := filepath.Walk(vendorDir, func(path string, i os.FileInfo, err error) error {\n\t\tif path == vendorDir {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasPrefix(i.Name(), \".\") || strings.HasPrefix(i.Name(), \"_\") {\n\t\t\treturn os.RemoveAll(path)\n\t\t}\n\t\tif i.IsDir() {\n\t\t\tif i.Name() == \"testdata\" {\n\t\t\t\treturn os.RemoveAll(path)\n\t\t\t}\n\t\t\tif isInterestingDir(path) {\n\t\t\t\trealPaths[path] = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif !realPaths[path] {\n\t\t\t\tpaths = append(paths, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ keep files for licenses\n\t\tif licenseFiles[i.Name()] {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ remove files from non-deps, non-go files and test files\n\t\tif !realPaths[filepath.Dir(path)] || !isGoFile(path) || strings.HasSuffix(path, \"_test.go\") {\n\t\t\treturn os.Remove(path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(paths)))\n\t\/\/ iterate over paths (longest first)\n\tfor _, p := range paths {\n\t\t\/\/ at this point we cleaned all files from unused deps dirs\n\t\tlst, err := ioutil.ReadDir(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ remove licenses if it's only files\n\t\tvar onlyLicenses = true\n\t\tfor _, fi := range lst {\n\t\t\tif !licenseFiles[fi.Name()] {\n\t\t\t\tonlyLicenses = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !onlyLicenses {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ remove all directories if they're not in dependency paths\n\t\tif err := os.RemoveAll(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cleanVCS(v *godl.VCS) error {\n\tif err := os.RemoveAll(filepath.Join(v.Root, \".\"+v.Type)); err != nil {\n\t\treturn err\n\t}\n\treturn filepath.Walk(v.Root, func(path string, i os.FileInfo, err error) error {\n\t\tif path == vendorDir {\n\t\t\treturn nil\n\t\t}\n\t\tif !i.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tname := i.Name()\n\t\tif name == \"vendor\" || name == \"Godeps\" || name == \"_vendor\" {\n\t\t\tif err := os.RemoveAll(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\treturn nil\n\t})\n}\n<commit_msg>clean.go: detect more license files<commit_after>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/LK4D4\/vndr\/build\"\n\t\"github.com\/LK4D4\/vndr\/godl\"\n)\n\nfunc isCDir(fis []os.FileInfo) bool {\n\tvar hFound bool\n\tfor _, fi := range fis {\n\t\text := filepath.Ext(fi.Name())\n\t\tif ext == \".cc\" || ext == \".cpp\" || ext == \".py\" {\n\t\t\treturn false\n\t\t}\n\t\tif ext == \".h\" {\n\t\t\thFound = true\n\t\t}\n\t}\n\treturn hFound\n}\n\nfunc isPBDir(fis []os.FileInfo) bool {\n\tvar pbFound bool\n\tfor _, fi := range fis {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\text := filepath.Ext(fi.Name())\n\t\tif ext != \".proto\" {\n\t\t\treturn false\n\t\t}\n\t\tpbFound = true\n\t}\n\treturn pbFound\n}\n\nfunc isInterestingDir(path string) bool {\n\tfis, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn isCDir(fis) || isPBDir(fis)\n}\n\nfunc isGoFile(path string) bool {\n\text := filepath.Ext(path)\n\treturn ext == \".go\" || ext == \".c\" || ext == \".h\" || ext == \".s\" || ext == \".proto\"\n}\n\n\/\/ licenseFilesRegexp is a regexp of file names that are likely to contain licensing information\nvar licenseFilesRegexp = regexp.MustCompile(`(?i).*(LICENSE|COPYING|PATENT|NOTICE|README).*`)\n\n\/\/ cleanVendor removes files from unused pacakges and non-go files\nfunc cleanVendor(vendorDir string, realDeps []*build.Package) error {\n\trealPaths := make(map[string]bool)\n\tfor _, pkg := range realDeps {\n\t\trealPaths[pkg.Dir] = true\n\t}\n\tvar paths []string\n\terr := filepath.Walk(vendorDir, func(path string, i os.FileInfo, err error) error {\n\t\tif path == vendorDir {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.HasPrefix(i.Name(), \".\") || strings.HasPrefix(i.Name(), \"_\") {\n\t\t\treturn os.RemoveAll(path)\n\t\t}\n\t\tif i.IsDir() {\n\t\t\tif i.Name() == \"testdata\" {\n\t\t\t\treturn os.RemoveAll(path)\n\t\t\t}\n\t\t\tif isInterestingDir(path) {\n\t\t\t\trealPaths[path] = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif !realPaths[path] {\n\t\t\t\tpaths = append(paths, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ keep files for licenses\n\t\tif licenseFilesRegexp.MatchString(i.Name()) {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ remove files from non-deps, non-go files and test files\n\t\tif !realPaths[filepath.Dir(path)] || !isGoFile(path) || strings.HasSuffix(path, \"_test.go\") {\n\t\t\treturn os.Remove(path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(paths)))\n\t\/\/ iterate over paths (longest first)\n\tfor _, p := range paths {\n\t\t\/\/ at this point we cleaned all files from unused deps dirs\n\t\tlst, err := ioutil.ReadDir(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ remove licenses if it's only files\n\t\tvar onlyLicenses = true\n\t\tfor _, fi := range lst {\n\t\t\tif !licenseFilesRegexp.MatchString(fi.Name()) {\n\t\t\t\tonlyLicenses = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !onlyLicenses {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ remove all directories if they're not in dependency paths\n\t\tif err := os.RemoveAll(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cleanVCS(v *godl.VCS) error {\n\tif err := os.RemoveAll(filepath.Join(v.Root, \".\"+v.Type)); err != nil {\n\t\treturn err\n\t}\n\treturn filepath.Walk(v.Root, func(path string, i os.FileInfo, err error) error {\n\t\tif path == vendorDir {\n\t\t\treturn nil\n\t\t}\n\t\tif !i.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tname := i.Name()\n\t\tif name == \"vendor\" || name == \"Godeps\" || name == \"_vendor\" {\n\t\t\tif err := os.RemoveAll(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\n\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPause(t *testing.T) {\n\tMaybeParallel(t)\n\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tprofile := UniqueProfileName(\"pause\")\n\tctx, cancel := context.WithTimeout(context.Background(), Minutes(30))\n\tdefer CleanupWithLogs(t, profile, cancel)\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t}{\n\t\t\t{\"Start\", validateFreshStart},\n\t\t\t{\"SecondStartNoReset\", validateStartNoReset},\n\t\t\t{\"Pause\", validatePause},\n\t\t\t{\"Unpause\", validateUnpause},\n\t\t\t{\"PauseAgain\", validatePause},\n\t\t\t{\"DeletePaused\", validateDelete},\n\t\t}\n\t\tfor _, tc := range tests {\n\t\t\ttc := tc\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\ttc.validator(ctx, t, profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc validateFreshStart(ctx context.Context, t *testing.T, profile string) {\n\targs := append([]string{\"start\", \"-p\", profile, \"--memory=1800\", \"--install-addons=false\", \"--wait=false\"}, StartArgs()...)\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to start minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateStartNoReset validates that starting a running cluster won't invoke a reset\nfunc validateStartNoReset(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"start\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to second start a running minikube with args: %q : %v\", rr.Command(), err)\n\t}\n\tif !NoneDriver() {\n\t\tsoftLog := \"The running cluster does not need a reset\"\n\t\tif !strings.Contains(rr.Output(), softLog) {\n\t\t\tt.Errorf(\"expected the second start log outputs to include %q but got: %s\", softLog, rr.Output())\n\t\t}\n\t}\n\n}\n\nfunc validatePause(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"pause\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to pause minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\nfunc validateUnpause(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"unpause\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to unpause minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\nfunc validateDelete(ctx context.Context, t *testing.T, profile string) {\n\t\/\/ vervose logging because this might go wrong, if container get stuck\n\targs := []string{\"delete\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to delete minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n<commit_msg>wait true<commit_after>\/\/ +build integration\n\n\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPause(t *testing.T) {\n\tMaybeParallel(t)\n\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tprofile := UniqueProfileName(\"pause\")\n\tctx, cancel := context.WithTimeout(context.Background(), Minutes(30))\n\tdefer CleanupWithLogs(t, profile, cancel)\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t}{\n\t\t\t{\"Start\", validateFreshStart},\n\t\t\t{\"SecondStartNoReset\", validateStartNoReset},\n\t\t\t{\"Pause\", validatePause},\n\t\t\t{\"Unpause\", validateUnpause},\n\t\t\t{\"PauseAgain\", validatePause},\n\t\t\t{\"DeletePaused\", validateDelete},\n\t\t}\n\t\tfor _, tc := range tests {\n\t\t\ttc := tc\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\ttc.validator(ctx, t, profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc validateFreshStart(ctx context.Context, t *testing.T, profile string) {\n\targs := append([]string{\"start\", \"-p\", profile, \"--memory=1800\", \"--install-addons=false\", \"--wait=true\"}, StartArgs()...)\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to start minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\n\/\/ validateStartNoReset validates that starting a running cluster won't invoke a reset\nfunc validateStartNoReset(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"start\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to second start a running minikube with args: %q : %v\", rr.Command(), err)\n\t}\n\tif !NoneDriver() {\n\t\tsoftLog := \"The running cluster does not need a reset\"\n\t\tif !strings.Contains(rr.Output(), softLog) {\n\t\t\tt.Errorf(\"expected the second start log outputs to include %q but got: %s\", softLog, rr.Output())\n\t\t}\n\t}\n\n}\n\nfunc validatePause(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"pause\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to pause minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\nfunc validateUnpause(ctx context.Context, t *testing.T, profile string) {\n\targs := []string{\"unpause\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to unpause minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n\nfunc validateDelete(ctx context.Context, t *testing.T, profile string) {\n\t\/\/ vervose logging because this might go wrong, if container get stuck\n\targs := []string{\"delete\", \"-p\", profile, \"--alsologtostderr\", \"-v=5\"}\n\trr, err := Run(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to delete minikube with args: %q : %v\", rr.Command(), err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Clock counts down to or up from a target time.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\ttarget := time.Date(2018, 12, 14, 21, 16, 0, 0, time.Local)\n\tmotto := \"Just Go\"\n\tprintTargetTime(target, motto)\n\texitOnEnterKey()\n\n\tvar previous time.Time\n\tfor {\n\t\tnow := time.Now().Truncate(time.Second)\n\t\tif now != previous {\n\t\t\tprevious = now\n\t\t\tcountdown := now.Sub(target) \/\/ Negative times are before the target\n\t\t\tprintCountdown(now, countdown)\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}\n\nfunc exitOnEnterKey() {\n\tgo func() {\n\t\tbuf := make([]byte, 1)\n\t\t_, _ = os.Stdin.Read(buf)\n\t\tos.Exit(0)\n\t}()\n}\n\nconst (\n\thighlightStart = \"\\x1b[1;35m\"\n\thighlightEnd = \"\\x1b[0m\"\n\tindent = \"\\t\"\n)\n\nfunc printTargetTime(target time.Time, motto string) {\n\tfmt.Print(indent, highlightStart, motto, highlightEnd, \"\\n\")\n\tfmt.Print(indent, target.Format(time.UnixDate), \"\\n\")\n}\n\nfunc printCountdown(now time.Time, countdown time.Duration) {\n\tvar sign string\n\tif countdown >= 0 {\n\t\tsign = \"+\"\n\t} else {\n\t\tsign = \"-\"\n\t\tcountdown = -countdown\n\t}\n\n\tdays := int(countdown \/ (24 * time.Hour))\n\tcountdown = countdown % (24 * time.Hour)\n\n\tfmt.Print(indent, now.Format(time.UnixDate), \" \", sign)\n\tif days > 0 {\n\t\tfmt.Print(days, \"d\")\n\t}\n\tfmt.Print(countdown, \" \\r\")\n\tos.Stdout.Sync()\n}\n<commit_msg>Solstice<commit_after>\/\/ Clock counts down to or up from a target time.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\ttarget := time.Date(2018, 12, 21, 17, 22, 41, 0, time.Local)\n\tmotto := \"Just Go\"\n\tprintTargetTime(target, motto)\n\texitOnEnterKey()\n\n\tvar previous time.Time\n\tfor {\n\t\tnow := time.Now().Truncate(time.Second)\n\t\tif now != previous {\n\t\t\tprevious = now\n\t\t\tcountdown := now.Sub(target) \/\/ Negative times are before the target\n\t\t\tprintCountdown(now, countdown)\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}\n\nfunc exitOnEnterKey() {\n\tgo func() {\n\t\tbuf := make([]byte, 1)\n\t\t_, _ = os.Stdin.Read(buf)\n\t\tos.Exit(0)\n\t}()\n}\n\nconst (\n\thighlightStart = \"\\x1b[1;35m\"\n\thighlightEnd = \"\\x1b[0m\"\n\tindent = \"\\t\"\n)\n\nfunc printTargetTime(target time.Time, motto string) {\n\tfmt.Print(indent, highlightStart, motto, highlightEnd, \"\\n\")\n\tfmt.Print(indent, target.Format(time.UnixDate), \"\\n\")\n}\n\nfunc printCountdown(now time.Time, countdown time.Duration) {\n\tvar sign string\n\tif countdown >= 0 {\n\t\tsign = \"+\"\n\t} else {\n\t\tsign = \"-\"\n\t\tcountdown = -countdown\n\t}\n\n\tdays := int(countdown \/ (24 * time.Hour))\n\tcountdown = countdown % (24 * time.Hour)\n\n\tfmt.Print(indent, now.Format(time.UnixDate), \" \", sign)\n\tif days > 0 {\n\t\tfmt.Print(days, \"d\")\n\t}\n\tfmt.Print(countdown, \" \\r\")\n\tos.Stdout.Sync()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudprovider\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/client-go\/informers\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\n\/\/ ControllerClientBuilder allows you to get clients and configs for controllers\n\/\/ Please note a copy also exists in pkg\/controller\/client_builder.go\n\/\/ TODO: Make this depend on the separate controller utilities repo (issues\/68947)\ntype ControllerClientBuilder interface {\n\tConfig(name string) (*restclient.Config, error)\n\tConfigOrDie(name string) *restclient.Config\n\tClient(name string) (clientset.Interface, error)\n\tClientOrDie(name string) clientset.Interface\n}\n\n\/\/ Interface is an abstract, pluggable interface for cloud providers.\ntype Interface interface {\n\t\/\/ Initialize provides the cloud with a kubernetes client builder and may spawn goroutines\n\t\/\/ to perform housekeeping or run custom controllers specific to the cloud provider.\n\t\/\/ Any tasks started here should be cleaned up when the stop channel closes.\n\tInitialize(clientBuilder ControllerClientBuilder, stop <-chan struct{})\n\t\/\/ LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.\n\tLoadBalancer() (LoadBalancer, bool)\n\t\/\/ Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.\n\tInstances() (Instances, bool)\n\t\/\/ Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.\n\tZones() (Zones, bool)\n\t\/\/ Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.\n\tClusters() (Clusters, bool)\n\t\/\/ Routes returns a routes interface along with whether the interface is supported.\n\tRoutes() (Routes, bool)\n\t\/\/ ProviderName returns the cloud provider ID.\n\tProviderName() string\n\t\/\/ HasClusterID returns true if a ClusterID is required and set\n\tHasClusterID() bool\n}\n\ntype InformerUser interface {\n\t\/\/ SetInformers sets the informer on the cloud object.\n\tSetInformers(informerFactory informers.SharedInformerFactory)\n}\n\n\/\/ Clusters is an abstract, pluggable interface for clusters of containers.\ntype Clusters interface {\n\t\/\/ ListClusters lists the names of the available clusters.\n\tListClusters(ctx context.Context) ([]string, error)\n\t\/\/ Master gets back the address (either DNS name or IP address) of the master node for the cluster.\n\tMaster(ctx context.Context, clusterName string) (string, error)\n}\n\n\/\/ (DEPRECATED) DefaultLoadBalancerName is the default load balancer name that is called from\n\/\/ LoadBalancer.GetLoadBalancerName. Use this method to maintain backward compatible names for\n\/\/ LoadBalancers that were created prior to Kubernetes v1.12. In the future, each provider should\n\/\/ replace this method call in GetLoadBalancerName with a provider-specific implementation that\n\/\/ is less cryptic than the Service's UUID.\nfunc DefaultLoadBalancerName(service *v1.Service) string {\n\t\/\/GCE requires that the name of a load balancer starts with a lower case letter.\n\tret := \"a\" + string(service.UID)\n\tret = strings.Replace(ret, \"-\", \"\", -1)\n\t\/\/AWS requires that the name of a load balancer is shorter than 32 bytes.\n\tif len(ret) > 32 {\n\t\tret = ret[:32]\n\t}\n\treturn ret\n}\n\n\/\/ GetInstanceProviderID builds a ProviderID for a node in a cloud.\nfunc GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {\n\tinstances, ok := cloud.Instances()\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to get instances from cloud provider\")\n\t}\n\tinstanceID, err := instances.InstanceID(ctx, nodeName)\n\tif err != nil {\n\t\tif err == NotImplemented {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn \"\", fmt.Errorf(\"failed to get instance ID from cloud provider: %v\", err)\n\t}\n\treturn cloud.ProviderName() + \":\/\/\" + instanceID, nil\n}\n\n\/\/ LoadBalancer is an abstract, pluggable interface for load balancers.\n\/\/\n\/\/ Cloud provider may chose to implement the logic for\n\/\/ constructing\/destroying specific kinds of load balancers in a\n\/\/ controller separate from the ServiceController. If this is the case,\n\/\/ then {Ensure,Update}LoadBalancer must return the ImplementedElsewhere error.\n\/\/ For the given LB service, the GetLoadBalancer must return \"exists=True\" if\n\/\/ there exists a LoadBalancer instance created by ServiceController.\n\/\/ In all other cases, GetLoadBalancer must return a NotFound error.\n\/\/ EnsureLoadBalancerDeleted must not return ImplementedElsewhere to ensure\n\/\/ proper teardown of resources that were allocated by the ServiceController.\n\/\/ This can happen if a user changes the type of LB via an update to the resource\n\/\/ or when migrating from ServiceController to alternate implementation.\n\/\/ The finalizer on the service will be added and removed by ServiceController\n\/\/ irrespective of the ImplementedElsewhere error. Additional finalizers for\n\/\/ LB services must be managed in the alternate implementation.\ntype LoadBalancer interface {\n\t\/\/ TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service\n\t\/\/ GetLoadBalancer returns whether the specified load balancer exists, and\n\t\/\/ if so, what its status is.\n\t\/\/ Implementations must treat the *v1.Service parameter as read-only and not modify it.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tGetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)\n\t\/\/ GetLoadBalancerName returns the name of the load balancer. Implementations must treat the\n\t\/\/ *v1.Service parameter as read-only and not modify it.\n\tGetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string\n\t\/\/ EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer\n\t\/\/ Implementations must treat the *v1.Service and *v1.Node\n\t\/\/ parameters as read-only and not modify them.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tEnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)\n\t\/\/ UpdateLoadBalancer updates hosts under the specified load balancer.\n\t\/\/ Implementations must treat the *v1.Service and *v1.Node\n\t\/\/ parameters as read-only and not modify them.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tUpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error\n\t\/\/ EnsureLoadBalancerDeleted deletes the specified load balancer if it\n\t\/\/ exists, returning nil if the load balancer specified either didn't exist or\n\t\/\/ was successfully deleted.\n\t\/\/ This construction is useful because many cloud providers' load balancers\n\t\/\/ have multiple underlying components, meaning a Get could say that the LB\n\t\/\/ doesn't exist even if some part of it is still laying around.\n\t\/\/ Implementations must treat the *v1.Service parameter as read-only and not modify it.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tEnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error\n}\n\n\/\/ Instances is an abstract, pluggable interface for sets of instances.\ntype Instances interface {\n\t\/\/ NodeAddresses returns the addresses of the specified instance.\n\tNodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error)\n\t\/\/ NodeAddressesByProviderID returns the addresses of the specified instance.\n\t\/\/ The instance is specified using the providerID of the node. The\n\t\/\/ ProviderID is a unique identifier of the node. This will not be called\n\t\/\/ from the node whose nodeaddresses are being queried. i.e. local metadata\n\t\/\/ services cannot be used in this method to obtain nodeaddresses\n\t\/\/ Deprecated: Remove once all calls are migrated to InstanceMetadataByProviderID\n\tNodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)\n\t\/\/ InstanceID returns the cloud provider ID of the node with the specified NodeName.\n\t\/\/ Note that if the instance does not exist, we must return (\"\", cloudprovider.InstanceNotFound)\n\t\/\/ cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped\/sleeping\n\tInstanceID(ctx context.Context, nodeName types.NodeName) (string, error)\n\t\/\/ InstanceType returns the type of the specified instance.\n\tInstanceType(ctx context.Context, name types.NodeName) (string, error)\n\t\/\/ InstanceTypeByProviderID returns the type of the specified instance.\n\t\/\/ Deprecated: Remove once all calls are migrated to InstanceMetadataByProviderID\n\tInstanceTypeByProviderID(ctx context.Context, providerID string) (string, error)\n\t\/\/ AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances\n\t\/\/ expected format for the key is standard ssh-keygen format: <protocol> <blob>\n\tAddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error\n\t\/\/ CurrentNodeName returns the name of the node we are currently running on\n\t\/\/ On most clouds (e.g. GCE) this is the hostname, so we provide the hostname\n\tCurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error)\n\t\/\/ InstanceExistsByProviderID returns true if the instance for the given provider exists.\n\t\/\/ If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.\n\t\/\/ This method should still return true for instances that exist but are stopped\/sleeping.\n\t\/\/ Deprecated: Remove once all calls are migrated to InstanceMetadataByProviderID\n\tInstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)\n\t\/\/ InstanceShutdownByProviderID returns true if the instance is shutdown in cloudprovider\n\tInstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error)\n\t\/\/ InstanceMetadataByProviderID returns the instance's metadata.\n\tInstanceMetadataByProviderID(ctx context.Context, providerID string) (*InstanceMetadata, error)\n}\n\n\/\/ Route is a representation of an advanced routing rule.\ntype Route struct {\n\t\/\/ Name is the name of the routing rule in the cloud-provider.\n\t\/\/ It will be ignored in a Create (although nameHint may influence it)\n\tName string\n\t\/\/ TargetNode is the NodeName of the target instance.\n\tTargetNode types.NodeName\n\t\/\/ DestinationCIDR is the CIDR format IP range that this routing rule\n\t\/\/ applies to.\n\tDestinationCIDR string\n\t\/\/ Blackhole is set to true if this is a blackhole route\n\t\/\/ The node controller will delete the route if it is in the managed range.\n\tBlackhole bool\n}\n\n\/\/ Routes is an abstract, pluggable interface for advanced routing rules.\ntype Routes interface {\n\t\/\/ ListRoutes lists all managed routes that belong to the specified clusterName\n\tListRoutes(ctx context.Context, clusterName string) ([]*Route, error)\n\t\/\/ CreateRoute creates the described managed route\n\t\/\/ route.Name will be ignored, although the cloud-provider may use nameHint\n\t\/\/ to create a more user-meaningful name.\n\tCreateRoute(ctx context.Context, clusterName string, nameHint string, route *Route) error\n\t\/\/ DeleteRoute deletes the specified managed route\n\t\/\/ Route should be as returned by ListRoutes\n\tDeleteRoute(ctx context.Context, clusterName string, route *Route) error\n}\n\nvar (\n\tDiskNotFound = errors.New(\"disk is not found\")\n\tImplementedElsewhere = errors.New(\"implemented by alternate to cloud provider\")\n\tInstanceNotFound = errors.New(\"instance not found\")\n\tNotImplemented = errors.New(\"unimplemented\")\n)\n\n\/\/ Zone represents the location of a particular machine.\ntype Zone struct {\n\tFailureDomain string\n\tRegion string\n}\n\n\/\/ Zones is an abstract, pluggable interface for zone enumeration.\ntype Zones interface {\n\t\/\/ GetZone returns the Zone containing the current failure zone and locality region that the program is running in\n\t\/\/ In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.\n\t\/\/ For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone\n\t\/\/ can no longer be called from the kubelets.\n\tGetZone(ctx context.Context) (Zone, error)\n\n\t\/\/ GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerID\n\t\/\/ This method is particularly used in the context of external cloud providers where node initialization must be done\n\t\/\/ outside the kubelets.\n\tGetZoneByProviderID(ctx context.Context, providerID string) (Zone, error)\n\n\t\/\/ GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name\n\t\/\/ This method is particularly used in the context of external cloud providers where node initialization must be done\n\t\/\/ outside the kubelets.\n\tGetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (Zone, error)\n}\n\n\/\/ PVLabeler is an abstract, pluggable interface for fetching labels for volumes\ntype PVLabeler interface {\n\tGetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error)\n}\n\n\/\/ InstanceMetadata contains metadata about the specific instance.\ntype InstanceMetadata struct {\n\t\/\/ ProviderID is provider's id that instance belongs to.\n\tProviderID string\n\t\/\/ Type is instance's type.\n\tType string\n\t\/\/ NodeAddress contains information for the instance's address.\n\tNodeAddresses []v1.NodeAddress\n}\n<commit_msg>revert InstanceMetadataByProviderID definition and deprecation of related instance functions<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudprovider\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/client-go\/informers\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\n\/\/ ControllerClientBuilder allows you to get clients and configs for controllers\n\/\/ Please note a copy also exists in pkg\/controller\/client_builder.go\n\/\/ TODO: Make this depend on the separate controller utilities repo (issues\/68947)\ntype ControllerClientBuilder interface {\n\tConfig(name string) (*restclient.Config, error)\n\tConfigOrDie(name string) *restclient.Config\n\tClient(name string) (clientset.Interface, error)\n\tClientOrDie(name string) clientset.Interface\n}\n\n\/\/ Interface is an abstract, pluggable interface for cloud providers.\ntype Interface interface {\n\t\/\/ Initialize provides the cloud with a kubernetes client builder and may spawn goroutines\n\t\/\/ to perform housekeeping or run custom controllers specific to the cloud provider.\n\t\/\/ Any tasks started here should be cleaned up when the stop channel closes.\n\tInitialize(clientBuilder ControllerClientBuilder, stop <-chan struct{})\n\t\/\/ LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.\n\tLoadBalancer() (LoadBalancer, bool)\n\t\/\/ Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.\n\tInstances() (Instances, bool)\n\t\/\/ Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.\n\tZones() (Zones, bool)\n\t\/\/ Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.\n\tClusters() (Clusters, bool)\n\t\/\/ Routes returns a routes interface along with whether the interface is supported.\n\tRoutes() (Routes, bool)\n\t\/\/ ProviderName returns the cloud provider ID.\n\tProviderName() string\n\t\/\/ HasClusterID returns true if a ClusterID is required and set\n\tHasClusterID() bool\n}\n\ntype InformerUser interface {\n\t\/\/ SetInformers sets the informer on the cloud object.\n\tSetInformers(informerFactory informers.SharedInformerFactory)\n}\n\n\/\/ Clusters is an abstract, pluggable interface for clusters of containers.\ntype Clusters interface {\n\t\/\/ ListClusters lists the names of the available clusters.\n\tListClusters(ctx context.Context) ([]string, error)\n\t\/\/ Master gets back the address (either DNS name or IP address) of the master node for the cluster.\n\tMaster(ctx context.Context, clusterName string) (string, error)\n}\n\n\/\/ (DEPRECATED) DefaultLoadBalancerName is the default load balancer name that is called from\n\/\/ LoadBalancer.GetLoadBalancerName. Use this method to maintain backward compatible names for\n\/\/ LoadBalancers that were created prior to Kubernetes v1.12. In the future, each provider should\n\/\/ replace this method call in GetLoadBalancerName with a provider-specific implementation that\n\/\/ is less cryptic than the Service's UUID.\nfunc DefaultLoadBalancerName(service *v1.Service) string {\n\t\/\/GCE requires that the name of a load balancer starts with a lower case letter.\n\tret := \"a\" + string(service.UID)\n\tret = strings.Replace(ret, \"-\", \"\", -1)\n\t\/\/AWS requires that the name of a load balancer is shorter than 32 bytes.\n\tif len(ret) > 32 {\n\t\tret = ret[:32]\n\t}\n\treturn ret\n}\n\n\/\/ GetInstanceProviderID builds a ProviderID for a node in a cloud.\nfunc GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {\n\tinstances, ok := cloud.Instances()\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to get instances from cloud provider\")\n\t}\n\tinstanceID, err := instances.InstanceID(ctx, nodeName)\n\tif err != nil {\n\t\tif err == NotImplemented {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn \"\", fmt.Errorf(\"failed to get instance ID from cloud provider: %v\", err)\n\t}\n\treturn cloud.ProviderName() + \":\/\/\" + instanceID, nil\n}\n\n\/\/ LoadBalancer is an abstract, pluggable interface for load balancers.\n\/\/\n\/\/ Cloud provider may chose to implement the logic for\n\/\/ constructing\/destroying specific kinds of load balancers in a\n\/\/ controller separate from the ServiceController. If this is the case,\n\/\/ then {Ensure,Update}LoadBalancer must return the ImplementedElsewhere error.\n\/\/ For the given LB service, the GetLoadBalancer must return \"exists=True\" if\n\/\/ there exists a LoadBalancer instance created by ServiceController.\n\/\/ In all other cases, GetLoadBalancer must return a NotFound error.\n\/\/ EnsureLoadBalancerDeleted must not return ImplementedElsewhere to ensure\n\/\/ proper teardown of resources that were allocated by the ServiceController.\n\/\/ This can happen if a user changes the type of LB via an update to the resource\n\/\/ or when migrating from ServiceController to alternate implementation.\n\/\/ The finalizer on the service will be added and removed by ServiceController\n\/\/ irrespective of the ImplementedElsewhere error. Additional finalizers for\n\/\/ LB services must be managed in the alternate implementation.\ntype LoadBalancer interface {\n\t\/\/ TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service\n\t\/\/ GetLoadBalancer returns whether the specified load balancer exists, and\n\t\/\/ if so, what its status is.\n\t\/\/ Implementations must treat the *v1.Service parameter as read-only and not modify it.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tGetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)\n\t\/\/ GetLoadBalancerName returns the name of the load balancer. Implementations must treat the\n\t\/\/ *v1.Service parameter as read-only and not modify it.\n\tGetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string\n\t\/\/ EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer\n\t\/\/ Implementations must treat the *v1.Service and *v1.Node\n\t\/\/ parameters as read-only and not modify them.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tEnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)\n\t\/\/ UpdateLoadBalancer updates hosts under the specified load balancer.\n\t\/\/ Implementations must treat the *v1.Service and *v1.Node\n\t\/\/ parameters as read-only and not modify them.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tUpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error\n\t\/\/ EnsureLoadBalancerDeleted deletes the specified load balancer if it\n\t\/\/ exists, returning nil if the load balancer specified either didn't exist or\n\t\/\/ was successfully deleted.\n\t\/\/ This construction is useful because many cloud providers' load balancers\n\t\/\/ have multiple underlying components, meaning a Get could say that the LB\n\t\/\/ doesn't exist even if some part of it is still laying around.\n\t\/\/ Implementations must treat the *v1.Service parameter as read-only and not modify it.\n\t\/\/ Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager\n\tEnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error\n}\n\n\/\/ Instances is an abstract, pluggable interface for sets of instances.\ntype Instances interface {\n\t\/\/ NodeAddresses returns the addresses of the specified instance.\n\tNodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error)\n\t\/\/ NodeAddressesByProviderID returns the addresses of the specified instance.\n\t\/\/ The instance is specified using the providerID of the node. The\n\t\/\/ ProviderID is a unique identifier of the node. This will not be called\n\t\/\/ from the node whose nodeaddresses are being queried. i.e. local metadata\n\t\/\/ services cannot be used in this method to obtain nodeaddresses\n\tNodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)\n\t\/\/ InstanceID returns the cloud provider ID of the node with the specified NodeName.\n\t\/\/ Note that if the instance does not exist, we must return (\"\", cloudprovider.InstanceNotFound)\n\t\/\/ cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped\/sleeping\n\tInstanceID(ctx context.Context, nodeName types.NodeName) (string, error)\n\t\/\/ InstanceType returns the type of the specified instance.\n\tInstanceType(ctx context.Context, name types.NodeName) (string, error)\n\t\/\/ InstanceTypeByProviderID returns the type of the specified instance.\n\tInstanceTypeByProviderID(ctx context.Context, providerID string) (string, error)\n\t\/\/ AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances\n\t\/\/ expected format for the key is standard ssh-keygen format: <protocol> <blob>\n\tAddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error\n\t\/\/ CurrentNodeName returns the name of the node we are currently running on\n\t\/\/ On most clouds (e.g. GCE) this is the hostname, so we provide the hostname\n\tCurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error)\n\t\/\/ InstanceExistsByProviderID returns true if the instance for the given provider exists.\n\t\/\/ If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.\n\t\/\/ This method should still return true for instances that exist but are stopped\/sleeping.\n\tInstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)\n\t\/\/ InstanceShutdownByProviderID returns true if the instance is shutdown in cloudprovider\n\tInstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error)\n}\n\n\/\/ Route is a representation of an advanced routing rule.\ntype Route struct {\n\t\/\/ Name is the name of the routing rule in the cloud-provider.\n\t\/\/ It will be ignored in a Create (although nameHint may influence it)\n\tName string\n\t\/\/ TargetNode is the NodeName of the target instance.\n\tTargetNode types.NodeName\n\t\/\/ DestinationCIDR is the CIDR format IP range that this routing rule\n\t\/\/ applies to.\n\tDestinationCIDR string\n\t\/\/ Blackhole is set to true if this is a blackhole route\n\t\/\/ The node controller will delete the route if it is in the managed range.\n\tBlackhole bool\n}\n\n\/\/ Routes is an abstract, pluggable interface for advanced routing rules.\ntype Routes interface {\n\t\/\/ ListRoutes lists all managed routes that belong to the specified clusterName\n\tListRoutes(ctx context.Context, clusterName string) ([]*Route, error)\n\t\/\/ CreateRoute creates the described managed route\n\t\/\/ route.Name will be ignored, although the cloud-provider may use nameHint\n\t\/\/ to create a more user-meaningful name.\n\tCreateRoute(ctx context.Context, clusterName string, nameHint string, route *Route) error\n\t\/\/ DeleteRoute deletes the specified managed route\n\t\/\/ Route should be as returned by ListRoutes\n\tDeleteRoute(ctx context.Context, clusterName string, route *Route) error\n}\n\nvar (\n\tDiskNotFound = errors.New(\"disk is not found\")\n\tImplementedElsewhere = errors.New(\"implemented by alternate to cloud provider\")\n\tInstanceNotFound = errors.New(\"instance not found\")\n\tNotImplemented = errors.New(\"unimplemented\")\n)\n\n\/\/ Zone represents the location of a particular machine.\ntype Zone struct {\n\tFailureDomain string\n\tRegion string\n}\n\n\/\/ Zones is an abstract, pluggable interface for zone enumeration.\ntype Zones interface {\n\t\/\/ GetZone returns the Zone containing the current failure zone and locality region that the program is running in\n\t\/\/ In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.\n\t\/\/ For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone\n\t\/\/ can no longer be called from the kubelets.\n\tGetZone(ctx context.Context) (Zone, error)\n\n\t\/\/ GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerID\n\t\/\/ This method is particularly used in the context of external cloud providers where node initialization must be done\n\t\/\/ outside the kubelets.\n\tGetZoneByProviderID(ctx context.Context, providerID string) (Zone, error)\n\n\t\/\/ GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name\n\t\/\/ This method is particularly used in the context of external cloud providers where node initialization must be done\n\t\/\/ outside the kubelets.\n\tGetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (Zone, error)\n}\n\n\/\/ PVLabeler is an abstract, pluggable interface for fetching labels for volumes\ntype PVLabeler interface {\n\tGetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error)\n}\n\n\/\/ InstanceMetadata contains metadata about the specific instance.\ntype InstanceMetadata struct {\n\t\/\/ ProviderID is provider's id that instance belongs to.\n\tProviderID string\n\t\/\/ Type is instance's type.\n\tType string\n\t\/\/ NodeAddress contains information for the instance's address.\n\tNodeAddresses []v1.NodeAddress\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\n\t\"google.golang.org\/grpc\"\n\tqspb \"knative.dev\/pkg\/third_party\/mako\/proto\/quickstore_go_proto\"\n)\n\nconst (\n\tport = \":9813\"\n)\n\ntype server struct {\n\tstopOnce sync.Once\n\tstopCh chan struct{}\n}\n\nfunc (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOutput, error) {\n\tm := jsonpb.Marshaler{}\n\tqi, _ := m.MarshalToString(in.GetQuickstoreInput())\n\tfmt.Printf(\"# %s\\n\", qi)\n\tcols := []string{\"inputValue\", \"errorMessage\"}\n\twriter := csv.NewWriter(os.Stdout)\n\n\tfor _, sp := range in.GetSamplePoints() {\n\t\tfor _, mv := range sp.GetMetricValueList() {\n\t\t\tcols = updateCols(cols, mv.GetValueKey())\n\t\t\tvals := map[string]string{\"inputValue\": fmt.Sprintf(\"%f\", sp.GetInputValue())}\n\t\t\tvals[mv.GetValueKey()] = fmt.Sprintf(\"%f\", mv.GetValue())\n\t\t\twriter.Write(makeRow(cols, vals))\n\t\t}\n\t}\n\n\tfor _, ra := range in.GetRunAggregates() {\n\t\tcols = updateCols(cols, ra.GetValueKey())\n\t\tvals := map[string]string{ra.GetValueKey(): fmt.Sprintf(\"%f\", ra.GetValue())}\n\t\twriter.Write(makeRow(cols, vals))\n\t}\n\n\tfor _, sa := range in.GetSampleErrors() {\n\t\tvals := map[string]string{\"inputValue\": fmt.Sprintf(\"%f\", sa.GetInputValue()), \"errorMessage\": sa.GetErrorMessage()}\n\t\twriter.Write(makeRow(cols, vals))\n\t}\n\n\twriter.Flush()\n\tfmt.Printf(\"# %s\\n\", strings.Join(cols, \",\"))\n\n\treturn &qspb.StoreOutput{}, nil\n}\n\nfunc updateCols(prototype []string, k string) []string {\n\tfor _, n := range prototype {\n\t\tif n == k {\n\t\t\treturn prototype\n\t\t}\n\t}\n\tprototype = append(prototype, k)\n\treturn prototype\n}\n\nfunc makeRow(prototype []string, points map[string]string) []string {\n\trow := make([]string, len(prototype))\n\t\/\/ n^2 but whatever\n\tfor k, v := range points {\n\t\tfor i, n := range prototype {\n\t\t\tif k == n {\n\t\t\t\trow[i] = v\n\t\t\t}\n\t\t}\n\t}\n\treturn row\n}\n\nfunc (s *server) ShutdownMicroservice(ctx context.Context, in *qspb.ShutdownInput) (*qspb.ShutdownOutput, error) {\n\ts.stopOnce.Do(func() { close(s.stopCh) })\n\treturn &qspb.ShutdownOutput{}, nil\n}\n\nfunc main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\ts := grpc.NewServer()\n\tstopCh := make(chan struct{})\n\tgo func() {\n\t\tqspb.RegisterQuickstoreServer(s, &server{stopCh: stopCh})\n\t\tif err := s.Serve(lis); err != nil {\n\t\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t\t}\n\t}()\n\t<-stopCh\n\ts.GracefulStop()\n}\n<commit_msg>Increase maxRecvMsgSize in grpc server of mako-stub (#707)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\n\t\"google.golang.org\/grpc\"\n\tqspb \"knative.dev\/pkg\/third_party\/mako\/proto\/quickstore_go_proto\"\n)\n\nconst (\n\tport = \":9813\"\n\t\/\/ A 10 minutes run at 1000 rps of eventing perf tests is usually ~= 70 MBi, so 100MBi is reasonable\n\tdefaultServerMaxReceiveMessageSize = 1024 * 1024 * 100\n)\n\ntype server struct {\n\tstopOnce sync.Once\n\tstopCh chan struct{}\n}\n\nfunc (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOutput, error) {\n\tm := jsonpb.Marshaler{}\n\tqi, _ := m.MarshalToString(in.GetQuickstoreInput())\n\tfmt.Printf(\"# %s\\n\", qi)\n\tcols := []string{\"inputValue\", \"errorMessage\"}\n\twriter := csv.NewWriter(os.Stdout)\n\n\tfor _, sp := range in.GetSamplePoints() {\n\t\tfor _, mv := range sp.GetMetricValueList() {\n\t\t\tcols = updateCols(cols, mv.GetValueKey())\n\t\t\tvals := map[string]string{\"inputValue\": fmt.Sprintf(\"%f\", sp.GetInputValue())}\n\t\t\tvals[mv.GetValueKey()] = fmt.Sprintf(\"%f\", mv.GetValue())\n\t\t\twriter.Write(makeRow(cols, vals))\n\t\t}\n\t}\n\n\tfor _, ra := range in.GetRunAggregates() {\n\t\tcols = updateCols(cols, ra.GetValueKey())\n\t\tvals := map[string]string{ra.GetValueKey(): fmt.Sprintf(\"%f\", ra.GetValue())}\n\t\twriter.Write(makeRow(cols, vals))\n\t}\n\n\tfor _, sa := range in.GetSampleErrors() {\n\t\tvals := map[string]string{\"inputValue\": fmt.Sprintf(\"%f\", sa.GetInputValue()), \"errorMessage\": sa.GetErrorMessage()}\n\t\twriter.Write(makeRow(cols, vals))\n\t}\n\n\twriter.Flush()\n\tfmt.Printf(\"# %s\\n\", strings.Join(cols, \",\"))\n\n\treturn &qspb.StoreOutput{}, nil\n}\n\nfunc updateCols(prototype []string, k string) []string {\n\tfor _, n := range prototype {\n\t\tif n == k {\n\t\t\treturn prototype\n\t\t}\n\t}\n\tprototype = append(prototype, k)\n\treturn prototype\n}\n\nfunc makeRow(prototype []string, points map[string]string) []string {\n\trow := make([]string, len(prototype))\n\t\/\/ n^2 but whatever\n\tfor k, v := range points {\n\t\tfor i, n := range prototype {\n\t\t\tif k == n {\n\t\t\t\trow[i] = v\n\t\t\t}\n\t\t}\n\t}\n\treturn row\n}\n\nfunc (s *server) ShutdownMicroservice(ctx context.Context, in *qspb.ShutdownInput) (*qspb.ShutdownOutput, error) {\n\ts.stopOnce.Do(func() { close(s.stopCh) })\n\treturn &qspb.ShutdownOutput{}, nil\n}\n\nfunc main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\ts := grpc.NewServer(grpc.MaxRecvMsgSize(defaultServerMaxReceiveMessageSize))\n\tstopCh := make(chan struct{})\n\tgo func() {\n\t\tqspb.RegisterQuickstoreServer(s, &server{stopCh: stopCh})\n\t\tif err := s.Serve(lis); err != nil {\n\t\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t\t}\n\t}()\n\t<-stopCh\n\ts.GracefulStop()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Michal Witkowski. All Rights Reserved.\n\/\/ See LICENSE for licensing terms.\n\npackage grpc_testing\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"math\/big\"\n\t\"net\"\n\t\"time\"\n\n\tpb_testproto \"github.com\/grpc-ecosystem\/go-grpc-middleware\/testing\/testproto\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\nvar (\n\tflagTls = flag.Bool(\"use_tls\", true, \"whether all gRPC middleware tests should use tls\")\n\n\tcertPEM []byte\n\tkeyPEM []byte\n)\n\n\/\/ InterceptorTestSuite is a testify\/Suite that starts a gRPC PingService server and a client.\ntype InterceptorTestSuite struct {\n\tsuite.Suite\n\n\tTestService pb_testproto.TestServiceServer\n\tServerOpts []grpc.ServerOption\n\tClientOpts []grpc.DialOption\n\n\tserverAddr string\n\tServerListener net.Listener\n\tServer *grpc.Server\n\tclientConn *grpc.ClientConn\n\tClient pb_testproto.TestServiceClient\n\n\trestartServerWithDelayedStart chan time.Duration\n\tserverRunning chan bool\n}\n\nfunc (s *InterceptorTestSuite) SetupSuite() {\n\ts.restartServerWithDelayedStart = make(chan time.Duration)\n\ts.serverRunning = make(chan bool)\n\n\ts.serverAddr = \"127.0.0.1:0\"\n\tvar err error\n\tcertPEM, keyPEM, err = generateCertAndKey([]string{\"localhost\", \"example.com\"})\n\tif err != nil {\n\t\ts.T().Fatalf(\"unable to generate test certificate\/key: \" + err.Error())\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar err error\n\t\t\ts.ServerListener, err = net.Listen(\"tcp\", s.serverAddr)\n\t\t\tif err != nil {\n\t\t\t\ts.T().Fatalf(\"unable to listen on address %s: %v\", s.serverAddr, err)\n\t\t\t}\n\t\t\ts.serverAddr = s.ServerListener.Addr().String()\n\t\t\trequire.NoError(s.T(), err, \"must be able to allocate a port for serverListener\")\n\t\t\tif *flagTls {\n\t\t\t\tcert, err := tls.X509KeyPair(certPEM, keyPEM)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.T().Fatalf(\"unable to load test TLS certificate: %v\", err)\n\t\t\t\t}\n\t\t\t\tcreds := credentials.NewServerTLSFromCert(&cert)\n\t\t\t\ts.ServerOpts = append(s.ServerOpts, grpc.Creds(creds))\n\t\t\t}\n\t\t\t\/\/ This is the point where we hook up the interceptor\n\t\t\ts.Server = grpc.NewServer(s.ServerOpts...)\n\t\t\t\/\/ Create a service of the instantiator hasn't provided one.\n\t\t\tif s.TestService == nil {\n\t\t\t\ts.TestService = &TestPingService{T: s.T()}\n\t\t\t}\n\t\t\tpb_testproto.RegisterTestServiceServer(s.Server, s.TestService)\n\n\t\t\tgo func() {\n\t\t\t\ts.Server.Serve(s.ServerListener)\n\t\t\t}()\n\t\t\tif s.Client == nil {\n\t\t\t\ts.Client = s.NewClient(s.ClientOpts...)\n\t\t\t}\n\n\t\t\ts.serverRunning <- true\n\n\t\t\td := <-s.restartServerWithDelayedStart\n\t\t\ts.Server.Stop()\n\t\t\ttime.Sleep(d)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-s.serverRunning:\n\tcase <-time.After(2 * time.Second):\n\t\ts.T().Fatal(\"server failed to start before deadline\")\n\t}\n}\n\nfunc (s *InterceptorTestSuite) RestartServer(delayedStart time.Duration) <-chan bool {\n\ts.restartServerWithDelayedStart <- delayedStart\n\ttime.Sleep(10 * time.Millisecond)\n\treturn s.serverRunning\n}\n\nfunc (s *InterceptorTestSuite) NewClient(dialOpts ...grpc.DialOption) pb_testproto.TestServiceClient {\n\tnewDialOpts := append(dialOpts, grpc.WithBlock())\n\tif *flagTls {\n\t\tcp := x509.NewCertPool()\n\t\tif !cp.AppendCertsFromPEM(certPEM) {\n\t\t\ts.T().Fatal(\"failed to append certificate\")\n\t\t}\n\t\tcreds := credentials.NewTLS(&tls.Config{ServerName: \"localhost\", RootCAs: cp})\n\t\tnewDialOpts = append(newDialOpts, grpc.WithTransportCredentials(creds))\n\t} else {\n\t\tnewDialOpts = append(newDialOpts, grpc.WithInsecure())\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\tdefer cancel()\n\tclientConn, err := grpc.DialContext(ctx, s.ServerAddr(), newDialOpts...)\n\trequire.NoError(s.T(), err, \"must not error on client Dial\")\n\treturn pb_testproto.NewTestServiceClient(clientConn)\n}\n\nfunc (s *InterceptorTestSuite) ServerAddr() string {\n\treturn s.serverAddr\n}\n\nfunc (s *InterceptorTestSuite) SimpleCtx() context.Context {\n\tctx, _ := context.WithTimeout(context.TODO(), 2*time.Second)\n\treturn ctx\n}\n\nfunc (s *InterceptorTestSuite) DeadlineCtx(deadline time.Time) context.Context {\n\tctx, _ := context.WithDeadline(context.TODO(), deadline)\n\treturn ctx\n}\n\nfunc (s *InterceptorTestSuite) TearDownSuite() {\n\ttime.Sleep(10 * time.Millisecond)\n\tif s.ServerListener != nil {\n\t\ts.Server.GracefulStop()\n\t\ts.T().Logf(\"stopped grpc.Server at: %v\", s.ServerAddr())\n\t\ts.ServerListener.Close()\n\t}\n\tif s.clientConn != nil {\n\t\ts.clientConn.Close()\n\t}\n}\n\n\/\/ generateCertAndKey copied from https:\/\/github.com\/johanbrandhorst\/certify\/blob\/master\/issuers\/vault\/vault_suite_test.go#L255\n\/\/ with minor modifications.\nfunc generateCertAndKey(san []string) ([]byte, []byte, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(time.Hour)\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: \"Certify Test Cert\",\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: san,\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcertOut := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: derBytes,\n\t})\n\tkeyOut := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(priv),\n\t})\n\n\treturn certOut, keyOut, nil\n}\n<commit_msg>Set CommonName to example.com in test certificates (#334)<commit_after>\/\/ Copyright 2016 Michal Witkowski. All Rights Reserved.\n\/\/ See LICENSE for licensing terms.\n\npackage grpc_testing\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"math\/big\"\n\t\"net\"\n\t\"time\"\n\n\tpb_testproto \"github.com\/grpc-ecosystem\/go-grpc-middleware\/testing\/testproto\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\nvar (\n\tflagTls = flag.Bool(\"use_tls\", true, \"whether all gRPC middleware tests should use tls\")\n\n\tcertPEM []byte\n\tkeyPEM []byte\n)\n\n\/\/ InterceptorTestSuite is a testify\/Suite that starts a gRPC PingService server and a client.\ntype InterceptorTestSuite struct {\n\tsuite.Suite\n\n\tTestService pb_testproto.TestServiceServer\n\tServerOpts []grpc.ServerOption\n\tClientOpts []grpc.DialOption\n\n\tserverAddr string\n\tServerListener net.Listener\n\tServer *grpc.Server\n\tclientConn *grpc.ClientConn\n\tClient pb_testproto.TestServiceClient\n\n\trestartServerWithDelayedStart chan time.Duration\n\tserverRunning chan bool\n}\n\nfunc (s *InterceptorTestSuite) SetupSuite() {\n\ts.restartServerWithDelayedStart = make(chan time.Duration)\n\ts.serverRunning = make(chan bool)\n\n\ts.serverAddr = \"127.0.0.1:0\"\n\tvar err error\n\tcertPEM, keyPEM, err = generateCertAndKey([]string{\"localhost\", \"example.com\"})\n\tif err != nil {\n\t\ts.T().Fatalf(\"unable to generate test certificate\/key: \" + err.Error())\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar err error\n\t\t\ts.ServerListener, err = net.Listen(\"tcp\", s.serverAddr)\n\t\t\tif err != nil {\n\t\t\t\ts.T().Fatalf(\"unable to listen on address %s: %v\", s.serverAddr, err)\n\t\t\t}\n\t\t\ts.serverAddr = s.ServerListener.Addr().String()\n\t\t\trequire.NoError(s.T(), err, \"must be able to allocate a port for serverListener\")\n\t\t\tif *flagTls {\n\t\t\t\tcert, err := tls.X509KeyPair(certPEM, keyPEM)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.T().Fatalf(\"unable to load test TLS certificate: %v\", err)\n\t\t\t\t}\n\t\t\t\tcreds := credentials.NewServerTLSFromCert(&cert)\n\t\t\t\ts.ServerOpts = append(s.ServerOpts, grpc.Creds(creds))\n\t\t\t}\n\t\t\t\/\/ This is the point where we hook up the interceptor\n\t\t\ts.Server = grpc.NewServer(s.ServerOpts...)\n\t\t\t\/\/ Create a service of the instantiator hasn't provided one.\n\t\t\tif s.TestService == nil {\n\t\t\t\ts.TestService = &TestPingService{T: s.T()}\n\t\t\t}\n\t\t\tpb_testproto.RegisterTestServiceServer(s.Server, s.TestService)\n\n\t\t\tgo func() {\n\t\t\t\ts.Server.Serve(s.ServerListener)\n\t\t\t}()\n\t\t\tif s.Client == nil {\n\t\t\t\ts.Client = s.NewClient(s.ClientOpts...)\n\t\t\t}\n\n\t\t\ts.serverRunning <- true\n\n\t\t\td := <-s.restartServerWithDelayedStart\n\t\t\ts.Server.Stop()\n\t\t\ttime.Sleep(d)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-s.serverRunning:\n\tcase <-time.After(2 * time.Second):\n\t\ts.T().Fatal(\"server failed to start before deadline\")\n\t}\n}\n\nfunc (s *InterceptorTestSuite) RestartServer(delayedStart time.Duration) <-chan bool {\n\ts.restartServerWithDelayedStart <- delayedStart\n\ttime.Sleep(10 * time.Millisecond)\n\treturn s.serverRunning\n}\n\nfunc (s *InterceptorTestSuite) NewClient(dialOpts ...grpc.DialOption) pb_testproto.TestServiceClient {\n\tnewDialOpts := append(dialOpts, grpc.WithBlock())\n\tif *flagTls {\n\t\tcp := x509.NewCertPool()\n\t\tif !cp.AppendCertsFromPEM(certPEM) {\n\t\t\ts.T().Fatal(\"failed to append certificate\")\n\t\t}\n\t\tcreds := credentials.NewTLS(&tls.Config{ServerName: \"localhost\", RootCAs: cp})\n\t\tnewDialOpts = append(newDialOpts, grpc.WithTransportCredentials(creds))\n\t} else {\n\t\tnewDialOpts = append(newDialOpts, grpc.WithInsecure())\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\tdefer cancel()\n\tclientConn, err := grpc.DialContext(ctx, s.ServerAddr(), newDialOpts...)\n\trequire.NoError(s.T(), err, \"must not error on client Dial\")\n\treturn pb_testproto.NewTestServiceClient(clientConn)\n}\n\nfunc (s *InterceptorTestSuite) ServerAddr() string {\n\treturn s.serverAddr\n}\n\nfunc (s *InterceptorTestSuite) SimpleCtx() context.Context {\n\tctx, _ := context.WithTimeout(context.TODO(), 2*time.Second)\n\treturn ctx\n}\n\nfunc (s *InterceptorTestSuite) DeadlineCtx(deadline time.Time) context.Context {\n\tctx, _ := context.WithDeadline(context.TODO(), deadline)\n\treturn ctx\n}\n\nfunc (s *InterceptorTestSuite) TearDownSuite() {\n\ttime.Sleep(10 * time.Millisecond)\n\tif s.ServerListener != nil {\n\t\ts.Server.GracefulStop()\n\t\ts.T().Logf(\"stopped grpc.Server at: %v\", s.ServerAddr())\n\t\ts.ServerListener.Close()\n\t}\n\tif s.clientConn != nil {\n\t\ts.clientConn.Close()\n\t}\n}\n\n\/\/ generateCertAndKey copied from https:\/\/github.com\/johanbrandhorst\/certify\/blob\/master\/issuers\/vault\/vault_suite_test.go#L255\n\/\/ with minor modifications.\nfunc generateCertAndKey(san []string) ([]byte, []byte, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(time.Hour)\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: \"example.com\",\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: san,\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcertOut := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: derBytes,\n\t})\n\tkeyOut := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(priv),\n\t})\n\n\treturn certOut, keyOut, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package orgs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/Financial-Times\/up-neoutil-go\"\n\t\"github.com\/jmcvetta\/neoism\"\n\t\"strings\"\n)\n\ntype RolesNeoEngine struct{}\n\nfunc (bnc RolesNeoEngine) DecodeJSON(dec *json.Decoder) (interface{}, string, error) {\n\tb := Organisation{}\n\terr := dec.Decode(&b)\n\treturn b, b.UUID, err\n}\n\nfunc (bnc RolesNeoEngine) SuggestedIndexes() map[string]string {\n\treturn map[string]string{\n\t\t\"Organisation\": \"uuid\",\n\t\t\"Concept\": \"uuid\",\n\t\t\"Industry\": \"uuid\",\n\t}\n}\n\nfunc (bnc RolesNeoEngine) Read(cr neoutil.CypherRunner, identity string) (interface{}, bool, error) {\n\tpanic(\"not implemented\")\n}\n\nfunc (bnc RolesNeoEngine) CreateOrUpdate(cr neoutil.CypherRunner, obj interface{}) error {\n\to := obj.(Organisation)\n\n\tp := map[string]interface{}{\n\t\t\"uuid\": o.UUID,\n\t}\n\n\tif o.Extinct == true {\n\t\tp[\"extinct\"] = true\n\t}\n\tif o.FormerNames != nil && len(o.FormerNames) != 0 {\n\t\tp[\"formerNames\"] = o.FormerNames\n\t}\n\tif o.HiddenLabel != \"\" {\n\t\tp[\"hiddenLabel\"] = o.HiddenLabel\n\t}\n\tif o.LegalName != \"\" {\n\t\tp[\"legalName\"] = o.LegalName\n\t}\n\tif o.LocalNames != nil && len(o.LocalNames) != 0 {\n\t\tp[\"localNames\"] = o.LocalNames\n\t}\n\tif o.ProperName != \"\" {\n\t\tp[\"properName\"] = o.ProperName\n\t\tp[\"prefLabel\"] = o.ProperName\n\t}\n\tif o.ShortName != \"\" {\n\t\tp[\"shortName\"] = o.ShortName\n\t}\n\tif o.TradeNames != nil && len(o.TradeNames) != 0 {\n\t\tp[\"tradeNames\"] = o.TradeNames\n\t}\n\tfor _, identifier := range o.Identifiers {\n\t\tif identifier.Authority == fsAuthority {\n\t\t\tp[\"factsetIdentifier\"] = identifier.IdentifierValue\n\t\t}\n\t\tif identifier.Authority == leiAuthority {\n\t\t\tp[\"leiIdentifier\"] = identifier.IdentifierValue\n\t\t}\n\t}\n\tp[\"uuid\"] = o.UUID\n\n\tparms := map[string]interface{}{\n\t\t\"uuid\": o.UUID,\n\t\t\"allProps\": neoism.Props(p),\n\t\t\"puuid\": o.ParentOrganisation,\n\t\t\"icuuid\": o.IndustryClassification,\n\t}\n\n\tstatement := `\n\t\t\tMERGE (n:Concept {uuid: {uuid}})\n\t\t\tSET n = {allProps}\n\t\t\tSET n :Organisation\n\t\t`\n\n\tif o.Type != \"Organisation\" && o.Type != \"\" {\n\t\tstatement += fmt.Sprintf(\"SET n :%s\\n\", o.Type)\n\t}\n\n\tif o.ParentOrganisation != \"\" {\n\t\tstatement += `\n\t\t\tMERGE (p:Concept {uuid: {puuid}})\n\t\t\tMERGE (n)-[:SUB_ORG_OF]->(p)\n\t\t\tSET p :Organisation\n\t\t`\n\t}\n\n\tif o.IndustryClassification != \"\" {\n\t\tstatement += `\n\t\t\tMERGE (ic:Concept {uuid: {icuuid}})\n\t\t\tMERGE (n)-[:IN_INDUSTRY]->(ic)\n\t\t\tSET ic :Industry\n\t\t`\n\t}\n\n\tqueries := []*neoism.CypherQuery{\n\t\t&neoism.CypherQuery{Statement: statement, Parameters: parms},\n\t}\n\n\treturn cr.CypherBatch(queries)\n}\n\nfunc (bnc RolesNeoEngine) Delete(cr neoutil.CypherRunner, identity string) (bool, error) {\n\tpanic(\"not implemented\")\n}\n\nfunc uriToUUID(uri string) string {\n\t\/\/ TODO: make this more robust\n\treturn strings.Replace(uri, \"http:\/\/api.ft.com\/things\/\", \"\", 1)\n}\n\nconst (\n\tfsAuthority = \"http:\/\/api.ft.com\/system\/FACTSET-EDM\"\n\tleiAuthority = \"http:\/\/api.ft.com\/system\/LEI\"\n)\n<commit_msg>update cypher to match agreed approach<commit_after>package orgs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/Financial-Times\/up-neoutil-go\"\n\t\"github.com\/jmcvetta\/neoism\"\n\t\"strings\"\n)\n\ntype RolesNeoEngine struct{}\n\nfunc (bnc RolesNeoEngine) DecodeJSON(dec *json.Decoder) (interface{}, string, error) {\n\tb := Organisation{}\n\terr := dec.Decode(&b)\n\treturn b, b.UUID, err\n}\n\nfunc (bnc RolesNeoEngine) SuggestedIndexes() map[string]string {\n\treturn map[string]string{\n\t\t\"Organisation\": \"uuid\",\n\t\t\"Concept\": \"uuid\",\n\t\t\"Thing\": \"uuid\",\n\t}\n}\n\nfunc (bnc RolesNeoEngine) Read(cr neoutil.CypherRunner, identity string) (interface{}, bool, error) {\n\tpanic(\"not implemented\")\n}\n\nfunc (bnc RolesNeoEngine) CreateOrUpdate(cr neoutil.CypherRunner, obj interface{}) error {\n\to := obj.(Organisation)\n\n\tp := map[string]interface{}{\n\t\t\"uuid\": o.UUID,\n\t}\n\n\tif o.Extinct == true {\n\t\tp[\"extinct\"] = true\n\t}\n\tif o.FormerNames != nil && len(o.FormerNames) != 0 {\n\t\tp[\"formerNames\"] = o.FormerNames\n\t}\n\tif o.HiddenLabel != \"\" {\n\t\tp[\"hiddenLabel\"] = o.HiddenLabel\n\t}\n\tif o.LegalName != \"\" {\n\t\tp[\"legalName\"] = o.LegalName\n\t}\n\tif o.LocalNames != nil && len(o.LocalNames) != 0 {\n\t\tp[\"localNames\"] = o.LocalNames\n\t}\n\tif o.ProperName != \"\" {\n\t\tp[\"properName\"] = o.ProperName\n\t\tp[\"prefLabel\"] = o.ProperName\n\t}\n\tif o.ShortName != \"\" {\n\t\tp[\"shortName\"] = o.ShortName\n\t}\n\tif o.TradeNames != nil && len(o.TradeNames) != 0 {\n\t\tp[\"tradeNames\"] = o.TradeNames\n\t}\n\tfor _, identifier := range o.Identifiers {\n\t\tif identifier.Authority == fsAuthority {\n\t\t\tp[\"factsetIdentifier\"] = identifier.IdentifierValue\n\t\t}\n\t\tif identifier.Authority == leiAuthority {\n\t\t\tp[\"leiIdentifier\"] = identifier.IdentifierValue\n\t\t}\n\t}\n\tp[\"uuid\"] = o.UUID\n\n\tparms := map[string]interface{}{\n\t\t\"uuid\": o.UUID,\n\t\t\"allProps\": neoism.Props(p),\n\t\t\"puuid\": o.ParentOrganisation,\n\t\t\"icuuid\": o.IndustryClassification,\n\t}\n\n\tstatement := `\n\t\t\tMERGE (n:Thing {uuid: {uuid}})\n\t\t\tSET n = {allProps}\n\t\t\tSET n :Concept\n\t\t\tSET n :Organisation\n\t\t`\n\n\tif o.Type != \"Organisation\" && o.Type != \"\" {\n\t\tstatement += fmt.Sprintf(\"SET n :%s\\n\", o.Type)\n\t}\n\n\tif o.ParentOrganisation != \"\" {\n\t\tstatement += `\n\t\t\tMERGE (p:Thing {uuid: {puuid}})\n\t\t\tMERGE (n)-[:SUB_ORG_OF]->(p)\n\t\t`\n\t}\n\n\tif o.IndustryClassification != \"\" {\n\t\tstatement += `\n\t\t\tMERGE (ic:Thing {uuid: {icuuid}})\n\t\t\tMERGE (n)-[:IN_INDUSTRY]->(ic)\n\t\t`\n\t}\n\n\tqueries := []*neoism.CypherQuery{\n\t\t&neoism.CypherQuery{Statement: statement, Parameters: parms},\n\t}\n\n\treturn cr.CypherBatch(queries)\n}\n\nfunc (bnc RolesNeoEngine) Delete(cr neoutil.CypherRunner, identity string) (bool, error) {\n\tpanic(\"not implemented\")\n}\n\nfunc uriToUUID(uri string) string {\n\t\/\/ TODO: make this more robust\n\treturn strings.Replace(uri, \"http:\/\/api.ft.com\/things\/\", \"\", 1)\n}\n\nconst (\n\tfsAuthority = \"http:\/\/api.ft.com\/system\/FACTSET-EDM\"\n\tleiAuthority = \"http:\/\/api.ft.com\/system\/LEI\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nconst (\n\tgendocTmpDir = \"\/tmp\"\n)\n\nvar _ = Describe(\"virtletctl\", func() {\n\tIt(\"Should display usage info on help subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl help\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 10)\n\n\tContext(\"Tests depending on spawning VM\", func() {\n\t\tvar (\n\t\t\tvm *framework.VMInterface\n\t\t\ttempfileName string\n\t\t)\n\n\t\tBeforeAll(func() {\n\t\t\tcm := &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sshkey\",\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\n\t\t\t\t\t\"authorized_keys\": sshPublicKey,\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tvm = controller.VM(\"virtletctl-cirros-vm\")\n\t\t\tvm.Create(VMOptions{\n\t\t\t\tSSHKeySource: \"configmap\/sshkey\",\n\t\t\t}.applyDefaults(), time.Minute*5, nil)\n\n\t\t\twaitSSH(vm)\n\n\t\t\ttempfile, err := ioutil.TempFile(\"\", \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tdefer tempfile.Close()\n\t\t\ttempfileName = tempfile.Name()\n\n\t\t\tstrippedKey := strings.Replace(sshPrivateKey, \"\\t\", \"\", -1)\n\t\t\t_, err = tempfile.Write([]byte(strippedKey))\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.Chmod(tempfileName, 0600)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tcontroller.ConfigMaps().Delete(\"sshkey\", nil)\n\t\t\tos.Remove(tempfileName)\n\t\t})\n\n\t\tIt(\"Should be able to access the pod via ssh using ssh subcommand\", func(done Done) {\n\t\t\tBy(\"Calling virtletctl ssh cirros@cirros-vm hostname\")\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"ssh\", \"--namespace\", controller.Namespace(), \"cirros@virtletctl-cirros-vm\", \"--\", \"-i\", tempfileName, \"hostname\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(Equal(\"virtletctl-cirros-vm\"))\n\t\t}, 60)\n\n\t\tIt(\"Should dump Virtlet metadata on dump-metadata subcommand\", func(done Done) {\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\tBy(\"Calling virtletctl dump-metadata\")\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"dump-metadata\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(ContainSubstring(\"virtletctl-cirros-vm\"))\n\t\t}, 60)\n\n\t})\n\n\tIt(\"Should return libvirt version on virsh subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl virsh version\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"virsh\", \"version\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Compiled against library:\"))\n\t\tExpect(output).To(ContainSubstring(\"Using library:\"))\n\t}, 60)\n})\n\nvar _ = Describe(\"virtletctl unsafe\", func() {\n\tBeforeAll(func() {\n\t\tincludeUnsafe()\n\t})\n\n\tIt(\"Should install itself as a kubectl plugin on install subcommand\", func() {\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl install\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"install\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Calling kubectl plugin virt help\")\n\t\t_, err = framework.RunSimple(localExecutor, \"kubectl\", \"plugin\", \"virt\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 60)\n})\n<commit_msg>Move unsafe test to common Describe()<commit_after>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nconst (\n\tgendocTmpDir = \"\/tmp\"\n)\n\nvar _ = Describe(\"virtletctl\", func() {\n\tIt(\"Should display usage info on help subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl help\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 10)\n\n\tContext(\"Tests depending on spawning VM\", func() {\n\t\tvar (\n\t\t\tvm *framework.VMInterface\n\t\t\ttempfileName string\n\t\t)\n\n\t\tBeforeAll(func() {\n\t\t\tcm := &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sshkey\",\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\n\t\t\t\t\t\"authorized_keys\": sshPublicKey,\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tvm = controller.VM(\"virtletctl-cirros-vm\")\n\t\t\tvm.Create(VMOptions{\n\t\t\t\tSSHKeySource: \"configmap\/sshkey\",\n\t\t\t}.applyDefaults(), time.Minute*5, nil)\n\n\t\t\twaitSSH(vm)\n\n\t\t\ttempfile, err := ioutil.TempFile(\"\", \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tdefer tempfile.Close()\n\t\t\ttempfileName = tempfile.Name()\n\n\t\t\tstrippedKey := strings.Replace(sshPrivateKey, \"\\t\", \"\", -1)\n\t\t\t_, err = tempfile.Write([]byte(strippedKey))\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.Chmod(tempfileName, 0600)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tcontroller.ConfigMaps().Delete(\"sshkey\", nil)\n\t\t\tos.Remove(tempfileName)\n\t\t})\n\n\t\tIt(\"Should be able to access the pod via ssh using ssh subcommand\", func(done Done) {\n\t\t\tBy(\"Calling virtletctl ssh cirros@cirros-vm hostname\")\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"ssh\", \"--namespace\", controller.Namespace(), \"cirros@virtletctl-cirros-vm\", \"--\", \"-i\", tempfileName, \"hostname\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(Equal(\"virtletctl-cirros-vm\"))\n\t\t}, 60)\n\n\t\tIt(\"Should dump Virtlet metadata on dump-metadata subcommand\", func(done Done) {\n\t\t\tdefer close(done)\n\n\t\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\t\tdefer closeFunc()\n\t\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\t\tBy(\"Calling virtletctl dump-metadata\")\n\t\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"dump-metadata\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(output).To(ContainSubstring(\"virtletctl-cirros-vm\"))\n\t\t}, 60)\n\n\t})\n\n\tIt(\"Should return libvirt version on virsh subcommand\", func(done Done) {\n\t\tdefer close(done)\n\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl virsh version\")\n\t\toutput, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"virsh\", \"version\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(output).To(ContainSubstring(\"Compiled against library:\"))\n\t\tExpect(output).To(ContainSubstring(\"Using library:\"))\n\t}, 60)\n\n\tIt(\"Should install itself as a kubectl plugin on install subcommand [unsafe]\", func() {\n\t\tincludeUnsafe()\n\t\tctx, closeFunc := context.WithCancel(context.Background())\n\t\tdefer closeFunc()\n\t\tlocalExecutor := framework.LocalExecutor(ctx)\n\n\t\tBy(\"Calling virtletctl install\")\n\t\t_, err := framework.RunSimple(localExecutor, \"_output\/virtletctl\", \"install\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Calling kubectl plugin virt help\")\n\t\t_, err = framework.RunSimple(localExecutor, \"kubectl\", \"plugin\", \"virt\", \"help\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}, 60)\n})\n<|endoftext|>"} {"text":"<commit_before>package dexcom\n\n\/\/go:generate ..\/gen_crc_table\/gen_crc_table\n\nfunc crc16(msg []byte) []byte {\n\tres := uint16(0)\n\tfor _, b := range msg {\n\t\tres = res<<8 ^ crc16Table[byte(res>>8)^b]\n\t}\n\treturn MarshalUint16(res)\n}\n<commit_msg>Change CRC table generator to \"crcgen\"<commit_after>package dexcom\n\n\/\/go:generate ..\/crcgen\/crcgen\n\nfunc crc16(msg []byte) []byte {\n\tres := uint16(0)\n\tfor _, b := range msg {\n\t\tres = res<<8 ^ crc16Table[byte(res>>8)^b]\n\t}\n\treturn MarshalUint16(res)\n}\n<|endoftext|>"} {"text":"<commit_before>package tcclient\n\nimport (\n\t\"context\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/taskcluster\/jsonschema2go\/text\"\n\t\"github.com\/taskcluster\/slugid-go\/slugid\"\n)\n\n\/\/ Credentials represents the set of credentials required to access protected Taskcluster HTTP APIs.\ntype Credentials struct {\n\t\/\/ ClientID must conform to ^[A-Za-z0-9@\/:.+|_-]+$\n\tClientID string `json:\"clientId\"`\n\t\/\/ AccessToken must conform to ^[a-zA-Z0-9_-]{22,66}$\n\tAccessToken string `json:\"accessToken\"`\n\t\/\/ Certificate used only for temporary credentials\n\tCertificate string `json:\"certificate\"`\n\t\/\/ AuthorizedScopes if set to nil, is ignored. Otherwise, it should be a\n\t\/\/ subset of the scopes that the ClientId already has, and restricts the\n\t\/\/ Credentials to only having these scopes. This is useful when performing\n\t\/\/ actions on behalf of a client which has more restricted scopes. Setting\n\t\/\/ to nil is not the same as setting to an empty array. If AuthorizedScopes\n\t\/\/ is set to an empty array rather than nil, this is equivalent to having\n\t\/\/ no scopes at all.\n\t\/\/ See https:\/\/docs.taskcluster.net\/manual\/apis\/authorized-scopes\n\tAuthorizedScopes []string `json:\"authorizedScopes\"`\n}\n\nvar (\n\tRegExpClientID *regexp.Regexp = regexp.MustCompile(`^[A-Za-z0-9@\/:.+|_-]+$`)\n\tRegExpAccessToken *regexp.Regexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{22,66}$`)\n)\n\nfunc (creds *Credentials) String() string {\n\treturn fmt.Sprintf(\n\t\t\"ClientId: %q\\nAccessToken: %q\\nCertificate: %q\\nAuthorizedScopes: %q\",\n\t\tcreds.ClientID,\n\t\ttext.StarOut(creds.AccessToken),\n\t\ttext.StarOut(creds.Certificate),\n\t\tcreds.AuthorizedScopes,\n\t)\n}\n\n\/\/ Client is the entry point into all the functionality in this package. It\n\/\/ contains authentication credentials, and a service endpoint, which are\n\/\/ required for all HTTP operations.\ntype Client struct {\n\tCredentials *Credentials\n\t\/\/ The URL of the API endpoint to hit.\n\t\/\/ For example, \"https:\/\/auth.taskcluster.net\/v1\" for production auth service.\n\tBaseURL string\n\t\/\/ Whether authentication is enabled (e.g. set to 'false' when using taskcluster-proxy)\n\tAuthenticate bool\n\t\/\/ HTTPClient is a ReducedHTTPClient to be used for the http call instead of\n\t\/\/ the DefaultHTTPClient.\n\tHTTPClient ReducedHTTPClient\n\t\/\/ Context that aborts all requests with this client\n\tContext context.Context\n}\n\n\/\/ Certificate represents the certificate used in Temporary Credentials. See\n\/\/ https:\/\/docs.taskcluster.net\/manual\/apis\/temporary-credentials\ntype Certificate struct {\n\tVersion int `json:\"version\"`\n\tScopes []string `json:\"scopes\"`\n\tStart int64 `json:\"start\"`\n\tExpiry int64 `json:\"expiry\"`\n\tSeed string `json:\"seed\"`\n\tSignature string `json:\"signature\"`\n\tIssuer string `json:\"issuer,omitempty\"`\n}\n\n\/\/ CreateNamedTemporaryCredentials generates temporary credentials from permanent\n\/\/ credentials, valid for the given duration, starting immediately. The\n\/\/ temporary credentials' scopes must be a subset of the permanent credentials'\n\/\/ scopes. The duration may not be more than 31 days. Any authorized scopes of\n\/\/ the permanent credentials will be passed through as authorized scopes to the\n\/\/ temporary credentials, but will not be restricted via the certificate.\n\/\/\n\/\/ Note that the auth service already applies a 5 minute clock skew to the\n\/\/ start and expiry times in\n\/\/ https:\/\/github.com\/taskcluster\/taskcluster-auth\/pull\/117 so no clock skew is\n\/\/ applied in this method, nor should be applied by the caller.\n\/\/\n\/\/ See https:\/\/docs.taskcluster.net\/manual\/apis\/temporary-credentials\nfunc (permaCreds *Credentials) CreateNamedTemporaryCredentials(tempClientID string, duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) {\n\tif duration > 31*24*time.Hour {\n\t\treturn nil, errors.New(\"Temporary credentials must expire within 31 days; however a duration of \" + duration.String() + \" was specified to (*tcclient.Client).CreateTemporaryCredentials(...) method\")\n\t}\n\n\tnow := time.Now()\n\tstart := now\n\texpiry := now.Add(duration)\n\n\tif permaCreds.ClientID == \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from credentials that have an empty ClientId\")\n\t}\n\tif permaCreds.AccessToken == \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from credentials that have an empty AccessToken\")\n\t}\n\tif permaCreds.Certificate != \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from temporary credentials, only from permanent credentials\")\n\t}\n\n\tcert := &Certificate{\n\t\tVersion: 1,\n\t\tScopes: scopes,\n\t\tStart: start.UnixNano() \/ 1e6,\n\t\tExpiry: expiry.UnixNano() \/ 1e6,\n\t\tSeed: slugid.V4() + slugid.V4(),\n\t\tSignature: \"\", \/\/ gets set in Sign() method below\n\t}\n\t\/\/ include the issuer iff this is a named credential\n\tif tempClientID != \"\" {\n\t\tcert.Issuer = permaCreds.ClientID\n\t}\n\n\tcert.Sign(permaCreds.AccessToken, tempClientID)\n\n\tcertBytes, err := json.Marshal(cert)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttempAccessToken, err := generateTemporaryAccessToken(permaCreds.AccessToken, cert.Seed)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttempCreds = &Credentials{\n\t\tClientID: permaCreds.ClientID,\n\t\tAccessToken: tempAccessToken,\n\t\tCertificate: string(certBytes),\n\t\tAuthorizedScopes: permaCreds.AuthorizedScopes,\n\t}\n\tif tempClientID != \"\" {\n\t\ttempCreds.ClientID = tempClientID\n\t}\n\n\treturn\n}\n\n\/\/ CreateTemporaryCredentials is an alias for CreateNamedTemporaryCredentials\n\/\/ with an empty name.\nfunc (permaCreds *Credentials) CreateTemporaryCredentials(duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) {\n\treturn permaCreds.CreateNamedTemporaryCredentials(\"\", duration, scopes...)\n}\n\nfunc (cert *Certificate) Sign(accessToken string, tempClientID string) (err error) {\n\tlines := []string{\"version:\" + strconv.Itoa(cert.Version)}\n\t\/\/ iff this is a named credential, include clientId and issuer\n\tif cert.Issuer != \"\" {\n\t\tlines = append(lines,\n\t\t\t\"clientId:\"+tempClientID,\n\t\t\t\"issuer:\"+cert.Issuer,\n\t\t)\n\t}\n\tlines = append(lines,\n\t\t\"seed:\"+cert.Seed,\n\t\t\"start:\"+strconv.FormatInt(cert.Start, 10),\n\t\t\"expiry:\"+strconv.FormatInt(cert.Expiry, 10),\n\t\t\"scopes:\",\n\t)\n\tlines = append(lines, cert.Scopes...)\n\thash := hmac.New(sha256.New, []byte(accessToken))\n\ttext := strings.Join(lines, \"\\n\")\n\t_, err = hash.Write([]byte(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\tcert.Signature = base64.StdEncoding.EncodeToString(hash.Sum([]byte{}))\n\treturn\n}\n\nfunc generateTemporaryAccessToken(permAccessToken, seed string) (tempAccessToken string, err error) {\n\thash := hmac.New(sha256.New, []byte(permAccessToken))\n\t_, err = hash.Write([]byte(seed))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttempAccessToken = strings.TrimRight(base64.URLEncoding.EncodeToString(hash.Sum([]byte{})), \"=\")\n\treturn\n}\n\n\/\/ Cert attempts to parse the certificate string to return it as an object. If\n\/\/ the certificate is an empty string (e.g. in the case of permanent\n\/\/ credentials) then a nil pointer is returned for the certificate. If a\n\/\/ certificate has been specified but cannot be parsed, an error is returned,\n\/\/ and cert is an empty certificate (rather than nil).\nfunc (creds *Credentials) Cert() (cert *Certificate, err error) {\n\tif creds.Certificate == \"\" {\n\t\treturn\n\t}\n\tcert = new(Certificate)\n\terr = json.Unmarshal([]byte(creds.Certificate), cert)\n\treturn\n}\n\n\/\/ Validate performs a sanity check of the given certificate and returns an\n\/\/ error if it is able to determine that the certificate is not malformed,\n\/\/ expired, or for any other reason invalid. Note, it does not perform any\n\/\/ network transactions against any live services, it only performs sanity\n\/\/ checks that can be executed locally. If cert is nil, an error is returned.\nfunc (cert *Certificate) Validate() error {\n\tif cert == nil {\n\t\treturn fmt.Errorf(\"nil certificate does not pass certificate validation\")\n\t}\n\tif cert.Version != 1 {\n\t\treturn fmt.Errorf(\"Certificate version not 1: %v\", cert.Version)\n\t}\n\tnow := time.Now().UnixNano() \/ 1e6\n\t\/\/ See https:\/\/github.com\/taskcluster\/taskcluster-auth\/pull\/117\/files\n\t\/\/ A five minute tolerance is allowed. This is important, if we receive\n\t\/\/ credentials from a different machine.\n\tif cert.Start > now+5*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate validity starts more than five minutes in the future (now = %v; start = %v)\", now, cert.Start)\n\t}\n\tif cert.Expiry < now-5*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate expired more than five minutes ago (now = %v; expiry = %v)\", now, cert.Expiry)\n\t}\n\tif durationMillis := cert.Expiry - cert.Start; durationMillis > 31*24*60*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate is valid for more than 31 days (%v milliseconds)\", durationMillis)\n\t}\n\tif len(cert.Seed) != 44 {\n\t\treturn fmt.Errorf(\"Certificate seed not 44 bytes: '%v'\", cert.Seed)\n\t}\n\tif _, err := base64.StdEncoding.DecodeString(cert.Signature); err != nil {\n\t\treturn fmt.Errorf(\"Certificate signature is not valid base64 content: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ CredentialsFromEnvVars creates and returns Taskcluster credentials\n\/\/ initialised from the values of environment variables:\n\/\/ TASKCLUSTER_CLIENT_ID\n\/\/ TASKCLUSTER_ACCESS_TOKEN\n\/\/ TASKCLUSTER_CERTIFICATE\n\/\/ No validation is performed on the loaded values, and unset environment\n\/\/ variables will result in empty string values.\nfunc CredentialsFromEnvVars() *Credentials {\n\treturn &Credentials{\n\t\tClientID: os.Getenv(\"TASKCLUSTER_CLIENT_ID\"),\n\t\tAccessToken: os.Getenv(\"TASKCLUSTER_ACCESS_TOKEN\"),\n\t\tCertificate: os.Getenv(\"TASKCLUSTER_CERTIFICATE\"),\n\t}\n}\n\n\/\/ Validate performs local lexical validation of creds to ensure the\n\/\/ credentials are syntactically valid and returns a non-nil error if they are\n\/\/ not. No authentication is performed, so a call to Validate with invalid\n\/\/ credentials that are syntactically valid will not return an error.\nfunc (creds *Credentials) Validate() error {\n\tif creds == nil {\n\t\treturn fmt.Errorf(\"Nil credentials are not valid\")\n\t}\n\t\/\/ Special case: see https:\/\/docs.taskcluster.net\/reference\/platform\/taskcluster-auth\/references\/api#testAuthenticate\n\tif creds.ClientID == \"tester\" && creds.AccessToken == \"no-secret\" {\n\t\treturn nil\n\t}\n\tif !RegExpClientID.MatchString(creds.ClientID) {\n\t\treturn fmt.Errorf(\"Client ID %v does not match regular expression %v\", creds.ClientID, RegExpAccessToken)\n\t}\n\tif !RegExpAccessToken.MatchString(creds.AccessToken) {\n\t\treturn fmt.Errorf(\"Access Token does not match regular expression %v\", RegExpAccessToken)\n\t}\n\tcert, err := creds.Cert()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Certificate for client ID %v is invalid: %v\", creds.ClientID, err)\n\t}\n\tif cert != nil {\n\t\treturn cert.Validate()\n\t}\n\treturn nil\n}\n<commit_msg>Updated clientId regexp as per taskcluster\/taskcluster-auth#137<commit_after>package tcclient\n\nimport (\n\t\"context\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/taskcluster\/jsonschema2go\/text\"\n\t\"github.com\/taskcluster\/slugid-go\/slugid\"\n)\n\n\/\/ Credentials represents the set of credentials required to access protected Taskcluster HTTP APIs.\ntype Credentials struct {\n\t\/\/ ClientID must conform to ^[A-Za-z0-9@\/:.+|_-]+$\n\tClientID string `json:\"clientId\"`\n\t\/\/ AccessToken must conform to ^[a-zA-Z0-9_-]{22,66}$\n\tAccessToken string `json:\"accessToken\"`\n\t\/\/ Certificate used only for temporary credentials\n\tCertificate string `json:\"certificate\"`\n\t\/\/ AuthorizedScopes if set to nil, is ignored. Otherwise, it should be a\n\t\/\/ subset of the scopes that the ClientId already has, and restricts the\n\t\/\/ Credentials to only having these scopes. This is useful when performing\n\t\/\/ actions on behalf of a client which has more restricted scopes. Setting\n\t\/\/ to nil is not the same as setting to an empty array. If AuthorizedScopes\n\t\/\/ is set to an empty array rather than nil, this is equivalent to having\n\t\/\/ no scopes at all.\n\t\/\/ See https:\/\/docs.taskcluster.net\/manual\/apis\/authorized-scopes\n\tAuthorizedScopes []string `json:\"authorizedScopes\"`\n}\n\nvar (\n\tRegExpClientID *regexp.Regexp = regexp.MustCompile(`^[A-Za-z0-9!@\/:.+|_-]+$`)\n\tRegExpAccessToken *regexp.Regexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{22,66}$`)\n)\n\nfunc (creds *Credentials) String() string {\n\treturn fmt.Sprintf(\n\t\t\"ClientId: %q\\nAccessToken: %q\\nCertificate: %q\\nAuthorizedScopes: %q\",\n\t\tcreds.ClientID,\n\t\ttext.StarOut(creds.AccessToken),\n\t\ttext.StarOut(creds.Certificate),\n\t\tcreds.AuthorizedScopes,\n\t)\n}\n\n\/\/ Client is the entry point into all the functionality in this package. It\n\/\/ contains authentication credentials, and a service endpoint, which are\n\/\/ required for all HTTP operations.\ntype Client struct {\n\tCredentials *Credentials\n\t\/\/ The URL of the API endpoint to hit.\n\t\/\/ For example, \"https:\/\/auth.taskcluster.net\/v1\" for production auth service.\n\tBaseURL string\n\t\/\/ Whether authentication is enabled (e.g. set to 'false' when using taskcluster-proxy)\n\tAuthenticate bool\n\t\/\/ HTTPClient is a ReducedHTTPClient to be used for the http call instead of\n\t\/\/ the DefaultHTTPClient.\n\tHTTPClient ReducedHTTPClient\n\t\/\/ Context that aborts all requests with this client\n\tContext context.Context\n}\n\n\/\/ Certificate represents the certificate used in Temporary Credentials. See\n\/\/ https:\/\/docs.taskcluster.net\/manual\/apis\/temporary-credentials\ntype Certificate struct {\n\tVersion int `json:\"version\"`\n\tScopes []string `json:\"scopes\"`\n\tStart int64 `json:\"start\"`\n\tExpiry int64 `json:\"expiry\"`\n\tSeed string `json:\"seed\"`\n\tSignature string `json:\"signature\"`\n\tIssuer string `json:\"issuer,omitempty\"`\n}\n\n\/\/ CreateNamedTemporaryCredentials generates temporary credentials from permanent\n\/\/ credentials, valid for the given duration, starting immediately. The\n\/\/ temporary credentials' scopes must be a subset of the permanent credentials'\n\/\/ scopes. The duration may not be more than 31 days. Any authorized scopes of\n\/\/ the permanent credentials will be passed through as authorized scopes to the\n\/\/ temporary credentials, but will not be restricted via the certificate.\n\/\/\n\/\/ Note that the auth service already applies a 5 minute clock skew to the\n\/\/ start and expiry times in\n\/\/ https:\/\/github.com\/taskcluster\/taskcluster-auth\/pull\/117 so no clock skew is\n\/\/ applied in this method, nor should be applied by the caller.\n\/\/\n\/\/ See https:\/\/docs.taskcluster.net\/manual\/apis\/temporary-credentials\nfunc (permaCreds *Credentials) CreateNamedTemporaryCredentials(tempClientID string, duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) {\n\tif duration > 31*24*time.Hour {\n\t\treturn nil, errors.New(\"Temporary credentials must expire within 31 days; however a duration of \" + duration.String() + \" was specified to (*tcclient.Client).CreateTemporaryCredentials(...) method\")\n\t}\n\n\tnow := time.Now()\n\tstart := now\n\texpiry := now.Add(duration)\n\n\tif permaCreds.ClientID == \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from credentials that have an empty ClientId\")\n\t}\n\tif permaCreds.AccessToken == \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from credentials that have an empty AccessToken\")\n\t}\n\tif permaCreds.Certificate != \"\" {\n\t\treturn nil, errors.New(\"Temporary credentials cannot be created from temporary credentials, only from permanent credentials\")\n\t}\n\n\tcert := &Certificate{\n\t\tVersion: 1,\n\t\tScopes: scopes,\n\t\tStart: start.UnixNano() \/ 1e6,\n\t\tExpiry: expiry.UnixNano() \/ 1e6,\n\t\tSeed: slugid.V4() + slugid.V4(),\n\t\tSignature: \"\", \/\/ gets set in Sign() method below\n\t}\n\t\/\/ include the issuer iff this is a named credential\n\tif tempClientID != \"\" {\n\t\tcert.Issuer = permaCreds.ClientID\n\t}\n\n\tcert.Sign(permaCreds.AccessToken, tempClientID)\n\n\tcertBytes, err := json.Marshal(cert)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttempAccessToken, err := generateTemporaryAccessToken(permaCreds.AccessToken, cert.Seed)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttempCreds = &Credentials{\n\t\tClientID: permaCreds.ClientID,\n\t\tAccessToken: tempAccessToken,\n\t\tCertificate: string(certBytes),\n\t\tAuthorizedScopes: permaCreds.AuthorizedScopes,\n\t}\n\tif tempClientID != \"\" {\n\t\ttempCreds.ClientID = tempClientID\n\t}\n\n\treturn\n}\n\n\/\/ CreateTemporaryCredentials is an alias for CreateNamedTemporaryCredentials\n\/\/ with an empty name.\nfunc (permaCreds *Credentials) CreateTemporaryCredentials(duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) {\n\treturn permaCreds.CreateNamedTemporaryCredentials(\"\", duration, scopes...)\n}\n\nfunc (cert *Certificate) Sign(accessToken string, tempClientID string) (err error) {\n\tlines := []string{\"version:\" + strconv.Itoa(cert.Version)}\n\t\/\/ iff this is a named credential, include clientId and issuer\n\tif cert.Issuer != \"\" {\n\t\tlines = append(lines,\n\t\t\t\"clientId:\"+tempClientID,\n\t\t\t\"issuer:\"+cert.Issuer,\n\t\t)\n\t}\n\tlines = append(lines,\n\t\t\"seed:\"+cert.Seed,\n\t\t\"start:\"+strconv.FormatInt(cert.Start, 10),\n\t\t\"expiry:\"+strconv.FormatInt(cert.Expiry, 10),\n\t\t\"scopes:\",\n\t)\n\tlines = append(lines, cert.Scopes...)\n\thash := hmac.New(sha256.New, []byte(accessToken))\n\ttext := strings.Join(lines, \"\\n\")\n\t_, err = hash.Write([]byte(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\tcert.Signature = base64.StdEncoding.EncodeToString(hash.Sum([]byte{}))\n\treturn\n}\n\nfunc generateTemporaryAccessToken(permAccessToken, seed string) (tempAccessToken string, err error) {\n\thash := hmac.New(sha256.New, []byte(permAccessToken))\n\t_, err = hash.Write([]byte(seed))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttempAccessToken = strings.TrimRight(base64.URLEncoding.EncodeToString(hash.Sum([]byte{})), \"=\")\n\treturn\n}\n\n\/\/ Cert attempts to parse the certificate string to return it as an object. If\n\/\/ the certificate is an empty string (e.g. in the case of permanent\n\/\/ credentials) then a nil pointer is returned for the certificate. If a\n\/\/ certificate has been specified but cannot be parsed, an error is returned,\n\/\/ and cert is an empty certificate (rather than nil).\nfunc (creds *Credentials) Cert() (cert *Certificate, err error) {\n\tif creds.Certificate == \"\" {\n\t\treturn\n\t}\n\tcert = new(Certificate)\n\terr = json.Unmarshal([]byte(creds.Certificate), cert)\n\treturn\n}\n\n\/\/ Validate performs a sanity check of the given certificate and returns an\n\/\/ error if it is able to determine that the certificate is not malformed,\n\/\/ expired, or for any other reason invalid. Note, it does not perform any\n\/\/ network transactions against any live services, it only performs sanity\n\/\/ checks that can be executed locally. If cert is nil, an error is returned.\nfunc (cert *Certificate) Validate() error {\n\tif cert == nil {\n\t\treturn fmt.Errorf(\"nil certificate does not pass certificate validation\")\n\t}\n\tif cert.Version != 1 {\n\t\treturn fmt.Errorf(\"Certificate version not 1: %v\", cert.Version)\n\t}\n\tnow := time.Now().UnixNano() \/ 1e6\n\t\/\/ See https:\/\/github.com\/taskcluster\/taskcluster-auth\/pull\/117\/files\n\t\/\/ A five minute tolerance is allowed. This is important, if we receive\n\t\/\/ credentials from a different machine.\n\tif cert.Start > now+5*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate validity starts more than five minutes in the future (now = %v; start = %v)\", now, cert.Start)\n\t}\n\tif cert.Expiry < now-5*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate expired more than five minutes ago (now = %v; expiry = %v)\", now, cert.Expiry)\n\t}\n\tif durationMillis := cert.Expiry - cert.Start; durationMillis > 31*24*60*60*1000 {\n\t\treturn fmt.Errorf(\"Certificate is valid for more than 31 days (%v milliseconds)\", durationMillis)\n\t}\n\tif len(cert.Seed) != 44 {\n\t\treturn fmt.Errorf(\"Certificate seed not 44 bytes: '%v'\", cert.Seed)\n\t}\n\tif _, err := base64.StdEncoding.DecodeString(cert.Signature); err != nil {\n\t\treturn fmt.Errorf(\"Certificate signature is not valid base64 content: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ CredentialsFromEnvVars creates and returns Taskcluster credentials\n\/\/ initialised from the values of environment variables:\n\/\/ TASKCLUSTER_CLIENT_ID\n\/\/ TASKCLUSTER_ACCESS_TOKEN\n\/\/ TASKCLUSTER_CERTIFICATE\n\/\/ No validation is performed on the loaded values, and unset environment\n\/\/ variables will result in empty string values.\nfunc CredentialsFromEnvVars() *Credentials {\n\treturn &Credentials{\n\t\tClientID: os.Getenv(\"TASKCLUSTER_CLIENT_ID\"),\n\t\tAccessToken: os.Getenv(\"TASKCLUSTER_ACCESS_TOKEN\"),\n\t\tCertificate: os.Getenv(\"TASKCLUSTER_CERTIFICATE\"),\n\t}\n}\n\n\/\/ Validate performs local lexical validation of creds to ensure the\n\/\/ credentials are syntactically valid and returns a non-nil error if they are\n\/\/ not. No authentication is performed, so a call to Validate with invalid\n\/\/ credentials that are syntactically valid will not return an error.\nfunc (creds *Credentials) Validate() error {\n\tif creds == nil {\n\t\treturn fmt.Errorf(\"Nil credentials are not valid\")\n\t}\n\t\/\/ Special case: see https:\/\/docs.taskcluster.net\/reference\/platform\/taskcluster-auth\/references\/api#testAuthenticate\n\tif creds.ClientID == \"tester\" && creds.AccessToken == \"no-secret\" {\n\t\treturn nil\n\t}\n\tif !RegExpClientID.MatchString(creds.ClientID) {\n\t\treturn fmt.Errorf(\"Client ID %v does not match regular expression %v\", creds.ClientID, RegExpAccessToken)\n\t}\n\tif !RegExpAccessToken.MatchString(creds.AccessToken) {\n\t\treturn fmt.Errorf(\"Access Token does not match regular expression %v\", RegExpAccessToken)\n\t}\n\tcert, err := creds.Cert()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Certificate for client ID %v is invalid: %v\", creds.ClientID, err)\n\t}\n\tif cert != nil {\n\t\treturn cert.Validate()\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/driver\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/executil\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/util\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/vmextension\"\n\n\tyaml \"github.com\/cloudfoundry-incubator\/candiedyaml\"\n)\n\nconst (\n\tcomposeBin = \"docker-compose\"\n\tcomposeTimeoutSecs = 600\n\n\tcomposeYml = \"docker-compose.yml\"\n\tcomposeYmlDir = \"\/etc\/docker\/compose\"\n\tcomposeProject = \"compose\" \/\/ prefix for compose-created containers\n\n\tdockerCfgDir = \"\/etc\/docker\"\n\tdockerCaCert = \"ca.pem\"\n\tdockerSrvCert = \"cert.pem\"\n\tdockerSrvKey = \"key.pem\"\n)\n\nfunc enable(he vmextension.HandlerEnvironment, d driver.DistroDriver) error {\n\tsettings, err := parseSettings(he.HandlerEnvironment.ConfigFolder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdockerUrl, composeUrl := getDockerUrls(settings.AzureEnv)\n\n\t\/\/ Install docker daemon\n\tlog.Printf(\"++ install docker\")\n\tif _, err := exec.LookPath(\"docker\"); err == nil {\n\t\tlog.Printf(\"docker already installed. not re-installing\")\n\t} else {\n\t\t\/\/ TODO(ahmetb) Temporary retry logic around installation for serialization\n\t\t\/\/ problem in Azure VM Scale Sets. In case of scale-up event, the new VM with\n\t\t\/\/ multiple extensions (such as Linux Diagnostics and Docker Extension) will install\n\t\t\/\/ the extensions in parallel and that will result in non-deterministic\n\t\t\/\/ acquisition of dpkg lock (apt-get install) and thus causing one of the\n\t\t\/\/ extensions to fail.\n\t\t\/\/\n\t\t\/\/ Adding this temporary retry logic just for Linux Diagnostics extension\n\t\t\/\/ assuming it will take at most 5 minutes to be done with apt-get lock.\n\t\t\/\/\n\t\t\/\/ This retry logic should be removed once the issue is fixed on the resource\n\t\t\/\/ provider layer.\n\n\t\tvar (\n\t\t\tnRetries = 6\n\t\t\tretryInterval = time.Minute * 1\n\t\t)\n\n\t\tfor nRetries > 0 {\n\t\t\tif err := d.InstallDocker(dockerUrl); err != nil {\n\t\t\t\tnRetries--\n\t\t\t\tif nRetries == 0 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"install failed. remaining attempts=%d. error=%v\", nRetries, err)\n\t\t\t\tlog.Printf(\"sleeping %s\", retryInterval)\n\t\t\t\ttime.Sleep(retryInterval)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"-- install docker\")\n\n\t\/\/ Install docker-compose\n\tlog.Printf(\"++ install docker-compose\")\n\tif err := installCompose(composeBinPath(d), composeUrl); err != nil {\n\t\treturn fmt.Errorf(\"error installing docker-compose: %v\", err)\n\t}\n\tlog.Printf(\"-- install docker-compose\")\n\n\t\/\/ Add user to 'docker' group to user docker as non-root\n\tu, err := util.GetAzureUser()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get provisioned user: %v\", err)\n\t}\n\tlog.Printf(\"++ add user to docker group\")\n\tif out, err := executil.Exec(\"usermod\", \"-aG\", \"docker\", u); err != nil {\n\t\tlog.Printf(\"%s\", string(out))\n\t\treturn err\n\t}\n\tlog.Printf(\"-- add user to docker group\")\n\n\t\/\/ Install docker remote access certs\n\tlog.Printf(\"++ setup docker certs\")\n\tif err := installDockerCerts(*settings, dockerCfgDir); err != nil {\n\t\treturn fmt.Errorf(\"error installing docker certs: %v\", err)\n\t}\n\tlog.Printf(\"-- setup docker certs\")\n\n\t\/\/ Update dockeropts\n\tlog.Printf(\"++ update dockeropts\")\n\trestartNeeded, err := updateDockerOpts(d, getArgs(*settings, d))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update dockeropts: %v\", err)\n\t}\n\tlog.Printf(\"restart needed: %v\", restartNeeded)\n\tlog.Printf(\"-- update dockeropts\")\n\n\t\/\/ Restart docker\n\tlog.Printf(\"++ restart docker\")\n\tif !restartNeeded {\n\t\tlog.Printf(\"no restart needed. issuing only a start command.\")\n\t\t_ = d.StartDocker() \/\/ ignore error as it already may be running due to multiple calls to enable\n\t} else {\n\t\tlog.Printf(\"restarting docker-engine\")\n\t\tif err := d.RestartDocker(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ttime.Sleep(3 * time.Second) \/\/ wait for instance to come up\n\tlog.Printf(\"-- restart docker\")\n\n\t\/\/ Login Docker registry server\n\tlog.Printf(\"++ login docker registry\")\n\tif err := loginRegistry(settings.Login); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"-- login docker registry\")\n\n\t\/\/ Compose Up\n\tlog.Printf(\"++ compose up\")\n\tif err := composeUp(d, settings.ComposeJson, settings.ComposeEnv, settings.ComposeProtectedEnv); err != nil {\n\t\treturn fmt.Errorf(\"'docker-compose up' failed: %v. Check logs at %s.\", err, filepath.Join(he.HandlerEnvironment.LogFolder, LogFilename))\n\t}\n\tlog.Printf(\"-- compose up\")\n\treturn nil\n}\n\n\/\/ installCompose download docker-compose and saves to the specified path if it\n\/\/ is not already installed.\nfunc installCompose(path string, composeUrl string) error {\n\t\/\/ Check if already installed at path.\n\tif ok, err := util.PathExists(path); err != nil {\n\t\treturn err\n\t} else if ok {\n\t\tlog.Printf(\"docker-compose is already installed at %s\", path)\n\t\treturn nil\n\t}\n\n\t\/\/ Create dir if not exists\n\tdir := filepath.Dir(path)\n\tok, err := util.PathExists(dir)\n\tif err != nil {\n\t\treturn err\n\t} else if !ok {\n\t\tif err := os.MkdirAll(dir, 755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"Downloading compose from %s\", composeUrl)\n\tresp, err := http.Get(composeUrl)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error downloading docker-compose: %v\", err)\n\t}\n\tif resp.StatusCode\/100 != 2 {\n\t\treturn fmt.Errorf(\"response status code from %s: %s\", composeUrl, resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\tf, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating %s: %v\", path, err)\n\t}\n\n\tdefer f.Close()\n\tif _, err := io.Copy(f, resp.Body); err != nil {\n\t\treturn fmt.Errorf(\"failed to save response body to %s: %v\", path, err)\n\t}\n\treturn nil\n}\n\n\/\/ loginRegistry calls the `docker login` command to authenticate the engine to the\n\/\/ specified registry with given credentials.\nfunc loginRegistry(s dockerLoginSettings) error {\n\tif !s.HasLoginInfo() {\n\t\tlog.Println(\"registry login not specificied\")\n\t\treturn nil\n\t}\n\topts := []string{\n\t\t\"login\",\n\t\t\"--email=\" + s.Email,\n\t\t\"--username=\" + s.Username,\n\t\t\"--password=\" + s.Password,\n\t}\n\tif s.Server != \"\" {\n\t\topts = append(opts, s.Server)\n\t}\n\t_, err := executil.Exec(\"docker\", opts...)\n\tif err != nil {\n\t\treturn errors.New(\"'docker login' failed\")\n\t}\n\treturn nil\n}\n\n\/\/ composeBinPath returns the path docker-compose binary should be installed at\n\/\/ on the host operating system.\nfunc composeBinPath(d driver.DistroDriver) string {\n\treturn filepath.Join(d.DockerComposeDir(), composeBin)\n}\n\n\/\/ composeUp converts given json to yaml, saves to a file on the host and\n\/\/ uses `docker-compose up -d` to create the containers.\nfunc composeUp(d driver.DistroDriver, json map[string]interface{}, publicEnv, protectedEnv map[string]string) error {\n\tif len(json) == 0 {\n\t\tlog.Println(\"docker-compose config not specified, noop\")\n\t\treturn nil\n\t}\n\n\t\/\/ Convert json to yaml\n\tyaml, err := yaml.Marshal(json)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error converting to compose.yml: %v\", err)\n\t}\n\n\tif err := os.MkdirAll(composeYmlDir, 0777); err != nil {\n\t\treturn fmt.Errorf(\"failed creating %s: %v\", composeYmlDir, err)\n\t}\n\tlog.Printf(\"Using compose yaml:>>>>>\\n%s\\n<<<<<\", string(yaml))\n\tymlPath := filepath.Join(composeYmlDir, composeYml)\n\tif err := ioutil.WriteFile(ymlPath, yaml, 0666); err != nil {\n\t\treturn fmt.Errorf(\"error writing %s: %v\", ymlPath, err)\n\t}\n\n\tif publicEnv == nil {\n\t\tpublicEnv = make(map[string]string)\n\t}\n\n\t\/\/ set timeout for docker-compose -> docker-engine interactions.\n\t\/\/ When downloading large images, docker-compose intermittently times out\n\t\/\/ (gh#docker\/compose\/issues\/2186) (gh#Azure\/azure-docker-extension\/issues\/87).\n\tif _, ok := publicEnv[\"COMPOSE_HTTP_TIMEOUT\"]; !ok {\n\t\tpublicEnv[\"COMPOSE_HTTP_TIMEOUT\"] = fmt.Sprintf(\"%d\", composeTimeoutSecs)\n\t}\n\n\t\/\/ provide a consistent default project name for docker-compose. this is to prevent\n\t\/\/ inconsistencies that may occur when we change where docker-compose.yml lives.\n\tif _, ok := publicEnv[\"COMPOSE_PROJECT_NAME\"]; !ok {\n\t\tpublicEnv[\"COMPOSE_PROJECT_NAME\"] = composeProject\n\t}\n\n\t\/\/ set public environment variables to be used in docker-compose\n\tfor k, v := range publicEnv {\n\t\tlog.Printf(\"Setting docker-compose environment variable %q=%q.\", k, v)\n\t\tos.Setenv(k, v)\n\t\tdefer os.Unsetenv(k)\n\t}\n\n\t\/\/ set protected environment variables to be used in docker-compose\n\tfor k, v := range protectedEnv {\n\t\tlog.Printf(\"Setting protected docker-compose environment variable %q.\", k)\n\t\tos.Setenv(k, v)\n\t\tdefer os.Unsetenv(k)\n\t}\n\n\treturn executil.ExecPipeToFds(executil.Fds{Out: ioutil.Discard}, composeBinPath(d), \"-f\", ymlPath, \"up\", \"-d\")\n}\n\n\/\/ installDockerCerts saves the configured certs to the specified dir\n\/\/ if and only if the certs are not already placed there. If no certs\n\/\/ are provided or some certs already exist, nothing is written.\nfunc installDockerCerts(s DockerHandlerSettings, dstDir string) error {\n\tm := []struct {\n\t\tsrc string\n\t\tdst string\n\t}{\n\t\t{s.Certs.CABase64, filepath.Join(dstDir, dockerCaCert)},\n\t\t{s.Certs.ServerCertBase64, filepath.Join(dstDir, dockerSrvCert)},\n\t\t{s.Certs.ServerKeyBase64, filepath.Join(dstDir, dockerSrvKey)},\n\t}\n\n\t\/\/ Check if certs are provided\n\tfor _, v := range m {\n\t\tif len(v.src) == 0 {\n\t\t\tlog.Printf(\"Docker certificate %s is not provided in the extension settings, skipping docker certs installation\", v.dst)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Check the target directory, if not create\n\tif ok, err := util.PathExists(dstDir); err != nil {\n\t\treturn fmt.Errorf(\"error checking cert dir: %v\", err)\n\t} else if !ok {\n\t\tif err := os.MkdirAll(dstDir, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Write the certs\n\tfor _, v := range m {\n\t\t\/\/ Decode base64\n\t\tin := strings.TrimSpace(v.src)\n\t\tf, err := base64.StdEncoding.DecodeString(in)\n\t\tif err != nil {\n\t\t\t\/\/ Fallback to original file input\n\t\t\tf = []byte(in)\n\t\t}\n\n\t\tif err := ioutil.WriteFile(v.dst, f, 0600); err != nil {\n\t\t\treturn fmt.Errorf(\"error writing certificate: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc updateDockerOpts(dd driver.DistroDriver, args string) (bool, error) {\n\tlog.Printf(\"Updating daemon args to: %s\", args)\n\trestartNeeded, err := dd.UpdateDockerArgs(args)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error updating DOCKER_OPTS: %v\", err)\n\t}\n\treturn restartNeeded, nil\n}\n\n\/\/ getArgs provides set of arguments that should be used in updating Docker\n\/\/ daemon options based on the distro.\nfunc getArgs(s DockerHandlerSettings, dd driver.DistroDriver) string {\n\targs := dd.BaseOpts()\n\n\tif s.Certs.HasDockerCerts() {\n\t\ttls := []string{\"--tlsverify\",\n\t\t\tfmt.Sprintf(\"--tlscacert=%s\", filepath.Join(dockerCfgDir, dockerCaCert)),\n\t\t\tfmt.Sprintf(\"--tlscert=%s\", filepath.Join(dockerCfgDir, dockerSrvCert)),\n\t\t\tfmt.Sprintf(\"--tlskey=%s\", filepath.Join(dockerCfgDir, dockerSrvKey)),\n\t\t}\n\t\targs = append(args, tls...)\n\t}\n\n\tif s.Docker.Port != \"\" {\n\t\targs = append(args, fmt.Sprintf(\"-H=0.0.0.0:%s\", s.Docker.Port))\n\t}\n\n\tif len(s.Docker.Options) > 0 {\n\t\targs = append(args, s.Docker.Options...)\n\t}\n\n\treturn strings.Join(args, \" \")\n}\n\n\nfunc getDockerUrls(env string) (string, string) {\n\turlMap := map[string]map[string]string{\n\t\t\"china\": map[string]string{\n\t\t\t\/\/ This script is the same as https:\/\/get.docker.com, except\n\t\t\t\/\/ 1. apt_url and yum_url points to the mirror in China\n\t\t\t\/\/ 2. fix an issue in detecting distro versions\n\t\t\t\"docker\" : \"http:\/\/mirror.azure.cn\/repo\/install-docker-engine.sh\", \n\t\t\t\"compose\": \"http:\/\/mirror.azure.cn\/docker-toolbox\/linux\/compose\/1.6.2\/docker-compose-Linux-x86_64\",\n\t\t},\n\t\t\"global\": map[string]string{\n\t\t\t\"docker\" : \"https:\/\/get.docker.com\/\",\n\t\t\t\"compose\": \"https:\/\/github.com\/docker\/compose\/releases\/download\/1.6.2\/docker-compose-Linux-x86_64\",\n\t\t},\n\t}\n\tif value, ok := urlMap[strings.ToLower(env)]; ok {\n\t\treturn value[\"docker\"], value[\"compose\"]\n\t}\n\treturn urlMap[\"global\"][\"docker\"], urlMap[\"global\"][\"docker\"]\n}\n<commit_msg>Update comments<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/driver\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/executil\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/util\"\n\t\"github.com\/Azure\/azure-docker-extension\/pkg\/vmextension\"\n\n\tyaml \"github.com\/cloudfoundry-incubator\/candiedyaml\"\n)\n\nconst (\n\tcomposeBin = \"docker-compose\"\n\tcomposeTimeoutSecs = 600\n\n\tcomposeYml = \"docker-compose.yml\"\n\tcomposeYmlDir = \"\/etc\/docker\/compose\"\n\tcomposeProject = \"compose\" \/\/ prefix for compose-created containers\n\n\tdockerCfgDir = \"\/etc\/docker\"\n\tdockerCaCert = \"ca.pem\"\n\tdockerSrvCert = \"cert.pem\"\n\tdockerSrvKey = \"key.pem\"\n)\n\nfunc enable(he vmextension.HandlerEnvironment, d driver.DistroDriver) error {\n\tsettings, err := parseSettings(he.HandlerEnvironment.ConfigFolder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdockerUrl, composeUrl := getDockerUrls(settings.AzureEnv)\n\n\t\/\/ Install docker daemon\n\tlog.Printf(\"++ install docker\")\n\tif _, err := exec.LookPath(\"docker\"); err == nil {\n\t\tlog.Printf(\"docker already installed. not re-installing\")\n\t} else {\n\t\t\/\/ TODO(ahmetb) Temporary retry logic around installation for serialization\n\t\t\/\/ problem in Azure VM Scale Sets. In case of scale-up event, the new VM with\n\t\t\/\/ multiple extensions (such as Linux Diagnostics and Docker Extension) will install\n\t\t\/\/ the extensions in parallel and that will result in non-deterministic\n\t\t\/\/ acquisition of dpkg lock (apt-get install) and thus causing one of the\n\t\t\/\/ extensions to fail.\n\t\t\/\/\n\t\t\/\/ Adding this temporary retry logic just for Linux Diagnostics extension\n\t\t\/\/ assuming it will take at most 5 minutes to be done with apt-get lock.\n\t\t\/\/\n\t\t\/\/ This retry logic should be removed once the issue is fixed on the resource\n\t\t\/\/ provider layer.\n\n\t\tvar (\n\t\t\tnRetries = 6\n\t\t\tretryInterval = time.Minute * 1\n\t\t)\n\n\t\tfor nRetries > 0 {\n\t\t\tif err := d.InstallDocker(dockerUrl); err != nil {\n\t\t\t\tnRetries--\n\t\t\t\tif nRetries == 0 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"install failed. remaining attempts=%d. error=%v\", nRetries, err)\n\t\t\t\tlog.Printf(\"sleeping %s\", retryInterval)\n\t\t\t\ttime.Sleep(retryInterval)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"-- install docker\")\n\n\t\/\/ Install docker-compose\n\tlog.Printf(\"++ install docker-compose\")\n\tif err := installCompose(composeBinPath(d), composeUrl); err != nil {\n\t\treturn fmt.Errorf(\"error installing docker-compose: %v\", err)\n\t}\n\tlog.Printf(\"-- install docker-compose\")\n\n\t\/\/ Add user to 'docker' group to user docker as non-root\n\tu, err := util.GetAzureUser()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get provisioned user: %v\", err)\n\t}\n\tlog.Printf(\"++ add user to docker group\")\n\tif out, err := executil.Exec(\"usermod\", \"-aG\", \"docker\", u); err != nil {\n\t\tlog.Printf(\"%s\", string(out))\n\t\treturn err\n\t}\n\tlog.Printf(\"-- add user to docker group\")\n\n\t\/\/ Install docker remote access certs\n\tlog.Printf(\"++ setup docker certs\")\n\tif err := installDockerCerts(*settings, dockerCfgDir); err != nil {\n\t\treturn fmt.Errorf(\"error installing docker certs: %v\", err)\n\t}\n\tlog.Printf(\"-- setup docker certs\")\n\n\t\/\/ Update dockeropts\n\tlog.Printf(\"++ update dockeropts\")\n\trestartNeeded, err := updateDockerOpts(d, getArgs(*settings, d))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update dockeropts: %v\", err)\n\t}\n\tlog.Printf(\"restart needed: %v\", restartNeeded)\n\tlog.Printf(\"-- update dockeropts\")\n\n\t\/\/ Restart docker\n\tlog.Printf(\"++ restart docker\")\n\tif !restartNeeded {\n\t\tlog.Printf(\"no restart needed. issuing only a start command.\")\n\t\t_ = d.StartDocker() \/\/ ignore error as it already may be running due to multiple calls to enable\n\t} else {\n\t\tlog.Printf(\"restarting docker-engine\")\n\t\tif err := d.RestartDocker(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ttime.Sleep(3 * time.Second) \/\/ wait for instance to come up\n\tlog.Printf(\"-- restart docker\")\n\n\t\/\/ Login Docker registry server\n\tlog.Printf(\"++ login docker registry\")\n\tif err := loginRegistry(settings.Login); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"-- login docker registry\")\n\n\t\/\/ Compose Up\n\tlog.Printf(\"++ compose up\")\n\tif err := composeUp(d, settings.ComposeJson, settings.ComposeEnv, settings.ComposeProtectedEnv); err != nil {\n\t\treturn fmt.Errorf(\"'docker-compose up' failed: %v. Check logs at %s.\", err, filepath.Join(he.HandlerEnvironment.LogFolder, LogFilename))\n\t}\n\tlog.Printf(\"-- compose up\")\n\treturn nil\n}\n\n\/\/ installCompose download docker-compose and saves to the specified path if it\n\/\/ is not already installed.\nfunc installCompose(path string, composeUrl string) error {\n\t\/\/ Check if already installed at path.\n\tif ok, err := util.PathExists(path); err != nil {\n\t\treturn err\n\t} else if ok {\n\t\tlog.Printf(\"docker-compose is already installed at %s\", path)\n\t\treturn nil\n\t}\n\n\t\/\/ Create dir if not exists\n\tdir := filepath.Dir(path)\n\tok, err := util.PathExists(dir)\n\tif err != nil {\n\t\treturn err\n\t} else if !ok {\n\t\tif err := os.MkdirAll(dir, 755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"Downloading compose from %s\", composeUrl)\n\tresp, err := http.Get(composeUrl)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error downloading docker-compose: %v\", err)\n\t}\n\tif resp.StatusCode\/100 != 2 {\n\t\treturn fmt.Errorf(\"response status code from %s: %s\", composeUrl, resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\tf, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating %s: %v\", path, err)\n\t}\n\n\tdefer f.Close()\n\tif _, err := io.Copy(f, resp.Body); err != nil {\n\t\treturn fmt.Errorf(\"failed to save response body to %s: %v\", path, err)\n\t}\n\treturn nil\n}\n\n\/\/ loginRegistry calls the `docker login` command to authenticate the engine to the\n\/\/ specified registry with given credentials.\nfunc loginRegistry(s dockerLoginSettings) error {\n\tif !s.HasLoginInfo() {\n\t\tlog.Println(\"registry login not specificied\")\n\t\treturn nil\n\t}\n\topts := []string{\n\t\t\"login\",\n\t\t\"--email=\" + s.Email,\n\t\t\"--username=\" + s.Username,\n\t\t\"--password=\" + s.Password,\n\t}\n\tif s.Server != \"\" {\n\t\topts = append(opts, s.Server)\n\t}\n\t_, err := executil.Exec(\"docker\", opts...)\n\tif err != nil {\n\t\treturn errors.New(\"'docker login' failed\")\n\t}\n\treturn nil\n}\n\n\/\/ composeBinPath returns the path docker-compose binary should be installed at\n\/\/ on the host operating system.\nfunc composeBinPath(d driver.DistroDriver) string {\n\treturn filepath.Join(d.DockerComposeDir(), composeBin)\n}\n\n\/\/ composeUp converts given json to yaml, saves to a file on the host and\n\/\/ uses `docker-compose up -d` to create the containers.\nfunc composeUp(d driver.DistroDriver, json map[string]interface{}, publicEnv, protectedEnv map[string]string) error {\n\tif len(json) == 0 {\n\t\tlog.Println(\"docker-compose config not specified, noop\")\n\t\treturn nil\n\t}\n\n\t\/\/ Convert json to yaml\n\tyaml, err := yaml.Marshal(json)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error converting to compose.yml: %v\", err)\n\t}\n\n\tif err := os.MkdirAll(composeYmlDir, 0777); err != nil {\n\t\treturn fmt.Errorf(\"failed creating %s: %v\", composeYmlDir, err)\n\t}\n\tlog.Printf(\"Using compose yaml:>>>>>\\n%s\\n<<<<<\", string(yaml))\n\tymlPath := filepath.Join(composeYmlDir, composeYml)\n\tif err := ioutil.WriteFile(ymlPath, yaml, 0666); err != nil {\n\t\treturn fmt.Errorf(\"error writing %s: %v\", ymlPath, err)\n\t}\n\n\tif publicEnv == nil {\n\t\tpublicEnv = make(map[string]string)\n\t}\n\n\t\/\/ set timeout for docker-compose -> docker-engine interactions.\n\t\/\/ When downloading large images, docker-compose intermittently times out\n\t\/\/ (gh#docker\/compose\/issues\/2186) (gh#Azure\/azure-docker-extension\/issues\/87).\n\tif _, ok := publicEnv[\"COMPOSE_HTTP_TIMEOUT\"]; !ok {\n\t\tpublicEnv[\"COMPOSE_HTTP_TIMEOUT\"] = fmt.Sprintf(\"%d\", composeTimeoutSecs)\n\t}\n\n\t\/\/ provide a consistent default project name for docker-compose. this is to prevent\n\t\/\/ inconsistencies that may occur when we change where docker-compose.yml lives.\n\tif _, ok := publicEnv[\"COMPOSE_PROJECT_NAME\"]; !ok {\n\t\tpublicEnv[\"COMPOSE_PROJECT_NAME\"] = composeProject\n\t}\n\n\t\/\/ set public environment variables to be used in docker-compose\n\tfor k, v := range publicEnv {\n\t\tlog.Printf(\"Setting docker-compose environment variable %q=%q.\", k, v)\n\t\tos.Setenv(k, v)\n\t\tdefer os.Unsetenv(k)\n\t}\n\n\t\/\/ set protected environment variables to be used in docker-compose\n\tfor k, v := range protectedEnv {\n\t\tlog.Printf(\"Setting protected docker-compose environment variable %q.\", k)\n\t\tos.Setenv(k, v)\n\t\tdefer os.Unsetenv(k)\n\t}\n\n\treturn executil.ExecPipeToFds(executil.Fds{Out: ioutil.Discard}, composeBinPath(d), \"-f\", ymlPath, \"up\", \"-d\")\n}\n\n\/\/ installDockerCerts saves the configured certs to the specified dir\n\/\/ if and only if the certs are not already placed there. If no certs\n\/\/ are provided or some certs already exist, nothing is written.\nfunc installDockerCerts(s DockerHandlerSettings, dstDir string) error {\n\tm := []struct {\n\t\tsrc string\n\t\tdst string\n\t}{\n\t\t{s.Certs.CABase64, filepath.Join(dstDir, dockerCaCert)},\n\t\t{s.Certs.ServerCertBase64, filepath.Join(dstDir, dockerSrvCert)},\n\t\t{s.Certs.ServerKeyBase64, filepath.Join(dstDir, dockerSrvKey)},\n\t}\n\n\t\/\/ Check if certs are provided\n\tfor _, v := range m {\n\t\tif len(v.src) == 0 {\n\t\t\tlog.Printf(\"Docker certificate %s is not provided in the extension settings, skipping docker certs installation\", v.dst)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Check the target directory, if not create\n\tif ok, err := util.PathExists(dstDir); err != nil {\n\t\treturn fmt.Errorf(\"error checking cert dir: %v\", err)\n\t} else if !ok {\n\t\tif err := os.MkdirAll(dstDir, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Write the certs\n\tfor _, v := range m {\n\t\t\/\/ Decode base64\n\t\tin := strings.TrimSpace(v.src)\n\t\tf, err := base64.StdEncoding.DecodeString(in)\n\t\tif err != nil {\n\t\t\t\/\/ Fallback to original file input\n\t\t\tf = []byte(in)\n\t\t}\n\n\t\tif err := ioutil.WriteFile(v.dst, f, 0600); err != nil {\n\t\t\treturn fmt.Errorf(\"error writing certificate: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc updateDockerOpts(dd driver.DistroDriver, args string) (bool, error) {\n\tlog.Printf(\"Updating daemon args to: %s\", args)\n\trestartNeeded, err := dd.UpdateDockerArgs(args)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error updating DOCKER_OPTS: %v\", err)\n\t}\n\treturn restartNeeded, nil\n}\n\n\/\/ getArgs provides set of arguments that should be used in updating Docker\n\/\/ daemon options based on the distro.\nfunc getArgs(s DockerHandlerSettings, dd driver.DistroDriver) string {\n\targs := dd.BaseOpts()\n\n\tif s.Certs.HasDockerCerts() {\n\t\ttls := []string{\"--tlsverify\",\n\t\t\tfmt.Sprintf(\"--tlscacert=%s\", filepath.Join(dockerCfgDir, dockerCaCert)),\n\t\t\tfmt.Sprintf(\"--tlscert=%s\", filepath.Join(dockerCfgDir, dockerSrvCert)),\n\t\t\tfmt.Sprintf(\"--tlskey=%s\", filepath.Join(dockerCfgDir, dockerSrvKey)),\n\t\t}\n\t\targs = append(args, tls...)\n\t}\n\n\tif s.Docker.Port != \"\" {\n\t\targs = append(args, fmt.Sprintf(\"-H=0.0.0.0:%s\", s.Docker.Port))\n\t}\n\n\tif len(s.Docker.Options) > 0 {\n\t\targs = append(args, s.Docker.Options...)\n\t}\n\n\treturn strings.Join(args, \" \")\n}\n\n\nfunc getDockerUrls(env string) (string, string) {\n\turlMap := map[string]map[string]string{\n\t\t\"china\": map[string]string{\n\t\t\t\/\/ This script is synced with https:\/\/get.docker.com daily, with the change that apt_url and yum_url point to the mirror in China\n\t\t\t\"docker\" : \"http:\/\/mirror.azure.cn\/repo\/install-docker-engine.sh\",\n\t\t\t\"compose\": \"http:\/\/mirror.azure.cn\/docker-toolbox\/linux\/compose\/1.6.2\/docker-compose-Linux-x86_64\",\n\t\t},\n\t\t\"global\": map[string]string{\n\t\t\t\"docker\" : \"https:\/\/get.docker.com\/\",\n\t\t\t\"compose\": \"https:\/\/github.com\/docker\/compose\/releases\/download\/1.6.2\/docker-compose-Linux-x86_64\",\n\t\t},\n\t}\n\tif value, ok := urlMap[strings.ToLower(env)]; ok {\n\t\treturn value[\"docker\"], value[\"compose\"]\n\t}\n\treturn urlMap[\"global\"][\"docker\"], urlMap[\"global\"][\"docker\"]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build linux && cgo\n\/\/ +build linux,cgo\n\npackage drivers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"unsafe\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\/\/ Used by cgo\n\t_ \"github.com\/lxc\/lxd\/lxd\/include\"\n)\n\n\/*\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE 1\n#endif\n#define _FILE_OFFSET_BITS 64\n#include <dirent.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n#include <linux\/loop.h>\n#include <sys\/ioctl.h>\n#include <sys\/mount.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n\n#include \"..\/..\/include\/macro.h\"\n#include \"..\/..\/include\/memory_utils.h\"\n\n#define LXD_MAXPATH 4096\n#define LXD_NUMSTRLEN64 21\n#define LXD_MAX_LOOP_PATHLEN (2 * sizeof(\"loop\/\")) + LXD_NUMSTRLEN64 + sizeof(\"backing_file\") + 1\n\n\/\/ If a loop file is already associated with a loop device, find it.\n\/\/ This looks at \"\/sys\/block\" to avoid having to parse all of \"\/dev\". Also, this\n\/\/ allows to retrieve the full name of the backing file even if\n\/\/ strlen(backing file) > LO_NAME_SIZE.\nstatic int find_associated_loop_device(const char *loop_file,\n\t\t\t\t char *loop_dev_name)\n{\n\t__do_closedir DIR *dir = NULL;\n\tchar looppath[LXD_MAX_LOOP_PATHLEN];\n\tchar buf[LXD_MAXPATH];\n\tstruct dirent *dp;\n\n\tdir = opendir(\"\/sys\/block\");\n\tif (!dir)\n\t\treturn -1;\n\n\twhile ((dp = readdir(dir))) {\n\t\t__do_close int loop_path_fd = -EBADF;\n\t\tint ret;\n\t\tsize_t totlen;\n\t\tstruct stat fstatbuf;\n\t\tint dfd = -1;\n\n\t\tif (!dp)\n\t\t\tbreak;\n\n\t\tif (strncmp(dp->d_name, \"loop\", 4))\n\t\t\tcontinue;\n\n\t\tdfd = dirfd(dir);\n\t\tif (dfd < 0)\n\t\t\tcontinue;\n\n\t\tret = snprintf(looppath, sizeof(looppath), \"%s\/loop\/backing_file\", dp->d_name);\n\t\tif (ret < 0 || (size_t)ret >= sizeof(looppath))\n\t\t\tcontinue;\n\n\t\tret = fstatat(dfd, looppath, &fstatbuf, 0);\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\tloop_path_fd = openat(dfd, looppath, O_RDONLY | O_CLOEXEC, 0);\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\t\/\/ Clear buffer.\n\t\tmemset(buf, 0, sizeof(buf));\n\t\tret = read(loop_path_fd, buf, sizeof(buf));\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\ttotlen = strlen(buf);\n\n\t\t\/\/ Trim newlines.\n\t\twhile ((totlen > 0) && (buf[totlen - 1] == '\\n'))\n\t\t\tbuf[--totlen] = '\\0';\n\n\t\tif (strcmp(buf, loop_file))\n\t\t\tcontinue;\n\n\t\t\/\/ Create path to loop device.\n\t\tret = snprintf(loop_dev_name, LO_NAME_SIZE, \"\/dev\/%s\",\n\t\t\t dp->d_name);\n\t\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\t\tcontinue;\n\n\t\t\/\/ Open fd to loop device.\n\t\treturn open(loop_dev_name, O_RDWR);\n\t}\n\n\treturn -1;\n}\n\nstatic int get_unused_loop_dev_legacy(char *loop_name)\n{\n\t__do_closedir DIR *dir = NULL;\n\tstruct dirent *dp;\n\tstruct loop_info64 lo64;\n\n\tdir = opendir(\"\/dev\");\n\tif (!dir)\n\t\treturn -1;\n\n\twhile ((dp = readdir(dir))) {\n\t\t__do_close int dfd = -EBADF, fd = -EBADF;\n\t\tint ret;\n\n\t\tif (!dp)\n\t\t\tbreak;\n\n\t\tif (strncmp(dp->d_name, \"loop\", 4) != 0)\n\t\t\tcontinue;\n\n\t\tdfd = dirfd(dir);\n\t\tif (dfd < 0)\n\t\t\tcontinue;\n\n\t\tfd = openat(dfd, dp->d_name, O_RDWR);\n\t\tif (fd < 0)\n\t\t\tcontinue;\n\n\t\tret = ioctl(fd, LOOP_GET_STATUS64, &lo64);\n\t\tif (ret < 0)\n\t\t\tif (ioctl(fd, LOOP_GET_STATUS64, &lo64) == 0 || errno != ENXIO)\n\t\t\t\tcontinue;\n\n\t\tret = snprintf(loop_name, LO_NAME_SIZE, \"\/dev\/%s\", dp->d_name);\n\t\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\t\tcontinue;\n\n\t\treturn move_fd(fd);\n\t}\n\n\treturn -1;\n}\n\nstatic int get_unused_loop_dev(char *name_loop)\n{\n\t__do_close int fd_ctl = -EBADF;\n\tint loop_nr, ret;\n\n\tfd_ctl = open(\"\/dev\/loop-control\", O_RDWR | O_CLOEXEC);\n\tif (fd_ctl < 0)\n\t\treturn -ENODEV;\n\n\tloop_nr = ioctl(fd_ctl, LOOP_CTL_GET_FREE);\n\tif (loop_nr < 0)\n\t\treturn -1;\n\n\tret = snprintf(name_loop, LO_NAME_SIZE, \"\/dev\/loop%d\", loop_nr);\n\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\treturn -1;\n\n\treturn open(name_loop, O_RDWR | O_CLOEXEC);\n}\n\nstatic int prepare_loop_dev(const char *source, char *loop_dev, int flags)\n{\n\t__do_close int fd_img = -EBADF, fd_loop = -EBADF;\n\tint ret;\n\tstruct loop_info64 lo64;\n\n\tfd_loop = get_unused_loop_dev(loop_dev);\n\tif (fd_loop < 0) {\n\t\tif (fd_loop == -ENODEV)\n\t\t\tfd_loop = get_unused_loop_dev_legacy(loop_dev);\n\t\telse\n\t\t\treturn -1;\n\t}\n\n\tfd_img = open(source, O_RDWR | O_CLOEXEC);\n\tif (fd_img < 0)\n\t\treturn -1;\n\n\tret = ioctl(fd_loop, LOOP_SET_FD, fd_img);\n\tif (ret < 0)\n\t\treturn -1;\n\n\tmemset(&lo64, 0, sizeof(lo64));\n\tlo64.lo_flags = flags;\n\n\tret = ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n\tif (ret < 0)\n\t\treturn -1;\n\n\treturn move_fd(fd_loop);\n}\n\nstatic inline int prepare_loop_dev_retry(const char *source, char *loop_dev, int flags)\n{\n\tint ret;\n\tunsigned int idx = 0;\n\n\tdo {\n\t\tret = prepare_loop_dev(source, loop_dev, flags);\n\t\tidx++;\n\t} while (ret < 0 && errno == EBUSY && idx < 30);\n\n\treturn ret;\n}\n\n\/\/ Note that this does not guarantee to clear the loop device in time so that\n\/\/ find_associated_loop_device() will not report that there still is a\n\/\/ configured device (udev and so on...). So don't call\n\/\/ find_associated_loop_device() after having called\n\/\/ set_autoclear_loop_device().\nint set_autoclear_loop_device(int fd_loop)\n{\n\tstruct loop_info64 lo64;\n\n\tmemset(&lo64, 0, sizeof(lo64));\n\tlo64.lo_flags = LO_FLAGS_AUTOCLEAR;\n\terrno = 0;\n\treturn ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n}\n\n\/\/ Directly release the loop device\nint free_loop_device(int fd_loop)\n{\n\treturn ioctl(fd_loop, LOOP_CLR_FD);\n}\n\n\/\/ Unset the LO_FLAGS_AUTOCLEAR flag on the given loop device file descriptor.\nint unset_autoclear_loop_device(int fd_loop)\n{\n\tint ret;\n\tstruct loop_info64 lo64;\n\n\terrno = 0;\n\tret = ioctl(fd_loop, LOOP_GET_STATUS64, &lo64);\n\tif (ret < 0)\n\t\treturn -1;\n\n\tif ((lo64.lo_flags & LO_FLAGS_AUTOCLEAR) == 0)\n\t\treturn 0;\n\n\tlo64.lo_flags &= ~LO_FLAGS_AUTOCLEAR;\n\terrno = 0;\n\treturn ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n}\n*\/\nimport \"C\"\n\n\/\/ LoFlagsAutoclear determines whether the loop device will autodestruct on last\n\/\/ close.\nconst LoFlagsAutoclear int = C.LO_FLAGS_AUTOCLEAR\n\n\/\/ PrepareLoopDev detects and sets up a loop device for source. It returns an\n\/\/ open file descriptor to the free loop device and the path of the free loop\n\/\/ device. It's the callers responsibility to close the open file descriptor.\nfunc PrepareLoopDev(source string, flags int) (*os.File, error) {\n\tcLoopDev := C.malloc(C.size_t(C.LO_NAME_SIZE))\n\tif cLoopDev == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to allocate memory in C\")\n\t}\n\tdefer C.free(cLoopDev)\n\n\tcSource := C.CString(source)\n\tdefer C.free(unsafe.Pointer(cSource))\n\tloopFd, _ := C.find_associated_loop_device(cSource, (*C.char)(cLoopDev))\n\tif loopFd >= 0 {\n\t\treturn os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev))), nil\n\t}\n\n\tloopFd, err := C.prepare_loop_dev_retry(cSource, (*C.char)(cLoopDev), C.int(flags))\n\tif loopFd < 0 {\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed to prepare loop device for %q\", source)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Failed to prepare loop device for %q\", source)\n\t}\n\n\treturn os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev))), nil\n}\n\n\/\/ releaseLoopDev releases the loop dev assigned to the provided file.\nfunc releaseLoopDev(source string) error {\n\tcLoopDev := C.malloc(C.size_t(C.LO_NAME_SIZE))\n\tif cLoopDev == nil {\n\t\treturn fmt.Errorf(\"Failed to allocate memory in C\")\n\t}\n\tdefer C.free(cLoopDev)\n\n\tcSource := C.CString(source)\n\tloopFd, err := C.find_associated_loop_device(cSource, (*C.char)(cLoopDev))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Prepare a Go file and defer close on the loop device.\n\tfd := os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev)))\n\tdefer fd.Close()\n\n\tif loopFd >= 0 {\n\t\t_, err := C.free_loop_device(C.int(loopFd))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ SetAutoclearOnLoopDev enables autodestruction of the provided loopback device.\nfunc SetAutoclearOnLoopDev(loopFd int) error {\n\tret, err := C.set_autoclear_loop_device(C.int(loopFd))\n\tif ret < 0 {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to set LO_FLAGS_AUTOCLEAR\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UnsetAutoclearOnLoopDev disables autodestruction of the provided loopback device.\nfunc UnsetAutoclearOnLoopDev(loopFd int) error {\n\tret, err := C.unset_autoclear_loop_device(C.int(loopFd))\n\tif ret < 0 {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to unset LO_FLAGS_AUTOCLEAR\")\n\t}\n\n\treturn nil\n}\n<commit_msg>lxd\/storage: Use O_CLOEXEC<commit_after>\/\/go:build linux && cgo\n\/\/ +build linux,cgo\n\npackage drivers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"unsafe\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\/\/ Used by cgo\n\t_ \"github.com\/lxc\/lxd\/lxd\/include\"\n)\n\n\/*\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE 1\n#endif\n#define _FILE_OFFSET_BITS 64\n#include <dirent.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n#include <linux\/loop.h>\n#include <sys\/ioctl.h>\n#include <sys\/mount.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n\n#include \"..\/..\/include\/macro.h\"\n#include \"..\/..\/include\/memory_utils.h\"\n\n#define LXD_MAXPATH 4096\n#define LXD_NUMSTRLEN64 21\n#define LXD_MAX_LOOP_PATHLEN (2 * sizeof(\"loop\/\")) + LXD_NUMSTRLEN64 + sizeof(\"backing_file\") + 1\n\n\/\/ If a loop file is already associated with a loop device, find it.\n\/\/ This looks at \"\/sys\/block\" to avoid having to parse all of \"\/dev\". Also, this\n\/\/ allows to retrieve the full name of the backing file even if\n\/\/ strlen(backing file) > LO_NAME_SIZE.\nstatic int find_associated_loop_device(const char *loop_file,\n\t\t\t\t char *loop_dev_name)\n{\n\t__do_closedir DIR *dir = NULL;\n\tchar looppath[LXD_MAX_LOOP_PATHLEN];\n\tchar buf[LXD_MAXPATH];\n\tstruct dirent *dp;\n\n\tdir = opendir(\"\/sys\/block\");\n\tif (!dir)\n\t\treturn -1;\n\n\twhile ((dp = readdir(dir))) {\n\t\t__do_close int loop_path_fd = -EBADF;\n\t\tint ret;\n\t\tsize_t totlen;\n\t\tstruct stat fstatbuf;\n\t\tint dfd = -1;\n\n\t\tif (!dp)\n\t\t\tbreak;\n\n\t\tif (strncmp(dp->d_name, \"loop\", 4))\n\t\t\tcontinue;\n\n\t\tdfd = dirfd(dir);\n\t\tif (dfd < 0)\n\t\t\tcontinue;\n\n\t\tret = snprintf(looppath, sizeof(looppath), \"%s\/loop\/backing_file\", dp->d_name);\n\t\tif (ret < 0 || (size_t)ret >= sizeof(looppath))\n\t\t\tcontinue;\n\n\t\tret = fstatat(dfd, looppath, &fstatbuf, 0);\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\tloop_path_fd = openat(dfd, looppath, O_RDONLY | O_CLOEXEC, 0);\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\t\/\/ Clear buffer.\n\t\tmemset(buf, 0, sizeof(buf));\n\t\tret = read(loop_path_fd, buf, sizeof(buf));\n\t\tif (ret < 0)\n\t\t\tcontinue;\n\n\t\ttotlen = strlen(buf);\n\n\t\t\/\/ Trim newlines.\n\t\twhile ((totlen > 0) && (buf[totlen - 1] == '\\n'))\n\t\t\tbuf[--totlen] = '\\0';\n\n\t\tif (strcmp(buf, loop_file))\n\t\t\tcontinue;\n\n\t\t\/\/ Create path to loop device.\n\t\tret = snprintf(loop_dev_name, LO_NAME_SIZE, \"\/dev\/%s\",\n\t\t\t dp->d_name);\n\t\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\t\tcontinue;\n\n\t\t\/\/ Open fd to loop device.\n\t\treturn open(loop_dev_name, O_RDWR | O_CLOEXEC);\n\t}\n\n\treturn -1;\n}\n\nstatic int get_unused_loop_dev_legacy(char *loop_name)\n{\n\t__do_closedir DIR *dir = NULL;\n\tstruct dirent *dp;\n\tstruct loop_info64 lo64;\n\n\tdir = opendir(\"\/dev\");\n\tif (!dir)\n\t\treturn -1;\n\n\twhile ((dp = readdir(dir))) {\n\t\t__do_close int dfd = -EBADF, fd = -EBADF;\n\t\tint ret;\n\n\t\tif (!dp)\n\t\t\tbreak;\n\n\t\tif (strncmp(dp->d_name, \"loop\", 4) != 0)\n\t\t\tcontinue;\n\n\t\tdfd = dirfd(dir);\n\t\tif (dfd < 0)\n\t\t\tcontinue;\n\n\t\tfd = openat(dfd, dp->d_name, O_RDWR);\n\t\tif (fd < 0)\n\t\t\tcontinue;\n\n\t\tret = ioctl(fd, LOOP_GET_STATUS64, &lo64);\n\t\tif (ret < 0)\n\t\t\tif (ioctl(fd, LOOP_GET_STATUS64, &lo64) == 0 || errno != ENXIO)\n\t\t\t\tcontinue;\n\n\t\tret = snprintf(loop_name, LO_NAME_SIZE, \"\/dev\/%s\", dp->d_name);\n\t\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\t\tcontinue;\n\n\t\treturn move_fd(fd);\n\t}\n\n\treturn -1;\n}\n\nstatic int get_unused_loop_dev(char *name_loop)\n{\n\t__do_close int fd_ctl = -EBADF;\n\tint loop_nr, ret;\n\n\tfd_ctl = open(\"\/dev\/loop-control\", O_RDWR | O_CLOEXEC);\n\tif (fd_ctl < 0)\n\t\treturn -ENODEV;\n\n\tloop_nr = ioctl(fd_ctl, LOOP_CTL_GET_FREE);\n\tif (loop_nr < 0)\n\t\treturn -1;\n\n\tret = snprintf(name_loop, LO_NAME_SIZE, \"\/dev\/loop%d\", loop_nr);\n\tif (ret < 0 || ret >= LO_NAME_SIZE)\n\t\treturn -1;\n\n\treturn open(name_loop, O_RDWR | O_CLOEXEC);\n}\n\nstatic int prepare_loop_dev(const char *source, char *loop_dev, int flags)\n{\n\t__do_close int fd_img = -EBADF, fd_loop = -EBADF;\n\tint ret;\n\tstruct loop_info64 lo64;\n\n\tfd_loop = get_unused_loop_dev(loop_dev);\n\tif (fd_loop < 0) {\n\t\tif (fd_loop == -ENODEV)\n\t\t\tfd_loop = get_unused_loop_dev_legacy(loop_dev);\n\t\telse\n\t\t\treturn -1;\n\t}\n\n\tfd_img = open(source, O_RDWR | O_CLOEXEC);\n\tif (fd_img < 0)\n\t\treturn -1;\n\n\tret = ioctl(fd_loop, LOOP_SET_FD, fd_img);\n\tif (ret < 0)\n\t\treturn -1;\n\n\tmemset(&lo64, 0, sizeof(lo64));\n\tlo64.lo_flags = flags;\n\n\tret = ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n\tif (ret < 0)\n\t\treturn -1;\n\n\treturn move_fd(fd_loop);\n}\n\nstatic inline int prepare_loop_dev_retry(const char *source, char *loop_dev, int flags)\n{\n\tint ret;\n\tunsigned int idx = 0;\n\n\tdo {\n\t\tret = prepare_loop_dev(source, loop_dev, flags);\n\t\tidx++;\n\t} while (ret < 0 && errno == EBUSY && idx < 30);\n\n\treturn ret;\n}\n\n\/\/ Note that this does not guarantee to clear the loop device in time so that\n\/\/ find_associated_loop_device() will not report that there still is a\n\/\/ configured device (udev and so on...). So don't call\n\/\/ find_associated_loop_device() after having called\n\/\/ set_autoclear_loop_device().\nint set_autoclear_loop_device(int fd_loop)\n{\n\tstruct loop_info64 lo64;\n\n\tmemset(&lo64, 0, sizeof(lo64));\n\tlo64.lo_flags = LO_FLAGS_AUTOCLEAR;\n\terrno = 0;\n\treturn ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n}\n\n\/\/ Directly release the loop device\nint free_loop_device(int fd_loop)\n{\n\treturn ioctl(fd_loop, LOOP_CLR_FD);\n}\n\n\/\/ Unset the LO_FLAGS_AUTOCLEAR flag on the given loop device file descriptor.\nint unset_autoclear_loop_device(int fd_loop)\n{\n\tint ret;\n\tstruct loop_info64 lo64;\n\n\terrno = 0;\n\tret = ioctl(fd_loop, LOOP_GET_STATUS64, &lo64);\n\tif (ret < 0)\n\t\treturn -1;\n\n\tif ((lo64.lo_flags & LO_FLAGS_AUTOCLEAR) == 0)\n\t\treturn 0;\n\n\tlo64.lo_flags &= ~LO_FLAGS_AUTOCLEAR;\n\terrno = 0;\n\treturn ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);\n}\n*\/\nimport \"C\"\n\n\/\/ LoFlagsAutoclear determines whether the loop device will autodestruct on last\n\/\/ close.\nconst LoFlagsAutoclear int = C.LO_FLAGS_AUTOCLEAR\n\n\/\/ PrepareLoopDev detects and sets up a loop device for source. It returns an\n\/\/ open file descriptor to the free loop device and the path of the free loop\n\/\/ device. It's the callers responsibility to close the open file descriptor.\nfunc PrepareLoopDev(source string, flags int) (*os.File, error) {\n\tcLoopDev := C.malloc(C.size_t(C.LO_NAME_SIZE))\n\tif cLoopDev == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to allocate memory in C\")\n\t}\n\tdefer C.free(cLoopDev)\n\n\tcSource := C.CString(source)\n\tdefer C.free(unsafe.Pointer(cSource))\n\tloopFd, _ := C.find_associated_loop_device(cSource, (*C.char)(cLoopDev))\n\tif loopFd >= 0 {\n\t\treturn os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev))), nil\n\t}\n\n\tloopFd, err := C.prepare_loop_dev_retry(cSource, (*C.char)(cLoopDev), C.int(flags))\n\tif loopFd < 0 {\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed to prepare loop device for %q\", source)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Failed to prepare loop device for %q\", source)\n\t}\n\n\treturn os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev))), nil\n}\n\n\/\/ releaseLoopDev releases the loop dev assigned to the provided file.\nfunc releaseLoopDev(source string) error {\n\tcLoopDev := C.malloc(C.size_t(C.LO_NAME_SIZE))\n\tif cLoopDev == nil {\n\t\treturn fmt.Errorf(\"Failed to allocate memory in C\")\n\t}\n\tdefer C.free(cLoopDev)\n\n\tcSource := C.CString(source)\n\tloopFd, err := C.find_associated_loop_device(cSource, (*C.char)(cLoopDev))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Prepare a Go file and defer close on the loop device.\n\tfd := os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev)))\n\tdefer fd.Close()\n\n\tif loopFd >= 0 {\n\t\t_, err := C.free_loop_device(C.int(loopFd))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ SetAutoclearOnLoopDev enables autodestruction of the provided loopback device.\nfunc SetAutoclearOnLoopDev(loopFd int) error {\n\tret, err := C.set_autoclear_loop_device(C.int(loopFd))\n\tif ret < 0 {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to set LO_FLAGS_AUTOCLEAR\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UnsetAutoclearOnLoopDev disables autodestruction of the provided loopback device.\nfunc UnsetAutoclearOnLoopDev(loopFd int) error {\n\tret, err := C.unset_autoclear_loop_device(C.int(loopFd))\n\tif ret < 0 {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to unset LO_FLAGS_AUTOCLEAR\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sql\n\nimport (\n\t\"reflect\"\n\t\"github.com\/ligato\/cn-infra\/utils\/safeclose\"\n)\n\n\/\/ SliceIt reads everything from the ValIterator & stores it to pointerToASlice\n\/\/ It closes the iterator (since nothing left in the iterator)\nfunc SliceIt(pointerToASlice interface{}, it ValIterator) error {\n\t\/* TODO defer func() {\n\t\tif exp := recover(); exp != nil && it != nil {\n\t\t\tlogger.Error(exp)\n\t\t\texp = safeclose.Close(it)\n\t\t\tif exp != nil {\n\t\t\t\tlogger.Error(exp)\n\t\t\t}\n\t\t}\n\t}()*\/\n\n\tsl := reflect.ValueOf(pointerToASlice)\n\tif sl.Kind() == reflect.Ptr {\n\t\tsl = sl.Elem()\n\t} else {\n\t\tpanic(\"must be pointer\")\n\t}\n\n\tif sl.Kind() != reflect.Slice {\n\t\tpanic(\"must be slice\")\n\t}\n\n\tsliceType := sl.Type()\n\n\tsliceElemType := sliceType.Elem()\n\tsliceElemPtr := sliceElemType.Kind() == reflect.Ptr\n\tif sliceElemPtr {\n\t\tsliceElemType = sliceElemType.Elem()\n\t}\n\tfor i := 0; i < 5; i++ {\n\t\trow := reflect.New(sliceElemType)\n\t\tif stop := it.GetNext(row.Interface()); stop {\n\t\t\tbreak\n\t\t}\n\n\t\tif sliceElemPtr {\n\t\t\tsl.Set(reflect.Append(sl, row))\n\t\t} else {\n\t\t\tsl.Set(reflect.Append(sl, row.Elem()))\n\t\t}\n\t}\n\n\treturn safeclose.Close(it)\n}\n<commit_msg>ODPM-419 removed safety for loop upper bound from slice_utils.go<commit_after>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sql\n\nimport (\n\t\"reflect\"\n\t\"github.com\/ligato\/cn-infra\/utils\/safeclose\"\n)\n\n\/\/ SliceIt reads everything from the ValIterator & stores it to pointerToASlice\n\/\/ It closes the iterator (since nothing left in the iterator)\nfunc SliceIt(pointerToASlice interface{}, it ValIterator) error {\n\t\/* TODO defer func() {\n\t\tif exp := recover(); exp != nil && it != nil {\n\t\t\tlogger.Error(exp)\n\t\t\texp = safeclose.Close(it)\n\t\t\tif exp != nil {\n\t\t\t\tlogger.Error(exp)\n\t\t\t}\n\t\t}\n\t}()*\/\n\n\tsl := reflect.ValueOf(pointerToASlice)\n\tif sl.Kind() == reflect.Ptr {\n\t\tsl = sl.Elem()\n\t} else {\n\t\tpanic(\"must be pointer\")\n\t}\n\n\tif sl.Kind() != reflect.Slice {\n\t\tpanic(\"must be slice\")\n\t}\n\n\tsliceType := sl.Type()\n\n\tsliceElemType := sliceType.Elem()\n\tsliceElemPtr := sliceElemType.Kind() == reflect.Ptr\n\tif sliceElemPtr {\n\t\tsliceElemType = sliceElemType.Elem()\n\t}\n\tfor {\n\t\trow := reflect.New(sliceElemType)\n\t\tif stop := it.GetNext(row.Interface()); stop {\n\t\t\tbreak\n\t\t}\n\n\t\tif sliceElemPtr {\n\t\t\tsl.Set(reflect.Append(sl, row))\n\t\t} else {\n\t\t\tsl.Set(reflect.Append(sl, row.Elem()))\n\t\t}\n\t}\n\n\treturn safeclose.Close(it)\n}\n<|endoftext|>"} {"text":"<commit_before>package orm\n\nimport (\n\t\"fmt\"\n\t\"gnd.la\/orm\/driver\"\n\t\"gnd.la\/orm\/query\"\n)\n\ntype Query struct {\n\torm *Orm\n\tmodel *joinModel\n\tmethods []*driver.Methods\n\tjtype JoinType\n\tq query.Q\n\tlimit int\n\toffset int\n\tsortField string\n\tsortDir int\n\tdepth int\n\terr error\n}\n\nfunc (q *Query) ensureTable(f string) error {\n\tif q.model == nil {\n\t\tfmt.Errorf(\"no table selected, set one with Table() before calling %s()\", f)\n\t}\n\treturn nil\n}\n\n\/\/ Table sets the table for the query. If the table was\n\/\/ previously set, it's overridden. Rather than using\n\/\/ strings to select tables, a Table object (which is\n\/\/ returned from Register) is used. This way is not\n\/\/ possible to mistype a table name, which avoids lots\n\/\/ of errors.\nfunc (q *Query) Table(t *Table) *Query {\n\tq.model = t.model\n\treturn q\n}\n\nfunc (q *Query) Join(jt JoinType) *Query {\n\tq.jtype = jt\n\treturn q\n}\n\n\/\/ Filter adds another condition to the query. In other\n\/\/ words, it ANDs the previous condition with the one passed in.\nfunc (q *Query) Filter(qu query.Q) *Query {\n\tif qu != nil {\n\t\tif q.q == nil {\n\t\t\tq.q = qu\n\t\t} else {\n\t\t\tswitch x := q.q.(type) {\n\t\t\tcase *query.And:\n\t\t\t\tx.Conditions = append(x.Conditions, qu)\n\t\t\tdefault:\n\t\t\t\tq.q = And(q.q, qu)\n\t\t\t}\n\t\t}\n\t}\n\treturn q\n}\n\n\/\/ Limit sets the maximum number of results\n\/\/ for the query.\nfunc (q *Query) Limit(limit int) *Query {\n\tq.limit = limit\n\treturn q\n}\n\n\/\/ Offset sets the offset for the query.\nfunc (q *Query) Offset(offset int) *Query {\n\tq.offset = offset\n\treturn q\n}\n\n\/\/ Sort sets the field and direction used for sorting\n\/\/ this query.\nfunc (q *Query) Sort(field string, dir Sort) *Query {\n\tq.sortField = field\n\tq.sortDir = int(dir)\n\treturn q\n}\n\n\/\/ One fetches the first result for this query. If there\n\/\/ are no results, it returns ErrNotFound.\nfunc (q *Query) One(out ...interface{}) error {\n\titer := q.iter(1)\n\tif iter.Next(out...) {\n\t\t\/\/ Must close the iter manually, because we're not\n\t\t\/\/ reaching the end.\n\t\titer.Close()\n\t\treturn nil\n\t}\n\tif err := iter.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn ErrNotFound\n}\n\n\/\/ Exists returns wheter a result with the specified query\n\/\/ exists.\nfunc (q *Query) Exists() (bool, error) {\n\tif err := q.ensureTable(\"Exists\"); err != nil {\n\t\treturn false, err\n\t}\n\tq.orm.numQueries++\n\treturn q.orm.driver.Exists(q.model, q.q)\n}\n\n\/\/ Iter returns an Iter object which lets you\n\/\/ iterate over the results produced by the\n\/\/ query.\nfunc (q *Query) Iter() *Iter {\n\treturn q.iter(q.limit)\n}\n\n\/\/ Count returns the number of results for the query. Note that\n\/\/ you have to set the table manually before calling Count().\nfunc (q *Query) Count() (uint64, error) {\n\tif err := q.ensureTable(\"Count\"); err != nil {\n\t\treturn 0, err\n\t}\n\tq.orm.numQueries++\n\treturn q.orm.driver.Count(q.model, q.q, q.limit, q.offset)\n}\n\n\/\/ MustCount works like Count, but panics if there's an error.\nfunc (q *Query) MustCount() uint64 {\n\tc, err := q.Count()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\nfunc (q *Query) SetDepth(depth int) {\n\tq.depth = depth\n}\n\n\/\/ Clone returns a copy of the query.\nfunc (q *Query) Clone() *Query {\n\treturn &Query{\n\t\torm: q.orm,\n\t\tmodel: q.model,\n\t\tq: q.q,\n\t\tlimit: q.limit,\n\t\toffset: q.offset,\n\t\tsortField: q.sortField,\n\t\tsortDir: q.sortDir,\n\t\tdepth: q.depth,\n\t\terr: q.err,\n\t}\n}\n\nfunc (q *Query) iter(limit int) *Iter {\n\treturn &Iter{\n\t\tq: q,\n\t\tlimit: limit,\n\t\terr: q.err,\n\t}\n}\n<commit_msg>Document Join() method<commit_after>package orm\n\nimport (\n\t\"fmt\"\n\t\"gnd.la\/orm\/driver\"\n\t\"gnd.la\/orm\/query\"\n)\n\ntype Query struct {\n\torm *Orm\n\tmodel *joinModel\n\tmethods []*driver.Methods\n\tjtype JoinType\n\tq query.Q\n\tlimit int\n\toffset int\n\tsortField string\n\tsortDir int\n\tdepth int\n\terr error\n}\n\nfunc (q *Query) ensureTable(f string) error {\n\tif q.model == nil {\n\t\tfmt.Errorf(\"no table selected, set one with Table() before calling %s()\", f)\n\t}\n\treturn nil\n}\n\n\/\/ Table sets the table for the query. If the table was\n\/\/ previously set, it's overridden. Rather than using\n\/\/ strings to select tables, a Table object (which is\n\/\/ returned from Register) is used. This way is not\n\/\/ possible to mistype a table name, which avoids lots\n\/\/ of errors.\nfunc (q *Query) Table(t *Table) *Query {\n\tq.model = t.model\n\treturn q\n}\n\n\/\/ Join sets the default join type for this query. If not\n\/\/ specifed, an INNER JOIN is performed. Note that not all\n\/\/ drivers support RIGHT joins (e.g. sqlite).\nfunc (q *Query) Join(jt JoinType) *Query {\n\tq.jtype = jt\n\treturn q\n}\n\n\/\/ Filter adds another condition to the query. In other\n\/\/ words, it ANDs the previous condition with the one passed in.\nfunc (q *Query) Filter(qu query.Q) *Query {\n\tif qu != nil {\n\t\tif q.q == nil {\n\t\t\tq.q = qu\n\t\t} else {\n\t\t\tswitch x := q.q.(type) {\n\t\t\tcase *query.And:\n\t\t\t\tx.Conditions = append(x.Conditions, qu)\n\t\t\tdefault:\n\t\t\t\tq.q = And(q.q, qu)\n\t\t\t}\n\t\t}\n\t}\n\treturn q\n}\n\n\/\/ Limit sets the maximum number of results\n\/\/ for the query.\nfunc (q *Query) Limit(limit int) *Query {\n\tq.limit = limit\n\treturn q\n}\n\n\/\/ Offset sets the offset for the query.\nfunc (q *Query) Offset(offset int) *Query {\n\tq.offset = offset\n\treturn q\n}\n\n\/\/ Sort sets the field and direction used for sorting\n\/\/ this query.\nfunc (q *Query) Sort(field string, dir Sort) *Query {\n\tq.sortField = field\n\tq.sortDir = int(dir)\n\treturn q\n}\n\n\/\/ One fetches the first result for this query. If there\n\/\/ are no results, it returns ErrNotFound.\nfunc (q *Query) One(out ...interface{}) error {\n\titer := q.iter(1)\n\tif iter.Next(out...) {\n\t\t\/\/ Must close the iter manually, because we're not\n\t\t\/\/ reaching the end.\n\t\titer.Close()\n\t\treturn nil\n\t}\n\tif err := iter.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn ErrNotFound\n}\n\n\/\/ Exists returns wheter a result with the specified query\n\/\/ exists.\nfunc (q *Query) Exists() (bool, error) {\n\tif err := q.ensureTable(\"Exists\"); err != nil {\n\t\treturn false, err\n\t}\n\tq.orm.numQueries++\n\treturn q.orm.driver.Exists(q.model, q.q)\n}\n\n\/\/ Iter returns an Iter object which lets you\n\/\/ iterate over the results produced by the\n\/\/ query.\nfunc (q *Query) Iter() *Iter {\n\treturn q.iter(q.limit)\n}\n\n\/\/ Count returns the number of results for the query. Note that\n\/\/ you have to set the table manually before calling Count().\nfunc (q *Query) Count() (uint64, error) {\n\tif err := q.ensureTable(\"Count\"); err != nil {\n\t\treturn 0, err\n\t}\n\tq.orm.numQueries++\n\treturn q.orm.driver.Count(q.model, q.q, q.limit, q.offset)\n}\n\n\/\/ MustCount works like Count, but panics if there's an error.\nfunc (q *Query) MustCount() uint64 {\n\tc, err := q.Count()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\nfunc (q *Query) SetDepth(depth int) {\n\tq.depth = depth\n}\n\n\/\/ Clone returns a copy of the query.\nfunc (q *Query) Clone() *Query {\n\treturn &Query{\n\t\torm: q.orm,\n\t\tmodel: q.model,\n\t\tq: q.q,\n\t\tlimit: q.limit,\n\t\toffset: q.offset,\n\t\tsortField: q.sortField,\n\t\tsortDir: q.sortDir,\n\t\tdepth: q.depth,\n\t\terr: q.err,\n\t}\n}\n\nfunc (q *Query) iter(limit int) *Iter {\n\treturn &Iter{\n\t\tq: q,\n\t\tlimit: limit,\n\t\terr: q.err,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ gce_test is an integration test meant to verify the Cloud SQL Proxy works as\n\/\/ expected on a Google Compute Engine VM. It provisions a GCE VM, loads a\n\/\/ newly-compiled proxy client onto that VM, and then does some connectivity tests.\n\/\/\n\/\/ If the VM specified by -vm_name doesn't exist already a new VM is created.\n\/\/ If a VM does already exist, its 'sshKeys' metadata value is set to a newly\n\/\/ generated key.\n\/\/\n\/\/ Required flags:\n\/\/ -db_name, -project\n\/\/\n\/\/ Example invocation:\n\/\/ go test -v -run TestConnectionLimit -args -project=my-project -db_name=my-project:the-region:sql-name\npackage tests\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\t\"log\"\n)\n\n\/\/ TestConnectionLimit provisions a new GCE VM and verifies that the proxy works on it.\n\/\/ It uses application default credentials.\nfunc TestConnectionLimit(t *testing.T) {\n\terr, ssh := setupGCEProxy(t, []string{\"-max_connections\", \"5\"})\n\n\tcmd := fmt.Sprintf(`mysql -uroot -S cloudsql\/%s -e \"SELECT 1; SELECT SLEEP(120);\"`, *databaseName)\n\n\t\/\/ Use less than the sshd MaxStartups configuration (defaults to 10)\n\tfor i := 0; i < 5; i++ {\n\t\tgo func() {\n\t\t\tlog.Print(\"Starting blocking mysql command\")\n\t\t\tvar sout, serr bytes.Buffer\n\t\t\tif err = sshRun(ssh, cmd, nil, &sout, &serr); err != nil {\n\t\t\t\tt.Fatalf(\"Error running mysql: %v\\n\\nstandard out:\\n%s\\nstandard err:\\n%s\", err, &sout, &serr)\n\t\t\t}\n\t\t\tt.Logf(\"Blocking command output %s\", &sout)\n\t\t}()\n\t}\n\n\ttime.Sleep(time.Second * 5)\n\tvar sout, serr bytes.Buffer\n\tlog.Print(\"Test connection refusal\")\n\tcmd = fmt.Sprintf(`mysql -uroot -S cloudsql\/%s -e \"SELECT 1;\"`, *databaseName)\n\tif err = sshRun(ssh, cmd, nil, &sout, &serr); err == nil {\n\t\tt.Fatalf(\"Mysql connection should have been refused:\\n\\nstandard out:\\n%s\\nstandard err:\\n%s\", &sout, &serr)\n\t}\n\tt.Logf(\"Test command output %s\", &sout)\n}\n<commit_msg>Use t.Errorf() instead of t.Fatalf() in child goroutines to continue execution<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ gce_test is an integration test meant to verify the Cloud SQL Proxy works as\n\/\/ expected on a Google Compute Engine VM. It provisions a GCE VM, loads a\n\/\/ newly-compiled proxy client onto that VM, and then does some connectivity tests.\n\/\/\n\/\/ If the VM specified by -vm_name doesn't exist already a new VM is created.\n\/\/ If a VM does already exist, its 'sshKeys' metadata value is set to a newly\n\/\/ generated key.\n\/\/\n\/\/ Required flags:\n\/\/ -db_name, -project\n\/\/\n\/\/ Example invocation:\n\/\/ go test -v -run TestConnectionLimit -args -project=my-project -db_name=my-project:the-region:sql-name\npackage tests\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\t\"log\"\n)\n\n\/\/ TestConnectionLimit provisions a new GCE VM and verifies that the proxy works on it.\n\/\/ It uses application default credentials.\nfunc TestConnectionLimit(t *testing.T) {\n\terr, ssh := setupGCEProxy(t, []string{\"-max_connections\", \"5\"})\n\n\tcmd := fmt.Sprintf(`mysql -uroot -S cloudsql\/%s -e \"SELECT 1; SELECT SLEEP(120);\"`, *databaseName)\n\n\t\/\/ Use less than the sshd MaxStartups configuration (defaults to 10)\n\tfor i := 0; i < 5; i++ {\n\t\tgo func() {\n\t\t\tlog.Print(\"Starting blocking mysql command\")\n\t\t\tvar sout, serr bytes.Buffer\n\t\t\tif err = sshRun(ssh, cmd, nil, &sout, &serr); err != nil {\n\t\t\t\tt.Errorf(\"Error running mysql: %v\\n\\nstandard out:\\n%s\\nstandard err:\\n%s\", err, &sout, &serr)\n\t\t\t}\n\t\t\tt.Logf(\"Blocking command output %s\", &sout)\n\t\t}()\n\t}\n\n\ttime.Sleep(time.Second * 5)\n\tvar sout, serr bytes.Buffer\n\tlog.Print(\"Test connection refusal\")\n\tcmd = fmt.Sprintf(`mysql -uroot -S cloudsql\/%s -e \"SELECT 1;\"`, *databaseName)\n\tif err = sshRun(ssh, cmd, nil, &sout, &serr); err == nil {\n\t\tt.Fatalf(\"Mysql connection should have been refused:\\n\\nstandard out:\\n%s\\nstandard err:\\n%s\", &sout, &serr)\n\t}\n\tt.Logf(\"Test command output %s\", &sout)\n}\n<|endoftext|>"} {"text":"<commit_before>package bxmpp\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/jpillora\/backoff\"\n\t\"github.com\/matterbridge\/go-xmpp\"\n\t\"github.com\/rs\/xid\"\n)\n\ntype Bxmpp struct {\n\t*bridge.Config\n\n\tstartTime time.Time\n\txc *xmpp.Client\n\txmppMap map[string]string\n\tconnected bool\n\tsync.RWMutex\n\n\tavatarAvailability map[string]bool\n\tavatarMap map[string]string\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\treturn &Bxmpp{\n\t\tConfig: cfg,\n\t\txmppMap: make(map[string]string),\n\t\tavatarAvailability: make(map[string]bool),\n\t\tavatarMap: make(map[string]string),\n\t}\n}\n\nfunc (b *Bxmpp) Connect() error {\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tif err := b.createXMPP(); err != nil {\n\t\tb.Log.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.manageConnection()\n\treturn nil\n}\n\nfunc (b *Bxmpp) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bxmpp) JoinChannel(channel config.ChannelInfo) error {\n\tif channel.Options.Key != \"\" {\n\t\tb.Log.Debugf(\"using key %s for channel %s\", channel.Options.Key, channel.Name)\n\t\tb.xc.JoinProtectedMUC(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"), channel.Options.Key, xmpp.NoHistory, 0, nil)\n\t} else {\n\t\tb.xc.JoinMUCNoHistory(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"))\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) Send(msg config.Message) (string, error) {\n\t\/\/ should be fixed by using a cache instead of dropping\n\tif !b.Connected() {\n\t\treturn \"\", fmt.Errorf(\"bridge %s not connected, dropping message %#v to bridge\", b.Account, msg)\n\t}\n\t\/\/ ignore delete messages\n\tif msg.Event == config.EventMsgDelete {\n\t\treturn \"\", nil\n\t}\n\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tif msg.Event == config.EventAvatarDownload {\n\t\treturn b.cacheAvatar(&msg), nil\n\t}\n\n\t\/\/ Make a action \/me of the message, prepend the username with it.\n\t\/\/ https:\/\/xmpp.org\/extensions\/xep-0245.html\n\tif msg.Event == config.EventUserAction {\n\t\tmsg.Username = \"\/me \" + msg.Username\n\t}\n\n\t\/\/ Upload a file (in XMPP case send the upload URL because XMPP has no native upload support).\n\tvar err error\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tb.Log.Debugf(\"=> Sending attachement message %#v\", rmsg)\n\t\t\tif b.GetString(\"WebhookURL\") != \"\" {\n\t\t\t\terr = b.postSlackCompatibleWebhook(msg)\n\t\t\t} else {\n\t\t\t\t_, err = b.xc.Send(xmpp.Chat{\n\t\t\t\t\tType: \"groupchat\",\n\t\t\t\t\tRemote: rmsg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\t\tText: rmsg.Username + rmsg.Text,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tb.Log.WithError(err).Error(\"Unable to send message with share URL.\")\n\t\t\t}\n\t\t}\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn \"\", b.handleUploadFile(&msg)\n\t\t}\n\t}\n\n\tif b.GetString(\"WebhookURL\") != \"\" {\n\t\tb.Log.Debugf(\"Sending message using Webhook\")\n\t\terr := b.postSlackCompatibleWebhook(msg)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"Failed to send message using webhook: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ Post normal message.\n\tvar msgReplaceID string\n\tmsgID := xid.New().String()\n\tif msg.ID != \"\" {\n\t\tmsgID = msg.ID\n\t\tmsgReplaceID = msg.ID\n\t}\n\tb.Log.Debugf(\"=> Sending message %#v\", msg)\n\tif _, err := b.xc.Send(xmpp.Chat{\n\t\tType: \"groupchat\",\n\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\tText: msg.Username + msg.Text,\n\t\tID: msgID,\n\t\tReplaceID: msgReplaceID,\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn msgID, nil\n}\n\nfunc (b *Bxmpp) postSlackCompatibleWebhook(msg config.Message) error {\n\ttype XMPPWebhook struct {\n\t\tUsername string `json:\"username\"`\n\t\tText string `json:\"text\"`\n\t}\n\twebhookBody, err := json.Marshal(XMPPWebhook{\n\t\tUsername: msg.Username,\n\t\tText: msg.Text,\n\t})\n\tif err != nil {\n\t\tb.Log.Errorf(\"Failed to marshal webhook: %s\", err)\n\t\treturn err\n\t}\n\n\tresp, err := http.Post(b.GetString(\"WebhookURL\")+\"\/\"+url.QueryEscape(msg.Channel), \"application\/json\", bytes.NewReader(webhookBody))\n\tif err != nil {\n\t\tb.Log.Errorf(\"Failed to POST webhook: %s\", err)\n\t\treturn err\n\t}\n\n\tresp.Body.Close()\n\treturn nil\n}\n\nfunc (b *Bxmpp) createXMPP() error {\n\tvar serverName string\n\tswitch {\n\tcase !b.GetBool(\"Anonymous\"):\n\t\tif !strings.Contains(b.GetString(\"Jid\"), \"@\") {\n\t\t\treturn fmt.Errorf(\"the Jid %s doesn't contain an @\", b.GetString(\"Jid\"))\n\t\t}\n\t\tserverName = strings.Split(b.GetString(\"Jid\"), \"@\")[1]\n\tcase !strings.Contains(b.GetString(\"Server\"), \":\"):\n\t\tserverName = strings.Split(b.GetString(\"Server\"), \":\")[0]\n\tdefault:\n\t\tserverName = b.GetString(\"Server\")\n\t}\n\n\ttc := &tls.Config{\n\t\tServerName: serverName,\n\t\tInsecureSkipVerify: b.GetBool(\"SkipTLSVerify\"), \/\/ nolint: gosec\n\t}\n\n\txmpp.DebugWriter = b.Log.Writer()\n\n\toptions := xmpp.Options{\n\t\tHost: b.GetString(\"Server\"),\n\t\tUser: b.GetString(\"Jid\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t\tNoTLS: true,\n\t\tStartTLS: !b.GetBool(\"NoTLS\"),\n\t\tTLSConfig: tc,\n\t\tDebug: b.GetBool(\"debug\"),\n\t\tSession: true,\n\t\tStatus: \"\",\n\t\tStatusMessage: \"\",\n\t\tResource: \"\",\n\t\tInsecureAllowUnencryptedAuth: b.GetBool(\"NoTLS\"),\n\t}\n\tvar err error\n\tb.xc, err = options.NewClient()\n\treturn err\n}\n\nfunc (b *Bxmpp) manageConnection() {\n\tb.setConnected(true)\n\tinitial := true\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\t\/\/ Main connection loop. Each iteration corresponds to a successful\n\t\/\/ connection attempt and the subsequent handling of the connection.\n\tfor {\n\t\tif initial {\n\t\t\tinitial = false\n\t\t} else {\n\t\t\tb.Remote <- config.Message{\n\t\t\t\tUsername: \"system\",\n\t\t\t\tText: \"rejoin\",\n\t\t\t\tChannel: \"\",\n\t\t\t\tAccount: b.Account,\n\t\t\t\tEvent: config.EventRejoinChannels,\n\t\t\t}\n\t\t}\n\n\t\tif err := b.handleXMPP(); err != nil {\n\t\t\tb.Log.WithError(err).Error(\"Disconnected.\")\n\t\t\tb.setConnected(false)\n\t\t}\n\n\t\t\/\/ Reconnection loop using an exponential back-off strategy. We\n\t\t\/\/ only break out of the loop if we have successfully reconnected.\n\t\tfor {\n\t\t\td := bf.Duration()\n\t\t\tb.Log.Infof(\"Reconnecting in %s.\", d)\n\t\t\ttime.Sleep(d)\n\n\t\t\tb.Log.Infof(\"Reconnecting now.\")\n\t\t\tif err := b.createXMPP(); err == nil {\n\t\t\t\tb.setConnected(true)\n\t\t\t\tbf.Reset()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb.Log.Warn(\"Failed to reconnect.\")\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) xmppKeepAlive() chan bool {\n\tdone := make(chan bool)\n\tgo func() {\n\t\tticker := time.NewTicker(90 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tb.Log.Debugf(\"PING\")\n\t\t\t\tif err := b.xc.PingC2S(\"\", \"\"); err != nil {\n\t\t\t\t\tb.Log.Debugf(\"PING failed %#v\", err)\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn done\n}\n\nfunc (b *Bxmpp) handleXMPP() error {\n\tb.startTime = time.Now()\n\n\tdone := b.xmppKeepAlive()\n\tdefer close(done)\n\n\tfor {\n\t\tm, err := b.xc.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch v := m.(type) {\n\t\tcase xmpp.Chat:\n\t\t\tif v.Type == \"groupchat\" {\n\t\t\t\tb.Log.Debugf(\"== Receiving %#v\", v)\n\n\t\t\t\t\/\/ Skip invalid messages.\n\t\t\t\tif b.skipMessage(v) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar event string\n\t\t\t\tif strings.Contains(v.Text, \"has set the subject to:\") {\n\t\t\t\t\tevent = config.EventTopicChange\n\t\t\t\t}\n\n\t\t\t\tavailable, sok := b.avatarAvailability[v.Remote]\n\t\t\t\tavatar := \"\"\n\t\t\t\tif !sok {\n\t\t\t\t\tb.Log.Debugf(\"Requesting avatar data\")\n\t\t\t\t\tb.avatarAvailability[v.Remote] = false\n\t\t\t\t\tb.xc.AvatarRequestData(v.Remote)\n\t\t\t\t} else if available {\n\t\t\t\t\tavatar = getAvatar(b.avatarMap, v.Remote, b.General)\n\t\t\t\t}\n\n\t\t\t\tmsgID := v.ID\n\t\t\t\tif v.ReplaceID != \"\" {\n\t\t\t\t\tmsgID = v.ReplaceID\n\t\t\t\t}\n\t\t\t\trmsg := config.Message{\n\t\t\t\t\tUsername: b.parseNick(v.Remote),\n\t\t\t\t\tText: v.Text,\n\t\t\t\t\tChannel: b.parseChannel(v.Remote),\n\t\t\t\t\tAccount: b.Account,\n\t\t\t\t\tAvatar: avatar,\n\t\t\t\t\tUserID: v.Remote,\n\t\t\t\t\tID: msgID,\n\t\t\t\t\tEvent: event,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we have an action event.\n\t\t\t\tvar ok bool\n\t\t\t\trmsg.Text, ok = b.replaceAction(rmsg.Text)\n\t\t\t\tif ok {\n\t\t\t\t\trmsg.Event = config.EventUserAction\n\t\t\t\t}\n\n\t\t\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", rmsg.Username, b.Account)\n\t\t\t\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\t\t\t\tb.Remote <- rmsg\n\t\t\t}\n\t\tcase xmpp.AvatarData:\n\t\t\tb.handleDownloadAvatar(v)\n\t\t\tb.avatarAvailability[v.From] = true\n\t\t\tb.Log.Debugf(\"Avatar for %s is now available\", v.From)\n\t\tcase xmpp.Presence:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) replaceAction(text string) (string, bool) {\n\tif strings.HasPrefix(text, \"\/me \") {\n\t\treturn strings.Replace(text, \"\/me \", \"\", -1), true\n\t}\n\treturn text, false\n}\n\n\/\/ handleUploadFile handles native upload of files\nfunc (b *Bxmpp) handleUploadFile(msg *config.Message) error {\n\tvar urlDesc string\n\n\tfor _, file := range msg.Extra[\"file\"] {\n\t\tfileInfo := file.(config.FileInfo)\n\t\tif fileInfo.Comment != \"\" {\n\t\t\tmsg.Text += fileInfo.Comment + \": \"\n\t\t}\n\t\tif fileInfo.URL != \"\" {\n\t\t\tmsg.Text = fileInfo.URL\n\t\t\tif fileInfo.Comment != \"\" {\n\t\t\t\tmsg.Text = fileInfo.Comment + \": \" + fileInfo.URL\n\t\t\t\turlDesc = fileInfo.Comment\n\t\t\t}\n\t\t}\n\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\tType: \"groupchat\",\n\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\tText: msg.Username + msg.Text,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.URL != \"\" {\n\t\t\tif _, err := b.xc.SendOOB(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tOoburl: fileInfo.URL,\n\t\t\t\tOobdesc: urlDesc,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Warn(\"Failed to send share URL.\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) parseNick(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) > 1 {\n\t\ts = strings.Split(s[1], \"\/\")\n\t\tif len(s) == 2 {\n\t\t\treturn s[1] \/\/ nick\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bxmpp) parseChannel(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) >= 2 {\n\t\treturn s[0] \/\/ channel\n\t}\n\treturn \"\"\n}\n\n\/\/ skipMessage skips messages that need to be skipped\nfunc (b *Bxmpp) skipMessage(message xmpp.Chat) bool {\n\t\/\/ skip messages from ourselves\n\tif b.parseNick(message.Remote) == b.GetString(\"Nick\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip empty messages\n\tif message.Text == \"\" {\n\t\treturn true\n\t}\n\n\t\/\/ skip subject messages\n\tif strings.Contains(message.Text, \"<\/subject>\") {\n\t\treturn true\n\t}\n\n\t\/\/ do not show subjects on connect #732\n\tif strings.Contains(message.Text, \"has set the subject to:\") && time.Since(b.startTime) < time.Second*5 {\n\t\treturn true\n\t}\n\n\t\/\/ Ignore messages posted by our webhook\n\tif b.GetString(\"WebhookURL\") != \"\" && strings.Contains(message.ID, \"webhookbot\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip delayed messages\n\treturn !message.Stamp.IsZero() && time.Since(message.Stamp).Minutes() > 5\n}\n\nfunc (b *Bxmpp) setConnected(state bool) {\n\tb.Lock()\n\tb.connected = state\n\tdefer b.Unlock()\n}\n\nfunc (b *Bxmpp) Connected() bool {\n\tb.RLock()\n\tdefer b.RUnlock()\n\treturn b.connected\n}\n<commit_msg>Use a new msgID when replacing messages (xmpp). Fixes #1584 (#1623)<commit_after>package bxmpp\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/jpillora\/backoff\"\n\t\"github.com\/matterbridge\/go-xmpp\"\n\t\"github.com\/rs\/xid\"\n)\n\ntype Bxmpp struct {\n\t*bridge.Config\n\n\tstartTime time.Time\n\txc *xmpp.Client\n\txmppMap map[string]string\n\tconnected bool\n\tsync.RWMutex\n\n\tavatarAvailability map[string]bool\n\tavatarMap map[string]string\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\treturn &Bxmpp{\n\t\tConfig: cfg,\n\t\txmppMap: make(map[string]string),\n\t\tavatarAvailability: make(map[string]bool),\n\t\tavatarMap: make(map[string]string),\n\t}\n}\n\nfunc (b *Bxmpp) Connect() error {\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tif err := b.createXMPP(); err != nil {\n\t\tb.Log.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.manageConnection()\n\treturn nil\n}\n\nfunc (b *Bxmpp) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bxmpp) JoinChannel(channel config.ChannelInfo) error {\n\tif channel.Options.Key != \"\" {\n\t\tb.Log.Debugf(\"using key %s for channel %s\", channel.Options.Key, channel.Name)\n\t\tb.xc.JoinProtectedMUC(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"), channel.Options.Key, xmpp.NoHistory, 0, nil)\n\t} else {\n\t\tb.xc.JoinMUCNoHistory(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"))\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) Send(msg config.Message) (string, error) {\n\t\/\/ should be fixed by using a cache instead of dropping\n\tif !b.Connected() {\n\t\treturn \"\", fmt.Errorf(\"bridge %s not connected, dropping message %#v to bridge\", b.Account, msg)\n\t}\n\t\/\/ ignore delete messages\n\tif msg.Event == config.EventMsgDelete {\n\t\treturn \"\", nil\n\t}\n\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tif msg.Event == config.EventAvatarDownload {\n\t\treturn b.cacheAvatar(&msg), nil\n\t}\n\n\t\/\/ Make a action \/me of the message, prepend the username with it.\n\t\/\/ https:\/\/xmpp.org\/extensions\/xep-0245.html\n\tif msg.Event == config.EventUserAction {\n\t\tmsg.Username = \"\/me \" + msg.Username\n\t}\n\n\t\/\/ Upload a file (in XMPP case send the upload URL because XMPP has no native upload support).\n\tvar err error\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tb.Log.Debugf(\"=> Sending attachement message %#v\", rmsg)\n\t\t\tif b.GetString(\"WebhookURL\") != \"\" {\n\t\t\t\terr = b.postSlackCompatibleWebhook(msg)\n\t\t\t} else {\n\t\t\t\t_, err = b.xc.Send(xmpp.Chat{\n\t\t\t\t\tType: \"groupchat\",\n\t\t\t\t\tRemote: rmsg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\t\tText: rmsg.Username + rmsg.Text,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tb.Log.WithError(err).Error(\"Unable to send message with share URL.\")\n\t\t\t}\n\t\t}\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn \"\", b.handleUploadFile(&msg)\n\t\t}\n\t}\n\n\tif b.GetString(\"WebhookURL\") != \"\" {\n\t\tb.Log.Debugf(\"Sending message using Webhook\")\n\t\terr := b.postSlackCompatibleWebhook(msg)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"Failed to send message using webhook: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ Post normal message.\n\tvar msgReplaceID string\n\tmsgID := xid.New().String()\n\tif msg.ID != \"\" {\n\t\tmsgReplaceID = msg.ID\n\t}\n\tb.Log.Debugf(\"=> Sending message %#v\", msg)\n\tif _, err := b.xc.Send(xmpp.Chat{\n\t\tType: \"groupchat\",\n\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\tText: msg.Username + msg.Text,\n\t\tID: msgID,\n\t\tReplaceID: msgReplaceID,\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn msgID, nil\n}\n\nfunc (b *Bxmpp) postSlackCompatibleWebhook(msg config.Message) error {\n\ttype XMPPWebhook struct {\n\t\tUsername string `json:\"username\"`\n\t\tText string `json:\"text\"`\n\t}\n\twebhookBody, err := json.Marshal(XMPPWebhook{\n\t\tUsername: msg.Username,\n\t\tText: msg.Text,\n\t})\n\tif err != nil {\n\t\tb.Log.Errorf(\"Failed to marshal webhook: %s\", err)\n\t\treturn err\n\t}\n\n\tresp, err := http.Post(b.GetString(\"WebhookURL\")+\"\/\"+url.QueryEscape(msg.Channel), \"application\/json\", bytes.NewReader(webhookBody))\n\tif err != nil {\n\t\tb.Log.Errorf(\"Failed to POST webhook: %s\", err)\n\t\treturn err\n\t}\n\n\tresp.Body.Close()\n\treturn nil\n}\n\nfunc (b *Bxmpp) createXMPP() error {\n\tvar serverName string\n\tswitch {\n\tcase !b.GetBool(\"Anonymous\"):\n\t\tif !strings.Contains(b.GetString(\"Jid\"), \"@\") {\n\t\t\treturn fmt.Errorf(\"the Jid %s doesn't contain an @\", b.GetString(\"Jid\"))\n\t\t}\n\t\tserverName = strings.Split(b.GetString(\"Jid\"), \"@\")[1]\n\tcase !strings.Contains(b.GetString(\"Server\"), \":\"):\n\t\tserverName = strings.Split(b.GetString(\"Server\"), \":\")[0]\n\tdefault:\n\t\tserverName = b.GetString(\"Server\")\n\t}\n\n\ttc := &tls.Config{\n\t\tServerName: serverName,\n\t\tInsecureSkipVerify: b.GetBool(\"SkipTLSVerify\"), \/\/ nolint: gosec\n\t}\n\n\txmpp.DebugWriter = b.Log.Writer()\n\n\toptions := xmpp.Options{\n\t\tHost: b.GetString(\"Server\"),\n\t\tUser: b.GetString(\"Jid\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t\tNoTLS: true,\n\t\tStartTLS: !b.GetBool(\"NoTLS\"),\n\t\tTLSConfig: tc,\n\t\tDebug: b.GetBool(\"debug\"),\n\t\tSession: true,\n\t\tStatus: \"\",\n\t\tStatusMessage: \"\",\n\t\tResource: \"\",\n\t\tInsecureAllowUnencryptedAuth: b.GetBool(\"NoTLS\"),\n\t}\n\tvar err error\n\tb.xc, err = options.NewClient()\n\treturn err\n}\n\nfunc (b *Bxmpp) manageConnection() {\n\tb.setConnected(true)\n\tinitial := true\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\t\/\/ Main connection loop. Each iteration corresponds to a successful\n\t\/\/ connection attempt and the subsequent handling of the connection.\n\tfor {\n\t\tif initial {\n\t\t\tinitial = false\n\t\t} else {\n\t\t\tb.Remote <- config.Message{\n\t\t\t\tUsername: \"system\",\n\t\t\t\tText: \"rejoin\",\n\t\t\t\tChannel: \"\",\n\t\t\t\tAccount: b.Account,\n\t\t\t\tEvent: config.EventRejoinChannels,\n\t\t\t}\n\t\t}\n\n\t\tif err := b.handleXMPP(); err != nil {\n\t\t\tb.Log.WithError(err).Error(\"Disconnected.\")\n\t\t\tb.setConnected(false)\n\t\t}\n\n\t\t\/\/ Reconnection loop using an exponential back-off strategy. We\n\t\t\/\/ only break out of the loop if we have successfully reconnected.\n\t\tfor {\n\t\t\td := bf.Duration()\n\t\t\tb.Log.Infof(\"Reconnecting in %s.\", d)\n\t\t\ttime.Sleep(d)\n\n\t\t\tb.Log.Infof(\"Reconnecting now.\")\n\t\t\tif err := b.createXMPP(); err == nil {\n\t\t\t\tb.setConnected(true)\n\t\t\t\tbf.Reset()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb.Log.Warn(\"Failed to reconnect.\")\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) xmppKeepAlive() chan bool {\n\tdone := make(chan bool)\n\tgo func() {\n\t\tticker := time.NewTicker(90 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tb.Log.Debugf(\"PING\")\n\t\t\t\tif err := b.xc.PingC2S(\"\", \"\"); err != nil {\n\t\t\t\t\tb.Log.Debugf(\"PING failed %#v\", err)\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn done\n}\n\nfunc (b *Bxmpp) handleXMPP() error {\n\tb.startTime = time.Now()\n\n\tdone := b.xmppKeepAlive()\n\tdefer close(done)\n\n\tfor {\n\t\tm, err := b.xc.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch v := m.(type) {\n\t\tcase xmpp.Chat:\n\t\t\tif v.Type == \"groupchat\" {\n\t\t\t\tb.Log.Debugf(\"== Receiving %#v\", v)\n\n\t\t\t\t\/\/ Skip invalid messages.\n\t\t\t\tif b.skipMessage(v) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar event string\n\t\t\t\tif strings.Contains(v.Text, \"has set the subject to:\") {\n\t\t\t\t\tevent = config.EventTopicChange\n\t\t\t\t}\n\n\t\t\t\tavailable, sok := b.avatarAvailability[v.Remote]\n\t\t\t\tavatar := \"\"\n\t\t\t\tif !sok {\n\t\t\t\t\tb.Log.Debugf(\"Requesting avatar data\")\n\t\t\t\t\tb.avatarAvailability[v.Remote] = false\n\t\t\t\t\tb.xc.AvatarRequestData(v.Remote)\n\t\t\t\t} else if available {\n\t\t\t\t\tavatar = getAvatar(b.avatarMap, v.Remote, b.General)\n\t\t\t\t}\n\n\t\t\t\tmsgID := v.ID\n\t\t\t\tif v.ReplaceID != \"\" {\n\t\t\t\t\tmsgID = v.ReplaceID\n\t\t\t\t}\n\t\t\t\trmsg := config.Message{\n\t\t\t\t\tUsername: b.parseNick(v.Remote),\n\t\t\t\t\tText: v.Text,\n\t\t\t\t\tChannel: b.parseChannel(v.Remote),\n\t\t\t\t\tAccount: b.Account,\n\t\t\t\t\tAvatar: avatar,\n\t\t\t\t\tUserID: v.Remote,\n\t\t\t\t\tID: msgID,\n\t\t\t\t\tEvent: event,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we have an action event.\n\t\t\t\tvar ok bool\n\t\t\t\trmsg.Text, ok = b.replaceAction(rmsg.Text)\n\t\t\t\tif ok {\n\t\t\t\t\trmsg.Event = config.EventUserAction\n\t\t\t\t}\n\n\t\t\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", rmsg.Username, b.Account)\n\t\t\t\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\t\t\t\tb.Remote <- rmsg\n\t\t\t}\n\t\tcase xmpp.AvatarData:\n\t\t\tb.handleDownloadAvatar(v)\n\t\t\tb.avatarAvailability[v.From] = true\n\t\t\tb.Log.Debugf(\"Avatar for %s is now available\", v.From)\n\t\tcase xmpp.Presence:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) replaceAction(text string) (string, bool) {\n\tif strings.HasPrefix(text, \"\/me \") {\n\t\treturn strings.Replace(text, \"\/me \", \"\", -1), true\n\t}\n\treturn text, false\n}\n\n\/\/ handleUploadFile handles native upload of files\nfunc (b *Bxmpp) handleUploadFile(msg *config.Message) error {\n\tvar urlDesc string\n\n\tfor _, file := range msg.Extra[\"file\"] {\n\t\tfileInfo := file.(config.FileInfo)\n\t\tif fileInfo.Comment != \"\" {\n\t\t\tmsg.Text += fileInfo.Comment + \": \"\n\t\t}\n\t\tif fileInfo.URL != \"\" {\n\t\t\tmsg.Text = fileInfo.URL\n\t\t\tif fileInfo.Comment != \"\" {\n\t\t\t\tmsg.Text = fileInfo.Comment + \": \" + fileInfo.URL\n\t\t\t\turlDesc = fileInfo.Comment\n\t\t\t}\n\t\t}\n\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\tType: \"groupchat\",\n\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\tText: msg.Username + msg.Text,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.URL != \"\" {\n\t\t\tif _, err := b.xc.SendOOB(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tOoburl: fileInfo.URL,\n\t\t\t\tOobdesc: urlDesc,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Warn(\"Failed to send share URL.\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) parseNick(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) > 1 {\n\t\ts = strings.Split(s[1], \"\/\")\n\t\tif len(s) == 2 {\n\t\t\treturn s[1] \/\/ nick\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bxmpp) parseChannel(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) >= 2 {\n\t\treturn s[0] \/\/ channel\n\t}\n\treturn \"\"\n}\n\n\/\/ skipMessage skips messages that need to be skipped\nfunc (b *Bxmpp) skipMessage(message xmpp.Chat) bool {\n\t\/\/ skip messages from ourselves\n\tif b.parseNick(message.Remote) == b.GetString(\"Nick\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip empty messages\n\tif message.Text == \"\" {\n\t\treturn true\n\t}\n\n\t\/\/ skip subject messages\n\tif strings.Contains(message.Text, \"<\/subject>\") {\n\t\treturn true\n\t}\n\n\t\/\/ do not show subjects on connect #732\n\tif strings.Contains(message.Text, \"has set the subject to:\") && time.Since(b.startTime) < time.Second*5 {\n\t\treturn true\n\t}\n\n\t\/\/ Ignore messages posted by our webhook\n\tif b.GetString(\"WebhookURL\") != \"\" && strings.Contains(message.ID, \"webhookbot\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip delayed messages\n\treturn !message.Stamp.IsZero() && time.Since(message.Stamp).Minutes() > 5\n}\n\nfunc (b *Bxmpp) setConnected(state bool) {\n\tb.Lock()\n\tb.connected = state\n\tdefer b.Unlock()\n}\n\nfunc (b *Bxmpp) Connected() bool {\n\tb.RLock()\n\tdefer b.RUnlock()\n\treturn b.connected\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage dokodemo\n\nimport (\n\t\"syscall\"\n\n\t\"github.com\/v2ray\/v2ray-core\/common\/log\"\n\tv2net \"github.com\/v2ray\/v2ray-core\/common\/net\"\n\t\"github.com\/v2ray\/v2ray-core\/transport\/internet\"\n\t\"github.com\/v2ray\/v2ray-core\/transport\/internet\/tcp\"\n)\n\nconst SO_ORIGINAL_DST = 80\n\nfunc GetOriginalDestination(conn internet.Connection) v2net.Destination {\n\ttcpConn, ok := conn.(internet.SysFd)\n\tif !ok {\n\t\tlog.Info(\"Dokodemo: Failed to get sys fd.\")\n\t\treturn nil\n\t}\n\tfd, err := tcpConn.SysFd()\n\tif err != nil {\n\t\tlog.Info(\"Dokodemo: Failed to get original destination: \", err)\n\t\treturn nil\n\t}\n\n\taddr, err := syscall.GetsockoptIPv6Mreq(fd, syscall.IPPROTO_IP, SO_ORIGINAL_DST)\n\tif err != nil {\n\t\tlog.Info(\"Dokodemo: Failed to call getsockopt: \", err)\n\t\treturn nil\n\t}\n\tip := v2net.IPAddress(addr.Multiaddr[4:8])\n\tport := uint16(addr.Multiaddr[2])<<8 + uint16(addr.Multiaddr[3])\n\treturn v2net.TCPDestination(ip, v2net.Port(port))\n}\n<commit_msg>remove unused import<commit_after>\/\/ +build linux\n\npackage dokodemo\n\nimport (\n\t\"syscall\"\n\n\t\"github.com\/v2ray\/v2ray-core\/common\/log\"\n\tv2net \"github.com\/v2ray\/v2ray-core\/common\/net\"\n\t\"github.com\/v2ray\/v2ray-core\/transport\/internet\"\n)\n\nconst SO_ORIGINAL_DST = 80\n\nfunc GetOriginalDestination(conn internet.Connection) v2net.Destination {\n\ttcpConn, ok := conn.(internet.SysFd)\n\tif !ok {\n\t\tlog.Info(\"Dokodemo: Failed to get sys fd.\")\n\t\treturn nil\n\t}\n\tfd, err := tcpConn.SysFd()\n\tif err != nil {\n\t\tlog.Info(\"Dokodemo: Failed to get original destination: \", err)\n\t\treturn nil\n\t}\n\n\taddr, err := syscall.GetsockoptIPv6Mreq(fd, syscall.IPPROTO_IP, SO_ORIGINAL_DST)\n\tif err != nil {\n\t\tlog.Info(\"Dokodemo: Failed to call getsockopt: \", err)\n\t\treturn nil\n\t}\n\tip := v2net.IPAddress(addr.Multiaddr[4:8])\n\tport := uint16(addr.Multiaddr[2])<<8 + uint16(addr.Multiaddr[3])\n\treturn v2net.TCPDestination(ip, v2net.Port(port))\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/gobuffalo\/buffalo\/meta\"\n\t\"github.com\/gobuffalo\/envy\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/gobuffalo\/pop\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst vendorPattern = \"\/vendor\/\"\n\nvar vendorRegex = regexp.MustCompile(vendorPattern)\n\nfunc init() {\n\tdecorate(\"test\", testCmd)\n\tRootCmd.AddCommand(testCmd)\n}\n\nvar testCmd = &cobra.Command{\n\tUse: \"test\",\n\tShort: \"Runs the tests for your Buffalo app\",\n\tDisableFlagParsing: true,\n\tRunE: func(c *cobra.Command, args []string) error {\n\t\tos.Setenv(\"GO_ENV\", \"test\")\n\t\tif _, err := os.Stat(\"database.yml\"); err == nil {\n\t\t\t\/\/ there's a database\n\t\t\ttest, err := pop.Connect(\"test\")\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\t\/\/ drop the test db:\n\t\t\ttest.Dialect.DropDB()\n\n\t\t\t\/\/ create the test db:\n\t\t\terr = test.Dialect.CreateDB()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\tif schema := findSchema(); schema != nil {\n\t\t\t\terr = test.Dialect.LoadSchema(schema)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.WithStack(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn testRunner(args)\n\t},\n}\n\nfunc findSchema() io.Reader {\n\tif f, err := os.Open(filepath.Join(\"migrations\", \"schema.sql\")); err == nil {\n\t\treturn f\n\t}\n\tif dev, err := pop.Connect(\"development\"); err == nil {\n\t\tschema := &bytes.Buffer{}\n\t\tif err = dev.Dialect.DumpSchema(schema); err == nil {\n\t\t\treturn schema\n\t\t}\n\t}\n\n\tif test, err := pop.Connect(\"test\"); err == nil {\n\t\tif err := test.MigrateUp(\".\/migrations\"); err == nil {\n\t\t\tif f, err := os.Open(filepath.Join(\"migrations\", \"schema.sql\")); err == nil {\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testRunner(args []string) error {\n\tvar mFlag bool\n\tcargs := []string{}\n\tpargs := []string{}\n\tvar larg string\n\tfor i, a := range args {\n\t\tswitch a {\n\t\tcase \"-run\":\n\t\t\tcargs = append(cargs, \"-run\", args[i+1])\n\t\tcase \"-m\":\n\t\t\tmFlag = true\n\t\t\tcargs = append(cargs, \"-testify.m\", args[i+1])\n\t\tcase \"-v\":\n\t\t\tcargs = append(cargs, \"-v\")\n\t\tdefault:\n\t\t\tif larg != \"-run\" && larg != \"-m\" {\n\t\t\t\tpargs = append(pargs, a)\n\t\t\t}\n\t\t}\n\t\tlarg = a\n\t}\n\n\tcmd := newTestCmd(cargs)\n\tif mFlag {\n\t\treturn mFlagRunner(cargs, pargs)\n\t}\n\n\tpkgs, err := testPackages(pargs)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tcmd.Args = append(cmd.Args, pkgs...)\n\tlogrus.Info(strings.Join(cmd.Args, \" \"))\n\treturn cmd.Run()\n}\n\nfunc mFlagRunner(args []string, pargs []string) error {\n\tapp := meta.New(\".\")\n\tpwd, _ := os.Getwd()\n\tdefer os.Chdir(pwd)\n\n\tpkgs, err := testPackages(pargs)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tvar errs bool\n\tfor _, p := range pkgs {\n\t\tos.Chdir(pwd)\n\t\tif p == app.PackagePkg {\n\t\t\tcontinue\n\t\t}\n\t\tcmd := newTestCmd(args)\n\t\tp = strings.TrimPrefix(p, app.PackagePkg+string(filepath.Separator))\n\t\tlogrus.Info(strings.Join(cmd.Args, \" \"))\n\t\tos.Chdir(p)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\terrs = true\n\t\t}\n\t}\n\tif errs {\n\t\treturn errors.New(\"errors running tests\")\n\t}\n\treturn nil\n}\n\nfunc testPackages(givenArgs []string) ([]string, error) {\n\t\/\/ If there are args, then assume these are the packages to test.\n\t\/\/\n\t\/\/ Instead of always returning all packages from 'go list .\/...', just\n\t\/\/ return the given packages in this case\n\tif len(givenArgs) > 0 {\n\t\treturn givenArgs, nil\n\t}\n\targs := []string{}\n\tout, err := exec.Command(envy.Get(\"GO_BIN\", \"go\"), \"list\", \".\/...\").Output()\n\tif err != nil {\n\t\treturn args, err\n\t}\n\tpkgs := bytes.Split(bytes.TrimSpace(out), []byte(\"\\n\"))\n\tfor _, p := range pkgs {\n\t\tif !vendorRegex.Match(p) {\n\t\t\targs = append(args, string(p))\n\t\t}\n\t}\n\treturn args, nil\n}\n\nfunc newTestCmd(args []string) *exec.Cmd {\n\tcargs := []string{\"test\", \"-p\", \"1\"}\n\tcargs = append(cargs, args...)\n\tcmd := exec.Command(envy.Get(\"GO_BIN\", \"go\"), cargs...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n<commit_msg>buffalo test needs to support sqlite<commit_after>package cmd\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/gobuffalo\/buffalo\/meta\"\n\t\"github.com\/gobuffalo\/envy\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/gobuffalo\/pop\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst vendorPattern = \"\/vendor\/\"\n\nvar vendorRegex = regexp.MustCompile(vendorPattern)\n\nfunc init() {\n\tdecorate(\"test\", testCmd)\n\tRootCmd.AddCommand(testCmd)\n}\n\nvar testCmd = &cobra.Command{\n\tUse: \"test\",\n\tShort: \"Runs the tests for your Buffalo app\",\n\tDisableFlagParsing: true,\n\tRunE: func(c *cobra.Command, args []string) error {\n\t\tos.Setenv(\"GO_ENV\", \"test\")\n\t\tif _, err := os.Stat(\"database.yml\"); err == nil {\n\t\t\t\/\/ there's a database\n\t\t\ttest, err := pop.Connect(\"test\")\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\t\/\/ drop the test db:\n\t\t\ttest.Dialect.DropDB()\n\n\t\t\t\/\/ create the test db:\n\t\t\terr = test.Dialect.CreateDB()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\tif schema := findSchema(); schema != nil {\n\t\t\t\terr = test.Dialect.LoadSchema(schema)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.WithStack(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn testRunner(args)\n\t},\n}\n\nfunc findSchema() io.Reader {\n\tif f, err := os.Open(filepath.Join(\"migrations\", \"schema.sql\")); err == nil {\n\t\treturn f\n\t}\n\tif dev, err := pop.Connect(\"development\"); err == nil {\n\t\tschema := &bytes.Buffer{}\n\t\tif err = dev.Dialect.DumpSchema(schema); err == nil {\n\t\t\treturn schema\n\t\t}\n\t}\n\n\tif test, err := pop.Connect(\"test\"); err == nil {\n\t\tif err := test.MigrateUp(\".\/migrations\"); err == nil {\n\t\t\tif f, err := os.Open(filepath.Join(\"migrations\", \"schema.sql\")); err == nil {\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testRunner(args []string) error {\n\tvar mFlag bool\n\tcargs := []string{}\n\tpargs := []string{}\n\tvar larg string\n\tfor i, a := range args {\n\t\tswitch a {\n\t\tcase \"-run\":\n\t\t\tcargs = append(cargs, \"-run\", args[i+1])\n\t\tcase \"-m\":\n\t\t\tmFlag = true\n\t\t\tcargs = append(cargs, \"-testify.m\", args[i+1])\n\t\tcase \"-v\":\n\t\t\tcargs = append(cargs, \"-v\")\n\t\tdefault:\n\t\t\tif larg != \"-run\" && larg != \"-m\" {\n\t\t\t\tpargs = append(pargs, a)\n\t\t\t}\n\t\t}\n\t\tlarg = a\n\t}\n\n\tcmd := newTestCmd(cargs)\n\tif mFlag {\n\t\treturn mFlagRunner(cargs, pargs)\n\t}\n\n\tpkgs, err := testPackages(pargs)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tcmd.Args = append(cmd.Args, pkgs...)\n\tlogrus.Info(strings.Join(cmd.Args, \" \"))\n\treturn cmd.Run()\n}\n\nfunc mFlagRunner(args []string, pargs []string) error {\n\tapp := meta.New(\".\")\n\tpwd, _ := os.Getwd()\n\tdefer os.Chdir(pwd)\n\n\tpkgs, err := testPackages(pargs)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tvar errs bool\n\tfor _, p := range pkgs {\n\t\tos.Chdir(pwd)\n\t\tif p == app.PackagePkg {\n\t\t\tcontinue\n\t\t}\n\t\tcmd := newTestCmd(args)\n\t\tp = strings.TrimPrefix(p, app.PackagePkg+string(filepath.Separator))\n\t\tlogrus.Info(strings.Join(cmd.Args, \" \"))\n\t\tos.Chdir(p)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\terrs = true\n\t\t}\n\t}\n\tif errs {\n\t\treturn errors.New(\"errors running tests\")\n\t}\n\treturn nil\n}\n\nfunc testPackages(givenArgs []string) ([]string, error) {\n\t\/\/ If there are args, then assume these are the packages to test.\n\t\/\/\n\t\/\/ Instead of always returning all packages from 'go list .\/...', just\n\t\/\/ return the given packages in this case\n\tif len(givenArgs) > 0 {\n\t\treturn givenArgs, nil\n\t}\n\targs := []string{}\n\tout, err := exec.Command(envy.Get(\"GO_BIN\", \"go\"), \"list\", \".\/...\").Output()\n\tif err != nil {\n\t\treturn args, err\n\t}\n\tpkgs := bytes.Split(bytes.TrimSpace(out), []byte(\"\\n\"))\n\tfor _, p := range pkgs {\n\t\tif !vendorRegex.Match(p) {\n\t\t\targs = append(args, string(p))\n\t\t}\n\t}\n\treturn args, nil\n}\n\nfunc newTestCmd(args []string) *exec.Cmd {\n\tcargs := []string{\"test\", \"-p\", \"1\"}\n\tif b, err := ioutil.ReadFile(\"database.yml\"); err == nil {\n\t\tif bytes.Contains(b, []byte(\"sqlite\")) {\n\t\t\tcargs = append(cargs, \"-tags\", \"sqlite\")\n\t\t}\n\t}\n\tcargs = append(cargs, args...)\n\tcmd := exec.Command(envy.Get(\"GO_BIN\", \"go\"), cargs...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>package kf\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/silenceper\/wechat\/v2\/util\"\n)\n\nconst (\n\t\/\/ 获取会话状态\n\tserviceStateGetAddr = \"https:\/\/qyapi.weixin.qq.com\/cgi-bin\/kf\/service_state\/get?access_token=%s\"\n\t\/\/ 变更会话状态\n\tserviceStateTransAddr = \"https:\/\/qyapi.weixin.qq.com\/cgi-bin\/kf\/service_state\/trans?access_token=%s\"\n)\n\n\/\/ ServiceStateGetOptions 获取会话状态请求参数\ntype ServiceStateGetOptions struct {\n\tOpenKFID string `json:\"open_kfid\"` \/\/ 客服帐号ID\n\tExternalUserID string `json:\"external_userid\"` \/\/ 微信客户的external_userid\n}\n\n\/\/ ServiceStateGetSchema 获取会话状态响应内容\ntype ServiceStateGetSchema struct {\n\tutil.CommonError\n\tServiceState int `json:\"service_state\"` \/\/ 当前的会话状态,状态定义参考概述中的表格\n\tServiceUserID string `json:\"service_userid\"` \/\/ 接待人员的userid,仅当state=3时有效\n}\n\n\/\/ ServiceStateGet 获取会话状态\n\/\/ 0\t未处理\t新会话接入。可选择:1.直接用API自动回复消息。2.放进待接入池等待接待人员接待。3.指定接待人员进行接待\n\/\/ 1\t由智能助手接待\t可使用API回复消息。可选择转入待接入池或者指定接待人员处理。\n\/\/ 2\t待接入池排队中\t在待接入池中排队等待接待人员接入。可选择转为指定人员接待\n\/\/ 3\t由人工接待\t人工接待中。可选择结束会话\n\/\/ 4\t已结束\t会话已经结束或未开始。不允许变更会话状态,等待用户发起咨询\n\/\/ 注:一个微信用户向一个客服帐号发起咨询后,在48h内,或主动结束会话前(包括接待人员手动结束,或企业通过API结束会话),都算是一次会话\nfunc (r *Client) ServiceStateGet(options ServiceStateGetOptions) (info ServiceStateGetSchema, err error) {\n\tvar (\n\t\taccessToken string\n\t\tdata []byte\n\t)\n\taccessToken, err = r.ctx.GetAccessToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err = util.PostJSON(fmt.Sprintf(serviceStateGetAddr, accessToken), options)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(data, &info); err != nil {\n\t\treturn\n\t}\n\tif info.ErrCode != 0 {\n\t\treturn info, NewSDKErr(info.ErrCode, info.ErrMsg)\n\t}\n\treturn info, nil\n}\n\n\/\/ ServiceStateTransOptions 变更会话状态请求参数\ntype ServiceStateTransOptions struct {\n\tOpenKFID string `json:\"open_kfid\"` \/\/ 客服帐号ID\n\tExternalUserID string `json:\"external_userid\"` \/\/ 微信客户的external_userid\n\tServiceState int `json:\"service_state\"` \/\/ 变更的目标状态,状态定义和所允许的变更可参考概述中的流程图和表格\n\tServicerUserID string `json:\"servicer_userid\"` \/\/ 接待人员的userid,当state=3时要求必填,接待人员须处于“正在接待”中\n}\n\n\/\/ ServiceStateTrans 变更会话状态\nfunc (r *Client) ServiceStateTrans(options ServiceStateTransOptions) (info util.CommonError, err error) {\n\tvar (\n\t\taccessToken string\n\t\tdata []byte\n\t)\n\taccessToken, err = r.ctx.GetAccessToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err = util.PostJSON(fmt.Sprintf(serviceStateTransAddr, accessToken), options)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(data, &info); err != nil {\n\t\treturn\n\t}\n\tif info.ErrCode != 0 {\n\t\treturn info, NewSDKErr(info.ErrCode, info.ErrMsg)\n\t}\n\treturn info, nil\n}\n<commit_msg>feat:微信客服支持通过接口转接人工会话 (#492)<commit_after>package kf\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/silenceper\/wechat\/v2\/util\"\n)\n\nconst (\n\t\/\/ 获取会话状态\n\tserviceStateGetAddr = \"https:\/\/qyapi.weixin.qq.com\/cgi-bin\/kf\/service_state\/get?access_token=%s\"\n\t\/\/ 变更会话状态\n\tserviceStateTransAddr = \"https:\/\/qyapi.weixin.qq.com\/cgi-bin\/kf\/service_state\/trans?access_token=%s\"\n)\n\n\/\/ ServiceStateGetOptions 获取会话状态请求参数\ntype ServiceStateGetOptions struct {\n\tOpenKFID string `json:\"open_kfid\"` \/\/ 客服帐号ID\n\tExternalUserID string `json:\"external_userid\"` \/\/ 微信客户的external_userid\n}\n\n\/\/ ServiceStateGetSchema 获取会话状态响应内容\ntype ServiceStateGetSchema struct {\n\tutil.CommonError\n\tServiceState int `json:\"service_state\"` \/\/ 当前的会话状态,状态定义参考概述中的表格\n\tServiceUserID string `json:\"service_userid\"` \/\/ 接待人员的userid,仅当state=3时有效\n}\n\n\/\/ ServiceStateGet 获取会话状态\n\/\/ 0\t未处理\t新会话接入(客户发信咨询)。可选择:1.直接用API自动回复消息。2.放进待接入池等待接待人员接待。3.指定接待人员(接待人员须处于“正在接待”中,下同)进行接待\n\/\/ 1\t由智能助手接待\t\t可使用API回复消息。可选择转入待接入池或者指定接待人员处理\n\/\/ 2\t待接入池排队中\t\t在待接入池中排队等待接待人员接入。可选择转为指定人员接待\n\/\/ 3\t由人工接待\t人工接待中。可选择转接给其他接待人员处理或者结束会话\n\/\/ 4\t已结束\t会话已经结束或未开始。不允许变更会话状态,客户重新发信咨询后会话状态变为“未处理”\n\/\/ 注:一个微信用户向一个客服帐号发起咨询后,在48h内,或主动结束会话前(包括接待人员手动结束,或企业通过API结束会话),都算是一次会话\nfunc (r *Client) ServiceStateGet(options ServiceStateGetOptions) (info ServiceStateGetSchema, err error) {\n\tvar (\n\t\taccessToken string\n\t\tdata []byte\n\t)\n\taccessToken, err = r.ctx.GetAccessToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err = util.PostJSON(fmt.Sprintf(serviceStateGetAddr, accessToken), options)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(data, &info); err != nil {\n\t\treturn\n\t}\n\tif info.ErrCode != 0 {\n\t\treturn info, NewSDKErr(info.ErrCode, info.ErrMsg)\n\t}\n\treturn info, nil\n}\n\n\/\/ ServiceStateTransOptions 变更会话状态请求参数\ntype ServiceStateTransOptions struct {\n\tOpenKFID string `json:\"open_kfid\"` \/\/ 客服帐号ID\n\tExternalUserID string `json:\"external_userid\"` \/\/ 微信客户的external_userid\n\tServiceState int `json:\"service_state\"` \/\/ 变更的目标状态,状态定义和所允许的变更可参考概述中的流程图和表格\n\tServicerUserID string `json:\"servicer_userid\"` \/\/ 接待人员的userid,当state=3时要求必填,接待人员须处于“正在接待”中\n}\n\n\/\/ ServiceStateTrans 变更会话状态\nfunc (r *Client) ServiceStateTrans(options ServiceStateTransOptions) (info util.CommonError, err error) {\n\tvar (\n\t\taccessToken string\n\t\tdata []byte\n\t)\n\taccessToken, err = r.ctx.GetAccessToken()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err = util.PostJSON(fmt.Sprintf(serviceStateTransAddr, accessToken), options)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(data, &info); err != nil {\n\t\treturn\n\t}\n\tif info.ErrCode != 0 {\n\t\treturn info, NewSDKErr(info.ErrCode, info.ErrMsg)\n\t}\n\treturn info, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package workers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t_STORAGE_REDIS = \"redis\"\n\t_STORAGE_MYSQL = \"mysql\"\n\t_STORAGE_ORACLE = \"oracle\"\n)\n\ntype StorageOption struct {\n\tType string\n\tDb string\n\tTable string \/\/当Type为mysql\/oracle有用\n\tHost string\n\tPort string\n\tPwd string\n}\n\ntype LocalStorage struct {\n\toption *StorageOption \/\/ 存储选项\n\tkey string\n\tvalue string\n\texpire int\n\tts int\n}\n\n\/* {{{ func (w *OmqWorker) localStorage(cmd []string) error\n * 处理SET\/DEL命令\n *\/\nfunc (w *OmqWorker) localStorage(cmd []string) error { \/\/set + del\n\n\tif Redis == nil { \/\/没有本地存储\n\t\treturn fmt.Errorf(\"can't reach localstorage\")\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 4 {\n\t\tact := strings.ToUpper(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tls.value = cmd[3]\n\t\tw.Info(\"[act: %s][key: %s][value: %s][expire: %d]\", act, ls.key, ls.value, ls.expire)\n\t\tswitch act {\n\t\tcase COMMAND_SET:\n\t\t\tif len(cmd) >= 5 {\n\t\t\t\tls.expire, _ = strconv.Atoi(cmd[4])\n\t\t\t}\n\t\t\treturn ls.Set()\n\t\tcase COMMAND_DEL:\n\t\t\treturn ls.Del()\n\t\tcase COMMAND_SCHEDULE:\n\t\t\tif len(cmd) >= 5 {\n\t\t\t\tls.ts, _ = strconv.Atoi(cmd[4])\n\t\t\t}\n\t\t\treturn ls.Schedule()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn nil\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) localGet(cmd []string) ([]string, error)\n * 处理SET\/DEL命令\n *\/\nfunc (w *OmqWorker) localGet(cmd []string) (r []string, err error) { \/\/set + del\n\n\tif Redis == nil { \/\/没有本地存储\n\t\terr = fmt.Errorf(\"can't reach localstorage\")\n\t\treturn\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 3 {\n\t\tact := strings.ToUpper(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tw.Info(\"[act: %s][key: %s]\", act, ls.key)\n\t\tswitch act {\n\t\tcase COMMAND_GET:\n\t\t\treturn ls.Get()\n\t\tcase COMMAND_TIMING:\n\t\t\treturn ls.Timing()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn\n}\n\n\/* }}} *\/\n\nfunc (ls *LocalStorage) Set() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif ls.expire > 0 {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value, \"EX\", ls.expire)\n\t} else {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value)\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Get() (r []string, err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif result, e := redisConn.Do(\"GET\", ls.key); e == nil {\n\t\tif result == nil {\n\t\t\tr = nil\n\t\t} else {\n\t\t\tr = []string{string(result.([]byte))}\n\t\t}\n\t} else {\n\t\terr = e\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Timing() (r []string, err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tnow := time.Now().Unix() \/\/当前的时间戳\n\t\/\/if result, e := redisConn.Do(\"ZRANGEBYSCORE\", ls.key, 0, now, \"LIMIT\", 0, 1); e == nil {\n\tif result, e := redisConn.Do(\"ZRANGEBYSCORE\", ls.key, 0, now, \"WITHSCORES\", \"LIMIT\", 0, 10); e == nil {\n\t\tif result != nil {\n\t\t\tswitch rt := result.(type) {\n\t\t\tcase []byte:\n\t\t\t\tif len(rt) == 0 {\n\t\t\t\t\treturn nil, ErrNil\n\t\t\t\t}\n\t\t\t\tr = []string{string(rt)}\n\t\t\tcase []interface{}:\n\t\t\t\tif len(rt) == 0 {\n\t\t\t\t\treturn nil, ErrNil\n\t\t\t\t}\n\t\t\t\tr = make([]string, 0)\n\t\t\t\tfor _, rtt := range rt {\n\t\t\t\t\tr = append(r, string(rtt.([]byte)))\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\terr = fmt.Errorf(\"unknown type\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = e\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Del() error {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err := redisConn.Do(\"DEL\", ls.key)\n\treturn err\n}\n\nfunc (ls *LocalStorage) Schedule() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err = redisConn.Do(\"ZADD\", ls.key, ls.ts, ls.value)\n\treturn\n}\n<commit_msg>timing auto remove member<commit_after>package workers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t_STORAGE_REDIS = \"redis\"\n\t_STORAGE_MYSQL = \"mysql\"\n\t_STORAGE_ORACLE = \"oracle\"\n)\n\ntype StorageOption struct {\n\tType string\n\tDb string\n\tTable string \/\/当Type为mysql\/oracle有用\n\tHost string\n\tPort string\n\tPwd string\n}\n\ntype LocalStorage struct {\n\toption *StorageOption \/\/ 存储选项\n\tkey string\n\tvalue string\n\texpire int\n\tts int\n}\n\n\/* {{{ func (w *OmqWorker) localStorage(cmd []string) error\n * 处理SET\/DEL命令\n *\/\nfunc (w *OmqWorker) localStorage(cmd []string) error { \/\/set + del\n\n\tif Redis == nil { \/\/没有本地存储\n\t\treturn fmt.Errorf(\"can't reach localstorage\")\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 4 {\n\t\tact := strings.ToUpper(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tls.value = cmd[3]\n\t\tw.Info(\"[act: %s][key: %s][value: %s][expire: %d]\", act, ls.key, ls.value, ls.expire)\n\t\tswitch act {\n\t\tcase COMMAND_SET:\n\t\t\tif len(cmd) >= 5 {\n\t\t\t\tls.expire, _ = strconv.Atoi(cmd[4])\n\t\t\t}\n\t\t\treturn ls.Set()\n\t\tcase COMMAND_DEL:\n\t\t\treturn ls.Del()\n\t\tcase COMMAND_SCHEDULE:\n\t\t\tif len(cmd) >= 5 {\n\t\t\t\tls.ts, _ = strconv.Atoi(cmd[4])\n\t\t\t}\n\t\t\treturn ls.Schedule()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn nil\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) localGet(cmd []string) ([]string, error)\n * 处理SET\/DEL命令\n *\/\nfunc (w *OmqWorker) localGet(cmd []string) (r []string, err error) { \/\/set + del\n\n\tif Redis == nil { \/\/没有本地存储\n\t\terr = fmt.Errorf(\"can't reach localstorage\")\n\t\treturn\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 3 {\n\t\tact := strings.ToUpper(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tw.Info(\"[act: %s][key: %s]\", act, ls.key)\n\t\tswitch act {\n\t\tcase COMMAND_GET:\n\t\t\treturn ls.Get()\n\t\tcase COMMAND_TIMING:\n\t\t\treturn ls.Timing()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn\n}\n\n\/* }}} *\/\n\nfunc (ls *LocalStorage) Set() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif ls.expire > 0 {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value, \"EX\", ls.expire)\n\t} else {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value)\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Get() (r []string, err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif result, e := redisConn.Do(\"GET\", ls.key); e == nil {\n\t\tif result == nil {\n\t\t\tr = nil\n\t\t} else {\n\t\t\tr = []string{string(result.([]byte))}\n\t\t}\n\t} else {\n\t\terr = e\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Timing() (r []string, err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tnow := time.Now().Unix() \/\/当前的时间戳\n\t\/\/if result, e := redisConn.Do(\"ZRANGEBYSCORE\", ls.key, 0, now, \"LIMIT\", 0, 1); e == nil {\n\tif result, e := redisConn.Do(\"ZRANGEBYSCORE\", ls.key, 0, now, \"WITHSCORES\", \"LIMIT\", 0, 10); e == nil {\n\t\tif result != nil {\n\t\t\tswitch rt := result.(type) {\n\t\t\tcase []byte:\n\t\t\t\tif len(rt) == 0 {\n\t\t\t\t\treturn nil, ErrNil\n\t\t\t\t}\n\t\t\t\tr = []string{string(rt)}\n\t\t\tcase []interface{}:\n\t\t\t\tif len(rt) == 0 {\n\t\t\t\t\treturn nil, ErrNil\n\t\t\t\t}\n\t\t\t\t\/\/zrem := make([]string, 0)\n\t\t\t\tr = make([]string, 0)\n\t\t\t\tfor k, rtt := range rt {\n\t\t\t\t\tv := string(rtt.([]byte))\n\t\t\t\t\tr = append(r, v)\n\t\t\t\t\tif k%2 == 0 { \/\/偶数为member(0-based)\n\t\t\t\t\t\t\/\/fmt.Printf(\"k:%d,v:%s\\n\", k, v)\n\t\t\t\t\t\t\/\/zrem = append(zrem, v)\n\t\t\t\t\t\tredisConn.Do(\"ZREM\", ls.key, v)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/redisConn.Do(\"ZREM\", ls.key, strings.Join(zrem, \" \"))\n\t\t\t\t\/\/redisConn.Do(\"ZREM\", ls.key, zrem)\n\t\t\tdefault:\n\t\t\t\terr = fmt.Errorf(\"unknown type\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = e\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Del() error {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err := redisConn.Do(\"DEL\", ls.key)\n\treturn err\n}\n\nfunc (ls *LocalStorage) Schedule() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err = redisConn.Do(\"ZADD\", ls.key, ls.ts, ls.value)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package workers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t_STORAGE_REDIS = \"redis\"\n\t_STORAGE_MYSQL = \"mysql\"\n\t_STORAGE_ORACLE = \"oracle\"\n)\n\ntype StorageOption struct {\n\tType string\n\tDb string\n\tTable string \/\/当Type为mysql\/oracle有用\n\tHost string\n\tPort string\n\tPwd string\n}\n\ntype LocalStorage struct {\n\toption *StorageOption \/\/ 存储选项\n\tkey string\n\tvalue string\n\texpire int\n}\n\nfunc (w *OmqWorker) localStorage(cmd []string) error {\n\n\tif Redis == nil { \/\/没有本地存储\n\t\treturn fmt.Errorf(\"can't reach localstorage\")\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 3 {\n\t\tact := strings.ToLower(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tls.value = cmd[3]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tif len(cmd) >= 5 {\n\t\t\tls.expire, _ = strconv.Atoi(cmd[3])\n\t\t}\n\t\tw.Info(\"[act: %s][key: %s][value: %s][expire: %d]\", act, ls.key, ls.value, ls.expire)\n\t\tswitch act {\n\t\tcase \"set\":\n\t\t\treturn ls.Set()\n\t\tcase \"get\":\n\t\t\treturn ls.Get()\n\t\tcase \"del\":\n\t\t\treturn ls.Del()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn nil\n}\n\nfunc (ls *LocalStorage) Set() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif ls.expire > 0 {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value, \"EX\", ls.expire)\n\t} else {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value)\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Get() error {\n\treturn nil\n}\nfunc (ls *LocalStorage) Del() error {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err := redisConn.Do(\"DEL\", ls.key)\n\treturn err\n}\n<commit_msg>bugfix<commit_after>package workers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t_STORAGE_REDIS = \"redis\"\n\t_STORAGE_MYSQL = \"mysql\"\n\t_STORAGE_ORACLE = \"oracle\"\n)\n\ntype StorageOption struct {\n\tType string\n\tDb string\n\tTable string \/\/当Type为mysql\/oracle有用\n\tHost string\n\tPort string\n\tPwd string\n}\n\ntype LocalStorage struct {\n\toption *StorageOption \/\/ 存储选项\n\tkey string\n\tvalue string\n\texpire int\n}\n\nfunc (w *OmqWorker) localStorage(cmd []string) error {\n\n\tif Redis == nil { \/\/没有本地存储\n\t\treturn fmt.Errorf(\"can't reach localstorage\")\n\t} else {\n\t\tw.Trace(\"save local storage: %q\", cmd)\n\t}\n\n\t\/\/ 解析命令\n\tif len(cmd) >= 3 {\n\t\tact := strings.ToLower(cmd[0])\n\t\tls := new(LocalStorage)\n\t\toption := cmd[1]\n\t\tls.key = cmd[2]\n\t\tif option == \"\" || strings.ToLower(option) == \"redis\" {\n\t\t\t\/\/兼容旧版, 新版应该传入一个json,或者为空\n\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS}\n\t\t} else {\n\t\t\t\/\/解析\n\t\t\to := new(StorageOption)\n\t\t\tif err := json.Unmarshal([]byte(option), o); err != nil {\n\t\t\t\tw.Info(\"unmarshal option failed: %s\", option)\n\t\t\t\tls.option = &StorageOption{Type: _STORAGE_REDIS} \/\/默认\n\t\t\t} else {\n\t\t\t\tls.option = o\n\t\t\t}\n\t\t}\n\t\tif len(cmd) >= 4 {\n\t\t\tls.value = cmd[3]\n\t\t}\n\t\tif len(cmd) >= 5 {\n\t\t\tls.expire, _ = strconv.Atoi(cmd[3])\n\t\t}\n\t\tw.Info(\"[act: %s][key: %s][value: %s][expire: %d]\", act, ls.key, ls.value, ls.expire)\n\t\tswitch act {\n\t\tcase \"set\":\n\t\t\treturn ls.Set()\n\t\tcase \"get\":\n\t\t\treturn ls.Get()\n\t\tcase \"del\":\n\t\t\treturn ls.Del()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"action error: %s\", act)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"command error: %s\", cmd)\n\t}\n\n\treturn nil\n}\n\nfunc (ls *LocalStorage) Set() (err error) {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\tif ls.expire > 0 {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value, \"EX\", ls.expire)\n\t} else {\n\t\t_, err = redisConn.Do(\"SET\", ls.key, ls.value)\n\t}\n\treturn\n}\n\nfunc (ls *LocalStorage) Get() error {\n\treturn nil\n}\nfunc (ls *LocalStorage) Del() error {\n\tredisConn := Redis.Pool.Get()\n\tdefer redisConn.Close()\n\t_, err := redisConn.Do(\"DEL\", ls.key)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"github.com\/ghts\/ghts\/lib\"\n\t\"github.com\/ghts\/ghts\/lib\/daily_price_data\"\n\t\"github.com\/ghts\/ghts\/lib\/krx_time\"\n\txt \"github.com\/ghts\/ghts\/xing\/base\"\n\txing \"github.com\/ghts\/ghts\/xing\/go\"\n\n\t\"database\/sql\"\n\t\"time\"\n)\n\nfunc F당일_일일_가격정보_수집(db *sql.DB) (에러 error) {\n\tif krx.F한국증시_동시호가_시간임() || krx.F한국증시_정규_거래_시간임() {\n\t\tlib.F문자열_출력(\"장 중에는 정확한 데이터를 수집할 수 없습니다.\")\n\t\treturn\n\t}\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\t당일 := lib.F일자2정수(xing.F당일())\n\t현재가_맵, 에러 := xing.TrT8407_현물_멀티_현재가_조회_전종목()\n\tlib.F확인(에러)\n\n\tfor 종목코드, 값 := range 현재가_맵 {\n\t\ts := new(daily_price_data.S일일_가격정보)\n\t\ts.M종목코드 = 종목코드\n\t\ts.M일자 = 당일\n\t\ts.M시가 = float64(값.M시가)\n\t\ts.M고가 = float64(값.M고가)\n\t\ts.M저가 = float64(값.M저가)\n\t\ts.M종가 = float64(값.M현재가)\n\t\ts.M거래량 = float64(값.M누적_거래량)\n\n\t\t종목별_일일_가격정보_모음, 에러 := daily_price_data.New종목별_일일_가격정보_모음([]*daily_price_data.S일일_가격정보{s})\n\t\tlib.F확인(에러)\n\n\t\tlib.F확인(종목별_일일_가격정보_모음.DB저장(db))\n\t}\n\n\tlib.F문자열_출력(\"당일 가격정보 수집 완료.\")\n\n\treturn nil\n}\n\nfunc F일개월_일일_가격정보_수집(db *sql.DB, 종목코드_모음 []string) (에러 error) {\n\tvar 시작일, 종료일 time.Time\n\tvar 종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\tfor i, 종목코드 := range 종목코드_모음 {\n\t\tfor {\n\t\t\tif xing.C32_재시작_실행_중.G값() {\n\t\t\t\tlib.F대기(lib.P10초)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음_DB읽기(db, 종목코드)\n\t\tlib.F확인(에러)\n\n\t\t\/\/ 시작일 설정. 데이터 수량이 1개이나 100개이나 소요 시간은 비슷함.\n\t\t시작일 = lib.F지금().AddDate(0, 0, -31)\n\n\t\t\/\/ 종료일 설정\n\t\tif lib.F지금().After(xing.F당일().Add(15*lib.P1시간 + lib.P30분)) {\n\t\t\t종료일 = xing.F당일()\n\t\t} else {\n\t\t\t종료일 = xing.F전일()\n\t\t}\n\n\t\tf일일_가격정보_수집_도우미(db, 종목코드, 시작일, 종료일, 종목별_일일_가격정보_모음, i)\n\t}\n\n\treturn nil\n}\n\nfunc F일일_가격정보_수집(db *sql.DB, 종목코드_모음 []string) (에러 error) {\n\tvar 시작일, 종료일, 마지막_저장일 time.Time\n\tvar 종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\tfor i, 종목코드 := range 종목코드_모음 {\n\t\tfor {\n\t\t\tif xing.C32_재시작_실행_중.G값() {\n\t\t\t\tlib.F대기(lib.P10초)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음_DB읽기(db, 종목코드)\n\t\tlib.F확인(에러)\n\n\t\t\/\/ 시작일 설정\n\t\t시작일 = lib.F지금().AddDate(-30, 0, 0)\n\t\tif 에러 == nil && len(종목별_일일_가격정보_모음.M저장소) > 0 {\n\t\t\t\/\/ lib.S종목별_일일_가격정보_모음 는 일자 순서로 정렬되어 있음.\n\t\t\t마지막_저장일 = 종목별_일일_가격정보_모음.M저장소[len(종목별_일일_가격정보_모음.M저장소)-1].G일자2()\n\t\t\t시작일 = 마지막_저장일.AddDate(0, 0, 1)\n\t\t}\n\n\t\tif 시작일.After(xing.F당일()) {\n\t\t\tlib.F문자열_출력(\"%v [%v] : 최신 데이터 업데이트.\", i, 종목코드)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 종료일 설정\n\t\tif lib.F지금().After(xing.F당일().Add(15*lib.P1시간 + lib.P30분)) {\n\t\t\t종료일 = xing.F당일()\n\t\t} else {\n\t\t\t종료일 = xing.F전일()\n\t\t}\n\n\t\tif 시작일.After(종료일) {\n\t\t\tcontinue\n\t\t} else if 시작일.Equal(종료일) { \/\/ 시작일과 종료일이 같으면 수천 개의 데이터를 불러오는 현상이 있음.\n\t\t\t시작일 = 시작일.AddDate(0, 0, -1)\n\t\t}\n\n\t\t\/\/ 데이터 수량이 1개이나 100개이나 소요 시간은 비슷함.\n\t\tif 시작일.After(lib.F금일().AddDate(0, 0, -14)) {\n\t\t\t시작일 = lib.F금일().AddDate(0, 0, -14)\n\t\t}\n\n\t\tf일일_가격정보_수집_도우미(db, 종목코드, 시작일, 종료일, 종목별_일일_가격정보_모음, i)\n\t}\n\n\treturn nil\n}\n\nfunc f일일_가격정보_수집_도우미(db *sql.DB,\n\t종목코드 string, 시작일, 종료일 time.Time,\n\t종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음, i int) {\n\t\/\/ 데이터 수집\n\t값_모음, 에러 := xing.TrT8413_현물_차트_일주월(종목코드, 시작일, 종료일, xt.P일주월_일)\n\tif 에러 != nil {\n\t\tlib.F에러_출력(에러)\n\t\treturn\n\t} else if len(값_모음) == 0 {\n\t\tlib.F체크포인트(i, 종목코드, \"추가 저장할 데이터가 없음.\")\n\t\treturn \/\/ 추가 저장할 데이터가 없음.\n\t}\n\n\t일일_가격정보_슬라이스 := make([]*daily_price_data.S일일_가격정보, len(값_모음))\n\n\tfor i, 일일_데이터 := range 값_모음 {\n\t\t일일_가격정보_슬라이스[i] = daily_price_data.New일일_가격정보(\n\t\t\t일일_데이터.M종목코드,\n\t\t\t일일_데이터.M일자,\n\t\t\t일일_데이터.M시가,\n\t\t\t일일_데이터.M고가,\n\t\t\t일일_데이터.M저가,\n\t\t\t일일_데이터.M종가,\n\t\t\t일일_데이터.M거래량)\n\t}\n\n\tlib.F문자열_출력(\"%v %v %v~%v %v개\", i, 종목코드, 시작일.Format(lib.P일자_형식), 종료일.Format(lib.P일자_형식), len(값_모음))\n\n\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음(일일_가격정보_슬라이스)\n\tif 에러 != nil {\n\t\tlib.F에러_출력(에러)\n\t\treturn\n\t}\n\n\tlib.F확인(종목별_일일_가격정보_모음.DB저장(db))\n}\n<commit_msg>불필요한 출력 문자열 비활성화<commit_after>package util\n\nimport (\n\t\"github.com\/ghts\/ghts\/lib\"\n\t\"github.com\/ghts\/ghts\/lib\/daily_price_data\"\n\t\"github.com\/ghts\/ghts\/lib\/krx_time\"\n\txt \"github.com\/ghts\/ghts\/xing\/base\"\n\txing \"github.com\/ghts\/ghts\/xing\/go\"\n\n\t\"database\/sql\"\n\t\"time\"\n)\n\nfunc F당일_일일_가격정보_수집(db *sql.DB) (에러 error) {\n\tif krx.F한국증시_동시호가_시간임() || krx.F한국증시_정규_거래_시간임() {\n\t\tlib.F문자열_출력(\"장 중에는 정확한 데이터를 수집할 수 없습니다.\")\n\t\treturn\n\t}\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\t당일 := lib.F일자2정수(xing.F당일())\n\t현재가_맵, 에러 := xing.TrT8407_현물_멀티_현재가_조회_전종목()\n\tlib.F확인(에러)\n\n\tfor 종목코드, 값 := range 현재가_맵 {\n\t\ts := new(daily_price_data.S일일_가격정보)\n\t\ts.M종목코드 = 종목코드\n\t\ts.M일자 = 당일\n\t\ts.M시가 = float64(값.M시가)\n\t\ts.M고가 = float64(값.M고가)\n\t\ts.M저가 = float64(값.M저가)\n\t\ts.M종가 = float64(값.M현재가)\n\t\ts.M거래량 = float64(값.M누적_거래량)\n\n\t\t종목별_일일_가격정보_모음, 에러 := daily_price_data.New종목별_일일_가격정보_모음([]*daily_price_data.S일일_가격정보{s})\n\t\tlib.F확인(에러)\n\n\t\tlib.F확인(종목별_일일_가격정보_모음.DB저장(db))\n\t}\n\n\tlib.F문자열_출력(\"당일 가격정보 수집 완료.\")\n\n\treturn nil\n}\n\nfunc F일개월_일일_가격정보_수집(db *sql.DB, 종목코드_모음 []string) (에러 error) {\n\tvar 시작일, 종료일 time.Time\n\tvar 종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\tfor i, 종목코드 := range 종목코드_모음 {\n\t\tfor {\n\t\t\tif xing.C32_재시작_실행_중.G값() {\n\t\t\t\tlib.F대기(lib.P10초)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음_DB읽기(db, 종목코드)\n\t\tlib.F확인(에러)\n\n\t\t\/\/ 시작일 설정. 데이터 수량이 1개이나 100개이나 소요 시간은 비슷함.\n\t\t시작일 = lib.F지금().AddDate(0, 0, -31)\n\n\t\t\/\/ 종료일 설정\n\t\tif lib.F지금().After(xing.F당일().Add(15*lib.P1시간 + lib.P30분)) {\n\t\t\t종료일 = xing.F당일()\n\t\t} else {\n\t\t\t종료일 = xing.F전일()\n\t\t}\n\n\t\tf일일_가격정보_수집_도우미(db, 종목코드, 시작일, 종료일, 종목별_일일_가격정보_모음, i)\n\t}\n\n\treturn nil\n}\n\nfunc F일일_가격정보_수집(db *sql.DB, 종목코드_모음 []string) (에러 error) {\n\tvar 시작일, 종료일, 마지막_저장일 time.Time\n\tvar 종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음\n\n\tdaily_price_data.F일일_가격정보_테이블_생성(db)\n\n\tfor i, 종목코드 := range 종목코드_모음 {\n\t\tfor {\n\t\t\tif xing.C32_재시작_실행_중.G값() {\n\t\t\t\tlib.F대기(lib.P10초)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음_DB읽기(db, 종목코드)\n\t\tlib.F확인(에러)\n\n\t\t\/\/ 시작일 설정\n\t\t시작일 = lib.F지금().AddDate(-30, 0, 0)\n\t\tif 에러 == nil && len(종목별_일일_가격정보_모음.M저장소) > 0 {\n\t\t\t\/\/ lib.S종목별_일일_가격정보_모음 는 일자 순서로 정렬되어 있음.\n\t\t\t마지막_저장일 = 종목별_일일_가격정보_모음.M저장소[len(종목별_일일_가격정보_모음.M저장소)-1].G일자2()\n\t\t\t시작일 = 마지막_저장일.AddDate(0, 0, 1)\n\t\t}\n\n\t\tif 시작일.After(xing.F당일()) {\n\t\t\t\/\/lib.F문자열_출력(\"%v [%v] : 최신 데이터 업데이트.\", i, 종목코드)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 종료일 설정\n\t\tif lib.F지금().After(xing.F당일().Add(15*lib.P1시간 + lib.P30분)) {\n\t\t\t종료일 = xing.F당일()\n\t\t} else {\n\t\t\t종료일 = xing.F전일()\n\t\t}\n\n\t\tif 시작일.After(종료일) {\n\t\t\tcontinue\n\t\t} else if 시작일.Equal(종료일) { \/\/ 시작일과 종료일이 같으면 수천 개의 데이터를 불러오는 현상이 있음.\n\t\t\t시작일 = 시작일.AddDate(0, 0, -1)\n\t\t}\n\n\t\t\/\/ 데이터 수량이 1개이나 100개이나 소요 시간은 비슷함.\n\t\tif 시작일.After(lib.F금일().AddDate(0, 0, -14)) {\n\t\t\t시작일 = lib.F금일().AddDate(0, 0, -14)\n\t\t}\n\n\t\tf일일_가격정보_수집_도우미(db, 종목코드, 시작일, 종료일, 종목별_일일_가격정보_모음, i)\n\t}\n\n\treturn nil\n}\n\nfunc f일일_가격정보_수집_도우미(db *sql.DB,\n\t종목코드 string, 시작일, 종료일 time.Time,\n\t종목별_일일_가격정보_모음 *daily_price_data.S종목별_일일_가격정보_모음, i int) {\n\t\/\/ 데이터 수집\n\t값_모음, 에러 := xing.TrT8413_현물_차트_일주월(종목코드, 시작일, 종료일, xt.P일주월_일)\n\tif 에러 != nil {\n\t\tlib.F에러_출력(에러)\n\t\treturn\n\t} else if len(값_모음) == 0 {\n\t\tlib.F체크포인트(i, 종목코드, \"추가 저장할 데이터가 없음.\")\n\t\treturn \/\/ 추가 저장할 데이터가 없음.\n\t}\n\n\t일일_가격정보_슬라이스 := make([]*daily_price_data.S일일_가격정보, len(값_모음))\n\n\tfor i, 일일_데이터 := range 값_모음 {\n\t\t일일_가격정보_슬라이스[i] = daily_price_data.New일일_가격정보(\n\t\t\t일일_데이터.M종목코드,\n\t\t\t일일_데이터.M일자,\n\t\t\t일일_데이터.M시가,\n\t\t\t일일_데이터.M고가,\n\t\t\t일일_데이터.M저가,\n\t\t\t일일_데이터.M종가,\n\t\t\t일일_데이터.M거래량)\n\t}\n\n\tlib.F문자열_출력(\"%v %v %v~%v %v개\", i, 종목코드, 시작일.Format(lib.P일자_형식), 종료일.Format(lib.P일자_형식), len(값_모음))\n\n\t종목별_일일_가격정보_모음, 에러 = daily_price_data.New종목별_일일_가격정보_모음(일일_가격정보_슬라이스)\n\tif 에러 != nil {\n\t\tlib.F에러_출력(에러)\n\t\treturn\n\t}\n\n\tlib.F확인(종목별_일일_가격정보_모음.DB저장(db))\n}\n<|endoftext|>"} {"text":"<commit_before>package xmmsclient\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n)\n\nfunc serializeInt(i XmmsInt, buffer *bytes.Buffer) error {\n\treturn binary.Write(buffer, binary.BigEndian, i)\n}\n\nfunc serializeString(s []byte, buffer *bytes.Buffer) error {\n\terr := binary.Write(buffer, binary.BigEndian, uint32(len(s)+1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, byte(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc serializeList(l XmmsList, buffer *bytes.Buffer) error {\n\terr := binary.Write(buffer, binary.BigEndian, TypeNone)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, uint32(len(l)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, entry := range l {\n\t\terr = serializeXmmsValue(entry, buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc serializeDict(dict XmmsDict, buffer *bytes.Buffer) error {\n\terr := binary.Write(buffer, binary.BigEndian, uint32(len(dict)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range dict {\n\t\terr = serializeString([]byte(k), buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = serializeXmmsValue(v, buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc serializeXmmsValue(value XmmsValue, buffer *bytes.Buffer) (err error) {\n\tswitch value.(type) {\n\tcase XmmsInt:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeInt64)\n\t\tif err == nil {\n\t\t\tserializeInt(value.(XmmsInt), buffer)\n\t\t}\n\tcase XmmsString:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeString)\n\t\tif err == nil {\n\t\t\tserializeString([]byte(value.(XmmsString)), buffer)\n\t\t}\n\tcase XmmsError:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeError)\n\t\tif err == nil {\n\t\t\tserializeString([]byte(value.(XmmsError)), buffer)\n\t\t}\n\tcase XmmsDict:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeDict)\n\t\tif err == nil {\n\t\t\tserializeDict(value.(XmmsDict), buffer)\n\t\t}\n\tcase XmmsList:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeList)\n\t\tif err == nil {\n\t\t\tserializeList(value.(XmmsList), buffer)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Make it possible to serialize any list.<commit_after>package xmmsclient\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n)\n\ntype listProducer func(buffer *bytes.Buffer) error\n\nfunc serializeInt(i XmmsInt, buffer *bytes.Buffer) error {\n\treturn binary.Write(buffer, binary.BigEndian, i)\n}\n\nfunc serializeString(s []byte, buffer *bytes.Buffer) error {\n\terr := binary.Write(buffer, binary.BigEndian, uint32(len(s)+1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, byte(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc serializeAnyList(buffer *bytes.Buffer, length int, restrict uint32, producer listProducer) error {\n\terr := binary.Write(buffer, binary.BigEndian, restrict)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = binary.Write(buffer, binary.BigEndian, uint32(length))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn producer(buffer)\n}\n\nfunc serializeList(list XmmsList, buffer *bytes.Buffer) error {\n\treturn serializeAnyList(buffer, len(list), TypeNone,\n\t\tfunc(buffer *bytes.Buffer) error {\n\t\t\tfor _, entry := range list {\n\t\t\t\terr := serializeXmmsValue(entry, buffer)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t)\n}\n\nfunc serializeDict(dict XmmsDict, buffer *bytes.Buffer) error {\n\terr := binary.Write(buffer, binary.BigEndian, uint32(len(dict)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range dict {\n\t\terr = serializeString([]byte(k), buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = serializeXmmsValue(v, buffer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc serializeXmmsValue(value XmmsValue, buffer *bytes.Buffer) (err error) {\n\tswitch value.(type) {\n\tcase XmmsInt:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeInt64)\n\t\tif err == nil {\n\t\t\tserializeInt(value.(XmmsInt), buffer)\n\t\t}\n\tcase XmmsString:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeString)\n\t\tif err == nil {\n\t\t\tserializeString([]byte(value.(XmmsString)), buffer)\n\t\t}\n\tcase XmmsError:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeError)\n\t\tif err == nil {\n\t\t\tserializeString([]byte(value.(XmmsError)), buffer)\n\t\t}\n\tcase XmmsDict:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeDict)\n\t\tif err == nil {\n\t\t\tserializeDict(value.(XmmsDict), buffer)\n\t\t}\n\tcase XmmsList:\n\t\terr = binary.Write(buffer, binary.BigEndian, TypeList)\n\t\tif err == nil {\n\t\t\tserializeList(value.(XmmsList), buffer)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cache\n\nimport (\n\t\"sort\"\n)\n\ntype SplitSizeChild struct {\n\tMaxSize int\n\tCache Cache\n}\n\n\/\/ A SplitSize contains multiple caches, and sends objects to different\n\/\/ caches depending on the size of the item.\ntype SplitSize []SplitSizeChild\n\nfunc NewSplitSize(children ...SplitSizeChild) SplitSize {\n\ts := SplitSize(children)\n\tsort.Sort(s)\n\treturn s\n}\n\nfunc (c SplitSize) AddChildCache(maxSize int, cache Cache) SplitSize {\n\tc = append(c, SplitSizeChild{maxSize, cache})\n\tsort.Sort(c)\n\treturn c\n}\n\n\/\/ Get an item from the cache, checking all caches.\nfunc (c SplitSize) Get(path string, filler Filler) (o Object, err error) {\n\tfor _, child := range c {\n\t\to, err = child.Cache.Get(path, nil)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err != nil {\n\t\to, err = filler.Fill(c, path)\n\t}\n\n\treturn\n}\n\n\/\/ Set adds an object to the appropriate cache for the size of the object.\n\/\/ Note that it is valid for no cache to be large enough.\nfunc (c SplitSize) Set(path string, object Object) error {\n\tobjectSize := len(object.Data)\n\tfor _, child := range c {\n\t\tif objectSize < child.MaxSize {\n\t\t\treturn child.Cache.Set(path, object)\n\t\t}\n\t}\n\n\t\/\/ No cache is large enough to hold this object, but that's ok.\n\treturn nil\n}\n\nfunc (c SplitSize) Del(path string) {\n\tfor _, child := range c {\n\t\tchild.Cache.Del(path)\n\t}\n}\n\nfunc (c SplitSize) Less(i, j int) bool {\n\treturn c[i].MaxSize < c[j].MaxSize\n}\n\nfunc (c SplitSize) Len() int {\n\treturn len(c)\n}\n\nfunc (c SplitSize) Swap(i, j int) {\n\tc[i], c[j] = c[j], c[i]\n}\n<commit_msg>Allow MaxSize of 0 for no limit<commit_after>package cache\n\nimport (\n\t\"sort\"\n)\n\ntype SplitSizeChild struct {\n\tMaxSize int\n\tCache Cache\n}\n\n\/\/ A SplitSize contains multiple caches, and sends objects to different\n\/\/ caches depending on the size of the item.\ntype SplitSize []SplitSizeChild\n\nfunc NewSplitSize(children ...SplitSizeChild) SplitSize {\n\ts := SplitSize(children)\n\tsort.Sort(s)\n\treturn s\n}\n\nfunc (c SplitSize) AddChildCache(maxSize int, cache Cache) SplitSize {\n\tc = append(c, SplitSizeChild{maxSize, cache})\n\tsort.Sort(c)\n\treturn c\n}\n\n\/\/ Get an item from the cache, checking all caches.\nfunc (c SplitSize) Get(path string, filler Filler) (o Object, err error) {\n\tfor _, child := range c {\n\t\to, err = child.Cache.Get(path, nil)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err != nil {\n\t\to, err = filler.Fill(c, path)\n\t}\n\n\treturn\n}\n\n\/\/ Set adds an object to the appropriate cache for the size of the object.\n\/\/ Note that it is valid for no cache to be large enough.\nfunc (c SplitSize) Set(path string, object Object) error {\n\tobjectSize := len(object.Data)\n\tfor _, child := range c {\n\t\tif objectSize < child.MaxSize || child.MaxSize == 0 {\n\t\t\treturn child.Cache.Set(path, object)\n\t\t}\n\t}\n\n\t\/\/ No cache is large enough to hold this object, but that's ok.\n\treturn nil\n}\n\nfunc (c SplitSize) Del(path string) {\n\tfor _, child := range c {\n\t\tchild.Cache.Del(path)\n\t}\n}\n\nfunc (c SplitSize) Less(i, j int) bool {\n\treturn c[i].MaxSize < c[j].MaxSize || c[j].MaxSize == 0\n}\n\nfunc (c SplitSize) Len() int {\n\treturn len(c)\n}\n\nfunc (c SplitSize) Swap(i, j int) {\n\tc[i], c[j] = c[j], c[i]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 M-Lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build appengine\n\npackage rtt\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\/\/ \"appengine\/urlfetch\"\n\t\/\/ \"code.google.com\/p\/golog2bq\/log2bq\"\n\t\"code.google.com\/p\/google-api-go-client\/bigquery\/v2\"\n\t\"code.google.com\/p\/mlab-ns2\/gae\/ns\/data\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tMaxDSReadPerQuery = 1000\n\tMaxDSWritePerQuery = 500\n\tMaxBQResponseRows = 50000 \/\/Response size must be less than 32MB. 100k rows occasionally caused problems.\n\tBigQueryBillableProjectID = \"mlab-ns2\"\n)\n\n\/\/ bqQueryFormat is the query used to pull RTT data from the M-Lab BigQuery\n\/\/ dataset.\n\/\/ NOTE: It must be formatted with Table Name, Year, Month, Day, Year, Month,\n\/\/ Day to specify which day the query is being performed for.\n\/\/\n\/\/ The following columns are selected:\n\/\/ - Logged Time (log_time)\n\/\/ - M-Lab Server IP (connection_spec.server_ip)\n\/\/ - Destination IP for traceroute hop, towards client (paris_traceroute_hop.dest_ip)\n\/\/ - Average of RTT in same traceroute and hop\n\/\/\n\/\/ The Query is performed for entries logged on specified days and for cases\n\/\/ where the field paris_traceroute_hop.rtt is not null. (RTT data exists)\n\/\/ The query also excludes RTT to the client due to the variability of the last\n\/\/ hop.\n\/\/\n\/\/ The result is grouped by time and the IPs such that multiple traceroute rtt\n\/\/ entries can be averaged.\n\n\/\/ The result is ordered by server and client IPs to allow for more efficient\n\/\/ traversal of response entries.\nconst bqQueryFormat = `SELECT\n\t\tlog_time,\n\t\tconnection_spec.server_ip,\n\t\tparis_traceroute_hop.dest_ip,\n\t\tAVG(paris_traceroute_hop.rtt) AS rtt\n\tFROM [%s]\n\tWHERE\n\t\tproject = 3 AND\n\t\tlog_time > %d AND\n\t\tlog_time < %d AND\n\t\tlog_time IS NOT NULL AND\n\t\tconnection_spec.server_ip IS NOT NULL AND\n\t\tparis_traceroute_hop.dest_ip IS NOT NULL AND\n\t\tparis_traceroute_hop.rtt IS NOT NULL AND\n\t\tconnection_spec.client_ip != paris_traceroute_hop.dest_ip\n\tGROUP EACH BY\n\t\tlog_time,\n\t\tconnection_spec.server_ip,\n\t\tparis_traceroute_hop.dest_ip;`\n\n\/\/ bqInit authenticates a transport using OAuth and returns a *bigquery.Service\n\/\/ with which to make queries to bigquery.\nfunc bqInit(r *http.Request) (*bigquery.Service, error) {\n\tc := appengine.NewContext(r)\n\n\t\/\/ Get transport from log2bq's utility function GAETransport\n\ttransport, err := log2bq.GAETransport(c, bigquery.BigqueryScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set maximum urlfetch request deadline\n\ttransport.Transport = &urlfetch.Transport{\n\t\tContext: c,\n\t\tDeadline: 10 * time.Minute,\n\t}\n\n\tclient, err := transport.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservice, err := bigquery.New(client)\n\treturn service, err\n}\n\n\/\/ BQImportDay queries BigQuery for RTT data from a specific day and stores new\n\/\/ data into datastore\nfunc BQImportDay(r *http.Request, t time.Time) {\n\tc := appengine.NewContext(r)\n\tservice, err := bqInit(r)\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.bqInit: %s\", err)\n\t\treturn\n\t}\n\n\t\/\/ Format strings to insert into bqQueryFormat\n\ttableName := fmt.Sprintf(\"measurement-lab:m_lab.%.4d_%.2d\", t.Year(), t.Month())\n\tdateStr := t.Format(dateFormat)\n\tstartTime, _ := time.Parse(timeFormat, dateStr+\" 00:00:00\")\n\tendTime, _ := time.Parse(timeFormat, dateStr+\" 23:59:59\")\n\n\t\/\/ Construct query\n\tqText := fmt.Sprintf(bqQueryFormat, tableName, startTime.Unix(), endTime.Unix())\n\tq := &bigquery.QueryRequest{\n\t\tQuery: qText,\n\t\tMaxResults: MaxBQResponseRows,\n\t\tTimeoutMs: 600000,\n\t\tUseQueryCache: true,\n\t}\n\tc.Debugf(\"rtt: BQImportDay.qText (%s): %s\", dateStr, qText)\n\n\t\/\/ Make first query to BigQuery\n\tjobsService := bigquery.NewJobsService(service)\n\tqueryCall := jobsService.Query(BigQueryBillableProjectID, q)\n\tresponse, err := queryCall.Do()\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.bigquery.JobsService.Query: %s\", err)\n\t\treturn\n\t}\n\tc.Infof(\"rtt: Received %d rows in query response (Total: %d rows).\", len(response.Rows), response.TotalRows)\n\n\tnewCGs := make(map[string]*ClientGroup)\n\tsliverTools, err := data.GetSliverTools(c)\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.data.GetSliverTools: %s\", err)\n\t}\n\tsliverIPMap := makeMapIPStrToSiteID(sliverTools)\n\tsliverTools = nil\n\tbqProcessQuery(response.Rows, sliverIPMap, newCGs)\n\n\t\/\/ Cache details from response to use in subsequent requests if any.\n\tprojID := response.JobReference.ProjectId\n\tjobID := response.JobReference.JobId\n\tpageToken := response.PageToken\n\tn := len(response.Rows)\n\ttotalN := int(response.TotalRows)\n\tresponse = nil\n\n\t\/\/ Request for more results if not all results returned.\n\tif n < totalN {\n\t\t\/\/ Make further requests\n\t\tgetQueryResultsCall := jobsService.GetQueryResults(projID, jobID)\n\t\tvar respMore *bigquery.GetQueryResultsResponse\n\n\t\tfor n < totalN { \/\/ Make requests until total number of rows queried.\n\t\t\tgetQueryResultsCall.MaxResults(MaxBQResponseRows)\n\t\t\tgetQueryResultsCall.PageToken(pageToken)\n\n\t\t\trespMore, err = getQueryResultsCall.Do()\n\t\t\tif err != nil {\n\t\t\t\tc.Errorf(\"rtt: BQImportDay.bigquery.JobsGetQueryResponseCall: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpageToken = respMore.PageToken \/\/ Update pageToken to get next page.\n\n\t\t\tn += len(respMore.Rows)\n\t\t\tc.Infof(\"rtt: Received %d additional rows. (Total: %d rows)\", len(respMore.Rows), n)\n\n\t\t\tbqProcessQuery(respMore.Rows, sliverIPMap, newCGs)\n\t\t\trespMore = nil\n\t\t}\n\t}\n\n\tc.Infof(\"rtt: Reduced %d rows to %d rows. Merging into datastore.\", totalN, len(newCGs))\n\n\tbqMergeWithDatastore(c, newCGs)\n}\n\n\/\/ bqProcessQuery processes the output of the BigQuery query performed in\n\/\/ BQImport and parses the response into data structures.\nfunc bqProcessQuery(resp []*bigquery.TableRow, sliverIPMap map[string]string, newCGs map[string]*ClientGroup) {\n\trows := simplifyBQResponse(resp)\n\tbqMergeIntoClientGroups(rows, sliverIPMap, newCGs)\n}\n\n\/\/ dsWriteChunk is a structure with which new ClientGroup lists can be split\n\/\/ into lengths <= MaxDSWritePerQuery such that datastore.PutMulti works.\ntype dsReadChunk struct {\n\tKeys []*datastore.Key\n\tCGs []*ClientGroup\n}\n\n\/\/ bqMergeWithDatastore takes a list of ClientGroup generated by bqProcessQuery\n\/\/ and merges the new data with existing data in datastore\n\/\/ TODO(gavaletz): Evaluate whether this func could be split into smaller funcs,\n\/\/ also consider whether it's fine to have 3 local funcs.\nfunc bqMergeWithDatastore(c appengine.Context, newCGs map[string]*ClientGroup) {\n\t\/\/ Divide GetMulti and PutMulti operations into MaxDSReadPerQuery sized\n\t\/\/ operations to adhere with GAE limits.\n\tchunks := make([]*dsReadChunk, 0)\n\tvar thisChunk *dsReadChunk\n\t\/\/ newChunk creates a new dsReadChunk as necessary\n\tnewChunk := func() {\n\t\tthisChunk = &dsReadChunk{\n\t\t\tKeys: make([]*datastore.Key, 0, MaxDSReadPerQuery),\n\t\t\tCGs: make([]*ClientGroup, 0, MaxDSReadPerQuery),\n\t\t}\n\t\tchunks = append(chunks, thisChunk)\n\t}\n\tnewChunk() \/\/ Create initial dsReadChunk\n\n\trttKey := datastore.NewKey(c, \"string\", \"rtt\", 0, nil) \/\/ Parent key for ClientGroup entities\n\tfor cgStr, cg := range newCGs {\n\t\t\/\/ Add into chunk\n\t\tthisChunk.Keys = append(thisChunk.Keys, datastore.NewKey(c, \"ClientGroup\", cgStr, 0, rttKey))\n\t\tthisChunk.CGs = append(thisChunk.CGs, cg)\n\n\t\t\/\/ Make sure read chunks are only as large as MaxDSReadPerQuery.\n\t\t\/\/ Create new chunk if size reached.\n\t\tif len(thisChunk.CGs) == MaxDSReadPerQuery {\n\t\t\tnewChunk()\n\t\t}\n\t}\n\n\tvar oldCGs []ClientGroup\n\tvar merr appengine.MultiError\n\tvar err error\n\n\tkeysToPut := make([]*datastore.Key, 0, MaxDSWritePerQuery)\n\tcgsToPut := make([]ClientGroup, 0, MaxDSWritePerQuery)\n\ttotalPutN := 0\n\n\t\/\/ processPutQueue processes a queue of newly updated ClientGroups. This is\n\t\/\/ done so that MaxDSWritePerQuery no. of Puts can be done to reduce the\n\t\/\/ number of queries to datastore and therefore the time taken to Put all\n\t\/\/ changes to datastore.\n\tprocessPutQueue := func() {\n\t\ttotalPutN += len(cgsToPut)\n\t\tc.Infof(\"rtt: Putting %v records into datastore. (Total: %d rows)\", len(cgsToPut), totalPutN)\n\n\t\t_, err = datastore.PutMulti(c, keysToPut, cgsToPut)\n\t\tif err != nil {\n\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.PutMulti: %s\", err)\n\t\t}\n\t\tkeysToPut = make([]*datastore.Key, 0, MaxDSWritePerQuery)\n\t\tcgsToPut = make([]ClientGroup, 0, MaxDSWritePerQuery)\n\t}\n\n\t\/\/ processPutQueue places a newly updated ClientGroup in a queue. This queue\n\t\/\/ is later processed by processPutQueue.\n\tqueueToPut := func(idx int, keys []*datastore.Key) {\n\t\tkeysToPut = append(keysToPut, keys[idx])\n\t\tcgsToPut = append(cgsToPut, oldCGs[idx])\n\n\t\t\/\/ Write in MaxDSWritePerQuery chunks to adhere with GAE limits.\n\t\tif len(keysToPut) == MaxDSWritePerQuery {\n\t\t\tprocessPutQueue()\n\t\t}\n\t}\n\n\t\/\/ Process chunk by chunk\n\tfor _, chunk := range chunks {\n\t\toldCGs = make([]ClientGroup, len(chunk.CGs))\n\t\terr = datastore.GetMulti(c, chunk.Keys, oldCGs) \/\/ Get existing ClientGroup data\n\n\t\tswitch err.(type) {\n\t\tcase appengine.MultiError: \/\/ Multiple errors, deal with individually\n\t\t\tmerr = err.(appengine.MultiError)\n\t\t\tfor i, e := range merr { \/\/ Range over errors\n\t\t\t\tswitch e {\n\t\t\t\tcase datastore.ErrNoSuchEntity: \/\/ New entry\n\t\t\t\t\toldCGs[i] = *chunk.CGs[i]\n\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\tcase nil: \/\/ Entry exists, merge new data with old data\n\t\t\t\t\t\/\/ If for some reason data is corrupted and nil is returned\n\t\t\t\t\tif oldCGs[i].SiteRTTs == nil {\n\t\t\t\t\t\toldCGs[i] = *chunk.CGs[i]\n\t\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tchanged, err := MergeClientGroups(&oldCGs[i], chunk.CGs[i])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.MergeClientGroups: %s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif changed {\n\t\t\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault: \/\/ Other unknown error\n\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.GetMulti: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase nil: \/\/ No errors, data exists so merge with old data\n\t\t\tfor i, _ := range oldCGs {\n\t\t\t\tchanged, err := MergeClientGroups(&oldCGs[i], chunk.CGs[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.MergeClientGroups: %s\", err)\n\t\t\t\t}\n\t\t\t\tif changed {\n\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault: \/\/ Other unknown errors from GetMulti\n\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.GetMulti: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ Process remaining Put operations.\n\tif len(keysToPut) > 0 {\n\t\tprocessPutQueue()\n\t}\n\n\tc.Infof(\"rtt: Completed merging in %d rows from BigQuery.\", len(newCGs))\n}\n<commit_msg>rtt: Enable imports to fix e4006b744b<commit_after>\/\/ Copyright 2013 M-Lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build appengine\n\npackage rtt\n\nimport (\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/urlfetch\"\n\t\"code.google.com\/p\/golog2bq\/log2bq\"\n\t\"code.google.com\/p\/google-api-go-client\/bigquery\/v2\"\n\t\"code.google.com\/p\/mlab-ns2\/gae\/ns\/data\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tMaxDSReadPerQuery = 1000\n\tMaxDSWritePerQuery = 500\n\tMaxBQResponseRows = 50000 \/\/Response size must be less than 32MB. 100k rows occasionally caused problems.\n\tBigQueryBillableProjectID = \"mlab-ns2\"\n)\n\n\/\/ bqQueryFormat is the query used to pull RTT data from the M-Lab BigQuery\n\/\/ dataset.\n\/\/ NOTE: It must be formatted with Table Name, Year, Month, Day, Year, Month,\n\/\/ Day to specify which day the query is being performed for.\n\/\/\n\/\/ The following columns are selected:\n\/\/ - Logged Time (log_time)\n\/\/ - M-Lab Server IP (connection_spec.server_ip)\n\/\/ - Destination IP for traceroute hop, towards client (paris_traceroute_hop.dest_ip)\n\/\/ - Average of RTT in same traceroute and hop\n\/\/\n\/\/ The Query is performed for entries logged on specified days and for cases\n\/\/ where the field paris_traceroute_hop.rtt is not null. (RTT data exists)\n\/\/ The query also excludes RTT to the client due to the variability of the last\n\/\/ hop.\n\/\/\n\/\/ The result is grouped by time and the IPs such that multiple traceroute rtt\n\/\/ entries can be averaged.\n\n\/\/ The result is ordered by server and client IPs to allow for more efficient\n\/\/ traversal of response entries.\nconst bqQueryFormat = `SELECT\n\t\tlog_time,\n\t\tconnection_spec.server_ip,\n\t\tparis_traceroute_hop.dest_ip,\n\t\tAVG(paris_traceroute_hop.rtt) AS rtt\n\tFROM [%s]\n\tWHERE\n\t\tproject = 3 AND\n\t\tlog_time > %d AND\n\t\tlog_time < %d AND\n\t\tlog_time IS NOT NULL AND\n\t\tconnection_spec.server_ip IS NOT NULL AND\n\t\tparis_traceroute_hop.dest_ip IS NOT NULL AND\n\t\tparis_traceroute_hop.rtt IS NOT NULL AND\n\t\tconnection_spec.client_ip != paris_traceroute_hop.dest_ip\n\tGROUP EACH BY\n\t\tlog_time,\n\t\tconnection_spec.server_ip,\n\t\tparis_traceroute_hop.dest_ip;`\n\n\/\/ bqInit authenticates a transport using OAuth and returns a *bigquery.Service\n\/\/ with which to make queries to bigquery.\nfunc bqInit(r *http.Request) (*bigquery.Service, error) {\n\tc := appengine.NewContext(r)\n\n\t\/\/ Get transport from log2bq's utility function GAETransport\n\ttransport, err := log2bq.GAETransport(c, bigquery.BigqueryScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set maximum urlfetch request deadline\n\ttransport.Transport = &urlfetch.Transport{\n\t\tContext: c,\n\t\tDeadline: 10 * time.Minute,\n\t}\n\n\tclient, err := transport.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservice, err := bigquery.New(client)\n\treturn service, err\n}\n\n\/\/ BQImportDay queries BigQuery for RTT data from a specific day and stores new\n\/\/ data into datastore\nfunc BQImportDay(r *http.Request, t time.Time) {\n\tc := appengine.NewContext(r)\n\tservice, err := bqInit(r)\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.bqInit: %s\", err)\n\t\treturn\n\t}\n\n\t\/\/ Format strings to insert into bqQueryFormat\n\ttableName := fmt.Sprintf(\"measurement-lab:m_lab.%.4d_%.2d\", t.Year(), t.Month())\n\tdateStr := t.Format(dateFormat)\n\tstartTime, _ := time.Parse(timeFormat, dateStr+\" 00:00:00\")\n\tendTime, _ := time.Parse(timeFormat, dateStr+\" 23:59:59\")\n\n\t\/\/ Construct query\n\tqText := fmt.Sprintf(bqQueryFormat, tableName, startTime.Unix(), endTime.Unix())\n\tq := &bigquery.QueryRequest{\n\t\tQuery: qText,\n\t\tMaxResults: MaxBQResponseRows,\n\t\tTimeoutMs: 600000,\n\t\tUseQueryCache: true,\n\t}\n\tc.Debugf(\"rtt: BQImportDay.qText (%s): %s\", dateStr, qText)\n\n\t\/\/ Make first query to BigQuery\n\tjobsService := bigquery.NewJobsService(service)\n\tqueryCall := jobsService.Query(BigQueryBillableProjectID, q)\n\tresponse, err := queryCall.Do()\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.bigquery.JobsService.Query: %s\", err)\n\t\treturn\n\t}\n\tc.Infof(\"rtt: Received %d rows in query response (Total: %d rows).\", len(response.Rows), response.TotalRows)\n\n\tnewCGs := make(map[string]*ClientGroup)\n\tsliverTools, err := data.GetSliverTools(c)\n\tif err != nil {\n\t\tc.Errorf(\"rtt: BQImportDay.data.GetSliverTools: %s\", err)\n\t}\n\tsliverIPMap := makeMapIPStrToSiteID(sliverTools)\n\tsliverTools = nil\n\tbqProcessQuery(response.Rows, sliverIPMap, newCGs)\n\n\t\/\/ Cache details from response to use in subsequent requests if any.\n\tprojID := response.JobReference.ProjectId\n\tjobID := response.JobReference.JobId\n\tpageToken := response.PageToken\n\tn := len(response.Rows)\n\ttotalN := int(response.TotalRows)\n\tresponse = nil\n\n\t\/\/ Request for more results if not all results returned.\n\tif n < totalN {\n\t\t\/\/ Make further requests\n\t\tgetQueryResultsCall := jobsService.GetQueryResults(projID, jobID)\n\t\tvar respMore *bigquery.GetQueryResultsResponse\n\n\t\tfor n < totalN { \/\/ Make requests until total number of rows queried.\n\t\t\tgetQueryResultsCall.MaxResults(MaxBQResponseRows)\n\t\t\tgetQueryResultsCall.PageToken(pageToken)\n\n\t\t\trespMore, err = getQueryResultsCall.Do()\n\t\t\tif err != nil {\n\t\t\t\tc.Errorf(\"rtt: BQImportDay.bigquery.JobsGetQueryResponseCall: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpageToken = respMore.PageToken \/\/ Update pageToken to get next page.\n\n\t\t\tn += len(respMore.Rows)\n\t\t\tc.Infof(\"rtt: Received %d additional rows. (Total: %d rows)\", len(respMore.Rows), n)\n\n\t\t\tbqProcessQuery(respMore.Rows, sliverIPMap, newCGs)\n\t\t\trespMore = nil\n\t\t}\n\t}\n\n\tc.Infof(\"rtt: Reduced %d rows to %d rows. Merging into datastore.\", totalN, len(newCGs))\n\n\tbqMergeWithDatastore(c, newCGs)\n}\n\n\/\/ bqProcessQuery processes the output of the BigQuery query performed in\n\/\/ BQImport and parses the response into data structures.\nfunc bqProcessQuery(resp []*bigquery.TableRow, sliverIPMap map[string]string, newCGs map[string]*ClientGroup) {\n\trows := simplifyBQResponse(resp)\n\tbqMergeIntoClientGroups(rows, sliverIPMap, newCGs)\n}\n\n\/\/ dsWriteChunk is a structure with which new ClientGroup lists can be split\n\/\/ into lengths <= MaxDSWritePerQuery such that datastore.PutMulti works.\ntype dsReadChunk struct {\n\tKeys []*datastore.Key\n\tCGs []*ClientGroup\n}\n\n\/\/ bqMergeWithDatastore takes a list of ClientGroup generated by bqProcessQuery\n\/\/ and merges the new data with existing data in datastore\n\/\/ TODO(gavaletz): Evaluate whether this func could be split into smaller funcs,\n\/\/ also consider whether it's fine to have 3 local funcs.\nfunc bqMergeWithDatastore(c appengine.Context, newCGs map[string]*ClientGroup) {\n\t\/\/ Divide GetMulti and PutMulti operations into MaxDSReadPerQuery sized\n\t\/\/ operations to adhere with GAE limits.\n\tchunks := make([]*dsReadChunk, 0)\n\tvar thisChunk *dsReadChunk\n\t\/\/ newChunk creates a new dsReadChunk as necessary\n\tnewChunk := func() {\n\t\tthisChunk = &dsReadChunk{\n\t\t\tKeys: make([]*datastore.Key, 0, MaxDSReadPerQuery),\n\t\t\tCGs: make([]*ClientGroup, 0, MaxDSReadPerQuery),\n\t\t}\n\t\tchunks = append(chunks, thisChunk)\n\t}\n\tnewChunk() \/\/ Create initial dsReadChunk\n\n\trttKey := datastore.NewKey(c, \"string\", \"rtt\", 0, nil) \/\/ Parent key for ClientGroup entities\n\tfor cgStr, cg := range newCGs {\n\t\t\/\/ Add into chunk\n\t\tthisChunk.Keys = append(thisChunk.Keys, datastore.NewKey(c, \"ClientGroup\", cgStr, 0, rttKey))\n\t\tthisChunk.CGs = append(thisChunk.CGs, cg)\n\n\t\t\/\/ Make sure read chunks are only as large as MaxDSReadPerQuery.\n\t\t\/\/ Create new chunk if size reached.\n\t\tif len(thisChunk.CGs) == MaxDSReadPerQuery {\n\t\t\tnewChunk()\n\t\t}\n\t}\n\n\tvar oldCGs []ClientGroup\n\tvar merr appengine.MultiError\n\tvar err error\n\n\tkeysToPut := make([]*datastore.Key, 0, MaxDSWritePerQuery)\n\tcgsToPut := make([]ClientGroup, 0, MaxDSWritePerQuery)\n\ttotalPutN := 0\n\n\t\/\/ processPutQueue processes a queue of newly updated ClientGroups. This is\n\t\/\/ done so that MaxDSWritePerQuery no. of Puts can be done to reduce the\n\t\/\/ number of queries to datastore and therefore the time taken to Put all\n\t\/\/ changes to datastore.\n\tprocessPutQueue := func() {\n\t\ttotalPutN += len(cgsToPut)\n\t\tc.Infof(\"rtt: Putting %v records into datastore. (Total: %d rows)\", len(cgsToPut), totalPutN)\n\n\t\t_, err = datastore.PutMulti(c, keysToPut, cgsToPut)\n\t\tif err != nil {\n\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.PutMulti: %s\", err)\n\t\t}\n\t\tkeysToPut = make([]*datastore.Key, 0, MaxDSWritePerQuery)\n\t\tcgsToPut = make([]ClientGroup, 0, MaxDSWritePerQuery)\n\t}\n\n\t\/\/ processPutQueue places a newly updated ClientGroup in a queue. This queue\n\t\/\/ is later processed by processPutQueue.\n\tqueueToPut := func(idx int, keys []*datastore.Key) {\n\t\tkeysToPut = append(keysToPut, keys[idx])\n\t\tcgsToPut = append(cgsToPut, oldCGs[idx])\n\n\t\t\/\/ Write in MaxDSWritePerQuery chunks to adhere with GAE limits.\n\t\tif len(keysToPut) == MaxDSWritePerQuery {\n\t\t\tprocessPutQueue()\n\t\t}\n\t}\n\n\t\/\/ Process chunk by chunk\n\tfor _, chunk := range chunks {\n\t\toldCGs = make([]ClientGroup, len(chunk.CGs))\n\t\terr = datastore.GetMulti(c, chunk.Keys, oldCGs) \/\/ Get existing ClientGroup data\n\n\t\tswitch err.(type) {\n\t\tcase appengine.MultiError: \/\/ Multiple errors, deal with individually\n\t\t\tmerr = err.(appengine.MultiError)\n\t\t\tfor i, e := range merr { \/\/ Range over errors\n\t\t\t\tswitch e {\n\t\t\t\tcase datastore.ErrNoSuchEntity: \/\/ New entry\n\t\t\t\t\toldCGs[i] = *chunk.CGs[i]\n\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\tcase nil: \/\/ Entry exists, merge new data with old data\n\t\t\t\t\t\/\/ If for some reason data is corrupted and nil is returned\n\t\t\t\t\tif oldCGs[i].SiteRTTs == nil {\n\t\t\t\t\t\toldCGs[i] = *chunk.CGs[i]\n\t\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tchanged, err := MergeClientGroups(&oldCGs[i], chunk.CGs[i])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.MergeClientGroups: %s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif changed {\n\t\t\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault: \/\/ Other unknown error\n\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.GetMulti: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase nil: \/\/ No errors, data exists so merge with old data\n\t\t\tfor i, _ := range oldCGs {\n\t\t\t\tchanged, err := MergeClientGroups(&oldCGs[i], chunk.CGs[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.MergeClientGroups: %s\", err)\n\t\t\t\t}\n\t\t\t\tif changed {\n\t\t\t\t\tqueueToPut(i, chunk.Keys)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault: \/\/ Other unknown errors from GetMulti\n\t\t\tc.Errorf(\"rtt: bqMergeWithDatastore.datastore.GetMulti: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ Process remaining Put operations.\n\tif len(keysToPut) > 0 {\n\t\tprocessPutQueue()\n\t}\n\n\tc.Infof(\"rtt: Completed merging in %d rows from BigQuery.\", len(newCGs))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014, The Serviced Authors. All rights reserved.\n\/\/ Use of this source code is governed by a\n\/\/ license that can be found in the LICENSE file.\n\npackage web\n\nimport (\n\t\"github.com\/zenoss\/glog\"\n\t\"github.com\/zenoss\/go-json-rest\"\n\t\"github.com\/zenoss\/serviced\/domain\"\n\t\"github.com\/zenoss\/serviced\/domain\/host\"\n\t\"github.com\/zenoss\/serviced\/rpc\/agent\"\n\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/restGetHosts gets all hosts. Response is map[host-id]host.Host\nfunc restGetHosts(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\tresponse := make(map[string]*host.Host)\n\tclient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\thosts, err := client.GetHosts()\n\tif err != nil {\n\t\tglog.Errorf(\"Could not get hosts: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(2).Infof(\"Returning %d hosts\", len(hosts))\n\tfor _, host := range hosts {\n\t\tresponse[host.ID] = host\n\t\terr = buildHostMonitoringProfile(host)\n\t\tif err != nil {\n\t\t\trestServerError(w)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteJson(&response)\n}\n\n\/\/restGetHost retrieves a host. Response is Host\nfunc restGetHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tclient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\thost, err := client.GetHost(hostID)\n\tif err != nil {\n\t\tglog.Error(\"Could not get host: \", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\terr = buildHostMonitoringProfile(host)\n\tif err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"restGetHost: id %s, host %#v\", hostID, host)\n\tw.WriteJson(&host)\n}\n\n\/\/restAddHost adds a Host. Request input is host.Host\nfunc restAddHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\tvar payload host.Host\n\terr := r.DecodeJsonPayload(&payload)\n\tif err != nil {\n\t\tglog.V(1).Infof(\"Could not decode host payload: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\t\/\/ Save the pool ID and IP address for later. GetInfo wipes these\n\tipAddr := payload.IPAddr\n\tparts := strings.Split(ipAddr, \":\")\n\thostIP := parts[0]\n\n\tagentClient, err := agent.NewClient(payload.IPAddr)\n\t\/\/\tremoteClient, err := serviced.NewAgentClient(payload.IPAddr)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create connection to host %s: %v\", payload.IPAddr, err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\tIPs := []string{}\n\tfor _, ip := range payload.IPs {\n\t\tIPs = append(IPs, ip.IPAddress)\n\t}\n\tbuildRequest := agent.BuildHostRequest{\n\t\tIP: hostIP,\n\t\tPoolID: payload.PoolID,\n\t}\n\thost, err := agentClient.BuildHost(buildRequest)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to get remote host info: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.AddHost(*host)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(0).Info(\"Added host \", host.ID)\n\tw.WriteJson(&simpleResponse{\"Added host\", hostLinks(host.ID)})\n}\n\n\/\/restUpdateHost updates a host. Request input is host.Host\nfunc restUpdateHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\tglog.V(3).Infof(\"Received update request for %s\", hostID)\n\tvar payload host.Host\n\terr = r.DecodeJsonPayload(&payload)\n\tif err != nil {\n\t\tglog.V(1).Infof(\"Could not decode host payload: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.UpdateHost(payload)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to update host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(1).Info(\"Updated host \", hostID)\n\tw.WriteJson(&simpleResponse{\"Updated host\", hostLinks(hostID)})\n}\n\n\/\/restRemoveHost removes a host using host-id\nfunc restRemoveHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.RemoveHost(hostID)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not remove host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(0).Info(\"Removed host \", hostID)\n\tw.WriteJson(&simpleResponse{\"Removed host\", hostsLinks()})\n}\n\ntype metric struct {\n\tID string\n\tName string\n\tDescription string\n}\n\nvar (\n\tmetrics = []metric{\n\t\tmetric{\n\t\t\t\"CpuacctStat.system\",\n\t\t\t\"CPU System\",\n\t\t\t\"System CPU Usage\",\n\t\t},\n\t\tmetric{\n\t\t\t\"CpuacctStat.user\",\n\t\t\t\"CPU User\",\n\t\t\t\"User CPU Usage\",\n\t\t},\n\t\tmetric{\n\t\t\t\"MemoryStat.pgfault\",\n\t\t\t\"Memory Page Fault\",\n\t\t\t\"Page Fault Stats\",\n\t\t},\n\t\tmetric{\n\t\t\t\"MemoryStat.rss\",\n\t\t\t\"Resident Memory\",\n\t\t\t\"Resident Memory Usage\",\n\t\t},\n\t}\n)\n\nfunc buildHostMonitoringProfile(host *host.Host) error {\n\thost.MonitoringProfile = domain.MonitorProfile{\n\t\tMetrics: make([]domain.MetricConfig, len(metrics)),\n\t}\n\n\tbuild, err := domain.NewMetricConfigBuilder(\"\/metrics\/api\/performance\/query\", \"POST\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to create metric builder: %s\", err)\n\t\treturn err\n\t}\n\n\tfor i := range metrics {\n\t\tm := &metrics[i]\n\t\tbuild.Metric(m.ID, m.Name).SetTag(\"controlplane_host_id\", host.ID)\n\t\tconfig, err := build.Config(m.ID, m.Name, m.Description, \"1h-ago\")\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to build metric: %s\", err)\n\t\t}\n\t\thost.MonitoringProfile.Metrics[i] = *config\n\t}\n\n\treturn nil\n}\n<commit_msg>Response to code review<commit_after>\/\/ Copyright 2014, The Serviced Authors. All rights reserved.\n\/\/ Use of this source code is governed by a\n\/\/ license that can be found in the LICENSE file.\n\npackage web\n\nimport (\n\t\"github.com\/zenoss\/glog\"\n\t\"github.com\/zenoss\/go-json-rest\"\n\t\"github.com\/zenoss\/serviced\/domain\"\n\t\"github.com\/zenoss\/serviced\/domain\/host\"\n\t\"github.com\/zenoss\/serviced\/rpc\/agent\"\n\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/restGetHosts gets all hosts. Response is map[host-id]host.Host\nfunc restGetHosts(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\tresponse := make(map[string]*host.Host)\n\tclient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\thosts, err := client.GetHosts()\n\tif err != nil {\n\t\tglog.Errorf(\"Could not get hosts: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(2).Infof(\"Returning %d hosts\", len(hosts))\n\tfor _, host := range hosts {\n\t\tresponse[host.ID] = host\n\t\tif err := buildHostMonitoringProfile(host); err != nil {\n\t\t\trestServerError(w)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteJson(&response)\n}\n\n\/\/restGetHost retrieves a host. Response is Host\nfunc restGetHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tclient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\thost, err := client.GetHost(hostID)\n\tif err != nil {\n\t\tglog.Error(\"Could not get host: \", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\tif err := buildHostMonitoringProfile(host); err != nil {\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"restGetHost: id %s, host %#v\", hostID, host)\n\tw.WriteJson(&host)\n}\n\n\/\/restAddHost adds a Host. Request input is host.Host\nfunc restAddHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\tvar payload host.Host\n\terr := r.DecodeJsonPayload(&payload)\n\tif err != nil {\n\t\tglog.V(1).Infof(\"Could not decode host payload: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\t\/\/ Save the pool ID and IP address for later. GetInfo wipes these\n\tipAddr := payload.IPAddr\n\tparts := strings.Split(ipAddr, \":\")\n\thostIP := parts[0]\n\n\tagentClient, err := agent.NewClient(payload.IPAddr)\n\t\/\/\tremoteClient, err := serviced.NewAgentClient(payload.IPAddr)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create connection to host %s: %v\", payload.IPAddr, err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\n\tIPs := []string{}\n\tfor _, ip := range payload.IPs {\n\t\tIPs = append(IPs, ip.IPAddress)\n\t}\n\tbuildRequest := agent.BuildHostRequest{\n\t\tIP: hostIP,\n\t\tPoolID: payload.PoolID,\n\t}\n\thost, err := agentClient.BuildHost(buildRequest)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to get remote host info: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.AddHost(*host)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(0).Info(\"Added host \", host.ID)\n\tw.WriteJson(&simpleResponse{\"Added host\", hostLinks(host.ID)})\n}\n\n\/\/restUpdateHost updates a host. Request input is host.Host\nfunc restUpdateHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\tglog.V(3).Infof(\"Received update request for %s\", hostID)\n\tvar payload host.Host\n\terr = r.DecodeJsonPayload(&payload)\n\tif err != nil {\n\t\tglog.V(1).Infof(\"Could not decode host payload: %v\", err)\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.UpdateHost(payload)\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to update host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(1).Info(\"Updated host \", hostID)\n\tw.WriteJson(&simpleResponse{\"Updated host\", hostLinks(hostID)})\n}\n\n\/\/restRemoveHost removes a host using host-id\nfunc restRemoveHost(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext) {\n\thostID, err := url.QueryUnescape(r.PathParam(\"hostId\"))\n\tif err != nil {\n\t\trestBadRequest(w)\n\t\treturn\n\t}\n\n\tmasterClient, err := ctx.getMasterClient()\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to add host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\terr = masterClient.RemoveHost(hostID)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not remove host: %v\", err)\n\t\trestServerError(w)\n\t\treturn\n\t}\n\tglog.V(0).Info(\"Removed host \", hostID)\n\tw.WriteJson(&simpleResponse{\"Removed host\", hostsLinks()})\n}\n\ntype metric struct {\n\tID string\n\tName string\n\tDescription string\n}\n\nvar (\n\tmetrics = []metric{\n\t\tmetric{\n\t\t\t\"CpuacctStat.system\",\n\t\t\t\"CPU System\",\n\t\t\t\"System CPU Usage\",\n\t\t},\n\t\tmetric{\n\t\t\t\"CpuacctStat.user\",\n\t\t\t\"CPU User\",\n\t\t\t\"User CPU Usage\",\n\t\t},\n\t\tmetric{\n\t\t\t\"MemoryStat.pgfault\",\n\t\t\t\"Memory Page Fault\",\n\t\t\t\"Page Fault Stats\",\n\t\t},\n\t\tmetric{\n\t\t\t\"MemoryStat.rss\",\n\t\t\t\"Resident Memory\",\n\t\t\t\"Resident Memory Usage\",\n\t\t},\n\t}\n)\n\nfunc buildHostMonitoringProfile(host *host.Host) error {\n\thost.MonitoringProfile = domain.MonitorProfile{\n\t\tMetrics: make([]domain.MetricConfig, len(metrics)),\n\t}\n\n\tbuild, err := domain.NewMetricConfigBuilder(\"\/metrics\/api\/performance\/query\", \"POST\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to create metric builder: %s\", err)\n\t\treturn err\n\t}\n\n\tfor i := range metrics {\n\t\tbuild.Metric(metrics[i].ID, metrics[i].Name).SetTag(\"controlplane_host_id\", host.ID)\n\t\tconfig, err := build.Config(metrics[i].ID, metrics[i].Name, metrics[i].Description, \"1h-ago\")\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to build metric: %s\", err)\n\t\t\thost.MonitoringProfile = domain.MonitorProfile{}\n\t\t\treturn err\n\t\t}\n\t\thost.MonitoringProfile.Metrics[i] = *config\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n** Copyright [2013-2016] [Megam Systems]\n**\n** Licensed under the Apache License, Version 2.0 (the \"License\");\n** you may not use this file except in compliance with the License.\n** You may obtain a copy of the License at\n**\n** http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n**\n** Unless required by applicable law or agreed to in writing, software\n** distributed under the License is distributed on an \"AS IS\" BASIS,\n** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n** See the License for the specific language governing permissions and\n** limitations under the License.\n *\/\npackage carton\n\nimport (\n\t\"fmt\"\n\tldb \"github.com\/megamsys\/libgo\/db\"\n\t\"github.com\/megamsys\/libgo\/pairs\"\n\t\"github.com\/megamsys\/libgo\/utils\"\n\t\"github.com\/megamsys\/vertice\/carton\/bind\"\n\t\"github.com\/megamsys\/vertice\/meta\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"github.com\/megamsys\/vertice\/repository\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tDOMAIN = \"domain\"\n\tPUBLICIPV4 = \"publicipv4\"\n\tPRIVATEIPV4 = \"privateipv4\"\n\tCOMPBUCKET = \"components\"\n\tIMAGE_VERSION = \"version\"\n\tONECLICK = \"oneclick\"\n\tHOSTIP = \"hostip\"\n TRUE = \"true\"\n)\n\ntype Artifacts struct {\n\tType string `json:\"artifact_type\" cql:\"type\"`\n\tContent string `json:\"content\" cql:\"content\"`\n\tRequirements pairs.JsonPairs `json:\"requirements\" cql:\"requirements\"`\n}\n\n\/* Repository represents a repository managed by the manager. *\/\ntype Repo struct {\n\tRtype string `json:\"rtype\" cql:\"rtype\"`\n\tSource string `json:\"source\" cql:\"source\"`\n\tOneclick string `json:\"oneclick\" cql:\"oneclick\"`\n\tRurl string `json:\"url\" cql:\"url\"`\n}\n\ntype Component struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tTosca string `json:\"tosca_type\"`\n\tInputs pairs.JsonPairs `json:\"inputs\"`\n\tOutputs pairs.JsonPairs `json:\"outputs\"`\n\tEnvs pairs.JsonPairs `json:\"envs\"`\n\tRepo Repo `json:\"repo\"`\n\tArtifacts *Artifacts `json:\"artifacts\"`\n\tRelatedComponents []string `json:\"related_components\"`\n\tOperations []*Operations `json:\"operations\"`\n\tStatus string `json:\"status\"`\n\tCreatedAt string `json:\"created_at\"`\n}\n\ntype ComponentTable struct {\n\tId string `json:\"id\" cql:\"id\"`\n\tName string `json:\"name\" cql:\"name\"`\n\tTosca string `json:\"tosca_type\" cql:\"tosca_type\"`\n\tInputs []string `json:\"inputs\" cql:\"inputs\"`\n\tOutputs []string `json:\"outputs\" cql:\"outputs\"`\n\tEnvs []string `json:\"envs\" cql:\"envs\"`\n\tRepo string `json:\"repo\" cql:\"repo\"`\n\tArtifacts string `json:\"artifacts\" cql:\"artifacts\"`\n\tRelatedComponents []string `json:\"related_components\" cql:\"related_components\"`\n\tOperations []string `json:\"operations\" cql:\"operations\"`\n\tStatus string `json:\"status\" cql:\"status\"`\n\tCreatedAt string `json:\"created_at\" cql:\"created_at\"`\n}\n\nfunc (a *Component) String() string {\n\tif d, err := yaml.Marshal(a); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\n\/**\n**fetch the component json from riak and parse the json to struct\n**\/\nfunc NewComponent(id string) (*Component, error) {\n\tc := &ComponentTable{Id: id}\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"Id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"Id\": id},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Fetchdb(ops, c); err != nil {\n\t\treturn nil, err\n\t}\n\tcom, _ := c.dig()\n\treturn &com, nil\n}\n\n\/\/make a box with the details for a provisioner.\nfunc (c *Component) mkBox() (provision.Box, error) {\n\tbt := provision.Box{\n\t\tId: c.Id,\n\t\tLevel: provision.BoxSome,\n\t\tName: c.Name,\n\t\tDomainName: c.domain(),\n\t\tEnvs: c.envs(),\n\t\tTosca: c.Tosca,\n\t\tCommit: \"\",\n\t\tProvider: c.provider(),\n\t\tPublicIp: c.publicIp(),\n\t}\n fmt.Println(\"***********Component*********\")\nfmt.Println(c)\nfmt.Println(\"_______\",c.Repo)\n\tif &c.Repo != nil {\n\t\tbt.Repo = &repository.Repo{\n\t\t\tType: c.Repo.Rtype,\n\t\t\tSource: c.Repo.Source,\n\t\t\tOneClick: c.withOneClick(),\n\t\t\tURL: c.Repo.Rurl,\n\t\t}\n\t\t\/\/ if bt.Repo.URL == \"\" && bt.Repo.Source == VERTICE && bt.Repo.Type == IMAGE {\n\t\t\/\/ \tbt.Repo.URL = strings.Split(c.Tosca,\".\")[2]\n\t\t\/\/ }\n\n\t\tbt.Repo.Hook = BuildHook(c.Operations, repository.CIHOOK)\n\t}\n\treturn bt, nil\n}\n\nfunc (c *Component) SetStatus(status utils.Status) error {\n\tLastStatusUpdate := time.Now().Local().Format(time.RFC822)\n\tm := make(map[string][]string, 2)\n\tm[\"lastsuccessstatusupdate\"] = []string{LastStatusUpdate}\n\tm[\"status\"] = []string{status.String()}\n\tc.Inputs.NukeAndSet(m) \/\/just nuke the matching output key:\n\n\tupdate_fields := make(map[string]interface{})\n\tupdate_fields[\"Inputs\"] = c.Inputs.ToString()\n\tupdate_fields[\"Status\"] = status.String()\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"Id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"Id\": c.Id},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Updatedb(ops, update_fields); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/*func (c *Component) UpdateOpsRun(opsRan upgrade.OperationsRan) error {\n\tmutatedOps := make([]*upgrade.Operation, 0, len(opsRan))\n\n\tfor _, o := range opsRan {\n\t\tmutatedOps = append(mutatedOps, o.Raw)\n\t}\n\tc.Operations = mutatedOps\n\n\tif err := db.Store(COMPBUCKET, c.Id, c); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}*\/\n\nfunc (c *Component) Delete(compid string) {\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"id\": compid},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Deletedb(ops, ComponentTable{}); err != nil {\n\t\treturn\n\t}\n}\n\nfunc (c *Component) setDeployData(dd DeployData) error {\n\t\/*c.Inputs = append(c.Inputs, utils.NewJsonPair(\"lastsuccessstatusupdate\", \"\"))\n\tc.Inputs = append(c.Inputs, utils.NewJsonPair(\"status\", \"\"))\n\n\tif err := db.Store(COMPBUCKET, c.Id, c); err != nil {\n\t\treturn err\n\t}*\/\n\treturn nil\n\n}\n\nfunc (a *ComponentTable) dig() (Component, error) {\n\tasm := Component{}\n\tasm.Id = a.Id\n\tasm.Name = a.Name\n\tasm.Tosca = a.Tosca\n\tasm.Inputs = a.getInputs()\n\tasm.Outputs = a.getOutputs()\n\tasm.Envs = a.getEnvs()\n\tasm.Repo = a.getRepo()\n\tasm.Artifacts = a.getArtifacts()\n\tasm.RelatedComponents = a.RelatedComponents\n\tasm.Operations = a.getOperations()\n\tasm.Status = a.Status\n\tasm.CreatedAt = a.CreatedAt\n\treturn asm, nil\n}\n\nfunc (c *Component) domain() string {\n\treturn c.Inputs.Match(DOMAIN)\n}\n\nfunc (c *Component) provider() string {\n\treturn c.Inputs.Match(utils.PROVIDER)\n}\n\nfunc (c *Component) publicIp() string {\n\treturn c.Outputs.Match(PUBLICIPV4)\n}\n\nfunc (c *Component) withOneClick() bool {\n\treturn (strings.TrimSpace(c.Repo.Oneclick) == TRUE)\n}\n\n\/\/all the variables in the inputs shall be treated as ENV.\n\/\/we can use a filtered approach as well.\nfunc (c *Component) envs() []bind.EnvVar {\n\tenvs := make([]bind.EnvVar, 0, len(c.Envs))\n\tfor _, i := range c.Envs {\n\t\tenvs = append(envs, bind.EnvVar{Name: i.K, Value: i.V})\n\t}\n\treturn envs\n}\n\nfunc (a *ComponentTable) getInputs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Inputs {\n\t\tinputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &inputs)\n\t\tkeys = append(keys, &inputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getOutputs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Outputs {\n\t\toutputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &outputs)\n\t\tkeys = append(keys, &outputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getEnvs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Envs {\n\t\toutputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &outputs)\n\t\tkeys = append(keys, &outputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getRepo() Repo {\n\toutputs := Repo{}\n\tparseStringToStruct(a.Repo, &outputs)\n\treturn outputs\n}\n\nfunc (a *ComponentTable) getArtifacts() *Artifacts {\n\toutputs := Artifacts{}\n\tparseStringToStruct(a.Artifacts, &outputs)\n\treturn &outputs\n}\n\nfunc (a *ComponentTable) getOperations() []*Operations {\n\tkeys := make([]*Operations, 0)\n\tfor _, in := range a.Operations {\n\t\tp := Operations{}\n\t\tparseStringToStruct(in, &p)\n\t\tkeys = append(keys, &p)\n\t}\n\treturn keys\n}\n<commit_msg>remove prints<commit_after>\/*\n** Copyright [2013-2016] [Megam Systems]\n**\n** Licensed under the Apache License, Version 2.0 (the \"License\");\n** you may not use this file except in compliance with the License.\n** You may obtain a copy of the License at\n**\n** http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n**\n** Unless required by applicable law or agreed to in writing, software\n** distributed under the License is distributed on an \"AS IS\" BASIS,\n** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n** See the License for the specific language governing permissions and\n** limitations under the License.\n *\/\npackage carton\n\nimport (\n\tldb \"github.com\/megamsys\/libgo\/db\"\n\t\"github.com\/megamsys\/libgo\/pairs\"\n\t\"github.com\/megamsys\/libgo\/utils\"\n\t\"github.com\/megamsys\/vertice\/carton\/bind\"\n\t\"github.com\/megamsys\/vertice\/meta\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"github.com\/megamsys\/vertice\/repository\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tDOMAIN = \"domain\"\n\tPUBLICIPV4 = \"publicipv4\"\n\tPRIVATEIPV4 = \"privateipv4\"\n\tCOMPBUCKET = \"components\"\n\tIMAGE_VERSION = \"version\"\n\tONECLICK = \"oneclick\"\n\tHOSTIP = \"hostip\"\n TRUE = \"true\"\n)\n\ntype Artifacts struct {\n\tType string `json:\"artifact_type\" cql:\"type\"`\n\tContent string `json:\"content\" cql:\"content\"`\n\tRequirements pairs.JsonPairs `json:\"requirements\" cql:\"requirements\"`\n}\n\n\/* Repository represents a repository managed by the manager. *\/\ntype Repo struct {\n\tRtype string `json:\"rtype\" cql:\"rtype\"`\n\tSource string `json:\"source\" cql:\"source\"`\n\tOneclick string `json:\"oneclick\" cql:\"oneclick\"`\n\tRurl string `json:\"url\" cql:\"url\"`\n}\n\ntype Component struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tTosca string `json:\"tosca_type\"`\n\tInputs pairs.JsonPairs `json:\"inputs\"`\n\tOutputs pairs.JsonPairs `json:\"outputs\"`\n\tEnvs pairs.JsonPairs `json:\"envs\"`\n\tRepo Repo `json:\"repo\"`\n\tArtifacts *Artifacts `json:\"artifacts\"`\n\tRelatedComponents []string `json:\"related_components\"`\n\tOperations []*Operations `json:\"operations\"`\n\tStatus string `json:\"status\"`\n\tCreatedAt string `json:\"created_at\"`\n}\n\ntype ComponentTable struct {\n\tId string `json:\"id\" cql:\"id\"`\n\tName string `json:\"name\" cql:\"name\"`\n\tTosca string `json:\"tosca_type\" cql:\"tosca_type\"`\n\tInputs []string `json:\"inputs\" cql:\"inputs\"`\n\tOutputs []string `json:\"outputs\" cql:\"outputs\"`\n\tEnvs []string `json:\"envs\" cql:\"envs\"`\n\tRepo string `json:\"repo\" cql:\"repo\"`\n\tArtifacts string `json:\"artifacts\" cql:\"artifacts\"`\n\tRelatedComponents []string `json:\"related_components\" cql:\"related_components\"`\n\tOperations []string `json:\"operations\" cql:\"operations\"`\n\tStatus string `json:\"status\" cql:\"status\"`\n\tCreatedAt string `json:\"created_at\" cql:\"created_at\"`\n}\n\nfunc (a *Component) String() string {\n\tif d, err := yaml.Marshal(a); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\n\/**\n**fetch the component json from riak and parse the json to struct\n**\/\nfunc NewComponent(id string) (*Component, error) {\n\tc := &ComponentTable{Id: id}\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"Id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"Id\": id},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Fetchdb(ops, c); err != nil {\n\t\treturn nil, err\n\t}\n\tcom, _ := c.dig()\n\treturn &com, nil\n}\n\n\/\/make a box with the details for a provisioner.\nfunc (c *Component) mkBox() (provision.Box, error) {\n\tbt := provision.Box{\n\t\tId: c.Id,\n\t\tLevel: provision.BoxSome,\n\t\tName: c.Name,\n\t\tDomainName: c.domain(),\n\t\tEnvs: c.envs(),\n\t\tTosca: c.Tosca,\n\t\tCommit: \"\",\n\t\tProvider: c.provider(),\n\t\tPublicIp: c.publicIp(),\n\t}\n\tif &c.Repo != nil {\n\t\tbt.Repo = &repository.Repo{\n\t\t\tType: c.Repo.Rtype,\n\t\t\tSource: c.Repo.Source,\n\t\t\tOneClick: c.withOneClick(),\n\t\t\tURL: c.Repo.Rurl,\n\t\t}\n\t\t\/\/ if bt.Repo.URL == \"\" && bt.Repo.Source == VERTICE && bt.Repo.Type == IMAGE {\n\t\t\/\/ \tbt.Repo.URL = strings.Split(c.Tosca,\".\")[2]\n\t\t\/\/ }\n\n\t\tbt.Repo.Hook = BuildHook(c.Operations, repository.CIHOOK)\n\t}\n\treturn bt, nil\n}\n\nfunc (c *Component) SetStatus(status utils.Status) error {\n\tLastStatusUpdate := time.Now().Local().Format(time.RFC822)\n\tm := make(map[string][]string, 2)\n\tm[\"lastsuccessstatusupdate\"] = []string{LastStatusUpdate}\n\tm[\"status\"] = []string{status.String()}\n\tc.Inputs.NukeAndSet(m) \/\/just nuke the matching output key:\n\n\tupdate_fields := make(map[string]interface{})\n\tupdate_fields[\"Inputs\"] = c.Inputs.ToString()\n\tupdate_fields[\"Status\"] = status.String()\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"Id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"Id\": c.Id},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Updatedb(ops, update_fields); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/*func (c *Component) UpdateOpsRun(opsRan upgrade.OperationsRan) error {\n\tmutatedOps := make([]*upgrade.Operation, 0, len(opsRan))\n\n\tfor _, o := range opsRan {\n\t\tmutatedOps = append(mutatedOps, o.Raw)\n\t}\n\tc.Operations = mutatedOps\n\n\tif err := db.Store(COMPBUCKET, c.Id, c); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}*\/\n\nfunc (c *Component) Delete(compid string) {\n\tops := ldb.Options{\n\t\tTableName: COMPBUCKET,\n\t\tPks: []string{\"id\"},\n\t\tCcms: []string{},\n\t\tHosts: meta.MC.Scylla,\n\t\tKeyspace: meta.MC.ScyllaKeyspace,\n\t\tPksClauses: map[string]interface{}{\"id\": compid},\n\t\tCcmsClauses: make(map[string]interface{}),\n\t}\n\tif err := ldb.Deletedb(ops, ComponentTable{}); err != nil {\n\t\treturn\n\t}\n}\n\nfunc (c *Component) setDeployData(dd DeployData) error {\n\t\/*c.Inputs = append(c.Inputs, utils.NewJsonPair(\"lastsuccessstatusupdate\", \"\"))\n\tc.Inputs = append(c.Inputs, utils.NewJsonPair(\"status\", \"\"))\n\n\tif err := db.Store(COMPBUCKET, c.Id, c); err != nil {\n\t\treturn err\n\t}*\/\n\treturn nil\n\n}\n\nfunc (a *ComponentTable) dig() (Component, error) {\n\tasm := Component{}\n\tasm.Id = a.Id\n\tasm.Name = a.Name\n\tasm.Tosca = a.Tosca\n\tasm.Inputs = a.getInputs()\n\tasm.Outputs = a.getOutputs()\n\tasm.Envs = a.getEnvs()\n\tasm.Repo = a.getRepo()\n\tasm.Artifacts = a.getArtifacts()\n\tasm.RelatedComponents = a.RelatedComponents\n\tasm.Operations = a.getOperations()\n\tasm.Status = a.Status\n\tasm.CreatedAt = a.CreatedAt\n\treturn asm, nil\n}\n\nfunc (c *Component) domain() string {\n\treturn c.Inputs.Match(DOMAIN)\n}\n\nfunc (c *Component) provider() string {\n\treturn c.Inputs.Match(utils.PROVIDER)\n}\n\nfunc (c *Component) publicIp() string {\n\treturn c.Outputs.Match(PUBLICIPV4)\n}\n\nfunc (c *Component) withOneClick() bool {\n\treturn (strings.TrimSpace(c.Repo.Oneclick) == TRUE)\n}\n\n\/\/all the variables in the inputs shall be treated as ENV.\n\/\/we can use a filtered approach as well.\nfunc (c *Component) envs() []bind.EnvVar {\n\tenvs := make([]bind.EnvVar, 0, len(c.Envs))\n\tfor _, i := range c.Envs {\n\t\tenvs = append(envs, bind.EnvVar{Name: i.K, Value: i.V})\n\t}\n\treturn envs\n}\n\nfunc (a *ComponentTable) getInputs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Inputs {\n\t\tinputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &inputs)\n\t\tkeys = append(keys, &inputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getOutputs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Outputs {\n\t\toutputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &outputs)\n\t\tkeys = append(keys, &outputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getEnvs() pairs.JsonPairs {\n\tkeys := make([]*pairs.JsonPair, 0)\n\tfor _, in := range a.Envs {\n\t\toutputs := pairs.JsonPair{}\n\t\tparseStringToStruct(in, &outputs)\n\t\tkeys = append(keys, &outputs)\n\t}\n\treturn keys\n}\n\nfunc (a *ComponentTable) getRepo() Repo {\n\toutputs := Repo{}\n\tparseStringToStruct(a.Repo, &outputs)\n\treturn outputs\n}\n\nfunc (a *ComponentTable) getArtifacts() *Artifacts {\n\toutputs := Artifacts{}\n\tparseStringToStruct(a.Artifacts, &outputs)\n\treturn &outputs\n}\n\nfunc (a *ComponentTable) getOperations() []*Operations {\n\tkeys := make([]*Operations, 0)\n\tfor _, in := range a.Operations {\n\t\tp := Operations{}\n\t\tparseStringToStruct(in, &p)\n\t\tkeys = append(keys, &p)\n\t}\n\treturn keys\n}\n<|endoftext|>"} {"text":"<commit_before>package kafkafs\n\nimport (\n\t\"path\/filepath\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"regexp\"\n\t\"time\"\n\t\"fmt\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/nodefs\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/pathfs\"\n)\n\n\/\/ Implements a FUSE filesystem backed by a Kafka installation.\n\/\/\n\/\/ This version is read only, so it cannot post to topics, only read\n\/\/ from them.\ntype KafkaRoFs struct {\n\tpathfs.FileSystem\n\n\tKafkaClient KafkaClient\n\tuserFiles map[string]bool\n}\n\ntype parsedPath struct {\n IsValid bool\n\tIsRoot bool\n\tTopic string\n\tPartition int32\n\tOffset int64\n}\n\nfunc (fs *KafkaRoFs) parseAndValidate(name string) (parsedPath, error) {\n\tparsed := parsedPath{IsValid: true, IsRoot: false, Topic: \"\", Partition: -1,\n\t\t\tOffset: -1}\n\tslashed := filepath.ToSlash(name)\n\tre := regexp.MustCompile(\"\/{2,}\")\n\tnormal := re.ReplaceAllString(slashed, \"\/\")\n\tif normal == \"\" {\n\t\tparsed.IsRoot = true\n\t\treturn parsed, nil\n\t}\n\n\tsplits := strings.Split(normal, \"\/\")\n\n\tif len(splits) > 3 {\n\t\tparsed.IsValid = false\n\t\treturn parsed, nil\n\t}\n\n\tif len(splits) >= 1 {\n\t\tmaybeTopic := splits[0]\n\t\tisTop, err := fs.isTopic(maybeTopic)\n\t\tif err != nil {\n\t\t\treturn parsed, err\n\t\t}\n\t\tif !isTop {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\tparsed.Topic = maybeTopic\n\t}\n\n\tif len(splits) >= 2 {\n\t\tmaybePartition := splits[1]\n\t\tisPart, err := fs.isPartition(parsed.Topic, maybePartition)\n\t\tif err != nil {\n\t\t\treturn parsed, err\n\t\t}\n\t\tif !isPart {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\t\/\/ this should always succeed if isPartition returned true.\n\t\tpartition64, _ := strconv.ParseInt(maybePartition, 10, 32)\n\t\tparsed.Partition = int32(partition64)\n\t}\n\n\tif len(splits) == 3 {\n\t\tmaybeOffset := splits[2]\n\t\toffset, err := strconv.ParseInt(maybeOffset, 10, 64)\n\t\tif err != nil {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\tparsed.Offset = offset\n\t}\n\n\treturn parsed, nil\n}\n\nfunc (fs *KafkaRoFs) isTopic(maybeTopic string) (bool, error) {\n\ttopics, err := fs.KafkaClient.GetTopics()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, topic := range topics {\n\t\tif topic == maybeTopic {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (fs *KafkaRoFs) isPartition(topic string, maybePartition string) (bool, error) {\n\tmaybePartition64, err := strconv.ParseInt(maybePartition, 10, 32)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\tmaybePartition32 := int32(maybePartition64)\n\tpartitions, err := fs.KafkaClient.GetPartitions(topic)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, partition := range partitions {\n\t\tif partition == maybePartition32 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (fs *KafkaRoFs) GetAttr(name string, context *fuse.Context) (*fuse.Attr,\n\tfuse.Status) {\n\tlog.Printf(\"GetAttr name: %s\", name)\n\n\tparsed, err := fs.parseAndValidate(name)\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tswitch {\n\t\/\/ root or a topic\n\tcase parsed.IsRoot, parsed.Offset == -1 && parsed.Partition == -1:\n\t\treturn &fuse.Attr{Mode: fuse.S_IFDIR | 0500}, fuse.OK\n \/\/ partition\n\tcase parsed.Offset == -1:\n\t\treturn &fuse.Attr{Mode: fuse.S_IFDIR | 0700}, fuse.OK\n \/\/ offset \/ msg\n case true:\n\t\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn nil, fuse.EIO\n\t\t}\n\t\tif msgBytes == nil {\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\t\tfs.userFiles[name] = true\n\t\treturn &fuse.Attr{Mode: fuse.S_IFREG | 0400,\n\t\t\tSize: uint64(len(msgBytes))}, fuse.OK\n\t}\n\n\treturn nil, fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) OpenDir(name string, context *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tlog.Printf(\"OpenDir name: %s\", name)\n\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tswitch {\n\tcase parsed.IsRoot:\n\t\treturn fs.openRoot(context)\n\tcase parsed.Partition == -1:\n\t\treturn fs.openTopic(parsed.Topic, context)\n\tcase parsed.Offset == -1:\n\t\treturn fs.openPartition(parsed.Topic, parsed.Partition, context)\n\t}\n\n\treturn nil, fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) openPartition(topic string, partition int32,\n\tcontext *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tearliest, next, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\tif next == int64(0) {\n\t\t\/\/ this means an empty partition\n\t\treturn []fuse.DirEntry{}, fuse.OK\n\t}\n\n\tentries := []fuse.DirEntry{fuse.DirEntry{Name: strconv.FormatInt(earliest, 10),\n\t\tMode: fuse.S_IFREG}}\n\tif earliest != next - 1 {\n\t\tentries = append(entries, fuse.DirEntry{Name: strconv.FormatInt(next - 1, 10),\n\t\t\tMode: fuse.S_IFREG})\n\t}\n\n\tfor path, _ := range fs.userFiles {\n\t\tparsed, err := fs.parseAndValidate(path)\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Error was non-nil, bad user path %s\", err)\n\t\t}\n\t\tif parsed.Partition == partition && parsed.Offset != earliest &&\n\t\t\tparsed.Offset != next - 1 {\n\t\t\tentries = append(entries, fuse.DirEntry{\n\t\t\t\tName: strconv.FormatInt(parsed.Offset, 10), Mode: fuse.S_IFREG})\n\t\t}\n\t}\n\n\treturn entries, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) openRoot(context *fuse.Context) ([]fuse.DirEntry, fuse.Status) {\n\t\/\/ root, show all the topics\n\tvar topicDirs []fuse.DirEntry\n\ttopics, err := fs.KafkaClient.GetTopics()\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tfor _, topic := range topics {\n\t\ttopicDirs = append(topicDirs, fuse.DirEntry{Name: topic, Mode: fuse.S_IFDIR})\n\t}\n\n\treturn topicDirs, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) openTopic(name string, context *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tvar partitionDirs []fuse.DirEntry\n\tpartitions, err := fs.KafkaClient.GetPartitions(name)\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tfor _, partition := range partitions {\n\t\tpartitionDirs = append(partitionDirs,\n\t\t\tfuse.DirEntry{Name: strconv.FormatInt(int64(partition), 10),\n\t\t\t\tMode: fuse.S_IFDIR})\n\t}\n\n\treturn partitionDirs, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) Open(name string, flags uint32,\n\tcontext *fuse.Context) (nodefs.File, fuse.Status) {\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid || parsed.Offset == -1 {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tif flags & fuse.O_ANYWRITE != 0 {\n\t\treturn nil, fuse.EPERM\n\t}\n\n\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\tparsed.Offset)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\treturn nodefs.NewDataFile(msgBytes), fuse.OK\n}\n\nfunc (fs *KafkaRoFs) Mknod(name string, mode uint32, dev uint32,\n\tcontext *fuse.Context) fuse.Status {\n\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn fuse.EIO\n\t}\n\n\tif !parsed.IsValid || parsed.Offset == -1 {\n\t\treturn fuse.ENOENT\n\t}\n\n\t\/\/ we don't want to block waiting on an offset that is in the past\n\t\/\/ and no longer available from kafka\n\n\tnE := func() (bool, error) {\n\t\treturn fs.offsetNotExpired(parsed.Topic, parsed.Partition, parsed.Offset)\n\t}\n\n\tfor notExpired, err := nE(); notExpired; notExpired, err = nE() {\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tisFuture, err := fs.offsetIsFuture(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tif isFuture {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tif msgBytes == nil {\n\t\t\t\/\/ this shouldn't happen unless the messages are expiring\n\t\t\t\/\/ very fast, but who knows\n\t\t\treturn fuse.ENOENT\n\t\t}\n\n\t\terr = fs.addUserFile(parsed.Topic, parsed.Partition, parsed.Offset, msgBytes)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\treturn fuse.OK\n\t}\n\n\treturn fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) addUserFile(topic string, partition int32, offset int64,\n\tmsgBytes []byte) error {\n\tpath := fmt.Sprintf(\"%s\/%d\/%d\", topic, partition, offset)\n\tfs.userFiles[path] = true\n\treturn nil\n}\n\nfunc (fs *KafkaRoFs) offsetNotExpired(topic string, partition int32,\n\toffset int64) (bool, error) {\n\tearliest, _, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn offset >= earliest, nil\n}\n\nfunc (fs *KafkaRoFs) offsetIsFuture(topic string, partition int32,\n\toffset int64) (bool, error) {\n\t_, next, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn offset >= next, nil\n}\n\n\/\/ just pretend we set the times to keep touch happy\nfunc (fs *KafkaRoFs) Utimens(name string, Atime *time.Time, Mtime *time.Time,\n\tcontext *fuse.Context) fuse.Status {\n\treturn fuse.OK\n}\n\nfunc NewKafkaRoFs(kClient KafkaClient) *KafkaRoFs {\n\treturn &KafkaRoFs{FileSystem: pathfs.NewDefaultFileSystem(), KafkaClient: kClient,\n\t\tuserFiles: make(map[string]bool)}\n}\n\nfunc (fs *KafkaRoFs) Unlink(name string, context *fuse.Context) fuse.Status {\n\t_, ok := fs.userFiles[name]\n\tif !ok {\n\t\treturn fuse.EPERM\n\t}\n\tdelete(fs.userFiles, name)\n\treturn fuse.OK\n}\n<commit_msg>Tabs.<commit_after>package kafkafs\n\nimport (\n\t\"path\/filepath\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"regexp\"\n\t\"time\"\n\t\"fmt\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/nodefs\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/pathfs\"\n)\n\n\/\/ Implements a FUSE filesystem backed by a Kafka installation.\n\/\/\n\/\/ This version is read only, so it cannot post to topics, only read\n\/\/ from them.\ntype KafkaRoFs struct {\n\tpathfs.FileSystem\n\n\tKafkaClient KafkaClient\n\tuserFiles map[string]bool\n}\n\ntype parsedPath struct {\n\tIsValid bool\n\tIsRoot bool\n\tTopic string\n\tPartition int32\n\tOffset int64\n}\n\nfunc (fs *KafkaRoFs) parseAndValidate(name string) (parsedPath, error) {\n\tparsed := parsedPath{IsValid: true, IsRoot: false, Topic: \"\", Partition: -1,\n\t\t\tOffset: -1}\n\tslashed := filepath.ToSlash(name)\n\tre := regexp.MustCompile(\"\/{2,}\")\n\tnormal := re.ReplaceAllString(slashed, \"\/\")\n\tif normal == \"\" {\n\t\tparsed.IsRoot = true\n\t\treturn parsed, nil\n\t}\n\n\tsplits := strings.Split(normal, \"\/\")\n\n\tif len(splits) > 3 {\n\t\tparsed.IsValid = false\n\t\treturn parsed, nil\n\t}\n\n\tif len(splits) >= 1 {\n\t\tmaybeTopic := splits[0]\n\t\tisTop, err := fs.isTopic(maybeTopic)\n\t\tif err != nil {\n\t\t\treturn parsed, err\n\t\t}\n\t\tif !isTop {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\tparsed.Topic = maybeTopic\n\t}\n\n\tif len(splits) >= 2 {\n\t\tmaybePartition := splits[1]\n\t\tisPart, err := fs.isPartition(parsed.Topic, maybePartition)\n\t\tif err != nil {\n\t\t\treturn parsed, err\n\t\t}\n\t\tif !isPart {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\t\/\/ this should always succeed if isPartition returned true.\n\t\tpartition64, _ := strconv.ParseInt(maybePartition, 10, 32)\n\t\tparsed.Partition = int32(partition64)\n\t}\n\n\tif len(splits) == 3 {\n\t\tmaybeOffset := splits[2]\n\t\toffset, err := strconv.ParseInt(maybeOffset, 10, 64)\n\t\tif err != nil {\n\t\t\tparsed.IsValid = false\n\t\t\treturn parsed, nil\n\t\t}\n\t\tparsed.Offset = offset\n\t}\n\n\treturn parsed, nil\n}\n\nfunc (fs *KafkaRoFs) isTopic(maybeTopic string) (bool, error) {\n\ttopics, err := fs.KafkaClient.GetTopics()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, topic := range topics {\n\t\tif topic == maybeTopic {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (fs *KafkaRoFs) isPartition(topic string, maybePartition string) (bool, error) {\n\tmaybePartition64, err := strconv.ParseInt(maybePartition, 10, 32)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\tmaybePartition32 := int32(maybePartition64)\n\tpartitions, err := fs.KafkaClient.GetPartitions(topic)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, partition := range partitions {\n\t\tif partition == maybePartition32 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (fs *KafkaRoFs) GetAttr(name string, context *fuse.Context) (*fuse.Attr,\n\tfuse.Status) {\n\tlog.Printf(\"GetAttr name: %s\", name)\n\n\tparsed, err := fs.parseAndValidate(name)\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tswitch {\n\t\/\/ root or a topic\n\tcase parsed.IsRoot, parsed.Offset == -1 && parsed.Partition == -1:\n\t\treturn &fuse.Attr{Mode: fuse.S_IFDIR | 0500}, fuse.OK\n\t\/\/ partition\n\tcase parsed.Offset == -1:\n\t\treturn &fuse.Attr{Mode: fuse.S_IFDIR | 0700}, fuse.OK\n\t\/\/ offset \/ msg\n\tcase true:\n\t\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn nil, fuse.EIO\n\t\t}\n\t\tif msgBytes == nil {\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\t\tfs.userFiles[name] = true\n\t\treturn &fuse.Attr{Mode: fuse.S_IFREG | 0400,\n\t\t\tSize: uint64(len(msgBytes))}, fuse.OK\n\t}\n\n\treturn nil, fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) OpenDir(name string, context *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tlog.Printf(\"OpenDir name: %s\", name)\n\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tswitch {\n\tcase parsed.IsRoot:\n\t\treturn fs.openRoot(context)\n\tcase parsed.Partition == -1:\n\t\treturn fs.openTopic(parsed.Topic, context)\n\tcase parsed.Offset == -1:\n\t\treturn fs.openPartition(parsed.Topic, parsed.Partition, context)\n\t}\n\n\treturn nil, fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) openPartition(topic string, partition int32,\n\tcontext *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tearliest, next, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\tif next == int64(0) {\n\t\t\/\/ this means an empty partition\n\t\treturn []fuse.DirEntry{}, fuse.OK\n\t}\n\n\tentries := []fuse.DirEntry{fuse.DirEntry{Name: strconv.FormatInt(earliest, 10),\n\t\tMode: fuse.S_IFREG}}\n\tif earliest != next - 1 {\n\t\tentries = append(entries, fuse.DirEntry{Name: strconv.FormatInt(next - 1, 10),\n\t\t\tMode: fuse.S_IFREG})\n\t}\n\n\tfor path, _ := range fs.userFiles {\n\t\tparsed, err := fs.parseAndValidate(path)\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"Error was non-nil, bad user path %s\", err)\n\t\t}\n\t\tif parsed.Partition == partition && parsed.Offset != earliest &&\n\t\t\tparsed.Offset != next - 1 {\n\t\t\tentries = append(entries, fuse.DirEntry{\n\t\t\t\tName: strconv.FormatInt(parsed.Offset, 10), Mode: fuse.S_IFREG})\n\t\t}\n\t}\n\n\treturn entries, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) openRoot(context *fuse.Context) ([]fuse.DirEntry, fuse.Status) {\n\t\/\/ root, show all the topics\n\tvar topicDirs []fuse.DirEntry\n\ttopics, err := fs.KafkaClient.GetTopics()\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tfor _, topic := range topics {\n\t\ttopicDirs = append(topicDirs, fuse.DirEntry{Name: topic, Mode: fuse.S_IFDIR})\n\t}\n\n\treturn topicDirs, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) openTopic(name string, context *fuse.Context) ([]fuse.DirEntry,\n\tfuse.Status) {\n\tvar partitionDirs []fuse.DirEntry\n\tpartitions, err := fs.KafkaClient.GetPartitions(name)\n\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tfor _, partition := range partitions {\n\t\tpartitionDirs = append(partitionDirs,\n\t\t\tfuse.DirEntry{Name: strconv.FormatInt(int64(partition), 10),\n\t\t\t\tMode: fuse.S_IFDIR})\n\t}\n\n\treturn partitionDirs, fuse.OK\n}\n\nfunc (fs *KafkaRoFs) Open(name string, flags uint32,\n\tcontext *fuse.Context) (nodefs.File, fuse.Status) {\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\tif !parsed.IsValid || parsed.Offset == -1 {\n\t\treturn nil, fuse.ENOENT\n\t}\n\n\tif flags & fuse.O_ANYWRITE != 0 {\n\t\treturn nil, fuse.EPERM\n\t}\n\n\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\tparsed.Offset)\n\tif err != nil {\n\t\treturn nil, fuse.EIO\n\t}\n\n\treturn nodefs.NewDataFile(msgBytes), fuse.OK\n}\n\nfunc (fs *KafkaRoFs) Mknod(name string, mode uint32, dev uint32,\n\tcontext *fuse.Context) fuse.Status {\n\n\tparsed, err := fs.parseAndValidate(name)\n\tif err != nil {\n\t\treturn fuse.EIO\n\t}\n\n\tif !parsed.IsValid || parsed.Offset == -1 {\n\t\treturn fuse.ENOENT\n\t}\n\n\t\/\/ we don't want to block waiting on an offset that is in the past\n\t\/\/ and no longer available from kafka\n\n\tnE := func() (bool, error) {\n\t\treturn fs.offsetNotExpired(parsed.Topic, parsed.Partition, parsed.Offset)\n\t}\n\n\tfor notExpired, err := nE(); notExpired; notExpired, err = nE() {\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tisFuture, err := fs.offsetIsFuture(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tif isFuture {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgBytes, err := fs.KafkaClient.GetMessage(parsed.Topic, parsed.Partition,\n\t\t\tparsed.Offset)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\tif msgBytes == nil {\n\t\t\t\/\/ this shouldn't happen unless the messages are expiring\n\t\t\t\/\/ very fast, but who knows\n\t\t\treturn fuse.ENOENT\n\t\t}\n\n\t\terr = fs.addUserFile(parsed.Topic, parsed.Partition, parsed.Offset, msgBytes)\n\t\tif err != nil {\n\t\t\treturn fuse.EIO\n\t\t}\n\n\t\treturn fuse.OK\n\t}\n\n\treturn fuse.ENOENT\n}\n\nfunc (fs *KafkaRoFs) addUserFile(topic string, partition int32, offset int64,\n\tmsgBytes []byte) error {\n\tpath := fmt.Sprintf(\"%s\/%d\/%d\", topic, partition, offset)\n\tfs.userFiles[path] = true\n\treturn nil\n}\n\nfunc (fs *KafkaRoFs) offsetNotExpired(topic string, partition int32,\n\toffset int64) (bool, error) {\n\tearliest, _, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn offset >= earliest, nil\n}\n\nfunc (fs *KafkaRoFs) offsetIsFuture(topic string, partition int32,\n\toffset int64) (bool, error) {\n\t_, next, err := fs.KafkaClient.GetBoundingOffsets(topic, partition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn offset >= next, nil\n}\n\n\/\/ just pretend we set the times to keep touch happy\nfunc (fs *KafkaRoFs) Utimens(name string, Atime *time.Time, Mtime *time.Time,\n\tcontext *fuse.Context) fuse.Status {\n\treturn fuse.OK\n}\n\nfunc NewKafkaRoFs(kClient KafkaClient) *KafkaRoFs {\n\treturn &KafkaRoFs{FileSystem: pathfs.NewDefaultFileSystem(), KafkaClient: kClient,\n\t\tuserFiles: make(map[string]bool)}\n}\n\nfunc (fs *KafkaRoFs) Unlink(name string, context *fuse.Context) fuse.Status {\n\t_, ok := fs.userFiles[name]\n\tif !ok {\n\t\treturn fuse.EPERM\n\t}\n\tdelete(fs.userFiles, name)\n\treturn fuse.OK\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype token string\n\nfunc tokenize(ior io.Reader, c chan<- token) {\n\t\/\/ Tokenizer states\n\tconst (\n\t\tREADY = iota\n\t\tREADING\n\t\tSTRLIT\n\t)\n\n\t\/\/ Single-rune tokens\n\tconst TOKS = \"()\"\n\tconst WS = \" \\t\\n\"\n\tconst SPLIT = TOKS + WS\n\n\tr := bufio.NewReader(ior)\n\n\tstate := READY\n\tvar tmp bytes.Buffer\n\n\tch, _, err := r.ReadRune()\n\tfor err == nil {\n\t\tswitch state {\n\t\tcase READY:\n\t\t\t\/\/ c either begins or is a token\n\t\t\tif strings.ContainsRune(TOKS, ch) {\n\t\t\t\tc <- token(ch)\n\t\t\t} else if strings.ContainsRune(WS, ch) {\n\t\t\t\t\/\/ whitespace; ignore it\n\t\t\t} else if ch == '\"' {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t\tstate = STRLIT\n\t\t\t} else {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t\tstate = READING\n\t\t\t}\n\t\tcase READING:\n\t\t\tif strings.ContainsRune(SPLIT, ch) {\n\t\t\t\t\/\/ the current token is done\n\t\t\t\tc <- token(tmp.String())\n\t\t\t\ttmp.Reset()\n\t\t\t\tstate = READY\n\t\t\t\tr.UnreadRune()\n\t\t\t} else {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t}\n\t\tcase STRLIT:\n\t\t\ttmp.WriteRune(ch)\n\t\t\tif ch == '\"' {\n\t\t\t\tc <- token(tmp.String())\n\t\t\t\ttmp.Reset()\n\t\t\t\tstate = READY\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Invalid state\")\n\t\t}\n\t\tch, _, err = r.ReadRune()\n\t}\n\n\tclose(c)\n\tif err != nil && err != io.EOF {\n\t\tpanic(err)\n\t}\n}\n\ntype sexpr interface{}\ntype atom interface{}\ntype sym string\n\ntype cons struct {\n\tcar sexpr\n\tcdr sexpr\n}\n\nvar Nil = interface{}(nil)\n\n\/\/ hard tokens\nconst (\n\t_LPAREN = \"(\"\n\t_RPAREN = \")\"\n)\n\nfunc parse(tc <-chan token, sc chan<- sexpr) {\n\tfor tok := range tc {\n\t\tsc <- parseNext(tok, tc)\n\t}\n\tclose(sc)\n}\n\nfunc parseNext(tok token, tc <-chan token) sexpr {\n\tswitch tok {\n\tcase _LPAREN:\n\t\treturn parseCons(tc)\n\tcase _RPAREN:\n\t\tpanic(\"Unmatched ')'\")\n\t}\n\treturn parseAtom(tok)\n}\n\nfunc parseCons(tc <-chan token) sexpr {\n\t\/\/ note that we assume the LPAREN has already been read\n\ttok := <-tc\n\tif tok == _RPAREN {\n\t\t\/\/ nil atom\n\t\treturn Nil\n\t}\n\tcar := parseNext(tok, tc)\n\tcdr := parseCons(tc)\n\treturn cons{car, cdr}\n}\n\nfunc parseAtom(tok token) (e sexpr) {\n\te = sym(tok)\n\n\t\/\/ try as string literal\n\tif tok[0] == '\"' {\n\t\te = string(tok[1 : len(tok)-1])\n\t}\n\n\t\/\/ try as number\n\tn, err := strconv.ParseFloat(string(tok), 64)\n\tif err == nil {\n\t\te = n\n\t}\n\treturn\n}\n<commit_msg>Comments<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype token string\n\nfunc tokenize(ior io.Reader, c chan<- token) {\n\t\/\/ Tokenizer states\n\tconst (\n\t\tREADY = iota\n\t\tREADING\n\t\tSTRLIT\n\t\tCOMMENT\n\t)\n\n\t\/\/ Single-rune tokens\n\tconst TOKS = \"()\"\n\tconst WS = \" \\t\\n\"\n\tconst SPLIT = TOKS + WS + \";\"\n\n\tr := bufio.NewReader(ior)\n\n\tstate := READY\n\tvar tmp bytes.Buffer\n\n\tch, _, err := r.ReadRune()\n\tfor err == nil {\n\t\tswitch state {\n\t\tcase READY:\n\t\t\t\/\/ c either begins or is a token\n\t\t\tif strings.ContainsRune(TOKS, ch) {\n\t\t\t\tc <- token(ch)\n\t\t\t} else if strings.ContainsRune(WS, ch) {\n\t\t\t\t\/\/ whitespace; ignore it\n\t\t\t} else if ch == ';' {\n\t\t\t\t\/\/ read to EOL\n\t\t\t\tstate = COMMENT\n\t\t\t} else if ch == '\"' {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t\tstate = STRLIT\n\t\t\t} else {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t\tstate = READING\n\t\t\t}\n\t\tcase READING:\n\t\t\tif strings.ContainsRune(SPLIT, ch) {\n\t\t\t\t\/\/ the current token is done\n\t\t\t\tc <- token(tmp.String())\n\t\t\t\ttmp.Reset()\n\t\t\t\tstate = READY\n\t\t\t\tr.UnreadRune()\n\t\t\t} else {\n\t\t\t\ttmp.WriteRune(ch)\n\t\t\t}\n\t\tcase STRLIT:\n\t\t\ttmp.WriteRune(ch)\n\t\t\tif ch == '\"' {\n\t\t\t\tc <- token(tmp.String())\n\t\t\t\ttmp.Reset()\n\t\t\t\tstate = READY\n\t\t\t}\n\t\tcase COMMENT:\n\t\t\tif ch == '\\n' {\n\t\t\t\tstate = READY\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Invalid state\")\n\t\t}\n\t\tch, _, err = r.ReadRune()\n\t}\n\n\tclose(c)\n\tif err != nil && err != io.EOF {\n\t\tpanic(err)\n\t}\n}\n\ntype sexpr interface{}\ntype atom interface{}\ntype sym string\n\ntype cons struct {\n\tcar sexpr\n\tcdr sexpr\n}\n\nvar Nil = interface{}(nil)\n\n\/\/ hard tokens\nconst (\n\t_LPAREN = \"(\"\n\t_RPAREN = \")\"\n)\n\nfunc parse(tc <-chan token, sc chan<- sexpr) {\n\tfor tok := range tc {\n\t\tsc <- parseNext(tok, tc)\n\t}\n\tclose(sc)\n}\n\nfunc parseNext(tok token, tc <-chan token) sexpr {\n\tswitch tok {\n\tcase _LPAREN:\n\t\treturn parseCons(tc)\n\tcase _RPAREN:\n\t\tpanic(\"Unmatched ')'\")\n\t}\n\treturn parseAtom(tok)\n}\n\nfunc parseCons(tc <-chan token) sexpr {\n\t\/\/ note that we assume the LPAREN has already been read\n\ttok := <-tc\n\tif tok == _RPAREN {\n\t\t\/\/ nil atom\n\t\treturn Nil\n\t}\n\tcar := parseNext(tok, tc)\n\tcdr := parseCons(tc)\n\treturn cons{car, cdr}\n}\n\nfunc parseAtom(tok token) (e sexpr) {\n\te = sym(tok)\n\n\t\/\/ try as string literal\n\tif tok[0] == '\"' {\n\t\te = string(tok[1 : len(tok)-1])\n\t}\n\n\t\/\/ try as number\n\tn, err := strconv.ParseFloat(string(tok), 64)\n\tif err == nil {\n\t\te = n\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package carton\n\nimport (\n\t\"bytes\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/megamsys\/libgo\/cmd\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"io\"\n\t\"time\"\n)\n\ntype DestroyOpts struct {\n\tB *provision.Box\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc Destroy(opts *DestroyOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].Destroy(opts.B, writer)\n\telapsed := time.Since(start)\n\t\/\/ saveErr := saveDestroyedData(opts, outBuffer.String(), elapsed, err)\n\t\/\/ if saveErr != nil {\n\t\/\/ \tlog.Errorf(\"WARNING: couldn't save destroyed data, destroy opts: %#v\", opts)\n\t\/\/ }\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc saveDestroyedData(opts *DestroyOpts, slog string, duration time.Duration, destroyError error) error {\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(duration.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\tif destroyError == nil {\n\t\tmarkDeploysAsRemoved(opts)\n\t}\n\treturn nil\n}\n\nfunc markDeploysAsRemoved(opts *DestroyOpts) {\n\tremovedAssemblys := make([]string, 1)\n\n\tif _, err := NewAssembly(opts.B.CartonId,opts.B.AccountId,opts.B.OrgId); err == nil {\n\t\tremovedAssemblys[0] = opts.B.CartonId\n\t}\n\n\tif opts.B.Level == provision.BoxSome {\n\t\tif comp, err := NewComponent(opts.B.Id,opts.B.AccountId,opts.B.OrgId); err == nil {\n\t\t\tcomp.Delete(opts.B.AccountId,opts.B.OrgId)\n\t\t}\n\t}\n\n\tif asms, err := Get(opts.B.CartonsId, opts.B.AccountId); err == nil {\n\t\tasms.Delete(opts.B.CartonsId, opts.B.AccountId, removedAssemblys)\n\t}\n\n}\n<commit_msg>print duration after delete<commit_after>package carton\n\nimport (\n\t\"bytes\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/megamsys\/libgo\/cmd\"\n\t\"github.com\/megamsys\/vertice\/provision\"\n\t\"io\"\n\t\"time\"\n)\n\ntype DestroyOpts struct {\n\tB *provision.Box\n}\n\n\/\/ ChangeState runs a state increment of a machine or a container.\nfunc Destroy(opts *DestroyOpts) error {\n\tvar outBuffer bytes.Buffer\n\tstart := time.Now()\n\tlogWriter := LogWriter{Box: opts.B}\n\tlogWriter.Async()\n\tdefer logWriter.Close()\n\twriter := io.MultiWriter(&outBuffer, &logWriter)\n\terr := ProvisionerMap[opts.B.Provider].Destroy(opts.B, writer)\n\telapsed := time.Since(start)\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(elapsed.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(outBuffer.String(), \"yellow\", \"\", \"\"))\n\t\/\/ saveErr := saveDestroyedData(opts, outBuffer.String(), elapsed, err)\n\t\/\/ if saveErr != nil {\n\t\/\/ \tlog.Errorf(\"WARNING: couldn't save destroyed data, destroy opts: %#v\", opts)\n\t\/\/ }\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc saveDestroyedData(opts *DestroyOpts, slog string, duration time.Duration, destroyError error) error {\n\tlog.Debugf(\"%s in (%s)\\n%s\",\n\t\tcmd.Colorfy(opts.B.GetFullName(), \"cyan\", \"\", \"bold\"),\n\t\tcmd.Colorfy(duration.String(), \"green\", \"\", \"bold\"),\n\t\tcmd.Colorfy(slog, \"yellow\", \"\", \"\"))\n\tif destroyError == nil {\n\t\tmarkDeploysAsRemoved(opts)\n\t}\n\treturn nil\n}\n\nfunc markDeploysAsRemoved(opts *DestroyOpts) {\n\tremovedAssemblys := make([]string, 1)\n\n\tif _, err := NewAssembly(opts.B.CartonId,opts.B.AccountId,opts.B.OrgId); err == nil {\n\t\tremovedAssemblys[0] = opts.B.CartonId\n\t}\n\n\tif opts.B.Level == provision.BoxSome {\n\t\tif comp, err := NewComponent(opts.B.Id,opts.B.AccountId,opts.B.OrgId); err == nil {\n\t\t\tcomp.Delete(opts.B.AccountId,opts.B.OrgId)\n\t\t}\n\t}\n\n\tif asms, err := Get(opts.B.CartonsId, opts.B.AccountId); err == nil {\n\t\tasms.Delete(opts.B.CartonsId, opts.B.AccountId, removedAssemblys)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package walnut\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst whitespace = \" \\t\\n\\v\\f\\r\\u0085\\u00A0\"\n\n\/\/ Outlines a \"key = value\" assignment.\ntype assignment struct {\n\tkey, value string\n\tline int\n}\n\n\/\/ Provides information about an indentation syntax error.\ntype indentError struct {\n\terror string\n\tline int\n}\n\n\/\/ Returns a description of the error.\nfunc (e indentError) Error() string {\n\treturn e.error\n}\n\n\/\/ Generates a map of resolved keys and raw string values from a byte slice.\n\/\/ Returns an error if the configuration source is not properly indented.\nfunc parse(buf []byte) ([]assignment, *indentError) {\n\tlines := strings.Split(string(buf), \"\\n\")\n\traw := make([]assignment, 0)\n\n\t\/\/ collapse lines without any content\n\tfor i, line := range lines {\n\t\tlines[i] = collapse(line)\n\t}\n\n\tparents := make([]string, 0)\n\tindents := make([]string, 0)\n\n\tfor n, line := range lines {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tk := key(line)\n\t\ti := indentation(line)\n\t\td := depth(indents, i)\n\n\t\t\/\/ check for invalid indentation\n\t\tif d == -1 {\n\t\t\te := fmt.Sprintf(\"invalid indentation on line %d\", n)\n\t\t\treturn nil, &indentError{e, n}\n\t\t}\n\n\t\t\/\/ trim now redundant levels\n\t\tif d < len(indents) {\n\t\t\tparents = parents[:d]\n\t\t\tindents = indents[:d]\n\t\t}\n\n\t\t\/\/ if the line contains an assignment, record the the value\n\t\tif strings.ContainsRune(line, '=') {\n\t\t\traw = append(raw, assignment{\n\t\t\t\tkey: strings.Join(append(parents, k), \".\"),\n\t\t\t\tvalue: value(line),\n\t\t\t\tline: n,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ push the key and indentation onto their respective stacks\n\t\tparents = append(parents, k)\n\t\tindents = append(indents, i)\n\t}\n\n\treturn raw, nil\n}\n\n\/\/ Trims trailing whitespace or, in the case of comment lines, returns\n\/\/ an empty string.\nfunc collapse(input string) string {\n\ts := strings.TrimRight(input, whitespace)\n\n\tfor _, r := range s {\n\t\tif strings.ContainsRune(whitespace, r) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ comment detected, blank this line\n\t\tif r == '#' {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the first non-whitespace character @todo\n\t\treturn input\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Returns the \"key\" component from a \"key = value\" string.\nfunc key(input string) string {\n\tif eq := strings.IndexRune(input, '='); eq != -1 {\n\t\tinput = input[:eq]\n\t}\n\n\treturn strings.Trim(input, whitespace)\n}\n\n\/\/ Returns the \"value\" component from a \"key = value\" string.\nfunc value(input string) string {\n\tif eq := strings.IndexRune(input, '='); eq != -1 {\n\t\tinput = input[eq+1:]\n\t}\n\n\treturn strings.Trim(input, whitespace)\n}\n\n\/\/ Returns the string's whitespace prefix.\nfunc indentation(input string) string {\n\tend := strings.IndexFunc(input, func(r rune) bool {\n\t\treturn strings.IndexRune(whitespace, r) == -1\n\t})\n\n\tif end == -1 {\n\t\treturn \"\"\n\t}\n\n\treturn input[:end]\n}\n\n\/\/ Given a list of previous indentation levels, finds the provided indentation\n\/\/ level's depth value. A depth of 0 represents the lowest possible level of\n\/\/ indentation.\n\/\/ Returns -1 on errors caused by illegal indentation.\nfunc depth(parents []string, current string) int {\n\tif current == \"\" {\n\t\treturn 0\n\t}\n\n\t\/\/ the base indentation level must be an empty string\n\tif len(parents) == 0 {\n\t\treturn -1\n\t}\n\n\tfor i, prefix := range parents {\n\t\tswitch {\n\t\tcase current == prefix:\n\t\t\treturn i\n\t\tcase !strings.HasPrefix(current, prefix):\n\t\t\treturn -1\n\t\t}\n\t}\n\n\t\/\/ if we get this far, the current line is further indented\n\t\/\/ than its parent\n\treturn len(parents)\n}\n<commit_msg>Rename the 'assignment' struct to 'definition'<commit_after>package walnut\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst whitespace = \" \\t\\n\\v\\f\\r\\u0085\\u00A0\"\n\n\/\/ Outlines a \"key = value\" assignment.\ntype definition struct {\n\tkey, value string\n\tline int\n}\n\n\/\/ Provides information about an indentation syntax error.\ntype indentError struct {\n\terror string\n\tline int\n}\n\n\/\/ Returns a description of the error.\nfunc (e indentError) Error() string {\n\treturn e.error\n}\n\n\/\/ Generates a map of resolved keys and raw string values from a byte slice.\n\/\/ Returns an error if the configuration source is not properly indented.\nfunc parse(buf []byte) ([]definition, *indentError) {\n\tlines := strings.Split(string(buf), \"\\n\")\n\traw := make([]definition, 0)\n\n\t\/\/ collapse lines without any content\n\tfor i, line := range lines {\n\t\tlines[i] = collapse(line)\n\t}\n\n\tparents := make([]string, 0)\n\tindents := make([]string, 0)\n\n\tfor n, line := range lines {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tk := key(line)\n\t\ti := indentation(line)\n\t\td := depth(indents, i)\n\n\t\t\/\/ check for invalid indentation\n\t\tif d == -1 {\n\t\t\te := fmt.Sprintf(\"invalid indentation on line %d\", n)\n\t\t\treturn nil, &indentError{e, n}\n\t\t}\n\n\t\t\/\/ trim now redundant levels\n\t\tif d < len(indents) {\n\t\t\tparents = parents[:d]\n\t\t\tindents = indents[:d]\n\t\t}\n\n\t\t\/\/ if the line contains an assignment, record the the value\n\t\tif strings.ContainsRune(line, '=') {\n\t\t\traw = append(raw, definition{\n\t\t\t\tkey: strings.Join(append(parents, k), \".\"),\n\t\t\t\tvalue: value(line),\n\t\t\t\tline: n,\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ push the key and indentation onto their respective stacks\n\t\tparents = append(parents, k)\n\t\tindents = append(indents, i)\n\t}\n\n\treturn raw, nil\n}\n\n\/\/ Trims trailing whitespace or, in the case of comment lines, returns\n\/\/ an empty string.\nfunc collapse(input string) string {\n\ts := strings.TrimRight(input, whitespace)\n\n\tfor _, r := range s {\n\t\tif strings.ContainsRune(whitespace, r) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ comment detected, blank this line\n\t\tif r == '#' {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ if the first non-whitespace character @todo\n\t\treturn input\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Returns the \"key\" component from a \"key = value\" string.\nfunc key(input string) string {\n\tif eq := strings.IndexRune(input, '='); eq != -1 {\n\t\tinput = input[:eq]\n\t}\n\n\treturn strings.Trim(input, whitespace)\n}\n\n\/\/ Returns the \"value\" component from a \"key = value\" string.\nfunc value(input string) string {\n\tif eq := strings.IndexRune(input, '='); eq != -1 {\n\t\tinput = input[eq+1:]\n\t}\n\n\treturn strings.Trim(input, whitespace)\n}\n\n\/\/ Returns the string's whitespace prefix.\nfunc indentation(input string) string {\n\tend := strings.IndexFunc(input, func(r rune) bool {\n\t\treturn strings.IndexRune(whitespace, r) == -1\n\t})\n\n\tif end == -1 {\n\t\treturn \"\"\n\t}\n\n\treturn input[:end]\n}\n\n\/\/ Given a list of previous indentation levels, finds the provided indentation\n\/\/ level's depth value. A depth of 0 represents the lowest possible level of\n\/\/ indentation.\n\/\/ Returns -1 on errors caused by illegal indentation.\nfunc depth(parents []string, current string) int {\n\tif current == \"\" {\n\t\treturn 0\n\t}\n\n\t\/\/ the base indentation level must be an empty string\n\tif len(parents) == 0 {\n\t\treturn -1\n\t}\n\n\tfor i, prefix := range parents {\n\t\tswitch {\n\t\tcase current == prefix:\n\t\t\treturn i\n\t\tcase !strings.HasPrefix(current, prefix):\n\t\t\treturn -1\n\t\t}\n\t}\n\n\t\/\/ if we get this far, the current line is further indented\n\t\/\/ than its parent\n\treturn len(parents)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package parse provides a client for the Parse API.\npackage parse\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/facebookgo\/httperr\"\n)\n\nconst (\n\tuserAgentHeader = \"User-Agent\"\n\tuserAgent = \"go-parse-2015-01-31\"\n\tmasterKeyHeader = \"X-Parse-Master-Key\"\n\trestAPIKeyHeader = \"X-Parse-REST-API-Key\"\n\tsessionTokenHeader = \"X-Parse-Session-Token\"\n\tapplicationIDHeader = \"X-Parse-Application-Id\"\n)\n\nvar (\n\terrEmptyMasterKey = errors.New(\"parse: cannot use empty MasterKey Credentials\")\n\terrEmptyRestAPIKey = errors.New(\"parse: cannot use empty RestAPIKey Credentials\")\n\terrEmptySessionToken = errors.New(\"parse: cannot use empty SessionToken Credentials\")\n\n\t\/\/ The default base URL for the API.\n\tdefaultBaseURL = &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"api.parse.com\",\n\t\tPath: \"\/1\/\",\n\t}\n)\n\n\/\/ Credentials allows for adding authentication information to a request.\ntype Credentials interface {\n\tModify(r *http.Request) error\n}\n\n\/\/ MasterKey adds the Master Key to the request.\ntype MasterKey string\n\n\/\/ Modify adds the Master Key header.\nfunc (m MasterKey) Modify(r *http.Request) error {\n\tif m == \"\" {\n\t\treturn errEmptyMasterKey\n\t}\n\tr.Header.Set(masterKeyHeader, string(m))\n\treturn nil\n}\n\n\/\/ RestAPIKey adds the Rest API Key to the request.\ntype RestAPIKey string\n\n\/\/ Modify adds the Rest API Key header.\nfunc (k RestAPIKey) Modify(r *http.Request) error {\n\tif k == \"\" {\n\t\treturn errEmptyRestAPIKey\n\t}\n\tr.Header.Set(restAPIKeyHeader, string(k))\n\treturn nil\n}\n\n\/\/ SessionToken adds the Rest API Key and the Session Token to the request.\ntype SessionToken struct {\n\tRestAPIKey string\n\tSessionToken string\n}\n\n\/\/ Modify adds the Session Token header.\nfunc (t SessionToken) Modify(r *http.Request) error {\n\tif t.RestAPIKey == \"\" {\n\t\treturn errEmptyRestAPIKey\n\t}\n\tif t.SessionToken == \"\" {\n\t\treturn errEmptySessionToken\n\t}\n\tr.Header.Set(restAPIKeyHeader, string(t.RestAPIKey))\n\tr.Header.Set(sessionTokenHeader, string(t.SessionToken))\n\treturn nil\n}\n\n\/\/ An Error from the Parse API.\ntype Error struct {\n\t\/\/ These are provided by the Parse API and may not always be available.\n\tMessage string `json:\"error\"`\n\tCode int `json:\"code\"`\n\n\t\/\/ This is the HTTP StatusCode.\n\tStatusCode int `json:\"-\"`\n\n\trequest *http.Request\n\tresponse *http.Response\n}\n\nfunc (e *Error) Error() string {\n\tvar buf bytes.Buffer\n\tif e.Code != 0 {\n\t\tfmt.Fprintf(&buf, \"code %d\", e.Code)\n\t}\n\tif e.Code != 0 && e.Message != \"\" {\n\t\tfmt.Fprint(&buf, \" and \")\n\t}\n\tif e.Message != \"\" {\n\t\tfmt.Fprintf(&buf, \"message %s\", e.Message)\n\t}\n\treturn httperr.NewError(\n\t\terrors.New(buf.String()),\n\t\thttperr.RedactNoOp(),\n\t\te.request,\n\t\te.response,\n\t).Error()\n}\n\n\/\/ Client provides access to the Parse API.\ntype Client struct {\n\t\/\/ The underlying http.RoundTripper to perform the individual requests. When\n\t\/\/ nil http.DefaultTransport will be used.\n\tTransport http.RoundTripper\n\n\t\/\/ The base URL to parse relative URLs off. If you pass absolute URLs to\n\t\/\/ Client functions they are used as-is. When nil, the production Parse URL\n\t\/\/ will be used.\n\tBaseURL *url.URL\n\n\t\/\/ Application ID will be included if not empty.\n\tApplicationID string\n\n\t\/\/ Credentials if set, will be included on every request.\n\tCredentials Credentials\n}\n\nfunc (c *Client) transport() http.RoundTripper {\n\tif c.Transport == nil {\n\t\treturn http.DefaultTransport\n\t}\n\treturn c.Transport\n}\n\n\/\/ Get performs a GET method call on the given url and unmarshal response into\n\/\/ result.\nfunc (c *Client) Get(u *url.URL, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"GET\", URL: u}, nil, result)\n}\n\n\/\/ Post performs a POST method call on the given url with the given body and\n\/\/ unmarshal response into result.\nfunc (c *Client) Post(u *url.URL, body, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"POST\", URL: u}, body, result)\n}\n\n\/\/ Put performs a PUT method call on the given url with the given body and\n\/\/ unmarshal response into result.\nfunc (c *Client) Put(u *url.URL, body, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"PUT\", URL: u}, body, result)\n}\n\n\/\/ Delete performs a DELETE method call on the given url and unmarshal response\n\/\/ into result.\nfunc (c *Client) Delete(u *url.URL, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"DELETE\", URL: u}, nil, result)\n}\n\n\/\/ Do performs a Parse API call. This method modifies the request and adds the\n\/\/ Authentication headers. The body is JSON encoded and for responses in the\n\/\/ 2xx or 3xx range the response will be JSON decoded into result, for others\n\/\/ an error of type Error will be returned.\nfunc (c *Client) Do(req *http.Request, body, result interface{}) (*http.Response, error) {\n\treq.Proto = \"HTTP\/1.1\"\n\treq.ProtoMajor = 1\n\treq.ProtoMinor = 1\n\n\tif req.URL == nil {\n\t\tif c.BaseURL == nil {\n\t\t\treq.URL = defaultBaseURL\n\t\t} else {\n\t\t\treq.URL = c.BaseURL\n\t\t}\n\t} else {\n\t\tif !req.URL.IsAbs() {\n\t\t\tif c.BaseURL == nil {\n\t\t\t\treq.URL = defaultBaseURL.ResolveReference(req.URL)\n\t\t\t} else {\n\t\t\t\treq.URL = c.BaseURL.ResolveReference(req.URL)\n\t\t\t}\n\t\t}\n\t}\n\n\tif req.Host == \"\" {\n\t\treq.Host = req.URL.Host\n\t}\n\n\tif req.Header == nil {\n\t\treq.Header = make(http.Header)\n\t}\n\n\treq.Header.Add(userAgentHeader, userAgent)\n\tif c.ApplicationID != \"\" {\n\t\treq.Header.Add(applicationIDHeader, c.ApplicationID)\n\t}\n\n\tif c.Credentials != nil {\n\t\tif err := c.Credentials.Modify(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ we need to buffer as Parse requires a Content-Length\n\tif body != nil {\n\t\tbd, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn nil, httperr.NewError(err, httperr.RedactNoOp(), req, nil)\n\t\t}\n\t\treq.Body = ioutil.NopCloser(bytes.NewReader(bd))\n\t\treq.ContentLength = int64(len(bd))\n\t}\n\n\tres, err := c.transport().RoundTrip(req)\n\tif err != nil {\n\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode > 399 || res.StatusCode < 200 {\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t}\n\n\t\tapiErr := &Error{\n\t\t\tStatusCode: res.StatusCode,\n\t\t\trequest: req,\n\t\t\tresponse: res,\n\t\t}\n\t\tif len(body) > 0 {\n\t\t\terr = json.Unmarshal(body, apiErr)\n\t\t\tif err != nil {\n\t\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t\t}\n\t\t}\n\t\treturn res, apiErr\n\t}\n\n\tif result != nil {\n\t\tif err := json.NewDecoder(res.Body).Decode(result); err != nil {\n\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t}\n\t}\n\treturn res, nil\n}\n\n\/\/ WithCredentials returns a new instance of the Client using the given\n\/\/ Credentials. It discards the previous Credentials.\nfunc (c *Client) WithCredentials(cr Credentials) *Client {\n\tvar c2 Client\n\tc2 = *c\n\tc2.Credentials = cr\n\treturn &c2\n}\n<commit_msg>split RoundTrip method to allow for more complex scenarios<commit_after>\/\/ Package parse provides a client for the Parse API.\npackage parse\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/facebookgo\/httperr\"\n)\n\nconst (\n\tuserAgentHeader = \"User-Agent\"\n\tuserAgent = \"go-parse-2015-01-31\"\n\tmasterKeyHeader = \"X-Parse-Master-Key\"\n\trestAPIKeyHeader = \"X-Parse-REST-API-Key\"\n\tsessionTokenHeader = \"X-Parse-Session-Token\"\n\tapplicationIDHeader = \"X-Parse-Application-Id\"\n)\n\nvar (\n\terrEmptyMasterKey = errors.New(\"parse: cannot use empty MasterKey Credentials\")\n\terrEmptyRestAPIKey = errors.New(\"parse: cannot use empty RestAPIKey Credentials\")\n\terrEmptySessionToken = errors.New(\"parse: cannot use empty SessionToken Credentials\")\n\n\t\/\/ The default base URL for the API.\n\tdefaultBaseURL = &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"api.parse.com\",\n\t\tPath: \"\/1\/\",\n\t}\n)\n\n\/\/ Credentials allows for adding authentication information to a request.\ntype Credentials interface {\n\tModify(r *http.Request) error\n}\n\n\/\/ MasterKey adds the Master Key to the request.\ntype MasterKey string\n\n\/\/ Modify adds the Master Key header.\nfunc (m MasterKey) Modify(r *http.Request) error {\n\tif m == \"\" {\n\t\treturn errEmptyMasterKey\n\t}\n\tr.Header.Set(masterKeyHeader, string(m))\n\treturn nil\n}\n\n\/\/ RestAPIKey adds the Rest API Key to the request.\ntype RestAPIKey string\n\n\/\/ Modify adds the Rest API Key header.\nfunc (k RestAPIKey) Modify(r *http.Request) error {\n\tif k == \"\" {\n\t\treturn errEmptyRestAPIKey\n\t}\n\tr.Header.Set(restAPIKeyHeader, string(k))\n\treturn nil\n}\n\n\/\/ SessionToken adds the Rest API Key and the Session Token to the request.\ntype SessionToken struct {\n\tRestAPIKey string\n\tSessionToken string\n}\n\n\/\/ Modify adds the Session Token header.\nfunc (t SessionToken) Modify(r *http.Request) error {\n\tif t.RestAPIKey == \"\" {\n\t\treturn errEmptyRestAPIKey\n\t}\n\tif t.SessionToken == \"\" {\n\t\treturn errEmptySessionToken\n\t}\n\tr.Header.Set(restAPIKeyHeader, string(t.RestAPIKey))\n\tr.Header.Set(sessionTokenHeader, string(t.SessionToken))\n\treturn nil\n}\n\n\/\/ An Error from the Parse API.\ntype Error struct {\n\t\/\/ These are provided by the Parse API and may not always be available.\n\tMessage string `json:\"error\"`\n\tCode int `json:\"code\"`\n\n\t\/\/ This is the HTTP StatusCode.\n\tStatusCode int `json:\"-\"`\n\n\trequest *http.Request\n\tresponse *http.Response\n}\n\nfunc (e *Error) Error() string {\n\tvar buf bytes.Buffer\n\tif e.Code != 0 {\n\t\tfmt.Fprintf(&buf, \"code %d\", e.Code)\n\t}\n\tif e.Code != 0 && e.Message != \"\" {\n\t\tfmt.Fprint(&buf, \" and \")\n\t}\n\tif e.Message != \"\" {\n\t\tfmt.Fprintf(&buf, \"message %s\", e.Message)\n\t}\n\treturn httperr.NewError(\n\t\terrors.New(buf.String()),\n\t\thttperr.RedactNoOp(),\n\t\te.request,\n\t\te.response,\n\t).Error()\n}\n\n\/\/ Client provides access to the Parse API.\ntype Client struct {\n\t\/\/ The underlying http.RoundTripper to perform the individual requests. When\n\t\/\/ nil http.DefaultTransport will be used.\n\tTransport http.RoundTripper\n\n\t\/\/ The base URL to parse relative URLs off. If you pass absolute URLs to\n\t\/\/ Client functions they are used as-is. When nil, the production Parse URL\n\t\/\/ will be used.\n\tBaseURL *url.URL\n\n\t\/\/ Application ID will be included if not empty.\n\tApplicationID string\n\n\t\/\/ Credentials if set, will be included on every request.\n\tCredentials Credentials\n}\n\nfunc (c *Client) transport() http.RoundTripper {\n\tif c.Transport == nil {\n\t\treturn http.DefaultTransport\n\t}\n\treturn c.Transport\n}\n\n\/\/ Get performs a GET method call on the given url and unmarshal response into\n\/\/ result.\nfunc (c *Client) Get(u *url.URL, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"GET\", URL: u}, nil, result)\n}\n\n\/\/ Post performs a POST method call on the given url with the given body and\n\/\/ unmarshal response into result.\nfunc (c *Client) Post(u *url.URL, body, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"POST\", URL: u}, body, result)\n}\n\n\/\/ Put performs a PUT method call on the given url with the given body and\n\/\/ unmarshal response into result.\nfunc (c *Client) Put(u *url.URL, body, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"PUT\", URL: u}, body, result)\n}\n\n\/\/ Delete performs a DELETE method call on the given url and unmarshal response\n\/\/ into result.\nfunc (c *Client) Delete(u *url.URL, result interface{}) (*http.Response, error) {\n\treturn c.Do(&http.Request{Method: \"DELETE\", URL: u}, nil, result)\n}\n\n\/\/ RoundTrip performs a RoundTrip ignoring the request and response bodies. It\n\/\/ is up to the caller to close them. This method modifies the request.\nfunc (c *Client) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq.Proto = \"HTTP\/1.1\"\n\treq.ProtoMajor = 1\n\treq.ProtoMinor = 1\n\n\tif req.URL == nil {\n\t\tif c.BaseURL == nil {\n\t\t\treq.URL = defaultBaseURL\n\t\t} else {\n\t\t\treq.URL = c.BaseURL\n\t\t}\n\t} else {\n\t\tif !req.URL.IsAbs() {\n\t\t\tif c.BaseURL == nil {\n\t\t\t\treq.URL = defaultBaseURL.ResolveReference(req.URL)\n\t\t\t} else {\n\t\t\t\treq.URL = c.BaseURL.ResolveReference(req.URL)\n\t\t\t}\n\t\t}\n\t}\n\n\tif req.Host == \"\" {\n\t\treq.Host = req.URL.Host\n\t}\n\n\tif req.Header == nil {\n\t\treq.Header = make(http.Header)\n\t}\n\n\treq.Header.Add(userAgentHeader, userAgent)\n\tif c.ApplicationID != \"\" {\n\t\treq.Header.Add(applicationIDHeader, c.ApplicationID)\n\t}\n\n\tif c.Credentials != nil {\n\t\tif err := c.Credentials.Modify(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.transport().RoundTrip(req)\n\tif err != nil {\n\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t}\n\n\tif res.StatusCode > 399 || res.StatusCode < 200 {\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t}\n\n\t\tapiErr := &Error{\n\t\t\tStatusCode: res.StatusCode,\n\t\t\trequest: req,\n\t\t\tresponse: res,\n\t\t}\n\t\tif len(body) > 0 {\n\t\t\terr = json.Unmarshal(body, apiErr)\n\t\t\tif err != nil {\n\t\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t\t}\n\t\t}\n\t\treturn res, apiErr\n\t}\n\n\treturn res, nil\n}\n\n\/\/ Do performs a Parse API call. This method modifies the request and adds the\n\/\/ Authentication headers. The body is JSON encoded and for responses in the\n\/\/ 2xx or 3xx range the response will be JSON decoded into result, for others\n\/\/ an error of type Error will be returned.\nfunc (c *Client) Do(req *http.Request, body, result interface{}) (*http.Response, error) {\n\t\/\/ we need to buffer as Parse requires a Content-Length\n\tif body != nil {\n\t\tbd, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn nil, httperr.NewError(err, httperr.RedactNoOp(), req, nil)\n\t\t}\n\t\treq.Body = ioutil.NopCloser(bytes.NewReader(bd))\n\t\treq.ContentLength = int64(len(bd))\n\t}\n\n\tres, err := c.RoundTrip(req)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tdefer res.Body.Close()\n\n\tif result != nil {\n\t\tif err := json.NewDecoder(res.Body).Decode(result); err != nil {\n\t\t\treturn res, httperr.NewError(err, httperr.RedactNoOp(), req, res)\n\t\t}\n\t}\n\treturn res, nil\n}\n\n\/\/ WithCredentials returns a new instance of the Client using the given\n\/\/ Credentials. It discards the previous Credentials.\nfunc (c *Client) WithCredentials(cr Credentials) *Client {\n\tvar c2 Client\n\tc2 = *c\n\tc2.Credentials = cr\n\treturn &c2\n}\n<|endoftext|>"} {"text":"<commit_before>package catTracks\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/rotblauer\/trackpoints\/trackPoint\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n)\n\nvar punktlichTileDBPathRelHome = filepath.Join(\"punktlich.rotblauer.com\", \"tester.db\")\n\ntype LastKnown map[string]trackPoint.TrackPoint\ntype Metadata struct {\n\tKeyN int\n\tKeyNUpdated time.Time\n\tLastUpdatedAt time.Time\n\tLastUpdatedBy string\n\tLastUpdatedPointsN int\n\tTileDBLastUpdated time.Time\n}\n\nfunc getmetadata() (out []byte, err error) {\n\terr = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\t\tout = b.Get([]byte(\"metadata\"))\n\t\treturn nil\n\t})\n\treturn\n}\nfunc storemetadata(lastpoint trackPoint.TrackPoint, lenpointsupdated int) error {\n\tdb := GetDB()\n\te := db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\n\t\t\/\/ if not initialized, run the stats which takes a hot second\n\t\tvar keyN int\n\n\t\tvar tileDBLastUpdated time.Time\n\t\tvar homedir string\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\tlog.Println(\"get current user err\", err)\n\t\t\thomedir = os.Getenv(\"HOME\")\n\t\t} else {\n\t\t\thomedir = usr.HomeDir\n\t\t}\n\t\tdbpath := filepath.Join(homedir, punktlichTileDBPathRelHome)\n\t\tdbpath = filepath.Clean(dbpath)\n\t\tlog.Println(\"dbpath\", dbpath)\n\t\tdbfi, err := os.Stat(dbpath)\n\t\tif err == nil {\n\t\t\ttileDBLastUpdated = dbfi.ModTime()\n\t\t} else {\n\t\t\tlog.Println(\"err tile db path stat:\", err)\n\t\t}\n\n\t\tv := b.Get([]byte(\"metadata\"))\n\t\tmd := &Metadata{}\n\t\tvar keyNUpdated time.Time\n\n\t\tif v == nil {\n\t\t\tlog.Println(\"updating bucket stats key_n...\")\n\t\t\tkeyN = tx.Bucket([]byte(trackKey)).Stats().KeyN\n\t\t\tlog.Println(\"initialized metadata\", \"keyN:\", keyN)\n\t\t\tkeyNUpdated = time.Now().UTC()\n\t\t} else {\n\t\t\tif e := json.Unmarshal(v, md); e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\tif md != nil && (md.KeyNUpdated.IsZero() || time.Since(md.KeyNUpdated) > 24*time.Hour) {\n\t\t\tlog.Println(\"updating bucket stats key_n...\")\n\t\t\tlog.Println(\" because\", md == nil, md.KeyNUpdated, md.KeyNUpdated.IsZero(), time.Since(md.KeyNUpdated) > 24*time.Hour)\n\t\t\tkeyN = 180000000\n\t\t\t\/\/ keyN = tx.Bucket([]byte(trackKey)).Stats().KeyN\n\t\t\tlog.Println(\"updated metadata keyN:\", keyN)\n\t\t\tkeyNUpdated = time.Now().UTC()\n\t\t} else {\n\t\t\tlog.Println(\"dont update keyn\", md == nil, md.KeyNUpdated, md.KeyNUpdated.IsZero(), time.Since(md.KeyNUpdated) > 24*time.Hour)\n\t\t\tkeyN = md.KeyN + lenpointsupdated\n\t\t}\n\n\t\td := &Metadata{\n\t\t\tKeyN: keyN,\n\t\t\tLastUpdatedAt: time.Now().UTC(),\n\t\t\tLastUpdatedBy: lastpoint.Name,\n\t\t\tLastUpdatedPointsN: lenpointsupdated,\n\t\t\tTileDBLastUpdated: tileDBLastUpdated,\n\t\t}\n\t\tif !keyNUpdated.IsZero() {\n\t\t\td.KeyNUpdated = keyNUpdated\n\t\t} else {\n\t\t\td.KeyNUpdated = md.KeyNUpdated\n\t\t}\n\t\tby, e := json.Marshal(d)\n\t\tif e != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif e := b.Put([]byte(\"metadata\"), by); e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\treturn nil\n\t})\n\treturn e\n}\n\nfunc getLastKnownData() (out []byte, err error) {\n\terr = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\t\tout = b.Get([]byte(\"lastknown\"))\n\t\treturn nil\n\t})\n\treturn\n}\n\nfunc storeLastKnown(tp trackPoint.TrackPoint) {\n\t\/\/lastKnownMap[tp.Name] = tp\n\tlk := LastKnown{}\n\tif err := GetDB().Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\n\t\tv := b.Get([]byte(\"lastknown\"))\n\t\tif e := json.Unmarshal(v, &lk); e != nil {\n\t\t\tlog.Println(\"error unmarshalling nil lastknown\", tp)\n\t\t}\n\t\tlk[tp.Name] = tp\n\t\tif by, e := json.Marshal(lk); e == nil {\n\t\t\tif e := b.Put([]byte(\"lastknown\"), by); e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(\"err marshalling lastknown\", tp)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tlog.Println(\"error storing last known: %v\", err)\n\t} else {\n\t\tlog.Printf(\"stored last known: lk=%v\\ntp=%v\", lk, tp)\n\t}\n}\n\nfunc storePoints(trackPoints trackPoint.TrackPoints) error {\n\tvar err error\n\tfor _, point := range trackPoints {\n\t\terr = storePoint(point)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err == nil {\n\t\tl := len(trackPoints)\n\t\terr = storemetadata(trackPoints[l-1], l)\n\t\tstoreLastKnown(trackPoints[l-1])\n\t}\n\treturn err\n}\n\nfunc buildTrackpointKey(tp trackPoint.TrackPoint) []byte {\n\tif tp.Uuid == \"\" {\n\t\tif tp.ID != 0 {\n\t\t\treturn i64tob(tp.ID)\n\t\t}\n\t\treturn i64tob(tp.Time.UnixNano())\n\t}\n\t\/\/ have uuid\n\tk := []byte{}\n\tk = append(k, i64tob(tp.Time.UnixNano())...)\n\tk = append(k, []byte(tp.Uuid)...)\n\treturn k\n}\n\nfunc storePoint(tp trackPoint.TrackPoint) error {\n\n\tvar err error\n\tif tp.Time.IsZero() {\n\t\ttp.Time = time.Now()\n\t}\n\n\tGetDB().Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(trackKey))\n\n\t\t\/\/ id, _ := b.NextSequence()\n\t\t\/\/ trackPoint.ID = int(id)\n\n\t\ttp.ID = tp.Time.UnixNano() \/\/dunno if can really get nanoy, or if will just *1000.\n\n\t\tkey := buildTrackpointKey(tp)\n\n\t\tif exists := b.Get(key); exists != nil {\n\t\t\t\/\/ make sure it's ours\n\t\t\tvar existingTrackpoint trackPoint.TrackPoint\n\t\t\te := json.Unmarshal(exists, &existingTrackpoint)\n\t\t\tif e != nil {\n\t\t\t\tfmt.Println(\"Checking on an existing trackpoint and got an error with one of the existing trackpoints unmarshaling.\")\n\t\t\t}\n\t\t\tif existingTrackpoint.Name == tp.Name && existingTrackpoint.Uuid == tp.Uuid {\n\t\t\t\tfmt.Println(\"Got redundant track; not storing: \", tp.Name, tp.Uuid, tp.Time)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\t\/\/ gets \"\" case nontestesing\n\t\ttp.Name = getTestesPrefix() + tp.Name\n\n\t\ttrackPointJSON, err := json.Marshal(tp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = b.Put(key, trackPointJSON)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Didn't save post trackPoint in bolt.\", err)\n\t\t\treturn err\n\t\t}\n\t\t\/\/ p := quadtree.NewPoint(tp.Lat, tp.Lng, &tp)\n\t\t\/\/ if !GetQT().Insert(p) {\n\t\t\/\/ \tfmt.Println(\"Couldn't add to quadtree: \", p)\n\t\t\/\/ }\n\t\tfmt.Println(\"Saved trackpoint: \", tp)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn err\n}\n\nfunc getAllStoredPoints() (tps trackPoint.TPs, e error) {\n\tstart := time.Now()\n\n\te = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(trackKey))\n\n\t\t\/\/ can swap out for- eacher if we figure indexing, or even want it\n\t\tb.ForEach(func(trackPointKey, trackPointVal []byte) error {\n\n\t\t\tvar trackPointCurrent trackPoint.TrackPoint\n\t\t\terr := json.Unmarshal(trackPointVal, &trackPointCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttps = append(tps, &trackPointCurrent)\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\tfmt.Printf(\"Found %d points with iterator method - %s\\n\", len(tps), time.Since(start))\n\n\treturn tps, e\n}\n\n\/\/TODO make queryable ala which cat when\n\/\/ , channel chan *trackPoint.TrackPoint\nfunc getPointsQT(query *query) (tps trackPoint.TPs, err error) {\n\n\tif query == nil {\n\t\tquery = NewQuery()\n\t}\n\n\tquery.SetDefaults() \/\/ eps, lim catches empty vals\n\n\tif query.IsBounded() {\n\t\ttps = getPointsFromQT(query)\n\t} else {\n\t\ttps, err = getAllStoredPoints()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(tps) > query.Limit {\n\t\tlimitedTPs, err := limitTrackPoints(query, tps)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn tps, err\n\t\t}\n\t\ttps = limitedTPs\n\t}\n\n\tsort.Sort(tps)\n\n\treturn tps, err\n}\n<commit_msg>problem: should not use fake number<commit_after>package catTracks\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/rotblauer\/trackpoints\/trackPoint\"\n\t\"log\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n)\n\nvar punktlichTileDBPathRelHome = filepath.Join(\"punktlich.rotblauer.com\", \"tester.db\")\n\ntype LastKnown map[string]trackPoint.TrackPoint\ntype Metadata struct {\n\tKeyN int\n\tKeyNUpdated time.Time\n\tLastUpdatedAt time.Time\n\tLastUpdatedBy string\n\tLastUpdatedPointsN int\n\tTileDBLastUpdated time.Time\n}\n\nfunc getmetadata() (out []byte, err error) {\n\terr = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\t\tout = b.Get([]byte(\"metadata\"))\n\t\treturn nil\n\t})\n\treturn\n}\nfunc storemetadata(lastpoint trackPoint.TrackPoint, lenpointsupdated int) error {\n\tdb := GetDB()\n\te := db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\n\t\t\/\/ if not initialized, run the stats which takes a hot second\n\t\tvar keyN int\n\n\t\tvar tileDBLastUpdated time.Time\n\t\tvar homedir string\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\tlog.Println(\"get current user err\", err)\n\t\t\thomedir = os.Getenv(\"HOME\")\n\t\t} else {\n\t\t\thomedir = usr.HomeDir\n\t\t}\n\t\tdbpath := filepath.Join(homedir, punktlichTileDBPathRelHome)\n\t\tdbpath = filepath.Clean(dbpath)\n\t\tlog.Println(\"dbpath\", dbpath)\n\t\tdbfi, err := os.Stat(dbpath)\n\t\tif err == nil {\n\t\t\ttileDBLastUpdated = dbfi.ModTime()\n\t\t} else {\n\t\t\tlog.Println(\"err tile db path stat:\", err)\n\t\t}\n\n\t\tv := b.Get([]byte(\"metadata\"))\n\t\tmd := &Metadata{}\n\t\tvar keyNUpdated time.Time\n\n\t\tif v == nil {\n\t\t\tlog.Println(\"updating bucket stats key_n...\")\n\t\t\tkeyN = tx.Bucket([]byte(trackKey)).Stats().KeyN\n\t\t\tlog.Println(\"initialized metadata\", \"keyN:\", keyN)\n\t\t\tkeyNUpdated = time.Now().UTC()\n\t\t} else {\n\t\t\tif e := json.Unmarshal(v, md); e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\tif md != nil && (md.KeyNUpdated.IsZero() || time.Since(md.KeyNUpdated) > 24*time.Hour) {\n\t\t\tlog.Println(\"updating bucket stats key_n...\")\n\t\t\tlog.Println(\" because\", md == nil, md.KeyNUpdated, md.KeyNUpdated.IsZero(), time.Since(md.KeyNUpdated) > 24*time.Hour)\n\t\t\tkeyN = 0\n\t\t\t\/\/ keyN = tx.Bucket([]byte(trackKey)).Stats().KeyN\n\t\t\tlog.Println(\"updated metadata keyN:\", keyN)\n\t\t\tkeyNUpdated = time.Now().UTC()\n\t\t} else {\n\t\t\tlog.Println(\"dont update keyn\", md == nil, md.KeyNUpdated, md.KeyNUpdated.IsZero(), time.Since(md.KeyNUpdated) > 24*time.Hour)\n\t\t\tkeyN = md.KeyN + lenpointsupdated\n\t\t}\n\n\t\td := &Metadata{\n\t\t\tKeyN: keyN,\n\t\t\tLastUpdatedAt: time.Now().UTC(),\n\t\t\tLastUpdatedBy: lastpoint.Name,\n\t\t\tLastUpdatedPointsN: lenpointsupdated,\n\t\t\tTileDBLastUpdated: tileDBLastUpdated,\n\t\t}\n\t\tif !keyNUpdated.IsZero() {\n\t\t\td.KeyNUpdated = keyNUpdated\n\t\t} else {\n\t\t\td.KeyNUpdated = md.KeyNUpdated\n\t\t}\n\t\tby, e := json.Marshal(d)\n\t\tif e != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif e := b.Put([]byte(\"metadata\"), by); e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\treturn nil\n\t})\n\treturn e\n}\n\nfunc getLastKnownData() (out []byte, err error) {\n\terr = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\t\tout = b.Get([]byte(\"lastknown\"))\n\t\treturn nil\n\t})\n\treturn\n}\n\nfunc storeLastKnown(tp trackPoint.TrackPoint) {\n\t\/\/lastKnownMap[tp.Name] = tp\n\tlk := LastKnown{}\n\tif err := GetDB().Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(statsKey))\n\n\t\tv := b.Get([]byte(\"lastknown\"))\n\t\tif e := json.Unmarshal(v, &lk); e != nil {\n\t\t\tlog.Println(\"error unmarshalling nil lastknown\", tp)\n\t\t}\n\t\tlk[tp.Name] = tp\n\t\tif by, e := json.Marshal(lk); e == nil {\n\t\t\tif e := b.Put([]byte(\"lastknown\"), by); e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(\"err marshalling lastknown\", tp)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tlog.Println(\"error storing last known: %v\", err)\n\t} else {\n\t\tlog.Printf(\"stored last known: lk=%v\\ntp=%v\", lk, tp)\n\t}\n}\n\nfunc storePoints(trackPoints trackPoint.TrackPoints) error {\n\tvar err error\n\tfor _, point := range trackPoints {\n\t\terr = storePoint(point)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err == nil {\n\t\tl := len(trackPoints)\n\t\terr = storemetadata(trackPoints[l-1], l)\n\t\tstoreLastKnown(trackPoints[l-1])\n\t}\n\treturn err\n}\n\nfunc buildTrackpointKey(tp trackPoint.TrackPoint) []byte {\n\tif tp.Uuid == \"\" {\n\t\tif tp.ID != 0 {\n\t\t\treturn i64tob(tp.ID)\n\t\t}\n\t\treturn i64tob(tp.Time.UnixNano())\n\t}\n\t\/\/ have uuid\n\tk := []byte{}\n\tk = append(k, i64tob(tp.Time.UnixNano())...)\n\tk = append(k, []byte(tp.Uuid)...)\n\treturn k\n}\n\nfunc storePoint(tp trackPoint.TrackPoint) error {\n\n\tvar err error\n\tif tp.Time.IsZero() {\n\t\ttp.Time = time.Now()\n\t}\n\n\tGetDB().Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(trackKey))\n\n\t\t\/\/ id, _ := b.NextSequence()\n\t\t\/\/ trackPoint.ID = int(id)\n\n\t\ttp.ID = tp.Time.UnixNano() \/\/dunno if can really get nanoy, or if will just *1000.\n\n\t\tkey := buildTrackpointKey(tp)\n\n\t\tif exists := b.Get(key); exists != nil {\n\t\t\t\/\/ make sure it's ours\n\t\t\tvar existingTrackpoint trackPoint.TrackPoint\n\t\t\te := json.Unmarshal(exists, &existingTrackpoint)\n\t\t\tif e != nil {\n\t\t\t\tfmt.Println(\"Checking on an existing trackpoint and got an error with one of the existing trackpoints unmarshaling.\")\n\t\t\t}\n\t\t\tif existingTrackpoint.Name == tp.Name && existingTrackpoint.Uuid == tp.Uuid {\n\t\t\t\tfmt.Println(\"Got redundant track; not storing: \", tp.Name, tp.Uuid, tp.Time)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\t\/\/ gets \"\" case nontestesing\n\t\ttp.Name = getTestesPrefix() + tp.Name\n\n\t\ttrackPointJSON, err := json.Marshal(tp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = b.Put(key, trackPointJSON)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Didn't save post trackPoint in bolt.\", err)\n\t\t\treturn err\n\t\t}\n\t\t\/\/ p := quadtree.NewPoint(tp.Lat, tp.Lng, &tp)\n\t\t\/\/ if !GetQT().Insert(p) {\n\t\t\/\/ \tfmt.Println(\"Couldn't add to quadtree: \", p)\n\t\t\/\/ }\n\t\tfmt.Println(\"Saved trackpoint: \", tp)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn err\n}\n\nfunc getAllStoredPoints() (tps trackPoint.TPs, e error) {\n\tstart := time.Now()\n\n\te = GetDB().View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(trackKey))\n\n\t\t\/\/ can swap out for- eacher if we figure indexing, or even want it\n\t\tb.ForEach(func(trackPointKey, trackPointVal []byte) error {\n\n\t\t\tvar trackPointCurrent trackPoint.TrackPoint\n\t\t\terr := json.Unmarshal(trackPointVal, &trackPointCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttps = append(tps, &trackPointCurrent)\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\tfmt.Printf(\"Found %d points with iterator method - %s\\n\", len(tps), time.Since(start))\n\n\treturn tps, e\n}\n\n\/\/TODO make queryable ala which cat when\n\/\/ , channel chan *trackPoint.TrackPoint\nfunc getPointsQT(query *query) (tps trackPoint.TPs, err error) {\n\n\tif query == nil {\n\t\tquery = NewQuery()\n\t}\n\n\tquery.SetDefaults() \/\/ eps, lim catches empty vals\n\n\tif query.IsBounded() {\n\t\ttps = getPointsFromQT(query)\n\t} else {\n\t\ttps, err = getAllStoredPoints()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(tps) > query.Limit {\n\t\tlimitedTPs, err := limitTrackPoints(query, tps)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn tps, err\n\t\t}\n\t\ttps = limitedTPs\n\t}\n\n\tsort.Sort(tps)\n\n\treturn tps, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains tests verifying the types associated with an AST after\n\/\/ type checking.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"testing\"\n)\n\nconst filename = \"<src>\"\n\nfunc makePkg(t *testing.T, src string) (*ast.Package, error) {\n\tfile, err := parser.ParseFile(fset, filename, src, parser.DeclarationErrors)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfiles := map[string]*ast.File{filename: file}\n\tpkg, err := ast.NewPackage(fset, files, GcImport, Universe)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := Check(fset, pkg, nil, nil); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, nil\n}\n\ntype testEntry struct {\n\tsrc, str string\n}\n\n\/\/ dup returns a testEntry where both src and str are the same.\nfunc dup(s string) testEntry {\n\treturn testEntry{s, s}\n}\n\nvar testTypes = []testEntry{\n\t\/\/ basic types\n\tdup(\"int\"),\n\tdup(\"float32\"),\n\tdup(\"string\"),\n\n\t\/\/ arrays\n\tdup(\"[10]int\"),\n\n\t\/\/ slices\n\tdup(\"[]int\"),\n\tdup(\"[][]int\"),\n\n\t\/\/ structs\n\tdup(\"struct{}\"),\n\tdup(\"struct{x int}\"),\n\t{`struct {\n\t\tx, y int\n\t\tz float32 \"foo\"\n\t}`, `struct{x int; y int; z float32 \"foo\"}`},\n\t{`struct {\n\t\tstring\n\t\telems []T\n\t}`, `struct{string; elems []T}`},\n\n\t\/\/ pointers\n\tdup(\"*int\"),\n\tdup(\"***struct{}\"),\n\tdup(\"*struct{a int; b float32}\"),\n\n\t\/\/ functions\n\tdup(\"func()\"),\n\tdup(\"func(x int)\"),\n\t{\"func(x, y int)\", \"func(x int, y int)\"},\n\t{\"func(x, y int, z string)\", \"func(x int, y int, z string)\"},\n\tdup(\"func(int)\"),\n\t{\"func(int, string, byte)\", \"func(int, string, byte)\"},\n\n\tdup(\"func() int\"),\n\t{\"func() (string)\", \"func() string\"},\n\tdup(\"func() (u int)\"),\n\t{\"func() (u, v int, w string)\", \"func() (u int, v int, w string)\"},\n\n\tdup(\"func(int) string\"),\n\tdup(\"func(x int) string\"),\n\tdup(\"func(x int) (u string)\"),\n\t{\"func(x, y int) (u string)\", \"func(x int, y int) (u string)\"},\n\n\tdup(\"func(...int) string\"),\n\tdup(\"func(x ...int) string\"),\n\tdup(\"func(x ...int) (u string)\"),\n\t{\"func(x, y ...int) (u string)\", \"func(x int, y ...int) (u string)\"},\n\n\t\/\/ interfaces\n\tdup(\"interface{}\"),\n\tdup(\"interface{m()}\"),\n\t{`interface{\n\t\tm(int) float32\n\t\tString() string\n\t}`, `interface{String() string; m(int) float32}`}, \/\/ methods are sorted\n\t\/\/ TODO(gri) add test for interface w\/ anonymous field\n\n\t\/\/ maps\n\tdup(\"map[string]int\"),\n\t{\"map[struct{x, y int}][]byte\", \"map[struct{x int; y int}][]byte\"},\n\n\t\/\/ channels\n\tdup(\"chan int\"),\n\tdup(\"chan<- func()\"),\n\tdup(\"<-chan []func() int\"),\n}\n\nfunc TestTypes(t *testing.T) {\n\tfor _, test := range testTypes {\n\t\tsrc := \"package p; type T \" + test.src\n\t\tpkg, err := makePkg(t, src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", src, err)\n\t\t\tcontinue\n\t\t}\n\t\ttyp := underlying(pkg.Scope.Lookup(\"T\").Type.(Type))\n\t\tstr := typeString(typ)\n\t\tif str != test.str {\n\t\t\tt.Errorf(\"%s: got %s, want %s\", test.src, str, test.str)\n\t\t}\n\t}\n}\n\nvar testExprs = []testEntry{\n\t\/\/ basic type literals\n\tdup(\"x\"),\n\tdup(\"true\"),\n\tdup(\"42\"),\n\tdup(\"3.1415\"),\n\tdup(\"2.71828i\"),\n\tdup(`'a'`),\n\tdup(`\"foo\"`),\n\tdup(\"`bar`\"),\n\n\t\/\/ arbitrary expressions\n\tdup(\"&x\"),\n\tdup(\"*x\"),\n\tdup(\"(x)\"),\n\tdup(\"x + y\"),\n\tdup(\"x + y * 10\"),\n\tdup(\"s.foo\"),\n\tdup(\"s[0]\"),\n\tdup(\"s[x:y]\"),\n\tdup(\"s[:y]\"),\n\tdup(\"s[x:]\"),\n\tdup(\"s[:]\"),\n\tdup(\"f(1, 2.3)\"),\n\tdup(\"-f(10, 20)\"),\n\tdup(\"f(x + y, +3.1415)\"),\n\t{\"func(a, b int) {}\", \"(func literal)\"},\n\t{\"func(a, b int) []int {}()[x]\", \"(func literal)()[x]\"},\n\t{\"[]int{1, 2, 3}\", \"(composite literal)\"},\n\t{\"[]int{1, 2, 3}[x:]\", \"(composite literal)[x:]\"},\n\t{\"x.([]string)\", \"x.(...)\"},\n}\n\nfunc TestExprs(t *testing.T) {\n\tfor _, test := range testExprs {\n\t\tsrc := \"package p; var _ = \" + test.src + \"; var (x, y int; s []string; f func(int, float32))\"\n\t\tpkg, err := makePkg(t, src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", src, err)\n\t\t\tcontinue\n\t\t}\n\t\texpr := pkg.Files[filename].Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0]\n\t\tstr := exprString(expr)\n\t\tif str != test.str {\n\t\t\tt.Errorf(\"%s: got %s, want %s\", test.src, str, test.str)\n\t\t}\n\t}\n}\n<commit_msg>fix build: use temporary variable to avoid compiler error<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains tests verifying the types associated with an AST after\n\/\/ type checking.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"testing\"\n)\n\nconst filename = \"<src>\"\n\nfunc makePkg(t *testing.T, src string) (*ast.Package, error) {\n\tfile, err := parser.ParseFile(fset, filename, src, parser.DeclarationErrors)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfiles := map[string]*ast.File{filename: file}\n\tpkg, err := ast.NewPackage(fset, files, GcImport, Universe)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := Check(fset, pkg, nil, nil); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, nil\n}\n\ntype testEntry struct {\n\tsrc, str string\n}\n\n\/\/ dup returns a testEntry where both src and str are the same.\nfunc dup(s string) testEntry {\n\treturn testEntry{s, s}\n}\n\nvar testTypes = []testEntry{\n\t\/\/ basic types\n\tdup(\"int\"),\n\tdup(\"float32\"),\n\tdup(\"string\"),\n\n\t\/\/ arrays\n\tdup(\"[10]int\"),\n\n\t\/\/ slices\n\tdup(\"[]int\"),\n\tdup(\"[][]int\"),\n\n\t\/\/ structs\n\tdup(\"struct{}\"),\n\tdup(\"struct{x int}\"),\n\t{`struct {\n\t\tx, y int\n\t\tz float32 \"foo\"\n\t}`, `struct{x int; y int; z float32 \"foo\"}`},\n\t{`struct {\n\t\tstring\n\t\telems []T\n\t}`, `struct{string; elems []T}`},\n\n\t\/\/ pointers\n\tdup(\"*int\"),\n\tdup(\"***struct{}\"),\n\tdup(\"*struct{a int; b float32}\"),\n\n\t\/\/ functions\n\tdup(\"func()\"),\n\tdup(\"func(x int)\"),\n\t{\"func(x, y int)\", \"func(x int, y int)\"},\n\t{\"func(x, y int, z string)\", \"func(x int, y int, z string)\"},\n\tdup(\"func(int)\"),\n\t{\"func(int, string, byte)\", \"func(int, string, byte)\"},\n\n\tdup(\"func() int\"),\n\t{\"func() (string)\", \"func() string\"},\n\tdup(\"func() (u int)\"),\n\t{\"func() (u, v int, w string)\", \"func() (u int, v int, w string)\"},\n\n\tdup(\"func(int) string\"),\n\tdup(\"func(x int) string\"),\n\tdup(\"func(x int) (u string)\"),\n\t{\"func(x, y int) (u string)\", \"func(x int, y int) (u string)\"},\n\n\tdup(\"func(...int) string\"),\n\tdup(\"func(x ...int) string\"),\n\tdup(\"func(x ...int) (u string)\"),\n\t{\"func(x, y ...int) (u string)\", \"func(x int, y ...int) (u string)\"},\n\n\t\/\/ interfaces\n\tdup(\"interface{}\"),\n\tdup(\"interface{m()}\"),\n\t{`interface{\n\t\tm(int) float32\n\t\tString() string\n\t}`, `interface{String() string; m(int) float32}`}, \/\/ methods are sorted\n\t\/\/ TODO(gri) add test for interface w\/ anonymous field\n\n\t\/\/ maps\n\tdup(\"map[string]int\"),\n\t{\"map[struct{x, y int}][]byte\", \"map[struct{x int; y int}][]byte\"},\n\n\t\/\/ channels\n\tdup(\"chan int\"),\n\tdup(\"chan<- func()\"),\n\tdup(\"<-chan []func() int\"),\n}\n\nfunc TestTypes(t *testing.T) {\n\tfor _, test := range testTypes {\n\t\tsrc := \"package p; type T \" + test.src\n\t\tpkg, err := makePkg(t, src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", src, err)\n\t\t\tcontinue\n\t\t}\n\t\ttyp := underlying(pkg.Scope.Lookup(\"T\").Type.(Type))\n\t\tstr := typeString(typ)\n\t\tif str != test.str {\n\t\t\tt.Errorf(\"%s: got %s, want %s\", test.src, str, test.str)\n\t\t}\n\t}\n}\n\nvar testExprs = []testEntry{\n\t\/\/ basic type literals\n\tdup(\"x\"),\n\tdup(\"true\"),\n\tdup(\"42\"),\n\tdup(\"3.1415\"),\n\tdup(\"2.71828i\"),\n\tdup(`'a'`),\n\tdup(`\"foo\"`),\n\tdup(\"`bar`\"),\n\n\t\/\/ arbitrary expressions\n\tdup(\"&x\"),\n\tdup(\"*x\"),\n\tdup(\"(x)\"),\n\tdup(\"x + y\"),\n\tdup(\"x + y * 10\"),\n\tdup(\"s.foo\"),\n\tdup(\"s[0]\"),\n\tdup(\"s[x:y]\"),\n\tdup(\"s[:y]\"),\n\tdup(\"s[x:]\"),\n\tdup(\"s[:]\"),\n\tdup(\"f(1, 2.3)\"),\n\tdup(\"-f(10, 20)\"),\n\tdup(\"f(x + y, +3.1415)\"),\n\t{\"func(a, b int) {}\", \"(func literal)\"},\n\t{\"func(a, b int) []int {}()[x]\", \"(func literal)()[x]\"},\n\t{\"[]int{1, 2, 3}\", \"(composite literal)\"},\n\t{\"[]int{1, 2, 3}[x:]\", \"(composite literal)[x:]\"},\n\t{\"x.([]string)\", \"x.(...)\"},\n}\n\nfunc TestExprs(t *testing.T) {\n\tfor _, test := range testExprs {\n\t\tsrc := \"package p; var _ = \" + test.src + \"; var (x, y int; s []string; f func(int, float32))\"\n\t\tpkg, err := makePkg(t, src)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", src, err)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ TODO(gri) writing the code below w\/o the decl variable will\n\t\t\/\/ cause a 386 compiler error (out of fixed registers)\n\t\tdecl := pkg.Files[filename].Decls[0].(*ast.GenDecl)\n\t\texpr := decl.Specs[0].(*ast.ValueSpec).Values[0]\n\t\tstr := exprString(expr)\n\t\tif str != test.str {\n\t\t\tt.Errorf(\"%s: got %s, want %s\", test.src, str, test.str)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Original C version https:\/\/github.com\/jgm\/peg-markdown\/\r\n *\tCopyright 2008 John MacFarlane (jgm at berkeley dot edu).\r\n *\r\n * Modifications and translation from C into Go\r\n * based on markdown_output.c\r\n *\tCopyright 2010 Michael Teichgräber (mt at wmipf dot de)\r\n *\r\n * This program is free software; you can redistribute it and\/or modify\r\n * it under the terms of the GNU General Public License or the MIT\r\n * license. See LICENSE for details.\r\n *\r\n * This program is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n * GNU General Public License for more details.\r\n *\/\r\n\r\npackage markdown\r\n\r\n\/\/ HTML output functions\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"log\"\r\n\t\"math\/rand\"\r\n\t\"strings\"\r\n)\r\n\r\ntype Writer interface {\r\n\tWrite([]byte) (int, error)\r\n\tWriteString(string) (int, error)\r\n\tWriteRune(rune) (int, error)\r\n\tWriteByte(byte) error\r\n}\r\n\r\ntype baseWriter struct {\r\n\tWriter\r\n\tpadded int\r\n}\r\n\r\ntype htmlOut struct {\r\n\tbaseWriter\r\n\tobfuscate bool\r\n\r\n\tnotenum int\r\n\tendNotes []*element \/* List of endnotes to print after main content. *\/\r\n}\r\n\r\nfunc ToHTML(w Writer) Formatter {\r\n\tf := new(htmlOut)\r\n\tf.baseWriter = baseWriter{w, 2}\r\n\treturn f\r\n}\r\nfunc (f *htmlOut) FormatBlock(tree *element) {\r\n\tf.elist(tree)\r\n}\r\nfunc (f *htmlOut) Finish() {\r\n\tif len(f.endNotes) != 0 {\r\n\t\tf.sp()\r\n\t\tf.printEndnotes()\r\n\t}\r\n\tf.WriteByte('\\n')\r\n\tf.padded = 2\r\n}\r\n\r\n\/\/ pad - add a number of newlines, the value of the\r\n\/\/ argument minus the value of `padded'\r\n\/\/ One newline means a line break, similar to troff's .br\r\n\/\/ request, two newlines mean a line break plus an\r\n\/\/ empty line, similar to troff's .sp request\r\nfunc (w *baseWriter) pad(n int) {\r\n\tfor ; n > w.padded; n-- {\r\n\t\tw.WriteByte('\\n')\r\n\t}\r\n\tw.padded = 0\r\n}\r\n\r\nfunc (h *htmlOut) br() *htmlOut {\r\n\th.pad(1)\r\n\treturn h\r\n}\r\n\r\nfunc (h *htmlOut) sp() *htmlOut {\r\n\th.pad(2)\r\n\treturn h\r\n}\r\n\r\nfunc (h *htmlOut) skipPadding() *htmlOut {\r\n\th.padded = 2\r\n\treturn h\r\n}\r\n\r\n\/\/ print a string\r\nfunc (w *htmlOut) s(s string) *htmlOut {\r\n\tw.WriteString(s)\r\n\treturn w\r\n}\r\n\r\n\/* print string, escaping for HTML\r\n * If obfuscate selected, convert characters to hex or decimal entities at random\r\n *\/\r\nfunc (w *htmlOut) str(s string) *htmlOut {\r\n\tvar ws string\r\n\tvar i0 = 0\r\n\r\n\to := w.obfuscate\r\n\tfor i, r := range s {\r\n\t\tswitch r {\r\n\t\tcase '&':\r\n\t\t\tws = \"&\"\r\n\t\tcase '<':\r\n\t\t\tws = \"<\"\r\n\t\tcase '>':\r\n\t\t\tws = \">\"\r\n\t\tcase '\"':\r\n\t\t\tws = \""\"\r\n\t\tdefault:\r\n\t\t\tif o && r < 128 && r >= 0 {\r\n\t\t\t\tif rand.Intn(2) == 0 {\r\n\t\t\t\t\tws = fmt.Sprintf(\"&#%d;\", r)\r\n\t\t\t\t} else {\r\n\t\t\t\t\tws = fmt.Sprintf(\"&#%x;\", r)\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tif i0 == -1 {\r\n\t\t\t\t\ti0 = i\r\n\t\t\t\t}\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t}\r\n\t\tif i0 != -1 {\r\n\t\t\tw.WriteString(s[i0:i])\r\n\t\t\ti0 = -1\r\n\t\t}\r\n\t\tw.WriteString(ws)\r\n\t}\r\n\tif i0 != -1 {\r\n\t\tw.WriteString(s[i0:])\r\n\t}\r\n\treturn w\r\n}\r\n\r\nfunc (w *htmlOut) children(el *element) *htmlOut {\r\n\treturn w.elist(el.children)\r\n}\r\nfunc (w *htmlOut) inline(tag string, el *element) *htmlOut {\r\n\treturn w.s(tag).children(el).s(\"<\/\").s(tag[1:])\r\n}\r\nfunc (w *htmlOut) listBlock(tag string, el *element) *htmlOut {\r\n\treturn w.sp().s(tag).elist(el.children).br().s(\"<\/\").s(tag[1:])\r\n}\r\nfunc (w *htmlOut) listItem(tag string, el *element) *htmlOut {\r\n\treturn w.br().s(tag).skipPadding().elist(el.children).s(\"<\/\").s(tag[1:])\r\n}\r\n\r\n\/* print a list of elements\r\n *\/\r\nfunc (w *htmlOut) elist(list *element) *htmlOut {\r\n\tfor list != nil {\r\n\t\tw.elem(list)\r\n\t\tlist = list.next\r\n\t}\r\n\treturn w\r\n}\r\n\r\n\/\/ print an element\r\nfunc (w *htmlOut) elem(elt *element) *htmlOut {\r\n\tvar s string\r\n\r\n\tswitch elt.key {\r\n\tcase SPACE:\r\n\t\ts = elt.contents.str\r\n\tcase LINEBREAK:\r\n\t\ts = \"<br\/>\\n\"\r\n\tcase STR:\r\n\t\tw.str(elt.contents.str)\r\n\tcase ELLIPSIS:\r\n\t\ts = \"…\"\r\n\tcase EMDASH:\r\n\t\ts = \"—\"\r\n\tcase ENDASH:\r\n\t\ts = \"–\"\r\n\tcase APOSTROPHE:\r\n\t\ts = \"’\"\r\n\tcase SINGLEQUOTED:\r\n\t\tw.s(\"‘\").children(elt).s(\"’\")\r\n\tcase DOUBLEQUOTED:\r\n\t\tw.s(\"“\").children(elt).s(\"”\")\r\n\tcase CODE:\r\n\t\tw.s(\"<code>\").str(elt.contents.str).s(\"<\/code>\")\r\n\tcase HTML:\r\n\t\ts = elt.contents.str\r\n\tcase LINK:\r\n\t\to := w.obfuscate\r\n\t\tif strings.Index(elt.contents.link.url, \"mailto:\") == 0 {\r\n\t\t\tw.obfuscate = true \/* obfuscate mailto: links *\/\r\n\t\t}\r\n\t\tw.s(`<a href=\"`).str(elt.contents.link.url).s(`\"`)\r\n\t\tif len(elt.contents.link.title) > 0 {\r\n\t\t\tw.s(` title=\"`).str(elt.contents.link.title).s(`\"`)\r\n\t\t}\r\n\t\tw.s(\">\").elist(elt.contents.link.label).s(\"<\/a>\")\r\n\t\tw.obfuscate = o\r\n\tcase IMAGE:\r\n\t\tw.s(`<img src=\"`).str(elt.contents.link.url).s(`\" alt=\"`)\r\n\t\tw.elist(elt.contents.link.label).s(`\"`)\r\n\t\tif len(elt.contents.link.title) > 0 {\r\n\t\t\tw.s(` title=\"`).str(elt.contents.link.title).s(`\"`)\r\n\t\t}\r\n\t\tw.s(\" \/>\")\r\n\tcase EMPH:\r\n\t\tw.inline(\"<em>\", elt)\r\n\tcase STRONG:\r\n\t\tw.inline(\"<strong>\", elt)\r\n\tcase STRIKE:\r\n\t\tw.inline(\"<del>\", elt)\r\n\tcase LIST:\r\n\t\tw.children(elt)\r\n\tcase RAW:\r\n\t\t\/* Shouldn't occur - these are handled by process_raw_blocks() *\/\r\n\t\tlog.Fatalf(\"RAW\")\r\n\tcase H1, H2, H3, H4, H5, H6:\r\n\t\th := \"<h\" + string('1'+elt.key-H1) + \">\" \/* assumes H1 ... H6 are in order *\/\r\n\t\tw.sp().inline(h, elt)\r\n\tcase PLAIN:\r\n\t\tw.br().children(elt)\r\n\tcase PARA:\r\n\t\tw.sp().inline(\"<p>\", elt)\r\n\tcase HRULE:\r\n\t\tw.sp().s(\"<hr \/>\")\r\n\tcase HTMLBLOCK:\r\n\t\tw.sp().s(elt.contents.str)\r\n\tcase VERBATIM:\r\n\t\tw.sp().s(\"<pre><code>\").str(elt.contents.str).s(\"<\/code><\/pre>\")\r\n\tcase BULLETLIST:\r\n\t\tw.listBlock(\"<ul>\", elt)\r\n\tcase ORDEREDLIST:\r\n\t\tw.listBlock(\"<ol>\", elt)\r\n\tcase DEFINITIONLIST:\r\n\t\tw.listBlock(\"<dl>\", elt)\r\n\tcase DEFTITLE:\r\n\t\tw.listItem(\"<dt>\", elt)\r\n\tcase DEFDATA:\r\n\t\tw.listItem(\"<dd>\", elt)\r\n\tcase LISTITEM:\r\n\t\tw.listItem(\"<li>\", elt)\r\n\tcase BLOCKQUOTE:\r\n\t\tw.sp().s(\"<blockquote>\\n\").skipPadding().children(elt).br().s(\"<\/blockquote>\")\r\n\tcase REFERENCE:\r\n\t\t\/* Nonprinting *\/\r\n\tcase NOTE:\r\n\t\t\/* if contents.str == 0, then print note; else ignore, since this\r\n\t\t * is a note block that has been incorporated into the notes list\r\n\t\t *\/\r\n\t\tif elt.contents.str == \"\" {\r\n\t\t\tw.endNotes = append(w.endNotes, elt) \/* add an endnote to global endnotes list *\/\r\n\t\t\tw.notenum++\r\n\t\t\tnn := w.notenum\r\n\t\t\ts = fmt.Sprintf(`<a class=\"noteref\" id=\"fnref%d\" href=\"#fn%d\" title=\"Jump to note %d\">[%d]<\/a>`,\r\n\t\t\t\tnn, nn, nn, nn)\r\n\t\t}\r\n\tdefault:\r\n\t\tlog.Fatalf(\"htmlOut.elem encountered unknown element key = %d\\n\", elt.key)\r\n\t}\r\n\tif s != \"\" {\r\n\t\tw.s(s)\r\n\t}\r\n\treturn w\r\n}\r\n\r\nfunc (w *htmlOut) printEndnotes() {\r\n\tcounter := 0\r\n\r\n\tw.s(\"<hr\/>\\n<ol id=\\\"notes\\\">\")\r\n\tfor _, elt := range w.endNotes {\r\n\t\tcounter++\r\n\t\tw.br().s(fmt.Sprintf(\"<li id=\\\"fn%d\\\">\\n\", counter)).skipPadding()\r\n\t\tw.children(elt)\r\n\t\tw.s(fmt.Sprintf(\" <a href=\\\"#fnref%d\\\" title=\\\"Jump back to reference\\\">[back]<\/a>\", counter))\r\n\t\tw.br().s(\"<\/li>\")\r\n\t}\r\n\tw.br().s(\"<\/ol>\")\r\n}\r\n<commit_msg>Update markdown parser to escape HTML outside of code blocks<commit_after>\/* Original C version https:\/\/github.com\/jgm\/peg-markdown\/\r\n *\tCopyright 2008 John MacFarlane (jgm at berkeley dot edu).\r\n *\r\n * Modifications and translation from C into Go\r\n * based on markdown_output.c\r\n *\tCopyright 2010 Michael Teichgräber (mt at wmipf dot de)\r\n *\r\n * This program is free software; you can redistribute it and\/or modify\r\n * it under the terms of the GNU General Public License or the MIT\r\n * license. See LICENSE for details.\r\n *\r\n * This program is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n * GNU General Public License for more details.\r\n *\/\r\n\r\npackage markdown\r\n\r\n\/\/ HTML output functions\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"html\"\r\n\t\"log\"\r\n\t\"math\/rand\"\r\n\t\"strings\"\r\n)\r\n\r\ntype Writer interface {\r\n\tWrite([]byte) (int, error)\r\n\tWriteString(string) (int, error)\r\n\tWriteRune(rune) (int, error)\r\n\tWriteByte(byte) error\r\n}\r\n\r\ntype baseWriter struct {\r\n\tWriter\r\n\tpadded int\r\n}\r\n\r\ntype htmlOut struct {\r\n\tbaseWriter\r\n\tobfuscate bool\r\n\r\n\tnotenum int\r\n\tendNotes []*element \/* List of endnotes to print after main content. *\/\r\n}\r\n\r\nfunc ToHTML(w Writer) Formatter {\r\n\tf := new(htmlOut)\r\n\tf.baseWriter = baseWriter{w, 2}\r\n\treturn f\r\n}\r\nfunc (f *htmlOut) FormatBlock(tree *element) {\r\n\tf.elist(tree)\r\n}\r\nfunc (f *htmlOut) Finish() {\r\n\tif len(f.endNotes) != 0 {\r\n\t\tf.sp()\r\n\t\tf.printEndnotes()\r\n\t}\r\n\tf.WriteByte('\\n')\r\n\tf.padded = 2\r\n}\r\n\r\n\/\/ pad - add a number of newlines, the value of the\r\n\/\/ argument minus the value of `padded'\r\n\/\/ One newline means a line break, similar to troff's .br\r\n\/\/ request, two newlines mean a line break plus an\r\n\/\/ empty line, similar to troff's .sp request\r\nfunc (w *baseWriter) pad(n int) {\r\n\tfor ; n > w.padded; n-- {\r\n\t\tw.WriteByte('\\n')\r\n\t}\r\n\tw.padded = 0\r\n}\r\n\r\nfunc (h *htmlOut) br() *htmlOut {\r\n\th.pad(1)\r\n\treturn h\r\n}\r\n\r\nfunc (h *htmlOut) sp() *htmlOut {\r\n\th.pad(2)\r\n\treturn h\r\n}\r\n\r\nfunc (h *htmlOut) skipPadding() *htmlOut {\r\n\th.padded = 2\r\n\treturn h\r\n}\r\n\r\n\/\/ print a string\r\nfunc (w *htmlOut) s(s string) *htmlOut {\r\n\tw.WriteString(s)\r\n\treturn w\r\n}\r\n\r\n\/* print string, escaping for HTML\r\n * If obfuscate selected, convert characters to hex or decimal entities at random\r\n *\/\r\nfunc (w *htmlOut) str(s string) *htmlOut {\r\n\tvar ws string\r\n\tvar i0 = 0\r\n\r\n\to := w.obfuscate\r\n\tfor i, r := range s {\r\n\t\tswitch r {\r\n\t\tcase '&':\r\n\t\t\tws = \"&\"\r\n\t\tcase '<':\r\n\t\t\tws = \"<\"\r\n\t\tcase '>':\r\n\t\t\tws = \">\"\r\n\t\tcase '\"':\r\n\t\t\tws = \""\"\r\n\t\tdefault:\r\n\t\t\tif o && r < 128 && r >= 0 {\r\n\t\t\t\tif rand.Intn(2) == 0 {\r\n\t\t\t\t\tws = fmt.Sprintf(\"&#%d;\", r)\r\n\t\t\t\t} else {\r\n\t\t\t\t\tws = fmt.Sprintf(\"&#%x;\", r)\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tif i0 == -1 {\r\n\t\t\t\t\ti0 = i\r\n\t\t\t\t}\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t}\r\n\t\tif i0 != -1 {\r\n\t\t\tw.WriteString(s[i0:i])\r\n\t\t\ti0 = -1\r\n\t\t}\r\n\t\tw.WriteString(ws)\r\n\t}\r\n\tif i0 != -1 {\r\n\t\tw.WriteString(s[i0:])\r\n\t}\r\n\treturn w\r\n}\r\n\r\nfunc (w *htmlOut) children(el *element) *htmlOut {\r\n\treturn w.elist(el.children)\r\n}\r\nfunc (w *htmlOut) inline(tag string, el *element) *htmlOut {\r\n\treturn w.s(tag).children(el).s(\"<\/\").s(tag[1:])\r\n}\r\nfunc (w *htmlOut) listBlock(tag string, el *element) *htmlOut {\r\n\treturn w.sp().s(tag).elist(el.children).br().s(\"<\/\").s(tag[1:])\r\n}\r\nfunc (w *htmlOut) listItem(tag string, el *element) *htmlOut {\r\n\treturn w.br().s(tag).skipPadding().elist(el.children).s(\"<\/\").s(tag[1:])\r\n}\r\n\r\n\/* print a list of elements\r\n *\/\r\nfunc (w *htmlOut) elist(list *element) *htmlOut {\r\n\tfor list != nil {\r\n\t\tw.elem(list)\r\n\t\tlist = list.next\r\n\t}\r\n\treturn w\r\n}\r\n\r\n\/\/ print an element\r\nfunc (w *htmlOut) elem(elt *element) *htmlOut {\r\n\tvar s string\r\n\r\n\tswitch elt.key {\r\n\tcase SPACE:\r\n\t\ts = elt.contents.str\r\n\tcase LINEBREAK:\r\n\t\ts = \"<br\/>\\n\"\r\n\tcase STR:\r\n\t\tw.str(elt.contents.str)\r\n\tcase ELLIPSIS:\r\n\t\ts = \"…\"\r\n\tcase EMDASH:\r\n\t\ts = \"—\"\r\n\tcase ENDASH:\r\n\t\ts = \"–\"\r\n\tcase APOSTROPHE:\r\n\t\ts = \"’\"\r\n\tcase SINGLEQUOTED:\r\n\t\tw.s(\"‘\").children(elt).s(\"’\")\r\n\tcase DOUBLEQUOTED:\r\n\t\tw.s(\"“\").children(elt).s(\"”\")\r\n\tcase CODE:\r\n\t\tw.s(\"<code>\").str(elt.contents.str).s(\"<\/code>\")\r\n\tcase HTML:\r\n\t\ts = html.EscapeString(elt.contents.str)\r\n\tcase LINK:\r\n\t\to := w.obfuscate\r\n\t\tif strings.Index(elt.contents.link.url, \"mailto:\") == 0 {\r\n\t\t\tw.obfuscate = true \/* obfuscate mailto: links *\/\r\n\t\t}\r\n\t\tw.s(`<a href=\"`).str(elt.contents.link.url).s(`\"`)\r\n\t\tif len(elt.contents.link.title) > 0 {\r\n\t\t\tw.s(` title=\"`).str(elt.contents.link.title).s(`\"`)\r\n\t\t}\r\n\t\tw.s(\">\").elist(elt.contents.link.label).s(\"<\/a>\")\r\n\t\tw.obfuscate = o\r\n\tcase IMAGE:\r\n\t\tw.s(`<img src=\"`).str(elt.contents.link.url).s(`\" alt=\"`)\r\n\t\tw.elist(elt.contents.link.label).s(`\"`)\r\n\t\tif len(elt.contents.link.title) > 0 {\r\n\t\t\tw.s(` title=\"`).str(elt.contents.link.title).s(`\"`)\r\n\t\t}\r\n\t\tw.s(\" \/>\")\r\n\tcase EMPH:\r\n\t\tw.inline(\"<em>\", elt)\r\n\tcase STRONG:\r\n\t\tw.inline(\"<strong>\", elt)\r\n\tcase STRIKE:\r\n\t\tw.inline(\"<del>\", elt)\r\n\tcase LIST:\r\n\t\tw.children(elt)\r\n\tcase RAW:\r\n\t\t\/* Shouldn't occur - these are handled by process_raw_blocks() *\/\r\n\t\tlog.Fatalf(\"RAW\")\r\n\tcase H1, H2, H3, H4, H5, H6:\r\n\t\th := \"<h\" + string('1'+elt.key-H1) + \">\" \/* assumes H1 ... H6 are in order *\/\r\n\t\tw.sp().inline(h, elt)\r\n\tcase PLAIN:\r\n\t\tw.br().children(elt)\r\n\tcase PARA:\r\n\t\tw.sp().inline(\"<p>\", elt)\r\n\tcase HRULE:\r\n\t\tw.sp().s(\"<hr \/>\")\r\n\tcase HTMLBLOCK:\r\n\t\tw.sp().s(html.EscapeString(elt.contents.str))\r\n\tcase VERBATIM:\r\n\t\tw.sp().s(\"<pre><code>\").str(elt.contents.str).s(\"<\/code><\/pre>\")\r\n\tcase BULLETLIST:\r\n\t\tw.listBlock(\"<ul>\", elt)\r\n\tcase ORDEREDLIST:\r\n\t\tw.listBlock(\"<ol>\", elt)\r\n\tcase DEFINITIONLIST:\r\n\t\tw.listBlock(\"<dl>\", elt)\r\n\tcase DEFTITLE:\r\n\t\tw.listItem(\"<dt>\", elt)\r\n\tcase DEFDATA:\r\n\t\tw.listItem(\"<dd>\", elt)\r\n\tcase LISTITEM:\r\n\t\tw.listItem(\"<li>\", elt)\r\n\tcase BLOCKQUOTE:\r\n\t\tw.sp().s(\"<blockquote>\\n\").skipPadding().children(elt).br().s(\"<\/blockquote>\")\r\n\tcase REFERENCE:\r\n\t\t\/* Nonprinting *\/\r\n\tcase NOTE:\r\n\t\t\/* if contents.str == 0, then print note; else ignore, since this\r\n\t\t * is a note block that has been incorporated into the notes list\r\n\t\t *\/\r\n\t\tif elt.contents.str == \"\" {\r\n\t\t\tw.endNotes = append(w.endNotes, elt) \/* add an endnote to global endnotes list *\/\r\n\t\t\tw.notenum++\r\n\t\t\tnn := w.notenum\r\n\t\t\ts = fmt.Sprintf(`<a class=\"noteref\" id=\"fnref%d\" href=\"#fn%d\" title=\"Jump to note %d\">[%d]<\/a>`,\r\n\t\t\t\tnn, nn, nn, nn)\r\n\t\t}\r\n\tdefault:\r\n\t\tlog.Fatalf(\"htmlOut.elem encountered unknown element key = %d\\n\", elt.key)\r\n\t}\r\n\tif s != \"\" {\r\n\t\tw.s(s)\r\n\t}\r\n\treturn w\r\n}\r\n\r\nfunc (w *htmlOut) printEndnotes() {\r\n\tcounter := 0\r\n\r\n\tw.s(\"<hr\/>\\n<ol id=\\\"notes\\\">\")\r\n\tfor _, elt := range w.endNotes {\r\n\t\tcounter++\r\n\t\tw.br().s(fmt.Sprintf(\"<li id=\\\"fn%d\\\">\\n\", counter)).skipPadding()\r\n\t\tw.children(elt)\r\n\t\tw.s(fmt.Sprintf(\" <a href=\\\"#fnref%d\\\" title=\\\"Jump back to reference\\\">[back]<\/a>\", counter))\r\n\t\tw.br().s(\"<\/li>\")\r\n\t}\r\n\tw.br().s(\"<\/ol>\")\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/\/ A http.RoundTripper that retries common errors, with convenience constructors.\n\/\/\n\/\/ NOTE: This meant for TEMPORARY, TRANSIENT ERRORS.\n\/\/ Do not use for waiting on operations or polling of resource state,\n\/\/ especially if the expected state (operation done, resource ready, etc)\n\/\/ takes longer to reach than the default client Timeout.\n\/\/ In those cases, retryTimeDuration(...)\/resource.Retry with appropriate timeout\n\/\/ and error predicates\/handling should be used as a wrapper around the request\n\/\/ instead.\n\/\/\n\/\/ Example Usage:\n\/\/ For handwritten\/Go clients, the retry transport should be provided via\n\/\/ the main client or a shallow copy of the HTTP resources, depending on the\n\/\/ API-specific retry predicates.\n\/\/ Example Usage in Terraform Config:\n\/\/\tclient := oauth2.NewClient(ctx, tokenSource)\n\/\/\t\/\/ Create with default retry predicates\n\/\/\tclient.Transport := NewTransportWithDefaultRetries(client.Transport, defaultTimeout)\n\/\/\n\/\/\t\/\/ If API uses just default retry predicates:\n\/\/\tc.clientCompute, err = compute.NewService(ctx, option.WithHTTPClient(client))\n\/\/\t...\n\/\/\t\/\/ If API needs custom additional retry predicates:\n\/\/\tsqlAdminHttpClient := ClientWithAdditionalRetries(client, retryTransport,\n\/\/\t\t\tisTemporarySqlError1,\n\/\/\t\t\tisTemporarySqlError2)\n\/\/\tc.clientSqlAdmin, err = compute.NewService(ctx, option.WithHTTPClient(sqlAdminHttpClient))\n\/\/ ...\n\npackage google\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"google.golang.org\/api\/googleapi\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"time\"\n)\n\nconst defaultRetryTransportTimeoutSec = 30\n\n\/\/ NewTransportWithDefaultRetries constructs a default retryTransport that will retry common temporary errors\nfunc NewTransportWithDefaultRetries(t http.RoundTripper) *retryTransport {\n\treturn &retryTransport{\n\t\tretryPredicates: defaultErrorRetryPredicates,\n\t\tinternal: t,\n\t}\n}\n\n\/\/ Helper method to create a shallow copy of an HTTP client with a shallow-copied retryTransport\n\/\/ s.t. the base HTTP transport is the same (i.e. client connection pools are shared, retryPredicates are different)\nfunc ClientWithAdditionalRetries(baseClient *http.Client, baseRetryTransport *retryTransport, predicates ...RetryErrorPredicateFunc) *http.Client {\n\tcopied := *baseClient\n\tif baseRetryTransport == nil {\n\t\tbaseRetryTransport = NewTransportWithDefaultRetries(baseClient.Transport)\n\t}\n\tcopied.Transport = baseRetryTransport.WithAddedPredicates(predicates...)\n\treturn &copied\n}\n\n\/\/ Returns a shallow copy of the retry transport with additional retry\n\/\/ predicates but same wrapped http.RoundTripper\nfunc (t *retryTransport) WithAddedPredicates(predicates ...RetryErrorPredicateFunc) *retryTransport {\n\tcopyT := *t\n\tcopyT.retryPredicates = append(t.retryPredicates, predicates...)\n\treturn ©T\n}\n\ntype retryTransport struct {\n\tretryPredicates []RetryErrorPredicateFunc\n\tinternal http.RoundTripper\n}\n\n\/\/ RoundTrip implements the RoundTripper interface method.\n\/\/ It retries the given HTTP request based on the retry predicates\n\/\/ registered under the retryTransport.\nfunc (t *retryTransport) RoundTrip(req *http.Request) (resp *http.Response, respErr error) {\n\t\/\/ Set timeout to default value.\n\tctx := req.Context()\n\tvar ccancel context.CancelFunc\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tctx, ccancel = context.WithTimeout(ctx, defaultRetryTransportTimeoutSec*time.Second)\n\t\tdefer func() {\n\t\t\tif ctx.Err() == nil {\n\t\t\t\t\/\/ Cleanup child context created for retry loop if ctx not done.\n\t\t\t\tccancel()\n\t\t\t}\n\t\t}()\n\t}\n\n\tattempts := 0\n\tbackoff := time.Millisecond * 500\n\tnextBackoff := time.Millisecond * 500\n\n\tlog.Printf(\"[DEBUG] Retry Transport: starting RoundTrip retry loop\")\nRetry:\n\tfor {\n\t\tlog.Printf(\"[DEBUG] Retry Transport: request attempt %d\", attempts)\n\n\t\t\/\/ Copy the request - we dont want to use the original request as\n\t\t\/\/ RoundTrip contract says request body can\/will be consumed\n\t\tnewRequest, copyErr := copyHttpRequest(req)\n\t\tif copyErr != nil {\n\t\t\trespErr = errwrap.Wrapf(\"unable to copy invalid http.Request for retry: {{err}}\", copyErr)\n\t\t\tbreak Retry\n\t\t}\n\n\t\t\/\/ Do the wrapped Roundtrip. This is one request in the retry loop.\n\t\tresp, respErr = t.internal.RoundTrip(newRequest)\n\t\tattempts++\n\n\t\tretryErr := t.checkForRetryableError(resp, respErr)\n\t\tif retryErr == nil {\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, last request was successful\")\n\t\t\tbreak Retry\n\t\t}\n\t\tif !retryErr.Retryable {\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, last request failed with non-retryable error: %s\", retryErr.Err)\n\t\t\tbreak Retry\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Retry Transport: Waiting %s before trying request again\", backoff)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, context done: %v\", ctx.Err())\n\t\t\tbreak Retry\n\t\tcase <-time.After(backoff):\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Finished waiting %s before next retry\", backoff)\n\n\t\t\t\/\/ Fibonnaci backoff - 0.5, 1, 1.5, 2.5, 4, 6.5, 10.5, ...\n\t\t\tlastBackoff := backoff\n\t\t\tbackoff = backoff + nextBackoff\n\t\t\tnextBackoff = lastBackoff\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tlog.Printf(\"[DEBUG] Retry Transport: Returning after %d attempts\", attempts)\n\treturn resp, respErr\n}\n\n\/\/ copyHttpRequest provides an copy of the given HTTP request for one RoundTrip.\n\/\/ If the request has a non-empty body (io.ReadCloser), the body is deep copied\n\/\/ so it can be consumed.\nfunc copyHttpRequest(req *http.Request) (*http.Request, error) {\n\tnewRequest := *req\n\n\tif req.Body == nil || req.Body == http.NoBody {\n\t\treturn &newRequest, nil\n\t}\n\n\t\/\/ Helpers like http.NewRequest add a GetBody for copying.\n\t\/\/ If not given, we should reject the request.\n\tif req.GetBody == nil {\n\t\treturn nil, errors.New(\"invalid HTTP request for transport, expected request.GetBody for non-empty Body\")\n\t}\n\n\tbd, err := req.GetBody()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewRequest.Body = bd\n\treturn &newRequest, nil\n}\n\n\/\/ checkForRetryableError uses the googleapi.CheckResponse util to check for\n\/\/ errors in the response, and determines whether there is a retryable error.\n\/\/ in response\/response error.\nfunc (t *retryTransport) checkForRetryableError(resp *http.Response, respErr error) *resource.RetryError {\n\tvar errToCheck error\n\n\tif respErr != nil {\n\t\terrToCheck = respErr\n\t} else {\n\t\trespToCheck := *resp\n\t\t\/\/ The RoundTrip contract states that the HTTP response\/response error\n\t\t\/\/ returned cannot be edited. We need to consume the Body to check for\n\t\t\/\/ errors, so we need to create a copy if the Response has a body.\n\t\tif resp.Body != nil && resp.Body != http.NoBody {\n\t\t\t\/\/ Use httputil.DumpResponse since the only important info is\n\t\t\t\/\/ error code and messages in the response body.\n\t\t\tdumpBytes, err := httputil.DumpResponse(resp, true)\n\t\t\tif err != nil {\n\t\t\t\treturn resource.NonRetryableError(fmt.Errorf(\"unable to check response for error: %v\", err))\n\t\t\t}\n\t\t\trespToCheck.Body = ioutil.NopCloser(bytes.NewReader(dumpBytes))\n\t\t}\n\t\terrToCheck = googleapi.CheckResponse(&respToCheck)\n\t}\n\n\tif isRetryableError(errToCheck, t.retryPredicates...) {\n\t\treturn resource.RetryableError(errToCheck)\n\t}\n\treturn resource.NonRetryableError(errToCheck)\n}\n<commit_msg>Consume original request body from retry transport (#3276) (#396)<commit_after>\/\/ A http.RoundTripper that retries common errors, with convenience constructors.\n\/\/\n\/\/ NOTE: This meant for TEMPORARY, TRANSIENT ERRORS.\n\/\/ Do not use for waiting on operations or polling of resource state,\n\/\/ especially if the expected state (operation done, resource ready, etc)\n\/\/ takes longer to reach than the default client Timeout.\n\/\/ In those cases, retryTimeDuration(...)\/resource.Retry with appropriate timeout\n\/\/ and error predicates\/handling should be used as a wrapper around the request\n\/\/ instead.\n\/\/\n\/\/ Example Usage:\n\/\/ For handwritten\/Go clients, the retry transport should be provided via\n\/\/ the main client or a shallow copy of the HTTP resources, depending on the\n\/\/ API-specific retry predicates.\n\/\/ Example Usage in Terraform Config:\n\/\/\tclient := oauth2.NewClient(ctx, tokenSource)\n\/\/\t\/\/ Create with default retry predicates\n\/\/\tclient.Transport := NewTransportWithDefaultRetries(client.Transport, defaultTimeout)\n\/\/\n\/\/\t\/\/ If API uses just default retry predicates:\n\/\/\tc.clientCompute, err = compute.NewService(ctx, option.WithHTTPClient(client))\n\/\/\t...\n\/\/\t\/\/ If API needs custom additional retry predicates:\n\/\/\tsqlAdminHttpClient := ClientWithAdditionalRetries(client, retryTransport,\n\/\/\t\t\tisTemporarySqlError1,\n\/\/\t\t\tisTemporarySqlError2)\n\/\/\tc.clientSqlAdmin, err = compute.NewService(ctx, option.WithHTTPClient(sqlAdminHttpClient))\n\/\/ ...\n\npackage google\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"google.golang.org\/api\/googleapi\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"time\"\n)\n\nconst defaultRetryTransportTimeoutSec = 30\n\n\/\/ NewTransportWithDefaultRetries constructs a default retryTransport that will retry common temporary errors\nfunc NewTransportWithDefaultRetries(t http.RoundTripper) *retryTransport {\n\treturn &retryTransport{\n\t\tretryPredicates: defaultErrorRetryPredicates,\n\t\tinternal: t,\n\t}\n}\n\n\/\/ Helper method to create a shallow copy of an HTTP client with a shallow-copied retryTransport\n\/\/ s.t. the base HTTP transport is the same (i.e. client connection pools are shared, retryPredicates are different)\nfunc ClientWithAdditionalRetries(baseClient *http.Client, baseRetryTransport *retryTransport, predicates ...RetryErrorPredicateFunc) *http.Client {\n\tcopied := *baseClient\n\tif baseRetryTransport == nil {\n\t\tbaseRetryTransport = NewTransportWithDefaultRetries(baseClient.Transport)\n\t}\n\tcopied.Transport = baseRetryTransport.WithAddedPredicates(predicates...)\n\treturn &copied\n}\n\n\/\/ Returns a shallow copy of the retry transport with additional retry\n\/\/ predicates but same wrapped http.RoundTripper\nfunc (t *retryTransport) WithAddedPredicates(predicates ...RetryErrorPredicateFunc) *retryTransport {\n\tcopyT := *t\n\tcopyT.retryPredicates = append(t.retryPredicates, predicates...)\n\treturn ©T\n}\n\ntype retryTransport struct {\n\tretryPredicates []RetryErrorPredicateFunc\n\tinternal http.RoundTripper\n}\n\n\/\/ RoundTrip implements the RoundTripper interface method.\n\/\/ It retries the given HTTP request based on the retry predicates\n\/\/ registered under the retryTransport.\nfunc (t *retryTransport) RoundTrip(req *http.Request) (resp *http.Response, respErr error) {\n\t\/\/ Set timeout to default value.\n\tctx := req.Context()\n\tvar ccancel context.CancelFunc\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tctx, ccancel = context.WithTimeout(ctx, defaultRetryTransportTimeoutSec*time.Second)\n\t\tdefer func() {\n\t\t\tif ctx.Err() == nil {\n\t\t\t\t\/\/ Cleanup child context created for retry loop if ctx not done.\n\t\t\t\tccancel()\n\t\t\t}\n\t\t}()\n\t}\n\n\tattempts := 0\n\tbackoff := time.Millisecond * 500\n\tnextBackoff := time.Millisecond * 500\n\n\tlog.Printf(\"[DEBUG] Retry Transport: starting RoundTrip retry loop\")\nRetry:\n\tfor {\n\t\tlog.Printf(\"[DEBUG] Retry Transport: request attempt %d\", attempts)\n\n\t\t\/\/ Copy the request - we dont want to use the original request as\n\t\t\/\/ RoundTrip contract says request body can\/will be consumed\n\t\tnewRequest, copyErr := copyHttpRequest(req)\n\t\tif copyErr != nil {\n\t\t\trespErr = errwrap.Wrapf(\"unable to copy invalid http.Request for retry: {{err}}\", copyErr)\n\t\t\tbreak Retry\n\t\t}\n\n\t\t\/\/ Do the wrapped Roundtrip. This is one request in the retry loop.\n\t\tresp, respErr = t.internal.RoundTrip(newRequest)\n\t\tattempts++\n\n\t\tretryErr := t.checkForRetryableError(resp, respErr)\n\t\tif retryErr == nil {\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, last request was successful\")\n\t\t\tbreak Retry\n\t\t}\n\t\tif !retryErr.Retryable {\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, last request failed with non-retryable error: %s\", retryErr.Err)\n\t\t\tbreak Retry\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Retry Transport: Waiting %s before trying request again\", backoff)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Stopping retries, context done: %v\", ctx.Err())\n\t\t\tbreak Retry\n\t\tcase <-time.After(backoff):\n\t\t\tlog.Printf(\"[DEBUG] Retry Transport: Finished waiting %s before next retry\", backoff)\n\n\t\t\t\/\/ Fibonnaci backoff - 0.5, 1, 1.5, 2.5, 4, 6.5, 10.5, ...\n\t\t\tlastBackoff := backoff\n\t\t\tbackoff = backoff + nextBackoff\n\t\t\tnextBackoff = lastBackoff\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ VCR depends on the original request body being consumed, so consume it here\n\t_, err := httputil.DumpRequestOut(req, true)\n\tif err != nil {\n\t\tlog.Printf(\"[DEBUG] Retry Transport: Reading request failed: %v\", err)\n\t}\n\n\tlog.Printf(\"[DEBUG] Retry Transport: Returning after %d attempts\", attempts)\n\treturn resp, respErr\n}\n\n\/\/ copyHttpRequest provides an copy of the given HTTP request for one RoundTrip.\n\/\/ If the request has a non-empty body (io.ReadCloser), the body is deep copied\n\/\/ so it can be consumed.\nfunc copyHttpRequest(req *http.Request) (*http.Request, error) {\n\tnewRequest := *req\n\n\tif req.Body == nil || req.Body == http.NoBody {\n\t\treturn &newRequest, nil\n\t}\n\n\t\/\/ Helpers like http.NewRequest add a GetBody for copying.\n\t\/\/ If not given, we should reject the request.\n\tif req.GetBody == nil {\n\t\treturn nil, errors.New(\"invalid HTTP request for transport, expected request.GetBody for non-empty Body\")\n\t}\n\n\tbd, err := req.GetBody()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewRequest.Body = bd\n\treturn &newRequest, nil\n}\n\n\/\/ checkForRetryableError uses the googleapi.CheckResponse util to check for\n\/\/ errors in the response, and determines whether there is a retryable error.\n\/\/ in response\/response error.\nfunc (t *retryTransport) checkForRetryableError(resp *http.Response, respErr error) *resource.RetryError {\n\tvar errToCheck error\n\n\tif respErr != nil {\n\t\terrToCheck = respErr\n\t} else {\n\t\trespToCheck := *resp\n\t\t\/\/ The RoundTrip contract states that the HTTP response\/response error\n\t\t\/\/ returned cannot be edited. We need to consume the Body to check for\n\t\t\/\/ errors, so we need to create a copy if the Response has a body.\n\t\tif resp.Body != nil && resp.Body != http.NoBody {\n\t\t\t\/\/ Use httputil.DumpResponse since the only important info is\n\t\t\t\/\/ error code and messages in the response body.\n\t\t\tdumpBytes, err := httputil.DumpResponse(resp, true)\n\t\t\tif err != nil {\n\t\t\t\treturn resource.NonRetryableError(fmt.Errorf(\"unable to check response for error: %v\", err))\n\t\t\t}\n\t\t\trespToCheck.Body = ioutil.NopCloser(bytes.NewReader(dumpBytes))\n\t\t}\n\t\terrToCheck = googleapi.CheckResponse(&respToCheck)\n\t}\n\n\tif isRetryableError(errToCheck, t.retryPredicates...) {\n\t\treturn resource.RetryableError(errToCheck)\n\t}\n\treturn resource.NonRetryableError(errToCheck)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build coprocess\n\/\/ +build lua\n\npackage main\n\n\/*\n#cgo pkg-config: luajit\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"coprocess\/sds\/sds.h\"\n\n#include \"coprocess\/api.h\"\n\n#include \"coprocess\/lua\/binding.h\"\n\n#include <lua.h>\n#include <lualib.h>\n#include <lauxlib.h>\n\nstatic void LuaInit() {\n \/\/ TODO: Cache the middlewares.\n}\n\nstatic struct CoProcessMessage* LuaDispatchHook(struct CoProcessMessage* object) {\n\n struct CoProcessMessage* outputObject = malloc(sizeof *outputObject);\n\n lua_State *L = luaL_newstate();\n\n luaL_openlibs(L);\n luaL_dofile(L, \"coprocess\/lua\/tyk\/core.lua\");\n\n lua_getglobal(L, \"dispatch\");\n lua_pushlstring(L, object->p_data, object->length);\n lua_pcall(L, 1, 1, 0);\n\n size_t lua_output_length = lua_tointeger(L, 0);\n const char* lua_output_data = lua_tolstring(L, 1, &lua_output_length);\n\n char* output = malloc(lua_output_length);\n memmove(output, lua_output_data, lua_output_length);\n\n lua_close(L);\n\n outputObject->p_data = (void*)output;\n outputObject->length = lua_output_length;\n\n return outputObject;\n}\n*\/\nimport \"C\"\n\nimport(\n \"unsafe\"\n\n \"github.com\/TykTechnologies\/tyk\/coprocess\"\n \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ CoProcessName declares the driver name.\nconst CoProcessName string = \"lua\"\n\n\/\/ LuaDispatcher implements a coprocess.Dispatcher\ntype LuaDispatcher struct {\n\tcoprocess.Dispatcher\n}\n\n\/\/ Dispatch takes a CoProcessMessage and sends it to the CP.\nfunc (d *LuaDispatcher) Dispatch(objectPtr unsafe.Pointer) unsafe.Pointer {\n\tvar object *C.struct_CoProcessMessage\n\tobject = (*C.struct_CoProcessMessage)(objectPtr)\n\n\tvar newObjectPtr *C.struct_CoProcessMessage\n\tnewObjectPtr = C.LuaDispatchHook(object)\n\n\treturn unsafe.Pointer(newObjectPtr)\n}\n\nfunc LuaInit() {\n C.LuaInit()\n}\n\n\/\/ NewCoProcessDispatcher wraps all the actions needed for this CP.\nfunc NewCoProcessDispatcher() (dispatcher coprocess.Dispatcher, err error) {\n\n LuaInit()\n\n dispatcher, err = &LuaDispatcher{}, nil\n\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"coprocess\",\n\t\t}).Error(err)\n\t}\n\n\treturn dispatcher, err\n}\n<commit_msg>Use JSON for Lua CP.<commit_after>\/\/ +build coprocess\n\/\/ +build lua\n\npackage main\n\n\/*\n#cgo pkg-config: luajit\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"coprocess\/sds\/sds.h\"\n\n#include \"coprocess\/api.h\"\n\n#include \"coprocess\/lua\/binding.h\"\n\n#include <lua.h>\n#include <lualib.h>\n#include <lauxlib.h>\n\nstatic void LuaInit() {\n \/\/ TODO: Cache the middlewares.\n}\n\nstatic struct CoProcessMessage* LuaDispatchHook(struct CoProcessMessage* object) {\n\n struct CoProcessMessage* outputObject = malloc(sizeof *outputObject);\n\n lua_State *L = luaL_newstate();\n\n luaL_openlibs(L);\n luaL_dofile(L, \"coprocess\/lua\/tyk\/core.lua\");\n\n lua_getglobal(L, \"dispatch\");\n lua_pushlstring(L, object->p_data, object->length);\n lua_pcall(L, 1, 1, 0);\n\n size_t lua_output_length = lua_tointeger(L, 0);\n const char* lua_output_data = lua_tolstring(L, 1, &lua_output_length);\n\n char* output = malloc(lua_output_length);\n memmove(output, lua_output_data, lua_output_length);\n\n lua_close(L);\n\n outputObject->p_data = (void*)output;\n outputObject->length = lua_output_length;\n\n return outputObject;\n}\n*\/\nimport \"C\"\n\nimport(\n \"unsafe\"\n\n \"github.com\/TykTechnologies\/tyk\/coprocess\"\n \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ CoProcessName declares the driver name.\nconst CoProcessName string = \"lua\"\n\n\/\/ MessageType sets the default message type.\nvar MessageType = coprocess.JsonMessage\n\n\/\/ LuaDispatcher implements a coprocess.Dispatcher\ntype LuaDispatcher struct {\n\tcoprocess.Dispatcher\n}\n\n\/\/ Dispatch takes a CoProcessMessage and sends it to the CP.\nfunc (d *LuaDispatcher) Dispatch(objectPtr unsafe.Pointer) unsafe.Pointer {\n\tvar object *C.struct_CoProcessMessage\n\tobject = (*C.struct_CoProcessMessage)(objectPtr)\n\n\tvar newObjectPtr *C.struct_CoProcessMessage\n\tnewObjectPtr = C.LuaDispatchHook(object)\n\n\treturn unsafe.Pointer(newObjectPtr)\n}\n\nfunc LuaInit() {\n C.LuaInit()\n}\n\n\/\/ NewCoProcessDispatcher wraps all the actions needed for this CP.\nfunc NewCoProcessDispatcher() (dispatcher coprocess.Dispatcher, err error) {\n\n LuaInit()\n\n dispatcher, err = &LuaDispatcher{}, nil\n\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"coprocess\",\n\t\t}).Error(err)\n\t}\n\n\treturn dispatcher, err\n}\n<|endoftext|>"} {"text":"<commit_before>package slack\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-chat-bot\/bot\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nvar (\n\trtm *slack.RTM\n\tapi *slack.Client\n)\n\nfunc responseHandler(target string, message string, sender *bot.User) {\n\trtm.SendMessage(rtm.NewOutgoingMessage(message, target))\n}\n\n\/\/ Extracts user information from slack API\nfunc extractUser(userID string) *bot.User {\n\tslackUser, err := api.GetUserInfo(userID)\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving slack user: %s\\n\", err)\n\t\treturn &bot.User{Nick: userID}\n\t}\n\treturn &bot.User{Nick: userID, RealName: slackUser.Profile.RealName}\n}\n\n\/\/ Run connects to slack RTM API using the provided token\nfunc Run(token string) {\n\tapi = slack.New(token)\n\trtm = api.NewRTM()\n\n\tb := bot.New(&bot.Handlers{\n\t\tResponse: responseHandler,\n\t})\n\n\tgo rtm.ManageConnection()\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-rtm.IncomingEvents:\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tb.MessageReceived(ev.Channel, ev.Text, extractUser(ev.User))\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tfmt.Printf(\"Error: %s\\n\", ev.Error())\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tfmt.Printf(\"Invalid credentials\")\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>* Updated slack's extractUser to send return the nickname instead of the id<commit_after>package slack\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/go-chat-bot\/bot\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nvar (\n\trtm *slack.RTM\n\tapi *slack.Client\n)\n\nfunc responseHandler(target string, message string, sender *bot.User) {\n\trtm.SendMessage(rtm.NewOutgoingMessage(message, target))\n}\n\n\/\/ Extracts user information from slack API\nfunc extractUser(userID string) *bot.User {\n\tslackUser, err := api.GetUserInfo(userID)\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving slack user: %s\\n\", err)\n\t\treturn &bot.User{Nick: userID}\n\t}\n\treturn &bot.User{Nick: slackUser.Name, RealName: slackUser.Profile.RealName}\n}\n\n\/\/ Run connects to slack RTM API using the provided token\nfunc Run(token string) {\n\tapi = slack.New(token)\n\trtm = api.NewRTM()\n\n\tb := bot.New(&bot.Handlers{\n\t\tResponse: responseHandler,\n\t})\n\n\tgo rtm.ManageConnection()\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-rtm.IncomingEvents:\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tb.MessageReceived(ev.Channel, ev.Text, extractUser(ev.User))\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tfmt.Printf(\"Error: %s\\n\", ev.Error())\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tfmt.Printf(\"Invalid credentials\")\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dragonfi\/go-retro\/snake\/arena\"\n\t\"github.com\/nsf\/termbox-go\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc putString(x, y int, s string) {\n\tfor i, r := range s {\n\t\ttermbox.SetCell(x+i, y, r, 0, 0)\n\t}\n}\n\ntype Position struct {\n\tX, Y int\n}\n\ntype ArenaWidget struct {\n\tarena arena.Arena\n\toffset Position\n\tsize Position\n\tstate arena.State\n}\n\nfunc (w *ArenaWidget) Tick() {\n\tw.arena.Tick()\n\tw.state = w.arena.State()\n}\n\nfunc (w *ArenaWidget) SetSnakeHeading(direction arena.Direction) {\n\tw.arena.SetSnakeHeading(0, direction)\n}\n\nfunc (w ArenaWidget) setCell(x, y int, r rune, fg, bg termbox.Attribute) {\n\ttermbox.SetCell(w.offset.X+x, w.offset.Y+y, r, fg, bg)\n}\n\nfunc (w ArenaWidget) putString(x, y int, str string) {\n\tputString(w.offset.X+x, w.offset.Y+y, str)\n}\n\nfunc (w ArenaWidget) drawBorder() {\n\ts := w.state\n\tfor i := -1; i <= s.Size.X; i++ {\n\t\tfor j := -1; j <= s.Size.Y; j++ {\n\t\t\tif i == -1 || i == s.Size.X || j == -1 || j == s.Size.Y {\n\t\t\t\tw.setCell(i, j, '#', 0, 0)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w ArenaWidget) drawSnakes() {\n\tfor _, snake := range w.state.Snakes {\n\t\tw.drawSnake(snake)\n\t}\n}\nfunc (w ArenaWidget) drawSnake(snake arena.Snake) {\n\tfor _, p := range snake.Segments {\n\t\tw.setCell(p.X, p.Y, '#', 0, 0)\n\t}\n}\n\nfunc (w ArenaWidget) drawPointItem() {\n\tp := w.state.PointItem\n\tw.setCell(p.X, p.Y, '*', 0, 0)\n}\n\nfunc (w ArenaWidget) putGameOverText() {\n\ts := w.state\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-3, \"##################\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-2, \"# Game Over #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-1, \"# #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+0, \"# Enter: Restart #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+1, \"# ESC: Exit #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+2, \"##################\")\n}\n\nfunc (w ArenaWidget) putScore() {\n\ts := w.state\n\tfor i, snake := range s.Snakes {\n\t\tw.putString(1, 1+i, fmt.Sprintf(\"Score: %d\", len(snake.Segments)))\n\t}\n}\n\nfunc (w ArenaWidget) Draw() {\n\tw.drawBorder()\n\tw.putScore()\n\tw.drawSnakes()\n\tw.drawPointItem()\n\tif w.state.GameIsOver {\n\t\tw.putGameOverText()\n\t}\n}\n\nfunc (w *ArenaWidget) ResetArena() {\n\tw.arena = arena.New(w.size.X, w.size.Y)\n\tw.arena.AddSnake(w.size.X\/2, w.size.Y\/2, 5, arena.EAST)\n}\n\nfunc NewArenaWidget(ox, oy, x, y int) ArenaWidget {\n\tw := ArenaWidget{offset: Position{ox, oy}, size: Position{x, y}}\n\tw.ResetArena()\n\treturn w\n}\n\nfunc eventChannel() <-chan termbox.Event {\n\tevents := make(chan termbox.Event)\n\tgo func() {\n\t\tfor {\n\t\t\tevents <- termbox.PollEvent()\n\t\t}\n\t}()\n\treturn events\n}\n\nfunc main() {\n\trand.Seed(time.Now().UnixNano())\n\ttermbox.Init()\n\tdefer termbox.Close()\n\tx, y := termbox.Size()\n\toffsetx, offsety := 2, 2\n\taw := NewArenaWidget(offsetx, offsety, x-2*offsetx, y-2*offsety)\n\ttick := time.Tick(100 * time.Millisecond)\n\tevent := eventChannel()\n\trunning := true\n\n\thandleKey := map[termbox.Key]func(){\n\t\ttermbox.KeyEsc: func() { running = false },\n\t\ttermbox.KeyEnter: func() { aw.ResetArena() },\n\t\ttermbox.KeyArrowRight: func() { aw.SetSnakeHeading(arena.EAST) },\n\t\ttermbox.KeyArrowUp: func() { aw.SetSnakeHeading(arena.NORTH) },\n\t\ttermbox.KeyArrowLeft: func() { aw.SetSnakeHeading(arena.WEST) },\n\t\ttermbox.KeyArrowDown: func() { aw.SetSnakeHeading(arena.SOUTH) },\n\t}\n\n\tfor running {\n\t\ttermbox.Clear(0, 0)\n\t\taw.Draw()\n\t\ttermbox.Flush()\n\t\tselect {\n\t\tcase ev := <-event:\n\t\t\tif ev.Type == termbox.EventKey {\n\t\t\t\tf := handleKey[ev.Key]\n\t\t\t\tif f != nil {\n\t\t\t\t\tf()\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-tick:\n\t\t\taw.Tick()\n\t\t}\n\t}\n}\n<commit_msg>Make snake.go into a two player game.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dragonfi\/go-retro\/snake\/arena\"\n\t\"github.com\/nsf\/termbox-go\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc putString(x, y int, s string) {\n\tfor i, r := range s {\n\t\ttermbox.SetCell(x+i, y, r, 0, 0)\n\t}\n}\n\ntype Position struct {\n\tX, Y int\n}\n\ntype ArenaWidget struct {\n\tarena arena.Arena\n\toffset Position\n\tsize Position\n\tstate arena.State\n}\n\nfunc (w *ArenaWidget) Tick() {\n\tw.arena.Tick()\n\tw.state = w.arena.State()\n}\n\nfunc (w *ArenaWidget) SetSnakeHeading(snake int, direction arena.Direction) {\n\tw.arena.SetSnakeHeading(snake, direction)\n}\n\nfunc (w ArenaWidget) setCell(x, y int, r rune, fg, bg termbox.Attribute) {\n\ttermbox.SetCell(w.offset.X+x, w.offset.Y+y, r, fg, bg)\n}\n\nfunc (w ArenaWidget) putString(x, y int, str string) {\n\tputString(w.offset.X+x, w.offset.Y+y, str)\n}\n\nfunc (w ArenaWidget) drawBorder() {\n\ts := w.state\n\tfor i := -1; i <= s.Size.X; i++ {\n\t\tfor j := -1; j <= s.Size.Y; j++ {\n\t\t\tif i == -1 || i == s.Size.X || j == -1 || j == s.Size.Y {\n\t\t\t\tw.setCell(i, j, '#', 0, 0)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w ArenaWidget) drawSnakes() {\n\tfor _, snake := range w.state.Snakes {\n\t\tw.drawSnake(snake)\n\t}\n}\nfunc (w ArenaWidget) drawSnake(snake arena.Snake) {\n\tfor _, p := range snake.Segments {\n\t\tw.setCell(p.X, p.Y, '#', 0, 0)\n\t}\n}\n\nfunc (w ArenaWidget) drawPointItem() {\n\tp := w.state.PointItem\n\tw.setCell(p.X, p.Y, '*', 0, 0)\n}\n\nfunc (w ArenaWidget) putGameOverText() {\n\ts := w.state\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-3, \"##################\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-2, \"# Game Over #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2-1, \"# #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+0, \"# Enter: Restart #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+1, \"# ESC: Exit #\")\n\tw.putString(s.Size.X\/2-9, s.Size.Y\/2+2, \"##################\")\n}\n\nfunc (w ArenaWidget) putScore() {\n\ts := w.state\n\tfor i, snake := range s.Snakes {\n\t\tw.putString(1, 1+i, fmt.Sprintf(\"Score: %d\", len(snake.Segments)))\n\t}\n}\n\nfunc (w ArenaWidget) Draw() {\n\tw.drawBorder()\n\tw.putScore()\n\tw.drawSnakes()\n\tw.drawPointItem()\n\tif w.state.GameIsOver {\n\t\tw.putGameOverText()\n\t}\n}\n\nfunc (w *ArenaWidget) ResetArena() {\n\tw.arena = arena.New(w.size.X, w.size.Y)\n\tw.arena.AddSnake(w.size.X\/2, w.size.Y\/2, 5, arena.EAST)\n\tw.arena.AddSnake(w.size.X\/3, w.size.Y\/3, 5, arena.EAST)\n}\n\nfunc NewArenaWidget(ox, oy, x, y int) ArenaWidget {\n\tw := ArenaWidget{offset: Position{ox, oy}, size: Position{x, y}}\n\tw.ResetArena()\n\treturn w\n}\n\nfunc eventChannel() <-chan termbox.Event {\n\tevents := make(chan termbox.Event)\n\tgo func() {\n\t\tfor {\n\t\t\tevents <- termbox.PollEvent()\n\t\t}\n\t}()\n\treturn events\n}\n\nfunc main() {\n\trand.Seed(time.Now().UnixNano())\n\ttermbox.Init()\n\tdefer termbox.Close()\n\tx, y := termbox.Size()\n\toffsetx, offsety := 2, 2\n\taw := NewArenaWidget(offsetx, offsety, x-2*offsetx, y-2*offsety)\n\ttick := time.Tick(100 * time.Millisecond)\n\tevent := eventChannel()\n\trunning := true\n\n\thandleKey := map[termbox.Key]func(){\n\t\ttermbox.KeyEsc: func() { running = false },\n\t\ttermbox.KeyEnter: func() { aw.ResetArena() },\n\t\ttermbox.KeyArrowRight: func() { aw.SetSnakeHeading(0, arena.EAST) },\n\t\ttermbox.KeyArrowUp: func() { aw.SetSnakeHeading(0, arena.NORTH) },\n\t\ttermbox.KeyArrowLeft: func() { aw.SetSnakeHeading(0, arena.WEST) },\n\t\ttermbox.KeyArrowDown: func() { aw.SetSnakeHeading(0, arena.SOUTH) },\n\t}\n\n\thandleCh := map[rune]func(){\n\t\t'D': func() { aw.SetSnakeHeading(1, arena.EAST) },\n\t\t'W': func() { aw.SetSnakeHeading(1, arena.NORTH) },\n\t\t'A': func() { aw.SetSnakeHeading(1, arena.WEST) },\n\t\t'S': func() { aw.SetSnakeHeading(1, arena.SOUTH) },\n\t\t'd': func() { aw.SetSnakeHeading(1, arena.EAST) },\n\t\t'w': func() { aw.SetSnakeHeading(1, arena.NORTH) },\n\t\t'a': func() { aw.SetSnakeHeading(1, arena.WEST) },\n\t\t's': func() { aw.SetSnakeHeading(1, arena.SOUTH) },\n\t}\n\n\tfor running {\n\t\ttermbox.Clear(0, 0)\n\t\taw.Draw()\n\t\ttermbox.Flush()\n\t\tselect {\n\t\tcase ev := <-event:\n\t\t\tif ev.Type == termbox.EventKey {\n\t\t\t\tf := handleCh[ev.Ch]\n\t\t\t\tif ev.Ch == 0 {\n\t\t\t\t\tf = handleKey[ev.Key]\n\t\t\t\t}\n\t\t\t\tif f != nil {\n\t\t\t\t\tf()\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-tick:\n\t\t\taw.Tick()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package filesys\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/grace\"\n\n\t\"github.com\/seaweedfs\/fuse\"\n\t\"github.com\/seaweedfs\/fuse\/fs\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filesys\/meta_cache\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/chunk_cache\"\n)\n\ntype Option struct {\n\tFilerGrpcAddress string\n\tGrpcDialOption grpc.DialOption\n\tFilerMountRootPath string\n\tCollection string\n\tReplication string\n\tTtlSec int32\n\tChunkSizeLimit int64\n\tCacheDir string\n\tCacheSizeMB int64\n\tDataCenter string\n\tEntryCacheTtl time.Duration\n\tUmask os.FileMode\n\n\tMountCtime time.Time\n\tMountMtime time.Time\n\n\tOutsideContainerClusterMode bool \/\/ whether the mount runs outside SeaweedFS containers\n\tCipher bool \/\/ whether encrypt data on volume server\n\tUidGidMapper *meta_cache.UidGidMapper\n}\n\nvar _ = fs.FS(&WFS{})\nvar _ = fs.FSStatfser(&WFS{})\n\ntype WFS struct {\n\toption *Option\n\n\t\/\/ contains all open handles, protected by handlesLock\n\thandlesLock sync.Mutex\n\thandles map[uint64]*FileHandle\n\n\tbufPool sync.Pool\n\n\tstats statsCache\n\n\troot fs.Node\n\tfsNodeCache *FsCache\n\n\tchunkCache *chunk_cache.TieredChunkCache\n\tmetaCache *meta_cache.MetaCache\n\tsignature int32\n}\ntype statsCache struct {\n\tfiler_pb.StatisticsResponse\n\tlastChecked int64 \/\/ unix time in seconds\n}\n\nfunc NewSeaweedFileSystem(option *Option) *WFS {\n\twfs := &WFS{\n\t\toption: option,\n\t\thandles: make(map[uint64]*FileHandle),\n\t\tbufPool: sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, option.ChunkSizeLimit)\n\t\t\t},\n\t\t},\n\t\tsignature: util.RandomInt32(),\n\t}\n\tcacheUniqueId := util.Md5String([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]\n\tcacheDir := path.Join(option.CacheDir, cacheUniqueId)\n\tif option.CacheSizeMB > 0 {\n\t\tos.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)\n\t\twfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB)\n\t}\n\n\twfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, \"meta\"), option.UidGidMapper)\n\tstartTime := time.Now()\n\tgo meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())\n\tgrace.OnInterrupt(func() {\n\t\twfs.metaCache.Shutdown()\n\t})\n\n\tentry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath))\n\twfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}\n\twfs.fsNodeCache = newFsCache(wfs.root)\n\n\treturn wfs\n}\n\nfunc (wfs *WFS) Root() (fs.Node, error) {\n\treturn wfs.root, nil\n}\n\nfunc (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {\n\n\tfullpath := file.fullpath()\n\tglog.V(4).Infof(\"AcquireHandle %s uid=%d gid=%d\", fullpath, uid, gid)\n\n\twfs.handlesLock.Lock()\n\tdefer wfs.handlesLock.Unlock()\n\n\tinodeId := file.fullpath().AsInode()\n\texistingHandle, found := wfs.handles[inodeId]\n\tif found && existingHandle != nil {\n\t\tfile.isOpen++\n\t\treturn existingHandle\n\t}\n\n\tfileHandle = newFileHandle(file, uid, gid)\n\tfile.maybeLoadEntry(context.Background())\n\tfile.isOpen++\n\n\twfs.handles[inodeId] = fileHandle\n\tfileHandle.handle = inodeId\n\n\treturn\n}\n\nfunc (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {\n\twfs.handlesLock.Lock()\n\tdefer wfs.handlesLock.Unlock()\n\n\tglog.V(4).Infof(\"%s ReleaseHandle id %d current handles length %d\", fullpath, handleId, len(wfs.handles))\n\n\tdelete(wfs.handles, fullpath.AsInode())\n\n\treturn\n}\n\n\/\/ Statfs is called to obtain file system metadata. Implements fuse.FSStatfser\nfunc (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {\n\n\tglog.V(4).Infof(\"reading fs stats: %+v\", req)\n\n\tif wfs.stats.lastChecked < time.Now().Unix()-20 {\n\n\t\terr := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\t\trequest := &filer_pb.StatisticsRequest{\n\t\t\t\tCollection: wfs.option.Collection,\n\t\t\t\tReplication: wfs.option.Replication,\n\t\t\t\tTtl: fmt.Sprintf(\"%ds\", wfs.option.TtlSec),\n\t\t\t}\n\n\t\t\tglog.V(4).Infof(\"reading filer stats: %+v\", request)\n\t\t\tresp, err := client.Statistics(context.Background(), request)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(0).Infof(\"reading filer stats %v: %v\", request, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"read filer stats: %+v\", resp)\n\n\t\t\twfs.stats.TotalSize = resp.TotalSize\n\t\t\twfs.stats.UsedSize = resp.UsedSize\n\t\t\twfs.stats.FileCount = resp.FileCount\n\t\t\twfs.stats.lastChecked = time.Now().Unix()\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"filer Statistics: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttotalDiskSize := wfs.stats.TotalSize\n\tusedDiskSize := wfs.stats.UsedSize\n\tactualFileCount := wfs.stats.FileCount\n\n\t\/\/ Compute the total number of available blocks\n\tresp.Blocks = totalDiskSize \/ blockSize\n\n\t\/\/ Compute the number of used blocks\n\tnumBlocks := uint64(usedDiskSize \/ blockSize)\n\n\t\/\/ Report the number of free and available blocks for the block size\n\tresp.Bfree = resp.Blocks - numBlocks\n\tresp.Bavail = resp.Blocks - numBlocks\n\tresp.Bsize = uint32(blockSize)\n\n\t\/\/ Report the total number of possible files in the file system (and those free)\n\tresp.Files = math.MaxInt64\n\tresp.Ffree = math.MaxInt64 - actualFileCount\n\n\t\/\/ Report the maximum length of a name and the minimum fragment size\n\tresp.Namelen = 1024\n\tresp.Frsize = uint32(blockSize)\n\n\treturn nil\n}\n\nfunc (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) {\n\tif entry.Attributes == nil {\n\t\treturn\n\t}\n\tentry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid)\n}\nfunc (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) {\n\tif entry.Attributes == nil {\n\t\treturn\n\t}\n\tentry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid)\n}\n<commit_msg>code style<commit_after>package filesys\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/grace\"\n\n\t\"github.com\/seaweedfs\/fuse\"\n\t\"github.com\/seaweedfs\/fuse\/fs\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filesys\/meta_cache\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/chunk_cache\"\n)\n\ntype Option struct {\n\tFilerGrpcAddress string\n\tGrpcDialOption grpc.DialOption\n\tFilerMountRootPath string\n\tCollection string\n\tReplication string\n\tTtlSec int32\n\tChunkSizeLimit int64\n\tCacheDir string\n\tCacheSizeMB int64\n\tDataCenter string\n\tEntryCacheTtl time.Duration\n\tUmask os.FileMode\n\n\tMountCtime time.Time\n\tMountMtime time.Time\n\n\tOutsideContainerClusterMode bool \/\/ whether the mount runs outside SeaweedFS containers\n\tCipher bool \/\/ whether encrypt data on volume server\n\tUidGidMapper *meta_cache.UidGidMapper\n}\n\nvar _ = fs.FS(&WFS{})\nvar _ = fs.FSStatfser(&WFS{})\n\ntype WFS struct {\n\toption *Option\n\n\t\/\/ contains all open handles, protected by handlesLock\n\thandlesLock sync.Mutex\n\thandles map[uint64]*FileHandle\n\n\tbufPool sync.Pool\n\n\tstats statsCache\n\n\troot fs.Node\n\tfsNodeCache *FsCache\n\n\tchunkCache *chunk_cache.TieredChunkCache\n\tmetaCache *meta_cache.MetaCache\n\tsignature int32\n}\ntype statsCache struct {\n\tfiler_pb.StatisticsResponse\n\tlastChecked int64 \/\/ unix time in seconds\n}\n\nfunc NewSeaweedFileSystem(option *Option) *WFS {\n\twfs := &WFS{\n\t\toption: option,\n\t\thandles: make(map[uint64]*FileHandle),\n\t\tbufPool: sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, option.ChunkSizeLimit)\n\t\t\t},\n\t\t},\n\t\tsignature: util.RandomInt32(),\n\t}\n\tcacheUniqueId := util.Md5String([]byte(option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:4]\n\tcacheDir := path.Join(option.CacheDir, cacheUniqueId)\n\tif option.CacheSizeMB > 0 {\n\t\tos.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)\n\t\twfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB)\n\t}\n\n\twfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, \"meta\"), option.UidGidMapper)\n\tstartTime := time.Now()\n\tgo meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())\n\tgrace.OnInterrupt(func() {\n\t\twfs.metaCache.Shutdown()\n\t})\n\n\tentry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath))\n\n\twfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}\n\twfs.fsNodeCache = newFsCache(wfs.root)\n\n\treturn wfs\n}\n\nfunc (wfs *WFS) Root() (fs.Node, error) {\n\treturn wfs.root, nil\n}\n\nfunc (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {\n\n\tfullpath := file.fullpath()\n\tglog.V(4).Infof(\"AcquireHandle %s uid=%d gid=%d\", fullpath, uid, gid)\n\n\twfs.handlesLock.Lock()\n\tdefer wfs.handlesLock.Unlock()\n\n\tinodeId := file.fullpath().AsInode()\n\texistingHandle, found := wfs.handles[inodeId]\n\tif found && existingHandle != nil {\n\t\tfile.isOpen++\n\t\treturn existingHandle\n\t}\n\n\tfileHandle = newFileHandle(file, uid, gid)\n\tfile.maybeLoadEntry(context.Background())\n\tfile.isOpen++\n\n\twfs.handles[inodeId] = fileHandle\n\tfileHandle.handle = inodeId\n\n\treturn\n}\n\nfunc (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {\n\twfs.handlesLock.Lock()\n\tdefer wfs.handlesLock.Unlock()\n\n\tglog.V(4).Infof(\"%s ReleaseHandle id %d current handles length %d\", fullpath, handleId, len(wfs.handles))\n\n\tdelete(wfs.handles, fullpath.AsInode())\n\n\treturn\n}\n\n\/\/ Statfs is called to obtain file system metadata. Implements fuse.FSStatfser\nfunc (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {\n\n\tglog.V(4).Infof(\"reading fs stats: %+v\", req)\n\n\tif wfs.stats.lastChecked < time.Now().Unix()-20 {\n\n\t\terr := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\t\trequest := &filer_pb.StatisticsRequest{\n\t\t\t\tCollection: wfs.option.Collection,\n\t\t\t\tReplication: wfs.option.Replication,\n\t\t\t\tTtl: fmt.Sprintf(\"%ds\", wfs.option.TtlSec),\n\t\t\t}\n\n\t\t\tglog.V(4).Infof(\"reading filer stats: %+v\", request)\n\t\t\tresp, err := client.Statistics(context.Background(), request)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(0).Infof(\"reading filer stats %v: %v\", request, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"read filer stats: %+v\", resp)\n\n\t\t\twfs.stats.TotalSize = resp.TotalSize\n\t\t\twfs.stats.UsedSize = resp.UsedSize\n\t\t\twfs.stats.FileCount = resp.FileCount\n\t\t\twfs.stats.lastChecked = time.Now().Unix()\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"filer Statistics: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttotalDiskSize := wfs.stats.TotalSize\n\tusedDiskSize := wfs.stats.UsedSize\n\tactualFileCount := wfs.stats.FileCount\n\n\t\/\/ Compute the total number of available blocks\n\tresp.Blocks = totalDiskSize \/ blockSize\n\n\t\/\/ Compute the number of used blocks\n\tnumBlocks := uint64(usedDiskSize \/ blockSize)\n\n\t\/\/ Report the number of free and available blocks for the block size\n\tresp.Bfree = resp.Blocks - numBlocks\n\tresp.Bavail = resp.Blocks - numBlocks\n\tresp.Bsize = uint32(blockSize)\n\n\t\/\/ Report the total number of possible files in the file system (and those free)\n\tresp.Files = math.MaxInt64\n\tresp.Ffree = math.MaxInt64 - actualFileCount\n\n\t\/\/ Report the maximum length of a name and the minimum fragment size\n\tresp.Namelen = 1024\n\tresp.Frsize = uint32(blockSize)\n\n\treturn nil\n}\n\nfunc (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) {\n\tif entry.Attributes == nil {\n\t\treturn\n\t}\n\tentry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid)\n}\nfunc (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) {\n\tif entry.Attributes == nil {\n\t\treturn\n\t}\n\tentry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"error:\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc run() error {\n\topts, err := parseOptions()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar writer io.Writer = os.Stdout\n\tif opts.buffered {\n\t\twriter = bufio.NewWriter(writer)\n\t}\n\tduration, err := benchmark(writer, opts.benchSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(os.Stderr, \"Completed in\", duration)\n\treturn nil\n}\n\ntype options struct {\n\tbuffered bool\n\tbenchSize int\n}\n\nfunc parseOptions() (*options, error) {\n\topts := options{\n\t\tbuffered: false,\n\t\tbenchSize: 10000000,\n\t}\n\tif len(os.Args) > 3 {\n\t\treturn nil, fmt.Errorf(\"unrecognized options: %s\", os.Args[3:])\n\t}\n\tif len(os.Args) > 2 {\n\t\tn, err := strconv.Atoi(os.Args[2])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts.benchSize = n\n\t}\n\tif len(os.Args) > 1 {\n\t\tswitch os.Args[1] {\n\t\tcase \"-u\":\n\t\t\topts.buffered = false\n\t\tcase \"-b\":\n\t\t\topts.buffered = true\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unrecognized option: %s\", os.Args[1])\n\t\t}\n\t}\n\treturn &opts, nil\n}\n\nfunc benchmark(writer io.Writer, benchSize int) (time.Duration, error) {\n\tdata := []byte(\"Lorem Ipsum\\n\")\n\tstart := time.Now()\n\tfor i := 0; i < benchSize; i++ {\n\t\tif _, err := writer.Write(data); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn time.Since(start), nil\n}\n<commit_msg>12: Add flushing for buffered output<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"error:\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc run() error {\n\topts, err := parseOptions()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar writer io.Writer = os.Stdout\n\tif opts.buffered {\n\t\twriter = bufio.NewWriter(writer)\n\t}\n\tduration, err := benchmark(writer, opts.benchSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(os.Stderr, \"Completed in\", duration)\n\treturn nil\n}\n\ntype options struct {\n\tbuffered bool\n\tbenchSize int\n}\n\nfunc parseOptions() (*options, error) {\n\topts := options{\n\t\tbuffered: false,\n\t\tbenchSize: 10000000,\n\t}\n\tif len(os.Args) > 3 {\n\t\treturn nil, fmt.Errorf(\"unrecognized options: %s\", os.Args[3:])\n\t}\n\tif len(os.Args) > 2 {\n\t\tn, err := strconv.Atoi(os.Args[2])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts.benchSize = n\n\t}\n\tif len(os.Args) > 1 {\n\t\tswitch os.Args[1] {\n\t\tcase \"-u\":\n\t\t\topts.buffered = false\n\t\tcase \"-b\":\n\t\t\topts.buffered = true\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unrecognized option: %s\", os.Args[1])\n\t\t}\n\t}\n\treturn &opts, nil\n}\n\nfunc benchmark(writer io.Writer, benchSize int) (time.Duration, error) {\n\tdata := []byte(\"Lorem Ipsum\\n\")\n\tstart := time.Now()\n\tfor i := 0; i < benchSize; i++ {\n\t\tif _, err := writer.Write(data); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tif buf, buffered := writer.(*bufio.Writer); buffered {\n\t\tbuf.Flush()\n\t}\n\treturn time.Since(start), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nfunc main() {\n\taddr := flag.String(\"addr\", \":4000\", \"HTTP network address\")\n\tcertFile := flag.String(\"certfile\", \"cert.pem\", \"certificate PEM file\")\n\tkeyFile := flag.String(\"keyfile\", \"key.pem\", \"key PEM file\")\n\tflag.Parse()\n\n\ttlsConfig := &tls.Config{\n\t\tPreferServerCipherSuites: true,\n\t\tCurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},\n\t}\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"\/ping\", func(w http.ResponseWriter, req *http.Request) {\n\t\tfmt.Fprintf(w, \"Pong\")\n\t})\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\t\/\/ The \"\/\" pattern matches everything, so we need to check\n\t\t\/\/ that we're at the root here.\n\t\tif req.URL.Path != \"\/\" {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(w, \"Welcome to the home page!\")\n\t})\n\n\tsrv := &http.Server{\n\t\tAddr: *addr,\n\t\tHandler: mux,\n\t\tTLSConfig: tlsConfig,\n\t\tIdleTimeout: time.Minute,\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t}\n\n\tlog.Printf(\"Starting server on %s\", *addr)\n\terr := srv.ListenAndServeTLS(*certFile, *keyFile)\n\tlog.Fatal(err)\n}\n<commit_msg>Tweak server to be shorter<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\taddr := flag.String(\"addr\", \":4000\", \"HTTP network address\")\n\tcertFile := flag.String(\"certfile\", \"cert.pem\", \"certificate PEM file\")\n\tkeyFile := flag.String(\"keyfile\", \"key.pem\", \"key PEM file\")\n\tflag.Parse()\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.URL.Path != \"\/\" {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(w, \"Proudly served with Go and HTTPS!\")\n\t})\n\n\tsrv := &http.Server{\n\t\tAddr: *addr,\n\t\tHandler: mux,\n\t\tTLSConfig: &tls.Config{\n\t\t\tMinVersion: tls.VersionTLS13,\n\t\t\tPreferServerCipherSuites: true,\n\t\t},\n\t}\n\n\tlog.Printf(\"Starting server on %s\", *addr)\n\terr := srv.ListenAndServeTLS(*certFile, *keyFile)\n\tlog.Fatal(err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/aws\/aws-k8s-tester\/eks\"\n\t\"github.com\/aws\/aws-k8s-tester\/eksconfig\"\n\t\"github.com\/spf13\/pflag\"\n\t\"k8s.io\/test-infra\/kubetest2\/pkg\/app\"\n\t\"k8s.io\/test-infra\/kubetest2\/pkg\/types\"\n\n\t\/\/ import the standard set of testers so they are loaded & registered\n\t_ \"k8s.io\/test-infra\/kubetest2\/pkg\/app\/testers\/standard\"\n)\n\nfunc main() {\n\tapp.Main(\"eks\", newEKS)\n}\n\n\/\/ \"opts\" is not used in eks deployer, handled via \"kubetest2\/pkg\/app\"\n\/\/ eks uses env vars, so just return empty flag set\nfunc newEKS(opts types.Options) (types.Deployer, *pflag.FlagSet) {\n\tcfg := eksconfig.NewDefault() \/\/ use auto-generated config\n\tcfg.Sync()\n\n\terr := cfg.UpdateFromEnvs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcfg.Sync()\n\n\tif err = cfg.ValidateAndSetDefaults(); err != nil {\n\t\tpanic(err)\n\t}\n\tcfg.Sync()\n\n\tdp, err := eks.New(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn dp, pflag.NewFlagSet(\"eks\", pflag.ContinueOnError)\n}\n<commit_msg>kubetest2\/kubetest2-eks: remove unnecessary sync calls<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/aws\/aws-k8s-tester\/eks\"\n\t\"github.com\/aws\/aws-k8s-tester\/eksconfig\"\n\t\"github.com\/spf13\/pflag\"\n\t\"k8s.io\/test-infra\/kubetest2\/pkg\/app\"\n\t\"k8s.io\/test-infra\/kubetest2\/pkg\/types\"\n\n\t\/\/ import the standard set of testers so they are loaded & registered\n\t_ \"k8s.io\/test-infra\/kubetest2\/pkg\/app\/testers\/standard\"\n)\n\nfunc main() {\n\tapp.Main(\"eks\", newEKS)\n}\n\n\/\/ \"opts\" is not used in eks deployer, handled via \"kubetest2\/pkg\/app\"\n\/\/ eks uses env vars, so just return empty flag set\nfunc newEKS(opts types.Options) (types.Deployer, *pflag.FlagSet) {\n\tcfg := eksconfig.NewDefault() \/\/ use auto-generated config\n\n\terr := cfg.UpdateFromEnvs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err = cfg.ValidateAndSetDefaults(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdp, err := eks.New(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn dp, pflag.NewFlagSet(\"eks\", pflag.ContinueOnError)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package parth provides functions for accessing path segments.\n\/\/\n\/\/ When returning an int of any size, the first whole number within the\n\/\/ specified segment will be returned. When returning a float of any size,\n\/\/ the first decimal number within the specified segment will be returned.\npackage parth\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"unicode\"\n)\n\n\/\/ SegmentToString receives an int representing a path segment, and returns both\n\/\/ the specified segment as a string and a nil error. If any error is\n\/\/ encountered, a zero value string and error are returned.\nfunc SegmentToString(path string, i int) (string, error) {\n\tif i >= 0 {\n\t\treturn posSegToString(path, i)\n\t}\n\n\treturn negSegToString(path, i)\n}\n\n\/\/ SegmentToInt64 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int64 and a nil error. If any error is\n\/\/ encountered, a zero value int64 and error are returned.\nfunc SegmentToInt64(path string, i int) (int64, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToInt32 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int32 and a nil error. If any error is\n\/\/ encountered, a zero value int32 and error are returned.\nfunc SegmentToInt32(path string, i int) (int32, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 32); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int32(v), nil\n}\n\n\/\/ SegmentToInt16 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int16 and a nil error. If any error is\n\/\/ encountered, a zero value int16 and error are returned.\nfunc SegmentToInt16(path string, i int) (int16, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 16); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int16(v), nil\n}\n\n\/\/ SegmentToInt8 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int8 and a nil error. If any error is\n\/\/ encountered, a zero value int8 and error are returned.\nfunc SegmentToInt8(path string, i int) (int8, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 8); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int8(v), nil\n}\n\n\/\/ SegmentToInt receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int and a nil error. If any error is\n\/\/ encountered, a zero value int and error are returned.\nfunc SegmentToInt(path string, i int) (int, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 0); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(v), nil\n}\n\n\/\/ SegmentToBool receives an int representing a path segment, and returns both\n\/\/ the specified segment as a bool and a nil error. If any error is\n\/\/ encountered, a zero value bool and error are returned.\nfunc SegmentToBool(path string, i int) (bool, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn false, err\n\t}\n\n\tvar v bool\n\n\tif v, err = strconv.ParseBool(s); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToFloat64 receives an int representing a path segment, and returns\n\/\/ both the specified segment as a float64 and a nil error. If any error is\n\/\/ encountered, a zero value float64 and error are returned.\nfunc SegmentToFloat64(path string, i int) (float64, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tif s, err = firstFloatFromString(s); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tvar v float64\n\n\tif v, err = strconv.ParseFloat(s, 64); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToFloat32 receives an int representing a path segment, and returns\n\/\/ both the specified segment as a float32 and a nil error. If any error is\n\/\/ encountered, a zero value float32 and error are returned.\nfunc SegmentToFloat32(path string, i int) (float32, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tif s, err = firstFloatFromString(s); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tvar v float64\n\n\tif v, err = strconv.ParseFloat(s, 32); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\treturn float32(v), nil\n}\n\n\/\/ SpanToString receives two int values representing path segments, and\n\/\/ returns the content between those segments, including the first segment, as\n\/\/ a string and a nil error. If any error is encountered, a zero value string\n\/\/ and error are returned. The segments can be of negative values, but firstSeg\n\/\/ must come before the lastSeg. Providing a 0 int for the lastSeg is a special\n\/\/ case which indicates the end of the path.\nfunc SpanToString(path string, firstSeg, lastSeg int) (string, error) {\n\tvar f, l int\n\tvar err error\n\n\tif firstSeg < 0 {\n\t\tf, err = segStartIndexFromEnd(path, firstSeg)\n\t} else {\n\t\tf, err = segStartIndexFromStart(path, firstSeg)\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"first path segment index %d does not exist\", firstSeg)\n\t}\n\n\tif lastSeg > 0 {\n\t\tl, err = segEndIndexFromStart(path, lastSeg)\n\t} else {\n\t\tl, err = segEndIndexFromEnd(path, lastSeg)\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"last path segment index %d does not exist\", lastSeg)\n\t}\n\n\tif f == l {\n\t\treturn \"\", nil\n\t}\n\n\tif f > l {\n\t\treturn \"\", fmt.Errorf(\"first segment must come before the last segment\")\n\t}\n\n\treturn path[f:l], nil\n}\n\nfunc posSegToString(path string, i int) (string, error) {\n\tc, ind0, ind1 := 0, 0, 0\n\n\tfor n := 0; n < len(path); n++ {\n\t\tif path[n] == '\/' {\n\t\t\tif c == i {\n\t\t\t\tif n+1 < len(path) && path[n+1] != '\/' {\n\t\t\t\t\tind0 = n + 1\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif c > i {\n\t\t\t\tind1 = n\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc++\n\t\t} else if n == 0 {\n\t\t\tif c == i {\n\t\t\t\tind0 = n\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == len(path)-1 {\n\t\t\tif c > i {\n\t\t\t\tind1 = n + 1\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif i < 0 || ind1 == 0 {\n\t\treturn \"\", fmt.Errorf(\"path segment index %d does not exist\", i)\n\t}\n\n\treturn path[ind0:ind1], nil\n}\n\nfunc negSegToString(path string, i int) (string, error) {\n\ti = i * -1\n\tc, ind0, ind1 := 1, 0, 0\n\n\tfor n := len(path) - 1; n >= 0; n-- {\n\t\tif path[n] == '\/' {\n\t\t\tif c == i {\n\t\t\t\tif n-1 >= 0 && path[n-1] != '\/' {\n\t\t\t\t\tind1 = n\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif c > i {\n\t\t\t\tind0 = n + 1\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == len(path)-1 {\n\t\t\tif c == i {\n\t\t\t\tind1 = n + 1\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == 0 {\n\t\t\tif c > i {\n\t\t\t\tind0 = n + 1\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif i < 1 || ind0 == 0 {\n\t\treturn \"\", fmt.Errorf(\"path segment index %d does not exist\", i*-1)\n\t}\n\n\treturn path[ind0:ind1], nil\n}\n\nfunc firstIntFromString(s string) (string, error) {\n\tind, l := 0, 0\n\n\tfor n := 0; n < len(s); n++ {\n\t\tif unicode.IsDigit(rune(s[n])) {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tl++\n\t\t} else if s[n] == '-' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t\tl++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif l == 0 && s[n] == '.' {\n\t\t\t\tif n+1 < len(s) && unicode.IsDigit(rune(s[n+1])) {\n\t\t\t\t\treturn \"0\", nil\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif l > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif l == 0 {\n\t\treturn \"\", fmt.Errorf(\"path segment does not contain int\")\n\t}\n\n\treturn s[ind : ind+l], nil\n}\n\nfunc firstFloatFromString(s string) (string, error) {\n\tc, ind, l := 0, 0, 0\n\n\tfor n := 0; n < len(s); n++ {\n\t\tif unicode.IsDigit(rune(s[n])) {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tl++\n\t\t} else if s[n] == '-' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t\tl++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else if s[n] == '.' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tif c > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tl++\n\t\t\tc++\n\t\t} else if s[n] == 'e' && l > 0 && n+1 < len(s) && s[n+1] == '+' {\n\t\t\tl++\n\t\t} else if s[n] == '+' && l > 0 && s[n-1] == 'e' {\n\t\t\tif n+1 < len(s) && unicode.IsDigit(rune(s[n+1])) {\n\t\t\t\tl++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tl--\n\t\t\tbreak\n\t\t} else {\n\t\t\tif l > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif l == 0 || s[ind:ind+l] == \".\" {\n\t\treturn \"\", fmt.Errorf(\"path segment does not contain float\")\n\t}\n\n\treturn s[ind : ind+l], nil\n}\n\nfunc segStartIndexFromStart(path string, seg int) (int, error) {\n\tif seg < 0 {\n\t\treturn 0, fmt.Errorf(\"index cannot be found\")\n\t}\n\n\tfor n, ct := 0, 0; n < len(path); n++ {\n\t\tif n > 0 && path[n] == '\/' {\n\t\t\tct++\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\n\treturn 0, fmt.Errorf(\"no index found\")\n}\n\nfunc segStartIndexFromEnd(path string, seg int) (int, error) {\n\tif seg > -1 {\n\t\treturn 0, fmt.Errorf(\"index cannot be found\")\n\t}\n\n\tfor n, ct := len(path)-1, 0; n >= 0; n-- {\n\t\tif path[n] == '\/' || n == 0 {\n\t\t\tct--\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\n\treturn 0, fmt.Errorf(\"no index found\")\n}\n\nfunc segEndIndexFromStart(path string, seg int) (int, error) {\n\tif seg < 1 {\n\t\treturn 0, fmt.Errorf(\"index cannot be found\")\n\t}\n\n\tfor n, ct := 0, 0; n < len(path); n++ {\n\t\tif path[n] == '\/' && n > 0 {\n\t\t\tct++\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\n\t\tif n+1 == len(path) && ct+1 == seg {\n\t\t\treturn n + 1, nil\n\t\t}\n\t}\n\n\treturn 0, fmt.Errorf(\"no index found\")\n}\n\nfunc segEndIndexFromEnd(path string, seg int) (int, error) {\n\tif seg > 0 {\n\t\treturn 0, fmt.Errorf(\"index cannot be found\")\n\t}\n\n\tif seg == 0 {\n\t\treturn len(path), nil\n\t}\n\n\tif len(path) == 1 && path[0] == '\/' {\n\t\treturn 0, nil\n\t}\n\n\tfor n, ct := len(path)-1, 0; n >= 0; n-- {\n\t\tif n == 0 || path[n] == '\/' {\n\t\t\tct--\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\n\t}\n\n\treturn 0, fmt.Errorf(\"no index found\")\n}\n<commit_msg>Define errors.<commit_after>\/\/ Package parth provides functions for accessing path segments.\n\/\/\n\/\/ When returning an int of any size, the first whole number within the\n\/\/ specified segment will be returned. When returning a float of any size,\n\/\/ the first decimal number within the specified segment will be returned.\npackage parth\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"unicode\"\n)\n\nvar (\n\tErrFirstSegNotExist = errors.New(\"first segment index does not exist\")\n\tErrLastSegNotExist = errors.New(\"last segment index does not exist\")\n\tErrSegOrderReversed = errors.New(\"first segment must precede last segment\")\n\tErrSegNotExist = errors.New(\"segment index does not exist\")\n\tErrIntNotFound = errors.New(\"segment does not contain int\")\n\tErrFloatNotFound = errors.New(\"segment does not contain float\")\n\tErrUnparsable = errors.New(\"unable to parse segment\")\n\n\tErrIndexBad = errors.New(\"index cannot be found\")\n\tErrIndexNotFound = errors.New(\"no index found\")\n)\n\n\/\/ SegmentToString receives an int representing a path segment, and returns both\n\/\/ the specified segment as a string and a nil error. If any error is\n\/\/ encountered, a zero value string and error are returned.\nfunc SegmentToString(path string, i int) (string, error) {\n\tif i >= 0 {\n\t\treturn posSegToString(path, i)\n\t}\n\n\treturn negSegToString(path, i)\n}\n\n\/\/ SegmentToInt64 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int64 and a nil error. If any error is\n\/\/ encountered, a zero value int64 and error are returned.\nfunc SegmentToInt64(path string, i int) (int64, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToInt32 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int32 and a nil error. If any error is\n\/\/ encountered, a zero value int32 and error are returned.\nfunc SegmentToInt32(path string, i int) (int32, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 32); err != nil {\n\t\treturn 0, ErrUnparsable\n\t}\n\n\treturn int32(v), nil\n}\n\n\/\/ SegmentToInt16 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int16 and a nil error. If any error is\n\/\/ encountered, a zero value int16 and error are returned.\nfunc SegmentToInt16(path string, i int) (int16, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 16); err != nil {\n\t\treturn 0, ErrUnparsable\n\t}\n\n\treturn int16(v), nil\n}\n\n\/\/ SegmentToInt8 receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int8 and a nil error. If any error is\n\/\/ encountered, a zero value int8 and error are returned.\nfunc SegmentToInt8(path string, i int) (int8, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 8); err != nil {\n\t\treturn 0, ErrUnparsable\n\t}\n\n\treturn int8(v), nil\n}\n\n\/\/ SegmentToInt receives an int representing a path segment, and returns both\n\/\/ the specified segment as an int and a nil error. If any error is\n\/\/ encountered, a zero value int and error are returned.\nfunc SegmentToInt(path string, i int) (int, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif s, err = firstIntFromString(s); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar v int64\n\n\tif v, err = strconv.ParseInt(s, 10, 0); err != nil {\n\t\treturn 0, ErrUnparsable\n\t}\n\n\treturn int(v), nil\n}\n\n\/\/ SegmentToBool receives an int representing a path segment, and returns both\n\/\/ the specified segment as a bool and a nil error. If any error is\n\/\/ encountered, a zero value bool and error are returned.\nfunc SegmentToBool(path string, i int) (bool, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn false, err\n\t}\n\n\tvar v bool\n\n\tif v, err = strconv.ParseBool(s); err != nil {\n\t\treturn false, ErrUnparsable\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToFloat64 receives an int representing a path segment, and returns\n\/\/ both the specified segment as a float64 and a nil error. If any error is\n\/\/ encountered, a zero value float64 and error are returned.\nfunc SegmentToFloat64(path string, i int) (float64, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tif s, err = firstFloatFromString(s); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tvar v float64\n\n\tif v, err = strconv.ParseFloat(s, 64); err != nil {\n\t\treturn 0.0, ErrUnparsable\n\t}\n\n\treturn v, nil\n}\n\n\/\/ SegmentToFloat32 receives an int representing a path segment, and returns\n\/\/ both the specified segment as a float32 and a nil error. If any error is\n\/\/ encountered, a zero value float32 and error are returned.\nfunc SegmentToFloat32(path string, i int) (float32, error) {\n\tvar s string\n\tvar err error\n\n\tif s, err = SegmentToString(path, i); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tif s, err = firstFloatFromString(s); err != nil {\n\t\treturn 0.0, err\n\t}\n\n\tvar v float64\n\n\tif v, err = strconv.ParseFloat(s, 32); err != nil {\n\t\treturn 0.0, ErrUnparsable\n\t}\n\n\treturn float32(v), nil\n}\n\n\/\/ SpanToString receives two int values representing path segments, and\n\/\/ returns the content between those segments, including the first segment, as\n\/\/ a string and a nil error. If any error is encountered, a zero value string\n\/\/ and error are returned. The segments can be of negative values, but firstSeg\n\/\/ must come before the lastSeg. Providing a 0 int for the lastSeg is a special\n\/\/ case which indicates the end of the path.\nfunc SpanToString(path string, firstSeg, lastSeg int) (string, error) {\n\tvar f, l int\n\tvar err error\n\n\tif firstSeg < 0 {\n\t\tf, err = segStartIndexFromEnd(path, firstSeg)\n\t} else {\n\t\tf, err = segStartIndexFromStart(path, firstSeg)\n\t}\n\tif err != nil {\n\t\treturn \"\", ErrFirstSegNotExist\n\t}\n\n\tif lastSeg > 0 {\n\t\tl, err = segEndIndexFromStart(path, lastSeg)\n\t} else {\n\t\tl, err = segEndIndexFromEnd(path, lastSeg)\n\t}\n\tif err != nil {\n\t\treturn \"\", ErrLastSegNotExist\n\t}\n\n\tif f == l {\n\t\treturn \"\", nil\n\t}\n\n\tif f > l {\n\t\treturn \"\", ErrSegOrderReversed\n\t}\n\n\treturn path[f:l], nil\n}\n\nfunc posSegToString(path string, i int) (string, error) {\n\tc, ind0, ind1 := 0, 0, 0\n\n\tfor n := 0; n < len(path); n++ {\n\t\tif path[n] == '\/' {\n\t\t\tif c == i {\n\t\t\t\tif n+1 < len(path) && path[n+1] != '\/' {\n\t\t\t\t\tind0 = n + 1\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif c > i {\n\t\t\t\tind1 = n\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc++\n\t\t} else if n == 0 {\n\t\t\tif c == i {\n\t\t\t\tind0 = n\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == len(path)-1 {\n\t\t\tif c > i {\n\t\t\t\tind1 = n + 1\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif i < 0 || ind1 == 0 {\n\t\treturn \"\", ErrSegNotExist\n\t}\n\n\treturn path[ind0:ind1], nil\n}\n\nfunc negSegToString(path string, i int) (string, error) {\n\ti = i * -1\n\tc, ind0, ind1 := 1, 0, 0\n\n\tfor n := len(path) - 1; n >= 0; n-- {\n\t\tif path[n] == '\/' {\n\t\t\tif c == i {\n\t\t\t\tif n-1 >= 0 && path[n-1] != '\/' {\n\t\t\t\t\tind1 = n\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif c > i {\n\t\t\t\tind0 = n + 1\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == len(path)-1 {\n\t\t\tif c == i {\n\t\t\t\tind1 = n + 1\n\t\t\t}\n\n\t\t\tc++\n\t\t} else if n == 0 {\n\t\t\tif c > i {\n\t\t\t\tind0 = n + 1\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif i < 1 || ind0 == 0 {\n\t\treturn \"\", ErrSegNotExist\n\t}\n\n\treturn path[ind0:ind1], nil\n}\n\nfunc firstIntFromString(s string) (string, error) {\n\tind, l := 0, 0\n\n\tfor n := 0; n < len(s); n++ {\n\t\tif unicode.IsDigit(rune(s[n])) {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tl++\n\t\t} else if s[n] == '-' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t\tl++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif l == 0 && s[n] == '.' {\n\t\t\t\tif n+1 < len(s) && unicode.IsDigit(rune(s[n+1])) {\n\t\t\t\t\treturn \"0\", nil\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif l > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif l == 0 {\n\t\treturn \"\", ErrIntNotFound\n\t}\n\n\treturn s[ind : ind+l], nil\n}\n\nfunc firstFloatFromString(s string) (string, error) {\n\tc, ind, l := 0, 0, 0\n\n\tfor n := 0; n < len(s); n++ {\n\t\tif unicode.IsDigit(rune(s[n])) {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tl++\n\t\t} else if s[n] == '-' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t\tl++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else if s[n] == '.' {\n\t\t\tif l == 0 {\n\t\t\t\tind = n\n\t\t\t}\n\n\t\t\tif c > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tl++\n\t\t\tc++\n\t\t} else if s[n] == 'e' && l > 0 && n+1 < len(s) && s[n+1] == '+' {\n\t\t\tl++\n\t\t} else if s[n] == '+' && l > 0 && s[n-1] == 'e' {\n\t\t\tif n+1 < len(s) && unicode.IsDigit(rune(s[n+1])) {\n\t\t\t\tl++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tl--\n\t\t\tbreak\n\t\t} else {\n\t\t\tif l > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif l == 0 || s[ind:ind+l] == \".\" {\n\t\treturn \"\", ErrFloatNotFound\n\t}\n\n\treturn s[ind : ind+l], nil\n}\n\nfunc segStartIndexFromStart(path string, seg int) (int, error) {\n\tif seg < 0 {\n\t\treturn 0, ErrIndexBad\n\t}\n\n\tfor n, ct := 0, 0; n < len(path); n++ {\n\t\tif n > 0 && path[n] == '\/' {\n\t\t\tct++\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\n\treturn 0, ErrIndexNotFound\n}\n\nfunc segStartIndexFromEnd(path string, seg int) (int, error) {\n\tif seg > -1 {\n\t\treturn 0, ErrIndexBad\n\t}\n\n\tfor n, ct := len(path)-1, 0; n >= 0; n-- {\n\t\tif path[n] == '\/' || n == 0 {\n\t\t\tct--\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\n\treturn 0, ErrIndexNotFound\n}\n\nfunc segEndIndexFromStart(path string, seg int) (int, error) {\n\tif seg < 1 {\n\t\treturn 0, ErrIndexBad\n\t}\n\n\tfor n, ct := 0, 0; n < len(path); n++ {\n\t\tif path[n] == '\/' && n > 0 {\n\t\t\tct++\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\n\t\tif n+1 == len(path) && ct+1 == seg {\n\t\t\treturn n + 1, nil\n\t\t}\n\t}\n\n\treturn 0, ErrIndexNotFound\n}\n\nfunc segEndIndexFromEnd(path string, seg int) (int, error) {\n\tif seg > 0 {\n\t\treturn 0, ErrIndexBad\n\t}\n\n\tif seg == 0 {\n\t\treturn len(path), nil\n\t}\n\n\tif len(path) == 1 && path[0] == '\/' {\n\t\treturn 0, nil\n\t}\n\n\tfor n, ct := len(path)-1, 0; n >= 0; n-- {\n\t\tif n == 0 || path[n] == '\/' {\n\t\t\tct--\n\t\t}\n\n\t\tif ct == seg {\n\t\t\treturn n, nil\n\t\t}\n\n\t}\n\n\treturn 0, ErrIndexNotFound\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"github.com\/cloudfoundry\/cli\/cf\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/api\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/command_metadata\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/requirements\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/terminal\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype Authenticate struct {\n\tui terminal.UI\n\tconfig configuration.ReadWriter\n\tauthenticator api.AuthenticationRepository\n}\n\nfunc NewAuthenticate(ui terminal.UI, config configuration.ReadWriter, authenticator api.AuthenticationRepository) (cmd Authenticate) {\n\tcmd.ui = ui\n\tcmd.config = config\n\tcmd.authenticator = authenticator\n\treturn\n}\n\nfunc (cmd Authenticate) Metadata() command_metadata.CommandMetadata {\n\treturn command_metadata.CommandMetadata{\n\t\tName: \"auth\",\n\t\tDescription: T(\"Authenticate user non-interactively\"),\n\t\tUsage: T(\"CF_NAME auth USERNAME PASSWORD\\n\\n\") +\n\t\t\tterminal.WarningColor(T(\"WARNING:\\n Providing your password as a command line option is highly discouraged\\n Your password may be visible to others and may be recorded in your shell history\\n\\n\")) + T(\"EXAMPLE:\\n\") + T(\" CF_NAME auth name@example.com \\\"my password\\\" (use quotes for passwords with a space)\\n\") + T(\" CF_NAME auth name@example.com \\\"\\\\\\\"password\\\\\\\"\\\" (escape quotes if used in password)\"),\n\t}\n}\n\nfunc (cmd Authenticate) GetRequirements(requirementsFactory requirements.Factory, c *cli.Context) (reqs []requirements.Requirement, err error) {\n\tif len(c.Args()) != 2 {\n\t\tcmd.ui.FailWithUsage(c)\n\t}\n\n\treqs = append(reqs, requirementsFactory.NewApiEndpointRequirement())\n\treturn\n}\n\nfunc (cmd Authenticate) Run(c *cli.Context) {\n\tcmd.config.ClearSession()\n\tcmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\n\tcmd.ui.Say(T(\"API endpoint: {{.ApiEndpoint}}\",\n\t\tmap[string]interface{}{\"ApiEndpoint\": terminal.EntityNameColor(cmd.config.ApiEndpoint())}))\n\tcmd.ui.Say(T(\"Authenticating...\"))\n\n apiErr := cmd.authenticator.Authenticate(map[string]string{\"username\": c.Args()[0], \"password\": c.Args()[1]})\n\tif apiErr != nil {\n\t\tcmd.ui.Failed(apiErr.Error())\n\t\treturn\n\t}\n\n\tcmd.ui.Ok()\n\tcmd.ui.Say(T(\"Use '{{.Name}}' to view or set your target org and space\",\n\t\tmap[string]interface{}{\"Name\": terminal.CommandColor(cf.Name() + \" target\")}))\n\treturn\n}\n<commit_msg>go fmt<commit_after>package commands\n\nimport (\n\t\"github.com\/cloudfoundry\/cli\/cf\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/api\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/command_metadata\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/configuration\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/requirements\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/terminal\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype Authenticate struct {\n\tui terminal.UI\n\tconfig configuration.ReadWriter\n\tauthenticator api.AuthenticationRepository\n}\n\nfunc NewAuthenticate(ui terminal.UI, config configuration.ReadWriter, authenticator api.AuthenticationRepository) (cmd Authenticate) {\n\tcmd.ui = ui\n\tcmd.config = config\n\tcmd.authenticator = authenticator\n\treturn\n}\n\nfunc (cmd Authenticate) Metadata() command_metadata.CommandMetadata {\n\treturn command_metadata.CommandMetadata{\n\t\tName: \"auth\",\n\t\tDescription: T(\"Authenticate user non-interactively\"),\n\t\tUsage: T(\"CF_NAME auth USERNAME PASSWORD\\n\\n\") +\n\t\t\tterminal.WarningColor(T(\"WARNING:\\n Providing your password as a command line option is highly discouraged\\n Your password may be visible to others and may be recorded in your shell history\\n\\n\")) + T(\"EXAMPLE:\\n\") + T(\" CF_NAME auth name@example.com \\\"my password\\\" (use quotes for passwords with a space)\\n\") + T(\" CF_NAME auth name@example.com \\\"\\\\\\\"password\\\\\\\"\\\" (escape quotes if used in password)\"),\n\t}\n}\n\nfunc (cmd Authenticate) GetRequirements(requirementsFactory requirements.Factory, c *cli.Context) (reqs []requirements.Requirement, err error) {\n\tif len(c.Args()) != 2 {\n\t\tcmd.ui.FailWithUsage(c)\n\t}\n\n\treqs = append(reqs, requirementsFactory.NewApiEndpointRequirement())\n\treturn\n}\n\nfunc (cmd Authenticate) Run(c *cli.Context) {\n\tcmd.config.ClearSession()\n\tcmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\n\tcmd.ui.Say(T(\"API endpoint: {{.ApiEndpoint}}\",\n\t\tmap[string]interface{}{\"ApiEndpoint\": terminal.EntityNameColor(cmd.config.ApiEndpoint())}))\n\tcmd.ui.Say(T(\"Authenticating...\"))\n\n\tapiErr := cmd.authenticator.Authenticate(map[string]string{\"username\": c.Args()[0], \"password\": c.Args()[1]})\n\tif apiErr != nil {\n\t\tcmd.ui.Failed(apiErr.Error())\n\t\treturn\n\t}\n\n\tcmd.ui.Ok()\n\tcmd.ui.Say(T(\"Use '{{.Name}}' to view or set your target org and space\",\n\t\tmap[string]interface{}{\"Name\": terminal.CommandColor(cf.Name() + \" target\")}))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package app_lang\n\nimport (\n\t\"golang.org\/x\/text\/language\"\n\t\"testing\"\n)\n\nfunc TestBase(t *testing.T) {\n\tif base := Base(language.English); base != \"en\" {\n\t\tt.Error(base)\n\t}\n\tif base := Base(language.Japanese); base != \"ja\" {\n\t\tt.Error(base)\n\t}\n}\n\nfunc TestPathSuffix(t *testing.T) {\n\tif suffix := PathSuffix(language.English); suffix != \"\" {\n\t\tt.Error(suffix)\n\t}\n\tif suffix := PathSuffix(language.Japanese); suffix != \"_ja\" {\n\t\tt.Error(suffix)\n\t}\n}\n\nfunc TestSelect(t *testing.T) {\n\tif sel := Select(\"en\"); sel != language.English {\n\t\tt.Error(sel)\n\t}\n\tif sel := Select(\"en-US\"); sel != language.English {\n\t\tt.Error(sel)\n\t}\n\n\tif sel := Select(\"ja-Jpan-JP\"); sel != language.Japanese {\n\t\tt.Error(sel)\n\t}\n\tif sel := Select(\"ja-JP\"); sel != language.Japanese {\n\t\tt.Error(sel)\n\t}\n\tif sel := Select(\"ja\"); sel != language.Japanese {\n\t\tt.Error(sel)\n\t}\n\n\t\/\/ fallback to English\n\tif sel := Select(\"und\"); sel != language.English {\n\t\tt.Error(sel)\n\t}\n}\n\nfunc TestDetect(t *testing.T) {\n\td := Detect()\n\tfor _, l := range SupportedLanguages {\n\t\tif l == d {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(d)\n}\n<commit_msg>fix test<commit_after>package app_lang\n\nimport (\n\t\"golang.org\/x\/text\/language\"\n\t\"testing\"\n)\n\nfunc TestBase(t *testing.T) {\n\tif base := Base(language.English); base != \"en\" {\n\t\tt.Error(base)\n\t}\n\tif base := Base(language.Japanese); base != \"ja\" {\n\t\tt.Error(base)\n\t}\n}\n\nfunc TestPathSuffix(t *testing.T) {\n\tif suffix := PathSuffix(language.English); suffix != \"\" {\n\t\tt.Error(suffix)\n\t}\n\tif suffix := PathSuffix(language.Japanese); suffix != \"_ja\" {\n\t\tt.Error(suffix)\n\t}\n}\n\nfunc TestSelect(t *testing.T) {\n\tif sel := Base(Select(\"en\")); sel != \"en\" {\n\t\tt.Error(sel)\n\t}\n\tif sel := Base(Select(\"en-US\")); sel != \"en\" {\n\t\tt.Error(sel)\n\t}\n\n\tif sel := Base(Select(\"ja-Jpan-JP\")); sel != \"ja\" {\n\t\tt.Error(sel)\n\t}\n\tif sel := Base(Select(\"ja-JP\")); sel != \"ja\" {\n\t\tt.Error(sel)\n\t}\n\tif sel := Base(Select(\"ja\")); sel != \"ja\" {\n\t\tt.Error(sel)\n\t}\n\n\t\/\/ fallback to English\n\tif sel := Base(Select(\"und\")); sel != \"en\" {\n\t\tt.Error(sel)\n\t}\n}\n\nfunc TestDetect(t *testing.T) {\n\td := Detect()\n\tfor _, l := range SupportedLanguages {\n\t\tif l == d {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(d)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/ http:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/blox\/blox\/cluster-state-service\/cmd\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/config\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/handler\/run\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/logger\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/versioning\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\n\t\"os\"\n)\n\nconst errorCode = 1\n\nfunc main() {\n\tdefer log.Flush()\n\terr := logger.InitLogger()\n\tif err != nil {\n\t\tfmt.Printf(\"Could not initialize logger: %+v\", err)\n\t}\n\tif err := cmd.RootCmd.Execute(); err != nil {\n\t\tlog.Criticalf(\"Error executing: %v\", err)\n\t\tos.Exit(errorCode)\n\t}\n\tif config.PrintVersion {\n\t\tversioning.PrintVersion()\n\t\tos.Exit(0)\n\t}\n\tif err := run.StartEventStreamHandler(config.SQSQueueName, config.CSSBindAddr, config.EtcdEndpoints); err != nil {\n\t\tlog.Criticalf(\"Error starting event stream handler: %v\", err)\n\t\tos.Exit(errorCode)\n\t}\n}\n<commit_msg>Fix css main.go to use StartClusterStateService instead of StartEventStreamHandler<commit_after>\/\/ Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/ http:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/blox\/blox\/cluster-state-service\/cmd\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/config\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/handler\/run\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/logger\"\n\t\"github.com\/blox\/blox\/cluster-state-service\/versioning\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\n\t\"os\"\n)\n\nconst errorCode = 1\n\nfunc main() {\n\tdefer log.Flush()\n\terr := logger.InitLogger()\n\tif err != nil {\n\t\tfmt.Printf(\"Could not initialize logger: %+v\", err)\n\t}\n\tif err := cmd.RootCmd.Execute(); err != nil {\n\t\tlog.Criticalf(\"Error executing: %v\", err)\n\t\tos.Exit(errorCode)\n\t}\n\tif config.PrintVersion {\n\t\tversioning.PrintVersion()\n\t\tos.Exit(0)\n\t}\n\tif err := run.StartClusterStateService(config.SQSQueueName, config.CSSBindAddr, config.EtcdEndpoints); err != nil {\n\t\tlog.Criticalf(\"Error starting event stream handler: %v\", err)\n\t\tos.Exit(errorCode)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ Package dlv ...\npackage dlv\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/beego\/bee\/cmd\/commands\"\n\t\"github.com\/beego\/bee\/cmd\/commands\/version\"\n\tbeeLogger \"github.com\/beego\/bee\/logger\"\n\t\"github.com\/beego\/bee\/utils\"\n\t\"github.com\/derekparker\/delve\/pkg\/terminal\"\n\t\"github.com\/derekparker\/delve\/service\"\n\t\"github.com\/derekparker\/delve\/service\/rpc2\"\n\t\"github.com\/derekparker\/delve\/service\/rpccommon\"\n\t\"github.com\/fsnotify\/fsnotify\"\n)\n\nvar cmdDlv = &commands.Command{\n\tCustomFlags: true,\n\tUsageLine: \"dlv [-package=\\\"\\\"] [-port=8181] [-verbose=false]\",\n\tShort: \"Start a debugging session using Delve\",\n\tLong: `dlv command start a debugging session using debugging tool Delve.\n\n To debug your application using Delve, use: {{\"$ bee dlv\" | bold}}\n\n For more information on Delve: https:\/\/github.com\/derekparker\/delve\n`,\n\tPreRun: func(cmd *commands.Command, args []string) { version.ShowShortVersionBanner() },\n\tRun: runDlv,\n}\n\nvar (\n\tpackageName string\n\tverbose bool\n\tport int\n)\n\nfunc init() {\n\tfs := flag.NewFlagSet(\"dlv\", flag.ContinueOnError)\n\tfs.StringVar(&packageName, \"package\", \"\", \"The package to debug (Must have a main package)\")\n\tfs.BoolVar(&verbose, \"verbose\", false, \"Enable verbose mode\")\n\tfs.IntVar(&port, \"port\", 8181, \"Port to listen to for clients\")\n\tcmdDlv.Flag = *fs\n\tcommands.AvailableCommands = append(commands.AvailableCommands, cmdDlv)\n}\n\nfunc runDlv(cmd *commands.Command, args []string) int {\n\tif err := cmd.Flag.Parse(args); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while parsing flags: %v\", err.Error())\n\t}\n\n\tvar (\n\t\taddr = fmt.Sprintf(\"127.0.0.1:%d\", port)\n\t\tpaths = make([]string, 0)\n\t\tnotifyChan = make(chan int)\n\t)\n\n\tif err := loadPathsToWatch(&paths); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while loading paths to watch: %v\", err.Error())\n\t}\n\tgo startWatcher(paths, notifyChan)\n\treturn startDelveDebugger(addr, notifyChan)\n}\n\n\/\/ buildDebug builds a debug binary in the current working directory\nfunc buildDebug() (string, error) {\n\targs := []string{\"-gcflags\", \"-N -l\", \"-o\", \"debug\"}\n\targs = append(args, utils.SplitQuotedFields(\"-ldflags='-linkmode internal'\")...)\n\targs = append(args, packageName)\n\tif err := utils.GoCommand(\"build\", args...); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfp, err := filepath.Abs(\".\/debug\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fp, nil\n}\n\n\/\/ loadPathsToWatch loads the paths that needs to be watched for changes\nfunc loadPathsToWatch(paths *[]string) error {\n\tdirectory, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath.Walk(directory, func(path string, info os.FileInfo, _ error) error {\n\t\tif strings.HasSuffix(info.Name(), \"docs\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif strings.HasSuffix(info.Name(), \"swagger\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif strings.HasSuffix(info.Name(), \"vendor\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) == \".go\" {\n\t\t\t*paths = append(*paths, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn nil\n}\n\n\/\/ startDelveDebugger starts the Delve debugger server\nfunc startDelveDebugger(addr string, ch chan int) int {\n\tbeeLogger.Log.Info(\"Starting Delve Debugger...\")\n\n\tfp, err := buildDebug()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while building debug binary: %v\", err)\n\t}\n\tdefer os.Remove(fp)\n\n\tabs, err := filepath.Abs(\".\/debug\")\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"%v\", err)\n\t}\n\n\t\/\/ Create and start the debugger server\n\tlistener, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start listener: %s\", err)\n\t}\n\tdefer listener.Close()\n\n\tserver := rpccommon.NewServer(&service.Config{\n\t\tListener: listener,\n\t\tAcceptMulti: true,\n\t\tAttachPid: 0,\n\t\tAPIVersion: 2,\n\t\tWorkingDir: \".\",\n\t\tProcessArgs: []string{abs},\n\t})\n\tif err := server.Run(); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start debugger server: %v\", err)\n\t}\n\n\t\/\/ Start the Delve client REPL\n\tclient := rpc2.NewClient(addr)\n\t\/\/ Make sure the client is restarted when new changes are introduced\n\tgo func() {\n\t\tfor {\n\t\t\tif val := <-ch; val == 0 {\n\t\t\t\tif _, err := client.Restart(); err != nil {\n\t\t\t\t\tutils.Notify(\"Error while restarting the client: \"+err.Error(), \"bee\")\n\t\t\t\t} else {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tutils.Notify(\"Delve Debugger Restarted\", \"bee\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Create the terminal and connect it to the client debugger\n\tterm := terminal.New(client, nil)\n\tstatus, err := term.Run()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start Delve REPL: %v\", err)\n\t}\n\n\t\/\/ Stop and kill the debugger server once user quits the REPL\n\tif err := server.Stop(); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not stop Delve server: %v\", err)\n\t}\n\treturn status\n}\n\nvar eventsModTime = make(map[string]int64)\n\n\/\/ startWatcher starts the fsnotify watcher on the passed paths\nfunc startWatcher(paths []string, ch chan int) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start the watcher: %v\", err)\n\t}\n\tdefer watcher.Close()\n\n\t\/\/ Feed the paths to the watcher\n\tfor _, path := range paths {\n\t\tif err := watcher.Add(path); err != nil {\n\t\t\tbeeLogger.Log.Fatalf(\"Could not set a watch on path: %v\", err)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase evt := <-watcher.Events:\n\t\t\tbuild := true\n\t\t\tif filepath.Ext(evt.Name) != \".go\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmt := utils.GetFileModTime(evt.Name)\n\t\t\tif t := eventsModTime[evt.Name]; mt == t {\n\t\t\t\tbuild = false\n\t\t\t}\n\t\t\teventsModTime[evt.Name] = mt\n\n\t\t\tif build {\n\t\t\t\tgo func() {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tutils.Notify(\"Rebuilding application with the new changes\", \"bee\")\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Wait 1s before re-build until there is no file change\n\t\t\t\t\tscheduleTime := time.Now().Add(1 * time.Second)\n\t\t\t\t\ttime.Sleep(scheduleTime.Sub(time.Now()))\n\t\t\t\t\t_, err := buildDebug()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutils.Notify(\"Build Failed: \"+err.Error(), \"bee\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tch <- 0 \/\/ Notify listeners\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\tcase err := <-watcher.Errors:\n\t\t\tif err != nil {\n\t\t\t\tch <- -1\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fix unknown backend \"\" error<commit_after>\/\/ Copyright 2017 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ Package dlv ...\npackage dlv\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/beego\/bee\/cmd\/commands\"\n\t\"github.com\/beego\/bee\/cmd\/commands\/version\"\n\tbeeLogger \"github.com\/beego\/bee\/logger\"\n\t\"github.com\/beego\/bee\/utils\"\n\t\"github.com\/derekparker\/delve\/pkg\/terminal\"\n\t\"github.com\/derekparker\/delve\/service\"\n\t\"github.com\/derekparker\/delve\/service\/rpc2\"\n\t\"github.com\/derekparker\/delve\/service\/rpccommon\"\n\t\"github.com\/fsnotify\/fsnotify\"\n)\n\nvar cmdDlv = &commands.Command{\n\tCustomFlags: true,\n\tUsageLine: \"dlv [-package=\\\"\\\"] [-port=8181] [-verbose=false]\",\n\tShort: \"Start a debugging session using Delve\",\n\tLong: `dlv command start a debugging session using debugging tool Delve.\n\n To debug your application using Delve, use: {{\"$ bee dlv\" | bold}}\n\n For more information on Delve: https:\/\/github.com\/derekparker\/delve\n`,\n\tPreRun: func(cmd *commands.Command, args []string) { version.ShowShortVersionBanner() },\n\tRun: runDlv,\n}\n\nvar (\n\tpackageName string\n\tverbose bool\n\tport int\n)\n\nfunc init() {\n\tfs := flag.NewFlagSet(\"dlv\", flag.ContinueOnError)\n\tfs.StringVar(&packageName, \"package\", \"\", \"The package to debug (Must have a main package)\")\n\tfs.BoolVar(&verbose, \"verbose\", false, \"Enable verbose mode\")\n\tfs.IntVar(&port, \"port\", 8181, \"Port to listen to for clients\")\n\tcmdDlv.Flag = *fs\n\tcommands.AvailableCommands = append(commands.AvailableCommands, cmdDlv)\n}\n\nfunc runDlv(cmd *commands.Command, args []string) int {\n\tif err := cmd.Flag.Parse(args); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while parsing flags: %v\", err.Error())\n\t}\n\n\tvar (\n\t\taddr = fmt.Sprintf(\"127.0.0.1:%d\", port)\n\t\tpaths = make([]string, 0)\n\t\tnotifyChan = make(chan int)\n\t)\n\n\tif err := loadPathsToWatch(&paths); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while loading paths to watch: %v\", err.Error())\n\t}\n\tgo startWatcher(paths, notifyChan)\n\treturn startDelveDebugger(addr, notifyChan)\n}\n\n\/\/ buildDebug builds a debug binary in the current working directory\nfunc buildDebug() (string, error) {\n\targs := []string{\"-gcflags\", \"-N -l\", \"-o\", \"debug\"}\n\targs = append(args, utils.SplitQuotedFields(\"-ldflags='-linkmode internal'\")...)\n\targs = append(args, packageName)\n\tif err := utils.GoCommand(\"build\", args...); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfp, err := filepath.Abs(\".\/debug\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fp, nil\n}\n\n\/\/ loadPathsToWatch loads the paths that needs to be watched for changes\nfunc loadPathsToWatch(paths *[]string) error {\n\tdirectory, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath.Walk(directory, func(path string, info os.FileInfo, _ error) error {\n\t\tif strings.HasSuffix(info.Name(), \"docs\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif strings.HasSuffix(info.Name(), \"swagger\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif strings.HasSuffix(info.Name(), \"vendor\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) == \".go\" {\n\t\t\t*paths = append(*paths, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn nil\n}\n\n\/\/ startDelveDebugger starts the Delve debugger server\nfunc startDelveDebugger(addr string, ch chan int) int {\n\tbeeLogger.Log.Info(\"Starting Delve Debugger...\")\n\n\tfp, err := buildDebug()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Error while building debug binary: %v\", err)\n\t}\n\tdefer os.Remove(fp)\n\n\tabs, err := filepath.Abs(\".\/debug\")\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"%v\", err)\n\t}\n\n\t\/\/ Create and start the debugger server\n\tlistener, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start listener: %s\", err)\n\t}\n\tdefer listener.Close()\n\n\tserver := rpccommon.NewServer(&service.Config{\n\t\tListener: listener,\n\t\tAcceptMulti: true,\n\t\tAttachPid: 0,\n\t\tAPIVersion: 2,\n\t\tWorkingDir: \".\",\n\t\tProcessArgs: []string{abs},\n\t\tBackend: \"default\",\n\t})\n\tif err := server.Run(); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start debugger server: %v\", err)\n\t}\n\n\t\/\/ Start the Delve client REPL\n\tclient := rpc2.NewClient(addr)\n\t\/\/ Make sure the client is restarted when new changes are introduced\n\tgo func() {\n\t\tfor {\n\t\t\tif val := <-ch; val == 0 {\n\t\t\t\tif _, err := client.Restart(); err != nil {\n\t\t\t\t\tutils.Notify(\"Error while restarting the client: \"+err.Error(), \"bee\")\n\t\t\t\t} else {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tutils.Notify(\"Delve Debugger Restarted\", \"bee\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Create the terminal and connect it to the client debugger\n\tterm := terminal.New(client, nil)\n\tstatus, err := term.Run()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start Delve REPL: %v\", err)\n\t}\n\n\t\/\/ Stop and kill the debugger server once user quits the REPL\n\tif err := server.Stop(); err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not stop Delve server: %v\", err)\n\t}\n\treturn status\n}\n\nvar eventsModTime = make(map[string]int64)\n\n\/\/ startWatcher starts the fsnotify watcher on the passed paths\nfunc startWatcher(paths []string, ch chan int) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tbeeLogger.Log.Fatalf(\"Could not start the watcher: %v\", err)\n\t}\n\tdefer watcher.Close()\n\n\t\/\/ Feed the paths to the watcher\n\tfor _, path := range paths {\n\t\tif err := watcher.Add(path); err != nil {\n\t\t\tbeeLogger.Log.Fatalf(\"Could not set a watch on path: %v\", err)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase evt := <-watcher.Events:\n\t\t\tbuild := true\n\t\t\tif filepath.Ext(evt.Name) != \".go\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmt := utils.GetFileModTime(evt.Name)\n\t\t\tif t := eventsModTime[evt.Name]; mt == t {\n\t\t\t\tbuild = false\n\t\t\t}\n\t\t\teventsModTime[evt.Name] = mt\n\n\t\t\tif build {\n\t\t\t\tgo func() {\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tutils.Notify(\"Rebuilding application with the new changes\", \"bee\")\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Wait 1s before re-build until there is no file change\n\t\t\t\t\tscheduleTime := time.Now().Add(1 * time.Second)\n\t\t\t\t\ttime.Sleep(scheduleTime.Sub(time.Now()))\n\t\t\t\t\t_, err := buildDebug()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutils.Notify(\"Build Failed: \"+err.Error(), \"bee\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tch <- 0 \/\/ Notify listeners\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\tcase err := <-watcher.Errors:\n\t\t\tif err != nil {\n\t\t\t\tch <- -1\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage installer \/\/ import \"k8s.io\/helm\/cmd\/helm\/installer\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/Masterminds\/sprig\"\n\n\t\"k8s.io\/helm\/pkg\/kube\"\n)\n\n\/\/ Install uses kubernetes client to install tiller\n\/\/\n\/\/ Returns the string output received from the operation, and an error if the\n\/\/ command failed.\n\/\/\n\/\/ If verbose is true, this will print the manifest to stdout.\nfunc Install(namespace, image string, verbose bool) error {\n\tkc := kube.New(nil)\n\n\tif namespace == \"\" {\n\t\tns, _, err := kc.DefaultNamespace()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnamespace = ns\n\t}\n\n\tvar b bytes.Buffer\n\n\t\/\/ Add main install YAML\n\tistpl := template.New(\"install\").Funcs(sprig.TxtFuncMap())\n\n\tcfg := struct {\n\t\tNamespace, Image string\n\t}{namespace, image}\n\n\tif err := template.Must(istpl.Parse(InstallYAML)).Execute(&b, cfg); err != nil {\n\t\treturn err\n\t}\n\n\tif verbose {\n\t\tfmt.Println(b.String())\n\t}\n\n\treturn kc.Create(namespace, &b)\n}\n\n\/\/ InstallYAML is the installation YAML for DM.\nconst InstallYAML = `\n---\napiVersion: extensions\/v1beta1\nkind: Deployment\nmetadata:\n name: tiller-deploy\n namespace: {{ .Namespace }}\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: helm\n name: tiller\n spec:\n containers:\n - image: {{default \"gcr.io\/kubernetes-helm\/tiller:canary\" .Image}}\n name: tiller\n ports:\n - containerPort: 44134\n name: tiller\n imagePullPolicy: Always\n livenessProbe:\n httpGet:\n path: \/liveness\n port: 44135\n initialDelaySeconds: 1\n timeoutSeconds: 1\n readinessProbe:\n httpGet:\n path: \/readiness\n port: 44135\n initialDelaySeconds: 1\n timeoutSeconds: 1\n`\n<commit_msg>feat(cmd): install latest tagged image on `helm init`<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage installer \/\/ import \"k8s.io\/helm\/cmd\/helm\/installer\"\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/extensions\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/intstr\"\n\n\t\"k8s.io\/helm\/pkg\/kube\"\n\t\"k8s.io\/helm\/pkg\/version\"\n)\n\nconst defaultImage = \"gcr.io\/kubernetes-helm\/tiller\"\n\n\/\/ Install uses kubernetes client to install tiller\n\/\/\n\/\/ Returns the string output received from the operation, and an error if the\n\/\/ command failed.\n\/\/\n\/\/ If verbose is true, this will print the manifest to stdout.\nfunc Install(namespace, image string, verbose bool) error {\n\tkc := kube.New(nil)\n\n\tif namespace == \"\" {\n\t\tns, _, err := kc.DefaultNamespace()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnamespace = ns\n\t}\n\n\tc, err := kc.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns := generateNamespace(namespace)\n\tif _, err := c.Namespaces().Create(ns); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif image == \"\" {\n\t\t\/\/ strip git sha off version\n\t\ttag := strings.Split(version.Version, \"+\")[0]\n\t\timage = fmt.Sprintf(\"%s:%s\", defaultImage, tag)\n\t}\n\n\trc := generateDeployment(image)\n\n\t_, err = c.Deployments(namespace).Create(rc)\n\treturn err\n}\n\nfunc generateLabels(labels map[string]string) map[string]string {\n\tlabels[\"app\"] = \"helm\"\n\treturn labels\n}\n\nfunc generateDeployment(image string) *extensions.Deployment {\n\tlabels := generateLabels(map[string]string{\"name\": \"tiller\"})\n\td := &extensions.Deployment{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"tiller-deploy\",\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: extensions.DeploymentSpec{\n\t\t\tReplicas: 1,\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"tiller\",\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tImagePullPolicy: \"Always\",\n\t\t\t\t\t\t\tPorts: []api.ContainerPort{{ContainerPort: 44134, Name: \"tiller\"}},\n\t\t\t\t\t\t\tLivenessProbe: &api.Probe{\n\t\t\t\t\t\t\t\tHandler: api.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &api.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPath: \"\/liveness\",\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(44135),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 1,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &api.Probe{\n\t\t\t\t\t\t\t\tHandler: api.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &api.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPath: \"\/readiness\",\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(44135),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 1,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn d\n}\n\nfunc generateNamespace(namespace string) *api.Namespace {\n\treturn &api.Namespace{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: namespace,\n\t\t\tLabels: generateLabels(map[string]string{\"name\": \"helm-namespace\"}),\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Explicitly check for errors when contacting the BBS<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package cover provides support for parsing coverage profiles\n\/\/ generated by \"go test -coverprofile=cover.out\".\npackage cover \/\/ import \"golang.org\/x\/tools\/cover\"\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Profile represents the profiling data for a specific file.\ntype Profile struct {\n\tFileName string\n\tMode string\n\tBlocks []ProfileBlock\n}\n\n\/\/ ProfileBlock represents a single block of profiling data.\ntype ProfileBlock struct {\n\tStartLine, StartCol int\n\tEndLine, EndCol int\n\tNumStmt, Count int\n}\n\ntype byFileName []*Profile\n\nfunc (p byFileName) Len() int { return len(p) }\nfunc (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }\nfunc (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\n\/\/ ParseProfiles parses profile data in the specified file and returns a\n\/\/ Profile for each source file described therein.\nfunc ParseProfiles(fileName string) ([]*Profile, error) {\n\tpf, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer pf.Close()\n\n\tfiles := make(map[string]*Profile)\n\tbuf := bufio.NewReader(pf)\n\t\/\/ First line is \"mode: foo\", where foo is \"set\", \"count\", or \"atomic\".\n\t\/\/ Rest of file is in the format\n\t\/\/\tencoding\/base64\/base64.go:34.44,37.40 3 1\n\t\/\/ where the fields are: name.go:line.column,line.column numberOfStatements count\n\ts := bufio.NewScanner(buf)\n\tmode := \"\"\n\tfor s.Scan() {\n\t\tline := s.Text()\n\t\tif mode == \"\" {\n\t\t\tconst p = \"mode: \"\n\t\t\tif !strings.HasPrefix(line, p) || line == p {\n\t\t\t\treturn nil, fmt.Errorf(\"bad mode line: %v\", line)\n\t\t\t}\n\t\t\tmode = line[len(p):]\n\t\t\tcontinue\n\t\t}\n\t\tm := lineRe.FindStringSubmatch(line)\n\t\tif m == nil {\n\t\t\treturn nil, fmt.Errorf(\"line %q doesn't match expected format: %v\", m, lineRe)\n\t\t}\n\t\tfn := m[1]\n\t\tp := files[fn]\n\t\tif p == nil {\n\t\t\tp = &Profile{\n\t\t\t\tFileName: fn,\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tfiles[fn] = p\n\t\t}\n\t\tp.Blocks = append(p.Blocks, ProfileBlock{\n\t\t\tStartLine: toInt(m[2]),\n\t\t\tStartCol: toInt(m[3]),\n\t\t\tEndLine: toInt(m[4]),\n\t\t\tEndCol: toInt(m[5]),\n\t\t\tNumStmt: toInt(m[6]),\n\t\t\tCount: toInt(m[7]),\n\t\t})\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, p := range files {\n\t\tsort.Sort(blocksByStart(p.Blocks))\n\t}\n\t\/\/ Generate a sorted slice.\n\tprofiles := make([]*Profile, 0, len(files))\n\tfor _, profile := range files {\n\t\tprofiles = append(profiles, profile)\n\t}\n\tsort.Sort(byFileName(profiles))\n\treturn profiles, nil\n}\n\ntype blocksByStart []ProfileBlock\n\nfunc (b blocksByStart) Len() int { return len(b) }\nfunc (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }\nfunc (b blocksByStart) Less(i, j int) bool {\n\tbi, bj := b[i], b[j]\n\treturn bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol\n}\n\nvar lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`)\n\nfunc toInt(s string) int {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}\n\n\/\/ Boundary represents the position in a source file of the beginning or end of a\n\/\/ block as reported by the coverage profile. In HTML mode, it will correspond to\n\/\/ the opening or closing of a <span> tag and will be used to colorize the source\ntype Boundary struct {\n\tOffset int \/\/ Location as a byte offset in the source file.\n\tStart bool \/\/ Is this the start of a block?\n\tCount int \/\/ Event count from the cover profile.\n\tNorm float64 \/\/ Count normalized to [0..1].\n}\n\n\/\/ Boundaries returns a Profile as a set of Boundary objects within the provided src.\nfunc (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {\n\t\/\/ Find maximum count.\n\tmax := 0\n\tfor _, b := range p.Blocks {\n\t\tif b.Count > max {\n\t\t\tmax = b.Count\n\t\t}\n\t}\n\t\/\/ Divisor for normalization.\n\tdivisor := math.Log(float64(max))\n\n\t\/\/ boundary returns a Boundary, populating the Norm field with a normalized Count.\n\tboundary := func(offset int, start bool, count int) Boundary {\n\t\tb := Boundary{Offset: offset, Start: start, Count: count}\n\t\tif !start || count == 0 {\n\t\t\treturn b\n\t\t}\n\t\tif max <= 1 {\n\t\t\tb.Norm = 0.8 \/\/ Profile is in\"set\" mode; we want a heat map. Use cov8 in the CSS.\n\t\t} else if count > 0 {\n\t\t\tb.Norm = math.Log(float64(count)) \/ divisor\n\t\t}\n\t\treturn b\n\t}\n\n\tline, col := 1, 2 \/\/ TODO: Why is this 2?\n\tfor si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {\n\t\tb := p.Blocks[bi]\n\t\tif b.StartLine == line && b.StartCol == col {\n\t\t\tboundaries = append(boundaries, boundary(si, true, b.Count))\n\t\t}\n\t\tif b.EndLine == line && b.EndCol == col || line > b.EndLine {\n\t\t\tboundaries = append(boundaries, boundary(si, false, 0))\n\t\t\tbi++\n\t\t\tcontinue \/\/ Don't advance through src; maybe the next block starts here.\n\t\t}\n\t\tif src[si] == '\\n' {\n\t\t\tline++\n\t\t\tcol = 0\n\t\t}\n\t\tcol++\n\t\tsi++\n\t}\n\tsort.Sort(boundariesByPos(boundaries))\n\treturn\n}\n\ntype boundariesByPos []Boundary\n\nfunc (b boundariesByPos) Len() int { return len(b) }\nfunc (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }\nfunc (b boundariesByPos) Less(i, j int) bool {\n\tif b[i].Offset == b[j].Offset {\n\t\treturn !b[i].Start && b[j].Start\n\t}\n\treturn b[i].Offset < b[j].Offset\n}\n<commit_msg>cover: fixed broken error message<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package cover provides support for parsing coverage profiles\n\/\/ generated by \"go test -coverprofile=cover.out\".\npackage cover \/\/ import \"golang.org\/x\/tools\/cover\"\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Profile represents the profiling data for a specific file.\ntype Profile struct {\n\tFileName string\n\tMode string\n\tBlocks []ProfileBlock\n}\n\n\/\/ ProfileBlock represents a single block of profiling data.\ntype ProfileBlock struct {\n\tStartLine, StartCol int\n\tEndLine, EndCol int\n\tNumStmt, Count int\n}\n\ntype byFileName []*Profile\n\nfunc (p byFileName) Len() int { return len(p) }\nfunc (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }\nfunc (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\n\/\/ ParseProfiles parses profile data in the specified file and returns a\n\/\/ Profile for each source file described therein.\nfunc ParseProfiles(fileName string) ([]*Profile, error) {\n\tpf, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer pf.Close()\n\n\tfiles := make(map[string]*Profile)\n\tbuf := bufio.NewReader(pf)\n\t\/\/ First line is \"mode: foo\", where foo is \"set\", \"count\", or \"atomic\".\n\t\/\/ Rest of file is in the format\n\t\/\/\tencoding\/base64\/base64.go:34.44,37.40 3 1\n\t\/\/ where the fields are: name.go:line.column,line.column numberOfStatements count\n\ts := bufio.NewScanner(buf)\n\tmode := \"\"\n\tfor s.Scan() {\n\t\tline := s.Text()\n\t\tif mode == \"\" {\n\t\t\tconst p = \"mode: \"\n\t\t\tif !strings.HasPrefix(line, p) || line == p {\n\t\t\t\treturn nil, fmt.Errorf(\"bad mode line: %v\", line)\n\t\t\t}\n\t\t\tmode = line[len(p):]\n\t\t\tcontinue\n\t\t}\n\t\tm := lineRe.FindStringSubmatch(line)\n\t\tif m == nil {\n\t\t\treturn nil, fmt.Errorf(\"line %q doesn't match expected format: %v\", line, lineRe)\n\t\t}\n\t\tfn := m[1]\n\t\tp := files[fn]\n\t\tif p == nil {\n\t\t\tp = &Profile{\n\t\t\t\tFileName: fn,\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tfiles[fn] = p\n\t\t}\n\t\tp.Blocks = append(p.Blocks, ProfileBlock{\n\t\t\tStartLine: toInt(m[2]),\n\t\t\tStartCol: toInt(m[3]),\n\t\t\tEndLine: toInt(m[4]),\n\t\t\tEndCol: toInt(m[5]),\n\t\t\tNumStmt: toInt(m[6]),\n\t\t\tCount: toInt(m[7]),\n\t\t})\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, p := range files {\n\t\tsort.Sort(blocksByStart(p.Blocks))\n\t}\n\t\/\/ Generate a sorted slice.\n\tprofiles := make([]*Profile, 0, len(files))\n\tfor _, profile := range files {\n\t\tprofiles = append(profiles, profile)\n\t}\n\tsort.Sort(byFileName(profiles))\n\treturn profiles, nil\n}\n\ntype blocksByStart []ProfileBlock\n\nfunc (b blocksByStart) Len() int { return len(b) }\nfunc (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }\nfunc (b blocksByStart) Less(i, j int) bool {\n\tbi, bj := b[i], b[j]\n\treturn bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol\n}\n\nvar lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`)\n\nfunc toInt(s string) int {\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}\n\n\/\/ Boundary represents the position in a source file of the beginning or end of a\n\/\/ block as reported by the coverage profile. In HTML mode, it will correspond to\n\/\/ the opening or closing of a <span> tag and will be used to colorize the source\ntype Boundary struct {\n\tOffset int \/\/ Location as a byte offset in the source file.\n\tStart bool \/\/ Is this the start of a block?\n\tCount int \/\/ Event count from the cover profile.\n\tNorm float64 \/\/ Count normalized to [0..1].\n}\n\n\/\/ Boundaries returns a Profile as a set of Boundary objects within the provided src.\nfunc (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {\n\t\/\/ Find maximum count.\n\tmax := 0\n\tfor _, b := range p.Blocks {\n\t\tif b.Count > max {\n\t\t\tmax = b.Count\n\t\t}\n\t}\n\t\/\/ Divisor for normalization.\n\tdivisor := math.Log(float64(max))\n\n\t\/\/ boundary returns a Boundary, populating the Norm field with a normalized Count.\n\tboundary := func(offset int, start bool, count int) Boundary {\n\t\tb := Boundary{Offset: offset, Start: start, Count: count}\n\t\tif !start || count == 0 {\n\t\t\treturn b\n\t\t}\n\t\tif max <= 1 {\n\t\t\tb.Norm = 0.8 \/\/ Profile is in\"set\" mode; we want a heat map. Use cov8 in the CSS.\n\t\t} else if count > 0 {\n\t\t\tb.Norm = math.Log(float64(count)) \/ divisor\n\t\t}\n\t\treturn b\n\t}\n\n\tline, col := 1, 2 \/\/ TODO: Why is this 2?\n\tfor si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {\n\t\tb := p.Blocks[bi]\n\t\tif b.StartLine == line && b.StartCol == col {\n\t\t\tboundaries = append(boundaries, boundary(si, true, b.Count))\n\t\t}\n\t\tif b.EndLine == line && b.EndCol == col || line > b.EndLine {\n\t\t\tboundaries = append(boundaries, boundary(si, false, 0))\n\t\t\tbi++\n\t\t\tcontinue \/\/ Don't advance through src; maybe the next block starts here.\n\t\t}\n\t\tif src[si] == '\\n' {\n\t\t\tline++\n\t\t\tcol = 0\n\t\t}\n\t\tcol++\n\t\tsi++\n\t}\n\tsort.Sort(boundariesByPos(boundaries))\n\treturn\n}\n\ntype boundariesByPos []Boundary\n\nfunc (b boundariesByPos) Len() int { return len(b) }\nfunc (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }\nfunc (b boundariesByPos) Less(i, j int) bool {\n\tif b[i].Offset == b[j].Offset {\n\t\treturn !b[i].Start && b[j].Start\n\t}\n\treturn b[i].Offset < b[j].Offset\n}\n<|endoftext|>"} {"text":"<commit_before>package context\n\nimport (\n\t\"github.com\/cihub\/seelog\"\n\t\"github.com\/dropbox\/dropbox-sdk-go-unofficial\"\n\t\"github.com\/watermint\/dcfg\/cli\"\n\t\"github.com\/watermint\/dcfg\/common\/file\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/admin\/directory\/v1\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"runtime\"\n)\n\ntype ExecutionContext struct {\n\t\/\/ Options\n\tOptions cli.Options\n\n\t\/\/ Dropbox Client\n\tDropboxClient dropbox.Api\n\tDropboxToken DropboxToken\n\n\t\/\/ Google Client\n\tGoogleClient *admin.Service\n\tGoogleClientConfig *oauth2.Config\n\tGoogleToken *oauth2.Token\n}\n\ntype DropboxToken struct {\n\tTeamManagementToken string `json:\"token-team-management\"`\n}\n\nfunc (e *ExecutionContext) CreateGoogleClientByToken(token *oauth2.Token) (*admin.Service, error) {\n\tcontext := context.Background()\n\tclient := e.GoogleClientConfig.Client(context, token)\n\tservice, err := admin.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn service, nil\n}\n\nfunc (e *ExecutionContext) loadGoogleClient() error {\n\tclient, err := e.CreateGoogleClientByToken(e.GoogleToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleClient = client\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadGoogleClientConfig() error {\n\tjson, err := ioutil.ReadFile(e.Options.PathGoogleClientSecret())\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig, err := google.ConfigFromJSON(json,\n\t\tadmin.AdminDirectoryUserReadonlyScope,\n\t\tadmin.AdminDirectoryGroupReadonlyScope)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleClientConfig = config\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadGoogleToken() error {\n\tpath := e.Options.PathGoogleToken()\n\ttoken := oauth2.Token{}\n\t_, err := file.LoadJSON(path, &token)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleToken = &token\n\treturn nil\n}\n\nfunc (e *ExecutionContext) CreateDropboxClientByToken(token string) dropbox.Api {\n\treturn dropbox.Client(token, dropbox.Options{})\n}\n\nfunc (e *ExecutionContext) loadDropboxClient() error {\n\te.DropboxClient = e.CreateDropboxClientByToken(e.DropboxToken.TeamManagementToken)\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadDropboxToken() error {\n\tpath := e.Options.PathDropboxToken()\n\ttoken := DropboxToken{}\n\t_, err := file.LoadJSON(path, &token)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.DropboxToken = token\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitDropboxClient() error {\n\tif err := e.loadDropboxToken(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadDropboxClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitGoogleClient() error {\n\tif err := e.loadGoogleClientConfig(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadGoogleToken(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadGoogleClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitForSync() error {\n\tif err := e.InitDropboxClient(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.InitGoogleClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>fix error<commit_after>package context\n\nimport (\n\t\"github.com\/dropbox\/dropbox-sdk-go-unofficial\"\n\t\"github.com\/watermint\/dcfg\/cli\"\n\t\"github.com\/watermint\/dcfg\/common\/file\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/admin\/directory\/v1\"\n\t\"io\/ioutil\"\n)\n\ntype ExecutionContext struct {\n\t\/\/ Options\n\tOptions cli.Options\n\n\t\/\/ Dropbox Client\n\tDropboxClient dropbox.Api\n\tDropboxToken DropboxToken\n\n\t\/\/ Google Client\n\tGoogleClient *admin.Service\n\tGoogleClientConfig *oauth2.Config\n\tGoogleToken *oauth2.Token\n}\n\ntype DropboxToken struct {\n\tTeamManagementToken string `json:\"token-team-management\"`\n}\n\nfunc (e *ExecutionContext) CreateGoogleClientByToken(token *oauth2.Token) (*admin.Service, error) {\n\tcontext := context.Background()\n\tclient := e.GoogleClientConfig.Client(context, token)\n\tservice, err := admin.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn service, nil\n}\n\nfunc (e *ExecutionContext) loadGoogleClient() error {\n\tclient, err := e.CreateGoogleClientByToken(e.GoogleToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleClient = client\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadGoogleClientConfig() error {\n\tjson, err := ioutil.ReadFile(e.Options.PathGoogleClientSecret())\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig, err := google.ConfigFromJSON(json,\n\t\tadmin.AdminDirectoryUserReadonlyScope,\n\t\tadmin.AdminDirectoryGroupReadonlyScope)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleClientConfig = config\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadGoogleToken() error {\n\tpath := e.Options.PathGoogleToken()\n\ttoken := oauth2.Token{}\n\t_, err := file.LoadJSON(path, &token)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.GoogleToken = &token\n\treturn nil\n}\n\nfunc (e *ExecutionContext) CreateDropboxClientByToken(token string) dropbox.Api {\n\treturn dropbox.Client(token, dropbox.Options{})\n}\n\nfunc (e *ExecutionContext) loadDropboxClient() error {\n\te.DropboxClient = e.CreateDropboxClientByToken(e.DropboxToken.TeamManagementToken)\n\treturn nil\n}\n\nfunc (e *ExecutionContext) loadDropboxToken() error {\n\tpath := e.Options.PathDropboxToken()\n\ttoken := DropboxToken{}\n\t_, err := file.LoadJSON(path, &token)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.DropboxToken = token\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitDropboxClient() error {\n\tif err := e.loadDropboxToken(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadDropboxClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitGoogleClient() error {\n\tif err := e.loadGoogleClientConfig(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadGoogleToken(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.loadGoogleClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *ExecutionContext) InitForSync() error {\n\tif err := e.InitDropboxClient(); err != nil {\n\t\treturn err\n\t}\n\tif err := e.InitGoogleClient(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage value\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ A Value is an object; it corresponds to an 'atom' in the schema.\ntype Value struct {\n\t\/\/ Exactly one of the below must be set.\n\tFloatValue *Float\n\tIntValue *Int\n\tStringValue *String\n\tBooleanValue *Boolean\n\tListValue *List\n\tMapValue *Map\n\tNull bool \/\/ represents an explicit `\"foo\" = null`\n}\n\ntype Int int64\ntype Float float64\ntype String string\ntype Boolean bool\n\n\/\/ Field is an individual key-value pair.\ntype Field struct {\n\tName string\n\tValue Value\n}\n\n\/\/ List is a list of items.\ntype List struct {\n\tItems []Value\n}\n\n\/\/ Map is a map of key-value pairs. It represents both structs and maps. We use\n\/\/ a list and a go-language map to preserve order.\n\/\/\n\/\/ Set and Get helpers are provided.\ntype Map struct {\n\tItems []Field\n\n\t\/\/ may be nil; lazily constructed.\n\t\/\/ TODO: Direct modifications to Items above will cause serious problems.\n\tindex map[string]*Field\n}\n\n\/\/ Get returns the (Field, true) or (nil, false) if it is not present\nfunc (m *Map) Get(key string) (*Field, bool) {\n\tif m.index == nil {\n\t\tm.index = map[string]*Field{}\n\t\tfor i := range m.Items {\n\t\t\tf := &m.Items[i]\n\t\t\tm.index[f.Name] = f\n\t\t}\n\t}\n\tf, ok := m.index[key]\n\treturn f, ok\n}\n\n\/\/ Set inserts or updates the given item.\nfunc (m *Map) Set(key string, value Value) {\n\tif f, ok := m.Get(key); ok {\n\t\tf.Value = value\n\t\treturn\n\t}\n\tm.Items = append(m.Items, Field{Name: key, Value: value})\n\tm.index = nil \/\/ Since the append might have reallocated\n}\n\n\/\/ StringValue returns s as a scalar string Value.\nfunc StringValue(s string) Value {\n\ts2 := String(s)\n\treturn Value{StringValue: &s2}\n}\n\n\/\/ IntValue returns i as a scalar numeric (integer) Value.\nfunc IntValue(i int) Value {\n\ti2 := Int(i)\n\treturn Value{IntValue: &i2}\n}\n\n\/\/ FloatValue returns f as a scalar numeric (float) Value.\nfunc FloatValue(f float64) Value {\n\tf2 := Float(f)\n\treturn Value{FloatValue: &f2}\n}\n\n\/\/ BooleanValue returns b as a scalar boolean Value.\nfunc BooleanValue(b bool) Value {\n\tb2 := Boolean(b)\n\treturn Value{BooleanValue: &b2}\n}\n\n\/\/ String returns a human-readable representation of the value.\nfunc (v Value) String() string {\n\tswitch {\n\tcase v.FloatValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.FloatValue)\n\tcase v.IntValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.IntValue)\n\tcase v.StringValue != nil:\n\t\treturn fmt.Sprintf(\"%q\", *v.StringValue)\n\tcase v.BooleanValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.BooleanValue)\n\tcase v.ListValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, item := range v.ListValue.Items {\n\t\t\tstrs = append(strs, item.String())\n\t\t}\n\t\treturn \"[\" + strings.Join(strs, \",\") + \"]\"\n\tcase v.MapValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, i := range v.MapValue.Items {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"%v=%v\", i.Name, i.Value))\n\t\t}\n\t\treturn \"{\" + strings.Join(strs, \";\") + \"}\"\n\tdefault:\n\t\tfallthrough\n\tcase v.Null == true:\n\t\treturn \"null\"\n\t}\n}\n<commit_msg>Add method to delete items from maps<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage value\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ A Value is an object; it corresponds to an 'atom' in the schema.\ntype Value struct {\n\t\/\/ Exactly one of the below must be set.\n\tFloatValue *Float\n\tIntValue *Int\n\tStringValue *String\n\tBooleanValue *Boolean\n\tListValue *List\n\tMapValue *Map\n\tNull bool \/\/ represents an explicit `\"foo\" = null`\n}\n\ntype Int int64\ntype Float float64\ntype String string\ntype Boolean bool\n\n\/\/ Field is an individual key-value pair.\ntype Field struct {\n\tName string\n\tValue Value\n}\n\n\/\/ List is a list of items.\ntype List struct {\n\tItems []Value\n}\n\n\/\/ Map is a map of key-value pairs. It represents both structs and maps. We use\n\/\/ a list and a go-language map to preserve order.\n\/\/\n\/\/ Set and Get helpers are provided.\ntype Map struct {\n\tItems []Field\n\n\t\/\/ may be nil; lazily constructed.\n\t\/\/ TODO: Direct modifications to Items above will cause serious problems.\n\tindex map[string]*Field\n}\n\n\/\/ Get returns the (Field, true) or (nil, false) if it is not present\nfunc (m *Map) Get(key string) (*Field, bool) {\n\tif m.index == nil {\n\t\tm.index = map[string]*Field{}\n\t\tfor i := range m.Items {\n\t\t\tf := &m.Items[i]\n\t\t\tm.index[f.Name] = f\n\t\t}\n\t}\n\tf, ok := m.index[key]\n\treturn f, ok\n}\n\n\/\/ Set inserts or updates the given item.\nfunc (m *Map) Set(key string, value Value) {\n\tif f, ok := m.Get(key); ok {\n\t\tf.Value = value\n\t\treturn\n\t}\n\tm.Items = append(m.Items, Field{Name: key, Value: value})\n\tm.index = nil \/\/ Since the append might have reallocated\n}\n\n\/\/ Delete removes the key from the set.\nfunc (m *Map) Delete(key string) {\n\titems := []Field{}\n\tfor i := range m.Items {\n\t\tif m.Items[i].Name != key {\n\t\t\titems = append(items, m.Items[i])\n\t\t}\n\t}\n\tm.Items = items\n\tm.index = nil \/\/ Since the list has changed\n}\n\n\/\/ StringValue returns s as a scalar string Value.\nfunc StringValue(s string) Value {\n\ts2 := String(s)\n\treturn Value{StringValue: &s2}\n}\n\n\/\/ IntValue returns i as a scalar numeric (integer) Value.\nfunc IntValue(i int) Value {\n\ti2 := Int(i)\n\treturn Value{IntValue: &i2}\n}\n\n\/\/ FloatValue returns f as a scalar numeric (float) Value.\nfunc FloatValue(f float64) Value {\n\tf2 := Float(f)\n\treturn Value{FloatValue: &f2}\n}\n\n\/\/ BooleanValue returns b as a scalar boolean Value.\nfunc BooleanValue(b bool) Value {\n\tb2 := Boolean(b)\n\treturn Value{BooleanValue: &b2}\n}\n\n\/\/ String returns a human-readable representation of the value.\nfunc (v Value) String() string {\n\tswitch {\n\tcase v.FloatValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.FloatValue)\n\tcase v.IntValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.IntValue)\n\tcase v.StringValue != nil:\n\t\treturn fmt.Sprintf(\"%q\", *v.StringValue)\n\tcase v.BooleanValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.BooleanValue)\n\tcase v.ListValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, item := range v.ListValue.Items {\n\t\t\tstrs = append(strs, item.String())\n\t\t}\n\t\treturn \"[\" + strings.Join(strs, \",\") + \"]\"\n\tcase v.MapValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, i := range v.MapValue.Items {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"%v=%v\", i.Name, i.Value))\n\t\t}\n\t\treturn \"{\" + strings.Join(strs, \";\") + \"}\"\n\tdefault:\n\t\tfallthrough\n\tcase v.Null == true:\n\t\treturn \"null\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\truntimeapi \"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/cri\/v1alpha1\/runtime\"\n)\n\nfunc genTruncIndex(normalName string) string {\n\treturn normalName[:(len(normalName)+1)\/2]\n}\n\nfunc TestTruncIndex(t *testing.T) {\n\tt.Logf(\"Pull an image\")\n\tconst appImage = \"busybox\"\n\timgID, err := imageService.PullImage(&runtimeapi.ImageSpec{Image: appImage}, nil)\n\trequire.NoError(t, err)\n\timgTruncId := genTruncIndex(imgID)\n\tdefer func() {\n\t\tassert.NoError(t, imageService.RemoveImage(&runtimeapi.ImageSpec{Image: imgTruncId}))\n\t}()\n\n\tt.Logf(\"Get image status by truncindex, truncId: %s\", imgTruncId)\n\tres, err := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imgTruncId})\n\trequire.NoError(t, err)\n\trequire.NotEqual(t, nil, res)\n\tassert.Equal(t, imgID, res.Id)\n\n\t\/\/ TODO(yanxuean): for failure test case where there are two images with the same truncindex.\n\t\/\/ if you add n images at least two will share the same leading digit.\n\t\/\/ \"sha256:n\" where n is the a number from 0-9 where two images have the same trunc,\n\t\/\/ for example sha256:9\n\t\/\/ https:\/\/github.com\/kubernetes-incubator\/cri-containerd\/pull\/352\n\t\/\/ I am thinking how I get the two image which have same trunc.\n\n\t\/\/ TODO(yanxuean): add test case for ListImages\n\n\tt.Logf(\"Create a sandbox\")\n\tsbConfig := PodSandboxConfig(\"sandbox\", \"truncindex\")\n\tsb, err := runtimeService.RunPodSandbox(sbConfig)\n\trequire.NoError(t, err)\n\tsbTruncIndex := genTruncIndex(sb)\n\tvar hasStopedSandbox bool\n\tdefer func() {\n\t\t\/\/ The 2th StopPodSandbox will fail, the 2th RemovePodSandbox will success.\n\t\tif !hasStopedSandbox {\n\t\t\tassert.NoError(t, runtimeService.StopPodSandbox(sbTruncIndex))\n\t\t}\n\t\tassert.NoError(t, runtimeService.RemovePodSandbox(sbTruncIndex))\n\t}()\n\n\tt.Logf(\"Get sandbox status by truncindex\")\n\tsbStatus, err := runtimeService.PodSandboxStatus(sbTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, sb, sbStatus.Id)\n\n\tt.Logf(\"Forward port for sandbox by truncindex\")\n\t_, err = runtimeService.PortForward(&runtimeapi.PortForwardRequest{sbTruncIndex, []int32{80}})\n\tassert.NoError(t, err)\n\n\t\/\/ TODO(yanxuean): add test case for ListPodSandbox\n\n\tt.Logf(\"Create a container\")\n\tcnConfig := ContainerConfig(\n\t\t\"containerTruncIndex\",\n\t\tappImage,\n\t\tWithCommand(\"top\"),\n\t)\n\tcn, err := runtimeService.CreateContainer(sbTruncIndex, cnConfig, sbConfig)\n\trequire.NoError(t, err)\n\tcnTruncIndex := genTruncIndex(cn)\n\tdefer func() {\n\t\t\/\/ the 2th RemovePodSandbox will success.\n\t\tassert.NoError(t, runtimeService.RemoveContainer(cnTruncIndex))\n\t}()\n\n\tt.Logf(\"Get container status by truncindex\")\n\tcStatus, err := runtimeService.ContainerStatus(cnTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, cn, cStatus.Id)\n\n\tt.Logf(\"Start the container\")\n\trequire.NoError(t, runtimeService.StartContainer(cnTruncIndex))\n\tvar hasStopedContainer bool\n\tdefer func() {\n\t\t\/\/ The 2th StopPodSandbox will fail\n\t\tif !hasStopedContainer {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cnTruncIndex, 10))\n\t\t}\n\t}()\n\n\tt.Logf(\"Stats the container\")\n\tcStats, err := runtimeService.ContainerStats(cnTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, cn, cStats.Attributes.Id)\n\n\tt.Logf(\"Update container memory limit after started\")\n\terr = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{\n\t\tMemoryLimitInBytes: 50 * 1024 * 1024,\n\t})\n\tassert.NoError(t, err)\n\n\tt.Logf(\"Execute cmd in container\")\n\texecReq := &runtimeapi.ExecRequest{\n\t\tContainerId: cnTruncIndex,\n\t\tCmd: []string{\"pwd\"},\n\t\tStdout: true,\n\t}\n\t_, err = runtimeService.Exec(execReq)\n\tassert.NoError(t, err)\n\n\tt.Logf(\"Execute cmd in container by sync\")\n\t_, _, err = runtimeService.ExecSync(cnTruncIndex, []string{\"pwd\"}, 10)\n\tassert.NoError(t, err)\n\n\t\/\/ TODO(yanxuean): add test case for ListContainers\n\n\tt.Logf(\"Get a non exist container status by truncindex\")\n\terr = runtimeService.StopContainer(cnTruncIndex, 10)\n\tassert.NoError(t, err)\n\tif err == nil {\n\t\thasStopedContainer = true\n\t}\n\t_, err = runtimeService.ContainerStats(cnTruncIndex)\n\tassert.Error(t, err)\n\tassert.NoError(t, runtimeService.RemoveContainer(cnTruncIndex))\n\t_, err = runtimeService.ContainerStatus(cnTruncIndex)\n\tassert.Error(t, err)\n\n\tt.Logf(\"Get a non exist sandbox status by truncindex\")\n\terr = runtimeService.StopPodSandbox(sbTruncIndex)\n\tassert.NoError(t, err)\n\tif err == nil {\n\t\thasStopedSandbox = true\n\t}\n\tassert.NoError(t, runtimeService.RemovePodSandbox(sbTruncIndex))\n\t_, err = runtimeService.PodSandboxStatus(sbTruncIndex)\n\tassert.Error(t, err)\n}\n<commit_msg>Fix typos.<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\truntimeapi \"k8s.io\/kubernetes\/pkg\/kubelet\/apis\/cri\/v1alpha1\/runtime\"\n)\n\nfunc genTruncIndex(normalName string) string {\n\treturn normalName[:(len(normalName)+1)\/2]\n}\n\nfunc TestTruncIndex(t *testing.T) {\n\tt.Logf(\"Pull an image\")\n\tconst appImage = \"busybox\"\n\timgID, err := imageService.PullImage(&runtimeapi.ImageSpec{Image: appImage}, nil)\n\trequire.NoError(t, err)\n\timgTruncId := genTruncIndex(imgID)\n\tdefer func() {\n\t\tassert.NoError(t, imageService.RemoveImage(&runtimeapi.ImageSpec{Image: imgTruncId}))\n\t}()\n\n\tt.Logf(\"Get image status by truncindex, truncId: %s\", imgTruncId)\n\tres, err := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imgTruncId})\n\trequire.NoError(t, err)\n\trequire.NotEqual(t, nil, res)\n\tassert.Equal(t, imgID, res.Id)\n\n\t\/\/ TODO(yanxuean): for failure test case where there are two images with the same truncindex.\n\t\/\/ if you add n images at least two will share the same leading digit.\n\t\/\/ \"sha256:n\" where n is the a number from 0-9 where two images have the same trunc,\n\t\/\/ for example sha256:9\n\t\/\/ https:\/\/github.com\/kubernetes-incubator\/cri-containerd\/pull\/352\n\t\/\/ I am thinking how I get the two image which have same trunc.\n\n\t\/\/ TODO(yanxuean): add test case for ListImages\n\n\tt.Logf(\"Create a sandbox\")\n\tsbConfig := PodSandboxConfig(\"sandbox\", \"truncindex\")\n\tsb, err := runtimeService.RunPodSandbox(sbConfig)\n\trequire.NoError(t, err)\n\tsbTruncIndex := genTruncIndex(sb)\n\tvar hasStoppedSandbox bool\n\tdefer func() {\n\t\t\/\/ The 2th StopPodSandbox will fail, the 2th RemovePodSandbox will success.\n\t\tif !hasStoppedSandbox {\n\t\t\tassert.NoError(t, runtimeService.StopPodSandbox(sbTruncIndex))\n\t\t}\n\t\tassert.NoError(t, runtimeService.RemovePodSandbox(sbTruncIndex))\n\t}()\n\n\tt.Logf(\"Get sandbox status by truncindex\")\n\tsbStatus, err := runtimeService.PodSandboxStatus(sbTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, sb, sbStatus.Id)\n\n\tt.Logf(\"Forward port for sandbox by truncindex\")\n\t_, err = runtimeService.PortForward(&runtimeapi.PortForwardRequest{sbTruncIndex, []int32{80}})\n\tassert.NoError(t, err)\n\n\t\/\/ TODO(yanxuean): add test case for ListPodSandbox\n\n\tt.Logf(\"Create a container\")\n\tcnConfig := ContainerConfig(\n\t\t\"containerTruncIndex\",\n\t\tappImage,\n\t\tWithCommand(\"top\"),\n\t)\n\tcn, err := runtimeService.CreateContainer(sbTruncIndex, cnConfig, sbConfig)\n\trequire.NoError(t, err)\n\tcnTruncIndex := genTruncIndex(cn)\n\tdefer func() {\n\t\t\/\/ the 2th RemovePodSandbox will success.\n\t\tassert.NoError(t, runtimeService.RemoveContainer(cnTruncIndex))\n\t}()\n\n\tt.Logf(\"Get container status by truncindex\")\n\tcStatus, err := runtimeService.ContainerStatus(cnTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, cn, cStatus.Id)\n\n\tt.Logf(\"Start the container\")\n\trequire.NoError(t, runtimeService.StartContainer(cnTruncIndex))\n\tvar hasStoppedContainer bool\n\tdefer func() {\n\t\t\/\/ The 2th StopPodSandbox will fail\n\t\tif !hasStoppedContainer {\n\t\t\tassert.NoError(t, runtimeService.StopContainer(cnTruncIndex, 10))\n\t\t}\n\t}()\n\n\tt.Logf(\"Stats the container\")\n\tcStats, err := runtimeService.ContainerStats(cnTruncIndex)\n\trequire.NoError(t, err)\n\tassert.Equal(t, cn, cStats.Attributes.Id)\n\n\tt.Logf(\"Update container memory limit after started\")\n\terr = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{\n\t\tMemoryLimitInBytes: 50 * 1024 * 1024,\n\t})\n\tassert.NoError(t, err)\n\n\tt.Logf(\"Execute cmd in container\")\n\texecReq := &runtimeapi.ExecRequest{\n\t\tContainerId: cnTruncIndex,\n\t\tCmd: []string{\"pwd\"},\n\t\tStdout: true,\n\t}\n\t_, err = runtimeService.Exec(execReq)\n\tassert.NoError(t, err)\n\n\tt.Logf(\"Execute cmd in container by sync\")\n\t_, _, err = runtimeService.ExecSync(cnTruncIndex, []string{\"pwd\"}, 10)\n\tassert.NoError(t, err)\n\n\t\/\/ TODO(yanxuean): add test case for ListContainers\n\n\tt.Logf(\"Get a non exist container status by truncindex\")\n\terr = runtimeService.StopContainer(cnTruncIndex, 10)\n\tassert.NoError(t, err)\n\tif err == nil {\n\t\thasStoppedContainer = true\n\t}\n\t_, err = runtimeService.ContainerStats(cnTruncIndex)\n\tassert.Error(t, err)\n\tassert.NoError(t, runtimeService.RemoveContainer(cnTruncIndex))\n\t_, err = runtimeService.ContainerStatus(cnTruncIndex)\n\tassert.Error(t, err)\n\n\tt.Logf(\"Get a non exist sandbox status by truncindex\")\n\terr = runtimeService.StopPodSandbox(sbTruncIndex)\n\tassert.NoError(t, err)\n\tif err == nil {\n\t\thasStoppedSandbox = true\n\t}\n\tassert.NoError(t, runtimeService.RemovePodSandbox(sbTruncIndex))\n\t_, err = runtimeService.PodSandboxStatus(sbTruncIndex)\n\tassert.Error(t, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package pow\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/ericlagergren\/decimal\/internal\/c\"\n)\n\nfunc TestBigTen(t *testing.T) {\n\terrc := make(chan error)\n\n\tvar wg sync.WaitGroup\n\tfor i := uint64(0); i < BigTabLen+10; i++ {\n\t\twg.Add(1)\n\t\tgo func(i uint64) {\n\t\t\tcomp := BigTen(i)\n\t\t\tn := new(big.Int).SetUint64(i)\n\t\t\tact := n.Exp(c.TenInt, n, nil)\n\t\t\tif act.Cmp(comp) != 0 {\n\t\t\t\tcs, as := comp.String(), act.String()\n\t\t\t\terrc <- fmt.Errorf(`%d:\ngot : (%d) %s\nwanted: (%d) %s\n`, i, len(cs), cs, len(as), as)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\tdonec := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tdonec <- struct{}{}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tt.Fatal(err.Error())\n\t\tcase <-donec:\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>internal\/arith\/pow: limit number of workers to 1000 so we don't hit race's limit of 8192 goroutines<commit_after>package pow\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/ericlagergren\/decimal\/internal\/c\"\n)\n\nfunc TestBigTen(t *testing.T) {\n\terrc := make(chan error)\n\tworkc := make(chan struct{}, 1000)\n\tfor i := 0; i < 1000; i++ {\n\t\tworkc <- struct{}{}\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor i := uint64(0); i < BigTabLen+10; i++ {\n\t\t<-workc\n\t\twg.Add(1)\n\t\tgo func(i uint64) {\n\t\t\tcomp := BigTen(i)\n\t\t\tn := new(big.Int).SetUint64(i)\n\t\t\tact := n.Exp(c.TenInt, n, nil)\n\t\t\tif act.Cmp(comp) != 0 {\n\t\t\t\tcs, as := comp.String(), act.String()\n\t\t\t\terrc <- fmt.Errorf(`%d:\ngot : (%d) %s\nwanted: (%d) %s\n`, i, len(cs), cs, len(as), as)\n\t\t\t}\n\t\t\twg.Done()\n\t\t\tworkc <- struct{}{}\n\t\t}(i)\n\t}\n\n\tdonec := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tdonec <- struct{}{}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tt.Fatal(err.Error())\n\t\tcase <-donec:\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Go Cloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ contributebot is a service for keeping the Go Cloud project tidy.\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\nconst userAgent = \"google\/go-cloud Contribute Bot\"\n\ntype flagConfig struct {\n\tproject string\n\tsubscription string\n\tgitHubAppID int64\n\tkeyPath string\n}\n\nfunc main() {\n\taddr := flag.String(\"listen\", \":8080\", \"address to listen for health checks\")\n\tvar cfg flagConfig\n\tflag.StringVar(&cfg.project, \"project\", \"\", \"GCP project for topic\")\n\tflag.StringVar(&cfg.subscription, \"subscription\", \"contributebot-github-events\", \"subscription name inside project\")\n\tflag.Int64Var(&cfg.gitHubAppID, \"github_app\", 0, \"GitHub application ID\")\n\tflag.StringVar(&cfg.keyPath, \"github_key\", \"\", \"path to GitHub application private key\")\n\tflag.Parse()\n\tif cfg.project == \"\" || cfg.gitHubAppID == 0 || cfg.keyPath == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"contributebot: must specify -project, -github_app, and -github_key\")\n\t\tos.Exit(2)\n\t}\n\n\tctx := context.Background()\n\tw, server, cleanup, err := setup(ctx, cfg)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer cleanup()\n\tlog.Printf(\"Serving health checks at %s\", *addr)\n\tgo server.ListenAndServe(*addr, w)\n\tlog.Fatal(w.receive(ctx))\n}\n\n\/\/ worker contains the connections used by this server.\ntype worker struct {\n\tsub *pubsub.Subscription\n\tauth *gitHubAppAuth\n\n\tmu sync.Mutex\n\tconfigCache map[repoKey]*repoConfigCacheEntry\n}\n\nfunc newWorker(sub *pubsub.Subscription, auth *gitHubAppAuth) *worker {\n\treturn &worker{\n\t\tsub: sub,\n\t\tauth: auth,\n\t\tconfigCache: make(map[repoKey]*repoConfigCacheEntry),\n\t}\n}\n\nconst configCacheTTL = 2 * time.Minute\n\ntype repoKey struct {\n\towner string\n\trepo string\n}\n\ntype repoConfigCacheEntry struct {\n\t\/\/ On initial placement in the cache, ready will be an open channel.\n\t\/\/ It will be closed once the entry has been fetched and the other\n\t\/\/ fields are thus safe to read.\n\tready <-chan struct{}\n\n\tconfig repoConfig\n\terr error\n\tfetched time.Time\n}\n\n\/\/ receive listens for events on its subscription and handles them.\nfunc (w *worker) receive(ctx context.Context) error {\n\treturn w.sub.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) {\n\t\tid := msg.Attributes[\"X-GitHub-Delivery\"]\n\t\teventType := msg.Attributes[\"X-GitHub-Event\"]\n\t\tif eventType == \"integration_installation\" || eventType == \"integration_installation_repositories\" {\n\t\t\t\/\/ Deprecated event types. Ignore them in favor of supported ones.\n\t\t\tlog.Printf(\"Skipped event %s of deprecated type %s\", id, eventType)\n\t\t\tmsg.Ack()\n\t\t\treturn\n\t\t}\n\t\tevent, err := github.ParseWebHook(eventType, msg.Data)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Parsing %s event %s: %v\", eventType, id, err)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tvar handleErr error\n\t\tswitch event := event.(type) {\n\t\tcase *github.IssuesEvent:\n\t\t\thandleErr = w.receiveIssueEvent(ctx, event)\n\t\tcase *github.PullRequestEvent:\n\t\t\thandleErr = w.receivePullRequestEvent(ctx, event)\n\t\tcase *github.PingEvent, *github.InstallationEvent, *github.CheckSuiteEvent:\n\t\t\t\/\/ No-op.\n\t\tdefault:\n\t\t\tlog.Printf(\"Unhandled webhook event type %s (%T) for %s\", eventType, event, id)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tif handleErr != nil {\n\t\t\tlog.Printf(\"Failed processing %s event %s: %v\", eventType, id, handleErr)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tmsg.Ack()\n\t\tlog.Printf(\"Processed %s event %s\", eventType, id)\n\t})\n}\n\nfunc (w *worker) receiveIssueEvent(ctx context.Context, e *github.IssuesEvent) error {\n\tclient := w.ghClient(e.GetInstallation().GetID())\n\n\t\/\/ Pull out the interesting data from the event.\n\tdata := &issueData{\n\t\tAction: e.GetAction(),\n\t\tOwner: e.GetRepo().GetOwner().GetLogin(),\n\t\tRepo: e.GetRepo().GetName(),\n\t\tIssue: e.GetIssue(),\n\t\tChange: e.GetChanges(),\n\t}\n\n\t\/\/ Fetch repository configuration.\n\tcfg, err := w.repoConfig(ctx, client, data.Owner, data.Repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Refetch the issue in case the event data is stale.\n\tiss, _, err := client.Issues.Get(ctx, data.Owner, data.Repo, data.Issue.GetNumber())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.Issue = iss\n\n\t\/\/ Process the issue, deciding what actions to take (if any).\n\tedits := processIssueEvent(cfg, data)\n\t\/\/ Execute the actions (if any).\n\treturn edits.Execute(ctx, client, data)\n}\n\nfunc (w *worker) receivePullRequestEvent(ctx context.Context, e *github.PullRequestEvent) error {\n\tclient := w.ghClient(e.GetInstallation().GetID())\n\n\t\/\/ Pull out the interesting data from the event.\n\tdata := &pullRequestData{\n\t\tAction: e.GetAction(),\n\t\tOwnerLogin: e.GetRepo().GetOwner().GetLogin(),\n\t\tRepo: e.GetRepo().GetName(),\n\t\tPullRequest: e.GetPullRequest(),\n\t\tChange: e.GetChanges(),\n\t}\n\n\t\/\/ Fetch repository configuration.\n\tcfg, err := w.repoConfig(ctx, client, data.OwnerLogin, data.Repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Refetch the pull request in case the event data is stale.\n\tpr, _, err := client.PullRequests.Get(ctx, data.OwnerLogin, data.Repo, data.PullRequest.GetNumber())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.PullRequest = pr\n\n\t\/\/ Process the pull request, deciding what actions to take (if any).\n\tedits := processPullRequestEvent(cfg, data)\n\t\/\/ Execute the actions (if any).\n\treturn edits.Execute(ctx, client, data)\n}\n\n\/\/ repoConfig fetches the parsed Contribute Bot configuration for the given\n\/\/ repository. The result may be cached.\nfunc (w *worker) repoConfig(ctx context.Context, client *github.Client, owner, repo string) (_ *repoConfig, err error) {\n\tcacheKey := repoKey{owner, repo}\n\tqueryTime := time.Now()\n\tw.mu.Lock()\n\tent := w.configCache[cacheKey]\n\tif ent != nil {\n\t\tselect {\n\t\tcase <-ent.ready:\n\t\t\t\/\/ Entry has been fully written. Check if we can use its results.\n\t\t\tif ent.err == nil && queryTime.Sub(ent.fetched) < configCacheTTL {\n\t\t\t\t\/\/ Cache hit; no processing necessary.\n\t\t\t\tw.mu.Unlock()\n\t\t\t\treturn &ent.config, nil\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Another goroutine is currently retrieving the configuration. Block on that result.\n\t\t\tw.mu.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ent.ready:\n\t\t\t\tif ent.err != nil {\n\t\t\t\t\treturn nil, ent.err\n\t\t\t\t}\n\t\t\t\treturn &ent.config, nil\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, ctx.Err())\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Cache miss. Reserve the fetch work.\n\tdone := make(chan struct{})\n\tent = &repoConfigCacheEntry{\n\t\tready: done,\n\t\tconfig: *defaultRepoConfig(),\n\t}\n\tw.configCache[cacheKey] = ent\n\tw.mu.Unlock()\n\tdefer func() {\n\t\tent.fetched = time.Now()\n\t\tent.err = err \/\/ err is the named return value.\n\t\tclose(done)\n\t}()\n\n\t\/\/ Fetch the configuration from the repository.\n\tcontent, _, response, err := client.Repositories.GetContents(ctx, owner, repo, \".contributebot\", nil)\n\tif response != nil && response.StatusCode == http.StatusNotFound {\n\t\t\/\/ File not found. Use default configuration.\n\t\treturn &ent.config, nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\tdata, err := content.GetContent()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\tif err := json.Unmarshal([]byte(data), &ent.config); err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\treturn &ent.config, nil\n}\n\n\/\/ isClosed tests whether a channel is closed without blocking.\nfunc isClosed(c <-chan struct{}) bool {\n\tselect {\n\tcase _, ok := <-c:\n\t\treturn !ok\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ ghClient creates a GitHub client authenticated for the given installation.\nfunc (w *worker) ghClient(installID int64) *github.Client {\n\tc := github.NewClient(&http.Client{Transport: w.auth.forInstall(installID)})\n\tc.UserAgent = userAgent\n\treturn c\n}\n\n\/\/ ServeHTTP serves a page explaining that this port is only open for health checks.\nfunc (w *worker) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif req.URL.Path != \"\/\" {\n\t\thttp.NotFound(resp, req)\n\t\treturn\n\t}\n\tconst responseData = `<!DOCTYPE html>\n<title>Go Cloud Contribute Bot Worker<\/title>\n<h1>Go Cloud Contribute Bot Worker<\/h1>\n<p>This HTTP port is only open to serve health checks.<\/p>`\n\tresp.Header().Set(\"Content-Length\", fmt.Sprint(len(responseData)))\n\tresp.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tio.WriteString(resp, responseData)\n}\n\nfunc (w *worker) CheckHealth() error {\n\treturn w.auth.CheckHealth()\n}\n<commit_msg>internal\/contributebot: ignore check run and push events (#995)<commit_after>\/\/ Copyright 2018 The Go Cloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ contributebot is a service for keeping the Go Cloud project tidy.\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\nconst userAgent = \"google\/go-cloud Contribute Bot\"\n\ntype flagConfig struct {\n\tproject string\n\tsubscription string\n\tgitHubAppID int64\n\tkeyPath string\n}\n\nfunc main() {\n\taddr := flag.String(\"listen\", \":8080\", \"address to listen for health checks\")\n\tvar cfg flagConfig\n\tflag.StringVar(&cfg.project, \"project\", \"\", \"GCP project for topic\")\n\tflag.StringVar(&cfg.subscription, \"subscription\", \"contributebot-github-events\", \"subscription name inside project\")\n\tflag.Int64Var(&cfg.gitHubAppID, \"github_app\", 0, \"GitHub application ID\")\n\tflag.StringVar(&cfg.keyPath, \"github_key\", \"\", \"path to GitHub application private key\")\n\tflag.Parse()\n\tif cfg.project == \"\" || cfg.gitHubAppID == 0 || cfg.keyPath == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"contributebot: must specify -project, -github_app, and -github_key\")\n\t\tos.Exit(2)\n\t}\n\n\tctx := context.Background()\n\tw, server, cleanup, err := setup(ctx, cfg)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer cleanup()\n\tlog.Printf(\"Serving health checks at %s\", *addr)\n\tgo server.ListenAndServe(*addr, w)\n\tlog.Fatal(w.receive(ctx))\n}\n\n\/\/ worker contains the connections used by this server.\ntype worker struct {\n\tsub *pubsub.Subscription\n\tauth *gitHubAppAuth\n\n\tmu sync.Mutex\n\tconfigCache map[repoKey]*repoConfigCacheEntry\n}\n\nfunc newWorker(sub *pubsub.Subscription, auth *gitHubAppAuth) *worker {\n\treturn &worker{\n\t\tsub: sub,\n\t\tauth: auth,\n\t\tconfigCache: make(map[repoKey]*repoConfigCacheEntry),\n\t}\n}\n\nconst configCacheTTL = 2 * time.Minute\n\ntype repoKey struct {\n\towner string\n\trepo string\n}\n\ntype repoConfigCacheEntry struct {\n\t\/\/ On initial placement in the cache, ready will be an open channel.\n\t\/\/ It will be closed once the entry has been fetched and the other\n\t\/\/ fields are thus safe to read.\n\tready <-chan struct{}\n\n\tconfig repoConfig\n\terr error\n\tfetched time.Time\n}\n\n\/\/ receive listens for events on its subscription and handles them.\nfunc (w *worker) receive(ctx context.Context) error {\n\treturn w.sub.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) {\n\t\tid := msg.Attributes[\"X-GitHub-Delivery\"]\n\t\teventType := msg.Attributes[\"X-GitHub-Event\"]\n\t\tif eventType == \"integration_installation\" || eventType == \"integration_installation_repositories\" {\n\t\t\t\/\/ Deprecated event types. Ignore them in favor of supported ones.\n\t\t\tlog.Printf(\"Skipped event %s of deprecated type %s\", id, eventType)\n\t\t\tmsg.Ack()\n\t\t\treturn\n\t\t}\n\t\tevent, err := github.ParseWebHook(eventType, msg.Data)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Parsing %s event %s: %v\", eventType, id, err)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tvar handleErr error\n\t\tswitch event := event.(type) {\n\t\tcase *github.IssuesEvent:\n\t\t\thandleErr = w.receiveIssueEvent(ctx, event)\n\t\tcase *github.PullRequestEvent:\n\t\t\thandleErr = w.receivePullRequestEvent(ctx, event)\n\t\tcase *github.PingEvent, *github.InstallationEvent, *github.CheckRunEvent, *github.CheckSuiteEvent, *github.PushEvent:\n\t\t\t\/\/ No-op.\n\t\tdefault:\n\t\t\tlog.Printf(\"Unhandled webhook event type %s (%T) for %s\", eventType, event, id)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tif handleErr != nil {\n\t\t\tlog.Printf(\"Failed processing %s event %s: %v\", eventType, id, handleErr)\n\t\t\tmsg.Nack()\n\t\t\treturn\n\t\t}\n\t\tmsg.Ack()\n\t\tlog.Printf(\"Processed %s event %s\", eventType, id)\n\t})\n}\n\nfunc (w *worker) receiveIssueEvent(ctx context.Context, e *github.IssuesEvent) error {\n\tclient := w.ghClient(e.GetInstallation().GetID())\n\n\t\/\/ Pull out the interesting data from the event.\n\tdata := &issueData{\n\t\tAction: e.GetAction(),\n\t\tOwner: e.GetRepo().GetOwner().GetLogin(),\n\t\tRepo: e.GetRepo().GetName(),\n\t\tIssue: e.GetIssue(),\n\t\tChange: e.GetChanges(),\n\t}\n\n\t\/\/ Fetch repository configuration.\n\tcfg, err := w.repoConfig(ctx, client, data.Owner, data.Repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Refetch the issue in case the event data is stale.\n\tiss, _, err := client.Issues.Get(ctx, data.Owner, data.Repo, data.Issue.GetNumber())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.Issue = iss\n\n\t\/\/ Process the issue, deciding what actions to take (if any).\n\tedits := processIssueEvent(cfg, data)\n\t\/\/ Execute the actions (if any).\n\treturn edits.Execute(ctx, client, data)\n}\n\nfunc (w *worker) receivePullRequestEvent(ctx context.Context, e *github.PullRequestEvent) error {\n\tclient := w.ghClient(e.GetInstallation().GetID())\n\n\t\/\/ Pull out the interesting data from the event.\n\tdata := &pullRequestData{\n\t\tAction: e.GetAction(),\n\t\tOwnerLogin: e.GetRepo().GetOwner().GetLogin(),\n\t\tRepo: e.GetRepo().GetName(),\n\t\tPullRequest: e.GetPullRequest(),\n\t\tChange: e.GetChanges(),\n\t}\n\n\t\/\/ Fetch repository configuration.\n\tcfg, err := w.repoConfig(ctx, client, data.OwnerLogin, data.Repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Refetch the pull request in case the event data is stale.\n\tpr, _, err := client.PullRequests.Get(ctx, data.OwnerLogin, data.Repo, data.PullRequest.GetNumber())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata.PullRequest = pr\n\n\t\/\/ Process the pull request, deciding what actions to take (if any).\n\tedits := processPullRequestEvent(cfg, data)\n\t\/\/ Execute the actions (if any).\n\treturn edits.Execute(ctx, client, data)\n}\n\n\/\/ repoConfig fetches the parsed Contribute Bot configuration for the given\n\/\/ repository. The result may be cached.\nfunc (w *worker) repoConfig(ctx context.Context, client *github.Client, owner, repo string) (_ *repoConfig, err error) {\n\tcacheKey := repoKey{owner, repo}\n\tqueryTime := time.Now()\n\tw.mu.Lock()\n\tent := w.configCache[cacheKey]\n\tif ent != nil {\n\t\tselect {\n\t\tcase <-ent.ready:\n\t\t\t\/\/ Entry has been fully written. Check if we can use its results.\n\t\t\tif ent.err == nil && queryTime.Sub(ent.fetched) < configCacheTTL {\n\t\t\t\t\/\/ Cache hit; no processing necessary.\n\t\t\t\tw.mu.Unlock()\n\t\t\t\treturn &ent.config, nil\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Another goroutine is currently retrieving the configuration. Block on that result.\n\t\t\tw.mu.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ent.ready:\n\t\t\t\tif ent.err != nil {\n\t\t\t\t\treturn nil, ent.err\n\t\t\t\t}\n\t\t\t\treturn &ent.config, nil\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, ctx.Err())\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Cache miss. Reserve the fetch work.\n\tdone := make(chan struct{})\n\tent = &repoConfigCacheEntry{\n\t\tready: done,\n\t\tconfig: *defaultRepoConfig(),\n\t}\n\tw.configCache[cacheKey] = ent\n\tw.mu.Unlock()\n\tdefer func() {\n\t\tent.fetched = time.Now()\n\t\tent.err = err \/\/ err is the named return value.\n\t\tclose(done)\n\t}()\n\n\t\/\/ Fetch the configuration from the repository.\n\tcontent, _, response, err := client.Repositories.GetContents(ctx, owner, repo, \".contributebot\", nil)\n\tif response != nil && response.StatusCode == http.StatusNotFound {\n\t\t\/\/ File not found. Use default configuration.\n\t\treturn &ent.config, nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\tdata, err := content.GetContent()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\tif err := json.Unmarshal([]byte(data), &ent.config); err != nil {\n\t\treturn nil, fmt.Errorf(\"read repository %s\/%s config: %v\", owner, repo, err)\n\t}\n\treturn &ent.config, nil\n}\n\n\/\/ isClosed tests whether a channel is closed without blocking.\nfunc isClosed(c <-chan struct{}) bool {\n\tselect {\n\tcase _, ok := <-c:\n\t\treturn !ok\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ ghClient creates a GitHub client authenticated for the given installation.\nfunc (w *worker) ghClient(installID int64) *github.Client {\n\tc := github.NewClient(&http.Client{Transport: w.auth.forInstall(installID)})\n\tc.UserAgent = userAgent\n\treturn c\n}\n\n\/\/ ServeHTTP serves a page explaining that this port is only open for health checks.\nfunc (w *worker) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif req.URL.Path != \"\/\" {\n\t\thttp.NotFound(resp, req)\n\t\treturn\n\t}\n\tconst responseData = `<!DOCTYPE html>\n<title>Go Cloud Contribute Bot Worker<\/title>\n<h1>Go Cloud Contribute Bot Worker<\/h1>\n<p>This HTTP port is only open to serve health checks.<\/p>`\n\tresp.Header().Set(\"Content-Length\", fmt.Sprint(len(responseData)))\n\tresp.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tio.WriteString(resp, responseData)\n}\n\nfunc (w *worker) CheckHealth() error {\n\treturn w.auth.CheckHealth()\n}\n<|endoftext|>"} {"text":"<commit_before>package mount\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/webdav\"\n\n\t\"github.com\/kopia\/kopia\/fs\"\n\t\"github.com\/kopia\/kopia\/internal\/webdavmount\"\n)\n\nfunc webdavServerLogger(r *http.Request, err error) {\n\tvar maybeRange string\n\tif r := r.Header.Get(\"Range\"); r != \"\" {\n\t\tmaybeRange = \" \" + r\n\t}\n\n\tif err != nil {\n\t\tlog(r.Context()).Warningf(\"%v %v%v err: %v\\n\", r.Method, r.URL.RequestURI(), maybeRange, err)\n\t} else {\n\t\tlog(r.Context()).Debugf(\"%v %v%v OK\\n\", r.Method, r.URL.RequestURI(), maybeRange)\n\t}\n}\n\n\/\/ DirectoryWebDAV exposes the provided filesystem directory via WebDAV on a random port on localhost\n\/\/ and returns a controller.\nfunc DirectoryWebDAV(ctx context.Context, entry fs.Directory) (Controller, error) {\n\tlog(ctx).Debugf(\"creating webdav server...\")\n\n\tmux := http.NewServeMux()\n\n\tvar logger func(r *http.Request, err error)\n\n\tif os.Getenv(\"WEBDAV_LOG_REQUESTS\") != \"\" {\n\t\tlogger = webdavServerLogger\n\t}\n\n\tmux.Handle(\"\/\", &webdav.Handler{\n\t\tFileSystem: webdavmount.WebDAVFS(entry),\n\t\tLockSystem: webdav.NewMemLS(),\n\t\tLogger: logger,\n\t})\n\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"listen error\")\n\t}\n\n\tsrv := &http.Server{\n\t\tHandler: mux,\n\t}\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tlog(ctx).Debugf(\"web server finished with %v\", srv.Serve(l))\n\t}()\n\n\treturn webdavController{\"http:\/\/\" + l.Addr().String(), srv, done}, nil\n}\n\ntype webdavController struct {\n\twebdavURL string\n\ts *http.Server\n\tdone chan struct{}\n}\n\nfunc (c webdavController) Unmount(ctx context.Context) error {\n\treturn c.s.Shutdown(ctx)\n}\n\nfunc (c webdavController) MountPath() string {\n\treturn c.webdavURL\n}\n\nfunc (c webdavController) Done() <-chan struct{} {\n\treturn c.done\n}\n<commit_msg>Fix hang at unmount when Ctrl-C is pressed (#754)<commit_after>package mount\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/webdav\"\n\n\t\"github.com\/kopia\/kopia\/fs\"\n\t\"github.com\/kopia\/kopia\/internal\/webdavmount\"\n)\n\nfunc webdavServerLogger(r *http.Request, err error) {\n\tvar maybeRange string\n\tif r := r.Header.Get(\"Range\"); r != \"\" {\n\t\tmaybeRange = \" \" + r\n\t}\n\n\tif err != nil {\n\t\tlog(r.Context()).Warningf(\"%v %v%v err: %v\\n\", r.Method, r.URL.RequestURI(), maybeRange, err)\n\t} else {\n\t\tlog(r.Context()).Debugf(\"%v %v%v OK\\n\", r.Method, r.URL.RequestURI(), maybeRange)\n\t}\n}\n\n\/\/ DirectoryWebDAV exposes the provided filesystem directory via WebDAV on a random port on localhost\n\/\/ and returns a controller.\nfunc DirectoryWebDAV(ctx context.Context, entry fs.Directory) (Controller, error) {\n\tlog(ctx).Debugf(\"creating webdav server...\")\n\n\tmux := http.NewServeMux()\n\n\tvar logger func(r *http.Request, err error)\n\n\tif os.Getenv(\"WEBDAV_LOG_REQUESTS\") != \"\" {\n\t\tlogger = webdavServerLogger\n\t}\n\n\tmux.Handle(\"\/\", &webdav.Handler{\n\t\tFileSystem: webdavmount.WebDAVFS(entry),\n\t\tLockSystem: webdav.NewMemLS(),\n\t\tLogger: logger,\n\t})\n\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"listen error\")\n\t}\n\n\tsrv := &http.Server{\n\t\tHandler: mux,\n\t}\n\n\tdone := make(chan struct{})\n\n\tsrv.RegisterOnShutdown(func() {\n\t\tclose(done)\n\t})\n\n\tgo func() {\n\t\tlog(ctx).Debugf(\"web server finished with %v\", srv.Serve(l))\n\t}()\n\n\treturn webdavController{\"http:\/\/\" + l.Addr().String(), srv, done}, nil\n}\n\ntype webdavController struct {\n\twebdavURL string\n\ts *http.Server\n\tdone chan struct{}\n}\n\nfunc (c webdavController) Unmount(ctx context.Context) error {\n\treturn c.s.Shutdown(ctx)\n}\n\nfunc (c webdavController) MountPath() string {\n\treturn c.webdavURL\n}\n\nfunc (c webdavController) Done() <-chan struct{} {\n\treturn c.done\n}\n<|endoftext|>"} {"text":"<commit_before>package paillier\n\nimport (\n\t\"crypto\/rand\"\n\t\"math\/big\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar MockGenerateSafePrimes = func() (*big.Int, *big.Int, error) {\n\treturn big.NewInt(887), big.NewInt(443), nil\n}\n\nfunc TestGenerateSafePrimesOfThresholdKeyGenerator(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Random = rand.Reader\n\tp, q, err := tkh.generateSafePrimes()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tAreSafePrimes(p, q, 16, t)\n}\n\nfunc TestInitPandP1(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Random = rand.Reader\n\n\ttkh.initPandP1()\n\tAreSafePrimes(tkh.p, tkh.p1, 16, t)\n}\n\nfunc TestInitQandQ1(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Random = rand.Reader\n\n\ttkh.initQandQ1()\n\tAreSafePrimes(tkh.q, tkh.q1, 16, t)\n}\n\nfunc TestInitPsAndQs(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Random = rand.Reader\n\n\ttkh.initPsAndQs()\n\n\tAreSafePrimes(tkh.p, tkh.p1, 16, t)\n\tAreSafePrimes(tkh.q, tkh.q1, 16, t)\n}\n\nfunc TestArePsAndQsGood(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(839), b(419)\n\tif !tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(887), b(443)\n\tif tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(443), b(221)\n\tif tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInitShortcuts(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(839), b(419), b(887), b(443)\n\ttkh.initShortcuts()\n\n\tif !reflect.DeepEqual(tkh.n, b(744193)) {\n\t\tt.Error(\"wrong n\", tkh.n)\n\t}\n\tif !reflect.DeepEqual(tkh.m, b(185617)) {\n\t\tt.Error(\"wrong m\", tkh.m)\n\t}\n\tif !reflect.DeepEqual(tkh.nm, new(big.Int).Mul(b(744193), b(185617))) {\n\t\tt.Error(\"wrong nm\", tkh.nm)\n\t}\n\tif !reflect.DeepEqual(tkh.nSquare, new(big.Int).Mul(b(744193), b(744193))) {\n\t\tt.Error(\"wrong nSquare\", tkh.nSquare)\n\t}\n}\n\nfunc TestInitD(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(863), b(431), b(839), b(419)\n\ttkh.initShortcuts()\n\ttkh.initD()\n\tif n(tkh.d)%n(tkh.m) != 0 {\n\t\tt.Fail()\n\t}\n\tif n(tkh.d)%n(tkh.n) != 1 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInitNumerialValues(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Random = rand.Reader\n\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestGenerateHidingPolynomial(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Threshold = 10\n\ttkh.Random = rand.Reader\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := tkh.generateHidingPolynomial(); err != nil {\n\t\tt.Error(err)\n\t}\n\tp := tkh.polynomialCoefficients\n\tif len(p) != tkh.Threshold {\n\t\tt.Fail()\n\t}\n\tif n(p[0]) != n(tkh.d) {\n\t\tt.Fail()\n\t}\n\tfor i := 1; i < len(p); i++ {\n\t\tif j := n(p[i]); j < 0 || j >= n(tkh.nm) {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc TestComputeShare(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Threshold = 3\n\ttkh.TotalNumberOfDecryptionServers = 5\n\ttkh.nm = b(103)\n\ttkh.polynomialCoefficients = []*big.Int{b(29), b(88), b(51)}\n\tshare := tkh.computeShare(2)\n\tif n(share) != 31 {\n\t\tt.Error(\"error computing a share. \", share)\n\t}\n}\n\nfunc TestCreateShares(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.publicKeyBitLength = 32\n\ttkh.Threshold = 10\n\ttkh.TotalNumberOfDecryptionServers = 100\n\ttkh.Random = rand.Reader\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := tkh.generateHidingPolynomial(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif shares := tkh.createShares(); len(shares) != 100 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCreateViArray(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.TotalNumberOfDecryptionServers = 10\n\ttkh.v = b(54)\n\ttkh.nSquare = b(101 * 101)\n\tvArr := tkh.createViArray([]*big.Int{b(12), b(90), b(103)})\n\texp := []*big.Int{b(6162), b(304), b(2728)}\n\tif !reflect.DeepEqual(vArr, exp) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestGetThresholdKeyGenerator(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(50, 10, 6, rand.Reader)\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(nil)\n\t}\n}\n\nfunc TestGenerate(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 10, 6, rand.Reader)\n\ttpks, err := tkh.Generate()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif len(tpks) != 10 {\n\t\tt.Fail()\n\t}\n\tfor i, tpk := range tpks {\n\t\tif tpk.Id != i+1 {\n\t\t\tt.Fail()\n\t\t}\n\t\tif len(tpk.Vi) != 10 {\n\t\t\tt.Fail()\n\t\t}\n\t\tif tpk.N == nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif tpk.Threshold != 6 || tpk.TotalNumberOfDecryptionServers != 10 {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc TestComputeV(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 10, 6, rand.Reader)\n\ttkh.n = b(1907 * 1823)\n\ttkh.nSquare = new(big.Int).Mul(tkh.n, tkh.n)\n\tfor i := 0; i < 100; i++ {\n\t\tif err := tkh.computeV(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif tkh.v.Cmp(tkh.nSquare) > 0 {\n\t\t\tt.Error(\"v is too big\")\n\t\t}\n\t\tif tkh.v.Cmp(tkh.n) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(`v has never been bigger than n. It is suspicious in the sense<\n\tthan it was taken in the range 0...n**2 -1\n\t`)\n}\n<commit_msg>Use GenThresholdKeyGenerator whenever it's possible<commit_after>package paillier\n\nimport (\n\t\"crypto\/rand\"\n\t\"math\/big\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar MockGenerateSafePrimes = func() (*big.Int, *big.Int, error) {\n\treturn big.NewInt(887), big.NewInt(443), nil\n}\n\nfunc TestGenerateSafePrimesOfThresholdKeyGenerator(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 4, 3, rand.Reader)\n\n\tp, q, err := tkh.generateSafePrimes()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tAreSafePrimes(p, q, 16, t)\n}\n\nfunc TestInitPandP1(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 4, 3, rand.Reader)\n\n\ttkh.initPandP1()\n\tAreSafePrimes(tkh.p, tkh.p1, 16, t)\n}\n\nfunc TestInitQandQ1(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 4, 3, rand.Reader)\n\n\ttkh.initQandQ1()\n\tAreSafePrimes(tkh.q, tkh.q1, 16, t)\n}\n\nfunc TestInitPsAndQs(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 4, 3, rand.Reader)\n\n\ttkh.initPsAndQs()\n\n\tAreSafePrimes(tkh.p, tkh.p1, 16, t)\n\tAreSafePrimes(tkh.q, tkh.q1, 16, t)\n}\n\nfunc TestArePsAndQsGood(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(839), b(419)\n\tif !tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(887), b(443)\n\tif tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(887), b(443), b(443), b(221)\n\tif tkh.arePsAndQsGood() {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInitShortcuts(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(839), b(419), b(887), b(443)\n\ttkh.initShortcuts()\n\n\tif !reflect.DeepEqual(tkh.n, b(744193)) {\n\t\tt.Error(\"wrong n\", tkh.n)\n\t}\n\tif !reflect.DeepEqual(tkh.m, b(185617)) {\n\t\tt.Error(\"wrong m\", tkh.m)\n\t}\n\tif !reflect.DeepEqual(tkh.nm, new(big.Int).Mul(b(744193), b(185617))) {\n\t\tt.Error(\"wrong nm\", tkh.nm)\n\t}\n\tif !reflect.DeepEqual(tkh.nSquare, new(big.Int).Mul(b(744193), b(744193))) {\n\t\tt.Error(\"wrong nSquare\", tkh.nSquare)\n\t}\n}\n\nfunc TestInitD(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.p, tkh.p1, tkh.q, tkh.q1 = b(863), b(431), b(839), b(419)\n\ttkh.initShortcuts()\n\ttkh.initD()\n\tif n(tkh.d)%n(tkh.m) != 0 {\n\t\tt.Fail()\n\t}\n\tif n(tkh.d)%n(tkh.n) != 1 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInitNumerialValues(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 4, 3, rand.Reader)\n\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestGenerateHidingPolynomial(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 15, 10, rand.Reader)\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := tkh.generateHidingPolynomial(); err != nil {\n\t\tt.Error(err)\n\t}\n\tp := tkh.polynomialCoefficients\n\tif len(p) != tkh.Threshold {\n\t\tt.Fail()\n\t}\n\tif n(p[0]) != n(tkh.d) {\n\t\tt.Fail()\n\t}\n\tfor i := 1; i < len(p); i++ {\n\t\tif j := n(p[i]); j < 0 || j >= n(tkh.nm) {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc TestComputeShare(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 5, 3, rand.Reader)\n\ttkh.nm = b(103)\n\ttkh.polynomialCoefficients = []*big.Int{b(29), b(88), b(51)}\n\tshare := tkh.computeShare(2)\n\tif n(share) != 31 {\n\t\tt.Error(\"error computing a share. \", share)\n\t}\n}\n\nfunc TestCreateShares(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 100, 10, rand.Reader)\n\ttkh.Random = rand.Reader\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := tkh.generateHidingPolynomial(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif shares := tkh.createShares(); len(shares) != 100 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCreateViArray(t *testing.T) {\n\ttkh := new(ThresholdKeyGenerator)\n\ttkh.TotalNumberOfDecryptionServers = 10\n\ttkh.v = b(54)\n\ttkh.nSquare = b(101 * 101)\n\tvArr := tkh.createViArray([]*big.Int{b(12), b(90), b(103)})\n\texp := []*big.Int{b(6162), b(304), b(2728)}\n\tif !reflect.DeepEqual(vArr, exp) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestGetThresholdKeyGenerator(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(50, 10, 6, rand.Reader)\n\tif err := tkh.initNumerialValues(); err != nil {\n\t\tt.Error(nil)\n\t}\n}\n\nfunc TestGenerate(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 10, 6, rand.Reader)\n\ttpks, err := tkh.Generate()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif len(tpks) != 10 {\n\t\tt.Fail()\n\t}\n\tfor i, tpk := range tpks {\n\t\tif tpk.Id != i+1 {\n\t\t\tt.Fail()\n\t\t}\n\t\tif len(tpk.Vi) != 10 {\n\t\t\tt.Fail()\n\t\t}\n\t\tif tpk.N == nil {\n\t\t\tt.Fail()\n\t\t}\n\t\tif tpk.Threshold != 6 || tpk.TotalNumberOfDecryptionServers != 10 {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc TestComputeV(t *testing.T) {\n\ttkh := GetThresholdKeyGenerator(32, 10, 6, rand.Reader)\n\ttkh.n = b(1907 * 1823)\n\ttkh.nSquare = new(big.Int).Mul(tkh.n, tkh.n)\n\tfor i := 0; i < 100; i++ {\n\t\tif err := tkh.computeV(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif tkh.v.Cmp(tkh.nSquare) > 0 {\n\t\t\tt.Error(\"v is too big\")\n\t\t}\n\t\tif tkh.v.Cmp(tkh.n) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(`v has never been bigger than n. It is suspicious in the sense<\n\tthan it was taken in the range 0...n**2 -1\n\t`)\n}\n<|endoftext|>"} {"text":"<commit_before>package dns\n\nimport (\n\t\"github.com\/alekar\/route53\/src\/route53\"\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Route53Provider struct {\n\tr53 *route53.Route53\n\tZone route53.HostedZone\n\tTTL uint\n}\n\nfunc (r *Route53Provider) createRecords(comment string, rrsets ...route53.RRSet) (error, chan error) {\n\tchanges := make([]route53.RRSetChange, len(rrsets))\n\tfor i, rrset := range rrsets {\n\t\tchanges[i] = route53.RRSetChange{\n\t\t\tAction: \"CREATE\",\n\t\t\tRRSet: rrset,\n\t\t}\n\t}\n\tinfo, err := r.r53.ChangeRRSet(r.Zone.Id, changes, comment)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\treturn nil, info.PollForSync(time.Second, 60*time.Second)\n}\n\nfunc (r *Route53Provider) baseRRSet(id, name, failover string) route53.RRSet {\n\trrset := route53.RRSet{\n\t\tName: name,\n\t\tType: \"A\",\n\t\tTTL: r.TTL,\n\t\tSetIdentifier: id,\n\t\tWeight: 0,\n\t}\n\tif failover == \"PRIMARY\" || failover == \"SECONDARY\" {\n\t\trrset.Failover = failover\n\t}\n\treturn rrset\n}\n\nfunc (r *Route53Provider) CreateAliases(comment string, aliases []Alias) (error, chan error) {\n\trrsets := make([]route53.RRSet, len(aliases))\n\tcount := 0\n\tfor _, alias := range aliases {\n\t\trrsets[count] = r.baseRRSet(alias.Id(), alias.Alias, alias.Failover)\n\t\trrsets[count].HostedZoneId = r.Zone.Id\n\t\trrsets[count].DNSName = alias.Original\n\t\trrsets[count].EvaluateTargetHealth = true\n\t\tcount++\n\t}\n\treturn r.createRecords(comment, rrsets...)\n}\n\nfunc (r *Route53Provider) CreateCNames(comment string, cnames []CName) (error, chan error) {\n\trrsets := make([]route53.RRSet, len(cnames))\n\tcount := 0\n\tfor _, cname := range cnames {\n\t\trrsets[count] = r.baseRRSet(cname.Id(), cname.CName, cname.Failover)\n\t\trrsets[count].Values = []string{cname.IP}\n\t\trrsets[count].HealthCheckId = cname.HealthCheckId\n\t\tcount++\n\t}\n\treturn r.createRecords(comment, rrsets...)\n}\n\nfunc (r *Route53Provider) DeleteRecords(comment string, ids ...string) (error, chan error) {\n\tif len(ids) == 0 {\n\t\terrChan := make(chan error)\n\t\tgo func(ch chan error) { \/\/ fake channel with nil error\n\t\t\tch <- nil\n\t\t}(errChan)\n\t\treturn nil, errChan\n\t}\n\t\/\/ fetch all records\n\trrsets, err := r.r53.ListRRSets(r.Zone.Id)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\t\/\/ create record map to make things easier\n\trrsetMap := map[string]route53.RRSet{}\n\tfor _, rrset := range rrsets {\n\t\trrsetMap[rrset.SetIdentifier] = rrset\n\t}\n\t\/\/ filter by id and delete\n\ttoDelete := []route53.RRSet{}\n\tfor _, id := range ids {\n\t\tif rrset, exists := rrsetMap[id]; exists {\n\t\t\ttoDelete = append(toDelete, rrset)\n\t\t}\n\t}\n\tchanges := make([]route53.RRSetChange, len(toDelete))\n\tfor i, rrset := range toDelete {\n\t\tchanges[i] = route53.RRSetChange{\n\t\t\tAction: \"DELETE\",\n\t\t\tRRSet: rrset,\n\t\t}\n\t}\n\tinfo, err := r.r53.ChangeRRSet(r.Zone.Id, changes, comment)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\treturn nil, info.PollForSync(time.Second, 60*time.Second)\n}\n\nfunc (r *Route53Provider) CreateHealthCheck(ip string, port uint16) (string, error) {\n\t\/\/ health check to make sure TCP 80 is reachable\n\tconfig := route53.HealthCheckConfig{\n\t\tIPAddress: ip,\n\t\tPort: port,\n\t\tType: \"TCP\",\n\t}\n\t\/\/ add health check for ip, return health check id\n\treturn r.r53.CreateHealthCheck(config, \"\")\n}\n\nfunc (r *Route53Provider) DeleteHealthCheck(id string) error {\n\treturn r.r53.DeleteHealthCheck(id)\n}\n\nfunc (r *Route53Provider) GetRecordsForIP(ip string) ([]string, error) {\n\trrsets, err := r.r53.ListRRSets(r.Zone.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tids := []string{}\n\tfor _, rrset := range rrsets {\n\t\tfor _, value := range rrset.Values {\n\t\t\tif value == ip {\n\t\t\t\tids = append(ids, rrset.SetIdentifier)\n\t\t\t}\n\t\t}\n\t}\n\treturn ids, nil\n}\n\nfunc (r *Route53Provider) Suffix() string {\n\treturn strings.TrimRight(r.Zone.Name, \".\")\n}\n\nfunc NewRoute53Provider(zoneId string, ttl uint) (*Route53Provider, error) {\n\troute53.DebugOn()\n\tauth, err := aws.GetAuth(\"\", \"\", \"\", time.Time{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr53 := route53.New(auth)\n\tzone, err := r53.GetHostedZone(zoneId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Route53Provider{r53: r53, Zone: zone, TTL: ttl}, nil\n}\n<commit_msg>Zone.Id returns weird shit<commit_after>package dns\n\nimport (\n\t\"github.com\/alekar\/route53\/src\/route53\"\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Route53Provider struct {\n\tr53 *route53.Route53\n\tZone route53.HostedZone\n\tZoneId string\n\tTTL uint\n}\n\nfunc (r *Route53Provider) createRecords(comment string, rrsets ...route53.RRSet) (error, chan error) {\n\tchanges := make([]route53.RRSetChange, len(rrsets))\n\tfor i, rrset := range rrsets {\n\t\tchanges[i] = route53.RRSetChange{\n\t\t\tAction: \"CREATE\",\n\t\t\tRRSet: rrset,\n\t\t}\n\t}\n\tinfo, err := r.r53.ChangeRRSet(r.ZoneId, changes, comment)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\treturn nil, info.PollForSync(time.Second, 60*time.Second)\n}\n\nfunc (r *Route53Provider) baseRRSet(id, name, failover string) route53.RRSet {\n\trrset := route53.RRSet{\n\t\tName: name,\n\t\tType: \"A\",\n\t\tTTL: r.TTL,\n\t\tSetIdentifier: id,\n\t\tWeight: 0,\n\t}\n\tif failover == \"PRIMARY\" || failover == \"SECONDARY\" {\n\t\trrset.Failover = failover\n\t}\n\treturn rrset\n}\n\nfunc (r *Route53Provider) CreateAliases(comment string, aliases []Alias) (error, chan error) {\n\trrsets := make([]route53.RRSet, len(aliases))\n\tcount := 0\n\tfor _, alias := range aliases {\n\t\trrsets[count] = r.baseRRSet(alias.Id(), alias.Alias, alias.Failover)\n\t\trrsets[count].HostedZoneId = r.ZoneId\n\t\trrsets[count].DNSName = alias.Original\n\t\trrsets[count].EvaluateTargetHealth = true\n\t\tcount++\n\t}\n\treturn r.createRecords(comment, rrsets...)\n}\n\nfunc (r *Route53Provider) CreateCNames(comment string, cnames []CName) (error, chan error) {\n\trrsets := make([]route53.RRSet, len(cnames))\n\tcount := 0\n\tfor _, cname := range cnames {\n\t\trrsets[count] = r.baseRRSet(cname.Id(), cname.CName, cname.Failover)\n\t\trrsets[count].Values = []string{cname.IP}\n\t\trrsets[count].HealthCheckId = cname.HealthCheckId\n\t\tcount++\n\t}\n\treturn r.createRecords(comment, rrsets...)\n}\n\nfunc (r *Route53Provider) DeleteRecords(comment string, ids ...string) (error, chan error) {\n\tif len(ids) == 0 {\n\t\terrChan := make(chan error)\n\t\tgo func(ch chan error) { \/\/ fake channel with nil error\n\t\t\tch <- nil\n\t\t}(errChan)\n\t\treturn nil, errChan\n\t}\n\t\/\/ fetch all records\n\trrsets, err := r.r53.ListRRSets(r.ZoneId)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\t\/\/ create record map to make things easier\n\trrsetMap := map[string]route53.RRSet{}\n\tfor _, rrset := range rrsets {\n\t\trrsetMap[rrset.SetIdentifier] = rrset\n\t}\n\t\/\/ filter by id and delete\n\ttoDelete := []route53.RRSet{}\n\tfor _, id := range ids {\n\t\tif rrset, exists := rrsetMap[id]; exists {\n\t\t\ttoDelete = append(toDelete, rrset)\n\t\t}\n\t}\n\tchanges := make([]route53.RRSetChange, len(toDelete))\n\tfor i, rrset := range toDelete {\n\t\tchanges[i] = route53.RRSetChange{\n\t\t\tAction: \"DELETE\",\n\t\t\tRRSet: rrset,\n\t\t}\n\t}\n\tinfo, err := r.r53.ChangeRRSet(r.ZoneId, changes, comment)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\treturn nil, info.PollForSync(time.Second, 60*time.Second)\n}\n\nfunc (r *Route53Provider) CreateHealthCheck(ip string, port uint16) (string, error) {\n\t\/\/ health check to make sure TCP 80 is reachable\n\tconfig := route53.HealthCheckConfig{\n\t\tIPAddress: ip,\n\t\tPort: port,\n\t\tType: \"TCP\",\n\t}\n\t\/\/ add health check for ip, return health check id\n\treturn r.r53.CreateHealthCheck(config, \"\")\n}\n\nfunc (r *Route53Provider) DeleteHealthCheck(id string) error {\n\treturn r.r53.DeleteHealthCheck(id)\n}\n\nfunc (r *Route53Provider) GetRecordsForIP(ip string) ([]string, error) {\n\trrsets, err := r.r53.ListRRSets(r.ZoneId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tids := []string{}\n\tfor _, rrset := range rrsets {\n\t\tfor _, value := range rrset.Values {\n\t\t\tif value == ip {\n\t\t\t\tids = append(ids, rrset.SetIdentifier)\n\t\t\t}\n\t\t}\n\t}\n\treturn ids, nil\n}\n\nfunc (r *Route53Provider) Suffix() string {\n\treturn strings.TrimRight(r.Zone.Name, \".\")\n}\n\nfunc NewRoute53Provider(zoneId string, ttl uint) (*Route53Provider, error) {\n\troute53.DebugOn()\n\tauth, err := aws.GetAuth(\"\", \"\", \"\", time.Time{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr53 := route53.New(auth)\n\tzone, err := r53.GetHostedZone(zoneId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Route53Provider{r53: r53, Zone: zone, ZoneId: zoneId, TTL: ttl}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage workflow\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\t\"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\ntype EventDB struct {\n\t*ResourceDB\n}\n\nfunc NewEventDB(service *WorkflowService, esi elasticsearch.IIndex) (*EventDB, error) {\n\trdb, err := NewResourceDB(service, esi, EventIndexSettings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terdb := EventDB{ResourceDB: rdb}\n\treturn &erdb, nil\n}\n\nfunc (db *EventDB) PostData(mapping string, obj interface{}, id piazza.Ident) (piazza.Ident, error) {\n\tvar event Event\n\tok1 := false\n\tevent, ok1 = obj.(Event)\n\tif !ok1 {\n\t\ttemp, ok2 := obj.(*Event)\n\t\tif !ok2 {\n\t\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: was not given an Event\")\n\t\t}\n\t\tevent = *temp\n\t}\n\n\terr := verifyEventReadyToPost(&event, db)\n\tif err != nil {\n\t\treturn piazza.NoIdent, err\n\t}\n\n\tindexResult, err := db.Esi.PostData(mapping, id.String(), obj)\n\tif err != nil {\n\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\tif !indexResult.Created {\n\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: not created\")\n\t}\n\n\treturn id, nil\n}\n\nfunc verifyEventReadyToPost(event *Event, db *EventDB) error {\n\teventTypeJson := db.service.GetEventType(event.EventTypeId)\n\teventTypeObj := eventTypeJson.Data\n\teventType, ok := eventTypeObj.(*EventType)\n\tif !ok {\n\t\treturn LoggedError(\"EventDB.PostData failed: unable to obtain specified eventtype\")\n\t}\n\teventTypeMappingVars, err := piazza.GetVarsFromStruct(eventType.Mapping)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\teventDataVars, err := piazza.GetVarsFromStruct(event.Data)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\tnotFound := []string{}\n\tfor k, v := range eventTypeMappingVars {\n\t\tfound := false\n\t\tfor k2, v2 := range eventDataVars {\n\t\t\tif k2 == k {\n\t\t\t\tfound = true\n\t\t\t\tif !elasticsearch.IsValidArrayTypeMapping(v) {\n\t\t\t\t\tif piazza.ValueIsValidArray(v2) {\n\t\t\t\t\t\treturn LoggedError(\"EventDB.PostData failed: an array was passed into the non-array field %s\", k)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif !piazza.ValueIsValidArray(v2) {\n\t\t\t\t\t\treturn LoggedError(\"EventDB.PostData failed: a non-array was pasted into the array field %s\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tnotFound = append(notFound, k)\n\t\t}\n\t}\n\tif len(notFound) > 0 {\n\t\treturn LoggedError(\"EventDB.PostData failed: the variables %s were specified in the EventType but were not found in the Event\", notFound)\n\t}\n\treturn nil\n}\n\nfunc (db *EventDB) GetAll(mapping string, format *piazza.JsonPagination) ([]Event, int64, error) {\n\tevents := []Event{}\n\tvar err error\n\n\texists := true\n\tif mapping != \"\" {\n\t\texists, err = db.Esi.TypeExists(mapping)\n\t\tif err != nil {\n\t\t\treturn events, 0, err\n\t\t}\n\t}\n\tif !exists {\n\t\treturn nil, 0, fmt.Errorf(\"Type %s does not exist\", mapping)\n\t}\n\n\tsearchResult, err := db.Esi.FilterByMatchAll(mapping, format)\n\tif err != nil {\n\t\treturn nil, 0, LoggedError(\"EventDB.GetAll failed: %s\", err)\n\t}\n\tif searchResult == nil {\n\t\treturn nil, 0, LoggedError(\"EventDB.GetAll failed: no searchResult\")\n\t}\n\n\tif searchResult != nil && searchResult.GetHits() != nil {\n\t\tfor _, hit := range *searchResult.GetHits() {\n\t\t\tvar event Event\n\t\t\terr := json.Unmarshal(*hit.Source, &event)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\tevents = append(events, event)\n\t\t}\n\t}\n\n\treturn events, searchResult.TotalHits(), nil\n}\n\nfunc (db *EventDB) lookupEventTypeNameByEventID(id piazza.Ident) (string, error) {\n\tvar mapping string = \"\"\n\n\ttypes, err := db.Esi.GetTypes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, typ := range types {\n\t\tok, err := db.Esi.ItemExists(typ, id.String())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tmapping = typ\n\t\t\tbreak\n\t\t}\n\t}\n\tif mapping == \"\" {\n\t\treturn \"\", LoggedError(\"EventDB.lookupEventTypeNameByEventID failed: [Item %s in index events does not exist]\", id.String())\n\t}\n\n\treturn mapping, nil\n}\n\n\/\/ NameExists checks if an EventType name exists.\n\/\/ This is easier to check in EventDB, as the mappings use the EventType.Name.\nfunc (db *EventDB) NameExists(name string) (bool, error) {\n\treturn db.Esi.TypeExists(name)\n}\n\nfunc (db *EventDB) GetOne(mapping string, id piazza.Ident) (*Event, bool, error) {\n\tgetResult, err := db.Esi.GetByID(mapping, id.String())\n\tif err != nil {\n\t\treturn nil, false, LoggedError(\"EventDB.GetOne failed: %s\", err)\n\t}\n\tif getResult == nil {\n\t\treturn nil, true, LoggedError(\"EventDB.GetOne failed: no getResult\")\n\t}\n\n\tsrc := getResult.Source\n\tvar event Event\n\terr = json.Unmarshal(*src, &event)\n\tif err != nil {\n\t\treturn nil, getResult.Found, err\n\t}\n\n\treturn &event, getResult.Found, nil\n}\n\nfunc (db *EventDB) DeleteByID(mapping string, id piazza.Ident) (bool, error) {\n\tdeleteResult, err := db.Esi.DeleteByID(mapping, string(id))\n\tif err != nil {\n\t\treturn deleteResult.Found, LoggedError(\"EventDB.DeleteById failed: %s\", err)\n\t}\n\tif deleteResult == nil {\n\t\treturn false, LoggedError(\"EventDB.DeleteById failed: no deleteResult\")\n\t}\n\n\treturn deleteResult.Found, nil\n}\n\nfunc (db *EventDB) AddMapping(name string, mapping map[string]interface{}) error {\n\tjsn, err := ConstructEventMappingSchema(name, mapping)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.AddMapping failed: %s\", err)\n\t}\n\terr = db.Esi.SetMapping(name, jsn)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.AddMapping SetMapping failed: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc ConstructEventMappingSchema(name string, mapping map[string]interface{}) (piazza.JsonString, error) {\n\tconst template string = `{\n\t\t\"%s\":{\n\t\t\t\"properties\":{\n\t\t\t\t\"data\":{\n\t\t\t\t\t\"dynamic\": \"strict\",\n\t\t\t\t\t\"properties\": %s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\tesdsl, err := buildMapping(mapping)\n\tif err != nil {\n\t\treturn piazza.JsonString(\"\"), err\n\t}\n\tstrDsl, err := piazza.StructInterfaceToString(esdsl)\n\tif err != nil {\n\t\treturn piazza.JsonString(\"\"), err\n\t}\n\tjsn := fmt.Sprintf(template, name, strDsl)\n\treturn piazza.JsonString(jsn), nil\n}\n\nfunc buildMapping(input map[string]interface{}) (map[string]interface{}, error) {\n\treturn visitNode(input)\n}\nfunc visitNode(inputObj map[string]interface{}) (map[string]interface{}, error) {\n\toutputObj := map[string]interface{}{}\n\tfor k, v := range inputObj {\n\t\tswitch t := v.(type) {\n\t\tcase string:\n\t\t\ttree, err := visitLeaf(k, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\toutputObj[k] = tree\n\t\tcase map[string]interface{}:\n\t\t\ttree, err := visitTree(k, v.(map[string]interface{}))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\toutputObj[k] = tree\n\t\tdefault:\n\t\t\treturn nil, LoggedError(\"EventDB.ConstructEventMappingSchema failed: unexpected type %T\", t)\n\t\t}\n\t}\n\treturn outputObj, nil\n}\nfunc visitTree(k string, v map[string]interface{}) (map[string]interface{}, error) {\n\tsubtree, err := visitNode(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twrapperTree := map[string]interface{}{}\n\twrapperTree[\"dynamic\"] = \"strict\"\n\twrapperTree[\"properties\"] = subtree\n\treturn wrapperTree, nil\n}\nfunc visitLeaf(k string, v interface{}) (map[string]interface{}, error) {\n\tif !elasticsearch.IsValidMappingType(v) {\n\t\treturn nil, LoggedError(\"EventDB.ConstructEventMappingSchema failed: %s was not recognized as a valid mapping type\")\n\t}\n\tif elasticsearch.IsValidArrayTypeMapping(v) {\n\t\tv = v.(string)[1 : len(v.(string))-1]\n\t}\n\ttree := map[string]interface{}{}\n\ttree[\"type\"] = v\n\treturn tree, nil\n}\n\nfunc (db *EventDB) PercolateEventData(eventType string, data map[string]interface{}, id piazza.Ident) (*[]piazza.Ident, error) {\n\tfixed := map[string]interface{}{}\n\tfixed[\"data\"] = data\n\tpercolateResponse, err := db.Esi.AddPercolationDocument(eventType, fixed)\n\n\tif err != nil {\n\t\treturn nil, LoggedError(\"EventDB.PercolateEventData failed: %s\", err)\n\t}\n\tif percolateResponse == nil {\n\t\treturn nil, LoggedError(\"EventDB.PercolateEventData failed: no percolateResult\")\n\t}\n\n\t\/\/ add the triggers to the alert queue\n\tids := make([]piazza.Ident, len(percolateResponse.Matches))\n\tfor i, v := range percolateResponse.Matches {\n\t\tids[i] = piazza.Ident(v.Id)\n\t}\n\n\treturn &ids, nil\n}\n<commit_msg>format typo<commit_after>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage workflow\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\t\"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\ntype EventDB struct {\n\t*ResourceDB\n}\n\nfunc NewEventDB(service *WorkflowService, esi elasticsearch.IIndex) (*EventDB, error) {\n\trdb, err := NewResourceDB(service, esi, EventIndexSettings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terdb := EventDB{ResourceDB: rdb}\n\treturn &erdb, nil\n}\n\nfunc (db *EventDB) PostData(mapping string, obj interface{}, id piazza.Ident) (piazza.Ident, error) {\n\tvar event Event\n\tok1 := false\n\tevent, ok1 = obj.(Event)\n\tif !ok1 {\n\t\ttemp, ok2 := obj.(*Event)\n\t\tif !ok2 {\n\t\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: was not given an Event\")\n\t\t}\n\t\tevent = *temp\n\t}\n\n\terr := verifyEventReadyToPost(&event, db)\n\tif err != nil {\n\t\treturn piazza.NoIdent, err\n\t}\n\n\tindexResult, err := db.Esi.PostData(mapping, id.String(), obj)\n\tif err != nil {\n\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\tif !indexResult.Created {\n\t\treturn piazza.NoIdent, LoggedError(\"EventDB.PostData failed: not created\")\n\t}\n\n\treturn id, nil\n}\n\nfunc verifyEventReadyToPost(event *Event, db *EventDB) error {\n\teventTypeJson := db.service.GetEventType(event.EventTypeId)\n\teventTypeObj := eventTypeJson.Data\n\teventType, ok := eventTypeObj.(*EventType)\n\tif !ok {\n\t\treturn LoggedError(\"EventDB.PostData failed: unable to obtain specified eventtype\")\n\t}\n\teventTypeMappingVars, err := piazza.GetVarsFromStruct(eventType.Mapping)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\teventDataVars, err := piazza.GetVarsFromStruct(event.Data)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.PostData failed: %s\", err)\n\t}\n\tnotFound := []string{}\n\tfor k, v := range eventTypeMappingVars {\n\t\tfound := false\n\t\tfor k2, v2 := range eventDataVars {\n\t\t\tif k2 == k {\n\t\t\t\tfound = true\n\t\t\t\tif !elasticsearch.IsValidArrayTypeMapping(v) {\n\t\t\t\t\tif piazza.ValueIsValidArray(v2) {\n\t\t\t\t\t\treturn LoggedError(\"EventDB.PostData failed: an array was passed into the non-array field %s\", k)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif !piazza.ValueIsValidArray(v2) {\n\t\t\t\t\t\treturn LoggedError(\"EventDB.PostData failed: a non-array was pasted into the array field %s\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tnotFound = append(notFound, k)\n\t\t}\n\t}\n\tif len(notFound) > 0 {\n\t\treturn LoggedError(\"EventDB.PostData failed: the variables %s were specified in the EventType but were not found in the Event\", notFound)\n\t}\n\treturn nil\n}\n\nfunc (db *EventDB) GetAll(mapping string, format *piazza.JsonPagination) ([]Event, int64, error) {\n\tevents := []Event{}\n\tvar err error\n\n\texists := true\n\tif mapping != \"\" {\n\t\texists, err = db.Esi.TypeExists(mapping)\n\t\tif err != nil {\n\t\t\treturn events, 0, err\n\t\t}\n\t}\n\tif !exists {\n\t\treturn nil, 0, fmt.Errorf(\"Type %s does not exist\", mapping)\n\t}\n\n\tsearchResult, err := db.Esi.FilterByMatchAll(mapping, format)\n\tif err != nil {\n\t\treturn nil, 0, LoggedError(\"EventDB.GetAll failed: %s\", err)\n\t}\n\tif searchResult == nil {\n\t\treturn nil, 0, LoggedError(\"EventDB.GetAll failed: no searchResult\")\n\t}\n\n\tif searchResult != nil && searchResult.GetHits() != nil {\n\t\tfor _, hit := range *searchResult.GetHits() {\n\t\t\tvar event Event\n\t\t\terr := json.Unmarshal(*hit.Source, &event)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\tevents = append(events, event)\n\t\t}\n\t}\n\n\treturn events, searchResult.TotalHits(), nil\n}\n\nfunc (db *EventDB) lookupEventTypeNameByEventID(id piazza.Ident) (string, error) {\n\tvar mapping string = \"\"\n\n\ttypes, err := db.Esi.GetTypes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, typ := range types {\n\t\tok, err := db.Esi.ItemExists(typ, id.String())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tmapping = typ\n\t\t\tbreak\n\t\t}\n\t}\n\tif mapping == \"\" {\n\t\treturn \"\", LoggedError(\"EventDB.lookupEventTypeNameByEventID failed: [Item %s in index events does not exist]\", id.String())\n\t}\n\n\treturn mapping, nil\n}\n\n\/\/ NameExists checks if an EventType name exists.\n\/\/ This is easier to check in EventDB, as the mappings use the EventType.Name.\nfunc (db *EventDB) NameExists(name string) (bool, error) {\n\treturn db.Esi.TypeExists(name)\n}\n\nfunc (db *EventDB) GetOne(mapping string, id piazza.Ident) (*Event, bool, error) {\n\tgetResult, err := db.Esi.GetByID(mapping, id.String())\n\tif err != nil {\n\t\treturn nil, false, LoggedError(\"EventDB.GetOne failed: %s\", err)\n\t}\n\tif getResult == nil {\n\t\treturn nil, true, LoggedError(\"EventDB.GetOne failed: no getResult\")\n\t}\n\n\tsrc := getResult.Source\n\tvar event Event\n\terr = json.Unmarshal(*src, &event)\n\tif err != nil {\n\t\treturn nil, getResult.Found, err\n\t}\n\n\treturn &event, getResult.Found, nil\n}\n\nfunc (db *EventDB) DeleteByID(mapping string, id piazza.Ident) (bool, error) {\n\tdeleteResult, err := db.Esi.DeleteByID(mapping, string(id))\n\tif err != nil {\n\t\treturn deleteResult.Found, LoggedError(\"EventDB.DeleteById failed: %s\", err)\n\t}\n\tif deleteResult == nil {\n\t\treturn false, LoggedError(\"EventDB.DeleteById failed: no deleteResult\")\n\t}\n\n\treturn deleteResult.Found, nil\n}\n\nfunc (db *EventDB) AddMapping(name string, mapping map[string]interface{}) error {\n\tjsn, err := ConstructEventMappingSchema(name, mapping)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.AddMapping failed: %s\", err)\n\t}\n\terr = db.Esi.SetMapping(name, jsn)\n\tif err != nil {\n\t\treturn LoggedError(\"EventDB.AddMapping SetMapping failed: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc ConstructEventMappingSchema(name string, mapping map[string]interface{}) (piazza.JsonString, error) {\n\tconst template string = `{\n\t\t\"%s\":{\n\t\t\t\"properties\":{\n\t\t\t\t\"data\":{\n\t\t\t\t\t\"dynamic\": \"strict\",\n\t\t\t\t\t\"properties\": %s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\tesdsl, err := buildMapping(mapping)\n\tif err != nil {\n\t\treturn piazza.JsonString(\"\"), err\n\t}\n\tstrDsl, err := piazza.StructInterfaceToString(esdsl)\n\tif err != nil {\n\t\treturn piazza.JsonString(\"\"), err\n\t}\n\tjsn := fmt.Sprintf(template, name, strDsl)\n\treturn piazza.JsonString(jsn), nil\n}\n\nfunc buildMapping(input map[string]interface{}) (map[string]interface{}, error) {\n\treturn visitNode(input)\n}\nfunc visitNode(inputObj map[string]interface{}) (map[string]interface{}, error) {\n\toutputObj := map[string]interface{}{}\n\tfor k, v := range inputObj {\n\t\tswitch t := v.(type) {\n\t\tcase string:\n\t\t\ttree, err := visitLeaf(k, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\toutputObj[k] = tree\n\t\tcase map[string]interface{}:\n\t\t\ttree, err := visitTree(k, v.(map[string]interface{}))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\toutputObj[k] = tree\n\t\tdefault:\n\t\t\treturn nil, LoggedError(\"EventDB.ConstructEventMappingSchema failed: unexpected type %T\", t)\n\t\t}\n\t}\n\treturn outputObj, nil\n}\nfunc visitTree(k string, v map[string]interface{}) (map[string]interface{}, error) {\n\tsubtree, err := visitNode(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twrapperTree := map[string]interface{}{}\n\twrapperTree[\"dynamic\"] = \"strict\"\n\twrapperTree[\"properties\"] = subtree\n\treturn wrapperTree, nil\n}\nfunc visitLeaf(k string, v interface{}) (map[string]interface{}, error) {\n\tif !elasticsearch.IsValidMappingType(v) {\n\t\treturn nil, LoggedError(\"EventDB.ConstructEventMappingSchema failed: \\\"%#v\\\" was not recognized as a valid mapping type\", v)\n\t}\n\tif elasticsearch.IsValidArrayTypeMapping(v) {\n\t\tv = v.(string)[1 : len(v.(string))-1]\n\t}\n\ttree := map[string]interface{}{}\n\ttree[\"type\"] = v\n\treturn tree, nil\n}\n\nfunc (db *EventDB) PercolateEventData(eventType string, data map[string]interface{}, id piazza.Ident) (*[]piazza.Ident, error) {\n\tfixed := map[string]interface{}{}\n\tfixed[\"data\"] = data\n\tpercolateResponse, err := db.Esi.AddPercolationDocument(eventType, fixed)\n\n\tif err != nil {\n\t\treturn nil, LoggedError(\"EventDB.PercolateEventData failed: %s\", err)\n\t}\n\tif percolateResponse == nil {\n\t\treturn nil, LoggedError(\"EventDB.PercolateEventData failed: no percolateResult\")\n\t}\n\n\t\/\/ add the triggers to the alert queue\n\tids := make([]piazza.Ident, len(percolateResponse.Matches))\n\tfor i, v := range percolateResponse.Matches {\n\t\tids[i] = piazza.Ident(v.Id)\n\t}\n\n\treturn &ids, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n\t\"golang.org\/x\/net\/context\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\/serializer\/json\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\/serializer\/protobuf\"\n\n\t\/\/ install all APIs\n\t_ \"github.com\/openshift\/origin\/pkg\/api\/install\"\n\t_ \"github.com\/openshift\/origin\/pkg\/quota\/api\/install\"\n\t_ \"k8s.io\/kubernetes\/pkg\/api\/install\"\n)\n\nfunc main() {\n\tvar endpoint, keyFile, certFile, caFile string\n\tflag.StringVar(&endpoint, \"endpoint\", \"https:\/\/127.0.0.1:4001\", \"Etcd endpoint.\")\n\tflag.StringVar(&keyFile, \"key-file\", \"\", \"TLS client key.\")\n\tflag.StringVar(&certFile, \"cert-file\", \"\", \"TLS client certificate.\")\n\tflag.StringVar(&caFile, \"ca-file\", \"\", \"Server TLS CA certificate.\")\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: you need to specify action ls [<key>] or get <key>\\n\")\n\t\tos.Exit(1)\n\t}\n\tif flag.Arg(0) == \"get\" && flag.NArg() == 1 {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: you need to specify <key> for get operation\\n\")\n\t\tos.Exit(1)\n\t}\n\taction := flag.Arg(0)\n\tkey := \"\"\n\tif flag.NArg() > 1 {\n\t\tkey = flag.Arg(1)\n\t}\n\n\tvar tlsConfig *tls.Config\n\tif len(certFile) != 0 || len(keyFile) != 0 || len(caFile) != 0 {\n\t\ttlsInfo := transport.TLSInfo{\n\t\t\tCertFile: certFile,\n\t\t\tKeyFile: keyFile,\n\t\t\tCAFile: caFile,\n\t\t}\n\t\tvar err error\n\t\ttlsConfig, err = tlsInfo.ClientConfig()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to create client config: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tconfig := clientv3.Config{\n\t\tEndpoints: []string{endpoint},\n\t\tTLS: tlsConfig,\n\t\tDialTimeout: 5 * time.Second,\n\t}\n\tclient, err := clientv3.New(config)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to connect to etcd: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer client.Close()\n\n\tswitch action {\n\tcase \"ls\":\n\t\terr = listKeys(client, key)\n\tcase \"get\":\n\t\terr = getKey(client, key)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s-ing %s: %v\\n\", action, key, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc listKeys(client *clientv3.Client, key string) error {\n\tvar resp *clientv3.GetResponse\n\tvar err error\n\tif len(key) == 0 {\n\t\tresp, err = client.Get(context.TODO(), \"\/\", clientv3.WithFromKey(), clientv3.WithKeysOnly())\n\t} else {\n\t\tresp, err = client.Get(context.TODO(), key, clientv3.WithPrefix(), clientv3.WithKeysOnly())\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, ev := range resp.Kvs {\n\t\tfmt.Printf(\"%s\\n\", ev.Key)\n\t}\n\treturn nil\n}\n\nfunc getKey(client *clientv3.Client, key string) error {\n\tresp, err := client.Get(context.TODO(), key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tps := protobuf.NewSerializer(api.Scheme, api.Scheme, \"application\/vnd.kubernetes.protobuf\")\n\tjs := json.NewSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme, true)\n\tfor _, ev := range resp.Kvs {\n\t\tobj, gvk, err := ps.Decode(ev.Value, nil, nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to decode %s: %v\\n\", ev.Key, err)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(gvk)\n\t\terr = js.Encode(obj, os.Stdout)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to decode %s: %v\\n\", ev.Key, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix etcdhelper imports<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n\t\"golang.org\/x\/net\/context\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/json\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/protobuf\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\n\t\/\/ install all APIs\n\t_ \"github.com\/openshift\/origin\/pkg\/api\/install\"\n\t_ \"github.com\/openshift\/origin\/pkg\/quota\/api\/install\"\n\t_ \"k8s.io\/kubernetes\/pkg\/api\/install\"\n)\n\nfunc main() {\n\tvar endpoint, keyFile, certFile, caFile string\n\tflag.StringVar(&endpoint, \"endpoint\", \"https:\/\/127.0.0.1:4001\", \"Etcd endpoint.\")\n\tflag.StringVar(&keyFile, \"key-file\", \"\", \"TLS client key.\")\n\tflag.StringVar(&certFile, \"cert-file\", \"\", \"TLS client certificate.\")\n\tflag.StringVar(&caFile, \"ca-file\", \"\", \"Server TLS CA certificate.\")\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: you need to specify action ls [<key>] or get <key>\\n\")\n\t\tos.Exit(1)\n\t}\n\tif flag.Arg(0) == \"get\" && flag.NArg() == 1 {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: you need to specify <key> for get operation\\n\")\n\t\tos.Exit(1)\n\t}\n\taction := flag.Arg(0)\n\tkey := \"\"\n\tif flag.NArg() > 1 {\n\t\tkey = flag.Arg(1)\n\t}\n\n\tvar tlsConfig *tls.Config\n\tif len(certFile) != 0 || len(keyFile) != 0 || len(caFile) != 0 {\n\t\ttlsInfo := transport.TLSInfo{\n\t\t\tCertFile: certFile,\n\t\t\tKeyFile: keyFile,\n\t\t\tCAFile: caFile,\n\t\t}\n\t\tvar err error\n\t\ttlsConfig, err = tlsInfo.ClientConfig()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to create client config: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tconfig := clientv3.Config{\n\t\tEndpoints: []string{endpoint},\n\t\tTLS: tlsConfig,\n\t\tDialTimeout: 5 * time.Second,\n\t}\n\tclient, err := clientv3.New(config)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to connect to etcd: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer client.Close()\n\n\tswitch action {\n\tcase \"ls\":\n\t\terr = listKeys(client, key)\n\tcase \"get\":\n\t\terr = getKey(client, key)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %s-ing %s: %v\\n\", action, key, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc listKeys(client *clientv3.Client, key string) error {\n\tvar resp *clientv3.GetResponse\n\tvar err error\n\tif len(key) == 0 {\n\t\tresp, err = client.Get(context.TODO(), \"\/\", clientv3.WithFromKey(), clientv3.WithKeysOnly())\n\t} else {\n\t\tresp, err = client.Get(context.TODO(), key, clientv3.WithPrefix(), clientv3.WithKeysOnly())\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, ev := range resp.Kvs {\n\t\tfmt.Printf(\"%s\\n\", ev.Key)\n\t}\n\treturn nil\n}\n\nfunc getKey(client *clientv3.Client, key string) error {\n\tresp, err := client.Get(context.TODO(), key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tps := protobuf.NewSerializer(api.Scheme, api.Scheme, \"application\/vnd.kubernetes.protobuf\")\n\tjs := json.NewSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme, true)\n\tfor _, ev := range resp.Kvs {\n\t\tobj, gvk, err := ps.Decode(ev.Value, nil, nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to decode %s: %v\\n\", ev.Key, err)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(gvk)\n\t\terr = js.Encode(obj, os.Stdout)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERROR: unable to decode %s: %v\\n\", ev.Key, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ploop\n\nimport \"strings\"\nimport \"os\/exec\"\nimport \"sync\"\n\n\/\/ #include <ploop\/libploop.h>\nimport \"C\"\n\n\/\/ Possible SetVerboseLevel arguments\nconst (\n\tNoConsole = C.PLOOP_LOG_NOCONSOLE\n\tNoStdout = C.PLOOP_LOG_NOSTDOUT\n\tTimestamps = C.PLOOP_LOG_TIMESTAMPS\n)\n\n\/\/ SetVerboseLevel sets a level of verbosity when logging to stdout\/stderr\nfunc SetVerboseLevel(v int) {\n\tC.ploop_set_verbose_level(C.int(v))\n}\n\n\/\/ SetLogFile enables logging to a file and sets log file name\nfunc SetLogFile(file string) error {\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tret := C.ploop_set_log_file(cfile)\n\n\treturn mkerr(ret)\n}\n\n\/\/ SetLogLevel sets a level of verbosity when logging to a file\nfunc SetLogLevel(v int) {\n\tC.ploop_set_log_level(C.int(v))\n}\n\n\/\/ Ploop is a type containing DiskDescriptor.xml opened by the library\ntype Ploop struct {\n\td *C.struct_ploop_disk_images_data\n}\n\nvar once sync.Once\n\n\/\/ load ploop modules\nfunc loadKmod() {\n\t\/\/ try to load ploop modules\n\tmodules := []string{\"ploop\", \"pfmt_ploop1\", \"pfmt_raw\", \"pio_direct\", \"pio_nfs\", \"pio_kaio\"}\n\tfor _, m := range modules {\n\t\texec.Command(\"modprobe\", m).Run()\n\t}\n}\n\n\/\/ Open opens a ploop DiskDescriptor.xml, most ploop operations require it\nfunc Open(file string) (Ploop, error) {\n\tvar d Ploop\n\n\tonce.Do(loadKmod)\n\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tret := C.ploop_open_dd(&d.d, cfile)\n\n\treturn d, mkerr(ret)\n}\n\n\/\/ Close closes a ploop disk descriptor when it is no longer needed\nfunc (d Ploop) Close() {\n\tC.ploop_close_dd(d.d)\n}\n\ntype ImageMode int\n\n\/\/ Possible values for ImageMode\nconst (\n\tExpanded ImageMode = C.PLOOP_EXPANDED_MODE\n\tPreallocated ImageMode = C.PLOOP_EXPANDED_PREALLOCATED_MODE\n\tRaw ImageMode = C.PLOOP_RAW_MODE\n)\n\n\/\/ ParseImageMode converts a string to ImageMode value\nfunc ParseImageMode(s string) (ImageMode, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"expanded\":\n\t\treturn Expanded, nil\n\tcase \"preallocated\":\n\t\treturn Preallocated, nil\n\tcase \"raw\":\n\t\treturn Raw, nil\n\tdefault:\n\t\treturn Expanded, mkerr(E_PARAM)\n\t}\n}\n\n\/\/ String converts an ImageMode value to string\nfunc (m ImageMode) String() string {\n\tswitch m {\n\tcase Expanded:\n\t\treturn \"Expanded\"\n\tcase Preallocated:\n\t\treturn \"Preallocated\"\n\tcase Raw:\n\t\treturn \"Raw\"\n\t}\n\treturn \"<unknown>\"\n}\n\n\/\/ CreateFlags is a type for CreateParam.Flags\ntype CreateFlags uint\n\n\/\/ Possible values for CreateFlags\nconst (\n\tNoLazy CreateFlags = C.PLOOP_CREATE_NOLAZY\n)\n\n\/\/ CreateParam is a set of parameters for a newly created ploop\ntype CreateParam struct {\n\tSize uint64 \/\/ image size, in kilobytes (FS size is about 10% smaller)\n\tMode ImageMode \/\/ image mode\n\tFile string \/\/ path to and a file name for base delta image\n\tCLog uint \/\/ cluster block size log (6 to 15, default 11)\n\tFlags CreateFlags \/\/ flags\n}\n\n\/\/ Create creates a ploop image and its DiskDescriptor.xml\nfunc Create(p *CreateParam) error {\n\tvar a C.struct_ploop_create_param\n\n\tonce.Do(loadKmod)\n\n\t\/\/ default image file name\n\tif p.File == \"\" {\n\t\tp.File = \"root.hdd\"\n\t}\n\n\ta.size = convertSize(p.Size)\n\ta.mode = C.int(p.Mode)\n\tif p.CLog != 0 {\n\t\t\/\/ ploop cluster block size, in 512-byte sectors\n\t\t\/\/ default is 1M cluster block size (CLog=11)\n\t\t\/\/ 2^11 = 2048 sectors, 2048*512 = 1M\n\t\ta.blocksize = 1 << p.CLog\n\t}\n\ta.flags = C.uint(p.Flags)\n\ta.image = C.CString(p.File)\n\tdefer cfree(a.image)\n\ta.fstype = C.CString(\"ext4\")\n\tdefer cfree(a.fstype)\n\n\tret := C.ploop_create_image(&a)\n\treturn mkerr(ret)\n}\n\n\/\/ MountParam is a set of parameters to pass to Mount()\ntype MountParam struct {\n\tUUID string \/\/ snapshot uuid (empty for top delta)\n\tTarget string \/\/ mount point (empty if no mount is needed)\n\tFlags int \/\/ bit mount flags such as MS_NOATIME\n\tData string \/\/ auxiliary mount options\n\tReadonly bool \/\/ mount read-only\n\tFsck bool \/\/ do fsck before mounting inner FS\n\tQuota bool \/\/ enable quota for inner FS\n}\n\n\/\/ Mount creates a ploop device and (optionally) mounts it\nfunc (d Ploop) Mount(p *MountParam) (string, error) {\n\tvar a C.struct_ploop_mount_param\n\tvar device string\n\n\tif p.UUID != \"\" {\n\t\ta.guid = C.CString(p.UUID)\n\t\tdefer cfree(a.guid)\n\t}\n\tif p.Target != \"\" {\n\t\ta.target = C.CString(p.Target)\n\t\tdefer cfree(a.target)\n\t}\n\n\t\/\/ mount_data should not be NULL\n\ta.mount_data = C.CString(p.Data)\n\tdefer cfree(a.mount_data)\n\n\ta.flags = C.int(p.Flags)\n\ta.ro = boolToC(p.Readonly)\n\ta.fsck = boolToC(p.Fsck)\n\ta.quota = boolToC(p.Quota)\n\n\tret := C.ploop_mount_image(d.d, &a)\n\tif ret == 0 {\n\t\tdevice = C.GoString(&a.device[0])\n\t\t\/\/ TODO? fsck_code = C.GoString(a.fsck_rc)\n\t}\n\treturn device, mkerr(ret)\n}\n\n\/\/ Umount unmounts the ploop filesystem and dismantles the device\nfunc (d Ploop) Umount() error {\n\tret := C.ploop_umount_image(d.d)\n\n\treturn mkerr(ret)\n}\n\n\/\/ UmountByDevice unmounts the ploop filesystem and dismantles the device.\n\/\/ Unlike Umount(), this is a lower-level function meaning it can be less\n\/\/ safe and should generally not be used.\nfunc UmountByDevice(dev string) error {\n\tcdev := C.CString(dev)\n\tdefer cfree(cdev)\n\n\tret := C.ploop_umount(cdev, nil)\n\n\treturn mkerr(ret)\n}\n\n\/\/ Resize changes the ploop size. Online resize is recommended.\nfunc (d Ploop) Resize(size uint64, offline bool) error {\n\tvar p C.struct_ploop_resize_param\n\n\tp.size = convertSize(size)\n\tp.offline_resize = boolToC(offline)\n\n\tret := C.ploop_resize_image(d.d, &p)\n\treturn mkerr(ret)\n}\n\n\/\/ Snapshot creates a ploop snapshot, returning its uuid\nfunc (d Ploop) Snapshot() (string, error) {\n\tvar p C.struct_ploop_snapshot_param\n\tvar uuid, err = UUID()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tret := C.ploop_create_snapshot(d.d, &p)\n\tif ret == 0 {\n\t\tuuid = C.GoString(p.guid)\n\t}\n\n\treturn uuid, mkerr(ret)\n}\n\n\/\/ SwitchSnapshot switches to a specified snapshot,\n\/\/ creates a new empty delta on top of it, and makes it a top one\n\/\/ (i.e. the one new data will be written to).\n\/\/ Old top delta (i.e. data modified since the last snapshot) is lost.\nfunc (d Ploop) SwitchSnapshot(uuid string) error {\n\tvar p C.struct_ploop_snapshot_switch_param\n\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tret := C.ploop_switch_snapshot_ex(d.d, &p)\n\n\treturn mkerr(ret)\n}\n\n\/\/ Possible values for SwitchSnapshotExtended flags argument\ntype SwitchFlag uint\n\nconst (\n\t\/\/ SkipDestroy, if set, modifies the behavior of\n\t\/\/ SwitchSnapshotExtended to not delete the old top delta, but\n\t\/\/ make it a snapshot and return its uuid. Without this flag,\n\t\/\/ old top delta (i.e. data modified since the last snapshot)\n\t\/\/ is lost.\n\tSkipDestroy SwitchFlag = C.PLOOP_SNAP_SKIP_TOPDELTA_DESTROY\n\t\/\/ SkipCreate flag, if set, modifies the behavior of\n\t\/\/ SwitchSnapshotExtended to not create a new top delta,\n\t\/\/ but rather transform the specified snapshot itself to be\n\t\/\/ the new top delta), so all new changes will be written\n\t\/\/ right to it. Snapshot UUID is lost in this case.\n\tSkipCreate SwitchFlag = C.PLOOP_SNAP_SKIP_TOPDELTA_CREATE\n)\n\n\/\/ SwitchSnapshotExtended is same as SwitchSnapshot but with additional\n\/\/ flags modifying its behavior. Please see individual flags description.\n\/\/ Returns uuid of what was the old top delta if SkipDestroy flag is set.\nfunc (d Ploop) SwitchSnapshotExtended(uuid string, flags SwitchFlag) (string, error) {\n\tvar p C.struct_ploop_snapshot_switch_param\n\toldUUID := \"\"\n\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tp.flags = C.int(flags)\n\n\tif flags&SkipDestroy != 0 {\n\t\toldUUID, err := UUID()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tp.guid_old = C.CString(oldUUID)\n\t\tdefer cfree(p.guid_old)\n\t}\n\n\tret := C.ploop_switch_snapshot_ex(d.d, &p)\n\n\treturn oldUUID, mkerr(ret)\n}\n\n\/\/ DeleteSnapshot deletes a snapshot (merging it down if necessary)\nfunc (d Ploop) DeleteSnapshot(uuid string) error {\n\tcuuid := C.CString(uuid)\n\tdefer cfree(cuuid)\n\n\tret := C.ploop_delete_snapshot(d.d, cuuid)\n\n\treturn mkerr(ret)\n}\n\ntype ReplaceFlag int\n\n\/\/ Possible values for ReplaceParam.flags\nconst (\n\t\/\/ KeepName renames the new file to old file name after replace;\n\t\/\/ note that if this option is used the old file is removed.\n\tKeepName ReplaceFlag = C.PLOOP_REPLACE_KEEP_NAME\n)\n\n\/\/ ReplaceParam is a set of parameters to Replace()\ntype ReplaceParam struct {\n\tFile string \/\/ new image file name\n\t\/\/ Image to be replaced is specified by either\n\t\/\/ uuid, current file name, or level,\n\t\/\/ in the above order of preference.\n\tUUID string\n\tCurFile string\n\tLevel int\n\tFlags ReplaceFlag\n}\n\n\/\/ Replace replaces a ploop image to a different (but identical) one\nfunc (d Ploop) Replace(p *ReplaceParam) error {\n\tvar a C.struct_ploop_replace_param\n\n\ta.file = C.CString(p.File)\n\tdefer cfree(a.file)\n\n\tif p.UUID != \"\" {\n\t\ta.guid = C.CString(p.UUID)\n\t\tdefer cfree(a.guid)\n\t} else if p.CurFile != \"\" {\n\t\ta.cur_file = C.CString(p.CurFile)\n\t\tdefer cfree(a.cur_file)\n\t} else {\n\t\ta.level = C.int(p.Level)\n\t}\n\n\ta.flags = C.int(p.Flags)\n\n\tret := C.ploop_replace_image(d.d, &a)\n\n\treturn mkerr(ret)\n}\n\n\/\/ IsMounted returns true if ploop is mounted\nfunc (d Ploop) IsMounted() (bool, error) {\n\tret := C.ploop_is_mounted(d.d)\n\tif ret == 0 {\n\t\treturn false, nil\n\t} else if ret == 1 {\n\t\treturn true, nil\n\t} else {\n\t\t\/\/ error, but no code, make our own\n\t\treturn false, mkerr(E_SYS)\n\t}\n}\n\n\/\/ FSInfoData holds information about ploop inner file system\ntype FSInfoData struct {\n\tBlockSize uint64\n\tBlocks uint64\n\tBlocksFree uint64\n\tInodes uint64\n\tInodesFree uint64\n}\n\n\/\/ FSInfo gets info of ploop's inner file system\nfunc FSInfo(file string) (FSInfoData, error) {\n\tvar cinfo C.struct_ploop_info\n\tvar info FSInfoData\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tonce.Do(loadKmod)\n\n\tret := C.ploop_get_info_by_descr(cfile, &cinfo)\n\tif ret == 0 {\n\t\tinfo.BlockSize = uint64(cinfo.fs_bsize)\n\t\tinfo.Blocks = uint64(cinfo.fs_blocks)\n\t\tinfo.BlocksFree = uint64(cinfo.fs_bfree)\n\t\tinfo.Inodes = uint64(cinfo.fs_inodes)\n\t\tinfo.InodesFree = uint64(cinfo.fs_ifree)\n\t}\n\n\treturn info, mkerr(ret)\n}\n\n\/\/ ImageInfoData holds information about ploop image\ntype ImageInfoData struct {\n\tBlocks uint64\n\tBlockSize uint32\n\tVersion int\n}\n\n\/\/ ImageInfo gets information about a ploop image\nfunc (d Ploop) ImageInfo() (ImageInfoData, error) {\n\tvar cinfo C.struct_ploop_spec\n\tvar info ImageInfoData\n\n\tret := C.ploop_get_spec(d.d, &cinfo)\n\tif ret == 0 {\n\t\tinfo.Blocks = uint64(cinfo.size)\n\t\tinfo.BlockSize = uint32(cinfo.blocksize)\n\t\tinfo.Version = int(cinfo.fmt_version)\n\t}\n\n\treturn info, mkerr(ret)\n}\n\n\/\/ TopDeltaFile returns file name of top delta\nfunc (d Ploop) TopDeltaFile() (string, error) {\n\tconst len = 4096 \/\/ PATH_MAX\n\tvar out [len]C.char\n\n\tret := C.ploop_get_top_delta_fname(d.d, &out[0], len)\n\tif ret != 0 {\n\t\t\/\/ error, but no code, make our own\n\t\treturn \"\", mkerr(E_SYS)\n\t}\n\n\tfile := C.GoString(&out[0])\n\treturn file, nil\n}\n\n\/\/ UUID generates a ploop UUID\nfunc UUID() (string, error) {\n\tvar cuuid [39]C.char\n\n\tret := C.ploop_uuid_generate(&cuuid[0], 39)\n\tif ret != 0 {\n\t\treturn \"\", mkerr(ret)\n\t}\n\n\tuuid := C.GoString(&cuuid[0])\n\treturn uuid, nil\n}\n<commit_msg>ploop.go: add\/improve some comments<commit_after>package ploop\n\nimport \"strings\"\nimport \"os\/exec\"\nimport \"sync\"\n\n\/\/ #include <ploop\/libploop.h>\nimport \"C\"\n\n\/\/ Possible SetVerboseLevel arguments\nconst (\n\tNoConsole = C.PLOOP_LOG_NOCONSOLE\n\tNoStdout = C.PLOOP_LOG_NOSTDOUT\n\tTimestamps = C.PLOOP_LOG_TIMESTAMPS\n)\n\n\/\/ SetVerboseLevel sets a level of verbosity when logging to stdout\/stderr\nfunc SetVerboseLevel(v int) {\n\tC.ploop_set_verbose_level(C.int(v))\n}\n\n\/\/ SetLogFile enables logging to a file and sets log file name\nfunc SetLogFile(file string) error {\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tret := C.ploop_set_log_file(cfile)\n\n\treturn mkerr(ret)\n}\n\n\/\/ SetLogLevel sets a level of verbosity when logging to a file\nfunc SetLogLevel(v int) {\n\tC.ploop_set_log_level(C.int(v))\n}\n\n\/\/ Ploop is a type containing DiskDescriptor.xml opened by the library\ntype Ploop struct {\n\td *C.struct_ploop_disk_images_data\n}\n\nvar once sync.Once\n\n\/\/ load ploop modules\nfunc loadKmod() {\n\t\/\/ try to load ploop modules\n\tmodules := []string{\"ploop\", \"pfmt_ploop1\", \"pfmt_raw\", \"pio_direct\", \"pio_nfs\", \"pio_kaio\"}\n\tfor _, m := range modules {\n\t\texec.Command(\"modprobe\", m).Run()\n\t}\n}\n\n\/\/ Open opens a ploop DiskDescriptor.xml, most ploop operations require it\nfunc Open(file string) (Ploop, error) {\n\tvar d Ploop\n\n\tonce.Do(loadKmod)\n\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tret := C.ploop_open_dd(&d.d, cfile)\n\n\treturn d, mkerr(ret)\n}\n\n\/\/ Close closes a ploop disk descriptor when it is no longer needed\nfunc (d Ploop) Close() {\n\tC.ploop_close_dd(d.d)\n}\n\n\/\/ ImageMode is a type for CreateParam.Mode field\ntype ImageMode int\n\n\/\/ Possible values for ImageMode\nconst (\n\tExpanded ImageMode = C.PLOOP_EXPANDED_MODE\n\tPreallocated ImageMode = C.PLOOP_EXPANDED_PREALLOCATED_MODE\n\tRaw ImageMode = C.PLOOP_RAW_MODE\n)\n\n\/\/ ParseImageMode converts a string to ImageMode value\nfunc ParseImageMode(s string) (ImageMode, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"expanded\":\n\t\treturn Expanded, nil\n\tcase \"preallocated\":\n\t\treturn Preallocated, nil\n\tcase \"raw\":\n\t\treturn Raw, nil\n\tdefault:\n\t\treturn Expanded, mkerr(E_PARAM)\n\t}\n}\n\n\/\/ String converts an ImageMode value to string\nfunc (m ImageMode) String() string {\n\tswitch m {\n\tcase Expanded:\n\t\treturn \"Expanded\"\n\tcase Preallocated:\n\t\treturn \"Preallocated\"\n\tcase Raw:\n\t\treturn \"Raw\"\n\t}\n\treturn \"<unknown>\"\n}\n\n\/\/ CreateFlags is a type for CreateParam.Flags\ntype CreateFlags uint\n\n\/\/ Possible values for CreateFlags\nconst (\n\tNoLazy CreateFlags = C.PLOOP_CREATE_NOLAZY\n)\n\n\/\/ CreateParam is a set of parameters for a newly created ploop\ntype CreateParam struct {\n\tSize uint64 \/\/ image size, in kilobytes (FS size is about 10% smaller)\n\tMode ImageMode \/\/ image mode\n\tFile string \/\/ path to and a file name for base delta image\n\tCLog uint \/\/ cluster block size log (6 to 15, default 11)\n\tFlags CreateFlags \/\/ flags\n}\n\n\/\/ Create creates a ploop image and its DiskDescriptor.xml\nfunc Create(p *CreateParam) error {\n\tvar a C.struct_ploop_create_param\n\n\tonce.Do(loadKmod)\n\n\t\/\/ default image file name\n\tif p.File == \"\" {\n\t\tp.File = \"root.hdd\"\n\t}\n\n\ta.size = convertSize(p.Size)\n\ta.mode = C.int(p.Mode)\n\tif p.CLog != 0 {\n\t\t\/\/ ploop cluster block size, in 512-byte sectors\n\t\t\/\/ default is 1M cluster block size (CLog=11)\n\t\t\/\/ 2^11 = 2048 sectors, 2048*512 = 1M\n\t\ta.blocksize = 1 << p.CLog\n\t}\n\ta.flags = C.uint(p.Flags)\n\ta.image = C.CString(p.File)\n\tdefer cfree(a.image)\n\ta.fstype = C.CString(\"ext4\")\n\tdefer cfree(a.fstype)\n\n\tret := C.ploop_create_image(&a)\n\treturn mkerr(ret)\n}\n\n\/\/ MountParam is a set of parameters to pass to Mount()\ntype MountParam struct {\n\tUUID string \/\/ snapshot uuid (empty for top delta)\n\tTarget string \/\/ mount point (empty if no mount is needed)\n\tFlags int \/\/ bit mount flags such as MS_NOATIME\n\tData string \/\/ auxiliary mount options\n\tReadonly bool \/\/ mount read-only\n\tFsck bool \/\/ do fsck before mounting inner FS\n\tQuota bool \/\/ enable quota for inner FS\n}\n\n\/\/ Mount creates a ploop device and (optionally) mounts it\nfunc (d Ploop) Mount(p *MountParam) (string, error) {\n\tvar a C.struct_ploop_mount_param\n\tvar device string\n\n\tif p.UUID != \"\" {\n\t\ta.guid = C.CString(p.UUID)\n\t\tdefer cfree(a.guid)\n\t}\n\tif p.Target != \"\" {\n\t\ta.target = C.CString(p.Target)\n\t\tdefer cfree(a.target)\n\t}\n\n\t\/\/ mount_data should not be NULL\n\ta.mount_data = C.CString(p.Data)\n\tdefer cfree(a.mount_data)\n\n\ta.flags = C.int(p.Flags)\n\ta.ro = boolToC(p.Readonly)\n\ta.fsck = boolToC(p.Fsck)\n\ta.quota = boolToC(p.Quota)\n\n\tret := C.ploop_mount_image(d.d, &a)\n\tif ret == 0 {\n\t\tdevice = C.GoString(&a.device[0])\n\t\t\/\/ TODO? fsck_code = C.GoString(a.fsck_rc)\n\t}\n\treturn device, mkerr(ret)\n}\n\n\/\/ Umount unmounts the ploop filesystem and dismantles the device\nfunc (d Ploop) Umount() error {\n\tret := C.ploop_umount_image(d.d)\n\n\treturn mkerr(ret)\n}\n\n\/\/ UmountByDevice unmounts the ploop filesystem and dismantles the device.\n\/\/ Unlike Umount(), this is a lower-level function meaning it can be less\n\/\/ safe and should generally not be used.\nfunc UmountByDevice(dev string) error {\n\tcdev := C.CString(dev)\n\tdefer cfree(cdev)\n\n\tret := C.ploop_umount(cdev, nil)\n\n\treturn mkerr(ret)\n}\n\n\/\/ Resize changes the ploop size. Online resize is recommended.\nfunc (d Ploop) Resize(size uint64, offline bool) error {\n\tvar p C.struct_ploop_resize_param\n\n\tp.size = convertSize(size)\n\tp.offline_resize = boolToC(offline)\n\n\tret := C.ploop_resize_image(d.d, &p)\n\treturn mkerr(ret)\n}\n\n\/\/ Snapshot creates a ploop snapshot, returning its uuid\nfunc (d Ploop) Snapshot() (string, error) {\n\tvar p C.struct_ploop_snapshot_param\n\tvar uuid, err = UUID()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tret := C.ploop_create_snapshot(d.d, &p)\n\tif ret == 0 {\n\t\tuuid = C.GoString(p.guid)\n\t}\n\n\treturn uuid, mkerr(ret)\n}\n\n\/\/ SwitchSnapshot switches to a specified snapshot,\n\/\/ creates a new empty delta on top of it, and makes it a top one\n\/\/ (i.e. the one new data will be written to).\n\/\/ Old top delta (i.e. data modified since the last snapshot) is lost.\nfunc (d Ploop) SwitchSnapshot(uuid string) error {\n\tvar p C.struct_ploop_snapshot_switch_param\n\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tret := C.ploop_switch_snapshot_ex(d.d, &p)\n\n\treturn mkerr(ret)\n}\n\n\/\/ SwitchFlag is a type for SwitchSnapshotExtended.Flags\ntype SwitchFlag uint\n\nconst (\n\t\/\/ SkipDestroy flag, if set, modifies the behavior of\n\t\/\/ SwitchSnapshotExtended to not delete the old top delta, but\n\t\/\/ make it a snapshot and return its uuid. Without this flag,\n\t\/\/ old top delta (i.e. data modified since the last snapshot)\n\t\/\/ is lost.\n\tSkipDestroy SwitchFlag = C.PLOOP_SNAP_SKIP_TOPDELTA_DESTROY\n\t\/\/ SkipCreate flag, if set, modifies the behavior of\n\t\/\/ SwitchSnapshotExtended to not create a new top delta,\n\t\/\/ but rather transform the specified snapshot itself to be\n\t\/\/ the new top delta), so all new changes will be written\n\t\/\/ right to it. Snapshot UUID is lost in this case.\n\tSkipCreate SwitchFlag = C.PLOOP_SNAP_SKIP_TOPDELTA_CREATE\n)\n\n\/\/ SwitchSnapshotExtended is same as SwitchSnapshot but with additional\n\/\/ flags modifying its behavior. Please see individual flags description.\n\/\/ Returns uuid of what was the old top delta if SkipDestroy flag is set.\nfunc (d Ploop) SwitchSnapshotExtended(uuid string, flags SwitchFlag) (string, error) {\n\tvar p C.struct_ploop_snapshot_switch_param\n\toldUUID := \"\"\n\n\tp.guid = C.CString(uuid)\n\tdefer cfree(p.guid)\n\n\tp.flags = C.int(flags)\n\n\tif flags&SkipDestroy != 0 {\n\t\toldUUID, err := UUID()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tp.guid_old = C.CString(oldUUID)\n\t\tdefer cfree(p.guid_old)\n\t}\n\n\tret := C.ploop_switch_snapshot_ex(d.d, &p)\n\n\treturn oldUUID, mkerr(ret)\n}\n\n\/\/ DeleteSnapshot deletes a snapshot (merging it down if necessary)\nfunc (d Ploop) DeleteSnapshot(uuid string) error {\n\tcuuid := C.CString(uuid)\n\tdefer cfree(cuuid)\n\n\tret := C.ploop_delete_snapshot(d.d, cuuid)\n\n\treturn mkerr(ret)\n}\n\n\/\/ ReplaceFlag is a type for ReplaceParam.Flags field\ntype ReplaceFlag int\n\n\/\/ Possible values for ReplaceParam.Flags field\nconst (\n\t\/\/ KeepName renames the new file to old file name after replace;\n\t\/\/ note that if this option is used the old file is removed.\n\tKeepName ReplaceFlag = C.PLOOP_REPLACE_KEEP_NAME\n)\n\n\/\/ ReplaceParam is a set of parameters to Replace()\ntype ReplaceParam struct {\n\tFile string \/\/ new image file name\n\t\/\/ Image to be replaced is specified by either\n\t\/\/ uuid, current file name, or level,\n\t\/\/ in the above order of preference.\n\tUUID string\n\tCurFile string\n\tLevel int\n\tFlags ReplaceFlag\n}\n\n\/\/ Replace replaces a ploop image to a different (but identical) one\nfunc (d Ploop) Replace(p *ReplaceParam) error {\n\tvar a C.struct_ploop_replace_param\n\n\ta.file = C.CString(p.File)\n\tdefer cfree(a.file)\n\n\tif p.UUID != \"\" {\n\t\ta.guid = C.CString(p.UUID)\n\t\tdefer cfree(a.guid)\n\t} else if p.CurFile != \"\" {\n\t\ta.cur_file = C.CString(p.CurFile)\n\t\tdefer cfree(a.cur_file)\n\t} else {\n\t\ta.level = C.int(p.Level)\n\t}\n\n\ta.flags = C.int(p.Flags)\n\n\tret := C.ploop_replace_image(d.d, &a)\n\n\treturn mkerr(ret)\n}\n\n\/\/ IsMounted returns true if ploop is mounted\nfunc (d Ploop) IsMounted() (bool, error) {\n\tret := C.ploop_is_mounted(d.d)\n\tif ret == 0 {\n\t\treturn false, nil\n\t} else if ret == 1 {\n\t\treturn true, nil\n\t} else {\n\t\t\/\/ error, but no code, make our own\n\t\treturn false, mkerr(E_SYS)\n\t}\n}\n\n\/\/ FSInfoData holds information about ploop inner file system\ntype FSInfoData struct {\n\tBlockSize uint64\n\tBlocks uint64\n\tBlocksFree uint64\n\tInodes uint64\n\tInodesFree uint64\n}\n\n\/\/ FSInfo gets info of ploop's inner file system\nfunc FSInfo(file string) (FSInfoData, error) {\n\tvar cinfo C.struct_ploop_info\n\tvar info FSInfoData\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tonce.Do(loadKmod)\n\n\tret := C.ploop_get_info_by_descr(cfile, &cinfo)\n\tif ret == 0 {\n\t\tinfo.BlockSize = uint64(cinfo.fs_bsize)\n\t\tinfo.Blocks = uint64(cinfo.fs_blocks)\n\t\tinfo.BlocksFree = uint64(cinfo.fs_bfree)\n\t\tinfo.Inodes = uint64(cinfo.fs_inodes)\n\t\tinfo.InodesFree = uint64(cinfo.fs_ifree)\n\t}\n\n\treturn info, mkerr(ret)\n}\n\n\/\/ ImageInfoData holds information about ploop image\ntype ImageInfoData struct {\n\tBlocks uint64\n\tBlockSize uint32\n\tVersion int\n}\n\n\/\/ ImageInfo gets information about a ploop image\nfunc (d Ploop) ImageInfo() (ImageInfoData, error) {\n\tvar cinfo C.struct_ploop_spec\n\tvar info ImageInfoData\n\n\tret := C.ploop_get_spec(d.d, &cinfo)\n\tif ret == 0 {\n\t\tinfo.Blocks = uint64(cinfo.size)\n\t\tinfo.BlockSize = uint32(cinfo.blocksize)\n\t\tinfo.Version = int(cinfo.fmt_version)\n\t}\n\n\treturn info, mkerr(ret)\n}\n\n\/\/ TopDeltaFile returns file name of top delta\nfunc (d Ploop) TopDeltaFile() (string, error) {\n\tconst len = 4096 \/\/ PATH_MAX\n\tvar out [len]C.char\n\n\tret := C.ploop_get_top_delta_fname(d.d, &out[0], len)\n\tif ret != 0 {\n\t\t\/\/ error, but no code, make our own\n\t\treturn \"\", mkerr(E_SYS)\n\t}\n\n\tfile := C.GoString(&out[0])\n\treturn file, nil\n}\n\n\/\/ UUID generates a ploop UUID\nfunc UUID() (string, error) {\n\tvar cuuid [39]C.char\n\n\tret := C.ploop_uuid_generate(&cuuid[0], 39)\n\tif ret != 0 {\n\t\treturn \"\", mkerr(ret)\n\t}\n\n\tuuid := C.GoString(&cuuid[0])\n\treturn uuid, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ execprog executes a single program or a set of programs\n\/\/ and optionally prints information about execution.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/cover\"\n\t\"github.com\/google\/syzkaller\/pkg\/csource\"\n\t\"github.com\/google\/syzkaller\/pkg\/host\"\n\t\"github.com\/google\/syzkaller\/pkg\/ipc\"\n\t\"github.com\/google\/syzkaller\/pkg\/ipc\/ipcconfig\"\n\t\"github.com\/google\/syzkaller\/pkg\/log\"\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/pkg\/tool\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t_ \"github.com\/google\/syzkaller\/sys\"\n)\n\nvar (\n\tflagOS = flag.String(\"os\", runtime.GOOS, \"target os\")\n\tflagArch = flag.String(\"arch\", runtime.GOARCH, \"target arch\")\n\tflagCoverFile = flag.String(\"coverfile\", \"\", \"write coverage to the file\")\n\tflagRepeat = flag.Int(\"repeat\", 1, \"repeat execution that many times (0 for infinite loop)\")\n\tflagProcs = flag.Int(\"procs\", 1, \"number of parallel processes to execute programs\")\n\tflagOutput = flag.Bool(\"output\", false, \"write programs and results to stdout\")\n\tflagHints = flag.Bool(\"hints\", false, \"do a hints-generation run\")\n\tflagEnable = flag.String(\"enable\", \"none\", \"enable only listed additional features\")\n\tflagDisable = flag.String(\"disable\", \"none\", \"enable all additional features except listed\")\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: execprog [flags] file-with-programs+\\n\")\n\t\tflag.PrintDefaults()\n\t\tcsource.PrintAvailableFeaturesFlags()\n\t}\n\tdefer tool.Init()()\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tfeaturesFlags, err := csource.ParseFeaturesFlags(*flagEnable, *flagDisable, true)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\ttarget, err := prog.GetTarget(*flagOS, *flagArch)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tentries := loadPrograms(target, flag.Args())\n\tif len(entries) == 0 {\n\t\treturn\n\t}\n\tfeatures, err := host.Check(target)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tif *flagOutput {\n\t\tfor _, feat := range features.Supported() {\n\t\t\tlog.Logf(0, \"%-24v: %v\", feat.Name, feat.Reason)\n\t\t}\n\t}\n\tconfig, execOpts := createConfig(target, features, featuresFlags)\n\tif err = host.Setup(target, features, featuresFlags, config.Executor); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar gateCallback func()\n\tif features[host.FeatureLeak].Enabled {\n\t\tgateCallback = func() {\n\t\t\toutput, err := osutil.RunCmd(10*time.Minute, \"\", config.Executor, \"leak\")\n\t\t\tif err != nil {\n\t\t\t\tos.Stdout.Write(output)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\tctx := &Context{\n\t\tentries: entries,\n\t\tconfig: config,\n\t\texecOpts: execOpts,\n\t\tgate: ipc.NewGate(2**flagProcs, gateCallback),\n\t\tshutdown: make(chan struct{}),\n\t\trepeat: *flagRepeat,\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(*flagProcs)\n\tfor p := 0; p < *flagProcs; p++ {\n\t\tpid := p\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tctx.run(pid)\n\t\t}()\n\t}\n\tosutil.HandleInterrupts(ctx.shutdown)\n\twg.Wait()\n}\n\ntype Context struct {\n\tentries []*prog.LogEntry\n\tconfig *ipc.Config\n\texecOpts *ipc.ExecOpts\n\tgate *ipc.Gate\n\tshutdown chan struct{}\n\tlogMu sync.Mutex\n\tposMu sync.Mutex\n\trepeat int\n\tpos int\n\tlastPrint time.Time\n}\n\nfunc (ctx *Context) run(pid int) {\n\tenv, err := ipc.MakeEnv(ctx.config, pid)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create ipc env: %v\", err)\n\t}\n\tdefer env.Close()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.shutdown:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tidx := ctx.getProgramIndex()\n\t\tif ctx.repeat > 0 && idx >= len(ctx.entries)*ctx.repeat {\n\t\t\treturn\n\t\t}\n\t\tentry := ctx.entries[idx%len(ctx.entries)]\n\t\tctx.execute(pid, env, entry)\n\t}\n}\n\nfunc (ctx *Context) execute(pid int, env *ipc.Env, entry *prog.LogEntry) {\n\t\/\/ Limit concurrency window.\n\tticket := ctx.gate.Enter()\n\tdefer ctx.gate.Leave(ticket)\n\n\tcallOpts := ctx.execOpts\n\tif *flagOutput {\n\t\tctx.logProgram(pid, entry.P, callOpts)\n\t}\n\t\/\/ This mimics the syz-fuzzer logic. This is important for reproduction.\n\tfor try := 0; ; try++ {\n\t\toutput, info, hanged, err := env.Exec(callOpts, entry.P)\n\t\tif err != nil && err != prog.ErrExecBufferTooSmall {\n\t\t\tif try > 10 {\n\t\t\t\tlog.Fatalf(\"executor failed %v times: %v\\n%s\", try, err, output)\n\t\t\t}\n\t\t\t\/\/ Don't print err\/output in this case as it may contain \"SYZFAIL\" and we want to fail yet.\n\t\t\tlog.Logf(1, \"executor failed, retrying\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif ctx.config.Flags&ipc.FlagDebug != 0 || err != nil {\n\t\t\tlog.Logf(0, \"result: hanged=%v err=%v\\n\\n%s\", hanged, err, output)\n\t\t}\n\t\tif info != nil {\n\t\t\tctx.printCallResults(info)\n\t\t\tif *flagHints {\n\t\t\t\tctx.printHints(entry.P, info)\n\t\t\t}\n\t\t\tif *flagCoverFile != \"\" {\n\t\t\t\tctx.dumpCoverage(*flagCoverFile, info)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Logf(1, \"RESULT: no calls executed\")\n\t\t}\n\t\tbreak\n\t}\n}\n\nfunc (ctx *Context) logProgram(pid int, p *prog.Prog, callOpts *ipc.ExecOpts) {\n\tdata := p.Serialize()\n\tctx.logMu.Lock()\n\tlog.Logf(0, \"executing program %v:\\n%s\", pid, data)\n\tctx.logMu.Unlock()\n}\n\nfunc (ctx *Context) printCallResults(info *ipc.ProgInfo) {\n\tfor i, inf := range info.Calls {\n\t\tif inf.Flags&ipc.CallExecuted == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tflags := \"\"\n\t\tif inf.Flags&ipc.CallFinished == 0 {\n\t\t\tflags += \" unfinished\"\n\t\t}\n\t\tif inf.Flags&ipc.CallBlocked != 0 {\n\t\t\tflags += \" blocked\"\n\t\t}\n\t\tif inf.Flags&ipc.CallFaultInjected != 0 {\n\t\t\tflags += \" faulted\"\n\t\t}\n\t\tlog.Logf(1, \"CALL %v: signal %v, coverage %v errno %v%v\",\n\t\t\ti, len(inf.Signal), len(inf.Cover), inf.Errno, flags)\n\t}\n}\n\nfunc (ctx *Context) printHints(p *prog.Prog, info *ipc.ProgInfo) {\n\tncomps, ncandidates := 0, 0\n\tfor i := range p.Calls {\n\t\tif *flagOutput {\n\t\t\tfmt.Printf(\"call %v:\\n\", i)\n\t\t}\n\t\tcomps := info.Calls[i].Comps\n\t\tfor v, args := range comps {\n\t\t\tncomps += len(args)\n\t\t\tif *flagOutput {\n\t\t\t\tfmt.Printf(\"comp 0x%x:\", v)\n\t\t\t\tfor arg := range args {\n\t\t\t\t\tfmt.Printf(\" 0x%x\", arg)\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\n\")\n\t\t\t}\n\t\t}\n\t\tp.MutateWithHints(i, comps, func(p *prog.Prog) {\n\t\t\tncandidates++\n\t\t\tif *flagOutput {\n\t\t\t\tlog.Logf(1, \"PROGRAM:\\n%s\", p.Serialize())\n\t\t\t}\n\t\t})\n\t}\n\tlog.Logf(0, \"ncomps=%v ncandidates=%v\", ncomps, ncandidates)\n}\n\nfunc (ctx *Context) dumpCallCoverage(coverFile string, info *ipc.CallInfo) {\n\tif len(info.Cover) == 0 {\n\t\treturn\n\t}\n\tbuf := new(bytes.Buffer)\n\tfor _, pc := range info.Cover {\n\t\tfmt.Fprintf(buf, \"0x%x\\n\", cover.RestorePC(pc, 0xffffffff))\n\t}\n\terr := osutil.WriteFile(coverFile, buf.Bytes())\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to write coverage file: %v\", err)\n\t}\n}\n\nfunc (ctx *Context) dumpCoverage(coverFile string, info *ipc.ProgInfo) {\n\tfor i, inf := range info.Calls {\n\t\tlog.Logf(0, \"call #%v: signal %v, coverage %v\", i, len(inf.Signal), len(inf.Cover))\n\t\tctx.dumpCallCoverage(fmt.Sprintf(\"%v.%v\", coverFile, i), &inf)\n\t}\n\tlog.Logf(0, \"extra: signal %v, coverage %v\", len(info.Extra.Signal), len(info.Extra.Cover))\n\tctx.dumpCallCoverage(fmt.Sprintf(\"%v.extra\", coverFile), &info.Extra)\n}\n\nfunc (ctx *Context) getProgramIndex() int {\n\tctx.posMu.Lock()\n\tidx := ctx.pos\n\tctx.pos++\n\tif idx%len(ctx.entries) == 0 && time.Since(ctx.lastPrint) > 5*time.Second {\n\t\tlog.Logf(0, \"executed programs: %v\", idx)\n\t\tctx.lastPrint = time.Now()\n\t}\n\tctx.posMu.Unlock()\n\treturn idx\n}\n\nfunc loadPrograms(target *prog.Target, files []string) []*prog.LogEntry {\n\tvar entries []*prog.LogEntry\n\tfor _, fn := range files {\n\t\tdata, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to read log file: %v\", err)\n\t\t}\n\t\tentries = append(entries, target.ParseLog(data)...)\n\t}\n\tlog.Logf(0, \"parsed %v programs\", len(entries))\n\treturn entries\n}\n\nfunc createConfig(target *prog.Target, features *host.Features, featuresFlags csource.Features) (\n\t*ipc.Config, *ipc.ExecOpts) {\n\tconfig, execOpts, err := ipcconfig.Default(target)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tif config.Flags&ipc.FlagSignal != 0 {\n\t\texecOpts.Flags |= ipc.FlagCollectCover\n\t}\n\tif *flagCoverFile != \"\" {\n\t\tconfig.Flags |= ipc.FlagSignal\n\t\texecOpts.Flags |= ipc.FlagCollectCover\n\t\texecOpts.Flags &^= ipc.FlagDedupCover\n\t}\n\tif *flagHints {\n\t\tif execOpts.Flags&ipc.FlagCollectCover != 0 {\n\t\t\texecOpts.Flags ^= ipc.FlagCollectCover\n\t\t}\n\t\texecOpts.Flags |= ipc.FlagCollectComps\n\t}\n\tif features[host.FeatureExtraCoverage].Enabled {\n\t\tconfig.Flags |= ipc.FlagExtraCover\n\t}\n\tif featuresFlags[\"tun\"].Enabled && features[host.FeatureNetInjection].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableTun\n\t}\n\tif featuresFlags[\"net_dev\"].Enabled && features[host.FeatureNetDevices].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableNetDev\n\t}\n\tif featuresFlags[\"net_reset\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableNetReset\n\t}\n\tif featuresFlags[\"cgroups\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableCgroups\n\t}\n\tif featuresFlags[\"close_fds\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableCloseFds\n\t}\n\tif featuresFlags[\"devlink_pci\"].Enabled && features[host.FeatureDevlinkPCI].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableDevlinkPCI\n\t}\n\tif featuresFlags[\"vhci\"].Enabled && features[host.FeatureVhciInjection].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableVhciInjection\n\t}\n\tif featuresFlags[\"wifi\"].Enabled && features[host.FeatureWifiEmulation].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableWifi\n\t}\n\treturn config, execOpts\n}\n<commit_msg>tools\/syz-execprog: don't store prog.LogEntry's<commit_after>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ execprog executes a single program or a set of programs\n\/\/ and optionally prints information about execution.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/cover\"\n\t\"github.com\/google\/syzkaller\/pkg\/csource\"\n\t\"github.com\/google\/syzkaller\/pkg\/host\"\n\t\"github.com\/google\/syzkaller\/pkg\/ipc\"\n\t\"github.com\/google\/syzkaller\/pkg\/ipc\/ipcconfig\"\n\t\"github.com\/google\/syzkaller\/pkg\/log\"\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/pkg\/tool\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t_ \"github.com\/google\/syzkaller\/sys\"\n)\n\nvar (\n\tflagOS = flag.String(\"os\", runtime.GOOS, \"target os\")\n\tflagArch = flag.String(\"arch\", runtime.GOARCH, \"target arch\")\n\tflagCoverFile = flag.String(\"coverfile\", \"\", \"write coverage to the file\")\n\tflagRepeat = flag.Int(\"repeat\", 1, \"repeat execution that many times (0 for infinite loop)\")\n\tflagProcs = flag.Int(\"procs\", 1, \"number of parallel processes to execute programs\")\n\tflagOutput = flag.Bool(\"output\", false, \"write programs and results to stdout\")\n\tflagHints = flag.Bool(\"hints\", false, \"do a hints-generation run\")\n\tflagEnable = flag.String(\"enable\", \"none\", \"enable only listed additional features\")\n\tflagDisable = flag.String(\"disable\", \"none\", \"enable all additional features except listed\")\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: execprog [flags] file-with-programs+\\n\")\n\t\tflag.PrintDefaults()\n\t\tcsource.PrintAvailableFeaturesFlags()\n\t}\n\tdefer tool.Init()()\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tfeaturesFlags, err := csource.ParseFeaturesFlags(*flagEnable, *flagDisable, true)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\n\ttarget, err := prog.GetTarget(*flagOS, *flagArch)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tprogs := loadPrograms(target, flag.Args())\n\tif len(progs) == 0 {\n\t\treturn\n\t}\n\tfeatures, err := host.Check(target)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tif *flagOutput {\n\t\tfor _, feat := range features.Supported() {\n\t\t\tlog.Logf(0, \"%-24v: %v\", feat.Name, feat.Reason)\n\t\t}\n\t}\n\tconfig, execOpts := createConfig(target, features, featuresFlags)\n\tif err = host.Setup(target, features, featuresFlags, config.Executor); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar gateCallback func()\n\tif features[host.FeatureLeak].Enabled {\n\t\tgateCallback = func() {\n\t\t\toutput, err := osutil.RunCmd(10*time.Minute, \"\", config.Executor, \"leak\")\n\t\t\tif err != nil {\n\t\t\t\tos.Stdout.Write(output)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\tctx := &Context{\n\t\tprogs: progs,\n\t\tconfig: config,\n\t\texecOpts: execOpts,\n\t\tgate: ipc.NewGate(2**flagProcs, gateCallback),\n\t\tshutdown: make(chan struct{}),\n\t\trepeat: *flagRepeat,\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(*flagProcs)\n\tfor p := 0; p < *flagProcs; p++ {\n\t\tpid := p\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tctx.run(pid)\n\t\t}()\n\t}\n\tosutil.HandleInterrupts(ctx.shutdown)\n\twg.Wait()\n}\n\ntype Context struct {\n\tprogs []*prog.Prog\n\tconfig *ipc.Config\n\texecOpts *ipc.ExecOpts\n\tgate *ipc.Gate\n\tshutdown chan struct{}\n\tlogMu sync.Mutex\n\tposMu sync.Mutex\n\trepeat int\n\tpos int\n\tlastPrint time.Time\n}\n\nfunc (ctx *Context) run(pid int) {\n\tenv, err := ipc.MakeEnv(ctx.config, pid)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create ipc env: %v\", err)\n\t}\n\tdefer env.Close()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.shutdown:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tidx := ctx.getProgramIndex()\n\t\tif ctx.repeat > 0 && idx >= len(ctx.progs)*ctx.repeat {\n\t\t\treturn\n\t\t}\n\t\tentry := ctx.progs[idx%len(ctx.progs)]\n\t\tctx.execute(pid, env, entry)\n\t}\n}\n\nfunc (ctx *Context) execute(pid int, env *ipc.Env, p *prog.Prog) {\n\t\/\/ Limit concurrency window.\n\tticket := ctx.gate.Enter()\n\tdefer ctx.gate.Leave(ticket)\n\n\tcallOpts := ctx.execOpts\n\tif *flagOutput {\n\t\tctx.logProgram(pid, p, callOpts)\n\t}\n\t\/\/ This mimics the syz-fuzzer logic. This is important for reproduction.\n\tfor try := 0; ; try++ {\n\t\toutput, info, hanged, err := env.Exec(callOpts, p)\n\t\tif err != nil && err != prog.ErrExecBufferTooSmall {\n\t\t\tif try > 10 {\n\t\t\t\tlog.Fatalf(\"executor failed %v times: %v\\n%s\", try, err, output)\n\t\t\t}\n\t\t\t\/\/ Don't print err\/output in this case as it may contain \"SYZFAIL\" and we want to fail yet.\n\t\t\tlog.Logf(1, \"executor failed, retrying\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif ctx.config.Flags&ipc.FlagDebug != 0 || err != nil {\n\t\t\tlog.Logf(0, \"result: hanged=%v err=%v\\n\\n%s\", hanged, err, output)\n\t\t}\n\t\tif info != nil {\n\t\t\tctx.printCallResults(info)\n\t\t\tif *flagHints {\n\t\t\t\tctx.printHints(p, info)\n\t\t\t}\n\t\t\tif *flagCoverFile != \"\" {\n\t\t\t\tctx.dumpCoverage(*flagCoverFile, info)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Logf(1, \"RESULT: no calls executed\")\n\t\t}\n\t\tbreak\n\t}\n}\n\nfunc (ctx *Context) logProgram(pid int, p *prog.Prog, callOpts *ipc.ExecOpts) {\n\tdata := p.Serialize()\n\tctx.logMu.Lock()\n\tlog.Logf(0, \"executing program %v:\\n%s\", pid, data)\n\tctx.logMu.Unlock()\n}\n\nfunc (ctx *Context) printCallResults(info *ipc.ProgInfo) {\n\tfor i, inf := range info.Calls {\n\t\tif inf.Flags&ipc.CallExecuted == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tflags := \"\"\n\t\tif inf.Flags&ipc.CallFinished == 0 {\n\t\t\tflags += \" unfinished\"\n\t\t}\n\t\tif inf.Flags&ipc.CallBlocked != 0 {\n\t\t\tflags += \" blocked\"\n\t\t}\n\t\tif inf.Flags&ipc.CallFaultInjected != 0 {\n\t\t\tflags += \" faulted\"\n\t\t}\n\t\tlog.Logf(1, \"CALL %v: signal %v, coverage %v errno %v%v\",\n\t\t\ti, len(inf.Signal), len(inf.Cover), inf.Errno, flags)\n\t}\n}\n\nfunc (ctx *Context) printHints(p *prog.Prog, info *ipc.ProgInfo) {\n\tncomps, ncandidates := 0, 0\n\tfor i := range p.Calls {\n\t\tif *flagOutput {\n\t\t\tfmt.Printf(\"call %v:\\n\", i)\n\t\t}\n\t\tcomps := info.Calls[i].Comps\n\t\tfor v, args := range comps {\n\t\t\tncomps += len(args)\n\t\t\tif *flagOutput {\n\t\t\t\tfmt.Printf(\"comp 0x%x:\", v)\n\t\t\t\tfor arg := range args {\n\t\t\t\t\tfmt.Printf(\" 0x%x\", arg)\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\n\")\n\t\t\t}\n\t\t}\n\t\tp.MutateWithHints(i, comps, func(p *prog.Prog) {\n\t\t\tncandidates++\n\t\t\tif *flagOutput {\n\t\t\t\tlog.Logf(1, \"PROGRAM:\\n%s\", p.Serialize())\n\t\t\t}\n\t\t})\n\t}\n\tlog.Logf(0, \"ncomps=%v ncandidates=%v\", ncomps, ncandidates)\n}\n\nfunc (ctx *Context) dumpCallCoverage(coverFile string, info *ipc.CallInfo) {\n\tif len(info.Cover) == 0 {\n\t\treturn\n\t}\n\tbuf := new(bytes.Buffer)\n\tfor _, pc := range info.Cover {\n\t\tfmt.Fprintf(buf, \"0x%x\\n\", cover.RestorePC(pc, 0xffffffff))\n\t}\n\terr := osutil.WriteFile(coverFile, buf.Bytes())\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to write coverage file: %v\", err)\n\t}\n}\n\nfunc (ctx *Context) dumpCoverage(coverFile string, info *ipc.ProgInfo) {\n\tfor i, inf := range info.Calls {\n\t\tlog.Logf(0, \"call #%v: signal %v, coverage %v\", i, len(inf.Signal), len(inf.Cover))\n\t\tctx.dumpCallCoverage(fmt.Sprintf(\"%v.%v\", coverFile, i), &inf)\n\t}\n\tlog.Logf(0, \"extra: signal %v, coverage %v\", len(info.Extra.Signal), len(info.Extra.Cover))\n\tctx.dumpCallCoverage(fmt.Sprintf(\"%v.extra\", coverFile), &info.Extra)\n}\n\nfunc (ctx *Context) getProgramIndex() int {\n\tctx.posMu.Lock()\n\tidx := ctx.pos\n\tctx.pos++\n\tif idx%len(ctx.progs) == 0 && time.Since(ctx.lastPrint) > 5*time.Second {\n\t\tlog.Logf(0, \"executed programs: %v\", idx)\n\t\tctx.lastPrint = time.Now()\n\t}\n\tctx.posMu.Unlock()\n\treturn idx\n}\n\nfunc loadPrograms(target *prog.Target, files []string) []*prog.Prog {\n\tvar progs []*prog.Prog\n\tfor _, fn := range files {\n\t\tdata, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to read log file: %v\", err)\n\t\t}\n\t\tfor _, entry := range target.ParseLog(data) {\n\t\t\tprogs = append(progs, entry.P)\n\t\t}\n\t}\n\tlog.Logf(0, \"parsed %v programs\", len(progs))\n\treturn progs\n}\n\nfunc createConfig(target *prog.Target, features *host.Features, featuresFlags csource.Features) (\n\t*ipc.Config, *ipc.ExecOpts) {\n\tconfig, execOpts, err := ipcconfig.Default(target)\n\tif err != nil {\n\t\tlog.Fatalf(\"%v\", err)\n\t}\n\tif config.Flags&ipc.FlagSignal != 0 {\n\t\texecOpts.Flags |= ipc.FlagCollectCover\n\t}\n\tif *flagCoverFile != \"\" {\n\t\tconfig.Flags |= ipc.FlagSignal\n\t\texecOpts.Flags |= ipc.FlagCollectCover\n\t\texecOpts.Flags &^= ipc.FlagDedupCover\n\t}\n\tif *flagHints {\n\t\tif execOpts.Flags&ipc.FlagCollectCover != 0 {\n\t\t\texecOpts.Flags ^= ipc.FlagCollectCover\n\t\t}\n\t\texecOpts.Flags |= ipc.FlagCollectComps\n\t}\n\tif features[host.FeatureExtraCoverage].Enabled {\n\t\tconfig.Flags |= ipc.FlagExtraCover\n\t}\n\tif featuresFlags[\"tun\"].Enabled && features[host.FeatureNetInjection].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableTun\n\t}\n\tif featuresFlags[\"net_dev\"].Enabled && features[host.FeatureNetDevices].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableNetDev\n\t}\n\tif featuresFlags[\"net_reset\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableNetReset\n\t}\n\tif featuresFlags[\"cgroups\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableCgroups\n\t}\n\tif featuresFlags[\"close_fds\"].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableCloseFds\n\t}\n\tif featuresFlags[\"devlink_pci\"].Enabled && features[host.FeatureDevlinkPCI].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableDevlinkPCI\n\t}\n\tif featuresFlags[\"vhci\"].Enabled && features[host.FeatureVhciInjection].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableVhciInjection\n\t}\n\tif featuresFlags[\"wifi\"].Enabled && features[host.FeatureWifiEmulation].Enabled {\n\t\tconfig.Flags |= ipc.FlagEnableWifi\n\t}\n\treturn config, execOpts\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2018 IBM Corp.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage k8s\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/logging\"\n\t\"github.com\/skydive-project\/skydive\/probe\"\n\t\"github.com\/skydive-project\/skydive\/topology\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\n\/\/ ClusterName is the name of the k8s cluster\nconst ClusterName = \"cluster\"\n\nvar clusterEventHandler = graph.NewEventHandler(100)\n\ntype clusterCache struct {\n\t*graph.EventHandler\n\tgraph *graph.Graph\n}\n\nfunc (c *clusterCache) Start() {\n\tc.graph.Lock()\n\tdefer c.graph.Unlock()\n\n\tm := NewMetadata(Manager, \"cluster\", nil, ClusterName)\n\tnode := c.graph.NewNode(graph.GenID(), m, \"\")\n\tc.NotifyEvent(graph.NodeAdded, node)\n\tlogging.GetLogger().Debugf(\"Added cluster{Name: %s}\", ClusterName)\n}\n\nfunc (c *clusterCache) Stop() {\n}\n\nfunc newClusterProbe(clientset interface{}, g *graph.Graph) Subprobe {\n\treturn &clusterCache{\n\t\tEventHandler: clusterEventHandler,\n\t\tgraph: g,\n\t}\n}\n\nfunc newClusterLinker(g *graph.Graph, manager string, types ...string) probe.Probe {\n\tclusterIndexer := graph.NewMetadataIndexer(g, clusterEventHandler, graph.Metadata{\"Manager\": Manager, \"Type\": \"cluster\"})\n\tclusterIndexer.Start()\n\n\tobjectFilter := newTypesFilter(manager, types...)\n\tobjectIndexer := newObjectIndexerFromFilter(g, g, objectFilter)\n\tobjectIndexer.Start()\n\n\treturn graph.NewMetadataIndexerLinker(g, clusterIndexer, objectIndexer, topology.OwnershipMetadata())\n}\n<commit_msg>k8s: new cluster linker<commit_after>\/*\n * Copyright 2018 IBM Corp.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage k8s\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/logging\"\n\t\"github.com\/skydive-project\/skydive\/probe\"\n\t\"github.com\/skydive-project\/skydive\/topology\"\n\t\"github.com\/skydive-project\/skydive\/topology\/graph\"\n)\n\n\/\/ ClusterName is the name of the k8s cluster\nconst ClusterName = \"cluster\"\n\nvar clusterEventHandler = graph.NewEventHandler(100)\n\nvar clusterNode *graph.Node\n\ntype clusterCache struct {\n\t*graph.EventHandler\n\tgraph *graph.Graph\n}\n\nfunc (c *clusterCache) addClusterNode() {\n\tc.graph.Lock()\n\tdefer c.graph.Unlock()\n\n\tm := NewMetadata(Manager, \"cluster\", nil, ClusterName)\n\tclusterNode = c.graph.NewNode(graph.GenID(), m, \"\")\n\tc.NotifyEvent(graph.NodeAdded, clusterNode)\n\tlogging.GetLogger().Debugf(\"Added cluster{Name: %s}\", ClusterName)\n}\n\nfunc (c *clusterCache) Start() {\n}\n\nfunc (c *clusterCache) Stop() {\n}\n\nfunc newClusterProbe(clientset interface{}, g *graph.Graph) Subprobe {\n\tc := &clusterCache{\n\t\tEventHandler: clusterEventHandler,\n\t\tgraph: g,\n\t}\n\tc.addClusterNode()\n\treturn c\n}\n\ntype clusterLinker struct {\n\tgraph.DefaultLinker\n\t*graph.ResourceLinker\n\tg *graph.Graph\n\tobjectIndexer *graph.MetadataIndexer\n}\n\nfunc (linker *clusterLinker) createEdge(cluster, object *graph.Node) *graph.Edge {\n\tid := graph.GenID(string(cluster.ID), string(object.ID))\n\treturn linker.g.CreateEdge(id, cluster, object, topology.OwnershipMetadata(), graph.TimeUTC(), \"\")\n}\n\n\/\/ GetBALinks returns all the incoming links for a node\nfunc (linker *clusterLinker) GetBALinks(objectNode *graph.Node) (edges []*graph.Edge) {\n\tedges = append(edges, linker.createEdge(clusterNode, objectNode))\n\treturn\n}\n\nfunc newClusterLinker(g *graph.Graph, manager string, types ...string) probe.Probe {\n\tobjectIndexer := newObjectIndexerFromFilter(g, g, newTypesFilter(manager, types...))\n\tobjectIndexer.Start()\n\n\treturn graph.NewResourceLinker(\n\t\tg,\n\t\tnil,\n\t\tobjectIndexer,\n\t\t&clusterLinker{g: g},\n\t\ttopology.OwnershipMetadata(),\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package excelize\n\nimport \"encoding\/xml\"\n\n\/\/ xmlxWorkbookRels contains xmlxWorkbookRelations which maps sheet id and sheet XML.\ntype xlsxWorkbookRels struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.openxmlformats.org\/package\/2006\/relationships Relationships\"`\n\tRelationships []xlsxWorkbookRelation `xml:\"Relationship\"`\n}\n\n\/\/ xmlxWorkbookRelation maps sheet id and xl\/worksheets\/_rels\/sheet%d.xml.rels\ntype xlsxWorkbookRelation struct {\n\tID string `xml:\"Id,attr\"`\n\tTarget string `xml:\",attr\"`\n\tType string `xml:\",attr\"`\n\tTargetMode string `xml:\",attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbook directly maps the workbook element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main - currently I have\n\/\/ not checked it for completeness - it does as much as I need.\ntype xlsxWorkbook struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main workbook\"`\n\tFileVersion *xlsxFileVersion `xml:\"fileVersion\"`\n\tWorkbookPr *xlsxWorkbookPr `xml:\"workbookPr\"`\n\tWorkbookProtection *xlsxWorkbookProtection `xml:\"workbookProtection\"`\n\tBookViews xlsxBookViews `xml:\"bookViews\"`\n\tSheets xlsxSheets `xml:\"sheets\"`\n\tExternalReferences *xlsxExternalReferences `xml:\"externalReferences\"`\n\tDefinedNames *xlsxDefinedNames `xml:\"definedNames\"`\n\tCalcPr *xlsxCalcPr `xml:\"calcPr\"`\n\tPivotCaches *xlsxPivotCaches `xml:\"pivotCaches\"`\n\tExtLst *xlsxExtLst `xml:\"extLst\"`\n\tFileRecoveryPr *xlsxFileRecoveryPr `xml:\"fileRecoveryPr\"`\n}\n\n\/\/ xlsxFileRecoveryPr maps sheet recovery information. This element defines\n\/\/ properties that track the state of the workbook file, such as whether the\n\/\/ file was saved during a crash, or whether it should be opened in auto-recover\n\/\/ mode.\ntype xlsxFileRecoveryPr struct {\n\tAutoRecover bool `xml:\"autoRecover,attr,omitempty\"`\n\tCrashSave bool `xml:\"crashSave,attr,omitempty\"`\n\tDataExtractLoad bool `xml:\"dataExtractLoad,attr,omitempty\"`\n\tRepairLoad bool `xml:\"repairLoad,attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbookProtection directly maps the workbookProtection element. This\n\/\/ element specifies options for protecting data in the workbook. Applications\n\/\/ might use workbook protection to prevent anyone from accidentally changing,\n\/\/ moving, or deleting important data. This protection can be ignored by\n\/\/ applications which choose not to support this optional protection mechanism.\n\/\/ When a password is to be hashed and stored in this element, it shall be\n\/\/ hashed as defined below, starting from a UTF-16LE encoded string value. If\n\/\/ there is a leading BOM character (U+FEFF) in the encoded password it is\n\/\/ removed before hash calculation.\ntype xlsxWorkbookProtection struct {\n\tLockRevision bool `xml:\"lockRevision,attr,omitempty\"`\n\tLockStructure bool `xml:\"lockStructure,attr,omitempty\"`\n\tLockWindows bool `xml:\"lockWindows,attr,omitempty\"`\n\tRevisionsAlgorithmName string `xml:\"revisionsAlgorithmName,attr,omitempty\"`\n\tRevisionsHashValue string `xml:\"revisionsHashValue,attr,omitempty\"`\n\tRevisionsSaltValue string `xml:\"revisionsSaltValue,attr,omitempty\"`\n\tRevisionsSpinCount int `xml:\"revisionsSpinCount,attr,omitempty\"`\n\tWorkbookAlgorithmName string `xml:\"workbookAlgorithmName,attr,omitempty\"`\n\tWorkbookHashValue string `xml:\"workbookHashValue,attr,omitempty\"`\n\tWorkbookSaltValue string `xml:\"workbookSaltValue,attr,omitempty\"`\n\tWorkbookSpinCount int `xml:\"workbookSpinCount,attr,omitempty\"`\n}\n\n\/\/ xlsxFileVersion directly maps the fileVersion element. This element defines\n\/\/ properties that track which version of the application accessed the data and\n\/\/ source code contained in the file.\ntype xlsxFileVersion struct {\n\tAppName string `xml:\"appName,attr,omitempty\"`\n\tCodeName string `xml:\"codeName,attr,omitempty\"`\n\tLastEdited string `xml:\"lastEdited,attr,omitempty\"`\n\tLowestEdited string `xml:\"lowestEdited,attr,omitempty\"`\n\tRupBuild string `xml:\"rupBuild,attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbookPr directly maps the workbookPr element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ defines a collection of workbook properties.\ntype xlsxWorkbookPr struct {\n\tAllowRefreshQuery bool `xml:\"allowRefreshQuery,attr,omitempty\"`\n\tAutoCompressPictures bool `xml:\"autoCompressPictures,attr,omitempty\"`\n\tBackupFile bool `xml:\"backupFile,attr,omitempty\"`\n\tCheckCompatibility bool `xml:\"checkCompatibility,attr,omitempty\"`\n\tCodeName string `xml:\"codeName,attr,omitempty\"`\n\tDate1904 bool `xml:\"date1904,attr,omitempty\"`\n\tDefaultThemeVersion string `xml:\"defaultThemeVersion,attr,omitempty\"`\n\tFilterPrivacy bool `xml:\"filterPrivacy,attr,omitempty\"`\n\tHidePivotFieldList bool `xml:\"hidePivotFieldList,attr,omitempty\"`\n\tPromptedSolutions bool `xml:\"promptedSolutions,attr,omitempty\"`\n\tPublishItems bool `xml:\"publishItems,attr,omitempty\"`\n\tRefreshAllConnections bool `xml:\"refreshAllConnections,attr,omitempty\"`\n\tSaveExternalLinkValues bool `xml:\"saveExternalLinkValues,attr,omitempty\"`\n\tShowBorderUnselectedTables bool `xml:\"showBorderUnselectedTables,attr,omitempty\"`\n\tShowInkAnnotation bool `xml:\"showInkAnnotation,attr,omitempty\"`\n\tShowObjects string `xml:\"showObjects,attr,omitempty\"`\n\tShowPivotChartFilter bool `xml:\"showPivotChartFilter,attr,omitempty\"`\n\tUpdateLinks string `xml:\"updateLinks,attr,omitempty\"`\n}\n\n\/\/ xlsxBookViews directly maps the bookViews element. This element specifies the\n\/\/ collection of workbook views of the enclosing workbook. Each view can specify\n\/\/ a window position, filter options, and other configurations. There is no\n\/\/ limit on the number of workbook views that can be defined for a workbook.\ntype xlsxBookViews struct {\n\tWorkBookView []xlsxWorkBookView `xml:\"workbookView\"`\n}\n\n\/\/ xlsxWorkBookView directly maps the workbookView element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ specifies a single Workbook view.\ntype xlsxWorkBookView struct {\n\tActiveTab int `xml:\"activeTab,attr,omitempty\"`\n\tAutoFilterDateGrouping bool `xml:\"autoFilterDateGrouping,attr,omitempty\"`\n\tFirstSheet int `xml:\"firstSheet,attr,omitempty\"`\n\tMinimized bool `xml:\"minimized,attr,omitempty\"`\n\tShowHorizontalScroll bool `xml:\"showHorizontalScroll,attr,omitempty\"`\n\tShowSheetTabs bool `xml:\"showSheetTabs,attr,omitempty\"`\n\tShowVerticalScroll bool `xml:\"showVerticalScroll,attr,omitempty\"`\n\tTabRatio int `xml:\"tabRatio,attr,omitempty\"`\n\tVisibility string `xml:\"visibility,attr,omitempty\"`\n\tWindowHeight int `xml:\"windowHeight,attr,omitempty\"`\n\tWindowWidth int `xml:\"windowWidth,attr,omitempty\"`\n\tXWindow string `xml:\"xWindow,attr,omitempty\"`\n\tYWindow string `xml:\"yWindow,attr,omitempty\"`\n}\n\n\/\/ xlsxSheets directly maps the sheets element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main.\ntype xlsxSheets struct {\n\tSheet []xlsxSheet `xml:\"sheet\"`\n}\n\n\/\/ xlsxSheet directly maps the sheet element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main - currently I have\n\/\/ not checked it for completeness - it does as much as I need.\ntype xlsxSheet struct {\n\tName string `xml:\"name,attr,omitempty\"`\n\tSheetID string `xml:\"sheetId,attr,omitempty\"`\n\tID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n\tState string `xml:\"state,attr,omitempty\"`\n}\n\n\/\/ xlsxExternalReferences directly maps the externalReferences element of the\n\/\/ external workbook references part.\ntype xlsxExternalReferences struct {\n\tExternalReference []xlsxExternalReference `xml:\"externalReference\"`\n}\n\n\/\/ xlsxExternalReference directly maps the externalReference element of the\n\/\/ external workbook references part.\ntype xlsxExternalReference struct {\n\tRID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n}\n\n\/\/ xlsxPivotCaches element enumerates pivot cache definition parts used by pivot\n\/\/ tables and formulas in this workbook.\ntype xlsxPivotCaches struct {\n\tPivotCache []xlsxPivotCache `xml:\"pivotCache\"`\n}\n\n\/\/ xlsxPivotCache directly maps the pivotCache element.\ntype xlsxPivotCache struct {\n\tCacheID int `xml:\"cacheId,attr,omitempty\"`\n\tRID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n}\n\n\/\/ extLst element provides a convention for extending spreadsheetML in\n\/\/ predefined locations. The locations shall be denoted with the extLst element,\n\/\/ and are called extension lists. Extension list locations within the markup\n\/\/ document are specified in the markup specification and can be used to store\n\/\/ extensions to the markup specification, whether those are future version\n\/\/ extensions of the markup specification or are private extensions implemented\n\/\/ independently from the markup specification. Markup within an extension might\n\/\/ not be understood by a consumer.\ntype xlsxExtLst struct {\n\tExt string `xml:\",innerxml\"`\n}\n\n\/\/ xlsxDefinedNames directly maps the definedNames element. This element defines\n\/\/ the collection of defined names for this workbook. Defined names are\n\/\/ descriptive names to represent cells, ranges of cells, formulas, or constant\n\/\/ values. Defined names can be used to represent a range on any worksheet.\ntype xlsxDefinedNames struct {\n\tDefinedName []xlsxDefinedName `xml:\"definedName\"`\n}\n\n\/\/ xlsxDefinedName directly maps the definedName element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ defines a defined name within this workbook. A defined name is descriptive\n\/\/ text that is used to represents a cell, range of cells, formula, or constant\n\/\/ value. For a descriptions of the attributes see https:\/\/msdn.microsoft.com\/en-us\/library\/office\/documentformat.openxml.spreadsheet.definedname.aspx\ntype xlsxDefinedName struct {\n\tComment string `xml:\"comment,attr,omitempty\"`\n\tCustomMenu string `xml:\"customMenu,attr,omitempty\"`\n\tDescription string `xml:\"description,attr,omitempty\"`\n\tFunction bool `xml:\"function,attr,omitempty\"`\n\tFunctionGroupID int `xml:\"functionGroupId,attr,omitempty\"`\n\tHelp string `xml:\"help,attr,omitempty\"`\n\tHidden bool `xml:\"hidden,attr,omitempty\"`\n\tLocalSheetID *int `xml:\"localSheetId,attr\"`\n\tName string `xml:\"name,attr,omitempty\"`\n\tPublishToServer bool `xml:\"publishToServer,attr,omitempty\"`\n\tShortcutKey string `xml:\"shortcutKey,attr,omitempty\"`\n\tStatusBar string `xml:\"statusBar,attr,omitempty\"`\n\tVbProcedure bool `xml:\"vbProcedure,attr,omitempty\"`\n\tWorkbookParameter bool `xml:\"workbookParameter,attr,omitempty\"`\n\tXlm bool `xml:\"xml,attr,omitempty\"`\n\tData string `xml:\",chardata\"`\n}\n\n\/\/ xlsxCalcPr directly maps the calcPr element. This element defines the\n\/\/ collection of properties the application uses to record calculation status\n\/\/ and details. Calculation is the process of computing formulas and then\n\/\/ displaying the results as values in the cells that contain the formulas.\ntype xlsxCalcPr struct {\n\tCalcCompleted bool `xml:\"calcCompleted,attr,omitempty\"`\n\tCalcID string `xml:\"calcId,attr,omitempty\"`\n\tCalcMode string `xml:\"calcMode,attr,omitempty\"`\n\tCalcOnSave bool `xml:\"calcOnSave,attr,omitempty\"`\n\tConcurrentCalc *bool `xml:\"concurrentCalc,attr\"`\n\tConcurrentManualCount int `xml:\"concurrentManualCount,attr,omitempty\"`\n\tForceFullCalc bool `xml:\"forceFullCalc,attr,omitempty\"`\n\tFullCalcOnLoad bool `xml:\"fullCalcOnLoad,attr,omitempty\"`\n\tFullPrecision bool `xml:\"fullPrecision,attr,omitempty\"`\n\tIterate bool `xml:\"iterate,attr,omitempty\"`\n\tIterateCount int `xml:\"iterateCount,attr,omitempty\"`\n\tIterateDelta float64 `xml:\"iterateDelta,attr,omitempty\"`\n\tRefMode string `xml:\"refMode,attr,omitempty\"`\n}\n<commit_msg>Add missing XML attributes `customWorkbookViews` and `customWorkbookView` in the workbook, relate issue #152.<commit_after>package excelize\n\nimport \"encoding\/xml\"\n\n\/\/ xmlxWorkbookRels contains xmlxWorkbookRelations which maps sheet id and sheet XML.\ntype xlsxWorkbookRels struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.openxmlformats.org\/package\/2006\/relationships Relationships\"`\n\tRelationships []xlsxWorkbookRelation `xml:\"Relationship\"`\n}\n\n\/\/ xmlxWorkbookRelation maps sheet id and xl\/worksheets\/_rels\/sheet%d.xml.rels\ntype xlsxWorkbookRelation struct {\n\tID string `xml:\"Id,attr\"`\n\tTarget string `xml:\",attr\"`\n\tType string `xml:\",attr\"`\n\tTargetMode string `xml:\",attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbook directly maps the workbook element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main - currently I have\n\/\/ not checked it for completeness - it does as much as I need.\ntype xlsxWorkbook struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main workbook\"`\n\tFileVersion *xlsxFileVersion `xml:\"fileVersion\"`\n\tWorkbookPr *xlsxWorkbookPr `xml:\"workbookPr\"`\n\tWorkbookProtection *xlsxWorkbookProtection `xml:\"workbookProtection\"`\n\tBookViews xlsxBookViews `xml:\"bookViews\"`\n\tSheets xlsxSheets `xml:\"sheets\"`\n\tExternalReferences *xlsxExternalReferences `xml:\"externalReferences\"`\n\tDefinedNames *xlsxDefinedNames `xml:\"definedNames\"`\n\tCalcPr *xlsxCalcPr `xml:\"calcPr\"`\n\tCustomWorkbookViews *xlsxCustomWorkbookViews `xml:\"customWorkbookViews\"`\n\tPivotCaches *xlsxPivotCaches `xml:\"pivotCaches\"`\n\tExtLst *xlsxExtLst `xml:\"extLst\"`\n\tFileRecoveryPr *xlsxFileRecoveryPr `xml:\"fileRecoveryPr\"`\n}\n\n\/\/ xlsxFileRecoveryPr maps sheet recovery information. This element defines\n\/\/ properties that track the state of the workbook file, such as whether the\n\/\/ file was saved during a crash, or whether it should be opened in auto-recover\n\/\/ mode.\ntype xlsxFileRecoveryPr struct {\n\tAutoRecover bool `xml:\"autoRecover,attr,omitempty\"`\n\tCrashSave bool `xml:\"crashSave,attr,omitempty\"`\n\tDataExtractLoad bool `xml:\"dataExtractLoad,attr,omitempty\"`\n\tRepairLoad bool `xml:\"repairLoad,attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbookProtection directly maps the workbookProtection element. This\n\/\/ element specifies options for protecting data in the workbook. Applications\n\/\/ might use workbook protection to prevent anyone from accidentally changing,\n\/\/ moving, or deleting important data. This protection can be ignored by\n\/\/ applications which choose not to support this optional protection mechanism.\n\/\/ When a password is to be hashed and stored in this element, it shall be\n\/\/ hashed as defined below, starting from a UTF-16LE encoded string value. If\n\/\/ there is a leading BOM character (U+FEFF) in the encoded password it is\n\/\/ removed before hash calculation.\ntype xlsxWorkbookProtection struct {\n\tLockRevision bool `xml:\"lockRevision,attr,omitempty\"`\n\tLockStructure bool `xml:\"lockStructure,attr,omitempty\"`\n\tLockWindows bool `xml:\"lockWindows,attr,omitempty\"`\n\tRevisionsAlgorithmName string `xml:\"revisionsAlgorithmName,attr,omitempty\"`\n\tRevisionsHashValue string `xml:\"revisionsHashValue,attr,omitempty\"`\n\tRevisionsSaltValue string `xml:\"revisionsSaltValue,attr,omitempty\"`\n\tRevisionsSpinCount int `xml:\"revisionsSpinCount,attr,omitempty\"`\n\tWorkbookAlgorithmName string `xml:\"workbookAlgorithmName,attr,omitempty\"`\n\tWorkbookHashValue string `xml:\"workbookHashValue,attr,omitempty\"`\n\tWorkbookSaltValue string `xml:\"workbookSaltValue,attr,omitempty\"`\n\tWorkbookSpinCount int `xml:\"workbookSpinCount,attr,omitempty\"`\n}\n\n\/\/ xlsxFileVersion directly maps the fileVersion element. This element defines\n\/\/ properties that track which version of the application accessed the data and\n\/\/ source code contained in the file.\ntype xlsxFileVersion struct {\n\tAppName string `xml:\"appName,attr,omitempty\"`\n\tCodeName string `xml:\"codeName,attr,omitempty\"`\n\tLastEdited string `xml:\"lastEdited,attr,omitempty\"`\n\tLowestEdited string `xml:\"lowestEdited,attr,omitempty\"`\n\tRupBuild string `xml:\"rupBuild,attr,omitempty\"`\n}\n\n\/\/ xlsxWorkbookPr directly maps the workbookPr element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ defines a collection of workbook properties.\ntype xlsxWorkbookPr struct {\n\tAllowRefreshQuery bool `xml:\"allowRefreshQuery,attr,omitempty\"`\n\tAutoCompressPictures bool `xml:\"autoCompressPictures,attr,omitempty\"`\n\tBackupFile bool `xml:\"backupFile,attr,omitempty\"`\n\tCheckCompatibility bool `xml:\"checkCompatibility,attr,omitempty\"`\n\tCodeName string `xml:\"codeName,attr,omitempty\"`\n\tDate1904 bool `xml:\"date1904,attr,omitempty\"`\n\tDefaultThemeVersion string `xml:\"defaultThemeVersion,attr,omitempty\"`\n\tFilterPrivacy bool `xml:\"filterPrivacy,attr,omitempty\"`\n\tHidePivotFieldList bool `xml:\"hidePivotFieldList,attr,omitempty\"`\n\tPromptedSolutions bool `xml:\"promptedSolutions,attr,omitempty\"`\n\tPublishItems bool `xml:\"publishItems,attr,omitempty\"`\n\tRefreshAllConnections bool `xml:\"refreshAllConnections,attr,omitempty\"`\n\tSaveExternalLinkValues bool `xml:\"saveExternalLinkValues,attr,omitempty\"`\n\tShowBorderUnselectedTables bool `xml:\"showBorderUnselectedTables,attr,omitempty\"`\n\tShowInkAnnotation bool `xml:\"showInkAnnotation,attr,omitempty\"`\n\tShowObjects string `xml:\"showObjects,attr,omitempty\"`\n\tShowPivotChartFilter bool `xml:\"showPivotChartFilter,attr,omitempty\"`\n\tUpdateLinks string `xml:\"updateLinks,attr,omitempty\"`\n}\n\n\/\/ xlsxBookViews directly maps the bookViews element. This element specifies the\n\/\/ collection of workbook views of the enclosing workbook. Each view can specify\n\/\/ a window position, filter options, and other configurations. There is no\n\/\/ limit on the number of workbook views that can be defined for a workbook.\ntype xlsxBookViews struct {\n\tWorkBookView []xlsxWorkBookView `xml:\"workbookView\"`\n}\n\n\/\/ xlsxWorkBookView directly maps the workbookView element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ specifies a single Workbook view.\ntype xlsxWorkBookView struct {\n\tActiveTab int `xml:\"activeTab,attr,omitempty\"`\n\tAutoFilterDateGrouping bool `xml:\"autoFilterDateGrouping,attr,omitempty\"`\n\tFirstSheet int `xml:\"firstSheet,attr,omitempty\"`\n\tMinimized bool `xml:\"minimized,attr,omitempty\"`\n\tShowHorizontalScroll bool `xml:\"showHorizontalScroll,attr,omitempty\"`\n\tShowSheetTabs bool `xml:\"showSheetTabs,attr,omitempty\"`\n\tShowVerticalScroll bool `xml:\"showVerticalScroll,attr,omitempty\"`\n\tTabRatio int `xml:\"tabRatio,attr,omitempty\"`\n\tVisibility string `xml:\"visibility,attr,omitempty\"`\n\tWindowHeight int `xml:\"windowHeight,attr,omitempty\"`\n\tWindowWidth int `xml:\"windowWidth,attr,omitempty\"`\n\tXWindow string `xml:\"xWindow,attr,omitempty\"`\n\tYWindow string `xml:\"yWindow,attr,omitempty\"`\n}\n\n\/\/ xlsxSheets directly maps the sheets element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main.\ntype xlsxSheets struct {\n\tSheet []xlsxSheet `xml:\"sheet\"`\n}\n\n\/\/ xlsxSheet directly maps the sheet element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main - currently I have\n\/\/ not checked it for completeness - it does as much as I need.\ntype xlsxSheet struct {\n\tName string `xml:\"name,attr,omitempty\"`\n\tSheetID string `xml:\"sheetId,attr,omitempty\"`\n\tID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n\tState string `xml:\"state,attr,omitempty\"`\n}\n\n\/\/ xlsxExternalReferences directly maps the externalReferences element of the\n\/\/ external workbook references part.\ntype xlsxExternalReferences struct {\n\tExternalReference []xlsxExternalReference `xml:\"externalReference\"`\n}\n\n\/\/ xlsxExternalReference directly maps the externalReference element of the\n\/\/ external workbook references part.\ntype xlsxExternalReference struct {\n\tRID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n}\n\n\/\/ xlsxPivotCaches element enumerates pivot cache definition parts used by pivot\n\/\/ tables and formulas in this workbook.\ntype xlsxPivotCaches struct {\n\tPivotCache []xlsxPivotCache `xml:\"pivotCache\"`\n}\n\n\/\/ xlsxPivotCache directly maps the pivotCache element.\ntype xlsxPivotCache struct {\n\tCacheID int `xml:\"cacheId,attr,omitempty\"`\n\tRID string `xml:\"http:\/\/schemas.openxmlformats.org\/officeDocument\/2006\/relationships id,attr,omitempty\"`\n}\n\n\/\/ extLst element provides a convention for extending spreadsheetML in\n\/\/ predefined locations. The locations shall be denoted with the extLst element,\n\/\/ and are called extension lists. Extension list locations within the markup\n\/\/ document are specified in the markup specification and can be used to store\n\/\/ extensions to the markup specification, whether those are future version\n\/\/ extensions of the markup specification or are private extensions implemented\n\/\/ independently from the markup specification. Markup within an extension might\n\/\/ not be understood by a consumer.\ntype xlsxExtLst struct {\n\tExt string `xml:\",innerxml\"`\n}\n\n\/\/ xlsxDefinedNames directly maps the definedNames element. This element defines\n\/\/ the collection of defined names for this workbook. Defined names are\n\/\/ descriptive names to represent cells, ranges of cells, formulas, or constant\n\/\/ values. Defined names can be used to represent a range on any worksheet.\ntype xlsxDefinedNames struct {\n\tDefinedName []xlsxDefinedName `xml:\"definedName\"`\n}\n\n\/\/ xlsxDefinedName directly maps the definedName element from the namespace\n\/\/ http:\/\/schemas.openxmlformats.org\/spreadsheetml\/2006\/main This element\n\/\/ defines a defined name within this workbook. A defined name is descriptive\n\/\/ text that is used to represents a cell, range of cells, formula, or constant\n\/\/ value. For a descriptions of the attributes see https:\/\/msdn.microsoft.com\/en-us\/library\/office\/documentformat.openxml.spreadsheet.definedname.aspx\ntype xlsxDefinedName struct {\n\tComment string `xml:\"comment,attr,omitempty\"`\n\tCustomMenu string `xml:\"customMenu,attr,omitempty\"`\n\tDescription string `xml:\"description,attr,omitempty\"`\n\tFunction bool `xml:\"function,attr,omitempty\"`\n\tFunctionGroupID int `xml:\"functionGroupId,attr,omitempty\"`\n\tHelp string `xml:\"help,attr,omitempty\"`\n\tHidden bool `xml:\"hidden,attr,omitempty\"`\n\tLocalSheetID *int `xml:\"localSheetId,attr\"`\n\tName string `xml:\"name,attr,omitempty\"`\n\tPublishToServer bool `xml:\"publishToServer,attr,omitempty\"`\n\tShortcutKey string `xml:\"shortcutKey,attr,omitempty\"`\n\tStatusBar string `xml:\"statusBar,attr,omitempty\"`\n\tVbProcedure bool `xml:\"vbProcedure,attr,omitempty\"`\n\tWorkbookParameter bool `xml:\"workbookParameter,attr,omitempty\"`\n\tXlm bool `xml:\"xml,attr,omitempty\"`\n\tData string `xml:\",chardata\"`\n}\n\n\/\/ xlsxCalcPr directly maps the calcPr element. This element defines the\n\/\/ collection of properties the application uses to record calculation status\n\/\/ and details. Calculation is the process of computing formulas and then\n\/\/ displaying the results as values in the cells that contain the formulas.\ntype xlsxCalcPr struct {\n\tCalcCompleted bool `xml:\"calcCompleted,attr,omitempty\"`\n\tCalcID string `xml:\"calcId,attr,omitempty\"`\n\tCalcMode string `xml:\"calcMode,attr,omitempty\"`\n\tCalcOnSave bool `xml:\"calcOnSave,attr,omitempty\"`\n\tConcurrentCalc *bool `xml:\"concurrentCalc,attr\"`\n\tConcurrentManualCount int `xml:\"concurrentManualCount,attr,omitempty\"`\n\tForceFullCalc bool `xml:\"forceFullCalc,attr,omitempty\"`\n\tFullCalcOnLoad bool `xml:\"fullCalcOnLoad,attr,omitempty\"`\n\tFullPrecision bool `xml:\"fullPrecision,attr,omitempty\"`\n\tIterate bool `xml:\"iterate,attr,omitempty\"`\n\tIterateCount int `xml:\"iterateCount,attr,omitempty\"`\n\tIterateDelta float64 `xml:\"iterateDelta,attr,omitempty\"`\n\tRefMode string `xml:\"refMode,attr,omitempty\"`\n}\n\n\/\/ xlsxCustomWorkbookViews defines the collection of custom workbook views that\n\/\/ are defined for this workbook. A customWorkbookView is similar in concept to\n\/\/ a workbookView in that its attributes contain settings related to the way\n\/\/ that the workbook should be displayed on a screen by a spreadsheet\n\/\/ application.\ntype xlsxCustomWorkbookViews struct {\n\tCustomWorkbookView []xlsxCustomWorkbookView `xml:\"customWorkbookView\"`\n}\n\n\/\/ xlsxCustomWorkbookView directly maps the customWorkbookView element. This\n\/\/ element specifies a single custom workbook view. A custom workbook view\n\/\/ consists of a set of display and print settings that you can name and apply\n\/\/ to a workbook. You can create more than one custom workbook view of the same\n\/\/ workbook. Custom Workbook Views are not required in order to construct a\n\/\/ valid SpreadsheetML document, and are not necessary if the document is never\n\/\/ displayed by a spreadsheet application, or if the spreadsheet application has\n\/\/ a fixed display for workbooks. However, if a spreadsheet application chooses\n\/\/ to implement configurable display modes, the customWorkbookView element\n\/\/ should be used to persist the settings for those display modes.\ntype xlsxCustomWorkbookView struct {\n\tActiveSheetID *int `xml:\"activeSheetId,attr\"`\n\tAutoUpdate *bool `xml:\"autoUpdate,attr\"`\n\tChangesSavedWin *bool `xml:\"changesSavedWin,attr\"`\n\tGUID *string `xml:\"guid,attr\"`\n\tIncludeHiddenRowCol *bool `xml:\"includeHiddenRowCol,attr\"`\n\tIncludePrintSettings *bool `xml:\"includePrintSettings,attr\"`\n\tMaximized *bool `xml:\"maximized,attr\"`\n\tMergeInterval int `xml:\"mergeInterval,attr\"`\n\tMinimized *bool `xml:\"minimized,attr\"`\n\tName *string `xml:\"name,attr\"`\n\tOnlySync *bool `xml:\"onlySync,attr\"`\n\tPersonalView *bool `xml:\"personalView,attr\"`\n\tShowComments *string `xml:\"showComments,attr\"`\n\tShowFormulaBar *bool `xml:\"showFormulaBar,attr\"`\n\tShowHorizontalScroll *bool `xml:\"showHorizontalScroll,attr\"`\n\tShowObjects *string `xml:\"showObjects,attr\"`\n\tShowSheetTabs *bool `xml:\"showSheetTabs,attr\"`\n\tShowStatusbar *bool `xml:\"showStatusbar,attr\"`\n\tShowVerticalScroll *bool `xml:\"showVerticalScroll,attr\"`\n\tTabRatio *int `xml:\"tabRatio,attr\"`\n\tWindowHeight *int `xml:\"windowHeight,attr\"`\n\tWindowWidth *int `xml:\"windowWidth,attr\"`\n\tXWindow *int `xml:\"xWindow,attr\"`\n\tYWindow *int `xml:\"yWindow,attr\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package design\n\nimport (\n\t\"net\/http\"\n\n\t\"goa.design\/goa.v2\/codegen\"\n\t\"goa.design\/goa.v2\/design\"\n)\n\n\/\/ RequestBody returns an attribute describing the request body of the given\n\/\/ endpoint. If the DSL defines a body explicitly via the Body function then the\n\/\/ corresponding attribute is used. Otherwise the attribute is computed by\n\/\/ removing the attributes of the method payload used to define headers and\n\/\/ parameters.\nfunc RequestBody(a *EndpointExpr) *design.AttributeExpr {\n\tif a.Body != nil {\n\t\treturn a.Body\n\t}\n\n\tvar (\n\t\tpayload = a.MethodExpr.Payload\n\t\theaders = a.MappedHeaders()\n\t\tparams = a.AllParams()\n\t\tsuffix = \"RequestBody\"\n\t\tname = codegen.Goify(a.Name(), true) + suffix\n\t)\n\n\tbodyOnly := len(*design.AsObject(headers.Type)) == 0 &&\n\t\tlen(*design.AsObject(params.Type)) == 0\n\n\t\/\/ 1. If Payload is not an object then check whether there are params or\n\t\/\/ headers defined and if so return empty type (payload encoded in\n\t\/\/ request params or headers) otherwise return payload type (payload\n\t\/\/ encoded in request body).\n\tif !design.IsObject(payload.Type) {\n\t\tif bodyOnly {\n\t\t\trenameType(payload, name, \"RequestBody\")\n\t\t\treturn payload\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Remove header and param attributes\n\tbody := design.NewMappedAttributeExpr(payload)\n\tremoveAttributes(body, headers)\n\tremoveAttributes(body, params)\n\n\t\/\/ 3. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 4. Build computed user type\n\tatt := body.Attribute()\n\tut := &design.UserTypeExpr{\n\t\tAttributeExpr: att,\n\t\tTypeName: name,\n\t}\n\tappendSuffix(ut.Attribute().Type, \"RequestBody\")\n\n\treturn &design.AttributeExpr{\n\t\tType: ut,\n\t\tValidation: att.Validation,\n\t\tUserExamples: att.UserExamples,\n\t}\n}\n\n\/\/ ResponseBody returns an attribute representing the response body for the\n\/\/ given endpoint and response. If the DSL defines a body explicitly via the\n\/\/ Body function then the corresponding attribute is used. Otherwise the\n\/\/ attribute is computed by removing the attributes of the method payload used\n\/\/ to define headers and parameters. Also if the response defines a view then\n\/\/ the response result type is projected first.\nfunc ResponseBody(a *EndpointExpr, resp *HTTPResponseExpr) *design.AttributeExpr {\n\tresult := a.MethodExpr.Result\n\tif result == nil || result.Type == design.Empty {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\tif resp.Body != nil {\n\t\treturn resp.Body\n\t}\n\n\tvar suffix string\n\tif len(a.Responses) > 1 {\n\t\tsuffix = http.StatusText(resp.StatusCode)\n\t}\n\n\tvar (\n\t\theaders = resp.MappedHeaders()\n\t\tname = codegen.Goify(a.Name(), true) + suffix + \"ResponseBody\"\n\t)\n\n\t\/\/ 1. If Result is not an object then check whether there are headers\n\t\/\/ defined and if so return empty type (result encoded in response\n\t\/\/ headers) otherwise return renamed result type (result encoded in\n\t\/\/ response body).\n\tif !design.IsObject(result.Type) {\n\t\tif len(*design.AsObject(resp.Headers().Type)) == 0 {\n\t\t\tresult = design.DupAtt(result)\n\t\t\trenameType(result, name, \"ResponseBody\")\n\t\t\treturn result\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Project if response type is result type and attribute has a view.\n\trt, isrt := result.Type.(*design.ResultTypeExpr)\n\tif isrt {\n\t\tif v := result.Metadata[\"view\"]; len(v) > 0 {\n\t\t\tdt, err := design.Project(rt, v[0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) \/\/ bug\n\t\t\t}\n\t\t\tresult = design.DupAtt(result)\n\t\t\tresult.Type = dt\n\t\t}\n\t}\n\n\t\/\/ 3. Remove header attributes\n\tbody := design.NewMappedAttributeExpr(result)\n\tremoveAttributes(body, headers)\n\n\t\/\/ 4. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 5. Build computed user type\n\tuserType := &design.UserTypeExpr{\n\t\tAttributeExpr: body.Attribute(),\n\t\tTypeName: name,\n\t}\n\tif isrt {\n\t\tviews := make([]*design.ViewExpr, len(rt.Views))\n\t\tfor i, v := range rt.Views {\n\t\t\tmv := design.NewMappedAttributeExpr(v.AttributeExpr)\n\t\t\tremoveAttributes(mv, headers)\n\t\t\tnv := &design.ViewExpr{\n\t\t\t\tAttributeExpr: mv.Attribute(),\n\t\t\t\tName: v.Name,\n\t\t\t}\n\t\t\tviews[i] = nv\n\t\t}\n\t\tnmt := &design.ResultTypeExpr{\n\t\t\tUserTypeExpr: userType,\n\t\t\tIdentifier: rt.Identifier,\n\t\t\tContentType: rt.ContentType,\n\t\t\tViews: views,\n\t\t}\n\t\tfor _, v := range views {\n\t\t\tv.Parent = nmt\n\t\t}\n\t\treturn &design.AttributeExpr{Type: nmt, Validation: userType.Validation}\n\t}\n\tappendSuffix(userType.Attribute().Type, \"ResponseBody\")\n\n\treturn &design.AttributeExpr{Type: userType, Validation: userType.Validation}\n}\n\n\/\/ ErrorResponseBody returns an attribute describing the response body of a\n\/\/ given error. If the DSL defines a body explicitly via the Body function then\n\/\/ the corresponding attribute is returned. Otherwise the attribute is computed\n\/\/ by removing the attributes of the error used to define headers and\n\/\/ parameters. Also if the error response defines a view then the result type is\n\/\/ projected first.\nfunc ErrorResponseBody(a *EndpointExpr, v *ErrorExpr) *design.AttributeExpr {\n\tresult := v.ErrorExpr.AttributeExpr\n\tif result == nil || result.Type == design.Empty {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\tresp := v.Response\n\tif resp.Body != nil {\n\t\treturn resp.Body\n\t}\n\n\tvar (\n\t\theaders = resp.MappedHeaders()\n\t\tsuffix = codegen.Goify(v.ErrorExpr.Name, true) + \"ResponseBody\"\n\t\tname = codegen.Goify(a.Name(), true) + suffix\n\t)\n\n\t\/\/ 1. If Result is not an object then check whether there are headers\n\t\/\/ defined and if so return empty type (result encoded in response\n\t\/\/ headers) otherwise return renamed result type (result encoded in\n\t\/\/ response body).\n\tif !design.IsObject(result.Type) {\n\t\tif len(*design.AsObject(resp.Headers().Type)) == 0 {\n\t\t\trenameType(result, name, suffix)\n\t\t\treturn result\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Project if errorResponse type is result type and attribute has a view.\n\trt, isrt := v.ErrorExpr.AttributeExpr.Type.(*design.ResultTypeExpr)\n\tif isrt {\n\t\tif v := result.Metadata[\"view\"]; len(v) > 0 {\n\t\t\tdt, err := design.Project(rt, v[0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) \/\/ bug\n\t\t\t}\n\t\t\tresult = design.DupAtt(result)\n\t\t\tresult.Type = dt\n\t\t}\n\t}\n\n\t\/\/ 3. Remove header attributes\n\tbody := design.NewMappedAttributeExpr(result)\n\tremoveAttributes(body, headers)\n\n\t\/\/ 4. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 5. Build computed user type\n\tuserType := &design.UserTypeExpr{\n\t\tAttributeExpr: body.Attribute(),\n\t\tTypeName: name,\n\t}\n\tappendSuffix(userType.Attribute().Type, suffix)\n\n\tif !isrt {\n\t\treturn &design.AttributeExpr{Type: userType, Validation: userType.Validation}\n\t}\n\n\tviews := make([]*design.ViewExpr, len(rt.Views))\n\tfor i, v := range rt.Views {\n\t\tmv := design.NewMappedAttributeExpr(v.AttributeExpr)\n\t\tremoveAttributes(mv, headers)\n\t\tnv := &design.ViewExpr{\n\t\t\tAttributeExpr: mv.Attribute(),\n\t\t\tName: v.Name,\n\t\t}\n\t\tviews[i] = nv\n\t}\n\tnmt := &design.ResultTypeExpr{\n\t\tUserTypeExpr: userType,\n\t\tIdentifier: rt.Identifier,\n\t\tContentType: rt.ContentType,\n\t\tViews: views,\n\t}\n\tfor _, v := range views {\n\t\tv.Parent = nmt\n\t}\n\treturn &design.AttributeExpr{Type: nmt, Validation: userType.Validation}\n}\n\nfunc renameType(att *design.AttributeExpr, name, suffix string) {\n\trt := att.Type\n\tswitch rt.(type) {\n\tcase design.UserType:\n\t\trt = design.Dup(rt)\n\t\trt.(design.UserType).Rename(name)\n\t\tappendSuffix(rt.(design.UserType).Attribute().Type, suffix)\n\tcase *design.Object:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\tcase *design.Array:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\tcase *design.Map:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\t}\n\tatt.Type = rt\n}\n\nfunc appendSuffix(dt design.DataType, suffix string, seen ...map[string]struct{}) {\n\tswitch actual := dt.(type) {\n\tcase design.UserType:\n\t\tvar s map[string]struct{}\n\t\tif len(seen) > 0 {\n\t\t\ts = seen[0]\n\t\t} else {\n\t\t\ts = make(map[string]struct{})\n\t\t}\n\t\tif _, ok := s[actual.Name()]; ok {\n\t\t\treturn\n\t\t}\n\t\tactual.Rename(actual.Name() + suffix)\n\t\ts[actual.Name()] = struct{}{}\n\t\tappendSuffix(actual.Attribute().Type, suffix, s)\n\tcase *design.Object:\n\t\tfor _, nat := range *actual {\n\t\t\tappendSuffix(nat.Attribute.Type, suffix, seen...)\n\t\t}\n\tcase *design.Array:\n\t\tappendSuffix(actual.ElemType.Type, suffix, seen...)\n\tcase *design.Map:\n\t\tappendSuffix(actual.KeyType.Type, suffix, seen...)\n\t\tappendSuffix(actual.ElemType.Type, suffix, seen...)\n\t}\n}\n\nfunc removeAttributes(attr, sub *design.MappedAttributeExpr) {\n\tcodegen.WalkMappedAttr(sub, func(name, _ string, _ bool, _ *design.AttributeExpr) error {\n\t\tattr.Delete(name)\n\t\tif attr.Validation != nil {\n\t\t\tattr.Validation.RemoveRequired(name)\n\t\t}\n\t\tfor _, ex := range attr.UserExamples {\n\t\t\tif m, ok := ex.Value.(map[string]interface{}); ok {\n\t\t\t\tdelete(m, name)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n<commit_msg>Fix regression.<commit_after>package design\n\nimport (\n\t\"net\/http\"\n\n\t\"goa.design\/goa.v2\/codegen\"\n\t\"goa.design\/goa.v2\/design\"\n)\n\n\/\/ RequestBody returns an attribute describing the request body of the given\n\/\/ endpoint. If the DSL defines a body explicitly via the Body function then the\n\/\/ corresponding attribute is used. Otherwise the attribute is computed by\n\/\/ removing the attributes of the method payload used to define headers and\n\/\/ parameters.\nfunc RequestBody(a *EndpointExpr) *design.AttributeExpr {\n\tif a.Body != nil {\n\t\treturn a.Body\n\t}\n\n\tvar (\n\t\tpayload = a.MethodExpr.Payload\n\t\theaders = a.MappedHeaders()\n\t\tparams = a.AllParams()\n\t\tsuffix = \"RequestBody\"\n\t\tname = codegen.Goify(a.Name(), true) + suffix\n\t)\n\n\tbodyOnly := len(*design.AsObject(headers.Type)) == 0 &&\n\t\tlen(*design.AsObject(params.Type)) == 0\n\n\t\/\/ 1. If Payload is not an object then check whether there are params or\n\t\/\/ headers defined and if so return empty type (payload encoded in\n\t\/\/ request params or headers) otherwise return payload type (payload\n\t\/\/ encoded in request body).\n\tif !design.IsObject(payload.Type) {\n\t\tif bodyOnly {\n\t\t\tpayload = design.DupAtt(payload)\n\t\t\trenameType(payload, name, \"RequestBody\")\n\t\t\treturn payload\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Remove header and param attributes\n\tbody := design.NewMappedAttributeExpr(payload)\n\tremoveAttributes(body, headers)\n\tremoveAttributes(body, params)\n\n\t\/\/ 3. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 4. Build computed user type\n\tatt := body.Attribute()\n\tut := &design.UserTypeExpr{\n\t\tAttributeExpr: att,\n\t\tTypeName: name,\n\t}\n\tappendSuffix(ut.Attribute().Type, \"RequestBody\")\n\n\treturn &design.AttributeExpr{\n\t\tType: ut,\n\t\tValidation: att.Validation,\n\t\tUserExamples: att.UserExamples,\n\t}\n}\n\n\/\/ ResponseBody returns an attribute representing the response body for the\n\/\/ given endpoint and response. If the DSL defines a body explicitly via the\n\/\/ Body function then the corresponding attribute is used. Otherwise the\n\/\/ attribute is computed by removing the attributes of the method payload used\n\/\/ to define headers and parameters. Also if the response defines a view then\n\/\/ the response result type is projected first.\nfunc ResponseBody(a *EndpointExpr, resp *HTTPResponseExpr) *design.AttributeExpr {\n\tresult := a.MethodExpr.Result\n\tif result == nil || result.Type == design.Empty {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\tif resp.Body != nil {\n\t\treturn resp.Body\n\t}\n\n\tvar suffix string\n\tif len(a.Responses) > 1 {\n\t\tsuffix = http.StatusText(resp.StatusCode)\n\t}\n\n\tvar (\n\t\theaders = resp.MappedHeaders()\n\t\tname = codegen.Goify(a.Name(), true) + suffix + \"ResponseBody\"\n\t)\n\n\t\/\/ 1. If Result is not an object then check whether there are headers\n\t\/\/ defined and if so return empty type (result encoded in response\n\t\/\/ headers) otherwise return renamed result type (result encoded in\n\t\/\/ response body).\n\tif !design.IsObject(result.Type) {\n\t\tif len(*design.AsObject(resp.Headers().Type)) == 0 {\n\t\t\tresult = design.DupAtt(result)\n\t\t\trenameType(result, name, \"ResponseBody\")\n\t\t\treturn result\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Project if response type is result type and attribute has a view.\n\trt, isrt := result.Type.(*design.ResultTypeExpr)\n\tif isrt {\n\t\tif v := result.Metadata[\"view\"]; len(v) > 0 {\n\t\t\tdt, err := design.Project(rt, v[0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) \/\/ bug\n\t\t\t}\n\t\t\tresult = design.DupAtt(result)\n\t\t\tresult.Type = dt\n\t\t}\n\t}\n\n\t\/\/ 3. Remove header attributes\n\tbody := design.NewMappedAttributeExpr(result)\n\tremoveAttributes(body, headers)\n\n\t\/\/ 4. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 5. Build computed user type\n\tuserType := &design.UserTypeExpr{\n\t\tAttributeExpr: body.Attribute(),\n\t\tTypeName: name,\n\t}\n\tif isrt {\n\t\tviews := make([]*design.ViewExpr, len(rt.Views))\n\t\tfor i, v := range rt.Views {\n\t\t\tmv := design.NewMappedAttributeExpr(v.AttributeExpr)\n\t\t\tremoveAttributes(mv, headers)\n\t\t\tnv := &design.ViewExpr{\n\t\t\t\tAttributeExpr: mv.Attribute(),\n\t\t\t\tName: v.Name,\n\t\t\t}\n\t\t\tviews[i] = nv\n\t\t}\n\t\tnmt := &design.ResultTypeExpr{\n\t\t\tUserTypeExpr: userType,\n\t\t\tIdentifier: rt.Identifier,\n\t\t\tContentType: rt.ContentType,\n\t\t\tViews: views,\n\t\t}\n\t\tfor _, v := range views {\n\t\t\tv.Parent = nmt\n\t\t}\n\t\treturn &design.AttributeExpr{Type: nmt, Validation: userType.Validation}\n\t}\n\tappendSuffix(userType.Attribute().Type, \"ResponseBody\")\n\n\treturn &design.AttributeExpr{Type: userType, Validation: userType.Validation}\n}\n\n\/\/ ErrorResponseBody returns an attribute describing the response body of a\n\/\/ given error. If the DSL defines a body explicitly via the Body function then\n\/\/ the corresponding attribute is returned. Otherwise the attribute is computed\n\/\/ by removing the attributes of the error used to define headers and\n\/\/ parameters. Also if the error response defines a view then the result type is\n\/\/ projected first.\nfunc ErrorResponseBody(a *EndpointExpr, v *ErrorExpr) *design.AttributeExpr {\n\tresult := v.ErrorExpr.AttributeExpr\n\tif result == nil || result.Type == design.Empty {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\tresp := v.Response\n\tif resp.Body != nil {\n\t\treturn resp.Body\n\t}\n\n\tvar (\n\t\theaders = resp.MappedHeaders()\n\t\tsuffix = codegen.Goify(v.ErrorExpr.Name, true) + \"ResponseBody\"\n\t\tname = codegen.Goify(a.Name(), true) + suffix\n\t)\n\n\t\/\/ 1. If Result is not an object then check whether there are headers\n\t\/\/ defined and if so return empty type (result encoded in response\n\t\/\/ headers) otherwise return renamed result type (result encoded in\n\t\/\/ response body).\n\tif !design.IsObject(result.Type) {\n\t\tif len(*design.AsObject(resp.Headers().Type)) == 0 {\n\t\t\trenameType(result, name, suffix)\n\t\t\treturn result\n\t\t}\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 2. Project if errorResponse type is result type and attribute has a view.\n\trt, isrt := v.ErrorExpr.AttributeExpr.Type.(*design.ResultTypeExpr)\n\tif isrt {\n\t\tif v := result.Metadata[\"view\"]; len(v) > 0 {\n\t\t\tdt, err := design.Project(rt, v[0])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) \/\/ bug\n\t\t\t}\n\t\t\tresult = design.DupAtt(result)\n\t\t\tresult.Type = dt\n\t\t}\n\t}\n\n\t\/\/ 3. Remove header attributes\n\tbody := design.NewMappedAttributeExpr(result)\n\tremoveAttributes(body, headers)\n\n\t\/\/ 4. Return empty type if no attribute left\n\tif len(*design.AsObject(body.Type)) == 0 {\n\t\treturn &design.AttributeExpr{Type: design.Empty}\n\t}\n\n\t\/\/ 5. Build computed user type\n\tuserType := &design.UserTypeExpr{\n\t\tAttributeExpr: body.Attribute(),\n\t\tTypeName: name,\n\t}\n\tappendSuffix(userType.Attribute().Type, suffix)\n\n\tif !isrt {\n\t\treturn &design.AttributeExpr{Type: userType, Validation: userType.Validation}\n\t}\n\n\tviews := make([]*design.ViewExpr, len(rt.Views))\n\tfor i, v := range rt.Views {\n\t\tmv := design.NewMappedAttributeExpr(v.AttributeExpr)\n\t\tremoveAttributes(mv, headers)\n\t\tnv := &design.ViewExpr{\n\t\t\tAttributeExpr: mv.Attribute(),\n\t\t\tName: v.Name,\n\t\t}\n\t\tviews[i] = nv\n\t}\n\tnmt := &design.ResultTypeExpr{\n\t\tUserTypeExpr: userType,\n\t\tIdentifier: rt.Identifier,\n\t\tContentType: rt.ContentType,\n\t\tViews: views,\n\t}\n\tfor _, v := range views {\n\t\tv.Parent = nmt\n\t}\n\treturn &design.AttributeExpr{Type: nmt, Validation: userType.Validation}\n}\n\nfunc renameType(att *design.AttributeExpr, name, suffix string) {\n\trt := att.Type\n\tswitch rt.(type) {\n\tcase design.UserType:\n\t\trt = design.Dup(rt)\n\t\trt.(design.UserType).Rename(name)\n\t\tappendSuffix(rt.(design.UserType).Attribute().Type, suffix)\n\tcase *design.Object:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\tcase *design.Array:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\tcase *design.Map:\n\t\trt = design.Dup(rt)\n\t\tappendSuffix(rt, suffix)\n\t}\n\tatt.Type = rt\n}\n\nfunc appendSuffix(dt design.DataType, suffix string, seen ...map[string]struct{}) {\n\tswitch actual := dt.(type) {\n\tcase design.UserType:\n\t\tvar s map[string]struct{}\n\t\tif len(seen) > 0 {\n\t\t\ts = seen[0]\n\t\t} else {\n\t\t\ts = make(map[string]struct{})\n\t\t}\n\t\tif _, ok := s[actual.Name()]; ok {\n\t\t\treturn\n\t\t}\n\t\tactual.Rename(actual.Name() + suffix)\n\t\ts[actual.Name()] = struct{}{}\n\t\tappendSuffix(actual.Attribute().Type, suffix, s)\n\tcase *design.Object:\n\t\tfor _, nat := range *actual {\n\t\t\tappendSuffix(nat.Attribute.Type, suffix, seen...)\n\t\t}\n\tcase *design.Array:\n\t\tappendSuffix(actual.ElemType.Type, suffix, seen...)\n\tcase *design.Map:\n\t\tappendSuffix(actual.KeyType.Type, suffix, seen...)\n\t\tappendSuffix(actual.ElemType.Type, suffix, seen...)\n\t}\n}\n\nfunc removeAttributes(attr, sub *design.MappedAttributeExpr) {\n\tcodegen.WalkMappedAttr(sub, func(name, _ string, _ bool, _ *design.AttributeExpr) error {\n\t\tattr.Delete(name)\n\t\tif attr.Validation != nil {\n\t\t\tattr.Validation.RemoveRequired(name)\n\t\t}\n\t\tfor _, ex := range attr.UserExamples {\n\t\t\tif m, ok := ex.Value.(map[string]interface{}); ok {\n\t\t\t\tdelete(m, name)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package gocube\n\nimport (\n\t\"strconv\"\n)\n\n\/\/ A CubieCorner represents a physical corner of a cube.\n\/\/\n\/\/ To understand the meaning of a CubieCorner's fields, you must first\n\/\/ understand the coordinate system. There are there axes, x, y, and z.\n\/\/ The x axis is 0 at the L face and 1 at the R face.\n\/\/ The y axis is 0 at the D face and 1 at the U face.\n\/\/ The z axis is 0 at the B face and 1 at the F face.\n\/\/\n\/\/ A corner piece's index is determined by it's original position on the cube.\n\/\/ The index is a binary number of the form ZYX, where Z is the most significant\n\/\/ digit. Thus, the BLD corner is 0, the BRU corner is 3, the FRU corner is 7,\n\/\/ etc.\n\/\/\n\/\/ The orientation of a corner tells how it is twisted. It is an axis number 0,\n\/\/ 1, or 2 for x, y, or z respectively. It indicates the direction normal to the\n\/\/ red or orange sticker (i.e. the sticker that is usually normal to the x\n\/\/ axis).\ntype CubieCorner struct {\n\tPiece int\n\tOrientation int\n}\n\n\/\/ CubieCorners represents the corners of a cube.\ntype CubieCorners [8]CubieCorner\n\n\/\/ SolvedCubieCorners generates the corners of a solved cube.\nfunc SolvedCubieCorners() CubieCorners {\n\tvar res CubieCorners\n\tfor i := 0; i < 8; i++ {\n\t\tres[i].Piece = i\n\t}\n\treturn res\n}\n\n\/\/ HalfTurn performs a 180 degree turn on a given face.\nfunc (c *CubieCorners) HalfTurn(face int) {\n\t\/\/ A double turn is really just two swaps.\n\tswitch face {\n\tcase 1: \/\/ Top face\n\t\tc[2], c[7] = c[7], c[2]\n\t\tc[3], c[6] = c[6], c[3]\n\tcase 2: \/\/ Bottom face\n\t\tc[0], c[5] = c[5], c[0]\n\t\tc[1], c[4] = c[4], c[1]\n\tcase 3: \/\/ Front face\n\t\tc[5], c[6] = c[6], c[5]\n\t\tc[4], c[7] = c[7], c[4]\n\tcase 4: \/\/ Back face\n\t\tc[0], c[3] = c[3], c[0]\n\t\tc[1], c[2] = c[2], c[1]\n\tcase 5: \/\/ Right face\n\t\tc[1], c[7] = c[7], c[1]\n\t\tc[3], c[5] = c[5], c[3]\n\tcase 6: \/\/ Left face\n\t\tc[0], c[6] = c[6], c[0]\n\t\tc[2], c[4] = c[4], c[2]\n\tdefault:\n\t\tpanic(\"Unsupported half-turn applied to CubieCorners: \" +\n\t\t\tstrconv.Itoa(face))\n\t}\n}\n\n\/\/ Move applies a face turn to the corners.\nfunc (c *CubieCorners) Move(m Move) {\n\t\/\/ Half turns are a simple case.\n\tif m.Turns == 2 {\n\t\tc.HalfTurn(m.Face)\n\t} else {\n\t\tc.QuarterTurn(m.Face, m.Turns)\n\t}\n}\n\n\/\/ QuarterTurn performs a 90 degree turn on a given face.\nfunc (c *CubieCorners) QuarterTurn(face, turns int) {\n\t\/\/ This code is not particularly graceful, but it is rather efficient and\n\t\/\/ quite readable compared to a pure array of transformations.\n\tswitch face {\n\tcase 1: \/\/ Top face\n\t\tif turns == 1 {\n\t\t\tc[2], c[3], c[7], c[6] = c[6], c[2], c[3], c[7]\n\t\t} else {\n\t\t\tc[6], c[2], c[3], c[7] = c[2], c[3], c[7], c[6]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 2.\n\t\tfor _, i := range []int{2, 3, 6, 7} {\n\t\t\tc[i].Orientation = 2 - c[i].Orientation\n\t\t}\n\tcase 2: \/\/ Bottom face\n\t\tif turns == 1 {\n\t\t\tc[4], c[0], c[1], c[5] = c[0], c[1], c[5], c[4]\n\t\t} else {\n\t\t\tc[0], c[1], c[5], c[4] = c[4], c[0], c[1], c[5]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 2.\n\t\tfor _, i := range []int{0, 1, 4, 5} {\n\t\t\tc[i].Orientation = 2 - c[i].Orientation\n\t\t}\n\tcase 3: \/\/ Front face\n\t\tif turns == 1 {\n\t\t\tc[6], c[7], c[5], c[4] = c[4], c[6], c[7], c[5]\n\t\t} else {\n\t\t\tc[4], c[6], c[7], c[5] = c[6], c[7], c[5], c[4]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 1.\n\t\tfor _, i := range []int{4, 5, 6, 7} {\n\t\t\tif c[i].Orientation == 0 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t} else if c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 0\n\t\t\t}\n\t\t}\n\tcase 4: \/\/ Back face\n\t\tif turns == 1 {\n\t\t\tc[0], c[2], c[3], c[1] = c[2], c[3], c[1], c[0]\n\t\t} else {\n\t\t\tc[2], c[3], c[1], c[0] = c[0], c[2], c[3], c[1]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 1.\n\t\tfor _, i := range []int{0, 1, 2, 3} {\n\t\t\tif c[i].Orientation == 0 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t} else if c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 0\n\t\t\t}\n\t\t}\n\tcase 5: \/\/ Right face\n\t\tif turns == 1 {\n\t\t\tc[7], c[3], c[1], c[5] = c[5], c[7], c[3], c[1]\n\t\t} else {\n\t\t\tc[5], c[7], c[3], c[1] = c[7], c[3], c[1], c[5]\n\t\t}\n\t\t\/\/ Swap orientation 2 with orientation 1.\n\t\tfor _, i := range []int{1, 3, 5, 7} {\n\t\t\tif c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 2\n\t\t\t} else if c[i].Orientation == 2 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t}\n\t\t}\n\tcase 6: \/\/ Left face\n\t\tif turns == 1 {\n\t\t\tc[4], c[6], c[2], c[0] = c[6], c[2], c[0], c[4]\n\t\t} else {\n\t\t\tc[6], c[2], c[0], c[4] = c[4], c[6], c[2], c[0]\n\t\t}\n\t\t\/\/ Swap orientation 2 with orientation 1.\n\t\tfor _, i := range []int{0, 2, 4, 6} {\n\t\t\tif c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 2\n\t\t\t} else if c[i].Orientation == 2 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported quarter-turn applied to CubieCorners: \" +\n\t\t\tstrconv.Itoa(face))\n\t}\n}\n\n\/\/ Solved returns true if all the corners are properly positioned and oriented.\nfunc (c *CubieCorners) Solved() bool {\n\tfor i := 0; i < 8; i++ {\n\t\tif c[i].Piece != i || c[i].Orientation != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>drafted CubieCorners.Search()<commit_after>package gocube\n\nimport (\n\t\"strconv\"\n\t\"sync\"\n)\n\n\/\/ A CornersGoal represents an abstract goal state for a depth-first search of\n\/\/ the cube's corners.\ntype CornersGoal interface {\n\tIsGoal(c CubieCorners) bool\n}\n\n\/\/ A CornersPruner is used as a lower-bound heuristic for a depth-first search\n\/\/ of the cube's corners.\ntype CornersPruner interface {\n\tMinMoves(c CubieCorners) int\n}\n\n\/\/ A CubieCorner represents a physical corner of a cube.\n\/\/\n\/\/ To understand the meaning of a CubieCorner's fields, you must first\n\/\/ understand the coordinate system. There are there axes, x, y, and z.\n\/\/ The x axis is 0 at the L face and 1 at the R face.\n\/\/ The y axis is 0 at the D face and 1 at the U face.\n\/\/ The z axis is 0 at the B face and 1 at the F face.\n\/\/\n\/\/ A corner piece's index is determined by it's original position on the cube.\n\/\/ The index is a binary number of the form ZYX, where Z is the most significant\n\/\/ digit. Thus, the BLD corner is 0, the BRU corner is 3, the FRU corner is 7,\n\/\/ etc.\n\/\/\n\/\/ The orientation of a corner tells how it is twisted. It is an axis number 0,\n\/\/ 1, or 2 for x, y, or z respectively. It indicates the direction normal to the\n\/\/ red or orange sticker (i.e. the sticker that is usually normal to the x\n\/\/ axis).\ntype CubieCorner struct {\n\tPiece int\n\tOrientation int\n}\n\n\/\/ CubieCorners represents the corners of a cube.\ntype CubieCorners [8]CubieCorner\n\n\/\/ SolvedCubieCorners generates the corners of a solved cube.\nfunc SolvedCubieCorners() CubieCorners {\n\tvar res CubieCorners\n\tfor i := 0; i < 8; i++ {\n\t\tres[i].Piece = i\n\t}\n\treturn res\n}\n\n\/\/ HalfTurn performs a 180 degree turn on a given face.\nfunc (c *CubieCorners) HalfTurn(face int) {\n\t\/\/ A double turn is really just two swaps.\n\tswitch face {\n\tcase 1: \/\/ Top face\n\t\tc[2], c[7] = c[7], c[2]\n\t\tc[3], c[6] = c[6], c[3]\n\tcase 2: \/\/ Bottom face\n\t\tc[0], c[5] = c[5], c[0]\n\t\tc[1], c[4] = c[4], c[1]\n\tcase 3: \/\/ Front face\n\t\tc[5], c[6] = c[6], c[5]\n\t\tc[4], c[7] = c[7], c[4]\n\tcase 4: \/\/ Back face\n\t\tc[0], c[3] = c[3], c[0]\n\t\tc[1], c[2] = c[2], c[1]\n\tcase 5: \/\/ Right face\n\t\tc[1], c[7] = c[7], c[1]\n\t\tc[3], c[5] = c[5], c[3]\n\tcase 6: \/\/ Left face\n\t\tc[0], c[6] = c[6], c[0]\n\t\tc[2], c[4] = c[4], c[2]\n\tdefault:\n\t\tpanic(\"Unsupported half-turn applied to CubieCorners: \" +\n\t\t\tstrconv.Itoa(face))\n\t}\n}\n\n\/\/ Move applies a face turn to the corners.\nfunc (c *CubieCorners) Move(m Move) {\n\t\/\/ Half turns are a simple case.\n\tif m.Turns == 2 {\n\t\tc.HalfTurn(m.Face)\n\t} else {\n\t\tc.QuarterTurn(m.Face, m.Turns)\n\t}\n}\n\n\/\/ QuarterTurn performs a 90 degree turn on a given face.\nfunc (c *CubieCorners) QuarterTurn(face, turns int) {\n\t\/\/ This code is not particularly graceful, but it is rather efficient and\n\t\/\/ quite readable compared to a pure array of transformations.\n\tswitch face {\n\tcase 1: \/\/ Top face\n\t\tif turns == 1 {\n\t\t\tc[2], c[3], c[7], c[6] = c[6], c[2], c[3], c[7]\n\t\t} else {\n\t\t\tc[6], c[2], c[3], c[7] = c[2], c[3], c[7], c[6]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 2.\n\t\tfor _, i := range []int{2, 3, 6, 7} {\n\t\t\tc[i].Orientation = 2 - c[i].Orientation\n\t\t}\n\tcase 2: \/\/ Bottom face\n\t\tif turns == 1 {\n\t\t\tc[4], c[0], c[1], c[5] = c[0], c[1], c[5], c[4]\n\t\t} else {\n\t\t\tc[0], c[1], c[5], c[4] = c[4], c[0], c[1], c[5]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 2.\n\t\tfor _, i := range []int{0, 1, 4, 5} {\n\t\t\tc[i].Orientation = 2 - c[i].Orientation\n\t\t}\n\tcase 3: \/\/ Front face\n\t\tif turns == 1 {\n\t\t\tc[6], c[7], c[5], c[4] = c[4], c[6], c[7], c[5]\n\t\t} else {\n\t\t\tc[4], c[6], c[7], c[5] = c[6], c[7], c[5], c[4]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 1.\n\t\tfor _, i := range []int{4, 5, 6, 7} {\n\t\t\tif c[i].Orientation == 0 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t} else if c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 0\n\t\t\t}\n\t\t}\n\tcase 4: \/\/ Back face\n\t\tif turns == 1 {\n\t\t\tc[0], c[2], c[3], c[1] = c[2], c[3], c[1], c[0]\n\t\t} else {\n\t\t\tc[2], c[3], c[1], c[0] = c[0], c[2], c[3], c[1]\n\t\t}\n\t\t\/\/ Swap orientation 0 with orientation 1.\n\t\tfor _, i := range []int{0, 1, 2, 3} {\n\t\t\tif c[i].Orientation == 0 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t} else if c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 0\n\t\t\t}\n\t\t}\n\tcase 5: \/\/ Right face\n\t\tif turns == 1 {\n\t\t\tc[7], c[3], c[1], c[5] = c[5], c[7], c[3], c[1]\n\t\t} else {\n\t\t\tc[5], c[7], c[3], c[1] = c[7], c[3], c[1], c[5]\n\t\t}\n\t\t\/\/ Swap orientation 2 with orientation 1.\n\t\tfor _, i := range []int{1, 3, 5, 7} {\n\t\t\tif c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 2\n\t\t\t} else if c[i].Orientation == 2 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t}\n\t\t}\n\tcase 6: \/\/ Left face\n\t\tif turns == 1 {\n\t\t\tc[4], c[6], c[2], c[0] = c[6], c[2], c[0], c[4]\n\t\t} else {\n\t\t\tc[6], c[2], c[0], c[4] = c[4], c[6], c[2], c[0]\n\t\t}\n\t\t\/\/ Swap orientation 2 with orientation 1.\n\t\tfor _, i := range []int{0, 2, 4, 6} {\n\t\t\tif c[i].Orientation == 1 {\n\t\t\t\tc[i].Orientation = 2\n\t\t\t} else if c[i].Orientation == 2 {\n\t\t\t\tc[i].Orientation = 1\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported quarter-turn applied to CubieCorners: \" +\n\t\t\tstrconv.Itoa(face))\n\t}\n}\n\n\/\/ Search starts a search using the receiver as the starting state.\n\/\/ If the specified CornersPruner is nil, no pruning will be performed.\n\/\/ The depth argument specifies the maximum depth for the search.\n\/\/ The branch argument specifies how many levels of the search to parallelize.\nfunc (c *CubieCorners) Search(g CornersGoal, p CornersPruner, moves []Move,\n\tdepth, branch int) Search {\n\tres := &cornersSearch{newSimpleSearch(moves), g, p}\n\tgo func(st CubieCorners) {\n\t\tprefix := make([]Move, 0, depth)\n\t\tsearchCornersBranch(st, res, depth, branch, prefix)\n\t\tclose(res.channel)\n\t}(*c)\n\treturn res\n}\n\n\/\/ Solved returns true if all the corners are properly positioned and oriented.\nfunc (c *CubieCorners) Solved() bool {\n\tfor i := 0; i < 8; i++ {\n\t\tif c[i].Piece != i || c[i].Orientation != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ A SolveCornersGoal is satisfied when a CubieCorners is completely solved.\ntype SolveCornersGoal struct{}\n\n\/\/ IsGoal returns corners.Solved().\nfunc (_ SolveCornersGoal) IsGoal(corners CubieCorners) bool {\n\treturn corners.Solved()\n}\n\nfunc searchCorners(st CubieCorners, s *cornersSearch, depth int,\n\tprefix []Move) {\n\t\/\/ If we can't search any further, check if it's the goal.\n\tif depth == 0 {\n\t\tif s.goal.IsGoal(st) {\n\t\t\t\/\/ We must make a copy of the prefix before sending it as a\n\t\t\t\/\/ solution, since it may be modified after we return.\n\t\t\tsolution := make([]Move, len(prefix))\n\t\t\tcopy(solution, prefix)\n\t\t\ts.channel <- solution\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ Prune the state\n\tif s.prune(st) > depth {\n\t\treturn\n\t}\n\n\t\/\/ Apply each move and recurse.\n\tfor _, move := range s.moves {\n\t\tif depth > 5 && s.cancelled() {\n\t\t\treturn\n\t\t}\n\t\tnewState := st\n\t\tnewState.Move(move)\n\t\tsearchCorners(newState, s, depth-1, append(prefix, move))\n\t}\n}\n\nfunc searchCornersBranch(st CubieCorners, s *cornersSearch, depth, branch int,\n\tprefix []Move) {\n\t\/\/ If we shouldn't branch, do a regular search.\n\tif branch == 0 || depth == 0 {\n\t\tsearchCorners(st, s, depth, prefix)\n\t\treturn\n\t}\n\n\t\/\/ Prune the state\n\tif s.prune(st) > depth {\n\t\treturn\n\t}\n\n\t\/\/ Run each search on a different goroutine\n\twg := sync.WaitGroup{}\n\tfor _, move := range s.moves {\n\t\twg.Add(1)\n\t\tgo func(m Move, newState CubieCorners) {\n\t\t\t\/\/ Apply the move\n\t\t\tnewState.Move(m)\n\n\t\t\t\/\/ Create the new prefix by copying the old one.\n\t\t\tpref := make([]Move, len(prefix)+1, len(prefix)+depth)\n\t\t\tcopy(pref, prefix)\n\t\t\tpref[len(prefix)] = m\n\n\t\t\t\/\/ Branch out and search.\n\t\t\tsearchCornersBranch(newState, s, depth-1, branch-1, pref)\n\n\t\t\twg.Done()\n\t\t}(move, st)\n\t}\n\twg.Wait()\n}\n\ntype cornersSearch struct {\n\t*simpleSearch\n\tgoal CornersGoal\n\tpruner CornersPruner\n}\n\nfunc (c *cornersSearch) prune(state CubieCorners) int {\n\tif c.pruner == nil {\n\t\treturn 0\n\t}\n\treturn c.pruner.MinMoves(state)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"log\"\n \"os\"\n \"math\/big\"\n \"time\"\n\n \"github.com\/tooru\/pnbot\/prime\"\n \"github.com\/dghubble\/go-twitter\/twitter\"\n \"github.com\/dghubble\/oauth1\"\n)\n\nconst (\n queueSize = 10\n maxRetry = 10\n)\n\nfunc main() {\n log.SetFlags(log.Ldate | log.Ltime | log.Llongfile)\n\n flags := flag.NewFlagSet(\"pnbot\", flag.ExitOnError)\n consumerKey := flags.String(\"ck\", \"\", \"Twitter Consumer Key\")\n consumerSecret := flags.String(\"cs\", \"\", \"Twitter Consumer Secret\")\n accessToken := flags.String(\"at\", \"\", \"Twitter Access Token\")\n accessSecret := flags.String(\"as\", \"\", \"Twitter Access Secret\")\n flags.Parse(os.Args[1:])\n\n if *consumerKey == \"\" || *consumerSecret == \"\" || *accessToken == \"\" || *accessSecret == \"\" {\n log.Fatal(\"Consumer key\/secret and Access token\/secret required\")\n }\n\n pnbot := NewPNBot(consumerKey, consumerSecret, accessToken, accessSecret)\n pnbot.Start()\n}\n\nfunc NewPNBot(consumerKey *string, consumerSecret *string,\n accessToken *string, accessSecret *string) *PNBot {\n pnbot := &PNBot{\n consumerKey: consumerKey,\n consumerSecret: consumerSecret,\n accessToken: accessToken,\n accessSecret: accessSecret,\n prime: prime.NewPrime(),\n ch: make(chan *big.Int, queueSize),\n }\n\n return pnbot\n}\n\ntype PNBot struct {\n consumerKey *string\n consumerSecret *string\n accessToken *string\n accessSecret *string\n\n prime *prime.Prime\n ch chan *big.Int\n\n client *twitter.Client\n\n}\n\nfunc (pnbot *PNBot) Start() error {\n pnbot.client = pnbot.newClient()\n\n maxPrime, err := pnbot.getMaxPrime()\n if err != nil {\n log.Printf(\"getMaxPrime: %v\\n\", err)\n return err\n }\n\n go pnbot.makePrimes(maxPrime)\n\n return pnbot.tweetPrimes()\n}\n\nfunc (pnbot *PNBot) newClient() *twitter.Client {\n config := oauth1.NewConfig(*pnbot.consumerKey, *pnbot.consumerSecret)\n token := oauth1.NewToken(*pnbot.accessToken, *pnbot.accessSecret)\n httpClient := config.Client(oauth1.NoContext, token)\n\n return twitter.NewClient(httpClient)\n}\n\nfunc (pnbot *PNBot) getMaxPrime() (prime *big.Int, err error) {\n tweets, _, err := pnbot.client.Timelines.HomeTimeline(&twitter.HomeTimelineParams{\n SinceID: 0,\n })\n if err != nil {\n return nil, err\n }\n\n var maxPrime big.Int\n\n for _, tweet := range tweets {\n if _, ok := maxPrime.SetString(tweet.Text, 10); ok {\n log.Printf(\"continue from '%s'\", tweet.Text)\n return &maxPrime, nil\n }\n log.Printf(\"skip '%v'\", tweet.Text)\n }\n maxPrime.SetInt64(0)\n return &maxPrime, nil\n}\n\nfunc (pnbot *PNBot) makePrimes(maxPrime *big.Int) {\n var prime *big.Int\n var err error\n\n for {\n prime, err = pnbot.prime.Next()\n if err != nil {\n log.Fatalf(\"makePrimes: %v\\n\", err)\n pnbot.ch <- nil\n return\n } \n\n if prime.Cmp(maxPrime) > 0 {\n break;\n }\n log.Printf(\"makePrimes: skip %v\\n\", prime)\n }\n\n for {\n log.Printf(\"makePrimes: found %v\\n\", prime)\n pnbot.ch <- prime\n\n prime, err = pnbot.prime.Next()\n if err != nil {\n log.Fatalf(\"makePrimes: %v\\n\", err)\n pnbot.ch <- nil\n return\n } \n }\n}\n\nfunc (pnbot *PNBot) tweetPrimes() error {\n interval := 1 * time.Second\n\n for {\n prime := <- pnbot.ch\n if prime == nil {\n log.Fatalf(\"prime generator was died\")\n return fmt.Errorf(\"prime generator was died\")\n }\n\n text := prime.Text(10)\n retry := 0\n retryInterval := 10 * time.Second\n for {\n _, _, err := pnbot.client.Statuses.Update(text, nil)\n if err == nil {\n break\n }\n if retry >= maxRetry {\n return fmt.Errorf(\"Too many tweet error: %v\\n\", err)\n }\n log.Printf(\"Tweet error[%d\/%d]:sleep=%s: %v\\n\", retry+1, maxRetry, retryInterval, err)\n\n time.Sleep(retryInterval)\n pnbot.client = pnbot.newClient()\n retry++\n retryInterval *= 2\n continue\n }\n if retry > 0 {\n interval += 1 * time.Second\n }\n\n log.Printf(\"tweet %s :sleep=%s\\n\", text, interval)\n time.Sleep(interval)\n }\n}\n<commit_msg>Modified pnbot.go.<commit_after>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"log\"\n \"os\"\n \"math\/big\"\n \"time\"\n\n \"github.com\/tooru\/pnbot\/prime\"\n \"github.com\/dghubble\/go-twitter\/twitter\"\n \"github.com\/dghubble\/oauth1\"\n)\n\nconst (\n queueSize = 10\n maxRetry = 10\n)\n\nfunc main() {\n log.SetFlags(log.Ldate | log.Ltime | log.Llongfile)\n\n flags := flag.NewFlagSet(\"pnbot\", flag.ExitOnError)\n consumerKey := flags.String(\"ck\", \"\", \"Twitter Consumer Key\")\n consumerSecret := flags.String(\"cs\", \"\", \"Twitter Consumer Secret\")\n accessToken := flags.String(\"at\", \"\", \"Twitter Access Token\")\n accessSecret := flags.String(\"as\", \"\", \"Twitter Access Secret\")\n flags.Parse(os.Args[1:])\n\n if *consumerKey == \"\" || *consumerSecret == \"\" || *accessToken == \"\" || *accessSecret == \"\" {\n log.Fatal(\"Consumer key\/secret and Access token\/secret required\")\n }\n\n pnbot := NewPNBot(consumerKey, consumerSecret, accessToken, accessSecret)\n pnbot.Start()\n}\n\nfunc NewPNBot(consumerKey *string, consumerSecret *string,\n accessToken *string, accessSecret *string) *PNBot {\n pnbot := &PNBot{\n consumerKey: consumerKey,\n consumerSecret: consumerSecret,\n accessToken: accessToken,\n accessSecret: accessSecret,\n prime: prime.NewPrime(),\n ch: make(chan *big.Int, queueSize),\n }\n\n return pnbot\n}\n\ntype PNBot struct {\n consumerKey *string\n consumerSecret *string\n accessToken *string\n accessSecret *string\n\n prime *prime.Prime\n ch chan *big.Int\n\n client *twitter.Client\n\n}\n\nfunc (pnbot *PNBot) Start() error {\n pnbot.client = pnbot.newClient()\n\n maxPrime, err := pnbot.getMaxPrime()\n if err != nil {\n log.Printf(\"getMaxPrime: %v\\n\", err)\n return err\n }\n\n go pnbot.makePrimes(maxPrime)\n\n return pnbot.tweetPrimes()\n}\n\nfunc (pnbot *PNBot) newClient() *twitter.Client {\n config := oauth1.NewConfig(*pnbot.consumerKey, *pnbot.consumerSecret)\n token := oauth1.NewToken(*pnbot.accessToken, *pnbot.accessSecret)\n httpClient := config.Client(oauth1.NoContext, token)\n\n return twitter.NewClient(httpClient)\n}\n\nfunc (pnbot *PNBot) getMaxPrime() (prime *big.Int, err error) {\n tweets, _, err := pnbot.client.Timelines.HomeTimeline(&twitter.HomeTimelineParams{\n SinceID: 0,\n })\n if err != nil {\n return nil, err\n }\n\n var maxPrime big.Int\n\n for _, tweet := range tweets {\n if _, ok := maxPrime.SetString(tweet.Text, 10); ok {\n log.Printf(\"continue from '%s'\", tweet.Text)\n return &maxPrime, nil\n }\n log.Printf(\"skip '%v'\", tweet.Text)\n }\n maxPrime.SetInt64(0)\n return &maxPrime, nil\n}\n\nfunc (pnbot *PNBot) makePrimes(maxPrime *big.Int) {\n var prime *big.Int\n var err error\n\n for {\n prime, err = pnbot.prime.Next()\n if err != nil {\n log.Fatalf(\"makePrimes: %v\\n\", err)\n pnbot.ch <- nil\n return\n } \n\n if prime.Cmp(maxPrime) > 0 {\n break;\n }\n log.Printf(\"makePrimes: skip %v\\n\", prime)\n }\n\n for {\n log.Printf(\"makePrimes: found %v\\n\", prime)\n pnbot.ch <- prime\n\n prime, err = pnbot.prime.Next()\n if err != nil {\n log.Fatalf(\"makePrimes: %v\\n\", err)\n pnbot.ch <- nil\n return\n } \n }\n}\n\nfunc (pnbot *PNBot) tweetPrimes() error {\n interval := 1 * time.Second\n\n for {\n prime := <- pnbot.ch\n if prime == nil {\n log.Fatalf(\"prime generator was died\")\n return fmt.Errorf(\"prime generator was died\")\n }\n\n text := prime.Text(10)\n retry := 0\n retryInterval := 10 * time.Second\n for {\n _, _, err := pnbot.client.Statuses.Update(text, nil)\n if err == nil {\n break\n }\n if retry >= maxRetry {\n return fmt.Errorf(\"Too many tweet error: %v\\n\", err)\n }\n log.Printf(\"Tweet error[%d\/%d]:sleep=%s: %v\\n\", retry+1, maxRetry, retryInterval, err)\n\n time.Sleep(retryInterval)\n pnbot.client = pnbot.newClient()\n retry++\n retryInterval = max(retryInterval * 2, 30 * time.Minute)\n continue\n }\n if retry > 0 {\n interval += 1 * time.Second\n }\n\n log.Printf(\"tweet %s :sleep=%s\\n\", text, interval)\n time.Sleep(interval)\n }\n}\n\nfunc max(a time.Duration, b time.Duration) time.Duration {\n if a > b {\n return b\n } else {\n return a\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package bncDataStoreBuntdb\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/goshuirc\/bnc\/lib\"\n\t\"github.com\/tidwall\/buntdb\"\n)\n\ntype DataStore struct {\n\tircbnc.DataStoreInterface\n\tDb *buntdb.DB\n\tManager *ircbnc.Manager\n\tsalt []byte\n}\n\nfunc (ds *DataStore) Init(manager *ircbnc.Manager) error {\n\tds.Manager = manager\n\n\tvar err error\n\n\tdbPath, _ := manager.Config.Bouncer.Storage[\"database\"]\n\tif dbPath == \"\" {\n\t\treturn errors.New(\"No database file has been configured\")\n\t}\n\n\tdb, err := buntdb.Open(dbPath)\n\tif err != nil {\n\t\treturn errors.New(\"Could not open database: \" + err.Error())\n\t}\n\n\tds.Db = db\n\n\terr = ds.LoadSalt()\n\tif err != nil {\n\t\treturn errors.New(\"Could not initialize database: \" + err.Error())\n\t}\n\treturn nil\n}\n\nfunc (ds *DataStore) Setup() error {\n\t\/\/ generate bouncer salt\n\tbncSalt := NewSalt()\n\tencodedBncSalt := base64.StdEncoding.EncodeToString(bncSalt)\n\terr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\ttx.Set(KeySalt, encodedBncSalt, nil)\n\t\treturn nil\n\t})\n\n\tds.salt = bncSalt\n\n\treturn err\n}\n\nfunc (ds *DataStore) LoadSalt() error {\n\terr := ds.Db.View(func(tx *buntdb.Tx) error {\n\t\tsaltString, err := tx.Get(KeySalt)\n\t\tif err != nil && err.Error() == \"not found\" {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn fmt.Errorf(\"Could not get salt string: %s\", err.Error())\n\t\t}\n\n\t\tds.salt, err = base64.StdEncoding.DecodeString(saltString)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not decode b64'd salt: %s\", err.Error())\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}\n\nfunc (ds *DataStore) GetAllUsers() []*ircbnc.User {\n\tuserIds := []string{}\n\tusers := []*ircbnc.User{}\n\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\ttx.DescendKeys(\"user.info *\", func(key, value string) bool {\n\t\t\tuserIds = append(userIds, strings.TrimPrefix(key, \"user.info \"))\n\t\t\treturn true\n\t\t})\n\n\t\t\/\/ Iterate through the user IDs and generate our user objects\n\t\tfor _, userId := range userIds {\n\t\t\tuser, err := ds.loadUser(tx, userId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error loading user \" + userId)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tusers = append(users, user)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn users\n}\n\nfunc (ds *DataStore) GetUserById(id string) *ircbnc.User {\n\tvar user *ircbnc.User\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\tuser, _ = ds.loadUser(tx, id)\n\t\treturn nil\n\t})\n\n\treturn user\n}\n\nfunc (ds *DataStore) GetUserByUsername(username string) *ircbnc.User {\n\t\/\/ Since the IDs are just the usernames we can simply forward this on\n\treturn ds.GetUserById(strings.ToLower(username))\n}\n\nfunc (ds *DataStore) SaveUser(user *ircbnc.User) error {\n\t\/\/ TODO: If ID isn't set, set the ID now.\n\t\/\/ An ID set = the user object was saved or retrieved from the db\n\tui := UserInfo{}\n\tui.Name = user.Name\n\tui.Role = user.Role\n\tui.EncodedSalt = base64.StdEncoding.EncodeToString(user.Salt)\n\tui.EncodedPasswordHash = base64.StdEncoding.EncodeToString(user.HashedPassword)\n\tui.DefaultNick = user.DefaultNick\n\tui.DefaultNickFallback = user.DefaultFbNick\n\tui.DefaultUsername = user.DefaultUser\n\tui.DefaultRealname = user.DefaultReal\n\n\t\/\/ Just use the username as the ID\n\tif user.ID == \"\" {\n\t\tui.ID = strings.ToLower(user.Name)\n\t}\n\n\tuiBytes, err := json.Marshal(ui)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user info: %s\", err.Error())\n\t}\n\tuiString := string(uiBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ User permissions\n\tupBytes, err := json.Marshal(user.Permissions)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tupString := string(upBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\tupdateErr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\tvar err error\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyUserInfo, ui.ID), uiString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyUserPermissions, ui.ID), upString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Make sure the User instance has the uptodate ID\n\t\tuser.ID = ui.ID\n\t\treturn nil\n\t})\n\n\treturn updateErr\n}\n\nfunc (ds *DataStore) AuthUser(username string, password string) (string, bool) {\n\tuser := ds.GetUserByUsername(username)\n\tif user == nil {\n\t\treturn \"\", false\n\t}\n\n\tpassMatches := CompareHashAndPassword(user.HashedPassword, ds.salt, user.Salt, password)\n\tif !passMatches {\n\t\treturn \"\", false\n\t}\n\n\treturn user.ID, true\n}\n\nfunc (ds *DataStore) SetUserPassword(user *ircbnc.User, newPassword string) {\n\tuserSalt := NewSalt()\n\tpassHash, _ := GenerateFromPassword(ds.salt, userSalt, newPassword)\n\n\tuser.Salt = userSalt\n\tuser.HashedPassword = passHash\n}\n\nfunc (ds *DataStore) GetUserNetworks(userId string) {\n\t\/\/ TODO: Return a slice of network objects of some kind\n}\n\nfunc (ds *DataStore) SaveConnection(connection *ircbnc.ServerConnection) error {\n\t\/\/ Store server info\n\tsc := ServerConnectionMapping{\n\t\tName: connection.Name,\n\t\tEnabled: true,\n\t\tConnectPassword: connection.Password,\n\t\tNickname: connection.Nickname,\n\t\tNicknameFallback: connection.FbNickname,\n\t\tUsername: connection.Username,\n\t\tRealname: connection.Realname,\n\t}\n\tscBytes, err := json.Marshal(sc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user info: %s\", err.Error())\n\t}\n\tscString := string(scBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ Store server addresses\n\tsaBytes, err := json.Marshal(connection.Addresses)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tsaString := string(saBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ Store server channels (Convert the string map to a slice)\n\tscChannels := []ircbnc.ServerConnectionChannel{}\n\tfor _, channel := range connection.Channels {\n\t\tscChannels = append(scChannels, channel)\n\t}\n\tscChanBytes, err := json.Marshal(scChannels)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tscChanString := string(scChanBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\tsaveErr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\tvar err error\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionInfo, connection.User.ID, connection.Name), scString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionAddresses, connection.User.ID, connection.Name), saString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionChannels, connection.User.ID, connection.Name), scChanString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn saveErr\n}\n\nfunc (ds *DataStore) loadUser(tx *buntdb.Tx, userId string) (*ircbnc.User, error) {\n\tuser := ircbnc.NewUser(ds.Manager)\n\n\tuser.ID = userId\n\tuser.Name = userId \/\/TODO(dan): Store Name and ID separately in the future if we want to\n\n\tinfoString, err := tx.Get(fmt.Sprintf(KeyUserInfo, userId))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (loading user info from db): %s\", err.Error())\n\t}\n\tui := &UserInfo{}\n\terr = json.Unmarshal([]byte(infoString), ui)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (unmarshalling user info from db): %s\", err.Error())\n\t}\n\n\tuser.Salt, err = base64.StdEncoding.DecodeString(ui.EncodedSalt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (decoding salt): %s\", err.Error())\n\t}\n\n\t\/\/TODO(dan): Make the below both have the same named fields\n\tuser.HashedPassword, err = base64.StdEncoding.DecodeString(ui.EncodedPasswordHash)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (decoding password): %s\", err.Error())\n\t}\n\tuser.DefaultNick = ui.DefaultNick\n\tuser.DefaultFbNick = ui.DefaultNickFallback\n\tuser.DefaultUser = ui.DefaultUsername\n\tuser.DefaultReal = ui.DefaultRealname\n\n\tds.loadUserConnections(user)\n\n\treturn user, nil\n}\n\nfunc (ds *DataStore) loadUserConnections(user *ircbnc.User) {\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\ttx.DescendKeys(fmt.Sprintf(\"user.server.info %s *\", user.ID), func(key, value string) bool {\n\t\t\tname := strings.TrimPrefix(key, fmt.Sprintf(\"user.server.info %s \", user.ID))\n\n\t\t\tsc, err := loadServerConnection(name, user, tx)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not load user network: %s\", err.Error())\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tuser.Networks[name] = sc\n\n\t\t\treturn true\n\t\t})\n\n\t\treturn nil\n\t})\n}\n\n\/\/ LoadServerConnection loads the given server connection from our database.\nfunc loadServerConnection(name string, user *ircbnc.User, tx *buntdb.Tx) (*ircbnc.ServerConnection, error) {\n\tsc := ircbnc.NewServerConnection()\n\tsc.Name = name\n\tsc.User = user\n\n\t\/\/ load general info\n\tscInfo := &ServerConnectionMapping{}\n\tscInfoString, err := tx.Get(fmt.Sprintf(KeyServerConnectionInfo, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc details from db): %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal([]byte(scInfoString), scInfo)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc details): %s\", err.Error())\n\t}\n\n\tsc.Nickname = scInfo.Nickname\n\tsc.FbNickname = scInfo.NicknameFallback\n\tsc.Username = scInfo.Username\n\tsc.Realname = scInfo.Realname\n\tsc.Password = scInfo.ConnectPassword\n\n\t\/\/ set default values\n\tif sc.Nickname == \"\" {\n\t\tsc.Nickname = user.DefaultNick\n\t}\n\tif sc.FbNickname == \"\" {\n\t\tsc.FbNickname = user.DefaultFbNick\n\t}\n\tif sc.Username == \"\" {\n\t\tsc.Username = user.DefaultUser\n\t}\n\tif sc.Realname == \"\" {\n\t\tsc.Realname = user.DefaultReal\n\t}\n\n\t\/\/ load channels\n\tscChannelString, err := tx.Get(fmt.Sprintf(KeyServerConnectionChannels, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc channels from db): %s\", err.Error())\n\t}\n\n\tscChans := &[]ServerConnectionChannelMapping{}\n\terr = json.Unmarshal([]byte(scChannelString), scChans)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc channels): %s\", err.Error())\n\t}\n\n\tsc.Channels = make(map[string]ircbnc.ServerConnectionChannel)\n\tfor _, channel := range *scChans {\n\t\tsc.Channels[channel.Name] = ircbnc.ServerConnectionChannel{\n\t\t\tName: channel.Name,\n\t\t\tKey: channel.Key,\n\t\t\tUseKey: channel.UseKey,\n\t\t}\n\n\t}\n\n\t\/\/ load addresses\n\tscAddressesString, err := tx.Get(fmt.Sprintf(KeyServerConnectionAddresses, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc addresses from db): %s\", err.Error())\n\t}\n\n\tscAddresses := &[]ServerConnectionAddressMapping{}\n\terr = json.Unmarshal([]byte(scAddressesString), scAddresses)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc addresses): %s\", err.Error())\n\t}\n\n\t\/\/ check port number and add addresses\n\tfor _, address := range *scAddresses {\n\t\tif address.Port < 1 || address.Port > 65535 {\n\t\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (port %d is not valid)\", address.Port)\n\t\t}\n\n\t\tsc.Addresses = append(sc.Addresses, ircbnc.ServerConnectionAddress(address))\n\t}\n\n\treturn sc, nil\n}\n<commit_msg>Saving connection addresses ccorrectly<commit_after>package bncDataStoreBuntdb\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/goshuirc\/bnc\/lib\"\n\t\"github.com\/tidwall\/buntdb\"\n)\n\ntype DataStore struct {\n\tircbnc.DataStoreInterface\n\tDb *buntdb.DB\n\tManager *ircbnc.Manager\n\tsalt []byte\n}\n\nfunc (ds *DataStore) Init(manager *ircbnc.Manager) error {\n\tds.Manager = manager\n\n\tvar err error\n\n\tdbPath, _ := manager.Config.Bouncer.Storage[\"database\"]\n\tif dbPath == \"\" {\n\t\treturn errors.New(\"No database file has been configured\")\n\t}\n\n\tdb, err := buntdb.Open(dbPath)\n\tif err != nil {\n\t\treturn errors.New(\"Could not open database: \" + err.Error())\n\t}\n\n\tds.Db = db\n\n\terr = ds.LoadSalt()\n\tif err != nil {\n\t\treturn errors.New(\"Could not initialize database: \" + err.Error())\n\t}\n\treturn nil\n}\n\nfunc (ds *DataStore) Setup() error {\n\t\/\/ generate bouncer salt\n\tbncSalt := NewSalt()\n\tencodedBncSalt := base64.StdEncoding.EncodeToString(bncSalt)\n\terr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\ttx.Set(KeySalt, encodedBncSalt, nil)\n\t\treturn nil\n\t})\n\n\tds.salt = bncSalt\n\n\treturn err\n}\n\nfunc (ds *DataStore) LoadSalt() error {\n\terr := ds.Db.View(func(tx *buntdb.Tx) error {\n\t\tsaltString, err := tx.Get(KeySalt)\n\t\tif err != nil && err.Error() == \"not found\" {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn fmt.Errorf(\"Could not get salt string: %s\", err.Error())\n\t\t}\n\n\t\tds.salt, err = base64.StdEncoding.DecodeString(saltString)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not decode b64'd salt: %s\", err.Error())\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}\n\nfunc (ds *DataStore) GetAllUsers() []*ircbnc.User {\n\tuserIds := []string{}\n\tusers := []*ircbnc.User{}\n\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\ttx.DescendKeys(\"user.info *\", func(key, value string) bool {\n\t\t\tuserIds = append(userIds, strings.TrimPrefix(key, \"user.info \"))\n\t\t\treturn true\n\t\t})\n\n\t\t\/\/ Iterate through the user IDs and generate our user objects\n\t\tfor _, userId := range userIds {\n\t\t\tuser, err := ds.loadUser(tx, userId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error loading user \" + userId)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tusers = append(users, user)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn users\n}\n\nfunc (ds *DataStore) GetUserById(id string) *ircbnc.User {\n\tvar user *ircbnc.User\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\tuser, _ = ds.loadUser(tx, id)\n\t\treturn nil\n\t})\n\n\treturn user\n}\n\nfunc (ds *DataStore) GetUserByUsername(username string) *ircbnc.User {\n\t\/\/ Since the IDs are just the usernames we can simply forward this on\n\treturn ds.GetUserById(strings.ToLower(username))\n}\n\nfunc (ds *DataStore) SaveUser(user *ircbnc.User) error {\n\t\/\/ TODO: If ID isn't set, set the ID now.\n\t\/\/ An ID set = the user object was saved or retrieved from the db\n\tui := UserInfo{}\n\tui.Name = user.Name\n\tui.Role = user.Role\n\tui.EncodedSalt = base64.StdEncoding.EncodeToString(user.Salt)\n\tui.EncodedPasswordHash = base64.StdEncoding.EncodeToString(user.HashedPassword)\n\tui.DefaultNick = user.DefaultNick\n\tui.DefaultNickFallback = user.DefaultFbNick\n\tui.DefaultUsername = user.DefaultUser\n\tui.DefaultRealname = user.DefaultReal\n\n\t\/\/ Just use the username as the ID\n\tif user.ID == \"\" {\n\t\tui.ID = strings.ToLower(user.Name)\n\t}\n\n\tuiBytes, err := json.Marshal(ui)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user info: %s\", err.Error())\n\t}\n\tuiString := string(uiBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ User permissions\n\tupBytes, err := json.Marshal(user.Permissions)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tupString := string(upBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\tupdateErr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\tvar err error\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyUserInfo, ui.ID), uiString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyUserPermissions, ui.ID), upString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Make sure the User instance has the uptodate ID\n\t\tuser.ID = ui.ID\n\t\treturn nil\n\t})\n\n\treturn updateErr\n}\n\nfunc (ds *DataStore) AuthUser(username string, password string) (string, bool) {\n\tuser := ds.GetUserByUsername(username)\n\tif user == nil {\n\t\treturn \"\", false\n\t}\n\n\tpassMatches := CompareHashAndPassword(user.HashedPassword, ds.salt, user.Salt, password)\n\tif !passMatches {\n\t\treturn \"\", false\n\t}\n\n\treturn user.ID, true\n}\n\nfunc (ds *DataStore) SetUserPassword(user *ircbnc.User, newPassword string) {\n\tuserSalt := NewSalt()\n\tpassHash, _ := GenerateFromPassword(ds.salt, userSalt, newPassword)\n\n\tuser.Salt = userSalt\n\tuser.HashedPassword = passHash\n}\n\nfunc (ds *DataStore) GetUserNetworks(userId string) {\n\t\/\/ TODO: Return a slice of network objects of some kind\n}\n\nfunc (ds *DataStore) SaveConnection(connection *ircbnc.ServerConnection) error {\n\t\/\/ Store server info\n\tsc := ServerConnectionMapping{\n\t\tName: connection.Name,\n\t\tEnabled: true,\n\t\tConnectPassword: connection.Password,\n\t\tNickname: connection.Nickname,\n\t\tNicknameFallback: connection.FbNickname,\n\t\tUsername: connection.Username,\n\t\tRealname: connection.Realname,\n\t}\n\tscBytes, err := json.Marshal(sc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user info: %s\", err.Error())\n\t}\n\tscString := string(scBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ Store server addresses\n\taddresses := make([]ServerConnectionAddressMapping, len(connection.Addresses))\n\tfor idx, addr := range connection.Addresses {\n\t\taddresses[idx].Host = addr.Host\n\t\taddresses[idx].Port = addr.Port\n\t\taddresses[idx].UseTLS = addr.UseTLS\n\t\taddresses[idx].VerifyTLS = addr.VerifyTLS\n\t}\n\n\tsaBytes, err := json.Marshal(addresses)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tsaString := string(saBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\t\/\/ Store server channels (Convert the string map to a slice)\n\tscChannels := []ircbnc.ServerConnectionChannel{}\n\tfor _, channel := range connection.Channels {\n\t\tscChannels = append(scChannels, channel)\n\t}\n\tscChanBytes, err := json.Marshal(scChannels)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshalling user permissions: %s\", err.Error())\n\t}\n\tscChanString := string(scChanBytes) \/\/TODO(dan): Should we do this in a safer way?\n\n\tsaveErr := ds.Db.Update(func(tx *buntdb.Tx) error {\n\t\tvar err error\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionInfo, connection.User.ID, connection.Name), scString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionAddresses, connection.User.ID, connection.Name), saString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, _, err = tx.Set(fmt.Sprintf(KeyServerConnectionChannels, connection.User.ID, connection.Name), scChanString, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn saveErr\n}\n\nfunc (ds *DataStore) loadUser(tx *buntdb.Tx, userId string) (*ircbnc.User, error) {\n\tuser := ircbnc.NewUser(ds.Manager)\n\n\tuser.ID = userId\n\tuser.Name = userId \/\/TODO(dan): Store Name and ID separately in the future if we want to\n\n\tinfoString, err := tx.Get(fmt.Sprintf(KeyUserInfo, userId))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (loading user info from db): %s\", err.Error())\n\t}\n\tui := &UserInfo{}\n\terr = json.Unmarshal([]byte(infoString), ui)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (unmarshalling user info from db): %s\", err.Error())\n\t}\n\n\tuser.Salt, err = base64.StdEncoding.DecodeString(ui.EncodedSalt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (decoding salt): %s\", err.Error())\n\t}\n\n\t\/\/TODO(dan): Make the below both have the same named fields\n\tuser.HashedPassword, err = base64.StdEncoding.DecodeString(ui.EncodedPasswordHash)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not load user (decoding password): %s\", err.Error())\n\t}\n\tuser.DefaultNick = ui.DefaultNick\n\tuser.DefaultFbNick = ui.DefaultNickFallback\n\tuser.DefaultUser = ui.DefaultUsername\n\tuser.DefaultReal = ui.DefaultRealname\n\n\tds.loadUserConnections(user)\n\n\treturn user, nil\n}\n\nfunc (ds *DataStore) loadUserConnections(user *ircbnc.User) {\n\tds.Db.View(func(tx *buntdb.Tx) error {\n\t\ttx.DescendKeys(fmt.Sprintf(\"user.server.info %s *\", user.ID), func(key, value string) bool {\n\t\t\tname := strings.TrimPrefix(key, fmt.Sprintf(\"user.server.info %s \", user.ID))\n\n\t\t\tsc, err := loadServerConnection(name, user, tx)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not load user network: %s\", err.Error())\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tuser.Networks[name] = sc\n\n\t\t\treturn true\n\t\t})\n\n\t\treturn nil\n\t})\n}\n\n\/\/ LoadServerConnection loads the given server connection from our database.\nfunc loadServerConnection(name string, user *ircbnc.User, tx *buntdb.Tx) (*ircbnc.ServerConnection, error) {\n\tsc := ircbnc.NewServerConnection()\n\tsc.Name = name\n\tsc.User = user\n\n\t\/\/ load general info\n\tscInfo := &ServerConnectionMapping{}\n\tscInfoString, err := tx.Get(fmt.Sprintf(KeyServerConnectionInfo, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc details from db): %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal([]byte(scInfoString), scInfo)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc details): %s\", err.Error())\n\t}\n\n\tsc.Nickname = scInfo.Nickname\n\tsc.FbNickname = scInfo.NicknameFallback\n\tsc.Username = scInfo.Username\n\tsc.Realname = scInfo.Realname\n\tsc.Password = scInfo.ConnectPassword\n\n\t\/\/ set default values\n\tif sc.Nickname == \"\" {\n\t\tsc.Nickname = user.DefaultNick\n\t}\n\tif sc.FbNickname == \"\" {\n\t\tsc.FbNickname = user.DefaultFbNick\n\t}\n\tif sc.Username == \"\" {\n\t\tsc.Username = user.DefaultUser\n\t}\n\tif sc.Realname == \"\" {\n\t\tsc.Realname = user.DefaultReal\n\t}\n\n\t\/\/ load channels\n\tscChannelString, err := tx.Get(fmt.Sprintf(KeyServerConnectionChannels, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc channels from db): %s\", err.Error())\n\t}\n\n\tscChans := &[]ServerConnectionChannelMapping{}\n\terr = json.Unmarshal([]byte(scChannelString), scChans)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc channels): %s\", err.Error())\n\t}\n\n\tsc.Channels = make(map[string]ircbnc.ServerConnectionChannel)\n\tfor _, channel := range *scChans {\n\t\tsc.Channels[channel.Name] = ircbnc.ServerConnectionChannel{\n\t\t\tName: channel.Name,\n\t\t\tKey: channel.Key,\n\t\t\tUseKey: channel.UseKey,\n\t\t}\n\n\t}\n\n\t\/\/ load addresses\n\tscAddressesString, err := tx.Get(fmt.Sprintf(KeyServerConnectionAddresses, user.ID, name))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (getting sc addresses from db): %s\", err.Error())\n\t}\n\n\tscAddresses := &[]ServerConnectionAddressMapping{}\n\terr = json.Unmarshal([]byte(scAddressesString), scAddresses)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (unmarshalling sc addresses): %s\", err.Error())\n\t}\n\n\t\/\/ check port number and add addresses\n\tfor _, address := range *scAddresses {\n\t\tif address.Port < 1 || address.Port > 65535 {\n\t\t\treturn nil, fmt.Errorf(\"Could not create new ServerConnection (port %d is not valid)\", address.Port)\n\t\t}\n\n\t\tsc.Addresses = append(sc.Addresses, ircbnc.ServerConnectionAddress(address))\n\t}\n\n\treturn sc, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/elazarl\/goproxy\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc main() {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.SetFormatter(&log.TextFormatter{FullTimestamp: true})\n\n\tfilename := \"cuttle.yml\"\n\tbytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load configuration from %s.\", filename)\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := Config{Addr: \":8123\"}\n\tif err := yaml.Unmarshal(bytes, &cfg); err != nil {\n\t\tlog.Errorf(\"Malformed YAML in %s.\", filename)\n\t\tlog.Fatal(err)\n\t}\n\n\tzones := make([]Zone, len(cfg.Zones))\n\tfor i, c := range cfg.Zones {\n\t\tzones[i] = *NewZone(c.Host, c.Shared, c.Control, c.Limit)\n\t}\n\n\t\/\/ Config proxy.\n\tproxy := goproxy.NewProxyHttpServer()\n\n\tproxy.OnRequest().DoFunc(\n\t\tfunc(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {\n\t\t\tfor _, zone := range zones {\n\t\t\t\tif !zone.MatchHost(r.URL.Host) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Acquire permission to forward request to upstream server.\n\t\t\t\tzone.GetController(r.URL.Host).Acquire()\n\n\t\t\t\t\/\/ Forward request.\n\t\t\t\tlog.Infof(\"Main: Forwarding request to %s\", r.URL)\n\t\t\t\treturn r, nil\n\t\t\t}\n\n\t\t\t\/\/ Forward request without rate limit.\n\t\t\tlog.Warnf(\"Main: No zone is applied to %s\", r.URL)\n\t\t\tlog.Infof(\"Main: Forwarding request to %s\", r.URL)\n\t\t\treturn r, nil\n\t\t})\n\n\tlog.Fatal(http.ListenAndServe(cfg.Addr, proxy))\n}\n\ntype Config struct {\n\tAddr string\n\n\tZones []ZoneConfig\n}\n\ntype ZoneConfig struct {\n\tHost string\n\tShared bool\n\tControl string\n\tLimit int\n}\n<commit_msg>Simplify zone matching flow<commit_after>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/elazarl\/goproxy\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc main() {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.SetFormatter(&log.TextFormatter{FullTimestamp: true})\n\n\tfilename := \"cuttle.yml\"\n\tbytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load configuration from %s.\", filename)\n\t\tlog.Fatal(err)\n\t}\n\n\tcfg := Config{Addr: \":8123\"}\n\tif err := yaml.Unmarshal(bytes, &cfg); err != nil {\n\t\tlog.Errorf(\"Malformed YAML in %s.\", filename)\n\t\tlog.Fatal(err)\n\t}\n\n\tzones := make([]Zone, len(cfg.Zones))\n\tfor i, c := range cfg.Zones {\n\t\tzones[i] = *NewZone(c.Host, c.Shared, c.Control, c.Limit)\n\t}\n\n\t\/\/ Config proxy.\n\tproxy := goproxy.NewProxyHttpServer()\n\n\tproxy.OnRequest().DoFunc(\n\t\tfunc(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {\n\t\t\tvar zone *Zone\n\t\t\tfor _, z := range zones {\n\t\t\t\tif z.MatchHost(r.URL.Host) {\n\t\t\t\t\tzone = &z\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif zone != nil {\n\t\t\t\t\/\/ Acquire permission to forward request to upstream server.\n\t\t\t\tzone.GetController(r.URL.Host).Acquire()\n\t\t\t} else {\n\t\t\t\t\/\/ No rate limit applied.\n\t\t\t\tlog.Warnf(\"Main: No zone is applied to %s\", r.URL)\n\t\t\t}\n\n\t\t\t\/\/ Forward request.\n\t\t\tlog.Infof(\"Main: Forwarding request to %s\", r.URL)\n\t\t\treturn r, nil\n\t\t})\n\n\tlog.Fatalln(http.ListenAndServe(cfg.Addr, proxy))\n}\n\ntype Config struct {\n\tAddr string\n\n\tZones []ZoneConfig\n}\n\ntype ZoneConfig struct {\n\tHost string\n\tShared bool\n\tControl string\n\tLimit int\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/\tcalc.go implements a simple calculator using the iNamik lexer and parser api.\n\/\/\n\/\/\tInput is read from STDIN\n\/\/\n\/\/\tThe input expression is matched against the following pattern:\n\/\/\n\/\/\tinput_exp:\n\/\/\t( id '=' )? general_exp\n\/\/\tgeneral_exp:\n\/\/\t\toperand ( operator operand )?\n\/\/\toperand:\n\/\/\t\tnumber | id | '(' general_exp ')'\n\/\/\toperator:\n\/\/\t\t'+' | '-' | '*' | '\/'\n\/\/\tnumber:\n\/\/\t\tdigit+ ( '.' digit+ )?\n\/\/\tdigit:\n\/\/\t\t['0'..'9']\n\/\/\tid:\n\/\/\t\talpha ( alpha | digit )*\n\/\/\talpha:\n\/\/\t\t['a'..'z'] | ['A'..'Z']\n\/\/\n\/\/\tPrecedence is as expected, with '*' and '\/' have higher precedence\n\/\/\tthan '+' and '-', as follows:\n\/\/\n\/\/\t1 + 2 * 3 - 4 \/ 5 == 1 + (2 * 3) - (4 \/ 5)\n\/\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\nimport (\n\t\"github.com\/iNamik\/go_lexer\"\n\t\"github.com\/iNamik\/go_lexer\/rangeutil\"\n\t\"github.com\/iNamik\/go_parser\"\n)\n\n\/\/ We define our lexer tokens starting from the pre-defined EOF token\nconst (\n\tT_EOF lexer.TokenType = lexer.TokenTypeEOF\n\tT_NIL = lexer.TokenTypeEOF + iota\n\tT_ID\n\tT_NUMBER\n\tT_PLUS\n\tT_MINUS\n\tT_MULTIPLY\n\tT_DIVIDE\n\tT_EQUALS\n\tT_OPEN_PAREN\n\tT_CLOSE_PAREN\n)\n\n\/\/ To store variables\nvar vars = map[string]float64{}\n\n\/\/ Single-character tokens\nvar singleChars = []byte{'+', '-', '*', '\/', '=', '(', ')'}\n\nvar singleTokens = []lexer.TokenType{T_PLUS, T_MINUS, T_MULTIPLY, T_DIVIDE, T_EQUALS, T_OPEN_PAREN, T_CLOSE_PAREN}\n\n\/\/ Multi-character tokens\nvar bytesWhitespace = []byte{' ', '\\t'}\n\nvar bytesDigits = rangeutil.RangeToBytes(\"0-9\")\n\nvar bytesAlpha = rangeutil.RangeToBytes(\"a-zA-Z\")\n\nvar bytesAlphaNum = rangeutil.RangeToBytes(\"0-9a-zA-Z\")\n\n\/\/ main\nfunc main() {\n\t\/\/ Create a buffered reader from STDIN\n\tstdin := bufio.NewReader(os.Stdin)\n\n\tfor {\n\t\t\/\/ Read a line of input\n\t\tinput, _, err := stdin.ReadLine()\n\n\t\t\/\/ Error? we're done\n\t\tif nil != err {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Anything to process?\n\t\tif len(input) > 0 {\n\t\t\t\/\/ Create a new lexer to turn the input text into tokens\n\t\t\tl := lexer.NewFromBytes(lex, input, 2)\n\n\t\t\t\/\/ Create a new parser that feeds off the lexer and generates expression values\n\t\t\tp := parser.New(parse, l, 2)\n\n\t\t\t\/\/ Loop over parser emits\n\t\t\tfor i := p.Next(); nil != i; i = p.Next() {\n\t\t\t\tfmt.Printf(\"%v\\n\", i)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ lex is the starting (and only) StateFn for lexing the input into tokens\nfunc lex(l lexer.Lexer) lexer.StateFn {\n\n\t\/\/ EOF\n\tif l.MatchEOF() {\n\t\tl.EmitEOF()\n\t\treturn nil \/\/ We're done here\n\t}\n\n\t\/\/ Single-char token?\n\tif i := bytes.IndexRune(singleChars, l.PeekRune(0)); i >= 0 {\n\t\tl.NextRune()\n\t\tl.EmitToken(singleTokens[i])\n\t\treturn lex\n\t}\n\n\tswitch {\n\n\t\/\/ Skip whitespace\n\tcase l.MatchOneOrMoreBytes(bytesWhitespace):\n\t\tl.IgnoreToken()\n\n\t\/\/ Number\n\tcase l.MatchOneOrMoreBytes(bytesDigits):\n\t\tif l.PeekRune(0) == '.' {\n\t\t\tl.NextRune() \/\/ skip '.'\n\t\t\tif !l.MatchOneOrMoreBytes(bytesDigits) {\n\t\t\t\tprintError(l.Column(), \"Illegal number format - Missing digits after '.'\")\n\t\t\t\tl.IgnoreToken()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tl.EmitTokenWithBytes(T_NUMBER)\n\n\t\/\/ ID\n\tcase l.MatchOneBytes(bytesAlpha) && l.MatchZeroOrMoreBytes(bytesAlphaNum):\n\t\tl.EmitTokenWithBytes(T_ID)\n\n\t\/\/ Unknown\n\tdefault:\n\t\tl.NextRune()\n\t\tprintError(l.Column(), \"Unknown Character\")\n\t\tl.IgnoreToken()\n\t}\n\n\t\/\/ See you again soon!\n\treturn lex\n}\n\n\/\/ parse tries to execute a general expression from the lexed tokens.\n\/\/ Returns nil - We only take one pass at the input string\nfunc parse(p parser.Parser) parser.StateFn {\n\n\tif p.PeekTokenType(0) != T_EOF {\n\t\t\/\/ Assignment ( id = general_expression )\n\t\tif p.PeekTokenType(0) == T_ID && p.PeekTokenType(1) == T_EQUALS {\n\t\t\ttId := p.NextToken()\n\n\t\t\tp.SkipToken() \/\/ skip '='\n\n\t\t\tval, ok := pGeneralExpression(p)\n\n\t\t\tif ok {\n\t\t\t\tt := p.NextToken()\n\t\t\t\tif t.Type() != T_EOF {\n\t\t\t\t\tprintError(t.Column(), \"Expecting operator\")\n\t\t\t\t} else {\n\t\t\t\t\tid := string(tId.Bytes())\n\t\t\t\t\tvars[id] = val\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ General expression\n\t\t} else {\n\t\t\tval, ok := pGeneralExpression(p)\n\n\t\t\tif ok {\n\t\t\t\tt := p.NextToken()\n\t\t\t\tif t.Type() != T_EOF {\n\t\t\t\t\tprintError(t.Column(), \"Expecting operator\")\n\t\t\t\t} else {\n\t\t\t\t\tp.Emit(val)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tp.ClearTokens()\n\tp.Emit(nil) \/\/ We're done - One pass only\n\n\treturn nil\n}\n\n\/\/ pGeneralExpression is the starting point for parsing a General Expression.\n\/\/ It is basically a pass-through to pAdditiveExpression, but it feels cleaner\nfunc pGeneralExpression(p parser.Parser) (f float64, ok bool) {\n\treturn pAdditiveExpression(p)\n}\n\n\/\/ pAdditiveExpression parses [ expression ( ( '+' | '-' ) expression )? ]\nfunc pAdditiveExpression(p parser.Parser) (f float64, ok bool) {\n\n\tf, ok = pMultiplicitiveExpression(p)\n\n\tif ok {\n\t\tt := p.NextToken()\n\t\tswitch t.Type() {\n\n\t\t\/\/ Add (+)\n\t\tcase T_PLUS:\n\t\t\tr, ok := pAdditiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf += r\n\t\t\t}\n\n\t\t\/\/ Subtract (-)\n\t\tcase T_MINUS:\n\t\t\tr, ok := pAdditiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf -= r\n\t\t\t}\n\n\t\t\/\/ Unknown - Send it back upstream\n\t\tdefault:\n\t\t\tp.BackupToken()\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ pMultiplicitiveExpression parses [ expression ( ( '*' | '\/' ) expression )? ]\nfunc pMultiplicitiveExpression(p parser.Parser) (f float64, ok bool) {\n\tf, ok = pOperand(p)\n\n\tif ok {\n\t\tt := p.NextToken()\n\t\tswitch t.Type() {\n\n\t\t\/\/ Multiply (*)\n\t\tcase T_MULTIPLY:\n\t\t\tr, ok := pMultiplicitiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf *= r\n\t\t\t}\n\n\t\t\/\/ Divide (\/)\n\t\tcase T_DIVIDE:\n\t\t\tr, ok := pMultiplicitiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf \/= r\n\t\t\t}\n\n\t\t\/\/ Unknown - Send it back upstream\n\t\tdefault:\n\t\t\tp.BackupToken()\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ pOperand parses [ id | number | '(' expression ')' ]\nfunc pOperand(p parser.Parser) (f float64, ok bool) {\n\tvar err error\n\n\tm := p.Marker()\n\tt := p.NextToken()\n\n\tswitch t.Type() {\n\n\t\/\/ ID\n\tcase T_ID:\n\t\tvar id = string(t.Bytes())\n\t\tf, ok = vars[id]\n\t\tif !ok {\n\t\t\tprintError(t.Column(), fmt.Sprint(\"id '\", id, \"' not defined\"))\n\t\t\tf = 0.0\n\t\t}\n\n\t\/\/ Number\n\tcase T_NUMBER:\n\t\tf, err = strconv.ParseFloat(string(t.Bytes()), 64)\n\t\tok = nil == err\n\t\tif !ok {\n\t\t\tprintError(t.Column(), fmt.Sprint(\"Error reading number: \", err.Error()))\n\t\t\tf = 0.0\n\t\t}\n\n\t\/\/ '(' Expresson ')'\n\tcase T_OPEN_PAREN:\n\t\tf, ok = pGeneralExpression(p)\n\t\tif ok {\n\t\t\tt2 := p.NextToken()\n\t\t\tif t2.Type() != T_CLOSE_PAREN {\n\t\t\t\tprintError(t.Column(), \"Unbalanced Paren\")\n\t\t\t\tok = false\n\t\t\t\tf = 0.0\n\t\t\t}\n\t\t}\n\n\t\/\/ EOF\n\tcase T_EOF:\n\t\tprintError(t.Column(), \"Unexpected EOF - Expecting operand\")\n\t\tok = false\n\t\tf = 0.0\n\n\t\/\/ Unknown\n\tdefault:\n\t\tprintError(t.Column(), \"Expecting operand\")\n\t\tok = false\n\t\tf = 0.0\n\t}\n\n\tif !ok {\n\t\tp.Reset(m)\n\t}\n\n\treturn\n}\n\n\/\/ printError prints an error msg pointing to the specified column of the input.\nfunc printError(col int, msg string) {\n\tfmt.Print(strings.Repeat(\" \", col-1), \"^ \", msg, \"\\n\")\n}\n<commit_msg>Uses T_EOF for eof token<commit_after>\/\/\n\/\/\tcalc.go implements a simple calculator using the iNamik lexer and parser api.\n\/\/\n\/\/\tInput is read from STDIN\n\/\/\n\/\/\tThe input expression is matched against the following pattern:\n\/\/\n\/\/\tinput_exp:\n\/\/\t( id '=' )? general_exp\n\/\/\tgeneral_exp:\n\/\/\t\toperand ( operator operand )?\n\/\/\toperand:\n\/\/\t\tnumber | id | '(' general_exp ')'\n\/\/\toperator:\n\/\/\t\t'+' | '-' | '*' | '\/'\n\/\/\tnumber:\n\/\/\t\tdigit+ ( '.' digit+ )?\n\/\/\tdigit:\n\/\/\t\t['0'..'9']\n\/\/\tid:\n\/\/\t\talpha ( alpha | digit )*\n\/\/\talpha:\n\/\/\t\t['a'..'z'] | ['A'..'Z']\n\/\/\n\/\/\tPrecedence is as expected, with '*' and '\/' have higher precedence\n\/\/\tthan '+' and '-', as follows:\n\/\/\n\/\/\t1 + 2 * 3 - 4 \/ 5 == 1 + (2 * 3) - (4 \/ 5)\n\/\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\nimport (\n\t\"github.com\/iNamik\/go_lexer\"\n\t\"github.com\/iNamik\/go_lexer\/rangeutil\"\n\t\"github.com\/iNamik\/go_parser\"\n)\n\n\/\/ We define our lexer tokens starting from the pre-defined EOF token\nconst (\n\tT_EOF lexer.TokenType = lexer.T_EOF\n\tT_NIL = lexer.T_EOF + iota\n\tT_ID\n\tT_NUMBER\n\tT_PLUS\n\tT_MINUS\n\tT_MULTIPLY\n\tT_DIVIDE\n\tT_EQUALS\n\tT_OPEN_PAREN\n\tT_CLOSE_PAREN\n)\n\n\/\/ To store variables\nvar vars = map[string]float64{}\n\n\/\/ Single-character tokens\nvar singleChars = []byte{'+', '-', '*', '\/', '=', '(', ')'}\n\nvar singleTokens = []lexer.TokenType{T_PLUS, T_MINUS, T_MULTIPLY, T_DIVIDE, T_EQUALS, T_OPEN_PAREN, T_CLOSE_PAREN}\n\n\/\/ Multi-character tokens\nvar bytesWhitespace = []byte{' ', '\\t'}\n\nvar bytesDigits = rangeutil.RangeToBytes(\"0-9\")\n\nvar bytesAlpha = rangeutil.RangeToBytes(\"a-zA-Z\")\n\nvar bytesAlphaNum = rangeutil.RangeToBytes(\"0-9a-zA-Z\")\n\n\/\/ main\nfunc main() {\n\t\/\/ Create a buffered reader from STDIN\n\tstdin := bufio.NewReader(os.Stdin)\n\n\tfor {\n\t\t\/\/ Read a line of input\n\t\tinput, _, err := stdin.ReadLine()\n\n\t\t\/\/ Error? we're done\n\t\tif nil != err {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Anything to process?\n\t\tif len(input) > 0 {\n\t\t\t\/\/ Create a new lexer to turn the input text into tokens\n\t\t\tl := lexer.NewFromBytes(lex, input, 2)\n\n\t\t\t\/\/ Create a new parser that feeds off the lexer and generates expression values\n\t\t\tp := parser.New(parse, l, 2)\n\n\t\t\t\/\/ Loop over parser emits\n\t\t\tfor i := p.Next(); nil != i; i = p.Next() {\n\t\t\t\tfmt.Printf(\"%v\\n\", i)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ lex is the starting (and only) StateFn for lexing the input into tokens\nfunc lex(l lexer.Lexer) lexer.StateFn {\n\n\t\/\/ EOF\n\tif l.MatchEOF() {\n\t\tl.EmitEOF()\n\t\treturn nil \/\/ We're done here\n\t}\n\n\t\/\/ Single-char token?\n\tif i := bytes.IndexRune(singleChars, l.PeekRune(0)); i >= 0 {\n\t\tl.NextRune()\n\t\tl.EmitToken(singleTokens[i])\n\t\treturn lex\n\t}\n\n\tswitch {\n\n\t\/\/ Skip whitespace\n\tcase l.MatchOneOrMoreBytes(bytesWhitespace):\n\t\tl.IgnoreToken()\n\n\t\/\/ Number\n\tcase l.MatchOneOrMoreBytes(bytesDigits):\n\t\tif l.PeekRune(0) == '.' {\n\t\t\tl.NextRune() \/\/ skip '.'\n\t\t\tif !l.MatchOneOrMoreBytes(bytesDigits) {\n\t\t\t\tprintError(l.Column(), \"Illegal number format - Missing digits after '.'\")\n\t\t\t\tl.IgnoreToken()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tl.EmitTokenWithBytes(T_NUMBER)\n\n\t\/\/ ID\n\tcase l.MatchOneBytes(bytesAlpha) && l.MatchZeroOrMoreBytes(bytesAlphaNum):\n\t\tl.EmitTokenWithBytes(T_ID)\n\n\t\/\/ Unknown\n\tdefault:\n\t\tl.NextRune()\n\t\tprintError(l.Column(), \"Unknown Character\")\n\t\tl.IgnoreToken()\n\t}\n\n\t\/\/ See you again soon!\n\treturn lex\n}\n\n\/\/ parse tries to execute a general expression from the lexed tokens.\n\/\/ Returns nil - We only take one pass at the input string\nfunc parse(p parser.Parser) parser.StateFn {\n\n\tif p.PeekTokenType(0) != T_EOF {\n\t\t\/\/ Assignment ( id = general_expression )\n\t\tif p.PeekTokenType(0) == T_ID && p.PeekTokenType(1) == T_EQUALS {\n\t\t\ttId := p.NextToken()\n\n\t\t\tp.SkipToken() \/\/ skip '='\n\n\t\t\tval, ok := pGeneralExpression(p)\n\n\t\t\tif ok {\n\t\t\t\tt := p.NextToken()\n\t\t\t\tif t.Type() != T_EOF {\n\t\t\t\t\tprintError(t.Column(), \"Expecting operator\")\n\t\t\t\t} else {\n\t\t\t\t\tid := string(tId.Bytes())\n\t\t\t\t\tvars[id] = val\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ General expression\n\t\t} else {\n\t\t\tval, ok := pGeneralExpression(p)\n\n\t\t\tif ok {\n\t\t\t\tt := p.NextToken()\n\t\t\t\tif t.Type() != T_EOF {\n\t\t\t\t\tprintError(t.Column(), \"Expecting operator\")\n\t\t\t\t} else {\n\t\t\t\t\tp.Emit(val)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tp.ClearTokens()\n\tp.Emit(nil) \/\/ We're done - One pass only\n\n\treturn nil\n}\n\n\/\/ pGeneralExpression is the starting point for parsing a General Expression.\n\/\/ It is basically a pass-through to pAdditiveExpression, but it feels cleaner\nfunc pGeneralExpression(p parser.Parser) (f float64, ok bool) {\n\treturn pAdditiveExpression(p)\n}\n\n\/\/ pAdditiveExpression parses [ expression ( ( '+' | '-' ) expression )? ]\nfunc pAdditiveExpression(p parser.Parser) (f float64, ok bool) {\n\n\tf, ok = pMultiplicitiveExpression(p)\n\n\tif ok {\n\t\tt := p.NextToken()\n\t\tswitch t.Type() {\n\n\t\t\/\/ Add (+)\n\t\tcase T_PLUS:\n\t\t\tr, ok := pAdditiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf += r\n\t\t\t}\n\n\t\t\/\/ Subtract (-)\n\t\tcase T_MINUS:\n\t\t\tr, ok := pAdditiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf -= r\n\t\t\t}\n\n\t\t\/\/ Unknown - Send it back upstream\n\t\tdefault:\n\t\t\tp.BackupToken()\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ pMultiplicitiveExpression parses [ expression ( ( '*' | '\/' ) expression )? ]\nfunc pMultiplicitiveExpression(p parser.Parser) (f float64, ok bool) {\n\tf, ok = pOperand(p)\n\n\tif ok {\n\t\tt := p.NextToken()\n\t\tswitch t.Type() {\n\n\t\t\/\/ Multiply (*)\n\t\tcase T_MULTIPLY:\n\t\t\tr, ok := pMultiplicitiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf *= r\n\t\t\t}\n\n\t\t\/\/ Divide (\/)\n\t\tcase T_DIVIDE:\n\t\t\tr, ok := pMultiplicitiveExpression(p)\n\t\t\tif ok {\n\t\t\t\tf \/= r\n\t\t\t}\n\n\t\t\/\/ Unknown - Send it back upstream\n\t\tdefault:\n\t\t\tp.BackupToken()\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ pOperand parses [ id | number | '(' expression ')' ]\nfunc pOperand(p parser.Parser) (f float64, ok bool) {\n\tvar err error\n\n\tm := p.Marker()\n\tt := p.NextToken()\n\n\tswitch t.Type() {\n\n\t\/\/ ID\n\tcase T_ID:\n\t\tvar id = string(t.Bytes())\n\t\tf, ok = vars[id]\n\t\tif !ok {\n\t\t\tprintError(t.Column(), fmt.Sprint(\"id '\", id, \"' not defined\"))\n\t\t\tf = 0.0\n\t\t}\n\n\t\/\/ Number\n\tcase T_NUMBER:\n\t\tf, err = strconv.ParseFloat(string(t.Bytes()), 64)\n\t\tok = nil == err\n\t\tif !ok {\n\t\t\tprintError(t.Column(), fmt.Sprint(\"Error reading number: \", err.Error()))\n\t\t\tf = 0.0\n\t\t}\n\n\t\/\/ '(' Expresson ')'\n\tcase T_OPEN_PAREN:\n\t\tf, ok = pGeneralExpression(p)\n\t\tif ok {\n\t\t\tt2 := p.NextToken()\n\t\t\tif t2.Type() != T_CLOSE_PAREN {\n\t\t\t\tprintError(t.Column(), \"Unbalanced Paren\")\n\t\t\t\tok = false\n\t\t\t\tf = 0.0\n\t\t\t}\n\t\t}\n\n\t\/\/ EOF\n\tcase T_EOF:\n\t\tprintError(t.Column(), \"Unexpected EOF - Expecting operand\")\n\t\tok = false\n\t\tf = 0.0\n\n\t\/\/ Unknown\n\tdefault:\n\t\tprintError(t.Column(), \"Expecting operand\")\n\t\tok = false\n\t\tf = 0.0\n\t}\n\n\tif !ok {\n\t\tp.Reset(m)\n\t}\n\n\treturn\n}\n\n\/\/ printError prints an error msg pointing to the specified column of the input.\nfunc printError(col int, msg string) {\n\tfmt.Print(strings.Repeat(\" \", col-1), \"^ \", msg, \"\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\n\t\"google.golang.org\/api\/googleapi\"\n\tstorage \"google.golang.org\/api\/storage\/v1\"\n)\n\nfunc TestAccStorage_basic(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderDefaults(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"US\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageCustomAttributes(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"EU\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageStorageClass(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acc-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"MULTI_REGIONAL\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"MULTI_REGIONAL\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"NEARLINE\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"NEARLINE\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"REGIONAL\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"REGIONAL\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageBucketUpdate(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderDefaults(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"US\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"predefined_acl\", \"publicReadWrite\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"EU\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageForceDestroy(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketPutItem(bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(\"idontexist\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketMissing(bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No Project_ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif found.Id != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Bucket not found\")\n\t\t}\n\n\t\tif found.Name != bucketName {\n\t\t\treturn fmt.Errorf(\"expected name %s, got %s\", bucketName, found.Name)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tdata := bytes.NewBufferString(\"test\")\n\t\tdataReader := bytes.NewReader(data.Bytes())\n\t\tobject := &storage.Object{Name: \"bucketDestroyTestFile\"}\n\n\t\t\/\/ This needs to use Media(io.Reader) call, otherwise it does not go to \/upload API and fails\n\t\tif res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {\n\t\t\tfmt.Printf(\"Created object %v at location %v\\n\\n\", res.Name, res.SelfLink)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Objects.Insert failed: %v\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\t_, err := config.clientStorage.Buckets.Get(bucketName).Do()\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"Found %s\", bucketName)\n\t\t}\n\n\t\tif gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n}\n\nfunc testAccGoogleStorageDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_storage_bucket\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"Bucket still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testGoogleStorageBucketsReaderDefaults(bucketName string) string {\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n}\n`, bucketName)\n}\n\nfunc testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string {\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n\tpredefined_acl = \"publicReadWrite\"\n\tlocation = \"EU\"\n\tforce_destroy = \"true\"\n}\n`, bucketName)\n}\n\nfunc testGoogleStorageBucketsReaderStorageClass(bucketName string, storageClass string) string {\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n\tstorage_class = \"%s\"\n}\n`, bucketName, storageClass)\n}\n<commit_msg>provider\/google: add location to storage tests.<commit_after>package google\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\n\t\"google.golang.org\/api\/googleapi\"\n\tstorage \"google.golang.org\/api\/storage\/v1\"\n)\n\nfunc TestAccStorage_basic(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderDefaults(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"US\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageCustomAttributes(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"EU\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageStorageClass(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acc-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"MULTI_REGIONAL\", \"\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"MULTI_REGIONAL\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"NEARLINE\", \"\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"NEARLINE\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderStorageClass(bucketName, \"REGIONAL\", \"us-central1\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"storage_class\", \"REGIONAL\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"us-central1\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageBucketUpdate(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderDefaults(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"US\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"predefined_acl\", \"publicReadWrite\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"location\", \"EU\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", \"force_destroy\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccStorageForceDestroy(t *testing.T) {\n\tbucketName := fmt.Sprintf(\"tf-test-acl-bucket-%d\", acctest.RandInt())\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccGoogleStorageDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketExists(\n\t\t\t\t\t\t\"google_storage_bucket.bucket\", bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(bucketName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketPutItem(bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testGoogleStorageBucketsReaderCustomAttributes(\"idontexist\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckCloudStorageBucketMissing(bucketName),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No Project_ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif found.Id != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Bucket not found\")\n\t\t}\n\n\t\tif found.Name != bucketName {\n\t\t\treturn fmt.Errorf(\"expected name %s, got %s\", bucketName, found.Name)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tdata := bytes.NewBufferString(\"test\")\n\t\tdataReader := bytes.NewReader(data.Bytes())\n\t\tobject := &storage.Object{Name: \"bucketDestroyTestFile\"}\n\n\t\t\/\/ This needs to use Media(io.Reader) call, otherwise it does not go to \/upload API and fails\n\t\tif res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {\n\t\t\tfmt.Printf(\"Created object %v at location %v\\n\\n\", res.Name, res.SelfLink)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Objects.Insert failed: %v\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\t_, err := config.clientStorage.Buckets.Get(bucketName).Do()\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"Found %s\", bucketName)\n\t\t}\n\n\t\tif gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n}\n\nfunc testAccGoogleStorageDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_storage_bucket\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"Bucket still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testGoogleStorageBucketsReaderDefaults(bucketName string) string {\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n}\n`, bucketName)\n}\n\nfunc testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string {\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n\tpredefined_acl = \"publicReadWrite\"\n\tlocation = \"EU\"\n\tforce_destroy = \"true\"\n}\n`, bucketName)\n}\n\nfunc testGoogleStorageBucketsReaderStorageClass(bucketName, storageClass, location string) string {\n\tvar locationBlock string\n\tif location != \"\" {\n\t\tlocationBlock = fmt.Sprintf(`\n\tlocation = \"%s\"`, location)\n\t}\n\treturn fmt.Sprintf(`\nresource \"google_storage_bucket\" \"bucket\" {\n\tname = \"%s\"\n\tstorage_class = \"%s\"%s\n}\n`, bucketName, storageClass, locationBlock)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package exec provides the entry point of the daemon sub-program and helpers\n\/\/ to spawn a daemon process.\npackage daemon\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/elves\/elvish\/util\"\n)\n\n\/\/ Daemon keeps configurations for the daemon process.\ntype Daemon struct {\n\tForked int\n\tBinPath string\n\tDbPath string\n\tSockPath string\n\tLogPathPrefix string\n}\n\n\/\/ Main is the entry point of the daemon sub-program.\nfunc (d *Daemon) Main(serve func(string, string)) int {\n\tswitch d.Forked {\n\tcase 0:\n\t\terrored := false\n\t\tabsify := func(f string, s *string) {\n\t\t\tif *s == \"\" {\n\t\t\t\tlog.Println(\"flag\", f, \"is required for daemon\")\n\t\t\t\terrored = true\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp, err := filepath.Abs(*s)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"abs:\", err)\n\t\t\t\terrored = true\n\t\t\t} else {\n\t\t\t\t*s = p\n\t\t\t}\n\t\t}\n\t\tabsify(\"-bin\", &d.BinPath)\n\t\tabsify(\"-db\", &d.DbPath)\n\t\tabsify(\"-sock\", &d.SockPath)\n\t\tabsify(\"-logprefix\", &d.LogPathPrefix)\n\t\tif errored {\n\t\t\treturn 2\n\t\t}\n\n\t\tsyscall.Umask(0077)\n\t\treturn d.pseudoFork(\n\t\t\t&exec.Cmd{\n\t\t\t\tDir: \"\/\", \/\/ cd to \/\n\t\t\t\tEnv: nil, \/\/ empty environment\n\t\t\t\tSysProcAttr: &syscall.SysProcAttr{Setsid: true},\n\t\t\t})\n\tcase 1:\n\t\treturn d.pseudoFork(nil)\n\tcase 2:\n\t\tserve(d.SockPath, d.DbPath)\n\t\treturn 0\n\tdefault:\n\t\treturn 2\n\t}\n}\n\n\/\/ Spawn spawns a daemon in the background. It is supposed to be called from a\n\/\/ client.\nfunc (d *Daemon) Spawn() error {\n\tbinPath := d.BinPath\n\t\/\/ Determine binPath.\n\tif binPath == \"\" {\n\t\tif len(os.Args) > 0 && path.IsAbs(os.Args[0]) {\n\t\t\tbinPath = os.Args[0]\n\t\t} else if len(os.Args) > 0 && strings.Contains(os.Args[0], \"\/\") {\n\t\t\tbinPath = filepath.Abs(os.Args[0])\n\t\t} else {\n\t\t\t\/\/ Find elvish in PATH\n\t\t\tpaths := strings.Split(os.Getenv(\"PATH\"), \":\")\n\t\t\tresult, err := util.Search(paths, \"elvish\")\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"cannot find elvish: \" + err.Error())\n\t\t\t}\n\t\t\tbinPath = result\n\t\t}\n\t}\n\n\treturn setArgs(\n\t\tnil,\n\t\t0,\n\t\tbinPath,\n\t\td.DbPath,\n\t\td.SockPath,\n\t\td.LogPathPrefix,\n\t).Run()\n}\n\n\/\/ pseudoFork forks a daemon. It is supposed to be called from the daemon.\nfunc (d *Daemon) pseudoFork(cmd *exec.Cmd) int {\n\terr := setArgs(\n\t\tcmd,\n\t\td.Forked+1,\n\t\td.BinPath,\n\t\td.DbPath,\n\t\td.SockPath,\n\t\td.LogPathPrefix,\n\t).Start()\n\n\tif err != nil {\n\t\treturn 2\n\t}\n\treturn 0\n}\n\nfunc setArgs(cmd *exec.Cmd, forkLevel int, binPath, dbPath, sockPath,\n\tlogPathPrefix string) *exec.Cmd {\n\n\tif cmd == nil {\n\t\tcmd = &exec.Cmd{}\n\t}\n\n\tcmd.Path = binPath\n\tcmd.Args = []string{\n\t\tbinPath,\n\t\t\"-daemon\",\n\t\t\"-forked\", strconv.Itoa(forkLevel),\n\t\t\"-bin\", binPath,\n\t\t\"-db\", dbPath,\n\t\t\"-sock\", sockPath,\n\t\t\"-logprefix\", logPathPrefix,\n\t}\n\n\treturn cmd\n}\n<commit_msg>Fixup for last args[0] fix.<commit_after>\/\/ Package exec provides the entry point of the daemon sub-program and helpers\n\/\/ to spawn a daemon process.\npackage daemon\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/elves\/elvish\/util\"\n)\n\n\/\/ Daemon keeps configurations for the daemon process.\ntype Daemon struct {\n\tForked int\n\tBinPath string\n\tDbPath string\n\tSockPath string\n\tLogPathPrefix string\n}\n\n\/\/ Main is the entry point of the daemon sub-program.\nfunc (d *Daemon) Main(serve func(string, string)) int {\n\tswitch d.Forked {\n\tcase 0:\n\t\terrored := false\n\t\tabsify := func(f string, s *string) {\n\t\t\tif *s == \"\" {\n\t\t\t\tlog.Println(\"flag\", f, \"is required for daemon\")\n\t\t\t\terrored = true\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp, err := filepath.Abs(*s)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"abs:\", err)\n\t\t\t\terrored = true\n\t\t\t} else {\n\t\t\t\t*s = p\n\t\t\t}\n\t\t}\n\t\tabsify(\"-bin\", &d.BinPath)\n\t\tabsify(\"-db\", &d.DbPath)\n\t\tabsify(\"-sock\", &d.SockPath)\n\t\tabsify(\"-logprefix\", &d.LogPathPrefix)\n\t\tif errored {\n\t\t\treturn 2\n\t\t}\n\n\t\tsyscall.Umask(0077)\n\t\treturn d.pseudoFork(\n\t\t\t&exec.Cmd{\n\t\t\t\tDir: \"\/\", \/\/ cd to \/\n\t\t\t\tEnv: nil, \/\/ empty environment\n\t\t\t\tSysProcAttr: &syscall.SysProcAttr{Setsid: true},\n\t\t\t})\n\tcase 1:\n\t\treturn d.pseudoFork(nil)\n\tcase 2:\n\t\tserve(d.SockPath, d.DbPath)\n\t\treturn 0\n\tdefault:\n\t\treturn 2\n\t}\n}\n\n\/\/ Spawn spawns a daemon in the background. It is supposed to be called from a\n\/\/ client.\nfunc (d *Daemon) Spawn() error {\n\tbinPath := d.BinPath\n\t\/\/ Determine binPath.\n\tif binPath == \"\" {\n\t\tbin, err := getAbsBinPath()\n\t\tif err != nil {\n\t\t\treturn errors.New(\"cannot find elvish: \" + err.Error())\n\t\t}\n\t\tbinPath = bin\n\t}\n\n\treturn setArgs(\n\t\tnil,\n\t\t0,\n\t\tbinPath,\n\t\td.DbPath,\n\t\td.SockPath,\n\t\td.LogPathPrefix,\n\t).Run()\n}\n\n\/\/ getAbsBinPath determines the absolute path to the Elvish binary, first by\n\/\/ looking at os.Args[0] and then searching for \"elvish\" in PATH.\nfunc getAbsBinPath() (string, error) {\n\tif len(os.Args) > 0 {\n\t\targ0 := os.Args[0]\n\t\tif path.IsAbs(arg0) {\n\t\t\treturn arg0, nil\n\t\t} else if strings.Contains(arg0, \"\/\") {\n\t\t\tabs, err := filepath.Abs(arg0)\n\t\t\tif err == nil {\n\t\t\t\treturn abs, nil\n\t\t\t}\n\t\t\tlog.Printf(\"cannot resolve relative arg0 %q, searching in PATH\", arg0)\n\t\t}\n\t}\n\t\/\/ Find elvish in PATH\n\tpaths := strings.Split(os.Getenv(\"PATH\"), \":\")\n\tbinpath, err := util.Search(paths, \"elvish\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn binpath, nil\n}\n\n\/\/ pseudoFork forks a daemon. It is supposed to be called from the daemon.\nfunc (d *Daemon) pseudoFork(cmd *exec.Cmd) int {\n\terr := setArgs(\n\t\tcmd,\n\t\td.Forked+1,\n\t\td.BinPath,\n\t\td.DbPath,\n\t\td.SockPath,\n\t\td.LogPathPrefix,\n\t).Start()\n\n\tif err != nil {\n\t\treturn 2\n\t}\n\treturn 0\n}\n\nfunc setArgs(cmd *exec.Cmd, forkLevel int, binPath, dbPath, sockPath,\n\tlogPathPrefix string) *exec.Cmd {\n\n\tif cmd == nil {\n\t\tcmd = &exec.Cmd{}\n\t}\n\n\tcmd.Path = binPath\n\tcmd.Args = []string{\n\t\tbinPath,\n\t\t\"-daemon\",\n\t\t\"-forked\", strconv.Itoa(forkLevel),\n\t\t\"-bin\", binPath,\n\t\t\"-db\", dbPath,\n\t\t\"-sock\", sockPath,\n\t\t\"-logprefix\", logPathPrefix,\n\t}\n\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>package daemon\n\nimport (\n \"os\"\n \"reflect\"\n \"time\"\n \"fmt\"\n \"github.com\/sevlyar\/go-daemon\"\n \"github.com\/Sirupsen\/logrus\"\n \"github.com\/BluePecker\/JwtAuth\/server\/types\/token\"\n \"github.com\/BluePecker\/JwtAuth\/server\"\n \"github.com\/BluePecker\/JwtAuth\/storage\"\n \"github.com\/BluePecker\/JwtAuth\/server\/router\"\n _ \"github.com\/BluePecker\/JwtAuth\/storage\/redis\"\n \/\/_ \"github.com\/BluePecker\/JwtAuth\/storage\/ram\"\n \"github.com\/dgrijalva\/jwt-go\"\n RouteToken \"github.com\/BluePecker\/JwtAuth\/server\/router\/token\"\n)\n\nconst (\n TOKEN_TTL = 2 * 3600\n \n VERSION = \"1.0.0\"\n \n ALLOW_LOGIN_NUM = 3\n)\n\ntype Storage struct {\n Driver string\n Path string\n Host string\n Port int\n MaxRetries int\n Username string\n Password string\n PoolSize int\n Database string\n}\n\ntype Security struct {\n TLS bool\n Key string\n Cert string\n}\n\ntype Options struct {\n PidFile string\n LogFile string\n LogLevel string\n Port int\n Host string\n Daemon bool\n Version bool\n Security Security\n Storage Storage\n Secret string\n}\n\ntype Daemon struct {\n Options *Options\n Server *server.Server\n Storage storage.Driver\n}\n\ntype (\n CustomClaims struct {\n Device string `json:\"device\"`\n Unique string `json:\"unique\"`\n Timestamp int64 `json:\"timestamp\"`\n Addr string `json:\"addr\"`\n jwt.StandardClaims\n }\n)\n\nfunc (d *Daemon) storageOptionInject(p2 *storage.Option) {\n p1 := &(d.Options.Storage)\n u1 := reflect.ValueOf(p1).Elem()\n u2 := reflect.ValueOf(p2).Elem()\n \n for seq := 0; seq < u2.NumField(); seq++ {\n item := u2.Type().Field(seq)\n v1 := u1.FieldByName(item.Name)\n v2 := u2.FieldByName(item.Name)\n if v1.IsValid() {\n if v2.Type() == v1.Type() {\n v2.Set(v1)\n }\n }\n }\n}\n\nfunc (d *Daemon) NewStorage() (*storage.Driver, error) {\n option := &storage.Option{}\n d.storageOptionInject(option)\n driver, err := storage.New(d.Options.Storage.Driver, *option)\n return &driver, err\n}\n\nfunc (d *Daemon) NewServer() {\n d.Server = &server.Server{}\n}\n\nfunc (d *Daemon) Listen() {\n if d.Server == nil {\n d.NewServer()\n }\n \n options := server.Options{\n Host: d.Options.Host,\n Port: d.Options.Port,\n }\n \n if d.Options.Security.TLS {\n options.Tls = &server.TLS{\n Cert: d.Options.Security.Cert,\n Key: d.Options.Security.Key,\n }\n }\n \n d.Server.Accept(options)\n}\n\nfunc (d *Daemon) addRouter(routers... router.Router) {\n if d.Server == nil {\n d.NewServer()\n }\n for _, route := range routers {\n d.Server.AddRouter(route)\n }\n}\n\nfunc (d *Daemon) Generate(req token.GenerateRequest) (string, error) {\n Claims := CustomClaims{\n req.Device,\n req.Unique,\n time.Now().Unix(),\n req.Addr,\n jwt.StandardClaims{\n ExpiresAt: time.Now().Add(time.Second * TOKEN_TTL).Unix(),\n Issuer: \"shuc324@gmail.com\",\n },\n }\n Token := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims)\n if Signed, err := Token.SignedString([]byte(d.Options.Secret)); err != nil {\n return \"\", err\n } else {\n err := d.Storage.LKeep(req.Unique, Signed, ALLOW_LOGIN_NUM, TOKEN_TTL)\n if err != nil {\n return \"\", err\n }\n return Signed, err\n }\n}\n\nfunc (d *Daemon) Auth(req token.AuthRequest) (interface{}, error) {\n Token, err := jwt.ParseWithClaims(\n req.JsonWebToken,\n &CustomClaims{},\n func(token *jwt.Token) (interface{}, error) {\n if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n return nil, fmt.Errorf(\"Unexpected signing method %v\", token.Header[\"alg\"])\n }\n return []byte(d.Options.Secret), nil\n })\n if err == nil && Token.Valid {\n if Claims, ok := Token.Claims.(*CustomClaims); ok {\n if d.Storage.LExist(Claims.Unique, req.JsonWebToken) {\n return Claims, nil\n }\n }\n }\n return nil, err\n}\n\nfunc NewStart(args Options) {\n var err error;\n \n if args.Version == true {\n fmt.Printf(\"JwtAuth version %s.\\n\", VERSION)\n os.Exit(0)\n }\n \n if args.Daemon == true {\n dCtx := daemon.Context{\n PidFileName: args.PidFile,\n PidFilePerm: 0644,\n LogFilePerm: 0640,\n Umask: 027,\n WorkDir: \"\/\",\n LogFileName: args.LogFile,\n }\n \n level, err := logrus.ParseLevel(args.LogLevel)\n if err == nil {\n logrus.SetLevel(level)\n logrus.SetFormatter(&logrus.TextFormatter{\n TimestampFormat: \"2006-01-02 15:04:05\",\n })\n } else {\n logrus.Fatal(err)\n }\n defer dCtx.Release()\n \n if child, err := dCtx.Reborn(); err != nil {\n logrus.Fatal(err)\n } else if child != nil {\n return\n }\n }\n \n Daemon := &Daemon{\n Options: &args,\n }\n \n Storage, err := Daemon.NewStorage()\n if err != nil {\n logrus.Error(err)\n os.Exit(0)\n }\n Daemon.Storage = *Storage\n \n if Daemon.Options.Secret == \"\" {\n logrus.Error(\"please specify the key.\")\n os.Exit(0)\n }\n \n Daemon.addRouter(RouteToken.NewRouter(Daemon))\n Daemon.Listen()\n}<commit_msg>fix bug<commit_after>package daemon\n\nimport (\n \"os\"\n \"reflect\"\n \"time\"\n \"fmt\"\n \"github.com\/sevlyar\/go-daemon\"\n \"github.com\/Sirupsen\/logrus\"\n \"github.com\/BluePecker\/JwtAuth\/server\/types\/token\"\n \"github.com\/BluePecker\/JwtAuth\/server\"\n \"github.com\/BluePecker\/JwtAuth\/storage\"\n \"github.com\/BluePecker\/JwtAuth\/server\/router\"\n _ \"github.com\/BluePecker\/JwtAuth\/storage\/redis\"\n \/\/_ \"github.com\/BluePecker\/JwtAuth\/storage\/ram\"\n \"github.com\/dgrijalva\/jwt-go\"\n RouteToken \"github.com\/BluePecker\/JwtAuth\/server\/router\/token\"\n)\n\nconst (\n TOKEN_TTL = 2 * 3600\n \n VERSION = \"1.0.0\"\n \n ALLOW_LOGIN_NUM = 3\n)\n\ntype Storage struct {\n Driver string\n Path string\n Host string\n Port int\n MaxRetries int\n Username string\n Password string\n PoolSize int\n Database string\n}\n\ntype Security struct {\n TLS bool\n Key string\n Cert string\n}\n\ntype Options struct {\n PidFile string\n LogFile string\n LogLevel string\n Port int\n Host string\n Daemon bool\n Version bool\n Security Security\n Storage Storage\n Secret string\n}\n\ntype Daemon struct {\n Options *Options\n Server *server.Server\n Storage storage.Driver\n}\n\ntype (\n CustomClaims struct {\n Device string `json:\"device\"`\n Unique string `json:\"unique\"`\n Timestamp int64 `json:\"timestamp\"`\n Addr string `json:\"addr\"`\n jwt.StandardClaims\n }\n)\n\nfunc (d *Daemon) storageOptionInject(p2 *storage.Option) {\n p1 := &(d.Options.Storage)\n u1 := reflect.ValueOf(p1).Elem()\n u2 := reflect.ValueOf(p2).Elem()\n \n for seq := 0; seq < u2.NumField(); seq++ {\n item := u2.Type().Field(seq)\n v1 := u1.FieldByName(item.Name)\n v2 := u2.FieldByName(item.Name)\n if v1.IsValid() {\n if v2.Type() == v1.Type() {\n v2.Set(v1)\n }\n }\n }\n}\n\nfunc (d *Daemon) NewStorage() (*storage.Driver, error) {\n option := &storage.Option{}\n d.storageOptionInject(option)\n driver, err := storage.New(d.Options.Storage.Driver, *option)\n return &driver, err\n}\n\nfunc (d *Daemon) NewServer() {\n d.Server = &server.Server{}\n}\n\nfunc (d *Daemon) Listen() {\n if d.Server == nil {\n d.NewServer()\n }\n \n options := server.Options{\n Host: d.Options.Host,\n Port: d.Options.Port,\n }\n \n if d.Options.Security.TLS {\n options.Tls = &server.TLS{\n Cert: d.Options.Security.Cert,\n Key: d.Options.Security.Key,\n }\n }\n \n d.Server.Accept(options)\n}\n\nfunc (d *Daemon) addRouter(routers... router.Router) {\n if d.Server == nil {\n d.NewServer()\n }\n for _, route := range routers {\n d.Server.AddRouter(route)\n }\n}\n\nfunc (d *Daemon) Generate(req token.GenerateRequest) (string, error) {\n Claims := CustomClaims{\n req.Device,\n req.Unique,\n time.Now().Unix(),\n req.Addr,\n jwt.StandardClaims{\n ExpiresAt: time.Now().Add(time.Second * TOKEN_TTL).Unix(),\n Issuer: \"shuc324@gmail.com\",\n },\n }\n Token := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims)\n if Signed, err := Token.SignedString([]byte(d.Options.Secret)); err != nil {\n return \"\", err\n } else {\n err := d.Storage.LKeep(req.Unique, Signed, ALLOW_LOGIN_NUM, TOKEN_TTL)\n if err != nil {\n return \"\", err\n }\n return Signed, err\n }\n}\n\nfunc (d *Daemon) Auth(req token.AuthRequest) (interface{}, error) {\n Token, err := jwt.ParseWithClaims(\n req.JsonWebToken,\n &CustomClaims{},\n func(token *jwt.Token) (interface{}, error) {\n if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n return nil, fmt.Errorf(\"Unexpected signing method %v\", token.Header[\"alg\"])\n }\n return []byte(d.Options.Secret), nil\n })\n if err == nil && Token.Valid {\n if Claims, ok := Token.Claims.(*CustomClaims); ok {\n if d.Storage.LExist(Claims.Unique, req.JsonWebToken) {\n return Claims, nil\n }\n }\n }\n return nil, err\n}\n\nfunc NewStart(args Options) {\n var err error;\n \n if args.Version == true {\n fmt.Printf(\"JwtAuth version %s.\\n\", VERSION)\n os.Exit(0)\n }\n \n if args.Daemon == true {\n dCtx := daemon.Context{\n PidFileName: args.PidFile,\n PidFilePerm: 0644,\n LogFilePerm: 0640,\n Umask: 027,\n WorkDir: \"\/\",\n LogFileName: args.LogFile,\n }\n \n level, err := logrus.ParseLevel(args.LogLevel)\n if err == nil {\n logrus.SetLevel(level)\n logrus.SetFormatter(&logrus.TextFormatter{\n TimestampFormat: \"2006-01-02 15:04:05\",\n })\n } else {\n logrus.Error(err)\n os.Exit(0)\n }\n defer dCtx.Release()\n \n if child, err := dCtx.Reborn(); err != nil {\n logrus.Error(err)\n os.Exit(0)\n } else if child != nil {\n return\n }\n }\n \n Daemon := &Daemon{\n Options: &args,\n }\n \n Storage, err := Daemon.NewStorage()\n if err != nil {\n logrus.Error(err)\n os.Exit(0)\n }\n Daemon.Storage = *Storage\n \n if Daemon.Options.Secret == \"\" {\n logrus.Error(\"please specify the key.\")\n os.Exit(0)\n }\n \n Daemon.addRouter(RouteToken.NewRouter(Daemon))\n Daemon.Listen()\n}<|endoftext|>"} {"text":"<commit_before>package cliconfig\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/buildkite\/agent\/logger\"\n\t\"github.com\/buildkite\/agent\/utils\"\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/oleiade\/reflections\"\n)\n\ntype Loader struct {\n\t\/\/ The context that is passed when using a codegangsta\/cli action\n\tCLI *cli.Context\n\n\t\/\/ The struct that the config values will be loaded into\n\tConfig interface{}\n\n\t\/\/ A slice of paths to files that should be used as config files\n\tDefaultConfigFilePaths []string\n\n\t\/\/ The file that was used when loading this configuration\n\tFile *File\n}\n\nvar argCliNameRegexp = regexp.MustCompile(`arg:(\\d+)`)\n\n\/\/ A shortcut for loading a config from the CLI\nfunc Load(c *cli.Context, cfg interface{}) error {\n\tl := Loader{CLI: c, Config: cfg}\n\n\treturn l.Load()\n}\n\n\/\/ Loads the config from the CLI and config files that are present.\nfunc (l *Loader) Load() error {\n\t\/\/ Try and find a config file, either passed in the command line using\n\t\/\/ --config, or in one of the default configuration file paths.\n\tif l.CLI.String(\"config\") != \"\" {\n\t\tfile := File{Path: l.CLI.String(\"config\")}\n\n\t\t\/\/ Because this file was passed in manually, we should throw an error\n\t\t\/\/ if it doesn't exist.\n\t\tif file.Exists() {\n\t\t\tl.File = &file\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"A configuration file could not be found at: %s\", file.AbsolutePath())\n\t\t}\n\t} else if len(l.DefaultConfigFilePaths) > 0 {\n\t\tfor _, path := range l.DefaultConfigFilePaths {\n\t\t\tfile := File{Path: path}\n\n\t\t\t\/\/ If the config file exists, save it to the loader and\n\t\t\t\/\/ don't bother checking the others.\n\t\t\tif file.Exists() {\n\t\t\t\tl.File = &file\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ If a file was found, then we should load it\n\tif l.File != nil {\n\t\t\/\/ Attempt to load the config file we've found\n\t\tif err := l.File.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Now it's onto actually setting the fields. We start by getting all\n\t\/\/ the fields from the configuration interface\n\tvar fields []string\n\tfields, _ = reflections.Fields(l.Config)\n\n\t\/\/ Loop through each of the fields, and look for tags and handle them\n\t\/\/ appropriately\n\tfor _, fieldName := range fields {\n\t\t\/\/ Start by loading the value from the CLI context if the tag\n\t\t\/\/ exists\n\t\tcliName, _ := reflections.GetFieldTag(l.Config, fieldName, \"cli\")\n\t\tif cliName != \"\" {\n\t\t\t\/\/ Load the value from the CLI Context\n\t\t\terr := l.setFieldValueFromCLI(fieldName, cliName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Are there any normalizations we need to make?\n\t\tnormalization, _ := reflections.GetFieldTag(l.Config, fieldName, \"normalize\")\n\t\tif normalization != \"\" {\n\t\t\t\/\/ Apply the normalization\n\t\t\terr := l.normalizeField(fieldName, normalization)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for field rename deprecations\n\t\trenamedToFieldName, _ := reflections.GetFieldTag(l.Config, fieldName, \"deprecated-and-renamed-to\")\n\t\tif renamedToFieldName != \"\" {\n\t\t\t\/\/ If the deprecated field's value isn't empty, then we\n\t\t\t\/\/ log a message, and set the proper config for them.\n\t\t\tif !l.fieldValueIsEmpty(fieldName) {\n\t\t\t\trenamedFieldCliName, _ := reflections.GetFieldTag(l.Config, renamedToFieldName, \"cli\")\n\t\t\t\tif renamedFieldCliName != \"\" {\n\t\t\t\t\tlogger.Warn(\"The config option `%s` has been renamed to `%s`. Please update your configuration.\", cliName, renamedFieldCliName)\n\t\t\t\t}\n\n\t\t\t\t\/\/ Error if they specify the deprecated version and the new version\n\t\t\t\trenamedFieldValue, _ := reflections.GetField(l.Config, renamedToFieldName)\n\t\t\t\tif renamedFieldValue != \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"Can't set config option `%s` because `%s=%v` has already been set\", cliName, renamedFieldCliName, renamedFieldValue)\n\t\t\t\t}\n\n\t\t\t\t\/\/ Fetch the value of the deprecated config, and set the renamed\n\t\t\t\t\/\/ config based on its value\n\t\t\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\n\t\t\t\tif value != nil {\n\t\t\t\t\terr := reflections.SetField(l.Config, renamedToFieldName, value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"Could not set value `%s` to field `%s` (%s)\", value, renamedToFieldName, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for field deprecation\n\t\tdeprecationError, _ := reflections.GetFieldTag(l.Config, fieldName, \"deprecated\")\n\t\tif deprecationError != \"\" {\n\t\t\t\/\/ If the deprecated field's value isn't empty, then we\n\t\t\t\/\/ return the deprecation error message.\n\t\t\tif !l.fieldValueIsEmpty(fieldName) {\n\t\t\t\treturn fmt.Errorf(deprecationError)\n\t\t\t}\n\t\t}\n\t\t\n\t\t\/\/ Perform validations\n\t\tvalidationRules, _ := reflections.GetFieldTag(l.Config, fieldName, \"validate\")\n\t\tif validationRules != \"\" {\n\t\t\t\/\/ Determine the label for the field\n\t\t\tlabel, _ := reflections.GetFieldTag(l.Config, fieldName, \"label\")\n\t\t\tif label == \"\" {\n\t\t\t\t\/\/ Use the cli name if it exists, but if it\n\t\t\t\t\/\/ doesn't, just default to the structs field\n\t\t\t\t\/\/ name. Not great, but works!\n\t\t\t\tif cliName != \"\" {\n\t\t\t\t\tlabel = cliName\n\t\t\t\t} else {\n\t\t\t\t\tlabel = fieldName\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Validate the fieid, and if it fails, return it's\n\t\t\t\/\/ error.\n\t\t\terr := l.validateField(fieldName, label, validationRules)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) setFieldValueFromCLI(fieldName string, cliName string) error {\n\t\/\/ Get the kind of field we need to set\n\tfieldKind, err := reflections.GetFieldKind(l.Config, fieldName)\n\tif err != nil {\n\t\treturn fmt.Errorf(`Failed to get the type of struct field %s`, fieldName)\n\t}\n\n\tvar value interface{}\n\n\t\/\/ See the if the cli option is using the arg format i.e. (arg:1)\n\targMatch := argCliNameRegexp.FindStringSubmatch(cliName)\n\tif len(argMatch) > 0 {\n\t\targNum := argMatch[1]\n\n\t\t\/\/ Convert the arg position to an integer\n\t\targIndex, err := strconv.Atoi(argNum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert string to int: %s\", err)\n\t\t}\n\n\t\t\/\/ Only set the value if the args are long enough for\n\t\t\/\/ the position to exist.\n\t\tif len(l.CLI.Args()) > argIndex {\n\t\t\tvalue = l.CLI.Args()[argIndex]\n\t\t}\n\n\t\t\/\/ Otherwise see if we can pull it from an environment variable\n\t\t\/\/ (and fail gracefuly if we can't)\n\t\tif value == nil {\n\t\t\tenvName, err := reflections.GetFieldTag(l.Config, fieldName, \"env\")\n\t\t\tif err == nil {\n\t\t\t\tif envValue, envSet := os.LookupEnv(envName); envSet {\n\t\t\t\t\tvalue = envValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ If the cli name didn't have the special format, then we need to\n\t\t\/\/ either load from the context's flags, or from a config file.\n\n\t\t\/\/ We start by defaulting the value to what ever was provided\n\t\t\/\/ by the configuration file\n\t\tif l.File != nil {\n\t\t\tif configFileValue, ok := l.File.Config[cliName]; ok {\n\t\t\t\t\/\/ Convert the config file value to it's correct type\n\t\t\t\tif fieldKind == reflect.String {\n\t\t\t\t\tvalue = configFileValue\n\t\t\t\t} else if fieldKind == reflect.Slice {\n\t\t\t\t\tvalue = strings.Split(configFileValue, \",\")\n\t\t\t\t} else if fieldKind == reflect.Bool {\n\t\t\t\t\tvalue, _ = strconv.ParseBool(configFileValue)\n\t\t\t\t} else if fieldKind == reflect.Int {\n\t\t\t\t\tvalue, _ = strconv.Atoi(configFileValue)\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Unable to convert string to type %s\", fieldKind)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If a value hasn't been found in a config file, but there\n\t\t\/\/ _is_ one provided by the CLI context, then use that.\n\t\tif value == nil || l.cliValueIsSet(cliName) {\n\t\t\tif fieldKind == reflect.String {\n\t\t\t\tvalue = l.CLI.String(cliName)\n\t\t\t} else if fieldKind == reflect.Slice {\n\t\t\t\tvalue = l.CLI.StringSlice(cliName)\n\t\t\t} else if fieldKind == reflect.Bool {\n\t\t\t\tvalue = l.CLI.Bool(cliName)\n\t\t\t} else if fieldKind == reflect.Int {\n\t\t\t\tvalue = l.CLI.Int(cliName)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to handle type: %s\", fieldKind)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Set the value to the cfg\n\tif value != nil {\n\t\terr = reflections.SetField(l.Config, fieldName, value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not set value `%s` to field `%s` (%s)\", value, fieldName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) Errorf(format string, v ...interface{}) error {\n\tsuffix := fmt.Sprintf(\" See: `%s %s --help`\", l.CLI.App.Name, l.CLI.Command.Name)\n\n\treturn fmt.Errorf(format+suffix, v...)\n}\n\nfunc (l Loader) cliValueIsSet(cliName string) bool {\n\tif l.CLI.IsSet(cliName) {\n\t\treturn true\n\t} else {\n\t\t\/\/ cli.Context#IsSet only checks to see if the command was set via the cli, not\n\t\t\/\/ via the environment. So here we do some hacks to find out the name of the\n\t\t\/\/ EnvVar, and return true if it was set.\n\t\tfor _, flag := range l.CLI.Command.Flags {\n\t\t\tname, _ := reflections.GetField(flag, \"Name\")\n\t\t\tenvVar, _ := reflections.GetField(flag, \"EnvVar\")\n\t\t\tif name == cliName && envVar != \"\" {\n\t\t\t\t\/\/ Make sure envVar is a string\n\t\t\t\tif envVarStr, ok := envVar.(string); ok {\n\t\t\t\t\tenvVarStr = strings.TrimSpace(string(envVarStr))\n\n\t\t\t\t\treturn os.Getenv(envVarStr) != \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (l Loader) fieldValueIsEmpty(fieldName string) bool {\n\t\/\/ We need to use the field kind to determine the type of empty test.\n\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\tfieldKind, _ := reflections.GetFieldKind(l.Config, fieldName)\n\n\tif fieldKind == reflect.String {\n\t\treturn value == \"\"\n\t} else if fieldKind == reflect.Slice {\n\t\tv := reflect.ValueOf(value)\n\t\treturn v.Len() == 0\n\t} else if fieldKind == reflect.Bool {\n\t\treturn value == false\n\t} else {\n\t\tpanic(fmt.Sprintf(\"Can't determine empty-ness for field type %s\", fieldKind))\n\t}\n\n\treturn false\n}\n\nfunc (l Loader) validateField(fieldName string, label string, validationRules string) error {\n\t\/\/ Split up the validation rules\n\trules := strings.Split(validationRules, \",\")\n\n\t\/\/ Loop through each rule, and perform it\n\tfor _, rule := range rules {\n\t\tif rule == \"required\" {\n\t\t\tif l.fieldValueIsEmpty(fieldName) {\n\t\t\t\treturn l.Errorf(\"Missing %s.\", label)\n\t\t\t}\n\t\t} else if rule == \"file-exists\" {\n\t\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\n\t\t\t\/\/ Make sure the value is converted to a string\n\t\t\tif valueAsString, ok := value.(string); ok {\n\t\t\t\t\/\/ Return an error if the path doesn't exist\n\t\t\t\tif _, err := os.Stat(valueAsString); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not find %s located at %s\", label, value)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unknown config validation rule `%s`\", rule)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) normalizeField(fieldName string, normalization string) error {\n\tif normalization == \"filepath\" {\n\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\t\tfieldKind, _ := reflections.GetFieldKind(l.Config, fieldName)\n\n\t\t\/\/ Make sure we're normalizing a string filed\n\t\tif fieldKind != reflect.String {\n\t\t\treturn fmt.Errorf(\"filepath normalization only works on string fields\")\n\t\t}\n\n\t\t\/\/ Normalize the field to be a filepath\n\t\tif valueAsString, ok := value.(string); ok {\n\t\t\tnormalizedPath := utils.NormalizeFilePath(valueAsString)\n\t\t\tif err := reflections.SetField(l.Config, fieldName, normalizedPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Unknown normalization `%s`\", normalization)\n\t}\n\n\treturn nil\n}\n<commit_msg>Use proper emptiness check, and show the clashing values too<commit_after>package cliconfig\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/buildkite\/agent\/logger\"\n\t\"github.com\/buildkite\/agent\/utils\"\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/oleiade\/reflections\"\n)\n\ntype Loader struct {\n\t\/\/ The context that is passed when using a codegangsta\/cli action\n\tCLI *cli.Context\n\n\t\/\/ The struct that the config values will be loaded into\n\tConfig interface{}\n\n\t\/\/ A slice of paths to files that should be used as config files\n\tDefaultConfigFilePaths []string\n\n\t\/\/ The file that was used when loading this configuration\n\tFile *File\n}\n\nvar argCliNameRegexp = regexp.MustCompile(`arg:(\\d+)`)\n\n\/\/ A shortcut for loading a config from the CLI\nfunc Load(c *cli.Context, cfg interface{}) error {\n\tl := Loader{CLI: c, Config: cfg}\n\n\treturn l.Load()\n}\n\n\/\/ Loads the config from the CLI and config files that are present.\nfunc (l *Loader) Load() error {\n\t\/\/ Try and find a config file, either passed in the command line using\n\t\/\/ --config, or in one of the default configuration file paths.\n\tif l.CLI.String(\"config\") != \"\" {\n\t\tfile := File{Path: l.CLI.String(\"config\")}\n\n\t\t\/\/ Because this file was passed in manually, we should throw an error\n\t\t\/\/ if it doesn't exist.\n\t\tif file.Exists() {\n\t\t\tl.File = &file\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"A configuration file could not be found at: %s\", file.AbsolutePath())\n\t\t}\n\t} else if len(l.DefaultConfigFilePaths) > 0 {\n\t\tfor _, path := range l.DefaultConfigFilePaths {\n\t\t\tfile := File{Path: path}\n\n\t\t\t\/\/ If the config file exists, save it to the loader and\n\t\t\t\/\/ don't bother checking the others.\n\t\t\tif file.Exists() {\n\t\t\t\tl.File = &file\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ If a file was found, then we should load it\n\tif l.File != nil {\n\t\t\/\/ Attempt to load the config file we've found\n\t\tif err := l.File.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Now it's onto actually setting the fields. We start by getting all\n\t\/\/ the fields from the configuration interface\n\tvar fields []string\n\tfields, _ = reflections.Fields(l.Config)\n\n\t\/\/ Loop through each of the fields, and look for tags and handle them\n\t\/\/ appropriately\n\tfor _, fieldName := range fields {\n\t\t\/\/ Start by loading the value from the CLI context if the tag\n\t\t\/\/ exists\n\t\tcliName, _ := reflections.GetFieldTag(l.Config, fieldName, \"cli\")\n\t\tif cliName != \"\" {\n\t\t\t\/\/ Load the value from the CLI Context\n\t\t\terr := l.setFieldValueFromCLI(fieldName, cliName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Are there any normalizations we need to make?\n\t\tnormalization, _ := reflections.GetFieldTag(l.Config, fieldName, \"normalize\")\n\t\tif normalization != \"\" {\n\t\t\t\/\/ Apply the normalization\n\t\t\terr := l.normalizeField(fieldName, normalization)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for field rename deprecations\n\t\trenamedToFieldName, _ := reflections.GetFieldTag(l.Config, fieldName, \"deprecated-and-renamed-to\")\n\t\tif renamedToFieldName != \"\" {\n\t\t\t\/\/ If the deprecated field's value isn't empty, then we\n\t\t\t\/\/ log a message, and set the proper config for them.\n\t\t\tif !l.fieldValueIsEmpty(fieldName) {\n\t\t\t\trenamedFieldCliName, _ := reflections.GetFieldTag(l.Config, renamedToFieldName, \"cli\")\n\t\t\t\tif renamedFieldCliName != \"\" {\n\t\t\t\t\tlogger.Warn(\"The config option `%s` has been renamed to `%s`. Please update your configuration.\", cliName, renamedFieldCliName)\n\t\t\t\t}\n\n\t\t\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\n\t\t\t\t\/\/ Error if they specify the deprecated version and the new version\n\t\t\t\tif !l.fieldValueIsEmpty(renamedToFieldName) {\n\t\t\t\t\trenamedFieldValue, _ := reflections.GetField(l.Config, renamedToFieldName)\n\t\t\t\t\treturn fmt.Errorf(\"Can't set config option `%s=%v` because `%s=%v` has already been set\", cliName, value, renamedFieldCliName, renamedFieldValue)\n\t\t\t\t}\n\n\t\t\t\t\/\/ Set the proper config based on the deprecated value\n\t\t\t\tif value != nil {\n\t\t\t\t\terr := reflections.SetField(l.Config, renamedToFieldName, value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"Could not set value `%s` to field `%s` (%s)\", value, renamedToFieldName, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for field deprecation\n\t\tdeprecationError, _ := reflections.GetFieldTag(l.Config, fieldName, \"deprecated\")\n\t\tif deprecationError != \"\" {\n\t\t\t\/\/ If the deprecated field's value isn't empty, then we\n\t\t\t\/\/ return the deprecation error message.\n\t\t\tif !l.fieldValueIsEmpty(fieldName) {\n\t\t\t\treturn fmt.Errorf(deprecationError)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Perform validations\n\t\tvalidationRules, _ := reflections.GetFieldTag(l.Config, fieldName, \"validate\")\n\t\tif validationRules != \"\" {\n\t\t\t\/\/ Determine the label for the field\n\t\t\tlabel, _ := reflections.GetFieldTag(l.Config, fieldName, \"label\")\n\t\t\tif label == \"\" {\n\t\t\t\t\/\/ Use the cli name if it exists, but if it\n\t\t\t\t\/\/ doesn't, just default to the structs field\n\t\t\t\t\/\/ name. Not great, but works!\n\t\t\t\tif cliName != \"\" {\n\t\t\t\t\tlabel = cliName\n\t\t\t\t} else {\n\t\t\t\t\tlabel = fieldName\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Validate the fieid, and if it fails, return it's\n\t\t\t\/\/ error.\n\t\t\terr := l.validateField(fieldName, label, validationRules)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) setFieldValueFromCLI(fieldName string, cliName string) error {\n\t\/\/ Get the kind of field we need to set\n\tfieldKind, err := reflections.GetFieldKind(l.Config, fieldName)\n\tif err != nil {\n\t\treturn fmt.Errorf(`Failed to get the type of struct field %s`, fieldName)\n\t}\n\n\tvar value interface{}\n\n\t\/\/ See the if the cli option is using the arg format i.e. (arg:1)\n\targMatch := argCliNameRegexp.FindStringSubmatch(cliName)\n\tif len(argMatch) > 0 {\n\t\targNum := argMatch[1]\n\n\t\t\/\/ Convert the arg position to an integer\n\t\targIndex, err := strconv.Atoi(argNum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert string to int: %s\", err)\n\t\t}\n\n\t\t\/\/ Only set the value if the args are long enough for\n\t\t\/\/ the position to exist.\n\t\tif len(l.CLI.Args()) > argIndex {\n\t\t\tvalue = l.CLI.Args()[argIndex]\n\t\t}\n\n\t\t\/\/ Otherwise see if we can pull it from an environment variable\n\t\t\/\/ (and fail gracefuly if we can't)\n\t\tif value == nil {\n\t\t\tenvName, err := reflections.GetFieldTag(l.Config, fieldName, \"env\")\n\t\t\tif err == nil {\n\t\t\t\tif envValue, envSet := os.LookupEnv(envName); envSet {\n\t\t\t\t\tvalue = envValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ If the cli name didn't have the special format, then we need to\n\t\t\/\/ either load from the context's flags, or from a config file.\n\n\t\t\/\/ We start by defaulting the value to what ever was provided\n\t\t\/\/ by the configuration file\n\t\tif l.File != nil {\n\t\t\tif configFileValue, ok := l.File.Config[cliName]; ok {\n\t\t\t\t\/\/ Convert the config file value to it's correct type\n\t\t\t\tif fieldKind == reflect.String {\n\t\t\t\t\tvalue = configFileValue\n\t\t\t\t} else if fieldKind == reflect.Slice {\n\t\t\t\t\tvalue = strings.Split(configFileValue, \",\")\n\t\t\t\t} else if fieldKind == reflect.Bool {\n\t\t\t\t\tvalue, _ = strconv.ParseBool(configFileValue)\n\t\t\t\t} else if fieldKind == reflect.Int {\n\t\t\t\t\tvalue, _ = strconv.Atoi(configFileValue)\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"Unable to convert string to type %s\", fieldKind)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If a value hasn't been found in a config file, but there\n\t\t\/\/ _is_ one provided by the CLI context, then use that.\n\t\tif value == nil || l.cliValueIsSet(cliName) {\n\t\t\tif fieldKind == reflect.String {\n\t\t\t\tvalue = l.CLI.String(cliName)\n\t\t\t} else if fieldKind == reflect.Slice {\n\t\t\t\tvalue = l.CLI.StringSlice(cliName)\n\t\t\t} else if fieldKind == reflect.Bool {\n\t\t\t\tvalue = l.CLI.Bool(cliName)\n\t\t\t} else if fieldKind == reflect.Int {\n\t\t\t\tvalue = l.CLI.Int(cliName)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unable to handle type: %s\", fieldKind)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Set the value to the cfg\n\tif value != nil {\n\t\terr = reflections.SetField(l.Config, fieldName, value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not set value `%s` to field `%s` (%s)\", value, fieldName, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) Errorf(format string, v ...interface{}) error {\n\tsuffix := fmt.Sprintf(\" See: `%s %s --help`\", l.CLI.App.Name, l.CLI.Command.Name)\n\n\treturn fmt.Errorf(format+suffix, v...)\n}\n\nfunc (l Loader) cliValueIsSet(cliName string) bool {\n\tif l.CLI.IsSet(cliName) {\n\t\treturn true\n\t} else {\n\t\t\/\/ cli.Context#IsSet only checks to see if the command was set via the cli, not\n\t\t\/\/ via the environment. So here we do some hacks to find out the name of the\n\t\t\/\/ EnvVar, and return true if it was set.\n\t\tfor _, flag := range l.CLI.Command.Flags {\n\t\t\tname, _ := reflections.GetField(flag, \"Name\")\n\t\t\tenvVar, _ := reflections.GetField(flag, \"EnvVar\")\n\t\t\tif name == cliName && envVar != \"\" {\n\t\t\t\t\/\/ Make sure envVar is a string\n\t\t\t\tif envVarStr, ok := envVar.(string); ok {\n\t\t\t\t\tenvVarStr = strings.TrimSpace(string(envVarStr))\n\n\t\t\t\t\treturn os.Getenv(envVarStr) != \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (l Loader) fieldValueIsEmpty(fieldName string) bool {\n\t\/\/ We need to use the field kind to determine the type of empty test.\n\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\tfieldKind, _ := reflections.GetFieldKind(l.Config, fieldName)\n\n\tif fieldKind == reflect.String {\n\t\treturn value == \"\"\n\t} else if fieldKind == reflect.Slice {\n\t\tv := reflect.ValueOf(value)\n\t\treturn v.Len() == 0\n\t} else if fieldKind == reflect.Bool {\n\t\treturn value == false\n\t} else {\n\t\tpanic(fmt.Sprintf(\"Can't determine empty-ness for field type %s\", fieldKind))\n\t}\n\n\treturn false\n}\n\nfunc (l Loader) validateField(fieldName string, label string, validationRules string) error {\n\t\/\/ Split up the validation rules\n\trules := strings.Split(validationRules, \",\")\n\n\t\/\/ Loop through each rule, and perform it\n\tfor _, rule := range rules {\n\t\tif rule == \"required\" {\n\t\t\tif l.fieldValueIsEmpty(fieldName) {\n\t\t\t\treturn l.Errorf(\"Missing %s.\", label)\n\t\t\t}\n\t\t} else if rule == \"file-exists\" {\n\t\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\n\t\t\t\/\/ Make sure the value is converted to a string\n\t\t\tif valueAsString, ok := value.(string); ok {\n\t\t\t\t\/\/ Return an error if the path doesn't exist\n\t\t\t\tif _, err := os.Stat(valueAsString); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not find %s located at %s\", label, value)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unknown config validation rule `%s`\", rule)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Loader) normalizeField(fieldName string, normalization string) error {\n\tif normalization == \"filepath\" {\n\t\tvalue, _ := reflections.GetField(l.Config, fieldName)\n\t\tfieldKind, _ := reflections.GetFieldKind(l.Config, fieldName)\n\n\t\t\/\/ Make sure we're normalizing a string filed\n\t\tif fieldKind != reflect.String {\n\t\t\treturn fmt.Errorf(\"filepath normalization only works on string fields\")\n\t\t}\n\n\t\t\/\/ Normalize the field to be a filepath\n\t\tif valueAsString, ok := value.(string); ok {\n\t\t\tnormalizedPath := utils.NormalizeFilePath(valueAsString)\n\t\t\tif err := reflections.SetField(l.Config, fieldName, normalizedPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Unknown normalization `%s`\", normalization)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\n\/*\n Copyleft 2016 Alexander I.Grafov <grafov@gmail.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n ॐ तारे तुत्तारे तुरे स्व\n*\/\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/grafov\/autograf\/grafana\"\n)\n\n\/\/ BoardProperties keeps metadata of a dashboard.\ntype BoardProperties struct {\n\tIsStarred bool `json:\"isStarred,omitempty\"`\n\tIsHome bool `json:\"isHome,omitempty\"`\n\tIsSnapshot bool `json:\"isSnapshot,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tCanSave bool `json:\"canSave\"`\n\tCanEdit bool `json:\"canEdit\"`\n\tCanStar bool `json:\"canStar\"`\n\tSlug string `json:\"slug\"`\n\tExpires time.Time `json:\"expires\"`\n\tCreated time.Time `json:\"created\"`\n\tUpdated time.Time `json:\"updated\"`\n\tUpdatedBy string `json:\"updatedBy\"`\n\tCreatedBy string `json:\"createdBy\"`\n\tVersion int `json:\"version\"`\n}\n\n\/\/ GetDashboard loads a dashboard from Grafana instance along with metadata for a dashboard.\nfunc (r *Instance) GetDashboard(slug string) (grafana.Board, BoardProperties, error) {\n\tvar (\n\t\traw []byte\n\t\tresult struct {\n\t\t\tMeta BoardProperties `json:\"meta\"`\n\t\t\tBoard grafana.Board `json:\"dashboard\"`\n\t\t}\n\t\tcode int\n\t\terr error\n\t)\n\tslug, _ = setPrefix(slug)\n\tif raw, code, err = r.get(fmt.Sprintf(\"api\/dashboards\/%s\", slug), nil); err != nil {\n\t\treturn grafana.Board{}, BoardProperties{}, err\n\t}\n\tif code != 200 {\n\t\treturn grafana.Board{}, BoardProperties{}, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\tdec := json.NewDecoder(bytes.NewReader(raw))\n\tdec.UseNumber()\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn grafana.Board{}, BoardProperties{}, fmt.Errorf(\"unmarshal board with meta: %s\\n%s\", err, raw)\n\t}\n\treturn result.Board, result.Meta, err\n}\n\n\/\/ GetRawDashboard loads a dashboard JSON from Grafana instance along with metadata for a dashboard.\n\/\/ Contrary to GetDashboard() it not unpack loaded JSON to grafana.Board structure. Instead it\n\/\/ returns it as byte slice. It guarantee that data of dashboard returned untouched by conversion\n\/\/ with grafana.Board so no matter how properly fields from a current version of Grafana mapped to\n\/\/ our grafana.Board fields. It useful for backuping purposes when you want a dashboard exactly with\n\/\/ same data as it exported by Grafana.\nfunc (r *Instance) GetRawDashboard(slug string) ([]byte, BoardProperties, error) {\n\tvar (\n\t\traw []byte\n\t\tresult struct {\n\t\t\tMeta BoardProperties `json:\"meta\"`\n\t\t\tBoard json.RawMessage `json:\"dashboard\"`\n\t\t}\n\t\tcode int\n\t\terr error\n\t)\n\tslug, _ = setPrefix(slug)\n\tif raw, code, err = r.get(fmt.Sprintf(\"api\/dashboards\/%s\", slug), nil); err != nil {\n\t\treturn nil, BoardProperties{}, err\n\t}\n\tif code != 200 {\n\t\treturn nil, BoardProperties{}, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\tdec := json.NewDecoder(bytes.NewReader(raw))\n\tdec.UseNumber()\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn nil, BoardProperties{}, fmt.Errorf(\"unmarshal board with meta: %s\\n%s\", err, raw)\n\t}\n\treturn []byte(result.Board), result.Meta, err\n}\n\n\/\/ FoundBoard keeps result of search with metadata of a dashboard.\ntype FoundBoard struct {\n\tID uint `json:\"id\"`\n\tTitle string `json:\"title\"`\n\tURI string `json:\"uri\"`\n\tType string `json:\"type\"`\n\tTags []string `json:\"tags\"`\n\tIsStarred bool `json:\"isStarred\"`\n}\n\n\/\/ SearchDashboards search dashboards by substring of their title. It allows restrict the result set with\n\/\/ only starred dashboards and only for tags (logical OR applied to multiple tags).\nfunc (r *Instance) SearchDashboards(query string, starred bool, tags ...string) ([]FoundBoard, error) {\n\tvar (\n\t\traw []byte\n\t\tboards []FoundBoard\n\t\tcode int\n\t\terr error\n\t)\n\tu := url.URL{}\n\tq := u.Query()\n\tif query != \"\" {\n\t\tq.Set(\"query\", query)\n\t}\n\tif starred {\n\t\tq.Set(\"starred\", \"true\")\n\t}\n\tfor _, tag := range tags {\n\t\tq.Add(\"tag\", tag)\n\t}\n\tif raw, code, err = r.get(\"api\/search\", q); err != nil {\n\t\treturn nil, err\n\t}\n\tif code != 200 {\n\t\treturn nil, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\terr = json.Unmarshal(raw, &boards)\n\treturn boards, err\n}\n\n\/\/ SetDashboard updates existing dashboard or creates a new one.\n\/\/ Set dasboard ID to nil to create a new dashboard.\n\/\/ Set overwrite to true if you want to overwrite existing dashboard with\n\/\/ newer version or with same dashboard title.\nfunc (r *Instance) SetDashboard(board grafana.Board, overwrite bool) error {\n\tvar (\n\t\tisBoardFromDB bool\n\t\tnewBoard struct {\n\t\t\tDashboard grafana.Board `json:\"dashboard\"`\n\t\t\tOverwrite bool `json:\"overwrite\"`\n\t\t}\n\t\traw []byte\n\t\tresp StatusMessage\n\t\tcode int\n\t\terr error\n\t)\n\tif board.Slug, isBoardFromDB = cleanPrefix(board.Slug); !isBoardFromDB {\n\t\treturn errors.New(\"only database dashboard (with 'db\/' prefix in a slug) can be set\")\n\t}\n\tnewBoard.Dashboard = board\n\tnewBoard.Overwrite = overwrite\n\tif !overwrite {\n\t\tnewBoard.Dashboard.ID = 0\n\t}\n\tif raw, err = json.Marshal(newBoard); err != nil {\n\t\treturn err\n\t}\n\tif raw, code, err = r.post(\"api\/dashboards\/db\", nil, raw); err != nil {\n\t\treturn err\n\t}\n\tif err = json.Unmarshal(raw, &resp); err != nil {\n\t\treturn err\n\t}\n\tswitch code {\n\tcase 401:\n\t\treturn fmt.Errorf(\"%d %s\", code, *resp.Message)\n\tcase 412:\n\t\treturn fmt.Errorf(\"%d %s\", code, *resp.Message)\n\t}\n\treturn nil\n}\n\n\/\/ DeleteDashboard deletes dashboard that selected by slug string.\nfunc (r *Instance) DeleteDashboard(slug string) (StatusMessage, error) {\n\tvar (\n\t\tisBoardFromDB bool\n\t\traw []byte\n\t\treply StatusMessage\n\t\terr error\n\t)\n\tif slug, isBoardFromDB = setPrefix(slug); !isBoardFromDB {\n\t\treturn StatusMessage{}, errors.New(\"only database dashboards (with 'db\/' prefix in a slug) can be removed\")\n\t}\n\tif raw, err = r.delete(fmt.Sprintf(\"api\/dashboards\/db\/%s\", slug)); err != nil {\n\t\treturn StatusMessage{}, err\n\t}\n\terr = json.Unmarshal(raw, &reply)\n\treturn reply, err\n}\n\n\/\/ implicitely use dashboards from Grafana DB not from a file system\nfunc setPrefix(slug string) (string, bool) {\n\tif strings.HasPrefix(slug, \"db\") {\n\t\treturn slug, true\n\t}\n\tif strings.HasPrefix(slug, \"file\") {\n\t\treturn slug, false\n\t}\n\treturn fmt.Sprintf(\"db\/%s\", slug), true\n}\n\n\/\/ assume we use database dashboard by default\nfunc cleanPrefix(slug string) (string, bool) {\n\tif strings.HasPrefix(slug, \"db\") {\n\t\treturn slug[3:], true\n\t}\n\tif strings.HasPrefix(slug, \"file\") {\n\t\treturn slug[3:], false\n\t}\n\treturn fmt.Sprintf(\"%s\", slug), true\n}\n<commit_msg>Add docs about file and db dashboards.<commit_after>package client\n\n\/*\n Copyleft 2016 Alexander I.Grafov <grafov@gmail.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n ॐ तारे तुत्तारे तुरे स्व\n*\/\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/grafov\/autograf\/grafana\"\n)\n\n\/\/ BoardProperties keeps metadata of a dashboard.\ntype BoardProperties struct {\n\tIsStarred bool `json:\"isStarred,omitempty\"`\n\tIsHome bool `json:\"isHome,omitempty\"`\n\tIsSnapshot bool `json:\"isSnapshot,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tCanSave bool `json:\"canSave\"`\n\tCanEdit bool `json:\"canEdit\"`\n\tCanStar bool `json:\"canStar\"`\n\tSlug string `json:\"slug\"`\n\tExpires time.Time `json:\"expires\"`\n\tCreated time.Time `json:\"created\"`\n\tUpdated time.Time `json:\"updated\"`\n\tUpdatedBy string `json:\"updatedBy\"`\n\tCreatedBy string `json:\"createdBy\"`\n\tVersion int `json:\"version\"`\n}\n\n\/\/ GetDashboard loads a dashboard from Grafana instance along with metadata for a dashboard.\n\/\/ For dashboards from a filesystem set \"file\/\" prefix for slug. By default dashboards from\n\/\/ a database assumed. Database dashboards may have \"db\/\" prefix or may have not, it will\n\/\/ be appended automatically.\nfunc (r *Instance) GetDashboard(slug string) (grafana.Board, BoardProperties, error) {\n\tvar (\n\t\traw []byte\n\t\tresult struct {\n\t\t\tMeta BoardProperties `json:\"meta\"`\n\t\t\tBoard grafana.Board `json:\"dashboard\"`\n\t\t}\n\t\tcode int\n\t\terr error\n\t)\n\tslug, _ = setPrefix(slug)\n\tif raw, code, err = r.get(fmt.Sprintf(\"api\/dashboards\/%s\", slug), nil); err != nil {\n\t\treturn grafana.Board{}, BoardProperties{}, err\n\t}\n\tif code != 200 {\n\t\treturn grafana.Board{}, BoardProperties{}, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\tdec := json.NewDecoder(bytes.NewReader(raw))\n\tdec.UseNumber()\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn grafana.Board{}, BoardProperties{}, fmt.Errorf(\"unmarshal board with meta: %s\\n%s\", err, raw)\n\t}\n\treturn result.Board, result.Meta, err\n}\n\n\/\/ GetRawDashboard loads a dashboard JSON from Grafana instance along with metadata for a dashboard.\n\/\/ Contrary to GetDashboard() it not unpack loaded JSON to grafana.Board structure. Instead it\n\/\/ returns it as byte slice. It guarantee that data of dashboard returned untouched by conversion\n\/\/ with grafana.Board so no matter how properly fields from a current version of Grafana mapped to\n\/\/ our grafana.Board fields. It useful for backuping purposes when you want a dashboard exactly with\n\/\/ same data as it exported by Grafana.\n\/\/\n\/\/ For dashboards from a filesystem set \"file\/\" prefix for slug. By default dashboards from\n\/\/ a database assumed. Database dashboards may have \"db\/\" prefix or may have not, it will\n\/\/ be appended automatically.\nfunc (r *Instance) GetRawDashboard(slug string) ([]byte, BoardProperties, error) {\n\tvar (\n\t\traw []byte\n\t\tresult struct {\n\t\t\tMeta BoardProperties `json:\"meta\"`\n\t\t\tBoard json.RawMessage `json:\"dashboard\"`\n\t\t}\n\t\tcode int\n\t\terr error\n\t)\n\tslug, _ = setPrefix(slug)\n\tif raw, code, err = r.get(fmt.Sprintf(\"api\/dashboards\/%s\", slug), nil); err != nil {\n\t\treturn nil, BoardProperties{}, err\n\t}\n\tif code != 200 {\n\t\treturn nil, BoardProperties{}, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\tdec := json.NewDecoder(bytes.NewReader(raw))\n\tdec.UseNumber()\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn nil, BoardProperties{}, fmt.Errorf(\"unmarshal board with meta: %s\\n%s\", err, raw)\n\t}\n\treturn []byte(result.Board), result.Meta, err\n}\n\n\/\/ FoundBoard keeps result of search with metadata of a dashboard.\ntype FoundBoard struct {\n\tID uint `json:\"id\"`\n\tTitle string `json:\"title\"`\n\tURI string `json:\"uri\"`\n\tType string `json:\"type\"`\n\tTags []string `json:\"tags\"`\n\tIsStarred bool `json:\"isStarred\"`\n}\n\n\/\/ SearchDashboards search dashboards by substring of their title. It allows restrict the result set with\n\/\/ only starred dashboards and only for tags (logical OR applied to multiple tags).\nfunc (r *Instance) SearchDashboards(query string, starred bool, tags ...string) ([]FoundBoard, error) {\n\tvar (\n\t\traw []byte\n\t\tboards []FoundBoard\n\t\tcode int\n\t\terr error\n\t)\n\tu := url.URL{}\n\tq := u.Query()\n\tif query != \"\" {\n\t\tq.Set(\"query\", query)\n\t}\n\tif starred {\n\t\tq.Set(\"starred\", \"true\")\n\t}\n\tfor _, tag := range tags {\n\t\tq.Add(\"tag\", tag)\n\t}\n\tif raw, code, err = r.get(\"api\/search\", q); err != nil {\n\t\treturn nil, err\n\t}\n\tif code != 200 {\n\t\treturn nil, fmt.Errorf(\"HTTP error %d: returns %s\", code, raw)\n\t}\n\terr = json.Unmarshal(raw, &boards)\n\treturn boards, err\n}\n\n\/\/ SetDashboard updates existing dashboard or creates a new one.\n\/\/ Set dasboard ID to nil to create a new dashboard.\n\/\/ Set overwrite to true if you want to overwrite existing dashboard with\n\/\/ newer version or with same dashboard title.\n\/\/ Grafana only can create or update a dashboard in a database. File dashboards\n\/\/ may be only loaded with HTTP API but not created or updated.\nfunc (r *Instance) SetDashboard(board grafana.Board, overwrite bool) error {\n\tvar (\n\t\tisBoardFromDB bool\n\t\tnewBoard struct {\n\t\t\tDashboard grafana.Board `json:\"dashboard\"`\n\t\t\tOverwrite bool `json:\"overwrite\"`\n\t\t}\n\t\traw []byte\n\t\tresp StatusMessage\n\t\tcode int\n\t\terr error\n\t)\n\tif board.Slug, isBoardFromDB = cleanPrefix(board.Slug); !isBoardFromDB {\n\t\treturn errors.New(\"only database dashboard (with 'db\/' prefix in a slug) can be set\")\n\t}\n\tnewBoard.Dashboard = board\n\tnewBoard.Overwrite = overwrite\n\tif !overwrite {\n\t\tnewBoard.Dashboard.ID = 0\n\t}\n\tif raw, err = json.Marshal(newBoard); err != nil {\n\t\treturn err\n\t}\n\tif raw, code, err = r.post(\"api\/dashboards\/db\", nil, raw); err != nil {\n\t\treturn err\n\t}\n\tif err = json.Unmarshal(raw, &resp); err != nil {\n\t\treturn err\n\t}\n\tswitch code {\n\tcase 401:\n\t\treturn fmt.Errorf(\"%d %s\", code, *resp.Message)\n\tcase 412:\n\t\treturn fmt.Errorf(\"%d %s\", code, *resp.Message)\n\t}\n\treturn nil\n}\n\n\/\/ DeleteDashboard deletes dashboard that selected by slug string.\n\/\/ Grafana only can delete a dashboard in a database. File dashboards\n\/\/ may be only loaded with HTTP API but not deteled.\nfunc (r *Instance) DeleteDashboard(slug string) (StatusMessage, error) {\n\tvar (\n\t\tisBoardFromDB bool\n\t\traw []byte\n\t\treply StatusMessage\n\t\terr error\n\t)\n\tif slug, isBoardFromDB = setPrefix(slug); !isBoardFromDB {\n\t\treturn StatusMessage{}, errors.New(\"only database dashboards (with 'db\/' prefix in a slug) can be removed\")\n\t}\n\tif raw, err = r.delete(fmt.Sprintf(\"api\/dashboards\/db\/%s\", slug)); err != nil {\n\t\treturn StatusMessage{}, err\n\t}\n\terr = json.Unmarshal(raw, &reply)\n\treturn reply, err\n}\n\n\/\/ implicitely use dashboards from Grafana DB not from a file system\nfunc setPrefix(slug string) (string, bool) {\n\tif strings.HasPrefix(slug, \"db\") {\n\t\treturn slug, true\n\t}\n\tif strings.HasPrefix(slug, \"file\") {\n\t\treturn slug, false\n\t}\n\treturn fmt.Sprintf(\"db\/%s\", slug), true\n}\n\n\/\/ assume we use database dashboard by default\nfunc cleanPrefix(slug string) (string, bool) {\n\tif strings.HasPrefix(slug, \"db\") {\n\t\treturn slug[3:], true\n\t}\n\tif strings.HasPrefix(slug, \"file\") {\n\t\treturn slug[3:], false\n\t}\n\treturn fmt.Sprintf(\"%s\", slug), true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\tdocker \"github.com\/bywan\/go-dockercommand\"\n)\n\nconst (\n\tdockerEndpoint = \"unix:\/\/\/docker.sock\"\n)\n\ntype LanguageParser struct {\n\tImage string\n}\n\nfunc (p *LanguageParser) Parse() error {\n\n\tlog.Printf(\"Lauching language parser %s\\n\", p.Image)\n\n\tclient, err := docker.NewDocker(dockerEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbazookaHome := os.Getenv(\"BZK_HOME\")\n\tcontainer, err := client.Run(&docker.RunOptions{\n\t\tImage: p.Image,\n\t\tVolumeBinds: []string{\n\t\t\tfmt.Sprintf(\"%s\/source\/:\/bazooka\", bazookaHome),\n\t\t\tfmt.Sprintf(\"%s\/work\/:\/bazooka-output\", bazookaHome),\n\t\t\tfmt.Sprintf(\"%s\/meta\/:\/meta\", bazookaHome),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdetails, err := container.Inspect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif details.State.ExitCode != 0 {\n\t\treturn fmt.Errorf(\"Error during execution of Language Parser container %s\/parser\\n Check Docker container logs, id is %s\\n\", p.Image, container.ID())\n\t}\n\n\treturn container.Remove(&docker.RemoveOptions{\n\t\tForce: true,\n\t\tRemoveVolumes: true,\n\t})\n}\n<commit_msg>Retrieve parserlang logs<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\tdocker \"github.com\/bywan\/go-dockercommand\"\n)\n\nconst (\n\tdockerEndpoint = \"unix:\/\/\/docker.sock\"\n)\n\ntype LanguageParser struct {\n\tImage string\n}\n\nfunc (p *LanguageParser) Parse() error {\n\n\tlog.Printf(\"Lauching language parser %s\\n\", p.Image)\n\n\tclient, err := docker.NewDocker(dockerEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbazookaHome := os.Getenv(\"BZK_HOME\")\n\tcontainer, err := client.Run(&docker.RunOptions{\n\t\tImage: p.Image,\n\t\tVolumeBinds: []string{\n\t\t\tfmt.Sprintf(\"%s\/source\/:\/bazooka\", bazookaHome),\n\t\t\tfmt.Sprintf(\"%s\/work\/:\/bazooka-output\", bazookaHome),\n\t\t\tfmt.Sprintf(\"%s\/meta\/:\/meta\", bazookaHome),\n\t\t},\n\t\tDetach: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainer.Logs(p.Image)\n\n\texitCode, err := container.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exitCode != 0 {\n\t\treturn fmt.Errorf(\"Error during execution of Language Parser container %s\/parser\\n Check Docker container logs, id is %s\\n\", p.Image, container.ID())\n\t}\n\n\treturn container.Remove(&docker.RemoveOptions{\n\t\tForce: true,\n\t\tRemoveVolumes: true,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"database\/sql\"\n\t\"math\/rand\"\n\t\"time\"\n\n\tsq \"github.com\/Masterminds\/squirrel\"\n\t\"github.com\/lib\/pq\"\n)\n\nvar componentsQuery = psql.Select(\"c.id, c.name, c.interval, c.last_ran, c.paused\").\n\tFrom(\"components c\")\n\n\/\/counterfeiter:generate . Component\ntype Component interface {\n\tID() int\n\tName() string\n\tInterval() time.Duration\n\tLastRan() time.Time\n\tPaused() bool\n\n\tReload() (bool, error)\n\tIntervalElapsed() bool\n\tUpdateLastRan() error\n}\n\ntype component struct {\n\tid int\n\tname string\n\tinterval time.Duration\n\tlastRan time.Time\n\tpaused bool\n\n\tconn Conn\n}\n\nfunc (c *component) ID() int { return c.id }\nfunc (c *component) Name() string { return c.name }\nfunc (c *component) Interval() time.Duration { return c.interval }\nfunc (c *component) LastRan() time.Time { return c.lastRan }\nfunc (c *component) Paused() bool { return c.paused }\n\nfunc (c *component) Reload() (bool, error) {\n\trow := componentsQuery.Where(sq.Eq{\"c.id\": c.id}).\n\t\tRunWith(c.conn).\n\t\tQueryRow()\n\n\terr := scanComponent(c, row)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ IntervalElapsed adds a random tiny drift [-1, 1] seconds to interval, when\n\/\/ there are multiple ATCs, each ATC uses a slightly different interval, so\n\/\/ that shorter interval gets more chance to run. This mechanism helps distribute\n\/\/ component to across ATCs more evenly.\nfunc (c *component) IntervalElapsed() bool {\n\tinterval := c.interval\n\tdrift := time.Duration(rand.Int())%(2*time.Second) - time.Second\n\tif interval+drift > 0 {\n\t\tinterval += drift\n\t}\n\treturn time.Now().After(c.lastRan.Add(interval))\n}\n\nfunc (c *component) UpdateLastRan() error {\n\t_, err := psql.Update(\"components\").\n\t\tSet(\"last_ran\", sq.Expr(\"now()\")).\n\t\tWhere(sq.Eq{\n\t\t\t\"id\": c.id,\n\t\t}).\n\t\tRunWith(c.conn).\n\t\tExec()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc scanComponent(c *component, row scannable) error {\n\tvar (\n\t\tlastRan pq.NullTime\n\t\tinterval string\n\t)\n\n\terr := row.Scan(\n\t\t&c.id,\n\t\t&c.name,\n\t\t&interval,\n\t\t&lastRan,\n\t\t&c.paused,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.lastRan = lastRan.Time\n\n\tc.interval, err = time.ParseDuration(interval)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>address review comment<commit_after>package db\n\nimport (\n\t\"database\/sql\"\n\t\"math\/rand\"\n\t\"time\"\n\n\tsq \"github.com\/Masterminds\/squirrel\"\n\t\"github.com\/lib\/pq\"\n)\n\nvar componentsQuery = psql.Select(\"c.id, c.name, c.interval, c.last_ran, c.paused\").\n\tFrom(\"components c\")\n\n\/\/counterfeiter:generate . Component\ntype Component interface {\n\tID() int\n\tName() string\n\tInterval() time.Duration\n\tLastRan() time.Time\n\tPaused() bool\n\n\tReload() (bool, error)\n\tIntervalElapsed() bool\n\tUpdateLastRan() error\n}\n\ntype component struct {\n\tid int\n\tname string\n\tinterval time.Duration\n\tlastRan time.Time\n\tpaused bool\n\trander *rand.Rand\n\n\tconn Conn\n}\n\nfunc (c *component) ID() int { return c.id }\nfunc (c *component) Name() string { return c.name }\nfunc (c *component) Interval() time.Duration { return c.interval }\nfunc (c *component) LastRan() time.Time { return c.lastRan }\nfunc (c *component) Paused() bool { return c.paused }\n\nfunc (c *component) Reload() (bool, error) {\n\trow := componentsQuery.Where(sq.Eq{\"c.id\": c.id}).\n\t\tRunWith(c.conn).\n\t\tQueryRow()\n\n\terr := scanComponent(c, row)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\tif c.rander == nil {\n\t\tc.rander = rand.New(rand.NewSource(time.Now().Unix()))\n\t}\n\n\treturn true, nil\n}\n\n\/\/ IntervalElapsed adds a random tiny drift [-1, 1] seconds to interval, when\n\/\/ there are multiple ATCs, each ATC uses a slightly different interval, so\n\/\/ that shorter interval gets more chance to run. This mechanism helps distribute\n\/\/ component to across ATCs more evenly.\nfunc (c *component) IntervalElapsed() bool {\n\tinterval := c.interval\n\tdrift := time.Duration(c.rander.Int())%(2*time.Second) - time.Second\n\tif interval+drift > 0 {\n\t\tinterval += drift\n\t}\n\treturn time.Now().After(c.lastRan.Add(interval))\n}\n\nfunc (c *component) UpdateLastRan() error {\n\t_, err := psql.Update(\"components\").\n\t\tSet(\"last_ran\", sq.Expr(\"now()\")).\n\t\tWhere(sq.Eq{\n\t\t\t\"id\": c.id,\n\t\t}).\n\t\tRunWith(c.conn).\n\t\tExec()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc scanComponent(c *component, row scannable) error {\n\tvar (\n\t\tlastRan pq.NullTime\n\t\tinterval string\n\t)\n\n\terr := row.Scan(\n\t\t&c.id,\n\t\t&c.name,\n\t\t&interval,\n\t\t&lastRan,\n\t\t&c.paused,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.lastRan = lastRan.Time\n\n\tc.interval, err = time.ParseDuration(interval)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Formatting.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package resolver implements utilities for resolving paths within ipfs.\npackage resolver\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\tdag \"github.com\/ipfs\/go-ipfs\/merkledag\"\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n\n\tcid \"gx\/ipfs\/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP\/go-cid\"\n\tipld \"gx\/ipfs\/QmZtNq8dArGfnpCZfx2pUNY7UcjGhVp5qqwQ4hH6mpTMRQ\/go-ipld-format\"\n\tlogging \"gx\/ipfs\/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb\/go-log\"\n)\n\nvar log = logging.Logger(\"pathresolv\")\n\n\/\/ ErrNoComponents is used when Paths after a protocol\n\/\/ do not contain at least one component\nvar ErrNoComponents = errors.New(\n\t\"path must contain at least one component\")\n\n\/\/ ErrNoLink is returned when a link is not found in a path\ntype ErrNoLink struct {\n\tName string\n\tNode *cid.Cid\n}\n\n\/\/ Error implements the Error interface for ErrNoLink with a useful\n\/\/ human readable message.\nfunc (e ErrNoLink) Error() string {\n\treturn fmt.Sprintf(\"no link named %q under %s\", e.Name, e.Node.String())\n}\n\n\/\/ ResolveOnce resolves path through a single node\ntype ResolveOnce func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error)\n\n\/\/ Resolver provides path resolution to IPFS\n\/\/ It has a pointer to a DAGService, which is uses to resolve nodes.\n\/\/ TODO: now that this is more modular, try to unify this code with the\n\/\/ the resolvers in namesys\ntype Resolver struct {\n\tDAG ipld.NodeGetter\n\n\tResolveOnce ResolveOnce\n}\n\n\/\/ NewBasicResolver constructs a new basic resolver.\nfunc NewBasicResolver(ds ipld.DAGService) *Resolver {\n\treturn &Resolver{\n\t\tDAG: ds,\n\t\tResolveOnce: ResolveSingle,\n\t}\n}\n\n\/\/ ResolveToLastNode walks the given path and returns the ipld.Node\n\/\/ referenced by the last element in it.\nfunc (r *Resolver) ResolveToLastNode(ctx context.Context, fpath path.Path) (ipld.Node, []string, error) {\n\tc, p, err := path.SplitAbsPath(fpath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnd, err := r.DAG.Get(ctx, c)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor len(p) > 0 {\n\t\tlnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, p)\n\t\tif lnk == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tnext, err := lnk.GetNode(ctx, r.DAG)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tnd = next\n\t\tp = rest\n\t}\n\n\tif len(p) == 0 {\n\t\treturn nd, nil, nil\n\t}\n\n\t\/\/ Confirm the path exists within the object\n\tval, rest, err := nd.Resolve(p)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif len(rest) > 0 {\n\t\treturn nil, nil, errors.New(\"path failed to resolve fully\")\n\t}\n\tswitch val.(type) {\n\tcase *ipld.Link:\n\t\treturn nil, nil, errors.New(\"inconsistent ResolveOnce \/ nd.Resolve\")\n\tdefault:\n\t\treturn nd, p, nil\n\t}\n}\n\n\/\/ ResolvePath fetches the node for given path. It returns the last item\n\/\/ returned by ResolvePathComponents.\nfunc (r *Resolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, error) {\n\t\/\/ validate path\n\tif err := fpath.IsValid(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnodes, err := r.ResolvePathComponents(ctx, fpath)\n\tif err != nil || nodes == nil {\n\t\treturn nil, err\n\t}\n\treturn nodes[len(nodes)-1], err\n}\n\n\/\/ ResolveSingle simply resolves one hop of a path through a graph with no\n\/\/ extra context (does not opaquely resolve through sharded nodes)\nfunc ResolveSingle(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {\n\treturn nd.ResolveLink(names)\n}\n\n\/\/ ResolvePathComponents fetches the nodes for each segment of the given path.\n\/\/ It uses the first path component as a hash (key) of the first node, then\n\/\/ resolves all other components walking the links, with ResolveLinks.\nfunc (r *Resolver) ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error) {\n\tevt := log.EventBegin(ctx, \"resolvePathComponents\", logging.LoggableMap{\"fpath\": fpath})\n\tdefer evt.Done()\n\n\th, parts, err := path.SplitAbsPath(fpath)\n\tif err != nil {\n\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\treturn nil, err\n\t}\n\n\tlog.Debug(\"resolve dag get\")\n\tnd, err := r.DAG.Get(ctx, h)\n\tif err != nil {\n\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\treturn nil, err\n\t}\n\n\treturn r.ResolveLinks(ctx, nd, parts)\n}\n\n\/\/ ResolveLinks iteratively resolves names by walking the link hierarchy.\n\/\/ Every node is fetched from the DAGService, resolving the next name.\n\/\/ Returns the list of nodes forming the path, starting with ndd. This list is\n\/\/ guaranteed never to be empty.\n\/\/\n\/\/ ResolveLinks(nd, []string{\"foo\", \"bar\", \"baz\"})\n\/\/ would retrieve \"baz\" in (\"bar\" in (\"foo\" in nd.Links).Links).Links\nfunc (r *Resolver) ResolveLinks(ctx context.Context, ndd ipld.Node, names []string) ([]ipld.Node, error) {\n\n\tevt := log.EventBegin(ctx, \"resolveLinks\", logging.LoggableMap{\"names\": names})\n\tdefer evt.Done()\n\tresult := make([]ipld.Node, 0, len(names)+1)\n\tresult = append(result, ndd)\n\tnd := ndd \/\/ dup arg workaround\n\n\t\/\/ for each of the path components\n\tfor len(names) > 0 {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, time.Minute)\n\t\tdefer cancel()\n\n\t\tlnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, names)\n\t\tif err == dag.ErrLinkNotFound {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, ErrNoLink{Name: names[0], Node: nd.Cid()}\n\t\t} else if err != nil {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, err\n\t\t}\n\n\t\tnextnode, err := lnk.GetNode(ctx, r.DAG)\n\t\tif err != nil {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, err\n\t\t}\n\n\t\tnd = nextnode\n\t\tresult = append(result, nextnode)\n\t\tnames = rest\n\t}\n\treturn result, nil\n}\n<commit_msg>path: add a comment on dropping error in ResolveToLastNode<commit_after>\/\/ Package resolver implements utilities for resolving paths within ipfs.\npackage resolver\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\tdag \"github.com\/ipfs\/go-ipfs\/merkledag\"\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n\n\tcid \"gx\/ipfs\/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP\/go-cid\"\n\tipld \"gx\/ipfs\/QmZtNq8dArGfnpCZfx2pUNY7UcjGhVp5qqwQ4hH6mpTMRQ\/go-ipld-format\"\n\tlogging \"gx\/ipfs\/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb\/go-log\"\n)\n\nvar log = logging.Logger(\"pathresolv\")\n\n\/\/ ErrNoComponents is used when Paths after a protocol\n\/\/ do not contain at least one component\nvar ErrNoComponents = errors.New(\n\t\"path must contain at least one component\")\n\n\/\/ ErrNoLink is returned when a link is not found in a path\ntype ErrNoLink struct {\n\tName string\n\tNode *cid.Cid\n}\n\n\/\/ Error implements the Error interface for ErrNoLink with a useful\n\/\/ human readable message.\nfunc (e ErrNoLink) Error() string {\n\treturn fmt.Sprintf(\"no link named %q under %s\", e.Name, e.Node.String())\n}\n\n\/\/ ResolveOnce resolves path through a single node\ntype ResolveOnce func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error)\n\n\/\/ Resolver provides path resolution to IPFS\n\/\/ It has a pointer to a DAGService, which is uses to resolve nodes.\n\/\/ TODO: now that this is more modular, try to unify this code with the\n\/\/ the resolvers in namesys\ntype Resolver struct {\n\tDAG ipld.NodeGetter\n\n\tResolveOnce ResolveOnce\n}\n\n\/\/ NewBasicResolver constructs a new basic resolver.\nfunc NewBasicResolver(ds ipld.DAGService) *Resolver {\n\treturn &Resolver{\n\t\tDAG: ds,\n\t\tResolveOnce: ResolveSingle,\n\t}\n}\n\n\/\/ ResolveToLastNode walks the given path and returns the ipld.Node\n\/\/ referenced by the last element in it.\nfunc (r *Resolver) ResolveToLastNode(ctx context.Context, fpath path.Path) (ipld.Node, []string, error) {\n\tc, p, err := path.SplitAbsPath(fpath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnd, err := r.DAG.Get(ctx, c)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor len(p) > 0 {\n\t\tlnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, p)\n\n\t\t\/\/ Note: have to drop the error here as `ResolveOnce` doesn't handle 'leaf'\n\t\t\/\/ paths (so e.g. for `echo '{\"foo\":123}' | ipfs dag put` we wouldn't be\n\t\t\/\/ able to resolve `zdpu[...]\/foo`)\n\t\tif lnk == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tnext, err := lnk.GetNode(ctx, r.DAG)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tnd = next\n\t\tp = rest\n\t}\n\n\tif len(p) == 0 {\n\t\treturn nd, nil, nil\n\t}\n\n\t\/\/ Confirm the path exists within the object\n\tval, rest, err := nd.Resolve(p)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif len(rest) > 0 {\n\t\treturn nil, nil, errors.New(\"path failed to resolve fully\")\n\t}\n\tswitch val.(type) {\n\tcase *ipld.Link:\n\t\treturn nil, nil, errors.New(\"inconsistent ResolveOnce \/ nd.Resolve\")\n\tdefault:\n\t\treturn nd, p, nil\n\t}\n}\n\n\/\/ ResolvePath fetches the node for given path. It returns the last item\n\/\/ returned by ResolvePathComponents.\nfunc (r *Resolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, error) {\n\t\/\/ validate path\n\tif err := fpath.IsValid(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnodes, err := r.ResolvePathComponents(ctx, fpath)\n\tif err != nil || nodes == nil {\n\t\treturn nil, err\n\t}\n\treturn nodes[len(nodes)-1], err\n}\n\n\/\/ ResolveSingle simply resolves one hop of a path through a graph with no\n\/\/ extra context (does not opaquely resolve through sharded nodes)\nfunc ResolveSingle(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {\n\treturn nd.ResolveLink(names)\n}\n\n\/\/ ResolvePathComponents fetches the nodes for each segment of the given path.\n\/\/ It uses the first path component as a hash (key) of the first node, then\n\/\/ resolves all other components walking the links, with ResolveLinks.\nfunc (r *Resolver) ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error) {\n\tevt := log.EventBegin(ctx, \"resolvePathComponents\", logging.LoggableMap{\"fpath\": fpath})\n\tdefer evt.Done()\n\n\th, parts, err := path.SplitAbsPath(fpath)\n\tif err != nil {\n\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\treturn nil, err\n\t}\n\n\tlog.Debug(\"resolve dag get\")\n\tnd, err := r.DAG.Get(ctx, h)\n\tif err != nil {\n\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\treturn nil, err\n\t}\n\n\treturn r.ResolveLinks(ctx, nd, parts)\n}\n\n\/\/ ResolveLinks iteratively resolves names by walking the link hierarchy.\n\/\/ Every node is fetched from the DAGService, resolving the next name.\n\/\/ Returns the list of nodes forming the path, starting with ndd. This list is\n\/\/ guaranteed never to be empty.\n\/\/\n\/\/ ResolveLinks(nd, []string{\"foo\", \"bar\", \"baz\"})\n\/\/ would retrieve \"baz\" in (\"bar\" in (\"foo\" in nd.Links).Links).Links\nfunc (r *Resolver) ResolveLinks(ctx context.Context, ndd ipld.Node, names []string) ([]ipld.Node, error) {\n\n\tevt := log.EventBegin(ctx, \"resolveLinks\", logging.LoggableMap{\"names\": names})\n\tdefer evt.Done()\n\tresult := make([]ipld.Node, 0, len(names)+1)\n\tresult = append(result, ndd)\n\tnd := ndd \/\/ dup arg workaround\n\n\t\/\/ for each of the path components\n\tfor len(names) > 0 {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, time.Minute)\n\t\tdefer cancel()\n\n\t\tlnk, rest, err := r.ResolveOnce(ctx, r.DAG, nd, names)\n\t\tif err == dag.ErrLinkNotFound {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, ErrNoLink{Name: names[0], Node: nd.Cid()}\n\t\t} else if err != nil {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, err\n\t\t}\n\n\t\tnextnode, err := lnk.GetNode(ctx, r.DAG)\n\t\tif err != nil {\n\t\t\tevt.Append(logging.LoggableMap{\"error\": err.Error()})\n\t\t\treturn result, err\n\t\t}\n\n\t\tnd = nextnode\n\t\tresult = append(result, nextnode)\n\t\tnames = rest\n\t}\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\npackage filters\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\/kioutil\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\"\n)\n\n\/\/ ContainerFilter filters Resources using a container image.\n\/\/ The container must start a process that reads the list of\n\/\/ input Resources from stdin, reads the Configuration from the env\n\/\/ API_CONFIG, and writes the filtered Resources to stdout.\n\/\/ If there is a error or validation failure, the process must exit\n\/\/ non-zero.\n\/\/ The full set of environment variables from the parent process\n\/\/ are passed to the container.\ntype ContainerFilter struct {\n\tmountPath string\n\n\t\/\/ Image is the container image to use to create a container.\n\tImage string `yaml:\"image,omitempty\"`\n\n\t\/\/ Network is the container network to use.\n\tNetwork string `yaml:\"network,omitempty\"`\n\n\t\/\/ LocalVolume is the volume the container uses.\n\tLocalVolume string `yaml:\"localVolume,omitempty\"`\n\n\t\/\/ Config is the API configuration for the container and passed through the\n\t\/\/ API_CONFIG env var to the container.\n\t\/\/ Typically a Kubernetes style Resource Config.\n\tConfig *yaml.RNode `yaml:\"config,omitempty\"`\n\n\t\/\/ args may be specified by tests to override how a container is spawned\n\targs []string\n\n\tcheckInput func(string)\n}\n\nfunc (c *ContainerFilter) SetMountPath(path string) {\n\tc.mountPath = path\n}\n\n\/\/ GrepFilter implements kio.GrepFilter\nfunc (c *ContainerFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {\n\t\/\/ get the command to filter the Resources\n\tcmd, err := c.getCommand()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tin := &bytes.Buffer{}\n\tout := &bytes.Buffer{}\n\n\t\/\/ write the input\n\terr = kio.ByteWriter{\n\t\tWrappingApiVersion: kio.ResourceListApiVersion,\n\t\tWrappingKind: kio.ResourceListKind,\n\t\tWriter: in, KeepReaderAnnotations: true, FunctionConfig: c.Config}.Write(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ capture the command stdout for the return value\n\tr := &kio.ByteReader{Reader: out}\n\n\t\/\/ do the filtering\n\tif c.checkInput != nil {\n\t\tc.checkInput(in.String())\n\t}\n\tcmd.Stdin = in\n\tcmd.Stdout = out\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Read()\n}\n\n\/\/ getArgs returns the command + args to run to spawn the container\nfunc (c *ContainerFilter) getArgs() []string {\n\t\/\/ run the container using docker. this is simpler than using the docker\n\t\/\/ libraries, and ensures things like auth work the same as if the container\n\t\/\/ was run from the cli.\n\n\tnetwork := \"none\"\n\tif c.Network != \"\" {\n\t\tnetwork = c.Network\n\t}\n\n\targs := []string{\"docker\", \"run\",\n\t\t\"--rm\", \/\/ delete the container afterward\n\t\t\"-i\", \"-a\", \"STDIN\", \"-a\", \"STDOUT\", \"-a\", \"STDERR\", \/\/ attach stdin, stdout, stderr\n\n\t\t\/\/ added security options\n\t\t\"--network\", network,\n\t\t\"--user\", \"nobody\", \/\/ run as nobody\n\t\t\/\/ don't make fs readonly because things like heredoc rely on writing tmp files\n\t\t\"--security-opt=no-new-privileges\", \/\/ don't allow the user to escalate privileges\n\t}\n\t\/\/ mount the directory containing the function as read-only\n\tif c.mountPath != \"\" {\n\t\targs = append(args, \"-v\", fmt.Sprintf(\"%s:\/local\/:ro\", c.mountPath))\n\t}\n\n\tif c.LocalVolume != \"\" {\n\t\targs = append(args, \"--mount\", fmt.Sprintf(\"'type=volume,src=%s,dst=\/local\/'\", c.LocalVolume))\n\t}\n\n\t\/\/ export the local environment vars to the container\n\tfor _, pair := range os.Environ() {\n\t\targs = append(args, \"-e\", strings.Split(pair, \"=\")[0])\n\t}\n\treturn append(args, c.Image)\n\n}\n\n\/\/ getCommand returns a command which will apply the GrepFilter using the container image\nfunc (c *ContainerFilter) getCommand() (*exec.Cmd, error) {\n\t\/\/ encode the filter command API configuration\n\tcfg := &bytes.Buffer{}\n\tif err := func() error {\n\t\te := yaml.NewEncoder(cfg)\n\t\tdefer e.Close()\n\t\t\/\/ make it fit on a single line\n\t\tc.Config.YNode().Style = yaml.FlowStyle\n\t\treturn e.Encode(c.Config.YNode())\n\t}(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(c.args) == 0 {\n\t\tc.args = c.getArgs()\n\t}\n\n\tcmd := exec.Command(c.args[0], c.args[1:]...)\n\tcmd.Stderr = os.Stderr\n\tcmd.Env = os.Environ()\n\n\t\/\/ set stderr for err messaging\n\treturn cmd, nil\n}\n\n\/\/ IsReconcilerFilter filters Resources based on whether or not they are Reconciler Resource.\n\/\/ Resources with an apiVersion starting with '*.gcr.io', 'gcr.io' or 'docker.io' are considered\n\/\/ Reconciler Resources.\ntype IsReconcilerFilter struct {\n\t\/\/ ExcludeReconcilers if set to true, then Reconcilers will be excluded -- e.g.\n\t\/\/ Resources with a reconcile container through the apiVersion (gcr.io prefix) or\n\t\/\/ through the annotations\n\tExcludeReconcilers bool `yaml:\"excludeReconcilers,omitempty\"`\n\n\t\/\/ IncludeNonReconcilers if set to true, the NonReconciler will be included.\n\tIncludeNonReconcilers bool `yaml:\"includeNonReconcilers,omitempty\"`\n}\n\n\/\/ Filter implements kio.Filter\nfunc (c *IsReconcilerFilter) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) {\n\tvar out []*yaml.RNode\n\tfor i := range inputs {\n\t\timg, _ := GetContainerName(inputs[i])\n\t\tisContainerResource := img != \"\"\n\t\tif isContainerResource && !c.ExcludeReconcilers {\n\t\t\tout = append(out, inputs[i])\n\t\t}\n\t\tif !isContainerResource && c.IncludeNonReconcilers {\n\t\t\tout = append(out, inputs[i])\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ GetContainerName returns the container image for an API if one exists\nfunc GetContainerName(n *yaml.RNode) (string, string) {\n\tmeta, _ := n.GetMeta()\n\n\t\/\/ path to the function, this will be mounted into the container\n\tpath := meta.Annotations[kioutil.PathAnnotation]\n\n\tcontainer := meta.Annotations[\"config.kubernetes.io\/container\"]\n\tif container != \"\" {\n\t\treturn container, path\n\t}\n\n\timage, err := n.Pipe(yaml.Lookup(\"metadata\", \"configFn\", \"container\", \"image\"))\n\tif err != nil || yaml.IsMissingOrNull(image) {\n\t\treturn \"\", path\n\t}\n\treturn image.YNode().Value, path\n}\n<commit_msg>Make LocalVolume read only<commit_after>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\npackage filters\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\"\n\t\"sigs.k8s.io\/kustomize\/kyaml\/kio\/kioutil\"\n\n\t\"sigs.k8s.io\/kustomize\/kyaml\/yaml\"\n)\n\n\/\/ ContainerFilter filters Resources using a container image.\n\/\/ The container must start a process that reads the list of\n\/\/ input Resources from stdin, reads the Configuration from the env\n\/\/ API_CONFIG, and writes the filtered Resources to stdout.\n\/\/ If there is a error or validation failure, the process must exit\n\/\/ non-zero.\n\/\/ The full set of environment variables from the parent process\n\/\/ are passed to the container.\ntype ContainerFilter struct {\n\tmountPath string\n\n\t\/\/ Image is the container image to use to create a container.\n\tImage string `yaml:\"image,omitempty\"`\n\n\t\/\/ Network is the container network to use.\n\tNetwork string `yaml:\"network,omitempty\"`\n\n\t\/\/ LocalVolume is the volume the container uses.\n\tLocalVolume string `yaml:\"localVolume,omitempty\"`\n\n\t\/\/ Config is the API configuration for the container and passed through the\n\t\/\/ API_CONFIG env var to the container.\n\t\/\/ Typically a Kubernetes style Resource Config.\n\tConfig *yaml.RNode `yaml:\"config,omitempty\"`\n\n\t\/\/ args may be specified by tests to override how a container is spawned\n\targs []string\n\n\tcheckInput func(string)\n}\n\nfunc (c *ContainerFilter) SetMountPath(path string) {\n\tc.mountPath = path\n}\n\n\/\/ GrepFilter implements kio.GrepFilter\nfunc (c *ContainerFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {\n\t\/\/ get the command to filter the Resources\n\tcmd, err := c.getCommand()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tin := &bytes.Buffer{}\n\tout := &bytes.Buffer{}\n\n\t\/\/ write the input\n\terr = kio.ByteWriter{\n\t\tWrappingApiVersion: kio.ResourceListApiVersion,\n\t\tWrappingKind: kio.ResourceListKind,\n\t\tWriter: in, KeepReaderAnnotations: true, FunctionConfig: c.Config}.Write(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ capture the command stdout for the return value\n\tr := &kio.ByteReader{Reader: out}\n\n\t\/\/ do the filtering\n\tif c.checkInput != nil {\n\t\tc.checkInput(in.String())\n\t}\n\tcmd.Stdin = in\n\tcmd.Stdout = out\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Read()\n}\n\n\/\/ getArgs returns the command + args to run to spawn the container\nfunc (c *ContainerFilter) getArgs() []string {\n\t\/\/ run the container using docker. this is simpler than using the docker\n\t\/\/ libraries, and ensures things like auth work the same as if the container\n\t\/\/ was run from the cli.\n\n\tnetwork := \"none\"\n\tif c.Network != \"\" {\n\t\tnetwork = c.Network\n\t}\n\n\targs := []string{\"docker\", \"run\",\n\t\t\"--rm\", \/\/ delete the container afterward\n\t\t\"-i\", \"-a\", \"STDIN\", \"-a\", \"STDOUT\", \"-a\", \"STDERR\", \/\/ attach stdin, stdout, stderr\n\n\t\t\/\/ added security options\n\t\t\"--network\", network,\n\t\t\"--user\", \"nobody\", \/\/ run as nobody\n\t\t\/\/ don't make fs readonly because things like heredoc rely on writing tmp files\n\t\t\"--security-opt=no-new-privileges\", \/\/ don't allow the user to escalate privileges\n\t}\n\t\/\/ mount the directory containing the function as read-only\n\tif c.mountPath != \"\" {\n\t\targs = append(args, \"-v\", fmt.Sprintf(\"%s:\/local\/:ro\", c.mountPath))\n\t}\n\n\tif c.LocalVolume != \"\" {\n\t\targs = append(args, \"--mount\", fmt.Sprintf(\"'type=volume,src=%s,dst=\/local\/:ro'\", c.LocalVolume))\n\t}\n\n\t\/\/ export the local environment vars to the container\n\tfor _, pair := range os.Environ() {\n\t\targs = append(args, \"-e\", strings.Split(pair, \"=\")[0])\n\t}\n\treturn append(args, c.Image)\n\n}\n\n\/\/ getCommand returns a command which will apply the GrepFilter using the container image\nfunc (c *ContainerFilter) getCommand() (*exec.Cmd, error) {\n\t\/\/ encode the filter command API configuration\n\tcfg := &bytes.Buffer{}\n\tif err := func() error {\n\t\te := yaml.NewEncoder(cfg)\n\t\tdefer e.Close()\n\t\t\/\/ make it fit on a single line\n\t\tc.Config.YNode().Style = yaml.FlowStyle\n\t\treturn e.Encode(c.Config.YNode())\n\t}(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(c.args) == 0 {\n\t\tc.args = c.getArgs()\n\t}\n\n\tcmd := exec.Command(c.args[0], c.args[1:]...)\n\tcmd.Stderr = os.Stderr\n\tcmd.Env = os.Environ()\n\n\t\/\/ set stderr for err messaging\n\treturn cmd, nil\n}\n\n\/\/ IsReconcilerFilter filters Resources based on whether or not they are Reconciler Resource.\n\/\/ Resources with an apiVersion starting with '*.gcr.io', 'gcr.io' or 'docker.io' are considered\n\/\/ Reconciler Resources.\ntype IsReconcilerFilter struct {\n\t\/\/ ExcludeReconcilers if set to true, then Reconcilers will be excluded -- e.g.\n\t\/\/ Resources with a reconcile container through the apiVersion (gcr.io prefix) or\n\t\/\/ through the annotations\n\tExcludeReconcilers bool `yaml:\"excludeReconcilers,omitempty\"`\n\n\t\/\/ IncludeNonReconcilers if set to true, the NonReconciler will be included.\n\tIncludeNonReconcilers bool `yaml:\"includeNonReconcilers,omitempty\"`\n}\n\n\/\/ Filter implements kio.Filter\nfunc (c *IsReconcilerFilter) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) {\n\tvar out []*yaml.RNode\n\tfor i := range inputs {\n\t\timg, _ := GetContainerName(inputs[i])\n\t\tisContainerResource := img != \"\"\n\t\tif isContainerResource && !c.ExcludeReconcilers {\n\t\t\tout = append(out, inputs[i])\n\t\t}\n\t\tif !isContainerResource && c.IncludeNonReconcilers {\n\t\t\tout = append(out, inputs[i])\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ GetContainerName returns the container image for an API if one exists\nfunc GetContainerName(n *yaml.RNode) (string, string) {\n\tmeta, _ := n.GetMeta()\n\n\t\/\/ path to the function, this will be mounted into the container\n\tpath := meta.Annotations[kioutil.PathAnnotation]\n\n\tcontainer := meta.Annotations[\"config.kubernetes.io\/container\"]\n\tif container != \"\" {\n\t\treturn container, path\n\t}\n\n\timage, err := n.Pipe(yaml.Lookup(\"metadata\", \"configFn\", \"container\", \"image\"))\n\tif err != nil || yaml.IsMissingOrNull(image) {\n\t\treturn \"\", path\n\t}\n\treturn image.YNode().Value, path\n}\n<|endoftext|>"} {"text":"<commit_before>package autorest\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tmajor = \"7\"\n\tminor = \"3\"\n\tpatch = \"1\"\n\ttag = \"\"\n\tsemVerFormat = \"%s.%s.%s%s\"\n)\n\nvar version string\n\n\/\/ Version returns the semantic version (see http:\/\/semver.org).\nfunc Version() string {\n\tif version == \"\" {\n\t\tversion = fmt.Sprintf(semVerFormat, major, minor, patch, tag)\n\t}\n\treturn version\n}\n<commit_msg>Fixing a personal nit in the Version() function.<commit_after>package autorest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tmajor = 7\n\tminor = 3\n\tpatch = 1\n\ttag = \"\"\n)\n\nvar version string\n\n\/\/ Version returns the semantic version (see http:\/\/semver.org).\nfunc Version() string {\n\tif version == \"\" {\n\t\tverBuilder := bytes.NewBufferString(fmt.Sprintf(\"%d.%d.%d\", major, minor, patch))\n\t\tif tag != \"\" && tag != \"-\" {\n\t\t\tupdated := strings.TrimPrefix(tag, \"-\")\n\t\t\tverBuilder.WriteRune('-')\n\t\t\tverBuilder.WriteString(updated)\n\t\t}\n\t\tversion = verBuilder.String()\n\t}\n\treturn version\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scp\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Source: os\/stat_darwin.go\nfunc atime(fi os.FileInfo) time.Time {\n\treturn timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)\n}\n\nfunc timespecToTime(ts syscall.Timespec) time.Time {\n\treturn time.Unix(int64(ts.Sec), int64(ts.Nsec))\n}\n<commit_msg>Suppress linter warning about unnecessary type conversion on darwin. (#5302)<commit_after>\/*\nCopyright 2020 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scp\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Source: os\/stat_darwin.go\nfunc atime(fi os.FileInfo) time.Time {\n\treturn timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)\n}\n\nfunc timespecToTime(ts syscall.Timespec) time.Time {\n\treturn time.Unix(int64(ts.Sec), int64(ts.Nsec)) \/\/nolint:unconvert\n}\n<|endoftext|>"} {"text":"<commit_before>package listmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst N = 1 << 11\n\nfunc assertOrder(l *Listmap) bool {\n\tprev := []byte{}\n\tfor c := l.NewCursor(); c != nil; c = c.Next() {\n\t\tif bytes.Compare(c.Key(), prev) < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc assertZeroMissingKeys(l *Listmap) bool {\n\ti := 0\n\tfor c := l.NewCursor(); c != nil; c = c.Next() {\n\t\texpectedKey := []byte(fmt.Sprintf(\"%020d\", i))\n\t\tif bytes.Compare(c.Key(), expectedKey) != 0 {\n\t\t\treturn false\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn true\n}\n\nfunc checkError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc Test1(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.1\")\n\n\tcheckError(l.Set([]byte(\"1\"), []byte(\"bar\")), t)\n\tcheckError(l.Set([]byte(\"2\"), []byte(\"foobar\")), t)\n\tcheckError(l.Set([]byte(\"3\"), []byte(\"barbaz\")), t)\n\tcheckError(l.Set([]byte(\"4\"), []byte(\"b\")), t)\n\tcheckError(l.Set([]byte(\"45\"), []byte(\"foo\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc Test2(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.2\")\n\n\tcheckError(l.Set([]byte(\"a\"), []byte(\"AAAAA\")), t)\n\tcheckError(l.Set([]byte(\"c\"), []byte(\"CCCCC\")), t)\n\tcheckError(l.Set([]byte(\"b\"), []byte(\"BBBBB\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc Test3(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.3\")\n\n\tcheckError(l.Set([]byte(\"1\"), []byte(\"AAAAA\")), t)\n\tcheckError(l.Set([]byte(\"3\"), []byte(\"CCCCC\")), t)\n\tcheckError(l.Set([]byte(\"2\"), []byte(\"BBBBB\")), t)\n\tcheckError(l.Set([]byte(\"0\"), []byte(\"00000\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRemove(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.remove\")\n\n\tcheckError(l.Set([]byte(\"foo\"), []byte(\"bar\")), t)\n\tval, err := l.Get([]byte(\"foo\"))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif bytes.Compare(val, []byte(\"bar\")) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", []byte(\"bar\"), val)\n\t}\n\n\tl.Remove([]byte(\"foo\"))\n\n\tval, err = l.Get([]byte(\"foo\"))\n\tif err != ErrKeyNotFound {\n\t\tt.Errorf(\"expected error `%v', got %v\", ErrKeyNotFound, err)\n\t}\n\n\tif bytes.Compare(val, nil) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", nil, val)\n\t}\n\n\tcheckError(l.Set([]byte(\"foo\"), []byte(\"baz\")), t)\n\tval, err = l.Get([]byte(\"foo\"))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif bytes.Compare(val, []byte(\"baz\")) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", []byte(\"baz\"), val)\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestSequentialShort(t *testing.T) {\n\tl := NewListmap(\"test.sequential_short\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%09d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\tt.Log(\"Time to insert\", N, \"sequential integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Close()\n}\n\nfunc TestSequentialLong(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.sequential_long\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N*8; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprintf(\"%09d\", i)), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N*8, \"sequential integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRead(t *testing.T) {\n\tl := OpenListmap(\"test.sequential_short\")\n\tif l == nil {\n\t\tt.Error(\"Couldn't open list\")\n\t}\n\n\tif val, err := l.Get([]byte(\"000000005\")); err != nil || bytes.Compare(val, []byte(\"5\")) != 0 {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tt.Errorf(\"expected value %v, got %v\", []byte(\"5\"), val)\n\t\t}\n\t}\n\n\tif val, err := l.Get([]byte(\"000000013\")); err != nil || bytes.Compare(val, []byte(\"13\")) != 0 {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tt.Errorf(\"expected value %v, got %v\", []byte(\"13\"), val)\n\t\t}\n\t}\n\n\tif val, err := l.Get([]byte(\"5\")); err == nil {\n\t\tt.Errorf(\"expected error `%v', got %v with value `%v'\", ErrKeyNotFound,\n\t\t\tnil, val)\n\t}\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRandomShort(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.random_short\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N, \"random integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRandomLong(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.random_long\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N*8; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N*8, \"random integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentSequential(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.concurrent_sequential\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif i%10 == n {\n\t\t\t\tcheckError(l.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i))), t)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tif !assertZeroMissingKeys(l) {\n\t\tt.Error(\"there are missing keys\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentSequential2(t *testing.T) {\n\tt.Parallel()\n\trand.Seed(time.Now().Unix())\n\tl := NewListmap(\"test.concurrent_sequential_2\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif rand.Float32() < 0.85 {\n\t\t\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tif !assertZeroMissingKeys(l) {\n\t\tt.Error(\"there are missing keys\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentRandom(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.concurrent_random\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif i%10 == n {\n\t\t\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkSequentialWrites(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkRandomWrites(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i)))\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkSequentialWritesWithVerification(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\n\tif !assertOrder(l) {\n\t\tb.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkRandomWritesWithVerification(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i)))\n\t}\n\n\tif !assertOrder(l) {\n\t\tb.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n<commit_msg>Fix test file names<commit_after>package listmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst N = 1 << 11\n\nfunc assertOrder(l *Listmap) bool {\n\tprev := []byte{}\n\tfor c := l.NewCursor(); c != nil; c = c.Next() {\n\t\tif bytes.Compare(c.Key(), prev) < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc assertZeroMissingKeys(l *Listmap) bool {\n\ti := 0\n\tfor c := l.NewCursor(); c != nil; c = c.Next() {\n\t\texpectedKey := []byte(fmt.Sprintf(\"%020d\", i))\n\t\tif bytes.Compare(c.Key(), expectedKey) != 0 {\n\t\t\treturn false\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn true\n}\n\nfunc checkError(err error, t *testing.T) {\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc Test1(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.1\")\n\n\tcheckError(l.Set([]byte(\"1\"), []byte(\"bar\")), t)\n\tcheckError(l.Set([]byte(\"2\"), []byte(\"foobar\")), t)\n\tcheckError(l.Set([]byte(\"3\"), []byte(\"barbaz\")), t)\n\tcheckError(l.Set([]byte(\"4\"), []byte(\"b\")), t)\n\tcheckError(l.Set([]byte(\"45\"), []byte(\"foo\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc Test2(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.2\")\n\n\tcheckError(l.Set([]byte(\"a\"), []byte(\"AAAAA\")), t)\n\tcheckError(l.Set([]byte(\"c\"), []byte(\"CCCCC\")), t)\n\tcheckError(l.Set([]byte(\"b\"), []byte(\"BBBBB\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc Test3(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.3\")\n\n\tcheckError(l.Set([]byte(\"1\"), []byte(\"AAAAA\")), t)\n\tcheckError(l.Set([]byte(\"3\"), []byte(\"CCCCC\")), t)\n\tcheckError(l.Set([]byte(\"2\"), []byte(\"BBBBB\")), t)\n\tcheckError(l.Set([]byte(\"0\"), []byte(\"00000\")), t)\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRemove(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.remove\")\n\n\tcheckError(l.Set([]byte(\"foo\"), []byte(\"bar\")), t)\n\tval, err := l.Get([]byte(\"foo\"))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif bytes.Compare(val, []byte(\"bar\")) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", []byte(\"bar\"), val)\n\t}\n\n\tl.Remove([]byte(\"foo\"))\n\n\tval, err = l.Get([]byte(\"foo\"))\n\tif err != ErrKeyNotFound {\n\t\tt.Errorf(\"expected error `%v', got %v\", ErrKeyNotFound, err)\n\t}\n\n\tif bytes.Compare(val, nil) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", nil, val)\n\t}\n\n\tcheckError(l.Set([]byte(\"foo\"), []byte(\"baz\")), t)\n\tval, err = l.Get([]byte(\"foo\"))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif bytes.Compare(val, []byte(\"baz\")) != 0 {\n\t\tt.Errorf(\"expected value to be %v, got %v\", []byte(\"baz\"), val)\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestSequentialShort(t *testing.T) {\n\tl := NewListmap(\"test.sequential_short\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%09d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\tt.Log(\"Time to insert\", N, \"sequential integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Close()\n}\n\nfunc TestSequentialLong(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.sequential_long\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N*8; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprintf(\"%09d\", i)), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N*8, \"sequential integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRead(t *testing.T) {\n\tl := OpenListmap(\"test.sequential_short\")\n\tif l == nil {\n\t\tt.Error(\"Couldn't open list\")\n\t}\n\n\tif val, err := l.Get([]byte(\"000000005\")); err != nil || bytes.Compare(val, []byte(\"5\")) != 0 {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tt.Errorf(\"expected value %v, got %v\", []byte(\"5\"), val)\n\t\t}\n\t}\n\n\tif val, err := l.Get([]byte(\"000000013\")); err != nil || bytes.Compare(val, []byte(\"13\")) != 0 {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tt.Errorf(\"expected value %v, got %v\", []byte(\"13\"), val)\n\t\t}\n\t}\n\n\tif val, err := l.Get([]byte(\"5\")); err == nil {\n\t\tt.Errorf(\"expected error `%v', got %v with value `%v'\", ErrKeyNotFound,\n\t\t\tnil, val)\n\t}\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRandomShort(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.random_short\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N, \"random integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestRandomLong(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.random_long\")\n\n\tstart := time.Now()\n\tfor i := 0; i < N*8; i++ {\n\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t}\n\tt.Log(\"Time to insert\", N*8, \"random integers:\", time.Now().Sub(start))\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentSequential(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.concurrent_sequential\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif i%10 == n {\n\t\t\t\tcheckError(l.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i))), t)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tif !assertZeroMissingKeys(l) {\n\t\tt.Error(\"there are missing keys\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentSequential2(t *testing.T) {\n\tt.Parallel()\n\trand.Seed(time.Now().Unix())\n\tl := NewListmap(\"test.concurrent_sequential_2\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif rand.Float32() < 0.85 {\n\t\t\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tif !assertZeroMissingKeys(l) {\n\t\tt.Error(\"there are missing keys\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc TestConcurrentRandom(t *testing.T) {\n\tt.Parallel()\n\tl := NewListmap(\"test.concurrent_random\")\n\tvar wg sync.WaitGroup\n\n\trun := func(l *Listmap, n int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < N*4; i++ {\n\t\t\tif i%10 == n {\n\t\t\t\tcheckError(l.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i))), t)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo run(l, i)\n\t}\n\n\twg.Wait()\n\n\tif !assertOrder(l) {\n\t\tt.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkSequentialWrites(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkRandomWrites(b *testing.B) {\n\tl := NewListmap(\"benchmark.random\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i)))\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkSequentialWritesWithVerification(b *testing.B) {\n\tl := NewListmap(\"benchmark.sequential_verify\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprintf(\"%020d\", i)), []byte(fmt.Sprint(i)))\n\t}\n\n\tif !assertOrder(l) {\n\t\tb.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n\nfunc BenchmarkRandomWritesWithVerification(b *testing.B) {\n\tl := NewListmap(\"benchmark.random_verify\")\n\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set([]byte(fmt.Sprint(rand.Int())), []byte(fmt.Sprint(i)))\n\t}\n\n\tif !assertOrder(l) {\n\t\tb.Error(\"keys were not in order\")\n\t}\n\n\tl.Destroy()\n}\n<|endoftext|>"} {"text":"<commit_before>package vagrant\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceVagrantVm() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVagrantVmCreate,\n\t\tRead: resourceVagrantVmRead,\n\t\tUpdate: resourceVagrantVmUpdate,\n\t\tDelete: resourceVagrantVmDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"boot_timeout\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"The time in seconds that Vagrant will wait for the machine to boot.\",\n\t\t\t},\n\t\t\t\"box\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Installed box name or Atlas box shorthand name to use.\",\n\t\t\t},\n\t\t\t\"box_check_update\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"Whether or not to check for updates to the box on each `vagrant up`.\",\n\t\t\t},\n\t\t\t\"box_download_checksum\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The expected checksum of the box file.\",\n\t\t\t},\n\t\t\t\"box_download_checksum_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The type of checksum specified by `box_download_checksum`. Should be `md5`, `sha1`, or `sha256`.\",\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {\n\t\t\t\t\tvalue := v.(string)\n\t\t\t\t\tvalid_values := []string{\"md5\", \"sha1\", \"sha256\"}\n\n\t\t\t\t\tfor _, valid_value := range valid_values {\n\t\t\t\t\t\tif value == valid_value {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\terrors = append(errors, fmt.Errorf(\"%q must be one of md5, sha1, or sha256\", k))\n\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"box_url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The URL to download the box.\",\n\t\t\t},\n\t\t\t\"box_version\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The version of the box to use.\",\n\t\t\t},\n\t\t\t\"hostname\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The hostname the machine should have.\",\n\t\t\t},\n\t\t\t\"forwarded_port\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"auto_correct\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"Whether or not to automatically resolve port collisions on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"guest\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tDescription: \"The port on the guest you want to be exposed on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"guest_ip\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The IP the forwarded port should be bound to within the guest.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"host\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tDescription: \"The port on the host used to access the port on the guest.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"host_ip\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The IP the forwarded port should be bound to on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The network protcol to use, one of `tcp` or `udp`.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn 0\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"private_network\": resourceVagrantVmPublicOrPrivateNetwork(),\n\t\t\t\"public_network\": resourceVagrantVmPublicOrPrivateNetwork(),\n\t\t\t\"virtualbox_provider\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"cpus\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The number of CPUs to give the VM.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"customize\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"A call to customize the VM just before it is booted.\",\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\t\t\t\"arguments\": &schema.Schema{\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tDescription: \"The list of arguments for the customization command.\",\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn 0\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"gui\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"Whether or not to create a VM with a GUI.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"memory\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The amount of memory to give the VM, in MB.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"name\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"A custom name for the VM within VirtualBox's GUI app.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn 0\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceVagrantVmCreate(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmRead(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmUpdate(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmDelete(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmPublicOrPrivateNetwork() *schema.Schema {\n\treturn &schema.Schema{\n\t\tType: schema.TypeSet,\n\t\tOptional: true,\n\t\tElem: &schema.Resource{\n\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\"auto_config\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tDescription: \"Whether or not to automatically configure the network interface.\",\n\t\t\t\t},\n\t\t\t\t\"dhcp\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"Whether or not to use DHCP to assign the IP on the host.\",\n\t\t\t\t},\n\t\t\t\t\"ip\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"The static IP to assign the interface on the host. Do not set this to use DHCP instead.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSet: func(v interface{}) int {\n\t\t\treturn 0\n\t\t},\n\t}\n}\n<commit_msg>Add the remaining VM configuration attributes.<commit_after>package vagrant\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceVagrantVm() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVagrantVmCreate,\n\t\tRead: resourceVagrantVmRead,\n\t\tUpdate: resourceVagrantVmUpdate,\n\t\tDelete: resourceVagrantVmDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"boot_timeout\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"The time in seconds that Vagrant will wait for the machine to boot.\",\n\t\t\t},\n\t\t\t\"box\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Installed box name or Atlas box shorthand name to use.\",\n\t\t\t},\n\t\t\t\"box_check_update\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"Whether or not to check for updates to the box on each `vagrant up`.\",\n\t\t\t},\n\t\t\t\"box_download_checksum\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The expected checksum of the box file.\",\n\t\t\t},\n\t\t\t\"box_download_checksum_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The type of checksum specified by `box_download_checksum`. Should be `md5`, `sha1`, or `sha256`.\",\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {\n\t\t\t\t\tvalue := v.(string)\n\t\t\t\t\tvalid_values := []string{\"md5\", \"sha1\", \"sha256\"}\n\n\t\t\t\t\tfor _, valid_value := range valid_values {\n\t\t\t\t\t\tif value == valid_value {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\terrors = append(errors, fmt.Errorf(\"%q must be one of md5, sha1, or sha256\", k))\n\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"box_download_client_cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Path to a client certificate to use.\",\n\t\t\t},\n\t\t\t\"box_download_ca_cert\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Path to a certificate authority certificate bundle to use.\",\n\t\t\t},\n\t\t\t\"box_download_ca_path\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Path to a directory containing certificate authority certificates to use.\",\n\t\t\t},\n\t\t\t\"box_download_insecure\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Whether or not to skip verification of digital certificates.\",\n\t\t\t},\n\t\t\t\"box_download_location_trusted\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"Whether or not to allow authentication credentials to be used for subsequent HTTP requests when redirected.\",\n\t\t\t},\n\t\t\t\"box_url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The URL to download the box.\",\n\t\t\t},\n\t\t\t\"box_version\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The version of the box to use.\",\n\t\t\t},\n\t\t\t\"communicator\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"How to connect to the guest box.\",\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {\n\t\t\t\t\tvalue := v.(string)\n\n\t\t\t\t\tif value == \"ssh\" || value == \"winrm\" {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\terrors = append(errors, fmt.Errorf(\"%q must be ssh or winrm\", k))\n\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"graceful_halt_timeout\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The time in seconds to wait when `vagrant halt` is called.\",\n\t\t\t},\n\t\t\t\"guest\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The operating system that will be running in the guest box.\",\n\t\t\t},\n\t\t\t\"hostname\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The hostname the machine should have.\",\n\t\t\t},\n\t\t\t\"forwarded_port\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"auto_correct\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"Whether or not to automatically resolve port collisions on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"guest\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tDescription: \"The port on the guest you want to be exposed on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"guest_ip\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The IP the forwarded port should be bound to within the guest.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"host\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tDescription: \"The port on the host used to access the port on the guest.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"host_ip\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The IP the forwarded port should be bound to on the host.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The network protcol to use, one of `tcp` or `udp`.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn 0\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"post_up_message\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"A message to display after `vagrant up` completes.\",\n\t\t\t},\n\t\t\t\"private_network\": resourceVagrantVmPublicOrPrivateNetwork(),\n\t\t\t\"public_network\": resourceVagrantVmPublicOrPrivateNetwork(),\n\t\t\t\"usable_port_range_lower_bound\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The lower end (inclusive) of the port range to use for handling port collisions.\",\n\t\t\t},\n\t\t\t\"usable_port_range_upper_bound\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tDescription: \"The upper end (inclusive) of the port range to use for handling port collisions.\",\n\t\t\t},\n\t\t\t\"virtualbox_provider\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"cpus\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The number of CPUs to give the VM.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"customize\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"A call to customize the VM just before it is booted.\",\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\t\t\t\"arguments\": &schema.Schema{\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tDescription: \"The list of arguments for the customization command.\",\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn 0\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"gui\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"Whether or not to create a VM with a GUI.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"memory\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"The amount of memory to give the VM, in MB.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"name\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\tDescription: \"A custom name for the VM within VirtualBox's GUI app.\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn 0\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceVagrantVmCreate(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmRead(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmUpdate(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmDelete(data *schema.ResourceData, meta interface{}) error {\n\treturn nil\n}\n\nfunc resourceVagrantVmPublicOrPrivateNetwork() *schema.Schema {\n\treturn &schema.Schema{\n\t\tType: schema.TypeSet,\n\t\tOptional: true,\n\t\tElem: &schema.Resource{\n\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\"auto_config\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tDescription: \"Whether or not to automatically configure the network interface.\",\n\t\t\t\t},\n\t\t\t\t\"dhcp\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"Whether or not to use DHCP to assign the IP on the host.\",\n\t\t\t\t},\n\t\t\t\t\"ip\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"The static IP to assign the interface on the host. Do not set this to use DHCP instead.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSet: func(v interface{}) int {\n\t\t\treturn 0\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"net\/http\"\n \"strconv\"\n \"github.com\/rcrowley\/go-tigertonic\"\n \"github.com\/rkbodenner\/parallel_universe\/collection\"\n \"github.com\/rkbodenner\/parallel_universe\/game\"\n \"github.com\/rkbodenner\/parallel_universe\/session\"\n)\n\ntype Player struct {\n Id int `json:\"id\"`\n Name string `json:\"name\"`\n}\n\nvar players = []Player{\n {1, \"Player One\"},\n {2, \"Player Two\"},\n}\n\nvar gameCollection = collection.NewCollection()\nvar gameIndex = make(map[uint64]*game.Game)\n\nfunc initGameData() {\n for i,game := range gameCollection.Games {\n game.Id = (uint)(i+1)\n gameIndex[(uint64)(i+1)] = game\n }\n}\n\nvar sessions []*session.Session\nvar sessionIndex = make(map[uint64]*session.Session)\n\nfunc initSessionData() {\n sessions = make([]*session.Session, 2)\n sessions[0] = session.NewSession(gameCollection.Games[0], 2)\n sessions[1] = session.NewSession(gameCollection.Games[1], 2)\n\n for i,session := range sessions {\n session.Id = (uint)(i+1)\n sessionIndex[(uint64)(i+1)] = session\n }\n}\n\nfunc corsHandler(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n return func(w http.ResponseWriter, r *http.Request) {\n header := w.Header()\n header.Add(\"Access-Control-Allow-Origin\", \"http:\/\/localhost:8000\")\n handler(w, r)\n }\n}\n\nfunc collectionHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(gameCollection)\n if ( nil != err ) {\n fmt.Fprintln(w, err)\n }\n}\n\nfunc gameHandler(w http.ResponseWriter, r *http.Request) {\n id_str := r.URL.Query().Get(\"id\")\n id, err := strconv.ParseUint(id_str, 10, 64)\n if nil != err {\n http.Error(w, \"Not found\", http.StatusNotFound)\n return\n }\n\n game, ok := gameIndex[id]\n if ok {\n err := json.NewEncoder(w).Encode(game)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n } else {\n http.Error(w, \"Not found\", http.StatusNotFound)\n }\n}\n\nfunc playersHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(players)\n if ( nil != err ) {\n fmt.Fprintln(w, err)\n }\n}\n\nfunc sessionsHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(sessions)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n}\n\nfunc sessionHandler(w http.ResponseWriter, r *http.Request) {\n id_str := r.URL.Query().Get(\"id\")\n id, err := strconv.ParseUint(id_str, 10, 64)\n if nil != err {\n http.Error(w, \"Not found\", http.StatusNotFound)\n return\n }\n\n session, ok := sessionIndex[id]\n if ok {\n err := json.NewEncoder(w).Encode(session)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n } else {\n http.Error(w, \"Not found\", http.StatusNotFound)\n }\n}\n\nfunc main() {\n initGameData()\n initSessionData()\n\n mux := tigertonic.NewTrieServeMux()\n mux.HandleFunc(\"GET\", \"\/games\", corsHandler(collectionHandler))\n mux.HandleFunc(\"GET\", \"\/games\/{id}\", corsHandler(gameHandler))\n mux.HandleFunc(\"GET\", \"\/players\", corsHandler(playersHandler))\n mux.HandleFunc(\"GET\", \"\/sessions\", corsHandler(sessionsHandler))\n mux.HandleFunc(\"GET\", \"\/sessions\/{id}\", corsHandler(sessionHandler))\n http.ListenAndServe(\":8080\", mux)\n}\n<commit_msg>Use player type from parallel_universe<commit_after>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"net\/http\"\n \"strconv\"\n \"github.com\/rcrowley\/go-tigertonic\"\n \"github.com\/rkbodenner\/parallel_universe\/collection\"\n \"github.com\/rkbodenner\/parallel_universe\/game\"\n \"github.com\/rkbodenner\/parallel_universe\/session\"\n)\n\nvar players = []*game.Player{\n &game.Player{1, \"Player One\"},\n &game.Player{2, \"Player Two\"},\n}\n\nvar gameCollection = collection.NewCollection()\nvar gameIndex = make(map[uint64]*game.Game)\n\nfunc initGameData() {\n for i,game := range gameCollection.Games {\n game.Id = (uint)(i+1)\n gameIndex[(uint64)(i+1)] = game\n }\n}\n\nvar sessions []*session.Session\nvar sessionIndex = make(map[uint64]*session.Session)\n\nfunc initSessionData() {\n sessions = make([]*session.Session, 2)\n sessions[0] = session.NewSession(gameCollection.Games[0], 2)\n sessions[1] = session.NewSession(gameCollection.Games[1], 2)\n\n for i,session := range sessions {\n session.Id = (uint)(i+1)\n sessionIndex[(uint64)(i+1)] = session\n }\n}\n\nfunc corsHandler(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n return func(w http.ResponseWriter, r *http.Request) {\n header := w.Header()\n header.Add(\"Access-Control-Allow-Origin\", \"http:\/\/localhost:8000\")\n handler(w, r)\n }\n}\n\nfunc collectionHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(gameCollection)\n if ( nil != err ) {\n fmt.Fprintln(w, err)\n }\n}\n\nfunc gameHandler(w http.ResponseWriter, r *http.Request) {\n id_str := r.URL.Query().Get(\"id\")\n id, err := strconv.ParseUint(id_str, 10, 64)\n if nil != err {\n http.Error(w, \"Not found\", http.StatusNotFound)\n return\n }\n\n game, ok := gameIndex[id]\n if ok {\n err := json.NewEncoder(w).Encode(game)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n } else {\n http.Error(w, \"Not found\", http.StatusNotFound)\n }\n}\n\nfunc playersHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(players)\n if ( nil != err ) {\n fmt.Fprintln(w, err)\n }\n}\n\nfunc sessionsHandler(w http.ResponseWriter, r *http.Request) {\n err := json.NewEncoder(w).Encode(sessions)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n}\n\nfunc sessionHandler(w http.ResponseWriter, r *http.Request) {\n id_str := r.URL.Query().Get(\"id\")\n id, err := strconv.ParseUint(id_str, 10, 64)\n if nil != err {\n http.Error(w, \"Not found\", http.StatusNotFound)\n return\n }\n\n session, ok := sessionIndex[id]\n if ok {\n err := json.NewEncoder(w).Encode(session)\n if ( nil != err ) {\n http.Error(w, \"Error\", http.StatusInternalServerError)\n }\n } else {\n http.Error(w, \"Not found\", http.StatusNotFound)\n }\n}\n\nfunc main() {\n initGameData()\n initSessionData()\n\n mux := tigertonic.NewTrieServeMux()\n mux.HandleFunc(\"GET\", \"\/games\", corsHandler(collectionHandler))\n mux.HandleFunc(\"GET\", \"\/games\/{id}\", corsHandler(gameHandler))\n mux.HandleFunc(\"GET\", \"\/players\", corsHandler(playersHandler))\n mux.HandleFunc(\"GET\", \"\/sessions\", corsHandler(sessionsHandler))\n mux.HandleFunc(\"GET\", \"\/sessions\/{id}\", corsHandler(sessionHandler))\n http.ListenAndServe(\":8080\", mux)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"html\/template\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ defaultTitle is the site pages default title.\n\tdefaultTitle = \"Google I\/O 2015\"\n\t\/\/ descDefault is the default site description\n\tdescDefault = \"Google I\/O 2015 brings together developers for an immersive, \" +\n\t\t\"two-day experience focused on exploring the next generation \" +\n\t\t\"of technology, mobile and beyond. Join us online or in person \" +\n\t\t\"May 28-29, 2015.\"\n\t\/\/ descExperiment is used when users share an experiment link on social.\n\tdescExperiment = \"Make music with instruments inspired by material design \" +\n\t\t\"for Google I\/O 2015. Play, record and share.\"\n\t\/\/ images for og:image meta tag\n\togImageDefault = \"io15-color.png\"\n\togImageExperiment = \"io15-experiment.png\"\n\n\t\/\/ layout for templateData.StartDateStr\n\ttimeISO8601 = \"Jan 02 2006 15:04:05 GMT-0700 (MST)\"\n\n\t\/\/ templatesDir is the templates directory path relative to config.Dir.\n\ttemplatesDir = \"templates\"\n)\n\nvar (\n\t\/\/ tmplFunc is a map of functions available to all templates.\n\ttmplFunc = template.FuncMap{\n\t\t\"safeHTML\": func(v string) template.HTML { return template.HTML(v) },\n\t\t\"canonical\": canonicalURL,\n\t}\n\t\/\/ tmplCache caches HTML templates parsed in parseTemplate()\n\ttmplCache = &templateCache{templates: make(map[string]*template.Template)}\n)\n\n\/\/ templateCache is in-memory cache for parsed templates\ntype templateCache struct {\n\tsync.Mutex\n\ttemplates map[string]*template.Template\n}\n\n\/\/ templateData is the templates context\ntype templateData struct {\n\tEnv string\n\tClientID string\n\tPrefix string\n\tSlug string\n\tTitle string\n\tDesc string\n\tOgTitle string\n\tOgImage string\n\tStartDateStr string\n}\n\n\/\/ renderTemplate executes a template found in name.html file\n\/\/ using either layout_full.html or layout_partial.html as the root template.\n\/\/ env is the app current environment: \"dev\", \"stage\" or \"prod\".\nfunc renderTemplate(c context.Context, name string, partial bool, data *templateData) ([]byte, error) {\n\ttpl, err := parseTemplate(name, partial)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data == nil {\n\t\tdata = &templateData{}\n\t}\n\tif data.Env == \"\" {\n\t\tdata.Env = config.Env\n\t}\n\tdata.ClientID = config.Google.Auth.Client\n\tdata.Title = pageTitle(tpl)\n\tdata.Slug = name\n\tdata.Prefix = config.Prefix\n\tdata.StartDateStr = config.Schedule.Start.In(config.Schedule.Location).Format(timeISO8601)\n\tif data.Desc == \"\" {\n\t\tdata.Desc = descDefault\n\t}\n\tif data.OgImage == \"\" {\n\t\tdata.OgImage = ogImageDefault\n\t}\n\tif data.OgTitle == \"\" {\n\t\tdata.OgTitle = data.Title\n\t}\n\n\tvar b bytes.Buffer\n\tif err := tpl.Execute(&b, data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n\/\/ parseTemplate creates a template identified by name, using appropriate layout.\n\/\/ HTTP error layout is used for name arg prefixed with \"error_\", e.g. \"error_404\".\nfunc parseTemplate(name string, partial bool) (*template.Template, error) {\n\tvar layout string\n\tswitch {\n\tcase strings.HasPrefix(name, \"error_\"):\n\t\tlayout = \"layout_error.html\"\n\tcase name == \"upgrade\":\n\t\tlayout = \"layout_bare.html\"\n\tcase partial:\n\t\tlayout = \"layout_partial.html\"\n\tdefault:\n\t\tlayout = \"layout_full.html\"\n\t}\n\n\tkey := name + layout\n\ttmplCache.Lock()\n\tdefer tmplCache.Unlock()\n\tif t, ok := tmplCache.templates[key]; ok {\n\t\treturn t, nil\n\t}\n\n\tt, err := template.New(layout).Delims(\"{%\", \"%}\").Funcs(tmplFunc).ParseFiles(\n\t\tfilepath.Join(config.Dir, templatesDir, layout),\n\t\tfilepath.Join(config.Dir, templatesDir, name+\".html\"),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !isDev() {\n\t\ttmplCache.templates[key] = t\n\t}\n\treturn t, nil\n}\n\n\/\/ pageTitle executes \"title\" template and returns its result or defaultTitle.\nfunc pageTitle(t *template.Template) string {\n\tb := new(bytes.Buffer)\n\tif err := t.ExecuteTemplate(b, \"title\", nil); err != nil || b.Len() == 0 {\n\t\treturn defaultTitle\n\t}\n\treturn b.String()\n}\n\n\/\/ canonicalURL returns a canonical URL of path p.\n\/\/ Relative paths are based off of config.Prefix.\nfunc canonicalURL(p string) string {\n\tif p == \"home\" || p == \"\/\" || p == \"\" {\n\t\treturn config.Prefix + \"\/\"\n\t}\n\treturn path.Join(config.Prefix, p)\n}\n<commit_msg>Fixes #921<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"html\/template\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ defaultTitle is the site pages default title.\n\tdefaultTitle = \"Google I\/O 2015\"\n\t\/\/ descDefault is the default site description\n\tdescDefault = \"Google I\/O 2015 brings together developers for an immersive,\" +\n \" two-day experience focused on exploring the next generation of \" +\n \"technology, mobile and beyond. Join us online or in person May 28-29, \" +\n \"2015. #io15\"\n\t\/\/ descExperiment is used when users share an experiment link on social.\n\tdescExperiment = \"Make music with instruments inspired by material design \" +\n\t\t\"for #io15. Play, record and share.\"\n\t\/\/ images for og:image meta tag\n\togImageDefault = \"io15-color.png\"\n\togImageExperiment = \"io15-experiment.png\"\n\n\t\/\/ layout for templateData.StartDateStr\n\ttimeISO8601 = \"Jan 02 2006 15:04:05 GMT-0700 (MST)\"\n\n\t\/\/ templatesDir is the templates directory path relative to config.Dir.\n\ttemplatesDir = \"templates\"\n)\n\nvar (\n\t\/\/ tmplFunc is a map of functions available to all templates.\n\ttmplFunc = template.FuncMap{\n\t\t\"safeHTML\": func(v string) template.HTML { return template.HTML(v) },\n\t\t\"canonical\": canonicalURL,\n\t}\n\t\/\/ tmplCache caches HTML templates parsed in parseTemplate()\n\ttmplCache = &templateCache{templates: make(map[string]*template.Template)}\n)\n\n\/\/ templateCache is in-memory cache for parsed templates\ntype templateCache struct {\n\tsync.Mutex\n\ttemplates map[string]*template.Template\n}\n\n\/\/ templateData is the templates context\ntype templateData struct {\n\tEnv string\n\tClientID string\n\tPrefix string\n\tSlug string\n\tTitle string\n\tDesc string\n\tOgTitle string\n\tOgImage string\n\tStartDateStr string\n}\n\n\/\/ renderTemplate executes a template found in name.html file\n\/\/ using either layout_full.html or layout_partial.html as the root template.\n\/\/ env is the app current environment: \"dev\", \"stage\" or \"prod\".\nfunc renderTemplate(c context.Context, name string, partial bool, data *templateData) ([]byte, error) {\n\ttpl, err := parseTemplate(name, partial)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data == nil {\n\t\tdata = &templateData{}\n\t}\n\tif data.Env == \"\" {\n\t\tdata.Env = config.Env\n\t}\n\tdata.ClientID = config.Google.Auth.Client\n\tdata.Title = pageTitle(tpl)\n\tdata.Slug = name\n\tdata.Prefix = config.Prefix\n\tdata.StartDateStr = config.Schedule.Start.In(config.Schedule.Location).Format(timeISO8601)\n\tif data.Desc == \"\" {\n\t\tdata.Desc = descDefault\n\t}\n\tif data.OgImage == \"\" {\n\t\tdata.OgImage = ogImageDefault\n\t}\n\tif data.OgTitle == \"\" {\n\t\tdata.OgTitle = data.Title\n\t}\n\n\tvar b bytes.Buffer\n\tif err := tpl.Execute(&b, data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n\/\/ parseTemplate creates a template identified by name, using appropriate layout.\n\/\/ HTTP error layout is used for name arg prefixed with \"error_\", e.g. \"error_404\".\nfunc parseTemplate(name string, partial bool) (*template.Template, error) {\n\tvar layout string\n\tswitch {\n\tcase strings.HasPrefix(name, \"error_\"):\n\t\tlayout = \"layout_error.html\"\n\tcase name == \"upgrade\":\n\t\tlayout = \"layout_bare.html\"\n\tcase partial:\n\t\tlayout = \"layout_partial.html\"\n\tdefault:\n\t\tlayout = \"layout_full.html\"\n\t}\n\n\tkey := name + layout\n\ttmplCache.Lock()\n\tdefer tmplCache.Unlock()\n\tif t, ok := tmplCache.templates[key]; ok {\n\t\treturn t, nil\n\t}\n\n\tt, err := template.New(layout).Delims(\"{%\", \"%}\").Funcs(tmplFunc).ParseFiles(\n\t\tfilepath.Join(config.Dir, templatesDir, layout),\n\t\tfilepath.Join(config.Dir, templatesDir, name+\".html\"),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !isDev() {\n\t\ttmplCache.templates[key] = t\n\t}\n\treturn t, nil\n}\n\n\/\/ pageTitle executes \"title\" template and returns its result or defaultTitle.\nfunc pageTitle(t *template.Template) string {\n\tb := new(bytes.Buffer)\n\tif err := t.ExecuteTemplate(b, \"title\", nil); err != nil || b.Len() == 0 {\n\t\treturn defaultTitle\n\t}\n\treturn b.String()\n}\n\n\/\/ canonicalURL returns a canonical URL of path p.\n\/\/ Relative paths are based off of config.Prefix.\nfunc canonicalURL(p string) string {\n\tif p == \"home\" || p == \"\/\" || p == \"\" {\n\t\treturn config.Prefix + \"\/\"\n\t}\n\treturn path.Join(config.Prefix, p)\n}\n<|endoftext|>"} {"text":"<commit_before>package acme\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/golang\/mock\/gomock\"\n\t\"github.com\/jetstack\/kube-lego\/pkg\/mocks\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n)\n\nfunc TestAcme_E2E(t *testing.T) {\n\tlogrus.SetLevel(logrus.DebugLevel)\n\tlog := logrus.WithField(\"context\", \"test-mock\")\n\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\t\/\/ mock kube lego\n\tmockKL := mocks.NewMockKubeLego(ctrl)\n\tmockKL.EXPECT().Log().AnyTimes().Return(log)\n\tmockKL.EXPECT().Version().AnyTimes().Return(\"mocked-version\")\n\tmockKL.EXPECT().LegoHTTPPort().AnyTimes().Return(intstr.FromInt(8181))\n\tmockKL.EXPECT().AcmeUser().MinTimes(1).Return(nil, errors.New(\"I am only mocked\"))\n\tmockKL.EXPECT().LegoURL().MinTimes(1).Return(\"https:\/\/acme-staging.api.letsencrypt.org\/directory\")\n\tmockKL.EXPECT().LegoEmail().MinTimes(1).Return(\"kube-lego-e2e@example.com\")\n\tmockKL.EXPECT().SaveAcmeUser(gomock.Any()).MinTimes(1).Return(nil)\n\tmockKL.EXPECT().LegoRsaKeySize().AnyTimes().Return(2048)\n\tmockKL.EXPECT().ExponentialBackoffMaxElapsedTime().MinTimes(1).Return(time.Minute * 5)\n\tmockKL.EXPECT().ExponentialBackoffInitialInterval().MinTimes(1).Return(time.Second * 30)\n\tmockKL.EXPECT().ExponentialBackoffMultiplier().MinTimes(1).Return(2.0)\n\n\t\/\/ set up ngrok\n\tcommand := []string{\"ngrok\", \"http\", \"--bind-tls\", \"false\", \"8181\"}\n\tcmdNgrok := exec.Command(command[0], command[1:]...)\n\terr := cmdNgrok.Start()\n\tif err != nil {\n\t\tt.Skip(\"Skipping e2e test as ngrok executable is not available: \", err)\n\t}\n\tdefer cmdNgrok.Process.Kill()\n\n\t\/\/ get domain main for forwarding the acme validation\n\tregexDomain := regexp.MustCompile(\"http:\/\/([a-z0-9]+.\\\\.ngrok\\\\.io)\")\n\tvar domain string\n\tfor {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tresp, err := http.Get(\"http:\/\/localhost:4040\/status\")\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tmatched := regexDomain.FindStringSubmatch(string(body))\n\t\tif matched == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdomain = matched[1]\n\t\tlog.Infof(\"kube-lego domain is %s\", domain)\n\n\t\tbreak\n\t}\n\n\tstopCh := make(chan struct{})\n\ta := New(mockKL)\n\tgo a.RunServer(stopCh)\n\n\tlog.Infof(\"trying to obtain a certificate for the domain\")\n\ta.ObtainCertificate([]string{domain})\n\n}\n<commit_msg>Skip acme e2e test due to LE staging outage<commit_after>package acme\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/golang\/mock\/gomock\"\n\t\"github.com\/jetstack\/kube-lego\/pkg\/mocks\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n)\n\nfunc TestAcme_E2E(t *testing.T) {\n\tt.Skip(\"Temporarily skipping due to Let's Encrypt staging outage\")\n\tlogrus.SetLevel(logrus.DebugLevel)\n\tlog := logrus.WithField(\"context\", \"test-mock\")\n\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\t\/\/ mock kube lego\n\tmockKL := mocks.NewMockKubeLego(ctrl)\n\tmockKL.EXPECT().Log().AnyTimes().Return(log)\n\tmockKL.EXPECT().Version().AnyTimes().Return(\"mocked-version\")\n\tmockKL.EXPECT().LegoHTTPPort().AnyTimes().Return(intstr.FromInt(8181))\n\tmockKL.EXPECT().AcmeUser().MinTimes(1).Return(nil, errors.New(\"I am only mocked\"))\n\tmockKL.EXPECT().LegoURL().MinTimes(1).Return(\"https:\/\/acme-staging.api.letsencrypt.org\/directory\")\n\tmockKL.EXPECT().LegoEmail().MinTimes(1).Return(\"kube-lego-e2e@example.com\")\n\tmockKL.EXPECT().SaveAcmeUser(gomock.Any()).MinTimes(1).Return(nil)\n\tmockKL.EXPECT().LegoRsaKeySize().AnyTimes().Return(2048)\n\tmockKL.EXPECT().ExponentialBackoffMaxElapsedTime().MinTimes(1).Return(time.Minute * 5)\n\tmockKL.EXPECT().ExponentialBackoffInitialInterval().MinTimes(1).Return(time.Second * 30)\n\tmockKL.EXPECT().ExponentialBackoffMultiplier().MinTimes(1).Return(2.0)\n\n\t\/\/ set up ngrok\n\tcommand := []string{\"ngrok\", \"http\", \"--bind-tls\", \"false\", \"8181\"}\n\tcmdNgrok := exec.Command(command[0], command[1:]...)\n\terr := cmdNgrok.Start()\n\tif err != nil {\n\t\tt.Skip(\"Skipping e2e test as ngrok executable is not available: \", err)\n\t}\n\tdefer cmdNgrok.Process.Kill()\n\n\t\/\/ get domain main for forwarding the acme validation\n\tregexDomain := regexp.MustCompile(\"http:\/\/([a-z0-9]+.\\\\.ngrok\\\\.io)\")\n\tvar domain string\n\tfor {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tresp, err := http.Get(\"http:\/\/localhost:4040\/status\")\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tmatched := regexDomain.FindStringSubmatch(string(body))\n\t\tif matched == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdomain = matched[1]\n\t\tlog.Infof(\"kube-lego domain is %s\", domain)\n\n\t\tbreak\n\t}\n\n\tstopCh := make(chan struct{})\n\ta := New(mockKL)\n\tgo a.RunServer(stopCh)\n\n\tlog.Infof(\"trying to obtain a certificate for the domain\")\n\ta.ObtainCertificate([]string{domain})\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !privileged_tests\n\npackage api\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/api\/metrics\/mock\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype ApiSuite struct{}\n\nvar _ = check.Suite(&ApiSuite{})\n\nfunc (a *ApiSuite) TestRateLimit(c *check.C) {\n\tmetricsAPI := mock.NewMockMetrics()\n\tclient, err := NewClient(\"\", \"dummy-subscription\", \"dummy-resource-group\", \"\", metricsAPI, 10.0, 4, true)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(client, check.Not(check.IsNil))\n\n\tfor i := 0; i < 10; i++ {\n\t\tclient.limiter.Limit(context.TODO(), \"test\")\n\t}\n\n\tc.Assert(metricsAPI.RateLimit(\"test\"), check.Not(check.DeepEquals), time.Duration(0))\n}\n<commit_msg>azure: Fix API rate limit test<commit_after>\/\/ Copyright 2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !privileged_tests\n\npackage api\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/api\/metrics\/mock\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype ApiSuite struct{}\n\nvar _ = check.Suite(&ApiSuite{})\n\nfunc (a *ApiSuite) TestRateLimit(c *check.C) {\n\t\/\/ Since github.com\/Azure\/go-autorest\/autorest\/azure\/auth v0.5.6, the\n\t\/\/ MSI_ENDPOINT environment variable must be set, otherwise this test will\n\t\/\/ fail with the error \"failed to get oauth token from MSI: MSI not\n\t\/\/ available\".\n\t\/\/\n\t\/\/ Temporarily set the environment variable for the duration of the test,\n\t\/\/ using the same workaround as used in\n\t\/\/ github.com\/Azure\/go-autorest\/autorest\/azure\/auth's tests.\n\tos.Setenv(\"MSI_ENDPOINT\", \"http:\/\/localhost\")\n\tdefer func() {\n\t\tos.Unsetenv(\"MSI_ENDPOINT\")\n\t}()\n\n\tmetricsAPI := mock.NewMockMetrics()\n\tclient, err := NewClient(\"\", \"dummy-subscription\", \"dummy-resource-group\", \"\", metricsAPI, 10.0, 4, true)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(client, check.Not(check.IsNil))\n\n\tfor i := 0; i < 10; i++ {\n\t\tclient.limiter.Limit(context.TODO(), \"test\")\n\t}\n\n\tc.Assert(metricsAPI.RateLimit(\"test\"), check.Not(check.DeepEquals), time.Duration(0))\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/units\"\n\t\"github.com\/spf13\/cobra\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkerrors \"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\n\t\"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/cli\/describe\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n\tdeployapi \"github.com\/openshift\/origin\/pkg\/deploy\/api\"\n\tdeployutil \"github.com\/openshift\/origin\/pkg\/deploy\/util\"\n)\n\n\/\/ DeployOptions holds all the options for the `deploy` command\ntype DeployOptions struct {\n\tout io.Writer\n\tosClient client.Interface\n\tkubeClient kclient.Interface\n\tbuilder *resource.Builder\n\tnamespace string\n\tbaseCommandName string\n\n\tdeploymentConfigName string\n\tdeployLatest bool\n\tretryDeploy bool\n\tcancelDeploy bool\n\tenableTriggers bool\n}\n\nconst (\n\tdeployLong = `\nView, start, cancel, or retry a deployment\n\nThis command allows you to control a deployment config. Each individual deployment is exposed\nas a new replication controller, and the deployment process manages scaling down old deployments\nand scaling up new ones. You can rollback to any previous deployment, or even scale multiple\ndeployments up at the same time.\n\nThere are several deployment strategies defined:\n\n* Rolling (default) - scales up the new deployment in stages, gradually reducing the number\n of old deployments. If one of the new deployed pods never becomes \"ready\", the new deployment\n will be rolled back (scaled down to zero). Use when your application can tolerate two versions\n of code running at the same time (many web applications, scalable databases)\n* Recreate - scales the old deployment down to zero, then scales the new deployment up to full.\n Use when your application cannot tolerate two versions of code running at the same time\n* Custom - run your own deployment process inside a Docker container using your own scripts.\n\nIf a deployment fails, you may opt to retry it (if the error was transient). Some deployments may\nnever successfully complete - in which case you can use the '--latest' flag to force a redeployment.\nWhen rolling back to a previous deployment, a new deployment will be created with an identical copy\nof your config at the latest position.\n\nIf you want to cancel a running deployment, use '--cancel' but keep in mind that this is a best-effort\noperation and may take some time to complete. It’s possible the deployment will partially or totally\ncomplete before the cancellation is effective. In such a case an appropriate event will be emitted.\n\nIf no options are given, shows information about the latest deployment.`\n\n\tdeployExample = ` # Display the latest deployment for the 'database' deployment config\n $ %[1]s deploy database\n\n # Start a new deployment based on the 'database'\n $ %[1]s deploy database --latest\n\n # Retry the latest failed deployment based on 'frontend'\n # The deployer pod and any hook pods are deleted for the latest failed deployment\n $ %[1]s deploy frontend --retry\n\n # Cancel the in-progress deployment based on 'frontend'\n $ %[1]s deploy frontend --cancel`\n)\n\n\/\/ NewCmdDeploy creates a new `deploy` command.\nfunc NewCmdDeploy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\toptions := &DeployOptions{\n\t\tbaseCommandName: fullName,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"deploy DEPLOYMENTCONFIG [--latest|--retry|--cancel|--enable-triggers]\",\n\t\tShort: \"View, start, cancel, or retry a deployment\",\n\t\tLong: deployLong,\n\t\tExample: fmt.Sprintf(deployExample, fullName),\n\t\tSuggestFor: []string{\"deployment\"},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := options.Complete(f, args, out); err != nil {\n\t\t\t\tkcmdutil.CheckErr(err)\n\t\t\t}\n\n\t\t\tif err := options.Validate(); err != nil {\n\t\t\t\tkcmdutil.CheckErr(kcmdutil.UsageError(cmd, err.Error()))\n\t\t\t}\n\n\t\t\tif err := options.RunDeploy(); err != nil {\n\t\t\t\tkcmdutil.CheckErr(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcmd.Flags().BoolVar(&options.deployLatest, \"latest\", false, \"Start a new deployment now.\")\n\tcmd.Flags().BoolVar(&options.retryDeploy, \"retry\", false, \"Retry the latest failed deployment.\")\n\tcmd.Flags().BoolVar(&options.cancelDeploy, \"cancel\", false, \"Cancel the in-progress deployment.\")\n\tcmd.Flags().BoolVar(&options.enableTriggers, \"enable-triggers\", false, \"Enables all image triggers for the deployment config.\")\n\n\treturn cmd\n}\n\nfunc (o *DeployOptions) Complete(f *clientcmd.Factory, args []string, out io.Writer) error {\n\tif len(args) > 1 {\n\t\treturn errors.New(\"only one deployment config name is supported as argument.\")\n\t}\n\tvar err error\n\n\to.osClient, o.kubeClient, err = f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\to.namespace, _, err = f.DefaultNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper, typer := f.Object()\n\to.builder = resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder())\n\n\to.out = out\n\n\tif len(args) > 0 {\n\t\to.deploymentConfigName = args[0]\n\t}\n\n\treturn nil\n}\n\nfunc (o DeployOptions) Validate() error {\n\tif len(o.deploymentConfigName) == 0 {\n\t\treturn errors.New(\"a deployment config name is required.\")\n\t}\n\tnumOptions := 0\n\tif o.deployLatest {\n\t\tnumOptions++\n\t}\n\tif o.retryDeploy {\n\t\tnumOptions++\n\t}\n\tif o.cancelDeploy {\n\t\tnumOptions++\n\t}\n\tif o.enableTriggers {\n\t\tnumOptions++\n\t}\n\tif numOptions > 1 {\n\t\treturn errors.New(\"only one of --latest, --retry, --cancel, or --enable-triggers is allowed.\")\n\t}\n\treturn nil\n}\n\nfunc (o DeployOptions) RunDeploy() error {\n\tr := o.builder.\n\t\tNamespaceParam(o.namespace).\n\t\tResourceNames(\"deploymentconfigs\", o.deploymentConfigName).\n\t\tSingleResourceType().\n\t\tDo()\n\tresultObj, err := r.Object()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig, ok := resultObj.(*deployapi.DeploymentConfig)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s is not a valid deployment config\", o.deploymentConfigName)\n\t}\n\n\tswitch {\n\tcase o.deployLatest:\n\t\terr = o.deploy(config, o.out)\n\tcase o.retryDeploy:\n\t\terr = o.retry(config, o.out)\n\tcase o.cancelDeploy:\n\t\terr = o.cancel(config, o.out)\n\tcase o.enableTriggers:\n\t\terr = o.reenableTriggers(config, o.out)\n\tdefault:\n\t\tdescriber := describe.NewLatestDeploymentsDescriber(o.osClient, o.kubeClient, -1)\n\t\tdesc, err := describer.Describe(config.Namespace, config.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprint(o.out, desc)\n\t}\n\n\treturn err\n}\n\n\/\/ deploy launches a new deployment unless there's already a deployment\n\/\/ process in progress for config.\nfunc (o DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tdeploymentName := deployutil.LatestDeploymentNameForConfig(config)\n\tdeployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)\n\tif err == nil {\n\t\t\/\/ Reject attempts to start a concurrent deployment.\n\t\tstatus := deployutil.DeploymentStatusFor(deployment)\n\t\tif status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {\n\t\t\treturn fmt.Errorf(\"#%d is already in progress (%s).\\nOptionally, you can cancel this deployment using the --cancel option.\", config.Status.LatestVersion, status)\n\t\t}\n\t} else {\n\t\tif !kerrors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconfig.Status.LatestVersion++\n\t_, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)\n\tif err == nil {\n\t\tfmt.Fprintf(out, \"Started deployment #%d\\n\", config.Status.LatestVersion)\n\t}\n\treturn err\n}\n\n\/\/ retry resets the status of the latest deployment to New, which will cause\n\/\/ the deployment to be retried. An error is returned if the deployment is not\n\/\/ currently in a failed state.\nfunc (o DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tif config.Status.LatestVersion == 0 {\n\t\treturn fmt.Errorf(\"no deployments found for %s\/%s\", config.Namespace, config.Name)\n\t}\n\tdeploymentName := deployutil.LatestDeploymentNameForConfig(config)\n\tdeployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)\n\tif err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"unable to find the latest deployment (#%d).\\nYou can start a new deployment using the --latest option.\", config.Status.LatestVersion)\n\t\t}\n\t\treturn err\n\t}\n\n\tif status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {\n\t\tmessage := fmt.Sprintf(\"#%d is %s; only failed deployments can be retried.\\n\", config.Status.LatestVersion, status)\n\t\tif status == deployapi.DeploymentStatusComplete {\n\t\t\tmessage += \"You can start a new deployment using the --latest option.\"\n\t\t} else {\n\t\t\tmessage += \"Optionally, you can cancel this deployment using the --cancel option.\"\n\t\t}\n\n\t\treturn fmt.Errorf(message)\n\t}\n\n\t\/\/ Delete the deployer pod as well as the deployment hooks pods, if any\n\tpods, err := o.kubeClient.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list deployer\/hook pods for deployment #%d: %v\", config.Status.LatestVersion, err)\n\t}\n\tfor _, pod := range pods.Items {\n\t\terr := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete deployer\/hook pod %s for deployment #%d: %v\", pod.Name, config.Status.LatestVersion, err)\n\t\t}\n\t}\n\n\tdeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)\n\t\/\/ clear out the cancellation flag as well as any previous status-reason annotation\n\tdelete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)\n\tdelete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)\n\t_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)\n\tif err == nil {\n\t\tfmt.Fprintf(out, \"Retried #%d\\n\", config.Status.LatestVersion)\n\t}\n\treturn err\n}\n\n\/\/ cancel cancels any deployment process in progress for config.\nfunc (o DeployOptions) cancel(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tdeployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(config.Name)})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(deployments.Items) == 0 {\n\t\tfmt.Fprintf(out, \"There have been no deployments for %s\/%s\\n\", config.Namespace, config.Name)\n\t\treturn nil\n\t}\n\tsort.Sort(deployutil.ByLatestVersionDesc(deployments.Items))\n\tfailedCancellations := []string{}\n\tanyCancelled := false\n\tfor _, deployment := range deployments.Items {\n\t\tstatus := deployutil.DeploymentStatusFor(&deployment)\n\t\tswitch status {\n\t\tcase deployapi.DeploymentStatusNew,\n\t\t\tdeployapi.DeploymentStatusPending,\n\t\t\tdeployapi.DeploymentStatusRunning:\n\n\t\t\tif deployutil.IsDeploymentCancelled(&deployment) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdeployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue\n\t\t\tdeployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser\n\t\t\t_, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Fprintf(out, \"Cancelled deployment #%d\\n\", config.Status.LatestVersion)\n\t\t\t\tanyCancelled = true\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(out, \"Couldn't cancel deployment #%d (status: %s): %v\\n\", deployutil.DeploymentVersionFor(&deployment), status, err)\n\t\t\t\tfailedCancellations = append(failedCancellations, strconv.Itoa(deployutil.DeploymentVersionFor(&deployment)))\n\t\t\t}\n\t\t}\n\t}\n\tif len(failedCancellations) > 0 {\n\t\treturn fmt.Errorf(\"couldn't cancel deployment %s\", strings.Join(failedCancellations, \", \"))\n\t}\n\tif !anyCancelled {\n\t\tlatest := &deployments.Items[0]\n\t\tstatus := string(deployutil.DeploymentStatusFor(latest))\n\t\tif deployutil.IsDeploymentCancelled(latest) {\n\t\t\tstatus = \"cancelled\"\n\t\t}\n\t\ttimeAt := strings.ToLower(units.HumanDuration(time.Now().Sub(latest.CreationTimestamp.Time)))\n\t\tfmt.Fprintf(out, \"No deployments are in progress (latest deployment #%d %s %s ago)\\n\",\n\t\t\tdeployutil.DeploymentVersionFor(latest),\n\t\t\tstrings.ToLower(status),\n\t\t\ttimeAt)\n\t}\n\treturn nil\n}\n\n\/\/ reenableTriggers enables all image triggers and then persists config.\nfunc (o DeployOptions) reenableTriggers(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tenabled := []string{}\n\tfor _, trigger := range config.Spec.Triggers {\n\t\tif trigger.Type == deployapi.DeploymentTriggerOnImageChange {\n\t\t\ttrigger.ImageChangeParams.Automatic = true\n\t\t\tenabled = append(enabled, trigger.ImageChangeParams.From.Name)\n\t\t}\n\t}\n\tif len(enabled) == 0 {\n\t\tfmt.Fprintln(out, \"No image triggers found to enable\")\n\t\treturn nil\n\t}\n\t_, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(out, \"Enabled image triggers: %s\\n\", strings.Join(enabled, \",\"))\n\treturn nil\n}\n<commit_msg>oc: fix help message for deploy<commit_after>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/units\"\n\t\"github.com\/spf13\/cobra\"\n\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkerrors \"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\tkclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\n\t\"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/cli\/describe\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n\tdeployapi \"github.com\/openshift\/origin\/pkg\/deploy\/api\"\n\tdeployutil \"github.com\/openshift\/origin\/pkg\/deploy\/util\"\n)\n\n\/\/ DeployOptions holds all the options for the `deploy` command\ntype DeployOptions struct {\n\tout io.Writer\n\tosClient client.Interface\n\tkubeClient kclient.Interface\n\tbuilder *resource.Builder\n\tnamespace string\n\tbaseCommandName string\n\n\tdeploymentConfigName string\n\tdeployLatest bool\n\tretryDeploy bool\n\tcancelDeploy bool\n\tenableTriggers bool\n}\n\nconst (\n\tdeployLong = `\nView, start, cancel, or retry a deployment\n\nThis command allows you to control a deployment config. Each individual deployment is exposed\nas a new replication controller, and the deployment process manages scaling down old deployments\nand scaling up new ones. Use '%[1]s rollback' to rollback to any previous deployment.\n\nThere are several deployment strategies defined:\n\n* Rolling (default) - scales up the new deployment in stages, gradually reducing the number\n of old deployments. If one of the new deployed pods never becomes \"ready\", the new deployment\n will be rolled back (scaled down to zero). Use when your application can tolerate two versions\n of code running at the same time (many web applications, scalable databases)\n* Recreate - scales the old deployment down to zero, then scales the new deployment up to full.\n Use when your application cannot tolerate two versions of code running at the same time\n* Custom - run your own deployment process inside a Docker container using your own scripts.\n\nIf a deployment fails, you may opt to retry it (if the error was transient). Some deployments may\nnever successfully complete - in which case you can use the '--latest' flag to force a redeployment.\nIf a deployment config has completed deploying successfully at least once in the past, it would be\nautomatically rolled back in the event of a new failed deployment. Note that you would still need\nto update the erroneous deployment config in order to have its template persisted across your\napplication.\n\nIf you want to cancel a running deployment, use '--cancel' but keep in mind that this is a best-effort\noperation and may take some time to complete. It’s possible the deployment will partially or totally\ncomplete before the cancellation is effective. In such a case an appropriate event will be emitted.\n\nIf no options are given, shows information about the latest deployment.`\n\n\tdeployExample = ` # Display the latest deployment for the 'database' deployment config\n $ %[1]s deploy database\n\n # Start a new deployment based on the 'database'\n $ %[1]s deploy database --latest\n\n # Retry the latest failed deployment based on 'frontend'\n # The deployer pod and any hook pods are deleted for the latest failed deployment\n $ %[1]s deploy frontend --retry\n\n # Cancel the in-progress deployment based on 'frontend'\n $ %[1]s deploy frontend --cancel`\n)\n\n\/\/ NewCmdDeploy creates a new `deploy` command.\nfunc NewCmdDeploy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\toptions := &DeployOptions{\n\t\tbaseCommandName: fullName,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"deploy DEPLOYMENTCONFIG [--latest|--retry|--cancel|--enable-triggers]\",\n\t\tShort: \"View, start, cancel, or retry a deployment\",\n\t\tLong: fmt.Sprintf(deployLong, fullName),\n\t\tExample: fmt.Sprintf(deployExample, fullName),\n\t\tSuggestFor: []string{\"deployment\"},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif err := options.Complete(f, args, out); err != nil {\n\t\t\t\tkcmdutil.CheckErr(err)\n\t\t\t}\n\n\t\t\tif err := options.Validate(); err != nil {\n\t\t\t\tkcmdutil.CheckErr(kcmdutil.UsageError(cmd, err.Error()))\n\t\t\t}\n\n\t\t\tif err := options.RunDeploy(); err != nil {\n\t\t\t\tkcmdutil.CheckErr(err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcmd.Flags().BoolVar(&options.deployLatest, \"latest\", false, \"Start a new deployment now.\")\n\tcmd.Flags().BoolVar(&options.retryDeploy, \"retry\", false, \"Retry the latest failed deployment.\")\n\tcmd.Flags().BoolVar(&options.cancelDeploy, \"cancel\", false, \"Cancel the in-progress deployment.\")\n\tcmd.Flags().BoolVar(&options.enableTriggers, \"enable-triggers\", false, \"Enables all image triggers for the deployment config.\")\n\n\treturn cmd\n}\n\nfunc (o *DeployOptions) Complete(f *clientcmd.Factory, args []string, out io.Writer) error {\n\tif len(args) > 1 {\n\t\treturn errors.New(\"only one deployment config name is supported as argument.\")\n\t}\n\tvar err error\n\n\to.osClient, o.kubeClient, err = f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\to.namespace, _, err = f.DefaultNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper, typer := f.Object()\n\to.builder = resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder())\n\n\to.out = out\n\n\tif len(args) > 0 {\n\t\to.deploymentConfigName = args[0]\n\t}\n\n\treturn nil\n}\n\nfunc (o DeployOptions) Validate() error {\n\tif len(o.deploymentConfigName) == 0 {\n\t\treturn errors.New(\"a deployment config name is required.\")\n\t}\n\tnumOptions := 0\n\tif o.deployLatest {\n\t\tnumOptions++\n\t}\n\tif o.retryDeploy {\n\t\tnumOptions++\n\t}\n\tif o.cancelDeploy {\n\t\tnumOptions++\n\t}\n\tif o.enableTriggers {\n\t\tnumOptions++\n\t}\n\tif numOptions > 1 {\n\t\treturn errors.New(\"only one of --latest, --retry, --cancel, or --enable-triggers is allowed.\")\n\t}\n\treturn nil\n}\n\nfunc (o DeployOptions) RunDeploy() error {\n\tr := o.builder.\n\t\tNamespaceParam(o.namespace).\n\t\tResourceNames(\"deploymentconfigs\", o.deploymentConfigName).\n\t\tSingleResourceType().\n\t\tDo()\n\tresultObj, err := r.Object()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig, ok := resultObj.(*deployapi.DeploymentConfig)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s is not a valid deployment config\", o.deploymentConfigName)\n\t}\n\n\tswitch {\n\tcase o.deployLatest:\n\t\terr = o.deploy(config, o.out)\n\tcase o.retryDeploy:\n\t\terr = o.retry(config, o.out)\n\tcase o.cancelDeploy:\n\t\terr = o.cancel(config, o.out)\n\tcase o.enableTriggers:\n\t\terr = o.reenableTriggers(config, o.out)\n\tdefault:\n\t\tdescriber := describe.NewLatestDeploymentsDescriber(o.osClient, o.kubeClient, -1)\n\t\tdesc, err := describer.Describe(config.Namespace, config.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprint(o.out, desc)\n\t}\n\n\treturn err\n}\n\n\/\/ deploy launches a new deployment unless there's already a deployment\n\/\/ process in progress for config.\nfunc (o DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tdeploymentName := deployutil.LatestDeploymentNameForConfig(config)\n\tdeployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)\n\tif err == nil {\n\t\t\/\/ Reject attempts to start a concurrent deployment.\n\t\tstatus := deployutil.DeploymentStatusFor(deployment)\n\t\tif status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {\n\t\t\treturn fmt.Errorf(\"#%d is already in progress (%s).\\nOptionally, you can cancel this deployment using the --cancel option.\", config.Status.LatestVersion, status)\n\t\t}\n\t} else {\n\t\tif !kerrors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconfig.Status.LatestVersion++\n\t_, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)\n\tif err == nil {\n\t\tfmt.Fprintf(out, \"Started deployment #%d\\n\", config.Status.LatestVersion)\n\t\tfmt.Fprintf(out, \"Use '%s logs -f dc\/%s' to track its progress.\\n\", o.baseCommandName, config.Name)\n\t}\n\treturn err\n}\n\n\/\/ retry resets the status of the latest deployment to New, which will cause\n\/\/ the deployment to be retried. An error is returned if the deployment is not\n\/\/ currently in a failed state.\nfunc (o DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tif config.Status.LatestVersion == 0 {\n\t\treturn fmt.Errorf(\"no deployments found for %s\/%s\", config.Namespace, config.Name)\n\t}\n\tdeploymentName := deployutil.LatestDeploymentNameForConfig(config)\n\tdeployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)\n\tif err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"unable to find the latest deployment (#%d).\\nYou can start a new deployment using the --latest option.\", config.Status.LatestVersion)\n\t\t}\n\t\treturn err\n\t}\n\n\tif status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {\n\t\tmessage := fmt.Sprintf(\"#%d is %s; only failed deployments can be retried.\\n\", config.Status.LatestVersion, status)\n\t\tif status == deployapi.DeploymentStatusComplete {\n\t\t\tmessage += \"You can start a new deployment using the --latest option.\"\n\t\t} else {\n\t\t\tmessage += \"Optionally, you can cancel this deployment using the --cancel option.\"\n\t\t}\n\n\t\treturn fmt.Errorf(message)\n\t}\n\n\t\/\/ Delete the deployer pod as well as the deployment hooks pods, if any\n\tpods, err := o.kubeClient.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list deployer\/hook pods for deployment #%d: %v\", config.Status.LatestVersion, err)\n\t}\n\tfor _, pod := range pods.Items {\n\t\terr := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete deployer\/hook pod %s for deployment #%d: %v\", pod.Name, config.Status.LatestVersion, err)\n\t\t}\n\t}\n\n\tdeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)\n\t\/\/ clear out the cancellation flag as well as any previous status-reason annotation\n\tdelete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)\n\tdelete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)\n\t_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)\n\tif err == nil {\n\t\tfmt.Fprintf(out, \"Retried #%d\\n\", config.Status.LatestVersion)\n\t}\n\treturn err\n}\n\n\/\/ cancel cancels any deployment process in progress for config.\nfunc (o DeployOptions) cancel(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tdeployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(config.Name)})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(deployments.Items) == 0 {\n\t\tfmt.Fprintf(out, \"There have been no deployments for %s\/%s\\n\", config.Namespace, config.Name)\n\t\treturn nil\n\t}\n\tsort.Sort(deployutil.ByLatestVersionDesc(deployments.Items))\n\tfailedCancellations := []string{}\n\tanyCancelled := false\n\tfor _, deployment := range deployments.Items {\n\t\tstatus := deployutil.DeploymentStatusFor(&deployment)\n\t\tswitch status {\n\t\tcase deployapi.DeploymentStatusNew,\n\t\t\tdeployapi.DeploymentStatusPending,\n\t\t\tdeployapi.DeploymentStatusRunning:\n\n\t\t\tif deployutil.IsDeploymentCancelled(&deployment) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdeployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue\n\t\t\tdeployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser\n\t\t\t_, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Fprintf(out, \"Cancelled deployment #%d\\n\", config.Status.LatestVersion)\n\t\t\t\tanyCancelled = true\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(out, \"Couldn't cancel deployment #%d (status: %s): %v\\n\", deployutil.DeploymentVersionFor(&deployment), status, err)\n\t\t\t\tfailedCancellations = append(failedCancellations, strconv.Itoa(deployutil.DeploymentVersionFor(&deployment)))\n\t\t\t}\n\t\t}\n\t}\n\tif len(failedCancellations) > 0 {\n\t\treturn fmt.Errorf(\"couldn't cancel deployment %s\", strings.Join(failedCancellations, \", \"))\n\t}\n\tif !anyCancelled {\n\t\tlatest := &deployments.Items[0]\n\t\tstatus := string(deployutil.DeploymentStatusFor(latest))\n\t\tif deployutil.IsDeploymentCancelled(latest) {\n\t\t\tstatus = \"cancelled\"\n\t\t}\n\t\ttimeAt := strings.ToLower(units.HumanDuration(time.Now().Sub(latest.CreationTimestamp.Time)))\n\t\tfmt.Fprintf(out, \"No deployments are in progress (latest deployment #%d %s %s ago)\\n\",\n\t\t\tdeployutil.DeploymentVersionFor(latest),\n\t\t\tstrings.ToLower(status),\n\t\t\ttimeAt)\n\t}\n\treturn nil\n}\n\n\/\/ reenableTriggers enables all image triggers and then persists config.\nfunc (o DeployOptions) reenableTriggers(config *deployapi.DeploymentConfig, out io.Writer) error {\n\tenabled := []string{}\n\tfor _, trigger := range config.Spec.Triggers {\n\t\tif trigger.Type == deployapi.DeploymentTriggerOnImageChange {\n\t\t\ttrigger.ImageChangeParams.Automatic = true\n\t\t\tenabled = append(enabled, trigger.ImageChangeParams.From.Name)\n\t\t}\n\t}\n\tif len(enabled) == 0 {\n\t\tfmt.Fprintln(out, \"No image triggers found to enable\")\n\t\treturn nil\n\t}\n\t_, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(out, \"Enabled image triggers: %s\\n\", strings.Join(enabled, \",\"))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t\"github.com\/Aptomi\/aptomi\/pkg\/lang\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\"\n\t\"time\"\n)\n\n\/\/ PolicyDataObject is an informational data structure with Kind and Constructor for PolicyData\nvar PolicyDataObject = &runtime.Info{\n\tKind: \"policy\",\n\tStorable: true,\n\tVersioned: true,\n\tConstructor: func() runtime.Object { return &PolicyData{} },\n}\n\n\/\/ PolicyDataKey is the default key for the policy object (there is only one policy exists but with multiple generations)\nvar PolicyDataKey = runtime.KeyFromParts(runtime.SystemNS, PolicyDataObject.Kind, runtime.EmptyName)\n\n\/\/ PolicyData is a struct which contains references to a generation for each object included into the policy\ntype PolicyData struct {\n\truntime.TypeKind `yaml:\",inline\"`\n\tMetadata PolicyDataMetadata\n\n\t\/\/ Objects stores all policy objects in map: namespace -> kind -> name -> generation\n\tObjects map[string]map[string]map[string]runtime.Generation\n}\n\n\/\/ PolicyDataMetadata is the metadata for PolicyData object that includes generation\ntype PolicyDataMetadata struct {\n\tGeneration runtime.Generation\n\tUpdatedAt time.Time\n\tUpdatedBy string\n}\n\n\/\/ GetName returns PolicyData name\nfunc (policyData *PolicyData) GetName() string {\n\treturn runtime.EmptyName\n}\n\n\/\/ GetNamespace returns PolicyData namespace\nfunc (policyData *PolicyData) GetNamespace() string {\n\treturn runtime.SystemNS\n}\n\n\/\/ GetGeneration returns PolicyData generation\nfunc (policyData *PolicyData) GetGeneration() runtime.Generation {\n\treturn policyData.Metadata.Generation\n}\n\n\/\/ SetGeneration sets PolicyData generation\nfunc (policyData *PolicyData) SetGeneration(gen runtime.Generation) {\n\tpolicyData.Metadata.Generation = gen\n}\n\n\/\/ Add adds an object to PolicyData\nfunc (policyData *PolicyData) Add(obj lang.Base) {\n\tbyNs, exist := policyData.Objects[obj.GetNamespace()]\n\tif !exist {\n\t\tbyNs = make(map[string]map[string]runtime.Generation)\n\t\tpolicyData.Objects[obj.GetNamespace()] = byNs\n\t}\n\tbyKind, exist := byNs[obj.GetKind()]\n\tif !exist {\n\t\tbyKind = make(map[string]runtime.Generation)\n\t\tbyNs[obj.GetKind()] = byKind\n\t}\n\tbyKind[obj.GetName()] = obj.GetGeneration()\n}\n\n\/\/ Remove deletes an object from PolicyData\nfunc (policyData *PolicyData) Remove(obj lang.Base) bool { \/\/ nolint: interfacer\n\tbyNs, exist := policyData.Objects[obj.GetNamespace()]\n\tif !exist {\n\t\treturn false\n\t}\n\tbyKind, exist := byNs[obj.GetKind()]\n\tif !exist {\n\t\treturn false\n\t}\n\t_, exist = byKind[obj.GetName()]\n\tdelete(byKind, obj.GetName())\n\n\treturn exist\n}\n\n\/\/ GetDefaultColumns returns default set of columns to be displayed\nfunc (policyData *PolicyData) GetDefaultColumns() []string {\n\treturn []string{\"Policy Version\"}\n}\n\n\/\/ AsColumns returns PolicyData representation as columns\nfunc (policyData *PolicyData) AsColumns() map[string]string {\n\tresult := make(map[string]string)\n\n\tresult[\"Policy Version\"] = policyData.GetGeneration().String()\n\n\treturn result\n}\n<commit_msg>removed empty lines<commit_after>package engine\n\nimport (\n\t\"github.com\/Aptomi\/aptomi\/pkg\/lang\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\"\n\t\"time\"\n)\n\n\/\/ PolicyDataObject is an informational data structure with Kind and Constructor for PolicyData\nvar PolicyDataObject = &runtime.Info{\n\tKind: \"policy\",\n\tStorable: true,\n\tVersioned: true,\n\tConstructor: func() runtime.Object { return &PolicyData{} },\n}\n\n\/\/ PolicyDataKey is the default key for the policy object (there is only one policy exists but with multiple generations)\nvar PolicyDataKey = runtime.KeyFromParts(runtime.SystemNS, PolicyDataObject.Kind, runtime.EmptyName)\n\n\/\/ PolicyData is a struct which contains references to a generation for each object included into the policy\ntype PolicyData struct {\n\truntime.TypeKind `yaml:\",inline\"`\n\tMetadata PolicyDataMetadata\n\n\t\/\/ Objects stores all policy objects in map: namespace -> kind -> name -> generation\n\tObjects map[string]map[string]map[string]runtime.Generation\n}\n\n\/\/ PolicyDataMetadata is the metadata for PolicyData object that includes generation\ntype PolicyDataMetadata struct {\n\tGeneration runtime.Generation\n\tUpdatedAt time.Time\n\tUpdatedBy string\n}\n\n\/\/ GetName returns PolicyData name\nfunc (policyData *PolicyData) GetName() string {\n\treturn runtime.EmptyName\n}\n\n\/\/ GetNamespace returns PolicyData namespace\nfunc (policyData *PolicyData) GetNamespace() string {\n\treturn runtime.SystemNS\n}\n\n\/\/ GetGeneration returns PolicyData generation\nfunc (policyData *PolicyData) GetGeneration() runtime.Generation {\n\treturn policyData.Metadata.Generation\n}\n\n\/\/ SetGeneration sets PolicyData generation\nfunc (policyData *PolicyData) SetGeneration(gen runtime.Generation) {\n\tpolicyData.Metadata.Generation = gen\n}\n\n\/\/ Add adds an object to PolicyData\nfunc (policyData *PolicyData) Add(obj lang.Base) {\n\tbyNs, exist := policyData.Objects[obj.GetNamespace()]\n\tif !exist {\n\t\tbyNs = make(map[string]map[string]runtime.Generation)\n\t\tpolicyData.Objects[obj.GetNamespace()] = byNs\n\t}\n\tbyKind, exist := byNs[obj.GetKind()]\n\tif !exist {\n\t\tbyKind = make(map[string]runtime.Generation)\n\t\tbyNs[obj.GetKind()] = byKind\n\t}\n\tbyKind[obj.GetName()] = obj.GetGeneration()\n}\n\n\/\/ Remove deletes an object from PolicyData\nfunc (policyData *PolicyData) Remove(obj lang.Base) bool { \/\/ nolint: interfacer\n\tbyNs, exist := policyData.Objects[obj.GetNamespace()]\n\tif !exist {\n\t\treturn false\n\t}\n\tbyKind, exist := byNs[obj.GetKind()]\n\tif !exist {\n\t\treturn false\n\t}\n\t_, exist = byKind[obj.GetName()]\n\tdelete(byKind, obj.GetName())\n\n\treturn exist\n}\n\n\/\/ GetDefaultColumns returns default set of columns to be displayed\nfunc (policyData *PolicyData) GetDefaultColumns() []string {\n\treturn []string{\"Policy Version\"}\n}\n\n\/\/ AsColumns returns PolicyData representation as columns\nfunc (policyData *PolicyData) AsColumns() map[string]string {\n\tresult := make(map[string]string)\n\tresult[\"Policy Version\"] = policyData.GetGeneration().String()\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ ClawIO - Scalable Distributed High-Performance Synchronisation and Sharing Service\n\/\/\n\/\/ Copyright (C) 2015 Hugo González Labrador <clawio@hugo.labkode.com>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version. See file COPYNG.\n\npackage api\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/github.com\/gorilla\/handlers\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/clawio\/clawiod\/pkg\/config\"\n\t\"github.com\/clawio\/clawiod\/pkg\/httpserver\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"net\/http\"\n\t\"time\"\n\n\tapidisp \"github.com\/clawio\/clawiod\/pkg\/api\/dispatcher\"\n\tauthdisp \"github.com\/clawio\/clawiod\/pkg\/auth\/dispatcher\"\n\tstoragedisp \"github.com\/clawio\/clawiod\/pkg\/storage\/dispatcher\"\n\n\tlogger \"github.com\/clawio\/clawiod\/pkg\/logger\/logrus\"\n\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/github.com\/tylerb\/graceful\"\n)\n\ntype apiServer struct {\n\tappLogWriter io.Writer\n\treqLogWriter io.Writer\n\tapidisp apidisp.Dispatcher\n\tadisp authdisp.Dispatcher\n\tsdisp storagedisp.Dispatcher\n\tsrv *graceful.Server\n\tcfg config.Config\n}\n\n\/\/ New returns a new HTTPServer\nfunc New(appLogWriter io.Writer, reqLogWriter io.Writer, apidisp apidisp.Dispatcher, adisp authdisp.Dispatcher, sdisp storagedisp.Dispatcher, cfg config.Config) (httpserver.HTTPServer, error) {\n\tdirectives, err := cfg.GetDirectives()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrv := &graceful.Server{\n\t\tNoSignalHandling: true,\n\t\tTimeout: time.Duration(directives.ShutdownTimeout) * time.Second,\n\t\tServer: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", directives.Port),\n\t\t},\n\t}\n\treturn &apiServer{appLogWriter: appLogWriter, reqLogWriter: reqLogWriter, apidisp: apidisp, adisp: adisp, sdisp: sdisp, srv: srv, cfg: cfg}, nil\n}\n\nfunc (s *apiServer) Start() error {\n\tdirectives, err := s.cfg.GetDirectives()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.srv.Server.Handler = s.HandleRequest()\n\tif directives.TLSEnabled == true {\n\t\treturn s.srv.ListenAndServeTLS(directives.TLSCertificate, directives.TLSCertificatePrivateKey)\n\t}\n\treturn s.srv.ListenAndServe()\n}\nfunc (s *apiServer) StopChan() <-chan struct{} {\n\treturn s.srv.StopChan()\n}\nfunc (s *apiServer) Stop() {\n\ts.srv.Stop(10 * time.Second)\n}\n\nfunc (s *apiServer) HandleRequest() http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\/******************************************\n\t\t ** 1. Create logger for request *******\n\t\t ******************************************\/\n\t\tlog, err := logger.New(s.appLogWriter, \"api-\"+uuid.New(), s.cfg)\n\t\tif err != nil {\n\t\t\t\/\/ At this point we don't have a logger, so the output go to stderr\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tlog.Info(\"Request started:\" + r.Method + \" \" + r.RequestURI)\n\t\tdefer func() {\n\t\t\tlog.Info(\"Request finished\")\n\n\t\t\t\/\/ Catch panic and return 500\n\t\t\tvar err error\n\t\t\tr := recover()\n\t\t\tif r != nil {\n\t\t\t\tswitch t := r.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\terr = errors.New(t)\n\t\t\t\tcase error:\n\t\t\t\t\terr = t\n\t\t\t\tdefault:\n\t\t\t\t\terr = errors.New(fmt.Sprintln(r))\n\t\t\t\t}\n\t\t\t\ttrace := make([]byte, 2048)\n\t\t\t\tcount := runtime.Stack(trace, true)\n\t\t\t\tlog.Err(fmt.Sprintf(\"Recover from panic: %s\\nStack of %d bytes: %s\\n\", err.Error(), count, trace))\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\n\t\tdirectives, err := s.cfg.GetDirectives()\n\t\tif err != nil {\n\t\t\tlog.Err(err.Error())\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Check the server is not in maintenance mode\n\t\tif directives.Maintenance == true {\n\t\t\thttp.Error(w, directives.MaintenanceMessage, http.StatusServiceUnavailable)\n\t\t\treturn\n\t\t}\n\n\t\trootCtx := context.Background()\n\t\tctx := context.WithValue(rootCtx, \"log\", log)\n\t\tctx = context.WithValue(ctx, \"adisp\", s.adisp)\n\t\tctx = context.WithValue(ctx, \"sdisp\", s.sdisp)\n\n\t\ts.apidisp.HandleRequest(ctx, w, r)\n\n\t}\n\n\tfn500 := func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdirectives, err := s.cfg.GetDirectives()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"error at apiServer.HandleRequest():\", err.Error())\n\t\treturn http.HandlerFunc(fn500)\n\t}\n\n\tif directives.LogRequests == true {\n\t\treturn handlers.CombinedLoggingHandler(s.reqLogWriter, http.HandlerFunc(fn))\n\t}\n\treturn http.HandlerFunc(fn)\n}\n<commit_msg>Use configuration to enable OCWebDAV API<commit_after>\/\/ ClawIO - Scalable Distributed High-Performance Synchronisation and Sharing Service\n\/\/\n\/\/ Copyright (C) 2015 Hugo González Labrador <clawio@hugo.labkode.com>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version. See file COPYNG.\n\npackage api\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/github.com\/gorilla\/handlers\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/clawio\/clawiod\/pkg\/config\"\n\t\"github.com\/clawio\/clawiod\/pkg\/httpserver\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"net\/http\"\n\t\"time\"\n\n\tapidisp \"github.com\/clawio\/clawiod\/pkg\/api\/dispatcher\"\n\tauthdisp \"github.com\/clawio\/clawiod\/pkg\/auth\/dispatcher\"\n\tstoragedisp \"github.com\/clawio\/clawiod\/pkg\/storage\/dispatcher\"\n\n\tlogger \"github.com\/clawio\/clawiod\/pkg\/logger\/logrus\"\n\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/clawio\/clawiod\/Godeps\/_workspace\/src\/github.com\/tylerb\/graceful\"\n)\n\ntype apiServer struct {\n\tappLogWriter io.Writer\n\treqLogWriter io.Writer\n\tapidisp apidisp.Dispatcher\n\tadisp authdisp.Dispatcher\n\tsdisp storagedisp.Dispatcher\n\tsrv *graceful.Server\n\tcfg config.Config\n}\n\n\/\/ New returns a new HTTPServer\nfunc New(appLogWriter io.Writer, reqLogWriter io.Writer, apidisp apidisp.Dispatcher, adisp authdisp.Dispatcher, sdisp storagedisp.Dispatcher, cfg config.Config) (httpserver.HTTPServer, error) {\n\tdirectives, err := cfg.GetDirectives()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrv := &graceful.Server{\n\t\tNoSignalHandling: true,\n\t\tTimeout: time.Duration(directives.ShutdownTimeout) * time.Second,\n\t\tServer: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", directives.Port),\n\t\t},\n\t}\n\treturn &apiServer{appLogWriter: appLogWriter, reqLogWriter: reqLogWriter, apidisp: apidisp, adisp: adisp, sdisp: sdisp, srv: srv, cfg: cfg}, nil\n}\n\nfunc (s *apiServer) Start() error {\n\tdirectives, err := s.cfg.GetDirectives()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.srv.Server.Handler = s.HandleRequest()\n\tif directives.TLSEnabled == true {\n\t\treturn s.srv.ListenAndServeTLS(directives.TLSCertificate, directives.TLSCertificatePrivateKey)\n\t}\n\treturn s.srv.ListenAndServe()\n}\nfunc (s *apiServer) StopChan() <-chan struct{} {\n\treturn s.srv.StopChan()\n}\nfunc (s *apiServer) Stop() {\n\ts.srv.Stop(10 * time.Second)\n}\n\nfunc (s *apiServer) HandleRequest() http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\/******************************************\n\t\t ** 1. Create logger for request *******\n\t\t ******************************************\/\n\t\tlog, err := logger.New(s.appLogWriter, \"api-\"+uuid.New(), s.cfg)\n\t\tif err != nil {\n\t\t\t\/\/ At this point we don't have a logger, so the output go to stderr\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tlog.Info(\"Request started: \" + r.Method + \" \" + r.RequestURI)\n\t\tdefer func() {\n\t\t\tlog.Info(\"Request finished\")\n\n\t\t\t\/\/ Catch panic and return 500\n\t\t\tvar err error\n\t\t\tr := recover()\n\t\t\tif r != nil {\n\t\t\t\tswitch t := r.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\terr = errors.New(t)\n\t\t\t\tcase error:\n\t\t\t\t\terr = t\n\t\t\t\tdefault:\n\t\t\t\t\terr = errors.New(fmt.Sprintln(r))\n\t\t\t\t}\n\t\t\t\ttrace := make([]byte, 2048)\n\t\t\t\tcount := runtime.Stack(trace, true)\n\t\t\t\tlog.Err(fmt.Sprintf(\"Recover from panic: %s\\nStack of %d bytes: %s\\n\", err.Error(), count, trace))\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\n\t\tdirectives, err := s.cfg.GetDirectives()\n\t\tif err != nil {\n\t\t\tlog.Err(err.Error())\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Check the server is not in maintenance mode\n\t\tif directives.Maintenance == true {\n\t\t\thttp.Error(w, directives.MaintenanceMessage, http.StatusServiceUnavailable)\n\t\t\treturn\n\t\t}\n\n\t\trootCtx := context.Background()\n\t\tctx := context.WithValue(rootCtx, \"log\", log)\n\t\tctx = context.WithValue(ctx, \"adisp\", s.adisp)\n\t\tctx = context.WithValue(ctx, \"sdisp\", s.sdisp)\n\n\t\ts.apidisp.HandleRequest(ctx, w, r)\n\n\t}\n\n\tfn500 := func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdirectives, err := s.cfg.GetDirectives()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"error at apiServer.HandleRequest():\", err.Error())\n\t\treturn http.HandlerFunc(fn500)\n\t}\n\n\tif directives.LogRequests == true {\n\t\treturn handlers.CombinedLoggingHandler(s.reqLogWriter, http.HandlerFunc(fn))\n\t}\n\treturn http.HandlerFunc(fn)\n}\n<|endoftext|>"} {"text":"<commit_before>package normalize\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/pkg\/transform\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\n\/\/ Returns false if target does not validate.\nfunc checkIPv4(label string) error {\n\tif net.ParseIP(label).To4() == nil {\n\t\treturn fmt.Errorf(\"WARNING: target (%v) is not an IPv4 address\", label)\n\t}\n\treturn nil\n}\n\n\/\/ Returns false if target does not validate.\nfunc checkIPv6(label string) error {\n\tif net.ParseIP(label).To16() == nil {\n\t\treturn fmt.Errorf(\"WARNING: target (%v) is not an IPv6 address\", label)\n\t}\n\treturn nil\n}\n\n\/\/ make sure target is valid reference for cnames, mx, etc.\nfunc checkTarget(target string) error {\n\tif target == \"@\" {\n\t\treturn nil\n\t}\n\tif len(target) < 1 {\n\t\treturn fmt.Errorf(\"empty target\")\n\t}\n\t\/\/ If it containts a \".\", it must end in a \".\".\n\tif strings.ContainsRune(target, '.') && target[len(target)-1] != '.' {\n\t\treturn fmt.Errorf(\"target (%v) must end with a (.) [Required if target is not single label]\", target)\n\t}\n\treturn nil\n}\n\n\/\/ validateRecordTypes list of valid rec.Type values. Returns true if this is a real DNS record type, false means it is a pseudo-type used internally.\nfunc validateRecordTypes(rec *models.RecordConfig, domain string, pTypes []string) error {\n\tvar validTypes = map[string]bool{\n\t\t\"A\": true,\n\t\t\"AAAA\": true,\n\t\t\"CNAME\": true,\n\t\t\"IMPORT_TRANSFORM\": false,\n\t\t\"MX\": true,\n\t\t\"TXT\": true,\n\t\t\"NS\": true,\n\t\t\"ALIAS\": false,\n\t}\n\t_, ok := validTypes[rec.Type]\n\tif !ok {\n\t\tcType := providers.GetCustomRecordType(rec.Type)\n\t\tif cType == nil {\n\t\t\treturn fmt.Errorf(\"Unsupported record type (%v) domain=%v name=%v\", rec.Type, domain, rec.Name)\n\t\t}\n\t\tfor _, providerType := range pTypes {\n\t\t\tif providerType != cType.Provider {\n\t\t\t\treturn fmt.Errorf(\"Custom record type %s is not compatible with provider type %s\", rec.Type, providerType)\n\t\t\t}\n\t\t}\n\t\t\/\/it is ok. Lets replace the type with real type and add metadata to say we checked it\n\t\trec.Metadata[\"orig_custom_type\"] = rec.Type\n\t\tif cType.RealType != \"\" {\n\t\t\trec.Type = cType.RealType\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ underscores in names are often used erroneously. They are valid for dns records, but invalid for urls.\n\/\/ here we list common records expected to have underscores. Anything else containing an underscore will print a warning.\nvar expectedUnderscores = []string{\"_domainkey\", \"_dmarc\"}\n\nfunc checkLabel(label string, rType string, domain string) error {\n\tif label == \"@\" {\n\t\treturn nil\n\t}\n\tif len(label) < 1 {\n\t\treturn fmt.Errorf(\"empty %s label in %s\", rType, domain)\n\t}\n\tif label[len(label)-1] == '.' {\n\t\treturn fmt.Errorf(\"label %s.%s ends with a (.)\", label, domain)\n\t}\n\n\t\/\/underscores are warnings\n\tif strings.ContainsRune(label, '_') {\n\t\t\/\/unless it is in our exclusion list\n\t\tok := false\n\t\tfor _, ex := range expectedUnderscores {\n\t\t\tif strings.Contains(label, ex) {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn Warning{fmt.Errorf(\"label %s.%s contains an underscore\", label, domain)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ checkTargets returns true if rec.Target is valid for the rec.Type.\nfunc checkTargets(rec *models.RecordConfig, domain string) (errs []error) {\n\tlabel := rec.Name\n\ttarget := rec.Target\n\tcheck := func(e error) {\n\t\tif e != nil {\n\t\t\terr := fmt.Errorf(\"In %s %s.%s: %s\", rec.Type, rec.Name, domain, e.Error())\n\t\t\tif _, ok := e.(Warning); ok {\n\t\t\t\terr = Warning{err}\n\t\t\t}\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tswitch rec.Type {\n\tcase \"A\":\n\t\tcheck(checkIPv4(target))\n\tcase \"AAAA\":\n\t\tcheck(checkIPv6(target))\n\tcase \"CNAME\":\n\t\tcheck(checkTarget(target))\n\t\tif label == \"@\" {\n\t\t\tcheck(fmt.Errorf(\"cannot create CNAME record for bare domain\"))\n\t\t}\n\tcase \"MX\":\n\t\tcheck(checkTarget(target))\n\tcase \"NS\":\n\t\tcheck(checkTarget(target))\n\t\tif label == \"@\" {\n\t\t\tcheck(fmt.Errorf(\"cannot create NS record for bare domain. Use NAMESERVER instead\"))\n\t\t}\n\tcase \"ALIAS\":\n\t\tcheck(checkTarget(target))\n\tcase \"TXT\", \"IMPORT_TRANSFORM\":\n\tdefault:\n\t\tif rec.Metadata[\"orig_custom_type\"] != \"\" {\n\t\t\t\/\/it is a valid custom type. We perform no validation on target\n\t\t\treturn\n\t\t}\n\t\terrs = append(errs, fmt.Errorf(\"Unimplemented record type (%v) domain=%v name=%v\",\n\t\t\trec.Type, domain, rec.Name))\n\t}\n\treturn\n}\n\nfunc transformCNAME(target, oldDomain, newDomain string) string {\n\t\/\/ Canonicalize. If it isn't a FQDN, add the newDomain.\n\tresult := dnsutil.AddOrigin(target, oldDomain)\n\tif dns.IsFqdn(result) {\n\t\tresult = result[:len(result)-1]\n\t}\n\treturn dnsutil.AddOrigin(result, newDomain) + \".\"\n}\n\n\/\/ import_transform imports the records of one zone into another, modifying records along the way.\nfunc importTransform(srcDomain, dstDomain *models.DomainConfig, transforms []transform.IpConversion, ttl uint32) error {\n\t\/\/ Read srcDomain.Records, transform, and append to dstDomain.Records:\n\t\/\/ 1. Skip any that aren't A or CNAMEs.\n\t\/\/ 2. Append destDomainname to the end of the label.\n\t\/\/ 3. For CNAMEs, append destDomainname to the end of the target.\n\t\/\/ 4. For As, change the target as described the transforms.\n\n\tfor _, rec := range srcDomain.Records {\n\t\tif dstDomain.HasRecordTypeName(rec.Type, rec.NameFQDN) {\n\t\t\tcontinue\n\t\t}\n\t\tnewRec := func() *models.RecordConfig {\n\t\t\trec2, _ := rec.Copy()\n\t\t\trec2.Name = rec2.NameFQDN\n\t\t\trec2.NameFQDN = dnsutil.AddOrigin(rec2.Name, dstDomain.Name)\n\t\t\tif ttl != 0 {\n\t\t\t\trec2.TTL = ttl\n\t\t\t}\n\t\t\treturn rec2\n\t\t}\n\t\tswitch rec.Type {\n\t\tcase \"A\":\n\t\t\ttrs, err := transform.TransformIPToList(net.ParseIP(rec.Target), transforms)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"import_transform: TransformIP(%v, %v) returned err=%s\", rec.Target, transforms, err)\n\t\t\t}\n\t\t\tfor _, tr := range trs {\n\t\t\t\tr := newRec()\n\t\t\t\tr.Target = tr.String()\n\t\t\t\tdstDomain.Records = append(dstDomain.Records, r)\n\t\t\t}\n\t\tcase \"CNAME\":\n\t\t\tr := newRec()\n\t\t\tr.Target = transformCNAME(r.Target, srcDomain.Name, dstDomain.Name)\n\t\t\tdstDomain.Records = append(dstDomain.Records, r)\n\t\tcase \"MX\", \"NS\", \"TXT\":\n\t\t\t\/\/ Not imported.\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"import_transform: Unimplemented record type %v (%v)\",\n\t\t\t\trec.Type, rec.Name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ deleteImportTransformRecords deletes any IMPORT_TRANSFORM records from a domain.\nfunc deleteImportTransformRecords(domain *models.DomainConfig) {\n\tfor i := len(domain.Records) - 1; i >= 0; i-- {\n\t\trec := domain.Records[i]\n\t\tif rec.Type == \"IMPORT_TRANSFORM\" {\n\t\t\tdomain.Records = append(domain.Records[:i], domain.Records[i+1:]...)\n\t\t}\n\t}\n}\n\n\/\/ Warning is a wrapper around error that can be used to indicate it should not\n\/\/ stop execution, but is still likely a problem.\ntype Warning struct {\n\terror\n}\n\nfunc NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {\n\tptypeMap := map[string]string{}\n\tfor _, p := range config.DNSProviders {\n\t\tptypeMap[p.Name] = p.Type\n\t}\n\n\tfor _, domain := range config.Domains {\n\t\tpTypes := []string{}\n\t\tfor p := range domain.DNSProviders {\n\t\t\tpType, ok := ptypeMap[p]\n\t\t\tif !ok {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"%s uses undefined DNS provider %s\", domain.Name, p))\n\t\t\t} else {\n\t\t\t\tpTypes = append(pTypes, pType)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Normalize Nameservers.\n\t\tfor _, ns := range domain.Nameservers {\n\t\t\tns.Name = dnsutil.AddOrigin(ns.Name, domain.Name)\n\t\t\tns.Name = strings.TrimRight(ns.Name, \".\")\n\t\t}\n\t\t\/\/ Normalize Records.\n\t\tfor _, rec := range domain.Records {\n\t\t\tif rec.TTL == 0 {\n\t\t\t\trec.TTL = models.DefaultTTL\n\t\t\t}\n\t\t\t\/\/ Validate the unmodified inputs:\n\t\t\tif err := validateRecordTypes(rec, domain.Name, pTypes); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t\tif err := checkLabel(rec.Name, rec.Type, domain.Name); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t\tif errs2 := checkTargets(rec, domain.Name); errs2 != nil {\n\t\t\t\terrs = append(errs, errs2...)\n\t\t\t}\n\n\t\t\t\/\/ Canonicalize Targets.\n\t\t\tif rec.Type == \"CNAME\" || rec.Type == \"MX\" || rec.Type == \"NS\" {\n\t\t\t\trec.Target = dnsutil.AddOrigin(rec.Target, domain.Name+\".\")\n\t\t\t} else if rec.Type == \"A\" || rec.Type == \"AAAA\" {\n\t\t\t\trec.Target = net.ParseIP(rec.Target).String()\n\t\t\t}\n\t\t\t\/\/ Populate FQDN:\n\t\t\trec.NameFQDN = dnsutil.AddOrigin(rec.Name, domain.Name)\n\t\t}\n\t}\n\n\t\/\/ Process any pseudo-records:\n\tfor _, domain := range config.Domains {\n\t\tfor _, rec := range domain.Records {\n\t\t\tif rec.Type == \"IMPORT_TRANSFORM\" {\n\t\t\t\ttable, err := transform.DecodeTransformTable(rec.Metadata[\"transform_table\"])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr = importTransform(config.FindDomain(rec.Target), domain, table, rec.TTL)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Clean up:\n\tfor _, domain := range config.Domains {\n\t\tdeleteImportTransformRecords(domain)\n\t}\n\n\t\/\/ Run record transforms\n\tfor _, domain := range config.Domains {\n\t\tif err := applyRecordTransforms(domain); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\t\/\/Check that CNAMES don't have to co-exist with any other records\n\tfor _, d := range config.Domains {\n\t\terrs = append(errs, checkCNAMEs(d)...)\n\t}\n\n\t\/\/Check that if any aliases are used in a domain, every provider for that domain supports them\n\tfor _, d := range config.Domains {\n\t\terr := checkALIASes(d, config.DNSProviders)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc checkCNAMEs(dc *models.DomainConfig) (errs []error) {\n\tcnames := map[string]bool{}\n\tfor _, r := range dc.Records {\n\t\tif r.Type == \"CNAME\" {\n\t\t\tif cnames[r.Name] {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"Cannot have multiple CNAMEs with same name: %s\", r.NameFQDN))\n\t\t\t}\n\t\t\tcnames[r.Name] = true\n\t\t}\n\t}\n\tfor _, r := range dc.Records {\n\t\tif cnames[r.Name] && r.Type != \"CNAME\" {\n\t\t\terrs = append(errs, fmt.Errorf(\"Cannot have CNAME and %s record with same name: %s\", r.Type, r.NameFQDN))\n\t\t}\n\t}\n\treturn\n}\n\nfunc checkALIASes(dc *models.DomainConfig, pList []*models.DNSProviderConfig) error {\n\thasAlias := false\n\tfor _, r := range dc.Records {\n\t\tif r.Type == \"ALIAS\" {\n\t\t\thasAlias = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasAlias {\n\t\treturn nil\n\t}\n\tfor pName := range dc.DNSProviders {\n\t\tfor _, p := range pList {\n\t\t\tif p.Name == pName {\n\t\t\t\tif !providers.ProviderHasCabability(p.Type, providers.CanUseAlias) {\n\t\t\t\t\treturn fmt.Errorf(\"Domain %s uses ALIAS records, but DNS provider type %s does not support them\", dc.Name, p.Type)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc applyRecordTransforms(domain *models.DomainConfig) error {\n\tfor _, rec := range domain.Records {\n\t\tif rec.Type != \"A\" {\n\t\t\tcontinue\n\t\t}\n\t\ttt, ok := rec.Metadata[\"transform\"]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\ttable, err := transform.DecodeTransformTable(tt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tip := net.ParseIP(rec.Target) \/\/ip already validated above\n\t\tnewIPs, err := transform.TransformIPToList(net.ParseIP(rec.Target), table)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i, newIP := range newIPs {\n\t\t\tif i == 0 && !newIP.Equal(ip) {\n\t\t\t\trec.Target = newIP.String() \/\/replace target of first record if different\n\t\t\t} else if i > 0 {\n\t\t\t\t\/\/ any additional ips need identical records with the alternate ip added to the domain\n\t\t\t\tcopy, err := rec.Copy()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcopy.Target = newIP.String()\n\t\t\t\tdomain.Records = append(domain.Records, copy)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Added _amazonses to list of allowed name with underscore (#133)<commit_after>package normalize\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/pkg\/transform\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\n\/\/ Returns false if target does not validate.\nfunc checkIPv4(label string) error {\n\tif net.ParseIP(label).To4() == nil {\n\t\treturn fmt.Errorf(\"WARNING: target (%v) is not an IPv4 address\", label)\n\t}\n\treturn nil\n}\n\n\/\/ Returns false if target does not validate.\nfunc checkIPv6(label string) error {\n\tif net.ParseIP(label).To16() == nil {\n\t\treturn fmt.Errorf(\"WARNING: target (%v) is not an IPv6 address\", label)\n\t}\n\treturn nil\n}\n\n\/\/ make sure target is valid reference for cnames, mx, etc.\nfunc checkTarget(target string) error {\n\tif target == \"@\" {\n\t\treturn nil\n\t}\n\tif len(target) < 1 {\n\t\treturn fmt.Errorf(\"empty target\")\n\t}\n\t\/\/ If it containts a \".\", it must end in a \".\".\n\tif strings.ContainsRune(target, '.') && target[len(target)-1] != '.' {\n\t\treturn fmt.Errorf(\"target (%v) must end with a (.) [Required if target is not single label]\", target)\n\t}\n\treturn nil\n}\n\n\/\/ validateRecordTypes list of valid rec.Type values. Returns true if this is a real DNS record type, false means it is a pseudo-type used internally.\nfunc validateRecordTypes(rec *models.RecordConfig, domain string, pTypes []string) error {\n\tvar validTypes = map[string]bool{\n\t\t\"A\": true,\n\t\t\"AAAA\": true,\n\t\t\"CNAME\": true,\n\t\t\"IMPORT_TRANSFORM\": false,\n\t\t\"MX\": true,\n\t\t\"TXT\": true,\n\t\t\"NS\": true,\n\t\t\"ALIAS\": false,\n\t}\n\t_, ok := validTypes[rec.Type]\n\tif !ok {\n\t\tcType := providers.GetCustomRecordType(rec.Type)\n\t\tif cType == nil {\n\t\t\treturn fmt.Errorf(\"Unsupported record type (%v) domain=%v name=%v\", rec.Type, domain, rec.Name)\n\t\t}\n\t\tfor _, providerType := range pTypes {\n\t\t\tif providerType != cType.Provider {\n\t\t\t\treturn fmt.Errorf(\"Custom record type %s is not compatible with provider type %s\", rec.Type, providerType)\n\t\t\t}\n\t\t}\n\t\t\/\/it is ok. Lets replace the type with real type and add metadata to say we checked it\n\t\trec.Metadata[\"orig_custom_type\"] = rec.Type\n\t\tif cType.RealType != \"\" {\n\t\t\trec.Type = cType.RealType\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ underscores in names are often used erroneously. They are valid for dns records, but invalid for urls.\n\/\/ here we list common records expected to have underscores. Anything else containing an underscore will print a warning.\nvar expectedUnderscores = []string{\"_domainkey\", \"_dmarc\", \"_amazonses\"}\n\nfunc checkLabel(label string, rType string, domain string) error {\n\tif label == \"@\" {\n\t\treturn nil\n\t}\n\tif len(label) < 1 {\n\t\treturn fmt.Errorf(\"empty %s label in %s\", rType, domain)\n\t}\n\tif label[len(label)-1] == '.' {\n\t\treturn fmt.Errorf(\"label %s.%s ends with a (.)\", label, domain)\n\t}\n\n\t\/\/underscores are warnings\n\tif strings.ContainsRune(label, '_') {\n\t\t\/\/unless it is in our exclusion list\n\t\tok := false\n\t\tfor _, ex := range expectedUnderscores {\n\t\t\tif strings.Contains(label, ex) {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn Warning{fmt.Errorf(\"label %s.%s contains an underscore\", label, domain)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ checkTargets returns true if rec.Target is valid for the rec.Type.\nfunc checkTargets(rec *models.RecordConfig, domain string) (errs []error) {\n\tlabel := rec.Name\n\ttarget := rec.Target\n\tcheck := func(e error) {\n\t\tif e != nil {\n\t\t\terr := fmt.Errorf(\"In %s %s.%s: %s\", rec.Type, rec.Name, domain, e.Error())\n\t\t\tif _, ok := e.(Warning); ok {\n\t\t\t\terr = Warning{err}\n\t\t\t}\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tswitch rec.Type {\n\tcase \"A\":\n\t\tcheck(checkIPv4(target))\n\tcase \"AAAA\":\n\t\tcheck(checkIPv6(target))\n\tcase \"CNAME\":\n\t\tcheck(checkTarget(target))\n\t\tif label == \"@\" {\n\t\t\tcheck(fmt.Errorf(\"cannot create CNAME record for bare domain\"))\n\t\t}\n\tcase \"MX\":\n\t\tcheck(checkTarget(target))\n\tcase \"NS\":\n\t\tcheck(checkTarget(target))\n\t\tif label == \"@\" {\n\t\t\tcheck(fmt.Errorf(\"cannot create NS record for bare domain. Use NAMESERVER instead\"))\n\t\t}\n\tcase \"ALIAS\":\n\t\tcheck(checkTarget(target))\n\tcase \"TXT\", \"IMPORT_TRANSFORM\":\n\tdefault:\n\t\tif rec.Metadata[\"orig_custom_type\"] != \"\" {\n\t\t\t\/\/it is a valid custom type. We perform no validation on target\n\t\t\treturn\n\t\t}\n\t\terrs = append(errs, fmt.Errorf(\"Unimplemented record type (%v) domain=%v name=%v\",\n\t\t\trec.Type, domain, rec.Name))\n\t}\n\treturn\n}\n\nfunc transformCNAME(target, oldDomain, newDomain string) string {\n\t\/\/ Canonicalize. If it isn't a FQDN, add the newDomain.\n\tresult := dnsutil.AddOrigin(target, oldDomain)\n\tif dns.IsFqdn(result) {\n\t\tresult = result[:len(result)-1]\n\t}\n\treturn dnsutil.AddOrigin(result, newDomain) + \".\"\n}\n\n\/\/ import_transform imports the records of one zone into another, modifying records along the way.\nfunc importTransform(srcDomain, dstDomain *models.DomainConfig, transforms []transform.IpConversion, ttl uint32) error {\n\t\/\/ Read srcDomain.Records, transform, and append to dstDomain.Records:\n\t\/\/ 1. Skip any that aren't A or CNAMEs.\n\t\/\/ 2. Append destDomainname to the end of the label.\n\t\/\/ 3. For CNAMEs, append destDomainname to the end of the target.\n\t\/\/ 4. For As, change the target as described the transforms.\n\n\tfor _, rec := range srcDomain.Records {\n\t\tif dstDomain.HasRecordTypeName(rec.Type, rec.NameFQDN) {\n\t\t\tcontinue\n\t\t}\n\t\tnewRec := func() *models.RecordConfig {\n\t\t\trec2, _ := rec.Copy()\n\t\t\trec2.Name = rec2.NameFQDN\n\t\t\trec2.NameFQDN = dnsutil.AddOrigin(rec2.Name, dstDomain.Name)\n\t\t\tif ttl != 0 {\n\t\t\t\trec2.TTL = ttl\n\t\t\t}\n\t\t\treturn rec2\n\t\t}\n\t\tswitch rec.Type {\n\t\tcase \"A\":\n\t\t\ttrs, err := transform.TransformIPToList(net.ParseIP(rec.Target), transforms)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"import_transform: TransformIP(%v, %v) returned err=%s\", rec.Target, transforms, err)\n\t\t\t}\n\t\t\tfor _, tr := range trs {\n\t\t\t\tr := newRec()\n\t\t\t\tr.Target = tr.String()\n\t\t\t\tdstDomain.Records = append(dstDomain.Records, r)\n\t\t\t}\n\t\tcase \"CNAME\":\n\t\t\tr := newRec()\n\t\t\tr.Target = transformCNAME(r.Target, srcDomain.Name, dstDomain.Name)\n\t\t\tdstDomain.Records = append(dstDomain.Records, r)\n\t\tcase \"MX\", \"NS\", \"TXT\":\n\t\t\t\/\/ Not imported.\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"import_transform: Unimplemented record type %v (%v)\",\n\t\t\t\trec.Type, rec.Name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ deleteImportTransformRecords deletes any IMPORT_TRANSFORM records from a domain.\nfunc deleteImportTransformRecords(domain *models.DomainConfig) {\n\tfor i := len(domain.Records) - 1; i >= 0; i-- {\n\t\trec := domain.Records[i]\n\t\tif rec.Type == \"IMPORT_TRANSFORM\" {\n\t\t\tdomain.Records = append(domain.Records[:i], domain.Records[i+1:]...)\n\t\t}\n\t}\n}\n\n\/\/ Warning is a wrapper around error that can be used to indicate it should not\n\/\/ stop execution, but is still likely a problem.\ntype Warning struct {\n\terror\n}\n\nfunc NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {\n\tptypeMap := map[string]string{}\n\tfor _, p := range config.DNSProviders {\n\t\tptypeMap[p.Name] = p.Type\n\t}\n\n\tfor _, domain := range config.Domains {\n\t\tpTypes := []string{}\n\t\tfor p := range domain.DNSProviders {\n\t\t\tpType, ok := ptypeMap[p]\n\t\t\tif !ok {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"%s uses undefined DNS provider %s\", domain.Name, p))\n\t\t\t} else {\n\t\t\t\tpTypes = append(pTypes, pType)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Normalize Nameservers.\n\t\tfor _, ns := range domain.Nameservers {\n\t\t\tns.Name = dnsutil.AddOrigin(ns.Name, domain.Name)\n\t\t\tns.Name = strings.TrimRight(ns.Name, \".\")\n\t\t}\n\t\t\/\/ Normalize Records.\n\t\tfor _, rec := range domain.Records {\n\t\t\tif rec.TTL == 0 {\n\t\t\t\trec.TTL = models.DefaultTTL\n\t\t\t}\n\t\t\t\/\/ Validate the unmodified inputs:\n\t\t\tif err := validateRecordTypes(rec, domain.Name, pTypes); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t\tif err := checkLabel(rec.Name, rec.Type, domain.Name); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t\tif errs2 := checkTargets(rec, domain.Name); errs2 != nil {\n\t\t\t\terrs = append(errs, errs2...)\n\t\t\t}\n\n\t\t\t\/\/ Canonicalize Targets.\n\t\t\tif rec.Type == \"CNAME\" || rec.Type == \"MX\" || rec.Type == \"NS\" {\n\t\t\t\trec.Target = dnsutil.AddOrigin(rec.Target, domain.Name+\".\")\n\t\t\t} else if rec.Type == \"A\" || rec.Type == \"AAAA\" {\n\t\t\t\trec.Target = net.ParseIP(rec.Target).String()\n\t\t\t}\n\t\t\t\/\/ Populate FQDN:\n\t\t\trec.NameFQDN = dnsutil.AddOrigin(rec.Name, domain.Name)\n\t\t}\n\t}\n\n\t\/\/ Process any pseudo-records:\n\tfor _, domain := range config.Domains {\n\t\tfor _, rec := range domain.Records {\n\t\t\tif rec.Type == \"IMPORT_TRANSFORM\" {\n\t\t\t\ttable, err := transform.DecodeTransformTable(rec.Metadata[\"transform_table\"])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr = importTransform(config.FindDomain(rec.Target), domain, table, rec.TTL)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Clean up:\n\tfor _, domain := range config.Domains {\n\t\tdeleteImportTransformRecords(domain)\n\t}\n\n\t\/\/ Run record transforms\n\tfor _, domain := range config.Domains {\n\t\tif err := applyRecordTransforms(domain); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\t\/\/Check that CNAMES don't have to co-exist with any other records\n\tfor _, d := range config.Domains {\n\t\terrs = append(errs, checkCNAMEs(d)...)\n\t}\n\n\t\/\/Check that if any aliases are used in a domain, every provider for that domain supports them\n\tfor _, d := range config.Domains {\n\t\terr := checkALIASes(d, config.DNSProviders)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc checkCNAMEs(dc *models.DomainConfig) (errs []error) {\n\tcnames := map[string]bool{}\n\tfor _, r := range dc.Records {\n\t\tif r.Type == \"CNAME\" {\n\t\t\tif cnames[r.Name] {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"Cannot have multiple CNAMEs with same name: %s\", r.NameFQDN))\n\t\t\t}\n\t\t\tcnames[r.Name] = true\n\t\t}\n\t}\n\tfor _, r := range dc.Records {\n\t\tif cnames[r.Name] && r.Type != \"CNAME\" {\n\t\t\terrs = append(errs, fmt.Errorf(\"Cannot have CNAME and %s record with same name: %s\", r.Type, r.NameFQDN))\n\t\t}\n\t}\n\treturn\n}\n\nfunc checkALIASes(dc *models.DomainConfig, pList []*models.DNSProviderConfig) error {\n\thasAlias := false\n\tfor _, r := range dc.Records {\n\t\tif r.Type == \"ALIAS\" {\n\t\t\thasAlias = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasAlias {\n\t\treturn nil\n\t}\n\tfor pName := range dc.DNSProviders {\n\t\tfor _, p := range pList {\n\t\t\tif p.Name == pName {\n\t\t\t\tif !providers.ProviderHasCabability(p.Type, providers.CanUseAlias) {\n\t\t\t\t\treturn fmt.Errorf(\"Domain %s uses ALIAS records, but DNS provider type %s does not support them\", dc.Name, p.Type)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc applyRecordTransforms(domain *models.DomainConfig) error {\n\tfor _, rec := range domain.Records {\n\t\tif rec.Type != \"A\" {\n\t\t\tcontinue\n\t\t}\n\t\ttt, ok := rec.Metadata[\"transform\"]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\ttable, err := transform.DecodeTransformTable(tt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tip := net.ParseIP(rec.Target) \/\/ip already validated above\n\t\tnewIPs, err := transform.TransformIPToList(net.ParseIP(rec.Target), table)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i, newIP := range newIPs {\n\t\t\tif i == 0 && !newIP.Equal(ip) {\n\t\t\t\trec.Target = newIP.String() \/\/replace target of first record if different\n\t\t\t} else if i > 0 {\n\t\t\t\t\/\/ any additional ips need identical records with the alternate ip added to the domain\n\t\t\t\tcopy, err := rec.Copy()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcopy.Target = newIP.String()\n\t\t\t\tdomain.Records = append(domain.Records, copy)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage options\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestResourceSetSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted ResourceSet\n\t\tWantedError bool\n\t}{\n\t\t{\n\t\t\tDesc: \"empty resources\",\n\t\t\tValue: \"\",\n\t\t\tWanted: ResourceSet{},\n\t\t\tWantedError: false,\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal resources\",\n\t\t\tValue: \"configmaps,cronjobs,daemonsets,deployments\",\n\t\t\tWanted: ResourceSet(map[string]struct{}{\n\t\t\t\t\"configmaps\": {},\n\t\t\t\t\"cronjobs\": {},\n\t\t\t\t\"daemonsets\": {},\n\t\t\t\t\"deployments\": {},\n\t\t\t}),\n\t\t\tWantedError: false,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tcs := &ResourceSet{}\n\t\tgotError := cs.Set(test.Value)\n\t\tif !(((gotError == nil && !test.WantedError) || (gotError != nil && test.WantedError)) && reflect.DeepEqual(*cs, test.Wanted)) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Wanted Error: %v, Got Error: %v\", test.Desc, test.Wanted, *cs, test.WantedError, gotError)\n\t\t}\n\t}\n}\n\nfunc TestNamespaceListSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted NamespaceList\n\t}{\n\t\t{\n\t\t\tDesc: \"empty namespacelist\",\n\t\t\tValue: \"\",\n\t\t\tWanted: NamespaceList{},\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal namespacelist\",\n\t\t\tValue: \"default, kube-system\",\n\t\t\tWanted: NamespaceList([]string{\n\t\t\t\t\"default\",\n\t\t\t\t\"kube-system\",\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tns := &NamespaceList{}\n\t\tgotError := ns.Set(test.Value)\n\t\tif gotError != nil || !reflect.DeepEqual(*ns, test.Wanted) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Got Error: %v\", test.Desc, test.Wanted, *ns, gotError)\n\t\t}\n\t}\n}\n\nfunc TestMetricSetSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted MetricSet\n\t}{\n\t\t{\n\t\t\tDesc: \"empty metrics\",\n\t\t\tValue: \"\",\n\t\t\tWanted: MetricSet{},\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal metrics\",\n\t\t\tValue: \"kube_cronjob_info, kube_cronjob_labels, kube_daemonset_labels\",\n\t\t\tWanted: MetricSet(map[string]struct{}{\n\t\t\t\t\"kube_cronjob_info\": {},\n\t\t\t\t\"kube_cronjob_labels\": {},\n\t\t\t\t\"kube_daemonset_labels\": {},\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tms := &MetricSet{}\n\t\tgotError := ms.Set(test.Value)\n\t\tif gotError != nil || !reflect.DeepEqual(*ms, test.Wanted) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Got Error: %v\", test.Desc, test.Wanted, *ms, gotError)\n\t\t}\n\t}\n}\n<commit_msg>Assert that newlines in coma separated arguments are ignored<commit_after>\/*\nCopyright 2018 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage options\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestResourceSetSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted ResourceSet\n\t\tWantedError bool\n\t}{\n\t\t{\n\t\t\tDesc: \"empty resources\",\n\t\t\tValue: \"\",\n\t\t\tWanted: ResourceSet{},\n\t\t\tWantedError: false,\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal resources\",\n\t\t\tValue: \"configmaps,cronjobs,daemonsets,deployments\",\n\t\t\tWanted: ResourceSet(map[string]struct{}{\n\t\t\t\t\"configmaps\": {},\n\t\t\t\t\"cronjobs\": {},\n\t\t\t\t\"daemonsets\": {},\n\t\t\t\t\"deployments\": {},\n\t\t\t}),\n\t\t\tWantedError: false,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tcs := &ResourceSet{}\n\t\tgotError := cs.Set(test.Value)\n\t\tif !(((gotError == nil && !test.WantedError) || (gotError != nil && test.WantedError)) && reflect.DeepEqual(*cs, test.Wanted)) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Wanted Error: %v, Got Error: %v\", test.Desc, test.Wanted, *cs, test.WantedError, gotError)\n\t\t}\n\t}\n}\n\nfunc TestNamespaceListSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted NamespaceList\n\t}{\n\t\t{\n\t\t\tDesc: \"empty namespacelist\",\n\t\t\tValue: \"\",\n\t\t\tWanted: NamespaceList{},\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal namespacelist\",\n\t\t\tValue: \"default, kube-system\",\n\t\t\tWanted: NamespaceList([]string{\n\t\t\t\t\"default\",\n\t\t\t\t\"kube-system\",\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tns := &NamespaceList{}\n\t\tgotError := ns.Set(test.Value)\n\t\tif gotError != nil || !reflect.DeepEqual(*ns, test.Wanted) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Got Error: %v\", test.Desc, test.Wanted, *ns, gotError)\n\t\t}\n\t}\n}\n\nfunc TestMetricSetSet(t *testing.T) {\n\ttests := []struct {\n\t\tDesc string\n\t\tValue string\n\t\tWanted MetricSet\n\t}{\n\t\t{\n\t\t\tDesc: \"empty metrics\",\n\t\t\tValue: \"\",\n\t\t\tWanted: MetricSet{},\n\t\t},\n\t\t{\n\t\t\tDesc: \"normal metrics\",\n\t\t\tValue: \"kube_cronjob_info, kube_cronjob_labels, kube_daemonset_labels\",\n\t\t\tWanted: MetricSet(map[string]struct{}{\n\t\t\t\t\"kube_cronjob_info\": {},\n\t\t\t\t\"kube_cronjob_labels\": {},\n\t\t\t\t\"kube_daemonset_labels\": {},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tDesc: \"newlines are ignored\",\n\t\t\tValue: \"\\n^kube_.+_annotations$,\\n ^kube_secret_labels$\\n\",\n\t\t\tWanted: MetricSet{\n\t\t\t\t\"^kube_secret_labels$\": struct{}{},\n\t\t\t\t\"^kube_.+_annotations$\": struct{}{},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tms := &MetricSet{}\n\t\tgotError := ms.Set(test.Value)\n\t\tif gotError != nil || !reflect.DeepEqual(*ms, test.Wanted) {\n\t\t\tt.Errorf(\"Test error for Desc: %s. Want: %+v. Got: %+v. Got Error: %v\", test.Desc, test.Wanted, *ms, gotError)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package status\n\nimport (\n\t\"sync\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n)\n\nconst unknownObj = \"Unknown object\"\n\n\/\/ updater transparently switches for status updates between \/status and the main entrypoint for resource,\n\/\/ allowing CRDs to enable or disable the status subresource support anytime.\ntype updater struct {\n\tlock sync.Mutex\n\tsubresource bool\n\tcli kubecli.KubevirtClient\n}\n\nfunc (u *updater) update(obj runtime.Object) (err error) {\n\tif u.getSubresource() {\n\t\treturn u.updateWithSubresource(obj)\n\t} else {\n\t\treturn u.updateWithoutSubresource(obj)\n\t}\n}\n\nfunc (u *updater) patch(obj runtime.Object, pt types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\tif u.getSubresource() {\n\t\treturn u.patchWithSubresource(obj, pt, data, patchOptions)\n\t} else {\n\t\treturn u.patchWithoutSubresource(obj, pt, data, patchOptions)\n\t}\n}\n\n\/\/ updateWithoutSubresource will try to update the status via PUT sent to the main REST endpoint.\n\/\/ If status of the returned object did not change, it knows that it should have used the \/status subresource\n\/\/ and will switch the updater itself over to permanently use the \/status subresource.\nfunc (u *updater) updateWithoutSubresource(obj runtime.Object) (err error) {\n\toldStatus, newStatus, err := u.updateUnstructured(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !equality.Semantic.DeepEqual(oldStatus, newStatus) {\n\t\tu.setSubresource(true)\n\t\treturn u.updateStatusUnstructured(obj)\n\t}\n\treturn nil\n}\n\n\/\/ updateWithSubresource will try to update the status via PUT sent to the \/status subresource.\n\/\/ If a 404 error is returned, it will try the main rest entrypoint instead. In case that this\n\/\/ call succeeds, it will switch the updater to permanently use the main entrypoint.\nfunc (u *updater) updateWithSubresource(obj runtime.Object) (updateStatusErr error) {\n\tupdateStatusErr = u.updateStatusUnstructured(obj)\n\tif updateStatusErr != nil && !errors.IsNotFound(updateStatusErr) {\n\t\treturn updateStatusErr\n\t}\n\tif errors.IsNotFound(updateStatusErr) {\n\t\toldStatus, newStatus, err := u.updateUnstructured(obj)\n\t\tif errors.IsNotFound(err) {\n\t\t\t\/\/ object does not exist\n\t\t\treturn err\n\t\t}\n\t\tif err == nil && equality.Semantic.DeepEqual(oldStatus, newStatus) {\n\t\t\tu.setSubresource(false)\n\t\t} else if err == nil {\n\t\t\treturn updateStatusErr\n\t\t}\n\t\treturn err\n\t} else {\n\t\treturn updateStatusErr\n\t}\n}\n\n\/\/ patchWithoutSubresource will try to update the status via PATCH sent to the main REST endpoint.\n\/\/ If the resource version of the returned object did not change, it knows that it should have used the \/status subresource\n\/\/ and will switch the updater itself over to permanently use the \/status subresource.\nfunc (u *updater) patchWithoutSubresource(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\toldResourceVersion, newResourceVersion, err := u.patchUnstructured(obj, patchType, data, patchOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif oldResourceVersion == newResourceVersion {\n\t\tu.setSubresource(true)\n\t\treturn u.patchStatusUnstructured(obj, patchType, data, patchOptions)\n\t}\n\treturn nil\n}\n\n\/\/ patchWithSubresource will try to update the status via PATCH sent to the \/status subresource.\n\/\/ If a 404 error is returned, it will try the main rest entrypoint instead. In case that this\n\/\/ call succeeds, it will switch the updater to permanently use the main entrypoint.\nfunc (u *updater) patchWithSubresource(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (patchStatusErr error) {\n\tpatchStatusErr = u.patchStatusUnstructured(obj, patchType, data, patchOptions)\n\tif patchStatusErr != nil && !errors.IsNotFound(patchStatusErr) {\n\t\treturn patchStatusErr\n\t}\n\tif errors.IsNotFound(patchStatusErr) {\n\t\toldResourceVersion, newResourceVersions, err := u.patchUnstructured(obj, patchType, data, patchOptions)\n\t\tif errors.IsNotFound(err) {\n\t\t\t\/\/ object does not exist\n\t\t\treturn err\n\t\t}\n\t\tif err == nil && oldResourceVersion != newResourceVersions {\n\t\t\tu.setSubresource(false)\n\t\t} else if err == nil {\n\t\t\treturn patchStatusErr\n\t\t}\n\t\treturn err\n\t} else {\n\t\treturn patchStatusErr\n\t}\n}\n\nfunc (u *updater) patchUnstructured(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (oldResourceVersion, newResourceVerions string, err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\tnewObj, err := u.cli.VirtualMachine(a.GetNamespace()).Patch(a.GetName(), patchType, data, patchOptions)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn oldObj.ResourceVersion, newObj.ResourceVersion, nil\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\tnewObj, err := u.cli.KubeVirt(a.GetNamespace()).Patch(a.GetName(), patchType, data, patchOptions)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn oldObj.ResourceVersion, newObj.ResourceVersion, nil\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) patchStatusUnstructured(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\t_, err = u.cli.VirtualMachine(a.GetNamespace()).PatchStatus(a.GetName(), patchType, data, patchOptions)\n\t\treturn err\n\tcase *v1.KubeVirt:\n\t\t_, err = u.cli.KubeVirt(a.GetNamespace()).PatchStatus(a.GetName(), patchType, data, patchOptions)\n\t\treturn err\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) updateUnstructured(obj runtime.Object) (oldStatus interface{}, newStatus interface{}, err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\tnewObj, err := u.cli.VirtualMachine(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.VirtualMachineInstanceReplicaSet:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceReplicaSet)\n\t\tnewObj, err := u.cli.ReplicaSet(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.VirtualMachineInstanceMigration:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceMigration)\n\t\tnewObj, err := u.cli.VirtualMachineInstanceMigration(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\tnewObj, err := u.cli.KubeVirt(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) updateStatusUnstructured(obj runtime.Object) (err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\t_, err = u.cli.VirtualMachine(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.VirtualMachineInstanceReplicaSet:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceReplicaSet)\n\t\t_, err = u.cli.ReplicaSet(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.VirtualMachineInstanceMigration:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceMigration)\n\t\t_, err = u.cli.VirtualMachineInstanceMigration(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\t_, err = u.cli.KubeVirt(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) setSubresource(exists bool) {\n\tu.lock.Lock()\n\tdefer u.lock.Unlock()\n\tu.subresource = exists\n}\n\nfunc (u *updater) getSubresource() bool {\n\tu.lock.Lock()\n\tdefer u.lock.Unlock()\n\treturn u.subresource\n}\n\ntype VMStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *VMStatusUpdater) UpdateStatus(vm *v1.VirtualMachine) error {\n\treturn v.updater.update(vm)\n}\n\nfunc (v *VMStatusUpdater) PatchStatus(vm *v1.VirtualMachine, pt types.PatchType, data []byte, patchOptions *metav1.PatchOptions) error {\n\treturn v.updater.patch(vm, pt, data, patchOptions)\n}\n\nfunc NewVMStatusUpdater(cli kubecli.KubevirtClient) *VMStatusUpdater {\n\treturn &VMStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype VMIRSStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *VMIRSStatusUpdater) UpdateStatus(vmirs *v1.VirtualMachineInstanceReplicaSet) error {\n\treturn v.updater.update(vmirs)\n}\n\nfunc NewVMIRSStatusUpdater(cli kubecli.KubevirtClient) *VMIRSStatusUpdater {\n\treturn &VMIRSStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype KVStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *KVStatusUpdater) UpdateStatus(kv *v1.KubeVirt) error {\n\treturn v.updater.update(kv)\n}\n\nfunc (v *KVStatusUpdater) PatchStatus(kv *v1.KubeVirt, pt types.PatchType, data []byte) error {\n\treturn v.updater.patch(kv, pt, data, &metav1.PatchOptions{})\n}\n\nfunc NewKubeVirtStatusUpdater(cli kubecli.KubevirtClient) *KVStatusUpdater {\n\treturn &KVStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype MigrationStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *MigrationStatusUpdater) UpdateStatus(migration *v1.VirtualMachineInstanceMigration) error {\n\treturn v.updater.update(migration)\n}\n\nfunc NewMigrationStatusUpdater(cli kubecli.KubevirtClient) *MigrationStatusUpdater {\n\treturn &MigrationStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n<commit_msg>Remove redundant IsNotFound calls<commit_after>package status\n\nimport (\n\t\"sync\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n)\n\nconst unknownObj = \"Unknown object\"\n\n\/\/ updater transparently switches for status updates between \/status and the main entrypoint for resource,\n\/\/ allowing CRDs to enable or disable the status subresource support anytime.\ntype updater struct {\n\tlock sync.Mutex\n\tsubresource bool\n\tcli kubecli.KubevirtClient\n}\n\nfunc (u *updater) update(obj runtime.Object) (err error) {\n\tif u.getSubresource() {\n\t\treturn u.updateWithSubresource(obj)\n\t} else {\n\t\treturn u.updateWithoutSubresource(obj)\n\t}\n}\n\nfunc (u *updater) patch(obj runtime.Object, pt types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\tif u.getSubresource() {\n\t\treturn u.patchWithSubresource(obj, pt, data, patchOptions)\n\t} else {\n\t\treturn u.patchWithoutSubresource(obj, pt, data, patchOptions)\n\t}\n}\n\n\/\/ updateWithoutSubresource will try to update the status via PUT sent to the main REST endpoint.\n\/\/ If status of the returned object did not change, it knows that it should have used the \/status subresource\n\/\/ and will switch the updater itself over to permanently use the \/status subresource.\nfunc (u *updater) updateWithoutSubresource(obj runtime.Object) (err error) {\n\toldStatus, newStatus, err := u.updateUnstructured(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !equality.Semantic.DeepEqual(oldStatus, newStatus) {\n\t\tu.setSubresource(true)\n\t\treturn u.updateStatusUnstructured(obj)\n\t}\n\treturn nil\n}\n\n\/\/ updateWithSubresource will try to update the status via PUT sent to the \/status subresource.\n\/\/ If a 404 error is returned, it will try the main rest entrypoint instead. In case that this\n\/\/ call succeeds, it will switch the updater to permanently use the main entrypoint.\nfunc (u *updater) updateWithSubresource(obj runtime.Object) (updateStatusErr error) {\n\tupdateStatusErr = u.updateStatusUnstructured(obj)\n\tif !errors.IsNotFound(updateStatusErr) {\n\t\treturn updateStatusErr\n\t}\n\toldStatus, newStatus, err := u.updateUnstructured(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !equality.Semantic.DeepEqual(oldStatus, newStatus) {\n\t\treturn updateStatusErr\n\t}\n\tu.setSubresource(false)\n\treturn nil\n}\n\n\/\/ patchWithoutSubresource will try to update the status via PATCH sent to the main REST endpoint.\n\/\/ If the resource version of the returned object did not change, it knows that it should have used the \/status subresource\n\/\/ and will switch the updater itself over to permanently use the \/status subresource.\nfunc (u *updater) patchWithoutSubresource(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\toldResourceVersion, newResourceVersion, err := u.patchUnstructured(obj, patchType, data, patchOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif oldResourceVersion == newResourceVersion {\n\t\tu.setSubresource(true)\n\t\treturn u.patchStatusUnstructured(obj, patchType, data, patchOptions)\n\t}\n\treturn nil\n}\n\n\/\/ patchWithSubresource will try to update the status via PATCH sent to the \/status subresource.\n\/\/ If a 404 error is returned, it will try the main rest entrypoint instead. In case that this\n\/\/ call succeeds, it will switch the updater to permanently use the main entrypoint.\nfunc (u *updater) patchWithSubresource(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (patchStatusErr error) {\n\tpatchStatusErr = u.patchStatusUnstructured(obj, patchType, data, patchOptions)\n\tif !errors.IsNotFound(patchStatusErr) {\n\t\treturn patchStatusErr\n\t}\n\toldResourceVersion, newResourceVersions, err := u.patchUnstructured(obj, patchType, data, patchOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif oldResourceVersion == newResourceVersions {\n\t\treturn patchStatusErr\n\t}\n\tu.setSubresource(false)\n\treturn nil\n}\n\nfunc (u *updater) patchUnstructured(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (oldResourceVersion, newResourceVerions string, err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\tnewObj, err := u.cli.VirtualMachine(a.GetNamespace()).Patch(a.GetName(), patchType, data, patchOptions)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn oldObj.ResourceVersion, newObj.ResourceVersion, nil\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\tnewObj, err := u.cli.KubeVirt(a.GetNamespace()).Patch(a.GetName(), patchType, data, patchOptions)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn oldObj.ResourceVersion, newObj.ResourceVersion, nil\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) patchStatusUnstructured(obj runtime.Object, patchType types.PatchType, data []byte, patchOptions *metav1.PatchOptions) (err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\t_, err = u.cli.VirtualMachine(a.GetNamespace()).PatchStatus(a.GetName(), patchType, data, patchOptions)\n\t\treturn err\n\tcase *v1.KubeVirt:\n\t\t_, err = u.cli.KubeVirt(a.GetNamespace()).PatchStatus(a.GetName(), patchType, data, patchOptions)\n\t\treturn err\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) updateUnstructured(obj runtime.Object) (oldStatus interface{}, newStatus interface{}, err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\tnewObj, err := u.cli.VirtualMachine(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.VirtualMachineInstanceReplicaSet:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceReplicaSet)\n\t\tnewObj, err := u.cli.ReplicaSet(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.VirtualMachineInstanceMigration:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceMigration)\n\t\tnewObj, err := u.cli.VirtualMachineInstanceMigration(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\tnewObj, err := u.cli.KubeVirt(a.GetNamespace()).Update(oldObj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn oldObj.Status, newObj.Status, nil\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) updateStatusUnstructured(obj runtime.Object) (err error) {\n\ta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch obj.(type) {\n\tcase *v1.VirtualMachine:\n\t\toldObj := obj.(*v1.VirtualMachine)\n\t\t_, err = u.cli.VirtualMachine(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.VirtualMachineInstanceReplicaSet:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceReplicaSet)\n\t\t_, err = u.cli.ReplicaSet(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.VirtualMachineInstanceMigration:\n\t\toldObj := obj.(*v1.VirtualMachineInstanceMigration)\n\t\t_, err = u.cli.VirtualMachineInstanceMigration(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tcase *v1.KubeVirt:\n\t\toldObj := obj.(*v1.KubeVirt)\n\t\t_, err = u.cli.KubeVirt(a.GetNamespace()).UpdateStatus(oldObj)\n\t\treturn err\n\tdefault:\n\t\tpanic(unknownObj)\n\t}\n}\n\nfunc (u *updater) setSubresource(exists bool) {\n\tu.lock.Lock()\n\tdefer u.lock.Unlock()\n\tu.subresource = exists\n}\n\nfunc (u *updater) getSubresource() bool {\n\tu.lock.Lock()\n\tdefer u.lock.Unlock()\n\treturn u.subresource\n}\n\ntype VMStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *VMStatusUpdater) UpdateStatus(vm *v1.VirtualMachine) error {\n\treturn v.updater.update(vm)\n}\n\nfunc (v *VMStatusUpdater) PatchStatus(vm *v1.VirtualMachine, pt types.PatchType, data []byte, patchOptions *metav1.PatchOptions) error {\n\treturn v.updater.patch(vm, pt, data, patchOptions)\n}\n\nfunc NewVMStatusUpdater(cli kubecli.KubevirtClient) *VMStatusUpdater {\n\treturn &VMStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype VMIRSStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *VMIRSStatusUpdater) UpdateStatus(vmirs *v1.VirtualMachineInstanceReplicaSet) error {\n\treturn v.updater.update(vmirs)\n}\n\nfunc NewVMIRSStatusUpdater(cli kubecli.KubevirtClient) *VMIRSStatusUpdater {\n\treturn &VMIRSStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype KVStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *KVStatusUpdater) UpdateStatus(kv *v1.KubeVirt) error {\n\treturn v.updater.update(kv)\n}\n\nfunc (v *KVStatusUpdater) PatchStatus(kv *v1.KubeVirt, pt types.PatchType, data []byte) error {\n\treturn v.updater.patch(kv, pt, data, &metav1.PatchOptions{})\n}\n\nfunc NewKubeVirtStatusUpdater(cli kubecli.KubevirtClient) *KVStatusUpdater {\n\treturn &KVStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n\ntype MigrationStatusUpdater struct {\n\tupdater updater\n}\n\nfunc (v *MigrationStatusUpdater) UpdateStatus(migration *v1.VirtualMachineInstanceMigration) error {\n\treturn v.updater.update(migration)\n}\n\nfunc NewMigrationStatusUpdater(cli kubecli.KubevirtClient) *MigrationStatusUpdater {\n\treturn &MigrationStatusUpdater{\n\t\tupdater: updater{\n\t\t\tlock: sync.Mutex{},\n\t\t\tsubresource: true,\n\t\t\tcli: cli,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"flag\"\n\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/tleyden\/awsutil\"\n)\n\nfunc main() {\n\n\tc := cli.NewCLI(\"awsutil\", \"1.0.0\")\n\tc.Args = os.Args[1:]\n\tc.Commands = map[string]cli.CommandFactory{\n\t\t\"cloudformation stop-instances\": func() (cli.Command, error) {\n\t\t\treturn &cloudformationStopInstancesCommand{}, nil\n\t\t},\n\t}\n\n\texitStatus, err := c.Run()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tos.Exit(exitStatus)\n}\n\ntype cloudformationStopInstancesCommand struct {\n\targs []string\n}\n\nfunc (c *cloudformationStopInstancesCommand) Run(args []string) int {\n\n\t\/\/ Parse args + get the name of the stack\n\tcmdFlags := flag.NewFlagSet(\"cloudformation\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.Help() }\n\tstackname := cmdFlags.String(\"stackname\", \"\", \"\")\n\tregion := cmdFlags.String(\"region\", \"\", \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tif *region == \"\" {\n\t\tlog.Fatalf(\"You must pass an AWS region\")\n\t\treturn 1\n\t}\n\n\tif *stackname == \"\" {\n\t\tlog.Fatalf(\"You must pass a CloudFormation stack name\")\n\t\treturn 1\n\t}\n\n\t\/\/ create a session\n\tcfnUtil, err := awsutil.NewCloudformationUtilFromRegion(*region)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating cnf: %v\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Stop all instances in stack\n\terr = cfnUtil.StopEC2Instances(*stackname)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error stopping instances for stack: %v. Err: %v\", *stackname, err)\n\t\treturn 1\n\t}\n\n\tlog.Printf(\"Stopped all instances in stack: %v\", *stackname)\n\treturn 0\n\n}\n\nfunc (c *cloudformationStopInstancesCommand) Synopsis() string {\n\treturn \"Stop all instances in given cloudformation stack\"\n}\n\nfunc (c *cloudformationStopInstancesCommand) Help() string {\n\n\thelpText := `\nUsage: cloudformation stop-instances [options]\n\n Starts the Consul agent and runs until an interrupt is received. The\n agent represents a single node in a cluster.\n\nOptions:\n\n -stack=stackname The name of the cloudformation stack\n -region=region The AWS region\n\n `\n\treturn strings.TrimSpace(helpText)\n\n}\n<commit_msg>Fix help text<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"flag\"\n\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/tleyden\/awsutil\"\n)\n\nfunc main() {\n\n\tc := cli.NewCLI(\"awsutil\", \"1.0.0\")\n\tc.Args = os.Args[1:]\n\tc.Commands = map[string]cli.CommandFactory{\n\t\t\"cloudformation stop-instances\": func() (cli.Command, error) {\n\t\t\treturn &cloudformationStopInstancesCommand{}, nil\n\t\t},\n\t}\n\n\texitStatus, err := c.Run()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tos.Exit(exitStatus)\n}\n\ntype cloudformationStopInstancesCommand struct {\n\targs []string\n}\n\nfunc (c *cloudformationStopInstancesCommand) Run(args []string) int {\n\n\t\/\/ Parse args + get the name of the stack\n\tcmdFlags := flag.NewFlagSet(\"cloudformation\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.Help() }\n\tstackname := cmdFlags.String(\"stackname\", \"\", \"\")\n\tregion := cmdFlags.String(\"region\", \"\", \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tif *region == \"\" {\n\t\tlog.Fatalf(\"You must pass an AWS region\")\n\t\treturn 1\n\t}\n\n\tif *stackname == \"\" {\n\t\tlog.Fatalf(\"You must pass a CloudFormation stack name\")\n\t\treturn 1\n\t}\n\n\t\/\/ create a session\n\tcfnUtil, err := awsutil.NewCloudformationUtilFromRegion(*region)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating cnf: %v\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Stop all instances in stack\n\terr = cfnUtil.StopEC2Instances(*stackname)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error stopping instances for stack: %v. Err: %v\", *stackname, err)\n\t\treturn 1\n\t}\n\n\tlog.Printf(\"Stopped all instances in stack: %v\", *stackname)\n\treturn 0\n\n}\n\nfunc (c *cloudformationStopInstancesCommand) Synopsis() string {\n\treturn \"Stop all instances in given cloudformation stack\"\n}\n\nfunc (c *cloudformationStopInstancesCommand) Help() string {\n\n\thelpText := `\nUsage: cloudformation stop-instances [options]\n\n Stops all instances in the given cloudformation stack\n\nOptions:\n\n -stackname=stackname The name of the cloudformation stack\n -region=region The AWS region\n\n `\n\treturn strings.TrimSpace(helpText)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"dokugen\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/TODO: let people pass in a filename to export to.\n\ntype appOptions struct {\n\tGENERATE bool\n\tHELP bool\n\tPUZZLE_TO_SOLVE string\n\tNUM int\n\tPRINT_STATS bool\n\tWALKTHROUGH bool\n\tRAW_SYMMETRY string\n\tSYMMETRY sudoku.SymmetryType\n\tSYMMETRY_PROPORTION float64\n\tMIN_DIFFICULTY float64\n\tMAX_DIFFICULTY float64\n}\n\nfunc init() {\n\t\/\/grid.Difficulty can make use of a number of processes simultaneously.\n\truntime.GOMAXPROCS(6)\n}\n\nfunc main() {\n\n\t\/\/TODO: figure out how to test this.\n\n\tvar options appOptions\n\n\tflag.BoolVar(&options.GENERATE, \"g\", false, \"if true, will generate a puzzle.\")\n\tflag.BoolVar(&options.HELP, \"h\", false, \"If provided, will print help and exit.\")\n\tflag.IntVar(&options.NUM, \"n\", 1, \"Number of things to generate\")\n\tflag.BoolVar(&options.PRINT_STATS, \"p\", false, \"If provided, will print stats.\")\n\tflag.StringVar(&options.PUZZLE_TO_SOLVE, \"s\", \"\", \"If provided, will solve the puzzle at the given filename and print solution.\")\n\tflag.BoolVar(&options.WALKTHROUGH, \"w\", false, \"If provided, will print out a walkthrough to solve the provided puzzle.\")\n\tflag.StringVar(&options.RAW_SYMMETRY, \"y\", \"vertical\", \"Valid values: 'none', 'both', 'horizontal', 'vertical\")\n\tflag.Float64Var(&options.SYMMETRY_PROPORTION, \"r\", 0.7, \"What proportion of cells should be filled according to symmetry\")\n\tflag.Float64Var(&options.MIN_DIFFICULTY, \"min\", 0.0, \"Minimum difficulty for generated puzzle\")\n\tflag.Float64Var(&options.MAX_DIFFICULTY, \"max\", 1.0, \"Maximum difficulty for generated puzzle\")\n\n\tflag.Parse()\n\n\toptions.RAW_SYMMETRY = strings.ToLower(options.RAW_SYMMETRY)\n\tswitch options.RAW_SYMMETRY {\n\tcase \"none\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_NONE\n\tcase \"both\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_BOTH\n\tcase \"horizontal\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_HORIZONTAL\n\tcase \"vertical\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_VERTICAL\n\tdefault:\n\t\tlog.Fatal(\"Unknown symmetry flag: \", options.RAW_SYMMETRY)\n\t}\n\n\toutput := os.Stdout\n\n\tif options.HELP {\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tvar grid *sudoku.Grid\n\n\tfor i := 0; i < options.NUM; i++ {\n\t\t\/\/TODO: allow the type of symmetry to be configured.\n\t\tif options.GENERATE {\n\t\t\tgrid = generatePuzzle(options.MIN_DIFFICULTY, options.MAX_DIFFICULTY, options.SYMMETRY, options.SYMMETRY_PROPORTION)\n\t\t\tfmt.Fprintln(output, grid.DataString())\n\t\t} else if options.PUZZLE_TO_SOLVE != \"\" {\n\t\t\t\/\/TODO: detect if the load failed.\n\t\t\tgrid = sudoku.NewGrid()\n\t\t\tgrid.LoadFromFile(options.PUZZLE_TO_SOLVE)\n\t\t}\n\n\t\tif grid == nil {\n\t\t\t\/\/No grid to do anything with.\n\t\t\tlog.Fatalln(\"No grid loaded.\")\n\t\t}\n\n\t\t\/\/TODO: use of this option leads to a busy loop somewhere... Is it related to the generate-multiple-and-difficulty hang?\n\n\t\tvar directions sudoku.SolveDirections\n\n\t\tif options.WALKTHROUGH || options.PRINT_STATS {\n\t\t\tdirections = grid.HumanSolution()\n\t\t\tif len(directions) == 0 {\n\t\t\t\t\/\/We couldn't solve it. Let's check and see if the puzzle is well formed.\n\t\t\t\tif grid.HasMultipleSolutions() {\n\t\t\t\t\t\/\/TODO: figure out why guesses wouldn't be used here effectively.\n\t\t\t\t\tlog.Println(\"The puzzle had multiple solutions; that means it's not well-formed\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif options.WALKTHROUGH {\n\t\t\tfmt.Fprintln(output, directions.Walkthrough(grid))\n\t\t}\n\t\tif options.PRINT_STATS {\n\t\t\tfmt.Fprintln(output, grid.Difficulty())\n\t\t\tfmt.Fprintln(output, strings.Join(directions.Stats(), \"\\n\"))\n\t\t}\n\t\tif options.PUZZLE_TO_SOLVE != \"\" {\n\t\t\tgrid.Solve()\n\t\t\tfmt.Fprintln(output, grid.DataString())\n\t\t\t\/\/If we're asked to solve, n could only be 1 anyway.\n\t\t\treturn\n\t\t}\n\t\tgrid.Done()\n\t}\n\n}\n\nfunc generatePuzzle(min float64, max float64, symmetryType sudoku.SymmetryType, symmetryPercentage float64) *sudoku.Grid {\n\tvar result *sudoku.Grid\n\tcount := 0\n\tfor {\n\t\tlog.Println(\"Attempt\", count, \"at generating puzzle.\")\n\n\t\tresult = sudoku.GenerateGrid(symmetryType, symmetryPercentage)\n\n\t\tdifficulty := result.Difficulty()\n\n\t\tif difficulty >= min && difficulty <= max {\n\t\t\treturn result\n\t\t}\n\n\t\tlog.Println(\"Rejecting grid of difficulty\", difficulty)\n\n\t\tcount++\n\t}\n\treturn nil\n}\n<commit_msg>Another TODO<commit_after>package main\n\nimport (\n\t\"dokugen\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/TODO: let people pass in a filename to export to.\n\ntype appOptions struct {\n\tGENERATE bool\n\tHELP bool\n\tPUZZLE_TO_SOLVE string\n\tNUM int\n\tPRINT_STATS bool\n\tWALKTHROUGH bool\n\tRAW_SYMMETRY string\n\tSYMMETRY sudoku.SymmetryType\n\tSYMMETRY_PROPORTION float64\n\tMIN_DIFFICULTY float64\n\tMAX_DIFFICULTY float64\n}\n\nfunc init() {\n\t\/\/grid.Difficulty can make use of a number of processes simultaneously.\n\truntime.GOMAXPROCS(6)\n}\n\nfunc main() {\n\n\t\/\/TODO: figure out how to test this.\n\n\tvar options appOptions\n\n\tflag.BoolVar(&options.GENERATE, \"g\", false, \"if true, will generate a puzzle.\")\n\tflag.BoolVar(&options.HELP, \"h\", false, \"If provided, will print help and exit.\")\n\tflag.IntVar(&options.NUM, \"n\", 1, \"Number of things to generate\")\n\tflag.BoolVar(&options.PRINT_STATS, \"p\", false, \"If provided, will print stats.\")\n\tflag.StringVar(&options.PUZZLE_TO_SOLVE, \"s\", \"\", \"If provided, will solve the puzzle at the given filename and print solution.\")\n\tflag.BoolVar(&options.WALKTHROUGH, \"w\", false, \"If provided, will print out a walkthrough to solve the provided puzzle.\")\n\tflag.StringVar(&options.RAW_SYMMETRY, \"y\", \"vertical\", \"Valid values: 'none', 'both', 'horizontal', 'vertical\")\n\tflag.Float64Var(&options.SYMMETRY_PROPORTION, \"r\", 0.7, \"What proportion of cells should be filled according to symmetry\")\n\tflag.Float64Var(&options.MIN_DIFFICULTY, \"min\", 0.0, \"Minimum difficulty for generated puzzle\")\n\tflag.Float64Var(&options.MAX_DIFFICULTY, \"max\", 1.0, \"Maximum difficulty for generated puzzle\")\n\n\tflag.Parse()\n\n\toptions.RAW_SYMMETRY = strings.ToLower(options.RAW_SYMMETRY)\n\tswitch options.RAW_SYMMETRY {\n\tcase \"none\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_NONE\n\tcase \"both\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_BOTH\n\tcase \"horizontal\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_HORIZONTAL\n\tcase \"vertical\":\n\t\toptions.SYMMETRY = sudoku.SYMMETRY_VERTICAL\n\tdefault:\n\t\tlog.Fatal(\"Unknown symmetry flag: \", options.RAW_SYMMETRY)\n\t}\n\n\toutput := os.Stdout\n\n\tif options.HELP {\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tvar grid *sudoku.Grid\n\n\tfor i := 0; i < options.NUM; i++ {\n\t\t\/\/TODO: allow the type of symmetry to be configured.\n\t\tif options.GENERATE {\n\t\t\tgrid = generatePuzzle(options.MIN_DIFFICULTY, options.MAX_DIFFICULTY, options.SYMMETRY, options.SYMMETRY_PROPORTION)\n\t\t\tfmt.Fprintln(output, grid.DataString())\n\t\t} else if options.PUZZLE_TO_SOLVE != \"\" {\n\t\t\t\/\/TODO: detect if the load failed.\n\t\t\tgrid = sudoku.NewGrid()\n\t\t\tgrid.LoadFromFile(options.PUZZLE_TO_SOLVE)\n\t\t}\n\n\t\tif grid == nil {\n\t\t\t\/\/No grid to do anything with.\n\t\t\tlog.Fatalln(\"No grid loaded.\")\n\t\t}\n\n\t\t\/\/TODO: use of this option leads to a busy loop somewhere... Is it related to the generate-multiple-and-difficulty hang?\n\n\t\tvar directions sudoku.SolveDirections\n\n\t\tif options.WALKTHROUGH || options.PRINT_STATS {\n\t\t\tdirections = grid.HumanSolution()\n\t\t\tif len(directions) == 0 {\n\t\t\t\t\/\/We couldn't solve it. Let's check and see if the puzzle is well formed.\n\t\t\t\tif grid.HasMultipleSolutions() {\n\t\t\t\t\t\/\/TODO: figure out why guesses wouldn't be used here effectively.\n\t\t\t\t\tlog.Println(\"The puzzle had multiple solutions; that means it's not well-formed\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif options.WALKTHROUGH {\n\t\t\tfmt.Fprintln(output, directions.Walkthrough(grid))\n\t\t}\n\t\tif options.PRINT_STATS {\n\t\t\tfmt.Fprintln(output, grid.Difficulty())\n\t\t\t\/\/TODO: consider actually printing out the Signals stats (with a Stats method on signals)\n\t\t\tfmt.Fprintln(output, strings.Join(directions.Stats(), \"\\n\"))\n\t\t}\n\t\tif options.PUZZLE_TO_SOLVE != \"\" {\n\t\t\tgrid.Solve()\n\t\t\tfmt.Fprintln(output, grid.DataString())\n\t\t\t\/\/If we're asked to solve, n could only be 1 anyway.\n\t\t\treturn\n\t\t}\n\t\tgrid.Done()\n\t}\n\n}\n\nfunc generatePuzzle(min float64, max float64, symmetryType sudoku.SymmetryType, symmetryPercentage float64) *sudoku.Grid {\n\tvar result *sudoku.Grid\n\tcount := 0\n\tfor {\n\t\tlog.Println(\"Attempt\", count, \"at generating puzzle.\")\n\n\t\tresult = sudoku.GenerateGrid(symmetryType, symmetryPercentage)\n\n\t\tdifficulty := result.Difficulty()\n\n\t\tif difficulty >= min && difficulty <= max {\n\t\t\treturn result\n\t\t}\n\n\t\tlog.Println(\"Rejecting grid of difficulty\", difficulty)\n\n\t\tcount++\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Frederik Zipp. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Gocyclo calculates the cyclomatic complexities of functions and\n\/\/ methods in Go source code.\n\/\/\n\/\/ Usage:\n\/\/ gocyclo [<flag> ...] <Go file or directory> ...\n\/\/\n\/\/ Flags:\n\/\/ -over N show functions with complexity > N only and\n\/\/ return exit code 1 if the output is non-empty\n\/\/ -top N show the top N most complex functions only\n\/\/ -avg, -avg-short show the average complexity;\n\/\/ the short option prints the value without a label\n\/\/ -total, -total-short show the total complexity;\n\/\/ the short option prints the value without a label\n\/\/ -ignore REGEX exclude files matching the given regular expression\n\/\/\n\/\/ The output fields for each line are:\n\/\/ <complexity> <package> <function> <file:line:column>\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"github.com\/fzipp\/gocyclo\"\n)\n\nconst usageDoc = `Calculate cyclomatic complexities of Go functions.\nUsage:\n gocyclo [flags] <Go file or directory> ...\n\nFlags:\n -over N show functions with complexity > N only and\n return exit code 1 if the set is non-empty\n -top N show the top N most complex functions only\n -avg, -avg-short show the average complexity over all functions;\n the short option prints the value without a label\n -total, -total-short show the total complexity for all functions;\n the short option prints the value without a label\n -ignore REGEX exclude files matching the given regular expression\n\nThe output fields for each line are:\n<complexity> <package> <function> <file:line:column>\n`\n\nfunc main() {\n\tover := flag.Int(\"over\", 0, \"show functions with complexity > N only\")\n\ttop := flag.Int(\"top\", -1, \"show the top N most complex functions only\")\n\tavg := flag.Bool(\"avg\", false, \"show the average complexity\")\n\tavgShort := flag.Bool(\"avg-short\", false, \"show the average complexity without a label\")\n\ttotal := flag.Bool(\"total\", false, \"show the total complexity\")\n\ttotalShort := flag.Bool(\"total-short\", false, \"show the total complexity without a label\")\n\tignore := flag.String(\"ignore\", \"\", \"exclude files matching the given regular expression\")\n\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"gocyclo: \")\n\tflag.Usage = usage\n\tflag.Parse()\n\tpaths := flag.Args()\n\tif len(paths) == 0 {\n\t\tusage()\n\t}\n\n\tallStats := gocyclo.Analyze(paths, regex(*ignore))\n\tshownStats := allStats.SortAndFilter(*top, *over)\n\n\tprintStats(shownStats)\n\tif *avg || *avgShort {\n\t\tprintAverage(allStats, *avgShort)\n\t}\n\tif *total || *totalShort {\n\t\tprintTotal(allStats, *totalShort)\n\t}\n\n\tif *over > 0 && len(shownStats) > 0 {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc regex(expr string) *regexp.Regexp {\n\tif expr == \"\" {\n\t\treturn nil\n\t}\n\tre, err := regexp.Compile(expr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn re\n}\n\nfunc printStats(s gocyclo.Stats) {\n\tfor _, stat := range s {\n\t\tfmt.Println(stat)\n\t}\n}\n\nfunc printAverage(s gocyclo.Stats, short bool) {\n\tif !short {\n\t\tfmt.Print(\"Average: \")\n\t}\n\tfmt.Printf(\"%.3g\\n\", s.AverageComplexity())\n}\n\nfunc printTotal(s gocyclo.Stats, short bool) {\n\tif !short {\n\t\tfmt.Print(\"Total: \")\n\t}\n\tfmt.Printf(\"%d\\n\", s.TotalComplexity())\n}\n\nfunc usage() {\n\t_, _ = fmt.Fprint(os.Stderr, usageDoc)\n\tos.Exit(2)\n}\n<commit_msg>cmd\/gocyclo: gofmt<commit_after>\/\/ Copyright 2013 Frederik Zipp. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Gocyclo calculates the cyclomatic complexities of functions and\n\/\/ methods in Go source code.\n\/\/\n\/\/ Usage:\n\/\/\n\/\/\tgocyclo [<flag> ...] <Go file or directory> ...\n\/\/\n\/\/ Flags:\n\/\/\n\/\/\t-over N show functions with complexity > N only and\n\/\/\t return exit code 1 if the output is non-empty\n\/\/\t-top N show the top N most complex functions only\n\/\/\t-avg, -avg-short show the average complexity;\n\/\/\t the short option prints the value without a label\n\/\/\t-total, -total-short show the total complexity;\n\/\/\t the short option prints the value without a label\n\/\/\t-ignore REGEX exclude files matching the given regular expression\n\/\/\n\/\/ The output fields for each line are:\n\/\/ <complexity> <package> <function> <file:line:column>\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"github.com\/fzipp\/gocyclo\"\n)\n\nconst usageDoc = `Calculate cyclomatic complexities of Go functions.\nUsage:\n gocyclo [flags] <Go file or directory> ...\n\nFlags:\n -over N show functions with complexity > N only and\n return exit code 1 if the set is non-empty\n -top N show the top N most complex functions only\n -avg, -avg-short show the average complexity over all functions;\n the short option prints the value without a label\n -total, -total-short show the total complexity for all functions;\n the short option prints the value without a label\n -ignore REGEX exclude files matching the given regular expression\n\nThe output fields for each line are:\n<complexity> <package> <function> <file:line:column>\n`\n\nfunc main() {\n\tover := flag.Int(\"over\", 0, \"show functions with complexity > N only\")\n\ttop := flag.Int(\"top\", -1, \"show the top N most complex functions only\")\n\tavg := flag.Bool(\"avg\", false, \"show the average complexity\")\n\tavgShort := flag.Bool(\"avg-short\", false, \"show the average complexity without a label\")\n\ttotal := flag.Bool(\"total\", false, \"show the total complexity\")\n\ttotalShort := flag.Bool(\"total-short\", false, \"show the total complexity without a label\")\n\tignore := flag.String(\"ignore\", \"\", \"exclude files matching the given regular expression\")\n\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"gocyclo: \")\n\tflag.Usage = usage\n\tflag.Parse()\n\tpaths := flag.Args()\n\tif len(paths) == 0 {\n\t\tusage()\n\t}\n\n\tallStats := gocyclo.Analyze(paths, regex(*ignore))\n\tshownStats := allStats.SortAndFilter(*top, *over)\n\n\tprintStats(shownStats)\n\tif *avg || *avgShort {\n\t\tprintAverage(allStats, *avgShort)\n\t}\n\tif *total || *totalShort {\n\t\tprintTotal(allStats, *totalShort)\n\t}\n\n\tif *over > 0 && len(shownStats) > 0 {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc regex(expr string) *regexp.Regexp {\n\tif expr == \"\" {\n\t\treturn nil\n\t}\n\tre, err := regexp.Compile(expr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn re\n}\n\nfunc printStats(s gocyclo.Stats) {\n\tfor _, stat := range s {\n\t\tfmt.Println(stat)\n\t}\n}\n\nfunc printAverage(s gocyclo.Stats, short bool) {\n\tif !short {\n\t\tfmt.Print(\"Average: \")\n\t}\n\tfmt.Printf(\"%.3g\\n\", s.AverageComplexity())\n}\n\nfunc printTotal(s gocyclo.Stats, short bool) {\n\tif !short {\n\t\tfmt.Print(\"Total: \")\n\t}\n\tfmt.Printf(\"%d\\n\", s.TotalComplexity())\n}\n\nfunc usage() {\n\t_, _ = fmt.Fprint(os.Stderr, usageDoc)\n\tos.Exit(2)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/foolusion\/choices\"\n\t\"github.com\/foolusion\/choices\/storage\/mongo\"\n\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nconst (\n\trootEndpoint = \"\/\"\n\thealthEndpoint = \"\/healtz\"\n\treadinessEndpoint = \"readiness\"\n\tlaunchPrefix = \"\/launch\/\"\n\tdeletePrefix = \"\/delete\/\"\n)\n\nfunc init() {\n\thttp.HandleFunc(rootEndpoint, rootHandler)\n\thttp.HandleFunc(launchPrefix, launchHandler)\n\thttp.HandleFunc(deletePrefix, deleteHandler)\n\thttp.HandleFunc(healthEndpoint, healthHandler)\n\thttp.HandleFunc(readinessEndpoint, readinessHandler)\n}\n\ntype config struct {\n\tmongoAddr string\n\tmongoDB string\n\ttestCollection string\n\tprodCollection string\n\tusername string\n\tpassword string\n\taddr string\n\tmongo *mgo.Session\n}\n\nvar cfg = config{\n\tmongoAddr: \"elwin-storage\",\n\tmongoDB: \"elwin\",\n\ttestCollection: \"test\",\n\tprodCollection: \"prod\",\n\tusername: \"elwin\",\n\tpassword: \"philologist\",\n\taddr: \":8080\",\n}\n\nconst (\n\tenvMongoAddress = \"MONGO_ADDRESS\"\n\tenvMongoDatabase = \"MONGO_DATABASE\"\n\tenvMongoTestCollection = \"MONGO_TEST_COLLECTION\"\n\tenvMongoProdCollection = \"MONGO_PROD_COLLECTION\"\n\tenvUsername = \"USERNAME\"\n\tenvPassword = \"PASSWORD\"\n\tenvAddr = \"ADDRESS\"\n)\n\nfunc main() {\n\tlog.Println(\"Starting Houston...\")\n\n\tif os.Getenv(envMongoAddress) != \"\" {\n\t\tcfg.mongoAddr = os.Getenv(envMongoAddress)\n\t\tlog.Printf(\"Setting Mongo Address: %q\", cfg.mongoAddr)\n\t}\n\tif os.Getenv(envMongoDatabase) != \"\" {\n\t\tcfg.mongoDB = os.Getenv(envMongoDatabase)\n\t\tlog.Printf(\"Setting Mongo Database: %q\", cfg.mongoDB)\n\t}\n\tif os.Getenv(envMongoTestCollection) != \"\" {\n\t\tcfg.testCollection = os.Getenv(envMongoTestCollection)\n\t\tlog.Printf(\"Setting Mongo Test Collection: %q\", cfg.testCollection)\n\t}\n\tif os.Getenv(envMongoProdCollection) != \"\" {\n\t\tcfg.prodCollection = os.Getenv(envMongoProdCollection)\n\t\tlog.Printf(\"Setting Mongo Prod Collection: %q\", cfg.prodCollection)\n\t}\n\tif os.Getenv(envUsername) != \"\" {\n\t\tcfg.username = os.Getenv(envUsername)\n\t\tlog.Printf(\"Setting Username: %q\", cfg.username)\n\t}\n\tif os.Getenv(envPassword) != \"\" {\n\t\tcfg.password = os.Getenv(envPassword)\n\t\tlog.Printf(\"Setting Password: %q\", cfg.password)\n\t}\n\n\terrCh := make(chan error, 1)\n\n\t\/\/ setup mongo\n\tgo func(c *config) {\n\t\tvar err error\n\t\tc.mongo, err = mgo.Dial(c.mongoAddr)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"could not dial mongo database: %s\", err)\n\t\t\terrCh <- err\n\t\t}\n\t}(&cfg)\n\n\tgo func() {\n\t\terrCh <- http.ListenAndServe(cfg.addr, nil)\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\t\/\/ graceful shutdown\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Namespace container for data from mongo.\ntype Namespace struct {\n\tName string\n\tLabels []string `bson:\"teamid\"`\n\tExperiments []struct {\n\t\tName string\n\t\tParams []mongo.Param\n\t}\n}\n\n\/\/ TableData container for data to be output.\ntype TableData struct {\n\tName string\n\tLabels string\n\tExperiments []struct {\n\t\tName string\n\t\tParams []struct {\n\t\t\tName string\n\t\t\tValues string\n\t\t}\n\t}\n}\n\ntype RootTmplData struct {\n\tTestRaw []Namespace\n\tProdRaw []Namespace\n\tTest []TableData\n\tProd []TableData\n}\n\nfunc namespaceToTableData(ns []Namespace) []TableData {\n\ttableData := make([]TableData, len(ns))\n\tfor i, v := range ns {\n\t\ttableData[i].Name = v.Name\n\t\ttableData[i].Labels = strings.Join(v.Labels, \", \")\n\t\texperiments := make(\n\t\t\t[]struct {\n\t\t\t\tName string\n\t\t\t\tParams []struct {\n\t\t\t\t\tName string\n\t\t\t\t\tValues string\n\t\t\t\t}\n\t\t\t}, len(v.Experiments))\n\t\ttableData[i].Experiments = experiments\n\t\tfor j, e := range v.Experiments {\n\t\t\ttableData[i].Experiments[j].Name = e.Name\n\t\t\tparams := make(\n\t\t\t\t[]struct {\n\t\t\t\t\tName string\n\t\t\t\t\tValues string\n\t\t\t\t}, len(e.Params))\n\t\t\tfor k, p := range e.Params {\n\t\t\t\tparams[k].Name = p.Name\n\t\t\t\tswitch p.Type {\n\t\t\t\tcase choices.ValueTypeUniform:\n\t\t\t\t\tvar uniform choices.Uniform\n\t\t\t\t\tp.Value.Unmarshal(&uniform)\n\t\t\t\t\tparams[k].Values = strings.Join(uniform.Choices, \", \")\n\t\t\t\tcase choices.ValueTypeWeighted:\n\t\t\t\t\tvar weighted choices.Weighted\n\t\t\t\t\tp.Value.Unmarshal(&weighted)\n\t\t\t\t\tparams[k].Values = strings.Join(weighted.Choices, \", \")\n\t\t\t\t}\n\t\t\t}\n\t\t\ttableData[i].Experiments[j].Params = params\n\t\t}\n\t}\n\treturn tableData\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tvar buf []byte\n\tvar err error\n\tif buf, err = httputil.DumpRequest(r, true); err != nil {\n\t\tlog.Printf(\"could not dump request: %v\", err)\n\t\treturn\n\t}\n\tlog.Printf(\"%s\", buf)\n\n\tvar test []Namespace\n\tvar prod []Namespace\n\tcfg.mongo.DB(cfg.mongoDB).C(cfg.prodCollection).Find(nil).All(&prod)\n\n\tdata := RootTmplData{\n\t\tTestRaw: test,\n\t\tProdRaw: prod,\n\t\tTest: namespaceToTableData(test),\n\t\tProd: namespaceToTableData(prod),\n\t}\n\n\tlog.Println(data)\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tif err := rootTemplate.Execute(w, data); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\nvar rootTemplate = template.Must(template.New(\"root\").Parse(rootTmpl))\n\nconst rootTmpl = `<!doctype html>\n<html lang=\"en\">\n<head>\n<title>Houston!<\/title>\n<\/head>\n<body>\n<h1>Houston<\/h1>\n<div>\n{{with .Test}}\n<h2>Test<\/h2>\n<table>\n<tr>\n <th>Namespace<\/th>\n <th>Labels<\/th>\n <th>Experiment<\/th>\n <th>Params<\/th>\n <th>Delete?<\/th>\n <th>Launch?<\/th>\n<\/tr>\n{{range $ns := .}}\n{{range $exp := .Experiments}}\n<tr>\n\t<th>{{$ns.Name}}<\/th>\n\t<th>{{$ns.Labels}}<\/th>\n\t<th>{{$exp.Name}}<\/th>\n\t<th>{{range .Params}}<strong>{{.Name}}<\/strong>: ({{.Values}})<br\/>{{end}}<\/th>\n\t<th><a href=\"\/delete\/{{$exp.Name}}\">Delete<\/a><\/th>\n\t<th><a href=\"\/launch\/{{$exp.Name}}\">Launch<\/a><\/th>\n<\/tr>\n{{end}}\n{{end}}\n<\/table>\n{{end}}\n\n{{with .Prod}}\n<h2>Prod<\/h2>\n<table>\n<tr>\n <th>Namespace<\/th>\n <th>Labels<\/th>\n <th>Experiment<\/th>\n <th>Params<\/th>\n <th>Delete?<\/th>\n <th>Launch?<\/th>\n<\/tr>\n{{range $ns := .}}\n{{range $exp := .Experiments}}\n<tr>\n\t<th>{{$ns.Name}}<\/th>\n\t<th>{{$ns.Labels}}<\/th>\n\t<th>{{$exp.Name}}<\/th>\n\t<th>{{range .Params}}<strong>{{.Name}}<\/strong>: ({{.Values}})<br\/>{{end}}<\/th>\n\t<th><a href=\"\/delete\/{{$exp.Name}}\">Delete<\/a><\/th>\n\t<th><a href=\"\/launch\/{{$exp.Name}}\">Launch<\/a><\/th>\n<\/tr>\n{{end}}\n{{end}}\n<\/table>\n{{end}}\n\n<\/div>\n<\/body>\n<\/html>\n`\n\nfunc launchHandler(w http.ResponseWriter, r *http.Request) {\n\texperiment := r.URL.Path[len(launchPrefix):]\n\n\t\/\/ get the namespace from test\n\tvar test Namespace\n\tif err := cfg.mongo.DB(cfg.mongoDB).C(cfg.testCollection).Find(bson.M{\"experiments.name\": experiment}).One(&test); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\tw.Write([]byte(\"not found\"))\n\t\treturn\n\t}\n\n\t\/\/ check for namespace in prod\n\tvar prod Namespace\n\tif err := cfg.mongo.DB(cfg.mongoDB).C(cfg.prodCollection).Find(bson.M{\"name\": test.Name}).One(&prod); err == mgo.ErrNotFound {\n\t\t\/\/ namespace not found in prod so create namespace and add\n\t\t\/\/ experiment\n\t\treturn\n\t}\n\n\t\/\/ subtract segments from prod namespace and add experiment\n\n}\n\nfunc deleteHandler(w http.ResponseWriter, r *http.Request) {\n}\n\nfunc healthHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"OK\"))\n}\n\nfunc readinessHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := cfg.mongo.Ping(); err != nil {\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\t\tw.Write([]byte(\"Not Ready\"))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"OK\"))\n}\n<commit_msg>cmd\/houston: use query functions in launch cmd<commit_after>package main\n\nimport (\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/foolusion\/choices\"\n\t\"github.com\/foolusion\/choices\/storage\/mongo\"\n\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nconst (\n\trootEndpoint = \"\/\"\n\thealthEndpoint = \"\/healtz\"\n\treadinessEndpoint = \"readiness\"\n\tlaunchPrefix = \"\/launch\/\"\n\tdeletePrefix = \"\/delete\/\"\n)\n\nfunc init() {\n\thttp.HandleFunc(rootEndpoint, rootHandler)\n\thttp.HandleFunc(launchPrefix, launchHandler)\n\thttp.HandleFunc(deletePrefix, deleteHandler)\n\thttp.HandleFunc(healthEndpoint, healthHandler)\n\thttp.HandleFunc(readinessEndpoint, readinessHandler)\n}\n\ntype config struct {\n\tmongoAddr string\n\tmongoDB string\n\ttestCollection string\n\tprodCollection string\n\tusername string\n\tpassword string\n\taddr string\n\tmongo *mgo.Session\n}\n\nvar cfg = config{\n\tmongoAddr: \"elwin-storage\",\n\tmongoDB: \"elwin\",\n\ttestCollection: \"test\",\n\tprodCollection: \"prod\",\n\tusername: \"elwin\",\n\tpassword: \"philologist\",\n\taddr: \":8080\",\n}\n\nconst (\n\tenvMongoAddress = \"MONGO_ADDRESS\"\n\tenvMongoDatabase = \"MONGO_DATABASE\"\n\tenvMongoTestCollection = \"MONGO_TEST_COLLECTION\"\n\tenvMongoProdCollection = \"MONGO_PROD_COLLECTION\"\n\tenvUsername = \"USERNAME\"\n\tenvPassword = \"PASSWORD\"\n\tenvAddr = \"ADDRESS\"\n)\n\nfunc main() {\n\tlog.Println(\"Starting Houston...\")\n\n\tif os.Getenv(envMongoAddress) != \"\" {\n\t\tcfg.mongoAddr = os.Getenv(envMongoAddress)\n\t\tlog.Printf(\"Setting Mongo Address: %q\", cfg.mongoAddr)\n\t}\n\tif os.Getenv(envMongoDatabase) != \"\" {\n\t\tcfg.mongoDB = os.Getenv(envMongoDatabase)\n\t\tlog.Printf(\"Setting Mongo Database: %q\", cfg.mongoDB)\n\t}\n\tif os.Getenv(envMongoTestCollection) != \"\" {\n\t\tcfg.testCollection = os.Getenv(envMongoTestCollection)\n\t\tlog.Printf(\"Setting Mongo Test Collection: %q\", cfg.testCollection)\n\t}\n\tif os.Getenv(envMongoProdCollection) != \"\" {\n\t\tcfg.prodCollection = os.Getenv(envMongoProdCollection)\n\t\tlog.Printf(\"Setting Mongo Prod Collection: %q\", cfg.prodCollection)\n\t}\n\tif os.Getenv(envUsername) != \"\" {\n\t\tcfg.username = os.Getenv(envUsername)\n\t\tlog.Printf(\"Setting Username: %q\", cfg.username)\n\t}\n\tif os.Getenv(envPassword) != \"\" {\n\t\tcfg.password = os.Getenv(envPassword)\n\t\tlog.Printf(\"Setting Password: %q\", cfg.password)\n\t}\n\n\terrCh := make(chan error, 1)\n\n\t\/\/ setup mongo\n\tgo func(c *config) {\n\t\tvar err error\n\t\tc.mongo, err = mgo.Dial(c.mongoAddr)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"could not dial mongo database: %s\", err)\n\t\t\terrCh <- err\n\t\t}\n\t}(&cfg)\n\n\tgo func() {\n\t\terrCh <- http.ListenAndServe(cfg.addr, nil)\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\t\/\/ graceful shutdown\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Namespace container for data from mongo.\ntype Namespace struct {\n\tName string\n\tLabels []string `bson:\"teamid\"`\n\tExperiments []struct {\n\t\tName string\n\t\tParams []mongo.Param\n\t}\n}\n\n\/\/ TableData container for data to be output.\ntype TableData struct {\n\tName string\n\tLabels string\n\tExperiments []struct {\n\t\tName string\n\t\tParams []struct {\n\t\t\tName string\n\t\t\tValues string\n\t\t}\n\t}\n}\n\ntype RootTmplData struct {\n\tTestRaw []Namespace\n\tProdRaw []Namespace\n\tTest []TableData\n\tProd []TableData\n}\n\nfunc namespaceToTableData(ns []Namespace) []TableData {\n\ttableData := make([]TableData, len(ns))\n\tfor i, v := range ns {\n\t\ttableData[i].Name = v.Name\n\t\ttableData[i].Labels = strings.Join(v.Labels, \", \")\n\t\texperiments := make(\n\t\t\t[]struct {\n\t\t\t\tName string\n\t\t\t\tParams []struct {\n\t\t\t\t\tName string\n\t\t\t\t\tValues string\n\t\t\t\t}\n\t\t\t}, len(v.Experiments))\n\t\ttableData[i].Experiments = experiments\n\t\tfor j, e := range v.Experiments {\n\t\t\ttableData[i].Experiments[j].Name = e.Name\n\t\t\tparams := make(\n\t\t\t\t[]struct {\n\t\t\t\t\tName string\n\t\t\t\t\tValues string\n\t\t\t\t}, len(e.Params))\n\t\t\tfor k, p := range e.Params {\n\t\t\t\tparams[k].Name = p.Name\n\t\t\t\tswitch p.Type {\n\t\t\t\tcase choices.ValueTypeUniform:\n\t\t\t\t\tvar uniform choices.Uniform\n\t\t\t\t\tp.Value.Unmarshal(&uniform)\n\t\t\t\t\tparams[k].Values = strings.Join(uniform.Choices, \", \")\n\t\t\t\tcase choices.ValueTypeWeighted:\n\t\t\t\t\tvar weighted choices.Weighted\n\t\t\t\t\tp.Value.Unmarshal(&weighted)\n\t\t\t\t\tparams[k].Values = strings.Join(weighted.Choices, \", \")\n\t\t\t\t}\n\t\t\t}\n\t\t\ttableData[i].Experiments[j].Params = params\n\t\t}\n\t}\n\treturn tableData\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n\tvar buf []byte\n\tvar err error\n\tif buf, err = httputil.DumpRequest(r, true); err != nil {\n\t\tlog.Printf(\"could not dump request: %v\", err)\n\t\treturn\n\t}\n\tlog.Printf(\"%s\", buf)\n\n\tvar test []Namespace\n\tvar prod []Namespace\n\tcfg.mongo.DB(cfg.mongoDB).C(cfg.prodCollection).Find(nil).All(&prod)\n\n\tdata := RootTmplData{\n\t\tTestRaw: test,\n\t\tProdRaw: prod,\n\t\tTest: namespaceToTableData(test),\n\t\tProd: namespaceToTableData(prod),\n\t}\n\n\tlog.Println(data)\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tif err := rootTemplate.Execute(w, data); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n\nvar rootTemplate = template.Must(template.New(\"root\").Parse(rootTmpl))\n\nconst rootTmpl = `<!doctype html>\n<html lang=\"en\">\n<head>\n<title>Houston!<\/title>\n<\/head>\n<body>\n<h1>Houston<\/h1>\n<div>\n{{with .Test}}\n<h2>Test<\/h2>\n<table>\n<tr>\n <th>Namespace<\/th>\n <th>Labels<\/th>\n <th>Experiment<\/th>\n <th>Params<\/th>\n <th>Delete?<\/th>\n <th>Launch?<\/th>\n<\/tr>\n{{range $ns := .}}\n{{range $exp := .Experiments}}\n<tr>\n\t<th>{{$ns.Name}}<\/th>\n\t<th>{{$ns.Labels}}<\/th>\n\t<th>{{$exp.Name}}<\/th>\n\t<th>{{range .Params}}<strong>{{.Name}}<\/strong>: ({{.Values}})<br\/>{{end}}<\/th>\n\t<th><a href=\"\/delete\/{{$exp.Name}}\">Delete<\/a><\/th>\n\t<th><a href=\"\/launch\/{{$exp.Name}}\">Launch<\/a><\/th>\n<\/tr>\n{{end}}\n{{end}}\n<\/table>\n{{end}}\n\n{{with .Prod}}\n<h2>Prod<\/h2>\n<table>\n<tr>\n <th>Namespace<\/th>\n <th>Labels<\/th>\n <th>Experiment<\/th>\n <th>Params<\/th>\n <th>Delete?<\/th>\n <th>Launch?<\/th>\n<\/tr>\n{{range $ns := .}}\n{{range $exp := .Experiments}}\n<tr>\n\t<th>{{$ns.Name}}<\/th>\n\t<th>{{$ns.Labels}}<\/th>\n\t<th>{{$exp.Name}}<\/th>\n\t<th>{{range .Params}}<strong>{{.Name}}<\/strong>: ({{.Values}})<br\/>{{end}}<\/th>\n\t<th><a href=\"\/delete\/{{$exp.Name}}\">Delete<\/a><\/th>\n\t<th><a href=\"\/launch\/{{$exp.Name}}\">Launch<\/a><\/th>\n<\/tr>\n{{end}}\n{{end}}\n<\/table>\n{{end}}\n\n<\/div>\n<\/body>\n<\/html>\n`\n\nfunc launchHandler(w http.ResponseWriter, r *http.Request) {\n\texperiment := r.URL.Path[len(launchPrefix):]\n\n\t\/\/ get the namespace from test\n\ttest, err := mongo.QueryAll(cfg.mongo.DB(cfg.mongoDB).C(cfg.testCollection), bson.M{\"experiments.name\": experiment})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\tw.Write([]byte(\"not found\"))\n\t\treturn\n\t}\n\n\t\/\/ check for namespace in prod\n\tprod, err := mongo.QueryOne(cfg.mongo.DB(cfg.mongoDB).C(cfg.prodCollection), bson.M{\"name\": test.Name})\n\tif err == mgo.ErrNotFound {\n\t\t\/\/ namespace not found in prod so create namespace and add\n\t\t\/\/ experiment\n\t\treturn\n\t} else if err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\tw.Write([]byte(\"something went wrong\"))\n\t\treturn\n\t}\n\n\t\/\/ subtract segments from prod namespace and add experiment\n\n}\n\nfunc deleteHandler(w http.ResponseWriter, r *http.Request) {\n}\n\nfunc healthHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"OK\"))\n}\n\nfunc readinessHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := cfg.mongo.Ping(); err != nil {\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\t\tw.Write([]byte(\"Not Ready\"))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"text\/plain\")\n\tw.Write([]byte(\"OK\"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/spf13\/pflag\"\n\t\"k8s.io\/kubernetes\/cmd\/kube-dns\/app\"\n\t\"k8s.io\/kubernetes\/cmd\/kube-dns\/app\/options\"\n\t_ \"k8s.io\/kubernetes\/pkg\/client\/metrics\/prometheus\" \/\/ for client metric registration\n\t\"k8s.io\/kubernetes\/pkg\/util\/flag\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/logs\"\n\t\"k8s.io\/kubernetes\/pkg\/version\/verflag\"\n)\n\nfunc main() {\n\tconfig := options.NewKubeDNSConfig()\n\tconfig.AddFlags(pflag.CommandLine)\n\n\tflag.InitFlags()\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\n\tverflag.PrintAndExitIfRequested()\n\tserver := app.NewKubeDNSServerDefault(config)\n\tserver.Run()\n}\n<commit_msg>Split the version metric out to its own package<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/spf13\/pflag\"\n\t\"k8s.io\/kubernetes\/cmd\/kube-dns\/app\"\n\t\"k8s.io\/kubernetes\/cmd\/kube-dns\/app\/options\"\n\t_ \"k8s.io\/kubernetes\/pkg\/client\/metrics\/prometheus\" \/\/ for client metric registration\n\t\"k8s.io\/kubernetes\/pkg\/util\/flag\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/logs\"\n\t_ \"k8s.io\/kubernetes\/pkg\/version\/prometheus\" \/\/ for version metric registration\n\t\"k8s.io\/kubernetes\/pkg\/version\/verflag\"\n)\n\nfunc main() {\n\tconfig := options.NewKubeDNSConfig()\n\tconfig.AddFlags(pflag.CommandLine)\n\n\tflag.InitFlags()\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\n\tverflag.PrintAndExitIfRequested()\n\tserver := app.NewKubeDNSServerDefault(config)\n\tserver.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage cmd_test\n\nimport (\n\t\"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\nfunc Test(t *testing.T) { gc.TestingT(t) }\n\ntype Dependencies struct{}\n\nvar _ = gc.Suite(&Dependencies{})\n\nfunc (*Dependencies) TestPackageDependencies(c *gc.C) {\n\t\/\/ This test is to ensure we don't bring in dependencies without thinking.\n\t\/\/ Looking at the \"environs\/config\", it is just for JujuHome. This should\n\t\/\/ really be moved into \"juju\/osenv\".\n\tc.Assert(testbase.FindJujuCoreImports(c, \"launchpad.net\/juju-core\/cmd\"),\n\t\tgc.DeepEquals,\n\t\t[]string{\"juju\/arch\", \"juju\/osenv\", \"names\", \"version\"})\n}\n<commit_msg>Fix package dependencies test<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage cmd_test\n\nimport (\n\t\"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\nfunc Test(t *testing.T) { gc.TestingT(t) }\n\ntype Dependencies struct{}\n\nvar _ = gc.Suite(&Dependencies{})\n\nfunc (*Dependencies) TestPackageDependencies(c *gc.C) {\n\t\/\/ This test is to ensure we don't bring in dependencies without thinking.\n\t\/\/ Looking at the \"environs\/config\", it is just for JujuHome. This should\n\t\/\/ really be moved into \"juju\/osenv\".\n\tc.Assert(testbase.FindJujuCoreImports(c, \"launchpad.net\/juju-core\/cmd\"),\n\t\tgc.DeepEquals,\n\t\t[]string{\"juju\/arch\", \"juju\/osenv\", \"names\", \"thirdparty\/pbkdf2\", \"utils\", \"version\"})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-chi\/chi\/middleware\"\n\t\"github.com\/go-chi\/cors\"\n\t\"github.com\/gocontrib\/pubsub\"\n\t_ \"github.com\/gocontrib\/pubsub\/nats\"\n\t_ \"github.com\/gocontrib\/pubsub\/redis\"\n\t\"github.com\/gocontrib\/pubsub\/sse\"\n)\n\nfunc opt(name, defval string) string {\n\tval := os.Getenv(name)\n\tif len(val) == 0 {\n\t\treturn defval\n\t}\n\treturn val\n}\n\nfunc main() {\n\taddr := opt(\"PUBSUBD_ADDR\", \":4302\")\n\tnats := opt(\"NATS_URI\", \"nats:4222\")\n\n\tfmt.Printf(\"starting pubsub --addr %s --nats %s\", addr, nats)\n\n\tstart := func() {\n\t\tinitHub(nats)\n\t\tstartServer(addr)\n\t}\n\n\tstop := func() {\n\t\tpubsub.Cleanup()\n\t\tstopServer()\n\t}\n\n\tdie := make(chan bool)\n\tsig := make(chan os.Signal)\n\tsignal.Notify(sig, os.Interrupt, os.Kill)\n\tgo func() {\n\t\t<-sig\n\t\tdie <- true\n\t}()\n\n\tgo start()\n\t<-die\n\n\tstop()\n}\n\nfunc initHub(nats string) {\n\terr := pubsub.Init(pubsub.HubConfig{\n\t\t\"driver\": \"nats\",\n\t\t\"url\": nats,\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot initialize hub\")\n\t}\n}\n\nvar server *http.Server\n\nfunc startServer(addr string) {\n\tfmt.Printf(\"listening %s\\n\", addr)\n\n\tserver = &http.Server{\n\t\tAddr: addr,\n\t\tHandler: makeHandler(),\n\t}\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc stopServer() {\n\tfmt.Println(\"shutting down\")\n\tserver.Shutdown(nil)\n}\n\nfunc makeHandler() http.Handler {\n\tr := chi.NewRouter()\n\n\tr.Use(middleware.RequestID)\n\tr.Use(middleware.Logger)\n\tr.Use(middleware.Recoverer)\n\n\t\/\/ Basic CORS\n\t\/\/ for more ideas, see: https:\/\/developer.github.com\/v3\/#cross-origin-resource-sharing\n\tcors := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"DELETE\", \"OPTIONS\"},\n\t\tAllowedHeaders: []string{\"Accept\", \"Authorization\", \"Content-Type\", \"X-CSRF-Token\"},\n\t\tExposedHeaders: []string{\"Link\"},\n\t\tAllowCredentials: true,\n\t\tMaxAge: 300, \/\/ Maximum value not ignored by any of major browsers\n\t})\n\tr.Use(cors.Handler)\n\n\tr.Group(eventAPI)\n\n\treturn r\n}\n\nfunc eventAPI(r chi.Router) {\n\t\/\/ TODO configurable api path\n\tr.Get(\"\/api\/event\/stream\", sse.GetEventStream)\n\tr.Get(\"\/api\/event\/stream\/{channel}\", sse.GetEventStream)\n}\n<commit_msg>use gorilla logger<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-chi\/chi\/middleware\"\n\t\"github.com\/go-chi\/cors\"\n\t\"github.com\/gocontrib\/pubsub\"\n\t_ \"github.com\/gocontrib\/pubsub\/nats\"\n\t_ \"github.com\/gocontrib\/pubsub\/redis\"\n\t\"github.com\/gocontrib\/pubsub\/sse\"\n\t\"github.com\/gorilla\/handlers\"\n)\n\nfunc opt(name, defval string) string {\n\tval := os.Getenv(name)\n\tif len(val) == 0 {\n\t\treturn defval\n\t}\n\treturn val\n}\n\nfunc main() {\n\taddr := opt(\"PUBSUBD_ADDR\", \":4302\")\n\tnats := opt(\"NATS_URI\", \"nats:4222\")\n\n\tfmt.Printf(\"starting pubsub --addr %s --nats %s\", addr, nats)\n\n\tstart := func() {\n\t\tinitHub(nats)\n\t\tstartServer(addr)\n\t}\n\n\tstop := func() {\n\t\tpubsub.Cleanup()\n\t\tstopServer()\n\t}\n\n\tdie := make(chan bool)\n\tsig := make(chan os.Signal)\n\tsignal.Notify(sig, os.Interrupt, os.Kill)\n\tgo func() {\n\t\t<-sig\n\t\tdie <- true\n\t}()\n\n\tgo start()\n\t<-die\n\n\tstop()\n}\n\nfunc initHub(nats string) {\n\terr := pubsub.Init(pubsub.HubConfig{\n\t\t\"driver\": \"nats\",\n\t\t\"url\": nats,\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot initialize hub\")\n\t}\n}\n\nvar server *http.Server\n\nfunc startServer(addr string) {\n\tfmt.Printf(\"listening %s\\n\", addr)\n\n\tserver = &http.Server{\n\t\tAddr: addr,\n\t\tHandler: makeHandler(),\n\t}\n\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc stopServer() {\n\tfmt.Println(\"shutting down\")\n\tserver.Shutdown(nil)\n}\n\nfunc makeHandler() http.Handler {\n\tr := chi.NewRouter()\n\n\tr.Use(middleware.RequestID)\n\tr.Use(Logger)\n\tr.Use(middleware.Recoverer)\n\n\t\/\/ Basic CORS\n\t\/\/ for more ideas, see: https:\/\/developer.github.com\/v3\/#cross-origin-resource-sharing\n\tcors := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"DELETE\", \"OPTIONS\"},\n\t\tAllowedHeaders: []string{\"Accept\", \"Authorization\", \"Content-Type\", \"X-CSRF-Token\"},\n\t\tExposedHeaders: []string{\"Link\"},\n\t\tAllowCredentials: true,\n\t\tMaxAge: 300, \/\/ Maximum value not ignored by any of major browsers\n\t})\n\tr.Use(cors.Handler)\n\n\tr.Group(eventAPI)\n\n\treturn r\n}\n\nfunc eventAPI(r chi.Router) {\n\t\/\/ TODO configurable api path\n\tr.Get(\"\/api\/event\/stream\", sse.GetEventStream)\n\tr.Get(\"\/api\/event\/stream\/{channel}\", sse.GetEventStream)\n}\n\nfunc Logger(next http.Handler) http.Handler {\n\treturn handlers.LoggingHandler(os.Stdout, next)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gempir\/gempbot\/pkg\/config\"\n\t\"github.com\/gempir\/gempbot\/pkg\/helix\"\n\t\"github.com\/gempir\/gempbot\/pkg\/log\"\n\t\"github.com\/gempir\/gempbot\/pkg\/store\"\n)\n\nvar (\n\tcfg *config.Config\n\tdb *store.Database\n\thelixClient *helix.Client\n)\n\nfunc main() {\n\tlog.Info(os.Getenv(\"TWITCH_CLIENT_ID\"))\n\tcfg = config.FromEnv()\n\tlog.Info(\"clientid:\" + cfg.ClientID)\n\tdb = store.NewDatabase(cfg)\n\thelixClient = helix.NewClient(cfg, db)\n\n\ttokens := db.GetAllUserAccessToken()\n\n\tfor _, token := range tokens {\n\t\terr := refreshToken(token)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to refresh token for user %s %s\", token.OwnerTwitchID, err)\n\t\t} else {\n\t\t\tlog.Infof(\"refreshed token for user %s\", token.OwnerTwitchID)\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 500)\n\t}\n}\n\nfunc refreshToken(token store.UserAccessToken) error {\n\tresp, err := helixClient.Client.RefreshUserAccessToken(token.RefreshToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.SaveUserAccessToken(context.Background(), token.OwnerTwitchID, resp.Data.AccessToken, resp.Data.RefreshToken, strings.Join(resp.Data.Scopes, \" \"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>remove debug statements<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gempir\/gempbot\/pkg\/config\"\n\t\"github.com\/gempir\/gempbot\/pkg\/helix\"\n\t\"github.com\/gempir\/gempbot\/pkg\/log\"\n\t\"github.com\/gempir\/gempbot\/pkg\/store\"\n)\n\nvar (\n\tcfg *config.Config\n\tdb *store.Database\n\thelixClient *helix.Client\n)\n\nfunc main() {\n\tcfg = config.FromEnv()\n\tdb = store.NewDatabase(cfg)\n\thelixClient = helix.NewClient(cfg, db)\n\n\ttokens := db.GetAllUserAccessToken()\n\n\tfor _, token := range tokens {\n\t\terr := refreshToken(token)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to refresh token for user %s %s\", token.OwnerTwitchID, err)\n\t\t} else {\n\t\t\tlog.Infof(\"refreshed token for user %s\", token.OwnerTwitchID)\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 500)\n\t}\n}\n\nfunc refreshToken(token store.UserAccessToken) error {\n\tresp, err := helixClient.Client.RefreshUserAccessToken(token.RefreshToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.SaveUserAccessToken(context.Background(), token.OwnerTwitchID, resp.Data.AccessToken, resp.Data.RefreshToken, strings.Join(resp.Data.Scopes, \" \"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Commands from http:\/\/redis.io\/commands#transactions\n\npackage miniredis\n\nimport (\n\t\"github.com\/bsm\/redeo\"\n)\n\n\/\/ commandsTransaction handles MULTI &c.\nfunc commandsTransaction(m *Miniredis, srv *redeo.Server) {\n\tsrv.HandleFunc(\"DISCARD\", m.cmdDiscard)\n\tsrv.HandleFunc(\"EXEC\", m.cmdExec)\n\tsrv.HandleFunc(\"MULTI\", m.cmdMulti)\n\tsrv.HandleFunc(\"UNWATCH\", m.cmdUnwatch)\n\tsrv.HandleFunc(\"WATCH\", m.cmdWatch)\n}\n\n\/\/ MULTI\nfunc (m *Miniredis) cmdMulti(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\n\tif inTx(ctx) {\n\t\treturn redeo.ClientError(\"MULTI calls can not be nested\")\n\t}\n\n\tstartTx(ctx)\n\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ EXEC\nfunc (m *Miniredis) cmdExec(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\n\tif !inTx(ctx) {\n\t\treturn redeo.ClientError(\"EXEC without MULTI\")\n\t}\n\n\tif dirtyTx(ctx) {\n\t\tout.WriteErrorString(\"EXECABORT Transaction discarded because of previous errors.\")\n\t\treturn nil\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Check WATCHed keys.\n\tfor t, version := range ctx.watch {\n\t\tif m.db(t.db).keyVersion[t.key] > version {\n\t\t\t\/\/ Abort! Abort!\n\t\t\tstopTx(ctx)\n\t\t\tout.WriteBulkLen(0)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tout.WriteBulkLen(len(ctx.transaction))\n\tfor _, cb := range ctx.transaction {\n\t\tcb(out, ctx)\n\t}\n\t\/\/ We're done\n\tstopTx(ctx)\n\treturn nil\n}\n\n\/\/ DISCARD\nfunc (m *Miniredis) cmdDiscard(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\tif !inTx(ctx) {\n\t\treturn redeo.ClientError(\"DISCARD without MULTI\")\n\t}\n\n\tstopTx(ctx)\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ WATCH\nfunc (m *Miniredis) cmdWatch(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) == 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\tif inTx(ctx) {\n\t\treturn redeo.ClientError(\"WATCH in MULTI\")\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\tdb := m.db(ctx.selectedDB)\n\n\tfor _, key := range r.Args {\n\t\twatch(db, ctx, key)\n\t}\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ UNWATCH\nfunc (m *Miniredis) cmdUnwatch(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\t\/\/ Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me.\n\tunwatch(getCtx(r.Client()))\n\n\treturn withTx(m, out, r, func(out *redeo.Responder, ctx *connCtx) {\n\t\t\/\/ Do nothing if it's called in a transaction.\n\t\tout.WriteOK()\n\t})\n}\n<commit_msg>wakeup blocked commands on EXEC<commit_after>\/\/ Commands from http:\/\/redis.io\/commands#transactions\n\npackage miniredis\n\nimport (\n\t\"github.com\/bsm\/redeo\"\n)\n\n\/\/ commandsTransaction handles MULTI &c.\nfunc commandsTransaction(m *Miniredis, srv *redeo.Server) {\n\tsrv.HandleFunc(\"DISCARD\", m.cmdDiscard)\n\tsrv.HandleFunc(\"EXEC\", m.cmdExec)\n\tsrv.HandleFunc(\"MULTI\", m.cmdMulti)\n\tsrv.HandleFunc(\"UNWATCH\", m.cmdUnwatch)\n\tsrv.HandleFunc(\"WATCH\", m.cmdWatch)\n}\n\n\/\/ MULTI\nfunc (m *Miniredis) cmdMulti(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\n\tif inTx(ctx) {\n\t\treturn redeo.ClientError(\"MULTI calls can not be nested\")\n\t}\n\n\tstartTx(ctx)\n\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ EXEC\nfunc (m *Miniredis) cmdExec(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\n\tif !inTx(ctx) {\n\t\treturn redeo.ClientError(\"EXEC without MULTI\")\n\t}\n\n\tif dirtyTx(ctx) {\n\t\tout.WriteErrorString(\"EXECABORT Transaction discarded because of previous errors.\")\n\t\treturn nil\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Check WATCHed keys.\n\tfor t, version := range ctx.watch {\n\t\tif m.db(t.db).keyVersion[t.key] > version {\n\t\t\t\/\/ Abort! Abort!\n\t\t\tstopTx(ctx)\n\t\t\tout.WriteBulkLen(0)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tout.WriteBulkLen(len(ctx.transaction))\n\tfor _, cb := range ctx.transaction {\n\t\tcb(out, ctx)\n\t}\n\t\/\/ wake up anyone who waits on anything.\n\tm.signal.Broadcast()\n\n\tstopTx(ctx)\n\treturn nil\n}\n\n\/\/ DISCARD\nfunc (m *Miniredis) cmdDiscard(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\tif !inTx(ctx) {\n\t\treturn redeo.ClientError(\"DISCARD without MULTI\")\n\t}\n\n\tstopTx(ctx)\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ WATCH\nfunc (m *Miniredis) cmdWatch(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) == 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\tctx := getCtx(r.Client())\n\tif inTx(ctx) {\n\t\treturn redeo.ClientError(\"WATCH in MULTI\")\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\tdb := m.db(ctx.selectedDB)\n\n\tfor _, key := range r.Args {\n\t\twatch(db, ctx, key)\n\t}\n\tout.WriteOK()\n\treturn nil\n}\n\n\/\/ UNWATCH\nfunc (m *Miniredis) cmdUnwatch(out *redeo.Responder, r *redeo.Request) error {\n\tif len(r.Args) != 0 {\n\t\tsetDirty(r.Client())\n\t\treturn r.WrongNumberOfArgs()\n\t}\n\tif !m.handleAuth(r.Client(), out) {\n\t\treturn nil\n\t}\n\n\t\/\/ Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me.\n\tunwatch(getCtx(r.Client()))\n\n\treturn withTx(m, out, r, func(out *redeo.Responder, ctx *connCtx) {\n\t\t\/\/ Do nothing if it's called in a transaction.\n\t\tout.WriteOK()\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/spf13\/pflag\"\n)\n\nfunc helpPrinter() {\n\tfmt.Printf(\"Usage:\\ncksum <File Name>\\n\")\n\tpflag.PrintDefaults()\n\tos.Exit(0)\n}\n\nfunc versionPrinter() {\n\tfmt.Println(\"cksum utility, URoot Version.\")\n\tos.Exit(0)\n}\n\nfunc getInput(fileName string) (input []byte, err error) {\n\tif fileName != \"\" {\n\t\treturn ioutil.ReadFile(fileName)\n\t}\n\treturn ioutil.ReadAll(os.Stdin)\n}\n\nfunc calculateCksum(input []byte) uint32 {\n\n\tvar cksumTable = []uint32{\n\t\t\/*\n\t\t * Following table is originally contributed by\n\t\t * Q. Frank Xia et. al (qx@math.columbia.edu)\n\t\t * under GPL License.\n\t\t * http:\/\/ftp-archive.freebsd.org\/pub\/FreeBSD-Archive\/\n\t\t * old-releases\/i386\/1.0-RELEASE\/ports\/textutils\/src\/cksum.c\n\t\t *\n\t\t * Polynomial same as Linux cksum utility i.e. 0x04C11DB7\n\t\t *\/\n\t\t0x0,\n\t\t0x04C11DB7, 0x09823B6E, 0x0D4326D9, 0x130476DC, 0x17C56B6B,\n\t\t0x1A864DB2, 0x1E475005, 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6,\n\t\t0x2B4BCB61, 0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD,\n\t\t0x4C11DB70, 0x48D0C6C7, 0x4593E01E, 0x4152FDA9, 0x5F15ADAC,\n\t\t0x5BD4B01B, 0x569796C2, 0x52568B75, 0x6A1936C8, 0x6ED82B7F,\n\t\t0x639B0DA6, 0x675A1011, 0x791D4014, 0x7DDC5DA3, 0x709F7B7A,\n\t\t0x745E66CD, 0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039,\n\t\t0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5, 0xBE2B5B58,\n\t\t0xBAEA46EF, 0xB7A96036, 0xB3687D81, 0xAD2F2D84, 0xA9EE3033,\n\t\t0xA4AD16EA, 0xA06C0B5D, 0xD4326D90, 0xD0F37027, 0xDDB056FE,\n\t\t0xD9714B49, 0xC7361B4C, 0xC3F706FB, 0xCEB42022, 0xCA753D95,\n\t\t0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1, 0xE13EF6F4,\n\t\t0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D, 0x34867077, 0x30476DC0,\n\t\t0x3D044B19, 0x39C556AE, 0x278206AB, 0x23431B1C, 0x2E003DC5,\n\t\t0x2AC12072, 0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16,\n\t\t0x018AEB13, 0x054BF6A4, 0x0808D07D, 0x0CC9CDCA, 0x7897AB07,\n\t\t0x7C56B6B0, 0x71159069, 0x75D48DDE, 0x6B93DDDB, 0x6F52C06C,\n\t\t0x6211E6B5, 0x66D0FB02, 0x5E9F46BF, 0x5A5E5B08, 0x571D7DD1,\n\t\t0x53DC6066, 0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA,\n\t\t0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E, 0xBFA1B04B,\n\t\t0xBB60ADFC, 0xB6238B25, 0xB2E29692, 0x8AAD2B2F, 0x8E6C3698,\n\t\t0x832F1041, 0x87EE0DF6, 0x99A95DF3, 0x9D684044, 0x902B669D,\n\t\t0x94EA7B2A, 0xE0B41DE7, 0xE4750050, 0xE9362689, 0xEDF73B3E,\n\t\t0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2, 0xC6BCF05F,\n\t\t0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686, 0xD5B88683, 0xD1799B34,\n\t\t0xDC3ABDED, 0xD8FBA05A, 0x690CE0EE, 0x6DCDFD59, 0x608EDB80,\n\t\t0x644FC637, 0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB,\n\t\t0x4F040D56, 0x4BC510E1, 0x46863638, 0x42472B8F, 0x5C007B8A,\n\t\t0x58C1663D, 0x558240E4, 0x51435D53, 0x251D3B9E, 0x21DC2629,\n\t\t0x2C9F00F0, 0x285E1D47, 0x36194D42, 0x32D850F5, 0x3F9B762C,\n\t\t0x3B5A6B9B, 0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF,\n\t\t0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623, 0xF12F560E,\n\t\t0xF5EE4BB9, 0xF8AD6D60, 0xFC6C70D7, 0xE22B20D2, 0xE6EA3D65,\n\t\t0xEBA91BBC, 0xEF68060B, 0xD727BBB6, 0xD3E6A601, 0xDEA580D8,\n\t\t0xDA649D6F, 0xC423CD6A, 0xC0E2D0DD, 0xCDA1F604, 0xC960EBB3,\n\t\t0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7, 0xAE3AFBA2,\n\t\t0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B, 0x9B3660C6, 0x9FF77D71,\n\t\t0x92B45BA8, 0x9675461F, 0x8832161A, 0x8CF30BAD, 0x81B02D74,\n\t\t0x857130C3, 0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640,\n\t\t0x4E8EE645, 0x4A4FFBF2, 0x470CDD2B, 0x43CDC09C, 0x7B827D21,\n\t\t0x7F436096, 0x7200464F, 0x76C15BF8, 0x68860BFD, 0x6C47164A,\n\t\t0x61043093, 0x65C52D24, 0x119B4BE9, 0x155A565E, 0x18197087,\n\t\t0x1CD86D30, 0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC,\n\t\t0x3793A651, 0x3352BBE6, 0x3E119D3F, 0x3AD08088, 0x2497D08D,\n\t\t0x2056CD3A, 0x2D15EBE3, 0x29D4F654, 0xC5A92679, 0xC1683BCE,\n\t\t0xCC2B1D17, 0xC8EA00A0, 0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB,\n\t\t0xDBEE767C, 0xE3A1CBC1, 0xE760D676, 0xEA23F0AF, 0xEEE2ED18,\n\t\t0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4, 0x89B8FD09,\n\t\t0x8D79E0BE, 0x803AC667, 0x84FBDBD0, 0x9ABC8BD5, 0x9E7D9662,\n\t\t0x933EB0BB, 0x97FFAD0C, 0xAFB010B1, 0xAB710D06, 0xA6322BDF,\n\t\t0xA2F33668, 0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4,\n\t}\n\n\tcksum := uint32(0)\n\tfor i := 0; i < len(input); i++ {\n\t\tcksum = (cksum << 8) ^ uint32(cksumTable[((cksum>>24)^uint32(input[i]))&0xFF])\n\t\ttestVal := cksum\n\t\ttestVal = ^testVal\n\t}\n\t\/\/ checksum for data length\n\tdataLength := len(input)\n\tfor dataLength > 0 {\n\t\tcksum = (cksum << 8) ^ uint32(cksumTable[((cksum>>24)^uint32(dataLength))&0xFF])\n\t\tdataLength = (dataLength >> 8)\n\t}\n\treturn (^cksum)\n}\n\nfunc main() {\n\tvar (\n\t\thelp bool\n\t\tversion bool\n\t)\n\tcliArgs := \"\"\n\tflag.BoolVar(&help, \"help\", \"h\", false, \"Show this help and exit\")\n\tflag.BoolVar(&version, \"version\", \"v\", false, \"Print Version\")\n\tflag.Parse()\n\n\tif help {\n\t\thelpPrinter()\n\t}\n\n\tif version {\n\t\tversionPrinter()\n\t}\n\tif len(os.Args) >= 2 {\n\t\tcliArgs = os.Args[1]\n\t}\n\tinput, err := getInput(cliArgs)\n\tif err != nil {\n\t\treturn\n\t}\n\tfmt.Println(calculateCksum(input), len(input), cliArgs)\n}\n<commit_msg>* Fixed build issues.<commit_after>\/\/ Copyright 2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/spf13\/pflag\"\n)\n\nfunc helpPrinter() {\n\tfmt.Printf(\"Usage:\\ncksum <File Name>\\n\")\n\tpflag.PrintDefaults()\n\tos.Exit(0)\n}\n\nfunc versionPrinter() {\n\tfmt.Println(\"cksum utility, URoot Version.\")\n\tos.Exit(0)\n}\n\nfunc getInput(fileName string) (input []byte, err error) {\n\tif fileName != \"\" {\n\t\treturn ioutil.ReadFile(fileName)\n\t}\n\treturn ioutil.ReadAll(os.Stdin)\n}\n\nfunc calculateCksum(input []byte) uint32 {\n\n\tvar cksumTable = []uint32{\n\t\t\/*\n\t\t * Following table is originally contributed by\n\t\t * Q. Frank Xia et. al (qx@math.columbia.edu)\n\t\t * under GPL License.\n\t\t * http:\/\/ftp-archive.freebsd.org\/pub\/FreeBSD-Archive\/\n\t\t * old-releases\/i386\/1.0-RELEASE\/ports\/textutils\/src\/cksum.c\n\t\t *\n\t\t * Polynomial same as Linux cksum utility i.e. 0x04C11DB7\n\t\t *\/\n\t\t0x0,\n\t\t0x04C11DB7, 0x09823B6E, 0x0D4326D9, 0x130476DC, 0x17C56B6B,\n\t\t0x1A864DB2, 0x1E475005, 0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6,\n\t\t0x2B4BCB61, 0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD,\n\t\t0x4C11DB70, 0x48D0C6C7, 0x4593E01E, 0x4152FDA9, 0x5F15ADAC,\n\t\t0x5BD4B01B, 0x569796C2, 0x52568B75, 0x6A1936C8, 0x6ED82B7F,\n\t\t0x639B0DA6, 0x675A1011, 0x791D4014, 0x7DDC5DA3, 0x709F7B7A,\n\t\t0x745E66CD, 0x9823B6E0, 0x9CE2AB57, 0x91A18D8E, 0x95609039,\n\t\t0x8B27C03C, 0x8FE6DD8B, 0x82A5FB52, 0x8664E6E5, 0xBE2B5B58,\n\t\t0xBAEA46EF, 0xB7A96036, 0xB3687D81, 0xAD2F2D84, 0xA9EE3033,\n\t\t0xA4AD16EA, 0xA06C0B5D, 0xD4326D90, 0xD0F37027, 0xDDB056FE,\n\t\t0xD9714B49, 0xC7361B4C, 0xC3F706FB, 0xCEB42022, 0xCA753D95,\n\t\t0xF23A8028, 0xF6FB9D9F, 0xFBB8BB46, 0xFF79A6F1, 0xE13EF6F4,\n\t\t0xE5FFEB43, 0xE8BCCD9A, 0xEC7DD02D, 0x34867077, 0x30476DC0,\n\t\t0x3D044B19, 0x39C556AE, 0x278206AB, 0x23431B1C, 0x2E003DC5,\n\t\t0x2AC12072, 0x128E9DCF, 0x164F8078, 0x1B0CA6A1, 0x1FCDBB16,\n\t\t0x018AEB13, 0x054BF6A4, 0x0808D07D, 0x0CC9CDCA, 0x7897AB07,\n\t\t0x7C56B6B0, 0x71159069, 0x75D48DDE, 0x6B93DDDB, 0x6F52C06C,\n\t\t0x6211E6B5, 0x66D0FB02, 0x5E9F46BF, 0x5A5E5B08, 0x571D7DD1,\n\t\t0x53DC6066, 0x4D9B3063, 0x495A2DD4, 0x44190B0D, 0x40D816BA,\n\t\t0xACA5C697, 0xA864DB20, 0xA527FDF9, 0xA1E6E04E, 0xBFA1B04B,\n\t\t0xBB60ADFC, 0xB6238B25, 0xB2E29692, 0x8AAD2B2F, 0x8E6C3698,\n\t\t0x832F1041, 0x87EE0DF6, 0x99A95DF3, 0x9D684044, 0x902B669D,\n\t\t0x94EA7B2A, 0xE0B41DE7, 0xE4750050, 0xE9362689, 0xEDF73B3E,\n\t\t0xF3B06B3B, 0xF771768C, 0xFA325055, 0xFEF34DE2, 0xC6BCF05F,\n\t\t0xC27DEDE8, 0xCF3ECB31, 0xCBFFD686, 0xD5B88683, 0xD1799B34,\n\t\t0xDC3ABDED, 0xD8FBA05A, 0x690CE0EE, 0x6DCDFD59, 0x608EDB80,\n\t\t0x644FC637, 0x7A089632, 0x7EC98B85, 0x738AAD5C, 0x774BB0EB,\n\t\t0x4F040D56, 0x4BC510E1, 0x46863638, 0x42472B8F, 0x5C007B8A,\n\t\t0x58C1663D, 0x558240E4, 0x51435D53, 0x251D3B9E, 0x21DC2629,\n\t\t0x2C9F00F0, 0x285E1D47, 0x36194D42, 0x32D850F5, 0x3F9B762C,\n\t\t0x3B5A6B9B, 0x0315D626, 0x07D4CB91, 0x0A97ED48, 0x0E56F0FF,\n\t\t0x1011A0FA, 0x14D0BD4D, 0x19939B94, 0x1D528623, 0xF12F560E,\n\t\t0xF5EE4BB9, 0xF8AD6D60, 0xFC6C70D7, 0xE22B20D2, 0xE6EA3D65,\n\t\t0xEBA91BBC, 0xEF68060B, 0xD727BBB6, 0xD3E6A601, 0xDEA580D8,\n\t\t0xDA649D6F, 0xC423CD6A, 0xC0E2D0DD, 0xCDA1F604, 0xC960EBB3,\n\t\t0xBD3E8D7E, 0xB9FF90C9, 0xB4BCB610, 0xB07DABA7, 0xAE3AFBA2,\n\t\t0xAAFBE615, 0xA7B8C0CC, 0xA379DD7B, 0x9B3660C6, 0x9FF77D71,\n\t\t0x92B45BA8, 0x9675461F, 0x8832161A, 0x8CF30BAD, 0x81B02D74,\n\t\t0x857130C3, 0x5D8A9099, 0x594B8D2E, 0x5408ABF7, 0x50C9B640,\n\t\t0x4E8EE645, 0x4A4FFBF2, 0x470CDD2B, 0x43CDC09C, 0x7B827D21,\n\t\t0x7F436096, 0x7200464F, 0x76C15BF8, 0x68860BFD, 0x6C47164A,\n\t\t0x61043093, 0x65C52D24, 0x119B4BE9, 0x155A565E, 0x18197087,\n\t\t0x1CD86D30, 0x029F3D35, 0x065E2082, 0x0B1D065B, 0x0FDC1BEC,\n\t\t0x3793A651, 0x3352BBE6, 0x3E119D3F, 0x3AD08088, 0x2497D08D,\n\t\t0x2056CD3A, 0x2D15EBE3, 0x29D4F654, 0xC5A92679, 0xC1683BCE,\n\t\t0xCC2B1D17, 0xC8EA00A0, 0xD6AD50A5, 0xD26C4D12, 0xDF2F6BCB,\n\t\t0xDBEE767C, 0xE3A1CBC1, 0xE760D676, 0xEA23F0AF, 0xEEE2ED18,\n\t\t0xF0A5BD1D, 0xF464A0AA, 0xF9278673, 0xFDE69BC4, 0x89B8FD09,\n\t\t0x8D79E0BE, 0x803AC667, 0x84FBDBD0, 0x9ABC8BD5, 0x9E7D9662,\n\t\t0x933EB0BB, 0x97FFAD0C, 0xAFB010B1, 0xAB710D06, 0xA6322BDF,\n\t\t0xA2F33668, 0xBCB4666D, 0xB8757BDA, 0xB5365D03, 0xB1F740B4,\n\t}\n\n\tcksum := uint32(0)\n\tfor i := 0; i < len(input); i++ {\n\t\tcksum = (cksum << 8) ^ uint32(cksumTable[((cksum>>24)^uint32(input[i]))&0xFF])\n\t}\n\t\/\/ checksum for data length\n\tdataLength := len(input)\n\tfor dataLength > 0 {\n\t\tcksum = (cksum << 8) ^ uint32(cksumTable[((cksum>>24)^uint32(dataLength))&0xFF])\n\t\tdataLength = (dataLength >> 8)\n\t}\n\treturn (^cksum)\n}\n\nfunc main() {\n\tvar (\n\t\thelp bool\n\t\tversion bool\n\t)\n\tcliArgs := \"\"\n\tflag.BoolVar(&help, \"help\", \"h\", false, \"Show this help and exit\")\n\tflag.BoolVar(&version, \"version\", \"v\", false, \"Print Version\")\n\tflag.Parse()\n\n\tif help {\n\t\thelpPrinter()\n\t}\n\n\tif version {\n\t\tversionPrinter()\n\t}\n\tif len(os.Args) >= 2 {\n\t\tcliArgs = os.Args[1]\n\t}\n\tinput, err := getInput(cliArgs)\n\tif err != nil {\n\t\treturn\n\t}\n\tfmt.Println(calculateCksum(input), len(input), cliArgs)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ send.go - mixnet client send\n\/\/ Copyright (C) 2018 David Stainton.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as\n\/\/ published by the Free Software Foundation, either version 3 of the\n\/\/ License, or (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage session\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\tcConstants \"github.com\/katzenpost\/client\/constants\"\n\t\"github.com\/katzenpost\/core\/constants\"\n\t\"github.com\/katzenpost\/core\/crypto\/rand\"\n\tsConstants \"github.com\/katzenpost\/core\/sphinx\/constants\"\n)\n\nconst roundTripTimeSlop = time.Duration(88 * time.Second)\n\nvar ReplyTimeoutError = errors.New(\"Failure waiting for reply, timeout reached\")\n\nfunc (s *Session) sendNext() error {\n\tmsg, err := s.egressQueue.Peek()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg == nil {\n\t\treturn errors.New(\"send next failure, message is nil\")\n\t}\n\tm, ok := msg.(*Message)\n\tif !ok {\n\t\treturn errors.New(\"send next failure, unknown message type\")\n\t}\n\terr = s.doSend(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s.egressQueue.Pop()\n\treturn err\n}\n\nfunc (s *Session) doSend(msg *Message) error {\n\tsurbID := [sConstants.SURBIDLength]byte{}\n\t_, err := io.ReadFull(rand.Reader, surbID[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tidStr := fmt.Sprintf(\"[%v]\", hex.EncodeToString(surbID[:]))\n\ts.log.Debugf(\"doSend with SURB ID %x\", idStr)\n\tkey := []byte{}\n\tvar eta time.Duration\n\tif msg.WithSURB {\n\t\tkey, eta, err = s.minclient.SendCiphertext(msg.Recipient, msg.Provider, &surbID, msg.Payload)\n\t} else {\n\t\terr = s.minclient.SendUnreliableCiphertext(msg.Recipient, msg.Provider, msg.Payload)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.WithSURB {\n\t\ts.log.Debugf(\"doSend setting ReplyETA to %v\", eta)\n\t\tmsg.Key = key\n\t\tmsg.SentAt = time.Now()\n\t\tmsg.Sent = true\n\t\tmsg.ReplyETA = eta\n\t\ts.surbIDMap.Store(surbID, msg)\n\t}\n\tif msg.IsBlocking {\n\t\tsentWaitChanRaw, ok := s.sentWaitChanMap.Load(*msg.ID)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"Impossible failure, sentWaitChan not found for message ID %x\", *msg.ID)\n\t\t\ts.log.Error(err.Error())\n\t\t\ts.fatalErrCh <- err\n\t\t}\n\t\tsentWaitChan := sentWaitChanRaw.(chan *Message)\n\t\tsentWaitChan <- msg\n\t} else {\n\t\ts.eventCh.In() <- &MessageSentEvent{\n\t\t\tMessageID: msg.ID,\n\t\t\tErr: nil, \/\/ XXX send error\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Session) sendLoopDecoy() error {\n\ts.log.Info(\"sending loop decoy\")\n\tconst loopService = \"loop\"\n\tserviceDesc, err := s.GetService(loopService)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err = io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := &Message{\n\t\tID: &id,\n\t\tRecipient: serviceDesc.Name,\n\t\tProvider: serviceDesc.Provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: true,\n\t\tIsDecoy: true,\n\t}\n\tdefer s.incrementDecoyLoopTally()\n\treturn s.doSend(msg)\n}\n\nfunc (s *Session) sendDropDecoy() error {\n\ts.log.Info(\"sending drop decoy\")\n\tconst loopService = \"loop\"\n\tserviceDesc, err := s.GetService(loopService)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err = io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := &Message{\n\t\tID: &id,\n\t\tRecipient: serviceDesc.Name,\n\t\tProvider: serviceDesc.Provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: false,\n\t\tIsDecoy: true,\n\t}\n\treturn s.doSend(msg)\n}\n\nfunc (s *Session) composeMessage(recipient, provider string, message []byte, isBlocking bool) (*Message, error) {\n\ts.log.Debug(\"SendMessage\")\n\tif len(message) > constants.UserForwardPayloadLength-4 {\n\t\treturn nil, fmt.Errorf(\"invalid message size: %v\", len(message))\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tbinary.BigEndian.PutUint32(payload[:4], uint32(len(message)))\n\tcopy(payload[4:], message)\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err := io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar msg = Message{\n\t\tID: &id,\n\t\tRecipient: recipient,\n\t\tProvider: provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: true,\n\t\tSURBType: cConstants.SurbTypeKaetzchen,\n\t\tIsBlocking: isBlocking,\n\t}\n\treturn &msg, nil\n}\n\n\/\/ SendUnreliableMessage asynchronously sends message without any automatic retransmissions.\nfunc (s *Session) SendUnreliableMessage(recipient, provider string, message []byte) (MessageID, error) {\n\tmsg, err := s.composeMessage(recipient, provider, message, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = s.egressQueue.Push(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn msg.ID, nil\n}\n\nfunc (s *Session) BlockingSendUnreliableMessage(recipient, provider string, message []byte) ([]byte, error) {\n\tmsg, err := s.composeMessage(recipient, provider, message, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsentWaitChan := make(chan *Message)\n\ts.sentWaitChanMap.Store(*msg.ID, sentWaitChan)\n\n\treplyWaitChan := make(chan []byte)\n\ts.replyWaitChanMap.Store(*msg.ID, replyWaitChan)\n\n\terr = s.egressQueue.Push(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ wait until sent so that we know the ReplyETA for the waiting below\n\tsentMessageRaw := <-sentWaitChan\n\tsentMessage := sentMessageRaw.(*Message)\n\ts.sentWaitChanMap.Delete(*msg.ID)\n\t\/\/ wait for reply or round trip timeout\n\tselect {\n\tcase reply := <-replyWaitChan:\n\t\ts.replyWaitChanMap.Delete(*msg.ID)\n\t\treturn reply, nil\n\tcase <-time.After(sentMessage.ReplyETA + roundTripTimeSlop):\n\t\treturn nil, ReplyTimeoutError\n\t}\n\t\/\/ unreachable\n}\n<commit_msg>Fix sentWaitChan usage<commit_after>\/\/ send.go - mixnet client send\n\/\/ Copyright (C) 2018 David Stainton.\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as\n\/\/ published by the Free Software Foundation, either version 3 of the\n\/\/ License, or (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage session\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\tcConstants \"github.com\/katzenpost\/client\/constants\"\n\t\"github.com\/katzenpost\/core\/constants\"\n\t\"github.com\/katzenpost\/core\/crypto\/rand\"\n\tsConstants \"github.com\/katzenpost\/core\/sphinx\/constants\"\n)\n\nconst roundTripTimeSlop = time.Duration(88 * time.Second)\n\nvar ReplyTimeoutError = errors.New(\"Failure waiting for reply, timeout reached\")\n\nfunc (s *Session) sendNext() error {\n\tmsg, err := s.egressQueue.Peek()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg == nil {\n\t\treturn errors.New(\"send next failure, message is nil\")\n\t}\n\tm, ok := msg.(*Message)\n\tif !ok {\n\t\treturn errors.New(\"send next failure, unknown message type\")\n\t}\n\terr = s.doSend(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s.egressQueue.Pop()\n\treturn err\n}\n\nfunc (s *Session) doSend(msg *Message) error {\n\tsurbID := [sConstants.SURBIDLength]byte{}\n\t_, err := io.ReadFull(rand.Reader, surbID[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tidStr := fmt.Sprintf(\"[%v]\", hex.EncodeToString(surbID[:]))\n\ts.log.Debugf(\"doSend with SURB ID %x\", idStr)\n\tkey := []byte{}\n\tvar eta time.Duration\n\tif msg.WithSURB {\n\t\tkey, eta, err = s.minclient.SendCiphertext(msg.Recipient, msg.Provider, &surbID, msg.Payload)\n\t} else {\n\t\terr = s.minclient.SendUnreliableCiphertext(msg.Recipient, msg.Provider, msg.Payload)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.WithSURB {\n\t\ts.log.Debugf(\"doSend setting ReplyETA to %v\", eta)\n\t\tmsg.Key = key\n\t\tmsg.SentAt = time.Now()\n\t\tmsg.Sent = true\n\t\tmsg.ReplyETA = eta\n\t\ts.surbIDMap.Store(surbID, msg)\n\t}\n\tif msg.IsBlocking {\n\t\tsentWaitChanRaw, ok := s.sentWaitChanMap.Load(*msg.ID)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"Impossible failure, sentWaitChan not found for message ID %x\", *msg.ID)\n\t\t\ts.log.Error(err.Error())\n\t\t\ts.fatalErrCh <- err\n\t\t}\n\t\tsentWaitChan := sentWaitChanRaw.(chan *Message)\n\t\tsentWaitChan <- msg\n\t} else {\n\t\ts.eventCh.In() <- &MessageSentEvent{\n\t\t\tMessageID: msg.ID,\n\t\t\tErr: nil, \/\/ XXX send error\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Session) sendLoopDecoy() error {\n\ts.log.Info(\"sending loop decoy\")\n\tconst loopService = \"loop\"\n\tserviceDesc, err := s.GetService(loopService)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err = io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := &Message{\n\t\tID: &id,\n\t\tRecipient: serviceDesc.Name,\n\t\tProvider: serviceDesc.Provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: true,\n\t\tIsDecoy: true,\n\t}\n\tdefer s.incrementDecoyLoopTally()\n\treturn s.doSend(msg)\n}\n\nfunc (s *Session) sendDropDecoy() error {\n\ts.log.Info(\"sending drop decoy\")\n\tconst loopService = \"loop\"\n\tserviceDesc, err := s.GetService(loopService)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err = io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := &Message{\n\t\tID: &id,\n\t\tRecipient: serviceDesc.Name,\n\t\tProvider: serviceDesc.Provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: false,\n\t\tIsDecoy: true,\n\t}\n\treturn s.doSend(msg)\n}\n\nfunc (s *Session) composeMessage(recipient, provider string, message []byte, isBlocking bool) (*Message, error) {\n\ts.log.Debug(\"SendMessage\")\n\tif len(message) > constants.UserForwardPayloadLength-4 {\n\t\treturn nil, fmt.Errorf(\"invalid message size: %v\", len(message))\n\t}\n\tpayload := [constants.UserForwardPayloadLength]byte{}\n\tbinary.BigEndian.PutUint32(payload[:4], uint32(len(message)))\n\tcopy(payload[4:], message)\n\tid := [cConstants.MessageIDLength]byte{}\n\t_, err := io.ReadFull(rand.Reader, id[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar msg = Message{\n\t\tID: &id,\n\t\tRecipient: recipient,\n\t\tProvider: provider,\n\t\tPayload: payload[:],\n\t\tWithSURB: true,\n\t\tSURBType: cConstants.SurbTypeKaetzchen,\n\t\tIsBlocking: isBlocking,\n\t}\n\treturn &msg, nil\n}\n\n\/\/ SendUnreliableMessage asynchronously sends message without any automatic retransmissions.\nfunc (s *Session) SendUnreliableMessage(recipient, provider string, message []byte) (MessageID, error) {\n\tmsg, err := s.composeMessage(recipient, provider, message, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = s.egressQueue.Push(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn msg.ID, nil\n}\n\nfunc (s *Session) BlockingSendUnreliableMessage(recipient, provider string, message []byte) ([]byte, error) {\n\tmsg, err := s.composeMessage(recipient, provider, message, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsentWaitChan := make(chan *Message)\n\ts.sentWaitChanMap.Store(*msg.ID, sentWaitChan)\n\n\treplyWaitChan := make(chan []byte)\n\ts.replyWaitChanMap.Store(*msg.ID, replyWaitChan)\n\n\terr = s.egressQueue.Push(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ wait until sent so that we know the ReplyETA for the waiting below\n\tsentMessage := <-sentWaitChan\n\ts.sentWaitChanMap.Delete(*msg.ID)\n\t\/\/ wait for reply or round trip timeout\n\tselect {\n\tcase reply := <-replyWaitChan:\n\t\ts.replyWaitChanMap.Delete(*msg.ID)\n\t\treturn reply, nil\n\tcase <-time.After(sentMessage.ReplyETA + roundTripTimeSlop):\n\t\treturn nil, ReplyTimeoutError\n\t}\n\t\/\/ unreachable\n}\n<|endoftext|>"} {"text":"<commit_before>package fetch\n\nimport (\n\t\"io\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/slotix\/dataflowkit\/errs\"\n)\n\n\/\/RobotsTxtMiddleware checks if scraping of specified resource is allowed by robots.txt\nfunc RobotsTxtMiddleware() ServiceMiddleware {\n\treturn func(next Service) Service {\n\t\treturn robotstxtMiddleware{next}\n\t}\n}\n\ntype robotstxtMiddleware struct {\n\tService\n}\n\n\/\/Fetch gets response from req.URL, then passes response.URL to Robots.txt validator.\n\/\/issue #1 https:\/\/github.com\/slotix\/dataflowkit\/issues\/1\nfunc (mw robotstxtMiddleware) Fetch(req FetchRequester) (out io.ReadCloser, err error) {\n\turl := req.GetURL()\n\t\/\/to avoid recursion while retrieving robots.txt\n\tif !IsRobotsTxt(url) {\n\t\trobotsData, err := RobotstxtData(url)\n\t\tif err != nil {\n\t\t\trobotsURL, err1 := assembleRobotstxtURL(url)\n\t\t\tif err1 != nil {\n\t\t\t\treturn nil, err1\n\t\t\t}\n\t\t\t\/\/robots.txt may be empty but we have to continue processing the page\n\t\t\tlogger.WithFields(\n\t\t\t\tlogrus.Fields{\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Robots.txt URL: \", robotsURL)\n\n\t\t}\n\t\tif !AllowedByRobots(url, robotsData) {\n\t\t\t\/\/no need a body retrieve to get information about redirects\n\t\t\tr := BaseFetcherRequest{URL: url, Method: \"HEAD\"}\n\t\t\tresp, err := fetchRobots(r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/if initial URL is not equal to final URL (Redirected) f.e. domains are different\n\t\t\t\/\/then try to fetch robots following by final URL\n\t\t\tfinalURL := resp.GetURL()\n\t\t\t\/\/\tfinalURL := resp.Request.URL.String()\n\t\t\tif url != finalURL {\n\t\t\t\trobotsData, err = RobotstxtData(finalURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif !AllowedByRobots(finalURL, robotsData) {\n\t\t\t\t\treturn nil, &errs.ForbiddenByRobots{finalURL}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, &errs.ForbiddenByRobots{url}\n\t\t\t}\n\t\t\t\/\/\tbfReq := BaseFetcherRequest{URL: finalURL}\n\t\t\t\/\/\treq = bfReq\n\t\t}\n\n\t}\n\t\/\/out, err = mw.Service.Fetch(req)\n\t\/\/if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/}\n\t\/\/return response, err\n\treturn mw.Service.Fetch(req)\n}\n\n\/\/Response passes req to the next middleware.\nfunc (mw robotstxtMiddleware) Response(req FetchRequester) (FetchResponser, error) {\n\treturn mw.Service.Response(req)\n}\n<commit_msg>move logging from robots.txt to transport layer<commit_after>package fetch\n\nimport (\n\t\"io\"\n\n\t\"github.com\/slotix\/dataflowkit\/errs\"\n)\n\n\/\/RobotsTxtMiddleware checks if scraping of specified resource is allowed by robots.txt\nfunc RobotsTxtMiddleware() ServiceMiddleware {\n\treturn func(next Service) Service {\n\t\treturn robotstxtMiddleware{next}\n\t}\n}\n\ntype robotstxtMiddleware struct {\n\tService\n}\n\n\/\/Fetch gets response from req.URL, then passes response.URL to Robots.txt validator.\n\/\/issue #1 https:\/\/github.com\/slotix\/dataflowkit\/issues\/1\nfunc (mw robotstxtMiddleware) Fetch(req FetchRequester) (out io.ReadCloser, err error) {\n\turl := req.GetURL()\n\t\/\/to avoid recursion while retrieving robots.txt\n\tif !IsRobotsTxt(url) {\n\t\trobotsData, _ := RobotstxtData(url)\n\t\t\/\/robots.txt may be empty but we have to continue processing the page\n\t\t\n\t\t\/\/if err != nil {\n\t\t\t\/\/ robotsURL, err1 := assembleRobotstxtURL(url)\n\t\t\t\/\/ if err1 != nil {\n\t\t\t\/\/ \treturn nil, err1\n\t\t\t\/\/ }\n\t\t\t\n\t\t\t\/\/ logger.WithFields(\n\t\t\t\/\/ \tlogrus.Fields{\n\t\t\t\/\/ \t\t\"err\": err,\n\t\t\t\/\/ \t}).Warn(\"Robots.txt URL: \", robotsURL)\n\n\t\t\/\/}\n\t\tif !AllowedByRobots(url, robotsData) {\n\t\t\t\/\/no need a body retrieve to get information about redirects\n\t\t\tr := BaseFetcherRequest{URL: url, Method: \"HEAD\"}\n\t\t\tresp, err := fetchRobots(r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/if initial URL is not equal to final URL (Redirected) f.e. domains are different\n\t\t\t\/\/then try to fetch robots following by final URL\n\t\t\tfinalURL := resp.GetURL()\n\t\t\t\/\/\tfinalURL := resp.Request.URL.String()\n\t\t\tif url != finalURL {\n\t\t\t\trobotsData, err = RobotstxtData(finalURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif !AllowedByRobots(finalURL, robotsData) {\n\t\t\t\t\treturn nil, &errs.ForbiddenByRobots{finalURL}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, &errs.ForbiddenByRobots{url}\n\t\t\t}\n\t\t\t\/\/\tbfReq := BaseFetcherRequest{URL: finalURL}\n\t\t\t\/\/\treq = bfReq\n\t\t}\n\n\t}\n\t\/\/out, err = mw.Service.Fetch(req)\n\t\/\/if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/}\n\t\/\/return response, err\n\treturn mw.Service.Fetch(req)\n}\n\n\/\/Response passes req to the next middleware.\nfunc (mw robotstxtMiddleware) Response(req FetchRequester) (FetchResponser, error) {\n\treturn mw.Service.Response(req)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage frontend\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/postgres\"\n)\n\n\/\/ TabSettings defines tab-specific metadata.\ntype TabSettings struct {\n\t\/\/ Name is the tab name used in the URL.\n\tName string\n\n\t\/\/ DisplayName is the formatted tab name.\n\tDisplayName string\n\n\t\/\/ AlwaysShowDetails defines whether the tab content can be shown even if the\n\t\/\/ package is not determined to be redistributable.\n\tAlwaysShowDetails bool\n\n\t\/\/ TemplateName is the name of the template used to render the\n\t\/\/ corresponding tab, as defined in Server.templates.\n\tTemplateName string\n\n\t\/\/ Disabled indicates whether a tab should be displayed as disabled.\n\tDisabled bool\n}\n\nvar (\n\tpackageTabSettings = []TabSettings{\n\t\t{\n\t\t\tName: \"doc\",\n\t\t\tDisplayName: \"Doc\",\n\t\t\tTemplateName: \"pkg_doc.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"overview\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Overview\",\n\t\t\tTemplateName: \"overview.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"subdirectories\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Subdirectories\",\n\t\t\tTemplateName: \"subdirectories.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"versions\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Versions\",\n\t\t\tTemplateName: \"versions.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"imports\",\n\t\t\tDisplayName: \"Imports\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tTemplateName: \"pkg_imports.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"importedby\",\n\t\t\tDisplayName: \"Imported By\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tTemplateName: \"pkg_importedby.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"licenses\",\n\t\t\tDisplayName: \"Licenses\",\n\t\t\tTemplateName: \"licenses.tmpl\",\n\t\t},\n\t}\n\tpackageTabLookup = make(map[string]TabSettings)\n\n\tdirectoryTabSettings = make([]TabSettings, len(packageTabSettings))\n\tdirectoryTabLookup = make(map[string]TabSettings)\n\n\tmoduleTabSettings = []TabSettings{\n\t\t{\n\t\t\tName: \"overview\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Overview\",\n\t\t\tTemplateName: \"overview.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"packages\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Packages\",\n\t\t\tTemplateName: \"subdirectories.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"versions\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Versions\",\n\t\t\tTemplateName: \"versions.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"licenses\",\n\t\t\tDisplayName: \"Licenses\",\n\t\t\tTemplateName: \"licenses.tmpl\",\n\t\t},\n\t}\n\tmoduleTabLookup = make(map[string]TabSettings)\n)\n\n\/\/ validDirectoryTabs indicates if a tab is enabled in the directory view.\nvar validDirectoryTabs = map[string]bool{\n\t\"licenses\": true,\n\t\"overview\": true,\n\t\"subdirectories\": true,\n}\n\nfunc init() {\n\tfor i, ts := range packageTabSettings {\n\t\t\/\/ The directory view uses the same design as the packages view\n\t\t\/\/ for visual consistency, but some tabs don't make sense, so\n\t\t\/\/ we disable them.\n\t\tif !validDirectoryTabs[ts.Name] {\n\t\t\tts.Disabled = true\n\t\t}\n\t\tdirectoryTabSettings[i] = ts\n\t}\n\tfor _, d := range packageTabSettings {\n\t\tpackageTabLookup[d.Name] = d\n\t}\n\tfor _, d := range directoryTabSettings {\n\t\tdirectoryTabLookup[d.Name] = d\n\t}\n\tfor _, d := range moduleTabSettings {\n\t\tmoduleTabLookup[d.Name] = d\n\t}\n}\n\n\/\/ legacyFetchDetailsForPackage returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc legacyFetchDetailsForPackage(r *http.Request, tab string, ds internal.DataSource, pkg *internal.LegacyVersionedPackage) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"doc\":\n\t\treturn legacyFetchDocumentationDetails(pkg), nil\n\tcase \"versions\":\n\t\treturn legacyFetchPackageVersionsDetails(ctx, ds, pkg.Path, pkg.V1Path, pkg.ModulePath)\n\tcase \"subdirectories\":\n\t\treturn legacyFetchDirectoryDetails(ctx, ds, pkg.Path, &pkg.ModuleInfo, pkg.Licenses, false)\n\tcase \"imports\":\n\t\treturn fetchImportsDetails(ctx, ds, pkg.Path, pkg.ModulePath, pkg.Version)\n\tcase \"importedby\":\n\t\tdb, ok := ds.(*postgres.DB)\n\t\tif !ok {\n\t\t\t\/\/ The proxydatasource does not support the imported by page.\n\t\t\treturn nil, proxydatasourceNotSupportedErr()\n\t\t}\n\t\treturn fetchImportedByDetails(ctx, db, pkg.Path, pkg.ModulePath)\n\tcase \"licenses\":\n\t\treturn legacyFetchPackageLicensesDetails(ctx, ds, pkg.Path, pkg.ModulePath, pkg.Version)\n\tcase \"overview\":\n\t\treturn legacyFetchPackageOverviewDetails(ctx, pkg, urlIsVersioned(r.URL))\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ fetchDetailsForPackage returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc fetchDetailsForPackage(r *http.Request, tab string, ds internal.DataSource, vdir *internal.VersionedDirectory) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"doc\":\n\t\treturn fetchDocumentationDetails(vdir.Package.Documentation), nil\n\tcase \"overview\":\n\t\treturn fetchPackageOverviewDetails(ctx, vdir, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\treturn fetchDirectoryDetails(ctx, ds, vdir, false)\n\tcase \"versions\":\n\t\treturn legacyFetchPackageVersionsDetails(ctx, ds, vdir.Path, vdir.V1Path, vdir.ModulePath)\n\tcase \"imports\":\n\t\treturn fetchImportsDetails(ctx, ds, vdir.Path, vdir.ModulePath, vdir.Version)\n\tcase \"importedby\":\n\t\treturn fetchImportedByDetails(ctx, ds, vdir.Path, vdir.ModulePath)\n\tcase \"licenses\":\n\t\treturn legacyFetchPackageLicensesDetails(ctx, ds, vdir.Path, vdir.ModulePath, vdir.Version)\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\nfunc urlIsVersioned(url *url.URL) bool {\n\treturn strings.ContainsRune(url.Path, '@')\n}\n\n\/\/ fetchDetailsForModule returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc fetchDetailsForModule(r *http.Request, tab string, ds internal.DataSource, mi *internal.ModuleInfo, licenses []*licenses.License, readme *internal.Readme) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"packages\":\n\t\tif isActiveUseDirectories(ctx) {\n\t\t\tvdir := &internal.VersionedDirectory{\n\t\t\t\tModuleInfo: *mi,\n\t\t\t\tDirectory: internal.Directory{\n\t\t\t\t\tDirectoryMeta: internal.DirectoryMeta{\n\t\t\t\t\t\tPath: mi.ModulePath,\n\t\t\t\t\t\tV1Path: mi.SeriesPath(),\n\t\t\t\t\t\tIsRedistributable: mi.IsRedistributable,\n\t\t\t\t\t\tLicenses: licensesToMetadatas(licenses),\n\t\t\t\t\t},\n\t\t\t\t\tReadme: readme,\n\t\t\t\t},\n\t\t\t}\n\t\t\treturn fetchDirectoryDetails(ctx, ds, vdir, true)\n\t\t}\n\t\treturn legacyFetchDirectoryDetails(ctx, ds, mi.ModulePath, mi, licensesToMetadatas(licenses), true)\n\tcase \"licenses\":\n\t\treturn &LicensesDetails{Licenses: transformLicenses(mi.ModulePath, mi.Version, licenses)}, nil\n\tcase \"versions\":\n\t\treturn legacyFetchModuleVersionsDetails(ctx, ds, mi)\n\tcase \"overview\":\n\t\treturn constructOverviewDetails(ctx, mi, readme, mi.IsRedistributable, urlIsVersioned(r.URL))\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ fetchDetailsForDirectory returns tab details by delegating to the correct\n\/\/ detail handler.\nfunc fetchDetailsForDirectory(r *http.Request, tab string, ds internal.DataSource, vdir *internal.VersionedDirectory) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"overview\":\n\t\treturn constructOverviewDetails(ctx, &vdir.ModuleInfo, vdir.Readme, vdir.IsRedistributable, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\treturn fetchDirectoryDetails(ctx, ds, vdir, false)\n\tcase \"licenses\":\n\t\t\/\/ TODO(https:\/\/golang.org\/issue\/40027): replace logic below with\n\t\t\/\/ GetLicenses.\n\t\tlicenses, err := ds.LegacyGetModuleLicenses(ctx, vdir.ModulePath, vdir.Version)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &LicensesDetails{Licenses: transformLicenses(vdir.ModulePath, vdir.Version, licenses)}, nil\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ legacyFetchDetailsForDirectory returns tab details by delegating to the correct\n\/\/ detail handler.\nfunc legacyFetchDetailsForDirectory(r *http.Request, tab string, dir *internal.LegacyDirectory, licenses []*licenses.License) (interface{}, error) {\n\tswitch tab {\n\tcase \"overview\":\n\t\treadme := &internal.Readme{Filepath: dir.LegacyReadmeFilePath, Contents: dir.LegacyReadmeContents}\n\t\treturn constructOverviewDetails(r.Context(), &dir.ModuleInfo, readme, dir.LegacyModuleInfo.IsRedistributable, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\t\/\/ Ideally we would just use fetchDirectoryDetails here so that it\n\t\t\/\/ follows the same code path as fetchDetailsForModule and\n\t\t\/\/ fetchDetailsForPackage. However, since we already have the directory\n\t\t\/\/ and licenses info, it doesn't make sense to call\n\t\t\/\/ postgres.GetDirectory again.\n\t\treturn legacyCreateDirectory(dir, licensesToMetadatas(licenses), false)\n\tcase \"licenses\":\n\t\treturn &LicensesDetails{Licenses: transformLicenses(dir.ModulePath, dir.Version, licenses)}, nil\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n<commit_msg>internal\/frontend: rearrange functions in tabs.go<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage frontend\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/postgres\"\n)\n\n\/\/ TabSettings defines tab-specific metadata.\ntype TabSettings struct {\n\t\/\/ Name is the tab name used in the URL.\n\tName string\n\n\t\/\/ DisplayName is the formatted tab name.\n\tDisplayName string\n\n\t\/\/ AlwaysShowDetails defines whether the tab content can be shown even if the\n\t\/\/ package is not determined to be redistributable.\n\tAlwaysShowDetails bool\n\n\t\/\/ TemplateName is the name of the template used to render the\n\t\/\/ corresponding tab, as defined in Server.templates.\n\tTemplateName string\n\n\t\/\/ Disabled indicates whether a tab should be displayed as disabled.\n\tDisabled bool\n}\n\nvar (\n\tpackageTabSettings = []TabSettings{\n\t\t{\n\t\t\tName: \"doc\",\n\t\t\tDisplayName: \"Doc\",\n\t\t\tTemplateName: \"pkg_doc.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"overview\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Overview\",\n\t\t\tTemplateName: \"overview.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"subdirectories\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Subdirectories\",\n\t\t\tTemplateName: \"subdirectories.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"versions\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Versions\",\n\t\t\tTemplateName: \"versions.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"imports\",\n\t\t\tDisplayName: \"Imports\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tTemplateName: \"pkg_imports.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"importedby\",\n\t\t\tDisplayName: \"Imported By\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tTemplateName: \"pkg_importedby.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"licenses\",\n\t\t\tDisplayName: \"Licenses\",\n\t\t\tTemplateName: \"licenses.tmpl\",\n\t\t},\n\t}\n\tpackageTabLookup = make(map[string]TabSettings)\n\n\tdirectoryTabSettings = make([]TabSettings, len(packageTabSettings))\n\tdirectoryTabLookup = make(map[string]TabSettings)\n\n\tmoduleTabSettings = []TabSettings{\n\t\t{\n\t\t\tName: \"overview\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Overview\",\n\t\t\tTemplateName: \"overview.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"packages\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Packages\",\n\t\t\tTemplateName: \"subdirectories.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"versions\",\n\t\t\tAlwaysShowDetails: true,\n\t\t\tDisplayName: \"Versions\",\n\t\t\tTemplateName: \"versions.tmpl\",\n\t\t},\n\t\t{\n\t\t\tName: \"licenses\",\n\t\t\tDisplayName: \"Licenses\",\n\t\t\tTemplateName: \"licenses.tmpl\",\n\t\t},\n\t}\n\tmoduleTabLookup = make(map[string]TabSettings)\n)\n\n\/\/ validDirectoryTabs indicates if a tab is enabled in the directory view.\nvar validDirectoryTabs = map[string]bool{\n\t\"licenses\": true,\n\t\"overview\": true,\n\t\"subdirectories\": true,\n}\n\nfunc init() {\n\tfor i, ts := range packageTabSettings {\n\t\t\/\/ The directory view uses the same design as the packages view\n\t\t\/\/ for visual consistency, but some tabs don't make sense, so\n\t\t\/\/ we disable them.\n\t\tif !validDirectoryTabs[ts.Name] {\n\t\t\tts.Disabled = true\n\t\t}\n\t\tdirectoryTabSettings[i] = ts\n\t}\n\tfor _, d := range packageTabSettings {\n\t\tpackageTabLookup[d.Name] = d\n\t}\n\tfor _, d := range directoryTabSettings {\n\t\tdirectoryTabLookup[d.Name] = d\n\t}\n\tfor _, d := range moduleTabSettings {\n\t\tmoduleTabLookup[d.Name] = d\n\t}\n}\n\n\/\/ fetchDetailsForPackage returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc fetchDetailsForPackage(r *http.Request, tab string, ds internal.DataSource, vdir *internal.VersionedDirectory) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"doc\":\n\t\treturn fetchDocumentationDetails(vdir.Package.Documentation), nil\n\tcase \"overview\":\n\t\treturn fetchPackageOverviewDetails(ctx, vdir, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\treturn fetchDirectoryDetails(ctx, ds, vdir, false)\n\tcase \"versions\":\n\t\treturn legacyFetchPackageVersionsDetails(ctx, ds, vdir.Path, vdir.V1Path, vdir.ModulePath)\n\tcase \"imports\":\n\t\treturn fetchImportsDetails(ctx, ds, vdir.Path, vdir.ModulePath, vdir.Version)\n\tcase \"importedby\":\n\t\treturn fetchImportedByDetails(ctx, ds, vdir.Path, vdir.ModulePath)\n\tcase \"licenses\":\n\t\treturn legacyFetchPackageLicensesDetails(ctx, ds, vdir.Path, vdir.ModulePath, vdir.Version)\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ fetchDetailsForModule returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc fetchDetailsForModule(r *http.Request, tab string, ds internal.DataSource, mi *internal.ModuleInfo, licenses []*licenses.License, readme *internal.Readme) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"packages\":\n\t\tif isActiveUseDirectories(ctx) {\n\t\t\tvdir := &internal.VersionedDirectory{\n\t\t\t\tModuleInfo: *mi,\n\t\t\t\tDirectory: internal.Directory{\n\t\t\t\t\tDirectoryMeta: internal.DirectoryMeta{\n\t\t\t\t\t\tPath: mi.ModulePath,\n\t\t\t\t\t\tV1Path: mi.SeriesPath(),\n\t\t\t\t\t\tIsRedistributable: mi.IsRedistributable,\n\t\t\t\t\t\tLicenses: licensesToMetadatas(licenses),\n\t\t\t\t\t},\n\t\t\t\t\tReadme: readme,\n\t\t\t\t},\n\t\t\t}\n\t\t\treturn fetchDirectoryDetails(ctx, ds, vdir, true)\n\t\t}\n\t\treturn legacyFetchDirectoryDetails(ctx, ds, mi.ModulePath, mi, licensesToMetadatas(licenses), true)\n\tcase \"licenses\":\n\t\treturn &LicensesDetails{Licenses: transformLicenses(mi.ModulePath, mi.Version, licenses)}, nil\n\tcase \"versions\":\n\t\treturn legacyFetchModuleVersionsDetails(ctx, ds, mi)\n\tcase \"overview\":\n\t\treturn constructOverviewDetails(ctx, mi, readme, mi.IsRedistributable, urlIsVersioned(r.URL))\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ fetchDetailsForDirectory returns tab details by delegating to the correct\n\/\/ detail handler.\nfunc fetchDetailsForDirectory(r *http.Request, tab string, ds internal.DataSource, vdir *internal.VersionedDirectory) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"overview\":\n\t\treturn constructOverviewDetails(ctx, &vdir.ModuleInfo, vdir.Readme, vdir.IsRedistributable, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\treturn fetchDirectoryDetails(ctx, ds, vdir, false)\n\tcase \"licenses\":\n\t\t\/\/ TODO(https:\/\/golang.org\/issue\/40027): replace logic below with\n\t\t\/\/ GetLicenses.\n\t\tlicenses, err := ds.LegacyGetModuleLicenses(ctx, vdir.ModulePath, vdir.Version)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &LicensesDetails{Licenses: transformLicenses(vdir.ModulePath, vdir.Version, licenses)}, nil\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ legacyFetchDetailsForDirectory returns tab details by delegating to the correct\n\/\/ detail handler.\nfunc legacyFetchDetailsForDirectory(r *http.Request, tab string, dir *internal.LegacyDirectory, licenses []*licenses.License) (interface{}, error) {\n\tswitch tab {\n\tcase \"overview\":\n\t\treadme := &internal.Readme{Filepath: dir.LegacyReadmeFilePath, Contents: dir.LegacyReadmeContents}\n\t\treturn constructOverviewDetails(r.Context(), &dir.ModuleInfo, readme, dir.LegacyModuleInfo.IsRedistributable, urlIsVersioned(r.URL))\n\tcase \"subdirectories\":\n\t\t\/\/ Ideally we would just use fetchDirectoryDetails here so that it\n\t\t\/\/ follows the same code path as fetchDetailsForModule and\n\t\t\/\/ fetchDetailsForPackage. However, since we already have the directory\n\t\t\/\/ and licenses info, it doesn't make sense to call\n\t\t\/\/ postgres.GetDirectory again.\n\t\treturn legacyCreateDirectory(dir, licensesToMetadatas(licenses), false)\n\tcase \"licenses\":\n\t\treturn &LicensesDetails{Licenses: transformLicenses(dir.ModulePath, dir.Version, licenses)}, nil\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\n\/\/ legacyFetchDetailsForPackage returns tab details by delegating to the correct detail\n\/\/ handler.\nfunc legacyFetchDetailsForPackage(r *http.Request, tab string, ds internal.DataSource, pkg *internal.LegacyVersionedPackage) (interface{}, error) {\n\tctx := r.Context()\n\tswitch tab {\n\tcase \"doc\":\n\t\treturn legacyFetchDocumentationDetails(pkg), nil\n\tcase \"versions\":\n\t\treturn legacyFetchPackageVersionsDetails(ctx, ds, pkg.Path, pkg.V1Path, pkg.ModulePath)\n\tcase \"subdirectories\":\n\t\treturn legacyFetchDirectoryDetails(ctx, ds, pkg.Path, &pkg.ModuleInfo, pkg.Licenses, false)\n\tcase \"imports\":\n\t\treturn fetchImportsDetails(ctx, ds, pkg.Path, pkg.ModulePath, pkg.Version)\n\tcase \"importedby\":\n\t\tdb, ok := ds.(*postgres.DB)\n\t\tif !ok {\n\t\t\t\/\/ The proxydatasource does not support the imported by page.\n\t\t\treturn nil, proxydatasourceNotSupportedErr()\n\t\t}\n\t\treturn fetchImportedByDetails(ctx, db, pkg.Path, pkg.ModulePath)\n\tcase \"licenses\":\n\t\treturn legacyFetchPackageLicensesDetails(ctx, ds, pkg.Path, pkg.ModulePath, pkg.Version)\n\tcase \"overview\":\n\t\treturn legacyFetchPackageOverviewDetails(ctx, pkg, urlIsVersioned(r.URL))\n\t}\n\treturn nil, fmt.Errorf(\"BUG: unable to fetch details: unknown tab %q\", tab)\n}\n\nfunc urlIsVersioned(url *url.URL) bool {\n\treturn strings.ContainsRune(url.Path, '@')\n}\n<|endoftext|>"} {"text":"<commit_before>package iovecs\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Writer interface {\n\tWrite(p []byte) (int, error)\n\tWritev(slices ...[]byte) (int64, error)\n}\n\ntype fileWriter struct {\n\t*os.File\n\tfd uintptr\n\tvecs []syscall.Iovec\n}\n\nfunc (f *fileWriter) Writev(slices ...[]byte) (int64, error) {\n\tvecs := f.vecs\n\tswitch n := len(slices); cap(vecs) < n {\n\tcase true:\n\t\tvecs = make([]syscall.Iovec, 0, n)\n\t\tf.vecs = vecs\n\tdefault:\n\t\tvecs = vecs[:0]\n\t}\n\tfor _, b := range slices {\n\t\tif len(b) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvecs = append(vecs, syscall.Iovec{Base: &b[0], Len: uint64(len(b))})\n\t}\n\tr, _, errno := syscall.Syscall(syscall.SYS_WRITEV, f.fd, uintptr(unsafe.Pointer(&vecs[0])), uintptr(len(vecs)))\n\tif errno != 0 {\n\t\treturn int64(r), errno\n\t}\n\treturn int64(r), nil\n}\n\ntype ioWriter struct {\n\tio.Writer\n}\n\nfunc (w *ioWriter) Writev(slices ...[]byte) (int64, error) {\n\tvar written int64\n\tfor _, b := range slices {\n\t\tn, err := w.Write(b)\n\t\twritten += int64(n)\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\treturn written, nil\n}\n\nfunc NewWriter(w io.Writer) Writer {\n\tif f, ok := w.(*os.File); ok {\n\t\treturn &fileWriter{File: f, fd: f.Fd()}\n\t}\n\treturn &ioWriter{w}\n}\n<commit_msg>Make iovecs.Writer goroutine-safe<commit_after>package iovecs\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Writer interface {\n\tWrite(p []byte) (int, error)\n\tWritev(slices ...[]byte) (int64, error)\n}\n\ntype fileWriter struct {\n\t*os.File\n\tfd uintptr\n}\n\nfunc (f *fileWriter) Writev(slices ...[]byte) (int64, error) {\n\tvecs := make([]syscall.Iovec, 0, len(slices))\n\tfor _, b := range slices {\n\t\tif len(b) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvecs = append(vecs, syscall.Iovec{Base: &b[0], Len: uint64(len(b))})\n\t}\n\tif len(vecs) == 0 {\n\t\treturn 0, nil\n\t}\n\tr, _, errno := syscall.Syscall(syscall.SYS_WRITEV, f.fd, uintptr(unsafe.Pointer(&vecs[0])), uintptr(len(vecs)))\n\tif errno != 0 {\n\t\treturn int64(r), errno\n\t}\n\treturn int64(r), nil\n}\n\ntype ioWriter struct {\n\tio.Writer\n}\n\nfunc (w *ioWriter) Writev(slices ...[]byte) (int64, error) {\n\tvar written int64\n\tfor _, b := range slices {\n\t\tn, err := w.Write(b)\n\t\twritten += int64(n)\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\treturn written, nil\n}\n\nfunc NewWriter(w io.Writer) Writer {\n\tif f, ok := w.(*os.File); ok {\n\t\treturn &fileWriter{File: f, fd: f.Fd()}\n\t}\n\treturn &ioWriter{w}\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/cenkalti\/log\"\n)\n\nvar DefaultLogHandler log.Handler\n\nfunc SetLogLevel(l log.Level) { DefaultLogHandler.SetLevel(l) }\n\nfunc init() {\n\th := log.NewWriterHandler(os.Stderr)\n\th.SetFormatter(logFormatter{})\n\tDefaultLogHandler = h\n}\n\ntype Logger log.Logger\n\nfunc New(name string) Logger {\n\tlogger := log.NewLogger(name)\n\tlogger.SetLevel(log.DEBUG) \/\/ forward all messages to handler\n\tlogger.SetHandler(DefaultLogHandler)\n\treturn logger\n}\n\ntype logFormatter struct{}\n\n\/\/ Format outputs a message like \"2014-02-28 18:15:57 [example] INFO somethinfig happened\"\nfunc (f logFormatter) Format(rec *log.Record) string {\n\treturn fmt.Sprintf(\"%s %-8s [%s] %-8s %s\",\n\t\tfmt.Sprint(rec.Time)[:19],\n\t\trec.Level,\n\t\trec.LoggerName,\n\t\tfilepath.Base(rec.Filename)+\":\"+strconv.Itoa(rec.Line),\n\t\trec.Message)\n}\n<commit_msg>colored log output<commit_after>package logger\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/cenkalti\/log\"\n)\n\nvar DefaultLogHandler log.Handler\n\nfunc SetLogLevel(l log.Level) { DefaultLogHandler.SetLevel(l) }\n\nfunc init() {\n\th := log.NewFileHandler(os.Stderr)\n\th.SetFormatter(logFormatter{})\n\tDefaultLogHandler = h\n}\n\ntype Logger log.Logger\n\nfunc New(name string) Logger {\n\tlogger := log.NewLogger(name)\n\tlogger.SetLevel(log.DEBUG) \/\/ forward all messages to handler\n\tlogger.SetHandler(DefaultLogHandler)\n\treturn logger\n}\n\ntype logFormatter struct{}\n\n\/\/ Format outputs a message like \"2014-02-28 18:15:57 [example] INFO somethinfig happened\"\nfunc (f logFormatter) Format(rec *log.Record) string {\n\treturn fmt.Sprintf(\"%s %-8s [%s] %-8s %s\",\n\t\tfmt.Sprint(rec.Time)[:19],\n\t\trec.Level,\n\t\trec.LoggerName,\n\t\tfilepath.Base(rec.Filename)+\":\"+strconv.Itoa(rec.Line),\n\t\trec.Message)\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\tgolog \"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"sync\"\n\n\t\"gitlab.com\/gitlab-org\/gitlab-shell\/internal\/config\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tlogWriter io.Writer\n\tbootstrapLogger *golog.Logger\n\tpid int\n\tmutex sync.Mutex\n\tProgName string\n)\n\nfunc Configure(cfg *config.Config) error {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tpid = os.Getpid()\n\n\tvar err error\n\tlogWriter, err = os.OpenFile(cfg.LogFile, os.O_WRONLY|os.O_APPEND, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(logWriter)\n\tif cfg.LogFormat == \"json\" {\n\t\tlog.SetFormatter(&log.JSONFormatter{})\n\t}\n\n\treturn nil\n}\n\nfunc logPrint(msg string, err error) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tif logWriter == nil {\n\t\tbootstrapLogPrint(msg, err)\n\t\treturn\n\t}\n\n\tlog.WithError(err).WithFields(log.Fields{\n\t\t\"pid\": pid,\n\t}).Error(msg)\n}\n\nfunc Fatal(msg string, err error) {\n\tlogPrint(msg, err)\n\t\/\/ We don't show the error to the end user because it can leak\n\t\/\/ information that is private to the GitLab server.\n\tfmt.Fprintf(os.Stderr, \"%s: fatal: %s\\n\", ProgName, msg)\n\tos.Exit(1)\n}\n\n\/\/ If our log file is not available we want to log somewhere else, but\n\/\/ not to standard error because that leaks information to the user. This\n\/\/ function attemps to log to syslog.\n\/\/\n\/\/ We assume the logging mutex is already locked.\nfunc bootstrapLogPrint(msg string, err error) {\n\tif bootstrapLogger == nil {\n\t\tvar err error\n\t\tbootstrapLogger, err = syslog.NewLogger(syslog.LOG_ERR|syslog.LOG_USER, 0)\n\t\tif err != nil {\n\t\t\t\/\/ The message will not be logged.\n\t\t\treturn\n\t\t}\n\t}\n\n\tbootstrapLogger.Print(ProgName+\":\", msg+\":\", err)\n}\n<commit_msg>Fix Typos<commit_after>package logger\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\tgolog \"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"sync\"\n\n\t\"gitlab.com\/gitlab-org\/gitlab-shell\/internal\/config\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tlogWriter io.Writer\n\tbootstrapLogger *golog.Logger\n\tpid int\n\tmutex sync.Mutex\n\tProgName string\n)\n\nfunc Configure(cfg *config.Config) error {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tpid = os.Getpid()\n\n\tvar err error\n\tlogWriter, err = os.OpenFile(cfg.LogFile, os.O_WRONLY|os.O_APPEND, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(logWriter)\n\tif cfg.LogFormat == \"json\" {\n\t\tlog.SetFormatter(&log.JSONFormatter{})\n\t}\n\n\treturn nil\n}\n\nfunc logPrint(msg string, err error) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tif logWriter == nil {\n\t\tbootstrapLogPrint(msg, err)\n\t\treturn\n\t}\n\n\tlog.WithError(err).WithFields(log.Fields{\n\t\t\"pid\": pid,\n\t}).Error(msg)\n}\n\nfunc Fatal(msg string, err error) {\n\tlogPrint(msg, err)\n\t\/\/ We don't show the error to the end user because it can leak\n\t\/\/ information that is private to the GitLab server.\n\tfmt.Fprintf(os.Stderr, \"%s: fatal: %s\\n\", ProgName, msg)\n\tos.Exit(1)\n}\n\n\/\/ If our log file is not available we want to log somewhere else, but\n\/\/ not to standard error because that leaks information to the user. This\n\/\/ function attempts to log to syslog.\n\/\/\n\/\/ We assume the logging mutex is already locked.\nfunc bootstrapLogPrint(msg string, err error) {\n\tif bootstrapLogger == nil {\n\t\tvar err error\n\t\tbootstrapLogger, err = syslog.NewLogger(syslog.LOG_ERR|syslog.LOG_USER, 0)\n\t\tif err != nil {\n\t\t\t\/\/ The message will not be logged.\n\t\t\treturn\n\t\t}\n\t}\n\n\tbootstrapLogger.Print(ProgName+\":\", msg+\":\", err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package magnet provides support for parsing magnet links.\npackage magnet\n\nimport (\n\t\"encoding\/base32\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/multiformats\/go-multihash\"\n)\n\ntype Magnet struct {\n\tInfoHash [20]byte\n\tName string\n\tTrackers [][]string\n\tPeers []string\n}\n\nfunc New(s string) (*Magnet, error) {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif u.Scheme != \"magnet\" {\n\t\treturn nil, errors.New(\"not a magnet link\")\n\t}\n\n\tparams := u.Query()\n\n\txts, ok := params[\"xt\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"missing xt param\")\n\t}\n\tif len(xts) == 0 {\n\t\treturn nil, errors.New(\"empty xt param\")\n\t}\n\txt := xts[0]\n\n\tvar magnet Magnet\n\tmagnet.InfoHash, err = infoHashString(xt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnames := params[\"dn\"]\n\tif len(names) != 0 {\n\t\tmagnet.Name = names[0]\n\t}\n\n\tvar tiers []trackerTier\n\tfor key, tier := range params {\n\t\tif key == \"tr\" {\n\t\t\tfor i, tr := range tier {\n\t\t\t\ttiers = append(tiers, trackerTier{trackers: []string{tr}, index: i - len(tier)})\n\t\t\t}\n\t\t} else if strings.HasPrefix(key, \"tr.\") {\n\t\t\tindex, err := strconv.Atoi(key[3:])\n\t\t\tif err == nil && index >= 0 {\n\t\t\t\ttiers = append(tiers, trackerTier{trackers: tier, index: index})\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(tiers, func(i, j int) bool { return tiers[i].index < tiers[j].index })\n\n\tmagnet.Trackers = make([][]string, len(tiers))\n\tfor i, ti := range tiers {\n\t\tmagnet.Trackers[i] = ti.trackers\n\t}\n\n\tmagnet.Peers = params[\"x.pe\"]\n\n\treturn &magnet, nil\n}\n\ntype trackerTier struct {\n\ttrackers []string\n\tindex int\n}\n\n\/\/ infoHashString returns a new info hash value from a string.\n\/\/ s must be 40 (hex encoded) or 32 (base32 encoded) characters, otherwise it returns error.\nfunc infoHashString(xt string) ([20]byte, error) {\n\tvar ih [20]byte\n\tvar b []byte\n\tvar err error\n\tswitch {\n\tcase strings.HasPrefix(xt, \"urn:btih:\"):\n\t\txt = xt[9:]\n\t\tswitch len(xt) {\n\t\tcase 40:\n\t\t\tb, err = hex.DecodeString(xt)\n\t\tcase 32:\n\t\t\tb, err = base32.StdEncoding.DecodeString(xt)\n\t\tdefault:\n\t\t\treturn ih, errors.New(\"info hash must be 32 or 40 characters\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn ih, err\n\t\t}\n\tcase strings.HasPrefix(xt, \"urn:btmh:\"):\n\t\txt = xt[9:]\n\t\tb, err = multihash.FromHexString(xt)\n\t\tif err != nil {\n\t\t\treturn ih, err\n\t\t}\n\t\tif len(b) != 20 {\n\t\t\treturn ih, errors.New(\"invalid multihash (len != 20)\")\n\t\t}\n\tdefault:\n\t\treturn ih, errors.New(\"invalid xt param: must start with \\\"urn:btih:\\\" or \\\"urn:btmh\\\"\")\n\t}\n\tcopy(ih[:], b)\n\treturn ih, nil\n}\n<commit_msg>filter out control chars in magnet links<commit_after>\/\/ Package magnet provides support for parsing magnet links.\npackage magnet\n\nimport (\n\t\"encoding\/base32\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/multiformats\/go-multihash\"\n)\n\ntype Magnet struct {\n\tInfoHash [20]byte\n\tName string\n\tTrackers [][]string\n\tPeers []string\n}\n\nfunc New(s string) (*Magnet, error) {\n\ts = filterOutControlChars(s)\n\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif u.Scheme != \"magnet\" {\n\t\treturn nil, errors.New(\"not a magnet link\")\n\t}\n\n\tparams := u.Query()\n\n\txts, ok := params[\"xt\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"missing xt param\")\n\t}\n\tif len(xts) == 0 {\n\t\treturn nil, errors.New(\"empty xt param\")\n\t}\n\txt := xts[0]\n\n\tvar magnet Magnet\n\tmagnet.InfoHash, err = infoHashString(xt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnames := params[\"dn\"]\n\tif len(names) != 0 {\n\t\tmagnet.Name = names[0]\n\t}\n\n\tvar tiers []trackerTier\n\tfor key, tier := range params {\n\t\tif key == \"tr\" {\n\t\t\tfor i, tr := range tier {\n\t\t\t\ttiers = append(tiers, trackerTier{trackers: []string{tr}, index: i - len(tier)})\n\t\t\t}\n\t\t} else if strings.HasPrefix(key, \"tr.\") {\n\t\t\tindex, err := strconv.Atoi(key[3:])\n\t\t\tif err == nil && index >= 0 {\n\t\t\t\ttiers = append(tiers, trackerTier{trackers: tier, index: index})\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(tiers, func(i, j int) bool { return tiers[i].index < tiers[j].index })\n\n\tmagnet.Trackers = make([][]string, len(tiers))\n\tfor i, ti := range tiers {\n\t\tmagnet.Trackers[i] = ti.trackers\n\t}\n\n\tmagnet.Peers = params[\"x.pe\"]\n\n\treturn &magnet, nil\n}\n\ntype trackerTier struct {\n\ttrackers []string\n\tindex int\n}\n\n\/\/ infoHashString returns a new info hash value from a string.\n\/\/ s must be 40 (hex encoded) or 32 (base32 encoded) characters, otherwise it returns error.\nfunc infoHashString(xt string) ([20]byte, error) {\n\tvar ih [20]byte\n\tvar b []byte\n\tvar err error\n\tswitch {\n\tcase strings.HasPrefix(xt, \"urn:btih:\"):\n\t\txt = xt[9:]\n\t\tswitch len(xt) {\n\t\tcase 40:\n\t\t\tb, err = hex.DecodeString(xt)\n\t\tcase 32:\n\t\t\tb, err = base32.StdEncoding.DecodeString(xt)\n\t\tdefault:\n\t\t\treturn ih, errors.New(\"info hash must be 32 or 40 characters\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn ih, err\n\t\t}\n\tcase strings.HasPrefix(xt, \"urn:btmh:\"):\n\t\txt = xt[9:]\n\t\tb, err = multihash.FromHexString(xt)\n\t\tif err != nil {\n\t\t\treturn ih, err\n\t\t}\n\t\tif len(b) != 20 {\n\t\t\treturn ih, errors.New(\"invalid multihash (len != 20)\")\n\t\t}\n\tdefault:\n\t\treturn ih, errors.New(\"invalid xt param: must start with \\\"urn:btih:\\\" or \\\"urn:btmh\\\"\")\n\t}\n\tcopy(ih[:], b)\n\treturn ih, nil\n}\n\nfunc filterOutControlChars(s string) string {\n\tvar sb strings.Builder\n\tsb.Grow(len(s))\n\tfor i := 0; i < len(s); i++ {\n\t\tb := s[i]\n\t\tif b < ' ' || b == 0x7f {\n\t\t\tcontinue\n\t\t}\n\t\tsb.WriteByte(b)\n\t}\n\treturn sb.String()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package static builds static assets for the frontend and the worker.\npackage static\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/evanw\/esbuild\/pkg\/api\"\n)\n\ntype Config struct {\n\t\/\/ Entrypoint is a directory in which to to build TypeScript\n\t\/\/ sources.\n\tEntryPoint string\n\n\t\/\/ Bundle is true if files imported by an entry file\n\t\/\/ should be joined together in a single output file.\n\tBundle bool\n\n\t\/\/ Watch is true in development. Sourcemaps are placed inline,\n\t\/\/ the output is unminified, and changes to any TypeScript\n\t\/\/ files will force a rebuild of the JavaScript output.\n\tWatch bool\n}\n\n\/\/ Build compiles TypeScript files into minified JavaScript\n\/\/ files using github.com\/evanw\/esbuild.\n\/\/\n\/\/ This function is used in Server.staticHandler with Watch=true\n\/\/ when cmd\/frontend is run in dev mode and in\n\/\/ devtools\/cmd\/static\/main.go with Watch=false for building\n\/\/ productionized assets.\nfunc Build(config Config) (*api.BuildResult, error) {\n\tfiles, err := getEntry(config.EntryPoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptions := api.BuildOptions{\n\t\tEntryPoints: files,\n\t\tBundle: config.Bundle,\n\t\tOutdir: config.EntryPoint,\n\t\tWrite: true,\n\t}\n\tif config.Watch {\n\t\toptions.Sourcemap = api.SourceMapInline\n\t\toptions.Watch = &api.WatchMode{}\n\t} else {\n\t\toptions.MinifyIdentifiers = true\n\t\toptions.MinifySyntax = true\n\t\toptions.MinifyWhitespace = true\n\t\toptions.Sourcemap = api.SourceMapLinked\n\t}\n\tresult := api.Build(options)\n\tif len(result.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(\"error building static files: %v\", result.Errors)\n\t}\n\tif len(result.Warnings) > 0 {\n\t\treturn nil, fmt.Errorf(\"error building static files: %v\", result.Warnings)\n\t}\n\treturn &result, nil\n}\n\n\/\/ getEntry walks the the given directory and collects entry file paths\n\/\/ for esbuild. It ignores test files and files prefixed with an underscore.\n\/\/ Underscore prefixed files are assumed to be imported by and bundled together\n\/\/ with the output of an entry file.\nfunc getEntry(dir string) ([]string, error) {\n\tvar matches []string\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tbasePath := filepath.Base(path)\n\t\tnotPartial := !strings.HasPrefix(basePath, \"_\")\n\t\tnotTest := !strings.HasSuffix(basePath, \".test.ts\")\n\t\tmatched, err := filepath.Match(\"*.ts\", basePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif notPartial && notTest && matched {\n\t\t\tmatches = append(matches, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn matches, nil\n}\n<commit_msg>internal\/static: bundle and minifiy css files during static build<commit_after>\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package static builds static assets for the frontend and the worker.\npackage static\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/evanw\/esbuild\/pkg\/api\"\n)\n\ntype Config struct {\n\t\/\/ Entrypoint is a directory in which to to build TypeScript\n\t\/\/ sources.\n\tEntryPoint string\n\n\t\/\/ Bundle is true if files imported by an entry file\n\t\/\/ should be joined together in a single output file.\n\tBundle bool\n\n\t\/\/ Watch is true in development. Sourcemaps are placed inline,\n\t\/\/ the output is unminified, and changes to any TypeScript\n\t\/\/ files will force a rebuild of the JavaScript output.\n\tWatch bool\n}\n\n\/\/ Build compiles TypeScript files into minified JavaScript\n\/\/ files using github.com\/evanw\/esbuild.\n\/\/\n\/\/ This function is used in Server.staticHandler with Watch=true\n\/\/ when cmd\/frontend is run in dev mode and in\n\/\/ devtools\/cmd\/static\/main.go with Watch=false for building\n\/\/ productionized assets.\nfunc Build(config Config) (*api.BuildResult, error) {\n\tfiles, err := getEntry(config.EntryPoint, config.Bundle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptions := api.BuildOptions{\n\t\tEntryPoints: files,\n\t\tBundle: config.Bundle,\n\t\tOutdir: config.EntryPoint,\n\t\tWrite: true,\n\t\tOutExtensions: map[string]string{\".css\": \".min.css\"},\n\t\tExternal: []string{\"*.svg\"},\n\t\tBanner: map[string]string{\"css\": \"\/*!\\n\" +\n\t\t\t\" * Copyright 2021 The Go Authors. All rights reserved.\\n\" +\n\t\t\t\" * Use of this source code is governed by a BSD-style\\n\" +\n\t\t\t\" * license that can be found in the LICENSE file.\\n\" +\n\t\t\t\" *\/\"},\n\t}\n\tif config.Watch {\n\t\toptions.Sourcemap = api.SourceMapInline\n\t\toptions.Watch = &api.WatchMode{}\n\t} else {\n\t\toptions.MinifyIdentifiers = true\n\t\toptions.MinifySyntax = true\n\t\toptions.MinifyWhitespace = true\n\t\toptions.Sourcemap = api.SourceMapLinked\n\t}\n\tresult := api.Build(options)\n\tif len(result.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(\"error building static files: %v\", result.Errors)\n\t}\n\tif len(result.Warnings) > 0 {\n\t\treturn nil, fmt.Errorf(\"error building static files: %v\", result.Warnings)\n\t}\n\treturn &result, nil\n}\n\n\/\/ getEntry walks the the given directory and collects entry file paths\n\/\/ for esbuild. It ignores test files and files prefixed with an underscore.\n\/\/ Underscore prefixed files are assumed to be imported by and bundled together\n\/\/ with the output of an entry file.\nfunc getEntry(dir string, bundle bool) ([]string, error) {\n\tvar matches []string\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tbasePath := filepath.Base(path)\n\t\tnotPartial := !strings.HasPrefix(basePath, \"_\")\n\t\tnotTest := !strings.HasSuffix(basePath, \".test.ts\")\n\t\tisTS := strings.HasSuffix(basePath, \".ts\")\n\t\tisCSS := strings.HasSuffix(basePath, \".css\") && !strings.HasSuffix(basePath, \".min.css\")\n\t\tif notPartial && notTest && (isTS || (bundle && isCSS)) {\n\t\t\tmatches = append(matches, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn matches, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !js\n\npackage ui\n\n\/\/ TODO: Use golang.org\/x\/sys\/windows (NewLazyDLL) instead of cgo.\n\n\/\/ #cgo LDFLAGS: -lgdi32\n\/\/\n\/\/ #include <windows.h>\n\/\/\n\/\/ static int getCaptionHeight() {\n\/\/ return GetSystemMetrics(SM_CYCAPTION);\n\/\/ }\nimport \"C\"\n\nimport (\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/devicescale\"\n)\n\nfunc glfwScale() float64 {\n\treturn devicescale.DeviceScale()\n}\n\nfunc adjustWindowPosition(x, y int) (int, int) {\n\t\/\/ As the video width\/height might be wrong,\n\t\/\/ adjust x\/y at least to enable to handle the window (#328)\n\tif x < 0 {\n\t\tx = 0\n\t}\n\tt := int(C.getCaptionHeight())\n\tif y < t {\n\t\ty = t\n\t}\n\treturn x, y\n}\n<commit_msg>ui: Don't call devicescale.DeviceScale too often<commit_after>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !js\n\npackage ui\n\n\/\/ TODO: Use golang.org\/x\/sys\/windows (NewLazyDLL) instead of cgo.\n\n\/\/ #cgo LDFLAGS: -lgdi32\n\/\/\n\/\/ #include <windows.h>\n\/\/\n\/\/ static int getCaptionHeight() {\n\/\/ return GetSystemMetrics(SM_CYCAPTION);\n\/\/ }\nimport \"C\"\n\nfunc glfwScale() float64 {\n\t\/\/ This function must be called on the main thread.\n\treturn currentUI.deviceScale.Get()\n}\n\nfunc adjustWindowPosition(x, y int) (int, int) {\n\t\/\/ As the video width\/height might be wrong,\n\t\/\/ adjust x\/y at least to enable to handle the window (#328)\n\tif x < 0 {\n\t\tx = 0\n\t}\n\tt := int(C.getCaptionHeight())\n\tif y < t {\n\t\ty = t\n\t}\n\treturn x, y\n}\n<|endoftext|>"} {"text":"<commit_before>package interpreter\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/maknahar\/go-utils\"\n\t\"github.com\/nlopes\/slack\"\n)\n\ntype ReportResponse struct {\n\tCreatedAt string `json:\"CreatedAt\"`\n\tCurrentTime string `json:\"CurrentTime\"`\n\tFailureReason string `json:\"FailureReason\"`\n\tFromTime string `json:\"FromTime\"`\n\tStatus string `json:\"Status\"`\n\tToTime string `json:\"ToTime\"`\n\tUpdatedAt string `json:\"UpdatedAt\"`\n\tLink string\n\tMissingSession string\n}\n\nfunc GetReportStatus(staging bool) (*ReportResponse, error) {\n\turl := os.Getenv(\"REPORT_STATUS\")\n\tif staging {\n\t\turl = os.Getenv(\"STAGING_REPORT_STATUS\")\n\t}\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error in getting order data %v\", err)\n\t}\n\n\td, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error in reading order data %v\", err)\n\t}\n\n\torder := make([]ReportResponse, 0)\n\terr = json.Unmarshal(d, &order)\n\tif err != nil {\n\t\tlog.Println(\"Error in decoding report response\", err, go_utils.JsonPrettyPrint(string(d),\n\t\t\t\"\", \"\\t\"))\n\t\treturn nil, errors.New(\"Error in decoding report response\")\n\t}\n\n\tl := len(order)\n\tif l == 0 {\n\t\treturn nil, errors.New(\"Report Service might be down\")\n\t}\n\tactivity := &order[l-1]\n\tactivity.Link = url\n\n\treturn activity, nil\n}\n\nfunc (r *ReportResponse) GetDelayReason() string {\n\tloc, _ := time.LoadLocation(\"Asia\/Kolkata\")\n\tfromTime, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.FromTime, loc)\n\tif err != nil {\n\t\treturn \"Unable to parse to time. Err: %+v\"\n\t}\n\n\tif strings.Contains(r.FailureReason, \"SessionNotFound\") {\n\t\ts := regexp.MustCompile(\"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$\")\n\t\tr.MissingSession = s.FindString(r.FailureReason)\n\t\treturn fmt.Sprintf(\"A Session is missing from data pack since %s\", fromTime)\n\t}\n\n\tif r.FailureReason == \"\" {\n\t\tif time.Since(fromTime).Minutes() > 15 {\n\t\t\treturn fmt.Sprintf(\"Report sync is running smoothly.\")\n\t\t}\n\t\treturn fmt.Sprintf(\"Report is in sync.\")\n\t}\n\n\treturn r.FailureReason\n}\n\nfunc (r *ReportResponse) FormatSlackMessage(attachment *slack.Attachment) {\n\tcause := r.GetDelayReason()\n\tattachment.Pretext = cause\n\tattachment.Title = \"Report Sync Activity Details\"\n\tattachment.TitleLink = r.Link\n\tif r.MissingSession != \"\" {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Missing Session ID:\",\n\t\t\tValue: r.MissingSession,\n\t\t\tShort: true})\n\t}\n\n\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\tTitle: \"Status:\",\n\t\tValue: r.Status,\n\t\tShort: true})\n\n\tloc, _ := time.LoadLocation(\"Asia\/Kolkata\")\n\tfromTime, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.FromTime, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Delay:\",\n\t\t\tValue: time.Since(fromTime).String(),\n\t\t\tShort: true})\n\t}\n\n\tct, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.CreatedAt, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Created At:\",\n\t\t\tValue: ct.Format(\"2006-01-02 15:04:05\"),\n\t\t\tShort: true})\n\t}\n\n\tut, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.UpdatedAt, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Last Updated At:\",\n\t\t\tValue: ut.Format(\"2006-01-02 15:04:05\"),\n\t\t\tShort: true})\n\t}\n\n}\n<commit_msg>Added service availability check<commit_after>package interpreter\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/maknahar\/go-utils\"\n\t\"github.com\/nlopes\/slack\"\n)\n\ntype ReportResponse struct {\n\tCreatedAt string `json:\"CreatedAt\"`\n\tCurrentTime string `json:\"CurrentTime\"`\n\tFailureReason string `json:\"FailureReason\"`\n\tFromTime string `json:\"FromTime\"`\n\tStatus string `json:\"Status\"`\n\tToTime string `json:\"ToTime\"`\n\tUpdatedAt string `json:\"UpdatedAt\"`\n\tLink string\n\tMissingSession string\n}\n\nfunc GetReportStatus(staging bool) (*ReportResponse, error) {\n\turl := os.Getenv(\"REPORT_STATUS\")\n\tif staging {\n\t\turl = os.Getenv(\"STAGING_REPORT_STATUS\")\n\t}\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error in getting order data %v\", err)\n\t}\n\n\td, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error in reading order data %v\", err)\n\t}\n\n\torder := make([]ReportResponse, 0)\n\terr = json.Unmarshal(d, &order)\n\tif err != nil {\n\t\tif res.StatusCode == http.StatusServiceUnavailable {\n\t\t\treturn nil, errors.New(\"Report Service is unavailable\")\n\t\t}\n\t\tlog.Println(\"Error in decoding report response\", err, go_utils.JsonPrettyPrint(string(d),\n\t\t\t\"\", \"\\t\"))\n\t\treturn nil, errors.New(\"Error in decoding report response\")\n\t}\n\n\tl := len(order)\n\tif l == 0 {\n\t\treturn nil, errors.New(\"Report Service might be down\")\n\t}\n\tactivity := &order[l-1]\n\tactivity.Link = url\n\n\treturn activity, nil\n}\n\nfunc (r *ReportResponse) GetDelayReason() string {\n\tloc, _ := time.LoadLocation(\"Asia\/Kolkata\")\n\tfromTime, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.FromTime, loc)\n\tif err != nil {\n\t\treturn \"Unable to parse to time. Err: %+v\"\n\t}\n\n\tif strings.Contains(r.FailureReason, \"SessionNotFound\") {\n\t\ts := regexp.MustCompile(\"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$\")\n\t\tr.MissingSession = s.FindString(r.FailureReason)\n\t\treturn fmt.Sprintf(\"A Session is missing from data pack since %s\", fromTime)\n\t}\n\n\tif r.FailureReason == \"\" {\n\t\tif time.Since(fromTime).Minutes() > 15 {\n\t\t\treturn fmt.Sprintf(\"Report sync is running smoothly.\")\n\t\t}\n\t\treturn fmt.Sprintf(\"Report is in sync.\")\n\t}\n\n\treturn r.FailureReason\n}\n\nfunc (r *ReportResponse) FormatSlackMessage(attachment *slack.Attachment) {\n\tcause := r.GetDelayReason()\n\tattachment.Pretext = cause\n\tattachment.Title = \"Report Sync Activity Details\"\n\tattachment.TitleLink = r.Link\n\tif r.MissingSession != \"\" {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Missing Session ID:\",\n\t\t\tValue: r.MissingSession,\n\t\t\tShort: true})\n\t}\n\n\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\tTitle: \"Status:\",\n\t\tValue: r.Status,\n\t\tShort: true})\n\n\tloc, _ := time.LoadLocation(\"Asia\/Kolkata\")\n\tfromTime, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.FromTime, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Delay:\",\n\t\t\tValue: time.Since(fromTime).String(),\n\t\t\tShort: true})\n\t}\n\n\tct, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.CreatedAt, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Created At:\",\n\t\t\tValue: ct.Format(\"2006-01-02 15:04:05\"),\n\t\t\tShort: true})\n\t}\n\n\tut, err := time.ParseInLocation(\"2006-01-02T15:04:05.999999\", r.UpdatedAt, loc)\n\tif err == nil {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Last Updated At:\",\n\t\t\tValue: ut.Format(\"2006-01-02 15:04:05\"),\n\t\t\tShort: true})\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package inmem\n\nimport (\n\t\"github.com\/docker\/libswarm\/beam\"\n\t\"io\"\n\t\"sync\"\n)\n\nfunc Pipe() (*PipeReceiver, *PipeSender) {\n\tp := new(pipe)\n\tp.rwait.L = &p.l\n\tp.wwait.L = &p.l\n\tr := &PipeReceiver{p}\n\tw := &PipeSender{p}\n\treturn r, w\n}\n\ntype pipe struct {\n\tch chan *pipeMessage\n\trwait sync.Cond\n\twwait sync.Cond\n\tl sync.Mutex\n\trl sync.Mutex\n\twl sync.Mutex\n\trerr error \/\/ if reader closed, error to give writes\n\twerr error \/\/ if writer closed, error to give reads\n\tpmsg *pipeMessage\n}\n\ntype pipeMessage struct {\n\tmsg *beam.Message\n\tout *PipeSender\n\tin *PipeReceiver\n}\n\nfunc (p *pipe) psend(pmsg *pipeMessage) error {\n\tvar err error\n\t\/\/ One writer at a time.\n\tp.wl.Lock()\n\tdefer p.wl.Unlock()\n\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.pmsg = pmsg\n\tp.rwait.Signal()\n\tfor {\n\t\tif p.pmsg == nil {\n\t\t\tbreak\n\t\t}\n\t\tif p.rerr != nil {\n\t\t\terr = p.rerr\n\t\t\tbreak\n\t\t}\n\t\tif p.werr != nil {\n\t\t\terr = io.ErrClosedPipe\n\t\t}\n\t\tp.wwait.Wait()\n\t}\n\tp.pmsg = nil \/\/ in case of rerr or werr\n\treturn err\n}\n\nfunc (p *pipe) send(msg *beam.Message, mode int) (in *PipeReceiver, out *PipeSender, err error) {\n\t\/\/ Prepare the message\n\tpmsg := &pipeMessage{msg: msg}\n\tif mode&beam.R != 0 {\n\t\tin, pmsg.out = Pipe()\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tin.Close()\n\t\t\t\tin = nil\n\t\t\t\tpmsg.out.Close()\n\t\t\t}\n\t\t}()\n\t}\n\tif mode&beam.W != 0 {\n\t\tpmsg.in, out = Pipe()\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tout.Close()\n\t\t\t\tout = nil\n\t\t\t\tpmsg.in.Close()\n\t\t\t}\n\t\t}()\n\t}\n\terr = p.psend(pmsg)\n\treturn\n}\n\nfunc (p *pipe) preceive() (*pipeMessage, error) {\n\tp.rl.Lock()\n\tdefer p.rl.Unlock()\n\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tfor {\n\t\tif p.rerr != nil {\n\t\t\treturn nil, io.ErrClosedPipe\n\t\t}\n\t\tif p.pmsg != nil {\n\t\t\tbreak\n\t\t}\n\t\tif p.werr != nil {\n\t\t\treturn nil, p.werr\n\t\t}\n\t\tp.rwait.Wait()\n\t}\n\tpmsg := p.pmsg\n\tp.pmsg = nil\n\tp.wwait.Signal()\n\treturn pmsg, nil\n}\n\nfunc (p *pipe) receive(mode int) (*beam.Message, *PipeReceiver, *PipeSender, error) {\n\tpmsg, err := p.preceive()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif pmsg.out != nil && mode&beam.W == 0 {\n\t\tpmsg.out.Close()\n\t\tpmsg.out = nil\n\t}\n\tif pmsg.in != nil && mode&beam.R == 0 {\n\t\tpmsg.in.Close()\n\t\tpmsg.in = nil\n\t}\n\treturn pmsg.msg, pmsg.in, pmsg.out, nil\n}\n\nfunc (p *pipe) rclose(err error) {\n\tif err == nil {\n\t\terr = io.ErrClosedPipe\n\t}\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.rerr = err\n\tp.rwait.Signal()\n\tp.wwait.Signal()\n}\n\nfunc (p *pipe) wclose(err error) {\n\tif err == nil {\n\t\terr = io.EOF\n\t}\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.werr = err\n\tp.rwait.Signal()\n\tp.wwait.Signal()\n}\n\n\/\/ PipeReceiver\n\ntype PipeReceiver struct {\n\tp *pipe\n}\n\nfunc (r *PipeReceiver) Receive(mode int) (*beam.Message, beam.Receiver, beam.Sender, error) {\n\tmsg, pin, pout, err := r.p.receive(mode)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tvar (\n\t\t\/\/ Always return NopReceiver\/NopSender instead of nil values,\n\t\t\/\/ because:\n\t\t\/\/ - if they were requested in the mode, they can safely be used\n\t\t\/\/ - if they were not requested, they can safely be ignored (ie no leak if they\n\t\t\/\/ aren't closed)\n\t\tin beam.Receiver = beam.NopReceiver{}\n\t\tout beam.Sender = beam.NopSender{}\n\t)\n\tif pin != nil {\n\t\tin = pin\n\t}\n\tif pout != nil {\n\t\tout = pout\n\t}\n\treturn msg, in, out, err\n}\n\nfunc (r *PipeReceiver) SendTo(dst beam.Sender) (int, error) {\n\tvar n int\n\t\/\/ If the destination is a PipeSender, we can cheat\n\tpdst, ok := dst.(*PipeSender)\n\tif !ok {\n\t\treturn 0, beam.ErrIncompatibleSender\n\t}\n\tfor {\n\t\tpmsg, err := r.p.preceive()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif err := pdst.p.psend(pmsg); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\tn++\n\treturn n, nil\n}\n\nfunc (r *PipeReceiver) Close() error {\n\treturn r.CloseWithError(nil)\n}\n\nfunc (r *PipeReceiver) CloseWithError(err error) error {\n\tr.p.rclose(err)\n\treturn nil\n}\n\n\/\/ PipeSender\n\ntype PipeSender struct {\n\tp *pipe\n}\n\nfunc (w *PipeSender) Send(msg *beam.Message, mode int) (beam.Receiver, beam.Sender, error) {\n\tpin, pout, err := w.p.send(msg, mode)\n\tvar (\n\t\tin beam.Receiver\n\t\tout beam.Sender\n\t)\n\tif pin != nil {\n\t\tin = pin\n\t}\n\tif pout != nil {\n\t\tout = pout\n\t}\n\treturn in, out, err\n}\n\nfunc (w *PipeSender) ReceiveFrom(src beam.Receiver) (int, error) {\n\tvar n int\n\t\/\/ If the destination is a PipeReceiver, we can cheat\n\tpsrc, ok := src.(*PipeReceiver)\n\tif !ok {\n\t\treturn 0, beam.ErrIncompatibleReceiver\n\t}\n\tfor {\n\t\tpmsg, err := psrc.p.preceive()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif err := w.p.psend(pmsg); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn++\n\t}\n\treturn n, nil\n}\n\nfunc (w *PipeSender) Close() error {\n\treturn w.CloseWithError(nil)\n}\n\nfunc (w *PipeSender) CloseWithError(err error) error {\n\tw.p.wclose(err)\n\treturn nil\n}\n<commit_msg>beam\/inmem: remove unused structure field<commit_after>package inmem\n\nimport (\n\t\"github.com\/docker\/libswarm\/beam\"\n\t\"io\"\n\t\"sync\"\n)\n\nfunc Pipe() (*PipeReceiver, *PipeSender) {\n\tp := new(pipe)\n\tp.rwait.L = &p.l\n\tp.wwait.L = &p.l\n\tr := &PipeReceiver{p}\n\tw := &PipeSender{p}\n\treturn r, w\n}\n\ntype pipe struct {\n\trwait sync.Cond\n\twwait sync.Cond\n\tl sync.Mutex\n\trl sync.Mutex\n\twl sync.Mutex\n\trerr error \/\/ if reader closed, error to give writes\n\twerr error \/\/ if writer closed, error to give reads\n\tpmsg *pipeMessage\n}\n\ntype pipeMessage struct {\n\tmsg *beam.Message\n\tout *PipeSender\n\tin *PipeReceiver\n}\n\nfunc (p *pipe) psend(pmsg *pipeMessage) error {\n\tvar err error\n\t\/\/ One writer at a time.\n\tp.wl.Lock()\n\tdefer p.wl.Unlock()\n\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.pmsg = pmsg\n\tp.rwait.Signal()\n\tfor {\n\t\tif p.pmsg == nil {\n\t\t\tbreak\n\t\t}\n\t\tif p.rerr != nil {\n\t\t\terr = p.rerr\n\t\t\tbreak\n\t\t}\n\t\tif p.werr != nil {\n\t\t\terr = io.ErrClosedPipe\n\t\t}\n\t\tp.wwait.Wait()\n\t}\n\tp.pmsg = nil \/\/ in case of rerr or werr\n\treturn err\n}\n\nfunc (p *pipe) send(msg *beam.Message, mode int) (in *PipeReceiver, out *PipeSender, err error) {\n\t\/\/ Prepare the message\n\tpmsg := &pipeMessage{msg: msg}\n\tif mode&beam.R != 0 {\n\t\tin, pmsg.out = Pipe()\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tin.Close()\n\t\t\t\tin = nil\n\t\t\t\tpmsg.out.Close()\n\t\t\t}\n\t\t}()\n\t}\n\tif mode&beam.W != 0 {\n\t\tpmsg.in, out = Pipe()\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tout.Close()\n\t\t\t\tout = nil\n\t\t\t\tpmsg.in.Close()\n\t\t\t}\n\t\t}()\n\t}\n\terr = p.psend(pmsg)\n\treturn\n}\n\nfunc (p *pipe) preceive() (*pipeMessage, error) {\n\tp.rl.Lock()\n\tdefer p.rl.Unlock()\n\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tfor {\n\t\tif p.rerr != nil {\n\t\t\treturn nil, io.ErrClosedPipe\n\t\t}\n\t\tif p.pmsg != nil {\n\t\t\tbreak\n\t\t}\n\t\tif p.werr != nil {\n\t\t\treturn nil, p.werr\n\t\t}\n\t\tp.rwait.Wait()\n\t}\n\tpmsg := p.pmsg\n\tp.pmsg = nil\n\tp.wwait.Signal()\n\treturn pmsg, nil\n}\n\nfunc (p *pipe) receive(mode int) (*beam.Message, *PipeReceiver, *PipeSender, error) {\n\tpmsg, err := p.preceive()\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif pmsg.out != nil && mode&beam.W == 0 {\n\t\tpmsg.out.Close()\n\t\tpmsg.out = nil\n\t}\n\tif pmsg.in != nil && mode&beam.R == 0 {\n\t\tpmsg.in.Close()\n\t\tpmsg.in = nil\n\t}\n\treturn pmsg.msg, pmsg.in, pmsg.out, nil\n}\n\nfunc (p *pipe) rclose(err error) {\n\tif err == nil {\n\t\terr = io.ErrClosedPipe\n\t}\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.rerr = err\n\tp.rwait.Signal()\n\tp.wwait.Signal()\n}\n\nfunc (p *pipe) wclose(err error) {\n\tif err == nil {\n\t\terr = io.EOF\n\t}\n\tp.l.Lock()\n\tdefer p.l.Unlock()\n\tp.werr = err\n\tp.rwait.Signal()\n\tp.wwait.Signal()\n}\n\n\/\/ PipeReceiver\n\ntype PipeReceiver struct {\n\tp *pipe\n}\n\nfunc (r *PipeReceiver) Receive(mode int) (*beam.Message, beam.Receiver, beam.Sender, error) {\n\tmsg, pin, pout, err := r.p.receive(mode)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tvar (\n\t\t\/\/ Always return NopReceiver\/NopSender instead of nil values,\n\t\t\/\/ because:\n\t\t\/\/ - if they were requested in the mode, they can safely be used\n\t\t\/\/ - if they were not requested, they can safely be ignored (ie no leak if they\n\t\t\/\/ aren't closed)\n\t\tin beam.Receiver = beam.NopReceiver{}\n\t\tout beam.Sender = beam.NopSender{}\n\t)\n\tif pin != nil {\n\t\tin = pin\n\t}\n\tif pout != nil {\n\t\tout = pout\n\t}\n\treturn msg, in, out, err\n}\n\nfunc (r *PipeReceiver) SendTo(dst beam.Sender) (int, error) {\n\tvar n int\n\t\/\/ If the destination is a PipeSender, we can cheat\n\tpdst, ok := dst.(*PipeSender)\n\tif !ok {\n\t\treturn 0, beam.ErrIncompatibleSender\n\t}\n\tfor {\n\t\tpmsg, err := r.p.preceive()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif err := pdst.p.psend(pmsg); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\tn++\n\treturn n, nil\n}\n\nfunc (r *PipeReceiver) Close() error {\n\treturn r.CloseWithError(nil)\n}\n\nfunc (r *PipeReceiver) CloseWithError(err error) error {\n\tr.p.rclose(err)\n\treturn nil\n}\n\n\/\/ PipeSender\n\ntype PipeSender struct {\n\tp *pipe\n}\n\nfunc (w *PipeSender) Send(msg *beam.Message, mode int) (beam.Receiver, beam.Sender, error) {\n\tpin, pout, err := w.p.send(msg, mode)\n\tvar (\n\t\tin beam.Receiver\n\t\tout beam.Sender\n\t)\n\tif pin != nil {\n\t\tin = pin\n\t}\n\tif pout != nil {\n\t\tout = pout\n\t}\n\treturn in, out, err\n}\n\nfunc (w *PipeSender) ReceiveFrom(src beam.Receiver) (int, error) {\n\tvar n int\n\t\/\/ If the destination is a PipeReceiver, we can cheat\n\tpsrc, ok := src.(*PipeReceiver)\n\tif !ok {\n\t\treturn 0, beam.ErrIncompatibleReceiver\n\t}\n\tfor {\n\t\tpmsg, err := psrc.p.preceive()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif err := w.p.psend(pmsg); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn++\n\t}\n\treturn n, nil\n}\n\nfunc (w *PipeSender) Close() error {\n\treturn w.CloseWithError(nil)\n}\n\nfunc (w *PipeSender) CloseWithError(err error) error {\n\tw.p.wclose(err)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\t\"net\/http\"\n\t\"appengine\" \n)\n\ntype RequestStruct struct {\n\tRoute Route\n\tPathParams map[string]string\n\tHttpRequest *http.Request\n\tResponseWriter http.ResponseWriter\n}\n\ntype Request interface {\n\tGetRoute() Route\n\tGetPathParams() map[string]string\n\tGetHttpRequest() *http.Request\n\tGetResponseWriter() http.ResponseWriter\n\tGetContext() appengine.Context\n\tGetContent() []byte\n}\n\nfunc (this RequestStruct) GetRoute() Route {\n\treturn this.Route\n}\n\nfunc (this RequestStruct) GetPathParams() map[string]string {\n\treturn this.PathParams\n}\n\nfunc (this RequestStruct) GetHttpRequest() *http.Request {\n\treturn this.HttpRequest\n}\n\nfunc (this RequestStruct) GetResponseWriter() http.ResponseWriter {\n\treturn this.ResponseWriter\n}\n\nfunc (this RequestStruct) GetContext() appengine.Context {\n\treturn appengine.NewContext(this.HttpRequest)\n}\n\nfunc (this RequestStruct) GetContent() []byte {\n\tcontent := make([]byte, this.HttpRequest.ContentLength)\n\tthis.HttpRequest.Body.Read(content)\n\treturn content\n}<commit_msg>Fixing formatting for request.go<commit_after>package router\n\nimport (\n\t\"appengine\"\n\t\"net\/http\"\n)\n\ntype RequestStruct struct {\n\tRoute Route\n\tPathParams map[string]string\n\tHttpRequest *http.Request\n\tResponseWriter http.ResponseWriter\n}\n\ntype Request interface {\n\tGetRoute() Route\n\tGetPathParams() map[string]string\n\tGetHttpRequest() *http.Request\n\tGetResponseWriter() http.ResponseWriter\n\tGetContext() appengine.Context\n\tGetContent() []byte\n}\n\nfunc (this RequestStruct) GetRoute() Route {\n\treturn this.Route\n}\n\nfunc (this RequestStruct) GetPathParams() map[string]string {\n\treturn this.PathParams\n}\n\nfunc (this RequestStruct) GetHttpRequest() *http.Request {\n\treturn this.HttpRequest\n}\n\nfunc (this RequestStruct) GetResponseWriter() http.ResponseWriter {\n\treturn this.ResponseWriter\n}\n\nfunc (this RequestStruct) GetContext() appengine.Context {\n\treturn appengine.NewContext(this.HttpRequest)\n}\n\nfunc (this RequestStruct) GetContent() []byte {\n\tcontent := make([]byte, this.HttpRequest.ContentLength)\n\tthis.HttpRequest.Body.Read(content)\n\treturn content\n}\n<|endoftext|>"} {"text":"<commit_before>package focker_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/hatofmonkeys\/cloudfocker\/focker\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Focker\", func() {\n\tvar (\n\t\ttestfocker *focker.Focker\n\t\tbuffer *gbytes.Buffer\n\t)\n\tBeforeEach(func() {\n\t\ttestfocker = focker.NewFocker()\n\t\tbuffer = gbytes.NewBuffer()\n\t})\n\n\tDescribe(\"Displaying the docker version\", func() {\n\t\tIt(\"should tell Docker to output its version\", func() {\n\t\t\ttestfocker.DockerVersion(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Checking Docker version`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Client API version: `))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Go version \\(client\\): go`))\n\t\t})\n\t})\n\n\tDescribe(\"Bootstrapping the base image\", func() {\n\t\t\/\/This works, but speed depends on your net connection\n\t\tXIt(\"should download and tag the lucid64 filesystem\", func() {\n\t\t\tfmt.Println(\"Downloading lucid64 - this could take a while\")\n\t\t\ttestfocker.ImportRootfsImage(buffer)\n\t\t\tEventually(buffer, 600).Should(gbytes.Say(`[a-f0-9]{64}`))\n\t\t})\n\t})\n\n\tDescribe(\"Writing a dockerfile\", func() {\n\t\tIt(\"should write a valid dockerfile\", func() {\n\t\t\ttestfocker.WriteDockerfile(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`FROM`))\n\t\t})\n\t})\n\n\tDescribe(\"Building a docker image\", func() {\n\t\tIt(\"should output a built image tag\", func() {\n\t\t\ttestfocker.BuildImage(buffer)\n\t\t\tEventually(buffer, 20).Should(gbytes.Say(`Successfully built [a-f0-9]{12}`))\n\t\t})\n\t})\n\n\tDescribe(\"Running the docker container\", func() {\n\t\tIt(\"should output a valid URL for the running application\", func() {\n\t\t\ttestfocker.RunContainer(buffer)\n\t\t\tEventually(buffer, 20).Should(gbytes.Say(`Successfully built [a-f0-9]{12}`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`[a-f0-9]{64}`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Connect to your running application at http:\/\/localhost:8080\/`))\n\t\t\tEventually(statusCodeChecker).Should(Equal(200))\n\t\t\ttestfocker.StopContainer(buffer)\n\t\t})\n\t})\n\n\tDescribe(\"Stopping the docker container\", func() {\n\t\tIt(\"should output the stopped image ID, not respond to HTTP, and delete the container\", func() {\n\t\t\ttestfocker.RunContainer(buffer)\n\t\t\ttestfocker.StopContainer(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Stopping the CloudFocker container...`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`cloudfocker-container`))\n\t\t\tEventually(statusCodeChecker).Should(Equal(0))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Deleting the CloudFocker container...`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`cloudfocker-container`))\n\t\t})\n\t})\n\n\tDescribe(\"Adding a buildpack\", func() {\n\t\tIt(\"should download the buildpack and add it to the buildpack directory\", func() {\n\t\t\tbuildpackDir, _ := ioutil.TempDir(os.TempDir(), \"cfocker-buildpack-test\")\n\t\t\tbuffer := gbytes.NewBuffer()\n\t\t\ttestfocker.AddBuildpack(buffer, \"https:\/\/github.com\/hatofmonkeys\/ruby-buildpack\", buildpackDir)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Downloading buildpack...`))\n\t\t\tEventually(buffer, 120).Should(gbytes.Say(`Downloaded buildpack.`))\n\t\t\tos.RemoveAll(buildpackDir)\n\t\t})\n\t})\n})\n\nfunc statusCodeChecker() int {\n\tres, err := http.Get(\"http:\/\/localhost:8080\/\")\n\tif err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn res.StatusCode\n\t}\n}\n<commit_msg>Disabled buildpack download test for speed<commit_after>package focker_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/hatofmonkeys\/cloudfocker\/focker\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Focker\", func() {\n\tvar (\n\t\ttestfocker *focker.Focker\n\t\tbuffer *gbytes.Buffer\n\t)\n\tBeforeEach(func() {\n\t\ttestfocker = focker.NewFocker()\n\t\tbuffer = gbytes.NewBuffer()\n\t})\n\n\tDescribe(\"Displaying the docker version\", func() {\n\t\tIt(\"should tell Docker to output its version\", func() {\n\t\t\ttestfocker.DockerVersion(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Checking Docker version`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Client API version: `))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Go version \\(client\\): go`))\n\t\t})\n\t})\n\n\tDescribe(\"Bootstrapping the base image\", func() {\n\t\t\/\/This works, but speed depends on your net connection\n\t\tXIt(\"should download and tag the lucid64 filesystem\", func() {\n\t\t\tfmt.Println(\"Downloading lucid64 - this could take a while\")\n\t\t\ttestfocker.ImportRootfsImage(buffer)\n\t\t\tEventually(buffer, 600).Should(gbytes.Say(`[a-f0-9]{64}`))\n\t\t})\n\t})\n\n\tDescribe(\"Writing a dockerfile\", func() {\n\t\tIt(\"should write a valid dockerfile\", func() {\n\t\t\ttestfocker.WriteDockerfile(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`FROM`))\n\t\t})\n\t})\n\n\tDescribe(\"Building a docker image\", func() {\n\t\tIt(\"should output a built image tag\", func() {\n\t\t\ttestfocker.BuildImage(buffer)\n\t\t\tEventually(buffer, 20).Should(gbytes.Say(`Successfully built [a-f0-9]{12}`))\n\t\t})\n\t})\n\n\tDescribe(\"Running the docker container\", func() {\n\t\tIt(\"should output a valid URL for the running application\", func() {\n\t\t\ttestfocker.RunContainer(buffer)\n\t\t\tEventually(buffer, 20).Should(gbytes.Say(`Successfully built [a-f0-9]{12}`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`[a-f0-9]{64}`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Connect to your running application at http:\/\/localhost:8080\/`))\n\t\t\tEventually(statusCodeChecker).Should(Equal(200))\n\t\t\ttestfocker.StopContainer(buffer)\n\t\t})\n\t})\n\n\tDescribe(\"Stopping the docker container\", func() {\n\t\tIt(\"should output the stopped image ID, not respond to HTTP, and delete the container\", func() {\n\t\t\ttestfocker.RunContainer(buffer)\n\t\t\ttestfocker.StopContainer(buffer)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Stopping the CloudFocker container...`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`cloudfocker-container`))\n\t\t\tEventually(statusCodeChecker).Should(Equal(0))\n\t\t\tEventually(buffer).Should(gbytes.Say(`Deleting the CloudFocker container...`))\n\t\t\tEventually(buffer).Should(gbytes.Say(`cloudfocker-container`))\n\t\t})\n\t})\n\n\tDescribe(\"Adding a buildpack\", func() {\n\t\tXIt(\"should download the buildpack and add it to the buildpack directory\", func() {\n\t\t\t\/\/This works, but speed depends on your net connection\n\t\t\tbuildpackDir, _ := ioutil.TempDir(os.TempDir(), \"cfocker-buildpack-test\")\n\t\t\tbuffer := gbytes.NewBuffer()\n\t\t\ttestfocker.AddBuildpack(buffer, \"https:\/\/github.com\/hatofmonkeys\/ruby-buildpack\", buildpackDir)\n\t\t\tEventually(buffer).Should(gbytes.Say(`Downloading buildpack...`))\n\t\t\tEventually(buffer, 120).Should(gbytes.Say(`Downloaded buildpack.`))\n\t\t\tos.RemoveAll(buildpackDir)\n\t\t})\n\t})\n})\n\nfunc statusCodeChecker() int {\n\tres, err := http.Get(\"http:\/\/localhost:8080\/\")\n\tif err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn res.StatusCode\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package all\n\nimport (\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_convert\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_date_string\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_delete\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_drop\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_strings\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_to_tag\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_write\"\n)\n<commit_msg>register processor<commit_after>package all\n\nimport (\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_add_tag\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_convert\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_date_string\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_delete\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_drop\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_strings\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_to_tag\"\n\t_ \"github.com\/karimra\/gnmic\/formatters\/event_write\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/google\/git-appraise\/commands\/input\"\n\t\"github.com\/google\/git-appraise\/repository\"\n\t\"github.com\/google\/git-appraise\/review\"\n\t\"github.com\/google\/git-appraise\/review\/comment\"\n\t\"strings\"\n)\n\nvar commentFlagSet = flag.NewFlagSet(\"comment\", flag.ExitOnError)\n\nvar (\n\tcommentMessage = commentFlagSet.String(\"m\", \"\", \"Message to attach to the review\")\n\tcommentParent = commentFlagSet.String(\"p\", \"\", \"Parent comment\")\n\tcommentFile = commentFlagSet.String(\"f\", \"\", \"File being commented upon\")\n\tcommentLine = commentFlagSet.Uint(\"l\", 0, \"Line being commented upon; requires that the -f flag also be set\")\n\tcommentLgtm = commentFlagSet.Bool(\"lgtm\", false, \"'Looks Good To Me'. Set this to express your approval. This cannot be combined with nmw\")\n\tcommentNmw = commentFlagSet.Bool(\"nmw\", false, \"'Needs More Work'. Set this to express your disapproval. This cannot be combined with lgtm\")\n)\n\n\/\/ commentHashExists checks if the given comment hash exists in the given comment threads.\nfunc commentHashExists(hashToFind string, threads []review.CommentThread) bool {\n\tfor _, thread := range threads {\n\t\tif thread.Hash == hashToFind {\n\t\t\treturn true\n\t\t}\n\t\tif commentHashExists(hashToFind, thread.Children) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ checkCommentLocation verifies that the given location exists at the given commit.\nfunc checkCommentLocation(repo repository.Repo, commit, file string, line uint) error {\n\tcontents, err := repo.Show(commit, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlines := strings.Split(contents, \"\\n\")\n\tif line >= uint(len(lines)) {\n\t\treturn fmt.Errorf(\"Line number %d does not exist in file %q\", line, file)\n\t}\n\treturn nil\n}\n\n\/\/ commentOnReview adds a comment to the current code review.\nfunc commentOnReview(repo repository.Repo, args []string) error {\n\tcommentFlagSet.Parse(args)\n\targs = commentFlagSet.Args()\n\n\tvar r *review.Review\n\tvar err error\n\tif len(args) > 1 {\n\t\treturn errors.New(\"Only accepting a single review is supported.\")\n\t}\n\n\tif len(args) == 1 {\n\t\tr, err = review.Get(repo, args[0])\n\t} else {\n\t\tr, err = review.GetCurrent(repo)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to load the review: %v\\n\", err)\n\t}\n\tif r == nil {\n\t\treturn errors.New(\"There is no matching review.\")\n\t}\n\n\tif *commentLgtm && *commentNmw {\n\t\treturn errors.New(\"You cannot combine the flags -lgtm and -nmw.\")\n\t}\n\tif *commentLine != 0 && *commentFile == \"\" {\n\t\treturn errors.New(\"Specifying a line number with the -l flag requires that you also specify a file name with the -f flag.\")\n\t}\n\tif *commentParent != \"\" && !commentHashExists(*commentParent, r.Comments) {\n\t\treturn errors.New(\"There is no matching parent comment.\")\n\t}\n\n\tif *commentMessage == \"\" {\n\t\t*commentMessage, err = input.LaunchEditor(repo, commentFilename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcommentedUponCommit, err := r.GetHeadCommit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlocation := comment.Location{\n\t\tCommit: commentedUponCommit,\n\t}\n\tif *commentFile != \"\" {\n\t\tif err := checkCommentLocation(r.Repo, commentedUponCommit, *commentFile, *commentLine); err != nil {\n\t\t\treturn fmt.Errorf(\"Unabled to comment on the given location: %v\", err)\n\t\t}\n\t\tlocation.Path = *commentFile\n\t\tif *commentLine != 0 {\n\t\t\tlocation.Range = &comment.Range{\n\t\t\t\tStartLine: uint32(*commentLine),\n\t\t\t}\n\t\t}\n\t}\n\n\tuserEmail, err := repo.GetUserEmail()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := comment.New(userEmail, *commentMessage)\n\tc.Location = &location\n\tc.Parent = *commentParent\n\tif *commentLgtm || *commentNmw {\n\t\tresolved := *commentLgtm\n\t\tc.Resolved = &resolved\n\t}\n\treturn r.AddComment(c)\n}\n\n\/\/ commentCmd defines the \"comment\" subcommand.\nvar commentCmd = &Command{\n\tUsage: func(arg0 string) {\n\t\tfmt.Printf(\"Usage: %s comment [<option>...] [<review-hash>]\\n\\nOptions:\\n\", arg0)\n\t\tcommentFlagSet.PrintDefaults()\n\t},\n\tRunMethod: func(repo repository.Repo, args []string) error {\n\t\treturn commentOnReview(repo, args)\n\t},\n}\n<commit_msg>Submitting review 1b6d44cd17cb<commit_after>\/*\nCopyright 2015 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/google\/git-appraise\/commands\/input\"\n\t\"github.com\/google\/git-appraise\/repository\"\n\t\"github.com\/google\/git-appraise\/review\"\n\t\"github.com\/google\/git-appraise\/review\/comment\"\n\t\"strings\"\n)\n\nvar commentFlagSet = flag.NewFlagSet(\"comment\", flag.ExitOnError)\n\nvar (\n\tcommentMessage = commentFlagSet.String(\"m\", \"\", \"Message to attach to the review\")\n\tcommentParent = commentFlagSet.String(\"p\", \"\", \"Parent comment\")\n\tcommentFile = commentFlagSet.String(\"f\", \"\", \"File being commented upon\")\n\tcommentLine = commentFlagSet.Uint(\"l\", 0, \"Line being commented upon; requires that the -f flag also be set\")\n\tcommentLgtm = commentFlagSet.Bool(\"lgtm\", false, \"'Looks Good To Me'. Set this to express your approval. This cannot be combined with nmw\")\n\tcommentNmw = commentFlagSet.Bool(\"nmw\", false, \"'Needs More Work'. Set this to express your disapproval. This cannot be combined with lgtm\")\n)\n\n\/\/ commentHashExists checks if the given comment hash exists in the given comment threads.\nfunc commentHashExists(hashToFind string, threads []review.CommentThread) bool {\n\tfor _, thread := range threads {\n\t\tif thread.Hash == hashToFind {\n\t\t\treturn true\n\t\t}\n\t\tif commentHashExists(hashToFind, thread.Children) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ checkCommentLocation verifies that the given location exists at the given commit.\nfunc checkCommentLocation(repo repository.Repo, commit, file string, line uint) error {\n\tcontents, err := repo.Show(commit, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlines := strings.Split(contents, \"\\n\")\n\tif line > uint(len(lines)) {\n\t\treturn fmt.Errorf(\"Line number %d does not exist in file %q\", line, file)\n\t}\n\treturn nil\n}\n\n\/\/ commentOnReview adds a comment to the current code review.\nfunc commentOnReview(repo repository.Repo, args []string) error {\n\tcommentFlagSet.Parse(args)\n\targs = commentFlagSet.Args()\n\n\tvar r *review.Review\n\tvar err error\n\tif len(args) > 1 {\n\t\treturn errors.New(\"Only accepting a single review is supported.\")\n\t}\n\n\tif len(args) == 1 {\n\t\tr, err = review.Get(repo, args[0])\n\t} else {\n\t\tr, err = review.GetCurrent(repo)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to load the review: %v\\n\", err)\n\t}\n\tif r == nil {\n\t\treturn errors.New(\"There is no matching review.\")\n\t}\n\n\tif *commentLgtm && *commentNmw {\n\t\treturn errors.New(\"You cannot combine the flags -lgtm and -nmw.\")\n\t}\n\tif *commentLine != 0 && *commentFile == \"\" {\n\t\treturn errors.New(\"Specifying a line number with the -l flag requires that you also specify a file name with the -f flag.\")\n\t}\n\tif *commentParent != \"\" && !commentHashExists(*commentParent, r.Comments) {\n\t\treturn errors.New(\"There is no matching parent comment.\")\n\t}\n\n\tif *commentMessage == \"\" {\n\t\t*commentMessage, err = input.LaunchEditor(repo, commentFilename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcommentedUponCommit, err := r.GetHeadCommit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlocation := comment.Location{\n\t\tCommit: commentedUponCommit,\n\t}\n\tif *commentFile != \"\" {\n\t\tif err := checkCommentLocation(r.Repo, commentedUponCommit, *commentFile, *commentLine); err != nil {\n\t\t\treturn fmt.Errorf(\"Unabled to comment on the given location: %v\", err)\n\t\t}\n\t\tlocation.Path = *commentFile\n\t\tif *commentLine != 0 {\n\t\t\tlocation.Range = &comment.Range{\n\t\t\t\tStartLine: uint32(*commentLine),\n\t\t\t}\n\t\t}\n\t}\n\n\tuserEmail, err := repo.GetUserEmail()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := comment.New(userEmail, *commentMessage)\n\tc.Location = &location\n\tc.Parent = *commentParent\n\tif *commentLgtm || *commentNmw {\n\t\tresolved := *commentLgtm\n\t\tc.Resolved = &resolved\n\t}\n\treturn r.AddComment(c)\n}\n\n\/\/ commentCmd defines the \"comment\" subcommand.\nvar commentCmd = &Command{\n\tUsage: func(arg0 string) {\n\t\tfmt.Printf(\"Usage: %s comment [<option>...] [<review-hash>]\\n\\nOptions:\\n\", arg0)\n\t\tcommentFlagSet.PrintDefaults()\n\t},\n\tRunMethod: func(repo repository.Repo, args []string) error {\n\t\treturn commentOnReview(repo, args)\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jingweno\/gh\/github\"\n\t\"github.com\/jingweno\/gh\/utils\"\n\t\"regexp\"\n)\n\nvar cmdCompare = &Command{\n\tRun: compare,\n\tUsage: \"compare [-u] [USER] [<START>...]<END>\",\n\tShort: \"Open a compare page on GitHub\",\n\tLong: `Open a GitHub compare view page in the system's default web browser.\n<START> to <END> are branch names, tag names, or commit SHA1s specifying\nthe range of history to compare. If a range with two dots (\"a..b\") is given,\nit will be transformed into one with three dots. If <START> is omitted,\nGitHub will compare against the base branch (the default is \"master\").\nIf <END> is omitted, GitHub compare view is opened for the current branch.\nWith \"-u\", outputs the URL rather than opening the browser.\n`,\n}\n\nvar (\n\tflagCompareURLOnly bool\n)\n\nfunc init() {\n\tcmdCompare.Flag.BoolVarP(&flagCompareURLOnly, \"url-only\", \"u\", false, \"URL only\")\n\n\tCmdRunner.Use(cmdCompare)\n}\n\n\/*\n $ gh compare refactor\n > open https:\/\/github.com\/CURRENT_REPO\/compare\/refactor\n\n $ gh compare 1.0..1.1\n > open https:\/\/github.com\/CURRENT_REPO\/compare\/1.0...1.1\n\n $ gh compare -u other-user patch\n > open https:\/\/github.com\/other-user\/REPO\/compare\/patch\n*\/\nfunc compare(command *Command, args *Args) {\n\tlocalRepo := github.LocalRepo()\n\tvar (\n\t\tbranch *github.Branch\n\t\tproject *github.Project\n\t\tr string\n\t\terr error\n\t)\n\n\tbranch, project, err = localRepo.RemoteBranchAndProject(\"\")\n\tutils.Check(err)\n\n\tif args.IsParamsEmpty() {\n\t\tmaster := localRepo.MasterBranch()\n\t\tif master.ShortName() == branch.ShortName() {\n\t\t\terr = fmt.Errorf(command.FormattedUsage())\n\t\t\tutils.Check(err)\n\t\t} else {\n\t\t\tr = branch.ShortName()\n\t\t}\n\t} else {\n\t\tr = parseCompareRange(args.RemoveParam(args.ParamsSize() - 1))\n\t\tif args.IsParamsEmpty() {\n\t\t\tproject, err = localRepo.CurrentProject()\n\t\t\tutils.Check(err)\n\t\t} else {\n\t\t\tproject = github.NewProject(args.RemoveParam(args.ParamsSize()-1), \"\", \"\")\n\t\t}\n\t}\n\n\tsubpage := utils.ConcatPaths(\"compare\", r)\n\turl := project.WebURL(\"\", \"\", subpage)\n\tlauncher, err := utils.BrowserLauncher()\n\tutils.Check(err)\n\n\tif flagCompareURLOnly {\n\t\targs.Replace(\"echo\", url)\n\t} else {\n\t\targs.Replace(launcher[0], \"\", launcher[1:]...)\n\t\targs.AppendParams(url)\n\t}\n}\n\nfunc parseCompareRange(r string) string {\n\tshaOrTag := fmt.Sprintf(\"((?:%s:)?\\\\w[\\\\w.-]+\\\\w)\", OwnerRe)\n\tshaOrTagRange := fmt.Sprintf(\"^%s\\\\.\\\\.%s$\", shaOrTag, shaOrTag)\n\tshaOrTagRangeRegexp := regexp.MustCompile(shaOrTagRange)\n\treturn shaOrTagRangeRegexp.ReplaceAllString(r, \"$1...$2\")\n}\n<commit_msg>Escape slashes in compare URLs with semicolon<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/jingweno\/gh\/github\"\n\t\"github.com\/jingweno\/gh\/utils\"\n)\n\nvar cmdCompare = &Command{\n\tRun: compare,\n\tUsage: \"compare [-u] [USER] [<START>...]<END>\",\n\tShort: \"Open a compare page on GitHub\",\n\tLong: `Open a GitHub compare view page in the system's default web browser.\n<START> to <END> are branch names, tag names, or commit SHA1s specifying\nthe range of history to compare. If a range with two dots (\"a..b\") is given,\nit will be transformed into one with three dots. If <START> is omitted,\nGitHub will compare against the base branch (the default is \"master\").\nIf <END> is omitted, GitHub compare view is opened for the current branch.\nWith \"-u\", outputs the URL rather than opening the browser.\n`,\n}\n\nvar (\n\tflagCompareURLOnly bool\n)\n\nfunc init() {\n\tcmdCompare.Flag.BoolVarP(&flagCompareURLOnly, \"url-only\", \"u\", false, \"URL only\")\n\n\tCmdRunner.Use(cmdCompare)\n}\n\n\/*\n $ gh compare refactor\n > open https:\/\/github.com\/CURRENT_REPO\/compare\/refactor\n\n $ gh compare 1.0..1.1\n > open https:\/\/github.com\/CURRENT_REPO\/compare\/1.0...1.1\n\n $ gh compare -u other-user patch\n > open https:\/\/github.com\/other-user\/REPO\/compare\/patch\n*\/\nfunc compare(command *Command, args *Args) {\n\tlocalRepo := github.LocalRepo()\n\tvar (\n\t\tbranch *github.Branch\n\t\tproject *github.Project\n\t\tr string\n\t\terr error\n\t)\n\n\tbranch, project, err = localRepo.RemoteBranchAndProject(\"\")\n\tutils.Check(err)\n\n\tif args.IsParamsEmpty() {\n\t\tmaster := localRepo.MasterBranch()\n\t\tif master.ShortName() == branch.ShortName() {\n\t\t\terr = fmt.Errorf(command.FormattedUsage())\n\t\t\tutils.Check(err)\n\t\t} else {\n\t\t\tr = branch.ShortName()\n\t\t}\n\t} else {\n\t\tr = parseCompareRange(args.RemoveParam(args.ParamsSize() - 1))\n\t\tif args.IsParamsEmpty() {\n\t\t\tproject, err = localRepo.CurrentProject()\n\t\t\tutils.Check(err)\n\t\t} else {\n\t\t\tproject = github.NewProject(args.RemoveParam(args.ParamsSize()-1), \"\", \"\")\n\t\t}\n\t}\n\n\tr = strings.Replace(r, \"\/\", \";\", -1)\n\tsubpage := utils.ConcatPaths(\"compare\", r)\n\turl := project.WebURL(\"\", \"\", subpage)\n\tlauncher, err := utils.BrowserLauncher()\n\tutils.Check(err)\n\n\tif flagCompareURLOnly {\n\t\targs.Replace(\"echo\", url)\n\t} else {\n\t\targs.Replace(launcher[0], \"\", launcher[1:]...)\n\t\targs.AppendParams(url)\n\t}\n}\n\nfunc parseCompareRange(r string) string {\n\tshaOrTag := fmt.Sprintf(\"((?:%s:)?\\\\w[\\\\w.-]+\\\\w)\", OwnerRe)\n\tshaOrTagRange := fmt.Sprintf(\"^%s\\\\.\\\\.%s$\", shaOrTag, shaOrTag)\n\tshaOrTagRangeRegexp := regexp.MustCompile(shaOrTagRange)\n\treturn shaOrTagRangeRegexp.ReplaceAllString(r, \"$1...$2\")\n}\n<|endoftext|>"} {"text":"<commit_before>package po\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar example = `\n#: faq_whyipv6.html\nmsgid \"Note this is in addition to any NAT you do at home.\"\nmsgstr \"\"\n\n#: faq_whyipv6.html\nmsgid \"Q: So, why worry? NAT will work, right? I use NAT at home today after all..\"\nmsgstr \"\"\n`\n\nvar reWHITESPACE = regexp.MustCompile(`\\s+`)\n\nfunc unquote(s string) (string, error) {\n\tif len(s) == 0 {\n\t\treturn s, nil\n\t}\n\ts = strings.TrimSpace(s)\n\treturn strconv.Unquote(s)\n}\n\n\/\/ Languages returns the list of locales loaded in the combined *Files object\nfunc (combined *Files) Languages() []string {\n\tret := []string{}\n\n\tfor k := range combined.ByLanguage {\n\t\tret = append(ret, k)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}\n\n\/\/ GetLocale simply returns the locale name; ie en_US or pt_BR\nfunc (f *File) GetLocale() string {\n\ts := f.Locale\n\treturn s\n}\n\n\/\/ GetLang returns the lowercase name string; ie en or pt\nfunc (f *File) GetLang() string {\n\ts := f.Locale\n\tp := strings.Split(s, \"_\")\n\treturn p[0]\n}\n\n\/\/ GetLangUC returns the uppercase name string; ie EN or PT\nfunc (f *File) GetLangUC() string {\n\ts := f.Locale\n\tp := strings.Split(s, \"_\")\n\treturn strings.ToUpper(p[0])\n}\n\n\/\/ GetLang returns the lowercase name string; ie en or pt\nfunc (f *File) GetLangName() string {\n\ts := f.Language\n\ts = Friendly(s)\n\treturn s\n}\n\n\/\/ Translate takes a given input text, and returns back\n\/\/ either the translated text, or the original text again.\nfunc (f *File) Translate(input string, escapequotes bool) string {\n\n\t\/\/ Canonicalize.\n\t\/\/ Remove redundant, leading, and trailing whitespace.\n\tinput = strings.TrimSpace(input)\n\tinput = reWHITESPACE.ReplaceAllString(input, \" \")\n\n\tif input == \"lang\" {\n\t\treturn f.GetLang()\n\t}\n\tif input == \"langUC\" {\n\t\treturn f.GetLangUC()\n\t}\n\tif input == \"locale\" {\n\t\treturn f.GetLocale()\n\t}\n\tif input == \"langname\" {\n\t\treturn f.GetLangName()\n\t}\n\n\tnewtext := input\n\n\tif found, ok := f.ByID[input]; ok {\n\t\tc := found.MsgStr\n\t\tif c != \"\" {\n\t\t\tnewtext = c\n\t\t}\n\t}\n\n\tif escapequotes {\n\t\tnewtext = strings.Replace(newtext, `\"`, `\\\"`, -1)\n\t\tnewtext = strings.Replace(newtext, `'`, `\\'`, -1)\n\t}\n\t\/\/ TODO escapequotes\n\t\/\/ Perl does this:\n\t\/\/ $text =~ s\/(?<![\\\\])\"\/\\\\\"\/g;\n\t\/\/ $text =~ s\/(?<![\\\\])'\/\\\\'\/g;\n\t\/\/ GO does not do look-behind assertions\n\treturn newtext\n}\n\n\/\/ Translate takes a given input text, and returns back\n\/\/ either the translated text, or the original text again.\nfunc (f *File) Add(input string, context string, escapequotes bool) {\n\n\t\/\/\tlog.Printf(\"po file Add(%s)\\n\", input)\n\t\/\/ Canonicalize.\n\t\/\/ Remove redundant, leading, and trailing whitespace.\n\tinput = strings.TrimSpace(input)\n\tinput = reWHITESPACE.ReplaceAllString(input, \" \")\n\n\t\/\/\tlog.Printf(\"po Add input=%s context=%s escape=%v\\n\", input, context, escapequotes)\n\n\t\/\/ Skip these, these will be dynamically responded to.\n\tif input == \"lang\" || input == \"langUC\" || input == \"locale\" {\n\t\treturn\n\t}\n\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\t_ = \"breakpoint\"\n\n\tif v, ok := f.ByID[input]; ok == false {\n\t\t\/\/ Not yet set? Let's do so.\n\t\t\/\/\tfmt.Printf(\"DEBUG Saving Comment=%s MsgID=%s\\n\", context, input)\n\t\tf.ByID[input] = &Record{\n\t\t\tComment: context,\n\t\t\tMsgID: input,\n\t\t\tMsgStr: \"\",\n\t\t}\n\t\tf.InOrder = append(f.InOrder, input)\n\t} else if strings.Contains(v.Comment, \"not-used\") {\n\t\tv.Comment = context \/\/ Update context\n\t} else {\n\t\t\/\/\tfmt.Printf(\"DEBUG ALREADY HAVE %#v\\n\", f.ByID[input])\n\n\t}\n\n\t\/*\n\t\tif escapequotes {\n\t\t\tnewtext = strings.Replace(newtext, `\"`, `\\\"`, -1)\n\t\t\tnewtext = strings.Replace(newtext, `'`, `\\'`, -1)\n\t\t}\n\t*\/\n\n}\n\n\/\/ ApacheAddLanguage Generates the Apache \"AddLanguage\" text\nfunc (f *Files) ApacheAddLanguage() string {\n\tlist := append([]string{\"en_US\"}, f.Languages()...)\n\ttext := \"\"\n\tseen := make(map[string]bool)\n\n\tadd := func(s string) {\n\t\tif seen[s] == false {\n\t\t\ttext = text + s + \"\\n\"\n\t\t\tseen[s] = true\n\t\t}\n\t}\n\n\tfor _, locale := range list {\n\t\tparts := strings.Split(locale, \"_\")\n\t\tif len(parts) > 0 {\n\t\t\tadd(fmt.Sprintf(\"AddLanguage %s .%s\", parts[0], locale))\n\t\t}\n\t\tdashed := strings.Replace(locale, \"_\", \"-\", -1)\n\t\tadd(fmt.Sprintf(\"AddLanguage %s .%s\", dashed, locale))\n\n\t}\n\n\treturn text\n\n}\n<commit_msg>make percenttranslated available to translations<commit_after>package po\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar example = `\n#: faq_whyipv6.html\nmsgid \"Note this is in addition to any NAT you do at home.\"\nmsgstr \"\"\n\n#: faq_whyipv6.html\nmsgid \"Q: So, why worry? NAT will work, right? I use NAT at home today after all..\"\nmsgstr \"\"\n`\n\nvar reWHITESPACE = regexp.MustCompile(`\\s+`)\n\nfunc unquote(s string) (string, error) {\n\tif len(s) == 0 {\n\t\treturn s, nil\n\t}\n\ts = strings.TrimSpace(s)\n\treturn strconv.Unquote(s)\n}\n\n\/\/ Languages returns the list of locales loaded in the combined *Files object\nfunc (combined *Files) Languages() []string {\n\tret := []string{}\n\n\tfor k := range combined.ByLanguage {\n\t\tret = append(ret, k)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}\n\n\/\/ GetLocale simply returns the locale name; ie en_US or pt_BR\nfunc (f *File) GetLocale() string {\n\ts := f.Locale\n\treturn s\n}\n\n\/\/ GetLang returns the lowercase name string; ie en or pt\nfunc (f *File) GetLang() string {\n\ts := f.Locale\n\tp := strings.Split(s, \"_\")\n\treturn p[0]\n}\n\n\/\/ GetLangUC returns the uppercase name string; ie EN or PT\nfunc (f *File) GetLangUC() string {\n\ts := f.Locale\n\tp := strings.Split(s, \"_\")\n\treturn strings.ToUpper(p[0])\n}\n\n\/\/ GetLang returns the lowercase name string; ie en or pt\nfunc (f *File) GetLangName() string {\n\ts := f.Language\n\ts = Friendly(s)\n\treturn s\n}\n\n\/\/ GetLangPercentTranslated returns what percentage of the translation is done\nfunc (f *File) GetLangPercentTranslated() string {\n\ts := f.PercentTranslated\n\treturn s\n}\n\n\/\/ Translate takes a given input text, and returns back\n\/\/ either the translated text, or the original text again.\nfunc (f *File) Translate(input string, escapequotes bool) string {\n\n\t\/\/ Canonicalize.\n\t\/\/ Remove redundant, leading, and trailing whitespace.\n\tinput = strings.TrimSpace(input)\n\tinput = reWHITESPACE.ReplaceAllString(input, \" \")\n\n\tif input == \"lang\" {\n\t\treturn f.GetLang()\n\t}\n\tif input == \"langUC\" {\n\t\treturn f.GetLangUC()\n\t}\n\tif input == \"locale\" {\n\t\treturn f.GetLocale()\n\t}\n\tif input == \"langname\" {\n\t\treturn f.GetLangName()\n\t}\n\tif input == \"percenttranslated\" {\n\t\treturn f.GetLangPercentTranslated()\n\t}\n\n\tnewtext := input\n\n\tif found, ok := f.ByID[input]; ok {\n\t\tc := found.MsgStr\n\t\tif c != \"\" {\n\t\t\tnewtext = c\n\t\t}\n\t}\n\n\tif escapequotes {\n\t\tnewtext = strings.Replace(newtext, `\"`, `\\\"`, -1)\n\t\tnewtext = strings.Replace(newtext, `'`, `\\'`, -1)\n\t}\n\t\/\/ TODO escapequotes\n\t\/\/ Perl does this:\n\t\/\/ $text =~ s\/(?<![\\\\])\"\/\\\\\"\/g;\n\t\/\/ $text =~ s\/(?<![\\\\])'\/\\\\'\/g;\n\t\/\/ GO does not do look-behind assertions\n\treturn newtext\n}\n\n\/\/ Translate takes a given input text, and returns back\n\/\/ either the translated text, or the original text again.\nfunc (f *File) Add(input string, context string, escapequotes bool) {\n\n\t\/\/\tlog.Printf(\"po file Add(%s)\\n\", input)\n\t\/\/ Canonicalize.\n\t\/\/ Remove redundant, leading, and trailing whitespace.\n\tinput = strings.TrimSpace(input)\n\tinput = reWHITESPACE.ReplaceAllString(input, \" \")\n\n\t\/\/\tlog.Printf(\"po Add input=%s context=%s escape=%v\\n\", input, context, escapequotes)\n\n\t\/\/ Skip these, these will be dynamically responded to.\n\tif input == \"lang\" || input == \"langUC\" || input == \"locale\" {\n\t\treturn\n\t}\n\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\t_ = \"breakpoint\"\n\n\tif v, ok := f.ByID[input]; ok == false {\n\t\t\/\/ Not yet set? Let's do so.\n\t\t\/\/\tfmt.Printf(\"DEBUG Saving Comment=%s MsgID=%s\\n\", context, input)\n\t\tf.ByID[input] = &Record{\n\t\t\tComment: context,\n\t\t\tMsgID: input,\n\t\t\tMsgStr: \"\",\n\t\t}\n\t\tf.InOrder = append(f.InOrder, input)\n\t} else if strings.Contains(v.Comment, \"not-used\") {\n\t\tv.Comment = context \/\/ Update context\n\t} else {\n\t\t\/\/\tfmt.Printf(\"DEBUG ALREADY HAVE %#v\\n\", f.ByID[input])\n\n\t}\n\n\t\/*\n\t\tif escapequotes {\n\t\t\tnewtext = strings.Replace(newtext, `\"`, `\\\"`, -1)\n\t\t\tnewtext = strings.Replace(newtext, `'`, `\\'`, -1)\n\t\t}\n\t*\/\n\n}\n\n\/\/ ApacheAddLanguage Generates the Apache \"AddLanguage\" text\nfunc (f *Files) ApacheAddLanguage() string {\n\tlist := append([]string{\"en_US\"}, f.Languages()...)\n\ttext := \"\"\n\tseen := make(map[string]bool)\n\n\tadd := func(s string) {\n\t\tif seen[s] == false {\n\t\t\ttext = text + s + \"\\n\"\n\t\t\tseen[s] = true\n\t\t}\n\t}\n\n\tfor _, locale := range list {\n\t\tparts := strings.Split(locale, \"_\")\n\t\tif len(parts) > 0 {\n\t\t\tadd(fmt.Sprintf(\"AddLanguage %s .%s\", parts[0], locale))\n\t\t}\n\t\tdashed := strings.Replace(locale, \"_\", \"-\", -1)\n\t\tadd(fmt.Sprintf(\"AddLanguage %s .%s\", dashed, locale))\n\n\t}\n\n\treturn text\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux darwin freebsd\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/PagerDuty\/godspeed\"\n\t\"github.com\/zorkian\/go-datadog-api\"\n\t\"os\"\n)\n\n\/\/ StatsdIn sends metrics to Dogstatsd on a `kvexpress in` operation.\nfunc StatsdIn(key string, dataLength int, data string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='in'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.in\", tags)\n\t\tstatsd.Gauge(\"kvexpress.bytes\", float64(dataLength), tags)\n\t\tstatsd.Gauge(\"kvexpress.lines\", float64(LineCount(data)), tags)\n\t}\n}\n\n\/\/ StatsdOut sends metrics to Dogstatsd on a `kvexpress out` operation.\nfunc StatsdOut(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='out'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.out\", tags)\n\t}\n}\n\n\/\/ StatsdLocked sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ that is blocked by a locked file.\nfunc StatsdLocked(file string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' file='%s' stats='locked'\", file), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(file, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.locked\", tags)\n\t}\n}\n\n\/\/ StatsdLength sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ where the file isn't long enough.\nfunc StatsdLength(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='not_long_enough'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"not_long_enough\")\n\t\tstatsd.Incr(\"kvexpress.not_long_enough\", tags)\n\t}\n}\n\n\/\/ StatsdChecksum sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ where the checksum doesn't match.\nfunc StatsdChecksum(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='checksum_mismatch'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"checksum_mismatch\")\n\t\tstatsd.Incr(\"kvexpress.checksum_mismatch\", tags)\n\t}\n}\n\n\/\/ StatsdLock sends metrics to Dogstatsd on a `kvexpress lock` operation.\nfunc StatsdLock(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='lock'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.lock\", tags)\n\t}\n}\n\n\/\/ StatsdUnlock sends metrics to Dogstatsd on a `kvexpress unlock` operation.\nfunc StatsdUnlock(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='unlock'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.unlock\", tags)\n\t}\n}\n\n\/\/ StatsdRaw sends metrics to Dogstatsd on a `kvexpress raw` operation.\nfunc StatsdRaw(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='raw'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.raw\", tags)\n\t}\n}\n\n\/\/ StatsdReconnect sends metrics when we have Consul connection retries.\nfunc StatsdReconnect(times int) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' reconnect='%d'\", times), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := make([]string, 2)\n\t\thostname, _ := os.Hostname()\n\t\thostTag := fmt.Sprintf(\"host:%s\", hostname)\n\t\tdirectionTag := fmt.Sprintf(\"direction:%s\", Direction)\n\t\ttags = append(tags, hostTag)\n\t\ttags = append(tags, directionTag)\n\t\tstatsd.Incr(\"kvexpress.consul_reconnect\", tags)\n\t}\n}\n\n\/\/ StatsdRunTime sends metrics to Dogstatsd on various operations.\nfunc StatsdRunTime(key string, location string, msec int64) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' location='%s' msec='%d'\", key, location, msec), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, location)\n\t\tlocationTag := fmt.Sprintf(\"location:%s\", location)\n\t\ttags = append(tags, locationTag)\n\t\tstatsd.Gauge(\"kvexpress.time\", float64(msec), tags)\n\t}\n}\n\n\/\/ StatsdPanic sends metrics to Dogstatsd when something really bad happens.\n\/\/ It also stops the execution of kvexpress.\nfunc StatsdPanic(key, location string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' location='%s' stats='panic'\", key, location), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, location)\n\t\tstatsd.Incr(\"kvexpress.panic\", tags)\n\t}\n\t\/\/ If we're going to panic, we might as well stop right here.\n\t\/\/ Means we can't connect to Consul, download a URL or\n\t\/\/ write and\/or chown files.\n\tos.Exit(0)\n}\n\n\/\/ DDAPIConnect connects to the Datadog API and returns a client object.\nfunc DDAPIConnect(api, app string) *datadog.Client {\n\tclient := datadog.NewClient(api, app)\n\treturn client\n}\n\n\/\/ makeTags creates some standard tags for use with Dogstatsd and the Datadog API.\nfunc makeTags(key, location string) []string {\n\ttags := make([]string, 4)\n\tkeyTag := fmt.Sprintf(\"key:%s\", key)\n\thostname, _ := os.Hostname()\n\thostTag := fmt.Sprintf(\"host:%s\", hostname)\n\tdirectionTag := fmt.Sprintf(\"direction:%s\", Direction)\n\tlocationTag := fmt.Sprintf(\"location:%s\", location)\n\ttags = append(tags, keyTag)\n\ttags = append(tags, hostTag)\n\ttags = append(tags, directionTag)\n\ttags = append(tags, locationTag)\n\treturn tags\n}\n\n\/\/ TODO: These three functions are ripe for refactoring to be more Golang like.\n\n\/\/ DDStopEvent sends a Datadog event to the API when there's a stop key present.\nfunc DDStopEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDStopEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"stop_key_present\")\n\ttags = append(tags, \"kvexpress:stop\")\n\ttitle := fmt.Sprintf(\"Stop key is present: %s. Stopping.\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"error\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDLengthEvent sends a Datadog event to the API when the file\/url is too short.\nfunc DDLengthEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDLengthEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"not_long_enough\")\n\ttags = append(tags, \"kvexpress:length\")\n\ttitle := fmt.Sprintf(\"Not long enough: %s. Stopping.\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"error\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDSaveDataEvent sends a Datadog event to the API when we have updated a Consul key.\nfunc DDSaveDataEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDSaveDataEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"complete\")\n\ttags = append(tags, \"kvexpress:success\")\n\ttitle := fmt.Sprintf(\"Updated: %s\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"info\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDCopyDataEvent sends a Datadog event to the API when we have used `kvexpress copy`\n\/\/ to copy a Consul key.\nfunc DDCopyDataEvent(dd *datadog.Client, keyFrom, keyTo string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDCopyDataEvent='true' keyFrom='%s' keyTo='%s'\", keyFrom, keyTo), \"debug\")\n\ttags := makeTags(keyTo, \"complete\")\n\ttags = append(tags, \"kvexpress:success\")\n\ttags = append(tags, fmt.Sprintf(\"keyFrom:%s\", keyFrom))\n\ttitle := fmt.Sprintf(\"Copy: %s to %s\", keyFrom, keyTo)\n\tevent := datadog.Event{Title: title, Text: title, AlertType: \"info\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDSaveStopEvent sends a Datadog event when we have added a stop key to Consul.\nfunc DDSaveStopEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDSaveStopEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"stop_key_save\")\n\ttags = append(tags, \"kvexpress:stop_set\")\n\ttitle := fmt.Sprintf(\"Set Stop Key: %s\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"warning\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n<commit_msg>Decompress and convert string if it's compressed to make sure the line stats are correct. This should close #91.<commit_after>\/\/ +build linux darwin freebsd\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/PagerDuty\/godspeed\"\n\t\"github.com\/zorkian\/go-datadog-api\"\n\t\"os\"\n)\n\n\/\/ StatsdIn sends metrics to Dogstatsd on a `kvexpress in` operation.\nfunc StatsdIn(key string, dataLength int, data string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='in'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.in\", tags)\n\t\tstatsd.Gauge(\"kvexpress.bytes\", float64(dataLength), tags)\n\t\t\/\/ If the data is compressed - then LineCount will always return 1.\n\t\t\/\/ That's not useful or accurate, so let's decompress and count that.\n\t\tif Compress {\n\t\t\tdata = DecompressData(data)\n\t\t}\n\t\tstatsd.Gauge(\"kvexpress.lines\", float64(LineCount(data)), tags)\n\t}\n}\n\n\/\/ StatsdOut sends metrics to Dogstatsd on a `kvexpress out` operation.\nfunc StatsdOut(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='out'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.out\", tags)\n\t}\n}\n\n\/\/ StatsdLocked sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ that is blocked by a locked file.\nfunc StatsdLocked(file string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' file='%s' stats='locked'\", file), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(file, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.locked\", tags)\n\t}\n}\n\n\/\/ StatsdLength sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ where the file isn't long enough.\nfunc StatsdLength(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='not_long_enough'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"not_long_enough\")\n\t\tstatsd.Incr(\"kvexpress.not_long_enough\", tags)\n\t}\n}\n\n\/\/ StatsdChecksum sends metrics to Dogstatsd on a `kvexpress out` operation\n\/\/ where the checksum doesn't match.\nfunc StatsdChecksum(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='checksum_mismatch'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"checksum_mismatch\")\n\t\tstatsd.Incr(\"kvexpress.checksum_mismatch\", tags)\n\t}\n}\n\n\/\/ StatsdLock sends metrics to Dogstatsd on a `kvexpress lock` operation.\nfunc StatsdLock(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='lock'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.lock\", tags)\n\t}\n}\n\n\/\/ StatsdUnlock sends metrics to Dogstatsd on a `kvexpress unlock` operation.\nfunc StatsdUnlock(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='unlock'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.unlock\", tags)\n\t}\n}\n\n\/\/ StatsdRaw sends metrics to Dogstatsd on a `kvexpress raw` operation.\nfunc StatsdRaw(key string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' stats='raw'\", key), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, \"complete\")\n\t\tstatsd.Incr(\"kvexpress.raw\", tags)\n\t}\n}\n\n\/\/ StatsdReconnect sends metrics when we have Consul connection retries.\nfunc StatsdReconnect(times int) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' reconnect='%d'\", times), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := make([]string, 2)\n\t\thostname, _ := os.Hostname()\n\t\thostTag := fmt.Sprintf(\"host:%s\", hostname)\n\t\tdirectionTag := fmt.Sprintf(\"direction:%s\", Direction)\n\t\ttags = append(tags, hostTag)\n\t\ttags = append(tags, directionTag)\n\t\tstatsd.Incr(\"kvexpress.consul_reconnect\", tags)\n\t}\n}\n\n\/\/ StatsdRunTime sends metrics to Dogstatsd on various operations.\nfunc StatsdRunTime(key string, location string, msec int64) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' location='%s' msec='%d'\", key, location, msec), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, location)\n\t\tlocationTag := fmt.Sprintf(\"location:%s\", location)\n\t\ttags = append(tags, locationTag)\n\t\tstatsd.Gauge(\"kvexpress.time\", float64(msec), tags)\n\t}\n}\n\n\/\/ StatsdPanic sends metrics to Dogstatsd when something really bad happens.\n\/\/ It also stops the execution of kvexpress.\nfunc StatsdPanic(key, location string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"dogstatsd='true' key='%s' location='%s' stats='panic'\", key, location), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(key, location)\n\t\tstatsd.Incr(\"kvexpress.panic\", tags)\n\t}\n\t\/\/ If we're going to panic, we might as well stop right here.\n\t\/\/ Means we can't connect to Consul, download a URL or\n\t\/\/ write and\/or chown files.\n\tos.Exit(0)\n}\n\n\/\/ DDAPIConnect connects to the Datadog API and returns a client object.\nfunc DDAPIConnect(api, app string) *datadog.Client {\n\tclient := datadog.NewClient(api, app)\n\treturn client\n}\n\n\/\/ makeTags creates some standard tags for use with Dogstatsd and the Datadog API.\nfunc makeTags(key, location string) []string {\n\ttags := make([]string, 4)\n\tkeyTag := fmt.Sprintf(\"key:%s\", key)\n\thostname, _ := os.Hostname()\n\thostTag := fmt.Sprintf(\"host:%s\", hostname)\n\tdirectionTag := fmt.Sprintf(\"direction:%s\", Direction)\n\tlocationTag := fmt.Sprintf(\"location:%s\", location)\n\ttags = append(tags, keyTag)\n\ttags = append(tags, hostTag)\n\ttags = append(tags, directionTag)\n\ttags = append(tags, locationTag)\n\treturn tags\n}\n\n\/\/ TODO: These three functions are ripe for refactoring to be more Golang like.\n\n\/\/ DDStopEvent sends a Datadog event to the API when there's a stop key present.\nfunc DDStopEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDStopEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"stop_key_present\")\n\ttags = append(tags, \"kvexpress:stop\")\n\ttitle := fmt.Sprintf(\"Stop key is present: %s. Stopping.\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"error\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDLengthEvent sends a Datadog event to the API when the file\/url is too short.\nfunc DDLengthEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDLengthEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"not_long_enough\")\n\ttags = append(tags, \"kvexpress:length\")\n\ttitle := fmt.Sprintf(\"Not long enough: %s. Stopping.\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"error\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDSaveDataEvent sends a Datadog event to the API when we have updated a Consul key.\nfunc DDSaveDataEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDSaveDataEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"complete\")\n\ttags = append(tags, \"kvexpress:success\")\n\ttitle := fmt.Sprintf(\"Updated: %s\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"info\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDCopyDataEvent sends a Datadog event to the API when we have used `kvexpress copy`\n\/\/ to copy a Consul key.\nfunc DDCopyDataEvent(dd *datadog.Client, keyFrom, keyTo string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDCopyDataEvent='true' keyFrom='%s' keyTo='%s'\", keyFrom, keyTo), \"debug\")\n\ttags := makeTags(keyTo, \"complete\")\n\ttags = append(tags, \"kvexpress:success\")\n\ttags = append(tags, fmt.Sprintf(\"keyFrom:%s\", keyFrom))\n\ttitle := fmt.Sprintf(\"Copy: %s to %s\", keyFrom, keyTo)\n\tevent := datadog.Event{Title: title, Text: title, AlertType: \"info\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n\n\/\/ DDSaveStopEvent sends a Datadog event when we have added a stop key to Consul.\nfunc DDSaveStopEvent(dd *datadog.Client, key, value string) {\n\tLog(fmt.Sprintf(\"datadog='true' DDSaveStopEvent='true' key='%s'\", key), \"debug\")\n\ttags := makeTags(key, \"stop_key_save\")\n\ttags = append(tags, \"kvexpress:stop_set\")\n\ttitle := fmt.Sprintf(\"Set Stop Key: %s\", key)\n\tevent := datadog.Event{Title: title, Text: value, AlertType: \"warning\", Tags: tags}\n\tpost, _ := dd.PostEvent(&event)\n\tif post != nil {\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"runtime\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tforce = false\n\tclobber = false\n\tcopy = false\n)\n\nfunc init() {\n\tinstallCmd.Flags().BoolVarP(&force, \"force\", \"f\", false, \"Allow installation to existing directory.\")\n\tinstallCmd.Flags().BoolVarP(&clobber, \"clobber\", \"c\", false, \"Overwrite existing files in target destination.\")\n\tinstallCmd.Flags().BoolVar(©, \"copy\", false, \"Copy this file to the target destination as well. [Currently unimplemented]\")\n\tinstallCmd.Flags().StringSliceP(\"skip\", \"s\", []string{}, \"Commands to skip during installation.\")\n\n\tRootCmd.AddCommand(installCmd)\n}\n\n\/\/ Commands to not create shims for\nvar skipCommands = []string{\n\t\"help\",\n\t\"install\",\n\t\"version\",\n}\n\nvar installCmd = &cobra.Command{\n\tUse: \"install [flags] targetdirectory\",\n\tShort: \"Install cmdo and all subcommand shims.\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 0 {\n\t\t\tcmd.Usage()\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Installation target\n\t\ttarget, err := filepath.Abs(args[0])\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Unable to determine absolute path for:\", args[0])\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\t\/\/ Should never fail, but better safe than sorry\n\t\tfi, err := os.Stat(target)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tfmt.Println(\"Unable to access target path:\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\t\/\/ Supplied a file as target? Shame\n\t\tif fi != nil && fi.IsDir() == false {\n\t\t\tfmt.Println(\"Target is a file. Try again.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif fi != nil && fi.IsDir() && force == false {\n\t\t\tfmt.Println(\"Target directory exists. Use --force if you want to install to existing directory.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif toSkip, err := cmd.Flags().GetStringSlice(\"skip\"); err == nil {\n\t\t\tskipCommands = append(skipCommands, toSkip...)\n\t\t}\n\n\t\t\/\/ Create target directory if it does not exist\n\t\tif fi == nil {\n\t\t\tif err := os.MkdirAll(target, 0755); err != nil {\n\t\t\t\tfmt.Println(\"Unable to create target directory:\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Loop through all non-hidden, non-skipped commands and create shims\n\t\tfmt.Println(\"Installing shims.\")\n\t\tfor _, cmd := range RootCmd.Commands() {\n\t\t\tname := cmd.Name()\n\t\t\t\/\/ Make sure we shouldn't skip this command\n\t\t\tif stringSliceContains(skipCommands, name) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Printf(\" Creating shim for '%s'...\\n\", cmd.Name())\n\t\t\tif err := createShim(target, cmd.Name()); err != nil {\n\t\t\t\tfmt.Printf(\"Error creating shim for %s: %s\\n\", cmd.Name(), err.Error())\n\t\t\t}\n\t\t}\n\n\t\t\/\/ All done!\n\t\tfmt.Println(\"All done! Shims should be in:\", target)\n\t},\n}\n\nfunc stringSliceContains(slice []string, search string) bool {\n\tfor _, v := range slice {\n\t\tif v == search {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ TODO Implement target exists && clobber == false\nfunc targetExists(target string) (bool, error) {\n\t_, err := os.Stat(target)\n\tif err == nil {\n\t\treturn false, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}\n\nfunc createShim(dir, command string) error {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn winShim(dir, command)\n\tdefault:\n\t\treturn shellShim(dir, command)\n\t}\n}\n\nfunc winShim(dir, command string) error {\n\ttarget := filepath.Join(dir, command+\".cmd\")\n\tif fi, err := os.Stat(target); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t} else if fi != nil && !clobber {\n\t\tfmt.Printf(\" Skipping shim for '%s', target exists and clobber == false.\\n\", command)\n\t\treturn nil\n\t}\n\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tcmdo, _ := filepath.Abs(os.Args[0])\n\tcmd := fmt.Sprintf(`@\"%s\" %s %%*`, cmdo, command)\n\tif _, err := f.WriteString(cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc shellShim(dir, command string) error {\n\ttarget := filepath.Join(dir, command)\n\tif _, err := os.Stat(target); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\ttmpl := template.New(\"\")\n\ttmpl, err = tmpl.Parse(shellShimTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdo, _ := filepath.Abs(os.Args[0])\n\tif err := tmpl.Execute(f, map[string]string{\"CmdoPath\": cmdo, \"Command\": command}); err != nil {\n\t\t\/\/ TODO Remove the created file\n\t\treturn err\n\t}\n\n\tif err := os.Chmod(target, 0755); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar shellShimTemplate = `#!\/bin\/sh\n\ncmdo=\"{{.CmdoPath}}\"\n\n# TODO: Test in msys\/mingw\/etc.\ncase $(uname) in\n *CYGWIN*) cmdo=$(cygpath -w \"${cmdo}\");;\nesac\n\nif [ -x \"${cmdo}\" ]; then\n \"${cmdo}\" \"{{.Command}}\" \"$@\"\n ret=$?\n exit $ret\nelse\n echo \"Unable to find base executable: ${cmdo}\"\n exit 1\nfi\n`\n<commit_msg>Switching to osext -- os.Args[0] wasn't cutting it<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"runtime\"\n\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tforce = false\n\tclobber = false\n\tcopy = false\n)\n\nfunc init() {\n\tinstallCmd.Flags().BoolVarP(&force, \"force\", \"f\", false, \"Allow installation to existing directory.\")\n\tinstallCmd.Flags().BoolVarP(&clobber, \"clobber\", \"c\", false, \"Overwrite existing files in target destination.\")\n\tinstallCmd.Flags().BoolVar(©, \"copy\", false, \"Copy this file to the target destination as well. [Currently unimplemented]\")\n\tinstallCmd.Flags().StringSliceP(\"skip\", \"s\", []string{}, \"Commands to skip during installation.\")\n\n\tRootCmd.AddCommand(installCmd)\n}\n\n\/\/ Commands to not create shims for\nvar skipCommands = []string{\n\t\"help\",\n\t\"install\",\n\t\"version\",\n}\n\nvar installCmd = &cobra.Command{\n\tUse: \"install [flags] targetdirectory\",\n\tShort: \"Install cmdo and all subcommand shims.\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 0 {\n\t\t\tcmd.Usage()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\t\/\/ Installation target\n\t\ttarget, err := filepath.Abs(args[0])\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Unable to determine absolute path for:\", args[0])\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\t\/\/ Should never fail, but better safe than sorry\n\t\tfi, err := os.Stat(target)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tfmt.Println(\"Unable to access target path:\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\t\/\/ Supplied a file as target? Shame\n\t\tif fi != nil && fi.IsDir() == false {\n\t\t\tfmt.Println(\"Target is a file. Try again.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif fi != nil && fi.IsDir() && force == false {\n\t\t\tfmt.Println(\"Target directory exists. Use --force if you want to install to existing directory.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif toSkip, err := cmd.Flags().GetStringSlice(\"skip\"); err == nil {\n\t\t\tskipCommands = append(skipCommands, toSkip...)\n\t\t}\n\n\t\t\/\/ Create target directory if it does not exist\n\t\tif fi == nil {\n\t\t\tif err := os.MkdirAll(target, 0755); err != nil {\n\t\t\t\tfmt.Println(\"Unable to create target directory:\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Loop through all non-hidden, non-skipped commands and create shims\n\t\tfmt.Println(\"Installing shims.\")\n\t\tfor _, cmd := range RootCmd.Commands() {\n\t\t\tname := cmd.Name()\n\t\t\t\/\/ Make sure we shouldn't skip this command\n\t\t\tif stringSliceContains(skipCommands, name) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Printf(\" Creating shim for '%s'...\\n\", cmd.Name())\n\t\t\tif err := createShim(target, cmd.Name()); err != nil {\n\t\t\t\tfmt.Printf(\"Error creating shim for %s: %s\\n\", cmd.Name(), err.Error())\n\t\t\t}\n\t\t}\n\n\t\t\/\/ All done!\n\t\tfmt.Println(\"All done! Shims should be in:\", target)\n\t},\n}\n\nfunc stringSliceContains(slice []string, search string) bool {\n\tfor _, v := range slice {\n\t\tif v == search {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ TODO Implement target exists && clobber == false\nfunc targetExists(target string) (bool, error) {\n\t_, err := os.Stat(target)\n\tif err == nil {\n\t\treturn false, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}\n\nfunc createShim(dir, command string) error {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn winShim(dir, command)\n\tdefault:\n\t\treturn shellShim(dir, command)\n\t}\n}\n\nfunc winShim(dir, command string) error {\n\ttarget := filepath.Join(dir, command+\".cmd\")\n\tif fi, err := os.Stat(target); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t} else if fi != nil && !clobber {\n\t\tfmt.Printf(\" Skipping shim for '%s', target exists and clobber == false.\\n\", command)\n\t\treturn nil\n\t}\n\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tcmdo, _ := filepath.Abs(os.Args[0])\n\tcmd := fmt.Sprintf(`@\"%s\" %s %%*`, cmdo, command)\n\tif _, err := f.WriteString(cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc shellShim(dir, command string) error {\n\ttarget := filepath.Join(dir, command)\n\tif _, err := os.Stat(target); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\ttmpl := template.New(\"\")\n\ttmpl, err = tmpl.Parse(shellShimTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdo, _ := osext.Executable()\n\tif err := tmpl.Execute(f, map[string]string{\"CmdoPath\": cmdo, \"Command\": command}); err != nil {\n\t\t\/\/ TODO Remove the created file\n\t\treturn err\n\t}\n\n\tif err := os.Chmod(target, 0755); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar shellShimTemplate = `#!\/bin\/sh\n\ncmdo=\"{{.CmdoPath}}\"\n\n# TODO: Test in msys\/mingw\/etc.\ncase $(uname) in\n *CYGWIN*) cmdo=$(cygpath -w \"${cmdo}\");;\nesac\n\nif [ -x \"${cmdo}\" ]; then\n \"${cmdo}\" \"{{.Command}}\" \"$@\"\n ret=$?\n exit $ret\nelse\n echo \"Unable to find base executable: ${cmdo}\"\n exit 1\nfi\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ TODO: Check CPU performance governor before each benchmark.\n\n\/\/ TODO: Mode to prioritize commits around big performance changes.\n\n\/\/ TODO: Support running pre-built binaries without specific hashes.\n\/\/ This is useful for testing things that aren't yet committed or that\n\/\/ require unusual build steps.\n\nvar run struct {\n\torder string\n\ttopLevel string\n\tbenchFlags string\n\titerations int\n\tgoverSave bool\n}\n\nvar cmdRunFlags = flag.NewFlagSet(os.Args[0]+\" run\", flag.ExitOnError)\n\nfunc init() {\n\tf := cmdRunFlags\n\tf.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s run [flags] <revision range>\\n\", os.Args[0])\n\t\tf.PrintDefaults()\n\t}\n\tf.StringVar(&run.order, \"order\", \"seq\", \"run benchmarks in `order`, which must be one of: seq, spread\")\n\tf.StringVar(&gitDir, \"C\", \"\", \"run git in `dir`\")\n\tf.StringVar(&run.benchFlags, \"benchflags\", \"\", \"pass `flags` to benchmark\")\n\tf.IntVar(&run.iterations, \"n\", 5, \"run each benchmark `N` times\")\n\tf.StringVar(&outDir, \"o\", \"\", \"write binaries and logs to `directory`\")\n\tf.BoolVar(&run.goverSave, \"gover-save\", false, \"save toolchain builds with gover\")\n\tf.BoolVar(&dryRun, \"dry-run\", false, \"print commands but do not run them\")\n\tregisterSubcommand(\"run\", \"[flags] <revision range> - run benchmarks\", cmdRun, f)\n}\n\nfunc cmdRun() {\n\tif cmdRunFlags.NArg() < 1 {\n\t\tcmdRunFlags.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tvar pickCommit func([]*commitInfo) *commitInfo\n\tswitch run.order {\n\tcase \"seq\":\n\t\tpickCommit = pickCommitSeq\n\tcase \"spread\":\n\t\tpickCommit = pickCommitSpread\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"unknown order: %s\\n\", run.order)\n\t\tcmdRunFlags.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tcommits := getCommits(cmdRunFlags.Args())\n\n\t\/\/ Get other git information.\n\trun.topLevel = trimNL(git(\"rev-parse\", \"--show-toplevel\"))\n\n\tstatus := NewStatusReporter()\n\tdefer status.Stop()\n\n\tfor {\n\t\tdoneIters, totalIters, partialCommits, doneCommits, failedCommits := runStats(commits)\n\t\tunstartedCommits := len(commits) - (partialCommits + doneCommits + failedCommits)\n\t\tmsg := fmt.Sprintf(\"%d\/%d runs, %d unstarted+%d partial+%d done+%d failed commits\", doneIters, totalIters, unstartedCommits, partialCommits, doneCommits, failedCommits)\n\t\t\/\/ TODO: Count builds and runs separately.\n\t\tstatus.Progress(msg, float64(doneIters)\/float64(totalIters))\n\n\t\tcommit := pickCommit(commits)\n\t\tif commit == nil {\n\t\t\tbreak\n\t\t}\n\t\trunBenchmark(commit, status)\n\t}\n}\n\nfunc runStats(commits []*commitInfo) (doneIters, totalIters, partialCommits, doneCommits, failedCommits int) {\n\tfor _, c := range commits {\n\t\tif c.count >= run.iterations {\n\t\t\t\/\/ Don't care if it failed.\n\t\t\tdoneIters += c.count\n\t\t\ttotalIters += c.count\n\t\t} else if c.runnable() {\n\t\t\tdoneIters += c.count\n\t\t\ttotalIters += run.iterations\n\t\t}\n\n\t\tif c.count == run.iterations {\n\t\t\tdoneCommits++\n\t\t} else if c.runnable() {\n\t\t\tif c.count != 0 {\n\t\t\t\tpartialCommits++\n\t\t\t}\n\t\t} else {\n\t\t\tfailedCommits++\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ pickCommitSeq picks the next commit to run based on the most recent\n\/\/ commit with the fewest iterations.\nfunc pickCommitSeq(commits []*commitInfo) *commitInfo {\n\tvar minCommit *commitInfo\n\tfor _, commit := range commits {\n\t\tif !commit.runnable() {\n\t\t\tcontinue\n\t\t}\n\t\tif minCommit == nil || commit.count < minCommit.count {\n\t\t\tminCommit = commit\n\t\t}\n\t}\n\treturn minCommit\n}\n\n\/\/ pickCommitSpread picks the next commit to run from commits using an\n\/\/ algorithm that spreads out the runs.\nfunc pickCommitSpread(commits []*commitInfo) *commitInfo {\n\t\/\/ Assign weights to each commit. This is thoroughly\n\t\/\/ heuristic, but it's geared toward either increasing the\n\t\/\/ iteration count of commits that we have, or picking a new\n\t\/\/ commit so as to spread out the commits we have.\n\tweights := make([]int, len(commits))\n\ttotalWeight := 0\n\n\tnPartial := 0\n\tfor _, commit := range commits {\n\t\tif commit.partial() {\n\t\t\tnPartial++\n\t\t}\n\t}\n\tif nPartial >= len(commits)\/10 {\n\t\t\/\/ Limit the number of partially completed revisions\n\t\t\/\/ to 10% by only choosing a partial commit in this\n\t\t\/\/ case.\n\t\tfor i, commit := range commits {\n\t\t\tif commit.partial() {\n\t\t\t\t\/\/ Bias toward commits that are\n\t\t\t\t\/\/ further from done.\n\t\t\t\tweights[i] = run.iterations - commit.count\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Pick a new commit weighted by its distance from a\n\t\t\/\/ commit that we already have.\n\n\t\t\/\/ Find distance from left to right.\n\t\tdistance := len(commits)\n\t\thaveAny := false\n\t\tfor i, commit := range commits {\n\t\t\tif commit.count > 0 {\n\t\t\t\tdistance = 1\n\t\t\t\thaveAny = true\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\t\t\tweights[i] = distance\n\t\t}\n\n\t\t\/\/ Find distance from right to left.\n\t\tdistance = len(commits)\n\t\tfor i := len(commits) - 1; i >= 0; i-- {\n\t\t\tcommit := commits[i]\n\t\t\tif commit.count > 0 {\n\t\t\t\tdistance = 1\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\n\t\t\tif distance < weights[i] {\n\t\t\t\tweights[i] = distance\n\t\t\t}\n\t\t}\n\n\t\tif !haveAny {\n\t\t\t\/\/ We don't have any commits. Pick one uniformly.\n\t\t\tfor i := range commits {\n\t\t\t\tweights[i] = 1\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Zero non-runnable commits.\n\t\tfor i, commit := range commits {\n\t\t\tif !commit.runnable() {\n\t\t\t\tweights[i] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, w := range weights {\n\t\ttotalWeight += w\n\t}\n\tif totalWeight == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Pick a commit based on the weights.\n\tx := rand.Intn(totalWeight)\n\tcumulative := 0\n\tfor i, w := range weights {\n\t\tcumulative += w\n\t\tif cumulative > x {\n\t\t\treturn commits[i]\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n\/\/ runBenchmark runs the benchmark at commit. It updates commit.count,\n\/\/ commit.fails, and commit.buildFailed as appropriate and writes to\n\/\/ the commit log to record the outcome.\nfunc runBenchmark(commit *commitInfo, status *StatusReporter) {\n\tisXBenchmark := false\n\tif abs, _ := os.Getwd(); strings.Contains(abs, \"golang.org\/x\/benchmarks\/\") {\n\t\tisXBenchmark = true\n\t}\n\n\t\/\/ Build the benchmark if necessary.\n\tif !exists(commit.binPath) {\n\t\trunStatus(status, commit, \"building\")\n\n\t\t\/\/ Check out the appropriate commit. This is necessary\n\t\t\/\/ even if we're using gover because the benchmark\n\t\t\/\/ itself might have changed (e.g., bug fixes).\n\t\tgit(\"checkout\", \"-q\", commit.hash)\n\n\t\tvar buildCmd []string\n\t\tif commit.gover {\n\t\t\t\/\/ TODO: It would be better if gover took a\n\t\t\t\/\/ long hash and did the right thing.\n\t\t\tbuildCmd = []string{\"gover\", \"run\", commit.hash[:7], \"go\"}\n\t\t} else {\n\t\t\t\/\/ If this is the Go toolchain, do a full\n\t\t\t\/\/ make.bash. Otherwise, we assume that go\n\t\t\t\/\/ test -c will build the necessary\n\t\t\t\/\/ dependencies.\n\t\t\tif exists(filepath.Join(run.topLevel, \"src\", \"make.bash\")) {\n\t\t\t\tcmd := exec.Command(\".\/make.bash\")\n\t\t\t\tcmd.Dir = filepath.Join(run.topLevel, \"src\")\n\t\t\t\tif dryRun {\n\t\t\t\t\tdryPrint(cmd)\n\t\t\t\t} else if out, err := cmd.CombinedOutput(); err != nil {\n\t\t\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to build toolchain at %s:\\n%s\", commit.hash, detail)\n\t\t\t\t\tcommit.buildFailed = true\n\t\t\t\t\tcommit.writeLog([]byte(\"BUILD FAILED:\\n\" + detail))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif run.goverSave && doGoverSave() == nil {\n\t\t\t\t\tcommit.gover = true\n\t\t\t\t}\n\t\t\t\tbuildCmd = []string{filepath.Join(run.topLevel, \"bin\", \"go\")}\n\t\t\t} else {\n\t\t\t\t\/\/ Assume go is in $PATH.\n\t\t\t\tbuildCmd = []string{\"go\"}\n\t\t\t}\n\t\t}\n\n\t\tif isXBenchmark {\n\t\t\tbuildCmd = append(buildCmd, \"build\", \"-o\", commit.binPath)\n\t\t} else {\n\t\t\tbuildCmd = append(buildCmd, \"test\", \"-c\", \"-o\", commit.binPath)\n\t\t}\n\t\tcmd := exec.Command(buildCmd[0], buildCmd[1:]...)\n\t\tif dryRun {\n\t\t\tdryPrint(cmd)\n\t\t} else if out, err := cmd.CombinedOutput(); err != nil {\n\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to build tests at %s:\\n%s\", commit.hash, detail)\n\t\t\tcommit.buildFailed = true\n\t\t\tcommit.writeLog([]byte(\"BUILD FAILED:\\n\" + detail))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Run the benchmark.\n\trunStatus(status, commit, \"running\")\n\tvar args []string\n\tif isXBenchmark {\n\t\targs = []string{}\n\t} else {\n\t\targs = []string{\"-test.run\", \"NONE\", \"-test.bench\", \".\"}\n\t}\n\targs = append(args, strings.Fields(run.benchFlags)...)\n\tname := commit.binPath\n\tif filepath.Base(name) == name {\n\t\t\/\/ Make exec.Command treat this as a relative path.\n\t\tname = \".\/\" + name\n\t}\n\tcmd := exec.Command(name, args...)\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\tcommit.count++\n\t\treturn\n\t}\n\tout, err := cmd.CombinedOutput()\n\tif err == nil {\n\t\tcommit.count++\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"failed to run benchmark at %s:\\n%s\", commit.hash, out)\n\t\tcommit.fails++\n\t\t\/\/ Indent the output so we don't get confused by\n\t\t\/\/ benchmarks in it or \"PASS\" lines.\n\t\tout = []byte(\"FAILED:\\n\" + indent(string(out)) + indent(err.Error()))\n\t}\n\n\t\/\/ Write the benchmark output.\n\tcommit.writeLog(out)\n}\n\nfunc doGoverSave() error {\n\tcmd := exec.Command(\"gover\", \"save\")\n\tcmd.Env = append([]string{\"GOROOT=\" + run.topLevel}, os.Environ()...)\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\treturn nil\n\t} else {\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"gover save failed: %s:\\n%s\", err, indent(string(out)))\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ runStatus updates the status message for commit.\nfunc runStatus(sr *StatusReporter, commit *commitInfo, status string) {\n\tsr.Message(fmt.Sprintf(\"commit %s, iteration %d\/%d: %s...\", commit.hash[:7], commit.count+1, run.iterations, status))\n}\n<commit_msg>benchmany: fix gover save<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ TODO: Check CPU performance governor before each benchmark.\n\n\/\/ TODO: Mode to prioritize commits around big performance changes.\n\n\/\/ TODO: Support running pre-built binaries without specific hashes.\n\/\/ This is useful for testing things that aren't yet committed or that\n\/\/ require unusual build steps.\n\nvar run struct {\n\torder string\n\ttopLevel string\n\tbenchFlags string\n\titerations int\n\tgoverSave bool\n}\n\nvar cmdRunFlags = flag.NewFlagSet(os.Args[0]+\" run\", flag.ExitOnError)\n\nfunc init() {\n\tf := cmdRunFlags\n\tf.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s run [flags] <revision range>\\n\", os.Args[0])\n\t\tf.PrintDefaults()\n\t}\n\tf.StringVar(&run.order, \"order\", \"seq\", \"run benchmarks in `order`, which must be one of: seq, spread\")\n\tf.StringVar(&gitDir, \"C\", \"\", \"run git in `dir`\")\n\tf.StringVar(&run.benchFlags, \"benchflags\", \"\", \"pass `flags` to benchmark\")\n\tf.IntVar(&run.iterations, \"n\", 5, \"run each benchmark `N` times\")\n\tf.StringVar(&outDir, \"o\", \"\", \"write binaries and logs to `directory`\")\n\tf.BoolVar(&run.goverSave, \"gover-save\", false, \"save toolchain builds with gover\")\n\tf.BoolVar(&dryRun, \"dry-run\", false, \"print commands but do not run them\")\n\tregisterSubcommand(\"run\", \"[flags] <revision range> - run benchmarks\", cmdRun, f)\n}\n\nfunc cmdRun() {\n\tif cmdRunFlags.NArg() < 1 {\n\t\tcmdRunFlags.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tvar pickCommit func([]*commitInfo) *commitInfo\n\tswitch run.order {\n\tcase \"seq\":\n\t\tpickCommit = pickCommitSeq\n\tcase \"spread\":\n\t\tpickCommit = pickCommitSpread\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"unknown order: %s\\n\", run.order)\n\t\tcmdRunFlags.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tcommits := getCommits(cmdRunFlags.Args())\n\n\t\/\/ Get other git information.\n\trun.topLevel = trimNL(git(\"rev-parse\", \"--show-toplevel\"))\n\n\tstatus := NewStatusReporter()\n\tdefer status.Stop()\n\n\tfor {\n\t\tdoneIters, totalIters, partialCommits, doneCommits, failedCommits := runStats(commits)\n\t\tunstartedCommits := len(commits) - (partialCommits + doneCommits + failedCommits)\n\t\tmsg := fmt.Sprintf(\"%d\/%d runs, %d unstarted+%d partial+%d done+%d failed commits\", doneIters, totalIters, unstartedCommits, partialCommits, doneCommits, failedCommits)\n\t\t\/\/ TODO: Count builds and runs separately.\n\t\tstatus.Progress(msg, float64(doneIters)\/float64(totalIters))\n\n\t\tcommit := pickCommit(commits)\n\t\tif commit == nil {\n\t\t\tbreak\n\t\t}\n\t\trunBenchmark(commit, status)\n\t}\n}\n\nfunc runStats(commits []*commitInfo) (doneIters, totalIters, partialCommits, doneCommits, failedCommits int) {\n\tfor _, c := range commits {\n\t\tif c.count >= run.iterations {\n\t\t\t\/\/ Don't care if it failed.\n\t\t\tdoneIters += c.count\n\t\t\ttotalIters += c.count\n\t\t} else if c.runnable() {\n\t\t\tdoneIters += c.count\n\t\t\ttotalIters += run.iterations\n\t\t}\n\n\t\tif c.count == run.iterations {\n\t\t\tdoneCommits++\n\t\t} else if c.runnable() {\n\t\t\tif c.count != 0 {\n\t\t\t\tpartialCommits++\n\t\t\t}\n\t\t} else {\n\t\t\tfailedCommits++\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ pickCommitSeq picks the next commit to run based on the most recent\n\/\/ commit with the fewest iterations.\nfunc pickCommitSeq(commits []*commitInfo) *commitInfo {\n\tvar minCommit *commitInfo\n\tfor _, commit := range commits {\n\t\tif !commit.runnable() {\n\t\t\tcontinue\n\t\t}\n\t\tif minCommit == nil || commit.count < minCommit.count {\n\t\t\tminCommit = commit\n\t\t}\n\t}\n\treturn minCommit\n}\n\n\/\/ pickCommitSpread picks the next commit to run from commits using an\n\/\/ algorithm that spreads out the runs.\nfunc pickCommitSpread(commits []*commitInfo) *commitInfo {\n\t\/\/ Assign weights to each commit. This is thoroughly\n\t\/\/ heuristic, but it's geared toward either increasing the\n\t\/\/ iteration count of commits that we have, or picking a new\n\t\/\/ commit so as to spread out the commits we have.\n\tweights := make([]int, len(commits))\n\ttotalWeight := 0\n\n\tnPartial := 0\n\tfor _, commit := range commits {\n\t\tif commit.partial() {\n\t\t\tnPartial++\n\t\t}\n\t}\n\tif nPartial >= len(commits)\/10 {\n\t\t\/\/ Limit the number of partially completed revisions\n\t\t\/\/ to 10% by only choosing a partial commit in this\n\t\t\/\/ case.\n\t\tfor i, commit := range commits {\n\t\t\tif commit.partial() {\n\t\t\t\t\/\/ Bias toward commits that are\n\t\t\t\t\/\/ further from done.\n\t\t\t\tweights[i] = run.iterations - commit.count\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Pick a new commit weighted by its distance from a\n\t\t\/\/ commit that we already have.\n\n\t\t\/\/ Find distance from left to right.\n\t\tdistance := len(commits)\n\t\thaveAny := false\n\t\tfor i, commit := range commits {\n\t\t\tif commit.count > 0 {\n\t\t\t\tdistance = 1\n\t\t\t\thaveAny = true\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\t\t\tweights[i] = distance\n\t\t}\n\n\t\t\/\/ Find distance from right to left.\n\t\tdistance = len(commits)\n\t\tfor i := len(commits) - 1; i >= 0; i-- {\n\t\t\tcommit := commits[i]\n\t\t\tif commit.count > 0 {\n\t\t\t\tdistance = 1\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\n\t\t\tif distance < weights[i] {\n\t\t\t\tweights[i] = distance\n\t\t\t}\n\t\t}\n\n\t\tif !haveAny {\n\t\t\t\/\/ We don't have any commits. Pick one uniformly.\n\t\t\tfor i := range commits {\n\t\t\t\tweights[i] = 1\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Zero non-runnable commits.\n\t\tfor i, commit := range commits {\n\t\t\tif !commit.runnable() {\n\t\t\t\tweights[i] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, w := range weights {\n\t\ttotalWeight += w\n\t}\n\tif totalWeight == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Pick a commit based on the weights.\n\tx := rand.Intn(totalWeight)\n\tcumulative := 0\n\tfor i, w := range weights {\n\t\tcumulative += w\n\t\tif cumulative > x {\n\t\t\treturn commits[i]\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n\/\/ runBenchmark runs the benchmark at commit. It updates commit.count,\n\/\/ commit.fails, and commit.buildFailed as appropriate and writes to\n\/\/ the commit log to record the outcome.\nfunc runBenchmark(commit *commitInfo, status *StatusReporter) {\n\tisXBenchmark := false\n\tif abs, _ := os.Getwd(); strings.Contains(abs, \"golang.org\/x\/benchmarks\/\") {\n\t\tisXBenchmark = true\n\t}\n\n\t\/\/ Build the benchmark if necessary.\n\tif !exists(commit.binPath) {\n\t\trunStatus(status, commit, \"building\")\n\n\t\t\/\/ Check out the appropriate commit. This is necessary\n\t\t\/\/ even if we're using gover because the benchmark\n\t\t\/\/ itself might have changed (e.g., bug fixes).\n\t\tgit(\"checkout\", \"-q\", commit.hash)\n\n\t\tvar buildCmd []string\n\t\tif commit.gover {\n\t\t\t\/\/ TODO: It would be better if gover took a\n\t\t\t\/\/ long hash and did the right thing.\n\t\t\tbuildCmd = []string{\"gover\", \"run\", commit.hash[:7], \"go\"}\n\t\t} else {\n\t\t\t\/\/ If this is the Go toolchain, do a full\n\t\t\t\/\/ make.bash. Otherwise, we assume that go\n\t\t\t\/\/ test -c will build the necessary\n\t\t\t\/\/ dependencies.\n\t\t\tif exists(filepath.Join(run.topLevel, \"src\", \"make.bash\")) {\n\t\t\t\tcmd := exec.Command(\".\/make.bash\")\n\t\t\t\tcmd.Dir = filepath.Join(run.topLevel, \"src\")\n\t\t\t\tif dryRun {\n\t\t\t\t\tdryPrint(cmd)\n\t\t\t\t} else if out, err := cmd.CombinedOutput(); err != nil {\n\t\t\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to build toolchain at %s:\\n%s\", commit.hash, detail)\n\t\t\t\t\tcommit.buildFailed = true\n\t\t\t\t\tcommit.writeLog([]byte(\"BUILD FAILED:\\n\" + detail))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif run.goverSave && doGoverSave() == nil {\n\t\t\t\t\tcommit.gover = true\n\t\t\t\t}\n\t\t\t\tbuildCmd = []string{filepath.Join(run.topLevel, \"bin\", \"go\")}\n\t\t\t} else {\n\t\t\t\t\/\/ Assume go is in $PATH.\n\t\t\t\tbuildCmd = []string{\"go\"}\n\t\t\t}\n\t\t}\n\n\t\tif isXBenchmark {\n\t\t\tbuildCmd = append(buildCmd, \"build\", \"-o\", commit.binPath)\n\t\t} else {\n\t\t\tbuildCmd = append(buildCmd, \"test\", \"-c\", \"-o\", commit.binPath)\n\t\t}\n\t\tcmd := exec.Command(buildCmd[0], buildCmd[1:]...)\n\t\tif dryRun {\n\t\t\tdryPrint(cmd)\n\t\t} else if out, err := cmd.CombinedOutput(); err != nil {\n\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to build tests at %s:\\n%s\", commit.hash, detail)\n\t\t\tcommit.buildFailed = true\n\t\t\tcommit.writeLog([]byte(\"BUILD FAILED:\\n\" + detail))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Run the benchmark.\n\trunStatus(status, commit, \"running\")\n\tvar args []string\n\tif isXBenchmark {\n\t\targs = []string{}\n\t} else {\n\t\targs = []string{\"-test.run\", \"NONE\", \"-test.bench\", \".\"}\n\t}\n\targs = append(args, strings.Fields(run.benchFlags)...)\n\tname := commit.binPath\n\tif filepath.Base(name) == name {\n\t\t\/\/ Make exec.Command treat this as a relative path.\n\t\tname = \".\/\" + name\n\t}\n\tcmd := exec.Command(name, args...)\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\tcommit.count++\n\t\treturn\n\t}\n\tout, err := cmd.CombinedOutput()\n\tif err == nil {\n\t\tcommit.count++\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"failed to run benchmark at %s:\\n%s\", commit.hash, out)\n\t\tcommit.fails++\n\t\t\/\/ Indent the output so we don't get confused by\n\t\t\/\/ benchmarks in it or \"PASS\" lines.\n\t\tout = []byte(\"FAILED:\\n\" + indent(string(out)) + indent(err.Error()))\n\t}\n\n\t\/\/ Write the benchmark output.\n\tcommit.writeLog(out)\n}\n\nfunc doGoverSave() error {\n\tcmd := exec.Command(\"gover\", \"save\")\n\tcmd.Dir = run.topLevel\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\treturn nil\n\t} else {\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"gover save failed: %s:\\n%s\", err, indent(string(out)))\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ runStatus updates the status message for commit.\nfunc runStatus(sr *StatusReporter, commit *commitInfo, status string) {\n\tsr.Message(fmt.Sprintf(\"commit %s, iteration %d\/%d: %s...\", commit.hash[:7], commit.count+1, run.iterations, status))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage precis\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkEnforce(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tUsernameCaseMapped.String(\"Malvolio\")\n\t}\n}\n<commit_msg>secure\/precis: Add more benchmarks<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage precis\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkUsernameCaseMapped(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tUsernameCaseMapped.String(\"Malvolio\")\n\t}\n}\n\nfunc BenchmarkUsernameCasePreserved(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tUsernameCasePreserved.String(\"Malvolio\")\n\t}\n}\n\nfunc BenchmarkOpaqueString(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tOpaqueString.String(\"Malvolio\")\n\t}\n}\n\nfunc BenchmarkNickname(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tNickname.String(\"Malvolio\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"fmt\"\n\t\"github.com\/meshbird\/meshbird\/log\"\n\t\"github.com\/meshbird\/meshbird\/secure\"\n\t\"sync\"\n)\n\ntype LocalNode struct {\n\tNode\n\n\tsecret *secure.NetworkSecret\n\tconfig *Config\n\tstate *State\n\n\tmutex sync.Mutex\n\twaitGroup sync.WaitGroup\n\n\tservices map[string]Service\n\n\tlogger log.Logger\n}\n\nfunc NewLocalNode(cfg *Config) (*LocalNode, error) {\n\tvar err error\n\tn := new(LocalNode)\n\tn.logger = log.L(\"local\")\n\n\tn.secret, err = secure.NetworkSecretUnmarshal(cfg.SecretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn.config = cfg\n\tn.config.NetworkID = n.secret.InfoHash()\n\tn.state = NewState(n.secret)\n\n\tn.services = make(map[string]Service)\n\n\tn.AddService(&NetTable{})\n\tn.AddService(&ListenerService{})\n\tn.AddService(&DiscoveryDHT{})\n\tn.AddService(&InterfaceService{})\n\t\/\/n.AddService(&STUNService{})\n\t\/\/n.AddService(&UPnPService{})\n\tn.AddService(&HttpService{})\n\treturn n, nil\n}\n\nfunc (n *LocalNode) Config() Config {\n\treturn *n.config\n}\n\nfunc (n *LocalNode) State() State {\n\treturn *n.state\n}\n\nfunc (n *LocalNode) AddService(srv Service) {\n\tn.services[srv.Name()] = srv\n}\n\nfunc (n *LocalNode) Start() error {\n\tfor _, service := range n.services {\n\t\tn.logger.Info(\"initializing %s service\", service.Name())\n\t\tif err := service.Init(n); err != nil {\n\t\t\treturn fmt.Errorf(\"initialization of %q finished with error: %v\", service.Name(), err)\n\t\t}\n\t\tn.waitGroup.Add(1)\n\n\t\tgo func(srv Service) {\n\t\t\tdefer n.waitGroup.Done()\n\t\t\tn.logger.Info(\"running %q service\", srv.Name())\n\t\t\tif err := srv.Run(); err != nil {\n\t\t\t\tn.logger.Error(\"error on run %q service, %v\", srv.Name(), err)\n\t\t\t}\n\t\t}(service)\n\t}\n\treturn nil\n}\n\nfunc (n *LocalNode) Service(name string) Service {\n\tservice, ok := n.services[name]\n\tif !ok {\n\t\tn.logger.Fatal(\"service %q not found\", name)\n\t}\n\treturn service\n}\n\nfunc (n *LocalNode) WaitStop() {\n\tn.waitGroup.Wait()\n}\n\nfunc (n *LocalNode) Stop() error {\n\tn.logger.Info(\"stopping...\")\n\tfor _, service := range n.services {\n\t\tservice.Stop()\n\t}\n\treturn nil\n}\n\nfunc (n *LocalNode) NetworkSecret() *secure.NetworkSecret {\n\treturn n.secret\n}\n\nfunc (n *LocalNode) NetTable() *NetTable {\n\tservice, ok := n.services[\"net-table\"]\n\tif !ok {\n\t\tn.logger.Panic(\"net-table not found\")\n\t}\n\treturn service.(*NetTable)\n}\n<commit_msg>revert back upnp<commit_after>package common\n\nimport (\n\t\"fmt\"\n\t\"github.com\/meshbird\/meshbird\/log\"\n\t\"github.com\/meshbird\/meshbird\/secure\"\n\t\"sync\"\n)\n\ntype LocalNode struct {\n\tNode\n\n\tsecret *secure.NetworkSecret\n\tconfig *Config\n\tstate *State\n\n\tmutex sync.Mutex\n\twaitGroup sync.WaitGroup\n\n\tservices map[string]Service\n\n\tlogger log.Logger\n}\n\nfunc NewLocalNode(cfg *Config) (*LocalNode, error) {\n\tvar err error\n\tn := new(LocalNode)\n\tn.logger = log.L(\"local\")\n\n\tn.secret, err = secure.NetworkSecretUnmarshal(cfg.SecretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn.config = cfg\n\tn.config.NetworkID = n.secret.InfoHash()\n\tn.state = NewState(n.secret)\n\n\tn.services = make(map[string]Service)\n\n\tn.AddService(&NetTable{})\n\tn.AddService(&ListenerService{})\n\tn.AddService(&DiscoveryDHT{})\n\tn.AddService(&InterfaceService{})\n\t\/\/n.AddService(&STUNService{})\n\tn.AddService(&UPnPService{})\n\tn.AddService(&HttpService{})\n\treturn n, nil\n}\n\nfunc (n *LocalNode) Config() Config {\n\treturn *n.config\n}\n\nfunc (n *LocalNode) State() State {\n\treturn *n.state\n}\n\nfunc (n *LocalNode) AddService(srv Service) {\n\tn.services[srv.Name()] = srv\n}\n\nfunc (n *LocalNode) Start() error {\n\tfor _, service := range n.services {\n\t\tn.logger.Info(\"initializing %s service\", service.Name())\n\t\tif err := service.Init(n); err != nil {\n\t\t\treturn fmt.Errorf(\"initialization of %q finished with error: %v\", service.Name(), err)\n\t\t}\n\t\tn.waitGroup.Add(1)\n\n\t\tgo func(srv Service) {\n\t\t\tdefer n.waitGroup.Done()\n\t\t\tn.logger.Info(\"running %q service\", srv.Name())\n\t\t\tif err := srv.Run(); err != nil {\n\t\t\t\tn.logger.Error(\"error on run %q service, %v\", srv.Name(), err)\n\t\t\t}\n\t\t}(service)\n\t}\n\treturn nil\n}\n\nfunc (n *LocalNode) Service(name string) Service {\n\tservice, ok := n.services[name]\n\tif !ok {\n\t\tn.logger.Fatal(\"service %q not found\", name)\n\t}\n\treturn service\n}\n\nfunc (n *LocalNode) WaitStop() {\n\tn.waitGroup.Wait()\n}\n\nfunc (n *LocalNode) Stop() error {\n\tn.logger.Info(\"stopping...\")\n\tfor _, service := range n.services {\n\t\tservice.Stop()\n\t}\n\treturn nil\n}\n\nfunc (n *LocalNode) NetworkSecret() *secure.NetworkSecret {\n\treturn n.secret\n}\n\nfunc (n *LocalNode) NetTable() *NetTable {\n\tservice, ok := n.services[\"net-table\"]\n\tif !ok {\n\t\tn.logger.Panic(\"net-table not found\")\n\t}\n\treturn service.(*NetTable)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add test to verify that match labels can be updated after its creation. (#770)<commit_after><|endoftext|>"} {"text":"<commit_before>package jump\n\nfunc canJump(arr []int) bool {\n return false\n}\n<commit_msg>solve 55 use greedy<commit_after>package jump\n\nimport \"github.com\/catorpilor\/leetcode\/utils\"\n\nfunc canJump(arr []int) bool {\n return useGreedyForward(arr)\n}\n\n\/\/ useGreedyForward time complexity O(N), space complexity O(1)\nfunc useGreedyForward(nums []int) bool {\n n := len(nums)\n if n < 1 {\n return false\n }\n \/\/ the maxPos can get so far\n maxPos := 0\n for i := 0; i <= maxPos; i++ {\n \/\/ update the right boundary if necessary.\n maxPos = utils.Max(maxPos, i+nums[i])\n if maxPos >= n-1 {\n return true\n }\n }\n return false\n}\n<|endoftext|>"} {"text":"<commit_before>package completion\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"..\/dos\"\n\t\"..\/interpreter\"\n)\n\nvar rxEnvPattern = regexp.MustCompile(\"%[^%]+%\")\nvar rxTilde = regexp.MustCompile(\"^~[\/\\\\\\\\]\")\n\nfunc listUpFiles(str string) ([]string, error) {\n\tstr = rxEnvPattern.ReplaceAllStringFunc(str, func(p string) string {\n\t\tif len(p) == 2 {\n\t\t\treturn \"%\"\n\t\t} else if val := os.Getenv(p[1 : len(p)-1]); val != \"\" {\n\t\t\treturn val\n\t\t} else if f, ok := interpreter.PercentFunc[p[1:len(p)-1]]; ok {\n\t\t\treturn f()\n\t\t} else {\n\t\t\treturn p\n\t\t}\n\t})\n\n\tstr = rxTilde.ReplaceAllStringFunc(str, func(p string) string {\n\t\tif home := dos.GetHome(); home != \"\" {\n\t\t\treturn home + \"\\\\\"\n\t\t} else {\n\t\t\treturn p\n\t\t}\n\t})\n\tstr = strings.Replace(strings.Replace(str, \"\\\\\", \"\/\", -1), \"\\\"\", \"\", -1)\n\tvar directory string\n\tstr = strings.Replace(str, \"\\\\\", \"\/\", -1)\n\tif strings.HasSuffix(str, \"\/\") {\n\t\tdirectory = path.Join(str, \".\")\n\t} else {\n\t\tdirectory = path.Dir(str)\n\t}\n\n\t\/\/ Drive letter\n\tcutprefix := 0\n\tif strings.HasPrefix(directory, \"\/\") {\n\t\twd, _ := os.Getwd()\n\t\tdirectory = wd[0:2] + directory\n\t\tcutprefix = 2\n\t}\n\n\tfd, fdErr := os.Open(directory)\n\tif fdErr != nil {\n\t\treturn nil, fdErr\n\t}\n\tdefer fd.Close()\n\tfiles, filesErr := fd.Readdir(-1)\n\tif filesErr != nil {\n\t\treturn nil, filesErr\n\t}\n\tcommons := make([]string, 0)\n\tif str != \"\" {\n\t\tstr = path.Clean(str)\n\t\t\/\/ Since path.Clean(\"\") -> \".\", completed name to \".xxxx\"\n\t}\n\tSTR := strings.ToUpper(str)\n\tfor _, node1 := range files {\n\t\tname := path.Join(directory, node1.Name())\n\t\tif attr, attrErr := dos.GetFileAttributes(name); attrErr == nil && (attr&dos.FILE_ATTRIBUTE_HIDDEN) != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif node1.IsDir() {\n\t\t\tname += \"\/\"\n\t\t}\n\t\tif cutprefix > 0 {\n\t\t\tname = name[2:]\n\t\t}\n\t\tnameUpr := strings.ToUpper(path.Clean(name))\n\t\tif strings.HasPrefix(nameUpr, STR) {\n\t\t\tcommons = append(commons, name)\n\t\t}\n\t}\n\treturn commons, nil\n}\n<commit_msg>Fix #61 : completion removed .\\ (dot-slash)<commit_after>package completion\n\nimport (\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"..\/dos\"\n\t\"..\/interpreter\"\n)\n\nconst (\n\tSTD_SLASH = \"\\\\\"\n\tOPT_SLASH = \"\/\"\n)\n\nvar rxEnvPattern = regexp.MustCompile(\"%[^%]+%\")\nvar rxTilde = regexp.MustCompile(\"^~[\/\\\\\\\\]\")\n\nfunc listUpFiles(str string) ([]string, error) {\n\tstr = rxEnvPattern.ReplaceAllStringFunc(str, func(p string) string {\n\t\tif len(p) == 2 {\n\t\t\treturn \"%\"\n\t\t} else if val := os.Getenv(p[1 : len(p)-1]); val != \"\" {\n\t\t\treturn val\n\t\t} else if f, ok := interpreter.PercentFunc[p[1:len(p)-1]]; ok {\n\t\t\treturn f()\n\t\t} else {\n\t\t\treturn p\n\t\t}\n\t})\n\n\tstr = rxTilde.ReplaceAllStringFunc(str, func(p string) string {\n\t\tif home := dos.GetHome(); home != \"\" {\n\t\t\treturn home + \"\\\\\"\n\t\t} else {\n\t\t\treturn p\n\t\t}\n\t})\n\tstr = strings.Replace(strings.Replace(str, OPT_SLASH, STD_SLASH, -1), \"\\\"\", \"\", -1)\n\tdirectory := dos.DirName(str)\n\twildcard := dos.Join(directory, \"*\")\n\n\t\/\/ Drive letter\n\tcutprefix := 0\n\tif strings.HasPrefix(directory, STD_SLASH) {\n\t\twd, _ := os.Getwd()\n\t\tdirectory = wd[0:2] + directory\n\t\tcutprefix = 2\n\t}\n\n\tfd, fdErr := dos.FindFirst(wildcard)\n\tif fdErr != nil {\n\t\treturn nil, fdErr\n\t}\n\tdefer fd.Close()\n\tcommons := make([]string, 0)\n\tSTR := strings.ToUpper(str)\n\tfor ; fdErr == nil; fdErr = fd.FindNext() {\n\t\tif fd.Name() == \".\" || fd.Name() == \"..\" || fd.IsHidden() {\n\t\t\tcontinue\n\t\t}\n\t\tname := dos.Join(directory, fd.Name())\n\t\tif fd.IsDir() {\n\t\t\tname += STD_SLASH\n\t\t}\n\t\tif cutprefix > 0 {\n\t\t\tname = name[2:]\n\t\t}\n\t\tnameUpr := strings.ToUpper(name)\n\t\tif strings.HasPrefix(nameUpr, STR) {\n\t\t\tcommons = append(commons, name)\n\t\t}\n\t}\n\treturn commons, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package stdio\n\nimport (\n\t\"github.com\/Microsoft\/opengcs\/service\/gcs\/transport\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ConnectionSettings describe the stdin, stdout, stderr ports to connect the\n\/\/ transport to. A nil port specifies no connection.\ntype ConnectionSettings struct {\n\tStdIn *uint32\n\tStdOut *uint32\n\tStdErr *uint32\n}\n\n\/\/ Connect returns new transport.Connection instances, one for each stdio pipe\n\/\/ to be used. If CreateStd*Pipe for a given pipe is false, the given Connection\n\/\/ is set to nil.\nfunc Connect(tport transport.Transport, settings ConnectionSettings) (_ *ConnectionSet, err error) {\n\tconnSet := &ConnectionSet{}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tconnSet.Close()\n\t\t}\n\t}()\n\tif settings.StdIn != nil {\n\t\tconnSet.In, err = tport.Dial(*settings.StdIn)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stdin Connection\")\n\t\t}\n\t}\n\tif settings.StdOut != nil {\n\t\tconnSet.Out, err = tport.Dial(*settings.StdOut)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stdout Connection\")\n\t\t}\n\t}\n\tif settings.StdErr != nil {\n\t\tconnSet.Err, err = tport.Dial(*settings.StdErr)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stderr Connection\")\n\t\t}\n\t}\n\treturn connSet, nil\n}\n<commit_msg>Adding log support on stdio connections<commit_after>package stdio\n\nimport (\n\t\"os\"\n\n\t\"github.com\/Microsoft\/opengcs\/service\/gcs\/transport\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ConnectionSettings describe the stdin, stdout, stderr ports to connect the\n\/\/ transport to. A nil port specifies no connection.\ntype ConnectionSettings struct {\n\tStdIn *uint32\n\tStdOut *uint32\n\tStdErr *uint32\n}\n\ntype logConnection struct {\n\tcon transport.Connection\n\tport uint32\n}\n\nfunc (lc *logConnection) Read(b []byte) (int, error) {\n\treturn lc.con.Read(b)\n}\n\nfunc (lc *logConnection) Write(b []byte) (int, error) {\n\treturn lc.con.Write(b)\n}\n\nfunc (lc *logConnection) Close() error {\n\tlogrus.Debugf(\"Closing connection on port: %d\", lc.port)\n\treturn lc.con.Close()\n}\n\nfunc (lc *logConnection) CloseRead() error {\n\tlogrus.Debugf(\"Closing read connection on port: %d\", lc.port)\n\treturn lc.con.CloseRead()\n}\n\nfunc (lc *logConnection) CloseWrite() error {\n\tlogrus.Debugf(\"Closing write connection on port: %d\", lc.port)\n\treturn lc.con.CloseWrite()\n}\n\nfunc (lc *logConnection) File() (*os.File, error) {\n\treturn lc.con.File()\n}\n\nvar _ = (transport.Connection)(&logConnection{})\n\n\/\/ Connect returns new transport.Connection instances, one for each stdio pipe\n\/\/ to be used. If CreateStd*Pipe for a given pipe is false, the given Connection\n\/\/ is set to nil.\nfunc Connect(tport transport.Transport, settings ConnectionSettings) (_ *ConnectionSet, err error) {\n\tconnSet := &ConnectionSet{}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tconnSet.Close()\n\t\t}\n\t}()\n\tif settings.StdIn != nil {\n\t\tc, err := tport.Dial(*settings.StdIn)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stdin Connection\")\n\t\t}\n\t\tconnSet.In = &logConnection{\n\t\t\tcon: c,\n\t\t\tport: *settings.StdIn,\n\t\t}\n\t}\n\tif settings.StdOut != nil {\n\t\tc, err := tport.Dial(*settings.StdOut)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stdout Connection\")\n\t\t}\n\t\tconnSet.Out = &logConnection{\n\t\t\tcon: c,\n\t\t\tport: *settings.StdOut,\n\t\t}\n\t}\n\tif settings.StdErr != nil {\n\t\tc, err := tport.Dial(*settings.StdErr)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed creating stderr Connection\")\n\t\t}\n\t\tconnSet.Err = &logConnection{\n\t\t\tcon: c,\n\t\t\tport: *settings.StdErr,\n\t\t}\n\t}\n\treturn connSet, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scp\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Source: os\/stat_linux.go\nfunc atime(fi os.FileInfo) time.Time {\n\treturn timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)\n}\n\nfunc timespecToTime(ts syscall.Timespec) time.Time {\n\treturn time.Unix(ts.Sec, ts.Nsec)\n}\n<commit_msg>Explicitly cast time values to int64 to enable 32-bit builds. (#5291)<commit_after>\/*\nCopyright 2020 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scp\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Source: os\/stat_linux.go\nfunc atime(fi os.FileInfo) time.Time {\n\treturn timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)\n}\n\nfunc timespecToTime(ts syscall.Timespec) time.Time {\n\treturn time.Unix(int64(ts.Sec), int64(ts.Nsec)) \/\/nolint:unconvert\n}\n<|endoftext|>"} {"text":"<commit_before>package emotechief\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gempir\/bitraft\/pkg\/log\"\n\t\"github.com\/gempir\/bitraft\/pkg\/store\"\n)\n\ntype bttvDashboardResponse struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDisplayname string `json:\"displayName\"`\n\tProviderid string `json:\"providerId\"`\n\tBots []string `json:\"bots\"`\n\tChannelemotes []struct {\n\t\tID string `json:\"id\"`\n\t\tCode string `json:\"code\"`\n\t\tImagetype string `json:\"imageType\"`\n\t\tUserid string `json:\"userId\"`\n\t\tCreatedat time.Time `json:\"createdAt\"`\n\t\tUpdatedat time.Time `json:\"updatedAt\"`\n\t\tGlobal bool `json:\"global\"`\n\t\tLive bool `json:\"live\"`\n\t\tSharing bool `json:\"sharing\"`\n\t\tApprovalstatus string `json:\"approvalStatus\"`\n\t} `json:\"channelEmotes\"`\n\tSharedemotes []struct {\n\t\tID string `json:\"id\"`\n\t\tCode string `json:\"code\"`\n\t\tImagetype string `json:\"imageType\"`\n\t\tCreatedat time.Time `json:\"createdAt\"`\n\t\tUpdatedat time.Time `json:\"updatedAt\"`\n\t\tGlobal bool `json:\"global\"`\n\t\tLive bool `json:\"live\"`\n\t\tSharing bool `json:\"sharing\"`\n\t\tApprovalstatus string `json:\"approvalStatus\"`\n\t\tUser struct {\n\t\t\tID string `json:\"id\"`\n\t\t\tName string `json:\"name\"`\n\t\t\tDisplayname string `json:\"displayName\"`\n\t\t\tProviderid string `json:\"providerId\"`\n\t\t} `json:\"user\"`\n\t} `json:\"sharedEmotes\"`\n}\n\ntype bttvEmoteResponse struct {\n\tID string `json:\"id\"`\n\tCode string `json:\"code\"`\n\tImagetype string `json:\"imageType\"`\n\tCreatedat time.Time `json:\"createdAt\"`\n\tUpdatedat time.Time `json:\"updatedAt\"`\n\tGlobal bool `json:\"global\"`\n\tLive bool `json:\"live\"`\n\tSharing bool `json:\"sharing\"`\n\tApprovalstatus string `json:\"approvalStatus\"`\n\tUser struct {\n\t\tID string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tDisplayname string `json:\"displayName\"`\n\t\tProviderid string `json:\"providerId\"`\n\t} `json:\"user\"`\n}\n\ntype dashboardsResponse []dashboardCfg\n\ntype dashboardCfg struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDisplayname string `json:\"displayName\"`\n\tProviderid string `json:\"providerId\"`\n\tAvatar string `json:\"avatar\"`\n\tLimits struct {\n\t\tChannelemotes int `json:\"channelEmotes\"`\n\t\tSharedemotes int `json:\"sharedEmotes\"`\n\t\tPersonalemotes int `json:\"personalEmotes\"`\n\t} `json:\"limits\"`\n}\n\nfunc (e *EmoteChief) SetEmote(channelUserID, emoteId, channel string, slots int) (addedEmote *bttvEmoteResponse, removedEmote *bttvEmoteResponse, err error) {\n\taddedEmote, err = getBttvEmote(emoteId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !addedEmote.Sharing {\n\t\terr = errors.New(\"Emote is not shared\")\n\t\treturn\n\t}\n\n\t\/\/ first figure out the bttvUserId for the channel, might cache this later on\n\tvar resp *http.Response\n\tresp, err = http.Get(\"https:\/\/api.betterttv.net\/3\/cached\/users\/twitch\/\" + channelUserID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar userResp struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&userResp)\n\tif err != nil {\n\t\treturn\n\t}\n\tbttvUserId := userResp.ID\n\n\t\/\/ figure out the limit for the channel, might also chache this later on with expiry\n\tvar req *http.Request\n\treq, err = http.NewRequest(\"GET\", \"https:\/\/api.betterttv.net\/3\/account\/dashboards\", nil)\n\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tresp, err = e.httpClient.Do(req)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tvar dashboards dashboardsResponse\n\terr = json.NewDecoder(resp.Body).Decode(&dashboards)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dbCfg dashboardCfg\n\tfor _, db := range dashboards {\n\t\tif db.ID == bttvUserId {\n\t\t\tdbCfg = db\n\t\t}\n\t}\n\tif dbCfg.ID == \"\" {\n\t\terr = errors.New(\"No permission to moderate, add gempbot as BetterTTV editor\")\n\t\treturn\n\t}\n\tsharedEmotesLimit := dbCfg.Limits.Sharedemotes\n\n\t\/\/ figure currently added emotes\n\tresp, err = http.Get(\"https:\/\/api.betterttv.net\/3\/users\/\" + bttvUserId + \"?limited=false&personal=false\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dashboard bttvDashboardResponse\n\terr = json.NewDecoder(resp.Body).Decode(&dashboard)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, emote := range dashboard.Sharedemotes {\n\t\tif emote.ID == emoteId {\n\t\t\terr = errors.New(\"Emote already added\")\n\t\t\treturn\n\t\t}\n\t\tif emote.Code == addedEmote.Code {\n\t\t\terr = errors.New(\"Emote code already added\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, emote := range dashboard.Channelemotes {\n\t\tif emote.ID == emoteId {\n\t\t\terr = errors.New(\"Emote already a channelEmote\")\n\t\t\treturn\n\t\t}\n\t\tif emote.Code == addedEmote.Code {\n\t\t\terr = errors.New(\"Emote code already a channelEmote\")\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debugf(\"Current shared emotes: %d\/%d\", len(dashboard.Sharedemotes), sharedEmotesLimit)\n\n\tvar removalTargetEmoteId string\n\n\temotesAdded := e.db.GetEmoteAdded(channelUserID, slots)\n\tlog.Infof(\"Total Previous emotes %d in %s\", len(emotesAdded), channelUserID)\n\n\tconfirmedEmotesAdded := []store.EmoteAdd{}\n\tfor _, emote := range emotesAdded {\n\t\tfor _, sharedEmote := range dashboard.Sharedemotes {\n\t\t\tif emote.EmoteID == sharedEmote.ID {\n\t\t\t\tconfirmedEmotesAdded = append(confirmedEmotesAdded, emote)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(confirmedEmotesAdded) == slots {\n\t\tremovalTargetEmoteId = confirmedEmotesAdded[len(confirmedEmotesAdded)-1].EmoteID\n\t\tlog.Infof(\"Found removal target %s in %s\", removalTargetEmoteId, channelUserID)\n\t} else if len(dashboard.Sharedemotes) >= sharedEmotesLimit {\n\t\tlog.Infof(\"Didn't find previous slot and limit reached, choosing random in %s\", channelUserID)\n\t\tremovalTargetEmoteId = dashboard.Sharedemotes[rand.Intn(len(dashboard.Sharedemotes))].ID\n\t}\n\n\t\/\/ do we need to remove the emote?\n\tif removalTargetEmoteId != \"\" {\n\t\t\/\/ Delete the current emote\n\t\treq, err = http.NewRequest(\"DELETE\", \"https:\/\/api.betterttv.net\/3\/emotes\/\"+removalTargetEmoteId+\"\/shared\/\"+bttvUserId, nil)\n\t\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err = e.httpClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"[%d] Deleted channelId: %s emoteId: %s\", resp.StatusCode, channelUserID, removalTargetEmoteId)\n\t}\n\n\t\/\/ Add new emote\n\treq, err = http.NewRequest(\"PUT\", \"https:\/\/api.betterttv.net\/3\/emotes\/\"+emoteId+\"\/shared\/\"+bttvUserId, nil)\n\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tresp, err = e.httpClient.Do(req)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tlog.Infof(\"[%d] Added channelId: %s emoteId: %s\", resp.StatusCode, channelUserID, emoteId)\n\n\tif resp.StatusCode < http.StatusBadRequest {\n\t\te.db.CreateEmoteAdd(channelUserID, emoteId)\n\t}\n\n\tif removalTargetEmoteId != \"\" {\n\t\tremovedEmote, err = getBttvEmote(removalTargetEmoteId)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc getBttvEmote(emoteID string) (*bttvEmoteResponse, error) {\n\tif emoteID == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tresponse, err := http.Get(\"https:\/\/api.betterttv.net\/3\/emotes\/\" + emoteID)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode <= 100 || response.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"Bad bttv response: %d\", response.StatusCode)\n\t}\n\n\tvar emoteResponse bttvEmoteResponse\n\terr = json.NewDecoder(response.Body).Decode(&emoteResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &emoteResponse, nil\n}\n<commit_msg>cleaner log message<commit_after>package emotechief\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gempir\/bitraft\/pkg\/log\"\n\t\"github.com\/gempir\/bitraft\/pkg\/store\"\n)\n\ntype bttvDashboardResponse struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDisplayname string `json:\"displayName\"`\n\tProviderid string `json:\"providerId\"`\n\tBots []string `json:\"bots\"`\n\tChannelemotes []struct {\n\t\tID string `json:\"id\"`\n\t\tCode string `json:\"code\"`\n\t\tImagetype string `json:\"imageType\"`\n\t\tUserid string `json:\"userId\"`\n\t\tCreatedat time.Time `json:\"createdAt\"`\n\t\tUpdatedat time.Time `json:\"updatedAt\"`\n\t\tGlobal bool `json:\"global\"`\n\t\tLive bool `json:\"live\"`\n\t\tSharing bool `json:\"sharing\"`\n\t\tApprovalstatus string `json:\"approvalStatus\"`\n\t} `json:\"channelEmotes\"`\n\tSharedemotes []struct {\n\t\tID string `json:\"id\"`\n\t\tCode string `json:\"code\"`\n\t\tImagetype string `json:\"imageType\"`\n\t\tCreatedat time.Time `json:\"createdAt\"`\n\t\tUpdatedat time.Time `json:\"updatedAt\"`\n\t\tGlobal bool `json:\"global\"`\n\t\tLive bool `json:\"live\"`\n\t\tSharing bool `json:\"sharing\"`\n\t\tApprovalstatus string `json:\"approvalStatus\"`\n\t\tUser struct {\n\t\t\tID string `json:\"id\"`\n\t\t\tName string `json:\"name\"`\n\t\t\tDisplayname string `json:\"displayName\"`\n\t\t\tProviderid string `json:\"providerId\"`\n\t\t} `json:\"user\"`\n\t} `json:\"sharedEmotes\"`\n}\n\ntype bttvEmoteResponse struct {\n\tID string `json:\"id\"`\n\tCode string `json:\"code\"`\n\tImagetype string `json:\"imageType\"`\n\tCreatedat time.Time `json:\"createdAt\"`\n\tUpdatedat time.Time `json:\"updatedAt\"`\n\tGlobal bool `json:\"global\"`\n\tLive bool `json:\"live\"`\n\tSharing bool `json:\"sharing\"`\n\tApprovalstatus string `json:\"approvalStatus\"`\n\tUser struct {\n\t\tID string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tDisplayname string `json:\"displayName\"`\n\t\tProviderid string `json:\"providerId\"`\n\t} `json:\"user\"`\n}\n\ntype dashboardsResponse []dashboardCfg\n\ntype dashboardCfg struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDisplayname string `json:\"displayName\"`\n\tProviderid string `json:\"providerId\"`\n\tAvatar string `json:\"avatar\"`\n\tLimits struct {\n\t\tChannelemotes int `json:\"channelEmotes\"`\n\t\tSharedemotes int `json:\"sharedEmotes\"`\n\t\tPersonalemotes int `json:\"personalEmotes\"`\n\t} `json:\"limits\"`\n}\n\nfunc (e *EmoteChief) SetEmote(channelUserID, emoteId, channel string, slots int) (addedEmote *bttvEmoteResponse, removedEmote *bttvEmoteResponse, err error) {\n\taddedEmote, err = getBttvEmote(emoteId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !addedEmote.Sharing {\n\t\terr = errors.New(\"Emote is not shared\")\n\t\treturn\n\t}\n\n\t\/\/ first figure out the bttvUserId for the channel, might cache this later on\n\tvar resp *http.Response\n\tresp, err = http.Get(\"https:\/\/api.betterttv.net\/3\/cached\/users\/twitch\/\" + channelUserID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar userResp struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&userResp)\n\tif err != nil {\n\t\treturn\n\t}\n\tbttvUserId := userResp.ID\n\n\t\/\/ figure out the limit for the channel, might also chache this later on with expiry\n\tvar req *http.Request\n\treq, err = http.NewRequest(\"GET\", \"https:\/\/api.betterttv.net\/3\/account\/dashboards\", nil)\n\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tresp, err = e.httpClient.Do(req)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tvar dashboards dashboardsResponse\n\terr = json.NewDecoder(resp.Body).Decode(&dashboards)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dbCfg dashboardCfg\n\tfor _, db := range dashboards {\n\t\tif db.ID == bttvUserId {\n\t\t\tdbCfg = db\n\t\t}\n\t}\n\tif dbCfg.ID == \"\" {\n\t\terr = errors.New(\"No permission to moderate, add gempbot as BetterTTV editor\")\n\t\treturn\n\t}\n\tsharedEmotesLimit := dbCfg.Limits.Sharedemotes\n\n\t\/\/ figure currently added emotes\n\tresp, err = http.Get(\"https:\/\/api.betterttv.net\/3\/users\/\" + bttvUserId + \"?limited=false&personal=false\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dashboard bttvDashboardResponse\n\terr = json.NewDecoder(resp.Body).Decode(&dashboard)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, emote := range dashboard.Sharedemotes {\n\t\tif emote.ID == emoteId {\n\t\t\terr = errors.New(\"Emote already added\")\n\t\t\treturn\n\t\t}\n\t\tif emote.Code == addedEmote.Code {\n\t\t\terr = errors.New(\"Emote code already added\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, emote := range dashboard.Channelemotes {\n\t\tif emote.ID == emoteId {\n\t\t\terr = errors.New(\"Emote already a channelEmote\")\n\t\t\treturn\n\t\t}\n\t\tif emote.Code == addedEmote.Code {\n\t\t\terr = errors.New(\"Emote code already a channelEmote\")\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debugf(\"Current shared emotes: %d\/%d\", len(dashboard.Sharedemotes), sharedEmotesLimit)\n\n\tvar removalTargetEmoteId string\n\n\temotesAdded := e.db.GetEmoteAdded(channelUserID, slots)\n\tlog.Infof(\"Total Previous emotes %d in %s\", len(emotesAdded), channelUserID)\n\n\tconfirmedEmotesAdded := []store.EmoteAdd{}\n\tfor _, emote := range emotesAdded {\n\t\tfor _, sharedEmote := range dashboard.Sharedemotes {\n\t\t\tif emote.EmoteID == sharedEmote.ID {\n\t\t\t\tconfirmedEmotesAdded = append(confirmedEmotesAdded, emote)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(confirmedEmotesAdded) == slots {\n\t\tremovalTargetEmoteId = confirmedEmotesAdded[len(confirmedEmotesAdded)-1].EmoteID\n\t\tlog.Infof(\"Found removal target %s in %s\", removalTargetEmoteId, channelUserID)\n\t} else if len(dashboard.Sharedemotes) >= sharedEmotesLimit {\n\t\tlog.Infof(\"Didn't find previous emote history of %d emotes and limit reached, choosing random in %s\", slots, channelUserID)\n\t\tremovalTargetEmoteId = dashboard.Sharedemotes[rand.Intn(len(dashboard.Sharedemotes))].ID\n\t}\n\n\t\/\/ do we need to remove the emote?\n\tif removalTargetEmoteId != \"\" {\n\t\t\/\/ Delete the current emote\n\t\treq, err = http.NewRequest(\"DELETE\", \"https:\/\/api.betterttv.net\/3\/emotes\/\"+removalTargetEmoteId+\"\/shared\/\"+bttvUserId, nil)\n\t\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err = e.httpClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"[%d] Deleted channelId: %s emoteId: %s\", resp.StatusCode, channelUserID, removalTargetEmoteId)\n\t}\n\n\t\/\/ Add new emote\n\treq, err = http.NewRequest(\"PUT\", \"https:\/\/api.betterttv.net\/3\/emotes\/\"+emoteId+\"\/shared\/\"+bttvUserId, nil)\n\treq.Header.Set(\"authorization\", \"Bearer \"+e.cfg.BttvToken)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tresp, err = e.httpClient.Do(req)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tlog.Infof(\"[%d] Added channelId: %s emoteId: %s\", resp.StatusCode, channelUserID, emoteId)\n\n\tif resp.StatusCode < http.StatusBadRequest {\n\t\te.db.CreateEmoteAdd(channelUserID, emoteId)\n\t}\n\n\tif removalTargetEmoteId != \"\" {\n\t\tremovedEmote, err = getBttvEmote(removalTargetEmoteId)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc getBttvEmote(emoteID string) (*bttvEmoteResponse, error) {\n\tif emoteID == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tresponse, err := http.Get(\"https:\/\/api.betterttv.net\/3\/emotes\/\" + emoteID)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode <= 100 || response.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"Bad bttv response: %d\", response.StatusCode)\n\t}\n\n\tvar emoteResponse bttvEmoteResponse\n\terr = json.NewDecoder(response.Body).Decode(&emoteResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &emoteResponse, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/topicai\/candy\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc TestInitialEtcdCluster(t *testing.T) {\n\tc := &Cluster{}\n\tcandy.Must(yaml.Unmarshal([]byte(ExampleYAML), c))\n\tassert.Equal(t,\n\t\tc.InitialEtcdCluster(),\n\t\t\"00-25-90-c0-f7-80=http:\/\/10.0.2.21:2380,\"+\n\t\t\t\"00-25-90-c0-f6-ee=http:\/\/10.0.2.22:2380,\"+\n\t\t\t\"00-25-90-c0-f6-d6=http:\/\/00-25-90-c0-f6-d6:2380\")\n}\n\nfunc TestGetEtcdMachines(t *testing.T) {\n\tc := &Cluster{}\n\tcandy.Must(yaml.Unmarshal([]byte(ExampleYAML), c))\n\tassert.Equal(t, c.GetEtcdMachines(),\n\t\t\"http:\/\/10.0.2.21:2379,http:\/\/10.0.2.22:2379\")\n}\n<commit_msg>etcd test fix hostname<commit_after>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/topicai\/candy\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc TestInitialEtcdCluster(t *testing.T) {\n\tc := &Cluster{}\n\tcandy.Must(yaml.Unmarshal([]byte(ExampleYAML), c))\n\tassert.Equal(t,\n\t\tc.InitialEtcdCluster(),\n\t\t\"00-25-90-c0-f7-80=http:\/\/10.0.2.21:2380,\"+\n\t\t\t\"00-25-90-c0-f6-ee=http:\/\/10.0.2.22:2380,\"+\n\t\t\t\"00-25-90-c0-f6-d6=http:\/\/00-25-90-c0-f6-d6:2380\")\n}\n\nfunc TestGetEtcdMachines(t *testing.T) {\n\tc := &Cluster{}\n\tcandy.Must(yaml.Unmarshal([]byte(ExampleYAML), c))\n\tassert.Equal(t, c.GetEtcdMachines(),\n\t\t\"http:\/\/00-25-90-c0-f7-80:2379,http:\/\/00-25-90-c0-f6-ee:2379\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\n\/\/ Package voyeur implements a concurrency-safe value that can be watched for\n\/\/ changes.\npackage voyeur\n\nimport (\n\t\"sync\"\n)\n\n\/\/ Value represents a shared value that can be watched for changes. Methods on\n\/\/ a Value may be called concurrently. The zero Value is\n\/\/ ok to use, and is equivalent to a NewValue result\n\/\/ with a nil initial value.\ntype Value struct {\n\tval interface{}\n\tversion int\n\tmu sync.RWMutex\n\twait sync.Cond\n\tclosed bool\n}\n\n\/\/ NewValue creates a new Value holding the given initial value. If initial is\n\/\/ nil, any watchers will wait until a value is set.\nfunc NewValue(initial interface{}) *Value {\n\tv := new(Value)\n\tv.init()\n\tif initial != nil {\n\t\tv.val = initial\n\t\tv.version++\n\t}\n\treturn v\n}\n\nfunc (v *Value) needsInit() bool {\n\treturn v.wait.L == nil\n}\n\nfunc (v *Value) init() {\n\tif v.needsInit() {\n\t\tv.wait.L = v.mu.RLocker()\n\t}\n}\n\n\/\/ Set sets the shared value to val.\nfunc (v *Value) Set(val interface{}) {\n\tv.mu.Lock()\n\tv.init()\n\tv.val = val\n\tv.version++\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n}\n\n\/\/ Close closes the Value, unblocking any outstanding watchers. Close always\n\/\/ returns nil.\nfunc (v *Value) Close() error {\n\tv.mu.Lock()\n\tv.init()\n\tv.closed = true\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n\treturn nil\n}\n\n\/\/ Closed reports whether the value has been closed.\nfunc (v *Value) Closed() bool {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.closed\n}\n\n\/\/ Get returns the current value. \nfunc (v *Value) Get() interface{} {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.val\n}\n\n\/\/ Watch returns a Watcher that can be used to watch for changes to the value.\nfunc (v *Value) Watch() *Watcher {\n\treturn &Watcher{value: v}\n}\n\n\/\/ Watcher represents a single watcher of a shared value.\ntype Watcher struct {\n\tvalue *Value\n\tversion int\n\tcurrent interface{}\n\tclosed bool\n}\n\n\/\/ Next blocks until there is a new value to be retrieved from the value that is\n\/\/ being watched. It also unblocks when the value or the Watcher itself is\n\/\/ closed. Next returns false if the value or the Watcher itself have been\n\/\/ closed.\nfunc (w *Watcher) Next() bool {\n\tval := w.value\n\tval.mu.RLock()\n\tdefer val.mu.RUnlock()\n\tif val.needsInit() {\n\t\tval.mu.RUnlock()\n\t\tval.mu.Lock()\n\t\tval.init()\n\t\tval.mu.Unlock()\n\t\tval.mu.RLock()\n\t}\n\n\t\/\/ We should never go around this loop more than twice.\n\tfor {\n\t\tif w.version != val.version {\n\t\t\tw.version = val.version\n\t\t\tw.current = val.val\n\t\t\treturn true\n\t\t}\n\t\tif val.closed || w.closed {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Wait releases the lock until triggered and then reacquires the lock,\n\t\t\/\/ thus avoiding a deadlock.\n\t\tval.wait.Wait()\n\t}\n}\n\n\/\/ Close closes the Watcher without closing the underlying\n\/\/ value. It may be called concurrently with Next.\nfunc (w *Watcher) Close() {\n\tw.value.mu.Lock()\n\tw.value.init()\n\tw.closed = true\n\tw.value.mu.Unlock()\n\tw.value.wait.Broadcast()\n}\n\n\/\/ Value returns the last value that was retrieved from the watched Value by\n\/\/ Next.\nfunc (w *Watcher) Value() interface{} {\n\treturn w.current\n}\n<commit_msg>worker\/peergrouper: TestSetsMembersInitially passes<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\n\/\/ Package voyeur implements a concurrency-safe value that can be watched for\n\/\/ changes.\npackage voyeur\n\nimport (\n\t\"log\"\n\t\"sync\"\n)\n\n\/\/ Value represents a shared value that can be watched for changes. Methods on\n\/\/ a Value may be called concurrently. The zero Value is\n\/\/ ok to use, and is equivalent to a NewValue result\n\/\/ with a nil initial value.\ntype Value struct {\n\tval interface{}\n\tversion int\n\tmu sync.RWMutex\n\twait sync.Cond\n\tclosed bool\n}\n\n\/\/ NewValue creates a new Value holding the given initial value. If initial is\n\/\/ nil, any watchers will wait until a value is set.\nfunc NewValue(initial interface{}) *Value {\n\tv := new(Value)\n\tv.init()\n\tif initial != nil {\n\t\tv.val = initial\n\t\tv.version++\n\t}\n\treturn v\n}\n\nfunc (v *Value) needsInit() bool {\n\treturn v.wait.L == nil\n}\n\nfunc (v *Value) init() {\n\tif v.needsInit() {\n\t\tv.wait.L = v.mu.RLocker()\n\t}\n}\n\n\/\/ Set sets the shared value to val.\nfunc (v *Value) Set(val interface{}) {\n\tv.mu.Lock()\n\tv.init()\n\tv.val = val\n\tv.version++\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n}\n\n\/\/ Close closes the Value, unblocking any outstanding watchers. Close always\n\/\/ returns nil.\nfunc (v *Value) Close() error {\n\tv.mu.Lock()\n\tv.init()\n\tv.closed = true\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n\treturn nil\n}\n\n\/\/ Closed reports whether the value has been closed.\nfunc (v *Value) Closed() bool {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.closed\n}\n\n\/\/ Get returns the current value. \nfunc (v *Value) Get() interface{} {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.val\n}\n\n\/\/ Watch returns a Watcher that can be used to watch for changes to the value.\nfunc (v *Value) Watch() *Watcher {\n\treturn &Watcher{value: v}\n}\n\n\/\/ Watcher represents a single watcher of a shared value.\ntype Watcher struct {\n\tvalue *Value\n\tversion int\n\tcurrent interface{}\n\tclosed bool\n}\n\n\/\/ Next blocks until there is a new value to be retrieved from the value that is\n\/\/ being watched. It also unblocks when the value or the Watcher itself is\n\/\/ closed. Next returns false if the value or the Watcher itself have been\n\/\/ closed.\nfunc (w *Watcher) Next() bool {\n\tval := w.value\n\tval.mu.RLock()\n\tdefer val.mu.RUnlock()\n\tif val.needsInit() {\n\t\tval.mu.RUnlock()\n\t\tval.mu.Lock()\n\t\tval.init()\n\t\tval.mu.Unlock()\n\t\tval.mu.RLock()\n\t}\n\n\t\/\/ We should never go around this loop more than twice.\n\tfor {\n\t\tlog.Printf(\"Watcher.Next, local version %v; val version %v\", w.version, val.version)\n\t\tif w.version != val.version {\n\t\t\tw.version = val.version\n\t\t\tw.current = val.val\n\t\t\treturn true\n\t\t}\n\t\tif val.closed || w.closed {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Wait releases the lock until triggered and then reacquires the lock,\n\t\t\/\/ thus avoiding a deadlock.\n\t\tlog.Printf(\"Watcher.Next, waiting\")\n\t\tval.wait.Wait()\n\t}\n}\n\n\/\/ Close closes the Watcher without closing the underlying\n\/\/ value. It may be called concurrently with Next.\nfunc (w *Watcher) Close() {\n\tw.value.mu.Lock()\n\tw.value.init()\n\tw.closed = true\n\tw.value.mu.Unlock()\n\tw.value.wait.Broadcast()\n}\n\n\/\/ Value returns the last value that was retrieved from the watched Value by\n\/\/ Next.\nfunc (w *Watcher) Value() interface{} {\n\treturn w.current\n}\n<|endoftext|>"} {"text":"<commit_before>package weave\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nconst (\n\tInitialInterval = 5 * time.Second\n\tMaxInterval = 10 * time.Minute\n\tMaxAttemptCount = 100\n\tCMInitiate = iota\n\tCMStatus = iota\n\tCMAttemptDone = iota\n)\n\nconst (\n\tCSUnconnected ConnectionState = iota\n\tCSAttempting = iota\n)\n\nfunc StartConnectionMaker(router *Router) *ConnectionMaker {\n\tqueryChan := make(chan *ConnectionMakerInteraction, ChannelSize)\n\tstate := &ConnectionMaker{\n\t\trouter: router,\n\t\tqueryChan: queryChan,\n\t\tcmdLineAddress: make(map[string]bool),\n\t\ttargets: make(map[string]*Target)}\n\tgo state.queryLoop(queryChan)\n\treturn state\n}\n\nfunc (cm *ConnectionMaker) InitiateConnection(address string) {\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMInitiate},\n\t\taddress: address}\n}\n\nfunc (cm *ConnectionMaker) String() string {\n\tresultChan := make(chan interface{}, 0)\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMStatus, resultChan: resultChan}}\n\tresult := <-resultChan\n\treturn result.(string)\n}\n\nfunc (cm *ConnectionMaker) queryLoop(queryChan <-chan *ConnectionMakerInteraction) {\n\tvar tick <-chan time.Time\n\tmaybeTick := func() {\n\t\t\/\/ would be nice to optimise this to stop ticking when there is nothing worth trying\n\t\tif tick == nil {\n\t\t\ttick = time.After(5 * time.Second)\n\t\t}\n\t}\n\ttickNow := func() {\n\t\tif tick == nil {\n\t\t\ttick = time.After(0 * time.Second)\n\t\t}\n\t}\n\tfor {\n\t\tselect {\n\t\tcase query, ok := <-queryChan:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase query.code == CMInitiate:\n\t\t\t\tcm.cmdLineAddress[query.address] = true\n\t\t\t\ttickNow()\n\t\t\tcase query.code == CMStatus:\n\t\t\t\tquery.resultChan <- cm.status()\n\t\t\tcase query.code == CMAttemptDone:\n\t\t\t\ttarget := cm.targets[query.address]\n\t\t\t\tif target != nil {\n\t\t\t\t\ttarget.state = CSUnconnected\n\t\t\t\t\ttarget.tryAfter, target.tryInterval = tryAfter(target.tryInterval)\n\t\t\t\t\tmaybeTick()\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(\"CMAttemptDone unknown address\", query.address)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Fatal(\"Unexpected connection maker query:\", query)\n\t\t\t}\n\t\tcase now := <-tick:\n\t\t\tcm.checkStateAndAttemptConnections(now)\n\t\t\ttick = nil\n\t\t\tmaybeTick()\n\t\t}\n\t}\n}\n\nfunc (cm *ConnectionMaker) checkStateAndAttemptConnections(now time.Time) {\n\tourself := cm.router.Ourself\n\tvalidTarget := make(map[string]bool)\n\n\t\/\/ copy the set of peers we are connected to, so we can access it without locking\n\tour_connected_peers := make(map[PeerName]bool)\n\tour_connected_targets := make(map[string]bool)\n\tourself.ForEachConnection(func(peer PeerName, conn Connection) {\n\t\tour_connected_peers[peer] = true\n\t\tour_connected_targets[conn.RemoteTCPAddr()] = true\n\t})\n\n\t\/\/ Add command-line targets that are not connected\n\tfor address, _ := range cm.cmdLineAddress {\n\t\tif !our_connected_targets[address] {\n\t\t\t\/\/log.Println(\"Unconnected cmdline:\", address)\n\t\t\tvalidTarget[address] = true\n\t\t}\n\t}\n\n\t\/\/ Add peers that someone else is connected to, but we aren't\n\tcm.router.Peers.ForEach(func(name PeerName, peer *Peer) {\n\t\tpeer.ForEachConnection(func(peer2 PeerName, conn Connection) {\n\t\t\tif peer2 != ourself.Name && !our_connected_peers[peer2] {\n\t\t\t\taddress := conn.RemoteTCPAddr()\n\t\t\t\t\/\/log.Println(\"Unconnected peer:\", peer2, address)\n\t\t\t\t\/\/ try both portnumber of connection and standart port\n\t\t\t\tif host, port, err := ExtractHostPort(address); err == nil {\n\t\t\t\t\tif port != Port {\n\t\t\t\t\t\tvalidTarget[address] = true\n\t\t\t\t\t}\n\t\t\t\t\tvalidTarget[host] = true\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\n\tfor address, _ := range validTarget {\n\t\tcm.addToTargets(address)\n\t}\n\n\tfor address, target := range cm.targets {\n\t\tif target.state == CSUnconnected {\n\t\t\tif our_connected_targets[address] || !validTarget[address] {\n\t\t\t\t\/\/log.Println(\"Deleting target no longer valid:\", address)\n\t\t\t\tdelete(cm.targets, address)\n\t\t\t} else if now.After(target.tryAfter) {\n\t\t\t\ttarget.attemptCount += 1\n\t\t\t\ttarget.state = CSAttempting\n\t\t\t\tgo cm.attemptConnection(address, cm.cmdLineAddress[address])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (cm *ConnectionMaker) addToTargets(address string) {\n\ttarget := cm.targets[address]\n\tif target == nil {\n\t\ttarget = &Target{\n\t\t\tstate: CSUnconnected,\n\t\t}\n\t\ttarget.tryAfter, target.tryInterval = tryImmediately()\n\t\tcm.targets[address] = target\n\t}\n}\n\nfunc (cm *ConnectionMaker) status() string {\n\tvar buf bytes.Buffer\n\tfor address, target := range cm.targets {\n\t\tif target.state == CSAttempting {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s (%v attempts, trying since %v)\\n\", address, target.attemptCount, target.tryAfter))\n\t\t} else {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s (%v attempts, next at %v)\\n\", address, target.attemptCount, target.tryAfter))\n\t\t}\n\t}\n\treturn buf.String()\n}\n\nfunc (cm *ConnectionMaker) attemptConnection(address string, acceptNewPeer bool) {\n\tlog.Println(\"Attempting connection to\", address)\n\tif err := cm.router.Ourself.CreateConnection(address, acceptNewPeer); err != nil {\n\t\tlog.Println(err)\n\t}\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMAttemptDone},\n\t\taddress: address}\n}\n\nfunc tryImmediately() (time.Time, time.Duration) {\n\tinterval := time.Duration(rand.Int63n(int64(InitialInterval)))\n\treturn time.Now(), interval\n}\n\nfunc tryAfter(interval time.Duration) (time.Time, time.Duration) {\n\tinterval += time.Duration(rand.Int63n(int64(interval)))\n\tif interval > MaxInterval {\n\t\tinterval = MaxInterval\n\t}\n\treturn time.Now().Add(interval), interval\n}\n<commit_msg>Normalise peer address before checking<commit_after>package weave\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nconst (\n\tInitialInterval = 5 * time.Second\n\tMaxInterval = 10 * time.Minute\n\tMaxAttemptCount = 100\n\tCMInitiate = iota\n\tCMStatus = iota\n\tCMAttemptDone = iota\n)\n\nconst (\n\tCSUnconnected ConnectionState = iota\n\tCSAttempting = iota\n)\n\nfunc StartConnectionMaker(router *Router) *ConnectionMaker {\n\tqueryChan := make(chan *ConnectionMakerInteraction, ChannelSize)\n\tstate := &ConnectionMaker{\n\t\trouter: router,\n\t\tqueryChan: queryChan,\n\t\tcmdLineAddress: make(map[string]bool),\n\t\ttargets: make(map[string]*Target)}\n\tgo state.queryLoop(queryChan)\n\treturn state\n}\n\nfunc (cm *ConnectionMaker) InitiateConnection(address string) {\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMInitiate},\n\t\taddress: address}\n}\n\nfunc (cm *ConnectionMaker) String() string {\n\tresultChan := make(chan interface{}, 0)\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMStatus, resultChan: resultChan}}\n\tresult := <-resultChan\n\treturn result.(string)\n}\n\nfunc (cm *ConnectionMaker) queryLoop(queryChan <-chan *ConnectionMakerInteraction) {\n\tvar tick <-chan time.Time\n\tmaybeTick := func() {\n\t\t\/\/ would be nice to optimise this to stop ticking when there is nothing worth trying\n\t\tif tick == nil {\n\t\t\ttick = time.After(5 * time.Second)\n\t\t}\n\t}\n\ttickNow := func() {\n\t\tif tick == nil {\n\t\t\ttick = time.After(0 * time.Second)\n\t\t}\n\t}\n\tfor {\n\t\tselect {\n\t\tcase query, ok := <-queryChan:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase query.code == CMInitiate:\n\t\t\t\tcm.cmdLineAddress[query.address] = true\n\t\t\t\ttickNow()\n\t\t\tcase query.code == CMStatus:\n\t\t\t\tquery.resultChan <- cm.status()\n\t\t\tcase query.code == CMAttemptDone:\n\t\t\t\ttarget := cm.targets[query.address]\n\t\t\t\tif target != nil {\n\t\t\t\t\ttarget.state = CSUnconnected\n\t\t\t\t\ttarget.tryAfter, target.tryInterval = tryAfter(target.tryInterval)\n\t\t\t\t\tmaybeTick()\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(\"CMAttemptDone unknown address\", query.address)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Fatal(\"Unexpected connection maker query:\", query)\n\t\t\t}\n\t\tcase now := <-tick:\n\t\t\tcm.checkStateAndAttemptConnections(now)\n\t\t\ttick = nil\n\t\t\tmaybeTick()\n\t\t}\n\t}\n}\n\nfunc (cm *ConnectionMaker) checkStateAndAttemptConnections(now time.Time) {\n\tourself := cm.router.Ourself\n\tvalidTarget := make(map[string]bool)\n\n\t\/\/ copy the set of peers we are connected to, so we can access it without locking\n\tour_connected_peers := make(map[PeerName]bool)\n\tour_connected_targets := make(map[string]bool)\n\tourself.ForEachConnection(func(peer PeerName, conn Connection) {\n\t\t\/\/log.Println(\"Connected peer:\", peer, conn.RemoteTCPAddr())\n\t\tour_connected_peers[peer] = true\n\t\tour_connected_targets[conn.RemoteTCPAddr()] = true\n\t})\n\n\t\/\/ Add command-line targets that are not connected\n\tfor address, _ := range cm.cmdLineAddress {\n\t\tif !our_connected_targets[NormalisePeerAddr(address)] {\n\t\t\t\/\/log.Println(\"Unconnected cmdline:\", address)\n\t\t\tvalidTarget[address] = true\n\t\t}\n\t}\n\n\t\/\/ Add peers that someone else is connected to, but we aren't\n\tcm.router.Peers.ForEach(func(name PeerName, peer *Peer) {\n\t\tpeer.ForEachConnection(func(peer2 PeerName, conn Connection) {\n\t\t\tif peer2 != ourself.Name && !our_connected_peers[peer2] {\n\t\t\t\taddress := conn.RemoteTCPAddr()\n\t\t\t\t\/\/log.Println(\"Unconnected peer:\", peer2, address)\n\t\t\t\t\/\/ try both portnumber of connection and standart port\n\t\t\t\tif host, port, err := ExtractHostPort(address); err == nil {\n\t\t\t\t\tif port != Port {\n\t\t\t\t\t\tvalidTarget[address] = true\n\t\t\t\t\t}\n\t\t\t\t\tvalidTarget[host] = true\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\n\tfor address, _ := range validTarget {\n\t\tcm.addToTargets(address)\n\t}\n\n\tfor address, target := range cm.targets {\n\t\tif target.state == CSUnconnected {\n\t\t\tif our_connected_targets[address] || !validTarget[address] {\n\t\t\t\t\/\/log.Println(\"Deleting target no longer valid:\", address)\n\t\t\t\tdelete(cm.targets, address)\n\t\t\t} else if now.After(target.tryAfter) {\n\t\t\t\ttarget.attemptCount += 1\n\t\t\t\ttarget.state = CSAttempting\n\t\t\t\tgo cm.attemptConnection(address, cm.cmdLineAddress[address])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (cm *ConnectionMaker) addToTargets(address string) {\n\ttarget := cm.targets[address]\n\tif target == nil {\n\t\ttarget = &Target{\n\t\t\tstate: CSUnconnected,\n\t\t}\n\t\ttarget.tryAfter, target.tryInterval = tryImmediately()\n\t\tcm.targets[address] = target\n\t}\n}\n\nfunc (cm *ConnectionMaker) status() string {\n\tvar buf bytes.Buffer\n\tfor address, target := range cm.targets {\n\t\tif target.state == CSAttempting {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s (%v attempts, trying since %v)\\n\", address, target.attemptCount, target.tryAfter))\n\t\t} else {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s (%v attempts, next at %v)\\n\", address, target.attemptCount, target.tryAfter))\n\t\t}\n\t}\n\treturn buf.String()\n}\n\nfunc (cm *ConnectionMaker) attemptConnection(address string, acceptNewPeer bool) {\n\tlog.Println(\"Attempting connection to\", address)\n\tif err := cm.router.Ourself.CreateConnection(address, acceptNewPeer); err != nil {\n\t\tlog.Println(err)\n\t}\n\tcm.queryChan <- &ConnectionMakerInteraction{\n\t\tInteraction: Interaction{code: CMAttemptDone},\n\t\taddress: address}\n}\n\nfunc tryImmediately() (time.Time, time.Duration) {\n\tinterval := time.Duration(rand.Int63n(int64(InitialInterval)))\n\treturn time.Now(), interval\n}\n\nfunc tryAfter(interval time.Duration) (time.Time, time.Duration) {\n\tinterval += time.Duration(rand.Int63n(int64(interval)))\n\tif interval > MaxInterval {\n\t\tinterval = MaxInterval\n\t}\n\treturn time.Now().Add(interval), interval\n}\n<|endoftext|>"} {"text":"<commit_before>package connectors\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/masterzen\/winrm\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"github.com\/projectjane\/jane\/parse\"\n)\n\n\/\/WinRM Struct representing a WinRM Connector\ntype WinRM struct {\n}\n\n\/\/Listen Listen not implemented for WinRM\nfunc (x WinRM) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\tfor _, command := range connector.Commands {\n\t\tif command.RunCheck {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Starting Listener for \" + connector.ID + \" \" + command.Name)\n\t\t\t}\n\t\t\tgo checkRM(commandMsgs, command, connector)\n\t\t}\n\t}\n}\n\n\/\/Command Standard command parser\nfunc (x WinRM) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tfor _, command := range connector.Commands {\n\t\tif match, tokens := parse.Match(command.Match, message.In.Text); match {\n\t\t\targs := parse.Substitute(command.Args, tokens)\n\n\t\t\ttokens[\"STDOUT\"] = sendCommand(command.Cmd, args, connector)\n\n\t\t\tvar color = \"NONE\"\n\t\t\tvar match = false\n\t\t\tif match, _ = parse.Match(command.Green, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"SUCCESS\"\n\t\t\t}\n\t\t\tif match, _ = parse.Match(command.Yellow, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"WARN\"\n\t\t\t}\n\t\t\tif match, _ = parse.Match(command.Red, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"FAIL\"\n\t\t\t}\n\t\t\tmessage.Out.Text = connector.ID + \" \" + command.Name\n\t\t\tmessage.Out.Detail = parse.Substitute(command.Output, tokens)\n\t\t\tmessage.Out.Status = color\n\t\t\tpublishMsgs <- message\n\t\t}\n\t}\n}\n\n\/\/Publish Not implemented for WinRM\nfunc (x WinRM) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/Help Returns help information for the connector\nfunc (x WinRM) Help(connector models.Connector) (help string) {\n\tfor _, command := range connector.Commands {\n\t\tif !command.HideHelp {\n\t\t\tif command.Help != \"\" {\n\t\t\t\thelp += command.Help + \"\\n\"\n\t\t\t} else {\n\t\t\t\thelp += command.Match + \"\\n\"\n\t\t\t}\n\t\t}\n\t}\n\treturn help\n}\n\nfunc checkRM(commandMsgs chan<- models.Message, command models.Command, connector models.Connector) {\n\tvar state = command.Green\n\tvar interval = 1\n\tvar remind = 0\n\tif command.Interval > 0 {\n\t\tinterval = command.Interval\n\t}\n\tif command.Remind > 0 {\n\t\tremind = command.Remind\n\t}\n\tvar counter = 0\n\tfor {\n\t\tvar color = \"NONE\"\n\t\tvar match = false\n\t\tvar newstate = \"\"\n\t\tcounter++\n\t\tvar out string\n\n\t\tout = sendCommand(command.Cmd, command.Args, connector)\n\n\t\tif match, _ = parse.Match(command.Green, out); match {\n\t\t\tnewstate = command.Green\n\t\t\tcolor = \"SUCCESS\"\n\t\t}\n\t\tif match, _ = parse.Match(command.Yellow, out); match {\n\t\t\tnewstate = command.Yellow\n\t\t\tcolor = \"WARN\"\n\t\t}\n\t\tif match, _ = parse.Match(command.Red, out); match {\n\t\t\tnewstate = command.Red\n\t\t\tcolor = \"FAIL\"\n\t\t}\n\t\tif newstate != state || (newstate != command.Green && counter == remind && remind != 0) {\n\t\t\tvar message models.Message\n\t\t\tmessage.Routes = connector.Routes\n\t\t\tmessage.In.Source = connector.ID\n\t\t\tmessage.In.Process = false\n\t\t\tmessage.Out.Text = connector.ID + \" \" + command.Name\n\t\t\tmessage.Out.Detail = strings.Replace(command.Output, \"${STDOUT}\", out, -1)\n\t\t\tmessage.Out.Status = color\n\t\t\tcommandMsgs <- message\n\t\t\tstate = newstate\n\t\t}\n\t\tif counter >= remind {\n\t\t\tcounter = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(interval) * time.Minute)\n\t}\n}\n\nfunc sendCommand(command, args string, connector models.Connector) string {\n\tport := 5985\n\tif connector.Port != \"\" {\n\t\tport, _ = strconv.Atoi(connector.Port)\n\t}\n\n\tendpoint := winrm.NewEndpoint(connector.Server, port, false, false, nil, nil, nil, 0)\n\trmclient, err := winrm.NewClient(endpoint, connector.Login, connector.Pass)\n\tif err != nil {\n\t\tlog.Println(\"Error connecting to endpoint:\", err)\n\t\treturn \"Error connecting to endpoint: \" + err.Error()\n\t}\n\n\tvar in bytes.Buffer\n\tvar out bytes.Buffer\n\tvar e bytes.Buffer\n\n\tstdin := bufio.NewReader(&in)\n\tstdout := bufio.NewWriter(&out)\n\tstderr := bufio.NewWriter(&e)\n\n\t_, err = rmclient.RunWithInput(command, stdout, stderr, stdin)\n\tif err != nil {\n\t\treturn \"Error running command: \" + err.Error()\n\t}\n\n\tif e.String() != \"\" {\n\t\treturn e.String()\n\t}\n\n\treturn out.String()\n}\n<commit_msg>oops, fixing winrm ref issue<commit_after>package connectors\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"github.com\/projectjane\/jane\/parse\"\n\t\"github.com\/projectjane\/winrm\"\n)\n\n\/\/WinRM Struct representing a WinRM Connector\ntype WinRM struct {\n}\n\n\/\/Listen Listen not implemented for WinRM\nfunc (x WinRM) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\tfor _, command := range connector.Commands {\n\t\tif command.RunCheck {\n\t\t\tif connector.Debug {\n\t\t\t\tlog.Print(\"Starting Listener for \" + connector.ID + \" \" + command.Name)\n\t\t\t}\n\t\t\tgo checkRM(commandMsgs, command, connector)\n\t\t}\n\t}\n}\n\n\/\/Command Standard command parser\nfunc (x WinRM) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tfor _, command := range connector.Commands {\n\t\tif match, tokens := parse.Match(command.Match, message.In.Text); match {\n\t\t\targs := parse.Substitute(command.Args, tokens)\n\n\t\t\ttokens[\"STDOUT\"] = sendCommand(command.Cmd, args, connector)\n\n\t\t\tvar color = \"NONE\"\n\t\t\tvar match = false\n\t\t\tif match, _ = parse.Match(command.Green, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"SUCCESS\"\n\t\t\t}\n\t\t\tif match, _ = parse.Match(command.Yellow, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"WARN\"\n\t\t\t}\n\t\t\tif match, _ = parse.Match(command.Red, tokens[\"STDOUT\"]); match {\n\t\t\t\tcolor = \"FAIL\"\n\t\t\t}\n\t\t\tmessage.Out.Text = connector.ID + \" \" + command.Name\n\t\t\tmessage.Out.Detail = parse.Substitute(command.Output, tokens)\n\t\t\tmessage.Out.Status = color\n\t\t\tpublishMsgs <- message\n\t\t}\n\t}\n}\n\n\/\/Publish Not implemented for WinRM\nfunc (x WinRM) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/Help Returns help information for the connector\nfunc (x WinRM) Help(connector models.Connector) (help string) {\n\tfor _, command := range connector.Commands {\n\t\tif !command.HideHelp {\n\t\t\tif command.Help != \"\" {\n\t\t\t\thelp += command.Help + \"\\n\"\n\t\t\t} else {\n\t\t\t\thelp += command.Match + \"\\n\"\n\t\t\t}\n\t\t}\n\t}\n\treturn help\n}\n\nfunc checkRM(commandMsgs chan<- models.Message, command models.Command, connector models.Connector) {\n\tvar state = command.Green\n\tvar interval = 1\n\tvar remind = 0\n\tif command.Interval > 0 {\n\t\tinterval = command.Interval\n\t}\n\tif command.Remind > 0 {\n\t\tremind = command.Remind\n\t}\n\tvar counter = 0\n\tfor {\n\t\tvar color = \"NONE\"\n\t\tvar match = false\n\t\tvar newstate = \"\"\n\t\tcounter++\n\t\tvar out string\n\n\t\tout = sendCommand(command.Cmd, command.Args, connector)\n\n\t\tif match, _ = parse.Match(command.Green, out); match {\n\t\t\tnewstate = command.Green\n\t\t\tcolor = \"SUCCESS\"\n\t\t}\n\t\tif match, _ = parse.Match(command.Yellow, out); match {\n\t\t\tnewstate = command.Yellow\n\t\t\tcolor = \"WARN\"\n\t\t}\n\t\tif match, _ = parse.Match(command.Red, out); match {\n\t\t\tnewstate = command.Red\n\t\t\tcolor = \"FAIL\"\n\t\t}\n\t\tif newstate != state || (newstate != command.Green && counter == remind && remind != 0) {\n\t\t\tvar message models.Message\n\t\t\tmessage.Routes = connector.Routes\n\t\t\tmessage.In.Source = connector.ID\n\t\t\tmessage.In.Process = false\n\t\t\tmessage.Out.Text = connector.ID + \" \" + command.Name\n\t\t\tmessage.Out.Detail = strings.Replace(command.Output, \"${STDOUT}\", out, -1)\n\t\t\tmessage.Out.Status = color\n\t\t\tcommandMsgs <- message\n\t\t\tstate = newstate\n\t\t}\n\t\tif counter >= remind {\n\t\t\tcounter = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(interval) * time.Minute)\n\t}\n}\n\nfunc sendCommand(command, args string, connector models.Connector) string {\n\tport := 5985\n\tif connector.Port != \"\" {\n\t\tport, _ = strconv.Atoi(connector.Port)\n\t}\n\n\tendpoint := winrm.NewEndpoint(connector.Server, port, false, false, nil, nil, nil, 0)\n\trmclient, err := winrm.NewClient(endpoint, connector.Login, connector.Pass)\n\tif err != nil {\n\t\tlog.Println(\"Error connecting to endpoint:\", err)\n\t\treturn \"Error connecting to endpoint: \" + err.Error()\n\t}\n\n\tvar in bytes.Buffer\n\tvar out bytes.Buffer\n\tvar e bytes.Buffer\n\n\tstdin := bufio.NewReader(&in)\n\tstdout := bufio.NewWriter(&out)\n\tstderr := bufio.NewWriter(&e)\n\n\t_, err = rmclient.RunWithInput(command, stdout, stderr, stdin)\n\tif err != nil {\n\t\treturn \"Error running command: \" + err.Error()\n\t}\n\n\tif e.String() != \"\" {\n\t\treturn e.String()\n\t}\n\n\treturn out.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package connectors\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/masterzen\/winrm\"\n\t\"github.com\/projectjane\/jane\/models\"\n)\n\n\/\/WinRM Struct representing a WinRM Connector\ntype WinRM struct {\n\tClient *winrm.Client\n}\n\n\/\/Listen Standard listen\nfunc (x WinRM) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n\n\tvar err error\n\n\tendpoint := winrm.NewEndpoint(connector.Server, 5985, false, false, nil, nil, nil, 0)\n\tx.Client, err = winrm.NewClient(endpoint, connector.Login, connector.Pass)\n\tif err != nil {\n\t\tlog.Println(\"Error connecting to endpoint:\", err)\n\t}\n}\n\n\/\/Command Standard command parser\nfunc (x WinRM) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\t\/\/ shell, err := x.Client.CreateShell()\n\t\/\/ if err != nil {\n\t\/\/ \tlog.Println(\"Error creating shell:\", err)\n\t\/\/ }\n\t\/\/ defer shell.Close()\n\n\tif x.Client == nil {\n\t\tlog.Println(\"Client is nil. Connecting...\")\n\n\t\tvar err error\n\n\t\tendpoint := winrm.NewEndpoint(connector.Server, 5985, false, false, nil, nil, nil, 0)\n\t\tx.Client, err = winrm.NewClient(endpoint, connector.Login, connector.Pass)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error connecting to endpoint:\", err)\n\t\t}\n\n\t\tlog.Println(x.Client)\n\t}\n\n\t_, err := x.Client.RunWithInput(\"ipconfig\", os.Stdout, os.Stderr, os.Stdin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n\/\/Publish Not implemented\nfunc (x WinRM) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/Help Returns help information for the connector\nfunc (x WinRM) Help(connector models.Connector) (help string) {\n\thelp += \"winrm\"\n\treturn help\n}\n<commit_msg>Cleaning up the output a bit. Stopped using stdin and stdout<commit_after>package connectors\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"log\"\n\n\t\"github.com\/masterzen\/winrm\"\n\t\"github.com\/projectjane\/jane\/models\"\n\t\"github.com\/projectjane\/jane\/parse\"\n)\n\n\/\/WinRM Struct representing a WinRM Connector\ntype WinRM struct {\n\tClient *winrm.Client\n}\n\n\/\/Listen Listen not implemented for WinRM\nfunc (x WinRM) Listen(commandMsgs chan<- models.Message, connector models.Connector) {\n\tdefer Recovery(connector)\n}\n\n\/\/Command Standard command parser\nfunc (x WinRM) Command(message models.Message, publishMsgs chan<- models.Message, connector models.Connector) {\n\tif match, tokens := parse.Match(\"winrm*\", message.In.Text); match {\n\t\tmessage.Out.Text = callWolfram(tokens[\"*\"], connector.Key)\n\t\tpublishMsgs <- message\n\t}\n\n\tif x.Client == nil {\n\t\tlog.Println(\"Client is nil. Connecting...\")\n\n\t\tvar err error\n\n\t\tendpoint := winrm.NewEndpoint(connector.Server, 5985, false, false, nil, nil, nil, 0)\n\t\tx.Client, err = winrm.NewClient(endpoint, connector.Login, connector.Pass)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error connecting to endpoint:\", err)\n\t\t}\n\t}\n\n\tout, err := x.sendCommand(\"ipconfig\")\n\tif err != nil {\n\t\tlog.Println(\"Error sending command:\", err)\n\t\tmessage.Out.Text = \"Error processing command: \"\n\t}\n\n\tmessage.Out.Text = out\n\n\tpublishMsgs <- message\n}\n\n\/\/Publish Not implemented for WinRM\nfunc (x WinRM) Publish(connector models.Connector, message models.Message, target string) {\n\treturn\n}\n\n\/\/Help Returns help information for the connector\nfunc (x WinRM) Help(connector models.Connector) (help string) {\n\thelp += \"winrm {cmd}\"\n\treturn help\n}\n\nfunc (x WinRM) sendCommand(command string) (string, error) {\n\tvar in bytes.Buffer\n\tvar out bytes.Buffer\n\tvar e bytes.Buffer\n\n\tstdin := bufio.NewReader(&in)\n\tstdout := bufio.NewWriter(&out)\n\tstderr := bufio.NewWriter(&e)\n\n\t_, err := x.Client.RunWithInput(command, stdout, stderr, stdin)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif e.String() != \"\" {\n\t\treturn \"\", errors.New(e.String())\n\t}\n\n\treturn out.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package controller\n\nimport (\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/mebiusashan\/beaker\/common\"\n)\n\ntype IndexController struct {\n\tBaseController\n}\n\nfunc (ct *IndexController) Do(c *gin.Context) {\n\tbodyStr, err := ct.Context.Cache.GET(common.TAG_HOME, \"\")\n\tif err == nil && bodyStr != \"\" {\n\t\twrite200(c, bodyStr)\n\t\treturn\n\t}\n\n\tpages, err := ct.Context.Model.PageFindAll()\n\tif err != nil {\n\t\tct.Context.Ctrl.ErrC.Do500(c)\n\t\treturn\n\t}\n\n\tcats, err := ct.Context.Model.CategoryFindAll()\n\tif err != nil {\n\t\tct.Context.Ctrl.ErrC.Do500(c)\n\t\treturn\n\t}\n\n\tarcs, err := ct.Context.Model.ArticleFindWithNum(ct.Context.Config.Website.INDEX_LIST_NUM)\n\tif err != nil {\n\t\tct.Context.Ctrl.ErrC.Do500(c)\n\t\treturn\n\t}\n\n\tvars := ct.Context.View.GetVarMap()\n\tvars.Set(\"title\", \"Home\")\n\tvars.Set(\"cats\", cats)\n\tvars.Set(\"pages\", pages)\n\tvars.Set(\"arcs\", arcs)\n\tvars.Set(\"ISINDEX\", true)\n\n\tbodyStr, err = ct.Context.View.Render(common.TEMPLATE_HOME, vars)\n\tif err != nil {\n\t\tct.Context.Ctrl.ErrC.Do500(c)\n\t\treturn\n\t}\n\n\tct.Context.Cache.SETNX(common.TAG_HOME, \"\", bodyStr, ct.Context.Config.Redis.EXPIRE_TIME)\n\twrite200(c, bodyStr)\n}\n<commit_msg>chagne index feature<commit_after>package controller\n\nimport (\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/mebiusashan\/beaker\/common\"\n)\n\ntype IndexController struct {\n\tBaseController\n}\n\nfunc (ct *IndexController) Do(c *gin.Context) {\n\tif hasCacheWriteBody(c, ct.Context.Cache, common.TAG_HOME, \"\") {\n\t\treturn\n\t}\n\n\tpages, err := ct.Context.Model.PageFindAll()\n\tif hasErrDo500(c, ct.Context.Ctrl.ErrC, err) {\n\t\treturn\n\t}\n\n\tcats, err := ct.Context.Model.CategoryFindAll()\n\tif hasErrDo500(c, ct.Context.Ctrl.ErrC, err) {\n\t\treturn\n\t}\n\tarcs, err := ct.Context.Model.ArticleFindWithNum(ct.Context.Config.Website.INDEX_LIST_NUM)\n\tif hasErrDo500(c, ct.Context.Ctrl.ErrC, err) {\n\t\treturn\n\t}\n\n\tvars := ct.Context.View.GetVarMap()\n\tvars.Set(\"title\", \"Home\")\n\tvars.Set(\"cats\", cats)\n\tvars.Set(\"pages\", pages)\n\tvars.Set(\"arcs\", arcs)\n\tvars.Set(\"ISINDEX\", true)\n\n\tbodyStr, err := ct.Context.View.Render(common.TEMPLATE_HOME, vars)\n\tif hasErrDo500(c, ct.Context.Ctrl.ErrC, err) {\n\t\treturn\n\t}\n\n\tct.Context.Cache.SETNX(common.TAG_HOME, \"\", bodyStr, ct.Context.Config.Redis.EXPIRE_TIME)\n\twrite200(c, bodyStr)\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"encoding\/hex\"\n\t\"net\/http\"\n\n\t\"github.com\/herald-it\/goncord\/keygen\"\n\t\"github.com\/herald-it\/goncord\/models\"\n\t\"github.com\/herald-it\/goncord\/pwd_hash\"\n\t\"github.com\/herald-it\/goncord\/querying\"\n\t\"github.com\/herald-it\/goncord\/utils\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\ntype UserController struct {\n\tsession *mgo.Session\n}\n\nfunc (uc UserController) GetDB() *mgo.Database {\n\treturn uc.session.DB(\"auth_service\")\n}\n\nfunc NewUserController(s *mgo.Session) *UserController {\n\treturn &UserController{s}\n}\n\n\/\/ Save user and token to table token_dump.\nfunc (uc UserController) dumpUser(usr *models.User, token string) error {\n\tdump_token := models.NewDumpToken(usr, token)\n\terr := uc.GetDB().C(\"token_dump\").Insert(&dump_token)\n\n\treturn err\n}\n\nfunc (uc UserController) LoginUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) {\n\n\tcollect := uc.GetDB().C(\"users\")\n\n\terr := r.ParseForm()\n\tutils.LogError(err)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tusr := new(models.User)\n\tutils.Fill(usr, r.PostForm)\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuser_exist, err := querying.Find(usr, collect)\n\tutils.LogError(err)\n\n\tif user_exist == nil {\n\t\thttp.Error(w, http.StatusText(http.StatusPreconditionFailed), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tkey_pair, err := keygen.NewKeyPair()\n\tutils.LogError(err)\n\n\ttoken, err := user_exist.NewToken(key_pair.Private)\n\tutils.LogError(err)\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"jwt\",\n\t\tValue: token,\n\t\tHttpOnly: true,\n\t\tSecure: true})\n\n\terr := uc.dumpUser(user_exist, token)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusConflict), http.StatusConflict)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"Token succesfully added.\"))\n}\n\nfunc (uc UserController) RegisterUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) {\n\n\tcollect := uc.GetDB().C(\"users\")\n\n\terr := r.ParseForm()\n\tutils.LogError(err)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n\n\tusr := new(models.User)\n\tutils.Fill(usr, r.PostForm)\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuser_exist, err := querying.IsExist(usr, collect)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif user_exist {\n\t\thttp.Error(w, http.StatusText(http.StatusNotAcceptable), http.StatusNotAcceptable)\n\t\tw.Write([]byte(\"User already exist\"))\n\t\treturn\n\t}\n\n\tcollect.Insert(&usr)\n\tw.Write([]byte(\"Succesfully added\"))\n}\n<commit_msg>Add more response information.<commit_after>package controllers\n\nimport (\n\t\"encoding\/hex\"\n\t\"net\/http\"\n\n\t\"github.com\/herald-it\/goncord\/keygen\"\n\t\"github.com\/herald-it\/goncord\/models\"\n\t\"github.com\/herald-it\/goncord\/pwd_hash\"\n\t\"github.com\/herald-it\/goncord\/querying\"\n\t\"github.com\/herald-it\/goncord\/utils\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\ntype UserController struct {\n\tsession *mgo.Session\n}\n\nfunc (uc UserController) GetDB() *mgo.Database {\n\treturn uc.session.DB(\"auth_service\")\n}\n\nfunc NewUserController(s *mgo.Session) *UserController {\n\treturn &UserController{s}\n}\n\n\/\/ Save user and token to table token_dump.\nfunc (uc UserController) dumpUser(usr *models.User, token string) error {\n\tdump_token := models.NewDumpToken(usr, token)\n\terr := uc.GetDB().C(\"token_dump\").Insert(&dump_token)\n\n\treturn err\n}\n\nfunc (uc UserController) LoginUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) {\n\n\tcollect := uc.GetDB().C(\"users\")\n\n\terr := r.ParseForm()\n\tutils.LogError(err)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\tw.Write([]byte(\"Post form can not be parsed.\"))\n\t\treturn\n\t}\n\n\tusr := new(models.User)\n\tutils.Fill(usr, r.PostForm)\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuser_exist, err := querying.FindUser(usr, collect)\n\tutils.LogError(err)\n\n\tif user_exist == nil {\n\t\thttp.Error(w, http.StatusText(http.StatusPreconditionFailed), http.StatusPreconditionFailed)\n\t\tw.Write([]byte(\"User does not exist.\"))\n\t\treturn\n\t}\n\n\tkey_pair, err := keygen.NewKeyPair()\n\tutils.LogError(err)\n\n\ttoken, err := user_exist.NewToken(key_pair.Private)\n\tutils.LogError(err)\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"jwt\",\n\t\tValue: token,\n\t\tHttpOnly: true,\n\t\tSecure: true})\n\n\terr = uc.dumpUser(user_exist, token)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusConflict), http.StatusConflict)\n\t\tw.Write([]byte(\"Token can not be dump.\"))\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"Token succesfully added.\"))\n}\n\nfunc (uc UserController) RegisterUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) {\n\n\tcollect := uc.GetDB().C(\"users\")\n\n\terr := r.ParseForm()\n\tutils.LogError(err)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\tw.Write([]byte(\"Post form can not be parsed.\"))\n\t\treturn\n\t}\n\n\tusr := new(models.User)\n\tutils.Fill(usr, r.PostForm)\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuser_exist, err := querying.IsExistUser(usr, collect)\n\tutils.LogError(err)\n\n\tif user_exist {\n\t\thttp.Error(w, http.StatusText(http.StatusNotAcceptable), http.StatusNotAcceptable)\n\t\tw.Write([]byte(\"User already exist\"))\n\t\treturn\n\t}\n\n\tcollect.Insert(&usr)\n\tw.Write([]byte(\"Succesfully added\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"encoding\/hex\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/herald-it\/goncord\/models\"\n\t. \"github.com\/herald-it\/goncord\/utils\"\n\t\"github.com\/herald-it\/goncord\/utils\/keygen\"\n\t\"github.com\/herald-it\/goncord\/utils\/pwd_hash\"\n\t\"github.com\/herald-it\/goncord\/utils\/querying\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\ntype UserController struct {\n\tsession *mgo.Session\n}\n\nfunc (uc UserController) GetDB() *mgo.Database {\n\treturn uc.session.DB(models.Set.Database.DbName)\n}\n\nfunc NewUserController(s *mgo.Session) *UserController {\n\treturn &UserController{s}\n}\n\n\/\/ dumpUser save user and token to table token_dump.\nfunc (uc UserController) dumpUser(usr *models.User, token string) error {\n\tdumpToken := models.NewDumpToken(usr, token)\n\terr := uc.GetDB().C(models.Set.Database.TokenTable).Insert(&dumpToken)\n\n\treturn err\n}\n\n\/\/ LoginUser user authorization.\n\/\/ Authorization information is obtained from\n\/\/ form post. In order to log in\n\/\/ post the form should contain fields such as:\n\/\/ \tlogin\n\/\/ \tpassword\n\/\/ \temail\n\/\/ If authentication is successful, the user in the cookie\n\/\/ will add the jwt token. Cook's name will be the jwt and the value\n\/\/ the issued token.\n\/\/ The token lifetime is 7 days. After the expiration of\n\/\/ the lifetime of the token, the authorization process need\n\/\/ pass again.\nfunc (uc UserController) LoginUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := uc.GetDB().C(models.Set.Database.UserTable)\n\n\tif err := r.ParseForm(); err != nil {\n\t\treturn &HttpError{err, \"Post form can not be parsed.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tif err := Fill(usr, r.PostForm, \"login\", \"email\", \"password\"); err != nil {\n\t\treturn &HttpError{err, \"Error fill form. Not all fields are specified.\", 500}\n\t}\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuserExist, err := querying.FindUser(usr, collect)\n\tif userExist == nil || err != nil {\n\t\treturn &HttpError{err, \"User not exist.\", 500}\n\t}\n\n\tkeyPair, err := keygen.NewKeyPair()\n\tif err != nil {\n\t\treturn &HttpError{err, \"New key pair error.\", 500}\n\t}\n\n\ttoken, err := userExist.NewToken(keyPair.Private)\n\tif err != nil {\n\t\treturn &HttpError{err, \"New token error.\", 500}\n\t}\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"jwt\",\n\t\tValue: token,\n\t\tDomain: models.Set.Domain,\n\t\tHttpOnly: true,\n\t\tSecure: false}) \/\/ TODO: HTTPS. Если true то токена не видно.\n\n\tif err = uc.dumpUser(userExist, token); err != nil {\n\t\treturn &HttpError{err, \"Token can not be dumped.\", 500}\n\t}\n\n\tw.Write([]byte(\"Token succesfully added.\"))\n\n\tlog.Println(\"Token added: \", token)\n\tusr.Password = usr.Password[:5] + \"...\"\n\tlog.Println(\"For user: \", usr)\n\treturn nil\n}\n\n\/\/ RegisterUser registration of the user.\n\/\/ Details for registration are obtained from\n\/\/ form post.\n\/\/ For registration must be post\n\/\/ the form contained fields such as:\n\/\/ \tlogin\n\/\/ \tpassword\n\/\/ \temail\n\/\/ After registration the token is not issued.\n\/\/ To retrieve the token you need to pass the operation\n\/\/ a login.\nfunc (uc UserController) RegisterUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := uc.GetDB().C(models.Set.Database.UserTable)\n\n\tif err := r.ParseForm(); err != nil {\n\t\treturn &HttpError{err, \"Post form can not be parsed.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tif err := Fill(usr, r.PostForm, \"login\", \"email\", \"password\"); err != nil {\n\t\treturn &HttpError{err, \"Error fill form. Not all fields are specified.\", 500}\n\t}\n\n\tif usr.Login == \"\" || usr.Email == \"\" || usr.Password == \"\" {\n\t\treturn &HttpError{nil, \"All required fields were not filled.\", 500}\n\t}\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tisUserExist, err := querying.IsExistUser(usr, collect)\n\tif err != nil {\n\t\treturn &HttpError{err, \"Error check user exist.\", 500}\n\t}\n\n\tif isUserExist {\n\t\treturn &HttpError{nil, \"User already exist.\", 500}\n\t}\n\n\tcollect.Insert(&usr)\n\n\tusr.Password = usr.Password[:5] + \"...\"\n\tlog.Println(\"User added: \", usr)\n\treturn nil\n}\n<commit_msg>Add logs, del double write<commit_after>package controllers\n\nimport (\n\t\"encoding\/hex\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/herald-it\/goncord\/models\"\n\t. \"github.com\/herald-it\/goncord\/utils\"\n\t\"github.com\/herald-it\/goncord\/utils\/keygen\"\n\t\"github.com\/herald-it\/goncord\/utils\/pwd_hash\"\n\t\"github.com\/herald-it\/goncord\/utils\/querying\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\ntype UserController struct {\n\tsession *mgo.Session\n}\n\nfunc (uc UserController) GetDB() *mgo.Database {\n\treturn uc.session.DB(models.Set.Database.DbName)\n}\n\nfunc NewUserController(s *mgo.Session) *UserController {\n\treturn &UserController{s}\n}\n\n\/\/ dumpUser save user and token to table token_dump.\nfunc (uc UserController) dumpUser(usr *models.User, token string) error {\n\tdumpToken := models.NewDumpToken(usr, token)\n\terr := uc.GetDB().C(models.Set.Database.TokenTable).Insert(&dumpToken)\n\n\treturn err\n}\n\n\/\/ LoginUser user authorization.\n\/\/ Authorization information is obtained from\n\/\/ form post. In order to log in\n\/\/ post the form should contain fields such as:\n\/\/ \tlogin\n\/\/ \tpassword\n\/\/ \temail\n\/\/ If authentication is successful, the user in the cookie\n\/\/ will add the jwt token. Cook's name will be the jwt and the value\n\/\/ the issued token.\n\/\/ The token lifetime is 7 days. After the expiration of\n\/\/ the lifetime of the token, the authorization process need\n\/\/ pass again.\nfunc (uc UserController) LoginUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tlog.Println(\"Host: \", r.Host)\n\tlog.Println(\"Remote: \", r.RemoteAddr)\n\n\tcollect := uc.GetDB().C(models.Set.Database.UserTable)\n\n\tif err := r.ParseForm(); err != nil {\n\t\treturn &HttpError{err, \"Post form can not be parsed.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tif err := Fill(usr, r.PostForm, \"login\", \"email\", \"password\"); err != nil {\n\t\treturn &HttpError{err, \"Error fill form. Not all fields are specified.\", 500}\n\t}\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tuserExist, err := querying.FindUser(usr, collect)\n\tif userExist == nil || err != nil {\n\t\treturn &HttpError{err, \"User not exist.\", 500}\n\t}\n\n\tkeyPair, err := keygen.NewKeyPair()\n\tif err != nil {\n\t\treturn &HttpError{err, \"New key pair error.\", 500}\n\t}\n\n\ttoken, err := userExist.NewToken(keyPair.Private)\n\tif err != nil {\n\t\treturn &HttpError{err, \"New token error.\", 500}\n\t}\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"jwt\",\n\t\tValue: token,\n\t\tDomain: models.Set.Domain,\n\t\tHttpOnly: true,\n\t\tSecure: false}) \/\/ TODO: HTTPS. Если true то токена не видно.\n\n\tif err = uc.dumpUser(userExist, token); err != nil {\n\t\treturn &HttpError{err, \"Token can not be dumped.\", 500}\n\t}\n\n\tlog.Println(\"Token added: \", token)\n\tusr.Password = usr.Password[:5] + \"...\"\n\tlog.Println(\"For user: \", usr)\n\treturn nil\n}\n\n\/\/ RegisterUser registration of the user.\n\/\/ Details for registration are obtained from\n\/\/ form post.\n\/\/ For registration must be post\n\/\/ the form contained fields such as:\n\/\/ \tlogin\n\/\/ \tpassword\n\/\/ \temail\n\/\/ After registration the token is not issued.\n\/\/ To retrieve the token you need to pass the operation\n\/\/ a login.\nfunc (uc UserController) RegisterUser(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := uc.GetDB().C(models.Set.Database.UserTable)\n\n\tif err := r.ParseForm(); err != nil {\n\t\treturn &HttpError{err, \"Post form can not be parsed.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tif err := Fill(usr, r.PostForm, \"login\", \"email\", \"password\"); err != nil {\n\t\treturn &HttpError{err, \"Error fill form. Not all fields are specified.\", 500}\n\t}\n\n\tif usr.Login == \"\" || usr.Email == \"\" || usr.Password == \"\" {\n\t\treturn &HttpError{nil, \"All required fields were not filled.\", 500}\n\t}\n\n\tusr.Password = hex.EncodeToString(pwd_hash.Sum([]byte(usr.Password)))\n\n\tisUserExist, err := querying.IsExistUser(usr, collect)\n\tif err != nil {\n\t\treturn &HttpError{err, \"Error check user exist.\", 500}\n\t}\n\n\tif isUserExist {\n\t\treturn &HttpError{nil, \"User already exist.\", 500}\n\t}\n\n\tcollect.Insert(&usr)\n\n\tusr.Password = usr.Password[:5] + \"...\"\n\tlog.Println(\"User added: \", usr)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"regexp\"\n)\n\nfunc IsEmail(email string) bool {\n\tconst email_regex = `^([\\w\\.\\_]{2,10})@(\\w{1,}).([a-z]{2,4})$`\n\tif m, _ := regexp.MatchString(email_regex, email); !m {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<commit_msg>Fixed email regex since it did not validate email addresses such as thomasbullshit@jumdum.dk<commit_after>package controllers\n\nimport (\n\t\"regexp\"\n)\n\nfunc IsEmail(email string) bool {\n\tconst email_regex = \"^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\"\n\tif m, _ := regexp.MatchString(email_regex, email); !m {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n)\n\n\/\/ BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop\ntype BlockCache struct {\n\tsize int\n\n\thashes []common.Hash\n\tblocks map[common.Hash]*types.Block\n\n\tmu sync.RWMutex\n}\n\n\/\/ Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic\nfunc NewBlockCache(size int) *BlockCache {\n\tif size < 1 {\n\t\tpanic(\"block cache size not allowed to be smaller than 1\")\n\t}\n\n\tbc := &BlockCache{size: size}\n\tbc.Clear()\n\treturn bc\n}\n\nfunc (bc *BlockCache) Clear() {\n\tbc.blocks = make(map[common.Hash]*types.Block)\n\tbc.hashes = nil\n\n}\n\nfunc (bc *BlockCache) Push(block *types.Block) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\tif len(bc.hashes) == bc.size {\n\t\tdelete(bc.blocks, bc.hashes[0])\n\n\t\t\/\/ XXX There are a few other options on solving this\n\t\t\/\/ 1) use a poller \/ GC like mechanism to clean up untracked objects\n\t\t\/\/ 2) copy as below\n\t\t\/\/ re-use the slice and remove the reference to bc.hashes[0]\n\t\t\/\/ this will allow the element to be garbage collected.\n\t\tcopy(bc.hashes, bc.hashes[1:])\n\t} else {\n\t\tbc.hashes = append(bc.hashes, common.Hash{})\n\t}\n\n\thash := block.Hash()\n\tbc.blocks[hash] = block\n\tbc.hashes[len(bc.hashes)-1] = hash\n}\n\nfunc (bc *BlockCache) Delete(hash common.Hash) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\tif _, ok := bc.blocks[hash]; ok {\n\t\tdelete(bc.blocks, hash)\n\t\tfor i, h := range bc.hashes {\n\t\t\tif hash == h {\n\t\t\t\tbc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]\n\t\t\t\t\/\/ or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (bc *BlockCache) Get(hash common.Hash) *types.Block {\n\tbc.mu.RLock()\n\tdefer bc.mu.RUnlock()\n\n\tif block, haz := bc.blocks[hash]; haz {\n\t\treturn block\n\t}\n\n\treturn nil\n}\n\nfunc (bc *BlockCache) Has(hash common.Hash) bool {\n\t_, ok := bc.blocks[hash]\n\treturn ok\n}\n\nfunc (bc *BlockCache) Each(cb func(int, *types.Block)) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\ti := 0\n\tfor _, block := range bc.blocks {\n\t\tcb(i, block)\n\t\ti++\n\t}\n}\n<commit_msg>core: block cache Has method thread safe<commit_after>package core\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n)\n\n\/\/ BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop\ntype BlockCache struct {\n\tsize int\n\n\thashes []common.Hash\n\tblocks map[common.Hash]*types.Block\n\n\tmu sync.RWMutex\n}\n\n\/\/ Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic\nfunc NewBlockCache(size int) *BlockCache {\n\tif size < 1 {\n\t\tpanic(\"block cache size not allowed to be smaller than 1\")\n\t}\n\n\tbc := &BlockCache{size: size}\n\tbc.Clear()\n\treturn bc\n}\n\nfunc (bc *BlockCache) Clear() {\n\tbc.blocks = make(map[common.Hash]*types.Block)\n\tbc.hashes = nil\n\n}\n\nfunc (bc *BlockCache) Push(block *types.Block) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\tif len(bc.hashes) == bc.size {\n\t\tdelete(bc.blocks, bc.hashes[0])\n\n\t\t\/\/ XXX There are a few other options on solving this\n\t\t\/\/ 1) use a poller \/ GC like mechanism to clean up untracked objects\n\t\t\/\/ 2) copy as below\n\t\t\/\/ re-use the slice and remove the reference to bc.hashes[0]\n\t\t\/\/ this will allow the element to be garbage collected.\n\t\tcopy(bc.hashes, bc.hashes[1:])\n\t} else {\n\t\tbc.hashes = append(bc.hashes, common.Hash{})\n\t}\n\n\thash := block.Hash()\n\tbc.blocks[hash] = block\n\tbc.hashes[len(bc.hashes)-1] = hash\n}\n\nfunc (bc *BlockCache) Delete(hash common.Hash) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\tif _, ok := bc.blocks[hash]; ok {\n\t\tdelete(bc.blocks, hash)\n\t\tfor i, h := range bc.hashes {\n\t\t\tif hash == h {\n\t\t\t\tbc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]\n\t\t\t\t\/\/ or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (bc *BlockCache) Get(hash common.Hash) *types.Block {\n\tbc.mu.RLock()\n\tdefer bc.mu.RUnlock()\n\n\tif block, haz := bc.blocks[hash]; haz {\n\t\treturn block\n\t}\n\n\treturn nil\n}\n\nfunc (bc *BlockCache) Has(hash common.Hash) bool {\n\tbc.mu.RLock()\n\tdefer bc.mu.RUnlock()\n\n\t_, ok := bc.blocks[hash]\n\treturn ok\n}\n\nfunc (bc *BlockCache) Each(cb func(int, *types.Block)) {\n\tbc.mu.Lock()\n\tdefer bc.mu.Unlock()\n\n\ti := 0\n\tfor _, block := range bc.blocks {\n\t\tcb(i, block)\n\t\ti++\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package index\n\nimport (\n\t\"github.com\/balzaczyy\/golucene\/core\/index\/model\"\n)\n\n\/*\nInterface for internal atomic events. See DocumentsWriter fo details.\nEvents are executed concurrently and no order is guaranteed. Each\nevent should only rely on the serializeability within its process\nmethod. All actions that must happen before or after a certain action\nmust be encoded inside the process() method.\n*\/\ntype Event func(writer *IndexWriter, triggerMerge, clearBuffers bool) error\n\nvar applyDeletesEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\tpanic(\"not implemented yet\")\n})\n\nvar mergePendingEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\tpanic(\"not implemented yet\")\n})\n\nvar forcedPurgeEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\tpanic(\"not implemented yet\")\n})\n\nfunc newFlushFailedEvent(info *model.SegmentInfo) Event {\n\treturn Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\t\treturn writer.flushFailed(info)\n\t})\n}\n\nfunc newDeleteNewFilesEvent(files map[string]bool) Event {\n\treturn Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\t\twriter.Lock()\n\t\tdefer writer.Unlock()\n\t\tvar fileList []string\n\t\tfor file, _ := range files {\n\t\t\tfileList = append(fileList, file)\n\t\t}\n\t\twriter.deleter.deleteNewFiles(fileList)\n\t\treturn nil\n\t})\n}\n<commit_msg>implement ForcedPurgeEvent<commit_after>package index\n\nimport (\n\t\"github.com\/balzaczyy\/golucene\/core\/index\/model\"\n)\n\n\/*\nInterface for internal atomic events. See DocumentsWriter fo details.\nEvents are executed concurrently and no order is guaranteed. Each\nevent should only rely on the serializeability within its process\nmethod. All actions that must happen before or after a certain action\nmust be encoded inside the process() method.\n*\/\ntype Event func(writer *IndexWriter, triggerMerge, clearBuffers bool) error\n\nvar applyDeletesEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\tpanic(\"not implemented yet\")\n})\n\nvar mergePendingEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\tpanic(\"not implemented yet\")\n})\n\nvar forcedPurgeEvent = Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\t_, err := writer.purge(true)\n\treturn err\n})\n\nfunc newFlushFailedEvent(info *model.SegmentInfo) Event {\n\treturn Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\t\treturn writer.flushFailed(info)\n\t})\n}\n\nfunc newDeleteNewFilesEvent(files map[string]bool) Event {\n\treturn Event(func(writer *IndexWriter, triggerMerge, forcePurge bool) error {\n\t\twriter.Lock()\n\t\tdefer writer.Unlock()\n\t\tvar fileList []string\n\t\tfor file, _ := range files {\n\t\t\tfileList = append(fileList, file)\n\t\t}\n\t\twriter.deleter.deleteNewFiles(fileList)\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"flag\"\n\t\"testing\"\n\n\trpcpb \"github.com\/google\/shipshape\/shipshape\/proto\/shipshape_rpc_proto\" \n)\n\nvar dockerTag = flag.String(\"shipshape_test_docker_tag\", \"\", \"the docker tag for the images to use for testing\")\n\nfunc countFailures(resp rpcpb.ShipshapeResponse) int {\n\tfailures := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tfailures += len(analyzeResp.Failure)\n\t}\n\treturn failures\n}\n\nfunc countNotes(resp rpcpb.ShipshapeResponse) int {\n\tnotes := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tnotes += len(analyzeResp.Note)\n\t}\n\treturn notes\n}\n\nfunc countCategoryNotes(resp rpcpb.ShipshapeResponse, category string) int {\n\tnotes := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tfor _, note := range analyzeResp.Note {\n\t\t\tif *note.Category == category {\n\t\t\t\tnotes += 1\n\t\t\t}\n\t\t}\n\t}\n\treturn notes\n}\n\nfunc TestExternalAnalyzers(t *testing.T) {\n\t\/\/ Replaces part of the e2e test\n\t\/\/ Create a fake maven project with android failures\n\n\t\/\/ Run CLI using a .shipshape file\n}\n\nfunc TestBuiltInAnalyzersPreBuild(t *testing.T) {\n\toptions := Options{\n\t\tFile: \"shipshape\/cli\/testdata\/workspace1\",\n\t\tThirdPartyAnalyzers: []string{},\n\t\tBuild: \"\",\n\t\tTriggerCats: []string{\"PostMessage\", \"JSHint\", \"go vet\", \"PyLint\"},\n\t\tDind: false,\n\t\tEvent: \"manual\", \/\/ TODO: const\n\t\tRepo: \"gcr.io\/shipshape_releases\", \/\/ TODO: const\n\t\tStayUp: true,\n\t\tTag: *dockerTag,\n\t\t\/\/ TODO(rsk): current e2e test can be run both with & without kythe.\n\t\tLocalKythe: false,\n\t}\n\tvar allResponses rpcpb.ShipshapeResponse\n\toptions.HandleResponse = func(shipshapeResp *rpcpb.ShipshapeResponse, _ string) error {\n\t\tallResponses.AnalyzeResponse = append(allResponses.AnalyzeResponse, shipshapeResp.AnalyzeResponse...)\n\t\treturn nil\n\t}\n\treturnedNotesCount, err := New(options).Run()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestName := \"TestBuiltInAnalyzerPreBuild\"\n\n\tif got, want := countFailures(allResponses), 0; got != want {\n\t\tt.Errorf(\"%v: Wrong number of failures; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif countedNotes := countNotes(allResponses); returnedNotesCount != countedNotes {\n\t\tt.Errorf(\"%v: Inconsistent note count: returned %v, counted %v (proto data: %v\", testName, returnedNotesCount, countedNotes, allResponses)\n\t}\n\tif got, want := returnedNotesCount, 21; got != want {\n\t\tt.Errorf(\"%v: Wrong number of notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"PostMessage\"), 2; got != want {\n\t\tt.Errorf(\"%v: Wrong number of PostMessage notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"JSHint\"), 8; got != want {\n\t\tt.Errorf(\"%v: Wrong number of JSHint notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"go vet\"), 0; got != want {\n\t\tt.Errorf(\"%v: Wrong number of go vet notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"PyLint\"), 10; got != want {\n\t\tt.Errorf(\"%v: Wrong number of PyLint notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n}\n\nfunc TestBuiltInAnalyzersPostBuild(t *testing.T) {\n\t\/\/ Replaces part of the e2e test\n\t\/\/ Test with a kythe maven build\n\t\/\/ PostMessage and ErrorProne\n}\n\nfunc TestStreamsMode(t *testing.T) {\n\t\/\/ Test whether it works in streams mode\n\t\/\/ Before creating this, ensure that streams mode\n\t\/\/ is actually still something we need to support.\n}\n\nfunc TestChangingDirectories(t *testing.T) {\n\t\/\/ Replaces the changedir test\n\t\/\/ Make sure to test changing down, changing up, running on the same directory, running on a single file in the same directory, and changing to a sibling\n}\n\nfunc dumpLogs() {\n\n}\n\nfunc checkOutput(category string, numResults int) {\n\n}\n<commit_msg>Fixes test typo<commit_after>package cli\n\nimport (\n\t\"flag\"\n\t\"testing\"\n\n\trpcpb \"github.com\/google\/shipshape\/shipshape\/proto\/shipshape_rpc_proto\" \n)\n\nvar dockerTag = flag.String(\"shipshape_test_docker_tag\", \"\", \"the docker tag for the images to use for testing\")\n\nfunc countFailures(resp rpcpb.ShipshapeResponse) int {\n\tfailures := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tfailures += len(analyzeResp.Failure)\n\t}\n\treturn failures\n}\n\nfunc countNotes(resp rpcpb.ShipshapeResponse) int {\n\tnotes := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tnotes += len(analyzeResp.Note)\n\t}\n\treturn notes\n}\n\nfunc countCategoryNotes(resp rpcpb.ShipshapeResponse, category string) int {\n\tnotes := 0\n\tfor _, analyzeResp := range resp.AnalyzeResponse {\n\t\tfor _, note := range analyzeResp.Note {\n\t\t\tif *note.Category == category {\n\t\t\t\tnotes += 1\n\t\t\t}\n\t\t}\n\t}\n\treturn notes\n}\n\nfunc TestExternalAnalyzers(t *testing.T) {\n\t\/\/ Replaces part of the e2e test\n\t\/\/ Create a fake maven project with android failures\n\n\t\/\/ Run CLI using a .shipshape file\n}\n\nfunc TestBuiltInAnalyzersPreBuild(t *testing.T) {\n\toptions := Options{\n\t\tFile: \"shipshape\/cli\/testdata\/workspace1\",\n\t\tThirdPartyAnalyzers: []string{},\n\t\tBuild: \"\",\n\t\tTriggerCats: []string{\"PostMessage\", \"JSHint\", \"go vet\", \"PyLint\"},\n\t\tDind: false,\n\t\tEvent: \"manual\", \/\/ TODO: const\n\t\tRepo: \"gcr.io\/shipshape_releases\", \/\/ TODO: const\n\t\tStayUp: true,\n\t\tTag: *dockerTag,\n\t\t\/\/ TODO(rsk): current e2e test can be run both with & without kythe.\n\t\tLocalKythe: false,\n\t}\n\tvar allResponses rpcpb.ShipshapeResponse\n\toptions.HandleResponse = func(shipshapeResp *rpcpb.ShipshapeResponse, _ string) error {\n\t\tallResponses.AnalyzeResponse = append(allResponses.AnalyzeResponse, shipshapeResp.AnalyzeResponse...)\n\t\treturn nil\n\t}\n\treturnedNotesCount, err := New(options).Run()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestName := \"TestBuiltInAnalyzerPreBuild\"\n\n\tif got, want := countFailures(allResponses), 0; got != want {\n\t\tt.Errorf(\"%v: Wrong number of failures; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif countedNotes := countNotes(allResponses); returnedNotesCount != countedNotes {\n\t\tt.Errorf(\"%v: Inconsistent note count: returned %v, counted %v (proto data: %v\", testName, returnedNotesCount, countedNotes, allResponses)\n\t}\n\tif got, want := returnedNotesCount, 21; got != want {\n\t\tt.Errorf(\"%v: Wrong number of notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"PostMessage\"), 2; got != want {\n\t\tt.Errorf(\"%v: Wrong number of PostMessage notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"JSHint\"), 8; got != want {\n\t\tt.Errorf(\"%v: Wrong number of JSHint notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"go vet\"), 0; got != want {\n\t\tt.Errorf(\"%v: Wrong number of go vet notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n\tif got, want := countCategoryNotes(allResponses, \"PyLint\"), 11; got != want {\n\t\tt.Errorf(\"%v: Wrong number of PyLint notes; got %v, want %v (proto data: %v)\", testName, got, want, allResponses)\n\t}\n}\n\nfunc TestBuiltInAnalyzersPostBuild(t *testing.T) {\n\t\/\/ Replaces part of the e2e test\n\t\/\/ Test with a kythe maven build\n\t\/\/ PostMessage and ErrorProne\n}\n\nfunc TestStreamsMode(t *testing.T) {\n\t\/\/ Test whether it works in streams mode\n\t\/\/ Before creating this, ensure that streams mode\n\t\/\/ is actually still something we need to support.\n}\n\nfunc TestChangingDirectories(t *testing.T) {\n\t\/\/ Replaces the changedir test\n\t\/\/ Make sure to test changing down, changing up, running on the same directory, running on a single file in the same directory, and changing to a sibling\n}\n\nfunc dumpLogs() {\n\n}\n\nfunc checkOutput(category string, numResults int) {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package gitManip\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fatih\/color\"\n\n\t\"bytes\"\n\n\t\"github.com\/k0pernicus\/goyave\/traces\"\n\tgit \"gopkg.in\/libgit2\/git2go.v26\"\n)\n\n\/*Map to match the RepositoryState enum type with a string\n *\/\nvar repositoryStateToString = map[git.RepositoryState]string{\n\tgit.RepositoryStateNone: \"None\",\n\tgit.RepositoryStateMerge: \"Merge\",\n\tgit.RepositoryStateRevert: \"Revert\",\n\tgit.RepositoryStateCherrypick: \"Cherrypick\",\n\tgit.RepositoryStateBisect: \"Bisect\",\n\tgit.RepositoryStateRebase: \"Rebase\",\n\tgit.RepositoryStateRebaseInteractive: \"Rebase Interactive\",\n\tgit.RepositoryStateRebaseMerge: \"Rebase Merge\",\n\tgit.RepositoryStateApplyMailbox: \"Apply Mailbox\",\n\tgit.RepositoryStateApplyMailboxOrRebase: \"Apply Mailbox or Rebase\",\n}\n\n\/*Global variable to set the StatusOption parameter, in order to list each file status\n *\/\nvar statusOption = git.StatusOptions{\n\tShow: git.StatusShowIndexAndWorkdir,\n\tFlags: git.StatusOptIncludeUntracked,\n\tPathspec: []string{},\n}\n\n\/*GitObject contains informations about the current git repository\n *\n *The structure is:\n * accessible:\n *\t\tIs the repository still exists in the hard drive?\n *\tpath:\n *\t\tThe path file.\n *\trepository:\n *\t\tThe object repository.\n *\/\ntype GitObject struct {\n\taccessible error\n\tpath string\n\trepository git.Repository\n}\n\n\/*New is a constructor for GitObject\n *\n * It neeeds:\n *\tpath:\n *\t\tThe path of the current repository.\n *\/\nfunc New(path string) *GitObject {\n\tr, err := git.OpenRepository(path)\n\treturn &GitObject{accessible: err, path: path, repository: *r}\n}\n\n\/*Clone is cloning a given repository, from a public URL\n *\n * It needs:\n * path:\n *\t\tThe local path to clone the repository.\n *\tURL:\n *\t\tThe remote URL to fetch the repository.\n *\/\nfunc Clone(path, URL string) error {\n\t_, err := git.Clone(URL, path, &git.CloneOptions{})\n\treturn err\n}\n\n\/*GetRemoteURL returns the associated remote URL of a given local path repository\n *\n * It needs:\n *\tpath\n *\t\tThe local path of a git repository\n *\/\nfunc GetRemoteURL(path string) string {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\tfmt.Println(\"The repository can't be opened\")\n\t\treturn \"\"\n\t}\n\tremoteCollection := r.Remotes\n\toriginRemote, err := remoteCollection.Lookup(\"origin\")\n\tif err != nil {\n\t\ttraces.WarningTracer.Printf(\"can't lookup origin remote URL for %s\", path)\n\t\treturn \"\"\n\t}\n\treturn originRemote.Url()\n}\n\n\/*isAccesible returns the information that is the current git repository is existing or not.\n *This method returns a boolean value: true if the git repository is still accesible (still exists), or false if not.\n *\/\nfunc (g *GitObject) isAccessible() bool {\n\treturn g.accessible == nil\n}\n\n\/*Status prints the current status of the repository, accessible via the structure path field.\n *This method works only if the repository is accessible.\n *\/\nfunc (g *GitObject) Status() {\n\tif g.isAccessible() {\n\t\tif err := g.printChanges(); err != nil {\n\t\t\tcolor.RedString(\"Impossible to get stats from %s, due to error %s\", g.path, err)\n\t\t}\n\t} else {\n\t\tcolor.RedString(\"Repository %s not found!\", g.path)\n\t}\n}\n\n\/*getDiffWithWT returns the difference between the working tree and the index, for the current git repository.\n *If there is an error processing the request, it returns an error.\n *\/\nfunc (g *GitObject) getDiffWithWT() (*git.Diff, error) {\n\t\/\/ Get the index of the repository\n\tcurrentIndex, err := g.repository.Index()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the default diff options, and add it custom flags\n\tdefaultDiffOptions, err := git.DefaultDiffOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultDiffOptions.Flags = defaultDiffOptions.Flags | git.DiffIncludeUntracked | git.DiffIncludeTypeChange\n\t\/\/ Check the difference between the working directory and the index\n\tdiff, err := g.repository.DiffIndexToWorkdir(currentIndex, &defaultDiffOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn diff, nil\n}\n\n\/*printChanges prints out all changes for the current git repository.\n *If there is an error processing the request, it returns this one.\n *\/\nfunc (g *GitObject) printChanges() error {\n\tdiff, err := g.getDiffWithWT()\n\tvar buffer bytes.Buffer\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumDeltas, err := diff.NumDeltas()\n\tif err != nil {\n\t\treturn err\n\t}\n\theadDetached, err := g.repository.IsHeadDetached()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif headDetached {\n\t\toutputHead := fmt.Sprintf(\"%s\", color.RedString(\"\\t\/!\\\\ The repository's HEAD is detached! \/!\\\\\\n\"))\n\t\tbuffer.WriteString(outputHead)\n\t}\n\tif numDeltas > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\t[%d modification(s)]\\n\", color.RedString(\"✘\"), g.path, numDeltas))\n\t\tfor i := 0; i < numDeltas; i++ {\n\t\t\tdelta, _ := diff.GetDelta(i)\n\t\t\tcurrentStatus := delta.Status\n\t\t\tnewFile := delta.NewFile.Path\n\t\t\toldFile := delta.OldFile.Path\n\t\t\tswitch currentStatus {\n\t\t\tcase git.DeltaAdded:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been added!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaDeleted:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been deleted!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaModified:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been modified!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaRenamed:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been renamed to %s!\\n\", color.MagentaString(oldFile), color.MagentaString(newFile)))\n\t\t\tcase git.DeltaUntracked:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s is untracked - please to add it or update the gitignore file!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaTypeChange:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> the type of %s has been changed from %d to %d!\", color.MagentaString(newFile), delta.OldFile.Mode, delta.NewFile.Mode))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\n\", color.GreenString(\"✔\"), g.path))\n\t}\n\tfmt.Print(buffer.String())\n\treturn nil\n}\n<commit_msg>Add needToCommit? feature<commit_after>package gitManip\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/fatih\/color\"\n\n\t\"bytes\"\n\n\t\"github.com\/k0pernicus\/goyave\/traces\"\n\tgit \"gopkg.in\/libgit2\/git2go.v26\"\n)\n\n\/*Map to match the RepositoryState enum type with a string\n *\/\nvar repositoryStateToString = map[git.RepositoryState]string{\n\tgit.RepositoryStateNone: \"None\",\n\tgit.RepositoryStateMerge: \"Merge\",\n\tgit.RepositoryStateRevert: \"Revert\",\n\tgit.RepositoryStateCherrypick: \"Cherrypick\",\n\tgit.RepositoryStateBisect: \"Bisect\",\n\tgit.RepositoryStateRebase: \"Rebase\",\n\tgit.RepositoryStateRebaseInteractive: \"Rebase Interactive\",\n\tgit.RepositoryStateRebaseMerge: \"Rebase Merge\",\n\tgit.RepositoryStateApplyMailbox: \"Apply Mailbox\",\n\tgit.RepositoryStateApplyMailboxOrRebase: \"Apply Mailbox or Rebase\",\n}\n\n\/*Global variable to set the StatusOption parameter, in order to list each file status\n *\/\nvar statusOption = git.StatusOptions{\n\tShow: git.StatusShowIndexAndWorkdir,\n\tFlags: git.StatusOptIncludeUntracked,\n\tPathspec: []string{},\n}\n\n\/*GitObject contains informations about the current git repository\n *\n *The structure is:\n * accessible:\n *\t\tIs the repository still exists in the hard drive?\n *\tpath:\n *\t\tThe path file.\n *\trepository:\n *\t\tThe object repository.\n *\/\ntype GitObject struct {\n\taccessible error\n\tpath string\n\trepository git.Repository\n}\n\n\/*New is a constructor for GitObject\n *\n * It neeeds:\n *\tpath:\n *\t\tThe path of the current repository.\n *\/\nfunc New(path string) *GitObject {\n\tr, err := git.OpenRepository(path)\n\treturn &GitObject{accessible: err, path: path, repository: *r}\n}\n\n\/*Clone is cloning a given repository, from a public URL\n *\n * It needs:\n * path:\n *\t\tThe local path to clone the repository.\n *\tURL:\n *\t\tThe remote URL to fetch the repository.\n *\/\nfunc Clone(path, URL string) error {\n\t_, err := git.Clone(URL, path, &git.CloneOptions{})\n\treturn err\n}\n\n\/*GetRemoteURL returns the associated remote URL of a given local path repository\n *\n * It needs:\n *\tpath\n *\t\tThe local path of a git repository\n *\/\nfunc GetRemoteURL(path string) string {\n\tr, err := git.OpenRepository(path)\n\tif err != nil {\n\t\tfmt.Println(\"The repository can't be opened\")\n\t\treturn \"\"\n\t}\n\tremoteCollection := r.Remotes\n\toriginRemote, err := remoteCollection.Lookup(\"origin\")\n\tif err != nil {\n\t\ttraces.WarningTracer.Printf(\"can't lookup origin remote URL for %s\", path)\n\t\treturn \"\"\n\t}\n\treturn originRemote.Url()\n}\n\n\/*isAccesible returns the information that is the current git repository is existing or not.\n *This method returns a boolean value: true if the git repository is still accesible (still exists), or false if not.\n *\/\nfunc (g *GitObject) isAccessible() bool {\n\treturn g.accessible == nil\n}\n\n\/*Status prints the current status of the repository, accessible via the structure path field.\n *This method works only if the repository is accessible.\n *\/\nfunc (g *GitObject) Status() {\n\tif g.isAccessible() {\n\t\tif err := g.printChanges(); err != nil {\n\t\t\tcolor.RedString(\"Impossible to get stats from %s, due to error %s\", g.path, err)\n\t\t}\n\t} else {\n\t\tcolor.RedString(\"Repository %s not found!\", g.path)\n\t}\n}\n\n\/*getDiffWithWT returns the difference between the working tree and the index, for the current git repository.\n *If there is an error processing the request, it returns an error.\n *\/\nfunc (g *GitObject) getDiffWithWT() (*git.Diff, error) {\n\t\/\/ Get the index of the repository\n\tcurrentIndex, err := g.repository.Index()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the default diff options, and add it custom flags\n\tdefaultDiffOptions, err := git.DefaultDiffOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultDiffOptions.Flags = defaultDiffOptions.Flags | git.DiffIncludeUntracked | git.DiffIncludeTypeChange\n\t\/\/ Check the difference between the working directory and the index\n\tdiff, err := g.repository.DiffIndexToWorkdir(currentIndex, &defaultDiffOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn diff, nil\n}\n\n\/*printChanges prints out all changes for the current git repository.\n *If there is an error processing the request, it returns this one.\n *\/\nfunc (g *GitObject) printChanges() error {\n\tdiff, err := g.getDiffWithWT()\n\tvar buffer bytes.Buffer\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumDeltas, err := diff.NumDeltas()\n\tif err != nil {\n\t\treturn err\n\t}\n\theadDetached, err := g.repository.IsHeadDetached()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif headDetached {\n\t\toutputHead := fmt.Sprintf(\"%s\", color.RedString(\"\\t\/!\\\\ The repository's HEAD is detached! \/!\\\\\\n\"))\n\t\tbuffer.WriteString(outputHead)\n\t}\n\tif numDeltas > 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\t[%d modification(s)]\\n\", color.RedString(\"✘\"), g.path, numDeltas))\n\t\tfor i := 0; i < numDeltas; i++ {\n\t\t\tdelta, _ := diff.GetDelta(i)\n\t\t\tcurrentStatus := delta.Status\n\t\t\tnewFile := delta.NewFile.Path\n\t\t\toldFile := delta.OldFile.Path\n\t\t\tswitch currentStatus {\n\t\t\tcase git.DeltaAdded:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been added!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaDeleted:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been deleted!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaModified:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been modified!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaRenamed:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s has been renamed to %s!\\n\", color.MagentaString(oldFile), color.MagentaString(newFile)))\n\t\t\tcase git.DeltaUntracked:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> %s is untracked - please to add it or update the gitignore file!\\n\", color.MagentaString(newFile)))\n\t\t\tcase git.DeltaTypeChange:\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\t===> the type of %s has been changed from %d to %d!\", color.MagentaString(newFile), delta.OldFile.Mode, delta.NewFile.Mode))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s\\n\", color.GreenString(\"✔\"), g.path))\n\t}\n\trepository_head, err := g.repository.Head()\n\tif err == nil {\n\t\trepository_id := repository_head.Target()\n\t\tcommits_ahead, _, err := g.repository.AheadBehind(repository_id, repository_id)\n\t\tif err != nil {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"%s\", color.RedString(\"\\tAn error occured checking the ahead\/behind commits...\\n\")))\n\t\t} else if commits_ahead != 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\tYou need to push the last modifications!\\n\"))\n\t\t}\n\t}\n\tfmt.Print(buffer.String())\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ac\n\nimport \"testing\"\n\nfunc TestAlienOrder(t *testing.T) {\n\tst := []struct {\n\t\tname string\n\t\twords []string\n\t\texp string\n\t}{\n\t\t{\"empty words\", []string{}, \"\"},\n\t\t{\"test1\", []string{\"wrt\", \"wrf\", \"er\", \"ett\", \"rftt\"}, \"wertf\"},\n\t\t{\"test2\", []string{\"z\", \"x\"}, \"zx\"},\n\t\t{\"test3\", []string{\"z\", \"x\", \"z\"}, \"\"},\n\t\t{\"failed1\", []string{\"za\", \"zb\", \"ca\", \"cb\"}, \"abzc\"},\n\t}\n\tfor _, tt := range st {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tout := alienOrder(tt.words)\n\t\t\tif out != tt.exp {\n\t\t\t\tt.Fatalf(\"with input words: %v wanted %s but got %s\", tt.words, tt.exp, out)\n\t\t\t}\n\t\t\tt.Log(\"pass\")\n\t\t})\n\t}\n}\n<commit_msg>fix failed test:<commit_after>package ac\n\nimport \"testing\"\n\nfunc TestAlienOrder(t *testing.T) {\n\tst := []struct {\n\t\tname string\n\t\twords []string\n\t\texp string\n\t}{\n\t\t{\"empty words\", []string{}, \"\"},\n\t\t{\"test1\", []string{\"wrt\", \"wrf\", \"er\", \"ett\", \"rftt\"}, \"wertf\"},\n\t\t{\"test2\", []string{\"z\", \"x\"}, \"zx\"},\n\t\t{\"test3\", []string{\"z\", \"x\", \"z\"}, \"\"},\n\t\t{\"failed1\", []string{\"za\", \"zb\", \"ca\", \"cb\"}, \"zacb\"},\n\t}\n\tfor _, tt := range st {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tout := alienOrder(tt.words)\n\t\t\tif out != tt.exp {\n\t\t\t\tt.Fatalf(\"with input words: %v wanted %s but got %s\", tt.words, tt.exp, out)\n\t\t\t}\n\t\t\tt.Log(\"pass\")\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport \"testing\"\n\nfunc TestCreate(t *testing.T) {\n\n}\n\nfunc TestGitCloneCreate(t *testing.T) {\n\n}\n<commit_msg>Created test cases for Migration creation<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/freneticmonkey\/migrate\/go\/exec\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/metadata\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/migration\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/mysql\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/table\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/test\"\n\t\"github.com\/freneticmonkey\/migrate\/go\/util\"\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc TestCreateFailNoProject(t *testing.T) {\n\ttestName := \"TestCreateFailNoProject\"\n\n\tutil.LogAlert(testName)\n\n\t\/\/ Test Configuration\n\ttestConfig := test.GetTestConfig()\n\n\tproject := \"\"\n\n\ttestConfig.Project.Name = project\n\n\tresult := create(project, \"\", testConfig)\n\n\tif result.ExitCode() < 1 {\n\t\tt.Errorf(\"%s succeeded when it should have failed.\", testName)\n\t\treturn\n\t}\n}\n\nfunc TestCreateFailNoProjectVersion(t *testing.T) {\n\ttestName := \"TestCreateFailNoProjectVersion\"\n\n\tutil.LogAlert(testName)\n\n\t\/\/ Test Configuration\n\ttestConfig := test.GetTestConfig()\n\n\tproject := \"UnitTestProject\"\n\tversion := \"\"\n\n\ttestConfig.Project.Name = project\n\ttestConfig.Project.Schema.Version = version\n\n\tresult := create(project, version, testConfig)\n\n\tif result.ExitCode() < 1 {\n\t\tt.Errorf(\"%s succeeded when it should have failed.\", testName)\n\t\treturn\n\t}\n}\n\nfunc TestCreate(t *testing.T) {\n\ttestName := \"TestCreate\"\n\tutil.LogAlert(testName)\n\n\tvar err error\n\tvar result *cli.ExitError\n\tvar projectDB test.ProjectDB\n\tvar mgmtDB test.ManagementDB\n\n\tutil.SetConfigTesting()\n\n\tproject := \"UnitTestProject\"\n\tversion := \"abc123\"\n\n\t\/\/ Configure testing data\n\ttestConfig := test.GetTestConfig()\n\tdogsTbl := GetTableDogs()\n\tdogsAddTbl := GetTableAddressDogs()\n\n\t\/\/ Configuring the expected MDID for the new Column\n\texpectedAddressMetadata := dogsAddTbl.Columns[1].Metadata\n\texpectedAddressMetadata.MDID = 4\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Configure Git Checkout\n\n\tvar sparseExists bool\n\tvar data []byte\n\n\texpectedSparseFile := `schema\/*\nschemaTwo\/*`\n\n\t\/\/ Configure unit test shell\n\tutil.Config(testConfig)\n\n\tcheckoutPath := util.WorkingSubDir(testConfig.Project.Name)\n\n\t\/\/ Build Git Commands\n\n\tshell := util.GetShell().(*util.MockShellExecutor)\n\n\t\/\/ init\n\tparams := []string{\n\t\t\"-C\",\n\t\tcheckoutPath,\n\t\t\"init\",\n\t}\n\tshell.ExpectExec(\"git\", params, \"\", nil)\n\n\t\/\/ git add remote\n\tparams = []string{\n\t\t\"-C\",\n\t\tcheckoutPath,\n\t\t\"remote\",\n\t\t\"add\",\n\t\t\"-f\",\n\t\t\"origin\",\n\t\ttestConfig.Project.Schema.Url,\n\t}\n\tshell.ExpectExec(\"git\", params, \"\", nil)\n\n\t\/\/ git config sparse checkout\n\tparams = []string{\n\t\t\"-C\",\n\t\tcheckoutPath,\n\t\t\"config\",\n\t\t\"core.sparseCheckout\",\n\t\t\"true\",\n\t}\n\tshell.ExpectExec(\"git\", params, \"\", nil)\n\n\t\/\/ Checkout\n\t\/\/ git config sparse checkout\n\tparams = []string{\n\t\t\"-C\",\n\t\tcheckoutPath,\n\t\t\"checkout\",\n\t\ttestConfig.Project.Schema.Version,\n\t}\n\n\tshell.ExpectExecWithLambda(\n\t\t\"git\",\n\t\tparams,\n\t\t\"\",\n\t\tnil,\n\t\tfunc(cmd string, args []string) error {\n\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\t\/\/ Configure source YAML files for the 'checked out' Schema\n\t\t\t\/\/\n\n\t\t\ttest.WriteFile(\n\t\t\t\t\"UnitTestProject\/dogs.yml\",\n\t\t\t\tGetYAMLTableAddressDogs(),\n\t\t\t\t0644,\n\t\t\t\tfalse,\n\t\t\t)\n\n\t\t\t\/\/\n\t\t\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\t\treturn nil\n\t\t},\n\t)\n\n\t\/\/\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Configure MySQL db reads for MySQL Schema\n\t\/\/\n\n\t\/\/ Configure the test databases\n\t\/\/ Setup the mock project database\n\tprojectDB, err = test.CreateProjectDB(testName, t)\n\n\tif err == nil {\n\t\t\/\/ Connect to Project DB\n\t\texec.SetProjectDB(projectDB.Db)\n\t\tmysql.Setup(testConfig)\n\n\t\t\/\/ Connect to Project DB\n\t\tmysql.SetProjectDB(projectDB.Db.Db)\n\t}\n\n\t\/\/ Configure the Mock Managment DB\n\tmgmtDB, err = test.CreateManagementDB(testName, t)\n\n\tif err == nil {\n\t\t\/\/ migration.Setup(mgmtDB.Db, 1)\n\t\texec.Setup(mgmtDB.Db, 1, testConfig.Project.DB.ConnectString())\n\t\tmigration.Setup(mgmtDB.Db, 1)\n\t\tmetadata.Setup(mgmtDB.Db, 1)\n\t}\n\n\t\/\/ SHOW TABLES Query\n\tprojectDB.ShowTables([]test.DBRow{{dogsTbl.Name}}, false)\n\n\t\/\/ SHOW CREATE TABLE Query\n\tprojectDB.ShowCreateTable(dogsTbl.Name, GetMySQLCreateTableDogs())\n\n\t\/\/ ParseCreateTable which includes a Table.LoadDBMetadata() call\n\tmgmtDB.MetadataSelectName(\n\t\tdogsTbl.Name,\n\t\ttest.GetDBRowMetadata(dogsTbl.Metadata),\n\t\tfalse,\n\t)\n\n\tmgmtDB.MetadataLoadAllTableMetadata(dogsTbl.Metadata.PropertyID,\n\t\t1,\n\t\t[]test.DBRow{\n\t\t\ttest.GetDBRowMetadata(dogsTbl.Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsTbl.Columns[0].Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsTbl.PrimaryIndex.Metadata),\n\t\t},\n\t\tfalse,\n\t)\n\n\t\/\/ STARTING Diff Queries - forwards\n\n\tmgmtDB.MetadataSelectName(\n\t\tdogsAddTbl.Name,\n\t\ttest.GetDBRowMetadata(dogsAddTbl.Metadata),\n\t\tfalse,\n\t)\n\n\t\/\/Diff will also sync metadata for the YAML Schema\n\tmgmtDB.MetadataLoadAllTableMetadata(dogsAddTbl.Metadata.PropertyID,\n\t\t1,\n\t\t[]test.DBRow{\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Columns[0].Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Columns[1].Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.PrimaryIndex.Metadata),\n\t\t},\n\t\tfalse,\n\t)\n\n\t\/\/ Expect an insert for Metadata for the new column\n\tmgmtDB.MetadataInsert(\n\t\ttest.DBRow{\n\t\t\tdogsAddTbl.Columns[1].Metadata.DB,\n\t\t\tdogsAddTbl.Columns[1].Metadata.PropertyID,\n\t\t\tdogsAddTbl.Columns[1].Metadata.ParentID,\n\t\t\tdogsAddTbl.Columns[1].Metadata.Type,\n\t\t\tdogsAddTbl.Columns[1].Metadata.Name,\n\t\t\tfalse,\n\t\t},\n\t\texpectedAddressMetadata.MDID,\n\t\t1,\n\t)\n\n\t\/\/\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Git requests to pull back state of current checkout\n\n\t\/\/ GitVersionTime\n\tgitTime := \"2016-07-12T22:04:05+10:00\"\n\tmysqlTime := \"2016-07-12 12:04:05\"\n\n\tparams = []string{\n\t\t\"-C\",\n\t\tutil.WorkingSubDir(project),\n\t\t\"show\",\n\t\t\"-s\",\n\t\t\"--format=%%cI\",\n\t}\n\tshell.ExpectExec(\"git\", params, gitTime, nil)\n\n\t\/\/ GitVersionDetails\n\tgitDetails := `commit abc123\n Author: Scott Porter <sporter@ea.com>\n Date: Tue Jul 12 22:04:05 2016 +1000\n\n An example git commit for unit testing`\n\n\tparams = []string{\n\t\t\"-C\",\n\t\tutil.WorkingSubDir(project),\n\t\t\"show\",\n\t\t\"-s\",\n\t\t\"--pretty=medium\",\n\t}\n\tshell.ExpectExec(\"git\", params, gitDetails, nil)\n\n\t\/\/\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ New migration being inserted into the database\n\n\t\/\/ Helper operation\n\tforward := mysql.SQLOperation{\n\t\tStatement: \"ALTER TABLE `unittestproject_dogs` COLUMN `address` varchar(128) NOT NULL;\",\n\t\tOp: table.Add,\n\t\tName: \"address\",\n\t\tMetadata: metadata.Metadata{\n\t\t\tMDID: 4,\n\t\t\tDB: 1,\n\t\t\tPropertyID: \"unittestproject_dogs_col_address\",\n\t\t\tParentID: \"unittestproject_dogs\",\n\t\t\tName: \"address\",\n\t\t},\n\t}\n\tbackwardsStatement := \"ALTER TABLE `unittestproject_dogs` DROP COLUMN `address`;\"\n\n\t\/\/ Pulling table metadata - diff backwards\n\tmgmtDB.MetadataSelectName(\n\t\tdogsAddTbl.Name,\n\t\ttest.GetDBRowMetadata(dogsAddTbl.Metadata),\n\t\tfalse,\n\t)\n\n\t\/\/Diff will also sync metadata for the YAML Schema\n\tmgmtDB.MetadataLoadAllTableMetadata(dogsAddTbl.Metadata.PropertyID,\n\t\t1,\n\t\t[]test.DBRow{\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Columns[0].Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.Columns[1].Metadata),\n\t\t\ttest.GetDBRowMetadata(dogsAddTbl.PrimaryIndex.Metadata),\n\t\t},\n\t\tfalse,\n\t)\n\n\t\/\/ Counting the Migrations\n\tmgmtDB.MigrationCount(test.DBRow{0}, false)\n\n\t\/\/ Inserting a new Migration\n\tmgmtDB.MigrationInsert(\n\t\ttest.DBRow{\n\t\t\t1,\n\t\t\ttestConfig.Project.Name,\n\t\t\ttestConfig.Project.Schema.Version,\n\t\t\tmysqlTime,\n\t\t\tgitDetails,\n\t\t\t0,\n\t\t},\n\t\t1,\n\t\t1,\n\t)\n\n\t\/\/ Inserting the Migration Step\n\tmgmtDB.MigrationInsertStep(\n\t\ttest.DBRow{\n\t\t\t1,\n\t\t\tforward.Op,\n\t\t\tforward.Metadata.MDID,\n\t\t\tforward.Name,\n\t\t\tforward.Statement,\n\t\t\tbackwardsStatement,\n\t\t\t\"\",\n\t\t\t0,\n\t\t},\n\t\t1,\n\t\t1,\n\t)\n\n\t\/\/\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tresult = create(project, version, testConfig)\n\n\tif result.ExitCode() > 0 {\n\t\tt.Errorf(\"%s failed with error: %v\", testName, result)\n\t\treturn\n\t}\n\n\tprojectDB.ExpectionsMet(testName, t)\n\n\tmgmtDB.ExpectionsMet(testName, t)\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Git was checked out correctly\n\n\tif err != nil {\n\t\tt.Errorf(\"%s FAILED with error: %v\", testName, err)\n\t\treturn\n\t}\n\n\t\/\/ Validate that the sparse checkout file was created correctly\n\tsparseFile := fmt.Sprintf(\"%s\/.git\/info\/sparse-checkout\", checkoutPath)\n\tsparseExists, err = util.FileExists(sparseFile)\n\n\tif err != nil {\n\t\tt.Errorf(\"%s FAILED: there was an error while reading the sparse checkut file at path: [%s] with error: [%v]\", testName, sparseFile, err)\n\t\treturn\n\t}\n\n\tif !sparseExists {\n\t\tt.Errorf(\"%s FAILED: sparse checkut file missing from path: [%s]\", testName, sparseFile)\n\t\treturn\n\t}\n\n\tdata, err = util.ReadFile(sparseFile)\n\tsparseData := string(data)\n\n\tif sparseData != expectedSparseFile {\n\t\tt.Errorf(\"%s FAILED: sparse data file contents: [%s] doesn't match expected contents: [%s]\", testName, sparseData, expectedSparseFile)\n\t\treturn\n\t}\n\n\t\/\/ Ensure that all of the anticipated shell calls were made\n\tif err = shell.ExpectationsWereMet(); err != nil {\n\t\tt.Errorf(\"%s FAILED: Not all shell commands were executed: error [%v]\", testName, err)\n\t}\n\n\tTeardown()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"vitess.io\/vitess\/go\/vt\/sqlparser\"\n)\n\n\/\/ SQLError is the error structure returned from calling a db library function\ntype SQLError struct {\n\tNum int\n\tState string\n\tMessage string\n\tQuery string\n}\n\n\/\/ NewSQLError creates a new SQLError.\n\/\/ If sqlState is left empty, it will default to \"HY000\" (general error).\nfunc NewSQLError(number int, sqlState string, format string, args ...interface{}) *SQLError {\n\tif sqlState == \"\" {\n\t\tsqlState = SSUnknownSQLState\n\t}\n\treturn &SQLError{\n\t\tNum: number,\n\t\tState: sqlState,\n\t\tMessage: fmt.Sprintf(format, args...),\n\t}\n}\n\n\/\/ Error implements the error interface\nfunc (se *SQLError) Error() string {\n\tbuf := &bytes.Buffer{}\n\tbuf.WriteString(se.Message)\n\n\t\/\/ Add MySQL errno and SQLSTATE in a format that we can later parse.\n\t\/\/ There's no avoiding string parsing because all errors\n\t\/\/ are converted to strings anyway at RPC boundaries.\n\t\/\/ See NewSQLErrorFromError.\n\tfmt.Fprintf(buf, \" (errno %v) (sqlstate %v)\", se.Num, se.State)\n\n\tif se.Query != \"\" {\n\t\tfmt.Fprintf(buf, \" during query: %s\", sqlparser.TruncateForLog(se.Query))\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ Number returns the internal MySQL error code.\nfunc (se *SQLError) Number() int {\n\treturn se.Num\n}\n\n\/\/ SQLState returns the SQLSTATE value.\nfunc (se *SQLError) SQLState() string {\n\treturn se.State\n}\n\nvar errExtract = regexp.MustCompile(`.*\\(errno ([0-9]*)\\) \\(sqlstate ([0-9a-zA-Z]{5})\\).*`)\n\n\/\/ NewSQLErrorFromError returns a *SQLError from the provided error.\n\/\/ If it's not the right type, it still tries to get it from a regexp.\nfunc NewSQLErrorFromError(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif serr, ok := err.(*SQLError); ok {\n\t\treturn serr\n\t}\n\n\tmsg := err.Error()\n\tmatch := errExtract.FindStringSubmatch(msg)\n\tif len(match) < 2 {\n\t\t\/\/ Not found, build a generic SQLError.\n\t\t\/\/ TODO(alainjobart) maybe we can also check the canonical\n\t\t\/\/ error code, and translate that into the right error.\n\t\treturn &SQLError{\n\t\t\tNum: ERUnknownError,\n\t\t\tState: SSUnknownSQLState,\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\n\tnum, err := strconv.Atoi(match[1])\n\tif err != nil {\n\t\treturn &SQLError{\n\t\t\tNum: ERUnknownError,\n\t\t\tState: SSUnknownSQLState,\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\n\tserr := &SQLError{\n\t\tNum: num,\n\t\tState: match[2],\n\t\tMessage: msg,\n\t}\n\treturn serr\n}\n<commit_msg>set the most appropriate mysql error code when creating a SQLError<commit_after>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"vitess.io\/vitess\/go\/vt\/sqlparser\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n\n\tvtrpcpb \"vitess.io\/vitess\/go\/vt\/proto\/vtrpc\"\n)\n\n\/\/ SQLError is the error structure returned from calling a db library function\ntype SQLError struct {\n\tNum int\n\tState string\n\tMessage string\n\tQuery string\n}\n\n\/\/ NewSQLError creates a new SQLError.\n\/\/ If sqlState is left empty, it will default to \"HY000\" (general error).\nfunc NewSQLError(number int, sqlState string, format string, args ...interface{}) *SQLError {\n\tif sqlState == \"\" {\n\t\tsqlState = SSUnknownSQLState\n\t}\n\treturn &SQLError{\n\t\tNum: number,\n\t\tState: sqlState,\n\t\tMessage: fmt.Sprintf(format, args...),\n\t}\n}\n\n\/\/ Error implements the error interface\nfunc (se *SQLError) Error() string {\n\tbuf := &bytes.Buffer{}\n\tbuf.WriteString(se.Message)\n\n\t\/\/ Add MySQL errno and SQLSTATE in a format that we can later parse.\n\t\/\/ There's no avoiding string parsing because all errors\n\t\/\/ are converted to strings anyway at RPC boundaries.\n\t\/\/ See NewSQLErrorFromError.\n\tfmt.Fprintf(buf, \" (errno %v) (sqlstate %v)\", se.Num, se.State)\n\n\tif se.Query != \"\" {\n\t\tfmt.Fprintf(buf, \" during query: %s\", sqlparser.TruncateForLog(se.Query))\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ Number returns the internal MySQL error code.\nfunc (se *SQLError) Number() int {\n\treturn se.Num\n}\n\n\/\/ SQLState returns the SQLSTATE value.\nfunc (se *SQLError) SQLState() string {\n\treturn se.State\n}\n\nvar errExtract = regexp.MustCompile(`.*\\(errno ([0-9]*)\\) \\(sqlstate ([0-9a-zA-Z]{5})\\).*`)\n\n\/\/ NewSQLErrorFromError returns a *SQLError from the provided error.\n\/\/ If it's not the right type, it still tries to get it from a regexp.\nfunc NewSQLErrorFromError(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif serr, ok := err.(*SQLError); ok {\n\t\treturn serr\n\t}\n\n\tmsg := err.Error()\n\tmatch := errExtract.FindStringSubmatch(msg)\n\tif len(match) < 2 {\n\t\t\/\/ Map vitess error codes into the mysql equivalent\n\t\tcode := vterrors.Code(err)\n\t\tnum := ERUnknownError\n\t\tswitch code {\n\t\tcase vtrpcpb.Code_CANCELED:\n\t\t\tnum = ERQueryInterrupted\n\t\tcase vtrpcpb.Code_UNKNOWN:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_INVALID_ARGUMENT:\n\t\t\t\/\/ TODO\/demmer there are several more appropriate mysql error\n\t\t\t\/\/ codes for the various invalid argument cases.\n\t\t\t\/\/ it would be better to change the call sites to use\n\t\t\t\/\/ the mysql style \"(errno X) (sqlstate Y)\" format rather than\n\t\t\t\/\/ trying to add vitess error codes for all these cases\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_DEADLINE_EXCEEDED:\n\t\t\tnum = ERQueryInterrupted\n\t\tcase vtrpcpb.Code_NOT_FOUND:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_ALREADY_EXISTS:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_PERMISSION_DENIED:\n\t\t\tnum = ERAccessDeniedError\n\t\tcase vtrpcpb.Code_UNAUTHENTICATED:\n\t\t\tnum = ERAccessDeniedError\n\t\tcase vtrpcpb.Code_RESOURCE_EXHAUSTED:\n\t\t\tnum = ERTooManyUserConnections\n\t\tcase vtrpcpb.Code_FAILED_PRECONDITION:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_ABORTED:\n\t\t\tnum = ERQueryInterrupted\n\t\tcase vtrpcpb.Code_OUT_OF_RANGE:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_UNIMPLEMENTED:\n\t\t\tnum = ERNotSupportedYet\n\t\tcase vtrpcpb.Code_INTERNAL:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_UNAVAILABLE:\n\t\t\tnum = ERUnknownError\n\t\tcase vtrpcpb.Code_DATA_LOSS:\n\t\t\tnum = ERUnknownError\n\t\t}\n\n\t\t\/\/ Not found, build a generic SQLError.\n\t\treturn &SQLError{\n\t\t\tNum: num,\n\t\t\tState: SSUnknownSQLState,\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\n\tnum, err := strconv.Atoi(match[1])\n\tif err != nil {\n\t\treturn &SQLError{\n\t\t\tNum: ERUnknownError,\n\t\t\tState: SSUnknownSQLState,\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\n\tserr := &SQLError{\n\t\tNum: num,\n\t\tState: match[2],\n\t\tMessage: msg,\n\t}\n\treturn serr\n}\n<|endoftext|>"} {"text":"<commit_before>package provision\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/provision\/pkgaction\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n)\n\nconst (\n\thostTmpl = `sudo tee \/var\/tmp\/hostname.yml << EOF\n#cloud-config\n\nhostname: %s\nEOF\n`\n)\n\nfunc init() {\n\tRegister(\"CoreOS\", &RegisteredProvisioner{\n\t\tNew: NewCoreOSProvisioner,\n\t})\n}\n\nfunc NewCoreOSProvisioner(d drivers.Driver) Provisioner {\n\treturn &CoreOSProvisioner{\n\t\tNewSystemdProvisioner(\"coreos\", d),\n\t}\n}\n\ntype CoreOSProvisioner struct {\n\tSystemdProvisioner\n}\n\nfunc (provisioner *CoreOSProvisioner) String() string {\n\treturn \"coreOS\"\n}\n\nfunc (provisioner *CoreOSProvisioner) SetHostname(hostname string) error {\n\tlog.Debugf(\"SetHostname: %s\", hostname)\n\n\tif _, err := provisioner.SSHCommand(fmt.Sprintf(hostTmpl, hostname)); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := provisioner.SSHCommand(\"sudo systemctl start system-cloudinit@var-tmp-hostname.yml.service\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *CoreOSProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) {\n\tvar (\n\t\tengineCfg bytes.Buffer\n\t)\n\n\tdriverNameLabel := fmt.Sprintf(\"provider=%s\", provisioner.Driver.DriverName())\n\tprovisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel)\n\n\tengineConfigTmpl := `[Unit]\nDescription=Docker Socket for the API\nAfter=docker.socket early-docker.target network.target\nRequires=docker.socket early-docker.target\n\n[Service]\nEnvironment=TMPDIR=\/var\/tmp\nEnvironmentFile=-\/run\/flannel_docker_opts.env\nMountFlags=slave\nLimitNOFILE=1048576\nLimitNPROC=1048576\nExecStart=\/usr\/lib\/coreos\/dockerd --daemon --host=unix:\/\/\/var\/run\/docker.sock --host=tcp:\/\/0.0.0.0:{{.DockerPort}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}}{{ range .EngineOptions.Labels }} --label {{.}}{{ end }}{{ range .EngineOptions.InsecureRegistry }} --insecure-registry {{.}}{{ end }}{{ range .EngineOptions.RegistryMirror }} --registry-mirror {{.}}{{ end }}{{ range .EngineOptions.ArbitraryFlags }} --{{.}}{{ end }} \\$DOCKER_OPTS \\$DOCKER_OPT_BIP \\$DOCKER_OPT_MTU \\$DOCKER_OPT_IPMASQ\n\n[Install]\nWantedBy=multi-user.target\n`\n\n\tt, err := template.New(\"engineConfig\").Parse(engineConfigTmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengineConfigContext := EngineConfigContext{\n\t\tDockerPort: dockerPort,\n\t\tAuthOptions: provisioner.AuthOptions,\n\t\tEngineOptions: provisioner.EngineOptions,\n\t}\n\n\tt.Execute(&engineCfg, engineConfigContext)\n\n\treturn &DockerOptions{\n\t\tEngineOptions: engineCfg.String(),\n\t\tEngineOptionsPath: provisioner.DaemonOptionsFile,\n\t}, nil\n}\n\nfunc (provisioner *CoreOSProvisioner) Package(name string, action pkgaction.PackageAction) error {\n\treturn nil\n}\n\nfunc (provisioner *CoreOSProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {\n\tprovisioner.SwarmOptions = swarmOptions\n\tprovisioner.AuthOptions = authOptions\n\tprovisioner.EngineOptions = engineOptions\n\n\tif err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := makeDockerOptionsDir(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"Preparing certificates\")\n\tprovisioner.AuthOptions = setRemoteAuthOptions(provisioner)\n\n\tlog.Debugf(\"Setting up certificates\")\n\tif err := ConfigureAuth(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Configuring swarm\")\n\tif err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Update CoreOS provisioner to use 'docker daemon'<commit_after>package provision\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/provision\/pkgaction\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n)\n\nconst (\n\thostTmpl = `sudo tee \/var\/tmp\/hostname.yml << EOF\n#cloud-config\n\nhostname: %s\nEOF\n`\n)\n\nfunc init() {\n\tRegister(\"CoreOS\", &RegisteredProvisioner{\n\t\tNew: NewCoreOSProvisioner,\n\t})\n}\n\nfunc NewCoreOSProvisioner(d drivers.Driver) Provisioner {\n\treturn &CoreOSProvisioner{\n\t\tNewSystemdProvisioner(\"coreos\", d),\n\t}\n}\n\ntype CoreOSProvisioner struct {\n\tSystemdProvisioner\n}\n\nfunc (provisioner *CoreOSProvisioner) String() string {\n\treturn \"coreOS\"\n}\n\nfunc (provisioner *CoreOSProvisioner) SetHostname(hostname string) error {\n\tlog.Debugf(\"SetHostname: %s\", hostname)\n\n\tif _, err := provisioner.SSHCommand(fmt.Sprintf(hostTmpl, hostname)); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := provisioner.SSHCommand(\"sudo systemctl start system-cloudinit@var-tmp-hostname.yml.service\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *CoreOSProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) {\n\tvar (\n\t\tengineCfg bytes.Buffer\n\t)\n\n\tdriverNameLabel := fmt.Sprintf(\"provider=%s\", provisioner.Driver.DriverName())\n\tprovisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel)\n\n\tengineConfigTmpl := `[Unit]\nDescription=Docker Socket for the API\nAfter=docker.socket early-docker.target network.target\nRequires=docker.socket early-docker.target\n\n[Service]\nEnvironment=TMPDIR=\/var\/tmp\nEnvironmentFile=-\/run\/flannel_docker_opts.env\nMountFlags=slave\nLimitNOFILE=1048576\nLimitNPROC=1048576\nExecStart=\/usr\/lib\/coreos\/dockerd daemon --host=unix:\/\/\/var\/run\/docker.sock --host=tcp:\/\/0.0.0.0:{{.DockerPort}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}}{{ range .EngineOptions.Labels }} --label {{.}}{{ end }}{{ range .EngineOptions.InsecureRegistry }} --insecure-registry {{.}}{{ end }}{{ range .EngineOptions.RegistryMirror }} --registry-mirror {{.}}{{ end }}{{ range .EngineOptions.ArbitraryFlags }} --{{.}}{{ end }} \\$DOCKER_OPTS \\$DOCKER_OPT_BIP \\$DOCKER_OPT_MTU \\$DOCKER_OPT_IPMASQ\n\n[Install]\nWantedBy=multi-user.target\n`\n\n\tt, err := template.New(\"engineConfig\").Parse(engineConfigTmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengineConfigContext := EngineConfigContext{\n\t\tDockerPort: dockerPort,\n\t\tAuthOptions: provisioner.AuthOptions,\n\t\tEngineOptions: provisioner.EngineOptions,\n\t}\n\n\tt.Execute(&engineCfg, engineConfigContext)\n\n\treturn &DockerOptions{\n\t\tEngineOptions: engineCfg.String(),\n\t\tEngineOptionsPath: provisioner.DaemonOptionsFile,\n\t}, nil\n}\n\nfunc (provisioner *CoreOSProvisioner) Package(name string, action pkgaction.PackageAction) error {\n\treturn nil\n}\n\nfunc (provisioner *CoreOSProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {\n\tprovisioner.SwarmOptions = swarmOptions\n\tprovisioner.AuthOptions = authOptions\n\tprovisioner.EngineOptions = engineOptions\n\n\tif err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := makeDockerOptionsDir(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"Preparing certificates\")\n\tprovisioner.AuthOptions = setRemoteAuthOptions(provisioner)\n\n\tlog.Debugf(\"Setting up certificates\")\n\tif err := ConfigureAuth(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Configuring swarm\")\n\tif err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package provision\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/docker\/machine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/provision\/pkgaction\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n\t\"github.com\/docker\/machine\/log\"\n\t\"github.com\/docker\/machine\/utils\"\n)\n\nfunc init() {\n\tRegister(\"Debian\", &RegisteredProvisioner{\n\t\tNew: NewDebianProvisioner,\n\t})\n}\n\nfunc NewDebianProvisioner(d drivers.Driver) Provisioner {\n\treturn &DebianProvisioner{\n\t\tGenericProvisioner{\n\t\t\tDockerOptionsDir: \"\/etc\/docker\",\n\t\t\tDaemonOptionsFile: \"\/etc\/systemd\/system\/docker.service\",\n\t\t\tOsReleaseId: \"debian\",\n\t\t\tPackages: []string{\n\t\t\t\t\"curl\",\n\t\t\t},\n\t\t\tDriver: d,\n\t\t},\n\t}\n}\n\ntype DebianProvisioner struct {\n\tGenericProvisioner\n}\n\nfunc (provisioner *DebianProvisioner) Service(name string, action pkgaction.ServiceAction) error {\n\t\/\/ daemon-reload to catch config updates; systemd -- ugh\n\tif _, err := provisioner.SSHCommand(\"sudo systemctl daemon-reload\"); err != nil {\n\t\treturn err\n\t}\n\n\tcommand := fmt.Sprintf(\"sudo systemctl %s %s\", action.String(), name)\n\n\tif _, err := provisioner.SSHCommand(command); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) Package(name string, action pkgaction.PackageAction) error {\n\tvar packageAction string\n\n\tupdateMetadata := true\n\n\tswitch action {\n\tcase pkgaction.Install:\n\t\tpackageAction = \"install\"\n\tcase pkgaction.Remove:\n\t\tpackageAction = \"remove\"\n\t\tupdateMetadata = false\n\tcase pkgaction.Upgrade:\n\t\tpackageAction = \"upgrade\"\n\t}\n\n\tswitch name {\n\tcase \"docker\":\n\t\tname = \"lxc-docker\"\n\t}\n\n\tif updateMetadata {\n\t\tif _, err := provisioner.SSHCommand(\"sudo apt-get update\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcommand := fmt.Sprintf(\"DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y %s\", packageAction, name)\n\n\tlog.Debugf(\"package: action=%s name=%s\", action.String(), name)\n\n\tif _, err := provisioner.SSHCommand(command); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) dockerDaemonResponding() bool {\n\tif _, err := provisioner.SSHCommand(\"sudo docker version\"); err != nil {\n\t\tlog.Warnf(\"Error getting SSH command to check if the daemon is up: %s\", err)\n\t\treturn false\n\t}\n\n\t\/\/ The daemon is up if the command worked. Carry on.\n\treturn true\n}\n\nfunc (provisioner *DebianProvisioner) Provision(swarmOptions swarm.SwarmOptions, authOptions auth.AuthOptions, engineOptions engine.EngineOptions) error {\n\tprovisioner.SwarmOptions = swarmOptions\n\tprovisioner.AuthOptions = authOptions\n\tprovisioner.EngineOptions = engineOptions\n\n\tif provisioner.EngineOptions.StorageDriver == \"\" {\n\t\tprovisioner.EngineOptions.StorageDriver = \"aufs\"\n\t}\n\n\t\/\/ HACK: since debian does not come with sudo by default we install\n\tlog.Debug(\"installing sudo\")\n\tif _, err := provisioner.SSHCommand(\"if ! type sudo; then apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo; fi\"); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"setting hostname\")\n\tif err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"installing base packages\")\n\tfor _, pkg := range provisioner.Packages {\n\t\tif err := provisioner.Package(pkg, pkgaction.Install); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Debug(\"installing docker\")\n\tif err := installDockerGeneric(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"waiting for docker daemon\")\n\tif err := utils.WaitFor(provisioner.dockerDaemonResponding); err != nil {\n\t\treturn err\n\t}\n\n\tprovisioner.AuthOptions = setRemoteAuthOptions(provisioner)\n\n\tlog.Debug(\"configuring auth\")\n\tif err := ConfigureAuth(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"configuring swarm\")\n\tif err := configureSwarm(provisioner, swarmOptions); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ enable in systemd\n\tlog.Debug(\"enabling docker in systemd\")\n\tif err := provisioner.Service(\"docker\", pkgaction.Enable); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) {\n\tvar (\n\t\tengineCfg bytes.Buffer\n\t)\n\n\tdriverNameLabel := fmt.Sprintf(\"provider=%s\", provisioner.Driver.DriverName())\n\tprovisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel)\n\n\tengineConfigTmpl := `[Service]\nExecStart=\/usr\/bin\/docker -d -H tcp:\/\/0.0.0.0:{{.DockerPort}} -H unix:\/\/\/var\/run\/docker.sock --storage-driver {{.EngineOptions.StorageDriver}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}\nMountFlags=slave\nLimitNOFILE=1048576\nLimitNPROC=1048576\nLimitCORE=infinity\n`\n\tt, err := template.New(\"engineConfig\").Parse(engineConfigTmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengineConfigContext := EngineConfigContext{\n\t\tDockerPort: dockerPort,\n\t\tAuthOptions: provisioner.AuthOptions,\n\t\tEngineOptions: provisioner.EngineOptions,\n\t}\n\n\tt.Execute(&engineCfg, engineConfigContext)\n\n\treturn &DockerOptions{\n\t\tEngineOptions: engineCfg.String(),\n\t\tEngineOptionsPath: provisioner.DaemonOptionsFile,\n\t}, nil\n}\n<commit_msg>debian: rebase and fix configureSwarm<commit_after>package provision\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/docker\/machine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/auth\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/provision\/pkgaction\"\n\t\"github.com\/docker\/machine\/libmachine\/swarm\"\n\t\"github.com\/docker\/machine\/log\"\n\t\"github.com\/docker\/machine\/utils\"\n)\n\nfunc init() {\n\tRegister(\"Debian\", &RegisteredProvisioner{\n\t\tNew: NewDebianProvisioner,\n\t})\n}\n\nfunc NewDebianProvisioner(d drivers.Driver) Provisioner {\n\treturn &DebianProvisioner{\n\t\tGenericProvisioner{\n\t\t\tDockerOptionsDir: \"\/etc\/docker\",\n\t\t\tDaemonOptionsFile: \"\/etc\/systemd\/system\/docker.service\",\n\t\t\tOsReleaseId: \"debian\",\n\t\t\tPackages: []string{\n\t\t\t\t\"curl\",\n\t\t\t},\n\t\t\tDriver: d,\n\t\t},\n\t}\n}\n\ntype DebianProvisioner struct {\n\tGenericProvisioner\n}\n\nfunc (provisioner *DebianProvisioner) Service(name string, action pkgaction.ServiceAction) error {\n\t\/\/ daemon-reload to catch config updates; systemd -- ugh\n\tif _, err := provisioner.SSHCommand(\"sudo systemctl daemon-reload\"); err != nil {\n\t\treturn err\n\t}\n\n\tcommand := fmt.Sprintf(\"sudo systemctl %s %s\", action.String(), name)\n\n\tif _, err := provisioner.SSHCommand(command); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) Package(name string, action pkgaction.PackageAction) error {\n\tvar packageAction string\n\n\tupdateMetadata := true\n\n\tswitch action {\n\tcase pkgaction.Install:\n\t\tpackageAction = \"install\"\n\tcase pkgaction.Remove:\n\t\tpackageAction = \"remove\"\n\t\tupdateMetadata = false\n\tcase pkgaction.Upgrade:\n\t\tpackageAction = \"upgrade\"\n\t}\n\n\tswitch name {\n\tcase \"docker\":\n\t\tname = \"lxc-docker\"\n\t}\n\n\tif updateMetadata {\n\t\tif _, err := provisioner.SSHCommand(\"sudo apt-get update\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcommand := fmt.Sprintf(\"DEBIAN_FRONTEND=noninteractive sudo -E apt-get %s -y %s\", packageAction, name)\n\n\tlog.Debugf(\"package: action=%s name=%s\", action.String(), name)\n\n\tif _, err := provisioner.SSHCommand(command); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) dockerDaemonResponding() bool {\n\tif _, err := provisioner.SSHCommand(\"sudo docker version\"); err != nil {\n\t\tlog.Warnf(\"Error getting SSH command to check if the daemon is up: %s\", err)\n\t\treturn false\n\t}\n\n\t\/\/ The daemon is up if the command worked. Carry on.\n\treturn true\n}\n\nfunc (provisioner *DebianProvisioner) Provision(swarmOptions swarm.SwarmOptions, authOptions auth.AuthOptions, engineOptions engine.EngineOptions) error {\n\tprovisioner.SwarmOptions = swarmOptions\n\tprovisioner.AuthOptions = authOptions\n\tprovisioner.EngineOptions = engineOptions\n\n\tif provisioner.EngineOptions.StorageDriver == \"\" {\n\t\tprovisioner.EngineOptions.StorageDriver = \"aufs\"\n\t}\n\n\t\/\/ HACK: since debian does not come with sudo by default we install\n\tlog.Debug(\"installing sudo\")\n\tif _, err := provisioner.SSHCommand(\"if ! type sudo; then apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo; fi\"); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"setting hostname\")\n\tif err := provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"installing base packages\")\n\tfor _, pkg := range provisioner.Packages {\n\t\tif err := provisioner.Package(pkg, pkgaction.Install); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Debug(\"installing docker\")\n\tif err := installDockerGeneric(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"waiting for docker daemon\")\n\tif err := utils.WaitFor(provisioner.dockerDaemonResponding); err != nil {\n\t\treturn err\n\t}\n\n\tprovisioner.AuthOptions = setRemoteAuthOptions(provisioner)\n\n\tlog.Debug(\"configuring auth\")\n\tif err := ConfigureAuth(provisioner); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"configuring swarm\")\n\tif err := configureSwarm(provisioner, swarmOptions, provisioner.AuthOptions); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ enable in systemd\n\tlog.Debug(\"enabling docker in systemd\")\n\tif err := provisioner.Service(\"docker\", pkgaction.Enable); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (provisioner *DebianProvisioner) GenerateDockerOptions(dockerPort int) (*DockerOptions, error) {\n\tvar (\n\t\tengineCfg bytes.Buffer\n\t)\n\n\tdriverNameLabel := fmt.Sprintf(\"provider=%s\", provisioner.Driver.DriverName())\n\tprovisioner.EngineOptions.Labels = append(provisioner.EngineOptions.Labels, driverNameLabel)\n\n\tengineConfigTmpl := `[Service]\nExecStart=\/usr\/bin\/docker -d -H tcp:\/\/0.0.0.0:{{.DockerPort}} -H unix:\/\/\/var\/run\/docker.sock --storage-driver {{.EngineOptions.StorageDriver}} --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}\nMountFlags=slave\nLimitNOFILE=1048576\nLimitNPROC=1048576\nLimitCORE=infinity\n`\n\tt, err := template.New(\"engineConfig\").Parse(engineConfigTmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengineConfigContext := EngineConfigContext{\n\t\tDockerPort: dockerPort,\n\t\tAuthOptions: provisioner.AuthOptions,\n\t\tEngineOptions: provisioner.EngineOptions,\n\t}\n\n\tt.Execute(&engineCfg, engineConfigContext)\n\n\treturn &DockerOptions{\n\t\tEngineOptions: engineCfg.String(),\n\t\tEngineOptionsPath: provisioner.DaemonOptionsFile,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package provision\n\nimport (\n\t\"github.com\/docker\/machine\/drivers\"\n)\n\nconst (\n\t\/\/ TODO: eventually the RPM install process will be integrated\n\t\/\/ into the get.docker.com install script; for now\n\t\/\/ we install via vendored RPMs\n\tdockerFedoraRPMPath = \"https:\/\/docker-mcn.s3.amazonaws.com\/public\/fedora\/rpms\/docker-engine-1.6.1-0.0.20150511.171646.git1b47f9f.fc21.x86_64.rpm\"\n)\n\nfunc init() {\n\tRegister(\"Fedora\", &RegisteredProvisioner{\n\t\tNew: NewFedoraProvisioner,\n\t})\n}\n\nfunc NewFedoraProvisioner(d drivers.Driver) Provisioner {\n\tg := GenericProvisioner{\n\t\tDockerOptionsDir: \"\/etc\/docker\",\n\t\tDaemonOptionsFile: \"\/etc\/systemd\/system\/docker.service\",\n\t\tOsReleaseId: \"fedora\",\n\t\tPackages: []string{\n\t\t\t\"curl\",\n\t\t\t\"yum-utils\",\n\t\t},\n\t\tDriver: d,\n\t}\n\tp := &FedoraProvisioner{\n\t\tRedHatProvisioner{\n\t\t\tg,\n\t\t\tdockerFedoraRPMPath,\n\t\t},\n\t}\n\treturn p\n}\n\ntype FedoraProvisioner struct {\n\tRedHatProvisioner\n}\n<commit_msg>fedora: remove configure repo<commit_after>package provision\n\nimport (\n\t\"github.com\/docker\/machine\/drivers\"\n)\n\nconst (\n\t\/\/ TODO: eventually the RPM install process will be integrated\n\t\/\/ into the get.docker.com install script; for now\n\t\/\/ we install via vendored RPMs\n\tdockerFedoraRPMPath = \"https:\/\/docker-mcn.s3.amazonaws.com\/public\/fedora\/rpms\/docker-engine-1.6.1-0.0.20150511.171646.git1b47f9f.fc21.x86_64.rpm\"\n)\n\nfunc init() {\n\tRegister(\"Fedora\", &RegisteredProvisioner{\n\t\tNew: NewFedoraProvisioner,\n\t})\n}\n\nfunc NewFedoraProvisioner(d drivers.Driver) Provisioner {\n\tg := GenericProvisioner{\n\t\tDockerOptionsDir: \"\/etc\/docker\",\n\t\tDaemonOptionsFile: \"\/etc\/systemd\/system\/docker.service\",\n\t\tOsReleaseId: \"fedora\",\n\t\tPackages: []string{},\n\t\tDriver: d,\n\t}\n\tp := &FedoraProvisioner{\n\t\tRedHatProvisioner{\n\t\t\tg,\n\t\t\tdockerFedoraRPMPath,\n\t\t},\n\t}\n\treturn p\n}\n\ntype FedoraProvisioner struct {\n\tRedHatProvisioner\n}\n<|endoftext|>"} {"text":"<commit_before>package gopherjs_http\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"code.google.com\/p\/go.net\/html\/atom\"\n\t\"github.com\/shurcooL\/go\/pipe_util\"\n\t\"gopkg.in\/pipe.v2\"\n)\n\n\/\/ HtmlFile returns a handler that serves the given .html file, with the \"text\/go\" scripts compiled to JavaScript via GopherJS.\nfunc HtmlFile(name string) http.Handler {\n\treturn &htmlFile{name: name}\n}\n\ntype htmlFile struct {\n\tname string\n}\n\nfunc (this *htmlFile) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tfile, err := os.Open(this.name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tProcessHtml(file).WriteTo(w)\n}\n\n\/\/ TODO: Write into writer, no need for buffer (unless want to be able to abort on error?). Or, alternatively, parse html and serve minified version?\nfunc ProcessHtml(r io.Reader) *bytes.Buffer {\n\tinsideTextGo := false\n\ttokenizer := html.NewTokenizer(r)\n\tvar buf bytes.Buffer\n\n\tfor {\n\t\tif tokenizer.Next() == html.ErrorToken {\n\t\t\terr := tokenizer.Err()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn &buf\n\t\t\t}\n\n\t\t\treturn &bytes.Buffer{}\n\t\t}\n\n\t\ttoken := tokenizer.Token()\n\t\tswitch token.Type {\n\t\tcase html.DoctypeToken:\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.CommentToken:\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.StartTagToken:\n\t\t\tif token.DataAtom == atom.Script && getType(token.Attr) == \"text\/go\" {\n\t\t\t\tinsideTextGo = true\n\n\t\t\t\tbuf.WriteString(`<script type=\"text\/javascript\">`)\n\n\t\t\t\tif src := getSrc(token.Attr); src != \"\" {\n\t\t\t\t\tif data, err := ioutil.ReadFile(src); err == nil {\n\t\t\t\t\t\tbuf.WriteString(goToJs(string(data)))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(token.String())\n\t\t\t}\n\t\tcase html.EndTagToken:\n\t\t\tif token.DataAtom == atom.Script && insideTextGo {\n\t\t\t\tinsideTextGo = false\n\t\t\t}\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.SelfClosingTagToken:\n\t\t\t\/\/ TODO: Support <script type=\"text\/go\" src=\"...\" \/>.\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.TextToken:\n\t\t\tif insideTextGo {\n\t\t\t\tbuf.WriteString(goToJs(token.Data))\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(token.Data)\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unknown token type\")\n\t\t}\n\t}\n}\n\nfunc getType(attrs []html.Attribute) string {\n\tfor _, attr := range attrs {\n\t\tif attr.Key == \"type\" {\n\t\t\treturn attr.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc getSrc(attrs []html.Attribute) string {\n\tfor _, attr := range attrs {\n\t\tif attr.Key == \"src\" {\n\t\t\treturn attr.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc goToJs(goCode string) (jsCode string) {\n\tstarted := time.Now()\n\tdefer func() { fmt.Println(\"goToJs taken:\", time.Since(started)) }()\n\n\t\/\/ TODO: Don't shell out, and avoid having to write\/read temporary files, instead\n\t\/\/ use http:\/\/godoc.org\/github.com\/gopherjs\/gopherjs\/compiler directly, etc.\n\tp := pipe.Script(\n\t\tpipe.Line(\n\t\t\tpipe.Print(goCode),\n\t\t\tpipe.WriteFile(\"tmp.go\", 0666),\n\t\t),\n\t\tpipe.Exec(\"gopherjs\", \"build\", \"tmp.go\"),\n\t\tpipe.ReadFile(\"tmp.js\"),\n\t)\n\n\t\/\/ Use a temporary dir.\n\ttempDir, err := ioutil.TempDir(\"\", \"gopherjs_\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\terr := os.RemoveAll(tempDir)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"warning: error removing temp dir:\", err)\n\t\t}\n\t}()\n\n\tstdout, stderr, err := pipe_util.DividedOutputDir(p, tempDir)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Stderr.Write(stderr)\n\t}\n\n\t\/\/ TODO: Serve the .map too, somehow?\n\tstdout = bytes.Replace(stdout, []byte(\"\/\/# sourceMappingURL=tmp.js.map\\n\"), []byte(\"\\n\"), -1)\n\n\treturn string(stdout)\n}\n<commit_msg>gopherjs_http: Don't shell out, use gopherjslib to compile in-process.<commit_after>package gopherjs_http\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"code.google.com\/p\/go.net\/html\/atom\"\n\t\"github.com\/go-on\/gopherjslib\"\n)\n\n\/\/ HtmlFile returns a handler that serves the given .html file, with the \"text\/go\" scripts compiled to JavaScript via GopherJS.\nfunc HtmlFile(name string) http.Handler {\n\treturn &htmlFile{name: name}\n}\n\ntype htmlFile struct {\n\tname string\n}\n\nfunc (this *htmlFile) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tfile, err := os.Open(this.name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tProcessHtml(file).WriteTo(w)\n}\n\n\/\/ TODO: Write into writer, no need for buffer (unless want to be able to abort on error?). Or, alternatively, parse html and serve minified version?\nfunc ProcessHtml(r io.Reader) *bytes.Buffer {\n\tinsideTextGo := false\n\ttokenizer := html.NewTokenizer(r)\n\tvar buf bytes.Buffer\n\n\tfor {\n\t\tif tokenizer.Next() == html.ErrorToken {\n\t\t\terr := tokenizer.Err()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn &buf\n\t\t\t}\n\n\t\t\treturn &bytes.Buffer{}\n\t\t}\n\n\t\ttoken := tokenizer.Token()\n\t\tswitch token.Type {\n\t\tcase html.DoctypeToken:\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.CommentToken:\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.StartTagToken:\n\t\t\tif token.DataAtom == atom.Script && getType(token.Attr) == \"text\/go\" {\n\t\t\t\tinsideTextGo = true\n\n\t\t\t\tbuf.WriteString(`<script type=\"text\/javascript\">`)\n\n\t\t\t\tif src := getSrc(token.Attr); src != \"\" {\n\t\t\t\t\tif data, err := ioutil.ReadFile(src); err == nil {\n\t\t\t\t\t\tbuf.WriteString(goToJs(string(data)))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(token.String())\n\t\t\t}\n\t\tcase html.EndTagToken:\n\t\t\tif token.DataAtom == atom.Script && insideTextGo {\n\t\t\t\tinsideTextGo = false\n\t\t\t}\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.SelfClosingTagToken:\n\t\t\t\/\/ TODO: Support <script type=\"text\/go\" src=\"...\" \/>.\n\t\t\tbuf.WriteString(token.String())\n\t\tcase html.TextToken:\n\t\t\tif insideTextGo {\n\t\t\t\tbuf.WriteString(goToJs(token.Data))\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(token.Data)\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unknown token type\")\n\t\t}\n\t}\n}\n\nfunc getType(attrs []html.Attribute) string {\n\tfor _, attr := range attrs {\n\t\tif attr.Key == \"type\" {\n\t\t\treturn attr.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc getSrc(attrs []html.Attribute) string {\n\tfor _, attr := range attrs {\n\t\tif attr.Key == \"src\" {\n\t\t\treturn attr.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc goToJs(goCode string) (jsCode string) {\n\tstarted := time.Now()\n\tdefer func() { fmt.Println(\"goToJs taken:\", time.Since(started)) }()\n\n\tcode := strings.NewReader(goCode)\n\n\tvar out bytes.Buffer\n\terr := gopherjslib.Build(code, &out, nil)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn goCode\n\t}\n\n\treturn out.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"errors\"\n\t\"time\"\n\t\"strconv\"\n\t\/\/\"github.com\/shirou\/gopsutil\/mem\"\n\t\"github.com\/synw\/microb\/libmicrob\/db\"\n\t\"github.com\/synw\/microb\/libmicrob\/datatypes\"\n\t\"github.com\/synw\/microb\/libmicrob\/datatypes\/encoding\"\n\t\"github.com\/synw\/microb\/libmicrob\/events\"\n\t\"github.com\/synw\/microb\/libmicrob\/events\/format\"\n\t\"github.com\/synw\/microb\/libmicrob\/state\"\n\t\"github.com\/synw\/microb\/libmicrob\/state\/mutate\"\n\t\"github.com\/synw\/microb\/libmicrob\/commands\/methods\"\n\t\"github.com\/synw\/microb\/libmicrob\/http_handlers\"\n\t\n)\n\n\nfunc Run(command *datatypes.Command) {\n\tc := make(chan *datatypes.Command)\n\tgo runCommand(command, c)\n\tselect {\n\t\tcase cmd := <- c:\n\t\t\tclose(c)\n\t\t\t\/\/ process command results\n\t\t\tHandleCommandFeedback(cmd)\n\t}\n}\n\nfunc HandleCommandFeedback(command *datatypes.Command) {\n\tif command.Status == \"error\" {\n\t\tevents.Error(\"command_execution\", command.Error)\n\t}\n\tif state.Verbosity > 0 {\n\t\tevents.PrintCommandReport(command)\n\t}\n}\n\nfunc GetCommandFromPayload(message *datatypes.WsIncomingMessage, from string) *datatypes.Command {\n\tdata := message.Data\n\tname := data[\"name\"].(string)\n\treason := \"\"\n\tif data[\"from\"] != nil {\n\t\tfrom = data[\"from\"].(string)\n\t}\n\tvar command *datatypes.Command\n\tif data[\"reason\"].(string) != \"\" {\n\t\treason = data[\"reason\"].(string)\n\t}\n\tif data[\"args\"] != nil {\n\t\targs := data[\"args\"].([]interface{})\n\t\tcommand = NewWithArgs(name, from, reason, args)\n\t} else {\n\t\tcommand = New(name, from, reason)\n\t}\n\treturn command\n}\n\n\/\/ constructors\nfunc New(name string, from string, reason string) *datatypes.Command {\n\tnow := time.Now()\n\tvar args[]interface{}\n\tvar rv []interface{}\n\tid := encoding.GenerateId()\n\tcmd := &datatypes.Command{id, name, from, reason, now, args, \"pending\", nil, rv}\n\treturn cmd\n}\n\nfunc NewWithArgs(name string, from string, reason string, args []interface{}) *datatypes.Command {\n\tnow := time.Now()\n\tvar rv []interface{}\n\tid := encoding.GenerateId()\n\tcmd := &datatypes.Command{id, name, from, reason, now, args, \"pending\", nil, rv}\n\treturn cmd\n}\n\n\/\/ commands\nfunc reparseTemplates() error {\n\thttp_handlers.ReparseTemplates()\n\treturn nil\n}\n\nfunc updateRoutes() error {\n\t\/\/ hit db\n\troutestab, err := db.GetRoutes()\n\tif err != nil {\n\t\tevents.Error(\"commands.updateRoutes\", err)\n\t}\n\tvar routestr string\n\tvar route string\n\tfor i := range(routestab) {\n\t\troute = routestab[i]\n\t\troutestr = routestr+fmt.Sprintf(\"page('%s', function(ctx, next) { loadPage('\/x%s') } );\", route, route)\n\t}\n str := []byte(routestr)\n err = ioutil.WriteFile(\"templates\/routes.js\", str, 0644)\n if err != nil {\n events.Error(\"commands.updateRoutes\", err)\n return err\n }\n \/\/ auto reparse templates if the routes change\n \tcmd := New(\"reparse_templates\", \"commands.updateRoutes()\", \"Routes update\")\n go Run(cmd)\n \treturn nil\n}\n\nfunc dbStatus() (map[string]interface{}, error) {\n\tstatus, err := db.ReportStatus()\n\tif err != nil {\n\t\treturn status, err\n\t}\n\treturn status, nil\n}\n\/*\nfunc sysInfo(*datatypes.Command) *datatypes.Command {\n\tv, _ := mem.VirtualMemory()\n\t\n}*\/\n\n\/\/ utilities\nfunc handleCommandError(command *datatypes.Command, err error, c chan *datatypes.Command) {\n\tif err != nil { \n\t\tcommand.Status = \"error\" \n\t\tcommand.Error = err\n\t\tc <- command\n\t}\n\treturn\n}\n\/*\nfunc NewWithData(name string, from string, reason string, data []interface{}) *datatypes.Command {\n\tnow := time.Now()\n\tcmd := &datatypes.Command{name, from, reason, now, \"pending\", nil, data}\n\treturn cmd\n}\n*\/\n\/\/ handlers\nfunc runCommand(command *datatypes.Command, c chan *datatypes.Command) {\n\tevents.New(\"command\", \"RunCommand()\", command.Name)\n\tif commands_methods.IsValid(command) == false {\n\t\tmsg := \"Unknown command: \"+command.Name\n\t\terr := errors.New(msg)\n\t\thandleCommandError(command, err, c)\n\t\treturn\n\t}\n\tif (command.Name == \"update_routes\") {\n\t\t\/\/ UPDATE ROUTES\n\t\terr := updateRoutes()\n\t\tif err != nil {\n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t} else {\n\t\t\tgo updateRoutes()\n\t\t\tcommand.Status = \"success\"\n\t\t}\n\t} else if (command.Name == \"reparse_templates\") {\n\t\t\/\/ REPARSE TEMPLATES\n\t\terr := reparseTemplates()\n\t\tif err != nil { \n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t} else {\n\t\t\tgo reparseTemplates()\n\t\t\tcommand.Status = \"success\"\n\t\t}\n\t} else if (command.Name == \"ping\") {\n\t\t\/\/ PING\n\t\tcommand.ReturnValues = append(command.ReturnValues, \"PONG\")\n\t\tcommand.Status = \"success\"\n\t} else if (command.Name == \"db_status\") {\n\t\tstatus, err := db.ReportStatus()\n\t\tif err != nil { \n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t}\n\t\tline := \"------------------------------\"\n\t\tversion := \"\\n \"+line+\"\\n - Version: \"+status[\"version\"].(string)\n\t\tcache_size_mb := \"- Cache size: \"+strconv.FormatFloat(status[\"cache_size_mb\"].(float64), 'f', 2, 64)+\" Mb\"\n\t\ttime_started := line+\"\\nStarted since \"+format.FormatTime(status[\"time_started\"].(time.Time))\n\t\tcommand.ReturnValues = append(command.ReturnValues, version)\n\t\tcommand.ReturnValues = append(command.ReturnValues, cache_size_mb)\n\t\tcommand.ReturnValues = append(command.ReturnValues, time_started)\n\t\tcommand.Status = \"success\"\n\t} else if (command.Name == \"routes\") {\n\t\t\/\/ ROUTES\n\t\troutes, err := db.GetRoutes()\n\t\tif err != nil {\n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t}\n\t\tvar rvs []interface{}\n\t\tfor _, route := range(routes) {\n\t\t\trvs = append(rvs, route)\n\t\t}\n\t\tcommand.ReturnValues = rvs\n\t\tcommand.Status = \"success\"\n\t} else if command.Name == \"state\" {\n\t\t\/\/ STATE\n\t\tmsg := state.FormatState()\n\t\tvar rvs []interface{}\n\t\trvs = append(rvs, msg)\n\t\tcommand.ReturnValues = rvs\n\t\tcommand.Status = \"success\"\n\t} else if command.Name == \"set\" {\n\t\t\/\/ SET\n\t\tnum_args := len(command.Args)\n\t\tif num_args == 0 {\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with no arguments\"\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else if num_args == 1 {\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with only one argument: \"+command.Args[0].(string)\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else if num_args > 2 {\n\t\t\tvar arguments string\n\t\t\tfor _, a := range(command.Args) {\n\t\t\t\targuments = arguments+\" \"+a.(string)\n\t\t\t}\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with more than two arguments:\"+arguments\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else {\n\t\t\tcmd := command.Args[0]\n\t\t\targ := command.Args[1]\n\t\t\tif cmd == \"verbosity\" {\n\t\t\t\tmsg := mutate.Verbosity(arg.(string))\n\t\t\t\tvar rvs []interface{}\n\t\t\t\trvs = append(rvs, msg)\n\t\t\t\tcommand.ReturnValues = rvs\n\t\t\t\tcommand.Status = \"success\"\n\t\t\t} else if cmd == \"debug\" {\n\t\t\t\tmsg, err := mutate.Debug(arg.(string))\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommand.Status = \"error\"\n\t\t\t\t\tcommand.Error = err\n\t\t\t\t} else {\n\t\t\t\t\tvar rvs []interface{}\n\t\t\t\t\trvs = append(rvs, msg)\n\t\t\t\t\tcommand.Status = \"success\"\n\t\t\t\t}\n\t\t\t} \n\t\t}\n\t}\n\t\/*else if (command.Name == \"db_list\") {\n\t\tfor role, db := range metadata.GetDatabasesAndRoles() {\n\t\t\tmsg := database.Name\n\t\t\tcommand.ReturnValues = append(command.ReturnValues, )\n\t\t}\n\t\tcommand.Status = \"success\"\n\t}*\/\n\tc <- command\n\t\/*else if (command.Name == \"syncdb\") {\n\t\tgo db.ImportPagesFromMainDb(command.Values.(string))\n\t}*\/\n\t\/\/ save in db\n\t\/\/go db.SaveCommand(command)\n\treturn\n}\n<commit_msg>State db command<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"errors\"\n\t\"time\"\n\t\"strconv\"\n\t\/\/\"github.com\/shirou\/gopsutil\/mem\"\n\t\"github.com\/synw\/microb\/libmicrob\/db\"\n\t\"github.com\/synw\/microb\/libmicrob\/datatypes\"\n\t\"github.com\/synw\/microb\/libmicrob\/datatypes\/encoding\"\n\t\"github.com\/synw\/microb\/libmicrob\/events\"\n\t\"github.com\/synw\/microb\/libmicrob\/events\/format\"\n\t\"github.com\/synw\/microb\/libmicrob\/state\"\n\t\"github.com\/synw\/microb\/libmicrob\/state\/mutate\"\n\t\"github.com\/synw\/microb\/libmicrob\/commands\/methods\"\n\t\"github.com\/synw\/microb\/libmicrob\/http_handlers\"\n\t\n)\n\n\nfunc Run(command *datatypes.Command) {\n\tc := make(chan *datatypes.Command)\n\tgo runCommand(command, c)\n\tselect {\n\t\tcase cmd := <- c:\n\t\t\tclose(c)\n\t\t\t\/\/ process command results\n\t\t\tHandleCommandFeedback(cmd)\n\t}\n}\n\nfunc HandleCommandFeedback(command *datatypes.Command) {\n\tif command.Status == \"error\" {\n\t\tevents.Error(\"command_execution\", command.Error)\n\t}\n\tif state.Verbosity > 0 {\n\t\tevents.PrintCommandReport(command)\n\t}\n}\n\nfunc GetCommandFromPayload(message *datatypes.WsIncomingMessage, from string) *datatypes.Command {\n\tdata := message.Data\n\tname := data[\"name\"].(string)\n\treason := \"\"\n\tif data[\"from\"] != nil {\n\t\tfrom = data[\"from\"].(string)\n\t}\n\tvar command *datatypes.Command\n\tif data[\"reason\"].(string) != \"\" {\n\t\treason = data[\"reason\"].(string)\n\t}\n\tif data[\"args\"] != nil {\n\t\targs := data[\"args\"].([]interface{})\n\t\tcommand = NewWithArgs(name, from, reason, args)\n\t} else {\n\t\tcommand = New(name, from, reason)\n\t}\n\treturn command\n}\n\n\/\/ constructors\nfunc New(name string, from string, reason string) *datatypes.Command {\n\tnow := time.Now()\n\tvar args[]interface{}\n\tvar rv []interface{}\n\tid := encoding.GenerateId()\n\tcmd := &datatypes.Command{id, name, from, reason, now, args, \"pending\", nil, rv}\n\treturn cmd\n}\n\nfunc NewWithArgs(name string, from string, reason string, args []interface{}) *datatypes.Command {\n\tnow := time.Now()\n\tvar rv []interface{}\n\tid := encoding.GenerateId()\n\tcmd := &datatypes.Command{id, name, from, reason, now, args, \"pending\", nil, rv}\n\treturn cmd\n}\n\n\/\/ commands\nfunc reparseTemplates() error {\n\thttp_handlers.ReparseTemplates()\n\treturn nil\n}\n\nfunc updateRoutes() error {\n\t\/\/ hit db\n\troutestab, err := db.GetRoutes()\n\tif err != nil {\n\t\tevents.Error(\"commands.updateRoutes\", err)\n\t}\n\tvar routestr string\n\tvar route string\n\tfor i := range(routestab) {\n\t\troute = routestab[i]\n\t\troutestr = routestr+fmt.Sprintf(\"page('%s', function(ctx, next) { loadPage('\/x%s') } );\", route, route)\n\t}\n str := []byte(routestr)\n err = ioutil.WriteFile(\"templates\/routes.js\", str, 0644)\n if err != nil {\n events.Error(\"commands.updateRoutes\", err)\n return err\n }\n \/\/ auto reparse templates if the routes change\n \tcmd := New(\"reparse_templates\", \"commands.updateRoutes()\", \"Routes update\")\n go Run(cmd)\n \treturn nil\n}\n\nfunc dbStatus() (map[string]interface{}, error) {\n\tstatus, err := db.ReportStatus()\n\tif err != nil {\n\t\treturn status, err\n\t}\n\treturn status, nil\n}\n\/*\nfunc sysInfo(*datatypes.Command) *datatypes.Command {\n\tv, _ := mem.VirtualMemory()\n\t\n}*\/\n\n\/\/ utilities\nfunc handleCommandError(command *datatypes.Command, err error, c chan *datatypes.Command) {\n\tif err != nil { \n\t\tcommand.Status = \"error\" \n\t\tcommand.Error = err\n\t\tc <- command\n\t}\n\treturn\n}\n\/*\nfunc NewWithData(name string, from string, reason string, data []interface{}) *datatypes.Command {\n\tnow := time.Now()\n\tcmd := &datatypes.Command{name, from, reason, now, \"pending\", nil, data}\n\treturn cmd\n}\n*\/\n\/\/ handlers\nfunc runCommand(command *datatypes.Command, c chan *datatypes.Command) {\n\tevents.New(\"command\", \"RunCommand()\", command.Name)\n\tif commands_methods.IsValid(command) == false {\n\t\tmsg := \"Unknown command: \"+command.Name\n\t\terr := errors.New(msg)\n\t\thandleCommandError(command, err, c)\n\t\treturn\n\t}\n\tif (command.Name == \"update_routes\") {\n\t\t\/\/ UPDATE ROUTES\n\t\terr := updateRoutes()\n\t\tif err != nil {\n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t} else {\n\t\t\tgo updateRoutes()\n\t\t\tcommand.Status = \"success\"\n\t\t}\n\t} else if (command.Name == \"reparse_templates\") {\n\t\t\/\/ REPARSE TEMPLATES\n\t\terr := reparseTemplates()\n\t\tif err != nil { \n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t} else {\n\t\t\tgo reparseTemplates()\n\t\t\tcommand.Status = \"success\"\n\t\t}\n\t} else if (command.Name == \"ping\") {\n\t\t\/\/ PING\n\t\tcommand.ReturnValues = append(command.ReturnValues, \"PONG\")\n\t\tcommand.Status = \"success\"\n\t} else if (command.Name == \"db_status\") {\n\t\tstatus, err := db.ReportStatus()\n\t\tif err != nil { \n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t}\n\t\tline := \"------------------------------\"\n\t\tversion := \"\\n \"+line+\"\\n - Version: \"+status[\"version\"].(string)\n\t\tcache_size_mb := \"- Cache size: \"+strconv.FormatFloat(status[\"cache_size_mb\"].(float64), 'f', 2, 64)+\" Mb\"\n\t\ttime_started := line+\"\\nStarted since \"+format.FormatTime(status[\"time_started\"].(time.Time))\n\t\tcommand.ReturnValues = append(command.ReturnValues, version)\n\t\tcommand.ReturnValues = append(command.ReturnValues, cache_size_mb)\n\t\tcommand.ReturnValues = append(command.ReturnValues, time_started)\n\t\tcommand.Status = \"success\"\n\t} else if (command.Name == \"routes\") {\n\t\t\/\/ ROUTES\n\t\troutes, err := db.GetRoutes()\n\t\tif err != nil {\n\t\t\thandleCommandError(command, err, c)\n\t\t\treturn\n\t\t}\n\t\tvar rvs []interface{}\n\t\tfor _, route := range(routes) {\n\t\t\trvs = append(rvs, route)\n\t\t}\n\t\tcommand.ReturnValues = rvs\n\t\tcommand.Status = \"success\"\n\t} else if command.Name == \"state\" {\n\t\t\/\/ STATE\n\t\tnum_args := len(command.Args)\n\t\tif num_args == 0 {\n\t\t\tmsg := state.FormatState()\n\t\t\tvar rvs []interface{}\n\t\t\trvs = append(rvs, msg)\n\t\t\tcommand.ReturnValues = rvs\n\t\t\tcommand.Status = \"success\"\n\t\t} else if num_args > 1 {\n\t\t\tvar rvs []interface{}\n\t\t\tmsg := \"Too many arguments to command state\"\n\t\t\trvs = append(rvs, msg)\n\t\t\tcommand.ReturnValues = rvs\n\t\t\tcommand.Status = \"error\"\n\t\t} else {\n\t\t\tif command.Args[0].(string) == \"db\" {\n\t\t\t\t\/\/ STATE DB\n\t\t\t\tvar rvs []interface{}\n\t\t\t\tmsg := \"Database state:\\n\"\n\t\t\t\tmsg = msg+\" - Pages database is \"+state.Server.PagesDb.Name+\" (\"+state.Server.PagesDb.Type+\") at \"+state.Server.PagesDb.Host+\"\\n\"\n\t\t\t\tmsg = msg+\" - Hits database is \"+state.Server.HitsDb.Name+\" (\"+state.Server.HitsDb.Type+\") at \"+state.Server.HitsDb.Host+\"\\n\"\n\t\t\t\tmsg = msg+\" - Commands database is \"+state.Server.CommandsDb.Name+\" (\"+state.Server.CommandsDb.Type+\") at \"+state.Server.CommandsDb.Host\n\t\t\t\trvs = append(rvs, msg)\n\t\t\t\tcommand.ReturnValues = rvs\n\t\t\t\tcommand.Status = \"success\"\n\t\t\t}\n\t\t}\n\t} else if command.Name == \"set\" {\n\t\t\/\/ SET\n\t\tnum_args := len(command.Args)\n\t\tif num_args == 0 {\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with no arguments\"\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else if num_args == 1 {\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with only one argument: \"+command.Args[0].(string)\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else if num_args > 2 {\n\t\t\tvar arguments string\n\t\t\tfor _, a := range(command.Args) {\n\t\t\t\targuments = arguments+\" \"+a.(string)\n\t\t\t}\n\t\t\tmsg := \"Command set incoming from \"+command.From+\" with more than two arguments:\"+arguments\n\t\t\tevents.ErrMsg(\"commands.run\", msg)\n\t\t} else {\n\t\t\tcmd := command.Args[0]\n\t\t\targ := command.Args[1]\n\t\t\tif cmd == \"verbosity\" {\n\t\t\t\tmsg := mutate.Verbosity(arg.(string))\n\t\t\t\tvar rvs []interface{}\n\t\t\t\trvs = append(rvs, msg)\n\t\t\t\tcommand.ReturnValues = rvs\n\t\t\t\tcommand.Status = \"success\"\n\t\t\t} else if cmd == \"debug\" {\n\t\t\t\tmsg, err := mutate.Debug(arg.(string))\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommand.Status = \"error\"\n\t\t\t\t\tcommand.Error = err\n\t\t\t\t} else {\n\t\t\t\t\tvar rvs []interface{}\n\t\t\t\t\trvs = append(rvs, msg)\n\t\t\t\t\tcommand.Status = \"success\"\n\t\t\t\t}\n\t\t\t} \n\t\t}\n\t}\n\t\/*else if (command.Name == \"db_list\") {\n\t\tfor role, db := range metadata.GetDatabasesAndRoles() {\n\t\t\tmsg := database.Name\n\t\t\tcommand.ReturnValues = append(command.ReturnValues, )\n\t\t}\n\t\tcommand.Status = \"success\"\n\t}*\/\n\tc <- command\n\t\/*else if (command.Name == \"syncdb\") {\n\t\tgo db.ImportPagesFromMainDb(command.Values.(string))\n\t}*\/\n\t\/\/ save in db\n\t\/\/go db.SaveCommand(command)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package csv_test\n\nimport (\n\t\"fmt\"\n\t\"simonwaldherr.de\/go\/golibs\/csv\"\n\t\"sort\"\n)\n\nvar userdata string = `id;name;email\n0;John Doe;jDoe@example.org\n1;Jane Doe;jane.doe@example.com\n2;Max Mustermann;m.mustermann@alpha.tld`\n\nvar userdata2 string = `id;name;email\n0;John Doe;jDoe@example.org`\n\ntype kv struct {\n\tKey int\n\tValue []string\n}\n\nfunc sorting(m map[int][]string) []kv {\n\tvar ss []kv\n\tfor k, v := range m {\n\t\tss = append(ss, kv{k, v})\n\t}\n\tsort.Slice(ss, func(i, j int) bool {\n\t\treturn ss[i].Value[1] > ss[j].Value[1]\n\t})\n\treturn ss\n}\n\nfunc Example() {\n\tcsvmap, _ := csv.LoadCSVfromString(userdata)\n\tsortedmap := sorting(csvmap)\n\n\tfor _, user := range sortedmap {\n\t\tfmt.Println(string(user.Value[1]))\n\t}\n\n\t\/\/ Output: Max Mustermann\n\t\/\/ John Doe\n\t\/\/ Jane Doe\n}\n\nfunc Example_second() {\n\tcsvmap, _ := csv.LoadCSVfromFile(\".\/test.csv\")\n\tsortedmap := sorting(csvmap)\n\n\tfor _, user := range sortedmap {\n\t\tfmt.Println(string(user.Value[1]))\n\t}\n\n\t\/\/ Output: Max Mustermann\n\t\/\/ John Doe\n\t\/\/ Jane Doe\n}\n\nfunc Example_third() {\n\tcsvmap, k := csv.LoadCSVfromString(userdata2)\n\tfor _, user := range csvmap {\n\t\tfmt.Println(user[k[\"email\"]])\n\t}\n\n\t\/\/ Output: jDoe@example.org\n}\n<commit_msg>change example tests to fix error at semaphoreci.com<commit_after>package csv_test\n\nimport (\n\t\"fmt\"\n\t\"simonwaldherr.de\/go\/golibs\/csv\"\n\t\"sort\"\n)\n\nvar userdata string = `id;name;email\n0;John Doe;jDoe@example.org\n1;Jane Doe;jane.doe@example.com\n2;Max Mustermann;m.mustermann@alpha.tld`\n\nvar userdata2 string = `id;name;email\n0;John Doe;jDoe@example.org`\n\ntype kv struct {\n\tKey int\n\tValue []string\n}\n\nfunc sorting(m map[int][]string) []kv {\n\tvar ss []kv\n\tfor k, v := range m {\n\t\tss = append(ss, kv{k, v})\n\t}\n\tsort.Slice(ss, func(i, j int) bool {\n\t\treturn ss[i].Value[1] > ss[j].Value[1]\n\t})\n\treturn ss\n}\n\nfunc Example() {\n\tcsvmap, k := csv.LoadCSVfromString(userdata)\n\n\tfor _, user := range csvmap {\n\t\tfmt.Println(user[k[\"email\"]])\n\t}\n}\n\nfunc Example_second() {\n\tcsvmap, k := csv.LoadCSVfromFile(\".\/test.csv\")\n\n\tfor _, user := range csvmap {\n\t\tfmt.Println(user[k[\"email\"]])\n\t}\n}\n\nfunc Example_third() {\n\tcsvmap, k := csv.LoadCSVfromString(userdata2)\n\tfor _, user := range csvmap {\n\t\tfmt.Println(user[k[\"email\"]])\n\t}\n\n\t\/\/ Output: jDoe@example.org\n}\n<|endoftext|>"} {"text":"<commit_before>package exec\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/daemon\/execdriver\"\n\tderr \"github.com\/docker\/docker\/errors\"\n\t\"github.com\/docker\/docker\/pkg\/stringid\"\n\t\"github.com\/docker\/docker\/runconfig\"\n)\n\n\/\/ Config holds the configurations for execs. The Daemon keeps\n\/\/ track of both running and finished execs so that they can be\n\/\/ examined both during and after completion.\ntype Config struct {\n\tsync.Mutex\n\t*runconfig.StreamConfig\n\tID string\n\tRunning bool\n\tExitCode *int\n\tProcessConfig *execdriver.ProcessConfig\n\tOpenStdin bool\n\tOpenStderr bool\n\tOpenStdout bool\n\tCanRemove bool\n\tContainerID string\n\tDetachKeys []byte\n\n\t\/\/ waitStart will be closed immediately after the exec is really started.\n\twaitStart chan struct{}\n}\n\n\/\/ NewConfig initializes the a new exec configuration\nfunc NewConfig() *Config {\n\treturn &Config{\n\t\tID: stringid.GenerateNonCryptoID(),\n\t\tStreamConfig: runconfig.NewStreamConfig(),\n\t\twaitStart: make(chan struct{}),\n\t}\n}\n\n\/\/ Store keeps track of the exec configurations.\ntype Store struct {\n\tcommands map[string]*Config\n\tsync.RWMutex\n}\n\n\/\/ NewStore initializes a new exec store.\nfunc NewStore() *Store {\n\treturn &Store{commands: make(map[string]*Config, 0)}\n}\n\n\/\/ Commands returns the exec configurations in the store.\nfunc (e *Store) Commands() map[string]*Config {\n\treturn e.commands\n}\n\n\/\/ Add adds a new exec configuration to the store.\nfunc (e *Store) Add(id string, Config *Config) {\n\te.Lock()\n\te.commands[id] = Config\n\te.Unlock()\n}\n\n\/\/ Get returns an exec configuration by its id.\nfunc (e *Store) Get(id string) *Config {\n\te.RLock()\n\tres := e.commands[id]\n\te.RUnlock()\n\treturn res\n}\n\n\/\/ Delete removes an exec configuration from the store.\nfunc (e *Store) Delete(id string) {\n\te.Lock()\n\tdelete(e.commands, id)\n\te.Unlock()\n}\n\n\/\/ List returns the list of exec ids in the store.\nfunc (e *Store) List() []string {\n\tvar IDs []string\n\te.RLock()\n\tfor id := range e.commands {\n\t\tIDs = append(IDs, id)\n\t}\n\te.RUnlock()\n\treturn IDs\n}\n\n\/\/ Wait waits until the exec process finishes or there is an error in the error channel.\nfunc (c *Config) Wait(cErr chan error) error {\n\t\/\/ Exec should not return until the process is actually running\n\tselect {\n\tcase <-c.waitStart:\n\tcase err := <-cErr:\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Close closes the wait channel for the progress.\nfunc (c *Config) Close() {\n\tclose(c.waitStart)\n}\n\n\/\/ Resize changes the size of the terminal for the exec process.\nfunc (c *Config) Resize(h, w int) error {\n\tselect {\n\tcase <-c.waitStart:\n\tcase <-time.After(time.Second):\n\t\treturn derr.ErrorCodeExecResize.WithArgs(c.ID)\n\t}\n\treturn c.ProcessConfig.Terminal.Resize(h, w)\n}\n<commit_msg>Fix race condition in execCommandGC<commit_after>package exec\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/daemon\/execdriver\"\n\tderr \"github.com\/docker\/docker\/errors\"\n\t\"github.com\/docker\/docker\/pkg\/stringid\"\n\t\"github.com\/docker\/docker\/runconfig\"\n)\n\n\/\/ Config holds the configurations for execs. The Daemon keeps\n\/\/ track of both running and finished execs so that they can be\n\/\/ examined both during and after completion.\ntype Config struct {\n\tsync.Mutex\n\t*runconfig.StreamConfig\n\tID string\n\tRunning bool\n\tExitCode *int\n\tProcessConfig *execdriver.ProcessConfig\n\tOpenStdin bool\n\tOpenStderr bool\n\tOpenStdout bool\n\tCanRemove bool\n\tContainerID string\n\tDetachKeys []byte\n\n\t\/\/ waitStart will be closed immediately after the exec is really started.\n\twaitStart chan struct{}\n}\n\n\/\/ NewConfig initializes the a new exec configuration\nfunc NewConfig() *Config {\n\treturn &Config{\n\t\tID: stringid.GenerateNonCryptoID(),\n\t\tStreamConfig: runconfig.NewStreamConfig(),\n\t\twaitStart: make(chan struct{}),\n\t}\n}\n\n\/\/ Store keeps track of the exec configurations.\ntype Store struct {\n\tcommands map[string]*Config\n\tsync.RWMutex\n}\n\n\/\/ NewStore initializes a new exec store.\nfunc NewStore() *Store {\n\treturn &Store{commands: make(map[string]*Config, 0)}\n}\n\n\/\/ Commands returns the exec configurations in the store.\nfunc (e *Store) Commands() map[string]*Config {\n\te.RLock()\n\tcommands := make(map[string]*Config, len(e.commands))\n\tfor id, config := range e.commands {\n\t\tcommands[id] = config\n\t}\n\te.RUnlock()\n\treturn commands\n}\n\n\/\/ Add adds a new exec configuration to the store.\nfunc (e *Store) Add(id string, Config *Config) {\n\te.Lock()\n\te.commands[id] = Config\n\te.Unlock()\n}\n\n\/\/ Get returns an exec configuration by its id.\nfunc (e *Store) Get(id string) *Config {\n\te.RLock()\n\tres := e.commands[id]\n\te.RUnlock()\n\treturn res\n}\n\n\/\/ Delete removes an exec configuration from the store.\nfunc (e *Store) Delete(id string) {\n\te.Lock()\n\tdelete(e.commands, id)\n\te.Unlock()\n}\n\n\/\/ List returns the list of exec ids in the store.\nfunc (e *Store) List() []string {\n\tvar IDs []string\n\te.RLock()\n\tfor id := range e.commands {\n\t\tIDs = append(IDs, id)\n\t}\n\te.RUnlock()\n\treturn IDs\n}\n\n\/\/ Wait waits until the exec process finishes or there is an error in the error channel.\nfunc (c *Config) Wait(cErr chan error) error {\n\t\/\/ Exec should not return until the process is actually running\n\tselect {\n\tcase <-c.waitStart:\n\tcase err := <-cErr:\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Close closes the wait channel for the progress.\nfunc (c *Config) Close() {\n\tclose(c.waitStart)\n}\n\n\/\/ Resize changes the size of the terminal for the exec process.\nfunc (c *Config) Resize(h, w int) error {\n\tselect {\n\tcase <-c.waitStart:\n\tcase <-time.After(time.Second):\n\t\treturn derr.ErrorCodeExecResize.WithArgs(c.ID)\n\t}\n\treturn c.ProcessConfig.Terminal.Resize(h, w)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\npackage daemon \/\/ import \"github.com\/docker\/docker\/daemon\"\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\tcontainertypes \"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/dockerversion\"\n\t\"github.com\/docker\/docker\/pkg\/sysinfo\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ fillPlatformInfo fills the platform related info.\nfunc (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {\n\tv.CgroupDriver = daemon.getCgroupDriver()\n\tv.CgroupVersion = \"1\"\n\tif sysInfo.CgroupUnified {\n\t\tv.CgroupVersion = \"2\"\n\t}\n\n\tv.MemoryLimit = sysInfo.MemoryLimit\n\tv.SwapLimit = sysInfo.SwapLimit\n\tv.KernelMemory = sysInfo.KernelMemory\n\tv.KernelMemoryTCP = sysInfo.KernelMemoryTCP\n\tv.OomKillDisable = sysInfo.OomKillDisable\n\tv.CPUCfsPeriod = sysInfo.CPUCfs\n\tv.CPUCfsQuota = sysInfo.CPUCfs\n\tv.CPUShares = sysInfo.CPUShares\n\tv.CPUSet = sysInfo.Cpuset\n\tv.PidsLimit = sysInfo.PidsLimit\n\tv.Runtimes = daemon.configStore.GetAllRuntimes()\n\tv.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()\n\tv.InitBinary = daemon.configStore.GetInitPath()\n\n\tdefaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path\n\tif rv, err := exec.Command(defaultRuntimeBinary, \"--version\").Output(); err == nil {\n\t\tif _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %v\", defaultRuntimeBinary, err)\n\t\t\tv.RuncCommit.ID = \"N\/A\"\n\t\t} else {\n\t\t\tv.RuncCommit.ID = commit\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %v\", defaultRuntimeBinary, err)\n\t\tv.RuncCommit.ID = \"N\/A\"\n\t}\n\n\t\/\/ runc is now shipped as a separate package. Set \"expected\" to same value\n\t\/\/ as \"ID\" to prevent clients from reporting a version-mismatch\n\tv.RuncCommit.Expected = v.RuncCommit.ID\n\n\tif rv, err := daemon.containerd.Version(context.Background()); err == nil {\n\t\tv.ContainerdCommit.ID = rv.Revision\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve containerd version: %v\", err)\n\t\tv.ContainerdCommit.ID = \"N\/A\"\n\t}\n\n\t\/\/ containerd is now shipped as a separate package. Set \"expected\" to same\n\t\/\/ value as \"ID\" to prevent clients from reporting a version-mismatch\n\tv.ContainerdCommit.Expected = v.ContainerdCommit.ID\n\n\t\/\/ TODO is there still a need to check the expected version for tini?\n\t\/\/ if not, we can change this, and just set \"Expected\" to v.InitCommit.ID\n\tv.InitCommit.Expected = dockerversion.InitCommitID\n\n\tdefaultInitBinary := daemon.configStore.GetInitPath()\n\tif rv, err := exec.Command(defaultInitBinary, \"--version\").Output(); err == nil {\n\t\tif _, commit, err := parseInitVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %s\", defaultInitBinary, err)\n\t\t\tv.InitCommit.ID = \"N\/A\"\n\t\t} else {\n\t\t\tv.InitCommit.ID = commit\n\t\t\tif len(dockerversion.InitCommitID) > len(commit) {\n\t\t\t\tv.InitCommit.Expected = dockerversion.InitCommitID[0:len(commit)]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %s\", defaultInitBinary, err)\n\t\tv.InitCommit.ID = \"N\/A\"\n\t}\n\n\tif v.CgroupDriver == cgroupNoneDriver {\n\t\tif v.CgroupVersion == \"2\" {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.\")\n\t\t} else {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.\")\n\t\t}\n\t} else {\n\t\tif !v.MemoryLimit {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No memory limit support\")\n\t\t}\n\t\tif !v.SwapLimit {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No swap limit support\")\n\t\t}\n\t\tif !v.KernelMemoryTCP && v.CgroupVersion == \"1\" {\n\t\t\t\/\/ kernel memory is not available for cgroup v2.\n\t\t\t\/\/ Warning is not printed on cgroup v2, because there is no action user can take.\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No kernel memory TCP limit support\")\n\t\t}\n\t\tif !v.OomKillDisable && v.CgroupVersion == \"1\" {\n\t\t\t\/\/ oom kill disable is not available for cgroup v2.\n\t\t\t\/\/ Warning is not printed on cgroup v2, because there is no action user can take.\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No oom kill disable support\")\n\t\t}\n\t\tif !v.CPUCfsQuota {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu cfs quota support\")\n\t\t}\n\t\tif !v.CPUCfsPeriod {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu cfs period support\")\n\t\t}\n\t\tif !v.CPUShares {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu shares support\")\n\t\t}\n\t\tif !v.CPUSet {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpuset support\")\n\t\t}\n\t\t\/\/ TODO add fields for these options in types.Info\n\t\tif !sysInfo.BlkioWeight && v.CgroupVersion == \"2\" {\n\t\t\t\/\/ blkio weight is not available on cgroup v1 since kernel 5.0.\n\t\t\t\/\/ Warning is not printed on cgroup v1, because there is no action user can take.\n\t\t\t\/\/ On cgroup v2, blkio weight is implemented using io.weight\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.weight support\")\n\t\t}\n\t\tif !sysInfo.BlkioWeightDevice && v.CgroupVersion == \"2\" {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.weight (per device) support\")\n\t\t}\n\t\tif !sysInfo.BlkioReadBpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (rbps) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.read_bps_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioWriteBpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (wbps) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.write_bps_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioReadIOpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (riops) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.read_iops_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioWriteIOpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (wiops) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.write_iops_device support\")\n\t\t\t}\n\t\t}\n\t}\n\tif !v.IPv4Forwarding {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: IPv4 forwarding is disabled\")\n\t}\n\tif !v.BridgeNfIptables {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: bridge-nf-call-iptables is disabled\")\n\t}\n\tif !v.BridgeNfIP6tables {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: bridge-nf-call-ip6tables is disabled\")\n\t}\n}\n\nfunc (daemon *Daemon) fillPlatformVersion(v *types.Version) {\n\tif rv, err := daemon.containerd.Version(context.Background()); err == nil {\n\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\tName: \"containerd\",\n\t\t\tVersion: rv.Version,\n\t\t\tDetails: map[string]string{\n\t\t\t\t\"GitCommit\": rv.Revision,\n\t\t\t},\n\t\t})\n\t}\n\n\tdefaultRuntime := daemon.configStore.GetDefaultRuntimeName()\n\tdefaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path\n\tif rv, err := exec.Command(defaultRuntimeBinary, \"--version\").Output(); err == nil {\n\t\tif _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %v\", defaultRuntimeBinary, err)\n\t\t} else {\n\t\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\t\tName: defaultRuntime,\n\t\t\t\tVersion: ver,\n\t\t\t\tDetails: map[string]string{\n\t\t\t\t\t\"GitCommit\": commit,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %v\", defaultRuntimeBinary, err)\n\t}\n\n\tdefaultInitBinary := daemon.configStore.GetInitPath()\n\tif rv, err := exec.Command(defaultInitBinary, \"--version\").Output(); err == nil {\n\t\tif ver, commit, err := parseInitVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %s\", defaultInitBinary, err)\n\t\t} else {\n\t\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\t\tName: filepath.Base(defaultInitBinary),\n\t\t\t\tVersion: ver,\n\t\t\t\tDetails: map[string]string{\n\t\t\t\t\t\"GitCommit\": commit,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %s\", defaultInitBinary, err)\n\t}\n}\n\nfunc fillDriverWarnings(v *types.Info) {\n\tfor _, pair := range v.DriverStatus {\n\t\tif pair[0] == \"Data loop file\" {\n\t\t\tmsg := fmt.Sprintf(\"WARNING: %s: usage of loopback devices is \"+\n\t\t\t\t\"strongly discouraged for production use.\\n \"+\n\t\t\t\t\"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\", v.Driver)\n\n\t\t\tv.Warnings = append(v.Warnings, msg)\n\t\t\tcontinue\n\t\t}\n\t\tif pair[0] == \"Supports d_type\" && pair[1] == \"false\" {\n\t\t\tbackingFs := getBackingFs(v)\n\n\t\t\tmsg := fmt.Sprintf(\"WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\\n\", v.Driver, backingFs)\n\t\t\tif backingFs == \"xfs\" {\n\t\t\t\tmsg += \" Reformat the filesystem with ftype=1 to enable d_type support.\\n\"\n\t\t\t}\n\t\t\tmsg += \" Running without d_type support will not be supported in future releases.\"\n\n\t\t\tv.Warnings = append(v.Warnings, msg)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc getBackingFs(v *types.Info) string {\n\tfor _, pair := range v.DriverStatus {\n\t\tif pair[0] == \"Backing Filesystem\" {\n\t\t\treturn pair[1]\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ parseInitVersion parses a Tini version string, and extracts the \"version\"\n\/\/ and \"git commit\" from the output.\n\/\/\n\/\/ Output example from `docker-init --version`:\n\/\/\n\/\/ tini version 0.18.0 - git.fec3683\nfunc parseInitVersion(v string) (version string, commit string, err error) {\n\tparts := strings.Split(v, \" - \")\n\n\tif len(parts) >= 2 {\n\t\tgitParts := strings.Split(strings.TrimSpace(parts[1]), \".\")\n\t\tif len(gitParts) == 2 && gitParts[0] == \"git\" {\n\t\t\tcommit = gitParts[1]\n\t\t}\n\t}\n\tparts[0] = strings.TrimSpace(parts[0])\n\tif strings.HasPrefix(parts[0], \"tini version \") {\n\t\tversion = strings.TrimPrefix(parts[0], \"tini version \")\n\t}\n\tif version == \"\" && commit == \"\" {\n\t\terr = errors.Errorf(\"unknown output format: %s\", v)\n\t}\n\treturn version, commit, err\n}\n\n\/\/ parseRuntimeVersion parses the output of `[runtime] --version` and extracts the\n\/\/ \"name\", \"version\" and \"git commit\" from the output.\n\/\/\n\/\/ Output example from `runc --version`:\n\/\/\n\/\/ runc version 1.0.0-rc5+dev\n\/\/ commit: 69663f0bd4b60df09991c08812a60108003fa340\n\/\/ spec: 1.0.0\nfunc parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {\n\tlines := strings.Split(strings.TrimSpace(v), \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, \"version\") {\n\t\t\ts := strings.Split(line, \"version\")\n\t\t\truntime = strings.TrimSpace(s[0])\n\t\t\tversion = strings.TrimSpace(s[len(s)-1])\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(line, \"commit:\") {\n\t\t\tcommit = strings.TrimSpace(strings.TrimPrefix(line, \"commit:\"))\n\t\t\tcontinue\n\t\t}\n\t}\n\tif version == \"\" && commit == \"\" {\n\t\terr = errors.Errorf(\"unknown output format: %s\", v)\n\t}\n\treturn runtime, version, commit, err\n}\n\nfunc (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {\n\treturn sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()\n}\n\n\/\/ Rootless returns true if daemon is running in rootless mode\nfunc (daemon *Daemon) Rootless() bool {\n\treturn daemon.configStore.Rootless\n}\n<commit_msg>info: unset cgroup-related fields when CgroupDriver == none<commit_after>\/\/ +build !windows\n\npackage daemon \/\/ import \"github.com\/docker\/docker\/daemon\"\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\tcontainertypes \"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/dockerversion\"\n\t\"github.com\/docker\/docker\/pkg\/sysinfo\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ fillPlatformInfo fills the platform related info.\nfunc (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {\n\tv.CgroupDriver = daemon.getCgroupDriver()\n\tv.CgroupVersion = \"1\"\n\tif sysInfo.CgroupUnified {\n\t\tv.CgroupVersion = \"2\"\n\t}\n\n\tif v.CgroupDriver != cgroupNoneDriver {\n\t\tv.MemoryLimit = sysInfo.MemoryLimit\n\t\tv.SwapLimit = sysInfo.SwapLimit\n\t\tv.KernelMemory = sysInfo.KernelMemory\n\t\tv.KernelMemoryTCP = sysInfo.KernelMemoryTCP\n\t\tv.OomKillDisable = sysInfo.OomKillDisable\n\t\tv.CPUCfsPeriod = sysInfo.CPUCfs\n\t\tv.CPUCfsQuota = sysInfo.CPUCfs\n\t\tv.CPUShares = sysInfo.CPUShares\n\t\tv.CPUSet = sysInfo.Cpuset\n\t\tv.PidsLimit = sysInfo.PidsLimit\n\t}\n\tv.Runtimes = daemon.configStore.GetAllRuntimes()\n\tv.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()\n\tv.InitBinary = daemon.configStore.GetInitPath()\n\n\tdefaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path\n\tif rv, err := exec.Command(defaultRuntimeBinary, \"--version\").Output(); err == nil {\n\t\tif _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %v\", defaultRuntimeBinary, err)\n\t\t\tv.RuncCommit.ID = \"N\/A\"\n\t\t} else {\n\t\t\tv.RuncCommit.ID = commit\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %v\", defaultRuntimeBinary, err)\n\t\tv.RuncCommit.ID = \"N\/A\"\n\t}\n\n\t\/\/ runc is now shipped as a separate package. Set \"expected\" to same value\n\t\/\/ as \"ID\" to prevent clients from reporting a version-mismatch\n\tv.RuncCommit.Expected = v.RuncCommit.ID\n\n\tif rv, err := daemon.containerd.Version(context.Background()); err == nil {\n\t\tv.ContainerdCommit.ID = rv.Revision\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve containerd version: %v\", err)\n\t\tv.ContainerdCommit.ID = \"N\/A\"\n\t}\n\n\t\/\/ containerd is now shipped as a separate package. Set \"expected\" to same\n\t\/\/ value as \"ID\" to prevent clients from reporting a version-mismatch\n\tv.ContainerdCommit.Expected = v.ContainerdCommit.ID\n\n\t\/\/ TODO is there still a need to check the expected version for tini?\n\t\/\/ if not, we can change this, and just set \"Expected\" to v.InitCommit.ID\n\tv.InitCommit.Expected = dockerversion.InitCommitID\n\n\tdefaultInitBinary := daemon.configStore.GetInitPath()\n\tif rv, err := exec.Command(defaultInitBinary, \"--version\").Output(); err == nil {\n\t\tif _, commit, err := parseInitVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %s\", defaultInitBinary, err)\n\t\t\tv.InitCommit.ID = \"N\/A\"\n\t\t} else {\n\t\t\tv.InitCommit.ID = commit\n\t\t\tif len(dockerversion.InitCommitID) > len(commit) {\n\t\t\t\tv.InitCommit.Expected = dockerversion.InitCommitID[0:len(commit)]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %s\", defaultInitBinary, err)\n\t\tv.InitCommit.ID = \"N\/A\"\n\t}\n\n\tif v.CgroupDriver == cgroupNoneDriver {\n\t\tif v.CgroupVersion == \"2\" {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.\")\n\t\t} else {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.\")\n\t\t}\n\t} else {\n\t\tif !v.MemoryLimit {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No memory limit support\")\n\t\t}\n\t\tif !v.SwapLimit {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No swap limit support\")\n\t\t}\n\t\tif !v.KernelMemoryTCP && v.CgroupVersion == \"1\" {\n\t\t\t\/\/ kernel memory is not available for cgroup v2.\n\t\t\t\/\/ Warning is not printed on cgroup v2, because there is no action user can take.\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No kernel memory TCP limit support\")\n\t\t}\n\t\tif !v.OomKillDisable && v.CgroupVersion == \"1\" {\n\t\t\t\/\/ oom kill disable is not available for cgroup v2.\n\t\t\t\/\/ Warning is not printed on cgroup v2, because there is no action user can take.\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No oom kill disable support\")\n\t\t}\n\t\tif !v.CPUCfsQuota {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu cfs quota support\")\n\t\t}\n\t\tif !v.CPUCfsPeriod {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu cfs period support\")\n\t\t}\n\t\tif !v.CPUShares {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpu shares support\")\n\t\t}\n\t\tif !v.CPUSet {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No cpuset support\")\n\t\t}\n\t\t\/\/ TODO add fields for these options in types.Info\n\t\tif !sysInfo.BlkioWeight && v.CgroupVersion == \"2\" {\n\t\t\t\/\/ blkio weight is not available on cgroup v1 since kernel 5.0.\n\t\t\t\/\/ Warning is not printed on cgroup v1, because there is no action user can take.\n\t\t\t\/\/ On cgroup v2, blkio weight is implemented using io.weight\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.weight support\")\n\t\t}\n\t\tif !sysInfo.BlkioWeightDevice && v.CgroupVersion == \"2\" {\n\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.weight (per device) support\")\n\t\t}\n\t\tif !sysInfo.BlkioReadBpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (rbps) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.read_bps_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioWriteBpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (wbps) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.write_bps_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioReadIOpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (riops) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.read_iops_device support\")\n\t\t\t}\n\t\t}\n\t\tif !sysInfo.BlkioWriteIOpsDevice {\n\t\t\tif v.CgroupVersion == \"2\" {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No io.max (wiops) support\")\n\t\t\t} else {\n\t\t\t\tv.Warnings = append(v.Warnings, \"WARNING: No blkio throttle.write_iops_device support\")\n\t\t\t}\n\t\t}\n\t}\n\tif !v.IPv4Forwarding {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: IPv4 forwarding is disabled\")\n\t}\n\tif !v.BridgeNfIptables {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: bridge-nf-call-iptables is disabled\")\n\t}\n\tif !v.BridgeNfIP6tables {\n\t\tv.Warnings = append(v.Warnings, \"WARNING: bridge-nf-call-ip6tables is disabled\")\n\t}\n}\n\nfunc (daemon *Daemon) fillPlatformVersion(v *types.Version) {\n\tif rv, err := daemon.containerd.Version(context.Background()); err == nil {\n\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\tName: \"containerd\",\n\t\t\tVersion: rv.Version,\n\t\t\tDetails: map[string]string{\n\t\t\t\t\"GitCommit\": rv.Revision,\n\t\t\t},\n\t\t})\n\t}\n\n\tdefaultRuntime := daemon.configStore.GetDefaultRuntimeName()\n\tdefaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path\n\tif rv, err := exec.Command(defaultRuntimeBinary, \"--version\").Output(); err == nil {\n\t\tif _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %v\", defaultRuntimeBinary, err)\n\t\t} else {\n\t\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\t\tName: defaultRuntime,\n\t\t\t\tVersion: ver,\n\t\t\t\tDetails: map[string]string{\n\t\t\t\t\t\"GitCommit\": commit,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %v\", defaultRuntimeBinary, err)\n\t}\n\n\tdefaultInitBinary := daemon.configStore.GetInitPath()\n\tif rv, err := exec.Command(defaultInitBinary, \"--version\").Output(); err == nil {\n\t\tif ver, commit, err := parseInitVersion(string(rv)); err != nil {\n\t\t\tlogrus.Warnf(\"failed to parse %s version: %s\", defaultInitBinary, err)\n\t\t} else {\n\t\t\tv.Components = append(v.Components, types.ComponentVersion{\n\t\t\t\tName: filepath.Base(defaultInitBinary),\n\t\t\t\tVersion: ver,\n\t\t\t\tDetails: map[string]string{\n\t\t\t\t\t\"GitCommit\": commit,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t} else {\n\t\tlogrus.Warnf(\"failed to retrieve %s version: %s\", defaultInitBinary, err)\n\t}\n}\n\nfunc fillDriverWarnings(v *types.Info) {\n\tfor _, pair := range v.DriverStatus {\n\t\tif pair[0] == \"Data loop file\" {\n\t\t\tmsg := fmt.Sprintf(\"WARNING: %s: usage of loopback devices is \"+\n\t\t\t\t\"strongly discouraged for production use.\\n \"+\n\t\t\t\t\"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\", v.Driver)\n\n\t\t\tv.Warnings = append(v.Warnings, msg)\n\t\t\tcontinue\n\t\t}\n\t\tif pair[0] == \"Supports d_type\" && pair[1] == \"false\" {\n\t\t\tbackingFs := getBackingFs(v)\n\n\t\t\tmsg := fmt.Sprintf(\"WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\\n\", v.Driver, backingFs)\n\t\t\tif backingFs == \"xfs\" {\n\t\t\t\tmsg += \" Reformat the filesystem with ftype=1 to enable d_type support.\\n\"\n\t\t\t}\n\t\t\tmsg += \" Running without d_type support will not be supported in future releases.\"\n\n\t\t\tv.Warnings = append(v.Warnings, msg)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc getBackingFs(v *types.Info) string {\n\tfor _, pair := range v.DriverStatus {\n\t\tif pair[0] == \"Backing Filesystem\" {\n\t\t\treturn pair[1]\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ parseInitVersion parses a Tini version string, and extracts the \"version\"\n\/\/ and \"git commit\" from the output.\n\/\/\n\/\/ Output example from `docker-init --version`:\n\/\/\n\/\/ tini version 0.18.0 - git.fec3683\nfunc parseInitVersion(v string) (version string, commit string, err error) {\n\tparts := strings.Split(v, \" - \")\n\n\tif len(parts) >= 2 {\n\t\tgitParts := strings.Split(strings.TrimSpace(parts[1]), \".\")\n\t\tif len(gitParts) == 2 && gitParts[0] == \"git\" {\n\t\t\tcommit = gitParts[1]\n\t\t}\n\t}\n\tparts[0] = strings.TrimSpace(parts[0])\n\tif strings.HasPrefix(parts[0], \"tini version \") {\n\t\tversion = strings.TrimPrefix(parts[0], \"tini version \")\n\t}\n\tif version == \"\" && commit == \"\" {\n\t\terr = errors.Errorf(\"unknown output format: %s\", v)\n\t}\n\treturn version, commit, err\n}\n\n\/\/ parseRuntimeVersion parses the output of `[runtime] --version` and extracts the\n\/\/ \"name\", \"version\" and \"git commit\" from the output.\n\/\/\n\/\/ Output example from `runc --version`:\n\/\/\n\/\/ runc version 1.0.0-rc5+dev\n\/\/ commit: 69663f0bd4b60df09991c08812a60108003fa340\n\/\/ spec: 1.0.0\nfunc parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {\n\tlines := strings.Split(strings.TrimSpace(v), \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Contains(line, \"version\") {\n\t\t\ts := strings.Split(line, \"version\")\n\t\t\truntime = strings.TrimSpace(s[0])\n\t\t\tversion = strings.TrimSpace(s[len(s)-1])\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(line, \"commit:\") {\n\t\t\tcommit = strings.TrimSpace(strings.TrimPrefix(line, \"commit:\"))\n\t\t\tcontinue\n\t\t}\n\t}\n\tif version == \"\" && commit == \"\" {\n\t\terr = errors.Errorf(\"unknown output format: %s\", v)\n\t}\n\treturn runtime, version, commit, err\n}\n\nfunc (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {\n\treturn sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()\n}\n\n\/\/ Rootless returns true if daemon is running in rootless mode\nfunc (daemon *Daemon) Rootless() bool {\n\treturn daemon.configStore.Rootless\n}\n<|endoftext|>"} {"text":"<commit_before>package sockd\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\tpseudoRand \"math\/rand\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/HouzuoGuo\/laitos\/daemon\/common\"\n\t\"github.com\/HouzuoGuo\/laitos\/daemon\/dnsd\"\n\t\"github.com\/HouzuoGuo\/laitos\/lalog\"\n\t\"github.com\/HouzuoGuo\/laitos\/misc\"\n)\n\nfunc WriteRand(conn net.Conn) {\n\trandBytesWritten := 0\n\tfor i := 0; i < RandNum(0, 1, 2); i++ {\n\t\trandBuf := make([]byte, RandNum(70, 120, 200))\n\t\tif _, err := pseudoRand.Read(randBuf); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif err := conn.SetWriteDeadline(time.Now().Add(time.Duration(RandNum(330, 540, 880)) * time.Millisecond)); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif n, err := conn.Write(randBuf); err != nil && !strings.Contains(err.Error(), \"closed\") && !strings.Contains(err.Error(), \"broken\") {\n\t\t\tbreak\n\t\t} else {\n\t\t\trandBytesWritten += n\n\t\t}\n\t}\n\tif pseudoRand.Intn(100) < 3 {\n\t\tlalog.DefaultLogger.Info(\"sockd.TCP.WriteRand\", conn.RemoteAddr().String(), nil, \"wrote %d rand bytes\", randBytesWritten)\n\t}\n}\n\nfunc TweakTCPConnection(conn *net.TCPConn) {\n\t_ = conn.SetNoDelay(true)\n\t_ = conn.SetKeepAlive(true)\n\t_ = conn.SetKeepAlivePeriod(60 * time.Second)\n}\n\nfunc PipeTCPConnection(fromConn, toConn net.Conn, doWriteRand bool) {\n\tdefer func() {\n\t\t_ = toConn.Close()\n\t}()\n\tbuf := make([]byte, MaxPacketSize)\n\tfor {\n\t\tif misc.EmergencyLockDown {\n\t\t\tlalog.DefaultLogger.Warning(\"PipeTCPConnection\", \"\", misc.ErrEmergencyLockDown, \"\")\n\t\t\treturn\n\t\t} else if err := fromConn.SetReadDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\t\treturn\n\t\t}\n\t\tlength, err := fromConn.Read(buf)\n\t\tif length > 0 {\n\t\t\tif err := toConn.SetWriteDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\t\t\treturn\n\t\t\t} else if _, err := toConn.Write(buf[:length]); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tif doWriteRand {\n\t\t\t\tWriteRand(fromConn)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype TCPDaemon struct {\n\tAddress string `json:\"Address\"`\n\tPassword string `json:\"Password\"`\n\tPerIPLimit int `json:\"PerIPLimit\"`\n\tTCPPort int `json:\"TCPPort\"`\n\n\tDNSDaemon *dnsd.Daemon `json:\"-\"` \/\/ it is assumed to be already initialised\n\n\tcipher *Cipher\n\ttcpServer *common.TCPServer\n}\n\nfunc (daemon *TCPDaemon) Initialise() error {\n\tdaemon.cipher = &Cipher{}\n\tdaemon.cipher.Initialise(daemon.Password)\n\tdaemon.tcpServer = &common.TCPServer{\n\t\tListenAddr: daemon.Address,\n\t\tListenPort: daemon.TCPPort,\n\t\tAppName: \"sockd\",\n\t\tApp: daemon,\n\t\tLimitPerSec: daemon.PerIPLimit,\n\t}\n\tdaemon.tcpServer.Initialise()\n\treturn nil\n}\n\nfunc (daemon *TCPDaemon) GetTCPStatsCollector() *misc.Stats {\n\treturn common.SOCKDStatsTCP\n}\n\nfunc (daemon *TCPDaemon) HandleTCPConnection(logger lalog.Logger, ip string, client *net.TCPConn) {\n\tNewTCPCipherConnection(daemon, client, daemon.cipher.Copy(), logger).HandleTCPConnection()\n}\n\nfunc (daemon *TCPDaemon) StartAndBlock() error {\n\treturn daemon.tcpServer.StartAndBlock()\n}\n\nfunc (daemon *TCPDaemon) Stop() {\n\tdaemon.tcpServer.Stop()\n}\n\ntype TCPCipherConnection struct {\n\tnet.Conn\n\t*Cipher\n\tdaemon *TCPDaemon\n\tmutex sync.Mutex\n\treadBuf, writeBuf []byte\n\tlogger lalog.Logger\n}\n\nfunc NewTCPCipherConnection(daemon *TCPDaemon, netConn net.Conn, cip *Cipher, logger lalog.Logger) *TCPCipherConnection {\n\treturn &TCPCipherConnection{\n\t\tConn: netConn,\n\t\tdaemon: daemon,\n\t\tCipher: cip,\n\t\treadBuf: make([]byte, MaxPacketSize),\n\t\twriteBuf: make([]byte, MaxPacketSize),\n\t\tlogger: logger,\n\t}\n}\n\nfunc (conn *TCPCipherConnection) Close() error {\n\treturn conn.Conn.Close()\n}\n\nfunc (conn *TCPCipherConnection) Read(b []byte) (n int, err error) {\n\tif conn.DecryptionStream == nil {\n\t\tiv := make([]byte, conn.IVLength)\n\t\tif _, err = io.ReadFull(conn.Conn, iv); err != nil {\n\t\t\treturn\n\t\t}\n\t\tconn.InitDecryptionStream(iv)\n\t\tif len(conn.IV) == 0 {\n\t\t\tconn.IV = iv\n\t\t}\n\t}\n\n\tcipherData := conn.readBuf\n\tif len(b) > len(cipherData) {\n\t\tcipherData = make([]byte, len(b))\n\t} else {\n\t\tcipherData = cipherData[:len(b)]\n\t}\n\n\tn, err = conn.Conn.Read(cipherData)\n\tif n > 0 {\n\t\tconn.Decrypt(b[0:n], cipherData[0:n])\n\t}\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) Write(buf []byte) (n int, err error) {\n\tconn.mutex.Lock()\n\tbufSize := len(buf)\n\theaderLen := len(buf) - bufSize\n\n\tvar iv []byte\n\tif conn.EncryptionStream == nil {\n\t\tiv = conn.InitEncryptionStream()\n\t}\n\n\tcipherData := conn.writeBuf\n\tdataSize := len(buf) + len(iv)\n\tif dataSize > len(cipherData) {\n\t\tcipherData = make([]byte, dataSize)\n\t} else {\n\t\tcipherData = cipherData[:dataSize]\n\t}\n\n\tif iv != nil {\n\t\tcopy(cipherData, iv)\n\t}\n\n\tconn.Encrypt(cipherData[len(iv):], buf)\n\tn, err = conn.Conn.Write(cipherData)\n\n\tif n >= headerLen {\n\t\tn -= headerLen\n\t}\n\tconn.mutex.Unlock()\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) ParseRequest() (destIP net.IP, destNoPort, destWithPort string, err error) {\n\tif err = conn.SetReadDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\tconn.logger.MaybeMinorError(err)\n\t\treturn\n\t}\n\n\tbuf := make([]byte, 269)\n\tif _, err = io.ReadFull(conn, buf[:AddressTypeIndex+1]); err != nil {\n\t\treturn\n\t}\n\n\tvar reqStart, reqEnd int\n\taddrType := buf[AddressTypeIndex]\n\tmaskedType := addrType & AddressTypeMask\n\tswitch maskedType {\n\tcase AddressTypeIPv4:\n\t\treqStart, reqEnd = IPPacketIndex, IPPacketIndex+IPv4PacketLength\n\tcase AddressTypeIPv6:\n\t\treqStart, reqEnd = IPPacketIndex, IPPacketIndex+IPv6PacketLength\n\tcase AddressTypeDM:\n\t\tif _, err = io.ReadFull(conn, buf[AddressTypeIndex+1:DMAddrLengthIndex+1]); err != nil {\n\t\t\treturn\n\t\t}\n\t\treqStart, reqEnd = DMAddrIndex, DMAddrIndex+int(buf[DMAddrLengthIndex])+DMAddrHeaderLength\n\tdefault:\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: unknown mask type %d\", maskedType)\n\t\treturn\n\t}\n\n\tif _, err = io.ReadFull(conn, buf[reqStart:reqEnd]); err != nil {\n\t\treturn\n\t}\n\tport := binary.BigEndian.Uint16(buf[reqEnd-2 : reqEnd])\n\tif port < 1 {\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: invalid destination port %d\", port)\n\t\treturn\n\t}\n\n\tswitch maskedType {\n\tcase AddressTypeIPv4:\n\t\tdestIP = buf[IPPacketIndex : IPPacketIndex+net.IPv4len]\n\t\tdestNoPort = destIP.String()\n\t\tdestWithPort = net.JoinHostPort(destIP.String(), strconv.Itoa(int(port)))\n\tcase AddressTypeIPv6:\n\t\tdestIP = buf[IPPacketIndex : IPPacketIndex+net.IPv6len]\n\t\tdestNoPort = destIP.String()\n\t\tdestWithPort = net.JoinHostPort(destIP.String(), strconv.Itoa(int(port)))\n\tcase AddressTypeDM:\n\t\tdest := string(buf[DMAddrIndex : DMAddrIndex+int(buf[DMAddrLengthIndex])])\n\t\tdestNoPort = dest\n\t\tdestIP = net.ParseIP(dest)\n\t\tdestWithPort = net.JoinHostPort(dest, strconv.Itoa(int(port)))\n\t}\n\tif strings.ContainsRune(destNoPort, 0) || strings.ContainsRune(destWithPort, 0) {\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: destination must not contain NULL byte\")\n\t}\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) WriteRandAndClose() {\n\tdefer func() {\n\t\t_ = conn.Close()\n\t}()\n\trandBuf := make([]byte, RandNum(20, 70, 200))\n\t_, err := rand.Read(randBuf)\n\tif err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to get random bytes\")\n\t\treturn\n\t}\n\tif err := conn.SetWriteDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to write random bytes\")\n\t\treturn\n\t}\n\tif _, err := conn.Write(randBuf); err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to write random bytes\")\n\t\treturn\n\t}\n}\n\nfunc (conn *TCPCipherConnection) HandleTCPConnection() {\n\tremoteAddr := conn.RemoteAddr().String()\n\tdestIP, destNoPort, destWithPort, err := conn.ParseRequest()\n\tif err != nil {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, err, \"failed to get destination address\")\n\t\tconn.WriteRandAndClose()\n\t\treturn\n\t}\n\tif strings.ContainsRune(destWithPort, 0) {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve invalid destination address with 0 in it\")\n\t\tconn.WriteRandAndClose()\n\t\treturn\n\t}\n\tif destIP != nil && IsReservedAddr(destIP) {\n\t\tconn.logger.Info(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve reserved address %s\", destNoPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tif conn.daemon.DNSDaemon.IsInBlacklist(destNoPort) {\n\t\tconn.logger.Info(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve blacklisted address %s\", destNoPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tdest, err := net.DialTimeout(\"tcp\", destWithPort, IOTimeoutSec*time.Second)\n\tif err != nil {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, err, \"failed to connect to destination \\\"%s\\\"\", destWithPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tTweakTCPConnection(conn.Conn.(*net.TCPConn))\n\tTweakTCPConnection(dest.(*net.TCPConn))\n\tgo PipeTCPConnection(conn, dest, true)\n\tPipeTCPConnection(dest, conn, false)\n}\n<commit_msg>tweak socket pipe behaviours in sockd<commit_after>package sockd\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\tpseudoRand \"math\/rand\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/HouzuoGuo\/laitos\/daemon\/common\"\n\t\"github.com\/HouzuoGuo\/laitos\/daemon\/dnsd\"\n\t\"github.com\/HouzuoGuo\/laitos\/lalog\"\n\t\"github.com\/HouzuoGuo\/laitos\/misc\"\n)\n\n\/\/ WriteRand writes up to 5 packets of random data to the connection, each packet contains up to 600 bytes of data.\nfunc WriteRand(conn net.Conn) {\n\trandBytesWritten := 0\n\tfor i := 0; i < RandNum(1, 2, 5); i++ {\n\t\trandBuf := make([]byte, RandNum(80, 210, 550))\n\t\tif _, err := pseudoRand.Read(randBuf); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif err := conn.SetWriteDeadline(time.Now().Add(time.Duration(RandNum(890, 1440, 2330)) * time.Millisecond)); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif n, err := conn.Write(randBuf); err != nil && !strings.Contains(err.Error(), \"closed\") && !strings.Contains(err.Error(), \"broken\") {\n\t\t\tbreak\n\t\t} else {\n\t\t\trandBytesWritten += n\n\t\t}\n\t}\n\tif pseudoRand.Intn(100) < 3 {\n\t\tlalog.DefaultLogger.Info(\"sockd.TCP.WriteRand\", conn.RemoteAddr().String(), nil, \"wrote %d rand bytes\", randBytesWritten)\n\t}\n}\n\n\/\/ TweakTCPConnection tweaks the TCP connection settings for improved responsiveness.\nfunc TweakTCPConnection(conn *net.TCPConn) {\n\t_ = conn.SetNoDelay(true)\n\t_ = conn.SetKeepAlive(true)\n\t_ = conn.SetKeepAlivePeriod(60 * time.Second)\n\t_ = conn.SetDeadline(time.Now().Add(time.Duration(IOTimeoutSec * time.Second)))\n\t_ = conn.SetLinger(5)\n}\n\n\/*\nReadWithRetry makes at most 5 attempts at reading incoming data from the connection.\nIf data is partially read before an IO error occurs, then the connection will be closed.\n*\/\nfunc ReadWithRetry(conn net.Conn, buf []byte) (n int, err error) {\n\tattempts := 0\n\tfor ; attempts < 5; attempts++ {\n\t\tif err = conn.SetReadDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err == nil {\n\t\t\tif n, err = conn.Read(buf); err == nil {\n\t\t\t\tbreak\n\t\t\t} else if n > 0 {\n\t\t\t\t\/\/ IO error occurred after data is partially read, the data stream is now broken.\n\t\t\t\t_ = conn.Close()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t\/\/ Sleep couple of seconds in between attempts\n\t\ttime.Sleep(time.Second * time.Duration(attempts))\n\t}\n\tif pseudoRand.Intn(100) < 3 {\n\t\tlalog.DefaultLogger.Info(\"sockd.TCP.ReadWithRetry\", conn.RemoteAddr().String(), err, \"read %d amount of data in %d attempts\", n, attempts+1)\n\t}\n\treturn\n}\n\n\/*\nWriteWithRetry makes at most 5 attempts at writing the data into the connection.\nIf data is partially written before an IO error occurs, then the connection will be closed.\n*\/\nfunc WriteWithRetry(conn net.Conn, buf []byte) (n int, err error) {\n\tattempts := 0\n\tfor ; attempts < 5; attempts++ {\n\t\tif err = conn.SetWriteDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err == nil {\n\t\t\tif n, err = conn.Write(buf); err == nil {\n\t\t\t\tbreak\n\t\t\t} else if n > 0 {\n\t\t\t\t\/\/ IO error occurred after data is partially written, the data stream is now broken.\n\t\t\t\t_ = conn.Close()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t\/\/ Sleep couple of seconds in between attempts\n\t\ttime.Sleep(time.Second * time.Duration(attempts))\n\t}\n\tif pseudoRand.Intn(100) < 3 {\n\t\tlalog.DefaultLogger.Info(\"sockd.TCP.WriteWithRetry\", conn.RemoteAddr().String(), err, \"wrote %d amount of data in %d attempts\", n, attempts+1)\n\t}\n\treturn\n}\n\nfunc PipeTCPConnection(fromConn, toConn net.Conn, doWriteRand bool) {\n\tdefer func() {\n\t\t_ = toConn.Close()\n\t}()\n\tbuf := make([]byte, MaxPacketSize)\n\tfor {\n\t\tif misc.EmergencyLockDown {\n\t\t\tlalog.DefaultLogger.Warning(\"PipeTCPConnection\", \"\", misc.ErrEmergencyLockDown, \"\")\n\t\t\treturn\n\t\t}\n\t\tlength, err := ReadWithRetry(fromConn, buf)\n\t\tif err != nil {\n\t\t\tif doWriteRand {\n\t\t\t\tWriteRand(fromConn)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif length > 0 {\n\t\t\tif _, err := WriteWithRetry(toConn, buf[:length]); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype TCPDaemon struct {\n\tAddress string `json:\"Address\"`\n\tPassword string `json:\"Password\"`\n\tPerIPLimit int `json:\"PerIPLimit\"`\n\tTCPPort int `json:\"TCPPort\"`\n\n\tDNSDaemon *dnsd.Daemon `json:\"-\"` \/\/ it is assumed to be already initialised\n\n\tcipher *Cipher\n\ttcpServer *common.TCPServer\n}\n\nfunc (daemon *TCPDaemon) Initialise() error {\n\tdaemon.cipher = &Cipher{}\n\tdaemon.cipher.Initialise(daemon.Password)\n\tdaemon.tcpServer = &common.TCPServer{\n\t\tListenAddr: daemon.Address,\n\t\tListenPort: daemon.TCPPort,\n\t\tAppName: \"sockd\",\n\t\tApp: daemon,\n\t\tLimitPerSec: daemon.PerIPLimit,\n\t}\n\tdaemon.tcpServer.Initialise()\n\treturn nil\n}\n\nfunc (daemon *TCPDaemon) GetTCPStatsCollector() *misc.Stats {\n\treturn common.SOCKDStatsTCP\n}\n\nfunc (daemon *TCPDaemon) HandleTCPConnection(logger lalog.Logger, ip string, client *net.TCPConn) {\n\tNewTCPCipherConnection(daemon, client, daemon.cipher.Copy(), logger).HandleTCPConnection()\n}\n\nfunc (daemon *TCPDaemon) StartAndBlock() error {\n\treturn daemon.tcpServer.StartAndBlock()\n}\n\nfunc (daemon *TCPDaemon) Stop() {\n\tdaemon.tcpServer.Stop()\n}\n\ntype TCPCipherConnection struct {\n\tnet.Conn\n\t*Cipher\n\tdaemon *TCPDaemon\n\tmutex sync.Mutex\n\treadBuf, writeBuf []byte\n\tlogger lalog.Logger\n}\n\nfunc NewTCPCipherConnection(daemon *TCPDaemon, netConn net.Conn, cip *Cipher, logger lalog.Logger) *TCPCipherConnection {\n\treturn &TCPCipherConnection{\n\t\tConn: netConn,\n\t\tdaemon: daemon,\n\t\tCipher: cip,\n\t\treadBuf: make([]byte, MaxPacketSize),\n\t\twriteBuf: make([]byte, MaxPacketSize),\n\t\tlogger: logger,\n\t}\n}\n\nfunc (conn *TCPCipherConnection) Close() error {\n\treturn conn.Conn.Close()\n}\n\nfunc (conn *TCPCipherConnection) Read(b []byte) (n int, err error) {\n\tif conn.DecryptionStream == nil {\n\t\tiv := make([]byte, conn.IVLength)\n\t\tif _, err = io.ReadFull(conn.Conn, iv); err != nil {\n\t\t\treturn\n\t\t}\n\t\tconn.InitDecryptionStream(iv)\n\t\tif len(conn.IV) == 0 {\n\t\t\tconn.IV = iv\n\t\t}\n\t}\n\n\tcipherData := conn.readBuf\n\tif len(b) > len(cipherData) {\n\t\tcipherData = make([]byte, len(b))\n\t} else {\n\t\tcipherData = cipherData[:len(b)]\n\t}\n\n\tn, err = conn.Conn.Read(cipherData)\n\tif n > 0 {\n\t\tconn.Decrypt(b[0:n], cipherData[0:n])\n\t}\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) Write(buf []byte) (n int, err error) {\n\tconn.mutex.Lock()\n\tbufSize := len(buf)\n\theaderLen := len(buf) - bufSize\n\n\tvar iv []byte\n\tif conn.EncryptionStream == nil {\n\t\tiv = conn.InitEncryptionStream()\n\t}\n\n\tcipherData := conn.writeBuf\n\tdataSize := len(buf) + len(iv)\n\tif dataSize > len(cipherData) {\n\t\tcipherData = make([]byte, dataSize)\n\t} else {\n\t\tcipherData = cipherData[:dataSize]\n\t}\n\n\tif iv != nil {\n\t\tcopy(cipherData, iv)\n\t}\n\n\tconn.Encrypt(cipherData[len(iv):], buf)\n\tn, err = conn.Conn.Write(cipherData)\n\n\tif n >= headerLen {\n\t\tn -= headerLen\n\t}\n\tconn.mutex.Unlock()\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) ParseRequest() (destIP net.IP, destNoPort, destWithPort string, err error) {\n\tif err = conn.SetReadDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\tconn.logger.MaybeMinorError(err)\n\t\treturn\n\t}\n\n\tbuf := make([]byte, 269)\n\tif _, err = io.ReadFull(conn, buf[:AddressTypeIndex+1]); err != nil {\n\t\treturn\n\t}\n\n\tvar reqStart, reqEnd int\n\taddrType := buf[AddressTypeIndex]\n\tmaskedType := addrType & AddressTypeMask\n\tswitch maskedType {\n\tcase AddressTypeIPv4:\n\t\treqStart, reqEnd = IPPacketIndex, IPPacketIndex+IPv4PacketLength\n\tcase AddressTypeIPv6:\n\t\treqStart, reqEnd = IPPacketIndex, IPPacketIndex+IPv6PacketLength\n\tcase AddressTypeDM:\n\t\tif _, err = io.ReadFull(conn, buf[AddressTypeIndex+1:DMAddrLengthIndex+1]); err != nil {\n\t\t\treturn\n\t\t}\n\t\treqStart, reqEnd = DMAddrIndex, DMAddrIndex+int(buf[DMAddrLengthIndex])+DMAddrHeaderLength\n\tdefault:\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: unknown mask type %d\", maskedType)\n\t\treturn\n\t}\n\n\tif _, err = io.ReadFull(conn, buf[reqStart:reqEnd]); err != nil {\n\t\treturn\n\t}\n\tport := binary.BigEndian.Uint16(buf[reqEnd-2 : reqEnd])\n\tif port < 1 {\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: invalid destination port %d\", port)\n\t\treturn\n\t}\n\n\tswitch maskedType {\n\tcase AddressTypeIPv4:\n\t\tdestIP = buf[IPPacketIndex : IPPacketIndex+net.IPv4len]\n\t\tdestNoPort = destIP.String()\n\t\tdestWithPort = net.JoinHostPort(destIP.String(), strconv.Itoa(int(port)))\n\tcase AddressTypeIPv6:\n\t\tdestIP = buf[IPPacketIndex : IPPacketIndex+net.IPv6len]\n\t\tdestNoPort = destIP.String()\n\t\tdestWithPort = net.JoinHostPort(destIP.String(), strconv.Itoa(int(port)))\n\tcase AddressTypeDM:\n\t\tdest := string(buf[DMAddrIndex : DMAddrIndex+int(buf[DMAddrLengthIndex])])\n\t\tdestNoPort = dest\n\t\tdestIP = net.ParseIP(dest)\n\t\tdestWithPort = net.JoinHostPort(dest, strconv.Itoa(int(port)))\n\t}\n\tif strings.ContainsRune(destNoPort, 0) || strings.ContainsRune(destWithPort, 0) {\n\t\terr = fmt.Errorf(\"TCPCipherConnection.ParseRequest: destination must not contain NULL byte\")\n\t}\n\treturn\n}\n\nfunc (conn *TCPCipherConnection) WriteRandAndClose() {\n\tdefer func() {\n\t\t_ = conn.Close()\n\t}()\n\trandBuf := make([]byte, RandNum(20, 70, 200))\n\t_, err := rand.Read(randBuf)\n\tif err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to get random bytes\")\n\t\treturn\n\t}\n\tif err := conn.SetWriteDeadline(time.Now().Add(IOTimeoutSec * time.Second)); err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to write random bytes\")\n\t\treturn\n\t}\n\tif _, err := conn.Write(randBuf); err != nil {\n\t\tconn.logger.Warning(\"WriteRandAndClose\", conn.Conn.RemoteAddr().String(), err, \"failed to write random bytes\")\n\t\treturn\n\t}\n}\n\nfunc (conn *TCPCipherConnection) HandleTCPConnection() {\n\tremoteAddr := conn.RemoteAddr().String()\n\tdestIP, destNoPort, destWithPort, err := conn.ParseRequest()\n\tif err != nil {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, err, \"failed to get destination address\")\n\t\tconn.WriteRandAndClose()\n\t\treturn\n\t}\n\tif strings.ContainsRune(destWithPort, 0) {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve invalid destination address with 0 in it\")\n\t\tconn.WriteRandAndClose()\n\t\treturn\n\t}\n\tif destIP != nil && IsReservedAddr(destIP) {\n\t\tconn.logger.Info(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve reserved address %s\", destNoPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tif conn.daemon.DNSDaemon.IsInBlacklist(destNoPort) {\n\t\tconn.logger.Info(\"HandleTCPConnection\", remoteAddr, nil, \"will not serve blacklisted address %s\", destNoPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tdest, err := net.DialTimeout(\"tcp\", destWithPort, IOTimeoutSec*time.Second)\n\tif err != nil {\n\t\tconn.logger.Warning(\"HandleTCPConnection\", remoteAddr, err, \"failed to connect to destination \\\"%s\\\"\", destWithPort)\n\t\t_ = conn.Close()\n\t\treturn\n\t}\n\tTweakTCPConnection(conn.Conn.(*net.TCPConn))\n\tTweakTCPConnection(dest.(*net.TCPConn))\n\tgo PipeTCPConnection(conn, dest, true)\n\tPipeTCPConnection(dest, conn, false)\n}\n<|endoftext|>"} {"text":"<commit_before>package poego\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype Poego struct {\n\tclient *http.Client\n}\n\nconst (\n\tBaseUrl = \"api.pathofexile.com\"\n\tScheme = \"http\"\n)\n\n\/\/Initialize the API with a http.client and base values\nfunc NewApi() *Poego {\n\treturn &Poego{\n\t\tclient: &http.Client{},\n\t}\n}\n\nfunc (p *Poego) buildRequest(method, endpoint string, v url.Values) *http.Request {\n\n\tif v == nil {\n\t\tv = url.Values{}\n\t}\n\n\tu := &url.URL{\n\t\tScheme: Scheme,\n\t\tPath: BaseUrl + endpoint,\n\t\tRawQuery: v.Encode(),\n\t}\n\n\tr, err := http.NewRequest(method, u.String(), nil)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn r\n}\n\nfunc (p *Poego) makeRequest(r *http.Request, d interface{}) error {\n\n\tres, err := p.client.Do(r)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn handleApiError(res)\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(d)\n\n\treturn err\n}\n\nfunc handleApiError(res *http.Response) error {\n\n\tvar apiError ApiError\n\terr := json.NewDecoder(res.Body).Decode(&apiError)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Status Code: %d\\nMessage: Couldn't decode API error\", res.StatusCode)\n\t}\n\n\treturn fmt.Errorf(\"Status Code: %d\\nPoE Error Code: %d\\nMessage: %s\", res.StatusCode, apiError.Error.Code, apiError.Error.Message)\n}\n<commit_msg>Add global documentation value to godocs<commit_after>\/\/Package poego provides a wrapper of the Path of Exile API in Go\n\npackage poego\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype Poego struct {\n\tclient *http.Client\n}\n\nconst (\n\tBaseUrl = \"api.pathofexile.com\"\n\tScheme = \"http\"\n)\n\n\/\/Initialize the API with a http.client and base values\nfunc NewApi() *Poego {\n\treturn &Poego{\n\t\tclient: &http.Client{},\n\t}\n}\n\nfunc (p *Poego) buildRequest(method, endpoint string, v url.Values) *http.Request {\n\n\tif v == nil {\n\t\tv = url.Values{}\n\t}\n\n\tu := &url.URL{\n\t\tScheme: Scheme,\n\t\tPath: BaseUrl + endpoint,\n\t\tRawQuery: v.Encode(),\n\t}\n\n\tr, err := http.NewRequest(method, u.String(), nil)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn r\n}\n\nfunc (p *Poego) makeRequest(r *http.Request, d interface{}) error {\n\n\tres, err := p.client.Do(r)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn handleApiError(res)\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(d)\n\n\treturn err\n}\n\nfunc handleApiError(res *http.Response) error {\n\n\tvar apiError ApiError\n\terr := json.NewDecoder(res.Body).Decode(&apiError)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Status Code: %d\\nMessage: Couldn't decode API error\", res.StatusCode)\n\t}\n\n\treturn fmt.Errorf(\"Status Code: %d\\nPoE Error Code: %d\\nMessage: %s\", res.StatusCode, apiError.Error.Code, apiError.Error.Message)\n}\n<|endoftext|>"} {"text":"<commit_before>package geo\n\nimport \"math\"\n\n\/\/ Point описывает координаты географической точки: долгота, широта. Именно такая последовательность\n\/\/ выбрана только потому, что это наиболее близко представлению координат на плоскости: x и y, а не\n\/\/ наоборот. Вторая причина — именно такой формат точек поддерживается в MongoDB, а данная\n\/\/ библиотека сделана в первую очередь именно для работы с этой базой данных.\ntype Point [2]float64\n\n\/\/ NewPoint возвращает новое описание точки с указанными координатами. Если координаты выходят\n\/\/ за допустимый диапазон данных для географических координат, то возникает panic.\nfunc NewPoint(lon, lat float64) Point {\n\tif lon < -180 || lon > 180 {\n\t\tpanic(\"bad longitude\")\n\t}\n\tif lat < -90 || lat > 90 {\n\t\tpanic(\"bad latitude\")\n\t}\n\treturn Point{lon, lat}\n}\n\n\/\/ Longitude возвращает долготу точки.\nfunc (p Point) Longitude() float64 {\n\treturn p[0]\n}\n\n\/\/ Latitude возвращает широту точки.\nfunc (p Point) Latitude() float64 {\n\treturn p[1]\n}\n\n\/\/ IsZero возвращает true, если обе координаты точки равны нулю.\nfunc (p Point) IsZero() bool {\n\treturn p[0] == 0 && p[1] == 0\n}\n\n\/\/ Geo возвращает представление точки в формате GeoJSON.\nfunc (p Point) Geo() *GeoJSON {\n\tif p.IsZero() {\n\t\treturn nil\n\t}\n\treturn &GeoJSON{\n\t\tType: \"Point\",\n\t\tCoordinates: p[:],\n\t}\n}\n\nconst (\n\tEarthRadius float64 = 6378137.0 \/\/ радиус Земли в метрах\n)\n\n\/\/ Move возвращает новую точку, перемещенную от изначально на dist метров в направлении bearing\n\/\/ в градусах.\nfunc (p Point) Move(dist float64, bearing float64) Point {\n\tdr := dist \/ EarthRadius\n\tbearing = bearing * math.Pi \/ 180.0\n\tlon1 := p.Longitude() * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2_part1 := math.Sin(lat1) * math.Cos(dr)\n\tlat2_part2 := math.Cos(lat1) * math.Sin(dr) * math.Cos(bearing)\n\tlat2 := math.Asin(lat2_part1 + lat2_part2)\n\tlon2_part1 := math.Sin(bearing) * math.Sin(dr) * math.Cos(lat1)\n\tlon2_part2 := math.Cos(dr) - (math.Sin(lat1) * math.Sin(lat2))\n\tlon2 := lon1 + math.Atan2(lon2_part1, lon2_part2)\n\tlon2 = math.Mod((lon2+3*math.Pi), (2*math.Pi)) - math.Pi\n\tlon2 = lon2 * 180.0 \/ math.Pi\n\tlat2 = lat2 * 180.0 \/ math.Pi\n\treturn NewPoint(lon2, lat2)\n}\n\n\/\/ BearingTo возвращает направление в градусах на указанную точку.\nfunc (p Point) BearingTo(p2 Point) float64 {\n\tdLon := (p2.Longitude() - p.Longitude()) * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2 := p2.Latitude() * math.Pi \/ 180.0\n\ty := math.Sin(dLon) * math.Cos(lat2)\n\tx := math.Cos(lat1)*math.Sin(lat2) -\n\t\tmath.Sin(lat1)*math.Cos(lat2)*math.Cos(dLon)\n\treturn math.Atan2(y, x) * 180.0 \/ math.Pi\n}\n\n\/\/ Distance возвращает дистанцию между двумя точками в метрах.\nfunc (p Point) Distance(p2 Point) float64 {\n\tdLon := (p2.Longitude() - p.Longitude()) * math.Pi \/ 180.0\n\tdLat := (p2.Latitude() - p.Latitude()) * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2 := p2.Latitude() * math.Pi \/ 180.0\n\ta := math.Sin(dLat\/2)*math.Sin(dLat\/2) +\n\t\tmath.Sin(dLon\/2)*math.Sin(dLon\/2)*math.Cos(lat1)*math.Cos(lat2)\n\treturn EarthRadius * 2 * math.Asin(math.Sqrt(a))\n}\n\n\/\/ Centroid возвращает цент окружности, содержащей все указанные точки.\nfunc Centroid(points ...Point) Point {\n\tswitch l := float64(len(points)); l {\n\tcase 0:\n\t\treturn Point{}\n\tcase 1:\n\t\treturn points[0]\n\tdefault:\n\t\tvar lon, lat float64\n\t\tfor _, point := range points {\n\t\t\tlon += point.Longitude()\n\t\t\tlat += point.Latitude()\n\t\t}\n\t\treturn Point{lon \/ l, lat \/ l}\n\t}\n}\n<commit_msg>JSON представление<commit_after>package geo\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\n\/\/ Point описывает координаты географической точки: долгота, широта. Именно такая последовательность\n\/\/ выбрана только потому, что это наиболее близко представлению координат на плоскости: x и y, а не\n\/\/ наоборот. Вторая причина — именно такой формат точек поддерживается в MongoDB, а данная\n\/\/ библиотека сделана в первую очередь именно для работы с этой базой данных.\ntype Point [2]float64\n\n\/\/ NewPoint возвращает новое описание точки с указанными координатами. Если координаты выходят\n\/\/ за допустимый диапазон данных для географических координат, то возникает panic.\nfunc NewPoint(lon, lat float64) Point {\n\tif lon < -180 || lon > 180 {\n\t\tpanic(\"bad longitude\")\n\t}\n\tif lat < -90 || lat > 90 {\n\t\tpanic(\"bad latitude\")\n\t}\n\treturn Point{lon, lat}\n}\n\n\/\/ Longitude возвращает долготу точки.\nfunc (p Point) Longitude() float64 {\n\treturn p[0]\n}\n\n\/\/ Latitude возвращает широту точки.\nfunc (p Point) Latitude() float64 {\n\treturn p[1]\n}\n\n\/\/ IsZero возвращает true, если обе координаты точки равны нулю.\nfunc (p Point) IsZero() bool {\n\treturn p[0] == 0 && p[1] == 0\n}\n\n\/\/ Geo возвращает представление точки в формате GeoJSON.\nfunc (p Point) Geo() *GeoJSON {\n\tif p.IsZero() {\n\t\treturn nil\n\t}\n\treturn &GeoJSON{\n\t\tType: \"Point\",\n\t\tCoordinates: p[:],\n\t}\n}\n\n\/\/ MarshalJSON отдает форматированное представление JSON.\nfunc (p Point) MarshalJSON() ([]byte, error) {\n\tif p.IsZero() {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn []byte(fmt.Sprintf(\"[%.6f, %.6f]\", p.Longitude(), p.Latitude()))\n}\n\nconst (\n\tEarthRadius float64 = 6378137.0 \/\/ радиус Земли в метрах\n)\n\n\/\/ Move возвращает новую точку, перемещенную от изначально на dist метров в направлении bearing\n\/\/ в градусах.\nfunc (p Point) Move(dist float64, bearing float64) Point {\n\tdr := dist \/ EarthRadius\n\tbearing = bearing * math.Pi \/ 180.0\n\tlon1 := p.Longitude() * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2_part1 := math.Sin(lat1) * math.Cos(dr)\n\tlat2_part2 := math.Cos(lat1) * math.Sin(dr) * math.Cos(bearing)\n\tlat2 := math.Asin(lat2_part1 + lat2_part2)\n\tlon2_part1 := math.Sin(bearing) * math.Sin(dr) * math.Cos(lat1)\n\tlon2_part2 := math.Cos(dr) - (math.Sin(lat1) * math.Sin(lat2))\n\tlon2 := lon1 + math.Atan2(lon2_part1, lon2_part2)\n\tlon2 = math.Mod((lon2+3*math.Pi), (2*math.Pi)) - math.Pi\n\tlon2 = lon2 * 180.0 \/ math.Pi\n\tlat2 = lat2 * 180.0 \/ math.Pi\n\treturn NewPoint(lon2, lat2)\n}\n\n\/\/ BearingTo возвращает направление в градусах на указанную точку.\nfunc (p Point) BearingTo(p2 Point) float64 {\n\tdLon := (p2.Longitude() - p.Longitude()) * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2 := p2.Latitude() * math.Pi \/ 180.0\n\ty := math.Sin(dLon) * math.Cos(lat2)\n\tx := math.Cos(lat1)*math.Sin(lat2) -\n\t\tmath.Sin(lat1)*math.Cos(lat2)*math.Cos(dLon)\n\treturn math.Atan2(y, x) * 180.0 \/ math.Pi\n}\n\n\/\/ Distance возвращает дистанцию между двумя точками в метрах.\nfunc (p Point) Distance(p2 Point) float64 {\n\tdLon := (p2.Longitude() - p.Longitude()) * math.Pi \/ 180.0\n\tdLat := (p2.Latitude() - p.Latitude()) * math.Pi \/ 180.0\n\tlat1 := p.Latitude() * math.Pi \/ 180.0\n\tlat2 := p2.Latitude() * math.Pi \/ 180.0\n\ta := math.Sin(dLat\/2)*math.Sin(dLat\/2) +\n\t\tmath.Sin(dLon\/2)*math.Sin(dLon\/2)*math.Cos(lat1)*math.Cos(lat2)\n\treturn EarthRadius * 2 * math.Asin(math.Sqrt(a))\n}\n\n\/\/ Centroid возвращает цент окружности, содержащей все указанные точки.\nfunc Centroid(points ...Point) Point {\n\tswitch l := float64(len(points)); l {\n\tcase 0:\n\t\treturn Point{}\n\tcase 1:\n\t\treturn points[0]\n\tdefault:\n\t\tvar lon, lat float64\n\t\tfor _, point := range points {\n\t\t\tlon += point.Longitude()\n\t\t\tlat += point.Latitude()\n\t\t}\n\t\treturn Point{lon \/ l, lat \/ l}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package framework\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\n\/\/ FieldData is the structure passed to the callback to handle a path\n\/\/ containing the populated parameters for fields. This should be used\n\/\/ instead of the raw (*vault.Request).Data to access data in a type-safe\n\/\/ way.\ntype FieldData struct {\n\tRaw map[string]interface{}\n\tSchema map[string]*FieldSchema\n}\n\n\/\/ Cycle through raw data and validate conversions in\n\/\/ the schema, so we don't get an error\/panic later when\n\/\/ trying to get data out. Data not in the schema is not\n\/\/ an error at this point, so we don't worry about it.\nfunc (d *FieldData) Validate() error {\n\tfor field, value := range d.Raw {\n\n\t\tschema, ok := d.Schema[field]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch schema.Type {\n\t\tcase TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:\n\t\t\t_, _, err := d.getPrimitive(field, schema)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error converting input %v for field %s: %s\", value, field, err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown field type %s for field %s\",\n\t\t\t\tschema.Type, field)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Get gets the value for the given field. If the key is an invalid field,\n\/\/ FieldData will panic. If you want a safer version of this method, use\n\/\/ GetOk. If the field k is not set, the default value (if set) will be\n\/\/ returned, otherwise the zero value will be returned.\nfunc (d *FieldData) Get(k string) interface{} {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"field %s not in the schema\", k))\n\t}\n\n\tvalue, ok := d.GetOk(k)\n\tif !ok {\n\t\tvalue = schema.DefaultOrZero()\n\t}\n\n\treturn value\n}\n\n\/\/ GetOk gets the value for the given field. The second return value\n\/\/ will be false if the key is invalid or the key is not set at all.\nfunc (d *FieldData) GetOk(k string) (interface{}, bool) {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tresult, ok, err := d.GetOkErr(k)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error reading %s: %s\", k, err))\n\t}\n\n\tif ok && result == nil {\n\t\tresult = schema.DefaultOrZero()\n\t}\n\n\treturn result, ok\n}\n\n\/\/ GetOkErr is the most conservative of all the Get methods. It returns\n\/\/ whether key is set or not, but also an error value. The error value is\n\/\/ non-nil if the field doesn't exist or there was an error parsing the\n\/\/ field value.\nfunc (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\treturn nil, false, fmt.Errorf(\"unknown field: %s\", k)\n\t}\n\n\tswitch schema.Type {\n\tcase TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:\n\t\treturn d.getPrimitive(k, schema)\n\tdefault:\n\t\treturn nil, false,\n\t\t\tfmt.Errorf(\"unknown field type %s for field %s\", schema.Type, k)\n\t}\n}\n\nfunc (d *FieldData) getPrimitive(\n\tk string, schema *FieldSchema) (interface{}, bool, error) {\n\traw, ok := d.Raw[k]\n\tif !ok {\n\t\treturn nil, false, nil\n\t}\n\n\tswitch schema.Type {\n\tcase TypeBool:\n\t\tvar result bool\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeInt:\n\t\tvar result int\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeString:\n\t\tvar result string\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeMap:\n\t\tvar result map[string]interface{}\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeDurationSecond:\n\t\tvar result int\n\t\tswitch inp := raw.(type) {\n\t\tcase nil:\n\t\t\treturn nil, false, nil\n\t\tcase int:\n\t\t\tresult = inp\n\t\tcase float32:\n\t\t\tresult = int(inp)\n\t\tcase float64:\n\t\t\tresult = int(inp)\n\t\tcase string:\n\t\t\t\/\/ Look for a suffix otherwise its a plain second value\n\t\t\tif strings.HasSuffix(inp, \"s\") || strings.HasSuffix(inp, \"m\") || strings.HasSuffix(inp, \"h\") {\n\t\t\t\tdur, err := time.ParseDuration(inp)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, true, err\n\t\t\t\t}\n\t\t\t\tresult = int(dur.Seconds())\n\t\t\t} else {\n\t\t\t\t\/\/ Plain integer\n\t\t\t\tval, err := strconv.ParseInt(inp, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, true, err\n\t\t\t\t}\n\t\t\t\tresult = int(val)\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn nil, false, fmt.Errorf(\"invalid input '%v'\", raw)\n\t\t}\n\t\treturn result, true, nil\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type: %s\", schema.Type))\n\t}\n}\n<commit_msg>Added GetDefaultOrZero method to FieldData<commit_after>package framework\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\n\/\/ FieldData is the structure passed to the callback to handle a path\n\/\/ containing the populated parameters for fields. This should be used\n\/\/ instead of the raw (*vault.Request).Data to access data in a type-safe\n\/\/ way.\ntype FieldData struct {\n\tRaw map[string]interface{}\n\tSchema map[string]*FieldSchema\n}\n\n\/\/ Cycle through raw data and validate conversions in\n\/\/ the schema, so we don't get an error\/panic later when\n\/\/ trying to get data out. Data not in the schema is not\n\/\/ an error at this point, so we don't worry about it.\nfunc (d *FieldData) Validate() error {\n\tfor field, value := range d.Raw {\n\n\t\tschema, ok := d.Schema[field]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch schema.Type {\n\t\tcase TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:\n\t\t\t_, _, err := d.getPrimitive(field, schema)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error converting input %v for field %s: %s\", value, field, err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown field type %s for field %s\",\n\t\t\t\tschema.Type, field)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Get gets the value for the given field. If the key is an invalid field,\n\/\/ FieldData will panic. If you want a safer version of this method, use\n\/\/ GetOk. If the field k is not set, the default value (if set) will be\n\/\/ returned, otherwise the zero value will be returned.\nfunc (d *FieldData) Get(k string) interface{} {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"field %s not in the schema\", k))\n\t}\n\n\tvalue, ok := d.GetOk(k)\n\tif !ok {\n\t\tvalue = schema.DefaultOrZero()\n\t}\n\n\treturn value\n}\n\n\/\/ GetDefaultOrZero gets the default value set on the schema for the given\n\/\/ field. If there is no default value set, the zero value of the type\n\/\/ will be returned.\nfunc (d *FieldData) GetDefaultOrZero(k string) interface{} {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"field %s not in the schema\", k))\n\t}\n\n\treturn schema.DefaultOrZero()\n}\n\n\/\/ GetOk gets the value for the given field. The second return value\n\/\/ will be false if the key is invalid or the key is not set at all.\nfunc (d *FieldData) GetOk(k string) (interface{}, bool) {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tresult, ok, err := d.GetOkErr(k)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error reading %s: %s\", k, err))\n\t}\n\n\tif ok && result == nil {\n\t\tresult = schema.DefaultOrZero()\n\t}\n\n\treturn result, ok\n}\n\n\/\/ GetOkErr is the most conservative of all the Get methods. It returns\n\/\/ whether key is set or not, but also an error value. The error value is\n\/\/ non-nil if the field doesn't exist or there was an error parsing the\n\/\/ field value.\nfunc (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {\n\tschema, ok := d.Schema[k]\n\tif !ok {\n\t\treturn nil, false, fmt.Errorf(\"unknown field: %s\", k)\n\t}\n\n\tswitch schema.Type {\n\tcase TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:\n\t\treturn d.getPrimitive(k, schema)\n\tdefault:\n\t\treturn nil, false,\n\t\t\tfmt.Errorf(\"unknown field type %s for field %s\", schema.Type, k)\n\t}\n}\n\nfunc (d *FieldData) getPrimitive(\n\tk string, schema *FieldSchema) (interface{}, bool, error) {\n\traw, ok := d.Raw[k]\n\tif !ok {\n\t\treturn nil, false, nil\n\t}\n\n\tswitch schema.Type {\n\tcase TypeBool:\n\t\tvar result bool\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeInt:\n\t\tvar result int\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeString:\n\t\tvar result string\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeMap:\n\t\tvar result map[string]interface{}\n\t\tif err := mapstructure.WeakDecode(raw, &result); err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\treturn result, true, nil\n\n\tcase TypeDurationSecond:\n\t\tvar result int\n\t\tswitch inp := raw.(type) {\n\t\tcase nil:\n\t\t\treturn nil, false, nil\n\t\tcase int:\n\t\t\tresult = inp\n\t\tcase float32:\n\t\t\tresult = int(inp)\n\t\tcase float64:\n\t\t\tresult = int(inp)\n\t\tcase string:\n\t\t\t\/\/ Look for a suffix otherwise its a plain second value\n\t\t\tif strings.HasSuffix(inp, \"s\") || strings.HasSuffix(inp, \"m\") || strings.HasSuffix(inp, \"h\") {\n\t\t\t\tdur, err := time.ParseDuration(inp)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, true, err\n\t\t\t\t}\n\t\t\t\tresult = int(dur.Seconds())\n\t\t\t} else {\n\t\t\t\t\/\/ Plain integer\n\t\t\t\tval, err := strconv.ParseInt(inp, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, true, err\n\t\t\t\t}\n\t\t\t\tresult = int(val)\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn nil, false, fmt.Errorf(\"invalid input '%v'\", raw)\n\t\t}\n\t\treturn result, true, nil\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type: %s\", schema.Type))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/serbe\/pool\"\n)\n\nfunc findProxy() {\n\tvar addedProxy int64\n\tdebugmsg(\"Start find proxy\")\n\tp := pool.New(numWorkers)\n\tp.SetTimeout(timeout)\n\tml := getMapLink()\n\tmp := newMapProxy()\n\tlist := getProxyListFromDB()\n\tmp.fillMapProxy(list)\n\tmp.loadProxyFromFile()\n\n\tdebugmsg(\"load links\", len(ml.values))\n\tdebugmsg(\"load proxies\", len(mp.values))\n\tdebugmsg(\"start add to pool\")\n\tp.SetTimeout(timeout)\n\tp.SetQuitTimeout(2000)\n\tfor _, link := range ml.values {\n\t\tif useAddLink || useTestLink || link.Iterate && time.Since(link.UpdateAt) > time.Duration(1)*time.Hour {\n\t\t\terr := p.Add(link.Hostname, nil)\n\t\t\tif err == nil {\n\t\t\t\taddedProxy++\n\t\t\t}\n\t\t\tchkErr(\"findProxy p.Add\", err)\n\t\t}\n\t}\n\tif addedProxy == 0 {\n\t\tdebugmsg(\"not added tasks to pool\")\n\t\treturn\n\t}\n\tdebugmsg(\"end add to pool, added\", p.GetAddedTasks(), \"links\")\n\tdebugmsg(\"start get from chan\")\n\tfor result := range p.ResultChan {\n\t\tif result.Error != nil {\n\t\t\terrmsg(\"result\", result.Error)\n\t\t\tcontinue\n\t\t}\n\t\tml.update(result.Hostname)\n\t\tlinks := ml.getNewLinksFromTask(result)\n\t\tnum := mp.numOfNewProxyInTask(result)\n\t\tif num > 0 {\n\t\t\tdebugmsg(\"find\", num, \"proxy in\", result.Hostname)\n\t\t\tif link, ok := ml.get(result.Hostname); ok {\n\t\t\t\tlink.Num = link.Num + num\n\t\t\t\tml.set(link)\n\t\t\t}\n\t\t}\n\t\tif !useNoAddLinks {\n\t\t\tfor _, l := range links {\n\t\t\t\tchkErr(\"findProxy add to pool\", p.Add(l.Hostname, nil))\n\t\t\t\tdebugmsg(\"add to pool\", l.Hostname)\n\t\t\t}\n\t\t} else {\n\t\t\tdebugmsg(\"find\", len(links), \"links in\", result.Hostname)\n\t\t}\n\t\taddedProxy = addedProxy + num\n\t}\n\tif !useTestLink {\n\t\tdebugmsg(\"save proxy\")\n\t\tml.saveAll()\n\t}\n\tdebugmsg(addedProxy, \"new proxy found\")\n\tdebugmsg(\"end findProxy\")\n}\n\nfunc checkProxy() {\n\tdebugmsg(\"start checkProxy\")\n\tvar (\n\t\tchecked int64\n\t\ttotalProxy int64\n\t\tanonProxy int64\n\t\terr error\n\t)\n\tlist := getProxyListFromDB()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tlistLen := len(list)\n\tdebugmsg(\"load proxies\", listLen)\n\nbreakCheckProxyLoop:\n\tfor j := 0; j < listLen; {\n\t\tmp := newMapProxy()\n\t\tvar r = 10000\n\t\tif j+10000 > listLen {\n\t\t\tr = listLen % 10000\n\t\t}\n\t\tfor i := 0; i < r; i++ {\n\t\t\tmp.set(list[j])\n\t\t\tj++\n\t\t}\n\t\tp := pool.New(numWorkers)\n\t\tp.SetTimeout(timeout)\n\t\ttargetURL := getTarget()\n\t\tmyIP, err = getExternalIP()\n\t\tif err != nil {\n\t\t\terrmsg(\"checkProxy getExternalIP\", err)\n\t\t\treturn\n\t\t}\n\t\tdebugmsg(\"start add to pool\")\n\t\tfor _, proxy := range mp.values {\n\t\t\tproxyURL, err := url.Parse(proxy.Hostname)\n\t\t\tchkErr(\"parse url\", err)\n\t\t\tchkErr(\"add to pool\", p.Add(targetURL, proxyURL))\n\t\t}\n\t\tdebugmsg(\"end add to pool\")\n\t\tp.EndWaitingTasks()\n\t\tif p.GetAddedTasks() == 0 {\n\t\t\tdebugmsg(\"no task added to pool\")\n\t\t\treturn\n\t\t}\n\tcheckProxyLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase task, ok := <-p.ResultChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tdebugmsg(\"break loop by close chan ResultChan\")\n\t\t\t\t\tbreak checkProxyLoop\n\t\t\t\t}\n\t\t\t\tchecked++\n\t\t\t\tproxy, isOk := mp.taskToProxy(task)\n\t\t\t\tif !isOk {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmp.set(proxy)\n\t\t\t\tif !useFUP {\n\t\t\t\t\tsaveProxy(proxy)\n\t\t\t\t}\n\t\t\t\tif proxy.IsWork {\n\t\t\t\t\tif useFUP {\n\t\t\t\t\t\tsaveProxy(proxy)\n\t\t\t\t\t}\n\t\t\t\t\ttotalProxy++\n\t\t\t\t\tlog.Printf(\"%d\/%d\/%d %-15v %-5v %-6v anon=%v\\n\",\n\t\t\t\t\t\tchecked,\n\t\t\t\t\t\tlistLen,\n\t\t\t\t\t\ttotalProxy,\n\t\t\t\t\t\ttask.Proxy.Hostname(),\n\t\t\t\t\t\ttask.Proxy.Port(),\n\t\t\t\t\t\ttask.Proxy.Scheme,\n\t\t\t\t\t\tproxy.IsAnon,\n\t\t\t\t\t)\n\t\t\t\t\tif proxy.IsAnon {\n\t\t\t\t\t\tanonProxy++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-c:\n\t\t\t\tdebugmsg(\"break loop by pressing ctrl+c\")\n\t\t\t\tbreak breakCheckProxyLoop\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"%d is good\\n\", totalProxy)\n\tlog.Printf(\"%d is anon\\n\", anonProxy)\n\tdebugmsg(\"end checkProxy\")\n}\n<commit_msg>less save<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/serbe\/pool\"\n)\n\nfunc findProxy() {\n\tvar addedProxy int64\n\tdebugmsg(\"Start find proxy\")\n\tp := pool.New(numWorkers)\n\tp.SetTimeout(timeout)\n\tml := getMapLink()\n\tmp := newMapProxy()\n\tlist := getProxyListFromDB()\n\tmp.fillMapProxy(list)\n\tmp.loadProxyFromFile()\n\n\tdebugmsg(\"load links\", len(ml.values))\n\tdebugmsg(\"load proxies\", len(mp.values))\n\tdebugmsg(\"start add to pool\")\n\tp.SetTimeout(timeout)\n\tp.SetQuitTimeout(2000)\n\tfor _, link := range ml.values {\n\t\tif useAddLink || useTestLink || link.Iterate && time.Since(link.UpdateAt) > time.Duration(1)*time.Hour {\n\t\t\terr := p.Add(link.Hostname, nil)\n\t\t\tif err == nil {\n\t\t\t\taddedProxy++\n\t\t\t}\n\t\t\tchkErr(\"findProxy p.Add\", err)\n\t\t}\n\t}\n\tif addedProxy == 0 {\n\t\tdebugmsg(\"not added tasks to pool\")\n\t\treturn\n\t}\n\tdebugmsg(\"end add to pool, added\", p.GetAddedTasks(), \"links\")\n\tdebugmsg(\"start get from chan\")\n\tfor result := range p.ResultChan {\n\t\tif result.Error != nil {\n\t\t\terrmsg(\"result\", result.Error)\n\t\t\tcontinue\n\t\t}\n\t\tml.update(result.Hostname)\n\t\tlinks := ml.getNewLinksFromTask(result)\n\t\tnum := mp.numOfNewProxyInTask(result)\n\t\tif num > 0 {\n\t\t\tdebugmsg(\"find\", num, \"proxy in\", result.Hostname)\n\t\t\tif link, ok := ml.get(result.Hostname); ok {\n\t\t\t\tlink.Num = link.Num + num\n\t\t\t\tml.set(link)\n\t\t\t}\n\t\t}\n\t\tif !useNoAddLinks {\n\t\t\tfor _, l := range links {\n\t\t\t\tchkErr(\"findProxy add to pool\", p.Add(l.Hostname, nil))\n\t\t\t\tdebugmsg(\"add to pool\", l.Hostname)\n\t\t\t}\n\t\t} else {\n\t\t\tdebugmsg(\"find\", len(links), \"links in\", result.Hostname)\n\t\t}\n\t\taddedProxy = addedProxy + num\n\t}\n\tif !useTestLink {\n\t\tdebugmsg(\"save proxy\")\n\t\tml.saveAll()\n\t}\n\tdebugmsg(addedProxy, \"new proxy found\")\n\tdebugmsg(\"end findProxy\")\n}\n\nfunc checkProxy() {\n\tdebugmsg(\"start checkProxy\")\n\tvar (\n\t\tchecked int64\n\t\ttotalProxy int64\n\t\tanonProxy int64\n\t\terr error\n\t)\n\tlist := getProxyListFromDB()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tlistLen := len(list)\n\tdebugmsg(\"load proxies\", listLen)\n\nbreakCheckProxyLoop:\n\tfor j := 0; j < listLen; {\n\t\tmp := newMapProxy()\n\t\tvar r = 10000\n\t\tif j+10000 > listLen {\n\t\t\tr = listLen % 10000\n\t\t}\n\t\tfor i := 0; i < r; i++ {\n\t\t\tmp.set(list[j])\n\t\t\tj++\n\t\t}\n\t\tp := pool.New(numWorkers)\n\t\tp.SetTimeout(timeout)\n\t\ttargetURL := getTarget()\n\t\tmyIP, err = getExternalIP()\n\t\tif err != nil {\n\t\t\terrmsg(\"checkProxy getExternalIP\", err)\n\t\t\treturn\n\t\t}\n\t\tdebugmsg(\"start add to pool\")\n\t\tfor _, proxy := range mp.values {\n\t\t\tproxyURL, err := url.Parse(proxy.Hostname)\n\t\t\tchkErr(\"parse url\", err)\n\t\t\tchkErr(\"add to pool\", p.Add(targetURL, proxyURL))\n\t\t}\n\t\tdebugmsg(\"end add to pool\")\n\t\tp.EndWaitingTasks()\n\t\tif p.GetAddedTasks() == 0 {\n\t\t\tdebugmsg(\"no task added to pool\")\n\t\t\treturn\n\t\t}\n\tcheckProxyLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase task, ok := <-p.ResultChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tdebugmsg(\"break loop by close chan ResultChan\")\n\t\t\t\t\tbreak checkProxyLoop\n\t\t\t\t}\n\t\t\t\tchecked++\n\t\t\t\tproxy, isOk := mp.taskToProxy(task)\n\t\t\t\tif !isOk {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmp.set(proxy)\n\t\t\t\tif !(useFUP || useTestScheme) {\n\t\t\t\t\tsaveProxy(proxy)\n\t\t\t\t}\n\t\t\t\tif proxy.IsWork {\n\t\t\t\t\tif useFUP || useTestScheme {\n\t\t\t\t\t\tsaveProxy(proxy)\n\t\t\t\t\t}\n\t\t\t\t\ttotalProxy++\n\t\t\t\t\tlog.Printf(\"%d\/%d\/%d %-15v %-5v %-6v anon=%v\\n\",\n\t\t\t\t\t\tchecked,\n\t\t\t\t\t\tlistLen,\n\t\t\t\t\t\ttotalProxy,\n\t\t\t\t\t\ttask.Proxy.Hostname(),\n\t\t\t\t\t\ttask.Proxy.Port(),\n\t\t\t\t\t\ttask.Proxy.Scheme,\n\t\t\t\t\t\tproxy.IsAnon,\n\t\t\t\t\t)\n\t\t\t\t\tif proxy.IsAnon {\n\t\t\t\t\t\tanonProxy++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-c:\n\t\t\t\tdebugmsg(\"break loop by pressing ctrl+c\")\n\t\t\t\tbreak breakCheckProxyLoop\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"%d is good\\n\", totalProxy)\n\tlog.Printf(\"%d is anon\\n\", anonProxy)\n\tdebugmsg(\"end checkProxy\")\n}\n<|endoftext|>"} {"text":"<commit_before>package candyjs\n\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"reflect\"\n)\n\n\/\/ ErrUndefinedProperty is throw when a property for a given proxied object on\n\/\/ javascript cannot be found, basically a valid method or field cannot found.\nvar ErrUndefinedProperty = errors.New(\"undefined property\")\n\nvar p = &proxy{}\n\ntype proxy struct{}\n\nfunc (p *proxy) has(t interface{}, k string) bool {\n\t_, err := p.getProperty(t, k)\n\treturn err != ErrUndefinedProperty\n}\n\nfunc (p *proxy) get(t interface{}, k string, recv interface{}) (interface{}, error) {\n\tf, err := p.getProperty(t, k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f.Interface(), nil\n}\n\nfunc (p *proxy) set(t interface{}, k string, v, recv interface{}) (bool, error) {\n\tf, err := p.getProperty(t, k)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif !f.CanSet() {\n\t\treturn false, nil\n\t}\n\n\tvalue := reflect.Zero(f.Type())\n\tif v != nil {\n\t\tvalue = reflect.ValueOf(castNumberToGoType(f.Kind(), v))\n\t}\n\n\tf.Set(value)\n\treturn true, nil\n}\n\nfunc (p *proxy) getProperty(t interface{}, key string) (reflect.Value, error) {\n\tv := reflect.ValueOf(t)\n\tr, found := p.getValueFromKind(key, v)\n\tif !found {\n\t\treturn r, ErrUndefinedProperty\n\t}\n\n\treturn r, nil\n}\n\nfunc (p *proxy) getValueFromKind(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar value reflect.Value\n\tvar found bool\n\tswitch v.Kind() {\n\tcase reflect.Ptr:\n\t\tvalue, found = p.getValueFromKindPtr(key, v)\n\tcase reflect.Struct:\n\t\tvalue, found = p.getValueFromKindStruct(key, v)\n\tcase reflect.Map:\n\t\tvalue, found = p.getValueFromKindMap(key, v)\n\t}\n\n\tif !found {\n\t\treturn p.getMethod(key, v)\n\t}\n\n\treturn value, found\n}\n\nfunc (p *proxy) getValueFromKindPtr(key string, v reflect.Value) (reflect.Value, bool) {\n\tr, found := p.getMethod(key, v)\n\tif !found {\n\t\treturn p.getValueFromKind(key, v.Elem())\n\t}\n\n\treturn r, found\n}\n\nfunc (p *proxy) getValueFromKindStruct(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar r reflect.Value\n\tfor _, name := range nameToGo(key) {\n\t\tr = v.FieldByName(name)\n\t\tif r.IsValid() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) getValueFromKindMap(key string, v reflect.Value) (reflect.Value, bool) {\n\tkeyValue := reflect.ValueOf(key)\n\tr := v.MapIndex(keyValue)\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) getMethod(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar r reflect.Value\n\tfor _, name := range nameToGo(key) {\n\t\tr = v.MethodByName(name)\n\t\tif r.IsValid() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) enumerate(t interface{}) (interface{}, error) {\n\treturn p.getPropertyNames(t)\n}\n\nfunc (p *proxy) getPropertyNames(t interface{}) ([]string, error) {\n\tv := reflect.ValueOf(t)\n\n\tvar names []string\n\tvar err error\n\tswitch v.Kind() {\n\tcase reflect.Ptr:\n\t\tnames, err = p.getPropertyNames(v.Elem().Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase reflect.Struct:\n\t\tcFields := v.NumField()\n\t\tfor i := 0; i < cFields; i++ {\n\t\t\tfieldName := v.Type().Field(i).Name\n\t\t\tif !isExported(fieldName) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnames = append(names, nameToJavaScript(fieldName))\n\t\t}\n\t}\n\n\tmCount := v.NumMethod()\n\tfor i := 0; i < mCount; i++ {\n\t\tmethodName := v.Type().Method(i).Name\n\t\tif !isExported(methodName) {\n\t\t\tcontinue\n\t\t}\n\n\t\tnames = append(names, nameToJavaScript(methodName))\n\t}\n\n\treturn names, nil\n}\n\nfunc castNumberToGoType(k reflect.Kind, v interface{}) interface{} {\n\tswitch k {\n\tcase reflect.Int:\n\t\tv = int(v.(float64))\n\tcase reflect.Int8:\n\t\tv = int8(v.(float64))\n\tcase reflect.Int16:\n\t\tv = int16(v.(float64))\n\tcase reflect.Int32:\n\t\tv = int32(v.(float64))\n\tcase reflect.Int64:\n\t\tv = int64(v.(float64))\n\tcase reflect.Uint:\n\t\tv = uint(v.(float64))\n\tcase reflect.Uint8:\n\t\tv = uint8(v.(float64))\n\tcase reflect.Uint16:\n\t\tv = uint16(v.(float64))\n\tcase reflect.Uint32:\n\t\tv = uint32(v.(float64))\n\tcase reflect.Uint64:\n\t\tv = uint64(v.(float64))\n\tcase reflect.Float32:\n\t\tv = float32(v.(float64))\n\t}\n\n\treturn v\n}\n<commit_msg>Added JSON marshalling to property setters.<commit_after>package candyjs\n\nimport \"C\"\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"reflect\"\n)\n\n\/\/ ErrUndefinedProperty is throw when a property for a given proxied object on\n\/\/ javascript cannot be found, basically a valid method or field cannot found.\nvar ErrUndefinedProperty = errors.New(\"undefined property\")\n\nvar p = &proxy{}\n\ntype proxy struct{}\n\nfunc (p *proxy) has(t interface{}, k string) bool {\n\t_, err := p.getProperty(t, k)\n\treturn err != ErrUndefinedProperty\n}\n\nfunc (p *proxy) get(t interface{}, k string, recv interface{}) (interface{}, error) {\n\tf, err := p.getProperty(t, k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f.Interface(), nil\n}\n\nfunc (p *proxy) set(t interface{}, k string, v, recv interface{}) (bool, error) {\n\tf, err := p.getProperty(t, k)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif !f.CanSet() {\n\t\treturn false, nil\n\t}\n\n\tvalue := reflect.Zero(f.Type())\n\tif v != nil {\n\t\tv, err = convert(f, v)\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tvalue = reflect.ValueOf(v)\n\t}\n\n\tf.Set(value)\n\treturn true, nil\n}\n\nfunc (p *proxy) getProperty(t interface{}, key string) (reflect.Value, error) {\n\tv := reflect.ValueOf(t)\n\tr, found := p.getValueFromKind(key, v)\n\tif !found {\n\t\treturn r, ErrUndefinedProperty\n\t}\n\n\treturn r, nil\n}\n\nfunc (p *proxy) getValueFromKind(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar value reflect.Value\n\tvar found bool\n\tswitch v.Kind() {\n\tcase reflect.Ptr:\n\t\tvalue, found = p.getValueFromKindPtr(key, v)\n\tcase reflect.Struct:\n\t\tvalue, found = p.getValueFromKindStruct(key, v)\n\tcase reflect.Map:\n\t\tvalue, found = p.getValueFromKindMap(key, v)\n\t}\n\n\tif !found {\n\t\treturn p.getMethod(key, v)\n\t}\n\n\treturn value, found\n}\n\nfunc (p *proxy) getValueFromKindPtr(key string, v reflect.Value) (reflect.Value, bool) {\n\tr, found := p.getMethod(key, v)\n\tif !found {\n\t\treturn p.getValueFromKind(key, v.Elem())\n\t}\n\n\treturn r, found\n}\n\nfunc (p *proxy) getValueFromKindStruct(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar r reflect.Value\n\tfor _, name := range nameToGo(key) {\n\t\tr = v.FieldByName(name)\n\t\tif r.IsValid() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) getValueFromKindMap(key string, v reflect.Value) (reflect.Value, bool) {\n\tkeyValue := reflect.ValueOf(key)\n\tr := v.MapIndex(keyValue)\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) getMethod(key string, v reflect.Value) (reflect.Value, bool) {\n\tvar r reflect.Value\n\tfor _, name := range nameToGo(key) {\n\t\tr = v.MethodByName(name)\n\t\tif r.IsValid() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn r, r.IsValid()\n}\n\nfunc (p *proxy) enumerate(t interface{}) (interface{}, error) {\n\treturn p.getPropertyNames(t)\n}\n\nfunc (p *proxy) getPropertyNames(t interface{}) ([]string, error) {\n\tv := reflect.ValueOf(t)\n\n\tvar names []string\n\tvar err error\n\tswitch v.Kind() {\n\tcase reflect.Ptr:\n\t\tnames, err = p.getPropertyNames(v.Elem().Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase reflect.Struct:\n\t\tcFields := v.NumField()\n\t\tfor i := 0; i < cFields; i++ {\n\t\t\tfieldName := v.Type().Field(i).Name\n\t\t\tif !isExported(fieldName) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnames = append(names, nameToJavaScript(fieldName))\n\t\t}\n\t}\n\n\tmCount := v.NumMethod()\n\tfor i := 0; i < mCount; i++ {\n\t\tmethodName := v.Type().Method(i).Name\n\t\tif !isExported(methodName) {\n\t\t\tcontinue\n\t\t}\n\n\t\tnames = append(names, nameToJavaScript(methodName))\n\t}\n\n\treturn names, nil\n}\n\nfunc convert(t reflect.Value, value interface{}) (interface{}, error) {\n\n\ts := reflect.ValueOf(value)\n\tif t.Type() == s.Type() {\n\t\treturn value, nil \/\/ no conversion required\n\t}\n\n\tvalue, ok := castNumberToGoType(t.Kind(), value)\n\tif ok {\n\t\treturn value, nil\n\t}\n\n\treturn convertUsingJSON(t, value)\n}\n\nfunc convertUsingJSON(t reflect.Value, value interface{}) (interface{}, error) {\n\n\ttv := reflect.New(t.Type()).Interface()\n\tjs, err := json.Marshal(value)\n\tif err == nil {\n\t\terr = json.Unmarshal([]byte(js), tv)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn reflect.ValueOf(tv).Elem().Interface(), nil\n}\n\nfunc castNumberToGoType(k reflect.Kind, v interface{}) (interface{}, bool) {\n\tswitch k {\n\tcase reflect.Int:\n\t\tv = int(v.(float64))\n\tcase reflect.Int8:\n\t\tv = int8(v.(float64))\n\tcase reflect.Int16:\n\t\tv = int16(v.(float64))\n\tcase reflect.Int32:\n\t\tv = int32(v.(float64))\n\tcase reflect.Int64:\n\t\tv = int64(v.(float64))\n\tcase reflect.Uint:\n\t\tv = uint(v.(float64))\n\tcase reflect.Uint8:\n\t\tv = uint8(v.(float64))\n\tcase reflect.Uint16:\n\t\tv = uint16(v.(float64))\n\tcase reflect.Uint32:\n\t\tv = uint32(v.(float64))\n\tcase reflect.Uint64:\n\t\tv = uint64(v.(float64))\n\tcase reflect.Float32:\n\t\tv = float32(v.(float64))\n\tdefault:\n\t\treturn v, false\n\t}\n\n\treturn v, true\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>[perf] Chunk writing ParamSets.<commit_after><|endoftext|>"} {"text":"<commit_before>package actor\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tErrTimeout = errors.New(\"timeout\")\n)\n\nfunc NewFuture(timeout time.Duration) *Future {\n\tfut := &Future{cond: sync.NewCond(&sync.Mutex{})}\n\n\tref := &FutureActorRef{f: fut}\n\tid := ProcessRegistry.getAutoId()\n\n\tpid, ok := ProcessRegistry.add(ref, id)\n\tif !ok {\n\t\tlog.Printf(\"[ACTOR] Failed to register future actorref '%v'\", id)\n\t\tlog.Println(id)\n\t}\n\n\tfut.pid = pid\n\tfut.t = time.AfterFunc(timeout, func() {\n\t\tfut.err = ErrTimeout\n\t\tref.Stop(pid)\n\t})\n\n\treturn fut\n}\n\ntype Future struct {\n\tdone bool\n\tpid *PID\n\tcond *sync.Cond\n\tresult interface{}\n\terr error\n\tt *time.Timer\n}\n\n\/\/ PID to the backing actor for the Future result\nfunc (f *Future) PID() *PID {\n\treturn f.pid\n}\n\n\/\/ PipeTo starts a go routine and waits for the `Future.Result()`, then sends the result to the given `PID`\nfunc (f *Future) PipeTo(pid *PID) {\n\tgo func() {\n\t\tres, err := f.Result()\n\t\tif err != nil {\n\t\t\tpid.Tell(err)\n\t\t} else {\n\t\t\tpid.Tell(res)\n\t\t}\n\t}()\n}\n\nfunc (f *Future) wait() {\n\tf.cond.L.Lock()\n\tif !f.done {\n\t\tf.cond.Wait()\n\t}\n\tf.cond.L.Unlock()\n}\n\nfunc (f *Future) Result() (interface{}, error) {\n\tf.wait()\n\treturn f.result, f.err\n}\n\nfunc (f *Future) Wait() error {\n\tf.wait()\n\treturn f.err\n}\n\n\/\/ FutureActorRef is a struct carrying a response PID and a channel where the response is placed\ntype FutureActorRef struct {\n\tf *Future\n}\n\nfunc (ref *FutureActorRef) SendUserMessage(pid *PID, message interface{}, sender *PID) {\n\tref.f.result = message\n\tref.Stop(pid)\n}\n\nfunc (ref *FutureActorRef) SendSystemMessage(pid *PID, message SystemMessage) {\n\tref.f.result = message\n\tref.Stop(pid)\n}\n\nfunc (ref *FutureActorRef) Stop(pid *PID) {\n\tref.f.cond.L.Lock()\n\tif ref.f.done {\n\t\tref.f.cond.L.Unlock()\n\t\treturn\n\t}\n\n\tref.f.done = true\n\tref.f.t.Stop()\n\tProcessRegistry.remove(pid)\n\n\tref.f.cond.L.Unlock()\n\tref.f.cond.Signal()\n}\n\nfunc (ref *FutureActorRef) Watch(pid *PID) {}\nfunc (ref *FutureActorRef) UnWatch(pid *PID) {}\n<commit_msg>update comment<commit_after>package actor\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tErrTimeout = errors.New(\"timeout\")\n)\n\nfunc NewFuture(timeout time.Duration) *Future {\n\tfut := &Future{cond: sync.NewCond(&sync.Mutex{})}\n\n\tref := &FutureActorRef{f: fut}\n\tid := ProcessRegistry.getAutoId()\n\n\tpid, ok := ProcessRegistry.add(ref, id)\n\tif !ok {\n\t\tlog.Printf(\"[ACTOR] Failed to register future actorref '%v'\", id)\n\t\tlog.Println(id)\n\t}\n\n\tfut.pid = pid\n\tfut.t = time.AfterFunc(timeout, func() {\n\t\tfut.err = ErrTimeout\n\t\tref.Stop(pid)\n\t})\n\n\treturn fut\n}\n\ntype Future struct {\n\tpid *PID\n\tcond *sync.Cond\n\t\/\/ protected by cond\n\tdone bool\n\tresult interface{}\n\terr error\n\tt *time.Timer\n}\n\n\/\/ PID to the backing actor for the Future result\nfunc (f *Future) PID() *PID {\n\treturn f.pid\n}\n\n\/\/ PipeTo starts a go routine and waits for the `Future.Result()`, then sends the result to the given `PID`\nfunc (f *Future) PipeTo(pid *PID) {\n\tgo func() {\n\t\tres, err := f.Result()\n\t\tif err != nil {\n\t\t\tpid.Tell(err)\n\t\t} else {\n\t\t\tpid.Tell(res)\n\t\t}\n\t}()\n}\n\nfunc (f *Future) wait() {\n\tf.cond.L.Lock()\n\tif !f.done {\n\t\tf.cond.Wait()\n\t}\n\tf.cond.L.Unlock()\n}\n\nfunc (f *Future) Result() (interface{}, error) {\n\tf.wait()\n\treturn f.result, f.err\n}\n\nfunc (f *Future) Wait() error {\n\tf.wait()\n\treturn f.err\n}\n\n\/\/ FutureActorRef is a struct carrying a response PID and a channel where the response is placed\ntype FutureActorRef struct {\n\tf *Future\n}\n\nfunc (ref *FutureActorRef) SendUserMessage(pid *PID, message interface{}, sender *PID) {\n\tref.f.result = message\n\tref.Stop(pid)\n}\n\nfunc (ref *FutureActorRef) SendSystemMessage(pid *PID, message SystemMessage) {\n\tref.f.result = message\n\tref.Stop(pid)\n}\n\nfunc (ref *FutureActorRef) Stop(pid *PID) {\n\tref.f.cond.L.Lock()\n\tif ref.f.done {\n\t\tref.f.cond.L.Unlock()\n\t\treturn\n\t}\n\n\tref.f.done = true\n\tref.f.t.Stop()\n\tProcessRegistry.remove(pid)\n\n\tref.f.cond.L.Unlock()\n\tref.f.cond.Signal()\n}\n\nfunc (ref *FutureActorRef) Watch(pid *PID) {}\nfunc (ref *FutureActorRef) UnWatch(pid *PID) {}\n<|endoftext|>"} {"text":"<commit_before>package bxmpp\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/jpillora\/backoff\"\n\t\"github.com\/matterbridge\/go-xmpp\"\n\t\"github.com\/rs\/xid\"\n)\n\ntype Bxmpp struct {\n\t*bridge.Config\n\n\tstartTime time.Time\n\txc *xmpp.Client\n\txmppMap map[string]string\n\tconnected bool\n\tsync.RWMutex\n\n\tavatarMap map[string]string\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\treturn &Bxmpp{\n\t\tConfig: cfg,\n\t\txmppMap: make(map[string]string),\n\t\tavatarMap: make(map[string]string),\n\t}\n}\n\nfunc (b *Bxmpp) Connect() error {\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tif err := b.createXMPP(); err != nil {\n\t\tb.Log.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.manageConnection()\n\treturn nil\n}\n\nfunc (b *Bxmpp) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bxmpp) JoinChannel(channel config.ChannelInfo) error {\n\tif channel.Options.Key != \"\" {\n\t\tb.Log.Debugf(\"using key %s for channel %s\", channel.Options.Key, channel.Name)\n\t\tb.xc.JoinProtectedMUC(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"), channel.Options.Key, xmpp.NoHistory, 0, nil)\n\t} else {\n\t\tb.xc.JoinMUCNoHistory(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"))\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) Send(msg config.Message) (string, error) {\n\t\/\/ should be fixed by using a cache instead of dropping\n\tif !b.Connected() {\n\t\treturn \"\", fmt.Errorf(\"bridge %s not connected, dropping message %#v to bridge\", b.Account, msg)\n\t}\n\t\/\/ ignore delete messages\n\tif msg.Event == config.EventMsgDelete {\n\t\treturn \"\", nil\n\t}\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tif msg.Event == config.EventAvatarDownload {\n\t\treturn b.cacheAvatar(&msg), nil\n\t}\n\n\t\/\/ Upload a file (in XMPP case send the upload URL because XMPP has no native upload support).\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tb.Log.Debugf(\"=> Sending attachement message %#v\", rmsg)\n\t\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: rmsg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tText: rmsg.Username + rmsg.Text,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Error(\"Unable to send message with share URL.\")\n\t\t\t}\n\t\t}\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn \"\", b.handleUploadFile(&msg)\n\t\t}\n\t}\n\n\tvar msgReplaceID string\n\tmsgID := xid.New().String()\n\tif msg.ID != \"\" {\n\t\tmsgID = msg.ID\n\t\tmsgReplaceID = msg.ID\n\t}\n\t\/\/ Post normal message.\n\tb.Log.Debugf(\"=> Sending message %#v\", msg)\n\tif _, err := b.xc.Send(xmpp.Chat{\n\t\tType: \"groupchat\",\n\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\tText: msg.Username + msg.Text,\n\t\tID: msgID,\n\t\tReplaceID: msgReplaceID,\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn msgID, nil\n}\n\nfunc (b *Bxmpp) createXMPP() error {\n\tif !strings.Contains(b.GetString(\"Jid\"), \"@\") {\n\t\treturn fmt.Errorf(\"the Jid %s doesn't contain an @\", b.GetString(\"Jid\"))\n\t}\n\ttc := &tls.Config{\n\t\tServerName: strings.Split(b.GetString(\"Jid\"), \"@\")[1],\n\t\tInsecureSkipVerify: b.GetBool(\"SkipTLSVerify\"), \/\/ nolint: gosec\n\t}\n\n\txmpp.DebugWriter = b.Log.Writer()\n\n\toptions := xmpp.Options{\n\t\tHost: b.GetString(\"Server\"),\n\t\tUser: b.GetString(\"Jid\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t\tNoTLS: true,\n\t\tStartTLS: true,\n\t\tTLSConfig: tc,\n\t\tDebug: b.GetBool(\"debug\"),\n\t\tSession: true,\n\t\tStatus: \"\",\n\t\tStatusMessage: \"\",\n\t\tResource: \"\",\n\t\tInsecureAllowUnencryptedAuth: false,\n\t}\n\tvar err error\n\tb.xc, err = options.NewClient()\n\treturn err\n}\n\nfunc (b *Bxmpp) manageConnection() {\n\tb.setConnected(true)\n\tinitial := true\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\t\/\/ Main connection loop. Each iteration corresponds to a successful\n\t\/\/ connection attempt and the subsequent handling of the connection.\n\tfor {\n\t\tif initial {\n\t\t\tinitial = false\n\t\t} else {\n\t\t\tb.Remote <- config.Message{\n\t\t\t\tUsername: \"system\",\n\t\t\t\tText: \"rejoin\",\n\t\t\t\tChannel: \"\",\n\t\t\t\tAccount: b.Account,\n\t\t\t\tEvent: config.EventRejoinChannels,\n\t\t\t}\n\t\t}\n\n\t\tif err := b.handleXMPP(); err != nil {\n\t\t\tb.Log.WithError(err).Error(\"Disconnected.\")\n\t\t\tb.setConnected(false)\n\t\t}\n\n\t\t\/\/ Reconnection loop using an exponential back-off strategy. We\n\t\t\/\/ only break out of the loop if we have successfully reconnected.\n\t\tfor {\n\t\t\td := bf.Duration()\n\t\t\tb.Log.Infof(\"Reconnecting in %s.\", d)\n\t\t\ttime.Sleep(d)\n\n\t\t\tb.Log.Infof(\"Reconnecting now.\")\n\t\t\tif err := b.createXMPP(); err == nil {\n\t\t\t\tb.setConnected(true)\n\t\t\t\tbf.Reset()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb.Log.Warn(\"Failed to reconnect.\")\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) xmppKeepAlive() chan bool {\n\tdone := make(chan bool)\n\tgo func() {\n\t\tticker := time.NewTicker(90 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tb.Log.Debugf(\"PING\")\n\t\t\t\tif err := b.xc.PingC2S(\"\", \"\"); err != nil {\n\t\t\t\t\tb.Log.Debugf(\"PING failed %#v\", err)\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn done\n}\n\nfunc (b *Bxmpp) handleXMPP() error {\n\tb.startTime = time.Now()\n\n\tdone := b.xmppKeepAlive()\n\tdefer close(done)\n\n\tfor {\n\t\tm, err := b.xc.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch v := m.(type) {\n\t\tcase xmpp.Chat:\n\t\t\tif v.Type == \"groupchat\" {\n\t\t\t\tb.Log.Debugf(\"== Receiving %#v\", v)\n\n\t\t\t\t\/\/ Skip invalid messages.\n\t\t\t\tif b.skipMessage(v) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar event string\n\t\t\t\tif strings.Contains(v.Text, \"has set the subject to:\") {\n\t\t\t\t\tevent = config.EventTopicChange\n\t\t\t\t}\n\n\t\t\t\tavatar := getAvatar(b.avatarMap, v.Remote, b.General)\n\t\t\t\tif avatar == \"\" {\n\t\t\t\t\tb.Log.Debugf(\"Requesting avatar data\")\n\t\t\t\t\tb.xc.AvatarRequestData(v.Remote)\n\t\t\t\t}\n\n\t\t\t\tmsgID := v.ID\n\t\t\t\tif v.ReplaceID != \"\" {\n\t\t\t\t\tmsgID = v.ReplaceID\n\t\t\t\t}\n\t\t\t\trmsg := config.Message{\n\t\t\t\t\tUsername: b.parseNick(v.Remote),\n\t\t\t\t\tText: v.Text,\n\t\t\t\t\tChannel: b.parseChannel(v.Remote),\n\t\t\t\t\tAccount: b.Account,\n\t\t\t\t\tAvatar: avatar,\n\t\t\t\t\tUserID: v.Remote,\n\t\t\t\t\tID: msgID,\n\t\t\t\t\tEvent: event,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we have an action event.\n\t\t\t\tvar ok bool\n\t\t\t\trmsg.Text, ok = b.replaceAction(rmsg.Text)\n\t\t\t\tif ok {\n\t\t\t\t\trmsg.Event = config.EventUserAction\n\t\t\t\t}\n\n\t\t\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", rmsg.Username, b.Account)\n\t\t\t\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\t\t\t\tb.Remote <- rmsg\n\t\t\t}\n\t\tcase xmpp.AvatarData:\n\t\t\tb.handleDownloadAvatar(v)\n\t\tcase xmpp.Presence:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) replaceAction(text string) (string, bool) {\n\tif strings.HasPrefix(text, \"\/me \") {\n\t\treturn strings.Replace(text, \"\/me \", \"\", -1), true\n\t}\n\treturn text, false\n}\n\n\/\/ handleUploadFile handles native upload of files\nfunc (b *Bxmpp) handleUploadFile(msg *config.Message) error {\n\tvar urlDesc string\n\n\tfor _, file := range msg.Extra[\"file\"] {\n\t\tfileInfo := file.(config.FileInfo)\n\t\tif fileInfo.Comment != \"\" {\n\t\t\tmsg.Text += fileInfo.Comment + \": \"\n\t\t}\n\t\tif fileInfo.URL != \"\" {\n\t\t\tmsg.Text = fileInfo.URL\n\t\t\tif fileInfo.Comment != \"\" {\n\t\t\t\tmsg.Text = fileInfo.Comment + \": \" + fileInfo.URL\n\t\t\t\turlDesc = fileInfo.Comment\n\t\t\t}\n\t\t}\n\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\tType: \"groupchat\",\n\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\tText: msg.Username + msg.Text,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.URL != \"\" {\n\t\t\tif _, err := b.xc.SendOOB(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tOoburl: fileInfo.URL,\n\t\t\t\tOobdesc: urlDesc,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Warn(\"Failed to send share URL.\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) parseNick(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) > 0 {\n\t\ts = strings.Split(s[1], \"\/\")\n\t\tif len(s) == 2 {\n\t\t\treturn s[1] \/\/ nick\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bxmpp) parseChannel(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) >= 2 {\n\t\treturn s[0] \/\/ channel\n\t}\n\treturn \"\"\n}\n\n\/\/ skipMessage skips messages that need to be skipped\nfunc (b *Bxmpp) skipMessage(message xmpp.Chat) bool {\n\t\/\/ skip messages from ourselves\n\tif b.parseNick(message.Remote) == b.GetString(\"Nick\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip empty messages\n\tif message.Text == \"\" {\n\t\treturn true\n\t}\n\n\t\/\/ skip subject messages\n\tif strings.Contains(message.Text, \"<\/subject>\") {\n\t\treturn true\n\t}\n\n\t\/\/ do not show subjects on connect #732\n\tif strings.Contains(message.Text, \"has set the subject to:\") && time.Since(b.startTime) < time.Second*5 {\n\t\treturn true\n\t}\n\n\t\/\/ skip delayed messages\n\treturn !message.Stamp.IsZero() && time.Since(message.Stamp).Minutes() > 5\n}\n\nfunc (b *Bxmpp) setConnected(state bool) {\n\tb.Lock()\n\tb.connected = state\n\tdefer b.Unlock()\n}\n\nfunc (b *Bxmpp) Connected() bool {\n\tb.RLock()\n\tdefer b.RUnlock()\n\treturn b.connected\n}\n<commit_msg>Implement xep-0245 (xmpp). Closes #1137 (#1144)<commit_after>package bxmpp\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\t\"github.com\/jpillora\/backoff\"\n\t\"github.com\/matterbridge\/go-xmpp\"\n\t\"github.com\/rs\/xid\"\n)\n\ntype Bxmpp struct {\n\t*bridge.Config\n\n\tstartTime time.Time\n\txc *xmpp.Client\n\txmppMap map[string]string\n\tconnected bool\n\tsync.RWMutex\n\n\tavatarMap map[string]string\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\treturn &Bxmpp{\n\t\tConfig: cfg,\n\t\txmppMap: make(map[string]string),\n\t\tavatarMap: make(map[string]string),\n\t}\n}\n\nfunc (b *Bxmpp) Connect() error {\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tif err := b.createXMPP(); err != nil {\n\t\tb.Log.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.manageConnection()\n\treturn nil\n}\n\nfunc (b *Bxmpp) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bxmpp) JoinChannel(channel config.ChannelInfo) error {\n\tif channel.Options.Key != \"\" {\n\t\tb.Log.Debugf(\"using key %s for channel %s\", channel.Options.Key, channel.Name)\n\t\tb.xc.JoinProtectedMUC(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"), channel.Options.Key, xmpp.NoHistory, 0, nil)\n\t} else {\n\t\tb.xc.JoinMUCNoHistory(channel.Name+\"@\"+b.GetString(\"Muc\"), b.GetString(\"Nick\"))\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) Send(msg config.Message) (string, error) {\n\t\/\/ should be fixed by using a cache instead of dropping\n\tif !b.Connected() {\n\t\treturn \"\", fmt.Errorf(\"bridge %s not connected, dropping message %#v to bridge\", b.Account, msg)\n\t}\n\t\/\/ ignore delete messages\n\tif msg.Event == config.EventMsgDelete {\n\t\treturn \"\", nil\n\t}\n\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tif msg.Event == config.EventAvatarDownload {\n\t\treturn b.cacheAvatar(&msg), nil\n\t}\n\n\t\/\/ Make a action \/me of the message, prepend the username with it.\n\t\/\/ https:\/\/xmpp.org\/extensions\/xep-0245.html\n\tif msg.Event == config.EventUserAction {\n\t\tmsg.Username = \"\/me \" + msg.Username\n\t}\n\n\t\/\/ Upload a file (in XMPP case send the upload URL because XMPP has no native upload support).\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tb.Log.Debugf(\"=> Sending attachement message %#v\", rmsg)\n\t\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: rmsg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tText: rmsg.Username + rmsg.Text,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Error(\"Unable to send message with share URL.\")\n\t\t\t}\n\t\t}\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn \"\", b.handleUploadFile(&msg)\n\t\t}\n\t}\n\n\tvar msgReplaceID string\n\tmsgID := xid.New().String()\n\tif msg.ID != \"\" {\n\t\tmsgID = msg.ID\n\t\tmsgReplaceID = msg.ID\n\t}\n\t\/\/ Post normal message.\n\tb.Log.Debugf(\"=> Sending message %#v\", msg)\n\tif _, err := b.xc.Send(xmpp.Chat{\n\t\tType: \"groupchat\",\n\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\tText: msg.Username + msg.Text,\n\t\tID: msgID,\n\t\tReplaceID: msgReplaceID,\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn msgID, nil\n}\n\nfunc (b *Bxmpp) createXMPP() error {\n\tif !strings.Contains(b.GetString(\"Jid\"), \"@\") {\n\t\treturn fmt.Errorf(\"the Jid %s doesn't contain an @\", b.GetString(\"Jid\"))\n\t}\n\ttc := &tls.Config{\n\t\tServerName: strings.Split(b.GetString(\"Jid\"), \"@\")[1],\n\t\tInsecureSkipVerify: b.GetBool(\"SkipTLSVerify\"), \/\/ nolint: gosec\n\t}\n\n\txmpp.DebugWriter = b.Log.Writer()\n\n\toptions := xmpp.Options{\n\t\tHost: b.GetString(\"Server\"),\n\t\tUser: b.GetString(\"Jid\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t\tNoTLS: true,\n\t\tStartTLS: true,\n\t\tTLSConfig: tc,\n\t\tDebug: b.GetBool(\"debug\"),\n\t\tSession: true,\n\t\tStatus: \"\",\n\t\tStatusMessage: \"\",\n\t\tResource: \"\",\n\t\tInsecureAllowUnencryptedAuth: false,\n\t}\n\tvar err error\n\tb.xc, err = options.NewClient()\n\treturn err\n}\n\nfunc (b *Bxmpp) manageConnection() {\n\tb.setConnected(true)\n\tinitial := true\n\tbf := &backoff.Backoff{\n\t\tMin: time.Second,\n\t\tMax: 5 * time.Minute,\n\t\tJitter: true,\n\t}\n\n\t\/\/ Main connection loop. Each iteration corresponds to a successful\n\t\/\/ connection attempt and the subsequent handling of the connection.\n\tfor {\n\t\tif initial {\n\t\t\tinitial = false\n\t\t} else {\n\t\t\tb.Remote <- config.Message{\n\t\t\t\tUsername: \"system\",\n\t\t\t\tText: \"rejoin\",\n\t\t\t\tChannel: \"\",\n\t\t\t\tAccount: b.Account,\n\t\t\t\tEvent: config.EventRejoinChannels,\n\t\t\t}\n\t\t}\n\n\t\tif err := b.handleXMPP(); err != nil {\n\t\t\tb.Log.WithError(err).Error(\"Disconnected.\")\n\t\t\tb.setConnected(false)\n\t\t}\n\n\t\t\/\/ Reconnection loop using an exponential back-off strategy. We\n\t\t\/\/ only break out of the loop if we have successfully reconnected.\n\t\tfor {\n\t\t\td := bf.Duration()\n\t\t\tb.Log.Infof(\"Reconnecting in %s.\", d)\n\t\t\ttime.Sleep(d)\n\n\t\t\tb.Log.Infof(\"Reconnecting now.\")\n\t\t\tif err := b.createXMPP(); err == nil {\n\t\t\t\tb.setConnected(true)\n\t\t\t\tbf.Reset()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb.Log.Warn(\"Failed to reconnect.\")\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) xmppKeepAlive() chan bool {\n\tdone := make(chan bool)\n\tgo func() {\n\t\tticker := time.NewTicker(90 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tb.Log.Debugf(\"PING\")\n\t\t\t\tif err := b.xc.PingC2S(\"\", \"\"); err != nil {\n\t\t\t\t\tb.Log.Debugf(\"PING failed %#v\", err)\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn done\n}\n\nfunc (b *Bxmpp) handleXMPP() error {\n\tb.startTime = time.Now()\n\n\tdone := b.xmppKeepAlive()\n\tdefer close(done)\n\n\tfor {\n\t\tm, err := b.xc.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch v := m.(type) {\n\t\tcase xmpp.Chat:\n\t\t\tif v.Type == \"groupchat\" {\n\t\t\t\tb.Log.Debugf(\"== Receiving %#v\", v)\n\n\t\t\t\t\/\/ Skip invalid messages.\n\t\t\t\tif b.skipMessage(v) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar event string\n\t\t\t\tif strings.Contains(v.Text, \"has set the subject to:\") {\n\t\t\t\t\tevent = config.EventTopicChange\n\t\t\t\t}\n\n\t\t\t\tavatar := getAvatar(b.avatarMap, v.Remote, b.General)\n\t\t\t\tif avatar == \"\" {\n\t\t\t\t\tb.Log.Debugf(\"Requesting avatar data\")\n\t\t\t\t\tb.xc.AvatarRequestData(v.Remote)\n\t\t\t\t}\n\n\t\t\t\tmsgID := v.ID\n\t\t\t\tif v.ReplaceID != \"\" {\n\t\t\t\t\tmsgID = v.ReplaceID\n\t\t\t\t}\n\t\t\t\trmsg := config.Message{\n\t\t\t\t\tUsername: b.parseNick(v.Remote),\n\t\t\t\t\tText: v.Text,\n\t\t\t\t\tChannel: b.parseChannel(v.Remote),\n\t\t\t\t\tAccount: b.Account,\n\t\t\t\t\tAvatar: avatar,\n\t\t\t\t\tUserID: v.Remote,\n\t\t\t\t\tID: msgID,\n\t\t\t\t\tEvent: event,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we have an action event.\n\t\t\t\tvar ok bool\n\t\t\t\trmsg.Text, ok = b.replaceAction(rmsg.Text)\n\t\t\t\tif ok {\n\t\t\t\t\trmsg.Event = config.EventUserAction\n\t\t\t\t}\n\n\t\t\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", rmsg.Username, b.Account)\n\t\t\t\tb.Log.Debugf(\"<= Message is %#v\", rmsg)\n\t\t\t\tb.Remote <- rmsg\n\t\t\t}\n\t\tcase xmpp.AvatarData:\n\t\t\tb.handleDownloadAvatar(v)\n\t\tcase xmpp.Presence:\n\t\t\t\/\/ Do nothing.\n\t\t}\n\t}\n}\n\nfunc (b *Bxmpp) replaceAction(text string) (string, bool) {\n\tif strings.HasPrefix(text, \"\/me \") {\n\t\treturn strings.Replace(text, \"\/me \", \"\", -1), true\n\t}\n\treturn text, false\n}\n\n\/\/ handleUploadFile handles native upload of files\nfunc (b *Bxmpp) handleUploadFile(msg *config.Message) error {\n\tvar urlDesc string\n\n\tfor _, file := range msg.Extra[\"file\"] {\n\t\tfileInfo := file.(config.FileInfo)\n\t\tif fileInfo.Comment != \"\" {\n\t\t\tmsg.Text += fileInfo.Comment + \": \"\n\t\t}\n\t\tif fileInfo.URL != \"\" {\n\t\t\tmsg.Text = fileInfo.URL\n\t\t\tif fileInfo.Comment != \"\" {\n\t\t\t\tmsg.Text = fileInfo.Comment + \": \" + fileInfo.URL\n\t\t\t\turlDesc = fileInfo.Comment\n\t\t\t}\n\t\t}\n\t\tif _, err := b.xc.Send(xmpp.Chat{\n\t\t\tType: \"groupchat\",\n\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\tText: msg.Username + msg.Text,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.URL != \"\" {\n\t\t\tif _, err := b.xc.SendOOB(xmpp.Chat{\n\t\t\t\tType: \"groupchat\",\n\t\t\t\tRemote: msg.Channel + \"@\" + b.GetString(\"Muc\"),\n\t\t\t\tOoburl: fileInfo.URL,\n\t\t\t\tOobdesc: urlDesc,\n\t\t\t}); err != nil {\n\t\t\t\tb.Log.WithError(err).Warn(\"Failed to send share URL.\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *Bxmpp) parseNick(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) > 0 {\n\t\ts = strings.Split(s[1], \"\/\")\n\t\tif len(s) == 2 {\n\t\t\treturn s[1] \/\/ nick\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bxmpp) parseChannel(remote string) string {\n\ts := strings.Split(remote, \"@\")\n\tif len(s) >= 2 {\n\t\treturn s[0] \/\/ channel\n\t}\n\treturn \"\"\n}\n\n\/\/ skipMessage skips messages that need to be skipped\nfunc (b *Bxmpp) skipMessage(message xmpp.Chat) bool {\n\t\/\/ skip messages from ourselves\n\tif b.parseNick(message.Remote) == b.GetString(\"Nick\") {\n\t\treturn true\n\t}\n\n\t\/\/ skip empty messages\n\tif message.Text == \"\" {\n\t\treturn true\n\t}\n\n\t\/\/ skip subject messages\n\tif strings.Contains(message.Text, \"<\/subject>\") {\n\t\treturn true\n\t}\n\n\t\/\/ do not show subjects on connect #732\n\tif strings.Contains(message.Text, \"has set the subject to:\") && time.Since(b.startTime) < time.Second*5 {\n\t\treturn true\n\t}\n\n\t\/\/ skip delayed messages\n\treturn !message.Stamp.IsZero() && time.Since(message.Stamp).Minutes() > 5\n}\n\nfunc (b *Bxmpp) setConnected(state bool) {\n\tb.Lock()\n\tb.connected = state\n\tdefer b.Unlock()\n}\n\nfunc (b *Bxmpp) Connected() bool {\n\tb.RLock()\n\tdefer b.RUnlock()\n\treturn b.connected\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lib\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/client\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/fakes\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/common\/testing\/testfs\"\n)\n\nfunc TestArchiveDownload(t *testing.T) {\n\tctx := context.Background()\n\n\tConvey(`Upload and download`, t, func() {\n\t\tfakeEnv, cleanup := fakes.NewTestEnv(t)\n\t\tt.Cleanup(cleanup)\n\n\t\tnewCasClient = func(ctx context.Context, instance string, opts auth.Options, readOnly bool) (*client.Client, error) {\n\t\t\treturn fakeEnv.Server.NewTestClient(ctx)\n\t\t}\n\n\t\tuploaded := t.TempDir()\n\t\tlayout := map[string]string{\n\t\t\t\"a\": \"a\",\n\t\t\t\"b\/c\": \"bc\",\n\t\t\t\"empty\/\": \"\",\n\t\t}\n\t\tSo(testfs.Build(uploaded, layout), ShouldBeNil)\n\n\t\tvar ar archiveRun\n\t\tar.dumpDigest = filepath.Join(t.TempDir(), \"digest\")\n\t\tSo(ar.paths.Set(uploaded+\":.\"), ShouldBeNil)\n\t\tSo(ar.doArchive(ctx), ShouldBeNil)\n\n\t\tdigest, err := ioutil.ReadFile(ar.dumpDigest)\n\t\tSo(err, ShouldBeNil)\n\n\t\tvar dr downloadRun\n\t\tdr.digest = string(digest)\n\t\tdr.dir = t.TempDir()\n\t\terr = dr.doDownload(ctx)\n\t\tSo(err, ShouldBeNil)\n\n\t\tdownloaded, err := testfs.Collect(dr.dir)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(downloaded, ShouldResemble, layout)\n\t})\n}\n<commit_msg>[cas] run test in parallel<commit_after>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lib\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/client\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/fakes\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/common\/testing\/testfs\"\n)\n\nfunc TestArchiveDownload(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tConvey(`Upload and download`, t, func() {\n\t\tfakeEnv, cleanup := fakes.NewTestEnv(t)\n\t\tt.Cleanup(cleanup)\n\n\t\tnewCasClient = func(ctx context.Context, instance string, opts auth.Options, readOnly bool) (*client.Client, error) {\n\t\t\treturn fakeEnv.Server.NewTestClient(ctx)\n\t\t}\n\n\t\tuploaded := t.TempDir()\n\t\tlayout := map[string]string{\n\t\t\t\"a\": \"a\",\n\t\t\t\"b\/c\": \"bc\",\n\t\t\t\"empty\/\": \"\",\n\t\t}\n\t\tSo(testfs.Build(uploaded, layout), ShouldBeNil)\n\n\t\tvar ar archiveRun\n\t\tar.dumpDigest = filepath.Join(t.TempDir(), \"digest\")\n\t\tSo(ar.paths.Set(uploaded+\":.\"), ShouldBeNil)\n\t\tSo(ar.doArchive(ctx), ShouldBeNil)\n\n\t\tdigest, err := ioutil.ReadFile(ar.dumpDigest)\n\t\tSo(err, ShouldBeNil)\n\n\t\tvar dr downloadRun\n\t\tdr.digest = string(digest)\n\t\tdr.dir = t.TempDir()\n\t\terr = dr.doDownload(ctx)\n\t\tSo(err, ShouldBeNil)\n\n\t\tdownloaded, err := testfs.Collect(dr.dir)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(downloaded, ShouldResemble, layout)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage chart\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/addon\/tiller\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/config\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/log\"\n)\n\n\/\/ Chart is a generic Helm chart addon for the test environment\ntype Chart struct {\n\tconfig *config.Config\n\ttillerDetails *tiller.Details\n\t\/\/ temporary directory used as the --home flag to Helm\n\thome string\n\n\t\/\/ Tiller is the tiller instance to submit the release to\n\tTiller *tiller.Tiller\n\n\t\/\/ ReleaseName for this Helm release\n\t\/\/ `helm install --name {{ReleaseName}}`\n\tReleaseName string\n\n\t\/\/ Namespace for the Helm release\n\t\/\/ `helm install --namespace {{Namespace}}`\n\tNamespace string\n\n\t\/\/ ChartName is the name of the chart to deploy\n\t\/\/ `helm install {{ChartName}}``\n\tChartName string\n\n\t\/\/ ChartVersion is the version of the chart to deploy\n\t\/\/ `helm install --version {{ChartVersion}}`\n\tChartVersion string\n\n\t\/\/ Vars are additional --set arguments for helm install\n\t\/\/ `helm install --set {{Vars[0].Key}}={{Vars[0].Value}} --set {{Vars[1].Key}}={{Vars[1].Value}} ...`\n\tVars []StringTuple\n\n\t\/\/ Values is a list of paths to additional values.yaml files to include\n\t\/\/ `helm install --values {{Values[0]}} --values {{Values[1]}} ...`\n\tValues []string\n\n\t\/\/ If UpdateDeps is true, 'helm dep update' will be run against the chart\n\t\/\/ before installing.\n\t\/\/ This should only be set to true when the ChartName is a local path on disk.\n\tUpdateDeps bool\n}\n\n\/\/ StringTuple is a tuple of strings, used to create ordered maps\ntype StringTuple struct {\n\tKey string\n\tValue string\n}\n\n\/\/ Details return the details about the Tiller instance deployed\ntype Details struct {\n\t\/\/ Helm chart release name\n\tReleaseName string\n\n\t\/\/ Namespace that Tiller has been deployed into\n\tNamespace string\n}\n\nfunc (c *Chart) Setup(cfg *config.Config) error {\n\tvar err error\n\n\tc.config = cfg\n\tif c.config.Addons.Helm.Path == \"\" {\n\t\treturn fmt.Errorf(\"--helm-binary-path must be set\")\n\t}\n\tif c.Tiller == nil {\n\t\treturn fmt.Errorf(\"tiller base addon must be provided\")\n\t}\n\tc.tillerDetails, err = c.Tiller.Details()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.home, err = ioutil.TempDir(\"\", \"helm-chart-install\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Provision an instance of tiller-deploy\nfunc (c *Chart) Provision() error {\n\terr := c.runHelmClientInit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error running 'helm init': %v\", err)\n\t}\n\n\tif c.UpdateDeps {\n\t\terr := c.runDepUpdate()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating helm chart dependencies: %v\", err)\n\t\t}\n\t}\n\n\terr = c.runInstall()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error install helm chart: %v\", err)\n\t}\n\n\terr = c.Tiller.Base.Details().Helper().WaitForAllPodsRunningInNamespace(c.Namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chart) runHelmClientInit() error {\n\terr := c.buildHelmCmd(\"init\", \"--client-only\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Chart) runDepUpdate() error {\n\terr := c.buildHelmCmd(\"dep\", \"update\", c.ChartName).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Chart) runInstall() error {\n\targs := []string{\"install\", c.ChartName,\n\t\t\"--wait\",\n\t\t\"--namespace\", c.Namespace,\n\t\t\"--name\", c.ReleaseName,\n\t\t\"--version\", c.ChartVersion}\n\n\tfor _, v := range c.Values {\n\t\targs = append(args, \"--values\", v)\n\t}\n\n\tfor _, s := range c.Vars {\n\t\targs = append(args, \"--set\", fmt.Sprintf(\"%s=%s\", s.Key, s.Value))\n\t}\n\n\tcmd := c.buildHelmCmd(args...)\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chart) buildHelmCmd(args ...string) *exec.Cmd {\n\targs = append([]string{\n\t\t\"--home\", c.home,\n\t\t\"--kubeconfig\", c.tillerDetails.KubeConfig,\n\t\t\"--kube-context\", c.tillerDetails.KubeContext,\n\t\t\"--tiller-namespace\", c.tillerDetails.Namespace,\n\t}, args...)\n\tcmd := exec.Command(c.config.Addons.Helm.Path, args...)\n\tcmd.Stdout = log.Writer\n\tcmd.Stderr = log.Writer\n\treturn cmd\n}\n\nfunc (c *Chart) getHelmVersion() (string, error) {\n\tcmd := c.buildHelmCmd(\"version\", \"--template\", \"{{.Client.Version}}\")\n\tcmd.Stdout = nil\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer out.Close()\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toutBytes, err := ioutil.ReadAll(out)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(outBytes), nil\n}\n\n\/\/ Deprovision the deployed instance of tiller-deploy\nfunc (c *Chart) Deprovision() error {\n\terr := c.buildHelmCmd(\"delete\", \"--purge\", c.ReleaseName).Run()\n\tif err != nil {\n\t\t\/\/ Ignore deprovisioning errors\n\t\t\/\/ TODO: only ignore failed to delete because it doesn't exist errors\n\t\treturn nil\n\t}\n\n\t\/\/ attempt to cleanup\n\tos.RemoveAll(c.home)\n\n\t\/\/ TODO: delete namespace manually too\n\treturn nil\n}\n\n\/\/ Details must be possible to compute without Provision being called if we want\n\/\/ to be able to provision global\/shared instances of Tiller.\nfunc (c *Chart) Details() (*Details, error) {\n\td := &Details{\n\t\tReleaseName: c.ReleaseName,\n\t\tNamespace: c.Namespace,\n\t}\n\n\treturn d, nil\n}\n\nfunc (c *Chart) SupportsGlobal() bool {\n\t\/\/ We can't run in global mode if the release name is not set, as there's\n\t\/\/ no way for us to communicate the generated release name to other test\n\t\/\/ runners when running in parallel mode.\n\tif c.ReleaseName == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (c *Chart) Logs() (map[string]string, error) {\n\tkc := c.Tiller.Base.Details().KubeClient\n\tpods, err := kc.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: \"release=\" + c.ReleaseName})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := make(map[string]string)\n\tfor _, pod := range pods.Items {\n\t\t\/\/ Only grab logs from the first container in the pod\n\t\t\/\/ TODO: grab logs from all containers\n\t\tcontainerName := pod.Spec.Containers[0].Name\n\t\tresp := kc.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: containerName,\n\t\t}).Do()\n\n\t\terr := resp.Error()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogs, err := resp.Raw()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toutPath := path.Join(c.Namespace, pod.Name)\n\t\tout[outPath] = string(logs)\n\t}\n\n\treturn out, nil\n}\n<commit_msg>Fix fetching logs in e2e test framework<commit_after>\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage chart\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/addon\/tiller\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/config\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/log\"\n)\n\n\/\/ Chart is a generic Helm chart addon for the test environment\ntype Chart struct {\n\tconfig *config.Config\n\ttillerDetails *tiller.Details\n\t\/\/ temporary directory used as the --home flag to Helm\n\thome string\n\n\t\/\/ Tiller is the tiller instance to submit the release to\n\tTiller *tiller.Tiller\n\n\t\/\/ ReleaseName for this Helm release\n\t\/\/ `helm install --name {{ReleaseName}}`\n\tReleaseName string\n\n\t\/\/ Namespace for the Helm release\n\t\/\/ `helm install --namespace {{Namespace}}`\n\tNamespace string\n\n\t\/\/ ChartName is the name of the chart to deploy\n\t\/\/ `helm install {{ChartName}}``\n\tChartName string\n\n\t\/\/ ChartVersion is the version of the chart to deploy\n\t\/\/ `helm install --version {{ChartVersion}}`\n\tChartVersion string\n\n\t\/\/ Vars are additional --set arguments for helm install\n\t\/\/ `helm install --set {{Vars[0].Key}}={{Vars[0].Value}} --set {{Vars[1].Key}}={{Vars[1].Value}} ...`\n\tVars []StringTuple\n\n\t\/\/ Values is a list of paths to additional values.yaml files to include\n\t\/\/ `helm install --values {{Values[0]}} --values {{Values[1]}} ...`\n\tValues []string\n\n\t\/\/ If UpdateDeps is true, 'helm dep update' will be run against the chart\n\t\/\/ before installing.\n\t\/\/ This should only be set to true when the ChartName is a local path on disk.\n\tUpdateDeps bool\n}\n\n\/\/ StringTuple is a tuple of strings, used to create ordered maps\ntype StringTuple struct {\n\tKey string\n\tValue string\n}\n\n\/\/ Details return the details about the Tiller instance deployed\ntype Details struct {\n\t\/\/ Helm chart release name\n\tReleaseName string\n\n\t\/\/ Namespace that Tiller has been deployed into\n\tNamespace string\n}\n\nfunc (c *Chart) Setup(cfg *config.Config) error {\n\tvar err error\n\n\tc.config = cfg\n\tif c.config.Addons.Helm.Path == \"\" {\n\t\treturn fmt.Errorf(\"--helm-binary-path must be set\")\n\t}\n\tif c.Tiller == nil {\n\t\treturn fmt.Errorf(\"tiller base addon must be provided\")\n\t}\n\tc.tillerDetails, err = c.Tiller.Details()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.home, err = ioutil.TempDir(\"\", \"helm-chart-install\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Provision an instance of tiller-deploy\nfunc (c *Chart) Provision() error {\n\terr := c.runHelmClientInit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error running 'helm init': %v\", err)\n\t}\n\n\tif c.UpdateDeps {\n\t\terr := c.runDepUpdate()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating helm chart dependencies: %v\", err)\n\t\t}\n\t}\n\n\terr = c.runInstall()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error install helm chart: %v\", err)\n\t}\n\n\terr = c.Tiller.Base.Details().Helper().WaitForAllPodsRunningInNamespace(c.Namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chart) runHelmClientInit() error {\n\terr := c.buildHelmCmd(\"init\", \"--client-only\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Chart) runDepUpdate() error {\n\terr := c.buildHelmCmd(\"dep\", \"update\", c.ChartName).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Chart) runInstall() error {\n\targs := []string{\"install\", c.ChartName,\n\t\t\"--wait\",\n\t\t\"--namespace\", c.Namespace,\n\t\t\"--name\", c.ReleaseName,\n\t\t\"--version\", c.ChartVersion}\n\n\tfor _, v := range c.Values {\n\t\targs = append(args, \"--values\", v)\n\t}\n\n\tfor _, s := range c.Vars {\n\t\targs = append(args, \"--set\", fmt.Sprintf(\"%s=%s\", s.Key, s.Value))\n\t}\n\n\tcmd := c.buildHelmCmd(args...)\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chart) buildHelmCmd(args ...string) *exec.Cmd {\n\targs = append([]string{\n\t\t\"--home\", c.home,\n\t\t\"--kubeconfig\", c.tillerDetails.KubeConfig,\n\t\t\"--kube-context\", c.tillerDetails.KubeContext,\n\t\t\"--tiller-namespace\", c.tillerDetails.Namespace,\n\t}, args...)\n\tcmd := exec.Command(c.config.Addons.Helm.Path, args...)\n\tcmd.Stdout = log.Writer\n\tcmd.Stderr = log.Writer\n\treturn cmd\n}\n\nfunc (c *Chart) getHelmVersion() (string, error) {\n\tcmd := c.buildHelmCmd(\"version\", \"--template\", \"{{.Client.Version}}\")\n\tcmd.Stdout = nil\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer out.Close()\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toutBytes, err := ioutil.ReadAll(out)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(outBytes), nil\n}\n\n\/\/ Deprovision the deployed instance of tiller-deploy\nfunc (c *Chart) Deprovision() error {\n\terr := c.buildHelmCmd(\"delete\", \"--purge\", c.ReleaseName).Run()\n\tif err != nil {\n\t\t\/\/ Ignore deprovisioning errors\n\t\t\/\/ TODO: only ignore failed to delete because it doesn't exist errors\n\t\treturn nil\n\t}\n\n\t\/\/ attempt to cleanup\n\tos.RemoveAll(c.home)\n\n\t\/\/ TODO: delete namespace manually too\n\treturn nil\n}\n\n\/\/ Details must be possible to compute without Provision being called if we want\n\/\/ to be able to provision global\/shared instances of Tiller.\nfunc (c *Chart) Details() (*Details, error) {\n\td := &Details{\n\t\tReleaseName: c.ReleaseName,\n\t\tNamespace: c.Namespace,\n\t}\n\n\treturn d, nil\n}\n\nfunc (c *Chart) SupportsGlobal() bool {\n\t\/\/ We can't run in global mode if the release name is not set, as there's\n\t\/\/ no way for us to communicate the generated release name to other test\n\t\/\/ runners when running in parallel mode.\n\tif c.ReleaseName == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (c *Chart) Logs() (map[string]string, error) {\n\tkc := c.Tiller.Base.Details().KubeClient\n\toldLabelPods, err := kc.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: \"release=\" + c.ReleaseName})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ also check pods with the new style labels used in the cert-manager chart\n\tnewLabelPods, err := kc.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{LabelSelector: \"app.kubernetes.io\/instance=\" + c.ReleaseName})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpodList := append(oldLabelPods.Items, newLabelPods.Items...)\n\n\tout := make(map[string]string)\n\tfor _, pod := range podList {\n\t\t\/\/ Only grab logs from the first container in the pod\n\t\t\/\/ TODO: grab logs from all containers\n\t\tcontainerName := pod.Spec.Containers[0].Name\n\t\tresp := kc.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: containerName,\n\t\t}).Do()\n\n\t\terr := resp.Error()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogs, err := resp.Raw()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toutPath := path.Join(c.Namespace, pod.Name)\n\t\tout[outPath] = string(logs)\n\t}\n\n\treturn out, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package kinesis\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/AdRoll\/goamz\/aws\"\n\tkin \"github.com\/AdRoll\/goamz\/kinesis\"\n\t\"github.com\/mozilla-services\/heka\/pipeline\"\n\t\"time\"\n)\n\ntype KinesisOutput struct {\n\tauth aws.Auth\n\tconfig *KinesisOutputConfig\n\tClient *kin.Kinesis\n}\n\ntype KinesisOutputConfig struct {\n\tRegion string `toml:\"region\"`\n\tStream string `toml:\"stream\"`\n\tAccessKeyID string `toml:\"access_key_id\"`\n\tSecretAccessKey string `toml:\"secret_access_key\"`\n\tToken string `toml:\"token\"`\n\tPayloadOnly bool `toml:\"payload_only\"`\n}\n\nfunc (k *KinesisOutput) ConfigStruct() interface{} {\n\treturn &KinesisOutputConfig{\n\t\tRegion: \"us-east-1\",\n\t\tStream: \"\",\n\t\tAccessKeyID: \"\",\n\t\tSecretAccessKey: \"\",\n\t\tToken: \"\",\n\t}\n}\n\nfunc (k *KinesisOutput) Init(config interface{}) error {\n\tk.config = config.(*KinesisOutputConfig)\n\ta, err := aws.GetAuth(k.config.AccessKeyID, k.config.SecretAccessKey, k.config.Token, time.Now())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error authenticating: %s\", err)\n\t}\n\tk.auth = a\n\n\tregion, ok := aws.Regions[k.config.Region]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Region does not exist: %s\", k.config.Region)\n\t}\n\n\tk.Client = kin.New(k.auth, region)\n\n\treturn nil\n}\n\nfunc (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {\n\tvar (\n\t\tpack *pipeline.PipelinePack\n\t\tcontents []byte\n\t\tmsg []byte\n\t\terr error\n\t)\n\n\tfor pack = range or.InChan() {\n\t\tmsg, err = or.Encode(pack)\n\t\tif err != nil {\n\t\t\tor.LogError(fmt.Errorf(\"Error encoding message: %s\", err))\n\t\t\tpack.Recycle()\n\t\t\tcontinue\n\t\t}\n\t\tif contents, err = json.Marshal(msg); err != nil {\n\t\t\tor.LogError(fmt.Errorf(\"Error marshalling: %s\", err))\n\t\t\tpack.Recycle()\n\t\t\tcontinue\n\t\t} else {\n\t\t\tpk := fmt.Sprintf(\"%d-%s\", pack.Message.Timestamp, pack.Message.Hostname)\n\t\t\t_, err = k.Client.PutRecord(k.config.Stream, pk, contents, \"\", \"\")\n\t\t\tif err != nil {\n\t\t\t\tor.LogError(fmt.Errorf(\"Error pushing message to Kinesis: %s\", err))\n\t\t\t\tpack.Recycle()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tpack.Recycle()\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tpipeline.RegisterPlugin(\"KinesisOutput\", func() interface{} { return new(KinesisOutput) })\n}\n<commit_msg>base64 decode the message<commit_after>package kinesis\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/AdRoll\/goamz\/aws\"\n\tkin \"github.com\/AdRoll\/goamz\/kinesis\"\n\t\"github.com\/mozilla-services\/heka\/pipeline\"\n\t\"time\"\n)\n\ntype KinesisOutput struct {\n\tauth aws.Auth\n\tconfig *KinesisOutputConfig\n\tClient *kin.Kinesis\n}\n\ntype KinesisOutputConfig struct {\n\tRegion string `toml:\"region\"`\n\tStream string `toml:\"stream\"`\n\tAccessKeyID string `toml:\"access_key_id\"`\n\tSecretAccessKey string `toml:\"secret_access_key\"`\n\tToken string `toml:\"token\"`\n\tPayloadOnly bool `toml:\"payload_only\"`\n}\n\nfunc (k *KinesisOutput) ConfigStruct() interface{} {\n\treturn &KinesisOutputConfig{\n\t\tRegion: \"us-east-1\",\n\t\tStream: \"\",\n\t\tAccessKeyID: \"\",\n\t\tSecretAccessKey: \"\",\n\t\tToken: \"\",\n\t}\n}\n\nfunc (k *KinesisOutput) Init(config interface{}) error {\n\tk.config = config.(*KinesisOutputConfig)\n\ta, err := aws.GetAuth(k.config.AccessKeyID, k.config.SecretAccessKey, k.config.Token, time.Now())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error authenticating: %s\", err)\n\t}\n\tk.auth = a\n\n\tregion, ok := aws.Regions[k.config.Region]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Region does not exist: %s\", k.config.Region)\n\t}\n\n\tk.Client = kin.New(k.auth, region)\n\n\treturn nil\n}\n\nfunc (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {\n\tvar (\n\t\tpack *pipeline.PipelinePack\n\t\tcontents []byte\n\t\tmsg []byte\n\t\terr error\n\t)\n\n\tfor pack = range or.InChan() {\n\t\tmsg, err = or.Encode(pack)\n\t\tif err != nil {\n\t\t\tor.LogError(fmt.Errorf(\"Error encoding message: %s\", err))\n\t\t\tpack.Recycle()\n\t\t\tcontinue\n\t\t}\n\t\tmsg, err = base64.StdEncoding.DecodeString(string(msg))\n\t\tif err != nil {\n\t\t\tor.LogError(fmt.Errorf(\"Error decoding: %s\", err))\n\t\t\tpack.Recycle()\n\t\t\tcontinue\n\t\t}\n\t\tif contents, err = json.Marshal(msg); err != nil {\n\t\t\tor.LogError(fmt.Errorf(\"Error marshalling: %s\", err))\n\t\t\tpack.Recycle()\n\t\t\tcontinue\n\t\t} else {\n\t\t\tpk := fmt.Sprintf(\"%d-%s\", pack.Message.Timestamp, pack.Message.Hostname)\n\t\t\t_, err = k.Client.PutRecord(k.config.Stream, pk, contents, \"\", \"\")\n\t\t\tif err != nil {\n\t\t\t\tor.LogError(fmt.Errorf(\"Error pushing message to Kinesis: %s\", err))\n\t\t\t\tpack.Recycle()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tpack.Recycle()\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tpipeline.RegisterPlugin(\"KinesisOutput\", func() interface{} { return new(KinesisOutput) })\n}\n<|endoftext|>"} {"text":"<commit_before>package cockroach\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/pressly\/goose\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/rs\/zerolog\/log\"\n\n\t\/\/ use postgres client library to connect to cockroachdb\n\t_ \"github.com\/lib\/pq\"\n)\n\n\/\/ DBClient is the interface for communicating with CockroachDB\ntype DBClient interface {\n\tConnect() error\n\tConnectWithDriverAndSource(string, string) error\n\tMigrateSchema() error\n\tInsertBuildJobLogs(BuildJobLogs) error\n\tGetBuildLogs(BuildJobLogs) error\n}\n\ntype cockroachDBClientImpl struct {\n\tdatabaseDriver string\n\tmigrationsDir string\n\tcockroachDatabase string\n\tcockroachHost string\n\tcockroachInsecure bool\n\tcockroachCertificateDir string\n\tcockroachPort int\n\tcockroachUser string\n\tcockroachPassword string\n\tPrometheusOutboundAPICallTotals *prometheus.CounterVec\n\tdatabaseConnection *sql.DB\n}\n\n\/\/ NewCockroachDBClient returns a new cockroach.DBClient\nfunc NewCockroachDBClient(cockroachDatabase, cockroachHost string, cockroachInsecure bool, cockroachCertificateDir string, cockroachPort int, cockroachUser, cockroachPassword string, prometheusOutboundAPICallTotals *prometheus.CounterVec) (cockroachDBClient DBClient) {\n\n\tcockroachDBClient = &cockroachDBClientImpl{\n\t\tdatabaseDriver: \"postgres\",\n\t\tmigrationsDir: \"\/migrations\",\n\t\tcockroachDatabase: cockroachDatabase,\n\t\tcockroachHost: cockroachHost,\n\t\tcockroachInsecure: cockroachInsecure,\n\t\tcockroachCertificateDir: cockroachCertificateDir,\n\t\tcockroachPort: cockroachPort,\n\t\tcockroachUser: cockroachUser,\n\t\tcockroachPassword: cockroachPassword,\n\t\tPrometheusOutboundAPICallTotals: prometheusOutboundAPICallTotals,\n\t}\n\n\treturn\n}\n\n\/\/ Connect sets up a connection with CockroachDB\nfunc (dbc *cockroachDBClientImpl) Connect() (err error) {\n\n\tlog.Debug().Msgf(\"Connecting to database %v on host %v...\", dbc.cockroachDatabase, dbc.cockroachHost)\n\n\tsslMode := \"\"\n\tif dbc.cockroachInsecure {\n\t\tsslMode = \"?sslmode=disable\"\n\t}\n\n\tdataSourceName := fmt.Sprintf(\"postgresql:\/\/%v:%v@%v:%v\/%v%v\", dbc.cockroachUser, dbc.cockroachPassword, dbc.cockroachHost, dbc.cockroachPort, dbc.cockroachDatabase, sslMode)\n\n\treturn dbc.ConnectWithDriverAndSource(dbc.databaseDriver, dataSourceName)\n}\n\n\/\/ ConnectWithDriverAndSource set up a connection with any database\nfunc (dbc *cockroachDBClientImpl) ConnectWithDriverAndSource(driverName string, dataSourceName string) (err error) {\n\n\tdbc.databaseConnection, err = sql.Open(driverName, dataSourceName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ MigrateSchema migrates the schema in CockroachDB\nfunc (dbc *cockroachDBClientImpl) MigrateSchema() (err error) {\n\n\terr = goose.SetDialect(dbc.databaseDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = goose.Status(dbc.databaseConnection, dbc.migrationsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = goose.Up(dbc.databaseConnection, dbc.migrationsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}\n\n\/\/ InsertBuildJobLogs inserts build logs into the database\nfunc (dbc *cockroachDBClientImpl) InsertBuildJobLogs(buildJobLogs BuildJobLogs) (err error) {\n\n\tdbc.PrometheusOutboundAPICallTotals.With(prometheus.Labels{\"target\": \"cockroachdb\"}).Inc()\n\n\t\/\/ insert logs\n\tr, err := dbc.databaseConnection.Exec(\n\t\t\"INSERT INTO build_logs (repo_full_name,repo_branch,repo_revision,repo_source,log_text) VALUES ($1,$2,$3,$4,$5)\",\n\t\tbuildJobLogs.RepoFullName,\n\t\tbuildJobLogs.RepoBranch,\n\t\tbuildJobLogs.RepoRevision,\n\t\tbuildJobLogs.RepoSource,\n\t\tbuildJobLogs.LogText,\n\t)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlastInsertID, err := r.LastInsertId()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trowsAffected, err := r.RowsAffected()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlog.Debug().\n\t\tInt64(\"LastInsertId\", lastInsertID).\n\t\tInt64(\"RowsAffected\", rowsAffected).\n\t\tMsg(\"Inserted log record\")\n\n\treturn\n}\n\n\/\/ GetBuildJobLogs reads a specific log\nfunc (dbc *cockroachDBClientImpl) GetBuildLogs(buildJobLogs BuildJobLogs) (err error) {\n\n\trows, err := dbc.databaseConnection.Query(\"SELECT * FROM build_logs WHERE repo_full_name=$1 AND repo_branch=$2 AND repo_revision=$3 AND repo_source=$4\",\n\t\tbuildJobLogs.RepoFullName,\n\t\tbuildJobLogs.RepoBranch,\n\t\tbuildJobLogs.RepoRevision,\n\t\tbuildJobLogs.RepoSource,\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar repo_full_name, repo_branch, repo_revision, repo_source, log_text string\n\n\t\tif err := rows.Scan(&id, &repo_full_name, &repo_branch, &repo_revision, &repo_source, &log_text); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debug().\n\t\t\tInt(\"id\", id).\n\t\t\tStr(\"repo_full_name\", repo_full_name).\n\t\t\tStr(\"repo_branch\", repo_branch).\n\t\t\tStr(\"repo_revision\", repo_revision).\n\t\t\tStr(\"repo_source\", repo_source).\n\t\t\tStr(\"log_text\", log_text).\n\t\t\tMsg(\"Read logs\")\n\t}\n\n\treturn\n}\n<commit_msg>warn if getting LastInsertId or RowsAffected fails, but don't error<commit_after>package cockroach\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/pressly\/goose\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/rs\/zerolog\/log\"\n\n\t\/\/ use postgres client library to connect to cockroachdb\n\t_ \"github.com\/lib\/pq\"\n)\n\n\/\/ DBClient is the interface for communicating with CockroachDB\ntype DBClient interface {\n\tConnect() error\n\tConnectWithDriverAndSource(string, string) error\n\tMigrateSchema() error\n\tInsertBuildJobLogs(BuildJobLogs) error\n\tGetBuildLogs(BuildJobLogs) error\n}\n\ntype cockroachDBClientImpl struct {\n\tdatabaseDriver string\n\tmigrationsDir string\n\tcockroachDatabase string\n\tcockroachHost string\n\tcockroachInsecure bool\n\tcockroachCertificateDir string\n\tcockroachPort int\n\tcockroachUser string\n\tcockroachPassword string\n\tPrometheusOutboundAPICallTotals *prometheus.CounterVec\n\tdatabaseConnection *sql.DB\n}\n\n\/\/ NewCockroachDBClient returns a new cockroach.DBClient\nfunc NewCockroachDBClient(cockroachDatabase, cockroachHost string, cockroachInsecure bool, cockroachCertificateDir string, cockroachPort int, cockroachUser, cockroachPassword string, prometheusOutboundAPICallTotals *prometheus.CounterVec) (cockroachDBClient DBClient) {\n\n\tcockroachDBClient = &cockroachDBClientImpl{\n\t\tdatabaseDriver: \"postgres\",\n\t\tmigrationsDir: \"\/migrations\",\n\t\tcockroachDatabase: cockroachDatabase,\n\t\tcockroachHost: cockroachHost,\n\t\tcockroachInsecure: cockroachInsecure,\n\t\tcockroachCertificateDir: cockroachCertificateDir,\n\t\tcockroachPort: cockroachPort,\n\t\tcockroachUser: cockroachUser,\n\t\tcockroachPassword: cockroachPassword,\n\t\tPrometheusOutboundAPICallTotals: prometheusOutboundAPICallTotals,\n\t}\n\n\treturn\n}\n\n\/\/ Connect sets up a connection with CockroachDB\nfunc (dbc *cockroachDBClientImpl) Connect() (err error) {\n\n\tlog.Debug().Msgf(\"Connecting to database %v on host %v...\", dbc.cockroachDatabase, dbc.cockroachHost)\n\n\tsslMode := \"\"\n\tif dbc.cockroachInsecure {\n\t\tsslMode = \"?sslmode=disable\"\n\t}\n\n\tdataSourceName := fmt.Sprintf(\"postgresql:\/\/%v:%v@%v:%v\/%v%v\", dbc.cockroachUser, dbc.cockroachPassword, dbc.cockroachHost, dbc.cockroachPort, dbc.cockroachDatabase, sslMode)\n\n\treturn dbc.ConnectWithDriverAndSource(dbc.databaseDriver, dataSourceName)\n}\n\n\/\/ ConnectWithDriverAndSource set up a connection with any database\nfunc (dbc *cockroachDBClientImpl) ConnectWithDriverAndSource(driverName string, dataSourceName string) (err error) {\n\n\tdbc.databaseConnection, err = sql.Open(driverName, dataSourceName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ MigrateSchema migrates the schema in CockroachDB\nfunc (dbc *cockroachDBClientImpl) MigrateSchema() (err error) {\n\n\terr = goose.SetDialect(dbc.databaseDriver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = goose.Status(dbc.databaseConnection, dbc.migrationsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = goose.Up(dbc.databaseConnection, dbc.migrationsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}\n\n\/\/ InsertBuildJobLogs inserts build logs into the database\nfunc (dbc *cockroachDBClientImpl) InsertBuildJobLogs(buildJobLogs BuildJobLogs) (err error) {\n\n\tdbc.PrometheusOutboundAPICallTotals.With(prometheus.Labels{\"target\": \"cockroachdb\"}).Inc()\n\n\t\/\/ insert logs\n\tr, err := dbc.databaseConnection.Exec(\n\t\t\"INSERT INTO build_logs (repo_full_name,repo_branch,repo_revision,repo_source,log_text) VALUES ($1,$2,$3,$4,$5)\",\n\t\tbuildJobLogs.RepoFullName,\n\t\tbuildJobLogs.RepoBranch,\n\t\tbuildJobLogs.RepoRevision,\n\t\tbuildJobLogs.RepoSource,\n\t\tbuildJobLogs.LogText,\n\t)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlastInsertID, err := r.LastInsertId()\n\tif err != nil {\n\t\tlog.Warn().Err(err).\n\t\t\tInterface(\"buildJobLogs\", buildJobLogs).\n\t\t\tMsgf(\"Getting LastInsertId for %v failed\", buildJobLogs.RepoFullName)\n\t}\n\n\trowsAffected, err := r.RowsAffected()\n\tif err != nil {\n\t\tlog.Warn().Err(err).\n\t\t\tInterface(\"buildJobLogs\", buildJobLogs).\n\t\t\tMsgf(\"Getting RowsAffected for %v failed\", buildJobLogs.RepoFullName)\n\t}\n\n\tlog.Debug().\n\t\tInt64(\"LastInsertId\", lastInsertID).\n\t\tInt64(\"RowsAffected\", rowsAffected).\n\t\tMsg(\"Inserted log record\")\n\n\treturn\n}\n\n\/\/ GetBuildJobLogs reads a specific log\nfunc (dbc *cockroachDBClientImpl) GetBuildLogs(buildJobLogs BuildJobLogs) (err error) {\n\n\trows, err := dbc.databaseConnection.Query(\"SELECT * FROM build_logs WHERE repo_full_name=$1 AND repo_branch=$2 AND repo_revision=$3 AND repo_source=$4\",\n\t\tbuildJobLogs.RepoFullName,\n\t\tbuildJobLogs.RepoBranch,\n\t\tbuildJobLogs.RepoRevision,\n\t\tbuildJobLogs.RepoSource,\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar repo_full_name, repo_branch, repo_revision, repo_source, log_text string\n\n\t\tif err := rows.Scan(&id, &repo_full_name, &repo_branch, &repo_revision, &repo_source, &log_text); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debug().\n\t\t\tInt(\"id\", id).\n\t\t\tStr(\"repo_full_name\", repo_full_name).\n\t\t\tStr(\"repo_branch\", repo_branch).\n\t\t\tStr(\"repo_revision\", repo_revision).\n\t\t\tStr(\"repo_source\", repo_source).\n\t\t\tStr(\"log_text\", log_text).\n\t\t\tMsg(\"Read logs\")\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\n\t\"github.com\/googollee\/go-socket.io\"\n\tgh \"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/play-with-docker\/play-with-docker\/config\"\n\t\"github.com\/play-with-docker\/play-with-docker\/event\"\n\t\"github.com\/play-with-docker\/play-with-docker\/pwd\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/urfave\/negroni\"\n)\n\nvar core pwd.PWDApi\nvar e event.EventApi\nvar ws *socketio.Server\n\ntype HandlerExtender func(h *mux.Router)\n\nfunc Bootstrap(c pwd.PWDApi, ev event.EventApi) {\n\tcore = c\n\te = ev\n}\n\nfunc Register(extend HandlerExtender) {\n\tserver, err := socketio.NewServer(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver.On(\"connection\", WS)\n\tserver.On(\"error\", WSError)\n\n\tRegisterEvents(server)\n\n\tr := mux.NewRouter()\n\tcorsRouter := mux.NewRouter()\n\n\tcorsHandler := gh.CORS(gh.AllowCredentials(), gh.AllowedHeaders([]string{\"x-requested-with\", \"content-type\"}), gh.AllowedOrigins([]string{\"*\"}))\n\n\t\/\/ Specific routes\n\tr.HandleFunc(\"\/ping\", Ping).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/instances\/images\", GetInstanceImages).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\", GetSession).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\", CloseSession).Methods(\"DELETE\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/setup\", SessionSetup).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\", NewInstance).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\/uploads\", FileUpload).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\", DeleteInstance).Methods(\"DELETE\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\/exec\", Exec).Methods(\"POST\")\n\n\tr.HandleFunc(\"\/ooc\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/ooc.html\")\n\t}).Methods(\"GET\")\n\tr.HandleFunc(\"\/503\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/503.html\")\n\t}).Methods(\"GET\")\n\tr.HandleFunc(\"\/p\/{sessionId}\", Home).Methods(\"GET\")\n\tr.PathPrefix(\"\/assets\").Handler(http.FileServer(http.Dir(\".\/www\")))\n\tr.HandleFunc(\"\/robots.txt\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \"www\/robots.txt\")\n\t})\n\tr.HandleFunc(\"\/sdk.js\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \"www\/sdk.js\")\n\t})\n\n\tcorsRouter.Handle(\"\/sessions\/{sessionId}\/ws\/\", server)\n\tr.Handle(\"\/metrics\", promhttp.Handler())\n\n\t\/\/ Generic routes\n\tr.HandleFunc(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/landing.html\")\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"\/users\/me\", LoggedInUser).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\", ListProviders).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\/{provider}\/login\", Login).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\/{provider}\/callback\", LoginCallback).Methods(\"GET\")\n\n\tcorsRouter.HandleFunc(\"\/\", NewSession).Methods(\"POST\")\n\n\tif extend != nil {\n\t\textend(corsRouter)\n\t}\n\n\tn := negroni.Classic()\n\tr.PathPrefix(\"\/\").Handler(negroni.New(negroni.Wrap(corsHandler(corsRouter))))\n\tn.UseHandler(r)\n\n\thttpServer := http.Server{\n\t\tAddr: \"0.0.0.0:\" + config.PortNumber,\n\t\tHandler: n,\n\t\tIdleTimeout: 30 * time.Second,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t}\n\tif config.UseLetsEncrypt {\n\t\tcertManager := autocert.Manager{\n\t\t\tPrompt: autocert.AcceptTOS,\n\t\t\tHostPolicy: autocert.HostWhitelist(config.LetsEncryptDomains...),\n\t\t\tCache: autocert.DirCache(config.LetsEncryptCertsDir),\n\t\t}\n\n\t\thttpServer.TLSConfig = &tls.Config{\n\t\t\tGetCertificate: certManager.GetCertificate,\n\t\t}\n\n\t\tgo func() {\n\t\t\thttp.HandleFunc(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Redirect(rw, r, fmt.Sprintf(\"https:\/\/%s\", r.Host), http.StatusMovedPermanently)\n\t\t\t})\n\t\t\tlog.Println(\"Starting redirect server\")\n\t\t\tlog.Fatal(http.ListenAndServe(\":3001\", nil))\n\t\t\tlog.Fatal(httpServer.ListenAndServe())\n\t\t}()\n\n\t\tlog.Println(\"Listening on port \" + config.PortNumber)\n\t\tlog.Fatal(httpServer.ListenAndServeTLS(\"\", \"\"))\n\t} else {\n\t\tlog.Println(\"Listening on port \" + config.PortNumber)\n\t\tlog.Fatal(httpServer.ListenAndServe())\n\t}\n\n}\n\nfunc RegisterEvents(s *socketio.Server) {\n\tws = s\n\te.OnAny(broadcastEvent)\n}\n\nfunc broadcastEvent(eventType event.EventType, sessionId string, args ...interface{}) {\n\tws.BroadcastTo(sessionId, eventType.String(), args...)\n}\n<commit_msg>Add methods in CORS requests<commit_after>package handlers\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\n\t\"github.com\/googollee\/go-socket.io\"\n\tgh \"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/play-with-docker\/play-with-docker\/config\"\n\t\"github.com\/play-with-docker\/play-with-docker\/event\"\n\t\"github.com\/play-with-docker\/play-with-docker\/pwd\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/urfave\/negroni\"\n)\n\nvar core pwd.PWDApi\nvar e event.EventApi\nvar ws *socketio.Server\n\ntype HandlerExtender func(h *mux.Router)\n\nfunc Bootstrap(c pwd.PWDApi, ev event.EventApi) {\n\tcore = c\n\te = ev\n}\n\nfunc Register(extend HandlerExtender) {\n\tserver, err := socketio.NewServer(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tserver.On(\"connection\", WS)\n\tserver.On(\"error\", WSError)\n\n\tRegisterEvents(server)\n\n\tr := mux.NewRouter()\n\tcorsRouter := mux.NewRouter()\n\n\tcorsHandler := gh.CORS(gh.AllowCredentials(), gh.AllowedHeaders([]string{\"x-requested-with\", \"content-type\"}), gh.AllowedMethods([]string{\"GET\", \"POST\", \"HEAD\", \"DELETE\"}), gh.AllowedOrigins([]string{\"*\"}))\n\n\t\/\/ Specific routes\n\tr.HandleFunc(\"\/ping\", Ping).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/instances\/images\", GetInstanceImages).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\", GetSession).Methods(\"GET\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\", CloseSession).Methods(\"DELETE\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/setup\", SessionSetup).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\", NewInstance).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\/uploads\", FileUpload).Methods(\"POST\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\", DeleteInstance).Methods(\"DELETE\")\n\tcorsRouter.HandleFunc(\"\/sessions\/{sessionId}\/instances\/{instanceName}\/exec\", Exec).Methods(\"POST\")\n\n\tr.HandleFunc(\"\/ooc\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/ooc.html\")\n\t}).Methods(\"GET\")\n\tr.HandleFunc(\"\/503\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/503.html\")\n\t}).Methods(\"GET\")\n\tr.HandleFunc(\"\/p\/{sessionId}\", Home).Methods(\"GET\")\n\tr.PathPrefix(\"\/assets\").Handler(http.FileServer(http.Dir(\".\/www\")))\n\tr.HandleFunc(\"\/robots.txt\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \"www\/robots.txt\")\n\t})\n\tr.HandleFunc(\"\/sdk.js\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \"www\/sdk.js\")\n\t})\n\n\tcorsRouter.Handle(\"\/sessions\/{sessionId}\/ws\/\", server)\n\tr.Handle(\"\/metrics\", promhttp.Handler())\n\n\t\/\/ Generic routes\n\tr.HandleFunc(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(rw, r, \".\/www\/landing.html\")\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"\/users\/me\", LoggedInUser).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\", ListProviders).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\/{provider}\/login\", Login).Methods(\"GET\")\n\tr.HandleFunc(\"\/oauth\/providers\/{provider}\/callback\", LoginCallback).Methods(\"GET\")\n\n\tcorsRouter.HandleFunc(\"\/\", NewSession).Methods(\"POST\")\n\n\tif extend != nil {\n\t\textend(corsRouter)\n\t}\n\n\tn := negroni.Classic()\n\tr.PathPrefix(\"\/\").Handler(negroni.New(negroni.Wrap(corsHandler(corsRouter))))\n\tn.UseHandler(r)\n\n\thttpServer := http.Server{\n\t\tAddr: \"0.0.0.0:\" + config.PortNumber,\n\t\tHandler: n,\n\t\tIdleTimeout: 30 * time.Second,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t}\n\tif config.UseLetsEncrypt {\n\t\tcertManager := autocert.Manager{\n\t\t\tPrompt: autocert.AcceptTOS,\n\t\t\tHostPolicy: autocert.HostWhitelist(config.LetsEncryptDomains...),\n\t\t\tCache: autocert.DirCache(config.LetsEncryptCertsDir),\n\t\t}\n\n\t\thttpServer.TLSConfig = &tls.Config{\n\t\t\tGetCertificate: certManager.GetCertificate,\n\t\t}\n\n\t\tgo func() {\n\t\t\thttp.HandleFunc(\"\/\", func(rw http.ResponseWriter, r *http.Request) {\n\t\t\t\thttp.Redirect(rw, r, fmt.Sprintf(\"https:\/\/%s\", r.Host), http.StatusMovedPermanently)\n\t\t\t})\n\t\t\tlog.Println(\"Starting redirect server\")\n\t\t\tlog.Fatal(http.ListenAndServe(\":3001\", nil))\n\t\t\tlog.Fatal(httpServer.ListenAndServe())\n\t\t}()\n\n\t\tlog.Println(\"Listening on port \" + config.PortNumber)\n\t\tlog.Fatal(httpServer.ListenAndServeTLS(\"\", \"\"))\n\t} else {\n\t\tlog.Println(\"Listening on port \" + config.PortNumber)\n\t\tlog.Fatal(httpServer.ListenAndServe())\n\t}\n\n}\n\nfunc RegisterEvents(s *socketio.Server) {\n\tws = s\n\te.OnAny(broadcastEvent)\n}\n\nfunc broadcastEvent(eventType event.EventType, sessionId string, args ...interface{}) {\n\tws.BroadcastTo(sessionId, eventType.String(), args...)\n}\n<|endoftext|>"} {"text":"<commit_before>package builder\n\nimport (\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/blablacar\/cnt\/dist\"\n\t\"github.com\/blablacar\/cnt\/log\"\n\t\"github.com\/blablacar\/cnt\/spec\"\n\t\"github.com\/blablacar\/cnt\/utils\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst BATS_ACI = \"aci.blablacar.com\/aci-bats:2\"\nconst PATH_RESULT = \"\/result\"\nconst STATUS_SUFFIX = \"_status\"\nconst TEST_INIT_SCRIPT = `#!\/bin\/bash\nset -x\n\n%%COMMAND%% &\n\nwaitsuccess=\"0\"\nif [ -f \"\/tests\/wait.sh\" ]; then\n\tchmod +x \/tests\/wait.sh\n\ti=\"0\"\n\twhile [ $i -lt 60 ]; do\n\t \/tests\/wait.sh\n\t if [ $? == 0 ]; then\n\t \twaitsuccess=\"1\"\n\t \tbreak;\n\t fi\n\t i=$[$i+1]\n\t sleep 1\n\tdone\nfi\n\nif [ wait_success == \"0\" ]; then\n\techo \"1\\n\" > \/result\/wait.sh\n\techo \"WAIT FAILED\"\n\texit 1\nfi\n\n\/test.sh\n`\n\nfunc (cnt *Img) Test() {\n\tcnt.Install()\n\tlog.Get().Info(\"Testing \" + cnt.manifest.NameAndVersion)\n\n\tif _, err := os.Stat(cnt.path + PATH_TESTS); err != nil {\n\t\tif cnt.args.NoTestFail {\n\t\t\tlog.Get().Panic(\"Test directory does not exists but tests are mandatory\")\n\t\t}\n\t\tlog.Get().Warn(\"Tests directory does not exists\")\n\t\treturn\n\t}\n\n\tcnt.importAciBats()\n\n\ttestAci, err := cnt.prepareTestAci()\n\tif err != nil {\n\t\tlog.Get().Panic(err)\n\t}\n\ttestAci.Build()\n\n\tos.MkdirAll(cnt.target+PATH_TESTS+PATH_TARGET+PATH_RESULT, 0777)\n\n\tif err := utils.ExecCmd(\"rkt\",\n\t\t\"--insecure-skip-verify=true\",\n\t\t\"run\",\n\t\t\"--mds-register=false\",\n\t\t\"--volume=result,kind=host,source=\"+cnt.target+PATH_TESTS+PATH_TARGET+PATH_RESULT,\n\t\tcnt.target+PATH_TESTS+PATH_TARGET+\"\/image.aci\"); err != nil {\n\t\t\/\/ rkt+systemd cannot exit with fail status yet\n\t\tlog.Get().Panic(err)\n\t}\n\n\tcnt.checkResult()\n}\n\nfunc (cnt *Img) checkResult() {\n\tfiles, err := ioutil.ReadDir(cnt.target + PATH_TESTS + PATH_TARGET + PATH_RESULT)\n\tif err != nil {\n\t\tlog.Get().Panic(\"Cannot read test result directory\", err)\n\t}\n\ttestFound := false\n\tfor _, f := range files {\n\t\tfullPath := cnt.target + PATH_TESTS + PATH_TARGET + PATH_RESULT + \"\/\" + f.Name()\n\t\tcontent, err := ioutil.ReadFile(fullPath)\n\t\tif err != nil {\n\t\t\tlog.Get().Panic(\"Cannot read result file\", f.Name(), err)\n\t\t}\n\t\tif !strings.HasSuffix(f.Name(), STATUS_SUFFIX) {\n\t\t\tif testFound == false && string(content) != \"1..0\\n\" {\n\t\t\t\ttestFound = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif string(content) != \"0\\n\" {\n\t\t\tlog.Get().Error(\"Failed test file : \", f.Name())\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tif cnt.args.NoTestFail && !testFound {\n\t\tlog.Get().Panic(\"No tests found\")\n\t}\n}\n\nfunc (cnt *Img) importAciBats() {\n\tif err := utils.ExecCmd(\"bash\", \"-c\", \"rkt image list --fields name --no-legend | grep -q \"+BATS_ACI); err != nil {\n\t\tcontent, _ := dist.Asset(\"dist\/bindata\/aci-bats.aci\")\n\t\tif err := ioutil.WriteFile(\"\/tmp\/aci-bats.aci\", content, 0644); err != nil {\n\t\t\tlog.Get().Panic(err)\n\t\t}\n\t\tutils.ExecCmd(\"rkt\", \"--insecure-skip-verify=true\", \"fetch\", \"\/tmp\/aci-bats.aci\")\n\t\tos.Remove(\"\/tmp\/aci-bats.aci\")\n\t}\n}\n\nfunc (cnt *Img) prepareTestAci() (*Img, error) {\n\tfiles, err := ioutil.ReadDir(cnt.path + PATH_TESTS)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_FILES, cnt.target+PATH_TESTS+PATH_FILES)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_ATTRIBUTES, cnt.target+PATH_TESTS+PATH_ATTRIBUTES)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_CONFD, cnt.target+PATH_TESTS+PATH_CONFD)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_RUNLEVELS, cnt.target+PATH_TESTS+PATH_RUNLEVELS)\n\n\tos.MkdirAll(cnt.target+PATH_TESTS+PATH_FILES+PATH_TESTS, 0777)\n\tfor _, f := range files {\n\t\tif !f.IsDir() {\n\t\t\tif err := utils.CopyFile(cnt.path+PATH_TESTS+\"\/\"+f.Name(), cnt.target+PATH_TESTS+PATH_FILES+PATH_TESTS+\"\/\"+f.Name()); err != nil {\n\t\t\t\tlog.Get().Panic(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tExecScript := strings.Replace(TEST_INIT_SCRIPT, \"%%COMMAND%%\", \"'\"+strings.Join(cnt.manifest.Aci.App.Exec, \"' '\")+\"'\", 1)\n\n\tioutil.WriteFile(cnt.target+PATH_TESTS+PATH_FILES+\"\/init.sh\", []byte(ExecScript), 0777)\n\n\tfullname, err := spec.NewACFullName(cnt.manifest.NameAndVersion.Name() + \"_test:\" + cnt.manifest.NameAndVersion.Version())\n\tif err != nil {\n\t\tlog.Get().Panic(err)\n\t}\n\n\tresultMountName, _ := types.NewACName(\"result\")\n\ttestAci, err := NewAciWithManifest(cnt.target+PATH_TESTS, cnt.args, spec.AciManifest{\n\t\tAci: spec.AciDefinition{\n\t\t\tApp: &spec.CntApp{\n\t\t\t\tExec: []string{\"\/init.sh\"},\n\t\t\t\tMountPoints: []types.MountPoint{{Path: PATH_RESULT, Name: *resultMountName}},\n\t\t\t},\n\t\t\tDependencies: []spec.ACFullname{BATS_ACI, cnt.manifest.NameAndVersion},\n\t\t},\n\t\tNameAndVersion: *fullname,\n\t})\n\tif err != nil {\n\t\tlog.Get().Panic(\"Cannot build test aci\", err)\n\t}\n\treturn testAci, nil\n}\n<commit_msg>do not use overlay for tests<commit_after>package builder\n\nimport (\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/blablacar\/cnt\/dist\"\n\t\"github.com\/blablacar\/cnt\/log\"\n\t\"github.com\/blablacar\/cnt\/spec\"\n\t\"github.com\/blablacar\/cnt\/utils\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst BATS_ACI = \"aci.blablacar.com\/aci-bats:2\"\nconst PATH_RESULT = \"\/result\"\nconst STATUS_SUFFIX = \"_status\"\nconst TEST_INIT_SCRIPT = `#!\/bin\/bash\nset -x\n\n%%COMMAND%% &\n\nwaitsuccess=\"0\"\nif [ -f \"\/tests\/wait.sh\" ]; then\n\tchmod +x \/tests\/wait.sh\n\ti=\"0\"\n\twhile [ $i -lt 60 ]; do\n\t \/tests\/wait.sh\n\t if [ $? == 0 ]; then\n\t \twaitsuccess=\"1\"\n\t \tbreak;\n\t fi\n\t i=$[$i+1]\n\t sleep 1\n\tdone\nfi\n\nif [ wait_success == \"0\" ]; then\n\techo \"1\\n\" > \/result\/wait.sh\n\techo \"WAIT FAILED\"\n\texit 1\nfi\n\n\/test.sh\n`\n\nfunc (cnt *Img) Test() {\n\tcnt.Install()\n\tlog.Get().Info(\"Testing \" + cnt.manifest.NameAndVersion)\n\n\tif _, err := os.Stat(cnt.path + PATH_TESTS); err != nil {\n\t\tif cnt.args.NoTestFail {\n\t\t\tlog.Get().Panic(\"Test directory does not exists but tests are mandatory\")\n\t\t}\n\t\tlog.Get().Warn(\"Tests directory does not exists\")\n\t\treturn\n\t}\n\n\tcnt.importAciBats()\n\n\ttestAci, err := cnt.prepareTestAci()\n\tif err != nil {\n\t\tlog.Get().Panic(err)\n\t}\n\ttestAci.Build()\n\n\tos.MkdirAll(cnt.target+PATH_TESTS+PATH_TARGET+PATH_RESULT, 0777)\n\n\tif err := utils.ExecCmd(\"rkt\",\n\t\t\"--insecure-skip-verify=true\",\n\t\t\"run\",\n\t\t\"--mds-register=false\",\n\t\t\"--no-overlay=true\",\n\t\t\"--volume=result,kind=host,source=\"+cnt.target+PATH_TESTS+PATH_TARGET+PATH_RESULT,\n\t\tcnt.target+PATH_TESTS+PATH_TARGET+\"\/image.aci\"); err != nil {\n\t\t\/\/ rkt+systemd cannot exit with fail status yet\n\t\tlog.Get().Panic(err)\n\t}\n\n\tcnt.checkResult()\n}\n\nfunc (cnt *Img) checkResult() {\n\tfiles, err := ioutil.ReadDir(cnt.target + PATH_TESTS + PATH_TARGET + PATH_RESULT)\n\tif err != nil {\n\t\tlog.Get().Panic(\"Cannot read test result directory\", err)\n\t}\n\ttestFound := false\n\tfor _, f := range files {\n\t\tfullPath := cnt.target + PATH_TESTS + PATH_TARGET + PATH_RESULT + \"\/\" + f.Name()\n\t\tcontent, err := ioutil.ReadFile(fullPath)\n\t\tif err != nil {\n\t\t\tlog.Get().Panic(\"Cannot read result file\", f.Name(), err)\n\t\t}\n\t\tif !strings.HasSuffix(f.Name(), STATUS_SUFFIX) {\n\t\t\tif testFound == false && string(content) != \"1..0\\n\" {\n\t\t\t\ttestFound = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif string(content) != \"0\\n\" {\n\t\t\tlog.Get().Error(\"Failed test file : \", f.Name())\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tif cnt.args.NoTestFail && !testFound {\n\t\tlog.Get().Panic(\"No tests found\")\n\t}\n}\n\nfunc (cnt *Img) importAciBats() {\n\tif err := utils.ExecCmd(\"bash\", \"-c\", \"rkt image list --fields name --no-legend | grep -q \"+BATS_ACI); err != nil {\n\t\tcontent, _ := dist.Asset(\"dist\/bindata\/aci-bats.aci\")\n\t\tif err := ioutil.WriteFile(\"\/tmp\/aci-bats.aci\", content, 0644); err != nil {\n\t\t\tlog.Get().Panic(err)\n\t\t}\n\t\tutils.ExecCmd(\"rkt\", \"--insecure-skip-verify=true\", \"fetch\", \"\/tmp\/aci-bats.aci\")\n\t\tos.Remove(\"\/tmp\/aci-bats.aci\")\n\t}\n}\n\nfunc (cnt *Img) prepareTestAci() (*Img, error) {\n\tfiles, err := ioutil.ReadDir(cnt.path + PATH_TESTS)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_FILES, cnt.target+PATH_TESTS+PATH_FILES)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_ATTRIBUTES, cnt.target+PATH_TESTS+PATH_ATTRIBUTES)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_CONFD, cnt.target+PATH_TESTS+PATH_CONFD)\n\tutils.CopyDir(cnt.path+PATH_TESTS+PATH_RUNLEVELS, cnt.target+PATH_TESTS+PATH_RUNLEVELS)\n\n\tos.MkdirAll(cnt.target+PATH_TESTS+PATH_FILES+PATH_TESTS, 0777)\n\tfor _, f := range files {\n\t\tif !f.IsDir() {\n\t\t\tif err := utils.CopyFile(cnt.path+PATH_TESTS+\"\/\"+f.Name(), cnt.target+PATH_TESTS+PATH_FILES+PATH_TESTS+\"\/\"+f.Name()); err != nil {\n\t\t\t\tlog.Get().Panic(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tExecScript := strings.Replace(TEST_INIT_SCRIPT, \"%%COMMAND%%\", \"'\"+strings.Join(cnt.manifest.Aci.App.Exec, \"' '\")+\"'\", 1)\n\n\tioutil.WriteFile(cnt.target+PATH_TESTS+PATH_FILES+\"\/init.sh\", []byte(ExecScript), 0777)\n\n\tfullname, err := spec.NewACFullName(cnt.manifest.NameAndVersion.Name() + \"_test:\" + cnt.manifest.NameAndVersion.Version())\n\tif err != nil {\n\t\tlog.Get().Panic(err)\n\t}\n\n\tresultMountName, _ := types.NewACName(\"result\")\n\ttestAci, err := NewAciWithManifest(cnt.target+PATH_TESTS, cnt.args, spec.AciManifest{\n\t\tAci: spec.AciDefinition{\n\t\t\tApp: &spec.CntApp{\n\t\t\t\tExec: []string{\"\/init.sh\"},\n\t\t\t\tMountPoints: []types.MountPoint{{Path: PATH_RESULT, Name: *resultMountName}},\n\t\t\t},\n\t\t\tDependencies: []spec.ACFullname{BATS_ACI, cnt.manifest.NameAndVersion},\n\t\t},\n\t\tNameAndVersion: *fullname,\n\t})\n\tif err != nil {\n\t\tlog.Get().Panic(\"Cannot build test aci\", err)\n\t}\n\treturn testAci, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Jip J. Dekker <jip@dekker.li>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helpers\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ CleanPath returns a cleaned path from a given string relative\n\/\/ to the working directory unless explicitly absolute\nfunc CleanPath(path string) (string, error) {\n\tif path == \"\" {\n\t\t\/\/ No given path, use working directory\n\t\treturn os.Getwd()\n\t} else if filepath.IsAbs(path) {\n\t\t\/\/ Given path is absolute\n\t\treturn filepath.Clean(path), nil\n\t} else {\n\t\t\/\/ Given path is a relative path\n\t\tdir, err := os.Getwd()\n\t\tif err == nil {\n\t\t\treturn filepath.Join(dir, path), nil\n\t\t}\n\t\treturn \"\", err\n\t}\n}\n\n\/\/ Exists checks if a file or directory exists.\nfunc Exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif !os.IsNotExist(err) {\n\t\tlog.WithFields(log.Fields{\"error\": err, \"path\": path}).\n\t\t\tPanic(\"Unable to check path\")\n\t}\n\treturn false\n}\n\n\/\/ FindFileDir returns the path of the\nfunc FindFileDir(file string) (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\tfor path := wd; path != \".\"; path = filepath.Dir(path) {\n\t\tif Exists(filepath.Join(path, file)) {\n\t\t\treturn path, nil\n\t\t}\n\t}\n\treturn \"\", &os.PathError{\n\t\tPath: wd,\n\t\tErr: errors.New(\"directory containing \" + file + \" not found\"),\n\t}\n}\n<commit_msg>Adds helper to generate absolute paths<commit_after>\/\/ Copyright © 2016 Jip J. Dekker <jip@dekker.li>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helpers\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ CleanPath returns a cleaned path from a given string relative\n\/\/ to the working directory unless explicitly absolute\nfunc CleanPath(path string) (string, error) {\n\tif path == \"\" {\n\t\t\/\/ No given path, use working directory\n\t\treturn os.Getwd()\n\t} else if filepath.IsAbs(path) {\n\t\t\/\/ Given path is absolute\n\t\treturn filepath.Clean(path), nil\n\t} else {\n\t\t\/\/ Given path is a relative path\n\t\tdir, err := os.Getwd()\n\t\tif err == nil {\n\t\t\treturn filepath.Join(dir, path), nil\n\t\t}\n\t\treturn \"\", err\n\t}\n}\n\n\/\/ Exists checks if a file or directory exists.\nfunc Exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif !os.IsNotExist(err) {\n\t\tlog.WithFields(log.Fields{\"error\": err, \"path\": path}).\n\t\t\tPanic(\"Unable to check path\")\n\t}\n\treturn false\n}\n\n\/\/ FindFileDir returns the path of the\nfunc FindFileDir(file string) (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\tfor path := wd; path != \".\"; path = filepath.Dir(path) {\n\t\tif Exists(filepath.Join(path, file)) {\n\t\t\treturn path, nil\n\t\t}\n\t}\n\treturn \"\", &os.PathError{\n\t\tPath: wd,\n\t\tErr: errors.New(\"directory containing \" + file + \" not found\"),\n\t}\n}\n\n\/\/ AbsolutePath returns an the path if it is absolute or\n\/\/ otherwise filepath.Join(lib, path)\nfunc AbsolutePath(path, lib string) string {\n\tif filepath.IsAbs(path) {\n\t\treturn path\n\t}\n\treturn filepath.Join(lib, path)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n)\n\n\/\/ A read-write wrapper around a file. Unlike a read lease, this cannot be\n\/\/ revoked.\n\/\/\n\/\/ All methods are safe for concurrent access.\ntype ReadWriteLease interface {\n\t\/\/ Methods with semantics matching *os.File.\n\tio.ReadWriteSeeker\n\tio.ReaderAt\n\tio.WriterAt\n\tTruncate(size int64) (err error)\n\n\t\/\/ Return the current size of the underlying file.\n\tSize() (size int64, err error)\n\n\t\/\/ Downgrade to a read lease, releasing any resources pinned by this lease to\n\t\/\/ the pool that may be revoked, as with any read lease. After successfully\n\t\/\/ downgrading, this lease must not be used again.\n\tDowngrade() (rl ReadLease, err error)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Implementation\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype readWriteLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The leaser that issued this lease.\n\tleaser *FileLeaser\n\n\t\/\/ The underlying file. We serialize access using the mutex.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfile *os.File\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The cumulative number of bytes we have reported to the leaser using\n\t\/\/ FileLeaser.addReadWriteByteDelta. When the size changes, we report the\n\t\/\/ difference between the new size and this value.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\treportedSize int64\n}\n\nvar _ ReadWriteLease = &readWriteLease{}\n\nfunc newReadWriteLease(\n\tleaser *FileLeaser,\n\tfile *os.File) (rwl *readWriteLease) {\n\trwl = &readWriteLease{\n\t\tleaser: leaser,\n\t\tfile: file,\n\t}\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Read(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.Read(p)\n\treturn\n}\n\nfunc (rwl *readWriteLease) Write(p []byte) (n int, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\toff, err = rwl.file.Seek(offset, whence)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) ReadAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.ReadAt(p, off)\n\treturn\n}\n\nfunc (rwl *readWriteLease) WriteAt(p []byte, off int64) (n int, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\nfunc (rwl *readWriteLease) Truncate(size int64) (err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Size() (size int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Stat the file to get its size.\n\tfi, err := rwl.file.Stat()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Stat: %v\", err)\n\t\treturn\n\t}\n\n\tsize = fi.Size()\n\treturn\n}\n\nfunc (rwl *readWriteLease) Downgrade() (rl ReadLease, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n<commit_msg>More lock annotations.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n)\n\n\/\/ A read-write wrapper around a file. Unlike a read lease, this cannot be\n\/\/ revoked.\n\/\/\n\/\/ All methods are safe for concurrent access.\ntype ReadWriteLease interface {\n\t\/\/ Methods with semantics matching *os.File.\n\tio.ReadWriteSeeker\n\tio.ReaderAt\n\tio.WriterAt\n\tTruncate(size int64) (err error)\n\n\t\/\/ Return the current size of the underlying file.\n\tSize() (size int64, err error)\n\n\t\/\/ Downgrade to a read lease, releasing any resources pinned by this lease to\n\t\/\/ the pool that may be revoked, as with any read lease. After successfully\n\t\/\/ downgrading, this lease must not be used again.\n\tDowngrade() (rl ReadLease, err error)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Implementation\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype readWriteLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The leaser that issued this lease.\n\tleaser *FileLeaser\n\n\t\/\/ The underlying file. We serialize access using the mutex.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfile *os.File\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The cumulative number of bytes we have reported to the leaser using\n\t\/\/ FileLeaser.addReadWriteByteDelta. When the size changes, we report the\n\t\/\/ difference between the new size and this value.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\treportedSize int64\n}\n\nvar _ ReadWriteLease = &readWriteLease{}\n\nfunc newReadWriteLease(\n\tleaser *FileLeaser,\n\tfile *os.File) (rwl *readWriteLease) {\n\trwl = &readWriteLease{\n\t\tleaser: leaser,\n\t\tfile: file,\n\t}\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Read(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.Read(p)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Write(p []byte) (n int, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\toff, err = rwl.file.Seek(offset, whence)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) ReadAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.ReadAt(p, off)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) WriteAt(p []byte, off int64) (n int, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Truncate(size int64) (err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Size() (size int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Stat the file to get its size.\n\tfi, err := rwl.file.Stat()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Stat: %v\", err)\n\t\treturn\n\t}\n\n\tsize = fi.Size()\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Downgrade() (rl ReadLease, err error) {\n\terr = errors.New(\"TODO\")\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mattn\/go-tty\"\n\t\"github.com\/pkg\/browser\"\n)\n\ntype ActionType int\n\nconst (\n\tLABEL_AS_POSITIVE ActionType = iota\n\tLABEL_AS_NEGATIVE\n\tSAVE\n\tHELP\n\tSKIP\n\tEXIT\n)\n\nfunc rune2ActionType(r rune) ActionType {\n\tswitch r {\n\tcase 'p':\n\t\treturn LABEL_AS_POSITIVE\n\tcase 'n':\n\t\treturn LABEL_AS_NEGATIVE\n\tcase 's':\n\t\treturn SAVE\n\tcase 'h':\n\t\treturn HELP\n\tcase 'e':\n\t\treturn EXIT\n\tdefault:\n\t\treturn SKIP\n\t}\n}\n\nfunc input2ActionType() (ActionType, error) {\n\tt, err := tty.Open()\n\tdefer t.Close()\n\tif err != nil {\n\t\treturn EXIT, err\n\t}\n\tvar r rune\n\tfor r == 0 {\n\t\tr, err = t.ReadRune()\n\t\tif err != nil {\n\t\t\treturn SKIP, err\n\t\t}\n\t}\n\treturn rune2ActionType(r), nil\n}\n\nfunc NextExampleToBeAnnotated(model *Model, examples Examples) *Example {\n\tunlabeledExamples := model.SortByScore(examples)\n\tif len(unlabeledExamples) == 0 {\n\t\treturn nil\n\t}\n\te := unlabeledExamples[0]\n\tif e == nil {\n\t\treturn nil\n\t}\n\treturn e\n}\n\nvar ActionHelpDoc = `\np: Label this example as positive.\nn: Label this example as negative.\ns: Save additionally annotated examples in 'output-filename'.\nh: Show this help.\ne: Exit.\n`\n\nfunc doAnnotate(c *cli.Context) error {\n\tinputFilename := c.String(\"input-filename\")\n\toutputFilename := c.String(\"output-filename\")\n\topenUrl := c.Bool(\"open-url\")\n\tfilterStatusCodeOk := c.Bool(\"filter-status-code-ok\")\n\tshowActiveFeatures := c.Bool(\"show-active-features\")\n\n\tif inputFilename == \"\" {\n\t\t_ = cli.ShowCommandHelp(c, \"annotate\")\n\t\treturn cli.NewExitError(\"`input-filename` is a required field.\", 1)\n\t}\n\n\tif outputFilename == \"\" {\n\t\t_ = cli.ShowCommandHelp(c, \"annotate\")\n\t\treturn cli.NewExitError(\"`output-filename` is a required field.\", 1)\n\t}\n\n\tcacheFilename := CacheFilename\n\n\tcache, err := LoadCache(cacheFilename)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t}\n\n\texamples, err := ReadExamples(inputFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tAttachMetaData(cache, examples)\n\tif filterStatusCodeOk {\n\t\texamples = FilterStatusCodeOkExamples(examples)\n\t}\n\tmodel := TrainedModel(examples)\n\nannotationLoop:\n\tfor {\n\t\te := NextExampleToBeAnnotated(model, examples)\n\t\tfmt.Println(\"Label this example (Score: \" + fmt.Sprintf(\"%+0.03f\", e.Score) + \"): \" + e.Url + \" (\" + e.Title + \")\")\n\n\t\tif openUrl {\n\t\t\tbrowser.OpenURL(e.Url)\n\t\t}\n\t\tif showActiveFeatures {\n\t\t\tShowActiveFeatures(model, *e, 5)\n\t\t}\n\n\t\tact, err := input2ActionType()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch act {\n\t\tcase LABEL_AS_POSITIVE:\n\t\t\tfmt.Println(\"Labeled as positive\")\n\t\t\te.Annotate(POSITIVE)\n\t\tcase LABEL_AS_NEGATIVE:\n\t\t\tfmt.Println(\"Labeled as negative\")\n\t\t\te.Annotate(NEGATIVE)\n\t\tcase SKIP:\n\t\t\tfmt.Println(\"Skiped this example\")\n\t\t\tcontinue\n\t\tcase SAVE:\n\t\t\tfmt.Println(\"Saved labeld examples\")\n\t\t\tWriteExamples(examples, outputFilename)\n\t\tcase HELP:\n\t\t\tfmt.Println(ActionHelpDoc)\n\t\tcase EXIT:\n\t\t\tfmt.Println(\"EXIT\")\n\t\t\tbreak annotationLoop\n\t\tdefault:\n\t\t\tbreak annotationLoop\n\t\t}\n\t\tmodel = TrainedModel(examples)\n\t}\n\n\tWriteExamples(examples, outputFilename)\n\tcache.Save(cacheFilename)\n\n\treturn nil\n}\n\nvar commandAnnotate = cli.Command{\n\tName: \"annotate\",\n\tUsage: \"Annotate URLs\",\n\tDescription: `\nAnnotate URLs using active learning.\n`,\n\tAction: doAnnotate,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{Name: \"input-filename\"},\n\t\tcli.StringFlag{Name: \"output-filename\"},\n\t\tcli.BoolFlag{Name: \"open-url\", Usage: \"Open url in background\"},\n\t\tcli.BoolFlag{Name: \"filter-status-code-ok\", Usage: \"Use only examples with status code = 200\"},\n\t\tcli.BoolFlag{Name: \"show-active-features\"},\n\t},\n}\n\ntype FeatureWeightPair struct {\n\tFeature string\n\tWeight float64\n}\n\ntype FeatureWeightPairs []FeatureWeightPair\n\nfunc ShowActiveFeatures(model *Model, example Example, n int) {\n\tresult := FeatureWeightPairs{}\n\tfor _, f := range example.Fv {\n\t\tresult = append(result, FeatureWeightPair{f, model.GetAveragedWeight(f)})\n\t}\n\tsort.Sort(sort.Reverse(result))\n\n\tcnt := 0\n\tfor _, pair := range result {\n\t\tif cnt >= n {\n\t\t\tbreak\n\t\t}\n\t\tif (example.Score > 0.0 && pair.Weight > 0.0) || (example.Score < 0.0 && pair.Weight < 0.0) {\n\t\t\tfmt.Println(fmt.Sprintf(\"%+0.1f %s\", pair.Weight, pair.Feature))\n\t\t\tcnt++\n\t\t}\n\t}\n}\n\nfunc (slice FeatureWeightPairs) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice FeatureWeightPairs) Less(i, j int) bool {\n\treturn math.Abs(slice[i].Weight) < math.Abs(slice[j].Weight)\n}\n\nfunc (slice FeatureWeightPairs) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n<commit_msg>SKIPしても現状同じ事例が出てくるので、ヘルプを出すようにする<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mattn\/go-tty\"\n\t\"github.com\/pkg\/browser\"\n)\n\ntype ActionType int\n\nconst (\n\tLABEL_AS_POSITIVE ActionType = iota\n\tLABEL_AS_NEGATIVE\n\tSAVE\n\tHELP\n\tSKIP\n\tEXIT\n)\n\nfunc rune2ActionType(r rune) ActionType {\n\tswitch r {\n\tcase 'p':\n\t\treturn LABEL_AS_POSITIVE\n\tcase 'n':\n\t\treturn LABEL_AS_NEGATIVE\n\tcase 's':\n\t\treturn SAVE\n\tcase 'h':\n\t\treturn HELP\n\tcase 'e':\n\t\treturn EXIT\n\tdefault:\n\t\treturn HELP\n\t}\n}\n\nfunc input2ActionType() (ActionType, error) {\n\tt, err := tty.Open()\n\tdefer t.Close()\n\tif err != nil {\n\t\treturn EXIT, err\n\t}\n\tvar r rune\n\tfor r == 0 {\n\t\tr, err = t.ReadRune()\n\t\tif err != nil {\n\t\t\treturn HELP, err\n\t\t}\n\t}\n\treturn rune2ActionType(r), nil\n}\n\nfunc NextExampleToBeAnnotated(model *Model, examples Examples) *Example {\n\tunlabeledExamples := model.SortByScore(examples)\n\tif len(unlabeledExamples) == 0 {\n\t\treturn nil\n\t}\n\te := unlabeledExamples[0]\n\tif e == nil {\n\t\treturn nil\n\t}\n\treturn e\n}\n\nvar ActionHelpDoc = `\np: Label this example as positive.\nn: Label this example as negative.\ns: Save additionally annotated examples in 'output-filename'.\nh: Show this help.\ne: Exit.\n`\n\nfunc doAnnotate(c *cli.Context) error {\n\tinputFilename := c.String(\"input-filename\")\n\toutputFilename := c.String(\"output-filename\")\n\topenUrl := c.Bool(\"open-url\")\n\tfilterStatusCodeOk := c.Bool(\"filter-status-code-ok\")\n\tshowActiveFeatures := c.Bool(\"show-active-features\")\n\n\tif inputFilename == \"\" {\n\t\t_ = cli.ShowCommandHelp(c, \"annotate\")\n\t\treturn cli.NewExitError(\"`input-filename` is a required field.\", 1)\n\t}\n\n\tif outputFilename == \"\" {\n\t\t_ = cli.ShowCommandHelp(c, \"annotate\")\n\t\treturn cli.NewExitError(\"`output-filename` is a required field.\", 1)\n\t}\n\n\tcacheFilename := CacheFilename\n\n\tcache, err := LoadCache(cacheFilename)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t}\n\n\texamples, err := ReadExamples(inputFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tAttachMetaData(cache, examples)\n\tif filterStatusCodeOk {\n\t\texamples = FilterStatusCodeOkExamples(examples)\n\t}\n\tmodel := TrainedModel(examples)\n\nannotationLoop:\n\tfor {\n\t\te := NextExampleToBeAnnotated(model, examples)\n\t\tfmt.Println(\"Label this example (Score: \" + fmt.Sprintf(\"%+0.03f\", e.Score) + \"): \" + e.Url + \" (\" + e.Title + \")\")\n\n\t\tif openUrl {\n\t\t\tbrowser.OpenURL(e.Url)\n\t\t}\n\t\tif showActiveFeatures {\n\t\t\tShowActiveFeatures(model, *e, 5)\n\t\t}\n\n\t\tact, err := input2ActionType()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch act {\n\t\tcase LABEL_AS_POSITIVE:\n\t\t\tfmt.Println(\"Labeled as positive\")\n\t\t\te.Annotate(POSITIVE)\n\t\tcase LABEL_AS_NEGATIVE:\n\t\t\tfmt.Println(\"Labeled as negative\")\n\t\t\te.Annotate(NEGATIVE)\n\t\tcase SKIP:\n\t\t\tfmt.Println(\"Skiped this example\")\n\t\t\tcontinue\n\t\tcase SAVE:\n\t\t\tfmt.Println(\"Saved labeld examples\")\n\t\t\tWriteExamples(examples, outputFilename)\n\t\tcase HELP:\n\t\t\tfmt.Println(ActionHelpDoc)\n\t\tcase EXIT:\n\t\t\tfmt.Println(\"EXIT\")\n\t\t\tbreak annotationLoop\n\t\tdefault:\n\t\t\tbreak annotationLoop\n\t\t}\n\t\tmodel = TrainedModel(examples)\n\t}\n\n\tWriteExamples(examples, outputFilename)\n\tcache.Save(cacheFilename)\n\n\treturn nil\n}\n\nvar commandAnnotate = cli.Command{\n\tName: \"annotate\",\n\tUsage: \"Annotate URLs\",\n\tDescription: `\nAnnotate URLs using active learning.\n`,\n\tAction: doAnnotate,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{Name: \"input-filename\"},\n\t\tcli.StringFlag{Name: \"output-filename\"},\n\t\tcli.BoolFlag{Name: \"open-url\", Usage: \"Open url in background\"},\n\t\tcli.BoolFlag{Name: \"filter-status-code-ok\", Usage: \"Use only examples with status code = 200\"},\n\t\tcli.BoolFlag{Name: \"show-active-features\"},\n\t},\n}\n\ntype FeatureWeightPair struct {\n\tFeature string\n\tWeight float64\n}\n\ntype FeatureWeightPairs []FeatureWeightPair\n\nfunc ShowActiveFeatures(model *Model, example Example, n int) {\n\tresult := FeatureWeightPairs{}\n\tfor _, f := range example.Fv {\n\t\tresult = append(result, FeatureWeightPair{f, model.GetAveragedWeight(f)})\n\t}\n\tsort.Sort(sort.Reverse(result))\n\n\tcnt := 0\n\tfor _, pair := range result {\n\t\tif cnt >= n {\n\t\t\tbreak\n\t\t}\n\t\tif (example.Score > 0.0 && pair.Weight > 0.0) || (example.Score < 0.0 && pair.Weight < 0.0) {\n\t\t\tfmt.Println(fmt.Sprintf(\"%+0.1f %s\", pair.Weight, pair.Feature))\n\t\t\tcnt++\n\t\t}\n\t}\n}\n\nfunc (slice FeatureWeightPairs) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice FeatureWeightPairs) Less(i, j int) bool {\n\treturn math.Abs(slice[i].Weight) < math.Abs(slice[j].Weight)\n}\n\nfunc (slice FeatureWeightPairs) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package lfsapi\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/git-lfs\/git-lfs\/config\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/rubyist\/tracerx\"\n)\n\ntype Access string\n\nconst (\n\tNoneAccess Access = \"none\"\n\tBasicAccess Access = \"basic\"\n\tPrivateAccess Access = \"private\"\n\tNegotiateAccess Access = \"negotiate\"\n\tNTLMAccess Access = \"ntlm\"\n\temptyAccess Access = \"\"\n\tdefaultRemote = \"origin\"\n)\n\ntype EndpointFinder interface {\n\tNewEndpointFromCloneURL(rawurl string) Endpoint\n\tNewEndpoint(rawurl string) Endpoint\n\tEndpoint(operation, remote string) Endpoint\n\tRemoteEndpoint(operation, remote string) Endpoint\n\tGitRemoteURL(remote string, forpush bool) string\n\tAccessFor(rawurl string) Access\n\tSetAccess(rawurl string, access Access)\n\tGitProtocol() string\n}\n\ntype endpointGitFinder struct {\n\tgit Env\n\tgitProtocol string\n\n\taliasMu sync.Mutex\n\taliases map[string]string\n\n\taccessMu sync.Mutex\n\turlAccess map[string]Access\n\turlConfig *config.URLConfig\n}\n\nfunc NewEndpointFinder(git Env) EndpointFinder {\n\te := &endpointGitFinder{\n\t\tgitProtocol: \"https\",\n\t\taliases: make(map[string]string),\n\t\turlAccess: make(map[string]Access),\n\t}\n\n\tif git != nil {\n\t\te.git = git\n\t\te.urlConfig = config.NewURLConfig(e.git)\n\t\tif v, ok := git.Get(\"lfs.gitprotocol\"); ok {\n\t\t\te.gitProtocol = v\n\t\t}\n\t\tinitAliases(e, git)\n\t}\n\n\treturn e\n}\n\nfunc (e *endpointGitFinder) Endpoint(operation, remote string) Endpoint {\n\tep := e.getEndpoint(operation, remote)\n\tep.Operation = operation\n\treturn ep\n}\n\nfunc (e *endpointGitFinder) getEndpoint(operation, remote string) Endpoint {\n\tif e.git == nil {\n\t\treturn Endpoint{}\n\t}\n\n\tif operation == \"upload\" {\n\t\tif url, ok := e.git.Get(\"lfs.pushurl\"); ok {\n\t\t\treturn e.NewEndpoint(url)\n\t\t}\n\t}\n\n\tif url, ok := e.git.Get(\"lfs.url\"); ok {\n\t\treturn e.NewEndpoint(url)\n\t}\n\n\tif len(remote) > 0 && remote != defaultRemote {\n\t\tif e := e.RemoteEndpoint(operation, remote); len(e.Url) > 0 {\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn e.RemoteEndpoint(operation, defaultRemote)\n}\n\nfunc (e *endpointGitFinder) RemoteEndpoint(operation, remote string) Endpoint {\n\tif e.git == nil {\n\t\treturn Endpoint{}\n\t}\n\n\tif len(remote) == 0 {\n\t\tremote = defaultRemote\n\t}\n\n\t\/\/ Support separate push URL if specified and pushing\n\tif operation == \"upload\" {\n\t\tif url, ok := e.git.Get(\"remote.\" + remote + \".lfspushurl\"); ok {\n\t\t\treturn e.NewEndpoint(url)\n\t\t}\n\t}\n\tif url, ok := e.git.Get(\"remote.\" + remote + \".lfsurl\"); ok {\n\t\treturn e.NewEndpoint(url)\n\t}\n\n\t\/\/ finally fall back on git remote url (also supports pushurl)\n\tif url := e.GitRemoteURL(remote, operation == \"upload\"); url != \"\" {\n\t\treturn e.NewEndpointFromCloneURL(url)\n\t}\n\n\treturn Endpoint{}\n}\n\nfunc (e *endpointGitFinder) GitRemoteURL(remote string, forpush bool) string {\n\tif e.git != nil {\n\t\tif forpush {\n\t\t\tif u, ok := e.git.Get(\"remote.\" + remote + \".pushurl\"); ok {\n\t\t\t\treturn u\n\t\t\t}\n\t\t}\n\n\t\tif u, ok := e.git.Get(\"remote.\" + remote + \".url\"); ok {\n\t\t\treturn u\n\t\t}\n\t}\n\n\tif err := git.ValidateRemote(remote); err == nil {\n\t\treturn remote\n\t}\n\n\treturn \"\"\n}\n\nfunc (e *endpointGitFinder) NewEndpointFromCloneURL(rawurl string) Endpoint {\n\tep := e.NewEndpoint(rawurl)\n\tif ep.Url == UrlUnknown {\n\t\treturn ep\n\t}\n\n\tif strings.HasSuffix(rawurl, \"\/\") {\n\t\tep.Url = rawurl[0 : len(rawurl)-1]\n\t}\n\n\t\/\/ When using main remote URL for HTTP, append info\/lfs\n\tif path.Ext(ep.Url) == \".git\" {\n\t\tep.Url += \"\/info\/lfs\"\n\t} else {\n\t\tep.Url += \".git\/info\/lfs\"\n\t}\n\n\treturn ep\n}\n\nfunc (e *endpointGitFinder) NewEndpoint(rawurl string) Endpoint {\n\trawurl = e.ReplaceUrlAlias(rawurl)\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn endpointFromBareSshUrl(rawurl)\n\t}\n\n\tswitch u.Scheme {\n\tcase \"ssh\":\n\t\treturn endpointFromSshUrl(u)\n\tcase \"http\", \"https\":\n\t\treturn endpointFromHttpUrl(u)\n\tcase \"git\":\n\t\treturn endpointFromGitUrl(u, e)\n\tcase \"\":\n\t\treturn endpointFromBareSshUrl(u.String())\n\tdefault:\n\t\t\/\/ Just passthrough to preserve\n\t\treturn Endpoint{Url: rawurl}\n\t}\n}\n\nfunc (e *endpointGitFinder) AccessFor(rawurl string) Access {\n\tif e.git == nil {\n\t\treturn NoneAccess\n\t}\n\n\taccessurl := urlWithoutAuth(rawurl)\n\n\te.accessMu.Lock()\n\tdefer e.accessMu.Unlock()\n\n\tif cached, ok := e.urlAccess[accessurl]; ok {\n\t\treturn cached\n\t}\n\n\tkey := fmt.Sprintf(\"lfs.%s.access\", accessurl)\n\te.urlAccess[accessurl] = e.fetchGitAccess(key)\n\treturn e.urlAccess[accessurl]\n}\n\nfunc (e *endpointGitFinder) SetAccess(rawurl string, access Access) {\n\taccessurl := urlWithoutAuth(rawurl)\n\tkey := fmt.Sprintf(\"lfs.%s.access\", accessurl)\n\ttracerx.Printf(\"setting repository access to %s\", access)\n\n\te.accessMu.Lock()\n\tdefer e.accessMu.Unlock()\n\n\tswitch access {\n\tcase emptyAccess, NoneAccess:\n\t\tgit.Config.UnsetLocalKey(\"\", key)\n\t\te.urlAccess[accessurl] = NoneAccess\n\tdefault:\n\t\tgit.Config.SetLocal(\"\", key, string(access))\n\t\te.urlAccess[accessurl] = access\n\t}\n}\n\nfunc urlWithoutAuth(rawurl string) string {\n\tif !strings.Contains(rawurl, \"@\") {\n\t\treturn rawurl\n\t}\n\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error parsing URL %q: %s\", rawurl, err)\n\t\treturn rawurl\n\t}\n\n\tu.User = nil\n\treturn u.String()\n}\n\nfunc (e *endpointGitFinder) fetchGitAccess(key string) Access {\n\tif v, _ := e.git.Get(key); len(v) > 0 {\n\t\taccess := Access(strings.ToLower(v))\n\t\tif access == PrivateAccess {\n\t\t\treturn BasicAccess\n\t\t}\n\t\treturn access\n\t}\n\treturn NoneAccess\n}\n\nfunc (e *endpointGitFinder) GitProtocol() string {\n\treturn e.gitProtocol\n}\n\n\/\/ ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git\n\/\/ config setting. If multiple aliases match, use the longest one.\n\/\/ See https:\/\/git-scm.com\/docs\/git-config for Git's docs.\nfunc (e *endpointGitFinder) ReplaceUrlAlias(rawurl string) string {\n\te.aliasMu.Lock()\n\tdefer e.aliasMu.Unlock()\n\n\tvar longestalias string\n\tfor alias, _ := range e.aliases {\n\t\tif !strings.HasPrefix(rawurl, alias) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif longestalias < alias {\n\t\t\tlongestalias = alias\n\t\t}\n\t}\n\n\tif len(longestalias) > 0 {\n\t\treturn e.aliases[longestalias] + rawurl[len(longestalias):]\n\t}\n\n\treturn rawurl\n}\n\nfunc initAliases(e *endpointGitFinder, git Env) {\n\tprefix := \"url.\"\n\tsuffix := \".insteadof\"\n\tfor gitkey, gitval := range git.All() {\n\t\tif len(gitval) == 0 || !(strings.HasPrefix(gitkey, prefix) && strings.HasSuffix(gitkey, suffix)) {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := e.aliases[gitval[len(gitval)-1]]; ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"WARNING: Multiple 'url.*.insteadof' keys with the same alias: %q\\n\", gitval)\n\t\t}\n\t\te.aliases[gitval[len(gitval)-1]] = gitkey[len(prefix) : len(gitkey)-len(suffix)]\n\t}\n}\n<commit_msg>lfsapi\/endpoint_finder: use `*URLConfig` to do access lookups<commit_after>package lfsapi\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/git-lfs\/git-lfs\/config\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/rubyist\/tracerx\"\n)\n\ntype Access string\n\nconst (\n\tNoneAccess Access = \"none\"\n\tBasicAccess Access = \"basic\"\n\tPrivateAccess Access = \"private\"\n\tNegotiateAccess Access = \"negotiate\"\n\tNTLMAccess Access = \"ntlm\"\n\temptyAccess Access = \"\"\n\tdefaultRemote = \"origin\"\n)\n\ntype EndpointFinder interface {\n\tNewEndpointFromCloneURL(rawurl string) Endpoint\n\tNewEndpoint(rawurl string) Endpoint\n\tEndpoint(operation, remote string) Endpoint\n\tRemoteEndpoint(operation, remote string) Endpoint\n\tGitRemoteURL(remote string, forpush bool) string\n\tAccessFor(rawurl string) Access\n\tSetAccess(rawurl string, access Access)\n\tGitProtocol() string\n}\n\ntype endpointGitFinder struct {\n\tgit Env\n\tgitProtocol string\n\n\taliasMu sync.Mutex\n\taliases map[string]string\n\n\taccessMu sync.Mutex\n\turlAccess map[string]Access\n\turlConfig *config.URLConfig\n}\n\nfunc NewEndpointFinder(git Env) EndpointFinder {\n\te := &endpointGitFinder{\n\t\tgitProtocol: \"https\",\n\t\taliases: make(map[string]string),\n\t\turlAccess: make(map[string]Access),\n\t}\n\n\tif git != nil {\n\t\te.git = git\n\t\te.urlConfig = config.NewURLConfig(e.git)\n\t\tif v, ok := git.Get(\"lfs.gitprotocol\"); ok {\n\t\t\te.gitProtocol = v\n\t\t}\n\t\tinitAliases(e, git)\n\t}\n\n\treturn e\n}\n\nfunc (e *endpointGitFinder) Endpoint(operation, remote string) Endpoint {\n\tep := e.getEndpoint(operation, remote)\n\tep.Operation = operation\n\treturn ep\n}\n\nfunc (e *endpointGitFinder) getEndpoint(operation, remote string) Endpoint {\n\tif e.git == nil {\n\t\treturn Endpoint{}\n\t}\n\n\tif operation == \"upload\" {\n\t\tif url, ok := e.git.Get(\"lfs.pushurl\"); ok {\n\t\t\treturn e.NewEndpoint(url)\n\t\t}\n\t}\n\n\tif url, ok := e.git.Get(\"lfs.url\"); ok {\n\t\treturn e.NewEndpoint(url)\n\t}\n\n\tif len(remote) > 0 && remote != defaultRemote {\n\t\tif e := e.RemoteEndpoint(operation, remote); len(e.Url) > 0 {\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn e.RemoteEndpoint(operation, defaultRemote)\n}\n\nfunc (e *endpointGitFinder) RemoteEndpoint(operation, remote string) Endpoint {\n\tif e.git == nil {\n\t\treturn Endpoint{}\n\t}\n\n\tif len(remote) == 0 {\n\t\tremote = defaultRemote\n\t}\n\n\t\/\/ Support separate push URL if specified and pushing\n\tif operation == \"upload\" {\n\t\tif url, ok := e.git.Get(\"remote.\" + remote + \".lfspushurl\"); ok {\n\t\t\treturn e.NewEndpoint(url)\n\t\t}\n\t}\n\tif url, ok := e.git.Get(\"remote.\" + remote + \".lfsurl\"); ok {\n\t\treturn e.NewEndpoint(url)\n\t}\n\n\t\/\/ finally fall back on git remote url (also supports pushurl)\n\tif url := e.GitRemoteURL(remote, operation == \"upload\"); url != \"\" {\n\t\treturn e.NewEndpointFromCloneURL(url)\n\t}\n\n\treturn Endpoint{}\n}\n\nfunc (e *endpointGitFinder) GitRemoteURL(remote string, forpush bool) string {\n\tif e.git != nil {\n\t\tif forpush {\n\t\t\tif u, ok := e.git.Get(\"remote.\" + remote + \".pushurl\"); ok {\n\t\t\t\treturn u\n\t\t\t}\n\t\t}\n\n\t\tif u, ok := e.git.Get(\"remote.\" + remote + \".url\"); ok {\n\t\t\treturn u\n\t\t}\n\t}\n\n\tif err := git.ValidateRemote(remote); err == nil {\n\t\treturn remote\n\t}\n\n\treturn \"\"\n}\n\nfunc (e *endpointGitFinder) NewEndpointFromCloneURL(rawurl string) Endpoint {\n\tep := e.NewEndpoint(rawurl)\n\tif ep.Url == UrlUnknown {\n\t\treturn ep\n\t}\n\n\tif strings.HasSuffix(rawurl, \"\/\") {\n\t\tep.Url = rawurl[0 : len(rawurl)-1]\n\t}\n\n\t\/\/ When using main remote URL for HTTP, append info\/lfs\n\tif path.Ext(ep.Url) == \".git\" {\n\t\tep.Url += \"\/info\/lfs\"\n\t} else {\n\t\tep.Url += \".git\/info\/lfs\"\n\t}\n\n\treturn ep\n}\n\nfunc (e *endpointGitFinder) NewEndpoint(rawurl string) Endpoint {\n\trawurl = e.ReplaceUrlAlias(rawurl)\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn endpointFromBareSshUrl(rawurl)\n\t}\n\n\tswitch u.Scheme {\n\tcase \"ssh\":\n\t\treturn endpointFromSshUrl(u)\n\tcase \"http\", \"https\":\n\t\treturn endpointFromHttpUrl(u)\n\tcase \"git\":\n\t\treturn endpointFromGitUrl(u, e)\n\tcase \"\":\n\t\treturn endpointFromBareSshUrl(u.String())\n\tdefault:\n\t\t\/\/ Just passthrough to preserve\n\t\treturn Endpoint{Url: rawurl}\n\t}\n}\n\nfunc (e *endpointGitFinder) AccessFor(rawurl string) Access {\n\tif e.git == nil {\n\t\treturn NoneAccess\n\t}\n\n\taccessurl := urlWithoutAuth(rawurl)\n\n\te.accessMu.Lock()\n\tdefer e.accessMu.Unlock()\n\n\tif cached, ok := e.urlAccess[accessurl]; ok {\n\t\treturn cached\n\t}\n\n\te.urlAccess[accessurl] = e.fetchGitAccess(accessurl)\n\treturn e.urlAccess[accessurl]\n}\n\nfunc (e *endpointGitFinder) SetAccess(rawurl string, access Access) {\n\taccessurl := urlWithoutAuth(rawurl)\n\tkey := fmt.Sprintf(\"lfs.%s.access\", accessurl)\n\ttracerx.Printf(\"setting repository access to %s\", access)\n\n\te.accessMu.Lock()\n\tdefer e.accessMu.Unlock()\n\n\tswitch access {\n\tcase emptyAccess, NoneAccess:\n\t\tgit.Config.UnsetLocalKey(\"\", key)\n\t\te.urlAccess[accessurl] = NoneAccess\n\tdefault:\n\t\tgit.Config.SetLocal(\"\", key, string(access))\n\t\te.urlAccess[accessurl] = access\n\t}\n}\n\nfunc urlWithoutAuth(rawurl string) string {\n\tif !strings.Contains(rawurl, \"@\") {\n\t\treturn rawurl\n\t}\n\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error parsing URL %q: %s\", rawurl, err)\n\t\treturn rawurl\n\t}\n\n\tu.User = nil\n\treturn u.String()\n}\n\nfunc (e *endpointGitFinder) fetchGitAccess(rawurl string) Access {\n\tif v, _ := e.urlConfig.Get(\"lfs\", rawurl, \"access\"); len(v) > 0 {\n\t\taccess := Access(strings.ToLower(v))\n\t\tif access == PrivateAccess {\n\t\t\treturn BasicAccess\n\t\t}\n\t\treturn access\n\t}\n\treturn NoneAccess\n}\n\nfunc (e *endpointGitFinder) GitProtocol() string {\n\treturn e.gitProtocol\n}\n\n\/\/ ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git\n\/\/ config setting. If multiple aliases match, use the longest one.\n\/\/ See https:\/\/git-scm.com\/docs\/git-config for Git's docs.\nfunc (e *endpointGitFinder) ReplaceUrlAlias(rawurl string) string {\n\te.aliasMu.Lock()\n\tdefer e.aliasMu.Unlock()\n\n\tvar longestalias string\n\tfor alias, _ := range e.aliases {\n\t\tif !strings.HasPrefix(rawurl, alias) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif longestalias < alias {\n\t\t\tlongestalias = alias\n\t\t}\n\t}\n\n\tif len(longestalias) > 0 {\n\t\treturn e.aliases[longestalias] + rawurl[len(longestalias):]\n\t}\n\n\treturn rawurl\n}\n\nfunc initAliases(e *endpointGitFinder, git Env) {\n\tprefix := \"url.\"\n\tsuffix := \".insteadof\"\n\tfor gitkey, gitval := range git.All() {\n\t\tif len(gitval) == 0 || !(strings.HasPrefix(gitkey, prefix) && strings.HasSuffix(gitkey, suffix)) {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := e.aliases[gitval[len(gitval)-1]]; ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"WARNING: Multiple 'url.*.insteadof' keys with the same alias: %q\\n\", gitval)\n\t\t}\n\t\te.aliases[gitval[len(gitval)-1]] = gitkey[len(prefix) : len(gitkey)-len(suffix)]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Validator struct {\n\troot string \/\/ work dir\n\tcode []Code \/\/ list of submissions\n}\n\nfunc NewValidator(dir string, train bool) (Validator, error) {\n\tvar err error\n\tv := Validator{\n\t\troot: dir,\n\t\tcode: make([]Code, 0, 2),\n\t}\n\n\t\/\/ find all 'higgsml-pred' executables\n\texes := make([]string, 0)\n\terr = filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.Contains(strings.ToLower(path), runscript) {\n\t\t\texes = append(exes, path)\n\t\t}\n\t\treturn err\n\t})\n\n\tif len(exes) <= 0 {\n\t\treturn v, fmt.Errorf(\"hml: could not find any suitable executable in zip-file\")\n\t}\n\n\tif len(exes) > 2 {\n\t\treturn v, fmt.Errorf(\"hml: too many higgsml-pred executables (got=%d, max=%d)\", len(exes), 2)\n\t}\n\n\tfor _, exe := range exes {\n\t\tdir := filepath.Dir(exe)\n\t\tcode, err := NewCode(dir, train)\n\t\tif err != nil {\n\t\t\treturn v, err\n\t\t}\n\t\tcode.Name = dir[len(v.root):]\n\t\tif code.Name[0] == '\/' {\n\t\t\tcode.Name = code.Name[1:]\n\t\t}\n\t\tv.code = append(v.code, code)\n\t}\n\n\treturn v, err\n}\n\nfunc (v Validator) Run() error {\n\tvar err error\n\n\tfor i, code := range v.code {\n\t\tstart := time.Now()\n\t\tprintf(\"\\n=== code submission #%d\/%d (%s)...\\n\", i+1, len(v.code), code.Name)\n\t\terr = code.run()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprintf(\"=== code submission #%d\/%d (%s)... [ok] (delta=%v)\\n\",\n\t\t\ti+1, len(v.code), code.Name,\n\t\t\ttime.Since(start),\n\t\t)\n\t}\n\n\treturn err\n}\n\ntype Code struct {\n\tRoot string \/\/ directory containing sources\/binaries\n\tName string \/\/ name of this code submission (e.g. team\/code)\n\tTrain string \/\/ path to training executable\n\tPred string \/\/ path to prediction executable\n\tTrained string \/\/ path to trained data parameters\n\n\tLicense string \/\/ path to LICENSE file\n\tReadme string \/\/ path to README file\n\n\tDoTraining bool\n\n\ttrainfile string \/\/ path to training.csv file\n\ttestfile string \/\/ path to test.csv file\n}\n\nfunc NewCode(dir string, train bool) (Code, error) {\n\tvar err error\n\n\tcode := Code{\n\t\tRoot: dir,\n\t\tName: filepath.Base(dir),\n\t\tTrained: \"trained.dat\",\n\t\tDoTraining: train,\n\t\ttrainfile: trainfile,\n\t\ttestfile: testfile,\n\t}\n\n\t\/\/ FIXME: handle multiple-submissions zipfiles\n\t\/\/ presumably: 1 directory per submission.\n\n\texes := make([]string, 0)\n\t\/\/ find executables\n\terr = filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.Contains(strings.ToLower(path), \"readme\") {\n\t\t\tcode.Readme = path\n\t\t}\n\n\t\tif strings.Contains(strings.ToLower(path), \"license\") {\n\t\t\tcode.License = path\n\t\t}\n\n\t\tif strings.Contains(strings.ToLower(path), \"trained.dat\") {\n\t\t\tcode.Trained = path\n\t\t}\n\n\t\t\/\/ FIXME: better way ?\n\t\tif !strings.Contains(fi.Mode().String(), \"x\") {\n\t\t\treturn nil\n\t\t}\n\t\texes = append(exes, path)\n\t\t\/\/ printf(\">>> %s\\n\", path)\n\t\tif strings.HasSuffix(strings.ToLower(path), trainscript) {\n\t\t\tcode.Train = path\n\t\t}\n\t\tif strings.HasSuffix(strings.ToLower(path), runscript) {\n\t\t\tcode.Pred = path\n\t\t}\n\t\treturn err\n\t})\n\n\tif len(exes) <= 0 {\n\t\treturn code, fmt.Errorf(\"hml: could not find any suitable executable in zip-file\")\n\t}\n\n\tif code.Train == \"\" && code.Pred == \"\" {\n\t\t\/\/ take first one\n\t\tcode.Train = exes[0]\n\t\tcode.Pred = exes[0]\n\t}\n\n\tif code.Train == \"\" && code.Pred != \"\" {\n\t\tcode.Train = code.Pred\n\t}\n\n\tif code.Train != \"\" && code.Pred == \"\" {\n\t\tcode.Pred = code.Train\n\t}\n\n\tif code.License == \"\" {\n\t\treturn code, fmt.Errorf(\"hml: could not find a LICENSE file under [%s]\", code.Root)\n\t}\n\n\treturn code, err\n}\n\nfunc (code Code) run() error {\n\tvar err error\n\n\tdir := filepath.Join(code.Root, \".higgsml-work\")\n\terr = os.MkdirAll(dir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code.DoTraining {\n\t\terr = code.run_training(dir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprintf(\"\\n\")\n\t}\n\n\terr = code.run_pred(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\nfunc (code Code) run_training(dir string) error {\n\tvar err error\n\terrch := make(chan error)\n\n\tprintf(\"::: run training...\\n\")\n\n\tcmd := exec.Command(code.Train, code.trainfile, pdir(dir, \"trained.dat\"))\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Dir = code.Root\n\n\tstart := time.Now()\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\terrch <- err\n\t\t\treturn\n\t\t}\n\t\terrch <- cmd.Wait()\n\t}()\n\n\tduration := 1 * time.Hour\n\tselect {\n\tcase <-time.After(duration):\n\t\tcmd.Process.Kill()\n\t\treturn fmt.Errorf(\"hml: training timed out (%v)\\n\", duration)\n\tcase err = <-errch:\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tprintf(\"::: run training... [ERR] (delta=%v)\\n\", time.Since(start))\n\t\treturn err\n\t}\n\n\tprintf(\"::: run training... [ok] (delta=%v)\\n\", time.Since(start))\n\treturn err\n}\n\nfunc (code Code) run_pred(dir string) error {\n\tvar err error\n\terrch := make(chan error)\n\n\tprintf(\"::: run prediction...\\n\")\n\n\ttrained := code.Trained\n\tif code.DoTraining {\n\t\t\/\/ if we ran the training, then use that file instead.\n\t\ttrained = pdir(dir, \"trained.dat\")\n\t}\n\n\tcmd := exec.Command(code.Pred, code.testfile, trained, pdir(dir, \"scores_test.csv\"))\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Dir = code.Root\n\n\tstart := time.Now()\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\terrch <- err\n\t\t\treturn\n\t\t}\n\t\terrch <- cmd.Wait()\n\t}()\n\n\tduration := 1 * time.Hour\n\tselect {\n\tcase <-time.After(duration):\n\t\tcmd.Process.Kill()\n\t\treturn fmt.Errorf(\"hml: prediction timed out (%v)\\n\", duration)\n\tcase err = <-errch:\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tprintf(\"::: run prediction... [ERR] (delta=%v)\\n\", time.Since(start))\n\t\treturn err\n\t}\n\n\tprintf(\"::: run prediction... [ok] (delta=%v)\\n\", time.Since(start))\n\treturn err\n}\n\nfunc pdir(dirs ...string) string {\n\treturn filepath.Join(dirs...)\n}\n<commit_msg>hml-validate: use constants<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Validator struct {\n\troot string \/\/ work dir\n\tcode []Code \/\/ list of submissions\n}\n\nfunc NewValidator(dir string, train bool) (Validator, error) {\n\tvar err error\n\tv := Validator{\n\t\troot: dir,\n\t\tcode: make([]Code, 0, 2),\n\t}\n\n\t\/\/ find all 'higgsml-pred' executables\n\texes := make([]string, 0)\n\terr = filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.Contains(strings.ToLower(path), runscript) {\n\t\t\texes = append(exes, path)\n\t\t}\n\t\treturn err\n\t})\n\n\tif len(exes) <= 0 {\n\t\treturn v, fmt.Errorf(\"hml: could not find any suitable executable in zip-file\")\n\t}\n\n\tif len(exes) > 2 {\n\t\treturn v, fmt.Errorf(\"hml: too many higgsml-pred executables (got=%d, max=%d)\", len(exes), 2)\n\t}\n\n\tfor _, exe := range exes {\n\t\tdir := filepath.Dir(exe)\n\t\tcode, err := NewCode(dir, train)\n\t\tif err != nil {\n\t\t\treturn v, err\n\t\t}\n\t\tcode.Name = dir[len(v.root):]\n\t\tif code.Name[0] == '\/' {\n\t\t\tcode.Name = code.Name[1:]\n\t\t}\n\t\tv.code = append(v.code, code)\n\t}\n\n\treturn v, err\n}\n\nfunc (v Validator) Run() error {\n\tvar err error\n\n\tfor i, code := range v.code {\n\t\tstart := time.Now()\n\t\tprintf(\"\\n=== code submission #%d\/%d (%s)...\\n\", i+1, len(v.code), code.Name)\n\t\terr = code.run()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprintf(\"=== code submission #%d\/%d (%s)... [ok] (delta=%v)\\n\",\n\t\t\ti+1, len(v.code), code.Name,\n\t\t\ttime.Since(start),\n\t\t)\n\t}\n\n\treturn err\n}\n\ntype Code struct {\n\tRoot string \/\/ directory containing sources\/binaries\n\tName string \/\/ name of this code submission (e.g. team\/code)\n\tTrain string \/\/ path to training executable\n\tPred string \/\/ path to prediction executable\n\tTrained string \/\/ path to trained data parameters\n\n\tLicense string \/\/ path to LICENSE file\n\tReadme string \/\/ path to README file\n\n\tDoTraining bool\n\n\ttrainfile string \/\/ path to training.csv file\n\ttestfile string \/\/ path to test.csv file\n}\n\nfunc NewCode(dir string, train bool) (Code, error) {\n\tvar err error\n\n\tcode := Code{\n\t\tRoot: dir,\n\t\tName: filepath.Base(dir),\n\t\tTrained: trainedname,\n\t\tDoTraining: train,\n\t\ttrainfile: trainfile,\n\t\ttestfile: testfile,\n\t}\n\n\t\/\/ FIXME: handle multiple-submissions zipfiles\n\t\/\/ presumably: 1 directory per submission.\n\n\texes := make([]string, 0)\n\t\/\/ find executables\n\terr = filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.Contains(strings.ToLower(path), \"readme\") {\n\t\t\tcode.Readme = path\n\t\t}\n\n\t\tif strings.Contains(strings.ToLower(path), \"license\") {\n\t\t\tcode.License = path\n\t\t}\n\n\t\tif strings.Contains(strings.ToLower(path), trainedname) {\n\t\t\tcode.Trained = path\n\t\t}\n\n\t\t\/\/ FIXME: better way ?\n\t\tif !strings.Contains(fi.Mode().String(), \"x\") {\n\t\t\treturn nil\n\t\t}\n\t\texes = append(exes, path)\n\t\t\/\/ printf(\">>> %s\\n\", path)\n\t\tif strings.HasSuffix(strings.ToLower(path), trainscript) {\n\t\t\tcode.Train = path\n\t\t}\n\t\tif strings.HasSuffix(strings.ToLower(path), runscript) {\n\t\t\tcode.Pred = path\n\t\t}\n\t\treturn err\n\t})\n\n\tif len(exes) <= 0 {\n\t\treturn code, fmt.Errorf(\"hml: could not find any suitable executable in zip-file\")\n\t}\n\n\tif code.Train == \"\" && code.Pred == \"\" {\n\t\t\/\/ take first one\n\t\tcode.Train = exes[0]\n\t\tcode.Pred = exes[0]\n\t}\n\n\tif code.Train == \"\" && code.Pred != \"\" {\n\t\tcode.Train = code.Pred\n\t}\n\n\tif code.Train != \"\" && code.Pred == \"\" {\n\t\tcode.Pred = code.Train\n\t}\n\n\tif code.License == \"\" {\n\t\treturn code, fmt.Errorf(\"hml: could not find a LICENSE file under [%s]\", code.Root)\n\t}\n\n\treturn code, err\n}\n\nfunc (code Code) run() error {\n\tvar err error\n\n\tdir := filepath.Join(code.Root, \".higgsml-work\")\n\terr = os.MkdirAll(dir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code.DoTraining {\n\t\terr = code.run_training(dir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprintf(\"\\n\")\n\t}\n\n\terr = code.run_pred(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\nfunc (code Code) run_training(dir string) error {\n\tvar err error\n\terrch := make(chan error)\n\n\tprintf(\"::: run training...\\n\")\n\n\ttrained := pdir(dir, trainedname)\n\n\tcmd := exec.Command(code.Train, code.trainfile, trained)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Dir = code.Root\n\n\tstart := time.Now()\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\terrch <- err\n\t\t\treturn\n\t\t}\n\t\terrch <- cmd.Wait()\n\t}()\n\n\tduration := 1 * time.Hour\n\tselect {\n\tcase <-time.After(duration):\n\t\tcmd.Process.Kill()\n\t\treturn fmt.Errorf(\"hml: training timed out (%v)\\n\", duration)\n\tcase err = <-errch:\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tprintf(\"::: run training... [ERR] (delta=%v)\\n\", time.Since(start))\n\t\treturn err\n\t}\n\n\tprintf(\"::: run training... [ok] (delta=%v)\\n\", time.Since(start))\n\treturn err\n}\n\nfunc (code Code) run_pred(dir string) error {\n\tvar err error\n\terrch := make(chan error)\n\n\tprintf(\"::: run prediction...\\n\")\n\n\ttrained := code.Trained\n\tif code.DoTraining {\n\t\t\/\/ if we ran the training, then use that file instead.\n\t\ttrained = pdir(dir, trainedname)\n\t}\n\n\tresults := pdir(dir, csv_results)\n\tcmd := exec.Command(code.Pred, code.testfile, trained, results)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Dir = code.Root\n\n\tstart := time.Now()\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\terrch <- err\n\t\t\treturn\n\t\t}\n\t\terrch <- cmd.Wait()\n\t}()\n\n\tduration := 1 * time.Hour\n\tselect {\n\tcase <-time.After(duration):\n\t\tcmd.Process.Kill()\n\t\treturn fmt.Errorf(\"hml: prediction timed out (%v)\\n\", duration)\n\tcase err = <-errch:\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tprintf(\"::: run prediction... [ERR] (delta=%v)\\n\", time.Since(start))\n\t\treturn err\n\t}\n\n\tprintf(\"::: run prediction... [ok] (delta=%v)\\n\", time.Since(start))\n\treturn err\n}\n\nfunc pdir(dirs ...string) string {\n\treturn filepath.Join(dirs...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/event\"\n\ttsuruIo \"github.com\/tsuru\/tsuru\/io\"\n\t\"github.com\/tsuru\/tsuru\/permission\"\n\t\"github.com\/tsuru\/tsuru\/repository\"\n)\n\n\/\/ title: app deploy\n\/\/ path: \/apps\/{appname}\/deploy\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deploy(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tvar file multipart.File\n\tvar fileSize int64\n\tif strings.HasPrefix(r.Header.Get(\"Content-Type\"), \"multipart\/\") {\n\t\tfile, _, err = r.FormFile(\"file\")\n\t\tif err != nil {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: err.Error(),\n\t\t\t}\n\t\t}\n\t\tfileSize, err = file.Seek(0, io.SeekEnd)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to find uploaded file size\")\n\t\t}\n\t\tfile.Seek(0, io.SeekStart)\n\t\tdefer file.Close()\n\t}\n\tarchiveURL := r.FormValue(\"archive-url\")\n\timage := r.FormValue(\"image\")\n\tif image == \"\" && archiveURL == \"\" && file == nil {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"you must specify either the archive-url, a image url or upload a file.\",\n\t\t}\n\t}\n\tcommit := r.FormValue(\"commit\")\n\tw.Header().Set(\"Content-Type\", \"text\")\n\tappName := r.URL.Query().Get(\":appname\")\n\torigin := r.FormValue(\"origin\")\n\tif image != \"\" {\n\t\torigin = \"image\"\n\t}\n\tif origin != \"\" {\n\t\tif !app.ValidateOrigin(origin) {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: \"Invalid deployment origin\",\n\t\t\t}\n\t\t}\n\t}\n\tvar userName string\n\tif t.IsAppToken() {\n\t\tif t.GetAppName() != appName && t.GetAppName() != app.InternalAppName {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusUnauthorized, Message: \"invalid app token\"}\n\t\t}\n\t\tuserName = r.FormValue(\"user\")\n\t} else {\n\t\tcommit = \"\"\n\t\tuserName = t.GetUserName()\n\t}\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}\n\t}\n\tif (instance.GetPlatform() == \"\") && (origin != \"image\") {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusUnauthorized, Message: \"can't deploy app without platform, if it's not an image\"}\n\t}\n\tvar build bool\n\tbuildString := r.FormValue(\"build\")\n\tif buildString != \"\" {\n\t\tbuild, err = strconv.ParseBool(buildString)\n\t\tif err != nil {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: err.Error(),\n\t\t\t}\n\t\t}\n\t}\n\tmessage := r.FormValue(\"message\")\n\tif commit != \"\" && message == \"\" {\n\t\tvar messages []string\n\t\tmessages, err = repository.Manager().CommitMessages(instance.Name, commit, 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(messages) > 0 {\n\t\t\tmessage = messages[0]\n\t\t}\n\t}\n\tif origin == \"\" && commit != \"\" {\n\t\torigin = \"git\"\n\t}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tCommit: commit,\n\t\tFileSize: fileSize,\n\t\tFile: file,\n\t\tArchiveURL: archiveURL,\n\t\tUser: userName,\n\t\tImage: image,\n\t\tOrigin: origin,\n\t\tBuild: build,\n\t\tMessage: message,\n\t}\n\topts.GetKind()\n\tif t.GetAppName() != app.InternalAppName {\n\t\tcanDeploy := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\t\tif !canDeploy {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: \"User does not have permission to do this action in this app\"}\n\t\t}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tRawOwner: event.Owner{Type: event.OwnerTypeUser, Name: userName},\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\twriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"please wait...\")\n\tdefer writer.Stop()\n\topts.OutputStream = writer\n\timageID, err = app.Deploy(opts)\n\tif err == nil {\n\t\tfmt.Fprintln(w, \"\\nOK\")\n\t}\n\treturn err\n}\n\nfunc permSchemeForDeploy(opts app.DeployOptions) *permission.PermissionScheme {\n\tswitch opts.GetKind() {\n\tcase app.DeployGit:\n\t\treturn permission.PermAppDeployGit\n\tcase app.DeployImage:\n\t\treturn permission.PermAppDeployImage\n\tcase app.DeployUpload:\n\t\treturn permission.PermAppDeployUpload\n\tcase app.DeployUploadBuild:\n\t\treturn permission.PermAppDeployBuild\n\tcase app.DeployArchiveURL:\n\t\treturn permission.PermAppDeployArchiveUrl\n\tcase app.DeployRollback:\n\t\treturn permission.PermAppDeployRollback\n\tdefault:\n\t\treturn permission.PermAppDeploy\n\t}\n}\n\n\/\/ title: deploy diff\n\/\/ path: \/apps\/{appname}\/diff\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc diffDeploy(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\twriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer writer.Stop()\n\tfmt.Fprint(w, \"Saving the difference between the old and new code\\n\")\n\tappName := r.URL.Query().Get(\":appname\")\n\tdiff := r.FormValue(\"customdata\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}\n\t}\n\tif t.GetAppName() != app.InternalAppName {\n\t\tcanDiffDeploy := permission.Check(t, permission.PermAppReadDeploy, contextsForApp(instance)...)\n\t\tif !canDiffDeploy {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t\t}\n\t}\n\tevt, err := event.GetRunning(appTarget(appName), permission.PermAppDeploy.FullName())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn evt.SetOtherCustomData(map[string]string{\n\t\t\"diff\": diff,\n\t})\n}\n\n\/\/ title: rollback\n\/\/ path: \/apps\/{appname}\/deploy\/rollback\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ produce: application\/x-json-stream\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deployRollback(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tappName := r.URL.Query().Get(\":appname\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: fmt.Sprintf(\"App %s not found.\", appName)}\n\t}\n\timage := r.FormValue(\"image\")\n\tif image == \"\" {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"you cannot rollback without an image name\",\n\t\t}\n\t}\n\torigin := r.FormValue(\"origin\")\n\tif origin != \"\" {\n\t\tif !app.ValidateOrigin(origin) {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: \"Invalid deployment origin\",\n\t\t\t}\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/x-json-stream\")\n\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer keepAliveWriter.Stop()\n\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tOutputStream: writer,\n\t\tImage: image,\n\t\tUser: t.GetUserName(),\n\t\tOrigin: origin,\n\t\tRollback: true,\n\t}\n\topts.GetKind()\n\tcanRollback := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\tif !canRollback {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tOwner: t,\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\timageID, err = app.Deploy(opts)\n\tif err != nil {\n\t\twriter.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})\n\t}\n\treturn nil\n}\n\n\/\/ title: deploy list\n\/\/ path: \/deploys\n\/\/ method: GET\n\/\/ produce: application\/json\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 204: No content\nfunc deploysList(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tcontexts := permission.ContextsForPermission(t, permission.PermAppReadDeploy)\n\tif len(contexts) == 0 {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn nil\n\t}\n\tfilter := appFilterByContext(contexts, nil)\n\tfilter.Name = r.URL.Query().Get(\"app\")\n\tskip := r.URL.Query().Get(\"skip\")\n\tlimit := r.URL.Query().Get(\"limit\")\n\tskipInt, _ := strconv.Atoi(skip)\n\tlimitInt, _ := strconv.Atoi(limit)\n\tdeploys, err := app.ListDeploys(filter, skipInt, limitInt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(deploys) == 0 {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn nil\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(deploys)\n}\n\n\/\/ title: deploy info\n\/\/ path: \/deploys\/{deploy}\n\/\/ method: GET\n\/\/ produce: application\/json\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 401: Unauthorized\n\/\/ 404: Not found\nfunc deployInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tdepID := r.URL.Query().Get(\":deploy\")\n\tdeploy, err := app.GetDeploy(depID)\n\tif err != nil {\n\t\tif err == event.ErrEventNotFound {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: \"Deploy not found.\"}\n\t\t}\n\t\treturn err\n\t}\n\tdbApp, err := app.GetByName(deploy.App)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcanGet := permission.Check(t, permission.PermAppReadDeploy, contextsForApp(dbApp)...)\n\tif !canGet {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: \"Deploy not found.\"}\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(deploy)\n}\n\n\/\/ title: rebuild\n\/\/ path: \/apps\/{appname}\/deploy\/rebuild\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ produce: application\/x-json-stream\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deployRebuild(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tappName := r.URL.Query().Get(\":appname\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: fmt.Sprintf(\"App %s not found.\", appName)}\n\t}\n\torigin := r.FormValue(\"origin\")\n\tif !app.ValidateOrigin(origin) {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Invalid deployment origin\",\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/x-json-stream\")\n\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer keepAliveWriter.Stop()\n\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tOutputStream: writer,\n\t\tUser: t.GetUserName(),\n\t\tOrigin: origin,\n\t\tKind: app.DeployRebuild,\n\t}\n\tcanDeploy := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\tif !canDeploy {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tOwner: t,\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\timageID, err = app.Deploy(opts)\n\tif err != nil {\n\t\twriter.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})\n\t}\n\treturn nil\n}\n<commit_msg>api: changes response error status<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/event\"\n\ttsuruIo \"github.com\/tsuru\/tsuru\/io\"\n\t\"github.com\/tsuru\/tsuru\/permission\"\n\t\"github.com\/tsuru\/tsuru\/repository\"\n)\n\n\/\/ title: app deploy\n\/\/ path: \/apps\/{appname}\/deploy\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deploy(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tvar file multipart.File\n\tvar fileSize int64\n\tif strings.HasPrefix(r.Header.Get(\"Content-Type\"), \"multipart\/\") {\n\t\tfile, _, err = r.FormFile(\"file\")\n\t\tif err != nil {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: err.Error(),\n\t\t\t}\n\t\t}\n\t\tfileSize, err = file.Seek(0, io.SeekEnd)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to find uploaded file size\")\n\t\t}\n\t\tfile.Seek(0, io.SeekStart)\n\t\tdefer file.Close()\n\t}\n\tarchiveURL := r.FormValue(\"archive-url\")\n\timage := r.FormValue(\"image\")\n\tif image == \"\" && archiveURL == \"\" && file == nil {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"you must specify either the archive-url, a image url or upload a file.\",\n\t\t}\n\t}\n\tcommit := r.FormValue(\"commit\")\n\tw.Header().Set(\"Content-Type\", \"text\")\n\tappName := r.URL.Query().Get(\":appname\")\n\torigin := r.FormValue(\"origin\")\n\tif image != \"\" {\n\t\torigin = \"image\"\n\t}\n\tif origin != \"\" {\n\t\tif !app.ValidateOrigin(origin) {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: \"Invalid deployment origin\",\n\t\t\t}\n\t\t}\n\t}\n\tvar userName string\n\tif t.IsAppToken() {\n\t\tif t.GetAppName() != appName && t.GetAppName() != app.InternalAppName {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusUnauthorized, Message: \"invalid app token\"}\n\t\t}\n\t\tuserName = r.FormValue(\"user\")\n\t} else {\n\t\tcommit = \"\"\n\t\tuserName = t.GetUserName()\n\t}\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}\n\t}\n\tif (instance.GetPlatform() == \"\") && (origin != \"image\") {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusBadRequest, Message: \"can't deploy app without platform, if it's not an image\"}\n\t}\n\tvar build bool\n\tbuildString := r.FormValue(\"build\")\n\tif buildString != \"\" {\n\t\tbuild, err = strconv.ParseBool(buildString)\n\t\tif err != nil {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: err.Error(),\n\t\t\t}\n\t\t}\n\t}\n\tmessage := r.FormValue(\"message\")\n\tif commit != \"\" && message == \"\" {\n\t\tvar messages []string\n\t\tmessages, err = repository.Manager().CommitMessages(instance.Name, commit, 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(messages) > 0 {\n\t\t\tmessage = messages[0]\n\t\t}\n\t}\n\tif origin == \"\" && commit != \"\" {\n\t\torigin = \"git\"\n\t}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tCommit: commit,\n\t\tFileSize: fileSize,\n\t\tFile: file,\n\t\tArchiveURL: archiveURL,\n\t\tUser: userName,\n\t\tImage: image,\n\t\tOrigin: origin,\n\t\tBuild: build,\n\t\tMessage: message,\n\t}\n\topts.GetKind()\n\tif t.GetAppName() != app.InternalAppName {\n\t\tcanDeploy := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\t\tif !canDeploy {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: \"User does not have permission to do this action in this app\"}\n\t\t}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tRawOwner: event.Owner{Type: event.OwnerTypeUser, Name: userName},\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\twriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"please wait...\")\n\tdefer writer.Stop()\n\topts.OutputStream = writer\n\timageID, err = app.Deploy(opts)\n\tif err == nil {\n\t\tfmt.Fprintln(w, \"\\nOK\")\n\t}\n\treturn err\n}\n\nfunc permSchemeForDeploy(opts app.DeployOptions) *permission.PermissionScheme {\n\tswitch opts.GetKind() {\n\tcase app.DeployGit:\n\t\treturn permission.PermAppDeployGit\n\tcase app.DeployImage:\n\t\treturn permission.PermAppDeployImage\n\tcase app.DeployUpload:\n\t\treturn permission.PermAppDeployUpload\n\tcase app.DeployUploadBuild:\n\t\treturn permission.PermAppDeployBuild\n\tcase app.DeployArchiveURL:\n\t\treturn permission.PermAppDeployArchiveUrl\n\tcase app.DeployRollback:\n\t\treturn permission.PermAppDeployRollback\n\tdefault:\n\t\treturn permission.PermAppDeploy\n\t}\n}\n\n\/\/ title: deploy diff\n\/\/ path: \/apps\/{appname}\/diff\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc diffDeploy(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\twriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer writer.Stop()\n\tfmt.Fprint(w, \"Saving the difference between the old and new code\\n\")\n\tappName := r.URL.Query().Get(\":appname\")\n\tdiff := r.FormValue(\"customdata\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: err.Error()}\n\t}\n\tif t.GetAppName() != app.InternalAppName {\n\t\tcanDiffDeploy := permission.Check(t, permission.PermAppReadDeploy, contextsForApp(instance)...)\n\t\tif !canDiffDeploy {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t\t}\n\t}\n\tevt, err := event.GetRunning(appTarget(appName), permission.PermAppDeploy.FullName())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn evt.SetOtherCustomData(map[string]string{\n\t\t\"diff\": diff,\n\t})\n}\n\n\/\/ title: rollback\n\/\/ path: \/apps\/{appname}\/deploy\/rollback\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ produce: application\/x-json-stream\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deployRollback(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tappName := r.URL.Query().Get(\":appname\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: fmt.Sprintf(\"App %s not found.\", appName)}\n\t}\n\timage := r.FormValue(\"image\")\n\tif image == \"\" {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"you cannot rollback without an image name\",\n\t\t}\n\t}\n\torigin := r.FormValue(\"origin\")\n\tif origin != \"\" {\n\t\tif !app.ValidateOrigin(origin) {\n\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\tCode: http.StatusBadRequest,\n\t\t\t\tMessage: \"Invalid deployment origin\",\n\t\t\t}\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/x-json-stream\")\n\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer keepAliveWriter.Stop()\n\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tOutputStream: writer,\n\t\tImage: image,\n\t\tUser: t.GetUserName(),\n\t\tOrigin: origin,\n\t\tRollback: true,\n\t}\n\topts.GetKind()\n\tcanRollback := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\tif !canRollback {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tOwner: t,\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\timageID, err = app.Deploy(opts)\n\tif err != nil {\n\t\twriter.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})\n\t}\n\treturn nil\n}\n\n\/\/ title: deploy list\n\/\/ path: \/deploys\n\/\/ method: GET\n\/\/ produce: application\/json\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 204: No content\nfunc deploysList(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tcontexts := permission.ContextsForPermission(t, permission.PermAppReadDeploy)\n\tif len(contexts) == 0 {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn nil\n\t}\n\tfilter := appFilterByContext(contexts, nil)\n\tfilter.Name = r.URL.Query().Get(\"app\")\n\tskip := r.URL.Query().Get(\"skip\")\n\tlimit := r.URL.Query().Get(\"limit\")\n\tskipInt, _ := strconv.Atoi(skip)\n\tlimitInt, _ := strconv.Atoi(limit)\n\tdeploys, err := app.ListDeploys(filter, skipInt, limitInt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(deploys) == 0 {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn nil\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(deploys)\n}\n\n\/\/ title: deploy info\n\/\/ path: \/deploys\/{deploy}\n\/\/ method: GET\n\/\/ produce: application\/json\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 401: Unauthorized\n\/\/ 404: Not found\nfunc deployInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tdepID := r.URL.Query().Get(\":deploy\")\n\tdeploy, err := app.GetDeploy(depID)\n\tif err != nil {\n\t\tif err == event.ErrEventNotFound {\n\t\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: \"Deploy not found.\"}\n\t\t}\n\t\treturn err\n\t}\n\tdbApp, err := app.GetByName(deploy.App)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcanGet := permission.Check(t, permission.PermAppReadDeploy, contextsForApp(dbApp)...)\n\tif !canGet {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: \"Deploy not found.\"}\n\t}\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(deploy)\n}\n\n\/\/ title: rebuild\n\/\/ path: \/apps\/{appname}\/deploy\/rebuild\n\/\/ method: POST\n\/\/ consume: application\/x-www-form-urlencoded\n\/\/ produce: application\/x-json-stream\n\/\/ responses:\n\/\/ 200: OK\n\/\/ 400: Invalid data\n\/\/ 403: Forbidden\n\/\/ 404: Not found\nfunc deployRebuild(w http.ResponseWriter, r *http.Request, t auth.Token) error {\n\tappName := r.URL.Query().Get(\":appname\")\n\tinstance, err := app.GetByName(appName)\n\tif err != nil {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusNotFound, Message: fmt.Sprintf(\"App %s not found.\", appName)}\n\t}\n\torigin := r.FormValue(\"origin\")\n\tif !app.ValidateOrigin(origin) {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Invalid deployment origin\",\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/x-json-stream\")\n\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\tdefer keepAliveWriter.Stop()\n\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\topts := app.DeployOptions{\n\t\tApp: instance,\n\t\tOutputStream: writer,\n\t\tUser: t.GetUserName(),\n\t\tOrigin: origin,\n\t\tKind: app.DeployRebuild,\n\t}\n\tcanDeploy := permission.Check(t, permSchemeForDeploy(opts), contextsForApp(instance)...)\n\tif !canDeploy {\n\t\treturn &tsuruErrors.HTTP{Code: http.StatusForbidden, Message: permission.ErrUnauthorized.Error()}\n\t}\n\tvar imageID string\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: appTarget(appName),\n\t\tKind: permission.PermAppDeploy,\n\t\tOwner: t,\n\t\tCustomData: opts,\n\t\tAllowed: event.Allowed(permission.PermAppReadEvents, contextsForApp(instance)...),\n\t\tAllowedCancel: event.Allowed(permission.PermAppUpdateEvents, contextsForApp(instance)...),\n\t\tCancelable: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.DoneCustomData(err, map[string]string{\"image\": imageID}) }()\n\topts.Event = evt\n\timageID, err = app.Deploy(opts)\n\tif err != nil {\n\t\twriter.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"github.com\/martinp\/atbapi\/atb\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype BusStops struct {\n\tStops []BusStop `json:\"stops\"`\n}\n\ntype BusStop struct {\n\tStopId int `json:\"stopId\"`\n\tNodeId int `json:\"nodeId\"`\n\tDescription string `json:\"description\"`\n\tLongitude float64 `json:\"longitude\"`\n\tLatitude float64 `json:\"latitude\"`\n\tMobileCode string `json:\"mobileCode\"`\n\tMobileName string `json:\"mobileName\"`\n}\n\ntype Departures struct {\n\tTowardsCentrum bool `json:\"isGoingTowardsCentrum\"`\n\tDepartures []Departure `json:\"departures\"`\n}\n\ntype Departure struct {\n\tLineId string `json:\"line\"`\n\tRegisteredDepartureTime string `json:\"registeredDepartureTime\"`\n\tScheduledDepartureTime string `json:\"scheduledDepartureTime\"`\n\tDestination string `json:\"destination\"`\n\tIsRealtimeData bool `json:\"isRealtimeData\"`\n}\n\nfunc convertBusStop(s atb.BusStop) (BusStop, error) {\n\tnodeId, err := strconv.Atoi(s.NodeId)\n\tif err != nil {\n\t\treturn BusStop{}, err\n\t}\n\tlongitude, err := strconv.ParseFloat(s.Longitude, 64)\n\tif err != nil {\n\t\treturn BusStop{}, err\n\t}\n\tlatitude := float64(s.Latitude)\n\treturn BusStop{\n\t\tStopId: s.StopId,\n\t\tNodeId: nodeId,\n\t\tDescription: s.Description,\n\t\tLongitude: longitude,\n\t\tLatitude: latitude,\n\t\tMobileCode: s.MobileCode,\n\t\tMobileName: s.MobileName,\n\t}, nil\n}\n\nfunc convertBusStops(s atb.BusStops) (BusStops, error) {\n\tstops := make([]BusStop, 0, len(s.Stops))\n\tfor _, stop := range s.Stops {\n\t\tconverted, err := convertBusStop(stop)\n\t\tif err != nil {\n\t\t\treturn BusStops{}, err\n\t\t}\n\t\tstops = append(stops, converted)\n\t}\n\treturn BusStops{Stops: stops}, nil\n}\n\nfunc convertTime(src string) (string, error) {\n\tt, err := time.Parse(\"02.01.2006 15:04\", src)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t.Format(\"2006-01-02T15:04:05.000\"), nil\n}\n\nfunc isRealtime(s string) bool {\n\treturn strings.EqualFold(s, \"prev\")\n}\n\nfunc convertForecast(f atb.Forecast) (Departure, error) {\n\tregisteredDeparture, err := convertTime(f.RegisteredDepartureTime)\n\tif err != nil {\n\t\treturn Departure{}, err\n\t}\n\tscheduledDeparture, err := convertTime(f.ScheduledDepartureTime)\n\tif err != nil {\n\t\treturn Departure{}, err\n\t}\n\treturn Departure{\n\t\tLineId: f.LineId,\n\t\tDestination: f.Destination,\n\t\tRegisteredDepartureTime: registeredDeparture,\n\t\tScheduledDepartureTime: scheduledDeparture,\n\t\tIsRealtimeData: isRealtime(f.StationForecast),\n\t}, nil\n}\n\nfunc isTowardsCentrum(nodeId int) bool {\n\treturn (nodeId\/1000)%2 == 1\n}\n\nfunc convertForecasts(f atb.Forecasts) (Departures, error) {\n\ttowardsCentrum := false\n\tif len(f.Nodes) > 0 {\n\t\tnodeId, err := strconv.Atoi(f.Nodes[0].NodeId)\n\t\tif err != nil {\n\t\t\treturn Departures{}, err\n\t\t}\n\t\ttowardsCentrum = isTowardsCentrum(nodeId)\n\t}\n\tdepartures := make([]Departure, 0, len(f.Forecasts))\n\tfor _, forecast := range f.Forecasts {\n\t\tdeparture, err := convertForecast(forecast)\n\t\tif err != nil {\n\t\t\treturn Departures{}, err\n\t\t}\n\t\tdepartures = append(departures, departure)\n\t}\n\treturn Departures{\n\t\tTowardsCentrum: towardsCentrum,\n\t\tDepartures: departures,\n\t}, nil\n}\n<commit_msg>Use int for bus stop lat\/lon<commit_after>package api\n\nimport (\n\t\"github.com\/martinp\/atbapi\/atb\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype BusStops struct {\n\tStops []BusStop `json:\"stops\"`\n}\n\ntype BusStop struct {\n\tStopId int `json:\"stopId\"`\n\tNodeId int `json:\"nodeId\"`\n\tDescription string `json:\"description\"`\n\tLongitude int `json:\"longitude\"`\n\tLatitude int `json:\"latitude\"`\n\tMobileCode string `json:\"mobileCode\"`\n\tMobileName string `json:\"mobileName\"`\n}\n\ntype Departures struct {\n\tTowardsCentrum bool `json:\"isGoingTowardsCentrum\"`\n\tDepartures []Departure `json:\"departures\"`\n}\n\ntype Departure struct {\n\tLineId string `json:\"line\"`\n\tRegisteredDepartureTime string `json:\"registeredDepartureTime\"`\n\tScheduledDepartureTime string `json:\"scheduledDepartureTime\"`\n\tDestination string `json:\"destination\"`\n\tIsRealtimeData bool `json:\"isRealtimeData\"`\n}\n\nfunc convertBusStop(s atb.BusStop) (BusStop, error) {\n\tnodeId, err := strconv.Atoi(s.NodeId)\n\tif err != nil {\n\t\treturn BusStop{}, err\n\t}\n\tlongitude, err := strconv.Atoi(s.Longitude)\n\tif err != nil {\n\t\treturn BusStop{}, err\n\t}\n\treturn BusStop{\n\t\tStopId: s.StopId,\n\t\tNodeId: nodeId,\n\t\tDescription: s.Description,\n\t\tLongitude: longitude,\n\t\tLatitude: s.Latitude,\n\t\tMobileCode: s.MobileCode,\n\t\tMobileName: s.MobileName,\n\t}, nil\n}\n\nfunc convertBusStops(s atb.BusStops) (BusStops, error) {\n\tstops := make([]BusStop, 0, len(s.Stops))\n\tfor _, stop := range s.Stops {\n\t\tconverted, err := convertBusStop(stop)\n\t\tif err != nil {\n\t\t\treturn BusStops{}, err\n\t\t}\n\t\tstops = append(stops, converted)\n\t}\n\treturn BusStops{Stops: stops}, nil\n}\n\nfunc convertTime(src string) (string, error) {\n\tt, err := time.Parse(\"02.01.2006 15:04\", src)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t.Format(\"2006-01-02T15:04:05.000\"), nil\n}\n\nfunc isRealtime(s string) bool {\n\treturn strings.EqualFold(s, \"prev\")\n}\n\nfunc convertForecast(f atb.Forecast) (Departure, error) {\n\tregisteredDeparture, err := convertTime(f.RegisteredDepartureTime)\n\tif err != nil {\n\t\treturn Departure{}, err\n\t}\n\tscheduledDeparture, err := convertTime(f.ScheduledDepartureTime)\n\tif err != nil {\n\t\treturn Departure{}, err\n\t}\n\treturn Departure{\n\t\tLineId: f.LineId,\n\t\tDestination: f.Destination,\n\t\tRegisteredDepartureTime: registeredDeparture,\n\t\tScheduledDepartureTime: scheduledDeparture,\n\t\tIsRealtimeData: isRealtime(f.StationForecast),\n\t}, nil\n}\n\nfunc isTowardsCentrum(nodeId int) bool {\n\treturn (nodeId\/1000)%2 == 1\n}\n\nfunc convertForecasts(f atb.Forecasts) (Departures, error) {\n\ttowardsCentrum := false\n\tif len(f.Nodes) > 0 {\n\t\tnodeId, err := strconv.Atoi(f.Nodes[0].NodeId)\n\t\tif err != nil {\n\t\t\treturn Departures{}, err\n\t\t}\n\t\ttowardsCentrum = isTowardsCentrum(nodeId)\n\t}\n\tdepartures := make([]Departure, 0, len(f.Forecasts))\n\tfor _, forecast := range f.Forecasts {\n\t\tdeparture, err := convertForecast(forecast)\n\t\tif err != nil {\n\t\t\treturn Departures{}, err\n\t\t}\n\t\tdepartures = append(departures, departure)\n\t}\n\treturn Departures{\n\t\tTowardsCentrum: towardsCentrum,\n\t\tDepartures: departures,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package configuration\n\nimport (\n\t\"github.com\/bborbe\/monitoring\/check\"\n\t\"github.com\/bborbe\/monitoring\/check\/http\"\n)\n\ntype Configuration interface {\n\tChecks() []check.Check\n}\n\ntype configuration struct {\n}\n\nfunc New() Configuration {\n\treturn new(configuration)\n}\n\nfunc (c *configuration) Checks() []check.Check {\n\tlist := make([]check.Check, 0)\n\tlist = append(list, http.New(\"http:\/\/www.benjamin-borbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"https:\/\/www.benjamin-borbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"http:\/\/www.benjaminborbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"http:\/\/jenkins.benjamin-borbe.de\").ExpectTitle(\"Dashboard [Jenkins]\"))\n\tlist = append(list, http.New(\"http:\/\/www.harteslicht.de\").ExpectTitle(\"www.Harteslicht.com | Fotografieren das Spass macht.\"))\n\tlist = append(list, http.New(\"http:\/\/www.harteslicht.com\").ExpectTitle(\"www.Harteslicht.com | Fotografieren das Spass macht.\"))\n\tlist = append(list, http.New(\"http:\/\/kickstart.benjamin-borbe.de\").ExpectContent(\"ks.cfg\"))\n\tlist = append(list, http.New(\"http:\/\/ip.benjamin-borbe.de\"))\n\tlist = append(list, http.New(\"http:\/\/slideshow.benjamin-borbe.de\").ExpectContent(\"go.html\"))\n\tlist = append(list, http.New(\"https:\/\/www.benjamin-borbe.de\/confluence\/\").ExpectTitle(\"Dashboard - Confluence\"))\n\treturn list\n}\n<commit_msg>add checks<commit_after>package configuration\n\nimport (\n\t\"github.com\/bborbe\/monitoring\/check\"\n\t\"github.com\/bborbe\/monitoring\/check\/http\"\n)\n\ntype Configuration interface {\n\tChecks() []check.Check\n}\n\ntype configuration struct {\n}\n\nfunc New() Configuration {\n\treturn new(configuration)\n}\n\nfunc (c *configuration) Checks() []check.Check {\n\tlist := make([]check.Check, 0)\n\tlist = append(list, http.New(\"http:\/\/www.benjamin-borbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"https:\/\/www.benjamin-borbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"http:\/\/www.benjaminborbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\tlist = append(list, http.New(\"http:\/\/jenkins.benjamin-borbe.de\").ExpectTitle(\"Dashboard [Jenkins]\"))\n\tlist = append(list, http.New(\"http:\/\/www.harteslicht.de\").ExpectTitle(\"www.Harteslicht.com | Fotografieren das Spass macht.\"))\n\tlist = append(list, http.New(\"http:\/\/www.harteslicht.com\").ExpectTitle(\"www.Harteslicht.com | Fotografieren das Spass macht.\"))\n\tlist = append(list, http.New(\"http:\/\/kickstart.benjamin-borbe.de\").ExpectContent(\"ks.cfg\"))\n\tlist = append(list, http.New(\"http:\/\/ip.benjamin-borbe.de\"))\n\tlist = append(list, http.New(\"http:\/\/slideshow.benjamin-borbe.de\").ExpectContent(\"go.html\"))\n\tlist = append(list, http.New(\"https:\/\/www.benjamin-borbe.de\/confluence\/\").ExpectTitle(\"Dashboard - Confluence\"))\n\tlist = append(list, http.New(\"http:\/\/apt.benjamin-borbe.de\").ExpectTitle(\"403 Forbidden\"))\n\tlist = append(list, http.New(\"http:\/\/blog.benjamin-borbe.de\").ExpectTitle(\"Benjamin Borbe Fotografie\"))\n\treturn list\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage configuration\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/cloudawan\/kubernetes_management_utility\/configuration\/Godeps\/_workspace\/src\/code.google.com\/p\/log4go\"\n\t\"github.com\/cloudawan\/kubernetes_management_utility\/configuration\/Godeps\/_workspace\/src\/github.com\/cloudawan\/kubernetes_management_utility\/logger\"\n\t\"io\"\n\t\"os\"\n)\n\nvar log log4go.Logger = logger.GetLogger(\"utility\")\n\nconst (\n\trootPath = \"\/etc\"\n\tfileName = \"configuration.json\"\n)\n\ntype Configuration struct {\n\tjsonMap map[string]interface{}\n\tfileNameAndPath string\n}\n\nfunc CreateConfiguration(programName string, configurationContent string) (*Configuration, error) {\n\tconfiguration := new(Configuration)\n\tif err := configuration.createConfigurationFileIfNotExist(programName, configurationContent); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := configuration.readConfigurationFromFile(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn configuration, nil\n}\n\nfunc (configuration *Configuration) GetNative(key string) interface{} {\n\treturn configuration.jsonMap[key]\n}\n\nfunc (configuration *Configuration) GetString(key string) (string, bool) {\n\tvalue, ok := configuration.jsonMap[key].(string)\n\treturn value, ok\n}\n\nfunc (configuration *Configuration) GetStringSlice(key string) ([]string, bool) {\n\tstringSlice := make([]string, 0)\n\tvalueSlice, ok := configuration.jsonMap[key].([]interface{})\n\tif ok == false {\n\t\treturn nil, false\n\t}\n\tfor _, value := range valueSlice {\n\t\ttext, ok := value.(string)\n\t\tif ok == false {\n\t\t\treturn nil, false\n\t\t}\n\t\tstringSlice = append(stringSlice, text)\n\t}\n\treturn stringSlice, true\n}\n\nfunc (configuration *Configuration) GetInt(key string) (int, bool) {\n\tnumber, ok := configuration.jsonMap[key].(float64)\n\tif ok {\n\t\treturn int(number), true\n\t} else {\n\t\treturn 0, false\n\t}\n}\n\nfunc (configuration *Configuration) GetFloat64(key string) (float64, bool) {\n\tvalue, ok := configuration.jsonMap[key].(float64)\n\treturn value, ok\n}\n\nfunc (configuration *Configuration) createConfigurationFileIfNotExist(programName string, configurationContent string) (returnedError error) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Error(\"configuration Error: %s\", err)\n\t\t\tlog.Error(logger.GetStackTrace(4096, false))\n\t\t\treturnedError = err.(error)\n\t\t}\n\t}()\n\n\tdirectoryPath := rootPath + string(os.PathSeparator) + programName\n\tif err := os.MkdirAll(directoryPath, os.ModePerm); err != nil {\n\t\treturn err\n\t}\n\n\tfileNameAndPath := directoryPath + string(os.PathSeparator) + fileName\n\n\tif _, err := os.Stat(fileNameAndPath); os.IsNotExist(err) {\n\t\toutputFile, err := os.Create(fileNameAndPath)\n\t\tif err != nil {\n\t\t\tlog.Error(\"createConfigurationFileIfNotExist create file %s with error: %s\", fileNameAndPath, err)\n\t\t\treturn err\n\t\t}\n\t\toutputFile.WriteString(configurationContent)\n\t}\n\n\tconfiguration.fileNameAndPath = fileNameAndPath\n\n\treturn nil\n}\n\nfunc (configuration *Configuration) Reload() error {\n\treturn configuration.readConfigurationFromFile()\n}\n\nfunc (configuration *Configuration) readConfigurationFromFile() (returnedError error) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Error(\"readConfigurationFromFile Error: %s\", err)\n\t\t\tlog.Error(logger.GetStackTrace(4096, false))\n\t\t\treturnedError = err.(error)\n\t\t}\n\t}()\n\n\t\/\/ open input file\n\tinputFile, err := os.Open(configuration.fileNameAndPath)\n\tif err != nil {\n\t\tlog.Error(\"readConfigurationFromFile open file error: %s\", err)\n\t\treturn err\n\t}\n\tdefer inputFile.Close()\n\n\tbyteSlice := make([]byte, 0)\n\tbuffer := make([]byte, 1024)\n\tfor {\n\t\t\/\/ read a chunk\n\t\tn, err := inputFile.Read(buffer)\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Error(\"readConfigurationFromFile read file error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tbyteSlice = append(byteSlice, buffer[0:n]...)\n\t}\n\n\tjsonMap := make(map[string]interface{})\n\terr = json.Unmarshal(byteSlice, &jsonMap)\n\tif err != nil {\n\t\tlog.Error(\"readConfigurationFromFile parse file error: %s\", err)\n\t\treturn err\n\t}\n\n\tconfiguration.jsonMap = jsonMap\n\n\treturn nil\n}\n<commit_msg>Fix Godeps<commit_after>\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage configuration\n\nimport (\n\t\"code.google.com\/p\/log4go\"\n\t\"encoding\/json\"\n\t\"github.com\/cloudawan\/kubernetes_management_utility\/logger\"\n\t\"io\"\n\t\"os\"\n)\n\nvar log log4go.Logger = logger.GetLogger(\"utility\")\n\nconst (\n\trootPath = \"\/etc\"\n\tfileName = \"configuration.json\"\n)\n\ntype Configuration struct {\n\tjsonMap map[string]interface{}\n\tfileNameAndPath string\n}\n\nfunc CreateConfiguration(programName string, configurationContent string) (*Configuration, error) {\n\tconfiguration := new(Configuration)\n\tif err := configuration.createConfigurationFileIfNotExist(programName, configurationContent); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := configuration.readConfigurationFromFile(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn configuration, nil\n}\n\nfunc (configuration *Configuration) GetNative(key string) interface{} {\n\treturn configuration.jsonMap[key]\n}\n\nfunc (configuration *Configuration) GetString(key string) (string, bool) {\n\tvalue, ok := configuration.jsonMap[key].(string)\n\treturn value, ok\n}\n\nfunc (configuration *Configuration) GetStringSlice(key string) ([]string, bool) {\n\tstringSlice := make([]string, 0)\n\tvalueSlice, ok := configuration.jsonMap[key].([]interface{})\n\tif ok == false {\n\t\treturn nil, false\n\t}\n\tfor _, value := range valueSlice {\n\t\ttext, ok := value.(string)\n\t\tif ok == false {\n\t\t\treturn nil, false\n\t\t}\n\t\tstringSlice = append(stringSlice, text)\n\t}\n\treturn stringSlice, true\n}\n\nfunc (configuration *Configuration) GetInt(key string) (int, bool) {\n\tnumber, ok := configuration.jsonMap[key].(float64)\n\tif ok {\n\t\treturn int(number), true\n\t} else {\n\t\treturn 0, false\n\t}\n}\n\nfunc (configuration *Configuration) GetFloat64(key string) (float64, bool) {\n\tvalue, ok := configuration.jsonMap[key].(float64)\n\treturn value, ok\n}\n\nfunc (configuration *Configuration) createConfigurationFileIfNotExist(programName string, configurationContent string) (returnedError error) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Error(\"configuration Error: %s\", err)\n\t\t\tlog.Error(logger.GetStackTrace(4096, false))\n\t\t\treturnedError = err.(error)\n\t\t}\n\t}()\n\n\tdirectoryPath := rootPath + string(os.PathSeparator) + programName\n\tif err := os.MkdirAll(directoryPath, os.ModePerm); err != nil {\n\t\treturn err\n\t}\n\n\tfileNameAndPath := directoryPath + string(os.PathSeparator) + fileName\n\n\tif _, err := os.Stat(fileNameAndPath); os.IsNotExist(err) {\n\t\toutputFile, err := os.Create(fileNameAndPath)\n\t\tif err != nil {\n\t\t\tlog.Error(\"createConfigurationFileIfNotExist create file %s with error: %s\", fileNameAndPath, err)\n\t\t\treturn err\n\t\t}\n\t\toutputFile.WriteString(configurationContent)\n\t}\n\n\tconfiguration.fileNameAndPath = fileNameAndPath\n\n\treturn nil\n}\n\nfunc (configuration *Configuration) Reload() error {\n\treturn configuration.readConfigurationFromFile()\n}\n\nfunc (configuration *Configuration) readConfigurationFromFile() (returnedError error) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Error(\"readConfigurationFromFile Error: %s\", err)\n\t\t\tlog.Error(logger.GetStackTrace(4096, false))\n\t\t\treturnedError = err.(error)\n\t\t}\n\t}()\n\n\t\/\/ open input file\n\tinputFile, err := os.Open(configuration.fileNameAndPath)\n\tif err != nil {\n\t\tlog.Error(\"readConfigurationFromFile open file error: %s\", err)\n\t\treturn err\n\t}\n\tdefer inputFile.Close()\n\n\tbyteSlice := make([]byte, 0)\n\tbuffer := make([]byte, 1024)\n\tfor {\n\t\t\/\/ read a chunk\n\t\tn, err := inputFile.Read(buffer)\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Error(\"readConfigurationFromFile read file error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tbyteSlice = append(byteSlice, buffer[0:n]...)\n\t}\n\n\tjsonMap := make(map[string]interface{})\n\terr = json.Unmarshal(byteSlice, &jsonMap)\n\tif err != nil {\n\t\tlog.Error(\"readConfigurationFromFile parse file error: %s\", err)\n\t\treturn err\n\t}\n\n\tconfiguration.jsonMap = jsonMap\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"fmt\"\n\t\"github.com\/loadimpact\/k6\/api\/common\"\n\t\"github.com\/loadimpact\/k6\/api\/v1\"\n\t\"github.com\/loadimpact\/k6\/api\/v2\"\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/urfave\/negroni\"\n\t\"net\/http\"\n)\n\nfunc ListenAndServe(addr string, engine *lib.Engine) error {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/v1\/\", v1.NewHandler())\n\tmux.Handle(\"\/v2\/\", v2.NewHandler())\n\tmux.HandleFunc(\"\/ping\", HandlePing)\n\n\tn := negroni.Classic()\n\tn.UseFunc(WithEngine(engine))\n\tn.UseHandler(mux)\n\treturn http.ListenAndServe(addr, n)\n}\n\nfunc WithEngine(engine *lib.Engine) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tr = r.WithContext(common.WithEngine(r.Context(), engine))\n\t\tnext(rw, r)\n\t})\n}\n\nfunc HandlePing(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Add(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\tfmt.Fprint(rw, \"ok\")\n}\n<commit_msg>[feat] Panic recovery + request logging<commit_after>package api\n\nimport (\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/loadimpact\/k6\/api\/common\"\n\t\"github.com\/loadimpact\/k6\/api\/v1\"\n\t\"github.com\/loadimpact\/k6\/api\/v2\"\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/urfave\/negroni\"\n\t\"net\/http\"\n)\n\nfunc ListenAndServe(addr string, engine *lib.Engine) error {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/v1\/\", v1.NewHandler())\n\tmux.Handle(\"\/v2\/\", v2.NewHandler())\n\tmux.HandleFunc(\"\/ping\", HandlePing)\n\n\tn := negroni.New()\n\tn.Use(negroni.NewRecovery())\n\tn.UseFunc(WithEngine(engine))\n\tn.UseFunc(Logger)\n\tn.UseHandler(mux)\n\n\treturn http.ListenAndServe(addr, n)\n}\n\nfunc Logger(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tnext(rw, r)\n\n\tres := rw.(negroni.ResponseWriter)\n\tlog.WithFields(log.Fields{\"status\": res.Status()}).Debugf(\"%s %s\", r.Method, r.URL.Path)\n}\n\nfunc WithEngine(engine *lib.Engine) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tr = r.WithContext(common.WithEngine(r.Context(), engine))\n\t\tnext(rw, r)\n\t})\n}\n\nfunc HandlePing(rw http.ResponseWriter, r *http.Request) {\n\trw.Header().Add(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\tfmt.Fprint(rw, \"ok\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/native\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/oauth\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n)\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype TsuruHandler struct {\n\tmethod string\n\tpath string\n\th http.Handler\n}\n\nfunc fatal(err error) {\n\tfmt.Println(err.Error())\n\tlog.Fatal(err.Error())\n}\n\nvar tsuruHandlerList []TsuruHandler\n\n\/\/RegisterHandler inserts a handler on a list of handlers\nfunc RegisterHandler(path string, method string, h http.Handler) {\n\tvar th TsuruHandler\n\tth.path = path\n\tth.method = method\n\tth.h = h\n\ttsuruHandlerList = append(tsuruHandlerList, th)\n}\n\nfunc getAuthScheme() (string, error) {\n\tname, err := config.GetString(\"auth:scheme\")\n\tif name == \"\" {\n\t\tname = \"native\"\n\t}\n\treturn name, err\n}\n\n\/\/ RunServer starts tsuru API server. The dry parameter indicates whether the\n\/\/ server should run in dry mode, not starting the HTTP listener (for testing\n\/\/ purposes).\nfunc RunServer(dry bool) http.Handler {\n\tlog.Init()\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tconnString = db.DefaultDatabaseURL\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tdbName = db.DefaultDatabaseName\n\t}\n\tfmt.Printf(\"Using the database %q from the server %q.\\n\\n\", dbName, connString)\n\n\tm := &delayedRouter{}\n\n\tfor _, handler := range tsuruHandlerList {\n\t\tm.Add(handler.method, handler.path, handler.h)\n\t}\n\n\tm.Add(\"Get\", \"\/services\/instances\", authorizationRequiredHandler(serviceInstances))\n\tm.Add(\"Get\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(serviceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(removeServiceInstance))\n\tm.Add(\"Post\", \"\/services\/instances\", authorizationRequiredHandler(createServiceInstance))\n\tm.Add(\"Put\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(bindServiceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(unbindServiceInstance))\n\tm.Add(\"Get\", \"\/services\/instances\/{instance}\/status\", authorizationRequiredHandler(serviceInstanceStatus))\n\n\tm.AddAll(\"\/services\/proxy\/{instance}\", authorizationRequiredHandler(serviceProxy))\n\n\tm.Add(\"Get\", \"\/services\", authorizationRequiredHandler(serviceList))\n\tm.Add(\"Post\", \"\/services\", authorizationRequiredHandler(serviceCreate))\n\tm.Add(\"Put\", \"\/services\", authorizationRequiredHandler(serviceUpdate))\n\tm.Add(\"Delete\", \"\/services\/{name}\", authorizationRequiredHandler(serviceDelete))\n\tm.Add(\"Get\", \"\/services\/{name}\", authorizationRequiredHandler(serviceInfo))\n\tm.Add(\"Get\", \"\/services\/{name}\/plans\", authorizationRequiredHandler(servicePlans))\n\tm.Add(\"Get\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceDoc))\n\tm.Add(\"Put\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceAddDoc))\n\tm.Add(\"Put\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(grantServiceAccess))\n\tm.Add(\"Delete\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(revokeServiceAccess))\n\n\tm.Add(\"Delete\", \"\/apps\/{app}\", authorizationRequiredHandler(appDelete))\n\tm.Add(\"Get\", \"\/apps\/{app}\", authorizationRequiredHandler(appInfo))\n\tm.Add(\"Post\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(setCName))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(unsetCName))\n\trunHandler := authorizationRequiredHandler(runCommand)\n\tm.Add(\"Post\", \"\/apps\/{app}\/run\", runHandler)\n\tm.Add(\"Post\", \"\/apps\/{app}\/restart\", authorizationRequiredHandler(restart))\n\tm.Add(\"Post\", \"\/apps\/{app}\/start\", authorizationRequiredHandler(start))\n\tm.Add(\"Post\", \"\/apps\/{app}\/stop\", authorizationRequiredHandler(stop))\n\tm.Add(\"Get\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(getAppQuota))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(changeAppQuota))\n\tm.Add(\"Get\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(getEnv))\n\tm.Add(\"Post\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(setEnv))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(unsetEnv))\n\tm.Add(\"Get\", \"\/apps\", authorizationRequiredHandler(appList))\n\tm.Add(\"Post\", \"\/apps\", authorizationRequiredHandler(createApp))\n\tm.Add(\"Post\", \"\/apps\/{app}\/team-owner\", authorizationRequiredHandler(setTeamOwner))\n\tforceDeleteLockHandler := AdminRequiredHandler(forceDeleteLock)\n\tm.Add(\"Delete\", \"\/apps\/{app}\/lock\", forceDeleteLockHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(addUnits))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(removeUnits))\n\tregisterUnitHandler := authorizationRequiredHandler(registerUnit)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/register\", registerUnitHandler)\n\tsetUnitStatusHandler := authorizationRequiredHandler(setUnitStatus)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/{unit}\", setUnitStatusHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(grantAppAccess))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(revokeAppAccess))\n\tm.Add(\"Get\", \"\/apps\/{app}\/log\", authorizationRequiredHandler(appLog))\n\tlogPostHandler := authorizationRequiredHandler(addLog)\n\tm.Add(\"Post\", \"\/apps\/{app}\/log\", logPostHandler)\n\tsaveCustomDataHandler := authorizationRequiredHandler(saveAppCustomData)\n\tm.Add(\"Post\", \"\/apps\/{app}\/customdata\", saveCustomDataHandler)\n\n\tm.Add(\"Get\", \"\/autoscale\", authorizationRequiredHandler(autoScaleHistoryHandler))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\", authorizationRequiredHandler(autoScaleConfig))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/enable\", authorizationRequiredHandler(autoScaleEnable))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/disable\", authorizationRequiredHandler(autoScaleDisable))\n\n\tm.Add(\"Get\", \"\/deploys\", AdminRequiredHandler(deploysList))\n\tm.Add(\"Get\", \"\/deploys\/{deploy}\", authorizationRequiredHandler(deployInfo))\n\n\tm.Add(\"Get\", \"\/platforms\", authorizationRequiredHandler(platformList))\n\tm.Add(\"Post\", \"\/platforms\", AdminRequiredHandler(platformAdd))\n\tm.Add(\"Put\", \"\/platforms\/{name}\", AdminRequiredHandler(platformUpdate))\n\tm.Add(\"Delete\", \"\/platforms\/{name}\", AdminRequiredHandler(platformRemove))\n\n\t\/\/ These handlers don't use :app on purpose. Using :app means that only\n\t\/\/ the token generate for the given app is valid, but these handlers\n\t\/\/ use a token generated for Gandalf.\n\tm.Add(\"Get\", \"\/apps\/{appname}\/available\", authorizationRequiredHandler(appIsAvailable))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/repository\/clone\", authorizationRequiredHandler(deploy))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\", authorizationRequiredHandler(deploy))\n\n\tm.Add(\"Get\", \"\/users\", AdminRequiredHandler(listUsers))\n\tm.Add(\"Post\", \"\/users\", Handler(createUser))\n\tm.Add(\"Get\", \"\/auth\/scheme\", Handler(authScheme))\n\tm.Add(\"Post\", \"\/auth\/login\", Handler(login))\n\tm.Add(\"Post\", \"\/users\/{email}\/password\", Handler(resetPassword))\n\tm.Add(\"Post\", \"\/users\/{email}\/tokens\", Handler(login))\n\tm.Add(\"Get\", \"\/users\/{email}\/quota\", AdminRequiredHandler(getUserQuota))\n\tm.Add(\"Post\", \"\/users\/{email}\/quota\", AdminRequiredHandler(changeUserQuota))\n\tm.Add(\"Delete\", \"\/users\/tokens\", authorizationRequiredHandler(logout))\n\tm.Add(\"Put\", \"\/users\/password\", authorizationRequiredHandler(changePassword))\n\tm.Add(\"Delete\", \"\/users\", authorizationRequiredHandler(removeUser))\n\tm.Add(\"Get\", \"\/users\/keys\", authorizationRequiredHandler(listKeys))\n\tm.Add(\"Post\", \"\/users\/keys\", authorizationRequiredHandler(addKeyToUser))\n\tm.Add(\"Delete\", \"\/users\/keys\", authorizationRequiredHandler(removeKeyFromUser))\n\tm.Add(\"Get\", \"\/users\/api-key\", authorizationRequiredHandler(showAPIToken))\n\tm.Add(\"Post\", \"\/users\/api-key\", authorizationRequiredHandler(regenerateAPIToken))\n\n\tm.Add(\"Post\", \"\/tokens\", AdminRequiredHandler(generateAppToken))\n\n\tm.Add(\"Delete\", \"\/logs\", AdminRequiredHandler(logRemove))\n\n\tm.Add(\"Get\", \"\/teams\", authorizationRequiredHandler(teamList))\n\tm.Add(\"Post\", \"\/teams\", authorizationRequiredHandler(createTeam))\n\tm.Add(\"Get\", \"\/teams\/{name}\", authorizationRequiredHandler(getTeam))\n\tm.Add(\"Delete\", \"\/teams\/{name}\", authorizationRequiredHandler(removeTeam))\n\tm.Add(\"Put\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(addUserToTeam))\n\tm.Add(\"Delete\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(removeUserFromTeam))\n\n\tm.Add(\"Put\", \"\/swap\", authorizationRequiredHandler(swap))\n\n\tm.Add(\"Get\", \"\/healthcheck\/\", http.HandlerFunc(healthcheck))\n\n\tm.Add(\"Get\", \"\/iaas\/machines\", AdminRequiredHandler(machinesList))\n\tm.Add(\"Delete\", \"\/iaas\/machines\/{machine_id}\", AdminRequiredHandler(machineDestroy))\n\tm.Add(\"Get\", \"\/iaas\/templates\", AdminRequiredHandler(templatesList))\n\tm.Add(\"Post\", \"\/iaas\/templates\", AdminRequiredHandler(templateCreate))\n\tm.Add(\"Delete\", \"\/iaas\/templates\/{template_name}\", AdminRequiredHandler(templateDestroy))\n\n\tm.Add(\"Get\", \"\/plans\", authorizationRequiredHandler(listPlans))\n\tm.Add(\"Post\", \"\/plans\", AdminRequiredHandler(addPlan))\n\tm.Add(\"Delete\", \"\/plans\/{planname}\", AdminRequiredHandler(removePlan))\n\n\tm.Add(\"Get\", \"\/debug\/goroutines\", AdminRequiredHandler(dumpGoroutines))\n\n\tn := negroni.New()\n\tn.Use(negroni.NewRecovery())\n\tn.Use(newLoggerMiddleware())\n\tn.UseHandler(m)\n\tn.Use(negroni.HandlerFunc(contextClearerMiddleware))\n\tn.Use(negroni.HandlerFunc(flushingWriterMiddleware))\n\tn.Use(negroni.HandlerFunc(errorHandlingMiddleware))\n\tn.Use(negroni.HandlerFunc(setVersionHeadersMiddleware))\n\tn.Use(negroni.HandlerFunc(authTokenMiddleware))\n\tn.Use(&appLockMiddleware{excludedHandlers: []http.Handler{\n\t\tlogPostHandler,\n\t\trunHandler,\n\t\tforceDeleteLockHandler,\n\t\tregisterUnitHandler,\n\t\tsaveCustomDataHandler,\n\t\tsetUnitStatusHandler,\n\t}})\n\tn.UseHandler(http.HandlerFunc(runDelayedHandler))\n\n\tif !dry {\n\t\tprovisioner, err := getProvisioner()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare a provisioner, using default provisioner.\\n\")\n\t\t}\n\t\tapp.Provisioner, err = provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q provisioner.\\n\\n\", provisioner)\n\t\tif initializableProvisioner, ok := app.Provisioner.(provision.InitializableProvisioner); ok {\n\t\t\terr = initializableProvisioner.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tscheme, err := getAuthScheme()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare a auth:scheme, using default scheme.\\n\")\n\t\t}\n\t\tapp.AuthScheme, err = auth.GetScheme(scheme)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q auth scheme.\\n\\n\", scheme)\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tapp.StartAutoScale()\n\t\ttls, _ := config.GetBool(\"use-tls\")\n\t\tif tls {\n\t\t\tcertFile, err := config.GetString(\"tls:cert-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tkeyFile, err := config.GetString(\"tls:key-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP\/TLS server listening at %s...\\n\", listen)\n\t\t\tfatal(http.ListenAndServeTLS(listen, certFile, keyFile, n))\n\t\t} else {\n\t\t\tlistener, err := net.Listen(\"tcp\", listen)\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP server listening at %s...\\n\", listen)\n\t\t\thttp.Handle(\"\/\", n)\n\t\t\tfatal(http.Serve(listener, nil))\n\t\t}\n\t}\n\treturn n\n}\n<commit_msg>Less newlines in RunServer<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/native\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/oauth\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n)\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype TsuruHandler struct {\n\tmethod string\n\tpath string\n\th http.Handler\n}\n\nfunc fatal(err error) {\n\tfmt.Println(err.Error())\n\tlog.Fatal(err.Error())\n}\n\nvar tsuruHandlerList []TsuruHandler\n\n\/\/RegisterHandler inserts a handler on a list of handlers\nfunc RegisterHandler(path string, method string, h http.Handler) {\n\tvar th TsuruHandler\n\tth.path = path\n\tth.method = method\n\tth.h = h\n\ttsuruHandlerList = append(tsuruHandlerList, th)\n}\n\nfunc getAuthScheme() (string, error) {\n\tname, err := config.GetString(\"auth:scheme\")\n\tif name == \"\" {\n\t\tname = \"native\"\n\t}\n\treturn name, err\n}\n\n\/\/ RunServer starts tsuru API server. The dry parameter indicates whether the\n\/\/ server should run in dry mode, not starting the HTTP listener (for testing\n\/\/ purposes).\nfunc RunServer(dry bool) http.Handler {\n\tlog.Init()\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tconnString = db.DefaultDatabaseURL\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tdbName = db.DefaultDatabaseName\n\t}\n\tfmt.Printf(\"Using the database %q from the server %q.\\n\", dbName, connString)\n\n\tm := &delayedRouter{}\n\n\tfor _, handler := range tsuruHandlerList {\n\t\tm.Add(handler.method, handler.path, handler.h)\n\t}\n\n\tm.Add(\"Get\", \"\/services\/instances\", authorizationRequiredHandler(serviceInstances))\n\tm.Add(\"Get\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(serviceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(removeServiceInstance))\n\tm.Add(\"Post\", \"\/services\/instances\", authorizationRequiredHandler(createServiceInstance))\n\tm.Add(\"Put\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(bindServiceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(unbindServiceInstance))\n\tm.Add(\"Get\", \"\/services\/instances\/{instance}\/status\", authorizationRequiredHandler(serviceInstanceStatus))\n\n\tm.AddAll(\"\/services\/proxy\/{instance}\", authorizationRequiredHandler(serviceProxy))\n\n\tm.Add(\"Get\", \"\/services\", authorizationRequiredHandler(serviceList))\n\tm.Add(\"Post\", \"\/services\", authorizationRequiredHandler(serviceCreate))\n\tm.Add(\"Put\", \"\/services\", authorizationRequiredHandler(serviceUpdate))\n\tm.Add(\"Delete\", \"\/services\/{name}\", authorizationRequiredHandler(serviceDelete))\n\tm.Add(\"Get\", \"\/services\/{name}\", authorizationRequiredHandler(serviceInfo))\n\tm.Add(\"Get\", \"\/services\/{name}\/plans\", authorizationRequiredHandler(servicePlans))\n\tm.Add(\"Get\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceDoc))\n\tm.Add(\"Put\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceAddDoc))\n\tm.Add(\"Put\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(grantServiceAccess))\n\tm.Add(\"Delete\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(revokeServiceAccess))\n\n\tm.Add(\"Delete\", \"\/apps\/{app}\", authorizationRequiredHandler(appDelete))\n\tm.Add(\"Get\", \"\/apps\/{app}\", authorizationRequiredHandler(appInfo))\n\tm.Add(\"Post\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(setCName))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(unsetCName))\n\trunHandler := authorizationRequiredHandler(runCommand)\n\tm.Add(\"Post\", \"\/apps\/{app}\/run\", runHandler)\n\tm.Add(\"Post\", \"\/apps\/{app}\/restart\", authorizationRequiredHandler(restart))\n\tm.Add(\"Post\", \"\/apps\/{app}\/start\", authorizationRequiredHandler(start))\n\tm.Add(\"Post\", \"\/apps\/{app}\/stop\", authorizationRequiredHandler(stop))\n\tm.Add(\"Get\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(getAppQuota))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(changeAppQuota))\n\tm.Add(\"Get\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(getEnv))\n\tm.Add(\"Post\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(setEnv))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(unsetEnv))\n\tm.Add(\"Get\", \"\/apps\", authorizationRequiredHandler(appList))\n\tm.Add(\"Post\", \"\/apps\", authorizationRequiredHandler(createApp))\n\tm.Add(\"Post\", \"\/apps\/{app}\/team-owner\", authorizationRequiredHandler(setTeamOwner))\n\tforceDeleteLockHandler := AdminRequiredHandler(forceDeleteLock)\n\tm.Add(\"Delete\", \"\/apps\/{app}\/lock\", forceDeleteLockHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(addUnits))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(removeUnits))\n\tregisterUnitHandler := authorizationRequiredHandler(registerUnit)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/register\", registerUnitHandler)\n\tsetUnitStatusHandler := authorizationRequiredHandler(setUnitStatus)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/{unit}\", setUnitStatusHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(grantAppAccess))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(revokeAppAccess))\n\tm.Add(\"Get\", \"\/apps\/{app}\/log\", authorizationRequiredHandler(appLog))\n\tlogPostHandler := authorizationRequiredHandler(addLog)\n\tm.Add(\"Post\", \"\/apps\/{app}\/log\", logPostHandler)\n\tsaveCustomDataHandler := authorizationRequiredHandler(saveAppCustomData)\n\tm.Add(\"Post\", \"\/apps\/{app}\/customdata\", saveCustomDataHandler)\n\n\tm.Add(\"Get\", \"\/autoscale\", authorizationRequiredHandler(autoScaleHistoryHandler))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\", authorizationRequiredHandler(autoScaleConfig))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/enable\", authorizationRequiredHandler(autoScaleEnable))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/disable\", authorizationRequiredHandler(autoScaleDisable))\n\n\tm.Add(\"Get\", \"\/deploys\", AdminRequiredHandler(deploysList))\n\tm.Add(\"Get\", \"\/deploys\/{deploy}\", authorizationRequiredHandler(deployInfo))\n\n\tm.Add(\"Get\", \"\/platforms\", authorizationRequiredHandler(platformList))\n\tm.Add(\"Post\", \"\/platforms\", AdminRequiredHandler(platformAdd))\n\tm.Add(\"Put\", \"\/platforms\/{name}\", AdminRequiredHandler(platformUpdate))\n\tm.Add(\"Delete\", \"\/platforms\/{name}\", AdminRequiredHandler(platformRemove))\n\n\t\/\/ These handlers don't use :app on purpose. Using :app means that only\n\t\/\/ the token generate for the given app is valid, but these handlers\n\t\/\/ use a token generated for Gandalf.\n\tm.Add(\"Get\", \"\/apps\/{appname}\/available\", authorizationRequiredHandler(appIsAvailable))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/repository\/clone\", authorizationRequiredHandler(deploy))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\", authorizationRequiredHandler(deploy))\n\n\tm.Add(\"Get\", \"\/users\", AdminRequiredHandler(listUsers))\n\tm.Add(\"Post\", \"\/users\", Handler(createUser))\n\tm.Add(\"Get\", \"\/auth\/scheme\", Handler(authScheme))\n\tm.Add(\"Post\", \"\/auth\/login\", Handler(login))\n\tm.Add(\"Post\", \"\/users\/{email}\/password\", Handler(resetPassword))\n\tm.Add(\"Post\", \"\/users\/{email}\/tokens\", Handler(login))\n\tm.Add(\"Get\", \"\/users\/{email}\/quota\", AdminRequiredHandler(getUserQuota))\n\tm.Add(\"Post\", \"\/users\/{email}\/quota\", AdminRequiredHandler(changeUserQuota))\n\tm.Add(\"Delete\", \"\/users\/tokens\", authorizationRequiredHandler(logout))\n\tm.Add(\"Put\", \"\/users\/password\", authorizationRequiredHandler(changePassword))\n\tm.Add(\"Delete\", \"\/users\", authorizationRequiredHandler(removeUser))\n\tm.Add(\"Get\", \"\/users\/keys\", authorizationRequiredHandler(listKeys))\n\tm.Add(\"Post\", \"\/users\/keys\", authorizationRequiredHandler(addKeyToUser))\n\tm.Add(\"Delete\", \"\/users\/keys\", authorizationRequiredHandler(removeKeyFromUser))\n\tm.Add(\"Get\", \"\/users\/api-key\", authorizationRequiredHandler(showAPIToken))\n\tm.Add(\"Post\", \"\/users\/api-key\", authorizationRequiredHandler(regenerateAPIToken))\n\n\tm.Add(\"Post\", \"\/tokens\", AdminRequiredHandler(generateAppToken))\n\n\tm.Add(\"Delete\", \"\/logs\", AdminRequiredHandler(logRemove))\n\n\tm.Add(\"Get\", \"\/teams\", authorizationRequiredHandler(teamList))\n\tm.Add(\"Post\", \"\/teams\", authorizationRequiredHandler(createTeam))\n\tm.Add(\"Get\", \"\/teams\/{name}\", authorizationRequiredHandler(getTeam))\n\tm.Add(\"Delete\", \"\/teams\/{name}\", authorizationRequiredHandler(removeTeam))\n\tm.Add(\"Put\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(addUserToTeam))\n\tm.Add(\"Delete\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(removeUserFromTeam))\n\n\tm.Add(\"Put\", \"\/swap\", authorizationRequiredHandler(swap))\n\n\tm.Add(\"Get\", \"\/healthcheck\/\", http.HandlerFunc(healthcheck))\n\n\tm.Add(\"Get\", \"\/iaas\/machines\", AdminRequiredHandler(machinesList))\n\tm.Add(\"Delete\", \"\/iaas\/machines\/{machine_id}\", AdminRequiredHandler(machineDestroy))\n\tm.Add(\"Get\", \"\/iaas\/templates\", AdminRequiredHandler(templatesList))\n\tm.Add(\"Post\", \"\/iaas\/templates\", AdminRequiredHandler(templateCreate))\n\tm.Add(\"Delete\", \"\/iaas\/templates\/{template_name}\", AdminRequiredHandler(templateDestroy))\n\n\tm.Add(\"Get\", \"\/plans\", authorizationRequiredHandler(listPlans))\n\tm.Add(\"Post\", \"\/plans\", AdminRequiredHandler(addPlan))\n\tm.Add(\"Delete\", \"\/plans\/{planname}\", AdminRequiredHandler(removePlan))\n\n\tm.Add(\"Get\", \"\/debug\/goroutines\", AdminRequiredHandler(dumpGoroutines))\n\n\tn := negroni.New()\n\tn.Use(negroni.NewRecovery())\n\tn.Use(newLoggerMiddleware())\n\tn.UseHandler(m)\n\tn.Use(negroni.HandlerFunc(contextClearerMiddleware))\n\tn.Use(negroni.HandlerFunc(flushingWriterMiddleware))\n\tn.Use(negroni.HandlerFunc(errorHandlingMiddleware))\n\tn.Use(negroni.HandlerFunc(setVersionHeadersMiddleware))\n\tn.Use(negroni.HandlerFunc(authTokenMiddleware))\n\tn.Use(&appLockMiddleware{excludedHandlers: []http.Handler{\n\t\tlogPostHandler,\n\t\trunHandler,\n\t\tforceDeleteLockHandler,\n\t\tregisterUnitHandler,\n\t\tsaveCustomDataHandler,\n\t\tsetUnitStatusHandler,\n\t}})\n\tn.UseHandler(http.HandlerFunc(runDelayedHandler))\n\n\tif !dry {\n\t\tprovisioner, err := getProvisioner()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare a provisioner, using default provisioner.\\n\")\n\t\t}\n\t\tapp.Provisioner, err = provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q provisioner.\\n\", provisioner)\n\t\tif initializableProvisioner, ok := app.Provisioner.(provision.InitializableProvisioner); ok {\n\t\t\terr = initializableProvisioner.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tscheme, err := getAuthScheme()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare a auth:scheme, using default scheme.\\n\")\n\t\t}\n\t\tapp.AuthScheme, err = auth.GetScheme(scheme)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q auth scheme.\\n\", scheme)\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tapp.StartAutoScale()\n\t\ttls, _ := config.GetBool(\"use-tls\")\n\t\tif tls {\n\t\t\tcertFile, err := config.GetString(\"tls:cert-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tkeyFile, err := config.GetString(\"tls:key-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP\/TLS server listening at %s...\\n\", listen)\n\t\t\tfatal(http.ListenAndServeTLS(listen, certFile, keyFile, n))\n\t\t} else {\n\t\t\tlistener, err := net.Listen(\"tcp\", listen)\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP server listening at %s...\\n\", listen)\n\t\t\thttp.Handle(\"\/\", n)\n\t\t\tfatal(http.Serve(listener, nil))\n\t\t}\n\t}\n\treturn n\n}\n<|endoftext|>"} {"text":"<commit_before>package apiv3\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n)\n\ntype XMS struct {\n\tendpoint string\n\tinsecure bool\n\tusername string\n\tpassword string\n\thttpClient *http.Client\n}\n\nfunc New(endpoint string, insecure bool, username string, password string) (*XMS, error) {\n\tif endpoint == \"\" || username == \"\" || password == \"\" {\n\t\treturn nil, errors.New(\"Missing endpoint, username, or password\")\n\t}\n\n\tvar client *http.Client\n\tif insecure {\n\t\tclient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: insecure,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\tclient = &http.Client{}\n\t}\n\n\treturn &XMS{endpoint, insecure, username, password, client}, nil\n}\n\nfunc multimap(p map[string]string) url.Values {\n\tq := make(url.Values, len(p))\n\tfor k, v := range p {\n\t\tif v != \"\" {\n\t\t\tq[k] = []string{v}\n\t\t}\n\t}\n\treturn q\n}\n\ntype Error struct {\n\tStatusCode int\n\tMessage string `json:\"message\"`\n\tCode int `json:\"error_code\"`\n}\n\nfunc (err *Error) Error() string {\n\tif err.Code == 0 {\n\t\treturn err.Message\n\t}\n\n\treturn fmt.Sprintf(\"%s\", err.Message)\n}\n\nfunc buildError(r *http.Response) error {\n\tjsonError := Error{}\n\tjson.NewDecoder(r.Body).Decode(&jsonError)\n\n\tjsonError.StatusCode = r.StatusCode\n\tif jsonError.Message == \"\" {\n\t\tjsonError.Message = r.Status\n\t}\n\n\treturn &jsonError\n}\n\nfunc (xms *XMS) query(method string, path string, id string, params map[string]string, body interface{}, resp interface{}) error {\n\n\tendpoint := fmt.Sprintf(\"%s\/%s\", xms.endpoint, path)\n\tif id != \"\" {\n\t\tendpoint = fmt.Sprintf(\"%s\/%s\", endpoint, id)\n\t}\n\n\tencodedParams := multimap(params).Encode()\n\tif encodedParams != \"\" {\n\t\tendpoint = fmt.Sprintf(\"%s?%s\", endpoint, encodedParams)\n\t}\n\n\tbodyBytes, _ := json.Marshal(body)\n\n\tif debug {\n\t\tlog.Printf(\"endpoint: %v\\n\", endpoint)\n\t}\n\n\tif debug && body != nil {\n\t\tlog.Printf(\"body:\\n\")\n\t\tlog.Printf(\"%v\\n\", string(bodyBytes))\n\t}\n\n\treq, err := http.NewRequest(method, endpoint, bytes.NewBuffer(bodyBytes))\n\treq.SetBasicAuth(xms.username, xms.password)\n\n\tr, err := xms.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\tif debug {\n\t\tdump, _ := httputil.DumpResponse(r, true)\n\t\tlog.Printf(\"response:\\n\")\n\t\tlog.Printf(\"%v\\n}\\n\", string(dump))\n\t}\n\n\tswitch {\n\tcase r.StatusCode == 400:\n\t\treturn buildError(r)\n\tcase resp == nil:\n\t\treturn nil\n\tcase r.StatusCode == 200 || r.StatusCode == 201:\n\t\terr = json.NewDecoder(r.Body).Decode(resp)\n\t\treturn err\n\tdefault:\n\t\treturn buildError(r)\n\t}\n\n}\n<commit_msg>Logrus Support<commit_after>package apiv3\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype XMS struct {\n\tendpoint string\n\tinsecure bool\n\tusername string\n\tpassword string\n\thttpClient *http.Client\n}\n\nfunc New(endpoint string, insecure bool, username string, password string) (*XMS, error) {\n\tif endpoint == \"\" || username == \"\" || password == \"\" {\n\t\treturn nil, errors.New(\"Missing endpoint, username, or password\")\n\t}\n\n\tvar client *http.Client\n\tif insecure {\n\t\tclient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: insecure,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\tclient = &http.Client{}\n\t}\n\n\treturn &XMS{endpoint, insecure, username, password, client}, nil\n}\n\nfunc multimap(p map[string]string) url.Values {\n\tq := make(url.Values, len(p))\n\tfor k, v := range p {\n\t\tif v != \"\" {\n\t\t\tq[k] = []string{v}\n\t\t}\n\t}\n\treturn q\n}\n\ntype Error struct {\n\tStatusCode int\n\tMessage string `json:\"message\"`\n\tCode int `json:\"error_code\"`\n}\n\nfunc (err *Error) Error() string {\n\tif err.Code == 0 {\n\t\treturn err.Message\n\t}\n\n\treturn fmt.Sprintf(\"%s\", err.Message)\n}\n\nfunc buildError(r *http.Response) error {\n\tjsonError := Error{}\n\tjson.NewDecoder(r.Body).Decode(&jsonError)\n\n\tjsonError.StatusCode = r.StatusCode\n\tif jsonError.Message == \"\" {\n\t\tjsonError.Message = r.Status\n\t}\n\n\treturn &jsonError\n}\n\nfunc (xms *XMS) query(method string, path string, id string, params map[string]string, body interface{}, resp interface{}) error {\n\n\tendpoint := fmt.Sprintf(\"%s\/%s\", xms.endpoint, path)\n\tif id != \"\" {\n\t\tendpoint = fmt.Sprintf(\"%s\/%s\", endpoint, id)\n\t}\n\n\tencodedParams := multimap(params).Encode()\n\tif encodedParams != \"\" {\n\t\tendpoint = fmt.Sprintf(\"%s?%s\", endpoint, encodedParams)\n\t}\n\n\tbodyBytes, _ := json.Marshal(body)\n\n\tif debug {\n\t\tlog.Printf(\"endpoint: %v\\n\", endpoint)\n\t}\n\n\tif debug && body != nil {\n\t\tlog.Printf(\"body:\\n\")\n\t\tlog.Printf(\"%v\\n\", string(bodyBytes))\n\t}\n\n\treq, err := http.NewRequest(method, endpoint, bytes.NewBuffer(bodyBytes))\n\treq.SetBasicAuth(xms.username, xms.password)\n\n\tr, err := xms.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\tif debug {\n\t\tdump, _ := httputil.DumpResponse(r, true)\n\t\tlog.Printf(\"response:\\n\")\n\t\tlog.Printf(\"%v\\n}\\n\", string(dump))\n\t}\n\n\tswitch {\n\tcase r.StatusCode == 400:\n\t\treturn buildError(r)\n\tcase resp == nil:\n\t\treturn nil\n\tcase r.StatusCode == 200 || r.StatusCode == 201:\n\t\terr = json.NewDecoder(r.Body).Decode(resp)\n\t\treturn err\n\tdefault:\n\t\treturn buildError(r)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>atg. doc++ on GetHalter() vs Done()<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\".\/db\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t_FOLLOW = iota\n\t_UNFOLLOW = iota\n\t_FAVORITE = iota\n\t_TWEET = iota\n\t_REPLY = iota\n)\n\ntype Action struct {\n\tname int\n\tweight int\n}\n\nfunc performAction() {\n\tactions := make([]Action, 0, 5)\n\n\tactions = append(actions, Action{name: _FOLLOW, weight: ACTION_FOLLOW_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _UNFOLLOW, weight: ACTION_UNFOLLOW_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _FAVORITE, weight: ACTION_FAVORITE_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _TWEET, weight: ACTION_TWEET_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _REPLY, weight: ACTION_REPLY_WEIGHT * rand.Intn(100)})\n\n\tselectedAction := Action{name: -1, weight: -1}\n\n\tfor _, action := range actions {\n\t\tif action.weight > selectedAction.weight {\n\t\t\tselectedAction = action\n\t\t}\n\t}\n\n\tswitch selectedAction.name {\n\tcase _FOLLOW:\n\t\tactionFollow()\n\t\tbreak\n\tcase _UNFOLLOW:\n\t\tactionUnfollow()\n\t\tbreak\n\tcase _FAVORITE:\n\t\tactionFavorite()\n\t\tbreak\n\tcase _TWEET:\n\t\tactionTweet()\n\t\tbreak\n\tcase _REPLY:\n\t\tactionReply()\n\t\tbreak\n\t}\n}\n\nfunc actionFollow() {\n\tfmt.Println(\"Action follow\")\n\n\tsearch, v := generateAPISearchValues(KEYWORDS[rand.Intn(len(KEYWORDS))])\n\n\tsearchResult, err := api.GetSearch(search, v)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter API\", err)\n\t\treturn\n\t}\n\n\tfor _, tweet := range searchResult.Statuses {\n\n\t\tif !isUserAcceptable(tweet) {\n\t\t\tfmt.Println(\"Ignoring user for follow : @\" + tweet.User.ScreenName)\n\t\t\tcontinue\n\t\t}\n\n\t\tfollow, err := db.AlreadyFollow(tweet.User.Id)\n\t\tif err == nil && !follow {\n\n\t\t\terr := db.Follow{UserId: tweet.User.Id, UserName: tweet.User.ScreenName, Status: tweet.Text, FollowDate: time.Now()}.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err = api.FollowUser(tweet.User.ScreenName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while following user \"+tweet.User.ScreenName+\" : \", err)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Now follow \", tweet.User.ScreenName)\n\t\t\treturn\n\t\t}\n\n\t}\n}\n\nfunc actionUnfollow() {\n\tfmt.Println(\"Action unfollow\")\n\n\tdate := time.Now()\n\tduration, err := time.ParseDuration(\"-72\") \/\/ -3 days\n\tdate = date.Add(duration)\n\n\tfollows, err := db.GetNotUnfollowed(date, UNFOLLOW_LIMIT_IN_A_ROW)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying db to find people to unfollow\", err)\n\t\treturn\n\t}\n\n\tfor _, follow := range follows {\n\t\tfollow.LastAction = time.Now()\n\n\t\tisFollowing, err := isUserFollowing(follow.UserName)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while querying API for friendships\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif isFollowing {\n\t\t\terr = follow.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tfollow.UnfollowDate = time.Now()\n\n\t\terr = follow.Persist()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = api.UnfollowUser(follow.UserName)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while querying API to unfollow @\"+follow.UserName, err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"Unfollowed @\" + follow.UserName)\n\t}\n}\n\nfunc actionFavorite() {\n\tfmt.Println(\"Action fav\")\n\n\tsearch, v := generateAPISearchValues(KEYWORDS[rand.Intn(len(KEYWORDS))])\n\n\tsearchResult, err := api.GetSearch(search, v)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter API\", err)\n\t\treturn\n\t}\n\n\ti := 0\n\tfor _, tweet := range searchResult.Statuses {\n\t\tif i >= FAV_LIMIT_IN_A_ROW {\n\t\t\treturn\n\t\t}\n\n\t\tif !isUserAcceptable(tweet) {\n\t\t\tfmt.Println(\"Ignoring user for favorite : @\" + tweet.User.ScreenName)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = api.Favorite(tweet.Id)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"139\") { \/\/ Case of an already favorited tweet\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Println(\"Error while favoriting tweet\", err)\n\t\t} else {\n\t\t\tfmt.Println(\"Just favorited tweet : \", tweet.Text)\n\t\t}\n\n\t\ti++\n\t}\n}\n\nfunc actionTweet() {\n\tfmt.Println(\"Action tweet\")\n\n\tcontent, err := generateTweetContent()\n\tif err != nil {\n\t\tfmt.Println(\"Error while getting tweet content : \", err)\n\t\treturn\n\t}\n\n\ttweetText := content.text + \" \" + content.url + content.hashtags\n\n\terr = db.Tweet{Content: tweetText, Date: time.Now()}.Persist()\n\tif err != nil {\n\t\tfmt.Println(\"Error while persisting tweet\", err)\n\t\treturn\n\t}\n\n\ttweet, err := api.PostTweet(tweetText, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error while posting tweet\", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Tweet posted : \", tweet.Text)\n}\n\nfunc actionReply() {\n\tfmt.Println(\"Action reply\")\n\n\ttweets, err := api.GetMentionsTimeline(nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter mention API\", err)\n\t\treturn\n\t}\n\n\tfor _, tweet := range tweets {\n\n\t\treplied, err := db.HasAlreadyReplied(tweet.Id)\n\t\tif err == nil && !replied {\n\t\t\tfmt.Println(\"Building reply for tweet : \" + tweet.Text)\n\n\t\t\tresponse, err := buildReply(tweet)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while building reply\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = db.Reply{UserId: tweet.User.Id, UserName: tweet.User.ScreenName, TweetId: tweet.Id, Status: tweet.Text, Answer: response, ReplyDate: time.Now()}.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting reply\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif response != \"\" {\n\t\t\t\trespTweet, err := api.PostTweet(response, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error while posting reply\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(\"Reply posted : \", respTweet.Text)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"No response found for tweet : \" + tweet.Text)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tfmt.Println(\"Nothing to reply found :(\")\n}\n<commit_msg>Add in reply for status API call for replies<commit_after>package main\n\nimport (\n\t\".\/db\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t_FOLLOW = iota\n\t_UNFOLLOW = iota\n\t_FAVORITE = iota\n\t_TWEET = iota\n\t_REPLY = iota\n)\n\ntype Action struct {\n\tname int\n\tweight int\n}\n\nfunc performAction() {\n\tactions := make([]Action, 0, 5)\n\n\tactions = append(actions, Action{name: _FOLLOW, weight: ACTION_FOLLOW_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _UNFOLLOW, weight: ACTION_UNFOLLOW_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _FAVORITE, weight: ACTION_FAVORITE_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _TWEET, weight: ACTION_TWEET_WEIGHT * rand.Intn(100)})\n\tactions = append(actions, Action{name: _REPLY, weight: ACTION_REPLY_WEIGHT * rand.Intn(100)})\n\n\tselectedAction := Action{name: -1, weight: -1}\n\n\tfor _, action := range actions {\n\t\tif action.weight > selectedAction.weight {\n\t\t\tselectedAction = action\n\t\t}\n\t}\n\n\tswitch selectedAction.name {\n\tcase _FOLLOW:\n\t\tactionFollow()\n\t\tbreak\n\tcase _UNFOLLOW:\n\t\tactionUnfollow()\n\t\tbreak\n\tcase _FAVORITE:\n\t\tactionFavorite()\n\t\tbreak\n\tcase _TWEET:\n\t\tactionTweet()\n\t\tbreak\n\tcase _REPLY:\n\t\tactionReply()\n\t\tbreak\n\t}\n}\n\nfunc actionFollow() {\n\tfmt.Println(\"Action follow\")\n\n\tsearch, v := generateAPISearchValues(KEYWORDS[rand.Intn(len(KEYWORDS))])\n\n\tsearchResult, err := api.GetSearch(search, v)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter API\", err)\n\t\treturn\n\t}\n\n\tfor _, tweet := range searchResult.Statuses {\n\n\t\tif !isUserAcceptable(tweet) {\n\t\t\tfmt.Println(\"Ignoring user for follow : @\" + tweet.User.ScreenName)\n\t\t\tcontinue\n\t\t}\n\n\t\tfollow, err := db.AlreadyFollow(tweet.User.Id)\n\t\tif err == nil && !follow {\n\n\t\t\terr := db.Follow{UserId: tweet.User.Id, UserName: tweet.User.ScreenName, Status: tweet.Text, FollowDate: time.Now()}.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err = api.FollowUser(tweet.User.ScreenName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while following user \"+tweet.User.ScreenName+\" : \", err)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Now follow \", tweet.User.ScreenName)\n\t\t\treturn\n\t\t}\n\n\t}\n}\n\nfunc actionUnfollow() {\n\tfmt.Println(\"Action unfollow\")\n\n\tdate := time.Now()\n\tduration, err := time.ParseDuration(\"-72\") \/\/ -3 days\n\tdate = date.Add(duration)\n\n\tfollows, err := db.GetNotUnfollowed(date, UNFOLLOW_LIMIT_IN_A_ROW)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying db to find people to unfollow\", err)\n\t\treturn\n\t}\n\n\tfor _, follow := range follows {\n\t\tfollow.LastAction = time.Now()\n\n\t\tisFollowing, err := isUserFollowing(follow.UserName)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while querying API for friendships\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif isFollowing {\n\t\t\terr = follow.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tfollow.UnfollowDate = time.Now()\n\n\t\terr = follow.Persist()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while persisting follow\", err)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = api.UnfollowUser(follow.UserName)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error while querying API to unfollow @\"+follow.UserName, err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"Unfollowed @\" + follow.UserName)\n\t}\n}\n\nfunc actionFavorite() {\n\tfmt.Println(\"Action fav\")\n\n\tsearch, v := generateAPISearchValues(KEYWORDS[rand.Intn(len(KEYWORDS))])\n\n\tsearchResult, err := api.GetSearch(search, v)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter API\", err)\n\t\treturn\n\t}\n\n\ti := 0\n\tfor _, tweet := range searchResult.Statuses {\n\t\tif i >= FAV_LIMIT_IN_A_ROW {\n\t\t\treturn\n\t\t}\n\n\t\tif !isUserAcceptable(tweet) {\n\t\t\tfmt.Println(\"Ignoring user for favorite : @\" + tweet.User.ScreenName)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = api.Favorite(tweet.Id)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"139\") { \/\/ Case of an already favorited tweet\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Println(\"Error while favoriting tweet\", err)\n\t\t} else {\n\t\t\tfmt.Println(\"Just favorited tweet : \", tweet.Text)\n\t\t}\n\n\t\ti++\n\t}\n}\n\nfunc actionTweet() {\n\tfmt.Println(\"Action tweet\")\n\n\tcontent, err := generateTweetContent()\n\tif err != nil {\n\t\tfmt.Println(\"Error while getting tweet content : \", err)\n\t\treturn\n\t}\n\n\ttweetText := content.text + \" \" + content.url + content.hashtags\n\n\terr = db.Tweet{Content: tweetText, Date: time.Now()}.Persist()\n\tif err != nil {\n\t\tfmt.Println(\"Error while persisting tweet\", err)\n\t\treturn\n\t}\n\n\ttweet, err := api.PostTweet(tweetText, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error while posting tweet\", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Tweet posted : \", tweet.Text)\n}\n\nfunc actionReply() {\n\tfmt.Println(\"Action reply\")\n\n\ttweets, err := api.GetMentionsTimeline(nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error while querying twitter mention API\", err)\n\t\treturn\n\t}\n\n\tfor _, tweet := range tweets {\n\n\t\treplied, err := db.HasAlreadyReplied(tweet.Id)\n\t\tif err == nil && !replied {\n\t\t\tfmt.Println(\"Building reply for tweet : \" + tweet.Text)\n\n\t\t\tresponse, err := buildReply(tweet)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while building reply\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = db.Reply{UserId: tweet.User.Id, UserName: tweet.User.ScreenName, TweetId: tweet.Id, Status: tweet.Text, Answer: response, ReplyDate: time.Now()}.Persist()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error while persisting reply\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif response != \"\" {\n\t\t\t\tv := url.Values{}\n\t\t\t\tv.Add(\"in_reply_to_status_id\", tweet.Id)\n\n\t\t\t\trespTweet, err := api.PostTweet(response, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error while posting reply\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(\"Reply posted : \", respTweet.Text)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"No response found for tweet : \" + tweet.Text)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tfmt.Println(\"Nothing to reply found :(\")\n}\n<|endoftext|>"} {"text":"<commit_before>package importer\n\nimport (\n\t\"github.com\/jbowtie\/gokogiri\/xml\"\n\t\"github.com\/jbowtie\/gokogiri\/xpath\"\n\t\"github.com\/pebbe\/util\"\n\t\"github.com\/projectcypress\/cdatools\/models\"\n)\n\nfunc EncounterOrderExtractor(entry *models.Entry, entryElement xml.Node) interface{} {\n\tencounterOrder := models.Encounter{}\n\tencounterOrder.Entry = *entry\n\n\t\/\/extract codes\n\tvar codePath = xpath.Compile(\"cda:code\")\n\tExtractCodes(&encounterOrder.Entry.Coded, entryElement, codePath)\n\n\t\/\/extract order specific dates\n\tvar orderTimeXPath = xpath.Compile(\"cda:author\/cda:time\/@value\")\n\tencounterOrder.StartTime = GetTimestamp(orderTimeXPath, entryElement)\n\tencounterOrder.EndTime = GetTimestamp(orderTimeXPath, entryElement)\n\n\treturn encounterOrder\n}\n\nfunc EncounterPerformedExtractor(entry *models.Entry, entryElement xml.Node) interface{} {\n\tencounter := models.Encounter{}\n\tencounter.Entry = *entry\n\n\t\/\/extract codes\n\tvar codePath = xpath.Compile(\"cda:code\")\n\tExtractCodes(&encounter.Entry.Coded, entryElement, codePath)\n\n\t\/\/set admit\/discharge times\n\tencounter.AdmitTime = encounter.Entry.StartTime\n\tencounter.DischargeTime = encounter.Entry.EndTime\n\n\t\/\/extract reason\n\textractReason(&encounter, entryElement)\n\n\t\/\/extract diagnoses\n\tvar pdXPath = xpath.Compile(\"cda:entryRelationship\/cda:observation[cda:code\/@code='8319008']\/cda:value\")\n\tvar diagXPath = xpath.Compile(\"cda:entryRelationship\/cda:act\/cda:entryRelationship\/cda:observation[cda:templateId\/@root='2.16.840.1.113883.10.20.22.4.4']\/cda:value\")\n\tencounter.PrincipalDiagnosis.Coded.Codes = map[string][]string{}\n\tencounter.Diagnosis.Coded.Codes = map[string][]string{}\n\tExtractCodes(&encounter.PrincipalDiagnosis.Coded, entryElement, pdXPath)\n\tExtractCodes(&encounter.Diagnosis.Coded, entryElement, diagXPath)\n\n\t\/\/encounter.PrincipalDiagnosis.Code = &models.CodedConcept{}\n\tExtractCodedConcept(&encounter.PrincipalDiagnosis.CodedConcept, entryElement, pdXPath)\n\t\/\/encounter.Diagnosis.Code = &models.CodedConcept{}\n\tExtractCodedConcept(&encounter.Diagnosis.CodedConcept, entryElement, diagXPath)\n\n\t\/\/extract facility\n\textractFacility(&encounter, entryElement)\n\n\t\/\/extract discharge disposition\n\tvar dischargeDispositionCodeXPath = xpath.Compile(\"sdtc:dischargeDispositionCode\/@code\")\n\tvar dischargeDispositionCodeSystemXPath = xpath.Compile(\"sdtc:dischargeDispositionCode\/@codeSystem\")\n\tdischargeDispositionCode := FirstElementContent(dischargeDispositionCodeXPath, entryElement)\n\tdischargeDispositionCodeSystemOid := FirstElementContent(dischargeDispositionCodeSystemXPath, entryElement)\n\tdischargeDispositionCodeSystem := models.CodeSystemFor(dischargeDispositionCodeSystemOid)\n\tif dischargeDispositionCode != \"\" {\n\t\tencounter.DischargeDisposition = map[string]string{\n\t\t\t\"code\": dischargeDispositionCode,\n\t\t\t\"codeSystem\": dischargeDispositionCodeSystem,\n\t\t\t\"codeSystemOid\": dischargeDispositionCodeSystemOid,\n\t\t}\n\t}\n\n\treturn encounter\n}\n\nfunc extractFacility(encounter *models.Encounter, entryElement xml.Node) {\n\tvar participantXPath = xpath.Compile(\"cda:participant[@typeCode='LOC']\/cda:participantRole[@classCode='SDLOC']\")\n\tparticipantElement := FirstElement(participantXPath, entryElement)\n\n\tif participantElement != nil {\n\t\tvar facility = models.Facility{}\n\n\t\tvar nameXPath = xpath.Compile(\"cda:playingEntity\/cda:name\")\n\t\tfacility.Name = FirstElementContent(nameXPath, participantElement)\n\n\t\taddressXPath := xpath.Compile(\"cda:addr\")\n\t\taddressElements, err := participantElement.Search(addressXPath)\n\t\tutil.CheckErr(err)\n\t\tfacility.Addresses = make([]models.Address, len(addressElements))\n\t\tfor i, addressElement := range addressElements {\n\t\t\tfacility.Addresses[i] = ImportAddress(addressElement)\n\t\t}\n\n\t\ttelecomXPath := xpath.Compile(\"cda:telecom\")\n\t\ttelecomElements, err := participantElement.Search(telecomXPath)\n\t\tutil.CheckErr(err)\n\t\tfacility.Telecoms = make([]models.Telecom, len(telecomElements))\n\t\tfor i, telecomElement := range telecomElements {\n\t\t\tfacility.Telecoms[i] = ImportTelecom(telecomElement)\n\t\t}\n\n\t\tfacility.Code = &models.CodedConcept{}\n\t\tExtractCodedConcept(facility.Code, participantElement, xpath.Compile(\"cda:code\"))\n\n\t\tvar timeLowXPath = xpath.Compile(\"cda:effectiveTime\/cda:low\/@value\")\n\t\tvar timeHighXPath = xpath.Compile(\"cda:effectiveTime\/cda:high\/@value\")\n\t\tfacility.StartTime = GetTimestamp(timeLowXPath, entryElement)\n\t\tfacility.EndTime = GetTimestamp(timeHighXPath, entryElement)\n\n\t\tencounter.Facility = facility\n\t}\n}\n\nfunc extractReason(encounter *models.Encounter, entryElement xml.Node) {\n\tvar reasonXPath = xpath.Compile(\"cda:entryRelationship[@typeCode='RSON']\/cda:observation\")\n\treasonElement := FirstElement(reasonXPath, entryElement)\n\n\tif reasonElement != nil {\n\t\t\/\/extract reason value code\n\t\tvar valueCodeXPath = xpath.Compile(\"cda:value\/@code\")\n\t\tvar valueCodeSystemXPath = xpath.Compile(\"cda:value\/@codeSystem\")\n\t\tvalueCode := FirstElementContent(valueCodeXPath, reasonElement)\n\t\tvalueCodeSystem := models.CodeSystemFor(FirstElementContent(valueCodeSystemXPath, reasonElement))\n\t\tencounter.Reason.Code = valueCode\n\t\tencounter.Reason.CodeSystem = valueCodeSystem\n\t\tencounter.Reason.CodeSystemName = valueCodeSystem\n\t}\n}\n<commit_msg>remove unnecessary commented out lines<commit_after>package importer\n\nimport (\n\t\"github.com\/jbowtie\/gokogiri\/xml\"\n\t\"github.com\/jbowtie\/gokogiri\/xpath\"\n\t\"github.com\/pebbe\/util\"\n\t\"github.com\/projectcypress\/cdatools\/models\"\n)\n\nfunc EncounterOrderExtractor(entry *models.Entry, entryElement xml.Node) interface{} {\n\tencounterOrder := models.Encounter{}\n\tencounterOrder.Entry = *entry\n\n\t\/\/extract codes\n\tvar codePath = xpath.Compile(\"cda:code\")\n\tExtractCodes(&encounterOrder.Entry.Coded, entryElement, codePath)\n\n\t\/\/extract order specific dates\n\tvar orderTimeXPath = xpath.Compile(\"cda:author\/cda:time\/@value\")\n\tencounterOrder.StartTime = GetTimestamp(orderTimeXPath, entryElement)\n\tencounterOrder.EndTime = GetTimestamp(orderTimeXPath, entryElement)\n\n\treturn encounterOrder\n}\n\nfunc EncounterPerformedExtractor(entry *models.Entry, entryElement xml.Node) interface{} {\n\tencounter := models.Encounter{}\n\tencounter.Entry = *entry\n\n\t\/\/extract codes\n\tvar codePath = xpath.Compile(\"cda:code\")\n\tExtractCodes(&encounter.Entry.Coded, entryElement, codePath)\n\n\t\/\/set admit\/discharge times\n\tencounter.AdmitTime = encounter.Entry.StartTime\n\tencounter.DischargeTime = encounter.Entry.EndTime\n\n\t\/\/extract reason\n\textractReason(&encounter, entryElement)\n\n\t\/\/extract diagnoses\n\tvar pdXPath = xpath.Compile(\"cda:entryRelationship\/cda:observation[cda:code\/@code='8319008']\/cda:value\")\n\tvar diagXPath = xpath.Compile(\"cda:entryRelationship\/cda:act\/cda:entryRelationship\/cda:observation[cda:templateId\/@root='2.16.840.1.113883.10.20.22.4.4']\/cda:value\")\n\tencounter.PrincipalDiagnosis.Coded.Codes = map[string][]string{}\n\tencounter.Diagnosis.Coded.Codes = map[string][]string{}\n\tExtractCodes(&encounter.PrincipalDiagnosis.Coded, entryElement, pdXPath)\n\tExtractCodes(&encounter.Diagnosis.Coded, entryElement, diagXPath)\n\tExtractCodedConcept(&encounter.PrincipalDiagnosis.CodedConcept, entryElement, pdXPath)\n\tExtractCodedConcept(&encounter.Diagnosis.CodedConcept, entryElement, diagXPath)\n\n\t\/\/extract facility\n\textractFacility(&encounter, entryElement)\n\n\t\/\/extract discharge disposition\n\tvar dischargeDispositionCodeXPath = xpath.Compile(\"sdtc:dischargeDispositionCode\/@code\")\n\tvar dischargeDispositionCodeSystemXPath = xpath.Compile(\"sdtc:dischargeDispositionCode\/@codeSystem\")\n\tdischargeDispositionCode := FirstElementContent(dischargeDispositionCodeXPath, entryElement)\n\tdischargeDispositionCodeSystemOid := FirstElementContent(dischargeDispositionCodeSystemXPath, entryElement)\n\tdischargeDispositionCodeSystem := models.CodeSystemFor(dischargeDispositionCodeSystemOid)\n\tif dischargeDispositionCode != \"\" {\n\t\tencounter.DischargeDisposition = map[string]string{\n\t\t\t\"code\": dischargeDispositionCode,\n\t\t\t\"codeSystem\": dischargeDispositionCodeSystem,\n\t\t\t\"codeSystemOid\": dischargeDispositionCodeSystemOid,\n\t\t}\n\t}\n\n\treturn encounter\n}\n\nfunc extractFacility(encounter *models.Encounter, entryElement xml.Node) {\n\tvar participantXPath = xpath.Compile(\"cda:participant[@typeCode='LOC']\/cda:participantRole[@classCode='SDLOC']\")\n\tparticipantElement := FirstElement(participantXPath, entryElement)\n\n\tif participantElement != nil {\n\t\tvar facility = models.Facility{}\n\n\t\tvar nameXPath = xpath.Compile(\"cda:playingEntity\/cda:name\")\n\t\tfacility.Name = FirstElementContent(nameXPath, participantElement)\n\n\t\taddressXPath := xpath.Compile(\"cda:addr\")\n\t\taddressElements, err := participantElement.Search(addressXPath)\n\t\tutil.CheckErr(err)\n\t\tfacility.Addresses = make([]models.Address, len(addressElements))\n\t\tfor i, addressElement := range addressElements {\n\t\t\tfacility.Addresses[i] = ImportAddress(addressElement)\n\t\t}\n\n\t\ttelecomXPath := xpath.Compile(\"cda:telecom\")\n\t\ttelecomElements, err := participantElement.Search(telecomXPath)\n\t\tutil.CheckErr(err)\n\t\tfacility.Telecoms = make([]models.Telecom, len(telecomElements))\n\t\tfor i, telecomElement := range telecomElements {\n\t\t\tfacility.Telecoms[i] = ImportTelecom(telecomElement)\n\t\t}\n\n\t\tfacility.Code = &models.CodedConcept{}\n\t\tExtractCodedConcept(facility.Code, participantElement, xpath.Compile(\"cda:code\"))\n\n\t\tvar timeLowXPath = xpath.Compile(\"cda:effectiveTime\/cda:low\/@value\")\n\t\tvar timeHighXPath = xpath.Compile(\"cda:effectiveTime\/cda:high\/@value\")\n\t\tfacility.StartTime = GetTimestamp(timeLowXPath, entryElement)\n\t\tfacility.EndTime = GetTimestamp(timeHighXPath, entryElement)\n\n\t\tencounter.Facility = facility\n\t}\n}\n\nfunc extractReason(encounter *models.Encounter, entryElement xml.Node) {\n\tvar reasonXPath = xpath.Compile(\"cda:entryRelationship[@typeCode='RSON']\/cda:observation\")\n\treasonElement := FirstElement(reasonXPath, entryElement)\n\n\tif reasonElement != nil {\n\t\t\/\/extract reason value code\n\t\tvar valueCodeXPath = xpath.Compile(\"cda:value\/@code\")\n\t\tvar valueCodeSystemXPath = xpath.Compile(\"cda:value\/@codeSystem\")\n\t\tvalueCode := FirstElementContent(valueCodeXPath, reasonElement)\n\t\tvalueCodeSystem := models.CodeSystemFor(FirstElementContent(valueCodeSystemXPath, reasonElement))\n\t\tencounter.Reason.Code = valueCode\n\t\tencounter.Reason.CodeSystem = valueCodeSystem\n\t\tencounter.Reason.CodeSystemName = valueCodeSystem\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package charts\n\nimport (\n\t\"github.com\/go-echarts\/go-echarts\/v2\/opts\"\n)\n\ntype Overlaper interface {\n\toverlap() MultiSeries\n}\n\n\/\/ XYAxis represent the X and Y axis in the rectangular coordinate.\ntype XYAxis struct {\n\tXAxisList []opts.XAxis `json:\"xaxis\"`\n\tYAxisList []opts.YAxis `json:\"yaxis\"`\n}\n\nfunc (xy *XYAxis) initXYAxis() {\n\txy.XAxisList = append(xy.XAxisList, opts.XAxis{})\n\txy.YAxisList = append(xy.YAxisList, opts.YAxis{})\n}\n\n\/\/ ExtendXAxis adds new X axes.\nfunc (xy *XYAxis) ExtendXAxis(xAxis ...opts.XAxis) {\n\txy.XAxisList = append(xy.XAxisList, xAxis...)\n}\n\n\/\/ ExtendYAxis adds new Y axes.\nfunc (xy *XYAxis) ExtendYAxis(yAxis ...opts.YAxis) {\n\txy.YAxisList = append(xy.YAxisList, yAxis...)\n}\n\n\/\/ WithXAxisOpts\nfunc WithXAxisOpts(opt opts.XAxis, index ...int) GlobalOpts {\n\treturn func(bc *BaseConfiguration) {\n\t\tif len(index) == 0 {\n\t\t\tindex = []int{0}\n\t\t}\n\t\tfor i := 0; i < len(index); i++ {\n\t\t\tbc.XYAxis.XAxisList[index[i]] = opt\n\t\t}\n\t}\n}\n\n\/\/ WithYAxisOpts\nfunc WithYAxisOpts(opt opts.YAxis, index ...int) GlobalOpts {\n\treturn func(bc *BaseConfiguration) {\n\t\tif len(index) == 0 {\n\t\t\tindex = []int{0}\n\t\t}\n\t\tfor i := 0; i < len(index); i++ {\n\t\t\tbc.XYAxis.YAxisList[index[i]] = opt\n\t\t}\n\t}\n}\n\n\/\/ RectConfiguration contains options for the rectangular coordinate.\ntype RectConfiguration struct {\n\tBaseConfiguration\n}\n\nfunc (rect *RectConfiguration) setRectGlobalOptions(options ...GlobalOpts) {\n\trect.BaseConfiguration.setBaseGlobalOptions(options...)\n}\n\n\/\/ RectChart is a chart in RectChart coordinate.\ntype RectChart struct {\n\tRectConfiguration\n\tMultiSeries\n\n\txAxisData interface{}\n}\n\nfunc (rc *RectChart) overlap() MultiSeries {\n\treturn rc.MultiSeries\n}\n\n\/\/ SetGlobalOptions sets options for the RectChart instance.\nfunc (rc *RectChart) SetGlobalOptions(options ...GlobalOpts) *RectChart {\n\trc.RectConfiguration.setRectGlobalOptions(options...)\n\treturn rc\n}\n\n\/\/ Overlap composites multiple charts into one single canvas.\n\/\/ 结合不同类型图表叠加画在同张图上\n\/\/ 只适用于 RectChart 图表,其实现了 rectCharter 接口\n\/\/ RectChart 图表包括 Bar\/BoxPlot\/Line\/Scatter\/EffectScatter\/Kline\/HeatMap\n\/\/ 将 RectChart 图表的 Series 追加到调用者的 Series 里面,Series 是完全独立的\n\/\/ 而全局配置使用的是调用者的配置项\nfunc (rc *RectChart) Overlap(a ...Overlaper) {\n\tfor i := 0; i < len(a); i++ {\n\t\trc.MultiSeries = append(rc.MultiSeries, a[i].overlap()...)\n\t}\n}\n\n\/\/ Validate\nfunc (rc *RectChart) Validate() {\n\t\/\/ 确保 X 轴数据不会因为设置了 XAxisOpts 而被抹除\n\trc.XAxisList[0].Data = rc.xAxisData\n\t\/\/ 确保 Y 轴数标签正确显示\n\tfor i := 0; i < len(rc.YAxisList); i++ {\n\t\trc.YAxisList[i].AxisLabel.Show = true\n\t}\n\trc.Assets.Validate(rc.AssetsHost)\n}\n<commit_msg>[update] update comments. (#116)<commit_after>package charts\n\nimport (\n\t\"github.com\/go-echarts\/go-echarts\/v2\/opts\"\n)\n\ntype Overlaper interface {\n\toverlap() MultiSeries\n}\n\n\/\/ XYAxis represent the X and Y axis in the rectangular coordinate.\ntype XYAxis struct {\n\tXAxisList []opts.XAxis `json:\"xaxis\"`\n\tYAxisList []opts.YAxis `json:\"yaxis\"`\n}\n\nfunc (xy *XYAxis) initXYAxis() {\n\txy.XAxisList = append(xy.XAxisList, opts.XAxis{})\n\txy.YAxisList = append(xy.YAxisList, opts.YAxis{})\n}\n\n\/\/ ExtendXAxis adds new X axes.\nfunc (xy *XYAxis) ExtendXAxis(xAxis ...opts.XAxis) {\n\txy.XAxisList = append(xy.XAxisList, xAxis...)\n}\n\n\/\/ ExtendYAxis adds new Y axes.\nfunc (xy *XYAxis) ExtendYAxis(yAxis ...opts.YAxis) {\n\txy.YAxisList = append(xy.YAxisList, yAxis...)\n}\n\n\/\/ WithXAxisOpts\nfunc WithXAxisOpts(opt opts.XAxis, index ...int) GlobalOpts {\n\treturn func(bc *BaseConfiguration) {\n\t\tif len(index) == 0 {\n\t\t\tindex = []int{0}\n\t\t}\n\t\tfor i := 0; i < len(index); i++ {\n\t\t\tbc.XYAxis.XAxisList[index[i]] = opt\n\t\t}\n\t}\n}\n\n\/\/ WithYAxisOpts\nfunc WithYAxisOpts(opt opts.YAxis, index ...int) GlobalOpts {\n\treturn func(bc *BaseConfiguration) {\n\t\tif len(index) == 0 {\n\t\t\tindex = []int{0}\n\t\t}\n\t\tfor i := 0; i < len(index); i++ {\n\t\t\tbc.XYAxis.YAxisList[index[i]] = opt\n\t\t}\n\t}\n}\n\n\/\/ RectConfiguration contains options for the rectangular coordinate.\ntype RectConfiguration struct {\n\tBaseConfiguration\n}\n\nfunc (rect *RectConfiguration) setRectGlobalOptions(options ...GlobalOpts) {\n\trect.BaseConfiguration.setBaseGlobalOptions(options...)\n}\n\n\/\/ RectChart is a chart in RectChart coordinate.\ntype RectChart struct {\n\tRectConfiguration\n\tMultiSeries\n\n\txAxisData interface{}\n}\n\nfunc (rc *RectChart) overlap() MultiSeries {\n\treturn rc.MultiSeries\n}\n\n\/\/ SetGlobalOptions sets options for the RectChart instance.\nfunc (rc *RectChart) SetGlobalOptions(options ...GlobalOpts) *RectChart {\n\trc.RectConfiguration.setRectGlobalOptions(options...)\n\treturn rc\n}\n\n\/\/ Overlap composites multiple charts into one single canvas.\n\/\/ It is only suit for some of the charts which are in rectangular coordinate.\n\/\/ Support charts: Bar\/BoxPlot\/Line\/Scatter\/EffectScatter\/Kline\/HeatMap\nfunc (rc *RectChart) Overlap(a ...Overlaper) {\n\tfor i := 0; i < len(a); i++ {\n\t\trc.MultiSeries = append(rc.MultiSeries, a[i].overlap()...)\n\t}\n}\n\n\/\/ Validate\nfunc (rc *RectChart) Validate() {\n\t\/\/ Make sure that the data of X axis won't be cleaned for XAxisOpts\n\trc.XAxisList[0].Data = rc.xAxisData\n\t\/\/ Make sure that the labels of Y axis show correctly\n\tfor i := 0; i < len(rc.YAxisList); i++ {\n\t\trc.YAxisList[i].AxisLabel.Show = true\n\t}\n\trc.Assets.Validate(rc.AssetsHost)\n}\n<|endoftext|>"} {"text":"<commit_before>package check\n\nimport (\n\t\"testing\"\n\t\"github.com\/foomo\/petze\/config\"\n)\n\nvar checkMinMaxCountTestCases = []struct {\n\texpect config.Expect\n\tlength int64\n\tok bool\n\tinfo string\n\tmessage string\n}{\n\t{expect: config.Expect{Min: &[]int64{3}[0]}, length: int64(4), ok: true, info: \"\", message: \"check minimum true\"},\n\t{expect: config.Expect{Min: &[]int64{5}[0]}, length: int64(4), ok: true, info: \"min actual: 4 < expected: 5\", message: \"check minimum false\"},\n\t{expect: config.Expect{Max: &[]int64{5}[0]}, length: int64(4), ok: true, info: \"\", message: \"check max true\"},\n\t{expect: config.Expect{Max: &[]int64{3}[0]}, length: int64(4), ok: true, info: \"max actual: 4 > expected: 3\", message: \"check max false\"},\n\t{expect: config.Expect{Count: &[]int64{3}[0]}, length: int64(3), ok: true, info: \"\", message: \"check count true\"},\n\t{expect: config.Expect{Count: &[]int64{3}[0]}, length: int64(4), ok: true, info: \"count actual: 4 != expected: 3\", message: \"check count false\"},\n}\n\nfunc TestCheckMinMaxCount(t *testing.T) {\n\tfor _, test := range checkMinMaxCountTestCases {\n\t\tok, info := checkMinMaxCount(test.expect, test.length)\n\t\tif ok != test.ok && info != test.info {\n\t\t\tt.Error(test.message)\n\t\t}\n\t}\n}\n<commit_msg>Add full test coverage for check method<commit_after>package check\n\nimport (\n\t\"testing\"\n\t\"github.com\/foomo\/petze\/config\"\n)\n\nvar checkMinMaxCountTestCases = []struct {\n\texpect config.Expect\n\tlength int64\n\tok bool\n\tinfo string\n\tmessage string\n}{\n\t{expect: config.Expect{Min: &[]int64{3}[0]}, length: 4, ok: true, info: \"\", message: \"check minimum true\"},\n\t{expect: config.Expect{Min: &[]int64{5}[0]}, length: 4, ok: true, info: \"min actual: 4 < expected: 5\", message: \"check minimum false\"},\n\t{expect: config.Expect{Max: &[]int64{5}[0]}, length: 4, ok: true, info: \"\", message: \"check max true\"},\n\t{expect: config.Expect{Max: &[]int64{3}[0]}, length: 4, ok: true, info: \"max actual: 4 > expected: 3\", message: \"check max false\"},\n\t{expect: config.Expect{Count: &[]int64{3}[0]}, length: 3, ok: true, info: \"\", message: \"check count true\"},\n\t{expect: config.Expect{Count: &[]int64{3}[0]}, length: 4, ok: true, info: \"count actual: 4 != expected: 3\", message: \"check count false\"},\n}\n\nfunc TestCheckMinMaxCount(t *testing.T) {\n\tfor _, test := range checkMinMaxCountTestCases {\n\t\tok, info := checkMinMaxCount(test.expect, test.length)\n\t\tif ok != test.ok && info != test.info {\n\t\t\tt.Error(test.message)\n\t\t}\n\t}\n}\n\nfunc TestCheckMinMaxPanic(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"should throw a panic\")\n\t\t}\n\t}()\n\n\tcheckMinMaxCount(config.Expect{Equals: \"\"}, 3)\n}\n<|endoftext|>"} {"text":"<commit_before>package check\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n\n\tlogr \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ ParseCommandLineArguments parses the command line arguments.\nfunc (check *HealthCheck) ParseCommandLineArguments() {\n\tflag.UintVar(&check.config.brokerID, \"broker-id\", 0, \"id of the Kafka broker to health check\")\n\tflag.UintVar(&check.config.brokerPort, \"broker-port\", 9092, \"Kafka broker port\")\n\tflag.UintVar(&check.config.statusServerPort, \"server-port\", 8000, \"port to open for http health status queries\")\n\tflag.StringVar(&check.config.zookeeperConnect, \"zookeeper\", \"\", \"ZooKeeper connect string (e.g. node1:2181,node2:2181,...\/chroot)\")\n\tflag.StringVar(&check.config.topicName, \"topic\", \"\", \"name of the topic to use - use one per broker, defaults to broker-<id>-health-check\")\n\tflag.StringVar(&check.config.replicationTopicName, \"replication-topic\", \"\",\n\t\t\"name of the topic to use for replication checks - use one per cluster, defaults to broker-replication-check\")\n\tflag.UintVar(&check.config.replicationFailureThreshold, \"replication-failures-count\", 5,\n\t\t\"number of replication failures before broker is reported unhealthy\")\n\tflag.DurationVar(&check.config.CheckInterval, \"check-interval\", 10*time.Second, \"how frequently to perform health checks\")\n\tflag.BoolVar(&check.config.NoTopicCreation, \"no-topic-creation\", false, \"disable automatic topic creation and deletion\")\n\tflag.Parse()\n\tl := log.New(os.Stderr, \"\", 0)\n\tvalid := check.validateConfig(l)\n\tif !valid {\n\t\tl.Printf(\"%s usage:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tl.Fatal(\"One or more mandatory command line parameters are missing.\")\n\t} else {\n\t\tif check.config.topicName == \"\" {\n\t\t\tcheck.config.topicName = fmt.Sprintf(\"broker-%d-health-check\", check.config.brokerID)\n\t\t\tlogr.Println(\"using topic\", check.config.topicName, \"for broker\", check.config.brokerID, \"health check\")\n\t\t}\n\t\tif check.config.replicationTopicName == \"\" {\n\t\t\tcheck.config.replicationTopicName = \"broker-replication-check\"\n\t\t\tlogr.Println(\"using topic\", check.config.topicName, \"for broker\", check.config.brokerID, \"replication check\")\n\t\t}\n\t\tcheck.config.retryInterval = check.config.CheckInterval \/ 2\n\t}\n}\n\nfunc (check *HealthCheck) validateConfig(l *log.Logger) bool {\n\tvalid := true\n\tif check.config.zookeeperConnect == \"\" {\n\t\tl.Println(\"parameter -zookeeper required.\")\n\t\tvalid = false\n\t}\n\tif check.config.topicName != \"\" && !validTopicName(check.config.topicName) {\n\t\tl.Println(\"topic name\", check.config.topicName, \"is not a valid Kafka topic name.\")\n\t\tvalid = false\n\t}\n\n\treturn valid\n}\n\n\/\/ from https:\/\/github.com\/apache\/kafka\/blob\/6eacc0de303e4d29e083b89c1f53615c1dfa291e\/core\/src\/main\/scala\/kafka\/common\/Topic.scala\nvar legalTopicPattern = regexp.MustCompile(\"^[a-zA-Z0-9\\\\._\\\\-]+$\")\n\nfunc validTopicName(name string) bool {\n\treturn legalTopicPattern.MatchString(name) && len(name) <= 255\n}\n<commit_msg>Enable specification of broker host from CLI<commit_after>package check\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n\n\tlogr \"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ ParseCommandLineArguments parses the command line arguments.\nfunc (check *HealthCheck) ParseCommandLineArguments() {\n\tflag.StringVar(&check.config.brokerHost, \"broker-host\", \"localhost\", \"ip address or hostname of broker host\")\n\tflag.UintVar(&check.config.brokerID, \"broker-id\", 0, \"id of the Kafka broker to health check\")\n\tflag.UintVar(&check.config.brokerPort, \"broker-port\", 9092, \"Kafka broker port\")\n\tflag.UintVar(&check.config.statusServerPort, \"server-port\", 8000, \"port to open for http health status queries\")\n\tflag.StringVar(&check.config.zookeeperConnect, \"zookeeper\", \"\", \"ZooKeeper connect string (e.g. node1:2181,node2:2181,...\/chroot)\")\n\tflag.StringVar(&check.config.topicName, \"topic\", \"\", \"name of the topic to use - use one per broker, defaults to broker-<id>-health-check\")\n\tflag.StringVar(&check.config.replicationTopicName, \"replication-topic\", \"\",\n\t\t\"name of the topic to use for replication checks - use one per cluster, defaults to broker-replication-check\")\n\tflag.UintVar(&check.config.replicationFailureThreshold, \"replication-failures-count\", 5,\n\t\t\"number of replication failures before broker is reported unhealthy\")\n\tflag.DurationVar(&check.config.CheckInterval, \"check-interval\", 10*time.Second, \"how frequently to perform health checks\")\n\tflag.BoolVar(&check.config.NoTopicCreation, \"no-topic-creation\", false, \"disable automatic topic creation and deletion\")\n\tflag.Parse()\n\tl := log.New(os.Stderr, \"\", 0)\n\tvalid := check.validateConfig(l)\n\tif !valid {\n\t\tl.Printf(\"%s usage:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tl.Fatal(\"One or more mandatory command line parameters are missing.\")\n\t} else {\n\t\tif check.config.topicName == \"\" {\n\t\t\tcheck.config.topicName = fmt.Sprintf(\"broker-%d-health-check\", check.config.brokerID)\n\t\t\tlogr.Println(\"using topic\", check.config.topicName, \"for broker\", check.config.brokerID, \"health check\")\n\t\t}\n\t\tif check.config.replicationTopicName == \"\" {\n\t\t\tcheck.config.replicationTopicName = \"broker-replication-check\"\n\t\t\tlogr.Println(\"using topic\", check.config.topicName, \"for broker\", check.config.brokerID, \"replication check\")\n\t\t}\n\t\tcheck.config.retryInterval = check.config.CheckInterval \/ 2\n\t}\n}\n\nfunc (check *HealthCheck) validateConfig(l *log.Logger) bool {\n\tvalid := true\n\tif check.config.zookeeperConnect == \"\" {\n\t\tl.Println(\"parameter -zookeeper required.\")\n\t\tvalid = false\n\t}\n\tif check.config.topicName != \"\" && !validTopicName(check.config.topicName) {\n\t\tl.Println(\"topic name\", check.config.topicName, \"is not a valid Kafka topic name.\")\n\t\tvalid = false\n\t}\n\n\treturn valid\n}\n\n\/\/ from https:\/\/github.com\/apache\/kafka\/blob\/6eacc0de303e4d29e083b89c1f53615c1dfa291e\/core\/src\/main\/scala\/kafka\/common\/Topic.scala\nvar legalTopicPattern = regexp.MustCompile(\"^[a-zA-Z0-9\\\\._\\\\-]+$\")\n\nfunc validTopicName(name string) bool {\n\treturn legalTopicPattern.MatchString(name) && len(name) <= 255\n}\n<|endoftext|>"} {"text":"<commit_before>package coordinator\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tlog \"code.google.com\/p\/log4go\"\n\t\"github.com\/influxdb\/influxdb\/cluster\"\n\t\"github.com\/influxdb\/influxdb\/protocol\"\n)\n\ntype ProtobufClient struct {\n\tconnLock sync.Mutex\n\tconn net.Conn\n\thostAndPort string\n\trequestBufferLock sync.RWMutex\n\trequestBuffer map[uint32]*runningRequest\n\treconnectWait sync.WaitGroup\n\tconnectCalled bool\n\tlastRequestId uint32\n\twriteTimeout time.Duration\n\tattempts int\n\tstopped bool\n\treconChan chan struct{}\n\treconGroup *sync.WaitGroup\n\tonce *sync.Once\n}\n\ntype runningRequest struct {\n\ttimeMade time.Time\n\tr cluster.ResponseChannel\n\trequest *protocol.Request\n}\n\nconst (\n\tREQUEST_RETRY_ATTEMPTS = 2\n\tMAX_RESPONSE_SIZE = MAX_REQUEST_SIZE\n\tMAX_REQUEST_TIME = time.Second * 1200\n\tRECONNECT_RETRY_WAIT = time.Millisecond * 100\n)\n\nfunc NewProtobufClient(hostAndPort string, writeTimeout time.Duration) *ProtobufClient {\n\tlog.Debug(\"NewProtobufClient: \", hostAndPort)\n\treturn &ProtobufClient{\n\t\thostAndPort: hostAndPort,\n\t\trequestBuffer: make(map[uint32]*runningRequest),\n\t\twriteTimeout: writeTimeout,\n\t\treconChan: make(chan struct{}, 1),\n\t\treconGroup: new(sync.WaitGroup),\n\t\tonce: new(sync.Once),\n\t\tstopped: false,\n\t}\n}\n\nfunc (self *ProtobufClient) Connect() {\n\tself.once.Do(self.connect)\n}\n\nfunc (self *ProtobufClient) connect() {\n\tself.reconChan <- struct{}{}\n\tgo func() {\n\t\tself.reconnect()\n\t\tself.readResponses()\n\t}()\n\tgo self.peridicallySweepTimedOutRequests()\n}\n\nfunc (self *ProtobufClient) Close() {\n\tself.connLock.Lock()\n\tdefer self.connLock.Unlock()\n\tif self.conn != nil {\n\t\tself.conn.Close()\n\t\tself.stopped = true\n\t\tself.conn = nil\n\t}\n\tself.ClearRequests()\n}\n\nfunc (self *ProtobufClient) getConnection() net.Conn {\n\tself.connLock.Lock()\n\tdefer self.connLock.Unlock()\n\treturn self.conn\n}\n\nfunc (self *ProtobufClient) ClearRequests() {\n\tself.requestBufferLock.Lock()\n\tdefer self.requestBufferLock.Unlock()\n\n\tfor _, req := range self.requestBuffer {\n\t\tself.cancelRequest(req.request)\n\t}\n\n\tself.requestBuffer = map[uint32]*runningRequest{}\n}\n\nfunc (self *ProtobufClient) CancelRequest(request *protocol.Request) {\n\tself.requestBufferLock.Lock()\n\tdefer self.requestBufferLock.Unlock()\n\tself.cancelRequest(request)\n}\n\nfunc (self *ProtobufClient) cancelRequest(request *protocol.Request) {\n\treq, ok := self.requestBuffer[*request.Id]\n\tif !ok {\n\t\treturn\n\t}\n\tmessage := \"cancelling request\"\n\treq.r.Yield(&protocol.Response{\n\t\tType: protocol.Response_ERROR.Enum(),\n\t\tErrorMessage: &message,\n\t})\n\tdelete(self.requestBuffer, *request.Id)\n}\n\n\/\/ Makes a request to the server. If the responseStream chan is not nil it will expect a response from the server\n\/\/ with a matching request.Id. The REQUEST_RETRY_ATTEMPTS constant of 3 and the RECONNECT_RETRY_WAIT of 100ms means\n\/\/ that an attempt to make a request to a downed server will take 300ms to time out.\nfunc (self *ProtobufClient) MakeRequest(request *protocol.Request, r cluster.ResponseChannel) error {\n\tif request.Id == nil {\n\t\tid := atomic.AddUint32(&self.lastRequestId, uint32(1))\n\t\trequest.Id = &id\n\t}\n\tif r != nil {\n\t\tself.requestBufferLock.Lock()\n\n\t\t\/\/ this should actually never happen. The sweeper should clear out dead requests\n\t\t\/\/ before the uint32 ids roll over.\n\t\tif oldReq, alreadyHasRequestById := self.requestBuffer[*request.Id]; alreadyHasRequestById {\n\t\t\tmessage := \"already has a request with this id, must have timed out\"\n\t\t\tlog.Error(message)\n\t\t\toldReq.r.Yield(&protocol.Response{\n\t\t\t\tType: protocol.Response_ERROR.Enum(),\n\t\t\t\tErrorMessage: &message,\n\t\t\t})\n\t\t}\n\t\tself.requestBuffer[*request.Id] = &runningRequest{timeMade: time.Now(), r: r, request: request}\n\t\tself.requestBufferLock.Unlock()\n\t}\n\n\tdata, err := request.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn := self.getConnection()\n\tif conn == nil {\n\t\tconn = self.reconnect()\n\t\tif conn == nil {\n\t\t\treturn fmt.Errorf(\"Failed to connect to server %s\", self.hostAndPort)\n\t\t}\n\t}\n\n\tif self.writeTimeout > 0 {\n\t\tconn.SetWriteDeadline(time.Now().Add(self.writeTimeout))\n\t}\n\tbuff := bytes.NewBuffer(make([]byte, 0, len(data)+8))\n\tbinary.Write(buff, binary.LittleEndian, uint32(len(data)))\n\t_, err = conn.Write(append(buff.Bytes(), data...))\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ if we got here it errored out, clear out the request\n\tself.requestBufferLock.Lock()\n\tdelete(self.requestBuffer, *request.Id)\n\tself.requestBufferLock.Unlock()\n\tself.reconnect()\n\treturn err\n}\n\nfunc (self *ProtobufClient) readResponses() {\n\tmessage := make([]byte, 0, MAX_RESPONSE_SIZE)\n\tbuff := bytes.NewBuffer(message)\n\tfor !self.stopped {\n\t\tbuff.Reset()\n\t\tconn := self.getConnection()\n\t\tif conn == nil {\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tvar messageSizeU uint32\n\t\tvar err error\n\t\terr = binary.Read(conn, binary.LittleEndian, &messageSizeU)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while reading messsage size: %d\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tmessageSize := int64(messageSizeU)\n\t\tmessageReader := io.LimitReader(conn, messageSize)\n\t\t_, err = io.Copy(buff, messageReader)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while reading message: %d\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tresponse, err := protocol.DecodeResponse(buff)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error unmarshaling response: %s\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t} else {\n\t\t\tself.sendResponse(response)\n\t\t}\n\t}\n}\n\nfunc (self *ProtobufClient) sendResponse(response *protocol.Response) {\n\tself.requestBufferLock.RLock()\n\treq, ok := self.requestBuffer[*response.RequestId]\n\tself.requestBufferLock.RUnlock()\n\tif !ok {\n\t\treturn\n\t}\n\n\tdeleteRequest := false\n\tswitch rt := response.GetType(); rt {\n\tcase protocol.Response_END_STREAM,\n\t\tprotocol.Response_HEARTBEAT,\n\t\tprotocol.Response_ERROR:\n\t\tdeleteRequest = true\n\tcase protocol.Response_QUERY:\n\t\t\/\/ do nothing\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown response type: %s\", rt))\n\t}\n\n\tself.requestBufferLock.Lock()\n\treq, ok = self.requestBuffer[*response.RequestId]\n\tif deleteRequest {\n\t\tdelete(self.requestBuffer, *response.RequestId)\n\t}\n\tself.requestBufferLock.Unlock()\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Debug(\"ProtobufClient yielding to %s %s\", req.r.Name(), response)\n\treq.r.Yield(response)\n}\n\nfunc (self *ProtobufClient) reconnect() net.Conn {\n\tselect {\n\tcase <-self.reconChan:\n\t\tself.reconGroup.Add(1)\n\t\tdefer func() {\n\t\t\tself.reconGroup.Done()\n\t\t\tself.reconChan <- struct{}{}\n\t\t}()\n\tdefault:\n\t\tself.reconGroup.Wait()\n\t\treturn self.conn\n\t}\n\n\tif self.conn != nil {\n\t\tself.conn.Close()\n\t}\n\tconn, err := net.DialTimeout(\"tcp\", self.hostAndPort, self.writeTimeout)\n\tif err != nil {\n\t\tself.attempts++\n\t\tif self.attempts < 100 {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Error(\"failed to connect to %s %d times\", self.hostAndPort, self.attempts)\n\t\tself.attempts = 0\n\t}\n\n\tself.conn = conn\n\tlog.Info(\"connected to %s\", self.hostAndPort)\n\treturn conn\n}\n\nfunc (self *ProtobufClient) peridicallySweepTimedOutRequests() {\n\tfor {\n\t\ttime.Sleep(time.Minute)\n\t\tself.requestBufferLock.Lock()\n\t\tmaxAge := time.Now().Add(-MAX_REQUEST_TIME)\n\t\tfor k, req := range self.requestBuffer {\n\t\t\tif req.timeMade.Before(maxAge) {\n\t\t\t\tdelete(self.requestBuffer, k)\n\t\t\t\tlog.Warn(\"Request timed out: \", req.request)\n\t\t\t}\n\t\t}\n\t\tself.requestBufferLock.Unlock()\n\t}\n}\n<commit_msg>Write to the buffer instead of using append<commit_after>package coordinator\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tlog \"code.google.com\/p\/log4go\"\n\t\"github.com\/influxdb\/influxdb\/cluster\"\n\t\"github.com\/influxdb\/influxdb\/protocol\"\n)\n\ntype ProtobufClient struct {\n\tconnLock sync.Mutex\n\tconn net.Conn\n\thostAndPort string\n\trequestBufferLock sync.RWMutex\n\trequestBuffer map[uint32]*runningRequest\n\treconnectWait sync.WaitGroup\n\tconnectCalled bool\n\tlastRequestId uint32\n\twriteTimeout time.Duration\n\tattempts int\n\tstopped bool\n\treconChan chan struct{}\n\treconGroup *sync.WaitGroup\n\tonce *sync.Once\n}\n\ntype runningRequest struct {\n\ttimeMade time.Time\n\tr cluster.ResponseChannel\n\trequest *protocol.Request\n}\n\nconst (\n\tREQUEST_RETRY_ATTEMPTS = 2\n\tMAX_RESPONSE_SIZE = MAX_REQUEST_SIZE\n\tMAX_REQUEST_TIME = time.Second * 1200\n\tRECONNECT_RETRY_WAIT = time.Millisecond * 100\n)\n\nfunc NewProtobufClient(hostAndPort string, writeTimeout time.Duration) *ProtobufClient {\n\tlog.Debug(\"NewProtobufClient: \", hostAndPort)\n\treturn &ProtobufClient{\n\t\thostAndPort: hostAndPort,\n\t\trequestBuffer: make(map[uint32]*runningRequest),\n\t\twriteTimeout: writeTimeout,\n\t\treconChan: make(chan struct{}, 1),\n\t\treconGroup: new(sync.WaitGroup),\n\t\tonce: new(sync.Once),\n\t\tstopped: false,\n\t}\n}\n\nfunc (self *ProtobufClient) Connect() {\n\tself.once.Do(self.connect)\n}\n\nfunc (self *ProtobufClient) connect() {\n\tself.reconChan <- struct{}{}\n\tgo func() {\n\t\tself.reconnect()\n\t\tself.readResponses()\n\t}()\n\tgo self.peridicallySweepTimedOutRequests()\n}\n\nfunc (self *ProtobufClient) Close() {\n\tself.connLock.Lock()\n\tdefer self.connLock.Unlock()\n\tif self.conn != nil {\n\t\tself.conn.Close()\n\t\tself.stopped = true\n\t\tself.conn = nil\n\t}\n\tself.ClearRequests()\n}\n\nfunc (self *ProtobufClient) getConnection() net.Conn {\n\tself.connLock.Lock()\n\tdefer self.connLock.Unlock()\n\treturn self.conn\n}\n\nfunc (self *ProtobufClient) ClearRequests() {\n\tself.requestBufferLock.Lock()\n\tdefer self.requestBufferLock.Unlock()\n\n\tfor _, req := range self.requestBuffer {\n\t\tself.cancelRequest(req.request)\n\t}\n\n\tself.requestBuffer = map[uint32]*runningRequest{}\n}\n\nfunc (self *ProtobufClient) CancelRequest(request *protocol.Request) {\n\tself.requestBufferLock.Lock()\n\tdefer self.requestBufferLock.Unlock()\n\tself.cancelRequest(request)\n}\n\nfunc (self *ProtobufClient) cancelRequest(request *protocol.Request) {\n\treq, ok := self.requestBuffer[*request.Id]\n\tif !ok {\n\t\treturn\n\t}\n\tmessage := \"cancelling request\"\n\treq.r.Yield(&protocol.Response{\n\t\tType: protocol.Response_ERROR.Enum(),\n\t\tErrorMessage: &message,\n\t})\n\tdelete(self.requestBuffer, *request.Id)\n}\n\n\/\/ Makes a request to the server. If the responseStream chan is not nil it will expect a response from the server\n\/\/ with a matching request.Id. The REQUEST_RETRY_ATTEMPTS constant of 3 and the RECONNECT_RETRY_WAIT of 100ms means\n\/\/ that an attempt to make a request to a downed server will take 300ms to time out.\nfunc (self *ProtobufClient) MakeRequest(request *protocol.Request, r cluster.ResponseChannel) error {\n\tif request.Id == nil {\n\t\tid := atomic.AddUint32(&self.lastRequestId, uint32(1))\n\t\trequest.Id = &id\n\t}\n\tif r != nil {\n\t\tself.requestBufferLock.Lock()\n\n\t\t\/\/ this should actually never happen. The sweeper should clear out dead requests\n\t\t\/\/ before the uint32 ids roll over.\n\t\tif oldReq, alreadyHasRequestById := self.requestBuffer[*request.Id]; alreadyHasRequestById {\n\t\t\tmessage := \"already has a request with this id, must have timed out\"\n\t\t\tlog.Error(message)\n\t\t\toldReq.r.Yield(&protocol.Response{\n\t\t\t\tType: protocol.Response_ERROR.Enum(),\n\t\t\t\tErrorMessage: &message,\n\t\t\t})\n\t\t}\n\t\tself.requestBuffer[*request.Id] = &runningRequest{timeMade: time.Now(), r: r, request: request}\n\t\tself.requestBufferLock.Unlock()\n\t}\n\n\tdata, err := request.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn := self.getConnection()\n\tif conn == nil {\n\t\tconn = self.reconnect()\n\t\tif conn == nil {\n\t\t\treturn fmt.Errorf(\"Failed to connect to server %s\", self.hostAndPort)\n\t\t}\n\t}\n\n\tif self.writeTimeout > 0 {\n\t\tconn.SetWriteDeadline(time.Now().Add(self.writeTimeout))\n\t}\n\tbuff := bytes.NewBuffer(make([]byte, 0, len(data)+8))\n\tbinary.Write(buff, binary.LittleEndian, uint32(len(data)))\n\tbuff.Write(data)\n\t_, err = conn.Write(buff.Bytes())\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ if we got here it errored out, clear out the request\n\tself.requestBufferLock.Lock()\n\tdelete(self.requestBuffer, *request.Id)\n\tself.requestBufferLock.Unlock()\n\tself.reconnect()\n\treturn err\n}\n\nfunc (self *ProtobufClient) readResponses() {\n\tmessage := make([]byte, 0, MAX_RESPONSE_SIZE)\n\tbuff := bytes.NewBuffer(message)\n\tfor !self.stopped {\n\t\tbuff.Reset()\n\t\tconn := self.getConnection()\n\t\tif conn == nil {\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tvar messageSizeU uint32\n\t\tvar err error\n\t\terr = binary.Read(conn, binary.LittleEndian, &messageSizeU)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while reading messsage size: %d\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tmessageSize := int64(messageSizeU)\n\t\tmessageReader := io.LimitReader(conn, messageSize)\n\t\t_, err = io.Copy(buff, messageReader)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while reading message: %d\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tresponse, err := protocol.DecodeResponse(buff)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error unmarshaling response: %s\", err)\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t} else {\n\t\t\tself.sendResponse(response)\n\t\t}\n\t}\n}\n\nfunc (self *ProtobufClient) sendResponse(response *protocol.Response) {\n\tself.requestBufferLock.RLock()\n\treq, ok := self.requestBuffer[*response.RequestId]\n\tself.requestBufferLock.RUnlock()\n\tif !ok {\n\t\treturn\n\t}\n\n\tdeleteRequest := false\n\tswitch rt := response.GetType(); rt {\n\tcase protocol.Response_END_STREAM,\n\t\tprotocol.Response_HEARTBEAT,\n\t\tprotocol.Response_ERROR:\n\t\tdeleteRequest = true\n\tcase protocol.Response_QUERY:\n\t\t\/\/ do nothing\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown response type: %s\", rt))\n\t}\n\n\tself.requestBufferLock.Lock()\n\treq, ok = self.requestBuffer[*response.RequestId]\n\tif deleteRequest {\n\t\tdelete(self.requestBuffer, *response.RequestId)\n\t}\n\tself.requestBufferLock.Unlock()\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Debug(\"ProtobufClient yielding to %s %s\", req.r.Name(), response)\n\treq.r.Yield(response)\n}\n\nfunc (self *ProtobufClient) reconnect() net.Conn {\n\tselect {\n\tcase <-self.reconChan:\n\t\tself.reconGroup.Add(1)\n\t\tdefer func() {\n\t\t\tself.reconGroup.Done()\n\t\t\tself.reconChan <- struct{}{}\n\t\t}()\n\tdefault:\n\t\tself.reconGroup.Wait()\n\t\treturn self.conn\n\t}\n\n\tif self.conn != nil {\n\t\tself.conn.Close()\n\t}\n\tconn, err := net.DialTimeout(\"tcp\", self.hostAndPort, self.writeTimeout)\n\tif err != nil {\n\t\tself.attempts++\n\t\tif self.attempts < 100 {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Error(\"failed to connect to %s %d times\", self.hostAndPort, self.attempts)\n\t\tself.attempts = 0\n\t}\n\n\tself.conn = conn\n\tlog.Info(\"connected to %s\", self.hostAndPort)\n\treturn conn\n}\n\nfunc (self *ProtobufClient) peridicallySweepTimedOutRequests() {\n\tfor {\n\t\ttime.Sleep(time.Minute)\n\t\tself.requestBufferLock.Lock()\n\t\tmaxAge := time.Now().Add(-MAX_REQUEST_TIME)\n\t\tfor k, req := range self.requestBuffer {\n\t\t\tif req.timeMade.Before(maxAge) {\n\t\t\t\tdelete(self.requestBuffer, k)\n\t\t\t\tlog.Warn(\"Request timed out: \", req.request)\n\t\t\t}\n\t\t}\n\t\tself.requestBufferLock.Unlock()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2013-2016 Oryx(ossrs)\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy of\n\/\/ this software and associated documentation files (the \"Software\"), to deal in\n\/\/ the Software without restriction, including without limitation the rights to\n\/\/ use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n\/\/ the Software, and to permit persons to whom the Software is furnished to do so,\n\/\/ subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n\/\/ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n\/\/ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n\/\/ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ossrs\/go-oryx\/agent\"\n\t\"github.com\/ossrs\/go-oryx\/core\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"runtime\/pprof\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ the state of server, state graph:\n\/\/ Init => Normal(Ready => Running)\n\/\/ Init\/Normal => Closed\ntype ServerState int\n\nconst (\n\tStateInit ServerState = 1 << iota\n\tStateReady\n\tStateRunning\n\tStateClosed\n)\n\ntype Server struct {\n\tctx core.Context\n\t\/\/ signal handler.\n\tsigs chan os.Signal\n\t\/\/ whether closed.\n\tclosed ServerState\n\tclosing chan bool\n\t\/\/ for system internal to notify quit.\n\tquit chan bool\n\twg sync.WaitGroup\n\t\/\/ core components.\n\thtbt *Heartbeat\n\tlogger *simpleLogger\n\trtmp core.OpenCloser\n\t\/\/ the locker for state, for instance, the closed.\n\tlock sync.Mutex\n}\n\nfunc NewServer(ctx core.Context) *Server {\n\tv := &Server{\n\t\tctx: ctx,\n\t\tsigs: make(chan os.Signal, 1),\n\t\tclosed: StateInit,\n\t\tclosing: make(chan bool, 1),\n\t\tquit: make(chan bool, 1),\n\t\thtbt: NewHeartbeat(ctx),\n\t\tlogger: &simpleLogger{ctx: ctx},\n\t}\n\tv.rtmp = agent.NewRtmp(ctx, v)\n\n\tcore.Conf.Subscribe(v)\n\n\treturn v\n}\n\n\/\/ notify server to stop and wait for cleanup.\nfunc (v *Server) Close() {\n\tctx := v.ctx\n\n\t\/\/ wait for stopped.\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\t\/\/ only create?\n\tif v.closed == StateInit {\n\t\treturn\n\t}\n\n\t\/\/ closed?\n\tif v.closed == StateClosed {\n\t\tcore.Warn.Println(ctx, \"server already closed.\")\n\t\treturn\n\t}\n\n\t\/\/ notify to close.\n\tif v.closed == StateRunning {\n\t\tcore.Info.Println(ctx, \"notify server to stop.\")\n\t\tselect {\n\t\tcase v.quit <- true:\n\t\tdefault:\n\t\t}\n\t}\n\n\t\/\/ wait for closed.\n\tif v.closed == StateRunning {\n\t\t<-v.closing\n\t}\n\n\t\/\/ do cleanup when stopped.\n\tcore.Conf.Unsubscribe(v)\n\n\t\/\/ close the rtmp agent.\n\tif err := v.rtmp.Close(); err != nil {\n\t\tcore.Warn.Println(ctx, \"close rtmp agent failed. err is\", err)\n\t}\n\n\t\/\/ close the agent manager.\n\tagent.Manager.Close()\n\n\t\/\/ when cpu profile is enabled, close it.\n\tif core.Conf.Go.CpuProfile != \"\" {\n\t\tpprof.StopCPUProfile()\n\t\tcore.Trace.Println(ctx, \"cpu profile ok, file is\", core.Conf.Go.CpuProfile)\n\t}\n\n\t\/\/ when memory profile enabled, write heap info.\n\tif core.Conf.Go.MemProfile != \"\" {\n\t\tif f, err := os.Create(core.Conf.Go.MemProfile); err != nil {\n\t\t\tcore.Warn.Println(ctx, \"ignore open memory profile failed. err is\", err)\n\t\t} else {\n\t\t\tdefer f.Close()\n\t\t\tif err = pprof.Lookup(\"heap\").WriteTo(f, 0); err != nil {\n\t\t\t\tcore.Warn.Println(ctx, \"write memory profile failed. err is\", err)\n\t\t\t}\n\t\t}\n\t\tcore.Trace.Println(ctx, \"mem profile ok, file is\", core.Conf.Go.MemProfile)\n\t}\n\n\t\/\/ ok, closed.\n\tv.closed = StateClosed\n\tcore.Trace.Println(ctx, \"server closed\")\n}\n\nfunc (v *Server) ParseConfig(conf string) (err error) {\n\tctx := v.ctx\n\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\tcore.Trace.Println(ctx, \"start to parse config file\", conf)\n\tif err = core.Conf.Loads(conf); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (v *Server) PrepareLogger() (err error) {\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\tif err = v.applyLogger(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (v *Server) initializeRuntime() (err error) {\n\tctx := v.ctx\n\n\t\/\/ install signals.\n\t\/\/ TODO: FIXME: when process the current signal, others may drop.\n\tsignal.Notify(v.sigs)\n\n\t\/\/ apply the cpu profile.\n\tif err = v.applyCpuProfile(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ apply the gc percent.\n\tif err = v.applyGcPercent(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ show gc trace.\n\tgo func() {\n\t\tstat := &debug.GCStats{}\n\n\t\tfor {\n\t\t\tif core.Conf.Go.GcTrace > 0 {\n\t\t\t\tpgc := stat.NumGC\n\t\t\t\tdebug.ReadGCStats(stat)\n\t\t\t\tif len(stat.Pause) > 3 {\n\t\t\t\t\tstat.Pause = append([]time.Duration{}, stat.Pause[:3]...)\n\t\t\t\t}\n\t\t\t\tif pgc < stat.NumGC {\n\t\t\t\t\tcore.Trace.Println(ctx, \"gc\", stat.NumGC, stat.PauseTotal, stat.Pause, stat.PauseQuantiles)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(core.Conf.Go.GcTrace) * time.Second)\n\t\t\t} else {\n\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn\n}\n\nfunc (v *Server) Initialize() (err error) {\n\tctx := v.ctx\n\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\t\/\/ about the runtime.\n\tif err = v.initializeRuntime(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ use worker container to fork.\n\tvar wc core.WorkerContainer = v\n\n\t\/\/ heartbeat with http api.\n\tif err = v.htbt.Initialize(wc); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ reload goroutine\n\twc.GFork(\"reload\", core.Conf.ReloadCycle)\n\t\/\/ heartbeat goroutine\n\twc.GFork(\"htbt(discovery)\", v.htbt.discoveryCycle)\n\twc.GFork(\"htbt(main)\", v.htbt.beatCycle)\n\t\/\/ open rtmp agent.\n\tif err = v.rtmp.Open(); err != nil {\n\t\tcore.Error.Println(ctx, \"open rtmp agent failed. err is\", err)\n\t\treturn\n\t}\n\n\tc := core.Conf\n\tl := fmt.Sprintf(\"%v(%v\/%v)\", c.Log.Tank, c.Log.Level, c.Log.File)\n\tif !c.LogToFile() {\n\t\tl = fmt.Sprintf(\"%v(%v)\", c.Log.Tank, c.Log.Level)\n\t}\n\tcore.Trace.Println(ctx, fmt.Sprintf(\"init server ok, conf=%v, log=%v, workers=%v\/%v, gc=%v\/%v%%, daemon=%v\",\n\t\tc.Conf(), l, c.Workers, runtime.NumCPU(), c.Go.GcInterval, c.Go.GcPercent, c.Daemon))\n\n\t\/\/ set to ready, requires cleanup.\n\tv.closed = StateReady\n\n\treturn\n}\n\nfunc (v *Server) Run() (err error) {\n\tctx := v.ctx\n\n\tfunc() {\n\t\tv.lock.Lock()\n\t\tdefer v.lock.Unlock()\n\n\t\tif v.closed != StateReady {\n\t\t\tpanic(\"server invalid state.\")\n\t\t}\n\t\tv.closed = StateRunning\n\t}()\n\n\t\/\/ when terminated, notify the chan.\n\tdefer func() {\n\t\tselect {\n\t\tcase v.closing <- true:\n\t\tdefault:\n\t\t}\n\t}()\n\n\tcore.Info.Println(ctx, \"server cycle running\")\n\n\t\/\/ run server, apply settings.\n\tv.applyMultipleProcesses(core.Conf.Workers)\n\n\tvar wc core.WorkerContainer = v\n\tfor {\n\t\tvar gcc <-chan time.Time = nil\n\t\tif core.Conf.Go.GcInterval > 0 {\n\t\t\tgcc = time.After(time.Second * time.Duration(core.Conf.Go.GcInterval))\n\t\t}\n\n\t\tselect {\n\t\tcase signal := <-v.sigs:\n\t\t\tcore.Trace.Println(ctx, \"got signal\", signal)\n\t\t\tswitch signal {\n\t\t\tcase SIGUSR1, SIGUSR2:\n\t\t\t\tpanic(\"panic by SIGUSR1\/2\")\n\t\t\tcase os.Interrupt, syscall.SIGTERM:\n\t\t\t\t\/\/ SIGINT, SIGTERM\n\t\t\t\twc.Quit()\n\t\t\t}\n\t\tcase <-wc.QC():\n\t\t\twc.Quit()\n\n\t\t\t\/\/ wait for all goroutines quit.\n\t\t\tv.wg.Wait()\n\t\t\tcore.Warn.Println(ctx, \"server cycle ok\")\n\t\t\treturn\n\t\tcase <-gcc:\n\t\t\truntime.GC()\n\t\t\tcore.Info.Println(ctx, \"go runtime gc every\", core.Conf.Go.GcInterval, \"seconds\")\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ interface WorkContainer\nfunc (v *Server) QC() <-chan bool {\n\treturn v.quit\n}\n\nfunc (v *Server) Quit() error {\n\tselect {\n\tcase v.quit <- true:\n\tdefault:\n\t}\n\n\treturn core.QuitError\n}\n\nfunc (v *Server) GFork(name string, f func(core.WorkerContainer)) {\n\tctx := v.ctx\n\n\tv.wg.Add(1)\n\tgo func() {\n\t\tdefer v.wg.Done()\n\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tif !core.IsNormalQuit(r) {\n\t\t\t\t\tcore.Warn.Println(ctx, \"rtmp ignore\", r)\n\t\t\t\t}\n\n\t\t\t\tcore.Error.Println(ctx, string(debug.Stack()))\n\n\t\t\t\tv.Quit()\n\t\t\t}\n\t\t}()\n\n\t\tf(v)\n\n\t\tif name != \"\" {\n\t\t\tcore.Trace.Println(ctx, name, \"worker terminated.\")\n\t\t}\n\t}()\n}\n\nfunc (v *Server) applyMultipleProcesses(workers int) {\n\tctx := v.ctx\n\n\tif workers < 0 {\n\t\tpanic(\"should not be negative workers\")\n\t}\n\n\tif workers == 0 {\n\t\tworkers = runtime.NumCPU()\n\t}\n\tpv := runtime.GOMAXPROCS(workers)\n\n\tcore.Trace.Println(ctx, \"apply workers\", workers, \"and previous is\", pv)\n}\n\nfunc (v *Server) applyLogger(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tif err = v.logger.close(c); err != nil {\n\t\treturn\n\t}\n\tcore.Info.Println(ctx, \"close logger ok\")\n\n\tif err = v.logger.open(c); err != nil {\n\t\treturn\n\t}\n\tcore.Info.Println(ctx, \"open logger ok\")\n\n\treturn\n}\n\nfunc (v *Server) applyCpuProfile(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tpprof.StopCPUProfile()\n\n\tif c.Go.CpuProfile == \"\" {\n\t\treturn\n\t}\n\n\tvar f *os.File\n\tif f, err = os.Create(c.Go.CpuProfile); err != nil {\n\t\tcore.Error.Println(ctx, \"open cpu profile file failed. err is\", err)\n\t\treturn\n\t}\n\tif err = pprof.StartCPUProfile(f); err != nil {\n\t\tcore.Error.Println(ctx, \"start cpu profile failed. err is\", err)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (v *Server) applyGcPercent(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tif c.Go.GcPercent == 0 {\n\t\tdebug.SetGCPercent(100)\n\t\treturn\n\t}\n\n\tpv := debug.SetGCPercent(c.Go.GcPercent)\n\tcore.Trace.Println(ctx, \"set gc percent from\", pv, \"to\", c.Go.GcPercent)\n\treturn\n}\n\n\/\/ interface ReloadHandler\nfunc (v *Server) OnReloadGlobal(scope int, cc, pc *core.Config) (err error) {\n\tif scope == core.ReloadWorkers {\n\t\tv.applyMultipleProcesses(cc.Workers)\n\t} else if scope == core.ReloadLog {\n\t\tv.applyLogger(cc)\n\t} else if scope == core.ReloadCpuProfile {\n\t\tv.applyCpuProfile(cc)\n\t} else if scope == core.ReloadGcPercent {\n\t\tv.applyGcPercent(cc)\n\t}\n\n\treturn\n}\n\nfunc (v *Server) OnReloadVhost(vhost string, scope int, cc, pc *core.Config) (err error) {\n\treturn\n}\n<commit_msg>for #52, start new goroutine for signal when quit.<commit_after>\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2013-2016 Oryx(ossrs)\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy of\n\/\/ this software and associated documentation files (the \"Software\"), to deal in\n\/\/ the Software without restriction, including without limitation the rights to\n\/\/ use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n\/\/ the Software, and to permit persons to whom the Software is furnished to do so,\n\/\/ subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n\/\/ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n\/\/ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n\/\/ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n\/\/ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ossrs\/go-oryx\/agent\"\n\t\"github.com\/ossrs\/go-oryx\/core\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"runtime\/pprof\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ the state of server, state graph:\n\/\/ Init => Normal(Ready => Running)\n\/\/ Init\/Normal => Closed\ntype ServerState int\n\nconst (\n\tStateInit ServerState = 1 << iota\n\tStateReady\n\tStateRunning\n\tStateClosed\n)\n\ntype Server struct {\n\tctx core.Context\n\t\/\/ signal handler.\n\tsigs chan os.Signal\n\t\/\/ whether closed.\n\tclosed ServerState\n\tclosing chan bool\n\t\/\/ for system internal to notify quit.\n\tquit chan bool\n\twg sync.WaitGroup\n\t\/\/ core components.\n\thtbt *Heartbeat\n\tlogger *simpleLogger\n\trtmp core.OpenCloser\n\t\/\/ the locker for state, for instance, the closed.\n\tlock sync.Mutex\n}\n\nfunc NewServer(ctx core.Context) *Server {\n\tv := &Server{\n\t\tctx: ctx,\n\t\tsigs: make(chan os.Signal, 1),\n\t\tclosed: StateInit,\n\t\tclosing: make(chan bool, 1),\n\t\tquit: make(chan bool, 1),\n\t\thtbt: NewHeartbeat(ctx),\n\t\tlogger: &simpleLogger{ctx: ctx},\n\t}\n\tv.rtmp = agent.NewRtmp(ctx, v)\n\n\tcore.Conf.Subscribe(v)\n\n\treturn v\n}\n\n\/\/ notify server to stop and wait for cleanup.\nfunc (v *Server) Close() {\n\tctx := v.ctx\n\n\t\/\/ wait for stopped.\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\t\/\/ only create?\n\tif v.closed == StateInit {\n\t\treturn\n\t}\n\n\t\/\/ closed?\n\tif v.closed == StateClosed {\n\t\tcore.Warn.Println(ctx, \"server already closed.\")\n\t\treturn\n\t}\n\n\t\/\/ notify to close.\n\tif v.closed == StateRunning {\n\t\tcore.Info.Println(ctx, \"notify server to stop.\")\n\t\tselect {\n\t\tcase v.quit <- true:\n\t\tdefault:\n\t\t}\n\t}\n\n\t\/\/ wait for closed.\n\tif v.closed == StateRunning {\n\t\t<-v.closing\n\t}\n\n\t\/\/ do cleanup when stopped.\n\tcore.Conf.Unsubscribe(v)\n\n\t\/\/ close the rtmp agent.\n\tif err := v.rtmp.Close(); err != nil {\n\t\tcore.Warn.Println(ctx, \"close rtmp agent failed. err is\", err)\n\t}\n\n\t\/\/ close the agent manager.\n\tagent.Manager.Close()\n\n\t\/\/ when cpu profile is enabled, close it.\n\tif core.Conf.Go.CpuProfile != \"\" {\n\t\tpprof.StopCPUProfile()\n\t\tcore.Trace.Println(ctx, \"cpu profile ok, file is\", core.Conf.Go.CpuProfile)\n\t}\n\n\t\/\/ when memory profile enabled, write heap info.\n\tif core.Conf.Go.MemProfile != \"\" {\n\t\tif f, err := os.Create(core.Conf.Go.MemProfile); err != nil {\n\t\t\tcore.Warn.Println(ctx, \"ignore open memory profile failed. err is\", err)\n\t\t} else {\n\t\t\tdefer f.Close()\n\t\t\tif err = pprof.Lookup(\"heap\").WriteTo(f, 0); err != nil {\n\t\t\t\tcore.Warn.Println(ctx, \"write memory profile failed. err is\", err)\n\t\t\t}\n\t\t}\n\t\tcore.Trace.Println(ctx, \"mem profile ok, file is\", core.Conf.Go.MemProfile)\n\t}\n\n\t\/\/ ok, closed.\n\tv.closed = StateClosed\n\tcore.Trace.Println(ctx, \"server closed\")\n}\n\nfunc (v *Server) ParseConfig(conf string) (err error) {\n\tctx := v.ctx\n\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\tcore.Trace.Println(ctx, \"start to parse config file\", conf)\n\tif err = core.Conf.Loads(conf); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (v *Server) PrepareLogger() (err error) {\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\tif err = v.applyLogger(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (v *Server) initializeRuntime() (err error) {\n\tctx := v.ctx\n\n\t\/\/ install signals.\n\t\/\/ TODO: FIXME: when process the current signal, others may drop.\n\tsignal.Notify(v.sigs)\n\n\t\/\/ apply the cpu profile.\n\tif err = v.applyCpuProfile(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ apply the gc percent.\n\tif err = v.applyGcPercent(core.Conf); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ show gc trace.\n\tgo func() {\n\t\tstat := &debug.GCStats{}\n\n\t\tfor {\n\t\t\tif core.Conf.Go.GcTrace > 0 {\n\t\t\t\tpgc := stat.NumGC\n\t\t\t\tdebug.ReadGCStats(stat)\n\t\t\t\tif len(stat.Pause) > 3 {\n\t\t\t\t\tstat.Pause = append([]time.Duration{}, stat.Pause[:3]...)\n\t\t\t\t}\n\t\t\t\tif pgc < stat.NumGC {\n\t\t\t\t\tcore.Trace.Println(ctx, \"gc\", stat.NumGC, stat.PauseTotal, stat.Pause, stat.PauseQuantiles)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(core.Conf.Go.GcTrace) * time.Second)\n\t\t\t} else {\n\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn\n}\n\nfunc (v *Server) Initialize() (err error) {\n\tctx := v.ctx\n\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tif v.closed != StateInit {\n\t\tpanic(\"server invalid state.\")\n\t}\n\n\t\/\/ about the runtime.\n\tif err = v.initializeRuntime(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ use worker container to fork.\n\tvar wc core.WorkerContainer = v\n\n\t\/\/ heartbeat with http api.\n\tif err = v.htbt.Initialize(wc); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ reload goroutine\n\twc.GFork(\"reload\", core.Conf.ReloadCycle)\n\t\/\/ heartbeat goroutine\n\twc.GFork(\"htbt(discovery)\", v.htbt.discoveryCycle)\n\twc.GFork(\"htbt(main)\", v.htbt.beatCycle)\n\t\/\/ open rtmp agent.\n\tif err = v.rtmp.Open(); err != nil {\n\t\tcore.Error.Println(ctx, \"open rtmp agent failed. err is\", err)\n\t\treturn\n\t}\n\n\tc := core.Conf\n\tl := fmt.Sprintf(\"%v(%v\/%v)\", c.Log.Tank, c.Log.Level, c.Log.File)\n\tif !c.LogToFile() {\n\t\tl = fmt.Sprintf(\"%v(%v)\", c.Log.Tank, c.Log.Level)\n\t}\n\tcore.Trace.Println(ctx, fmt.Sprintf(\"init server ok, conf=%v, log=%v, workers=%v\/%v, gc=%v\/%v%%, daemon=%v\",\n\t\tc.Conf(), l, c.Workers, runtime.NumCPU(), c.Go.GcInterval, c.Go.GcPercent, c.Daemon))\n\n\t\/\/ set to ready, requires cleanup.\n\tv.closed = StateReady\n\n\treturn\n}\n\nfunc (v *Server) onSignal(signal syscall.Signal) {\n\tctx := v.ctx\n\twc := v\n\n\tcore.Trace.Println(ctx, \"got signal\", signal)\n\tswitch signal {\n\tcase SIGUSR1, SIGUSR2:\n\t\tpanic(\"panic by SIGUSR1\/2\")\n\tcase os.Interrupt, syscall.SIGTERM:\n\t\t\/\/ SIGINT, SIGTERM\n\t\twc.Quit()\n\t}\n}\n\nfunc (v *Server) Run() (err error) {\n\tctx := v.ctx\n\n\tfunc() {\n\t\tv.lock.Lock()\n\t\tdefer v.lock.Unlock()\n\n\t\tif v.closed != StateReady {\n\t\t\tpanic(\"server invalid state.\")\n\t\t}\n\t\tv.closed = StateRunning\n\t}()\n\n\t\/\/ when terminated, notify the chan.\n\tdefer func() {\n\t\tselect {\n\t\tcase v.closing <- true:\n\t\tdefault:\n\t\t}\n\t}()\n\n\tcore.Info.Println(ctx, \"server cycle running\")\n\n\t\/\/ run server, apply settings.\n\tv.applyMultipleProcesses(core.Conf.Workers)\n\n\tvar wc core.WorkerContainer = v\n\tfor {\n\t\tvar gcc <-chan time.Time = nil\n\t\tif core.Conf.Go.GcInterval > 0 {\n\t\t\tgcc = time.After(time.Second * time.Duration(core.Conf.Go.GcInterval))\n\t\t}\n\n\t\tselect {\n\t\tcase signal := <-v.sigs:\n\t\t\tv.onSignal(signal)\n\t\tcase <-wc.QC():\n\t\t\twc.Quit()\n\n\t\t\t\/\/ for the following quit will block all signal process,\n\t\t\t\/\/ we start new goroutine to process the panic signal only.\n\t\t\tgo func() {\n\t\t\t\tfor s := range v.sigs {\n\t\t\t\t\tv.onSignal(s)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t\/\/ wait for all goroutines quit.\n\t\t\tv.wg.Wait()\n\t\t\tcore.Warn.Println(ctx, \"server cycle ok\")\n\t\t\treturn\n\t\tcase <-gcc:\n\t\t\truntime.GC()\n\t\t\tcore.Info.Println(ctx, \"go runtime gc every\", core.Conf.Go.GcInterval, \"seconds\")\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ interface WorkContainer\nfunc (v *Server) QC() <-chan bool {\n\treturn v.quit\n}\n\nfunc (v *Server) Quit() error {\n\tselect {\n\tcase v.quit <- true:\n\tdefault:\n\t}\n\n\treturn core.QuitError\n}\n\nfunc (v *Server) GFork(name string, f func(core.WorkerContainer)) {\n\tctx := v.ctx\n\n\tv.wg.Add(1)\n\tgo func() {\n\t\tdefer v.wg.Done()\n\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tif !core.IsNormalQuit(r) {\n\t\t\t\t\tcore.Warn.Println(ctx, \"rtmp ignore\", r)\n\t\t\t\t}\n\n\t\t\t\tcore.Error.Println(ctx, string(debug.Stack()))\n\n\t\t\t\tv.Quit()\n\t\t\t}\n\t\t}()\n\n\t\tf(v)\n\n\t\tif name != \"\" {\n\t\t\tcore.Trace.Println(ctx, name, \"worker terminated.\")\n\t\t}\n\t}()\n}\n\nfunc (v *Server) applyMultipleProcesses(workers int) {\n\tctx := v.ctx\n\n\tif workers < 0 {\n\t\tpanic(\"should not be negative workers\")\n\t}\n\n\tif workers == 0 {\n\t\tworkers = runtime.NumCPU()\n\t}\n\tpv := runtime.GOMAXPROCS(workers)\n\n\tcore.Trace.Println(ctx, \"apply workers\", workers, \"and previous is\", pv)\n}\n\nfunc (v *Server) applyLogger(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tif err = v.logger.close(c); err != nil {\n\t\treturn\n\t}\n\tcore.Info.Println(ctx, \"close logger ok\")\n\n\tif err = v.logger.open(c); err != nil {\n\t\treturn\n\t}\n\tcore.Info.Println(ctx, \"open logger ok\")\n\n\treturn\n}\n\nfunc (v *Server) applyCpuProfile(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tpprof.StopCPUProfile()\n\n\tif c.Go.CpuProfile == \"\" {\n\t\treturn\n\t}\n\n\tvar f *os.File\n\tif f, err = os.Create(c.Go.CpuProfile); err != nil {\n\t\tcore.Error.Println(ctx, \"open cpu profile file failed. err is\", err)\n\t\treturn\n\t}\n\tif err = pprof.StartCPUProfile(f); err != nil {\n\t\tcore.Error.Println(ctx, \"start cpu profile failed. err is\", err)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (v *Server) applyGcPercent(c *core.Config) (err error) {\n\tctx := v.ctx\n\n\tif c.Go.GcPercent == 0 {\n\t\tdebug.SetGCPercent(100)\n\t\treturn\n\t}\n\n\tpv := debug.SetGCPercent(c.Go.GcPercent)\n\tcore.Trace.Println(ctx, \"set gc percent from\", pv, \"to\", c.Go.GcPercent)\n\treturn\n}\n\n\/\/ interface ReloadHandler\nfunc (v *Server) OnReloadGlobal(scope int, cc, pc *core.Config) (err error) {\n\tif scope == core.ReloadWorkers {\n\t\tv.applyMultipleProcesses(cc.Workers)\n\t} else if scope == core.ReloadLog {\n\t\tv.applyLogger(cc)\n\t} else if scope == core.ReloadCpuProfile {\n\t\tv.applyCpuProfile(cc)\n\t} else if scope == core.ReloadGcPercent {\n\t\tv.applyGcPercent(cc)\n\t}\n\n\treturn\n}\n\nfunc (v *Server) OnReloadVhost(vhost string, scope int, cc, pc *core.Config) (err error) {\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package executer\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n\t\"gopkg.in\/lxc\/go-lxc.v2\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n)\n\ntype EncRequest struct {\n\tHostId string `json:\"hostid\"`\n\tRequest string `json:\"request\"`\n}\ntype Request struct {\n\tId string `json:\"id\"`\n\tRequest RequestOptions `json:\"request\"`\n}\n\ntype RequestOptions struct {\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tCommandId string `json:\"commandId\"`\n\tWorkingDir string `json:\"workingDirectory\"`\n\tCommand string `json:\"command\"`\n\tArgs []string `json:\"args\"`\n\tEnvironment map[string]string `json:\"environment\"`\n\tStdOut string `json:\"stdOut\"`\n\tStdErr string `json:\"stdErr\"`\n\tRunAs string `json:\"runAs\"`\n\tTimeout int `json:\"timeout\"`\n\tIsDaemon int `json:\"isDaemon\"`\n}\n\ntype Response struct {\n\tResponseOpts ResponseOptions `json:\"response\"`\n\tId string `json:\"id\"`\n}\n\ntype ResponseOptions struct {\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tCommandId string `json:\"commandId\"`\n\tPid int `json:\"pid\"`\n\tResponseNumber int `json:\"responseNumber,omitempty\"`\n\tStdOut string `json:\"stdOut,omitempty\"`\n\tStdErr string `json:\"stdErr,omitempty\"`\n\tExitCode string `json:\"exitCode,omitempty\"`\n}\n\nfunc ExecHost(req RequestOptions, out_c chan<- ResponseOptions) {\n\tcmd := buildCmd(&req)\n\tif cmd == nil {\n\t\tclose(out_c)\n\t\treturn\n\t}\n\t\/\/read\/write pipes stdout\n\trop, wop, _ := os.Pipe()\n\t\/\/read\/out pipes stderr\n\trep, wep, _ := os.Pipe()\n\n\tdefer rop.Close()\n\tdefer rep.Close()\n\n\tcmd.Stdout = wop\n\tcmd.Stderr = wep\n\tif req.IsDaemon == 1 {\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\t\tcmd.SysProcAttr.Setpgid = true\n\t\tcmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}\n\t}\n\terr := cmd.Start()\n\tlog.Check(log.WarnLevel, \"Executing command: \"+req.CommandId+\" \"+req.Command+\" \"+strings.Join(req.Args, \" \"), err)\n\n\twop.Close()\n\twep.Close()\n\n\tstart := time.Now().Unix()\n\tend := time.Now().Add(time.Duration(req.Timeout) * time.Second).Unix()\n\tvar response = genericResponse(&req)\n\tresponse.ResponseNumber = 1\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr := bufio.NewReader(rop)\n\t\tfor line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {\n\t\t\tresponse.StdOut = response.StdOut + string(line)\n\t\t\tif !isPrefix {\n\t\t\t\tresponse.StdOut = response.StdOut + \"\\n\"\n\t\t\t}\n\t\t\tif len(response.StdOut) > 50000 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\tif end-now() < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tscanErr := bufio.NewScanner(rep)\n\t\tfor scanErr.Scan() {\n\t\t\tresponse.StdErr = response.StdErr + scanErr.Text() + \"\\n\"\n\t\t\tif len(response.StdErr) > 50000 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\tif end-now() < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor end-now() > 0 {\n\t\t\tif now()-start > 10 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n\n\tdone := make(chan error)\n\tgo func() { done <- cmd.Wait() }()\n\tselect {\n\tcase <-done:\n\t\tresponse.ExitCode = \"0\"\n\t\tif req.IsDaemon != 1 {\n\t\t\tresponse.ExitCode = strconv.Itoa(cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus())\n\t\t}\n\t\tout_c <- response\n\t\tend = -1\n\tcase <-time.After(time.Duration(req.Timeout) * time.Second):\n\t\tif req.IsDaemon == 1 {\n\t\t\tresponse.ExitCode = \"0\"\n\t\t\tout_c <- response\n\t\t\t<-done\n\t\t} else {\n\t\t\tcmd.Process.Kill()\n\t\t\tresponse.Type = config.Broker.ExecuteTimeout\n\t\t\tcmd.Process.Wait()\n\t\t\tif cmd.ProcessState != nil {\n\t\t\t\tresponse.ExitCode = strconv.Itoa(cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus())\n\t\t\t} else {\n\t\t\t\tresponse.ExitCode = \"-1\"\n\t\t\t}\n\t\t\tout_c <- response\n\t\t}\n\t}\n\twg.Wait()\n\tclose(out_c)\n}\n\nfunc buildCmd(r *RequestOptions) *exec.Cmd {\n\tuser, err := user.Lookup(r.RunAs)\n\tif log.Check(log.WarnLevel, \"User lookup: \"+r.RunAs, err) {\n\t\treturn nil\n\t}\n\tuid, err := strconv.Atoi(user.Uid)\n\tif log.Check(log.WarnLevel, \"UID lookup: \"+user.Uid, err) {\n\t\treturn nil\n\t}\n\tgid, err := strconv.Atoi(user.Gid)\n\tif log.Check(log.WarnLevel, \"GID lookup: \"+user.Gid, err) {\n\t\treturn nil\n\t}\n\tgid32 := *(*uint32)(unsafe.Pointer(&gid))\n\tuid32 := *(*uint32)(unsafe.Pointer(&uid))\n\n\tvar buff bytes.Buffer\n\tbuff.WriteString(r.Command + \" \")\n\tfor _, arg := range r.Args {\n\t\tbuff.WriteString(arg + \" \")\n\t}\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", buff.String())\n\tcmd.Dir = r.WorkingDir\n\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\tcmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid32, Gid: gid32}\n\n\treturn cmd\n}\n\n\/\/prepare basic response\nfunc genericResponse(req *RequestOptions) ResponseOptions {\n\treturn ResponseOptions{\n\t\tType: config.Broker.ExecuteResponce,\n\t\tCommandId: req.CommandId,\n\t\tId: req.Id,\n\t}\n}\n\nfunc now() int64 {\n\treturn time.Now().Unix()\n}\n\nfunc AttachContainer(name string, r RequestOptions, out_c chan<- ResponseOptions) {\n\tlxc_c, _ := lxc.NewContainer(name, config.Agent.LxcPrefix)\n\topts := lxc.DefaultAttachOptions\n\n\tvar res ResponseOptions = genericResponse(&r)\n\n\to_read, o_write, err := os.Pipe()\n\tlog.Check(log.WarnLevel, \"Creating pipe for container attach\", err)\n\te_read, e_write, err := os.Pipe()\n\tlog.Check(log.WarnLevel, \"Creating pipe for container attach\", err)\n\n\tvar chunk bytes.Buffer\n\tdefer o_read.Close()\n\tdefer e_read.Close()\n\n\topts.StdoutFd = o_write.Fd()\n\topts.StderrFd = e_write.Fd()\n\n\topts.UID, opts.GID = container.GetCredentials(r.RunAs, name)\n\n\topts.Cwd = r.WorkingDir\n\n\tvar exitCode int\n\tvar cmd bytes.Buffer\n\n\tcmd.WriteString(r.Command)\n\tfor _, a := range r.Args {\n\t\tcmd.WriteString(a + \" \")\n\t}\n\n\tlog.Debug(\"Executing command in container \" + name + \":\" + cmd.String())\n\tgo func() {\n\t\tif lxc_c.Running() {\n\t\t\tlog.Debug(\"Container \" + name + \" is running\")\n\t\t}\n\n\t\texitCode, err = lxc_c.RunCommandStatus([]string{\"timeout\", strconv.Itoa(r.Timeout), \"\/bin\/bash\", \"-c\", cmd.String()}, opts)\n\t\tlog.Check(log.WarnLevel, \"Execution command\", err)\n\n\t\to_write.Close()\n\t\te_write.Close()\n\t}()\n\n\tout := bufio.NewScanner(o_read)\n\tvar e bytes.Buffer\n\tstart_time := time.Now().Unix()\n\n\t\/\/response counter\n\tvar resN int = 1\n\n\tfor out.Scan() {\n\t\t\/\/we have more stdout coming in\n\t\t\/\/collect 1000 bytes and send the chunk\n\t\tchunk.WriteString(out.Text() + \"\\n\")\n\t\t\/\/send chunk every 1000 bytes or 10 seconds\n\t\tif chunk.Len() >= 1000 || now()-start_time >= 10 {\n\t\t\tres.StdOut = chunk.String()\n\t\t\tres.ResponseNumber = resN\n\t\t\tlog.Info(\"Command intermediate result \" + chunk.String())\n\t\t\tout_c <- res\n\t\t\tchunk.Truncate(0)\n\t\t\tresN++\n\t\t\tstart_time = now()\n\t\t}\n\t}\n\n\tlog.Info(\"Command intermediate result \" + chunk.String())\n\tio.Copy(&e, e_read)\n\tif exitCode == 0 {\n\t\tres.Type = config.Broker.ExecuteResponce\n\t\tres.ExitCode = strconv.Itoa(exitCode)\n\t\tif chunk.Len() > 0 {\n\t\t\tif resN > 1 {\n\t\t\t\tres.ResponseNumber = resN\n\t\t\t}\n\t\t\tlog.Info(\"Command intermediate result \" + chunk.String())\n\t\t\tres.StdOut = chunk.String()\n\t\t}\n\t\tres.StdErr = e.String()\n\t\tout_c <- res\n\t} else {\n\t\tlog.Debug(\"Exited with exit code\", strconv.Itoa(exitCode))\n\t\tif exitCode\/256 == 124 {\n\t\t\tres.Type = config.Broker.ExecuteTimeout\n\t\t}\n\t\tres.ExitCode = strconv.Itoa(exitCode \/ 256)\n\t\tres.StdOut = chunk.String()\n\t\tres.StdErr = e.String()\n\t\tout_c <- res\n\t}\n\tlxc.Release(lxc_c)\n\tclose(out_c)\n}\n<commit_msg>Fixed empty responseNumber #458<commit_after>package executer\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"github.com\/subutai-io\/base\/agent\/agent\/container\"\n\t\"github.com\/subutai-io\/base\/agent\/config\"\n\t\"github.com\/subutai-io\/base\/agent\/log\"\n\t\"gopkg.in\/lxc\/go-lxc.v2\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n)\n\ntype EncRequest struct {\n\tHostId string `json:\"hostid\"`\n\tRequest string `json:\"request\"`\n}\ntype Request struct {\n\tId string `json:\"id\"`\n\tRequest RequestOptions `json:\"request\"`\n}\n\ntype RequestOptions struct {\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tCommandId string `json:\"commandId\"`\n\tWorkingDir string `json:\"workingDirectory\"`\n\tCommand string `json:\"command\"`\n\tArgs []string `json:\"args\"`\n\tEnvironment map[string]string `json:\"environment\"`\n\tStdOut string `json:\"stdOut\"`\n\tStdErr string `json:\"stdErr\"`\n\tRunAs string `json:\"runAs\"`\n\tTimeout int `json:\"timeout\"`\n\tIsDaemon int `json:\"isDaemon\"`\n}\n\ntype Response struct {\n\tResponseOpts ResponseOptions `json:\"response\"`\n\tId string `json:\"id\"`\n}\n\ntype ResponseOptions struct {\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tCommandId string `json:\"commandId\"`\n\tPid int `json:\"pid\"`\n\tResponseNumber int `json:\"responseNumber,omitempty\"`\n\tStdOut string `json:\"stdOut,omitempty\"`\n\tStdErr string `json:\"stdErr,omitempty\"`\n\tExitCode string `json:\"exitCode,omitempty\"`\n}\n\nfunc ExecHost(req RequestOptions, out_c chan<- ResponseOptions) {\n\tcmd := buildCmd(&req)\n\tif cmd == nil {\n\t\tclose(out_c)\n\t\treturn\n\t}\n\t\/\/read\/write pipes stdout\n\trop, wop, _ := os.Pipe()\n\t\/\/read\/out pipes stderr\n\trep, wep, _ := os.Pipe()\n\n\tdefer rop.Close()\n\tdefer rep.Close()\n\n\tcmd.Stdout = wop\n\tcmd.Stderr = wep\n\tif req.IsDaemon == 1 {\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\t\tcmd.SysProcAttr.Setpgid = true\n\t\tcmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}\n\t}\n\terr := cmd.Start()\n\tlog.Check(log.WarnLevel, \"Executing command: \"+req.CommandId+\" \"+req.Command+\" \"+strings.Join(req.Args, \" \"), err)\n\n\twop.Close()\n\twep.Close()\n\n\tstart := time.Now().Unix()\n\tend := time.Now().Add(time.Duration(req.Timeout) * time.Second).Unix()\n\tvar response = genericResponse(&req)\n\tresponse.ResponseNumber = 1\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr := bufio.NewReader(rop)\n\t\tfor line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {\n\t\t\tresponse.StdOut = response.StdOut + string(line)\n\t\t\tif !isPrefix {\n\t\t\t\tresponse.StdOut = response.StdOut + \"\\n\"\n\t\t\t}\n\t\t\tif len(response.StdOut) > 50000 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\tif end-now() < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tscanErr := bufio.NewScanner(rep)\n\t\tfor scanErr.Scan() {\n\t\t\tresponse.StdErr = response.StdErr + scanErr.Text() + \"\\n\"\n\t\t\tif len(response.StdErr) > 50000 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\tif end-now() < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor end-now() > 0 {\n\t\t\tif now()-start > 10 {\n\t\t\t\tout_c <- response\n\t\t\t\tstart = now()\n\t\t\t\tresponse.StdErr, response.StdOut = \"\", \"\"\n\t\t\t\tresponse.ResponseNumber++\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n\n\tdone := make(chan error)\n\tgo func() { done <- cmd.Wait() }()\n\tselect {\n\tcase <-done:\n\t\tresponse.ExitCode = \"0\"\n\t\tif req.IsDaemon != 1 {\n\t\t\tresponse.ExitCode = strconv.Itoa(cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus())\n\t\t}\n\t\tout_c <- response\n\t\tend = -1\n\tcase <-time.After(time.Duration(req.Timeout) * time.Second):\n\t\tif req.IsDaemon == 1 {\n\t\t\tresponse.ExitCode = \"0\"\n\t\t\tout_c <- response\n\t\t\t<-done\n\t\t} else {\n\t\t\tcmd.Process.Kill()\n\t\t\tresponse.Type = config.Broker.ExecuteTimeout\n\t\t\tcmd.Process.Wait()\n\t\t\tif cmd.ProcessState != nil {\n\t\t\t\tresponse.ExitCode = strconv.Itoa(cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus())\n\t\t\t} else {\n\t\t\t\tresponse.ExitCode = \"-1\"\n\t\t\t}\n\t\t\tout_c <- response\n\t\t}\n\t}\n\twg.Wait()\n\tclose(out_c)\n}\n\nfunc buildCmd(r *RequestOptions) *exec.Cmd {\n\tuser, err := user.Lookup(r.RunAs)\n\tif log.Check(log.WarnLevel, \"User lookup: \"+r.RunAs, err) {\n\t\treturn nil\n\t}\n\tuid, err := strconv.Atoi(user.Uid)\n\tif log.Check(log.WarnLevel, \"UID lookup: \"+user.Uid, err) {\n\t\treturn nil\n\t}\n\tgid, err := strconv.Atoi(user.Gid)\n\tif log.Check(log.WarnLevel, \"GID lookup: \"+user.Gid, err) {\n\t\treturn nil\n\t}\n\tgid32 := *(*uint32)(unsafe.Pointer(&gid))\n\tuid32 := *(*uint32)(unsafe.Pointer(&uid))\n\n\tvar buff bytes.Buffer\n\tbuff.WriteString(r.Command + \" \")\n\tfor _, arg := range r.Args {\n\t\tbuff.WriteString(arg + \" \")\n\t}\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", buff.String())\n\tcmd.Dir = r.WorkingDir\n\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\tcmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid32, Gid: gid32}\n\n\treturn cmd\n}\n\n\/\/prepare basic response\nfunc genericResponse(req *RequestOptions) ResponseOptions {\n\treturn ResponseOptions{\n\t\tType: config.Broker.ExecuteResponce,\n\t\tCommandId: req.CommandId,\n\t\tId: req.Id,\n\t}\n}\n\nfunc now() int64 {\n\treturn time.Now().Unix()\n}\n\nfunc AttachContainer(name string, r RequestOptions, out_c chan<- ResponseOptions) {\n\tlxc_c, _ := lxc.NewContainer(name, config.Agent.LxcPrefix)\n\topts := lxc.DefaultAttachOptions\n\n\tvar res ResponseOptions = genericResponse(&r)\n\n\to_read, o_write, _ := os.Pipe()\n\te_read, e_write, _ := os.Pipe()\n\n\tvar chunk bytes.Buffer\n\tdefer o_read.Close()\n\tdefer e_read.Close()\n\n\topts.StdoutFd = o_write.Fd()\n\topts.StderrFd = e_write.Fd()\n\n\topts.UID, opts.GID = container.GetCredentials(r.RunAs, name)\n\n\topts.Cwd = r.WorkingDir\n\n\tvar exitCode int\n\tvar cmd bytes.Buffer\n\n\tcmd.WriteString(r.Command)\n\tfor _, a := range r.Args {\n\t\tcmd.WriteString(a + \" \")\n\t}\n\n\tlog.Debug(\"Executing command in container \" + name + \":\" + cmd.String())\n\tgo func() {\n\t\texitCode, _ = lxc_c.RunCommandStatus([]string{\"timeout\", strconv.Itoa(r.Timeout), \"\/bin\/bash\", \"-c\", cmd.String()}, opts)\n\n\t\to_write.Close()\n\t\te_write.Close()\n\t}()\n\n\tout := bufio.NewScanner(o_read)\n\tvar e bytes.Buffer\n\tstart_time := time.Now().Unix()\n\n\tres.ResponseNumber = 1\n\tfor out.Scan() {\n\t\t\/\/we have more stdout coming in\n\t\t\/\/collect 1000 bytes and send the chunk\n\t\tchunk.WriteString(out.Text() + \"\\n\")\n\t\t\/\/send chunk every 1000 bytes or 10 seconds\n\t\tif chunk.Len() >= 1000 || now()-start_time >= 10 {\n\t\t\tres.StdOut = chunk.String()\n\t\t\tout_c <- res\n\t\t\tchunk.Truncate(0)\n\t\t\tres.ResponseNumber++\n\t\t\tstart_time = now()\n\t\t}\n\t}\n\n\tio.Copy(&e, e_read)\n\tif exitCode == 0 {\n\t\tres.Type = config.Broker.ExecuteResponce\n\t\tres.ExitCode = strconv.Itoa(exitCode)\n\t\tif chunk.Len() > 0 {\n\t\t\tres.StdOut = chunk.String()\n\t\t}\n\t\tres.StdErr = e.String()\n\t\tout_c <- res\n\t} else {\n\t\tif exitCode\/256 == 124 {\n\t\t\tres.Type = config.Broker.ExecuteTimeout\n\t\t}\n\t\tres.ExitCode = strconv.Itoa(exitCode \/ 256)\n\t\tres.StdOut = chunk.String()\n\t\tres.StdErr = e.String()\n\t\tout_c <- res\n\t}\n\tlxc.Release(lxc_c)\n\tclose(out_c)\n}\n<|endoftext|>"} {"text":"<commit_before>package containerd\n\nimport (\n\tgocontext \"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cio\"\n\t\"github.com\/containerd\/containerd\/containers\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/docker\/docker\/pkg\/signal\"\n\t\"github.com\/docker\/swarmkit\/agent\/exec\"\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/api\/naming\"\n\t\"github.com\/docker\/swarmkit\/log\"\n\tgogotypes \"github.com\/gogo\/protobuf\/types\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tdevNull *os.File\n\terrAdapterNotPrepared = errors.New(\"container adapter not prepared\")\n\tmountPropagationReverseMap = map[api.Mount_BindOptions_MountPropagation]string{\n\t\tapi.MountPropagationPrivate: \"private\",\n\t\tapi.MountPropagationRPrivate: \"rprivate\",\n\t\tapi.MountPropagationShared: \"shared\",\n\t\tapi.MountPropagationRShared: \"rshared\",\n\t\tapi.MountPropagationRSlave: \"slave\",\n\t\tapi.MountPropagationSlave: \"rslave\",\n\t}\n)\n\n\/\/ containerAdapter conducts remote operations for a container. All calls\n\/\/ are mostly naked calls to the client API, seeded with information from\n\/\/ containerConfig.\ntype containerAdapter struct {\n\tclient *containerd.Client\n\tspec *api.ContainerSpec\n\tsecrets exec.SecretGetter\n\tname string\n\timage containerd.Image \/\/ Pulled image\n\tcontainer containerd.Container\n\ttask containerd.Task\n\texitStatus error\n}\n\nfunc newContainerAdapter(client *containerd.Client, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) {\n\tspec := task.Spec.GetContainer()\n\tif spec == nil {\n\t\treturn nil, exec.ErrRuntimeUnsupported\n\t}\n\n\tc := &containerAdapter{\n\t\tclient: client,\n\t\tspec: spec,\n\t\tsecrets: secrets,\n\t\tname: naming.Task(task),\n\t}\n\n\tif err := c.reattach(context.Background()); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\n\/\/ reattaches to an existing container. If the container is found but\n\/\/ the task is missing then still succeeds, allowing subsequent use of\n\/\/ c.delete()\nfunc (c *containerAdapter) reattach(ctx context.Context) error {\n\tcontainer, err := c.client.LoadContainer(ctx, c.name)\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) {\n\t\t\tc.log(ctx).Debug(\"reattach: container not found\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn errors.Wrap(err, \"reattach: loading container\")\n\t}\n\tc.log(ctx).Debug(\"reattach: loaded container\")\n\tc.container = container\n\n\t\/\/ TODO(ijc) Consider an addition to container library which\n\t\/\/ directly attaches stdin to \/dev\/null.\n\tif devNull == nil {\n\t\tif devNull, err = os.Open(os.DevNull); err != nil {\n\t\t\treturn errors.Wrap(err, \"reattach: opening null device\")\n\t\t}\n\t}\n\n\ttask, err := container.Task(ctx, cio.WithAttach(devNull, os.Stdout, os.Stderr))\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) {\n\t\t\tc.log(ctx).WithError(err).Info(\"reattach: no running task\")\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Wrap(err, \"reattach: reattaching task\")\n\t}\n\tc.task = task\n\tc.log(ctx).Debug(\"reattach: successful\")\n\treturn nil\n}\n\nfunc (c *containerAdapter) log(ctx context.Context) *logrus.Entry {\n\treturn log.G(ctx).WithFields(logrus.Fields{\n\t\t\"container.id\": c.name,\n\t})\n}\n\nfunc (c *containerAdapter) pullImage(ctx context.Context) error {\n\timage, err := c.client.Pull(ctx, c.spec.Image, containerd.WithPullUnpack)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"pulling container image\")\n\t}\n\tc.image = image\n\n\treturn nil\n}\n\nfunc withMounts(ctx context.Context, ms []api.Mount) oci.SpecOpts {\n\tsort.Sort(mounts(ms))\n\n\treturn func(_ gocontext.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {\n\t\tfor _, m := range ms {\n\t\t\tif !filepath.IsAbs(m.Target) {\n\t\t\t\treturn errors.Errorf(\"mount %s is not absolute\", m.Target)\n\t\t\t}\n\n\t\t\tswitch m.Type {\n\t\t\tcase api.MountTypeTmpfs:\n\t\t\t\topts := []string{\"noexec\", \"nosuid\", \"nodev\", \"rprivate\"}\n\t\t\t\tif m.TmpfsOptions != nil {\n\t\t\t\t\tif m.TmpfsOptions.SizeBytes <= 0 {\n\t\t\t\t\t\treturn errors.New(\"invalid tmpfs size give\")\n\t\t\t\t\t}\n\t\t\t\t\topts = append(opts, fmt.Sprintf(\"size=%d\", m.TmpfsOptions.SizeBytes))\n\t\t\t\t\topts = append(opts, fmt.Sprintf(\"mode=%o\", m.TmpfsOptions.Mode))\n\t\t\t\t}\n\t\t\t\tif m.ReadOnly {\n\t\t\t\t\topts = append(opts, \"ro\")\n\t\t\t\t} else {\n\t\t\t\t\topts = append(opts, \"rw\")\n\t\t\t\t}\n\n\t\t\t\ts.Mounts = append(s.Mounts, specs.Mount{\n\t\t\t\t\tDestination: m.Target,\n\t\t\t\t\tType: \"tmpfs\",\n\t\t\t\t\tSource: \"tmpfs\",\n\t\t\t\t\tOptions: opts,\n\t\t\t\t})\n\n\t\t\tcase api.MountTypeVolume:\n\t\t\t\treturn errors.Errorf(\"volume mounts not implemented, ignoring %v\", m)\n\n\t\t\tcase api.MountTypeBind:\n\t\t\t\topts := []string{\"rbind\"}\n\t\t\t\tif m.ReadOnly {\n\t\t\t\t\topts = append(opts, \"ro\")\n\t\t\t\t} else {\n\t\t\t\t\topts = append(opts, \"rw\")\n\t\t\t\t}\n\n\t\t\t\tpropagation := \"rprivate\"\n\t\t\t\tif m.BindOptions != nil {\n\t\t\t\t\tif p, ok := mountPropagationReverseMap[m.BindOptions.Propagation]; ok {\n\t\t\t\t\t\tpropagation = p\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.G(ctx).Warningf(\"unknown bind mount propagation, using %q\", propagation)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\topts = append(opts, propagation)\n\n\t\t\t\ts.Mounts = append(s.Mounts, specs.Mount{\n\t\t\t\t\tDestination: m.Target,\n\t\t\t\t\tType: \"bind\",\n\t\t\t\t\tSource: m.Source,\n\t\t\t\t\tOptions: opts,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (c *containerAdapter) isPrepared() bool {\n\treturn c.container != nil && c.task != nil\n}\n\nfunc (c *containerAdapter) prepare(ctx context.Context) error {\n\tif c.isPrepared() {\n\t\treturn errors.New(\"adapter already prepared\")\n\t}\n\tif c.image == nil {\n\t\treturn errors.New(\"image has not been pulled\")\n\t}\n\n\tspecOpts := []oci.SpecOpts{\n\t\toci.WithImageConfig(c.image),\n\t\twithMounts(ctx, c.spec.Mounts),\n\t}\n\n\t\/\/ spec.Process.Args is config.Entrypoint + config.Cmd at this\n\t\/\/ point from WithImageConfig above. If the ContainerSpec\n\t\/\/ specifies a Command then we can completely override. If it\n\t\/\/ does not then all we can do is append our Args and hope\n\t\/\/ they do not conflict.\n\t\/\/ TODO(ijc) Improve this\n\tif len(c.spec.Command) > 0 {\n\t\targs := append(c.spec.Command, c.spec.Args...)\n\t\tspecOpts = append(specOpts, oci.WithProcessArgs(args...))\n\t} else {\n\t\tspecOpts = append(specOpts, func(_ gocontext.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {\n\t\t\ts.Process.Args = append(s.Process.Args, c.spec.Args...)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tspec, err := oci.GenerateSpec(ctx, c.client, &containers.Container{}, specOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO(ijc) Consider an addition to container library which\n\t\/\/ directly attaches stdin to \/dev\/null.\n\tif devNull == nil {\n\t\tif devNull, err = os.Open(os.DevNull); err != nil {\n\t\t\treturn errors.Wrap(err, \"opening null device\")\n\t\t}\n\t}\n\n\tc.container, err = c.client.NewContainer(ctx, c.name,\n\t\tcontainerd.WithSpec(spec),\n\t\tcontainerd.WithNewSnapshot(c.name, c.image))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating container\")\n\t}\n\n\t\/\/ TODO(ijc) support ControllerLogs interface.\n\tio := cio.NewIOWithTerminal(devNull, os.Stdout, os.Stderr, spec.Process.Terminal)\n\n\tc.task, err = c.container.NewTask(ctx, io)\n\tif err != nil {\n\t\t\/\/ Destroy the container we created above, but\n\t\t\/\/ propagate the original error.\n\t\tif err2 := c.container.Delete(ctx); err2 != nil {\n\t\t\tc.log(ctx).WithError(err2).Error(\"failed to delete container on prepare failure\")\n\t\t}\n\t\tc.container = nil\n\t\treturn errors.Wrap(err, \"creating task\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *containerAdapter) start(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\terr := c.task.Start(ctx)\n\treturn errors.Wrap(err, \"starting\")\n}\n\nfunc (c *containerAdapter) wait(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\tstatus, err := c.task.Wait(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"waiting\")\n\t}\n\t\/\/ Should update c.exitStatus or not?\n\tec := <-status\n\tif ec.Error() != nil {\n\t\treturn err\n\t}\n\treturn makeExitError(ec.ExitCode(), \"\")\n}\n\ntype status struct {\n\tID string\n\tPid uint32\n\tStatus containerd.Status\n\tExitStatus error\n}\n\nfunc (c *containerAdapter) inspect(ctx context.Context) (status, error) {\n\tif !c.isPrepared() {\n\t\treturn status{}, errAdapterNotPrepared\n\t}\n\n\tts, err := c.task.Status(ctx)\n\tif err != nil {\n\t\treturn status{}, err\n\t}\n\ts := status{\n\t\tID: c.container.ID(),\n\t\tPid: c.task.Pid(),\n\t\tStatus: ts,\n\t\tExitStatus: c.exitStatus,\n\t}\n\treturn s, nil\n}\n\nfunc (c *containerAdapter) shutdown(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tvar (\n\t\tsig syscall.Signal\n\t\ttimeout = time.Duration(10 * time.Second)\n\t\terr error\n\t)\n\n\tif c.spec.StopSignal != \"\" {\n\t\tif sig, err = signal.ParseSignal(c.spec.StopSignal); err != nil {\n\t\t\tsig = syscall.SIGTERM\n\t\t\tc.log(ctx).WithError(err).Errorf(\"unknown StopSignal, using %q\", sig)\n\t\t}\n\t} else {\n\t\tsig = syscall.SIGTERM\n\t\tc.log(ctx).Infof(\"no StopSignal given, using %q\", sig)\n\t}\n\n\tif c.spec.StopGracePeriod != nil {\n\t\ttimeout, _ = gogotypes.DurationFromProto(c.spec.StopGracePeriod)\n\t}\n\n\tdeleteErr := make(chan error, 1)\n\tdeleteCtx, deleteCancel := context.WithCancel(ctx)\n\tdefer deleteCancel()\n\n\tgo func(ctx context.Context, ch chan error) {\n\t\tstatus, err := c.task.Delete(ctx)\n\t\tif err != nil {\n\t\t\tc.log(ctx).WithError(err).Debug(\"Task.Delete failed\")\n\t\t\tch <- err\n\t\t}\n\t\tc.log(ctx).Debugf(\"Task.Delete success, status=%d\", status)\n\t\tch <- makeExitError(status.ExitCode(), \"\")\n\t}(deleteCtx, deleteErr)\n\n\tc.log(ctx).Debugf(\"Killing task with %q signal\", sig)\n\tif err := c.task.Kill(ctx, sig); err != nil {\n\t\treturn errors.Wrapf(err, \"killing task with %q\", sig)\n\t}\n\n\tselect {\n\tcase c.exitStatus = <-deleteErr:\n\t\treturn c.exitStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-time.After(timeout):\n\t\tc.log(ctx).Infof(\"Task did not exit after %s\", timeout)\n\t\t\/\/ Fall through\n\t}\n\n\tif sig == syscall.SIGKILL {\n\t\t\/\/ We've tried as hard as we can.\n\t\treturn errors.New(\"task is unkillable\")\n\t}\n\n\t\/\/ Bring out the big guns\n\tsig = syscall.SIGKILL\n\tc.log(ctx).Debugf(\"Killing task harder with %q signal\", sig)\n\tif err := c.task.Kill(ctx, sig); err != nil {\n\t\treturn errors.Wrapf(err, \"killing task with %q\", sig)\n\t}\n\n\tselect {\n\tcase c.exitStatus = <-deleteErr:\n\t\treturn c.exitStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n}\n\nfunc (c *containerAdapter) terminate(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tc.log(ctx).Debug(\"Terminate\")\n\treturn errors.New(\"terminate not implemented\")\n}\n\nfunc (c *containerAdapter) remove(ctx context.Context) error {\n\t\/\/ Unlike most other entry points we don't use c.isPrepared\n\t\/\/ here so that we can clean up a container which was\n\t\/\/ partially reattached (via c.attach).\n\tif c.container == nil {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tc.log(ctx).Debug(\"Remove\")\n\terr := c.container.Delete(ctx)\n\treturn errors.Wrap(err, \"removing container\")\n}\n\nfunc isContainerCreateNameConflict(err error) bool {\n\t\/\/ container \".*\" already exists\n\tsplits := strings.SplitN(err.Error(), \"\\\"\", 3)\n\treturn splits[0] == \"container \" && splits[2] == \" already exists\"\n}\n\nfunc isUnknownContainer(err error) bool {\n\treturn strings.Contains(err.Error(), \"container does not exist\")\n}\n\n\/\/ For sort.Sort\ntype mounts []api.Mount\n\n\/\/ Len returns the number of mounts. Used in sorting.\nfunc (m mounts) Len() int {\n\treturn len(m)\n}\n\n\/\/ Less returns true if the number of parts (a\/b\/c would be 3 parts) in the\n\/\/ mount indexed by parameter 1 is less than that of the mount indexed by\n\/\/ parameter 2. Used in sorting.\nfunc (m mounts) Less(i, j int) bool {\n\treturn m.parts(i) < m.parts(j)\n}\n\n\/\/ Swap swaps two items in an array of mounts. Used in sorting\nfunc (m mounts) Swap(i, j int) {\n\tm[i], m[j] = m[j], m[i]\n}\n\n\/\/ parts returns the number of parts in the destination of a mount. Used in sorting.\nfunc (m mounts) parts(i int) int {\n\treturn strings.Count(filepath.Clean(m[i].Target), string(os.PathSeparator))\n}\n<commit_msg>Update agent containerd adapter for changes in cio<commit_after>package containerd\n\nimport (\n\tgocontext \"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cio\"\n\t\"github.com\/containerd\/containerd\/containers\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/docker\/docker\/pkg\/signal\"\n\t\"github.com\/docker\/swarmkit\/agent\/exec\"\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/api\/naming\"\n\t\"github.com\/docker\/swarmkit\/log\"\n\tgogotypes \"github.com\/gogo\/protobuf\/types\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tdevNull *os.File\n\terrAdapterNotPrepared = errors.New(\"container adapter not prepared\")\n\tmountPropagationReverseMap = map[api.Mount_BindOptions_MountPropagation]string{\n\t\tapi.MountPropagationPrivate: \"private\",\n\t\tapi.MountPropagationRPrivate: \"rprivate\",\n\t\tapi.MountPropagationShared: \"shared\",\n\t\tapi.MountPropagationRShared: \"rshared\",\n\t\tapi.MountPropagationRSlave: \"slave\",\n\t\tapi.MountPropagationSlave: \"rslave\",\n\t}\n)\n\n\/\/ containerAdapter conducts remote operations for a container. All calls\n\/\/ are mostly naked calls to the client API, seeded with information from\n\/\/ containerConfig.\ntype containerAdapter struct {\n\tclient *containerd.Client\n\tspec *api.ContainerSpec\n\tsecrets exec.SecretGetter\n\tname string\n\timage containerd.Image \/\/ Pulled image\n\tcontainer containerd.Container\n\ttask containerd.Task\n\texitStatus error\n}\n\nfunc newContainerAdapter(client *containerd.Client, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) {\n\tspec := task.Spec.GetContainer()\n\tif spec == nil {\n\t\treturn nil, exec.ErrRuntimeUnsupported\n\t}\n\n\tc := &containerAdapter{\n\t\tclient: client,\n\t\tspec: spec,\n\t\tsecrets: secrets,\n\t\tname: naming.Task(task),\n\t}\n\n\tif err := c.reattach(context.Background()); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\n\/\/ reattaches to an existing container. If the container is found but\n\/\/ the task is missing then still succeeds, allowing subsequent use of\n\/\/ c.delete()\nfunc (c *containerAdapter) reattach(ctx context.Context) error {\n\tcontainer, err := c.client.LoadContainer(ctx, c.name)\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) {\n\t\t\tc.log(ctx).Debug(\"reattach: container not found\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn errors.Wrap(err, \"reattach: loading container\")\n\t}\n\tc.log(ctx).Debug(\"reattach: loaded container\")\n\tc.container = container\n\n\t\/\/ TODO(ijc) Consider an addition to container library which\n\t\/\/ directly attaches stdin to \/dev\/null.\n\tif devNull == nil {\n\t\tif devNull, err = os.Open(os.DevNull); err != nil {\n\t\t\treturn errors.Wrap(err, \"reattach: opening null device\")\n\t\t}\n\t}\n\n\ttask, err := container.Task(ctx, cio.NewAttach(cio.WithStreams(devNull, os.Stdout, os.Stderr)))\n\tif err != nil {\n\t\tif errdefs.IsNotFound(err) {\n\t\t\tc.log(ctx).WithError(err).Info(\"reattach: no running task\")\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Wrap(err, \"reattach: reattaching task\")\n\t}\n\tc.task = task\n\tc.log(ctx).Debug(\"reattach: successful\")\n\treturn nil\n}\n\nfunc (c *containerAdapter) log(ctx context.Context) *logrus.Entry {\n\treturn log.G(ctx).WithFields(logrus.Fields{\n\t\t\"container.id\": c.name,\n\t})\n}\n\nfunc (c *containerAdapter) pullImage(ctx context.Context) error {\n\timage, err := c.client.Pull(ctx, c.spec.Image, containerd.WithPullUnpack)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"pulling container image\")\n\t}\n\tc.image = image\n\n\treturn nil\n}\n\nfunc withMounts(ctx context.Context, ms []api.Mount) oci.SpecOpts {\n\tsort.Sort(mounts(ms))\n\n\treturn func(_ gocontext.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {\n\t\tfor _, m := range ms {\n\t\t\tif !filepath.IsAbs(m.Target) {\n\t\t\t\treturn errors.Errorf(\"mount %s is not absolute\", m.Target)\n\t\t\t}\n\n\t\t\tswitch m.Type {\n\t\t\tcase api.MountTypeTmpfs:\n\t\t\t\topts := []string{\"noexec\", \"nosuid\", \"nodev\", \"rprivate\"}\n\t\t\t\tif m.TmpfsOptions != nil {\n\t\t\t\t\tif m.TmpfsOptions.SizeBytes <= 0 {\n\t\t\t\t\t\treturn errors.New(\"invalid tmpfs size give\")\n\t\t\t\t\t}\n\t\t\t\t\topts = append(opts, fmt.Sprintf(\"size=%d\", m.TmpfsOptions.SizeBytes))\n\t\t\t\t\topts = append(opts, fmt.Sprintf(\"mode=%o\", m.TmpfsOptions.Mode))\n\t\t\t\t}\n\t\t\t\tif m.ReadOnly {\n\t\t\t\t\topts = append(opts, \"ro\")\n\t\t\t\t} else {\n\t\t\t\t\topts = append(opts, \"rw\")\n\t\t\t\t}\n\n\t\t\t\ts.Mounts = append(s.Mounts, specs.Mount{\n\t\t\t\t\tDestination: m.Target,\n\t\t\t\t\tType: \"tmpfs\",\n\t\t\t\t\tSource: \"tmpfs\",\n\t\t\t\t\tOptions: opts,\n\t\t\t\t})\n\n\t\t\tcase api.MountTypeVolume:\n\t\t\t\treturn errors.Errorf(\"volume mounts not implemented, ignoring %v\", m)\n\n\t\t\tcase api.MountTypeBind:\n\t\t\t\topts := []string{\"rbind\"}\n\t\t\t\tif m.ReadOnly {\n\t\t\t\t\topts = append(opts, \"ro\")\n\t\t\t\t} else {\n\t\t\t\t\topts = append(opts, \"rw\")\n\t\t\t\t}\n\n\t\t\t\tpropagation := \"rprivate\"\n\t\t\t\tif m.BindOptions != nil {\n\t\t\t\t\tif p, ok := mountPropagationReverseMap[m.BindOptions.Propagation]; ok {\n\t\t\t\t\t\tpropagation = p\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.G(ctx).Warningf(\"unknown bind mount propagation, using %q\", propagation)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\topts = append(opts, propagation)\n\n\t\t\t\ts.Mounts = append(s.Mounts, specs.Mount{\n\t\t\t\t\tDestination: m.Target,\n\t\t\t\t\tType: \"bind\",\n\t\t\t\t\tSource: m.Source,\n\t\t\t\t\tOptions: opts,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (c *containerAdapter) isPrepared() bool {\n\treturn c.container != nil && c.task != nil\n}\n\nfunc (c *containerAdapter) prepare(ctx context.Context) error {\n\tif c.isPrepared() {\n\t\treturn errors.New(\"adapter already prepared\")\n\t}\n\tif c.image == nil {\n\t\treturn errors.New(\"image has not been pulled\")\n\t}\n\n\tspecOpts := []oci.SpecOpts{\n\t\toci.WithImageConfig(c.image),\n\t\twithMounts(ctx, c.spec.Mounts),\n\t}\n\n\t\/\/ spec.Process.Args is config.Entrypoint + config.Cmd at this\n\t\/\/ point from WithImageConfig above. If the ContainerSpec\n\t\/\/ specifies a Command then we can completely override. If it\n\t\/\/ does not then all we can do is append our Args and hope\n\t\/\/ they do not conflict.\n\t\/\/ TODO(ijc) Improve this\n\tif len(c.spec.Command) > 0 {\n\t\targs := append(c.spec.Command, c.spec.Args...)\n\t\tspecOpts = append(specOpts, oci.WithProcessArgs(args...))\n\t} else {\n\t\tspecOpts = append(specOpts, func(_ gocontext.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {\n\t\t\ts.Process.Args = append(s.Process.Args, c.spec.Args...)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tspec, err := oci.GenerateSpec(ctx, c.client, &containers.Container{}, specOpts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO(ijc) Consider an addition to container library which\n\t\/\/ directly attaches stdin to \/dev\/null.\n\tif devNull == nil {\n\t\tif devNull, err = os.Open(os.DevNull); err != nil {\n\t\t\treturn errors.Wrap(err, \"opening null device\")\n\t\t}\n\t}\n\n\tc.container, err = c.client.NewContainer(ctx, c.name,\n\t\tcontainerd.WithSpec(spec),\n\t\tcontainerd.WithNewSnapshot(c.name, c.image))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating container\")\n\t}\n\n\t\/\/ TODO(ijc) support ControllerLogs interface.\n\topts := []cio.Opt{cio.WithStreams(devNull, os.Stdout, os.Stderr)}\n\tif spec.Process.Terminal {\n\t\topts = append(opts, cio.WithTerminal)\n\t}\n\tc.task, err = c.container.NewTask(ctx, cio.NewCreator(opts...))\n\tif err != nil {\n\t\t\/\/ Destroy the container we created above, but\n\t\t\/\/ propagate the original error.\n\t\tif err2 := c.container.Delete(ctx); err2 != nil {\n\t\t\tc.log(ctx).WithError(err2).Error(\"failed to delete container on prepare failure\")\n\t\t}\n\t\tc.container = nil\n\t\treturn errors.Wrap(err, \"creating task\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *containerAdapter) start(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\terr := c.task.Start(ctx)\n\treturn errors.Wrap(err, \"starting\")\n}\n\nfunc (c *containerAdapter) wait(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\tstatus, err := c.task.Wait(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"waiting\")\n\t}\n\t\/\/ Should update c.exitStatus or not?\n\tec := <-status\n\tif ec.Error() != nil {\n\t\treturn err\n\t}\n\treturn makeExitError(ec.ExitCode(), \"\")\n}\n\ntype status struct {\n\tID string\n\tPid uint32\n\tStatus containerd.Status\n\tExitStatus error\n}\n\nfunc (c *containerAdapter) inspect(ctx context.Context) (status, error) {\n\tif !c.isPrepared() {\n\t\treturn status{}, errAdapterNotPrepared\n\t}\n\n\tts, err := c.task.Status(ctx)\n\tif err != nil {\n\t\treturn status{}, err\n\t}\n\ts := status{\n\t\tID: c.container.ID(),\n\t\tPid: c.task.Pid(),\n\t\tStatus: ts,\n\t\tExitStatus: c.exitStatus,\n\t}\n\treturn s, nil\n}\n\nfunc (c *containerAdapter) shutdown(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tvar (\n\t\tsig syscall.Signal\n\t\ttimeout = time.Duration(10 * time.Second)\n\t\terr error\n\t)\n\n\tif c.spec.StopSignal != \"\" {\n\t\tif sig, err = signal.ParseSignal(c.spec.StopSignal); err != nil {\n\t\t\tsig = syscall.SIGTERM\n\t\t\tc.log(ctx).WithError(err).Errorf(\"unknown StopSignal, using %q\", sig)\n\t\t}\n\t} else {\n\t\tsig = syscall.SIGTERM\n\t\tc.log(ctx).Infof(\"no StopSignal given, using %q\", sig)\n\t}\n\n\tif c.spec.StopGracePeriod != nil {\n\t\ttimeout, _ = gogotypes.DurationFromProto(c.spec.StopGracePeriod)\n\t}\n\n\tdeleteErr := make(chan error, 1)\n\tdeleteCtx, deleteCancel := context.WithCancel(ctx)\n\tdefer deleteCancel()\n\n\tgo func(ctx context.Context, ch chan error) {\n\t\tstatus, err := c.task.Delete(ctx)\n\t\tif err != nil {\n\t\t\tc.log(ctx).WithError(err).Debug(\"Task.Delete failed\")\n\t\t\tch <- err\n\t\t}\n\t\tc.log(ctx).Debugf(\"Task.Delete success, status=%d\", status)\n\t\tch <- makeExitError(status.ExitCode(), \"\")\n\t}(deleteCtx, deleteErr)\n\n\tc.log(ctx).Debugf(\"Killing task with %q signal\", sig)\n\tif err := c.task.Kill(ctx, sig); err != nil {\n\t\treturn errors.Wrapf(err, \"killing task with %q\", sig)\n\t}\n\n\tselect {\n\tcase c.exitStatus = <-deleteErr:\n\t\treturn c.exitStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-time.After(timeout):\n\t\tc.log(ctx).Infof(\"Task did not exit after %s\", timeout)\n\t\t\/\/ Fall through\n\t}\n\n\tif sig == syscall.SIGKILL {\n\t\t\/\/ We've tried as hard as we can.\n\t\treturn errors.New(\"task is unkillable\")\n\t}\n\n\t\/\/ Bring out the big guns\n\tsig = syscall.SIGKILL\n\tc.log(ctx).Debugf(\"Killing task harder with %q signal\", sig)\n\tif err := c.task.Kill(ctx, sig); err != nil {\n\t\treturn errors.Wrapf(err, \"killing task with %q\", sig)\n\t}\n\n\tselect {\n\tcase c.exitStatus = <-deleteErr:\n\t\treturn c.exitStatus\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n}\n\nfunc (c *containerAdapter) terminate(ctx context.Context) error {\n\tif !c.isPrepared() {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tc.log(ctx).Debug(\"Terminate\")\n\treturn errors.New(\"terminate not implemented\")\n}\n\nfunc (c *containerAdapter) remove(ctx context.Context) error {\n\t\/\/ Unlike most other entry points we don't use c.isPrepared\n\t\/\/ here so that we can clean up a container which was\n\t\/\/ partially reattached (via c.attach).\n\tif c.container == nil {\n\t\treturn errAdapterNotPrepared\n\t}\n\n\tc.log(ctx).Debug(\"Remove\")\n\terr := c.container.Delete(ctx)\n\treturn errors.Wrap(err, \"removing container\")\n}\n\nfunc isContainerCreateNameConflict(err error) bool {\n\t\/\/ container \".*\" already exists\n\tsplits := strings.SplitN(err.Error(), \"\\\"\", 3)\n\treturn splits[0] == \"container \" && splits[2] == \" already exists\"\n}\n\nfunc isUnknownContainer(err error) bool {\n\treturn strings.Contains(err.Error(), \"container does not exist\")\n}\n\n\/\/ For sort.Sort\ntype mounts []api.Mount\n\n\/\/ Len returns the number of mounts. Used in sorting.\nfunc (m mounts) Len() int {\n\treturn len(m)\n}\n\n\/\/ Less returns true if the number of parts (a\/b\/c would be 3 parts) in the\n\/\/ mount indexed by parameter 1 is less than that of the mount indexed by\n\/\/ parameter 2. Used in sorting.\nfunc (m mounts) Less(i, j int) bool {\n\treturn m.parts(i) < m.parts(j)\n}\n\n\/\/ Swap swaps two items in an array of mounts. Used in sorting\nfunc (m mounts) Swap(i, j int) {\n\tm[i], m[j] = m[j], m[i]\n}\n\n\/\/ parts returns the number of parts in the destination of a mount. Used in sorting.\nfunc (m mounts) parts(i int) int {\n\treturn strings.Count(filepath.Clean(m[i].Target), string(os.PathSeparator))\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/hashicorp\/go-version\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/inconshreveable\/go-update\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/koding\/kite\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/mitchellh\/osext\"\n\t\"github.com\/koding\/klient\/protocol\"\n)\n\ntype Updater struct {\n\tEndpoint string\n\tInterval time.Duration\n\tCurrentVersion string\n\tLog kite.Logger\n}\n\ntype UpdateData struct {\n\tKlientURL string\n}\n\nvar AuthenticatedUser = \"koding\"\n\nfunc (u *Updater) ServeKite(r *kite.Request) (interface{}, error) {\n\tif r.Username != AuthenticatedUser {\n\t\treturn nil, fmt.Errorf(\"Not authenticated to make an update: %s\", r.Username)\n\t}\n\n\tgo func() {\n\t\tr.LocalKite.Log.Info(\"klient.Update is called. Updating binary via latest version\")\n\n\t\tupdatingState(true)\n\t\tif err := u.checkAndUpdate(); err != nil {\n\t\t\tu.Log.Warning(\"klient.update: %s\", err)\n\t\t}\n\t\tupdatingState(false)\n\t}()\n\n\treturn true, nil\n}\n\nfunc updatingState(state bool) {\n\tupdatingMu.Lock()\n\tupdating = state\n\tupdatingMu.Unlock()\n}\n\nfunc (u *Updater) checkAndUpdate() error {\n\tl, err := u.latestVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlatestVer := \"0.1.\" + l\n\tlatest, err := version.NewVersion(latestVer)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrent, err := version.NewVersion(u.CurrentVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !current.LessThan(latest) {\n\t\t\/\/ current running binary version is equal or greater than what we fetched, so return we don't need to update\n\t\treturn nil\n\t}\n\n\tu.Log.Info(\"Current version: %s is old. Going to update to: %s\", u.CurrentVersion, latestVer)\n\n\tbasePath := \"https:\/\/s3.amazonaws.com\/koding-klient\/\" + protocol.Environment + \"\/latest\"\n\tlatestKlientURL := basePath + \"\/klient-\" + latestVer + \".gz\"\n\n\treturn u.updateBinary(latestKlientURL)\n}\n\nfunc (u *Updater) updateBinary(url string) error {\n\tupdater := update.New()\n\terr := updater.CanUpdate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tself, err := osext.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.Log.Info(\"Going to update binary at: %s\", self)\n\tbin, err := u.fetch(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.Log.Info(\"Replacing new binary with the old one.\")\n\terr, errRecover := updater.FromStream(bytes.NewBuffer(bin))\n\tif err != nil {\n\t\tif errRecover != nil {\n\t\t\treturn errRecover\n\t\t}\n\n\t\treturn err\n\t}\n\n\tenv := os.Environ()\n\n\t\/\/ TODO: os.Args[1:] should come also from the endpoint if the new binary\n\t\/\/ has a different flag!\n\targs := []string{self}\n\targs = append(args, os.Args[1:]...)\n\n\t\/\/ we need to call it here now too, because syscall.Exec will prevent to\n\t\/\/ call the defer that we've defined in the beginning.\n\n\tu.Log.Info(\"Updating was successfull. Replacing current process with args: %v\\n=====> RESTARTING...\\n\\n\", args)\n\n\texecErr := syscall.Exec(self, args, env)\n\tif execErr != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (u *Updater) latestVersion() (string, error) {\n\tresp, err := http.Get(u.Endpoint)\n\tif err != nil {\n\t\tu.Log.Debug(\"Getting latest version from %s\", u.Endpoint)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tlatest, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.TrimSpace(string(latest)), nil\n}\n\nfunc (u *Updater) fetch(url string) ([]byte, error) {\n\tu.Log.Info(\"Fetching binary %s\", url)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"bad http status from %s: %v\", url, resp.Status)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tgz, err := gzip.NewReader(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gz.Close()\n\n\tif _, err = io.Copy(buf, gz); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Run runs the updater in the background for the interval of updater interval.\nfunc (u *Updater) Run() {\n\tu.Log.Info(\"Starting Updater with following options:\\n\\tinterval of: %s\\n\\tendpoint: %s\",\n\t\tu.Interval, u.Endpoint)\n\n\tfor _ = range time.Tick(u.Interval) {\n\t\tupdatingState(true)\n\n\t\tif err := u.checkAndUpdate(); err != nil {\n\t\t\tu.Log.Warning(\"Self-update: %s\", err)\n\t\t}\n\n\t\tupdatingState(false)\n\t}\n}\n\n\/\/ haveFreeSpace checks whether the disk has free space to provide the update\nfunc haveFreeSpace() error {\n\tstat := new(syscall.Statfs_t)\n\n\tif err := syscall.Statfs(\"\/\", stat); err != nil {\n\t\treturn err\n\t}\n\n\tbsize := stat.Bsize \/ 512\n\n\tfree := (uint64(stat.Bfree) * uint64(bsize)) >> 1\n\tfreeInMB := free \/ 1024\n\n\tif freeInMB < 100 {\n\t\treturn errors.New(\"No enough space to upgrade klient\")\n\t}\n\n\treturn nil\n}\n<commit_msg>update: check size before we update<commit_after>package app\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/hashicorp\/go-version\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/inconshreveable\/go-update\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/koding\/kite\"\n\t\"github.com\/koding\/klient\/Godeps\/_workspace\/src\/github.com\/mitchellh\/osext\"\n\t\"github.com\/koding\/klient\/protocol\"\n)\n\ntype Updater struct {\n\tEndpoint string\n\tInterval time.Duration\n\tCurrentVersion string\n\tLog kite.Logger\n}\n\ntype UpdateData struct {\n\tKlientURL string\n}\n\nvar AuthenticatedUser = \"koding\"\n\nfunc (u *Updater) ServeKite(r *kite.Request) (interface{}, error) {\n\tif r.Username != AuthenticatedUser {\n\t\treturn nil, fmt.Errorf(\"Not authenticated to make an update: %s\", r.Username)\n\t}\n\n\tgo func() {\n\t\tr.LocalKite.Log.Info(\"klient.Update is called. Updating binary via latest version\")\n\n\t\tupdatingState(true)\n\t\tif err := u.checkAndUpdate(); err != nil {\n\t\t\tu.Log.Warning(\"klient.update: %s\", err)\n\t\t}\n\t\tupdatingState(false)\n\t}()\n\n\treturn true, nil\n}\n\nfunc updatingState(state bool) {\n\tupdatingMu.Lock()\n\tupdating = state\n\tupdatingMu.Unlock()\n}\n\nfunc (u *Updater) checkAndUpdate() error {\n\tif err := hasFreeSpace(100); err != nil {\n\t\treturn err\n\t}\n\n\tl, err := u.latestVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlatestVer := \"0.1.\" + l\n\tlatest, err := version.NewVersion(latestVer)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrent, err := version.NewVersion(u.CurrentVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !current.LessThan(latest) {\n\t\t\/\/ current running binary version is equal or greater than what we fetched, so return we don't need to update\n\t\treturn nil\n\t}\n\n\tu.Log.Info(\"Current version: %s is old. Going to update to: %s\", u.CurrentVersion, latestVer)\n\n\tbasePath := \"https:\/\/s3.amazonaws.com\/koding-klient\/\" + protocol.Environment + \"\/latest\"\n\tlatestKlientURL := basePath + \"\/klient-\" + latestVer + \".gz\"\n\n\treturn u.updateBinary(latestKlientURL)\n}\n\nfunc (u *Updater) updateBinary(url string) error {\n\tupdater := update.New()\n\terr := updater.CanUpdate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tself, err := osext.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.Log.Info(\"Going to update binary at: %s\", self)\n\tbin, err := u.fetch(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu.Log.Info(\"Replacing new binary with the old one.\")\n\terr, errRecover := updater.FromStream(bytes.NewBuffer(bin))\n\tif err != nil {\n\t\tif errRecover != nil {\n\t\t\treturn errRecover\n\t\t}\n\n\t\treturn err\n\t}\n\n\tenv := os.Environ()\n\n\t\/\/ TODO: os.Args[1:] should come also from the endpoint if the new binary\n\t\/\/ has a different flag!\n\targs := []string{self}\n\targs = append(args, os.Args[1:]...)\n\n\t\/\/ we need to call it here now too, because syscall.Exec will prevent to\n\t\/\/ call the defer that we've defined in the beginning.\n\n\tu.Log.Info(\"Updating was successfull. Replacing current process with args: %v\\n=====> RESTARTING...\\n\\n\", args)\n\n\texecErr := syscall.Exec(self, args, env)\n\tif execErr != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (u *Updater) latestVersion() (string, error) {\n\tresp, err := http.Get(u.Endpoint)\n\tif err != nil {\n\t\tu.Log.Debug(\"Getting latest version from %s\", u.Endpoint)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tlatest, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.TrimSpace(string(latest)), nil\n}\n\nfunc (u *Updater) fetch(url string) ([]byte, error) {\n\tu.Log.Info(\"Fetching binary %s\", url)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"bad http status from %s: %v\", url, resp.Status)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tgz, err := gzip.NewReader(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gz.Close()\n\n\tif _, err = io.Copy(buf, gz); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Run runs the updater in the background for the interval of updater interval.\nfunc (u *Updater) Run() {\n\tu.Log.Info(\"Starting Updater with following options:\\n\\tinterval of: %s\\n\\tendpoint: %s\",\n\t\tu.Interval, u.Endpoint)\n\n\tfor _ = range time.Tick(u.Interval) {\n\t\tupdatingState(true)\n\n\t\tif err := u.checkAndUpdate(); err != nil {\n\t\t\tu.Log.Warning(\"Self-update: %s\", err)\n\t\t}\n\n\t\tupdatingState(false)\n\t}\n}\n\n\/\/ hasFreeSpace checks whether the disk has free space to provide the update.\n\/\/ If free space is lower than then the given mustHas it returns an error.\nfunc hasFreeSpace(mustHas uint64) error {\n\tstat := new(syscall.Statfs_t)\n\n\tif err := syscall.Statfs(\"\/\", stat); err != nil {\n\t\treturn err\n\t}\n\n\tbsize := stat.Bsize \/ 512\n\n\tfree := (uint64(stat.Bfree) * uint64(bsize)) >> 1\n\tfreeInMB := free \/ 1024\n\n\tif freeInMB < mustHas {\n\t\treturn fmt.Errorf(\"No enough space to upgrade klient. Need '%d'. Have: '%d'\",\n\t\t\tmustHas, freeInMB)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><<<<<<< HEAD\n=======\n<<<<<<< HEAD\n>>>>>>> 9bded767a1710b3959ec066faedb3ebd3010ae8a\n\/\/ Copyright 2014 layeka Author. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n<<<<<<< HEAD\n=======\n=======\n>>>>>>> 03b08fea254528b6ccb2d12699c150be84dd6c76\n>>>>>>> 9bded767a1710b3959ec066faedb3ebd3010ae8a\npackage qjson\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com\/layeka\/qutils\"\n)\n\nconst (\n\tJSONStringEmpty = \"\"\n\tJSONStringNull = \"null\"\n)\n\ntype jsonArray []interface{}\n\nfunc (this *jsonArray) Add(val interface{}) *jsonArray {\n\t*this = append(*this, val)\n\treturn this\n}\n\nfunc (this *jsonArray) Get(index int) (val interface{}, b bool) {\n\tif len(*this) > index {\n\t\treturn (*this)[index], true\n\t}\n\treturn nil, false\n}\nfunc (this *jsonArray) Last() (val interface{}, b bool) {\n\tl := len(*this)\n\tif l > 0 {\n\t\treturn (*this)[l-1], true\n\t}\n\treturn nil, false\n}\n\nfunc NewjsonArray(data []interface{}) *jsonArray {\n\tarr := &jsonArray{}\n\tfor _, item := range data {\n\t\tswitch val := item.(type) {\n\t\tcase []interface{}:\n\t\t\tarr.Add(NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tarr.Add(NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(*val))\n\t\tdefault:\n\t\t\tarr.Add(val)\n\t\t}\n\t}\n\treturn arr\n}\n\ntype jsonObject map[string]interface{}\n\nfunc (this *jsonObject) Add(key string, val interface{}) *jsonObject {\n\t(*this)[key] = val\n\treturn this\n}\nfunc (this *jsonObject) Get(key string) (val interface{}, b bool) {\n\tval, b = (*this)[key]\n\treturn\n}\n\nfunc NewjsonObject(data map[string]interface{}) *jsonObject {\n\tobj := &jsonObject{}\n\tfor key, item := range data {\n\t\tswitch val := item.(type) {\n\t\tcase []interface{}:\n\t\t\tobj.Add(key, NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tobj.Add(key, NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tobj.Add(key, NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tobj.Add(key, NewjsonObject(*val))\n\t\tdefault:\n\t\t\tobj.Add(key, val)\n\t\t}\n\t}\n\treturn obj\n}\n\ntype QJson struct {\n\tdata interface{}\n}\n\nfunc NewJSON(body []byte) (*QJson, error) {\n\tj := new(QJson)\n\terr := j.UnmarshalJSON(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn j, nil\n}\n\nfunc NewObjectJSON() *QJson {\n\treturn &QJson{&jsonObject{}}\n}\nfunc NewArrayJSON() *QJson {\n\treturn &QJson{&jsonArray{}}\n}\n\n\/\/ Implements the json.Marshaler interface.\nfunc (this *QJson) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(&this.data)\n}\n\n\/\/ Implements the json.Unmarshaler interface.\nfunc (this *QJson) UnmarshalJSON(p []byte) error {\n\tdec := json.NewDecoder(bytes.NewBuffer(p))\n\tdec.UseNumber()\n\tvar obj interface{}\n\terr := dec.Decode(&obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch val := obj.(type) {\n\tcase []interface{}:\n\t\tthis.data = NewjsonArray(val)\n\tcase map[string]interface{}:\n\t\tthis.data = NewjsonObject(val)\n\tdefault:\n\t\tthis.data = obj\n\t}\n\treturn nil\n}\nfunc (this *QJson) IsArray() bool {\n\t_, ok := this.data.(*jsonArray)\n\treturn ok\n}\nfunc (this *QJson) asArray() (*jsonArray, error) {\n\tval, ok := this.data.(*jsonArray)\n\tif ok {\n\t\treturn val, nil\n\t}\n\treturn nil, errors.New(\"type assertion to *jsonArray failed\")\n}\nfunc (this *QJson) IsObject() bool {\n\t_, ok := this.data.(*jsonObject)\n\treturn ok\n}\nfunc (this *QJson) asObject() (*jsonObject, error) {\n\tval, ok := this.data.(*jsonObject)\n\tif ok {\n\t\treturn val, nil\n\t}\n\treturn nil, errors.New(\"type assertion to *jsonObject failed\")\n}\n\nfunc (this *QJson) ArrayAdd(obj interface{}) *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tswitch val := obj.(type) {\n\t\tcase []interface{}:\n\t\t\tarr.Add(NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tarr.Add(NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(*val))\n\t\tdefault:\n\t\t\tarr.Add(val)\n\t\t}\n\t}\n\treturn this\n}\nfunc (this *QJson) ArrayGet(index int) *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, ok := arr.Get(index)\n\t\tif ok {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) ArrayNewArray() *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, b := arr.Add(&jsonArray{}).Last()\n\t\tif b {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\n\nfunc (this *QJson) ArrayNewObject() *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, b := arr.Add(&jsonObject{}).Last()\n\t\tif b {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) Exists(key string) bool {\n\tjobj, err := this.asObject()\n\tif err != nil {\n\t\treturn false\n\t}\n\t_, ok := jobj.Get(key)\n\treturn ok\n}\nfunc (this *QJson) ObjectAdd(key string, obj interface{}) *QJson {\n\tjobj, err := this.asObject()\n\tif err == nil {\n\t\tswitch val := obj.(type) {\n\t\tcase []interface{}:\n\t\t\tjobj.Add(key, NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tjobj.Add(key, NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tjobj.Add(key, NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tjobj.Add(key, NewjsonObject(*val))\n\t\tdefault:\n\t\t\tjobj.Add(key, val)\n\t\t}\n\t}\n\treturn this\n}\nfunc (this *QJson) ObjectGet(key string) *QJson {\n\tobj, err := this.asObject()\n\tif err == nil {\n\t\tval, ok := obj.Get(key)\n\t\tif ok {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) ObjectNewObject(key string) *QJson {\n\treturn this.ObjectAdd(key, &jsonObject{}).ObjectGet(key)\n}\n\nfunc (this *QJson) ObjectNewArray(key string) *QJson {\n\treturn this.ObjectAdd(key, &jsonArray{}).ObjectGet(key)\n}\n\nfunc (this *QJson) String() string {\n\tb, err := json.Marshal(this.data)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\nfunc (this *QJson) MustBool(args ...bool) bool {\n\tval, err := convertBool(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsBool(args).Default(0)\n\t}\n\treturn val\n}\nfunc (this *QJson) MustInt(args ...int64) int64 {\n\tval, err := convertInt64(this.data)\n\tif err != nil {\n\t\tfmt.Println(reflect.TypeOf(this.data))\n\t\treturn qutils.ArgsInt64(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustFloat(args ...float64) float64 {\n\tval, err := convertFloat64(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsFloat64(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustString(args ...string) string {\n\tval, err := convertString(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsString(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustArray(args ...[]interface{}) []interface{} {\n\tval, err := this.asArray()\n\tif err != nil {\n\t\treturn qutils.ArgsArray(args).Default(0)\n\t}\n\treturn ([]interface{})(*val)\n}\n\nfunc (this *QJson) MustObject(args ...map[string]interface{}) map[string]interface{} {\n\tval, err := this.asObject()\n\tif err != nil {\n\t\treturn qutils.ArgsObject(args).Default(0)\n\t}\n\treturn (map[string]interface{})(*val)\n}\n\nfunc convertBool(o interface{}) (bool, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn false, nil\n\tcase bool:\n\t\treturn val, nil\n\tcase string:\n\t\treturn strconv.ParseBool(val)\n\tcase json.Number:\n\t\t{\n\t\t\tv, err := val.Int64()\n\t\t\tif err == nil {\n\t\t\t\treturn v != 0, nil\n\t\t\t}\n\t\t}\n\tcase int, int8, int16, int32, int64:\n\t\treturn qutils.SafeInt64(val) != 0, nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn qutils.SafeUInt64(val) != 0, nil\n\tcase float32, float64:\n\t\treturn qutils.SafeFloat64(val) != 0, nil\n\t}\n\treturn false, errors.New(\"The json can not be converted to a Boolean value\")\n}\n\nfunc convertString(o interface{}) (string, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn JSONStringNull, nil\n\tcase bool:\n\t\treturn strconv.FormatBool(val), nil\n\tcase string:\n\t\treturn val, nil\n\tcase json.Number:\n\t\treturn val.String(), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn strconv.FormatInt(qutils.SafeInt64(val), 10), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn strconv.FormatUint(qutils.SafeUInt64(val), 10), nil\n\tcase float32, float64:\n\t\treturn strconv.FormatFloat(qutils.SafeFloat64(val), 'f', 18, 64), nil\n\tdefault:\n\t\t{\n\t\t\ts, ok := o.(string)\n\t\t\tif ok {\n\t\t\t\treturn s, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn JSONStringEmpty, errors.New(\"The json can not be converted to a string value\")\n}\n\nfunc convertInt64(o interface{}) (int64, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\t{\n\t\t\tif val {\n\t\t\t\treturn 1, nil\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\tcase string:\n\t\treturn strconv.ParseInt(val, 10, 0)\n\tcase json.Number:\n\t\treturn val.Int64()\n\tcase int, int8, int16, int32, int64:\n\t\treturn qutils.SafeInt64(val), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn int64(qutils.SafeUInt64(val)), nil\n\tcase float32, float64:\n\t\treturn int64(qutils.SafeFloat64(val)), nil\n\t}\n\treturn 0, errors.New(\"The json can not be converted to a int64 value\")\n}\nfunc convertFloat64(o interface{}) (float64, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\t{\n\t\t\tif val {\n\t\t\t\treturn 1, nil\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\tcase string:\n\t\treturn strconv.ParseFloat(val, 64)\n\tcase json.Number:\n\t\treturn val.Float64()\n\tcase int, int8, int16, int32, int64:\n\t\treturn float64(qutils.SafeInt64(val)), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn float64(qutils.SafeUInt64(val)), nil\n\tcase float32, float64:\n\t\treturn qutils.SafeFloat64(val), nil\n\t}\n\treturn 0, errors.New(\"The json can not be converted to a float64 value\")\n}\n<commit_msg>update License<commit_after>\/\/ Copyright 2014 layeka Author. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage qjson\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com\/layeka\/qutils\"\n)\n\nconst (\n\tJSONStringEmpty = \"\"\n\tJSONStringNull = \"null\"\n)\n\ntype jsonArray []interface{}\n\nfunc (this *jsonArray) Add(val interface{}) *jsonArray {\n\t*this = append(*this, val)\n\treturn this\n}\n\nfunc (this *jsonArray) Get(index int) (val interface{}, b bool) {\n\tif len(*this) > index {\n\t\treturn (*this)[index], true\n\t}\n\treturn nil, false\n}\nfunc (this *jsonArray) Last() (val interface{}, b bool) {\n\tl := len(*this)\n\tif l > 0 {\n\t\treturn (*this)[l-1], true\n\t}\n\treturn nil, false\n}\n\nfunc NewjsonArray(data []interface{}) *jsonArray {\n\tarr := &jsonArray{}\n\tfor _, item := range data {\n\t\tswitch val := item.(type) {\n\t\tcase []interface{}:\n\t\t\tarr.Add(NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tarr.Add(NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(*val))\n\t\tdefault:\n\t\t\tarr.Add(val)\n\t\t}\n\t}\n\treturn arr\n}\n\ntype jsonObject map[string]interface{}\n\nfunc (this *jsonObject) Add(key string, val interface{}) *jsonObject {\n\t(*this)[key] = val\n\treturn this\n}\nfunc (this *jsonObject) Get(key string) (val interface{}, b bool) {\n\tval, b = (*this)[key]\n\treturn\n}\n\nfunc NewjsonObject(data map[string]interface{}) *jsonObject {\n\tobj := &jsonObject{}\n\tfor key, item := range data {\n\t\tswitch val := item.(type) {\n\t\tcase []interface{}:\n\t\t\tobj.Add(key, NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tobj.Add(key, NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tobj.Add(key, NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tobj.Add(key, NewjsonObject(*val))\n\t\tdefault:\n\t\t\tobj.Add(key, val)\n\t\t}\n\t}\n\treturn obj\n}\n\ntype QJson struct {\n\tdata interface{}\n}\n\nfunc NewJSON(body []byte) (*QJson, error) {\n\tj := new(QJson)\n\terr := j.UnmarshalJSON(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn j, nil\n}\n\nfunc NewObjectJSON() *QJson {\n\treturn &QJson{&jsonObject{}}\n}\nfunc NewArrayJSON() *QJson {\n\treturn &QJson{&jsonArray{}}\n}\n\n\/\/ Implements the json.Marshaler interface.\nfunc (this *QJson) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(&this.data)\n}\n\n\/\/ Implements the json.Unmarshaler interface.\nfunc (this *QJson) UnmarshalJSON(p []byte) error {\n\tdec := json.NewDecoder(bytes.NewBuffer(p))\n\tdec.UseNumber()\n\tvar obj interface{}\n\terr := dec.Decode(&obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch val := obj.(type) {\n\tcase []interface{}:\n\t\tthis.data = NewjsonArray(val)\n\tcase map[string]interface{}:\n\t\tthis.data = NewjsonObject(val)\n\tdefault:\n\t\tthis.data = obj\n\t}\n\treturn nil\n}\nfunc (this *QJson) IsArray() bool {\n\t_, ok := this.data.(*jsonArray)\n\treturn ok\n}\nfunc (this *QJson) asArray() (*jsonArray, error) {\n\tval, ok := this.data.(*jsonArray)\n\tif ok {\n\t\treturn val, nil\n\t}\n\treturn nil, errors.New(\"type assertion to *jsonArray failed\")\n}\nfunc (this *QJson) IsObject() bool {\n\t_, ok := this.data.(*jsonObject)\n\treturn ok\n}\nfunc (this *QJson) asObject() (*jsonObject, error) {\n\tval, ok := this.data.(*jsonObject)\n\tif ok {\n\t\treturn val, nil\n\t}\n\treturn nil, errors.New(\"type assertion to *jsonObject failed\")\n}\n\nfunc (this *QJson) ArrayAdd(obj interface{}) *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tswitch val := obj.(type) {\n\t\tcase []interface{}:\n\t\t\tarr.Add(NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tarr.Add(NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tarr.Add(NewjsonObject(*val))\n\t\tdefault:\n\t\t\tarr.Add(val)\n\t\t}\n\t}\n\treturn this\n}\nfunc (this *QJson) ArrayGet(index int) *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, ok := arr.Get(index)\n\t\tif ok {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) ArrayNewArray() *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, b := arr.Add(&jsonArray{}).Last()\n\t\tif b {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\n\nfunc (this *QJson) ArrayNewObject() *QJson {\n\tarr, err := this.asArray()\n\tif err == nil {\n\t\tval, b := arr.Add(&jsonObject{}).Last()\n\t\tif b {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) Exists(key string) bool {\n\tjobj, err := this.asObject()\n\tif err != nil {\n\t\treturn false\n\t}\n\t_, ok := jobj.Get(key)\n\treturn ok\n}\nfunc (this *QJson) ObjectAdd(key string, obj interface{}) *QJson {\n\tjobj, err := this.asObject()\n\tif err == nil {\n\t\tswitch val := obj.(type) {\n\t\tcase []interface{}:\n\t\t\tjobj.Add(key, NewjsonArray(val))\n\t\tcase *[]interface{}:\n\t\t\tjobj.Add(key, NewjsonArray(*val))\n\t\tcase map[string]interface{}:\n\t\t\tjobj.Add(key, NewjsonObject(val))\n\t\tcase *map[string]interface{}:\n\t\t\tjobj.Add(key, NewjsonObject(*val))\n\t\tdefault:\n\t\t\tjobj.Add(key, val)\n\t\t}\n\t}\n\treturn this\n}\nfunc (this *QJson) ObjectGet(key string) *QJson {\n\tobj, err := this.asObject()\n\tif err == nil {\n\t\tval, ok := obj.Get(key)\n\t\tif ok {\n\t\t\treturn &QJson{val}\n\t\t}\n\t}\n\treturn &QJson{nil}\n}\nfunc (this *QJson) ObjectNewObject(key string) *QJson {\n\treturn this.ObjectAdd(key, &jsonObject{}).ObjectGet(key)\n}\n\nfunc (this *QJson) ObjectNewArray(key string) *QJson {\n\treturn this.ObjectAdd(key, &jsonArray{}).ObjectGet(key)\n}\n\nfunc (this *QJson) String() string {\n\tb, err := json.Marshal(this.data)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\nfunc (this *QJson) MustBool(args ...bool) bool {\n\tval, err := convertBool(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsBool(args).Default(0)\n\t}\n\treturn val\n}\nfunc (this *QJson) MustInt(args ...int64) int64 {\n\tval, err := convertInt64(this.data)\n\tif err != nil {\n\t\tfmt.Println(reflect.TypeOf(this.data))\n\t\treturn qutils.ArgsInt64(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustFloat(args ...float64) float64 {\n\tval, err := convertFloat64(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsFloat64(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustString(args ...string) string {\n\tval, err := convertString(this.data)\n\tif err != nil {\n\t\treturn qutils.ArgsString(args).Default(0)\n\t}\n\treturn val\n}\n\nfunc (this *QJson) MustArray(args ...[]interface{}) []interface{} {\n\tval, err := this.asArray()\n\tif err != nil {\n\t\treturn qutils.ArgsArray(args).Default(0)\n\t}\n\treturn ([]interface{})(*val)\n}\n\nfunc (this *QJson) MustObject(args ...map[string]interface{}) map[string]interface{} {\n\tval, err := this.asObject()\n\tif err != nil {\n\t\treturn qutils.ArgsObject(args).Default(0)\n\t}\n\treturn (map[string]interface{})(*val)\n}\n\nfunc convertBool(o interface{}) (bool, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn false, nil\n\tcase bool:\n\t\treturn val, nil\n\tcase string:\n\t\treturn strconv.ParseBool(val)\n\tcase json.Number:\n\t\t{\n\t\t\tv, err := val.Int64()\n\t\t\tif err == nil {\n\t\t\t\treturn v != 0, nil\n\t\t\t}\n\t\t}\n\tcase int, int8, int16, int32, int64:\n\t\treturn qutils.SafeInt64(val) != 0, nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn qutils.SafeUInt64(val) != 0, nil\n\tcase float32, float64:\n\t\treturn qutils.SafeFloat64(val) != 0, nil\n\t}\n\treturn false, errors.New(\"The json can not be converted to a Boolean value\")\n}\n\nfunc convertString(o interface{}) (string, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn JSONStringNull, nil\n\tcase bool:\n\t\treturn strconv.FormatBool(val), nil\n\tcase string:\n\t\treturn val, nil\n\tcase json.Number:\n\t\treturn val.String(), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn strconv.FormatInt(qutils.SafeInt64(val), 10), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn strconv.FormatUint(qutils.SafeUInt64(val), 10), nil\n\tcase float32, float64:\n\t\treturn strconv.FormatFloat(qutils.SafeFloat64(val), 'f', 18, 64), nil\n\tdefault:\n\t\t{\n\t\t\ts, ok := o.(string)\n\t\t\tif ok {\n\t\t\t\treturn s, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn JSONStringEmpty, errors.New(\"The json can not be converted to a string value\")\n}\n\nfunc convertInt64(o interface{}) (int64, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\t{\n\t\t\tif val {\n\t\t\t\treturn 1, nil\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\tcase string:\n\t\treturn strconv.ParseInt(val, 10, 0)\n\tcase json.Number:\n\t\treturn val.Int64()\n\tcase int, int8, int16, int32, int64:\n\t\treturn qutils.SafeInt64(val), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn int64(qutils.SafeUInt64(val)), nil\n\tcase float32, float64:\n\t\treturn int64(qutils.SafeFloat64(val)), nil\n\t}\n\treturn 0, errors.New(\"The json can not be converted to a int64 value\")\n}\nfunc convertFloat64(o interface{}) (float64, error) {\n\tswitch val := o.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\t{\n\t\t\tif val {\n\t\t\t\treturn 1, nil\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\tcase string:\n\t\treturn strconv.ParseFloat(val, 64)\n\tcase json.Number:\n\t\treturn val.Float64()\n\tcase int, int8, int16, int32, int64:\n\t\treturn float64(qutils.SafeInt64(val)), nil\n\tcase uint, uint8, uint16, uint32, uint64, uintptr:\n\t\treturn float64(qutils.SafeUInt64(val)), nil\n\tcase float32, float64:\n\t\treturn qutils.SafeFloat64(val), nil\n\t}\n\treturn 0, errors.New(\"The json can not be converted to a float64 value\")\n}\n<|endoftext|>"} {"text":"<commit_before>package tdb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Knetic\/govaluate\"\n\t\"github.com\/tecbot\/gorocksdb\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n)\n\ntype query struct {\n\ttable string\n\tfields []string\n\tfilter *govaluate.EvaluableExpression\n\tto time.Time\n\ttoOffset time.Duration\n\tfrom time.Time\n\tfromOffset time.Duration\n\tonValues func(key map[string]interface{}, field string, vals []float64)\n}\n\ntype QueryStats struct {\n\tScanned int64\n\tFilterPass int64\n\tFilterReject int64\n\tReadValue int64\n\tDataValid int64\n\tInTimeRange int64\n\tRuntime time.Duration\n}\n\nfunc (db *DB) runQuery(q *query) (*QueryStats, error) {\n\tstart := time.Now()\n\tstats := &QueryStats{}\n\n\tif q.from.IsZero() && q.fromOffset >= 0 {\n\t\treturn stats, fmt.Errorf(\"Please specify a from or a negative fromOffset\")\n\t}\n\tif len(q.fields) == 0 {\n\t\treturn stats, fmt.Errorf(\"Please specify at least one field\")\n\t}\n\tt := db.getTable(q.table)\n\tif t == nil {\n\t\treturn stats, fmt.Errorf(\"Unknown table %v\", q.table)\n\t}\n\tfields := make([][]byte, 0, len(q.fields))\n\tfor _, field := range q.fields {\n\t\tfieldBytes, err := msgpack.Marshal(strings.ToLower(field))\n\t\tif err != nil {\n\t\t\treturn stats, fmt.Errorf(\"Unable to marshal field: %v\", err)\n\t\t}\n\t\tfields = append(fields, fieldBytes)\n\t}\n\tsort.Sort(lexicographical(fields))\n\tnow := t.clock.Now()\n\tif q.to.IsZero() {\n\t\tq.to = now\n\t}\n\tif q.from.IsZero() {\n\t\tq.from = q.to.Add(q.fromOffset)\n\t}\n\tq.to = roundTime(q.to, t.resolution)\n\tq.from = roundTime(q.from, t.resolution)\n\tnumPeriods := int(q.to.Sub(q.from) \/ t.resolution)\n\tlog.Tracef(\"Query will return %d periods for range %v to %v\", numPeriods, q.from, q.to)\n\n\tro := gorocksdb.NewDefaultReadOptions()\n\t\/\/ Go ahead and fill the cache\n\tro.SetFillCache(true)\n\tit := t.archiveByKey.NewIterator(ro)\n\tdefer it.Close()\n\n\tfor _, fieldBytes := range fields {\n\t\tfor it.Seek(fieldBytes); it.ValidForPrefix(fieldBytes); it.Next() {\n\t\t\tstats.Scanned++\n\t\t\tk := it.Key()\n\t\t\tkr := bytes.NewReader(k.Data())\n\t\t\tdec := msgpack.NewDecoder(kr)\n\t\t\tstoredField, err := dec.DecodeString()\n\t\t\tif err != nil {\n\t\t\t\tk.Free()\n\t\t\t\treturn stats, fmt.Errorf(\"Unable to decode field: %v\", err)\n\t\t\t}\n\t\t\tkey := make(map[string]interface{})\n\t\t\terr = dec.Decode(&key)\n\t\t\tif err != nil {\n\t\t\t\tk.Free()\n\t\t\t\treturn stats, fmt.Errorf(\"Unable to decode key: %v\", err)\n\t\t\t}\n\t\t\tk.Free()\n\n\t\t\tif q.filter != nil {\n\t\t\t\tinclude, err := q.filter.Evaluate(key)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn stats, fmt.Errorf(\"Unable to apply filter: %v\", err)\n\t\t\t\t}\n\t\t\t\tinc, ok := include.(bool)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn stats, fmt.Errorf(\"Filter expression returned something other than a boolean: %v\", include)\n\t\t\t\t}\n\t\t\t\tif !inc {\n\t\t\t\t\tstats.FilterReject++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstats.FilterPass++\n\t\t\t}\n\n\t\t\tv := it.Value()\n\t\t\tstats.ReadValue++\n\t\t\tseq := sequence(v.Data())\n\t\t\tvals := make([]float64, numPeriods)\n\t\t\tif seq.isValid() {\n\t\t\t\tstats.DataValid++\n\t\t\t\tseqStart := seq.start()\n\t\t\t\tif log.IsTraceEnabled() {\n\t\t\t\t\tlog.Tracef(\"Sequence starts at %v and has %d periods\", seqStart.In(time.UTC), seq.numPeriods())\n\t\t\t\t}\n\t\t\t\tincludeKey := false\n\t\t\t\tif !seqStart.Before(q.from) {\n\t\t\t\t\tto := q.to\n\t\t\t\t\tif to.After(seqStart) {\n\t\t\t\t\t\tto = seqStart\n\t\t\t\t\t}\n\t\t\t\t\tstartOffset := int(seqStart.Sub(to) \/ t.resolution)\n\t\t\t\t\tlog.Tracef(\"Start offset %d\", startOffset)\n\t\t\t\t\tcopyPeriods := seq.numPeriods()\n\t\t\t\t\tfor i := 0; i+startOffset < copyPeriods && i < numPeriods; i++ {\n\t\t\t\t\t\tincludeKey = true\n\t\t\t\t\t\tval := seq.valueAt(i + startOffset)\n\t\t\t\t\t\tlog.Tracef(\"Grabbing value %f\", val)\n\t\t\t\t\t\tvals[i] = val\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif includeKey {\n\t\t\t\t\tstats.InTimeRange++\n\t\t\t\t\tq.onValues(key, storedField, vals)\n\t\t\t\t}\n\t\t\t}\n\t\t\tv.Free()\n\t\t}\n\t}\n\n\tstats.Runtime = time.Now().Sub(start)\n\treturn stats, nil\n}\n\ntype lexicographical [][]byte\n\nfunc (a lexicographical) Len() int { return len(a) }\nfunc (a lexicographical) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a lexicographical) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) < 0 }\n\nfunc keysEqual(a map[string]interface{}, b map[string]interface{}) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k, v := range a {\n\t\tif b[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>Fixed to offset<commit_after>package tdb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Knetic\/govaluate\"\n\t\"github.com\/tecbot\/gorocksdb\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n)\n\ntype query struct {\n\ttable string\n\tfields []string\n\tfilter *govaluate.EvaluableExpression\n\tto time.Time\n\ttoOffset time.Duration\n\tfrom time.Time\n\tfromOffset time.Duration\n\tonValues func(key map[string]interface{}, field string, vals []float64)\n}\n\ntype QueryStats struct {\n\tScanned int64\n\tFilterPass int64\n\tFilterReject int64\n\tReadValue int64\n\tDataValid int64\n\tInTimeRange int64\n\tRuntime time.Duration\n}\n\nfunc (db *DB) runQuery(q *query) (*QueryStats, error) {\n\tstart := time.Now()\n\tstats := &QueryStats{}\n\n\tif q.from.IsZero() && q.fromOffset >= 0 {\n\t\treturn stats, fmt.Errorf(\"Please specify a from or a negative fromOffset\")\n\t}\n\tif len(q.fields) == 0 {\n\t\treturn stats, fmt.Errorf(\"Please specify at least one field\")\n\t}\n\tt := db.getTable(q.table)\n\tif t == nil {\n\t\treturn stats, fmt.Errorf(\"Unknown table %v\", q.table)\n\t}\n\tfields := make([][]byte, 0, len(q.fields))\n\tfor _, field := range q.fields {\n\t\tfieldBytes, err := msgpack.Marshal(strings.ToLower(field))\n\t\tif err != nil {\n\t\t\treturn stats, fmt.Errorf(\"Unable to marshal field: %v\", err)\n\t\t}\n\t\tfields = append(fields, fieldBytes)\n\t}\n\tsort.Sort(lexicographical(fields))\n\tnow := t.clock.Now()\n\tif q.to.IsZero() {\n\t\tq.to = now\n\t\tif q.toOffset != 0 {\n\t\t\tq.to = q.to.Add(q.toOffset)\n\t\t}\n\t}\n\tif q.from.IsZero() {\n\t\tq.from = now.Add(q.fromOffset)\n\t}\n\tq.to = roundTime(q.to, t.resolution)\n\tq.from = roundTime(q.from, t.resolution)\n\tnumPeriods := int(q.to.Sub(q.from) \/ t.resolution)\n\tlog.Tracef(\"Query will return %d periods for range %v to %v\", numPeriods, q.from, q.to)\n\n\tro := gorocksdb.NewDefaultReadOptions()\n\t\/\/ Go ahead and fill the cache\n\tro.SetFillCache(true)\n\tit := t.archiveByKey.NewIterator(ro)\n\tdefer it.Close()\n\n\tfor _, fieldBytes := range fields {\n\t\tfor it.Seek(fieldBytes); it.ValidForPrefix(fieldBytes); it.Next() {\n\t\t\tstats.Scanned++\n\t\t\tk := it.Key()\n\t\t\tkr := bytes.NewReader(k.Data())\n\t\t\tdec := msgpack.NewDecoder(kr)\n\t\t\tstoredField, err := dec.DecodeString()\n\t\t\tif err != nil {\n\t\t\t\tk.Free()\n\t\t\t\treturn stats, fmt.Errorf(\"Unable to decode field: %v\", err)\n\t\t\t}\n\t\t\tkey := make(map[string]interface{})\n\t\t\terr = dec.Decode(&key)\n\t\t\tif err != nil {\n\t\t\t\tk.Free()\n\t\t\t\treturn stats, fmt.Errorf(\"Unable to decode key: %v\", err)\n\t\t\t}\n\t\t\tk.Free()\n\n\t\t\tif q.filter != nil {\n\t\t\t\tinclude, err := q.filter.Evaluate(key)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn stats, fmt.Errorf(\"Unable to apply filter: %v\", err)\n\t\t\t\t}\n\t\t\t\tinc, ok := include.(bool)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn stats, fmt.Errorf(\"Filter expression returned something other than a boolean: %v\", include)\n\t\t\t\t}\n\t\t\t\tif !inc {\n\t\t\t\t\tstats.FilterReject++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstats.FilterPass++\n\t\t\t}\n\n\t\t\tv := it.Value()\n\t\t\tstats.ReadValue++\n\t\t\tseq := sequence(v.Data())\n\t\t\tvals := make([]float64, numPeriods)\n\t\t\tif seq.isValid() {\n\t\t\t\tstats.DataValid++\n\t\t\t\tseqStart := seq.start()\n\t\t\t\tif log.IsTraceEnabled() {\n\t\t\t\t\tlog.Tracef(\"Sequence starts at %v and has %d periods\", seqStart.In(time.UTC), seq.numPeriods())\n\t\t\t\t}\n\t\t\t\tincludeKey := false\n\t\t\t\tif !seqStart.Before(q.from) {\n\t\t\t\t\tto := q.to\n\t\t\t\t\tif to.After(seqStart) {\n\t\t\t\t\t\tto = seqStart\n\t\t\t\t\t}\n\t\t\t\t\tstartOffset := int(seqStart.Sub(to) \/ t.resolution)\n\t\t\t\t\tlog.Tracef(\"Start offset %d\", startOffset)\n\t\t\t\t\tcopyPeriods := seq.numPeriods()\n\t\t\t\t\tfor i := 0; i+startOffset < copyPeriods && i < numPeriods; i++ {\n\t\t\t\t\t\tincludeKey = true\n\t\t\t\t\t\tval := seq.valueAt(i + startOffset)\n\t\t\t\t\t\tlog.Tracef(\"Grabbing value %f\", val)\n\t\t\t\t\t\tvals[i] = val\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif includeKey {\n\t\t\t\t\tstats.InTimeRange++\n\t\t\t\t\tq.onValues(key, storedField, vals)\n\t\t\t\t}\n\t\t\t}\n\t\t\tv.Free()\n\t\t}\n\t}\n\n\tstats.Runtime = time.Now().Sub(start)\n\treturn stats, nil\n}\n\ntype lexicographical [][]byte\n\nfunc (a lexicographical) Len() int { return len(a) }\nfunc (a lexicographical) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a lexicographical) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) < 0 }\n\nfunc keysEqual(a map[string]interface{}, b map[string]interface{}) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k, v := range a {\n\t\tif b[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nvar quests List\n\nfunc randQuest() string {\n\tnum := rand.Int31n(int32(len(quests.Quests)))\n\treturn fmt.Sprintf(\"%s\\t%s\", quests.Quests[num].Rank, quests.Quests[num].Name)\n}\n\nfunc readQuests(name string) error {\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontent, err := ioutil.ReadAll(file)\n\tjson.Unmarshal(content, &quests)\n\treturn nil\n}\n\nfunc root(rw http.ResponseWriter, req *http.Request) {\n\toutput := randQuest()\n\tio.WriteString(rw, output)\n\tfmt.Println(output)\n}\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nfunc main() {\n\tinput := flag.String(\"file\", \"quests.json\", \"Quests json location\")\n\thttpPtr := flag.Bool(\"http\", false, \"Open http server\")\n\tport := flag.Int(\"port\", 8080, \"http server port\")\n\tflag.Parse()\n\terr := readQuests(*input)\n\tif err != nil {\n\t\tfmt.Println(\"Read Quest Error!!\")\n\t\treturn\n\t}\n\tif *httpPtr == false {\n\t\tfmt.Println(randQuest())\n\t\treturn\n\t}\n\thttp.Handle(\"\/root\/\", http.HandlerFunc(root))\n\terr = http.ListenAndServe(fmt.Sprintf(\":%d\", *port), nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe\", err)\n\t}\n}\n<commit_msg>Add: flag for ip binding<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nvar quests List\n\nfunc randQuest() string {\n\tnum := rand.Int31n(int32(len(quests.Quests)))\n\treturn fmt.Sprintf(\"%s\\t%s\", quests.Quests[num].Rank, quests.Quests[num].Name)\n}\n\nfunc readQuests(name string) error {\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontent, err := ioutil.ReadAll(file)\n\tjson.Unmarshal(content, &quests)\n\treturn nil\n}\n\nfunc root(rw http.ResponseWriter, req *http.Request) {\n\toutput := randQuest()\n\tio.WriteString(rw, output+\"\\n\")\n\tfmt.Println(output)\n}\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nfunc main() {\n\tinput := flag.String(\"file\", \"quests.json\", \"Quests json location\")\n\thttpPtr := flag.Bool(\"http\", false, \"Open http server\")\n\tip := flag.String(\"ip\", \"0.0.0.0\", \"http server ip\")\n\tport := flag.Int(\"port\", 8080, \"http server port\")\n\tflag.Parse()\n\terr := readQuests(*input)\n\tif err != nil {\n\t\tfmt.Println(\"Read Quest Error!!\")\n\t\treturn\n\t}\n\tif *httpPtr == false {\n\t\tfmt.Println(randQuest())\n\t\treturn\n\t}\n\thttp.Handle(\"\/root\/\", http.HandlerFunc(root))\n\terr = http.ListenAndServe(fmt.Sprintf(\"%s:%d\", *ip, *port), nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package qmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bitly\/go-nsq\"\n)\n\ntype QueueConfig struct {\n\tHostNSQDAddr string `toml:\"host_nsqd_address\"`\n\tNSQDAddrs []string `toml:\"nsqd_addresses\"`\n\tLookupdAddrs []string `toml:\"lookupd_addresses\"`\n}\n\nfunc (qc *QueueConfig) Clean() error {\n\tif len(qc.LookupdAddrs) == 0 {\n\t\tif len(qc.NSQDAddrs) == 0 {\n\t\t\treturn fmt.Errorf(\"Both LookupdAddresses and NSQDAddresses are missing\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc ConnectConsumer(qc *QueueConfig, consumer *nsq.Consumer) error {\n\tvar err error\n\n\t\/\/ Connect consumers to NSQLookupd\n\tif qc.LookupdAddrs != nil || len(qc.LookupdAddrs) != 0 {\n\t\tlog.Info(\"Connecting Consumer to the following NSQLookupds %s\", qc.LookupdAddrs)\n\t\terr = consumer.ConnectToNSQLookupds(qc.LookupdAddrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ Connect consumers to NSQD\n\tif qc.NSQDAddrs != nil || len(qc.NSQDAddrs) != 0 {\n\t\tlog.Info(\"Connecting Consumer to the following NSQDs %s\", qc.NSQDAddrs)\n\t\terr = consumer.ConnectToNSQDs(qc.NSQDAddrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Couldn't connect to either NSQDs or NSQLookupds\")\n}\n<commit_msg>Fixed typo<commit_after>package qmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/alexcesaro\/log\"\n\t\"github.com\/bitly\/go-nsq\"\n)\n\ntype QueueConfig struct {\n\tHostNSQDAddr string `toml:\"host_nsqd_address\"`\n\tNSQDAddrs []string `toml:\"nsqd_addresses\"`\n\tLookupdAddrs []string `toml:\"lookupd_addresses\"`\n}\n\nfunc (qc *QueueConfig) Clean() error {\n\tif len(qc.LookupdAddrs) == 0 {\n\t\tif len(qc.NSQDAddrs) == 0 {\n\t\t\treturn fmt.Errorf(\"Both LookupdAddresses and NSQDAddresses are missing\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc ConnectConsumer(qc *QueueConfig, consumer *nsq.Consumer) error {\n\tvar err error\n\n\t\/\/ Connect consumers to NSQLookupd\n\tif qc.LookupdAddrs != nil && len(qc.LookupdAddrs) != 0 {\n\t\tlog.Info(\"Connecting Consumer to the following NSQLookupds %s\", qc.LookupdAddrs)\n\t\terr = consumer.ConnectToNSQLookupds(qc.LookupdAddrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ Connect consumers to NSQD\n\tif qc.NSQDAddrs != nil && len(qc.NSQDAddrs) != 0 {\n\t\tlog.Info(\"Connecting Consumer to the following NSQDs %s\", qc.NSQDAddrs)\n\t\terr = consumer.ConnectToNSQDs(qc.NSQDAddrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Couldn't connect to either NSQDs or NSQLookupds\")\n}\n<|endoftext|>"} {"text":"<commit_before>package citibank\n\nconst responseCiti = `\n<soapenv:Envelope xmlns:soapenv=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\">\n <soapenv:Header\/>\n <soapenv:Body>\n <RegisterBoletoResponse>\n <actionCode>{{returnCode}}<\/actionCode>\n <reasonMessage>{{returnMessage}}<\/reasonMessage>\n <\/RegisterBoletoResponse>\n <\/soapenv:Body>\n<\/soapenv:Envelope>\n`\n\nconst registerBoletoCiti = `\n\n## SOAPAction:RegisterBoleto\n## Content-Type:text\/xml\n\n<soapenv:Envelope xmlns:soapenv=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\">\n <soapenv:Header\/>\n <soapenv:Body>\n <GrpREMColTit>\n <GrpBenf>\n <CdClrSys>745<\/CdClrSys>\n <CdIspb>33479023<\/CdIspb>\n <CdtrId>0{{.Authentication.Username}}<\/CdtrId>\n <CdtrNm>{{.Recipient.Name}}<\/CdtrNm>\n <CdtrTaxId>{{.Recipient.Document.Number}}<\/CdtrTaxId>\n <CdtrTaxTp>J<\/CdtrTaxTp>\n <\/GrpBenf>\n <GrpClPgd>\n <DbtrNm>{{.Buyer.Name}}<\/DbtrNm>\n <DbtrTaxId>{{.Buyer.Document.Number}}<\/DbtrTaxId>\n\t\t\t{{if (eq .Buyer.Document.Type \"CPF\")}}\n \t<DbtrTaxTp>F<\/DbtrTaxTp>\n\t\t\t{{else}}\n \t<DbtrTaxTp>J<\/DbtrTaxTp>\n\t\t\t{{end}}\n <GrpClPgdAdr>\n <DbtrAdrTp>{{.Buyer.Address.Street}} {{.Buyer.Address.Number}} {{.Buyer.Address.Complement}}<\/DbtrAdrTp>\n <DbtrCtrySubDvsn>{{.Buyer.Address.StateCode}}<\/DbtrCtrySubDvsn>\n <DbtrPstCd>{{.Buyer.Address.ZipCode}}<\/DbtrPstCd>\n <DbtrTwnNm>{{.Buyer.Address.City}}<\/DbtrTwnNm>\n <\/GrpClPgdAdr>\n <\/GrpClPgd>\n <CdOccTp>01<\/CdOccTp>\n <DbtrGrntNm> <\/DbtrGrntNm>\n <DbtrMsg>{{.Title.Instructions}}<\/DbtrMsg>\n <TitlAmt>{{.Title.AmountInCents}}<\/TitlAmt>\n <TitlBarCdInd>0<\/TitlBarCdInd>\n <TitlCcyCd>09<\/TitlCcyCd>\n <TitlCiaCdId>{{.Title.DocumentNumber}}<\/TitlCiaCdId>\n <TitlDueDt>{{enDate .Title.ExpireDateTime \"-\"}}<\/TitlDueDt>\n <TitlInstrNmDtExec>0<\/TitlInstrNmDtExec>\n <TitlInstrProtInd> <\/TitlInstrProtInd>\n <TitlInstrWrtOffInd> <\/TitlInstrWrtOffInd>\n <TitlIOFAmt>0<\/TitlIOFAmt>\n <TitlIssDt>{{enDate today \"-\"}}<\/TitlIssDt>\n <TitlOurNb>{{padLeft (toString .Title.OurNumber) \"0\" 12}}<\/TitlOurNb>\n <TitlPortCd>1<\/TitlPortCd>\n <TitlRbtAmt>0<\/TitlRbtAmt>\n <TitlTpCd>03<\/TitlTpCd>\n <TitlYourNb>{{.Title.DocumentNumber}}<\/TitlYourNb>\n <GrpDscnt>\n <TitlDscntAmtOrPrct>0<\/TitlDscntAmtOrPrct>\n <TitlDscntEndDt> <\/TitlDscntEndDt>\n <TitlDscntTp> <\/TitlDscntTp>\n <\/GrpDscnt>\n <GrpItrs>\n <TitlItrsAmtOrPrct>0<\/TitlItrsAmtOrPrct>\n <TitlItrsStrDt> <\/TitlItrsStrDt>\n <TitlItrsTp> <\/TitlItrsTp>\n <\/GrpItrs>\n <GrpFn>\n <TitlFnAmtOrPrct>0<\/TitlFnAmtOrPrct>\n <TitlFnStrDt> <\/TitlFnStrDt>\n <TitlFnTp> <\/TitlFnTp>\n <\/GrpFn>\n <\/GrpREMColTit>\n <\/soapenv:Body>\n<\/soapenv:Envelope>\n`\n\nfunc getRequestCiti() string {\n\treturn registerBoletoCiti\n}\n\nfunc getResponseCiti() string {\n\treturn responseCiti\n}\n<commit_msg>Remove 0 no template de request<commit_after>package citibank\n\nconst responseCiti = `\n<soapenv:Envelope xmlns:soapenv=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\">\n <soapenv:Header\/>\n <soapenv:Body>\n <RegisterBoletoResponse>\n <actionCode>{{returnCode}}<\/actionCode>\n <reasonMessage>{{returnMessage}}<\/reasonMessage>\n <\/RegisterBoletoResponse>\n <\/soapenv:Body>\n<\/soapenv:Envelope>\n`\n\nconst registerBoletoCiti = `\n\n## SOAPAction:RegisterBoleto\n## Content-Type:text\/xml\n\n<soapenv:Envelope xmlns:soapenv=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\">\n <soapenv:Header\/>\n <soapenv:Body>\n <GrpREMColTit>\n <GrpBenf>\n <CdClrSys>745<\/CdClrSys>\n <CdIspb>33479023<\/CdIspb>\n <CdtrId>{{.Authentication.Username}}<\/CdtrId>\n <CdtrNm>{{.Recipient.Name}}<\/CdtrNm>\n <CdtrTaxId>{{.Recipient.Document.Number}}<\/CdtrTaxId>\n <CdtrTaxTp>J<\/CdtrTaxTp>\n <\/GrpBenf>\n <GrpClPgd>\n <DbtrNm>{{.Buyer.Name}}<\/DbtrNm>\n <DbtrTaxId>{{.Buyer.Document.Number}}<\/DbtrTaxId>\n\t\t\t{{if (eq .Buyer.Document.Type \"CPF\")}}\n \t<DbtrTaxTp>F<\/DbtrTaxTp>\n\t\t\t{{else}}\n \t<DbtrTaxTp>J<\/DbtrTaxTp>\n\t\t\t{{end}}\n <GrpClPgdAdr>\n <DbtrAdrTp>{{.Buyer.Address.Street}} {{.Buyer.Address.Number}} {{.Buyer.Address.Complement}}<\/DbtrAdrTp>\n <DbtrCtrySubDvsn>{{.Buyer.Address.StateCode}}<\/DbtrCtrySubDvsn>\n <DbtrPstCd>{{.Buyer.Address.ZipCode}}<\/DbtrPstCd>\n <DbtrTwnNm>{{.Buyer.Address.City}}<\/DbtrTwnNm>\n <\/GrpClPgdAdr>\n <\/GrpClPgd>\n <CdOccTp>01<\/CdOccTp>\n <DbtrGrntNm> <\/DbtrGrntNm>\n <DbtrMsg>{{.Title.Instructions}}<\/DbtrMsg>\n <TitlAmt>{{.Title.AmountInCents}}<\/TitlAmt>\n <TitlBarCdInd>0<\/TitlBarCdInd>\n <TitlCcyCd>09<\/TitlCcyCd>\n <TitlCiaCdId>{{.Title.DocumentNumber}}<\/TitlCiaCdId>\n <TitlDueDt>{{enDate .Title.ExpireDateTime \"-\"}}<\/TitlDueDt>\n <TitlInstrNmDtExec>0<\/TitlInstrNmDtExec>\n <TitlInstrProtInd> <\/TitlInstrProtInd>\n <TitlInstrWrtOffInd> <\/TitlInstrWrtOffInd>\n <TitlIOFAmt>0<\/TitlIOFAmt>\n <TitlIssDt>{{enDate today \"-\"}}<\/TitlIssDt>\n <TitlOurNb>{{padLeft (toString .Title.OurNumber) \"0\" 12}}<\/TitlOurNb>\n <TitlPortCd>1<\/TitlPortCd>\n <TitlRbtAmt>0<\/TitlRbtAmt>\n <TitlTpCd>03<\/TitlTpCd>\n <TitlYourNb>{{.Title.DocumentNumber}}<\/TitlYourNb>\n <GrpDscnt>\n <TitlDscntAmtOrPrct>0<\/TitlDscntAmtOrPrct>\n <TitlDscntEndDt> <\/TitlDscntEndDt>\n <TitlDscntTp> <\/TitlDscntTp>\n <\/GrpDscnt>\n <GrpItrs>\n <TitlItrsAmtOrPrct>0<\/TitlItrsAmtOrPrct>\n <TitlItrsStrDt> <\/TitlItrsStrDt>\n <TitlItrsTp> <\/TitlItrsTp>\n <\/GrpItrs>\n <GrpFn>\n <TitlFnAmtOrPrct>0<\/TitlFnAmtOrPrct>\n <TitlFnStrDt> <\/TitlFnStrDt>\n <TitlFnTp> <\/TitlFnTp>\n <\/GrpFn>\n <\/GrpREMColTit>\n <\/soapenv:Body>\n<\/soapenv:Envelope>\n`\n\nfunc getRequestCiti() string {\n\treturn registerBoletoCiti\n}\n\nfunc getResponseCiti() string {\n\treturn responseCiti\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"bytes\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/sftp\"\n\tworkerctx \"github.com\/travis-ci\/worker\/lib\/context\"\n\t\"github.com\/travis-ci\/worker\/lib\/metrics\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype SauceLabsProvider struct {\n\tclient *http.Client\n\tbaseURL *url.URL\n\timageAliases map[string]string\n\tsshKeyPath string\n\tsshKeyPassphrase string\n}\n\ntype SauceLabsInstance struct {\n\tpayload sauceLabsInstancePayload\n\tprovider *SauceLabsProvider\n}\n\ntype sauceLabsInstancePayload struct {\n\tID string `json:\"instance_id\"`\n\tImageID string `json:\"image_id\"`\n\tRealImageID string `json:\"real_image_id\"`\n\tState string `json:\"state\"`\n\tPrivateIP string `json:\"private_ip\"`\n\tExtraInfo map[string]interface{} `json:\"extra_info\"`\n}\n\nfunc NewSauceLabsProvider(config map[string]string) (*SauceLabsProvider, error) {\n\tendpoint, ok := config[\"endpoint\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected endpoint config key\")\n\t}\n\tbaseURL, err := url.Parse(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taliasNames, ok := config[\"image_aliases\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected image_aliases config key\")\n\t}\n\n\timageAliases := make(map[string]string, len(aliasNames))\n\n\tfor _, aliasName := range strings.Split(aliasNames, \",\") {\n\t\timageName, ok := config[fmt.Sprintf(\"image_alias_%s\", aliasName)]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"expected image alias %q\", aliasName)\n\t\t}\n\n\t\timageAliases[aliasName] = imageName\n\t}\n\n\tsshKeyPath, ok := config[\"ssh_key_path\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected ssh_key_path config key\")\n\t}\n\n\tsshKeyPassphrase, ok := config[\"ssh_key_passphrase\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected ssh_key_passphrase config key\")\n\t}\n\n\treturn &SauceLabsProvider{\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: baseURL,\n\t\timageAliases: imageAliases,\n\t\tsshKeyPath: sshKeyPath,\n\t\tsshKeyPassphrase: sshKeyPassphrase,\n\t}, nil\n}\n\nfunc (p *SauceLabsProvider) Start(ctx context.Context, startAttributes StartAttributes) (Instance, error) {\n\tstartupInfo, err := json.Marshal(map[string]interface{}{\"worker_pid\": os.Getpid(), \"source\": \"worker\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timageName, ok := p.imageAliases[startAttributes.OsxImage]\n\tif !ok {\n\t\timageName, _ = p.imageAliases[\"default\"]\n\t}\n\n\tif imageName == \"\" {\n\t\treturn nil, fmt.Errorf(\"no image alias for %s\", startAttributes.OsxImage)\n\t}\n\n\tu, err := p.baseURL.Parse(fmt.Sprintf(\"instances?image=%s\", url.QueryEscape(imageName)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), bytes.NewReader(startupInfo))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstartBooting := time.Now()\n\n\tresp, err := p.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif c := resp.StatusCode; c < 200 || c >= 300 {\n\t\treturn nil, fmt.Errorf(\"expected 2xx from Sauce Labs API, got %d\", c)\n\t}\n\n\tvar payload sauceLabsInstancePayload\n\terr = json.NewDecoder(resp.Body).Decode(&payload)\n\n\tinstanceReady := make(chan sauceLabsInstancePayload, 1)\n\terrChan := make(chan error, 1)\n\tgo func(id string) {\n\t\tu, err := p.baseURL.Parse(fmt.Sprintf(\"instances\/%s\", url.QueryEscape(id)))\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfor true {\n\t\t\tresp, err := p.client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar payload sauceLabsInstancePayload\n\t\t\terr = json.NewDecoder(resp.Body).Decode(&payload)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:3422\", payload.PrivateIP))\n\t\t\tif conn != nil {\n\t\t\t\tconn.Close()\n\t\t\t}\n\n\t\t\tif err == nil {\n\t\t\t\tinstanceReady <- payload\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(payload.ID)\n\n\tselect {\n\tcase payload := <-instanceReady:\n\t\tmetrics.TimeSince(\"worker.vm.provider.sauce_labs.boot\", startBooting)\n\t\tworkerctx.LoggerFromContext(ctx).WithField(\"instance_id\", payload.ID).Info(\"booted instance\")\n\t\treturn &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}, nil\n\tcase err := <-errChan:\n\t\tinstance := &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}\n\t\tinstance.Stop(ctx)\n\n\t\treturn nil, err\n\tcase <-ctx.Done():\n\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\tmetrics.Mark(\"worker.vm.provider.sauce_labs.boot.timeout\")\n\t\t}\n\n\t\tinstance := &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}\n\t\tinstance.Stop(ctx)\n\n\t\treturn nil, ctx.Err()\n\t}\n}\n\nfunc (i *SauceLabsInstance) UploadScript(ctx context.Context, script []byte) error {\n\tclient, err := i.sshClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tsftp, err := sftp.NewClient(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sftp.Close()\n\n\tf, err := sftp.Create(\"build.sh\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(script)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err = sftp.Create(\"wrapper.sh\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Fprintf(f, `#!\/bin\/bash\n\n[[ -f ~\/build.sh.exit ]] && rm ~\/build.sh.exit\n\nuntil nc 127.0.0.1 15782; do sleep 1; done\n\nuntil [[ -f ~\/build.sh.exit ]]; do sleep 1; done\nexit $(cat ~\/build.sh.exit)\n`)\n\n\treturn err\n}\n\nfunc (i *SauceLabsInstance) RunScript(ctx context.Context, output io.WriteCloser) (RunResult, error) {\n\tclient, err := i.sshClient()\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\tdefer client.Close()\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\tdefer session.Close()\n\n\terr = session.RequestPty(\"xterm\", 80, 40, ssh.TerminalModes{})\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\n\tsession.Stdout = output\n\tsession.Stderr = output\n\n\terr = session.Run(\"bash ~\/wrapper.sh\")\n\tif err == nil {\n\t\treturn RunResult{Completed: true, ExitCode: 0}, nil\n\t}\n\n\tswitch err := err.(type) {\n\tcase *ssh.ExitError:\n\t\treturn RunResult{Completed: true, ExitCode: uint8(err.ExitStatus())}, nil\n\tdefault:\n\t\treturn RunResult{Completed: false}, err\n\t}\n}\n\nfunc (i *SauceLabsInstance) Stop(ctx context.Context) error {\n\tu, err := i.provider.baseURL.Parse(fmt.Sprintf(\"instances\/%s\", url.QueryEscape(i.payload.ID)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = i.provider.client.Do(req)\n\treturn err\n}\n\nfunc (i *SauceLabsInstance) sshClient() (*ssh.Client, error) {\n\tfile, err := ioutil.ReadFile(i.provider.sshKeyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblock, _ := pem.Decode(file)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"ssh key does not contain a valid PEM block\")\n\t}\n\n\tder, err := x509.DecryptPEMBlock(block, []byte(i.provider.sshKeyPassphrase))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey, err := x509.ParsePKCS1PrivateKey(der)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsigner, err := ssh.NewSignerFromKey(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ssh.Dial(\"tcp\", fmt.Sprintf(\"%s:3422\", i.payload.PrivateIP), &ssh.ClientConfig{\n\t\tUser: \"travis\",\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.PublicKeys(signer),\n\t\t},\n\t})\n}\n<commit_msg>sauce-labs: read and close all HTTP request bodies<commit_after>package backend\n\nimport (\n\t\"bytes\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/sftp\"\n\tworkerctx \"github.com\/travis-ci\/worker\/lib\/context\"\n\t\"github.com\/travis-ci\/worker\/lib\/metrics\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype SauceLabsProvider struct {\n\tclient *http.Client\n\tbaseURL *url.URL\n\timageAliases map[string]string\n\tsshKeyPath string\n\tsshKeyPassphrase string\n}\n\ntype SauceLabsInstance struct {\n\tpayload sauceLabsInstancePayload\n\tprovider *SauceLabsProvider\n}\n\ntype sauceLabsInstancePayload struct {\n\tID string `json:\"instance_id\"`\n\tImageID string `json:\"image_id\"`\n\tRealImageID string `json:\"real_image_id\"`\n\tState string `json:\"state\"`\n\tPrivateIP string `json:\"private_ip\"`\n\tExtraInfo map[string]interface{} `json:\"extra_info\"`\n}\n\nfunc NewSauceLabsProvider(config map[string]string) (*SauceLabsProvider, error) {\n\tendpoint, ok := config[\"endpoint\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected endpoint config key\")\n\t}\n\tbaseURL, err := url.Parse(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taliasNames, ok := config[\"image_aliases\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected image_aliases config key\")\n\t}\n\n\timageAliases := make(map[string]string, len(aliasNames))\n\n\tfor _, aliasName := range strings.Split(aliasNames, \",\") {\n\t\timageName, ok := config[fmt.Sprintf(\"image_alias_%s\", aliasName)]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"expected image alias %q\", aliasName)\n\t\t}\n\n\t\timageAliases[aliasName] = imageName\n\t}\n\n\tsshKeyPath, ok := config[\"ssh_key_path\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected ssh_key_path config key\")\n\t}\n\n\tsshKeyPassphrase, ok := config[\"ssh_key_passphrase\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected ssh_key_passphrase config key\")\n\t}\n\n\treturn &SauceLabsProvider{\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: baseURL,\n\t\timageAliases: imageAliases,\n\t\tsshKeyPath: sshKeyPath,\n\t\tsshKeyPassphrase: sshKeyPassphrase,\n\t}, nil\n}\n\nfunc (p *SauceLabsProvider) Start(ctx context.Context, startAttributes StartAttributes) (Instance, error) {\n\tstartupInfo, err := json.Marshal(map[string]interface{}{\"worker_pid\": os.Getpid(), \"source\": \"worker\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timageName, ok := p.imageAliases[startAttributes.OsxImage]\n\tif !ok {\n\t\timageName, _ = p.imageAliases[\"default\"]\n\t}\n\n\tif imageName == \"\" {\n\t\treturn nil, fmt.Errorf(\"no image alias for %s\", startAttributes.OsxImage)\n\t}\n\n\tu, err := p.baseURL.Parse(fmt.Sprintf(\"instances?image=%s\", url.QueryEscape(imageName)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), bytes.NewReader(startupInfo))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstartBooting := time.Now()\n\n\tresp, err := p.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer io.Copy(ioutil.Discard, resp.Body)\n\tdefer resp.Body.Close()\n\n\tif c := resp.StatusCode; c < 200 || c >= 300 {\n\t\treturn nil, fmt.Errorf(\"expected 2xx from Sauce Labs API, got %d\", c)\n\t}\n\n\tvar payload sauceLabsInstancePayload\n\terr = json.NewDecoder(resp.Body).Decode(&payload)\n\n\tinstanceReady := make(chan sauceLabsInstancePayload, 1)\n\terrChan := make(chan error, 1)\n\tgo func(id string) {\n\t\tu, err := p.baseURL.Parse(fmt.Sprintf(\"instances\/%s\", url.QueryEscape(id)))\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfor true {\n\t\t\tresp, err := p.client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer io.Copy(ioutil.Discard, resp.Body)\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tvar payload sauceLabsInstancePayload\n\t\t\terr = json.NewDecoder(resp.Body).Decode(&payload)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:3422\", payload.PrivateIP))\n\t\t\tif conn != nil {\n\t\t\t\tconn.Close()\n\t\t\t}\n\n\t\t\tif err == nil {\n\t\t\t\tinstanceReady <- payload\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(payload.ID)\n\n\tselect {\n\tcase payload := <-instanceReady:\n\t\tmetrics.TimeSince(\"worker.vm.provider.sauce_labs.boot\", startBooting)\n\t\tworkerctx.LoggerFromContext(ctx).WithField(\"instance_id\", payload.ID).Info(\"booted instance\")\n\t\treturn &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}, nil\n\tcase err := <-errChan:\n\t\tinstance := &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}\n\t\tinstance.Stop(ctx)\n\n\t\treturn nil, err\n\tcase <-ctx.Done():\n\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\tmetrics.Mark(\"worker.vm.provider.sauce_labs.boot.timeout\")\n\t\t}\n\n\t\tinstance := &SauceLabsInstance{\n\t\t\tpayload: payload,\n\t\t\tprovider: p,\n\t\t}\n\t\tinstance.Stop(ctx)\n\n\t\treturn nil, ctx.Err()\n\t}\n}\n\nfunc (i *SauceLabsInstance) UploadScript(ctx context.Context, script []byte) error {\n\tclient, err := i.sshClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tsftp, err := sftp.NewClient(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sftp.Close()\n\n\tf, err := sftp.Create(\"build.sh\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(script)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err = sftp.Create(\"wrapper.sh\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Fprintf(f, `#!\/bin\/bash\n\n[[ -f ~\/build.sh.exit ]] && rm ~\/build.sh.exit\n\nuntil nc 127.0.0.1 15782; do sleep 1; done\n\nuntil [[ -f ~\/build.sh.exit ]]; do sleep 1; done\nexit $(cat ~\/build.sh.exit)\n`)\n\n\treturn err\n}\n\nfunc (i *SauceLabsInstance) RunScript(ctx context.Context, output io.WriteCloser) (RunResult, error) {\n\tclient, err := i.sshClient()\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\tdefer client.Close()\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\tdefer session.Close()\n\n\terr = session.RequestPty(\"xterm\", 80, 40, ssh.TerminalModes{})\n\tif err != nil {\n\t\treturn RunResult{Completed: false}, err\n\t}\n\n\tsession.Stdout = output\n\tsession.Stderr = output\n\n\terr = session.Run(\"bash ~\/wrapper.sh\")\n\tif err == nil {\n\t\treturn RunResult{Completed: true, ExitCode: 0}, nil\n\t}\n\n\tswitch err := err.(type) {\n\tcase *ssh.ExitError:\n\t\treturn RunResult{Completed: true, ExitCode: uint8(err.ExitStatus())}, nil\n\tdefault:\n\t\treturn RunResult{Completed: false}, err\n\t}\n}\n\nfunc (i *SauceLabsInstance) Stop(ctx context.Context) error {\n\tu, err := i.provider.baseURL.Parse(fmt.Sprintf(\"instances\/%s\", url.QueryEscape(i.payload.ID)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := i.provider.client.Do(req)\n\tio.Copy(ioutil.Discard, resp.Body)\n\tresp.Body.Close()\n\treturn err\n}\n\nfunc (i *SauceLabsInstance) sshClient() (*ssh.Client, error) {\n\tfile, err := ioutil.ReadFile(i.provider.sshKeyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblock, _ := pem.Decode(file)\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"ssh key does not contain a valid PEM block\")\n\t}\n\n\tder, err := x509.DecryptPEMBlock(block, []byte(i.provider.sshKeyPassphrase))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey, err := x509.ParsePKCS1PrivateKey(der)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsigner, err := ssh.NewSignerFromKey(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ssh.Dial(\"tcp\", fmt.Sprintf(\"%s:3422\", i.payload.PrivateIP), &ssh.ClientConfig{\n\t\tUser: \"travis\",\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.PublicKeys(signer),\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package gherkin\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/cucumber\/messages-go\/v12\"\n)\n\ntype AstBuilder interface {\n\tBuilder\n\tGetGherkinDocument() *messages.GherkinDocument\n}\n\ntype astBuilder struct {\n\tstack []*astNode\n\tcomments []*messages.GherkinDocument_Comment\n\tnewId func() string\n}\n\nfunc (t *astBuilder) Reset() {\n\tt.comments = []*messages.GherkinDocument_Comment{}\n\tt.stack = []*astNode{}\n\tt.push(newAstNode(RuleTypeNone))\n}\n\nfunc (t *astBuilder) GetGherkinDocument() *messages.GherkinDocument {\n\tres := t.currentNode().getSingle(RuleTypeGherkinDocument)\n\tif val, ok := res.(*messages.GherkinDocument); ok {\n\t\treturn val\n\t}\n\treturn nil\n}\n\ntype astNode struct {\n\truleType RuleType\n\tsubNodes map[RuleType][]interface{}\n}\n\nfunc (a *astNode) add(rt RuleType, obj interface{}) {\n\ta.subNodes[rt] = append(a.subNodes[rt], obj)\n}\n\nfunc (a *astNode) getSingle(rt RuleType) interface{} {\n\tif val, ok := a.subNodes[rt]; ok {\n\t\tfor i := range val {\n\t\t\treturn val[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *astNode) getItems(rt RuleType) []interface{} {\n\tvar res []interface{}\n\tif val, ok := a.subNodes[rt]; ok {\n\t\tfor i := range val {\n\t\t\tres = append(res, val[i])\n\t\t}\n\t}\n\treturn res\n}\n\nfunc (a *astNode) getToken(tt TokenType) *Token {\n\tif val, ok := a.getSingle(tt.RuleType()).(*Token); ok {\n\t\treturn val\n\t}\n\treturn nil\n}\n\nfunc (a *astNode) getTokens(tt TokenType) []*Token {\n\tvar items = a.getItems(tt.RuleType())\n\tvar tokens []*Token\n\tfor i := range items {\n\t\tif val, ok := items[i].(*Token); ok {\n\t\t\ttokens = append(tokens, val)\n\t\t}\n\t}\n\treturn tokens\n}\n\nfunc (t *astBuilder) currentNode() *astNode {\n\tif len(t.stack) > 0 {\n\t\treturn t.stack[len(t.stack)-1]\n\t}\n\treturn nil\n}\n\nfunc newAstNode(rt RuleType) *astNode {\n\treturn &astNode{\n\t\truleType: rt,\n\t\tsubNodes: make(map[RuleType][]interface{}),\n\t}\n}\n\nfunc NewAstBuilder(newId func() string) AstBuilder {\n\tbuilder := new(astBuilder)\n\tbuilder.newId = newId\n\tbuilder.comments = []*messages.GherkinDocument_Comment{}\n\tbuilder.push(newAstNode(RuleTypeNone))\n\treturn builder\n}\n\nfunc (t *astBuilder) push(n *astNode) {\n\tt.stack = append(t.stack, n)\n}\n\nfunc (t *astBuilder) pop() *astNode {\n\tx := t.stack[len(t.stack)-1]\n\tt.stack = t.stack[:len(t.stack)-1]\n\treturn x\n}\n\nfunc (t *astBuilder) Build(tok *Token) (bool, error) {\n\tif tok.Type == TokenTypeComment {\n\t\tcomment := &messages.GherkinDocument_Comment{\n\t\t\tLocation: astLocation(tok),\n\t\t\tText: tok.Text,\n\t\t}\n\t\tt.comments = append(t.comments, comment)\n\t} else {\n\t\tt.currentNode().add(tok.Type.RuleType(), tok)\n\t}\n\treturn true, nil\n}\n\nfunc (t *astBuilder) StartRule(r RuleType) (bool, error) {\n\tt.push(newAstNode(r))\n\treturn true, nil\n}\n\nfunc (t *astBuilder) EndRule(r RuleType) (bool, error) {\n\tnode := t.pop()\n\ttransformedNode, err := t.transformNode(node)\n\tt.currentNode().add(node.ruleType, transformedNode)\n\treturn true, err\n}\n\nfunc (t *astBuilder) transformNode(node *astNode) (interface{}, error) {\n\tswitch node.ruleType {\n\n\tcase RuleTypeStep:\n\t\tstepLine := node.getToken(TokenTypeStepLine)\n\n\t\tstep := &messages.GherkinDocument_Feature_Step{\n\t\t\tLocation: astLocation(stepLine),\n\t\t\tKeyword: stepLine.Keyword,\n\t\t\tText: stepLine.Text,\n\t\t\tId: t.newId(),\n\t\t}\n\t\tdataTable := node.getSingle(RuleTypeDataTable)\n\t\tif dataTable != nil {\n\t\t\tstep.Argument = &messages.GherkinDocument_Feature_Step_DataTable_{\n\t\t\t\tDataTable: dataTable.(*messages.GherkinDocument_Feature_Step_DataTable),\n\t\t\t}\n\t\t} else {\n\t\t\tdocString := node.getSingle(RuleTypeDocString)\n\t\t\tif docString != nil {\n\t\t\t\tstep.Argument = &messages.GherkinDocument_Feature_Step_DocString_{DocString: docString.(*messages.GherkinDocument_Feature_Step_DocString)}\n\t\t\t}\n\t\t}\n\n\t\treturn step, nil\n\n\tcase RuleTypeDocString:\n\t\tseparatorToken := node.getToken(TokenTypeDocStringSeparator)\n\t\tlineTokens := node.getTokens(TokenTypeOther)\n\t\tvar text string\n\t\tfor i := range lineTokens {\n\t\t\tif i > 0 {\n\t\t\t\ttext += \"\\n\"\n\t\t\t}\n\t\t\ttext += lineTokens[i].Text\n\t\t}\n\t\tds := &messages.GherkinDocument_Feature_Step_DocString{\n\t\t\tLocation: astLocation(separatorToken),\n\t\t\tMediaType: separatorToken.Text,\n\t\t\tContent: text,\n\t\t\tDelimiter: separatorToken.Keyword,\n\t\t}\n\t\treturn ds, nil\n\n\tcase RuleTypeDataTable:\n\t\trows, err := astTableRows(node, t.newId)\n\t\tdt := &messages.GherkinDocument_Feature_Step_DataTable{\n\t\t\tLocation: rows[0].Location,\n\t\t\tRows: rows,\n\t\t}\n\t\treturn dt, err\n\n\tcase RuleTypeBackground:\n\t\tbackgroundLine := node.getToken(TokenTypeBackgroundLine)\n\t\tdescription, _ := node.getSingle(RuleTypeDescription).(string)\n\t\tbg := &messages.GherkinDocument_Feature_Background{\n\t\t\tId: t.newId(),\n\t\t\tLocation: astLocation(backgroundLine),\n\t\t\tKeyword: backgroundLine.Keyword,\n\t\t\tName: backgroundLine.Text,\n\t\t\tDescription: description,\n\t\t\tSteps: astSteps(node),\n\t\t}\n\t\treturn bg, nil\n\n\tcase RuleTypeScenarioDefinition:\n\t\ttags := astTags(node, t.newId)\n\t\tscenarioNode, _ := node.getSingle(RuleTypeScenario).(*astNode)\n\n\t\tscenarioLine := scenarioNode.getToken(TokenTypeScenarioLine)\n\t\tdescription, _ := scenarioNode.getSingle(RuleTypeDescription).(string)\n\t\tsc := &messages.GherkinDocument_Feature_Scenario{\n\t\t\tId: t.newId(),\n\t\t\tTags: tags,\n\t\t\tLocation: astLocation(scenarioLine),\n\t\t\tKeyword: scenarioLine.Keyword,\n\t\t\tName: scenarioLine.Text,\n\t\t\tDescription: description,\n\t\t\tSteps: astSteps(scenarioNode),\n\t\t\tExamples: astExamples(scenarioNode),\n\t\t}\n\n\t\treturn sc, nil\n\n\tcase RuleTypeExamplesDefinition:\n\t\ttags := astTags(node, t.newId)\n\t\texamplesNode, _ := node.getSingle(RuleTypeExamples).(*astNode)\n\t\texamplesLine := examplesNode.getToken(TokenTypeExamplesLine)\n\t\tdescription, _ := examplesNode.getSingle(RuleTypeDescription).(string)\n\t\texamplesTable := examplesNode.getSingle(RuleTypeExamplesTable)\n\n\t\tex := &messages.GherkinDocument_Feature_Scenario_Examples{\n\t\t\tId: t.newId(),\n\t\t\tTags: tags,\n\t\t\tLocation: astLocation(examplesLine),\n\t\t\tKeyword: examplesLine.Keyword,\n\t\t\tName: examplesLine.Text,\n\t\t\tDescription: description,\n\t\t}\n\t\tif examplesTable != nil {\n\t\t\tallRows, _ := examplesTable.([]*messages.GherkinDocument_Feature_TableRow)\n\t\t\tex.TableHeader = allRows[0]\n\t\t\tex.TableBody = allRows[1:]\n\t\t}\n\t\treturn ex, nil\n\n\tcase RuleTypeExamplesTable:\n\t\tallRows, err := astTableRows(node, t.newId)\n\t\treturn allRows, err\n\n\tcase RuleTypeDescription:\n\t\tlineTokens := node.getTokens(TokenTypeOther)\n\t\t\/\/ Trim trailing empty lines\n\t\tend := len(lineTokens)\n\t\tfor end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == \"\" {\n\t\t\tend--\n\t\t}\n\t\tvar desc []string\n\t\tfor i := range lineTokens[0:end] {\n\t\t\tdesc = append(desc, lineTokens[i].Text)\n\t\t}\n\t\treturn strings.Join(desc, \"\\n\"), nil\n\n\tcase RuleTypeFeature:\n\t\theader, ok := node.getSingle(RuleTypeFeatureHeader).(*astNode)\n\t\tif !ok {\n\t\t\treturn nil, nil\n\t\t}\n\t\ttags := astTags(header, t.newId)\n\t\tfeatureLine := header.getToken(TokenTypeFeatureLine)\n\t\tif featureLine == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar children []*messages.GherkinDocument_Feature_FeatureChild\n\t\tbackground, _ := node.getSingle(RuleTypeBackground).(*messages.GherkinDocument_Feature_Background)\n\t\tif background != nil {\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Background{Background: background},\n\t\t\t})\n\t\t}\n\t\tscenarios := node.getItems(RuleTypeScenarioDefinition)\n\t\tfor i := range scenarios {\n\t\t\tscenario := scenarios[i].(*messages.GherkinDocument_Feature_Scenario)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Scenario{Scenario: scenario},\n\t\t\t})\n\t\t}\n\t\trules := node.getItems(RuleTypeRule)\n\t\tfor i := range rules {\n\t\t\trule := rules[i].(*messages.GherkinDocument_Feature_FeatureChild_Rule)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Rule_{\n\t\t\t\t\tRule: rule,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tdescription, _ := header.getSingle(RuleTypeDescription).(string)\n\n\t\tfeat := &messages.GherkinDocument_Feature{}\n\t\tfeat.Tags = tags\n\t\tfeat.Location = astLocation(featureLine)\n\t\tfeat.Language = featureLine.GherkinDialect\n\t\tfeat.Keyword = featureLine.Keyword\n\t\tfeat.Name = featureLine.Text\n\t\tfeat.Description = description\n\t\tfeat.Children = children\n\t\treturn feat, nil\n\n\tcase RuleTypeRule:\n\t\theader, ok := node.getSingle(RuleTypeRuleHeader).(*astNode)\n\t\tif !ok {\n\t\t\treturn nil, nil\n\t\t}\n\t\truleLine := header.getToken(TokenTypeRuleLine)\n\t\tif ruleLine == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar children []*messages.GherkinDocument_Feature_FeatureChild_RuleChild\n\t\tbackground, _ := node.getSingle(RuleTypeBackground).(*messages.GherkinDocument_Feature_Background)\n\n\t\tif background != nil {\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild_RuleChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_RuleChild_Background{Background: background},\n\t\t\t})\n\t\t}\n\t\tscenarios := node.getItems(RuleTypeScenarioDefinition)\n\t\tfor i := range scenarios {\n\t\t\tscenario := scenarios[i].(*messages.GherkinDocument_Feature_Scenario)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild_RuleChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_RuleChild_Scenario{Scenario: scenario},\n\t\t\t})\n\t\t}\n\n\t\tdescription, _ := header.getSingle(RuleTypeDescription).(string)\n\n\t\trule := &messages.GherkinDocument_Feature_FeatureChild_Rule{\n\t\t\tId: t.newId(),\n\t\t\tLocation: astLocation(ruleLine),\n\t\t\tKeyword: ruleLine.Keyword,\n\t\t\tName: ruleLine.Text,\n\t\t\tDescription: description,\n\t\t\tChildren: children,\n\t\t}\n\t\treturn rule, nil\n\n\tcase RuleTypeGherkinDocument:\n\t\tfeature, _ := node.getSingle(RuleTypeFeature).(*messages.GherkinDocument_Feature)\n\n\t\tdoc := &messages.GherkinDocument{}\n\t\tif feature != nil {\n\t\t\tdoc.Feature = feature\n\t\t}\n\t\tdoc.Comments = t.comments\n\t\treturn doc, nil\n\t}\n\treturn node, nil\n}\n\nfunc astLocation(t *Token) *messages.Location {\n\treturn &messages.Location{\n\t\tLine: uint32(t.Location.Line),\n\t\tColumn: uint32(t.Location.Column),\n\t}\n}\n\nfunc astTableRows(t *astNode, newId func() string) (rows []*messages.GherkinDocument_Feature_TableRow, err error) {\n\trows = []*messages.GherkinDocument_Feature_TableRow{}\n\ttokens := t.getTokens(TokenTypeTableRow)\n\tfor i := range tokens {\n\t\trow := &messages.GherkinDocument_Feature_TableRow{\n\t\t\tId: newId(),\n\t\t\tLocation: astLocation(tokens[i]),\n\t\t\tCells: astTableCells(tokens[i]),\n\t\t}\n\t\trows = append(rows, row)\n\t}\n\terr = ensureCellCount(rows)\n\treturn\n}\n\nfunc ensureCellCount(rows []*messages.GherkinDocument_Feature_TableRow) error {\n\tif len(rows) <= 1 {\n\t\treturn nil\n\t}\n\tcellCount := len(rows[0].Cells)\n\tfor i := range rows {\n\t\tif cellCount != len(rows[i].Cells) {\n\t\t\treturn &parseError{\"inconsistent cell count within the table\", &Location{\n\t\t\t\tLine: int(rows[i].Location.Line),\n\t\t\t\tColumn: int(rows[i].Location.Column),\n\t\t\t}}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc astTableCells(t *Token) (cells []*messages.GherkinDocument_Feature_TableRow_TableCell) {\n\tcells = []*messages.GherkinDocument_Feature_TableRow_TableCell{}\n\tfor i := range t.Items {\n\t\titem := t.Items[i]\n\t\tcell := &messages.GherkinDocument_Feature_TableRow_TableCell{}\n\t\tcell.Location = &messages.Location{\n\t\t\tLine: uint32(t.Location.Line),\n\t\t\tColumn: uint32(item.Column),\n\t\t}\n\t\tcell.Value = item.Text\n\t\tcells = append(cells, cell)\n\t}\n\treturn\n}\n\nfunc astSteps(t *astNode) (steps []*messages.GherkinDocument_Feature_Step) {\n\tsteps = []*messages.GherkinDocument_Feature_Step{}\n\ttokens := t.getItems(RuleTypeStep)\n\tfor i := range tokens {\n\t\tstep, _ := tokens[i].(*messages.GherkinDocument_Feature_Step)\n\t\tsteps = append(steps, step)\n\t}\n\treturn\n}\n\nfunc astExamples(t *astNode) (examples []*messages.GherkinDocument_Feature_Scenario_Examples) {\n\texamples = []*messages.GherkinDocument_Feature_Scenario_Examples{}\n\ttokens := t.getItems(RuleTypeExamplesDefinition)\n\tfor i := range tokens {\n\t\texample, _ := tokens[i].(*messages.GherkinDocument_Feature_Scenario_Examples)\n\t\texamples = append(examples, example)\n\t}\n\treturn\n}\n\nfunc astTags(node *astNode, newId func() string) (tags []*messages.GherkinDocument_Feature_Tag) {\n\ttags = []*messages.GherkinDocument_Feature_Tag{}\n\ttagsNode, ok := node.getSingle(RuleTypeTags).(*astNode)\n\tif !ok {\n\t\treturn\n\t}\n\ttokens := tagsNode.getTokens(TokenTypeTagLine)\n\tfor i := range tokens {\n\t\ttoken := tokens[i]\n\t\tfor k := range token.Items {\n\t\t\titem := token.Items[k]\n\t\t\ttag := &messages.GherkinDocument_Feature_Tag{}\n\t\t\ttag.Location = &messages.Location{\n\t\t\t\tLine: uint32(token.Location.Line),\n\t\t\t\tColumn: uint32(item.Column),\n\t\t\t}\n\t\t\ttag.Name = item.Text\n\t\t\ttag.Id = newId()\n\t\t\ttags = append(tags, tag)\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Fix issue after merge<commit_after>package gherkin\n\nimport (\n\t\"github.com\/cucumber\/messages-go\/v12\"\n\t\"strings\"\n)\n\ntype AstBuilder interface {\n\tBuilder\n\tGetGherkinDocument() *messages.GherkinDocument\n}\n\ntype astBuilder struct {\n\tstack []*astNode\n\tcomments []*messages.GherkinDocument_Comment\n\tnewId func() string\n}\n\nfunc (t *astBuilder) Reset() {\n\tt.comments = []*messages.GherkinDocument_Comment{}\n\tt.stack = []*astNode{}\n\tt.push(newAstNode(RuleTypeNone))\n}\n\nfunc (t *astBuilder) GetGherkinDocument() *messages.GherkinDocument {\n\tres := t.currentNode().getSingle(RuleTypeGherkinDocument)\n\tif val, ok := res.(*messages.GherkinDocument); ok {\n\t\treturn val\n\t}\n\treturn nil\n}\n\ntype astNode struct {\n\truleType RuleType\n\tsubNodes map[RuleType][]interface{}\n}\n\nfunc (a *astNode) add(rt RuleType, obj interface{}) {\n\ta.subNodes[rt] = append(a.subNodes[rt], obj)\n}\n\nfunc (a *astNode) getSingle(rt RuleType) interface{} {\n\tif val, ok := a.subNodes[rt]; ok {\n\t\tfor i := range val {\n\t\t\treturn val[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *astNode) getItems(rt RuleType) []interface{} {\n\tvar res []interface{}\n\tif val, ok := a.subNodes[rt]; ok {\n\t\tfor i := range val {\n\t\t\tres = append(res, val[i])\n\t\t}\n\t}\n\treturn res\n}\n\nfunc (a *astNode) getToken(tt TokenType) *Token {\n\tif val, ok := a.getSingle(tt.RuleType()).(*Token); ok {\n\t\treturn val\n\t}\n\treturn nil\n}\n\nfunc (a *astNode) getTokens(tt TokenType) []*Token {\n\tvar items = a.getItems(tt.RuleType())\n\tvar tokens []*Token\n\tfor i := range items {\n\t\tif val, ok := items[i].(*Token); ok {\n\t\t\ttokens = append(tokens, val)\n\t\t}\n\t}\n\treturn tokens\n}\n\nfunc (t *astBuilder) currentNode() *astNode {\n\tif len(t.stack) > 0 {\n\t\treturn t.stack[len(t.stack)-1]\n\t}\n\treturn nil\n}\n\nfunc newAstNode(rt RuleType) *astNode {\n\treturn &astNode{\n\t\truleType: rt,\n\t\tsubNodes: make(map[RuleType][]interface{}),\n\t}\n}\n\nfunc NewAstBuilder(newId func() string) AstBuilder {\n\tbuilder := new(astBuilder)\n\tbuilder.newId = newId\n\tbuilder.comments = []*messages.GherkinDocument_Comment{}\n\tbuilder.push(newAstNode(RuleTypeNone))\n\treturn builder\n}\n\nfunc (t *astBuilder) push(n *astNode) {\n\tt.stack = append(t.stack, n)\n}\n\nfunc (t *astBuilder) pop() *astNode {\n\tx := t.stack[len(t.stack)-1]\n\tt.stack = t.stack[:len(t.stack)-1]\n\treturn x\n}\n\nfunc (t *astBuilder) Build(tok *Token) (bool, error) {\n\tif tok.Type == TokenTypeComment {\n\t\tcomment := &messages.GherkinDocument_Comment{\n\t\t\tLocation: astLocation(tok),\n\t\t\tText: tok.Text,\n\t\t}\n\t\tt.comments = append(t.comments, comment)\n\t} else {\n\t\tt.currentNode().add(tok.Type.RuleType(), tok)\n\t}\n\treturn true, nil\n}\n\nfunc (t *astBuilder) StartRule(r RuleType) (bool, error) {\n\tt.push(newAstNode(r))\n\treturn true, nil\n}\n\nfunc (t *astBuilder) EndRule(r RuleType) (bool, error) {\n\tnode := t.pop()\n\ttransformedNode, err := t.transformNode(node)\n\tt.currentNode().add(node.ruleType, transformedNode)\n\treturn true, err\n}\n\nfunc (t *astBuilder) transformNode(node *astNode) (interface{}, error) {\n\tswitch node.ruleType {\n\n\tcase RuleTypeStep:\n\t\tstepLine := node.getToken(TokenTypeStepLine)\n\n\t\tstep := &messages.GherkinDocument_Feature_Step{\n\t\t\tLocation: astLocation(stepLine),\n\t\t\tKeyword: stepLine.Keyword,\n\t\t\tText: stepLine.Text,\n\t\t\tId: t.newId(),\n\t\t}\n\t\tdataTable := node.getSingle(RuleTypeDataTable)\n\t\tif dataTable != nil {\n\t\t\tstep.Argument = &messages.GherkinDocument_Feature_Step_DataTable_{\n\t\t\t\tDataTable: dataTable.(*messages.GherkinDocument_Feature_Step_DataTable),\n\t\t\t}\n\t\t} else {\n\t\t\tdocString := node.getSingle(RuleTypeDocString)\n\t\t\tif docString != nil {\n\t\t\t\tstep.Argument = &messages.GherkinDocument_Feature_Step_DocString_{DocString: docString.(*messages.GherkinDocument_Feature_Step_DocString)}\n\t\t\t}\n\t\t}\n\n\t\treturn step, nil\n\n\tcase RuleTypeDocString:\n\t\tseparatorToken := node.getToken(TokenTypeDocStringSeparator)\n\t\tlineTokens := node.getTokens(TokenTypeOther)\n\t\tvar text string\n\t\tfor i := range lineTokens {\n\t\t\tif i > 0 {\n\t\t\t\ttext += \"\\n\"\n\t\t\t}\n\t\t\ttext += lineTokens[i].Text\n\t\t}\n\t\tds := &messages.GherkinDocument_Feature_Step_DocString{\n\t\t\tLocation: astLocation(separatorToken),\n\t\t\tMediaType: separatorToken.Text,\n\t\t\tContent: text,\n\t\t\tDelimiter: separatorToken.Keyword,\n\t\t}\n\t\treturn ds, nil\n\n\tcase RuleTypeDataTable:\n\t\trows, err := astTableRows(node, t.newId)\n\t\tdt := &messages.GherkinDocument_Feature_Step_DataTable{\n\t\t\tLocation: rows[0].Location,\n\t\t\tRows: rows,\n\t\t}\n\t\treturn dt, err\n\n\tcase RuleTypeBackground:\n\t\tbackgroundLine := node.getToken(TokenTypeBackgroundLine)\n\t\tdescription, _ := node.getSingle(RuleTypeDescription).(string)\n\t\tbg := &messages.GherkinDocument_Feature_Background{\n\t\t\tId: t.newId(),\n\t\t\tLocation: astLocation(backgroundLine),\n\t\t\tKeyword: backgroundLine.Keyword,\n\t\t\tName: backgroundLine.Text,\n\t\t\tDescription: description,\n\t\t\tSteps: astSteps(node),\n\t\t}\n\t\treturn bg, nil\n\n\tcase RuleTypeScenarioDefinition:\n\t\ttags := astTags(node, t.newId)\n\t\tscenarioNode, _ := node.getSingle(RuleTypeScenario).(*astNode)\n\n\t\tscenarioLine := scenarioNode.getToken(TokenTypeScenarioLine)\n\t\tdescription, _ := scenarioNode.getSingle(RuleTypeDescription).(string)\n\t\tsc := &messages.GherkinDocument_Feature_Scenario{\n\t\t\tId: t.newId(),\n\t\t\tTags: tags,\n\t\t\tLocation: astLocation(scenarioLine),\n\t\t\tKeyword: scenarioLine.Keyword,\n\t\t\tName: scenarioLine.Text,\n\t\t\tDescription: description,\n\t\t\tSteps: astSteps(scenarioNode),\n\t\t\tExamples: astExamples(scenarioNode),\n\t\t}\n\n\t\treturn sc, nil\n\n\tcase RuleTypeExamplesDefinition:\n\t\ttags := astTags(node, t.newId)\n\t\texamplesNode, _ := node.getSingle(RuleTypeExamples).(*astNode)\n\t\texamplesLine := examplesNode.getToken(TokenTypeExamplesLine)\n\t\tdescription, _ := examplesNode.getSingle(RuleTypeDescription).(string)\n\t\texamplesTable := examplesNode.getSingle(RuleTypeExamplesTable)\n\n\t\tex := &messages.GherkinDocument_Feature_Scenario_Examples{\n\t\t\tId: t.newId(),\n\t\t\tTags: tags,\n\t\t\tLocation: astLocation(examplesLine),\n\t\t\tKeyword: examplesLine.Keyword,\n\t\t\tName: examplesLine.Text,\n\t\t\tDescription: description,\n\t\t}\n\t\tif examplesTable != nil {\n\t\t\tallRows, _ := examplesTable.([]*messages.GherkinDocument_Feature_TableRow)\n\t\t\tex.TableHeader = allRows[0]\n\t\t\tex.TableBody = allRows[1:]\n\t\t}\n\t\treturn ex, nil\n\n\tcase RuleTypeExamplesTable:\n\t\tallRows, err := astTableRows(node, t.newId)\n\t\treturn allRows, err\n\n\tcase RuleTypeDescription:\n\t\tlineTokens := node.getTokens(TokenTypeOther)\n\t\t\/\/ Trim trailing empty lines\n\t\tend := len(lineTokens)\n\t\tfor end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == \"\" {\n\t\t\tend--\n\t\t}\n\t\tvar desc []string\n\t\tfor i := range lineTokens[0:end] {\n\t\t\tdesc = append(desc, lineTokens[i].Text)\n\t\t}\n\t\treturn strings.Join(desc, \"\\n\"), nil\n\n\tcase RuleTypeFeature:\n\t\theader, ok := node.getSingle(RuleTypeFeatureHeader).(*astNode)\n\t\tif !ok {\n\t\t\treturn nil, nil\n\t\t}\n\t\ttags := astTags(header, t.newId)\n\t\tfeatureLine := header.getToken(TokenTypeFeatureLine)\n\t\tif featureLine == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar children []*messages.GherkinDocument_Feature_FeatureChild\n\t\tbackground, _ := node.getSingle(RuleTypeBackground).(*messages.GherkinDocument_Feature_Background)\n\t\tif background != nil {\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Background{Background: background},\n\t\t\t})\n\t\t}\n\t\tscenarios := node.getItems(RuleTypeScenarioDefinition)\n\t\tfor i := range scenarios {\n\t\t\tscenario := scenarios[i].(*messages.GherkinDocument_Feature_Scenario)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Scenario{Scenario: scenario},\n\t\t\t})\n\t\t}\n\t\trules := node.getItems(RuleTypeRule)\n\t\tfor i := range rules {\n\t\t\trule := rules[i].(*messages.GherkinDocument_Feature_FeatureChild_Rule)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_Rule_{\n\t\t\t\t\tRule: rule,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tdescription, _ := header.getSingle(RuleTypeDescription).(string)\n\n\t\tfeat := &messages.GherkinDocument_Feature{}\n\t\tfeat.Tags = tags\n\t\tfeat.Location = astLocation(featureLine)\n\t\tfeat.Language = featureLine.GherkinDialect\n\t\tfeat.Keyword = featureLine.Keyword\n\t\tfeat.Name = featureLine.Text\n\t\tfeat.Description = description\n\t\tfeat.Children = children\n\t\treturn feat, nil\n\n\tcase RuleTypeRule:\n\t\theader, ok := node.getSingle(RuleTypeRuleHeader).(*astNode)\n\t\tif !ok {\n\t\t\treturn nil, nil\n\t\t}\n\t\truleLine := header.getToken(TokenTypeRuleLine)\n\t\tif ruleLine == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvar children []*messages.GherkinDocument_Feature_FeatureChild_RuleChild\n\t\tbackground, _ := node.getSingle(RuleTypeBackground).(*messages.GherkinDocument_Feature_Background)\n\n\t\tif background != nil {\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild_RuleChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_RuleChild_Background{Background: background},\n\t\t\t})\n\t\t}\n\t\tscenarios := node.getItems(RuleTypeScenarioDefinition)\n\t\tfor i := range scenarios {\n\t\t\tscenario := scenarios[i].(*messages.GherkinDocument_Feature_Scenario)\n\t\t\tchildren = append(children, &messages.GherkinDocument_Feature_FeatureChild_RuleChild{\n\t\t\t\tValue: &messages.GherkinDocument_Feature_FeatureChild_RuleChild_Scenario{Scenario: scenario},\n\t\t\t})\n\t\t}\n\n\t\tdescription, _ := header.getSingle(RuleTypeDescription).(string)\n\n\t\trule := &messages.GherkinDocument_Feature_FeatureChild_Rule{\n\t\t\tId: t.newId(),\n\t\t\tLocation: astLocation(ruleLine),\n\t\t\tKeyword: ruleLine.Keyword,\n\t\t\tName: ruleLine.Text,\n\t\t\tDescription: description,\n\t\t\tChildren: children,\n\t\t}\n\t\treturn rule, nil\n\n\tcase RuleTypeGherkinDocument:\n\t\tfeature, _ := node.getSingle(RuleTypeFeature).(*messages.GherkinDocument_Feature)\n\n\t\tdoc := &messages.GherkinDocument{}\n\t\tif feature != nil {\n\t\t\tdoc.Feature = feature\n\t\t}\n\t\tdoc.Comments = t.comments\n\t\treturn doc, nil\n\t}\n\treturn node, nil\n}\n\nfunc astLocation(t *Token) *messages.Location {\n\treturn &messages.Location{\n\t\tLine: uint32(t.Location.Line),\n\t\tColumn: uint32(t.Location.Column),\n\t}\n}\n\nfunc astTableRows(t *astNode, newId func() string) (rows []*messages.GherkinDocument_Feature_TableRow, err error) {\n\trows = []*messages.GherkinDocument_Feature_TableRow{}\n\ttokens := t.getTokens(TokenTypeTableRow)\n\tfor i := range tokens {\n\t\trow := &messages.GherkinDocument_Feature_TableRow{\n\t\t\tId: newId(),\n\t\t\tLocation: astLocation(tokens[i]),\n\t\t\tCells: astTableCells(tokens[i]),\n\t\t}\n\t\trows = append(rows, row)\n\t}\n\terr = ensureCellCount(rows)\n\treturn\n}\n\nfunc ensureCellCount(rows []*messages.GherkinDocument_Feature_TableRow) error {\n\tif len(rows) <= 1 {\n\t\treturn nil\n\t}\n\tcellCount := len(rows[0].Cells)\n\tfor i := range rows {\n\t\tif cellCount != len(rows[i].Cells) {\n\t\t\treturn &parseError{\"inconsistent cell count within the table\", &Location{\n\t\t\t\tLine: int(rows[i].Location.Line),\n\t\t\t\tColumn: int(rows[i].Location.Column),\n\t\t\t}}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc astTableCells(t *Token) (cells []*messages.GherkinDocument_Feature_TableRow_TableCell) {\n\tcells = []*messages.GherkinDocument_Feature_TableRow_TableCell{}\n\tfor i := range t.Items {\n\t\titem := t.Items[i]\n\t\tcell := &messages.GherkinDocument_Feature_TableRow_TableCell{}\n\t\tcell.Location = &messages.Location{\n\t\t\tLine: uint32(t.Location.Line),\n\t\t\tColumn: uint32(item.Column),\n\t\t}\n\t\tcell.Value = item.Text\n\t\tcells = append(cells, cell)\n\t}\n\treturn\n}\n\nfunc astSteps(t *astNode) (steps []*messages.GherkinDocument_Feature_Step) {\n\tsteps = []*messages.GherkinDocument_Feature_Step{}\n\ttokens := t.getItems(RuleTypeStep)\n\tfor i := range tokens {\n\t\tstep, _ := tokens[i].(*messages.GherkinDocument_Feature_Step)\n\t\tsteps = append(steps, step)\n\t}\n\treturn\n}\n\nfunc astExamples(t *astNode) (examples []*messages.GherkinDocument_Feature_Scenario_Examples) {\n\texamples = []*messages.GherkinDocument_Feature_Scenario_Examples{}\n\ttokens := t.getItems(RuleTypeExamplesDefinition)\n\tfor i := range tokens {\n\t\texample, _ := tokens[i].(*messages.GherkinDocument_Feature_Scenario_Examples)\n\t\texamples = append(examples, example)\n\t}\n\treturn\n}\n\nfunc astTags(node *astNode, newId func() string) (tags []*messages.GherkinDocument_Feature_Tag) {\n\ttags = []*messages.GherkinDocument_Feature_Tag{}\n\ttagsNode, ok := node.getSingle(RuleTypeTags).(*astNode)\n\tif !ok {\n\t\treturn\n\t}\n\ttokens := tagsNode.getTokens(TokenTypeTagLine)\n\tfor i := range tokens {\n\t\ttoken := tokens[i]\n\t\tfor k := range token.Items {\n\t\t\titem := token.Items[k]\n\t\t\ttag := &messages.GherkinDocument_Feature_Tag{}\n\t\t\ttag.Location = &messages.Location{\n\t\t\t\tLine: uint32(token.Location.Line),\n\t\t\t\tColumn: uint32(item.Column),\n\t\t\t}\n\t\t\ttag.Name = item.Text\n\t\t\ttag.Id = newId()\n\t\t\ttags = append(tags, tag)\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package auth\n\nimport (\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\tsql \"github.com\/aodin\/aspect\"\n\t\"github.com\/aodin\/aspect\/postgres\"\n)\n\n\/\/ User is a database-backed user.\ntype User struct {\n\tID int64 `db:\"id,omitempty\"`\n\tEmail string `db:\"email\"`\n\tFirstName string `db:\"first_name\"`\n\tLastName string `db:\"last_name\"`\n\tAbout string `db:\"about\"`\n\tPhoto string `db:\"photo\"`\n\tIsActive bool `db:\"is_active\"`\n\tIsSuperuser bool `db:\"is_superuser\"`\n\tPassword string `db:\"password\"`\n\tToken string `db:\"token\"`\n\tTokenSetAt time.Time `db:\"token_set_at,omitempty\"`\n\tCreatedAt time.Time `db:\"created_at,omitempty\"`\n\tmanager *UserManager `db:\"-\"`\n}\n\n\/\/ Delete removes the user with the given ID from the database.\n\/\/ It will return an error if the user does not have an ID or the ID\n\/\/ was not deleted from the database. It will panic on any connection error.\nfunc (user User) Delete() error {\n\tif !user.Exists() {\n\t\treturn fmt.Errorf(\"auth: users without IDs cannot be deleted\")\n\t}\n\treturn user.manager.Delete(user.ID)\n}\n\n\/\/ Exists returns true if the user has an assigned ID\nfunc (user User) Exists() bool {\n\treturn user.ID != 0\n}\n\n\/\/ Name returns the concatenated first and last name\nfunc (user User) Name() string {\n\treturn fmt.Sprintf(\"%s %s\", user.FirstName, user.LastName)\n}\n\n\/\/ String returns the user id and email\nfunc (user User) String() string {\n\treturn fmt.Sprintf(\"%d: %s\", user.ID, user.Email)\n}\n\n\/\/ Users is the postgres schema for users\nvar Users = sql.Table(\"users\",\n\tsql.Column(\"id\", postgres.Serial{NotNull: true}),\n\tsql.Column(\"email\", sql.String{Unique: true, NotNull: true, Length: 255}),\n\tsql.Column(\"first_name\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"last_name\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"about\", sql.String{Length: 511, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"photo\", sql.String{Length: 511, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"is_active\", sql.Boolean{Default: sql.True}),\n\tsql.Column(\"is_superuser\", sql.Boolean{Default: sql.False}),\n\tsql.Column(\"password\", sql.String{Length: 255, NotNull: true}),\n\tsql.Column(\"token\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"token_set_at\", sql.Timestamp{Default: postgres.Now}),\n\tsql.Column(\"created_at\", sql.Timestamp{Default: postgres.Now}),\n\tsql.PrimaryKey(\"id\"),\n)\n\n\/\/ UserManager is the internal manager of users\ntype UserManager struct {\n\tconn sql.Connection\n\thash Hasher\n\ttokenFunc KeyFunc\n}\n\n\/\/ Create will create a new user with given email and cleartext password.\n\/\/ It will panic on any crypto or database connection errors.\nfunc (m *UserManager) Create(email, first, last, clear string) (User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t\tFirstName: first,\n\t\tLastName: last,\n\t\tIsActive: true,\n\t\tPassword: MakePassword(m.hash, clear),\n\t\tToken: m.tokenFunc(),\n\t\tTokenSetAt: time.Now(),\n\t\tmanager: m,\n\t}\n\terr := m.create(&user)\n\treturn user, err\n}\n\n\/\/ create checks for a duplicate email before inserting the user.\n\/\/ Email must already be normalized.\nfunc (m *UserManager) create(user *User) error {\n\tvar duplicate string\n\temail := sql.Select(\n\t\tUsers.C[\"email\"],\n\t).Where(Users.C[\"email\"].Equals(user.Email)).Limit(1)\n\tif m.conn.MustQueryOne(email, &duplicate) {\n\t\treturn fmt.Errorf(\n\t\t\t\"auth: user with email %s already exists\", duplicate,\n\t\t)\n\t}\n\n\t\/\/ Insert the new user\n\tstmt := postgres.Insert(Users).Returning(Users.Columns()...).Values(user)\n\tm.conn.MustQueryOne(stmt, user)\n\treturn nil\n}\n\n\/\/ Delete removes the user with the given ID from the database.\n\/\/ It will return an error if the ID was not deleted from the database.\n\/\/ It will panic on any connection error.\nfunc (m *UserManager) Delete(id int64) error {\n\tstmt := Users.Delete().Where(Users.C[\"id\"].Equals(id))\n\trowsAffected, err := m.conn.MustExecute(stmt).RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"auth: error during rows affected: %s\", err)\n\t}\n\tif rowsAffected == 0 {\n\t\treturn fmt.Errorf(\"auth: user ID %d was not deleted\", id)\n\t}\n\treturn nil\n}\n\n\/\/ GetByEmail returns the user with the given email.\nfunc (m *UserManager) GetByEmail(email string) (user User, err error) {\n\tstmt := Users.Select().Where(Users.C[\"email\"].Equals(email))\n\tif !m.conn.MustQueryOne(stmt, &user) {\n\t\terr = fmt.Errorf(\"auth: no user with email %s exists\", email)\n\t}\n\treturn\n}\n\n\/\/ GetByID returns the user with the given id.\nfunc (m *UserManager) GetByID(id int64) (user User, err error) {\n\tstmt := Users.Select().Where(Users.C[\"id\"].Equals(id))\n\tif !m.conn.MustQueryOne(stmt, &user) {\n\t\terr = fmt.Errorf(\"auth: no user with id %d exists\", id)\n\t}\n\treturn\n}\n\n\/\/ Hasher returns the hasher used by the UserManager\nfunc (m UserManager) Hasher() Hasher {\n\treturn m.hash\n}\n\nfunc NewUsers(conn sql.Connection) *UserManager {\n\thasher, err := GetHasher(\"pbkdf2_sha256\")\n\tif err != nil {\n\t\tlog.Panicf(\"auth: could not get pbkdf2_sha256 hasher: %s\", err)\n\t}\n\treturn newUsers(conn, hasher)\n}\n\nfunc MockUsers(conn sql.Connection) *UserManager {\n\treturn newUsers(conn, MockHasher(\"mock\", 1, sha1.New))\n}\n\nfunc newUsers(conn sql.Connection, hash Hasher) *UserManager {\n\treturn &UserManager{\n\t\tconn: conn,\n\t\thash: hash,\n\t\ttokenFunc: RandomKey,\n\t}\n}\n<commit_msg>Added CreateSuperuser method to user manager<commit_after>package auth\n\nimport (\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\tsql \"github.com\/aodin\/aspect\"\n\t\"github.com\/aodin\/aspect\/postgres\"\n)\n\n\/\/ User is a database-backed user.\ntype User struct {\n\tID int64 `db:\"id,omitempty\"`\n\tEmail string `db:\"email\"`\n\tFirstName string `db:\"first_name\"`\n\tLastName string `db:\"last_name\"`\n\tAbout string `db:\"about\"`\n\tPhoto string `db:\"photo\"`\n\tIsActive bool `db:\"is_active\"`\n\tIsSuperuser bool `db:\"is_superuser\"`\n\tPassword string `db:\"password\"`\n\tToken string `db:\"token\"`\n\tTokenSetAt time.Time `db:\"token_set_at,omitempty\"`\n\tCreatedAt time.Time `db:\"created_at,omitempty\"`\n\tmanager *UserManager `db:\"-\"`\n}\n\n\/\/ Delete removes the user with the given ID from the database.\n\/\/ It will return an error if the user does not have an ID or the ID\n\/\/ was not deleted from the database. It will panic on any connection error.\nfunc (user User) Delete() error {\n\tif !user.Exists() {\n\t\treturn fmt.Errorf(\"auth: users without IDs cannot be deleted\")\n\t}\n\treturn user.manager.Delete(user.ID)\n}\n\n\/\/ Exists returns true if the user has an assigned ID\nfunc (user User) Exists() bool {\n\treturn user.ID != 0\n}\n\n\/\/ Name returns the concatenated first and last name\nfunc (user User) Name() string {\n\treturn fmt.Sprintf(\"%s %s\", user.FirstName, user.LastName)\n}\n\n\/\/ String returns the user id and email\nfunc (user User) String() string {\n\treturn fmt.Sprintf(\"%d: %s\", user.ID, user.Email)\n}\n\n\/\/ Users is the postgres schema for users\nvar Users = sql.Table(\"users\",\n\tsql.Column(\"id\", postgres.Serial{NotNull: true}),\n\tsql.Column(\"email\", sql.String{Unique: true, NotNull: true, Length: 255}),\n\tsql.Column(\"first_name\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"last_name\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"about\", sql.String{Length: 511, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"photo\", sql.String{Length: 511, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"is_active\", sql.Boolean{Default: sql.True}),\n\tsql.Column(\"is_superuser\", sql.Boolean{Default: sql.False}),\n\tsql.Column(\"password\", sql.String{Length: 255, NotNull: true}),\n\tsql.Column(\"token\", sql.String{Length: 255, NotNull: true, Default: sql.Blank}),\n\tsql.Column(\"token_set_at\", sql.Timestamp{Default: postgres.Now}),\n\tsql.Column(\"created_at\", sql.Timestamp{Default: postgres.Now}),\n\tsql.PrimaryKey(\"id\"),\n)\n\n\/\/ UserManager is the internal manager of users\ntype UserManager struct {\n\tconn sql.Connection\n\thash Hasher\n\ttokenFunc KeyFunc\n}\n\n\/\/ Create will create a new user with given email and cleartext password.\n\/\/ It will panic on any crypto or database connection errors.\nfunc (m *UserManager) Create(email, first, last, clear string) (User, error) {\n\treturn m.create(email, first, last, clear, false)\n}\n\n\/\/ CreateSuperuser will create a new superuser with given email and cleartext password.\n\/\/ It will panic on any crypto or database connection errors.\nfunc (m *UserManager) CreateSuperuser(email, first, last, clear string) (User, error) {\n\treturn m.create(email, first, last, clear, true)\n}\n\nfunc (m *UserManager) create(email, first, last, clear string, isAdmin bool) (User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t\tFirstName: first,\n\t\tLastName: last,\n\t\tIsActive: true,\n\t\tIsSuperuser: isAdmin,\n\t\tPassword: MakePassword(m.hash, clear),\n\t\tToken: m.tokenFunc(),\n\t\tTokenSetAt: time.Now(),\n\t\tmanager: m,\n\t}\n\terr := m.createUser(&user)\n\treturn user, err\n}\n\n\/\/ createUser checks for a duplicate email before inserting the user.\n\/\/ Email must already be normalized.\nfunc (m *UserManager) createUser(user *User) error {\n\tvar duplicate string\n\temail := sql.Select(\n\t\tUsers.C[\"email\"],\n\t).Where(Users.C[\"email\"].Equals(user.Email)).Limit(1)\n\tif m.conn.MustQueryOne(email, &duplicate) {\n\t\treturn fmt.Errorf(\n\t\t\t\"auth: user with email %s already exists\", duplicate,\n\t\t)\n\t}\n\n\t\/\/ Insert the new user\n\tstmt := postgres.Insert(Users).Returning(Users.Columns()...).Values(user)\n\tm.conn.MustQueryOne(stmt, user)\n\treturn nil\n}\n\n\/\/ Delete removes the user with the given ID from the database.\n\/\/ It will return an error if the ID was not deleted from the database.\n\/\/ It will panic on any connection error.\nfunc (m *UserManager) Delete(id int64) error {\n\tstmt := Users.Delete().Where(Users.C[\"id\"].Equals(id))\n\trowsAffected, err := m.conn.MustExecute(stmt).RowsAffected()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"auth: error during rows affected: %s\", err)\n\t}\n\tif rowsAffected == 0 {\n\t\treturn fmt.Errorf(\"auth: user ID %d was not deleted\", id)\n\t}\n\treturn nil\n}\n\n\/\/ GetByEmail returns the user with the given email.\nfunc (m *UserManager) GetByEmail(email string) (user User, err error) {\n\tstmt := Users.Select().Where(Users.C[\"email\"].Equals(email))\n\tif !m.conn.MustQueryOne(stmt, &user) {\n\t\terr = fmt.Errorf(\"auth: no user with email %s exists\", email)\n\t}\n\treturn\n}\n\n\/\/ GetByID returns the user with the given id.\nfunc (m *UserManager) GetByID(id int64) (user User, err error) {\n\tstmt := Users.Select().Where(Users.C[\"id\"].Equals(id))\n\tif !m.conn.MustQueryOne(stmt, &user) {\n\t\terr = fmt.Errorf(\"auth: no user with id %d exists\", id)\n\t}\n\treturn\n}\n\n\/\/ Hasher returns the hasher used by the UserManager\nfunc (m UserManager) Hasher() Hasher {\n\treturn m.hash\n}\n\nfunc NewUsers(conn sql.Connection) *UserManager {\n\thasher, err := GetHasher(\"pbkdf2_sha256\")\n\tif err != nil {\n\t\tlog.Panicf(\"auth: could not get pbkdf2_sha256 hasher: %s\", err)\n\t}\n\treturn newUsers(conn, hasher)\n}\n\nfunc MockUsers(conn sql.Connection) *UserManager {\n\treturn newUsers(conn, MockHasher(\"mock\", 1, sha1.New))\n}\n\nfunc newUsers(conn sql.Connection, hash Hasher) *UserManager {\n\treturn &UserManager{\n\t\tconn: conn,\n\t\thash: hash,\n\t\ttokenFunc: RandomKey,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package yiigo\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"vitess.io\/vitess\/go\/pools\"\n)\n\ntype redisOptions struct {\n\tpassword string\n\tdatabase int\n\tconnTimeout time.Duration\n\treadTimeout time.Duration\n\twriteTimeout time.Duration\n\tpoolSize int\n\tpoolLimit int\n\tidleTimeout time.Duration\n}\n\n\/\/ RedisOption configures how we set up the db\ntype RedisOption interface {\n\tapply(options *redisOptions)\n}\n\n\/\/ funcRedisOption implements redis option\ntype funcRedisOption struct {\n\tf func(options *redisOptions)\n}\n\nfunc (fo *funcRedisOption) apply(o *redisOptions) {\n\tfo.f(o)\n}\n\nfunc newFuncRedisOption(f func(options *redisOptions)) *funcRedisOption {\n\treturn &funcRedisOption{f: f}\n}\n\n\/\/ WithRedisPassword specifies the `Password` to redis.\nfunc WithRedisPassword(s string) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.password = s\n\t})\n}\n\n\/\/ WithRedisDatabase specifies the `Database` to redis.\nfunc WithRedisDatabase(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.database = n\n\t})\n}\n\n\/\/ WithRedisConnTimeout specifies the `ConnTimeout` to redis.\nfunc WithRedisConnTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.connTimeout = d\n\t})\n}\n\n\/\/ WithRedisReadTimeout specifies the `ReadTimeout` to redis.\nfunc WithRedisReadTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.readTimeout = d\n\t})\n}\n\n\/\/ WithRedisWriteTimeout specifies the `WriteTimeout` to redis.\nfunc WithRedisWriteTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.writeTimeout = d\n\t})\n}\n\n\/\/ WithRedisPoolSize specifies the `PoolSize` to redis.\nfunc WithRedisPoolSize(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.poolSize = n\n\t})\n}\n\n\/\/ WithRedisPoolLimit specifies the `PoolLimit` to redis.\nfunc WithRedisPoolLimit(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.poolLimit = n\n\t})\n}\n\n\/\/ WithRedisIdleTimeout specifies the `IdleTimeout` to redis.\nfunc WithRedisIdleTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.idleTimeout = d\n\t})\n}\n\n\/\/ RedisConn redis connection resource\ntype RedisConn struct {\n\tredis.Conn\n}\n\n\/\/ Close close connection resorce\nfunc (r RedisConn) Close() {\n\tr.Conn.Close()\n}\n\n\/\/ RedisPoolResource redis pool resource\ntype RedisPoolResource struct {\n\taddr string\n\toptions *redisOptions\n\tpool *pools.ResourcePool\n\tmutex sync.Mutex\n}\n\nfunc (r *RedisPoolResource) dial() (redis.Conn, error) {\n\tdialOptions := []redis.DialOption{\n\t\tredis.DialPassword(r.options.password),\n\t\tredis.DialDatabase(r.options.database),\n\t\tredis.DialConnectTimeout(r.options.connTimeout),\n\t\tredis.DialReadTimeout(r.options.readTimeout),\n\t\tredis.DialWriteTimeout(r.options.writeTimeout),\n\t}\n\n\tconn, err := redis.Dial(\"tcp\", r.addr, dialOptions...)\n\n\treturn conn, err\n}\n\nfunc (r *RedisPoolResource) init() {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif r.pool != nil && !r.pool.IsClosed() {\n\t\treturn\n\t}\n\n\tdf := func() (pools.Resource, error) {\n\t\tconn, err := r.dial()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn RedisConn{conn}, nil\n\t}\n\n\tr.pool = pools.NewResourcePool(df, r.options.poolSize, r.options.poolLimit, r.options.idleTimeout)\n}\n\n\/\/ Get get a connection resource from the pool.\nfunc (r *RedisPoolResource) Get() (RedisConn, error) {\n\tif r.pool.IsClosed() {\n\t\tr.init()\n\t}\n\n\tresource, err := r.pool.Get(context.TODO())\n\n\tif err != nil {\n\t\treturn RedisConn{}, err\n\t}\n\n\trc := resource.(RedisConn)\n\n\t\/\/ if rc is error, close and reconnect\n\tif rc.Err() != nil {\n\t\tconn, err := r.dial()\n\n\t\tif err != nil {\n\t\t\tr.pool.Put(rc)\n\n\t\t\treturn rc, err\n\t\t}\n\n\t\trc.Close()\n\n\t\treturn RedisConn{conn}, nil\n\t}\n\n\treturn rc, nil\n}\n\n\/\/ Put returns a connection resource to the pool.\nfunc (r *RedisPoolResource) Put(rc RedisConn) {\n\tr.pool.Put(rc)\n}\n\nvar (\n\t\/\/ Redis default redis connection pool\n\tRedis *RedisPoolResource\n\tredisMap sync.Map\n)\n\n\/\/ RegisterRedis register a redis\nfunc RegisterRedis(name, addr string, options ...RedisOption) {\n\to := &redisOptions{\n\t\tconnTimeout: 10 * time.Second,\n\t\treadTimeout: 10 * time.Second,\n\t\twriteTimeout: 10 * time.Second,\n\t\tpoolSize: 10,\n\t\tpoolLimit: 20,\n\t\tidleTimeout: 60 * time.Second,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tpoolResource := &RedisPoolResource{\n\t\taddr: addr,\n\t\toptions: o,\n\t}\n\tpoolResource.init()\n\n\tredisMap.Store(name, poolResource)\n\n\tif name == AsDefault {\n\t\tRedis = poolResource\n\t}\n}\n\n\/\/ UseRedis returns a redis pool.\nfunc UseRedis(name string) *RedisPoolResource {\n\tv, ok := redisMap.Load(name)\n\n\tif !ok {\n\t\tpanic(fmt.Errorf(\"yiigo: redis.%s is not registered\", name))\n\t}\n\n\treturn v.(*RedisPoolResource)\n}\n<commit_msg>update<commit_after>package yiigo\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"vitess.io\/vitess\/go\/pools\"\n)\n\ntype redisOptions struct {\n\tpassword string\n\tdatabase int\n\tconnTimeout time.Duration\n\treadTimeout time.Duration\n\twriteTimeout time.Duration\n\tpoolSize int\n\tpoolLimit int\n\tidleTimeout time.Duration\n}\n\n\/\/ RedisOption configures how we set up the db\ntype RedisOption interface {\n\tapply(options *redisOptions)\n}\n\n\/\/ funcRedisOption implements redis option\ntype funcRedisOption struct {\n\tf func(options *redisOptions)\n}\n\nfunc (fo *funcRedisOption) apply(o *redisOptions) {\n\tfo.f(o)\n}\n\nfunc newFuncRedisOption(f func(options *redisOptions)) *funcRedisOption {\n\treturn &funcRedisOption{f: f}\n}\n\n\/\/ WithRedisPassword specifies the `Password` to redis.\nfunc WithRedisPassword(s string) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.password = s\n\t})\n}\n\n\/\/ WithRedisDatabase specifies the `Database` to redis.\nfunc WithRedisDatabase(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.database = n\n\t})\n}\n\n\/\/ WithRedisConnTimeout specifies the `ConnTimeout` to redis.\nfunc WithRedisConnTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.connTimeout = d\n\t})\n}\n\n\/\/ WithRedisReadTimeout specifies the `ReadTimeout` to redis.\nfunc WithRedisReadTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.readTimeout = d\n\t})\n}\n\n\/\/ WithRedisWriteTimeout specifies the `WriteTimeout` to redis.\nfunc WithRedisWriteTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.writeTimeout = d\n\t})\n}\n\n\/\/ WithRedisPoolSize specifies the `PoolSize` to redis.\nfunc WithRedisPoolSize(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.poolSize = n\n\t})\n}\n\n\/\/ WithRedisPoolLimit specifies the `PoolLimit` to redis.\nfunc WithRedisPoolLimit(n int) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.poolLimit = n\n\t})\n}\n\n\/\/ WithRedisIdleTimeout specifies the `IdleTimeout` to redis.\nfunc WithRedisIdleTimeout(d time.Duration) RedisOption {\n\treturn newFuncRedisOption(func(o *redisOptions) {\n\t\to.idleTimeout = d\n\t})\n}\n\n\/\/ RedisConn redis connection resource\ntype RedisConn struct {\n\tredis.Conn\n}\n\n\/\/ Close close connection resorce\nfunc (r RedisConn) Close() {\n\tr.Conn.Close()\n}\n\n\/\/ RedisPoolResource redis pool resource\ntype RedisPoolResource struct {\n\taddr string\n\toptions *redisOptions\n\tpool *pools.ResourcePool\n\tmutex sync.Mutex\n}\n\nfunc (r *RedisPoolResource) dial() (redis.Conn, error) {\n\tdialOptions := []redis.DialOption{\n\t\tredis.DialPassword(r.options.password),\n\t\tredis.DialDatabase(r.options.database),\n\t\tredis.DialConnectTimeout(r.options.connTimeout),\n\t\tredis.DialReadTimeout(r.options.readTimeout),\n\t\tredis.DialWriteTimeout(r.options.writeTimeout),\n\t}\n\n\tconn, err := redis.Dial(\"tcp\", r.addr, dialOptions...)\n\n\treturn conn, err\n}\n\nfunc (r *RedisPoolResource) init() {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif r.pool != nil && !r.pool.IsClosed() {\n\t\treturn\n\t}\n\n\tdf := func() (pools.Resource, error) {\n\t\tconn, err := r.dial()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn RedisConn{conn}, nil\n\t}\n\n\tr.pool = pools.NewResourcePool(df, r.options.poolSize, r.options.poolLimit, r.options.idleTimeout)\n}\n\n\/\/ Get get a connection resource from the pool.\nfunc (r *RedisPoolResource) Get() (RedisConn, error) {\n\tif r.pool.IsClosed() {\n\t\tr.init()\n\t}\n\n\tresource, err := r.pool.Get(context.TODO())\n\n\tif err != nil {\n\t\treturn RedisConn{}, err\n\t}\n\n\trc := resource.(RedisConn)\n\n\t\/\/ if rc is error, close and reconnect\n\tif rc.Err() != nil {\n\t\tconn, err := r.dial()\n\n\t\tif err != nil {\n\t\t\tr.pool.Put(rc)\n\n\t\t\treturn rc, err\n\t\t}\n\n\t\trc.Close()\n\n\t\treturn RedisConn{conn}, nil\n\t}\n\n\treturn rc, nil\n}\n\n\/\/ Put returns a connection resource to the pool.\nfunc (r *RedisPoolResource) Put(rc RedisConn) {\n\tr.pool.Put(rc)\n}\n\nvar (\n\t\/\/ Redis default redis connection pool\n\tRedis *RedisPoolResource\n\tredisMap sync.Map\n)\n\n\/\/ RegisterRedis register a redis\nfunc RegisterRedis(name, addr string, options ...RedisOption) {\n\to := &redisOptions{\n\t\tconnTimeout: 10 * time.Second,\n\t\treadTimeout: 10 * time.Second,\n\t\twriteTimeout: 10 * time.Second,\n\t\tpoolSize: 10,\n\t\tpoolLimit: 20,\n\t\tidleTimeout: 60 * time.Second,\n\t}\n\n\tif len(options) > 0 {\n\t\tfor _, option := range options {\n\t\t\toption.apply(o)\n\t\t}\n\t}\n\n\tpoolResource := &RedisPoolResource{\n\t\taddr: addr,\n\t\toptions: o,\n\t}\n\n\tpoolResource.init()\n\n\tredisMap.Store(name, poolResource)\n\n\tif name == AsDefault {\n\t\tRedis = poolResource\n\t}\n}\n\n\/\/ UseRedis returns a redis pool.\nfunc UseRedis(name string) *RedisPoolResource {\n\tv, ok := redisMap.Load(name)\n\n\tif !ok {\n\t\tpanic(fmt.Errorf(\"yiigo: redis.%s is not registered\", name))\n\t}\n\n\treturn v.(*RedisPoolResource)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Based on:\n\/\/ - https:\/\/github.com\/looplab\/logspout-logstash\/blob\/master\/logstash.go\n\/\/ - https:\/\/github.com\/gettyimages\/logspout-kafka\/blob\/master\/kafka.go\n\/\/ - https:\/\/github.com\/gliderlabs\/logspout\/pull\/41\/files\n\/\/ - https:\/\/github.com\/fsouza\/go-dockerclient\/blob\/master\/container.go#L222\n\npackage redis\n\nimport (\n \"fmt\"\n \"strings\"\n \"log\"\n \"os\"\n \"time\"\n \"encoding\/json\"\n \"github.com\/gliderlabs\/logspout\/router\"\n \"github.com\/garyburd\/redigo\/redis\"\n)\n\n\ntype RedisAdapter struct {\n route *router.Route\n pool *redis.Pool\n key string\n docker_host string\n use_v0 bool\n}\n\ntype DockerFields struct {\n Name string `json:\"name\"`\n CID string `json:\"cid\"`\n Image string `json:\"image\"`\n ImageTag string `json:\"image_tag,omitempty\"`\n Source string `json:\"source\"`\n DockerHost string `json:\"docker_host,omitempty\"`\n}\n\ntype LogstashFields struct {\n Docker DockerFields `json:\"docker\"`\n}\n\ntype LogstashMessageV0 struct {\n Timestamp string `json:\"@timestamp\"`\n Sourcehost string `json:\"@source_host\"`\n Message string `json:\"@message\"`\n Options map[string]string `json:\"options,omitempty\"`\n InstanceId string `json:\"instance-id,omitempty\"`\n Fields LogstashFields `json:\"@fields\"`\n}\n\ntype LogstashMessageV1 struct {\n Timestamp string `json:\"@timestamp\"`\n Sourcehost string `json:\"host\"`\n Message string `json:\"message\"`\n Options map[string]string `json:\"options,omitempty\"`\n InstanceId string `json:\"instance-id,omitempty\"`\n Fields DockerFields `json:\"docker\"`\n}\n\n\nfunc init() {\n router.AdapterFactories.Register(NewRedisAdapter, \"redis\")\n}\n\nfunc NewRedisAdapter(route *router.Route) (router.LogAdapter, error) {\n\n \/\/ add port if missing\n address := route.Address\n if !strings.Contains(address, \":\") {\n address = address + \":6379\"\n }\n\n key := route.Options[\"key\"]\n if key == \"\" {\n key = getopt(\"REDIS_KEY\", \"logspout\")\n }\n\n password := route.Options[\"password\"]\n if password == \"\" {\n password = getopt(\"REDIS_PASSWORD\", \"\")\n }\n\n docker_host := getopt(\"REDIS_DOCKER_HOST\", \"\")\n\n use_v0 := route.Options[\"use_v0_layout\"] != \"\"\n if !use_v0 {\n use_v0 = getopt(\"REDIS_USE_V0_LAYOUT\", \"\") != \"\"\n }\n\n if os.Getenv(\"DEBUG\") != \"\" {\n log.Printf(\"Using Redis server '%s', password: %t, pushkey: '%s', v0 layout: %t\\n\",\n address, password != \"\", key, use_v0)\n }\n\n pool := newRedisConnectionPool(address, password)\n\n \/\/ lets test the water\n conn := pool.Get()\n defer conn.Close()\n res, err := conn.Do(\"PING\")\n if err != nil {\n return nil, errorf(\"Cannot connect to Redis server %s: %v\", address, err)\n }\n if os.Getenv(\"DEBUG\") != \"\" {\n log.Printf(\"Redis connect successful, got response: %s\\n\", res)\n }\n\n return &RedisAdapter{\n route: route,\n pool: pool,\n key: key,\n docker_host: docker_host,\n use_v0: use_v0,\n }, nil\n}\n\nfunc GetLogspoutOptionsString(env []string) string {\n\tif env != nil {\n\t\tfor _, value := range env {\n\t\t\tif strings.HasPrefix(value, \"LOGSPOUT_OPTIONS=\") {\n\t\t\t\treturn strings.TrimPrefix(value, \"LOGSPOUT_OPTIONS=\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc GetAwsInstanceId() string {\n resp, err := http.Get(\"http:\/\/169.254.169.254\/latest\/meta-data\/instance-id\")\n\tinstance_id := \"\"\n\tif err == nil {\n\t\tvalue, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\tinstance_id = string(value)\n\t\t}\n\t\tresp.Body.Close()\n\t}\n return instance_id\n}\n\nfunc UnmarshalOptions(opt_string string) map[string]string {\n\tvar options map[string]string\n\n\tif opt_string != \"\" {\n\t\tb := []byte(opt_string)\n\n\t\tjson.Unmarshal(b, &options)\n\t\treturn options\n\t}\n\treturn nil\n}\n\nfunc (a *RedisAdapter) Stream(logstream chan *router.Message) {\n conn := a.pool.Get()\n defer conn.Close()\n\n mute := false\n\n instance_id := GetAwsInstanceId()\n\n options := UnmarshalOptions(getopt(\"OPTIONS\", \"\"))\n\n for m := range logstream {\n msg := createLogstashMessage(m, a.docker_host, a.use_v0)\n js, err := json.Marshal(msg)\n if err != nil {\n if !mute {\n log.Println(\"redis: error on json.Marshal (muting until restored):\", err)\n mute = true\n }\n continue\n }\n _, err = conn.Do(\"RPUSH\", a.key, js)\n if err != nil {\n if !mute {\n log.Println(\"redis: error on rpush (muting until restored):\", err)\n mute = true\n }\n continue\n }\n mute = false\n }\n}\n\nfunc errorf(format string, a ...interface{}) (err error) {\n err = fmt.Errorf(format, a...)\n if os.Getenv(\"DEBUG\") != \"\" {\n fmt.Println(err.Error())\n }\n return\n}\n\nfunc getopt(name, dfault string) string {\n value := os.Getenv(name)\n if value == \"\" {\n value = dfault\n }\n return value\n}\n\nfunc newRedisConnectionPool(server, password string) *redis.Pool {\n return &redis.Pool{\n MaxIdle: 3,\n IdleTimeout: 240 * time.Second,\n Dial: func () (redis.Conn, error) {\n c, err := redis.Dial(\"tcp\", server)\n if err != nil {\n return nil, err\n }\n if password != \"\" {\n if _, err := c.Do(\"AUTH\", password); err != nil {\n c.Close()\n return nil, err\n }\n }\n return c, err\n },\n TestOnBorrow: func(c redis.Conn, t time.Time) error {\n _, err := c.Do(\"PING\")\n return err\n },\n }\n}\n\nfunc splitImage(image string) (string, string) {\n n := strings.Index(image, \":\")\n if n > -1 {\n return image[0:n], image[n+1:]\n }\n return image, \"\"\n}\n\nfunc createLogstashMessage(m *router.Message, docker_host string, use_v0 bool, options map[string]string) interface{} {\n image_name, image_tag := splitImage(m.Container.Config.Image)\n cid := m.Container.ID[0:12]\n name := m.Container.Name[1:]\n timestamp := m.Time.Format(time.RFC3339Nano)\n\n container_options := UnmarshalOptions(GetLogspoutOptionsString(m.Container.Config.Env))\n\n\t\/\/ We give preference to the containers environment that is sending us the message\n\tif container_options == nil {\n\t\tcontainer_options = options\n\t} else if options != nil {\n\t\tfor k, v := range options {\n\t\t\tif _, ok := container_options[k]; !ok {\n\t\t\t\tcontainer_options[k] = v\n\t\t\t}\n\t\t}\n\t}\n\n if use_v0 {\n return LogstashMessageV0{\n Message: m.Data,\n Timestamp: timestamp,\n Sourcehost: m.Container.Config.Hostname,\n Fields: LogstashFields{\n Docker: DockerFields{\n CID: cid,\n Name: name,\n Image: image_name,\n ImageTag: image_tag,\n Source: m.Source,\n DockerHost: docker_host,\n },\n },\n }\n }\n\n return LogstashMessageV1{\n Message: m.Data,\n Timestamp: timestamp,\n Sourcehost: m.Container.Config.Hostname,\n Fields: DockerFields{\n CID: cid,\n Name: name,\n Image: image_name,\n ImageTag: image_tag,\n Source: m.Source,\n DockerHost: docker_host,\n },\n }\n}\n<commit_msg>including imports<commit_after>\/\/ Based on:\n\/\/ - https:\/\/github.com\/looplab\/logspout-logstash\/blob\/master\/logstash.go\n\/\/ - https:\/\/github.com\/gettyimages\/logspout-kafka\/blob\/master\/kafka.go\n\/\/ - https:\/\/github.com\/gliderlabs\/logspout\/pull\/41\/files\n\/\/ - https:\/\/github.com\/fsouza\/go-dockerclient\/blob\/master\/container.go#L222\n\npackage redis\n\nimport (\n \"fmt\"\n \"strings\"\n \"log\"\n \"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n \"os\"\n \"time\"\n \"encoding\/json\"\n \"github.com\/gliderlabs\/logspout\/router\"\n \"github.com\/garyburd\/redigo\/redis\"\n)\n\n\ntype RedisAdapter struct {\n route *router.Route\n pool *redis.Pool\n key string\n docker_host string\n use_v0 bool\n}\n\ntype DockerFields struct {\n Name string `json:\"name\"`\n CID string `json:\"cid\"`\n Image string `json:\"image\"`\n ImageTag string `json:\"image_tag,omitempty\"`\n Source string `json:\"source\"`\n DockerHost string `json:\"docker_host,omitempty\"`\n}\n\ntype LogstashFields struct {\n Docker DockerFields `json:\"docker\"`\n}\n\ntype LogstashMessageV0 struct {\n Timestamp string `json:\"@timestamp\"`\n Sourcehost string `json:\"@source_host\"`\n Message string `json:\"@message\"`\n Options map[string]string `json:\"options,omitempty\"`\n InstanceId string `json:\"instance-id,omitempty\"`\n Fields LogstashFields `json:\"@fields\"`\n}\n\ntype LogstashMessageV1 struct {\n Timestamp string `json:\"@timestamp\"`\n Sourcehost string `json:\"host\"`\n Message string `json:\"message\"`\n Options map[string]string `json:\"options,omitempty\"`\n InstanceId string `json:\"instance-id,omitempty\"`\n Fields DockerFields `json:\"docker\"`\n}\n\n\nfunc init() {\n router.AdapterFactories.Register(NewRedisAdapter, \"redis\")\n}\n\nfunc NewRedisAdapter(route *router.Route) (router.LogAdapter, error) {\n\n \/\/ add port if missing\n address := route.Address\n if !strings.Contains(address, \":\") {\n address = address + \":6379\"\n }\n\n key := route.Options[\"key\"]\n if key == \"\" {\n key = getopt(\"REDIS_KEY\", \"logspout\")\n }\n\n password := route.Options[\"password\"]\n if password == \"\" {\n password = getopt(\"REDIS_PASSWORD\", \"\")\n }\n\n docker_host := getopt(\"REDIS_DOCKER_HOST\", \"\")\n\n use_v0 := route.Options[\"use_v0_layout\"] != \"\"\n if !use_v0 {\n use_v0 = getopt(\"REDIS_USE_V0_LAYOUT\", \"\") != \"\"\n }\n\n if os.Getenv(\"DEBUG\") != \"\" {\n log.Printf(\"Using Redis server '%s', password: %t, pushkey: '%s', v0 layout: %t\\n\",\n address, password != \"\", key, use_v0)\n }\n\n pool := newRedisConnectionPool(address, password)\n\n \/\/ lets test the water\n conn := pool.Get()\n defer conn.Close()\n res, err := conn.Do(\"PING\")\n if err != nil {\n return nil, errorf(\"Cannot connect to Redis server %s: %v\", address, err)\n }\n if os.Getenv(\"DEBUG\") != \"\" {\n log.Printf(\"Redis connect successful, got response: %s\\n\", res)\n }\n\n return &RedisAdapter{\n route: route,\n pool: pool,\n key: key,\n docker_host: docker_host,\n use_v0: use_v0,\n }, nil\n}\n\nfunc GetLogspoutOptionsString(env []string) string {\n\tif env != nil {\n\t\tfor _, value := range env {\n\t\t\tif strings.HasPrefix(value, \"LOGSPOUT_OPTIONS=\") {\n\t\t\t\treturn strings.TrimPrefix(value, \"LOGSPOUT_OPTIONS=\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc GetAwsInstanceId() string {\n resp, err := http.Get(\"http:\/\/169.254.169.254\/latest\/meta-data\/instance-id\")\n\tinstance_id := \"\"\n\tif err == nil {\n\t\tvalue, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\tinstance_id = string(value)\n\t\t}\n\t\tresp.Body.Close()\n\t}\n return instance_id\n}\n\nfunc UnmarshalOptions(opt_string string) map[string]string {\n\tvar options map[string]string\n\n\tif opt_string != \"\" {\n\t\tb := []byte(opt_string)\n\n\t\tjson.Unmarshal(b, &options)\n\t\treturn options\n\t}\n\treturn nil\n}\n\nfunc (a *RedisAdapter) Stream(logstream chan *router.Message) {\n conn := a.pool.Get()\n defer conn.Close()\n\n mute := false\n\n instance_id := GetAwsInstanceId()\n\n options := UnmarshalOptions(getopt(\"OPTIONS\", \"\"))\n\n for m := range logstream {\n msg := createLogstashMessage(m, a.docker_host, a.use_v0, options)\n js, err := json.Marshal(msg)\n if err != nil {\n if !mute {\n log.Println(\"redis: error on json.Marshal (muting until restored):\", err)\n mute = true\n }\n continue\n }\n _, err = conn.Do(\"RPUSH\", a.key, js)\n if err != nil {\n if !mute {\n log.Println(\"redis: error on rpush (muting until restored):\", err)\n mute = true\n }\n continue\n }\n mute = false\n }\n}\n\nfunc errorf(format string, a ...interface{}) (err error) {\n err = fmt.Errorf(format, a...)\n if os.Getenv(\"DEBUG\") != \"\" {\n fmt.Println(err.Error())\n }\n return\n}\n\nfunc getopt(name, dfault string) string {\n value := os.Getenv(name)\n if value == \"\" {\n value = dfault\n }\n return value\n}\n\nfunc newRedisConnectionPool(server, password string) *redis.Pool {\n return &redis.Pool{\n MaxIdle: 3,\n IdleTimeout: 240 * time.Second,\n Dial: func () (redis.Conn, error) {\n c, err := redis.Dial(\"tcp\", server)\n if err != nil {\n return nil, err\n }\n if password != \"\" {\n if _, err := c.Do(\"AUTH\", password); err != nil {\n c.Close()\n return nil, err\n }\n }\n return c, err\n },\n TestOnBorrow: func(c redis.Conn, t time.Time) error {\n _, err := c.Do(\"PING\")\n return err\n },\n }\n}\n\nfunc splitImage(image string) (string, string) {\n n := strings.Index(image, \":\")\n if n > -1 {\n return image[0:n], image[n+1:]\n }\n return image, \"\"\n}\n\nfunc createLogstashMessage(m *router.Message, docker_host string, use_v0 bool, options map[string]string) interface{} {\n image_name, image_tag := splitImage(m.Container.Config.Image)\n cid := m.Container.ID[0:12]\n name := m.Container.Name[1:]\n timestamp := m.Time.Format(time.RFC3339Nano)\n\n container_options := UnmarshalOptions(GetLogspoutOptionsString(m.Container.Config.Env))\n\n\t\/\/ We give preference to the containers environment that is sending us the message\n\tif container_options == nil {\n\t\tcontainer_options = options\n\t} else if options != nil {\n\t\tfor k, v := range options {\n\t\t\tif _, ok := container_options[k]; !ok {\n\t\t\t\tcontainer_options[k] = v\n\t\t\t}\n\t\t}\n\t}\n\n if use_v0 {\n return LogstashMessageV0{\n Message: m.Data,\n Timestamp: timestamp,\n Sourcehost: m.Container.Config.Hostname,\n Fields: LogstashFields{\n Docker: DockerFields{\n CID: cid,\n Name: name,\n Image: image_name,\n ImageTag: image_tag,\n Source: m.Source,\n DockerHost: docker_host,\n },\n },\n }\n }\n\n return LogstashMessageV1{\n Message: m.Data,\n Timestamp: timestamp,\n Sourcehost: m.Container.Config.Hostname,\n Fields: DockerFields{\n CID: cid,\n Name: name,\n Image: image_name,\n ImageTag: image_tag,\n Source: m.Source,\n DockerHost: docker_host,\n },\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package ucfg\n\nimport (\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (c *Config) Unpack(to interface{}, options ...Option) error {\n\topts := makeOptions(options)\n\n\tif c == nil {\n\t\treturn raiseNil(ErrNilConfig)\n\t}\n\tif to == nil {\n\t\treturn raiseNil(ErrNilValue)\n\t}\n\n\tvTo := reflect.ValueOf(to)\n\tif to == nil || (vTo.Kind() != reflect.Ptr && vTo.Kind() != reflect.Map) {\n\t\treturn raisePointerRequired(vTo)\n\t}\n\treturn reifyInto(opts, vTo, c)\n}\n\nfunc reifyInto(opts options, to reflect.Value, from *Config) Error {\n\tto = chaseValuePointers(to)\n\n\tif to, ok := tryTConfig(to); ok {\n\t\treturn mergeConfig(to.Addr().Interface().(*Config), from)\n\t}\n\n\ttTo := chaseTypePointers(to.Type())\n\tswitch tTo.Kind() {\n\tcase reflect.Map:\n\t\treturn reifyMap(opts, to, from)\n\tcase reflect.Struct:\n\t\treturn reifyStruct(opts, to, from)\n\t}\n\n\treturn raiseInvalidTopLevelType(to.Interface())\n}\n\nfunc reifyMap(opts options, to reflect.Value, from *Config) Error {\n\tif to.Type().Key().Kind() != reflect.String {\n\t\treturn raiseKeyInvalidTypeUnpack(to.Type(), from)\n\t}\n\n\tif len(from.fields.fields) == 0 {\n\t\treturn nil\n\t}\n\n\tif to.IsNil() {\n\t\tto.Set(reflect.MakeMap(to.Type()))\n\t}\n\tfor k, value := range from.fields.fields {\n\t\tkey := reflect.ValueOf(k)\n\n\t\told := to.MapIndex(key)\n\t\tvar v reflect.Value\n\t\tvar err Error\n\n\t\tif !old.IsValid() {\n\t\t\tv, err = reifyValue(opts, to.Type().Elem(), value)\n\t\t} else {\n\t\t\tv, err = reifyMergeValue(opts, old, value)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tto.SetMapIndex(key, v)\n\t}\n\n\treturn nil\n}\n\nfunc reifyStruct(opts options, orig reflect.Value, cfg *Config) Error {\n\torig = chaseValuePointers(orig)\n\n\tto := chaseValuePointers(reflect.New(chaseTypePointers(orig.Type())))\n\tif orig.Kind() == reflect.Struct { \/\/ if orig is has been allocated copy into to\n\t\tto.Set(orig)\n\t}\n\n\tnumField := to.NumField()\n\tfor i := 0; i < numField; i++ {\n\t\tvar err Error\n\t\tstField := to.Type().Field(i)\n\t\tvField := to.Field(i)\n\t\tname, tagOpts := parseTags(stField.Tag.Get(opts.tag))\n\n\t\tif tagOpts.squash {\n\t\t\tvField := chaseValue(vField)\n\t\t\tswitch vField.Kind() {\n\t\t\tcase reflect.Struct, reflect.Map:\n\t\t\t\terr = reifyInto(opts, vField, cfg)\n\t\t\tdefault:\n\t\t\t\treturn raiseInlineNeedsObject(cfg, stField.Name, vField.Type())\n\t\t\t}\n\t\t} else {\n\t\t\tname = fieldName(name, stField.Name)\n\t\t\terr = reifyGetField(cfg, opts, name, vField)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\torig.Set(pointerize(orig.Type(), to.Type(), to))\n\treturn nil\n}\n\nfunc reifyGetField(cfg *Config, opts options, name string, to reflect.Value) Error {\n\tfrom, field, err := reifyCfgPath(cfg, opts, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue, ok := from.fields.fields[field]\n\tif !ok {\n\t\t\/\/ TODO: handle missing config\n\t\treturn nil\n\t}\n\n\tv, err := reifyMergeValue(opts, to, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tto.Set(v)\n\treturn nil\n}\n\nfunc reifyCfgPath(cfg *Config, opts options, field string) (*Config, string, Error) {\n\tif opts.pathSep == \"\" {\n\t\treturn cfg, field, nil\n\t}\n\n\tpath := strings.Split(field, opts.pathSep)\n\tfor len(path) > 1 {\n\t\tfield = path[0]\n\t\tpath = path[1:]\n\n\t\tsub, exists := cfg.fields.fields[field]\n\t\tif !exists {\n\t\t\treturn nil, field, raiseMissing(cfg, field)\n\t\t}\n\n\t\tcSub, err := sub.toConfig()\n\t\tif err != nil {\n\t\t\treturn nil, field, raiseExpectedObject(sub)\n\t\t}\n\t\tcfg = cSub\n\t}\n\tfield = path[0]\n\n\treturn cfg, field, nil\n}\n\nfunc reifyValue(opts options, t reflect.Type, val value) (reflect.Value, Error) {\n\tif t.Kind() == reflect.Interface && t.NumMethod() == 0 {\n\t\treturn reflect.ValueOf(val.reify()), nil\n\t}\n\n\tbaseType := chaseTypePointers(t)\n\tif tConfig.ConvertibleTo(baseType) {\n\t\tif _, err := val.toConfig(); err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tv := val.reflect().Convert(reflect.PtrTo(baseType))\n\t\tif t == baseType { \/\/ copy config\n\t\t\tv = v.Elem()\n\t\t} else {\n\t\t\tv = pointerize(t, baseType, v)\n\t\t}\n\t\treturn v, nil\n\t}\n\n\tif baseType.Kind() == reflect.Struct {\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tnewSt := reflect.New(baseType)\n\t\tif err := reifyInto(opts, newSt, sub); err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\n\t\tif t.Kind() != reflect.Ptr {\n\t\t\treturn newSt.Elem(), nil\n\t\t}\n\t\treturn pointerize(t, baseType, newSt), nil\n\t}\n\n\tswitch baseType.Kind() {\n\tcase reflect.Map:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tif baseType.Key().Kind() != reflect.String {\n\t\t\treturn reflect.Value{}, raiseKeyInvalidTypeUnpack(baseType, sub)\n\t\t}\n\n\t\tnewMap := reflect.MakeMap(baseType)\n\t\tif err := reifyInto(opts, newMap, sub); err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\treturn newMap, nil\n\n\tcase reflect.Slice:\n\t\tv, err := reifySlice(opts, baseType, castArr(val))\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\treturn pointerize(t, baseType, v), nil\n\t}\n\n\treturn reifyPrimitive(opts, val, t, baseType)\n}\n\nfunc reifyMergeValue(\n\topts options,\n\toldValue reflect.Value, val value,\n) (reflect.Value, Error) {\n\told := chaseValueInterfaces(oldValue)\n\tt := old.Type()\n\told = chaseValuePointers(old)\n\tif (old.Kind() == reflect.Ptr || old.Kind() == reflect.Interface) && old.IsNil() {\n\t\treturn reifyValue(opts, t, val)\n\t}\n\n\tbaseType := chaseTypePointers(old.Type())\n\tif tConfig.ConvertibleTo(baseType) {\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tif t == baseType {\n\t\t\t\/\/ no pointer -> return type mismatch\n\t\t\treturn reflect.Value{}, raisePointerRequired(oldValue)\n\t\t}\n\n\t\t\/\/ check if old is nil -> copy reference only\n\t\tif old.Kind() == reflect.Ptr && old.IsNil() {\n\t\t\tv := val.reflect().Convert(reflect.PtrTo(baseType))\n\t\t\treturn pointerize(t, baseType, v), nil\n\t\t}\n\n\t\t\/\/ check if old == value\n\t\tsubOld := chaseValuePointers(old).Addr().Convert(tConfigPtr).Interface().(*Config)\n\t\tif sub == subOld {\n\t\t\treturn oldValue, nil\n\t\t}\n\n\t\t\/\/ old != value -> merge value into old\n\t\treturn oldValue, mergeConfig(subOld, sub)\n\t}\n\n\tswitch baseType.Kind() {\n\tcase reflect.Map:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\t\treturn old, reifyMap(opts, old, sub)\n\n\tcase reflect.Struct:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\t\treturn oldValue, reifyStruct(opts, old, sub)\n\n\tcase reflect.Array:\n\t\treturn reifyArray(opts, old, baseType, castArr(val))\n\n\tcase reflect.Slice:\n\t\treturn reifySlice(opts, baseType, castArr(val))\n\t}\n\n\treturn reifyPrimitive(opts, val, t, baseType)\n}\n\nfunc reifyPrimitive(\n\topts options,\n\tval value,\n\tt, baseType reflect.Type,\n) (reflect.Value, Error) {\n\t\/\/ zero initialize value if val==nil\n\tif _, ok := val.(*cfgNil); ok {\n\t\treturn pointerize(t, baseType, reflect.Zero(baseType)), nil\n\t}\n\n\t\/\/ try primitive conversion\n\tswitch {\n\tcase val.typ() == baseType:\n\t\treturn pointerize(t, baseType, val.reflect()), nil\n\n\tcase baseType.Kind() == reflect.String:\n\t\ts, err := val.toString()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"string\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(s)), nil\n\n\tcase baseType == tDuration:\n\t\tvar d time.Duration\n\t\tvar err error\n\n\t\tswitch v := val.(type) {\n\t\tcase *cfgInt:\n\t\t\td = time.Duration(v.i) * time.Second\n\t\tcase *cfgFloat:\n\t\t\td = time.Duration(v.f * float64(time.Second))\n\t\tcase *cfgString:\n\t\t\td, err = time.ParseDuration(v.s)\n\t\tdefault:\n\t\t\ts, err := val.toString()\n\t\t\tif err != nil {\n\t\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"duration\")\n\t\t\t}\n\n\t\t\td, err = time.ParseDuration(s)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"duration\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(d)), nil\n\n\tcase baseType == tRegexp:\n\t\ts, err := val.toString()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"regex\")\n\t\t}\n\n\t\tr, err := regexp.Compile(s)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"regex\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(r).Elem()), nil\n\n\tcase val.typ().ConvertibleTo(baseType):\n\t\treturn pointerize(t, baseType, val.reflect().Convert(baseType)), nil\n\n\t}\n\n\treturn reflect.Value{}, raiseToTypeNotSupported(val, baseType)\n}\n\nfunc reifyArray(\n\topts options,\n\tto reflect.Value, tTo reflect.Type,\n\tarr *cfgArray,\n) (reflect.Value, Error) {\n\tif arr.Len() != tTo.Len() {\n\t\treturn reflect.Value{}, raiseArraySize(tTo, arr)\n\t}\n\treturn reifyDoArray(opts, to, tTo.Elem(), arr)\n}\n\nfunc reifySlice(opts options, tTo reflect.Type, arr *cfgArray) (reflect.Value, Error) {\n\tto := reflect.MakeSlice(tTo, arr.Len(), arr.Len())\n\treturn reifyDoArray(opts, to, tTo.Elem(), arr)\n}\n\nfunc reifyDoArray(\n\topts options,\n\tto reflect.Value, elemT reflect.Type,\n\tarr *cfgArray,\n) (reflect.Value, Error) {\n\tfor i, from := range arr.arr {\n\t\tv, err := reifyValue(opts, elemT, from)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\tto.Index(i).Set(v)\n\t}\n\treturn to, nil\n}\n\nfunc castArr(v value) *cfgArray {\n\tif arr, ok := v.(*cfgArray); ok {\n\t\treturn arr\n\t}\n\n\tif v.Len() == 0 {\n\t\treturn &cfgArray{}\n\t}\n\n\treturn &cfgArray{\n\t\tarr: []value{v},\n\t}\n}\n<commit_msg>Fix regexp\/timeout unpacking<commit_after>package ucfg\n\nimport (\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (c *Config) Unpack(to interface{}, options ...Option) error {\n\topts := makeOptions(options)\n\n\tif c == nil {\n\t\treturn raiseNil(ErrNilConfig)\n\t}\n\tif to == nil {\n\t\treturn raiseNil(ErrNilValue)\n\t}\n\n\tvTo := reflect.ValueOf(to)\n\tif to == nil || (vTo.Kind() != reflect.Ptr && vTo.Kind() != reflect.Map) {\n\t\treturn raisePointerRequired(vTo)\n\t}\n\treturn reifyInto(opts, vTo, c)\n}\n\nfunc reifyInto(opts options, to reflect.Value, from *Config) Error {\n\tto = chaseValuePointers(to)\n\n\tif to, ok := tryTConfig(to); ok {\n\t\treturn mergeConfig(to.Addr().Interface().(*Config), from)\n\t}\n\n\ttTo := chaseTypePointers(to.Type())\n\tswitch tTo.Kind() {\n\tcase reflect.Map:\n\t\treturn reifyMap(opts, to, from)\n\tcase reflect.Struct:\n\t\treturn reifyStruct(opts, to, from)\n\t}\n\n\treturn raiseInvalidTopLevelType(to.Interface())\n}\n\nfunc reifyMap(opts options, to reflect.Value, from *Config) Error {\n\tif to.Type().Key().Kind() != reflect.String {\n\t\treturn raiseKeyInvalidTypeUnpack(to.Type(), from)\n\t}\n\n\tif len(from.fields.fields) == 0 {\n\t\treturn nil\n\t}\n\n\tif to.IsNil() {\n\t\tto.Set(reflect.MakeMap(to.Type()))\n\t}\n\tfor k, value := range from.fields.fields {\n\t\tkey := reflect.ValueOf(k)\n\n\t\told := to.MapIndex(key)\n\t\tvar v reflect.Value\n\t\tvar err Error\n\n\t\tif !old.IsValid() {\n\t\t\tv, err = reifyValue(opts, to.Type().Elem(), value)\n\t\t} else {\n\t\t\tv, err = reifyMergeValue(opts, old, value)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tto.SetMapIndex(key, v)\n\t}\n\n\treturn nil\n}\n\nfunc reifyStruct(opts options, orig reflect.Value, cfg *Config) Error {\n\torig = chaseValuePointers(orig)\n\n\tto := chaseValuePointers(reflect.New(chaseTypePointers(orig.Type())))\n\tif orig.Kind() == reflect.Struct { \/\/ if orig is has been allocated copy into to\n\t\tto.Set(orig)\n\t}\n\n\tnumField := to.NumField()\n\tfor i := 0; i < numField; i++ {\n\t\tvar err Error\n\t\tstField := to.Type().Field(i)\n\t\tvField := to.Field(i)\n\t\tname, tagOpts := parseTags(stField.Tag.Get(opts.tag))\n\n\t\tif tagOpts.squash {\n\t\t\tvField := chaseValue(vField)\n\t\t\tswitch vField.Kind() {\n\t\t\tcase reflect.Struct, reflect.Map:\n\t\t\t\terr = reifyInto(opts, vField, cfg)\n\t\t\tdefault:\n\t\t\t\treturn raiseInlineNeedsObject(cfg, stField.Name, vField.Type())\n\t\t\t}\n\t\t} else {\n\t\t\tname = fieldName(name, stField.Name)\n\t\t\terr = reifyGetField(cfg, opts, name, vField)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\torig.Set(pointerize(orig.Type(), to.Type(), to))\n\treturn nil\n}\n\nfunc reifyGetField(cfg *Config, opts options, name string, to reflect.Value) Error {\n\tfrom, field, err := reifyCfgPath(cfg, opts, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue, ok := from.fields.fields[field]\n\tif !ok {\n\t\t\/\/ TODO: handle missing config\n\t\treturn nil\n\t}\n\n\tv, err := reifyMergeValue(opts, to, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tto.Set(v)\n\treturn nil\n}\n\nfunc reifyCfgPath(cfg *Config, opts options, field string) (*Config, string, Error) {\n\tif opts.pathSep == \"\" {\n\t\treturn cfg, field, nil\n\t}\n\n\tpath := strings.Split(field, opts.pathSep)\n\tfor len(path) > 1 {\n\t\tfield = path[0]\n\t\tpath = path[1:]\n\n\t\tsub, exists := cfg.fields.fields[field]\n\t\tif !exists {\n\t\t\treturn nil, field, raiseMissing(cfg, field)\n\t\t}\n\n\t\tcSub, err := sub.toConfig()\n\t\tif err != nil {\n\t\t\treturn nil, field, raiseExpectedObject(sub)\n\t\t}\n\t\tcfg = cSub\n\t}\n\tfield = path[0]\n\n\treturn cfg, field, nil\n}\n\nfunc reifyValue(opts options, t reflect.Type, val value) (reflect.Value, Error) {\n\tif t.Kind() == reflect.Interface && t.NumMethod() == 0 {\n\t\treturn reflect.ValueOf(val.reify()), nil\n\t}\n\n\tbaseType := chaseTypePointers(t)\n\tif tConfig.ConvertibleTo(baseType) {\n\t\tif _, err := val.toConfig(); err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tv := val.reflect().Convert(reflect.PtrTo(baseType))\n\t\tif t == baseType { \/\/ copy config\n\t\t\tv = v.Elem()\n\t\t} else {\n\t\t\tv = pointerize(t, baseType, v)\n\t\t}\n\t\treturn v, nil\n\t}\n\n\tif baseType.Kind() == reflect.Struct {\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\t\/\/ try primitive\n\t\t\tif v, check := reifyPrimitive(opts, val, t, baseType); check == nil {\n\t\t\t\treturn v, nil\n\t\t\t}\n\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tnewSt := reflect.New(baseType)\n\t\tif err := reifyInto(opts, newSt, sub); err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\n\t\tif t.Kind() != reflect.Ptr {\n\t\t\treturn newSt.Elem(), nil\n\t\t}\n\t\treturn pointerize(t, baseType, newSt), nil\n\t}\n\n\tswitch baseType.Kind() {\n\tcase reflect.Map:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tif baseType.Key().Kind() != reflect.String {\n\t\t\treturn reflect.Value{}, raiseKeyInvalidTypeUnpack(baseType, sub)\n\t\t}\n\n\t\tnewMap := reflect.MakeMap(baseType)\n\t\tif err := reifyInto(opts, newMap, sub); err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\treturn newMap, nil\n\n\tcase reflect.Slice:\n\t\tv, err := reifySlice(opts, baseType, castArr(val))\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\treturn pointerize(t, baseType, v), nil\n\t}\n\n\treturn reifyPrimitive(opts, val, t, baseType)\n}\n\nfunc reifyMergeValue(\n\topts options,\n\toldValue reflect.Value, val value,\n) (reflect.Value, Error) {\n\told := chaseValueInterfaces(oldValue)\n\tt := old.Type()\n\told = chaseValuePointers(old)\n\tif (old.Kind() == reflect.Ptr || old.Kind() == reflect.Interface) && old.IsNil() {\n\t\treturn reifyValue(opts, t, val)\n\t}\n\n\tbaseType := chaseTypePointers(old.Type())\n\tif tConfig.ConvertibleTo(baseType) {\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\n\t\tif t == baseType {\n\t\t\t\/\/ no pointer -> return type mismatch\n\t\t\treturn reflect.Value{}, raisePointerRequired(oldValue)\n\t\t}\n\n\t\t\/\/ check if old is nil -> copy reference only\n\t\tif old.Kind() == reflect.Ptr && old.IsNil() {\n\t\t\tv := val.reflect().Convert(reflect.PtrTo(baseType))\n\t\t\treturn pointerize(t, baseType, v), nil\n\t\t}\n\n\t\t\/\/ check if old == value\n\t\tsubOld := chaseValuePointers(old).Addr().Convert(tConfigPtr).Interface().(*Config)\n\t\tif sub == subOld {\n\t\t\treturn oldValue, nil\n\t\t}\n\n\t\t\/\/ old != value -> merge value into old\n\t\treturn oldValue, mergeConfig(subOld, sub)\n\t}\n\n\tswitch baseType.Kind() {\n\tcase reflect.Map:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\t\treturn old, reifyMap(opts, old, sub)\n\n\tcase reflect.Struct:\n\t\tsub, err := val.toConfig()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseExpectedObject(val)\n\t\t}\n\t\treturn oldValue, reifyStruct(opts, old, sub)\n\n\tcase reflect.Array:\n\t\treturn reifyArray(opts, old, baseType, castArr(val))\n\n\tcase reflect.Slice:\n\t\treturn reifySlice(opts, baseType, castArr(val))\n\t}\n\n\treturn reifyPrimitive(opts, val, t, baseType)\n}\n\nfunc reifyPrimitive(\n\topts options,\n\tval value,\n\tt, baseType reflect.Type,\n) (reflect.Value, Error) {\n\t\/\/ zero initialize value if val==nil\n\tif _, ok := val.(*cfgNil); ok {\n\t\treturn pointerize(t, baseType, reflect.Zero(baseType)), nil\n\t}\n\n\t\/\/ try primitive conversion\n\tswitch {\n\tcase val.typ() == baseType:\n\t\treturn pointerize(t, baseType, val.reflect()), nil\n\n\tcase baseType.Kind() == reflect.String:\n\t\ts, err := val.toString()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"string\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(s)), nil\n\n\tcase baseType == tDuration:\n\t\tvar d time.Duration\n\t\tvar err error\n\n\t\tswitch v := val.(type) {\n\t\tcase *cfgInt:\n\t\t\td = time.Duration(v.i) * time.Second\n\t\tcase *cfgFloat:\n\t\t\td = time.Duration(v.f * float64(time.Second))\n\t\tcase *cfgString:\n\t\t\td, err = time.ParseDuration(v.s)\n\t\tdefault:\n\t\t\ts, err := val.toString()\n\t\t\tif err != nil {\n\t\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"duration\")\n\t\t\t}\n\n\t\t\td, err = time.ParseDuration(s)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"duration\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(d)), nil\n\n\tcase baseType == tRegexp:\n\t\ts, err := val.toString()\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"regex\")\n\t\t}\n\n\t\tr, err := regexp.Compile(s)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, raiseConversion(val, err, \"regex\")\n\t\t}\n\t\treturn pointerize(t, baseType, reflect.ValueOf(r).Elem()), nil\n\n\tcase val.typ().ConvertibleTo(baseType):\n\t\treturn pointerize(t, baseType, val.reflect().Convert(baseType)), nil\n\n\t}\n\n\treturn reflect.Value{}, raiseToTypeNotSupported(val, baseType)\n}\n\nfunc reifyArray(\n\topts options,\n\tto reflect.Value, tTo reflect.Type,\n\tarr *cfgArray,\n) (reflect.Value, Error) {\n\tif arr.Len() != tTo.Len() {\n\t\treturn reflect.Value{}, raiseArraySize(tTo, arr)\n\t}\n\treturn reifyDoArray(opts, to, tTo.Elem(), arr)\n}\n\nfunc reifySlice(opts options, tTo reflect.Type, arr *cfgArray) (reflect.Value, Error) {\n\tto := reflect.MakeSlice(tTo, arr.Len(), arr.Len())\n\treturn reifyDoArray(opts, to, tTo.Elem(), arr)\n}\n\nfunc reifyDoArray(\n\topts options,\n\tto reflect.Value, elemT reflect.Type,\n\tarr *cfgArray,\n) (reflect.Value, Error) {\n\tfor i, from := range arr.arr {\n\t\tv, err := reifyValue(opts, elemT, from)\n\t\tif err != nil {\n\t\t\treturn reflect.Value{}, err\n\t\t}\n\t\tto.Index(i).Set(v)\n\t}\n\treturn to, nil\n}\n\nfunc castArr(v value) *cfgArray {\n\tif arr, ok := v.(*cfgArray); ok {\n\t\treturn arr\n\t}\n\n\tif v.Len() == 0 {\n\t\treturn &cfgArray{}\n\t}\n\n\treturn &cfgArray{\n\t\tarr: []value{v},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package pi\n\nimport \"strings\"\n\n\/\/ route represents an API route.\n\/\/ For example: \/user\/get\/{id}\ntype route struct {\n\tRouteURL string\n\tChildRoutes routes\n\tMethods map[string]HandlerFunction\n\tInterceptors interceptors\n}\n\ntype routes []*route\n\n\/\/ newRoute returns a new route.\nfunc newRoute(RouteURL string, ChildRoutes ...*route) *route {\n\treturn &route{\n\t\tRouteURL: RouteURL,\n\t\tChildRoutes: ChildRoutes,\n\t\tMethods: make(map[string]HandlerFunction),\n\t}\n}\n\ntype interceptorHelper struct {\n\tBeforeFunc HandlerFunction\n\tAfterFunc HandlerFunction\n\tErrorFunc func(error) HandlerFunction\n}\n\nfunc (helper *interceptorHelper) Before() HandlerFunction {\n\treturn helper.BeforeFunc\n}\n\nfunc (helper *interceptorHelper) After() HandlerFunction {\n\treturn helper.AfterFunc\n}\n\nfunc (helper *interceptorHelper) Error(err error) HandlerFunction {\n\treturn helper.ErrorFunc(err)\n}\n\n\/\/ Before registers an interceptor to be called before the request is handled.\nfunc (r *route) Before(b beforeInterceptor) *route {\n\tr.Interceptors.addBefore(b)\n\treturn r\n}\n\n\/\/ BeforeFunc add an an Before handler to the interceptor.\nfunc (r *route) BeforeFunc(handler HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tBeforeFunc: handler,\n\t}\n\tr.Interceptors.addBefore(helper)\n\treturn r\n}\n\n\/\/ After registers an interceptor to be called after the request has been handled.\nfunc (r *route) After(a afterInterceptor) *route {\n\tr.Interceptors.addAfter(a)\n\treturn r\n}\n\n\/\/ AfterFunc add an an After interceptor.\nfunc (r *route) AfterFunc(handler HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tAfterFunc: handler,\n\t}\n\tr.Interceptors.addAfter(helper)\n\treturn r\n}\n\n\/\/ Error registers an interceptor to be called when an error occurs in the request handler or in any Before interceptor.\nfunc (r *route) Error(e errorInterceptor) *route {\n\tr.Interceptors.addError(e)\n\treturn r\n}\n\n\/\/ ErrorFunc add an an Error interceptor.\nfunc (r *route) ErrorFunc(handler func (error) HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tErrorFunc: handler,\n\t}\n\tr.Interceptors.addError(helper)\n\treturn r\n}\n\n\/\/ Intercept registers an interceptor to be called before, after or on error.\nfunc (r *route) Intercept(ci interface{}) *route {\n\tr.Interceptors.addInterceptor(ci)\n\treturn r\n}\n\n\/\/ Any registers an HandlerFunction to handle any requests.\nfunc (r *route) Any(handlerFunc HandlerFunction) *route {\n\tr.Get(handlerFunc)\n\tr.Post(handlerFunc)\n\tr.Put(handlerFunc)\n\tr.Delete(handlerFunc)\n\tr.Patch(handlerFunc)\n\tr.Options(handlerFunc)\n\tr.Head(handlerFunc)\n\treturn r\n}\n\n\/\/ Get registers an HandlerFunction to handle GET requests.\nfunc (r *route) Get(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"GET\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Post registers an HandlerFunction to handle POST requests.\nfunc (r *route) Post(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"POST\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Put registers an HandlerFunction to handle PUT requests.\nfunc (r *route) Put(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"PUT\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Delete registers an HandlerFunction to handle DELETE requests.\nfunc (r *route) Delete(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"DELETE\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Patch registers an HandlerFunction to handle PATCH requests.\nfunc (r *route) Patch(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"PATCH\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Options registers an HandlerFunction to handle OPTIONS requests.\nfunc (r *route) Options(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"OPTIONS\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Head registers an HandlerFunction to handle HEAD requests.\nfunc (r *route) Head(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"HEAD\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Custom registers an HandlerFunction to handle custom requests.\nfunc (r *route) Custom(method string, handlerFunc HandlerFunction) *route {\n\tr.Methods[method] = handlerFunc\n\treturn r\n}\n\nfunc (r routes) Len() int {\n\treturn len(r)\n}\n\nfunc (r routes) Swap(i, j int) {\n\tr[i], r[j] = r[j], r[i]\n}\n\nfunc (r routes) Less(i, j int) bool {\n\treturn strings.Count(r[i].RouteURL, \"\/\") > strings.Count(r[j].RouteURL, \"\/\")\n}\n<commit_msg>Added comment<commit_after>package pi\n\nimport \"strings\"\n\n\/\/ route represents an API route.\n\/\/ For example: \/user\/get\/{id}\ntype route struct {\n\tRouteURL string\n\tChildRoutes routes\n\tMethods map[string]HandlerFunction\n\tInterceptors interceptors\n}\n\ntype routes []*route\n\n\/\/ newRoute returns a new route.\nfunc newRoute(RouteURL string, ChildRoutes ...*route) *route {\n\treturn &route{\n\t\tRouteURL: RouteURL,\n\t\tChildRoutes: ChildRoutes,\n\t\tMethods: make(map[string]HandlerFunction),\n\t}\n}\n\n\/\/ interceptorHelper is an helper to add single funcs to the interceptor list.\ntype interceptorHelper struct {\n\tBeforeFunc HandlerFunction\n\tAfterFunc HandlerFunction\n\tErrorFunc func(error) HandlerFunction\n}\n\nfunc (helper *interceptorHelper) Before() HandlerFunction {\n\treturn helper.BeforeFunc\n}\n\nfunc (helper *interceptorHelper) After() HandlerFunction {\n\treturn helper.AfterFunc\n}\n\nfunc (helper *interceptorHelper) Error(err error) HandlerFunction {\n\treturn helper.ErrorFunc(err)\n}\n\n\/\/ Before registers an interceptor to be called before the request is handled.\nfunc (r *route) Before(b beforeInterceptor) *route {\n\tr.Interceptors.addBefore(b)\n\treturn r\n}\n\n\/\/ BeforeFunc add an an Before handler to the interceptor.\nfunc (r *route) BeforeFunc(handler HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tBeforeFunc: handler,\n\t}\n\tr.Interceptors.addBefore(helper)\n\treturn r\n}\n\n\/\/ After registers an interceptor to be called after the request has been handled.\nfunc (r *route) After(a afterInterceptor) *route {\n\tr.Interceptors.addAfter(a)\n\treturn r\n}\n\n\/\/ AfterFunc add an an After interceptor.\nfunc (r *route) AfterFunc(handler HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tAfterFunc: handler,\n\t}\n\tr.Interceptors.addAfter(helper)\n\treturn r\n}\n\n\/\/ Error registers an interceptor to be called when an error occurs in the request handler or in any Before interceptor.\nfunc (r *route) Error(e errorInterceptor) *route {\n\tr.Interceptors.addError(e)\n\treturn r\n}\n\n\/\/ ErrorFunc add an an Error interceptor.\nfunc (r *route) ErrorFunc(handler func (error) HandlerFunction) *route {\n\thelper := &interceptorHelper{\n\t\tErrorFunc: handler,\n\t}\n\tr.Interceptors.addError(helper)\n\treturn r\n}\n\n\/\/ Intercept registers an interceptor to be called before, after or on error.\nfunc (r *route) Intercept(ci interface{}) *route {\n\tr.Interceptors.addInterceptor(ci)\n\treturn r\n}\n\n\/\/ Any registers an HandlerFunction to handle any requests.\nfunc (r *route) Any(handlerFunc HandlerFunction) *route {\n\tr.Get(handlerFunc)\n\tr.Post(handlerFunc)\n\tr.Put(handlerFunc)\n\tr.Delete(handlerFunc)\n\tr.Patch(handlerFunc)\n\tr.Options(handlerFunc)\n\tr.Head(handlerFunc)\n\treturn r\n}\n\n\/\/ Get registers an HandlerFunction to handle GET requests.\nfunc (r *route) Get(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"GET\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Post registers an HandlerFunction to handle POST requests.\nfunc (r *route) Post(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"POST\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Put registers an HandlerFunction to handle PUT requests.\nfunc (r *route) Put(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"PUT\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Delete registers an HandlerFunction to handle DELETE requests.\nfunc (r *route) Delete(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"DELETE\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Patch registers an HandlerFunction to handle PATCH requests.\nfunc (r *route) Patch(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"PATCH\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Options registers an HandlerFunction to handle OPTIONS requests.\nfunc (r *route) Options(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"OPTIONS\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Head registers an HandlerFunction to handle HEAD requests.\nfunc (r *route) Head(handlerFunc HandlerFunction) *route {\n\tr.Methods[\"HEAD\"] = handlerFunc\n\treturn r\n}\n\n\/\/ Custom registers an HandlerFunction to handle custom requests.\nfunc (r *route) Custom(method string, handlerFunc HandlerFunction) *route {\n\tr.Methods[method] = handlerFunc\n\treturn r\n}\n\nfunc (r routes) Len() int {\n\treturn len(r)\n}\n\nfunc (r routes) Swap(i, j int) {\n\tr[i], r[j] = r[j], r[i]\n}\n\nfunc (r routes) Less(i, j int) bool {\n\treturn strings.Count(r[i].RouteURL, \"\/\") > strings.Count(r[j].RouteURL, \"\/\")\n}\n<|endoftext|>"} {"text":"<commit_before>package v1alpha1\n\nimport (\n\t\"github.com\/kubedb\/apimachinery\/apis\/kubedb\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\nvar SchemeGroupVersion = schema.GroupVersion{Group: kubedb.GroupName, Version: \"v1alpha1\"}\n\nvar (\n\t\/\/ TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io\/api.\n\t\/\/ localSchemeBuilder and AddToScheme will stay in k8s.io\/kubernetes.\n\tSchemeBuilder runtime.SchemeBuilder\n\tlocalSchemeBuilder = &SchemeBuilder\n\tAddToScheme = localSchemeBuilder.AddToScheme\n)\n\nfunc init() {\n\t\/\/ We only register manually written functions here. The registration of the\n\t\/\/ generated functions takes place in the generated files. The separation\n\t\/\/ makes the code compile even when the generated files are missing.\n\tlocalSchemeBuilder.Register(addKnownTypes)\n}\n\n\/\/ Resource takes an unqualified resource and returns a Group qualified GroupResource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\n\/\/ Adds the list of known types to api.Scheme.\nfunc addKnownTypes(scheme *runtime.Scheme) error {\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&Postgres{},\n\t\t&PostgresList{},\n\t\t&Elasticsearch{},\n\t\t&ElasticsearchList{},\n\t\t&Memcached{},\n\t\t&MemcachedList{},\n\t\t&MongoDB{},\n\t\t&MongoDBList{},\n\t\t&MySQL{},\n\t\t&MySQLList{},\n\t\t&Redis{},\n\t\t&RedisList{},\n\t\t&Snapshot{},\n\t\t&SnapshotList{},\n\t\t&DormantDatabase{},\n\t\t&DormantDatabaseList{},\n\t)\n\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&metav1.Status{},\n\t)\n\tmetav1.AddToGroupVersion(scheme, SchemeGroupVersion)\n\treturn nil\n}\n<commit_msg>Register Etcd types (#212)<commit_after>package v1alpha1\n\nimport (\n\t\"github.com\/kubedb\/apimachinery\/apis\/kubedb\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\nvar SchemeGroupVersion = schema.GroupVersion{Group: kubedb.GroupName, Version: \"v1alpha1\"}\n\nvar (\n\t\/\/ TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io\/api.\n\t\/\/ localSchemeBuilder and AddToScheme will stay in k8s.io\/kubernetes.\n\tSchemeBuilder runtime.SchemeBuilder\n\tlocalSchemeBuilder = &SchemeBuilder\n\tAddToScheme = localSchemeBuilder.AddToScheme\n)\n\nfunc init() {\n\t\/\/ We only register manually written functions here. The registration of the\n\t\/\/ generated functions takes place in the generated files. The separation\n\t\/\/ makes the code compile even when the generated files are missing.\n\tlocalSchemeBuilder.Register(addKnownTypes)\n}\n\n\/\/ Resource takes an unqualified resource and returns a Group qualified GroupResource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\n\/\/ Adds the list of known types to api.Scheme.\nfunc addKnownTypes(scheme *runtime.Scheme) error {\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&Postgres{},\n\t\t&PostgresList{},\n\t\t&Elasticsearch{},\n\t\t&ElasticsearchList{},\n\t\t&Memcached{},\n\t\t&MemcachedList{},\n\t\t&MongoDB{},\n\t\t&MongoDBList{},\n\t\t&MySQL{},\n\t\t&MySQLList{},\n\t\t&Redis{},\n\t\t&RedisList{},\n\t\t&Etcd{},\n\t\t&EtcdList{},\n\t\t&Snapshot{},\n\t\t&SnapshotList{},\n\t\t&DormantDatabase{},\n\t\t&DormantDatabaseList{},\n\t)\n\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&metav1.Status{},\n\t)\n\tmetav1.AddToGroupVersion(scheme, SchemeGroupVersion)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package environments\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tstrftime \"github.com\/jehiah\/go-strftime\"\n\thomedir \"github.com\/mitchellh\/go-homedir\"\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/9seconds\/ah\/app\/utils\"\n)\n\nconst (\n\tdefaultAppDirName = \".ah\"\n\tdefaultTracesDirName = \"traces\"\n\tdefaultBookmarksDirName = \"bookmarks\"\n\n\tdefaultConfigFileName = \"config.yaml\"\n\tdefaultZshHistFileName = \".zsh_history\"\n\tdefaultBashHistFileName = \".bash_history\"\n\tdefaultAutoCommandsFileName = \"autocommands.gob\"\n\n\tShellBash = \"bash\"\n\tShellZsh = \"zsh\"\n)\n\nvar (\n\thomeDir string\n\n\tdefaultTmpDir = os.TempDir()\n\n\tCreatedAt = time.Now().Unix()\n)\n\ntype Environment struct {\n\tShell string `yaml:\"shell\"`\n\tHistFile string `yaml:\"histfile\"`\n\tHistTimeFormat string `yaml:\"histtimeformat\"`\n\n\tHomeDir string `yaml:\"homedir\"`\n\tAppDir string `yaml:\"appdir\"`\n\tTmpDir string `yaml:\"tmpdir\"`\n\tTracesDir string `yaml:\"tracesdir\"`\n\tBookmarksDir string `yaml:\"bookmarksdir\"`\n\n\tAutoCommandsFileName string `yaml:\"autocommands\"`\n\tConfigFileName string `yaml:\"config\"`\n}\n\nfunc init() {\n\tcurrentHomeDir, err := homedir.Dir()\n\tif err != nil {\n\t\tos.Stderr.WriteString(fmt.Sprintf(\"Cannot fetch your home directory: %v\", err))\n\t\tos.Exit(1)\n\t}\n\thomeDir = currentHomeDir\n}\n\nfunc (e *Environment) GetTraceFileName(hash string) string {\n\treturn filepath.Join(e.TracesDir, hash)\n}\n\nfunc (e *Environment) GetBookmarkFileName(name string) string {\n\treturn filepath.Join(e.BookmarksDir, name)\n}\n\nfunc (e *Environment) GetHistFileName() (fileName string, err error) {\n\tfileName = e.HistFile\n\n\tif fileName == \"\" {\n\t\tswitch e.Shell {\n\t\tcase ShellBash:\n\t\t\tfileName = filepath.Join(e.HomeDir, defaultBashHistFileName)\n\t\tcase ShellZsh:\n\t\t\tfileName = filepath.Join(e.HomeDir, defaultZshHistFileName)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Shell %s is not supported\", e.Shell)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (e *Environment) FormatTimeStamp(timestamp int64) string {\n\treturn e.FormatTime(utils.ConvertTimestamp(timestamp))\n}\n\nfunc (e *Environment) FormatTime(timestamp *time.Time) (formatted string) {\n\tif e.HistTimeFormat != \"\" {\n\t\tformatted = strftime.Format(e.HistTimeFormat, *timestamp)\n\t}\n\n\treturn\n}\n\nfunc (e *Environment) GetTracesFileInfos() ([]os.FileInfo, error) {\n\treturn e.getFileNames(e.TracesDir)\n}\n\nfunc (e *Environment) GetBookmarksFileInfos() ([]os.FileInfo, error) {\n\treturn e.getFileNames(e.BookmarksDir)\n}\n\nfunc (e *Environment) getFileNames(directory string) ([]os.FileInfo, error) {\n\tfiles, err := ioutil.ReadDir(directory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos := make([]os.FileInfo, 0, len(files))\n\tfor _, file := range files {\n\t\tif file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tfileInfos = append(fileInfos, file)\n\t}\n\n\treturn fileInfos, nil\n}\n\nfunc (e *Environment) ReadFromConfig() (configEnv *Environment, err error) {\n\tconfigEnv = new(Environment)\n\n\tcontent, err := ioutil.ReadFile(e.ConfigFileName)\n\tif err == nil {\n\t\terr = yaml.Unmarshal(content, configEnv)\n\t}\n\n\treturn\n}\n\nfunc MakeDefaultEnvironment() (env *Environment) {\n\tenv = new(Environment)\n\n\tenv.Shell = path.Base(os.Getenv(\"SHELL\"))\n\tenv.HistFile = os.Getenv(\"HISTFILE\")\n\tenv.HistTimeFormat = os.Getenv(\"HISTTIMEFORMAT\")\n\n\tenv.HomeDir = homeDir\n\tenv.AppDir = filepath.Join(homeDir, defaultAppDirName)\n\tenv.TracesDir = filepath.Join(env.AppDir, defaultTracesDirName)\n\tenv.BookmarksDir = filepath.Join(env.AppDir, defaultBookmarksDirName)\n\tenv.TmpDir = defaultTmpDir\n\n\tenv.ConfigFileName = filepath.Join(env.AppDir, defaultConfigFileName)\n\tenv.AutoCommandsFileName = filepath.Join(env.AppDir, defaultAutoCommandsFileName)\n\n\treturn\n}\n\nfunc MergeEnvironments(envs ...*Environment) (result *Environment) {\n\tresult = new(Environment)\n\n\tfor _, value := range envs {\n\t\tresult.Shell = value.Shell\n\t\tresult.HistFile = value.HistFile\n\t\tresult.HistTimeFormat = value.HistTimeFormat\n\t\tresult.HomeDir = value.HomeDir\n\t\tresult.AppDir = value.AppDir\n\t\tresult.TracesDir = value.TracesDir\n\t\tresult.BookmarksDir = value.BookmarksDir\n\t\tresult.TmpDir = value.TmpDir\n\t\tresult.ConfigFileName = value.ConfigFileName\n\t\tresult.AutoCommandsFileName = value.AutoCommandsFileName\n\t}\n\n\treturn\n}\n<commit_msg>Make Environment Stringer<commit_after>package environments\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tstrftime \"github.com\/jehiah\/go-strftime\"\n\thomedir \"github.com\/mitchellh\/go-homedir\"\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/9seconds\/ah\/app\/utils\"\n)\n\nconst (\n\tdefaultAppDirName = \".ah\"\n\tdefaultTracesDirName = \"traces\"\n\tdefaultBookmarksDirName = \"bookmarks\"\n\n\tdefaultConfigFileName = \"config.yaml\"\n\tdefaultZshHistFileName = \".zsh_history\"\n\tdefaultBashHistFileName = \".bash_history\"\n\tdefaultAutoCommandsFileName = \"autocommands.gob\"\n\n\tShellBash = \"bash\"\n\tShellZsh = \"zsh\"\n)\n\nvar (\n\thomeDir string\n\n\tdefaultTmpDir = os.TempDir()\n\n\tCreatedAt = time.Now().Unix()\n)\n\ntype Environment struct {\n\tShell string `yaml:\"shell\"`\n\tHistFile string `yaml:\"histfile\"`\n\tHistTimeFormat string `yaml:\"histtimeformat\"`\n\n\tHomeDir string `yaml:\"homedir\"`\n\tAppDir string `yaml:\"appdir\"`\n\tTmpDir string `yaml:\"tmpdir\"`\n\tTracesDir string `yaml:\"tracesdir\"`\n\tBookmarksDir string `yaml:\"bookmarksdir\"`\n\n\tAutoCommandsFileName string `yaml:\"autocommands\"`\n\tConfigFileName string `yaml:\"config\"`\n}\n\nfunc init() {\n\tcurrentHomeDir, err := homedir.Dir()\n\tif err != nil {\n\t\tos.Stderr.WriteString(fmt.Sprintf(\"Cannot fetch your home directory: %v\", err))\n\t\tos.Exit(1)\n\t}\n\thomeDir = currentHomeDir\n}\n\nfunc (e *Environment) GetTraceFileName(hash string) string {\n\treturn filepath.Join(e.TracesDir, hash)\n}\n\nfunc (e *Environment) GetBookmarkFileName(name string) string {\n\treturn filepath.Join(e.BookmarksDir, name)\n}\n\nfunc (e *Environment) GetHistFileName() (fileName string, err error) {\n\tfileName = e.HistFile\n\n\tif fileName == \"\" {\n\t\tswitch e.Shell {\n\t\tcase ShellBash:\n\t\t\tfileName = filepath.Join(e.HomeDir, defaultBashHistFileName)\n\t\tcase ShellZsh:\n\t\t\tfileName = filepath.Join(e.HomeDir, defaultZshHistFileName)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Shell %s is not supported\", e.Shell)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (e *Environment) FormatTimeStamp(timestamp int64) string {\n\treturn e.FormatTime(utils.ConvertTimestamp(timestamp))\n}\n\nfunc (e *Environment) FormatTime(timestamp *time.Time) (formatted string) {\n\tif e.HistTimeFormat != \"\" {\n\t\tformatted = strftime.Format(e.HistTimeFormat, *timestamp)\n\t}\n\n\treturn\n}\n\nfunc (e *Environment) GetTracesFileInfos() ([]os.FileInfo, error) {\n\treturn e.getFileNames(e.TracesDir)\n}\n\nfunc (e *Environment) GetBookmarksFileInfos() ([]os.FileInfo, error) {\n\treturn e.getFileNames(e.BookmarksDir)\n}\n\nfunc (e *Environment) getFileNames(directory string) ([]os.FileInfo, error) {\n\tfiles, err := ioutil.ReadDir(directory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos := make([]os.FileInfo, 0, len(files))\n\tfor _, file := range files {\n\t\tif file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tfileInfos = append(fileInfos, file)\n\t}\n\n\treturn fileInfos, nil\n}\n\nfunc (e *Environment) ReadFromConfig() (configEnv *Environment, err error) {\n\tconfigEnv = new(Environment)\n\n\tcontent, err := ioutil.ReadFile(e.ConfigFileName)\n\tif err == nil {\n\t\terr = yaml.Unmarshal(content, configEnv)\n\t}\n\n\treturn\n}\n\nfunc (e *Environment) String() string {\n\treturn fmt.Sprintf(\"<Environment(shell='%s', histFile='%s', histTimeFormat='%s', homeDir='%s', appDir='%s', tracesDir='%s', bookmarksDir='%s', tmpDir='%s', configFileName='%s', autoCommandsFileName='%s')>\",\n\t\te.Shell,\n\t\te.HistFile,\n\t\te.HistTimeFormat,\n\t\te.HomeDir,\n\t\te.AppDir,\n\t\te.TracesDir,\n\t\te.BookmarksDir,\n\t\te.TmpDir,\n\t\te.ConfigFileName,\n\t\te.AutoCommandsFileName)\n}\n\nfunc MakeDefaultEnvironment() (env *Environment) {\n\tenv = new(Environment)\n\n\tenv.Shell = path.Base(os.Getenv(\"SHELL\"))\n\tenv.HistFile = os.Getenv(\"HISTFILE\")\n\tenv.HistTimeFormat = os.Getenv(\"HISTTIMEFORMAT\")\n\n\tenv.HomeDir = homeDir\n\tenv.AppDir = filepath.Join(homeDir, defaultAppDirName)\n\tenv.TracesDir = filepath.Join(env.AppDir, defaultTracesDirName)\n\tenv.BookmarksDir = filepath.Join(env.AppDir, defaultBookmarksDirName)\n\tenv.TmpDir = defaultTmpDir\n\n\tenv.ConfigFileName = filepath.Join(env.AppDir, defaultConfigFileName)\n\tenv.AutoCommandsFileName = filepath.Join(env.AppDir, defaultAutoCommandsFileName)\n\n\treturn\n}\n\nfunc MergeEnvironments(envs ...*Environment) (result *Environment) {\n\tresult = new(Environment)\n\n\tfor _, value := range envs {\n\t\tresult.Shell = value.Shell\n\t\tresult.HistFile = value.HistFile\n\t\tresult.HistTimeFormat = value.HistTimeFormat\n\t\tresult.HomeDir = value.HomeDir\n\t\tresult.AppDir = value.AppDir\n\t\tresult.TracesDir = value.TracesDir\n\t\tresult.BookmarksDir = value.BookmarksDir\n\t\tresult.TmpDir = value.TmpDir\n\t\tresult.ConfigFileName = value.ConfigFileName\n\t\tresult.AutoCommandsFileName = value.AutoCommandsFileName\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package moby\n\n\/\/ We want to replace much of this with use of containerd tools\n\/\/ and also using the Docker API not shelling out\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\/reference\"\n\t\"github.com\/docker\/cli\/cli\/command\"\n\t\"github.com\/docker\/cli\/cli\/config\"\n\t\"github.com\/docker\/cli\/cli\/config\/configfile\"\n\tdistref \"github.com\/docker\/distribution\/reference\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/registry\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\t\/\/ configFile is a cached copy of the client config file\n\tconfigFile *configfile.ConfigFile\n\t\/\/ authCache maps a registry URL to encoded authentication\n\tauthCache map[string]string\n)\n\nfunc dockerRun(input io.Reader, output io.Writer, trust bool, img string, args ...string) error {\n\tlog.Debugf(\"docker run %s (trust=%t) (input): %s\", img, trust, strings.Join(args, \" \"))\n\tdocker, err := exec.LookPath(\"docker\")\n\tif err != nil {\n\t\treturn errors.New(\"Docker does not seem to be installed\")\n\t}\n\n\tenv := os.Environ()\n\tif trust {\n\t\tenv = append(env, \"DOCKER_CONTENT_TRUST=1\")\n\t}\n\n\t\/\/ Pull first to avoid https:\/\/github.com\/docker\/cli\/issues\/631\n\tpull := exec.Command(docker, \"pull\", img)\n\tpull.Env = env\n\tif err := pull.Run(); err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"docker pull %s failed: %v output:\\n%s\", img, err, exitError.Stderr)\n\t\t}\n\t\treturn err\n\t}\n\n\targs = append([]string{\"run\", \"--network=none\", \"--rm\", \"-i\", img}, args...)\n\tcmd := exec.Command(docker, args...)\n\tcmd.Stdin = input\n\tcmd.Stdout = output\n\tcmd.Env = env\n\n\tif err := cmd.Run(); err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"docker run %s failed: %v output:\\n%s\", img, err, exitError.Stderr)\n\t\t}\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"docker run %s (input): %s...Done\", img, strings.Join(args, \" \"))\n\treturn nil\n}\n\nfunc dockerCreate(image string) (string, error) {\n\tlog.Debugf(\"docker create: %s\", image)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\t\/\/ we do not ever run the container, so \/dev\/null is used as command\n\tconfig := &container.Config{\n\t\tCmd: []string{\"\/dev\/null\"},\n\t\tImage: image,\n\t}\n\trespBody, err := cli.ContainerCreate(context.Background(), config, nil, nil, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Debugf(\"docker create: %s...Done\", image)\n\treturn respBody.ID, nil\n}\n\nfunc dockerExport(container string) (io.ReadCloser, error) {\n\tlog.Debugf(\"docker export: %s\", container)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn nil, errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\tresponseBody, err := cli.ContainerExport(context.Background(), container)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn responseBody, err\n}\n\nfunc dockerRm(container string) error {\n\tlog.Debugf(\"docker rm: %s\", container)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\tif err = cli.ContainerRemove(context.Background(), container, types.ContainerRemoveOptions{}); err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"docker rm: %s...Done\", container)\n\treturn nil\n}\n\nfunc dockerPull(ref *reference.Spec, forcePull, trustedPull bool) error {\n\tlog.Debugf(\"docker pull: %s\", ref)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\n\tif trustedPull {\n\t\tlog.Debugf(\"pulling %s with content trust\", ref)\n\t\ttrustedImg, err := TrustedReference(ref.String())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Trusted pull for %s failed: %v\", ref, err)\n\t\t}\n\n\t\t\/\/ tag the image on a best-effort basis after pulling with content trust,\n\t\t\/\/ ensuring that docker picks up the tag and digest fom the canonical format\n\t\tdefer func(src, dst string) {\n\t\t\tif err := cli.ImageTag(context.Background(), src, dst); err != nil {\n\t\t\t\tlog.Debugf(\"could not tag trusted image %s to %s\", src, dst)\n\t\t\t}\n\t\t}(trustedImg.String(), ref.String())\n\n\t\tlog.Debugf(\"successfully verified trusted reference %s from notary\", trustedImg.String())\n\t\ttrustedSpec, err := reference.Parse(trustedImg.String())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert trusted img %s to Spec: %v\", trustedImg, err)\n\t\t}\n\t\tref.Locator = trustedSpec.Locator\n\t\tref.Object = trustedSpec.Object\n\n\t\timageSearchArg := filters.NewArgs()\n\t\timageSearchArg.Add(\"reference\", trustedImg.String())\n\t\tif _, err := cli.ImageList(context.Background(), types.ImageListOptions{Filters: imageSearchArg}); err == nil && !forcePull {\n\t\t\tlog.Debugf(\"docker pull: trusted image %s already cached...Done\", trustedImg.String())\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tlog.Infof(\"Pull image: %s\", ref)\n\tpullOptions := types.ImagePullOptions{RegistryAuth: getAuth(cli, ref.String())}\n\tr, err := cli.ImagePull(context.Background(), ref.String(), pullOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Close()\n\t_, err = io.Copy(ioutil.Discard, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"docker pull: %s...Done\", ref)\n\treturn nil\n}\n\nfunc dockerClient() (*client.Client, error) {\n\t\/\/ for maximum compatibility as we use nothing new\n\terr := os.Setenv(\"DOCKER_API_VERSION\", \"1.23\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client.NewEnvClient()\n}\n\nfunc dockerInspectImage(cli *client.Client, ref *reference.Spec, trustedPull bool) (types.ImageInspect, error) {\n\tlog.Debugf(\"docker inspect image: %s\", ref)\n\n\tinspect, _, err := cli.ImageInspectWithRaw(context.Background(), ref.String())\n\tif err != nil {\n\t\tif client.IsErrNotFound(err) {\n\t\t\tpullErr := dockerPull(ref, true, trustedPull)\n\t\t\tif pullErr != nil {\n\t\t\t\treturn types.ImageInspect{}, pullErr\n\t\t\t}\n\t\t\tinspect, _, err = cli.ImageInspectWithRaw(context.Background(), ref.String())\n\t\t\tif err != nil {\n\t\t\t\treturn types.ImageInspect{}, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn types.ImageInspect{}, err\n\t\t}\n\t}\n\n\tlog.Debugf(\"docker inspect image: %s...Done\", ref)\n\n\treturn inspect, nil\n}\n\n\/\/ getAuth attempts to get an encoded authentication spec for a reference\n\/\/ This function does not error. It simply returns a empty string if something fails.\nfunc getAuth(cli *client.Client, refStr string) string {\n\t\/\/ Initialise state on first use\n\tif authCache == nil {\n\t\tauthCache = make(map[string]string)\n\t}\n\tif configFile == nil {\n\t\tconfigFile = config.LoadDefaultConfigFile(ioutil.Discard)\n\t}\n\n\t\/\/ Normalise ref\n\tref, err := distref.ParseNormalizedNamed(refStr)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to parse reference %s: %v: %v\", refStr, err)\n\t\treturn \"\"\n\t}\n\thostname := distref.Domain(ref)\n\t\/\/ Special case for docker.io which currently is stored as https:\/\/index.docker.io\/v1\/ in authentication map.\n\t\/\/ Other registries are stored under their domain name.\n\tif hostname == registry.IndexName {\n\t\thostname = registry.IndexServer\n\t}\n\n\t\/\/ Look up in cache\n\tif val, ok := authCache[hostname]; ok {\n\t\tlog.Debugf(\"Returning cached credentials for %s\", hostname)\n\t\treturn val\n\t}\n\n\tauthConfig, err := configFile.GetAuthConfig(hostname)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get authentication config for %s. Ignoring: %v\", hostname, err)\n\t\treturn \"\"\n\t}\n\tlog.Debugf(\"Looked up credentials for %s\", hostname)\n\n\tencodedAuth, err := command.EncodeAuthToBase64(authConfig)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tauthCache[hostname] = encodedAuth\n\treturn encodedAuth\n}\n<commit_msg>cmd: de-lint moby\/docker.go<commit_after>package moby\n\n\/\/ We want to replace much of this with use of containerd tools\n\/\/ and also using the Docker API not shelling out\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\/reference\"\n\t\"github.com\/docker\/cli\/cli\/command\"\n\t\"github.com\/docker\/cli\/cli\/config\"\n\t\"github.com\/docker\/cli\/cli\/config\/configfile\"\n\tdistref \"github.com\/docker\/distribution\/reference\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/registry\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\t\/\/ configFile is a cached copy of the client config file\n\tconfigFile *configfile.ConfigFile\n\t\/\/ authCache maps a registry URL to encoded authentication\n\tauthCache map[string]string\n)\n\nfunc dockerRun(input io.Reader, output io.Writer, trust bool, img string, args ...string) error {\n\tlog.Debugf(\"docker run %s (trust=%t) (input): %s\", img, trust, strings.Join(args, \" \"))\n\tdocker, err := exec.LookPath(\"docker\")\n\tif err != nil {\n\t\treturn errors.New(\"Docker does not seem to be installed\")\n\t}\n\n\tenv := os.Environ()\n\tif trust {\n\t\tenv = append(env, \"DOCKER_CONTENT_TRUST=1\")\n\t}\n\n\t\/\/ Pull first to avoid https:\/\/github.com\/docker\/cli\/issues\/631\n\tpull := exec.Command(docker, \"pull\", img)\n\tpull.Env = env\n\tif err := pull.Run(); err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"docker pull %s failed: %v output:\\n%s\", img, err, exitError.Stderr)\n\t\t}\n\t\treturn err\n\t}\n\n\targs = append([]string{\"run\", \"--network=none\", \"--rm\", \"-i\", img}, args...)\n\tcmd := exec.Command(docker, args...)\n\tcmd.Stdin = input\n\tcmd.Stdout = output\n\tcmd.Env = env\n\n\tif err := cmd.Run(); err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"docker run %s failed: %v output:\\n%s\", img, err, exitError.Stderr)\n\t\t}\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"docker run %s (input): %s...Done\", img, strings.Join(args, \" \"))\n\treturn nil\n}\n\nfunc dockerCreate(image string) (string, error) {\n\tlog.Debugf(\"docker create: %s\", image)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\t\/\/ we do not ever run the container, so \/dev\/null is used as command\n\tconfig := &container.Config{\n\t\tCmd: []string{\"\/dev\/null\"},\n\t\tImage: image,\n\t}\n\trespBody, err := cli.ContainerCreate(context.Background(), config, nil, nil, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Debugf(\"docker create: %s...Done\", image)\n\treturn respBody.ID, nil\n}\n\nfunc dockerExport(container string) (io.ReadCloser, error) {\n\tlog.Debugf(\"docker export: %s\", container)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn nil, errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\tresponseBody, err := cli.ContainerExport(context.Background(), container)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn responseBody, err\n}\n\nfunc dockerRm(container string) error {\n\tlog.Debugf(\"docker rm: %s\", container)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\tif err = cli.ContainerRemove(context.Background(), container, types.ContainerRemoveOptions{}); err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"docker rm: %s...Done\", container)\n\treturn nil\n}\n\nfunc dockerPull(ref *reference.Spec, forcePull, trustedPull bool) error {\n\tlog.Debugf(\"docker pull: %s\", ref)\n\tcli, err := dockerClient()\n\tif err != nil {\n\t\treturn errors.New(\"could not initialize Docker API client: \" + err.Error())\n\t}\n\n\tif trustedPull {\n\t\tlog.Debugf(\"pulling %s with content trust\", ref)\n\t\ttrustedImg, err := TrustedReference(ref.String())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Trusted pull for %s failed: %v\", ref, err)\n\t\t}\n\n\t\t\/\/ tag the image on a best-effort basis after pulling with content trust,\n\t\t\/\/ ensuring that docker picks up the tag and digest fom the canonical format\n\t\tdefer func(src, dst string) {\n\t\t\tif err := cli.ImageTag(context.Background(), src, dst); err != nil {\n\t\t\t\tlog.Debugf(\"could not tag trusted image %s to %s\", src, dst)\n\t\t\t}\n\t\t}(trustedImg.String(), ref.String())\n\n\t\tlog.Debugf(\"successfully verified trusted reference %s from notary\", trustedImg.String())\n\t\ttrustedSpec, err := reference.Parse(trustedImg.String())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert trusted img %s to Spec: %v\", trustedImg, err)\n\t\t}\n\t\tref.Locator = trustedSpec.Locator\n\t\tref.Object = trustedSpec.Object\n\n\t\timageSearchArg := filters.NewArgs()\n\t\timageSearchArg.Add(\"reference\", trustedImg.String())\n\t\tif _, err := cli.ImageList(context.Background(), types.ImageListOptions{Filters: imageSearchArg}); err == nil && !forcePull {\n\t\t\tlog.Debugf(\"docker pull: trusted image %s already cached...Done\", trustedImg.String())\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tlog.Infof(\"Pull image: %s\", ref)\n\tpullOptions := types.ImagePullOptions{RegistryAuth: getAuth(cli, ref.String())}\n\tr, err := cli.ImagePull(context.Background(), ref.String(), pullOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Close()\n\t_, err = io.Copy(ioutil.Discard, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"docker pull: %s...Done\", ref)\n\treturn nil\n}\n\nfunc dockerClient() (*client.Client, error) {\n\t\/\/ for maximum compatibility as we use nothing new\n\terr := os.Setenv(\"DOCKER_API_VERSION\", \"1.23\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client.NewEnvClient()\n}\n\nfunc dockerInspectImage(cli *client.Client, ref *reference.Spec, trustedPull bool) (types.ImageInspect, error) {\n\tlog.Debugf(\"docker inspect image: %s\", ref)\n\n\tinspect, _, err := cli.ImageInspectWithRaw(context.Background(), ref.String())\n\tif err != nil {\n\t\tif client.IsErrNotFound(err) {\n\t\t\tpullErr := dockerPull(ref, true, trustedPull)\n\t\t\tif pullErr != nil {\n\t\t\t\treturn types.ImageInspect{}, pullErr\n\t\t\t}\n\t\t\tinspect, _, err = cli.ImageInspectWithRaw(context.Background(), ref.String())\n\t\t\tif err != nil {\n\t\t\t\treturn types.ImageInspect{}, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn types.ImageInspect{}, err\n\t\t}\n\t}\n\n\tlog.Debugf(\"docker inspect image: %s...Done\", ref)\n\n\treturn inspect, nil\n}\n\n\/\/ getAuth attempts to get an encoded authentication spec for a reference\n\/\/ This function does not error. It simply returns a empty string if something fails.\nfunc getAuth(cli *client.Client, refStr string) string {\n\t\/\/ Initialise state on first use\n\tif authCache == nil {\n\t\tauthCache = make(map[string]string)\n\t}\n\tif configFile == nil {\n\t\tconfigFile = config.LoadDefaultConfigFile(ioutil.Discard)\n\t}\n\n\t\/\/ Normalise ref\n\tref, err := distref.ParseNormalizedNamed(refStr)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to parse reference %s: %v\", refStr, err)\n\t\treturn \"\"\n\t}\n\thostname := distref.Domain(ref)\n\t\/\/ Special case for docker.io which currently is stored as https:\/\/index.docker.io\/v1\/ in authentication map.\n\t\/\/ Other registries are stored under their domain name.\n\tif hostname == registry.IndexName {\n\t\thostname = registry.IndexServer\n\t}\n\n\t\/\/ Look up in cache\n\tif val, ok := authCache[hostname]; ok {\n\t\tlog.Debugf(\"Returning cached credentials for %s\", hostname)\n\t\treturn val\n\t}\n\n\tauthConfig, err := configFile.GetAuthConfig(hostname)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get authentication config for %s. Ignoring: %v\", hostname, err)\n\t\treturn \"\"\n\t}\n\tlog.Debugf(\"Looked up credentials for %s\", hostname)\n\n\tencodedAuth, err := command.EncodeAuthToBase64(authConfig)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tauthCache[hostname] = encodedAuth\n\treturn encodedAuth\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build integration,root\n\npackage main_test\n\nimport (\n\t\"os\/exec\"\n\t\"testing\"\n\n\t. \"github.com\/control-center\/serviced\/tools\/serviced-storage\"\n\t\"github.com\/control-center\/serviced\/volume\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nconst (\n\tsize int64 = 100 * 1024 * 1024\n)\n\nfunc TestThinpool(t *testing.T) { TestingT(t) }\n\ntype ThinpoolSuite struct {\n\tdevices []*volume.TemporaryDevice\n}\n\nfunc (s *ThinpoolSuite) TempDevice(c *C) *volume.TemporaryDevice {\n\tdev, err := volume.CreateTemporaryDevice(size)\n\tif err != nil {\n\t\tc.Fatal(err)\n\t}\n\ts.devices = append(s.devices, dev)\n\treturn dev\n}\n\nvar _ = Suite(&ThinpoolSuite{})\n\nfunc (s *ThinpoolSuite) TearDownTest(c *C) {\n\tfor _, dev := range s.devices {\n\t\tdev.Destroy()\n\t}\n}\n\nfunc (s *ThinpoolSuite) TestEnsurePhysicalDevices(c *C) {\n\t\/\/ Create a couple devices\n\tdevices := []string{\n\t\ts.TempDevice(c).LoopDevice(),\n\t\ts.TempDevice(c).LoopDevice(),\n\t}\n\n\t\/\/ First create the pvs\n\terr := EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\n\tdefer exec.Command(\"pvremove\", devices...).Run()\n\n\t\/\/ Should be idempotent\n\terr = EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\n\t\/\/ Invalid devices should fail\n\terr = EnsurePhysicalDevices([]string{\"\/not\/a\/device\"})\n\tc.Assert(err, Not(IsNil))\n}\n\nfunc (s *ThinpoolSuite) TestCreateVolumeGroup(c *C) {\n\tvolumeGroup := \"serviced-test\"\n\n\t\/\/ Should fail if devices are invalid\n\terr := CreateVolumeGroup(volumeGroup, []string{\"\/dev\/invalid1\", \"\/dev\/invalid2\"})\n\tc.Assert(err, Not(IsNil))\n\n\t\/\/ Create a couple devices\n\tdevices := []string{\n\t\ts.TempDevice(c).LoopDevice(),\n\t\ts.TempDevice(c).LoopDevice(),\n\t}\n\n\t\/\/ Ensure pvs\n\terr = EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\tdefer exec.Command(\"pvremove\", devices...).Run()\n\n\t\/\/ Should succeed now\n\terr = CreateVolumeGroup(volumeGroup, devices)\n\tc.Assert(err, IsNil)\n\n}\n\nfunc (s *ThinpoolSuite) TestCreateMetadataVolume(c *C) {\n\tvolumeGroup := \"serviced-test\"\n\n\t\/\/ Should fail if the volume group is invalid\n\t_, err := CreateMetadataVolume(volumeGroup)\n\tc.Assert(err, Not(IsNil))\n\n}\n<commit_msg>Clean up vg<commit_after>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build integration,root\n\npackage main_test\n\nimport (\n\t\"os\/exec\"\n\t\"testing\"\n\n\t. \"github.com\/control-center\/serviced\/tools\/serviced-storage\"\n\t\"github.com\/control-center\/serviced\/volume\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nconst (\n\tsize int64 = 100 * 1024 * 1024\n)\n\nfunc TestThinpool(t *testing.T) { TestingT(t) }\n\ntype ThinpoolSuite struct {\n\tdevices []*volume.TemporaryDevice\n}\n\nfunc (s *ThinpoolSuite) TempDevice(c *C) *volume.TemporaryDevice {\n\tdev, err := volume.CreateTemporaryDevice(size)\n\tif err != nil {\n\t\tc.Fatal(err)\n\t}\n\ts.devices = append(s.devices, dev)\n\treturn dev\n}\n\nvar _ = Suite(&ThinpoolSuite{})\n\nfunc (s *ThinpoolSuite) TearDownTest(c *C) {\n\tfor _, dev := range s.devices {\n\t\tdev.Destroy()\n\t}\n}\n\nfunc (s *ThinpoolSuite) TestEnsurePhysicalDevices(c *C) {\n\t\/\/ Create a couple devices\n\tdevices := []string{\n\t\ts.TempDevice(c).LoopDevice(),\n\t\ts.TempDevice(c).LoopDevice(),\n\t}\n\n\t\/\/ First create the pvs\n\terr := EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\n\tdefer exec.Command(\"pvremove\", devices...).Run()\n\n\t\/\/ Should be idempotent\n\terr = EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\n\t\/\/ Invalid devices should fail\n\terr = EnsurePhysicalDevices([]string{\"\/not\/a\/device\"})\n\tc.Assert(err, Not(IsNil))\n}\n\nfunc (s *ThinpoolSuite) TestCreateVolumeGroup(c *C) {\n\tvolumeGroup := \"serviced-test\"\n\n\t\/\/ Should fail if devices are invalid\n\terr := CreateVolumeGroup(volumeGroup, []string{\"\/dev\/invalid1\", \"\/dev\/invalid2\"})\n\tc.Assert(err, Not(IsNil))\n\n\t\/\/ Create a couple devices\n\tdevices := []string{\n\t\ts.TempDevice(c).LoopDevice(),\n\t\ts.TempDevice(c).LoopDevice(),\n\t}\n\n\t\/\/ Ensure pvs\n\terr = EnsurePhysicalDevices(devices)\n\tc.Assert(err, IsNil)\n\tdefer exec.Command(\"pvremove\", devices...).Run()\n\n\t\/\/ Should succeed now\n\terr = CreateVolumeGroup(volumeGroup, devices)\n\tc.Assert(err, IsNil)\n\n\tdefer exec.Command(\"vgremove\", volumeGroup)\n\n}\n\nfunc (s *ThinpoolSuite) TestCreateMetadataVolume(c *C) {\n\tvolumeGroup := \"serviced-test\"\n\n\t\/\/ Should fail if the volume group is invalid\n\t_, err := CreateMetadataVolume(volumeGroup)\n\tc.Assert(err, Not(IsNil))\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/funkygao\/dbus\"\n)\n\nvar (\n\toptions struct {\n\t\tdebug bool\n\t\tdryrun bool\n\n\t\tconfigfile string\n\t\tshowversion bool\n\t\tvisualizeFile string\n\t\tlockfile string\n\n\t\tlogfile string\n\t\tloglevel string\n\t}\n)\n\nconst (\n\tUSAGE = `dbusd - Distributed DataBus\n\nFlags:\n`\n)\n\nfunc parseFlags() {\n\tflag.StringVar(&options.configfile, \"conf\", \"\", \"main config file\")\n\tflag.StringVar(&options.logfile, \"logfile\", \"\", \"master log file path, default stdout\")\n\tflag.StringVar(&options.loglevel, \"loglevel\", \"trace\", \"log level\")\n\tflag.BoolVar(&options.showversion, \"version\", false, \"show version and exit\")\n\tflag.BoolVar(&options.debug, \"debug\", false, \"debug mode\")\n\tflag.BoolVar(&options.dryrun, \"dryrun\", false, \"dry run\")\n\tflag.StringVar(&options.visualizeFile, \"dump\", \"\", \"visualize the pipleline to a png file\")\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, USAGE)\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n}\n\nfunc showVersionAndExit() {\n\tfmt.Fprintf(os.Stderr, \"%s %s (build: %s)\\n\", os.Args[0], dbus.Version, dbus.BuildID)\n\tfmt.Fprintf(os.Stderr, \"Built with %s %s for %s\/%s\\n\",\n\t\truntime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)\n\tos.Exit(0)\n}\n<commit_msg>dump depends on graphviz pkg<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/funkygao\/dbus\"\n)\n\nvar (\n\toptions struct {\n\t\tdebug bool\n\t\tdryrun bool\n\n\t\tconfigfile string\n\t\tshowversion bool\n\t\tvisualizeFile string\n\t\tlockfile string\n\n\t\tlogfile string\n\t\tloglevel string\n\t}\n)\n\nconst (\n\tUSAGE = `dbusd - Distributed DataBus\n\nFlags:\n`\n)\n\nfunc parseFlags() {\n\tflag.StringVar(&options.configfile, \"conf\", \"\", \"main config file\")\n\tflag.StringVar(&options.logfile, \"logfile\", \"\", \"master log file path, default stdout\")\n\tflag.StringVar(&options.loglevel, \"loglevel\", \"trace\", \"log level\")\n\tflag.BoolVar(&options.showversion, \"version\", false, \"show version and exit\")\n\tflag.BoolVar(&options.debug, \"debug\", false, \"debug mode\")\n\tflag.BoolVar(&options.dryrun, \"dryrun\", false, \"dry run\")\n\tflag.StringVar(&options.visualizeFile, \"dump\", \"\", \"visualize the pipleline to a png file. graphviz must be installed\")\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, USAGE)\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n}\n\nfunc showVersionAndExit() {\n\tfmt.Fprintf(os.Stderr, \"%s %s (build: %s)\\n\", os.Args[0], dbus.Version, dbus.BuildID)\n\tfmt.Fprintf(os.Stderr, \"Built with %s %s for %s\/%s\\n\",\n\t\truntime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Mikael Berthe <mikael@lilotux.net>\n\/\/\n\/\/ Licensed under the MIT license.\n\/\/ Please see the LICENSE file is this directory.\n\npackage cmd\n\nimport (\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/McKael\/madon\"\n)\n\nvar domainBlocksOpts struct {\n\tshow, block, unblock bool\n\n\tlimit uint \/\/ Limit the results\n\tsinceID, maxID int64 \/\/ Query boundaries\n\tall bool \/\/ Try to fetch all results\n}\n\n\/\/ timelinesCmd represents the timelines command\nvar domainBlocksCmd = &cobra.Command{\n\tUse: \"domain-blocks --show|--block|--unblock [DOMAINNAME]\",\n\tAliases: []string{\"domain-block\"},\n\tShort: \"Display, add or remove user-blocked domains\",\n\tRunE: domainBlocksRunE,\n\tExample: ` madonctl domain-blocks --show\n madonctl domain-blocks --block example.com\n madonctl domain-blocks --unblock example.com`,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(domainBlocksCmd)\n\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.show, \"show\", false, \"List current user-blocked domains\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.block, \"block\", false, \"Block domain\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.unblock, \"unblock\", false, \"Unblock domain\")\n\n\tdomainBlocksCmd.Flags().UintVarP(&domainBlocksOpts.limit, \"limit\", \"l\", 0, \"Limit number of results\")\n\tdomainBlocksCmd.Flags().Int64Var(&domainBlocksOpts.sinceID, \"since-id\", 0, \"Request IDs greater than a value\")\n\tdomainBlocksCmd.Flags().Int64Var(&domainBlocksOpts.maxID, \"max-id\", 0, \"Request IDs less (or equal) than a value\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.all, \"all\", false, \"Fetch all results\")\n}\n\nfunc domainBlocksRunE(cmd *cobra.Command, args []string) error {\n\topt := domainBlocksOpts\n\tvar domName madon.DomainName\n\n\t\/\/ Check flags\n\tif opt.block && opt.unblock {\n\t\treturn errors.New(\"cannot use both --block and --unblock\")\n\t}\n\n\tif opt.block || opt.unblock {\n\t\tif opt.show {\n\t\t\treturn errors.New(\"cannot use both --[un]block and --show\")\n\t\t}\n\t\tif len(args) != 1 {\n\t\t\treturn errors.New(\"missing domain name\")\n\t\t}\n\t\tdomName = madon.DomainName(args[0])\n\t}\n\n\tif !opt.show && !opt.block && !opt.unblock {\n\t\treturn errors.New(\"missing flag: please provide --show, --block or --unblock\")\n\t}\n\n\t\/\/ Set up LimitParams\n\tvar limOpts *madon.LimitParams\n\tif opt.all || opt.limit > 0 || opt.sinceID > 0 || opt.maxID > 0 {\n\t\tlimOpts = new(madon.LimitParams)\n\t\tlimOpts.All = opt.all\n\t}\n\tif opt.limit > 0 {\n\t\tlimOpts.Limit = int(opt.limit)\n\t}\n\tif opt.maxID > 0 {\n\t\tlimOpts.MaxID = opt.maxID\n\t}\n\tif opt.sinceID > 0 {\n\t\tlimOpts.SinceID = opt.sinceID\n\t}\n\n\t\/\/ Log in\n\tif err := madonInit(true); err != nil {\n\t\treturn err\n\t}\n\n\tvar obj interface{}\n\tvar err error\n\n\tswitch {\n\tcase opt.show:\n\t\tvar domainList []madon.DomainName\n\t\tdomainList, err = gClient.GetDomainBlocks(limOpts)\n\t\tobj = domainList\n\tcase opt.block:\n\t\terr = gClient.DomainBlock(domName)\n\tcase opt.unblock:\n\t\terr = gClient.DomainUnblock(domName)\n\tdefault:\n\t\treturn errors.New(\"domainBlocksCmd: internal error\")\n\t}\n\n\tif err != nil {\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\tp, err := getPrinter()\n\tif err != nil {\n\t\terrPrint(\"Error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\treturn p.printObj(obj)\n}\n<commit_msg>Sync with Madon library update<commit_after>\/\/ Copyright © 2017 Mikael Berthe <mikael@lilotux.net>\n\/\/\n\/\/ Licensed under the MIT license.\n\/\/ Please see the LICENSE file is this directory.\n\npackage cmd\n\nimport (\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/McKael\/madon\"\n)\n\nvar domainBlocksOpts struct {\n\tshow, block, unblock bool\n\n\tlimit uint \/\/ Limit the results\n\tsinceID, maxID int64 \/\/ Query boundaries\n\tall bool \/\/ Try to fetch all results\n}\n\n\/\/ timelinesCmd represents the timelines command\nvar domainBlocksCmd = &cobra.Command{\n\tUse: \"domain-blocks --show|--block|--unblock [DOMAINNAME]\",\n\tAliases: []string{\"domain-block\"},\n\tShort: \"Display, add or remove user-blocked domains\",\n\tRunE: domainBlocksRunE,\n\tExample: ` madonctl domain-blocks --show\n madonctl domain-blocks --block example.com\n madonctl domain-blocks --unblock example.com`,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(domainBlocksCmd)\n\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.show, \"show\", false, \"List current user-blocked domains\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.block, \"block\", false, \"Block domain\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.unblock, \"unblock\", false, \"Unblock domain\")\n\n\tdomainBlocksCmd.Flags().UintVarP(&domainBlocksOpts.limit, \"limit\", \"l\", 0, \"Limit number of results\")\n\tdomainBlocksCmd.Flags().Int64Var(&domainBlocksOpts.sinceID, \"since-id\", 0, \"Request IDs greater than a value\")\n\tdomainBlocksCmd.Flags().Int64Var(&domainBlocksOpts.maxID, \"max-id\", 0, \"Request IDs less (or equal) than a value\")\n\tdomainBlocksCmd.Flags().BoolVar(&domainBlocksOpts.all, \"all\", false, \"Fetch all results\")\n}\n\nfunc domainBlocksRunE(cmd *cobra.Command, args []string) error {\n\topt := domainBlocksOpts\n\tvar domName madon.DomainName\n\n\t\/\/ Check flags\n\tif opt.block && opt.unblock {\n\t\treturn errors.New(\"cannot use both --block and --unblock\")\n\t}\n\n\tif opt.block || opt.unblock {\n\t\tif opt.show {\n\t\t\treturn errors.New(\"cannot use both --[un]block and --show\")\n\t\t}\n\t\tif len(args) != 1 {\n\t\t\treturn errors.New(\"missing domain name\")\n\t\t}\n\t\tdomName = madon.DomainName(args[0])\n\t}\n\n\tif !opt.show && !opt.block && !opt.unblock {\n\t\treturn errors.New(\"missing flag: please provide --show, --block or --unblock\")\n\t}\n\n\t\/\/ Set up LimitParams\n\tvar limOpts *madon.LimitParams\n\tif opt.all || opt.limit > 0 || opt.sinceID > 0 || opt.maxID > 0 {\n\t\tlimOpts = new(madon.LimitParams)\n\t\tlimOpts.All = opt.all\n\t}\n\tif opt.limit > 0 {\n\t\tlimOpts.Limit = int(opt.limit)\n\t}\n\tif opt.maxID > 0 {\n\t\tlimOpts.MaxID = opt.maxID\n\t}\n\tif opt.sinceID > 0 {\n\t\tlimOpts.SinceID = opt.sinceID\n\t}\n\n\t\/\/ Log in\n\tif err := madonInit(true); err != nil {\n\t\treturn err\n\t}\n\n\tvar obj interface{}\n\tvar err error\n\n\tswitch {\n\tcase opt.show:\n\t\tvar domainList []madon.DomainName\n\t\tdomainList, err = gClient.GetBlockedDomains(limOpts)\n\t\tobj = domainList\n\tcase opt.block:\n\t\terr = gClient.BlockDomain(domName)\n\tcase opt.unblock:\n\t\terr = gClient.UnblockDomain(domName)\n\tdefault:\n\t\treturn errors.New(\"domainBlocksCmd: internal error\")\n\t}\n\n\tif err != nil {\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\tp, err := getPrinter()\n\tif err != nil {\n\t\terrPrint(\"Error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\treturn p.printObj(obj)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/GeertJohan\/go.rice\"\n\t\"github.com\/bmizerany\/pat\"\n\n\t\"github.com\/drone\/drone\/pkg\/build\/docker\"\n\t\"github.com\/drone\/drone\/pkg\/channel\"\n\t\"github.com\/drone\/drone\/pkg\/database\"\n\t\"github.com\/drone\/drone\/pkg\/handler\"\n\t\"github.com\/drone\/drone\/pkg\/queue\"\n)\n\nvar (\n\t\/\/ local path where the SQLite database\n\t\/\/ should be stored. By default this is\n\t\/\/ in the current working directory.\n\tpath string\n\n\t\/\/ port the server will run on\n\tport string\n\n\t\/\/ database driver used to connect to the database\n\tdriver string\n\n\t\/\/ driver specific connection information. In this\n\t\/\/ case, it should be the location of the SQLite file\n\tdatasource string\n\n\t\/\/ optional flags for tls listener\n\tsslcert string\n\tsslkey string\n\n\t\/\/ build will timeout after N milliseconds.\n\t\/\/ this will default to 500 minutes (6 hours)\n\ttimeout time.Duration\n\n\t\/\/ commit sha for the current build.\n\tversion string\n)\n\nfunc main() {\n\t\/\/ parse command line flags\n\tflag.StringVar(&path, \"path\", \"\", \"\")\n\tflag.StringVar(&port, \"port\", \":8080\", \"\")\n\tflag.StringVar(&driver, \"driver\", \"sqlite3\", \"\")\n\tflag.StringVar(&datasource, \"datasource\", \"drone.sqlite\", \"\")\n\tflag.StringVar(&sslcert, \"sslcert\", \"\", \"\")\n\tflag.StringVar(&sslkey, \"sslkey\", \"\", \"\")\n\tflag.DurationVar(&timeout, \"timeout\", 300*time.Minute, \"\")\n\tflag.Parse()\n\n\t\/\/ validate the TLS arguments\n\tcheckTLSFlags()\n\n\t\/\/ setup database and handlers\n\tif err := database.Init(driver, datasource); err != nil {\n\t\tlog.Fatal(\"Can't initialize database: \", err)\n\t}\n\tsetupStatic()\n\tsetupHandlers()\n\n\t\/\/ debug\n\tlog.Printf(\"starting drone version %s on port %s\\n\", version, port)\n\n\t\/\/ start webserver using HTTPS or HTTP\n\tif sslcert != \"\" && sslkey != \"\" {\n\t\tpanic(http.ListenAndServeTLS(port, sslcert, sslkey, nil))\n\t} else {\n\t\tpanic(http.ListenAndServe(port, nil))\n\t}\n}\n\n\/\/ checking if the TLS flags where supplied correctly.\nfunc checkTLSFlags() {\n\n\tif sslcert != \"\" && sslkey == \"\" {\n\t\tlog.Fatal(\"invalid configuration: -sslkey unspecified, but -sslcert was specified.\")\n\t} else if sslcert == \"\" && sslkey != \"\" {\n\t\tlog.Fatal(\"invalid configuration: -sslcert unspecified, but -sslkey was specified.\")\n\t}\n\n}\n\n\/\/ setup routes for static assets. These assets may\n\/\/ be directly embedded inside the application using\n\/\/ the `rice embed` command, else they are served from disk.\nfunc setupStatic() {\n\tbox := rice.MustFindBox(\"assets\")\n\thttp.Handle(\"\/css\/\", http.FileServer(box.HTTPBox()))\n\thttp.Handle(\"\/js\/\", http.FileServer(box.HTTPBox()))\n\n\t\/\/ we need to intercept all attempts to serve images\n\t\/\/ so that we can add a cache-control settings\n\tvar images = http.FileServer(box.HTTPBox())\n\thttp.HandleFunc(\"\/img\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.HasPrefix(r.URL.Path, \"\/img\/build_\") {\n\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\t}\n\n\t\t\/\/ serce images\n\t\timages.ServeHTTP(w, r)\n\t})\n}\n\n\/\/ setup routes for serving dynamic content.\nfunc setupHandlers() {\n\tqueueRunner := queue.NewBuildRunner(docker.New(), timeout)\n\tqueue := queue.Start(runtime.NumCPU(), queueRunner)\n\n\thookHandler := handler.NewHookHandler(queue)\n\n\tm := pat.New()\n\tm.Get(\"\/login\", handler.ErrorHandler(handler.Login))\n\tm.Post(\"\/login\", handler.ErrorHandler(handler.Authorize))\n\tm.Get(\"\/logout\", handler.ErrorHandler(handler.Logout))\n\tm.Get(\"\/forgot\", handler.ErrorHandler(handler.Forgot))\n\tm.Post(\"\/forgot\", handler.ErrorHandler(handler.ForgotPost))\n\tm.Get(\"\/reset\", handler.ErrorHandler(handler.Reset))\n\tm.Post(\"\/reset\", handler.ErrorHandler(handler.ResetPost))\n\tm.Get(\"\/signup\", handler.ErrorHandler(handler.SignUp))\n\tm.Post(\"\/signup\", handler.ErrorHandler(handler.SignUpPost))\n\tm.Get(\"\/register\", handler.ErrorHandler(handler.Register))\n\tm.Post(\"\/register\", handler.ErrorHandler(handler.RegisterPost))\n\tm.Get(\"\/accept\", handler.UserHandler(handler.TeamMemberAccept))\n\n\t\/\/ handlers for setting up your GitHub repository\n\tm.Post(\"\/new\/github.com\", handler.UserHandler(handler.RepoCreateGithub))\n\tm.Get(\"\/new\/github.com\", handler.UserHandler(handler.RepoAdd))\n\n\t\/\/ handlers for linking your GitHub account\n\tm.Get(\"\/auth\/login\/github\", handler.UserHandler(handler.LinkGithub))\n\n\t\/\/ handlers for dashboard pages\n\tm.Get(\"\/dashboard\/team\/:team\", handler.UserHandler(handler.TeamShow))\n\tm.Get(\"\/dashboard\", handler.UserHandler(handler.UserShow))\n\n\t\/\/ handlers for user account management\n\tm.Get(\"\/account\/user\/profile\", handler.UserHandler(handler.UserEdit))\n\tm.Post(\"\/account\/user\/profile\", handler.UserHandler(handler.UserUpdate))\n\tm.Get(\"\/account\/user\/delete\", handler.UserHandler(handler.UserDeleteConfirm))\n\tm.Post(\"\/account\/user\/delete\", handler.UserHandler(handler.UserDelete))\n\tm.Get(\"\/account\/user\/password\", handler.UserHandler(handler.UserPass))\n\tm.Post(\"\/account\/user\/password\", handler.UserHandler(handler.UserPassUpdate))\n\tm.Get(\"\/account\/user\/teams\/add\", handler.UserHandler(handler.TeamAdd))\n\tm.Post(\"\/account\/user\/teams\/add\", handler.UserHandler(handler.TeamCreate))\n\tm.Get(\"\/account\/user\/teams\", handler.UserHandler(handler.UserTeams))\n\n\t\/\/ handlers for team managements\n\tm.Get(\"\/account\/team\/:team\/profile\", handler.UserHandler(handler.TeamEdit))\n\tm.Post(\"\/account\/team\/:team\/profile\", handler.UserHandler(handler.TeamUpdate))\n\tm.Get(\"\/account\/team\/:team\/delete\", handler.UserHandler(handler.TeamDeleteConfirm))\n\tm.Post(\"\/account\/team\/:team\/delete\", handler.UserHandler(handler.TeamDelete))\n\tm.Get(\"\/account\/team\/:team\/members\/add\", handler.UserHandler(handler.TeamMemberAdd))\n\tm.Post(\"\/account\/team\/:team\/members\/add\", handler.UserHandler(handler.TeamMemberInvite))\n\tm.Get(\"\/account\/team\/:team\/members\/edit\", handler.UserHandler(handler.TeamMemberEdit))\n\tm.Post(\"\/account\/team\/:team\/members\/edit\", handler.UserHandler(handler.TeamMemberUpdate))\n\tm.Post(\"\/account\/team\/:team\/members\/delete\", handler.UserHandler(handler.TeamMemberDelete))\n\tm.Get(\"\/account\/team\/:team\/members\", handler.UserHandler(handler.TeamMembers))\n\n\t\/\/ handlers for system administration\n\tm.Get(\"\/account\/admin\/settings\", handler.AdminHandler(handler.AdminSettings))\n\tm.Post(\"\/account\/admin\/settings\", handler.AdminHandler(handler.AdminSettingsUpdate))\n\tm.Get(\"\/account\/admin\/users\/edit\", handler.AdminHandler(handler.AdminUserEdit))\n\tm.Post(\"\/account\/admin\/users\/edit\", handler.AdminHandler(handler.AdminUserUpdate))\n\tm.Post(\"\/account\/admin\/users\/delete\", handler.AdminHandler(handler.AdminUserDelete))\n\tm.Get(\"\/account\/admin\/users\/add\", handler.AdminHandler(handler.AdminUserAdd))\n\tm.Post(\"\/account\/admin\/users\", handler.AdminHandler(handler.AdminUserInvite))\n\tm.Get(\"\/account\/admin\/users\", handler.AdminHandler(handler.AdminUserList))\n\n\t\/\/ handlers for GitHub post-commit hooks\n\tm.Post(\"\/hook\/github.com\", handler.ErrorHandler(hookHandler.Hook))\n\n\t\/\/ handlers for first-time installation\n\tm.Get(\"\/install\", handler.ErrorHandler(handler.Install))\n\tm.Post(\"\/install\", handler.ErrorHandler(handler.InstallPost))\n\n\t\/\/ handlers for repository, commits and build details\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\/build\/:label\/out.txt\", handler.RepoHandler(handler.BuildOut))\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\/build\/:label\", handler.RepoHandler(handler.CommitShow))\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\", handler.RepoHandler(handler.CommitShow))\n\tm.Get(\"\/:host\/:owner\/:name\/tree\", handler.RepoHandler(handler.RepoDashboard))\n\tm.Get(\"\/:host\/:owner\/:name\/status.png\", handler.ErrorHandler(handler.Badge))\n\tm.Get(\"\/:host\/:owner\/:name\/settings\", handler.RepoAdminHandler(handler.RepoSettingsForm))\n\tm.Get(\"\/:host\/:owner\/:name\/params\", handler.RepoAdminHandler(handler.RepoParamsForm))\n\tm.Get(\"\/:host\/:owner\/:name\/badges\", handler.RepoAdminHandler(handler.RepoBadges))\n\tm.Get(\"\/:host\/:owner\/:name\/keys\", handler.RepoAdminHandler(handler.RepoKeys))\n\tm.Get(\"\/:host\/:owner\/:name\/delete\", handler.RepoAdminHandler(handler.RepoDeleteForm))\n\tm.Post(\"\/:host\/:owner\/:name\/delete\", handler.RepoAdminHandler(handler.RepoDelete))\n\tm.Get(\"\/:host\/:owner\/:name\", handler.RepoHandler(handler.RepoDashboard))\n\tm.Post(\"\/:host\/:owner\/:name\", handler.RepoHandler(handler.RepoUpdate))\n\thttp.Handle(\"\/feed\", websocket.Handler(channel.Read))\n\n\t\/\/ no routes are served at the root URL. Instead we will\n\t\/\/ redirect the user to his\/her dashboard page.\n\tm.Get(\"\/\", http.RedirectHandler(\"\/dashboard\", http.StatusSeeOther))\n\n\t\/\/ the first time a page is requested we should record\n\t\/\/ the scheme and hostname.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ our multiplexer is a bit finnicky and therefore requires\n\t\t\/\/ us to strip any trailing slashes in order to correctly\n\t\t\/\/ find and match a route.\n\t\tif r.URL.Path != \"\/\" && strings.HasSuffix(r.URL.Path, \"\/\") {\n\t\t\thttp.Redirect(w, r, r.URL.Path[:len(r.URL.Path)-1], http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ standard header variables that should be set, for good measure.\n\t\tw.Header().Add(\"Cache-Control\", \"no-cache, no-store, max-age=0, must-revalidate\")\n\t\tw.Header().Add(\"X-Frame-Options\", \"DENY\")\n\t\tw.Header().Add(\"X-Content-Type-Options\", \"nosniff\")\n\t\tw.Header().Add(\"X-XSS-Protection\", \"1; mode=block\")\n\n\t\t\/\/ ok, now we're ready to serve the request.\n\t\tm.ServeHTTP(w, r)\n\t})\n}\n<commit_msg>Remove unused -path<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/GeertJohan\/go.rice\"\n\t\"github.com\/bmizerany\/pat\"\n\n\t\"github.com\/drone\/drone\/pkg\/build\/docker\"\n\t\"github.com\/drone\/drone\/pkg\/channel\"\n\t\"github.com\/drone\/drone\/pkg\/database\"\n\t\"github.com\/drone\/drone\/pkg\/handler\"\n\t\"github.com\/drone\/drone\/pkg\/queue\"\n)\n\nvar (\n\t\/\/ port the server will run on\n\tport string\n\n\t\/\/ database driver used to connect to the database\n\tdriver string\n\n\t\/\/ driver specific connection information. In this\n\t\/\/ case, it should be the location of the SQLite file\n\tdatasource string\n\n\t\/\/ optional flags for tls listener\n\tsslcert string\n\tsslkey string\n\n\t\/\/ build will timeout after N milliseconds.\n\t\/\/ this will default to 500 minutes (6 hours)\n\ttimeout time.Duration\n\n\t\/\/ commit sha for the current build.\n\tversion string\n)\n\nfunc main() {\n\t\/\/ parse command line flags\n\tflag.StringVar(&port, \"port\", \":8080\", \"\")\n\tflag.StringVar(&driver, \"driver\", \"sqlite3\", \"\")\n\tflag.StringVar(&datasource, \"datasource\", \"drone.sqlite\", \"\")\n\tflag.StringVar(&sslcert, \"sslcert\", \"\", \"\")\n\tflag.StringVar(&sslkey, \"sslkey\", \"\", \"\")\n\tflag.DurationVar(&timeout, \"timeout\", 300*time.Minute, \"\")\n\tflag.Parse()\n\n\t\/\/ validate the TLS arguments\n\tcheckTLSFlags()\n\n\t\/\/ setup database and handlers\n\tif err := database.Init(driver, datasource); err != nil {\n\t\tlog.Fatal(\"Can't initialize database: \", err)\n\t}\n\tsetupStatic()\n\tsetupHandlers()\n\n\t\/\/ debug\n\tlog.Printf(\"starting drone version %s on port %s\\n\", version, port)\n\n\t\/\/ start webserver using HTTPS or HTTP\n\tif sslcert != \"\" && sslkey != \"\" {\n\t\tpanic(http.ListenAndServeTLS(port, sslcert, sslkey, nil))\n\t} else {\n\t\tpanic(http.ListenAndServe(port, nil))\n\t}\n}\n\n\/\/ checking if the TLS flags where supplied correctly.\nfunc checkTLSFlags() {\n\n\tif sslcert != \"\" && sslkey == \"\" {\n\t\tlog.Fatal(\"invalid configuration: -sslkey unspecified, but -sslcert was specified.\")\n\t} else if sslcert == \"\" && sslkey != \"\" {\n\t\tlog.Fatal(\"invalid configuration: -sslcert unspecified, but -sslkey was specified.\")\n\t}\n\n}\n\n\/\/ setup routes for static assets. These assets may\n\/\/ be directly embedded inside the application using\n\/\/ the `rice embed` command, else they are served from disk.\nfunc setupStatic() {\n\tbox := rice.MustFindBox(\"assets\")\n\thttp.Handle(\"\/css\/\", http.FileServer(box.HTTPBox()))\n\thttp.Handle(\"\/js\/\", http.FileServer(box.HTTPBox()))\n\n\t\/\/ we need to intercept all attempts to serve images\n\t\/\/ so that we can add a cache-control settings\n\tvar images = http.FileServer(box.HTTPBox())\n\thttp.HandleFunc(\"\/img\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.HasPrefix(r.URL.Path, \"\/img\/build_\") {\n\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\t}\n\n\t\t\/\/ serce images\n\t\timages.ServeHTTP(w, r)\n\t})\n}\n\n\/\/ setup routes for serving dynamic content.\nfunc setupHandlers() {\n\tqueueRunner := queue.NewBuildRunner(docker.New(), timeout)\n\tqueue := queue.Start(runtime.NumCPU(), queueRunner)\n\n\thookHandler := handler.NewHookHandler(queue)\n\n\tm := pat.New()\n\tm.Get(\"\/login\", handler.ErrorHandler(handler.Login))\n\tm.Post(\"\/login\", handler.ErrorHandler(handler.Authorize))\n\tm.Get(\"\/logout\", handler.ErrorHandler(handler.Logout))\n\tm.Get(\"\/forgot\", handler.ErrorHandler(handler.Forgot))\n\tm.Post(\"\/forgot\", handler.ErrorHandler(handler.ForgotPost))\n\tm.Get(\"\/reset\", handler.ErrorHandler(handler.Reset))\n\tm.Post(\"\/reset\", handler.ErrorHandler(handler.ResetPost))\n\tm.Get(\"\/signup\", handler.ErrorHandler(handler.SignUp))\n\tm.Post(\"\/signup\", handler.ErrorHandler(handler.SignUpPost))\n\tm.Get(\"\/register\", handler.ErrorHandler(handler.Register))\n\tm.Post(\"\/register\", handler.ErrorHandler(handler.RegisterPost))\n\tm.Get(\"\/accept\", handler.UserHandler(handler.TeamMemberAccept))\n\n\t\/\/ handlers for setting up your GitHub repository\n\tm.Post(\"\/new\/github.com\", handler.UserHandler(handler.RepoCreateGithub))\n\tm.Get(\"\/new\/github.com\", handler.UserHandler(handler.RepoAdd))\n\n\t\/\/ handlers for linking your GitHub account\n\tm.Get(\"\/auth\/login\/github\", handler.UserHandler(handler.LinkGithub))\n\n\t\/\/ handlers for dashboard pages\n\tm.Get(\"\/dashboard\/team\/:team\", handler.UserHandler(handler.TeamShow))\n\tm.Get(\"\/dashboard\", handler.UserHandler(handler.UserShow))\n\n\t\/\/ handlers for user account management\n\tm.Get(\"\/account\/user\/profile\", handler.UserHandler(handler.UserEdit))\n\tm.Post(\"\/account\/user\/profile\", handler.UserHandler(handler.UserUpdate))\n\tm.Get(\"\/account\/user\/delete\", handler.UserHandler(handler.UserDeleteConfirm))\n\tm.Post(\"\/account\/user\/delete\", handler.UserHandler(handler.UserDelete))\n\tm.Get(\"\/account\/user\/password\", handler.UserHandler(handler.UserPass))\n\tm.Post(\"\/account\/user\/password\", handler.UserHandler(handler.UserPassUpdate))\n\tm.Get(\"\/account\/user\/teams\/add\", handler.UserHandler(handler.TeamAdd))\n\tm.Post(\"\/account\/user\/teams\/add\", handler.UserHandler(handler.TeamCreate))\n\tm.Get(\"\/account\/user\/teams\", handler.UserHandler(handler.UserTeams))\n\n\t\/\/ handlers for team managements\n\tm.Get(\"\/account\/team\/:team\/profile\", handler.UserHandler(handler.TeamEdit))\n\tm.Post(\"\/account\/team\/:team\/profile\", handler.UserHandler(handler.TeamUpdate))\n\tm.Get(\"\/account\/team\/:team\/delete\", handler.UserHandler(handler.TeamDeleteConfirm))\n\tm.Post(\"\/account\/team\/:team\/delete\", handler.UserHandler(handler.TeamDelete))\n\tm.Get(\"\/account\/team\/:team\/members\/add\", handler.UserHandler(handler.TeamMemberAdd))\n\tm.Post(\"\/account\/team\/:team\/members\/add\", handler.UserHandler(handler.TeamMemberInvite))\n\tm.Get(\"\/account\/team\/:team\/members\/edit\", handler.UserHandler(handler.TeamMemberEdit))\n\tm.Post(\"\/account\/team\/:team\/members\/edit\", handler.UserHandler(handler.TeamMemberUpdate))\n\tm.Post(\"\/account\/team\/:team\/members\/delete\", handler.UserHandler(handler.TeamMemberDelete))\n\tm.Get(\"\/account\/team\/:team\/members\", handler.UserHandler(handler.TeamMembers))\n\n\t\/\/ handlers for system administration\n\tm.Get(\"\/account\/admin\/settings\", handler.AdminHandler(handler.AdminSettings))\n\tm.Post(\"\/account\/admin\/settings\", handler.AdminHandler(handler.AdminSettingsUpdate))\n\tm.Get(\"\/account\/admin\/users\/edit\", handler.AdminHandler(handler.AdminUserEdit))\n\tm.Post(\"\/account\/admin\/users\/edit\", handler.AdminHandler(handler.AdminUserUpdate))\n\tm.Post(\"\/account\/admin\/users\/delete\", handler.AdminHandler(handler.AdminUserDelete))\n\tm.Get(\"\/account\/admin\/users\/add\", handler.AdminHandler(handler.AdminUserAdd))\n\tm.Post(\"\/account\/admin\/users\", handler.AdminHandler(handler.AdminUserInvite))\n\tm.Get(\"\/account\/admin\/users\", handler.AdminHandler(handler.AdminUserList))\n\n\t\/\/ handlers for GitHub post-commit hooks\n\tm.Post(\"\/hook\/github.com\", handler.ErrorHandler(hookHandler.Hook))\n\n\t\/\/ handlers for first-time installation\n\tm.Get(\"\/install\", handler.ErrorHandler(handler.Install))\n\tm.Post(\"\/install\", handler.ErrorHandler(handler.InstallPost))\n\n\t\/\/ handlers for repository, commits and build details\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\/build\/:label\/out.txt\", handler.RepoHandler(handler.BuildOut))\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\/build\/:label\", handler.RepoHandler(handler.CommitShow))\n\tm.Get(\"\/:host\/:owner\/:name\/commit\/:commit\", handler.RepoHandler(handler.CommitShow))\n\tm.Get(\"\/:host\/:owner\/:name\/tree\", handler.RepoHandler(handler.RepoDashboard))\n\tm.Get(\"\/:host\/:owner\/:name\/status.png\", handler.ErrorHandler(handler.Badge))\n\tm.Get(\"\/:host\/:owner\/:name\/settings\", handler.RepoAdminHandler(handler.RepoSettingsForm))\n\tm.Get(\"\/:host\/:owner\/:name\/params\", handler.RepoAdminHandler(handler.RepoParamsForm))\n\tm.Get(\"\/:host\/:owner\/:name\/badges\", handler.RepoAdminHandler(handler.RepoBadges))\n\tm.Get(\"\/:host\/:owner\/:name\/keys\", handler.RepoAdminHandler(handler.RepoKeys))\n\tm.Get(\"\/:host\/:owner\/:name\/delete\", handler.RepoAdminHandler(handler.RepoDeleteForm))\n\tm.Post(\"\/:host\/:owner\/:name\/delete\", handler.RepoAdminHandler(handler.RepoDelete))\n\tm.Get(\"\/:host\/:owner\/:name\", handler.RepoHandler(handler.RepoDashboard))\n\tm.Post(\"\/:host\/:owner\/:name\", handler.RepoHandler(handler.RepoUpdate))\n\thttp.Handle(\"\/feed\", websocket.Handler(channel.Read))\n\n\t\/\/ no routes are served at the root URL. Instead we will\n\t\/\/ redirect the user to his\/her dashboard page.\n\tm.Get(\"\/\", http.RedirectHandler(\"\/dashboard\", http.StatusSeeOther))\n\n\t\/\/ the first time a page is requested we should record\n\t\/\/ the scheme and hostname.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ our multiplexer is a bit finnicky and therefore requires\n\t\t\/\/ us to strip any trailing slashes in order to correctly\n\t\t\/\/ find and match a route.\n\t\tif r.URL.Path != \"\/\" && strings.HasSuffix(r.URL.Path, \"\/\") {\n\t\t\thttp.Redirect(w, r, r.URL.Path[:len(r.URL.Path)-1], http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ standard header variables that should be set, for good measure.\n\t\tw.Header().Add(\"Cache-Control\", \"no-cache, no-store, max-age=0, must-revalidate\")\n\t\tw.Header().Add(\"X-Frame-Options\", \"DENY\")\n\t\tw.Header().Add(\"X-Content-Type-Options\", \"nosniff\")\n\t\tw.Header().Add(\"X-XSS-Protection\", \"1; mode=block\")\n\n\t\t\/\/ ok, now we're ready to serve the request.\n\t\tm.ServeHTTP(w, r)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is inserted at build using --ldflags -X\nvar Version = \"(unknown version)\"\n\nconst socketName = \"\/var\/run\/edgectl.socket\"\nconst logfile = \"\/tmp\/edgectl.log\"\nconst apiVersion = 1\n\nvar failedToConnect = `Failed to connect to the daemon. Is it still running?\nThe daemon's log output in ` + logfile + ` may have more information.\nStart the daemon using \"sudo edgectl daemon\" if it is not running.\n`\nvar displayVersion = fmt.Sprintf(\"v%s (api v%d)\", Version, apiVersion)\nvar edgectl string\n\nfunc main() {\n\t\/\/ Figure out our executable and save it\n\tfunc() {\n\t\tvar err error\n\t\tedgectl, err = os.Executable()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Internal error: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t\/\/ Notice early if we're impersonating Teleproxy\n\tif len(os.Args) == 3 && os.Args[1] == \"teleproxy\" {\n\t\tfunc() {\n\t\t\tvar err error\n\t\t\tswitch os.Args[2] {\n\t\t\tcase \"intercept\":\n\t\t\t\terr = RunAsTeleproxyIntercept()\n\t\t\tcase \"bridge\":\n\t\t\t\terr = RunAsTeleproxyBridge()\n\t\t\tdefault:\n\t\t\t\treturn \/\/ Allow normal CLI error handling to proceed\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error: %v\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}()\n\t}\n\n\t\/\/ Let the daemon take care of everything if possible\n\tfailedToConnectErr := mainViaDaemon()\n\n\t\/\/ Couldn't reach the daemon. Try to handle things locally.\n\t\/\/ ...\n\n\trootCmd := &cobra.Command{\n\t\tUse: \"edgectl\",\n\t\tShort: \"Edge Control (daemon unavailable)\",\n\t\tSilenceUsage: true, \/\/ https:\/\/github.com\/spf13\/cobra\/issues\/340\n\t\tArgs: cobra.ArbitraryArgs,\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\tfmt.Println(failedToConnect)\n\t\t\treturn failedToConnectErr\n\t\t},\n\t}\n\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"version\",\n\t\tShort: \"Show program's version number and exit\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\tfmt.Println(failedToConnect)\n\t\t\tfmt.Println(\"Client\", displayVersion)\n\t\t\tfmt.Println()\n\t\t\treturn failedToConnectErr\n\t\t},\n\t})\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"daemon-foreground\",\n\t\tShort: \"Launch Edge Control Daemon in the foreground (debug)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tHidden: true,\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\treturn RunAsDaemon()\n\t\t},\n\t})\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"daemon\",\n\t\tShort: \"Launch Edge Control Daemon in the background (sudo)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tRunE: launchDaemon,\n\t})\n\trootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + \"\\n\" + failedToConnect)\n\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc launchDaemon(ccmd *cobra.Command, _ []string) error {\n\tif os.Geteuid() != 0 {\n\t\tfmt.Println(\"Edge Control Daemon must be launched as root.\")\n\t\tfmt.Printf(\"\\n sudo %s\\n\\n\", ccmd.CommandPath())\n\t\treturn errors.New(\"root privileges required\")\n\t}\n\tfmt.Println(\"Launching Edge Control Daemon\", displayVersion)\n\n\tcmd := exec.Command(edgectl, \"daemon-foreground\")\n\tcmd.Env = os.Environ()\n\tcmd.Stdin = nil\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\tcmd.ExtraFiles = nil\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to launch the server\")\n\t}\n\n\tsuccess := false\n\tfor count := 0; count < 40; count++ {\n\t\tif isServerRunning() {\n\t\t\tsuccess = true\n\t\t\tbreak\n\t\t}\n\t\tif count == 4 {\n\t\t\tfmt.Println(\"Waiting for daemon to start...\")\n\t\t}\n\t\ttime.Sleep(250 * time.Millisecond)\n\t}\n\tif !success {\n\t\tfmt.Println(\"Server did not come up!\")\n\t\tfmt.Printf(\"Take a look at %s for more information.\\n\", logfile)\n\t\treturn errors.New(\"launch failed\")\n\t}\n\treturn nil\n}\n<commit_msg>Give Edge Control a version number when built directly<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is inserted at build using --ldflags -X\n\/\/ But we don't really build that way any longer.\n\/\/ So include a fallback version number that's not useless.\nvar Version = \"0.8.0\"\n\nconst socketName = \"\/var\/run\/edgectl.socket\"\nconst logfile = \"\/tmp\/edgectl.log\"\nconst apiVersion = 1\n\nvar failedToConnect = `Failed to connect to the daemon. Is it still running?\nThe daemon's log output in ` + logfile + ` may have more information.\nStart the daemon using \"sudo edgectl daemon\" if it is not running.\n`\nvar displayVersion = fmt.Sprintf(\"v%s (api v%d)\", Version, apiVersion)\n\n\/\/ edgectl is the full path to the Edge Control binary\nvar edgectl string\n\nfunc main() {\n\t\/\/ Figure out our executable and save it\n\tfunc() {\n\t\tvar err error\n\t\tedgectl, err = os.Executable()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Internal error: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t\/\/ Notice early if we're impersonating Teleproxy\n\tif len(os.Args) == 3 && os.Args[1] == \"teleproxy\" {\n\t\tfunc() {\n\t\t\tvar err error\n\t\t\tswitch os.Args[2] {\n\t\t\tcase \"intercept\":\n\t\t\t\terr = RunAsTeleproxyIntercept()\n\t\t\tcase \"bridge\":\n\t\t\t\terr = RunAsTeleproxyBridge()\n\t\t\tdefault:\n\t\t\t\treturn \/\/ Allow normal CLI error handling to proceed\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error: %v\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}()\n\t}\n\n\t\/\/ Let the daemon take care of everything if possible\n\tfailedToConnectErr := mainViaDaemon()\n\n\t\/\/ Couldn't reach the daemon. Try to handle things locally.\n\t\/\/ ...\n\n\trootCmd := &cobra.Command{\n\t\tUse: \"edgectl\",\n\t\tShort: \"Edge Control (daemon unavailable)\",\n\t\tSilenceUsage: true, \/\/ https:\/\/github.com\/spf13\/cobra\/issues\/340\n\t\tArgs: cobra.ArbitraryArgs,\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\tfmt.Println(failedToConnect)\n\t\t\treturn failedToConnectErr\n\t\t},\n\t}\n\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"version\",\n\t\tShort: \"Show program's version number and exit\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\tfmt.Println(failedToConnect)\n\t\t\tfmt.Println(\"Client\", displayVersion)\n\t\t\tfmt.Println()\n\t\t\treturn failedToConnectErr\n\t\t},\n\t})\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"daemon-foreground\",\n\t\tShort: \"Launch Edge Control Daemon in the foreground (debug)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tHidden: true,\n\t\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\t\treturn RunAsDaemon()\n\t\t},\n\t})\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"daemon\",\n\t\tShort: \"Launch Edge Control Daemon in the background (sudo)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tRunE: launchDaemon,\n\t})\n\trootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + \"\\n\" + failedToConnect)\n\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc launchDaemon(ccmd *cobra.Command, _ []string) error {\n\tif os.Geteuid() != 0 {\n\t\tfmt.Println(\"Edge Control Daemon must be launched as root.\")\n\t\tfmt.Printf(\"\\n sudo %s\\n\\n\", ccmd.CommandPath())\n\t\treturn errors.New(\"root privileges required\")\n\t}\n\tfmt.Println(\"Launching Edge Control Daemon\", displayVersion)\n\n\tcmd := exec.Command(edgectl, \"daemon-foreground\")\n\tcmd.Env = os.Environ()\n\tcmd.Stdin = nil\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\tcmd.ExtraFiles = nil\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to launch the server\")\n\t}\n\n\tsuccess := false\n\tfor count := 0; count < 40; count++ {\n\t\tif isServerRunning() {\n\t\t\tsuccess = true\n\t\t\tbreak\n\t\t}\n\t\tif count == 4 {\n\t\t\tfmt.Println(\"Waiting for daemon to start...\")\n\t\t}\n\t\ttime.Sleep(250 * time.Millisecond)\n\t}\n\tif !success {\n\t\tfmt.Println(\"Server did not come up!\")\n\t\tfmt.Printf(\"Take a look at %s for more information.\\n\", logfile)\n\t\treturn errors.New(\"launch failed\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/google\/ent\/utils\"\n\t\"github.com\/ipfs\/go-cid\"\n\tformat \"github.com\/ipfs\/go-ipld-format\"\n\t\"github.com\/ipfs\/go-merkledag\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar pullCmd = &cobra.Command{\n\tUse: \"pull [hash] [target directory]\",\n\tArgs: cobra.RangeArgs(1, 2),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\thash := args[0]\n\t\ttargetDir := \".\"\n\t\tif len(args) >= 2 {\n\t\t\ttargetDir = args[1]\n\t\t}\n\t\ttargetDir, err := filepath.Abs(targetDir)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not normalize target directory %q, %v\", targetDir, err)\n\t\t}\n\n\t\tbase, err := cid.Decode(hash)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not decode cid: %v\", err)\n\t\t}\n\n\t\tpull(base, targetDir, false)\n\n\t\tlog.Printf(\"pull %s %s\", hash, targetDir)\n\t},\n}\n\nfunc pull(base cid.Cid, targetPath string, executable bool) {\n\t_, err := os.Stat(targetPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Continue.\n\t} else if err != nil {\n\t\tlog.Fatalf(\"could not stat target path: %v\", err)\n\t}\n\tif !os.IsNotExist(err) {\n\t\tlog.Printf(\"target path %s already exists; skipping\", targetPath)\n\t\t\/\/ Skip?\n\t\treturn\n\t}\n\tfiles, err := ioutil.ReadDir(targetPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Continue.\n\t} else if err != nil {\n\t\tlog.Fatalf(\"could not read target directory %q: %v\", targetPath, err)\n\t}\n\tif len(files) > 0 {\n\t\tlog.Fatalf(\"cannot pull to non-empty directory %q\", targetPath)\n\t}\n\ttraverseRemote(base, \"\", func(p string, node format.Node) error {\n\t\tfullPath := filepath.Join(targetPath, p)\n\t\tlog.Printf(\"%s\\n\", fullPath)\n\t\tswitch node := node.(type) {\n\t\tcase *merkledag.ProtoNode:\n\t\t\terr := os.MkdirAll(fullPath, 0755)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create directory %q: %v\", fullPath, err)\n\t\t\t}\n\t\tcase *merkledag.RawNode:\n\t\t\terr := os.MkdirAll(path.Dir(fullPath), 0755)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create directory %q: %v\", fullPath, err)\n\t\t\t}\n\t\t\tmode := 0644\n\t\t\tif executable {\n\t\t\t\tmode = 0755\n\t\t\t}\n\t\t\terr = ioutil.WriteFile(fullPath, node.RawData(), os.FileMode(mode))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create file %q: %v\", fullPath, err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc traverseRemote(base cid.Cid, relativeFilename string, f func(string, format.Node) error) {\n\tobj, err := nodeService.GetObject(context.Background(), base.Hash())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tswitch base.Prefix().Codec {\n\tcase cid.DagProtobuf:\n\t\tnode, err := utils.ParseProtoNode(obj)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = f(relativeFilename, node)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor _, l := range node.Links() {\n\t\t\tif l.Name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnewRelativeFilename := path.Join(relativeFilename, l.Name)\n\t\t\ttraverseRemote(l.Cid, newRelativeFilename, f)\n\t\t}\n\tcase cid.Raw:\n\t\tnode, err := utils.ParseRawNode(obj)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = f(relativeFilename, node)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"invalid codec: %v\", base.Prefix().Codec)\n\t}\n}\n<commit_msg>Fix formatting of pull logs<commit_after>package cmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/google\/ent\/utils\"\n\t\"github.com\/ipfs\/go-cid\"\n\tformat \"github.com\/ipfs\/go-ipld-format\"\n\t\"github.com\/ipfs\/go-merkledag\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar pullCmd = &cobra.Command{\n\tUse: \"pull [hash] [target directory]\",\n\tArgs: cobra.RangeArgs(1, 2),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\thash := args[0]\n\t\ttargetDir := \".\"\n\t\tif len(args) >= 2 {\n\t\t\ttargetDir = args[1]\n\t\t}\n\t\ttargetDir, err := filepath.Abs(targetDir)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not normalize target directory %q, %v\", targetDir, err)\n\t\t}\n\n\t\tbase, err := cid.Decode(hash)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not decode cid: %v\", err)\n\t\t}\n\n\t\tpull(base, targetDir, false)\n\n\t\tlog.Printf(\"pull %s %s\", hash, targetDir)\n\t},\n}\n\nfunc pull(base cid.Cid, targetPath string, executable bool) {\n\t_, err := os.Stat(targetPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Continue.\n\t} else if err != nil {\n\t\tlog.Fatalf(\"could not stat target path: %v\", err)\n\t}\n\tif !os.IsNotExist(err) {\n\t\tlog.Printf(\"target path %s already exists; skipping\", targetPath)\n\t\t\/\/ Skip?\n\t\treturn\n\t}\n\tfiles, err := ioutil.ReadDir(targetPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Continue.\n\t} else if err != nil {\n\t\tlog.Fatalf(\"could not read target directory %q: %v\", targetPath, err)\n\t}\n\tif len(files) > 0 {\n\t\tlog.Fatalf(\"cannot pull to non-empty directory %q\", targetPath)\n\t}\n\ttraverseRemote(base, \"\", func(p string, node format.Node) error {\n\t\tfullPath := filepath.Join(targetPath, p)\n\t\tmarker := color.BlueString(\"↓\")\n\t\tfmt.Printf(\"%s %s %s\\n\", color.YellowString(node.Cid().String()), marker, fullPath)\n\t\tswitch node := node.(type) {\n\t\tcase *merkledag.ProtoNode:\n\t\t\terr := os.MkdirAll(fullPath, 0755)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create directory %q: %v\", fullPath, err)\n\t\t\t}\n\t\tcase *merkledag.RawNode:\n\t\t\terr := os.MkdirAll(path.Dir(fullPath), 0755)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create directory %q: %v\", fullPath, err)\n\t\t\t}\n\t\t\tmode := 0644\n\t\t\tif executable {\n\t\t\t\tmode = 0755\n\t\t\t}\n\t\t\terr = ioutil.WriteFile(fullPath, node.RawData(), os.FileMode(mode))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not create file %q: %v\", fullPath, err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc traverseRemote(base cid.Cid, relativeFilename string, f func(string, format.Node) error) {\n\tobj, err := nodeService.GetObject(context.Background(), base.Hash())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tswitch base.Prefix().Codec {\n\tcase cid.DagProtobuf:\n\t\tnode, err := utils.ParseProtoNode(obj)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = f(relativeFilename, node)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor _, l := range node.Links() {\n\t\t\tif l.Name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnewRelativeFilename := path.Join(relativeFilename, l.Name)\n\t\t\ttraverseRemote(l.Cid, newRelativeFilename, f)\n\t\t}\n\tcase cid.Raw:\n\t\tnode, err := utils.ParseRawNode(obj)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = f(relativeFilename, node)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"invalid codec: %v\", base.Prefix().Codec)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/helm\/pkg\/helm\"\n)\n\nconst upgradeDesc = `\nThis command upgrades a release to a new version of a chart.\n\nThe upgrade arguments must be a release and a chart. The chart\nargument can be a relative path to a packaged or unpackaged chart.\n`\n\ntype upgradeCmd struct {\n\trelease string\n\tchart string\n\tout io.Writer\n\tclient helm.Interface\n\tdryRun bool\n\tdisableHooks bool\n\tvaluesFile string\n}\n\nfunc newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command {\n\n\tupgrade := &upgradeCmd{\n\t\tout: out,\n\t\tclient: client,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"upgrade [RELEASE] [CHART]\",\n\t\tShort: \"upgrade a release\",\n\t\tLong: upgradeDesc,\n\t\tPersistentPreRunE: setupConnection,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := checkArgsLength(2, len(args), \"release name, chart path\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tupgrade.release = args[0]\n\t\t\tupgrade.chart = args[1]\n\t\t\tupgrade.client = ensureHelmClient(upgrade.client)\n\n\t\t\treturn upgrade.run()\n\t\t},\n\t}\n\n\tf := cmd.Flags()\n\tf.StringVarP(&upgrade.valuesFile, \"values\", \"f\", \"\", \"path to a values YAML file\")\n\tf.BoolVar(&upgrade.dryRun, \"dry-run\", false, \"simulate an upgrade\")\n\tf.BoolVar(&upgrade.disableHooks, \"disable-hooks\", false, \"disable pre\/post upgrade hooks\")\n\n\treturn cmd\n}\n\nfunc (u *upgradeCmd) run() error {\n\tchartPath, err := locateChartPath(u.chart)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawVals := []byte{}\n\tif u.valuesFile != \"\" {\n\t\trawVals, err = ioutil.ReadFile(u.valuesFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = u.client.UpdateRelease(u.release, chartPath, helm.UpdateValueOverrides(rawVals), helm.UpgradeDryRun(u.dryRun), helm.UpgradeDisableHooks(u.disableHooks))\n\tif err != nil {\n\t\treturn prettyError(err)\n\t}\n\n\tsuccess := u.release + \" has been upgraded. Happy Helming!\\n\"\n\tfmt.Fprintf(u.out, success)\n\n\treturn nil\n\n}\n<commit_msg>Add --set flag to `helm upgrade`<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/helm\/pkg\/helm\"\n)\n\nconst upgradeDesc = `\nThis command upgrades a release to a new version of a chart.\n\nThe upgrade arguments must be a release and a chart. The chart\nargument can be a relative path to a packaged or unpackaged chart.\n\nTo override values in a chart, use either the '--values' flag and pass in a file\nor use the '--set' flag and pass configuration from the command line.\n`\n\ntype upgradeCmd struct {\n\trelease string\n\tchart string\n\tout io.Writer\n\tclient helm.Interface\n\tdryRun bool\n\tdisableHooks bool\n\tvaluesFile string\n\tvalues *values\n}\n\nfunc newUpgradeCmd(client helm.Interface, out io.Writer) *cobra.Command {\n\n\tupgrade := &upgradeCmd{\n\t\tout: out,\n\t\tclient: client,\n\t\tvalues: new(values),\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"upgrade [RELEASE] [CHART]\",\n\t\tShort: \"upgrade a release\",\n\t\tLong: upgradeDesc,\n\t\tPersistentPreRunE: setupConnection,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := checkArgsLength(2, len(args), \"release name, chart path\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tupgrade.release = args[0]\n\t\t\tupgrade.chart = args[1]\n\t\t\tupgrade.client = ensureHelmClient(upgrade.client)\n\n\t\t\treturn upgrade.run()\n\t\t},\n\t}\n\n\tf := cmd.Flags()\n\tf.StringVarP(&upgrade.valuesFile, \"values\", \"f\", \"\", \"path to a values YAML file\")\n\tf.BoolVar(&upgrade.dryRun, \"dry-run\", false, \"simulate an upgrade\")\n\tf.Var(upgrade.values, \"set\", \"set values on the command line. Separate values with commas: key1=val1,key2=val2\")\n\tf.BoolVar(&upgrade.disableHooks, \"disable-hooks\", false, \"disable pre\/post upgrade hooks\")\n\n\treturn cmd\n}\n\nfunc (u *upgradeCmd) vals() ([]byte, error) {\n\tvar buffer bytes.Buffer\n\n\t\/\/ User specified a values file via -f\/--values\n\tif u.valuesFile != \"\" {\n\t\tbytes, err := ioutil.ReadFile(u.valuesFile)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\tbuffer.Write(bytes)\n\t}\n\n\t\/\/ User specified value pairs via --set\n\t\/\/ These override any values in the specified file\n\tif len(u.values.pairs) > 0 {\n\t\tbytes, err := u.values.yaml()\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\tbuffer.Write(bytes)\n\t}\n\n\treturn buffer.Bytes(), nil\n}\n\nfunc (u *upgradeCmd) run() error {\n\tchartPath, err := locateChartPath(u.chart)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawVals, err := u.vals()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = u.client.UpdateRelease(u.release, chartPath, helm.UpdateValueOverrides(rawVals), helm.UpgradeDryRun(u.dryRun), helm.UpgradeDisableHooks(u.disableHooks))\n\tif err != nil {\n\t\treturn prettyError(err)\n\t}\n\n\tsuccess := u.release + \" has been upgraded. Happy Helming!\\n\"\n\tfmt.Fprintf(u.out, success)\n\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main \/\/ import \"vimagination.zapto.org\/httpdir\/cmd\/httpdir\"\n\nimport (\n\t\"compress\/flate\"\n\t\"compress\/gzip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/google\/brotli\/go\/cbrotli\"\n\t\"vimagination.zapto.org\/memio\"\n)\n\nvar (\n\tpkg = flag.String(\"p\", \"main\", \"package name\")\n\tin = flag.String(\"i\", \"\", \"input filename\")\n\tout = flag.String(\"o\", \"\", \"output filename\")\n\todate = flag.String(\"d\", \"\", \"modified date [seconds since epoch]\")\n\tpath = flag.String(\"w\", \"\", \"http path\")\n\tvarname = flag.String(\"v\", \"httpdir.Default\", \"http dir variable name\")\n\tcvarname = flag.String(\"c\", \"httpdir.Default\", \"http dir compressed variable name\")\n\thelp = flag.Bool(\"h\", false, \"show help\")\n\tgzcomp = flag.Bool(\"g\", false, \"compress using gzip\")\n\tbrcomp = flag.Bool(\"b\", false, \"compress using brotli\")\n\tflcomp = flag.Bool(\"f\", false, \"compress using flate\/deflate\")\n\tsingle = flag.Bool(\"s\", false, \"use single source var and decompress\/compress for others\")\n)\n\ntype replacer struct {\n\tf *os.File\n}\n\nvar hexArr = [16]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}\n\nfunc (r replacer) Write(p []byte) (int, error) {\n\tn := 0\n\ttoWrite := make([]byte, 0, 5)\n\tfor len(p) > 0 {\n\t\trn, s := utf8.DecodeRune(p)\n\t\tif rn == utf8.RuneError {\n\t\t\ts = 1\n\t\t}\n\t\tif s > 1 || (p[0] > 0 && p[0] < 0x7f && p[0] != '\\n' && p[0] != '\\\\' && p[0] != '\"') {\n\t\t\ttoWrite = append(toWrite[:0], p[:s]...)\n\t\t} else {\n\t\t\tswitch p[0] {\n\t\t\tcase '\\n':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', 'n')\n\t\t\tcase '\\\\':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', '\\\\')\n\t\t\tcase '\"':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', '\"')\n\t\t\tdefault:\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', 'x', hexArr[p[0]>>4], hexArr[p[0]&15])\n\t\t\t}\n\t\t}\n\t\t_, err := r.f.Write(toWrite)\n\t\tn += s\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tp = p[s:]\n\t}\n\treturn n, nil\n}\n\ntype tickReplacer struct {\n\tf *os.File\n}\n\nfunc (r tickReplacer) Write(p []byte) (int, error) {\n\tvar (\n\t\tm, n int\n\t\terr error\n\t)\n\thexes := make([]byte, 0, 32)\n\ttoWrite := make([]byte, 0, 1024)\n\tfor len(p) > 0 {\n\t\tdr, s := utf8.DecodeRune(p)\n\t\tif dr == utf8.RuneError || dr == '`' || dr == '\\r' {\n\t\t\thexes = append(hexes, p[0])\n\t\t} else {\n\t\t\tif len(hexes) > 0 {\n\t\t\t\ttoWrite = toWrite[:0]\n\t\t\t\ttoWrite = append(toWrite, '`', '+', '\"')\n\t\t\t\tfor _, b := range hexes {\n\t\t\t\t\tif b == '`' {\n\t\t\t\t\t\ttoWrite = append(toWrite, '`')\n\t\t\t\t\t} else if b == '\\r' {\n\t\t\t\t\t\ttoWrite = append(toWrite, '\\\\', 'r')\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttoWrite = append(toWrite, '\\\\', 'x', hexArr[b>>4], hexArr[b&15])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttoWrite = append(toWrite, '\"', '+', '`')\n\t\t\t\t_, err = r.f.Write(toWrite)\n\t\t\t\tn += len(hexes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\thexes = hexes[:0]\n\t\t\t}\n\t\t\tm, err = r.f.Write(p[:s])\n\t\t\tn += m\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tp = p[s:]\n\t}\n\treturn n, err\n}\n\nfunc e(err error) {\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc ne(_ int, err error) {\n\te(err)\n}\n\ntype imports []string\n\nfunc (im imports) Len() int {\n\treturn len(im)\n}\n\nfunc (im imports) Less(i, j int) bool {\n\tsi := strings.HasPrefix(im[i], \"\\\"github.com\")\n\tsj := strings.HasPrefix(im[j], \"\\\"github.com\")\n\tvi := strings.HasPrefix(im[i], \"\\\"vimagination.zapto.org\")\n\tvj := strings.HasPrefix(im[j], \"\\\"vimagination.zapto.org\")\n\tif si == sj && vi == vj {\n\t\treturn im[i] < im[j]\n\t}\n\treturn !si && !vi || si && vj\n}\n\nfunc (im imports) Swap(i, j int) {\n\tim[i], im[j] = im[j], im[i]\n}\n\ntype encoding struct {\n\tBuffer []byte\n\tCompress, Decompress, Ext string\n}\n\ntype encodings []encoding\n\nfunc (e encodings) Len() int {\n\treturn len(e)\n}\n\nfunc (e encodings) Less(i, j int) bool {\n\treturn len(e[i].Buffer) < len(e[j].Buffer)\n}\n\nfunc (e encodings) Swap(i, j int) {\n\te[i], e[j] = e[j], e[i]\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *help {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\tvar (\n\t\tf *os.File\n\t\terr error\n\t\tdate int64\n\t)\n\tif *in == \"-\" || *in == \"\" {\n\t\tf = os.Stdin\n\t\tdate = time.Now().Unix()\n\t} else {\n\t\tf, err = os.Open(*in)\n\t\te(err)\n\t\tfi, err := f.Stat()\n\t\te(err)\n\t\tdate = fi.ModTime().Unix()\n\t}\n\tif *odate != \"\" {\n\t\tdate, err = strconv.ParseInt(*odate, 10, 64)\n\t\te(err)\n\t}\n\n\tdata := make(memio.Buffer, 0, 1<<20)\n\t_, err = io.Copy(&data, f)\n\te(err)\n\te(f.Close())\n\n\tim := imports{\"\\\"vimagination.zapto.org\/httpdir\\\"\", \"\\\"time\\\"\"}\n\n\tencs := make(encodings, 1, 4)\n\tencs[0] = encoding{\n\t\tBuffer: data,\n\t\tCompress: identCompress,\n\t\tDecompress: identDecompress,\n\t\tExt: \"\",\n\t}\n\n\tif *brcomp {\n\t\tvar b memio.Buffer\n\t\tbr := cbrotli.NewWriter(&b, cbrotli.WriterOptions{Quality: 11})\n\t\tbr.Write(data)\n\t\tbr.Close()\n\t\tif *single {\n\t\t\tim = append(im, brotliImport)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: brotliCompress,\n\t\t\tDecompress: brotliDecompress,\n\t\t\tExt: \".br\",\n\t\t})\n\t}\n\tif *flcomp {\n\t\tvar b memio.Buffer\n\t\tfl, _ := flate.NewWriter(&b, flate.BestCompression)\n\t\tfl.Write(data)\n\t\tfl.Close()\n\t\tif *single {\n\t\t\tim = append(im, strings.Split(flateImport, \"\\n\")...)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: flateCompress,\n\t\t\tDecompress: flateDecompress,\n\t\t\tExt: \".fl\",\n\t\t})\n\t}\n\tif *gzcomp {\n\t\tvar b memio.Buffer\n\t\tgz, _ := gzip.NewWriterLevel(&b, gzip.BestCompression)\n\t\tgz.Write(data)\n\t\tgz.Close()\n\t\tif *single {\n\t\t\tim = append(im, gzipImport)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: gzipCompress,\n\t\t\tDecompress: gzipDecompress,\n\t\t\tExt: \".gz\",\n\t\t})\n\t}\n\n\tsort.Sort(encs)\n\n\tif *single && (*gzcomp || *brcomp || *flcomp) {\n\t\tim = append(im, \"\\\"vimagination.zapto.org\/memio\\\"\")\n\t\tif encs[0].Ext != \"\" {\n\t\t\tim = append(im, \"\\\"strings\\\"\")\n\t\t}\n\t}\n\tif encs[0].Ext == \".fl\" {\n\t\tim = append(im, \"\\\"io\\\"\")\n\t}\n\n\tsort.Sort(im)\n\tvar (\n\t\timports string\n\t\text bool\n\t)\n\tfor _, i := range im {\n\t\tif !ext && (strings.HasPrefix(i, \"\\\"github.com\") || strings.HasPrefix(i, \"\\\"vimagination\")) {\n\t\t\timports += \"\\n\"\n\t\t\text = true\n\t\t}\n\t\timports += \"\t\" + i + \"\\n\"\n\t}\n\tif *out == \"-\" || *out == \"\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tf, err = os.Create(*out)\n\t\te(err)\n\t}\n\tfmt.Fprintf(f, packageStart, *pkg, imports, date)\n\tif *single {\n\t\tf.WriteString(stringStart)\n\t\tif encs[0].Ext == \"\" {\n\t\t\tne(f.WriteString(\"`\"))\n\t\t\tne(tickReplacer{f}.Write(encs[0].Buffer))\n\t\t\tne(f.WriteString(\"`\"))\n\t\t} else {\n\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\tne(replacer{f}.Write(encs[0].Buffer))\n\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t}\n\t\tf.WriteString(stringEnd)\n\t\tfor n, enc := range encs {\n\t\t\tvar (\n\t\t\t\ttempl string\n\t\t\t\tvars = []interface{}{0, *cvarname, *path + enc.Ext}\n\t\t\t)\n\t\t\tif enc.Ext == \"\" {\n\t\t\t\tvars = vars[1:]\n\t\t\t\tvars[0] = *varname\n\t\t\t\tif n == 0 {\n\t\t\t\t\ttempl = identDecompress\n\t\t\t\t} else {\n\t\t\t\t\ttempl = identCompress\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tvars[0] = len(data)\n\t\t\t\t\ttempl = enc.Decompress\n\t\t\t\t} else {\n\t\t\t\t\tvars[0] = len(enc.Buffer)\n\t\t\t\t\ttempl = enc.Compress\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Fprintf(f, templ, vars...)\n\t\t}\n\t} else {\n\t\tfor _, enc := range encs {\n\t\t\tfilename := *path + enc.Ext\n\t\t\tne(fmt.Fprintf(f, soloStart, *varname, filename))\n\t\t\tif enc.Ext == \"\" {\n\t\t\t\tne(f.WriteString(\"`\"))\n\t\t\t\tne(tickReplacer{f}.Write(enc.Buffer))\n\t\t\t\tne(f.WriteString(\"`\"))\n\t\t\t} else {\n\t\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\t\tne(replacer{f}.Write(enc.Buffer))\n\t\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\t}\n\t\t\tne(f.WriteString(soloEnd))\n\t\t}\n\t}\n\tne(fmt.Fprintf(f, packageEnd))\n\te(f.Close())\n}\n\nconst (\n\tpackageStart = `package %s\n\nimport (\n%s)\n\nfunc init() {\n\tdate := time.Unix(%d, 0)\n`\n\tstringStart = `\ts := `\n\tstringEnd = `\n`\n\tpackageEnd = `}\n`\n\tsoloStart = `\t%s.Create(%q, httpdir.FileString(`\n\tsoloEnd = `, date))\n`\n\tidentDecompress = `\tb := []byte(s)\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tidentCompress = `\t%s.Create(%q, httpdir.FileBytes(b, date))\n`\n\n\tbrotliImport = \"\\\"github.com\/google\/brotli\/go\/cbrotli\\\"\"\n\tbrotliDecompress = `\tb := make([]byte, %d)\n\tbr := cbrotli.NewReader(strings.NewReader(s))\n\tbr.Read(b)\n\tbr.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tbrotliCompress = `\tbrb := make(memio.Buffer, 0, %d)\n\tbr := cbrotli.NewWriter(&brb, cbrotli.WriterOptions{Quality: 11})\n\tbr.Write(b)\n\tbr.Close()\n\t%s.Create(%q, httpdir.FileBytes(brb, date))\n`\n\tflateImport = \"\\\"compress\/flate\\\"\"\n\tflateDecompress = `\tb := make([]byte, %d)\n\tfl := flate.NewReader(strings.NewReader(s))\n\tio.ReadFull(fl, b)\n\tfl.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tflateCompress = `\tflb := make(memio.Buffer, 0, %d)\n\tfl, _ := flate.NewWriter(&flb, flate.BestCompression)\n\tfl.Write(b)\n\tfl.Close()\n\t%s.Create(%q, httpdir.FileBytes(flb, date))\n`\n\tgzipImport = \"\\\"compress\/gzip\\\"\"\n\tgzipDecompress = `\tb := make([]byte, %d)\n\tgz := gzip.NewReader(strings.NewReader(s))\n\tgz.Read(b)\n\tgz.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tgzipCompress = `\tgzb := make(memio.Buffer, 0, %d)\n\tgz, _ := gzip.NewWriterLevel(&gzb, gzip.BestCompression)\n\tgz.Write(b)\n\tgz.Close()\n\t%s.Create(%q, httpdir.FileBytes(gzb, date))\n`\n)\n<commit_msg>added zopfli compression option to replace gzip<commit_after>package main \/\/ import \"vimagination.zapto.org\/httpdir\/cmd\/httpdir\"\n\nimport (\n\t\"compress\/flate\"\n\t\"compress\/gzip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/foobaz\/go-zopfli\/zopfli\"\n\t\"github.com\/google\/brotli\/go\/cbrotli\"\n\t\"vimagination.zapto.org\/memio\"\n)\n\nvar (\n\tpkg = flag.String(\"p\", \"main\", \"package name\")\n\tin = flag.String(\"i\", \"\", \"input filename\")\n\tout = flag.String(\"o\", \"\", \"output filename\")\n\todate = flag.String(\"d\", \"\", \"modified date [seconds since epoch]\")\n\tpath = flag.String(\"w\", \"\", \"http path\")\n\tvarname = flag.String(\"v\", \"httpdir.Default\", \"http dir variable name\")\n\tcvarname = flag.String(\"c\", \"httpdir.Default\", \"http dir compressed variable name\")\n\thelp = flag.Bool(\"h\", false, \"show help\")\n\tgzcomp = flag.Bool(\"g\", false, \"compress using gzip\")\n\tbrcomp = flag.Bool(\"b\", false, \"compress using brotli\")\n\tflcomp = flag.Bool(\"f\", false, \"compress using flate\/deflate\")\n\tsingle = flag.Bool(\"s\", false, \"use single source var and decompress\/compress for others\")\n\tzpfcomp = flag.Bool(\"z\", false, \"replace gzip with zopfli compression\")\n)\n\ntype replacer struct {\n\tf *os.File\n}\n\nvar hexArr = [16]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}\n\nfunc (r replacer) Write(p []byte) (int, error) {\n\tn := 0\n\ttoWrite := make([]byte, 0, 5)\n\tfor len(p) > 0 {\n\t\trn, s := utf8.DecodeRune(p)\n\t\tif rn == utf8.RuneError {\n\t\t\ts = 1\n\t\t}\n\t\tif s > 1 || (p[0] > 0 && p[0] < 0x7f && p[0] != '\\n' && p[0] != '\\\\' && p[0] != '\"') {\n\t\t\ttoWrite = append(toWrite[:0], p[:s]...)\n\t\t} else {\n\t\t\tswitch p[0] {\n\t\t\tcase '\\n':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', 'n')\n\t\t\tcase '\\\\':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', '\\\\')\n\t\t\tcase '\"':\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', '\"')\n\t\t\tdefault:\n\t\t\t\ttoWrite = append(toWrite[:0], '\\\\', 'x', hexArr[p[0]>>4], hexArr[p[0]&15])\n\t\t\t}\n\t\t}\n\t\t_, err := r.f.Write(toWrite)\n\t\tn += s\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tp = p[s:]\n\t}\n\treturn n, nil\n}\n\ntype tickReplacer struct {\n\tf *os.File\n}\n\nfunc (r tickReplacer) Write(p []byte) (int, error) {\n\tvar (\n\t\tm, n int\n\t\terr error\n\t)\n\thexes := make([]byte, 0, 32)\n\ttoWrite := make([]byte, 0, 1024)\n\tfor len(p) > 0 {\n\t\tdr, s := utf8.DecodeRune(p)\n\t\tif dr == utf8.RuneError || dr == '`' || dr == '\\r' {\n\t\t\thexes = append(hexes, p[0])\n\t\t} else {\n\t\t\tif len(hexes) > 0 {\n\t\t\t\ttoWrite = toWrite[:0]\n\t\t\t\ttoWrite = append(toWrite, '`', '+', '\"')\n\t\t\t\tfor _, b := range hexes {\n\t\t\t\t\tif b == '`' {\n\t\t\t\t\t\ttoWrite = append(toWrite, '`')\n\t\t\t\t\t} else if b == '\\r' {\n\t\t\t\t\t\ttoWrite = append(toWrite, '\\\\', 'r')\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttoWrite = append(toWrite, '\\\\', 'x', hexArr[b>>4], hexArr[b&15])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttoWrite = append(toWrite, '\"', '+', '`')\n\t\t\t\t_, err = r.f.Write(toWrite)\n\t\t\t\tn += len(hexes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\thexes = hexes[:0]\n\t\t\t}\n\t\t\tm, err = r.f.Write(p[:s])\n\t\t\tn += m\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tp = p[s:]\n\t}\n\treturn n, err\n}\n\nfunc e(err error) {\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc ne(_ int, err error) {\n\te(err)\n}\n\ntype imports []string\n\nfunc (im imports) Len() int {\n\treturn len(im)\n}\n\nfunc (im imports) Less(i, j int) bool {\n\tsi := strings.HasPrefix(im[i], \"\\\"github.com\")\n\tsj := strings.HasPrefix(im[j], \"\\\"github.com\")\n\tvi := strings.HasPrefix(im[i], \"\\\"vimagination.zapto.org\")\n\tvj := strings.HasPrefix(im[j], \"\\\"vimagination.zapto.org\")\n\tif si == sj && vi == vj {\n\t\treturn im[i] < im[j]\n\t}\n\treturn !si && !vi || si && vj\n}\n\nfunc (im imports) Swap(i, j int) {\n\tim[i], im[j] = im[j], im[i]\n}\n\ntype encoding struct {\n\tBuffer []byte\n\tCompress, Decompress, Ext string\n}\n\ntype encodings []encoding\n\nfunc (e encodings) Len() int {\n\treturn len(e)\n}\n\nfunc (e encodings) Less(i, j int) bool {\n\treturn len(e[i].Buffer) < len(e[j].Buffer)\n}\n\nfunc (e encodings) Swap(i, j int) {\n\te[i], e[j] = e[j], e[i]\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *help {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\tvar (\n\t\tf *os.File\n\t\terr error\n\t\tdate int64\n\t)\n\tif *in == \"-\" || *in == \"\" {\n\t\tf = os.Stdin\n\t\tdate = time.Now().Unix()\n\t} else {\n\t\tf, err = os.Open(*in)\n\t\te(err)\n\t\tfi, err := f.Stat()\n\t\te(err)\n\t\tdate = fi.ModTime().Unix()\n\t}\n\tif *odate != \"\" {\n\t\tdate, err = strconv.ParseInt(*odate, 10, 64)\n\t\te(err)\n\t}\n\n\tdata := make(memio.Buffer, 0, 1<<20)\n\t_, err = io.Copy(&data, f)\n\te(err)\n\te(f.Close())\n\n\tim := imports{\"\\\"vimagination.zapto.org\/httpdir\\\"\", \"\\\"time\\\"\"}\n\n\tencs := make(encodings, 1, 4)\n\tencs[0] = encoding{\n\t\tBuffer: data,\n\t\tCompress: identCompress,\n\t\tDecompress: identDecompress,\n\t\tExt: \"\",\n\t}\n\n\tif *brcomp {\n\t\tvar b memio.Buffer\n\t\tbr := cbrotli.NewWriter(&b, cbrotli.WriterOptions{Quality: 11})\n\t\tbr.Write(data)\n\t\tbr.Close()\n\t\tif *single {\n\t\t\tim = append(im, brotliImport)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: brotliCompress,\n\t\t\tDecompress: brotliDecompress,\n\t\t\tExt: \".br\",\n\t\t})\n\t}\n\tif *flcomp {\n\t\tvar b memio.Buffer\n\t\tfl, _ := flate.NewWriter(&b, flate.BestCompression)\n\t\tfl.Write(data)\n\t\tfl.Close()\n\t\tif *single {\n\t\t\tim = append(im, strings.Split(flateImport, \"\\n\")...)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: flateCompress,\n\t\t\tDecompress: flateDecompress,\n\t\t\tExt: \".fl\",\n\t\t})\n\t}\n\tif *gzcomp || *zpfcomp {\n\t\tvar b memio.Buffer\n\t\tif *zpfcomp {\n\t\t\tzopfli.GzipCompress(&zopfli.Options{\n\t\t\t\tNumIterations: 100,\n\t\t\t\tBlockSplitting: true,\n\t\t\t\tBlockType: 2,\n\t\t\t}, data, &b)\n\t\t} else {\n\t\t\tgz, _ := gzip.NewWriterLevel(&b, gzip.BestCompression)\n\t\t\tgz.Write(data)\n\t\t\tgz.Close()\n\t\t}\n\t\tif *single {\n\t\t\tim = append(im, gzipImport)\n\t\t}\n\t\tencs = append(encs, encoding{\n\t\t\tBuffer: b,\n\t\t\tCompress: gzipCompress,\n\t\t\tDecompress: gzipDecompress,\n\t\t\tExt: \".gz\",\n\t\t})\n\t}\n\n\tsort.Sort(encs)\n\n\tif *single && (*gzcomp || *brcomp || *flcomp) {\n\t\tim = append(im, \"\\\"vimagination.zapto.org\/memio\\\"\")\n\t\tif encs[0].Ext != \"\" {\n\t\t\tim = append(im, \"\\\"strings\\\"\")\n\t\t}\n\t}\n\tif encs[0].Ext == \".fl\" {\n\t\tim = append(im, \"\\\"io\\\"\")\n\t}\n\n\tsort.Sort(im)\n\tvar (\n\t\timports string\n\t\text bool\n\t)\n\tfor _, i := range im {\n\t\tif !ext && (strings.HasPrefix(i, \"\\\"github.com\") || strings.HasPrefix(i, \"\\\"vimagination\")) {\n\t\t\timports += \"\\n\"\n\t\t\text = true\n\t\t}\n\t\timports += \"\t\" + i + \"\\n\"\n\t}\n\tif *out == \"-\" || *out == \"\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tf, err = os.Create(*out)\n\t\te(err)\n\t}\n\tfmt.Fprintf(f, packageStart, *pkg, imports, date)\n\tif *single {\n\t\tf.WriteString(stringStart)\n\t\tif encs[0].Ext == \"\" {\n\t\t\tne(f.WriteString(\"`\"))\n\t\t\tne(tickReplacer{f}.Write(encs[0].Buffer))\n\t\t\tne(f.WriteString(\"`\"))\n\t\t} else {\n\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\tne(replacer{f}.Write(encs[0].Buffer))\n\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t}\n\t\tf.WriteString(stringEnd)\n\t\tfor n, enc := range encs {\n\t\t\tvar (\n\t\t\t\ttempl string\n\t\t\t\tvars = []interface{}{0, *cvarname, *path + enc.Ext}\n\t\t\t)\n\t\t\tif enc.Ext == \"\" {\n\t\t\t\tvars = vars[1:]\n\t\t\t\tvars[0] = *varname\n\t\t\t\tif n == 0 {\n\t\t\t\t\ttempl = identDecompress\n\t\t\t\t} else {\n\t\t\t\t\ttempl = identCompress\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tvars[0] = len(data)\n\t\t\t\t\ttempl = enc.Decompress\n\t\t\t\t} else {\n\t\t\t\t\tvars[0] = len(enc.Buffer)\n\t\t\t\t\ttempl = enc.Compress\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Fprintf(f, templ, vars...)\n\t\t}\n\t} else {\n\t\tfor _, enc := range encs {\n\t\t\tfilename := *path + enc.Ext\n\t\t\tne(fmt.Fprintf(f, soloStart, *varname, filename))\n\t\t\tif enc.Ext == \"\" {\n\t\t\t\tne(f.WriteString(\"`\"))\n\t\t\t\tne(tickReplacer{f}.Write(enc.Buffer))\n\t\t\t\tne(f.WriteString(\"`\"))\n\t\t\t} else {\n\t\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\t\tne(replacer{f}.Write(enc.Buffer))\n\t\t\t\tne(f.WriteString(\"\\\"\"))\n\t\t\t}\n\t\t\tne(f.WriteString(soloEnd))\n\t\t}\n\t}\n\tne(fmt.Fprintf(f, packageEnd))\n\te(f.Close())\n}\n\nconst (\n\tpackageStart = `package %s\n\nimport (\n%s)\n\nfunc init() {\n\tdate := time.Unix(%d, 0)\n`\n\tstringStart = `\ts := `\n\tstringEnd = `\n`\n\tpackageEnd = `}\n`\n\tsoloStart = `\t%s.Create(%q, httpdir.FileString(`\n\tsoloEnd = `, date))\n`\n\tidentDecompress = `\tb := []byte(s)\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tidentCompress = `\t%s.Create(%q, httpdir.FileBytes(b, date))\n`\n\n\tbrotliImport = \"\\\"github.com\/google\/brotli\/go\/cbrotli\\\"\"\n\tbrotliDecompress = `\tb := make([]byte, %d)\n\tbr := cbrotli.NewReader(strings.NewReader(s))\n\tbr.Read(b)\n\tbr.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tbrotliCompress = `\tbrb := make(memio.Buffer, 0, %d)\n\tbr := cbrotli.NewWriter(&brb, cbrotli.WriterOptions{Quality: 11})\n\tbr.Write(b)\n\tbr.Close()\n\t%s.Create(%q, httpdir.FileBytes(brb, date))\n`\n\tflateImport = \"\\\"compress\/flate\\\"\"\n\tflateDecompress = `\tb := make([]byte, %d)\n\tfl := flate.NewReader(strings.NewReader(s))\n\tio.ReadFull(fl, b)\n\tfl.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tflateCompress = `\tflb := make(memio.Buffer, 0, %d)\n\tfl, _ := flate.NewWriter(&flb, flate.BestCompression)\n\tfl.Write(b)\n\tfl.Close()\n\t%s.Create(%q, httpdir.FileBytes(flb, date))\n`\n\tgzipImport = \"\\\"compress\/gzip\\\"\"\n\tgzipDecompress = `\tb := make([]byte, %d)\n\tgz := gzip.NewReader(strings.NewReader(s))\n\tgz.Read(b)\n\tgz.Close()\n\t%s.Create(%q, httpdir.FileString(s, date))\n`\n\tgzipCompress = `\tgzb := make(memio.Buffer, 0, %d)\n\tgz, _ := gzip.NewWriterLevel(&gzb, gzip.BestCompression)\n\tgz.Write(b)\n\tgz.Close()\n\t%s.Create(%q, httpdir.FileBytes(gzb, date))\n`\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ Repo represents the repository under test\n\/\/ For more information about this structure take a look to the README\ntype Repo struct {\n\t\/\/ URL is the url of the repository\n\tURL string\n\n\t\/\/ MasterBranch is the master branch of this repository\n\tMasterBranch string\n\n\t\/\/ PR is the pull request number\n\tPR int\n\n\t\/\/ RefreshTime is the time to wait for checking if a pull request needs to be tested\n\tRefreshTime string\n\n\t\/\/ Toke is the repository access token\n\tToken string\n\n\t\/\/ Setup contains the conmmands needed to setup the environment\n\tSetup []string\n\n\t\/\/ Run contains the commands to run the test\n\tRun []string\n\n\t\/\/ Teardown contains the commands to be executed once Run ends\n\tTeardown []string\n\n\t\/\/ OnSuccess contains the commands to be executed if Setup, Run and Teardown finished correctly\n\tOnSuccess []string\n\n\t\/\/ OnFailure contains the commands to be executed if any of Setup, Run or Teardown fail\n\tOnFailure []string\n\n\t\/\/ TTY specify whether a tty must be allocate to run the stages\n\tTTY bool\n\n\t\/\/ PostOnSuccess is the comment to be posted if the test finished correctly\n\tPostOnSuccess string\n\n\t\/\/ PostOnFailure is the comment to be posted if the test fails\n\tPostOnFailure string\n\n\t\/\/ LogDir is the logs directory\n\tLogDir string\n\n\t\/\/ Language is the language of the repository\n\tLanguage RepoLanguage\n\n\t\/\/ CommentTrigger is the comment that must be present to trigger the test\n\tCommentTrigger RepoComment\n\n\t\/\/ LogServer contains the information of the server where the logs must be placed\n\tLogServer LogServer\n\n\t\/\/ Whitelist is the list of users whose pull request can be tested\n\tWhitelist string\n\n\t\/\/ cvr control version repository\n\tcvr CVR\n\n\t\/\/ refresh is RefreshTime once parsed\n\trefresh time.Duration\n\n\t\/\/ env contains the environment variables to be used in each stage\n\tenv []string\n\n\t\/\/ whitelistUsers is the whitelist once parsed\n\twhitelistUsers []string\n\n\t\/\/ logger of the repository\n\tlogger *logrus.Entry\n\n\t\/\/ prConfig is the configuration used to create pull request objects\n\tprConfig pullRequestConfig\n}\n\nconst (\n\tlogDirMode = 0755\n\tlogFileMode = 0664\n\tlogServerUser = \"root\"\n)\n\nvar defaultEnv = []string{\"CI=true\", \"LOCALCI=true\"}\n\nvar runTestsInParallel bool\n\nvar testLock sync.Mutex\n\nfunc (r *Repo) setupCvr() error {\n\tvar err error\n\n\t\/\/ validate url\n\tr.URL = strings.TrimSpace(r.URL)\n\tif len(r.URL) == 0 {\n\t\treturn fmt.Errorf(\"missing repository url\")\n\t}\n\n\t\/\/ set repository logger\n\tr.logger = ciLog.WithFields(logrus.Fields{\n\t\t\"Repo\": r.URL,\n\t})\n\n\t\/\/ get the control version repository\n\tr.cvr, err = newCVR(r.URL, r.Token)\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn err\n}\n\nfunc (r *Repo) setupLogServer() error {\n\tif reflect.DeepEqual(r.LogServer, LogServer{}) {\n\t\treturn nil\n\t}\n\n\tif len(r.LogServer.IP) == 0 {\n\t\treturn fmt.Errorf(\"missing server ip\")\n\t}\n\n\tif len(r.LogServer.User) == 0 {\n\t\tr.LogServer.User = logServerUser\n\t}\n\n\tif len(r.LogServer.Dir) == 0 {\n\t\tr.LogServer.Dir = defaultLogDir\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupLogDir() error {\n\t\/\/ create log directory\n\tif err := os.MkdirAll(r.LogDir, logDirMode); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupRefreshTime() error {\n\tvar err error\n\n\t\/\/ validate refresh time\n\tr.refresh, err = time.ParseDuration(r.RefreshTime)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse refresh time '%s' %s\", r.RefreshTime, err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupCommentTrigger() error {\n\tif reflect.DeepEqual(r.CommentTrigger, RepoComment{}) {\n\t\treturn nil\n\t}\n\n\tif len(r.CommentTrigger.Comment) == 0 {\n\t\treturn fmt.Errorf(\"missing comment trigger\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupLanguage() error {\n\treturn r.Language.setup()\n}\n\nfunc (r *Repo) setupStages() error {\n\tif len(r.Run) == 0 {\n\t\treturn fmt.Errorf(\"missing run commands\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupWhitelist() error {\n\t\/\/ get the list of users\n\tr.whitelistUsers = strings.Split(r.Whitelist, \",\")\n\treturn nil\n}\n\nfunc (r *Repo) setupEnvars() error {\n\t\/\/ add environment variables\n\tr.env = os.Environ()\n\tr.env = append(r.env, defaultEnv...)\n\trepoSlug := fmt.Sprintf(\"LOCALCI_REPO_SLUG=%s\", r.cvr.getRepoSlug())\n\tr.env = append(r.env, repoSlug)\n\n\treturn nil\n}\n\n\/\/ setup the repository. This method MUST BE called before use any other\nfunc (r *Repo) setup() error {\n\tvar err error\n\n\tsetupFuncs := []func() error{\n\t\tr.setupCvr,\n\t\tr.setupRefreshTime,\n\t\tr.setupLogDir,\n\t\tr.setupLogServer,\n\t\tr.setupCommentTrigger,\n\t\tr.setupLanguage,\n\t\tr.setupStages,\n\t\tr.setupWhitelist,\n\t\tr.setupEnvars,\n\t}\n\n\tfor _, setupFunc := range setupFuncs {\n\t\tif err = setupFunc(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.prConfig = pullRequestConfig{\n\t\tcvr: r.cvr,\n\t\tlogger: r.logger,\n\t\tcommentTrigger: r.CommentTrigger,\n\t\tpostOnFailure: r.PostOnFailure,\n\t\tpostOnSuccess: r.PostOnSuccess,\n\t}\n\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn nil\n}\n\n\/\/ loop to monitor the repository\nfunc (r *Repo) loop() {\n\trevisionsTested := make(map[string]revision)\n\n\tr.logger.Debugf(\"monitoring in a loop the repository: %+v\", *r)\n\n\tappendPullRequests := func(revisions *[]revision, prs []int) error {\n\t\tfor _, prNumber := range prs {\n\t\t\tr.logger.Debugf(\"requesting pull request %d\", prNumber)\n\t\t\tpr, err := newPullRequest(prNumber, r.prConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get pull request '%d' %s\", prNumber, err)\n\t\t\t}\n\t\t\t*revisions = append(*revisions, pr)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tvar revisionsToTest []revision\n\n\t\t\/\/ append master branch\n\t\tr.logger.Debugf(\"requesting master branch: %s\", r.MasterBranch)\n\t\tbranch, err := newRepoBranch(r.MasterBranch, r.cvr, r.logger)\n\t\tif err != nil {\n\t\t\tr.logger.Warnf(\"failed to get master branch %s: %s\", r.MasterBranch, err)\n\t\t} else {\n\t\t\trevisionsToTest = append(revisionsToTest, branch)\n\t\t}\n\n\t\t\/\/ append pull requests\n\t\tif r.PR != 0 {\n\t\t\t\/\/ if PR is not 0 then we have to monitor just one PR\n\t\t\tif err = appendPullRequests(&revisionsToTest, []int{r.PR}); err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to append pull request %d\", r.PR, err)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ append open pull request\n\t\t\tr.logger.Debugf(\"requesting open pull requests\")\n\t\t\tprs, err := r.cvr.getOpenPullRequests()\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to get open pull requests: %s\", err)\n\t\t\t} else if err = appendPullRequests(&revisionsToTest, prs); err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to append pull requests %+v: %s\", prs, err)\n\t\t\t}\n\t\t}\n\n\t\tr.logger.Debugf(\"testing revisions: %#v\", revisionsToTest)\n\t\tr.testRevisions(revisionsToTest, &revisionsTested)\n\n\t\tr.logger.Debugf(\"going to sleep: %s\", r.RefreshTime)\n\t\ttime.Sleep(r.refresh)\n\t}\n}\n\nfunc (r *Repo) testRevisions(revisions []revision, revisionsTested *map[string]revision) {\n\t\/\/ remove revisions that are not in the list of open pull request and already tested\n\tfor k, v := range *revisionsTested {\n\t\tfound := false\n\t\t\/\/ iterate over open pull requests and master branch\n\t\tfor _, r := range revisions {\n\t\t\tif r.id() == k {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found && !v.isBeingTested() {\n\t\t\tdelete((*revisionsTested), k)\n\t\t}\n\t}\n\n\tfor _, revision := range revisions {\n\t\ttested, ok := (*revisionsTested)[revision.id()]\n\t\tif ok {\n\t\t\t\/\/ checking if the old version of the PR is being tested\n\t\t\tif tested.isBeingTested() {\n\t\t\t\tr.logger.Debugf(\"revision is being tested: %#v\", tested)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif revision.equal(tested) {\n\t\t\t\tr.logger.Debugf(\"revision was already tested: %#v\", revision)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ setup revision\n\t\tlangEnv, err := r.setupRevision(revision)\n\t\tif err != nil {\n\t\t\tr.logger.Errorf(\"failed to setup revision %#v: %s\", revision, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ cleanup revision\n\t\tdefer func() {\n\t\t\terr = langEnv.cleanup()\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Error(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ test revision\n\t\tif runTestsInParallel {\n\t\t\tgo func() {\n\t\t\t\tif err := r.testRevision(revision, langEnv); err != nil {\n\t\t\t\t\tr.logger.Errorf(\"failed to test revision %#v %s\", revision, err)\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\ttestLock.Lock()\n\t\t\tif err := r.testRevision(revision, langEnv); err != nil {\n\t\t\t\tr.logger.Errorf(\"failed to test revision %#v %s\", revision, err)\n\t\t\t}\n\t\t\ttestLock.Unlock()\n\t\t}\n\n\t\t\/\/ copy the PR that was tested\n\t\t(*revisionsTested)[revision.id()] = revision\n\t}\n}\n\n\/\/ test the pull request specified in the configuration file\n\/\/ if pr does not exist an error is returned\nfunc (r *Repo) test() error {\n\tif r.PR == 0 {\n\t\treturn fmt.Errorf(\"Missing pull request number in configuration file\")\n\t}\n\n\trev, err := newPullRequest(r.PR, r.prConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get pull request %d %s\", r.PR, err)\n\t}\n\n\t\/\/ run tests in parallel does not make sense when\n\t\/\/ we are just testing one pull request\n\trunTestsInParallel = false\n\n\t\/\/ setup revision\n\tlangEnv, err := r.setupRevision(rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to setup revision %#v: %s\", rev, err)\n\t}\n\n\t\/\/ cleanup revision\n\tdefer func() {\n\t\terr = langEnv.cleanup()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ test revision\n\treturn r.testRevision(rev, langEnv)\n}\n\n\/\/ setupRevision generates a language environment, downloads the revision\n\/\/ and creates the logs directory\nfunc (r *Repo) setupRevision(rev revision) (languageConfig, error) {\n\t\/\/ generate a new environment to run the stages\n\tlangEnv, err := r.Language.generateEnvironment(r.cvr.getProjectSlug())\n\tif err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\t\/\/ download the revision\n\tif err = rev.download(langEnv.workingDir); err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\t\/\/ cleanup and set the log directory of the pull request\n\tlogDir := filepath.Join(r.LogDir, rev.logDirName())\n\t_ = os.RemoveAll(logDir)\n\tif err = os.MkdirAll(logDir, logDirMode); err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\treturn langEnv, nil\n}\n\n\/\/ testRevision tests a specific revision\n\/\/ returns an error if the test fail\nfunc (r *Repo) testRevision(rev revision, langEnv languageConfig) error {\n\tconfig := stageConfig{\n\t\tlogger: r.logger,\n\t\tworkingDir: langEnv.workingDir,\n\t\ttty: r.TTY,\n\t\tlogDir: filepath.Join(r.LogDir, rev.logDirName()),\n\t}\n\n\t\/\/ set environment variables\n\tconfig.env = r.env\n\n\t\/\/ appends language environment variables\n\tif len(langEnv.env) > 0 {\n\t\tconfig.env = append(config.env, langEnv.env...)\n\t}\n\n\t\/\/ copy logs to server if we have an IP address\n\tif len(r.LogServer.IP) != 0 {\n\t\tdefer func() {\n\t\t\tif err := r.LogServer.copy(config.logDir); err != nil {\n\t\t\t\tr.logger.Errorf(\"failed to copy log dir %s to server %+v\", config.logDir, r.LogServer)\n\t\t\t}\n\t\t}()\n\t}\n\n\tr.logger.Debugf(\"stage config: %+v\", config)\n\n\tstages := map[string]stage{\n\t\t\"setup\": stage{name: \"setup\", commands: r.Setup},\n\t\t\"run\": stage{name: \"run\", commands: r.Run},\n\t\t\"teardown\": stage{name: \"teardown\", commands: r.Teardown},\n\t\t\"onSuccess\": stage{name: \"onSuccess\", commands: r.OnSuccess},\n\t\t\"onFailure\": stage{name: \"onFailure\", commands: r.OnFailure},\n\t}\n\n\t\/\/ run test\n\treturn rev.test(config, stages)\n}\n<commit_msg>localCI: honour whitelist<commit_after>\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\n\/\/ Repo represents the repository under test\n\/\/ For more information about this structure take a look to the README\ntype Repo struct {\n\t\/\/ URL is the url of the repository\n\tURL string\n\n\t\/\/ MasterBranch is the master branch of this repository\n\tMasterBranch string\n\n\t\/\/ PR is the pull request number\n\tPR int\n\n\t\/\/ RefreshTime is the time to wait for checking if a pull request needs to be tested\n\tRefreshTime string\n\n\t\/\/ Toke is the repository access token\n\tToken string\n\n\t\/\/ Setup contains the conmmands needed to setup the environment\n\tSetup []string\n\n\t\/\/ Run contains the commands to run the test\n\tRun []string\n\n\t\/\/ Teardown contains the commands to be executed once Run ends\n\tTeardown []string\n\n\t\/\/ OnSuccess contains the commands to be executed if Setup, Run and Teardown finished correctly\n\tOnSuccess []string\n\n\t\/\/ OnFailure contains the commands to be executed if any of Setup, Run or Teardown fail\n\tOnFailure []string\n\n\t\/\/ TTY specify whether a tty must be allocate to run the stages\n\tTTY bool\n\n\t\/\/ PostOnSuccess is the comment to be posted if the test finished correctly\n\tPostOnSuccess string\n\n\t\/\/ PostOnFailure is the comment to be posted if the test fails\n\tPostOnFailure string\n\n\t\/\/ LogDir is the logs directory\n\tLogDir string\n\n\t\/\/ Language is the language of the repository\n\tLanguage RepoLanguage\n\n\t\/\/ CommentTrigger is the comment that must be present to trigger the test\n\tCommentTrigger RepoComment\n\n\t\/\/ LogServer contains the information of the server where the logs must be placed\n\tLogServer LogServer\n\n\t\/\/ Whitelist is the list of users whose pull request can be tested\n\tWhitelist string\n\n\t\/\/ cvr control version repository\n\tcvr CVR\n\n\t\/\/ refresh is RefreshTime once parsed\n\trefresh time.Duration\n\n\t\/\/ env contains the environment variables to be used in each stage\n\tenv []string\n\n\t\/\/ whitelistUsers is the whitelist once parsed\n\twhitelistUsers []string\n\n\t\/\/ logger of the repository\n\tlogger *logrus.Entry\n\n\t\/\/ prConfig is the configuration used to create pull request objects\n\tprConfig pullRequestConfig\n}\n\nconst (\n\tlogDirMode = 0755\n\tlogFileMode = 0664\n\tlogServerUser = \"root\"\n)\n\nvar defaultEnv = []string{\"CI=true\", \"LOCALCI=true\"}\n\nvar runTestsInParallel bool\n\nvar testLock sync.Mutex\n\nfunc (r *Repo) setupCvr() error {\n\tvar err error\n\n\t\/\/ validate url\n\tr.URL = strings.TrimSpace(r.URL)\n\tif len(r.URL) == 0 {\n\t\treturn fmt.Errorf(\"missing repository url\")\n\t}\n\n\t\/\/ set repository logger\n\tr.logger = ciLog.WithFields(logrus.Fields{\n\t\t\"Repo\": r.URL,\n\t})\n\n\t\/\/ get the control version repository\n\tr.cvr, err = newCVR(r.URL, r.Token)\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn err\n}\n\nfunc (r *Repo) setupLogServer() error {\n\tif reflect.DeepEqual(r.LogServer, LogServer{}) {\n\t\treturn nil\n\t}\n\n\tif len(r.LogServer.IP) == 0 {\n\t\treturn fmt.Errorf(\"missing server ip\")\n\t}\n\n\tif len(r.LogServer.User) == 0 {\n\t\tr.LogServer.User = logServerUser\n\t}\n\n\tif len(r.LogServer.Dir) == 0 {\n\t\tr.LogServer.Dir = defaultLogDir\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupLogDir() error {\n\t\/\/ create log directory\n\tif err := os.MkdirAll(r.LogDir, logDirMode); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupRefreshTime() error {\n\tvar err error\n\n\t\/\/ validate refresh time\n\tr.refresh, err = time.ParseDuration(r.RefreshTime)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse refresh time '%s' %s\", r.RefreshTime, err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupCommentTrigger() error {\n\tif reflect.DeepEqual(r.CommentTrigger, RepoComment{}) {\n\t\treturn nil\n\t}\n\n\tif len(r.CommentTrigger.Comment) == 0 {\n\t\treturn fmt.Errorf(\"missing comment trigger\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupLanguage() error {\n\treturn r.Language.setup()\n}\n\nfunc (r *Repo) setupStages() error {\n\tif len(r.Run) == 0 {\n\t\treturn fmt.Errorf(\"missing run commands\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *Repo) setupWhitelist() error {\n\t\/\/ get the list of users\n\tr.whitelistUsers = strings.Split(r.Whitelist, \",\")\n\treturn nil\n}\n\nfunc (r *Repo) setupEnvars() error {\n\t\/\/ add environment variables\n\tr.env = os.Environ()\n\tr.env = append(r.env, defaultEnv...)\n\trepoSlug := fmt.Sprintf(\"LOCALCI_REPO_SLUG=%s\", r.cvr.getRepoSlug())\n\tr.env = append(r.env, repoSlug)\n\n\treturn nil\n}\n\n\/\/ setup the repository. This method MUST BE called before use any other\nfunc (r *Repo) setup() error {\n\tvar err error\n\n\tsetupFuncs := []func() error{\n\t\tr.setupCvr,\n\t\tr.setupRefreshTime,\n\t\tr.setupLogDir,\n\t\tr.setupLogServer,\n\t\tr.setupCommentTrigger,\n\t\tr.setupLanguage,\n\t\tr.setupStages,\n\t\tr.setupWhitelist,\n\t\tr.setupEnvars,\n\t}\n\n\tfor _, setupFunc := range setupFuncs {\n\t\tif err = setupFunc(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.prConfig = pullRequestConfig{\n\t\tcvr: r.cvr,\n\t\tlogger: r.logger,\n\t\tcommentTrigger: r.CommentTrigger,\n\t\tpostOnFailure: r.PostOnFailure,\n\t\tpostOnSuccess: r.PostOnSuccess,\n\t\twhitelist: r.Whitelist,\n\t}\n\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn nil\n}\n\n\/\/ loop to monitor the repository\nfunc (r *Repo) loop() {\n\trevisionsTested := make(map[string]revision)\n\n\tr.logger.Debugf(\"monitoring in a loop the repository: %+v\", *r)\n\n\tappendPullRequests := func(revisions *[]revision, prs []int) error {\n\t\tfor _, prNumber := range prs {\n\t\t\tr.logger.Debugf(\"requesting pull request %d\", prNumber)\n\t\t\tpr, err := newPullRequest(prNumber, r.prConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get pull request '%d' %s\", prNumber, err)\n\t\t\t}\n\t\t\t*revisions = append(*revisions, pr)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tvar revisionsToTest []revision\n\n\t\t\/\/ append master branch\n\t\tr.logger.Debugf(\"requesting master branch: %s\", r.MasterBranch)\n\t\tbranch, err := newRepoBranch(r.MasterBranch, r.cvr, r.logger)\n\t\tif err != nil {\n\t\t\tr.logger.Warnf(\"failed to get master branch %s: %s\", r.MasterBranch, err)\n\t\t} else {\n\t\t\trevisionsToTest = append(revisionsToTest, branch)\n\t\t}\n\n\t\t\/\/ append pull requests\n\t\tif r.PR != 0 {\n\t\t\t\/\/ if PR is not 0 then we have to monitor just one PR\n\t\t\tif err = appendPullRequests(&revisionsToTest, []int{r.PR}); err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to append pull request %d\", r.PR, err)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ append open pull request\n\t\t\tr.logger.Debugf(\"requesting open pull requests\")\n\t\t\tprs, err := r.cvr.getOpenPullRequests()\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to get open pull requests: %s\", err)\n\t\t\t} else if err = appendPullRequests(&revisionsToTest, prs); err != nil {\n\t\t\t\tr.logger.Warnf(\"failed to append pull requests %+v: %s\", prs, err)\n\t\t\t}\n\t\t}\n\n\t\tr.logger.Debugf(\"testing revisions: %#v\", revisionsToTest)\n\t\tr.testRevisions(revisionsToTest, &revisionsTested)\n\n\t\tr.logger.Debugf(\"going to sleep: %s\", r.RefreshTime)\n\t\ttime.Sleep(r.refresh)\n\t}\n}\n\nfunc (r *Repo) testRevisions(revisions []revision, revisionsTested *map[string]revision) {\n\t\/\/ remove revisions that are not in the list of open pull request and already tested\n\tfor k, v := range *revisionsTested {\n\t\tfound := false\n\t\t\/\/ iterate over open pull requests and master branch\n\t\tfor _, r := range revisions {\n\t\t\tif r.id() == k {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found && !v.isBeingTested() {\n\t\t\tdelete((*revisionsTested), k)\n\t\t}\n\t}\n\n\tfor _, revision := range revisions {\n\t\ttested, ok := (*revisionsTested)[revision.id()]\n\t\tif ok {\n\t\t\t\/\/ checking if the old version of the PR is being tested\n\t\t\tif tested.isBeingTested() {\n\t\t\t\tr.logger.Debugf(\"revision is being tested: %#v\", tested)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif revision.equal(tested) {\n\t\t\t\tr.logger.Debugf(\"revision was already tested: %#v\", revision)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ check if the revision can be tested\n\t\tif err := revision.canBeTested(); err != nil {\n\t\t\tr.logger.Debugf(\"revision %s cannot be tested: %s\", revision.id(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ setup revision\n\t\tlangEnv, err := r.setupRevision(revision)\n\t\tif err != nil {\n\t\t\tr.logger.Errorf(\"failed to setup revision %#v: %s\", revision, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ cleanup revision\n\t\tdefer func() {\n\t\t\terr = langEnv.cleanup()\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Error(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ test revision\n\t\tif runTestsInParallel {\n\t\t\tgo func() {\n\t\t\t\tif err := r.testRevision(revision, langEnv); err != nil {\n\t\t\t\t\tr.logger.Errorf(\"failed to test revision %#v %s\", revision, err)\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\ttestLock.Lock()\n\t\t\tif err := r.testRevision(revision, langEnv); err != nil {\n\t\t\t\tr.logger.Errorf(\"failed to test revision %#v %s\", revision, err)\n\t\t\t}\n\t\t\ttestLock.Unlock()\n\t\t}\n\n\t\t\/\/ copy the PR that was tested\n\t\t(*revisionsTested)[revision.id()] = revision\n\t}\n}\n\n\/\/ test the pull request specified in the configuration file\n\/\/ if pr does not exist an error is returned\nfunc (r *Repo) test() error {\n\tif r.PR == 0 {\n\t\treturn fmt.Errorf(\"Missing pull request number in configuration file\")\n\t}\n\n\trev, err := newPullRequest(r.PR, r.prConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get pull request %d %s\", r.PR, err)\n\t}\n\n\t\/\/ run tests in parallel does not make sense when\n\t\/\/ we are just testing one pull request\n\trunTestsInParallel = false\n\n\t\/\/ setup revision\n\tlangEnv, err := r.setupRevision(rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to setup revision %#v: %s\", rev, err)\n\t}\n\n\t\/\/ cleanup revision\n\tdefer func() {\n\t\terr = langEnv.cleanup()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ test revision\n\treturn r.testRevision(rev, langEnv)\n}\n\n\/\/ setupRevision generates a language environment, downloads the revision\n\/\/ and creates the logs directory\nfunc (r *Repo) setupRevision(rev revision) (languageConfig, error) {\n\t\/\/ generate a new environment to run the stages\n\tlangEnv, err := r.Language.generateEnvironment(r.cvr.getProjectSlug())\n\tif err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\t\/\/ download the revision\n\tif err = rev.download(langEnv.workingDir); err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\t\/\/ cleanup and set the log directory of the pull request\n\tlogDir := filepath.Join(r.LogDir, rev.logDirName())\n\t_ = os.RemoveAll(logDir)\n\tif err = os.MkdirAll(logDir, logDirMode); err != nil {\n\t\treturn languageConfig{}, err\n\t}\n\n\treturn langEnv, nil\n}\n\n\/\/ testRevision tests a specific revision\n\/\/ returns an error if the test fail\nfunc (r *Repo) testRevision(rev revision, langEnv languageConfig) error {\n\tconfig := stageConfig{\n\t\tlogger: r.logger,\n\t\tworkingDir: langEnv.workingDir,\n\t\ttty: r.TTY,\n\t\tlogDir: filepath.Join(r.LogDir, rev.logDirName()),\n\t}\n\n\t\/\/ set environment variables\n\tconfig.env = r.env\n\n\t\/\/ appends language environment variables\n\tif len(langEnv.env) > 0 {\n\t\tconfig.env = append(config.env, langEnv.env...)\n\t}\n\n\t\/\/ copy logs to server if we have an IP address\n\tif len(r.LogServer.IP) != 0 {\n\t\tdefer func() {\n\t\t\tif err := r.LogServer.copy(config.logDir); err != nil {\n\t\t\t\tr.logger.Errorf(\"failed to copy log dir %s to server %+v\", config.logDir, r.LogServer)\n\t\t\t}\n\t\t}()\n\t}\n\n\tr.logger.Debugf(\"stage config: %+v\", config)\n\n\tstages := map[string]stage{\n\t\t\"setup\": stage{name: \"setup\", commands: r.Setup},\n\t\t\"run\": stage{name: \"run\", commands: r.Run},\n\t\t\"teardown\": stage{name: \"teardown\", commands: r.Teardown},\n\t\t\"onSuccess\": stage{name: \"onSuccess\", commands: r.OnSuccess},\n\t\t\"onFailure\": stage{name: \"onFailure\", commands: r.OnFailure},\n\t}\n\n\t\/\/ run test\n\treturn rev.test(config, stages)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/lib\/awsutil\"\n\tlibjson \"github.com\/Symantec\/Dominator\/lib\/json\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/mdb\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n)\n\ntype awsGeneratorType struct {\n\ttargets awsutil.TargetList\n\tfilterTagsFile string\n}\n\ntype resultType struct {\n\tmdb *mdb.Mdb\n\terr error\n}\n\ntype tagFilterType struct {\n\tKey string\n\tValues []string\n}\n\nfunc newAwsGenerator(args []string) (generator, error) {\n\treturn &awsGeneratorType{\n\t\t\ttargets: awsutil.TargetList{awsutil.Target{args[1], args[0]}}},\n\t\tnil\n}\n\nfunc newAwsFilteredGenerator(args []string) (generator, error) {\n\tgen := awsGeneratorType{\n\t\tfilterTagsFile: args[1],\n\t}\n\tif err := gen.targets.Set(args[0]); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gen, nil\n}\n\nfunc newAwsLocalGenerator(args []string) (generator, error) {\n\tregion, err := awsutil.GetLocalRegion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &awsGeneratorType{\n\t\t\ttargets: awsutil.TargetList{awsutil.Target{\"\", region}}},\n\t\tnil\n}\n\nfunc (g *awsGeneratorType) Generate(unused_datacentre string,\n\tlogger log.Logger) (*mdb.Mdb, error) {\n\tresultsChannel := make(chan resultType, 1)\n\tnumTargets, err := awsutil.ForEachTarget(g.targets, nil,\n\t\tfunc(awsService *ec2.EC2, account, region string, logger log.Logger) {\n\t\t\tvar result resultType\n\t\t\tresult.mdb, result.err = g.generateForTarget(awsService, logger)\n\t\t\tresultsChannel <- result\n\t\t},\n\t\tlogger)\n\t\/\/ Collect results.\n\tvar newMdb mdb.Mdb\n\thostnames := make(map[string]struct{})\n\tfor i := 0; i < numTargets; i++ {\n\t\tresult := <-resultsChannel\n\t\tif result.err != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = result.err\n\t\t\t\tlogger.Println(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfor _, machine := range result.mdb.Machines {\n\t\t\tif _, ok := hostnames[machine.Hostname]; ok {\n\t\t\t\ttxt := \"duplicate hostname: \" + machine.Hostname\n\t\t\t\tlogger.Println(txt)\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = errors.New(txt)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewMdb.Machines = append(newMdb.Machines, machine)\n\t\t}\n\t}\n\treturn &newMdb, err\n}\n\nfunc (g *awsGeneratorType) generateForTarget(svc *ec2.EC2, logger log.Logger) (\n\t*mdb.Mdb, error) {\n\tfilters, err := g.makeFilters()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tFilters: filters,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn extractMdb(resp), nil\n}\n\nfunc (g *awsGeneratorType) makeFilters() ([]*ec2.Filter, error) {\n\tfilters := make([]*ec2.Filter, 1, 1)\n\tfilters[0] = &ec2.Filter{\n\t\tName: aws.String(\"instance-state-name\"),\n\t\tValues: []*string{aws.String(ec2.InstanceStateNameRunning)},\n\t}\n\tif g.filterTagsFile == \"\" {\n\t\treturn filters, nil\n\t}\n\tvar tags []tagFilterType\n\tif err := libjson.ReadFromFile(g.filterTagsFile, &tags); err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading tags file: %s\", err)\n\t}\n\tfor _, tag := range tags {\n\t\tfilters = append(filters, &ec2.Filter{\n\t\t\tName: aws.String(\"tag:\" + tag.Key),\n\t\t\tValues: aws.StringSlice(tag.Values),\n\t\t})\n\t}\n\treturn filters, nil\n}\n\nfunc extractMdb(output *ec2.DescribeInstancesOutput) *mdb.Mdb {\n\tvar result mdb.Mdb\n\tfor _, reservation := range output.Reservations {\n\t\tfor _, instance := range reservation.Instances {\n\t\t\tif instance.PrivateDnsName != nil {\n\t\t\t\tmachine := mdb.Machine{\n\t\t\t\t\tHostname: *instance.PrivateDnsName,\n\t\t\t\t\tAwsMetadata: &mdb.AwsMetadata{\n\t\t\t\t\t\tInstanceId: *instance.InstanceId,\n\t\t\t\t\t\tTags: make(map[string]string),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tif instance.PrivateIpAddress != nil {\n\t\t\t\t\tmachine.IpAddress = *instance.PrivateIpAddress\n\t\t\t\t}\n\t\t\t\textractTags(instance.Tags, &machine)\n\t\t\t\tresult.Machines = append(result.Machines, machine)\n\t\t\t}\n\t\t}\n\t}\n\treturn &result\n}\n\nfunc extractTags(tags []*ec2.Tag, machine *mdb.Machine) {\n\tfor _, tag := range tags {\n\t\tif tag.Key == nil || tag.Value == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmachine.AwsMetadata.Tags[*tag.Key] = *tag.Value\n\t\tswitch *tag.Key {\n\t\tcase \"RequiredImage\":\n\t\t\tmachine.RequiredImage = *tag.Value\n\t\tcase \"PlannedImage\":\n\t\t\tmachine.PlannedImage = *tag.Value\n\t\tcase \"DisableUpdates\":\n\t\t\tmachine.DisableUpdates = true\n\t\tcase \"OwnerGroup\":\n\t\t\tmachine.OwnerGroup = *tag.Value\n\t\t}\n\t}\n}\n<commit_msg>Populate lib\/mdb.AwsMetadata.{AcocuntId,AccountName,Region} fields in mdbd.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/lib\/awsutil\"\n\tlibjson \"github.com\/Symantec\/Dominator\/lib\/json\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\"\n\t\"github.com\/Symantec\/Dominator\/lib\/mdb\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n)\n\ntype awsGeneratorType struct {\n\ttargets awsutil.TargetList\n\tfilterTagsFile string\n}\n\ntype resultType struct {\n\tmdb *mdb.Mdb\n\terr error\n}\n\ntype tagFilterType struct {\n\tKey string\n\tValues []string\n}\n\nfunc newAwsGenerator(args []string) (generator, error) {\n\treturn &awsGeneratorType{\n\t\t\ttargets: awsutil.TargetList{awsutil.Target{args[1], args[0]}}},\n\t\tnil\n}\n\nfunc newAwsFilteredGenerator(args []string) (generator, error) {\n\tgen := awsGeneratorType{\n\t\tfilterTagsFile: args[1],\n\t}\n\tif err := gen.targets.Set(args[0]); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gen, nil\n}\n\nfunc newAwsLocalGenerator(args []string) (generator, error) {\n\tregion, err := awsutil.GetLocalRegion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &awsGeneratorType{\n\t\t\ttargets: awsutil.TargetList{awsutil.Target{\"\", region}}},\n\t\tnil\n}\n\nfunc (g *awsGeneratorType) Generate(unused_datacentre string,\n\tlogger log.Logger) (*mdb.Mdb, error) {\n\tresultsChannel := make(chan resultType, 1)\n\tnumTargets, err := awsutil.ForEachTarget(g.targets, nil,\n\t\tfunc(awsService *ec2.EC2, account, region string, logger log.Logger) {\n\t\t\tvar result resultType\n\t\t\tresult.mdb, result.err = g.generateForTarget(awsService, account,\n\t\t\t\tregion, logger)\n\t\t\tresultsChannel <- result\n\t\t},\n\t\tlogger)\n\t\/\/ Collect results.\n\tvar newMdb mdb.Mdb\n\thostnames := make(map[string]struct{})\n\tfor i := 0; i < numTargets; i++ {\n\t\tresult := <-resultsChannel\n\t\tif result.err != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = result.err\n\t\t\t\tlogger.Println(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfor _, machine := range result.mdb.Machines {\n\t\t\tif _, ok := hostnames[machine.Hostname]; ok {\n\t\t\t\ttxt := \"duplicate hostname: \" + machine.Hostname\n\t\t\t\tlogger.Println(txt)\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = errors.New(txt)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewMdb.Machines = append(newMdb.Machines, machine)\n\t\t}\n\t}\n\treturn &newMdb, err\n}\n\nfunc (g *awsGeneratorType) generateForTarget(svc *ec2.EC2, accountName string,\n\tregion string, logger log.Logger) (\n\t*mdb.Mdb, error) {\n\tfilters, err := g.makeFilters()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tFilters: filters,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn extractMdb(resp, accountName, region), nil\n}\n\nfunc (g *awsGeneratorType) makeFilters() ([]*ec2.Filter, error) {\n\tfilters := make([]*ec2.Filter, 1, 1)\n\tfilters[0] = &ec2.Filter{\n\t\tName: aws.String(\"instance-state-name\"),\n\t\tValues: []*string{aws.String(ec2.InstanceStateNameRunning)},\n\t}\n\tif g.filterTagsFile == \"\" {\n\t\treturn filters, nil\n\t}\n\tvar tags []tagFilterType\n\tif err := libjson.ReadFromFile(g.filterTagsFile, &tags); err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading tags file: %s\", err)\n\t}\n\tfor _, tag := range tags {\n\t\tfilters = append(filters, &ec2.Filter{\n\t\t\tName: aws.String(\"tag:\" + tag.Key),\n\t\t\tValues: aws.StringSlice(tag.Values),\n\t\t})\n\t}\n\treturn filters, nil\n}\n\nfunc extractMdb(output *ec2.DescribeInstancesOutput, accountName string,\n\tregion string) *mdb.Mdb {\n\tvar result mdb.Mdb\n\tfor _, reservation := range output.Reservations {\n\t\taccountId := aws.StringValue(reservation.OwnerId)\n\t\tfor _, instance := range reservation.Instances {\n\t\t\tif instance.PrivateDnsName != nil {\n\t\t\t\tmachine := mdb.Machine{\n\t\t\t\t\tHostname: *instance.PrivateDnsName,\n\t\t\t\t\tAwsMetadata: &mdb.AwsMetadata{\n\t\t\t\t\t\tAccountId: accountId,\n\t\t\t\t\t\tAccountName: accountName,\n\t\t\t\t\t\tInstanceId: *instance.InstanceId,\n\t\t\t\t\t\tRegion: region,\n\t\t\t\t\t\tTags: make(map[string]string),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tif instance.PrivateIpAddress != nil {\n\t\t\t\t\tmachine.IpAddress = *instance.PrivateIpAddress\n\t\t\t\t}\n\t\t\t\textractTags(instance.Tags, &machine)\n\t\t\t\tresult.Machines = append(result.Machines, machine)\n\t\t\t}\n\t\t}\n\t}\n\treturn &result\n}\n\nfunc extractTags(tags []*ec2.Tag, machine *mdb.Machine) {\n\tfor _, tag := range tags {\n\t\tif tag.Key == nil || tag.Value == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmachine.AwsMetadata.Tags[*tag.Key] = *tag.Value\n\t\tswitch *tag.Key {\n\t\tcase \"RequiredImage\":\n\t\t\tmachine.RequiredImage = *tag.Value\n\t\tcase \"PlannedImage\":\n\t\t\tmachine.PlannedImage = *tag.Value\n\t\tcase \"DisableUpdates\":\n\t\t\tmachine.DisableUpdates = true\n\t\tcase \"OwnerGroup\":\n\t\t\tmachine.OwnerGroup = *tag.Value\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Ben Morgan. All rights reserved.\n\/\/ Use of this source code is governed by an MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\n\t\"github.com\/goulash\/pacman\"\n\t\"github.com\/goulash\/pacman\/meta\"\n\t\"github.com\/goulash\/pr\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\t\/\/ Versioned causes packages to be printed with version information.\n\tlistVersioned bool\n\t\/\/ Mode can be either \"count\", \"filter\", or \"mark\" (which is the default\n\t\/\/ if no match is found.\n\tlistMode string\n\t\/\/ Pending marks packages that need to be added to the database,\n\t\/\/ as well as packages that are in the database but are not available.\n\tlistPending bool\n\t\/\/ Duplicates marks the number of obsolete packages for each package.\n\tlistDuplicates bool\n\t\/\/ Installed marks whether packages are locally installed or not.\n\tlistInstalled bool\n\t\/\/ Synchronize marks which packages have newer versions on AUR.\n\tlistSynchronize bool\n\t\/\/ Same as all of the above.\n\tlistAllOptions bool\n\n\tsearchPOSIX bool\n)\n\nfunc init() {\n\tMainCmd.AddCommand(listCmd)\n\n\tlistCmd.Flags().BoolVarP(&listVersioned, \"versioned\", \"v\", false, \"show package versions along with name\")\n\tlistCmd.Flags().BoolVarP(&listPending, \"pending\", \"p\", false, \"mark pending changes to the database\")\n\tlistCmd.Flags().BoolVarP(&listDuplicates, \"duplicates\", \"d\", false, \"mark packages with duplicate package files\")\n\tlistCmd.Flags().BoolVarP(&listInstalled, \"installed\", \"l\", false, \"mark packages that are locally installed\")\n\tlistCmd.Flags().BoolVarP(&listSynchronize, \"outdated\", \"o\", false, \"mark packages that are newer in AUR\")\n\tlistCmd.Flags().BoolVarP(&listAllOptions, \"all\", \"a\", false, \"all information; same as -vpdlo\")\n\tlistCmd.Flags().BoolVar(&searchPOSIX, \"posix\", false, \"use POSIX-style regular expressions\")\n}\n\nvar listCmd = &cobra.Command{\n\tUse: \"list [regex]\",\n\tAliases: []string{\"ls\"},\n\tShort: \"list packages that belong to the managed repository\",\n\tLong: `List packages that belong to the managed repository.\n\n All packages that are in the managed repository are listed,\n whether or not they are registered with the database.\n If you want to filter packages, see the filter command.\n \n When marking entries, the following symbols are used:\n\n -package- package will be deleted\n package <?> no AUR information could be found\n package <!> local package is out-of-date\n package <*> local package is newer than AUR package\n package (n) there are n extra versions of package\n\n When versions are shown, local version is adjacent to package name:\n\n package 1.0 -> 2.0 local package is out-of-date\n package 2.0 <- 1.0 local package is newer than AUR package\n\n If a valid regular expression is supplied, only packages that match\n the expression will be listed.`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) > 1 {\n\t\t\treturn &UsageError{\"list\", \"list command takes at most one argument\", cmd.Usage}\n\t\t}\n\n\t\tif listAllOptions {\n\t\t\tlistVersioned = true\n\t\t\tlistPending = true\n\t\t\tlistDuplicates = true\n\t\t\tlistInstalled = true\n\t\t\tlistSynchronize = true\n\t\t}\n\n\t\tvar regex *regexp.Regexp\n\t\tif len(args) == 1 {\n\t\t\tvar err error\n\t\t\tif searchPOSIX {\n\t\t\t\tregex, err = regexp.Compile(args[0])\n\t\t\t} else {\n\t\t\t\tregex, err = regexp.CompilePOSIX(args[0])\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tpkgs, err := Repo.ListMeta(nil, listSynchronize, func(mp pacman.AnyPackage) string {\n\t\t\tp := mp.(*meta.Package)\n\t\t\tif regex != nil && !regex.MatchString(p.PkgName()) {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tif listPending && !p.HasFiles() {\n\t\t\t\treturn fmt.Sprintf(\"-%s-\", p.Name)\n\t\t\t}\n\n\t\t\tbuf := bytes.NewBufferString(p.Name)\n\t\t\tif listPending && p.HasUpdate() {\n\t\t\t\tbuf.WriteRune('*')\n\t\t\t}\n\t\t\tif listVersioned {\n\t\t\t\tbuf.WriteRune(' ')\n\t\t\t\tbuf.WriteString(p.Version())\n\t\t\t}\n\t\t\tif listSynchronize {\n\t\t\t\tap := p.AUR\n\t\t\t\tif ap == nil {\n\t\t\t\t\tbuf.WriteString(\" <?>\") \/\/ no aur info\n\t\t\t\t} else if pacman.PkgNewer(ap, p) {\n\t\t\t\t\tif listVersioned {\n\t\t\t\t\t\tbuf.WriteString(\" -> \") \/\/ new version\n\t\t\t\t\t\tbuf.WriteString(ap.Version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(\" <!>\") \/\/ local version older than aur\n\t\t\t\t\t}\n\t\t\t\t} else if pacman.PkgOlder(ap, p) {\n\t\t\t\t\tif listVersioned {\n\t\t\t\t\t\tbuf.WriteString(\" <- \") \/\/ old version\n\t\t\t\t\t\tbuf.WriteString(ap.Version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(\" <*>\") \/\/ local version newer than aur\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif listDuplicates && len(p.Files)-1 > 0 {\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\" (%v)\", len(p.Files)-1))\n\t\t\t}\n\n\t\t\treturn buf.String()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Print packages to stdout\n\t\tprintSet(pkgs, \"\", Conf.Columnate)\n\t\treturn nil\n\t},\n}\n\n\/\/ printSet prints a set of items and optionally a header.\nfunc printSet(list []string, h string, cols bool) {\n\tsort.Strings(list)\n\tif h != \"\" {\n\t\tfmt.Printf(\"\\n%s\\n\", h)\n\t}\n\tif cols {\n\t\tpr.PrintFlex(list)\n\t} else if h != \"\" {\n\t\tfor _, j := range list {\n\t\t\tfmt.Println(\" \", j)\n\t\t}\n\t} else {\n\t\tfor _, j := range list {\n\t\t\tfmt.Println(j)\n\t\t}\n\t}\n}\n<commit_msg>Add -r, --registered flag to list command<commit_after>\/\/ Copyright (c) 2016, Ben Morgan. All rights reserved.\n\/\/ Use of this source code is governed by an MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\n\t\"github.com\/goulash\/pacman\"\n\t\"github.com\/goulash\/pacman\/meta\"\n\t\"github.com\/goulash\/pr\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\t\/\/ Versioned causes packages to be printed with version information.\n\tlistVersioned bool\n\t\/\/ Mode can be either \"count\", \"filter\", or \"mark\" (which is the default\n\t\/\/ if no match is found.\n\tlistMode string\n\t\/\/ Pending marks packages that need to be added to the database,\n\t\/\/ as well as packages that are in the database but are not available.\n\tlistPending bool\n\t\/\/ Duplicates marks the number of obsolete packages for each package.\n\tlistDuplicates bool\n\t\/\/ Installed marks whether packages are locally installed or not.\n\tlistInstalled bool\n\t\/\/ Synchronize marks which packages have newer versions on AUR.\n\tlistSynchronize bool\n\t\/\/ Same as all of the above.\n\tlistAllOptions bool\n\t\/\/ Only show registered packages.\n\tfilterRegistered bool\n\n\tsearchPOSIX bool\n)\n\nfunc init() {\n\tMainCmd.AddCommand(listCmd)\n\n\tlistCmd.Flags().BoolVarP(&filterRegistered, \"registered\", \"r\", false, \"only show packages that are in the database\")\n\tlistCmd.Flags().BoolVarP(&listVersioned, \"versioned\", \"v\", false, \"show package versions along with name\")\n\tlistCmd.Flags().BoolVarP(&listPending, \"pending\", \"p\", false, \"mark pending changes to the database\")\n\tlistCmd.Flags().BoolVarP(&listDuplicates, \"duplicates\", \"d\", false, \"mark packages with duplicate package files\")\n\tlistCmd.Flags().BoolVarP(&listInstalled, \"installed\", \"l\", false, \"mark packages that are locally installed\")\n\tlistCmd.Flags().BoolVarP(&listSynchronize, \"outdated\", \"o\", false, \"mark packages that are newer in AUR\")\n\tlistCmd.Flags().BoolVarP(&listAllOptions, \"all\", \"a\", false, \"all information; same as -vpdlo\")\n\tlistCmd.Flags().BoolVar(&searchPOSIX, \"posix\", false, \"use POSIX-style regular expressions\")\n}\n\nvar listCmd = &cobra.Command{\n\tUse: \"list [regex]\",\n\tAliases: []string{\"ls\"},\n\tShort: \"list packages that belong to the managed repository\",\n\tLong: `List packages that belong to the managed repository.\n\n All packages that are in the managed repository are listed,\n whether or not they are registered with the database.\n If you only want to show registered packages, use the -r flag.\n \n When marking entries, the following symbols are used:\n\n -package- package will be deleted\n package <?> no AUR information could be found\n package <!> local package is out-of-date\n package <*> local package is newer than AUR package\n package (n) there are n extra versions of package\n\n When versions are shown, local version is adjacent to package name:\n\n package 1.0 -> 2.0 local package is out-of-date\n package 2.0 <- 1.0 local package is newer than AUR package\n\n If a valid regular expression is supplied, only packages that match\n the expression will be listed.`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) > 1 {\n\t\t\treturn &UsageError{\"list\", \"list command takes at most one argument\", cmd.Usage}\n\t\t}\n\n\t\tif listAllOptions {\n\t\t\tlistVersioned = true\n\t\t\tlistPending = true\n\t\t\tlistDuplicates = true\n\t\t\tlistInstalled = true\n\t\t\tlistSynchronize = true\n\t\t}\n\n\t\tvar regex *regexp.Regexp\n\t\tif len(args) == 1 {\n\t\t\tvar err error\n\t\t\tif searchPOSIX {\n\t\t\t\tregex, err = regexp.Compile(args[0])\n\t\t\t} else {\n\t\t\t\tregex, err = regexp.CompilePOSIX(args[0])\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tpkgs, err := Repo.ListMeta(nil, listSynchronize, func(mp pacman.AnyPackage) string {\n\t\t\tp := mp.(*meta.Package)\n\t\t\tif regex != nil && !regex.MatchString(p.PkgName()) {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tif filterRegistered && !p.IsRegistered() {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tif listPending && !p.HasFiles() {\n\t\t\t\treturn fmt.Sprintf(\"-%s-\", p.Name)\n\t\t\t}\n\n\t\t\tbuf := bytes.NewBufferString(p.Name)\n\t\t\tif listPending && p.HasUpdate() {\n\t\t\t\tbuf.WriteRune('*')\n\t\t\t}\n\t\t\tif listVersioned {\n\t\t\t\tbuf.WriteRune(' ')\n\t\t\t\tbuf.WriteString(p.Version())\n\t\t\t}\n\t\t\tif listSynchronize {\n\t\t\t\tap := p.AUR\n\t\t\t\tif ap == nil {\n\t\t\t\t\tbuf.WriteString(\" <?>\") \/\/ no aur info\n\t\t\t\t} else if pacman.PkgNewer(ap, p) {\n\t\t\t\t\tif listVersioned {\n\t\t\t\t\t\tbuf.WriteString(\" -> \") \/\/ new version\n\t\t\t\t\t\tbuf.WriteString(ap.Version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(\" <!>\") \/\/ local version older than aur\n\t\t\t\t\t}\n\t\t\t\t} else if pacman.PkgOlder(ap, p) {\n\t\t\t\t\tif listVersioned {\n\t\t\t\t\t\tbuf.WriteString(\" <- \") \/\/ old version\n\t\t\t\t\t\tbuf.WriteString(ap.Version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(\" <*>\") \/\/ local version newer than aur\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif listDuplicates && len(p.Files)-1 > 0 {\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\" (%v)\", len(p.Files)-1))\n\t\t\t}\n\n\t\t\treturn buf.String()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Print packages to stdout\n\t\tprintSet(pkgs, \"\", Conf.Columnate)\n\t\treturn nil\n\t},\n}\n\n\/\/ printSet prints a set of items and optionally a header.\nfunc printSet(list []string, h string, cols bool) {\n\tsort.Strings(list)\n\tif h != \"\" {\n\t\tfmt.Printf(\"\\n%s\\n\", h)\n\t}\n\tif cols {\n\t\tpr.PrintFlex(list)\n\t} else if h != \"\" {\n\t\tfor _, j := range list {\n\t\t\tfmt.Println(\" \", j)\n\t\t}\n\t} else {\n\t\tfor _, j := range list {\n\t\t\tfmt.Println(j)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Downloads torrents from the command-line.\npackage main\n\nimport (\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\tstdLog \"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/args\"\n\t\"github.com\/anacrolix\/envpprof\"\n\t\"github.com\/anacrolix\/log\"\n\t\"github.com\/anacrolix\/missinggo\/v2\"\n\t\"github.com\/anacrolix\/tagflag\"\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n\t\"github.com\/anacrolix\/torrent\/version\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"golang.org\/x\/time\/rate\"\n\n\t\"github.com\/anacrolix\/torrent\"\n\t\"github.com\/anacrolix\/torrent\/iplist\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/storage\"\n)\n\nfunc torrentBar(t *torrent.Torrent, pieceStates bool) {\n\tgo func() {\n\t\tstart := time.Now()\n\t\tif t.Info() == nil {\n\t\t\tfmt.Printf(\"%v: getting torrent info for %q\\n\", time.Since(start), t.Name())\n\t\t\t<-t.GotInfo()\n\t\t}\n\t\tlastStats := t.Stats()\n\t\tvar lastLine string\n\t\tfor range time.Tick(time.Second) {\n\t\t\tvar completedPieces, partialPieces int\n\t\t\tpsrs := t.PieceStateRuns()\n\t\t\tfor _, r := range psrs {\n\t\t\t\tif r.Complete {\n\t\t\t\t\tcompletedPieces += r.Length\n\t\t\t\t}\n\t\t\t\tif r.Partial {\n\t\t\t\t\tpartialPieces += r.Length\n\t\t\t\t}\n\t\t\t}\n\t\t\tstats := t.Stats()\n\t\t\tline := fmt.Sprintf(\n\t\t\t\t\"%v: downloading %q: %s\/%s, %d\/%d pieces completed (%d partial): %v\/s\\n\",\n\t\t\t\ttime.Since(start),\n\t\t\t\tt.Name(),\n\t\t\t\thumanize.Bytes(uint64(t.BytesCompleted())),\n\t\t\t\thumanize.Bytes(uint64(t.Length())),\n\t\t\t\tcompletedPieces,\n\t\t\t\tt.NumPieces(),\n\t\t\t\tpartialPieces,\n\t\t\t\thumanize.Bytes(uint64(stats.BytesReadUsefulData.Int64()-lastStats.BytesReadUsefulData.Int64())),\n\t\t\t)\n\t\t\tif line != lastLine {\n\t\t\t\tlastLine = line\n\t\t\t\tos.Stdout.WriteString(line)\n\t\t\t}\n\t\t\tif pieceStates {\n\t\t\t\tfmt.Println(psrs)\n\t\t\t}\n\t\t\tlastStats = stats\n\t\t}\n\t}()\n}\n\ntype stringAddr string\n\nfunc (stringAddr) Network() string { return \"\" }\nfunc (me stringAddr) String() string { return string(me) }\n\nfunc resolveTestPeers(addrs []string) (ret []torrent.PeerInfo) {\n\tfor _, ta := range addrs {\n\t\tret = append(ret, torrent.PeerInfo{\n\t\t\tAddr: stringAddr(ta),\n\t\t})\n\t}\n\treturn\n}\n\nfunc addTorrents(client *torrent.Client, flags downloadFlags) error {\n\ttestPeers := resolveTestPeers(flags.TestPeer)\n\tfor _, arg := range flags.Torrent {\n\t\tt, err := func() (*torrent.Torrent, error) {\n\t\t\tif strings.HasPrefix(arg, \"magnet:\") {\n\t\t\t\tt, err := client.AddMagnet(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error adding magnet: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t} else if strings.HasPrefix(arg, \"http:\/\/\") || strings.HasPrefix(arg, \"https:\/\/\") {\n\t\t\t\tresponse, err := http.Get(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Error downloading torrent file: %s\", err)\n\t\t\t\t}\n\n\t\t\t\tmetaInfo, err := metainfo.Load(response.Body)\n\t\t\t\tdefer response.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error loading torrent file %q: %s\\n\", arg, err)\n\t\t\t\t}\n\t\t\t\tt, err := client.AddTorrent(metaInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"adding torrent: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t} else if strings.HasPrefix(arg, \"infohash:\") {\n\t\t\t\tt, _ := client.AddTorrentInfoHash(metainfo.NewHashFromHex(strings.TrimPrefix(arg, \"infohash:\")))\n\t\t\t\treturn t, nil\n\t\t\t} else {\n\t\t\t\tmetaInfo, err := metainfo.LoadFromFile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error loading torrent file %q: %s\\n\", arg, err)\n\t\t\t\t}\n\t\t\t\tt, err := client.AddTorrent(metaInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"adding torrent: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t}\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"adding torrent for %q: %w\", arg, err)\n\t\t}\n\t\tif flags.Progress {\n\t\t\ttorrentBar(t, flags.PieceStates)\n\t\t}\n\t\tt.AddPeers(testPeers)\n\t\tgo func() {\n\t\t\t<-t.GotInfo()\n\t\t\tif len(flags.File) == 0 {\n\t\t\t\tt.DownloadAll()\n\t\t\t} else {\n\t\t\t\tfor _, f := range t.Files() {\n\t\t\t\t\tfor _, fileArg := range flags.File {\n\t\t\t\t\t\tif f.DisplayPath() == fileArg {\n\t\t\t\t\t\t\tf.Download()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\treturn nil\n}\n\ntype downloadFlags struct {\n\tDebug bool\n\tDownloadCmd\n}\n\ntype DownloadCmd struct {\n\tMmap bool `help:\"memory-map torrent data\"`\n\tTestPeer []string `help:\"addresses of some starting peers\"`\n\tSeed bool `help:\"seed after download is complete\"`\n\tAddr string `help:\"network listen addr\"`\n\tMaxUnverifiedBytes tagflag.Bytes `help:\"maximum number bytes to have pending verification\"`\n\tUploadRate *tagflag.Bytes `help:\"max piece bytes to send per second\"`\n\tDownloadRate *tagflag.Bytes `help:\"max bytes per second down from peers\"`\n\tPackedBlocklist string\n\tPublicIP net.IP\n\tProgress bool `default:\"true\"`\n\tPieceStates bool\n\tQuiet bool `help:\"discard client logging\"`\n\tStats *bool `help:\"print stats at termination\"`\n\tDht bool `default:\"true\"`\n\n\tTcpPeers bool `default:\"true\"`\n\tUtpPeers bool `default:\"true\"`\n\tWebtorrent bool `default:\"true\"`\n\tDisableWebseeds bool\n\n\tIpv4 bool `default:\"true\"`\n\tIpv6 bool `default:\"true\"`\n\tPex bool `default:\"true\"`\n\n\tFile []string\n\tTorrent []string `arity:\"+\" help:\"torrent file path or magnet uri\" arg:\"positional\"`\n}\n\nfunc stdoutAndStderrAreSameFile() bool {\n\tfi1, _ := os.Stdout.Stat()\n\tfi2, _ := os.Stderr.Stat()\n\treturn os.SameFile(fi1, fi2)\n}\n\nfunc statsEnabled(flags downloadFlags) bool {\n\tif flags.Stats == nil {\n\t\treturn flags.Debug\n\t}\n\treturn *flags.Stats\n}\n\nfunc exitSignalHandlers(notify *missinggo.SynchronizedEvent) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGINT, syscall.SIGTERM)\n\tfor {\n\t\tlog.Printf(\"close signal received: %+v\", <-c)\n\t\tnotify.Set()\n\t}\n}\n\nfunc main() {\n\tdefer envpprof.Stop()\n\tif err := mainErr(); err != nil {\n\t\tlog.Printf(\"error in main: %v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc mainErr() error {\n\tstdLog.SetFlags(stdLog.Flags() | stdLog.Lshortfile)\n\tdebug := args.Flag(args.FlagOpt{Long: \"debug\"})\n\tp := args.ParseMain(\n\t\tdebug,\n\t\targs.Subcommand(\"metainfo\", metainfoCmd),\n\t\targs.Subcommand(\"announce\", func(p args.SubCmdCtx) error {\n\t\t\tvar cmd AnnounceCmd\n\t\t\terr := p.NewParser().AddParams(\n\t\t\t\targs.Pos(\"tracker\", &cmd.Tracker),\n\t\t\t\targs.Pos(\"infohash\", &cmd.InfoHash)).Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn announceErr(cmd)\n\t\t}),\n\t\targs.Subcommand(\"download\", func(p args.SubCmdCtx) error {\n\t\t\tvar dlf DownloadCmd\n\t\t\terr := p.NewParser().AddParams(\n\t\t\t\tappend(args.FromStruct(&dlf), debug)...,\n\t\t\t).Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn downloadErr(downloadFlags{\n\t\t\t\tDebug: debug.Bool(),\n\t\t\t\tDownloadCmd: dlf,\n\t\t\t})\n\t\t}),\n\t\targs.Subcommand(\n\t\t\t\"spew-bencoding\",\n\t\t\tfunc(p args.SubCmdCtx) error {\n\t\t\t\td := bencode.NewDecoder(os.Stdin)\n\t\t\t\tfor i := 0; ; i++ {\n\t\t\t\t\tvar v interface{}\n\t\t\t\t\terr := d.Decode(&v)\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"decoding message index %d: %w\", i, err)\n\t\t\t\t\t}\n\t\t\t\t\tspew.Dump(v)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\targs.Help(\"reads bencoding from stdin into Go native types and spews the result\"),\n\t\t),\n\t\targs.Subcommand(\"version\", func(p args.SubCmdCtx) error {\n\t\t\tfmt.Printf(\"HTTP User-Agent: %q\\n\", version.DefaultHttpUserAgent)\n\t\t\tfmt.Printf(\"Torrent client version: %q\\n\", version.DefaultExtendedHandshakeClientVersion)\n\t\t\tfmt.Printf(\"Torrent version prefix: %q\\n\", version.DefaultBep20Prefix)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tif p.Err != nil {\n\t\tif errors.Is(p.Err, args.ErrHelped) {\n\t\t\treturn nil\n\t\t}\n\t\treturn p.Err\n\t}\n\tif !p.RanSubCmd {\n\t\tp.PrintChoices(os.Stderr)\n\t\targs.FatalUsage()\n\t}\n\treturn nil\n}\n\nfunc downloadErr(flags downloadFlags) error {\n\tclientConfig := torrent.NewDefaultClientConfig()\n\tclientConfig.DisableWebseeds = flags.DisableWebseeds\n\tclientConfig.DisableTCP = !flags.TcpPeers\n\tclientConfig.DisableUTP = !flags.UtpPeers\n\tclientConfig.DisableIPv4 = !flags.Ipv4\n\tclientConfig.DisableIPv6 = !flags.Ipv6\n\tclientConfig.DisableAcceptRateLimiting = true\n\tclientConfig.NoDHT = !flags.Dht\n\tclientConfig.Debug = flags.Debug\n\tclientConfig.Seed = flags.Seed\n\tclientConfig.PublicIp4 = flags.PublicIP\n\tclientConfig.PublicIp6 = flags.PublicIP\n\tclientConfig.DisablePEX = !flags.Pex\n\tclientConfig.DisableWebtorrent = !flags.Webtorrent\n\tif flags.PackedBlocklist != \"\" {\n\t\tblocklist, err := iplist.MMapPackedFile(flags.PackedBlocklist)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"loading blocklist: %v\", err)\n\t\t}\n\t\tdefer blocklist.Close()\n\t\tclientConfig.IPBlocklist = blocklist\n\t}\n\tif flags.Mmap {\n\t\tclientConfig.DefaultStorage = storage.NewMMap(\"\")\n\t}\n\tif flags.Addr != \"\" {\n\t\tclientConfig.SetListenAddr(flags.Addr)\n\t}\n\tif flags.UploadRate != nil {\n\t\tclientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(*flags.UploadRate), 256<<10)\n\t}\n\tif flags.DownloadRate != nil {\n\t\tclientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(*flags.DownloadRate), 1<<20)\n\t}\n\tif flags.Quiet {\n\t\tclientConfig.Logger = log.Discard\n\t}\n\tclientConfig.MaxUnverifiedBytes = flags.MaxUnverifiedBytes.Int64()\n\n\tvar stop missinggo.SynchronizedEvent\n\tdefer func() {\n\t\tstop.Set()\n\t}()\n\n\tclient, err := torrent.NewClient(clientConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating client: %w\", err)\n\t}\n\tvar clientClose sync.Once \/\/In certain situations, close was being called more than once.\n\tdefer clientClose.Do(client.Close)\n\tgo exitSignalHandlers(&stop)\n\tgo func() {\n\t\t<-stop.C()\n\t\tclientClose.Do(client.Close)\n\t}()\n\n\t\/\/ Write status on the root path on the default HTTP muxer. This will be bound to localhost\n\t\/\/ somewhere if GOPPROF is set, thanks to the envpprof import.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tclient.WriteStatus(w)\n\t})\n\terr = addTorrents(client, flags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"adding torrents: %w\", err)\n\t}\n\tdefer outputStats(client, flags)\n\tif client.WaitAll() {\n\t\tlog.Print(\"downloaded ALL the torrents\")\n\t} else {\n\t\treturn errors.New(\"y u no complete torrents?!\")\n\t}\n\tif flags.Seed {\n\t\tif len(client.Torrents()) == 0 {\n\t\t\tlog.Print(\"no torrents to seed\")\n\t\t} else {\n\t\t\toutputStats(client, flags)\n\t\t\t<-stop.C()\n\t\t}\n\t}\n\tspew.Dump(expvar.Get(\"torrent\").(*expvar.Map).Get(\"chunks received\"))\n\tspew.Dump(client.ConnStats())\n\tclStats := client.ConnStats()\n\tsentOverhead := clStats.BytesWritten.Int64() - clStats.BytesWrittenData.Int64()\n\tlog.Printf(\n\t\t\"client read %v, %v was useful data. sent %v non-data bytes\",\n\t\thumanize.Bytes(uint64(clStats.BytesRead.Int64())),\n\t\t100*float64(clStats.BytesReadUsefulData.Int64())\/float64(clStats.BytesRead.Int64()),\n\t\thumanize.Bytes(uint64(sentOverhead)))\n\treturn nil\n}\n\nfunc outputStats(cl *torrent.Client, args downloadFlags) {\n\tif !statsEnabled(args) {\n\t\treturn\n\t}\n\texpvar.Do(func(kv expvar.KeyValue) {\n\t\tfmt.Printf(\"%s: %s\\n\", kv.Key, kv.Value)\n\t})\n\tcl.WriteStatus(os.Stdout)\n}\n<commit_msg>cmd\/torrent: Include download stats on interrupt<commit_after>\/\/ Downloads torrents from the command-line.\npackage main\n\nimport (\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\tstdLog \"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/args\"\n\t\"github.com\/anacrolix\/envpprof\"\n\t\"github.com\/anacrolix\/log\"\n\t\"github.com\/anacrolix\/missinggo\/v2\"\n\t\"github.com\/anacrolix\/tagflag\"\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n\t\"github.com\/anacrolix\/torrent\/version\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"golang.org\/x\/time\/rate\"\n\n\t\"github.com\/anacrolix\/torrent\"\n\t\"github.com\/anacrolix\/torrent\/iplist\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/storage\"\n)\n\nfunc torrentBar(t *torrent.Torrent, pieceStates bool) {\n\tgo func() {\n\t\tstart := time.Now()\n\t\tif t.Info() == nil {\n\t\t\tfmt.Printf(\"%v: getting torrent info for %q\\n\", time.Since(start), t.Name())\n\t\t\t<-t.GotInfo()\n\t\t}\n\t\tlastStats := t.Stats()\n\t\tvar lastLine string\n\t\tfor range time.Tick(time.Second) {\n\t\t\tvar completedPieces, partialPieces int\n\t\t\tpsrs := t.PieceStateRuns()\n\t\t\tfor _, r := range psrs {\n\t\t\t\tif r.Complete {\n\t\t\t\t\tcompletedPieces += r.Length\n\t\t\t\t}\n\t\t\t\tif r.Partial {\n\t\t\t\t\tpartialPieces += r.Length\n\t\t\t\t}\n\t\t\t}\n\t\t\tstats := t.Stats()\n\t\t\tline := fmt.Sprintf(\n\t\t\t\t\"%v: downloading %q: %s\/%s, %d\/%d pieces completed (%d partial): %v\/s\\n\",\n\t\t\t\ttime.Since(start),\n\t\t\t\tt.Name(),\n\t\t\t\thumanize.Bytes(uint64(t.BytesCompleted())),\n\t\t\t\thumanize.Bytes(uint64(t.Length())),\n\t\t\t\tcompletedPieces,\n\t\t\t\tt.NumPieces(),\n\t\t\t\tpartialPieces,\n\t\t\t\thumanize.Bytes(uint64(stats.BytesReadUsefulData.Int64()-lastStats.BytesReadUsefulData.Int64())),\n\t\t\t)\n\t\t\tif line != lastLine {\n\t\t\t\tlastLine = line\n\t\t\t\tos.Stdout.WriteString(line)\n\t\t\t}\n\t\t\tif pieceStates {\n\t\t\t\tfmt.Println(psrs)\n\t\t\t}\n\t\t\tlastStats = stats\n\t\t}\n\t}()\n}\n\ntype stringAddr string\n\nfunc (stringAddr) Network() string { return \"\" }\nfunc (me stringAddr) String() string { return string(me) }\n\nfunc resolveTestPeers(addrs []string) (ret []torrent.PeerInfo) {\n\tfor _, ta := range addrs {\n\t\tret = append(ret, torrent.PeerInfo{\n\t\t\tAddr: stringAddr(ta),\n\t\t})\n\t}\n\treturn\n}\n\nfunc addTorrents(client *torrent.Client, flags downloadFlags) error {\n\ttestPeers := resolveTestPeers(flags.TestPeer)\n\tfor _, arg := range flags.Torrent {\n\t\tt, err := func() (*torrent.Torrent, error) {\n\t\t\tif strings.HasPrefix(arg, \"magnet:\") {\n\t\t\t\tt, err := client.AddMagnet(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error adding magnet: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t} else if strings.HasPrefix(arg, \"http:\/\/\") || strings.HasPrefix(arg, \"https:\/\/\") {\n\t\t\t\tresponse, err := http.Get(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Error downloading torrent file: %s\", err)\n\t\t\t\t}\n\n\t\t\t\tmetaInfo, err := metainfo.Load(response.Body)\n\t\t\t\tdefer response.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error loading torrent file %q: %s\\n\", arg, err)\n\t\t\t\t}\n\t\t\t\tt, err := client.AddTorrent(metaInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"adding torrent: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t} else if strings.HasPrefix(arg, \"infohash:\") {\n\t\t\t\tt, _ := client.AddTorrentInfoHash(metainfo.NewHashFromHex(strings.TrimPrefix(arg, \"infohash:\")))\n\t\t\t\treturn t, nil\n\t\t\t} else {\n\t\t\t\tmetaInfo, err := metainfo.LoadFromFile(arg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error loading torrent file %q: %s\\n\", arg, err)\n\t\t\t\t}\n\t\t\t\tt, err := client.AddTorrent(metaInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"adding torrent: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn t, nil\n\t\t\t}\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"adding torrent for %q: %w\", arg, err)\n\t\t}\n\t\tif flags.Progress {\n\t\t\ttorrentBar(t, flags.PieceStates)\n\t\t}\n\t\tt.AddPeers(testPeers)\n\t\tgo func() {\n\t\t\t<-t.GotInfo()\n\t\t\tif len(flags.File) == 0 {\n\t\t\t\tt.DownloadAll()\n\t\t\t} else {\n\t\t\t\tfor _, f := range t.Files() {\n\t\t\t\t\tfor _, fileArg := range flags.File {\n\t\t\t\t\t\tif f.DisplayPath() == fileArg {\n\t\t\t\t\t\t\tf.Download()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\treturn nil\n}\n\ntype downloadFlags struct {\n\tDebug bool\n\tDownloadCmd\n}\n\ntype DownloadCmd struct {\n\tMmap bool `help:\"memory-map torrent data\"`\n\tTestPeer []string `help:\"addresses of some starting peers\"`\n\tSeed bool `help:\"seed after download is complete\"`\n\tAddr string `help:\"network listen addr\"`\n\tMaxUnverifiedBytes tagflag.Bytes `help:\"maximum number bytes to have pending verification\"`\n\tUploadRate *tagflag.Bytes `help:\"max piece bytes to send per second\"`\n\tDownloadRate *tagflag.Bytes `help:\"max bytes per second down from peers\"`\n\tPackedBlocklist string\n\tPublicIP net.IP\n\tProgress bool `default:\"true\"`\n\tPieceStates bool\n\tQuiet bool `help:\"discard client logging\"`\n\tStats *bool `help:\"print stats at termination\"`\n\tDht bool `default:\"true\"`\n\n\tTcpPeers bool `default:\"true\"`\n\tUtpPeers bool `default:\"true\"`\n\tWebtorrent bool `default:\"true\"`\n\tDisableWebseeds bool\n\n\tIpv4 bool `default:\"true\"`\n\tIpv6 bool `default:\"true\"`\n\tPex bool `default:\"true\"`\n\n\tFile []string\n\tTorrent []string `arity:\"+\" help:\"torrent file path or magnet uri\" arg:\"positional\"`\n}\n\nfunc stdoutAndStderrAreSameFile() bool {\n\tfi1, _ := os.Stdout.Stat()\n\tfi2, _ := os.Stderr.Stat()\n\treturn os.SameFile(fi1, fi2)\n}\n\nfunc statsEnabled(flags downloadFlags) bool {\n\tif flags.Stats == nil {\n\t\treturn flags.Debug\n\t}\n\treturn *flags.Stats\n}\n\nfunc exitSignalHandlers(notify *missinggo.SynchronizedEvent) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGINT, syscall.SIGTERM)\n\tfor {\n\t\tlog.Printf(\"close signal received: %+v\", <-c)\n\t\tnotify.Set()\n\t}\n}\n\nfunc main() {\n\tdefer envpprof.Stop()\n\tif err := mainErr(); err != nil {\n\t\tlog.Printf(\"error in main: %v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc mainErr() error {\n\tstdLog.SetFlags(stdLog.Flags() | stdLog.Lshortfile)\n\tdebug := args.Flag(args.FlagOpt{Long: \"debug\"})\n\tp := args.ParseMain(\n\t\tdebug,\n\t\targs.Subcommand(\"metainfo\", metainfoCmd),\n\t\targs.Subcommand(\"announce\", func(p args.SubCmdCtx) error {\n\t\t\tvar cmd AnnounceCmd\n\t\t\terr := p.NewParser().AddParams(\n\t\t\t\targs.Pos(\"tracker\", &cmd.Tracker),\n\t\t\t\targs.Pos(\"infohash\", &cmd.InfoHash)).Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn announceErr(cmd)\n\t\t}),\n\t\targs.Subcommand(\"download\", func(p args.SubCmdCtx) error {\n\t\t\tvar dlf DownloadCmd\n\t\t\terr := p.NewParser().AddParams(\n\t\t\t\tappend(args.FromStruct(&dlf), debug)...,\n\t\t\t).Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn downloadErr(downloadFlags{\n\t\t\t\tDebug: debug.Bool(),\n\t\t\t\tDownloadCmd: dlf,\n\t\t\t})\n\t\t}),\n\t\targs.Subcommand(\n\t\t\t\"spew-bencoding\",\n\t\t\tfunc(p args.SubCmdCtx) error {\n\t\t\t\td := bencode.NewDecoder(os.Stdin)\n\t\t\t\tfor i := 0; ; i++ {\n\t\t\t\t\tvar v interface{}\n\t\t\t\t\terr := d.Decode(&v)\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"decoding message index %d: %w\", i, err)\n\t\t\t\t\t}\n\t\t\t\t\tspew.Dump(v)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\targs.Help(\"reads bencoding from stdin into Go native types and spews the result\"),\n\t\t),\n\t\targs.Subcommand(\"version\", func(p args.SubCmdCtx) error {\n\t\t\tfmt.Printf(\"HTTP User-Agent: %q\\n\", version.DefaultHttpUserAgent)\n\t\t\tfmt.Printf(\"Torrent client version: %q\\n\", version.DefaultExtendedHandshakeClientVersion)\n\t\t\tfmt.Printf(\"Torrent version prefix: %q\\n\", version.DefaultBep20Prefix)\n\t\t\treturn nil\n\t\t}),\n\t)\n\tif p.Err != nil {\n\t\tif errors.Is(p.Err, args.ErrHelped) {\n\t\t\treturn nil\n\t\t}\n\t\treturn p.Err\n\t}\n\tif !p.RanSubCmd {\n\t\tp.PrintChoices(os.Stderr)\n\t\targs.FatalUsage()\n\t}\n\treturn nil\n}\n\nfunc downloadErr(flags downloadFlags) error {\n\tclientConfig := torrent.NewDefaultClientConfig()\n\tclientConfig.DisableWebseeds = flags.DisableWebseeds\n\tclientConfig.DisableTCP = !flags.TcpPeers\n\tclientConfig.DisableUTP = !flags.UtpPeers\n\tclientConfig.DisableIPv4 = !flags.Ipv4\n\tclientConfig.DisableIPv6 = !flags.Ipv6\n\tclientConfig.DisableAcceptRateLimiting = true\n\tclientConfig.NoDHT = !flags.Dht\n\tclientConfig.Debug = flags.Debug\n\tclientConfig.Seed = flags.Seed\n\tclientConfig.PublicIp4 = flags.PublicIP\n\tclientConfig.PublicIp6 = flags.PublicIP\n\tclientConfig.DisablePEX = !flags.Pex\n\tclientConfig.DisableWebtorrent = !flags.Webtorrent\n\tif flags.PackedBlocklist != \"\" {\n\t\tblocklist, err := iplist.MMapPackedFile(flags.PackedBlocklist)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"loading blocklist: %v\", err)\n\t\t}\n\t\tdefer blocklist.Close()\n\t\tclientConfig.IPBlocklist = blocklist\n\t}\n\tif flags.Mmap {\n\t\tclientConfig.DefaultStorage = storage.NewMMap(\"\")\n\t}\n\tif flags.Addr != \"\" {\n\t\tclientConfig.SetListenAddr(flags.Addr)\n\t}\n\tif flags.UploadRate != nil {\n\t\tclientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(*flags.UploadRate), 256<<10)\n\t}\n\tif flags.DownloadRate != nil {\n\t\tclientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(*flags.DownloadRate), 1<<20)\n\t}\n\tif flags.Quiet {\n\t\tclientConfig.Logger = log.Discard\n\t}\n\tclientConfig.MaxUnverifiedBytes = flags.MaxUnverifiedBytes.Int64()\n\n\tvar stop missinggo.SynchronizedEvent\n\tdefer func() {\n\t\tstop.Set()\n\t}()\n\n\tclient, err := torrent.NewClient(clientConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating client: %w\", err)\n\t}\n\tvar clientClose sync.Once \/\/In certain situations, close was being called more than once.\n\tdefer clientClose.Do(client.Close)\n\tgo exitSignalHandlers(&stop)\n\tgo func() {\n\t\t<-stop.C()\n\t\tclientClose.Do(client.Close)\n\t}()\n\n\t\/\/ Write status on the root path on the default HTTP muxer. This will be bound to localhost\n\t\/\/ somewhere if GOPPROF is set, thanks to the envpprof import.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, req *http.Request) {\n\t\tclient.WriteStatus(w)\n\t})\n\terr = addTorrents(client, flags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"adding torrents: %w\", err)\n\t}\n\tdefer outputStats(client, flags)\n\tif client.WaitAll() {\n\t\tlog.Print(\"downloaded ALL the torrents\")\n\t} else {\n\t\terr = errors.New(\"y u no complete torrents?!\")\n\t}\n\tif flags.Seed {\n\t\tif len(client.Torrents()) == 0 {\n\t\t\tlog.Print(\"no torrents to seed\")\n\t\t} else {\n\t\t\toutputStats(client, flags)\n\t\t\t<-stop.C()\n\t\t}\n\t}\n\tspew.Dump(expvar.Get(\"torrent\").(*expvar.Map).Get(\"chunks received\"))\n\tspew.Dump(client.ConnStats())\n\tclStats := client.ConnStats()\n\tsentOverhead := clStats.BytesWritten.Int64() - clStats.BytesWrittenData.Int64()\n\tlog.Printf(\n\t\t\"client read %v, %v was useful data. sent %v non-data bytes\",\n\t\thumanize.Bytes(uint64(clStats.BytesRead.Int64())),\n\t\t100*float64(clStats.BytesReadUsefulData.Int64())\/float64(clStats.BytesRead.Int64()),\n\t\thumanize.Bytes(uint64(sentOverhead)))\n\treturn err\n}\n\nfunc outputStats(cl *torrent.Client, args downloadFlags) {\n\tif !statsEnabled(args) {\n\t\treturn\n\t}\n\texpvar.Do(func(kv expvar.KeyValue) {\n\t\tfmt.Printf(\"%s: %s\\n\", kv.Key, kv.Value)\n\t})\n\tcl.WriteStatus(os.Stdout)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\n\t\"github.com\/appscode\/errors\"\n\terr_logger \"github.com\/appscode\/errors\/h\/log\"\n\t\"github.com\/appscode\/go\/flags\"\n\t\"github.com\/appscode\/log\"\n\tlogs \"github.com\/appscode\/log\/golog\"\n\t\"github.com\/appscode\/voyager\/cmd\/voyager\/app\"\n\t\"github.com\/appscode\/voyager\/cmd\/voyager\/app\/options\"\n\t\"github.com\/mikespook\/golib\/signal\"\n\t\"github.com\/spf13\/pflag\"\n\t\/\/ Add fake package as a dependency to add this under vendor\n\t_ \"github.com\/appscode\/k8s-addons\/client\/clientset\/fake\"\n\t_ \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\/fake\"\n)\n\nfunc main() {\n\tconfig := options.NewConfig()\n\tconfig.AddFlags(pflag.CommandLine)\n\n\tflags.InitFlags()\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\terrors.Handlers.Add(err_logger.LogHandler{})\n\tflags.DumpAll()\n\n\tif config.ProviderName == \"\" ||\n\t\tconfig.ClusterName == \"\" ||\n\t\tconfig.LoadbalancerImageName == \"\" {\n\t\tlog.Fatalln(\"Required flag not provided.\")\n\t}\n\n\tlog.Infoln(\"Starting Voyager Controller...\")\n\tgo app.Run(config)\n\tsig := signal.New(nil)\n\tsig.Bind(os.Interrupt, func() uint { return signal.BreakExit })\n\tsig.Wait()\n}\n<commit_msg>cloud-provider & cloud-name is not required for unknown providers. (#65)<commit_after>package main\n\nimport (\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\n\t\"github.com\/appscode\/errors\"\n\terr_logger \"github.com\/appscode\/errors\/h\/log\"\n\t\"github.com\/appscode\/go\/flags\"\n\tstringz \"github.com\/appscode\/go\/strings\"\n\t\"github.com\/appscode\/log\"\n\tlogs \"github.com\/appscode\/log\/golog\"\n\t\"github.com\/appscode\/voyager\/cmd\/voyager\/app\"\n\t\"github.com\/appscode\/voyager\/cmd\/voyager\/app\/options\"\n\t\"github.com\/mikespook\/golib\/signal\"\n\t\"github.com\/spf13\/pflag\"\n\t\/\/ Add fake package as a dependency to add this under vendor\n\t_ \"github.com\/appscode\/k8s-addons\/client\/clientset\/fake\"\n\t_ \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\/fake\"\n)\n\nfunc main() {\n\tconfig := options.NewConfig()\n\tconfig.AddFlags(pflag.CommandLine)\n\n\tflags.InitFlags()\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\terrors.Handlers.Add(err_logger.LogHandler{})\n\tflags.DumpAll()\n\n\tif config.LoadbalancerImageName == \"\" {\n\t\tlog.Fatalln(\"Missing required flag --haproxy-image\")\n\t}\n\tif stringz.Contains([]string{\"aws\", \"gce\", \"gke\", \"azure\"}, config.ProviderName) && config.ClusterName == \"\" {\n\t\tlog.Fatalln(\"--cluster-name flag must be set when --cloud-provider={aws,gce,gke,azure}\")\n\t}\n\n\tlog.Infoln(\"Starting Voyager Controller...\")\n\tgo app.Run(config)\n\tsig := signal.New(nil)\n\tsig.Bind(os.Interrupt, func() uint { return signal.BreakExit })\n\tsig.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/hagen1778\/chproxy\/config\"\n\t\"github.com\/hagen1778\/chproxy\/log\"\n)\n\nfunc (s *scope) String() string {\n\treturn fmt.Sprintf(\"[ Id: %d; User %q(%d) proxying as %q(%d) to %q(%d) ]\",\n\t\ts.id,\n\t\ts.user.name, s.user.runningQueries(),\n\t\ts.clusterUser.name, s.clusterUser.runningQueries(),\n\t\ts.host.addr.Host, s.host.runningQueries())\n}\n\ntype scope struct {\n\tid uint64\n\thost *host\n\tcluster *cluster\n\tuser *user\n\tclusterUser *clusterUser\n}\n\nvar scopeId uint64\n\nfunc newScope(u *user, cu *clusterUser, c *cluster) *scope {\n\treturn &scope{\n\t\tid: atomic.AddUint64(&scopeId, 1),\n\t\thost: c.getHost(),\n\t\tcluster: c,\n\t\tuser: u,\n\t\tclusterUser: cu,\n\t}\n}\n\nfunc (s *scope) inc() error {\n\ts.user.inc()\n\ts.clusterUser.inc()\n\ts.host.inc()\n\n\tvar err error\n\tif s.user.maxConcurrentQueries > 0 && s.user.runningQueries() > s.user.maxConcurrentQueries {\n\t\terr = fmt.Errorf(\"limits for user %q are exceeded: maxConcurrentQueries limit: %d\", s.user.name, s.user.maxConcurrentQueries)\n\t}\n\n\tif s.clusterUser.maxConcurrentQueries > 0 && s.clusterUser.runningQueries() > s.clusterUser.maxConcurrentQueries {\n\t\terr = fmt.Errorf(\"limits for cluster user %q are exceeded: maxConcurrentQueries limit: %d\", s.clusterUser.name, s.clusterUser.maxConcurrentQueries)\n\t}\n\n\tif err != nil {\n\t\ts.dec()\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *scope) dec() {\n\ts.host.dec()\n\ts.user.dec()\n\ts.clusterUser.dec()\n}\n\ntype user struct {\n\ttoUser string\n\ttoCluster string\n\tallowedNetworks config.Networks\n\n\tname, password string\n\tmaxExecutionTime time.Duration\n\tmaxConcurrentQueries uint32\n\n\tqueryCounter\n}\n\ntype clusterUser struct {\n\tname, password string\n\tmaxExecutionTime time.Duration\n\tmaxConcurrentQueries uint32\n\n\tqueryCounter\n}\n\nfunc (u *clusterUser) timeout() <-chan time.Time {\n\tif u.maxExecutionTime > 0 {\n\t\treturn time.After(u.maxExecutionTime)\n\t}\n\n\treturn nil\n}\n\ntype host struct {\n\taddr *url.URL\n\n\tqueryCounter\n}\n\ntype cluster struct {\n\tnextIdx uint32\n\thosts []*host\n\tusers map[string]*clusterUser\n}\n\nfunc newCluster(h []*host, cu map[string]*clusterUser) *cluster {\n\treturn &cluster{\n\t\thosts: h,\n\t\tusers: cu,\n\t\tnextIdx: uint32(time.Now().UnixNano()),\n\t}\n}\n\n\/\/ We don't use query_id because of distributed processing, the query ID is not passed to remote servers\nfunc (c *cluster) killQueries(ua string, elapsed float64) {\n\tq := fmt.Sprintf(\"KILL QUERY WHERE http_user_agent = '%s' AND elapsed >= %d\", ua, int(elapsed))\n\tlog.Debugf(\"ExecutionTime exceeded. Going to call query %q for hosts %v\", q, c.hosts)\n\tfor _, host := range c.hosts {\n\t\tif err := doQuery(q, host.addr.String()); err != nil {\n\t\t\tlog.Errorf(\"error while killing queries older than %.2fs: %s\", elapsed, err)\n\t\t}\n\t}\n}\n\n\/\/ get least loaded + round-robin host from cluster\nfunc (c *cluster) getHost() *host {\n\tidx := atomic.AddUint32(&c.nextIdx, 1)\n\n\tl := uint32(len(c.hosts))\n\tidx = idx % l\n\tidle := c.hosts[idx]\n\tidleN := idle.runningQueries()\n\n\tif idleN == 0 {\n\t\treturn idle\n\t}\n\n\t\/\/ round checking of hosts in slice\n\t\/\/ until hits least loaded\n\tfor i := (idx + 1) % l; i != idx; i = (i + 1) % l {\n\t\th := c.hosts[i]\n\t\tn := h.runningQueries()\n\t\tif n == 0 {\n\t\t\treturn h\n\t\t}\n\t\tif n < idleN {\n\t\t\tidle, idleN = h, n\n\t\t}\n\t}\n\n\treturn idle\n}\n\ntype queryCounter struct {\n\tvalue uint32\n}\n\nfunc (qc *queryCounter) runningQueries() uint32 {\n\treturn atomic.LoadUint32(&qc.value)\n}\n\nfunc (qc *queryCounter) inc() {\n\tatomic.AddUint32(&qc.value, 1)\n}\n\nfunc (qc *queryCounter) dec() {\n\tatomic.AddUint32(&qc.value, ^uint32(0))\n}\n<commit_msg>fix comment<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/hagen1778\/chproxy\/config\"\n\t\"github.com\/hagen1778\/chproxy\/log\"\n)\n\nfunc (s *scope) String() string {\n\treturn fmt.Sprintf(\"[ Id: %d; User %q(%d) proxying as %q(%d) to %q(%d) ]\",\n\t\ts.id,\n\t\ts.user.name, s.user.runningQueries(),\n\t\ts.clusterUser.name, s.clusterUser.runningQueries(),\n\t\ts.host.addr.Host, s.host.runningQueries())\n}\n\ntype scope struct {\n\tid uint64\n\thost *host\n\tcluster *cluster\n\tuser *user\n\tclusterUser *clusterUser\n}\n\nvar scopeId uint64\n\nfunc newScope(u *user, cu *clusterUser, c *cluster) *scope {\n\treturn &scope{\n\t\tid: atomic.AddUint64(&scopeId, 1),\n\t\thost: c.getHost(),\n\t\tcluster: c,\n\t\tuser: u,\n\t\tclusterUser: cu,\n\t}\n}\n\nfunc (s *scope) inc() error {\n\ts.user.inc()\n\ts.clusterUser.inc()\n\ts.host.inc()\n\n\tvar err error\n\tif s.user.maxConcurrentQueries > 0 && s.user.runningQueries() > s.user.maxConcurrentQueries {\n\t\terr = fmt.Errorf(\"limits for user %q are exceeded: maxConcurrentQueries limit: %d\", s.user.name, s.user.maxConcurrentQueries)\n\t}\n\n\tif s.clusterUser.maxConcurrentQueries > 0 && s.clusterUser.runningQueries() > s.clusterUser.maxConcurrentQueries {\n\t\terr = fmt.Errorf(\"limits for cluster user %q are exceeded: maxConcurrentQueries limit: %d\", s.clusterUser.name, s.clusterUser.maxConcurrentQueries)\n\t}\n\n\tif err != nil {\n\t\ts.dec()\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *scope) dec() {\n\ts.host.dec()\n\ts.user.dec()\n\ts.clusterUser.dec()\n}\n\ntype user struct {\n\ttoUser string\n\ttoCluster string\n\tallowedNetworks config.Networks\n\n\tname, password string\n\tmaxExecutionTime time.Duration\n\tmaxConcurrentQueries uint32\n\n\tqueryCounter\n}\n\ntype clusterUser struct {\n\tname, password string\n\tmaxExecutionTime time.Duration\n\tmaxConcurrentQueries uint32\n\n\tqueryCounter\n}\n\nfunc (u *clusterUser) timeout() <-chan time.Time {\n\tif u.maxExecutionTime > 0 {\n\t\treturn time.After(u.maxExecutionTime)\n\t}\n\n\treturn nil\n}\n\ntype host struct {\n\taddr *url.URL\n\n\tqueryCounter\n}\n\ntype cluster struct {\n\tnextIdx uint32\n\thosts []*host\n\tusers map[string]*clusterUser\n}\n\nfunc newCluster(h []*host, cu map[string]*clusterUser) *cluster {\n\treturn &cluster{\n\t\thosts: h,\n\t\tusers: cu,\n\t\tnextIdx: uint32(time.Now().UnixNano()),\n\t}\n}\n\n\/\/ We don't use query_id because of distributed processing, the query ID is not passed to remote servers\nfunc (c *cluster) killQueries(ua string, elapsed float64) {\n\tq := fmt.Sprintf(\"KILL QUERY WHERE http_user_agent = '%s' AND elapsed >= %d\", ua, int(elapsed))\n\tlog.Debugf(\"ExecutionTime exceeded. Going to call query %q for hosts %v\", q, c.hosts)\n\tfor _, host := range c.hosts {\n\t\tif err := doQuery(q, host.addr.String()); err != nil {\n\t\t\tlog.Errorf(\"error while killing queries older than %.2fs: %s\", elapsed, err)\n\t\t}\n\t}\n}\n\n\/\/ get least loaded + round-robin host from cluster\nfunc (c *cluster) getHost() *host {\n\tidx := atomic.AddUint32(&c.nextIdx, 1)\n\n\tl := uint32(len(c.hosts))\n\tidx = idx % l\n\tidle := c.hosts[idx]\n\tidleN := idle.runningQueries()\n\n\tif idleN == 0 {\n\t\treturn idle\n\t}\n\n\t\/\/ round hosts checking\n\t\/\/ until the least loaded is found\n\tfor i := (idx + 1) % l; i != idx; i = (i + 1) % l {\n\t\th := c.hosts[i]\n\t\tn := h.runningQueries()\n\t\tif n == 0 {\n\t\t\treturn h\n\t\t}\n\t\tif n < idleN {\n\t\t\tidle, idleN = h, n\n\t\t}\n\t}\n\n\treturn idle\n}\n\ntype queryCounter struct {\n\tvalue uint32\n}\n\nfunc (qc *queryCounter) runningQueries() uint32 {\n\treturn atomic.LoadUint32(&qc.value)\n}\n\nfunc (qc *queryCounter) inc() {\n\tatomic.AddUint32(&qc.value, 1)\n}\n\nfunc (qc *queryCounter) dec() {\n\tatomic.AddUint32(&qc.value, ^uint32(0))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage eval\n\nimport (\n\t\"go\/token\"\n\t\"log\"\n)\n\n\/*\n * Blocks and scopes\n *\/\n\n\/\/ A definition can be a *Variable, *Constant, or Type.\ntype Def interface {\n\tPos() token.Pos\n}\n\ntype Variable struct {\n\tVarPos token.Pos\n\t\/\/ Index of this variable in the Frame structure\n\tIndex int\n\t\/\/ Static type of this variable\n\tType Type\n\t\/\/ Value of this variable. This is only used by Scope.NewFrame;\n\t\/\/ therefore, it is useful for global scopes but cannot be used\n\t\/\/ in function scopes.\n\tInit Value\n}\n\nfunc (v *Variable) Pos() token.Pos {\n\treturn v.VarPos\n}\n\ntype Constant struct {\n\tConstPos token.Pos\n\tType Type\n\tValue Value\n}\n\nfunc (c *Constant) Pos() token.Pos {\n\treturn c.ConstPos\n}\n\ntype PkgIdent struct {\n\tPkgPos token.Pos\n\tpath string \/\/ the unique path to that package\n\tscope *Scope \/\/ the scope holding the package definition(s)\n}\n\nfunc (p *PkgIdent) Pos() token.Pos {\n\treturn p.PkgPos\n}\n\n\/\/ A block represents a definition block in which a name may not be\n\/\/ defined more than once.\ntype block struct {\n\t\/\/ The block enclosing this one, including blocks in other\n\t\/\/ scopes.\n\touter *block\n\t\/\/ The nested block currently being compiled, or nil.\n\tinner *block\n\t\/\/ The Scope containing this block.\n\tscope *Scope\n\t\/\/ The Variables, Constants, and Types defined in this block.\n\tdefs map[string]Def\n\t\/\/ The index of the first variable defined in this block.\n\t\/\/ This must be greater than the index of any variable defined\n\t\/\/ in any parent of this block within the same Scope at the\n\t\/\/ time this block is entered.\n\toffset int\n\t\/\/ The number of Variables defined in this block.\n\tnumVars int\n\t\/\/ If global, do not allocate new vars and consts in\n\t\/\/ the frame; assume that the refs will be compiled in\n\t\/\/ using defs[name].Init.\n\tglobal bool\n}\n\n\/\/ A Scope is the compile-time analogue of a Frame, which captures\n\/\/ some subtree of blocks.\ntype Scope struct {\n\t\/\/ The root block of this scope.\n\t*block\n\t\/\/ The maximum number of variables required at any point in\n\t\/\/ this Scope. This determines the number of slots needed in\n\t\/\/ Frame's created from this Scope at run-time.\n\tmaxVars int\n}\n\n\/\/ A special scope for the universe, to be able to stash 'packages'\ntype universeScope struct {\n\t*Scope\n\t\/\/ a lookup-table for easy retrieval of packages by their 'path'\n\tpkgs map[string]*Scope\n}\n\nfunc (b *block) enterChild() *block {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before entering another child\")\n\t}\n\tsub := &block{\n\t\touter: b,\n\t\tscope: b.scope,\n\t\tdefs: make(map[string]Def),\n\t\toffset: b.offset + b.numVars,\n\t}\n\tb.inner = sub\n\treturn sub\n}\n\nfunc (b *block) exit() {\n\tif b.outer == nil {\n\t\tlog.Panic(\"Cannot exit top-level block\")\n\t}\n\tif b.outer.scope == b.scope {\n\t\tif b.outer.inner != b {\n\t\t\tlog.Panic(\"Already exited block\")\n\t\t}\n\t\tif b.inner != nil && b.inner.scope == b.scope {\n\t\t\tlog.Panic(\"Exit of parent block without exit of child block\")\n\t\t}\n\t}\n\tb.outer.inner = nil\n}\n\nfunc (b *block) ChildScope() *Scope {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before entering a child scope\")\n\t}\n\tsub := b.enterChild()\n\tsub.offset = 0\n\tsub.scope = &Scope{sub, 0}\n\treturn sub.scope\n}\n\nfunc (b *block) DefineVar(name string, pos token.Pos, t Type) (*Variable, Def) {\n\tif prev, ok := b.defs[name]; ok {\n\t\treturn nil, prev\n\t}\n\tv := b.defineSlot(t, false)\n\tv.VarPos = pos\n\tb.defs[name] = v\n\treturn v, nil\n}\n\nfunc (b *block) DefineTemp(t Type) *Variable { return b.defineSlot(t, true) }\n\nfunc (b *block) defineSlot(t Type, temp bool) *Variable {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before defining variable\")\n\t}\n\tindex := -1\n\tif !b.global || temp {\n\t\tindex = b.offset + b.numVars\n\t\tb.numVars++\n\t\tif index >= b.scope.maxVars {\n\t\t\tb.scope.maxVars = index + 1\n\t\t}\n\t}\n\tv := &Variable{token.NoPos, index, t, nil}\n\treturn v\n}\n\nfunc (b *block) DefineConst(name string, pos token.Pos, t Type, v Value) (*Constant, Def) {\n\tif prev, ok := b.defs[name]; ok {\n\t\treturn nil, prev\n\t}\n\tc := &Constant{pos, t, v}\n\tb.defs[name] = c\n\treturn c, nil\n}\n\nfunc (b *block) DefineType(name string, pos token.Pos, t Type) Type {\n\tif _, ok := b.defs[name]; ok {\n\t\treturn nil\n\t}\n\tnt := &NamedType{pos, name, nil, true, make(map[string]Method)}\n\tif t != nil {\n\t\tnt.Complete(t)\n\t}\n\tb.defs[name] = nt\n\treturn nt\n}\n\nfunc (b *block) DefinePackage(id, path string, pos token.Pos) (*PkgIdent, Def) {\n\tif prev, ok := b.defs[id]; ok {\n\t\treturn nil, prev\n\t}\n\tp := &PkgIdent{pos, path, universe.pkgs[path]}\n\tb.defs[id] = p\n\treturn p, nil\n}\n\nfunc (b *block) Lookup(name string) (bl *block, level int, def Def) {\n\tfor b != nil {\n\t\tif d, ok := b.defs[name]; ok {\n\t\t\treturn b, level, d\n\t\t}\n\t\tif b.outer != nil && b.scope != b.outer.scope {\n\t\t\tlevel++\n\t\t}\n\t\tb = b.outer\n\t}\n\treturn nil, 0, nil\n}\n\nfunc (s *Scope) NewFrame(outer *Frame) *Frame { return outer.child(s.maxVars) }\n\n\/*\n * Frames\n *\/\n\ntype Frame struct {\n\tOuter *Frame\n\tVars []Value\n}\n\nfunc (f *Frame) Get(level int, index int) Value {\n\tfor ; level > 0; level-- {\n\t\tf = f.Outer\n\t}\n\treturn f.Vars[index]\n}\n\nfunc (f *Frame) child(numVars int) *Frame {\n\t\/\/ TODO(austin) This is probably rather expensive. All values\n\t\/\/ require heap allocation and zeroing them when we execute a\n\t\/\/ definition typically requires some computation.\n\treturn &Frame{f, make([]Value, numVars)}\n}\n<commit_msg>block: allow to undefine definitions<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage eval\n\nimport (\n\t\"go\/token\"\n\t\"log\"\n)\n\n\/*\n * Blocks and scopes\n *\/\n\n\/\/ A definition can be a *Variable, *Constant, or Type.\ntype Def interface {\n\tPos() token.Pos\n}\n\ntype Variable struct {\n\tVarPos token.Pos\n\t\/\/ Index of this variable in the Frame structure\n\tIndex int\n\t\/\/ Static type of this variable\n\tType Type\n\t\/\/ Value of this variable. This is only used by Scope.NewFrame;\n\t\/\/ therefore, it is useful for global scopes but cannot be used\n\t\/\/ in function scopes.\n\tInit Value\n}\n\nfunc (v *Variable) Pos() token.Pos {\n\treturn v.VarPos\n}\n\ntype Constant struct {\n\tConstPos token.Pos\n\tType Type\n\tValue Value\n}\n\nfunc (c *Constant) Pos() token.Pos {\n\treturn c.ConstPos\n}\n\ntype PkgIdent struct {\n\tPkgPos token.Pos\n\tpath string \/\/ the unique path to that package\n\tscope *Scope \/\/ the scope holding the package definition(s)\n}\n\nfunc (p *PkgIdent) Pos() token.Pos {\n\treturn p.PkgPos\n}\n\n\/\/ A block represents a definition block in which a name may not be\n\/\/ defined more than once.\ntype block struct {\n\t\/\/ The block enclosing this one, including blocks in other\n\t\/\/ scopes.\n\touter *block\n\t\/\/ The nested block currently being compiled, or nil.\n\tinner *block\n\t\/\/ The Scope containing this block.\n\tscope *Scope\n\t\/\/ The Variables, Constants, and Types defined in this block.\n\tdefs map[string]Def\n\t\/\/ The index of the first variable defined in this block.\n\t\/\/ This must be greater than the index of any variable defined\n\t\/\/ in any parent of this block within the same Scope at the\n\t\/\/ time this block is entered.\n\toffset int\n\t\/\/ The number of Variables defined in this block.\n\tnumVars int\n\t\/\/ If global, do not allocate new vars and consts in\n\t\/\/ the frame; assume that the refs will be compiled in\n\t\/\/ using defs[name].Init.\n\tglobal bool\n}\n\n\/\/ A Scope is the compile-time analogue of a Frame, which captures\n\/\/ some subtree of blocks.\ntype Scope struct {\n\t\/\/ The root block of this scope.\n\t*block\n\t\/\/ The maximum number of variables required at any point in\n\t\/\/ this Scope. This determines the number of slots needed in\n\t\/\/ Frame's created from this Scope at run-time.\n\tmaxVars int\n}\n\n\/\/ A special scope for the universe, to be able to stash 'packages'\ntype universeScope struct {\n\t*Scope\n\t\/\/ a lookup-table for easy retrieval of packages by their 'path'\n\tpkgs map[string]*Scope\n}\n\nfunc (b *block) enterChild() *block {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before entering another child\")\n\t}\n\tsub := &block{\n\t\touter: b,\n\t\tscope: b.scope,\n\t\tdefs: make(map[string]Def),\n\t\toffset: b.offset + b.numVars,\n\t}\n\tb.inner = sub\n\treturn sub\n}\n\nfunc (b *block) exit() {\n\tif b.outer == nil {\n\t\tlog.Panic(\"Cannot exit top-level block\")\n\t}\n\tif b.outer.scope == b.scope {\n\t\tif b.outer.inner != b {\n\t\t\tlog.Panic(\"Already exited block\")\n\t\t}\n\t\tif b.inner != nil && b.inner.scope == b.scope {\n\t\t\tlog.Panic(\"Exit of parent block without exit of child block\")\n\t\t}\n\t}\n\tb.outer.inner = nil\n}\n\nfunc (b *block) ChildScope() *Scope {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before entering a child scope\")\n\t}\n\tsub := b.enterChild()\n\tsub.offset = 0\n\tsub.scope = &Scope{sub, 0}\n\treturn sub.scope\n}\n\nfunc (b *block) undefine(name string) {\n\tdelete(b.defs, name)\n}\n\nfunc (b *block) DefineVar(name string, pos token.Pos, t Type) (*Variable, Def) {\n\tif prev, ok := b.defs[name]; ok {\n\t\treturn nil, prev\n\t}\n\tv := b.defineSlot(t, false)\n\tv.VarPos = pos\n\tb.defs[name] = v\n\treturn v, nil\n}\n\nfunc (b *block) DefineTemp(t Type) *Variable { return b.defineSlot(t, true) }\n\nfunc (b *block) defineSlot(t Type, temp bool) *Variable {\n\tif b.inner != nil && b.inner.scope == b.scope {\n\t\tlog.Panic(\"Failed to exit child block before defining variable\")\n\t}\n\tindex := -1\n\tif !b.global || temp {\n\t\tindex = b.offset + b.numVars\n\t\tb.numVars++\n\t\tif index >= b.scope.maxVars {\n\t\t\tb.scope.maxVars = index + 1\n\t\t}\n\t}\n\tv := &Variable{token.NoPos, index, t, nil}\n\treturn v\n}\n\nfunc (b *block) DefineConst(name string, pos token.Pos, t Type, v Value) (*Constant, Def) {\n\tif prev, ok := b.defs[name]; ok {\n\t\treturn nil, prev\n\t}\n\tc := &Constant{pos, t, v}\n\tb.defs[name] = c\n\treturn c, nil\n}\n\nfunc (b *block) DefineType(name string, pos token.Pos, t Type) Type {\n\tif _, ok := b.defs[name]; ok {\n\t\treturn nil\n\t}\n\tnt := &NamedType{pos, name, nil, true, make(map[string]Method)}\n\tif t != nil {\n\t\tnt.Complete(t)\n\t}\n\tb.defs[name] = nt\n\treturn nt\n}\n\nfunc (b *block) DefinePackage(id, path string, pos token.Pos) (*PkgIdent, Def) {\n\tif prev, ok := b.defs[id]; ok {\n\t\treturn nil, prev\n\t}\n\tp := &PkgIdent{pos, path, universe.pkgs[path]}\n\tb.defs[id] = p\n\treturn p, nil\n}\n\nfunc (b *block) Lookup(name string) (bl *block, level int, def Def) {\n\tfor b != nil {\n\t\tif d, ok := b.defs[name]; ok {\n\t\t\treturn b, level, d\n\t\t}\n\t\tif b.outer != nil && b.scope != b.outer.scope {\n\t\t\tlevel++\n\t\t}\n\t\tb = b.outer\n\t}\n\treturn nil, 0, nil\n}\n\nfunc (s *Scope) NewFrame(outer *Frame) *Frame { return outer.child(s.maxVars) }\n\n\/*\n * Frames\n *\/\n\ntype Frame struct {\n\tOuter *Frame\n\tVars []Value\n}\n\nfunc (f *Frame) Get(level int, index int) Value {\n\tfor ; level > 0; level-- {\n\t\tf = f.Outer\n\t}\n\treturn f.Vars[index]\n}\n\nfunc (f *Frame) child(numVars int) *Frame {\n\t\/\/ TODO(austin) This is probably rather expensive. All values\n\t\/\/ require heap allocation and zeroing them when we execute a\n\t\/\/ definition typically requires some computation.\n\treturn &Frame{f, make([]Value, numVars)}\n}\n<|endoftext|>"} {"text":"<commit_before>package fuzzaldrin\n\nimport (\n\t\"math\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc basenameScore(str, query string, _score float64) float64 {\n\tindex := len(str) - 1\n\tfor os.IsPathSeparator(str[index]) {\n\t\tindex--\n\t}\n\tslashCount := 0\n\tlastCharacter := index\n\tvar base string\n\tfor index >= 0 {\n\t\tif os.IsPathSeparator(str[index]) {\n\t\t\tslashCount++\n\t\t\tbase = str[index+1 : lastCharacter+1]\n\t\t} else if index == 0 {\n\t\t\tif lastCharacter < len(str)-1 {\n\t\t\t\tbase = str[0 : lastCharacter+1]\n\t\t\t} else {\n\t\t\t\tbase = str\n\t\t\t}\n\t\t}\n\t\tindex--\n\t}\n\n\t\/\/ Basename matches count for more.\n\tif base == str {\n\t\t_score *= 2\n\t} else if base != \"\" {\n\t\t_score += score(base, query)\n\t}\n\n\t\/\/ Shallow files are scored higher\n\tsegmentCount := slashCount + 1\n\tdepth := math.Max(1.0, float64(10-segmentCount))\n\t_score *= depth * 0.01\n\treturn _score\n}\n\nfunc score(str, query string) float64 {\n\tif str == query {\n\t\treturn 1\n\t}\n\n\tif queryIsLastPathSegment(str, query) {\n\t\treturn 1\n\t}\n\n\ttotalScore := 0.0\n\tqueryLength := len(query)\n\tstrLength := len(str)\n\n\tindexInQuery := 0\n\tindexInStr := 0\n\n\tfor indexInQuery < queryLength {\n\t\tch := query[indexInQuery]\n\t\tindexInQuery++\n\t\tlcIndex := strings.Index(str, strings.ToLower(string(ch)))\n\t\tucIndex := strings.Index(str, strings.ToUpper(string(ch)))\n\t\tminIndex := math.Min(float64(lcIndex), float64(ucIndex))\n\t\tif minIndex == -1 {\n\t\t\tminIndex = math.Max(float64(lcIndex), float64(ucIndex))\n\t\t}\n\t\tindexInStr = int(minIndex)\n\t\tif indexInStr == -1 {\n\t\t\treturn 0\n\t\t}\n\n\t\tchScore := 0.1\n\n\t\tif str[indexInStr] == ch {\n\t\t\tchScore += 0.1\n\t\t}\n\n\t\tif indexInStr == 0 || os.IsPathSeparator(str[indexInStr-1]) {\n\t\t\t\/\/ Start of string bonus\n\t\t\tchScore += 0.8\n\t\t} else if strings.Contains(\"-_ \", string(str[indexInStr-1])) {\n\t\t\t\/\/ Start of word bonus\n\t\t\tchScore += 0.7\n\t\t}\n\n\t\t\/\/ Trim string to after current abbreviation match\n\t\tstr = str[indexInStr+1 : len(str)]\n\n\t\ttotalScore += chScore\n\t}\n\n\tqueryScore := totalScore \/ float64(queryLength)\n\treturn ((queryScore * (float64(queryLength) \/ float64(strLength))) + queryScore) \/ 2.0\n}\n\nfunc queryIsLastPathSegment(str, query string) bool {\n\tif len(str) <= len(query) {\n\t\treturn false\n\t}\n\tif os.IsPathSeparator(str[len(str)-len(query)-1]) {\n\t\treturn strings.Index(str, query) == (len(str) - len(query))\n\t}\n\treturn false\n}\n<commit_msg>Fix variable name<commit_after>package fuzzaldrin\n\nimport (\n\t\"math\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc basenameScore(str, query string, calcScore float64) float64 {\n\tindex := len(str) - 1\n\tfor os.IsPathSeparator(str[index]) {\n\t\tindex--\n\t}\n\tslashCount := 0\n\tlastCharacter := index\n\tvar base string\n\tfor index >= 0 {\n\t\tif os.IsPathSeparator(str[index]) {\n\t\t\tslashCount++\n\t\t\tbase = str[index+1 : lastCharacter+1]\n\t\t} else if index == 0 {\n\t\t\tif lastCharacter < len(str)-1 {\n\t\t\t\tbase = str[0 : lastCharacter+1]\n\t\t\t} else {\n\t\t\t\tbase = str\n\t\t\t}\n\t\t}\n\t\tindex--\n\t}\n\n\t\/\/ Basename matches count for more.\n\tif base == str {\n\t\tcalcScore *= 2\n\t} else if base != \"\" {\n\t\tcalcScore += score(base, query)\n\t}\n\n\t\/\/ Shallow files are scored higher\n\tsegmentCount := slashCount + 1\n\tdepth := math.Max(1.0, float64(10-segmentCount))\n\tcalcScore *= depth * 0.01\n\treturn calcScore\n}\n\nfunc score(str, query string) float64 {\n\tif str == query {\n\t\treturn 1\n\t}\n\n\tif queryIsLastPathSegment(str, query) {\n\t\treturn 1\n\t}\n\n\ttotalScore := 0.0\n\tqueryLength := len(query)\n\tstrLength := len(str)\n\n\tindexInQuery := 0\n\tindexInStr := 0\n\n\tfor indexInQuery < queryLength {\n\t\tch := query[indexInQuery]\n\t\tindexInQuery++\n\t\tlcIndex := strings.Index(str, strings.ToLower(string(ch)))\n\t\tucIndex := strings.Index(str, strings.ToUpper(string(ch)))\n\t\tminIndex := math.Min(float64(lcIndex), float64(ucIndex))\n\t\tif minIndex == -1 {\n\t\t\tminIndex = math.Max(float64(lcIndex), float64(ucIndex))\n\t\t}\n\t\tindexInStr = int(minIndex)\n\t\tif indexInStr == -1 {\n\t\t\treturn 0\n\t\t}\n\n\t\tchScore := 0.1\n\n\t\tif str[indexInStr] == ch {\n\t\t\tchScore += 0.1\n\t\t}\n\n\t\tif indexInStr == 0 || os.IsPathSeparator(str[indexInStr-1]) {\n\t\t\t\/\/ Start of string bonus\n\t\t\tchScore += 0.8\n\t\t} else if strings.Contains(\"-_ \", string(str[indexInStr-1])) {\n\t\t\t\/\/ Start of word bonus\n\t\t\tchScore += 0.7\n\t\t}\n\n\t\t\/\/ Trim string to after current abbreviation match\n\t\tstr = str[indexInStr+1 : len(str)]\n\n\t\ttotalScore += chScore\n\t}\n\n\tqueryScore := totalScore \/ float64(queryLength)\n\treturn ((queryScore * (float64(queryLength) \/ float64(strLength))) + queryScore) \/ 2.0\n}\n\nfunc queryIsLastPathSegment(str, query string) bool {\n\tif len(str) <= len(query) {\n\t\treturn false\n\t}\n\tif os.IsPathSeparator(str[len(str)-len(query)-1]) {\n\t\treturn strings.Index(str, query) == (len(str) - len(query))\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\trouteclientset \"github.com\/openshift\/client-go\/route\/clientset\/versioned\"\n\t\"github.com\/openshift\/origin\/test\/extended\/router\/h2spec\"\n\t\"github.com\/openshift\/origin\/test\/extended\/router\/shard\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n)\n\nconst h2specDialTimeoutInSeconds = 15\n\ntype h2specFailingTest struct {\n\tTestCase *h2spec.JUnitTestCase\n\tTestNumber int\n}\n\nvar _ = g.Describe(\"[sig-network-edge][Conformance][Area:Networking][Feature:Router]\", func() {\n\tdefer g.GinkgoRecover()\n\n\tvar (\n\t\th2specServiceConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-h2spec.yaml\")\n\t\th2specRoutesConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-h2spec-routes.yaml\")\n\t\th2specRouterShardConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-shard.yaml\")\n\n\t\toc = exutil.NewCLI(\"router-h2spec\")\n\t\tshardConfigPath string \/\/ computed\n\t)\n\n\t\/\/ this hook must be registered before the framework namespace teardown\n\t\/\/ hook\n\tg.AfterEach(func() {\n\t\tif g.CurrentGinkgoTestDescription().Failed {\n\t\t\tclient := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(oc.KubeFramework().Namespace.Name)\n\t\t\tif routes, _ := client.List(context.Background(), metav1.ListOptions{}); routes != nil {\n\t\t\t\toutputIngress(routes.Items...)\n\t\t\t}\n\t\t\texutil.DumpPodLogsStartingWith(\"h2spec\", oc)\n\t\t}\n\t\tif len(shardConfigPath) > 0 {\n\t\t\tif err := oc.AsAdmin().Run(\"delete\").Args(\"-n\", \"openshift-ingress-operator\", \"-f\", shardConfigPath).Execute(); err != nil {\n\t\t\t\te2e.Logf(\"deleting ingress controller failed: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t})\n\n\tg.Describe(\"The HAProxy router\", func() {\n\t\tg.It(\"should pass the h2spec conformance tests\", func() {\n\t\t\tinfra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"failed to get cluster-wide infrastructure\")\n\t\t\tif !platformHasHTTP2LoadBalancerService(infra.Status.PlatformStatus.Type) {\n\t\t\t\tg.Skip(\"Skip on platforms where the default router is not exposed by a load balancer service.\")\n\t\t\t}\n\n\t\t\tg.By(\"Getting the default domain\")\n\t\t\tdefaultDomain, err := getDefaultIngressClusterDomainName(oc, time.Minute)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"failed to find default domain name\")\n\n\t\t\tg.By(\"Locating the router image reference\")\n\t\t\trouterImage, err := exutil.FindRouterImage(oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Locating the canary image reference\")\n\t\t\tcanaryImage, err := getCanaryImage(oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating h2spec test service\")\n\t\t\terr = oc.Run(\"new-app\").Args(\"-f\", h2specServiceConfigPath,\n\t\t\t\t\"-p\", \"HAPROXY_IMAGE=\"+routerImage,\n\t\t\t\t\"-p\", \"H2SPEC_IMAGE=\"+canaryImage).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\te2e.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(oc.KubeClient(), \"h2spec-haproxy\", oc.KubeFramework().Namespace.Name))\n\t\t\te2e.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(oc.KubeClient(), \"h2spec\", oc.KubeFramework().Namespace.Name))\n\n\t\t\tshardFQDN := oc.Namespace() + \".\" + defaultDomain\n\n\t\t\t\/\/ The new router shard is using a namespace\n\t\t\t\/\/ selector so label this test namespace to\n\t\t\t\/\/ match.\n\t\t\tg.By(\"By labelling the namespace\")\n\t\t\terr = oc.AsAdmin().Run(\"label\").Args(\"namespace\", oc.Namespace(), \"type=\"+oc.Namespace()).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating routes to test for h2spec compliance\")\n\t\t\terr = oc.Run(\"new-app\").Args(\"-f\", h2specRoutesConfigPath,\n\t\t\t\t\"-p\", \"DOMAIN=\"+shardFQDN,\n\t\t\t\t\"-p\", \"TYPE=\"+oc.Namespace()).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating a test-specific router shard\")\n\t\t\tshardConfigPath, err = shard.DeployNewRouterShard(oc, 10*time.Minute, shard.Config{\n\t\t\t\tFixturePath: h2specRouterShardConfigPath,\n\t\t\t\tDomain: shardFQDN,\n\t\t\t\tType: oc.Namespace(),\n\t\t\t})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"new router shard did not rollout\")\n\n\t\t\tg.By(\"Getting LB service\")\n\t\t\tshardService, err := getRouterService(oc, 5*time.Minute, \"router-\"+oc.Namespace())\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(shardService).NotTo(o.BeNil())\n\t\t\to.Expect(shardService.Status.LoadBalancer.Ingress).ShouldNot(o.BeEmpty())\n\n\t\t\tvar addrs []string\n\n\t\t\tif len(shardService.Status.LoadBalancer.Ingress[0].Hostname) > 0 {\n\t\t\t\tg.By(\"Waiting for LB hostname to register in DNS\")\n\t\t\t\taddrs, err = resolveHost(oc, time.Minute, 15*time.Minute, shardService.Status.LoadBalancer.Ingress[0].Hostname)\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\to.Expect(addrs).NotTo(o.BeEmpty())\n\t\t\t} else {\n\t\t\t\taddrs = append(addrs, shardService.Status.LoadBalancer.Ingress[0].IP)\n\t\t\t}\n\n\t\t\tg.By(\"Waiting for route hostname to register in DNS\")\n\t\t\thost := \"h2spec-passthrough.\" + shardFQDN\n\t\t\taddrs, err = resolveHostAsAddress(oc, time.Minute, 15*time.Minute, host, addrs[0])\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(addrs).NotTo(o.BeEmpty())\n\n\t\t\t\/\/ ROUTER_H2SPEC_SAMPLE when set runs the\n\t\t\t\/\/ conformance tests for N iterations to\n\t\t\t\/\/ identify flaking tests.\n\t\t\t\/\/\n\t\t\t\/\/ This should be enabled in development every\n\t\t\t\/\/ time we consume any new version of haproxy\n\t\t\t\/\/ be that a major, minor or a micro update to\n\t\t\t\/\/ continuously validate the set of test case\n\t\t\t\/\/ IDs that fail.\n\t\t\tif iterations := lookupEnv(\"ROUTER_H2SPEC_SAMPLE\", \"\"); iterations != \"\" {\n\t\t\t\tn, err := strconv.Atoi(iterations)\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\to.Expect(n).To(o.BeNumerically(\">\", 0))\n\t\t\t\trunConformanceTestsAndLogAggregateFailures(oc, host, \"h2spec\", n)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttestSuites, err := runConformanceTests(oc, host, \"h2spec\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(testSuites).ShouldNot(o.BeEmpty())\n\n\t\t\tfailures := failingTests(testSuites)\n\t\t\tfailureCount := len(failures)\n\n\t\t\tg.By(\"Analyzing results\")\n\t\t\t\/\/ https:\/\/github.com\/haproxy\/haproxy\/issues\/471#issuecomment-591420924\n\t\t\t\/\/\n\t\t\t\/\/ 5. Streams and Multiplexing\n\t\t\t\/\/ 5.4. Error Handling\n\t\t\t\/\/ 5.4.1. Connection Error Handling\n\t\t\t\/\/ using source address 10.131.0.37:34742\n\t\t\t\/\/ × 2: Sends an invalid PING frame to receive GOAWAY frame\n\t\t\t\/\/ -> An endpoint that encounters a connection error SHOULD first send a GOAWAY frame\n\t\t\t\/\/ Expected: GOAWAY Frame (Error Code: PROTOCOL_ERROR)\n\t\t\t\/\/ Actual: Connection closed\n\t\t\t\/\/\n\t\t\t\/\/ 6. Frame Definitions\n\t\t\t\/\/ 6.9. WINDOW_UPDATE\n\t\t\t\/\/ 6.9.1. The Flow-Control Window\n\t\t\t\/\/ using source address 10.131.0.37:34816\n\t\t\t\/\/ × 2: Sends multiple WINDOW_UPDATE frames increasing the flow control window to above 2^31-1\n\t\t\t\/\/ -> The endpoint MUST sends a GOAWAY frame with a FLOW_CONTROL_ERROR code.\n\t\t\t\/\/ Expected: GOAWAY Frame (Error Code: FLOW_CONTROL_ERROR)\n\t\t\t\/\/ Actual: Connection closed\n\t\t\t\/\/\n\t\t\t\/\/ 147 tests, 145 passed, 0 skipped, 2 failed\n\t\t\tknownFailures := map[string]bool{\n\t\t\t\t\"http2\/5.4.1.2\": true,\n\t\t\t\t\"http2\/6.9.1.2\": true,\n\t\t\t}\n\t\t\tfor _, f := range failures {\n\t\t\t\tif _, exists := knownFailures[f.ID()]; exists {\n\t\t\t\t\tfailureCount -= 1\n\t\t\t\t\te2e.Logf(\"TestCase ID: %q is a known failure; ignoring\", f.ID())\n\t\t\t\t} else {\n\t\t\t\t\te2e.Logf(\"TestCase ID: %q (%q) ****FAILED****\", f.ID(), f.TestCase.ClassName)\n\t\t\t\t}\n\t\t\t}\n\t\t\to.Expect(failureCount).Should(o.BeZero(), \"expected zero failures\")\n\t\t})\n\t})\n})\n\nfunc failingTests(testSuites []*h2spec.JUnitTestSuite) []h2specFailingTest {\n\tvar failures []h2specFailingTest\n\n\tfor _, ts := range testSuites {\n\t\tfor i := 0; i < ts.Tests; i++ {\n\t\t\tif ts.TestCases[i].Error != nil {\n\t\t\t\tfailures = append(failures, h2specFailingTest{\n\t\t\t\t\tTestNumber: i + 1,\n\t\t\t\t\tTestCase: ts.TestCases[i],\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn failures\n}\n\nfunc runConformanceTests(oc *exutil.CLI, host, podName string) ([]*h2spec.JUnitTestSuite, error) {\n\tg.By(\"Running the h2spec CLI test\")\n\n\t\/\/ this is the output file in the pod\n\toutputFile := \"\/tmp\/h2spec-results\"\n\n\t\/\/ h2spec will exit with non-zero if _any_ test in the suite\n\t\/\/ fails, or if there is a dial timeout, so we ignore the\n\t\/\/ error. If we can fetch the results and if we can decode the\n\t\/\/ results and we have > 0 test suites from the decoded\n\t\/\/ results then assume the test ran.\n\toutput, _ := e2e.RunHostCmd(oc.Namespace(), podName, h2specCommand(h2specDialTimeoutInSeconds, host, outputFile))\n\n\tg.By(\"Copying results\")\n\tdata, err := e2e.RunHostCmd(oc.Namespace(), podName, fmt.Sprintf(\"cat %q\", outputFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(data) == 0 {\n\t\treturn nil, errors.New(\"zero length results file\")\n\t}\n\n\tg.By(\"Decoding results\")\n\ttestSuites, err := h2spec.DecodeJUnitReport(strings.NewReader(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(testSuites) == 0 {\n\t\treturn nil, errors.New(\"no test results found\")\n\t}\n\n\t\/\/ Log what we consider a successful run\n\te2e.Logf(\"h2spec results\\n%s\", output)\n\treturn testSuites, nil\n}\n\nfunc runConformanceTestsAndLogAggregateFailures(oc *exutil.CLI, host, podName string, iterations int) {\n\tsortKeys := func(m map[string]int) []string {\n\t\tvar index []string\n\t\tfor k := range m {\n\t\t\tindex = append(index, k)\n\t\t}\n\t\tsort.Strings(index)\n\t\treturn index\n\t}\n\n\tprintFailures := func(prefix string, m map[string]int) {\n\t\tfor _, id := range sortKeys(m) {\n\t\t\te2e.Logf(\"%sTestCase ID: %q, cumulative failures: %v\", prefix, id, m[id])\n\t\t}\n\t}\n\n\tfailuresByTestCaseID := map[string]int{}\n\n\tfor i := 1; i <= iterations; i++ {\n\t\ttestResults, err := runConformanceTests(oc, host, podName)\n\t\tif err != nil {\n\t\t\te2e.Logf(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tfailures := failingTests(testResults)\n\t\te2e.Logf(\"Iteration %v\/%v: had %v failures\", i, iterations, len(failures))\n\n\t\t\/\/ Aggregate any new failures\n\t\tfor _, f := range failures {\n\t\t\tfailuresByTestCaseID[f.ID()]++\n\t\t}\n\n\t\t\/\/ Dump the current state at every iteration should\n\t\t\/\/ you wish to interrupt\/abort the running test.\n\t\tprintFailures(\"\\t\", failuresByTestCaseID)\n\t}\n\n\te2e.Logf(\"Sampling completed: %v test cases failed\", len(failuresByTestCaseID))\n\tprintFailures(\"\\t\", failuresByTestCaseID)\n}\n\nfunc getHostnameForRoute(oc *exutil.CLI, routeName string) (string, error) {\n\tvar hostname string\n\tns := oc.KubeFramework().Namespace.Name\n\tif err := wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) {\n\t\troute, err := oc.RouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\te2e.Logf(\"Error getting hostname for route %q: %v\", routeName, err)\n\t\t\treturn false, err\n\t\t}\n\t\tif len(route.Status.Ingress) == 0 || len(route.Status.Ingress[0].Host) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\thostname = route.Status.Ingress[0].Host\n\t\treturn true, nil\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostname, nil\n}\n\nfunc h2specCommand(timeout int, hostname, results string) string {\n\treturn fmt.Sprintf(\"ingress-operator h2spec --timeout=%v --tls --insecure --strict --host=%q --junit-report=%q\", timeout, hostname, results)\n}\n\nfunc (f h2specFailingTest) ID() string {\n\treturn fmt.Sprintf(\"%s.%d\", f.TestCase.Package, f.TestNumber)\n}\n\nfunc lookupEnv(key, defaultVal string) string {\n\tif val, ok := os.LookupEnv(key); ok {\n\t\treturn val\n\t}\n\treturn defaultVal\n}\n<commit_msg>test\/extended\/router: h2spec increase timeout and retry tests<commit_after>package router\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\trouteclientset \"github.com\/openshift\/client-go\/route\/clientset\/versioned\"\n\t\"github.com\/openshift\/origin\/test\/extended\/router\/h2spec\"\n\t\"github.com\/openshift\/origin\/test\/extended\/router\/shard\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2epod \"k8s.io\/kubernetes\/test\/e2e\/framework\/pod\"\n)\n\nconst h2specDialTimeoutInSeconds = 30\n\ntype h2specFailingTest struct {\n\tTestCase *h2spec.JUnitTestCase\n\tTestNumber int\n}\n\nvar _ = g.Describe(\"[sig-network-edge][Conformance][Area:Networking][Feature:Router]\", func() {\n\tdefer g.GinkgoRecover()\n\n\tvar (\n\t\th2specServiceConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-h2spec.yaml\")\n\t\th2specRoutesConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-h2spec-routes.yaml\")\n\t\th2specRouterShardConfigPath = exutil.FixturePath(\"testdata\", \"router\", \"router-shard.yaml\")\n\n\t\toc = exutil.NewCLI(\"router-h2spec\")\n\t\tshardConfigPath string \/\/ computed\n\t)\n\n\t\/\/ this hook must be registered before the framework namespace teardown\n\t\/\/ hook\n\tg.AfterEach(func() {\n\t\tif g.CurrentGinkgoTestDescription().Failed {\n\t\t\tclient := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(oc.KubeFramework().Namespace.Name)\n\t\t\tif routes, _ := client.List(context.Background(), metav1.ListOptions{}); routes != nil {\n\t\t\t\toutputIngress(routes.Items...)\n\t\t\t}\n\t\t\texutil.DumpPodLogsStartingWith(\"h2spec\", oc)\n\t\t}\n\t\tif len(shardConfigPath) > 0 {\n\t\t\tif err := oc.AsAdmin().Run(\"delete\").Args(\"-n\", \"openshift-ingress-operator\", \"-f\", shardConfigPath).Execute(); err != nil {\n\t\t\t\te2e.Logf(\"deleting ingress controller failed: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t})\n\n\tg.Describe(\"The HAProxy router\", func() {\n\t\tg.It(\"should pass the h2spec conformance tests\", func() {\n\t\t\tinfra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"failed to get cluster-wide infrastructure\")\n\t\t\tif !platformHasHTTP2LoadBalancerService(infra.Status.PlatformStatus.Type) {\n\t\t\t\tg.Skip(\"Skip on platforms where the default router is not exposed by a load balancer service.\")\n\t\t\t}\n\n\t\t\tg.By(\"Getting the default domain\")\n\t\t\tdefaultDomain, err := getDefaultIngressClusterDomainName(oc, time.Minute)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"failed to find default domain name\")\n\n\t\t\tg.By(\"Locating the router image reference\")\n\t\t\trouterImage, err := exutil.FindRouterImage(oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Locating the canary image reference\")\n\t\t\tcanaryImage, err := getCanaryImage(oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating h2spec test service\")\n\t\t\terr = oc.Run(\"new-app\").Args(\"-f\", h2specServiceConfigPath,\n\t\t\t\t\"-p\", \"HAPROXY_IMAGE=\"+routerImage,\n\t\t\t\t\"-p\", \"H2SPEC_IMAGE=\"+canaryImage).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\te2e.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(oc.KubeClient(), \"h2spec-haproxy\", oc.KubeFramework().Namespace.Name))\n\t\t\te2e.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(oc.KubeClient(), \"h2spec\", oc.KubeFramework().Namespace.Name))\n\n\t\t\tshardFQDN := oc.Namespace() + \".\" + defaultDomain\n\n\t\t\t\/\/ The new router shard is using a namespace\n\t\t\t\/\/ selector so label this test namespace to\n\t\t\t\/\/ match.\n\t\t\tg.By(\"By labelling the namespace\")\n\t\t\terr = oc.AsAdmin().Run(\"label\").Args(\"namespace\", oc.Namespace(), \"type=\"+oc.Namespace()).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating routes to test for h2spec compliance\")\n\t\t\terr = oc.Run(\"new-app\").Args(\"-f\", h2specRoutesConfigPath,\n\t\t\t\t\"-p\", \"DOMAIN=\"+shardFQDN,\n\t\t\t\t\"-p\", \"TYPE=\"+oc.Namespace()).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Creating a test-specific router shard\")\n\t\t\tshardConfigPath, err = shard.DeployNewRouterShard(oc, 10*time.Minute, shard.Config{\n\t\t\t\tFixturePath: h2specRouterShardConfigPath,\n\t\t\t\tDomain: shardFQDN,\n\t\t\t\tType: oc.Namespace(),\n\t\t\t})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), \"new router shard did not rollout\")\n\n\t\t\tg.By(\"Getting LB service\")\n\t\t\tshardService, err := getRouterService(oc, 5*time.Minute, \"router-\"+oc.Namespace())\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(shardService).NotTo(o.BeNil())\n\t\t\to.Expect(shardService.Status.LoadBalancer.Ingress).ShouldNot(o.BeEmpty())\n\n\t\t\tvar addrs []string\n\n\t\t\tif len(shardService.Status.LoadBalancer.Ingress[0].Hostname) > 0 {\n\t\t\t\tg.By(\"Waiting for LB hostname to register in DNS\")\n\t\t\t\taddrs, err = resolveHost(oc, time.Minute, 15*time.Minute, shardService.Status.LoadBalancer.Ingress[0].Hostname)\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\to.Expect(addrs).NotTo(o.BeEmpty())\n\t\t\t} else {\n\t\t\t\taddrs = append(addrs, shardService.Status.LoadBalancer.Ingress[0].IP)\n\t\t\t}\n\n\t\t\tg.By(\"Waiting for route hostname to register in DNS\")\n\t\t\thost := \"h2spec-passthrough.\" + shardFQDN\n\t\t\taddrs, err = resolveHostAsAddress(oc, time.Minute, 15*time.Minute, host, addrs[0])\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(addrs).NotTo(o.BeEmpty())\n\n\t\t\t\/\/ ROUTER_H2SPEC_SAMPLE when set runs the\n\t\t\t\/\/ conformance tests for N iterations to\n\t\t\t\/\/ identify flaking tests.\n\t\t\t\/\/\n\t\t\t\/\/ This should be enabled in development every\n\t\t\t\/\/ time we consume any new version of haproxy\n\t\t\t\/\/ be that a major, minor or a micro update to\n\t\t\t\/\/ continuously validate the set of test case\n\t\t\t\/\/ IDs that fail.\n\t\t\tif iterations := lookupEnv(\"ROUTER_H2SPEC_SAMPLE\", \"\"); iterations != \"\" {\n\t\t\t\tn, err := strconv.Atoi(iterations)\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\to.Expect(n).To(o.BeNumerically(\">\", 0))\n\t\t\t\trunConformanceTestsAndLogAggregateFailures(oc, host, \"h2spec\", n)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttestSuites, err := runConformanceTests(oc, host, \"h2spec\", 5*time.Minute)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\to.Expect(testSuites).ShouldNot(o.BeEmpty())\n\n\t\t\tfailures := failingTests(testSuites)\n\t\t\tfailureCount := len(failures)\n\n\t\t\tg.By(\"Analyzing results\")\n\t\t\t\/\/ https:\/\/github.com\/haproxy\/haproxy\/issues\/471#issuecomment-591420924\n\t\t\t\/\/\n\t\t\t\/\/ 5. Streams and Multiplexing\n\t\t\t\/\/ 5.4. Error Handling\n\t\t\t\/\/ 5.4.1. Connection Error Handling\n\t\t\t\/\/ using source address 10.131.0.37:34742\n\t\t\t\/\/ × 2: Sends an invalid PING frame to receive GOAWAY frame\n\t\t\t\/\/ -> An endpoint that encounters a connection error SHOULD first send a GOAWAY frame\n\t\t\t\/\/ Expected: GOAWAY Frame (Error Code: PROTOCOL_ERROR)\n\t\t\t\/\/ Actual: Connection closed\n\t\t\t\/\/\n\t\t\t\/\/ 6. Frame Definitions\n\t\t\t\/\/ 6.9. WINDOW_UPDATE\n\t\t\t\/\/ 6.9.1. The Flow-Control Window\n\t\t\t\/\/ using source address 10.131.0.37:34816\n\t\t\t\/\/ × 2: Sends multiple WINDOW_UPDATE frames increasing the flow control window to above 2^31-1\n\t\t\t\/\/ -> The endpoint MUST sends a GOAWAY frame with a FLOW_CONTROL_ERROR code.\n\t\t\t\/\/ Expected: GOAWAY Frame (Error Code: FLOW_CONTROL_ERROR)\n\t\t\t\/\/ Actual: Connection closed\n\t\t\t\/\/\n\t\t\t\/\/ 147 tests, 145 passed, 0 skipped, 2 failed\n\t\t\tknownFailures := map[string]bool{\n\t\t\t\t\"http2\/5.4.1.2\": true,\n\t\t\t\t\"http2\/6.9.1.2\": true,\n\t\t\t}\n\t\t\tfor _, f := range failures {\n\t\t\t\tif _, exists := knownFailures[f.ID()]; exists {\n\t\t\t\t\tfailureCount -= 1\n\t\t\t\t\te2e.Logf(\"TestCase ID: %q is a known failure; ignoring\", f.ID())\n\t\t\t\t} else {\n\t\t\t\t\te2e.Logf(\"TestCase ID: %q (%q) ****FAILED****\", f.ID(), f.TestCase.ClassName)\n\t\t\t\t}\n\t\t\t}\n\t\t\to.Expect(failureCount).Should(o.BeZero(), \"expected zero failures\")\n\t\t})\n\t})\n})\n\nfunc failingTests(testSuites []*h2spec.JUnitTestSuite) []h2specFailingTest {\n\tvar failures []h2specFailingTest\n\n\tfor _, ts := range testSuites {\n\t\tfor i := 0; i < ts.Tests; i++ {\n\t\t\tif ts.TestCases[i].Error != nil {\n\t\t\t\tfailures = append(failures, h2specFailingTest{\n\t\t\t\t\tTestNumber: i + 1,\n\t\t\t\t\tTestCase: ts.TestCases[i],\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn failures\n}\n\nfunc runConformanceTests(oc *exutil.CLI, host, podName string, timeout time.Duration) ([]*h2spec.JUnitTestSuite, error) {\n\tvar testSuites []*h2spec.JUnitTestSuite\n\n\tif err := wait.Poll(time.Second, timeout, func() (bool, error) {\n\t\tg.By(\"Running the h2spec CLI test\")\n\n\t\t\/\/ this is the output file in the pod\n\t\toutputFile := \"\/tmp\/h2spec-results\"\n\n\t\t\/\/ h2spec will exit with non-zero if _any_ test in the suite\n\t\t\/\/ fails, or if there is a dial timeout, so we log the\n\t\t\/\/ error. But if we can fetch the results and if we can decode the\n\t\t\/\/ results and we have > 0 test suites from the decoded\n\t\t\/\/ results then assume the test ran.\n\t\toutput, err := e2e.RunHostCmd(oc.Namespace(), podName, h2specCommand(h2specDialTimeoutInSeconds, host, outputFile))\n\t\tif err != nil {\n\t\t\te2e.Logf(\"error running h2spec: %v, but checking on result content\", err)\n\t\t}\n\n\t\tg.By(\"Copying results\")\n\t\tdata, err := e2e.RunHostCmd(oc.Namespace(), podName, fmt.Sprintf(\"cat %q\", outputFile))\n\t\tif err != nil {\n\t\t\te2e.Logf(\"error copying results: %v, retrying...\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tif len(data) == 0 {\n\t\t\te2e.Logf(\"results file is zero length, retrying...\")\n\t\t\treturn false, errors.New(\"empty results\")\n\t\t}\n\n\t\tg.By(\"Decoding results\")\n\t\ttestSuites, err = h2spec.DecodeJUnitReport(strings.NewReader(data))\n\t\tif err != nil {\n\t\t\te2e.Logf(\"error decoding results: %v, retrying...\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tif len(testSuites) == 0 {\n\t\t\te2e.Logf(\"expected len(testSuites) > 0, retrying...\")\n\t\t\treturn false, errors.New(\"no test results found\")\n\t\t}\n\n\t\t\/\/ Log what we consider a successful run\n\t\te2e.Logf(\"h2spec results\\n%s\", output)\n\t\treturn true, nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn testSuites, nil\n}\n\nfunc runConformanceTestsAndLogAggregateFailures(oc *exutil.CLI, host, podName string, iterations int) {\n\tsortKeys := func(m map[string]int) []string {\n\t\tvar index []string\n\t\tfor k := range m {\n\t\t\tindex = append(index, k)\n\t\t}\n\t\tsort.Strings(index)\n\t\treturn index\n\t}\n\n\tprintFailures := func(prefix string, m map[string]int) {\n\t\tfor _, id := range sortKeys(m) {\n\t\t\te2e.Logf(\"%sTestCase ID: %q, cumulative failures: %v\", prefix, id, m[id])\n\t\t}\n\t}\n\n\tfailuresByTestCaseID := map[string]int{}\n\n\tfor i := 1; i <= iterations; i++ {\n\t\ttestResults, err := runConformanceTests(oc, host, podName, 5*time.Minute)\n\t\tif err != nil {\n\t\t\te2e.Logf(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tfailures := failingTests(testResults)\n\t\te2e.Logf(\"Iteration %v\/%v: had %v failures\", i, iterations, len(failures))\n\n\t\t\/\/ Aggregate any new failures\n\t\tfor _, f := range failures {\n\t\t\tfailuresByTestCaseID[f.ID()]++\n\t\t}\n\n\t\t\/\/ Dump the current state at every iteration should\n\t\t\/\/ you wish to interrupt\/abort the running test.\n\t\tprintFailures(\"\\t\", failuresByTestCaseID)\n\t}\n\n\te2e.Logf(\"Sampling completed: %v test cases failed\", len(failuresByTestCaseID))\n\tprintFailures(\"\\t\", failuresByTestCaseID)\n}\n\nfunc getHostnameForRoute(oc *exutil.CLI, routeName string) (string, error) {\n\tvar hostname string\n\tns := oc.KubeFramework().Namespace.Name\n\tif err := wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) {\n\t\troute, err := oc.RouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\te2e.Logf(\"Error getting hostname for route %q: %v\", routeName, err)\n\t\t\treturn false, err\n\t\t}\n\t\tif len(route.Status.Ingress) == 0 || len(route.Status.Ingress[0].Host) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\thostname = route.Status.Ingress[0].Host\n\t\treturn true, nil\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostname, nil\n}\n\nfunc h2specCommand(timeout int, hostname, results string) string {\n\treturn fmt.Sprintf(\"ingress-operator h2spec --timeout=%v --tls --insecure --strict --host=%q --junit-report=%q\", timeout, hostname, results)\n}\n\nfunc (f h2specFailingTest) ID() string {\n\treturn fmt.Sprintf(\"%s.%d\", f.TestCase.Package, f.TestNumber)\n}\n\nfunc lookupEnv(key, defaultVal string) string {\n\tif val, ok := os.LookupEnv(key); ok {\n\t\treturn val\n\t}\n\treturn defaultVal\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n)\n\ntype basicAuthData struct {\n\tuser string\n\tpassword string\n}\n\ntype basicAuth struct {\n\tUsers map[string]string\n}\n\nfunc newBasicAuthFromFile(path string) (*basicAuth, error) {\n\tr, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newBasicAuth(r)\n}\n\nfunc newBasicAuth(file io.Reader) (*basicAuth, error) {\n\tcsvReader := csv.NewReader(file)\n\tcsvReader.Comma = ':'\n\tcsvReader.Comment = '#'\n\tcsvReader.TrimLeadingSpace = true\n\n\trecords, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := &basicAuth{Users: make(map[string]string)}\n\n\tfor _, record := range records {\n\t\tif len(record) != 2 {\n\t\t\treturn nil, errors.New(\"invalid basic auth file format\")\n\t\t}\n\t\th.Users[record[0]] = record[1]\n\t}\n\n\tif len(h.Users) == 0 {\n\t\treturn nil, errors.New(\"auth file contains no data\")\n\t}\n\n\treturn h, nil\n}\n\nfunc (h *basicAuth) validate(authData *basicAuthData) bool {\n\trealPassword, exists := h.Users[authData.user]\n\tif !exists || realPassword != authData.password {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<commit_msg>code style consistency<commit_after>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n)\n\ntype basicAuthData struct {\n\tuser string\n\tpassword string\n}\n\ntype basicAuth struct {\n\tusers map[string]string\n}\n\nfunc newBasicAuthFromFile(path string) (*basicAuth, error) {\n\tr, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newBasicAuth(r)\n}\n\nfunc newBasicAuth(file io.Reader) (*basicAuth, error) {\n\tcsvReader := csv.NewReader(file)\n\tcsvReader.Comma = ':'\n\tcsvReader.Comment = '#'\n\tcsvReader.TrimLeadingSpace = true\n\n\trecords, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := &basicAuth{users: make(map[string]string)}\n\n\tfor _, record := range records {\n\t\tif len(record) != 2 {\n\t\t\treturn nil, errors.New(\"invalid basic auth file format\")\n\t\t}\n\t\th.users[record[0]] = record[1]\n\t}\n\n\tif len(h.users) == 0 {\n\t\treturn nil, errors.New(\"auth file contains no data\")\n\t}\n\n\treturn h, nil\n}\n\nfunc (h *basicAuth) validate(authData *basicAuthData) bool {\n\trealPassword, exists := h.users[authData.user]\n\tif !exists || realPassword != authData.password {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage conventions\n\n\/\/ OpenTelemetry Semantic Convention values for Resource attribute names.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/tree\/master\/specification\/resource\/semantic_conventions\/README.md\nconst (\n\tAttributeCloudAccount = \"cloud.account.id\"\n\tAttributeCloudProvider = \"cloud.provider\"\n\tAttributeCloudRegion = \"cloud.region\"\n\tAttributeCloudZone = \"cloud.zone\"\n\tAttributeContainerID = \"container.id\"\n\tAttributeContainerImage = \"container.image.name\"\n\tAttributeContainerName = \"container.name\"\n\tAttributeContainerTag = \"container.image.tag\"\n\tAttributeDeploymentEnvironment = \"deployment.environment\"\n\tAttributeFaasID = \"faas.id\"\n\tAttributeFaasInstance = \"faas.instance\"\n\tAttributeFaasName = \"faas.name\"\n\tAttributeFaasVersion = \"faas.version\"\n\tAttributeHostHostname = \"host.hostname\"\n\tAttributeHostID = \"host.id\"\n\tAttributeHostImageID = \"host.image.id\"\n\tAttributeHostImageName = \"host.image.name\"\n\tAttributeHostImageVersion = \"host.image.version\"\n\tAttributeHostName = \"host.name\"\n\tAttributeHostType = \"host.type\"\n\tAttributeK8sCluster = \"k8s.cluster.name\"\n\tAttributeK8sContainer = \"k8s.container.name\"\n\tAttributeK8sCronJob = \"k8s.cronjob.name\"\n\tAttributeK8sCronJobUID = \"k8s.cronjob.uid\"\n\tAttributeK8sDaemonSet = \"k8s.daemonset.name\"\n\tAttributeK8sDaemonSetUID = \"k8s.daemonset.uid\"\n\tAttributeK8sDeployment = \"k8s.deployment.name\"\n\tAttributeK8sDeploymentUID = \"k8s.deployment.uid\"\n\tAttributeK8sJob = \"k8s.job.name\"\n\tAttributeK8sJobUID = \"k8s.job.uid\"\n\tAttributeK8sNamespace = \"k8s.namespace.name\"\n\tAttributeK8sPod = \"k8s.pod.name\"\n\tAttributeK8sPodUID = \"k8s.pod.uid\"\n\tAttributeK8sReplicaSet = \"k8s.replicaset.name\"\n\tAttributeK8sReplicaSetUID = \"k8s.replicaset.uid\"\n\tAttributeK8sStatefulSet = \"k8s.statefulset.name\"\n\tAttributeK8sStatefulSetUID = \"k8s.statefulset.uid\"\n\tAttributeOSType = \"os.type\"\n\tAttributeOSDescription = \"os.description\"\n\tAttributeProcessCommand = \"process.command\"\n\tAttributeProcessCommandLine = \"process.command_line\"\n\tAttributeProcessExecutableName = \"process.executable.name\"\n\tAttributeProcessExecutablePath = \"process.executable.path\"\n\tAttributeProcessID = \"process.pid\"\n\tAttributeProcessOwner = \"process.owner\"\n\tAttributeServiceInstance = \"service.instance.id\"\n\tAttributeServiceName = \"service.name\"\n\tAttributeServiceNamespace = \"service.namespace\"\n\tAttributeServiceVersion = \"service.version\"\n\tAttributeTelemetryAutoVersion = \"telemetry.auto.version\"\n\tAttributeTelemetrySDKLanguage = \"telemetry.sdk.language\"\n\tAttributeTelemetrySDKName = \"telemetry.sdk.name\"\n\tAttributeTelemetrySDKVersion = \"telemetry.sdk.version\"\n)\n\n\/\/ OpenTelemetry Semantic Convention values for Resource attribute \"telemetry.sdk.language\" values.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/tree\/master\/specification\/resource\/semantic_conventions\/README.md\nconst (\n\tAttributeSDKLangValueCPP = \"cpp\"\n\tAttributeSDKLangValueDotNET = \"dotnet\"\n\tAttributeSDKLangValueErlang = \"erlang\"\n\tAttributeSDKLangValueGo = \"go\"\n\tAttributeSDKLangValueJava = \"java\"\n\tAttributeSDKLangValueNodeJS = \"nodejs\"\n\tAttributeSDKLangValuePHP = \"php\"\n\tAttributeSDKLangValuePython = \"python\"\n\tAttributeSDKLangValueRuby = \"ruby\"\n\tAttributeSDKLangValueWebJS = \"webjs\"\n)\n\n\/\/ GetResourceSemanticConventionAttributeNames a slice with all the Resource Semantic Conventions attribute names.\nfunc GetResourceSemanticConventionAttributeNames() []string {\n\treturn []string{\n\t\tAttributeCloudAccount,\n\t\tAttributeCloudProvider,\n\t\tAttributeCloudRegion,\n\t\tAttributeCloudZone,\n\t\tAttributeContainerID,\n\t\tAttributeContainerImage,\n\t\tAttributeContainerName,\n\t\tAttributeContainerTag,\n\t\tAttributeDeploymentEnvironment,\n\t\tAttributeFaasID,\n\t\tAttributeFaasInstance,\n\t\tAttributeFaasName,\n\t\tAttributeFaasVersion,\n\t\tAttributeHostHostname,\n\t\tAttributeHostID,\n\t\tAttributeHostImageID,\n\t\tAttributeHostImageName,\n\t\tAttributeHostImageVersion,\n\t\tAttributeHostName,\n\t\tAttributeHostType,\n\t\tAttributeK8sCluster,\n\t\tAttributeK8sContainer,\n\t\tAttributeK8sCronJob,\n\t\tAttributeK8sCronJobUID,\n\t\tAttributeK8sDaemonSet,\n\t\tAttributeK8sDaemonSetUID,\n\t\tAttributeK8sDeployment,\n\t\tAttributeK8sDeploymentUID,\n\t\tAttributeK8sJob,\n\t\tAttributeK8sJobUID,\n\t\tAttributeK8sNamespace,\n\t\tAttributeK8sPod,\n\t\tAttributeK8sPodUID,\n\t\tAttributeK8sReplicaSet,\n\t\tAttributeK8sReplicaSetUID,\n\t\tAttributeK8sStatefulSet,\n\t\tAttributeK8sStatefulSetUID,\n\t\tAttributeOSType,\n\t\tAttributeOSDescription,\n\t\tAttributeProcessCommand,\n\t\tAttributeProcessCommandLine,\n\t\tAttributeProcessExecutableName,\n\t\tAttributeProcessExecutablePath,\n\t\tAttributeProcessID,\n\t\tAttributeProcessOwner,\n\t\tAttributeServiceInstance,\n\t\tAttributeServiceName,\n\t\tAttributeServiceNamespace,\n\t\tAttributeServiceVersion,\n\t\tAttributeTelemetryAutoVersion,\n\t\tAttributeTelemetrySDKLanguage,\n\t\tAttributeTelemetrySDKName,\n\t\tAttributeTelemetrySDKVersion,\n\t}\n}\n\n\/\/ OpenTelemetry Semantic Convention values for general Span attribute names.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/span-general.md\nconst (\n\tAttributeComponent = \"component\"\n\tAttributeEnduserID = \"enduser.id\"\n\tAttributeEnduserRole = \"enduser.role\"\n\tAttributeEnduserScope = \"enduser.scope\"\n\tAttributeNetHostIP = \"net.host.ip\"\n\tAttributeNetHostName = \"net.host.name\"\n\tAttributeNetHostPort = \"net.host.port\"\n\tAttributeNetPeerIP = \"net.peer.ip\"\n\tAttributeNetPeerName = \"net.peer.name\"\n\tAttributeNetPeerPort = \"net.peer.port\"\n\tAttributeNetTransport = \"net.transport\"\n\tAttributePeerService = \"peer.service\"\n)\n\n\/\/ OpenTelemetry Semantic Convention values for component attribute values.\n\/\/ Possibly being removed due to issue #336\nconst (\n\tComponentTypeHTTP = \"http\"\n\tComponentTypeGRPC = \"grpc\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for HTTP related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/http.md\nconst (\n\tAttributeHTTPClientIP = \"http.client_ip\"\n\tAttributeHTTPFlavor = \"http.flavor\"\n\tAttributeHTTPHost = \"http.host\"\n\tAttributeHTTPHostName = \"host.name\"\n\tAttributeHTTPHostPort = \"host.port\"\n\tAttributeHTTPMethod = \"http.method\"\n\tAttributeHTTPRequestContentLength = \"http.request_content_length\"\n\tAttributeHTTPRequestContentLengthUncompressed = \"http.request_content_length_uncompressed\"\n\tAttributeHTTPResponseContentLength = \"http.response_content_length\"\n\tAttributeHTTPResponseContentLengthUncompressed = \"http.response_content_length_uncompressed\"\n\tAttributeHTTPRoute = \"http.route\"\n\tAttributeHTTPScheme = \"http.scheme\"\n\tAttributeHTTPServerName = \"http.server_name\"\n\tAttributeHTTPStatusCode = \"http.status_code\"\n\tAttributeHTTPStatusText = \"http.status_text\"\n\tAttributeHTTPTarget = \"http.target\"\n\tAttributeHTTPURL = \"http.url\"\n\tAttributeHTTPUserAgent = \"http.user_agent\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for database related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/database.md\nconst (\n\tAttributeDBConnectionString = \"db.connection_string\"\n\n\tAttributeDBCassandraKeyspace = \"db.cassandra.keyspace\"\n\tAttributeDBHBaseNamespace = \"db.hbase.namespace\"\n\tAttributeDBJDBCDriverClassname = \"db.jdbc.driver_classname\"\n\tAttributeDBMongoDBCollection = \"db.mongodb.collection\"\n\tAttributeDBMsSQLInstanceName = \"db.mssql.instance_name\"\n\n\tAttributeDBName = \"db.name\"\n\tAttributeDBOperation = \"db.operation\"\n\tAttributeDBRedisDatabaseIndex = \"db.redis.database_index\"\n\tAttributeDBStatement = \"db.statement\"\n\tAttributeDBSystem = \"db.system\"\n\tAttributeDBUser = \"db.user\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for gRPC related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/rpc.md\nconst (\n\tAttributeMessageCompressedSize = \"message.compressed_size\"\n\tAttributeMessageID = \"message.id\"\n\tAttributeMessageType = \"message.type\"\n\tAttributeMessageUncompressedSize = \"message.uncompressed_size\"\n\tAttributeRPCMethod = \"rpc.method\"\n\tAttributeRPCService = \"rpc.service\"\n\tAttributeRPCSystem = \"rpc.system\"\n\tEventTypeMessage = \"message\"\n\tMessageTypeReceived = \"RECEIVED\"\n\tMessageTypeSent = \"SENT\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for FaaS related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/faas.md\nconst (\n\tAttributeFaaSCron = \"faas.cron\"\n\tAttributeFaaSDocumentCollection = \"faas.document.collection\"\n\tAttributeFaaSDocumentName = \"faas.document.name\"\n\tAttributeFaaSDocumentOperation = \"faas.document.operation\"\n\tAttributeFaaSDocumentTime = \"faas.document.time\"\n\tAttributeFaaSExecution = \"faas.execution\"\n\tAttributeFaaSTime = \"faas.time\"\n\tAttributeFaaSTrigger = \"faas.trigger\"\n\tFaaSTriggerDataSource = \"datasource\"\n\tFaaSTriggerHTTP = \"http\"\n\tFaaSTriggerOther = \"other\"\n\tFaaSTriggerPubSub = \"pubsub\"\n\tFaaSTriggerTimer = \"timer\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for messaging system related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/messaging.md\nconst (\n\tAttributeMessagingConversationID = \"messaging.conversation_id\"\n\tAttributeMessagingDestination = \"messaging.destination\"\n\tAttributeMessagingDestinationKind = \"messaging.destination_kind\"\n\tAttributeMessagingMessageID = \"messaging.message_id\"\n\tAttributeMessagingOperation = \"messaging.operation\"\n\tAttributeMessagingPayloadCompressedSize = \"messaging.message_payload_compressed_size_bytes\"\n\tAttributeMessagingPayloadSize = \"messaging.message_payload_size_bytes\"\n\tAttributeMessagingProtocol = \"messaging.protocol\"\n\tAttributeMessagingProtocolVersion = \"messaging.protocol_version\"\n\tAttributeMessagingSystem = \"messaging.system\"\n\tAttributeMessagingTempDestination = \"messaging.temp_destination\"\n\tAttributeMessagingURL = \"messaging.url\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for exceptions\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/exceptions.md\nconst (\n\tAttributeExceptionEventName = \"exception\"\n\tAttributeExceptionMessage = \"exception.message\"\n\tAttributeExceptionStacktrace = \"exception.stacktrace\"\n\tAttributeExceptionType = \"exception.type\"\n)\n<commit_msg>Add cloud.provider semantic conventions (#1865)<commit_after>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage conventions\n\n\/\/ OpenTelemetry Semantic Convention values for Resource attribute names.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/tree\/master\/specification\/resource\/semantic_conventions\/README.md\nconst (\n\tAttributeCloudAccount = \"cloud.account.id\"\n\tAttributeCloudProvider = \"cloud.provider\"\n\tAttributeCloudRegion = \"cloud.region\"\n\tAttributeCloudZone = \"cloud.zone\"\n\tAttributeContainerID = \"container.id\"\n\tAttributeContainerImage = \"container.image.name\"\n\tAttributeContainerName = \"container.name\"\n\tAttributeContainerTag = \"container.image.tag\"\n\tAttributeDeploymentEnvironment = \"deployment.environment\"\n\tAttributeFaasID = \"faas.id\"\n\tAttributeFaasInstance = \"faas.instance\"\n\tAttributeFaasName = \"faas.name\"\n\tAttributeFaasVersion = \"faas.version\"\n\tAttributeHostHostname = \"host.hostname\"\n\tAttributeHostID = \"host.id\"\n\tAttributeHostImageID = \"host.image.id\"\n\tAttributeHostImageName = \"host.image.name\"\n\tAttributeHostImageVersion = \"host.image.version\"\n\tAttributeHostName = \"host.name\"\n\tAttributeHostType = \"host.type\"\n\tAttributeK8sCluster = \"k8s.cluster.name\"\n\tAttributeK8sContainer = \"k8s.container.name\"\n\tAttributeK8sCronJob = \"k8s.cronjob.name\"\n\tAttributeK8sCronJobUID = \"k8s.cronjob.uid\"\n\tAttributeK8sDaemonSet = \"k8s.daemonset.name\"\n\tAttributeK8sDaemonSetUID = \"k8s.daemonset.uid\"\n\tAttributeK8sDeployment = \"k8s.deployment.name\"\n\tAttributeK8sDeploymentUID = \"k8s.deployment.uid\"\n\tAttributeK8sJob = \"k8s.job.name\"\n\tAttributeK8sJobUID = \"k8s.job.uid\"\n\tAttributeK8sNamespace = \"k8s.namespace.name\"\n\tAttributeK8sPod = \"k8s.pod.name\"\n\tAttributeK8sPodUID = \"k8s.pod.uid\"\n\tAttributeK8sReplicaSet = \"k8s.replicaset.name\"\n\tAttributeK8sReplicaSetUID = \"k8s.replicaset.uid\"\n\tAttributeK8sStatefulSet = \"k8s.statefulset.name\"\n\tAttributeK8sStatefulSetUID = \"k8s.statefulset.uid\"\n\tAttributeOSType = \"os.type\"\n\tAttributeOSDescription = \"os.description\"\n\tAttributeProcessCommand = \"process.command\"\n\tAttributeProcessCommandLine = \"process.command_line\"\n\tAttributeProcessExecutableName = \"process.executable.name\"\n\tAttributeProcessExecutablePath = \"process.executable.path\"\n\tAttributeProcessID = \"process.pid\"\n\tAttributeProcessOwner = \"process.owner\"\n\tAttributeServiceInstance = \"service.instance.id\"\n\tAttributeServiceName = \"service.name\"\n\tAttributeServiceNamespace = \"service.namespace\"\n\tAttributeServiceVersion = \"service.version\"\n\tAttributeTelemetryAutoVersion = \"telemetry.auto.version\"\n\tAttributeTelemetrySDKLanguage = \"telemetry.sdk.language\"\n\tAttributeTelemetrySDKName = \"telemetry.sdk.name\"\n\tAttributeTelemetrySDKVersion = \"telemetry.sdk.version\"\n)\n\n\/\/ OpenTelemetry Semantic Convention values for Resource attribute \"telemetry.sdk.language\" values.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/tree\/master\/specification\/resource\/semantic_conventions\/README.md\nconst (\n\tAttributeSDKLangValueCPP = \"cpp\"\n\tAttributeSDKLangValueDotNET = \"dotnet\"\n\tAttributeSDKLangValueErlang = \"erlang\"\n\tAttributeSDKLangValueGo = \"go\"\n\tAttributeSDKLangValueJava = \"java\"\n\tAttributeSDKLangValueNodeJS = \"nodejs\"\n\tAttributeSDKLangValuePHP = \"php\"\n\tAttributeSDKLangValuePython = \"python\"\n\tAttributeSDKLangValueRuby = \"ruby\"\n\tAttributeSDKLangValueWebJS = \"webjs\"\n)\n\n\/\/ OpenTelemetry Semantic Convention values for Resource attribute \"cloud.provider\" values.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/resource\/semantic_conventions\/cloud.md\nconst (\n\tAttributeCloudProviderAWS = \"aws\"\n\tAttributeCloudProviderAzure = \"azure\"\n\tAttributeCloudProviderGCP = \"gcp\"\n)\n\n\/\/ GetResourceSemanticConventionAttributeNames a slice with all the Resource Semantic Conventions attribute names.\nfunc GetResourceSemanticConventionAttributeNames() []string {\n\treturn []string{\n\t\tAttributeCloudAccount,\n\t\tAttributeCloudProvider,\n\t\tAttributeCloudRegion,\n\t\tAttributeCloudZone,\n\t\tAttributeContainerID,\n\t\tAttributeContainerImage,\n\t\tAttributeContainerName,\n\t\tAttributeContainerTag,\n\t\tAttributeDeploymentEnvironment,\n\t\tAttributeFaasID,\n\t\tAttributeFaasInstance,\n\t\tAttributeFaasName,\n\t\tAttributeFaasVersion,\n\t\tAttributeHostHostname,\n\t\tAttributeHostID,\n\t\tAttributeHostImageID,\n\t\tAttributeHostImageName,\n\t\tAttributeHostImageVersion,\n\t\tAttributeHostName,\n\t\tAttributeHostType,\n\t\tAttributeK8sCluster,\n\t\tAttributeK8sContainer,\n\t\tAttributeK8sCronJob,\n\t\tAttributeK8sCronJobUID,\n\t\tAttributeK8sDaemonSet,\n\t\tAttributeK8sDaemonSetUID,\n\t\tAttributeK8sDeployment,\n\t\tAttributeK8sDeploymentUID,\n\t\tAttributeK8sJob,\n\t\tAttributeK8sJobUID,\n\t\tAttributeK8sNamespace,\n\t\tAttributeK8sPod,\n\t\tAttributeK8sPodUID,\n\t\tAttributeK8sReplicaSet,\n\t\tAttributeK8sReplicaSetUID,\n\t\tAttributeK8sStatefulSet,\n\t\tAttributeK8sStatefulSetUID,\n\t\tAttributeOSType,\n\t\tAttributeOSDescription,\n\t\tAttributeProcessCommand,\n\t\tAttributeProcessCommandLine,\n\t\tAttributeProcessExecutableName,\n\t\tAttributeProcessExecutablePath,\n\t\tAttributeProcessID,\n\t\tAttributeProcessOwner,\n\t\tAttributeServiceInstance,\n\t\tAttributeServiceName,\n\t\tAttributeServiceNamespace,\n\t\tAttributeServiceVersion,\n\t\tAttributeTelemetryAutoVersion,\n\t\tAttributeTelemetrySDKLanguage,\n\t\tAttributeTelemetrySDKName,\n\t\tAttributeTelemetrySDKVersion,\n\t}\n}\n\n\/\/ OpenTelemetry Semantic Convention values for general Span attribute names.\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/span-general.md\nconst (\n\tAttributeComponent = \"component\"\n\tAttributeEnduserID = \"enduser.id\"\n\tAttributeEnduserRole = \"enduser.role\"\n\tAttributeEnduserScope = \"enduser.scope\"\n\tAttributeNetHostIP = \"net.host.ip\"\n\tAttributeNetHostName = \"net.host.name\"\n\tAttributeNetHostPort = \"net.host.port\"\n\tAttributeNetPeerIP = \"net.peer.ip\"\n\tAttributeNetPeerName = \"net.peer.name\"\n\tAttributeNetPeerPort = \"net.peer.port\"\n\tAttributeNetTransport = \"net.transport\"\n\tAttributePeerService = \"peer.service\"\n)\n\n\/\/ OpenTelemetry Semantic Convention values for component attribute values.\n\/\/ Possibly being removed due to issue #336\nconst (\n\tComponentTypeHTTP = \"http\"\n\tComponentTypeGRPC = \"grpc\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for HTTP related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/http.md\nconst (\n\tAttributeHTTPClientIP = \"http.client_ip\"\n\tAttributeHTTPFlavor = \"http.flavor\"\n\tAttributeHTTPHost = \"http.host\"\n\tAttributeHTTPHostName = \"host.name\"\n\tAttributeHTTPHostPort = \"host.port\"\n\tAttributeHTTPMethod = \"http.method\"\n\tAttributeHTTPRequestContentLength = \"http.request_content_length\"\n\tAttributeHTTPRequestContentLengthUncompressed = \"http.request_content_length_uncompressed\"\n\tAttributeHTTPResponseContentLength = \"http.response_content_length\"\n\tAttributeHTTPResponseContentLengthUncompressed = \"http.response_content_length_uncompressed\"\n\tAttributeHTTPRoute = \"http.route\"\n\tAttributeHTTPScheme = \"http.scheme\"\n\tAttributeHTTPServerName = \"http.server_name\"\n\tAttributeHTTPStatusCode = \"http.status_code\"\n\tAttributeHTTPStatusText = \"http.status_text\"\n\tAttributeHTTPTarget = \"http.target\"\n\tAttributeHTTPURL = \"http.url\"\n\tAttributeHTTPUserAgent = \"http.user_agent\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for database related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/database.md\nconst (\n\tAttributeDBConnectionString = \"db.connection_string\"\n\n\tAttributeDBCassandraKeyspace = \"db.cassandra.keyspace\"\n\tAttributeDBHBaseNamespace = \"db.hbase.namespace\"\n\tAttributeDBJDBCDriverClassname = \"db.jdbc.driver_classname\"\n\tAttributeDBMongoDBCollection = \"db.mongodb.collection\"\n\tAttributeDBMsSQLInstanceName = \"db.mssql.instance_name\"\n\n\tAttributeDBName = \"db.name\"\n\tAttributeDBOperation = \"db.operation\"\n\tAttributeDBRedisDatabaseIndex = \"db.redis.database_index\"\n\tAttributeDBStatement = \"db.statement\"\n\tAttributeDBSystem = \"db.system\"\n\tAttributeDBUser = \"db.user\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for gRPC related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/rpc.md\nconst (\n\tAttributeMessageCompressedSize = \"message.compressed_size\"\n\tAttributeMessageID = \"message.id\"\n\tAttributeMessageType = \"message.type\"\n\tAttributeMessageUncompressedSize = \"message.uncompressed_size\"\n\tAttributeRPCMethod = \"rpc.method\"\n\tAttributeRPCService = \"rpc.service\"\n\tAttributeRPCSystem = \"rpc.system\"\n\tEventTypeMessage = \"message\"\n\tMessageTypeReceived = \"RECEIVED\"\n\tMessageTypeSent = \"SENT\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for FaaS related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/faas.md\nconst (\n\tAttributeFaaSCron = \"faas.cron\"\n\tAttributeFaaSDocumentCollection = \"faas.document.collection\"\n\tAttributeFaaSDocumentName = \"faas.document.name\"\n\tAttributeFaaSDocumentOperation = \"faas.document.operation\"\n\tAttributeFaaSDocumentTime = \"faas.document.time\"\n\tAttributeFaaSExecution = \"faas.execution\"\n\tAttributeFaaSTime = \"faas.time\"\n\tAttributeFaaSTrigger = \"faas.trigger\"\n\tFaaSTriggerDataSource = \"datasource\"\n\tFaaSTriggerHTTP = \"http\"\n\tFaaSTriggerOther = \"other\"\n\tFaaSTriggerPubSub = \"pubsub\"\n\tFaaSTriggerTimer = \"timer\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for messaging system related attributes\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/messaging.md\nconst (\n\tAttributeMessagingConversationID = \"messaging.conversation_id\"\n\tAttributeMessagingDestination = \"messaging.destination\"\n\tAttributeMessagingDestinationKind = \"messaging.destination_kind\"\n\tAttributeMessagingMessageID = \"messaging.message_id\"\n\tAttributeMessagingOperation = \"messaging.operation\"\n\tAttributeMessagingPayloadCompressedSize = \"messaging.message_payload_compressed_size_bytes\"\n\tAttributeMessagingPayloadSize = \"messaging.message_payload_size_bytes\"\n\tAttributeMessagingProtocol = \"messaging.protocol\"\n\tAttributeMessagingProtocolVersion = \"messaging.protocol_version\"\n\tAttributeMessagingSystem = \"messaging.system\"\n\tAttributeMessagingTempDestination = \"messaging.temp_destination\"\n\tAttributeMessagingURL = \"messaging.url\"\n)\n\n\/\/ OpenTelemetry Semantic Convention attribute names for exceptions\n\/\/ See: https:\/\/github.com\/open-telemetry\/opentelemetry-specification\/blob\/master\/specification\/trace\/semantic_conventions\/exceptions.md\nconst (\n\tAttributeExceptionEventName = \"exception\"\n\tAttributeExceptionMessage = \"exception.message\"\n\tAttributeExceptionStacktrace = \"exception.stacktrace\"\n\tAttributeExceptionType = \"exception.type\"\n)\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Read the right username field when performing account deactivation (#1954)<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 TVersity Inc. All rights reserved.\n\/\/ Use of this source code is governed by an Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\/\/ #include <stdio.h>\n\t\/\/ #include <stdlib.h>\n\t\/\/ #include \"callbacks.h\"\n\t\"C\"\n\n\t\"github.com\/tversity\/appflinger-go\"\n)\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\nconst (\n\tMockDuration = 60\n)\n\n\/\/ This struct will implement the appflinger.AppFlinger interface which is needed in order to\n\/\/ receive the control channel commands and process them\ntype AppflingerListener struct {\n\t\/\/ C callback pointers\n\t\/\/ Note - we cannot invoke C function pointers from Go so we use a helper C function to do it\n\t\/\/ e.g. to invoke the on_ui_frame_cb function pointer we use C.invoke_on_ui_frame()\n\tcb *C.appflinger_callbacks_t\n}\n\nfunc NewAppflingerListener(cb *C.appflinger_callbacks_t) (self *AppflingerListener) {\n\tself = &AppflingerListener{}\n\tself.cb = cb\n\treturn\n}\n\n\/\/ Implementation of appflinger.AppFlinger interface that just delegates to C Callbacks\n\nfunc (self *AppflingerListener) Load(sessionId string, instanceId string, url string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tcUrl := C.CString(url)\n\trc := C.invoke_load(self.cb.load_cb, cSessionId, cInstanceId, cUrl)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to load media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\tC.free(unsafe.Pointer(cUrl))\n\treturn\n}\n\nfunc (self *AppflingerListener) CancelLoad(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_cancel_load(self.cb.cancel_load_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to cancel load of media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Pause(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_pause(self.cb.pause_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to pause media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Play(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_play(self.cb.play_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to play media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Seek(sessionId string, instanceId string, time float64) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_seek(self.cb.seek_cb, cSessionId, cInstanceId, C.double(time))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to seek media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetPaused(sessionId string, instanceId string) (paused bool, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cPaused C.int\n\trc := C.invoke_get_paused(self.cb.get_paused_cb, cSessionId, cInstanceId, &cPaused)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get pause state\")\n\t} else {\n\t\tpaused = GoBool(cPaused)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetSeeking(sessionId string, instanceId string) (seeking bool, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cSeeking C.int\n\trc := C.invoke_get_seeking(self.cb.get_seeking_cb, cSessionId, cInstanceId, &cSeeking)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get seeking state\")\n\t} else {\n\t\tseeking = GoBool(cSeeking)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetDuration(sessionId string, instanceId string) (duration float64, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cDuration C.double\n\trc := C.invoke_get_duration(self.cb.get_duration_cb, cSessionId, cInstanceId, &cDuration)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get duration of media\")\n\t} else {\n\t\tduration = float64(cDuration)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetCurrentTime(sessionId string, instanceId string) (time float64, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cTime C.double\n\trc := C.invoke_get_current_time(self.cb.get_current_time_cb, cSessionId, cInstanceId, &cTime)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get current time of media\")\n\t} else {\n\t\ttime = float64(cTime)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetNetworkState(sessionId string, instanceId string) (networkState int, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cNetworkState C.int\n\trc := C.invoke_get_network_state(self.cb.get_network_state_cb, cSessionId, cInstanceId, &cNetworkState)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get network state\")\n\t} else {\n\t\tnetworkState = int(cNetworkState)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetReadyState(sessionId string, instanceId string) (readyState int, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cReadyState C.int\n\trc := C.invoke_get_ready_state(self.cb.get_ready_state_cb, cSessionId, cInstanceId, &cReadyState)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get ready state\")\n\t} else {\n\t\treadyState = int(cReadyState)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetSeekable(sessionId string, instanceId string, result *appflinger.GetSeekableResult) (err error) {\n\tresult.Start = []float64{0}\n\tresult.End = []float64{MockDuration}\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) GetBuffered(sessionId string, instanceId string, result *appflinger.GetBufferedResult) (err error) {\n\tresult.Start = []float64{0}\n\tresult.End = []float64{MockDuration}\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetRect(sessionId string, instanceId string, x int, y int, width int, height int) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_set_rect(self.cb.set_rect_cb, cSessionId, cInstanceId, C.int(x), C.int(y), C.int(width), C.int(height))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to set media display rectangle\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) SetVisible(sessionId string, instanceId string, visible bool) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetRate(sessionId string, instanceId string, rate float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetVolume(sessionId string, instanceId string, volume float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AddSourceBuffer(sessionId string, instanceId string, sourceId string, mimeType string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RemoveSourceBuffer(sessionId string, instanceId string, sourceId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AbortSourceBuffer(sessionId string, instanceId string, sourceId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AppendBuffer(sessionId string, instanceId string, sourceId string, appendWindowStart float64, appendWindowEnd float64,\n\tbufferId string, bufferOffset int, bufferLength int, payload []byte, result *appflinger.GetBufferedResult) (err error) {\n\tresult.Start = nil\n\tresult.End = nil\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetAppendMode(sessionId string, instanceId string, sourceId string, mode int) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetAppendTimestampOffset(sessionId string, instanceId string, sourceId string, timestampOffset float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RemoveBufferRange(sessionId string, instanceId string, sourceId string, start float64, end float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) ChangeSourceBufferType(sessionId string, instanceId string, sourceId string, mimeType string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) LoadResource(sessionId string, url string, method string, headers string, resourceId string,\n\tbyteRangeStart int, byteRangeEnd int, sequenceNumber int, payload []byte, result *appflinger.LoadResourceResult) (err error) {\n\terr = nil\n\tresult.Code = \"404\"\n\tresult.Headers = \"\"\n\tresult.BufferId = \"\"\n\tresult.BufferLength = 0\n\tresult.Payload = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) DeleteResource(sessionId string, BufferId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RequestKeySystem(sessionId string, keySystem string, supportedConfigurations []appflinger.EMEMediaKeySystemConfiguration, result *appflinger.RequestKeySystemResult) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmCreate(sessionId string, keySystem string, securityOrigin string, allowDistinctiveIdentifier bool, allowPersistentState bool) (cdmId string, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSetServerCertificate(sessionId string, cdmId string, payload []byte) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionCreate(sessionId string, eventInstanceId string, cdmId string, sessionType string, initDataType string, payload []byte) (cdmSessionId string, expiration float64, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionUpdate(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string, payload []byte) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionLoad(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (loaded bool, expiration float64, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionRemove(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionClose(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetCdm(sessionId string, instanceId string, cdmId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SendMessage(sessionId string, message string) (result string, err error) {\n\terr = nil\n\tresult = \"\"\n\treturn\n}\n\nfunc (self *AppflingerListener) OnPageLoad(sessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnAddressBarChanged(sessionId string, url string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnTitleChanged(sessionId string, title string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnPageClose(sessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnUIFrame(sessionId string, isCodecConfig bool, isKeyFrame bool, idx int, pts int, dts int, data []byte) (err error) {\n\tcSessionId := C.CString(sessionId)\n\trc := C.invoke_on_ui_frame(self.cb.on_ui_frame_cb, cSessionId, CBool(isCodecConfig), CBool(isKeyFrame), C.int(idx), C.longlong(pts),\n\t\tC.longlong(dts), C.CBytes(data), C.uint(len(data)))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to process frame\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\treturn\n}\n<commit_msg>C bindings - better handling of getBuffered() and getSeekable()<commit_after>\/\/ Copyright 2015 TVersity Inc. All rights reserved.\n\/\/ Use of this source code is governed by an Apache 2.0\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\/\/ #include <stdio.h>\n\t\/\/ #include <stdlib.h>\n\t\/\/ #include \"callbacks.h\"\n\t\"C\"\n\n\t\"github.com\/tversity\/appflinger-go\"\n)\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\nconst ()\n\n\/\/ This struct will implement the appflinger.AppFlinger interface which is needed in order to\n\/\/ receive the control channel commands and process them\ntype AppflingerListener struct {\n\t\/\/ C callback pointers\n\t\/\/ Note - we cannot invoke C function pointers from Go so we use a helper C function to do it\n\t\/\/ e.g. to invoke the on_ui_frame_cb function pointer we use C.invoke_on_ui_frame()\n\tcb *C.appflinger_callbacks_t\n}\n\nfunc NewAppflingerListener(cb *C.appflinger_callbacks_t) (self *AppflingerListener) {\n\tself = &AppflingerListener{}\n\tself.cb = cb\n\treturn\n}\n\n\/\/ Implementation of appflinger.AppFlinger interface that just delegates to C Callbacks\n\nfunc (self *AppflingerListener) Load(sessionId string, instanceId string, url string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tcUrl := C.CString(url)\n\trc := C.invoke_load(self.cb.load_cb, cSessionId, cInstanceId, cUrl)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to load media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\tC.free(unsafe.Pointer(cUrl))\n\treturn\n}\n\nfunc (self *AppflingerListener) CancelLoad(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_cancel_load(self.cb.cancel_load_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to cancel load of media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Pause(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_pause(self.cb.pause_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to pause media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Play(sessionId string, instanceId string) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_play(self.cb.play_cb, cSessionId, cInstanceId)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to play media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) Seek(sessionId string, instanceId string, time float64) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_seek(self.cb.seek_cb, cSessionId, cInstanceId, C.double(time))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to seek media\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetPaused(sessionId string, instanceId string) (paused bool, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cPaused C.int\n\trc := C.invoke_get_paused(self.cb.get_paused_cb, cSessionId, cInstanceId, &cPaused)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get pause state\")\n\t} else {\n\t\tpaused = GoBool(cPaused)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetSeeking(sessionId string, instanceId string) (seeking bool, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cSeeking C.int\n\trc := C.invoke_get_seeking(self.cb.get_seeking_cb, cSessionId, cInstanceId, &cSeeking)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get seeking state\")\n\t} else {\n\t\tseeking = GoBool(cSeeking)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetDuration(sessionId string, instanceId string) (duration float64, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cDuration C.double\n\trc := C.invoke_get_duration(self.cb.get_duration_cb, cSessionId, cInstanceId, &cDuration)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get duration of media\")\n\t} else {\n\t\tduration = float64(cDuration)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetCurrentTime(sessionId string, instanceId string) (time float64, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cTime C.double\n\trc := C.invoke_get_current_time(self.cb.get_current_time_cb, cSessionId, cInstanceId, &cTime)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get current time of media\")\n\t} else {\n\t\ttime = float64(cTime)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetNetworkState(sessionId string, instanceId string) (networkState int, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cNetworkState C.int\n\trc := C.invoke_get_network_state(self.cb.get_network_state_cb, cSessionId, cInstanceId, &cNetworkState)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get network state\")\n\t} else {\n\t\tnetworkState = int(cNetworkState)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetReadyState(sessionId string, instanceId string) (readyState int, err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\tvar cReadyState C.int\n\trc := C.invoke_get_ready_state(self.cb.get_ready_state_cb, cSessionId, cInstanceId, &cReadyState)\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to get ready state\")\n\t} else {\n\t\treadyState = int(cReadyState)\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) GetSeekable(sessionId string, instanceId string, result *appflinger.GetSeekableResult) (err error) {\n\tvar duration float64\n\tduration, err = self.GetDuration(sessionId, instanceId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult.Start = []float64{0}\n\tresult.End = []float64{duration}\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) GetBuffered(sessionId string, instanceId string, result *appflinger.GetBufferedResult) (err error) {\n\tvar duration float64\n\tduration, err = self.GetDuration(sessionId, instanceId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult.Start = []float64{0}\n\tresult.End = []float64{duration}\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetRect(sessionId string, instanceId string, x int, y int, width int, height int) (err error) {\n\tcSessionId := C.CString(sessionId)\n\tcInstanceId := C.CString(instanceId)\n\trc := C.invoke_set_rect(self.cb.set_rect_cb, cSessionId, cInstanceId, C.int(x), C.int(y), C.int(width), C.int(height))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to set media display rectangle\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\tC.free(unsafe.Pointer(cInstanceId))\n\treturn\n}\n\nfunc (self *AppflingerListener) SetVisible(sessionId string, instanceId string, visible bool) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetRate(sessionId string, instanceId string, rate float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetVolume(sessionId string, instanceId string, volume float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AddSourceBuffer(sessionId string, instanceId string, sourceId string, mimeType string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RemoveSourceBuffer(sessionId string, instanceId string, sourceId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AbortSourceBuffer(sessionId string, instanceId string, sourceId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) AppendBuffer(sessionId string, instanceId string, sourceId string, appendWindowStart float64, appendWindowEnd float64,\n\tbufferId string, bufferOffset int, bufferLength int, payload []byte, result *appflinger.GetBufferedResult) (err error) {\n\tresult.Start = nil\n\tresult.End = nil\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetAppendMode(sessionId string, instanceId string, sourceId string, mode int) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetAppendTimestampOffset(sessionId string, instanceId string, sourceId string, timestampOffset float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RemoveBufferRange(sessionId string, instanceId string, sourceId string, start float64, end float64) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) ChangeSourceBufferType(sessionId string, instanceId string, sourceId string, mimeType string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) LoadResource(sessionId string, url string, method string, headers string, resourceId string,\n\tbyteRangeStart int, byteRangeEnd int, sequenceNumber int, payload []byte, result *appflinger.LoadResourceResult) (err error) {\n\terr = nil\n\tresult.Code = \"404\"\n\tresult.Headers = \"\"\n\tresult.BufferId = \"\"\n\tresult.BufferLength = 0\n\tresult.Payload = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) DeleteResource(sessionId string, BufferId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) RequestKeySystem(sessionId string, keySystem string, supportedConfigurations []appflinger.EMEMediaKeySystemConfiguration, result *appflinger.RequestKeySystemResult) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmCreate(sessionId string, keySystem string, securityOrigin string, allowDistinctiveIdentifier bool, allowPersistentState bool) (cdmId string, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSetServerCertificate(sessionId string, cdmId string, payload []byte) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionCreate(sessionId string, eventInstanceId string, cdmId string, sessionType string, initDataType string, payload []byte) (cdmSessionId string, expiration float64, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionUpdate(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string, payload []byte) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionLoad(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (loaded bool, expiration float64, err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionRemove(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) CdmSessionClose(sessionId string, eventInstanceId string, cdmId string, cdmSessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SetCdm(sessionId string, instanceId string, cdmId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) SendMessage(sessionId string, message string) (result string, err error) {\n\terr = nil\n\tresult = \"\"\n\treturn\n}\n\nfunc (self *AppflingerListener) OnPageLoad(sessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnAddressBarChanged(sessionId string, url string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnTitleChanged(sessionId string, title string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnPageClose(sessionId string) (err error) {\n\terr = nil\n\treturn\n}\n\nfunc (self *AppflingerListener) OnUIFrame(sessionId string, isCodecConfig bool, isKeyFrame bool, idx int, pts int, dts int, data []byte) (err error) {\n\tcSessionId := C.CString(sessionId)\n\trc := C.invoke_on_ui_frame(self.cb.on_ui_frame_cb, cSessionId, CBool(isCodecConfig), CBool(isKeyFrame), C.int(idx), C.longlong(pts),\n\t\tC.longlong(dts), C.CBytes(data), C.uint(len(data)))\n\tif rc != 0 {\n\t\terr = fmt.Errorf(\"Failed to process frame\")\n\t} else {\n\t\terr = nil\n\t}\n\tC.free(unsafe.Pointer(cSessionId))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>mssql: use column name in index name instead of 'osm_id'<commit_after><|endoftext|>"} {"text":"<commit_before>package device\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/wrp\"\n)\n\nfunc expectMessage(c Connection) (*wrp.Message, error) {\n\tvar frame bytes.Buffer\n\tif ok, err := c.Read(&frame); !ok || err != nil {\n\t\treturn nil, fmt.Errorf(\"Read failed: %s\", err)\n\t}\n\n\tvar (\n\t\tmessage = new(wrp.Message)\n\t\tdecoder = wrp.NewDecoder(&frame, wrp.Msgpack)\n\t)\n\n\tif err := decoder.Decode(message); err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not decode message: %s\", err)\n\t}\n\n\treturn message, nil\n}\n\nfunc writeMessage(m *wrp.Message, c Connection) error {\n\tif frame, err := c.NextWriter(); err != nil {\n\t\treturn err\n\t} else {\n\t\tencoder := wrp.NewEncoder(frame, wrp.Msgpack)\n\t\tif err := encoder.Encode(m); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn frame.Close()\n\t}\n}\n\nfunc ExampleManagerTransaction() {\n\tvar (\n\t\tauthStatusSent = new(sync.WaitGroup)\n\n\t\toptions = &Options{\n\t\t\tLogger: logging.DefaultLogger(),\n\t\t\tAuthDelay: 250 * time.Millisecond,\n\t\t\tListeners: []Listener{\n\t\t\t\tfunc(e *Event) {\n\t\t\t\t\tswitch e.Type {\n\t\t\t\t\tcase Connect:\n\t\t\t\t\t\tfmt.Printf(\"%s connected\\n\", e.Device.ID())\n\t\t\t\t\t\tauthStatusSent.Add(1)\n\t\t\t\t\tcase MessageSent:\n\t\t\t\t\t\tif e.Message.MessageType() == wrp.AuthMessageType {\n\t\t\t\t\t\t\tfmt.Println(\"auth status sent\")\n\t\t\t\t\t\t\tauthStatusSent.Done()\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfmt.Println(\"message sent\")\n\t\t\t\t\t\t}\n\t\t\t\t\tcase TransactionComplete:\n\t\t\t\t\t\tfmt.Println(\"response received\")\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmanager, server, websocketURL = startWebsocketServer(options)\n\n\t\tdialer = NewDialer(options, nil)\n\t\tconnection, _, dialError = dialer.Dial(\n\t\t\twebsocketURL,\n\t\t\t\"mac:111122223333\",\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t)\n\n\tdefer server.Close()\n\tif dialError != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Dial error: %s\\n\", dialError)\n\t\treturn\n\t}\n\n\tdefer connection.Close()\n\n\t\/\/ this is our \"device\" goroutine\n\tgo func() {\n\t\tif message, err := expectMessage(connection); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\t\treturn\n\t\t} else if message.Type != wrp.AuthMessageType {\n\t\t\tfmt.Fprintf(os.Stderr, \"Expected auth status, but got: %s\", message.Type)\n\t\t\treturn\n\t\t} else if message.Status == nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"No auth status code\")\n\t\t\treturn\n\t\t} else if *message.Status != wrp.AuthStatusAuthorized {\n\t\t\tfmt.Fprintf(os.Stderr, \"Expected authorized, but got: %d\", *message.Status)\n\t\t\treturn\n\t\t} else {\n\t\t\tfmt.Println(\"auth status received\")\n\t\t}\n\n\t\tif message, err := expectMessage(connection); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\t\treturn\n\t\t} else if message.Type != wrp.SimpleRequestResponseMessageType {\n\t\t\tfmt.Fprintf(os.Stderr, \"Expected request\/response, but got: %s\", message.Type)\n\t\t\treturn\n\t\t} else {\n\t\t\tfmt.Println(\"message received\")\n\t\t\tdeviceResponse := *message\n\t\t\tdeviceResponse.Source = message.Destination\n\t\t\tdeviceResponse.Destination = message.Source\n\t\t\tdeviceResponse.Payload = []byte(\"Homer Simpson, Smiling Politely\")\n\n\t\t\tif err := writeMessage(&deviceResponse, connection); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unable to write response: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Before sending our request, wait for the server to send the auth status\n\tauthStatusSent.Wait()\n\n\t\/\/ Route will block until the corresponding message returns from the device\n\tresponse, err := manager.Route(\n\t\t&Request{\n\t\t\tMessage: &wrp.SimpleRequestResponse{\n\t\t\t\tSource: \"Example\",\n\t\t\t\tDestination: \"mac:111122223333\",\n\t\t\t\tPayload: []byte(\"Billy Corgan, Smashing Pumpkins\"),\n\t\t\t\tTransactionUUID: \"MyTransactionUUID\",\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Route error: %s\\n\", err)\n\t} else if response != nil {\n\t\tfmt.Printf(\"(%s): %s to %s -> %s\\n\", response.Message.TransactionUUID, response.Message.Source, response.Message.Destination, response.Message.Payload)\n\t}\n\n\t\/\/ Output:\n\t\/\/ mac:111122223333 connected\n\t\/\/ auth status sent\n\t\/\/ auth status received\n\t\/\/ message sent\n\t\/\/ message received\n\t\/\/ response received\n\t\/\/ (MyTransactionUUID): mac:111122223333 to Example -> Homer Simpson, Smiling Politely\n}\n<commit_msg>Tweaked the synchronization to avoid a race condition with the golang example code wrapper<commit_after>package device\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/wrp\"\n)\n\nfunc expectMessage(c Connection) (*wrp.Message, error) {\n\tvar frame bytes.Buffer\n\tif ok, err := c.Read(&frame); !ok || err != nil {\n\t\treturn nil, fmt.Errorf(\"Read failed: %s\", err)\n\t}\n\n\tvar (\n\t\tmessage = new(wrp.Message)\n\t\tdecoder = wrp.NewDecoder(&frame, wrp.Msgpack)\n\t)\n\n\tif err := decoder.Decode(message); err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not decode message: %s\", err)\n\t}\n\n\treturn message, nil\n}\n\nfunc writeMessage(m *wrp.Message, c Connection) error {\n\tif frame, err := c.NextWriter(); err != nil {\n\t\treturn err\n\t} else {\n\t\tencoder := wrp.NewEncoder(frame, wrp.Msgpack)\n\t\tif err := encoder.Encode(m); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn frame.Close()\n\t}\n}\n\nfunc ExampleManagerTransaction() {\n\tdisconnected := new(sync.WaitGroup)\n\tdisconnected.Add(1)\n\n\tvar (\n\t\toptions = &Options{\n\t\t\tLogger: logging.DefaultLogger(),\n\t\t\tAuthDelay: 250 * time.Millisecond,\n\t\t\tListeners: []Listener{\n\t\t\t\tfunc(e *Event) {\n\t\t\t\t\tswitch e.Type {\n\t\t\t\t\tcase Connect:\n\t\t\t\t\t\tfmt.Printf(\"%s connected\\n\", e.Device.ID())\n\t\t\t\t\tcase MessageSent:\n\t\t\t\t\t\tif e.Message.MessageType() == wrp.AuthMessageType {\n\t\t\t\t\t\t\tfmt.Println(\"auth status sent\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfmt.Println(\"message sent\")\n\t\t\t\t\t\t}\n\t\t\t\t\tcase TransactionComplete:\n\t\t\t\t\t\tfmt.Println(\"response received\")\n\t\t\t\t\tcase Disconnect:\n\t\t\t\t\t\tfmt.Printf(\"%s disconnected\\n\", e.Device.ID())\n\t\t\t\t\t\tdisconnected.Done()\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmanager, server, websocketURL = startWebsocketServer(options)\n\n\t\tdialer = NewDialer(options, nil)\n\t\tconnection, _, dialError = dialer.Dial(\n\t\t\twebsocketURL,\n\t\t\t\"mac:111122223333\",\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t)\n\n\tdefer server.Close()\n\tif dialError != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Dial error: %s\\n\", dialError)\n\t\treturn\n\t}\n\n\t\/\/ grab the auth status message that should be coming\n\tif message, err := expectMessage(connection); err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\treturn\n\t} else if message.Type != wrp.AuthMessageType {\n\t\tfmt.Fprintf(os.Stderr, \"Expected auth status, but got: %s\", message.Type)\n\t\treturn\n\t} else if message.Status == nil {\n\t\tfmt.Fprintf(os.Stderr, \"No auth status code\")\n\t\treturn\n\t} else if *message.Status != wrp.AuthStatusAuthorized {\n\t\tfmt.Fprintf(os.Stderr, \"Expected authorized, but got: %d\", *message.Status)\n\t\treturn\n\t} else {\n\t\tfmt.Println(\"auth status received\")\n\t}\n\n\t\/\/ spawn a go routine to respond to our routed message\n\tgo func() {\n\t\tif message, err := expectMessage(connection); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\t\treturn\n\t\t} else if message.Type != wrp.SimpleRequestResponseMessageType {\n\t\t\tfmt.Fprintf(os.Stderr, \"Expected request\/response, but got: %s\", message.Type)\n\t\t\treturn\n\t\t} else {\n\t\t\tfmt.Println(\"message received\")\n\t\t\tdeviceResponse := *message\n\t\t\tdeviceResponse.Source = message.Destination\n\t\t\tdeviceResponse.Destination = message.Source\n\t\t\tdeviceResponse.Payload = []byte(\"Homer Simpson, Smiling Politely\")\n\n\t\t\tif err := writeMessage(&deviceResponse, connection); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unable to write response: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Route will block until the corresponding message returns from the device\n\tresponse, err := manager.Route(\n\t\t&Request{\n\t\t\tMessage: &wrp.SimpleRequestResponse{\n\t\t\t\tSource: \"Example\",\n\t\t\t\tDestination: \"mac:111122223333\",\n\t\t\t\tPayload: []byte(\"Billy Corgan, Smashing Pumpkins\"),\n\t\t\t\tTransactionUUID: \"MyTransactionUUID\",\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Route error: %s\\n\", err)\n\t} else if response != nil {\n\t\tfmt.Printf(\"(%s): %s to %s -> %s\\n\", response.Message.TransactionUUID, response.Message.Source, response.Message.Destination, response.Message.Payload)\n\t}\n\n\t\/\/ close the connection and ensure that the Disconnect event gets sent\n\t\/\/ before continuing. This prevents a race condition with the example code\n\t\/\/ wrapper that swaps out the stream written to by the fmt.Print* functions.\n\tconnection.Close()\n\tdisconnected.Wait()\n\n\t\/\/ Output:\n\t\/\/ mac:111122223333 connected\n\t\/\/ auth status sent\n\t\/\/ auth status received\n\t\/\/ message sent\n\t\/\/ message received\n\t\/\/ response received\n\t\/\/ (MyTransactionUUID): mac:111122223333 to Example -> Homer Simpson, Smiling Politely\n\t\/\/ mac:111122223333 disconnected\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\tmrand \"math\/rand\"\n\n\tclientv2 \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\nvar (\n\t\/\/ dialTotal counts the number of mustCreateConn calls so that endpoint\n\t\/\/ connections can be handed out in round-robin order\n\tdialTotal int\n)\n\nfunc mustCreateConn() *clientv3.Client {\n\tendpoint := endpoints[dialTotal%len(endpoints)]\n\tdialTotal++\n\tcfg := clientv3.Config{Endpoints: []string{endpoint}}\n\tclient, err := clientv3.New(cfg)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"dial error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\treturn client\n}\n\nfunc mustCreateClients(totalClients, totalConns uint) []*clientv3.Client {\n\tconns := make([]*clientv3.Client, totalConns)\n\tfor i := range conns {\n\t\tconns[i] = mustCreateConn()\n\t}\n\n\tclients := make([]*clientv3.Client, totalClients)\n\tfor i := range clients {\n\t\tclients[i] = conns[i%int(totalConns)]\n\t}\n\treturn clients\n}\n\nfunc mustCreateClientsEtcd2(total uint) []clientv2.KeysAPI {\n\tcks := make([]clientv2.KeysAPI, total)\n\tfor i := range cks {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\n\t\tcfg := clientv2.Config{\n\t\t\tEndpoints: []string{endpoint},\n\t\t\tTransport: clientv2.DefaultTransport,\n\t\t\tHeaderTimeoutPerRequest: time.Second,\n\t\t}\n\t\tc, err := clientv2.New(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tkapi := clientv2.NewKeysAPI(c)\n\n\t\tcks[i] = kapi\n\t}\n\treturn cks\n}\n\nfunc mustCreateConnsZk(total uint) []*zk.Conn {\n\tzks := make([]*zk.Conn, total)\n\tfor i := range zks {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\t\tconn, _, err := zk.Connect([]string{endpoint}, time.Second)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tzks[i] = conn\n\t}\n\treturn zks\n}\n\nfunc mustCreateConnsConsul(total uint) []*consulapi.KV {\n\tcss := make([]*consulapi.KV, total)\n\tfor i := range css {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\n\t\tdcfg := consulapi.DefaultConfig()\n\t\tdcfg.Address = endpoint \/\/ x.x.x.x:8500\n\t\tcli, err := consulapi.NewClient(dcfg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tcss[i] = cli.KV()\n\t}\n\treturn css\n}\n\nfunc mustRandBytes(n int) []byte {\n\trb := make([]byte, n)\n\t_, err := rand.Read(rb)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to generate value: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\treturn rb\n}\n\nfunc randBytes(bytesN int) []byte {\n\tconst (\n\t\tletterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\t\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\t\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n\t)\n\tsrc := mrand.NewSource(time.Now().UnixNano())\n\tb := make([]byte, bytesN)\n\tfor i, cache, remain := bytesN-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t}\n\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIdxBits\n\t\tremain--\n\t}\n\treturn b\n}\n\nfunc multiRandBytes(bytesN, sliceN int) [][]byte {\n\tm := make(map[string]struct{})\n\tvar rs [][]byte\n\tfor len(rs) != sliceN {\n\t\tb := randBytes(bytesN)\n\t\tif _, ok := m[string(b)]; !ok {\n\t\t\trs = append(rs, b)\n\t\t\tm[string(b)] = struct{}{}\n\t\t}\n\t}\n\treturn rs\n}\n\nfunc toFile(txt, fpath string) error {\n\tf, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)\n\tif err != nil {\n\t\tf, err = os.Create(fpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(txt); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc toMillisecond(d time.Duration) float64 {\n\treturn d.Seconds() * 1000\n}\n<commit_msg>bench: fix endpoint schema for etcd2<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\tmrand \"math\/rand\"\n\n\tclientv2 \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\nvar (\n\t\/\/ dialTotal counts the number of mustCreateConn calls so that endpoint\n\t\/\/ connections can be handed out in round-robin order\n\tdialTotal int\n)\n\nfunc mustCreateConn() *clientv3.Client {\n\tendpoint := endpoints[dialTotal%len(endpoints)]\n\tdialTotal++\n\tcfg := clientv3.Config{Endpoints: []string{endpoint}}\n\tclient, err := clientv3.New(cfg)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"dial error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\treturn client\n}\n\nfunc mustCreateClients(totalClients, totalConns uint) []*clientv3.Client {\n\tconns := make([]*clientv3.Client, totalConns)\n\tfor i := range conns {\n\t\tconns[i] = mustCreateConn()\n\t}\n\n\tclients := make([]*clientv3.Client, totalClients)\n\tfor i := range clients {\n\t\tclients[i] = conns[i%int(totalConns)]\n\t}\n\treturn clients\n}\n\nfunc mustCreateClientsEtcd2(total uint) []clientv2.KeysAPI {\n\tcks := make([]clientv2.KeysAPI, total)\n\tfor i := range cks {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\n\t\tif !strings.HasPrefix(endpoint, \"http:\/\/\") {\n\t\t\tendpoint = \"http:\/\/\" + endpoint\n\t\t}\n\t\tcfg := clientv2.Config{\n\t\t\tEndpoints: []string{endpoint},\n\t\t\tTransport: clientv2.DefaultTransport,\n\t\t\tHeaderTimeoutPerRequest: time.Second,\n\t\t}\n\t\tc, err := clientv2.New(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tkapi := clientv2.NewKeysAPI(c)\n\n\t\tcks[i] = kapi\n\t}\n\treturn cks\n}\n\nfunc mustCreateConnsZk(total uint) []*zk.Conn {\n\tzks := make([]*zk.Conn, total)\n\tfor i := range zks {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\t\tconn, _, err := zk.Connect([]string{endpoint}, time.Second)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tzks[i] = conn\n\t}\n\treturn zks\n}\n\nfunc mustCreateConnsConsul(total uint) []*consulapi.KV {\n\tcss := make([]*consulapi.KV, total)\n\tfor i := range css {\n\t\tendpoint := endpoints[dialTotal%len(endpoints)]\n\t\tdialTotal++\n\n\t\tdcfg := consulapi.DefaultConfig()\n\t\tdcfg.Address = endpoint \/\/ x.x.x.x:8500\n\t\tcli, err := consulapi.NewClient(dcfg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tcss[i] = cli.KV()\n\t}\n\treturn css\n}\n\nfunc mustRandBytes(n int) []byte {\n\trb := make([]byte, n)\n\t_, err := rand.Read(rb)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to generate value: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\treturn rb\n}\n\nfunc randBytes(bytesN int) []byte {\n\tconst (\n\t\tletterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\t\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\t\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n\t)\n\tsrc := mrand.NewSource(time.Now().UnixNano())\n\tb := make([]byte, bytesN)\n\tfor i, cache, remain := bytesN-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t}\n\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIdxBits\n\t\tremain--\n\t}\n\treturn b\n}\n\nfunc multiRandBytes(bytesN, sliceN int) [][]byte {\n\tm := make(map[string]struct{})\n\tvar rs [][]byte\n\tfor len(rs) != sliceN {\n\t\tb := randBytes(bytesN)\n\t\tif _, ok := m[string(b)]; !ok {\n\t\t\trs = append(rs, b)\n\t\t\tm[string(b)] = struct{}{}\n\t\t}\n\t}\n\treturn rs\n}\n\nfunc toFile(txt, fpath string) error {\n\tf, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)\n\tif err != nil {\n\t\tf, err = os.Create(fpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(txt); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc toMillisecond(d time.Duration) float64 {\n\treturn d.Seconds() * 1000\n}\n<|endoftext|>"} {"text":"<commit_before>package mdb\n\nimport (\n\tcrand \"crypto\/rand\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n)\n\n\/\/ repeatedly put (overwrite) keys.\nfunc BenchmarkTxnPut(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, 0)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tk := ps[rand.Intn(len(ps)\/2)*2]\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\tb.StopTimer()\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n}\n\n\/\/ repeatedly get random keys.\nfunc BenchmarkTxnGetRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := txn.Get(dbi, ps[rand.Intn(len(ps))])\n\t\tif err == NotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\n\/\/ like BenchmarkTxnGetRDONLY, but txn.GetVal() is called instead.\nfunc BenchmarkTxnGetValRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := txn.GetVal(dbi, ps[rand.Intn(len(ps))])\n\t\tif err == NotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\n\/\/ repeatedly scan all the values in a database.\nfunc BenchmarkCursorScanRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfunc() {\n\t\t\tcur, err := txn.CursorOpen(dbi)\n\t\t\tbMust(b, err, \"opening cursor\")\n\t\t\tdefer cur.Close()\n\t\t\tvar count int64\n\t\t\tfor {\n\t\t\t\t_, _, err := cur.Get(nil, nil, NEXT)\n\t\t\t\tif err == NotFound {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t\tif count != benchDBNumKeys {\n\t\t\t\tb.Fatalf(\"unexpected number of keys: %d\", count)\n\t\t\t}\n\t\t}()\n\t}\n\tb.StopTimer()\n}\n\n\/\/ like BenchmarkCursoreScanRDONLY, but cursor.GetVal() is called instead.\nfunc BenchmarkCursorScanValRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfunc() {\n\t\t\tcur, err := txn.CursorOpen(dbi)\n\t\t\tbMust(b, err, \"opening cursor\")\n\t\t\tdefer cur.Close()\n\t\t\tvar count int64\n\t\t\tfor {\n\t\t\t\t_, _, err := cur.GetVal(nil, nil, NEXT)\n\t\t\t\tif err == NotFound {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t\tif count != benchDBNumKeys {\n\t\t\t\tb.Fatalf(\"unexpected number of keys: %d\", count)\n\t\t\t}\n\t\t}()\n\t}\n\tb.StopTimer()\n}\n\nfunc setupBenchDB(b *testing.B) (*Env, string) {\n\tenv, err := NewEnv()\n\tbMust(b, err, \"creating env\")\n\terr = env.SetMaxDBs(26)\n\tbMust(b, err, \"setting max dbs\")\n\terr = env.SetMapSize(1 << 30) \/\/ 1GB\n\tbMust(b, err, \"sizing env\")\n\tpath, err := ioutil.TempDir(\"\", \"mdb_test-bench-\")\n\tbMust(b, err, \"creating temp directory\")\n\terr = env.Open(path, 0, 0644)\n\tif err != nil {\n\t\tteardownBenchDB(b, env, path)\n\t}\n\tbMust(b, err, \"opening database\")\n\treturn env, path\n}\n\nfunc openBenchDBI(b *testing.B, env *Env) DBI {\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tname := \"benchmark\"\n\tdbi, err := txn.DBIOpen(&name, CREATE)\n\tif err != nil {\n\t\ttxn.Abort()\n\t\tb.Fatalf(\"error opening dbi: %v\", err)\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\treturn dbi\n}\n\nfunc teardownBenchDB(b *testing.B, env *Env, path string) {\n\tenv.Close()\n\tos.RemoveAll(path)\n}\n\nfunc randBytes(n int) []byte {\n\tp := make([]byte, n)\n\tcrand.Read(p)\n\treturn p\n}\n\nfunc bMust(b *testing.B, err error, action string) {\n\tif err != nil {\n\t\tb.Fatalf(\"error %s: %v\", action, err)\n\t}\n}\n\nfunc bTxnMust(b *testing.B, txn *Txn, err error, action string) {\n\tif err != nil {\n\t\ttxn.Abort()\n\t\tb.Fatalf(\"error %s: %v\", action, err)\n\t}\n}\n\nconst randSourceSize = 500 << 20 \/\/ size of the 'entropy pool' for random byte generation.\nconst benchDBNumKeys = 100000 \/\/ number of keys to store in benchmark databases\nconst benchDBMaxKeyLen = 30 \/\/ maximum length for database keys (size is limited by MDB)\nconst benchDBMaxValLen = 2000 \/\/ maximum lengh for database values\n\nfunc makeBenchDBKey(c *randSourceCursor) []byte {\n\treturn c.NBytes(rand.Intn(benchDBMaxKeyLen) + 1)\n}\n\nfunc makeBenchDBVal(c *randSourceCursor) []byte {\n\treturn c.NBytes(rand.Intn(benchDBMaxValLen) + 1)\n}\n\n\/\/ holds a bunch of random bytes so repeated generation af 'random' slices is\n\/\/ cheap. acts as a ring which can be read from (although doesn't implement io.Reader).\nvar randSource [randSourceSize]byte\n\nfunc initRandSource(b *testing.B) {\n\tif randSource[0] == 0 && randSource[1] == 0 && randSource[2] == 0 && randSource[3] == 0 {\n\t\tb.Logf(\"initializing random source data\")\n\t\tn, err := crand.Read(randSource[:])\n\t\tbMust(b, err, \"initializing random source\")\n\t\tif n < len(randSource) {\n\t\t\tb.Fatalf(\"unable to read enough random source data %d\", n)\n\t\t}\n\t}\n}\n\n\/\/ acts as a simple byte slice generator.\ntype randSourceCursor int\n\nfunc newRandSourceCursor() randSourceCursor {\n\ti := rand.Intn(randSourceSize)\n\treturn randSourceCursor(i)\n}\n\nfunc (c *randSourceCursor) NBytes(n int) []byte {\n\ti := int(*c)\n\tif n >= randSourceSize {\n\t\tpanic(\"rand size too big\")\n\t}\n\t*c = (*c + randSourceCursor(n)) % randSourceSize\n\t_n := i + n - randSourceSize\n\tif _n > 0 {\n\t\tp := make([]byte, n)\n\t\tm := copy(p, randSource[i:])\n\t\tcopy(p[m:], randSource[:])\n\t\treturn p\n\t}\n\treturn randSource[i : i+n]\n}\n<commit_msg>Remove unreachable code (go vet FTW)<commit_after>package mdb\n\nimport (\n\tcrand \"crypto\/rand\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n)\n\n\/\/ repeatedly put (overwrite) keys.\nfunc BenchmarkTxnPut(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, 0)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tk := ps[rand.Intn(len(ps)\/2)*2]\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\tb.StopTimer()\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n}\n\n\/\/ repeatedly get random keys.\nfunc BenchmarkTxnGetRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := txn.Get(dbi, ps[rand.Intn(len(ps))])\n\t\tif err == NotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\n\/\/ like BenchmarkTxnGetRDONLY, but txn.GetVal() is called instead.\nfunc BenchmarkTxnGetValRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := txn.GetVal(dbi, ps[rand.Intn(len(ps))])\n\t\tif err == NotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\n\/\/ repeatedly scan all the values in a database.\nfunc BenchmarkCursorScanRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfunc() {\n\t\t\tcur, err := txn.CursorOpen(dbi)\n\t\t\tbMust(b, err, \"opening cursor\")\n\t\t\tdefer cur.Close()\n\t\t\tvar count int64\n\t\t\tfor {\n\t\t\t\t_, _, err := cur.Get(nil, nil, NEXT)\n\t\t\t\tif err == NotFound {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t}()\n\t}\n\tb.StopTimer()\n}\n\n\/\/ like BenchmarkCursoreScanRDONLY, but cursor.GetVal() is called instead.\nfunc BenchmarkCursorScanValRDONLY(b *testing.B) {\n\tinitRandSource(b)\n\tenv, path := setupBenchDB(b)\n\tdefer teardownBenchDB(b, env, path)\n\n\tdbi := openBenchDBI(b, env)\n\n\tvar ps [][]byte\n\n\trc := newRandSourceCursor()\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tfor i := 0; i < benchDBNumKeys; i++ {\n\t\tk := makeBenchDBKey(&rc)\n\t\tv := makeBenchDBVal(&rc)\n\t\terr := txn.Put(dbi, k, v, 0)\n\t\tps = append(ps, k, v)\n\t\tbTxnMust(b, txn, err, \"putting data\")\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\n\ttxn, err = env.BeginTxn(nil, RDONLY)\n\tbMust(b, err, \"starting transaction\")\n\tdefer txn.Abort()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfunc() {\n\t\t\tcur, err := txn.CursorOpen(dbi)\n\t\t\tbMust(b, err, \"opening cursor\")\n\t\t\tdefer cur.Close()\n\t\t\tvar count int64\n\t\t\tfor {\n\t\t\t\t_, _, err := cur.GetVal(nil, nil, NEXT)\n\t\t\t\tif err == NotFound {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"error getting data: %v\", err)\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t}()\n\t}\n\tb.StopTimer()\n}\n\nfunc setupBenchDB(b *testing.B) (*Env, string) {\n\tenv, err := NewEnv()\n\tbMust(b, err, \"creating env\")\n\terr = env.SetMaxDBs(26)\n\tbMust(b, err, \"setting max dbs\")\n\terr = env.SetMapSize(1 << 30) \/\/ 1GB\n\tbMust(b, err, \"sizing env\")\n\tpath, err := ioutil.TempDir(\"\", \"mdb_test-bench-\")\n\tbMust(b, err, \"creating temp directory\")\n\terr = env.Open(path, 0, 0644)\n\tif err != nil {\n\t\tteardownBenchDB(b, env, path)\n\t}\n\tbMust(b, err, \"opening database\")\n\treturn env, path\n}\n\nfunc openBenchDBI(b *testing.B, env *Env) DBI {\n\ttxn, err := env.BeginTxn(nil, 0)\n\tbMust(b, err, \"starting transaction\")\n\tname := \"benchmark\"\n\tdbi, err := txn.DBIOpen(&name, CREATE)\n\tif err != nil {\n\t\ttxn.Abort()\n\t\tb.Fatalf(\"error opening dbi: %v\", err)\n\t}\n\terr = txn.Commit()\n\tbMust(b, err, \"commiting transaction\")\n\treturn dbi\n}\n\nfunc teardownBenchDB(b *testing.B, env *Env, path string) {\n\tenv.Close()\n\tos.RemoveAll(path)\n}\n\nfunc randBytes(n int) []byte {\n\tp := make([]byte, n)\n\tcrand.Read(p)\n\treturn p\n}\n\nfunc bMust(b *testing.B, err error, action string) {\n\tif err != nil {\n\t\tb.Fatalf(\"error %s: %v\", action, err)\n\t}\n}\n\nfunc bTxnMust(b *testing.B, txn *Txn, err error, action string) {\n\tif err != nil {\n\t\ttxn.Abort()\n\t\tb.Fatalf(\"error %s: %v\", action, err)\n\t}\n}\n\nconst randSourceSize = 500 << 20 \/\/ size of the 'entropy pool' for random byte generation.\nconst benchDBNumKeys = 100000 \/\/ number of keys to store in benchmark databases\nconst benchDBMaxKeyLen = 30 \/\/ maximum length for database keys (size is limited by MDB)\nconst benchDBMaxValLen = 2000 \/\/ maximum lengh for database values\n\nfunc makeBenchDBKey(c *randSourceCursor) []byte {\n\treturn c.NBytes(rand.Intn(benchDBMaxKeyLen) + 1)\n}\n\nfunc makeBenchDBVal(c *randSourceCursor) []byte {\n\treturn c.NBytes(rand.Intn(benchDBMaxValLen) + 1)\n}\n\n\/\/ holds a bunch of random bytes so repeated generation af 'random' slices is\n\/\/ cheap. acts as a ring which can be read from (although doesn't implement io.Reader).\nvar randSource [randSourceSize]byte\n\nfunc initRandSource(b *testing.B) {\n\tif randSource[0] == 0 && randSource[1] == 0 && randSource[2] == 0 && randSource[3] == 0 {\n\t\tb.Logf(\"initializing random source data\")\n\t\tn, err := crand.Read(randSource[:])\n\t\tbMust(b, err, \"initializing random source\")\n\t\tif n < len(randSource) {\n\t\t\tb.Fatalf(\"unable to read enough random source data %d\", n)\n\t\t}\n\t}\n}\n\n\/\/ acts as a simple byte slice generator.\ntype randSourceCursor int\n\nfunc newRandSourceCursor() randSourceCursor {\n\ti := rand.Intn(randSourceSize)\n\treturn randSourceCursor(i)\n}\n\nfunc (c *randSourceCursor) NBytes(n int) []byte {\n\ti := int(*c)\n\tif n >= randSourceSize {\n\t\tpanic(\"rand size too big\")\n\t}\n\t*c = (*c + randSourceCursor(n)) % randSourceSize\n\t_n := i + n - randSourceSize\n\tif _n > 0 {\n\t\tp := make([]byte, n)\n\t\tm := copy(p, randSource[i:])\n\t\tcopy(p[m:], randSource[:])\n\t\treturn p\n\t}\n\treturn randSource[i : i+n]\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ DefaultInitName is the default name we use when\n\t\/\/ initializing the example file\n\tDefaultInitName = \"example.nomad\"\n)\n\n\/\/ JobInitCommand generates a new job template that you can customize to your\n\/\/ liking, like vagrant init\ntype JobInitCommand struct {\n\tMeta\n}\n\nfunc (c *JobInitCommand) Help() string {\n\thelpText := `\nUsage: nomad job init\nAlias: nomad init\n\n Creates an example job file that can be used as a starting\n point to customize further.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *JobInitCommand) Synopsis() string {\n\treturn \"Create an example job file\"\n}\n\nfunc (c *JobInitCommand) Run(args []string) int {\n\t\/\/ Check for misuse\n\tif len(args) != 0 {\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\n\t\/\/ Check if the file already exists\n\t_, err := os.Stat(DefaultInitName)\n\tif err != nil && !os.IsNotExist(err) {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to stat '%s': %v\", DefaultInitName, err))\n\t\treturn 1\n\t}\n\tif !os.IsNotExist(err) {\n\t\tc.Ui.Error(fmt.Sprintf(\"Job '%s' already exists\", DefaultInitName))\n\t\treturn 1\n\t}\n\n\t\/\/ Write out the example\n\terr = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to write '%s': %v\", DefaultInitName, err))\n\t\treturn 1\n\t}\n\n\t\/\/ Success\n\tc.Ui.Output(fmt.Sprintf(\"Example job file written to %s\", DefaultInitName))\n\treturn 0\n}\n\nvar defaultJob = strings.TrimSpace(`\n# There can only be a single job definition per file. This job is named\n# \"example\" so it will create a job with the ID and Name \"example\".\n\n# The \"job\" stanza is the top-most configuration option in the job\n# specification. A job is a declarative specification of tasks that Nomad\n# should run. Jobs have a globally unique name, one or many task groups, which\n# are themselves collections of one or many tasks.\n#\n# For more information and examples on the \"job\" stanza, please see\n# the online documentation at:\n#\n# https:\/\/www.nomadproject.io\/docs\/job-specification\/job.html\n#\njob \"example\" {\n # The \"region\" parameter specifies the region in which to execute the job. If\n # omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = [\"dc1\"]\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/jobspec\/schedulers.html\n #\n type = \"service\"\n\n # The \"constraint\" stanza defines additional constraints for placing this job,\n # in addition to any resource or driver constraints. This stanza may be placed\n # at the \"job\", \"group\", or \"task\" level, and supports variable interpolation.\n #\n # For more information and examples on the \"constraint\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/constraint.html\n #\n # constraint {\n # attribute = \"${attr.kernel.name}\"\n # value = \"linux\"\n # }\n\n # The \"update\" stanza specifies the update strategy of task groups. The update\n # strategy is used to control things like rolling upgrades, canaries, and\n # blue\/green deployments. If omitted, no update strategy is enforced. The\n # \"update\" stanza may be placed at the job or task group. When placed at the\n # job, it applies to all groups within the job. When placed at both the job and\n # group level, the stanzas are merged with the group's taking precedence.\n #\n # For more information and examples on the \"update\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/update.html\n #\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n \n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n \n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n \n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n \n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue\/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/group.html\n #\n group \"cache\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The \"restart\" stanza configures a group's behavior on task failure. If\n # left unspecified, a default restart policy is used based on the job type.\n #\n # For more information and examples on the \"restart\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/restart.html\n #\n restart {\n # The number of attempts to run the job within the specified interval.\n attempts = 2\n interval = \"30m\"\n\n # The \"delay\" parameter specifies the duration to wait before restarting\n # a task after it has failed.\n delay = \"15s\"\n\n # The \"mode\" parameter controls what happens when a task has restarted\n # \"attempts\" times within the interval. \"delay\" mode delays the next\n # restart until the next interval. \"fail\" mode does not restart the task\n # if \"attempts\" has been hit within the interval.\n mode = \"fail\"\n }\n\n # The \"ephemeral_disk\" stanza instructs Nomad to utilize an ephemeral disk\n # instead of a hard disk requirement. Clients using this stanza should\n # not specify disk requirements in the resources stanza of the task. All\n # tasks in this group will share the same ephemeral disk.\n #\n # For more information and examples on the \"ephemeral_disk\" stanza, please\n # see the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/ephemeral_disk.html\n #\n ephemeral_disk {\n # When sticky is true and the task group is updated, the scheduler\n # will prefer to place the updated allocation on the same node and\n # will migrate the data. This is useful for tasks that store data\n # that should persist across allocation updates.\n # sticky = true\n # \n # Setting migrate to true results in the allocation directory of a\n # sticky allocation directory to be migrated.\n # migrate = true\n\n # The \"size\" parameter specifies the size in MB of shared ephemeral disk\n # between tasks in the group.\n size = 300\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/task.html\n #\n task \"redis\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"redis:3.2\"\n port_map {\n db = 6379\n }\n }\n\n # The \"artifact\" stanza instructs Nomad to download an artifact from a\n # remote source prior to starting the task. This provides a convenient\n # mechanism for downloading configuration files or data needed to run the\n # task. It is possible to specify the \"artifact\" stanza multiple times to\n # download multiple artifacts.\n #\n # For more information and examples on the \"artifact\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/artifact.html\n #\n # artifact {\n # source = \"http:\/\/foo.com\/artifact.tar.gz\"\n # options {\n # checksum = \"md5:c4aa853ad2215426eb7d70a21922e794\"\n # }\n # }\n\n # The \"logs\" stanza instructs the Nomad client on how many log files and\n # the maximum size of those logs files to retain. Logging is enabled by\n # default, but the \"logs\" stanza allows for finer-grained control over\n # the log rotation and storage configuration.\n #\n # For more information and examples on the \"logs\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/logs.html\n #\n # logs {\n # max_files = 10\n # max_file_size = 15\n # }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/resources.html\n #\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 256MB\n network {\n mbits = 10\n port \"db\" {}\n }\n }\n\n # The \"service\" stanza instructs Nomad to register this task as a service\n # in the service discovery engine, which is currently Consul. This will\n # make the service addressable after Nomad has placed it on a host and\n # port.\n #\n # For more information and examples on the \"service\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/service.html\n #\n service {\n name = \"redis-cache\"\n tags = [\"global\", \"cache\"]\n port = \"db\"\n check {\n name = \"alive\"\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/template.html\n #\n # template {\n # data = \"---\\nkey: {{ key \\\"service\/my-key\\\" }}\"\n # destination = \"local\/file.yml\"\n # change_mode = \"signal\"\n # change_signal = \"SIGHUP\"\n # }\n\n # The \"template\" stanza can also be used to create environment variables\n # for tasks that prefer those to config files. The task will be restarted\n # when data pulled from Consul or Vault changes.\n #\n # template {\n # data = \"KEY={{ key \\\"service\/my-key\\\" }}\"\n # destination = \"local\/file.env\"\n # env = true\n # }\n\n # The \"vault\" stanza instructs the Nomad client to acquire a token from\n # a HashiCorp Vault server. The Nomad servers must be configured and\n # authorized to communicate with Vault. By default, Nomad will inject\n # The token into the job via an environment variable and make the token\n # available to the \"template\" stanza. The Nomad client handles the renewal\n # and revocation of the Vault token.\n #\n # For more information and examples on the \"vault\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/vault.html\n #\n # vault {\n # policies = [\"cdn\", \"frontend\"]\n # change_mode = \"signal\"\n # change_signal = \"SIGHUP\"\n # }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n # kill_timeout = \"20s\"\n }\n }\n}\n`)\n<commit_msg>docs: add migrate to example.nomad<commit_after>package command\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ DefaultInitName is the default name we use when\n\t\/\/ initializing the example file\n\tDefaultInitName = \"example.nomad\"\n)\n\n\/\/ JobInitCommand generates a new job template that you can customize to your\n\/\/ liking, like vagrant init\ntype JobInitCommand struct {\n\tMeta\n}\n\nfunc (c *JobInitCommand) Help() string {\n\thelpText := `\nUsage: nomad job init\nAlias: nomad init\n\n Creates an example job file that can be used as a starting\n point to customize further.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *JobInitCommand) Synopsis() string {\n\treturn \"Create an example job file\"\n}\n\nfunc (c *JobInitCommand) Run(args []string) int {\n\t\/\/ Check for misuse\n\tif len(args) != 0 {\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\n\t\/\/ Check if the file already exists\n\t_, err := os.Stat(DefaultInitName)\n\tif err != nil && !os.IsNotExist(err) {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to stat '%s': %v\", DefaultInitName, err))\n\t\treturn 1\n\t}\n\tif !os.IsNotExist(err) {\n\t\tc.Ui.Error(fmt.Sprintf(\"Job '%s' already exists\", DefaultInitName))\n\t\treturn 1\n\t}\n\n\t\/\/ Write out the example\n\terr = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Failed to write '%s': %v\", DefaultInitName, err))\n\t\treturn 1\n\t}\n\n\t\/\/ Success\n\tc.Ui.Output(fmt.Sprintf(\"Example job file written to %s\", DefaultInitName))\n\treturn 0\n}\n\nvar defaultJob = strings.TrimSpace(`\n# There can only be a single job definition per file. This job is named\n# \"example\" so it will create a job with the ID and Name \"example\".\n\n# The \"job\" stanza is the top-most configuration option in the job\n# specification. A job is a declarative specification of tasks that Nomad\n# should run. Jobs have a globally unique name, one or many task groups, which\n# are themselves collections of one or many tasks.\n#\n# For more information and examples on the \"job\" stanza, please see\n# the online documentation at:\n#\n# https:\/\/www.nomadproject.io\/docs\/job-specification\/job.html\n#\njob \"example\" {\n # The \"region\" parameter specifies the region in which to execute the job. If\n # omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = [\"dc1\"]\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/jobspec\/schedulers.html\n #\n type = \"service\"\n\n # The \"constraint\" stanza defines additional constraints for placing this job,\n # in addition to any resource or driver constraints. This stanza may be placed\n # at the \"job\", \"group\", or \"task\" level, and supports variable interpolation.\n #\n # For more information and examples on the \"constraint\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/constraint.html\n #\n # constraint {\n # attribute = \"${attr.kernel.name}\"\n # value = \"linux\"\n # }\n\n # The \"update\" stanza specifies the update strategy of task groups. The update\n # strategy is used to control things like rolling upgrades, canaries, and\n # blue\/green deployments. If omitted, no update strategy is enforced. The\n # \"update\" stanza may be placed at the job or task group. When placed at the\n # job, it applies to all groups within the job. When placed at both the job and\n # group level, the stanzas are merged with the group's taking precedence.\n #\n # For more information and examples on the \"update\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/update.html\n #\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n \n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n \n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n \n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n \n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue\/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The migrate stanza specifies the group's strategy for migrating off of\n # draining nodes. If omitted, a default migration strategy is applied.\n #\n # For more information on the \"migrate\" stanza, please see \n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/migrate.html\n #\n migrate {\n # Specifies the number of task groups that can be migrated at the same\n # time. This number must be less than the total count for the group as\n # (count - max_parallel) will be left running during migrations.\n max_parallel = 1\n\n # Specifies the mechanism in which allocations health is determined. The\n # potential values are \"checks\" or \"task_states\".\n health_check = \"checks\"\n\n # Specifies the minimum time the allocation must be in the healthy state\n # before it is marked as healthy and unblocks further allocations from being\n # migrated. This is specified using a label suffix like \"30s\" or \"15m\".\n min_healthy_time = \"10s\"\n\n # Specifies the deadline in which the allocation must be marked as healthy\n # after which the allocation is automatically transitioned to unhealthy. This\n # is specified using a label suffix like \"2m\" or \"1h\".\n healthy_deadline = \"5m\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/group.html\n #\n group \"cache\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The \"restart\" stanza configures a group's behavior on task failure. If\n # left unspecified, a default restart policy is used based on the job type.\n #\n # For more information and examples on the \"restart\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/restart.html\n #\n restart {\n # The number of attempts to run the job within the specified interval.\n attempts = 2\n interval = \"30m\"\n\n # The \"delay\" parameter specifies the duration to wait before restarting\n # a task after it has failed.\n delay = \"15s\"\n\n # The \"mode\" parameter controls what happens when a task has restarted\n # \"attempts\" times within the interval. \"delay\" mode delays the next\n # restart until the next interval. \"fail\" mode does not restart the task\n # if \"attempts\" has been hit within the interval.\n mode = \"fail\"\n }\n\n # The \"ephemeral_disk\" stanza instructs Nomad to utilize an ephemeral disk\n # instead of a hard disk requirement. Clients using this stanza should\n # not specify disk requirements in the resources stanza of the task. All\n # tasks in this group will share the same ephemeral disk.\n #\n # For more information and examples on the \"ephemeral_disk\" stanza, please\n # see the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/ephemeral_disk.html\n #\n ephemeral_disk {\n # When sticky is true and the task group is updated, the scheduler\n # will prefer to place the updated allocation on the same node and\n # will migrate the data. This is useful for tasks that store data\n # that should persist across allocation updates.\n # sticky = true\n # \n # Setting migrate to true results in the allocation directory of a\n # sticky allocation directory to be migrated.\n # migrate = true\n\n # The \"size\" parameter specifies the size in MB of shared ephemeral disk\n # between tasks in the group.\n size = 300\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/task.html\n #\n task \"redis\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"redis:3.2\"\n port_map {\n db = 6379\n }\n }\n\n # The \"artifact\" stanza instructs Nomad to download an artifact from a\n # remote source prior to starting the task. This provides a convenient\n # mechanism for downloading configuration files or data needed to run the\n # task. It is possible to specify the \"artifact\" stanza multiple times to\n # download multiple artifacts.\n #\n # For more information and examples on the \"artifact\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/artifact.html\n #\n # artifact {\n # source = \"http:\/\/foo.com\/artifact.tar.gz\"\n # options {\n # checksum = \"md5:c4aa853ad2215426eb7d70a21922e794\"\n # }\n # }\n\n # The \"logs\" stanza instructs the Nomad client on how many log files and\n # the maximum size of those logs files to retain. Logging is enabled by\n # default, but the \"logs\" stanza allows for finer-grained control over\n # the log rotation and storage configuration.\n #\n # For more information and examples on the \"logs\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/logs.html\n #\n # logs {\n # max_files = 10\n # max_file_size = 15\n # }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/resources.html\n #\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 256MB\n network {\n mbits = 10\n port \"db\" {}\n }\n }\n\n # The \"service\" stanza instructs Nomad to register this task as a service\n # in the service discovery engine, which is currently Consul. This will\n # make the service addressable after Nomad has placed it on a host and\n # port.\n #\n # For more information and examples on the \"service\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/service.html\n #\n service {\n name = \"redis-cache\"\n tags = [\"global\", \"cache\"]\n port = \"db\"\n check {\n name = \"alive\"\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/template.html\n #\n # template {\n # data = \"---\\nkey: {{ key \\\"service\/my-key\\\" }}\"\n # destination = \"local\/file.yml\"\n # change_mode = \"signal\"\n # change_signal = \"SIGHUP\"\n # }\n\n # The \"template\" stanza can also be used to create environment variables\n # for tasks that prefer those to config files. The task will be restarted\n # when data pulled from Consul or Vault changes.\n #\n # template {\n # data = \"KEY={{ key \\\"service\/my-key\\\" }}\"\n # destination = \"local\/file.env\"\n # env = true\n # }\n\n # The \"vault\" stanza instructs the Nomad client to acquire a token from\n # a HashiCorp Vault server. The Nomad servers must be configured and\n # authorized to communicate with Vault. By default, Nomad will inject\n # The token into the job via an environment variable and make the token\n # available to the \"template\" stanza. The Nomad client handles the renewal\n # and revocation of the Vault token.\n #\n # For more information and examples on the \"vault\" stanza, please see\n # the online documentation at:\n #\n # https:\/\/www.nomadproject.io\/docs\/job-specification\/vault.html\n #\n # vault {\n # policies = [\"cdn\", \"frontend\"]\n # change_mode = \"signal\"\n # change_signal = \"SIGHUP\"\n # }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n # kill_timeout = \"20s\"\n }\n }\n}\n`)\n<|endoftext|>"} {"text":"<commit_before>package reviewdog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\nvar _ CommentService = &CommentWriter{}\n\ntype CommentWriter struct {\n\tw io.Writer\n}\n\nfunc NewCommentWriter(w io.Writer) *CommentWriter {\n\treturn &CommentWriter{w: w}\n}\n\nfunc (s *CommentWriter) Post(c *Comment) error {\n\t_, err := fmt.Fprintln(s.w, strings.Join(c.CheckResult.Lines, \"\\n\"))\n\treturn err\n}\n<commit_msg>introduce UnifiedCommentWriter<commit_after>package reviewdog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\nvar _ CommentService = &CommentWriter{}\n\ntype CommentWriter struct {\n\tw io.Writer\n}\n\nfunc NewCommentWriter(w io.Writer) *CommentWriter {\n\treturn &CommentWriter{w: w}\n}\n\nfunc (s *CommentWriter) Post(c *Comment) error {\n\t_, err := fmt.Fprintln(s.w, strings.Join(c.CheckResult.Lines, \"\\n\"))\n\treturn err\n}\n\nvar _ CommentService = &UnifiedCommentWriter{}\n\n\/\/ UnifiedCommentWriter is comment writer which writes results to given writer\n\/\/ in one of following unified formats.\n\/\/\n\/\/ Format:\n\/\/ - <file>: [<tool name>] <message>\n\/\/ - <file>:<lnum>: [<tool name>] <message>\n\/\/ - <file>:<lnum>:<col>: [<tool name>] <message>\n\/\/ where <message> can be multiple lines.\ntype UnifiedCommentWriter struct {\n\tw io.Writer\n}\n\nfunc NewUnifiedCommentWriter(w io.Writer) *UnifiedCommentWriter {\n\treturn &UnifiedCommentWriter{w: w}\n}\n\nfunc (mc *UnifiedCommentWriter) Post(c *Comment) error {\n\ts := c.Path\n\tif c.Lnum > 0 {\n\t\ts += fmt.Sprintf(\":%d\", c.Lnum)\n\t\tif c.Col > 0 {\n\t\t\ts += fmt.Sprintf(\":%d\", c.Col)\n\t\t}\n\t}\n\ts += fmt.Sprintf(\": [%s] %s\", c.ToolName, c.Body)\n\t_, err := fmt.Fprintln(mc.w, s)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package varlink\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n)\n\n\/\/ Error is a varlink error returned from a method call.\ntype Error struct {\n\tName string\n\tParameters interface{}\n}\n\n\/\/ Error returns the fully-qualified varlink error name.\nfunc (e *Error) Error() string {\n\treturn e.Name\n}\n\n\/\/ Connection is a connection from a client to a service.\ntype Connection struct {\n\taddress string\n\tconn net.Conn\n\treader *bufio.Reader\n\twriter *bufio.Writer\n}\n\n\/\/ Send sends a method call.\nfunc (c *Connection) Send(method string, parameters interface{}, more bool, oneway bool) error {\n\ttype call struct {\n\t\tMethod string `json:\"method\"`\n\t\tParameters interface{} `json:\"parameters,omitempty\"`\n\t\tMore bool `json:\"more,omitempty\"`\n\t\tOneway bool `json:\"oneway,omitempty\"`\n\t}\n\n\tif more && oneway {\n\t\treturn &Error{\n\t\t\tName: \"org.varlink.InvalidParameter\",\n\t\t\tParameters: \"oneway\",\n\t\t}\n\t}\n\n\tm := call{\n\t\tMethod: method,\n\t\tParameters: parameters,\n\t\tMore: more,\n\t\tOneway: oneway,\n\t}\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb = append(b, 0)\n\t_, err = c.writer.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.writer.Flush()\n}\n\n\/\/ Receive receives a method reply.\nfunc (c *Connection) Receive(parameters interface{}) (bool, error) {\n\ttype reply struct {\n\t\tParameters *json.RawMessage `json:\"parameters\"`\n\t\tContinues bool `json:\"continues\"`\n\t\tError string `json:\"error\"`\n\t}\n\n\tout, err := c.reader.ReadBytes('\\x00')\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar m reply\n\terr = json.Unmarshal(out[:len(out)-1], &m)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif m.Error != \"\" {\n\t\treturn false, &Error{\n\t\t\tName: m.Error,\n\t\t\tParameters: m.Parameters,\n\t\t}\n\t}\n\n\tif parameters != nil && m.Parameters != nil {\n\t\treturn m.Continues, json.Unmarshal(*m.Parameters, parameters)\n\t}\n\n\treturn m.Continues, nil\n}\n\n\/\/ Call sends a method call and returns the result of the call.\nfunc (c *Connection) Call(method string, parameters interface{}, result interface{}) error {\n\terr := c.Send(method, ¶meters, false, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.Receive(result)\n\treturn err\n}\n\n\/\/ GetInterfaceDescription requests the interface description string from the service.\nfunc (c *Connection) GetInterfaceDescription(name string) (string, error) {\n\ttype request struct {\n\t\tInterface string `json:\"interface\"`\n\t}\n\ttype reply struct {\n\t\tDescription string `json:\"description\"`\n\t}\n\n\tvar r reply\n\terr := c.Call(\"org.varlink.service.GetInterfaceDescription\", request{Interface: name}, &r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r.Description, nil\n}\n\n\/\/ GetInfo requests information about the service.\nfunc (c *Connection) GetInfo(vendor *string, product *string, version *string, url *string, interfaces *[]string) error {\n\ttype reply struct {\n\t\tVendor string `json:\"vendor\"`\n\t\tProduct string `json:\"product\"`\n\t\tVersion string `json:\"version\"`\n\t\tURL string `json:\"url\"`\n\t\tInterfaces []string `json:\"interfaces\"`\n\t}\n\n\tvar r reply\n\terr := c.Call(\"org.varlink.service.GetInfo\", nil, &r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vendor != nil {\n\t\t*vendor = r.Vendor\n\t}\n\tif product != nil {\n\t\t*product = r.Product\n\t}\n\tif version != nil {\n\t\t*version = r.Version\n\t}\n\tif url != nil {\n\t\t*url = r.URL\n\t}\n\tif interfaces != nil {\n\t\t*interfaces = r.Interfaces\n\t}\n\n\treturn nil\n}\n\n\/\/ Close terminates the connection.\nfunc (c *Connection) Close() error {\n\treturn c.conn.Close()\n}\n\n\/\/ NewConnection returns a new connection to the given address.\nfunc NewConnection(address string) (*Connection, error) {\n\tvar err error\n\n\twords := strings.SplitN(address, \":\", 2)\n\tprotocol := words[0]\n\taddr := words[1]\n\n\t\/\/ Ignore parameters after ';'\n\twords = strings.SplitN(addr, \";\", 2)\n\tif words != nil {\n\t\taddr = words[0]\n\t}\n\n\tswitch protocol {\n\tcase \"unix\":\n\t\tbreak\n\n\tcase \"tcp\":\n\t\tbreak\n\t}\n\n\tc := Connection{}\n\tc.conn, err = net.Dial(protocol, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.address = address\n\tc.reader = bufio.NewReader(c.conn)\n\tc.writer = bufio.NewWriter(c.conn)\n\n\treturn &c, nil\n}\n<commit_msg>Update \"varlink\" module<commit_after>package varlink\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n)\n\n\/\/ Message flags for Send(). More indicates that the client accepts more than one method\n\/\/ reply to this call. Oneway requests, that the service must not send a method reply to\n\/\/ this call. Continues indicates that the service will send more than one reply.\nconst (\n\tMore = 1 << iota\n\tOneway = 1 << iota\n\tContinues = 1 << iota\n)\n\n\/\/ Error is a varlink error returned from a method call.\ntype Error struct {\n\tName string\n\tParameters interface{}\n}\n\n\/\/ Error returns the fully-qualified varlink error name.\nfunc (e *Error) Error() string {\n\treturn e.Name\n}\n\n\/\/ Connection is a connection from a client to a service.\ntype Connection struct {\n\taddress string\n\tconn net.Conn\n\treader *bufio.Reader\n\twriter *bufio.Writer\n}\n\n\/\/ Send sends a method call. It returns a receive() function which is called to retrieve the method reply.\n\/\/ If Send() is called with the `More`flag and the receive() function carries the `Continues` flag, receive()\n\/\/ can be called multiple times to retrieve multiple replies.\nfunc (c *Connection) Send(method string, parameters interface{}, flags uint64) (func(interface{}) (uint64, error), error) {\n\ttype call struct {\n\t\tMethod string `json:\"method\"`\n\t\tParameters interface{} `json:\"parameters,omitempty\"`\n\t\tMore bool `json:\"more,omitempty\"`\n\t\tOneway bool `json:\"oneway,omitempty\"`\n\t}\n\n\tif (flags&More != 0) && (flags&Oneway != 0) {\n\t\treturn nil, &Error{\n\t\t\tName: \"org.varlink.InvalidParameter\",\n\t\t\tParameters: \"oneway\",\n\t\t}\n\t}\n\n\tm := call{\n\t\tMethod: method,\n\t\tParameters: parameters,\n\t\tMore: flags&More != 0,\n\t\tOneway: flags&Oneway != 0,\n\t}\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb = append(b, 0)\n\t_, err = c.writer.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.writer.Flush()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treceive := func(out_parameters interface{}) (uint64, error) {\n\t\ttype reply struct {\n\t\t\tParameters *json.RawMessage `json:\"parameters\"`\n\t\t\tContinues bool `json:\"continues\"`\n\t\t\tError string `json:\"error\"`\n\t\t}\n\n\t\tout, err := c.reader.ReadBytes('\\x00')\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tvar m reply\n\t\terr = json.Unmarshal(out[:len(out)-1], &m)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tif m.Error != \"\" {\n\t\t\terr = &Error{\n\t\t\t\tName: m.Error,\n\t\t\t\tParameters: m.Parameters,\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\n\t\tif m.Parameters != nil {\n\t\t\tjson.Unmarshal(*m.Parameters, out_parameters)\n\t\t}\n\n\t\tif m.Continues {\n\t\t\treturn Continues, nil\n\t\t}\n\n\t\treturn 0, nil\n\t}\n\n\treturn receive, nil\n}\n\n\/\/ Call sends a method call and returns the method reply.\nfunc (c *Connection) Call(method string, parameters interface{}, out_parameters interface{}) error {\n\treceive, err := c.Send(method, ¶meters, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = receive(out_parameters)\n\treturn err\n}\n\n\/\/ GetInterfaceDescription requests the interface description string from the service.\nfunc (c *Connection) GetInterfaceDescription(name string) (string, error) {\n\ttype request struct {\n\t\tInterface string `json:\"interface\"`\n\t}\n\ttype reply struct {\n\t\tDescription string `json:\"description\"`\n\t}\n\n\tvar r reply\n\terr := c.Call(\"org.varlink.service.GetInterfaceDescription\", request{Interface: name}, &r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r.Description, nil\n}\n\n\/\/ GetInfo requests information about the service.\nfunc (c *Connection) GetInfo(vendor *string, product *string, version *string, url *string, interfaces *[]string) error {\n\ttype reply struct {\n\t\tVendor string `json:\"vendor\"`\n\t\tProduct string `json:\"product\"`\n\t\tVersion string `json:\"version\"`\n\t\tURL string `json:\"url\"`\n\t\tInterfaces []string `json:\"interfaces\"`\n\t}\n\n\tvar r reply\n\terr := c.Call(\"org.varlink.service.GetInfo\", nil, &r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vendor != nil {\n\t\t*vendor = r.Vendor\n\t}\n\tif product != nil {\n\t\t*product = r.Product\n\t}\n\tif version != nil {\n\t\t*version = r.Version\n\t}\n\tif url != nil {\n\t\t*url = r.URL\n\t}\n\tif interfaces != nil {\n\t\t*interfaces = r.Interfaces\n\t}\n\n\treturn nil\n}\n\n\/\/ Close terminates the connection.\nfunc (c *Connection) Close() error {\n\treturn c.conn.Close()\n}\n\n\/\/ NewConnection returns a new connection to the given address.\nfunc NewConnection(address string) (*Connection, error) {\n\tvar err error\n\n\twords := strings.SplitN(address, \":\", 2)\n\tprotocol := words[0]\n\taddr := words[1]\n\n\t\/\/ Ignore parameters after ';'\n\twords = strings.SplitN(addr, \";\", 2)\n\tif words != nil {\n\t\taddr = words[0]\n\t}\n\n\tswitch protocol {\n\tcase \"unix\":\n\t\tbreak\n\n\tcase \"tcp\":\n\t\tbreak\n\t}\n\n\tc := Connection{}\n\tc.conn, err = net.Dial(protocol, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.address = address\n\tc.reader = bufio.NewReader(c.conn)\n\tc.writer = bufio.NewWriter(c.conn)\n\n\treturn &c, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"os\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/grafeas\/voucher\/v2\/cmd\/config\"\n\t\"github.com\/grafeas\/voucher\/v2\/subscriber\"\n)\n\nvar subscriberCmd = &cobra.Command{\n\tUse: \"subscriber\",\n\tShort: \"Runs the subscriber\",\n\tLong: `Run the go subscriber that automatically vouches for images on the specified subscription and project\n\tuse --project=<project> --subscription=<subscription> to specify the project and subscription you want the subscriber to pull from`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar log = &logrus.Logger{\n\t\t\tOut: os.Stderr,\n\t\t\tFormatter: new(logrus.JSONFormatter),\n\t\t\tHooks: make(logrus.LevelHooks),\n\t\t\tLevel: logrus.DebugLevel,\n\t\t}\n\n\t\tsecrets, err := config.ReadSecrets()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error loading EJSON file, no secrets loaded: %s\", err)\n\t\t}\n\n\t\tmetricsClient, err := config.MetricsClient(secrets)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error configuring metrics client: %s\", err)\n\t\t}\n\n\t\tconfig.RegisterDynamicChecks()\n\n\t\tsubscriberConfig := subscriber.Config{\n\t\t\tProject: viper.GetString(\"pubsub.project\"),\n\t\t\tSubscription: viper.GetString(\"pubsub.subscription\"),\n\t\t\tRequiredChecks: config.GetRequiredChecksFromConfig()[\"all\"],\n\t\t\tDryRun: viper.GetBool(\"dryrun\"),\n\t\t\tTimeout: viper.GetInt(\"pubsub.timeout\"),\n\t\t}\n\t\tvoucherSubscriber := subscriber.NewSubscriber(&subscriberConfig, secrets, metricsClient, log)\n\n\t\terr = voucherSubscriber.Subscribe(context.Background())\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"couldn't pull pub\/sub messages: %s\", err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tcobra.OnInitialize(config.InitConfig)\n\n\tsubscriberCmd.Flags().StringP(\"project\", \"p\", \"\", \"pub\/sub project that has the subsciprion (required)\")\n\tsubscriberCmd.MarkFlagRequired(\"project\")\n\tviper.BindPFlag(\"pubsub.project\", subscriberCmd.Flags().Lookup(\"project\"))\n\tsubscriberCmd.Flags().StringP(\"subscription\", \"s\", \"\", \"pub\/sub topic subscription (required)\")\n\tsubscriberCmd.MarkFlagRequired(\"subscription\")\n\tviper.BindPFlag(\"pubsub.subscription\", subscriberCmd.Flags().Lookup(\"subscription\"))\n\tsubscriberCmd.Flags().StringVarP(&config.FileName, \"config\", \"c\", \"\", \"path to config\")\n\tsubscriberCmd.Flags().IntP(\"timeout\", \"\", 240, \"number of seconds that should be dedicated to a Voucher call\")\n\tviper.BindPFlag(\"pubsub.timeout\", subscriberCmd.Flags().Lookup(\"timeout\"))\n}\n<commit_msg>flush subscriber on close<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/grafeas\/voucher\/v2\/cmd\/config\"\n\t\"github.com\/grafeas\/voucher\/v2\/subscriber\"\n)\n\nvar subscriberCmd = &cobra.Command{\n\tUse: \"subscriber\",\n\tShort: \"Runs the subscriber\",\n\tLong: `Run the go subscriber that automatically vouches for images on the specified subscription and project\n\tuse --project=<project> --subscription=<subscription> to specify the project and subscription you want the subscriber to pull from`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar log = &logrus.Logger{\n\t\t\tOut: os.Stderr,\n\t\t\tFormatter: new(logrus.JSONFormatter),\n\t\t\tHooks: make(logrus.LevelHooks),\n\t\t\tLevel: logrus.DebugLevel,\n\t\t}\n\n\t\tsecrets, err := config.ReadSecrets()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error loading EJSON file, no secrets loaded: %s\", err)\n\t\t}\n\n\t\tmetricsClient, err := config.MetricsClient(secrets)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error configuring metrics client: %s\", err)\n\t\t} else if closer, ok := metricsClient.(io.Closer); ok {\n\t\t\tdefer closer.Close()\n\t\t}\n\n\t\tconfig.RegisterDynamicChecks()\n\n\t\tsubscriberConfig := subscriber.Config{\n\t\t\tProject: viper.GetString(\"pubsub.project\"),\n\t\t\tSubscription: viper.GetString(\"pubsub.subscription\"),\n\t\t\tRequiredChecks: config.GetRequiredChecksFromConfig()[\"all\"],\n\t\t\tDryRun: viper.GetBool(\"dryrun\"),\n\t\t\tTimeout: viper.GetInt(\"pubsub.timeout\"),\n\t\t}\n\t\tvoucherSubscriber := subscriber.NewSubscriber(&subscriberConfig, secrets, metricsClient, log)\n\n\t\terr = voucherSubscriber.Subscribe(context.Background())\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"couldn't pull pub\/sub messages: %s\", err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tcobra.OnInitialize(config.InitConfig)\n\n\tsubscriberCmd.Flags().StringP(\"project\", \"p\", \"\", \"pub\/sub project that has the subsciprion (required)\")\n\tsubscriberCmd.MarkFlagRequired(\"project\")\n\tviper.BindPFlag(\"pubsub.project\", subscriberCmd.Flags().Lookup(\"project\"))\n\tsubscriberCmd.Flags().StringP(\"subscription\", \"s\", \"\", \"pub\/sub topic subscription (required)\")\n\tsubscriberCmd.MarkFlagRequired(\"subscription\")\n\tviper.BindPFlag(\"pubsub.subscription\", subscriberCmd.Flags().Lookup(\"subscription\"))\n\tsubscriberCmd.Flags().StringVarP(&config.FileName, \"config\", \"c\", \"\", \"path to config\")\n\tsubscriberCmd.Flags().IntP(\"timeout\", \"\", 240, \"number of seconds that should be dedicated to a Voucher call\")\n\tviper.BindPFlag(\"pubsub.timeout\", subscriberCmd.Flags().Lookup(\"timeout\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package vtls\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tday = 24 * time.Hour\n\tyear = 365 * day\n\n\tcertTime = time.Minute\n)\n\n\/\/ ListenAndServeTLS mostly mirrors the http.ListenAndServeTLS API, but\n\/\/ generates the certificates for the server automatically via vault, with a\n\/\/ short TTL. The function only needs an additional Certifier parameter which\n\/\/ can generate signed certificates in order to work properly.\nfunc ListenAndServeTLS(addr string, handler http.Handler, crt Certifier) error {\n\tcerts := map[string]*tls.Certificate{}\n\tcertsM := &sync.RWMutex{}\n\n\taddCert := func(name string) (*tls.Certificate, error) {\n\t\tlog.Println(\"Generating cert for \", name)\n\t\tcrt, err := crt.Certify(name, certTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcertsM.Lock()\n\t\tcerts[name] = &crt\n\t\tcertsM.Unlock()\n\t\treturn &crt, nil\n\t}\n\n\tgetCert := func(name string) (*tls.Certificate, error) {\n\t\tlkr := certsM.RLocker()\n\t\tlkr.Lock()\n\n\t\tif c, ok := certs[name]; ok {\n\t\t\tlog.Println(c.Leaf.NotAfter.String())\n\t\t\tn := time.Now()\n\t\t\tif n.After(c.Leaf.NotBefore) && n.Before(c.Leaf.NotAfter) {\n\t\t\t\tlkr.Unlock()\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t}\n\t\tlkr.Unlock()\n\n\t\treturn addCert(name)\n\t}\n\n\ttl, err := tls.Listen(\"tcp\", addr, &tls.Config{\n\t\tGetCertificate: func(h *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tif h.ServerName == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot generate certs for IP alone\")\n\t\t\t}\n\t\t\treturn getCert(h.ServerName)\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler == nil {\n\t\thandler = http.DefaultServeMux\n\t}\n\n\treturn http.Serve(tl, handler)\n}\n<commit_msg>Split certCache into separate type<commit_after>package vtls\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tday = 24 * time.Hour\n\tyear = 365 * day\n\n\tcertTime = time.Minute\n)\n\ntype certCache struct {\n\tm map[string]*tls.Certificate\n\tmut *sync.RWMutex\n\tcrt Certifier\n}\n\nfunc newCertCache(crt Certifier) *certCache {\n\treturn &certCache{\n\t\tm: map[string]*tls.Certificate{},\n\t\tmut: &sync.RWMutex{},\n\t\tcrt: crt,\n\t}\n}\n\nfunc (cc *certCache) add(name string) (*tls.Certificate, error) {\n\tcrt, err := cc.crt.Certify(name, certTime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcc.mut.Lock()\n\tcc.m[name] = &crt\n\tcc.mut.Unlock()\n\treturn &crt, nil\n}\n\nfunc (cc *certCache) get(name string) (*tls.Certificate, error) {\n\tlkr := cc.mut.RLocker()\n\tlkr.Lock()\n\n\tif c, ok := cc.m[name]; ok {\n\t\tn := time.Now()\n\t\tif n.After(c.Leaf.NotBefore) && n.Before(c.Leaf.NotAfter) {\n\t\t\tlkr.Unlock()\n\t\t\treturn c, nil\n\t\t}\n\t}\n\tlkr.Unlock()\n\n\treturn cc.add(name)\n}\n\n\/\/ ListenAndServeTLS mostly mirrors the http.ListenAndServeTLS API, but\n\/\/ generates the certificates for the server automatically via vault, with a\n\/\/ short TTL. The function only needs an additional Certifier parameter which\n\/\/ can generate signed certificates in order to work properly.\nfunc ListenAndServeTLS(addr string, handler http.Handler, crt Certifier) error {\n\tcerts := newCertCache(crt)\n\n\ttl, err := tls.Listen(\"tcp\", addr, &tls.Config{\n\t\tGetCertificate: func(h *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tif h.ServerName == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"Cannot generate certs without TLS SNI (no server name was indicated)\")\n\t\t\t}\n\t\t\treturn certs.get(h.ServerName)\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler == nil {\n\t\thandler = http.DefaultServeMux\n\t}\n\n\treturn http.Serve(tl, handler)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"container\/list\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/gogits\/gogs\/modules\/log\"\n\n\t\"github.com\/gogits\/git\"\n\t\"github.com\/gogits\/gogs\/models\"\n\t\"github.com\/gogits\/gogs\/modules\/base\"\n)\n\nvar (\n\tCOMMANDS_READONLY = map[string]int{\n\t\t\"git-upload-pack\": models.AU_WRITABLE,\n\t\t\"git upload-pack\": models.AU_WRITABLE,\n\t\t\"git-upload-archive\": models.AU_WRITABLE,\n\t}\n\n\tCOMMANDS_WRITE = map[string]int{\n\t\t\"git-receive-pack\": models.AU_READABLE,\n\t\t\"git receive-pack\": models.AU_READABLE,\n\t}\n)\n\nvar CmdServ = cli.Command{\n\tName: \"serv\",\n\tUsage: \"This command just should be called by ssh shell\",\n\tDescription: `\ngogs serv provide access auth for repositories`,\n\tAction: runServ,\n\tFlags: []cli.Flag{},\n}\n\nfunc parseCmd(cmd string) (string, string) {\n\tss := strings.SplitN(cmd, \" \", 2)\n\tif len(ss) != 2 {\n\t\treturn \"\", \"\"\n\t}\n\n\tverb, args := ss[0], ss[1]\n\tif verb == \"git\" {\n\t\tss = strings.SplitN(args, \" \", 2)\n\t\targs = ss[1]\n\t\tverb = fmt.Sprintf(\"%s %s\", verb, ss[0])\n\t}\n\treturn verb, args\n}\n\nfunc In(b string, sl map[string]int) bool {\n\t_, e := sl[b]\n\treturn e\n}\n\nfunc runServ(k *cli.Context) {\n\tbase.NewConfigContext()\n\tmodels.LoadModelsConfig()\n\tmodels.NewEngine()\n\n\tkeys := strings.Split(os.Args[2], \"-\")\n\tif len(keys) != 2 {\n\t\tfmt.Println(\"auth file format error\")\n\t\treturn\n\t}\n\n\tkeyId, err := strconv.ParseInt(keys[1], 10, 64)\n\tif err != nil {\n\t\tfmt.Println(\"auth file format error\")\n\t\treturn\n\t}\n\tuser, err := models.GetUserByKeyId(keyId)\n\tif err != nil {\n\t\tfmt.Println(\"You have no right to access\")\n\t\treturn\n\t}\n\n\tcmd := os.Getenv(\"SSH_ORIGINAL_COMMAND\")\n\tif cmd == \"\" {\n\t\tprintln(\"Hi\", user.Name, \"! You've successfully authenticated, but Gogs does not provide shell access.\")\n\t\treturn\n\t}\n\n\tverb, args := parseCmd(cmd)\n\trRepo := strings.Trim(args, \"'\")\n\trr := strings.SplitN(rRepo, \"\/\", 2)\n\tif len(rr) != 2 {\n\t\tprintln(\"Unavilable repository\", args)\n\t\treturn\n\t}\n\trepoName := rr[1]\n\tif strings.HasSuffix(repoName, \".git\") {\n\t\trepoName = repoName[:len(repoName)-4]\n\t}\n\n\trepo, err := models.GetRepositoryByName(user.Id, repoName)\n\tvar isExist bool = true\n\tif err != nil {\n\t\tif err == models.ErrRepoNotExist {\n\t\t\tisExist = false\n\t\t} else {\n\t\t\tprintln(\"Unavilable repository\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tisWrite := In(verb, COMMANDS_WRITE)\n\tisRead := In(verb, COMMANDS_READONLY)\n\n\tswitch {\n\tcase isWrite:\n\t\thas, err := models.HasAccess(user.Name, repoName, models.AU_WRITABLE)\n\t\tif err != nil {\n\t\t\tprintln(\"Inernel error:\", err)\n\t\t\treturn\n\t\t}\n\t\tif !has {\n\t\t\tprintln(\"You have no right to write this repository\")\n\t\t\treturn\n\t\t}\n\tcase isRead:\n\t\thas, err := models.HasAccess(user.Name, repoName, models.AU_READABLE)\n\t\tif err != nil {\n\t\t\tprintln(\"Inernel error\")\n\t\t\treturn\n\t\t}\n\t\tif !has {\n\t\t\thas, err = models.HasAccess(user.Name, repoName, models.AU_WRITABLE)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"Inernel error\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif !has {\n\t\t\tprintln(\"You have no right to access this repository\")\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tprintln(\"Unknown command\")\n\t\treturn\n\t}\n\n\tif !isExist {\n\t\tif isRead {\n\t\t\tprintln(\"Repository\", user.Name+\"\/\"+repoName, \"is not exist\")\n\t\t\treturn\n\t\t} else if isWrite {\n\t\t\t_, err := models.CreateRepository(user, repoName, \"\", \"\", \"\", false, true)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"Create repository failed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\trep, err := git.OpenRepository(models.RepoPath(user.Name, repoName))\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\treturn\n\t}\n\n\trefs, err := rep.AllReferencesMap()\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\treturn\n\t}\n\n\tgitcmd := exec.Command(verb, rRepo)\n\tgitcmd.Dir = base.RepoRootPath\n\n\tvar s string\n\tb := bytes.NewBufferString(s)\n\n\tgitcmd.Stdout = io.MultiWriter(os.Stdout, b)\n\t\/\/gitcmd.Stdin = io.MultiReader(os.Stdin, b)\n\tgitcmd.Stdin = os.Stdin\n\tgitcmd.Stderr = os.Stderr\n\n\tif err = gitcmd.Run(); err != nil {\n\t\tprintln(\"execute command error:\", err.Error())\n\t}\n\n\tif !strings.HasPrefix(cmd, \"git-receive-pack\") {\n\t\treturn\n\t}\n\n\t\/\/ update\n\t\/\/w, _ := os.Create(\"serve.log\")\n\t\/\/defer w.Close()\n\t\/\/log.SetOutput(w)\n\n\tvar t = \"ok refs\/heads\/\"\n\tvar i int\n\tvar refname string\n\tfor {\n\t\tl, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\ti = i + 1\n\t\tl = l[:len(l)-1]\n\t\tidx := strings.Index(l, t)\n\t\tif idx > 0 {\n\t\t\trefname = l[idx+len(t):]\n\t\t}\n\t}\n\tvar ref *git.Reference\n\tvar ok bool\n\n\tvar l *list.List\n\t\/\/log.Info(\"----\", refname, \"-----\")\n\tif ref, ok = refs[refname]; !ok {\n\t\trefs, err = rep.AllReferencesMap()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif ref, ok = refs[refname]; !ok {\n\t\t\tprintln(\"unknow reference name -\", refname, \"-\")\n\t\t\treturn\n\t\t}\n\t\tl, err = ref.AllCommits()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/log.Info(\"----\", ref, \"-----\")\n\t\tvar last *git.Commit\n\t\t\/\/log.Info(\"00000\", ref.Oid.String())\n\t\tlast, err = ref.LastCommit()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tref2, err := rep.LookupReference(ref.Name)\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t\/\/log.Info(\"11111\", ref2.Oid.String())\n\t\tbefore, err := ref2.LastCommit()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/log.Info(\"----\", before.Id(), \"-----\", last.Id())\n\t\tl = ref.CommitsBetween(before, last)\n\t}\n\n\tcommits := make([][]string, 0)\n\tvar maxCommits = 3\n\tfor e := l.Back(); e != nil; e = e.Prev() {\n\t\tcommit := e.Value.(*git.Commit)\n\t\tcommits = append(commits, []string{commit.Id().String(), commit.Message()})\n\t\tif len(commits) >= maxCommits {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err = models.CommitRepoAction(user.Id, user.Name,\n\t\trepo.Id, repoName, refname, &models.PushCommits{l.Len(), commits}); err != nil {\n\t\tlog.Error(\"runUpdate.models.CommitRepoAction: %v\", err, commits)\n\t} else {\n\t\t\/\/log.Info(\"refname\", refname)\n\t\t\/\/log.Info(\"Listen: %v\", cmd)\n\t\t\/\/fmt.Println(\"...\", cmd)\n\n\t\t\/\/runUpdate(k)\n\t\tc := exec.Command(\"exec\", \"git\", \"update-server-info\")\n\t\tc.Run()\n\t}\n}\n<commit_msg>bug fixed<commit_after>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"container\/list\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/gogits\/gogs\/modules\/log\"\n\n\t\"github.com\/gogits\/git\"\n\t\"github.com\/gogits\/gogs\/models\"\n\t\"github.com\/gogits\/gogs\/modules\/base\"\n)\n\nvar (\n\tCOMMANDS_READONLY = map[string]int{\n\t\t\"git-upload-pack\": models.AU_WRITABLE,\n\t\t\"git upload-pack\": models.AU_WRITABLE,\n\t\t\"git-upload-archive\": models.AU_WRITABLE,\n\t}\n\n\tCOMMANDS_WRITE = map[string]int{\n\t\t\"git-receive-pack\": models.AU_READABLE,\n\t\t\"git receive-pack\": models.AU_READABLE,\n\t}\n)\n\nvar CmdServ = cli.Command{\n\tName: \"serv\",\n\tUsage: \"This command just should be called by ssh shell\",\n\tDescription: `\ngogs serv provide access auth for repositories`,\n\tAction: runServ,\n\tFlags: []cli.Flag{},\n}\n\nfunc parseCmd(cmd string) (string, string) {\n\tss := strings.SplitN(cmd, \" \", 2)\n\tif len(ss) != 2 {\n\t\treturn \"\", \"\"\n\t}\n\n\tverb, args := ss[0], ss[1]\n\tif verb == \"git\" {\n\t\tss = strings.SplitN(args, \" \", 2)\n\t\targs = ss[1]\n\t\tverb = fmt.Sprintf(\"%s %s\", verb, ss[0])\n\t}\n\treturn verb, args\n}\n\nfunc In(b string, sl map[string]int) bool {\n\t_, e := sl[b]\n\treturn e\n}\n\nfunc runServ(k *cli.Context) {\n\tbase.NewConfigContext()\n\tmodels.LoadModelsConfig()\n\tmodels.NewEngine()\n\n\tkeys := strings.Split(os.Args[2], \"-\")\n\tif len(keys) != 2 {\n\t\tfmt.Println(\"auth file format error\")\n\t\treturn\n\t}\n\n\tkeyId, err := strconv.ParseInt(keys[1], 10, 64)\n\tif err != nil {\n\t\tfmt.Println(\"auth file format error\")\n\t\treturn\n\t}\n\tuser, err := models.GetUserByKeyId(keyId)\n\tif err != nil {\n\t\tfmt.Println(\"You have no right to access\")\n\t\treturn\n\t}\n\n\tcmd := os.Getenv(\"SSH_ORIGINAL_COMMAND\")\n\tif cmd == \"\" {\n\t\tprintln(\"Hi\", user.Name, \"! You've successfully authenticated, but Gogs does not provide shell access.\")\n\t\treturn\n\t}\n\n\tverb, args := parseCmd(cmd)\n\trRepo := strings.Trim(args, \"'\")\n\trr := strings.SplitN(rRepo, \"\/\", 2)\n\tif len(rr) != 2 {\n\t\tprintln(\"Unavilable repository\", args)\n\t\treturn\n\t}\n\trepoName := rr[1]\n\tif strings.HasSuffix(repoName, \".git\") {\n\t\trepoName = repoName[:len(repoName)-4]\n\t}\n\n\trepo, err := models.GetRepositoryByName(user.Id, repoName)\n\tvar isExist bool = true\n\tif err != nil {\n\t\tif err == models.ErrRepoNotExist {\n\t\t\tisExist = false\n\t\t} else {\n\t\t\tprintln(\"Unavilable repository\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tisWrite := In(verb, COMMANDS_WRITE)\n\tisRead := In(verb, COMMANDS_READONLY)\n\n\tswitch {\n\tcase isWrite:\n\t\thas, err := models.HasAccess(user.Name, repoName, models.AU_WRITABLE)\n\t\tif err != nil {\n\t\t\tprintln(\"Inernel error:\", err)\n\t\t\treturn\n\t\t}\n\t\tif !has {\n\t\t\tprintln(\"You have no right to write this repository\")\n\t\t\treturn\n\t\t}\n\tcase isRead:\n\t\thas, err := models.HasAccess(user.Name, repoName, models.AU_READABLE)\n\t\tif err != nil {\n\t\t\tprintln(\"Inernel error\")\n\t\t\treturn\n\t\t}\n\t\tif !has {\n\t\t\thas, err = models.HasAccess(user.Name, repoName, models.AU_WRITABLE)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"Inernel error\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif !has {\n\t\t\tprintln(\"You have no right to access this repository\")\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tprintln(\"Unknown command\")\n\t\treturn\n\t}\n\n\tif !isExist {\n\t\tif isRead {\n\t\t\tprintln(\"Repository\", user.Name+\"\/\"+repoName, \"is not exist\")\n\t\t\treturn\n\t\t} else if isWrite {\n\t\t\t_, err := models.CreateRepository(user, repoName, \"\", \"\", \"\", false, true)\n\t\t\tif err != nil {\n\t\t\t\tprintln(\"Create repository failed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\trep, err := git.OpenRepository(models.RepoPath(user.Name, repoName))\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\treturn\n\t}\n\n\trefs, err := rep.AllReferencesMap()\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\treturn\n\t}\n\n\tgitcmd := exec.Command(verb, rRepo)\n\tgitcmd.Dir = base.RepoRootPath\n\n\tvar s string\n\tb := bytes.NewBufferString(s)\n\n\tgitcmd.Stdout = io.MultiWriter(os.Stdout, b)\n\t\/\/gitcmd.Stdin = io.MultiReader(os.Stdin, b)\n\tgitcmd.Stdin = os.Stdin\n\tgitcmd.Stderr = os.Stderr\n\n\tif err = gitcmd.Run(); err != nil {\n\t\tprintln(\"execute command error:\", err.Error())\n\t}\n\n\tif !strings.HasPrefix(cmd, \"git-receive-pack\") {\n\t\treturn\n\t}\n\n\t\/\/ update\n\t\/\/w, _ := os.Create(\"serve.log\")\n\t\/\/defer w.Close()\n\t\/\/log.SetOutput(w)\n\n\tvar t = \"ok refs\/heads\/\"\n\tvar i int\n\tvar refname string\n\tfor {\n\t\tl, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\ti = i + 1\n\t\tl = l[:len(l)-1]\n\t\tidx := strings.Index(l, t)\n\t\tif idx > 0 {\n\t\t\trefname = l[idx+len(t):]\n\t\t}\n\t}\n\tvar ref *git.Reference\n\tvar ok bool\n\n\tvar l *list.List\n\t\/\/log.Info(\"----\", refname, \"-----\")\n\tif ref, ok = refs[refname]; !ok {\n\t\trefs, err = rep.AllReferencesMap()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif ref, ok = refs[refname]; !ok {\n\t\t\tprintln(\"unknow reference name -\", refname, \"-\")\n\t\t\treturn\n\t\t}\n\t\tl, err = ref.AllCommits()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/log.Info(\"----\", ref, \"-----\")\n\t\tvar last *git.Commit\n\t\t\/\/log.Info(\"00000\", ref.Oid.String())\n\t\tlast, err = ref.LastCommit()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tref2, err := rep.LookupReference(ref.Name)\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t\/\/log.Info(\"11111\", ref2.Oid.String())\n\t\tbefore, err := ref2.LastCommit()\n\t\tif err != nil {\n\t\t\tprintln(err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/log.Info(\"----\", before.Id(), \"-----\", last.Id())\n\t\tl = ref.CommitsBetween(before, last)\n\t}\n\n\tcommits := make([][]string, 0)\n\tvar maxCommits = 3\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\tcommit := e.Value.(*git.Commit)\n\t\tcommits = append(commits, []string{commit.Id().String(), commit.Message()})\n\t\tif len(commits) >= maxCommits {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err = models.CommitRepoAction(user.Id, user.Name,\n\t\trepo.Id, repoName, refname, &models.PushCommits{l.Len(), commits}); err != nil {\n\t\tlog.Error(\"runUpdate.models.CommitRepoAction: %v\", err, commits)\n\t} else {\n\t\t\/\/log.Info(\"refname\", refname)\n\t\t\/\/log.Info(\"Listen: %v\", cmd)\n\t\t\/\/fmt.Println(\"...\", cmd)\n\n\t\t\/\/runUpdate(k)\n\t\tc := exec.Command(\"exec\", \"git\", \"update-server-info\")\n\t\tc.Run()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package dskvs\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar kvCount int64\n\nconst (\n\tcoll = \"games\"\n\tbaseKey = \"total annihilation #\"\n\tgoroutines = 1\n\tbyteSize = 100\n)\n\nfunc init() {\n\tflag.Parse()\n\tif testing.Short() {\n\t\tkvCount = 2048 \/\/ To be runable with race detector\n\t} else {\n\t\tkvCount = 10000\n\t}\n\tlog.Printf(\"Concurrency Benchmark - Goroutines=%d, unique key-value=%d\\n\", goroutines, kvCount)\n}\n\ntype keyValue struct {\n\tKey string\n\tValue []byte\n}\n\ntype Context struct {\n\tt *testing.T\n\ts *Store\n\tkv keyValue\n\twg *sync.WaitGroup\n\tsem chan int\n\tdur chan time.Duration\n\terrors chan error\n}\n\nfunc TestOneOperationWithMultipleConcurrentRequest(t *testing.T) {\n\n\tlog.Printf(\"Sequence of %d bytes operations by group, \"+\n\t\t\"%d concurrent request in each groups\", byteSize, goroutines)\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount, t)\n\n\tlog.Println(\"Put - first time\")\n\tputStats := runTest(doPutRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(putStats.String())\n\n\tlog.Println(\"Put - rewrite\")\n\trePutStats := runTest(doPutRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(rePutStats.String())\n\n\tlog.Printf(\"Get\")\n\tgetStats := runTest(doGetRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(getStats.String())\n\n\tlog.Printf(\"Delete\")\n\tdeleteStats := runTest(doDeleteRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(deleteStats.String())\n\n\tlog.Printf(\"by %d cpus, using %d concurrent goroutines\\n\",\n\t\truntime.NumCPU(), goroutines)\n\n}\n\nfunc TestManyOperationWithMultipleConcurrentRequest(t *testing.T) {\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount\/3, t)\n\n\t\/\/ Start writing\/reading concurrently, in random order\n\tconcurrentFunc := func(ctx Context) {\n\t\tswitch rand.Intn(5) {\n\t\tcase 0:\n\t\t\tdoFailGetRequest(ctx)\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 1:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 2:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 3:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\t\tdoFailGetRequest(ctx)\n\t\tcase 4:\n\t\t\tdoFailGetRequest(ctx)\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t}\n\t}\n\t_ = runTest(concurrentFunc, 3, goroutines, store, expectedList, t)\n\n\t\/\/ Cleanup\n\terr := store.DeleteAll(coll)\n\tif err != nil {\n\t\tt.Fatalf(\"Error deleting all\", err)\n\t}\n\tfor _, kv := range expectedList {\n\t\tcheckGetIsEmpty(store, kv.Key, t)\n\t}\n\n}\n\nfunc TestConcurrentPutCanBeGetAllAndDeleteAll(t *testing.T) {\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount, t)\n\n\t\/\/ Don't care about the stats\n\t_ = runTest(doPutRequest, 1, 100, store, expectedList, t)\n\n\tactual, err := store.GetAll(coll)\n\tif err != nil {\n\t\tt.Errorf(\"Error on GetAll(%s), %v\", coll, err)\n\t} else if len(actual) != len(expectedList) {\n\t\tt.Errorf(\"Expected len(store.GetAll)=<%s> but was <%s>\",\n\t\t\tlen(expectedList),\n\t\t\tlen(actual))\n\t}\n\n\terr = store.DeleteAll(coll)\n\tif err != nil {\n\t\tt.Fatalf(\"Error deleting all\", err)\n\t}\n\tfor _, kv := range expectedList {\n\t\tcheckGetIsEmpty(store, kv.Key, t)\n\t}\n\n}\n\nfunc runTest(\n\ttestedFunc func(Context),\n\tnGo int,\n\tgoroutines int,\n\tstore *Store,\n\texpectedList []keyValue,\n\tt *testing.T,\n) stats {\n\n\tcErr := make(chan error)\n\tvar wg sync.WaitGroup\n\tdurations := make(chan time.Duration, len(expectedList)*nGo)\n\n\tsem := make(chan int, goroutines)\n\tfor _, kv := range expectedList {\n\t\twg.Add(nGo)\n\t\tctx := Context{\n\t\t\tt: t,\n\t\t\ts: store,\n\t\t\tkv: kv,\n\t\t\twg: &wg,\n\t\t\tsem: sem,\n\t\t\tdur: durations,\n\t\t\terrors: cErr,\n\t\t}\n\t\tgo testedFunc(ctx)\n\n\t}\n\n\twg.Wait()\n\tclose(durations)\n\n\tif len(cErr) != 0 {\n\t\tt.Fatalf(\"Failed to write values concurrently, got %d errors\",\n\t\t\tlen(cErr))\n\t}\n\n\tvar dTList []time.Duration\n\tfor dT := range durations {\n\t\tdTList = append(dTList, dT)\n\t}\n\n\treturn newStats(dTList)\n}\n\nfunc doPutRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\terr := ctx.s.Put(ctx.kv.Key, ctx.kv.Value)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.t.Fatal(\"Received an error\", err)\n\t\tctx.errors <- err\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc doGetRequest(ctx Context) {\n\tctx.sem <- 1\n\texpected := ctx.kv.Value\n\n\tt0 := time.Now()\n\tactual, err := ctx.s.Get(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.errors <- err\n\t} else if !bytes.Equal(expected, actual) {\n\t\tctx.t.Errorf(\"Expected <%s> but was <%s>\",\n\t\t\texpected,\n\t\t\tactual)\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\n\/\/ Get can fail when there's no such key, not true for Put and Delete\nfunc doFailGetRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\t_, err := ctx.s.Get(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif _, ok := err.(KeyError); !ok {\n\t\tctx.errors <- errors.New(fmt.Sprintf(\"Should have failed on Get(%s)\", ctx.kv.Key))\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc doDeleteRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\terr := ctx.s.Delete(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.t.Fatal(\"Received an error\", err)\n\t\tctx.errors <- err\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc generateKeyValueList(kvCount int64, t *testing.T) []keyValue {\n\n\tcoll := \"games\"\n\tbaseKey := \"total annihilation #\"\n\n\tkvList := make([]keyValue, kvCount)\n\tfor i := int64(0); i < kvCount; i++ {\n\t\tkey := coll + CollKeySep + baseKey + strconv.FormatInt(i, 10)\n\t\tdata := make([]byte, byteSize)\n\t\t\/\/data := generateData(Data{\"It's fun!\"}, t)\n\t\tkv := keyValue{key, data}\n\t\tkvList[i] = kv\n\t}\n\n\treturn kvList\n}\n<commit_msg>Set test to exercise concurrency, not benchmark.<commit_after>package dskvs\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar kvCount int64\n\nconst (\n\tcoll = \"games\"\n\tbaseKey = \"total annihilation #\"\n\tgoroutines = 10\n\tbyteSize = 1000\n)\n\nfunc init() {\n\tflag.Parse()\n\tif testing.Short() {\n\t\tkvCount = 2048 \/\/ To be runable with race detector\n\t} else {\n\t\tkvCount = 100000\n\t}\n\tlog.Printf(\"Concurrency Benchmark - Goroutines=%d, unique key-value=%d\\n\", goroutines, kvCount)\n}\n\ntype keyValue struct {\n\tKey string\n\tValue []byte\n}\n\ntype Context struct {\n\tt *testing.T\n\ts *Store\n\tkv keyValue\n\twg *sync.WaitGroup\n\tsem chan int\n\tdur chan time.Duration\n\terrors chan error\n}\n\nfunc TestOneOperationWithMultipleConcurrentRequest(t *testing.T) {\n\n\tlog.Printf(\"Sequence of %d bytes operations by group, \"+\n\t\t\"%d concurrent request in each groups\", byteSize, goroutines)\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount, t)\n\n\tlog.Println(\"Put - first time\")\n\tputStats := runTest(doPutRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(putStats.String())\n\n\tlog.Println(\"Put - rewrite\")\n\trePutStats := runTest(doPutRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(rePutStats.String())\n\n\tlog.Printf(\"Get\")\n\tgetStats := runTest(doGetRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(getStats.String())\n\n\tlog.Printf(\"Delete\")\n\tdeleteStats := runTest(doDeleteRequest, 1, goroutines, store, expectedList, t)\n\tlog.Println(deleteStats.String())\n\n\tlog.Printf(\"by %d cpus, using %d concurrent goroutines\\n\",\n\t\truntime.NumCPU(), goroutines)\n\n}\n\nfunc TestManyOperationWithMultipleConcurrentRequest(t *testing.T) {\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount\/3, t)\n\n\t\/\/ Start writing\/reading concurrently, in random order\n\tconcurrentFunc := func(ctx Context) {\n\t\tswitch rand.Intn(5) {\n\t\tcase 0:\n\t\t\tdoFailGetRequest(ctx)\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 1:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 2:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\tcase 3:\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoDeleteRequest(ctx)\n\t\t\tdoFailGetRequest(ctx)\n\t\tcase 4:\n\t\t\tdoFailGetRequest(ctx)\n\t\t\tdoPutRequest(ctx)\n\t\t\tdoGetRequest(ctx)\n\t\t}\n\t}\n\t_ = runTest(concurrentFunc, 3, goroutines, store, expectedList, t)\n\n\t\/\/ Cleanup\n\terr := store.DeleteAll(coll)\n\tif err != nil {\n\t\tt.Fatalf(\"Error deleting all\", err)\n\t}\n\tfor _, kv := range expectedList {\n\t\tcheckGetIsEmpty(store, kv.Key, t)\n\t}\n\n}\n\nfunc TestConcurrentPutCanBeGetAllAndDeleteAll(t *testing.T) {\n\n\tstore := setUp(t)\n\tdefer tearDown(store, t)\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))\n\n\texpectedList := generateKeyValueList(kvCount, t)\n\n\t\/\/ Don't care about the stats\n\t_ = runTest(doPutRequest, 1, 100, store, expectedList, t)\n\n\tactual, err := store.GetAll(coll)\n\tif err != nil {\n\t\tt.Errorf(\"Error on GetAll(%s), %v\", coll, err)\n\t} else if len(actual) != len(expectedList) {\n\t\tt.Errorf(\"Expected len(store.GetAll)=<%s> but was <%s>\",\n\t\t\tlen(expectedList),\n\t\t\tlen(actual))\n\t}\n\n\terr = store.DeleteAll(coll)\n\tif err != nil {\n\t\tt.Fatalf(\"Error deleting all\", err)\n\t}\n\tfor _, kv := range expectedList {\n\t\tcheckGetIsEmpty(store, kv.Key, t)\n\t}\n\n}\n\nfunc runTest(\n\ttestedFunc func(Context),\n\tnGo int,\n\tgoroutines int,\n\tstore *Store,\n\texpectedList []keyValue,\n\tt *testing.T,\n) stats {\n\n\tcErr := make(chan error)\n\tvar wg sync.WaitGroup\n\tdurations := make(chan time.Duration, len(expectedList)*nGo)\n\n\tsem := make(chan int, goroutines)\n\tfor _, kv := range expectedList {\n\t\twg.Add(nGo)\n\t\tctx := Context{\n\t\t\tt: t,\n\t\t\ts: store,\n\t\t\tkv: kv,\n\t\t\twg: &wg,\n\t\t\tsem: sem,\n\t\t\tdur: durations,\n\t\t\terrors: cErr,\n\t\t}\n\t\tgo testedFunc(ctx)\n\n\t}\n\n\twg.Wait()\n\tclose(durations)\n\n\tif len(cErr) != 0 {\n\t\tt.Fatalf(\"Failed to write values concurrently, got %d errors\",\n\t\t\tlen(cErr))\n\t}\n\n\tvar dTList []time.Duration\n\tfor dT := range durations {\n\t\tdTList = append(dTList, dT)\n\t}\n\n\treturn newStats(dTList)\n}\n\nfunc doPutRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\terr := ctx.s.Put(ctx.kv.Key, ctx.kv.Value)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.t.Fatal(\"Received an error\", err)\n\t\tctx.errors <- err\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc doGetRequest(ctx Context) {\n\tctx.sem <- 1\n\texpected := ctx.kv.Value\n\n\tt0 := time.Now()\n\tactual, err := ctx.s.Get(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.errors <- err\n\t} else if !bytes.Equal(expected, actual) {\n\t\tctx.t.Errorf(\"Expected <%s> but was <%s>\",\n\t\t\texpected,\n\t\t\tactual)\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\n\/\/ Get can fail when there's no such key, not true for Put and Delete\nfunc doFailGetRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\t_, err := ctx.s.Get(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif _, ok := err.(KeyError); !ok {\n\t\tctx.errors <- errors.New(fmt.Sprintf(\"Should have failed on Get(%s)\", ctx.kv.Key))\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc doDeleteRequest(ctx Context) {\n\tctx.sem <- 1\n\tt0 := time.Now()\n\terr := ctx.s.Delete(ctx.kv.Key)\n\tdT := time.Since(t0)\n\n\tctx.dur <- dT\n\n\tif err != nil {\n\t\tctx.t.Fatal(\"Received an error\", err)\n\t\tctx.errors <- err\n\t}\n\tctx.wg.Done()\n\t<-ctx.sem\n}\n\nfunc generateKeyValueList(kvCount int64, t *testing.T) []keyValue {\n\n\tcoll := \"games\"\n\tbaseKey := \"total annihilation #\"\n\n\tkvList := make([]keyValue, kvCount)\n\tfor i := int64(0); i < kvCount; i++ {\n\t\tkey := coll + CollKeySep + baseKey + strconv.FormatInt(i, 10)\n\t\tdata := make([]byte, byteSize)\n\t\t\/\/data := generateData(Data{\"It's fun!\"}, t)\n\t\tkv := keyValue{key, data}\n\t\tkvList[i] = kv\n\t}\n\n\treturn kvList\n}\n<|endoftext|>"} {"text":"<commit_before>package openstack_test\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"io\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/goose\/client\"\n\t\"launchpad.net\/goose\/identity\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/environs\/jujutest\"\n\t\"launchpad.net\/juju-core\/environs\/openstack\"\n\tenvtesting \"launchpad.net\/juju-core\/environs\/testing\"\n\tcoretesting \"launchpad.net\/juju-core\/testing\"\n)\n\n\/\/ generate a different bucket name for each config instance, so that\n\/\/ we are not polluted by previous test state.\nfunc randomName() string {\n\tbuf := make([]byte, 8)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error from crypto rand: %v\", err))\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n\nfunc makeTestConfig(cred *identity.Credentials) map[string]interface{} {\n\t\/\/ The following attributes hold the environment configuration\n\t\/\/ for running the OpenStack integration tests.\n\t\/\/\n\t\/\/ This is missing keys for security reasons; set the following\n\t\/\/ environment variables to make the OpenStack testing work:\n\t\/\/ access-key: $OS_USERNAME\n\t\/\/ secret-key: $OS_PASSWORD\n\t\/\/\n\tattrs := map[string]interface{}{\n\t\t\"name\": \"sample-\" + randomName(),\n\t\t\"type\": \"openstack\",\n\t\t\"auth-mode\": \"userpass\",\n\t\t\"control-bucket\": \"juju-test-\" + randomName(),\n\t\t\"ca-cert\": coretesting.CACert,\n\t\t\"ca-private-key\": coretesting.CAKey,\n\t\t\"authorized-keys\": \"fakekey\",\n\t\t\"admin-secret\": \"secret\",\n\t\t\"username\": cred.User,\n\t\t\"password\": cred.Secrets,\n\t\t\"region\": cred.Region,\n\t\t\"auth-url\": cred.URL,\n\t\t\"tenant-name\": cred.TenantName,\n\t}\n\treturn attrs\n}\n\n\/\/ Register tests to run against a real Openstack instance.\nfunc registerLiveTests(cred *identity.Credentials, testImageDetails openstack.ImageDetails) {\n\tconfig := makeTestConfig(cred)\n\tconfig[\"default-image-id\"] = testImageDetails.ImageId\n\tconfig[\"default-instance-type\"] = testImageDetails.Flavor\n\tSuite(&LiveTests{\n\t\tcred: cred,\n\t\tLiveTests: jujutest.LiveTests{\n\t\t\tTestConfig: jujutest.TestConfig{config},\n\t\t\tAttempt: *openstack.ShortAttempt,\n\t\t\t\/\/ TODO: Bug #1133263, once the infrastructure is set up,\n\t\t\t\/\/ enable The state tests on openstack\n\t\t\tCanOpenState: false,\n\t\t\t\/\/ TODO: Bug #1133272, enabling this requires mapping from\n\t\t\t\/\/ 'series' to an image id, when we have support, set\n\t\t\t\/\/ this flag to True.\n\t\t\tHasProvisioner: false,\n\t\t},\n\t})\n}\n\n\/\/ LiveTests contains tests that can be run against OpenStack deployments.\n\/\/ The deployment can be a real live instance or service doubles.\n\/\/ Each test runs using the same connection.\ntype LiveTests struct {\n\tcoretesting.LoggingSuite\n\tjujutest.LiveTests\n\tcred *identity.Credentials\n\twriteablePublicStorage environs.Storage\n}\n\nfunc (t *LiveTests) SetUpSuite(c *C) {\n\tt.LoggingSuite.SetUpSuite(c)\n\t\/\/ Update some Config items now that we have services running.\n\t\/\/ This is setting the public-bucket-url and auth-url because that\n\t\/\/ information is set during startup of the localLiveSuite\n\tcl := client.NewClient(t.cred, identity.AuthUserPass, nil)\n\terr := cl.Authenticate()\n\tc.Assert(err, IsNil)\n\tpublicBucketURL, err := cl.MakeServiceURL(\"object-store\", nil)\n\tc.Assert(err, IsNil)\n\tt.TestConfig.UpdateConfig(map[string]interface{}{\n\t\t\"public-bucket-url\": publicBucketURL,\n\t\t\"auth-url\": t.cred.URL,\n\t})\n\tt.LiveTests.SetUpSuite(c)\n\t\/\/ Environ.PublicStorage() is read only.\n\t\/\/ For testing, we create a specific storage instance which is authorised to write to\n\t\/\/ the public storage bucket so that we can upload files for testing.\n\tt.writeablePublicStorage = openstack.WritablePublicStorage(t.Env)\n\t\/\/ Put some fake tools in place so that tests that are simply\n\t\/\/ starting instances without any need to check if those instances\n\t\/\/ are running will find them in the public bucket.\n\tenvtesting.UploadFakeTools(c, t.writeablePublicStorage)\n}\n\nfunc (t *LiveTests) TearDownSuite(c *C) {\n\tif t.Env == nil {\n\t\t\/\/ This can happen if SetUpSuite fails.\n\t\treturn\n\t}\n\tif t.writeablePublicStorage != nil {\n\t\tenvtesting.RemoveFakeTools(c, t.writeablePublicStorage)\n\t}\n\tt.LiveTests.TearDownSuite(c)\n\tt.LoggingSuite.TearDownSuite(c)\n}\n\nfunc (t *LiveTests) SetUpTest(c *C) {\n\tt.LoggingSuite.SetUpTest(c)\n\tt.LiveTests.SetUpTest(c)\n}\n\nfunc (t *LiveTests) TearDownTest(c *C) {\n\tt.LiveTests.TearDownTest(c)\n\tt.LoggingSuite.TearDownTest(c)\n}\n<commit_msg>Enable extra live tests for openstack<commit_after>package openstack_test\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"io\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/goose\/client\"\n\t\"launchpad.net\/goose\/identity\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/environs\/jujutest\"\n\t\"launchpad.net\/juju-core\/environs\/openstack\"\n\tenvtesting \"launchpad.net\/juju-core\/environs\/testing\"\n\tcoretesting \"launchpad.net\/juju-core\/testing\"\n)\n\n\/\/ generate a different bucket name for each config instance, so that\n\/\/ we are not polluted by previous test state.\nfunc randomName() string {\n\tbuf := make([]byte, 8)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error from crypto rand: %v\", err))\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n\nfunc makeTestConfig(cred *identity.Credentials) map[string]interface{} {\n\t\/\/ The following attributes hold the environment configuration\n\t\/\/ for running the OpenStack integration tests.\n\t\/\/\n\t\/\/ This is missing keys for security reasons; set the following\n\t\/\/ environment variables to make the OpenStack testing work:\n\t\/\/ access-key: $OS_USERNAME\n\t\/\/ secret-key: $OS_PASSWORD\n\t\/\/\n\tattrs := map[string]interface{}{\n\t\t\"name\": \"sample-\" + randomName(),\n\t\t\"type\": \"openstack\",\n\t\t\"auth-mode\": \"userpass\",\n\t\t\"control-bucket\": \"juju-test-\" + randomName(),\n\t\t\"ca-cert\": coretesting.CACert,\n\t\t\"ca-private-key\": coretesting.CAKey,\n\t\t\"authorized-keys\": \"fakekey\",\n\t\t\"admin-secret\": \"secret\",\n\t\t\"username\": cred.User,\n\t\t\"password\": cred.Secrets,\n\t\t\"region\": cred.Region,\n\t\t\"auth-url\": cred.URL,\n\t\t\"tenant-name\": cred.TenantName,\n\t}\n\treturn attrs\n}\n\n\/\/ Register tests to run against a real Openstack instance.\nfunc registerLiveTests(cred *identity.Credentials, testImageDetails openstack.ImageDetails) {\n\tconfig := makeTestConfig(cred)\n\tconfig[\"default-image-id\"] = testImageDetails.ImageId\n\tconfig[\"default-instance-type\"] = testImageDetails.Flavor\n\tSuite(&LiveTests{\n\t\tcred: cred,\n\t\tLiveTests: jujutest.LiveTests{\n\t\t\tTestConfig: jujutest.TestConfig{config},\n\t\t\tAttempt: *openstack.ShortAttempt,\n\t\t\tCanOpenState: true,\n\t\t\tHasProvisioner: true,\n\t\t},\n\t})\n}\n\n\/\/ LiveTests contains tests that can be run against OpenStack deployments.\n\/\/ The deployment can be a real live instance or service doubles.\n\/\/ Each test runs using the same connection.\ntype LiveTests struct {\n\tcoretesting.LoggingSuite\n\tjujutest.LiveTests\n\tcred *identity.Credentials\n\twriteablePublicStorage environs.Storage\n}\n\nfunc (t *LiveTests) SetUpSuite(c *C) {\n\tt.LoggingSuite.SetUpSuite(c)\n\t\/\/ Update some Config items now that we have services running.\n\t\/\/ This is setting the public-bucket-url and auth-url because that\n\t\/\/ information is set during startup of the localLiveSuite\n\tcl := client.NewClient(t.cred, identity.AuthUserPass, nil)\n\terr := cl.Authenticate()\n\tc.Assert(err, IsNil)\n\tpublicBucketURL, err := cl.MakeServiceURL(\"object-store\", nil)\n\tc.Assert(err, IsNil)\n\tt.TestConfig.UpdateConfig(map[string]interface{}{\n\t\t\"public-bucket-url\": publicBucketURL,\n\t\t\"auth-url\": t.cred.URL,\n\t})\n\tt.LiveTests.SetUpSuite(c)\n\t\/\/ Environ.PublicStorage() is read only.\n\t\/\/ For testing, we create a specific storage instance which is authorised to write to\n\t\/\/ the public storage bucket so that we can upload files for testing.\n\tt.writeablePublicStorage = openstack.WritablePublicStorage(t.Env)\n\t\/\/ Put some fake tools in place so that tests that are simply\n\t\/\/ starting instances without any need to check if those instances\n\t\/\/ are running will find them in the public bucket.\n\tenvtesting.UploadFakeTools(c, t.writeablePublicStorage)\n}\n\nfunc (t *LiveTests) TearDownSuite(c *C) {\n\tif t.Env == nil {\n\t\t\/\/ This can happen if SetUpSuite fails.\n\t\treturn\n\t}\n\tif t.writeablePublicStorage != nil {\n\t\tenvtesting.RemoveFakeTools(c, t.writeablePublicStorage)\n\t}\n\tt.LiveTests.TearDownSuite(c)\n\tt.LoggingSuite.TearDownSuite(c)\n}\n\nfunc (t *LiveTests) SetUpTest(c *C) {\n\tt.LoggingSuite.SetUpTest(c)\n\tt.LiveTests.SetUpTest(c)\n}\n\nfunc (t *LiveTests) TearDownTest(c *C) {\n\tt.LiveTests.TearDownTest(c)\n\tt.LoggingSuite.TearDownTest(c)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tls\n\nimport (\n\t\"crypto\"\n\t\"crypto\/md5\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ a keyAgreement implements the client and server side of a TLS key agreement\n\/\/ protocol by generating and processing key exchange messages.\ntype keyAgreement interface {\n\t\/\/ On the server side, the first two methods are called in order.\n\n\t\/\/ In the case that the key agreement protocol doesn't use a\n\t\/\/ ServerKeyExchange message, generateServerKeyExchange can return nil,\n\t\/\/ nil.\n\tgenerateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)\n\tprocessClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)\n\n\t\/\/ On the client side, the next two methods are called in order.\n\n\t\/\/ This method may not be called if the server doesn't send a\n\t\/\/ ServerKeyExchange message.\n\tprocessServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error\n\tgenerateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)\n}\n\nvar errClientKeyExchange = errors.New(\"tls: invalid ClientKeyExchange message\")\nvar errServerKeyExchange = errors.New(\"tls: invalid ServerKeyExchange message\")\n\n\/\/ rsaKeyAgreement implements the standard TLS key agreement where the client\n\/\/ encrypts the pre-master secret to the server's public key.\ntype rsaKeyAgreement struct{}\n\nfunc (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {\n\treturn nil, nil\n}\n\nfunc (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {\n\tif len(ckx.ciphertext) < 2 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\tciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])\n\tif ciphertextLen != len(ckx.ciphertext)-2 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\tciphertext := ckx.ciphertext[2:]\n\n\tpriv, ok := cert.PrivateKey.(crypto.Decrypter)\n\tif !ok {\n\t\treturn nil, errors.New(\"tls: certificate private key does not implement crypto.Decrypter\")\n\t}\n\t\/\/ Perform constant time RSA PKCS #1 v1.5 decryption\n\tpreMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ We don't check the version number in the premaster secret. For one,\n\t\/\/ by checking it, we would leak information about the validity of the\n\t\/\/ encrypted pre-master secret. Secondly, it provides only a small\n\t\/\/ benefit against a downgrade attack and some implementations send the\n\t\/\/ wrong version anyway. See the discussion at the end of section\n\t\/\/ 7.4.7.1 of RFC 4346.\n\treturn preMasterSecret, nil\n}\n\nfunc (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {\n\treturn errors.New(\"tls: unexpected ServerKeyExchange\")\n}\n\nfunc (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {\n\tpreMasterSecret := make([]byte, 48)\n\tpreMasterSecret[0] = byte(clientHello.vers >> 8)\n\tpreMasterSecret[1] = byte(clientHello.vers)\n\t_, err := io.ReadFull(config.rand(), preMasterSecret[2:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tencrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tckx := new(clientKeyExchangeMsg)\n\tckx.ciphertext = make([]byte, len(encrypted)+2)\n\tckx.ciphertext[0] = byte(len(encrypted) >> 8)\n\tckx.ciphertext[1] = byte(len(encrypted))\n\tcopy(ckx.ciphertext[2:], encrypted)\n\treturn preMasterSecret, ckx, nil\n}\n\n\/\/ sha1Hash calculates a SHA1 hash over the given byte slices.\nfunc sha1Hash(slices [][]byte) []byte {\n\thsha1 := sha1.New()\n\tfor _, slice := range slices {\n\t\thsha1.Write(slice)\n\t}\n\treturn hsha1.Sum(nil)\n}\n\n\/\/ md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the\n\/\/ concatenation of an MD5 and SHA1 hash.\nfunc md5SHA1Hash(slices [][]byte) []byte {\n\tmd5sha1 := make([]byte, md5.Size+sha1.Size)\n\thmd5 := md5.New()\n\tfor _, slice := range slices {\n\t\thmd5.Write(slice)\n\t}\n\tcopy(md5sha1, hmd5.Sum(nil))\n\tcopy(md5sha1[md5.Size:], sha1Hash(slices))\n\treturn md5sha1\n}\n\n\/\/ hashForServerKeyExchange hashes the given slices and returns their digest\n\/\/ using the given hash function (for >= TLS 1.2) or using a default based on\n\/\/ the sigType (for earlier TLS versions). For Ed25519 signatures, which don't\n\/\/ do pre-hashing, it returns the concatenation of the slices.\nfunc hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint16, slices ...[]byte) []byte {\n\tif sigType == signatureEd25519 {\n\t\tvar signed []byte\n\t\tfor _, slice := range slices {\n\t\t\tsigned = append(signed, slice...)\n\t\t}\n\t\treturn signed\n\t}\n\tif version >= VersionTLS12 {\n\t\th := hashFunc.New()\n\t\tfor _, slice := range slices {\n\t\t\th.Write(slice)\n\t\t}\n\t\tdigest := h.Sum(nil)\n\t\treturn digest\n\t}\n\tif sigType == signatureECDSA {\n\t\treturn sha1Hash(slices)\n\t}\n\treturn md5SHA1Hash(slices)\n}\n\n\/\/ ecdheKeyAgreement implements a TLS key agreement where the server\n\/\/ generates an ephemeral EC public\/private key pair and signs it. The\n\/\/ pre-master secret is then calculated using ECDH. The signature may\n\/\/ be ECDSA, Ed25519 or RSA.\ntype ecdheKeyAgreement struct {\n\tversion uint16\n\tisRSA bool\n\tparams ecdheParameters\n\n\t\/\/ ckx and preMasterSecret are generated in processServerKeyExchange\n\t\/\/ and returned in generateClientKeyExchange.\n\tckx *clientKeyExchangeMsg\n\tpreMasterSecret []byte\n}\n\nfunc (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {\n\tvar curveID CurveID\n\tfor _, c := range clientHello.supportedCurves {\n\t\tif config.supportsCurve(c) {\n\t\t\tcurveID = c\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif curveID == 0 {\n\t\treturn nil, errors.New(\"tls: no supported elliptic curves offered\")\n\t}\n\tif _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {\n\t\treturn nil, errors.New(\"tls: CurvePreferences includes unsupported curve\")\n\t}\n\n\tparams, err := generateECDHEParameters(config.rand(), curveID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tka.params = params\n\n\t\/\/ See RFC 4492, Section 5.4.\n\tecdhePublic := params.PublicKey()\n\tserverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic))\n\tserverECDHEParams[0] = 3 \/\/ named curve\n\tserverECDHEParams[1] = byte(curveID >> 8)\n\tserverECDHEParams[2] = byte(curveID)\n\tserverECDHEParams[3] = byte(len(ecdhePublic))\n\tcopy(serverECDHEParams[4:], ecdhePublic)\n\n\tpriv, ok := cert.PrivateKey.(crypto.Signer)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"tls: certificate private key of type %T does not implement crypto.Signer\", cert.PrivateKey)\n\t}\n\n\tvar signatureAlgorithm SignatureScheme\n\tvar sigType uint8\n\tvar sigHash crypto.Hash\n\tif ka.version >= VersionTLS12 {\n\t\tsignatureAlgorithm, err = selectSignatureScheme(ka.version, cert, clientHello.supportedSignatureAlgorithms)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tsigType, sigHash, err = legacyTypeAndHashFromPublicKey(priv.Public())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {\n\t\treturn nil, errors.New(\"tls: certificate cannot be used with the selected cipher suite\")\n\t}\n\n\tsigned := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, hello.random, serverECDHEParams)\n\n\tsignOpts := crypto.SignerOpts(sigHash)\n\tif sigType == signatureRSAPSS {\n\t\tsignOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}\n\t}\n\tsig, err := priv.Sign(config.rand(), signed, signOpts)\n\tif err != nil {\n\t\treturn nil, errors.New(\"tls: failed to sign ECDHE parameters: \" + err.Error())\n\t}\n\n\tskx := new(serverKeyExchangeMsg)\n\tsigAndHashLen := 0\n\tif ka.version >= VersionTLS12 {\n\t\tsigAndHashLen = 2\n\t}\n\tskx.key = make([]byte, len(serverECDHEParams)+sigAndHashLen+2+len(sig))\n\tcopy(skx.key, serverECDHEParams)\n\tk := skx.key[len(serverECDHEParams):]\n\tif ka.version >= VersionTLS12 {\n\t\tk[0] = byte(signatureAlgorithm >> 8)\n\t\tk[1] = byte(signatureAlgorithm)\n\t\tk = k[2:]\n\t}\n\tk[0] = byte(len(sig) >> 8)\n\tk[1] = byte(len(sig))\n\tcopy(k[2:], sig)\n\n\treturn skx, nil\n}\n\nfunc (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {\n\tif len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\n\tpreMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:])\n\tif preMasterSecret == nil {\n\t\treturn nil, errClientKeyExchange\n\t}\n\n\treturn preMasterSecret, nil\n}\n\nfunc (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {\n\tif len(skx.key) < 4 {\n\t\treturn errServerKeyExchange\n\t}\n\tif skx.key[0] != 3 { \/\/ named curve\n\t\treturn errors.New(\"tls: server selected unsupported curve\")\n\t}\n\tcurveID := CurveID(skx.key[1])<<8 | CurveID(skx.key[2])\n\n\tpublicLen := int(skx.key[3])\n\tif publicLen+4 > len(skx.key) {\n\t\treturn errServerKeyExchange\n\t}\n\tserverECDHEParams := skx.key[:4+publicLen]\n\tpublicKey := serverECDHEParams[4:]\n\n\tsig := skx.key[4+publicLen:]\n\tif len(sig) < 2 {\n\t\treturn errServerKeyExchange\n\t}\n\n\tif _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {\n\t\treturn errors.New(\"tls: server selected unsupported curve\")\n\t}\n\n\tparams, err := generateECDHEParameters(config.rand(), curveID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tka.params = params\n\n\tka.preMasterSecret = params.SharedKey(publicKey)\n\tif ka.preMasterSecret == nil {\n\t\treturn errServerKeyExchange\n\t}\n\n\tourPublicKey := params.PublicKey()\n\tka.ckx = new(clientKeyExchangeMsg)\n\tka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey))\n\tka.ckx.ciphertext[0] = byte(len(ourPublicKey))\n\tcopy(ka.ckx.ciphertext[1:], ourPublicKey)\n\n\tvar sigType uint8\n\tvar sigHash crypto.Hash\n\tif ka.version >= VersionTLS12 {\n\t\tsignatureAlgorithm := SignatureScheme(sig[0])<<8 | SignatureScheme(sig[1])\n\t\tsig = sig[2:]\n\t\tif len(sig) < 2 {\n\t\t\treturn errServerKeyExchange\n\t\t}\n\n\t\tif !isSupportedSignatureAlgorithm(signatureAlgorithm, clientHello.supportedSignatureAlgorithms) {\n\t\t\treturn errors.New(\"tls: certificate used with invalid signature algorithm\")\n\t\t}\n\t\tsigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsigType, sigHash, err = legacyTypeAndHashFromPublicKey(cert.PublicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {\n\t\treturn errServerKeyExchange\n\t}\n\n\tsigLen := int(sig[0])<<8 | int(sig[1])\n\tif sigLen+2 != len(sig) {\n\t\treturn errServerKeyExchange\n\t}\n\tsig = sig[2:]\n\n\tsigned := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, serverHello.random, serverECDHEParams)\n\tif err := verifyHandshakeSignature(sigType, cert.PublicKey, sigHash, signed, sig); err != nil {\n\t\treturn errors.New(\"tls: invalid signature by the server certificate: \" + err.Error())\n\t}\n\treturn nil\n}\n\nfunc (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {\n\tif ka.ckx == nil {\n\t\treturn nil, nil, errors.New(\"tls: missing ServerKeyExchange message\")\n\t}\n\n\treturn ka.preMasterSecret, ka.ckx, nil\n}\n<commit_msg>crypto\/tls: test key type when casting<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tls\n\nimport (\n\t\"crypto\"\n\t\"crypto\/md5\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ a keyAgreement implements the client and server side of a TLS key agreement\n\/\/ protocol by generating and processing key exchange messages.\ntype keyAgreement interface {\n\t\/\/ On the server side, the first two methods are called in order.\n\n\t\/\/ In the case that the key agreement protocol doesn't use a\n\t\/\/ ServerKeyExchange message, generateServerKeyExchange can return nil,\n\t\/\/ nil.\n\tgenerateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)\n\tprocessClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)\n\n\t\/\/ On the client side, the next two methods are called in order.\n\n\t\/\/ This method may not be called if the server doesn't send a\n\t\/\/ ServerKeyExchange message.\n\tprocessServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error\n\tgenerateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)\n}\n\nvar errClientKeyExchange = errors.New(\"tls: invalid ClientKeyExchange message\")\nvar errServerKeyExchange = errors.New(\"tls: invalid ServerKeyExchange message\")\n\n\/\/ rsaKeyAgreement implements the standard TLS key agreement where the client\n\/\/ encrypts the pre-master secret to the server's public key.\ntype rsaKeyAgreement struct{}\n\nfunc (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {\n\treturn nil, nil\n}\n\nfunc (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {\n\tif len(ckx.ciphertext) < 2 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\tciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])\n\tif ciphertextLen != len(ckx.ciphertext)-2 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\tciphertext := ckx.ciphertext[2:]\n\n\tpriv, ok := cert.PrivateKey.(crypto.Decrypter)\n\tif !ok {\n\t\treturn nil, errors.New(\"tls: certificate private key does not implement crypto.Decrypter\")\n\t}\n\t\/\/ Perform constant time RSA PKCS #1 v1.5 decryption\n\tpreMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ We don't check the version number in the premaster secret. For one,\n\t\/\/ by checking it, we would leak information about the validity of the\n\t\/\/ encrypted pre-master secret. Secondly, it provides only a small\n\t\/\/ benefit against a downgrade attack and some implementations send the\n\t\/\/ wrong version anyway. See the discussion at the end of section\n\t\/\/ 7.4.7.1 of RFC 4346.\n\treturn preMasterSecret, nil\n}\n\nfunc (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {\n\treturn errors.New(\"tls: unexpected ServerKeyExchange\")\n}\n\nfunc (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {\n\tpreMasterSecret := make([]byte, 48)\n\tpreMasterSecret[0] = byte(clientHello.vers >> 8)\n\tpreMasterSecret[1] = byte(clientHello.vers)\n\t_, err := io.ReadFull(config.rand(), preMasterSecret[2:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\trsaKey, ok := cert.PublicKey.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"tls: server certificate contains incorrect key type for selected ciphersuite\")\n\t}\n\tencrypted, err := rsa.EncryptPKCS1v15(config.rand(), rsaKey, preMasterSecret)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tckx := new(clientKeyExchangeMsg)\n\tckx.ciphertext = make([]byte, len(encrypted)+2)\n\tckx.ciphertext[0] = byte(len(encrypted) >> 8)\n\tckx.ciphertext[1] = byte(len(encrypted))\n\tcopy(ckx.ciphertext[2:], encrypted)\n\treturn preMasterSecret, ckx, nil\n}\n\n\/\/ sha1Hash calculates a SHA1 hash over the given byte slices.\nfunc sha1Hash(slices [][]byte) []byte {\n\thsha1 := sha1.New()\n\tfor _, slice := range slices {\n\t\thsha1.Write(slice)\n\t}\n\treturn hsha1.Sum(nil)\n}\n\n\/\/ md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the\n\/\/ concatenation of an MD5 and SHA1 hash.\nfunc md5SHA1Hash(slices [][]byte) []byte {\n\tmd5sha1 := make([]byte, md5.Size+sha1.Size)\n\thmd5 := md5.New()\n\tfor _, slice := range slices {\n\t\thmd5.Write(slice)\n\t}\n\tcopy(md5sha1, hmd5.Sum(nil))\n\tcopy(md5sha1[md5.Size:], sha1Hash(slices))\n\treturn md5sha1\n}\n\n\/\/ hashForServerKeyExchange hashes the given slices and returns their digest\n\/\/ using the given hash function (for >= TLS 1.2) or using a default based on\n\/\/ the sigType (for earlier TLS versions). For Ed25519 signatures, which don't\n\/\/ do pre-hashing, it returns the concatenation of the slices.\nfunc hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint16, slices ...[]byte) []byte {\n\tif sigType == signatureEd25519 {\n\t\tvar signed []byte\n\t\tfor _, slice := range slices {\n\t\t\tsigned = append(signed, slice...)\n\t\t}\n\t\treturn signed\n\t}\n\tif version >= VersionTLS12 {\n\t\th := hashFunc.New()\n\t\tfor _, slice := range slices {\n\t\t\th.Write(slice)\n\t\t}\n\t\tdigest := h.Sum(nil)\n\t\treturn digest\n\t}\n\tif sigType == signatureECDSA {\n\t\treturn sha1Hash(slices)\n\t}\n\treturn md5SHA1Hash(slices)\n}\n\n\/\/ ecdheKeyAgreement implements a TLS key agreement where the server\n\/\/ generates an ephemeral EC public\/private key pair and signs it. The\n\/\/ pre-master secret is then calculated using ECDH. The signature may\n\/\/ be ECDSA, Ed25519 or RSA.\ntype ecdheKeyAgreement struct {\n\tversion uint16\n\tisRSA bool\n\tparams ecdheParameters\n\n\t\/\/ ckx and preMasterSecret are generated in processServerKeyExchange\n\t\/\/ and returned in generateClientKeyExchange.\n\tckx *clientKeyExchangeMsg\n\tpreMasterSecret []byte\n}\n\nfunc (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {\n\tvar curveID CurveID\n\tfor _, c := range clientHello.supportedCurves {\n\t\tif config.supportsCurve(c) {\n\t\t\tcurveID = c\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif curveID == 0 {\n\t\treturn nil, errors.New(\"tls: no supported elliptic curves offered\")\n\t}\n\tif _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {\n\t\treturn nil, errors.New(\"tls: CurvePreferences includes unsupported curve\")\n\t}\n\n\tparams, err := generateECDHEParameters(config.rand(), curveID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tka.params = params\n\n\t\/\/ See RFC 4492, Section 5.4.\n\tecdhePublic := params.PublicKey()\n\tserverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic))\n\tserverECDHEParams[0] = 3 \/\/ named curve\n\tserverECDHEParams[1] = byte(curveID >> 8)\n\tserverECDHEParams[2] = byte(curveID)\n\tserverECDHEParams[3] = byte(len(ecdhePublic))\n\tcopy(serverECDHEParams[4:], ecdhePublic)\n\n\tpriv, ok := cert.PrivateKey.(crypto.Signer)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"tls: certificate private key of type %T does not implement crypto.Signer\", cert.PrivateKey)\n\t}\n\n\tvar signatureAlgorithm SignatureScheme\n\tvar sigType uint8\n\tvar sigHash crypto.Hash\n\tif ka.version >= VersionTLS12 {\n\t\tsignatureAlgorithm, err = selectSignatureScheme(ka.version, cert, clientHello.supportedSignatureAlgorithms)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tsigType, sigHash, err = legacyTypeAndHashFromPublicKey(priv.Public())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {\n\t\treturn nil, errors.New(\"tls: certificate cannot be used with the selected cipher suite\")\n\t}\n\n\tsigned := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, hello.random, serverECDHEParams)\n\n\tsignOpts := crypto.SignerOpts(sigHash)\n\tif sigType == signatureRSAPSS {\n\t\tsignOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}\n\t}\n\tsig, err := priv.Sign(config.rand(), signed, signOpts)\n\tif err != nil {\n\t\treturn nil, errors.New(\"tls: failed to sign ECDHE parameters: \" + err.Error())\n\t}\n\n\tskx := new(serverKeyExchangeMsg)\n\tsigAndHashLen := 0\n\tif ka.version >= VersionTLS12 {\n\t\tsigAndHashLen = 2\n\t}\n\tskx.key = make([]byte, len(serverECDHEParams)+sigAndHashLen+2+len(sig))\n\tcopy(skx.key, serverECDHEParams)\n\tk := skx.key[len(serverECDHEParams):]\n\tif ka.version >= VersionTLS12 {\n\t\tk[0] = byte(signatureAlgorithm >> 8)\n\t\tk[1] = byte(signatureAlgorithm)\n\t\tk = k[2:]\n\t}\n\tk[0] = byte(len(sig) >> 8)\n\tk[1] = byte(len(sig))\n\tcopy(k[2:], sig)\n\n\treturn skx, nil\n}\n\nfunc (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {\n\tif len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {\n\t\treturn nil, errClientKeyExchange\n\t}\n\n\tpreMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:])\n\tif preMasterSecret == nil {\n\t\treturn nil, errClientKeyExchange\n\t}\n\n\treturn preMasterSecret, nil\n}\n\nfunc (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {\n\tif len(skx.key) < 4 {\n\t\treturn errServerKeyExchange\n\t}\n\tif skx.key[0] != 3 { \/\/ named curve\n\t\treturn errors.New(\"tls: server selected unsupported curve\")\n\t}\n\tcurveID := CurveID(skx.key[1])<<8 | CurveID(skx.key[2])\n\n\tpublicLen := int(skx.key[3])\n\tif publicLen+4 > len(skx.key) {\n\t\treturn errServerKeyExchange\n\t}\n\tserverECDHEParams := skx.key[:4+publicLen]\n\tpublicKey := serverECDHEParams[4:]\n\n\tsig := skx.key[4+publicLen:]\n\tif len(sig) < 2 {\n\t\treturn errServerKeyExchange\n\t}\n\n\tif _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {\n\t\treturn errors.New(\"tls: server selected unsupported curve\")\n\t}\n\n\tparams, err := generateECDHEParameters(config.rand(), curveID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tka.params = params\n\n\tka.preMasterSecret = params.SharedKey(publicKey)\n\tif ka.preMasterSecret == nil {\n\t\treturn errServerKeyExchange\n\t}\n\n\tourPublicKey := params.PublicKey()\n\tka.ckx = new(clientKeyExchangeMsg)\n\tka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey))\n\tka.ckx.ciphertext[0] = byte(len(ourPublicKey))\n\tcopy(ka.ckx.ciphertext[1:], ourPublicKey)\n\n\tvar sigType uint8\n\tvar sigHash crypto.Hash\n\tif ka.version >= VersionTLS12 {\n\t\tsignatureAlgorithm := SignatureScheme(sig[0])<<8 | SignatureScheme(sig[1])\n\t\tsig = sig[2:]\n\t\tif len(sig) < 2 {\n\t\t\treturn errServerKeyExchange\n\t\t}\n\n\t\tif !isSupportedSignatureAlgorithm(signatureAlgorithm, clientHello.supportedSignatureAlgorithms) {\n\t\t\treturn errors.New(\"tls: certificate used with invalid signature algorithm\")\n\t\t}\n\t\tsigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsigType, sigHash, err = legacyTypeAndHashFromPublicKey(cert.PublicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {\n\t\treturn errServerKeyExchange\n\t}\n\n\tsigLen := int(sig[0])<<8 | int(sig[1])\n\tif sigLen+2 != len(sig) {\n\t\treturn errServerKeyExchange\n\t}\n\tsig = sig[2:]\n\n\tsigned := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, serverHello.random, serverECDHEParams)\n\tif err := verifyHandshakeSignature(sigType, cert.PublicKey, sigHash, signed, sig); err != nil {\n\t\treturn errors.New(\"tls: invalid signature by the server certificate: \" + err.Error())\n\t}\n\treturn nil\n}\n\nfunc (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {\n\tif ka.ckx == nil {\n\t\treturn nil, nil, errors.New(\"tls: missing ServerKeyExchange message\")\n\t}\n\n\treturn ka.preMasterSecret, ka.ckx, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Tor websocket server transport plugin.\n\/\/\n\/\/ Usage:\n\/\/ ServerTransportPlugin websocket exec .\/websocket-server --port 9901\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n)\n\nvar ptInfo PtServerInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc logDebug(format string, v ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format+\"\\n\", v...)\n}\n\n\/\/ An abstraction that makes an underlying WebSocket connection look like an\n\/\/ io.ReadWriteCloser. It internally takes care of things like base64 encoding and\n\/\/ decoding.\ntype websocketConn struct {\n\tWs *Websocket\n\tBase64 bool\n\tmessageBuf []byte\n}\n\n\/\/ Implements io.Reader.\nfunc (conn *websocketConn) Read(b []byte) (n int, err error) {\n\tfor len(conn.messageBuf) == 0 {\n\t\tvar m WebsocketMessage\n\t\tm, err = conn.Ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif m.Opcode == 8 {\n\t\t\terr = io.EOF\n\t\t\treturn\n\t\t}\n\t\tif conn.Base64 {\n\t\t\tif m.Opcode != 1 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-text opcode %d with the base64 subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = make([]byte, base64.StdEncoding.DecodedLen(len(m.Payload)))\n\t\t\tvar num int\n\t\t\tnum, err = base64.StdEncoding.Decode(conn.messageBuf, m.Payload)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = conn.messageBuf[:num]\n\t\t} else {\n\t\t\tif m.Opcode != 2 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-binary opcode %d with no subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = m.Payload\n\t\t}\n\t}\n\n\tn = copy(b, conn.messageBuf)\n\tconn.messageBuf = conn.messageBuf[n:]\n\n\treturn\n}\n\n\/\/ Implements io.Writer.\nfunc (conn *websocketConn) Write(b []byte) (n int, err error) {\n\tif conn.Base64 {\n\t\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(b)))\n\t\tbase64.StdEncoding.Encode(buf, b)\n\t\terr = conn.Ws.WriteMessage(1, buf)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn = len(b)\n\t} else {\n\t\terr = conn.Ws.WriteMessage(2, b)\n\t\tn = len(b)\n\t}\n\treturn\n}\n\n\/\/ Implements io.Closer.\nfunc (conn *websocketConn) Close() (err error) {\n\terr = conn.Ws.WriteFrame(8, nil)\n\tif err != nil {\n\t\tconn.Ws.Conn.Close()\n\t\treturn\n\t}\n\terr = conn.Ws.Conn.Close()\n\treturn\n}\n\n\/\/ Create a new websocketConn.\nfunc NewWebsocketConn(ws *Websocket) websocketConn {\n\tvar conn websocketConn\n\tconn.Ws = ws\n\tconn.Base64 = (ws.Subprotocol == \"base64\")\n\treturn conn\n}\n\n\/\/ Copy from WebSocket to socket and vice versa.\nfunc proxy(local *net.TCPConn, conn *websocketConn) {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\t_, err := io.Copy(conn, local)\n\t\tif err != nil {\n\t\t\tlogDebug(\"error copying ORPort to WebSocket: \" + err.Error())\n\t\t}\n\t\tlocal.CloseRead()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(local, conn)\n\t\tif err != nil {\n\t\t\tlogDebug(\"error copying WebSocket to ORPort: \" + err.Error())\n\t\t}\n\t\tlocal.CloseWrite()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc websocketHandler(ws *Websocket) {\n\tconn := NewWebsocketConn(ws)\n\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\ts, err := net.DialTCP(\"tcp\", nil, ptInfo.OrAddr)\n\tif err != nil {\n\t\tlogDebug(\"Failed to connect to ORPort: \" + err.Error())\n\t\treturn\n\t}\n\n\tproxy(s, &conn)\n}\n\nfunc startListener(addr *net.TCPAddr) (*net.TCPListener, error) {\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tvar config WebsocketConfig\n\t\tconfig.Subprotocols = []string{\"base64\"}\n\t\tconfig.MaxMessageSize = 2500\n\t\thttp.Handle(\"\/\", config.Handler(websocketHandler))\n\t\terr = http.Serve(ln, nil)\n\t\tif err != nil {\n\t\t\tlogDebug(\"http.Serve: \" + err.Error())\n\t\t}\n\t}()\n\treturn ln, nil\n}\n\nfunc main() {\n\tconst ptMethodName = \"websocket\"\n\tvar defaultPort int\n\n\tflag.IntVar(&defaultPort, \"port\", 0, \"port to listen on if unspecified by Tor\")\n\tflag.Parse()\n\n\tptInfo = PtServerSetup([]string{ptMethodName})\n\n\tlisteners := make([]*net.TCPListener, 0)\n\tfor _, bindAddr := range ptInfo.BindAddrs {\n\t\t\/\/ Override tor's requested port (which is 0 if this transport\n\t\t\/\/ has not been run before) with the one requested by the --port\n\t\t\/\/ option.\n\t\tif defaultPort != 0 {\n\t\t\tbindAddr.Addr.Port = defaultPort\n\t\t}\n\n\t\tln, err := startListener(bindAddr.Addr)\n\t\tif err != nil {\n\t\t\tPtSmethodError(bindAddr.MethodName, err.Error())\n\t\t}\n\t\tPtSmethod(bindAddr.MethodName, ln.Addr())\n\t\tlisteners = append(listeners, ln)\n\t}\n\tPtSmethodsDone()\n\n\tvar numHandlers int = 0\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\tvar sigint bool = false\n\tfor !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlogDebug(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tsigint = false\n\tfor numHandlers != 0 && !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlogDebug(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n}\n<commit_msg>Add --log option to websocket-server.<commit_after>\/\/ Tor websocket server transport plugin.\n\/\/\n\/\/ Usage:\n\/\/ ServerTransportPlugin websocket exec .\/websocket-server --port 9901\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n)\n\nvar logFile = os.Stderr\n\nvar ptInfo PtServerInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc logDebug(format string, v ...interface{}) {\n\tfmt.Fprintf(logFile, format+\"\\n\", v...)\n}\n\n\/\/ An abstraction that makes an underlying WebSocket connection look like an\n\/\/ io.ReadWriteCloser. It internally takes care of things like base64 encoding and\n\/\/ decoding.\ntype websocketConn struct {\n\tWs *Websocket\n\tBase64 bool\n\tmessageBuf []byte\n}\n\n\/\/ Implements io.Reader.\nfunc (conn *websocketConn) Read(b []byte) (n int, err error) {\n\tfor len(conn.messageBuf) == 0 {\n\t\tvar m WebsocketMessage\n\t\tm, err = conn.Ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif m.Opcode == 8 {\n\t\t\terr = io.EOF\n\t\t\treturn\n\t\t}\n\t\tif conn.Base64 {\n\t\t\tif m.Opcode != 1 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-text opcode %d with the base64 subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = make([]byte, base64.StdEncoding.DecodedLen(len(m.Payload)))\n\t\t\tvar num int\n\t\t\tnum, err = base64.StdEncoding.Decode(conn.messageBuf, m.Payload)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = conn.messageBuf[:num]\n\t\t} else {\n\t\t\tif m.Opcode != 2 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-binary opcode %d with no subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = m.Payload\n\t\t}\n\t}\n\n\tn = copy(b, conn.messageBuf)\n\tconn.messageBuf = conn.messageBuf[n:]\n\n\treturn\n}\n\n\/\/ Implements io.Writer.\nfunc (conn *websocketConn) Write(b []byte) (n int, err error) {\n\tif conn.Base64 {\n\t\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(b)))\n\t\tbase64.StdEncoding.Encode(buf, b)\n\t\terr = conn.Ws.WriteMessage(1, buf)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn = len(b)\n\t} else {\n\t\terr = conn.Ws.WriteMessage(2, b)\n\t\tn = len(b)\n\t}\n\treturn\n}\n\n\/\/ Implements io.Closer.\nfunc (conn *websocketConn) Close() (err error) {\n\terr = conn.Ws.WriteFrame(8, nil)\n\tif err != nil {\n\t\tconn.Ws.Conn.Close()\n\t\treturn\n\t}\n\terr = conn.Ws.Conn.Close()\n\treturn\n}\n\n\/\/ Create a new websocketConn.\nfunc NewWebsocketConn(ws *Websocket) websocketConn {\n\tvar conn websocketConn\n\tconn.Ws = ws\n\tconn.Base64 = (ws.Subprotocol == \"base64\")\n\treturn conn\n}\n\n\/\/ Copy from WebSocket to socket and vice versa.\nfunc proxy(local *net.TCPConn, conn *websocketConn) {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\t_, err := io.Copy(conn, local)\n\t\tif err != nil {\n\t\t\tlogDebug(\"error copying ORPort to WebSocket: \" + err.Error())\n\t\t}\n\t\tlocal.CloseRead()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(local, conn)\n\t\tif err != nil {\n\t\t\tlogDebug(\"error copying WebSocket to ORPort: \" + err.Error())\n\t\t}\n\t\tlocal.CloseWrite()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc websocketHandler(ws *Websocket) {\n\tconn := NewWebsocketConn(ws)\n\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\ts, err := net.DialTCP(\"tcp\", nil, ptInfo.OrAddr)\n\tif err != nil {\n\t\tlogDebug(\"Failed to connect to ORPort: \" + err.Error())\n\t\treturn\n\t}\n\n\tproxy(s, &conn)\n}\n\nfunc startListener(addr *net.TCPAddr) (*net.TCPListener, error) {\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tvar config WebsocketConfig\n\t\tconfig.Subprotocols = []string{\"base64\"}\n\t\tconfig.MaxMessageSize = 2500\n\t\thttp.Handle(\"\/\", config.Handler(websocketHandler))\n\t\terr = http.Serve(ln, nil)\n\t\tif err != nil {\n\t\t\tlogDebug(\"http.Serve: \" + err.Error())\n\t\t}\n\t}()\n\treturn ln, nil\n}\n\nfunc main() {\n\tconst ptMethodName = \"websocket\"\n\tvar defaultPort int\n\tvar logFilename string\n\n\tflag.IntVar(&defaultPort, \"port\", 0, \"port to listen on if unspecified by Tor\")\n\tflag.StringVar(&logFilename, \"log\", \"\", \"log file to write to\")\n\tflag.Parse()\n\n\tif logFilename != \"\" {\n\t\tf, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't open log file %q: %s.\\n\", logFilename, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlogFile = f\n\t}\n\n\tptInfo = PtServerSetup([]string{ptMethodName})\n\n\tlisteners := make([]*net.TCPListener, 0)\n\tfor _, bindAddr := range ptInfo.BindAddrs {\n\t\t\/\/ Override tor's requested port (which is 0 if this transport\n\t\t\/\/ has not been run before) with the one requested by the --port\n\t\t\/\/ option.\n\t\tif defaultPort != 0 {\n\t\t\tbindAddr.Addr.Port = defaultPort\n\t\t}\n\n\t\tln, err := startListener(bindAddr.Addr)\n\t\tif err != nil {\n\t\t\tPtSmethodError(bindAddr.MethodName, err.Error())\n\t\t}\n\t\tPtSmethod(bindAddr.MethodName, ln.Addr())\n\t\tlisteners = append(listeners, ln)\n\t}\n\tPtSmethodsDone()\n\n\tvar numHandlers int = 0\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\tvar sigint bool = false\n\tfor !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlogDebug(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tsigint = false\n\tfor numHandlers != 0 && !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlogDebug(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package container\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\n\/\/ Runner runs commands specified by a Command. The default implementation uses\n\/\/ Docker, but MockExecutor also implements this interface and may be used\n\/\/ during testing (by temporarily changing the DefaultRunner variable). The sole\n\/\/ purpose of this interface is to allow mocking.\ntype Runner interface {\n\tRun(*Command) ([]byte, error)\n}\n\nvar DefaultRunner Runner = dockerRunner{}\n\nvar (\n\tRunRetries = 3\n\tBuildRetries = 3\n)\n\ntype dockerRunner struct{}\n\nfunc (_ dockerRunner) Run(c *Command) ([]byte, error) {\n\ttmpDir, err := ioutil.TempDir(\"\", \"sg-docker\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\timage := \"sg-temp:\" + filepath.Base(tmpDir)\n\tlog.Printf(\"Running image %s\", image)\n\n\tcmdJSON, err := json.Marshal(c.Cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build Docker container.\n\tdockerfile := c.Dockerfile\n\n\t\/\/ AddFiles\n\tfor i, f := range c.AddFiles {\n\t\tif i == 0 {\n\t\t\tdockerfile = append(dockerfile, '\\n')\n\t\t}\n\n\t\t\/\/ Copy to the Docker context dir so we can ADD it.\n\t\tname := filepath.Base(f[0])\n\t\terr = cp(f[0], filepath.Join(tmpDir, name))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"ADD %s %s\\n\", \".\/\"+name, f[1]))...)\n\t}\n\n\t\/\/ AddDirs\n\tfor i, d := range c.AddDirs {\n\t\tif i == 0 {\n\t\t\tdockerfile = append(dockerfile, '\\n')\n\t\t}\n\n\t\t\/\/ Move repository to Docker context dir and ADD it.\n\t\tdirName := filepath.Base(d[0])\n\t\terr := cp(d[0], filepath.Join(tmpDir, dirName))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"ADD %s %s\\n\", \".\/\"+dirName, d[1]))...)\n\t}\n\n\tdockerfile = append(dockerfile, c.PreCmdDockerfile...)\n\n\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"\\nCMD %s\", cmdJSON))...)\n\terr = ioutil.WriteFile(filepath.Join(tmpDir, \"Dockerfile\"), []byte(dockerfile), 0700)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuildCmd := exec.Command(\"docker\", \"build\", \"--rm=false\", \"-t\", image, \".\")\n\tbuildCmd.Dir = tmpDir\n\tbuildCmd.Stdout, buildCmd.Stderr = c.Stderr, c.Stderr\n\n\tfor i := 0; i < BuildRetries; i++ {\n\t\tremainingAttempts := RunRetries - i - 1\n\t\terr = buildCmd.Run()\n\t\tif err != nil {\n\t\t\tif remainingAttempts == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Command failed: %v: %s (retrying %d more times)\", buildCmd.Args, err, remainingAttempts)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\tfor i := 0; i < RunRetries; i++ {\n\t\tremainingAttempts := RunRetries - i - 1\n\t\trunOptions := append([]string{}, c.RunOptions...)\n\t\tif c.Dir != \"\" {\n\t\t\trunOptions = append(runOptions, \"--workdir=\"+c.Dir)\n\t\t}\n\t\targs := append([]string{\"run\"}, runOptions...)\n\t\targs = append(args, image)\n\t\trunCmd := exec.Command(\"docker\", args...)\n\t\trunCmd.Stderr = os.Stderr\n\n\t\t\/\/ Get original output.\n\t\tdata, err := runCmd.Output()\n\t\tif err != nil {\n\t\t\tif remainingAttempts == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Command failed: %v: %s (retrying %d more times)\", runCmd.Args, err, remainingAttempts)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Transform.\n\t\tif c.Transform != nil {\n\t\t\tdata, err = c.Transform(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tpanic(\"unreachable\")\n}\n\ntype MockRunner struct {\n\tCmds []*Command\n}\n\nfunc (r *MockRunner) Run(c *Command) ([]byte, error) {\n\tr.Cmds = append(r.Cmds, c)\n\treturn nil, nil\n}\n\nvar _ Runner = &MockRunner{}\n\nfunc cp(src, dst string) error {\n\tcmd := exec.Command(\"cp\", \"-R\", \"--no-dereference\", \"--preserve=mode,ownership,timestamps\", src, dst)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cp %s %s failed: %s: %q\", src, dst, err, out)\n\t}\n\treturn nil\n}\n<commit_msg>fix build cmd retry (reinitialize command each time; otherwise fails)<commit_after>package container\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\n\/\/ Runner runs commands specified by a Command. The default implementation uses\n\/\/ Docker, but MockExecutor also implements this interface and may be used\n\/\/ during testing (by temporarily changing the DefaultRunner variable). The sole\n\/\/ purpose of this interface is to allow mocking.\ntype Runner interface {\n\tRun(*Command) ([]byte, error)\n}\n\nvar DefaultRunner Runner = dockerRunner{}\n\nvar (\n\tRunRetries = 3\n\tBuildRetries = 3\n)\n\ntype dockerRunner struct{}\n\nfunc (_ dockerRunner) Run(c *Command) ([]byte, error) {\n\ttmpDir, err := ioutil.TempDir(\"\", \"sg-docker\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\timage := \"sg-temp:\" + filepath.Base(tmpDir)\n\tlog.Printf(\"Running image %s\", image)\n\n\tcmdJSON, err := json.Marshal(c.Cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build Docker container.\n\tdockerfile := c.Dockerfile\n\n\t\/\/ AddFiles\n\tfor i, f := range c.AddFiles {\n\t\tif i == 0 {\n\t\t\tdockerfile = append(dockerfile, '\\n')\n\t\t}\n\n\t\t\/\/ Copy to the Docker context dir so we can ADD it.\n\t\tname := filepath.Base(f[0])\n\t\terr = cp(f[0], filepath.Join(tmpDir, name))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"ADD %s %s\\n\", \".\/\"+name, f[1]))...)\n\t}\n\n\t\/\/ AddDirs\n\tfor i, d := range c.AddDirs {\n\t\tif i == 0 {\n\t\t\tdockerfile = append(dockerfile, '\\n')\n\t\t}\n\n\t\t\/\/ Move repository to Docker context dir and ADD it.\n\t\tdirName := filepath.Base(d[0])\n\t\terr := cp(d[0], filepath.Join(tmpDir, dirName))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"ADD %s %s\\n\", \".\/\"+dirName, d[1]))...)\n\t}\n\n\tdockerfile = append(dockerfile, c.PreCmdDockerfile...)\n\n\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(\"\\nCMD %s\", cmdJSON))...)\n\terr = ioutil.WriteFile(filepath.Join(tmpDir, \"Dockerfile\"), []byte(dockerfile), 0700)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < BuildRetries; i++ {\n\t\tremainingAttempts := RunRetries - i - 1\n\t\tbuildCmd := exec.Command(\"docker\", \"build\", \"--rm=false\", \"-t\", image, \".\")\n\t\tbuildCmd.Dir = tmpDir\n\t\tbuildCmd.Stdout, buildCmd.Stderr = c.Stderr, c.Stderr\n\t\terr = buildCmd.Run()\n\t\tif err != nil {\n\t\t\tif remainingAttempts == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Command failed: %v: %s (retrying %d more times)\", buildCmd.Args, err, remainingAttempts)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\tfor i := 0; i < RunRetries; i++ {\n\t\tremainingAttempts := RunRetries - i - 1\n\t\trunOptions := append([]string{}, c.RunOptions...)\n\t\tif c.Dir != \"\" {\n\t\t\trunOptions = append(runOptions, \"--workdir=\"+c.Dir)\n\t\t}\n\t\targs := append([]string{\"run\"}, runOptions...)\n\t\targs = append(args, image)\n\t\trunCmd := exec.Command(\"docker\", args...)\n\t\trunCmd.Stderr = os.Stderr\n\n\t\t\/\/ Get original output.\n\t\tdata, err := runCmd.Output()\n\t\tif err != nil {\n\t\t\tif remainingAttempts == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Command failed: %v: %s (retrying %d more times)\", runCmd.Args, err, remainingAttempts)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Transform.\n\t\tif c.Transform != nil {\n\t\t\tdata, err = c.Transform(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tpanic(\"unreachable\")\n}\n\ntype MockRunner struct {\n\tCmds []*Command\n}\n\nfunc (r *MockRunner) Run(c *Command) ([]byte, error) {\n\tr.Cmds = append(r.Cmds, c)\n\treturn nil, nil\n}\n\nvar _ Runner = &MockRunner{}\n\nfunc cp(src, dst string) error {\n\tcmd := exec.Command(\"cp\", \"-R\", \"--no-dereference\", \"--preserve=mode,ownership,timestamps\", src, dst)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cp %s %s failed: %s: %q\", src, dst, err, out)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage instancepoller\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/instance\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype aggregateSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&aggregateSuite{})\n\ntype testInstance struct {\n\tinstance.Instance\n\taddresses []instance.Address\n\tstatus string\n}\n\nvar _ instance.Instance = (*testInstance)(nil)\n\nfunc (t testInstance) Addresses() ([]instance.Address, error) {\n\treturn t.addresses, nil\n}\n\nfunc (t testInstance) Status() string {\n\treturn t.status\n}\n\ntype testInstanceGetter struct {\n\tids []instance.Id\n\tresults []testInstance\n\terr bool\n}\n\nfunc (i *testInstanceGetter) Instances(ids []instance.Id) (result []instance.Instance, err error) {\n\ti.ids = ids\n\tif i.err {\n\t\treturn nil, fmt.Errorf(\"Some error\")\n\t}\n\tfor _, inst := range i.results {\n\t\tresult = append(result, inst)\n\t}\n\treturn\n}\n\nfunc newTestInstance(status string, addresses []string) *testInstance {\n\tthisInstance := &testInstance{status: status}\n\tfor _, address := range addresses {\n\t\tthisInstance.addresses = append(thisInstance.addresses, instance.NewAddress(address))\n\t}\n\treturn thisInstance\n}\n\nfunc (s *aggregateSuite) TestSingleRequest(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []testInstance{*instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\tc.Assert(reply.info, gc.DeepEquals, instanceInfo{\n\t\tstatus: \"foobar\",\n\t\taddresses: instance1.addresses,\n\t})\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{instance.Id(\"foo\")})\n}\n\nfunc (s *aggregateSuite) TestRequestBatching(c *gc.C) {\n\ts.PatchValue(&GatherTime, 30*time.Millisecond)\n\ttestGetter := new(testInstanceGetter)\n\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []testInstance{*instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\n\tinstance2 := newTestInstance(\"not foobar\", []string{\"192.168.1.2\"})\n\tinstance3 := newTestInstance(\"ok-ish\", []string{\"192.168.1.3\"})\n\n\treplyChan2 := make(chan instanceInfoReply)\n\treplyChan3 := make(chan instanceInfoReply)\n\n\taggregator.reqc <- instanceInfoReq{reply: replyChan2, instId: instance.Id(\"foo2\")}\n\taggregator.reqc <- instanceInfoReq{reply: replyChan3, instId: instance.Id(\"foo3\")}\n\n\ttestGetter.results = []testInstance{*instance2, *instance3}\n\treply2 := <-replyChan2\n\treply3 := <-replyChan3\n\tc.Assert(reply2.err, gc.IsNil)\n\tc.Assert(reply3.err, gc.IsNil)\n\tc.Assert(reply2.info.status, gc.Equals, \"not foobar\")\n\tc.Assert(reply3.info.status, gc.Equals, \"ok-ish\")\n\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{instance.Id(\"foo2\"), instance.Id(\"foo3\")})\n}\n<commit_msg>Skeleton test for error<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage instancepoller\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/instance\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype aggregateSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&aggregateSuite{})\n\ntype testInstance struct {\n\tinstance.Instance\n\taddresses []instance.Address\n\tstatus string\n}\n\nvar _ instance.Instance = (*testInstance)(nil)\n\nfunc (t testInstance) Addresses() ([]instance.Address, error) {\n\treturn t.addresses, nil\n}\n\nfunc (t testInstance) Status() string {\n\treturn t.status\n}\n\ntype testInstanceGetter struct {\n\tids []instance.Id\n\tresults []testInstance\n\terr bool\n}\n\nfunc (i *testInstanceGetter) Instances(ids []instance.Id) (result []instance.Instance, err error) {\n\ti.ids = ids\n\tif i.err {\n\t\treturn nil, fmt.Errorf(\"Some error\")\n\t}\n\tfor _, inst := range i.results {\n\t\tresult = append(result, inst)\n\t}\n\treturn\n}\n\nfunc newTestInstance(status string, addresses []string) *testInstance {\n\tthisInstance := &testInstance{status: status}\n\tfor _, address := range addresses {\n\t\tthisInstance.addresses = append(thisInstance.addresses, instance.NewAddress(address))\n\t}\n\treturn thisInstance\n}\n\nfunc (s *aggregateSuite) TestSingleRequest(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []testInstance{*instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\tc.Assert(reply.info, gc.DeepEquals, instanceInfo{\n\t\tstatus: \"foobar\",\n\t\taddresses: instance1.addresses,\n\t})\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{instance.Id(\"foo\")})\n}\n\nfunc (s *aggregateSuite) TestRequestBatching(c *gc.C) {\n\ts.PatchValue(&GatherTime, 30*time.Millisecond)\n\ttestGetter := new(testInstanceGetter)\n\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []testInstance{*instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\n\tinstance2 := newTestInstance(\"not foobar\", []string{\"192.168.1.2\"})\n\tinstance3 := newTestInstance(\"ok-ish\", []string{\"192.168.1.3\"})\n\n\treplyChan2 := make(chan instanceInfoReply)\n\treplyChan3 := make(chan instanceInfoReply)\n\n\taggregator.reqc <- instanceInfoReq{reply: replyChan2, instId: instance.Id(\"foo2\")}\n\taggregator.reqc <- instanceInfoReq{reply: replyChan3, instId: instance.Id(\"foo3\")}\n\n\ttestGetter.results = []testInstance{*instance2, *instance3}\n\treply2 := <-replyChan2\n\treply3 := <-replyChan3\n\tc.Assert(reply2.err, gc.IsNil)\n\tc.Assert(reply3.err, gc.IsNil)\n\tc.Assert(reply2.info.status, gc.Equals, \"not foobar\")\n\tc.Assert(reply3.info.status, gc.Equals, \"ok-ish\")\n\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{instance.Id(\"foo2\"), instance.Id(\"foo3\")})\n}\n\nfunc (s *aggregateSuite) TestError(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage terminationworker_test\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\tstdtesting \"testing\"\n\n\tjc \"github.com\/juju\/testing\/checkers\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/worker\"\n\t\"github.com\/juju\/juju\/worker\/terminationworker\"\n)\n\nfunc TestPackage(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\nvar _ = gc.Suite(&TerminationWorkerSuite{})\n\ntype TerminationWorkerSuite struct {\n\ttesting.BaseSuite\n\t\/\/ c is a channel that will wait for the termination\n\t\/\/ signal, to prevent signals terminating the process.\n\tc chan os.Signal\n}\n\nfunc (s *TerminationWorkerSuite) SetUpTest(c *gc.C) {\n\ts.BaseSuite.SetUpTest(c)\n\ts.c = make(chan os.Signal, 1)\n\tsignal.Notify(s.c, terminationworker.TerminationSignal)\n}\n\nfunc (s *TerminationWorkerSuite) TearDownTest(c *gc.C) {\n\tclose(s.c)\n\tsignal.Stop(s.c)\n\ts.BaseSuite.TearDownTest(c)\n}\n\nfunc (s *TerminationWorkerSuite) TestStartStop(c *gc.C) {\n\tw := terminationworker.NewWorker()\n\tw.Kill()\n\terr := w.Wait()\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *TerminationWorkerSuite) TestSignal(c *gc.C) {\n\t\/\/TODO(bogdanteleaga): Inspect this further on windows\n\tif runtime.GOOS == \"windows\" {\n\t\tc.Skip(\"bug 1403084: sending this signal is not supported on windows\")\n\t}\n\tw := terminationworker.NewWorker()\n\tproc, err := os.FindProcess(os.Getpid())\n\tc.Assert(err, jc.ErrorIsNil)\n\tdefer proc.Release()\n\terr = proc.Signal(terminationworker.TerminationSignal)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = w.Wait()\n\tc.Assert(err, gc.Equals, worker.ErrTerminateAgent)\n}\n<commit_msg>worker\/terminationworker: stop listening for signal before closing signal channel<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage terminationworker_test\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\tstdtesting \"testing\"\n\n\tjc \"github.com\/juju\/testing\/checkers\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/worker\"\n\t\"github.com\/juju\/juju\/worker\/terminationworker\"\n)\n\nfunc TestPackage(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\nvar _ = gc.Suite(&TerminationWorkerSuite{})\n\ntype TerminationWorkerSuite struct {\n\ttesting.BaseSuite\n\t\/\/ c is a channel that will wait for the termination\n\t\/\/ signal, to prevent signals terminating the process.\n\tc chan os.Signal\n}\n\nfunc (s *TerminationWorkerSuite) SetUpTest(c *gc.C) {\n\ts.BaseSuite.SetUpTest(c)\n\ts.c = make(chan os.Signal, 1)\n\tsignal.Notify(s.c, terminationworker.TerminationSignal)\n}\n\nfunc (s *TerminationWorkerSuite) TearDownTest(c *gc.C) {\n\tsignal.Stop(s.c)\n\tclose(s.c)\n\ts.BaseSuite.TearDownTest(c)\n}\n\nfunc (s *TerminationWorkerSuite) TestStartStop(c *gc.C) {\n\tw := terminationworker.NewWorker()\n\tw.Kill()\n\terr := w.Wait()\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *TerminationWorkerSuite) TestSignal(c *gc.C) {\n\t\/\/TODO(bogdanteleaga): Inspect this further on windows\n\tif runtime.GOOS == \"windows\" {\n\t\tc.Skip(\"bug 1403084: sending this signal is not supported on windows\")\n\t}\n\tw := terminationworker.NewWorker()\n\tproc, err := os.FindProcess(os.Getpid())\n\tc.Assert(err, jc.ErrorIsNil)\n\tdefer proc.Release()\n\terr = proc.Signal(terminationworker.TerminationSignal)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = w.Wait()\n\tc.Assert(err, gc.Equals, worker.ErrTerminateAgent)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage operation_test\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\t\"github.com\/juju\/testing\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/worker\/uniter\/operation\"\n)\n\ntype UpdateStorageSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&UpdateStorageSuite{})\n\nfunc (s *UpdateStorageSuite) TestPrepare(c *gc.C) {\n\tfactory := operation.NewFactory(operation.FactoryParams{})\n\top, err := factory.NewUpdateStorage(nil)\n\tc.Assert(err, jc.ErrorIsNil)\n\tstate, err := op.Prepare(operation.State{})\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(state, gc.IsNil)\n}\n\nfunc (s *UpdateStorageSuite) TestExecuteError(c *gc.C) {\n\tupdater := &mockStorageUpdater{err: errors.New(\"meep\")}\n\tfactory := operation.NewFactory(operation.FactoryParams{StorageUpdater: updater})\n\n\ttag0 := names.NewStorageTag(\"data\/0\")\n\ttag1 := names.NewStorageTag(\"data\/1\")\n\ttags := []names.StorageTag{tag0, tag1}\n\top, err := factory.NewUpdateStorage(tags)\n\tc.Assert(err, jc.ErrorIsNil)\n\tstate, err := op.Prepare(operation.State{})\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(state, gc.IsNil)\n\n\tstate, err = op.Execute(operation.State{})\n\tc.Check(err, gc.ErrorMatches, \"meep\")\n\tc.Check(state, gc.IsNil)\n\tc.Check(updater.tags, jc.DeepEquals, [][]names.StorageTag{tags})\n}\n\nfunc (s *UpdateStorageSuite) TestExecuteSuccess(c *gc.C) {\n\tupdater := &mockStorageUpdater{}\n\tfactory := operation.NewFactory(operation.FactoryParams{StorageUpdater: updater})\n\n\ttag0 := names.NewStorageTag(\"data\/0\")\n\ttag1 := names.NewStorageTag(\"data\/1\")\n\ttags := []names.StorageTag{tag0, tag1}\n\top, err := factory.NewUpdateStorage(tags)\n\tc.Assert(err, jc.ErrorIsNil)\n\tstate, err := op.Prepare(operation.State{})\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(state, gc.IsNil)\n\n\tstate, err = op.Execute(operation.State{})\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(state, gc.IsNil)\n\tc.Check(updater.tags, jc.DeepEquals, [][]names.StorageTag{tags})\n}\n\nfunc (s *UpdateStorageSuite) TestCommit(c *gc.C) {\n\tfactory := operation.NewFactory(operation.FactoryParams{})\n\top, err := factory.NewUpdateStorage(nil)\n\tc.Assert(err, jc.ErrorIsNil)\n\tstate, err := op.Commit(operation.State{})\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(state, gc.IsNil)\n}\n\nfunc (s *UpdateStorageSuite) TestDoesNotNeedGlobalMachineLock(c *gc.C) {\n\tfactory := operation.NewFactory(operation.FactoryParams{})\n\top, err := factory.NewUpdateStorage(nil)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(op.NeedsGlobalMachineLock(), jc.IsFalse)\n}\n\ntype mockStorageUpdater struct {\n\ttags [][]names.StorageTag\n\terr error\n}\n\nfunc (u *mockStorageUpdater) UpdateStorage(tags []names.StorageTag) error {\n\tu.tags = append(u.tags, tags)\n\treturn u.err\n}\n<commit_msg>Delete some more code<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Andrea Masi. All rights reserved.\n\/\/ Use of this source code is governed by MIT license\n\/\/ which that can be found in the LICENSE.txt file.\n\npackage learn\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc loadDataset(t *testing.T) Table {\n\t\/\/ Load all data in memory.\n\ttrainData, err := ReadAllCSV(\"datasets\/linear_test.csv\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn trainData\n}\n\nfunc TestLinearRegression_Predict(t *testing.T) {\n\ttrainData := loadDataset(t)\n\tvar tab MemoryTable = make([][]interface{}, 1)\n\t\/\/ Hand crafted case.\n\ttab[0] = []interface{}{1650.0, 3.0}\n\tlr, err := NewLinearRegression(trainData)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ty, err := lr.Predict(tab)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e := math.Abs(y[0] - 293081.464335); e > tolerance {\n\t\tt.Fatal(\"prediction over tolerance:\", e)\n\t}\n}\n<commit_msg>Refactor linear regression test<commit_after>\/\/ Copyright (c) 2015 Andrea Masi. All rights reserved.\n\/\/ Use of this source code is governed by MIT license\n\/\/ which that can be found in the LICENSE.txt file.\n\npackage learn\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc loadDataset(t *testing.T) Table {\n\t\/\/ Load all data in memory.\n\ttrainData, err := ReadAllCSV(\"datasets\/linear_test.csv\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn trainData\n}\n\nconst tolerance = 1e-7\n\nfunc TestLinearRegression_Predict(t *testing.T) {\n\ttrainData := loadDataset(t)\n\tvar tab MemoryTable = make([][]interface{}, 1)\n\t\/\/ Hand crafted case.\n\ttab[0] = []interface{}{1650.0, 3.0}\n\tlr, err := NewLinearRegression(trainData)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ty, err := lr.Predict(tab)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e := math.Abs(y[0] - 293081.464335); e > tolerance {\n\t\tt.Fatal(\"prediction over tolerance:\", e)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package janusgraph\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/creativesoftwarefdn\/weaviate\/database\/schema\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/database\/schema\/kind\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/gremlin\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/models\"\n\t\"github.com\/go-openapi\/strfmt\"\n)\n\n\/\/ map properties in thing.Schema according to the mapping.\ntype edge struct {\n\tPropertyName string\n\tType string\n\tReference string\n\tLocation string\n}\n\ntype edgeFromRefProp struct {\n\tlocalEdges []edge\n\tnetworkEdges []edge\n\tedgesToDrop []string\n}\n\nfunc (j *Janusgraph) updateClass(k kind.Kind, className schema.ClassName, UUID strfmt.UUID, atContext string, lastUpdateTimeUnix int64, rawProperties interface{}) error {\n\tvertexLabel := j.state.getMappedClassName(className)\n\n\tq := gremlin.G.V().\n\t\tHasString(PROP_KIND, k.Name()).\n\t\tHasString(PROP_UUID, UUID.String()).\n\t\tAs(\"class\").\n\t\tStringProperty(PROP_CLASS_ID, string(vertexLabel)).\n\t\tStringProperty(PROP_AT_CONTEXT, atContext).\n\t\tInt64Property(PROP_LAST_UPDATE_TIME_UNIX, lastUpdateTimeUnix)\n\n\tq, err := j.addEdgesToQuery(q, k, className, rawProperties)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = j.client.Execute(q)\n\treturn err\n}\n\nfunc (j *Janusgraph) addEdgesToQuery(q *gremlin.Query, k kind.Kind, className schema.ClassName, rawProperties interface{}) (*gremlin.Query, error) {\n\n\tvar localEdges []edge\n\tvar networkEdges []edge\n\tvar dropTheseEdgeTypes []string\n\n\tproperties, ok := rawProperties.(map[string]interface{})\n\tif !ok {\n\t\t\/\/ nothing to do because we don't have any\n\t\t\/\/ (useable) properties\n\t\treturn q, nil\n\t}\n\n\tfor propName, value := range properties {\n\t\tsanitizedPropertyName := schema.AssertValidPropertyName(propName)\n\t\terr, property := j.schema.GetProperty(k, className, sanitizedPropertyName)\n\t\tif err != nil {\n\t\t\treturn q, err\n\t\t}\n\n\t\tjanusPropertyName := string(\n\t\t\tj.state.getMappedPropertyName(className, sanitizedPropertyName))\n\t\tpropType, err := j.schema.FindPropertyDataType(property.AtDataType)\n\t\tif err != nil {\n\t\t\treturn q, err\n\t\t}\n\n\t\tif propType.IsPrimitive() {\n\t\t\tq, err = addPrimitivePropToQuery(q, propType, value,\n\t\t\t\tjanusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn q, err\n\t\t\t}\n\t\t} else {\n\t\t\tresult, err := j.edgesFromReferenceProp(property, value, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn q, err\n\t\t\t}\n\n\t\t\tlocalEdges = append(localEdges, result.localEdges...)\n\t\t\tnetworkEdges = append(networkEdges, result.networkEdges...)\n\t\t\tdropTheseEdgeTypes = append(dropTheseEdgeTypes, result.edgesToDrop...)\n\t\t}\n\t}\n\n\t\/\/ Now drop all edges of the type we are touching\n\tfor _, edgeLabel := range dropTheseEdgeTypes {\n\t\tq = q.Optional(gremlin.Current().OutEWithLabel(edgeLabel).HasString(PROP_REF_ID, edgeLabel).Drop())\n\t}\n\n\t\/\/ (Re-)Add edges to all local refs\n\tfor _, edge := range localEdges {\n\t\tq = q.AddE(edge.PropertyName).\n\t\t\tFromRef(\"class\").\n\t\t\tToQuery(gremlin.G.V().HasString(PROP_UUID, edge.Reference)).\n\t\t\tStringProperty(PROP_REF_ID, edge.PropertyName).\n\t\t\tStringProperty(PROP_REF_EDGE_CREF, edge.Reference).\n\t\t\tStringProperty(PROP_REF_EDGE_TYPE, edge.Type).\n\t\t\tStringProperty(PROP_REF_EDGE_LOCATION, edge.Location)\n\t}\n\n\t\/\/ (Re-)Add edges to all network refs\n\tfor _, edge := range networkEdges {\n\t\tq = q.AddE(edge.PropertyName).\n\t\t\tFromRef(\"class\").\n\t\t\tToQuery(\n\t\t\t\tgremlin.G.V().HasString(PROP_UUID, edge.Reference).\n\t\t\t\t\tFold().\n\t\t\t\t\tCoalesce(gremlin.RawQuery(\n\t\t\t\t\t\tfmt.Sprintf(\"unfold(), addV().property(\\\"uuid\\\", \\\"%s\\\")\", edge.Reference),\n\t\t\t\t\t)),\n\t\t\t).\n\t\t\tStringProperty(PROP_REF_ID, edge.PropertyName).\n\t\t\tStringProperty(PROP_REF_EDGE_CREF, edge.Reference).\n\t\t\tStringProperty(PROP_REF_EDGE_TYPE, edge.Type).\n\t\t\tStringProperty(PROP_REF_EDGE_LOCATION, edge.Location)\n\t}\n\n\treturn q, nil\n}\n\nfunc addPrimitivePropToQuery(q *gremlin.Query, propType schema.PropertyDataType,\n\tvalue interface{}, janusPropertyName string, sanitizedPropertyName schema.PropertyName,\n) (*gremlin.Query, error) {\n\tswitch propType.AsPrimitive() {\n\tcase schema.DataTypeInt:\n\t\tswitch t := value.(type) {\n\t\tcase int:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int8:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int16:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int64:\n\t\t\tq = q.Int64Property(janusPropertyName, t)\n\t\tcase uint:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint8:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint16:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint64:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase float32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase float64:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeString:\n\t\tswitch t := value.(type) {\n\t\tcase string:\n\t\t\tq = q.StringProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeText:\n\t\tswitch t := value.(type) {\n\t\tcase string:\n\t\t\tq = q.StringProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeBoolean:\n\t\tswitch t := value.(type) {\n\t\tcase bool:\n\t\t\tq = q.BoolProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeNumber:\n\t\tswitch t := value.(type) {\n\t\tcase float32:\n\t\t\tq = q.Float64Property(janusPropertyName, float64(t))\n\t\tcase float64:\n\t\t\tq = q.Float64Property(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeDate:\n\t\tswitch t := value.(type) {\n\t\tcase time.Time:\n\t\t\tq = q.StringProperty(janusPropertyName, t.Format(time.RFC3339))\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unkown primitive datatype %s\", propType.AsPrimitive()))\n\t}\n\n\treturn q, nil\n}\n\nfunc (j *Janusgraph) edgesFromReferenceProp(property *models.SemanticSchemaClassProperty,\n\tvalue interface{}, propType schema.PropertyDataType, janusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\n\tswitch schema.CardinalityOfProperty(property) {\n\tcase schema.CardinalityAtMostOne:\n\t\treturn j.singleRef(value, propType, janusPropertyName, sanitizedPropertyName)\n\tcase schema.CardinalityMany:\n\t\treturn j.multipleRefs(value, propType, janusPropertyName, sanitizedPropertyName)\n\tdefault:\n\t\treturn result, fmt.Errorf(\"Unexpected cardinality %v\",\n\t\t\tschema.CardinalityOfProperty(property))\n\t}\n}\n\nfunc (j *Janusgraph) singleRef(value interface{}, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\tswitch ref := value.(type) {\n\tcase *models.SingleRef:\n\t\tswitch ref.Type {\n\t\tcase \"NetworkThing\", \"NetworkAction\":\n\t\t\treturn j.singleNetworkRef(ref, janusPropertyName)\n\t\tcase \"Action\", \"Thing\":\n\t\t\treturn j.singleLocalRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\tdefault:\n\t\t\treturn result, fmt.Errorf(\n\t\t\t\t\"illegal value for property %s; only Thing or Action supported\", ref.Type)\n\t\t}\n\n\tdefault:\n\t\treturn result, fmt.Errorf(\"Illegal value for property %s\", sanitizedPropertyName)\n\t}\n}\n\nfunc (j *Janusgraph) singleNetworkRef(ref *models.SingleRef, janusPropertyName string,\n) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\t\/\/ We can't do any business-validation in here (such as does this\n\t\/\/ NetworkThing\/Action really exist on that particular network instance?), as\n\t\/\/ we are in a (local) database connector. Network validations are not our\n\t\/\/ concern. We must trust that a previous layer has verified the correctness.\n\n\tresult.networkEdges = []edge{{\n\t\tPropertyName: janusPropertyName,\n\t\tReference: ref.NrDollarCref.String(),\n\t\tType: ref.Type,\n\t\tLocation: *ref.LocationURL,\n\t}}\n\treturn result, nil\n}\n\nfunc (j *Janusgraph) singleLocalRef(ref *models.SingleRef, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tvar refClassName schema.ClassName\n\tresult := edgeFromRefProp{}\n\n\tswitch ref.Type {\n\tcase \"Action\":\n\t\tvar singleRefValue models.ActionGetResponse\n\t\terr := j.GetAction(nil, ref.NrDollarCref, &singleRefValue)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"Illegal value for property %s; could not resolve action with UUID: %v\", ref.NrDollarCref.String(), err)\n\t\t}\n\t\trefClassName = schema.AssertValidClassName(singleRefValue.AtClass)\n\tcase \"Thing\":\n\t\tvar singleRefValue models.ThingGetResponse\n\t\terr := j.GetThing(nil, ref.NrDollarCref, &singleRefValue)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"Illegal value for property %s; could not resolve thing with UUID: %v\", ref.NrDollarCref.String(), err)\n\t\t}\n\t\trefClassName = schema.AssertValidClassName(singleRefValue.AtClass)\n\t}\n\n\t\/\/ Verify the cross reference\n\tif !propType.ContainsClass(refClassName) {\n\t\treturn result, fmt.Errorf(\"Illegal value for property %s; cannot point to %s\", sanitizedPropertyName, ref.Type)\n\t}\n\tresult.localEdges = []edge{{\n\t\tPropertyName: janusPropertyName,\n\t\tReference: ref.NrDollarCref.String(),\n\t\tType: ref.Type,\n\t\tLocation: *ref.LocationURL,\n\t}}\n\n\treturn result, nil\n}\n\nfunc (j *Janusgraph) multipleRefs(value interface{}, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\tresult.edgesToDrop = []string{janusPropertyName}\n\tswitch t := value.(type) {\n\tcase models.MultipleRef:\n\t\tfor _, ref := range t {\n\t\t\tsingleRef, err := j.singleRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn result, err\n\t\t\t}\n\t\t\tresult.localEdges = append(result.localEdges, singleRef.localEdges...)\n\t\t\tresult.networkEdges = append(result.networkEdges, singleRef.networkEdges...)\n\t\t}\n\t\treturn result, nil\n\tcase []interface{}:\n\t\tfor _, ref := range t {\n\t\t\tref, ok := ref.(*models.SingleRef)\n\t\t\tif !ok {\n\t\t\t\treturn result, fmt.Errorf(\n\t\t\t\t\t\"illegal value for property %s: expected a list of single refs, but current item is %#v\",\n\t\t\t\t\tsanitizedPropertyName, ref)\n\t\t\t}\n\t\t\tsingleRef, err := j.singleRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn result, err\n\t\t\t}\n\t\t\tresult.localEdges = append(result.localEdges, singleRef.localEdges...)\n\t\t\tresult.networkEdges = append(result.networkEdges, singleRef.networkEdges...)\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn result, fmt.Errorf(\"illegal value for property %s, expected *models.MultipleRef, but got %#v\",\n\t\t\tsanitizedPropertyName, value)\n\t}\n}\n<commit_msg>gh-658 fix broken acceptance test for actions<commit_after>package janusgraph\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/creativesoftwarefdn\/weaviate\/database\/schema\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/database\/schema\/kind\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/gremlin\"\n\t\"github.com\/creativesoftwarefdn\/weaviate\/models\"\n\t\"github.com\/go-openapi\/strfmt\"\n)\n\n\/\/ map properties in thing.Schema according to the mapping.\ntype edge struct {\n\tPropertyName string\n\tType string\n\tReference string\n\tLocation string\n}\n\ntype edgeFromRefProp struct {\n\tlocalEdges []edge\n\tnetworkEdges []edge\n\tedgesToDrop []string\n}\n\nfunc (j *Janusgraph) updateClass(k kind.Kind, className schema.ClassName, UUID strfmt.UUID, atContext string, lastUpdateTimeUnix int64, rawProperties interface{}) error {\n\tvertexLabel := j.state.getMappedClassName(className)\n\n\tq := gremlin.G.V().\n\t\tHasString(PROP_KIND, k.Name()).\n\t\tHasString(PROP_UUID, UUID.String()).\n\t\tAs(\"class\").\n\t\tStringProperty(PROP_CLASS_ID, string(vertexLabel)).\n\t\tStringProperty(PROP_AT_CONTEXT, atContext).\n\t\tInt64Property(PROP_LAST_UPDATE_TIME_UNIX, lastUpdateTimeUnix)\n\n\tq, err := j.addEdgesToQuery(q, k, className, rawProperties)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = j.client.Execute(q)\n\treturn err\n}\n\nfunc (j *Janusgraph) addEdgesToQuery(q *gremlin.Query, k kind.Kind, className schema.ClassName, rawProperties interface{}) (*gremlin.Query, error) {\n\n\tvar localEdges []edge\n\tvar networkEdges []edge\n\tvar dropTheseEdgeTypes []string\n\n\tproperties, ok := rawProperties.(map[string]interface{})\n\tif !ok {\n\t\t\/\/ nothing to do because we don't have any\n\t\t\/\/ (useable) properties\n\t\treturn q, nil\n\t}\n\n\tfor propName, value := range properties {\n\t\tsanitizedPropertyName := schema.AssertValidPropertyName(propName)\n\t\terr, property := j.schema.GetProperty(k, className, sanitizedPropertyName)\n\t\tif err != nil {\n\t\t\treturn q, err\n\t\t}\n\n\t\tjanusPropertyName := string(\n\t\t\tj.state.getMappedPropertyName(className, sanitizedPropertyName))\n\t\tpropType, err := j.schema.FindPropertyDataType(property.AtDataType)\n\t\tif err != nil {\n\t\t\treturn q, err\n\t\t}\n\n\t\tif propType.IsPrimitive() {\n\t\t\tq, err = addPrimitivePropToQuery(q, propType, value,\n\t\t\t\tjanusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn q, err\n\t\t\t}\n\t\t} else {\n\t\t\tresult, err := j.edgesFromReferenceProp(property, value, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn q, err\n\t\t\t}\n\n\t\t\tlocalEdges = append(localEdges, result.localEdges...)\n\t\t\tnetworkEdges = append(networkEdges, result.networkEdges...)\n\t\t\tdropTheseEdgeTypes = append(dropTheseEdgeTypes, result.edgesToDrop...)\n\t\t}\n\t}\n\n\t\/\/ Now drop all edges of the type we are touching\n\tfor _, edgeLabel := range dropTheseEdgeTypes {\n\t\tq = q.Optional(gremlin.Current().OutEWithLabel(edgeLabel).HasString(PROP_REF_ID, edgeLabel).Drop())\n\t}\n\n\t\/\/ (Re-)Add edges to all local refs\n\tfor _, edge := range localEdges {\n\t\tq = q.AddE(edge.PropertyName).\n\t\t\tFromRef(\"class\").\n\t\t\tToQuery(gremlin.G.V().HasString(PROP_UUID, edge.Reference)).\n\t\t\tStringProperty(PROP_REF_ID, edge.PropertyName).\n\t\t\tStringProperty(PROP_REF_EDGE_CREF, edge.Reference).\n\t\t\tStringProperty(PROP_REF_EDGE_TYPE, edge.Type).\n\t\t\tStringProperty(PROP_REF_EDGE_LOCATION, edge.Location)\n\t}\n\n\t\/\/ (Re-)Add edges to all network refs\n\tfor _, edge := range networkEdges {\n\t\tq = q.AddE(edge.PropertyName).\n\t\t\tFromRef(\"class\").\n\t\t\tToQuery(\n\t\t\t\tgremlin.G.V().HasString(PROP_UUID, edge.Reference).\n\t\t\t\t\tFold().\n\t\t\t\t\tCoalesce(gremlin.RawQuery(\n\t\t\t\t\t\tfmt.Sprintf(\"unfold(), addV().property(\\\"uuid\\\", \\\"%s\\\")\", edge.Reference),\n\t\t\t\t\t)),\n\t\t\t).\n\t\t\tStringProperty(PROP_REF_ID, edge.PropertyName).\n\t\t\tStringProperty(PROP_REF_EDGE_CREF, edge.Reference).\n\t\t\tStringProperty(PROP_REF_EDGE_TYPE, edge.Type).\n\t\t\tStringProperty(PROP_REF_EDGE_LOCATION, edge.Location)\n\t}\n\n\treturn q, nil\n}\n\nfunc addPrimitivePropToQuery(q *gremlin.Query, propType schema.PropertyDataType,\n\tvalue interface{}, janusPropertyName string, sanitizedPropertyName schema.PropertyName,\n) (*gremlin.Query, error) {\n\tswitch propType.AsPrimitive() {\n\tcase schema.DataTypeInt:\n\t\tswitch t := value.(type) {\n\t\tcase int:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int8:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int16:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase int64:\n\t\t\tq = q.Int64Property(janusPropertyName, t)\n\t\tcase uint:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint8:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint16:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase uint64:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase float32:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tcase float64:\n\t\t\tq = q.Int64Property(janusPropertyName, int64(t))\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeString:\n\t\tswitch t := value.(type) {\n\t\tcase string:\n\t\t\tq = q.StringProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeText:\n\t\tswitch t := value.(type) {\n\t\tcase string:\n\t\t\tq = q.StringProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeBoolean:\n\t\tswitch t := value.(type) {\n\t\tcase bool:\n\t\t\tq = q.BoolProperty(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeNumber:\n\t\tswitch t := value.(type) {\n\t\tcase float32:\n\t\t\tq = q.Float64Property(janusPropertyName, float64(t))\n\t\tcase float64:\n\t\t\tq = q.Float64Property(janusPropertyName, t)\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tcase schema.DataTypeDate:\n\t\tswitch t := value.(type) {\n\t\tcase time.Time:\n\t\t\tq = q.StringProperty(janusPropertyName, t.Format(time.RFC3339))\n\t\tdefault:\n\t\t\treturn q, fmt.Errorf(\"Illegal primitive value for property %s\", sanitizedPropertyName)\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unkown primitive datatype %s\", propType.AsPrimitive()))\n\t}\n\n\treturn q, nil\n}\n\nfunc (j *Janusgraph) edgesFromReferenceProp(property *models.SemanticSchemaClassProperty,\n\tvalue interface{}, propType schema.PropertyDataType, janusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\n\tswitch schema.CardinalityOfProperty(property) {\n\tcase schema.CardinalityAtMostOne:\n\t\treturn j.singleRef(value, propType, janusPropertyName, sanitizedPropertyName)\n\tcase schema.CardinalityMany:\n\t\treturn j.multipleRefs(value, propType, janusPropertyName, sanitizedPropertyName)\n\tdefault:\n\t\treturn result, fmt.Errorf(\"Unexpected cardinality %v\",\n\t\t\tschema.CardinalityOfProperty(property))\n\t}\n}\n\nfunc (j *Janusgraph) singleRef(value interface{}, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\tswitch ref := value.(type) {\n\tcase *models.SingleRef:\n\t\tswitch ref.Type {\n\t\tcase \"NetworkThing\", \"NetworkAction\":\n\t\t\treturn j.singleNetworkRef(ref, janusPropertyName)\n\t\tcase \"Action\", \"Thing\":\n\t\t\treturn j.singleLocalRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\tdefault:\n\t\t\treturn result, fmt.Errorf(\n\t\t\t\t\"illegal value for property %s; only Thing or Action supported\", ref.Type)\n\t\t}\n\n\tdefault:\n\t\treturn result, fmt.Errorf(\"Illegal value for property %s\", sanitizedPropertyName)\n\t}\n}\n\nfunc (j *Janusgraph) singleNetworkRef(ref *models.SingleRef, janusPropertyName string,\n) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\t\/\/ We can't do any business-validation in here (such as does this\n\t\/\/ NetworkThing\/Action really exist on that particular network instance?), as\n\t\/\/ we are in a (local) database connector. Network validations are not our\n\t\/\/ concern. We must trust that a previous layer has verified the correctness.\n\n\tresult.networkEdges = []edge{{\n\t\tPropertyName: janusPropertyName,\n\t\tReference: ref.NrDollarCref.String(),\n\t\tType: ref.Type,\n\t\tLocation: *ref.LocationURL,\n\t}}\n\treturn result, nil\n}\n\nfunc (j *Janusgraph) singleLocalRef(ref *models.SingleRef, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tvar refClassName schema.ClassName\n\tresult := edgeFromRefProp{}\n\n\tswitch ref.Type {\n\tcase \"Action\":\n\t\tvar singleRefValue models.ActionGetResponse\n\t\terr := j.GetAction(nil, ref.NrDollarCref, &singleRefValue)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"Illegal value for property %s; could not resolve action with UUID: %v\", ref.NrDollarCref.String(), err)\n\t\t}\n\t\trefClassName = schema.AssertValidClassName(singleRefValue.AtClass)\n\tcase \"Thing\":\n\t\tvar singleRefValue models.ThingGetResponse\n\t\terr := j.GetThing(nil, ref.NrDollarCref, &singleRefValue)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\"Illegal value for property %s; could not resolve thing with UUID: %v\", ref.NrDollarCref.String(), err)\n\t\t}\n\t\trefClassName = schema.AssertValidClassName(singleRefValue.AtClass)\n\t}\n\n\t\/\/ Verify the cross reference\n\tif !propType.ContainsClass(refClassName) {\n\t\treturn result, fmt.Errorf(\"Illegal value for property %s; cannot point to %s\", sanitizedPropertyName, ref.Type)\n\t}\n\tresult.localEdges = []edge{{\n\t\tPropertyName: janusPropertyName,\n\t\tReference: ref.NrDollarCref.String(),\n\t\tType: ref.Type,\n\t\tLocation: *ref.LocationURL,\n\t}}\n\n\treturn result, nil\n}\n\nfunc (j *Janusgraph) multipleRefs(value interface{}, propType schema.PropertyDataType,\n\tjanusPropertyName string, sanitizedPropertyName schema.PropertyName) (edgeFromRefProp, error) {\n\tresult := edgeFromRefProp{}\n\tresult.edgesToDrop = []string{janusPropertyName}\n\tswitch t := value.(type) {\n\tcase models.MultipleRef, *models.MultipleRef:\n\t\trefs := derefMultipleRefsIfNeeded(t)\n\t\tfor _, ref := range refs {\n\t\t\tsingleRef, err := j.singleRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn result, err\n\t\t\t}\n\t\t\tresult.localEdges = append(result.localEdges, singleRef.localEdges...)\n\t\t\tresult.networkEdges = append(result.networkEdges, singleRef.networkEdges...)\n\t\t}\n\t\treturn result, nil\n\tcase []interface{}:\n\t\tfor _, ref := range t {\n\t\t\tref, ok := ref.(*models.SingleRef)\n\t\t\tif !ok {\n\t\t\t\treturn result, fmt.Errorf(\n\t\t\t\t\t\"illegal value for property %s: expected a list of single refs, but current item is %#v\",\n\t\t\t\t\tsanitizedPropertyName, ref)\n\t\t\t}\n\t\t\tsingleRef, err := j.singleRef(ref, propType, janusPropertyName, sanitizedPropertyName)\n\t\t\tif err != nil {\n\t\t\t\treturn result, err\n\t\t\t}\n\t\t\tresult.localEdges = append(result.localEdges, singleRef.localEdges...)\n\t\t\tresult.networkEdges = append(result.networkEdges, singleRef.networkEdges...)\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn result, fmt.Errorf(\"illegal value for property %s, expected *models.MultipleRef, but got %#v\",\n\t\t\tsanitizedPropertyName, value)\n\t}\n}\n\nfunc derefMultipleRefsIfNeeded(t interface{}) models.MultipleRef {\n\tswitch typed := t.(type) {\n\tcase models.MultipleRef:\n\t\t\/\/ during a patch we don't get a pointer type\n\t\treturn typed\n\tcase *models.MultipleRef:\n\t\t\/\/ during a put we get a pointer type\n\t\treturn *typed\n\tdefault:\n\t\t\/\/ impossible to reach since it's only used after previous type assertion\n\t\tpanic(\"neither *models.MultipleRef nor models.MultipleRef received\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ttlcache\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tbenchmarkSize = 1024\n)\n\nvar (\n\tevictPrinter = func(key string, value interface{}) {\n\t\tfmt.Printf(\"evict key %s, value %v\\n\", key, value)\n\t}\n\ttc = NewLRU(benchmarkSize, 1*time.Second, evictPrinter)\n)\n\nfunc TestSet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Second, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\t\/\/ check internal structure\n\telem, ok := c.table[k]\n\tif !ok {\n\t\tt.Fatal(\"Set didn't insert value\")\n\t}\n\titem := elem.Value.(*entry)\n\tif item.key != \"key\" || item.value != \"value\" {\n\t\tt.Fatal(\"Bad key value stored\")\n\t}\n}\n\nfunc TestDoubleSet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Second, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tcreated = c.Set(k, v, 0)\n\tif created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestSetGet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Hour, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tgot, stale := c.Get(k)\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != \"value\" {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestGetStale(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\tgot, stale := c.Get(k)\n\tif !stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != \"value\" {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestGetMiss(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tgot, stale := c.Get(\"badkey\")\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != nil {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestSetRemoveGet(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\tfound := c.Remove(\"key\")\n\tif !found {\n\t\tt.Fatal(\"bad delete\")\n\t}\n\n\tgot, stale := c.Get(\"key\")\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != nil {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestRemoveNonExistent(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\tfound := c.Remove(\"non-existent\")\n\tif found {\n\t\tt.Fatal(\"bad delete\")\n\t}\n}\n\nfunc TestLRU1(t *testing.T) {\n\tMustEvictKey0 := func(key string, value interface{}) {\n\t\tif key != \"key-0\" {\n\t\t\tt.Fatal(\"LRU failure: not the last element being evicted\", key)\n\t\t}\n\t}\n\n\tnum := 10\n\tc := NewLRU(num, 1*time.Hour, MustEvictKey0)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tcreated := c.Set(k, v, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestLRU2(t *testing.T) {\n\tnum := 10\n\n\tMustEvictKey := func(key string, value interface{}) {\n\t\tlastK := fmt.Sprintf(\"key-%d\", num-1)\n\t\tif key != lastK {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tc := NewLRU(num, 1*time.Hour, MustEvictKey)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tcreated := c.Set(k, v, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\t\/\/ reference all cached value except the last one\n\tfor i := 0; i < num-1; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tc.Get(k)\n\t}\n\n\t\/\/ add a new one to trigger eviction\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestStaleLRU(t *testing.T) {\n\tMustEvictKey0 := func(key string, value interface{}) {\n\t\tif key != \"key-0\" {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tnum := 10\n\tc := NewLRU(num, 100*time.Second, MustEvictKey0)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tvar ttl time.Duration\n\t\tif i == 0 {\n\t\t\tttl = 50 * time.Millisecond\n\t\t}\n\t\tcreated := c.Set(k, v, ttl)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\t\/\/ Get a stale cached value will send it to the back of the list\n\t_, stale := c.Get(\"key-0\")\n\tif !stale {\n\t\tt.Fatal(\"bad stale value\")\n\t}\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n}\n\nfunc TestLRUTimer(t *testing.T) {\n\tnum := 10\n\tMustEvictKeyNumMinusOne := func(key string, value interface{}) {\n\t\tlastK := fmt.Sprintf(\"key-%d\", num-1)\n\t\tif key != lastK {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tc := NewLRU(num, 100*time.Second, MustEvictKeyNumMinusOne)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tvar ttl time.Duration\n\t\tif i == num-1 {\n\t\t\tttl = 50 * time.Millisecond\n\t\t}\n\t\tcreated := c.Set(k, v, ttl)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\t\/\/ Without touching the cache, a timer should move \"key-9\" to the end of the list for eviction.\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n}\n\nfunc TestNilCache(t *testing.T) {\n\tc := NewLRU(0, 100*time.Second, nil)\n\tif c != nil {\n\t\tt.Fatal(\"0-size cache is created\")\n\t}\n\n\tret := c.Set(\"key\", \"value\", 0)\n\tif ret != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n\n\tval, stale := c.Get(\"key\")\n\tif val != nil && stale != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n\n\tremoved := c.Remove(\"key\")\n\tif removed != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n}\n\n\/\/ TestGetMany simply set many elements.\n\/\/ It aims to catch race condition\nfunc TestSetMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\tcreated := tc.Set(fmt.Sprint(i), i, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Bad set method\")\n\t\t}\n\t}\n\n}\n\n\/\/ TestGetMany simply get elements for many times.\n\/\/ It aims to catch race condition\nfunc TestGetMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\tvalue, stale := tc.Get(fmt.Sprint(i))\n\t\t\/\/ should not have cached nil\n\t\tif value == nil && stale == true {\n\t\t\tt.Fatal(\"Bad get method\")\n\t\t}\n\t}\n}\n\n\/\/ TestRemoveMany simply remove elements for many times.\n\/\/ It aims to catch race condition\nfunc TestRemoveMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\t_ = tc.Remove(fmt.Sprint(i))\n\t}\n\n}\n<commit_msg>test tweaks<commit_after>package ttlcache\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tbenchmarkSize = 512\n)\n\nvar (\n\tevictPrinter = func(key string, value interface{}) {\n\t\tfmt.Printf(\"evict key %s, value %v\\n\", key, value)\n\t}\n\ttc = NewLRU(benchmarkSize, 1*time.Second, evictPrinter)\n)\n\nfunc TestSet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Second, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\t\/\/ check internal structure\n\telem, ok := c.table[k]\n\tif !ok {\n\t\tt.Fatal(\"Set didn't insert value\")\n\t}\n\titem := elem.Value.(*entry)\n\tif item.key != \"key\" || item.value != \"value\" {\n\t\tt.Fatal(\"Bad key value stored\")\n\t}\n}\n\nfunc TestDoubleSet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Second, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tcreated = c.Set(k, v, 0)\n\tif created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestSetGet(t *testing.T) {\n\tc := NewLRU(10, 1*time.Hour, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tgot, stale := c.Get(k)\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != \"value\" {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestGetStale(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\tgot, stale := c.Get(k)\n\tif !stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != \"value\" {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestGetMiss(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n\tgot, stale := c.Get(\"badkey\")\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != nil {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestSetRemoveGet(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\tfound := c.Remove(\"key\")\n\tif !found {\n\t\tt.Fatal(\"bad delete\")\n\t}\n\n\tgot, stale := c.Get(\"key\")\n\tif stale {\n\t\tt.Fatal(\"bad stale value returned\")\n\t}\n\n\tif got != nil {\n\t\tt.Fatal(\"bad value returned\")\n\t}\n}\n\nfunc TestRemoveNonExistent(t *testing.T) {\n\tc := NewLRU(10, 50*time.Millisecond, nil)\n\n\tk := \"key\"\n\tv := \"value\"\n\tcreated := c.Set(k, v, 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\tfound := c.Remove(\"non-existent\")\n\tif found {\n\t\tt.Fatal(\"bad delete\")\n\t}\n}\n\nfunc TestLRU1(t *testing.T) {\n\tMustEvictKey0 := func(key string, value interface{}) {\n\t\tif key != \"key-0\" {\n\t\t\tt.Fatal(\"LRU failure: not the last element being evicted\", key)\n\t\t}\n\t}\n\n\tnum := 10\n\tc := NewLRU(num, 1*time.Hour, MustEvictKey0)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tcreated := c.Set(k, v, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestLRU2(t *testing.T) {\n\tnum := 10\n\n\tMustEvictKey := func(key string, value interface{}) {\n\t\tlastK := fmt.Sprintf(\"key-%d\", num-1)\n\t\tif key != lastK {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tc := NewLRU(num, 1*time.Hour, MustEvictKey)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tcreated := c.Set(k, v, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\t\/\/ reference all cached value except the last one\n\tfor i := 0; i < num-1; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tc.Get(k)\n\t}\n\n\t\/\/ add a new one to trigger eviction\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n\n}\n\nfunc TestStaleLRU(t *testing.T) {\n\tMustEvictKey0 := func(key string, value interface{}) {\n\t\tif key != \"key-0\" {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tnum := 10\n\tc := NewLRU(num, 100*time.Second, MustEvictKey0)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tvar ttl time.Duration\n\t\tif i == 0 {\n\t\t\tttl = 50 * time.Millisecond\n\t\t}\n\t\tcreated := c.Set(k, v, ttl)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\t\/\/ Get a stale cached value will send it to the back of the list\n\t_, stale := c.Get(\"key-0\")\n\tif !stale {\n\t\tt.Fatal(\"bad stale value\")\n\t}\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n}\n\nfunc TestLRUTimer(t *testing.T) {\n\tnum := 10\n\tMustEvictKeyNumMinusOne := func(key string, value interface{}) {\n\t\tlastK := fmt.Sprintf(\"key-%d\", num-1)\n\t\tif key != lastK {\n\t\t\tt.Fatal(\"LRU failure: not the desired element being evicted\", key)\n\t\t}\n\t}\n\n\tc := NewLRU(num, 100*time.Second, MustEvictKeyNumMinusOne)\n\n\tfor i := 0; i < num; i++ {\n\t\tk := fmt.Sprintf(\"key-%d\", i)\n\t\tv := fmt.Sprintf(\"val-%d\", i)\n\t\tvar ttl time.Duration\n\t\tif i == num-1 {\n\t\t\tttl = 50 * time.Millisecond\n\t\t}\n\t\tcreated := c.Set(k, v, ttl)\n\t\tif !created {\n\t\t\tt.Fatal(\"Set returns wrong value\")\n\t\t}\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\t\/\/ Without touching the cache, a timer should move \"key-9\" to the end of the list for eviction.\n\n\tcreated := c.Set(\"new-key\", \"new-value\", 0)\n\tif !created {\n\t\tt.Fatal(\"Set returns wrong value\")\n\t}\n}\n\nfunc TestNilCache(t *testing.T) {\n\tc := NewLRU(0, 100*time.Second, nil)\n\tif c != nil {\n\t\tt.Fatal(\"0-size cache is created\")\n\t}\n\n\tret := c.Set(\"key\", \"value\", 0)\n\tif ret != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n\n\tval, stale := c.Get(\"key\")\n\tif val != nil && stale != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n\n\tremoved := c.Remove(\"key\")\n\tif removed != false {\n\t\tt.Fatal(\"nil cache is bad\")\n\t}\n}\n\n\/\/ TestGetMany simply set many elements.\n\/\/ It aims to catch race condition\nfunc TestSetMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\tcreated := tc.Set(fmt.Sprint(i), i, 0)\n\t\tif !created {\n\t\t\tt.Fatal(\"Bad set method\")\n\t\t}\n\t}\n\n}\n\n\/\/ TestGetMany simply get elements for many times.\n\/\/ It aims to catch race condition\nfunc TestGetMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\tvalue, stale := tc.Get(fmt.Sprint(i))\n\t\t\/\/ should not have cached nil\n\t\tif value == nil && stale == true {\n\t\t\tt.Fatal(\"Bad get method\")\n\t\t}\n\t}\n}\n\n\/\/ TestRemoveMany simply remove elements for many times.\n\/\/ It aims to catch race condition\nfunc TestRemoveMany(t *testing.T) {\n\tt.Parallel()\n\tfor i := 0; i < 1024; i++ {\n\t\ttime.Sleep(time.Microsecond)\n\t\t_ = tc.Remove(fmt.Sprint(i))\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ut implements some testing utilities. So far it includes CallTracker, which helps you build\n\/\/ mock implementations of interfaces.\npackage ut\n\nimport (\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n)\n\n\/\/ CallTracker is an interface to help build mocks.\n\/\/\n\/\/ Build the CallTracker interface into your mocks. Use TrackCall within mock methods to track calls to the method\n\/\/ and the parameters used.\n\/\/ Within tests use AddCall to add expected method calls, and SetReturns to indicate what the calls will return.\n\/\/\n\/\/ The tests for this package contain a full example.\n\/\/\n\/\/ type MyMock struct {ut.CallTracker}\n\/\/\n\/\/ func (m *MyMock) AFunction(p int) error {\n\/\/ \tr := m.TrackCall(\"AFunction\", p)\n\/\/ return NilOrError(r[0])\n\/\/ }\n\/\/\n\/\/ func Something(m Functioner) {\n\/\/ m.AFunction(37)\n\/\/ }\n\/\/\n\/\/ func TestSomething(t *testing.T) {\n\/\/ \tm := &MyMock{NewCallRecords(t)}\n\/\/ m.AddCall(\"AFunction\", 37).SetReturns(nil)\n\/\/\n\/\/ Something(m)\n\/\/\n\/\/ m.AssertDone()\n\/\/ }\ntype CallTracker interface {\n\t\/\/ AddCall() is used by tests to add an expected call to the tracker\n\tAddCall(name string, params ...interface{}) CallTracker\n\n\t\/\/ SetReturns() is called immediately after AddCall() to set the return\n\t\/\/ values for the call.\n\tSetReturns(returns ...interface{}) CallTracker\n\n\t\/\/ TrackCall() is called within mocks to track a call to the Mock. It\n\t\/\/ returns the return values registered via SetReturns()\n\tTrackCall(name string, params ...interface{}) []interface{}\n\n\t\/\/ AssertDone() should be called at the end of a test to confirm all\n\t\/\/ the expected calls have been made\n\tAssertDone()\n}\n\ntype callRecord struct {\n\tname string\n\tparams []interface{}\n\treturns []interface{}\n}\n\nfunc (e *callRecord) assert(t testing.TB, name string, params ...interface{}) {\n\tif name != e.name {\n\t\tt.Fatalf(\"Expected call to %s, got call to %s\", e.name, name)\n\t}\n\tif len(params) != len(e.params) {\n\t\tt.Fatalf(\"Call to (%s) expected %d params, got %d (%#v)\", name, len(e.params), len(params), params)\n\t}\n\tfor i, ap := range params {\n\t\tep := e.params[i]\n\n\t\tif ap == nil && ep == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch ep := ep.(type) {\n\t\tcase func(actual interface{}):\n\t\t\tep(ap)\n\t\tdefault:\n\t\t\tif !reflect.DeepEqual(ap, ep) {\n\t\t\t\tt.Fatalf(\"Call to (%s) parameter %d got %#v (%T) does not match expected %#v (%T)\", name, i, ap, ap, ep, ep)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype callRecords struct {\n\tsync.Mutex\n\tt testing.TB\n\tcalls []callRecord\n\tcurrent int\n}\n\n\/\/ NewCallRecords creates a new call tracker\nfunc NewCallRecords(t testing.TB) CallTracker {\n\treturn &callRecords{\n\t\tt: t,\n\t}\n}\n\nfunc (cr *callRecords) AddCall(name string, params ...interface{}) CallTracker {\n\tcr.calls = append(cr.calls, callRecord{name: name, params: params})\n\treturn cr\n}\n\nfunc (cr *callRecords) SetReturns(returns ...interface{}) CallTracker {\n\tcr.calls[len(cr.calls)-1].returns = returns\n\treturn cr\n}\n\nfunc (cr *callRecords) TrackCall(name string, params ...interface{}) []interface{} {\n\tcr.Lock()\n\tdefer cr.Unlock()\n\tif cr.current >= len(cr.calls) {\n\t\tcr.t.Fatalf(\"Unexpected call to \\\"%s\\\" with parameters %#v\", name, params)\n\t}\n\texpectedCall := cr.calls[cr.current]\n\texpectedCall.assert(cr.t, name, params...)\n\tcr.current += 1\n\treturn expectedCall.returns\n}\n\nfunc (cr *callRecords) AssertDone() {\n\tif cr.current < len(cr.calls) {\n\t\tcr.t.Fatalf(\"Only %d of %d expected calls made\", cr.current, len(cr.calls))\n\t}\n}\n\n\/\/ NilOrError is a utility function for returning err from mocked methods\nfunc NilOrError(val interface{}) error {\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn val.(error)\n}\n<commit_msg>better error messages<commit_after>\/\/ Package ut implements some testing utilities. So far it includes CallTracker, which helps you build\n\/\/ mock implementations of interfaces.\npackage ut\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n)\n\n\/\/ CallTracker is an interface to help build mocks.\n\/\/\n\/\/ Build the CallTracker interface into your mocks. Use TrackCall within mock methods to track calls to the method\n\/\/ and the parameters used.\n\/\/ Within tests use AddCall to add expected method calls, and SetReturns to indicate what the calls will return.\n\/\/\n\/\/ The tests for this package contain a full example.\n\/\/\n\/\/ type MyMock struct {ut.CallTracker}\n\/\/\n\/\/ func (m *MyMock) AFunction(p int) error {\n\/\/ \tr := m.TrackCall(\"AFunction\", p)\n\/\/ return NilOrError(r[0])\n\/\/ }\n\/\/\n\/\/ func Something(m Functioner) {\n\/\/ m.AFunction(37)\n\/\/ }\n\/\/\n\/\/ func TestSomething(t *testing.T) {\n\/\/ \tm := &MyMock{NewCallRecords(t)}\n\/\/ m.AddCall(\"AFunction\", 37).SetReturns(nil)\n\/\/\n\/\/ Something(m)\n\/\/\n\/\/ m.AssertDone()\n\/\/ }\ntype CallTracker interface {\n\t\/\/ AddCall() is used by tests to add an expected call to the tracker\n\tAddCall(name string, params ...interface{}) CallTracker\n\n\t\/\/ SetReturns() is called immediately after AddCall() to set the return\n\t\/\/ values for the call.\n\tSetReturns(returns ...interface{}) CallTracker\n\n\t\/\/ TrackCall() is called within mocks to track a call to the Mock. It\n\t\/\/ returns the return values registered via SetReturns()\n\tTrackCall(name string, params ...interface{}) []interface{}\n\n\t\/\/ AssertDone() should be called at the end of a test to confirm all\n\t\/\/ the expected calls have been made\n\tAssertDone()\n}\n\ntype callRecord struct {\n\tname string\n\tparams []interface{}\n\treturns []interface{}\n}\n\nfunc (e *callRecord) assert(t testing.TB, name string, params ...interface{}) {\n\tif name != e.name {\n\t\tt.Logf(\"Expected call to %s%s\", e.name, paramsToString(e.params))\n\t\tt.Logf(\" got call to %s%s\", name, paramsToString(params))\n\t\tshowStack(t)\n\t\tt.Fail()\n\t\treturn\n\t}\n\tif len(params) != len(e.params) {\n\t\tt.Logf(\"Call to (%s) unexpected parameters\", name)\n\t\tt.Logf(\" expected %s\", paramsToString(e.params))\n\t\tt.Logf(\" got %s\", paramsToString(params))\n\t\tshowStack(t)\n\t\tt.FailNow()\n\t\treturn\n\t}\n\tfor i, ap := range params {\n\t\tep := e.params[i]\n\n\t\tif ap == nil && ep == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch ep := ep.(type) {\n\t\tcase func(actual interface{}):\n\t\t\tep(ap)\n\t\tdefault:\n\t\t\tif !reflect.DeepEqual(ap, ep) {\n\t\t\t\tt.Logf(\"Call to %s parameter %d unexpected\", name, i)\n\t\t\t\tt.Logf(\" expected %#v (%T)\", ep, ep)\n\t\t\t\tt.Logf(\" got %#v (%T)\", ap, ap)\n\t\t\t\tshowStack(t)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc showStack(t testing.TB) {\n\tpc := make([]uintptr, 10)\n\tn := runtime.Callers(4, pc)\n\tfor i := 0; i < n; i++ {\n\t\tf := runtime.FuncForPC(pc[i])\n\t\tfile, line := f.FileLine(pc[i])\n\t\tt.Logf(\" %s (%s line %d\", f.Name(), file, line)\n\t}\n}\n\nfunc paramsToString(params []interface{}) string {\n\tw := &bytes.Buffer{}\n\tw.WriteString(\"(\")\n\tl := len(params)\n\tfor i, p := range params {\n\t\tfmt.Fprintf(w, \"%#v\", p)\n\t\tif i < l-1 {\n\t\t\tw.WriteString(\", \")\n\t\t}\n\t}\n\tw.WriteString(\")\")\n\treturn w.String()\n}\n\ntype callRecords struct {\n\tsync.Mutex\n\tt testing.TB\n\tcalls []callRecord\n\tcurrent int\n}\n\n\/\/ NewCallRecords creates a new call tracker\nfunc NewCallRecords(t testing.TB) CallTracker {\n\treturn &callRecords{\n\t\tt: t,\n\t}\n}\n\nfunc (cr *callRecords) AddCall(name string, params ...interface{}) CallTracker {\n\tcr.calls = append(cr.calls, callRecord{name: name, params: params})\n\treturn cr\n}\n\nfunc (cr *callRecords) SetReturns(returns ...interface{}) CallTracker {\n\tcr.calls[len(cr.calls)-1].returns = returns\n\treturn cr\n}\n\nfunc (cr *callRecords) TrackCall(name string, params ...interface{}) []interface{} {\n\tcr.Lock()\n\tdefer cr.Unlock()\n\tif cr.current >= len(cr.calls) {\n\t\tcr.t.Logf(\"Unexpected call to %s%s\", name, paramsToString(params))\n\t\tshowStack(cr.t)\n\t\tcr.t.FailNow()\n\t}\n\texpectedCall := cr.calls[cr.current]\n\texpectedCall.assert(cr.t, name, params...)\n\tcr.current += 1\n\treturn expectedCall.returns\n}\n\nfunc (cr *callRecords) AssertDone() {\n\tif cr.current < len(cr.calls) {\n\t\tcr.t.Fatalf(\"Only %d of %d expected calls made\", cr.current, len(cr.calls))\n\t}\n}\n\n\/\/ NilOrError is a utility function for returning err from mocked methods\nfunc NilOrError(val interface{}) error {\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn val.(error)\n}\n<|endoftext|>"} {"text":"<commit_before>package capnp\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar ErrNullClient = errors.New(\"capn: call on null client\")\n\n\/\/ A Client represents an interface type.\ntype Client interface {\n\t\/\/ Call starts executing a method and returns an answer that will hold\n\t\/\/ the resulting struct. The call's parameters must be placed before\n\t\/\/ Call() returns.\n\t\/\/\n\t\/\/ Calls are delivered to the capability in the order they are made.\n\t\/\/ This guarantee is based on the concept of a capability\n\t\/\/ acknowledging delivery of a call: this is specific to an\n\t\/\/ implementation of Client. A type that implements Client must\n\t\/\/ guarantee that if foo() then bar() is called on a client, that\n\t\/\/ acknowledging foo() happens before acknowledging bar().\n\tCall(call *Call) Answer\n\n\t\/\/ Close releases any resources associated with this client.\n\t\/\/ No further calls to the client should be made after calling Close.\n\tClose() error\n}\n\n\/\/ The Call type holds the record for an outgoing interface call.\ntype Call struct {\n\t\/\/ Ctx is the context of the call.\n\tCtx context.Context\n\n\t\/\/ Method is the interface ID and method ID, along with the optional name,\n\t\/\/ of the method to call.\n\tMethod Method\n\n\t\/\/ Params is a struct containing parameters for the call.\n\t\/\/ This should be set when the RPC system receives a call for an\n\t\/\/ exported interface. It is mutually exclusive with ParamsFunc\n\t\/\/ and ParamsSize.\n\tParams Struct\n\t\/\/ ParamsFunc is a function that populates an allocated struct with\n\t\/\/ the parameters for the call. ParamsSize determines the size of the\n\t\/\/ struct to allocate. This is used when application code is using a\n\t\/\/ client. These settings should be set together; they are mutually\n\t\/\/ exclusive with Params.\n\tParamsFunc func(Struct)\n\tParamsSize ObjectSize\n\n\t\/\/ Options passes RPC-specific options for the call.\n\tOptions CallOptions\n}\n\n\/\/ CallOptions holds RPC-specific options for an interface call.\n\/\/ Its usage is similar to the values in context.Context, but is only\n\/\/ used for a single call: its values are not intended to propagate to\n\/\/ other callees. An example of an option would be the\n\/\/ Call.sendResultsTo field in rpc.capnp.\ntype CallOptions map[interface{}]interface{}\n\n\/\/ NewCallOptions builds a CallOptions value from a list of individual options.\nfunc NewCallOptions(opts []CallOption) CallOptions {\n\tco := make(CallOptions)\n\tfor _, o := range opts {\n\t\to(co)\n\t}\n\treturn co\n}\n\n\/\/ A CallOption is a function that modifies options on an interface call.\ntype CallOption func(CallOptions)\n\n\/\/ PlaceParams returns the parameters struct, allocating it inside\n\/\/ segment s as necessary. If s is nil, a new segment is allocated.\nfunc (call *Call) PlaceParams(s *Segment) Struct {\n\tif call.ParamsFunc == nil {\n\t\treturn call.Params\n\t}\n\tif s == nil {\n\t\ts = NewBuffer(nil)\n\t}\n\tp := s.NewStruct(call.ParamsSize)\n\tcall.ParamsFunc(p)\n\treturn p\n}\n\n\/\/ An Answer is the deferred result of a client call, which is usually wrapped by a Pipeline.\ntype Answer interface {\n\t\/\/ Struct waits until the call is finished and returns the result.\n\tStruct() (Struct, error)\n\n\t\/\/ The following methods are the same as in Client except with\n\t\/\/ an added transform parameter -- a path to the interface to use.\n\n\tPipelineCall(transform []PipelineOp, call *Call) Answer\n\tPipelineClose(transform []PipelineOp) error\n}\n\n\/\/ A Pipeline is a generic wrapper for an answer.\ntype Pipeline struct {\n\tanswer Answer\n\tparent *Pipeline\n\top PipelineOp\n}\n\n\/\/ NewPipeline returns a new pipeline based on an answer.\nfunc NewPipeline(ans Answer) *Pipeline {\n\treturn &Pipeline{answer: ans}\n}\n\n\/\/ Answer returns the answer the pipeline is derived from.\nfunc (p *Pipeline) Answer() Answer {\n\treturn p.answer\n}\n\n\/\/ Transform returns the operations needed to transform the root answer\n\/\/ into the value p represents.\nfunc (p *Pipeline) Transform() []PipelineOp {\n\tn := 0\n\tfor q := p; q.parent != nil; q = q.parent {\n\t\tn++\n\t}\n\txform := make([]PipelineOp, n)\n\tfor i, q := n-1, p; q.parent != nil; i, q = i-1, q.parent {\n\t\txform[i] = q.op\n\t}\n\treturn xform\n}\n\n\/\/ Struct waits until the answer is resolved and returns the struct\n\/\/ this pipeline represents.\nfunc (p *Pipeline) Struct() (Struct, error) {\n\ts, err := p.answer.Struct()\n\tif err != nil {\n\t\treturn Struct{}, err\n\t}\n\treturn TransformObject(Object(s), p.Transform()).ToStruct(), err\n}\n\n\/\/ Client returns the client version of p.\nfunc (p *Pipeline) Client() *PipelineClient {\n\treturn (*PipelineClient)(p)\n}\n\n\/\/ GetPipeline returns a derived pipeline which yields the pointer field given.\nfunc (p *Pipeline) GetPipeline(off int) *Pipeline {\n\treturn p.GetPipelineDefault(off, nil, 0)\n}\n\n\/\/ GetPipelineDefault returns a derived pipeline which yields the pointer field given,\n\/\/ defaulting to the value given.\nfunc (p *Pipeline) GetPipelineDefault(off int, dseg *Segment, doff int) *Pipeline {\n\treturn &Pipeline{\n\t\tanswer: p.answer,\n\t\tparent: p,\n\t\top: PipelineOp{\n\t\t\tField: off,\n\t\t\tDefaultSegment: dseg,\n\t\t\tDefaultOffset: doff,\n\t\t},\n\t}\n}\n\n\/\/ PipelineClient implements Client by calling to the pipeline's answer.\ntype PipelineClient Pipeline\n\nfunc (pc *PipelineClient) transform() []PipelineOp {\n\treturn (*Pipeline)(pc).Transform()\n}\n\nfunc (pc *PipelineClient) Call(call *Call) Answer {\n\treturn pc.answer.PipelineCall(pc.transform(), call)\n}\n\nfunc (pc *PipelineClient) Close() error {\n\treturn pc.answer.PipelineClose(pc.transform())\n}\n\n\/\/ A PipelineOp describes a step in transforming a pipeline.\n\/\/ It maps closely with the PromisedAnswer.Op struct in rpc.capnp.\ntype PipelineOp struct {\n\tField int\n\tDefaultSegment *Segment\n\tDefaultOffset int\n}\n\n\/\/ String returns a human-readable description of op.\nfunc (op PipelineOp) String() string {\n\ts := make([]byte, 0, 32)\n\ts = append(s, \"get field \"...)\n\ts = strconv.AppendInt(s, int64(op.Field), 10)\n\tif op.DefaultSegment == nil {\n\t\treturn string(s)\n\t}\n\ts = append(s, \" with default\"...)\n\treturn string(s)\n}\n\n\/\/ A Method identifies a method along with an optional human-readable\n\/\/ description of the method.\ntype Method struct {\n\tInterfaceID uint64\n\tMethodID uint16\n\n\t\/\/ Canonical name of the interface. May be empty.\n\tInterfaceName string\n\t\/\/ Method name as it appears in the schema. May be empty.\n\tMethodName string\n}\n\n\/\/ String returns a formatted string containing the interface name or\n\/\/ the method name if present, otherwise it uses the raw IDs.\n\/\/ This is suitable for use in error messages and logs.\nfunc (m *Method) String() string {\n\tbuf := make([]byte, 0, 128)\n\tif m.InterfaceName == \"\" {\n\t\tbuf = append(buf, '@', '0', 'x')\n\t\tbuf = strconv.AppendUint(buf, m.InterfaceID, 16)\n\t} else {\n\t\tbuf = append(buf, m.InterfaceName...)\n\t}\n\tbuf = append(buf, '.')\n\tif m.MethodName == \"\" {\n\t\tbuf = append(buf, '@')\n\t\tbuf = strconv.AppendUint(buf, uint64(m.MethodID), 10)\n\t} else {\n\t\tbuf = append(buf, m.MethodName...)\n\t}\n\treturn string(buf)\n}\n\n\/\/ TransformObject applies a sequence of pipeline operations to an object\n\/\/ and returns the result.\nfunc TransformObject(p Object, transform []PipelineOp) Object {\n\tn := len(transform)\n\tif n == 0 {\n\t\treturn p\n\t}\n\ts := p.ToStruct()\n\tfor _, op := range transform[:n-1] {\n\t\tfield := s.GetObject(op.Field)\n\t\tif op.DefaultSegment == nil {\n\t\t\ts = field.ToStruct()\n\t\t} else {\n\t\t\ts = field.ToStructDefault(op.DefaultSegment, op.DefaultOffset)\n\t\t}\n\t}\n\top := transform[n-1]\n\tp = s.GetObject(op.Field)\n\tif op.DefaultSegment != nil {\n\t\tp = Object(p.ToStructDefault(op.DefaultSegment, op.DefaultOffset))\n\t}\n\treturn p\n}\n\ntype immediateAnswer Object\n\n\/\/ ImmediateAnswer returns an Answer that accesses s.\nfunc ImmediateAnswer(s Object) Answer {\n\treturn immediateAnswer(s)\n}\n\nfunc (ans immediateAnswer) Struct() (Struct, error) {\n\treturn Struct(ans), nil\n}\n\nfunc (ans immediateAnswer) PipelineCall(transform []PipelineOp, call *Call) Answer {\n\tc := TransformObject(Object(ans), transform).ToInterface().Client()\n\tif c == nil {\n\t\treturn ErrorAnswer(ErrNullClient)\n\t}\n\treturn c.Call(call)\n}\n\nfunc (ans immediateAnswer) PipelineClose(transform []PipelineOp) error {\n\tc := TransformObject(Object(ans), transform).ToInterface().Client()\n\tif c == nil {\n\t\treturn ErrNullClient\n\t}\n\treturn c.Close()\n}\n\ntype errorAnswer struct {\n\te error\n}\n\n\/\/ ErrorAnswer returns a Answer that always returns error e.\nfunc ErrorAnswer(e error) Answer {\n\treturn errorAnswer{e}\n}\n\nfunc (ans errorAnswer) Struct() (Struct, error) {\n\treturn Struct{}, ans.e\n}\n\nfunc (ans errorAnswer) PipelineCall([]PipelineOp, *Call) Answer {\n\treturn ans\n}\n\nfunc (ans errorAnswer) PipelineClose([]PipelineOp) error {\n\treturn ans.e\n}\n\ntype errorClient struct {\n\te error\n}\n\n\/\/ ErrorClient returns a Client that always returns error e.\nfunc ErrorClient(e error) Client {\n\treturn errorClient{e}\n}\n\nfunc (ec errorClient) Call(*Call) Answer {\n\treturn ErrorAnswer(ec.e)\n}\n\nfunc (ec errorClient) Close() error {\n\treturn nil\n}\n<commit_msg>make CallOption(s) opaque types<commit_after>package capnp\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar ErrNullClient = errors.New(\"capn: call on null client\")\n\n\/\/ A Client represents an interface type.\ntype Client interface {\n\t\/\/ Call starts executing a method and returns an answer that will hold\n\t\/\/ the resulting struct. The call's parameters must be placed before\n\t\/\/ Call() returns.\n\t\/\/\n\t\/\/ Calls are delivered to the capability in the order they are made.\n\t\/\/ This guarantee is based on the concept of a capability\n\t\/\/ acknowledging delivery of a call: this is specific to an\n\t\/\/ implementation of Client. A type that implements Client must\n\t\/\/ guarantee that if foo() then bar() is called on a client, that\n\t\/\/ acknowledging foo() happens before acknowledging bar().\n\tCall(call *Call) Answer\n\n\t\/\/ Close releases any resources associated with this client.\n\t\/\/ No further calls to the client should be made after calling Close.\n\tClose() error\n}\n\n\/\/ The Call type holds the record for an outgoing interface call.\ntype Call struct {\n\t\/\/ Ctx is the context of the call.\n\tCtx context.Context\n\n\t\/\/ Method is the interface ID and method ID, along with the optional name,\n\t\/\/ of the method to call.\n\tMethod Method\n\n\t\/\/ Params is a struct containing parameters for the call.\n\t\/\/ This should be set when the RPC system receives a call for an\n\t\/\/ exported interface. It is mutually exclusive with ParamsFunc\n\t\/\/ and ParamsSize.\n\tParams Struct\n\t\/\/ ParamsFunc is a function that populates an allocated struct with\n\t\/\/ the parameters for the call. ParamsSize determines the size of the\n\t\/\/ struct to allocate. This is used when application code is using a\n\t\/\/ client. These settings should be set together; they are mutually\n\t\/\/ exclusive with Params.\n\tParamsFunc func(Struct)\n\tParamsSize ObjectSize\n\n\t\/\/ Options passes RPC-specific options for the call.\n\tOptions CallOptions\n}\n\n\/\/ PlaceParams returns the parameters struct, allocating it inside\n\/\/ segment s as necessary. If s is nil, a new segment is allocated.\nfunc (call *Call) PlaceParams(s *Segment) Struct {\n\tif call.ParamsFunc == nil {\n\t\treturn call.Params\n\t}\n\tif s == nil {\n\t\ts = NewBuffer(nil)\n\t}\n\tp := s.NewStruct(call.ParamsSize)\n\tcall.ParamsFunc(p)\n\treturn p\n}\n\n\/\/ CallOptions holds RPC-specific options for an interface call.\n\/\/ Its usage is similar to the values in context.Context, but is only\n\/\/ used for a single call: its values are not intended to propagate to\n\/\/ other callees. An example of an option would be the\n\/\/ Call.sendResultsTo field in rpc.capnp.\ntype CallOptions struct {\n\tm map[interface{}]interface{}\n}\n\n\/\/ NewCallOptions builds a CallOptions value from a list of individual options.\nfunc NewCallOptions(opts []CallOption) CallOptions {\n\tco := CallOptions{make(map[interface{}]interface{})}\n\tfor _, o := range opts {\n\t\to.f(co)\n\t}\n\treturn co\n}\n\n\/\/ Value retrieves the value associated with the options for this key,\n\/\/ or nil if no value is associated with this key.\nfunc (co CallOptions) Value(key interface{}) interface{} {\n\treturn co.m[key]\n}\n\n\/\/ With creates a copy of the CallOptions value with other options applied.\nfunc (co CallOptions) With(opts []CallOption) CallOptions {\n\tnewopts := CallOptions{make(map[interface{}]interface{})}\n\tfor k, v := range co.m {\n\t\tnewopts.m[k] = v\n\t}\n\tfor _, o := range opts {\n\t\to.f(newopts)\n\t}\n\treturn newopts\n}\n\n\/\/ A CallOption is a function that modifies options on an interface call.\ntype CallOption struct {\n\tf func(CallOptions)\n}\n\n\/\/ SetOptionValue returns a call option that associates a value to an\n\/\/ option key. This can be retrieved later with CallOptions.Value.\nfunc SetOptionValue(key, value interface{}) CallOption {\n\treturn CallOption{func(co CallOptions) {\n\t\tco.m[key] = value\n\t}}\n}\n\n\/\/ An Answer is the deferred result of a client call, which is usually wrapped by a Pipeline.\ntype Answer interface {\n\t\/\/ Struct waits until the call is finished and returns the result.\n\tStruct() (Struct, error)\n\n\t\/\/ The following methods are the same as in Client except with\n\t\/\/ an added transform parameter -- a path to the interface to use.\n\n\tPipelineCall(transform []PipelineOp, call *Call) Answer\n\tPipelineClose(transform []PipelineOp) error\n}\n\n\/\/ A Pipeline is a generic wrapper for an answer.\ntype Pipeline struct {\n\tanswer Answer\n\tparent *Pipeline\n\top PipelineOp\n}\n\n\/\/ NewPipeline returns a new pipeline based on an answer.\nfunc NewPipeline(ans Answer) *Pipeline {\n\treturn &Pipeline{answer: ans}\n}\n\n\/\/ Answer returns the answer the pipeline is derived from.\nfunc (p *Pipeline) Answer() Answer {\n\treturn p.answer\n}\n\n\/\/ Transform returns the operations needed to transform the root answer\n\/\/ into the value p represents.\nfunc (p *Pipeline) Transform() []PipelineOp {\n\tn := 0\n\tfor q := p; q.parent != nil; q = q.parent {\n\t\tn++\n\t}\n\txform := make([]PipelineOp, n)\n\tfor i, q := n-1, p; q.parent != nil; i, q = i-1, q.parent {\n\t\txform[i] = q.op\n\t}\n\treturn xform\n}\n\n\/\/ Struct waits until the answer is resolved and returns the struct\n\/\/ this pipeline represents.\nfunc (p *Pipeline) Struct() (Struct, error) {\n\ts, err := p.answer.Struct()\n\tif err != nil {\n\t\treturn Struct{}, err\n\t}\n\treturn TransformObject(Object(s), p.Transform()).ToStruct(), err\n}\n\n\/\/ Client returns the client version of p.\nfunc (p *Pipeline) Client() *PipelineClient {\n\treturn (*PipelineClient)(p)\n}\n\n\/\/ GetPipeline returns a derived pipeline which yields the pointer field given.\nfunc (p *Pipeline) GetPipeline(off int) *Pipeline {\n\treturn p.GetPipelineDefault(off, nil, 0)\n}\n\n\/\/ GetPipelineDefault returns a derived pipeline which yields the pointer field given,\n\/\/ defaulting to the value given.\nfunc (p *Pipeline) GetPipelineDefault(off int, dseg *Segment, doff int) *Pipeline {\n\treturn &Pipeline{\n\t\tanswer: p.answer,\n\t\tparent: p,\n\t\top: PipelineOp{\n\t\t\tField: off,\n\t\t\tDefaultSegment: dseg,\n\t\t\tDefaultOffset: doff,\n\t\t},\n\t}\n}\n\n\/\/ PipelineClient implements Client by calling to the pipeline's answer.\ntype PipelineClient Pipeline\n\nfunc (pc *PipelineClient) transform() []PipelineOp {\n\treturn (*Pipeline)(pc).Transform()\n}\n\nfunc (pc *PipelineClient) Call(call *Call) Answer {\n\treturn pc.answer.PipelineCall(pc.transform(), call)\n}\n\nfunc (pc *PipelineClient) Close() error {\n\treturn pc.answer.PipelineClose(pc.transform())\n}\n\n\/\/ A PipelineOp describes a step in transforming a pipeline.\n\/\/ It maps closely with the PromisedAnswer.Op struct in rpc.capnp.\ntype PipelineOp struct {\n\tField int\n\tDefaultSegment *Segment\n\tDefaultOffset int\n}\n\n\/\/ String returns a human-readable description of op.\nfunc (op PipelineOp) String() string {\n\ts := make([]byte, 0, 32)\n\ts = append(s, \"get field \"...)\n\ts = strconv.AppendInt(s, int64(op.Field), 10)\n\tif op.DefaultSegment == nil {\n\t\treturn string(s)\n\t}\n\ts = append(s, \" with default\"...)\n\treturn string(s)\n}\n\n\/\/ A Method identifies a method along with an optional human-readable\n\/\/ description of the method.\ntype Method struct {\n\tInterfaceID uint64\n\tMethodID uint16\n\n\t\/\/ Canonical name of the interface. May be empty.\n\tInterfaceName string\n\t\/\/ Method name as it appears in the schema. May be empty.\n\tMethodName string\n}\n\n\/\/ String returns a formatted string containing the interface name or\n\/\/ the method name if present, otherwise it uses the raw IDs.\n\/\/ This is suitable for use in error messages and logs.\nfunc (m *Method) String() string {\n\tbuf := make([]byte, 0, 128)\n\tif m.InterfaceName == \"\" {\n\t\tbuf = append(buf, '@', '0', 'x')\n\t\tbuf = strconv.AppendUint(buf, m.InterfaceID, 16)\n\t} else {\n\t\tbuf = append(buf, m.InterfaceName...)\n\t}\n\tbuf = append(buf, '.')\n\tif m.MethodName == \"\" {\n\t\tbuf = append(buf, '@')\n\t\tbuf = strconv.AppendUint(buf, uint64(m.MethodID), 10)\n\t} else {\n\t\tbuf = append(buf, m.MethodName...)\n\t}\n\treturn string(buf)\n}\n\n\/\/ TransformObject applies a sequence of pipeline operations to an object\n\/\/ and returns the result.\nfunc TransformObject(p Object, transform []PipelineOp) Object {\n\tn := len(transform)\n\tif n == 0 {\n\t\treturn p\n\t}\n\ts := p.ToStruct()\n\tfor _, op := range transform[:n-1] {\n\t\tfield := s.GetObject(op.Field)\n\t\tif op.DefaultSegment == nil {\n\t\t\ts = field.ToStruct()\n\t\t} else {\n\t\t\ts = field.ToStructDefault(op.DefaultSegment, op.DefaultOffset)\n\t\t}\n\t}\n\top := transform[n-1]\n\tp = s.GetObject(op.Field)\n\tif op.DefaultSegment != nil {\n\t\tp = Object(p.ToStructDefault(op.DefaultSegment, op.DefaultOffset))\n\t}\n\treturn p\n}\n\ntype immediateAnswer Object\n\n\/\/ ImmediateAnswer returns an Answer that accesses s.\nfunc ImmediateAnswer(s Object) Answer {\n\treturn immediateAnswer(s)\n}\n\nfunc (ans immediateAnswer) Struct() (Struct, error) {\n\treturn Struct(ans), nil\n}\n\nfunc (ans immediateAnswer) PipelineCall(transform []PipelineOp, call *Call) Answer {\n\tc := TransformObject(Object(ans), transform).ToInterface().Client()\n\tif c == nil {\n\t\treturn ErrorAnswer(ErrNullClient)\n\t}\n\treturn c.Call(call)\n}\n\nfunc (ans immediateAnswer) PipelineClose(transform []PipelineOp) error {\n\tc := TransformObject(Object(ans), transform).ToInterface().Client()\n\tif c == nil {\n\t\treturn ErrNullClient\n\t}\n\treturn c.Close()\n}\n\ntype errorAnswer struct {\n\te error\n}\n\n\/\/ ErrorAnswer returns a Answer that always returns error e.\nfunc ErrorAnswer(e error) Answer {\n\treturn errorAnswer{e}\n}\n\nfunc (ans errorAnswer) Struct() (Struct, error) {\n\treturn Struct{}, ans.e\n}\n\nfunc (ans errorAnswer) PipelineCall([]PipelineOp, *Call) Answer {\n\treturn ans\n}\n\nfunc (ans errorAnswer) PipelineClose([]PipelineOp) error {\n\treturn ans.e\n}\n\ntype errorClient struct {\n\te error\n}\n\n\/\/ ErrorClient returns a Client that always returns error e.\nfunc ErrorClient(e error) Client {\n\treturn errorClient{e}\n}\n\nfunc (ec errorClient) Call(*Call) Answer {\n\treturn ErrorAnswer(ec.e)\n}\n\nfunc (ec errorClient) Close() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package coremain\n\nconst (\n\tcoreName = \"CoreDNS\"\n\tcoreVersion = \"1.0.1\"\n\n\tserverType = \"dns\"\n)\n<commit_msg>Release 1.0.2<commit_after>package coremain\n\nconst (\n\tcoreName = \"CoreDNS\"\n\tcoreVersion = \"1.0.2\"\n\n\tserverType = \"dns\"\n)\n<|endoftext|>"} {"text":"<commit_before>package coremain\n\nconst (\n\tcoreName = \"CoreDNS\"\n\tcoreVersion = \"010\"\n\n\tserverType = \"dns\"\n)\n<commit_msg>Release 011<commit_after>package coremain\n\nconst (\n\tcoreName = \"CoreDNS\"\n\tcoreVersion = \"011\"\n\n\tserverType = \"dns\"\n)\n<|endoftext|>"} {"text":"<commit_before>package nacl\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"golang.org\/x\/crypto\/nacl\/secretbox\"\n\n\t\"github.com\/lucas-clemente\/git-cr\/git\"\n)\n\ntype naclRepo struct {\n\t\/\/ repo is not embedded to prevent acidentally leaking info if new methods are added to git.Repo\n\trepo git.Repo\n\tkey [32]byte\n}\n\n\/\/ NewNaClRepo returns a git.Repo implementation that encrypts data using nacl\nfunc NewNaClRepo(backend git.Repo, key [32]byte) (git.Repo, error) {\n\treturn &naclRepo{\n\t\trepo: backend,\n\t\tkey: key,\n\t}, nil\n}\n\nfunc (r *naclRepo) FindDelta(from, to string) (git.Delta, error) {\n\treturn r.repo.FindDelta(from, to)\n}\n\nfunc (r *naclRepo) ListAncestors(target string) ([]string, error) {\n\treturn r.repo.ListAncestors(target)\n}\n\nfunc (r *naclRepo) ReadRefs() (io.ReadCloser, error) {\n\tbackendReader, err := r.repo.ReadRefs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decrypt(backendReader, &r.key)\n}\n\nfunc (r *naclRepo) WriteRefs(rdr io.Reader) error {\n\tencryptedRdr, err := encrypt(rdr, &r.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.repo.WriteRefs(encryptedRdr)\n}\n\nfunc (r *naclRepo) ReadPackfile(d git.Delta) (io.ReadCloser, error) {\n\tbackendReader, err := r.repo.ReadPackfile(d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decrypt(backendReader, &r.key)\n}\n\nfunc (r *naclRepo) WritePackfile(from, to string, rdr io.Reader) error {\n\tencryptedRdr, err := encrypt(rdr, &r.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.repo.WritePackfile(from, to, encryptedRdr)\n}\n\nfunc encrypt(in io.Reader, key *[32]byte) (io.Reader, error) {\n\tdata, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonce := makeNonce()\n\tout := secretbox.Seal(nonce[:], data, nonce, key)\n\treturn bytes.NewBuffer(out), nil\n}\n\nfunc decrypt(in io.ReadCloser, key *[32]byte) (io.ReadCloser, error) {\n\tdefer in.Close()\n\n\tdata, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(data) < 24 {\n\t\treturn nil, errors.New(\"error in encrypted message\")\n\t}\n\tvar nonce [24]byte\n\tcopy(nonce[:], data)\n\tdata = data[24:]\n\n\tout, ok := secretbox.Open([]byte{}, data, &nonce, key)\n\tif !ok {\n\t\treturn nil, errors.New(\"error verifying encrypted data while reading refs\")\n\t}\n\treturn ioutil.NopCloser(bytes.NewBuffer(out)), nil\n}\n\nfunc makeNonce() *[24]byte {\n\tvar nonce [24]byte\n\t_, err := rand.Read(nonce[:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &nonce\n}\n<commit_msg>improve error messages<commit_after>package nacl\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"golang.org\/x\/crypto\/nacl\/secretbox\"\n\n\t\"github.com\/lucas-clemente\/git-cr\/git\"\n)\n\ntype naclRepo struct {\n\t\/\/ repo is not embedded to prevent acidentally leaking info if new methods are added to git.Repo\n\trepo git.Repo\n\tkey [32]byte\n}\n\n\/\/ NewNaClRepo returns a git.Repo implementation that encrypts data using nacl\nfunc NewNaClRepo(backend git.Repo, key [32]byte) (git.Repo, error) {\n\treturn &naclRepo{\n\t\trepo: backend,\n\t\tkey: key,\n\t}, nil\n}\n\nfunc (r *naclRepo) FindDelta(from, to string) (git.Delta, error) {\n\treturn r.repo.FindDelta(from, to)\n}\n\nfunc (r *naclRepo) ListAncestors(target string) ([]string, error) {\n\treturn r.repo.ListAncestors(target)\n}\n\nfunc (r *naclRepo) ReadRefs() (io.ReadCloser, error) {\n\tbackendReader, err := r.repo.ReadRefs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decrypt(backendReader, &r.key)\n}\n\nfunc (r *naclRepo) WriteRefs(rdr io.Reader) error {\n\tencryptedRdr, err := encrypt(rdr, &r.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.repo.WriteRefs(encryptedRdr)\n}\n\nfunc (r *naclRepo) ReadPackfile(d git.Delta) (io.ReadCloser, error) {\n\tbackendReader, err := r.repo.ReadPackfile(d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decrypt(backendReader, &r.key)\n}\n\nfunc (r *naclRepo) WritePackfile(from, to string, rdr io.Reader) error {\n\tencryptedRdr, err := encrypt(rdr, &r.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.repo.WritePackfile(from, to, encryptedRdr)\n}\n\nfunc encrypt(in io.Reader, key *[32]byte) (io.Reader, error) {\n\tdata, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnonce := makeNonce()\n\tout := secretbox.Seal(nonce[:], data, nonce, key)\n\treturn bytes.NewBuffer(out), nil\n}\n\nfunc decrypt(in io.ReadCloser, key *[32]byte) (io.ReadCloser, error) {\n\tdefer in.Close()\n\n\tdata, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(data) < 24 {\n\t\treturn nil, errors.New(\"encrypted message is too short\")\n\t}\n\tvar nonce [24]byte\n\tcopy(nonce[:], data)\n\tdata = data[24:]\n\n\tout, ok := secretbox.Open([]byte{}, data, &nonce, key)\n\tif !ok {\n\t\treturn nil, errors.New(\"error verifying encrypted data\")\n\t}\n\treturn ioutil.NopCloser(bytes.NewBuffer(out)), nil\n}\n\nfunc makeNonce() *[24]byte {\n\tvar nonce [24]byte\n\t_, err := rand.Read(nonce[:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &nonce\n}\n<|endoftext|>"} {"text":"<commit_before>package crypto\n\nimport (\n\t\"testing\"\n)\n\n\/\/ TestRandIntnPanics tests that RandIntn panics if n <= 0.\nfunc TestRandIntnPanics(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"expected panic for n <= 0\")\n\t\t}\n\t}()\n\tRandIntn(0)\n\tRandIntn(-1)\n}\n<commit_msg>Add error checks to TestRandIntnPanics<commit_after>package crypto\n\nimport (\n\t\"testing\"\n)\n\n\/\/ TestRandIntnPanics tests that RandIntn panics if n <= 0.\nfunc TestRandIntnPanics(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"expected panic for n <= 0\")\n\t\t}\n\t}()\n\n\t_, err := RandIntn(0)\n\tif err != nil {\n\t\tt.Error(\"expected panic on n <= 0, not error\")\n\t}\n\n\t_, err = RandIntn(-1)\n\tif err != nil {\n\t\tt.Error(\"expected panic on n <= 0, not error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package errors\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ Frame represents a program counter inside a stack frame.\ntype Frame uintptr\n\n\/\/ pc returns the program counter for this frame;\n\/\/ multiple frames may have the same PC value.\nfunc (f Frame) pc() uintptr { return uintptr(f) - 1 }\n\n\/\/ file returns the full path to the file that contains the\n\/\/ function for this Frame's pc.\nfunc (f Frame) file() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\tfile, _ := fn.FileLine(f.pc())\n\treturn file\n}\n\n\/\/ line returns the line number of source code of the\n\/\/ function for this Frame's pc.\nfunc (f Frame) line() int {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn 0\n\t}\n\t_, line := fn.FileLine(f.pc())\n\treturn line\n}\n\n\/\/ Format formats the frame according to the fmt.Formatter interface.\n\/\/\n\/\/ %s source file\n\/\/ %d source line\n\/\/ %n function name\n\/\/ %v equivalent to %s:%d\n\/\/\n\/\/ Format accepts flags that alter the printing of some verbs, as follows:\n\/\/\n\/\/ %+s path of source file relative to the compile time GOPATH\n\/\/ %+v equivalent to %+s:%d\nfunc (f Frame) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 's':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tpc := f.pc()\n\t\t\tfn := runtime.FuncForPC(pc)\n\t\t\tif fn == nil {\n\t\t\t\tio.WriteString(s, \"unknown\")\n\t\t\t} else {\n\t\t\t\tfile, _ := fn.FileLine(pc)\n\t\t\t\tfmt.Fprintf(s, \"%s\\n\\t%s\", fn.Name(), file)\n\t\t\t}\n\t\tdefault:\n\t\t\tio.WriteString(s, path.Base(f.file()))\n\t\t}\n\tcase 'd':\n\t\tfmt.Fprintf(s, \"%d\", f.line())\n\tcase 'n':\n\t\tname := runtime.FuncForPC(f.pc()).Name()\n\t\tio.WriteString(s, funcname(name))\n\tcase 'v':\n\t\tf.Format(s, 's')\n\t\tio.WriteString(s, \":\")\n\t\tf.Format(s, 'd')\n\t}\n}\n\n\/\/ StackTrace is stack of Frames from innermost (newest) to outermost (oldest).\ntype StackTrace []Frame\n\n\/\/ Format formats the stack of Frames according to the fmt.Formatter interface.\n\/\/\n\/\/ %s\tlists source files for each Frame in the stack\n\/\/ %v\tlists the source file and line number for each Frame in the stack\n\/\/\n\/\/ Format accepts flags that alter the printing of some verbs, as follows:\n\/\/\n\/\/ %+v Prints filename, function, and line number for each Frame in the stack.\nfunc (st StackTrace) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tfor _, f := range st {\n\t\t\t\tfmt.Fprintf(s, \"\\n%+v\", f)\n\t\t\t}\n\t\tcase s.Flag('#'):\n\t\t\tfmt.Fprintf(s, \"%#v\", []Frame(st))\n\t\tdefault:\n\t\t\tfmt.Fprintf(s, \"%v\", []Frame(st))\n\t\t}\n\tcase 's':\n\t\tfmt.Fprintf(s, \"%s\", []Frame(st))\n\t}\n}\n\n\/\/ stack represents a stack of program counters.\ntype stack []uintptr\n\nfunc (s *stack) Format(st fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase st.Flag('+'):\n\t\t\tfor _, pc := range *s {\n\t\t\t\tf := Frame(pc)\n\t\t\t\tfmt.Fprintf(st, \"\\n%+v\", f)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *stack) StackTrace() StackTrace {\n\tf := make([]Frame, len(*s))\n\tfor i := 0; i < len(f); i++ {\n\t\tf[i] = Frame((*s)[i])\n\t}\n\treturn f\n}\n\nfunc callers() *stack {\n\tconst depth = 32\n\tvar pcs [depth]uintptr\n\tn := runtime.Callers(3, pcs[:])\n\tvar st stack = pcs[0:n]\n\treturn &st\n}\n\n\/\/ funcname removes the path prefix component of a function's name reported by func.Name().\nfunc funcname(name string) string {\n\ti := strings.LastIndex(name, \"\/\")\n\tname = name[i+1:]\n\ti = strings.Index(name, \".\")\n\treturn name[i+1:]\n}\n\nfunc trimGOPATH(name, file string) string {\n\t\/\/ Here we want to get the source file path relative to the compile time\n\t\/\/ GOPATH. As of Go 1.6.x there is no direct way to know the compiled\n\t\/\/ GOPATH at runtime, but we can infer the number of path segments in the\n\t\/\/ GOPATH. We note that fn.Name() returns the function name qualified by\n\t\/\/ the import path, which does not include the GOPATH. Thus we can trim\n\t\/\/ segments from the beginning of the file path until the number of path\n\t\/\/ separators remaining is one more than the number of path separators in\n\t\/\/ the function name. For example, given:\n\t\/\/\n\t\/\/ GOPATH \/home\/user\n\t\/\/ file \/home\/user\/src\/pkg\/sub\/file.go\n\t\/\/ fn.Name() pkg\/sub.Type.Method\n\t\/\/\n\t\/\/ We want to produce:\n\t\/\/\n\t\/\/ pkg\/sub\/file.go\n\t\/\/\n\t\/\/ From this we can easily see that fn.Name() has one less path separator\n\t\/\/ than our desired output. We count separators from the end of the file\n\t\/\/ path until it finds two more than in the function name and then move\n\t\/\/ one character forward to preserve the initial path segment without a\n\t\/\/ leading separator.\n\tconst sep = \"\/\"\n\tgoal := strings.Count(name, sep) + 2\n\ti := len(file)\n\tfor n := 0; n < goal; n++ {\n\t\ti = strings.LastIndex(file[:i], sep)\n\t\tif i == -1 {\n\t\t\t\/\/ not enough separators found, set i so that the slice expression\n\t\t\t\/\/ below leaves file unmodified\n\t\t\ti = -len(sep)\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ get back to 0 or trim the leading separator\n\tfile = file[i+len(sep):]\n\treturn file\n}\n<commit_msg>Fix doc comment for exported Format func (#137)<commit_after>package errors\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ Frame represents a program counter inside a stack frame.\ntype Frame uintptr\n\n\/\/ pc returns the program counter for this frame;\n\/\/ multiple frames may have the same PC value.\nfunc (f Frame) pc() uintptr { return uintptr(f) - 1 }\n\n\/\/ file returns the full path to the file that contains the\n\/\/ function for this Frame's pc.\nfunc (f Frame) file() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\tfile, _ := fn.FileLine(f.pc())\n\treturn file\n}\n\n\/\/ line returns the line number of source code of the\n\/\/ function for this Frame's pc.\nfunc (f Frame) line() int {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn 0\n\t}\n\t_, line := fn.FileLine(f.pc())\n\treturn line\n}\n\n\/\/ Format formats the frame according to the fmt.Formatter interface.\n\/\/\n\/\/ %s source file\n\/\/ %d source line\n\/\/ %n function name\n\/\/ %v equivalent to %s:%d\n\/\/\n\/\/ Format accepts flags that alter the printing of some verbs, as follows:\n\/\/\n\/\/ %+s function name and path of source file relative to the compile time\n\/\/ GOPATH separated by \\n\\t (<funcname>\\n\\t<path>)\n\/\/ %+v equivalent to %+s:%d\nfunc (f Frame) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 's':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tpc := f.pc()\n\t\t\tfn := runtime.FuncForPC(pc)\n\t\t\tif fn == nil {\n\t\t\t\tio.WriteString(s, \"unknown\")\n\t\t\t} else {\n\t\t\t\tfile, _ := fn.FileLine(pc)\n\t\t\t\tfmt.Fprintf(s, \"%s\\n\\t%s\", fn.Name(), file)\n\t\t\t}\n\t\tdefault:\n\t\t\tio.WriteString(s, path.Base(f.file()))\n\t\t}\n\tcase 'd':\n\t\tfmt.Fprintf(s, \"%d\", f.line())\n\tcase 'n':\n\t\tname := runtime.FuncForPC(f.pc()).Name()\n\t\tio.WriteString(s, funcname(name))\n\tcase 'v':\n\t\tf.Format(s, 's')\n\t\tio.WriteString(s, \":\")\n\t\tf.Format(s, 'd')\n\t}\n}\n\n\/\/ StackTrace is stack of Frames from innermost (newest) to outermost (oldest).\ntype StackTrace []Frame\n\n\/\/ Format formats the stack of Frames according to the fmt.Formatter interface.\n\/\/\n\/\/ %s\tlists source files for each Frame in the stack\n\/\/ %v\tlists the source file and line number for each Frame in the stack\n\/\/\n\/\/ Format accepts flags that alter the printing of some verbs, as follows:\n\/\/\n\/\/ %+v Prints filename, function, and line number for each Frame in the stack.\nfunc (st StackTrace) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tfor _, f := range st {\n\t\t\t\tfmt.Fprintf(s, \"\\n%+v\", f)\n\t\t\t}\n\t\tcase s.Flag('#'):\n\t\t\tfmt.Fprintf(s, \"%#v\", []Frame(st))\n\t\tdefault:\n\t\t\tfmt.Fprintf(s, \"%v\", []Frame(st))\n\t\t}\n\tcase 's':\n\t\tfmt.Fprintf(s, \"%s\", []Frame(st))\n\t}\n}\n\n\/\/ stack represents a stack of program counters.\ntype stack []uintptr\n\nfunc (s *stack) Format(st fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase st.Flag('+'):\n\t\t\tfor _, pc := range *s {\n\t\t\t\tf := Frame(pc)\n\t\t\t\tfmt.Fprintf(st, \"\\n%+v\", f)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *stack) StackTrace() StackTrace {\n\tf := make([]Frame, len(*s))\n\tfor i := 0; i < len(f); i++ {\n\t\tf[i] = Frame((*s)[i])\n\t}\n\treturn f\n}\n\nfunc callers() *stack {\n\tconst depth = 32\n\tvar pcs [depth]uintptr\n\tn := runtime.Callers(3, pcs[:])\n\tvar st stack = pcs[0:n]\n\treturn &st\n}\n\n\/\/ funcname removes the path prefix component of a function's name reported by func.Name().\nfunc funcname(name string) string {\n\ti := strings.LastIndex(name, \"\/\")\n\tname = name[i+1:]\n\ti = strings.Index(name, \".\")\n\treturn name[i+1:]\n}\n\nfunc trimGOPATH(name, file string) string {\n\t\/\/ Here we want to get the source file path relative to the compile time\n\t\/\/ GOPATH. As of Go 1.6.x there is no direct way to know the compiled\n\t\/\/ GOPATH at runtime, but we can infer the number of path segments in the\n\t\/\/ GOPATH. We note that fn.Name() returns the function name qualified by\n\t\/\/ the import path, which does not include the GOPATH. Thus we can trim\n\t\/\/ segments from the beginning of the file path until the number of path\n\t\/\/ separators remaining is one more than the number of path separators in\n\t\/\/ the function name. For example, given:\n\t\/\/\n\t\/\/ GOPATH \/home\/user\n\t\/\/ file \/home\/user\/src\/pkg\/sub\/file.go\n\t\/\/ fn.Name() pkg\/sub.Type.Method\n\t\/\/\n\t\/\/ We want to produce:\n\t\/\/\n\t\/\/ pkg\/sub\/file.go\n\t\/\/\n\t\/\/ From this we can easily see that fn.Name() has one less path separator\n\t\/\/ than our desired output. We count separators from the end of the file\n\t\/\/ path until it finds two more than in the function name and then move\n\t\/\/ one character forward to preserve the initial path segment without a\n\t\/\/ leading separator.\n\tconst sep = \"\/\"\n\tgoal := strings.Count(name, sep) + 2\n\ti := len(file)\n\tfor n := 0; n < goal; n++ {\n\t\ti = strings.LastIndex(file[:i], sep)\n\t\tif i == -1 {\n\t\t\t\/\/ not enough separators found, set i so that the slice expression\n\t\t\t\/\/ below leaves file unmodified\n\t\t\ti = -len(sep)\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ get back to 0 or trim the leading separator\n\tfile = file[i+len(sep):]\n\treturn file\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"fmt\"\n)\n\nfunc swap (data []int, start int, end int) {\n s := data[start]\n e := data[end]\n data[start] = e\n data[end] = s\n}\n\/*\n Stable\n O(1) extra space\n O(n2) comparisons and swaps\n Adaptive: O(n) time when nearly sorted\n Very low overhead\n*\/\nfunc insertionSort(data []int) []int {\n for i := 1; i < len(data); i++ {\n for k := i; k > 0; k-- {\n if data[k] < data[k-1] {\n swap(data, k, k - 1)\n }\n }\n }\n return data;\n}\n\n\/*\n Not stable\n O(1) extra space\n Θ(n2) comparisons\n Θ(n) swaps\n Not adaptive\n*\/\nfunc selectionSort(data []int) []int {\n for i := 0; i < len(data); i++ {\n for k := i+1; k < len(data); k++ {\n if data[k] < data[i] {\n swap(data, i, k)\n }\n }\n }\n return data\n}\n\n\/*\n Stable\n O(1) extra space\n O(n2) comparisons and swaps\n Adaptive: O(n) when nearly sorted\n*\/\nfunc bubbleSort(data []int) []int {\n for i := 0; i < len(data); i++ {\n for k := 0; k < len(data) - 1 - i; k++ {\n if data[k + 1] < data[k] {\n swap(data, k + 1, k)\n }\n }\n }\n return data\n}\n\n\nfunc merge (left []int, right []int) []int {\n result := make([]int, len(left) + len(right))\n lc := 0\n rc := 0\n for i :=0; ; i++ {\n if i >= len(left) + len(right) {\n break\n }\n \/\/rc >= len(right) || lc >= len(left) || lif rc < len(right)\n if len(left) > lc && len(right) <= rc {\n result[i] = left[lc]\n lc++\n } else {\n if len(right) > rc && len(left) <= lc {\n result[i] = right[rc]\n rc++\n } else {\n if left[lc] < right[rc] {\n result[i] = left[lc]\n lc++\n } else {\n result[i] = right[rc]\n rc++\n }\n }\n }\n }\n return result\n}\n\n\nfunc mergeSort(data []int) []int {\n start := 0\n end := len(data)\n if len(data) < 2 {\n return data\n }\n middle := (end - start)\/2\n leftData := mergeSort(data[:middle])\n rightData := mergeSort(data[middle:])\n return merge(leftData, rightData)\n}\n\n\nfunc main() {\n insertionSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n selectionSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n bubbleSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n mergeSortData := []int{44, 4, 2, 47, 4, 8, 12, 1}\n fmt.Println(\"initial:\", insertionSortData)\n fmt.Println(\"inserted sort:\", insertionSort(insertionSortData))\n fmt.Println(\"selection sort:\", selectionSort(selectionSortData))\n fmt.Println(\"bubble sort:\", bubbleSort(bubbleSortData))\n fmt.Println(\"merge sort:\", mergeSort(mergeSortData))\n}\n<commit_msg>formating<commit_after>package main\n\nimport (\n \"fmt\"\n)\n\nfunc swap (data []int, start int, end int) {\n s := data[start]\n e := data[end]\n data[start] = e\n data[end] = s\n}\n\/*\n Stable\n O(1) extra space\n O(n2) comparisons and swaps\n Adaptive: O(n) time when nearly sorted\n Very low overhead\n*\/\nfunc insertionSort(data []int) []int {\n for i := 1; i < len(data); i++ {\n for k := i; k > 0; k-- {\n if data[k] < data[k-1] {\n swap(data, k, k - 1)\n }\n }\n }\n return data;\n}\n\n\/*\n Not stable\n O(1) extra space\n Θ(n2) comparisons\n Θ(n) swaps\n Not adaptive\n*\/\nfunc selectionSort(data []int) []int {\n for i := 0; i < len(data); i++ {\n for k := i+1; k < len(data); k++ {\n if data[k] < data[i] {\n swap(data, i, k)\n }\n }\n }\n return data\n}\n\n\/*\n Stable\n O(1) extra space\n O(n2) comparisons and swaps\n Adaptive: O(n) when nearly sorted\n*\/\nfunc bubbleSort(data []int) []int {\n for i := 0; i < len(data); i++ {\n for k := 0; k < len(data) - 1 - i; k++ {\n if data[k + 1] < data[k] {\n swap(data, k + 1, k)\n }\n }\n }\n return data\n}\n\n\nfunc merge(left []int, right []int) []int {\n result := make([]int, len(left) + len(right))\n lc := 0\n rc := 0\n for i :=0; ; i++ {\n if i >= len(left) + len(right) {\n break\n }\n \/\/rc >= len(right) || lc >= len(left) || lif rc < len(right)\n if len(left) > lc && len(right) <= rc {\n result[i] = left[lc]\n lc++\n } else {\n if len(right) > rc && len(left) <= lc {\n result[i] = right[rc]\n rc++\n } else {\n if left[lc] < right[rc] {\n result[i] = left[lc]\n lc++\n } else {\n result[i] = right[rc]\n rc++\n }\n }\n }\n }\n return result\n}\n\n\nfunc mergeSort(data []int) []int {\n start := 0\n end := len(data)\n if len(data) < 2 {\n return data\n }\n middle := (end - start)\/2\n leftData := mergeSort(data[:middle])\n rightData := mergeSort(data[middle:])\n return merge(leftData, rightData)\n}\n\n\nfunc main() {\n insertionSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n selectionSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n bubbleSortData := []int{44, 4, 2, 47, 4, 8, 12, 1, 0, 2, 9, 23}\n mergeSortData := []int{44, 4, 2, 47, 4, 8, 12, 1}\n fmt.Println(\"initial:\", insertionSortData)\n fmt.Println(\"inserted sort:\", insertionSort(insertionSortData))\n fmt.Println(\"selection sort:\", selectionSort(selectionSortData))\n fmt.Println(\"bubble sort:\", bubbleSort(bubbleSortData))\n fmt.Println(\"merge sort:\", mergeSort(mergeSortData))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport ()\n\ntype Stats struct {\n\twords int\n\tseconds int\n\terrors int\n\twordlist string\n}\n\nfunc (s *Stats) loadStats(filename string) bool {\n\t\/\/ TODO: Implement Later\n\ts.words = 10\n\ts.seconds = 11\n\ts.errors = 12\n\ts.wordlist = \"words.txt\"\n\n\treturn true\n}\n\nfunc (s *Stats) saveStats(filename string) bool {\n\t\/\/ TODO: Implement Later\n\n\treturn true\n}\n\nfunc (s *Stats) wpm() float64 {\n\tif s.seconds == 0 {\n\t\treturn 0\n\t}\n\treturn float64(s.words \/ s.seconds \/ 60)\n}\n<commit_msg>Fixed WPM calculation<commit_after>package main\n\nimport ()\n\ntype Stats struct {\n\twords int\n\tseconds int\n\terrors int\n\twordlist string\n}\n\nfunc (s *Stats) loadStats(filename string) bool {\n\t\/\/ TODO: Implement Later\n\ts.words = 10\n\ts.seconds = 11\n\ts.errors = 12\n\ts.wordlist = \"words.txt\"\n\n\treturn true\n}\n\nfunc (s *Stats) saveStats(filename string) bool {\n\t\/\/ TODO: Implement Later\n\n\treturn true\n}\n\nfunc (s *Stats) wpm() int {\n\tif s.seconds == 0 {\n\t\treturn 0\n\t}\n\treturn s.words \/ s.seconds \/ 60\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 The SkyDNS Authors. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License (MIT) that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/rcrowley\/go-metrics\"\n\t\"github.com\/rcrowley\/go-metrics\/influxdb\"\n\t\"github.com\/rcrowley\/go-metrics\/stathat\"\n\t\"net\"\n\t\"os\"\n)\n\nvar (\n\tStatsForwardCount metrics.Counter\n\tStatsLookupCount metrics.Counter\n\tStatsRequestCount metrics.Counter\n\tStatsDnssecOkCount metrics.Counter\n\tStatsDnssecCacheMiss metrics.Counter\n\tStatsNameErrorCount metrics.Counter\n\tStatsNoDataCount metrics.Counter\n\n\tinfluxConfig *influxdb.Config\n\tgraphiteServer = os.Getenv(\"GRAPHITE_SERVER\")\n\tgraphitePrefix = os.Getenv(\"GRAPHITE_PREFIX\")\n\tstathatUser = os.Getenv(\"STATHAT_USER\")\n\tinfluxServer = os.Getenv(\"INFLUX_SERVER\")\n\tinfluxDatabase = os.Getenv(\"INFLUX_DATABASE\")\n\tinfluxUser = os.Getenv(\"INFLUX_USER\")\n\tinfluxPassword = os.Getenv(\"INFLUX_PASSWORD\")\n)\n\nfunc init() {\n\tinfluxConfig = &influxdb.Config{}\n\tinfluxConfig.Host = influxServer\n\tinfluxConfig.Database = influxDatabase\n\tinfluxConfig.Username = influxUser\n\tinfluxConfig.Password = influxPassword\n\n\tif graphitePrefix == \"\" {\n\t\tgraphitePrefix = \"skydns\"\n\t}\n\n\tStatsForwardCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-forward-requests\", StatsForwardCount)\n\n\tStatsDnssecOkCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-dnssecok-requests\", StatsDnssecOkCount)\n\n\tStatsDnssecCacheMiss = metrics.NewCounter()\n\tmetrics.Register(\"skydns-dnssec-cache-miss\", StatsDnssecCacheMiss)\n\n\tStatsLookupCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-internal-lookups\", StatsLookupCount)\n\n\tStatsRequestCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-requests\", StatsRequestCount)\n\n\tStatsNameErrorCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-nameerror-responses\", StatsNameErrorCount)\n\n\tStatsNoDataCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-nodata-responses\", StatsNoDataCount)\n}\n\nfunc statsCollect() {\n\tif graphiteServer != \"\" {\n\t\taddr, err := net.ResolveTCPAddr(\"tcp\", graphiteServer)\n\t\tif err == nil {\n\t\t\tgo metrics.Graphite(metrics.DefaultRegistry, 10e9, graphitePrefix, addr)\n\t\t}\n\t}\n\n\tif stathatUser != \"\" {\n\t\tgo stathat.Stathat(metrics.DefaultRegistry, 10e9, stathatUser)\n\t}\n\n\tif influxConfig.Host != \"\" {\n\t\tgo influxdb.Influxdb(metrics.DefaultRegistry, 10e9, influxConfig)\n\t}\n}\n<commit_msg>Switch to internal fork of go-metrics<commit_after>\/\/ Copyright (c) 2014 The SkyDNS Authors. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License (MIT) that can be\n\/\/ found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/miekg\/go-metrics\"\n\t\"github.com\/miekg\/go-metrics\/stathat\"\n\t\"github.com\/miekg\/go-metrics\/influxdb\"\n\t\"net\"\n\t\"os\"\n)\n\nvar (\n\tStatsForwardCount metrics.Counter\n\tStatsLookupCount metrics.Counter\n\tStatsRequestCount metrics.Counter\n\tStatsDnssecOkCount metrics.Counter\n\tStatsDnssecCacheMiss metrics.Counter\n\tStatsNameErrorCount metrics.Counter\n\tStatsNoDataCount metrics.Counter\n\n\tinfluxConfig *influxdb.Config\n\tgraphiteServer = os.Getenv(\"GRAPHITE_SERVER\")\n\tgraphitePrefix = os.Getenv(\"GRAPHITE_PREFIX\")\n\tstathatUser = os.Getenv(\"STATHAT_USER\")\n\tinfluxServer = os.Getenv(\"INFLUX_SERVER\")\n\tinfluxDatabase = os.Getenv(\"INFLUX_DATABASE\")\n\tinfluxUser = os.Getenv(\"INFLUX_USER\")\n\tinfluxPassword = os.Getenv(\"INFLUX_PASSWORD\")\n)\n\nfunc init() {\n\tinfluxConfig = &influxdb.Config{}\n\tinfluxConfig.Host = influxServer\n\tinfluxConfig.Database = influxDatabase\n\tinfluxConfig.Username = influxUser\n\tinfluxConfig.Password = influxPassword\n\n\tif graphitePrefix == \"\" {\n\t\tgraphitePrefix = \"skydns\"\n\t}\n\n\tStatsForwardCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-forward-requests\", StatsForwardCount)\n\n\tStatsDnssecOkCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-dnssecok-requests\", StatsDnssecOkCount)\n\n\tStatsDnssecCacheMiss = metrics.NewCounter()\n\tmetrics.Register(\"skydns-dnssec-cache-miss\", StatsDnssecCacheMiss)\n\n\tStatsLookupCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-internal-lookups\", StatsLookupCount)\n\n\tStatsRequestCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-requests\", StatsRequestCount)\n\n\tStatsNameErrorCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-nameerror-responses\", StatsNameErrorCount)\n\n\tStatsNoDataCount = metrics.NewCounter()\n\tmetrics.Register(\"skydns-nodata-responses\", StatsNoDataCount)\n}\n\nfunc statsCollect() {\n\tif graphiteServer != \"\" {\n\t\taddr, err := net.ResolveTCPAddr(\"tcp\", graphiteServer)\n\t\tif err == nil {\n\t\t\tgo metrics.Graphite(metrics.DefaultRegistry, 10e9, graphitePrefix, addr)\n\t\t}\n\t}\n\n\tif stathatUser != \"\" {\n\t\tgo stathat.Stathat(metrics.DefaultRegistry, 10e9, stathatUser)\n\t}\n\n\tif influxConfig.Host != \"\" {\n\t\tgo influxdb.Influxdb(metrics.DefaultRegistry, 10e9, influxConfig)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package libnetwork\n\nimport (\n\t\"fmt\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libnetwork\/datastore\"\n)\n\nfunc (c *controller) initStores() error {\n\tc.Lock()\n\tif c.cfg == nil {\n\t\tc.Unlock()\n\t\treturn nil\n\t}\n\tscopeConfigs := c.cfg.Scopes\n\tc.Unlock()\n\n\tfor scope, scfg := range scopeConfigs {\n\t\tstore, err := datastore.NewDataStore(scope, scfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Lock()\n\t\tc.stores = append(c.stores, store)\n\t\tc.Unlock()\n\t}\n\n\tc.startWatch()\n\treturn nil\n}\n\nfunc (c *controller) closeStores() {\n\tfor _, store := range c.getStores() {\n\t\tstore.Close()\n\t}\n}\n\nfunc (c *controller) getStore(scope string) datastore.DataStore {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tfor _, store := range c.stores {\n\t\tif store.Scope() == scope {\n\t\t\treturn store\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *controller) getStores() []datastore.DataStore {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn c.stores\n}\n\nfunc (c *controller) getNetworkFromStore(nid string) (*network, error) {\n\tfor _, store := range c.getStores() {\n\t\tn := &network{id: nid, ctrlr: c}\n\t\terr := store.GetObject(datastore.Key(n.Key()...), n)\n\t\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\t\treturn nil, fmt.Errorf(\"could not find network %s: %v\", nid, err)\n\t\t}\n\n\t\t\/\/ Continue searching in the next store if the key is not found in this store\n\t\tif err == datastore.ErrKeyNotFound {\n\t\t\tcontinue\n\t\t}\n\n\t\tec := &endpointCnt{n: n}\n\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count for network %s: %v\", n.Name(), err)\n\t\t}\n\n\t\tn.epCnt = ec\n\t\treturn n, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"network %s not found\", nid)\n}\n\nfunc (c *controller) getNetworksForScope(scope string) ([]*network, error) {\n\tvar nl []*network\n\n\tstore := c.getStore(scope)\n\tif store == nil {\n\t\treturn nil, nil\n\t}\n\n\tkvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix),\n\t\t&network{ctrlr: c})\n\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\treturn nil, fmt.Errorf(\"failed to get networks for scope %s: %v\",\n\t\t\tscope, err)\n\t}\n\n\tfor _, kvo := range kvol {\n\t\tn := kvo.(*network)\n\t\tn.ctrlr = c\n\n\t\tec := &endpointCnt{n: n}\n\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count key %s for network %s while listing: %v\", datastore.Key(ec.Key()...), n.Name(), err)\n\t\t}\n\n\t\tn.epCnt = ec\n\t\tnl = append(nl, n)\n\t}\n\n\treturn nl, nil\n}\n\nfunc (c *controller) getNetworksFromStore() ([]*network, error) {\n\tvar nl []*network\n\n\tfor _, store := range c.getStores() {\n\t\tkvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix),\n\t\t\t&network{ctrlr: c})\n\t\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\t\treturn nil, fmt.Errorf(\"failed to get networks for scope %s: %v\",\n\t\t\t\tstore.Scope(), err)\n\t\t}\n\n\t\t\/\/ Continue searching in the next store if no keys found in this store\n\t\tif err == datastore.ErrKeyNotFound {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, kvo := range kvol {\n\t\t\tn := kvo.(*network)\n\t\t\tn.ctrlr = c\n\n\t\t\tec := &endpointCnt{n: n}\n\t\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count key %s for network %s while listing: %v\", datastore.Key(ec.Key()...), n.Name(), err)\n\t\t\t}\n\n\t\t\tn.epCnt = ec\n\t\t\tnl = append(nl, n)\n\t\t}\n\t}\n\n\treturn nl, nil\n}\n\nfunc (n *network) getEndpointFromStore(eid string) (*endpoint, error) {\n\tfor _, store := range n.ctrlr.getStores() {\n\t\tep := &endpoint{id: eid, network: n}\n\t\terr := store.GetObject(datastore.Key(ep.Key()...), ep)\n\t\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\t\treturn nil, fmt.Errorf(\"could not find endpoint %s: %v\", eid, err)\n\t\t}\n\n\t\t\/\/ Continue searching in the next store if the key is not found in this store\n\t\tif err == datastore.ErrKeyNotFound {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn ep, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"endpoint %s not found\", eid)\n}\n\nfunc (n *network) getEndpointsFromStore() ([]*endpoint, error) {\n\tvar epl []*endpoint\n\n\ttmp := endpoint{network: n}\n\tfor _, store := range n.getController().getStores() {\n\t\tkvol, err := store.List(datastore.Key(tmp.KeyPrefix()...), &endpoint{network: n})\n\t\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\t\treturn nil,\n\t\t\t\tfmt.Errorf(\"failed to get endpoints for network %s scope %s: %v\",\n\t\t\t\t\tn.Name(), store.Scope(), err)\n\t\t}\n\n\t\t\/\/ Continue searching in the next store if no keys found in this store\n\t\tif err == datastore.ErrKeyNotFound {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, kvo := range kvol {\n\t\t\tep := kvo.(*endpoint)\n\t\t\tep.network = n\n\t\t\tepl = append(epl, ep)\n\t\t}\n\t}\n\n\treturn epl, nil\n}\n\nfunc (c *controller) updateToStore(kvObject datastore.KVObject) error {\n\tcs := c.getStore(kvObject.DataScope())\n\tif cs == nil {\n\t\tlog.Warnf(\"datastore for scope %s not initialized. kv object %s is not added to the store\", kvObject.DataScope(), datastore.Key(kvObject.Key()...))\n\t\treturn nil\n\t}\n\n\tif err := cs.PutObjectAtomic(kvObject); err != nil {\n\t\tif err == datastore.ErrKeyModified {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"failed to update store for object type %T: %v\", kvObject, err)\n\t}\n\n\treturn nil\n}\n\nfunc (c *controller) deleteFromStore(kvObject datastore.KVObject) error {\n\tcs := c.getStore(kvObject.DataScope())\n\tif cs == nil {\n\t\tlog.Debugf(\"datastore for scope %s not initialized. kv object %s is not deleted from datastore\", kvObject.DataScope(), datastore.Key(kvObject.Key()...))\n\t\treturn nil\n\t}\n\nretry:\n\tif err := cs.DeleteObjectAtomic(kvObject); err != nil {\n\t\tif err == datastore.ErrKeyModified {\n\t\t\tif err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not update the kvobject to latest when trying to delete: %v\", err)\n\t\t\t}\n\t\t\tgoto retry\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype netWatch struct {\n\tlocalEps map[string]*endpoint\n\tremoteEps map[string]*endpoint\n\tstopCh chan struct{}\n}\n\nfunc (c *controller) getLocalEps(nw *netWatch) []*endpoint {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tvar epl []*endpoint\n\tfor _, ep := range nw.localEps {\n\t\tepl = append(epl, ep)\n\t}\n\n\treturn epl\n}\n\nfunc (c *controller) watchSvcRecord(ep *endpoint) {\n\tc.watchCh <- ep\n}\n\nfunc (c *controller) unWatchSvcRecord(ep *endpoint) {\n\tc.unWatchCh <- ep\n}\n\nfunc (c *controller) networkWatchLoop(nw *netWatch, ep *endpoint, ecCh <-chan datastore.KVObject) {\n\tfor {\n\t\tselect {\n\t\tcase <-nw.stopCh:\n\t\t\treturn\n\t\tcase o := <-ecCh:\n\t\t\tec := o.(*endpointCnt)\n\n\t\t\tepl, err := ec.n.getEndpointsFromStore()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc.Lock()\n\t\t\tvar addEp []*endpoint\n\n\t\t\tdelEpMap := make(map[string]*endpoint)\n\t\t\tfor k, v := range nw.remoteEps {\n\t\t\t\tdelEpMap[k] = v\n\t\t\t}\n\n\t\t\tfor _, lEp := range epl {\n\t\t\t\tif _, ok := nw.localEps[lEp.ID()]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif _, ok := nw.remoteEps[lEp.ID()]; ok {\n\t\t\t\t\tdelete(delEpMap, lEp.ID())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnw.remoteEps[lEp.ID()] = lEp\n\t\t\t\taddEp = append(addEp, lEp)\n\n\t\t\t}\n\t\t\tc.Unlock()\n\n\t\t\tfor _, lEp := range addEp {\n\t\t\t\tep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), true)\n\t\t\t}\n\n\t\t\tfor _, lEp := range delEpMap {\n\t\t\t\tep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), false)\n\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) {\n\tc.Lock()\n\tnw, ok := nmap[ep.getNetwork().ID()]\n\tc.Unlock()\n\n\tif ok {\n\t\t\/\/ Update the svc db for the local endpoint join right away\n\t\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true)\n\n\t\tc.Lock()\n\t\tnw.localEps[ep.ID()] = ep\n\t\tc.Unlock()\n\t\treturn\n\t}\n\n\tnw = &netWatch{\n\t\tlocalEps: make(map[string]*endpoint),\n\t\tremoteEps: make(map[string]*endpoint),\n\t}\n\n\t\/\/ Update the svc db for the local endpoint join right away\n\t\/\/ Do this before adding this ep to localEps so that we don't\n\t\/\/ try to update this ep's container's svc records\n\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true)\n\n\tc.Lock()\n\tnw.localEps[ep.ID()] = ep\n\tnmap[ep.getNetwork().ID()] = nw\n\tnw.stopCh = make(chan struct{})\n\tc.Unlock()\n\n\tstore := c.getStore(ep.getNetwork().DataScope())\n\tif store == nil {\n\t\treturn\n\t}\n\n\tif !store.Watchable() {\n\t\treturn\n\t}\n\n\tch, err := store.Watch(ep.getNetwork().getEpCnt(), nw.stopCh)\n\tif err != nil {\n\t\tlog.Warnf(\"Error creating watch for network: %v\", err)\n\t\treturn\n\t}\n\n\tgo c.networkWatchLoop(nw, ep, ch)\n}\n\nfunc (c *controller) processEndpointDelete(nmap map[string]*netWatch, ep *endpoint) {\n\tc.Lock()\n\tnw, ok := nmap[ep.getNetwork().ID()]\n\n\tif ok {\n\t\tdelete(nw.localEps, ep.ID())\n\t\tc.Unlock()\n\n\t\t\/\/ Update the svc db about local endpoint leave right away\n\t\t\/\/ Do this after we remove this ep from localEps so that we\n\t\t\/\/ don't try to remove this svc record from this ep's container.\n\t\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), false)\n\n\t\tc.Lock()\n\t\tif len(nw.localEps) == 0 {\n\t\t\tclose(nw.stopCh)\n\t\t\tdelete(nmap, ep.getNetwork().ID())\n\t\t}\n\t}\n\tc.Unlock()\n}\n\nfunc (c *controller) watchLoop(nmap map[string]*netWatch) {\n\tfor {\n\t\tselect {\n\t\tcase ep := <-c.watchCh:\n\t\t\tc.processEndpointCreate(nmap, ep)\n\t\tcase ep := <-c.unWatchCh:\n\t\t\tc.processEndpointDelete(nmap, ep)\n\t\t}\n\t}\n}\n\nfunc (c *controller) startWatch() {\n\tc.watchCh = make(chan *endpoint)\n\tc.unWatchCh = make(chan *endpoint)\n\tnmap := make(map[string]*netWatch)\n\n\tgo c.watchLoop(nmap)\n}\n<commit_msg>Dont fail the Get functions if there is an error in one of the stores<commit_after>package libnetwork\n\nimport (\n\t\"fmt\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libnetwork\/datastore\"\n)\n\nfunc (c *controller) initStores() error {\n\tc.Lock()\n\tif c.cfg == nil {\n\t\tc.Unlock()\n\t\treturn nil\n\t}\n\tscopeConfigs := c.cfg.Scopes\n\tc.Unlock()\n\n\tfor scope, scfg := range scopeConfigs {\n\t\tstore, err := datastore.NewDataStore(scope, scfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Lock()\n\t\tc.stores = append(c.stores, store)\n\t\tc.Unlock()\n\t}\n\n\tc.startWatch()\n\treturn nil\n}\n\nfunc (c *controller) closeStores() {\n\tfor _, store := range c.getStores() {\n\t\tstore.Close()\n\t}\n}\n\nfunc (c *controller) getStore(scope string) datastore.DataStore {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tfor _, store := range c.stores {\n\t\tif store.Scope() == scope {\n\t\t\treturn store\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *controller) getStores() []datastore.DataStore {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\treturn c.stores\n}\n\nfunc (c *controller) getNetworkFromStore(nid string) (*network, error) {\n\tfor _, store := range c.getStores() {\n\t\tn := &network{id: nid, ctrlr: c}\n\t\terr := store.GetObject(datastore.Key(n.Key()...), n)\n\t\t\/\/ Continue searching in the next store if the key is not found in this store\n\t\tif err != nil {\n\t\t\tif err != datastore.ErrKeyNotFound {\n\t\t\t\tlog.Debugf(\"could not find network %s: %v\", nid, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tec := &endpointCnt{n: n}\n\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count for network %s: %v\", n.Name(), err)\n\t\t}\n\n\t\tn.epCnt = ec\n\t\treturn n, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"network %s not found\", nid)\n}\n\nfunc (c *controller) getNetworksForScope(scope string) ([]*network, error) {\n\tvar nl []*network\n\n\tstore := c.getStore(scope)\n\tif store == nil {\n\t\treturn nil, nil\n\t}\n\n\tkvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix),\n\t\t&network{ctrlr: c})\n\tif err != nil && err != datastore.ErrKeyNotFound {\n\t\treturn nil, fmt.Errorf(\"failed to get networks for scope %s: %v\",\n\t\t\tscope, err)\n\t}\n\n\tfor _, kvo := range kvol {\n\t\tn := kvo.(*network)\n\t\tn.ctrlr = c\n\n\t\tec := &endpointCnt{n: n}\n\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count key %s for network %s while listing: %v\", datastore.Key(ec.Key()...), n.Name(), err)\n\t\t}\n\n\t\tn.epCnt = ec\n\t\tnl = append(nl, n)\n\t}\n\n\treturn nl, nil\n}\n\nfunc (c *controller) getNetworksFromStore() ([]*network, error) {\n\tvar nl []*network\n\n\tfor _, store := range c.getStores() {\n\t\tkvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix),\n\t\t\t&network{ctrlr: c})\n\t\t\/\/ Continue searching in the next store if no keys found in this store\n\t\tif err != nil {\n\t\t\tif err != datastore.ErrKeyNotFound {\n\t\t\t\tlog.Debugf(\"failed to get networks for scope %s: %v\", store.Scope(), err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, kvo := range kvol {\n\t\t\tn := kvo.(*network)\n\t\t\tn.ctrlr = c\n\n\t\t\tec := &endpointCnt{n: n}\n\t\t\terr = store.GetObject(datastore.Key(ec.Key()...), ec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"could not find endpoint count key %s for network %s while listing: %v\", datastore.Key(ec.Key()...), n.Name(), err)\n\t\t\t}\n\n\t\t\tn.epCnt = ec\n\t\t\tnl = append(nl, n)\n\t\t}\n\t}\n\n\treturn nl, nil\n}\n\nfunc (n *network) getEndpointFromStore(eid string) (*endpoint, error) {\n\tstore := n.ctrlr.getStore(n.Scope())\n\tif store == nil {\n\t\treturn nil, fmt.Errorf(\"could not find endpoint %s: datastore not found for scope %s\", eid, n.Scope())\n\t}\n\n\tep := &endpoint{id: eid, network: n}\n\terr := store.GetObject(datastore.Key(ep.Key()...), ep)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not find endpoint %s: %v\", eid, err)\n\t}\n\treturn ep, nil\n}\n\nfunc (n *network) getEndpointsFromStore() ([]*endpoint, error) {\n\tvar epl []*endpoint\n\n\ttmp := endpoint{network: n}\n\tfor _, store := range n.getController().getStores() {\n\t\tkvol, err := store.List(datastore.Key(tmp.KeyPrefix()...), &endpoint{network: n})\n\t\t\/\/ Continue searching in the next store if no keys found in this store\n\t\tif err != nil {\n\t\t\tif err != datastore.ErrKeyNotFound {\n\t\t\t\tlog.Debugf(\"failed to get endpoints for network %s scope %s: %v\",\n\t\t\t\t\tn.Name(), store.Scope(), err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, kvo := range kvol {\n\t\t\tep := kvo.(*endpoint)\n\t\t\tep.network = n\n\t\t\tepl = append(epl, ep)\n\t\t}\n\t}\n\n\treturn epl, nil\n}\n\nfunc (c *controller) updateToStore(kvObject datastore.KVObject) error {\n\tcs := c.getStore(kvObject.DataScope())\n\tif cs == nil {\n\t\tlog.Warnf(\"datastore for scope %s not initialized. kv object %s is not added to the store\", kvObject.DataScope(), datastore.Key(kvObject.Key()...))\n\t\treturn nil\n\t}\n\n\tif err := cs.PutObjectAtomic(kvObject); err != nil {\n\t\tif err == datastore.ErrKeyModified {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"failed to update store for object type %T: %v\", kvObject, err)\n\t}\n\n\treturn nil\n}\n\nfunc (c *controller) deleteFromStore(kvObject datastore.KVObject) error {\n\tcs := c.getStore(kvObject.DataScope())\n\tif cs == nil {\n\t\tlog.Debugf(\"datastore for scope %s not initialized. kv object %s is not deleted from datastore\", kvObject.DataScope(), datastore.Key(kvObject.Key()...))\n\t\treturn nil\n\t}\n\nretry:\n\tif err := cs.DeleteObjectAtomic(kvObject); err != nil {\n\t\tif err == datastore.ErrKeyModified {\n\t\t\tif err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not update the kvobject to latest when trying to delete: %v\", err)\n\t\t\t}\n\t\t\tgoto retry\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype netWatch struct {\n\tlocalEps map[string]*endpoint\n\tremoteEps map[string]*endpoint\n\tstopCh chan struct{}\n}\n\nfunc (c *controller) getLocalEps(nw *netWatch) []*endpoint {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tvar epl []*endpoint\n\tfor _, ep := range nw.localEps {\n\t\tepl = append(epl, ep)\n\t}\n\n\treturn epl\n}\n\nfunc (c *controller) watchSvcRecord(ep *endpoint) {\n\tc.watchCh <- ep\n}\n\nfunc (c *controller) unWatchSvcRecord(ep *endpoint) {\n\tc.unWatchCh <- ep\n}\n\nfunc (c *controller) networkWatchLoop(nw *netWatch, ep *endpoint, ecCh <-chan datastore.KVObject) {\n\tfor {\n\t\tselect {\n\t\tcase <-nw.stopCh:\n\t\t\treturn\n\t\tcase o := <-ecCh:\n\t\t\tec := o.(*endpointCnt)\n\n\t\t\tepl, err := ec.n.getEndpointsFromStore()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc.Lock()\n\t\t\tvar addEp []*endpoint\n\n\t\t\tdelEpMap := make(map[string]*endpoint)\n\t\t\tfor k, v := range nw.remoteEps {\n\t\t\t\tdelEpMap[k] = v\n\t\t\t}\n\n\t\t\tfor _, lEp := range epl {\n\t\t\t\tif _, ok := nw.localEps[lEp.ID()]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif _, ok := nw.remoteEps[lEp.ID()]; ok {\n\t\t\t\t\tdelete(delEpMap, lEp.ID())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnw.remoteEps[lEp.ID()] = lEp\n\t\t\t\taddEp = append(addEp, lEp)\n\n\t\t\t}\n\t\t\tc.Unlock()\n\n\t\t\tfor _, lEp := range addEp {\n\t\t\t\tep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), true)\n\t\t\t}\n\n\t\t\tfor _, lEp := range delEpMap {\n\t\t\t\tep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), false)\n\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) {\n\tc.Lock()\n\tnw, ok := nmap[ep.getNetwork().ID()]\n\tc.Unlock()\n\n\tif ok {\n\t\t\/\/ Update the svc db for the local endpoint join right away\n\t\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true)\n\n\t\tc.Lock()\n\t\tnw.localEps[ep.ID()] = ep\n\t\tc.Unlock()\n\t\treturn\n\t}\n\n\tnw = &netWatch{\n\t\tlocalEps: make(map[string]*endpoint),\n\t\tremoteEps: make(map[string]*endpoint),\n\t}\n\n\t\/\/ Update the svc db for the local endpoint join right away\n\t\/\/ Do this before adding this ep to localEps so that we don't\n\t\/\/ try to update this ep's container's svc records\n\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), true)\n\n\tc.Lock()\n\tnw.localEps[ep.ID()] = ep\n\tnmap[ep.getNetwork().ID()] = nw\n\tnw.stopCh = make(chan struct{})\n\tc.Unlock()\n\n\tstore := c.getStore(ep.getNetwork().DataScope())\n\tif store == nil {\n\t\treturn\n\t}\n\n\tif !store.Watchable() {\n\t\treturn\n\t}\n\n\tch, err := store.Watch(ep.getNetwork().getEpCnt(), nw.stopCh)\n\tif err != nil {\n\t\tlog.Warnf(\"Error creating watch for network: %v\", err)\n\t\treturn\n\t}\n\n\tgo c.networkWatchLoop(nw, ep, ch)\n}\n\nfunc (c *controller) processEndpointDelete(nmap map[string]*netWatch, ep *endpoint) {\n\tc.Lock()\n\tnw, ok := nmap[ep.getNetwork().ID()]\n\n\tif ok {\n\t\tdelete(nw.localEps, ep.ID())\n\t\tc.Unlock()\n\n\t\t\/\/ Update the svc db about local endpoint leave right away\n\t\t\/\/ Do this after we remove this ep from localEps so that we\n\t\t\/\/ don't try to remove this svc record from this ep's container.\n\t\tep.getNetwork().updateSvcRecord(ep, c.getLocalEps(nw), false)\n\n\t\tc.Lock()\n\t\tif len(nw.localEps) == 0 {\n\t\t\tclose(nw.stopCh)\n\t\t\tdelete(nmap, ep.getNetwork().ID())\n\t\t}\n\t}\n\tc.Unlock()\n}\n\nfunc (c *controller) watchLoop(nmap map[string]*netWatch) {\n\tfor {\n\t\tselect {\n\t\tcase ep := <-c.watchCh:\n\t\t\tc.processEndpointCreate(nmap, ep)\n\t\tcase ep := <-c.unWatchCh:\n\t\t\tc.processEndpointDelete(nmap, ep)\n\t\t}\n\t}\n}\n\nfunc (c *controller) startWatch() {\n\tc.watchCh = make(chan *endpoint)\n\tc.unWatchCh = make(chan *endpoint)\n\tnmap := make(map[string]*netWatch)\n\n\tgo c.watchLoop(nmap)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package store automatically configures a database to store structured information in an sql database\npackage store\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\ntype field struct {\n\tname string\n\tpos []int\n\tisPointer bool\n}\n\ntype statement struct {\n\t*sql.Stmt\n\tin []int\n\tout []int\n}\n\ntype typeInfo struct {\n\tprimary int\n\tfields []field\n\tstatements []statement\n}\n\ntype Store struct {\n\tdb *sql.DB\n\ttypes map[string]typeInfo\n}\n\nfunc New(db *sql.DB) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\ttypes: make(map[string]typeInfo),\n\t}\n}\n\nfunc (s *Store) Close() error {\n\terr := s.db.Close()\n\ts.db = nil\n\treturn err\n}\n\nfunc isPointerStruct(i interface{}) bool {\n\tt := reflect.TypeOf(i)\n\treturn t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct\n}\n\nfunc (s *Store) Register(i interface{}) error {\n\tif s.db == nil {\n\t\treturn DBClosed\n\t}\n\tif !isPointerStruct(i) {\n\t\treturn NoPointerStruct\n\t}\n\n\tname := typeName(i)\n\tfmt.Println(\"Processing\", name)\n\tif _, ok := s.types[name]; ok {\n\t\treturn nil\n\t}\n\n\tv := reflect.ValueOf(i).Elem()\n\tnumFields := v.Type().NumField()\n\tfields := make([]field, 0, numFields)\n\tid := 0\n\tidType := 0\n\tfor n := 0; n < numFields; n++ {\n\t\tf := v.Type().Field(n)\n\t\tif f.PkgPath != \"\" { \/\/ not exported\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := f.Name\n\t\tif fn := f.Tag.Get(\"store\"); fn != \"\" {\n\t\t\tfieldName = fn\n\t\t}\n\t\tif fieldName == \"-\" { \/\/ Skip field\n\t\t\tcontinue\n\t\t}\n\t\tisPointer := f.Type.Kind() == reflect.Ptr\n\t\tvar iface interface{}\n\t\tif isPointer {\n\t\t\tiface = v.Field(n).Interface()\n\t\t} else {\n\t\t\tiface = v.Field(n).Addr().Interface()\n\t\t}\n\t\tif isValidKeyType(iface) {\n\t\t\tif idType < 3 && f.Tag.Get(\"key\") == \"1\" {\n\t\t\t\tidType = 3\n\t\t\t\tid = n\n\t\t\t} else if idType < 2 && strings.ToLower(fieldName) == \"id\" {\n\t\t\t\tidType = 2\n\t\t\t\tid = n\n\t\t\t} else if idType < 1 {\n\t\t\t\tidType = 1\n\t\t\t\tid = n\n\t\t\t}\n\t\t}\n\t\tpos := make([]int, 1, 2)\n\t\tpos[0] = n\n\t\tif isPointerStruct(iface) {\n\t\t\terr := s.Register(iface)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tpos = append(pos, s.types[typeName(iface)].primary)\n\t\t} else if !isValidType(iface) {\n\t\t\tcontinue\n\t\t}\n\t\tfields = append(fields, field{\n\t\t\tfieldName,\n\t\t\tpos,\n\t\t\tisPointer,\n\t\t})\n\t\tfmt.Println(fields[len(fields)-1])\n\t}\n\ts.types[name] = typeInfo{\n\t\tid,\n\t\tfields,\n\t\tnil,\n\t}\n\tfmt.Println(s.types[name])\n\treturn nil\n}\n\nfunc isValidType(i interface{}) bool {\n\tswitch i.(type) {\n\tcase *int, *int8, *int16, *int32, *int64,\n\t\t*uint, *uint8, *uint16, *uint32, *uint64,\n\t\t*string, *float32, *float64, *bool:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isValidKeyType(i interface{}) bool {\n\tswitch i.(type) {\n\tcase *int, *int8, *int16, *int32, *int64,\n\t\t*uint, *uint8, *uint16, *uint32, *uint64:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc typeName(i interface{}) string {\n\tname := reflect.TypeOf(i).String()\n\tif name[0] == '*' {\n\t\tname = name[1:]\n\t}\n\treturn name\n}\n\n\/\/ Errors\n\nvar (\n\tDBClosed = errors.New(\"database already closed\")\n\tNoPointerStruct = errors.New(\"given variable is not a pointer to a struct\")\n)\n<commit_msg>Completed intial type creation<commit_after>\/\/ Package store automatically configures a database to store structured information in an sql database\npackage store\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype field struct {\n\tname string\n\tpos []int\n\tisPointer bool\n}\n\ntype statement struct {\n\t*sql.Stmt\n\tin []int\n\tout []int\n}\n\ntype typeInfo struct {\n\tprimary int\n\tfields []field\n\tstatements []statement\n}\n\ntype Store struct {\n\tdb *sql.DB\n\ttypes map[string]typeInfo\n\tmutex sync.Mutex\n}\n\nfunc New(db *sql.DB) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\ttypes: make(map[string]typeInfo),\n\t}\n}\n\nfunc (s *Store) Close() error {\n\terr := s.db.Close()\n\ts.db = nil\n\treturn err\n}\n\nfunc isPointerStruct(i interface{}) bool {\n\tt := reflect.TypeOf(i)\n\treturn t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct\n}\n\nfunc (s *Store) Register(i interface{}) error {\n\tif s.db == nil {\n\t\treturn DBClosed\n\t} else if !isPointerStruct(i) {\n\t\treturn NoPointerStruct\n\t}\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.defineType(i)\n}\n\nfunc (s *Store) defineType(i interface{}) error {\n\tname := typeName(i)\n\tif _, ok := s.types[name]; ok {\n\t\treturn nil\n\t}\n\n\tv := reflect.ValueOf(i).Elem()\n\tnumFields := v.Type().NumField()\n\tfields := make([]field, 0, numFields)\n\tid := 0\n\tidType := 0\n\n\ttype tR struct {\n\t\ti interface{}\n\t\tn int\n\t}\n\ttoRegister := make([]tR, 0)\n\tfor n := 0; n < numFields; n++ {\n\t\tf := v.Type().Field(n)\n\t\tif f.PkgPath != \"\" { \/\/ not exported\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := f.Name\n\t\tif fn := f.Tag.Get(\"store\"); fn != \"\" {\n\t\t\tfieldName = fn\n\t\t}\n\t\tif fieldName == \"-\" { \/\/ Skip field\n\t\t\tcontinue\n\t\t}\n\t\tisPointer := f.Type.Kind() == reflect.Ptr\n\t\tvar iface interface{}\n\t\tif isPointer {\n\t\t\tiface = v.Field(n).Interface()\n\t\t} else {\n\t\t\tiface = v.Field(n).Addr().Interface()\n\t\t}\n\t\tif isValidKeyType(iface) {\n\t\t\tif idType < 3 && f.Tag.Get(\"key\") == \"1\" {\n\t\t\t\tidType = 3\n\t\t\t\tid = n\n\t\t\t} else if idType < 2 && strings.ToLower(fieldName) == \"id\" {\n\t\t\t\tidType = 2\n\t\t\t\tid = n\n\t\t\t} else if idType < 1 {\n\t\t\t\tidType = 1\n\t\t\t\tid = n\n\t\t\t}\n\t\t}\n\t\tpos := make([]int, 1, 2)\n\t\tpos[0] = n\n\t\tif isPointerStruct(iface) {\n\t\t\ttoRegister = append(toRegister, tR{\n\t\t\t\tiface,\n\t\t\t\tlen(fields),\n\t\t\t})\n\t\t} else if !isValidType(iface) {\n\t\t\tcontinue\n\t\t}\n\t\tfields = append(fields, field{\n\t\t\tfieldName,\n\t\t\tpos,\n\t\t\tisPointer,\n\t\t})\n\t}\n\ts.types[name] = typeInfo{\n\t\tprimary: id,\n\t}\n\tfor _, t := range toRegister {\n\t\tif err := s.defineType(t.i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfields[t.n].pos = append(fields[t.n].pos, s.types[typeName(t.i)].primary)\n\t}\n\ts.types[name] = typeInfo{\n\t\tprimary: id,\n\t\tfields: fields,\n\t}\n\n\t\/\/ create statements\n\n\treturn nil\n}\n\nfunc isValidType(i interface{}) bool {\n\tswitch i.(type) {\n\tcase *int, *int8, *int16, *int32, *int64,\n\t\t*uint, *uint8, *uint16, *uint32, *uint64,\n\t\t*string, *float32, *float64, *bool:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isValidKeyType(i interface{}) bool {\n\tswitch i.(type) {\n\tcase *int, *int8, *int16, *int32, *int64,\n\t\t*uint, *uint8, *uint16, *uint32, *uint64:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc typeName(i interface{}) string {\n\tname := reflect.TypeOf(i).String()\n\tif name[0] == '*' {\n\t\tname = name[1:]\n\t}\n\treturn name\n}\n\n\/\/ Errors\n\nvar (\n\tDBClosed = errors.New(\"database already closed\")\n\tNoPointerStruct = errors.New(\"given variable is not a pointer to a struct\")\n)\n<|endoftext|>"} {"text":"<commit_before>package androidbinary\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"os\"\n)\n\ntype TableFile struct {\n\tstringPool *ResStringPool\n\ttablePackages []*TablePackage\n}\n\ntype ResTableHeader struct {\n\tHeader ResChunkHeader\n\tPackageCount uint32\n}\n\ntype ResTablePackage struct {\n\tHeader ResChunkHeader\n\tId uint32\n\tName [128]uint16\n\tTypeStrings uint32\n\tLastPublicType uint32\n\tKeyStrings uint32\n\tLastPublicKey uint32\n}\n\ntype TablePackage struct {\n\tHeader ResTablePackage\n\tTypeStrings *ResStringPool\n\tKeyStrings *ResStringPool\n\tTableTypes []*TableType\n}\n\ntype ResTableType struct {\n\tHeader ResChunkHeader\n\tId uint8\n\tRes0 uint8\n\tRes1 uint16\n\tEntryCount uint32\n\tEntriesStart uint32\n\tConfig ResTableConfig\n}\n\ntype ResTableConfig struct {\n\tSize uint32\n\t\/\/ imsi\n\tMcc uint16\n\tMnc uint16\n\n\t\/\/ locale\n\tLanguage [2]uint8\n\tCountry [2]uint8\n\n\t\/\/ screen type\n\tOrientation uint8\n\tTouchscreen uint8\n\tDensity uint16\n\n\t\/\/ inout\n\tKeyboard uint8\n\tNavigation uint8\n\tInputFlags uint8\n\tInputPad0 uint8\n\n\t\/\/ screen\n\tScreenWidth uint16\n\tScreenHeight uint16\n\n\t\/\/ version\n\tSDKVersion uint16\n\tMinorVersion uint16\n\n\t\/\/ screen config\n\tScreenLayout uint8\n\tUIMode uint8\n\tScreenConfigPad1 uint8\n\tScreenConfigPad2 uint8\n}\n\ntype TableType struct {\n\tHeader *ResTableType\n\tEntries []TableEntry\n}\n\ntype ResTableEntry struct {\n\tSize uint16\n\tFlags uint16\n\tKey ResStringPoolRef\n}\n\ntype TableEntry struct {\n\tKey *ResTableEntry\n\tValue *ResValue\n\tFlags uint32\n}\n\ntype ResTableTypeSpec struct {\n\tHeader ResChunkHeader\n\tId uint8\n\tRes0 uint8\n\tRes1 uint16\n\tEntryCount uint32\n}\n\nfunc NewTableFile(r io.ReaderAt) (*TableFile, error) {\n\tf := new(TableFile)\n\tsr := io.NewSectionReader(r, 0, 1<<63-1)\n\n\theader := new(ResTableHeader)\n\tbinary.Read(sr, binary.LittleEndian, header)\n\tf.tablePackages = make([]*TablePackage, header.PackageCount)\n\n\toffset := int64(header.Header.HeaderSize)\n\tfor offset < int64(header.Header.Size) {\n\t\tchunkHeader, err := f.readChunk(sr, offset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toffset += int64(chunkHeader.Size)\n\t}\n\treturn f, nil\n}\n\nfunc (f *TableFile) readChunk(r io.ReaderAt, offset int64) (*ResChunkHeader, error) {\n\tsr := io.NewSectionReader(r, offset, 1<<63-1-offset)\n\tchunkHeader := &ResChunkHeader{}\n\tsr.Seek(0, os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, chunkHeader); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\tsr.Seek(0, os.SEEK_SET)\n\tnumTablePackages := 0\n\tswitch chunkHeader.Type {\n\tcase RES_STRING_POOL_TYPE:\n\t\tf.stringPool, err = readStringPool(sr)\n\tcase RES_TABLE_PACKAGE_TYPE:\n\t\tvar tablePackage *TablePackage\n\t\ttablePackage, err = readTablePackage(sr)\n\t\tf.tablePackages[numTablePackages] = tablePackage\n\t\tnumTablePackages++\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn chunkHeader, nil\n}\n\nfunc readTablePackage(sr *io.SectionReader) (*TablePackage, error) {\n\ttablePackage := new(TablePackage)\n\theader := new(ResTablePackage)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsrTypes := io.NewSectionReader(sr, int64(header.TypeStrings), int64(header.Header.Size-header.TypeStrings))\n\tif typeStrings, err := readStringPool(srTypes); err == nil {\n\t\ttablePackage.TypeStrings = typeStrings\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tsrKeys := io.NewSectionReader(sr, int64(header.KeyStrings), int64(header.Header.Size-header.KeyStrings))\n\tif keyStrings, err := readStringPool(srKeys); err == nil {\n\t\ttablePackage.KeyStrings = keyStrings\n\t} else {\n\t\treturn nil, err\n\t}\n\n\toffset := int64(header.Header.HeaderSize)\n\tfor offset < int64(header.Header.Size) {\n\t\tchunkHeader := &ResChunkHeader{}\n\t\tsr.Seek(offset, os.SEEK_SET)\n\t\tif err := binary.Read(sr, binary.LittleEndian, chunkHeader); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar err error\n\t\tchunkReader := io.NewSectionReader(sr, offset, int64(chunkHeader.Size))\n\t\tsr.Seek(offset, os.SEEK_SET)\n\t\tswitch chunkHeader.Type {\n\t\tcase RES_TABLE_TYPE_TYPE:\n\t\t\tvar tableType *TableType\n\t\t\ttableType, err = readTableType(chunkReader)\n\t\t\ttablePackage.TableTypes = append(tablePackage.TableTypes, tableType)\n\t\tcase RES_TABLE_TYPE_SPEC_TYPE:\n\t\t\t_, err = readTableTypeSpec(chunkReader)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toffset += int64(chunkHeader.Size)\n\t}\n\n\treturn tablePackage, nil\n}\n\nfunc readTableType(sr *io.SectionReader) (*TableType, error) {\n\theader := new(ResTableType)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tentryIndexes := make([]uint32, header.EntryCount)\n\tsr.Seek(int64(header.Header.HeaderSize), os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, entryIndexes); err != nil {\n\t\treturn nil, err\n\t}\n\n\tentries := make([]TableEntry, header.EntryCount)\n\tfor i, index := range entryIndexes {\n\t\tif index == 0xFFFFFFFF {\n\t\t\tcontinue\n\t\t}\n\t\tsr.Seek(int64(header.EntriesStart+index), os.SEEK_SET)\n\t\tvar key ResTableEntry\n\t\tbinary.Read(sr, binary.LittleEndian, &key)\n\t\tentries[i].Key = &key\n\n\t\tvar val ResValue\n\t\tbinary.Read(sr, binary.LittleEndian, &val)\n\t\tentries[i].Value = &val\n\t}\n\treturn &TableType{\n\t\theader,\n\t\tentries,\n\t}, nil\n}\n\nfunc readTableTypeSpec(sr *io.SectionReader) ([]uint32, error) {\n\theader := new(ResTableTypeSpec)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tflags := make([]uint32, header.EntryCount)\n\tsr.Seek(int64(header.Header.HeaderSize), os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, flags); err != nil {\n\t\treturn nil, err\n\t}\n\treturn flags, nil\n}\n<commit_msg>Configの比較を行うメソッドを追加<commit_after>package androidbinary\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"os\"\n)\n\ntype TableFile struct {\n\tstringPool *ResStringPool\n\ttablePackages []*TablePackage\n}\n\ntype ResTableHeader struct {\n\tHeader ResChunkHeader\n\tPackageCount uint32\n}\n\ntype ResTablePackage struct {\n\tHeader ResChunkHeader\n\tId uint32\n\tName [128]uint16\n\tTypeStrings uint32\n\tLastPublicType uint32\n\tKeyStrings uint32\n\tLastPublicKey uint32\n}\n\ntype TablePackage struct {\n\tHeader ResTablePackage\n\tTypeStrings *ResStringPool\n\tKeyStrings *ResStringPool\n\tTableTypes []*TableType\n}\n\ntype ResTableType struct {\n\tHeader ResChunkHeader\n\tId uint8\n\tRes0 uint8\n\tRes1 uint16\n\tEntryCount uint32\n\tEntriesStart uint32\n\tConfig ResTableConfig\n}\n\ntype ResTableConfig struct {\n\tSize uint32\n\t\/\/ imsi\n\tMcc uint16\n\tMnc uint16\n\n\t\/\/ locale\n\tLanguage [2]uint8\n\tCountry [2]uint8\n\n\t\/\/ screen type\n\tOrientation uint8\n\tTouchscreen uint8\n\tDensity uint16\n\n\t\/\/ inout\n\tKeyboard uint8\n\tNavigation uint8\n\tInputFlags uint8\n\tInputPad0 uint8\n\n\t\/\/ screen\n\tScreenWidth uint16\n\tScreenHeight uint16\n\n\t\/\/ version\n\tSDKVersion uint16\n\tMinorVersion uint16\n\n\t\/\/ screen config\n\tScreenLayout uint8\n\tUIMode uint8\n\tScreenConfigPad1 uint8\n\tScreenConfigPad2 uint8\n}\n\ntype TableType struct {\n\tHeader *ResTableType\n\tEntries []TableEntry\n}\n\ntype ResTableEntry struct {\n\tSize uint16\n\tFlags uint16\n\tKey ResStringPoolRef\n}\n\ntype TableEntry struct {\n\tKey *ResTableEntry\n\tValue *ResValue\n\tFlags uint32\n}\n\ntype ResTableTypeSpec struct {\n\tHeader ResChunkHeader\n\tId uint8\n\tRes0 uint8\n\tRes1 uint16\n\tEntryCount uint32\n}\n\nfunc NewTableFile(r io.ReaderAt) (*TableFile, error) {\n\tf := new(TableFile)\n\tsr := io.NewSectionReader(r, 0, 1<<63-1)\n\n\theader := new(ResTableHeader)\n\tbinary.Read(sr, binary.LittleEndian, header)\n\tf.tablePackages = make([]*TablePackage, header.PackageCount)\n\n\toffset := int64(header.Header.HeaderSize)\n\tfor offset < int64(header.Header.Size) {\n\t\tchunkHeader, err := f.readChunk(sr, offset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toffset += int64(chunkHeader.Size)\n\t}\n\treturn f, nil\n}\n\nfunc (f *TableFile) readChunk(r io.ReaderAt, offset int64) (*ResChunkHeader, error) {\n\tsr := io.NewSectionReader(r, offset, 1<<63-1-offset)\n\tchunkHeader := &ResChunkHeader{}\n\tsr.Seek(0, os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, chunkHeader); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\tsr.Seek(0, os.SEEK_SET)\n\tnumTablePackages := 0\n\tswitch chunkHeader.Type {\n\tcase RES_STRING_POOL_TYPE:\n\t\tf.stringPool, err = readStringPool(sr)\n\tcase RES_TABLE_PACKAGE_TYPE:\n\t\tvar tablePackage *TablePackage\n\t\ttablePackage, err = readTablePackage(sr)\n\t\tf.tablePackages[numTablePackages] = tablePackage\n\t\tnumTablePackages++\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn chunkHeader, nil\n}\n\nfunc readTablePackage(sr *io.SectionReader) (*TablePackage, error) {\n\ttablePackage := new(TablePackage)\n\theader := new(ResTablePackage)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsrTypes := io.NewSectionReader(sr, int64(header.TypeStrings), int64(header.Header.Size-header.TypeStrings))\n\tif typeStrings, err := readStringPool(srTypes); err == nil {\n\t\ttablePackage.TypeStrings = typeStrings\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tsrKeys := io.NewSectionReader(sr, int64(header.KeyStrings), int64(header.Header.Size-header.KeyStrings))\n\tif keyStrings, err := readStringPool(srKeys); err == nil {\n\t\ttablePackage.KeyStrings = keyStrings\n\t} else {\n\t\treturn nil, err\n\t}\n\n\toffset := int64(header.Header.HeaderSize)\n\tfor offset < int64(header.Header.Size) {\n\t\tchunkHeader := &ResChunkHeader{}\n\t\tsr.Seek(offset, os.SEEK_SET)\n\t\tif err := binary.Read(sr, binary.LittleEndian, chunkHeader); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar err error\n\t\tchunkReader := io.NewSectionReader(sr, offset, int64(chunkHeader.Size))\n\t\tsr.Seek(offset, os.SEEK_SET)\n\t\tswitch chunkHeader.Type {\n\t\tcase RES_TABLE_TYPE_TYPE:\n\t\t\tvar tableType *TableType\n\t\t\ttableType, err = readTableType(chunkReader)\n\t\t\ttablePackage.TableTypes = append(tablePackage.TableTypes, tableType)\n\t\tcase RES_TABLE_TYPE_SPEC_TYPE:\n\t\t\t_, err = readTableTypeSpec(chunkReader)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toffset += int64(chunkHeader.Size)\n\t}\n\n\treturn tablePackage, nil\n}\n\nfunc readTableType(sr *io.SectionReader) (*TableType, error) {\n\theader := new(ResTableType)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tentryIndexes := make([]uint32, header.EntryCount)\n\tsr.Seek(int64(header.Header.HeaderSize), os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, entryIndexes); err != nil {\n\t\treturn nil, err\n\t}\n\n\tentries := make([]TableEntry, header.EntryCount)\n\tfor i, index := range entryIndexes {\n\t\tif index == 0xFFFFFFFF {\n\t\t\tcontinue\n\t\t}\n\t\tsr.Seek(int64(header.EntriesStart+index), os.SEEK_SET)\n\t\tvar key ResTableEntry\n\t\tbinary.Read(sr, binary.LittleEndian, &key)\n\t\tentries[i].Key = &key\n\n\t\tvar val ResValue\n\t\tbinary.Read(sr, binary.LittleEndian, &val)\n\t\tentries[i].Value = &val\n\t}\n\treturn &TableType{\n\t\theader,\n\t\tentries,\n\t}, nil\n}\n\nfunc readTableTypeSpec(sr *io.SectionReader) ([]uint32, error) {\n\theader := new(ResTableTypeSpec)\n\tif err := binary.Read(sr, binary.LittleEndian, header); err != nil {\n\t\treturn nil, err\n\t}\n\n\tflags := make([]uint32, header.EntryCount)\n\tsr.Seek(int64(header.Header.HeaderSize), os.SEEK_SET)\n\tif err := binary.Read(sr, binary.LittleEndian, flags); err != nil {\n\t\treturn nil, err\n\t}\n\treturn flags, nil\n}\n\nfunc (c *ResTableConfig) IsMoreSpecificThan(o *ResTableConfig) bool {\n\t\/\/ imsi\n\tif (c.Mcc != 0 && c.Mnc != 0) || (o.Mcc != 0 && o.Mnc != 0) {\n\t\tif c.Mcc != o.Mcc {\n\t\t\tif c.Mcc != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.Mnc != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif c.Mnc != o.Mnc {\n\t\t\tif c.Mnc != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.Mnc != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ locale\n\tif (c.Language[0] != 0 && c.Country[0] != 0) || (o.Language[0] != 0 && o.Country[0] != 0) {\n\t\tif c.Language[0] != o.Language[0] {\n\t\t\tif c.Language[0] != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.Language[0] != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif c.Country[0] != o.Country[0] {\n\t\t\tif c.Country[0] != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.Country[0] != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ orientation\n\tif c.Orientation != o.Orientation {\n\t\tif c.Orientation != 0 {\n\t\t\treturn false\n\t\t}\n\t\tif o.Orientation != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ TODO: uimode\n\n\t\/\/ touchscreen\n\tif c.Touchscreen != o.Touchscreen {\n\t\tif c.Touchscreen != 0 {\n\t\t\treturn false\n\t\t}\n\t\tif o.Touchscreen != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ TODO: input\n\n\t\/\/ screen size\n\tif (c.ScreenWidth != 0 && c.ScreenHeight != 0) || (o.ScreenWidth != 0 && o.ScreenHeight != 0) {\n\t\tif c.ScreenWidth != o.ScreenWidth {\n\t\t\tif c.ScreenWidth != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.ScreenWidth != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif c.ScreenHeight != o.ScreenHeight {\n\t\t\tif c.ScreenHeight != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.ScreenHeight != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/version\n\tif (c.SDKVersion != 0 && c.SDKVersion != 0) || (o.MinorVersion != 0 && o.MinorVersion != 0) {\n\t\tif c.SDKVersion != o.SDKVersion {\n\t\t\tif c.SDKVersion != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.SDKVersion != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif c.MinorVersion != o.MinorVersion {\n\t\t\tif c.MinorVersion != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif o.MinorVersion != 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (c *ResTableConfig) IsBetterThan(o *ResTableConfig, r *ResTableConfig) bool {\n\tif r == nil {\n\t\treturn c.IsMoreSpecificThan(o)\n\t}\n\n\t\/\/ imsi\n\tif (c.Mcc != 0 && c.Mnc != 0) || (o.Mcc != 0 && o.Mnc != 0) {\n\t\tif c.Mcc != o.Mcc && r.Mcc != 0 {\n\t\t\treturn c.Mcc != 0\n\t\t}\n\t\tif c.Mnc != o.Mnc && r.Mnc != 0 {\n\t\t\treturn c.Mnc != 0\n\t\t}\n\t}\n\n\t\/\/ locale\n\tif (c.Language[0] != 0 && c.Country[0] != 0) || (o.Language[0] != 0 && o.Country[0] != 0) {\n\t\tif c.Language[0] != o.Language[0] && r.Language[0] != 0 {\n\t\t\treturn c.Language[0] != 0\n\t\t}\n\t\tif c.Country[0] != o.Country[0] && r.Country[0] != 0 {\n\t\t\treturn c.Country[0] != 0\n\t\t}\n\t}\n\n\t\/\/ TODO: screen layout\n\n\t\/\/ orientation\n\tif c.Orientation != o.Orientation && r.Orientation != 0 {\n\t\treturn c.Orientation != 0\n\t}\n\n\t\/\/ TODO: uimode\n\n\t\/\/ TODO: screen type\n\n\t\/\/ TODO: input\n\n\t\/\/ screen size\n\tif (c.ScreenWidth != 0 && c.ScreenHeight != 0) || (o.ScreenWidth != 0 && o.ScreenHeight != 0) {\n\t\tif c.ScreenWidth != o.ScreenWidth && r.ScreenWidth != 0 {\n\t\t\treturn c.ScreenWidth != 0\n\t\t}\n\t\tif c.ScreenHeight != o.ScreenHeight && r.ScreenHeight != 0 {\n\t\t\treturn c.ScreenHeight != 0\n\t\t}\n\t}\n\n\t\/\/ version\n\tif (c.SDKVersion != 0 && c.SDKVersion != 0) || (o.MinorVersion != 0 && o.MinorVersion != 0) {\n\t\tif c.SDKVersion != o.SDKVersion && r.SDKVersion != 0 {\n\t\t\treturn c.SDKVersion > o.SDKVersion\n\t\t}\n\t\tif c.MinorVersion != o.MinorVersion {\n\t\t\treturn c.MinorVersion != 0\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (c *ResTableConfig) Match(settings *ResTableConfig) bool {\n\t\/\/ match imsi\n\tif c.Mcc != 0 && c.Mnc != 0 {\n\t\tif settings.Mcc == 0 {\n\t\t\tif c.Mcc != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tif c.Mcc != 0 && c.Mcc != settings.Mcc {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif settings.Mnc == 0 {\n\t\t\tif c.Mnc != 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tif c.Mnc != 0 && c.Mnc != settings.Mnc {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ match locale\n\tif c.Language[0] != 0 && c.Country[0] != 0 {\n\t\tif settings.Language[0] != 0 && c.Language[0] != 0 &&\n\t\t\t!(settings.Language[0] == c.Language[0] && settings.Language[1] == c.Language[1]) {\n\t\t\treturn false\n\t\t}\n\t\tif settings.Country[0] != 0 && c.Country[0] != 0 &&\n\t\t\t!(settings.Country[0] == c.Country[0] && settings.Country[1] == c.Country[1]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ TODO: screen config\n\t\/\/ TODO: screen type\n\t\/\/ TODO: input\n\n\t\/\/ screen size\n\tif c.ScreenWidth != 0 && c.ScreenHeight != 0 {\n\t\tif settings.ScreenWidth != 0 && c.ScreenWidth != 0 &&\n\t\t\tc.ScreenWidth != settings.ScreenWidth {\n\t\t\treturn false\n\t\t}\n\t\tif settings.ScreenHeight != 0 && c.ScreenHeight != 0 &&\n\t\t\tc.ScreenHeight != settings.ScreenHeight {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ version\n\tif c.SDKVersion != 0 && c.MinorVersion != 0 {\n\t\tif settings.SDKVersion != 0 && c.SDKVersion != 0 &&\n\t\t\tc.SDKVersion > settings.SDKVersion {\n\t\t\treturn false\n\t\t}\n\t\tif settings.MinorVersion != 0 && c.MinorVersion != 0 &&\n\t\t\tc.MinorVersion != settings.MinorVersion {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This is free and unencumbered software released into the public domain.\n\/\/\n\/\/ Anyone is free to copy, modify, publish, use, compile, sell, or\n\/\/ distribute this software, either in source code form or as a compiled\n\/\/ binary, for any purpose, commercial or non-commercial, and by any\n\/\/ means.\n\/\/\n\/\/ In jurisdictions that recognize copyright laws, the author or authors\n\/\/ of this software dedicate any and all copyright interest in the\n\/\/ software to the public domain. We make this dedication for the benefit\n\/\/ of the public at large and to the detriment of our heirs and\n\/\/ successors. We intend this dedication to be an overt act of\n\/\/ relinquishment in perpetuity of all present and future rights to this\n\/\/ software under copyright law.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n\/\/ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n\/\/ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n\/\/ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n\/\/ OTHER DEALINGS IN THE SOFTWARE.\n\/\/\n\/\/ For more information, please refer to <http:\/\/unlicense.org>\n\/\/\n\n\/\/ Package clitable implements methods for pretty command line table output.\npackage clitable\n\nimport (\n\t\"fmt\"\n\t\"github.com\/mattn\/go-runewidth\"\n\t\"strings\"\n)\n\n\/\/ Table - Table structure.\ntype Table struct {\n\tFields []string\n\tRows []map[string]string\n\tHideHead bool \/\/ when true doesn't print header\n\tMarkdown bool\n\tfieldSizes map[string]int\n}\n\n\/\/ New - Creates a new table.\nfunc New(fields []string) *Table {\n\treturn &Table{\n\t\tFields: fields,\n\t\tRows: make([]map[string]string, 0),\n\t\tfieldSizes: make(map[string]int),\n\t}\n}\n\n\/\/ PrintTable - Prints table.\nfunc PrintTable(fields []string, rows []map[string]interface{}) {\n\ttable := New(fields)\n\tfor _, r := range rows {\n\t\ttable.AddRow(r)\n\t}\n\ttable.Print()\n}\n\n\/\/ PrintHorizontal - Prints horizontal table from a map.\nfunc PrintHorizontal(m map[string]interface{}) {\n\ttable := New([]string{\"Key\", \"Value\"})\n\trows := mapToRows(m)\n\tfor _, row := range rows {\n\t\ttable.AddRow(row)\n\t}\n\ttable.HideHead = true\n\ttable.Print()\n}\n\n\/\/ PrintRow - Prints table with only one row.\nfunc PrintRow(fields []string, row map[string]interface{}) {\n\ttable := New(fields)\n\ttable.AddRow(row)\n\ttable.Print()\n}\n\n\/\/ AddRow - Adds row to the table.\nfunc (t *Table) AddRow(row map[string]interface{}) {\n\tnewRow := make(map[string]string)\n\tfor _, k := range t.Fields {\n\t\tv := row[k]\n\t\t\/\/ If is not nil format\n\t\t\/\/ else value is empty string\n\t\tvar val string\n\t\tif v == nil {\n\t\t\tval = \"\"\n\t\t} else {\n\t\t\tval = fmt.Sprintf(\"%v\", v)\n\t\t}\n\n\t\tvalLen := runewidth.StringWidth(val)\n\t\t\/\/ align to field name length\n\t\tif klen := runewidth.StringWidth(k); valLen < klen {\n\t\t\tvalLen = klen\n\t\t}\n\t\tvalLen += 2 \/\/ + 2 spaces\n\t\tif t.fieldSizes[k] < valLen {\n\t\t\tt.fieldSizes[k] = valLen\n\t\t}\n\t\tnewRow[k] = val\n\t}\n\tif len(newRow) > 0 {\n\t\tt.Rows = append(t.Rows, newRow)\n\t}\n}\n\n\/\/ Print - Prints table.\nfunc (t *Table) Print() {\n\tif len(t.Rows) == 0 {\n\t\treturn\n\t}\n\n\tif !t.Markdown {\n\t\tt.printDash()\n\t}\n\n\tif !t.HideHead {\n\t\tfmt.Println(t.getHead())\n\t\tif t.Markdown {\n\t\t\tt.printMarkdownDash()\n\t\t} else {\n\t\t\tt.printDash()\n\t\t}\n\t}\n\n\tfor _, r := range t.Rows {\n\t\tfmt.Println(t.rowString(r))\n\t\tif !t.Markdown {\n\t\t\tt.printDash()\n\t\t}\n\t}\n}\n\n\/\/ getHead - Returns table header containing fields names.\nfunc (t *Table) getHead() string {\n\ts := \"|\"\n\tfor _, name := range t.Fields {\n\t\ts += t.fieldString(name, strings.Title(name)) + \"|\"\n\t}\n\treturn s\n}\n\n\/\/ rowString - Creates a string row.\nfunc (t *Table) rowString(row map[string]string) string {\n\ts := \"|\"\n\tfor _, name := range t.Fields {\n\t\tvalue := row[name]\n\t\ts += t.fieldString(name, value) + \"|\"\n\t}\n\treturn s\n}\n\n\/\/ fieldString - Creates field value string.\nfunc (t *Table) fieldString(name, value string) string {\n\tvalue = fmt.Sprintf(\" %s \", value)\n\tspacesLeft := t.fieldSizes[name] - runewidth.StringWidth(value)\n\tif spacesLeft > 0 {\n\t\tfor i := 0; i < spacesLeft; i++ {\n\t\t\tvalue += \" \"\n\t\t}\n\t}\n\treturn value\n}\n\n\/\/ printDash - Prints dash (on top and header).\nfunc (t *Table) printDash() {\n\ts := \"|\"\n\tfor i := 0; i < t.lineLength()-2; i++ {\n\t\ts += \"-\"\n\t}\n\ts += \"|\"\n\tfmt.Println(s)\n}\n\n\/\/ printMarkdownDash - Prints dash in middle of table.\nfunc (t *Table) printMarkdownDash() {\n\tr := make(map[string]string)\n\tfor _, name := range t.Fields {\n\t\tr[name] = strings.Repeat(\"-\", t.fieldSizes[name]-2)\n\t}\n\tfmt.Println(t.rowString(r))\n}\n\n\/\/ lineLength - Counts size of table line length (with spaces etc.).\nfunc (t *Table) lineLength() (sum int) {\n\tfor _, l := range t.fieldSizes {\n\t\tsum += l + 1\n\t}\n\treturn sum + 1\n}\n\nfunc mapToRows(m map[string]interface{}) (rows []map[string]interface{}) {\n\trows = []map[string]interface{}{}\n\tfor key, value := range m {\n\t\trow := map[string]interface{}{}\n\t\trow[\"Key\"] = strings.Title(key)\n\t\trow[\"Value\"] = value\n\t\trows = append(rows, row)\n\t}\n\treturn\n}\n<commit_msg>@deoxxa footer functionality<commit_after>\/\/ This is free and unencumbered software released into the public domain.\n\/\/\n\/\/ Anyone is free to copy, modify, publish, use, compile, sell, or\n\/\/ distribute this software, either in source code form or as a compiled\n\/\/ binary, for any purpose, commercial or non-commercial, and by any\n\/\/ means.\n\/\/\n\/\/ In jurisdictions that recognize copyright laws, the author or authors\n\/\/ of this software dedicate any and all copyright interest in the\n\/\/ software to the public domain. We make this dedication for the benefit\n\/\/ of the public at large and to the detriment of our heirs and\n\/\/ successors. We intend this dedication to be an overt act of\n\/\/ relinquishment in perpetuity of all present and future rights to this\n\/\/ software under copyright law.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n\/\/ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n\/\/ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n\/\/ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n\/\/ OTHER DEALINGS IN THE SOFTWARE.\n\/\/\n\/\/ For more information, please refer to <http:\/\/unlicense.org>\n\/\/\n\n\/\/ Package clitable implements methods for pretty command line table output.\npackage clitable\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/mattn\/go-runewidth\"\n)\n\n\/\/ Table - Table structure.\ntype Table struct {\n\tFields []string\n\tFooter map[string]string\n\tRows []map[string]string\n\tHideHead bool \/\/ when true doesn't print header\n\tMarkdown bool\n\tfieldSizes map[string]int\n}\n\n\/\/ New - Creates a new table.\nfunc New(fields []string) *Table {\n\treturn &Table{\n\t\tFields: fields,\n\t\tRows: make([]map[string]string, 0),\n\t\tfieldSizes: make(map[string]int),\n\t}\n}\n\n\/\/ PrintTable - Prints table.\nfunc PrintTable(fields []string, rows []map[string]interface{}) {\n\ttable := New(fields)\n\tfor _, r := range rows {\n\t\ttable.AddRow(r)\n\t}\n\ttable.Print()\n}\n\n\/\/ PrintHorizontal - Prints horizontal table from a map.\nfunc PrintHorizontal(m map[string]interface{}) {\n\ttable := New([]string{\"Key\", \"Value\"})\n\trows := mapToRows(m)\n\tfor _, row := range rows {\n\t\ttable.AddRow(row)\n\t}\n\ttable.HideHead = true\n\ttable.Print()\n}\n\n\/\/ PrintRow - Prints table with only one row.\nfunc PrintRow(fields []string, row map[string]interface{}) {\n\ttable := New(fields)\n\ttable.AddRow(row)\n\ttable.Print()\n}\n\n\/\/ AddRow - Adds row to the table.\nfunc (t *Table) AddRow(row map[string]interface{}) {\n\tnewRow := make(map[string]string)\n\tfor _, k := range t.Fields {\n\t\tv := row[k]\n\t\t\/\/ If is not nil format\n\t\t\/\/ else value is empty string\n\t\tvar val string\n\t\tif v == nil {\n\t\t\tval = \"\"\n\t\t} else {\n\t\t\tval = fmt.Sprintf(\"%v\", v)\n\t\t}\n\n\t\tnewRow[k] = val\n\t}\n\n\tt.calculateSizes(newRow)\n\n\tif len(newRow) > 0 {\n\t\tt.Rows = append(t.Rows, newRow)\n\t}\n}\n\n\/\/ AddFooter - Adds footer to the table.\nfunc (t *Table) AddFooter(footer map[string]string) {\n\tt.Footer = footer\n}\n\n\/\/ Print - Prints table.\nfunc (t *Table) Print() {\n\tif len(t.Rows) == 0 && t.Footer == nil {\n\t\treturn\n\t}\n\n\tt.calculateSizes(t.Footer)\n\n\tif !t.Markdown {\n\t\tt.printDash()\n\t}\n\n\tif !t.HideHead {\n\t\tfmt.Println(t.getHead())\n\t\tt.printTableDash()\n\t}\n\n\tfor _, r := range t.Rows {\n\t\tfmt.Println(t.rowString(r))\n\t\tif !t.Markdown {\n\t\t\tt.printDash()\n\t\t}\n\t}\n\n\tif t.Footer != nil {\n\t\tt.printTableDash()\n\t\tfmt.Println(t.rowString(t.Footer))\n\t\tif !t.Markdown {\n\t\t\tt.printTableDash()\n\t\t}\n\t}\n}\n\n\/\/ getHead - Returns table header containing fields names.\nfunc (t *Table) getHead() string {\n\ts := \"|\"\n\tfor _, name := range t.Fields {\n\t\ts += t.fieldString(name, strings.Title(name)) + \"|\"\n\t}\n\treturn s\n}\n\n\/\/ rowString - Creates a string row.\nfunc (t *Table) rowString(row map[string]string) string {\n\ts := \"|\"\n\tfor _, name := range t.Fields {\n\t\tvalue := row[name]\n\t\ts += t.fieldString(name, value) + \"|\"\n\t}\n\treturn s\n}\n\n\/\/ fieldString - Creates field value string.\nfunc (t *Table) fieldString(name, value string) string {\n\tvalue = fmt.Sprintf(\" %s \", value)\n\tspacesLeft := t.fieldSizes[name] - runewidth.StringWidth(value)\n\tif spacesLeft > 0 {\n\t\tfor i := 0; i < spacesLeft; i++ {\n\t\t\tvalue += \" \"\n\t\t}\n\t}\n\treturn value\n}\n\n\/\/ printTableDash - Prints table dash. Markdown or not depending on settings.\nfunc (t *Table) printTableDash() {\n\tif t.Markdown {\n\t\tt.printMarkdownDash()\n\t} else {\n\t\tt.printDash()\n\t}\n}\n\n\/\/ printDash - Prints dash (on top and header).\nfunc (t *Table) printDash() {\n\ts := \"|\"\n\tfor i := 0; i < t.lineLength()-2; i++ {\n\t\ts += \"-\"\n\t}\n\ts += \"|\"\n\tfmt.Println(s)\n}\n\n\/\/ printMarkdownDash - Prints dash in middle of table.\nfunc (t *Table) printMarkdownDash() {\n\tr := make(map[string]string)\n\tfor _, name := range t.Fields {\n\t\tr[name] = strings.Repeat(\"-\", t.fieldSizes[name]-2)\n\t}\n\tfmt.Println(t.rowString(r))\n}\n\n\/\/ lineLength - Counts size of table line length (with spaces etc.).\nfunc (t *Table) lineLength() (sum int) {\n\tfor _, l := range t.fieldSizes {\n\t\tsum += l + 1\n\t}\n\treturn sum + 1\n}\n\nfunc (t *Table) calculateSizes(row map[string]string) {\n\tfor _, k := range t.Fields {\n\t\tv, ok := row[k]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tvlen := runewidth.StringWidth(v)\n\t\t\/\/ align to field name length\n\t\tif klen := runewidth.StringWidth(k); vlen < klen {\n\t\t\tvlen = klen\n\t\t}\n\t\tvlen += 2 \/\/ + 2 spaces\n\t\tif t.fieldSizes[k] < vlen {\n\t\t\tt.fieldSizes[k] = vlen\n\t\t}\n\t}\n}\n\nfunc mapToRows(m map[string]interface{}) (rows []map[string]interface{}) {\n\trows = []map[string]interface{}{}\n\tfor key, value := range m {\n\t\trow := map[string]interface{}{}\n\t\trow[\"Key\"] = strings.Title(key)\n\t\trow[\"Value\"] = value\n\t\trows = append(rows, row)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * Copyright 2021 gRPC authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage pubsub\n\nimport (\n\t\"google.golang.org\/grpc\/internal\/pretty\"\n\t\"google.golang.org\/grpc\/xds\/internal\/xdsclient\/xdsresource\"\n\t\"google.golang.org\/protobuf\/proto\"\n)\n\ntype watcherInfoWithUpdate struct {\n\twi *watchInfo\n\tupdate interface{}\n\terr error\n}\n\n\/\/ scheduleCallback should only be called by methods of watchInfo, which checks\n\/\/ for watcher states and maintain consistency.\nfunc (pb *Pubsub) scheduleCallback(wi *watchInfo, update interface{}, err error) {\n\tpb.updateCh.Put(&watcherInfoWithUpdate{\n\t\twi: wi,\n\t\tupdate: update,\n\t\terr: err,\n\t})\n}\n\nfunc (pb *Pubsub) callCallback(wiu *watcherInfoWithUpdate) {\n\tpb.mu.Lock()\n\t\/\/ Use a closure to capture the callback and type assertion, to save one\n\t\/\/ more switch case.\n\t\/\/\n\t\/\/ The callback must be called without pb.mu. Otherwise if the callback calls\n\t\/\/ another watch() inline, it will cause a deadlock. This leaves a small\n\t\/\/ window that a watcher's callback could be called after the watcher is\n\t\/\/ canceled, and the user needs to take care of it.\n\tvar ccb func()\n\tswitch wiu.wi.rType {\n\tcase xdsresource.ListenerResource:\n\t\tif s, ok := pb.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.RouteConfigResource:\n\t\tif s, ok := pb.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.ClusterResource:\n\t\tif s, ok := pb.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.EndpointsResource:\n\t\tif s, ok := pb.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) }\n\t\t}\n\t}\n\tpb.mu.Unlock()\n\n\tif ccb != nil {\n\t\tccb()\n\t}\n}\n\n\/\/ NewListeners is called when there's a new LDS update.\nfunc (pb *Pubsub) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.ldsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.ldsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.ldsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"LDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.ldsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.ldsMD[name] = mdCopy\n\t\t}\n\t}\n\t\/\/ Resources not in the new update were removed by the server, so delete\n\t\/\/ them.\n\tfor name := range pb.ldsCache {\n\t\tif _, ok := updates[name]; !ok {\n\t\t\t\/\/ If resource exists in cache, but not in the new update, delete\n\t\t\t\/\/ the resource from cache, and also send an resource not found\n\t\t\t\/\/ error to indicate resource removed.\n\t\t\tdelete(pb.ldsCache, name)\n\t\t\tpb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}\n\t\t\tfor wi := range pb.ldsWatchers[name] {\n\t\t\t\twi.resourceNotFound()\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ When LDS resource is removed, we don't delete corresponding RDS cached\n\t\/\/ data. The RDS watch will be canceled, and cache entry is removed when the\n\t\/\/ last watch is canceled.\n}\n\n\/\/ NewRouteConfigs is called when there's a new RDS update.\nfunc (pb *Pubsub) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\t\/\/ If no error received, the status is ACK.\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.rdsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.rdsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.rdsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"RDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.rdsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.rdsMD[name] = mdCopy\n\t\t}\n\t}\n}\n\n\/\/ NewClusters is called when there's a new CDS update.\nfunc (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.cdsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.cdsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.cdsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\t\/\/ Send the watcher the individual error, instead of the\n\t\t\t\t\t\/\/ overall combined error from the metadata.ErrState.\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"CDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.cdsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.cdsMD[name] = mdCopy\n\t\t}\n\t}\n\t\/\/ Resources not in the new update were removed by the server, so delete\n\t\/\/ them.\n\tfor name := range pb.cdsCache {\n\t\tif _, ok := updates[name]; !ok {\n\t\t\t\/\/ If resource exists in cache, but not in the new update, delete it\n\t\t\t\/\/ from cache, and also send an resource not found error to indicate\n\t\t\t\/\/ resource removed.\n\t\t\tdelete(pb.cdsCache, name)\n\t\t\tpb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}\n\t\t\tfor wi := range pb.cdsWatchers[name] {\n\t\t\t\twi.resourceNotFound()\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ When CDS resource is removed, we don't delete corresponding EDS cached\n\t\/\/ data. The EDS watch will be canceled, and cache entry is removed when the\n\t\/\/ last watch is canceled.\n}\n\n\/\/ NewEndpoints is called when there's anew EDS update.\nfunc (pb *Pubsub) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.edsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.edsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.edsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\t\/\/ Send the watcher the individual error, instead of the\n\t\t\t\t\t\/\/ overall combined error from the metadata.ErrState.\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"EDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.edsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.edsMD[name] = mdCopy\n\t\t}\n\t}\n}\n\n\/\/ NewConnectionError is called by the underlying xdsAPIClient when it receives\n\/\/ a connection error. The error will be forwarded to all the resource watchers.\nfunc (pb *Pubsub) NewConnectionError(err error) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor _, s := range pb.edsWatchers {\n\t\tfor wi := range s {\n\t\t\twi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, \"xds: error received from xDS stream: %v\", err))\n\t\t}\n\t}\n}\n<commit_msg>xds\/client: send connection errors to all watchers (#5054)<commit_after>\/*\n *\n * Copyright 2021 gRPC authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage pubsub\n\nimport (\n\t\"google.golang.org\/grpc\/internal\/pretty\"\n\t\"google.golang.org\/grpc\/xds\/internal\/xdsclient\/xdsresource\"\n\t\"google.golang.org\/protobuf\/proto\"\n)\n\ntype watcherInfoWithUpdate struct {\n\twi *watchInfo\n\tupdate interface{}\n\terr error\n}\n\n\/\/ scheduleCallback should only be called by methods of watchInfo, which checks\n\/\/ for watcher states and maintain consistency.\nfunc (pb *Pubsub) scheduleCallback(wi *watchInfo, update interface{}, err error) {\n\tpb.updateCh.Put(&watcherInfoWithUpdate{\n\t\twi: wi,\n\t\tupdate: update,\n\t\terr: err,\n\t})\n}\n\nfunc (pb *Pubsub) callCallback(wiu *watcherInfoWithUpdate) {\n\tpb.mu.Lock()\n\t\/\/ Use a closure to capture the callback and type assertion, to save one\n\t\/\/ more switch case.\n\t\/\/\n\t\/\/ The callback must be called without pb.mu. Otherwise if the callback calls\n\t\/\/ another watch() inline, it will cause a deadlock. This leaves a small\n\t\/\/ window that a watcher's callback could be called after the watcher is\n\t\/\/ canceled, and the user needs to take care of it.\n\tvar ccb func()\n\tswitch wiu.wi.rType {\n\tcase xdsresource.ListenerResource:\n\t\tif s, ok := pb.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.RouteConfigResource:\n\t\tif s, ok := pb.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.ClusterResource:\n\t\tif s, ok := pb.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) }\n\t\t}\n\tcase xdsresource.EndpointsResource:\n\t\tif s, ok := pb.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] {\n\t\t\tccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) }\n\t\t}\n\t}\n\tpb.mu.Unlock()\n\n\tif ccb != nil {\n\t\tccb()\n\t}\n}\n\n\/\/ NewListeners is called when there's a new LDS update.\nfunc (pb *Pubsub) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.ldsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.ldsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.ldsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"LDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.ldsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.ldsMD[name] = mdCopy\n\t\t}\n\t}\n\t\/\/ Resources not in the new update were removed by the server, so delete\n\t\/\/ them.\n\tfor name := range pb.ldsCache {\n\t\tif _, ok := updates[name]; !ok {\n\t\t\t\/\/ If resource exists in cache, but not in the new update, delete\n\t\t\t\/\/ the resource from cache, and also send an resource not found\n\t\t\t\/\/ error to indicate resource removed.\n\t\t\tdelete(pb.ldsCache, name)\n\t\t\tpb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}\n\t\t\tfor wi := range pb.ldsWatchers[name] {\n\t\t\t\twi.resourceNotFound()\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ When LDS resource is removed, we don't delete corresponding RDS cached\n\t\/\/ data. The RDS watch will be canceled, and cache entry is removed when the\n\t\/\/ last watch is canceled.\n}\n\n\/\/ NewRouteConfigs is called when there's a new RDS update.\nfunc (pb *Pubsub) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\t\/\/ If no error received, the status is ACK.\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.rdsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.rdsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.rdsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"RDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.rdsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.rdsMD[name] = mdCopy\n\t\t}\n\t}\n}\n\n\/\/ NewClusters is called when there's a new CDS update.\nfunc (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.cdsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.cdsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.cdsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\t\/\/ Send the watcher the individual error, instead of the\n\t\t\t\t\t\/\/ overall combined error from the metadata.ErrState.\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"CDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.cdsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.cdsMD[name] = mdCopy\n\t\t}\n\t}\n\t\/\/ Resources not in the new update were removed by the server, so delete\n\t\/\/ them.\n\tfor name := range pb.cdsCache {\n\t\tif _, ok := updates[name]; !ok {\n\t\t\t\/\/ If resource exists in cache, but not in the new update, delete it\n\t\t\t\/\/ from cache, and also send an resource not found error to indicate\n\t\t\t\/\/ resource removed.\n\t\t\tdelete(pb.cdsCache, name)\n\t\t\tpb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}\n\t\t\tfor wi := range pb.cdsWatchers[name] {\n\t\t\t\twi.resourceNotFound()\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ When CDS resource is removed, we don't delete corresponding EDS cached\n\t\/\/ data. The EDS watch will be canceled, and cache entry is removed when the\n\t\/\/ last watch is canceled.\n}\n\n\/\/ NewEndpoints is called when there's anew EDS update.\nfunc (pb *Pubsub) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor name, uErr := range updates {\n\t\tif s, ok := pb.edsWatchers[name]; ok {\n\t\t\tif uErr.Err != nil {\n\t\t\t\t\/\/ On error, keep previous version for each resource. But update\n\t\t\t\t\/\/ status and error.\n\t\t\t\tmdCopy := pb.edsMD[name]\n\t\t\t\tmdCopy.ErrState = metadata.ErrState\n\t\t\t\tmdCopy.Status = metadata.Status\n\t\t\t\tpb.edsMD[name] = mdCopy\n\t\t\t\tfor wi := range s {\n\t\t\t\t\t\/\/ Send the watcher the individual error, instead of the\n\t\t\t\t\t\/\/ overall combined error from the metadata.ErrState.\n\t\t\t\t\twi.newError(uErr.Err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ If we get here, it means that the update is a valid one. Notify\n\t\t\t\/\/ watchers only if this is a first time update or it is different\n\t\t\t\/\/ from the one currently cached.\n\t\t\tif cur, ok := pb.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {\n\t\t\t\tfor wi := range s {\n\t\t\t\t\twi.newUpdate(uErr.Update)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ Sync cache.\n\t\t\tpb.logger.Debugf(\"EDS resource with name %v, value %+v added to cache\", name, pretty.ToJSON(uErr))\n\t\t\tpb.edsCache[name] = uErr.Update\n\t\t\t\/\/ Set status to ACK, and clear error state. The metadata might be a\n\t\t\t\/\/ NACK metadata because some other resources in the same response\n\t\t\t\/\/ are invalid.\n\t\t\tmdCopy := metadata\n\t\t\tmdCopy.Status = xdsresource.ServiceStatusACKed\n\t\t\tmdCopy.ErrState = nil\n\t\t\tif metadata.ErrState != nil {\n\t\t\t\tmdCopy.Version = metadata.ErrState.Version\n\t\t\t}\n\t\t\tpb.edsMD[name] = mdCopy\n\t\t}\n\t}\n}\n\n\/\/ NewConnectionError is called by the underlying xdsAPIClient when it receives\n\/\/ a connection error. The error will be forwarded to all the resource watchers.\nfunc (pb *Pubsub) NewConnectionError(err error) {\n\tpb.mu.Lock()\n\tdefer pb.mu.Unlock()\n\n\tfor _, s := range pb.ldsWatchers {\n\t\tfor wi := range s {\n\t\t\twi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, \"xds: error received from xDS stream: %v\", err))\n\t\t}\n\t}\n\tfor _, s := range pb.rdsWatchers {\n\t\tfor wi := range s {\n\t\t\twi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, \"xds: error received from xDS stream: %v\", err))\n\t\t}\n\t}\n\tfor _, s := range pb.cdsWatchers {\n\t\tfor wi := range s {\n\t\t\twi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, \"xds: error received from xDS stream: %v\", err))\n\t\t}\n\t}\n\tfor _, s := range pb.edsWatchers {\n\t\tfor wi := range s {\n\t\t\twi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, \"xds: error received from xDS stream: %v\", err))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/device42\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/log\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/salt\"\n\t\"github.com\/nextgearcapital\/pepper\/template\/vsphere\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tprofile string\n\troles string\n\tosTemplate string\n\tipam bool\n)\n\nfunc init() {\n\tRootCmd.AddCommand(deployCmd)\n\n\tdeployCmd.Flags().StringVarP(&profile, \"profile\", \"p\", \"\", \"Profile to generate and output to \/etc\/salt\/cloud.profiles.d for salt-cloud to use\")\n\tdeployCmd.Flags().StringVarP(&roles, \"roles\", \"r\", \"\", \"List of roles to assign to the host in D42 [eg: dcos,dcos-master]\")\n\tdeployCmd.Flags().StringVarP(&osTemplate, \"template\", \"t\", \"\", \"Which OS template you want to use [eg: Ubuntu, CentOS, someothertemplatename]\")\n\tdeployCmd.Flags().BoolVarP(&ipam, \"no-ipam\", \"\", true, \"Whether or not to use Device42 IPAM [This is only used internally]\")\n\tdeployCmd.Flags().BoolVarP(&log.IsDebugging, \"debug\", \"d\", false, \"Turn debugging on\")\n}\n\nvar deployCmd = &cobra.Command{\n\tUse: \"deploy\",\n\tShort: \"Deploy VM's via salt-cloud\",\n\tLong: `pepper is a wrapper around salt-cloud that will generate salt-cloud profiles based on information you provide in profile configs.\nProfile configs live in \"\/etc\/pepper\/config.d\/{platform}\/{environment}. Pepper is opinionated and looks at the profile you pass in as it's source\nof truth. For example: If you pass in \"vmware-dev-large\" as the profile, it will look for your profile config in \"\/etc\/pepper\/config.d\/vmware\/large.yaml\".\nThis allows for maximum flexibility due to the fact that everyone has different environments and may have some sort of naming scheme associated with them\nso Pepper makes no assumptions on that. Pepper does however make assumptions on your instance type. [eg: nano, micro, small, medium, etc] Although these\noptions are available to you, you are free to override them as you see fit.\nFor example:\n\nProvision new host web01 (Ubuntu) in the dev environment from the nano profile using vmware as a provider:\n\n$ pepper deploy -p vmware-dev-nano -t Ubuntu web01\n\nOr alternatively:\n\n$ pepper deploy --profile vmware-dev-nano --template Ubuntu web01\n\nProvision new host web02 (CentOS) in the prd environment from the large profile using vmware as a provider:\n\n$ pepper deploy -p vmware-prd-large -t CentOS web02\n\nProvision new host web03 (Ubuntu) in the uat environment from the hyper profile using vmware as a provider:\n\n$ pepper deploy -p vmware-uat-hyper -t Ubuntu web03\n\nAre you getting this yet?\n\n$ pepper deploy -p vmware-prd-mid -t Ubuntu -r dcos,dcos-master dcos01 dcos02 dcos03`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif profile == \"\" {\n\t\t\tlog.Die(\"You didn't specify a profile.\")\n\t\t} else if osTemplate == \"\" {\n\t\t\tlog.Die(\"You didn't specify an OS template.\")\n\t\t} else if len(args) == 0 {\n\t\t\tlog.Die(\"You didn't specify any hosts.\")\n\t\t}\n\n\t\tsplitProfile := strings.Split(profile, \"-\")\n\n\t\t\/\/ These will be the basis for how the profile gets generated.\n\t\tplatform := splitProfile[0]\n\t\tenvironment := splitProfile[1]\n\t\tinstancetype := splitProfile[2]\n\n\t\t\/\/ Nothing really gained here it just makes the code more readable.\n\t\thosts := args\n\n\t\tvar ipAddress string\n\n\t\tfor _, host := range hosts {\n\t\t\tif ipam != true {\n\t\t\t\tif err := device42.ReadConfig(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif environment == \"prd\" {\n\t\t\t\t\t\/\/ Get a new IP\n\t\t\t\t\tnewIP, err := device42.GetNextIP(device42.PrdRange)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tipAddress = newIP\n\t\t\t\t\t\/\/ Create the Device\n\t\t\t\t\tif err := device42.CreateDevice(host, \"Production\"); err != nil {\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Reserve IP\n\t\t\t\t\tif err := device42.ReserveIP(newIP, host); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update custom fields\n\t\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t} else if environment == \"dev\" {\n\t\t\t\t\t\/\/ Get a new IP\n\t\t\t\t\tnewIP, err := device42.GetNextIP(device42.DevRange)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tipAddress = newIP\n\t\t\t\t\t\/\/ Create the Device\n\t\t\t\t\tif err := device42.CreateDevice(host, \"Development\"); err != nil {\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Reserve IP\n\t\t\t\t\tif err := device42.ReserveIP(newIP, host); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update custom fields\n\t\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch platform {\n\t\t\tcase \"vmware\":\n\t\t\t\tvar vsphere vsphere.ProfileConfig\n\t\t\t\tif err := vsphere.Prepare(platform, environment, instancetype, osTemplate, ipAddress); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Generate(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := salt.Provision(profile, host); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/if err := vsphere.Remove(); err != nil {\n\t\t\t\t\/\/\tlog.Die(\"%s\", err)\n\t\t\t\t\/\/}\n\t\t\tdefault:\n\t\t\t\tlog.Die(\"I don't recognize this platform!\")\n\t\t\t}\n\t\t}\n\t},\n}\n<commit_msg>Forgot to uncomment the remove function<commit_after>package cmd\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/device42\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/log\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/salt\"\n\t\"github.com\/nextgearcapital\/pepper\/template\/vsphere\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tprofile string\n\troles string\n\tosTemplate string\n\tipam bool\n)\n\nfunc init() {\n\tRootCmd.AddCommand(deployCmd)\n\n\tdeployCmd.Flags().StringVarP(&profile, \"profile\", \"p\", \"\", \"Profile to generate and output to \/etc\/salt\/cloud.profiles.d for salt-cloud to use\")\n\tdeployCmd.Flags().StringVarP(&roles, \"roles\", \"r\", \"\", \"List of roles to assign to the host in D42 [eg: dcos,dcos-master]\")\n\tdeployCmd.Flags().StringVarP(&osTemplate, \"template\", \"t\", \"\", \"Which OS template you want to use [eg: Ubuntu, CentOS, someothertemplatename]\")\n\tdeployCmd.Flags().BoolVarP(&ipam, \"no-ipam\", \"\", true, \"Whether or not to use Device42 IPAM [This is only used internally]\")\n\tdeployCmd.Flags().BoolVarP(&log.IsDebugging, \"debug\", \"d\", false, \"Turn debugging on\")\n}\n\nvar deployCmd = &cobra.Command{\n\tUse: \"deploy\",\n\tShort: \"Deploy VM's via salt-cloud\",\n\tLong: `pepper is a wrapper around salt-cloud that will generate salt-cloud profiles based on information you provide in profile configs.\nProfile configs live in \"\/etc\/pepper\/config.d\/{platform}\/{environment}. Pepper is opinionated and looks at the profile you pass in as it's source\nof truth. For example: If you pass in \"vmware-dev-large\" as the profile, it will look for your profile config in \"\/etc\/pepper\/config.d\/vmware\/large.yaml\".\nThis allows for maximum flexibility due to the fact that everyone has different environments and may have some sort of naming scheme associated with them\nso Pepper makes no assumptions on that. Pepper does however make assumptions on your instance type. [eg: nano, micro, small, medium, etc] Although these\noptions are available to you, you are free to override them as you see fit.\nFor example:\n\nProvision new host web01 (Ubuntu) in the dev environment from the nano profile using vmware as a provider:\n\n$ pepper deploy -p vmware-dev-nano -t Ubuntu web01\n\nOr alternatively:\n\n$ pepper deploy --profile vmware-dev-nano --template Ubuntu web01\n\nProvision new host web02 (CentOS) in the prd environment from the large profile using vmware as a provider:\n\n$ pepper deploy -p vmware-prd-large -t CentOS web02\n\nProvision new host web03 (Ubuntu) in the uat environment from the hyper profile using vmware as a provider:\n\n$ pepper deploy -p vmware-uat-hyper -t Ubuntu web03\n\nAre you getting this yet?\n\n$ pepper deploy -p vmware-prd-mid -t Ubuntu -r dcos,dcos-master dcos01 dcos02 dcos03`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif profile == \"\" {\n\t\t\tlog.Die(\"You didn't specify a profile.\")\n\t\t} else if osTemplate == \"\" {\n\t\t\tlog.Die(\"You didn't specify an OS template.\")\n\t\t} else if len(args) == 0 {\n\t\t\tlog.Die(\"You didn't specify any hosts.\")\n\t\t}\n\n\t\tsplitProfile := strings.Split(profile, \"-\")\n\n\t\t\/\/ These will be the basis for how the profile gets generated.\n\t\tplatform := splitProfile[0]\n\t\tenvironment := splitProfile[1]\n\t\tinstancetype := splitProfile[2]\n\n\t\t\/\/ Nothing really gained here it just makes the code more readable.\n\t\thosts := args\n\n\t\tvar ipAddress string\n\n\t\tfor _, host := range hosts {\n\t\t\tif ipam != true {\n\t\t\t\tif err := device42.ReadConfig(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif environment == \"prd\" {\n\t\t\t\t\t\/\/ Get a new IP\n\t\t\t\t\tnewIP, err := device42.GetNextIP(device42.PrdRange)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tipAddress = newIP\n\t\t\t\t\t\/\/ Create the Device\n\t\t\t\t\tif err := device42.CreateDevice(host, \"Production\"); err != nil {\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Reserve IP\n\t\t\t\t\tif err := device42.ReserveIP(newIP, host); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update custom fields\n\t\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t} else if environment == \"dev\" {\n\t\t\t\t\t\/\/ Get a new IP\n\t\t\t\t\tnewIP, err := device42.GetNextIP(device42.DevRange)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tipAddress = newIP\n\t\t\t\t\t\/\/ Create the Device\n\t\t\t\t\tif err := device42.CreateDevice(host, \"Development\"); err != nil {\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Reserve IP\n\t\t\t\t\tif err := device42.ReserveIP(newIP, host); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update custom fields\n\t\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\t\tif err = device42.MakeIPAvailable(newIP); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch platform {\n\t\t\tcase \"vmware\":\n\t\t\t\tvar vsphere vsphere.ProfileConfig\n\t\t\t\tif err := vsphere.Prepare(platform, environment, instancetype, osTemplate, ipAddress); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Generate(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := salt.Provision(profile, host); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Remove(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Die(\"I don't recognize this platform!\")\n\t\t\t}\n\t\t}\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Mikael Berthe <mikael@lilotux.net>\n\/\/\n\/\/ Licensed under the MIT license.\n\/\/ Please see the LICENSE file is this directory.\n\npackage cmd\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/McKael\/madon\"\n)\n\n\/*\nvar streamOpts struct {\n\tlocal bool\n}\n*\/\n\n\/\/ Maximum number of websockets (1 hashtag <=> 1 ws)\nconst maximumHashtagStreamWS = 4\n\n\/\/ streamCmd represents the stream command\nvar streamCmd = &cobra.Command{\n\tUse: \"stream [user|local|public|:HASHTAG]\",\n\tShort: \"Listen to an event stream\",\n\tLong: `\nThe stream command stays connected to the server and listen to a stream of\nevents (user, local or federated).\nIt can also get a hashtag-based stream if the keyword or prefixed with\n':' or '#'.`,\n\tExample: ` madonctl stream # User timeline stream\n madonctl stream local # Local timeline stream\n madonctl stream public # Public timeline stream\n madonctl stream :mastodon # Hashtag\n madonctl stream #madonctl\n\nSeveral (up to 4) hashtags can be given.\nNote: madonctl will use 1 websocket per hashtag stream.\n madonctl stream #madonctl,#mastodon,#golang\n madonctl stream :madonctl,mastodon,api`,\n\tRunE: streamRunE,\n\tValidArgs: []string{\"user\", \"public\"},\n\tArgAliases: []string{\"home\"},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(streamCmd)\n\n\t\/\/streamCmd.Flags().BoolVar(&streamOpts.local, \"local\", false, \"Events from the local instance\")\n}\n\nfunc streamRunE(cmd *cobra.Command, args []string) error {\n\tstreamName := \"user\"\n\ttag := \"\"\n\tvar hashTagList []string\n\n\tif len(args) > 0 {\n\t\tif len(args) != 1 {\n\t\t\treturn errors.New(\"too many parameters\")\n\t\t}\n\t\targ := args[0]\n\t\tswitch arg {\n\t\tcase \"\", \"user\":\n\t\tcase \"public\":\n\t\t\tstreamName = arg\n\t\tcase \"local\":\n\t\t\tstreamName = \"public:local\"\n\t\tdefault:\n\t\t\tif arg[0] != ':' && arg[0] != '#' {\n\t\t\t\treturn errors.New(\"invalid argument\")\n\t\t\t}\n\t\t\tstreamName = \"hashtag\"\n\t\t\ttag = arg[1:]\n\t\t\tif len(tag) == 0 {\n\t\t\t\treturn errors.New(\"empty hashtag\")\n\t\t\t}\n\t\t\thashTagList = strings.Split(tag, \",\")\n\t\t\tfor i, h := range hashTagList {\n\t\t\t\tif h[0] == ':' || h[0] == '#' {\n\t\t\t\t\thashTagList[i] = h[1:]\n\t\t\t\t}\n\t\t\t\tif h == \"\" {\n\t\t\t\t\treturn errors.New(\"empty hashtag\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(hashTagList) > maximumHashtagStreamWS {\n\t\t\t\treturn errors.Errorf(\"too many hashtags, maximum is %d\", maximumHashtagStreamWS)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := madonInit(true); err != nil {\n\t\treturn err\n\t}\n\n\tevChan := make(chan madon.StreamEvent, 10)\n\tstop := make(chan bool)\n\tdone := make(chan bool)\n\tvar err error\n\n\tif streamName != \"hashtag\" || len(hashTagList) <= 1 { \/\/ Usual case: Only 1 stream\n\t\terr = gClient.StreamListener(streamName, tag, evChan, stop, done)\n\t} else { \/\/ Several streams\n\t\tn := len(hashTagList)\n\t\ttagEvCh := make([]chan madon.StreamEvent, n)\n\t\ttagDoneCh := make([]chan bool, n)\n\t\tfor i, t := range hashTagList {\n\t\t\tif verbose {\n\t\t\t\terrPrint(\"Launching listener for tag '%s'\", t)\n\t\t\t}\n\t\t\ttagEvCh[i] = make(chan madon.StreamEvent)\n\t\t\te := gClient.StreamListener(streamName, t, tagEvCh[i], stop, tagDoneCh[i])\n\t\t\tif e != nil {\n\t\t\t\tif i > 0 { \/\/ Close previous connections\n\t\t\t\t\tclose(stop)\n\t\t\t\t}\n\t\t\t\terr = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t\/\/ Forward events to main ev channel\n\t\t\tgo func(i int) {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase _, ok := <-tagDoneCh[i]:\n\t\t\t\t\t\tif !ok { \/\/ end of streaming for this tag\n\t\t\t\t\t\t\tdone <- true\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\tcase ev := <-tagEvCh[i]:\n\t\t\t\t\t\tevChan <- ev\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tp, err := getPrinter()\n\tif err != nil {\n\t\tclose(stop)\n\t\t<-done\n\t\tclose(evChan)\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\nLISTEN:\n\tfor {\n\t\tselect {\n\t\tcase _, ok := <-done:\n\t\t\tif !ok { \/\/ done is closed, end of streaming\n\t\t\t\tbreak LISTEN\n\t\t\t}\n\t\tcase ev := <-evChan:\n\t\t\tswitch ev.Event {\n\t\t\tcase \"error\":\n\t\t\t\tif ev.Error != nil {\n\t\t\t\t\tif ev.Error == io.ErrUnexpectedEOF {\n\t\t\t\t\t\terrPrint(\"The stream connection was unexpectedly closed\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\terrPrint(\"Error event: [%s] %s\", ev.Event, ev.Error)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terrPrint(\"Event: [%s]\", ev.Event)\n\t\t\tcase \"update\":\n\t\t\t\ts := ev.Data.(madon.Status)\n\t\t\t\tp.PrintObj(&s, nil, \"\")\n\t\t\t\tcontinue\n\t\t\tcase \"notification\":\n\t\t\t\tn := ev.Data.(madon.Notification)\n\t\t\t\tp.PrintObj(&n, nil, \"\")\n\t\t\t\tcontinue\n\t\t\tcase \"delete\":\n\t\t\t\t\/\/ TODO PrintObj ?\n\t\t\t\terrPrint(\"Event: [%s] Status %d was deleted\", ev.Event, ev.Data.(int64))\n\t\t\tdefault:\n\t\t\t\terrPrint(\"Unhandled event: [%s] %T\", ev.Event, ev.Data)\n\t\t\t}\n\t\t}\n\t}\n\tclose(stop)\n\tclose(evChan)\n\treturn nil\n}\n<commit_msg>Fix panic when a streaming connection is closed (in multi-streams)<commit_after>\/\/ Copyright © 2017 Mikael Berthe <mikael@lilotux.net>\n\/\/\n\/\/ Licensed under the MIT license.\n\/\/ Please see the LICENSE file is this directory.\n\npackage cmd\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/McKael\/madon\"\n)\n\n\/*\nvar streamOpts struct {\n\tlocal bool\n}\n*\/\n\n\/\/ Maximum number of websockets (1 hashtag <=> 1 ws)\nconst maximumHashtagStreamWS = 4\n\n\/\/ streamCmd represents the stream command\nvar streamCmd = &cobra.Command{\n\tUse: \"stream [user|local|public|:HASHTAG]\",\n\tShort: \"Listen to an event stream\",\n\tLong: `\nThe stream command stays connected to the server and listen to a stream of\nevents (user, local or federated).\nIt can also get a hashtag-based stream if the keyword or prefixed with\n':' or '#'.`,\n\tExample: ` madonctl stream # User timeline stream\n madonctl stream local # Local timeline stream\n madonctl stream public # Public timeline stream\n madonctl stream :mastodon # Hashtag\n madonctl stream #madonctl\n\nSeveral (up to 4) hashtags can be given.\nNote: madonctl will use 1 websocket per hashtag stream.\n madonctl stream #madonctl,#mastodon,#golang\n madonctl stream :madonctl,mastodon,api`,\n\tRunE: streamRunE,\n\tValidArgs: []string{\"user\", \"public\"},\n\tArgAliases: []string{\"home\"},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(streamCmd)\n\n\t\/\/streamCmd.Flags().BoolVar(&streamOpts.local, \"local\", false, \"Events from the local instance\")\n}\n\nfunc streamRunE(cmd *cobra.Command, args []string) error {\n\tstreamName := \"user\"\n\ttag := \"\"\n\tvar hashTagList []string\n\n\tif len(args) > 0 {\n\t\tif len(args) != 1 {\n\t\t\treturn errors.New(\"too many parameters\")\n\t\t}\n\t\targ := args[0]\n\t\tswitch arg {\n\t\tcase \"\", \"user\":\n\t\tcase \"public\":\n\t\t\tstreamName = arg\n\t\tcase \"local\":\n\t\t\tstreamName = \"public:local\"\n\t\tdefault:\n\t\t\tif arg[0] != ':' && arg[0] != '#' {\n\t\t\t\treturn errors.New(\"invalid argument\")\n\t\t\t}\n\t\t\tstreamName = \"hashtag\"\n\t\t\ttag = arg[1:]\n\t\t\tif len(tag) == 0 {\n\t\t\t\treturn errors.New(\"empty hashtag\")\n\t\t\t}\n\t\t\thashTagList = strings.Split(tag, \",\")\n\t\t\tfor i, h := range hashTagList {\n\t\t\t\tif h[0] == ':' || h[0] == '#' {\n\t\t\t\t\thashTagList[i] = h[1:]\n\t\t\t\t}\n\t\t\t\tif h == \"\" {\n\t\t\t\t\treturn errors.New(\"empty hashtag\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(hashTagList) > maximumHashtagStreamWS {\n\t\t\t\treturn errors.Errorf(\"too many hashtags, maximum is %d\", maximumHashtagStreamWS)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := madonInit(true); err != nil {\n\t\treturn err\n\t}\n\n\tevChan := make(chan madon.StreamEvent, 10)\n\tstop := make(chan bool)\n\tdone := make(chan bool)\n\tvar err error\n\n\tif streamName != \"hashtag\" || len(hashTagList) <= 1 { \/\/ Usual case: Only 1 stream\n\t\terr = gClient.StreamListener(streamName, tag, evChan, stop, done)\n\t} else { \/\/ Several streams\n\t\tn := len(hashTagList)\n\t\ttagEvCh := make([]chan madon.StreamEvent, n)\n\t\ttagDoneCh := make([]chan bool, n)\n\t\tfor i, t := range hashTagList {\n\t\t\tif verbose {\n\t\t\t\terrPrint(\"Launching listener for tag '%s'\", t)\n\t\t\t}\n\t\t\ttagEvCh[i] = make(chan madon.StreamEvent)\n\t\t\ttagDoneCh[i] = make(chan bool)\n\t\t\te := gClient.StreamListener(streamName, t, tagEvCh[i], stop, tagDoneCh[i])\n\t\t\tif e != nil {\n\t\t\t\tif i > 0 { \/\/ Close previous connections\n\t\t\t\t\tclose(stop)\n\t\t\t\t}\n\t\t\t\terr = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t\/\/ Forward events to main ev channel\n\t\t\tgo func(i int) {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase _, ok := <-tagDoneCh[i]:\n\t\t\t\t\t\tif !ok { \/\/ end of streaming for this tag\n\t\t\t\t\t\t\tdone <- true\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\tcase ev := <-tagEvCh[i]:\n\t\t\t\t\t\tevChan <- ev\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tp, err := getPrinter()\n\tif err != nil {\n\t\tclose(stop)\n\t\t<-done\n\t\tclose(evChan)\n\t\terrPrint(\"Error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n\nLISTEN:\n\tfor {\n\t\tselect {\n\t\tcase v, ok := <-done:\n\t\t\tif !ok || v == true { \/\/ done is closed, end of streaming\n\t\t\t\tbreak LISTEN\n\t\t\t}\n\t\tcase ev := <-evChan:\n\t\t\tswitch ev.Event {\n\t\t\tcase \"error\":\n\t\t\t\tif ev.Error != nil {\n\t\t\t\t\tif ev.Error == io.ErrUnexpectedEOF {\n\t\t\t\t\t\terrPrint(\"The stream connection was unexpectedly closed\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\terrPrint(\"Error event: [%s] %s\", ev.Event, ev.Error)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terrPrint(\"Event: [%s]\", ev.Event)\n\t\t\tcase \"update\":\n\t\t\t\ts := ev.Data.(madon.Status)\n\t\t\t\tp.PrintObj(&s, nil, \"\")\n\t\t\t\tcontinue\n\t\t\tcase \"notification\":\n\t\t\t\tn := ev.Data.(madon.Notification)\n\t\t\t\tp.PrintObj(&n, nil, \"\")\n\t\t\t\tcontinue\n\t\t\tcase \"delete\":\n\t\t\t\t\/\/ TODO PrintObj ?\n\t\t\t\terrPrint(\"Event: [%s] Status %d was deleted\", ev.Event, ev.Data.(int64))\n\t\t\tdefault:\n\t\t\t\terrPrint(\"Unhandled event: [%s] %T\", ev.Event, ev.Data)\n\t\t\t}\n\t\t}\n\t}\n\tclose(stop)\n\tclose(evChan)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"net\/http\"\n)\n\ntype stressRequest interface{}\n\ntype finishedStress struct{}\n\ntype workerDone struct{}\n\ntype requestStat struct {\n\tduration int64 \/\/milliseconds\n}\ntype requestStatSummary struct {\n\tavgDuration int64 \/\/milliseconds\n\tlongestDuration int64 \/\/milliseconds\n\tshortestDuration int64 \/\/milliseconds\n}\n\n\/\/flags\nvar (\n\tnumTests int\n\ttimeout int\n\tconcurrency int\n)\n\nfunc init() {\n\tRootCmd.AddCommand(stressCmd)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\n\t\/\/ Cobra supports Persistent Flags which will work for this command\n\t\/\/ and all subcommands, e.g.:\n\t\/\/ stressCmd.PersistentFlags().String(\"foo\", \"\", \"A help for foo\")\n\n\t\/\/ Cobra supports local flags which will only run when this command\n\t\/\/ is called directly, e.g.:\n\t\/\/ stressCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n\n\tstressCmd.Flags().IntVarP(&numTests, \"num\", \"n\", 100, \"Number of requests to make\")\n\tstressCmd.Flags().IntVarP(&concurrency, \"concurrent\", \"c\", 1, \"Number of multiple requests to make\")\n\tstressCmd.Flags().IntVarP(&timeout, \"timeout\", \"t\", 0, \"Maximum seconds to wait for response. 0 means unlimited\")\n}\n\n\/\/ stressCmd represents the stress command\nvar stressCmd = &cobra.Command{\n\tUse: \"stress http[s]:\/\/hostname[:port]\/path\",\n\tShort: \"Run predefined load of requests\",\n\tLong: `Run predefined load of requests`,\n\tRunE: RunStress,\n}\n\nfunc RunStress(cmd *cobra.Command, args []string) error {\n\t\/\/checks\n\tif len(args) != 1 {\n\t\treturn errors.New(\"needs URL\")\n\t}\n\tif numTests <= 0 {\n\t\treturn errors.New(\"number of requests must be one or more\")\n\t}\n\tif concurrency <= 0 {\n\t\treturn errors.New(\"concurrency must be one or more\")\n\t}\n\tif timeout < 0 {\n\t\treturn errors.New(\"timeout must be zero or more\")\n\t}\n\n\tfmt.Println(\"running stress\")\n\n\t\/\/setup the queue of requests\n\trequestChan := make(chan stressRequest, numTests+concurrency)\n\tfor i := 0; i < numTests; i++ {\n\t\t\/\/TODO optimize by not creating a new http request each time since it's the same thing\n\t\treq, err := http.NewRequest(\"GET\", args[0], nil)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"failed to create request: \" + err.Error())\n\t\t}\n\t\trequestChan <- req\n\t}\n\tfor i := 0; i < concurrency; i++ {\n\t\trequestChan <- finishedStress{}\n\t}\n\n\tworkerDoneChan := make(chan workerDone) \/\/workers use this to indicate they are done\n\trequestStatChan := make(chan requestStat) \/\/workers communicate each requests' info\n\n\t\/\/workers\n\tfor i := 0; i < concurrency; i++ {\n\t\tgo func() {\n\t\t\tclient := &http.Client{Timeout: time.Duration(timeout) * time.Second}\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase req := <-requestChan:\n\t\t\t\t\tswitch req.(type) {\n\t\t\t\t\tcase *http.Request:\n\t\t\t\t\t\t\/\/run the acutal request\n\t\t\t\t\t\treqStartTime := time.Now()\n\t\t\t\t\t\t_, err := client.Do(req.(*http.Request))\n\t\t\t\t\t\treqEndTime := time.Now()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Errorf(err.Error()) \/\/TODO handle this further up\n\t\t\t\t\t\t}\n\t\t\t\t\t\treqTimeMs := (reqEndTime.UnixNano() - reqStartTime.UnixNano()) \/ 1000000\n\t\t\t\t\t\tfmt.Printf(\"request took %dms\\n\", reqTimeMs)\n\t\t\t\t\t\trequestStatChan <- requestStat{duration: reqTimeMs}\n\t\t\t\t\tcase finishedStress:\n\t\t\t\t\t\tworkerDoneChan <- workerDone{}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tallRequestStats := make([]requestStat, numTests)\n\trequestsCompleteCount := 0\n\tworkersDoneCount := 0\n\t\/\/wait for all workers to finish\n\tfor {\n\t\tselect {\n\t\tcase <-workerDoneChan:\n\t\t\tworkersDoneCount++\n\t\t\tif workersDoneCount == concurrency {\n\t\t\t\t\/\/all workers are done\n\t\t\t\tfmt.Println(\"%+v\", createStats(allRequestStats))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase requestStat := <-requestStatChan:\n\t\t\tallRequestStats[requestsCompleteCount] = requestStat\n\t\t\trequestsCompleteCount++\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc createStats(requestStats []requestStat) requestStatSummary {\n\tif len(requestStats) == 0 {\n\t\treturn requestStatSummary{}\n\t}\n\n\tsummary := requestStatSummary{longestDuration: requestStats[0].duration, shortestDuration: requestStats[0].duration}\n\tvar totalDurations int64\n\ttotalDurations = 0\n\tfor i := 0; i < len(requestStats); i++ {\n\t\tif requestStats[i].duration > summary.longestDuration {\n\t\t\tsummary.longestDuration = requestStats[i].duration\n\t\t}\n\t\tif requestStats[i].duration < summary.shortestDuration {\n\t\t\tsummary.shortestDuration = requestStats[i].duration\n\t\t}\n\t\ttotalDurations += requestStats[i].duration\n\t}\n\tsummary.avgDuration = totalDurations \/ int64(len(requestStats))\n\treturn summary\n}\n<commit_msg>Cleaner printing of request summary<commit_after>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"net\/http\"\n)\n\ntype stressRequest interface{}\n\ntype finishedStress struct{}\n\ntype workerDone struct{}\n\ntype requestStat struct {\n\tduration int64 \/\/milliseconds\n}\ntype requestStatSummary struct {\n\tavgDuration int64 \/\/milliseconds\n\tmaxDuration int64 \/\/milliseconds\n\tminDuration int64 \/\/milliseconds\n}\n\n\/\/flags\nvar (\n\tnumTests int\n\ttimeout int\n\tconcurrency int\n)\n\nfunc init() {\n\tRootCmd.AddCommand(stressCmd)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\n\t\/\/ Cobra supports Persistent Flags which will work for this command\n\t\/\/ and all subcommands, e.g.:\n\t\/\/ stressCmd.PersistentFlags().String(\"foo\", \"\", \"A help for foo\")\n\n\t\/\/ Cobra supports local flags which will only run when this command\n\t\/\/ is called directly, e.g.:\n\t\/\/ stressCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n\n\tstressCmd.Flags().IntVarP(&numTests, \"num\", \"n\", 100, \"Number of requests to make\")\n\tstressCmd.Flags().IntVarP(&concurrency, \"concurrent\", \"c\", 1, \"Number of multiple requests to make\")\n\tstressCmd.Flags().IntVarP(&timeout, \"timeout\", \"t\", 0, \"Maximum seconds to wait for response. 0 means unlimited\")\n}\n\n\/\/ stressCmd represents the stress command\nvar stressCmd = &cobra.Command{\n\tUse: \"stress http[s]:\/\/hostname[:port]\/path\",\n\tShort: \"Run predefined load of requests\",\n\tLong: `Run predefined load of requests`,\n\tRunE: RunStress,\n}\n\nfunc RunStress(cmd *cobra.Command, args []string) error {\n\t\/\/checks\n\tif len(args) != 1 {\n\t\treturn errors.New(\"needs URL\")\n\t}\n\tif numTests <= 0 {\n\t\treturn errors.New(\"number of requests must be one or more\")\n\t}\n\tif concurrency <= 0 {\n\t\treturn errors.New(\"concurrency must be one or more\")\n\t}\n\tif timeout < 0 {\n\t\treturn errors.New(\"timeout must be zero or more\")\n\t}\n\n\turl := args[0]\n\n\tfmt.Println(\"Stress testing \" + url + \"...\")\n\n\t\/\/setup the queue of requests\n\trequestChan := make(chan stressRequest, numTests+concurrency)\n\tfor i := 0; i < numTests; i++ {\n\t\t\/\/TODO optimize by not creating a new http request each time since it's the same thing\n\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"failed to create request: \" + err.Error())\n\t\t}\n\t\trequestChan <- req\n\t}\n\tfor i := 0; i < concurrency; i++ {\n\t\trequestChan <- finishedStress{}\n\t}\n\n\tworkerDoneChan := make(chan workerDone) \/\/workers use this to indicate they are done\n\trequestStatChan := make(chan requestStat) \/\/workers communicate each requests' info\n\n\t\/\/workers\n\tfor i := 0; i < concurrency; i++ {\n\t\tgo func() {\n\t\t\tclient := &http.Client{Timeout: time.Duration(timeout) * time.Second}\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase req := <-requestChan:\n\t\t\t\t\tswitch req.(type) {\n\t\t\t\t\tcase *http.Request:\n\t\t\t\t\t\t\/\/run the acutal request\n\t\t\t\t\t\treqStartTime := time.Now()\n\t\t\t\t\t\t_, err := client.Do(req.(*http.Request))\n\t\t\t\t\t\treqEndTime := time.Now()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Errorf(err.Error()) \/\/TODO handle this further up\n\t\t\t\t\t\t}\n\t\t\t\t\t\treqTimeMs := (reqEndTime.UnixNano() - reqStartTime.UnixNano()) \/ 1000000\n\t\t\t\t\t\tfmt.Printf(\"request took %dms\\n\", reqTimeMs)\n\t\t\t\t\t\trequestStatChan <- requestStat{duration: reqTimeMs}\n\t\t\t\t\tcase finishedStress:\n\t\t\t\t\t\tworkerDoneChan <- workerDone{}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tallRequestStats := make([]requestStat, numTests)\n\trequestsCompleteCount := 0\n\tworkersDoneCount := 0\n\t\/\/wait for all workers to finish\n\tfor {\n\t\tselect {\n\t\tcase <-workerDoneChan:\n\t\t\tworkersDoneCount++\n\t\t\tif workersDoneCount == concurrency {\n\t\t\t\t\/\/all workers are done\n\t\t\t\tfmt.Println(createTextSummary(createStats(allRequestStats)))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase requestStat := <-requestStatChan:\n\t\t\tallRequestStats[requestsCompleteCount] = requestStat\n\t\t\trequestsCompleteCount++\n\t\t}\n\t}\n}\n\nfunc createStats(requestStats []requestStat) requestStatSummary {\n\tif len(requestStats) == 0 {\n\t\treturn requestStatSummary{}\n\t}\n\n\tsummary := requestStatSummary{maxDuration: requestStats[0].duration, minDuration: requestStats[0].duration}\n\tvar totalDurations int64\n\ttotalDurations = 0\n\tfor i := 0; i < len(requestStats); i++ {\n\t\tif requestStats[i].duration > summary.maxDuration {\n\t\t\tsummary.maxDuration = requestStats[i].duration\n\t\t}\n\t\tif requestStats[i].duration < summary.minDuration {\n\t\t\tsummary.minDuration = requestStats[i].duration\n\t\t}\n\t\ttotalDurations += requestStats[i].duration\n\t}\n\tsummary.avgDuration = totalDurations \/ int64(len(requestStats))\n\treturn summary\n}\n\nfunc createTextSummary(summary requestStatSummary) string {\n\treturn `Average: ` + strconv.Itoa(int(summary.avgDuration)) + \"ms\\n\" +\n\t\t`Max: ` + strconv.Itoa(int(summary.maxDuration)) + \"ms\\n\" +\n\t\t`Min: ` + strconv.Itoa(int(summary.minDuration)) + \"ms\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Copy files.\n\/\/\n\/\/ Synopsis:\n\/\/ cp [-rRfivwP] FROM... TO\n\/\/\n\/\/ Options:\n\/\/ -w n: number of worker goroutines\n\/\/ -R: copy file hierarchies\n\/\/ -r: alias to -R recursive mode\n\/\/ -i: prompt about overwriting file\n\/\/ -f: force overwrite files\n\/\/ -v: verbose copy mode\n\/\/ -P: don't follow symlinks\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ buffSize is the length of buffer during\n\/\/ the parallel copy using worker function\nconst buffSize = 8192\n\nvar (\n\trecursive bool\n\task bool\n\tforce bool\n\tverbose bool\n\tsymlink bool\n\tnwork int\n\tinput = bufio.NewReader(os.Stdin)\n\t\/\/ offchan is a channel used for indicate the nextbuffer to read with worker()\n\toffchan = make(chan int64, 0)\n\t\/\/ zerochan is a channel used for indicate the start of a new read file\n\tzerochan = make(chan int, 0)\n)\n\nfunc init() {\n\tdefUsage := flag.Usage\n\tflag.Usage = func() {\n\t\tos.Args[0] = \"cp [-wRrifvP] file[s] ... dest\"\n\t\tdefUsage()\n\t}\n\tflag.IntVar(&nwork, \"w\", runtime.NumCPU(), \"number of worker goroutines\")\n\tflag.BoolVar(&recursive, \"R\", false, \"copy file hierarchies\")\n\tflag.BoolVar(&recursive, \"r\", false, \"alias to -R recursive mode\")\n\tflag.BoolVar(&ask, \"i\", false, \"prompt about overwriting file\")\n\tflag.BoolVar(&force, \"f\", false, \"force overwrite files\")\n\tflag.BoolVar(&verbose, \"v\", false, \"verbose copy mode\")\n\tflag.BoolVar(&symlink, \"P\", false, \"don't follow symlinks\")\n\tflag.Parse()\n\tgo nextOff()\n}\n\n\/\/ promptOverwrite ask if the user wants overwrite file\nfunc promptOverwrite(dst string) (bool, error) {\n\tfmt.Printf(\"cp: overwrite %q? \", dst)\n\tanswer, err := input.ReadString('\\n')\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif strings.ToLower(answer)[0] != 'y' {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\n\/\/ copyFile copies file between src (source) and dst (destination)\n\/\/ todir: if true insert src INTO dir dst\nfunc copyFile(src, dst string, todir bool) error {\n\tif todir {\n\t\tfile := filepath.Base(src)\n\t\tdst = filepath.Join(dst, file)\n\t}\n\n\tsrcb, err := os.Lstat(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't stat %v: %v\", src, err)\n\t}\n\n\t\/\/ don't follow symlinks, copy symlink\n\tif L := os.ModeSymlink; symlink && srcb.Mode()&L == L {\n\t\tlinkPath, err := filepath.EvalSymlinks(src)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't eval symlink %v: %v\", src, err)\n\t\t}\n\t\treturn os.Symlink(linkPath, dst)\n\t}\n\n\tif srcb.IsDir() {\n\t\tif recursive {\n\t\t\treturn copyDir(src, dst)\n\t\t}\n\t\treturn fmt.Errorf(\"%q is a directory, try use recursive option\", src)\n\t}\n\n\tdstb, err := os.Stat(dst)\n\tif !os.IsNotExist(err) {\n\t\tif sameFile(srcb.Sys(), dstb.Sys()) {\n\t\t\treturn fmt.Errorf(\"%q and %q are the same file\", src, dst)\n\t\t}\n\t\tif ask && !force {\n\t\t\toverwrite, err := promptOverwrite(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !overwrite {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tmode := srcb.Mode() & 0777\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't open %q: %v\", src, err)\n\t}\n\tdefer s.Close()\n\n\td, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create %q: %v\", dst, err)\n\t}\n\tdefer d.Close()\n\n\treturn copyOneFile(s, d, src, dst)\n}\n\n\/\/ copyOneFile copy the content between two files\nfunc copyOneFile(s *os.File, d *os.File, src, dst string) error {\n\tzerochan <- 0\n\tfail := make(chan error, nwork)\n\tfor i := 0; i < nwork; i++ {\n\t\tgo worker(s, d, fail)\n\t}\n\n\t\/\/ iterate the errors from channel\n\tfor i := 0; i < nwork; i++ {\n\t\terr := <-fail\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif verbose {\n\t\tfmt.Printf(\"%q -> %q\\n\", src, dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ createDir populate dir destination if not exists\n\/\/ if exists verify is not a dir: return error if is file\n\/\/ cannot overwrite: dir -> file\nfunc createDir(src, dst string) error {\n\tdstInfo, err := os.Stat(dst)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tif err == nil {\n\t\tif !dstInfo.IsDir() {\n\t\t\treturn fmt.Errorf(\"can't overwrite non-dir %q with dir %q\", dst, src)\n\t\t}\n\t\treturn nil\n\t}\n\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := os.Mkdir(dst, srcInfo.Mode()); err != nil {\n\t\treturn err\n\t}\n\tif verbose {\n\t\tfmt.Printf(\"%q -> %q\\n\", src, dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ copyDir copy the file hierarchies\n\/\/ used at cp when -r or -R flag is true\nfunc copyDir(src, dst string) error {\n\tif err := createDir(src, dst); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ list files from destination\n\tfiles, err := ioutil.ReadDir(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't list files from %q: %q\", src, err)\n\t}\n\n\t\/\/ copy recursively the src -> dst\n\tfor _, file := range files {\n\t\tfname := file.Name()\n\t\tfpath := filepath.Join(src, fname)\n\t\tnewDst := filepath.Join(dst, fname)\n\t\tcopyFile(fpath, newDst, false)\n\t}\n\n\treturn err\n}\n\n\/\/ worker is a concurrent copy, used to copy part of the files\n\/\/ in parallel\nfunc worker(s *os.File, d *os.File, fail chan error) {\n\tvar buf [buffSize]byte\n\tvar bp []byte\n\n\tl := len(buf)\n\tbp = buf[0:]\n\to := <-offchan\n\tfor {\n\t\tn, err := s.ReadAt(bp, o)\n\t\tif err != nil && err != io.EOF {\n\t\t\tfail <- fmt.Errorf(\"reading %s at %v: %v\", s.Name(), o, err)\n\t\t\treturn\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tnb := bp[0:n]\n\t\tn, err = d.WriteAt(nb, o)\n\t\tif err != nil {\n\t\t\tfail <- fmt.Errorf(\"writing %s: %v\", d.Name(), err)\n\t\t\treturn\n\t\t}\n\t\tbp = buf[n:]\n\t\to += int64(n)\n\t\tl -= n\n\t\tif l == 0 {\n\t\t\tl = len(buf)\n\t\t\tbp = buf[0:]\n\t\t\to = <-offchan\n\t\t}\n\t}\n\tfail <- nil\n}\n\n\/\/ nextOff handler for next buffers and sync goroutines\n\/\/ zerochan imply the init of file\n\/\/ offchan is the next buffer part to read\nfunc nextOff() {\n\toff := int64(0)\n\tfor {\n\t\tselect {\n\t\tcase <-zerochan:\n\t\t\toff = 0\n\t\tcase offchan <- off:\n\t\t\toff += buffSize\n\t\t}\n\t}\n}\n\n\/\/ cp is a function whose eval the args\n\/\/ and make decisions for copyfiles\nfunc cp(args []string) (lastErr error) {\n\ttodir := false\n\tfrom, to := args[:len(args)-1], args[len(args)-1]\n\ttoStat, err := os.Stat(to)\n\tif err == nil {\n\t\ttodir = toStat.IsDir()\n\t}\n\tif flag.NArg() > 2 && todir == false {\n\t\tlog.Fatalf(\"is not a directory: %s\\n\", to)\n\t}\n\n\tfor _, file := range from {\n\t\tif err := copyFile(file, to, todir); err != nil {\n\t\t\tlog.Printf(\"cp: %v\\n\", err)\n\t\t\tlastErr = err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc main() {\n\tif flag.NArg() < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tif err := cp(flag.Args()); err != nil {\n\t\tos.Exit(1)\n\t}\n\n}\n<commit_msg>cp: fix incorrect handling of error on Stat of dst<commit_after>\/\/ Copyright 2016-2017 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Copy files.\n\/\/\n\/\/ Synopsis:\n\/\/ cp [-rRfivwP] FROM... TO\n\/\/\n\/\/ Options:\n\/\/ -w n: number of worker goroutines\n\/\/ -R: copy file hierarchies\n\/\/ -r: alias to -R recursive mode\n\/\/ -i: prompt about overwriting file\n\/\/ -f: force overwrite files\n\/\/ -v: verbose copy mode\n\/\/ -P: don't follow symlinks\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ buffSize is the length of buffer during\n\/\/ the parallel copy using worker function\nconst buffSize = 8192\n\nvar (\n\trecursive bool\n\task bool\n\tforce bool\n\tverbose bool\n\tsymlink bool\n\tnwork int\n\tinput = bufio.NewReader(os.Stdin)\n\t\/\/ offchan is a channel used for indicate the nextbuffer to read with worker()\n\toffchan = make(chan int64, 0)\n\t\/\/ zerochan is a channel used for indicate the start of a new read file\n\tzerochan = make(chan int, 0)\n)\n\nfunc init() {\n\tdefUsage := flag.Usage\n\tflag.Usage = func() {\n\t\tos.Args[0] = \"cp [-wRrifvP] file[s] ... dest\"\n\t\tdefUsage()\n\t}\n\tflag.IntVar(&nwork, \"w\", runtime.NumCPU(), \"number of worker goroutines\")\n\tflag.BoolVar(&recursive, \"R\", false, \"copy file hierarchies\")\n\tflag.BoolVar(&recursive, \"r\", false, \"alias to -R recursive mode\")\n\tflag.BoolVar(&ask, \"i\", false, \"prompt about overwriting file\")\n\tflag.BoolVar(&force, \"f\", false, \"force overwrite files\")\n\tflag.BoolVar(&verbose, \"v\", false, \"verbose copy mode\")\n\tflag.BoolVar(&symlink, \"P\", false, \"don't follow symlinks\")\n\tflag.Parse()\n\tgo nextOff()\n}\n\n\/\/ promptOverwrite ask if the user wants overwrite file\nfunc promptOverwrite(dst string) (bool, error) {\n\tfmt.Printf(\"cp: overwrite %q? \", dst)\n\tanswer, err := input.ReadString('\\n')\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif strings.ToLower(answer)[0] != 'y' {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\n\/\/ copyFile copies file between src (source) and dst (destination)\n\/\/ todir: if true insert src INTO dir dst\nfunc copyFile(src, dst string, todir bool) error {\n\tif todir {\n\t\tfile := filepath.Base(src)\n\t\tdst = filepath.Join(dst, file)\n\t}\n\n\tsrcb, err := os.Lstat(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't stat %v: %v\", src, err)\n\t}\n\n\t\/\/ don't follow symlinks, copy symlink\n\tif L := os.ModeSymlink; symlink && srcb.Mode()&L == L {\n\t\tlinkPath, err := filepath.EvalSymlinks(src)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't eval symlink %v: %v\", src, err)\n\t\t}\n\t\treturn os.Symlink(linkPath, dst)\n\t}\n\n\tif srcb.IsDir() {\n\t\tif recursive {\n\t\t\treturn copyDir(src, dst)\n\t\t}\n\t\treturn fmt.Errorf(\"%q is a directory, try use recursive option\", src)\n\t}\n\n\tdstb, err := os.Stat(dst)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"%q: can't handle error %v\", dst, err)\n\t}\n\n\tif dstb != nil {\n\t\tif sameFile(srcb.Sys(), dstb.Sys()) {\n\t\t\treturn fmt.Errorf(\"%q and %q are the same file\", src, dst)\n\t\t}\n\t\tif ask && !force {\n\t\t\toverwrite, err := promptOverwrite(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !overwrite {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tmode := srcb.Mode() & 0777\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't open %q: %v\", src, err)\n\t}\n\tdefer s.Close()\n\n\td, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create %q: %v\", dst, err)\n\t}\n\tdefer d.Close()\n\n\treturn copyOneFile(s, d, src, dst)\n}\n\n\/\/ copyOneFile copy the content between two files\nfunc copyOneFile(s *os.File, d *os.File, src, dst string) error {\n\tzerochan <- 0\n\tfail := make(chan error, nwork)\n\tfor i := 0; i < nwork; i++ {\n\t\tgo worker(s, d, fail)\n\t}\n\n\t\/\/ iterate the errors from channel\n\tfor i := 0; i < nwork; i++ {\n\t\terr := <-fail\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif verbose {\n\t\tfmt.Printf(\"%q -> %q\\n\", src, dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ createDir populate dir destination if not exists\n\/\/ if exists verify is not a dir: return error if is file\n\/\/ cannot overwrite: dir -> file\nfunc createDir(src, dst string) error {\n\tdstInfo, err := os.Stat(dst)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tif err == nil {\n\t\tif !dstInfo.IsDir() {\n\t\t\treturn fmt.Errorf(\"can't overwrite non-dir %q with dir %q\", dst, src)\n\t\t}\n\t\treturn nil\n\t}\n\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := os.Mkdir(dst, srcInfo.Mode()); err != nil {\n\t\treturn err\n\t}\n\tif verbose {\n\t\tfmt.Printf(\"%q -> %q\\n\", src, dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ copyDir copy the file hierarchies\n\/\/ used at cp when -r or -R flag is true\nfunc copyDir(src, dst string) error {\n\tif err := createDir(src, dst); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ list files from destination\n\tfiles, err := ioutil.ReadDir(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't list files from %q: %q\", src, err)\n\t}\n\n\t\/\/ copy recursively the src -> dst\n\tfor _, file := range files {\n\t\tfname := file.Name()\n\t\tfpath := filepath.Join(src, fname)\n\t\tnewDst := filepath.Join(dst, fname)\n\t\tcopyFile(fpath, newDst, false)\n\t}\n\n\treturn err\n}\n\n\/\/ worker is a concurrent copy, used to copy part of the files\n\/\/ in parallel\nfunc worker(s *os.File, d *os.File, fail chan error) {\n\tvar buf [buffSize]byte\n\tvar bp []byte\n\n\tl := len(buf)\n\tbp = buf[0:]\n\to := <-offchan\n\tfor {\n\t\tn, err := s.ReadAt(bp, o)\n\t\tif err != nil && err != io.EOF {\n\t\t\tfail <- fmt.Errorf(\"reading %s at %v: %v\", s.Name(), o, err)\n\t\t\treturn\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tnb := bp[0:n]\n\t\tn, err = d.WriteAt(nb, o)\n\t\tif err != nil {\n\t\t\tfail <- fmt.Errorf(\"writing %s: %v\", d.Name(), err)\n\t\t\treturn\n\t\t}\n\t\tbp = buf[n:]\n\t\to += int64(n)\n\t\tl -= n\n\t\tif l == 0 {\n\t\t\tl = len(buf)\n\t\t\tbp = buf[0:]\n\t\t\to = <-offchan\n\t\t}\n\t}\n\tfail <- nil\n}\n\n\/\/ nextOff handler for next buffers and sync goroutines\n\/\/ zerochan imply the init of file\n\/\/ offchan is the next buffer part to read\nfunc nextOff() {\n\toff := int64(0)\n\tfor {\n\t\tselect {\n\t\tcase <-zerochan:\n\t\t\toff = 0\n\t\tcase offchan <- off:\n\t\t\toff += buffSize\n\t\t}\n\t}\n}\n\n\/\/ cp is a function whose eval the args\n\/\/ and make decisions for copyfiles\nfunc cp(args []string) (lastErr error) {\n\ttodir := false\n\tfrom, to := args[:len(args)-1], args[len(args)-1]\n\ttoStat, err := os.Stat(to)\n\tif err == nil {\n\t\ttodir = toStat.IsDir()\n\t}\n\tif flag.NArg() > 2 && todir == false {\n\t\tlog.Fatalf(\"is not a directory: %s\\n\", to)\n\t}\n\n\tfor _, file := range from {\n\t\tif err := copyFile(file, to, todir); err != nil {\n\t\t\tlog.Printf(\"cp: %v\\n\", err)\n\t\t\tlastErr = err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc main() {\n\tif flag.NArg() < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tif err := cp(flag.Args()); err != nil {\n\t\tos.Exit(1)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package gkvlite\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ User-supplied key comparison func should return 0 if a == b,\n\/\/ -1 if a < b, and +1 if a > b. For example: bytes.Compare()\ntype KeyCompare func(a, b []byte) int\n\n\/\/ A persistable collection of ordered key-values (Item's).\ntype Collection struct {\n\tname string \/\/ May be \"\" for a private collection.\n\tstore *Store\n\tcompare KeyCompare\n\n\trootLock *sync.Mutex\n\troot *rootNodeLoc \/\/ Protected by rootLock.\n\n\tstats CollectionStats\n\n\tfreeLock sync.Mutex\n\tfreeNodes *node \/\/ Protected by freeLock.\n\tfreeNodeLocs *nodeLoc \/\/ Not protected by freeLock.\n\tfreeRootNodeLocs *rootNodeLoc \/\/ Protected by freeLock.\n}\n\ntype CollectionStats struct {\n\tMkNodeLocs int64\n\tFreeNodeLocs int64\n\tAllocNodeLocs int64\n\n\tMkNodes int64\n\tFreeNodes int64\n\tAllocNodes int64\n}\n\ntype rootNodeLoc struct {\n\trefs int64 \/\/ Atomic reference counter.\n\troot *nodeLoc\n\tnext *rootNodeLoc \/\/ For free-list tracking.\n}\n\nfunc (t *Collection) Name() string {\n\treturn t.name\n}\n\n\/\/ Retrieve an item by its key. Use withValue of false if you don't\n\/\/ need the item's value (Item.Val may be nil), which might be able\n\/\/ to save on I\/O and memory resources, especially for large values.\n\/\/ The returned Item should be treated as immutable.\nfunc (t *Collection) GetItem(key []byte, withValue bool) (i *Item, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tn := rnl.root\n\tfor {\n\t\tnNode, err := n.read(t.store)\n\t\tif err != nil || n.isEmpty() || nNode == nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti := &nNode.item\n\t\tiItem, err := i.read(t, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif iItem == nil || iItem.Key == nil {\n\t\t\treturn nil, errors.New(\"missing item after item.read() in GetItem()\")\n\t\t}\n\t\tc := t.compare(key, iItem.Key)\n\t\tif c < 0 {\n\t\t\tn = &nNode.left\n\t\t} else if c > 0 {\n\t\t\tn = &nNode.right\n\t\t} else {\n\t\t\tif withValue {\n\t\t\t\tiItem, err = i.read(t, withValue)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn iItem, nil\n\t\t}\n\t}\n}\n\n\/\/ Retrieve a value by its key. Returns nil if the item is not in the\n\/\/ collection. The returned value should be treated as immutable.\nfunc (t *Collection) Get(key []byte) (val []byte, err error) {\n\ti, err := t.GetItem(key, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif i != nil {\n\t\treturn i.Val, nil\n\t}\n\treturn nil, nil\n}\n\n\/\/ Replace or insert an item of a given key.\n\/\/ A random item Priority (e.g., rand.Int31()) will usually work well,\n\/\/ but advanced users may consider using non-random item priorities\n\/\/ at the risk of unbalancing the lookup tree. The input Item instance\n\/\/ should be considered immutable and owned by the Collection.\nfunc (t *Collection) SetItem(item *Item) (err error) {\n\tif item.Key == nil || len(item.Key) > 0xffff || len(item.Key) == 0 ||\n\t\titem.Val == nil {\n\t\treturn errors.New(\"Item.Key\/Val missing or too long\")\n\t}\n\tif item.Priority < 0 {\n\t\treturn errors.New(\"Item.Priority must be non-negative\")\n\t}\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\tn := t.mkNode(nil, nil, nil, 1, uint64(len(item.Key))+uint64(item.NumValBytes(t)))\n\tn.item.item = unsafe.Pointer(item) \/\/ Avoid garbage via separate init.\n\tnloc := t.mkNodeLoc(n)\n\tr, err := t.store.union(t, root, nloc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !t.rootCAS(rnl, t.mkRootNodeLoc(r)) {\n\t\treturn errors.New(\"concurrent mutation attempted\")\n\t}\n\tt.rootDecRef(rnl)\n\tt.freeNodeLoc(nloc)\n\tt.reclaimNodes(n)\n\treturn nil\n}\n\n\/\/ Replace or insert an item of a given key.\nfunc (t *Collection) Set(key []byte, val []byte) error {\n\treturn t.SetItem(&Item{Key: key, Val: val, Priority: rand.Int31()})\n}\n\n\/\/ Deletes an item of a given key.\nfunc (t *Collection) Delete(key []byte) (wasDeleted bool, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\ti, err := t.GetItem(key, false)\n\tif err != nil || i == nil {\n\t\treturn false, err\n\t}\n\tleft, middle, right, err := t.store.split(t, root, key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif middle.isEmpty() {\n\t\treturn false, fmt.Errorf(\"concurrent delete, key: %v\", key)\n\t}\n\t\/\/ TODO: Even though we markReclaimable() the middle node, there\n\t\/\/ might not be a pathway to it during reclaimation.\n\tt.markReclaimable(middle.Node())\n\tt.freeNodeLoc(middle)\n\tr, err := t.store.join(t, left, right)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !t.rootCAS(rnl, t.mkRootNodeLoc(r)) {\n\t\treturn false, errors.New(\"concurrent mutation attempted\")\n\t}\n\tt.rootDecRef(rnl)\n\tt.reclaimNodes(left.Node())\n\tt.reclaimNodes(right.Node())\n\tt.freeNodeLoc(left)\n\tt.freeNodeLoc(right)\n\treturn true, nil\n}\n\n\/\/ Retrieves the item with the \"smallest\" key.\n\/\/ The returned item should be treated as immutable.\nfunc (t *Collection) MinItem(withValue bool) (*Item, error) {\n\treturn t.store.walk(t, withValue,\n\t\tfunc(n *node) (*nodeLoc, bool) { return &n.left, true })\n}\n\n\/\/ Retrieves the item with the \"largest\" key.\n\/\/ The returned item should be treated as immutable.\nfunc (t *Collection) MaxItem(withValue bool) (*Item, error) {\n\treturn t.store.walk(t, withValue,\n\t\tfunc(n *node) (*nodeLoc, bool) { return &n.right, true })\n}\n\n\/\/ Evict some clean items found by randomly walking a tree branch.\nfunc (t *Collection) EvictSomeItems() (numEvicted uint64) {\n\tt.store.walk(t, false, func(n *node) (*nodeLoc, bool) {\n\t\tif !n.item.Loc().isEmpty() {\n\t\t\tatomic.StorePointer(&n.item.item, unsafe.Pointer(nil))\n\t\t\tnumEvicted++\n\t\t}\n\t\tnext := &n.left\n\t\tif (rand.Int() & 0x01) == 0x01 {\n\t\t\tnext = &n.right\n\t\t}\n\t\tif next.isEmpty() {\n\t\t\treturn nil, false\n\t\t}\n\t\treturn next, true\n\t})\n\treturn numEvicted\n}\n\ntype ItemVisitor func(i *Item) bool\ntype ItemVisitorEx func(i *Item, depth uint64) bool\n\n\/\/ Visit items greater-than-or-equal to the target key in ascending order.\nfunc (t *Collection) VisitItemsAscend(target []byte, withValue bool, v ItemVisitor) error {\n\treturn t.VisitItemsAscendEx(target, withValue,\n\t\tfunc(i *Item, depth uint64) bool { return v(i) })\n}\n\n\/\/ Visit items less-than the target key in descending order.\nfunc (t *Collection) VisitItemsDescend(target []byte, withValue bool, v ItemVisitor) error {\n\treturn t.VisitItemsDescendEx(target, withValue,\n\t\tfunc(i *Item, depth uint64) bool { return v(i) })\n}\n\n\/\/ Visit items greater-than-or-equal to the target key in ascending order; with depth info.\nfunc (t *Collection) VisitItemsAscendEx(target []byte, withValue bool,\n\tvisitor ItemVisitorEx) error {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\t_, err := t.store.visitNodes(t, rnl.root,\n\t\ttarget, withValue, visitor, 0, ascendChoice)\n\treturn err\n}\n\n\/\/ Visit items less-than the target key in descending order; with depth info.\nfunc (t *Collection) VisitItemsDescendEx(target []byte, withValue bool,\n\tvisitor ItemVisitorEx) error {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\t_, err := t.store.visitNodes(t, rnl.root,\n\t\ttarget, withValue, visitor, 0, descendChoice)\n\treturn err\n}\n\nfunc ascendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {\n\treturn cmp <= 0, &n.left, &n.right\n}\n\nfunc descendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {\n\treturn cmp > 0, &n.right, &n.left\n}\n\n\/\/ Returns total number of items and total key bytes plus value bytes.\nfunc (t *Collection) GetTotals() (numItems uint64, numBytes uint64, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tn := rnl.root\n\tnNode, err := n.read(t.store)\n\tif err != nil || n.isEmpty() || nNode == nil {\n\t\treturn 0, 0, err\n\t}\n\treturn nNode.numNodes, nNode.numBytes, nil\n}\n\n\/\/ Returns JSON representation of root node file location.\nfunc (t *Collection) MarshalJSON() ([]byte, error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tloc := rnl.root.Loc()\n\tif loc.isEmpty() {\n\t\treturn json.Marshal(ploc_empty)\n\t}\n\treturn json.Marshal(loc)\n}\n\n\/\/ Unmarshals JSON representation of root node file location.\nfunc (t *Collection) UnmarshalJSON(d []byte) error {\n\tp := ploc{}\n\tif err := json.Unmarshal(d, &p); err != nil {\n\t\treturn err\n\t}\n\tif t.rootLock == nil {\n\t\tt.rootLock = &sync.Mutex{}\n\t}\n\tnloc := t.mkNodeLoc(nil)\n\tnloc.loc = unsafe.Pointer(&p)\n\tif !t.rootCAS(nil, t.mkRootNodeLoc(nloc)) {\n\t\treturn errors.New(\"concurrent mutation during UnmarshalJSON().\")\n\t}\n\treturn nil\n}\n\n\/\/ Writes dirty items of a collection BUT (WARNING) does NOT write new\n\/\/ root records. Use Store.Flush() to write root records, which would\n\/\/ make these writes visible to the next file re-opening\/re-loading.\nfunc (t *Collection) Write() (err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\tif err = t.flushItems(root); err != nil {\n\t\treturn err\n\t}\n\tif err = t.store.flushNodes(root); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Assumes that the caller serializes invocations w.r.t. mutations.\nfunc (t *Collection) Stats() CollectionStats {\n\treturn t.stats\n}\n\nfunc (t *Collection) flushItems(nloc *nodeLoc) (err error) {\n\tif nloc == nil || !nloc.Loc().isEmpty() {\n\t\treturn nil \/\/ Flush only unpersisted items of non-empty, unpersisted nodes.\n\t}\n\tnode := nloc.Node()\n\tif node == nil {\n\t\treturn nil\n\t}\n\tif err = t.flushItems(&node.left); err != nil {\n\t\treturn err\n\t}\n\tif err = node.item.write(t); err != nil { \/\/ Write items in key order.\n\t\treturn err\n\t}\n\treturn t.flushItems(&node.right)\n}\n\nfunc (t *Collection) rootCAS(prev, next *rootNodeLoc) bool {\n\tt.rootLock.Lock()\n\tdefer t.rootLock.Unlock()\n\n\tif t.root != prev {\n\t\treturn false\n\t}\n\tt.root = next\n\treturn true\n}\n\nfunc (t *Collection) rootAddRef() *rootNodeLoc {\n\tt.rootLock.Lock()\n\tdefer t.rootLock.Unlock()\n\n\tt.root.refs++\n\treturn t.root\n}\n\nfunc (t *Collection) rootDecRef(r *rootNodeLoc) {\n\tt.rootLock.Lock()\n\tdefer t.rootLock.Unlock()\n\n\tr.refs--\n\tif r.refs > 0 {\n\t\treturn\n\t}\n\tif r.root.isEmpty() {\n\t\treturn\n\t}\n\tt.reclaimNodes(r.root.Node())\n\tt.freeNodeLoc(r.root)\n\tt.freeRootNodeLoc(r)\n}\n<commit_msg>Smaller rootDecRef() lock area.<commit_after>package gkvlite\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ User-supplied key comparison func should return 0 if a == b,\n\/\/ -1 if a < b, and +1 if a > b. For example: bytes.Compare()\ntype KeyCompare func(a, b []byte) int\n\n\/\/ A persistable collection of ordered key-values (Item's).\ntype Collection struct {\n\tname string \/\/ May be \"\" for a private collection.\n\tstore *Store\n\tcompare KeyCompare\n\n\trootLock *sync.Mutex\n\troot *rootNodeLoc \/\/ Protected by rootLock.\n\n\tstats CollectionStats\n\n\tfreeLock sync.Mutex\n\tfreeNodes *node \/\/ Protected by freeLock.\n\tfreeNodeLocs *nodeLoc \/\/ Not protected by freeLock.\n\tfreeRootNodeLocs *rootNodeLoc \/\/ Protected by freeLock.\n}\n\ntype CollectionStats struct {\n\tMkNodeLocs int64\n\tFreeNodeLocs int64\n\tAllocNodeLocs int64\n\n\tMkNodes int64\n\tFreeNodes int64\n\tAllocNodes int64\n}\n\ntype rootNodeLoc struct {\n\trefs int64 \/\/ Atomic reference counter.\n\troot *nodeLoc\n\tnext *rootNodeLoc \/\/ For free-list tracking.\n}\n\nfunc (t *Collection) Name() string {\n\treturn t.name\n}\n\n\/\/ Retrieve an item by its key. Use withValue of false if you don't\n\/\/ need the item's value (Item.Val may be nil), which might be able\n\/\/ to save on I\/O and memory resources, especially for large values.\n\/\/ The returned Item should be treated as immutable.\nfunc (t *Collection) GetItem(key []byte, withValue bool) (i *Item, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tn := rnl.root\n\tfor {\n\t\tnNode, err := n.read(t.store)\n\t\tif err != nil || n.isEmpty() || nNode == nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti := &nNode.item\n\t\tiItem, err := i.read(t, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif iItem == nil || iItem.Key == nil {\n\t\t\treturn nil, errors.New(\"missing item after item.read() in GetItem()\")\n\t\t}\n\t\tc := t.compare(key, iItem.Key)\n\t\tif c < 0 {\n\t\t\tn = &nNode.left\n\t\t} else if c > 0 {\n\t\t\tn = &nNode.right\n\t\t} else {\n\t\t\tif withValue {\n\t\t\t\tiItem, err = i.read(t, withValue)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn iItem, nil\n\t\t}\n\t}\n}\n\n\/\/ Retrieve a value by its key. Returns nil if the item is not in the\n\/\/ collection. The returned value should be treated as immutable.\nfunc (t *Collection) Get(key []byte) (val []byte, err error) {\n\ti, err := t.GetItem(key, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif i != nil {\n\t\treturn i.Val, nil\n\t}\n\treturn nil, nil\n}\n\n\/\/ Replace or insert an item of a given key.\n\/\/ A random item Priority (e.g., rand.Int31()) will usually work well,\n\/\/ but advanced users may consider using non-random item priorities\n\/\/ at the risk of unbalancing the lookup tree. The input Item instance\n\/\/ should be considered immutable and owned by the Collection.\nfunc (t *Collection) SetItem(item *Item) (err error) {\n\tif item.Key == nil || len(item.Key) > 0xffff || len(item.Key) == 0 ||\n\t\titem.Val == nil {\n\t\treturn errors.New(\"Item.Key\/Val missing or too long\")\n\t}\n\tif item.Priority < 0 {\n\t\treturn errors.New(\"Item.Priority must be non-negative\")\n\t}\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\tn := t.mkNode(nil, nil, nil, 1, uint64(len(item.Key))+uint64(item.NumValBytes(t)))\n\tn.item.item = unsafe.Pointer(item) \/\/ Avoid garbage via separate init.\n\tnloc := t.mkNodeLoc(n)\n\tr, err := t.store.union(t, root, nloc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !t.rootCAS(rnl, t.mkRootNodeLoc(r)) {\n\t\treturn errors.New(\"concurrent mutation attempted\")\n\t}\n\tt.rootDecRef(rnl)\n\tt.freeNodeLoc(nloc)\n\tt.reclaimNodes(n)\n\treturn nil\n}\n\n\/\/ Replace or insert an item of a given key.\nfunc (t *Collection) Set(key []byte, val []byte) error {\n\treturn t.SetItem(&Item{Key: key, Val: val, Priority: rand.Int31()})\n}\n\n\/\/ Deletes an item of a given key.\nfunc (t *Collection) Delete(key []byte) (wasDeleted bool, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\ti, err := t.GetItem(key, false)\n\tif err != nil || i == nil {\n\t\treturn false, err\n\t}\n\tleft, middle, right, err := t.store.split(t, root, key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif middle.isEmpty() {\n\t\treturn false, fmt.Errorf(\"concurrent delete, key: %v\", key)\n\t}\n\t\/\/ TODO: Even though we markReclaimable() the middle node, there\n\t\/\/ might not be a pathway to it during reclaimation.\n\tt.markReclaimable(middle.Node())\n\tt.freeNodeLoc(middle)\n\tr, err := t.store.join(t, left, right)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !t.rootCAS(rnl, t.mkRootNodeLoc(r)) {\n\t\treturn false, errors.New(\"concurrent mutation attempted\")\n\t}\n\tt.rootDecRef(rnl)\n\tt.reclaimNodes(left.Node())\n\tt.reclaimNodes(right.Node())\n\tt.freeNodeLoc(left)\n\tt.freeNodeLoc(right)\n\treturn true, nil\n}\n\n\/\/ Retrieves the item with the \"smallest\" key.\n\/\/ The returned item should be treated as immutable.\nfunc (t *Collection) MinItem(withValue bool) (*Item, error) {\n\treturn t.store.walk(t, withValue,\n\t\tfunc(n *node) (*nodeLoc, bool) { return &n.left, true })\n}\n\n\/\/ Retrieves the item with the \"largest\" key.\n\/\/ The returned item should be treated as immutable.\nfunc (t *Collection) MaxItem(withValue bool) (*Item, error) {\n\treturn t.store.walk(t, withValue,\n\t\tfunc(n *node) (*nodeLoc, bool) { return &n.right, true })\n}\n\n\/\/ Evict some clean items found by randomly walking a tree branch.\nfunc (t *Collection) EvictSomeItems() (numEvicted uint64) {\n\tt.store.walk(t, false, func(n *node) (*nodeLoc, bool) {\n\t\tif !n.item.Loc().isEmpty() {\n\t\t\tatomic.StorePointer(&n.item.item, unsafe.Pointer(nil))\n\t\t\tnumEvicted++\n\t\t}\n\t\tnext := &n.left\n\t\tif (rand.Int() & 0x01) == 0x01 {\n\t\t\tnext = &n.right\n\t\t}\n\t\tif next.isEmpty() {\n\t\t\treturn nil, false\n\t\t}\n\t\treturn next, true\n\t})\n\treturn numEvicted\n}\n\ntype ItemVisitor func(i *Item) bool\ntype ItemVisitorEx func(i *Item, depth uint64) bool\n\n\/\/ Visit items greater-than-or-equal to the target key in ascending order.\nfunc (t *Collection) VisitItemsAscend(target []byte, withValue bool, v ItemVisitor) error {\n\treturn t.VisitItemsAscendEx(target, withValue,\n\t\tfunc(i *Item, depth uint64) bool { return v(i) })\n}\n\n\/\/ Visit items less-than the target key in descending order.\nfunc (t *Collection) VisitItemsDescend(target []byte, withValue bool, v ItemVisitor) error {\n\treturn t.VisitItemsDescendEx(target, withValue,\n\t\tfunc(i *Item, depth uint64) bool { return v(i) })\n}\n\n\/\/ Visit items greater-than-or-equal to the target key in ascending order; with depth info.\nfunc (t *Collection) VisitItemsAscendEx(target []byte, withValue bool,\n\tvisitor ItemVisitorEx) error {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\t_, err := t.store.visitNodes(t, rnl.root,\n\t\ttarget, withValue, visitor, 0, ascendChoice)\n\treturn err\n}\n\n\/\/ Visit items less-than the target key in descending order; with depth info.\nfunc (t *Collection) VisitItemsDescendEx(target []byte, withValue bool,\n\tvisitor ItemVisitorEx) error {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\t_, err := t.store.visitNodes(t, rnl.root,\n\t\ttarget, withValue, visitor, 0, descendChoice)\n\treturn err\n}\n\nfunc ascendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {\n\treturn cmp <= 0, &n.left, &n.right\n}\n\nfunc descendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {\n\treturn cmp > 0, &n.right, &n.left\n}\n\n\/\/ Returns total number of items and total key bytes plus value bytes.\nfunc (t *Collection) GetTotals() (numItems uint64, numBytes uint64, err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tn := rnl.root\n\tnNode, err := n.read(t.store)\n\tif err != nil || n.isEmpty() || nNode == nil {\n\t\treturn 0, 0, err\n\t}\n\treturn nNode.numNodes, nNode.numBytes, nil\n}\n\n\/\/ Returns JSON representation of root node file location.\nfunc (t *Collection) MarshalJSON() ([]byte, error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\tloc := rnl.root.Loc()\n\tif loc.isEmpty() {\n\t\treturn json.Marshal(ploc_empty)\n\t}\n\treturn json.Marshal(loc)\n}\n\n\/\/ Unmarshals JSON representation of root node file location.\nfunc (t *Collection) UnmarshalJSON(d []byte) error {\n\tp := ploc{}\n\tif err := json.Unmarshal(d, &p); err != nil {\n\t\treturn err\n\t}\n\tif t.rootLock == nil {\n\t\tt.rootLock = &sync.Mutex{}\n\t}\n\tnloc := t.mkNodeLoc(nil)\n\tnloc.loc = unsafe.Pointer(&p)\n\tif !t.rootCAS(nil, t.mkRootNodeLoc(nloc)) {\n\t\treturn errors.New(\"concurrent mutation during UnmarshalJSON().\")\n\t}\n\treturn nil\n}\n\n\/\/ Writes dirty items of a collection BUT (WARNING) does NOT write new\n\/\/ root records. Use Store.Flush() to write root records, which would\n\/\/ make these writes visible to the next file re-opening\/re-loading.\nfunc (t *Collection) Write() (err error) {\n\trnl := t.rootAddRef()\n\tdefer t.rootDecRef(rnl)\n\troot := rnl.root\n\tif err = t.flushItems(root); err != nil {\n\t\treturn err\n\t}\n\tif err = t.store.flushNodes(root); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Assumes that the caller serializes invocations w.r.t. mutations.\nfunc (t *Collection) Stats() CollectionStats {\n\treturn t.stats\n}\n\nfunc (t *Collection) flushItems(nloc *nodeLoc) (err error) {\n\tif nloc == nil || !nloc.Loc().isEmpty() {\n\t\treturn nil \/\/ Flush only unpersisted items of non-empty, unpersisted nodes.\n\t}\n\tnode := nloc.Node()\n\tif node == nil {\n\t\treturn nil\n\t}\n\tif err = t.flushItems(&node.left); err != nil {\n\t\treturn err\n\t}\n\tif err = node.item.write(t); err != nil { \/\/ Write items in key order.\n\t\treturn err\n\t}\n\treturn t.flushItems(&node.right)\n}\n\nfunc (t *Collection) rootCAS(prev, next *rootNodeLoc) bool {\n\tt.rootLock.Lock()\n\tdefer t.rootLock.Unlock()\n\n\tif t.root != prev {\n\t\treturn false\n\t}\n\tt.root = next\n\treturn true\n}\n\nfunc (t *Collection) rootAddRef() *rootNodeLoc {\n\tt.rootLock.Lock()\n\tdefer t.rootLock.Unlock()\n\n\tt.root.refs++\n\treturn t.root\n}\n\nfunc (t *Collection) rootDecRef(r *rootNodeLoc) {\n\tt.rootLock.Lock()\n\tr.refs--\n\tif r.refs > 0 {\n\t\tt.rootLock.Unlock()\n\t\treturn\n\t}\n\tt.rootLock.Unlock()\n\n\tif r.root.isEmpty() {\n\t\treturn\n\t}\n\tt.reclaimNodes(r.root.Node())\n\tt.freeNodeLoc(r.root)\n\tt.freeRootNodeLoc(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package tesls\n\nimport (\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"golang.org\/x\/tools\/go\/types\"\n)\n\n\/\/ Test describes a single test found in the *_test.go file\ntype Test struct {\n\tName string `json:\"name\"`\n\tFile string `json:\"file\"`\n\tPkg string `json:\"pkg\"`\n}\n\n\/\/ Format returns Test struct as a string in the given format.\n\/\/ It can be any string in which %P will turn into package name,\n\/\/ %T into test name, and %F into file path in which test was found.\n\/\/ Note that it is not required to use all three placeholders.\nfunc (t *Test) Format(format string) string {\n\treturn strings.NewReplacer(\"%P\", t.Pkg, \"%T\", t.Name, \"%F\", t.File).Replace(format)\n}\n\n\/\/ String returns a string representation of the Test\n\/\/ in the form of 'package.Test filename'\nfunc (t *Test) String() string {\n\treturn t.Format(\"%P.%T %F\")\n}\n\n\/\/ TestSlice attaches the methods of sort.Interface to []Test.\n\/\/ Sorting in increasing order comparing package+testname.\ntype TestSlice []Test\n\nfunc (s TestSlice) Len() int { return len(s) }\nfunc (s TestSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s TestSlice) Less(i, j int) bool { return s[i].Pkg+s[i].Name < s[j].Pkg+s[j].Name }\n\n\/\/ Sort is a convenience method.\nfunc (s TestSlice) Sort() { sort.Sort(s) }\n\nfunc filter(fi os.FileInfo) bool {\n\treturn strings.HasSuffix(fi.Name(), \"_test.go\")\n}\n\nfunc isTest(fdecl *ast.FuncDecl) bool {\n\treturn strings.HasPrefix(fdecl.Name.String(), \"Test\") &&\n\t\tfdecl.Type != nil &&\n\t\tfdecl.Type.Params != nil &&\n\t\tlen(fdecl.Type.Params.List) == 1 &&\n\t\ttypes.ExprString(fdecl.Type.Params.List[0].Type) == \"*testing.T\"\n}\n\n\/\/ Tests function searches for test function declarations in the given directory.\nfunc Tests(dir string) (tests TestSlice, err error) {\n\tfset := token.NewFileSet()\n\tpkgs, err := parser.ParseDir(fset, dir, filter, parser.Mode(0))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, pkg := range pkgs {\n\t\tfor filename, file := range pkg.Files {\n\t\t\tfor _, decl := range file.Decls {\n\t\t\t\tfdecl, ok := decl.(*ast.FuncDecl)\n\t\t\t\tif ok && isTest(fdecl) {\n\t\t\t\t\ttests = append(tests, Test{\n\t\t\t\t\t\tName: fdecl.Name.String(),\n\t\t\t\t\t\tFile: filename,\n\t\t\t\t\t\tPkg: pkg.Name,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn tests, nil\n}\n<commit_msg>tesls: build tags should not be ignored<commit_after>package tesls\n\nimport (\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"golang.org\/x\/tools\/go\/types\"\n)\n\n\/\/ Test describes a single test found in the *_test.go file\ntype Test struct {\n\tName string `json:\"name\"`\n\tFile string `json:\"file\"`\n\tPkg string `json:\"pkg\"`\n}\n\n\/\/ Format returns Test struct as a string in the given format.\n\/\/ It can be any string in which %P will turn into package name,\n\/\/ %T into test name, and %F into file path in which test was found.\n\/\/ Note that it is not required to use all three placeholders.\nfunc (t *Test) Format(format string) string {\n\treturn strings.NewReplacer(\"%P\", t.Pkg, \"%T\", t.Name, \"%F\", t.File).Replace(format)\n}\n\n\/\/ String returns a string representation of the Test\n\/\/ in the form of 'package.Test filename'\nfunc (t *Test) String() string {\n\treturn t.Format(\"%P.%T %F\")\n}\n\n\/\/ TestSlice attaches the methods of sort.Interface to []Test.\n\/\/ Sorting in increasing order comparing package+testname.\ntype TestSlice []Test\n\nfunc (s TestSlice) Len() int { return len(s) }\nfunc (s TestSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s TestSlice) Less(i, j int) bool { return s[i].Pkg+s[i].Name < s[j].Pkg+s[j].Name }\n\n\/\/ Sort is a convenience method.\nfunc (s TestSlice) Sort() { sort.Sort(s) }\n\nfunc isTest(fdecl *ast.FuncDecl) bool {\n\treturn strings.HasPrefix(fdecl.Name.String(), \"Test\") &&\n\t\tfdecl.Type != nil &&\n\t\tfdecl.Type.Params != nil &&\n\t\tlen(fdecl.Type.Params.List) == 1 &&\n\t\ttypes.ExprString(fdecl.Type.Params.List[0].Type) == \"*testing.T\"\n}\n\nfunc isNoGoError(err error) bool {\n\t_, ok := err.(*build.NoGoError)\n\treturn ok\n}\n\n\/\/ Tests function searches for test function declarations in the given directory.\nfunc Tests(dir string) (tests TestSlice, err error) {\n\tpkg, err := build.ImportDir(dir, build.ImportMode(0))\n\tif err != nil && !isNoGoError(err) {\n\t\treturn nil, err\n\t}\n\tfset := token.NewFileSet()\n\tfor _, filename := range pkg.TestGoFiles {\n\t\tfilename = filepath.Join(dir, filename)\n\t\tf, err := parser.ParseFile(fset, filename, nil, parser.Mode(0))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, decl := range f.Decls {\n\t\t\tfdecl, ok := decl.(*ast.FuncDecl)\n\t\t\tif ok && isTest(fdecl) {\n\t\t\t\ttests = append(tests, Test{\n\t\t\t\t\tName: fdecl.Name.String(),\n\t\t\t\t\tFile: filename,\n\t\t\t\t\tPkg: pkg.Name,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\treturn tests, nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Instrument cloud.google.com\/go\/storage.Client methods<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package trace defines common-use Dapper-style tracing APIs for the Go programming language.\n\/\/\n\/\/ Package trace provides a backend-agnostic APIs and various tracing providers\n\/\/ can be used with the package by importing various implementations of the Client interface.\npackage trace\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar client Client\n\n\/\/ TODO(jbd): annotate with labels that can propagate, similar to OpenTracing baggage.\n\n\/\/ TODO(jbd): should we support a span to have multiple parents?\n\n\/\/ TODO(jbd): set error\/state on finish.\n\n\/\/ TODO(jbd): A big TODO, we probably don't want to set a global client.\nfunc Configure(c Client) {\n\tclient = c\n}\n\n\/\/ Span represents a work.\ntype Span struct {\n\tID []byte \/\/ represents the global identifier of the span.\n\tAnnotations map[string][]byte \/\/ annotations set on this span.\n}\n\n\/\/ Annotate allows you to attach data to a span. Key-value pairs are\n\/\/ arbitary information you want to collect in the lifetime of a span.\nfunc (s *Span) Annotate(key string, val []byte) {\n\ts.Annotations[key] = val\n}\n\n\/\/ Child creates a child span from s with the given name.\n\/\/ Created child span needs to be finished by calling\n\/\/ the finishing function.\nfunc (s *Span) Child(name string, linked ...*Span) (*Span, FinishFunc) {\n\tchild := &Span{\n\t\tID: client.NewSpan(s.ID),\n\t\tAnnotations: make(map[string][]byte),\n\t}\n\tstart := time.Now()\n\tfn := func() error {\n\t\treturn client.Finish(child.ID, name, spanIDs(linked), child.Annotations, start, time.Now())\n\t}\n\treturn child, fn\n}\n\n\/\/ ToHTTPReq injects the span information in the given request\n\/\/ and returns the modified request.\n\/\/\n\/\/ If the current client is not supporting HTTP propagation,\n\/\/ an error is returned.\nfunc (s *Span) ToHTTPReq(req *http.Request) (*http.Request, error) {\n\thc, ok := client.(HTTPCarrier)\n\tif !ok {\n\t\treturn req, errors.New(\"not supported\")\n\t}\n\terr := hc.ToReq(req, s.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n\n\/\/ FromHTTPReq creates a *Span from an incoming request.\n\/\/\n\/\/ An error will be returned if the current tracing client is\n\/\/ not supporting propagation via HTTP.\nfunc FromHTTPReq(req *http.Request) (*Span, error) {\n\thc, ok := client.(HTTPCarrier)\n\tif !ok {\n\t\treturn nil, errors.New(\"not supported\")\n\t}\n\tid, err := hc.FromReq(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Span{ID: id}, nil\n}\n\n\/\/ HTTPCarrier represents a mechanism that can attach the tracing\n\/\/ information into an HTTP request or extract it from one.\ntype HTTPCarrier interface {\n\tFromReq(req *http.Request) (id []byte, err error)\n\tToReq(req *http.Request, id []byte) error\n}\n\n\/\/ Client represents a client communicates with a tracing backend.\n\/\/ Tracing backends are supposed to implement the interface in order to\n\/\/ provide Go support.\n\/\/\n\/\/ A Client is an HTTPCarrier if it can propagate the tracing\n\/\/ information via an HTTP request.\n\/\/\n\/\/ If you are not a tracing provider, you will never have to interact with\n\/\/ this interface directly.\ntype Client interface {\n\tNewSpan(parent []byte) (id []byte)\n\tFinish(id []byte, name string, linked [][]byte, annotations map[string][]byte, start, end time.Time) error\n}\n\n\/\/ NewSpan creates a new root-level span.\n\/\/\n\/\/ The span must be finished when the job it represents it is finished.\nfunc NewSpan(name string, linked ...*Span) (*Span, FinishFunc) {\n\tspan := &Span{\n\t\tID: client.NewSpan(nil),\n\t\tAnnotations: make(map[string][]byte),\n\t}\n\tstart := time.Now()\n\tfn := func() error {\n\t\treturn client.Finish(span.ID, name, spanIDs(linked), span.Annotations, start, time.Now())\n\t}\n\treturn span, fn\n}\n\n\/\/ NewContext returns a context with the span in.\nfunc NewContext(ctx context.Context, span *Span) context.Context {\n\treturn context.WithValue(ctx, spanKey, span)\n}\n\n\/\/ FromContext returns a span from the given context.\nfunc FromContext(ctx context.Context) *Span {\n\treturn ctx.Value(spanKey).(*Span)\n}\n\nfunc spanIDs(spans []*Span) [][]byte {\n\tvar links [][]byte\n\tfor _, s := range spans {\n\t\tlinks = append(links, s.ID)\n\t}\n\treturn links\n}\n\n\/\/ FinishFunc finalizes its span.\ntype FinishFunc func() error\n\ntype contextKey struct{}\n\nvar spanKey = contextKey{}\n<commit_msg>noop when span is nil<commit_after>\/\/ Package trace defines common-use Dapper-style tracing APIs for the Go programming language.\n\/\/\n\/\/ Package trace provides a backend-agnostic APIs and various tracing providers\n\/\/ can be used with the package by importing various implementations of the Client interface.\npackage trace\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar client Client\n\n\/\/ TODO(jbd): annotate with labels that can propagate, similar to OpenTracing baggage.\n\n\/\/ TODO(jbd): should we support a span to have multiple parents?\n\n\/\/ TODO(jbd): set error\/state on finish.\n\n\/\/ TODO(jbd): Avoid things if client = nil.\n\n\/\/ TODO(jbd): A big TODO, we probably don't want to set a global client.\nfunc Configure(c Client) {\n\tclient = c\n}\n\n\/\/ Span represents a work.\ntype Span struct {\n\tID []byte \/\/ represents the global identifier of the span.\n\tAnnotations map[string][]byte \/\/ annotations set on this span.\n}\n\n\/\/ Annotate allows you to attach data to a span. Key-value pairs are\n\/\/ arbitary information you want to collect in the lifetime of a span.\nfunc (s *Span) Annotate(key string, val []byte) {\n\tif s == nil {\n\t\treturn\n\t}\n\ts.Annotations[key] = val\n}\n\n\/\/ Child creates a child span from s with the given name.\n\/\/ Created child span needs to be finished by calling\n\/\/ the finishing function.\nfunc (s *Span) Child(name string, linked ...*Span) (*Span, FinishFunc) {\n\tif s == nil {\n\t\treturn nil, noop\n\t}\n\tchild := &Span{\n\t\tID: client.NewSpan(s.ID),\n\t\tAnnotations: make(map[string][]byte),\n\t}\n\tstart := time.Now()\n\tfn := func() error {\n\t\treturn client.Finish(child.ID, name, spanIDs(linked), child.Annotations, start, time.Now())\n\t}\n\treturn child, fn\n}\n\n\/\/ ToHTTPReq injects the span information in the given request\n\/\/ and returns the modified request.\n\/\/\n\/\/ If the current client is not supporting HTTP propagation,\n\/\/ an error is returned.\nfunc (s *Span) ToHTTPReq(req *http.Request) (*http.Request, error) {\n\tif s == nil {\n\t\treturn req, nil\n\t}\n\thc, ok := client.(HTTPCarrier)\n\tif !ok {\n\t\treturn req, errors.New(\"not supported\")\n\t}\n\terr := hc.ToReq(req, s.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n\n\/\/ FromHTTPReq creates a *Span from an incoming request.\n\/\/\n\/\/ An error will be returned if the current tracing client is\n\/\/ not supporting propagation via HTTP.\nfunc FromHTTPReq(req *http.Request) (*Span, error) {\n\thc, ok := client.(HTTPCarrier)\n\tif !ok {\n\t\treturn nil, errors.New(\"not supported\")\n\t}\n\tid, err := hc.FromReq(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Span{ID: id}, nil\n}\n\n\/\/ HTTPCarrier represents a mechanism that can attach the tracing\n\/\/ information into an HTTP request or extract it from one.\ntype HTTPCarrier interface {\n\tFromReq(req *http.Request) (id []byte, err error)\n\tToReq(req *http.Request, id []byte) error\n}\n\n\/\/ Client represents a client communicates with a tracing backend.\n\/\/ Tracing backends are supposed to implement the interface in order to\n\/\/ provide Go support.\n\/\/\n\/\/ A Client is an HTTPCarrier if it can propagate the tracing\n\/\/ information via an HTTP request.\n\/\/\n\/\/ If you are not a tracing provider, you will never have to interact with\n\/\/ this interface directly.\ntype Client interface {\n\tNewSpan(parent []byte) (id []byte)\n\tFinish(id []byte, name string, linked [][]byte, annotations map[string][]byte, start, end time.Time) error\n}\n\n\/\/ NewSpan creates a new root-level span.\n\/\/\n\/\/ The span must be finished when the job it represents it is finished.\nfunc NewSpan(name string, linked ...*Span) (*Span, FinishFunc) {\n\tspan := &Span{\n\t\tID: client.NewSpan(nil),\n\t\tAnnotations: make(map[string][]byte),\n\t}\n\tstart := time.Now()\n\tfn := func() error {\n\t\treturn client.Finish(span.ID, name, spanIDs(linked), span.Annotations, start, time.Now())\n\t}\n\treturn span, fn\n}\n\n\/\/ NewContext returns a context with the span in.\nfunc NewContext(ctx context.Context, span *Span) context.Context {\n\treturn context.WithValue(ctx, spanKey, span)\n}\n\n\/\/ FromContext returns a span from the given context.\nfunc FromContext(ctx context.Context) *Span {\n\treturn ctx.Value(spanKey).(*Span)\n}\n\nfunc spanIDs(spans []*Span) [][]byte {\n\tvar links [][]byte\n\tfor _, s := range spans {\n\t\tlinks = append(links, s.ID)\n\t}\n\treturn links\n}\n\n\/\/ FinishFunc finalizes its span.\ntype FinishFunc func() error\n\ntype contextKey struct{}\n\nvar spanKey = contextKey{}\n\nfunc noop() error { return nil }\n<|endoftext|>"} {"text":"<commit_before>package ts\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\/\/ \"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regifted\/data\"\n)\n\nconst TS_PACKET_SIZE = 188\nconst PACKET_TYPE_ERROR = 0\nconst PACKET_TYPE_PAT = 2\nconst PACKET_TYPE_PCR = 3\nconst PACKET_TYPE_PES = 4\nconst PACKET_TYPE_ES = 1\nconst PACKET_TYPE_PMT = 5\nconst PACKET_TYPE_PROGRAM = 6\nconst PACKET_TYPE_TS = 7\n\ntype TSState struct {\n\tglobals_initialized bool\n\t\/\/ the keys in these maps are pids\n\tpesCollector map[uint]Pes\n\tpmtConstructors map[uint]Pmt\n\tentryConstructors map[uint]PmtEntry\n\telementaryConstructors map[uint]ElementaryStreamPacket\n\ttypes map[uint]uint\n\tpat Pat\n\n\tbytes []byte\n\treader *data.Reader\n\tpcr uint\n\n\n\t\/\/ pes.streamtype -> pes[]\n\tpesMap map[uint][]Pes\n}\n\n\/\/ this is still global state - it's a temporary step in-between\nvar state TSState\n\nfunc Load(bytes []byte) *TSState {\n\tfmt.Println( \"load()\" )\n\tvar state2 *TSState\n\tstate2 = &TSState{}\n\tstate2.bytes = bytes\n\tstate2.reader = data.NewReader(bytes)\n\tstate2.main()\n\tstate = *state2\n\treturn state2\n}\n\nfunc (state *TSState) main() {\n\treader := state.reader\n\tbytes := state.bytes\n\n\trc := Init()\n\tif rc != true {\n\t\tlog.Printf(\"could not initialize global state\\n\")\n\t\tos.Exit(71)\n\t}\n\tfmt.Println(\"Size: \", len(bytes))\n\ts := uint64(len(bytes))\n\n\tfor reader.Cursor < s {\n\t\tvar pesData *Pes\n\t\tbyteChunk := reader.ReadBytes(TS_PACKET_SIZE)\n\t\ttsPacket := TsPacket{}\n\t\ttsPacket.byteChunk = byteChunk\n\t\tpacketType, packetReader := tsPacket.Read()\n\n\t\tswitch {\n\t\tcase packetType == PACKET_TYPE_PAT:\n\t\t\treadPat(&tsPacket, packetReader)\n\n\t\tcase packetType == PACKET_TYPE_PMT:\n\t\t\treadPMT(&tsPacket, packetReader)\n\n\t\tcase packetType == PACKET_TYPE_ES:\n\t\t\tpesData = readES(&tsPacket, packetReader)\n\n\t\t\tif pesData != nil{\n\t\t\t\tif state.pesMap[pesData.streamType] != nil{\n\t\t\t\t\tstate.pesMap[pesData.streamType] = make([]Pes, 1, 1)\n\n\t\t\t\t}\n\n\t\t\t\tstate.pesMap[pesData.streamType] = append(state.pesMap[pesData.streamType], *pesData)\n\n\t\t\t}\n\t\t}\n\n\t\tif tsPacket.hasAdaptation && tsPacket.adaptation.hasPCR {\n\t\t\tstate.pcr = tsPacket.adaptation.pcr.pcr\n\t\t}\n\t}\n\n\tfor key := range state.pesCollector {\n\t\tstate.CreateAndDispensePes(key, state.types[key])\n\t}\n\n}\n\n\/\/CreateAndDispensePes\n\/\/Dump the remaining PES\nfunc (state *TSState) CreateAndDispensePes(pid uint, streamType uint) {\n\tpes := state.pesCollector[pid]\n\tpes.pid = pid\n\tpes.streamType = streamType\n\tpes.Read()\n\tpes.Print()\n}\n\nfunc readPat(tsPacket *TsPacket, reader *data.Reader) {\n\tstate.pat.byteChunk = reader.ReadBytes(reader.Size - reader.Cursor)\n\tstate.pat.unitStart = tsPacket.unitStart\n\tstate.pat.Read()\n}\n\nfunc readPMT(tsPacket *TsPacket, reader *data.Reader) {\n\tpmt, _ := state.pmtConstructors[tsPacket.pid]\n\tpmt.unitStart = tsPacket.unitStart\n\tpmt.byteChunk = reader.ReadBytes(reader.Size - reader.Cursor)\n\tpmt.Read()\n}\n\nfunc readES(tsPacket *TsPacket, reader *data.Reader) *Pes {\n\tvar pesData *Pes\n\telementaryStreamPacket, _ := state.elementaryConstructors[tsPacket.pid]\n\telementaryStreamPacket.pid = tsPacket.pid\n\telementaryStreamPacket.unitStart = tsPacket.unitStart\n\n\tif tsPacket.hasAdaptation {\n\t\telementaryStreamPacket.payload = tsPacket.adaptation.payload\n\t} else {\n\t\telementaryStreamPacket.payload = reader.ReadBytes(reader.Size - reader.Cursor)\n\t}\n\n\tpesData = elementaryStreamPacket.Dispatch()\n\telementaryStreamPacket.Print()\n\treturn pesData\n}\n\n\/\/ todo( mathew guest ) I think golang wants to use error as return codes but\n\/\/ it's a little slow so I'm cheating\nfunc getFilepath() (string, int) {\n\tflag.Parse()\n\targc := flag.NArg()\n\tif argc < 1 {\n\t\tlog.Printf(\"Usage: \" + os.Args[0] + \" [input ts file]\\n\")\n\t\treturn \"\", 66\n\t}\n\tif argc > 1 {\n\t\tlog.Printf(\"Ignoring all but first argument.\\n\")\n\t\tos.Exit(1)\n\t}\n\tfileName := os.Args[1]\n\treturn fileName, 0\n}\n\n\/\/Init\n\/\/Initialize the constructors\nfunc Init() bool {\n\tif state.globals_initialized == true {\n\t\tlog.Printf(\"EE attempted to initialize globals twice\\n\")\n\t\treturn false\n\t}\n\tstate.pmtConstructors = make(map[uint]Pmt)\n\tstate.entryConstructors = make(map[uint]PmtEntry)\n\tstate.types = make(map[uint]uint)\n\tstate.pesCollector = make(map[uint]Pes)\n\tstate.elementaryConstructors = make(map[uint]ElementaryStreamPacket)\n\tstate.pat = Pat{}\n\tstate.pat.tableId = 0\n\tstate.globals_initialized = true\n\treturn true\n}\n\nfunc DeleteState() {\n\tif state.globals_initialized == false {\n\t\treturn\n\t}\n\tstate.globals_initialized = false\n\tInit()\n\tstate.globals_initialized = false\n}\n\nfunc ReadHeaderData(bytes []byte) uint {\n\treader := data.NewReader(bytes)\n\tvar a uint = (reader.Read(1) >> 1) & 0x07\n\tvar b uint = reader.Read(1)\n\tvar c uint = (reader.Read(1) >> 1) & 0x7f\n\tvar d uint = reader.Read(1)\n\tvar e uint = (reader.Read(1) >> 1) & 0x7f\n\tvar timestamp uint = (a << 30) | (b << 22) | (c << 15) | (d << 7) | e\n\treturn timestamp\n}\n<commit_msg>added missing constructor for pes buff map<commit_after>package ts\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\/\/ \"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regifted\/data\"\n)\n\nconst TS_PACKET_SIZE = 188\nconst PACKET_TYPE_ERROR = 0\nconst PACKET_TYPE_PAT = 2\nconst PACKET_TYPE_PCR = 3\nconst PACKET_TYPE_PES = 4\nconst PACKET_TYPE_ES = 1\nconst PACKET_TYPE_PMT = 5\nconst PACKET_TYPE_PROGRAM = 6\nconst PACKET_TYPE_TS = 7\n\ntype TSState struct {\n\tglobals_initialized bool\n\t\/\/ the keys in these maps are pids\n\tpesCollector map[uint]Pes\n\tpmtConstructors map[uint]Pmt\n\tentryConstructors map[uint]PmtEntry\n\telementaryConstructors map[uint]ElementaryStreamPacket\n\ttypes map[uint]uint\n\tpat Pat\n\n\tbytes []byte\n\treader *data.Reader\n\tpcr uint\n\n\n\t\/\/ pes.streamtype -> pes[]\n\tpesMap map[uint][]Pes\n}\n\n\/\/ this is still global state - it's a temporary step in-between\nvar state TSState\n\nfunc Load(bytes []byte) *TSState {\n\tfmt.Println( \"load()\" )\n\n\tstate = TSState{}\n\tvar state2 *TSState\n\tstate2 = &state\n\tstate2.bytes = bytes\n\tstate2.reader = data.NewReader(bytes)\n\tstate2.main()\n\treturn state2\n}\n\nfunc (state *TSState) main() {\n\treader := state.reader\n\tbytes := state.bytes\n\n\trc := Init()\n\tif rc != true {\n\t\tlog.Printf(\"could not initialize global state\\n\")\n\t\tos.Exit(71)\n\t}\n\tfmt.Println(\"Size: \", len(bytes))\n\ts := uint64(len(bytes))\n\n\tfor reader.Cursor < s {\n\t\tvar pesData *Pes\n\t\tbyteChunk := reader.ReadBytes(TS_PACKET_SIZE)\n\t\ttsPacket := TsPacket{}\n\t\ttsPacket.byteChunk = byteChunk\n\t\tpacketType, packetReader := tsPacket.Read()\n\n\t\tswitch {\n\t\tcase packetType == PACKET_TYPE_PAT:\n\t\t\treadPat(&tsPacket, packetReader)\n\n\t\tcase packetType == PACKET_TYPE_PMT:\n\t\t\treadPMT(&tsPacket, packetReader)\n\n\t\tcase packetType == PACKET_TYPE_ES:\n\t\t\tpesData = readES(&tsPacket, packetReader)\n\n\t\t\tif pesData != nil{\n\t\t\t\tif state.pesMap[pesData.streamType] != nil{\n\t\t\t\t\tstate.pesMap[pesData.streamType] = make([]Pes, 1, 1)\n\n\t\t\t\t}\n\n\t\t\t\tstate.pesMap[pesData.streamType] = append(state.pesMap[pesData.streamType], *pesData)\n\n\t\t\t}\n\t\t}\n\n\t\tif tsPacket.hasAdaptation && tsPacket.adaptation.hasPCR {\n\t\t\tstate.pcr = tsPacket.adaptation.pcr.pcr\n\t\t}\n\t}\n\n\tfor key := range state.pesCollector {\n\t\tstate.CreateAndDispensePes(key, state.types[key])\n\t}\n\n}\n\n\/\/CreateAndDispensePes\n\/\/Dump the remaining PES\nfunc (state *TSState) CreateAndDispensePes(pid uint, streamType uint) {\n\tpes := state.pesCollector[pid]\n\tpes.pid = pid\n\tpes.streamType = streamType\n\tpes.Read()\n\tpes.Print()\n}\n\nfunc readPat(tsPacket *TsPacket, reader *data.Reader) {\n\tstate.pat.byteChunk = reader.ReadBytes(reader.Size - reader.Cursor)\n\tstate.pat.unitStart = tsPacket.unitStart\n\tstate.pat.Read()\n}\n\nfunc readPMT(tsPacket *TsPacket, reader *data.Reader) {\n\tpmt, _ := state.pmtConstructors[tsPacket.pid]\n\tpmt.unitStart = tsPacket.unitStart\n\tpmt.byteChunk = reader.ReadBytes(reader.Size - reader.Cursor)\n\tpmt.Read()\n}\n\nfunc readES(tsPacket *TsPacket, reader *data.Reader) *Pes {\n\tvar pesData *Pes\n\telementaryStreamPacket, _ := state.elementaryConstructors[tsPacket.pid]\n\telementaryStreamPacket.pid = tsPacket.pid\n\telementaryStreamPacket.unitStart = tsPacket.unitStart\n\n\tif tsPacket.hasAdaptation {\n\t\telementaryStreamPacket.payload = tsPacket.adaptation.payload\n\t} else {\n\t\telementaryStreamPacket.payload = reader.ReadBytes(reader.Size - reader.Cursor)\n\t}\n\n\tpesData = elementaryStreamPacket.Dispatch()\n\telementaryStreamPacket.Print()\n\treturn pesData\n}\n\n\/\/ todo( mathew guest ) I think golang wants to use error as return codes but\n\/\/ it's a little slow so I'm cheating\nfunc getFilepath() (string, int) {\n\tflag.Parse()\n\targc := flag.NArg()\n\tif argc < 1 {\n\t\tlog.Printf(\"Usage: \" + os.Args[0] + \" [input ts file]\\n\")\n\t\treturn \"\", 66\n\t}\n\tif argc > 1 {\n\t\tlog.Printf(\"Ignoring all but first argument.\\n\")\n\t\tos.Exit(1)\n\t}\n\tfileName := os.Args[1]\n\treturn fileName, 0\n}\n\n\/\/Init\n\/\/Initialize the constructors\nfunc Init() bool {\n\tif state.globals_initialized == true {\n\t\tlog.Printf(\"EE attempted to initialize globals twice\\n\")\n\t\treturn false\n\t}\n\tstate.pmtConstructors = make(map[uint]Pmt)\n\tstate.entryConstructors = make(map[uint]PmtEntry)\n\tstate.types = make(map[uint]uint)\n\tstate.pesCollector = make(map[uint]Pes)\n\tstate.elementaryConstructors = make(map[uint]ElementaryStreamPacket)\n\tstate.pat = Pat{}\n\tstate.pat.tableId = 0\n\tstate.pesMap = make(map[uint][]Pes)\n\tstate.globals_initialized = true\n\treturn true\n}\n\nfunc DeleteState() {\n\tif state.globals_initialized == false {\n\t\treturn\n\t}\n\tstate.globals_initialized = false\n\tInit()\n\tstate.globals_initialized = false\n}\n\nfunc ReadHeaderData(bytes []byte) uint {\n\treader := data.NewReader(bytes)\n\tvar a uint = (reader.Read(1) >> 1) & 0x07\n\tvar b uint = reader.Read(1)\n\tvar c uint = (reader.Read(1) >> 1) & 0x7f\n\tvar d uint = reader.Read(1)\n\tvar e uint = (reader.Read(1) >> 1) & 0x7f\n\tvar timestamp uint = (a << 30) | (b << 22) | (c << 15) | (d << 7) | e\n\treturn timestamp\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ WithLogger stores the logger.\nfunc WithLogger(ctx context.Context, l logrus.FieldLogger) context.Context {\n\treturn context.WithValue(ctx, \"logger\", l)\n}\n\n\/\/ Logger returns the structured logger.\nfunc Logger(ctx context.Context) logrus.FieldLogger {\n\tl, ok := ctx.Value(\"logger\").(logrus.FieldLogger)\n\tif !ok {\n\t\treturn logrus.StandardLogger()\n\t}\n\treturn l\n}\n<commit_msg>Fixed tests, added context to more methods and added some better logger\/context handling.<commit_after>package common\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ WithLogger stores the logger.\nfunc WithLogger(ctx context.Context, l logrus.FieldLogger) context.Context {\n\treturn context.WithValue(ctx, \"logger\", l)\n}\n\n\/\/ Logger returns the structured logger.\nfunc Logger(ctx context.Context) logrus.FieldLogger {\n\tl, ok := ctx.Value(\"logger\").(logrus.FieldLogger)\n\tif !ok {\n\t\treturn logrus.StandardLogger()\n\t}\n\treturn l\n}\n\n\/\/ Attempt at simplifying this whole logger in the context thing\n\/\/ Could even make this take a generic map, then the logger that gets returned could be used just like the stdlib too, since it's compatible\nfunc LoggerWithFields(ctx context.Context, fields logrus.Fields) (context.Context, logrus.FieldLogger) {\n\tl := Logger(ctx)\n\tl = l.WithFields(fields)\n\tctx = WithLogger(ctx, l)\n\treturn ctx, l\n}\n<|endoftext|>"} {"text":"<commit_before>package cachet\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\t\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/component-statuses\n\n\t\/\/ ComponentStatusOperational means \"The component is working.\"\n\tComponentStatusOperational = 1\n\t\/\/ ComponentStatusPerformanceIssues means \"The component is experiencing some slowness.\"\n\tComponentStatusPerformanceIssues = 2\n\t\/\/ ComponentStatusPartialOutage means \"The component may not be working for everybody.\"\n\t\/\/ This could be a geographical issue for example.\n\tComponentStatusPartialOutage = 3\n\t\/\/ ComponentStatusMajorOutage means \"The component is not working for anybody.\"\n\tComponentStatusMajorOutage = 4\n)\n\n\/\/ ComponentsService contains REST endpoints that belongs to cachet components.\ntype ComponentsService struct {\n\tclient *Client\n}\n\n\/\/ Component entity reflects one single component\ntype Component struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tLink string `json:\"link,omitempty\"`\n\tStatus int `json:\"status,omitempty\"`\n\tOrder int `json:\"order,omitempty\"`\n\tGroupID int `json:\"group_id,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n\tDeletedAt string `json:\"deleted_at,omitempty\"`\n\tStatusName string `json:\"status_name,omitempty\"`\n}\n\n\/\/ ComponentGroup entity reflects one single component group\ntype ComponentGroup struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tOrder int `json:\"order,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n}\n\n\/\/ ComponentResponse reflects the response of \/components call\ntype ComponentResponse struct {\n\tMeta Meta `json:\"meta,omitempty\"`\n\tComponents []Component `json:\"data,omitempty\"`\n}\n\n\/\/ ComponentGroupResponse reflects the response of \/components\/groups call\ntype ComponentGroupResponse struct {\n\tMeta Meta `json:\"meta,omitempty\"`\n\tComponentGroups []ComponentGroup `json:\"data,omitempty\"`\n}\n\n\/\/ componentApiResponse is an internal type to hide\n\/\/ some the \"data\" nested level from the API.\n\/\/ Some calls (e.g. Get or Create) return the component in the \"data\" key.\ntype componentAPIResponse struct {\n\tData *Component `json:\"data\"`\n}\n\n\/\/ componentGroupAPIResponse is an internal type to hide\n\/\/ some the \"data\" nested level from the API.\n\/\/ Some calls (e.g. Get or Create) return the component group in the \"data\" key.\ntype componentGroupAPIResponse struct {\n\tData *ComponentGroup `json:\"data\"`\n}\n\n\/\/ GetAll return all components that have been created.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-components\nfunc (s *ComponentsService) GetAll() (*ComponentResponse, *Response, error) {\n\tu := \"api\/v1\/components\"\n\tv := new(ComponentResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v, resp, err\n}\n\n\/\/ Get return a single component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-a-component\nfunc (s *ComponentsService) Get(id int) (*Component, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Create a new component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/components\nfunc (s *ComponentsService) Create(c *Component) (*Component, *Response, error) {\n\tu := \"api\/v1\/components\"\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"POST\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Update updates a component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/update-a-component\nfunc (s *ComponentsService) Update(id int, c *Component) (*Component, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"PUT\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Delete deletes a component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/delete-a-component\nfunc (s *ComponentsService) Delete(id int) (*Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\n\tresp, err := s.client.Call(\"DELETE\", u, nil, nil)\n\treturn resp, err\n}\n\n\/\/ GetAllGroups return all component groups that have been created.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-componentgroups\nfunc (s *ComponentsService) GetAllGroups() (*ComponentGroupResponse, *Response, error) {\n\tu := \"api\/v1\/components\/groups\"\n\tv := new(ComponentGroupResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v, resp, err\n}\n\n\/\/ GetGroup return a single component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-a-component-group\nfunc (s *ComponentsService) GetGroup(id int) (*ComponentGroup, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ CreateGroup creates a new component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/post-componentgroups\nfunc (s *ComponentsService) CreateGroup(c *ComponentGroup) (*ComponentGroup, *Response, error) {\n\tu := \"api\/v1\/components\/groups\"\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"POST\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ UpdateGroup updates a component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/put-component-group\nfunc (s *ComponentsService) UpdateGroup(id int, c *ComponentGroup) (*ComponentGroup, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"PUT\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ DeleteGroup deletes a component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/delete-component-group\nfunc (s *ComponentsService) DeleteGroup(id int) (*Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\n\tresp, err := s.client.Call(\"DELETE\", u, nil, nil)\n\treturn resp, err\n}\n<commit_msg>Added Component.Enabled field<commit_after>package cachet\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\t\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/component-statuses\n\n\t\/\/ ComponentStatusOperational means \"The component is working.\"\n\tComponentStatusOperational = 1\n\t\/\/ ComponentStatusPerformanceIssues means \"The component is experiencing some slowness.\"\n\tComponentStatusPerformanceIssues = 2\n\t\/\/ ComponentStatusPartialOutage means \"The component may not be working for everybody.\"\n\t\/\/ This could be a geographical issue for example.\n\tComponentStatusPartialOutage = 3\n\t\/\/ ComponentStatusMajorOutage means \"The component is not working for anybody.\"\n\tComponentStatusMajorOutage = 4\n)\n\n\/\/ ComponentsService contains REST endpoints that belongs to cachet components.\ntype ComponentsService struct {\n\tclient *Client\n}\n\n\/\/ Component entity reflects one single component\ntype Component struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tLink string `json:\"link,omitempty\"`\n\tStatus int `json:\"status,omitempty\"`\n\tOrder int `json:\"order,omitempty\"`\n\tEnabled bool `json:\"enabled,omitempty\"`\n\tGroupID int `json:\"group_id,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n\tDeletedAt string `json:\"deleted_at,omitempty\"`\n\tStatusName string `json:\"status_name,omitempty\"`\n}\n\n\/\/ ComponentGroup entity reflects one single component group\ntype ComponentGroup struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tOrder int `json:\"order,omitempty\"`\n\tCreatedAt string `json:\"created_at,omitempty\"`\n\tUpdatedAt string `json:\"updated_at,omitempty\"`\n}\n\n\/\/ ComponentResponse reflects the response of \/components call\ntype ComponentResponse struct {\n\tMeta Meta `json:\"meta,omitempty\"`\n\tComponents []Component `json:\"data,omitempty\"`\n}\n\n\/\/ ComponentGroupResponse reflects the response of \/components\/groups call\ntype ComponentGroupResponse struct {\n\tMeta Meta `json:\"meta,omitempty\"`\n\tComponentGroups []ComponentGroup `json:\"data,omitempty\"`\n}\n\n\/\/ componentApiResponse is an internal type to hide\n\/\/ some the \"data\" nested level from the API.\n\/\/ Some calls (e.g. Get or Create) return the component in the \"data\" key.\ntype componentAPIResponse struct {\n\tData *Component `json:\"data\"`\n}\n\n\/\/ componentGroupAPIResponse is an internal type to hide\n\/\/ some the \"data\" nested level from the API.\n\/\/ Some calls (e.g. Get or Create) return the component group in the \"data\" key.\ntype componentGroupAPIResponse struct {\n\tData *ComponentGroup `json:\"data\"`\n}\n\n\/\/ GetAll return all components that have been created.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-components\nfunc (s *ComponentsService) GetAll() (*ComponentResponse, *Response, error) {\n\tu := \"api\/v1\/components\"\n\tv := new(ComponentResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v, resp, err\n}\n\n\/\/ Get return a single component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-a-component\nfunc (s *ComponentsService) Get(id int) (*Component, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Create a new component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/components\nfunc (s *ComponentsService) Create(c *Component) (*Component, *Response, error) {\n\tu := \"api\/v1\/components\"\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"POST\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Update updates a component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/update-a-component\nfunc (s *ComponentsService) Update(id int, c *Component) (*Component, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\tv := new(componentAPIResponse)\n\n\tresp, err := s.client.Call(\"PUT\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ Delete deletes a component.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/delete-a-component\nfunc (s *ComponentsService) Delete(id int) (*Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/%d\", id)\n\n\tresp, err := s.client.Call(\"DELETE\", u, nil, nil)\n\treturn resp, err\n}\n\n\/\/ GetAllGroups return all component groups that have been created.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-componentgroups\nfunc (s *ComponentsService) GetAllGroups() (*ComponentGroupResponse, *Response, error) {\n\tu := \"api\/v1\/components\/groups\"\n\tv := new(ComponentGroupResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v, resp, err\n}\n\n\/\/ GetGroup return a single component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/get-a-component-group\nfunc (s *ComponentsService) GetGroup(id int) (*ComponentGroup, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"GET\", u, nil, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ CreateGroup creates a new component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/post-componentgroups\nfunc (s *ComponentsService) CreateGroup(c *ComponentGroup) (*ComponentGroup, *Response, error) {\n\tu := \"api\/v1\/components\/groups\"\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"POST\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ UpdateGroup updates a component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/put-component-group\nfunc (s *ComponentsService) UpdateGroup(id int, c *ComponentGroup) (*ComponentGroup, *Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\tv := new(componentGroupAPIResponse)\n\n\tresp, err := s.client.Call(\"PUT\", u, c, v)\n\treturn v.Data, resp, err\n}\n\n\/\/ DeleteGroup deletes a component group.\n\/\/\n\/\/ Docs: https:\/\/docs.cachethq.io\/docs\/delete-component-group\nfunc (s *ComponentsService) DeleteGroup(id int) (*Response, error) {\n\tu := fmt.Sprintf(\"api\/v1\/components\/groups\/%d\", id)\n\n\tresp, err := s.client.Call(\"DELETE\", u, nil, nil)\n\treturn resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The googlecompute package contains a packer.Builder implementation that\n\/\/ builds images for Google Compute Engine.\npackage googlecompute\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\n\/\/ The unique ID for this builder.\nconst BuilderId = \"kelseyhightower.googlecompute\"\n\n\/\/ Builder represents a Packer Builder.\ntype Builder struct {\n\tconfig config\n\trunner multistep.Runner\n}\n\n\/\/ config holds the googlecompute builder configuration settings.\ntype config struct {\n\tBucketName string `mapstructure:\"bucket_name\"`\n\tClientSecretsFile string `mapstructure:\"client_secrets_file\"`\n\tImageName string `mapstructure:\"image_name\"`\n\tImageDescription string `mapstructure:\"image_description\"`\n\tMachineType string `mapstructure:\"machine_type\"`\n\tMetadata map[string]string `mapstructure:\"metadata\"`\n\tNetwork string `mapstructure:\"network\"`\n\tPassphrase string `mapstructure:\"passphrase\"`\n\tPrivateKeyFile string `mapstructure:\"private_key_file\"`\n\tProjectId string `mapstructure:\"project_id\"`\n\tSourceImage string `mapstructure:\"source_image\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPort uint `mapstructure:\"ssh_port\"`\n\tRawSSHTimeout string `mapstructure:\"ssh_timeout\"`\n\tRawStateTimeout string `mapstructure:\"state_timeout\"`\n\tTags []string `mapstructure:\"tags\"`\n\tZone string `mapstructure:\"zone\"`\n\tclientSecrets *clientSecrets\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tinstanceName string\n\tprivateKeyBytes []byte\n\tsshTimeout time.Duration\n\tstateTimeout time.Duration\n\ttpl *packer.ConfigTemplate\n}\n\n\/\/ Prepare processes the build configuration parameters.\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\t\/\/ Load the packer config.\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\n\terrs := common.CheckUnusedConfig(md)\n\t\/\/ Collect errors if any.\n\tif err := common.CheckUnusedConfig(md); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Set defaults.\n\tif b.config.Network == \"\" {\n\t\tb.config.Network = \"default\"\n\t}\n\tif b.config.ImageDescription == \"\" {\n\t\tb.config.ImageDescription = \"Created by Packer\"\n\t}\n\tif b.config.ImageName == \"\" {\n\t\t\/\/ Default to packer-{{ unix timestamp (utc) }}\n\t\tb.config.ImageName = \"packer-{{timestamp}}\"\n\t}\n\tif b.config.MachineType == \"\" {\n\t\tb.config.MachineType = \"n1-standard-1\"\n\t}\n\tif b.config.RawSSHTimeout == \"\" {\n\t\tb.config.RawSSHTimeout = \"5m\"\n\t}\n\tif b.config.RawStateTimeout == \"\" {\n\t\tb.config.RawStateTimeout = \"5m\"\n\t}\n\tif b.config.SSHUsername == \"\" {\n\t\tb.config.SSHUsername = \"root\"\n\t}\n\tif b.config.SSHPort == 0 {\n\t\tb.config.SSHPort = 22\n\t}\n\t\/\/ Process Templates\n\ttemplates := map[string]*string{\n\t\t\"bucket_name\": &b.config.BucketName,\n\t\t\"client_secrets_file\": &b.config.ClientSecretsFile,\n\t\t\"image_name\": &b.config.ImageName,\n\t\t\"image_description\": &b.config.ImageDescription,\n\t\t\"machine_type\": &b.config.MachineType,\n\t\t\"network\": &b.config.Network,\n\t\t\"passphrase\": &b.config.Passphrase,\n\t\t\"private_key_file\": &b.config.PrivateKeyFile,\n\t\t\"project_id\": &b.config.ProjectId,\n\t\t\"source_image\": &b.config.SourceImage,\n\t\t\"ssh_username\": &b.config.SSHUsername,\n\t\t\"ssh_timeout\": &b.config.RawSSHTimeout,\n\t\t\"state_timeout\": &b.config.RawStateTimeout,\n\t\t\"zone\": &b.config.Zone,\n\t}\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\t\/\/ Process required parameters.\n\tif b.config.BucketName == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a bucket_name must be specified\"))\n\t}\n\tif b.config.ClientSecretsFile == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a client_secrets_file must be specified\"))\n\t}\n\tif b.config.PrivateKeyFile == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a private_key_file must be specified\"))\n\t}\n\tif b.config.ProjectId == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a project_id must be specified\"))\n\t}\n\tif b.config.SourceImage == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a source_image must be specified\"))\n\t}\n\tif b.config.Zone == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a zone must be specified\"))\n\t}\n\t\/\/ Process timeout settings.\n\tsshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing ssh_timeout: %s\", err))\n\t}\n\tb.config.sshTimeout = sshTimeout\n\tstateTimeout, err := time.ParseDuration(b.config.RawStateTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing state_timeout: %s\", err))\n\t}\n\tb.config.stateTimeout = stateTimeout\n\t\/\/ Load the client secrets file.\n\tcs, err := loadClientSecrets(b.config.ClientSecretsFile)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing client secrets file: %s\", err))\n\t}\n\tb.config.clientSecrets = cs\n\t\/\/ Load the private key.\n\tb.config.privateKeyBytes, err = processPrivateKeyFile(b.config.PrivateKeyFile, b.config.Passphrase)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed loading private key file: %s\", err))\n\t}\n\t\/\/ Check for any errors.\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn nil, nil\n}\n\n\/\/ Run executes a googlecompute Packer build and returns a packer.Artifact\n\/\/ representing a GCE machine image.\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Initialize the Google Compute Engine API.\n\tclient, err := New(b.config.ProjectId, b.config.Zone, b.config.clientSecrets, b.config.privateKeyBytes)\n\tif err != nil {\n\t\tlog.Println(\"Failed to create the Google Compute Engine client.\")\n\t\treturn nil, err\n\t}\n\t\/\/ Set up the state.\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", b.config)\n\tstate.Put(\"client\", client)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\t\/\/ Build the steps.\n\tsteps := []multistep.Step{\n\t\tnew(stepCreateSSHKey),\n\t\tnew(stepCreateInstance),\n\t\tnew(stepInstanceInfo),\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: sshAddress,\n\t\t\tSSHConfig: sshConfig,\n\t\t\tSSHWaitTimeout: 5 * time.Minute,\n\t\t},\n\t\tnew(common.StepProvision),\n\t\tnew(stepUpdateGsutil),\n\t\tnew(stepCreateImage),\n\t\tnew(stepUploadImage),\n\t\tnew(stepRegisterImage),\n\t}\n\t\/\/ Run the steps.\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\tb.runner.Run(state)\n\t\/\/ Report any errors.\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\tif _, ok := state.GetOk(\"image_name\"); !ok {\n\t\tlog.Println(\"Failed to find image_name in state. Bug?\")\n\t\treturn nil, nil\n\t}\n\tartifact := &Artifact{\n\t\timageName: state.Get(\"image_name\").(string),\n\t\tclient: client,\n\t}\n\treturn artifact, nil\n}\n\n\/\/ Cancel.\nfunc (b *Builder) Cancel() {}\n<commit_msg>builder\/googlecompute: set ID to packer namespace now thats in core<commit_after>\/\/ The googlecompute package contains a packer.Builder implementation that\n\/\/ builds images for Google Compute Engine.\npackage googlecompute\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\n\/\/ The unique ID for this builder.\nconst BuilderId = \"packer.googlecompute\"\n\n\/\/ Builder represents a Packer Builder.\ntype Builder struct {\n\tconfig config\n\trunner multistep.Runner\n}\n\n\/\/ config holds the googlecompute builder configuration settings.\ntype config struct {\n\tBucketName string `mapstructure:\"bucket_name\"`\n\tClientSecretsFile string `mapstructure:\"client_secrets_file\"`\n\tImageName string `mapstructure:\"image_name\"`\n\tImageDescription string `mapstructure:\"image_description\"`\n\tMachineType string `mapstructure:\"machine_type\"`\n\tMetadata map[string]string `mapstructure:\"metadata\"`\n\tNetwork string `mapstructure:\"network\"`\n\tPassphrase string `mapstructure:\"passphrase\"`\n\tPrivateKeyFile string `mapstructure:\"private_key_file\"`\n\tProjectId string `mapstructure:\"project_id\"`\n\tSourceImage string `mapstructure:\"source_image\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPort uint `mapstructure:\"ssh_port\"`\n\tRawSSHTimeout string `mapstructure:\"ssh_timeout\"`\n\tRawStateTimeout string `mapstructure:\"state_timeout\"`\n\tTags []string `mapstructure:\"tags\"`\n\tZone string `mapstructure:\"zone\"`\n\tclientSecrets *clientSecrets\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tinstanceName string\n\tprivateKeyBytes []byte\n\tsshTimeout time.Duration\n\tstateTimeout time.Duration\n\ttpl *packer.ConfigTemplate\n}\n\n\/\/ Prepare processes the build configuration parameters.\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\t\/\/ Load the packer config.\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\n\terrs := common.CheckUnusedConfig(md)\n\t\/\/ Collect errors if any.\n\tif err := common.CheckUnusedConfig(md); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Set defaults.\n\tif b.config.Network == \"\" {\n\t\tb.config.Network = \"default\"\n\t}\n\tif b.config.ImageDescription == \"\" {\n\t\tb.config.ImageDescription = \"Created by Packer\"\n\t}\n\tif b.config.ImageName == \"\" {\n\t\t\/\/ Default to packer-{{ unix timestamp (utc) }}\n\t\tb.config.ImageName = \"packer-{{timestamp}}\"\n\t}\n\tif b.config.MachineType == \"\" {\n\t\tb.config.MachineType = \"n1-standard-1\"\n\t}\n\tif b.config.RawSSHTimeout == \"\" {\n\t\tb.config.RawSSHTimeout = \"5m\"\n\t}\n\tif b.config.RawStateTimeout == \"\" {\n\t\tb.config.RawStateTimeout = \"5m\"\n\t}\n\tif b.config.SSHUsername == \"\" {\n\t\tb.config.SSHUsername = \"root\"\n\t}\n\tif b.config.SSHPort == 0 {\n\t\tb.config.SSHPort = 22\n\t}\n\t\/\/ Process Templates\n\ttemplates := map[string]*string{\n\t\t\"bucket_name\": &b.config.BucketName,\n\t\t\"client_secrets_file\": &b.config.ClientSecretsFile,\n\t\t\"image_name\": &b.config.ImageName,\n\t\t\"image_description\": &b.config.ImageDescription,\n\t\t\"machine_type\": &b.config.MachineType,\n\t\t\"network\": &b.config.Network,\n\t\t\"passphrase\": &b.config.Passphrase,\n\t\t\"private_key_file\": &b.config.PrivateKeyFile,\n\t\t\"project_id\": &b.config.ProjectId,\n\t\t\"source_image\": &b.config.SourceImage,\n\t\t\"ssh_username\": &b.config.SSHUsername,\n\t\t\"ssh_timeout\": &b.config.RawSSHTimeout,\n\t\t\"state_timeout\": &b.config.RawStateTimeout,\n\t\t\"zone\": &b.config.Zone,\n\t}\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\t\/\/ Process required parameters.\n\tif b.config.BucketName == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a bucket_name must be specified\"))\n\t}\n\tif b.config.ClientSecretsFile == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a client_secrets_file must be specified\"))\n\t}\n\tif b.config.PrivateKeyFile == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a private_key_file must be specified\"))\n\t}\n\tif b.config.ProjectId == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a project_id must be specified\"))\n\t}\n\tif b.config.SourceImage == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a source_image must be specified\"))\n\t}\n\tif b.config.Zone == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a zone must be specified\"))\n\t}\n\t\/\/ Process timeout settings.\n\tsshTimeout, err := time.ParseDuration(b.config.RawSSHTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing ssh_timeout: %s\", err))\n\t}\n\tb.config.sshTimeout = sshTimeout\n\tstateTimeout, err := time.ParseDuration(b.config.RawStateTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing state_timeout: %s\", err))\n\t}\n\tb.config.stateTimeout = stateTimeout\n\t\/\/ Load the client secrets file.\n\tcs, err := loadClientSecrets(b.config.ClientSecretsFile)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing client secrets file: %s\", err))\n\t}\n\tb.config.clientSecrets = cs\n\t\/\/ Load the private key.\n\tb.config.privateKeyBytes, err = processPrivateKeyFile(b.config.PrivateKeyFile, b.config.Passphrase)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed loading private key file: %s\", err))\n\t}\n\t\/\/ Check for any errors.\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn nil, nil\n}\n\n\/\/ Run executes a googlecompute Packer build and returns a packer.Artifact\n\/\/ representing a GCE machine image.\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Initialize the Google Compute Engine API.\n\tclient, err := New(b.config.ProjectId, b.config.Zone, b.config.clientSecrets, b.config.privateKeyBytes)\n\tif err != nil {\n\t\tlog.Println(\"Failed to create the Google Compute Engine client.\")\n\t\treturn nil, err\n\t}\n\t\/\/ Set up the state.\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", b.config)\n\tstate.Put(\"client\", client)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\t\/\/ Build the steps.\n\tsteps := []multistep.Step{\n\t\tnew(stepCreateSSHKey),\n\t\tnew(stepCreateInstance),\n\t\tnew(stepInstanceInfo),\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: sshAddress,\n\t\t\tSSHConfig: sshConfig,\n\t\t\tSSHWaitTimeout: 5 * time.Minute,\n\t\t},\n\t\tnew(common.StepProvision),\n\t\tnew(stepUpdateGsutil),\n\t\tnew(stepCreateImage),\n\t\tnew(stepUploadImage),\n\t\tnew(stepRegisterImage),\n\t}\n\t\/\/ Run the steps.\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\tb.runner.Run(state)\n\t\/\/ Report any errors.\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\tif _, ok := state.GetOk(\"image_name\"); !ok {\n\t\tlog.Println(\"Failed to find image_name in state. Bug?\")\n\t\treturn nil, nil\n\t}\n\tartifact := &Artifact{\n\t\timageName: state.Get(\"image_name\").(string),\n\t\tclient: client,\n\t}\n\treturn artifact, nil\n}\n\n\/\/ Cancel.\nfunc (b *Builder) Cancel() {}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux,have_chainfs\n\npackage chainfs\n\n\/*\nextern int start_chainfs(int mode, char *mount_path);\nextern void stop_chainfs(void);\nextern int create_layer(char *id, char *parent_id);\nextern int remove_layer(char *id);\nextern int check_layer(char *id);\nextern int alloc_chainfs(char *id);\nextern int release_chainfs(char *id);\n#cgo LDFLAGS: -lfuse -lulockmgr -lchainfs\n#cgo CFLAGS: -g3\n*\/\nimport \"C\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/libopenstorage\/openstorage\/graph\"\n\n\t\"github.com\/docker\/docker\/daemon\/graphdriver\"\n\t\"github.com\/docker\/docker\/pkg\/archive\"\n\t\"github.com\/docker\/docker\/pkg\/chrootarchive\"\n\t\"github.com\/docker\/docker\/pkg\/directory\"\n\t\"github.com\/docker\/docker\/pkg\/idtools\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tName = \"chainfs\"\n\tType = api.Graph\n\tvirtPath = \"\/var\/lib\/openstorage\/chainfs\"\n)\n\ntype Driver struct {\n}\n\nfunc Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {\n\tlogrus.Infof(\"Initializing libchainfs at home: %s and storage: %v...\", home, virtPath)\n\n\tcVirtPath := C.CString(virtPath)\n\tgo C.start_chainfs(1, cVirtPath)\n\n\td := &Driver{}\n\n\treturn d, nil\n}\n\nfunc (d *Driver) String() string {\n\treturn \"openstorage-chainfs\"\n}\n\n\/\/ Cleanup performs necessary tasks to release resources\n\/\/ held by the driver, e.g., unmounting all layered filesystems\n\/\/ known to this driver.\nfunc (d *Driver) Cleanup() error {\n\tlogrus.Infof(\"Stopping libchainfs at %s\", virtPath)\n\tC.stop_chainfs()\n\treturn nil\n}\n\n\/\/ Status returns a set of key-value pairs which give low\n\/\/ level diagnostic status about this driver.\nfunc (d *Driver) Status() [][2]string {\n\treturn [][2]string{\n\t\t{\"OpenStorage FUSE\", \"OK\"},\n\t}\n}\n\n\/\/ Create creates a new, empty, filesystem layer with the\n\/\/ specified id and parent and mountLabel. Parent and mountLabel may be \"\".\nfunc (d *Driver) Create(id string, parent string, ml string) error {\n\tif parent != \"\" {\n\t\tlogrus.Infof(\"Creating layer %s with parent %s\", id, parent)\n\t} else {\n\t\tlogrus.Infof(\"Creating parent layer %s\", id)\n\t}\n\n\tcID := C.CString(id)\n\tcParent := C.CString(parent)\n\n\tret, err := C.create_layer(cID, cParent)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while creating layer %s\", id)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove attempts to remove the filesystem layer with this id.\nfunc (d *Driver) Remove(id string) error {\n\tlogrus.Infof(\"Removing layer %s\", id)\n\n\tcID := C.CString(id)\n\n\tret, err := C.remove_layer(cID)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while removing layer %s\", id)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Returns a set of key-value pairs which give low level information\n\/\/ about the image\/container driver is managing.\nfunc (d *Driver) GetMetadata(id string) (map[string]string, error) {\n\treturn nil, nil\n}\n\n\/\/ Get returns the mountpoint for the layered filesystem referred\n\/\/ to by this id. You can optionally specify a mountLabel or \"\".\n\/\/ Returns the absolute path to the mounted layered filesystem.\nfunc (d *Driver) Get(id, mountLabel string) (string, error) {\n\tcID := C.CString(id)\n\n\tret, err := C.alloc_chainfs(cID)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while creating a chain FS for %s\", id)\n\t\treturn \"\", err\n\t} else {\n\t\tlogrus.Debugf(\"Created a chain FS for %s\", id)\n\t\tchainPath := path.Join(virtPath, id)\n\n\t\treturn chainPath, err\n\t}\n}\n\n\/\/ Put releases the system resources for the specified id,\n\/\/ e.g, unmounting layered filesystem.\nfunc (d *Driver) Put(id string) error {\n\tlogrus.Debugf(\"Releasing chain FS for %s\", id)\n\n\tcID := C.CString(id)\n\t_, err := C.release_chainfs(cID)\n\n\treturn err\n}\n\n\/\/ Exists returns whether a filesystem layer with the specified\n\/\/ ID exists on this driver.\n\/\/ All cache entries exist.\nfunc (d *Driver) Exists(id string) bool {\n\tpath := path.Join(virtPath, id)\n\n\t_, err := os.Stat(path)\n\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\n\n\/\/ ApplyDiff extracts the changeset from the given diff into the\n\/\/ layer with the specified id and parent, returning the size of the\n\/\/ new layer in bytes.\n\/\/ The archive.Reader must be an uncompressed stream.\nfunc (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {\n\tdir := path.Join(virtPath, id)\n\tif err := chrootarchive.UntarUncompressed(diff, dir, nil); err != nil {\n\t\tlogrus.Warnf(\"Error while applying diff to %s: %v\", id, err)\n\t\treturn 0, err\n\t}\n\n\t\/\/ show invalid whiteouts warning.\n\tfiles, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir))\n\tif err == nil && len(files) > 0 {\n\t\tlogrus.Warnf(\"Archive contains aufs hardlink references that are not supported.\")\n\t}\n\n\treturn d.DiffSize(id, parent)\n}\n\n\/\/ Changes produces a list of changes between the specified layer\n\/\/ and its parent layer. If parent is \"\", then all changes will be ADD changes.\nfunc (d *Driver) Changes(id, parent string) ([]archive.Change, error) {\n\n\treturn nil, nil\n}\n\n\/\/ Diff produces an archive of the changes between the specified\n\/\/ layer and its parent layer which may be \"\".\nfunc (d *Driver) Diff(id, parent string) (archive.Archive, error) {\n\treturn archive.TarWithOptions(path.Join(virtPath, id), &archive.TarOptions{\n\t\tCompression: archive.Uncompressed,\n\t\tExcludePatterns: []string{archive.WhiteoutMetaPrefix + \"*\", \"!\" + archive.WhiteoutOpaqueDir},\n\t})\n}\n\n\/\/ DiffSize calculates the changes between the specified id\n\/\/ and its parent and returns the size in bytes of the changes\n\/\/ relative to its base filesystem directory.\nfunc (d *Driver) DiffSize(id, parent string) (size int64, err error) {\n\treturn directory.Size(path.Join(virtPath, id))\n}\n\nfunc init() {\n\tgraph.Register(Name, Init)\n}\n<commit_msg>Added some debug log messages<commit_after>\/\/ +build linux,have_chainfs\n\npackage chainfs\n\n\/*\nextern int start_chainfs(int mode, char *mount_path);\nextern void stop_chainfs(void);\nextern int create_layer(char *id, char *parent_id);\nextern int remove_layer(char *id);\nextern int check_layer(char *id);\nextern int alloc_chainfs(char *id);\nextern int release_chainfs(char *id);\n#cgo LDFLAGS: -lfuse -lulockmgr -lchainfs\n#cgo CFLAGS: -g3\n*\/\nimport \"C\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/libopenstorage\/openstorage\/graph\"\n\n\t\"github.com\/docker\/docker\/daemon\/graphdriver\"\n\t\"github.com\/docker\/docker\/pkg\/archive\"\n\t\"github.com\/docker\/docker\/pkg\/chrootarchive\"\n\t\"github.com\/docker\/docker\/pkg\/directory\"\n\t\"github.com\/docker\/docker\/pkg\/idtools\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tName = \"chainfs\"\n\tType = api.Graph\n\tvirtPath = \"\/var\/lib\/openstorage\/chainfs\"\n)\n\ntype Driver struct {\n}\n\nfunc Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {\n\tlogrus.Infof(\"Initializing libchainfs at home: %s and storage: %v...\", home, virtPath)\n\n\tcVirtPath := C.CString(virtPath)\n\tgo C.start_chainfs(1, cVirtPath)\n\n\td := &Driver{}\n\n\treturn d, nil\n}\n\nfunc (d *Driver) String() string {\n\treturn \"openstorage-chainfs\"\n}\n\n\/\/ Cleanup performs necessary tasks to release resources\n\/\/ held by the driver, e.g., unmounting all layered filesystems\n\/\/ known to this driver.\nfunc (d *Driver) Cleanup() error {\n\tlogrus.Infof(\"Stopping libchainfs at %s\", virtPath)\n\tC.stop_chainfs()\n\treturn nil\n}\n\n\/\/ Status returns a set of key-value pairs which give low\n\/\/ level diagnostic status about this driver.\nfunc (d *Driver) Status() [][2]string {\n\treturn [][2]string{\n\t\t{\"OpenStorage FUSE\", \"OK\"},\n\t}\n}\n\n\/\/ Create creates a new, empty, filesystem layer with the\n\/\/ specified id and parent and mountLabel. Parent and mountLabel may be \"\".\nfunc (d *Driver) Create(id string, parent string, ml string) error {\n\tif parent != \"\" {\n\t\tlogrus.Infof(\"Creating layer %s with parent %s\", id, parent)\n\t} else {\n\t\tlogrus.Infof(\"Creating parent layer %s\", id)\n\t}\n\n\tcID := C.CString(id)\n\tcParent := C.CString(parent)\n\n\tret, err := C.create_layer(cID, cParent)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while creating layer %s\", id)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove attempts to remove the filesystem layer with this id.\nfunc (d *Driver) Remove(id string) error {\n\tlogrus.Infof(\"Removing layer %s\", id)\n\n\tcID := C.CString(id)\n\n\tret, err := C.remove_layer(cID)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while removing layer %s\", id)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Returns a set of key-value pairs which give low level information\n\/\/ about the image\/container driver is managing.\nfunc (d *Driver) GetMetadata(id string) (map[string]string, error) {\n\treturn nil, nil\n}\n\n\/\/ Get returns the mountpoint for the layered filesystem referred\n\/\/ to by this id. You can optionally specify a mountLabel or \"\".\n\/\/ Returns the absolute path to the mounted layered filesystem.\nfunc (d *Driver) Get(id, mountLabel string) (string, error) {\n\tcID := C.CString(id)\n\n\tret, err := C.alloc_chainfs(cID)\n\tif int(ret) != 0 {\n\t\tlogrus.Warnf(\"Error while creating a chain FS for %s\", id)\n\t\treturn \"\", err\n\t} else {\n\t\tlogrus.Debugf(\"Created a chain FS for %s\", id)\n\t\tchainPath := path.Join(virtPath, id)\n\n\t\treturn chainPath, err\n\t}\n}\n\n\/\/ Put releases the system resources for the specified id,\n\/\/ e.g, unmounting layered filesystem.\nfunc (d *Driver) Put(id string) error {\n\tlogrus.Debugf(\"Releasing chain FS for %s\", id)\n\n\tcID := C.CString(id)\n\t_, err := C.release_chainfs(cID)\n\n\treturn err\n}\n\n\/\/ Exists returns whether a filesystem layer with the specified\n\/\/ ID exists on this driver.\n\/\/ All cache entries exist.\nfunc (d *Driver) Exists(id string) bool {\n\tpath := path.Join(virtPath, id)\n\n\t_, err := os.Stat(path)\n\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\n\n\/\/ ApplyDiff extracts the changeset from the given diff into the\n\/\/ layer with the specified id and parent, returning the size of the\n\/\/ new layer in bytes.\n\/\/ The archive.Reader must be an uncompressed stream.\nfunc (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {\n\tdir := path.Join(virtPath, id)\n\n\tlogrus.Infof(\"Applying diff at path %s\\n\", dir)\n\n\tif err := chrootarchive.UntarUncompressed(diff, dir, nil); err != nil {\n\t\tlogrus.Warnf(\"Error while applying diff to %s: %v\", id, err)\n\t\treturn 0, err\n\t}\n\n\t\/\/ show invalid whiteouts warning.\n\tfiles, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir))\n\tif err == nil && len(files) > 0 {\n\t\tlogrus.Warnf(\"Archive contains aufs hardlink references that are not supported.\")\n\t}\n\n\treturn d.DiffSize(id, parent)\n}\n\n\/\/ Changes produces a list of changes between the specified layer\n\/\/ and its parent layer. If parent is \"\", then all changes will be ADD changes.\nfunc (d *Driver) Changes(id, parent string) ([]archive.Change, error) {\n\n\treturn nil, nil\n}\n\n\/\/ Diff produces an archive of the changes between the specified\n\/\/ layer and its parent layer which may be \"\".\nfunc (d *Driver) Diff(id, parent string) (archive.Archive, error) {\n\treturn archive.TarWithOptions(path.Join(virtPath, id), &archive.TarOptions{\n\t\tCompression: archive.Uncompressed,\n\t\tExcludePatterns: []string{archive.WhiteoutMetaPrefix + \"*\", \"!\" + archive.WhiteoutOpaqueDir},\n\t})\n}\n\n\/\/ DiffSize calculates the changes between the specified id\n\/\/ and its parent and returns the size in bytes of the changes\n\/\/ relative to its base filesystem directory.\nfunc (d *Driver) DiffSize(id, parent string) (size int64, err error) {\n\treturn directory.Size(path.Join(virtPath, id))\n}\n\nfunc init() {\n\tgraph.Register(Name, Init)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Casa Platform\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package casa defines interfaces and types for the casa home automation\n\/\/ platform. These interfaces are subject to change without notice as casa is\n\/\/ refined. Feel free to play with it now but wait until a v1 release before\n\/\/ you depend on it for anything.\npackage casa\n\nimport \"github.com\/spf13\/viper\"\n\n\/\/ Message represents an MQTT message\n\/\/ Might be better to be an interface?\ntype Message struct {\n\tTopic string\n\tPayload []byte\n\tRetain bool\n}\n\n\/\/ MessageBus defines a system for passing Messages\ntype MessageBus interface {\n\t\/\/ Shutdown the MessageBus\n\tClose() error\n\n\t\/\/ NewClient returns a client that is connected to the MessageBus\n\tNewClient() MessageClient\n}\n\n\/\/ MessageClient sends and recieves Messages to a MessageBus\ntype MessageClient interface {\n\t\/\/ Register a function to call when a message or error is received\n\tHandle(func(msg *Message, err error))\n\n\t\/\/ Publish a pre-composed message\n\tPublishMessage(message Message) error\n\n\t\/\/ Subscribes to the given topic\n\tSubscribe(topic string) error\n\n\t\/\/ Removes subscription for the topic\n\tUnsubscribe(topic string) error\n\n\t\/\/ Closes the client\n\tClose() error\n}\n\n\/\/ Service defines a type that can subscribe to the message bus and perform\n\/\/ actions.\ntype Service interface {\n\t\/\/ Start the service. Config is the config section for that service,\n\t\/\/ with the global MQTT config section appended.\n\tStart(config *viper.Viper) error\n\n\t\/\/ Specifies the logger to be used.\n\tUseLogger(logger Logger)\n\n\t\/\/ Stops the service.\n\tStop() error\n}\n\n\/\/ Logger represents a way to log error messages for the user.\ntype Logger interface {\n\tLog(a ...interface{})\n}\n<commit_msg>Fix typo<commit_after>\/\/ Copyright © 2016 Casa Platform\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package casa defines interfaces and types for the casa home automation\n\/\/ platform. These interfaces are subject to change without notice as casa is\n\/\/ refined. Feel free to play with it now but wait until a v1 release before\n\/\/ you depend on it for anything.\npackage casa\n\nimport \"github.com\/spf13\/viper\"\n\n\/\/ Message represents an MQTT message\n\/\/ Might be better to be an interface?\ntype Message struct {\n\tTopic string\n\tPayload []byte\n\tRetain bool\n}\n\n\/\/ MessageBus defines a system for passing Messages\ntype MessageBus interface {\n\t\/\/ Shutdown the MessageBus\n\tClose() error\n\n\t\/\/ NewClient returns a client that is connected to the MessageBus\n\tNewClient() MessageClient\n}\n\n\/\/ MessageClient sends and receives Messages to a MessageBus\ntype MessageClient interface {\n\t\/\/ Register a function to call when a message or error is received\n\tHandle(func(msg *Message, err error))\n\n\t\/\/ Publish a pre-composed message\n\tPublishMessage(message Message) error\n\n\t\/\/ Subscribes to the given topic\n\tSubscribe(topic string) error\n\n\t\/\/ Removes subscription for the topic\n\tUnsubscribe(topic string) error\n\n\t\/\/ Closes the client\n\tClose() error\n}\n\n\/\/ Service defines a type that can subscribe to the message bus and perform\n\/\/ actions.\ntype Service interface {\n\t\/\/ Start the service. Config is the config section for that service,\n\t\/\/ with the global MQTT config section appended.\n\tStart(config *viper.Viper) error\n\n\t\/\/ Specifies the logger to be used.\n\tUseLogger(logger Logger)\n\n\t\/\/ Stops the service.\n\tStop() error\n}\n\n\/\/ Logger represents a way to log error messages for the user.\ntype Logger interface {\n\tLog(a ...interface{})\n}\n<|endoftext|>"} {"text":"<commit_before>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype OrderType int\n\nconst (\n\tLimit OrderType = iota\n\tMarket\n\tFillOrKill\n\tImmediateOrCancel\n)\n\ntype Game struct {\n\tAccount string\n\tInstanceId uint64\n\tInstructions map[string]string\n\tSecondsPerTradingDay uint64\n\tTickers []string\n\tVenues []string\n}\n\ntype GameState struct {\n\tDetails struct {\n\t\tEndOfTheWorldDay uint64\n\t\tTradingDay uint64\n\t}\n\tFlash struct {\n\t\tInfo string\n\t}\n\tDone bool\n\tId uint64\n\tState string\n}\n\ntype Symbol struct {\n\tSymbol string\n\tName string\n}\n\ntype Order struct {\n\tAccount string `json:\"account\"`\n\tVenue string `json:\"venue\"`\n\tStock string `json:\"stock\"`\n\tPrice uint64 `json:\"price\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string `json:\"direction\"`\n\tOrderType OrderType `json:\"orderType\"`\n}\n\ntype StandingOrder struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tIsBuy bool\n}\n\ntype OrderBook struct {\n\tVenue string\n\tSymbol string\n\tAsks []StandingOrder\n\tBids []StandingOrder\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype Fill struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype OrderState struct {\n\tVenue string\n\tSymbol string\n\tPrice uint64\n\tOriginalQuantity uint64 `json:\"originalQty\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string\n\tType OrderType\n\tId uint64\n\tAccount string\n\tTimestamp time.Time `json:\"ts\"`\n\tFills []Fill\n\tTotalFilled uint64\n\tOpen bool\n}\n\ntype Quote struct {\n\tVenue string\n\tSymbol string\n\tBid uint64\n\tBidSize uint64\n\tBidDepth uint64\n\tAsk uint64\n\tAskSize uint64\n\tAskDepth uint64\n\tLast uint64\n\tLastSize uint64\n\tLastTrade time.Time\n\tQuoteTime time.Time\n}\n\ntype Execution struct {\n\tAccount string\n\tVenue string\n\tSymbol string\n\tOrder OrderState\n\tStandingId uint64\n\tPrice uint64\n\tFilled uint64\n\tFilledAt time.Time\n\tStandingComplete bool\n\tIncomingComplete bool\n}\n\nvar orderTypes = [...]string{\n\tLimit: \"limit\",\n\tMarket: \"market\",\n\tFillOrKill: \"fill-or-kill\",\n\tImmediateOrCancel: \"immediate-or-cancel\",\n}\n\nfunc (o OrderType) MarshalText() ([]byte, error) {\n\treturn []byte(orderTypes[o]), nil\n}\n\nvar orderTypeMap = map[string]OrderType{\n\t\"limit\": Limit,\n\t\"market\": Market,\n\t\"fill-or-kill\": FillOrKill,\n\t\"immediate-or-cancel\": ImmediateOrCancel,\n}\n\nfunc (o *OrderType) UnmarshalText(text []byte) error {\n\ttyp, ok := orderTypeMap[string(text)]\n\tif ok {\n\t\t*o = typ\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Unknown order type: %s\", text)\n}\n<commit_msg>Add OrderType String()<commit_after>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype OrderType int\n\nconst (\n\tLimit OrderType = iota\n\tMarket\n\tFillOrKill\n\tImmediateOrCancel\n)\n\ntype Game struct {\n\tAccount string\n\tInstanceId uint64\n\tInstructions map[string]string\n\tSecondsPerTradingDay uint64\n\tTickers []string\n\tVenues []string\n}\n\ntype GameState struct {\n\tDetails struct {\n\t\tEndOfTheWorldDay uint64\n\t\tTradingDay uint64\n\t}\n\tFlash struct {\n\t\tInfo string\n\t}\n\tDone bool\n\tId uint64\n\tState string\n}\n\ntype Symbol struct {\n\tSymbol string\n\tName string\n}\n\ntype Order struct {\n\tAccount string `json:\"account\"`\n\tVenue string `json:\"venue\"`\n\tStock string `json:\"stock\"`\n\tPrice uint64 `json:\"price\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string `json:\"direction\"`\n\tOrderType OrderType `json:\"orderType\"`\n}\n\ntype StandingOrder struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tIsBuy bool\n}\n\ntype OrderBook struct {\n\tVenue string\n\tSymbol string\n\tAsks []StandingOrder\n\tBids []StandingOrder\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype Fill struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype OrderState struct {\n\tVenue string\n\tSymbol string\n\tPrice uint64\n\tOriginalQuantity uint64 `json:\"originalQty\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string\n\tType OrderType\n\tId uint64\n\tAccount string\n\tTimestamp time.Time `json:\"ts\"`\n\tFills []Fill\n\tTotalFilled uint64\n\tOpen bool\n}\n\ntype Quote struct {\n\tVenue string\n\tSymbol string\n\tBid uint64\n\tBidSize uint64\n\tBidDepth uint64\n\tAsk uint64\n\tAskSize uint64\n\tAskDepth uint64\n\tLast uint64\n\tLastSize uint64\n\tLastTrade time.Time\n\tQuoteTime time.Time\n}\n\ntype Execution struct {\n\tAccount string\n\tVenue string\n\tSymbol string\n\tOrder OrderState\n\tStandingId uint64\n\tPrice uint64\n\tFilled uint64\n\tFilledAt time.Time\n\tStandingComplete bool\n\tIncomingComplete bool\n}\n\nvar orderTypes = [...]string{\n\tLimit: \"limit\",\n\tMarket: \"market\",\n\tFillOrKill: \"fill-or-kill\",\n\tImmediateOrCancel: \"immediate-or-cancel\",\n}\n\nfunc (o OrderType) MarshalText() ([]byte, error) {\n\treturn []byte(orderTypes[o]), nil\n}\n\nfunc (o OrderType) String() string {\n\treturn orderTypes[o]\n}\n\nvar orderTypeMap = map[string]OrderType{\n\t\"limit\": Limit,\n\t\"market\": Market,\n\t\"fill-or-kill\": FillOrKill,\n\t\"immediate-or-cancel\": ImmediateOrCancel,\n}\n\nfunc (o *OrderType) UnmarshalText(text []byte) error {\n\ttyp, ok := orderTypeMap[string(text)]\n\tif ok {\n\t\t*o = typ\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Unknown order type: %s\", text)\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\tcapn \"github.com\/glycerine\/go-capnproto\"\n\t\"sort\"\n)\n\nconst (\n\tRMIdEmpty = RMId(0)\n\tKeyLen = 20\n\tClientLen = KeyLen - 8\n)\n\ntype RMId uint32\n\nfunc (rmId RMId) String() string {\n\tif rmId == RMIdEmpty {\n\t\treturn \"RM:Empty\"\n\t} else {\n\t\treturn fmt.Sprintf(\"RM:%x\", uint32(rmId))\n\t}\n}\n\ntype KeyType [KeyLen]byte\n\ntype VarUUId KeyType\n\nfunc MakeVarUUId(data []byte) *VarUUId {\n\tuuid := VarUUId([KeyLen]byte{})\n\tcopy(uuid[:], data)\n\treturn &uuid\n}\n\nfunc MakeVarUUIdFromStr(str string) *VarUUId {\n\tdata, err := hex.DecodeString(str)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn MakeVarUUId(data)\n}\n\nfunc (vUUId VarUUId) String() string {\n\th := ([]byte)(hex.EncodeToString(vUUId[:]))\n\treturn fmt.Sprintf(\"VarUUId:%s\", h)\n}\n\ntype TxnId KeyType\n\nfunc MakeTxnId(data []byte) *TxnId {\n\tid := TxnId([KeyLen]byte{})\n\tcopy(id[:], data)\n\treturn &id\n}\n\nfunc (txnId TxnId) String() string {\n\tt := txnId[:]\n\tclient, conn, boot, rmId := hex.EncodeToString(t[0:8]), hex.EncodeToString(t[8:12]), hex.EncodeToString(t[12:16]), hex.EncodeToString(t[16:20])\n\treturn fmt.Sprintf(\"TxnId:%s-%s-%s-%v\", client, conn, boot, rmId)\n}\n\nfunc (txnId TxnId) ClientId() [ClientLen]byte {\n\tclient := [ClientLen]byte{}\n\tcopy(client[:], txnId[8:])\n\treturn client\n}\n\ntype Cmp int8\n\nconst (\n\tLT Cmp = iota - 1\n\tEQ\n\tGT\n)\n\nfunc (a *TxnId) Compare(b *TxnId) Cmp {\n\tswitch {\n\tcase a == b:\n\t\treturn EQ\n\tcase a == nil:\n\t\treturn LT\n\tcase b == nil:\n\t\treturn GT\n\tdefault:\n\t\tswitch cmp := bytes.Compare(a[:], b[:]); {\n\t\tcase cmp < 0:\n\t\t\treturn LT\n\t\tcase cmp > 0:\n\t\t\treturn GT\n\t\tdefault:\n\t\t\treturn EQ\n\t\t}\n\t}\n}\n\nfunc (a *VarUUId) Compare(b *VarUUId) Cmp {\n\tswitch {\n\tcase a == b:\n\t\treturn EQ\n\tcase a == nil:\n\t\treturn LT\n\tcase b == nil:\n\t\treturn GT\n\tdefault:\n\t\tswitch cmp := bytes.Compare(a[:], b[:]); {\n\t\tcase cmp < 0:\n\t\t\treturn LT\n\t\tcase cmp > 0:\n\t\t\treturn GT\n\t\tdefault:\n\t\t\treturn EQ\n\t\t}\n\t}\n}\n\ntype VarUUIds []*VarUUId\n\nfunc (vUUIds VarUUIds) Sort() { sort.Sort(vUUIds) }\nfunc (vUUIds VarUUIds) Len() int { return len(vUUIds) }\nfunc (vUUIds VarUUIds) Less(i, j int) bool { return vUUIds[i].Compare(vUUIds[j]) == LT }\nfunc (vUUIds VarUUIds) Swap(i, j int) { vUUIds[i], vUUIds[j] = vUUIds[j], vUUIds[i] }\n\ntype Positions capn.UInt8List\n\nfunc (a *Positions) Equal(b *Positions) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\taCap, bCap := (*capn.UInt8List)(a), (*capn.UInt8List)(b)\n\tif aCap.Len() != bCap.Len() {\n\t\treturn false\n\t}\n\tfor idx, l := 0, aCap.Len(); idx < l; idx++ {\n\t\tif aV, bV := aCap.At(idx), bCap.At(idx); aV != bV {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (p *Positions) String() string {\n\treturn fmt.Sprint((*capn.UInt8List)(p).ToArray())\n}\n\ntype RMIds []RMId\n\nfunc (rmIds RMIds) Sort() { sort.Sort(rmIds) }\nfunc (rmIds RMIds) Len() int { return len(rmIds) }\nfunc (rmIds RMIds) Less(i, j int) bool { return rmIds[i] < rmIds[j] }\nfunc (rmIds RMIds) Swap(i, j int) { rmIds[i], rmIds[j] = rmIds[j], rmIds[i] }\n\nfunc (a RMIds) Equal(b RMIds) bool {\n\tif a == nil || b == nil {\n\t\treturn a == nil && b == nil\n\t}\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, rA := range a {\n\t\tif b[idx] != rA {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (rmIds RMIds) EmptyLen() int {\n\tcount := 0\n\tfor _, rmId := range rmIds {\n\t\tif rmId == RMIdEmpty {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc (rmIds RMIds) NonEmptyLen() int {\n\tcount := 0\n\tfor _, rmId := range rmIds {\n\t\tif rmId != RMIdEmpty {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc (rmIds RMIds) NonEmpty() RMIds {\n\tnel := rmIds.NonEmptyLen()\n\tif nel == len(rmIds) {\n\t\treturn rmIds\n\t}\n\tnonEmpty := make([]RMId, 0, nel)\n\tfor _, rmId := range rmIds {\n\t\tif rmId != RMIdEmpty {\n\t\t\tnonEmpty = append(nonEmpty, rmId)\n\t\t}\n\t}\n\treturn nonEmpty\n}\n<commit_msg>Whoops, forgot to add these yesterday. Ref T6.<commit_after>package common\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\tcapn \"github.com\/glycerine\/go-capnproto\"\n\t\"sort\"\n)\n\nconst (\n\tRMIdEmpty = RMId(0)\n\tKeyLen = 20\n\tClientLen = KeyLen - 8\n)\n\ntype RMId uint32\n\nfunc (rmId RMId) String() string {\n\tif rmId == RMIdEmpty {\n\t\treturn \"RM:Empty\"\n\t} else {\n\t\treturn fmt.Sprintf(\"RM:%x\", uint32(rmId))\n\t}\n}\n\ntype KeyType [KeyLen]byte\n\ntype VarUUId KeyType\n\nfunc MakeVarUUId(data []byte) *VarUUId {\n\tuuid := VarUUId([KeyLen]byte{})\n\tcopy(uuid[:], data)\n\treturn &uuid\n}\n\nfunc MakeVarUUIdFromStr(str string) *VarUUId {\n\tdata, err := hex.DecodeString(str)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn MakeVarUUId(data)\n}\n\nfunc (vUUId VarUUId) String() string {\n\th := ([]byte)(hex.EncodeToString(vUUId[:]))\n\treturn fmt.Sprintf(\"VarUUId:%s\", h)\n}\n\nfunc (vUUId VarUUId) ConnectionCount() uint32 {\n\treturn binary.BigEndian.Uint32(vUUId[8:12])\n}\n\nfunc (vUUId VarUUId) BootCount() uint32 {\n\treturn binary.BigEndian.Uint32(vUUId[12:16])\n}\n\nfunc (vUUId VarUUId) RMId() RMId {\n\treturn RMId(binary.BigEndian.Uint32(vUUId[16:20]))\n}\n\ntype TxnId KeyType\n\nfunc MakeTxnId(data []byte) *TxnId {\n\tid := TxnId([KeyLen]byte{})\n\tcopy(id[:], data)\n\treturn &id\n}\n\nfunc (txnId TxnId) String() string {\n\tt := txnId[:]\n\tclient, conn, boot, rmId := hex.EncodeToString(t[0:8]), hex.EncodeToString(t[8:12]), hex.EncodeToString(t[12:16]), hex.EncodeToString(t[16:20])\n\treturn fmt.Sprintf(\"TxnId:%s-%s-%s-%v\", client, conn, boot, rmId)\n}\n\nfunc (txnId TxnId) ClientId() [ClientLen]byte {\n\tclient := [ClientLen]byte{}\n\tcopy(client[:], txnId[8:])\n\treturn client\n}\n\nfunc (txnId TxnId) ConnectionCount() uint32 {\n\treturn binary.BigEndian.Uint32(txnId[8:12])\n}\n\nfunc (txnId TxnId) BootCount() uint32 {\n\treturn binary.BigEndian.Uint32(txnId[12:16])\n}\n\nfunc (txnId TxnId) RMId() RMId {\n\treturn RMId(binary.BigEndian.Uint32(txnId[16:20]))\n}\n\ntype Cmp int8\n\nconst (\n\tLT Cmp = iota - 1\n\tEQ\n\tGT\n)\n\nfunc (a *TxnId) Compare(b *TxnId) Cmp {\n\tswitch {\n\tcase a == b:\n\t\treturn EQ\n\tcase a == nil:\n\t\treturn LT\n\tcase b == nil:\n\t\treturn GT\n\tdefault:\n\t\tswitch cmp := bytes.Compare(a[:], b[:]); {\n\t\tcase cmp < 0:\n\t\t\treturn LT\n\t\tcase cmp > 0:\n\t\t\treturn GT\n\t\tdefault:\n\t\t\treturn EQ\n\t\t}\n\t}\n}\n\nfunc (a *VarUUId) Compare(b *VarUUId) Cmp {\n\tswitch {\n\tcase a == b:\n\t\treturn EQ\n\tcase a == nil:\n\t\treturn LT\n\tcase b == nil:\n\t\treturn GT\n\tdefault:\n\t\tswitch cmp := bytes.Compare(a[:], b[:]); {\n\t\tcase cmp < 0:\n\t\t\treturn LT\n\t\tcase cmp > 0:\n\t\t\treturn GT\n\t\tdefault:\n\t\t\treturn EQ\n\t\t}\n\t}\n}\n\ntype VarUUIds []*VarUUId\n\nfunc (vUUIds VarUUIds) Sort() { sort.Sort(vUUIds) }\nfunc (vUUIds VarUUIds) Len() int { return len(vUUIds) }\nfunc (vUUIds VarUUIds) Less(i, j int) bool { return vUUIds[i].Compare(vUUIds[j]) == LT }\nfunc (vUUIds VarUUIds) Swap(i, j int) { vUUIds[i], vUUIds[j] = vUUIds[j], vUUIds[i] }\n\ntype Positions capn.UInt8List\n\nfunc (a *Positions) Equal(b *Positions) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\taCap, bCap := (*capn.UInt8List)(a), (*capn.UInt8List)(b)\n\tif aCap.Len() != bCap.Len() {\n\t\treturn false\n\t}\n\tfor idx, l := 0, aCap.Len(); idx < l; idx++ {\n\t\tif aV, bV := aCap.At(idx), bCap.At(idx); aV != bV {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (p *Positions) String() string {\n\treturn fmt.Sprint((*capn.UInt8List)(p).ToArray())\n}\n\ntype RMIds []RMId\n\nfunc (rmIds RMIds) Sort() { sort.Sort(rmIds) }\nfunc (rmIds RMIds) Len() int { return len(rmIds) }\nfunc (rmIds RMIds) Less(i, j int) bool { return rmIds[i] < rmIds[j] }\nfunc (rmIds RMIds) Swap(i, j int) { rmIds[i], rmIds[j] = rmIds[j], rmIds[i] }\n\nfunc (a RMIds) Equal(b RMIds) bool {\n\tif a == nil || b == nil {\n\t\treturn a == nil && b == nil\n\t}\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, rA := range a {\n\t\tif b[idx] != rA {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (rmIds RMIds) EmptyLen() int {\n\tcount := 0\n\tfor _, rmId := range rmIds {\n\t\tif rmId == RMIdEmpty {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc (rmIds RMIds) NonEmptyLen() int {\n\tcount := 0\n\tfor _, rmId := range rmIds {\n\t\tif rmId != RMIdEmpty {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc (rmIds RMIds) NonEmpty() RMIds {\n\tnel := rmIds.NonEmptyLen()\n\tif nel == len(rmIds) {\n\t\treturn rmIds\n\t}\n\tnonEmpty := make([]RMId, 0, nel)\n\tfor _, rmId := range rmIds {\n\t\tif rmId != RMIdEmpty {\n\t\t\tnonEmpty = append(nonEmpty, rmId)\n\t\t}\n\t}\n\treturn nonEmpty\n}\n<|endoftext|>"} {"text":"<commit_before>package gorill\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ ErrWriteAfterClose is returned if a Write is attempted after Close called.\ntype ErrWriteAfterClose struct{}\n\n\/\/ Error returns a string representation of a ErrWriteAfterClose error instance.\nfunc (e ErrWriteAfterClose) Error() string {\n\treturn \"cannot write; already closed\"\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype nopCloseWriter struct{ io.Writer }\n\nfunc (nopCloseWriter) Close() error { return nil }\n\n\/\/ NopCloseWriter returns a structure that implements io.WriteCloser, but provides a no-op Close\n\/\/ method. It is useful when you have an io.Writer that you must pass to a method that requires an\n\/\/ io.WriteCloser. It is the counter-part to ioutil.NopCloser, but for io.Writer.\n\/\/\n\/\/ iowc := gorill.NopCloseWriter(iow)\n\/\/ iowc.Close() \/\/ does nothing\nfunc NopCloseWriter(iow io.Writer) io.WriteCloser { return nopCloseWriter{iow} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype nopCloseReader struct{ io.Reader }\n\nfunc (nopCloseReader) Close() error { return nil }\n\n\/\/ NopCloseReader returns a structure that implements io.ReadCloser, but provides a no-op Close\n\/\/ method. It is useful when you have an io.Reader that you must pass to a method that requires an\n\/\/ io.ReadCloser. It is the same as ioutil.NopCloser, but for provided here for symmetry with\n\/\/ NopCloseWriter.\n\/\/\n\/\/ iorc := gorill.NopCloseReader(ior)\n\/\/ iorc.Close() \/\/ does nothing\nfunc NopCloseReader(ior io.Reader) io.ReadCloser { return nopCloseReader{ior} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype writeJob struct {\n\tdata []byte\n\tresults chan writeResult\n}\n\ntype writeResult struct {\n\tn int\n\terr error\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ NopCloseBuffer is a structure that wraps a buffer, but also provides a no-op Close method.\ntype NopCloseBuffer struct {\n\t*bytes.Buffer\n\tclosed bool\n}\n\n\/\/ Close returns nil error.\nfunc (m *NopCloseBuffer) Close() error { m.closed = true; return nil }\n\n\/\/ IsClosed returns false, unless NopCloseBuffer's Close method has been invoked\nfunc (m *NopCloseBuffer) IsClosed() bool { return m.closed }\n\n\/\/ NewNopCloseBuffer returns a structure that wraps bytes.Buffer with a no-op Close method. It can\n\/\/ be used in tests that need a bytes.Buffer.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ bb.Write([]byte(\"example\"))\n\/\/ bb.Close() \/\/ does nothing\nfunc NewNopCloseBuffer() *NopCloseBuffer {\n\treturn &NopCloseBuffer{Buffer: new(bytes.Buffer), closed: false}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype shortWriter struct {\n\tiow io.Writer\n\tmax int\n}\n\nfunc (s shortWriter) Write(data []byte) (int, error) {\n\tindex := len(data)\n\tif index > s.max {\n\t\tindex = s.max\n\t}\n\tn, err := s.iow.Write(data[:index])\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, io.ErrShortWrite\n}\n\n\/\/ ShortWriter returns a structure that wraps an io.Writer, but returns io.ErrShortWrite when the\n\/\/ number of bytes to write exceeds a preset limit.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ sw := gorill.ShortWriter(bb, 16)\n\/\/\n\/\/ n, err := sw.Write([]byte(\"short write\"))\n\/\/ \/\/ n == 11, err == nil\n\/\/\n\/\/ n, err := sw.Write([]byte(\"a somewhat longer write\"))\n\/\/ \/\/ n == 16, err == io.ErrShortWrite\nfunc ShortWriter(iow io.Writer, size int) io.Writer { return shortWriter{iow, size} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype slowWriter struct {\n\td time.Duration\n\tw io.Writer\n}\n\nfunc (s *slowWriter) Write(data []byte) (int, error) {\n\ttime.Sleep(s.d)\n\treturn s.w.Write(data)\n}\n\n\/\/ SlowWriter returns a structure that wraps an io.Writer, but sleeps prior to writing data to the\n\/\/ underlying io.Writer.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ sw := gorill.SlowWriter(bb, 10*time.Second)\n\/\/\n\/\/ n, err := sw.Write([]byte(\"example\")) \/\/ this call takes at least 10 seconds to return\n\/\/ \/\/ n == 7, err == nil\nfunc SlowWriter(w io.Writer, d time.Duration) io.Writer {\n\treturn &slowWriter{d: d, w: w}\n}\n<commit_msg>NopCloseBufferSize function creates NopCloseBuffer with specified size<commit_after>package gorill\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ ErrWriteAfterClose is returned if a Write is attempted after Close called.\ntype ErrWriteAfterClose struct{}\n\n\/\/ Error returns a string representation of a ErrWriteAfterClose error instance.\nfunc (e ErrWriteAfterClose) Error() string {\n\treturn \"cannot write; already closed\"\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype nopCloseWriter struct{ io.Writer }\n\nfunc (nopCloseWriter) Close() error { return nil }\n\n\/\/ NopCloseWriter returns a structure that implements io.WriteCloser, but provides a no-op Close\n\/\/ method. It is useful when you have an io.Writer that you must pass to a method that requires an\n\/\/ io.WriteCloser. It is the counter-part to ioutil.NopCloser, but for io.Writer.\n\/\/\n\/\/ iowc := gorill.NopCloseWriter(iow)\n\/\/ iowc.Close() \/\/ does nothing\nfunc NopCloseWriter(iow io.Writer) io.WriteCloser { return nopCloseWriter{iow} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype nopCloseReader struct{ io.Reader }\n\nfunc (nopCloseReader) Close() error { return nil }\n\n\/\/ NopCloseReader returns a structure that implements io.ReadCloser, but provides a no-op Close\n\/\/ method. It is useful when you have an io.Reader that you must pass to a method that requires an\n\/\/ io.ReadCloser. It is the same as ioutil.NopCloser, but for provided here for symmetry with\n\/\/ NopCloseWriter.\n\/\/\n\/\/ iorc := gorill.NopCloseReader(ior)\n\/\/ iorc.Close() \/\/ does nothing\nfunc NopCloseReader(ior io.Reader) io.ReadCloser { return nopCloseReader{ior} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype writeJob struct {\n\tdata []byte\n\tresults chan writeResult\n}\n\ntype writeResult struct {\n\tn int\n\terr error\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ NopCloseBuffer is a structure that wraps a buffer, but also provides a no-op Close method.\ntype NopCloseBuffer struct {\n\t*bytes.Buffer\n\tclosed bool\n}\n\n\/\/ Close returns nil error.\nfunc (m *NopCloseBuffer) Close() error { m.closed = true; return nil }\n\n\/\/ IsClosed returns false, unless NopCloseBuffer's Close method has been invoked\nfunc (m *NopCloseBuffer) IsClosed() bool { return m.closed }\n\n\/\/ NewNopCloseBuffer returns a structure that wraps bytes.Buffer with a no-op Close method. It can\n\/\/ be used in tests that need a bytes.Buffer.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ bb.Write([]byte(\"example\"))\n\/\/ bb.Close() \/\/ does nothing\nfunc NewNopCloseBuffer() *NopCloseBuffer {\n\treturn &NopCloseBuffer{Buffer: new(bytes.Buffer), closed: false}\n}\n\nfunc NewNopCloseBufferSize(size int) *NopCloseBuffer {\n\treturn &NopCloseBuffer{Buffer: bytes.NewBuffer(make([]byte, 0, size)), closed: false}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype shortWriter struct {\n\tiow io.Writer\n\tmax int\n}\n\nfunc (s shortWriter) Write(data []byte) (int, error) {\n\tindex := len(data)\n\tif index > s.max {\n\t\tindex = s.max\n\t}\n\tn, err := s.iow.Write(data[:index])\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, io.ErrShortWrite\n}\n\n\/\/ ShortWriter returns a structure that wraps an io.Writer, but returns io.ErrShortWrite when the\n\/\/ number of bytes to write exceeds a preset limit.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ sw := gorill.ShortWriter(bb, 16)\n\/\/\n\/\/ n, err := sw.Write([]byte(\"short write\"))\n\/\/ \/\/ n == 11, err == nil\n\/\/\n\/\/ n, err := sw.Write([]byte(\"a somewhat longer write\"))\n\/\/ \/\/ n == 16, err == io.ErrShortWrite\nfunc ShortWriter(iow io.Writer, size int) io.Writer { return shortWriter{iow, size} }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype slowWriter struct {\n\td time.Duration\n\tw io.Writer\n}\n\nfunc (s *slowWriter) Write(data []byte) (int, error) {\n\ttime.Sleep(s.d)\n\treturn s.w.Write(data)\n}\n\n\/\/ SlowWriter returns a structure that wraps an io.Writer, but sleeps prior to writing data to the\n\/\/ underlying io.Writer.\n\/\/\n\/\/ bb := gorill.NopCloseBuffer()\n\/\/ sw := gorill.SlowWriter(bb, 10*time.Second)\n\/\/\n\/\/ n, err := sw.Write([]byte(\"example\")) \/\/ this call takes at least 10 seconds to return\n\/\/ \/\/ n == 7, err == nil\nfunc SlowWriter(w io.Writer, d time.Duration) io.Writer {\n\treturn &slowWriter{d: d, w: w}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Commonly used types in a single place. Purely for organisation purposes.\n*\/\n\npackage main\n\n\/\/ Board stores board metadata and the OPs of all threads\ntype Board struct {\n\tCtr int `json:\"ctr\",gorethink:\"ctr\"`\n\tThreads []Thread `json:\"threads\",gorethink:\"threads\"`\n}\n\n\/\/ Thread stores the metadata and posts of a single thread\ntype Thread struct {\n\tID int `json:\"id,omitempty\",gorethink:\"id\"`\n\tIP string `json:\"-\",gorethink:\"ip\"`\n\tBoard string `json:\"board,omitempty\",gorethink:\"board\"`\n\tTime int `json:\"time,omitempty\",gorethink:\"time\"`\n\tBumpTime int `json:\"bumpTime,omitempty\",gorethink:\"bumpTime\"`\n\tNonce string `json:\"-\",gorethink:\"nonce\"`\n\tPosts map[string]Post `json:\"posts,omitempty\",gorethink:\"posts\"`\n\tHistory []Message `json:\"-\",gorethink:\"history\"`\n\tHistCtr int `json:\"histCtr,omitempty\",gorethink:\"histCtr\"`\n\tReplyCtr int `json:\"replyCtr,omitempty\",gorethink:\"replyCtr\"`\n\tImageCtr int `json:\"imageCtr,omitempty\",gorethink:\"imageCtr\"`\n\tOP Post `json:\"-\",gorethink:\"op\"` \/\/ For internal use\n}\n\n\/\/ Message is the universal transport container of all live updates through\n\/\/ websockets\ntype Message struct {\n\tType string `json:\"type\",gorethink:\"type\"`\n\n\t\/\/ If present, determines a priviledged access level, the client has to\n\t\/\/ have, to recieve this message\n\tPriv string `json:\"priv,omitempty\",gorethink:\"priv,omitempty\"`\n\n\t\/\/ The actual contents of the message. Very variadic, thus interface{}.\n\tMsg interface{} `json:\"msg,omitempty\",gorethink:\"msg,omitempty\"`\n}\n\n\/\/ Post is a generic post. Either OP or reply.\ntype Post struct {\n\tID int `json:\"id,omitempty\",gorethink:\"id\"`\n\tIP string `json:\"-\",gorethink:\"ip\"`\n\tOP int `json:\"op,omitempty\",gorethink:\"op\"`\n\tBoard string `json:\"board,omitempty\",gorethink:\"board\"`\n\tTime int `json:\"time,omitempty\",gorethink:\"time\"`\n\tNonce string `json:\"-\",gorethink:\"nonce\"`\n\tEditing bool `json:\"editing,omitempty\",gorethink:\"editing,omitempty\"`\n\tBody string `json:\"body,omitempty\",gorethink:\"body\"`\n\tDeleted bool `json:\"-\",gorethink:\"deleted\"`\n\tImgDeleted bool `json:\"-\",gorethink:\"imgDeleted\"`\n\tImage Image `json:\"image,omitempty\",gorethink:\"image,omitempty\"`\n\tName string `json:\"name,omitempty\",gorethink:\"name,omitempty\"`\n\tTrip string `json:\"trip,omitempty\",gorethink:\"trip,omitempty\"`\n\tEmail string `json:\"email,omitempty\",gorethink:\"email,omitempty\"`\n\tAuth string `json:\"auth,omitempty\",gorethink:\"auth,omitempty\"`\n\tDice Dice `json:\"dice,omitempty\",gorethink:\"dice,omitempty\"`\n\tLinks LinkMap `json:\"links,omitempty\",gorethink:\"links,omitempty\"`\n\tBacklinks LinkMap `json:\"backlinks,omitempty\",gorethink:\"backlinks,omitempty\"`\n\tMod ModerationList `json:\"mod,omitempty\",gorethink:\"mod,omitempty\"`\n}\n\n\/\/ Image contains a post's image and thumbanail data\ntype Image struct {\n\tSrc string `json:\"src,omitempty\",gorethink:\"src\"`\n\tThumb string `json:\"thumb,omitempty\",gorethink:\"thumb,omitempty\"`\n\tMid string `json:\"mid,omitempty\",gorethink:\"mid,omitempty\"`\n\tDims []int `json:\"dims,omitempty\",gorethink:\"dims\"`\n\tExt string `json:\"ext,omitempty\",gorethink:\"ext\"`\n\tSize int `json:\"size,omitempty\",gorethink:\"size\"`\n\tMD5 string `json:\",omitempty\"`\n\tSHA1 string `json:\",omitempty\"`\n\tImgnm string `json:\"imgnm,omitempty\",gorethink:\"imgnm,omitempty\"`\n\tSpoiler int `json:\"spoiler,omitempty\",gorethink:\"spoiler,omitempty\"`\n\tAPNG bool `json:\"apng,omitempty\",gorethink:\"apng,omitempty\"`\n\tAudio bool `json:\"audio,omitempty\",gorethink:\"audio,omitempty\"`\n\tLength string `json:\"lenght,omitempty\",gorethink:\"lenght,omitempty\"`\n}\n\n\/\/ Dice stores # command information of the post in exectution order\ntype Dice []Roll\n\n\/\/ Roll represents a single hash command. It always contains the Type field,\n\/\/ which determines, which of the other fields are present.\ntype Roll struct {\n\tType string `json:\"type\",gorethink:\"type\"`\n\tBool bool `json:\"bool,omitempty\",gorethink:\"bool,omitempty\"`\n\tInt int `json:\"int,omitempty\",gorethink:\"int,omitempty\"`\n\tInts []int `json:\"ints,omitempty\",gorethink:\"ints,omitempty\"`\n\tString string `json:\"string,omitempty\",gorethink:\"string,omitempty\"`\n}\n\n\/\/ LinkMap contains a map of post numbers, this tread is linking, to\n\/\/ corresponding Link tuples\ntype LinkMap map[string]Link\n\n\/\/ Link stores the target post's parent board and parent thread\ntype Link struct {\n\tBoard string `json:\"board\",gorethink:\"board\"`\n\tOP int `json:\"op\",gorethink:\"op\"`\n}\n\n\/\/ Ident is used to verify a client's access and write permissions\ntype Ident struct {\n\t\/\/ Indicates priveledged access rights for staff.\n\tAuth string\n\tBanned bool\n\tIP string\n}\n\n\/\/ ModerationList contains modration acts commited on this post.\n\/\/ TEMP\ntype ModerationList []interface{}\n<commit_msg>Always pass thread counters in JSON<commit_after>\/*\n Commonly used types in a single place. Purely for organisation purposes.\n*\/\n\npackage main\n\n\/\/ Board stores board metadata and the OPs of all threads\ntype Board struct {\n\tCtr int `json:\"ctr\",gorethink:\"ctr\"`\n\tThreads []Thread `json:\"threads\",gorethink:\"threads\"`\n}\n\n\/\/ Thread stores the metadata and posts of a single thread\ntype Thread struct {\n\tID int `json:\"id,omitempty\",gorethink:\"id\"`\n\tIP string `json:\"-\",gorethink:\"ip\"`\n\tBoard string `json:\"board,omitempty\",gorethink:\"board\"`\n\tTime int `json:\"time,omitempty\",gorethink:\"time\"`\n\tBumpTime int `json:\"bumpTime,omitempty\",gorethink:\"bumpTime\"`\n\tNonce string `json:\"-\",gorethink:\"nonce\"`\n\tPosts map[string]Post `json:\"posts,omitempty\",gorethink:\"posts\"`\n\tHistory []Message `json:\"-\",gorethink:\"history\"`\n\tHistCtr int `json:\"histCtr\",gorethink:\"histCtr\"`\n\tReplyCtr int `json:\"replyCtr\",gorethink:\"replyCtr\"`\n\tImageCtr int `json:\"imageCtr\",gorethink:\"imageCtr\"`\n\tOP Post `json:\"-\",gorethink:\"op\"` \/\/ For internal use\n}\n\n\/\/ Message is the universal transport container of all live updates through\n\/\/ websockets\ntype Message struct {\n\tType string `json:\"type\",gorethink:\"type\"`\n\n\t\/\/ If present, determines a priviledged access level, the client has to\n\t\/\/ have, to recieve this message\n\tPriv string `json:\"priv,omitempty\",gorethink:\"priv,omitempty\"`\n\n\t\/\/ The actual contents of the message. Very variadic, thus interface{}.\n\tMsg interface{} `json:\"msg,omitempty\",gorethink:\"msg,omitempty\"`\n}\n\n\/\/ Post is a generic post. Either OP or reply.\ntype Post struct {\n\tID int `json:\"id,omitempty\",gorethink:\"id\"`\n\tIP string `json:\"-\",gorethink:\"ip\"`\n\tOP int `json:\"op,omitempty\",gorethink:\"op\"`\n\tBoard string `json:\"board,omitempty\",gorethink:\"board\"`\n\tTime int `json:\"time,omitempty\",gorethink:\"time\"`\n\tNonce string `json:\"-\",gorethink:\"nonce\"`\n\tEditing bool `json:\"editing,omitempty\",gorethink:\"editing,omitempty\"`\n\tBody string `json:\"body,omitempty\",gorethink:\"body\"`\n\tDeleted bool `json:\"-\",gorethink:\"deleted\"`\n\tImgDeleted bool `json:\"-\",gorethink:\"imgDeleted\"`\n\tImage Image `json:\"image,omitempty\",gorethink:\"image,omitempty\"`\n\tName string `json:\"name,omitempty\",gorethink:\"name,omitempty\"`\n\tTrip string `json:\"trip,omitempty\",gorethink:\"trip,omitempty\"`\n\tEmail string `json:\"email,omitempty\",gorethink:\"email,omitempty\"`\n\tAuth string `json:\"auth,omitempty\",gorethink:\"auth,omitempty\"`\n\tDice Dice `json:\"dice,omitempty\",gorethink:\"dice,omitempty\"`\n\tLinks LinkMap `json:\"links,omitempty\",gorethink:\"links,omitempty\"`\n\tBacklinks LinkMap `json:\"backlinks,omitempty\",gorethink:\"backlinks,omitempty\"`\n\tMod ModerationList `json:\"mod,omitempty\",gorethink:\"mod,omitempty\"`\n}\n\n\/\/ Image contains a post's image and thumbanail data\ntype Image struct {\n\tSrc string `json:\"src,omitempty\",gorethink:\"src\"`\n\tThumb string `json:\"thumb,omitempty\",gorethink:\"thumb,omitempty\"`\n\tMid string `json:\"mid,omitempty\",gorethink:\"mid,omitempty\"`\n\tDims []int `json:\"dims,omitempty\",gorethink:\"dims\"`\n\tExt string `json:\"ext,omitempty\",gorethink:\"ext\"`\n\tSize int `json:\"size,omitempty\",gorethink:\"size\"`\n\tMD5 string `json:\",omitempty\"`\n\tSHA1 string `json:\",omitempty\"`\n\tImgnm string `json:\"imgnm,omitempty\",gorethink:\"imgnm,omitempty\"`\n\tSpoiler int `json:\"spoiler,omitempty\",gorethink:\"spoiler,omitempty\"`\n\tAPNG bool `json:\"apng,omitempty\",gorethink:\"apng,omitempty\"`\n\tAudio bool `json:\"audio,omitempty\",gorethink:\"audio,omitempty\"`\n\tLength string `json:\"lenght,omitempty\",gorethink:\"lenght,omitempty\"`\n}\n\n\/\/ Dice stores # command information of the post in exectution order\ntype Dice []Roll\n\n\/\/ Roll represents a single hash command. It always contains the Type field,\n\/\/ which determines, which of the other fields are present.\ntype Roll struct {\n\tType string `json:\"type\",gorethink:\"type\"`\n\tBool bool `json:\"bool,omitempty\",gorethink:\"bool,omitempty\"`\n\tInt int `json:\"int,omitempty\",gorethink:\"int,omitempty\"`\n\tInts []int `json:\"ints,omitempty\",gorethink:\"ints,omitempty\"`\n\tString string `json:\"string,omitempty\",gorethink:\"string,omitempty\"`\n}\n\n\/\/ LinkMap contains a map of post numbers, this tread is linking, to\n\/\/ corresponding Link tuples\ntype LinkMap map[string]Link\n\n\/\/ Link stores the target post's parent board and parent thread\ntype Link struct {\n\tBoard string `json:\"board\",gorethink:\"board\"`\n\tOP int `json:\"op\",gorethink:\"op\"`\n}\n\n\/\/ Ident is used to verify a client's access and write permissions\ntype Ident struct {\n\t\/\/ Indicates priveledged access rights for staff.\n\tAuth string\n\tBanned bool\n\tIP string\n}\n\n\/\/ ModerationList contains modration acts commited on this post.\n\/\/ TEMP\ntype ModerationList []interface{}\n<|endoftext|>"} {"text":"<commit_before>package vsolver\n\ntype ProjectName string\n\ntype ProjectAtom struct {\n\tName ProjectName\n\tVersion Version\n}\n\nvar emptyProjectAtom ProjectAtom\n\ntype ProjectDep struct {\n\tName ProjectName\n\tConstraint Constraint\n}\n\ntype Dependency struct {\n\tDepender ProjectAtom\n\tDep ProjectDep\n}\n\n\/\/ ProjectInfo holds the spec and lock information for a given ProjectAtom\ntype ProjectInfo struct {\n\tpa ProjectAtom\n\tManifest\n\tLock\n}\n\n\/\/ LockedProject is a single project entry from a lock file. It expresses the\n\/\/ project's name, one or both of version and underlying revision, the URI for\n\/\/ accessing it, and the path at which it should be placed within a vendor\n\/\/ directory.\n\/\/\n\/\/ TODO note that sometime soon, we also plan to allow pkgs. this'll change\ntype LockedProject struct {\n\tn ProjectName\n\tv UnpairedVersion\n\tr Revision\n\tpath, uri string\n}\n\n\/\/ NewLockedProject creates a new LockedProject struct with a given name,\n\/\/ version, upstream repository URI, and on-disk path at which the project is to\n\/\/ be checked out under a vendor directory.\n\/\/\n\/\/ Note that passing a nil version will cause a panic. This is a correctness\n\/\/ measure to ensure that the solver is never exposed to a version-less lock\n\/\/ entry. Such a case would be meaningless - the solver would have no choice but\n\/\/ to simply dismiss that project. By creating a hard failure case via panic\n\/\/ instead, we are trying to avoid inflicting the resulting pain on the user by\n\/\/ instead forcing a decision on the Analyzer implementation.\nfunc NewLockedProject(n ProjectName, v Version, uri, path string) LockedProject {\n\tif v == nil {\n\t\tpanic(\"must provide a non-nil version to create a LockedProject\")\n\t}\n\n\tlp := LockedProject{\n\t\tn: n,\n\t\turi: uri,\n\t\tpath: path,\n\t}\n\n\tswitch tv := v.(type) {\n\tcase Revision:\n\t\tlp.r = tv\n\tcase branchVersion:\n\t\tlp.v = tv\n\tcase semVersion:\n\t\tlp.v = tv\n\tcase plainVersion:\n\t\tlp.v = tv\n\tcase versionPair:\n\t\tlp.r = tv.r\n\t\tlp.v = tv.v\n\t}\n\n\treturn lp\n}\n\n\/\/ Name returns the name of the locked project.\nfunc (lp LockedProject) Name() ProjectName {\n\treturn lp.n\n}\n\n\/\/ Version assembles together whatever version and\/or revision data is\n\/\/ available into a single Version.\nfunc (lp LockedProject) Version() Version {\n\tif lp.r == \"\" {\n\t\treturn lp.v\n\t}\n\n\tif lp.v == nil {\n\t\treturn lp.r\n\t}\n\n\treturn lp.v.Is(lp.r)\n}\n\n\/\/ URI returns the upstream URI of the locked project.\nfunc (lp LockedProject) URI() string {\n\treturn lp.uri\n}\n\n\/\/ Path returns the path relative to the vendor directory to which the locked\n\/\/ project should be checked out.\nfunc (lp LockedProject) Path() string {\n\treturn lp.path\n}\n\nfunc (lp LockedProject) toAtom() ProjectAtom {\n\tpa := ProjectAtom{\n\t\tName: lp.n,\n\t}\n\n\tif lp.v == nil {\n\t\tpa.Version = lp.r\n\t} else if lp.r != \"\" {\n\t\tpa.Version = lp.v.Is(lp.r)\n\t} else {\n\t\tpa.Version = lp.v\n\t}\n\n\treturn pa\n}\n\n\/\/ Manifest represents the data from a manifest file (or however the\n\/\/ implementing tool chooses to store it) at a particular version that is\n\/\/ relevant to the satisfiability solving process:\n\/\/\n\/\/ - A list of dependencies: project name, and a constraint\n\/\/ - A list of development-time dependencies (e.g. for testing - only\n\/\/ the root project's are incorporated)\n\/\/\n\/\/ Finding a solution that satisfies the constraints expressed by all of these\n\/\/ dependencies (and those from all other projects, transitively), is what the\n\/\/ solver does.\n\/\/\n\/\/ Note that vsolver does perform static analysis on all projects' codebases;\n\/\/ if dependencies it finds through that analysis are missing from what the\n\/\/ Manifest lists, it is considered an error that will eliminate that version\n\/\/ from consideration in the solving algorithm.\ntype Manifest interface {\n\tName() ProjectName\n\tGetDependencies() []ProjectDep\n\tGetDevDependencies() []ProjectDep\n}\n\n\/\/ Lock represents data from a lock file (or however the implementing tool\n\/\/ chooses to store it) at a particular version that is relevant to the\n\/\/ satisfiability solving process.\n\/\/\n\/\/ In general, the information produced by vsolver on finding a successful\n\/\/ solution is all that would be necessary to constitute a lock file, though\n\/\/ tools can include whatever other information they want in their storage.\ntype Lock interface {\n\t\/\/ Indicates the version of the solver used to generate this lock data\n\t\/\/SolverVersion() string\n\n\t\/\/ The hash of inputs to vsolver that resulted in this lock data\n\tInputHash() []byte\n\n\t\/\/ Projects returns the list of LockedProjects contained in the lock data.\n\tProjects() []LockedProject\n}\n\n\/\/ SimpleLock is a helper for tools to simply enumerate lock data when they know\n\/\/ that no hash, or other complex information, is available.\ntype SimpleLock []LockedProject\n\nvar _ Lock = SimpleLock{}\n\n\/\/ InputHash always returns an empty string for SimpleLock. This makes it useless\n\/\/ as a stable lock to be written to disk, but still useful for some ephemeral\n\/\/ purposes.\nfunc (SimpleLock) InputHash() []byte {\n\treturn nil\n}\n\n\/\/ Projects returns the entire contents of the SimpleLock.\nfunc (l SimpleLock) Projects() []LockedProject {\n\treturn l\n}\n\n\/\/ SimpleManifest is a helper for tools to enumerate manifest data. It's\n\/\/ intended for ephemeral manifests, such as those created by Analyzers on the\n\/\/ fly.\ntype SimpleManifest struct {\n\tN ProjectName\n\tP []ProjectDep\n\tDP []ProjectDep\n}\n\nvar _ Manifest = SimpleManifest{}\n\n\/\/ Name returns the name of the project described by the manifest.\nfunc (m SimpleManifest) Name() ProjectName {\n\treturn m.N\n}\n\n\/\/ GetDependencies returns the project's dependencies.\nfunc (m SimpleManifest) GetDependencies() []ProjectDep {\n\treturn m.P\n}\n\n\/\/ GetDependencies returns the project's test dependencies.\nfunc (m SimpleManifest) GetDevDependencies() []ProjectDep {\n\treturn m.DP\n}\n<commit_msg>Maybe the new struct<commit_after>package vsolver\n\ntype ProjectIdentifier struct {\n\tName ProjectName\n\tNetworkURI string\n}\n\ntype ProjectName string\n\ntype ProjectAtom struct {\n\tName ProjectName \/\/ TODO to ProjectIdentifier\n\tVersion Version\n}\n\nvar emptyProjectAtom ProjectAtom\n\ntype ProjectDep struct {\n\tName ProjectName \/\/ TODO to ProjectIdentifier\n\tConstraint Constraint\n}\n\ntype Dependency struct {\n\tDepender ProjectAtom\n\tDep ProjectDep\n}\n\n\/\/ ProjectInfo holds the spec and lock information for a given ProjectAtom\ntype ProjectInfo struct {\n\tpa ProjectAtom\n\tManifest\n\tLock\n}\n\n\/\/ LockedProject is a single project entry from a lock file. It expresses the\n\/\/ project's name, one or both of version and underlying revision, the URI for\n\/\/ accessing it, and the path at which it should be placed within a vendor\n\/\/ directory.\n\/\/\n\/\/ TODO note that sometime soon, we also plan to allow pkgs. this'll change\ntype LockedProject struct {\n\tn ProjectName\n\tv UnpairedVersion\n\tr Revision\n\tpath, uri string\n}\n\n\/\/ NewLockedProject creates a new LockedProject struct with a given name,\n\/\/ version, upstream repository URI, and on-disk path at which the project is to\n\/\/ be checked out under a vendor directory.\n\/\/\n\/\/ Note that passing a nil version will cause a panic. This is a correctness\n\/\/ measure to ensure that the solver is never exposed to a version-less lock\n\/\/ entry. Such a case would be meaningless - the solver would have no choice but\n\/\/ to simply dismiss that project. By creating a hard failure case via panic\n\/\/ instead, we are trying to avoid inflicting the resulting pain on the user by\n\/\/ instead forcing a decision on the Analyzer implementation.\nfunc NewLockedProject(n ProjectName, v Version, uri, path string) LockedProject {\n\tif v == nil {\n\t\tpanic(\"must provide a non-nil version to create a LockedProject\")\n\t}\n\n\tlp := LockedProject{\n\t\tn: n,\n\t\turi: uri,\n\t\tpath: path,\n\t}\n\n\tswitch tv := v.(type) {\n\tcase Revision:\n\t\tlp.r = tv\n\tcase branchVersion:\n\t\tlp.v = tv\n\tcase semVersion:\n\t\tlp.v = tv\n\tcase plainVersion:\n\t\tlp.v = tv\n\tcase versionPair:\n\t\tlp.r = tv.r\n\t\tlp.v = tv.v\n\t}\n\n\treturn lp\n}\n\n\/\/ Name returns the name of the locked project.\nfunc (lp LockedProject) Name() ProjectName {\n\treturn lp.n\n}\n\n\/\/ Version assembles together whatever version and\/or revision data is\n\/\/ available into a single Version.\nfunc (lp LockedProject) Version() Version {\n\tif lp.r == \"\" {\n\t\treturn lp.v\n\t}\n\n\tif lp.v == nil {\n\t\treturn lp.r\n\t}\n\n\treturn lp.v.Is(lp.r)\n}\n\n\/\/ URI returns the upstream URI of the locked project.\nfunc (lp LockedProject) URI() string {\n\treturn lp.uri\n}\n\n\/\/ Path returns the path relative to the vendor directory to which the locked\n\/\/ project should be checked out.\nfunc (lp LockedProject) Path() string {\n\treturn lp.path\n}\n\nfunc (lp LockedProject) toAtom() ProjectAtom {\n\tpa := ProjectAtom{\n\t\tName: lp.n,\n\t}\n\n\tif lp.v == nil {\n\t\tpa.Version = lp.r\n\t} else if lp.r != \"\" {\n\t\tpa.Version = lp.v.Is(lp.r)\n\t} else {\n\t\tpa.Version = lp.v\n\t}\n\n\treturn pa\n}\n\n\/\/ Manifest represents the data from a manifest file (or however the\n\/\/ implementing tool chooses to store it) at a particular version that is\n\/\/ relevant to the satisfiability solving process:\n\/\/\n\/\/ - A list of dependencies: project name, and a constraint\n\/\/ - A list of development-time dependencies (e.g. for testing - only\n\/\/ the root project's are incorporated)\n\/\/\n\/\/ Finding a solution that satisfies the constraints expressed by all of these\n\/\/ dependencies (and those from all other projects, transitively), is what the\n\/\/ solver does.\n\/\/\n\/\/ Note that vsolver does perform static analysis on all projects' codebases;\n\/\/ if dependencies it finds through that analysis are missing from what the\n\/\/ Manifest lists, it is considered an error that will eliminate that version\n\/\/ from consideration in the solving algorithm.\ntype Manifest interface {\n\tName() ProjectName\n\tGetDependencies() []ProjectDep\n\tGetDevDependencies() []ProjectDep\n}\n\n\/\/ Lock represents data from a lock file (or however the implementing tool\n\/\/ chooses to store it) at a particular version that is relevant to the\n\/\/ satisfiability solving process.\n\/\/\n\/\/ In general, the information produced by vsolver on finding a successful\n\/\/ solution is all that would be necessary to constitute a lock file, though\n\/\/ tools can include whatever other information they want in their storage.\ntype Lock interface {\n\t\/\/ Indicates the version of the solver used to generate this lock data\n\t\/\/SolverVersion() string\n\n\t\/\/ The hash of inputs to vsolver that resulted in this lock data\n\tInputHash() []byte\n\n\t\/\/ Projects returns the list of LockedProjects contained in the lock data.\n\tProjects() []LockedProject\n}\n\n\/\/ SimpleLock is a helper for tools to simply enumerate lock data when they know\n\/\/ that no hash, or other complex information, is available.\ntype SimpleLock []LockedProject\n\nvar _ Lock = SimpleLock{}\n\n\/\/ InputHash always returns an empty string for SimpleLock. This makes it useless\n\/\/ as a stable lock to be written to disk, but still useful for some ephemeral\n\/\/ purposes.\nfunc (SimpleLock) InputHash() []byte {\n\treturn nil\n}\n\n\/\/ Projects returns the entire contents of the SimpleLock.\nfunc (l SimpleLock) Projects() []LockedProject {\n\treturn l\n}\n\n\/\/ SimpleManifest is a helper for tools to enumerate manifest data. It's\n\/\/ intended for ephemeral manifests, such as those created by Analyzers on the\n\/\/ fly.\ntype SimpleManifest struct {\n\tN ProjectName\n\tP []ProjectDep\n\tDP []ProjectDep\n}\n\nvar _ Manifest = SimpleManifest{}\n\n\/\/ Name returns the name of the project described by the manifest.\nfunc (m SimpleManifest) Name() ProjectName {\n\treturn m.N\n}\n\n\/\/ GetDependencies returns the project's dependencies.\nfunc (m SimpleManifest) GetDependencies() []ProjectDep {\n\treturn m.P\n}\n\n\/\/ GetDependencies returns the project's test dependencies.\nfunc (m SimpleManifest) GetDevDependencies() []ProjectDep {\n\treturn m.DP\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/Copyright 2014 Aaron Goldman. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file\n\n\/\/Package web implements a web interface\n\/\/It is started by calling web.Start\npackage web\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/AaronGoldman\/ccfs\/objects\"\n\t\"github.com\/AaronGoldman\/ccfs\/services\"\n)\n\n\/\/BlobServerStart starts a server for the content services\nfunc BlobServerStart() {\n\thttp.Handle(\n\t\t\"\/b\/\",\n\t\thttp.StripPrefix(\n\t\t\t\"\/b\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/blobs\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"\/t\/\",\n\t\thttp.StripPrefix(\n\t\t\t\"\/t\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/tags\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"\/c\/\",\n\t\thttp.StripPrefix(\"\/c\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/commits\")),\n\t\t),\n\t)\n\thttp.ListenAndServe(\":8080\", nil)\n}\n\n\/\/CollectionServerStart starts a server for full CCFS queries HKID\/path\nfunc CollectionServerStart() {\n\thttp.HandleFunc(\"\/r\/\", webRepositoryHandler)\n\thttp.HandleFunc(\"\/d\/\", webDomainHandler)\n}\n\nfunc webRepositoryHandler(w http.ResponseWriter, r *http.Request) {\n\tparts := strings.SplitN(r.RequestURI[3:], \"\/\", 2)\n\thkidhex := parts[0]\n\tpath := \"\"\n\tif len(parts) > 1 {\n\t\tpath = parts[1]\n\t}\n\terr := error(nil)\n\tif len(hkidhex) == 64 {\n\t\th, err := objects.HkidFromHex(hkidhex)\n\t\tif err == nil {\n\t\t\tb, err := services.Get(h, path)\n\t\t\tif err == nil {\n\t\t\t\tw.Write(b.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, fmt.Sprint(\n\t\t\t\t\"HTTP Error 500 Internal server error\\n\\n\", err), 500)\n\t\t}\n\t}\n\tw.Write([]byte(fmt.Sprintf(\"Invalid HKID\\nerr: %v\", err)))\n\treturn\n}\nfunc webDomainHandler(w http.ResponseWriter, r *http.Request) {\n\tparts := strings.SplitN(r.RequestURI[3:], \"\/\", 2)\n\thkidhex := parts[0]\n\tpath := \"\"\n\tif len(parts) > 1 {\n\t\tpath = parts[1]\n\t}\n\terr := error(nil)\n\tif len(hkidhex) == 64 {\n\t\th, err := objects.HkidFromHex(hkidhex)\n\t\tif err == nil {\n\t\t\tb, err := services.GetD(h, path)\n\t\t\tif err == nil {\n\t\t\t\tw.Write(b.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, fmt.Sprint(\n\t\t\t\t\"HTTP Error 500 Internal server error\\n\\n\", err), 500)\n\t\t}\n\t}\n\tw.Write([]byte(fmt.Sprintf(\"Invalid HKID\\nerr: %v\", err)))\n\treturn\n}\n\nfunc composeQuery(typestring string, hash objects.HKID, namesegment string) (message string) {\n\tmessage = fmt.Sprintf(\"%s,%s\", typestring, hash.String())\n\tif namesegment != \"\" {\n\t\tmessage = fmt.Sprintf(\"%s,%s\", message, namesegment)\n\t}\n\treturn message\n\n}\n\nfunc parseQuery(message string) (typestring string, hash objects.HKID, namesegment string) {\n\tarrMessage := strings.SplitN(message, \",\", 3)\n\tif len(arrMessage) < 2 {\n\t\tpanic(\"error: malformed parseQuery\")\n\t}\n\ttypestring = arrMessage[0]\n\n\thash, err := objects.HkidFromHex(arrMessage[1])\n\tif err != nil {\n\t\tpanic(\"error: malformed hexadecimal\")\n\t}\n\tif len(arrMessage) > 2 {\n\t\tnamesegment = arrMessage[2]\n\t} else {\n\t\tnamesegment = \"\"\n\t}\n\tfmt.Println(\"Error\")\n\treturn typestring, hash, namesegment\n}\n\n\/\/Start begins the listening for web reqwests on 8080\nfunc Start() {\n\tgo BlobServerStart()\n\tgo CollectionServerStart()\n}\n<commit_msg>diplay the port the server is started on<commit_after>\/\/Copyright 2014 Aaron Goldman. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file\n\n\/\/Package web implements a web interface\n\/\/It is started by calling web.Start\npackage web\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/AaronGoldman\/ccfs\/objects\"\n\t\"github.com\/AaronGoldman\/ccfs\/services\"\n)\n\n\/\/BlobServerStart starts a server for the content services\nfunc BlobServerStart() {\n\thttp.Handle(\n\t\t\"\/b\/\",\n\t\thttp.StripPrefix(\n\t\t\t\"\/b\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/blobs\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"\/t\/\",\n\t\thttp.StripPrefix(\n\t\t\t\"\/t\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/tags\")),\n\t\t),\n\t)\n\thttp.Handle(\n\t\t\"\/c\/\",\n\t\thttp.StripPrefix(\"\/c\/\",\n\t\t\thttp.FileServer(http.Dir(\"bin\/commits\")),\n\t\t),\n\t)\n\thttp.ListenAndServe(\":8080\", nil)\n\tfmt.Println(\"Listening on :8080\")\n}\n\n\/\/CollectionServerStart starts a server for full CCFS queries HKID\/path\nfunc CollectionServerStart() {\n\thttp.HandleFunc(\"\/r\/\", webRepositoryHandler)\n\thttp.HandleFunc(\"\/d\/\", webDomainHandler)\n}\n\nfunc webRepositoryHandler(w http.ResponseWriter, r *http.Request) {\n\tparts := strings.SplitN(r.RequestURI[3:], \"\/\", 2)\n\thkidhex := parts[0]\n\tpath := \"\"\n\tif len(parts) > 1 {\n\t\tpath = parts[1]\n\t}\n\terr := error(nil)\n\tif len(hkidhex) == 64 {\n\t\th, err := objects.HkidFromHex(hkidhex)\n\t\tif err == nil {\n\t\t\tb, err := services.Get(h, path)\n\t\t\tif err == nil {\n\t\t\t\tw.Write(b.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, fmt.Sprint(\n\t\t\t\t\"HTTP Error 500 Internal server error\\n\\n\", err), 500)\n\t\t}\n\t}\n\tw.Write([]byte(fmt.Sprintf(\"Invalid HKID\\nerr: %v\", err)))\n\treturn\n}\nfunc webDomainHandler(w http.ResponseWriter, r *http.Request) {\n\tparts := strings.SplitN(r.RequestURI[3:], \"\/\", 2)\n\thkidhex := parts[0]\n\tpath := \"\"\n\tif len(parts) > 1 {\n\t\tpath = parts[1]\n\t}\n\terr := error(nil)\n\tif len(hkidhex) == 64 {\n\t\th, err := objects.HkidFromHex(hkidhex)\n\t\tif err == nil {\n\t\t\tb, err := services.GetD(h, path)\n\t\t\tif err == nil {\n\t\t\t\tw.Write(b.Bytes())\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, fmt.Sprint(\n\t\t\t\t\"HTTP Error 500 Internal server error\\n\\n\", err), 500)\n\t\t}\n\t}\n\tw.Write([]byte(fmt.Sprintf(\"Invalid HKID\\nerr: %v\", err)))\n\treturn\n}\n\nfunc composeQuery(typestring string, hash objects.HKID, namesegment string) (message string) {\n\tmessage = fmt.Sprintf(\"%s,%s\", typestring, hash.String())\n\tif namesegment != \"\" {\n\t\tmessage = fmt.Sprintf(\"%s,%s\", message, namesegment)\n\t}\n\treturn message\n\n}\n\nfunc parseQuery(message string) (typestring string, hash objects.HKID, namesegment string) {\n\tarrMessage := strings.SplitN(message, \",\", 3)\n\tif len(arrMessage) < 2 {\n\t\tpanic(\"error: malformed parseQuery\")\n\t}\n\ttypestring = arrMessage[0]\n\n\thash, err := objects.HkidFromHex(arrMessage[1])\n\tif err != nil {\n\t\tpanic(\"error: malformed hexadecimal\")\n\t}\n\tif len(arrMessage) > 2 {\n\t\tnamesegment = arrMessage[2]\n\t} else {\n\t\tnamesegment = \"\"\n\t}\n\tfmt.Println(\"Error\")\n\treturn typestring, hash, namesegment\n}\n\n\/\/Start begins the listening for web reqwests on 8080\nfunc Start() {\n\tgo BlobServerStart()\n\tgo CollectionServerStart()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage database\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/exposure-notifications-server\/internal\/model\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n)\n\nfunc TestAddExportConfig(t *testing.T) {\n\tif testDB == nil {\n\t\tt.Skip(\"no test DB\")\n\t}\n\tdefer resetTestDB(t)\n\tctx := context.Background()\n\n\tfromTime := time.Now().UTC()\n\tthruTime := fromTime.Add(6 * time.Hour)\n\twant := &model.ExportConfig{\n\t\tFilenameRoot: \"root\",\n\t\tPeriod: 3 * time.Hour,\n\t\tRegion: \"i1\",\n\t\tFrom: fromTime,\n\t\tThru: thruTime,\n\t}\n\tif err := testDB.AddExportConfig(ctx, want); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconn, err := testDB.pool.Acquire(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer conn.Release()\n\tvar (\n\t\tgot model.ExportConfig\n\t\tpsecs int\n\t)\n\terr = conn.QueryRow(ctx, `\n\t\tSELECT\n\t\t\tconfig_id, filename_root, period_seconds, region, from_timestamp, thru_timestamp\n\t\tFROM\n\t\t\tExportConfig\n\t\tWHERE\n\t\t\tconfig_id = $1\n\t`, want.ConfigID).Scan(&got.ConfigID, &got.FilenameRoot, &psecs, &got.Region, &got.From, &got.Thru)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot.Period = time.Duration(psecs) * time.Second\n\n\twant.From = want.From.Truncate(time.Microsecond)\n\twant.Thru = want.Thru.Truncate(time.Microsecond)\n\tif diff := cmp.Diff(want, &got); diff != \"\" {\n\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t}\n}\n\nfunc TestIterateExportConfigs(t *testing.T) {\n\tif testDB == nil {\n\t\tt.Skip(\"no test DB\")\n\t}\n\tdefer resetTestDB(t)\n\tctx := context.Background()\n\n\tnow := time.Now().Truncate(time.Microsecond)\n\tecs := []*model.ExportConfig{\n\t\t{\n\t\t\tFilenameRoot: \"active 1\",\n\t\t\tFrom: now.Add(-time.Minute),\n\t\t\tThru: now.Add(time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"active 2\",\n\t\t\tFrom: now.Add(-time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"done\",\n\t\t\tFrom: now.Add(-time.Hour),\n\t\t\tThru: now.Add(-time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"not yet\",\n\t\t\tFrom: now.Add(time.Minute),\n\t\t\tThru: now.Add(time.Hour),\n\t\t},\n\t}\n\tfor _, ec := range ecs {\n\t\tec.Period = time.Hour\n\t\tec.Region = \"R\"\n\t\tif err := testDB.AddExportConfig(ctx, ec); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\titer, err := testDB.IterateExportConfigs(ctx, now)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tif err := iter.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tvar got []*model.ExportConfig\n\tfor {\n\t\tec, done, err := iter.Next()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t\tgot = append(got, ec)\n\t}\n\twant := ecs[0:2]\n\tsort.Slice(got, func(i, j int) bool { return got[i].FilenameRoot < got[j].FilenameRoot })\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t}\n}\n<commit_msg>internal\/database: add ExportBatch tests (#103)<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage database\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/exposure-notifications-server\/internal\/model\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\tpgx \"github.com\/jackc\/pgx\/v4\"\n)\n\nfunc TestAddExportConfig(t *testing.T) {\n\tif testDB == nil {\n\t\tt.Skip(\"no test DB\")\n\t}\n\tdefer resetTestDB(t)\n\tctx := context.Background()\n\n\tfromTime := time.Now().UTC()\n\tthruTime := fromTime.Add(6 * time.Hour)\n\twant := &model.ExportConfig{\n\t\tFilenameRoot: \"root\",\n\t\tPeriod: 3 * time.Hour,\n\t\tRegion: \"i1\",\n\t\tFrom: fromTime,\n\t\tThru: thruTime,\n\t}\n\tif err := testDB.AddExportConfig(ctx, want); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconn, err := testDB.pool.Acquire(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer conn.Release()\n\tvar (\n\t\tgot model.ExportConfig\n\t\tpsecs int\n\t)\n\terr = conn.QueryRow(ctx, `\n\t\tSELECT\n\t\t\tconfig_id, filename_root, period_seconds, region, from_timestamp, thru_timestamp\n\t\tFROM\n\t\t\tExportConfig\n\t\tWHERE\n\t\t\tconfig_id = $1\n\t`, want.ConfigID).Scan(&got.ConfigID, &got.FilenameRoot, &psecs, &got.Region, &got.From, &got.Thru)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot.Period = time.Duration(psecs) * time.Second\n\n\twant.From = want.From.Truncate(time.Microsecond)\n\twant.Thru = want.Thru.Truncate(time.Microsecond)\n\tif diff := cmp.Diff(want, &got); diff != \"\" {\n\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t}\n}\n\nfunc TestIterateExportConfigs(t *testing.T) {\n\tif testDB == nil {\n\t\tt.Skip(\"no test DB\")\n\t}\n\tdefer resetTestDB(t)\n\tctx := context.Background()\n\n\tnow := time.Now().Truncate(time.Microsecond)\n\tecs := []*model.ExportConfig{\n\t\t{\n\t\t\tFilenameRoot: \"active 1\",\n\t\t\tFrom: now.Add(-time.Minute),\n\t\t\tThru: now.Add(time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"active 2\",\n\t\t\tFrom: now.Add(-time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"done\",\n\t\t\tFrom: now.Add(-time.Hour),\n\t\t\tThru: now.Add(-time.Minute),\n\t\t},\n\t\t{\n\t\t\tFilenameRoot: \"not yet\",\n\t\t\tFrom: now.Add(time.Minute),\n\t\t\tThru: now.Add(time.Hour),\n\t\t},\n\t}\n\tfor _, ec := range ecs {\n\t\tec.Period = time.Hour\n\t\tec.Region = \"R\"\n\t\tif err := testDB.AddExportConfig(ctx, ec); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\titer, err := testDB.IterateExportConfigs(ctx, now)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tif err := iter.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tvar got []*model.ExportConfig\n\tfor {\n\t\tec, done, err := iter.Next()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t\tgot = append(got, ec)\n\t}\n\twant := ecs[0:2]\n\tsort.Slice(got, func(i, j int) bool { return got[i].FilenameRoot < got[j].FilenameRoot })\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t}\n}\n\nfunc TestBatches(t *testing.T) {\n\tif testDB == nil {\n\t\tt.Skip(\"no test DB\")\n\t}\n\tdefer resetTestDB(t)\n\tctx := context.Background()\n\n\tnow := time.Now().Truncate(time.Microsecond)\n\tconfig := &model.ExportConfig{\n\t\tFilenameRoot: \"root\",\n\t\tPeriod: time.Hour,\n\t\tRegion: \"R\",\n\t\tFrom: now,\n\t\tThru: now.Add(time.Hour),\n\t}\n\tif err := testDB.AddExportConfig(ctx, config); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar batches []*model.ExportBatch\n\tvar wantLatest time.Time\n\tfor i := 0; i < 4; i++ {\n\t\tstart := now.Add(time.Duration(i) * time.Minute)\n\t\tend := start.Add(time.Minute)\n\t\twantLatest = end\n\t\tbatches = append(batches, &model.ExportBatch{\n\t\t\tConfigID: config.ConfigID,\n\t\t\tFilenameRoot: config.FilenameRoot,\n\t\t\tRegion: config.Region,\n\t\t\tStatus: model.ExportBatchOpen,\n\t\t\tStartTimestamp: start,\n\t\t\tEndTimestamp: end,\n\t\t})\n\t}\n\tif err := testDB.AddExportBatches(ctx, batches); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgotLatest, err := testDB.LatestExportBatchEnd(ctx, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !gotLatest.Equal(wantLatest) {\n\t\tt.Errorf(\"LatestExportBatchEnd: got %s, want %s\", gotLatest, wantLatest)\n\t}\n\n\tleaseBatches := func() int64 {\n\t\tt.Helper()\n\t\tvar batchID int64\n\t\t\/\/ Lease all the batches.\n\t\tfor range batches {\n\t\t\tgot, err := testDB.LeaseBatch(ctx, time.Hour, now)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif got == nil {\n\t\t\t\tt.Fatal(\"could not lease a batch\")\n\t\t\t}\n\t\t\tif got.ConfigID != config.ConfigID || got.FilenameRoot != config.FilenameRoot || got.Region != config.Region {\n\t\t\t\tt.Errorf(\"LeaseBatch: got (%d, %q, %q), want (%d, %q, %q)\",\n\t\t\t\t\tgot.ConfigID, got.FilenameRoot, got.Region,\n\t\t\t\t\tconfig.ConfigID, config.FilenameRoot, config.Region)\n\t\t\t}\n\t\t\tif got.Status != model.ExportBatchPending {\n\t\t\t\tt.Errorf(\"LeaseBatch: got status %q, want pending\", got.Status)\n\t\t\t}\n\t\t\twantExpires := now.Add(time.Hour)\n\t\t\tif got.LeaseExpires.Before(wantExpires) || got.LeaseExpires.After(wantExpires.Add(time.Minute)) {\n\t\t\t\tt.Errorf(\"LeaseBatch: expires at %s, wanted a time close to %s\", got.LeaseExpires, wantExpires)\n\t\t\t}\n\t\t\tbatchID = got.BatchID\n\t\t}\n\t\t\/\/ Every batch is leased.\n\t\tgot, err := testDB.LeaseBatch(ctx, time.Hour, now)\n\t\tif got != nil || err != nil {\n\t\t\tt.Errorf(\"all leased: got (%v, %v), want (nil, nil)\", got, err)\n\t\t}\n\t\treturn batchID\n\t}\n\t\/\/ Now, all end times are in the future, so no batches can be leased.\n\tgot, err := testDB.LeaseBatch(ctx, time.Hour, now)\n\tif got != nil || err != nil {\n\t\tt.Errorf(\"got (%v, %v), want (nil, nil)\", got, err)\n\t}\n\n\t\/\/ One hour later, all batches' end times are in the past, so they can be leased.\n\tnow = now.Add(time.Hour)\n\tleaseBatches()\n\t\/\/ Two hours later all the batches have expired, so we can lease them again.\n\tnow = now.Add(2 * time.Hour)\n\tbatchID := leaseBatches()\n\n\t\/\/ Complete a batch.\n\terr = testDB.inTx(ctx, pgx.Serializable, func(tx pgx.Tx) error { return completeBatch(ctx, tx, batchID) })\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot, err = testDB.LookupExportBatch(ctx, batchID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got.Status != model.ExportBatchComplete {\n\t\tt.Errorf(\"after completion: got status %q, want complete\", got.Status)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/\/ User information\ntype User struct {\n\tID string `json:\"id,omitempty\"`\n\tScreenName string `json:\"screenName,omitempty\"`\n\tEmail string `json:\"email,omitempty\"`\n\tAuthority string `json:\"authority,omitempty\"`\n\n\tIsInRegistrationProcess bool `json:\"isInRegistrationProcess,omitempty\"`\n\tIsMFAEnabled bool `json:\"isMFAEnabled,omitempty\"`\n\tAuthenticationMethods []string `json:\"authenticationMethods,omitempty\"`\n\tJoinedAt int64 `json:\"joinedAt,omitempty\"`\n}\n\n\/\/ FindUsers find users.\nfunc (c *Client) FindUsers() ([]*User, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/users\")).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tUsers []*User `json:\"users\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.Users, err\n}\n\n\/\/ DeleteUser delete users.\nfunc (c *Client) DeleteUser(userID string) (*User, error) {\n\treq, err := http.NewRequest(\"DELETE\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/users\/%s\", userID)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser := &User{}\n\terr = json.NewDecoder(resp.Body).Decode(user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn user, nil\n}\n<commit_msg>delete unnecessary string conversion<commit_after>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/\/ User information\ntype User struct {\n\tID string `json:\"id,omitempty\"`\n\tScreenName string `json:\"screenName,omitempty\"`\n\tEmail string `json:\"email,omitempty\"`\n\tAuthority string `json:\"authority,omitempty\"`\n\n\tIsInRegistrationProcess bool `json:\"isInRegistrationProcess,omitempty\"`\n\tIsMFAEnabled bool `json:\"isMFAEnabled,omitempty\"`\n\tAuthenticationMethods []string `json:\"authenticationMethods,omitempty\"`\n\tJoinedAt int64 `json:\"joinedAt,omitempty\"`\n}\n\n\/\/ FindUsers find users.\nfunc (c *Client) FindUsers() ([]*User, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(\"\/api\/v0\/users\").String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tUsers []*User `json:\"users\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.Users, err\n}\n\n\/\/ DeleteUser delete users.\nfunc (c *Client) DeleteUser(userID string) (*User, error) {\n\treq, err := http.NewRequest(\"DELETE\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/users\/%s\", userID)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser := &User{}\n\terr = json.NewDecoder(resp.Body).Decode(user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn user, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package somaproto\n\nimport (\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype ProtoRequestUser struct {\n\tUser ProtoUser `json:\"user,omitempty\"`\n\tCredentials ProtoUserCredentials `json:\"credentials,omitempty\"`\n\tRestore bool `json:\"purge,omitempty\"`\n\tPurge bool `json:\"purge,omitempty\"`\n}\n\ntype ProtoResultUser struct {\n\tCode uint16 `json:\"code,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tText []string `json:\"text,omitempty\"`\n\tUsers []ProtoUser `json:\"users,omitempty\"`\n}\n\ntype ProtoUser struct {\n\tId uuid.UUID `json:\"id,omitempty\"`\n\tUserName string `json:\"username,omitempty\"`\n\tFirstName string `json:\"firstname,omitempty\"`\n\tLastName string `json:\"lastname,omitempty\"`\n\tEmployeeNumber string `json:\"employeenumber,omitempty\"`\n\tMailAddress string `json:\"mailaddress,omitempty\"`\n\tIsActive bool `json:\"active,omitempty\"`\n\tIsSystem bool `json:\"system,omitempty\"`\n\tTeam string `json:\"team,omitempty\"`\n\tDetails ProtoUserDetails `json:\"details,omitempty\"`\n}\n\ntype ProtoUserCredentials struct {\n\tReset bool `json:\"reset,omitempty\"`\n\tPassword bool `json:\"password,omitempty\"`\n}\n\ntype ProtoUserDetails struct {\n\tCreatedAt string `json:\"createdat,omitempty\"`\n\tCreatedBy string `json:\"createdby,omitempty\"`\n}\n\ntype ProtoUserFilter struct {\n\tUserName string `json:\"username,omitempty\"`\n\tIsActive bool `json:\"active,omitempty\"`\n\tIsSystem bool `json:\"system,omitempty\"`\n\tIsDeleted bool `json:\"deleted,omitempty\"`\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<commit_msg>Add IsDeleted to ProtoUser struct, fix Restore<commit_after>package somaproto\n\nimport (\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype ProtoRequestUser struct {\n\tUser ProtoUser `json:\"user,omitempty\"`\n\tCredentials ProtoUserCredentials `json:\"credentials,omitempty\"`\n\tRestore bool `json:\"restore,omitempty\"`\n\tPurge bool `json:\"purge,omitempty\"`\n}\n\ntype ProtoResultUser struct {\n\tCode uint16 `json:\"code,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tText []string `json:\"text,omitempty\"`\n\tUsers []ProtoUser `json:\"users,omitempty\"`\n}\n\ntype ProtoUser struct {\n\tId uuid.UUID `json:\"id,omitempty\"`\n\tUserName string `json:\"username,omitempty\"`\n\tFirstName string `json:\"firstname,omitempty\"`\n\tLastName string `json:\"lastname,omitempty\"`\n\tEmployeeNumber string `json:\"employeenumber,omitempty\"`\n\tMailAddress string `json:\"mailaddress,omitempty\"`\n\tIsActive bool `json:\"active,omitempty\"`\n\tIsSystem bool `json:\"system,omitempty\"`\n\tIsDeleted bool `json:\"deleted,omitempty\"`\n\tTeam string `json:\"team,omitempty\"`\n\tDetails ProtoUserDetails `json:\"details,omitempty\"`\n}\n\ntype ProtoUserCredentials struct {\n\tReset bool `json:\"reset,omitempty\"`\n\tPassword bool `json:\"password,omitempty\"`\n}\n\ntype ProtoUserDetails struct {\n\tCreatedAt string `json:\"createdat,omitempty\"`\n\tCreatedBy string `json:\"createdby,omitempty\"`\n}\n\ntype ProtoUserFilter struct {\n\tUserName string `json:\"username,omitempty\"`\n\tIsActive bool `json:\"active,omitempty\"`\n\tIsSystem bool `json:\"system,omitempty\"`\n\tIsDeleted bool `json:\"deleted,omitempty\"`\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>package builtins\n\ntype ArrayClass struct {\n\tvalueStub\n}\n\nfunc NewArrayClass() Value {\n\ta := &ArrayClass{}\n\ta.class = NewClassValue().(Class)\n\ta.initialize()\n\treturn a\n}\n\nfunc (klass *ArrayClass) New() Value {\n\ta := &Array{}\n\ta.initialize()\n\ta.class = klass\n\n\ta.AddMethod(NewMethod(\"shift\", func(args ...Value) (Value, error) {\n\t\tif len(a.members) == 0 {\n\t\t\treturn Nil(), nil\n\t\t}\n\n\t\tval := a.members[0]\n\t\ta.members = a.members[1:]\n\t\treturn val, nil\n\t}))\n\ta.AddMethod(NewMethod(\"include?\", func(args ...Value) (Value, error) {\n\t\tfor _, m := range a.members {\n\t\t\tif m == args[0] {\n\t\t\t\treturn NewTrueClass().(Class).New(), nil\n\t\t\t}\n\t\t}\n\n\t\treturn NewFalseClass().(Class).New(), nil\n\t}))\n\n\treturn a\n}\n\nfunc (array *ArrayClass) String() string {\n\treturn \"Array\"\n}\n\ntype Array struct {\n\tvalueStub\n\tmembers []Value\n}\n\nfunc (array *Array) Append(v Value) {\n\tarray.members = append(array.members, v)\n}\n\nfunc (array *Array) Members() []Value {\n\treturn array.members\n}\n\nfunc (array *Array) String() string {\n\treturn \"Array\"\n}\n<commit_msg>Implement Array#unshift for rubyspec<commit_after>package builtins\n\ntype ArrayClass struct {\n\tvalueStub\n}\n\nfunc NewArrayClass() Value {\n\ta := &ArrayClass{}\n\ta.class = NewClassValue().(Class)\n\ta.initialize()\n\treturn a\n}\n\nfunc (klass *ArrayClass) New() Value {\n\ta := &Array{}\n\ta.initialize()\n\ta.class = klass\n\n\ta.AddMethod(NewMethod(\"shift\", func(args ...Value) (Value, error) {\n\t\tif len(a.members) == 0 {\n\t\t\treturn Nil(), nil\n\t\t}\n\n\t\tval := a.members[0]\n\t\ta.members = a.members[1:]\n\t\treturn val, nil\n\t}))\n\n\ta.AddMethod(NewMethod(\"unshift\", func(args ...Value) (Value, error) {\n\t\ta.members = append([]Value{args[0]}, a.members[0:]...)\n\t\treturn a, nil\n\t}))\n\n\ta.AddMethod(NewMethod(\"include?\", func(args ...Value) (Value, error) {\n\t\tfor _, m := range a.members {\n\t\t\tif m == args[0] {\n\t\t\t\treturn NewTrueClass().(Class).New(), nil\n\t\t\t}\n\t\t}\n\n\t\treturn NewFalseClass().(Class).New(), nil\n\t}))\n\n\treturn a\n}\n\nfunc (array *ArrayClass) String() string {\n\treturn \"Array\"\n}\n\ntype Array struct {\n\tvalueStub\n\tmembers []Value\n}\n\nfunc (array *Array) Append(v Value) {\n\tarray.members = append(array.members, v)\n}\n\nfunc (array *Array) Members() []Value {\n\treturn array.members\n}\n\nfunc (array *Array) String() string {\n\treturn \"Array\"\n}\n<|endoftext|>"} {"text":"<commit_before>package borm\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nfunc createBucket(tx *bolt.Tx, buckets []string) (b *bolt.Bucket, err error) {\n\tb, err = tx.CreateBucketIfNotExists([]byte(buckets[0]))\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(buckets) > 1 {\n\t\tfor _, v := range buckets[1:] {\n\t\t\tb, err = b.CreateBucketIfNotExists([]byte(v))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc getBucket(tx *bolt.Tx, buckets []string) (b *bolt.Bucket) {\n\tb = tx.Bucket([]byte(buckets[0]))\n\tif b != nil {\n\t\tif len(buckets) > 1 {\n\t\t\tfor _, v := range buckets[1:] {\n\t\t\t\tif b == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tb = b.Bucket([]byte(v))\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc unmarshal(data []byte, i interface{}) error {\n\terr := json.Unmarshal(data, i)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc marshal(i interface{}) ([]byte, error) {\n\tenc, err := json.Marshal(i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn enc, nil\n}\n\nfunc nextID(b *bolt.Bucket) string {\n\tid, _ := b.NextSequence()\n\treturn fmt.Sprint(id)\n}\n\n\/\/ deref is Indirect for reflect.Types\nfunc deref(t reflect.Type) reflect.Type {\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn t\n}\n\nfunc baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {\n\tt = deref(t)\n\tif t.Kind() != expected {\n\t\treturn nil, fmt.Errorf(\"expected %s but got %s\", expected, t.Kind())\n\t}\n\treturn t, nil\n}\n\nfunc checkID(b *bolt.Bucket, m mod) (id string, newItem bool) {\n\tid = m.GetID()\n\tif id == \"\" {\n\t\tnewItem = true\n\t\tid = nextID(b)\n\t\tm.setID(id)\n\t\tif m1, ok := m.(modCreate); ok {\n\t\t\tm1.setCreation()\n\t\t}\n\t\tif m1, ok := m.(modUpdate); ok {\n\t\t\tm1.touchModel()\n\t\t}\n\t} else {\n\t\tif m1, ok := m.(modUpdate); ok {\n\t\t\tm1.touchModel()\n\t\t}\n\t}\n\treturn\n}\n\nfunc typeName(i interface{}) string {\n\treturn reflect.TypeOf(i).Elem().Name()\n}\n\nvar addEvent = func(name string, m mod) {\n\tgo Events.Pub(eventName(name, m), m)\n}\n\nfunc eventName(name string, m mod) string {\n\treturn typeName(m) + name\n}\n<commit_msg>adding cursor methods<commit_after>package borm\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nfunc createBucket(tx *bolt.Tx, buckets []string) (b *bolt.Bucket, err error) {\n\tb, err = tx.CreateBucketIfNotExists([]byte(buckets[0]))\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(buckets) > 1 {\n\t\tfor _, v := range buckets[1:] {\n\t\t\tb, err = b.CreateBucketIfNotExists([]byte(v))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc getBucket(tx *bolt.Tx, buckets []string) (b *bolt.Bucket) {\n\tb = tx.Bucket([]byte(buckets[0]))\n\tif b != nil {\n\t\tif len(buckets) > 1 {\n\t\t\tfor _, v := range buckets[1:] {\n\t\t\t\tif b == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tb = b.Bucket([]byte(v))\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc unmarshal(data []byte, i interface{}) error {\n\terr := json.Unmarshal(data, i)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc marshal(i interface{}) ([]byte, error) {\n\tenc, err := json.Marshal(i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn enc, nil\n}\n\nfunc nextID(b *bolt.Bucket) string {\n\tid, _ := b.NextSequence()\n\treturn fmt.Sprint(id)\n}\n\n\/\/ deref is Indirect for reflect.Types\nfunc deref(t reflect.Type) reflect.Type {\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn t\n}\n\nfunc baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {\n\tt = deref(t)\n\tif t.Kind() != expected {\n\t\treturn nil, fmt.Errorf(\"expected %s but got %s\", expected, t.Kind())\n\t}\n\treturn t, nil\n}\n\nfunc checkID(b *bolt.Bucket, m mod) (id string, newItem bool) {\n\tid = m.GetID()\n\tif id == \"\" {\n\t\tnewItem = true\n\t\tid = nextID(b)\n\t\tm.setID(id)\n\t\tif m1, ok := m.(modCreate); ok {\n\t\t\tm1.setCreation()\n\t\t}\n\t\tif m1, ok := m.(modUpdate); ok {\n\t\t\tm1.touchModel()\n\t\t}\n\t} else {\n\t\tif m1, ok := m.(modUpdate); ok {\n\t\t\tm1.touchModel()\n\t\t}\n\t}\n\treturn\n}\n\nfunc typeName(i interface{}) string {\n\treturn reflect.TypeOf(i).Elem().Name()\n}\n\nvar addEvent = func(name string, m mod) {\n\tgo Events.Pub(eventName(name, m), m)\n}\n\nfunc eventName(name string, m mod) string {\n\treturn typeName(m) + name\n}\n\nfunc cursorStart(c *bolt.Cursor, rev bool) (k []byte, v []byte) {\n\tif rev {\n\t\treturn c.Last()\n\t}\n\treturn c.First()\n}\n\nfunc cursorNext(c *bolt.Cursor, rev bool) (k []byte, v []byte) {\n\tif rev {\n\t\treturn c.Prev()\n\t}\n\treturn c.Next()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nmatrixutils provides utilities for working with dsputils and fft packages found at\ngithub.com\/mjibson\/go-dsp\/dsputils and github.com\/mjibson\/go-dsp\/fft, respectively.\n*\/\npackage matrixutils\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com\/mjibson\/go-dsp\/dsputils\"\n)\n\nfunc positiveMod(n, k int) int {\n\tif n < 0 {\n\t\treturn k - ((-1 * n) % k)\n\t}\n\n\treturn n % k\n}\n\n\/\/ Using degrees, not radians\nfunc PolarToCartesian(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Clarify size of each dimension\n\tlenX, lenY, _ := func() (int, int, int) {\n\t\t\/\/ Reminder: matrices are matrix[y][x]\n\t\treturn dims[1], dims[0], int(math.Hypot(float64(dims[0]), float64(dims[1])))\n\t}()\n\n\tcartesianMatrix := dsputils.MakeEmptyMatrix([]int{lenY, lenX})\n\n\t\/\/ Reminder: matrices are matrix[y][x]\n\tfor cartesianY := 0; cartesianY < lenY; cartesianY++ {\n\t\tfor cartesianX := 0; cartesianX < lenX; cartesianX++ {\n\n\t\t\tpolarX := int(float64(cartesianX) * math.Cos(float64(cartesianY)*0.5*math.Pi\/float64(lenY)))\n\t\t\tpolarY := int(float64(cartesianX) * math.Sin(float64(cartesianY)*0.5*math.Pi\/float64(lenY)))\n\n\t\t\tvalue := matrix.Value([]int{positiveMod(polarY, lenY), positiveMod(polarX, lenX)})\n\n\t\t\tcartesianMatrix.SetValue(value, []int{cartesianY, cartesianX})\n\t\t}\n\t}\n\n\treturn cartesianMatrix, nil\n}\n\n\/\/ Using degrees, not radians\nfunc CartesianToPolar(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Clarify size of each dimension\n\tlenX, lenY, _ := func() (int, int, int) {\n\t\t\/\/ Reminder: matrices are matrix[y][x]\n\t\treturn dims[1], dims[0], int(math.Hypot(float64(dims[0]), float64(dims[1])))\n\t}()\n\n\tpolarMatrix := dsputils.MakeEmptyMatrix([]int{lenY, lenX})\n\n\t\/\/ Reminder: matrices are matrix[y][x]\n\tfor polarY := 0; polarY < lenY; polarY++ {\n\t\tfor polarX := 0; polarX < lenX; polarX++ {\n\n\t\t\tcartesianX := int(math.Hypot(float64(polarX), float64(polarY)))\n\t\t\tcartesianY := int(math.Atan2(float64(polarY), float64(polarX)) * 2.0 \/ math.Pi * float64(lenY))\n\n\t\t\tvalue := matrix.Value([]int{positiveMod(cartesianY, lenY), positiveMod(cartesianX, lenX)})\n\n\t\t\tpolarMatrix.SetValue(value, []int{polarY, polarX})\n\t\t}\n\t}\n\n\treturn polarMatrix, nil\n}\n\nfunc Translate(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Convert matrix to polar coordinates\n\ttranslatedMatrix := dsputils.MakeEmptyMatrix([]int{dims[0], dims[1]})\n\n\tfor i := 0; i < dims[0]; i++ {\n\t\tfor j := 0; j < dims[1]; j++ {\n\t\t\toldX := (dims[0] + i + dims[0]\/2) % dims[0]\n\t\t\toldY := (dims[1] + j - dims[1]\/2) % dims[1]\n\n\t\t\ttranslatedMatrix.SetValue(matrix.Value([]int{oldX, oldY}), []int{i, j})\n\t\t}\n\t}\n\n\treturn translatedMatrix, nil\n}\n<commit_msg>Raster to cartesian<commit_after>\/*\nmatrixutils provides utilities for working with dsputils and fft packages found at\ngithub.com\/mjibson\/go-dsp\/dsputils and github.com\/mjibson\/go-dsp\/fft, respectively.\n*\/\npackage matrixutils\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com\/mjibson\/go-dsp\/dsputils\"\n)\n\nfunc positiveMod(n, k int) int {\n\tif n < 0 {\n\t\treturn k - ((-1 * n) % k)\n\t}\n\n\treturn n % k\n}\n\nfunc RasterToCartesian(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Clarify size of each dimension\n\tlenX, lenY, _ := func() (int, int, int) {\n\t\t\/\/ Reminder: matrices are matrix[y][x]\n\t\treturn dims[1], dims[0], int(math.Hypot(float64(dims[0]), float64(dims[1])))\n\t}()\n\n\tcartesianMatrix := dsputils.MakeEmptyMatrix([]int{lenY, lenX})\n\n\t\/\/ Reminder: matrices are matrix[y][x]\n\tfor y := 0; y < lenY; y++ {\n\t\tfor x := 0; x < lenX; x++ {\n\n\t\t\tcartesianX := x - lenX\/2\n\t\t\tcartesianY := lenY\/2 - y\n\n\t\t\tvalue := matrix.Value([]int{positiveMod(cartesianY, lenY), positiveMod(cartesianX, lenX)})\n\n\t\t\tcartesianMatrix.SetValue(value, []int{y, x})\n\t\t}\n\t}\n\n\treturn cartesianMatrix, nil\n}\n\n\/\/ Using degrees, not radians\nfunc PolarToCartesian(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Clarify size of each dimension\n\tlenX, lenY, _ := func() (int, int, int) {\n\t\t\/\/ Reminder: matrices are matrix[y][x]\n\t\treturn dims[1], dims[0], int(math.Hypot(float64(dims[0]), float64(dims[1])))\n\t}()\n\n\tcartesianMatrix := dsputils.MakeEmptyMatrix([]int{lenY, lenX})\n\n\t\/\/ Reminder: matrices are matrix[y][x]\n\tfor cartesianY := 0; cartesianY < lenY; cartesianY++ {\n\t\tfor cartesianX := 0; cartesianX < lenX; cartesianX++ {\n\n\t\t\tpolarX := int(float64(cartesianX) * math.Cos(float64(cartesianY)*0.5*math.Pi\/float64(lenY)))\n\t\t\tpolarY := int(float64(cartesianX) * math.Sin(float64(cartesianY)*0.5*math.Pi\/float64(lenY)))\n\n\t\t\tvalue := matrix.Value([]int{positiveMod(polarY, lenY), positiveMod(polarX, lenX)})\n\n\t\t\tcartesianMatrix.SetValue(value, []int{cartesianY, cartesianX})\n\t\t}\n\t}\n\n\treturn cartesianMatrix, nil\n}\n\n\/\/ Using degrees, not radians\nfunc CartesianToPolar(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Clarify size of each dimension\n\tlenX, lenY, _ := func() (int, int, int) {\n\t\t\/\/ Reminder: matrices are matrix[y][x]\n\t\treturn dims[1], dims[0], int(math.Hypot(float64(dims[0]), float64(dims[1])))\n\t}()\n\n\tpolarMatrix := dsputils.MakeEmptyMatrix([]int{lenY, lenX})\n\n\t\/\/ Reminder: matrices are matrix[y][x]\n\tfor polarY := 0; polarY < lenY; polarY++ {\n\t\tfor polarX := 0; polarX < lenX; polarX++ {\n\n\t\t\tcartesianX := int(math.Hypot(float64(polarX), float64(polarY)))\n\t\t\tcartesianY := int(math.Atan2(float64(polarY), float64(polarX)) * 2.0 \/ math.Pi * float64(lenY))\n\n\t\t\tvalue := matrix.Value([]int{positiveMod(cartesianY, lenY), positiveMod(cartesianX, lenX)})\n\n\t\t\tpolarMatrix.SetValue(value, []int{polarY, polarX})\n\t\t}\n\t}\n\n\treturn polarMatrix, nil\n}\n\nfunc Translate(matrix *dsputils.Matrix) (*dsputils.Matrix, error) {\n\tdims := matrix.Dimensions()\n\n\tif len(dims) > 2 {\n\t\treturn nil, fmt.Errorf(\"CartesianToPolar expects 2 dimensions, found %d\", len(dims))\n\t}\n\n\t\/\/ Convert matrix to polar coordinates\n\ttranslatedMatrix := dsputils.MakeEmptyMatrix([]int{dims[0], dims[1]})\n\n\tfor i := 0; i < dims[0]; i++ {\n\t\tfor j := 0; j < dims[1]; j++ {\n\t\t\toldX := (dims[0] + i + dims[0]\/2) % dims[0]\n\t\t\toldY := (dims[1] + j - dims[1]\/2) % dims[1]\n\n\t\t\ttranslatedMatrix.SetValue(matrix.Value([]int{oldX, oldY}), []int{i, j})\n\t\t}\n\t}\n\n\treturn translatedMatrix, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package GoSDK\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\tmqtt \"github.com\/clearblade\/mqtt_parsing\"\n\t\"github.com\/clearblade\/mqttclient\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/\/CB_ADDR is the address of the ClearBlade Platform you are speaking with\n\tCB_ADDR = \"https:\/\/rtp.clearblade.com\"\n\t\/\/CB_MSG_ADDR is the messaging address you wish to speak to\n\tCB_MSG_ADDR = \"rtp.clearblade.com:1883\"\n\n\t_HEADER_KEY_KEY = \"ClearBlade-SystemKey\"\n\t_HEADER_SECRET_KEY = \"ClearBlade-SystemSecret\"\n)\n\nvar tr = &http.Transport{\n\tTLSClientConfig: &tls.Config{InsecureSkipVerify: false},\n}\n\nconst (\n\tcreateDevUser = iota\n\tcreateUser\n)\n\n\/\/Client is a convience interface for API consumers, if they want to use the same functions for both developer users and unprivleged users\ntype Client interface {\n\t\/\/session bookkeeping calls\n\tAuthenticate() error\n\tLogout() error\n\n\t\/\/data calls\n\tInsertData(string, interface{}) error\n\tUpdateData(string, *Query, map[string]interface{}) error\n\tGetData(string, *Query) (map[string]interface{}, error)\n\tGetDataByName(string, *Query) (map[string]interface{}, error)\n\tGetDataByKeyAndName(string, string, *Query) (map[string]interface{}, error)\n\tDeleteData(string, *Query) error\n\n\t\/\/mqtt calls\n\tInitializeMQTT(string, string, int) error\n\tConnectMQTT(*tls.Config, *LastWillPacket) error\n\tPublish(string, []byte, int) error\n\tSubscribe(string, int) (<-chan *mqtt.Publish, error)\n\tUnsubscribe(string) error\n\tDisconnect() error\n}\n\n\/\/cbClient will supply various information that differs between privleged and unprivleged users\n\/\/this interface is meant to be unexported\ntype cbClient interface {\n\tcredentials() ([][]string, error) \/\/the inner slice is a tuple of \"Header\":\"Value\"\n\tpreamble() string\n\tsetToken(string)\n\tgetToken() string\n\tgetSystemInfo() (string, string)\n\tgetMessageId() uint16\n}\n\n\/\/UserClient is the type for users\ntype UserClient struct {\n\tUserToken string\n\tmrand *rand.Rand\n\tMQTTClient *mqttclient.Client\n\tSystemKey string\n\tSystemSecret string\n\tEmail string\n\tPassword string\n}\n\n\/\/DevClient is the type for developers\ntype DevClient struct {\n\tDevToken string\n\tmrand *rand.Rand\n\tMQTTClient *mqttclient.Client\n\tEmail string\n\tPassword string\n}\n\n\/\/CbReq is a wrapper around an HTTP request\ntype CbReq struct {\n\tBody interface{}\n\tMethod string\n\tEndpoint string\n\tQueryString string\n\tHeaders map[string][]string\n}\n\n\/\/CbResp is a wrapper around an HTTP response\ntype CbResp struct {\n\tBody interface{}\n\tStatusCode int\n}\n\n\/\/NewUserClient allocates a new UserClient struct\nfunc NewUserClient(systemkey, systemsecret, email, password string) *UserClient {\n\treturn &UserClient{\n\t\tUserToken: \"\",\n\t\tmrand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tMQTTClient: nil,\n\t\tSystemSecret: systemsecret,\n\t\tSystemKey: systemkey,\n\t\tEmail: email,\n\t\tPassword: password,\n\t}\n}\n\n\/\/NewDevClient allocates a new DevClient struct\nfunc NewDevClient(email, password string) *DevClient {\n\treturn &DevClient{\n\t\tDevToken: \"\",\n\t\tmrand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tMQTTClient: nil,\n\t\tEmail: email,\n\t\tPassword: password,\n\t}\n}\n\n\/\/Authenticate retrieves a token from the specified Clearblade Platform\nfunc (u *UserClient) Authenticate() error {\n\treturn authenticate(u, u.Email, u.Password)\n}\n\nfunc (u *UserClient) AuthAnon() error {\n\treturn authAnon(u)\n}\n\n\/\/Authenticate retrieves a token from the specified Clearblade Platform\nfunc (d *DevClient) Authenticate() error {\n\treturn authenticate(d, d.Email, d.Password)\n}\n\n\/\/Register creates a new user\nfunc (u *UserClient) Register(username, password string) error {\n\tif u.UserToken == \"\" {\n\t\treturn fmt.Errorf(\"Must be logged in to create users\")\n\t}\n\t_, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, \"\", \"\", \"\")\n\treturn err\n}\n\n\/\/RegisterUser creates a new user, returning the body of the response.\nfunc (u *UserClient) RegisterUser(username, password string) (map[string]interface{}, error) {\n\tif u.UserToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"Must be logged in to create users\")\n\t}\n\tresp, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, \"\", \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/Registers a new developer\nfunc (d *DevClient) Register(username, password, fname, lname, org string) error {\n\tresp, err := register(d, createDevUser, username, password, \"\", \"\", fname, lname, org)\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\td.DevToken = resp[\"dev_token\"].(string)\n\t\treturn nil\n\t}\n}\n\nfunc (d *DevClient) RegisterNewUser(username, password, systemkey, systemsecret string) (map[string]interface{}, error) {\n\tif d.DevToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"Must authenticate first\")\n\t}\n\treturn register(d, createUser, username, password, systemkey, systemsecret, \"\", \"\", \"\")\n\n}\n\n\/\/Register creates a new developer user\nfunc (d *DevClient) RegisterDevUser(username, password, fname, lname, org string) (map[string]interface{}, error) {\n\tresp, err := register(d, createDevUser, username, password, \"\", \"\", fname, lname, org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/Logout ends the session\nfunc (u *UserClient) Logout() error {\n\treturn logout(u)\n}\n\n\/\/Logout ends the session\nfunc (d *DevClient) Logout() error {\n\treturn logout(d)\n}\n\n\/\/Below are some shared functions\n\nfunc authenticate(c cbClient, username, password string) error {\n\tvar creds [][]string\n\tswitch c.(type) {\n\tcase *UserClient:\n\t\tvar err error\n\t\tcreds, err = c.credentials()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tresp, err := post(c.preamble()+\"\/auth\", map[string]interface{}{\n\t\t\"email\": username,\n\t\t\"password\": password,\n\t}, creds, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Error in authenticating, Status Code: %d, %v\\n\", resp.StatusCode, resp.Body)\n\t}\n\n\tvar token string = \"\"\n\tswitch c.(type) {\n\tcase *UserClient:\n\t\ttoken = resp.Body.(map[string]interface{})[\"user_token\"].(string)\n\tcase *DevClient:\n\t\ttoken = resp.Body.(map[string]interface{})[\"dev_token\"].(string)\n\t}\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"Token not present i response from platform %+v\", resp.Body)\n\t}\n\tc.setToken(token)\n\treturn nil\n}\n\nfunc authAnon(c cbClient) error {\n\tcreds, err := c.credentials()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid client: %+s\", err.Error())\n\t}\n\tresp, err := post(c.preamble()+\"\/anon\", nil, creds, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving anon user token: %s\", err.Error())\n\t}\n\ttoken := resp.Body.(map[string]interface{})[\"user_token\"].(string)\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"Token not present in response from platform %+v\", resp.Body)\n\t}\n\tc.setToken(token)\n\treturn nil\n}\n\nfunc register(c cbClient, kind int, username, password, syskey, syssec, fname, lname, org string) (map[string]interface{}, error) {\n\tpayload := map[string]interface{}{\n\t\t\"email\": username,\n\t\t\"password\": password,\n\t}\n\tvar endpoint string\n\theaders := make(map[string][]string)\n\tvar creds [][]string\n\tswitch kind {\n\tcase createDevUser:\n\t\tendpoint = \"\/admin\/reg\"\n\t\tpayload[\"fname\"] = fname\n\t\tpayload[\"lname\"] = lname\n\t\tpayload[\"org\"] = org\n\tcase createUser:\n\t\tswitch c.(type) {\n\t\tcase *DevClient:\n\t\t\tif syskey == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System key required\")\n\t\t\t}\n\t\t\tendpoint = fmt.Sprintf(\"\/admin\/user\/%s\", syskey)\n\t\tcase *UserClient:\n\t\t\tif syskey == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System key required\")\n\t\t\t}\n\t\t\tif syssec == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System secret required\")\n\t\t\t}\n\t\t\tendpoint = \"\/api\/v\/1\/user\/reg\"\n\t\t\theaders[\"Clearblade-Systemkey\"] = []string{syskey}\n\t\t\theaders[\"Clearblade-Systemsecret\"] = []string{syssec}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unreachable code detected\")\n\t\t}\n\t\tvar err error\n\t\tcreds, err = c.credentials()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Cannot create that kind of user\")\n\t}\n\tresp, err := post(endpoint, payload, creds, headers)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Status code: %d, Error in authenticating, %v\\n\", resp.StatusCode, resp.Body)\n\t}\n\tvar token string = \"\"\n\tswitch kind {\n\tcase createDevUser:\n\t\ttoken = resp.Body.(map[string]interface{})[\"dev_token\"].(string)\n\tcase createUser:\n\t\ttoken = resp.Body.(map[string]interface{})[\"user_id\"].(string)\n\t}\n\n\tif token == \"\" {\n\t\treturn nil, fmt.Errorf(\"Token not present in response from platform %+v\", resp.Body)\n\t}\n\treturn resp.Body.(map[string]interface{}), nil\n}\n\nfunc logout(c cbClient) error {\n\tcreds, err := c.credentials()\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := post(c.preamble()+\"\/logout\", nil, creds, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Error in authenticating %v\\n\", resp.Body)\n\t}\n\treturn nil\n}\n\nfunc do(r *CbReq, creds [][]string) (*CbResp, error) {\n\tvar bodyToSend *bytes.Buffer\n\tif r.Body != nil {\n\t\tb, jsonErr := json.Marshal(r.Body)\n\t\tif jsonErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"JSON Encoding Error: %v\", jsonErr)\n\t\t}\n\t\tbodyToSend = bytes.NewBuffer(b)\n\t} else {\n\t\tbodyToSend = nil\n\t}\n\turl := CB_ADDR + r.Endpoint\n\tif r.QueryString != \"\" {\n\t\turl += \"?\" + r.QueryString\n\t}\n\tvar req *http.Request\n\tvar reqErr error\n\tif bodyToSend != nil {\n\t\treq, reqErr = http.NewRequest(r.Method, url, bodyToSend)\n\t} else {\n\t\treq, reqErr = http.NewRequest(r.Method, url, nil)\n\t}\n\tif reqErr != nil {\n\t\treturn nil, fmt.Errorf(\"Request Creation Error: %v\", reqErr)\n\t}\n\tif r.Headers != nil {\n\t\tfor hed, val := range r.Headers {\n\t\t\tfor _, vv := range val {\n\t\t\t\treq.Header.Add(hed, vv)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, c := range creds {\n\t\tif len(c) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"Request Creation Error: Invalid credential header supplied\")\n\t\t}\n\t\treq.Header.Add(c[0], c[1])\n\t}\n\n\tcli := &http.Client{Transport: tr}\n\tresp, err := cli.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error Making Request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, readErr := ioutil.ReadAll(resp.Body)\n\tif readErr != nil {\n\t\treturn nil, fmt.Errorf(\"Error Reading Response Body: %v\", readErr)\n\t}\n\tvar d interface{}\n\tif len(body) == 0 {\n\t\treturn &CbResp{\n\t\t\tBody: nil,\n\t\t\tStatusCode: resp.StatusCode,\n\t\t}, nil\n\t}\n\tbuf := bytes.NewBuffer(body)\n\tdec := json.NewDecoder(buf)\n\tdecErr := dec.Decode(&d)\n\tvar bod interface{}\n\tif decErr != nil {\n\t\t\/\/\t\treturn nil, fmt.Errorf(\"JSON Decoding Error: %v\\n With Body: %v\\n\", decErr, string(body))\n\t\tbod = string(body)\n\t}\n\tswitch d.(type) {\n\tcase []interface{}:\n\t\tbod = d\n\tcase map[string]interface{}:\n\t\tbod = d\n\tdefault:\n\t\tbod = string(body)\n\t}\n\treturn &CbResp{\n\t\tBody: bod,\n\t\tStatusCode: resp.StatusCode,\n\t}, nil\n}\n\n\/\/standard http verbs\n\nfunc get(endpoint string, query map[string]string, creds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: nil,\n\t\tMethod: \"GET\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: query_to_string(query),\n\t\tHeaders: headers,\n\t}\n\treturn do(req, creds)\n}\n\nfunc post(endpoint string, body interface{}, creds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: body,\n\t\tMethod: \"POST\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: \"\",\n\t\tHeaders: headers,\n\t}\n\treturn do(req, creds)\n}\n\nfunc put(endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: body,\n\t\tMethod: \"PUT\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: \"\",\n\t\tHeaders: headers,\n\t}\n\treturn do(req, heads)\n}\n\nfunc delete(endpoint string, query map[string]string, heds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: nil,\n\t\tMethod: \"DELETE\",\n\t\tEndpoint: endpoint,\n\t\tHeaders: headers,\n\t\tQueryString: query_to_string(query),\n\t}\n\treturn do(req, heds)\n}\n\nfunc query_to_string(query map[string]string) string {\n\tqryStr := \"\"\n\tfor k, v := range query {\n\t\tqryStr += k + \"=\" + v + \"&\"\n\t}\n\treturn strings.TrimSuffix(qryStr, \"&\")\n}\n<commit_msg>Added new way to create a dev client with a token<commit_after>package GoSDK\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\tmqtt \"github.com\/clearblade\/mqtt_parsing\"\n\t\"github.com\/clearblade\/mqttclient\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/\/CB_ADDR is the address of the ClearBlade Platform you are speaking with\n\tCB_ADDR = \"https:\/\/rtp.clearblade.com\"\n\t\/\/CB_MSG_ADDR is the messaging address you wish to speak to\n\tCB_MSG_ADDR = \"rtp.clearblade.com:1883\"\n\n\t_HEADER_KEY_KEY = \"ClearBlade-SystemKey\"\n\t_HEADER_SECRET_KEY = \"ClearBlade-SystemSecret\"\n)\n\nvar tr = &http.Transport{\n\tTLSClientConfig: &tls.Config{InsecureSkipVerify: false},\n}\n\nconst (\n\tcreateDevUser = iota\n\tcreateUser\n)\n\n\/\/Client is a convience interface for API consumers, if they want to use the same functions for both developer users and unprivleged users\ntype Client interface {\n\t\/\/session bookkeeping calls\n\tAuthenticate() error\n\tLogout() error\n\n\t\/\/data calls\n\tInsertData(string, interface{}) error\n\tUpdateData(string, *Query, map[string]interface{}) error\n\tGetData(string, *Query) (map[string]interface{}, error)\n\tGetDataByName(string, *Query) (map[string]interface{}, error)\n\tGetDataByKeyAndName(string, string, *Query) (map[string]interface{}, error)\n\tDeleteData(string, *Query) error\n\n\t\/\/mqtt calls\n\tInitializeMQTT(string, string, int) error\n\tConnectMQTT(*tls.Config, *LastWillPacket) error\n\tPublish(string, []byte, int) error\n\tSubscribe(string, int) (<-chan *mqtt.Publish, error)\n\tUnsubscribe(string) error\n\tDisconnect() error\n}\n\n\/\/cbClient will supply various information that differs between privleged and unprivleged users\n\/\/this interface is meant to be unexported\ntype cbClient interface {\n\tcredentials() ([][]string, error) \/\/the inner slice is a tuple of \"Header\":\"Value\"\n\tpreamble() string\n\tsetToken(string)\n\tgetToken() string\n\tgetSystemInfo() (string, string)\n\tgetMessageId() uint16\n}\n\n\/\/UserClient is the type for users\ntype UserClient struct {\n\tUserToken string\n\tmrand *rand.Rand\n\tMQTTClient *mqttclient.Client\n\tSystemKey string\n\tSystemSecret string\n\tEmail string\n\tPassword string\n}\n\n\/\/DevClient is the type for developers\ntype DevClient struct {\n\tDevToken string\n\tmrand *rand.Rand\n\tMQTTClient *mqttclient.Client\n\tEmail string\n\tPassword string\n}\n\n\/\/CbReq is a wrapper around an HTTP request\ntype CbReq struct {\n\tBody interface{}\n\tMethod string\n\tEndpoint string\n\tQueryString string\n\tHeaders map[string][]string\n}\n\n\/\/CbResp is a wrapper around an HTTP response\ntype CbResp struct {\n\tBody interface{}\n\tStatusCode int\n}\n\n\/\/NewUserClient allocates a new UserClient struct\nfunc NewUserClient(systemkey, systemsecret, email, password string) *UserClient {\n\treturn &UserClient{\n\t\tUserToken: \"\",\n\t\tmrand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tMQTTClient: nil,\n\t\tSystemSecret: systemsecret,\n\t\tSystemKey: systemkey,\n\t\tEmail: email,\n\t\tPassword: password,\n\t}\n}\n\n\/\/NewDevClient allocates a new DevClient struct\nfunc NewDevClient(email, password string) *DevClient {\n\treturn &DevClient{\n\t\tDevToken: \"\",\n\t\tmrand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tMQTTClient: nil,\n\t\tEmail: email,\n\t\tPassword: password,\n\t}\n}\n\nfunc NewDevClientWithToken(token, email string) *DevClient {\n\treturn &DevClient{\n\t\tDevToken: token,\n\t\tmrand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t\tMQTTClient: nil,\n\t\tEmail: email,\n\t\tPassword: \"\",\n\t}\n}\n\n\/\/Authenticate retrieves a token from the specified Clearblade Platform\nfunc (u *UserClient) Authenticate() error {\n\treturn authenticate(u, u.Email, u.Password)\n}\n\nfunc (u *UserClient) AuthAnon() error {\n\treturn authAnon(u)\n}\n\n\/\/Authenticate retrieves a token from the specified Clearblade Platform\nfunc (d *DevClient) Authenticate() error {\n\treturn authenticate(d, d.Email, d.Password)\n}\n\n\/\/Register creates a new user\nfunc (u *UserClient) Register(username, password string) error {\n\tif u.UserToken == \"\" {\n\t\treturn fmt.Errorf(\"Must be logged in to create users\")\n\t}\n\t_, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, \"\", \"\", \"\")\n\treturn err\n}\n\n\/\/RegisterUser creates a new user, returning the body of the response.\nfunc (u *UserClient) RegisterUser(username, password string) (map[string]interface{}, error) {\n\tif u.UserToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"Must be logged in to create users\")\n\t}\n\tresp, err := register(u, createUser, username, password, u.SystemKey, u.SystemSecret, \"\", \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/Registers a new developer\nfunc (d *DevClient) Register(username, password, fname, lname, org string) error {\n\tresp, err := register(d, createDevUser, username, password, \"\", \"\", fname, lname, org)\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\td.DevToken = resp[\"dev_token\"].(string)\n\t\treturn nil\n\t}\n}\n\nfunc (d *DevClient) RegisterNewUser(username, password, systemkey, systemsecret string) (map[string]interface{}, error) {\n\tif d.DevToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"Must authenticate first\")\n\t}\n\treturn register(d, createUser, username, password, systemkey, systemsecret, \"\", \"\", \"\")\n\n}\n\n\/\/Register creates a new developer user\nfunc (d *DevClient) RegisterDevUser(username, password, fname, lname, org string) (map[string]interface{}, error) {\n\tresp, err := register(d, createDevUser, username, password, \"\", \"\", fname, lname, org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/Logout ends the session\nfunc (u *UserClient) Logout() error {\n\treturn logout(u)\n}\n\n\/\/Logout ends the session\nfunc (d *DevClient) Logout() error {\n\treturn logout(d)\n}\n\n\/\/Below are some shared functions\n\nfunc authenticate(c cbClient, username, password string) error {\n\tvar creds [][]string\n\tswitch c.(type) {\n\tcase *UserClient:\n\t\tvar err error\n\t\tcreds, err = c.credentials()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tresp, err := post(c.preamble()+\"\/auth\", map[string]interface{}{\n\t\t\"email\": username,\n\t\t\"password\": password,\n\t}, creds, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Error in authenticating, Status Code: %d, %v\\n\", resp.StatusCode, resp.Body)\n\t}\n\n\tvar token string = \"\"\n\tswitch c.(type) {\n\tcase *UserClient:\n\t\ttoken = resp.Body.(map[string]interface{})[\"user_token\"].(string)\n\tcase *DevClient:\n\t\ttoken = resp.Body.(map[string]interface{})[\"dev_token\"].(string)\n\t}\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"Token not present i response from platform %+v\", resp.Body)\n\t}\n\tc.setToken(token)\n\treturn nil\n}\n\nfunc authAnon(c cbClient) error {\n\tcreds, err := c.credentials()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid client: %+s\", err.Error())\n\t}\n\tresp, err := post(c.preamble()+\"\/anon\", nil, creds, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving anon user token: %s\", err.Error())\n\t}\n\ttoken := resp.Body.(map[string]interface{})[\"user_token\"].(string)\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"Token not present in response from platform %+v\", resp.Body)\n\t}\n\tc.setToken(token)\n\treturn nil\n}\n\nfunc register(c cbClient, kind int, username, password, syskey, syssec, fname, lname, org string) (map[string]interface{}, error) {\n\tpayload := map[string]interface{}{\n\t\t\"email\": username,\n\t\t\"password\": password,\n\t}\n\tvar endpoint string\n\theaders := make(map[string][]string)\n\tvar creds [][]string\n\tswitch kind {\n\tcase createDevUser:\n\t\tendpoint = \"\/admin\/reg\"\n\t\tpayload[\"fname\"] = fname\n\t\tpayload[\"lname\"] = lname\n\t\tpayload[\"org\"] = org\n\tcase createUser:\n\t\tswitch c.(type) {\n\t\tcase *DevClient:\n\t\t\tif syskey == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System key required\")\n\t\t\t}\n\t\t\tendpoint = fmt.Sprintf(\"\/admin\/user\/%s\", syskey)\n\t\tcase *UserClient:\n\t\t\tif syskey == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System key required\")\n\t\t\t}\n\t\t\tif syssec == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"System secret required\")\n\t\t\t}\n\t\t\tendpoint = \"\/api\/v\/1\/user\/reg\"\n\t\t\theaders[\"Clearblade-Systemkey\"] = []string{syskey}\n\t\t\theaders[\"Clearblade-Systemsecret\"] = []string{syssec}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unreachable code detected\")\n\t\t}\n\t\tvar err error\n\t\tcreds, err = c.credentials()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Cannot create that kind of user\")\n\t}\n\tresp, err := post(endpoint, payload, creds, headers)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Status code: %d, Error in authenticating, %v\\n\", resp.StatusCode, resp.Body)\n\t}\n\tvar token string = \"\"\n\tswitch kind {\n\tcase createDevUser:\n\t\ttoken = resp.Body.(map[string]interface{})[\"dev_token\"].(string)\n\tcase createUser:\n\t\ttoken = resp.Body.(map[string]interface{})[\"user_id\"].(string)\n\t}\n\n\tif token == \"\" {\n\t\treturn nil, fmt.Errorf(\"Token not present in response from platform %+v\", resp.Body)\n\t}\n\treturn resp.Body.(map[string]interface{}), nil\n}\n\nfunc logout(c cbClient) error {\n\tcreds, err := c.credentials()\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := post(c.preamble()+\"\/logout\", nil, creds, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Error in authenticating %v\\n\", resp.Body)\n\t}\n\treturn nil\n}\n\nfunc do(r *CbReq, creds [][]string) (*CbResp, error) {\n\tvar bodyToSend *bytes.Buffer\n\tif r.Body != nil {\n\t\tb, jsonErr := json.Marshal(r.Body)\n\t\tif jsonErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"JSON Encoding Error: %v\", jsonErr)\n\t\t}\n\t\tbodyToSend = bytes.NewBuffer(b)\n\t} else {\n\t\tbodyToSend = nil\n\t}\n\turl := CB_ADDR + r.Endpoint\n\tif r.QueryString != \"\" {\n\t\turl += \"?\" + r.QueryString\n\t}\n\tvar req *http.Request\n\tvar reqErr error\n\tif bodyToSend != nil {\n\t\treq, reqErr = http.NewRequest(r.Method, url, bodyToSend)\n\t} else {\n\t\treq, reqErr = http.NewRequest(r.Method, url, nil)\n\t}\n\tif reqErr != nil {\n\t\treturn nil, fmt.Errorf(\"Request Creation Error: %v\", reqErr)\n\t}\n\tif r.Headers != nil {\n\t\tfor hed, val := range r.Headers {\n\t\t\tfor _, vv := range val {\n\t\t\t\treq.Header.Add(hed, vv)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, c := range creds {\n\t\tif len(c) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"Request Creation Error: Invalid credential header supplied\")\n\t\t}\n\t\treq.Header.Add(c[0], c[1])\n\t}\n\n\tcli := &http.Client{Transport: tr}\n\tresp, err := cli.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error Making Request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, readErr := ioutil.ReadAll(resp.Body)\n\tif readErr != nil {\n\t\treturn nil, fmt.Errorf(\"Error Reading Response Body: %v\", readErr)\n\t}\n\tvar d interface{}\n\tif len(body) == 0 {\n\t\treturn &CbResp{\n\t\t\tBody: nil,\n\t\t\tStatusCode: resp.StatusCode,\n\t\t}, nil\n\t}\n\tbuf := bytes.NewBuffer(body)\n\tdec := json.NewDecoder(buf)\n\tdecErr := dec.Decode(&d)\n\tvar bod interface{}\n\tif decErr != nil {\n\t\t\/\/\t\treturn nil, fmt.Errorf(\"JSON Decoding Error: %v\\n With Body: %v\\n\", decErr, string(body))\n\t\tbod = string(body)\n\t}\n\tswitch d.(type) {\n\tcase []interface{}:\n\t\tbod = d\n\tcase map[string]interface{}:\n\t\tbod = d\n\tdefault:\n\t\tbod = string(body)\n\t}\n\treturn &CbResp{\n\t\tBody: bod,\n\t\tStatusCode: resp.StatusCode,\n\t}, nil\n}\n\n\/\/standard http verbs\n\nfunc get(endpoint string, query map[string]string, creds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: nil,\n\t\tMethod: \"GET\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: query_to_string(query),\n\t\tHeaders: headers,\n\t}\n\treturn do(req, creds)\n}\n\nfunc post(endpoint string, body interface{}, creds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: body,\n\t\tMethod: \"POST\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: \"\",\n\t\tHeaders: headers,\n\t}\n\treturn do(req, creds)\n}\n\nfunc put(endpoint string, body interface{}, heads [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: body,\n\t\tMethod: \"PUT\",\n\t\tEndpoint: endpoint,\n\t\tQueryString: \"\",\n\t\tHeaders: headers,\n\t}\n\treturn do(req, heads)\n}\n\nfunc delete(endpoint string, query map[string]string, heds [][]string, headers map[string][]string) (*CbResp, error) {\n\treq := &CbReq{\n\t\tBody: nil,\n\t\tMethod: \"DELETE\",\n\t\tEndpoint: endpoint,\n\t\tHeaders: headers,\n\t\tQueryString: query_to_string(query),\n\t}\n\treturn do(req, heds)\n}\n\nfunc query_to_string(query map[string]string) string {\n\tqryStr := \"\"\n\tfor k, v := range query {\n\t\tqryStr += k + \"=\" + v + \"&\"\n\t}\n\treturn strings.TrimSuffix(qryStr, \"&\")\n}\n<|endoftext|>"} {"text":"<commit_before>package wsl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unicode\"\n)\n\nfunc Hook() {\n\tsigs := make(chan os.Signal, 1)\n\tdone := make(chan bool, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase sig := <-sigs:\n\t\t\t\tfmt.Println(sig)\n\t\t\t\t\/\/ cleanup code here\n\t\t\t\tdone <- true\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-done\n\tfmt.Println(\"Bye!\")\n}\n\nfunc extractParamsFromMap(m map[string]string) []interface{} {\n\tret := []interface{}{}\n\tfor i := 0; ; i++ {\n\t\tif val, ok := m[fmt.Sprint(\"$\", i)]; ok {\n\t\t\tret = append(ret, val)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc extractScriptParamsFromMap(m map[string]string) map[string]string {\n\tret := map[string]string{}\n\tfor k, v := range m {\n\t\tif strings.HasPrefix(k, \"__\") {\n\t\t\tsqlSafe(&v)\n\t\t\tret[k] = v\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc valuesToMap(values ...map[string][]string) map[string]string {\n\tret := map[string]string{}\n\tfor _, vs := range values {\n\t\tfor k, v := range vs {\n\t\t\tret[k] = v[0]\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ true if the first character is uppercase, false otherwise\nfunc shouldExport(sql string) bool {\n\tif !unicode.IsLetter([]rune(sql)[0]) {\n\t\treturn false\n\t}\n\treturn strings.ToUpper(sql[0:1]) == sql[0:1]\n}\n\n\/\/ return whether export the result of this sql statement or not\nfunc sqlNormalize(sql *string) {\n\t*sql = strings.TrimSpace(*sql)\n\tvar ret string\n\tlines := strings.Split(*sql, \"\\n\")\n\tfor _, line := range lines {\n\t\tlineTrimmed := strings.TrimSpace(line)\n\t\tif lineTrimmed != \"\" && !strings.HasPrefix(lineTrimmed, \"-- \") {\n\t\t\tret += line + \"\\n\"\n\t\t}\n\t}\n\t*sql = ret\n}\n\nfunc sqlSafe(s *string) {\n\t*s = strings.Replace(*s, \"'\", \"''\", -1)\n\t*s = strings.Replace(*s, \"--\", \"\", -1)\n}\n\nfunc isQuery(sql string) bool {\n\tsqlUpper := strings.ToUpper(strings.TrimSpace(sql))\n\tif strings.HasPrefix(sqlUpper, \"SELECT\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"SHOW\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"DESCRIBE\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"EXPLAIN\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc ConvertInterfaceArrayToStringArray(arrayOfInterfaces []interface{}) ([]string, error) {\n\tret := []string{}\n\tfor _, v := range arrayOfInterfaces {\n\t\tif s, ok := v.(string); ok {\n\t\t\tret = append(ret, s)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Failed to convert.\")\n\t\t}\n\t}\n\treturn ret, nil\n}\n\nvar ConvertMapOfInterfacesToMapOfStrings = func(data map[string]interface{}) (map[string]string, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"Cannot convert nil.\")\n\t}\n\tret := map[string]string{}\n\tfor k, v := range data {\n\t\tif v == nil {\n\t\t\treturn nil, errors.New(\"Data contains nil.\")\n\t\t}\n\t\tret[k] = v.(string)\n\t}\n\treturn ret, nil\n}\n\nvar ConvertMapOfStringsToMapOfInterfaces = func(data map[string]string) (map[string]interface{}, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"Cannot convert nil.\")\n\t}\n\tret := map[string]interface{}{}\n\tfor k, v := range data {\n\t\tret[k] = v\n\t}\n\treturn ret, nil\n}\n<commit_msg>Update.<commit_after>package wsl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unicode\"\n)\n\nfunc Hook() {\n\tsigs := make(chan os.Signal, 1)\n\tdone := make(chan bool, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase sig := <-sigs:\n\t\t\t\tfmt.Println(sig)\n\t\t\t\t\/\/ cleanup code here\n\t\t\t\tdone <- true\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-done\n\tfmt.Println(\"Bye!\")\n}\n\nfunc extractParamsFromMap(m map[string]string) []interface{} {\n\tret := []interface{}{}\n\tfor i := 0; ; i++ {\n\t\tif val, ok := m[fmt.Sprint(\"_\", i)]; ok {\n\t\t\tret = append(ret, val)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc extractScriptParamsFromMap(m map[string]string) map[string]string {\n\tret := map[string]string{}\n\tfor k, v := range m {\n\t\tif strings.HasPrefix(k, \"__\") {\n\t\t\tsqlSafe(&v)\n\t\t\tret[k] = v\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc valuesToMap(values ...map[string][]string) map[string]string {\n\tret := map[string]string{}\n\tfor _, vs := range values {\n\t\tfor k, v := range vs {\n\t\t\tret[k] = v[0]\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ true if the first character is uppercase, false otherwise\nfunc shouldExport(sql string) bool {\n\tif !unicode.IsLetter([]rune(sql)[0]) {\n\t\treturn false\n\t}\n\treturn strings.ToUpper(sql[0:1]) == sql[0:1]\n}\n\n\/\/ return whether export the result of this sql statement or not\nfunc sqlNormalize(sql *string) {\n\t*sql = strings.TrimSpace(*sql)\n\tvar ret string\n\tlines := strings.Split(*sql, \"\\n\")\n\tfor _, line := range lines {\n\t\tlineTrimmed := strings.TrimSpace(line)\n\t\tif lineTrimmed != \"\" && !strings.HasPrefix(lineTrimmed, \"-- \") {\n\t\t\tret += line + \"\\n\"\n\t\t}\n\t}\n\t*sql = ret\n}\n\nfunc sqlSafe(s *string) {\n\t*s = strings.Replace(*s, \"'\", \"''\", -1)\n\t*s = strings.Replace(*s, \"--\", \"\", -1)\n}\n\nfunc isQuery(sql string) bool {\n\tsqlUpper := strings.ToUpper(strings.TrimSpace(sql))\n\tif strings.HasPrefix(sqlUpper, \"SELECT\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"SHOW\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"DESCRIBE\") ||\n\t\tstrings.HasPrefix(sqlUpper, \"EXPLAIN\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc ConvertInterfaceArrayToStringArray(arrayOfInterfaces []interface{}) ([]string, error) {\n\tret := []string{}\n\tfor _, v := range arrayOfInterfaces {\n\t\tif s, ok := v.(string); ok {\n\t\t\tret = append(ret, s)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Failed to convert.\")\n\t\t}\n\t}\n\treturn ret, nil\n}\n\nvar ConvertMapOfInterfacesToMapOfStrings = func(data map[string]interface{}) (map[string]string, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"Cannot convert nil.\")\n\t}\n\tret := map[string]string{}\n\tfor k, v := range data {\n\t\tif v == nil {\n\t\t\treturn nil, errors.New(\"Data contains nil.\")\n\t\t}\n\t\tret[k] = v.(string)\n\t}\n\treturn ret, nil\n}\n\nvar ConvertMapOfStringsToMapOfInterfaces = func(data map[string]string) (map[string]interface{}, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"Cannot convert nil.\")\n\t}\n\tret := map[string]interface{}{}\n\tfor k, v := range data {\n\t\tret[k] = v\n\t}\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package amalgam\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"math\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/juju\/errors\"\n)\n\n\/\/Custom data type for easy mapping of jsonB DB column to a GO struct member.\ntype PgJson map[string]interface{}\n\n\/\/ This is the only method of the interface Valuer under the sql\/driver package.\n\/\/ Types implementing this interface can convert themselves to a driver\n\/\/ acceptable value.\nfunc (p PgJson) Value() (driver.Value, error) {\n\tj, err := json.Marshal(p)\n\treturn j, err\n}\n\n\/\/ Scan is the only method of the Scanner interface under the sql package.\n\/\/ Scan assigns a value from the DB driver to the object that calls it\nfunc (p *PgJson) Scan(src interface{}) error {\n\tsource, ok := src.([]byte)\n\tif !ok {\n\t\treturn errors.New(\"Type assertion .([]byte) failed.\")\n\t}\n\n\terr := json.Unmarshal(source, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype NullPgJson struct {\n\tPgJson PgJson\n\tValid bool\n}\n\nfunc (p NullPgJson) Value() (driver.Value, error) {\n\tif !p.Valid {\n\t\treturn nil, nil\n\t}\n\tj, err := json.Marshal(p)\n\treturn j, err\n}\n\nfunc (p *NullPgJson) Scan(src interface{}) error {\n\tif src == nil {\n\t\tp.Valid = false\n\t\treturn nil\n\t}\n\tsource, ok := src.([]byte)\n\tif !ok {\n\t\treturn errors.New(\"Type assertion .([]byte) failed.\")\n\t}\n\n\tjs := PgJson{}\n\terr := json.Unmarshal(source, &js)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Valid = true\n\tp.PgJson = js\n\n\treturn nil\n}\n\nfunc GetIPFromRequest(r *http.Request) (string, error) {\n\tip, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\treturn ip, nil\n}\n\nfunc DecodeTracker(et string) (string, error) {\n\tif len(et)%3 != 0 {\n\t\trem := len(et) % 3\n\t\tfor i := 3; i > rem; i-- {\n\t\t\tet = et + \"=\"\n\n\t\t}\n\t}\n\t\/\/et = strings.Replace(et, \".\", \"=\", -1)\n\te, err := base64.URLEncoding.DecodeString(et)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tblock, err := aes.NewCipher([]byte(Secret[:24]))\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tblockmode := cipher.NewCBCDecrypter(\n\t\tblock, []byte(Secret[len(Secret)-16:]),\n\t)\n\n\tblockmode.CryptBlocks(e, e)\n\n\tb := int(e[4])\n\tvar exp = 1\n\tfor i := 5; i < len(e); i++ {\n\t\tif int(e[i]) != 0 {\n\t\t\tb += int(math.Pow(float64(256), float64(exp))) * int(e[i])\n\t\t}\n\t\texp++\n\t}\n\n\ttracker := strconv.Itoa(b)\n\n\treturn tracker, nil\n}\n\nfunc GetTrackerFromRequest(r *http.Request) string {\n\tvar tracker string = \"\"\n\tcookies := r.Cookies()\n\tfor i := 0; i < len(cookies); i++ {\n\t\tcookie := cookies[i]\n\t\tif cookie.Name == \"trackerid\" {\n\t\t\ttracker = cookie.Value\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn tracker\n}\n<commit_msg>updating ID encryption logic as per encryped-id v2.0<commit_after>package amalgam\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/sha256\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"hash\/crc32\"\n\t\"math\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n)\n\n\/\/Custom data type for easy mapping of jsonB DB column to a GO struct member.\ntype PgJson map[string]interface{}\n\n\/\/ This is the only method of the interface Valuer under the sql\/driver package.\n\/\/ Types implementing this interface can convert themselves to a driver\n\/\/ acceptable value.\nfunc (p PgJson) Value() (driver.Value, error) {\n\tj, err := json.Marshal(p)\n\treturn j, err\n}\n\n\/\/ Scan is the only method of the Scanner interface under the sql package.\n\/\/ Scan assigns a value from the DB driver to the object that calls it\nfunc (p *PgJson) Scan(src interface{}) error {\n\tsource, ok := src.([]byte)\n\tif !ok {\n\t\treturn errors.New(\"Type assertion .([]byte) failed.\")\n\t}\n\n\terr := json.Unmarshal(source, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype NullPgJson struct {\n\tPgJson PgJson\n\tValid bool\n}\n\nfunc (p NullPgJson) Value() (driver.Value, error) {\n\tif !p.Valid {\n\t\treturn nil, nil\n\t}\n\tj, err := json.Marshal(p)\n\treturn j, err\n}\n\nfunc (p *NullPgJson) Scan(src interface{}) error {\n\tif src == nil {\n\t\tp.Valid = false\n\t\treturn nil\n\t}\n\tsource, ok := src.([]byte)\n\tif !ok {\n\t\treturn errors.New(\"Type assertion .([]byte) failed.\")\n\t}\n\n\tjs := PgJson{}\n\terr := json.Unmarshal(source, &js)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Valid = true\n\tp.PgJson = js\n\n\treturn nil\n}\n\nfunc GetIPFromRequest(r *http.Request) (string, error) {\n\tip, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\treturn ip, nil\n}\n\nfunc EncodeID(id int64, model string) string {\n\tcrc := crc32.ChecksumIEEE(make([]byte, id)) & 0xffffffff\n\tmessage := make([]byte, 0)\n\n\tmm := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(mm, crc)\n\tmessage = append(message, mm...)\n\n\tmm = make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(mm, uint64(id))\n\tmessage = append(message, mm...)\n\n\tmm = make([]byte, 4)\n\tmessage = append(message, mm...)\n\n\tblock, err := aes.NewCipher([]byte(Secret[:32]))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tt := sha256.Sum256([]byte(Secret + model))\n\tiv := t[:16]\n\n\tblockmode := cipher.NewCBCEncrypter(block, iv)\n\n\tblockmode.CryptBlocks(message, message)\n\n\ttt := make([]byte, 32)\n\tbase64.URLEncoding.Encode(tt, message)\n\n\teid := strings.Replace(string(tt), \"=\", \"\", -1)\n\n\treturn eid\n\n}\n\nfunc DecodeEID(eid string, model string) (string, error) {\n\tif len(eid)%3 != 0 {\n\t\trem := len(eid) % 3\n\t\tfor i := 3; i > rem; i-- {\n\t\t\teid = eid + \"=\"\n\t\t}\n\t}\n\n\te, err := base64.URLEncoding.DecodeString(eid)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tblock, err := aes.NewCipher([]byte(Secret[:32]))\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tt := sha256.Sum256([]byte(Secret + model))\n\tiv := t[:16]\n\n\tblockmode := cipher.NewCBCDecrypter(block, iv)\n\n\tblockmode.CryptBlocks(e, e)\n\n\tb := int(e[4])\n\tvar exp = 1\n\tfor i := 5; i < len(e); i++ {\n\t\tif int(e[i]) != 0 {\n\t\t\tb += int(math.Pow(float64(256), float64(exp))) * int(e[i])\n\t\t}\n\t\texp++\n\t}\n\n\ttracker := strconv.Itoa(b)\n\n\treturn tracker, nil\n}\n\nfunc GetTrackerFromRequest(r *http.Request) string {\n\tvar tracker string = \"\"\n\tcookies := r.Cookies()\n\tfor i := 0; i < len(cookies); i++ {\n\t\tcookie := cookies[i]\n\t\tif cookie.Name == \"trackerid\" {\n\t\t\ttracker = cookie.Value\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn tracker\n}\n<|endoftext|>"} {"text":"<commit_before>package sjf\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"fmt\"\n)\n\n\/\/ RootURI re-constructs URI without path from *http.Request.\nfunc RootURI(r *http.Request) string {\n\tprotocol := \"http\"\n\thost := r.Host\n\tif r.TLS != nil {\n\t\tprotocol = \"https\"\n\t}\n\treturn protocol + \":\/\/\" + host\n}\n\n\/\/ URI re-constructs URI from *http.Request.\nfunc URI(r *http.Request) string {\n\treturn RootURI(r) + r.RequestURI\n}\n\n\/\/ SteamId extracts Steam ID from OpenID URI.\nfunc SteamId(uri string) (uint64, error) {\n\tp := strings.Split(uri, \"\/\")\n\tif len(p) != 6 {\n\t\treturn 0, fmt.Errorf(\"SteamId: invalid uri %q\", uri)\n\t}\n\treturn strconv.ParseUint(p[5], 10, 64)\n}\n<commit_msg>rename argument correctly on SteamId<commit_after>package sjf\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"fmt\"\n)\n\n\/\/ RootURI re-constructs URI without path from *http.Request.\nfunc RootURI(r *http.Request) string {\n\tprotocol := \"http\"\n\thost := r.Host\n\tif r.TLS != nil {\n\t\tprotocol = \"https\"\n\t}\n\treturn protocol + \":\/\/\" + host\n}\n\n\/\/ URI re-constructs URI from *http.Request.\nfunc URI(r *http.Request) string {\n\treturn RootURI(r) + r.RequestURI\n}\n\n\/\/ SteamId extracts Steam ID from OpenID ID.\nfunc SteamId(id string) (uint64, error) {\n\tp := strings.Split(id, \"\/\")\n\tif len(p) != 6 {\n\t\treturn 0, fmt.Errorf(\"SteamId: invalid id %q\", id)\n\t}\n\treturn strconv.ParseUint(p[5], 10, 64)\n}\n<|endoftext|>"} {"text":"<commit_before>package manikyr\n\nimport (\n\t\"image\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\t\n\t\"github.com\/disintegration\/imaging\"\n)\n\nfunc openImageWhenReady(file string) (image.Image, error) {\n\t\/\/ Retry opening the image until err != image.ErrFormat\n\t\/\/ or the next retry would take over a minute.\n\t\/\/ FIXME\n\t\n\tvar img image.Image\n\tvar err error\n\tvar retry int\n\tvar t time.Duration\n\n\tfor {\n\t\tt = time.Duration(1000 * (retry * 2))\n\t\ttime.Sleep(time.Millisecond * t)\n\n\t\timg, err = imaging.Open(file)\n\t\tif err == image.ErrFormat {\n\t\t\tretry = retry + 1\n\t\t\tif retry*2 > 60 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn img, err\n}\n\nfunc Subdirectories(root string) ([]string, error) {\n\tvar dirs []string\n\n\tfiles, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\treturn dirs, err\n\t}\n\tfor _, file := range files {\n\t\tif file.IsDir() {\n\t\t\tdirs = append(dirs, path.Join(root, file.Name()))\n\t\t}\n\t}\n\n\treturn dirs, nil\n}\n\nfunc NthSubdir(root, dir string, n int) (bool, error) {\n\treturn path.Match(path.Join(root, \"*\" + strings.Repeat(\"\/*\", n)), dir)\n}\n\nfunc autoAdd(m *Manikyr, root, currentDir string) error {\n\tfiles, err := ioutil.ReadDir(currentDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, file := range files {\n\t\tfilePath := path.Join(root, file.Name())\n\t\tif file.IsDir() && m.ShouldWatchSubdir(currentDir, filePath) {\n\t\t\terr = m.AddSubdir(root, filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = autoAdd(m, root, filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if !file.IsDir() && m.ShouldCreateThumb(root, filePath) {\n\t\t\tthumbLocation := path.Join(m.ThumbDirGetter(filePath), m.ThumbNameGetter(filePath))\n\t\t\tif _, err := os.Stat(thumbLocation); os.IsNotExist(err) {\n\t\t\t\tgo m.createThumb(filePath, m.errChans[root])\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\t\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Add comments<commit_after>package manikyr\n\nimport (\n\t\"image\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\t\n\t\"github.com\/disintegration\/imaging\"\n)\n\nfunc openImageWhenReady(file string) (image.Image, error) {\n\t\/\/ Retry opening the image until err != image.ErrFormat\n\t\/\/ or the next retry would take over a minute.\n\t\/\/ FIXME\n\t\n\tvar img image.Image\n\tvar err error\n\tvar retry int\n\tvar t time.Duration\n\n\tfor {\n\t\tt = time.Duration(1000 * (retry * 2))\n\t\ttime.Sleep(time.Millisecond * t)\n\n\t\timg, err = imaging.Open(file)\n\t\tif err == image.ErrFormat {\n\t\t\tretry = retry + 1\n\t\t\tif retry*2 > 60 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn img, err\n}\n\n\/\/ Subdirectories returns a list of absolute paths of subdirectories in a given directory.\n\/\/ Returned error is non-nil if the directory provided could not be read.\nfunc Subdirectories(root string) ([]string, error) {\n\tvar dirs []string\n\n\tfiles, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\treturn dirs, err\n\t}\n\tfor _, file := range files {\n\t\tif file.IsDir() {\n\t\t\tdirs = append(dirs, path.Join(root, file.Name()))\n\t\t}\n\t}\n\n\treturn dirs, nil\n}\n\n\/\/ NthSubdir returns true if dir is the nth-level subdirectory of root, else false.\n\/\/ This function should never return a non-nil error, preserved for testing for now.\nfunc NthSubdir(root, dir string, n int) (bool, error) {\n\treturn path.Match(path.Join(root, \"*\" + strings.Repeat(\"\/*\", n)), dir)\n}\n\nfunc autoAdd(m *Manikyr, root, currentDir string) error {\n\tfiles, err := ioutil.ReadDir(currentDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, file := range files {\n\t\tfilePath := path.Join(root, file.Name())\n\t\tif file.IsDir() && m.ShouldWatchSubdir(currentDir, filePath) {\n\t\t\terr = m.AddSubdir(root, filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = autoAdd(m, root, filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if !file.IsDir() && m.ShouldCreateThumb(root, filePath) {\n\t\t\tthumbLocation := path.Join(m.ThumbDirGetter(filePath), m.ThumbNameGetter(filePath))\n\t\t\tif _, err := os.Stat(thumbLocation); os.IsNotExist(err) {\n\t\t\t\tgo m.createThumb(filePath, m.errChans[root])\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\t\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping\nfunc ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {\n\toptions := IDMappingOptions{\n\t\tHostUIDMapping: true,\n\t\tHostGIDMapping: true,\n\t}\n\tif subGIDMap == \"\" && subUIDMap != \"\" {\n\t\tsubGIDMap = subUIDMap\n\t}\n\tif subUIDMap == \"\" && subGIDMap != \"\" {\n\t\tsubUIDMap = subGIDMap\n\t}\n\tif len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {\n\t\tGIDMapSlice = UIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {\n\t\tUIDMapSlice = GIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && subUIDMap == \"\" && os.Getuid() != 0 {\n\t\tUIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getuid())}\n\t}\n\tif len(GIDMapSlice) == 0 && subGIDMap == \"\" && os.Getuid() != 0 {\n\t\tGIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getgid())}\n\t}\n\n\tif subUIDMap != \"\" && subGIDMap != \"\" {\n\t\tmappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create NewIDMappings for uidmap=%s gidmap=%s\", subUIDMap, subGIDMap)\n\t\t}\n\t\toptions.UIDMap = mappings.UIDs()\n\t\toptions.GIDMap = mappings.GIDs()\n\t}\n\tparsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, \"UID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseUIDMap UID=%s\", UIDMapSlice)\n\t}\n\tparsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, \"GID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseGIDMap GID=%s\", UIDMapSlice)\n\t}\n\toptions.UIDMap = append(options.UIDMap, parsedUIDMap...)\n\toptions.GIDMap = append(options.GIDMap, parsedGIDMap...)\n\tif len(options.UIDMap) > 0 {\n\t\toptions.HostUIDMapping = false\n\t}\n\tif len(options.GIDMap) > 0 {\n\t\toptions.HostGIDMapping = false\n\t}\n\treturn &options, nil\n}\n\n\/\/ GetRootlessRuntimeDir returns the runtime directory when running as non root\nfunc GetRootlessRuntimeDir(rootlessUid int) (string, error) {\n\truntimeDir := os.Getenv(\"XDG_RUNTIME_DIR\")\n\tif runtimeDir == \"\" {\n\t\ttmpDir := fmt.Sprintf(\"\/run\/user\/%d\", rootlessUid)\n\t\tst, err := system.Stat(tmpDir)\n\t\tif err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {\n\t\t\treturn tmpDir, nil\n\t\t}\n\t}\n\ttmpDir := fmt.Sprintf(\"%s\/%d\", os.TempDir(), rootlessUid)\n\tif err := os.MkdirAll(tmpDir, 0700); err != nil {\n\t\tlogrus.Errorf(\"failed to create %s: %v\", tmpDir, err)\n\t} else {\n\t\treturn tmpDir, nil\n\t}\n\thome, err := homeDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"neither XDG_RUNTIME_DIR nor HOME was set non-empty\")\n\t}\n\tresolvedHome, err := filepath.EvalSymlinks(home)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t}\n\treturn filepath.Join(resolvedHome, \"rundir\"), nil\n}\n\n\/\/ getRootlessDirInfo returns the parent path of where the storage for containers and\n\/\/ volumes will be in rootless mode\nfunc getRootlessDirInfo(rootlessUid int) (string, string, error) {\n\trootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdataDir := os.Getenv(\"XDG_DATA_HOME\")\n\tif dataDir == \"\" {\n\t\thome, err := homeDir()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", errors.Wrapf(err, \"neither XDG_DATA_HOME nor HOME was set non-empty\")\n\t\t}\n\t\t\/\/ runc doesn't like symlinks in the rootfs path, and at least\n\t\t\/\/ on CoreOS \/home is a symlink to \/var\/home, so resolve any symlink.\n\t\tresolvedHome, err := filepath.EvalSymlinks(home)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t\t}\n\t\tdataDir = filepath.Join(resolvedHome, \".local\", \"share\")\n\t}\n\treturn dataDir, rootlessRuntime, nil\n}\n\n\/\/ getRootlessStorageOpts returns the storage opts for containers running as non root\nfunc getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {\n\tvar opts StoreOptions\n\n\tdataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)\n\tif err != nil {\n\t\treturn opts, err\n\t}\n\topts.RunRoot = rootlessRuntime\n\topts.GraphRoot = filepath.Join(dataDir, \"containers\", \"storage\")\n\tif path, err := exec.LookPath(\"fuse-overlayfs\"); err == nil {\n\t\topts.GraphDriverName = \"overlay\"\n\t\topts.GraphDriverOptions = []string{fmt.Sprintf(\"overlay.mount_program=%s\", path)}\n\t} else {\n\t\topts.GraphDriverName = \"vfs\"\n\t}\n\treturn opts, nil\n}\n\ntype tomlOptionsConfig struct {\n\tMountProgram string `toml:\"mount_program\"`\n}\n\nfunc getTomlStorage(storeOptions *StoreOptions) *tomlConfig {\n\tconfig := new(tomlConfig)\n\n\tconfig.Storage.Driver = storeOptions.GraphDriverName\n\tconfig.Storage.RunRoot = storeOptions.RunRoot\n\tconfig.Storage.GraphRoot = storeOptions.GraphRoot\n\tfor _, i := range storeOptions.GraphDriverOptions {\n\t\ts := strings.Split(i, \"=\")\n\t\tif s[0] == \"overlay.mount_program\" {\n\t\t\tconfig.Storage.Options.MountProgram = s[1]\n\t\t}\n\t}\n\n\treturn config\n}\n\nfunc getRootlessUID() int {\n\tuidEnv := os.Getenv(\"_CONTAINERS_ROOTLESS_UID\")\n\tif uidEnv != \"\" {\n\t\tu, _ := strconv.Atoi(uidEnv)\n\t\treturn u\n\t}\n\treturn os.Geteuid()\n}\n\n\/\/ DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers\nfunc DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {\n\tuid := getRootlessUID()\n\treturn DefaultStoreOptions(uid != 0, uid)\n}\n\n\/\/ DefaultStoreOptions returns the default storage ops for containers\nfunc DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {\n\tvar (\n\t\tdefaultRootlessRunRoot string\n\t\tdefaultRootlessGraphRoot string\n\t\terr error\n\t)\n\tstorageOpts := defaultStoreOptions\n\tif rootless {\n\t\tstorageOpts, err = getRootlessStorageOpts(rootlessUid)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t}\n\n\tstorageConf, err := DefaultConfigFile(rootless)\n\tif err != nil {\n\t\treturn storageOpts, err\n\t}\n\tif _, err = os.Stat(storageConf); err == nil {\n\t\tdefaultRootlessRunRoot = storageOpts.RunRoot\n\t\tdefaultRootlessGraphRoot = storageOpts.GraphRoot\n\t\tstorageOpts = StoreOptions{}\n\t\tReloadConfigurationFile(storageConf, &storageOpts)\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn storageOpts, errors.Wrapf(err, \"cannot stat %s\", storageConf)\n\t}\n\n\tif rootless {\n\t\tif err == nil {\n\t\t\t\/\/ If the file did not specify a graphroot or runroot,\n\t\t\t\/\/ set sane defaults so we don't try and use root-owned\n\t\t\t\/\/ directories\n\t\t\tif storageOpts.RunRoot == \"\" {\n\t\t\t\tstorageOpts.RunRoot = defaultRootlessRunRoot\n\t\t\t}\n\t\t\tif storageOpts.GraphRoot == \"\" {\n\t\t\t\tstorageOpts.GraphRoot = defaultRootlessGraphRoot\n\t\t\t}\n\t\t} else {\n\t\t\tif err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"cannot make directory %s\", filepath.Dir(storageConf))\n\t\t\t}\n\t\t\tfile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"cannot open %s\", storageConf)\n\t\t\t}\n\n\t\t\ttomlConfiguration := getTomlStorage(&storageOpts)\n\t\t\tdefer file.Close()\n\t\t\tenc := toml.NewEncoder(file)\n\t\t\tif err := enc.Encode(tomlConfiguration); err != nil {\n\t\t\t\tos.Remove(storageConf)\n\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"failed to encode %s\", storageConf)\n\t\t\t}\n\t\t}\n\t}\n\treturn storageOpts, nil\n}\n\nfunc homeDir() (string, error) {\n\thome := os.Getenv(\"HOME\")\n\tif home == \"\" {\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"neither XDG_RUNTIME_DIR nor HOME was set non-empty\")\n\t\t}\n\t\thome = usr.HomeDir\n\t}\n\treturn home, nil\n}\n<commit_msg>utils: root in a userns uses global conf file<commit_after>package storage\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping\nfunc ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {\n\toptions := IDMappingOptions{\n\t\tHostUIDMapping: true,\n\t\tHostGIDMapping: true,\n\t}\n\tif subGIDMap == \"\" && subUIDMap != \"\" {\n\t\tsubGIDMap = subUIDMap\n\t}\n\tif subUIDMap == \"\" && subGIDMap != \"\" {\n\t\tsubUIDMap = subGIDMap\n\t}\n\tif len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {\n\t\tGIDMapSlice = UIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {\n\t\tUIDMapSlice = GIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && subUIDMap == \"\" && os.Getuid() != 0 {\n\t\tUIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getuid())}\n\t}\n\tif len(GIDMapSlice) == 0 && subGIDMap == \"\" && os.Getuid() != 0 {\n\t\tGIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getgid())}\n\t}\n\n\tif subUIDMap != \"\" && subGIDMap != \"\" {\n\t\tmappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create NewIDMappings for uidmap=%s gidmap=%s\", subUIDMap, subGIDMap)\n\t\t}\n\t\toptions.UIDMap = mappings.UIDs()\n\t\toptions.GIDMap = mappings.GIDs()\n\t}\n\tparsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, \"UID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseUIDMap UID=%s\", UIDMapSlice)\n\t}\n\tparsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, \"GID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseGIDMap GID=%s\", UIDMapSlice)\n\t}\n\toptions.UIDMap = append(options.UIDMap, parsedUIDMap...)\n\toptions.GIDMap = append(options.GIDMap, parsedGIDMap...)\n\tif len(options.UIDMap) > 0 {\n\t\toptions.HostUIDMapping = false\n\t}\n\tif len(options.GIDMap) > 0 {\n\t\toptions.HostGIDMapping = false\n\t}\n\treturn &options, nil\n}\n\n\/\/ GetRootlessRuntimeDir returns the runtime directory when running as non root\nfunc GetRootlessRuntimeDir(rootlessUid int) (string, error) {\n\truntimeDir := os.Getenv(\"XDG_RUNTIME_DIR\")\n\tif runtimeDir == \"\" {\n\t\ttmpDir := fmt.Sprintf(\"\/run\/user\/%d\", rootlessUid)\n\t\tst, err := system.Stat(tmpDir)\n\t\tif err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {\n\t\t\treturn tmpDir, nil\n\t\t}\n\t}\n\ttmpDir := fmt.Sprintf(\"%s\/%d\", os.TempDir(), rootlessUid)\n\tif err := os.MkdirAll(tmpDir, 0700); err != nil {\n\t\tlogrus.Errorf(\"failed to create %s: %v\", tmpDir, err)\n\t} else {\n\t\treturn tmpDir, nil\n\t}\n\thome, err := homeDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"neither XDG_RUNTIME_DIR nor HOME was set non-empty\")\n\t}\n\tresolvedHome, err := filepath.EvalSymlinks(home)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t}\n\treturn filepath.Join(resolvedHome, \"rundir\"), nil\n}\n\n\/\/ getRootlessDirInfo returns the parent path of where the storage for containers and\n\/\/ volumes will be in rootless mode\nfunc getRootlessDirInfo(rootlessUid int) (string, string, error) {\n\trootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdataDir := os.Getenv(\"XDG_DATA_HOME\")\n\tif dataDir == \"\" {\n\t\thome, err := homeDir()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", errors.Wrapf(err, \"neither XDG_DATA_HOME nor HOME was set non-empty\")\n\t\t}\n\t\t\/\/ runc doesn't like symlinks in the rootfs path, and at least\n\t\t\/\/ on CoreOS \/home is a symlink to \/var\/home, so resolve any symlink.\n\t\tresolvedHome, err := filepath.EvalSymlinks(home)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t\t}\n\t\tdataDir = filepath.Join(resolvedHome, \".local\", \"share\")\n\t}\n\treturn dataDir, rootlessRuntime, nil\n}\n\n\/\/ getRootlessStorageOpts returns the storage opts for containers running as non root\nfunc getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {\n\tvar opts StoreOptions\n\n\tdataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)\n\tif err != nil {\n\t\treturn opts, err\n\t}\n\topts.RunRoot = rootlessRuntime\n\topts.GraphRoot = filepath.Join(dataDir, \"containers\", \"storage\")\n\tif path, err := exec.LookPath(\"fuse-overlayfs\"); err == nil {\n\t\topts.GraphDriverName = \"overlay\"\n\t\topts.GraphDriverOptions = []string{fmt.Sprintf(\"overlay.mount_program=%s\", path)}\n\t} else {\n\t\topts.GraphDriverName = \"vfs\"\n\t}\n\treturn opts, nil\n}\n\ntype tomlOptionsConfig struct {\n\tMountProgram string `toml:\"mount_program\"`\n}\n\nfunc getTomlStorage(storeOptions *StoreOptions) *tomlConfig {\n\tconfig := new(tomlConfig)\n\n\tconfig.Storage.Driver = storeOptions.GraphDriverName\n\tconfig.Storage.RunRoot = storeOptions.RunRoot\n\tconfig.Storage.GraphRoot = storeOptions.GraphRoot\n\tfor _, i := range storeOptions.GraphDriverOptions {\n\t\ts := strings.Split(i, \"=\")\n\t\tif s[0] == \"overlay.mount_program\" {\n\t\t\tconfig.Storage.Options.MountProgram = s[1]\n\t\t}\n\t}\n\n\treturn config\n}\n\nfunc getRootlessUID() int {\n\tuidEnv := os.Getenv(\"_CONTAINERS_ROOTLESS_UID\")\n\tif uidEnv != \"\" {\n\t\tu, _ := strconv.Atoi(uidEnv)\n\t\treturn u\n\t}\n\treturn os.Geteuid()\n}\n\n\/\/ DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers\nfunc DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {\n\tuid := getRootlessUID()\n\treturn DefaultStoreOptions(uid != 0, uid)\n}\n\n\/\/ DefaultStoreOptions returns the default storage ops for containers\nfunc DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {\n\tvar (\n\t\tdefaultRootlessRunRoot string\n\t\tdefaultRootlessGraphRoot string\n\t\terr error\n\t)\n\tstorageOpts := defaultStoreOptions\n\tif rootless && rootlessUid != 0 {\n\t\tstorageOpts, err = getRootlessStorageOpts(rootlessUid)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t}\n\n\tstorageConf, err := DefaultConfigFile(rootless && rootlessUid != 0)\n\tif err != nil {\n\t\treturn storageOpts, err\n\t}\n\tif _, err = os.Stat(storageConf); err == nil {\n\t\tdefaultRootlessRunRoot = storageOpts.RunRoot\n\t\tdefaultRootlessGraphRoot = storageOpts.GraphRoot\n\t\tstorageOpts = StoreOptions{}\n\t\tReloadConfigurationFile(storageConf, &storageOpts)\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn storageOpts, errors.Wrapf(err, \"cannot stat %s\", storageConf)\n\t}\n\n\tif rootless && rootlessUid != 0 {\n\t\tif err == nil {\n\t\t\t\/\/ If the file did not specify a graphroot or runroot,\n\t\t\t\/\/ set sane defaults so we don't try and use root-owned\n\t\t\t\/\/ directories\n\t\t\tif storageOpts.RunRoot == \"\" {\n\t\t\t\tstorageOpts.RunRoot = defaultRootlessRunRoot\n\t\t\t}\n\t\t\tif storageOpts.GraphRoot == \"\" {\n\t\t\t\tstorageOpts.GraphRoot = defaultRootlessGraphRoot\n\t\t\t}\n\t\t} else {\n\t\t\tif err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"cannot make directory %s\", filepath.Dir(storageConf))\n\t\t\t}\n\t\t\tfile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"cannot open %s\", storageConf)\n\t\t\t}\n\n\t\t\ttomlConfiguration := getTomlStorage(&storageOpts)\n\t\t\tdefer file.Close()\n\t\t\tenc := toml.NewEncoder(file)\n\t\t\tif err := enc.Encode(tomlConfiguration); err != nil {\n\t\t\t\tos.Remove(storageConf)\n\n\t\t\t\treturn storageOpts, errors.Wrapf(err, \"failed to encode %s\", storageConf)\n\t\t\t}\n\t\t}\n\t}\n\treturn storageOpts, nil\n}\n\nfunc homeDir() (string, error) {\n\thome := os.Getenv(\"HOME\")\n\tif home == \"\" {\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"neither XDG_RUNTIME_DIR nor HOME was set non-empty\")\n\t\t}\n\t\thome = usr.HomeDir\n\t}\n\treturn home, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package venom\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Version of Venom\n\/\/ One Line for this, used by release.sh script\n\/\/ Keep \"const Version on one line\"\nconst Version = \"0.0.5\"\n\n\/\/ PrintFunc used by venom to print output\nvar PrintFunc = fmt.Printf\n\nvar (\n\texecutors = map[string]Executor{}\n\tcontexts = map[string]TestCaseContext{}\n)\n\nconst (\n\t\/\/ ContextKey is key for Test Case Context. this\n\t\/\/ can be used by executors for getting context\n\tContextKey = \"tcContext\"\n)\n\n\/\/ RegisterExecutor register Test Executors\nfunc RegisterExecutor(name string, e Executor) {\n\texecutors[name] = e\n}\n\n\/\/ getExecutorWrap initializes a test by name\n\/\/ no type -> exec is default\nfunc getExecutorWrap(t map[string]interface{}, tcc TestCaseContext) (*executorWrap, error) {\n\tvar name string\n\tvar retry, delay, timeout int\n\n\tif itype, ok := t[\"type\"]; ok {\n\t\tname = fmt.Sprintf(\"%s\", itype)\n\t}\n\n\tif name == \"\" && tcc.GetName() != \"default\" {\n\t\tname = tcc.GetName()\n\t} else if name == \"\" {\n\t\tname = \"exec\"\n\t}\n\n\tretry, errRetry := getAttrInt(t, \"retry\")\n\tif errRetry != nil {\n\t\treturn nil, errRetry\n\t}\n\tdelay, errDelay := getAttrInt(t, \"delay\")\n\tif errDelay != nil {\n\t\treturn nil, errDelay\n\t}\n\ttimeout, errTimeout := getAttrInt(t, \"timeout\")\n\tif errTimeout != nil {\n\t\treturn nil, errTimeout\n\t}\n\n\tif e, ok := executors[name]; ok {\n\t\tew := &executorWrap{\n\t\t\texecutor: e,\n\t\t\tretry: retry,\n\t\t\tdelay: delay,\n\t\t\ttimeout: timeout,\n\t\t}\n\t\treturn ew, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"[%s] type '%s' is not implemented\", tcc.GetName(), name)\n}\n\n\/\/ RegisterTestCaseContext new register TestCaseContext\nfunc RegisterTestCaseContext(name string, tcc TestCaseContext) {\n\tcontexts[name] = tcc\n}\n\n\/\/ getContextWrap initializes a context for a testcase\n\/\/ no type -> parent context\nfunc getContextWrap(tc *TestCase) (TestCaseContext, error) {\n\tif tc.Context == nil {\n\t\treturn contexts[\"default\"], nil\n\t}\n\tvar typeName string\n\tif itype, ok := tc.Context[\"type\"]; ok {\n\t\ttypeName = fmt.Sprintf(\"%s\", itype)\n\t}\n\n\tif typeName == \"\" {\n\t\treturn nil, fmt.Errorf(\"context type '%s' is not implemented\", typeName)\n\t}\n\tcontexts[typeName].SetTestCase(*tc)\n\treturn contexts[typeName], nil\n}\n\nfunc getAttrInt(t map[string]interface{}, name string) (int, error) {\n\tvar out int\n\tif i, ok := t[name]; ok {\n\t\tvar ok bool\n\t\tout, ok = i.(int)\n\t\tif !ok {\n\t\t\treturn -1, fmt.Errorf(\"attribute %s '%s' is not an integer\", name, i)\n\t\t}\n\t}\n\tif out < 0 {\n\t\tout = 0\n\t}\n\treturn out, nil\n}\n\n\/\/ Exit func display an error message on stderr and exit 1\nfunc Exit(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n\n\/\/ OutputResult output result to sdtout, files...\nfunc OutputResult(format string, resume, resumeFailures bool, outputDir string, tests Tests, elapsed time.Duration, detailsLevel string) error {\n\tvar data []byte\n\tvar err error\n\tswitch format {\n\tcase \"json\":\n\t\tdata, err = json.Marshal(tests)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format output json (%s)\", err)\n\t\t}\n\tcase \"yml\", \"yaml\":\n\t\tdata, err = yaml.Marshal(tests)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format output yaml (%s)\", err)\n\t\t}\n\tdefault:\n\t\tdataxml, errm := xml.Marshal(tests)\n\t\tif errm != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format xml output: %s\", errm)\n\t\t}\n\t\tdata = append([]byte(\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n\"), dataxml...)\n\t}\n\n\tif detailsLevel == \"high\" {\n\t\tPrintFunc(string(data))\n\t}\n\n\tif resume {\n\t\toutputResume(tests, elapsed, resumeFailures)\n\t}\n\n\tif outputDir != \"\" {\n\t\tfilename := outputDir + \"\/\" + \"test_results\" + \".\" + format\n\t\tif err := ioutil.WriteFile(filename, data, 0644); err != nil {\n\t\t\treturn fmt.Errorf(\"Error while creating file %s, err:%s\", filename, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc outputResume(tests Tests, elapsed time.Duration, resumeFailures bool) {\n\n\tif resumeFailures {\n\t\tfor _, t := range tests.TestSuites {\n\t\t\tif t.Failures > 0 || t.Errors > 0 {\n\t\t\t\tPrintFunc(\"FAILED %s\\n\", t.Name)\n\t\t\t\tPrintFunc(\"--------------\\n\")\n\n\t\t\t\tfor _, tc := range t.TestCases {\n\t\t\t\t\tfor _, f := range tc.Failures {\n\t\t\t\t\t\tPrintFunc(\"%s\\n\", f.Value)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, f := range tc.Errors {\n\t\t\t\t\t\tPrintFunc(\"%s\\n\", f.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tPrintFunc(\"-=-=-=-=-=-=-=-=-\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\ttotalTestCases := 0\n\ttotalTestSteps := 0\n\tfor _, t := range tests.TestSuites {\n\t\tif t.Failures > 0 || t.Errors > 0 {\n\t\t\tPrintFunc(\"FAILED %s\\n\", t.Name)\n\t\t}\n\t\ttotalTestCases += len(t.TestCases)\n\t\tfor _, tc := range t.TestCases {\n\t\t\ttotalTestSteps += len(tc.TestSteps)\n\t\t}\n\t}\n\n\tPrintFunc(\"Total:%d TotalOK:%d TotalKO:%d TotalSkipped:%d TotalTestSuite:%d TotalTestCase:%d TotalTestStep:%d Duration:%s\\n\",\n\t\ttests.Total,\n\t\ttests.TotalOK,\n\t\ttests.TotalKO,\n\t\ttests.TotalSkipped,\n\t\tlen(tests.TestSuites),\n\t\ttotalTestCases,\n\t\ttotalTestSteps,\n\t\telapsed,\n\t)\n}\n<commit_msg>[auto] bump version to v0.0.6<commit_after>package venom\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Version of Venom\n\/\/ One Line for this, used by release.sh script\n\/\/ Keep \"const Version on one line\"\nconst Version = \"0.0.6\"\n\n\/\/ PrintFunc used by venom to print output\nvar PrintFunc = fmt.Printf\n\nvar (\n\texecutors = map[string]Executor{}\n\tcontexts = map[string]TestCaseContext{}\n)\n\nconst (\n\t\/\/ ContextKey is key for Test Case Context. this\n\t\/\/ can be used by executors for getting context\n\tContextKey = \"tcContext\"\n)\n\n\/\/ RegisterExecutor register Test Executors\nfunc RegisterExecutor(name string, e Executor) {\n\texecutors[name] = e\n}\n\n\/\/ getExecutorWrap initializes a test by name\n\/\/ no type -> exec is default\nfunc getExecutorWrap(t map[string]interface{}, tcc TestCaseContext) (*executorWrap, error) {\n\tvar name string\n\tvar retry, delay, timeout int\n\n\tif itype, ok := t[\"type\"]; ok {\n\t\tname = fmt.Sprintf(\"%s\", itype)\n\t}\n\n\tif name == \"\" && tcc.GetName() != \"default\" {\n\t\tname = tcc.GetName()\n\t} else if name == \"\" {\n\t\tname = \"exec\"\n\t}\n\n\tretry, errRetry := getAttrInt(t, \"retry\")\n\tif errRetry != nil {\n\t\treturn nil, errRetry\n\t}\n\tdelay, errDelay := getAttrInt(t, \"delay\")\n\tif errDelay != nil {\n\t\treturn nil, errDelay\n\t}\n\ttimeout, errTimeout := getAttrInt(t, \"timeout\")\n\tif errTimeout != nil {\n\t\treturn nil, errTimeout\n\t}\n\n\tif e, ok := executors[name]; ok {\n\t\tew := &executorWrap{\n\t\t\texecutor: e,\n\t\t\tretry: retry,\n\t\t\tdelay: delay,\n\t\t\ttimeout: timeout,\n\t\t}\n\t\treturn ew, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"[%s] type '%s' is not implemented\", tcc.GetName(), name)\n}\n\n\/\/ RegisterTestCaseContext new register TestCaseContext\nfunc RegisterTestCaseContext(name string, tcc TestCaseContext) {\n\tcontexts[name] = tcc\n}\n\n\/\/ getContextWrap initializes a context for a testcase\n\/\/ no type -> parent context\nfunc getContextWrap(tc *TestCase) (TestCaseContext, error) {\n\tif tc.Context == nil {\n\t\treturn contexts[\"default\"], nil\n\t}\n\tvar typeName string\n\tif itype, ok := tc.Context[\"type\"]; ok {\n\t\ttypeName = fmt.Sprintf(\"%s\", itype)\n\t}\n\n\tif typeName == \"\" {\n\t\treturn nil, fmt.Errorf(\"context type '%s' is not implemented\", typeName)\n\t}\n\tcontexts[typeName].SetTestCase(*tc)\n\treturn contexts[typeName], nil\n}\n\nfunc getAttrInt(t map[string]interface{}, name string) (int, error) {\n\tvar out int\n\tif i, ok := t[name]; ok {\n\t\tvar ok bool\n\t\tout, ok = i.(int)\n\t\tif !ok {\n\t\t\treturn -1, fmt.Errorf(\"attribute %s '%s' is not an integer\", name, i)\n\t\t}\n\t}\n\tif out < 0 {\n\t\tout = 0\n\t}\n\treturn out, nil\n}\n\n\/\/ Exit func display an error message on stderr and exit 1\nfunc Exit(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n\n\/\/ OutputResult output result to sdtout, files...\nfunc OutputResult(format string, resume, resumeFailures bool, outputDir string, tests Tests, elapsed time.Duration, detailsLevel string) error {\n\tvar data []byte\n\tvar err error\n\tswitch format {\n\tcase \"json\":\n\t\tdata, err = json.Marshal(tests)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format output json (%s)\", err)\n\t\t}\n\tcase \"yml\", \"yaml\":\n\t\tdata, err = yaml.Marshal(tests)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format output yaml (%s)\", err)\n\t\t}\n\tdefault:\n\t\tdataxml, errm := xml.Marshal(tests)\n\t\tif errm != nil {\n\t\t\tlog.Fatalf(\"Error: cannot format xml output: %s\", errm)\n\t\t}\n\t\tdata = append([]byte(\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n\"), dataxml...)\n\t}\n\n\tif detailsLevel == \"high\" {\n\t\tPrintFunc(string(data))\n\t}\n\n\tif resume {\n\t\toutputResume(tests, elapsed, resumeFailures)\n\t}\n\n\tif outputDir != \"\" {\n\t\tfilename := outputDir + \"\/\" + \"test_results\" + \".\" + format\n\t\tif err := ioutil.WriteFile(filename, data, 0644); err != nil {\n\t\t\treturn fmt.Errorf(\"Error while creating file %s, err:%s\", filename, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc outputResume(tests Tests, elapsed time.Duration, resumeFailures bool) {\n\n\tif resumeFailures {\n\t\tfor _, t := range tests.TestSuites {\n\t\t\tif t.Failures > 0 || t.Errors > 0 {\n\t\t\t\tPrintFunc(\"FAILED %s\\n\", t.Name)\n\t\t\t\tPrintFunc(\"--------------\\n\")\n\n\t\t\t\tfor _, tc := range t.TestCases {\n\t\t\t\t\tfor _, f := range tc.Failures {\n\t\t\t\t\t\tPrintFunc(\"%s\\n\", f.Value)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, f := range tc.Errors {\n\t\t\t\t\t\tPrintFunc(\"%s\\n\", f.Value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tPrintFunc(\"-=-=-=-=-=-=-=-=-\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\ttotalTestCases := 0\n\ttotalTestSteps := 0\n\tfor _, t := range tests.TestSuites {\n\t\tif t.Failures > 0 || t.Errors > 0 {\n\t\t\tPrintFunc(\"FAILED %s\\n\", t.Name)\n\t\t}\n\t\ttotalTestCases += len(t.TestCases)\n\t\tfor _, tc := range t.TestCases {\n\t\t\ttotalTestSteps += len(tc.TestSteps)\n\t\t}\n\t}\n\n\tPrintFunc(\"Total:%d TotalOK:%d TotalKO:%d TotalSkipped:%d TotalTestSuite:%d TotalTestCase:%d TotalTestStep:%d Duration:%s\\n\",\n\t\ttests.Total,\n\t\ttests.TotalOK,\n\t\ttests.TotalKO,\n\t\ttests.TotalSkipped,\n\t\tlen(tests.TestSuites),\n\t\ttotalTestCases,\n\t\ttotalTestSteps,\n\t\telapsed,\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/parser\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar stackTrace int\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename string\n\ntype errorMessage string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"file\": initializeFileClass,\n\t\"net\/http\": initializeHTTPClass,\n\t\"net\/simple_server\": initializeSimpleServerClass,\n\t\"uri\": initializeURIClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainThread *thread\n\t\/\/ a map holds pointers of constants\n\tconstants map[string]*Pointer\n\t\/\/ a map holds different types of label tables\n\tisTables map[labelType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) *VM {\n\ts := &stack{}\n\tcfs := &callFrameStack{callFrames: []*callFrame{}}\n\tthread := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = thread\n\tcfs.thread = thread\n\n\tvm := &VM{mainThread: thread, args: args}\n\tthread.vm = vm\n\n\tvm.initConstants()\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[labelType]isTable{\n\t\tbytecode.LabelDef: make(isTable),\n\t\tbytecode.LabelDefClass: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tp, _ := filepath.Abs(\"..\/\")\n\n\tif !strings.HasSuffix(p, \"goby\") {\n\t\tvm.projectRoot = path.Join(p, \"goby\")\n\t} else {\n\t\tvm.projectRoot = p\n\t}\n\n\treturn vm\n}\n\n\/\/ ExecBytecodes accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecBytecodes(bytecodes, fn string) {\n\tfilename := filename(fn)\n\tp := newBytecodeParser(filename)\n\tp.vm = vm\n\tp.parseBytecode(bytecodes)\n\n\t\/\/ Keep update label table after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor labelType, table := range p.labelTable {\n\t\tfor labelName, is := range table {\n\t\t\tvm.isTables[labelType][labelName] = is\n\t\t}\n\t}\n\n\tvm.blockTables[p.filename] = p.blockTable\n\tvm.classISIndexTables[filename] = newISIndexTable()\n\tvm.methodISIndexTables[filename] = newISIndexTable()\n\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tswitch p.(type) {\n\t\t\tcase errorMessage:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcf := newCallFrame(p.program)\n\tcf.self = mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\tvm.startFromTopFrame()\n}\n\n\/\/ GetExecResult returns stack's top most value. Normally it's used in tests.\nfunc (vm *VM) GetExecResult() Object {\n\treturn vm.mainThread.stack.top().Target\n}\n\nfunc (vm *VM) initConstants() {\n\tvm.constants = make(map[string]*Pointer)\n\tconstants := make(map[string]*Pointer)\n\n\tbuiltInClasses := []Class{\n\t\tintegerClass,\n\t\tstringClass,\n\t\tbooleanClass,\n\t\tnullClass,\n\t\tarrayClass,\n\t\thashClass,\n\t\tclassClass,\n\t\tmethodClass,\n\t}\n\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, initializeString(arg))\n\t}\n\n\tfor _, c := range builtInClasses {\n\t\tp := &Pointer{Target: c}\n\t\tconstants[c.ReturnName()] = p\n\t}\n\n\tconstants[\"ARGV\"] = &Pointer{Target: initializeArray(args)}\n\tobjectClass.constants = constants\n\tvm.constants[\"Object\"] = &Pointer{objectClass}\n}\n\n\/\/ Start evaluation from top most call frame\nfunc (vm *VM) startFromTopFrame() {\n\tvm.mainThread.startFromTopFrame()\n}\n\nfunc (vm *VM) currentFilePath() string {\n\treturn string(vm.mainThread.callFrameStack.top().instructionSet.filename)\n}\n\nfunc (vm *VM) printDebugInfo(i *instruction) {\n\tfmt.Println(i.inspect())\n}\n\nfunc (vm *VM) getBlock(name string, filename filename) *instructionSet {\n\t\/\/ The \"name\" here is actually an index from label\n\t\/\/ for example <Block:1>'s name is \"1\"\n\tis, ok := vm.blockTables[filename][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find block %s\", name))\n\t}\n\n\treturn is\n}\n\nfunc (vm *VM) getMethodIS(name string, filename filename) (*instructionSet, bool) {\n\tiss, ok := vm.isTables[bytecode.LabelDef][name]\n\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tis := iss[vm.methodISIndexTables[filename].Data[name]]\n\n\tvm.methodISIndexTables[filename].Data[name]++\n\treturn is, ok\n}\n\nfunc (vm *VM) getClassIS(name string, filename filename) *instructionSet {\n\tiss, ok := vm.isTables[bytecode.LabelDefClass][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find class %s's instructions\", name))\n\t}\n\n\tis := iss[vm.classISIndexTables[filename].Data[name]]\n\n\tvm.classISIndexTables[filename].Data[name]++\n\treturn is\n}\n\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = initializeClass(name, isModule)\n\t\tobjectClass.constants[name] = &Pointer{Target: c}\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf *callFrame, constName string) *Pointer {\n\tvar constant *Pointer\n\tvar namespace Class\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(Class)\n\t}\n\n\tif hasNamespace {\n\t\tif namespace != cf.self {\n\t\t\tvm.mainThread.stack.pop()\n\t\t}\n\n\t\tconstant = namespace.lookupConstant(constName, true)\n\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tswitch s := cf.self.(type) {\n\tcase Class:\n\t\tconstant = s.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\tdefault:\n\t\tc := s.returnClass()\n\n\t\tconstant = c.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tconstant = vm.constants[constName]\n\treturn constant\n}\n\nfunc (vm *VM) execGobyLib(libName string) {\n\tlibPath := path.Join(vm.projectRoot, \"lib\", libName)\n\tfile, err := ioutil.ReadFile(libPath)\n\n\tif err != nil {\n\t\tvm.mainThread.returnError(err.Error())\n\t}\n\n\tvm.execRequiredFile(libPath, file)\n}\n\nfunc (vm *VM) execRequiredFile(filepath string, file []byte) {\n\tprogram := parser.BuildAST(file)\n\tg := bytecode.NewGenerator(program)\n\tbytecodes := g.GenerateByteCode(program)\n\n\toldMethodTable := isTable{}\n\toldClassTable := isTable{}\n\n\t\/\/ Copy current file's instruction sets.\n\tfor name, is := range vm.isTables[bytecode.LabelDef] {\n\t\toldMethodTable[name] = is\n\t}\n\n\tfor name, is := range vm.isTables[bytecode.LabelDefClass] {\n\t\toldClassTable[name] = is\n\t}\n\n\t\/\/ This creates new execution environments for required file, including new instruction set table.\n\t\/\/ So we need to copy old instruction sets and restore them later, otherwise current program's instruction set would be overwrite.\n\tvm.ExecBytecodes(bytecodes, filepath)\n\n\t\/\/ Restore instruction sets.\n\tvm.isTables[bytecode.LabelDef] = oldMethodTable\n\tvm.isTables[bytecode.LabelDefClass] = oldClassTable\n}\n\nfunc newError(format string, args ...interface{}) *Error {\n\treturn &Error{Message: fmt.Sprintf(format, args...)}\n}\n<commit_msg>Add method to initilaize thread.<commit_after>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/parser\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar stackTrace int\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename string\n\ntype errorMessage string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"file\": initializeFileClass,\n\t\"net\/http\": initializeHTTPClass,\n\t\"net\/simple_server\": initializeSimpleServerClass,\n\t\"uri\": initializeURIClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainThread *thread\n\t\/\/ a map holds pointers of constants\n\tconstants map[string]*Pointer\n\t\/\/ a map holds different types of label tables\n\tisTables map[labelType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) *VM {\n\tvm := &VM{args: args}\n\tvm.mainThread = vm.newThread()\n\n\tvm.initConstants()\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[labelType]isTable{\n\t\tbytecode.LabelDef: make(isTable),\n\t\tbytecode.LabelDefClass: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tp, _ := filepath.Abs(\"..\/\")\n\n\tif !strings.HasSuffix(p, \"goby\") {\n\t\tvm.projectRoot = path.Join(p, \"goby\")\n\t} else {\n\t\tvm.projectRoot = p\n\t}\n\n\treturn vm\n}\n\nfunc (vm *VM) newThread() *thread {\n\ts := &stack{}\n\tcfs := &callFrameStack{callFrames: []*callFrame{}}\n\tt := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = t\n\tcfs.thread = t\n\tt.vm = vm\n\treturn t\n}\n\n\/\/ ExecBytecodes accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecBytecodes(bytecodes, fn string) {\n\tfilename := filename(fn)\n\tp := newBytecodeParser(filename)\n\tp.vm = vm\n\tp.parseBytecode(bytecodes)\n\n\t\/\/ Keep update label table after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor labelType, table := range p.labelTable {\n\t\tfor labelName, is := range table {\n\t\t\tvm.isTables[labelType][labelName] = is\n\t\t}\n\t}\n\n\tvm.blockTables[p.filename] = p.blockTable\n\tvm.classISIndexTables[filename] = newISIndexTable()\n\tvm.methodISIndexTables[filename] = newISIndexTable()\n\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tswitch p.(type) {\n\t\t\tcase errorMessage:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcf := newCallFrame(p.program)\n\tcf.self = mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\tvm.startFromTopFrame()\n}\n\n\/\/ GetExecResult returns stack's top most value. Normally it's used in tests.\nfunc (vm *VM) GetExecResult() Object {\n\treturn vm.mainThread.stack.top().Target\n}\n\nfunc (vm *VM) initConstants() {\n\tvm.constants = make(map[string]*Pointer)\n\tconstants := make(map[string]*Pointer)\n\n\tbuiltInClasses := []Class{\n\t\tintegerClass,\n\t\tstringClass,\n\t\tbooleanClass,\n\t\tnullClass,\n\t\tarrayClass,\n\t\thashClass,\n\t\tclassClass,\n\t\tmethodClass,\n\t}\n\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, initializeString(arg))\n\t}\n\n\tfor _, c := range builtInClasses {\n\t\tp := &Pointer{Target: c}\n\t\tconstants[c.ReturnName()] = p\n\t}\n\n\tconstants[\"ARGV\"] = &Pointer{Target: initializeArray(args)}\n\tobjectClass.constants = constants\n\tvm.constants[\"Object\"] = &Pointer{objectClass}\n}\n\n\/\/ Start evaluation from top most call frame\nfunc (vm *VM) startFromTopFrame() {\n\tvm.mainThread.startFromTopFrame()\n}\n\nfunc (vm *VM) currentFilePath() string {\n\treturn string(vm.mainThread.callFrameStack.top().instructionSet.filename)\n}\n\nfunc (vm *VM) printDebugInfo(i *instruction) {\n\tfmt.Println(i.inspect())\n}\n\nfunc (vm *VM) getBlock(name string, filename filename) *instructionSet {\n\t\/\/ The \"name\" here is actually an index from label\n\t\/\/ for example <Block:1>'s name is \"1\"\n\tis, ok := vm.blockTables[filename][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find block %s\", name))\n\t}\n\n\treturn is\n}\n\nfunc (vm *VM) getMethodIS(name string, filename filename) (*instructionSet, bool) {\n\tiss, ok := vm.isTables[bytecode.LabelDef][name]\n\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tis := iss[vm.methodISIndexTables[filename].Data[name]]\n\n\tvm.methodISIndexTables[filename].Data[name]++\n\treturn is, ok\n}\n\nfunc (vm *VM) getClassIS(name string, filename filename) *instructionSet {\n\tiss, ok := vm.isTables[bytecode.LabelDefClass][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find class %s's instructions\", name))\n\t}\n\n\tis := iss[vm.classISIndexTables[filename].Data[name]]\n\n\tvm.classISIndexTables[filename].Data[name]++\n\treturn is\n}\n\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = initializeClass(name, isModule)\n\t\tobjectClass.constants[name] = &Pointer{Target: c}\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf *callFrame, constName string) *Pointer {\n\tvar constant *Pointer\n\tvar namespace Class\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(Class)\n\t}\n\n\tif hasNamespace {\n\t\tif namespace != cf.self {\n\t\t\tvm.mainThread.stack.pop()\n\t\t}\n\n\t\tconstant = namespace.lookupConstant(constName, true)\n\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tswitch s := cf.self.(type) {\n\tcase Class:\n\t\tconstant = s.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\tdefault:\n\t\tc := s.returnClass()\n\n\t\tconstant = c.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tconstant = vm.constants[constName]\n\treturn constant\n}\n\nfunc (vm *VM) execGobyLib(libName string) {\n\tlibPath := path.Join(vm.projectRoot, \"lib\", libName)\n\tfile, err := ioutil.ReadFile(libPath)\n\n\tif err != nil {\n\t\tvm.mainThread.returnError(err.Error())\n\t}\n\n\tvm.execRequiredFile(libPath, file)\n}\n\nfunc (vm *VM) execRequiredFile(filepath string, file []byte) {\n\tprogram := parser.BuildAST(file)\n\tg := bytecode.NewGenerator(program)\n\tbytecodes := g.GenerateByteCode(program)\n\n\toldMethodTable := isTable{}\n\toldClassTable := isTable{}\n\n\t\/\/ Copy current file's instruction sets.\n\tfor name, is := range vm.isTables[bytecode.LabelDef] {\n\t\toldMethodTable[name] = is\n\t}\n\n\tfor name, is := range vm.isTables[bytecode.LabelDefClass] {\n\t\toldClassTable[name] = is\n\t}\n\n\t\/\/ This creates new execution environments for required file, including new instruction set table.\n\t\/\/ So we need to copy old instruction sets and restore them later, otherwise current program's instruction set would be overwrite.\n\tvm.ExecBytecodes(bytecodes, filepath)\n\n\t\/\/ Restore instruction sets.\n\tvm.isTables[bytecode.LabelDef] = oldMethodTable\n\tvm.isTables[bytecode.LabelDefClass] = oldClassTable\n}\n\nfunc newError(format string, args ...interface{}) *Error {\n\treturn &Error{Message: fmt.Sprintf(format, args...)}\n}\n<|endoftext|>"} {"text":"<commit_before>package transloadit\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in\/fsnotify.v1\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype WatchOptions struct {\n\t\/\/ Directory to watch files in (only if Watch is true)\n\tInput string\n\t\/\/ Directoy to put result files in\n\tOutput string\n\t\/\/ Watch the input directory or just compile files in input directory\n\tWatch bool\n\t\/\/ Template id to convert files with\n\tTemplateId string\n\t\/\/ Optional notify url for each assembly.\n\tNotifyUrl string\n\t\/\/ Instead of using templates you can define steps\n\tSteps map[string]map[string]interface{}\n\t\/\/ If true the original files will be copied in the output directoy with `-original_0_` prefix.\n\t\/\/ If false input files will be deleted.\n\tPreserve bool\n}\n\ntype Watcher struct {\n\tclient *Client\n\toptions *WatchOptions\n\tstopped bool\n\t\/\/ Listen for errors\n\tError chan error\n\t\/\/ Listen for completed assemblies\n\tDone chan *AssemblyInfo\n\t\/\/ Listen for file changes (only if Watch == true)\n\tChange chan string\n\tend chan bool\n\trecentWrites map[string]time.Time\n\tblacklist map[string]bool\n}\n\n\/\/ Watch a directory for changes and convert all changes files and download the result.\n\/\/ It will create a new assembly for each file.\n\/\/ If the directory already contains some they are all converted.\n\/\/ See WatchOptions for possible configuration.\nfunc (client *Client) Watch(options *WatchOptions) *Watcher {\n\n\twatcher := &Watcher{\n\t\tclient: client,\n\t\toptions: options,\n\t\tError: make(chan error),\n\t\tDone: make(chan *AssemblyInfo),\n\t\tChange: make(chan string),\n\t\tend: make(chan bool),\n\t\trecentWrites: make(map[string]time.Time),\n\t\tblacklist: make(map[string]bool),\n\t}\n\n\twatcher.start()\n\n\treturn watcher\n\n}\n\nfunc (watcher *Watcher) start() {\n\n\twatcher.processDir()\n\n\tif watcher.options.Watch {\n\t\tgo watcher.startWatcher()\n\t}\n\n}\n\n\/\/ Stop the watcher.\nfunc (watcher *Watcher) Stop() {\n\n\tif watcher.stopped {\n\t\treturn\n\t}\n\n\twatcher.stopped = true\n\n\twatcher.end <- true\n\tclose(watcher.Done)\n\tclose(watcher.Error)\n\tclose(watcher.Change)\n\tclose(watcher.end)\n}\n\nfunc (watcher *Watcher) processDir() {\n\n\tfiles, err := ioutil.ReadDir(watcher.options.Input)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tinput := watcher.options.Input\n\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tgo watcher.processFile(path.Join(input, file.Name()))\n\t\t}\n\t}\n\n}\n\nfunc (watcher *Watcher) processFile(name string) {\n\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\t\/\/ Add file to blacklist\n\twatcher.blacklist[name] = true\n\n\tassembly := watcher.client.CreateAssembly()\n\n\tif watcher.options.TemplateId != \"\" {\n\t\tassembly.TemplateId = watcher.options.TemplateId\n\t}\n\n\tif watcher.options.NotifyUrl != \"\" {\n\t\tassembly.NotifyUrl = watcher.options.NotifyUrl\n\t}\n\n\tfor name, step := range watcher.options.Steps {\n\t\tassembly.AddStep(name, step)\n\t}\n\n\tassembly.Blocking = true\n\n\tassembly.AddReader(\"file\", path.Base(name), file)\n\n\tinfo, err := assembly.Upload()\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tif info.Error != \"\" {\n\t\twatcher.error(errors.New(info.Error))\n\t\treturn\n\t}\n\n\tfor stepName, results := range info.Results {\n\t\tfor index, result := range results {\n\t\t\tgo func() {\n\t\t\t\twatcher.downloadResult(stepName, index, result)\n\t\t\t\twatcher.handleOriginalFile(name)\n\t\t\t\tdelete(watcher.blacklist, name)\n\t\t\t\twatcher.Done <- info\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (watcher *Watcher) downloadResult(stepName string, index int, result *FileInfo) {\n\n\tfileName := fmt.Sprintf(\"%s_%d_%s\", stepName, index, result.Name)\n\n\tresp, err := http.Get(result.Url)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\n\tout, err := os.Create(path.Join(watcher.options.Output, fileName))\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tdefer out.Close()\n\n\tio.Copy(out, resp.Body)\n\n}\n\nfunc (watcher *Watcher) startWatcher() {\n\n\tfsWatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\twatcher.error(err)\n\t}\n\n\tdefer fsWatcher.Close()\n\n\tif err = fsWatcher.Add(watcher.options.Input); err != nil {\n\t\twatcher.error(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\n\t\t\tif watcher.stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Second)\n\t\t\tnow := time.Now()\n\n\t\t\tfor name, lastEvent := range watcher.recentWrites {\n\t\t\t\tdiff := now.Sub(lastEvent)\n\t\t\t\tif diff > (time.Millisecond * 500) {\n\t\t\t\t\tdelete(watcher.recentWrites, name)\n\t\t\t\t\twatcher.Change <- name\n\t\t\t\t\tgo watcher.processFile(name)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-watcher.end:\n\t\t\treturn\n\t\tcase err := <-fsWatcher.Errors:\n\t\t\twatcher.error(err)\n\t\tcase evt := <-fsWatcher.Events:\n\t\t\t\/\/ Ignore the event if the file is currently processed\n\t\t\tif _, ok := watcher.blacklist[evt.Name]; ok == true {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif evt.Op&fsnotify.Create == fsnotify.Create || evt.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\twatcher.recentWrites[evt.Name] = time.Now()\n\t\t\t}\n\t\t}\n\t}\n\n}\n\nfunc (watcher *Watcher) handleOriginalFile(name string) {\n\n\tvar err error\n\tif watcher.options.Preserve {\n\t\t_, file := path.Split(name)\n\t\terr = os.Rename(name, watcher.options.Output+\"\/-original_0_\"+basename(file))\n\t} else {\n\t\terr = os.Remove(name)\n\t}\n\n\tif err != nil {\n\t\twatcher.error(err)\n\t}\n\n}\n\nfunc (watcher *Watcher) error(err error) {\n\twatcher.Error <- err\n}\n\nfunc basename(name string) string {\n\ti := strings.LastIndex(name, string(os.PathSeparator))\n\treturn name[i+1:]\n}\n<commit_msg>Define buffers to see errors in transloadify<commit_after>package transloadit\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in\/fsnotify.v1\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype WatchOptions struct {\n\t\/\/ Directory to watch files in (only if Watch is true)\n\tInput string\n\t\/\/ Directoy to put result files in\n\tOutput string\n\t\/\/ Watch the input directory or just compile files in input directory\n\tWatch bool\n\t\/\/ Template id to convert files with\n\tTemplateId string\n\t\/\/ Optional notify url for each assembly.\n\tNotifyUrl string\n\t\/\/ Instead of using templates you can define steps\n\tSteps map[string]map[string]interface{}\n\t\/\/ If true the original files will be copied in the output directoy with `-original_0_` prefix.\n\t\/\/ If false input files will be deleted.\n\tPreserve bool\n}\n\ntype Watcher struct {\n\tclient *Client\n\toptions *WatchOptions\n\tstopped bool\n\t\/\/ Listen for errors\n\tError chan error\n\t\/\/ Listen for completed assemblies\n\tDone chan *AssemblyInfo\n\t\/\/ Listen for file changes (only if Watch == true)\n\tChange chan string\n\tend chan bool\n\trecentWrites map[string]time.Time\n\tblacklist map[string]bool\n}\n\n\/\/ Watch a directory for changes and convert all changes files and download the result.\n\/\/ It will create a new assembly for each file.\n\/\/ If the directory already contains some they are all converted.\n\/\/ See WatchOptions for possible configuration.\nfunc (client *Client) Watch(options *WatchOptions) *Watcher {\n\n\twatcher := &Watcher{\n\t\tclient: client,\n\t\toptions: options,\n\t\tError: make(chan error, 1),\n\t\tDone: make(chan *AssemblyInfo),\n\t\tChange: make(chan string),\n\t\tend: make(chan bool, 1),\n\t\trecentWrites: make(map[string]time.Time),\n\t\tblacklist: make(map[string]bool),\n\t}\n\n\twatcher.start()\n\n\treturn watcher\n\n}\n\nfunc (watcher *Watcher) start() {\n\n\twatcher.processDir()\n\n\tif watcher.options.Watch {\n\t\tgo watcher.startWatcher()\n\t}\n\n}\n\n\/\/ Stop the watcher.\nfunc (watcher *Watcher) Stop() {\n\n\tif watcher.stopped {\n\t\treturn\n\t}\n\n\twatcher.stopped = true\n\n\twatcher.end <- true\n\tclose(watcher.Done)\n\tclose(watcher.Error)\n\tclose(watcher.Change)\n\tclose(watcher.end)\n}\n\nfunc (watcher *Watcher) processDir() {\n\n\tfiles, err := ioutil.ReadDir(watcher.options.Input)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tinput := watcher.options.Input\n\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tgo watcher.processFile(path.Join(input, file.Name()))\n\t\t}\n\t}\n\n}\n\nfunc (watcher *Watcher) processFile(name string) {\n\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\t\/\/ Add file to blacklist\n\twatcher.blacklist[name] = true\n\n\tassembly := watcher.client.CreateAssembly()\n\n\tif watcher.options.TemplateId != \"\" {\n\t\tassembly.TemplateId = watcher.options.TemplateId\n\t}\n\n\tif watcher.options.NotifyUrl != \"\" {\n\t\tassembly.NotifyUrl = watcher.options.NotifyUrl\n\t}\n\n\tfor name, step := range watcher.options.Steps {\n\t\tassembly.AddStep(name, step)\n\t}\n\n\tassembly.Blocking = true\n\n\tassembly.AddReader(\"file\", path.Base(name), file)\n\n\tinfo, err := assembly.Upload()\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tif info.Error != \"\" {\n\t\twatcher.error(errors.New(info.Error))\n\t\treturn\n\t}\n\n\tfor stepName, results := range info.Results {\n\t\tfor index, result := range results {\n\t\t\tgo func() {\n\t\t\t\twatcher.downloadResult(stepName, index, result)\n\t\t\t\twatcher.handleOriginalFile(name)\n\t\t\t\tdelete(watcher.blacklist, name)\n\t\t\t\twatcher.Done <- info\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (watcher *Watcher) downloadResult(stepName string, index int, result *FileInfo) {\n\n\tfileName := fmt.Sprintf(\"%s_%d_%s\", stepName, index, result.Name)\n\n\tresp, err := http.Get(result.Url)\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\n\tout, err := os.Create(path.Join(watcher.options.Output, fileName))\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tdefer out.Close()\n\n\tio.Copy(out, resp.Body)\n\n}\n\nfunc (watcher *Watcher) startWatcher() {\n\n\tfsWatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tdefer fsWatcher.Close()\n\n\tif err = fsWatcher.Add(watcher.options.Input); err != nil {\n\t\twatcher.error(err)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tfor {\n\n\t\t\tif watcher.stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Second)\n\t\t\tnow := time.Now()\n\n\t\t\tfor name, lastEvent := range watcher.recentWrites {\n\t\t\t\tdiff := now.Sub(lastEvent)\n\t\t\t\tif diff > (time.Millisecond * 500) {\n\t\t\t\t\tdelete(watcher.recentWrites, name)\n\t\t\t\t\twatcher.Change <- name\n\t\t\t\t\tgo watcher.processFile(name)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-watcher.end:\n\t\t\treturn\n\t\tcase err := <-fsWatcher.Errors:\n\t\t\twatcher.error(err)\n\t\tcase evt := <-fsWatcher.Events:\n\t\t\t\/\/ Ignore the event if the file is currently processed\n\t\t\tif _, ok := watcher.blacklist[evt.Name]; ok == true {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif evt.Op&fsnotify.Create == fsnotify.Create || evt.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\twatcher.recentWrites[evt.Name] = time.Now()\n\t\t\t}\n\t\t}\n\t}\n\n}\n\nfunc (watcher *Watcher) handleOriginalFile(name string) {\n\n\tvar err error\n\tif watcher.options.Preserve {\n\t\t_, file := path.Split(name)\n\t\terr = os.Rename(name, watcher.options.Output+\"\/-original_0_\"+basename(file))\n\t} else {\n\t\terr = os.Remove(name)\n\t}\n\n\tif err != nil {\n\t\twatcher.error(err)\n\t}\n\n}\n\nfunc (watcher *Watcher) error(err error) {\n\twatcher.Error <- err\n}\n\nfunc basename(name string) string {\n\ti := strings.LastIndex(name, string(os.PathSeparator))\n\treturn name[i+1:]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/armon\/consul-api\"\n)\n\nconst (\n\t\/\/ failSleep controls how long to sleep on a failure\n\tfailSleep = 5 * time.Second\n\n\t\/\/ maxFailures controls the maximum number of failures\n\t\/\/ before we limit the sleep value\n\tmaxFailures = 5\n\n\t\/\/ waitTime is used to control how long we do a blocking\n\t\/\/ query for\n\twaitTime = 60 * time.Second\n)\n\ntype backendData struct {\n\tsync.Mutex\n\n\t\/\/ Client is a shared Consul client\n\tClient *consulapi.Client\n\n\t\/\/ Servers maps each watch path to a list of entries\n\tServers map[*WatchPath][]*consulapi.ServiceEntry\n\n\t\/\/ Backends maps a backend to a list of watch paths used\n\t\/\/ to build up the server list\n\tBackends map[string][]*WatchPath\n\n\t\/\/ ChangeCh is used to inform of an update\n\tChangeCh chan struct{}\n\n\t\/\/ StopCh is used to trigger a stop\n\tStopCh chan struct{}\n}\n\n\/\/ watch is used to start a long running watcher to handle updates.\n\/\/ Returns a stopCh, and a finishCh.\nfunc watch(conf *Config) (chan struct{}, chan struct{}) {\n\tstopCh := make(chan struct{})\n\tfinishCh := make(chan struct{})\n\tgo runWatch(conf, stopCh, finishCh)\n\treturn stopCh, finishCh\n}\n\n\/\/ runWatch is a long running routine that watches with a\n\/\/ given configuration\nfunc runWatch(conf *Config, stopCh, doneCh chan struct{}) {\n\tdefer close(doneCh)\n\n\t\/\/ Create the consul client\n\tconsulConf := consulapi.DefaultConfig()\n\tif conf.Address != \"\" {\n\t\tconsulConf.Address = conf.Address\n\t}\n\n\t\/\/ Attempt to contact the agent\n\tclient, err := consulapi.NewClient(consulConf)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Failed to initialize consul client: %v\", err)\n\t\treturn\n\t}\n\tif _, err := client.Agent().NodeName(); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to contact consul agent: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create a backend store\n\tdata := &backendData{\n\t\tClient: client,\n\t\tServers: make(map[*WatchPath][]*consulapi.ServiceEntry),\n\t\tBackends: make(map[string][]*WatchPath),\n\t\tChangeCh: make(chan struct{}, 1),\n\t\tStopCh: stopCh,\n\t}\n\n\t\/\/ Start the watches\n\tdata.Lock()\n\tfor idx, watch := range conf.watches {\n\t\tdata.Backends[watch.Backend] = append(data.Backends[watch.Backend], watch)\n\t\tgo runSingleWatch(conf, data, idx, watch)\n\t}\n\tdata.Unlock()\n\n\t\/\/ Monitor for changes or stop\n\tfor {\n\t\tselect {\n\t\tcase <-data.ChangeCh:\n\t\t\tif maybeRefresh(conf, data) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-stopCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ maybeRefresh is used to handle a potential config update\nfunc maybeRefresh(conf *Config, data *backendData) (exit bool) {\n\t\/\/ Ignore initial updates until all the data is ready\n\tif !allWatchesReturned(conf, data) {\n\t\treturn\n\t}\n\n\t\/\/ Merge the data for each backend\n\tbackendServers := aggregateServers(data)\n\n\t\/\/ Build the output template\n\toutput, err := buildTemplate(conf, backendServers)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] %v\", err)\n\t\treturn true\n\t}\n\n\t\/\/ Check for a dry run\n\tif conf.DryRun {\n\t\tfmt.Printf(\"%s\\n\", output)\n\t\treturn true\n\t}\n\n\t\/\/ Write out the configuration\n\tif err := ioutil.WriteFile(conf.Path, output, 0660); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to write config file: %v\", err)\n\t\treturn true\n\t}\n\tlog.Printf(\"[INFO] Updated configuration file at %s\", conf.Path)\n\n\t\/\/ Invoke the reload hook\n\tif err := reload(conf); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to reload: %v\", err)\n\t} else {\n\t\tlog.Printf(\"[INFO] Completed reload\")\n\t}\n\treturn\n}\n\n\/\/ allWatchesReturned checks if all the watches have some\n\/\/ data registered. Prevents early template generation.\nfunc allWatchesReturned(conf *Config, data *backendData) bool {\n\tdata.Lock()\n\tdefer data.Unlock()\n\treturn len(data.Servers) >= len(conf.watches)\n}\n\n\/\/ aggregateServers merges the watches belonging to each\n\/\/ backend together to prepare for template generation\nfunc aggregateServers(data *backendData) map[string][]*consulapi.ServiceEntry {\n\tbackendServers := make(map[string][]*consulapi.ServiceEntry)\n\tdata.Lock()\n\tdefer data.Unlock()\n\tfor backend, watches := range data.Backends {\n\t\tvar all []*consulapi.ServiceEntry\n\t\tfor _, watch := range watches {\n\t\t\tentries := data.Servers[watch]\n\t\t\tall = append(all, entries...)\n\t\t}\n\t\tbackendServers[backend] = all\n\t}\n\treturn backendServers\n}\n\n\/\/ buildTemplate is used to build the output template\n\/\/ from the configuration and server list\nfunc buildTemplate(conf *Config,\n\tservers map[string][]*consulapi.ServiceEntry) ([]byte, error) {\n\t\/\/ Format the output\n\toutVars := formatOutput(servers)\n\n\t\/\/ Read the template\n\traw, err := ioutil.ReadFile(conf.Template)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read template: %v\", err)\n\t}\n\n\t\/\/ Create the template\n\ttempl, err := template.New(\"output\").Parse(string(raw))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse the template: %v\", err)\n\t}\n\n\t\/\/ Generate the output\n\tvar output bytes.Buffer\n\tif err := templ.Execute(&output, outVars); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate the template: %v\", err)\n\t}\n\treturn output.Bytes(), nil\n}\n\n\/\/ runSingleWatch is used to query a single watch path for changes\nfunc runSingleWatch(conf *Config, data *backendData, idx int, watch *WatchPath) {\n\thealth := data.Client.Health()\n\topts := &consulapi.QueryOptions{\n\t\tWaitTime: waitTime,\n\t}\n\tif watch.Datacenter != \"\" {\n\t\topts.Datacenter = watch.Datacenter\n\t}\n\n\tfailures := 0\n\tfor {\n\t\tif shouldStop(data.StopCh) {\n\t\t\treturn\n\t\t}\n\t\tentries, qm, err := health.Service(watch.Service, watch.Tag, true, opts)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Failed to fetch service nodes: %v\", err)\n\t\t}\n\n\t\t\/\/ Patch the entries as necessary\n\t\tfor _, entry := range entries {\n\t\t\t\/\/ Modify the node name to prefix with the watch ID. This\n\t\t\t\/\/ prevents a name conflict on duplicate names\n\t\t\tentry.Node.Node = fmt.Sprintf(\"%d_%s\", idx, entry.Node.Node)\n\n\t\t\t\/\/ Patch the port if provided\n\t\t\tif watch.Port != 0 {\n\t\t\t\tentry.Service.Port = watch.Port\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Update the entries. If this is the first read, do it on error\n\t\tdata.Lock()\n\t\told, ok := data.Servers[watch]\n\t\tif !ok || (err == nil && !reflect.DeepEqual(old, entries)) {\n\t\t\tdata.Servers[watch] = entries\n\t\t\tasyncNotify(data.ChangeCh)\n\t\t\tif !conf.DryRun {\n\t\t\t\tlog.Printf(\"[DEBUG] Updated nodes for %v\", watch.Spec)\n\t\t\t}\n\t\t}\n\t\tdata.Unlock()\n\n\t\t\/\/ Stop immediately on a dry run\n\t\tif conf.DryRun {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check for an error\n\t\tif err != nil {\n\t\t\tfailures = min(failures+1, maxFailures)\n\t\t\ttime.Sleep(backoff(failSleep, failures))\n\t\t} else {\n\t\t\tfailures = 0\n\t\t\topts.WaitIndex = qm.LastIndex\n\t\t}\n\t}\n}\n\n\/\/ reload is used to invoke the reload command\nfunc reload(conf *Config) error {\n\t\/\/ Determine the shell invocation based on OS\n\tvar shell, flag string\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = \"cmd\"\n\t\tflag = \"\/C\"\n\t} else {\n\t\tshell = \"\/bin\/sh\"\n\t\tflag = \"-c\"\n\t}\n\n\t\/\/ Create and invoke the command\n\tcmd := exec.Command(shell, flag, conf.ReloadCommand)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ shouldStop checks for a closed control channel\nfunc shouldStop(ch chan struct{}) bool {\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ asyncNotify is used to notify a channel\nfunc asyncNotify(ch chan struct{}) {\n\tselect {\n\tcase ch <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ min returns the min of two ints\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ backoff is used to compute an exponential backoff\nfunc backoff(interval time.Duration, times int) time.Duration {\n\ttimes--\n\treturn interval * time.Duration(math.Pow(2, float64(times)))\n}\n\n\/\/ ServerEntry is the full data structure exposed to\n\/\/ the template for each server\ntype ServerEntry struct {\n\tID string\n\tService string\n\tTags []string\n\tPort int\n\tIP net.IP\n\tNode string\n}\n\n\/\/ String is the default text representation of a server\nfunc (se *ServerEntry) String() string {\n\tname := fmt.Sprintf(\"%s_%s\", se.Node, se.ID)\n\taddr := &net.TCPAddr{IP: se.IP, Port: se.Port}\n\treturn fmt.Sprintf(\"server %s %s\", name, addr)\n}\n\n\/\/ formatOutput converts the service entries into a format\n\/\/ suitable for templating into the HAProxy file\nfunc formatOutput(inp map[string][]*consulapi.ServiceEntry) map[string][]*ServerEntry {\n\tout := make(map[string][]*ServerEntry)\n\tfor backend, entries := range inp {\n\t\tservers := make([]*ServerEntry, len(entries))\n\t\tfor idx, entry := range entries {\n\t\t\tservers[idx] = &ServerEntry{\n\t\t\t\tID: entry.Service.ID,\n\t\t\t\tService: entry.Service.Service,\n\t\t\t\tTags: entry.Service.Tags,\n\t\t\t\tPort: entry.Service.Port,\n\t\t\t\tIP: net.ParseIP(entry.Node.Address),\n\t\t\t\tNode: entry.Node.Node,\n\t\t\t}\n\t\t}\n\t\tout[backend] = servers\n\t}\n\treturn out\n}\n<commit_msg>Ignore the output of a check for purposes of change detection<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/armon\/consul-api\"\n)\n\nconst (\n\t\/\/ failSleep controls how long to sleep on a failure\n\tfailSleep = 5 * time.Second\n\n\t\/\/ maxFailures controls the maximum number of failures\n\t\/\/ before we limit the sleep value\n\tmaxFailures = 5\n\n\t\/\/ waitTime is used to control how long we do a blocking\n\t\/\/ query for\n\twaitTime = 60 * time.Second\n)\n\ntype backendData struct {\n\tsync.Mutex\n\n\t\/\/ Client is a shared Consul client\n\tClient *consulapi.Client\n\n\t\/\/ Servers maps each watch path to a list of entries\n\tServers map[*WatchPath][]*consulapi.ServiceEntry\n\n\t\/\/ Backends maps a backend to a list of watch paths used\n\t\/\/ to build up the server list\n\tBackends map[string][]*WatchPath\n\n\t\/\/ ChangeCh is used to inform of an update\n\tChangeCh chan struct{}\n\n\t\/\/ StopCh is used to trigger a stop\n\tStopCh chan struct{}\n}\n\n\/\/ watch is used to start a long running watcher to handle updates.\n\/\/ Returns a stopCh, and a finishCh.\nfunc watch(conf *Config) (chan struct{}, chan struct{}) {\n\tstopCh := make(chan struct{})\n\tfinishCh := make(chan struct{})\n\tgo runWatch(conf, stopCh, finishCh)\n\treturn stopCh, finishCh\n}\n\n\/\/ runWatch is a long running routine that watches with a\n\/\/ given configuration\nfunc runWatch(conf *Config, stopCh, doneCh chan struct{}) {\n\tdefer close(doneCh)\n\n\t\/\/ Create the consul client\n\tconsulConf := consulapi.DefaultConfig()\n\tif conf.Address != \"\" {\n\t\tconsulConf.Address = conf.Address\n\t}\n\n\t\/\/ Attempt to contact the agent\n\tclient, err := consulapi.NewClient(consulConf)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Failed to initialize consul client: %v\", err)\n\t\treturn\n\t}\n\tif _, err := client.Agent().NodeName(); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to contact consul agent: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create a backend store\n\tdata := &backendData{\n\t\tClient: client,\n\t\tServers: make(map[*WatchPath][]*consulapi.ServiceEntry),\n\t\tBackends: make(map[string][]*WatchPath),\n\t\tChangeCh: make(chan struct{}, 1),\n\t\tStopCh: stopCh,\n\t}\n\n\t\/\/ Start the watches\n\tdata.Lock()\n\tfor idx, watch := range conf.watches {\n\t\tdata.Backends[watch.Backend] = append(data.Backends[watch.Backend], watch)\n\t\tgo runSingleWatch(conf, data, idx, watch)\n\t}\n\tdata.Unlock()\n\n\t\/\/ Monitor for changes or stop\n\tfor {\n\t\tselect {\n\t\tcase <-data.ChangeCh:\n\t\t\tif maybeRefresh(conf, data) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-stopCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ maybeRefresh is used to handle a potential config update\nfunc maybeRefresh(conf *Config, data *backendData) (exit bool) {\n\t\/\/ Ignore initial updates until all the data is ready\n\tif !allWatchesReturned(conf, data) {\n\t\treturn\n\t}\n\n\t\/\/ Merge the data for each backend\n\tbackendServers := aggregateServers(data)\n\n\t\/\/ Build the output template\n\toutput, err := buildTemplate(conf, backendServers)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] %v\", err)\n\t\treturn true\n\t}\n\n\t\/\/ Check for a dry run\n\tif conf.DryRun {\n\t\tfmt.Printf(\"%s\\n\", output)\n\t\treturn true\n\t}\n\n\t\/\/ Write out the configuration\n\tif err := ioutil.WriteFile(conf.Path, output, 0660); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to write config file: %v\", err)\n\t\treturn true\n\t}\n\tlog.Printf(\"[INFO] Updated configuration file at %s\", conf.Path)\n\n\t\/\/ Invoke the reload hook\n\tif err := reload(conf); err != nil {\n\t\tlog.Printf(\"[ERR] Failed to reload: %v\", err)\n\t} else {\n\t\tlog.Printf(\"[INFO] Completed reload\")\n\t}\n\treturn\n}\n\n\/\/ allWatchesReturned checks if all the watches have some\n\/\/ data registered. Prevents early template generation.\nfunc allWatchesReturned(conf *Config, data *backendData) bool {\n\tdata.Lock()\n\tdefer data.Unlock()\n\treturn len(data.Servers) >= len(conf.watches)\n}\n\n\/\/ aggregateServers merges the watches belonging to each\n\/\/ backend together to prepare for template generation\nfunc aggregateServers(data *backendData) map[string][]*consulapi.ServiceEntry {\n\tbackendServers := make(map[string][]*consulapi.ServiceEntry)\n\tdata.Lock()\n\tdefer data.Unlock()\n\tfor backend, watches := range data.Backends {\n\t\tvar all []*consulapi.ServiceEntry\n\t\tfor _, watch := range watches {\n\t\t\tentries := data.Servers[watch]\n\t\t\tall = append(all, entries...)\n\t\t}\n\t\tbackendServers[backend] = all\n\t}\n\treturn backendServers\n}\n\n\/\/ buildTemplate is used to build the output template\n\/\/ from the configuration and server list\nfunc buildTemplate(conf *Config,\n\tservers map[string][]*consulapi.ServiceEntry) ([]byte, error) {\n\t\/\/ Format the output\n\toutVars := formatOutput(servers)\n\n\t\/\/ Read the template\n\traw, err := ioutil.ReadFile(conf.Template)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read template: %v\", err)\n\t}\n\n\t\/\/ Create the template\n\ttempl, err := template.New(\"output\").Parse(string(raw))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse the template: %v\", err)\n\t}\n\n\t\/\/ Generate the output\n\tvar output bytes.Buffer\n\tif err := templ.Execute(&output, outVars); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to generate the template: %v\", err)\n\t}\n\treturn output.Bytes(), nil\n}\n\n\/\/ runSingleWatch is used to query a single watch path for changes\nfunc runSingleWatch(conf *Config, data *backendData, idx int, watch *WatchPath) {\n\thealth := data.Client.Health()\n\topts := &consulapi.QueryOptions{\n\t\tWaitTime: waitTime,\n\t}\n\tif watch.Datacenter != \"\" {\n\t\topts.Datacenter = watch.Datacenter\n\t}\n\n\tfailures := 0\n\tfor {\n\t\tif shouldStop(data.StopCh) {\n\t\t\treturn\n\t\t}\n\t\tentries, qm, err := health.Service(watch.Service, watch.Tag, true, opts)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Failed to fetch service nodes: %v\", err)\n\t\t}\n\n\t\t\/\/ Patch the entries as necessary\n\t\tfor _, entry := range entries {\n\t\t\t\/\/ Modify the node name to prefix with the watch ID. This\n\t\t\t\/\/ prevents a name conflict on duplicate names\n\t\t\tentry.Node.Node = fmt.Sprintf(\"%d_%s\", idx, entry.Node.Node)\n\n\t\t\t\/\/ Patch the port if provided\n\t\t\tif watch.Port != 0 {\n\t\t\t\tentry.Service.Port = watch.Port\n\t\t\t}\n\n\t\t\t\/\/ Clear the health output to prevent reloading due to changes\n\t\t\t\/\/ in output text since we don't care.\n\t\t\tfor _, c := range entry.Checks {\n\t\t\t\tc.Notes = \"\"\n\t\t\t\tc.Output = \"\"\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Update the entries. If this is the first read, do it on error\n\t\tdata.Lock()\n\t\told, ok := data.Servers[watch]\n\t\tif !ok || (err == nil && !reflect.DeepEqual(old, entries)) {\n\t\t\tdata.Servers[watch] = entries\n\t\t\tasyncNotify(data.ChangeCh)\n\t\t\tif !conf.DryRun {\n\t\t\t\tlog.Printf(\"[DEBUG] Updated nodes for %v\", watch.Spec)\n\t\t\t}\n\t\t}\n\t\tdata.Unlock()\n\n\t\t\/\/ Stop immediately on a dry run\n\t\tif conf.DryRun {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check for an error\n\t\tif err != nil {\n\t\t\tfailures = min(failures+1, maxFailures)\n\t\t\ttime.Sleep(backoff(failSleep, failures))\n\t\t} else {\n\t\t\tfailures = 0\n\t\t\topts.WaitIndex = qm.LastIndex\n\t\t}\n\t}\n}\n\n\/\/ reload is used to invoke the reload command\nfunc reload(conf *Config) error {\n\t\/\/ Determine the shell invocation based on OS\n\tvar shell, flag string\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = \"cmd\"\n\t\tflag = \"\/C\"\n\t} else {\n\t\tshell = \"\/bin\/sh\"\n\t\tflag = \"-c\"\n\t}\n\n\t\/\/ Create and invoke the command\n\tcmd := exec.Command(shell, flag, conf.ReloadCommand)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ shouldStop checks for a closed control channel\nfunc shouldStop(ch chan struct{}) bool {\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ asyncNotify is used to notify a channel\nfunc asyncNotify(ch chan struct{}) {\n\tselect {\n\tcase ch <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ min returns the min of two ints\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ backoff is used to compute an exponential backoff\nfunc backoff(interval time.Duration, times int) time.Duration {\n\ttimes--\n\treturn interval * time.Duration(math.Pow(2, float64(times)))\n}\n\n\/\/ ServerEntry is the full data structure exposed to\n\/\/ the template for each server\ntype ServerEntry struct {\n\tID string\n\tService string\n\tTags []string\n\tPort int\n\tIP net.IP\n\tNode string\n}\n\n\/\/ String is the default text representation of a server\nfunc (se *ServerEntry) String() string {\n\tname := fmt.Sprintf(\"%s_%s\", se.Node, se.ID)\n\taddr := &net.TCPAddr{IP: se.IP, Port: se.Port}\n\treturn fmt.Sprintf(\"server %s %s\", name, addr)\n}\n\n\/\/ formatOutput converts the service entries into a format\n\/\/ suitable for templating into the HAProxy file\nfunc formatOutput(inp map[string][]*consulapi.ServiceEntry) map[string][]*ServerEntry {\n\tout := make(map[string][]*ServerEntry)\n\tfor backend, entries := range inp {\n\t\tservers := make([]*ServerEntry, len(entries))\n\t\tfor idx, entry := range entries {\n\t\t\tservers[idx] = &ServerEntry{\n\t\t\t\tID: entry.Service.ID,\n\t\t\t\tService: entry.Service.Service,\n\t\t\t\tTags: entry.Service.Tags,\n\t\t\t\tPort: entry.Service.Port,\n\t\t\t\tIP: net.ParseIP(entry.Node.Address),\n\t\t\t\tNode: entry.Node.Node,\n\t\t\t}\n\t\t}\n\t\tout[backend] = servers\n\t}\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>package whogo\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"github.com\/ziutek\/telnet\"\n\t\"io\/ioutil\"\n\t\"time\"\n)\n\n\/\/ Feed this function with data from Whois() function and it will tell whether the domain is available.\nfunc Available(data []byte) bool {\n\tavailable := [34]string{\"No Data Found\", \"NOT FOUND\", \"Domain Status: Available\", \"not registred,\", \"No match\", \"This query returned 0 objects\", \"Domain Not Found\", \"nothing found\", \"No records matching\", \"Status: AVAILABLE\", \"does not exist in database\", \"Status: Not Registered\", \"No match for\", \"Object does not exist\", \"We do not have an entry in our database matching your query\", \"no existe\", \"no matching record\", \"No domain records were found to match\", \"No entries found\", \"Status: free\", \"No entries found for the selected sourc\", \"not found...\", \"The domain has not been registered\", \"Not Registered\", \"No data was found\", \"This domain is available for registration\", \"Nothing found for this query\", \"No such domain\", \"No Objects Found\", \"Object_Not_Found\", \"No information available\", \"Domain is not registered\", \"domain name not known\", \"not found in database\"}\n\tfor _, v := range available {\n\t\tif v == string(data) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc solve(domain string, timeout time.Duration) (string, error) {\n\tconn, err := dial(\"whois.iana.org\", timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = send(conn, domain, timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tif bytes.Contains(scanner.Bytes(), []byte(\"refer\")) {\n\t\t\trefer := bytes.Split(scanner.Bytes(), []byte(\":\"))[1]\n\t\t\treturn string(bytes.TrimSpace(refer)), nil\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"\", nil\n}\n\nfunc dial(host string, timeout time.Duration) (*telnet.Conn, error) {\n\tconn, err := telnet.DialTimeout(\"tcp\", host+\":43\", timeout)\n\tif err != nil {\n\t\treturn conn, err\n\t}\n\tconn.SetUnixWriteMode(true)\n\treturn conn, nil\n}\n\nfunc send(conn *telnet.Conn, s string, timeout time.Duration) error {\n\tconn.SetWriteDeadline(time.Now().Add(timeout))\n\tbuf := make([]byte, len(s)+1)\n\tcopy(buf, s)\n\tbuf[len(s)] = '\\n'\n\t_, err := conn.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Takes in domain as a string without the any prefix. Example: google.com\n\/\/ Timeout defines timeout which is set at every write and read request.\n\/\/ Returns byte array of the WHOIS query.\nfunc Whois(domain string, timeout time.Duration) ([]byte, error) {\n\trefer, err := solve(domain, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := dial(refer, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = send(conn, domain, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twhois, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/TODO: automatically search for the exact domain when whois returns\n\t\/\/more than one record.\n\t\/\/ if bytes.Contains(whois, []byte(\"To single out one record\")) {\n\t\/\/ \tfmt.Println(\"Contains more than one record.\")\n\t\/\/ \terr = Send(conn, domain, timeout)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn nil, err\n\t\/\/ \t}\n\t\/\/ \twhois, err = ioutil.ReadAll(conn)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn nil, err\n\t\/\/ \t}\n\t\/\/ }\n\tconn.Close()\n\treturn bytes.TrimSpace(whois), nil\n}\n\n\/\/TODO: parse whois records and return Go struct\n\/\/ func Records(status []byte) {\n\/\/ \tlines := bytes.Split(status, []byte(\"\\n\"))\n\/\/ \tfor _, line := range lines {\n\/\/ \t\tfmt.Println(string(line))\n\/\/ \t}\n\/\/ }\n<commit_msg>add Records function<commit_after>package whogo\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"github.com\/9uuso\/go-jaro-winkler-distance\"\n\t\"github.com\/ziutek\/telnet\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype record struct {\n\tNameservers []string\n\tStatus []string\n\tCreated string\n\tUpdated string\n\tExpiration string\n\tReferral string\n}\n\n\/\/ Available tells whether a domain is available according to it's WHOIS query.\n\/\/ The parameter should be the result from Whois function.\nfunc Available(data []byte) bool {\n\tavailable := [34]string{\"No Data Found\", \"NOT FOUND\", \"Domain Status: Available\", \"not registred,\", \"No match\", \"This query returned 0 objects\", \"Domain Not Found\", \"nothing found\", \"No records matching\", \"Status: AVAILABLE\", \"does not exist in database\", \"Status: Not Registered\", \"No match for\", \"Object does not exist\", \"We do not have an entry in our database matching your query\", \"no existe\", \"no matching record\", \"No domain records were found to match\", \"No entries found\", \"Status: free\", \"No entries found for the selected sourc\", \"not found...\", \"The domain has not been registered\", \"Not Registered\", \"No data was found\", \"This domain is available for registration\", \"Nothing found for this query\", \"No such domain\", \"No Objects Found\", \"Object_Not_Found\", \"No information available\", \"Domain is not registered\", \"domain name not known\", \"not found in database\"}\n\tfor _, v := range available {\n\t\tif v == string(data) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc solve(domain string, timeout time.Duration) (string, error) {\n\tconn, err := dial(\"whois.iana.org\", timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = send(conn, domain, timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tif bytes.Contains(scanner.Bytes(), []byte(\"refer\")) {\n\t\t\trefer := bytes.Split(scanner.Bytes(), []byte(\":\"))[1]\n\t\t\treturn string(bytes.TrimSpace(refer)), nil\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"\", nil\n}\n\nfunc dial(host string, timeout time.Duration) (*telnet.Conn, error) {\n\tconn, err := telnet.DialTimeout(\"tcp\", host+\":43\", timeout)\n\tif err != nil {\n\t\treturn conn, err\n\t}\n\tconn.SetUnixWriteMode(true)\n\treturn conn, nil\n}\n\nfunc send(conn *telnet.Conn, s string, timeout time.Duration) error {\n\tconn.SetWriteDeadline(time.Now().Add(timeout))\n\tbuf := make([]byte, len(s)+1)\n\tcopy(buf, s)\n\tbuf[len(s)] = '\\n'\n\t_, err := conn.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Whois returns WHOIS query of a domain in format such as google.com\n\/\/ Timeout defines timeout which is set at every write and read request.\nfunc Whois(domain string, timeout time.Duration) ([]byte, error) {\n\trefer, err := solve(domain, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := dial(refer, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = send(conn, domain, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twhois, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/TODO: automatically search for the exact domain when whois returns\n\t\/\/more than one record.\n\t\/\/ if bytes.Contains(whois, []byte(\"To single out one record\")) {\n\t\/\/ \tfmt.Println(\"Contains more than one record.\")\n\t\/\/ \terr = Send(conn, domain, timeout)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn nil, err\n\t\/\/ \t}\n\t\/\/ \twhois, err = ioutil.ReadAll(conn)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn nil, err\n\t\/\/ \t}\n\t\/\/ }\n\tconn.Close()\n\treturn bytes.TrimSpace(whois), nil\n}\n\nfunc find(query map[string]string, s ...string) string {\n\n\tvar max = float64(0)\n\tvar res string\n\n\tif len(s) == 1 {\n\t\tfor whoisIndex, whoisValue := range query {\n\t\t\tscore := jwd.Calculate(whoisIndex, s[0])\n\t\t\tif score > max && score > 0.7 {\n\t\t\t\tmax = score\n\t\t\t\tres = whoisValue\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res\n}\n\n\/\/ Records uses Jaro-Winkler distance to parse WHOIS queries.\n\/\/ Other than .com domains may not be supported.\nfunc Records(data []byte) record {\n\tlines := bytes.Split(data, []byte(\"\\n\"))\n\tquery := make(map[string]string)\n\tvar record record\n\tfor _, line := range lines {\n\t\tif jwd.Calculate(strings.Split(string(line), \":\")[0], \"Referral\") > 0.7 && bytes.Contains(line, []byte(\":\")) {\n\t\t\trecord.Referral = strings.TrimSpace(strings.Split(string(line), \": \")[1])\n\t\t}\n\t\tif len(line) > 0 && bytes.Contains(line, []byte(\":\")) && len(bytes.TrimSpace(bytes.Split(line, []byte(\":\"))[1])) > 0 {\n\t\t\tthis := string(line)\n\t\t\tif len(query[strings.TrimSpace(strings.Split(this, \":\")[0])]) != 0 {\n\t\t\t\tn := query[strings.TrimSpace(strings.Split(this, \":\")[0])]\n\t\t\t\tquery[strings.TrimSpace(strings.Split(this, \":\")[0])] = n + \",\" + strings.TrimSpace(strings.Split(this, \":\")[1])\n\t\t\t} else {\n\t\t\t\tquery[strings.TrimSpace(strings.Split(this, \":\")[0])] = strings.TrimSpace(strings.Split(this, \":\")[1])\n\t\t\t}\n\t\t}\n\t}\n\trecord.Updated = find(query, \"Updated\")\n\trecord.Created = find(query, \"Created\")\n\trecord.Nameservers = strings.Split(find(query, \"Nameservers\"), \",\")\n\trecord.Status = strings.Split(find(query, \"Status\"), \",\")\n\trecord.Expiration = find(query, \"Expiration\")\n\treturn record\n}\n<|endoftext|>"} {"text":"<commit_before>package narcissus\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Write writes a structure pointer to the Augeas tree\nfunc (n *Narcissus) Write(val interface{}) error {\n\tref, err := structRef(val)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid interface: %v\", err)\n\t}\n\n\terr = n.Autoload(ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to autoload file: %v\", err)\n\t}\n\n\tpath, err := getPath(ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"undefined path: %v\", err)\n\t}\n\n\terr = n.writeStruct(ref, path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write interface to tree: %v\", err)\n\t}\n\n\terr = n.Augeas.Save()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to save Augeas tree: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeStruct(ref reflect.Value, path string) error {\n\trefType := ref.Type()\n\tfor i := 0; i < refType.NumField(); i++ {\n\t\tif refType.Field(i).Name == \"augeasPath\" {\n\t\t\t\/\/ Ignore the special `augeasPath` field\n\t\t\tcontinue\n\t\t}\n\t\terr := n.writeField(ref.Field(i), refType.Field(i), path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write field %s to path %s: %v\", refType.Field(i).Name, path, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeField(field reflect.Value, fieldType reflect.StructField, path string) error {\n\tfieldPath := fmt.Sprintf(\"%s\/%s\", path, fieldType.Tag.Get(\"path\"))\n\tif field.Kind() == reflect.Slice {\n\t\treturn n.writeSliceField(field, fieldType, path, fieldPath)\n\t} else if field.Kind() == reflect.Map {\n\t\treturn n.writeMapField(field, fieldType, path, fieldPath)\n\t}\n\treturn n.writeSimpleField(field, fieldPath, fieldType.Tag)\n}\n\nfunc (n *Narcissus) writeSimpleField(field reflect.Value, fieldPath string, tag reflect.StructTag) error {\n\taug := n.Augeas\n\t\/\/ There might be a better way to convert, but that does it\n\tvalue := fmt.Sprintf(\"%v\", field.Interface())\n\n\tif tag.Get(\"value-from\") == \"label\" {\n\t\treturn nil\n\t}\n\n\t\/\/ FIXME: use omitempty for that\n\tif value == \"\" {\n\t\treturn nil\n\t}\n\n\terr := aug.Set(fieldPath, value)\n\treturn err\n}\n\nfunc (n *Narcissus) writeSliceField(field reflect.Value, fieldType reflect.StructField, path, fieldPath string) error {\n\tfor i := 0; i < field.Len(); i++ {\n\t\tvalue := field.Index(i)\n\t\tvar p string\n\t\tif fieldType.Tag.Get(\"type\") == \"seq\" {\n\t\t\tp = fmt.Sprintf(\"%s\/%v\", path, i+1)\n\t\t} else {\n\t\t\tp = fmt.Sprintf(\"%s[%v]\", fieldPath, i+1)\n\t\t}\n\t\t\/\/ Create base node\n\t\terr := n.Augeas.Clear(p)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create base path for slice: %v\", err)\n\t\t}\n\t\tif value.Kind() == reflect.Struct {\n\t\t\terr := n.writeStruct(value, p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write slice struct value: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr := n.writeSimpleField(value, p, fieldType.Tag)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write slice value: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeMapField(field reflect.Value, fieldType reflect.StructField, path, fieldPath string) (err error) {\n\tkeys := field.MapKeys()\n\tvar purgeConditions []string\n\tfor _, k := range keys {\n\t\tvalue := field.MapIndex(k)\n\t\tvar p string\n\t\tif strings.HasSuffix(fieldPath, \"\/*\") {\n\t\t\t\/\/ TrimSuffix? ouch!\n\t\t\tp = fmt.Sprintf(\"%s\/%s\", strings.TrimSuffix(fieldPath, \"\/*\"), k)\n\t\t\tpurgeConditions = append(purgeConditions, fmt.Sprintf(\"label() != '%s'\", k))\n\t\t} else {\n\t\t\tp = fmt.Sprintf(\"%s[.='%s']\", fieldPath, k)\n\t\t\tpurgeConditions = append(purgeConditions, fmt.Sprintf(\". != '%s'\", k))\n\t\t}\n\t\tif value.Kind() == reflect.Struct {\n\t\t\tif fieldType.Tag.Get(\"key\") != \"label\" {\n\t\t\t\terr = n.writeSimpleField(k, p, fieldType.Tag)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to write map key: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = n.writeStruct(value, p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write map struct value: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr := n.writeSimpleField(value, p, fieldType.Tag)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write map value: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Purge absent keys\n\tpurge := fieldType.Tag.Get(\"purge\")\n\tif purge == \"true\" {\n\t\tpurgePath := fieldPath + \"[\" + strings.Join(purgeConditions, \" and \") + \"]\"\n\t\tn.Augeas.Remove(purgePath)\n\t}\n\n\treturn nil\n}\n<commit_msg>Ignore all augeas* fields in write<commit_after>package narcissus\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Write writes a structure pointer to the Augeas tree\nfunc (n *Narcissus) Write(val interface{}) error {\n\tref, err := structRef(val)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid interface: %v\", err)\n\t}\n\n\terr = n.Autoload(ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to autoload file: %v\", err)\n\t}\n\n\tpath, err := getPath(ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"undefined path: %v\", err)\n\t}\n\n\terr = n.writeStruct(ref, path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write interface to tree: %v\", err)\n\t}\n\n\terr = n.Augeas.Save()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to save Augeas tree: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeStruct(ref reflect.Value, path string) error {\n\trefType := ref.Type()\n\tfor i := 0; i < refType.NumField(); i++ {\n\t\tif strings.HasPrefix(refType.Field(i).Name, \"augeas\") {\n\t\t\t\/\/ Ignore the special `augeas*` fields\n\t\t\tcontinue\n\t\t}\n\t\terr := n.writeField(ref.Field(i), refType.Field(i), path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write field %s to path %s: %v\", refType.Field(i).Name, path, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeField(field reflect.Value, fieldType reflect.StructField, path string) error {\n\tfieldPath := fmt.Sprintf(\"%s\/%s\", path, fieldType.Tag.Get(\"path\"))\n\tif field.Kind() == reflect.Slice {\n\t\treturn n.writeSliceField(field, fieldType, path, fieldPath)\n\t} else if field.Kind() == reflect.Map {\n\t\treturn n.writeMapField(field, fieldType, path, fieldPath)\n\t}\n\treturn n.writeSimpleField(field, fieldPath, fieldType.Tag)\n}\n\nfunc (n *Narcissus) writeSimpleField(field reflect.Value, fieldPath string, tag reflect.StructTag) error {\n\taug := n.Augeas\n\t\/\/ There might be a better way to convert, but that does it\n\tvalue := fmt.Sprintf(\"%v\", field.Interface())\n\n\tif tag.Get(\"value-from\") == \"label\" {\n\t\treturn nil\n\t}\n\n\t\/\/ FIXME: use omitempty for that\n\tif value == \"\" {\n\t\treturn nil\n\t}\n\n\terr := aug.Set(fieldPath, value)\n\treturn err\n}\n\nfunc (n *Narcissus) writeSliceField(field reflect.Value, fieldType reflect.StructField, path, fieldPath string) error {\n\tfor i := 0; i < field.Len(); i++ {\n\t\tvalue := field.Index(i)\n\t\tvar p string\n\t\tif fieldType.Tag.Get(\"type\") == \"seq\" {\n\t\t\tp = fmt.Sprintf(\"%s\/%v\", path, i+1)\n\t\t} else {\n\t\t\tp = fmt.Sprintf(\"%s[%v]\", fieldPath, i+1)\n\t\t}\n\t\t\/\/ Create base node\n\t\terr := n.Augeas.Clear(p)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create base path for slice: %v\", err)\n\t\t}\n\t\tif value.Kind() == reflect.Struct {\n\t\t\terr := n.writeStruct(value, p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write slice struct value: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr := n.writeSimpleField(value, p, fieldType.Tag)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write slice value: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (n *Narcissus) writeMapField(field reflect.Value, fieldType reflect.StructField, path, fieldPath string) (err error) {\n\tkeys := field.MapKeys()\n\tvar purgeConditions []string\n\tfor _, k := range keys {\n\t\tvalue := field.MapIndex(k)\n\t\tvar p string\n\t\tif strings.HasSuffix(fieldPath, \"\/*\") {\n\t\t\t\/\/ TrimSuffix? ouch!\n\t\t\tp = fmt.Sprintf(\"%s\/%s\", strings.TrimSuffix(fieldPath, \"\/*\"), k)\n\t\t\tpurgeConditions = append(purgeConditions, fmt.Sprintf(\"label() != '%s'\", k))\n\t\t} else {\n\t\t\tp = fmt.Sprintf(\"%s[.='%s']\", fieldPath, k)\n\t\t\tpurgeConditions = append(purgeConditions, fmt.Sprintf(\". != '%s'\", k))\n\t\t}\n\t\tif value.Kind() == reflect.Struct {\n\t\t\tif fieldType.Tag.Get(\"key\") != \"label\" {\n\t\t\t\terr = n.writeSimpleField(k, p, fieldType.Tag)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to write map key: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = n.writeStruct(value, p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write map struct value: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr := n.writeSimpleField(value, p, fieldType.Tag)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to write map value: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Purge absent keys\n\tpurge := fieldType.Tag.Get(\"purge\")\n\tif purge == \"true\" {\n\t\tpurgePath := fieldPath + \"[\" + strings.Join(purgeConditions, \" and \") + \"]\"\n\t\tn.Augeas.Remove(purgePath)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This is Free Software covered by the terms of the MIT license.\n\/\/ See LICENSE file for details.\n\/\/ Copyright 2017 by Intevation GmbH\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n)\n\ntype xhtml struct {\n\tout bytes.Buffer\n}\n\nfunc (x *xhtml) tag(name string) func(*node) error {\n\ttag := \"<\" + name + \"\/>\"\n\treturn func(*node) error {\n\t\tx.out.WriteString(tag)\n\t\treturn nil\n\t}\n}\n\nfunc (x *xhtml) open(name string) func(*node) error {\n\ttag := \"<\" + name + \">\"\n\treturn func(*node) error {\n\t\tx.out.WriteString(tag)\n\t\treturn nil\n\t}\n}\n\nfunc (x *xhtml) close(name string) func(*node) error {\n\ttag := \"<\/\" + name + \">\"\n\treturn func(*node) error {\n\t\tx.out.WriteString(tag)\n\t\treturn nil\n\t}\n}\n\nfunc (x *xhtml) element(name string) *visitor {\n\treturn &visitor{x.open(name), x.close(name)}\n}\n\nfunc (x *xhtml) heading(level int) *visitor {\n\ttag := fmt.Sprintf(\"h%d\", level)\n\treturn &visitor{x.open(tag), x.close(tag)}\n}\n\nfunc (x *xhtml) text(n *node) error {\n\tenc := xml.NewEncoder(&x.out)\n\ttxt := n.value.(string)\n\tenc.EncodeToken(xml.CharData(txt))\n\tenc.Flush()\n\treturn nil\n}\n\nfunc (x *xhtml) noWikiInline(n *node) error {\n\tx.out.WriteString(\"<tt>\")\n\tx.text(n)\n\tx.out.WriteString(\"<\/tt>\")\n\treturn nil\n}\n\nfunc (x *xhtml) noWiki(n *node) error {\n\tx.out.WriteString(\"<pre>\")\n\tx.noWikiInline(n)\n\tx.out.WriteString(\"<\/pre>\\n\")\n\treturn nil\n}\n\nfunc (x *xhtml) link(n *node) error {\n\thref := n.value.(string)\n\tenc := xml.NewEncoder(&x.out)\n\tenc.EncodeToken(xml.StartElement{\n\t\tName: xml.Name{Local: \"a\"},\n\t\tAttr: []xml.Attr{{xml.Name{Local: \"href\"}, href}},\n\t})\n\tenc.Flush()\n\treturn nil\n}\n\nfunc (x *xhtml) image(n *node) error {\n\timg := n.value.(*image)\n\tenc := xml.NewEncoder(&x.out)\n\tattr := []xml.Attr{{xml.Name{Local: \"src\"}, img.src}}\n\tif img.alt != \"\" {\n\t\tattr = append(attr, xml.Attr{xml.Name{Local: \"src\"}, img.alt})\n\t}\n\tenc.EncodeToken(xml.StartElement{\n\t\tName: xml.Name{Local: \"img\"},\n\t\tAttr: attr,\n\t})\n\tenc.Flush()\n\treturn nil\n}\n\nfunc exportXHTML(doc *document, out io.Writer) error {\n\n\tvar x xhtml\n\n\tx.out.WriteString(xml.Header)\n\tx.out.WriteString(\"<html>\\n<body>\\n\")\n\n\terr := doc.traverse(map[nodeType]*visitor{\n\t\torderedListNode: x.element(\"ol\"),\n\t\tunorderedListNode: x.element(\"ul\"),\n\t\tlistItemNode: x.element(\"li\"),\n\t\ttextNode: &visitor{enter: x.text},\n\t\tboldNode: x.element(\"strong\"),\n\t\titalicsNode: x.element(\"i\"),\n\t\tunderlinedNode: x.element(\"em\"),\n\t\tstrikeNode: x.element(\"del\"),\n\t\tsuperscriptNode: x.element(\"sup\"),\n\t\tsubscriptNode: x.element(\"sub\"),\n\t\ttableNode: x.element(\"table\"),\n\t\ttableRowNode: x.element(\"tr\"),\n\t\ttableCellNode: x.element(\"td\"),\n\t\ttableHeaderRowNode: x.element(\"th\"),\n\t\ttableHeaderCellNode: x.element(\"td\"),\n\t\theading1Node: x.heading(1),\n\t\theading2Node: x.heading(2),\n\t\theading3Node: x.heading(3),\n\t\theading4Node: x.heading(4),\n\t\theading5Node: x.heading(5),\n\t\theading6Node: x.heading(6),\n\t\tparagraphNode: x.element(\"p\"),\n\t\tlineBreakNode: &visitor{enter: x.tag(\"br\")},\n\t\tescapeNode: &visitor{enter: x.text},\n\t\tnoWikiNode: &visitor{enter: x.noWiki},\n\t\tnoWikiInlineNode: &visitor{enter: x.noWikiInline},\n\t\t\/\/ placeholderNode not supported, yet.\n\t\timageNode: &visitor{enter: x.image, leave: x.close(\"img\")},\n\t\tlinkNode: &visitor{enter: x.link, leave: x.close(\"a\")},\n\t\thorizontalLineNode: &visitor{enter: x.tag(\"hr\")},\n\t})\n\tif err != nil {\n\t\tx.out.WriteString(\"\\n<\/body>\\n<\/html>\\n\")\n\t\t_, err = x.out.WriteTo(out)\n\t}\n\treturn err\n}\n<commit_msg>Use bufio.Writer instead of bytes.Buffer to write XHTML to. Should reduce alloc and copying around for larger documents. Also be more keen about errors while writing.<commit_after>\/\/ This is Free Software covered by the terms of the MIT license.\n\/\/ See LICENSE file for details.\n\/\/ Copyright 2017 by Intevation GmbH\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n)\n\ntype xhtml struct {\n\tout *bufio.Writer\n}\n\nfunc (x *xhtml) writeString(s string) error {\n\t_, err := x.out.WriteString(s)\n\treturn err\n}\n\nfunc (x *xhtml) tag(name string) func(*node) error {\n\ttag := \"<\" + name + \"\/>\"\n\treturn func(*node) error { return x.writeString(tag) }\n}\n\nfunc (x *xhtml) open(name string) func(*node) error {\n\ttag := \"<\" + name + \">\"\n\treturn func(*node) error { return x.writeString(tag) }\n}\n\nfunc (x *xhtml) close(name string) func(*node) error {\n\ttag := \"<\/\" + name + \">\"\n\treturn func(*node) error { return x.writeString(tag) }\n}\n\nfunc (x *xhtml) element(name string) *visitor {\n\treturn &visitor{x.open(name), x.close(name)}\n}\n\nfunc (x *xhtml) heading(level int) *visitor {\n\ttag := fmt.Sprintf(\"h%d\", level)\n\treturn &visitor{x.open(tag), x.close(tag)}\n}\n\nfunc (x *xhtml) text(n *node) error {\n\tenc := xml.NewEncoder(x.out)\n\ttxt := n.value.(string)\n\tif err := enc.EncodeToken(xml.CharData(txt)); err != nil {\n\t\treturn err\n\t}\n\treturn enc.Flush()\n}\n\nfunc (x *xhtml) noWikiInline(n *node) error {\n\tx.writeString(\"<tt>\")\n\tx.text(n)\n\treturn x.writeString(\"<\/tt>\")\n}\n\nfunc (x *xhtml) noWiki(n *node) error {\n\tx.writeString(\"<pre>\")\n\tx.noWikiInline(n)\n\treturn x.writeString(\"<\/pre>\\n\")\n}\n\nfunc (x *xhtml) link(n *node) error {\n\thref := n.value.(string)\n\tenc := xml.NewEncoder(x.out)\n\tif err := enc.EncodeToken(xml.StartElement{\n\t\tName: xml.Name{Local: \"a\"},\n\t\tAttr: []xml.Attr{{xml.Name{Local: \"href\"}, href}},\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn enc.Flush()\n}\n\nfunc (x *xhtml) image(n *node) error {\n\timg := n.value.(*image)\n\tenc := xml.NewEncoder(x.out)\n\tattr := []xml.Attr{{xml.Name{Local: \"src\"}, img.src}}\n\tif img.alt != \"\" {\n\t\tattr = append(attr, xml.Attr{xml.Name{Local: \"src\"}, img.alt})\n\t}\n\tif err := enc.EncodeToken(xml.StartElement{\n\t\tName: xml.Name{Local: \"img\"},\n\t\tAttr: attr,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn enc.Flush()\n}\n\nfunc exportXHTML(doc *document, out io.Writer) error {\n\n\tx := xhtml{out: bufio.NewWriter(out)}\n\n\tx.out.WriteString(xml.Header)\n\tx.out.WriteString(\"<html>\\n<body>\\n\")\n\n\terr := doc.traverse(map[nodeType]*visitor{\n\t\torderedListNode: x.element(\"ol\"),\n\t\tunorderedListNode: x.element(\"ul\"),\n\t\tlistItemNode: x.element(\"li\"),\n\t\ttextNode: &visitor{enter: x.text},\n\t\tboldNode: x.element(\"strong\"),\n\t\titalicsNode: x.element(\"i\"),\n\t\tunderlinedNode: x.element(\"em\"),\n\t\tstrikeNode: x.element(\"del\"),\n\t\tsuperscriptNode: x.element(\"sup\"),\n\t\tsubscriptNode: x.element(\"sub\"),\n\t\ttableNode: x.element(\"table\"),\n\t\ttableRowNode: x.element(\"tr\"),\n\t\ttableCellNode: x.element(\"td\"),\n\t\ttableHeaderRowNode: x.element(\"th\"),\n\t\ttableHeaderCellNode: x.element(\"td\"),\n\t\theading1Node: x.heading(1),\n\t\theading2Node: x.heading(2),\n\t\theading3Node: x.heading(3),\n\t\theading4Node: x.heading(4),\n\t\theading5Node: x.heading(5),\n\t\theading6Node: x.heading(6),\n\t\tparagraphNode: x.element(\"p\"),\n\t\tlineBreakNode: &visitor{enter: x.tag(\"br\")},\n\t\tescapeNode: &visitor{enter: x.text},\n\t\tnoWikiNode: &visitor{enter: x.noWiki},\n\t\tnoWikiInlineNode: &visitor{enter: x.noWikiInline},\n\t\t\/\/ placeholderNode not supported, yet.\n\t\timageNode: &visitor{enter: x.image, leave: x.close(\"img\")},\n\t\tlinkNode: &visitor{enter: x.link, leave: x.close(\"a\")},\n\t\thorizontalLineNode: &visitor{enter: x.tag(\"hr\")},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tx.writeString(\"\\n<\/body>\\n<\/html>\\n\")\n\treturn x.out.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package HologramGo\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n)\n\n\/\/ Device object returned in the response.\ntype Device map[string]interface{}\n\n\/\/ Devices is just a list of Device(s).\ntype Devices []interface{}\n\n\/\/ GetDevices returns device details.\nfunc GetDevices() Devices {\n\n\treq := createGetRequest(\"\/devices\")\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoArrayObject(resp)\n}\n\n\/\/ GetDevice returns a device detail based on the given deviceid.\nfunc GetDevice(deviceid int) Device {\n\n\treq := createGetRequest(\"\/devices\/\" + strconv.Itoa(deviceid))\n\n\tresp, err := sendRequest(req)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/ ClaimOwnershipAndActivateDevice claims ownership and activate the given device.\nfunc ClaimOwnershipAndActivateDevice(simnumber int) Device {\n\n\tvar params Parameters\n\treq := createPostRequest(\"\/cellular\/sim_\"+strconv.Itoa(simnumber)+\"\/claim\", params)\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/ PurchaseAndAssignPhoneNumberToDevice purchases and assigns a phone number to the device.\nfunc PurchaseAndAssignPhoneNumberToDevice(deviceid int) Device {\n\n\tvar params Parameters\n\treq := createPostRequest(\"\/devices\/\"+strconv.Itoa(deviceid)+\"\/addnumber\", params)\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GENERIC DEVICE GETTER FUNCTIONS\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ GetDeviceId returns the id.\nfunc (device Device) GetDeviceId() float64 {\n\treturn device[\"id\"].(float64)\n}\n\n\/\/ GetDeviceUserId returns the user id.\nfunc (device Device) GetDeviceUserId() float64 {\n\treturn device[\"userid\"].(float64)\n}\n\n\/\/ GetDeviceName returns the device name.\nfunc (device Device) GetDeviceName() string {\n\treturn device[\"name\"].(string)\n}\n\n\/\/ GetDeviceType returns the device type.\nfunc (device Device) GetDeviceType() string {\n\treturn device[\"type\"].(string)\n}\n\n\/\/ GetWhenCreated returns a UNIX timestamp of the creation time.\nfunc (device Device) GetWhenCreated() string {\n\treturn device[\"whencreated\"].(string)\n}\n\n\/\/ GetPhoneNumber returns a phone number.\nfunc (device Device) GetPhoneNumber() string {\n\treturn device[\"phonenumber\"].(string)\n}\n\n\/\/ GetTunnelable returns true if it is tunnelable.\nfunc (device Device) GetTunnelable() bool {\n\treturn device[\"tunnelable\"].(bool)\n}\n\n\/\/ TODO: links\/cellular\n<commit_msg>fix lint issues<commit_after>package HologramGo\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n)\n\n\/\/ Device object returned in the response.\ntype Device map[string]interface{}\n\n\/\/ Devices is just a list of Device(s).\ntype Devices []interface{}\n\n\/\/ GetDevices returns device details.\nfunc GetDevices() Devices {\n\n\treq := createGetRequest(\"\/devices\")\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoArrayObject(resp)\n}\n\n\/\/ GetDevice returns a device detail based on the given deviceid.\nfunc GetDevice(deviceid int) Device {\n\n\treq := createGetRequest(\"\/devices\/\" + strconv.Itoa(deviceid))\n\n\tresp, err := sendRequest(req)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/ ClaimOwnershipAndActivateDevice claims ownership and activate the given device.\nfunc ClaimOwnershipAndActivateDevice(simnumber int) Device {\n\n\tvar params Parameters\n\treq := createPostRequest(\"\/cellular\/sim_\"+strconv.Itoa(simnumber)+\"\/claim\", params)\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/ PurchaseAndAssignPhoneNumberToDevice purchases and assigns a phone number to the device.\nfunc PurchaseAndAssignPhoneNumberToDevice(deviceid int) Device {\n\n\tvar params Parameters\n\treq := createPostRequest(\"\/devices\/\"+strconv.Itoa(deviceid)+\"\/addnumber\", params)\n\n\tresp, err := sendRequest(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not send request: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn unmarshallIntoObject(resp)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GENERIC DEVICE GETTER FUNCTIONS\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ GetDeviceID returns the id.\nfunc (device Device) GetDeviceID() float64 {\n\treturn device[\"id\"].(float64)\n}\n\n\/\/ GetDeviceUserID returns the user id.\nfunc (device Device) GetDeviceUserID() float64 {\n\treturn device[\"userid\"].(float64)\n}\n\n\/\/ GetDeviceName returns the device name.\nfunc (device Device) GetDeviceName() string {\n\treturn device[\"name\"].(string)\n}\n\n\/\/ GetDeviceType returns the device type.\nfunc (device Device) GetDeviceType() string {\n\treturn device[\"type\"].(string)\n}\n\n\/\/ GetWhenCreated returns a UNIX timestamp of the creation time.\nfunc (device Device) GetWhenCreated() string {\n\treturn device[\"whencreated\"].(string)\n}\n\n\/\/ GetPhoneNumber returns a phone number.\nfunc (device Device) GetPhoneNumber() string {\n\treturn device[\"phonenumber\"].(string)\n}\n\n\/\/ GetTunnelable returns true if it is tunnelable.\nfunc (device Device) GetTunnelable() bool {\n\treturn device[\"tunnelable\"].(bool)\n}\n\n\/\/ TODO: links\/cellular\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], pairingRequest.Duration)\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\tnetworkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X Key:% X\", *networkInfo.NwkChannel, *networkInfo.PanId, networkKey.NewKey)\n\n\treturn d.FetchDevices()\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\tgo func() {\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.SendEvent(\"pairing-ended\", &events.PairingStarted{\n\t\t\tDuration: int(duration),\n\t\t})\n\t}()\n\treturn nil\n}\n\nfunc (d *Driver) FetchDevices() error {\n\treturn d.nwkmgrConn.FetchDeviceList()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Debugf(\"---- Found Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been repaired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info: %s\", err)\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t} else {\n\t\tdevice.ManufacturerName = \"Unknown\"\n\t}\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t} else {\n\t\tdevice.ModelIdentifier = fmt.Sprintf(\"MAC:%X\", *deviceInfo.IeeeAddress)\n\t}\n\n\tname := fmt.Sprintf(\"%s by %s\", device.ModelIdentifier, device.ManufacturerName)\n\n\tdevice.info.Name = &name\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<commit_msg>Restore info level logging of join windows. Use correct type on pairing-ended event.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n\n\tdevicesFound int\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], pairingRequest.Duration)\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\tnetworkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X Key:% X\", *networkInfo.NwkChannel, *networkInfo.PanId, networkKey.NewKey)\n\n\treturn d.FetchDevices()\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tgo func() {\n\t \tsave := d.devicesFound;\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.Log.Infof(\"Join window closes after %d seconds.\", duration);\n\t\td.SendEvent(\"pairing-ended\", &events.PairingEnded{\n\t\t\tDevicesFound: int(d.devicesFound - save),\n\t\t})\n\t}()\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\td.Log.Infof(\"Join window opens for %d seconds\", duration);\n\n\treturn nil\n}\n\nfunc (d *Driver) FetchDevices() error {\n\treturn d.nwkmgrConn.FetchDeviceList()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Debugf(\"---- Found Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been repaired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info: %s\", err)\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t} else {\n\t\tdevice.ManufacturerName = \"Unknown\"\n\t}\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t} else {\n\t\tdevice.ModelIdentifier = fmt.Sprintf(\"MAC:%X\", *deviceInfo.IeeeAddress)\n\t}\n\n\tname := fmt.Sprintf(\"%s by %s\", device.ModelIdentifier, device.ManufacturerName)\n\n\tdevice.info.Name = &name\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\td.devicesFound++;\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n\n\tdevicesFound int\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif duration > 254 {\n\t\t\tduration = 254\n\t\t}\n\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\t\/*networkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}*\/\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X Key:% X\", *networkInfo.NwkChannel, *networkInfo.PanId)\n\n\treturn d.FetchDevices()\n\n}\n\nfunc (d *Driver) StartPairing(period uint32) (*uint32, error) {\n\tif period > 254 {\n\t\tperiod = 254\n\t}\n\terr := d.EnableJoin(period)\n\treturn &period, err\n}\n\nfunc (d *Driver) EndPairing() error {\n\terr := d.EnableJoin(0)\n\treturn err\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\tgo func() {\n\t\tsave := d.devicesFound\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.Log.Infof(\"Join window closes after %d seconds.\", duration)\n\t\td.SendEvent(\"pairing-ended\", &events.PairingEnded{\n\t\t\tDevicesFound: int(d.devicesFound - save),\n\t\t})\n\t}()\n\n\td.Log.Infof(\"Join window opens for %d seconds\", duration)\n\n\treturn nil\n}\n\nfunc (d *Driver) FetchDevices() error {\n\treturn d.nwkmgrConn.FetchDeviceList()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Debugf(\"---- Found Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been repaired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info: %s\", err)\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t} else {\n\t\tdevice.ManufacturerName = \"Unknown\"\n\t}\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t} else {\n\t\tdevice.ModelIdentifier = fmt.Sprintf(\"MAC:%X\", *deviceInfo.IeeeAddress)\n\t}\n\n\tname := fmt.Sprintf(\"%s by %s\", device.ModelIdentifier, device.ManufacturerName)\n\n\tdevice.info.Name = &name\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\td.devicesFound++\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<commit_msg>Don't export devices if we can't talk to them. Wait 30s and try again. And if we don't have any nice name to say, don't say anything at all.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n\n\tdevicesFound int\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif duration > 254 {\n\t\t\tduration = 254\n\t\t}\n\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\t\/*networkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}*\/\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X\", *networkInfo.NwkChannel, *networkInfo.PanId)\n\n\td.StartFetchingDevices()\n\n\treturn nil\n}\n\nfunc (d *Driver) StartPairing(period uint32) (*uint32, error) {\n\tif period > 254 {\n\t\tperiod = 254\n\t}\n\terr := d.EnableJoin(period)\n\treturn &period, err\n}\n\nfunc (d *Driver) EndPairing() error {\n\terr := d.EnableJoin(0)\n\treturn err\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\tgo func() {\n\t\tsave := d.devicesFound\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.Log.Infof(\"Join window closes after %d seconds.\", duration)\n\t\td.SendEvent(\"pairing-ended\", &events.PairingEnded{\n\t\t\tDevicesFound: int(d.devicesFound - save),\n\t\t})\n\t}()\n\n\td.Log.Infof(\"Join window opens for %d seconds\", duration)\n\treturn nil\n}\n\nfunc (d *Driver) StartFetchingDevices() {\n\tgo func() {\n\t\tfor {\n\t\t\td.nwkmgrConn.FetchDeviceList()\n\t\t\ttime.Sleep(time.Second * 30)\n\t\t}\n\t}()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\t\/\/ XXX: This device status is often wrong. Useless.\n\t\/*if *deviceInfo.DeviceStatus != nwkmgr.NwkDeviceStatusT_DEVICE_ON_LINE {\n\t\tlog.Debugf(\"---- Found Offline Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\n\t\t\/\/ TODO: Inform the device (if we've seen it online) to stop polling\n\n\t\treturn\n\t}*\/\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been re-paired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t\treturn\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info for: %X : %s\", *deviceInfo.IeeeAddress, err)\n\t\treturn\n\t}\n\n\tname := \"\"\n\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t\tname = device.ModelIdentifier\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t\tif device.ModelIdentifier != \"\" {\n\t\t\tname += \" by \"\n\t\t}\n\t\tname += device.ManufacturerName\n\t}\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Infof(\"---- Found Device IEEE:%X Name:%s ----\\f\", *deviceInfo.IeeeAddress, name)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif name != \"\" {\n\t\tdevice.info.Name = &name\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\td.devicesFound++\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/xolan\/pin\/list\"\n)\n\nfunc sanity_check() {\n\tlog.Debugln(\"Checking sanity...\")\n\tvar path, _ = homedir.Expand(\"~\/.pin\")\n\tif _, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666); !os.IsNotExist(err) {\n\t\tlog.Debugf(\"Found %s\", path)\n\t} else {\n\t\tlog.Errorln(err.Error())\n\t}\n}\n\nfunc config(v bool) {\n\tif v {\n\t\tlog.SetLevel(log.DebugLevel)\n\t} else {\n\t\tlog.SetLevel(log.ErrorLevel)\n\t}\n\tsanity_check()\n}\n\nfunc main() {\n\tvar Verbose bool\n\n\tvar PinCmd = &cobra.Command{\n\t\tUse: \"pin\",\n\t\tShort: \"Pin is a command pinner, similar to aliasing\",\n\t\tLong: `Pin is a command pinner, similar to aliasing`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t},\n\t}\n\n\tvar ListCmd = &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"Display a list of pinned commands\",\n\t\tLong: \"Display a list of pinned commands\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t\tlist.List()\n\t\t},\n\t}\n\t\n\tvar GenDocsCmd = &cobra.Command{\n\t\tUse:\t\"gendocs\",\n\t\tShort: \"Generate documentation for this program\",\n\t\tLong: \"Generate documentation for this program\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t\tcobra.GenMarkdownTree(PinCmd, \".\/\")\n\t\t},\n\t}\n\n\tPinCmd.PersistentFlags().BoolVarP(&Verbose, \"verbose\", \"v\", false, \"verbose output\")\n\tPinCmd.AddCommand(ListCmd)\n\tPinCmd.AddCommand(GenDocsCmd)\n\tPinCmd.Execute()\n}\n<commit_msg>\"pin\" now displays usage if nothing else is provided<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/xolan\/pin\/list\"\n)\n\nfunc sanity_check() {\n\tlog.Debugln(\"Checking sanity...\")\n\tvar path, _ = homedir.Expand(\"~\/.pin\")\n\tif _, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666); !os.IsNotExist(err) {\n\t\tlog.Debugf(\"Found %s\", path)\n\t} else {\n\t\tlog.Errorln(err.Error())\n\t}\n}\n\nfunc config(v bool) {\n\tif v {\n\t\tlog.SetLevel(log.DebugLevel)\n\t} else {\n\t\tlog.SetLevel(log.ErrorLevel)\n\t}\n\tsanity_check()\n}\n\nfunc main() {\n\tvar Verbose bool\n\n\tvar PinCmd = &cobra.Command{\n\t\tUse: \"pin\",\n\t\tShort: \"Pin is a command pinner, similar to aliasing\",\n\t\tLong: `Pin is a command pinner, similar to aliasing`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t\tfmt.Println(cmd.UsageString())\n\t\t},\n\t}\n\n\tvar ListCmd = &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"Display a list of pinned commands\",\n\t\tLong: \"Display a list of pinned commands\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t\tlist.List()\n\t\t},\n\t}\n\n\tvar GenDocsCmd = &cobra.Command{\n\t\tUse: \"gendocs\",\n\t\tShort: \"Generate documentation for this program\",\n\t\tLong: \"Generate documentation for this program\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tconfig(Verbose)\n\t\t\tcobra.GenMarkdownTree(PinCmd, \".\/\")\n\t\t},\n\t}\n\n\tPinCmd.PersistentFlags().BoolVarP(&Verbose, \"verbose\", \"v\", false, \"verbose output\")\n\tPinCmd.AddCommand(ListCmd)\n\tPinCmd.AddCommand(GenDocsCmd)\n\tPinCmd.Execute()\n}\n<|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/clients\"\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/data\"\n\ttimestamp \"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\tmetricpb \"google.golang.org\/genproto\/googleapis\/api\/metric\"\n\tmonitoredres \"google.golang.org\/genproto\/googleapis\/api\/monitoredres\"\n\tmonitoringpb \"google.golang.org\/genproto\/googleapis\/monitoring\/v3\"\n)\n\nconst baseMetricType = \"custom.googleapis.com\/bazel\/ci\"\n\ntype PlatformLoad struct {\n\tclient *clients.BuildkiteClient\n\tcolumns []Column\n\tbuilds int\n}\n\nfunc (pl *PlatformLoad) Name() string {\n\treturn \"platform_load\"\n}\n\nfunc (pl *PlatformLoad) Columns() []Column {\n\treturn pl.columns\n}\n\nfunc (pl *PlatformLoad) Collect() (data.DataSet, error) {\n\tbuilds, err := pl.client.GetMostRecentBuilds(\"all\", pl.builds)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot get builds to determine platform load: %v\", err)\n\t}\n\n\tresult := &loadDataSet{headers: GetColumnNames(pl.columns), ts: time.Now()}\n\tallPlatforms := make(map[string]bool)\n\twaiting := make(map[string]int)\n\trunning := make(map[string]int)\n\tfor _, build := range builds {\n\t\tfor _, job := range build.Jobs {\n\t\t\t\/\/ Do not use getPlatform() since it may return \"rbe\", but here we're only interested in the actual worker OS (which would be \"linux\" in the rbe case).\n\t\t\tplatform := getPlatformFromAgentQueryRules(job.AgentQueryRules)\n\t\t\tif platform == \"\" || job.CreatedAt == nil || job.FinishedAt != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tallPlatforms[platform] = true\n\t\t\tswitch *job.State {\n\t\t\tcase \"running\":\n\t\t\t\trunning[platform] += 1\n\t\t\tcase \"scheduled\":\n\t\t\t\t\/*\n\t\t\t\t\tState \"scheduled\" = waiting for a worker to become available\n\t\t\t\t\tState \"waiting\" \/ \"waiting_failed\" = waiting for another task to finish\n\n\t\t\t\t\tWe're only interested in \"scheduled\" jobs since they may indicate a shortage of workers.\n\t\t\t\t*\/\n\t\t\t\twaiting[platform] += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.rows = make([]loadDataRow, 0)\n\tfor platform := range allPlatforms {\n\t\trow := loadDataRow{platform: platform, waitingJobs: waiting[platform], runningJobs: running[platform]}\n\t\tresult.rows = append(result.rows, row)\n\t}\n\treturn result, nil\n}\n\n\/\/ CREATE TABLE platform_load (timestamp DATETIME, platform VARCHAR(255), waiting_jobs INT, running_jobs INT, PRIMARY KEY(timestamp, platform));\nfunc CreatePlatformLoad(client *clients.BuildkiteClient, builds int) *PlatformLoad {\n\tcolumns := []Column{Column{\"timestamp\", true}, Column{\"platform\", true}, Column{\"waiting_jobs\", false}, Column{\"running_jobs\", false}}\n\treturn &PlatformLoad{client: client, columns: columns, builds: builds}\n}\n\ntype loadDataRow struct {\n\tplatform string\n\twaitingJobs int\n\trunningJobs int\n}\n\ntype loadDataSet struct {\n\theaders []string\n\tts time.Time\n\trows []loadDataRow\n}\n\nfunc (lds *loadDataSet) GetData() *data.LegacyDataSet {\n\trawSet := data.CreateDataSet(lds.headers)\n\tfor _, row := range lds.rows {\n\t\trawRow := []interface{}{lds.ts, row.platform, row.waitingJobs, row.runningJobs}\n\t\trawSet.Data = append(rawSet.Data, rawRow)\n\t}\n\treturn rawSet\n}\n\nfunc (lds *loadDataSet) CreateTimeSeriesRequest(projectID string) *monitoringpb.CreateTimeSeriesRequest {\n\tts := ×tamp.Timestamp{\n\t\tSeconds: lds.ts.Unix(),\n\t}\n\tseries := make([]*monitoringpb.TimeSeries, len(lds.rows)*2)\n\tfor i, row := range lds.rows {\n\t\tseries[i] = createTimeSeries(ts, row.platform, \"waiting_jobs\", row.waitingJobs)\n\t\tseries[i] = createTimeSeries(ts, row.platform, \"running_jobs\", row.runningJobs)\n\t}\n\n\treturn &monitoringpb.CreateTimeSeriesRequest{\n\t\tName: \"projects\/\" + projectID,\n\t\tTimeSeries: series,\n\t}\n}\n\nfunc createTimeSeries(ts *timestamp.Timestamp, platform, metricSuffix string, value int) *monitoringpb.TimeSeries {\n\treturn &monitoringpb.TimeSeries{\n\t\tMetric: &metricpb.Metric{\n\t\t\tType: fmt.Sprintf(\"%s\/%s\/%s\", baseMetricType, platform, metricSuffix),\n\t\t},\n\t\tResource: &monitoredres.MonitoredResource{\n\t\t\tType: \"global\",\n\t\t},\n\t\tPoints: []*monitoringpb.Point{{\n\t\t\tInterval: &monitoringpb.TimeInterval{\n\t\t\t\tStartTime: ts,\n\t\t\t\tEndTime: ts,\n\t\t\t},\n\t\t\tValue: &monitoringpb.TypedValue{\n\t\t\t\tValue: &monitoringpb.TypedValue_Int64Value{\n\t\t\t\t\tInt64Value: int64(value),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t}\n}\n<commit_msg>Fix list indexes of time series.<commit_after>package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/clients\"\n\t\"github.com\/fweikert\/continuous-integration\/metrics\/data\"\n\ttimestamp \"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\tmetricpb \"google.golang.org\/genproto\/googleapis\/api\/metric\"\n\tmonitoredres \"google.golang.org\/genproto\/googleapis\/api\/monitoredres\"\n\tmonitoringpb \"google.golang.org\/genproto\/googleapis\/monitoring\/v3\"\n)\n\nconst baseMetricType = \"custom.googleapis.com\/bazel\/ci\"\n\ntype PlatformLoad struct {\n\tclient *clients.BuildkiteClient\n\tcolumns []Column\n\tbuilds int\n}\n\nfunc (pl *PlatformLoad) Name() string {\n\treturn \"platform_load\"\n}\n\nfunc (pl *PlatformLoad) Columns() []Column {\n\treturn pl.columns\n}\n\nfunc (pl *PlatformLoad) Collect() (data.DataSet, error) {\n\tbuilds, err := pl.client.GetMostRecentBuilds(\"all\", pl.builds)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot get builds to determine platform load: %v\", err)\n\t}\n\n\tresult := &loadDataSet{headers: GetColumnNames(pl.columns), ts: time.Now()}\n\tallPlatforms := make(map[string]bool)\n\twaiting := make(map[string]int)\n\trunning := make(map[string]int)\n\tfor _, build := range builds {\n\t\tfor _, job := range build.Jobs {\n\t\t\t\/\/ Do not use getPlatform() since it may return \"rbe\", but here we're only interested in the actual worker OS (which would be \"linux\" in the rbe case).\n\t\t\tplatform := getPlatformFromAgentQueryRules(job.AgentQueryRules)\n\t\t\tif platform == \"\" || job.CreatedAt == nil || job.FinishedAt != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tallPlatforms[platform] = true\n\t\t\tswitch *job.State {\n\t\t\tcase \"running\":\n\t\t\t\trunning[platform] += 1\n\t\t\tcase \"scheduled\":\n\t\t\t\t\/*\n\t\t\t\t\tState \"scheduled\" = waiting for a worker to become available\n\t\t\t\t\tState \"waiting\" \/ \"waiting_failed\" = waiting for another task to finish\n\n\t\t\t\t\tWe're only interested in \"scheduled\" jobs since they may indicate a shortage of workers.\n\t\t\t\t*\/\n\t\t\t\twaiting[platform] += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.rows = make([]loadDataRow, 0)\n\tfor platform := range allPlatforms {\n\t\trow := loadDataRow{platform: platform, waitingJobs: waiting[platform], runningJobs: running[platform]}\n\t\tresult.rows = append(result.rows, row)\n\t}\n\treturn result, nil\n}\n\n\/\/ CREATE TABLE platform_load (timestamp DATETIME, platform VARCHAR(255), waiting_jobs INT, running_jobs INT, PRIMARY KEY(timestamp, platform));\nfunc CreatePlatformLoad(client *clients.BuildkiteClient, builds int) *PlatformLoad {\n\tcolumns := []Column{Column{\"timestamp\", true}, Column{\"platform\", true}, Column{\"waiting_jobs\", false}, Column{\"running_jobs\", false}}\n\treturn &PlatformLoad{client: client, columns: columns, builds: builds}\n}\n\ntype loadDataRow struct {\n\tplatform string\n\twaitingJobs int\n\trunningJobs int\n}\n\ntype loadDataSet struct {\n\theaders []string\n\tts time.Time\n\trows []loadDataRow\n}\n\nfunc (lds *loadDataSet) GetData() *data.LegacyDataSet {\n\trawSet := data.CreateDataSet(lds.headers)\n\tfor _, row := range lds.rows {\n\t\trawRow := []interface{}{lds.ts, row.platform, row.waitingJobs, row.runningJobs}\n\t\trawSet.Data = append(rawSet.Data, rawRow)\n\t}\n\treturn rawSet\n}\n\nfunc (lds *loadDataSet) CreateTimeSeriesRequest(projectID string) *monitoringpb.CreateTimeSeriesRequest {\n\tts := ×tamp.Timestamp{\n\t\tSeconds: lds.ts.Unix(),\n\t}\n\tseries := make([]*monitoringpb.TimeSeries, len(lds.rows)*2)\n\tfor i, row := range lds.rows {\n\t\tseries[2*i] = createTimeSeries(ts, row.platform, \"waiting_jobs\", row.waitingJobs)\n\t\tseries[2*i+1] = createTimeSeries(ts, row.platform, \"running_jobs\", row.runningJobs)\n\t}\n\n\treturn &monitoringpb.CreateTimeSeriesRequest{\n\t\tName: \"projects\/\" + projectID,\n\t\tTimeSeries: series,\n\t}\n}\n\nfunc createTimeSeries(ts *timestamp.Timestamp, platform, metricSuffix string, value int) *monitoringpb.TimeSeries {\n\treturn &monitoringpb.TimeSeries{\n\t\tMetric: &metricpb.Metric{\n\t\t\tType: fmt.Sprintf(\"%s\/%s\/%s\", baseMetricType, platform, metricSuffix),\n\t\t},\n\t\tResource: &monitoredres.MonitoredResource{\n\t\t\tType: \"global\",\n\t\t},\n\t\tPoints: []*monitoringpb.Point{{\n\t\t\tInterval: &monitoringpb.TimeInterval{\n\t\t\t\tStartTime: ts,\n\t\t\t\tEndTime: ts,\n\t\t\t},\n\t\t\tValue: &monitoringpb.TypedValue{\n\t\t\t\tValue: &monitoringpb.TypedValue_Int64Value{\n\t\t\t\t\tInt64Value: int64(value),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ HTTP reverse proxy handler\n\npackage proxy\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ onExitFlushLoop is a callback set by tests to detect the state of the\n\/\/ flushLoop() goroutine.\nvar onExitFlushLoop func()\n\n\/\/ ReverseProxy is an HTTP Handler that takes an incoming request and\n\/\/ sends it to another server, proxying the response back to the\n\/\/ client.\ntype ReverseProxy struct {\n\t\/\/ Director must be a function which modifies\n\t\/\/ the request into a new request to be sent\n\t\/\/ using Transport. Its response is then copied\n\t\/\/ back to the original client unmodified.\n\tDirector func(*http.Request)\n\n\t\/\/ The transport used to perform proxy requests.\n\t\/\/ If nil, http.DefaultTransport is used.\n\tTransport http.RoundTripper\n\n\t\/\/ FlushInterval specifies the flush interval\n\t\/\/ to flush to the client while copying the\n\t\/\/ response body.\n\t\/\/ If zero, no periodic flushing is done.\n\tFlushInterval time.Duration\n}\n\nfunc singleJoiningSlash(a, b string) string {\n\taslash := strings.HasSuffix(a, \"\/\")\n\tbslash := strings.HasPrefix(b, \"\/\")\n\tswitch {\n\tcase aslash && bslash:\n\t\treturn a + b[1:]\n\tcase !aslash && !bslash:\n\t\treturn a + \"\/\" + b\n\t}\n\treturn a + b\n}\n\n\/\/ NewSingleHostReverseProxy returns a new ReverseProxy that rewrites\n\/\/ URLs to the scheme, host, and base path provided in target. If the\n\/\/ target's path is \"\/base\" and the incoming request was for \"\/dir\",\n\/\/ the target request will be for \/base\/dir.\nfunc NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {\n\ttargetQuery := target.RawQuery\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = target.Scheme\n\t\treq.URL.Host = target.Host\n\t\treq.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)\n\t\tif targetQuery == \"\" || req.URL.RawQuery == \"\" {\n\t\t\treq.URL.RawQuery = targetQuery + req.URL.RawQuery\n\t\t} else {\n\t\t\treq.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n\t\t}\n\t}\n\treturn &ReverseProxy{Director: director}\n}\n\nfunc copyHeader(dst, src http.Header) {\n\tfor k, vv := range src {\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ Hop-by-hop headers. These are removed when sent to the backend.\n\/\/ http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec13.html\nvar hopHeaders = []string{\n\t\"Connection\",\n\t\"Keep-Alive\",\n\t\"Proxy-Authenticate\",\n\t\"Proxy-Authorization\",\n\t\"Te\", \/\/ canonicalized version of \"TE\"\n\t\"Trailers\",\n\t\"Transfer-Encoding\",\n\t\"Upgrade\",\n}\n\nfunc (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extraHeaders http.Header) error {\n\ttransport := p.Transport\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\n\toutreq := new(http.Request)\n\t*outreq = *req \/\/ includes shallow copies of maps, but okay\n\n\tp.Director(outreq)\n\toutreq.Proto = \"HTTP\/1.1\"\n\toutreq.ProtoMajor = 1\n\toutreq.ProtoMinor = 1\n\toutreq.Close = false\n\n\t\/\/ Remove hop-by-hop headers to the backend. Especially\n\t\/\/ important is \"Connection\" because we want a persistent\n\t\/\/ connection, regardless of what the client sent to us. This\n\t\/\/ is modifying the same underlying map from req (shallow\n\t\/\/ copied above) so we only copy it if necessary.\n\tcopiedHeaders := false\n\tfor _, h := range hopHeaders {\n\t\tif outreq.Header.Get(h) != \"\" {\n\t\t\tif !copiedHeaders {\n\t\t\t\toutreq.Header = make(http.Header)\n\t\t\t\tcopyHeader(outreq.Header, req.Header)\n\t\t\t\tcopiedHeaders = true\n\t\t\t}\n\t\t\toutreq.Header.Del(h)\n\t\t}\n\t}\n\n\tif clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {\n\t\t\/\/ If we aren't the first proxy retain prior\n\t\t\/\/ X-Forwarded-For information as a comma+space\n\t\t\/\/ separated list and fold multiple headers into one.\n\t\tif prior, ok := outreq.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tclientIP = strings.Join(prior, \", \") + \", \" + clientIP\n\t\t}\n\t\toutreq.Header.Set(\"X-Forwarded-For\", clientIP)\n\t}\n\n\tif extraHeaders != nil {\n\t\tfor k, v := range extraHeaders {\n\t\t\toutreq.Header[k] = v\n\t\t}\n\t}\n\n\tres, err := transport.RoundTrip(outreq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusSwitchingProtocols && outreq.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\thj, ok := rw.(http.Hijacker)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tconn, _, err := hj.Hijack()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbackendConn, err := net.Dial(\"tcp\", outreq.Host)\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\treturn err\n\t\t}\n\n\t\toutreq.Write(backendConn)\n\n\t\tgo func() {\n\t\t\tio.Copy(backendConn, conn) \/\/ write tcp stream to backend.\n\t\t\tbackendConn.Close()\n\t\t}()\n\n\t\tio.Copy(conn, backendConn) \/\/ read tcp stream from backend.\n\t\tconn.Close()\n\t} else {\n\t\tfor _, h := range hopHeaders {\n\t\t\tres.Header.Del(h)\n\t\t}\n\n\t\tcopyHeader(rw.Header(), res.Header)\n\n\t\trw.WriteHeader(res.StatusCode)\n\t\tp.copyResponse(rw, res.Body)\n\t}\n\n\treturn nil\n}\n\nfunc (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {\n\tif p.FlushInterval != 0 {\n\t\tif wf, ok := dst.(writeFlusher); ok {\n\t\t\tmlw := &maxLatencyWriter{\n\t\t\t\tdst: wf,\n\t\t\t\tlatency: p.FlushInterval,\n\t\t\t\tdone: make(chan bool),\n\t\t\t}\n\t\t\tgo mlw.flushLoop()\n\t\t\tdefer mlw.stop()\n\t\t\tdst = mlw\n\t\t}\n\t}\n\n\tio.Copy(dst, src)\n}\n\ntype writeFlusher interface {\n\tio.Writer\n\thttp.Flusher\n}\n\ntype maxLatencyWriter struct {\n\tdst writeFlusher\n\tlatency time.Duration\n\n\tlk sync.Mutex \/\/ protects Write + Flush\n\tdone chan bool\n}\n\nfunc (m *maxLatencyWriter) Write(p []byte) (int, error) {\n\tm.lk.Lock()\n\tdefer m.lk.Unlock()\n\treturn m.dst.Write(p)\n}\n\nfunc (m *maxLatencyWriter) flushLoop() {\n\tt := time.NewTicker(m.latency)\n\tdefer t.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-m.done:\n\t\t\tif onExitFlushLoop != nil {\n\t\t\t\tonExitFlushLoop()\n\t\t\t}\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\tm.lk.Lock()\n\t\t\tm.dst.Flush()\n\t\t\tm.lk.Unlock()\n\t\t}\n\t}\n}\n\nfunc (m *maxLatencyWriter) stop() { m.done <- true }\n<commit_msg>check server response instead of client<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ HTTP reverse proxy handler\n\npackage proxy\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ onExitFlushLoop is a callback set by tests to detect the state of the\n\/\/ flushLoop() goroutine.\nvar onExitFlushLoop func()\n\n\/\/ ReverseProxy is an HTTP Handler that takes an incoming request and\n\/\/ sends it to another server, proxying the response back to the\n\/\/ client.\ntype ReverseProxy struct {\n\t\/\/ Director must be a function which modifies\n\t\/\/ the request into a new request to be sent\n\t\/\/ using Transport. Its response is then copied\n\t\/\/ back to the original client unmodified.\n\tDirector func(*http.Request)\n\n\t\/\/ The transport used to perform proxy requests.\n\t\/\/ If nil, http.DefaultTransport is used.\n\tTransport http.RoundTripper\n\n\t\/\/ FlushInterval specifies the flush interval\n\t\/\/ to flush to the client while copying the\n\t\/\/ response body.\n\t\/\/ If zero, no periodic flushing is done.\n\tFlushInterval time.Duration\n}\n\nfunc singleJoiningSlash(a, b string) string {\n\taslash := strings.HasSuffix(a, \"\/\")\n\tbslash := strings.HasPrefix(b, \"\/\")\n\tswitch {\n\tcase aslash && bslash:\n\t\treturn a + b[1:]\n\tcase !aslash && !bslash:\n\t\treturn a + \"\/\" + b\n\t}\n\treturn a + b\n}\n\n\/\/ NewSingleHostReverseProxy returns a new ReverseProxy that rewrites\n\/\/ URLs to the scheme, host, and base path provided in target. If the\n\/\/ target's path is \"\/base\" and the incoming request was for \"\/dir\",\n\/\/ the target request will be for \/base\/dir.\nfunc NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {\n\ttargetQuery := target.RawQuery\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = target.Scheme\n\t\treq.URL.Host = target.Host\n\t\treq.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)\n\t\tif targetQuery == \"\" || req.URL.RawQuery == \"\" {\n\t\t\treq.URL.RawQuery = targetQuery + req.URL.RawQuery\n\t\t} else {\n\t\t\treq.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n\t\t}\n\t}\n\treturn &ReverseProxy{Director: director}\n}\n\nfunc copyHeader(dst, src http.Header) {\n\tfor k, vv := range src {\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ Hop-by-hop headers. These are removed when sent to the backend.\n\/\/ http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec13.html\nvar hopHeaders = []string{\n\t\"Connection\",\n\t\"Keep-Alive\",\n\t\"Proxy-Authenticate\",\n\t\"Proxy-Authorization\",\n\t\"Te\", \/\/ canonicalized version of \"TE\"\n\t\"Trailers\",\n\t\"Transfer-Encoding\",\n\t\"Upgrade\",\n}\n\nfunc (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extraHeaders http.Header) error {\n\ttransport := p.Transport\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\n\toutreq := new(http.Request)\n\t*outreq = *req \/\/ includes shallow copies of maps, but okay\n\n\tp.Director(outreq)\n\toutreq.Proto = \"HTTP\/1.1\"\n\toutreq.ProtoMajor = 1\n\toutreq.ProtoMinor = 1\n\toutreq.Close = false\n\n\t\/\/ Remove hop-by-hop headers to the backend. Especially\n\t\/\/ important is \"Connection\" because we want a persistent\n\t\/\/ connection, regardless of what the client sent to us. This\n\t\/\/ is modifying the same underlying map from req (shallow\n\t\/\/ copied above) so we only copy it if necessary.\n\tcopiedHeaders := false\n\tfor _, h := range hopHeaders {\n\t\tif outreq.Header.Get(h) != \"\" {\n\t\t\tif !copiedHeaders {\n\t\t\t\toutreq.Header = make(http.Header)\n\t\t\t\tcopyHeader(outreq.Header, req.Header)\n\t\t\t\tcopiedHeaders = true\n\t\t\t}\n\t\t\toutreq.Header.Del(h)\n\t\t}\n\t}\n\n\tif clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {\n\t\t\/\/ If we aren't the first proxy retain prior\n\t\t\/\/ X-Forwarded-For information as a comma+space\n\t\t\/\/ separated list and fold multiple headers into one.\n\t\tif prior, ok := outreq.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tclientIP = strings.Join(prior, \", \") + \", \" + clientIP\n\t\t}\n\t\toutreq.Header.Set(\"X-Forwarded-For\", clientIP)\n\t}\n\n\tif extraHeaders != nil {\n\t\tfor k, v := range extraHeaders {\n\t\t\toutreq.Header[k] = v\n\t\t}\n\t}\n\n\tres, err := transport.RoundTrip(outreq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusSwitchingProtocols && res.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\thj, ok := rw.(http.Hijacker)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tconn, _, err := hj.Hijack()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbackendConn, err := net.Dial(\"tcp\", outreq.Host)\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\treturn err\n\t\t}\n\n\t\toutreq.Write(backendConn)\n\n\t\tgo func() {\n\t\t\tio.Copy(backendConn, conn) \/\/ write tcp stream to backend.\n\t\t\tbackendConn.Close()\n\t\t}()\n\n\t\tio.Copy(conn, backendConn) \/\/ read tcp stream from backend.\n\t\tconn.Close()\n\t} else {\n\t\tfor _, h := range hopHeaders {\n\t\t\tres.Header.Del(h)\n\t\t}\n\n\t\tcopyHeader(rw.Header(), res.Header)\n\n\t\trw.WriteHeader(res.StatusCode)\n\t\tp.copyResponse(rw, res.Body)\n\t}\n\n\treturn nil\n}\n\nfunc (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {\n\tif p.FlushInterval != 0 {\n\t\tif wf, ok := dst.(writeFlusher); ok {\n\t\t\tmlw := &maxLatencyWriter{\n\t\t\t\tdst: wf,\n\t\t\t\tlatency: p.FlushInterval,\n\t\t\t\tdone: make(chan bool),\n\t\t\t}\n\t\t\tgo mlw.flushLoop()\n\t\t\tdefer mlw.stop()\n\t\t\tdst = mlw\n\t\t}\n\t}\n\n\tio.Copy(dst, src)\n}\n\ntype writeFlusher interface {\n\tio.Writer\n\thttp.Flusher\n}\n\ntype maxLatencyWriter struct {\n\tdst writeFlusher\n\tlatency time.Duration\n\n\tlk sync.Mutex \/\/ protects Write + Flush\n\tdone chan bool\n}\n\nfunc (m *maxLatencyWriter) Write(p []byte) (int, error) {\n\tm.lk.Lock()\n\tdefer m.lk.Unlock()\n\treturn m.dst.Write(p)\n}\n\nfunc (m *maxLatencyWriter) flushLoop() {\n\tt := time.NewTicker(m.latency)\n\tdefer t.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-m.done:\n\t\t\tif onExitFlushLoop != nil {\n\t\t\t\tonExitFlushLoop()\n\t\t\t}\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\tm.lk.Lock()\n\t\t\tm.dst.Flush()\n\t\t\tm.lk.Unlock()\n\t\t}\n\t}\n}\n\nfunc (m *maxLatencyWriter) stop() { m.done <- true }\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"eaciit\/gdrj\/model\"\n\t\"eaciit\/gdrj\/modules\"\n\t\"os\"\n\n\t\"flag\"\n\t\"github.com\/eaciit\/dbox\"\n\t\"github.com\/eaciit\/orm\/v1\"\n\t\"github.com\/eaciit\/toolkit\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar conn dbox.IConnection\nvar count int\n\n\/\/ var mwg sync.WaitGroup\n\nvar (\n\tt0 time.Time\n\tcustgroup, prodgroup, plcode, ref string\n\tvalue, fiscalyear int\n\tglobalval float64\n\tmapkeysvalue map[string]float64\n\tmasters toolkit.M\n)\n\nfunc setinitialconnection() {\n\tvar err error\n\tconn, err = modules.GetDboxIConnection(\"db_godrej\")\n\n\tif err != nil {\n\t\ttoolkit.Println(\"Initial connection found : \", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = gdrj.SetDb(conn)\n\tif err != nil {\n\t\ttoolkit.Println(\"Initial connection found : \", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc buildmap(holder interface{},\n\tfnModel func() orm.IModel,\n\tfilter *dbox.Filter,\n\tfnIter func(holder interface{}, obj interface{})) interface{} {\n\tcrx, _ := gdrj.Find(fnModel(), filter, nil)\n\tdefer crx.Close()\n\tfor {\n\t\ts := fnModel()\n\t\te := crx.Fetch(s, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t\tfnIter(holder, s)\n\t}\n\treturn holder\n}\n\nfunc prepmaster() {\n\tmasters = toolkit.M{}\n\tmasters.Set(\"plmodel\", buildmap(map[string]*gdrj.PLModel{},\n\t\tfunc() orm.IModel {\n\t\t\treturn new(gdrj.PLModel)\n\t\t},\n\t\tnil,\n\t\tfunc(holder, obj interface{}) {\n\t\t\th := holder.(map[string]*gdrj.PLModel)\n\t\t\to := obj.(*gdrj.PLModel)\n\t\t\th[o.ID] = o\n\t\t}).(map[string]*gdrj.PLModel))\n}\n\nfunc main() {\n\tt0 = time.Now()\n\tmapkeysvalue = make(map[string]float64)\n\n\tflag.IntVar(&value, \"value\", 0, \"representation of value need to be allocation. Default is 0\")\n\t\/\/branch,channel,group\n\tflag.StringVar(&custgroup, \"custgroup\", \"global\", \"group of customer. Default is global\")\n\t\/\/brand,group,skuid\n\tflag.StringVar(&prodgroup, \"prodgroup\", \"global\", \"group of product. Default is global\")\n\tflag.IntVar(&fiscalyear, \"year\", 2015, \"YYYY representation of godrej fiscal year. Default is 2015\")\n\tflag.StringVar(&plcode, \"plcode\", \"\", \"PL Code to take the value. Default is blank\")\n\tflag.StringVar(&ref, \"ref\", \"\", \"Reference from document or other. Default is blank\")\n\tflag.Parse()\n\n\teperiode := time.Date(fiscalyear, 4, 1, 0, 0, 0, 0, time.UTC)\n\tsperiode := eperiode.AddDate(-1, 0, 0)\n\n\tfilter := dbox.And(dbox.Gte(\"date.date\", speriode), dbox.Lt(\"date.date\", eperiode))\n\n\tif plcode == \"\" || value == 0 {\n\t\ttoolkit.Println(\"PLCode and Value are mandatory to fill\")\n\t\tos.Exit(1)\n\t}\n\n\tsetinitialconnection()\n\tdefer gdrj.CloseDb()\n\n\ttoolkit.Println(\"Get Data Master...\")\n\tprepmaster()\n\n\ttoolkit.Println(\"Start Data Process...\")\n\tc, _ := gdrj.Find(new(gdrj.SalesPL), filter, nil)\n\tdefer c.Close()\n\n\tsplcount := c.Count()\n\tstep := splcount \/ 100\n\ti := 0\n\n\tfor {\n\t\ti++\n\n\t\tspl := new(gdrj.SalesPL)\n\t\te := c.Fetch(spl, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t\tkey := toolkit.Sprintf(\"%d_%d\", spl.Date.Month, spl.Date.Year)\n\t\tk := \"\"\n\n\t\tif spl.Customer != nil {\n\t\t\tswitch custgroup {\n\t\t\tcase \"branch\":\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, spl.Customer.BranchID)\n\t\t\tcase \"channel\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Customer.BranchID, spl.Customer.ChannelID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tcase \"group\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Customer.BranchID, spl.Customer.CustomerGroup)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tdefault:\n\t\t\t\tkey = toolkit.Sprintf(\"%v_\", key)\n\t\t\t}\n\t\t}\n\n\t\tif spl.Product != nil {\n\t\t\tswitch prodgroup {\n\t\t\tcase \"brand\":\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, spl.Product.Brand)\n\t\t\tcase \"group\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Product.Brand, spl.Product.BrandCategoryID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tcase \"skuid\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v|%v\", spl.Product.Brand, spl.Product.BrandCategoryID, spl.Product.ID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tdefault:\n\t\t\t\tkey = toolkit.Sprintf(\"%v_\", key)\n\t\t\t}\n\n\t\t}\n\n\t\tmapkeysvalue[key] += spl.GrossAmount\n\t\tglobalval += spl.GrossAmount\n\n\t\tif i > step {\n\t\t\tstep += splcount \/ 100\n\t\t\ttoolkit.Printfn(\"Processing %d of %d in %s\", i, splcount,\n\t\t\t\ttime.Since(t0).String())\n\t\t}\n\n\t}\n\n\ttoolkit.Printfn(\"Preparing the data\")\n\tmcount := len(mapkeysvalue)\n\ti = 0\n\tfor k, v := range mapkeysvalue {\n\t\ti++\n\n\t\takey := strings.Split(k, \"_\")\n\t\ttcustomer := new(gdrj.Customer)\n\t\ttproduct := new(gdrj.Product)\n\n\t\tif len(akey) > 2 && akey[2] != \"\" {\n\t\t\tswitch custgroup {\n\t\t\tcase \"branch\":\n\t\t\t\ttcustomer.BranchID = akey[2]\n\t\t\tcase \"channel\":\n\t\t\t\tskey := strings.Split(akey[2], \"|\")\n\t\t\t\ttcustomer.BranchID = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttcustomer.ChannelID = skey[1]\n\t\t\t\t}\n\t\t\tcase \"group\":\n\t\t\t\tskey := strings.Split(akey[2], \"|\")\n\t\t\t\ttcustomer.BranchID = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttcustomer.CustomerGroup = skey[1]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(akey) > 3 && akey[3] != \"\" {\n\t\t\tswitch prodgroup {\n\t\t\tcase \"brand\":\n\t\t\t\ttproduct.Brand = akey[3]\n\t\t\tcase \"group\":\n\t\t\t\tskey := strings.Split(akey[3], \"|\")\n\t\t\t\ttproduct.Brand = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttproduct.BrandCategoryID = skey[1]\n\t\t\t\t}\n\t\t\tcase \"skuid\":\n\t\t\t\tskey := strings.Split(akey[3], \"|\")\n\t\t\t\ttproduct.Brand = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttproduct.BrandCategoryID = skey[1]\n\t\t\t\t}\n\t\t\t\tif len(skey) > 2 {\n\t\t\t\t\ttproduct.ID = skey[2]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tspl := new(gdrj.SalesPL)\n\t\tspl.ID = toolkit.Sprintf(\"%v%v%v%v\", akey[0], akey[1], \"\", \"\")\n\t\tspl.Date = gdrj.SetDate(time.Date(toolkit.ToInt(akey[1], toolkit.RoundingAuto),\n\t\t\ttime.Month(toolkit.ToInt(akey[0], toolkit.RoundingAuto)),\n\t\t\t1, 0, 0, 0, 0, time.UTC))\n\t\tspl.Customer = tcustomer\n\t\tspl.Product = tproduct\n\t\tspl.Source = \"ADJUST\"\n\t\tspl.Ref = ref\n\n\t\tspl.PC = new(gdrj.ProfitCenter)\n\t\tspl.CC = new(gdrj.CostCenter)\n\n\t\tamount := toolkit.ToFloat64(value, 6, toolkit.RoundingAuto) * (v \/ globalval)\n\t\tplmodels := masters.Get(\"plmodel\").(map[string]*gdrj.PLModel)\n\t\tspl.AddData(plcode, amount, plmodels)\n\t\tspl.CalcSum(masters)\n\n\t\tgdrj.Save(spl)\n\n\t\ttoolkit.Printfn(\"Saving %d of %d in %s\", i, mcount,\n\t\t\ttime.Since(t0).String())\n\t}\n\n\ttoolkit.Printfn(\"Processing done in %s\",\n\t\ttime.Since(t0).String())\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"eaciit\/gdrj\/model\"\n\t\"eaciit\/gdrj\/modules\"\n\t\"os\"\n\n\t\"flag\"\n\t\"github.com\/eaciit\/dbox\"\n\t\"github.com\/eaciit\/orm\/v1\"\n\t\"github.com\/eaciit\/toolkit\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar conn dbox.IConnection\nvar count int\n\n\/\/ var mwg sync.WaitGroup\n\nvar (\n\tt0 time.Time\n\tcustgroup, prodgroup, plcode, ref string\n\tvalue, fiscalyear int\n\tglobalval float64\n\tmapkeysvalue map[string]float64\n\tmasters toolkit.M\n)\n\nfunc setinitialconnection() {\n\tvar err error\n\tconn, err = modules.GetDboxIConnection(\"db_godrej\")\n\n\tif err != nil {\n\t\ttoolkit.Println(\"Initial connection found : \", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = gdrj.SetDb(conn)\n\tif err != nil {\n\t\ttoolkit.Println(\"Initial connection found : \", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc getCursor(obj orm.IModel) dbox.ICursor {\n\tc, e := gdrj.Find(obj,\n\t\tnil, nil)\n\t\/\/toolkit.M{}.Set(\"take\", 10))\n\tif e != nil {\n\t\treturn nil\n\t}\n\treturn c\n}\n\nfunc buildmap(holder interface{},\n\tfnModel func() orm.IModel,\n\tfilter *dbox.Filter,\n\tfnIter func(holder interface{}, obj interface{})) interface{} {\n\tcrx, _ := gdrj.Find(fnModel(), filter, nil)\n\tdefer crx.Close()\n\tfor {\n\t\ts := fnModel()\n\t\te := crx.Fetch(s, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t\tfnIter(holder, s)\n\t}\n\treturn holder\n}\n\nfunc prepmaster() {\n\tmasters = toolkit.M{}\n\tmasters.Set(\"plmodel\", buildmap(map[string]*gdrj.PLModel{},\n\t\tfunc() orm.IModel {\n\t\t\treturn new(gdrj.PLModel)\n\t\t},\n\t\tnil,\n\t\tfunc(holder, obj interface{}) {\n\t\t\th := holder.(map[string]*gdrj.PLModel)\n\t\t\to := obj.(*gdrj.PLModel)\n\t\t\th[o.ID] = o\n\t\t}).(map[string]*gdrj.PLModel))\n}\n\nfunc prepmasterclean() {\n\tsubchannels := toolkit.M{}\n\ttoolkit.Println(\"--> Sub Channel\")\n\tcsr, _ := conn.NewQuery().From(\"subchannels\").Cursor(nil)\n\tdefer csr.Close()\n\tfor {\n\t\tm := toolkit.M{}\n\t\te := csr.Fetch(&m, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t\tsubchannels.Set(m.GetString(\"_id\"), m.GetString(\"title\"))\n\t}\n\tmasters.Set(\"subchannels\", subchannels)\n\n\tcustomers := toolkit.M{}\n\ttoolkit.Println(\"--> Customer\")\n\tccb := getCursor(new(gdrj.Customer))\n\tdefer ccb.Close()\n\tfor {\n\t\tcust := new(gdrj.Customer)\n\t\te := ccb.Fetch(cust, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcustomers.Set(cust.ID, cust)\n\t}\n\tmasters.Set(\"customers\", customers)\n\n\tbranchs := toolkit.M{}\n\tcmb := getCursor(new(gdrj.MasterBranch))\n\tdefer cmb.Close()\n\tfor {\n\t\tstx := toolkit.M{}\n\t\te := cmb.Fetch(&stx, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tbranchs.Set(stx.Get(\"_id\", \"\").(string), stx)\n\t}\n\tmasters.Set(\"branchs\", branchs)\n\n\trdlocations := toolkit.M{}\n\tcrdloc, _ := conn.NewQuery().From(\"outletgeo\").Cursor(nil)\n\tdefer crdloc.Close()\n\tfor {\n\t\tstx := toolkit.M{}\n\t\te := crdloc.Fetch(&stx, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tbranchs.Set(stx.GetString(\"_id\"), stx)\n\t}\n\tmasters.Set(\"rdlocations\", rdlocations)\n}\n\nfunc main() {\n\tt0 = time.Now()\n\tmapkeysvalue = make(map[string]float64)\n\n\tflag.IntVar(&value, \"value\", 0, \"representation of value need to be allocation. Default is 0\")\n\t\/\/branch,channel,group\n\tflag.StringVar(&custgroup, \"custgroup\", \"global\", \"group of customer. Default is global\")\n\t\/\/brand,group,skuid\n\tflag.StringVar(&prodgroup, \"prodgroup\", \"global\", \"group of product. Default is global\")\n\tflag.IntVar(&fiscalyear, \"year\", 2015, \"YYYY representation of godrej fiscal year. Default is 2015\")\n\tflag.StringVar(&plcode, \"plcode\", \"\", \"PL Code to take the value. Default is blank\")\n\tflag.StringVar(&ref, \"ref\", \"\", \"Reference from document or other. Default is blank\")\n\tflag.Parse()\n\n\teperiode := time.Date(fiscalyear, 4, 1, 0, 0, 0, 0, time.UTC)\n\tsperiode := eperiode.AddDate(-1, 0, 0)\n\n\tfilter := dbox.And(dbox.Gte(\"date.date\", speriode), dbox.Lt(\"date.date\", eperiode))\n\n\tif plcode == \"\" || value == 0 {\n\t\ttoolkit.Println(\"PLCode and Value are mandatory to fill\")\n\t\tos.Exit(1)\n\t}\n\n\tsetinitialconnection()\n\tdefer gdrj.CloseDb()\n\n\ttoolkit.Println(\"Get Data Master...\")\n\tprepmaster()\n\tprepmasterclean()\n\n\ttoolkit.Println(\"Start Data Process...\")\n\tc, _ := gdrj.Find(new(gdrj.SalesPL), filter, nil)\n\tdefer c.Close()\n\n\tsplcount := c.Count()\n\tstep := splcount \/ 100\n\ti := 0\n\n\tfor {\n\t\ti++\n\n\t\tspl := new(gdrj.SalesPL)\n\t\te := c.Fetch(spl, 1, false)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t\tkey := toolkit.Sprintf(\"%d_%d\", spl.Date.Month, spl.Date.Year)\n\t\tk := \"\"\n\n\t\tif spl.Customer != nil {\n\t\t\tswitch custgroup {\n\t\t\tcase \"branch\":\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, spl.Customer.BranchID)\n\t\t\tcase \"channel\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Customer.BranchID, spl.Customer.ChannelID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tcase \"group\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Customer.BranchID, spl.Customer.CustomerGroup)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tdefault:\n\t\t\t\tkey = toolkit.Sprintf(\"%v_\", key)\n\t\t\t}\n\t\t}\n\n\t\tif prodgroup == \"skuid\" && (spl.Product.ID == \"\" || len(spl.Product.ID) < 3) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif spl.Product != nil {\n\t\t\tswitch prodgroup {\n\t\t\tcase \"brand\":\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, spl.Product.Brand)\n\t\t\tcase \"group\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v\", spl.Product.Brand, spl.Product.BrandCategoryID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tcase \"skuid\":\n\t\t\t\tk = toolkit.Sprintf(\"%v|%v|%v\", spl.Product.Brand, spl.Product.BrandCategoryID, spl.Product.ID)\n\t\t\t\tkey = toolkit.Sprintf(\"%v_%v\", key, k)\n\t\t\tdefault:\n\t\t\t\tkey = toolkit.Sprintf(\"%v_\", key)\n\t\t\t}\n\n\t\t}\n\n\t\tmapkeysvalue[key] += spl.GrossAmount\n\t\tglobalval += spl.GrossAmount\n\n\t\tif i > step {\n\t\t\tstep += splcount \/ 100\n\t\t\ttoolkit.Printfn(\"Processing %d of %d in %s\", i, splcount,\n\t\t\t\ttime.Since(t0).String())\n\t\t}\n\n\t}\n\n\ttoolkit.Printfn(\"Preparing the data\")\n\tmcount := len(mapkeysvalue)\n\ti = 0\n\tfor k, v := range mapkeysvalue {\n\t\ti++\n\n\t\takey := strings.Split(k, \"_\")\n\t\ttcustomer := new(gdrj.Customer)\n\t\ttproduct := new(gdrj.Product)\n\n\t\tif len(akey) > 2 && akey[2] != \"\" {\n\t\t\tswitch custgroup {\n\t\t\tcase \"branch\":\n\t\t\t\ttcustomer.BranchID = akey[2]\n\t\t\tcase \"channel\":\n\t\t\t\tskey := strings.Split(akey[2], \"|\")\n\t\t\t\ttcustomer.BranchID = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttcustomer.ChannelID = skey[1]\n\t\t\t\t}\n\t\t\tcase \"group\":\n\t\t\t\tskey := strings.Split(akey[2], \"|\")\n\t\t\t\ttcustomer.BranchID = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttcustomer.CustomerGroup = skey[1]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(akey) > 3 && akey[3] != \"\" {\n\t\t\tswitch prodgroup {\n\t\t\tcase \"brand\":\n\t\t\t\ttproduct.Brand = akey[3]\n\t\t\tcase \"group\":\n\t\t\t\tskey := strings.Split(akey[3], \"|\")\n\t\t\t\ttproduct.Brand = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttproduct.BrandCategoryID = skey[1]\n\t\t\t\t}\n\t\t\tcase \"skuid\":\n\t\t\t\tskey := strings.Split(akey[3], \"|\")\n\t\t\t\ttproduct.Brand = skey[0]\n\t\t\t\tif len(skey) > 1 {\n\t\t\t\t\ttproduct.BrandCategoryID = skey[1]\n\t\t\t\t}\n\t\t\t\tif len(skey) > 2 {\n\t\t\t\t\ttproduct.ID = skey[2]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tspl := new(gdrj.SalesPL)\n\t\tspl.Date = gdrj.SetDate(time.Date(toolkit.ToInt(akey[1], toolkit.RoundingAuto),\n\t\t\ttime.Month(toolkit.ToInt(akey[0], toolkit.RoundingAuto)),\n\t\t\t1, 0, 0, 0, 0, time.UTC))\n\t\tspl.Customer = tcustomer\n\t\tspl.Product = tproduct\n\t\tspl.Source = \"ADJUST\"\n\t\tspl.Ref = ref\n\n\t\tspl.PC = new(gdrj.ProfitCenter)\n\t\tspl.CC = new(gdrj.CostCenter)\n\n\t\tamount := toolkit.ToFloat64(value, 6, toolkit.RoundingAuto) * (v \/ globalval)\n\t\tplmodels := masters.Get(\"plmodel\").(map[string]*gdrj.PLModel)\n\n\t\tspl.ID = toolkit.Sprintf(\"%v_%v_%v_%v\", \"ALLOCATION-SCRIPT\", spl.Date.Fiscal, akey[0], akey[1])\n\n\t\tspl.CleanAndClasify(masters)\n\t\tspl.AddData(plcode, amount, plmodels)\n\t\tspl.CalcSum(masters)\n\n\t\tgdrj.Save(spl)\n\n\t\ttoolkit.Printfn(\"Saving %d of %d in %s\", i, mcount,\n\t\t\ttime.Since(t0).String())\n\t}\n\n\ttoolkit.Printfn(\"Processing done in %s\",\n\t\ttime.Since(t0).String())\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/micro\/go-micro\/registry\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\n\/\/ router provides default router implementation\ntype router struct {\n\topts Options\n\texit chan struct{}\n\twg *sync.WaitGroup\n}\n\n\/\/ newRouter creates new router and returns it\nfunc newRouter(opts ...Option) Router {\n\t\/\/ TODO: we need to add default GW entry here\n\t\/\/ Should default GW be part of router options?\n\n\t\/\/ get default options\n\toptions := DefaultOptions()\n\n\t\/\/ apply requested options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\treturn &router{\n\t\topts: options,\n\t\texit: make(chan struct{}),\n\t\twg: &sync.WaitGroup{},\n\t}\n}\n\n\/\/ Init initializes router with given options\nfunc (r *router) Init(opts ...Option) error {\n\tfor _, o := range opts {\n\t\to(&r.opts)\n\t}\n\treturn nil\n}\n\n\/\/ Options returns router options\nfunc (r *router) Options() Options {\n\treturn r.opts\n}\n\n\/\/ ID returns router ID\nfunc (r *router) ID() string {\n\treturn r.opts.ID\n}\n\n\/\/ Table returns routing table\nfunc (r *router) Table() Table {\n\treturn r.opts.Table\n}\n\n\/\/ Address returns router's bind address\nfunc (r *router) Address() string {\n\treturn r.opts.Address\n}\n\n\/\/ Network returns the address router advertises to the network\nfunc (r *router) Network() string {\n\treturn r.opts.Network\n}\n\n\/\/ Advertise advertises the routes to the network. It is a blocking function.\n\/\/ It returns error if any of the launched goroutines fail with error.\nfunc (r *router) Advertise() error {\n\t\/\/ add local service routes into the routing table\n\tif err := r.addServiceRoutes(r.opts.Registry, \"local\", DefaultLocalMetric); err != nil {\n\t\treturn fmt.Errorf(\"failed adding routes: %v\", err)\n\t}\n\n\tlocalWatcher, err := r.opts.Registry.Watch()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create registry watcher: %v\", err)\n\t}\n\n\t\/\/ error channel collecting goroutine errors\n\terrChan := make(chan error, 1)\n\n\tr.wg.Add(1)\n\tgo func() {\n\t\tdefer r.wg.Done()\n\t\t\/\/ watch local registry and register routes in routine table\n\t\terrChan <- r.manageServiceRoutes(localWatcher, DefaultLocalMetric)\n\t}()\n\n\treturn <-errChan\n}\n\n\/\/ addServiceRoutes adds all services in given registry to the routing table.\n\/\/ NOTE: this is a one-off operation done when bootstrapping the routing table\n\/\/ It returns error if either the services failed to be listed or\n\/\/ if the routes could not be added to the routing table.\nfunc (r *router) addServiceRoutes(reg registry.Registry, network string, metric int) error {\n\tservices, err := reg.ListServices()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list services: %v\", err)\n\t}\n\n\t\/\/ add each service node as a separate route;\n\tfor _, service := range services {\n\t\tfor _, node := range service.Nodes {\n\t\t\tgw := node.Address\n\t\t\tif node.Port > 0 {\n\t\t\t\tgw = fmt.Sprintf(\"%s:%d\", node.Address, node.Port)\n\t\t\t}\n\t\t\troute := Route{\n\t\t\t\tDestination: service.Name,\n\t\t\t\tGateway: gw,\n\t\t\t\tRouter: r.opts.Address,\n\t\t\t\tNetwork: r.opts.Network,\n\t\t\t\tMetric: metric,\n\t\t\t}\n\t\t\tif err := r.opts.Table.Add(route); err != nil && err != ErrDuplicateRoute {\n\t\t\t\treturn fmt.Errorf(\"error adding route for service %s: %s\", service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ manageServiceRoutes watches services in given registry and updates the routing table accordingly.\n\/\/ It returns error if the service registry watcher has stopped or if the routing table failed to be updated.\nfunc (r *router) manageServiceRoutes(w registry.Watcher, metric int) error {\n\t\/\/ wait in the background for the router to stop\n\t\/\/ when the router stops, stop the watcher and exit\n\tr.wg.Add(1)\n\tgo func() {\n\t\tdefer r.wg.Done()\n\t\t<-r.exit\n\t\tw.Stop()\n\t}()\n\n\tvar watchErr error\n\n\tfor {\n\t\tres, err := w.Next()\n\t\tif err == registry.ErrWatcherStopped {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\twatchErr = err\n\t\t\tbreak\n\t\t}\n\n\t\troute := Route{\n\t\t\tDestination: res.Service.Name,\n\t\t\tRouter: r.opts.Address,\n\t\t\tNetwork: r.opts.Network,\n\t\t\tMetric: metric,\n\t\t}\n\n\t\tswitch res.Action {\n\t\tcase \"create\":\n\t\t\t\/\/ only return error if the route is not duplicate, but something else has failed\n\t\t\tif err := r.opts.Table.Add(route); err != nil && err != ErrDuplicateRoute {\n\t\t\t\treturn fmt.Errorf(\"failed to add route for service %v: %s\", res.Service.Name, err)\n\t\t\t}\n\t\tcase \"delete\":\n\t\t\t\/\/ only return error if the route is not in the table, but something else has failed\n\t\t\tif err := r.opts.Table.Delete(route); err != nil && err != ErrRouteNotFound {\n\t\t\t\treturn fmt.Errorf(\"failed to delete route for service %v: %s\", res.Service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn watchErr\n}\n\n\/\/ Stop stops the router\nfunc (r *router) Stop() error {\n\t\/\/ notify all goroutines to finish\n\tclose(r.exit)\n\n\t\/\/ wait for all goroutines to finish\n\tr.wg.Wait()\n\n\treturn nil\n}\n\n\/\/ String prints debugging information about router\nfunc (r *router) String() string {\n\tsb := &strings.Builder{}\n\n\ttable := tablewriter.NewWriter(sb)\n\ttable.SetHeader([]string{\"ID\", \"Address\", \"Network\", \"Table\"})\n\n\tdata := []string{\n\t\tr.opts.ID,\n\t\tr.opts.Address,\n\t\tr.opts.Network,\n\t\tfmt.Sprintf(\"%d\", r.opts.Table.Size()),\n\t}\n\ttable.Append(data)\n\n\t\/\/ render table into sb\n\ttable.Render()\n\n\treturn sb.String()\n}\n<commit_msg>Lookup every service. FML<commit_after>package router\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/micro\/go-micro\/registry\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\n\/\/ router provides default router implementation\ntype router struct {\n\topts Options\n\texit chan struct{}\n\twg *sync.WaitGroup\n}\n\n\/\/ newRouter creates new router and returns it\nfunc newRouter(opts ...Option) Router {\n\t\/\/ TODO: we need to add default GW entry here\n\t\/\/ Should default GW be part of router options?\n\n\t\/\/ get default options\n\toptions := DefaultOptions()\n\n\t\/\/ apply requested options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\treturn &router{\n\t\topts: options,\n\t\texit: make(chan struct{}),\n\t\twg: &sync.WaitGroup{},\n\t}\n}\n\n\/\/ Init initializes router with given options\nfunc (r *router) Init(opts ...Option) error {\n\tfor _, o := range opts {\n\t\to(&r.opts)\n\t}\n\treturn nil\n}\n\n\/\/ Options returns router options\nfunc (r *router) Options() Options {\n\treturn r.opts\n}\n\n\/\/ ID returns router ID\nfunc (r *router) ID() string {\n\treturn r.opts.ID\n}\n\n\/\/ Table returns routing table\nfunc (r *router) Table() Table {\n\treturn r.opts.Table\n}\n\n\/\/ Address returns router's bind address\nfunc (r *router) Address() string {\n\treturn r.opts.Address\n}\n\n\/\/ Network returns the address router advertises to the network\nfunc (r *router) Network() string {\n\treturn r.opts.Network\n}\n\n\/\/ Advertise advertises the routes to the network. It is a blocking function.\n\/\/ It returns error if any of the launched goroutines fail with error.\nfunc (r *router) Advertise() error {\n\t\/\/ add local service routes into the routing table\n\tif err := r.addServiceRoutes(r.opts.Registry, \"local\", DefaultLocalMetric); err != nil {\n\t\treturn fmt.Errorf(\"failed adding routes: %v\", err)\n\t}\n\n\tlocalWatcher, err := r.opts.Registry.Watch()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create registry watcher: %v\", err)\n\t}\n\n\t\/\/ error channel collecting goroutine errors\n\terrChan := make(chan error, 1)\n\n\tr.wg.Add(1)\n\tgo func() {\n\t\tdefer r.wg.Done()\n\t\t\/\/ watch local registry and register routes in routine table\n\t\terrChan <- r.manageServiceRoutes(localWatcher, DefaultLocalMetric)\n\t}()\n\n\treturn <-errChan\n}\n\n\/\/ addServiceRoutes adds all services in given registry to the routing table.\n\/\/ NOTE: this is a one-off operation done when bootstrapping the routing table\n\/\/ It returns error if either the services failed to be listed or\n\/\/ if the routes could not be added to the routing table.\nfunc (r *router) addServiceRoutes(reg registry.Registry, network string, metric int) error {\n\tservices, err := reg.ListServices()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list services: %v\", err)\n\t}\n\n\t\/\/ add each service node as a separate route;\n\tfor _, service := range services {\n\t\t\/\/ get the service to retrieve all its info\n\t\tsrvs, err := reg.GetService(service.Name)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ create a flat slide of nodes\n\t\tvar nodes []*registry.Node\n\t\tfor _, s := range srvs {\n\t\t\tnodes = append(nodes, s.Nodes...)\n\t\t}\n\n\t\t\/\/ range over the flat slice of nodes\n\t\tfor _, node := range nodes {\n\t\t\tgw := node.Address\n\t\t\tif node.Port > 0 {\n\t\t\t\tgw = fmt.Sprintf(\"%s:%d\", node.Address, node.Port)\n\t\t\t}\n\t\t\troute := Route{\n\t\t\t\tDestination: service.Name,\n\t\t\t\tGateway: gw,\n\t\t\t\tRouter: r.opts.Address,\n\t\t\t\tNetwork: r.opts.Network,\n\t\t\t\tMetric: metric,\n\t\t\t}\n\t\t\tif err := r.opts.Table.Add(route); err != nil && err != ErrDuplicateRoute {\n\t\t\t\treturn fmt.Errorf(\"error adding route for service %s: %s\", service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ manageServiceRoutes watches services in given registry and updates the routing table accordingly.\n\/\/ It returns error if the service registry watcher has stopped or if the routing table failed to be updated.\nfunc (r *router) manageServiceRoutes(w registry.Watcher, metric int) error {\n\t\/\/ wait in the background for the router to stop\n\t\/\/ when the router stops, stop the watcher and exit\n\tr.wg.Add(1)\n\tgo func() {\n\t\tdefer r.wg.Done()\n\t\t<-r.exit\n\t\tw.Stop()\n\t}()\n\n\tvar watchErr error\n\n\tfor {\n\t\tres, err := w.Next()\n\t\tif err == registry.ErrWatcherStopped {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\twatchErr = err\n\t\t\tbreak\n\t\t}\n\n\t\troute := Route{\n\t\t\tDestination: res.Service.Name,\n\t\t\tRouter: r.opts.Address,\n\t\t\tNetwork: r.opts.Network,\n\t\t\tMetric: metric,\n\t\t}\n\n\t\tswitch res.Action {\n\t\tcase \"create\":\n\t\t\t\/\/ only return error if the route is not duplicate, but something else has failed\n\t\t\tif err := r.opts.Table.Add(route); err != nil && err != ErrDuplicateRoute {\n\t\t\t\treturn fmt.Errorf(\"failed to add route for service %v: %s\", res.Service.Name, err)\n\t\t\t}\n\t\tcase \"delete\":\n\t\t\t\/\/ only return error if the route is not in the table, but something else has failed\n\t\t\tif err := r.opts.Table.Delete(route); err != nil && err != ErrRouteNotFound {\n\t\t\t\treturn fmt.Errorf(\"failed to delete route for service %v: %s\", res.Service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn watchErr\n}\n\n\/\/ Stop stops the router\nfunc (r *router) Stop() error {\n\t\/\/ notify all goroutines to finish\n\tclose(r.exit)\n\n\t\/\/ wait for all goroutines to finish\n\tr.wg.Wait()\n\n\treturn nil\n}\n\n\/\/ String prints debugging information about router\nfunc (r *router) String() string {\n\tsb := &strings.Builder{}\n\n\ttable := tablewriter.NewWriter(sb)\n\ttable.SetHeader([]string{\"ID\", \"Address\", \"Network\", \"Table\"})\n\n\tdata := []string{\n\t\tr.opts.ID,\n\t\tr.opts.Address,\n\t\tr.opts.Network,\n\t\tfmt.Sprintf(\"%d\", r.opts.Table.Size()),\n\t}\n\ttable.Append(data)\n\n\t\/\/ render table into sb\n\ttable.Render()\n\n\treturn sb.String()\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add operation<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/go:build !windows && !plan9 && !js\n\/\/ +build !windows,!plan9,!js\n\npackage daemon\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/daemon\/daemondefs\"\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nfunc TestActivate_InterruptsOutdatedServerAndSpawnsNewServer(t *testing.T) {\n\tactivated := 0\n\tsetupForActivate(t, func(name string, argv []string, attr *os.ProcAttr) error {\n\t\tstartServer(t, argv)\n\t\tactivated++\n\t\treturn nil\n\t})\n\tversion := api.Version - 1\n\toldServer := startServerOpts(t, cli(\"sock\", \"db\"), ServeOpts{Version: &version})\n\n\t_, err := Activate(io.Discard,\n\t\t&daemondefs.SpawnConfig{DbPath: \"db\", SockPath: \"sock\", RunDir: \".\"})\n\tif err != nil {\n\t\tt.Errorf(\"got error %v, want nil\", err)\n\t}\n\tif activated != 1 {\n\t\tt.Errorf(\"got activated %v times, want 1\", activated)\n\t}\n\toldServer.WaitQuit()\n}\n\nfunc TestActivate_FailsIfUnableToRemoveHangingSocket(t *testing.T) {\n\tif u, err := user.Current(); err != nil || u.Uid == \"0\" {\n\t\tt.Skip(\"current user is root or unknown\")\n\t}\n\tactivated := 0\n\tsetupForActivate(t, func(name string, argv []string, attr *os.ProcAttr) error {\n\t\tactivated++\n\t\treturn nil\n\t})\n\ttestutil.MustMkdirAll(\"d\")\n\tmakeHangingUNIXSocket(t, \"d\/sock\")\n\t\/\/ Remove write permission so that removing d\/sock will fail\n\tos.Chmod(\"d\", 0600)\n\tdefer os.Chmod(\"d\", 0700)\n\n\t_, err := Activate(io.Discard,\n\t\t&daemondefs.SpawnConfig{DbPath: \"db\", SockPath: \"d\/sock\", RunDir: \".\"})\n\tif err == nil {\n\t\tt.Errorf(\"got error nil, want non-nil\")\n\t}\n\tif activated != 0 {\n\t\tt.Errorf(\"got activated %v times, want 0\", activated)\n\t}\n}\n<commit_msg>pkg\/daemon: Fix activate_unix_test.go.<commit_after>\/\/go:build !windows && !plan9 && !js\n\/\/ +build !windows,!plan9,!js\n\npackage daemon\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/daemon\/daemondefs\"\n\t\"src.elv.sh\/pkg\/daemon\/internal\/api\"\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nfunc TestActivate_InterruptsOutdatedServerAndSpawnsNewServer(t *testing.T) {\n\tactivated := 0\n\tsetupForActivate(t, func(name string, argv []string, attr *os.ProcAttr) error {\n\t\tstartServer(t, argv)\n\t\tactivated++\n\t\treturn nil\n\t})\n\tversion := api.Version - 1\n\toldServer := startServerOpts(t, cli(\"sock\", \"db\"), ServeOpts{Version: &version})\n\n\t_, err := Activate(io.Discard,\n\t\t&daemondefs.SpawnConfig{DbPath: \"db\", SockPath: \"sock\", RunDir: \".\"})\n\tif err != nil {\n\t\tt.Errorf(\"got error %v, want nil\", err)\n\t}\n\tif activated != 1 {\n\t\tt.Errorf(\"got activated %v times, want 1\", activated)\n\t}\n\toldServer.WaitQuit()\n}\n\nfunc TestActivate_FailsIfUnableToRemoveHangingSocket(t *testing.T) {\n\tif u, err := user.Current(); err != nil || u.Uid == \"0\" {\n\t\tt.Skip(\"current user is root or unknown\")\n\t}\n\tactivated := 0\n\tsetupForActivate(t, func(name string, argv []string, attr *os.ProcAttr) error {\n\t\tactivated++\n\t\treturn nil\n\t})\n\ttestutil.MustMkdirAll(\"d\")\n\tmakeHangingUNIXSocket(t, \"d\/sock\")\n\t\/\/ Remove write permission so that removing d\/sock will fail\n\tos.Chmod(\"d\", 0600)\n\tdefer os.Chmod(\"d\", 0700)\n\n\t_, err := Activate(io.Discard,\n\t\t&daemondefs.SpawnConfig{DbPath: \"db\", SockPath: \"d\/sock\", RunDir: \".\"})\n\tif err == nil {\n\t\tt.Errorf(\"got error nil, want non-nil\")\n\t}\n\tif activated != 0 {\n\t\tt.Errorf(\"got activated %v times, want 0\", activated)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage adapter\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pingcap-incubator\/tidb-dashboard\/pkg\/apiserver\"\n\t\"github.com\/pingcap\/kvproto\/pkg\/pdpb\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/pingcap\/log\"\n\t\"github.com\/pingcap\/pd\/v4\/pkg\/logutil\"\n\t\"github.com\/pingcap\/pd\/v4\/server\"\n\t\"github.com\/pingcap\/pd\/v4\/server\/cluster\"\n)\n\nvar (\n\t\/\/ CheckInterval represents the time interval of running check.\n\tCheckInterval = time.Second\n)\n\n\/\/ Manager is used to control dashboard.\ntype Manager struct {\n\tctx context.Context\n\tcancel context.CancelFunc\n\twg sync.WaitGroup\n\n\tsrv *server.Server\n\tservice *apiserver.Service\n\tredirector *Redirector\n\n\tisLeader bool\n\tmembers []*pdpb.Member\n}\n\n\/\/ NewManager creates a new Manager.\nfunc NewManager(srv *server.Server, s *apiserver.Service, redirector *Redirector) *Manager {\n\tctx, cancel := context.WithCancel(srv.Context())\n\treturn &Manager{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tsrv: srv,\n\t\tservice: s,\n\t\tredirector: redirector,\n\t}\n}\n\n\/\/ Start monitoring the dynamic config and control the dashboard.\nfunc (m *Manager) Start() {\n\tm.wg.Add(1)\n\tgo m.serviceLoop()\n}\n\n\/\/ Stop monitoring the dynamic config and control the dashboard.\nfunc (m *Manager) Stop() {\n\tm.cancel()\n\tm.wg.Wait()\n\tlog.Info(\"exit dashboard loop\")\n}\n\nfunc (m *Manager) serviceLoop() {\n\tdefer logutil.LogPanic()\n\tdefer m.wg.Done()\n\n\tticker := time.NewTicker(CheckInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-m.ctx.Done():\n\t\t\tm.stopService()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tm.updateInfo()\n\t\t\tm.checkAddress()\n\t\t}\n\t}\n}\n\n\/\/ updateInfo updates information from the server.\nfunc (m *Manager) updateInfo() {\n\tif !m.srv.GetMember().IsLeader() {\n\t\tm.isLeader = false\n\t\tm.members = nil\n\t\tm.srv.GetPersistOptions().Reload(m.srv.GetStorage())\n\t\treturn\n\t}\n\n\tm.isLeader = true\n\n\tvar err error\n\tif m.members, err = cluster.GetMembers(m.srv.GetClient()); err != nil {\n\t\tlog.Warn(\"failed to get members\", zap.Error(err))\n\t\tm.members = nil\n\t\treturn\n\t}\n\n\tallHasClientUrls := true\n\tfor _, member := range m.members {\n\t\tif len(member.GetClientUrls()) == 0 {\n\t\t\tallHasClientUrls = false\n\t\t}\n\t}\n\tif !allHasClientUrls {\n\t\tlog.Warn(\"failed to get member client urls\")\n\t\tm.members = nil\n\t}\n}\n\n\/\/ checkDashboardAddress checks if the dashboard service needs to change due to dashboard address is changed.\nfunc (m *Manager) checkAddress() {\n\tdashboardAddress := m.srv.GetPersistOptions().GetDashboardAddress()\n\tswitch dashboardAddress {\n\tcase \"auto\":\n\t\tif m.isLeader && len(m.members) > 0 {\n\t\t\tm.setNewAddress()\n\t\t}\n\t\treturn\n\tcase \"none\":\n\t\tm.redirector.SetAddress(\"\")\n\t\tm.stopService()\n\t\treturn\n\tdefault:\n\t\tif _, err := url.Parse(dashboardAddress); err != nil {\n\t\t\tlog.Error(\"illegal dashboard address\", zap.String(\"address\", dashboardAddress))\n\t\t\treturn\n\t\t}\n\n\t\tif m.isLeader && m.needResetAddress(dashboardAddress) {\n\t\t\tm.setNewAddress()\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.redirector.SetAddress(dashboardAddress)\n\n\tclientUrls := m.srv.GetMemberInfo().GetClientUrls()\n\tif len(clientUrls) > 0 && clientUrls[0] == dashboardAddress {\n\t\tm.startService()\n\t} else {\n\t\tm.stopService()\n\t}\n}\n\nfunc (m *Manager) needResetAddress(addr string) bool {\n\tif len(m.members) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, member := range m.members {\n\t\tif member.GetClientUrls()[0] == addr {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *Manager) setNewAddress() {\n\t\/\/ get new dashboard address\n\tmembers := m.members\n\tvar addr string\n\tswitch len(members) {\n\tcase 1:\n\t\taddr = members[0].GetClientUrls()[0]\n\tdefault:\n\t\taddr = members[0].GetClientUrls()[0]\n\t\tleaderID := m.srv.GetMemberInfo().MemberId\n\t\tsort.Slice(members, func(i, j int) bool { return members[i].GetMemberId() < members[j].GetMemberId() })\n\t\tfor _, member := range members {\n\t\t\tif member.MemberId != leaderID {\n\t\t\t\taddr = member.GetClientUrls()[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ set new dashboard address\n\tcfg := m.srv.GetPersistOptions().GetPDServerConfig().Clone()\n\tcfg.DashboardAddress = addr\n\tm.srv.SetPDServerConfig(*cfg)\n}\n\nfunc (m *Manager) startService() {\n\tif m.service.IsRunning() {\n\t\treturn\n\t}\n\tif err := m.service.Start(m.ctx); err != nil {\n\t\tlog.Error(\"Can not start dashboard server\", zap.Error(err))\n\t} else {\n\t\tlog.Info(\"Dashboard server is started\")\n\t}\n}\n\nfunc (m *Manager) stopService() {\n\tif !m.service.IsRunning() {\n\t\treturn\n\t}\n\tif err := m.service.Stop(context.Background()); err != nil {\n\t\tlog.Error(\"Stop dashboard server error\", zap.Error(err))\n\t} else {\n\t\tlog.Info(\"Dashboard server is stopped\")\n\t}\n}\n<commit_msg>dashboard: generate Context every time the manager starts (#2466)<commit_after>\/\/ Copyright 2020 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage adapter\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pingcap-incubator\/tidb-dashboard\/pkg\/apiserver\"\n\t\"github.com\/pingcap\/kvproto\/pkg\/pdpb\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/pingcap\/log\"\n\t\"github.com\/pingcap\/pd\/v4\/pkg\/logutil\"\n\t\"github.com\/pingcap\/pd\/v4\/server\"\n\t\"github.com\/pingcap\/pd\/v4\/server\/cluster\"\n)\n\nvar (\n\t\/\/ CheckInterval represents the time interval of running check.\n\tCheckInterval = time.Second\n)\n\n\/\/ Manager is used to control dashboard.\ntype Manager struct {\n\tctx context.Context\n\tcancel context.CancelFunc\n\twg sync.WaitGroup\n\n\tsrv *server.Server\n\tservice *apiserver.Service\n\tredirector *Redirector\n\n\tisLeader bool\n\tmembers []*pdpb.Member\n}\n\n\/\/ NewManager creates a new Manager.\nfunc NewManager(srv *server.Server, s *apiserver.Service, redirector *Redirector) *Manager {\n\treturn &Manager{\n\t\tsrv: srv,\n\t\tservice: s,\n\t\tredirector: redirector,\n\t}\n}\n\n\/\/ Start monitoring the dynamic config and control the dashboard.\nfunc (m *Manager) Start() {\n\tm.ctx, m.cancel = context.WithCancel(m.srv.Context())\n\tm.wg.Add(1)\n\tm.isLeader = false\n\tm.members = nil\n\tgo m.serviceLoop()\n}\n\n\/\/ Stop monitoring the dynamic config and control the dashboard.\nfunc (m *Manager) Stop() {\n\tm.cancel()\n\tm.wg.Wait()\n\tlog.Info(\"exit dashboard loop\")\n}\n\nfunc (m *Manager) serviceLoop() {\n\tdefer logutil.LogPanic()\n\tdefer m.wg.Done()\n\n\tticker := time.NewTicker(CheckInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-m.ctx.Done():\n\t\t\tm.stopService()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tm.updateInfo()\n\t\t\tm.checkAddress()\n\t\t}\n\t}\n}\n\n\/\/ updateInfo updates information from the server.\nfunc (m *Manager) updateInfo() {\n\tif !m.srv.GetMember().IsLeader() {\n\t\tm.isLeader = false\n\t\tm.members = nil\n\t\tm.srv.GetPersistOptions().Reload(m.srv.GetStorage())\n\t\treturn\n\t}\n\n\tm.isLeader = true\n\n\tvar err error\n\tif m.members, err = cluster.GetMembers(m.srv.GetClient()); err != nil {\n\t\tlog.Warn(\"failed to get members\", zap.Error(err))\n\t\tm.members = nil\n\t\treturn\n\t}\n\n\tallHasClientUrls := true\n\tfor _, member := range m.members {\n\t\tif len(member.GetClientUrls()) == 0 {\n\t\t\tallHasClientUrls = false\n\t\t}\n\t}\n\tif !allHasClientUrls {\n\t\tlog.Warn(\"failed to get member client urls\")\n\t\tm.members = nil\n\t}\n}\n\n\/\/ checkDashboardAddress checks if the dashboard service needs to change due to dashboard address is changed.\nfunc (m *Manager) checkAddress() {\n\tdashboardAddress := m.srv.GetPersistOptions().GetDashboardAddress()\n\tswitch dashboardAddress {\n\tcase \"auto\":\n\t\tif m.isLeader && len(m.members) > 0 {\n\t\t\tm.setNewAddress()\n\t\t}\n\t\treturn\n\tcase \"none\":\n\t\tm.redirector.SetAddress(\"\")\n\t\tm.stopService()\n\t\treturn\n\tdefault:\n\t\tif _, err := url.Parse(dashboardAddress); err != nil {\n\t\t\tlog.Error(\"illegal dashboard address\", zap.String(\"address\", dashboardAddress))\n\t\t\treturn\n\t\t}\n\n\t\tif m.isLeader && m.needResetAddress(dashboardAddress) {\n\t\t\tm.setNewAddress()\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.redirector.SetAddress(dashboardAddress)\n\n\tclientUrls := m.srv.GetMemberInfo().GetClientUrls()\n\tif len(clientUrls) > 0 && clientUrls[0] == dashboardAddress {\n\t\tm.startService()\n\t} else {\n\t\tm.stopService()\n\t}\n}\n\nfunc (m *Manager) needResetAddress(addr string) bool {\n\tif len(m.members) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, member := range m.members {\n\t\tif member.GetClientUrls()[0] == addr {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *Manager) setNewAddress() {\n\t\/\/ get new dashboard address\n\tmembers := m.members\n\tvar addr string\n\tswitch len(members) {\n\tcase 1:\n\t\taddr = members[0].GetClientUrls()[0]\n\tdefault:\n\t\taddr = members[0].GetClientUrls()[0]\n\t\tleaderID := m.srv.GetMemberInfo().MemberId\n\t\tsort.Slice(members, func(i, j int) bool { return members[i].GetMemberId() < members[j].GetMemberId() })\n\t\tfor _, member := range members {\n\t\t\tif member.MemberId != leaderID {\n\t\t\t\taddr = member.GetClientUrls()[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ set new dashboard address\n\tcfg := m.srv.GetPersistOptions().GetPDServerConfig().Clone()\n\tcfg.DashboardAddress = addr\n\tm.srv.SetPDServerConfig(*cfg)\n}\n\nfunc (m *Manager) startService() {\n\tif m.service.IsRunning() {\n\t\treturn\n\t}\n\tif err := m.service.Start(m.ctx); err != nil {\n\t\tlog.Error(\"Can not start dashboard server\", zap.Error(err))\n\t} else {\n\t\tlog.Info(\"Dashboard server is started\")\n\t}\n}\n\nfunc (m *Manager) stopService() {\n\tif !m.service.IsRunning() {\n\t\treturn\n\t}\n\tif err := m.service.Stop(context.Background()); err != nil {\n\t\tlog.Error(\"Stop dashboard server error\", zap.Error(err))\n\t} else {\n\t\tlog.Info(\"Dashboard server is stopped\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage download\n\nimport (\n\t\"context\"\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"google.golang.org\/api\/option\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/detect\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\nconst (\n\t\/\/ PreloadVersion is the current version of the preloaded tarball\n\t\/\/\n\t\/\/ NOTE: You may need to bump this version up when upgrading auxiliary docker images\n\tPreloadVersion = \"v14\"\n\t\/\/ PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist\n\tPreloadBucket = \"minikube-preloaded-volume-tarballs\"\n)\n\nvar (\n\tpreloadStates = make(map[string]map[string]bool)\n)\n\n\/\/ TarballName returns name of the tarball\nfunc TarballName(k8sVersion, containerRuntime string) string {\n\tif containerRuntime == \"crio\" {\n\t\tcontainerRuntime = \"cri-o\"\n\t}\n\tvar storageDriver string\n\tif containerRuntime == \"cri-o\" {\n\t\tstorageDriver = \"overlay\"\n\t} else {\n\t\tstorageDriver = \"overlay2\"\n\t}\n\tarch := detect.EffectiveArch()\n\treturn fmt.Sprintf(\"preloaded-images-k8s-%s-%s-%s-%s-%s.tar.lz4\", PreloadVersion, k8sVersion, containerRuntime, storageDriver, arch)\n}\n\n\/\/ returns the name of the checksum file\nfunc checksumName(k8sVersion, containerRuntime string) string {\n\treturn fmt.Sprintf(\"%s.checksum\", TarballName(k8sVersion, containerRuntime))\n}\n\n\/\/ returns target dir for all cached items related to preloading\nfunc targetDir() string {\n\treturn localpath.MakeMiniPath(\"cache\", \"preloaded-tarball\")\n}\n\n\/\/ PreloadChecksumPath returns the local path to the cached checksum file\nfunc PreloadChecksumPath(k8sVersion, containerRuntime string) string {\n\treturn filepath.Join(targetDir(), checksumName(k8sVersion, containerRuntime))\n}\n\n\/\/ TarballPath returns the local path to the cached preload tarball\nfunc TarballPath(k8sVersion, containerRuntime string) string {\n\treturn filepath.Join(targetDir(), TarballName(k8sVersion, containerRuntime))\n}\n\n\/\/ remoteTarballURL returns the URL for the remote tarball in GCS\nfunc remoteTarballURL(k8sVersion, containerRuntime string) string {\n\treturn fmt.Sprintf(\"https:\/\/%s\/%s\/%s\", downloadHost, PreloadBucket, TarballName(k8sVersion, containerRuntime))\n}\n\nfunc setPreloadState(k8sVersion, containerRuntime string, value bool) {\n\tcRuntimes, ok := preloadStates[k8sVersion]\n\tif !ok {\n\t\tcRuntimes = make(map[string]bool)\n\t\tpreloadStates[k8sVersion] = cRuntimes\n\t}\n\tcRuntimes[containerRuntime] = value\n}\n\nvar checkRemotePreloadExists = func(k8sVersion, containerRuntime string) bool {\n\turl := remoteTarballURL(k8sVersion, containerRuntime)\n\tresp, err := http.Head(url)\n\tif err != nil {\n\t\tklog.Warningf(\"%s fetch error: %v\", url, err)\n\t\treturn false\n\t}\n\n\t\/\/ note: err won't be set if it's a 404\n\tif resp.StatusCode != http.StatusOK {\n\t\tklog.Warningf(\"%s status code: %d\", url, resp.StatusCode)\n\t\treturn false\n\t}\n\n\tklog.Infof(\"Found remote preload: %s\", url)\n\treturn true\n}\n\n\/\/ PreloadExists returns true if there is a preloaded tarball that can be used\nfunc PreloadExists(k8sVersion, containerRuntime, driverName string, forcePreload ...bool) bool {\n\t\/\/ TODO (#8166): Get rid of the need for this and viper at all\n\tforce := false\n\tif len(forcePreload) > 0 {\n\t\tforce = forcePreload[0]\n\t}\n\n\t\/\/ TODO: debug why this func is being called two times\n\tklog.Infof(\"Checking if preload exists for k8s version %s and runtime %s\", k8sVersion, containerRuntime)\n\t\/\/ If `driverName` is BareMetal, there is no preload. Note: some uses of\n\t\/\/ `PreloadExists` assume that the driver is irrelevant unless BareMetal.\n\tif !driver.AllowsPreload(driverName) || !viper.GetBool(\"preload\") && !force {\n\t\treturn false\n\t}\n\n\t\/\/ If the preload existence is cached, just return that value.\n\tpreloadState, ok := preloadStates[k8sVersion][containerRuntime]\n\tif ok {\n\t\treturn preloadState\n\t}\n\n\t\/\/ Omit remote check if tarball exists locally\n\ttargetPath := TarballPath(k8sVersion, containerRuntime)\n\tif _, err := checkCache(targetPath); err == nil {\n\t\tklog.Infof(\"Found local preload: %s\", targetPath)\n\t\tsetPreloadState(k8sVersion, containerRuntime, true)\n\t\treturn true\n\t}\n\n\texistence := checkRemotePreloadExists(k8sVersion, containerRuntime)\n\tsetPreloadState(k8sVersion, containerRuntime, existence)\n\treturn existence\n}\n\nvar checkPreloadExists = PreloadExists\n\n\/\/ Preload caches the preloaded images tarball on the host machine\nfunc Preload(k8sVersion, containerRuntime, driverName string) error {\n\ttargetPath := TarballPath(k8sVersion, containerRuntime)\n\ttargetLock := targetPath + \".lock\"\n\n\treleaser, err := lockDownload(targetLock)\n\tif releaser != nil {\n\t\tdefer releaser.Release()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := checkCache(targetPath); err == nil {\n\t\tklog.Infof(\"Found %s in cache, skipping download\", targetPath)\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure we support this k8s version\n\tif !checkPreloadExists(k8sVersion, containerRuntime, driverName) {\n\t\tklog.Infof(\"Preloaded tarball for k8s version %s does not exist\", k8sVersion)\n\t\treturn nil\n\t}\n\n\tout.Step(style.FileDownload, \"Downloading Kubernetes {{.version}} preload ...\", out.V{\"version\": k8sVersion})\n\turl := remoteTarballURL(k8sVersion, containerRuntime)\n\n\tchecksum, err := getChecksum(k8sVersion, containerRuntime)\n\tvar realPath string\n\tif err != nil {\n\t\tklog.Warningf(\"No checksum for preloaded tarball for k8s version %s: %v\", k8sVersion, err)\n\t\trealPath = targetPath\n\t\ttmp, err := os.CreateTemp(targetDir(), TarballName(k8sVersion, containerRuntime)+\".*\")\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"tempfile\")\n\t\t}\n\t\ttargetPath = tmp.Name()\n\t} else if checksum != nil {\n\t\t\/\/ add URL parameter for go-getter to automatically verify the checksum\n\t\turl += fmt.Sprintf(\"?checksum=md5:%s\", hex.EncodeToString(checksum))\n\t}\n\n\tif err := download(url, targetPath); err != nil {\n\t\treturn errors.Wrapf(err, \"download failed: %s\", url)\n\t}\n\n\tif err := ensureChecksumValid(k8sVersion, containerRuntime, targetPath, checksum); err != nil {\n\t\treturn err\n\t}\n\n\tif realPath != \"\" {\n\t\tklog.Infof(\"renaming tempfile to %s ...\", TarballName(k8sVersion, containerRuntime))\n\t\terr := os.Rename(targetPath, realPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"rename\")\n\t\t}\n\t}\n\n\t\/\/ If the download was successful, mark off that the preload exists in the cache.\n\tsetPreloadState(k8sVersion, containerRuntime, true)\n\treturn nil\n}\n\nfunc getStorageAttrs(name string) (*storage.ObjectAttrs, error) {\n\tctx := context.Background()\n\tclient, err := storage.NewClient(ctx, option.WithoutAuthentication())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting storage client\")\n\t}\n\tattrs, err := client.Bucket(PreloadBucket).Object(name).Attrs(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting storage object\")\n\t}\n\treturn attrs, nil\n}\n\n\/\/ getChecksum returns the MD5 checksum of the preload tarball\nvar getChecksum = func(k8sVersion, containerRuntime string) ([]byte, error) {\n\tklog.Infof(\"getting checksum for %s ...\", TarballName(k8sVersion, containerRuntime))\n\tattrs, err := getStorageAttrs(TarballName(k8sVersion, containerRuntime))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn attrs.MD5, nil\n}\n\n\/\/ saveChecksumFile saves the checksum to a local file for later verification\nfunc saveChecksumFile(k8sVersion, containerRuntime string, checksum []byte) error {\n\tklog.Infof(\"saving checksum for %s ...\", TarballName(k8sVersion, containerRuntime))\n\treturn os.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0o644)\n}\n\n\/\/ verifyChecksum returns true if the checksum of the local binary matches\n\/\/ the checksum of the remote binary\nfunc verifyChecksum(k8sVersion, containerRuntime, path string) error {\n\tklog.Infof(\"verifying checksumm of %s ...\", path)\n\t\/\/ get md5 checksum of tarball path\n\tcontents, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading tarball\")\n\t}\n\tchecksum := md5.Sum(contents)\n\n\tremoteChecksum, err := os.ReadFile(PreloadChecksumPath(k8sVersion, containerRuntime))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading checksum file\")\n\t}\n\n\t\/\/ create a slice of checksum, which is [16]byte\n\tif string(remoteChecksum) != string(checksum[:]) {\n\t\treturn fmt.Errorf(\"checksum of %s does not match remote checksum (%s != %s)\", path, string(remoteChecksum), string(checksum[:]))\n\t}\n\treturn nil\n}\n\n\/\/ ensureChecksumValid saves and verifies local binary checksum matches remote binary checksum\nvar ensureChecksumValid = func(k8sVersion, containerRuntime, targetPath string, checksum []byte) error {\n\tif err := saveChecksumFile(k8sVersion, containerRuntime, checksum); err != nil {\n\t\treturn errors.Wrap(err, \"saving checksum file\")\n\t}\n\n\tif err := verifyChecksum(k8sVersion, containerRuntime, targetPath); err != nil {\n\t\treturn errors.Wrap(err, \"verify\")\n\t}\n\n\treturn nil\n}\n\n\/\/ CleanUpOlderPreloads deletes preload files beloning to older minikube versions\n\/\/ checks the current preload version and then if the saved tar file is belongs to older minikube it will delete it\n\/\/ in case of failure only logs to the user\nfunc CleanUpOlderPreloads() {\n\tfiles, err := os.ReadDir(targetDir())\n\tif err != nil {\n\t\tklog.Warningf(\"Failed to list preload files: %v\", err)\n\t}\n\n\tfor _, file := range files {\n\t\tsplited := strings.Split(file.Name(), \"-\")\n\t\tif len(splited) < 4 {\n\t\t\tcontinue\n\t\t}\n\t\tver := splited[3]\n\t\tif ver != PreloadVersion {\n\t\t\tfn := path.Join(targetDir(), file.Name())\n\t\t\tklog.Infof(\"deleting older generation preload %s\", fn)\n\t\t\terr := os.Remove(fn)\n\t\t\tif err != nil {\n\t\t\t\tklog.Warningf(\"Failed to clean up older preload files, consider running `minikube delete --all --purge`\")\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Bump PreloadVersion const to v15<commit_after>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage download\n\nimport (\n\t\"context\"\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"google.golang.org\/api\/option\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/detect\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\nconst (\n\t\/\/ PreloadVersion is the current version of the preloaded tarball\n\t\/\/\n\t\/\/ NOTE: You may need to bump this version up when upgrading auxiliary docker images\n\tPreloadVersion = \"v15\"\n\t\/\/ PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist\n\tPreloadBucket = \"minikube-preloaded-volume-tarballs\"\n)\n\nvar (\n\tpreloadStates = make(map[string]map[string]bool)\n)\n\n\/\/ TarballName returns name of the tarball\nfunc TarballName(k8sVersion, containerRuntime string) string {\n\tif containerRuntime == \"crio\" {\n\t\tcontainerRuntime = \"cri-o\"\n\t}\n\tvar storageDriver string\n\tif containerRuntime == \"cri-o\" {\n\t\tstorageDriver = \"overlay\"\n\t} else {\n\t\tstorageDriver = \"overlay2\"\n\t}\n\tarch := detect.EffectiveArch()\n\treturn fmt.Sprintf(\"preloaded-images-k8s-%s-%s-%s-%s-%s.tar.lz4\", PreloadVersion, k8sVersion, containerRuntime, storageDriver, arch)\n}\n\n\/\/ returns the name of the checksum file\nfunc checksumName(k8sVersion, containerRuntime string) string {\n\treturn fmt.Sprintf(\"%s.checksum\", TarballName(k8sVersion, containerRuntime))\n}\n\n\/\/ returns target dir for all cached items related to preloading\nfunc targetDir() string {\n\treturn localpath.MakeMiniPath(\"cache\", \"preloaded-tarball\")\n}\n\n\/\/ PreloadChecksumPath returns the local path to the cached checksum file\nfunc PreloadChecksumPath(k8sVersion, containerRuntime string) string {\n\treturn filepath.Join(targetDir(), checksumName(k8sVersion, containerRuntime))\n}\n\n\/\/ TarballPath returns the local path to the cached preload tarball\nfunc TarballPath(k8sVersion, containerRuntime string) string {\n\treturn filepath.Join(targetDir(), TarballName(k8sVersion, containerRuntime))\n}\n\n\/\/ remoteTarballURL returns the URL for the remote tarball in GCS\nfunc remoteTarballURL(k8sVersion, containerRuntime string) string {\n\treturn fmt.Sprintf(\"https:\/\/%s\/%s\/%s\", downloadHost, PreloadBucket, TarballName(k8sVersion, containerRuntime))\n}\n\nfunc setPreloadState(k8sVersion, containerRuntime string, value bool) {\n\tcRuntimes, ok := preloadStates[k8sVersion]\n\tif !ok {\n\t\tcRuntimes = make(map[string]bool)\n\t\tpreloadStates[k8sVersion] = cRuntimes\n\t}\n\tcRuntimes[containerRuntime] = value\n}\n\nvar checkRemotePreloadExists = func(k8sVersion, containerRuntime string) bool {\n\turl := remoteTarballURL(k8sVersion, containerRuntime)\n\tresp, err := http.Head(url)\n\tif err != nil {\n\t\tklog.Warningf(\"%s fetch error: %v\", url, err)\n\t\treturn false\n\t}\n\n\t\/\/ note: err won't be set if it's a 404\n\tif resp.StatusCode != http.StatusOK {\n\t\tklog.Warningf(\"%s status code: %d\", url, resp.StatusCode)\n\t\treturn false\n\t}\n\n\tklog.Infof(\"Found remote preload: %s\", url)\n\treturn true\n}\n\n\/\/ PreloadExists returns true if there is a preloaded tarball that can be used\nfunc PreloadExists(k8sVersion, containerRuntime, driverName string, forcePreload ...bool) bool {\n\t\/\/ TODO (#8166): Get rid of the need for this and viper at all\n\tforce := false\n\tif len(forcePreload) > 0 {\n\t\tforce = forcePreload[0]\n\t}\n\n\t\/\/ TODO: debug why this func is being called two times\n\tklog.Infof(\"Checking if preload exists for k8s version %s and runtime %s\", k8sVersion, containerRuntime)\n\t\/\/ If `driverName` is BareMetal, there is no preload. Note: some uses of\n\t\/\/ `PreloadExists` assume that the driver is irrelevant unless BareMetal.\n\tif !driver.AllowsPreload(driverName) || !viper.GetBool(\"preload\") && !force {\n\t\treturn false\n\t}\n\n\t\/\/ If the preload existence is cached, just return that value.\n\tpreloadState, ok := preloadStates[k8sVersion][containerRuntime]\n\tif ok {\n\t\treturn preloadState\n\t}\n\n\t\/\/ Omit remote check if tarball exists locally\n\ttargetPath := TarballPath(k8sVersion, containerRuntime)\n\tif _, err := checkCache(targetPath); err == nil {\n\t\tklog.Infof(\"Found local preload: %s\", targetPath)\n\t\tsetPreloadState(k8sVersion, containerRuntime, true)\n\t\treturn true\n\t}\n\n\texistence := checkRemotePreloadExists(k8sVersion, containerRuntime)\n\tsetPreloadState(k8sVersion, containerRuntime, existence)\n\treturn existence\n}\n\nvar checkPreloadExists = PreloadExists\n\n\/\/ Preload caches the preloaded images tarball on the host machine\nfunc Preload(k8sVersion, containerRuntime, driverName string) error {\n\ttargetPath := TarballPath(k8sVersion, containerRuntime)\n\ttargetLock := targetPath + \".lock\"\n\n\treleaser, err := lockDownload(targetLock)\n\tif releaser != nil {\n\t\tdefer releaser.Release()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := checkCache(targetPath); err == nil {\n\t\tklog.Infof(\"Found %s in cache, skipping download\", targetPath)\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure we support this k8s version\n\tif !checkPreloadExists(k8sVersion, containerRuntime, driverName) {\n\t\tklog.Infof(\"Preloaded tarball for k8s version %s does not exist\", k8sVersion)\n\t\treturn nil\n\t}\n\n\tout.Step(style.FileDownload, \"Downloading Kubernetes {{.version}} preload ...\", out.V{\"version\": k8sVersion})\n\turl := remoteTarballURL(k8sVersion, containerRuntime)\n\n\tchecksum, err := getChecksum(k8sVersion, containerRuntime)\n\tvar realPath string\n\tif err != nil {\n\t\tklog.Warningf(\"No checksum for preloaded tarball for k8s version %s: %v\", k8sVersion, err)\n\t\trealPath = targetPath\n\t\ttmp, err := os.CreateTemp(targetDir(), TarballName(k8sVersion, containerRuntime)+\".*\")\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"tempfile\")\n\t\t}\n\t\ttargetPath = tmp.Name()\n\t} else if checksum != nil {\n\t\t\/\/ add URL parameter for go-getter to automatically verify the checksum\n\t\turl += fmt.Sprintf(\"?checksum=md5:%s\", hex.EncodeToString(checksum))\n\t}\n\n\tif err := download(url, targetPath); err != nil {\n\t\treturn errors.Wrapf(err, \"download failed: %s\", url)\n\t}\n\n\tif err := ensureChecksumValid(k8sVersion, containerRuntime, targetPath, checksum); err != nil {\n\t\treturn err\n\t}\n\n\tif realPath != \"\" {\n\t\tklog.Infof(\"renaming tempfile to %s ...\", TarballName(k8sVersion, containerRuntime))\n\t\terr := os.Rename(targetPath, realPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"rename\")\n\t\t}\n\t}\n\n\t\/\/ If the download was successful, mark off that the preload exists in the cache.\n\tsetPreloadState(k8sVersion, containerRuntime, true)\n\treturn nil\n}\n\nfunc getStorageAttrs(name string) (*storage.ObjectAttrs, error) {\n\tctx := context.Background()\n\tclient, err := storage.NewClient(ctx, option.WithoutAuthentication())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting storage client\")\n\t}\n\tattrs, err := client.Bucket(PreloadBucket).Object(name).Attrs(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting storage object\")\n\t}\n\treturn attrs, nil\n}\n\n\/\/ getChecksum returns the MD5 checksum of the preload tarball\nvar getChecksum = func(k8sVersion, containerRuntime string) ([]byte, error) {\n\tklog.Infof(\"getting checksum for %s ...\", TarballName(k8sVersion, containerRuntime))\n\tattrs, err := getStorageAttrs(TarballName(k8sVersion, containerRuntime))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn attrs.MD5, nil\n}\n\n\/\/ saveChecksumFile saves the checksum to a local file for later verification\nfunc saveChecksumFile(k8sVersion, containerRuntime string, checksum []byte) error {\n\tklog.Infof(\"saving checksum for %s ...\", TarballName(k8sVersion, containerRuntime))\n\treturn os.WriteFile(PreloadChecksumPath(k8sVersion, containerRuntime), checksum, 0o644)\n}\n\n\/\/ verifyChecksum returns true if the checksum of the local binary matches\n\/\/ the checksum of the remote binary\nfunc verifyChecksum(k8sVersion, containerRuntime, path string) error {\n\tklog.Infof(\"verifying checksumm of %s ...\", path)\n\t\/\/ get md5 checksum of tarball path\n\tcontents, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading tarball\")\n\t}\n\tchecksum := md5.Sum(contents)\n\n\tremoteChecksum, err := os.ReadFile(PreloadChecksumPath(k8sVersion, containerRuntime))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading checksum file\")\n\t}\n\n\t\/\/ create a slice of checksum, which is [16]byte\n\tif string(remoteChecksum) != string(checksum[:]) {\n\t\treturn fmt.Errorf(\"checksum of %s does not match remote checksum (%s != %s)\", path, string(remoteChecksum), string(checksum[:]))\n\t}\n\treturn nil\n}\n\n\/\/ ensureChecksumValid saves and verifies local binary checksum matches remote binary checksum\nvar ensureChecksumValid = func(k8sVersion, containerRuntime, targetPath string, checksum []byte) error {\n\tif err := saveChecksumFile(k8sVersion, containerRuntime, checksum); err != nil {\n\t\treturn errors.Wrap(err, \"saving checksum file\")\n\t}\n\n\tif err := verifyChecksum(k8sVersion, containerRuntime, targetPath); err != nil {\n\t\treturn errors.Wrap(err, \"verify\")\n\t}\n\n\treturn nil\n}\n\n\/\/ CleanUpOlderPreloads deletes preload files beloning to older minikube versions\n\/\/ checks the current preload version and then if the saved tar file is belongs to older minikube it will delete it\n\/\/ in case of failure only logs to the user\nfunc CleanUpOlderPreloads() {\n\tfiles, err := os.ReadDir(targetDir())\n\tif err != nil {\n\t\tklog.Warningf(\"Failed to list preload files: %v\", err)\n\t}\n\n\tfor _, file := range files {\n\t\tsplited := strings.Split(file.Name(), \"-\")\n\t\tif len(splited) < 4 {\n\t\t\tcontinue\n\t\t}\n\t\tver := splited[3]\n\t\tif ver != PreloadVersion {\n\t\t\tfn := path.Join(targetDir(), file.Name())\n\t\t\tklog.Infof(\"deleting older generation preload %s\", fn)\n\t\t\terr := os.Remove(fn)\n\t\t\tif err != nil {\n\t\t\t\tklog.Warningf(\"Failed to clean up older preload files, consider running `minikube delete --all --purge`\")\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage shell\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestGenerateUsageHint(t *testing.T) {\n\tvar testCases = []struct {\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{EnvConfig{\"\"}, `# foo\n# eval $(bar)`},\n\t\t{EnvConfig{\"powershell\"}, `# foo\n# & bar | Invoke-Expression`},\n\t\t{EnvConfig{\"bash\"}, `# foo\n# eval $(bar)`},\n\t\t{EnvConfig{\"powershell\"}, `# foo\n# & bar | Invoke-Expression`},\n\t\t{EnvConfig{\"emacs\"}, `;; foo\n;; (with-temp-buffer (shell-command \"bar\" (current-buffer)) (eval-buffer))`},\n\t\t{EnvConfig{\"fish\"}, `# foo\n# bar | source`},\n\t\t{EnvConfig{\"none\"}, `# foo\n# eval $(bar)`},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tgot := strings.TrimSpace(generateUsageHint(tc.ec, \"foo\", \"bar\"))\n\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\tif got != expected {\n\t\t\t\tt.Errorf(\"Expected '%v' but got '%v'\", expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCfgSet(t *testing.T) {\n\tvar testCases = []struct {\n\t\tplz, cmd string\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{\"\", \"eval\", EnvConfig{\"\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"bash\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"powershell\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"cmd\"}, ``},\n\t\t{\"\", \"eval\", EnvConfig{\"emacs\"}, `\")`},\n\t\t{\"\", \"eval\", EnvConfig{\"none\"}, ``},\n\t\t{\"\", \"eval\", EnvConfig{\"fish\"}, `\";`},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tconf := CfgSet(tc.ec, tc.plz, tc.cmd)\n\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\tgot := strings.TrimSpace(conf.Suffix)\n\t\t\tif expected != got {\n\t\t\t\tt.Errorf(\"Expected suffix '%v' but got '%v'\", expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestUnsetScript(t *testing.T) {\n\tvar testCases = []struct {\n\t\tvars []string\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"\"}, `unset baz bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"bash\"}, `unset baz bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"powershell\"}, `Remove-Item Env:\\\\baz Env:\\\\bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"cmd\"}, `SET baz=\nSET bar=`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"fish\"}, `set -e baz;\nset -e bar;`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"emacs\"}, `(setenv \"baz\" nil)\n(setenv \"bar\" nil)`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"none\"}, `baz bar`},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tvar b bytes.Buffer\n\n\t\t\tif err := UnsetScript(tc.ec, &b, tc.vars); err != nil {\n\t\t\t\tt.Fatalf(\"Unexpected error when unseting script happen: %v\", err)\n\t\t\t} else {\n\t\t\t\twrittenMessage := strings.TrimSpace(b.String())\n\t\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\t\tif writtenMessage != expected {\n\t\t\t\t\tt.Fatalf(\"Expected '%v' but got '%v' \", tc.expected, writtenMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDetectSet(t *testing.T) {\n\torgShellEnv := os.Getenv(\"SHELL\")\n\tdefer os.Setenv(\"SHELL\", orgShellEnv)\n\n\tos.Setenv(\"SHELL\", \"\/bin\/bash\")\n\tif s, err := Detect(); err != nil {\n\t\tt.Fatalf(\"unexpected error: '%v' during shell detection. Returned shell: %s\", err, s)\n\t} else if s == \"\" {\n\t\tt.Fatalf(\"Detected shell expected to be non empty string\")\n\t}\n}\n\nfunc TestDetectUnset(t *testing.T) {\n\torgShellEnv := os.Getenv(\"SHELL\")\n\tdefer os.Setenv(\"SHELL\", orgShellEnv)\n\n\tos.Unsetenv(\"SHELL\")\n\tif s, err := Detect(); err != nil {\n\t\tt.Fatalf(\"unexpected error: '%v' during shell detection. Returned shell: %s\", err, s)\n\t} else if s == \"\" {\n\t\tt.Fatalf(\"Detected shell expected to be non empty string\")\n\t}\n}\n\nfunc TestSetScript(t *testing.T) {\n\tec := EnvConfig{\"bash\"}\n\tvar w bytes.Buffer\n\tif err := SetScript(ec, &w, \"foo\", nil); err != nil {\n\t\tt.Fatalf(\"Unexpected error: '%v' during Setting script\", err)\n\t}\n\tif w.String() != \"foo\" {\n\t\tt.Fatalf(\"Expected foo writed by SetScript, but got '%v'\", w.String())\n\t}\n\tif ec.Shell == \"\" {\n\t\tt.Fatalf(\"Expected no empty shell\")\n\t}\n}\n<commit_msg>Fix shell test to reflect the fact that shell=none no longer has a usage msg.<commit_after>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage shell\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestGenerateUsageHint(t *testing.T) {\n\tvar testCases = []struct {\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{EnvConfig{\"\"}, `# foo\n# eval $(bar)`},\n\t\t{EnvConfig{\"powershell\"}, `# foo\n# & bar | Invoke-Expression`},\n\t\t{EnvConfig{\"bash\"}, `# foo\n# eval $(bar)`},\n\t\t{EnvConfig{\"powershell\"}, `# foo\n# & bar | Invoke-Expression`},\n\t\t{EnvConfig{\"emacs\"}, `;; foo\n;; (with-temp-buffer (shell-command \"bar\" (current-buffer)) (eval-buffer))`},\n\t\t{EnvConfig{\"fish\"}, `# foo\n# bar | source`},\n\t\t{EnvConfig{\"none\"}, ``},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tgot := strings.TrimSpace(generateUsageHint(tc.ec, \"foo\", \"bar\"))\n\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\tif got != expected {\n\t\t\t\tt.Errorf(\"Expected '%v' but got '%v'\", expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCfgSet(t *testing.T) {\n\tvar testCases = []struct {\n\t\tplz, cmd string\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{\"\", \"eval\", EnvConfig{\"\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"bash\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"powershell\"}, `\"`},\n\t\t{\"\", \"eval\", EnvConfig{\"cmd\"}, ``},\n\t\t{\"\", \"eval\", EnvConfig{\"emacs\"}, `\")`},\n\t\t{\"\", \"eval\", EnvConfig{\"none\"}, ``},\n\t\t{\"\", \"eval\", EnvConfig{\"fish\"}, `\";`},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tconf := CfgSet(tc.ec, tc.plz, tc.cmd)\n\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\tgot := strings.TrimSpace(conf.Suffix)\n\t\t\tif expected != got {\n\t\t\t\tt.Errorf(\"Expected suffix '%v' but got '%v'\", expected, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestUnsetScript(t *testing.T) {\n\tvar testCases = []struct {\n\t\tvars []string\n\t\tec EnvConfig\n\t\texpected string\n\t}{\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"\"}, `unset baz bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"bash\"}, `unset baz bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"powershell\"}, `Remove-Item Env:\\\\baz Env:\\\\bar`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"cmd\"}, `SET baz=\nSET bar=`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"fish\"}, `set -e baz;\nset -e bar;`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"emacs\"}, `(setenv \"baz\" nil)\n(setenv \"bar\" nil)`},\n\t\t{[]string{\"baz\", \"bar\"}, EnvConfig{\"none\"}, `baz bar`},\n\t}\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.ec.Shell, func(t *testing.T) {\n\t\t\tvar b bytes.Buffer\n\n\t\t\tif err := UnsetScript(tc.ec, &b, tc.vars); err != nil {\n\t\t\t\tt.Fatalf(\"Unexpected error when unseting script happen: %v\", err)\n\t\t\t} else {\n\t\t\t\twrittenMessage := strings.TrimSpace(b.String())\n\t\t\t\texpected := strings.TrimSpace(tc.expected)\n\t\t\t\tif writtenMessage != expected {\n\t\t\t\t\tt.Fatalf(\"Expected '%v' but got '%v' \", tc.expected, writtenMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDetectSet(t *testing.T) {\n\torgShellEnv := os.Getenv(\"SHELL\")\n\tdefer os.Setenv(\"SHELL\", orgShellEnv)\n\n\tos.Setenv(\"SHELL\", \"\/bin\/bash\")\n\tif s, err := Detect(); err != nil {\n\t\tt.Fatalf(\"unexpected error: '%v' during shell detection. Returned shell: %s\", err, s)\n\t} else if s == \"\" {\n\t\tt.Fatalf(\"Detected shell expected to be non empty string\")\n\t}\n}\n\nfunc TestDetectUnset(t *testing.T) {\n\torgShellEnv := os.Getenv(\"SHELL\")\n\tdefer os.Setenv(\"SHELL\", orgShellEnv)\n\n\tos.Unsetenv(\"SHELL\")\n\tif s, err := Detect(); err != nil {\n\t\tt.Fatalf(\"unexpected error: '%v' during shell detection. Returned shell: %s\", err, s)\n\t} else if s == \"\" {\n\t\tt.Fatalf(\"Detected shell expected to be non empty string\")\n\t}\n}\n\nfunc TestSetScript(t *testing.T) {\n\tec := EnvConfig{\"bash\"}\n\tvar w bytes.Buffer\n\tif err := SetScript(ec, &w, \"foo\", nil); err != nil {\n\t\tt.Fatalf(\"Unexpected error: '%v' during Setting script\", err)\n\t}\n\tif w.String() != \"foo\" {\n\t\tt.Fatalf(\"Expected foo writed by SetScript, but got '%v'\", w.String())\n\t}\n\tif ec.Shell == \"\" {\n\t\tt.Fatalf(\"Expected no empty shell\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gotest\n\nimport (\n\t\"flag\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/gtr\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n)\n\nvar matchTest = flag.String(\"match\", \"\", \"only test testdata matching this pattern\")\n\nvar tests = []struct {\n\tname string\n\tinput string\n\tevents []gtr.Event\n}{\n\t{\n\t\t\"run\",\n\t\tinp(\"=== RUN TestOne\",\n\t\t\t\"=== RUN TestTwo\/Subtest\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"run_test\", Name: \"TestOne\"},\n\t\t\t{Type: \"run_test\", Name: \"TestTwo\/Subtest\"},\n\t\t},\n\t},\n\t{\n\t\t\"pause\",\n\t\t\"=== PAUSE TestOne\",\n\t\t[]gtr.Event{\n\t\t\t{Type: \"pause_test\", Name: \"TestOne\"},\n\t\t},\n\t},\n\t{\n\t\t\"cont\",\n\t\t\"=== CONT TestOne\",\n\t\t[]gtr.Event{\n\t\t\t{Type: \"cont_test\", Name: \"TestOne\"},\n\t\t},\n\t},\n\t{\n\t\t\"end\",\n\t\tinp(\"--- PASS: TestOne (12.34 seconds)\",\n\t\t\t\" --- SKIP: TestOne\/Subtest (0.00s)\",\n\t\t\t\" --- FAIL: TestOne\/Subtest\/#01 (0.35s)\",\n\t\t\t\"some text--- PASS: TestTwo (0.06 seconds)\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"end_test\", Name: \"TestOne\", Result: \"PASS\", Duration: 12_340 * time.Millisecond},\n\t\t\t{Type: \"end_test\", Name: \"TestOne\/Subtest\", Result: \"SKIP\", Indent: 1},\n\t\t\t{Type: \"end_test\", Name: \"TestOne\/Subtest\/#01\", Result: \"FAIL\", Duration: 350 * time.Millisecond, Indent: 2},\n\t\t\t{Type: \"output\", Data: \"some text\"},\n\t\t\t{Type: \"end_test\", Name: \"TestTwo\", Result: \"PASS\", Duration: 60 * time.Millisecond},\n\t\t},\n\t},\n\t{\n\t\t\"status\",\n\t\tinp(\"PASS\",\n\t\t\t\"FAIL\",\n\t\t\t\"SKIP\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"status\", Result: \"PASS\"},\n\t\t\t{Type: \"status\", Result: \"FAIL\"},\n\t\t\t{Type: \"status\", Result: \"SKIP\"},\n\t\t},\n\t},\n\t{\n\t\t\"summary\",\n\t\tinp(\"ok package\/name\/ok 0.100s\",\n\t\t\t\"FAIL package\/name\/failing [build failed]\",\n\t\t\t\"FAIL package\/other\/failing [setup failed]\",\n\t\t\t\"ok package\/other (cached)\",\n\t\t\t\"ok \tpackage\/name 0.400s coverage: 10.0% of statements\",\n\t\t\t\"ok \tpackage\/name 4.200s coverage: 99.8% of statements in fmt, encoding\/xml\",\n\t\t\t\"? \tpackage\/name\t[no test files]\",\n\t\t\t\"ok \tpackage\/name\t0.001s [no tests to run]\",\n\t\t\t\"ok \tpackage\/name\t(cached) [no tests to run]\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"summary\", Name: \"package\/name\/ok\", Result: \"ok\", Duration: 100 * time.Millisecond},\n\t\t\t{Type: \"summary\", Name: \"package\/name\/failing\", Result: \"FAIL\", Data: \"[build failed]\"},\n\t\t\t{Type: \"summary\", Name: \"package\/other\/failing\", Result: \"FAIL\", Data: \"[setup failed]\"},\n\t\t\t{Type: \"summary\", Name: \"package\/other\", Result: \"ok\", Data: \"(cached)\"},\n\t\t\t{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 400 * time.Millisecond, CovPct: 10},\n\t\t\t{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 4200 * time.Millisecond, CovPct: 99.8, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t\t\t{Type: \"summary\", Name: \"package\/name\", Result: \"?\", Data: \"[no test files]\"},\n\t\t\t{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 1 * time.Millisecond, Data: \"[no tests to run]\"},\n\t\t\t{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Data: \"(cached) [no tests to run]\"},\n\t\t},\n\t},\n\t{\n\t\t\"coverage\",\n\t\tinp(\"coverage: 10% of statements\",\n\t\t\t\"coverage: 10% of statements in fmt, encoding\/xml\",\n\t\t\t\"coverage: 13.37% of statements\",\n\t\t\t\"coverage: 99.8% of statements in fmt, encoding\/xml\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"coverage\", CovPct: 10},\n\t\t\t{Type: \"coverage\", CovPct: 10, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t\t\t{Type: \"coverage\", CovPct: 13.37},\n\t\t\t{Type: \"coverage\", CovPct: 99.8, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t\t},\n\t},\n\t{\n\t\t\"benchmark\",\n\t\tinp(\"BenchmarkOne-8 2000000\t 604 ns\/op\",\n\t\t\t\"BenchmarkTwo-16 30000\t52568 ns\/op\t24879 B\/op\t494 allocs\/op\",\n\t\t\t\"BenchmarkThree 2000000000\t 0.26 ns\/op\",\n\t\t\t\"BenchmarkFour-8 \t 10000\t 104427 ns\/op\t 95.76 MB\/s\t 40629 B\/op\t 5 allocs\/op\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"benchmark\", Name: \"BenchmarkOne\", Iterations: 2_000_000, NsPerOp: 604},\n\t\t\t{Type: \"benchmark\", Name: \"BenchmarkTwo\", Iterations: 30_000, NsPerOp: 52_568, BytesPerOp: 24_879, AllocsPerOp: 494},\n\t\t\t{Type: \"benchmark\", Name: \"BenchmarkThree\", Iterations: 2_000_000_000, NsPerOp: 0.26},\n\t\t\t{Type: \"benchmark\", Name: \"BenchmarkFour\", Iterations: 10_000, NsPerOp: 104_427, MBPerSec: 95.76, BytesPerOp: 40_629, AllocsPerOp: 5},\n\t\t},\n\t},\n\t{\n\t\t\"build output\",\n\t\tinp(\"# package\/name\/failing1\",\n\t\t\t\"# package\/name\/failing2 [package\/name\/failing2.test]\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"build_output\", Name: \"package\/name\/failing1\"},\n\t\t\t{Type: \"build_output\", Name: \"package\/name\/failing2\"},\n\t\t},\n\t},\n\t{\n\t\t\"output\",\n\t\tinp(\"single line stdout\",\n\t\t\t\"# some more output\",\n\t\t\t\"\\tfile_test.go:11: Error message\",\n\t\t\t\"\\tfile_test.go:12: Longer\",\n\t\t\t\"\\t\\terror\",\n\t\t\t\"\\t\\tmessage.\",\n\t\t),\n\t\t[]gtr.Event{\n\t\t\t{Type: \"output\", Data: \"single line stdout\"},\n\t\t\t{Type: \"output\", Data: \"# some more output\"},\n\t\t\t{Type: \"output\", Data: \"\\tfile_test.go:11: Error message\"},\n\t\t\t{Type: \"output\", Data: \"\\tfile_test.go:12: Longer\"},\n\t\t\t{Type: \"output\", Data: \"\\t\\terror\"},\n\t\t\t{Type: \"output\", Data: \"\\t\\tmessage.\"},\n\t\t},\n\t},\n}\n\nfunc TestParse(t *testing.T) {\n\tmatchRegex := compileMatch(t)\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif !matchRegex.MatchString(test.name) || test.input == \"\" {\n\t\t\t\tt.SkipNow()\n\t\t\t}\n\t\t\ttestParse(t, test.name, test.input, test.events)\n\t\t})\n\t}\n}\n\nfunc testParse(t *testing.T, name, input string, want []gtr.Event) {\n\tgot, err := Parse(strings.NewReader(input))\n\tif err != nil {\n\t\tt.Errorf(\"Parse(%s) error: %v\", name, err)\n\t\treturn\n\t}\n\n\tif diff := cmp.Diff(got, want); diff != \"\" {\n\t\tt.Errorf(\"Parse returned unexpected events, input:\\n%s\\ndiff (-got, +want):\\n%v\", input, diff)\n\t}\n}\n\nfunc compileMatch(t *testing.T) *regexp.Regexp {\n\trx, err := regexp.Compile(*matchTest)\n\tif err != nil {\n\t\tt.Fatalf(\"Error compiling -match flag %q: %v\", *matchTest, err)\n\t}\n\treturn rx\n}\n\nfunc inp(lines ...string) string {\n\treturn strings.Join(lines, \"\\n\")\n}\n<commit_msg>parser\/gotest: refactor tests to run one test per input line<commit_after>package gotest\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/gtr\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n)\n\ntype parseLineTest struct {\n\tinput string\n\tevents interface{}\n}\n\nvar parseLineTests = []parseLineTest{\n\t{\n\t\t\"=== RUN TestOne\",\n\t\tgtr.Event{Type: \"run_test\", Name: \"TestOne\"},\n\t},\n\t{\n\t\t\"=== RUN TestTwo\/Subtest\",\n\t\tgtr.Event{Type: \"run_test\", Name: \"TestTwo\/Subtest\"},\n\t},\n\t{\n\t\t\"=== PAUSE TestOne\",\n\t\tgtr.Event{Type: \"pause_test\", Name: \"TestOne\"},\n\t},\n\t{\n\t\t\"=== CONT TestOne\",\n\t\tgtr.Event{Type: \"cont_test\", Name: \"TestOne\"},\n\t},\n\t{\n\t\t\"--- PASS: TestOne (12.34 seconds)\",\n\t\tgtr.Event{Type: \"end_test\", Name: \"TestOne\", Result: \"PASS\", Duration: 12_340 * time.Millisecond},\n\t},\n\t{\n\t\t\" --- SKIP: TestOne\/Subtest (0.00s)\",\n\t\tgtr.Event{Type: \"end_test\", Name: \"TestOne\/Subtest\", Result: \"SKIP\", Indent: 1},\n\t},\n\t{\n\t\t\" --- FAIL: TestOne\/Subtest\/#01 (0.35s)\",\n\t\tgtr.Event{Type: \"end_test\", Name: \"TestOne\/Subtest\/#01\", Result: \"FAIL\", Duration: 350 * time.Millisecond, Indent: 2},\n\t},\n\t{\n\t\t\"some text--- PASS: TestTwo (0.06 seconds)\",\n\t\t[]gtr.Event{\n\t\t\t{Type: \"output\", Data: \"some text\"},\n\t\t\t{Type: \"end_test\", Name: \"TestTwo\", Result: \"PASS\", Duration: 60 * time.Millisecond},\n\t\t},\n\t},\n\t{\n\t\t\"PASS\",\n\t\tgtr.Event{Type: \"status\", Result: \"PASS\"},\n\t},\n\t{\n\t\t\"FAIL\",\n\t\tgtr.Event{Type: \"status\", Result: \"FAIL\"},\n\t},\n\t{\n\t\t\"SKIP\",\n\t\tgtr.Event{Type: \"status\", Result: \"SKIP\"},\n\t},\n\t{\n\t\t\"ok package\/name\/ok 0.100s\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\/ok\", Result: \"ok\", Duration: 100 * time.Millisecond},\n\t},\n\t{\n\t\t\"FAIL package\/name\/failing [build failed]\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\/failing\", Result: \"FAIL\", Data: \"[build failed]\"},\n\t},\n\t{\n\t\t\"FAIL package\/other\/failing [setup failed]\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/other\/failing\", Result: \"FAIL\", Data: \"[setup failed]\"},\n\t},\n\t{\n\t\t\"ok package\/other (cached)\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/other\", Result: \"ok\", Data: \"(cached)\"},\n\t},\n\t{\n\t\t\"ok \tpackage\/name 0.400s coverage: 10.0% of statements\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 400 * time.Millisecond, CovPct: 10},\n\t},\n\t{\n\t\t\"ok \tpackage\/name 4.200s coverage: 99.8% of statements in fmt, encoding\/xml\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 4200 * time.Millisecond, CovPct: 99.8, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t},\n\t{\n\t\t\"? \tpackage\/name\t[no test files]\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\", Result: \"?\", Data: \"[no test files]\"},\n\t},\n\t{\n\t\t\"ok \tpackage\/name\t0.001s [no tests to run]\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Duration: 1 * time.Millisecond, Data: \"[no tests to run]\"},\n\t},\n\t{\n\t\t\"ok \tpackage\/name\t(cached) [no tests to run]\",\n\t\tgtr.Event{Type: \"summary\", Name: \"package\/name\", Result: \"ok\", Data: \"(cached) [no tests to run]\"},\n\t},\n\t{\n\t\t\"coverage: 10% of statements\",\n\t\tgtr.Event{Type: \"coverage\", CovPct: 10},\n\t},\n\t{\n\t\t\"coverage: 10% of statements in fmt, encoding\/xml\",\n\t\tgtr.Event{Type: \"coverage\", CovPct: 10, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t},\n\t{\n\t\t\"coverage: 13.37% of statements\",\n\t\tgtr.Event{Type: \"coverage\", CovPct: 13.37},\n\t},\n\t{\n\t\t\"coverage: 99.8% of statements in fmt, encoding\/xml\",\n\t\tgtr.Event{Type: \"coverage\", CovPct: 99.8, CovPackages: []string{\"fmt\", \"encoding\/xml\"}},\n\t},\n\t{\n\t\t\"BenchmarkOne-8 2000000\t 604 ns\/op\",\n\t\tgtr.Event{Type: \"benchmark\", Name: \"BenchmarkOne\", Iterations: 2_000_000, NsPerOp: 604},\n\t},\n\t{\n\t\t\"BenchmarkTwo-16 30000\t52568 ns\/op\t24879 B\/op\t494 allocs\/op\",\n\t\tgtr.Event{Type: \"benchmark\", Name: \"BenchmarkTwo\", Iterations: 30_000, NsPerOp: 52_568, BytesPerOp: 24_879, AllocsPerOp: 494},\n\t},\n\t{\n\t\t\"BenchmarkThree 2000000000\t 0.26 ns\/op\",\n\t\tgtr.Event{Type: \"benchmark\", Name: \"BenchmarkThree\", Iterations: 2_000_000_000, NsPerOp: 0.26},\n\t},\n\t{\n\t\t\"BenchmarkFour-8 \t 10000\t 104427 ns\/op\t 95.76 MB\/s\t 40629 B\/op\t 5 allocs\/op\",\n\t\tgtr.Event{Type: \"benchmark\", Name: \"BenchmarkFour\", Iterations: 10_000, NsPerOp: 104_427, MBPerSec: 95.76, BytesPerOp: 40_629, AllocsPerOp: 5},\n\t},\n\t{\n\t\t\"# package\/name\/failing1\",\n\t\tgtr.Event{Type: \"build_output\", Name: \"package\/name\/failing1\"},\n\t},\n\t{\n\t\t\"# package\/name\/failing2 [package\/name\/failing2.test]\",\n\t\tgtr.Event{Type: \"build_output\", Name: \"package\/name\/failing2\"},\n\t},\n\t{\n\t\t\"single line stdout\",\n\t\tgtr.Event{Type: \"output\", Data: \"single line stdout\"},\n\t},\n\t{\n\t\t\"# some more output\",\n\t\tgtr.Event{Type: \"output\", Data: \"# some more output\"},\n\t},\n\t{\n\t\t\"\\tfile_test.go:11: Error message\",\n\t\tgtr.Event{Type: \"output\", Data: \"\\tfile_test.go:11: Error message\"},\n\t},\n\t{\n\t\t\"\\tfile_test.go:12: Longer\",\n\t\tgtr.Event{Type: \"output\", Data: \"\\tfile_test.go:12: Longer\"},\n\t},\n\t{\n\t\t\"\\t\\terror\",\n\t\tgtr.Event{Type: \"output\", Data: \"\\t\\terror\"},\n\t},\n\t{\n\t\t\"\\t\\tmessage.\",\n\t\tgtr.Event{Type: \"output\", Data: \"\\t\\tmessage.\"},\n\t},\n}\n\nfunc TestParseLine(t *testing.T) {\n\tfor i, test := range parseLineTests {\n\t\tvar want []gtr.Event\n\t\tswitch e := test.events.(type) {\n\t\tcase gtr.Event:\n\t\t\twant = []gtr.Event{e}\n\t\tcase []gtr.Event:\n\t\t\twant = e\n\t\tdefault:\n\t\t\tpanic(\"invalid events type\")\n\t\t}\n\n\t\tvar types []string\n\t\tfor _, e := range want {\n\t\t\ttypes = append(types, e.Type)\n\t\t}\n\n\t\tname := fmt.Sprintf(\"%d %s\", i+1, strings.Join(types, \",\"))\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttestParseLine(t, &parser{}, test.input, want)\n\t\t})\n\t}\n}\n\nfunc testParseLine(t *testing.T, parser *parser, input string, want []gtr.Event) {\n\tparser.parseLine(input)\n\tgot := parser.events\n\tif diff := cmp.Diff(got, want); diff != \"\" {\n\t\tt.Errorf(\"parseLine(%q) returned unexpected events, diff (-got, +want):\\n%v\", input, diff)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package smokescreen\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"strings\"\n)\n\ntype EgressAclRule struct {\n\tProject string\n\tPolicy ConfigEnforcementPolicy\n\tDomainGlob []string\n}\n\ntype EgressAclConfig struct {\n\tServices map[string]EgressAclRule\n\tDefault *EgressAclRule\n}\n\nfunc (ew *EgressAclConfig) Decide(fromService string, toHost string) (EgressAclDecision, error) {\n\n\tvar (\n\t\tfound bool\n\t\tservice EgressAclRule\n\t)\n\tif fromService == \"\" {\n\t\tfound = false\n\t} else {\n\t\tservice, found = ew.Services[fromService]\n\t}\n\n\tif !found && ew.Default != nil {\n\t\tfound = true\n\t\tservice = *ew.Default\n\t}\n\n\tif !found {\n\t\treturn 0, fmt.Errorf(\"unknown role role=%#v\", fromService)\n\t}\n\n\tif service.Policy == ConfigEnforcementPolicyOpen {\n\t\treturn EgressAclDecisionAllow, nil\n\t}\n\n\tmatches := false\n\tfor _, host := range service.DomainGlob {\n\t\tif len(host) > 0 && host[0] == '*' {\n\t\t\tpostfix := host[1:]\n\t\t\tif strings.HasSuffix(toHost, postfix) {\n\t\t\t\tmatches = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif host == toHost {\n\t\t\t\tmatches = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch service.Policy {\n\tcase ConfigEnforcementPolicyReport:\n\t\tif matches {\n\t\t\treturn EgressAclDecisionAllow, nil\n\t\t} else {\n\t\t\treturn EgressAclDecisionAllowAndReport, nil\n\t\t}\n\tcase ConfigEnforcementPolicyEnforce:\n\t\tif matches {\n\t\t\treturn EgressAclDecisionAllow, nil\n\t\t} else {\n\t\t\treturn EgressAclDecisionDeny, nil\n\t\t}\n\tdefault:\n\t\treturn 0, errors.New(\"unexpected state\")\n\t}\n}\n\nfunc (ew *EgressAclConfig) Project(fromService string) (string, error) {\n\tservice, found := ew.Services[fromService]\n\n\tif found {\n\t\treturn service.Project, nil\n\t}\n\n\tif ew.Default != nil {\n\t\treturn ew.Default.Project, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"warn: No known project: role=%#v\\n\", fromService)\n}\n\n\/\/ Configuration\n\ntype ServiceRule struct {\n\tName string `yaml:\"name\"`\n\tProject string `yaml:\"project\"`\n\tAction string `yaml:\"action\"`\n\tAllowedHosts []string `yaml:\"allowed_domains\"`\n}\n\ntype EgressAclConfiguration struct {\n\tServices []ServiceRule `yaml:\"services\"`\n\tDefault *ServiceRule `yaml:\"default\"`\n\tVersion string `yaml:\"version\"`\n}\n\nfunc LoadFromYamlFile(config *Config, aclPath string, disabledAclPolicyActions []string) (*EgressAclConfig, error) {\n\tfail := func(err error) (*EgressAclConfig, error) { return nil, err }\n\n\tyamlConfig := EgressAclConfiguration{}\n\n\tyamlFile, err := ioutil.ReadFile(aclPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not load whitelist configuration at '%s': #%v\", aclPath, err)\n\t\treturn nil, err\n\t}\n\n\terr = yaml.Unmarshal(yamlFile, &yamlConfig)\n\n\tif yamlConfig.Version != \"v1\" {\n\t\treturn fail(fmt.Errorf(\"Expected version \\\"v1\\\" got %#v\\n\", yamlConfig.Version))\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tacl := EgressAclConfig{Services: make(map[string]EgressAclRule)}\n\n\tif yamlConfig.Services == nil {\n\t\treturn nil, errors.New(\"Top level list 'services' is missing\")\n\t}\n\n\tfor _, v := range yamlConfig.Services {\n\t\tres, err := aclConfigToRule(&v, disabledAclPolicyActions)\n\t\tif err != nil {\n\t\t\tconfig.Log.Error(\"gnored policy\", err)\n\t\t} else {\n\t\t\tacl.Services[v.Name] = res\n\t\t}\n\t}\n\n\tif yamlConfig.Default != nil {\n\t\tres, err := aclConfigToRule(yamlConfig.Default, disabledAclPolicyActions)\n\t\tif err != nil {\n\t\t\tconfig.Log.Error(\"gnored policy\", err)\n\t\t} else {\n\t\t\tacl.Default = &res\n\t\t}\n\t}\n\treturn &acl, nil\n}\n\nfunc aclConfigToRule(v *ServiceRule, disabledAclPolicyAction []string) (EgressAclRule, error) {\n\tvar enforcementPolicy ConfigEnforcementPolicy\n\n\t\/\/ Validate policy action\n\tfor _, disabledAction := range disabledAclPolicyAction {\n\t\tif disabledAction == v.Action {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"policy action %#v has been disabled but is used in rule for service %#v\", disabledAction, v.Name)\n\t\t}\n\t}\n\n\t\/\/ Validate hosts\n\n\tfor _, host := range v.AllowedHosts {\n\t\tif !strings.HasPrefix(host, \"*.\") && strings.HasPrefix(host, \"*\") {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"glob must represent a full prefxi (sub)domain\")\n\t\t}\n\n\t\t\/\/ Check for stars elsewhere\n\t\thostToCheck := host\n\t\tif strings.HasPrefix(hostToCheck, \"*\") {\n\t\t\thostToCheck = hostToCheck[1:]\n\t\t}\n\t\tif strings.Contains(hostToCheck, \"*\") {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"globs are only supported as prefix\")\n\t\t}\n\t}\n\n\tswitch v.Action {\n\tcase \"open\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyOpen\n\tcase \"report\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyReport\n\tcase \"enforce\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyEnforce\n\tdefault:\n\t\tenforcementPolicy = 0\n\t\treturn EgressAclRule{}, errors.New(fmt.Sprintf(\"Unknown action '%s' under '%s'.\", v.Action, v.Name))\n\t}\n\n\treturn EgressAclRule{\n\t\tProject: v.Project,\n\t\tPolicy: enforcementPolicy,\n\t\tDomainGlob: v.AllowedHosts,\n\t}, nil\n}\n<commit_msg>Generify YAML ACL loading<commit_after>package smokescreen\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"io\"\n\t\"os\"\n)\n\ntype EgressAclRule struct {\n\tProject string\n\tPolicy ConfigEnforcementPolicy\n\tDomainGlob []string\n}\n\ntype EgressAclConfig struct {\n\tServices map[string]EgressAclRule\n\tDefault *EgressAclRule\n}\n\nfunc (ew *EgressAclConfig) Decide(fromService string, toHost string) (EgressAclDecision, error) {\n\n\tvar (\n\t\tfound bool\n\t\tservice EgressAclRule\n\t)\n\tif fromService == \"\" {\n\t\tfound = false\n\t} else {\n\t\tservice, found = ew.Services[fromService]\n\t}\n\n\tif !found && ew.Default != nil {\n\t\tfound = true\n\t\tservice = *ew.Default\n\t}\n\n\tif !found {\n\t\treturn 0, fmt.Errorf(\"unknown role role=%#v\", fromService)\n\t}\n\n\tif service.Policy == ConfigEnforcementPolicyOpen {\n\t\treturn EgressAclDecisionAllow, nil\n\t}\n\n\tmatches := false\n\tfor _, host := range service.DomainGlob {\n\t\tif len(host) > 0 && host[0] == '*' {\n\t\t\tpostfix := host[1:]\n\t\t\tif strings.HasSuffix(toHost, postfix) {\n\t\t\t\tmatches = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif host == toHost {\n\t\t\t\tmatches = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch service.Policy {\n\tcase ConfigEnforcementPolicyReport:\n\t\tif matches {\n\t\t\treturn EgressAclDecisionAllow, nil\n\t\t} else {\n\t\t\treturn EgressAclDecisionAllowAndReport, nil\n\t\t}\n\tcase ConfigEnforcementPolicyEnforce:\n\t\tif matches {\n\t\t\treturn EgressAclDecisionAllow, nil\n\t\t} else {\n\t\t\treturn EgressAclDecisionDeny, nil\n\t\t}\n\tdefault:\n\t\treturn 0, errors.New(\"unexpected state\")\n\t}\n}\n\nfunc (ew *EgressAclConfig) Project(fromService string) (string, error) {\n\tservice, found := ew.Services[fromService]\n\n\tif found {\n\t\treturn service.Project, nil\n\t}\n\n\tif ew.Default != nil {\n\t\treturn ew.Default.Project, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"warn: No known project: role=%#v\\n\", fromService)\n}\n\n\/\/ Configuration\n\ntype ServiceRule struct {\n\tName string `yaml:\"name\"`\n\tProject string `yaml:\"project\"`\n\tAction string `yaml:\"action\"`\n\tAllowedHosts []string `yaml:\"allowed_domains\"`\n}\n\ntype EgressAclConfiguration struct {\n\tServices []ServiceRule `yaml:\"services\"`\n\tDefault *ServiceRule `yaml:\"default\"`\n\tVersion string `yaml:\"version\"`\n}\n\nfunc LoadFromYamlFile(config *Config, aclPath string, disabledAclPolicyActions []string) (*EgressAclConfig, error) {\n\tfile, err := os.Open(aclPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn LoadFromReader(config, file, disabledAclPolicyActions)\n}\n\nfunc LoadFromReader(config *Config, aclReader io.Reader, disabledAclPolicyActions []string) (*EgressAclConfig, error) {\n\tfail := func(err error) (*EgressAclConfig, error) { return nil, err }\n\n\tyamlConfig := EgressAclConfiguration{}\n\n\tyamlFile, err := ioutil.ReadAll(aclReader)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load acl configuration\")\n\t}\n\n\terr = yaml.Unmarshal(yamlFile, &yamlConfig)\n\n\tif yamlConfig.Version != \"v1\" {\n\t\treturn fail(fmt.Errorf(\"Expected version \\\"v1\\\" got %#v\\n\", yamlConfig.Version))\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tacl := EgressAclConfig{Services: make(map[string]EgressAclRule)}\n\n\tif yamlConfig.Services == nil {\n\t\treturn nil, errors.New(\"Top level list 'services' is missing\")\n\t}\n\n\tfor _, v := range yamlConfig.Services {\n\t\tres, err := aclConfigToRule(&v, disabledAclPolicyActions)\n\t\tif err != nil {\n\t\t\tconfig.Log.Error(\"gnored policy\", err)\n\t\t} else {\n\t\t\tacl.Services[v.Name] = res\n\t\t}\n\t}\n\n\tif yamlConfig.Default != nil {\n\t\tres, err := aclConfigToRule(yamlConfig.Default, disabledAclPolicyActions)\n\t\tif err != nil {\n\t\t\tconfig.Log.Error(\"gnored policy\", err)\n\t\t} else {\n\t\t\tacl.Default = &res\n\t\t}\n\t}\n\treturn &acl, nil\n}\n\nfunc aclConfigToRule(v *ServiceRule, disabledAclPolicyAction []string) (EgressAclRule, error) {\n\tvar enforcementPolicy ConfigEnforcementPolicy\n\n\t\/\/ Validate policy action\n\tfor _, disabledAction := range disabledAclPolicyAction {\n\t\tif disabledAction == v.Action {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"policy action %#v has been disabled but is used in rule for service %#v\", disabledAction, v.Name)\n\t\t}\n\t}\n\n\t\/\/ Validate hosts\n\n\tfor _, host := range v.AllowedHosts {\n\t\tif !strings.HasPrefix(host, \"*.\") && strings.HasPrefix(host, \"*\") {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"glob must represent a full prefxi (sub)domain\")\n\t\t}\n\n\t\t\/\/ Check for stars elsewhere\n\t\thostToCheck := host\n\t\tif strings.HasPrefix(hostToCheck, \"*\") {\n\t\t\thostToCheck = hostToCheck[1:]\n\t\t}\n\t\tif strings.Contains(hostToCheck, \"*\") {\n\t\t\treturn EgressAclRule{}, fmt.Errorf(\"globs are only supported as prefix\")\n\t\t}\n\t}\n\n\tswitch v.Action {\n\tcase \"open\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyOpen\n\tcase \"report\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyReport\n\tcase \"enforce\":\n\t\tenforcementPolicy = ConfigEnforcementPolicyEnforce\n\tdefault:\n\t\tenforcementPolicy = 0\n\t\treturn EgressAclRule{}, errors.New(fmt.Sprintf(\"Unknown action '%s' under '%s'.\", v.Action, v.Name))\n\t}\n\n\treturn EgressAclRule{\n\t\tProject: v.Project,\n\t\tPolicy: enforcementPolicy,\n\t\tDomainGlob: v.AllowedHosts,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Spencer Kimball (spencer@cockroachlabs.com)\n\npackage sql_test\n\nimport (\n\t\"bytes\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/base\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/storage\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\/sqlutils\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\/testcluster\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\/leaktest\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TestAmbiguousCommit verifies that an ambiguous commit error is\n\/\/ returned from sql.Exec in situations where an EndTransaction is\n\/\/ part of a batch and the disposition of the batch request is unknown\n\/\/ after a network failure or timeout. The goal here is to prevent\n\/\/ spurious transaction retries after the initial transaction actually\n\/\/ succeeded. In cases where there's an auto-generated primary key,\n\/\/ this can result in silent duplications. In cases where the primary\n\/\/ key is specified in advance, it can result in violated uniqueness\n\/\/ constraints, or duplicate key violations. See #6053, #7604, and #10023.\nfunc TestAmbiguousCommit(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\t\/\/ Create a command filter which prevents EndTransaction from\n\t\/\/ returning a response.\n\tparams := base.TestServerArgs{}\n\tcommitted := make(chan struct{})\n\twait := make(chan struct{})\n\tvar tableStartKey atomic.Value\n\tvar responseCount int32\n\n\t\/\/ Prevent the first conditional put on table 51 from returning to\n\t\/\/ waiting client in order to simulate a lost update or slow network\n\t\/\/ link.\n\tparams.Knobs.Store = &storage.StoreTestingKnobs{\n\t\tTestingResponseFilter: func(ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error {\n\t\t\treq, ok := ba.GetArg(roachpb.ConditionalPut)\n\t\t\ttsk := tableStartKey.Load()\n\t\t\tif tsk == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif !ok || !bytes.HasPrefix(req.Header().Key, tsk.([]byte)) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t\/\/ If this is the first write to the table, wait to respond to the\n\t\t\t\/\/ client in order to simulate a retry.\n\t\t\tif atomic.AddInt32(&responseCount, 1) == 1 {\n\t\t\t\tclose(committed)\n\t\t\t\t<-wait\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\ttestClusterArgs := base.TestClusterArgs{\n\t\tReplicationMode: base.ReplicationAuto,\n\t\tServerArgs: params,\n\t}\n\tconst numReplicas = 3\n\ttc := testcluster.StartTestCluster(t, numReplicas, testClusterArgs)\n\tdefer tc.Stopper().Stop()\n\tif err := tc.WaitForFullReplication(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsqlDB := sqlutils.MakeSQLRunner(t, tc.Conns[0])\n\n\tsqlDB.Exec(`CREATE DATABASE test`)\n\tsqlDB.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)\n\n\ttableID := sqlutils.QueryTableID(t, tc.Conns[0], \"test\", \"t\")\n\ttableStartKey.Store(keys.MakeTablePrefix(tableID))\n\n\t\/\/ Wait for new table to split.\n\tutil.SucceedsSoon(t, func() error {\n\t\tdesc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey.Load().([]byte)))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !desc.StartKey.Equal(tableStartKey.Load().([]byte)) {\n\t\t\treturn errors.Errorf(\"expected range start key %s; got %s\",\n\t\t\t\ttableStartKey.Load().([]byte), desc.StartKey)\n\t\t}\n\t\treturn nil\n\t})\n\n\t\/\/ Lookup the lease.\n\ttableRangeDesc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey.Load().([]byte)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleaseHolder, err := tc.FindRangeLeaseHolder(\n\t\t&tableRangeDesc,\n\t\t&testcluster.ReplicationTarget{\n\t\t\tNodeID: tc.Servers[0].GetNode().Descriptor.NodeID,\n\t\t\tStoreID: tc.Servers[0].GetFirstStoreID(),\n\t\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ In a goroutine, send an insert which will commit but not return\n\t\/\/ from the leader (due to the command filter we installed on node 0).\n\tsqlErrCh := make(chan error, 1)\n\tgo func() {\n\t\t\/\/ Use a connection other than through the node which is the current\n\t\t\/\/ leaseholder to ensure that we use GRPC instead of the local server.\n\t\t\/\/ If we use a local server, the hanging response we simulate takes\n\t\t\/\/ up the dist sender thread of execution because local requests are\n\t\t\/\/ executed synchronously.\n\t\tsqlConn := tc.Conns[leaseHolder.NodeID%numReplicas]\n\t\t_, err := sqlConn.Exec(`INSERT INTO test.t (v) VALUES (1)`)\n\t\tsqlErrCh <- err\n\t\tclose(wait)\n\t}()\n\t\/\/ Wait until the insert has committed.\n\t<-committed\n\n\t\/\/ Find a node other than the current lease holder to transfer the lease to.\n\tfor i, s := range tc.Servers {\n\t\tif leaseHolder.StoreID != s.GetFirstStoreID() {\n\t\t\tif err := tc.TransferRangeLease(&tableRangeDesc, tc.Target(i)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Close the wait channel and wait for the error from the pending SQL insert.\n\tif err := <-sqlErrCh; !testutils.IsError(err, \"transaction commit result is ambiguous\") {\n\t\tt.Errorf(\"expected ambiguous commit error; got %v\", err)\n\t}\n\n\t\/\/ Verify a single row exists in the table.\n\tvar rowCount int\n\tsqlDB.QueryRow(`SELECT COUNT(*) FROM test.t`).Scan(&rowCount)\n\tif rowCount != 1 {\n\t\tt.Errorf(\"expected 1 row but found %d\", rowCount)\n\t}\n}\n<commit_msg>sql: extract local variable to avoid races<commit_after>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Spencer Kimball (spencer@cockroachlabs.com)\n\npackage sql_test\n\nimport (\n\t\"bytes\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/base\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/storage\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\/sqlutils\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/testutils\/testcluster\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\/leaktest\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TestAmbiguousCommit verifies that an ambiguous commit error is\n\/\/ returned from sql.Exec in situations where an EndTransaction is\n\/\/ part of a batch and the disposition of the batch request is unknown\n\/\/ after a network failure or timeout. The goal here is to prevent\n\/\/ spurious transaction retries after the initial transaction actually\n\/\/ succeeded. In cases where there's an auto-generated primary key,\n\/\/ this can result in silent duplications. In cases where the primary\n\/\/ key is specified in advance, it can result in violated uniqueness\n\/\/ constraints, or duplicate key violations. See #6053, #7604, and #10023.\nfunc TestAmbiguousCommit(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\t\/\/ Create a command filter which prevents EndTransaction from\n\t\/\/ returning a response.\n\tparams := base.TestServerArgs{}\n\tcommitted := make(chan struct{})\n\twait := make(chan struct{})\n\tvar tableStartKey atomic.Value\n\tvar responseCount int32\n\n\t\/\/ Prevent the first conditional put on table 51 from returning to\n\t\/\/ waiting client in order to simulate a lost update or slow network\n\t\/\/ link.\n\tparams.Knobs.Store = &storage.StoreTestingKnobs{\n\t\tTestingResponseFilter: func(ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error {\n\t\t\treq, ok := ba.GetArg(roachpb.ConditionalPut)\n\t\t\ttsk := tableStartKey.Load()\n\t\t\tif tsk == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif !ok || !bytes.HasPrefix(req.Header().Key, tsk.([]byte)) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t\/\/ If this is the first write to the table, wait to respond to the\n\t\t\t\/\/ client in order to simulate a retry.\n\t\t\tif atomic.AddInt32(&responseCount, 1) == 1 {\n\t\t\t\tclose(committed)\n\t\t\t\t<-wait\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\ttestClusterArgs := base.TestClusterArgs{\n\t\tReplicationMode: base.ReplicationAuto,\n\t\tServerArgs: params,\n\t}\n\tconst numReplicas = 3\n\ttc := testcluster.StartTestCluster(t, numReplicas, testClusterArgs)\n\tdefer tc.Stopper().Stop()\n\tif err := tc.WaitForFullReplication(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsqlDB := sqlutils.MakeSQLRunner(t, tc.Conns[0])\n\n\tsqlDB.Exec(`CREATE DATABASE test`)\n\tsqlDB.Exec(`CREATE TABLE test.t (k SERIAL PRIMARY KEY, v INT)`)\n\n\ttableID := sqlutils.QueryTableID(t, tc.Conns[0], \"test\", \"t\")\n\ttableStartKey.Store(keys.MakeTablePrefix(tableID))\n\n\t\/\/ Wait for new table to split.\n\tutil.SucceedsSoon(t, func() error {\n\t\tstartKey := tableStartKey.Load().([]byte)\n\n\t\tdesc, err := tc.LookupRange(keys.MakeRowSentinelKey(startKey))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !desc.StartKey.Equal(startKey) {\n\t\t\treturn errors.Errorf(\"expected range start key %s; got %s\",\n\t\t\t\tstartKey, desc.StartKey)\n\t\t}\n\t\treturn nil\n\t})\n\n\t\/\/ Lookup the lease.\n\ttableRangeDesc, err := tc.LookupRange(keys.MakeRowSentinelKey(tableStartKey.Load().([]byte)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tleaseHolder, err := tc.FindRangeLeaseHolder(\n\t\t&tableRangeDesc,\n\t\t&testcluster.ReplicationTarget{\n\t\t\tNodeID: tc.Servers[0].GetNode().Descriptor.NodeID,\n\t\t\tStoreID: tc.Servers[0].GetFirstStoreID(),\n\t\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ In a goroutine, send an insert which will commit but not return\n\t\/\/ from the leader (due to the command filter we installed on node 0).\n\tsqlErrCh := make(chan error, 1)\n\tgo func() {\n\t\t\/\/ Use a connection other than through the node which is the current\n\t\t\/\/ leaseholder to ensure that we use GRPC instead of the local server.\n\t\t\/\/ If we use a local server, the hanging response we simulate takes\n\t\t\/\/ up the dist sender thread of execution because local requests are\n\t\t\/\/ executed synchronously.\n\t\tsqlConn := tc.Conns[leaseHolder.NodeID%numReplicas]\n\t\t_, err := sqlConn.Exec(`INSERT INTO test.t (v) VALUES (1)`)\n\t\tsqlErrCh <- err\n\t\tclose(wait)\n\t}()\n\t\/\/ Wait until the insert has committed.\n\t<-committed\n\n\t\/\/ Find a node other than the current lease holder to transfer the lease to.\n\tfor i, s := range tc.Servers {\n\t\tif leaseHolder.StoreID != s.GetFirstStoreID() {\n\t\t\tif err := tc.TransferRangeLease(&tableRangeDesc, tc.Target(i)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Close the wait channel and wait for the error from the pending SQL insert.\n\tif err := <-sqlErrCh; !testutils.IsError(err, \"transaction commit result is ambiguous\") {\n\t\tt.Errorf(\"expected ambiguous commit error; got %v\", err)\n\t}\n\n\t\/\/ Verify a single row exists in the table.\n\tvar rowCount int\n\tsqlDB.QueryRow(`SELECT COUNT(*) FROM test.t`).Scan(&rowCount)\n\tif rowCount != 1 {\n\t\tt.Errorf(\"expected 1 row but found %d\", rowCount)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package donut\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc newDonutFileWriter(objectDir string) (Writer, error) {\n\tdataFile, err := os.OpenFile(path.Join(objectDir, \"data\"), os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn donutFileWriter{\n\t\troot: objectDir,\n\t\tfile: dataFile,\n\t\tmetadata: make(map[string]string),\n\t\tdonutMetadata: make(map[string]string),\n\t}, nil\n}\n\ntype donutFileWriter struct {\n\troot string\n\tfile *os.File\n\tmetadata map[string]string\n\tdonutMetadata map[string]string\n\terr error\n}\n\nfunc (d donutFileWriter) Write(data []byte) (int, error) {\n\treturn d.file.Write(data)\n}\n\nfunc (d donutFileWriter) Close() error {\n\tif d.err != nil {\n\t\treturn d.err\n\t}\n\tmetadata, _ := json.Marshal(d.metadata)\n\tioutil.WriteFile(path.Join(d.root, \"metadata.json\"), metadata, 0600)\n\tdonutMetadata, _ := json.Marshal(d.donutMetadata)\n\tioutil.WriteFile(path.Join(d.root, \"donutMetadata.json\"), donutMetadata, 0600)\n\n\treturn d.file.Close()\n}\n\nfunc (d donutFileWriter) CloseWithError(err error) error {\n\tif d.err != nil {\n\t\td.err = err\n\t}\n\treturn d.Close()\n}\n\nfunc (d donutFileWriter) SetMetadata(metadata map[string]string) error {\n\tfor k := range d.metadata {\n\t\tdelete(d.metadata, k)\n\t}\n\tfor k, v := range metadata {\n\t\td.metadata[k] = v\n\t}\n\treturn nil\n}\n\nfunc (d donutFileWriter) GetMetadata() (map[string]string, error) {\n\tmetadata := make(map[string]string)\n\tfor k, v := range d.metadata {\n\t\tmetadata[k] = v\n\t}\n\treturn metadata, nil\n}\n\nfunc (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error {\n\tfor k := range d.donutMetadata {\n\t\tdelete(d.donutMetadata, k)\n\t}\n\tfor k, v := range metadata {\n\t\td.donutMetadata[k] = v\n\t}\n\treturn nil\n}\n\nfunc (d donutFileWriter) GetDonutMetadata() (map[string]string, error) {\n\tdonutMetadata := make(map[string]string)\n\tfor k, v := range d.donutMetadata {\n\t\tdonutMetadata[k] = v\n\t}\n\treturn donutMetadata, nil\n}\n<commit_msg>Make sure we use O_EXCL with O_CREATE to make sure we don't trip over existing file<commit_after>package donut\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc newDonutFileWriter(objectDir string) (Writer, error) {\n\tdataFile, err := os.OpenFile(path.Join(objectDir, \"data\"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn donutFileWriter{\n\t\troot: objectDir,\n\t\tfile: dataFile,\n\t\tmetadata: make(map[string]string),\n\t\tdonutMetadata: make(map[string]string),\n\t}, nil\n}\n\ntype donutFileWriter struct {\n\troot string\n\tfile *os.File\n\tmetadata map[string]string\n\tdonutMetadata map[string]string\n\terr error\n}\n\nfunc (d donutFileWriter) Write(data []byte) (int, error) {\n\treturn d.file.Write(data)\n}\n\nfunc (d donutFileWriter) Close() error {\n\tif d.err != nil {\n\t\treturn d.err\n\t}\n\tmetadata, _ := json.Marshal(d.metadata)\n\tioutil.WriteFile(path.Join(d.root, \"metadata.json\"), metadata, 0600)\n\tdonutMetadata, _ := json.Marshal(d.donutMetadata)\n\tioutil.WriteFile(path.Join(d.root, \"donutMetadata.json\"), donutMetadata, 0600)\n\n\treturn d.file.Close()\n}\n\nfunc (d donutFileWriter) CloseWithError(err error) error {\n\tif d.err != nil {\n\t\td.err = err\n\t}\n\treturn d.Close()\n}\n\nfunc (d donutFileWriter) SetMetadata(metadata map[string]string) error {\n\tfor k := range d.metadata {\n\t\tdelete(d.metadata, k)\n\t}\n\tfor k, v := range metadata {\n\t\td.metadata[k] = v\n\t}\n\treturn nil\n}\n\nfunc (d donutFileWriter) GetMetadata() (map[string]string, error) {\n\tmetadata := make(map[string]string)\n\tfor k, v := range d.metadata {\n\t\tmetadata[k] = v\n\t}\n\treturn metadata, nil\n}\n\nfunc (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error {\n\tfor k := range d.donutMetadata {\n\t\tdelete(d.donutMetadata, k)\n\t}\n\tfor k, v := range metadata {\n\t\td.donutMetadata[k] = v\n\t}\n\treturn nil\n}\n\nfunc (d donutFileWriter) GetDonutMetadata() (map[string]string, error) {\n\tdonutMetadata := make(map[string]string)\n\tfor k, v := range d.donutMetadata {\n\t\tdonutMetadata[k] = v\n\t}\n\treturn donutMetadata, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package web\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\/integration\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/web\/headers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ MobileServiceHandler handles endpoints associated with mobile enabled services. It will list services in the namespace that\n\/\/ it knows about so that they can be rendered in the MCP\ntype MobileServiceHandler struct {\n\tlogger *logrus.Logger\n\tmobileIntegrationService *integration.MobileService\n\ttokenClientBuilder mobile.TokenScopedClientBuilder\n}\n\n\/\/ NewMobileServiceHandler returns a new MobileServiceHandler\nfunc NewMobileServiceHandler(logger *logrus.Logger, integrationService *integration.MobileService, tokenClientBuilder mobile.TokenScopedClientBuilder) *MobileServiceHandler {\n\treturn &MobileServiceHandler{\n\t\tlogger: logger,\n\t\tmobileIntegrationService: integrationService,\n\t\ttokenClientBuilder: tokenClientBuilder,\n\t}\n}\n\n\/\/ List allows you to list mobile services\nfunc (msh *MobileServiceHandler) List(rw http.ResponseWriter, req *http.Request) {\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tsvc, err := msh.mobileIntegrationService.DiscoverMobileServices(serviceCruder)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"attempted to list mobile services\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(svc); err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n}\n\nfunc (msh *MobileServiceHandler) Read(rw http.ResponseWriter, req *http.Request) {\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tparams := mux.Vars(req)\n\tserviceName := params[\"name\"]\n\twithIntegrations := req.URL.Query().Get(\"withIntegrations\")\n\tvar ms *mobile.Service\n\tvar err error\n\tif serviceName == \"\" {\n\t\thttp.Error(rw, \"service name cannot be empty \", http.StatusBadRequest)\n\t\treturn\n\t}\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\n\tif withIntegrations != \"\" {\n\t\tfmt.Println(\"with Integrations\", serviceName)\n\t\tms, err = msh.mobileIntegrationService.ReadMoileServiceAndIntegrations(serviceCruder, serviceName)\n\t\tif err != nil {\n\t\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tms, err = serviceCruder.Read(serviceName)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"MobileServiceHandler : failed to read mobile service \")\n\t\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\t\treturn\n\t\t}\n\t}\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(ms); err != nil {\n\t\terr = errors.Wrap(err, \"MobileServiceHandler: failed to encode response\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n}\n\n\/\/ Create will create a mobile service\nfunc (msh *MobileServiceHandler) Create(rw http.ResponseWriter, req *http.Request) {\n\tms := mobile.NewMobileService()\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"failed to setup service cruder based on token\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(ms); err != nil {\n\t\terr = errors.Wrap(err, \"failed to decode mobile app \")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tif err := serviceCruder.Create(ms); err != nil {\n\t\terr = errors.Wrap(err, \"failed to create mobile app\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusCreated)\n}\n\n\/\/ Configure configures components binding\nfunc (msh *MobileServiceHandler) Configure(rw http.ResponseWriter, req *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\n\tparams := mux.Vars(req)\n\tcomponent := strings.ToLower(params[\"component\"])\n\tsecret := strings.ToLower(params[\"secret\"])\n\tif len(component) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided component must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\tif len(secret) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided secret must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tmounter, err := msh.tokenClientBuilder.VolumeMounterUnmounter(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not create mounter\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tsvcCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not create service cruder\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\terr = msh.mobileIntegrationService.MountSecretForComponent(svcCruder, mounter, component, secret)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not mount secret: '\"+secret+\"' into component: '\"+component+\"'\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Deconfigure removes configuration for components binding\nfunc (msh *MobileServiceHandler) Deconfigure(rw http.ResponseWriter, req *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\n\tparams := mux.Vars(req)\n\tcomponent := params[\"component\"]\n\tsecret := params[\"secret\"]\n\n\tsvcCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not create service cruder\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tunmounter, err := msh.tokenClientBuilder.VolumeMounterUnmounter(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not create volume unmounter\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\terr = msh.mobileIntegrationService.UnmountSecretInComponent(svcCruder, unmounter, component, secret)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not unmount secret: '\"+secret+\"' from component: '\"+component+\"'\"), rw, msh.logger)\n\t\treturn\n\t}\n\treturn\n}\n<commit_msg>add missing validation<commit_after>package web\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/mobile\/integration\"\n\t\"github.com\/feedhenry\/mcp-standalone\/pkg\/web\/headers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ MobileServiceHandler handles endpoints associated with mobile enabled services. It will list services in the namespace that\n\/\/ it knows about so that they can be rendered in the MCP\ntype MobileServiceHandler struct {\n\tlogger *logrus.Logger\n\tmobileIntegrationService *integration.MobileService\n\ttokenClientBuilder mobile.TokenScopedClientBuilder\n}\n\n\/\/ NewMobileServiceHandler returns a new MobileServiceHandler\nfunc NewMobileServiceHandler(logger *logrus.Logger, integrationService *integration.MobileService, tokenClientBuilder mobile.TokenScopedClientBuilder) *MobileServiceHandler {\n\treturn &MobileServiceHandler{\n\t\tlogger: logger,\n\t\tmobileIntegrationService: integrationService,\n\t\ttokenClientBuilder: tokenClientBuilder,\n\t}\n}\n\n\/\/ List allows you to list mobile services\nfunc (msh *MobileServiceHandler) List(rw http.ResponseWriter, req *http.Request) {\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tsvc, err := msh.mobileIntegrationService.DiscoverMobileServices(serviceCruder)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"attempted to list mobile services\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(svc); err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n}\n\nfunc (msh *MobileServiceHandler) Read(rw http.ResponseWriter, req *http.Request) {\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tparams := mux.Vars(req)\n\tserviceName := params[\"name\"]\n\twithIntegrations := req.URL.Query().Get(\"withIntegrations\")\n\tvar ms *mobile.Service\n\tvar err error\n\tif serviceName == \"\" {\n\t\thttp.Error(rw, \"service name cannot be empty \", http.StatusBadRequest)\n\t\treturn\n\t}\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\n\tif withIntegrations != \"\" {\n\t\tfmt.Println(\"with Integrations\", serviceName)\n\t\tms, err = msh.mobileIntegrationService.ReadMoileServiceAndIntegrations(serviceCruder, serviceName)\n\t\tif err != nil {\n\t\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tms, err = serviceCruder.Read(serviceName)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"MobileServiceHandler : failed to read mobile service \")\n\t\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\t\treturn\n\t\t}\n\t}\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(ms); err != nil {\n\t\terr = errors.Wrap(err, \"MobileServiceHandler: failed to encode response\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n}\n\n\/\/ Create will create a mobile service\nfunc (msh *MobileServiceHandler) Create(rw http.ResponseWriter, req *http.Request) {\n\tms := mobile.NewMobileService()\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\tserviceCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"failed to setup service cruder based on token\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(ms); err != nil {\n\t\terr = errors.Wrap(err, \"failed to decode mobile app \")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\tif err := serviceCruder.Create(ms); err != nil {\n\t\terr = errors.Wrap(err, \"failed to create mobile app\")\n\t\thandleCommonErrorCases(err, rw, msh.logger)\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusCreated)\n}\n\n\/\/ Configure configures components binding\nfunc (msh *MobileServiceHandler) Configure(rw http.ResponseWriter, req *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\n\tparams := mux.Vars(req)\n\tcomponent := strings.ToLower(params[\"component\"])\n\tsecret := strings.ToLower(params[\"secret\"])\n\tif len(component) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided component must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\tif len(secret) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided secret must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tmounter, err := msh.tokenClientBuilder.VolumeMounterUnmounter(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not create mounter\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tsvcCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not create service cruder\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\terr = msh.mobileIntegrationService.MountSecretForComponent(svcCruder, mounter, component, secret)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Configure -> could not mount secret: '\"+secret+\"' into component: '\"+component+\"'\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Deconfigure removes configuration for components binding\nfunc (msh *MobileServiceHandler) Deconfigure(rw http.ResponseWriter, req *http.Request) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\ttoken := headers.DefaultTokenRetriever(req.Header)\n\n\tparams := mux.Vars(req)\n\tcomponent := params[\"component\"]\n\tsecret := params[\"secret\"]\n\tif len(component) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided component must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\tif len(secret) == 0 {\n\t\thandleCommonErrorCases(errors.New(\"web.msh.Configure -> provided secret must not be empty\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tsvcCruder, err := msh.tokenClientBuilder.MobileServiceCruder(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not create service cruder\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\tunmounter, err := msh.tokenClientBuilder.VolumeMounterUnmounter(token)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not create volume unmounter\"), rw, msh.logger)\n\t\treturn\n\t}\n\n\terr = msh.mobileIntegrationService.UnmountSecretInComponent(svcCruder, unmounter, component, secret)\n\tif err != nil {\n\t\thandleCommonErrorCases(errors.Wrap(err, \"web.msh.Deconfigure -> could not unmount secret: '\"+secret+\"' from component: '\"+component+\"'\"), rw, msh.logger)\n\t\treturn\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fix lint findings<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Changed watcher from multi-watcher to be an entity one.<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage index\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/webx-top\/echo\"\n\n\t\"github.com\/admpub\/nging\/v4\/application\/handler\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/codec\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/common\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/config\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/license\"\n\t\"github.com\/admpub\/nging\/v4\/application\/middleware\"\n\t\"github.com\/admpub\/nging\/v4\/application\/model\"\n\t\"github.com\/admpub\/nging\/v4\/application\/registry\/dashboard\"\n\tstdCode \"github.com\/webx-top\/echo\/code\"\n)\n\nfunc Index(ctx echo.Context) error {\n\tuser := handler.User(ctx)\n\tif user == nil {\n\t\treturn ctx.Redirect(handler.URLFor(`\/login`))\n\t}\n\tvar err error\n\tctx.Set(`cards`, dashboard.CardAll(ctx).Build(ctx))\n\tblocks := dashboard.BlockAll(ctx)\n\tif err = blocks.Ready(ctx); err != nil {\n\t\treturn err\n\t}\n\tctx.Set(`blocks`, blocks)\n\tctx.Set(`license`, license.License())\n\tctx.Set(`showExpirationTime`, config.FromFile().Sys.ShowExpirationTime)\n\tproductURL := license.ProductDetailURL()\n\tctx.Set(`productURL`, productURL)\n\treturn ctx.Render(`index`, handler.Err(ctx, err))\n}\n\nfunc Login(ctx echo.Context) error {\n\t\/\/检查是否已安装\n\tif !config.IsInstalled() {\n\t\treturn ctx.Redirect(handler.URLFor(`\/setup`))\n\t}\n\n\tnext := ctx.Form(`next`)\n\tif len(next) == 0 {\n\t\tnext = handler.URLFor(`\/index`)\n\t}\n\n\tuser := handler.User(ctx)\n\tif user != nil {\n\t\treturn ctx.Redirect(next)\n\t}\n\tvar err error\n\tif ctx.IsPost() {\n\t\tif data := common.VerifyCaptcha(ctx, `backend`, `code`); data.GetCode() == stdCode.CaptchaError {\n\t\t\terr = common.ErrCaptcha\n\t\t} else if data.GetCode() != stdCode.Success {\n\t\t\terr = fmt.Errorf(\"%v\", data.GetInfo())\n\t\t} else {\n\t\t\terr = middleware.Auth(ctx, true)\n\t\t\tif err == nil {\n\t\t\t\treturn ctx.Redirect(next)\n\t\t\t}\n\t\t}\n\t}\n\treturn ctx.Render(`login`, handler.Err(ctx, err))\n}\n\nfunc Register(ctx echo.Context) error {\n\tvar err error\n\tif ctx.IsPost() {\n\t\tc := model.NewCode(ctx)\n\t\tm := model.NewUser(ctx)\n\t\tcode := ctx.Form(`invitationCode`)\n\t\tuser := ctx.Form(`username`)\n\t\temail := ctx.Form(`email`)\n\t\tpasswd := ctx.Form(`password`)\n\t\trepwd := ctx.Form(`confirmationPassword`)\n\t\tpasswd, err = codec.DefaultSM2DecryptHex(passwd)\n\t\tif err != nil {\n\t\t\terr = ctx.NewError(stdCode.InvalidParameter, ctx.T(`密码解密失败: %v`, err)).SetZone(`password`)\n\t\t\tgoto END\n\t\t}\n\t\trepwd, err = codec.DefaultSM2DecryptHex(repwd)\n\t\tif err != nil {\n\t\t\terr = ctx.NewError(stdCode.InvalidParameter, ctx.T(`您输入的确认密码解密失败: %v`, err)).SetZone(`confirmationPassword`)\n\t\t\tgoto END\n\t\t}\n\t\tif len(code) == 0 {\n\t\t\terr = ctx.E(`邀请码不能为空`)\n\t\t\tgoto END\n\t\t}\n\t\tif repwd != passwd {\n\t\t\terr = ctx.E(`密码与确认密码不一致`)\n\t\t\tgoto END\n\t\t}\n\t\terr = c.VerfyInvitationCode(code)\n\t\tif err != nil {\n\t\t\tgoto END\n\t\t}\n\t\terr = m.Register(user, passwd, email, c.Invitation.RoleIds)\n\t\tif err != nil {\n\t\t\tgoto END\n\t\t}\n\t\tc.UseInvitationCode(c.Invitation, m.NgingUser.Id)\n\t\tm.SetSession()\n\t\tm.NgingUser.UpdateField(nil, `session_id`, ctx.Session().ID(), `id`, m.NgingUser.Id)\n\t\tnext := ctx.Query(`next`)\n\t\tif len(next) == 0 {\n\t\t\tnext = handler.URLFor(`\/index`)\n\t\t}\n\t\treturn ctx.Redirect(next)\n\t}\n\nEND:\n\treturn ctx.Render(`register`, handler.Err(ctx, err))\n}\n\nfunc Logout(ctx echo.Context) error {\n\tctx.Session().Delete(`user`)\n\treturn ctx.Redirect(handler.URLFor(`\/login`))\n}\n\nvar (\n\tDonationAccountTypes = echo.NewKVData()\n\tDefaultDonationAccountType = `alipay`\n)\n\nfunc init() {\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `alipay`, V: `支付宝付款`, H: echo.H{`qrimg`: `alipay.jpg`},\n\t})\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `wechat`, V: `微信支付`, H: echo.H{`qrimg`: `wechat.png`},\n\t})\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `btcoin`, V: `比特币支付`, H: echo.H{`qrimg`: `btcoin.jpeg`},\n\t})\n}\n\nfunc Donation(ctx echo.Context) error {\n\tif len(DonationAccountTypes.Slice()) == 0 {\n\t\treturn echo.ErrNotFound\n\t}\n\ttyp := ctx.Param(`type`, DefaultDonationAccountType)\n\titem := DonationAccountTypes.GetItem(typ)\n\tif item == nil {\n\t\ttyp = DefaultDonationAccountType\n\t\titem = DonationAccountTypes.GetItem(typ)\n\t\tif item == nil {\n\t\t\treturn echo.ErrNotFound\n\t\t}\n\t}\n\tctx.Set(`data`, item)\n\tctx.Set(`list`, DonationAccountTypes.Slice())\n\treturn ctx.Render(`index\/donation\/donation`, nil)\n}\n<commit_msg>update<commit_after>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage index\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/webx-top\/echo\"\n\n\t\"github.com\/admpub\/nging\/v4\/application\/handler\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/codec\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/common\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/config\"\n\t\"github.com\/admpub\/nging\/v4\/application\/library\/license\"\n\t\"github.com\/admpub\/nging\/v4\/application\/middleware\"\n\t\"github.com\/admpub\/nging\/v4\/application\/model\"\n\t\"github.com\/admpub\/nging\/v4\/application\/registry\/dashboard\"\n\tstdCode \"github.com\/webx-top\/echo\/code\"\n)\n\nfunc Index(ctx echo.Context) error {\n\tuser := handler.User(ctx)\n\tif user == nil {\n\t\treturn ctx.Redirect(handler.URLFor(`\/login`))\n\t}\n\tvar err error\n\tctx.Set(`cards`, dashboard.CardAll(ctx).Build(ctx))\n\tblocks := dashboard.BlockAll(ctx)\n\tif err = blocks.Ready(ctx); err != nil {\n\t\treturn err\n\t}\n\tctx.Set(`blocks`, blocks)\n\tctx.Set(`license`, license.License())\n\tctx.Set(`showExpirationTime`, config.FromFile().Sys.ShowExpirationTime)\n\tproductURL := license.ProductDetailURL()\n\tctx.Set(`productURL`, productURL)\n\treturn ctx.Render(`index`, handler.Err(ctx, err))\n}\n\nfunc Login(ctx echo.Context) error {\n\t\/\/检查是否已安装\n\tif !config.IsInstalled() {\n\t\treturn ctx.Redirect(handler.URLFor(`\/setup`))\n\t}\n\n\tnext := ctx.Form(`next`)\n\tif len(next) == 0 {\n\t\tnext = handler.URLFor(`\/index`)\n\t}\n\n\tuser := handler.User(ctx)\n\tif user != nil {\n\t\treturn ctx.Redirect(next)\n\t}\n\tvar err error\n\tif ctx.IsPost() {\n\t\tif data := common.VerifyCaptcha(ctx, `backend`, `code`); data.GetCode() == stdCode.CaptchaError {\n\t\t\terr = common.ErrCaptcha\n\t\t} else if data.GetCode() != stdCode.Success {\n\t\t\terr = fmt.Errorf(\"%v\", data.GetInfo())\n\t\t} else {\n\t\t\terr = middleware.Auth(ctx, true)\n\t\t\tif err == nil {\n\t\t\t\treturn ctx.Redirect(next)\n\t\t\t}\n\t\t}\n\t}\n\treturn ctx.Render(`login`, handler.Err(ctx, err))\n}\n\nfunc Register(ctx echo.Context) error {\n\tvar err error\n\tif ctx.IsPost() {\n\t\tc := model.NewCode(ctx)\n\t\tm := model.NewUser(ctx)\n\t\tcode := ctx.Form(`invitationCode`)\n\t\tuser := ctx.Form(`username`)\n\t\temail := ctx.Form(`email`)\n\t\tpasswd := ctx.Form(`password`)\n\t\trepwd := ctx.Form(`confirmationPassword`)\n\t\tpasswd, err = codec.DefaultSM2DecryptHex(passwd)\n\t\tif err != nil {\n\t\t\terr = ctx.NewError(stdCode.InvalidParameter, ctx.T(`密码解密失败: %v`, err)).SetZone(`password`)\n\t\t\tgoto END\n\t\t}\n\t\trepwd, err = codec.DefaultSM2DecryptHex(repwd)\n\t\tif err != nil {\n\t\t\terr = ctx.NewError(stdCode.InvalidParameter, ctx.T(`您输入的确认密码解密失败: %v`, err)).SetZone(`confirmationPassword`)\n\t\t\tgoto END\n\t\t}\n\t\tif len(code) == 0 {\n\t\t\terr = ctx.E(`邀请码不能为空`)\n\t\t\tgoto END\n\t\t}\n\t\tif repwd != passwd {\n\t\t\terr = ctx.E(`密码与确认密码不一致`)\n\t\t\tgoto END\n\t\t}\n\t\terr = c.VerfyInvitationCode(code)\n\t\tif err != nil {\n\t\t\tgoto END\n\t\t}\n\t\terr = m.Register(user, passwd, email, c.Invitation.RoleIds)\n\t\tif err != nil {\n\t\t\tgoto END\n\t\t}\n\t\tc.UseInvitationCode(c.Invitation, m.NgingUser.Id)\n\t\tm.SetSession()\n\t\terr = m.NgingUser.UpdateField(nil, `session_id`, ctx.Session().ID(), `id`, m.NgingUser.Id)\n\t\tif err != nil {\n\t\t\tgoto END\n\t\t}\n\n\t\tloginLogM := model.NewLoginLog(ctx)\n\t\tloginLogM.OwnerType = `user`\n\t\tloginLogM.Username = user\n\t\tloginLogM.SessionId = ctx.Session().ID()\n\t\tloginLogM.Success = `Y`\n\t\tloginLogM.Add()\n\n\t\tnext := ctx.Query(`next`)\n\t\tif len(next) == 0 {\n\t\t\tnext = handler.URLFor(`\/index`)\n\t\t}\n\t\treturn ctx.Redirect(next)\n\t}\n\nEND:\n\treturn ctx.Render(`register`, handler.Err(ctx, err))\n}\n\nfunc Logout(ctx echo.Context) error {\n\tctx.Session().Delete(`user`)\n\treturn ctx.Redirect(handler.URLFor(`\/login`))\n}\n\nvar (\n\tDonationAccountTypes = echo.NewKVData()\n\tDefaultDonationAccountType = `alipay`\n)\n\nfunc init() {\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `alipay`, V: `支付宝付款`, H: echo.H{`qrimg`: `alipay.jpg`},\n\t})\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `wechat`, V: `微信支付`, H: echo.H{`qrimg`: `wechat.png`},\n\t})\n\tDonationAccountTypes.AddItem(&echo.KV{\n\t\tK: `btcoin`, V: `比特币支付`, H: echo.H{`qrimg`: `btcoin.jpeg`},\n\t})\n}\n\nfunc Donation(ctx echo.Context) error {\n\tif len(DonationAccountTypes.Slice()) == 0 {\n\t\treturn echo.ErrNotFound\n\t}\n\ttyp := ctx.Param(`type`, DefaultDonationAccountType)\n\titem := DonationAccountTypes.GetItem(typ)\n\tif item == nil {\n\t\ttyp = DefaultDonationAccountType\n\t\titem = DonationAccountTypes.GetItem(typ)\n\t\tif item == nil {\n\t\t\treturn echo.ErrNotFound\n\t\t}\n\t}\n\tctx.Set(`data`, item)\n\tctx.Set(`list`, DonationAccountTypes.Slice())\n\treturn ctx.Render(`index\/donation\/donation`, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage native\n\nimport (\n\t\"time\"\n\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t\"gopkg.in\/check.v1\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nfunc (s *S) TestCreatePasswordToken(c *check.C) {\n\tu := auth.User{Email: \"pure@alanis.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(t.UserEmail, check.Equals, u.Email)\n\tc.Assert(t.Used, check.Equals, false)\n\tvar dbToken passwordToken\n\terr = s.conn.PasswordTokens().Find(bson.M{\"_id\": t.Token}).One(&dbToken)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(dbToken.Token, check.Equals, t.Token)\n\tc.Assert(dbToken.UserEmail, check.Equals, t.UserEmail)\n\tc.Assert(dbToken.Used, check.Equals, t.Used)\n}\n\nfunc (s *S) TestCreatePasswordTokenErrors(c *check.C) {\n\tvar tests = []struct {\n\t\tinput *auth.User\n\t\twant string\n\t}{\n\t\t{nil, \"User is nil\"},\n\t\t{&auth.User{}, \"User email is empty\"},\n\t}\n\tfor _, t := range tests {\n\t\ttoken, err := createPasswordToken(t.input)\n\t\tc.Check(token, check.IsNil)\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(err.Error(), check.Equals, t.want)\n\t}\n}\n\nfunc (s *S) TestPasswordTokenUser(c *check.C) {\n\tu := auth.User{Email: \"need@who.com\", Password: \"123456\"}\n\terr := u.Create()\n\tc.Assert(err, check.IsNil)\n\tdefer u.Delete()\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tu2, err := t.user()\n\tu2.Keys = u.Keys\n\tc.Assert(err, check.IsNil)\n\tc.Assert(*u2, check.DeepEquals, u)\n}\n\nfunc (s *S) TestGetPasswordToken(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tt2.Creation = t.Creation\n\tc.Assert(err, check.IsNil)\n\tc.Assert(t2, check.DeepEquals, t)\n}\n\nfunc (s *S) TestGetPasswordTokenUnknown(c *check.C) {\n\tt, err := getPasswordToken(\"what??\")\n\tc.Assert(t, check.IsNil)\n\tc.Assert(err, check.Equals, auth.ErrInvalidToken)\n}\n\nfunc (s *S) TestGetPasswordUsedToken(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt.Used = true\n\terr = s.conn.PasswordTokens().UpdateId(t.Token, t)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tc.Assert(t2, check.IsNil)\n\tc.Assert(err, check.Equals, auth.ErrInvalidToken)\n}\n\nfunc (s *S) TestPasswordTokensAreValidFor24Hours(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt.Creation = time.Now().Add(-24 * time.Hour)\n\terr = s.conn.PasswordTokens().UpdateId(t.Token, t)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tc.Assert(t2, check.IsNil)\n\tc.Assert(err, check.NotNil)\n\tc.Assert(err.Error(), check.Equals, \"Invalid token\")\n}\n<commit_msg>auth\/native: fix build<commit_after>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage native\n\nimport (\n\t\"time\"\n\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t\"gopkg.in\/check.v1\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nfunc (s *S) TestCreatePasswordToken(c *check.C) {\n\tu := auth.User{Email: \"pure@alanis.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(t.UserEmail, check.Equals, u.Email)\n\tc.Assert(t.Used, check.Equals, false)\n\tvar dbToken passwordToken\n\terr = s.conn.PasswordTokens().Find(bson.M{\"_id\": t.Token}).One(&dbToken)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(dbToken.Token, check.Equals, t.Token)\n\tc.Assert(dbToken.UserEmail, check.Equals, t.UserEmail)\n\tc.Assert(dbToken.Used, check.Equals, t.Used)\n}\n\nfunc (s *S) TestCreatePasswordTokenErrors(c *check.C) {\n\tvar tests = []struct {\n\t\tinput *auth.User\n\t\twant string\n\t}{\n\t\t{nil, \"User is nil\"},\n\t\t{&auth.User{}, \"User email is empty\"},\n\t}\n\tfor _, t := range tests {\n\t\ttoken, err := createPasswordToken(t.input)\n\t\tc.Check(token, check.IsNil)\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(err.Error(), check.Equals, t.want)\n\t}\n}\n\nfunc (s *S) TestPasswordTokenUser(c *check.C) {\n\tu := auth.User{Email: \"need@who.com\", Password: \"123456\"}\n\terr := u.Create()\n\tc.Assert(err, check.IsNil)\n\tdefer u.Delete()\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tu2, err := t.user()\n\tc.Assert(err, check.IsNil)\n\tc.Assert(*u2, check.DeepEquals, u)\n}\n\nfunc (s *S) TestGetPasswordToken(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tt2.Creation = t.Creation\n\tc.Assert(err, check.IsNil)\n\tc.Assert(t2, check.DeepEquals, t)\n}\n\nfunc (s *S) TestGetPasswordTokenUnknown(c *check.C) {\n\tt, err := getPasswordToken(\"what??\")\n\tc.Assert(t, check.IsNil)\n\tc.Assert(err, check.Equals, auth.ErrInvalidToken)\n}\n\nfunc (s *S) TestGetPasswordUsedToken(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt.Used = true\n\terr = s.conn.PasswordTokens().UpdateId(t.Token, t)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tc.Assert(t2, check.IsNil)\n\tc.Assert(err, check.Equals, auth.ErrInvalidToken)\n}\n\nfunc (s *S) TestPasswordTokensAreValidFor24Hours(c *check.C) {\n\tu := auth.User{Email: \"porcelain@opeth.com\"}\n\tt, err := createPasswordToken(&u)\n\tc.Assert(err, check.IsNil)\n\tt.Creation = time.Now().Add(-24 * time.Hour)\n\terr = s.conn.PasswordTokens().UpdateId(t.Token, t)\n\tc.Assert(err, check.IsNil)\n\tt2, err := getPasswordToken(t.Token)\n\tc.Assert(t2, check.IsNil)\n\tc.Assert(err, check.NotNil)\n\tc.Assert(err.Error(), check.Equals, \"Invalid token\")\n}\n<|endoftext|>"} {"text":"<commit_before>package sniff_test\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/sniff\"\n\t\"cred-alert\/sniff\/fixtures\"\n\t\"cred-alert\/sniff\/matchers\/matchersfakes\"\n\t\"cred-alert\/sniff\/snifffakes\"\n)\n\nvar _ = Describe(\"Sniffer\", func() {\n\tvar (\n\t\tlogger *lagertest.TestLogger\n\t\tmatcher *matchersfakes.FakeMatcher\n\t\texclusionMatcher *matchersfakes.FakeMatcher\n\t\tscanner *snifffakes.FakeScanner\n\t\texpectedLine *scanners.Line\n\n\t\tsniffer sniff.Sniffer\n\t)\n\n\tBeforeEach(func() {\n\t\tlogger = lagertest.NewTestLogger(\"scanner\")\n\t\tmatcher = new(matchersfakes.FakeMatcher)\n\t\texclusionMatcher = new(matchersfakes.FakeMatcher)\n\t\tsniffer = sniff.NewSniffer(matcher, exclusionMatcher)\n\n\t\tscanner = new(snifffakes.FakeScanner)\n\t\tscanner.ScanStub = func(lager.Logger) bool {\n\t\t\treturn scanner.ScanCallCount() < 4\n\t\t}\n\t\texpectedLine = &scanners.Line{\n\t\t\tPath: \"some-path\",\n\t\t\tLineNumber: 42,\n\t\t\tContent: \"some-content\",\n\t\t}\n\t\tscanner.LineReturns(expectedLine)\n\t})\n\n\tDescribe(\"Sniff\", func() {\n\t\tIt(\"calls the exclusion matcher with each line\", func() {\n\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tExpect(exclusionMatcher.MatchCallCount()).To(Equal(3))\n\t\t})\n\n\t\tIt(\"calls the regular matcher with each line\", func() {\n\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tExpect(matcher.MatchCallCount()).To(Equal(3))\n\t\t})\n\n\t\tContext(\"when the exclusion matcher returns true\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\texclusionMatcher.MatchReturns(true)\n\t\t\t})\n\n\t\t\tIt(\"does not call the regular matcher\", func() {\n\t\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\tExpect(matcher.MatchCallCount()).To(BeZero())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the regular matcher returns true\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tmatcher.MatchStub = func(string) bool {\n\t\t\t\t\treturn matcher.MatchCallCount() != 1 \/\/ 2 should match\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"calls the callback with the line\", func() {\n\t\t\t\tvar actualLine *scanners.Line\n\t\t\t\tcallback := func(line scanners.Line) error {\n\t\t\t\t\tactualLine = &line\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\tExpect(actualLine).To(Equal(expectedLine))\n\t\t\t})\n\n\t\t\tContext(\"when the callback returns an error\", func() {\n\t\t\t\tvar (\n\t\t\t\t\tcallCount int\n\t\t\t\t\tcallback func(scanners.Line) error\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tcallCount = 0\n\n\t\t\t\t\tcallback = func(line scanners.Line) error {\n\t\t\t\t\t\tcallCount++\n\t\t\t\t\t\treturn errors.New(\"tragedy\")\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\terr := sniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the exclusion matcher with each line\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(exclusionMatcher.MatchCallCount()).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the regular matcher with each line\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(matcher.MatchCallCount()).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the callback for each line that matches\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(callCount).To(Equal(2))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"DefaultSniffer\", func() {\n\t\tvar lines []string\n\t\tvar sniffer sniff.Sniffer\n\n\t\tBeforeEach(func() {\n\t\t\tlines = strings.Split(fixtures.Credentials, \"\\n\")\n\t\t\tsniffer = sniff.NewDefaultSniffer()\n\t\t})\n\n\t\tIt(\"matches all positive examples\", func() {\n\t\t\tvar expectations []string\n\t\t\tvar actuals []string\n\n\t\t\tfor _, line := range lines {\n\t\t\t\tscanner.ScanReturns(true)\n\n\t\t\t\tif strings.Contains(line, \"should_match\") {\n\t\t\t\t\texpectations = append(expectations, line)\n\t\t\t\t}\n\n\t\t\t\tscanner.LineStub = func() *scanners.Line {\n\t\t\t\t\tscanner.ScanReturns(false)\n\n\t\t\t\t\treturn &scanners.Line{\n\t\t\t\t\t\tContent: line,\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsniffer.Sniff(logger, scanner, func(line scanners.Line) error {\n\t\t\t\t\tactuals = append(actuals, line.Content)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tfor _, expectation := range expectations {\n\t\t\t\tExpect(actuals).To(ContainElement(expectation))\n\t\t\t}\n\t\t})\n\t})\n})\n<commit_msg>make interim sniff feature test pending<commit_after>package sniff_test\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\n\t\"cred-alert\/scanners\"\n\t\"cred-alert\/sniff\"\n\t\"cred-alert\/sniff\/fixtures\"\n\t\"cred-alert\/sniff\/matchers\/matchersfakes\"\n\t\"cred-alert\/sniff\/snifffakes\"\n)\n\nvar _ = Describe(\"Sniffer\", func() {\n\tvar (\n\t\tlogger *lagertest.TestLogger\n\t\tmatcher *matchersfakes.FakeMatcher\n\t\texclusionMatcher *matchersfakes.FakeMatcher\n\t\tscanner *snifffakes.FakeScanner\n\t\texpectedLine *scanners.Line\n\n\t\tsniffer sniff.Sniffer\n\t)\n\n\tBeforeEach(func() {\n\t\tlogger = lagertest.NewTestLogger(\"scanner\")\n\t\tmatcher = new(matchersfakes.FakeMatcher)\n\t\texclusionMatcher = new(matchersfakes.FakeMatcher)\n\t\tsniffer = sniff.NewSniffer(matcher, exclusionMatcher)\n\n\t\tscanner = new(snifffakes.FakeScanner)\n\t\tscanner.ScanStub = func(lager.Logger) bool {\n\t\t\treturn scanner.ScanCallCount() < 4\n\t\t}\n\t\texpectedLine = &scanners.Line{\n\t\t\tPath: \"some-path\",\n\t\t\tLineNumber: 42,\n\t\t\tContent: \"some-content\",\n\t\t}\n\t\tscanner.LineReturns(expectedLine)\n\t})\n\n\tDescribe(\"Sniff\", func() {\n\t\tIt(\"calls the exclusion matcher with each line\", func() {\n\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tExpect(exclusionMatcher.MatchCallCount()).To(Equal(3))\n\t\t})\n\n\t\tIt(\"calls the regular matcher with each line\", func() {\n\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tExpect(matcher.MatchCallCount()).To(Equal(3))\n\t\t})\n\n\t\tContext(\"when the exclusion matcher returns true\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\texclusionMatcher.MatchReturns(true)\n\t\t\t})\n\n\t\t\tIt(\"does not call the regular matcher\", func() {\n\t\t\t\tsniffer.Sniff(logger, scanner, func(scanners.Line) error {\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\tExpect(matcher.MatchCallCount()).To(BeZero())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the regular matcher returns true\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tmatcher.MatchStub = func(string) bool {\n\t\t\t\t\treturn matcher.MatchCallCount() != 1 \/\/ 2 should match\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"calls the callback with the line\", func() {\n\t\t\t\tvar actualLine *scanners.Line\n\t\t\t\tcallback := func(line scanners.Line) error {\n\t\t\t\t\tactualLine = &line\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\tExpect(actualLine).To(Equal(expectedLine))\n\t\t\t})\n\n\t\t\tContext(\"when the callback returns an error\", func() {\n\t\t\t\tvar (\n\t\t\t\t\tcallCount int\n\t\t\t\t\tcallback func(scanners.Line) error\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tcallCount = 0\n\n\t\t\t\t\tcallback = func(line scanners.Line) error {\n\t\t\t\t\t\tcallCount++\n\t\t\t\t\t\treturn errors.New(\"tragedy\")\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\terr := sniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the exclusion matcher with each line\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(exclusionMatcher.MatchCallCount()).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the regular matcher with each line\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(matcher.MatchCallCount()).To(Equal(3))\n\t\t\t\t})\n\n\t\t\t\tIt(\"calls the callback for each line that matches\", func() {\n\t\t\t\t\tsniffer.Sniff(logger, scanner, callback)\n\t\t\t\t\tExpect(callCount).To(Equal(2))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"DefaultSniffer\", func() {\n\t\tvar lines []string\n\t\tvar sniffer sniff.Sniffer\n\n\t\tBeforeEach(func() {\n\t\t\tlines = strings.Split(fixtures.Credentials, \"\\n\")\n\t\t\tsniffer = sniff.NewDefaultSniffer()\n\t\t})\n\n\t\tXIt(\"matches all positive examples\", func() {\n\t\t\tvar expectations []string\n\t\t\tvar actuals []string\n\n\t\t\tfor _, line := range lines {\n\t\t\t\tscanner.ScanReturns(true)\n\n\t\t\t\tif strings.Contains(line, \"should_match\") {\n\t\t\t\t\texpectations = append(expectations, line)\n\t\t\t\t}\n\n\t\t\t\tscanner.LineStub = func() *scanners.Line {\n\t\t\t\t\tscanner.ScanReturns(false)\n\n\t\t\t\t\treturn &scanners.Line{\n\t\t\t\t\t\tContent: line,\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsniffer.Sniff(logger, scanner, func(line scanners.Line) error {\n\t\t\t\t\tactuals = append(actuals, line.Content)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tfor _, expectation := range expectations {\n\t\t\t\tExpect(actuals).To(ContainElement(expectation))\n\t\t\t}\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ RunConfig contains configuration for running an instance from a source\n\/\/ AMI and details on how to access that launched image.\ntype RunConfig struct {\n\tSourceAmi string `mapstructure:\"source_ami\"`\n\tIamInstanceProfile string `mapstructure:\"iam_instance_profile\"`\n\tInstanceType string `mapstructure:\"instance_type\"`\n\tUserData string `mapstructure:\"user_data\"`\n\tUserDataFile string `mapstructure:\"user_data_file\"`\n\tRawSSHTimeout string `mapstructure:\"ssh_timeout\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPort int `mapstructure:\"ssh_port\"`\n\tSecurityGroupId string `mapstructure:\"security_group_id\"`\n\tSecurityGroupIds []string `mapstructure:\"security_group_ids\"`\n\tSubnetId string `mapstructure:\"subnet_id\"`\n\tAssociatePublicIpAddress bool `mapstructure:\"associate_public_ip_address\"`\n\tTemporaryKeyPairName string `mapstructure:\"temporary_key_pair_name\"`\n\tVpcId string `mapstructure:\"vpc_id\"`\n\tAvailabilityZone string `mapstructure:\"availability_zone\"`\n\n\t\/\/ Unexported fields that are calculated from others\n\tsshTimeout time.Duration\n}\n\nfunc (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {\n\tif t == nil {\n\t\tvar err error\n\t\tt, err = packer.NewConfigTemplate()\n\t\tif err != nil {\n\t\t\treturn []error{err}\n\t\t}\n\t}\n\n\t\/\/ Defaults\n\tif c.SSHPort == 0 {\n\t\tc.SSHPort = 22\n\t}\n\n\tif c.RawSSHTimeout == \"\" {\n\t\tc.RawSSHTimeout = \"1m\"\n\t}\n\n\tif c.TemporaryKeyPairName == \"\" {\n\t\tc.TemporaryKeyPairName = \"packer {{uuid}}\"\n\t}\n\n\t\/\/ Validation\n\tvar err error\n\terrs := make([]error, 0)\n\tif c.SourceAmi == \"\" {\n\t\terrs = append(errs, errors.New(\"A source_ami must be specified\"))\n\t}\n\n\tif c.InstanceType == \"\" {\n\t\terrs = append(errs, errors.New(\"An instance_type must be specified\"))\n\t}\n\n\tif c.SSHUsername == \"\" {\n\t\terrs = append(errs, errors.New(\"An ssh_username must be specified\"))\n\t}\n\n\tif c.UserData != \"\" && c.UserDataFile != \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"Only one of user_data or user_data_file can be specified.\"))\n\t} else if c.UserDataFile != \"\" {\n\t\tif _, err := os.Stat(c.UserDataFile); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"user_data_file not found: %s\", c.UserDataFile))\n\t\t}\n\t}\n\n\tif c.SecurityGroupId != \"\" {\n\t\tif len(c.SecurityGroupIds) > 0 {\n\t\t\terrs = append(errs, fmt.Errorf(\"Only one of security_group_id or security_group_ids can be specified.\"))\n\t\t} else {\n\t\t\tc.SecurityGroupIds = []string{c.SecurityGroupId}\n\t\t\tc.SecurityGroupId = \"\"\n\t\t}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"iam_instance_profile\": &c.IamInstanceProfile,\n\t\t\"instance_type\": &c.InstanceType,\n\t\t\"ssh_timeout\": &c.RawSSHTimeout,\n\t\t\"ssh_username\": &c.SSHUsername,\n\t\t\"source_ami\": &c.SourceAmi,\n\t\t\"subnet_id\": &c.SubnetId,\n\t\t\"temporary_key_pair_name\": &c.TemporaryKeyPairName,\n\t\t\"vpc_id\": &c.VpcId,\n\t\t\"availability_zone\": &c.AvailabilityZone,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = t.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = append(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tsliceTemplates := map[string][]string{\n\t\t\"security_group_ids\": c.SecurityGroupIds,\n\t}\n\n\tfor n, slice := range sliceTemplates {\n\t\tfor i, elem := range slice {\n\t\t\tvar err error\n\t\t\tslice[i], err = t.Process(elem, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fmt.Errorf(\"Error processing %s[%d]: %s\", n, i, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tc.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout)\n\tif err != nil {\n\t\terrs = append(errs, fmt.Errorf(\"Failed parsing ssh_timeout: %s\", err))\n\t}\n\n\treturn errs\n}\n\nfunc (c *RunConfig) SSHTimeout() time.Duration {\n\treturn c.sshTimeout\n}\n<commit_msg>builder\/amazon: alphabetize<commit_after>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ RunConfig contains configuration for running an instance from a source\n\/\/ AMI and details on how to access that launched image.\ntype RunConfig struct {\n\tAssociatePublicIpAddress bool `mapstructure:\"associate_public_ip_address\"`\n\tAvailabilityZone string `mapstructure:\"availability_zone\"`\n\tIamInstanceProfile string `mapstructure:\"iam_instance_profile\"`\n\tInstanceType string `mapstructure:\"instance_type\"`\n\tSourceAmi string `mapstructure:\"source_ami\"`\n\tRawSSHTimeout string `mapstructure:\"ssh_timeout\"`\n\tSSHUsername string `mapstructure:\"ssh_username\"`\n\tSSHPort int `mapstructure:\"ssh_port\"`\n\tSecurityGroupId string `mapstructure:\"security_group_id\"`\n\tSecurityGroupIds []string `mapstructure:\"security_group_ids\"`\n\tSubnetId string `mapstructure:\"subnet_id\"`\n\tTemporaryKeyPairName string `mapstructure:\"temporary_key_pair_name\"`\n\tUserData string `mapstructure:\"user_data\"`\n\tUserDataFile string `mapstructure:\"user_data_file\"`\n\tVpcId string `mapstructure:\"vpc_id\"`\n\n\t\/\/ Unexported fields that are calculated from others\n\tsshTimeout time.Duration\n}\n\nfunc (c *RunConfig) Prepare(t *packer.ConfigTemplate) []error {\n\tif t == nil {\n\t\tvar err error\n\t\tt, err = packer.NewConfigTemplate()\n\t\tif err != nil {\n\t\t\treturn []error{err}\n\t\t}\n\t}\n\n\t\/\/ Defaults\n\tif c.SSHPort == 0 {\n\t\tc.SSHPort = 22\n\t}\n\n\tif c.RawSSHTimeout == \"\" {\n\t\tc.RawSSHTimeout = \"1m\"\n\t}\n\n\tif c.TemporaryKeyPairName == \"\" {\n\t\tc.TemporaryKeyPairName = \"packer {{uuid}}\"\n\t}\n\n\t\/\/ Validation\n\tvar err error\n\terrs := make([]error, 0)\n\tif c.SourceAmi == \"\" {\n\t\terrs = append(errs, errors.New(\"A source_ami must be specified\"))\n\t}\n\n\tif c.InstanceType == \"\" {\n\t\terrs = append(errs, errors.New(\"An instance_type must be specified\"))\n\t}\n\n\tif c.SSHUsername == \"\" {\n\t\terrs = append(errs, errors.New(\"An ssh_username must be specified\"))\n\t}\n\n\tif c.UserData != \"\" && c.UserDataFile != \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"Only one of user_data or user_data_file can be specified.\"))\n\t} else if c.UserDataFile != \"\" {\n\t\tif _, err := os.Stat(c.UserDataFile); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"user_data_file not found: %s\", c.UserDataFile))\n\t\t}\n\t}\n\n\tif c.SecurityGroupId != \"\" {\n\t\tif len(c.SecurityGroupIds) > 0 {\n\t\t\terrs = append(errs, fmt.Errorf(\"Only one of security_group_id or security_group_ids can be specified.\"))\n\t\t} else {\n\t\t\tc.SecurityGroupIds = []string{c.SecurityGroupId}\n\t\t\tc.SecurityGroupId = \"\"\n\t\t}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"iam_instance_profile\": &c.IamInstanceProfile,\n\t\t\"instance_type\": &c.InstanceType,\n\t\t\"ssh_timeout\": &c.RawSSHTimeout,\n\t\t\"ssh_username\": &c.SSHUsername,\n\t\t\"source_ami\": &c.SourceAmi,\n\t\t\"subnet_id\": &c.SubnetId,\n\t\t\"temporary_key_pair_name\": &c.TemporaryKeyPairName,\n\t\t\"vpc_id\": &c.VpcId,\n\t\t\"availability_zone\": &c.AvailabilityZone,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = t.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = append(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tsliceTemplates := map[string][]string{\n\t\t\"security_group_ids\": c.SecurityGroupIds,\n\t}\n\n\tfor n, slice := range sliceTemplates {\n\t\tfor i, elem := range slice {\n\t\t\tvar err error\n\t\t\tslice[i], err = t.Process(elem, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fmt.Errorf(\"Error processing %s[%d]: %s\", n, i, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tc.sshTimeout, err = time.ParseDuration(c.RawSSHTimeout)\n\tif err != nil {\n\t\terrs = append(errs, fmt.Errorf(\"Failed parsing ssh_timeout: %s\", err))\n\t}\n\n\treturn errs\n}\n\nfunc (c *RunConfig) SSHTimeout() time.Duration {\n\treturn c.sshTimeout\n}\n<|endoftext|>"} {"text":"<commit_before>package iso\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/multistep\"\n\tvmwcommon \"github.com\/mitchellh\/packer\/builder\/vmware\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ This step shuts down the machine. It first attempts to do so gracefully,\n\/\/ but ultimately forcefully shuts it down if that fails.\n\/\/\n\/\/ Uses:\n\/\/ communicator packer.Communicator\n\/\/ config *config\n\/\/ driver Driver\n\/\/ ui packer.Ui\n\/\/ vmx_path string\n\/\/\n\/\/ Produces:\n\/\/ <nothing>\ntype stepShutdown struct{}\n\nfunc (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {\n\tcomm := state.Get(\"communicator\").(packer.Communicator)\n\tconfig := state.Get(\"config\").(*config)\n\tdriver := state.Get(\"driver\").(vmwcommon.Driver)\n\tui := state.Get(\"ui\").(packer.Ui)\n\tvmxPath := state.Get(\"vmx_path\").(string)\n\n\tif config.ShutdownCommand != \"\" {\n\t\tui.Say(\"Gracefully halting virtual machine...\")\n\t\tlog.Printf(\"Executing shutdown command: %s\", config.ShutdownCommand)\n\n\t\tvar stdout, stderr bytes.Buffer\n\t\tcmd := &packer.RemoteCmd{\n\t\t\tCommand: config.ShutdownCommand,\n\t\t\tStdout: &stdout,\n\t\t\tStderr: &stderr,\n\t\t}\n\t\tif err := comm.Start(cmd); err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to send shutdown command: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\t\/\/ Wait for the command to run\n\t\tcmd.Wait()\n\n\t\t\/\/ If the command failed to run, notify the user in some way.\n\t\tif cmd.ExitStatus != 0 {\n\t\t\tstate.Put(\"error\", fmt.Errorf(\n\t\t\t\t\"Shutdown command has non-zero exit status.\\n\\nStdout: %s\\n\\nStderr: %s\",\n\t\t\t\tstdout.String(), stderr.String()))\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tlog.Printf(\"Shutdown stdout: %s\", stdout.String())\n\t\tlog.Printf(\"Shutdown stderr: %s\", stderr.String())\n\n\t\t\/\/ Wait for the machine to actually shut down\n\t\tlog.Printf(\"Waiting max %s for shutdown to complete\", config.shutdownTimeout)\n\t\tshutdownTimer := time.After(config.shutdownTimeout)\n\t\tfor {\n\t\t\trunning, _ := driver.IsRunning(vmxPath)\n\t\t\tif !running {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-shutdownTimer:\n\t\t\t\terr := errors.New(\"Timeout while waiting for machine to shut down.\")\n\t\t\t\tstate.Put(\"error\", err)\n\t\t\t\tui.Error(err.Error())\n\t\t\t\treturn multistep.ActionHalt\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif err := driver.Stop(vmxPath); err != nil {\n\t\t\terr := fmt.Errorf(\"Error stopping VM: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t}\n\n\tui.Message(\"Waiting for VMware to clean up after itself...\")\n\tlockPattern := filepath.Join(config.OutputDir, \"*.lck\")\n\ttimer := time.After(15 * time.Second)\nLockWaitLoop:\n\tfor {\n\t\tlocks, err := filepath.Glob(lockPattern)\n\t\tif err == nil {\n\t\t\tif len(locks) == 0 {\n\t\t\t\tlog.Println(\"No more lock files found. VMware is clean.\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(locks) == 1 && strings.HasSuffix(locks[0], \".vmx.lck\") {\n\t\t\t\tlog.Println(\"Only waiting on VMX lock. VMware is clean.\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"Waiting on lock files: %#v\", locks)\n\t\t}\n\n\t\tselect {\n\t\tcase <-timer:\n\t\t\tlog.Println(\"Reached timeout on waiting for clean VMware. Assuming clean.\")\n\t\t\tbreak LockWaitLoop\n\t\tcase <-time.After(1 * time.Second):\n\t\t}\n\t}\n\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Windows takes a while to yield control of the files when the\n\t\t\/\/ process is exiting. We just sleep here. In the future, it'd be\n\t\t\/\/ nice to find a better solution to this.\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\tlog.Println(\"VM shut down.\")\n\treturn multistep.ActionContinue\n}\n\nfunc (s *stepShutdown) Cleanup(state multistep.StateBag) {}\n<commit_msg>builder\/vmware\/iso: convert stepShutdown to use OutputDir for cleanup<commit_after>package iso\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/multistep\"\n\tvmwcommon \"github.com\/mitchellh\/packer\/builder\/vmware\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ This step shuts down the machine. It first attempts to do so gracefully,\n\/\/ but ultimately forcefully shuts it down if that fails.\n\/\/\n\/\/ Uses:\n\/\/ communicator packer.Communicator\n\/\/ config *config\n\/\/ driver Driver\n\/\/ ui packer.Ui\n\/\/ vmx_path string\n\/\/\n\/\/ Produces:\n\/\/ <nothing>\ntype stepShutdown struct{}\n\nfunc (s *stepShutdown) Run(state multistep.StateBag) multistep.StepAction {\n\tcomm := state.Get(\"communicator\").(packer.Communicator)\n\tconfig := state.Get(\"config\").(*config)\n\tdir := state.Get(\"dir\").(vmwcommon.OutputDir)\n\tdriver := state.Get(\"driver\").(vmwcommon.Driver)\n\tui := state.Get(\"ui\").(packer.Ui)\n\tvmxPath := state.Get(\"vmx_path\").(string)\n\n\tif config.ShutdownCommand != \"\" {\n\t\tui.Say(\"Gracefully halting virtual machine...\")\n\t\tlog.Printf(\"Executing shutdown command: %s\", config.ShutdownCommand)\n\n\t\tvar stdout, stderr bytes.Buffer\n\t\tcmd := &packer.RemoteCmd{\n\t\t\tCommand: config.ShutdownCommand,\n\t\t\tStdout: &stdout,\n\t\t\tStderr: &stderr,\n\t\t}\n\t\tif err := comm.Start(cmd); err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to send shutdown command: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\t\/\/ Wait for the command to run\n\t\tcmd.Wait()\n\n\t\t\/\/ If the command failed to run, notify the user in some way.\n\t\tif cmd.ExitStatus != 0 {\n\t\t\tstate.Put(\"error\", fmt.Errorf(\n\t\t\t\t\"Shutdown command has non-zero exit status.\\n\\nStdout: %s\\n\\nStderr: %s\",\n\t\t\t\tstdout.String(), stderr.String()))\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tlog.Printf(\"Shutdown stdout: %s\", stdout.String())\n\t\tlog.Printf(\"Shutdown stderr: %s\", stderr.String())\n\n\t\t\/\/ Wait for the machine to actually shut down\n\t\tlog.Printf(\"Waiting max %s for shutdown to complete\", config.shutdownTimeout)\n\t\tshutdownTimer := time.After(config.shutdownTimeout)\n\t\tfor {\n\t\t\trunning, _ := driver.IsRunning(vmxPath)\n\t\t\tif !running {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-shutdownTimer:\n\t\t\t\terr := errors.New(\"Timeout while waiting for machine to shut down.\")\n\t\t\t\tstate.Put(\"error\", err)\n\t\t\t\tui.Error(err.Error())\n\t\t\t\treturn multistep.ActionHalt\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif err := driver.Stop(vmxPath); err != nil {\n\t\t\terr := fmt.Errorf(\"Error stopping VM: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t}\n\n\tui.Message(\"Waiting for VMware to clean up after itself...\")\n\tlockRegex := regexp.MustCompile(`(?i)\\.lck$`)\n\ttimer := time.After(15 * time.Second)\nLockWaitLoop:\n\tfor {\n\t\tfiles, err := dir.ListFiles()\n\t\tif err == nil {\n\t\t\tvar locks []string\n\t\t\tfor _, file := range files {\n\t\t\t\tif lockRegex.MatchString(file) {\n\t\t\t\t\tlocks = append(locks, file)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(locks) == 0 {\n\t\t\t\tlog.Println(\"No more lock files found. VMware is clean.\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(locks) == 1 && strings.HasSuffix(locks[0], \".vmx.lck\") {\n\t\t\t\tlog.Println(\"Only waiting on VMX lock. VMware is clean.\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"Waiting on lock files: %#v\", locks)\n\t\t}\n\n\t\tselect {\n\t\tcase <-timer:\n\t\t\tlog.Println(\"Reached timeout on waiting for clean VMware. Assuming clean.\")\n\t\t\tbreak LockWaitLoop\n\t\tcase <-time.After(1 * time.Second):\n\t\t}\n\t}\n\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Windows takes a while to yield control of the files when the\n\t\t\/\/ process is exiting. We just sleep here. In the future, it'd be\n\t\t\/\/ nice to find a better solution to this.\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\tlog.Println(\"VM shut down.\")\n\treturn multistep.ActionContinue\n}\n\nfunc (s *stepShutdown) Cleanup(state multistep.StateBag) {}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\npackage cmdcreate\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"arete\/pkg\/utils\"\n\n\t\"github.com\/manifoldco\/promptui\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/viper\"\n\t\"gopkg.in\/yaml.v3\"\n)\n\nvar currentRegions = []string{\"us-central1\", \"us-east1\", \"northamerica-northeast1\",\t\"northamerica-northeast2\",\t \"europe-north1\",\t\"europe-west1\",\t\"europe-west3\",\t\"australia-southeast1\",\t\"australia-southeast2\",\t\"asia-northeast1\",\t\"asia-northeast2\"}\n\ntype createSteps struct {\n\tSteps []step `yaml:\"steps\"`\n}\n\ntype step struct {\n\tStep string `yaml:\"step\"`\n}\n\n\/\/ Does the step exist in the .create yaml file\nfunc (cs *createSteps) stepExists(step string) bool {\n\tfor _, stp := range cs.Steps {\n\t\tif stp.Step == step {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ createCmd represents the create command\nfunc CmdcreateRun(instanceName string, region string, project string, billing string) {\n\tregionFound := false\n\tregionMsg := \"Unsupported region. Please choose one of: \\n\"\n\n\tfor i := 0; i < len(currentRegions); i++ {\n\t\tregionMsg += \" - \" + currentRegions[i] + \"\\n\"\n\t\tif region == currentRegions[i] {\n\t\t\tregionFound = true\n\t\t}\n\t}\n\n\tif !regionFound {\n\t\tlog.Fatal().Msg(regionMsg)\n\t}\n\n\t\/\/ check to see if the .create yaml file exists, if not create it\n\t\/\/ The .create file keeps track of steps that have already been completed for repeated calls to the create function.\n\tcreateStepsFile := filepath.Join(viper.GetString(\"cache\"), \".create\")\n\tcreateSteps := createSteps{}\n\n\tif _, err := os.Stat(createStepsFile); err != nil {\n\t\tcreateYaml := \"steps:\\n\"\n\n\t\tutils.WriteToCache(&createYaml, \".create\", false)\n\t} else {\n\t\tstepsFileContents, err := os.ReadFile(createStepsFile)\n\n\t\tif err == nil {\n\t\t\tyaml.Unmarshal(stepsFileContents, &createSteps)\n\t\t}\n\t}\n\n\t\/\/ project flag not provided to create a random project name based on the cluster name and random string\n\tif project == \"\" {\n\t\tproject = instanceName + \"-\" + utils.RandomString(5)\n\t\tlog.Info().Msgf(\"Project name will be set to: %s\", project)\n\t}\n\n\t\/\/ Test if project exists\n\trtr, err := utils.CallCommand(utils.Gcloud, []string{\"projects\", \"list\", \"--filter=projectId:\" + project , \"--format=\\\"yaml\\\"\"}, false)\n\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"\")\n\t}\n\n\t\/\/ If no project exists (or the user doesn't have access to it) let's create one\n\tif len(string(rtr)) == 0 {\n\t\t\/\/ billing flag not used, pull billing accounts and prompt the user to select one\n\t\tif billing == \"\" {\n\t\t\tbilling, err = selectGcloudPrompt([]string{\"alpha\", \"billing\", \"accounts\", \"list\", \"--format=value[separator=' - '](NAME, ACCOUNT_ID)\"}, \"billing account\")\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(\"Unable to generate billing prompt\")\n\t\t\t}\n\t\t}\n\n\t\terr := createProject(project, billing)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tcmdArgs := []string{\"beta\", \"billing\", \"projects\", \"link\", project, \"--billing-account\", billing}\n\n\t\tbillRes, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to assign billing account to project: \" + string(billRes))\n\t\t}\n\t}\n\n\t\/\/ If the GCP services have not already been enabled by the cli then let's do it.\n\tif !createSteps.stepExists(\"services\") {\n\t\tlog.Info().Msg(\"Enabling required services...\")\n\n\t\tcmdArgs := []string{\"services\", \"enable\", \"krmapihosting.googleapis.com\", \"container.googleapis.com\", \"cloudresourcemanager.googleapis.com\", \"cloudbilling.googleapis.com\", \"--project=\" + project}\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tsaveStep(\"services\")\n\t}\n\n\tnetworkConfig := map[string]interface{}{\"name\": \"kcc-controller\", \"subnet-name\": \"kcc-regional-subnet\", \"cidr\": \"192.168.0.0\/16\"}\n\n\tif viper.InConfig(\"network\") {\n\t\tnetworkConfig = viper.GetStringMap(\"network\")\n\t}\n\n\t\/\/ If the VPC has not already been created by the cli then let's do it\n\tif !createSteps.stepExists(\"network\") {\n\t\t\/\/ BUT first let's check to make sure that the network with the same name doesn't already exist\n\t\tcmdArgs := []string{\"compute\", \"networks\", \"list\", \"--project=\" + project, \"--filter=name:\" + networkConfig[\"name\"].(string), \"--format=\\\"yaml\\\"\"}\n\n\t\tret, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t}\n\n\t\tif len(string(ret)) == 0 {\n\t\t\tcmdArgs = []string{\"compute\", \"networks\", \"create\", networkConfig[\"name\"].(string), \"--project=\" + project, \"--subnet-mode=custom\"}\n\n\t\t\tlog.Info().Msg(\"Creating Network...\")\n\n\t\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t\t}\n\n\t\t\tsaveStep(\"network\")\n\t\t} else {\n\t\t\tsaveStep(\"network\")\n\t\t}\n\t}\n\n\t\/\/ If the subnet on the VPC has not already been created by the cli then let's do it\n\tif !createSteps.stepExists(\"subnet\") {\n\t\t\/\/ BUT first let's check to make sure that a subnet with the same name doesn't already exist\n\t\tcmdArgs := []string{\"compute\", \"networks\", \"subnets\", \"list\", \"--project=\" + project, \"--filter=name:\" + networkConfig[\"subnet-name\"].(string), \"--format=\\\"yaml\\\"\"}\n\n\t\tret, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t}\n\n\t\tif len(string(ret)) == 0 || strings.Contains(strings.ToLower(string(ret)), \"warning\") {\n\t\t\tcmdArgs = []string{\"compute\", \"networks\", \"subnets\", \"create\", networkConfig[\"subnet-name\"].(string), \"--network=\" + networkConfig[\"name\"].(string), \"--range=\" + networkConfig[\"cidr\"].(string), \"--enable-private-ip-google-access\", \"--region=\" + region, \"--project=\" + project}\n\n\t\t\tlog.Info().Msg(\"Creating subnet....\")\n\n\t\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t\t}\n\n\t\t\tsaveStep(\"subnet\")\n\t\t} else { \n\t\t\tsaveStep(\"subnet\")\n\t\t}\n\t}\n\n\t\/\/ If config controller hasn't already been setup by the cli then let's do it\n\tif !createSteps.stepExists(\"config-controller\") {\n\t\tcmdArgs := []string{\"anthos\", \"config\", \"controller\", \"create\", instanceName, \"--location=\" + region, \"--network=\" + networkConfig[\"name\"].(string), \"--subnet=\" + networkConfig[\"subnet-name\"].(string), \"--project=\" + project}\n\n\t\tlog.Info().Msg(\"Creating Config Controller Cluster....\")\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tsaveStep(\"config-controller\")\n\t}\n\n\t\/\/ Adding service account to the owners role\n\tif !createSteps.stepExists(\"add-policy\") {\n\t\tcmdArgs := []string{\"container\", \"clusters\", \"get-credentials\", \"krmapihost-\" + instanceName, \"--region=\" + region, \"--project=\" + project}\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to get configconnectorcontext\")\n\t\t}\n\n\t\tcmdArgs = []string{\"get\", \"ConfigConnectorContext\", \"-n\", \"config-control\", \"-o\", \"jsonpath='{.items[0].spec.googleServiceAccount}'\"}\n\n\t\tret, err := utils.CallCommand(utils.Kubectl, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to get configconnectorcontext\")\n\t\t}\n\n\t\tsa := strings.Replace(strings.Split(strings.Split(string(ret), \"@\")[0], \"'\")[1] + \"@\" + strings.Split(string(ret), \"@\")[1], \"'\", \"\",1)\n\n\t\tcmdArgs = []string{\"projects\", \"add-iam-policy-binding\", project, `--member=serviceAccount:` + sa, `--role=roles\/owner`, `--condition=None`}\n\n\t\tlog.Info().Msg(\"Add SA to roles\/owner role...\")\n\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to add roles\/owner to \" + sa + string(ret))\n\t\t}\n\n\t\tsaveStep(\"add-policy\")\n\t}\n\n\tlog.Info().Msg(\"Config Controller setup complete\")\n}\n\n\/\/ Create a project at either the org or at a folder level.\nfunc createProject(project string, billing string) error {\n\tvar folderId string\n\n\torgId, err := selectGcloudPrompt([]string{\"organizations\", \"list\", \"--format=value[separator=' - '](DISPLAY_NAME, ID)\"}, \"organization ID\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprompt := promptui.Select{\n\t\tLabel: \"Would you like the project to be created at the Org level or in a folder\",\n\t\tItems: []string{\"Organization Level\", \"Folder Level\"},\n\t}\n\n\t_, result, err := prompt.Run()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdArgs := []string{\"projects\", \"create\", project, \"--set-as-default\", \"--labels=created-with-arete=true\"}\n\n\tif result == \"Folder Level\" {\n\t\tfolderId, err = selectGcloudPrompt([]string{\"resource-manager\", \"folders\", \"list\", \"--organization=\"+orgId, \"--format=value[separator=' - '](DISPLAY_NAME, ID)\"}, \"folder\")\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmdArgs = append(cmdArgs, \"--folder=\"+folderId)\n\t} else {\n\t\tcmdArgs = append(cmdArgs, \"--organization=\"+orgId)\n\t}\n\n\t_, err = utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Generic select prompt from a gcloud list command\nfunc selectGcloudPrompt(args []string, label string) (string, error) {\n\tvar opts []string\n\n\tif viper.GetBool(\"verbose\") {\n\t\tlog.Debug().Msg(\"Calling gcloud \"+label+\" list\")\n\t}\n\n\tout, err := utils.CallCommand(utils.Gcloud, args, false)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsp := strings.Split(string(out), \"\\n\")\n\tsp = sp[:len(sp)-1] \/\/ pop off the last element as it's a blank newline element\n\n\tfor i := 0; i < len(sp); i++ {\n\t\topts = append(opts, sp[i])\n\t}\n\n\tprompt := promptui.Select{\n\t\tLabel: \"Choose a \" + label,\n\t\tItems: opts,\n\t}\n\n\t_, result, err := prompt.Run()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn result[strings.Index(result, \" - \")+3:], nil\n}\n\n\/\/ Save a step to the cache .create file for future tracking\nfunc saveStep(step string) error {\n\tstep = \"- step: \" + step + \"\\n\"\n\n\tif err := utils.WriteToCache(&step, \".create\", true); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>FIXED: create command not tracking multiple config controller creates<commit_after>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\npackage cmdcreate\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"arete\/pkg\/utils\"\n\n\t\"github.com\/manifoldco\/promptui\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/viper\"\n\t\"gopkg.in\/yaml.v3\"\n)\n\nvar currentRegions = []string{\"us-central1\", \"us-east1\", \"northamerica-northeast1\",\t\"northamerica-northeast2\",\t \"europe-north1\",\t\"europe-west1\",\t\"europe-west3\",\t\"australia-southeast1\",\t\"australia-southeast2\",\t\"asia-northeast1\",\t\"asia-northeast2\"}\n\ntype createSteps struct {\n\tClusters []clusters `yaml:\"clusters\"`\n}\n\ntype clusters struct{\n\tCluster string `yaml:\"cluster\"`\n\tSteps []string `yaml:\"\"`\n}\n\nfunc (cs *createSteps) clusterExists(cluster string) bool {\n\tfor _, cls := range cs.Clusters {\n\t\tif cls.Cluster == cluster {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Does the step exist in the .create yaml file\nfunc (cs *createSteps) stepExists(cluster string, step string) bool {\n\tfor _, cls := range cs.Clusters {\n\t\tif cls.Cluster == cluster {\n\t\t\tfor _, stp := range cls.Steps {\n\t\t\t\tif stp == step {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn false;\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ createCmd represents the create command\nfunc CmdcreateRun(instanceName string, region string, project string, billing string) {\n\tregionFound := false\n\tregionMsg := \"Unsupported region. Please choose one of: \\n\"\n\n\tfor i := 0; i < len(currentRegions); i++ {\n\t\tregionMsg += \" - \" + currentRegions[i] + \"\\n\"\n\t\tif region == currentRegions[i] {\n\t\t\tregionFound = true\n\t\t}\n\t}\n\n\tif !regionFound {\n\t\tlog.Fatal().Msg(regionMsg)\n\t}\n\n\t\/\/ project flag not provided to create a random project name based on the cluster name and random string\n\tif project == \"\" {\n\t\tproject = instanceName + \"-\" + utils.RandomString(5)\n\t\tlog.Info().Msgf(\"Project name will be set to: %s\", project)\n\t}\n\n\t\/\/ check to see if the .create yaml file exists, if not create it\n\t\/\/ The .create file keeps track of steps that have already been completed for repeated calls to the create function.\n\tcreateStepsFile := filepath.Join(viper.GetString(\"cache\"), \".create\")\n\tcreateSteps := createSteps{}\n\n\tif _, err := os.Stat(createStepsFile); err != nil {\n\t\tcreateYaml := \"clusters:\\n - cluster: \" + instanceName + \"\\n steps:\\n\"\n\n\t\tutils.WriteToCache(&createYaml, \".create\", false)\n\t} else {\n\t\tstepsFileContents, err := os.ReadFile(createStepsFile)\n\n\t\tif err == nil {\n\t\t\tyaml.Unmarshal(stepsFileContents, &createSteps)\n\t\t}\n\n\t\tif(!createSteps.clusterExists(instanceName)) {\n\t\t\tcreateYaml := \" - cluster: \" + instanceName + \"\\n steps:\\n\"\n\n\t\t\tif err := utils.WriteToCache(&createYaml, \".create\", true); err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t\t}\n\n\t\t\tstepsFileContents, err := os.ReadFile(createStepsFile)\n\n\t\t\tif err == nil {\n\t\t\t\tyaml.Unmarshal(stepsFileContents, &createSteps)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Test if project exists\n\trtr, err := utils.CallCommand(utils.Gcloud, []string{\"projects\", \"list\", \"--filter=projectId:\" + project , \"--format=\\\"yaml\\\"\"}, false)\n\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"\")\n\t}\n\n\t\/\/ If no project exists (or the user doesn't have access to it) let's create one\n\tif len(string(rtr)) == 0 {\n\t\t\/\/ billing flag not used, pull billing accounts and prompt the user to select one\n\t\tif billing == \"\" {\n\t\t\tbilling, err = selectGcloudPrompt([]string{\"alpha\", \"billing\", \"accounts\", \"list\", \"--format=value[separator=' - '](NAME, ACCOUNT_ID)\"}, \"billing account\")\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(\"Unable to generate billing prompt\")\n\t\t\t}\n\t\t}\n\n\t\terr := createProject(project, billing)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tcmdArgs := []string{\"beta\", \"billing\", \"projects\", \"link\", project, \"--billing-account\", billing}\n\n\t\tbillRes, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to assign billing account to project: \" + string(billRes))\n\t\t}\n\t}\n\n\t\/\/ If the GCP services have not already been enabled by the cli then let's do it.\n\tif !createSteps.stepExists(instanceName, \"services\") {\n\t\tlog.Info().Msg(\"Enabling required services...\")\n\n\t\tcmdArgs := []string{\"services\", \"enable\", \"krmapihosting.googleapis.com\", \"container.googleapis.com\", \"cloudresourcemanager.googleapis.com\", \"cloudbilling.googleapis.com\", \"--project=\" + project}\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tsaveStep(instanceName, \"services\")\n\t}\n\n\tnetworkConfig := map[string]interface{}{\"name\": \"kcc-controller\", \"subnet-name\": \"kcc-regional-subnet\", \"cidr\": \"192.168.0.0\/16\"}\n\n\tif viper.InConfig(\"network\") {\n\t\tnetworkConfig = viper.GetStringMap(\"network\")\n\t}\n\n\t\/\/ If the VPC has not already been created by the cli then let's do it\n\tif !createSteps.stepExists(instanceName, \"network\") {\n\t\t\/\/ BUT first let's check to make sure that the network with the same name doesn't already exist\n\t\tcmdArgs := []string{\"compute\", \"networks\", \"list\", \"--project=\" + project, \"--filter=name:\" + networkConfig[\"name\"].(string), \"--format=\\\"yaml\\\"\"}\n\n\t\tret, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t}\n\n\t\tif len(string(ret)) == 0 {\n\t\t\tcmdArgs = []string{\"compute\", \"networks\", \"create\", networkConfig[\"name\"].(string), \"--project=\" + project, \"--subnet-mode=custom\"}\n\n\t\t\tlog.Info().Msg(\"Creating Network...\")\n\n\t\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t\t}\n\n\t\t\tsaveStep(instanceName, \"network\")\n\t\t} else {\n\t\t\tsaveStep(instanceName, \"network\")\n\t\t}\n\t}\n\n\t\/\/ If the subnet on the VPC has not already been created by the cli then let's do it\n\tif !createSteps.stepExists(instanceName, \"subnet\") {\n\t\t\/\/ BUT first let's check to make sure that a subnet with the same name doesn't already exist\n\t\tcmdArgs := []string{\"compute\", \"networks\", \"subnets\", \"list\", \"--project=\" + project, \"--filter=name:\" + networkConfig[\"subnet-name\"].(string), \"--format=\\\"yaml\\\"\"}\n\n\t\tret, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t}\n\n\t\tif len(string(ret)) == 0 || strings.Contains(strings.ToLower(string(ret)), \"warning\") {\n\t\t\tcmdArgs = []string{\"compute\", \"networks\", \"subnets\", \"create\", networkConfig[\"subnet-name\"].(string), \"--network=\" + networkConfig[\"name\"].(string), \"--range=\" + networkConfig[\"cidr\"].(string), \"--enable-private-ip-google-access\", \"--region=\" + region, \"--project=\" + project}\n\n\t\t\tlog.Info().Msg(\"Creating subnet....\")\n\n\t\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal().Err(err).Msg(string(ret))\n\t\t\t}\n\n\t\t\tsaveStep(instanceName, \"subnet\")\n\t\t} else { \n\t\t\tsaveStep(instanceName, \"subnet\")\n\t\t}\n\t}\n\n\t\/\/ If config controller hasn't already been setup by the cli then let's do it\n\tif !createSteps.stepExists(instanceName, \"config-controller\") {\n\t\tcmdArgs := []string{\"anthos\", \"config\", \"controller\", \"create\", instanceName, \"--location=\" + region, \"--network=\" + networkConfig[\"name\"].(string), \"--subnet=\" + networkConfig[\"subnet-name\"].(string), \"--project=\" + project}\n\n\t\tlog.Info().Msg(\"Creating Config Controller Cluster....\")\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"\")\n\t\t}\n\n\t\tsaveStep(instanceName, \"config-controller\")\n\t}\n\n\t\/\/ Adding service account to the owners role\n\tif !createSteps.stepExists(instanceName, \"add-policy\") {\n\t\tcmdArgs := []string{\"container\", \"clusters\", \"get-credentials\", \"krmapihost-\" + instanceName, \"--region=\" + region, \"--project=\" + project}\n\n\t\t_, err := utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to get configconnectorcontext\")\n\t\t}\n\n\t\tcmdArgs = []string{\"get\", \"ConfigConnectorContext\", \"-n\", \"config-control\", \"-o\", \"jsonpath='{.items[0].spec.googleServiceAccount}'\"}\n\n\t\tret, err := utils.CallCommand(utils.Kubectl, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to get configconnectorcontext\")\n\t\t}\n\n\t\tsa := strings.Replace(strings.Split(strings.Split(string(ret), \"@\")[0], \"'\")[1] + \"@\" + strings.Split(string(ret), \"@\")[1], \"'\", \"\",1)\n\n\t\tcmdArgs = []string{\"projects\", \"add-iam-policy-binding\", project, `--member=serviceAccount:` + sa, `--role=roles\/owner`, `--condition=None`}\n\n\t\tlog.Info().Msg(\"Add SA to roles\/owner role...\")\n\t\tret, err = utils.CallCommand(utils.Gcloud, cmdArgs, false)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal().Err(err).Msg(\"Unable to add roles\/owner to \" + sa + string(ret))\n\t\t}\n\n\t\tsaveStep(instanceName, \"add-policy\")\n\t}\n\n\tlog.Info().Msg(\"Config Controller setup complete\")\n}\n\n\/\/ Create a project at either the org or at a folder level.\nfunc createProject(project string, billing string) error {\n\tvar folderId string\n\n\torgId, err := selectGcloudPrompt([]string{\"organizations\", \"list\", \"--format=value[separator=' - '](DISPLAY_NAME, ID)\"}, \"organization ID\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprompt := promptui.Select{\n\t\tLabel: \"Would you like the project to be created at the Org level or in a folder\",\n\t\tItems: []string{\"Organization Level\", \"Folder Level\"},\n\t}\n\n\t_, result, err := prompt.Run()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdArgs := []string{\"projects\", \"create\", project, \"--set-as-default\", \"--labels=created-with-arete=true\"}\n\n\tif result == \"Folder Level\" {\n\t\tfolderId, err = selectGcloudPrompt([]string{\"resource-manager\", \"folders\", \"list\", \"--organization=\"+orgId, \"--format=value[separator=' - '](DISPLAY_NAME, ID)\"}, \"folder\")\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmdArgs = append(cmdArgs, \"--folder=\"+folderId)\n\t} else {\n\t\tcmdArgs = append(cmdArgs, \"--organization=\"+orgId)\n\t}\n\n\t_, err = utils.CallCommand(utils.Gcloud, cmdArgs, true)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Generic select prompt from a gcloud list command\nfunc selectGcloudPrompt(args []string, label string) (string, error) {\n\tvar opts []string\n\n\tif viper.GetBool(\"verbose\") {\n\t\tlog.Debug().Msg(\"Calling gcloud \"+label+\" list\")\n\t}\n\n\tout, err := utils.CallCommand(utils.Gcloud, args, false)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsp := strings.Split(string(out), \"\\n\")\n\tsp = sp[:len(sp)-1] \/\/ pop off the last element as it's a blank newline element\n\n\tfor i := 0; i < len(sp); i++ {\n\t\topts = append(opts, sp[i])\n\t}\n\n\tprompt := promptui.Select{\n\t\tLabel: \"Choose a \" + label,\n\t\tItems: opts,\n\t}\n\n\t_, result, err := prompt.Run()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn result[strings.Index(result, \" - \")+3:], nil\n}\n\n\/\/ Save a step to the cache .create file for future tracking\nfunc saveStep(cluster string, step string) error {\n\tstep = \" - \" + step + \"\\n\"\n\n\tif err := utils.WriteToCache(&step, \".create\", true); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 by the contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"sigs.k8s.io\/aws-iam-authenticator\/pkg\/metrics\"\n\t\"sigs.k8s.io\/aws-iam-authenticator\/pkg\/token\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/endpoints\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar verifyCmd = &cobra.Command{\n\tUse: \"verify\",\n\tShort: \"Verify a token for debugging purpose\",\n\tLong: ``,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\ttok := viper.GetString(\"token\")\n\t\toutput := viper.GetString(\"output\")\n\t\tclusterID := viper.GetString(\"clusterID\")\n\t\tpartition := viper.GetString(\"partition\")\n\n\t\tif tok == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"error: token not specified\\n\")\n\t\t\tcmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif clusterID == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"error: cluster ID not specified\\n\")\n\t\t\tcmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tid, err := token.NewVerifier(clusterID, partition).Verify(tok)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not verify token: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif output == \"json\" {\n\t\t\tvalue, err := json.MarshalIndent(id, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"could not unmarshal token: %v\\n\", err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\\n\", value)\n\t\t} else {\n\t\t\tfmt.Printf(\"%+v\\n\", id)\n\t\t}\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(verifyCmd)\n\tmetrics.InitMetrics(prometheus.DefaultRegisterer)\n\tverifyCmd.Flags().StringP(\"token\", \"t\", \"\", \"Token to verify\")\n\tverifyCmd.Flags().StringP(\"output\", \"o\", \"\", \"Output format. Only `json` is supported currently.\")\n\tviper.BindPFlag(\"token\", verifyCmd.Flags().Lookup(\"token\"))\n\tviper.BindPFlag(\"output\", verifyCmd.Flags().Lookup(\"output\"))\n\n\tpartitionKeys := []string{}\n\tfor _, p := range endpoints.DefaultPartitions() {\n\t\tpartitionKeys = append(partitionKeys, p.ID())\n\t}\n\n\tverifyCmd.Flags().String(\"partition\",\n\t\tendpoints.AwsPartitionID,\n\t\tfmt.Sprintf(\"The AWS partition. Must be one of: %v\", partitionKeys))\n\tviper.BindPFlag(\"partition\", verifyCmd.Flags().Lookup(\"partition\"))\n\n}\n<commit_msg>Remove duplicate InitMetrics<commit_after>\/*\nCopyright 2017 by the contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"sigs.k8s.io\/aws-iam-authenticator\/pkg\/metrics\"\n\t\"sigs.k8s.io\/aws-iam-authenticator\/pkg\/token\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/endpoints\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar verifyCmd = &cobra.Command{\n\tUse: \"verify\",\n\tShort: \"Verify a token for debugging purpose\",\n\tLong: ``,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\ttok := viper.GetString(\"token\")\n\t\toutput := viper.GetString(\"output\")\n\t\tclusterID := viper.GetString(\"clusterID\")\n\t\tpartition := viper.GetString(\"partition\")\n\n\t\tif tok == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"error: token not specified\\n\")\n\t\t\tcmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif clusterID == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"error: cluster ID not specified\\n\")\n\t\t\tcmd.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tid, err := token.NewVerifier(clusterID, partition).Verify(tok)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not verify token: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif output == \"json\" {\n\t\t\tvalue, err := json.MarshalIndent(id, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"could not unmarshal token: %v\\n\", err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\\n\", value)\n\t\t} else {\n\t\t\tfmt.Printf(\"%+v\\n\", id)\n\t\t}\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(verifyCmd)\n\tverifyCmd.Flags().StringP(\"token\", \"t\", \"\", \"Token to verify\")\n\tverifyCmd.Flags().StringP(\"output\", \"o\", \"\", \"Output format. Only `json` is supported currently.\")\n\tviper.BindPFlag(\"token\", verifyCmd.Flags().Lookup(\"token\"))\n\tviper.BindPFlag(\"output\", verifyCmd.Flags().Lookup(\"output\"))\n\n\tpartitionKeys := []string{}\n\tfor _, p := range endpoints.DefaultPartitions() {\n\t\tpartitionKeys = append(partitionKeys, p.ID())\n\t}\n\n\tverifyCmd.Flags().String(\"partition\",\n\t\tendpoints.AwsPartitionID,\n\t\tfmt.Sprintf(\"The AWS partition. Must be one of: %v\", partitionKeys))\n\tviper.BindPFlag(\"partition\", verifyCmd.Flags().Lookup(\"partition\"))\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"flag\"\n\tatx \"github.com\/craigulmer\/airlinetracks\"\n)\n\n\nfunc readFile(fname string, carrier string, region string, verbose bool){\n\n\tvar trdr *atx.TrackReader\n\tvar err error\n\n\tif carrier!=\"\" && region!=\"\" {\n\t\tpanic(\"Currently, can only filter on ONE thing\")\n\t}\n\n\n\tif carrier==\"\" {\n\n\t\tif region==\"\" {\n\t\t\ttrdr, err = atx.Open(fname)\n\t\t} else {\n\t\t\ttrdr, err = atx.OpenWithFilter(fname, atx.NewFilterByRegion(region))\n\t\t}\n\t} else {\n\t\ttrdr, err = atx.OpenWithFilter(fname, atx.NewFilterByCarrier(carrier))\n\t}\n\tif err!=nil {\n\t\tfmt.Println(\"Error opening \"+fname)\n\t\treturn\n\t}\n\n\tfor {\n\t\ttrack,_ := trdr.GetNext()\n\t\tif track==nil { break }\n\t\tif verbose {\n\t\t\tfmt.Println(\"Track: \"+track.ToString())\t\t\n\t\t}\n\t}\n\n\t\/\/Stats are maintained in the reader\n\tfmt.Println(\"ValidTracks:\",trdr.ValidTracks,\"TotalTracks:\",trdr.TotalTracks, \"CarrierFilter: '\"+carrier+\"'\")\n\ttrdr.Close()\n}\n\nfunc main() {\n\n\tvar file_src = flag.String(\"file\", \"2014-10-28.txt.gz\", \"Input File\")\n\tvar carrier = flag.String(\"carrier\", \"\", \"Filter to a specific carrier\")\n\tvar region = flag.String(\"region\",\"\", \"Filter to flights that crossed a particular region\")\n\tvar verbose = flag.Bool(\"verbose\", false, \"Verbose\")\n\tflag.Parse()\n\n\treadFile(*file_src, *carrier, *region, *verbose,)\n\n}\n<commit_msg>ENH: Minor cleanup of op description<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"flag\"\n\tatx \"github.com\/craigulmer\/airlinetracks\"\n)\n\n\nfunc readFile(fname string, carrier string, region string, verbose bool){\n\n\tvar trdr *atx.TrackReader\n\tvar err error\n\tvar op_name string\n\tif carrier!=\"\" && region!=\"\" {\n\t\tpanic(\"Currently, can only filter on ONE thing\")\n\t}\n\n\tswitch {\n\tcase region!=\"\": \n\t\ttrdr, err = atx.OpenWithFilter(fname, atx.NewFilterByRegion(region));\n\t\top_name=\"Region: \"+region\n\tcase carrier!=\"\": \n\t\ttrdr, err = atx.OpenWithFilter(fname, atx.NewFilterByCarrier(carrier)); \n\t\top_name=\"Carrier: \"+carrier\n\tdefault: \n\t\ttrdr, err = atx.Open(fname)\n\t}\n\tif err!=nil {\n\t\tfmt.Println(\"Error opening \"+fname)\n\t\treturn\n\t}\n\n\t\/\/Walk through each track until nothing left\n\tfor {\n\t\ttrack,_ := trdr.GetNext()\n\t\tif track==nil { break }\n\t\tif verbose {\n\t\t\tfmt.Println(\"Track: \"+track.ToString())\t\t\n\t\t}\n\t}\n\n\t\/\/Stats are maintained in the reader\n\tfmt.Println(\"ValidTracks:\",trdr.ValidTracks,\"TotalTracks:\",trdr.TotalTracks, op_name)\n\ttrdr.Close()\n}\n\nfunc main() {\n\n\tvar file_src = flag.String(\"file\", \"2014-10-28.txt.gz\", \"Input File\")\n\tvar carrier = flag.String(\"carrier\", \"\", \"Filter to a specific carrier\")\n\tvar region = flag.String(\"region\",\"\", \"Filter to flights that crossed a particular region\")\n\tvar verbose = flag.Bool(\"verbose\", false, \"Verbose\")\n\tflag.Parse()\n\n\treadFile(*file_src, *carrier, *region, *verbose,)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package desec\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/diff\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/printer\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/providers\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\n\/*\ndesec API DNS provider:\nInfo required in `creds.json`:\n - auth-token\n*\/\n\n\/\/ NewDeSec creates the provider.\nfunc NewDeSec(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tc := &api{}\n\tc.creds.token = m[\"auth-token\"]\n\tif c.creds.token == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing deSEC auth-token\")\n\t}\n\n\t\/\/ Get a domain to validate authentication\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nvar features = providers.DocumentationNotes{\n\tproviders.DocDualHost: providers.Unimplemented(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.CanUseAlias: providers.Cannot(),\n\tproviders.CanUseSRV: providers.Can(),\n\tproviders.CanUseDS: providers.Can(),\n\tproviders.CanUseSSHFP: providers.Can(),\n\tproviders.CanUseCAA: providers.Can(),\n\tproviders.CanUseTLSA: providers.Can(),\n\tproviders.CanUsePTR: providers.Unimplemented(),\n\tproviders.CanGetZones: providers.Can(),\n\tproviders.CanAutoDNSSEC: providers.Cannot(),\n}\n\nvar defaultNameServerNames = []string{\n\t\"ns1.desec.io\",\n\t\"ns2.desec.org\",\n}\n\nfunc init() {\n\tproviders.RegisterDomainServiceProviderType(\"DESEC\", NewDeSec, features)\n}\n\n\/\/ GetNameservers returns the nameservers for a domain.\nfunc (c *api) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\treturn models.ToNameservers(defaultNameServerNames)\n}\n\nfunc (c *api) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\texisting, err := c.GetZoneRecords(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmodels.PostProcessRecords(existing)\n\tclean := PrepFoundRecords(existing)\n\tvar minTTL uint32\n\tif ttl, ok := c.domainIndex[dc.Name]; !ok {\n\t\tminTTL = 3600\n\t} else {\n\t\tminTTL = ttl\n\t}\n\tPrepDesiredRecords(dc, minTTL)\n\treturn c.GenerateDomainCorrections(dc, clean)\n}\n\n\/\/ GetZoneRecords gets the records of a zone and returns them in RecordConfig format.\nfunc (c *api) GetZoneRecords(domain string) (models.Records, error) {\n\trecords, err := c.getRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert them to DNScontrol's native format:\n\texistingRecords := []*models.RecordConfig{}\n\tfor _, rr := range records {\n\t\texistingRecords = append(existingRecords, nativeToRecords(rr, domain)...)\n\t}\n\treturn existingRecords, nil\n}\n\n\/\/ EnsureDomainExists returns an error if domain doesn't exist.\nfunc (c *api) EnsureDomainExists(domain string) error {\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ domain already exists\n\tif _, ok := c.domainIndex[domain]; ok {\n\t\treturn nil\n\t}\n\treturn c.createDomain(domain)\n}\n\n\/\/ PrepFoundRecords munges any records to make them compatible with\n\/\/ this provider. Usually this is a no-op.\nfunc PrepFoundRecords(recs models.Records) models.Records {\n\t\/\/ If there are records that need to be modified, removed, etc. we\n\t\/\/ do it here. Usually this is a no-op.\n\treturn recs\n}\n\n\/\/ PrepDesiredRecords munges any records to best suit this provider.\nfunc PrepDesiredRecords(dc *models.DomainConfig, minTTL uint32) {\n\t\/\/ Sort through the dc.Records, eliminate any that can't be\n\t\/\/ supported; modify any that need adjustments to work with the\n\t\/\/ provider. We try to do minimal changes otherwise it gets\n\t\/\/ confusing.\n\n\tdc.Punycode()\n\trecordsToKeep := make([]*models.RecordConfig, 0, len(dc.Records))\n\tfor _, rec := range dc.Records {\n\t\tif rec.Type == \"ALIAS\" {\n\t\t\t\/\/ deSEC does not permit ALIAS records, just ignore it\n\t\t\tprinter.Warnf(\"deSEC does not support alias records\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tif rec.TTL < minTTL {\n\t\t\tif rec.Type != \"NS\" {\n\t\t\t\tprinter.Warnf(\"Please contact support@desec.io if you need ttls < %d. Setting ttl of %s type %s from %d to %d\\n\", minTTL, rec.GetLabelFQDN(), rec.Type, rec.TTL, minTTL)\n\t\t\t}\n\t\t\trec.TTL = minTTL\n\t\t}\n\t\trecordsToKeep = append(recordsToKeep, rec)\n\t}\n\tdc.Records = recordsToKeep\n}\n\n\/\/ GenerateDomainCorrections takes the desired and existing records\n\/\/ and produces a Correction list. The correction list is simply\n\/\/ a list of functions to call to actually make the desired\n\/\/ correction, and a message to output to the user when the change is\n\/\/ made.\nfunc (c *api) GenerateDomainCorrections(dc *models.DomainConfig, existing models.Records) ([]*models.Correction, error) {\n\n\tvar corrections = []*models.Correction{}\n\n\t\/\/ diff existing vs. current.\n\tdiffer := diff.New(dc)\n\tkeysToUpdate := differ.ChangedGroups(existing)\n\tif len(keysToUpdate) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdesiredRecords := dc.Records.GroupedByKey()\n\tvar rrs []resourceRecord\n\tbuf := &bytes.Buffer{}\n\t\/\/ For any key with an update, delete or replace those records.\n\tfor label := range keysToUpdate {\n\t\tif _, ok := desiredRecords[label]; !ok {\n\t\t\t\/\/we could not find this RecordKey in the desiredRecords\n\t\t\t\/\/this means it must be deleted\n\t\t\tfor i, msg := range keysToUpdate[label] {\n\t\t\t\tif i == 0 {\n\t\t\t\t\trc := resourceRecord{}\n\t\t\t\t\trc.Type = label.Type\n\t\t\t\t\trc.Records = make([]string, 0) \/\/ empty array of records should delete this rrset\n\t\t\t\t\trc.TTL = 3600\n\t\t\t\t\tshortname := dnsutil.TrimDomainName(label.NameFQDN, dc.Name)\n\t\t\t\t\tif shortname == \"@\" {\n\t\t\t\t\t\tshortname = \"\"\n\t\t\t\t\t}\n\t\t\t\t\trc.Subname = shortname\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t\trrs = append(rrs, rc)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/just add the message\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/it must be an update or create, both can be done with the same api call.\n\t\t\tns := recordsToNative(desiredRecords[label], dc.Name)\n\t\t\tif len(ns) > 1 {\n\t\t\t\tpanic(\"we got more than one resource record to create \/ modify\")\n\t\t\t}\n\t\t\tfor i, msg := range keysToUpdate[label] {\n\t\t\t\tif i == 0 {\n\t\t\t\t\trrs = append(rrs, ns[0])\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/noop just for printing the additional messages\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tvar msg string\n\tmsg = fmt.Sprintf(\"Changes:\\n%s\", buf)\n\tcorrections = append(corrections,\n\t\t&models.Correction{\n\t\t\tMsg: msg,\n\t\t\tF: func() error {\n\t\t\t\trc := rrs\n\t\t\t\terr := c.upsertRR(rc, dc.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\n\t\/\/ NB(tlim): This sort is just to make updates look pretty. It is\n\t\/\/ cosmetic. The risk here is that there may be some updates that\n\t\/\/ require a specific order (for example a delete before an add).\n\t\/\/ However the code doesn't seem to have such situation. All tests\n\t\/\/ pass. That said, if this breaks anything, the easiest fix might\n\t\/\/ be to just remove the sort.\n\tsort.Slice(corrections, func(i, j int) bool { return diff.CorrectionLess(corrections, i, j) })\n\n\treturn corrections, nil\n}\n<commit_msg>desec: Supports PTR records out of the box (#801)<commit_after>package desec\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/diff\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/pkg\/printer\"\n\t\"github.com\/StackExchange\/dnscontrol\/v3\/providers\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\n\/*\ndesec API DNS provider:\nInfo required in `creds.json`:\n - auth-token\n*\/\n\n\/\/ NewDeSec creates the provider.\nfunc NewDeSec(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tc := &api{}\n\tc.creds.token = m[\"auth-token\"]\n\tif c.creds.token == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing deSEC auth-token\")\n\t}\n\n\t\/\/ Get a domain to validate authentication\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nvar features = providers.DocumentationNotes{\n\tproviders.DocDualHost: providers.Unimplemented(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.CanUseAlias: providers.Cannot(),\n\tproviders.CanUseSRV: providers.Can(),\n\tproviders.CanUseDS: providers.Can(),\n\tproviders.CanUseSSHFP: providers.Can(),\n\tproviders.CanUseCAA: providers.Can(),\n\tproviders.CanUseTLSA: providers.Can(),\n\tproviders.CanUsePTR: providers.Can(),\n\tproviders.CanGetZones: providers.Can(),\n\tproviders.CanAutoDNSSEC: providers.Cannot(),\n}\n\nvar defaultNameServerNames = []string{\n\t\"ns1.desec.io\",\n\t\"ns2.desec.org\",\n}\n\nfunc init() {\n\tproviders.RegisterDomainServiceProviderType(\"DESEC\", NewDeSec, features)\n}\n\n\/\/ GetNameservers returns the nameservers for a domain.\nfunc (c *api) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\treturn models.ToNameservers(defaultNameServerNames)\n}\n\nfunc (c *api) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\texisting, err := c.GetZoneRecords(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmodels.PostProcessRecords(existing)\n\tclean := PrepFoundRecords(existing)\n\tvar minTTL uint32\n\tif ttl, ok := c.domainIndex[dc.Name]; !ok {\n\t\tminTTL = 3600\n\t} else {\n\t\tminTTL = ttl\n\t}\n\tPrepDesiredRecords(dc, minTTL)\n\treturn c.GenerateDomainCorrections(dc, clean)\n}\n\n\/\/ GetZoneRecords gets the records of a zone and returns them in RecordConfig format.\nfunc (c *api) GetZoneRecords(domain string) (models.Records, error) {\n\trecords, err := c.getRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert them to DNScontrol's native format:\n\texistingRecords := []*models.RecordConfig{}\n\tfor _, rr := range records {\n\t\texistingRecords = append(existingRecords, nativeToRecords(rr, domain)...)\n\t}\n\treturn existingRecords, nil\n}\n\n\/\/ EnsureDomainExists returns an error if domain doesn't exist.\nfunc (c *api) EnsureDomainExists(domain string) error {\n\tif err := c.fetchDomainList(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ domain already exists\n\tif _, ok := c.domainIndex[domain]; ok {\n\t\treturn nil\n\t}\n\treturn c.createDomain(domain)\n}\n\n\/\/ PrepFoundRecords munges any records to make them compatible with\n\/\/ this provider. Usually this is a no-op.\nfunc PrepFoundRecords(recs models.Records) models.Records {\n\t\/\/ If there are records that need to be modified, removed, etc. we\n\t\/\/ do it here. Usually this is a no-op.\n\treturn recs\n}\n\n\/\/ PrepDesiredRecords munges any records to best suit this provider.\nfunc PrepDesiredRecords(dc *models.DomainConfig, minTTL uint32) {\n\t\/\/ Sort through the dc.Records, eliminate any that can't be\n\t\/\/ supported; modify any that need adjustments to work with the\n\t\/\/ provider. We try to do minimal changes otherwise it gets\n\t\/\/ confusing.\n\n\tdc.Punycode()\n\trecordsToKeep := make([]*models.RecordConfig, 0, len(dc.Records))\n\tfor _, rec := range dc.Records {\n\t\tif rec.Type == \"ALIAS\" {\n\t\t\t\/\/ deSEC does not permit ALIAS records, just ignore it\n\t\t\tprinter.Warnf(\"deSEC does not support alias records\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tif rec.TTL < minTTL {\n\t\t\tif rec.Type != \"NS\" {\n\t\t\t\tprinter.Warnf(\"Please contact support@desec.io if you need ttls < %d. Setting ttl of %s type %s from %d to %d\\n\", minTTL, rec.GetLabelFQDN(), rec.Type, rec.TTL, minTTL)\n\t\t\t}\n\t\t\trec.TTL = minTTL\n\t\t}\n\t\trecordsToKeep = append(recordsToKeep, rec)\n\t}\n\tdc.Records = recordsToKeep\n}\n\n\/\/ GenerateDomainCorrections takes the desired and existing records\n\/\/ and produces a Correction list. The correction list is simply\n\/\/ a list of functions to call to actually make the desired\n\/\/ correction, and a message to output to the user when the change is\n\/\/ made.\nfunc (c *api) GenerateDomainCorrections(dc *models.DomainConfig, existing models.Records) ([]*models.Correction, error) {\n\n\tvar corrections = []*models.Correction{}\n\n\t\/\/ diff existing vs. current.\n\tdiffer := diff.New(dc)\n\tkeysToUpdate := differ.ChangedGroups(existing)\n\tif len(keysToUpdate) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdesiredRecords := dc.Records.GroupedByKey()\n\tvar rrs []resourceRecord\n\tbuf := &bytes.Buffer{}\n\t\/\/ For any key with an update, delete or replace those records.\n\tfor label := range keysToUpdate {\n\t\tif _, ok := desiredRecords[label]; !ok {\n\t\t\t\/\/we could not find this RecordKey in the desiredRecords\n\t\t\t\/\/this means it must be deleted\n\t\t\tfor i, msg := range keysToUpdate[label] {\n\t\t\t\tif i == 0 {\n\t\t\t\t\trc := resourceRecord{}\n\t\t\t\t\trc.Type = label.Type\n\t\t\t\t\trc.Records = make([]string, 0) \/\/ empty array of records should delete this rrset\n\t\t\t\t\trc.TTL = 3600\n\t\t\t\t\tshortname := dnsutil.TrimDomainName(label.NameFQDN, dc.Name)\n\t\t\t\t\tif shortname == \"@\" {\n\t\t\t\t\t\tshortname = \"\"\n\t\t\t\t\t}\n\t\t\t\t\trc.Subname = shortname\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t\trrs = append(rrs, rc)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/just add the message\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/it must be an update or create, both can be done with the same api call.\n\t\t\tns := recordsToNative(desiredRecords[label], dc.Name)\n\t\t\tif len(ns) > 1 {\n\t\t\t\tpanic(\"we got more than one resource record to create \/ modify\")\n\t\t\t}\n\t\t\tfor i, msg := range keysToUpdate[label] {\n\t\t\t\tif i == 0 {\n\t\t\t\t\trrs = append(rrs, ns[0])\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/noop just for printing the additional messages\n\t\t\t\t\tfmt.Fprintln(buf, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tvar msg string\n\tmsg = fmt.Sprintf(\"Changes:\\n%s\", buf)\n\tcorrections = append(corrections,\n\t\t&models.Correction{\n\t\t\tMsg: msg,\n\t\t\tF: func() error {\n\t\t\t\trc := rrs\n\t\t\t\terr := c.upsertRR(rc, dc.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\n\t\/\/ NB(tlim): This sort is just to make updates look pretty. It is\n\t\/\/ cosmetic. The risk here is that there may be some updates that\n\t\/\/ require a specific order (for example a delete before an add).\n\t\/\/ However the code doesn't seem to have such situation. All tests\n\t\/\/ pass. That said, if this breaks anything, the easiest fix might\n\t\/\/ be to just remove the sort.\n\tsort.Slice(corrections, func(i, j int) bool { return diff.CorrectionLess(corrections, i, j) })\n\n\treturn corrections, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This package implements a provisioner for Packer that executes\n\/\/ shell scripts within the remote machine.\npackage shell\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst DefaultRemotePath = \"\/tmp\/script.sh\"\n\ntype config struct {\n\t\/\/ An inline script to execute. Multiple strings are all executed\n\t\/\/ in the context of a single shell.\n\tInline []string\n\n\t\/\/ The shebang value used when running inline scripts.\n\tInlineShebang string `mapstructure:\"inline_shebang\"`\n\n\t\/\/ The local path of the shell script to upload and execute.\n\tScript string\n\n\t\/\/ An array of multiple scripts to run.\n\tScripts []string\n\n\t\/\/ An array of environment variables that will be injected before\n\t\/\/ your command(s) are executed.\n\tVars []string `mapstructure:\"environment_vars\"`\n\n\t\/\/ The remote path where the local shell script will be uploaded to.\n\t\/\/ This should be set to a writable file that is in a pre-existing directory.\n\tRemotePath string `mapstructure:\"remote_path\"`\n\n\t\/\/ The command used to execute the script. The '{{ .Path }}' variable\n\t\/\/ should be used to specify where the script goes, {{ .Vars }}\n\t\/\/ can be used to inject the environment_vars into the environment.\n\tExecuteCommand string `mapstructure:\"execute_command\"`\n\n\t\/\/ Packer configurations, these come from Packer itself\n\tPackerBuildName string `mapstructure:\"packer_build_name\"`\n\tPackerBuilderType string `mapstructure:\"packer_builder_type\"`\n\n\ttpl *common.Template\n}\n\ntype Provisioner struct {\n\tconfig config\n}\n\ntype ExecuteCommandTemplate struct {\n\tVars string\n\tPath string\n}\n\nfunc (p *Provisioner) Prepare(raws ...interface{}) error {\n\tmd, err := common.DecodeConfig(&p.config, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.config.tpl, err = common.NewTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\n\tif p.config.ExecuteCommand == \"\" {\n\t\tp.config.ExecuteCommand = \"chmod +x {{.Path}}; {{.Vars}} {{.Path}}\"\n\t}\n\n\tif p.config.Inline != nil && len(p.config.Inline) == 0 {\n\t\tp.config.Inline = nil\n\t}\n\n\tif p.config.InlineShebang == \"\" {\n\t\tp.config.InlineShebang = \"\/bin\/sh\"\n\t}\n\n\tif p.config.RemotePath == \"\" {\n\t\tp.config.RemotePath = DefaultRemotePath\n\t}\n\n\tif p.config.Scripts == nil {\n\t\tp.config.Scripts = make([]string, 0)\n\t}\n\n\tif p.config.Vars == nil {\n\t\tp.config.Vars = make([]string, 0)\n\t}\n\n\tif p.config.Script != \"\" && len(p.config.Scripts) > 0 {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Only one of script or scripts can be specified.\"))\n\t}\n\n\tif p.config.Script != \"\" {\n\t\tp.config.Scripts = []string{p.config.Script}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"inline_shebang\": &p.config.InlineShebang,\n\t\t\"script\": &p.config.Script,\n\t\t\"remote_path\": &p.config.RemotePath,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = p.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tsliceTemplates := map[string][]string{\n\t\t\"inline\": p.config.Inline,\n\t\t\"scripts\": p.config.Scripts,\n\t\t\"environment_vars\": p.config.Vars,\n\t}\n\n\tfor n, slice := range sliceTemplates {\n\t\tfor i, elem := range slice {\n\t\t\tvar err error\n\t\t\tslice[i], err = p.config.tpl.Process(elem, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\t\terrs, fmt.Errorf(\"Error processing %s[%d]: %s\", n, i, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(p.config.Scripts) == 0 && p.config.Inline == nil {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Either a script file or inline script must be specified.\"))\n\t} else if len(p.config.Scripts) > 0 && p.config.Inline != nil {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Only a script file or an inline script can be specified, not both.\"))\n\t}\n\n\tfor _, path := range p.config.Scripts {\n\t\tif _, err := os.Stat(path); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Bad script '%s': %s\", path, err))\n\t\t}\n\t}\n\n\t\/\/ Do a check for bad environment variables, such as '=foo', 'foobar'\n\tfor _, kv := range p.config.Vars {\n\t\tvs := strings.Split(kv, \"=\")\n\t\tif len(vs) != 2 || vs[0] == \"\" {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Environment variable not in format 'key=value': %s\", kv))\n\t\t}\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}\n\nfunc (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {\n\tscripts := make([]string, len(p.config.Scripts))\n\tcopy(scripts, p.config.Scripts)\n\n\t\/\/ If we have an inline script, then turn that into a temporary\n\t\/\/ shell script and use that.\n\tif p.config.Inline != nil {\n\t\ttf, err := ioutil.TempFile(\"\", \"packer-shell\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t}\n\t\tdefer os.Remove(tf.Name())\n\n\t\t\/\/ Set the path to the temporary file\n\t\tscripts = append(scripts, tf.Name())\n\n\t\t\/\/ Write our contents to it\n\t\twriter := bufio.NewWriter(tf)\n\t\twriter.WriteString(fmt.Sprintf(\"#!%s\\n\", p.config.InlineShebang))\n\t\tfor _, command := range p.config.Inline {\n\t\t\tif _, err := writer.WriteString(command + \"\\n\"); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := writer.Flush(); err != nil {\n\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t}\n\n\t\ttf.Close()\n\t}\n\n\t\/\/ Build our variables up by adding in the build name and builder type\n\tenvVars := make([]string, len(p.config.Vars)+2)\n\tenvVars[0] = \"PACKER_BUILD_NAME=\" + p.config.PackerBuildName\n\tenvVars[1] = \"PACKER_BUILDER_TYPE=\" + p.config.PackerBuilderType\n\tcopy(envVars[2:], p.config.Vars)\n\n\tfor _, path := range scripts {\n\t\tui.Say(fmt.Sprintf(\"Provisioning with shell script: %s\", path))\n\n\t\tlog.Printf(\"Opening %s for reading\", path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error opening shell script: %s\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tlog.Printf(\"Uploading %s => %s\", path, p.config.RemotePath)\n\t\terr = comm.Upload(p.config.RemotePath, f)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error uploading shell script: %s\", err)\n\t\t}\n\n\t\t\/\/ Close the original file since we copied it\n\t\tf.Close()\n\n\t\t\/\/ Flatten the environment variables\n\t\tflattendVars := strings.Join(envVars, \" \")\n\n\t\t\/\/ Compile the command\n\t\tcommand := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteCommandTemplate{\n\t\t\tVars: flattendVars,\n\t\t\tPath: p.config.RemotePath,\n\t\t})\n\n\t\tcmd := &packer.RemoteCmd{Command: command}\n\t\tlog.Printf(\"Executing command: %s\", cmd.Command)\n\t\tif err := cmd.StartWithUi(comm, ui); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed executing command: %s\", err)\n\t\t}\n\n\t\tif cmd.ExitStatus != 0 {\n\t\t\treturn fmt.Errorf(\"Script exited with non-zero exit status: %d\", cmd.ExitStatus)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>provisioner\/shell: tests passing and compiling<commit_after>\/\/ This package implements a provisioner for Packer that executes\n\/\/ shell scripts within the remote machine.\npackage shell\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst DefaultRemotePath = \"\/tmp\/script.sh\"\n\ntype config struct {\n\t\/\/ An inline script to execute. Multiple strings are all executed\n\t\/\/ in the context of a single shell.\n\tInline []string\n\n\t\/\/ The shebang value used when running inline scripts.\n\tInlineShebang string `mapstructure:\"inline_shebang\"`\n\n\t\/\/ The local path of the shell script to upload and execute.\n\tScript string\n\n\t\/\/ An array of multiple scripts to run.\n\tScripts []string\n\n\t\/\/ An array of environment variables that will be injected before\n\t\/\/ your command(s) are executed.\n\tVars []string `mapstructure:\"environment_vars\"`\n\n\t\/\/ The remote path where the local shell script will be uploaded to.\n\t\/\/ This should be set to a writable file that is in a pre-existing directory.\n\tRemotePath string `mapstructure:\"remote_path\"`\n\n\t\/\/ The command used to execute the script. The '{{ .Path }}' variable\n\t\/\/ should be used to specify where the script goes, {{ .Vars }}\n\t\/\/ can be used to inject the environment_vars into the environment.\n\tExecuteCommand string `mapstructure:\"execute_command\"`\n\n\t\/\/ Packer configurations, these come from Packer itself\n\tPackerBuildName string `mapstructure:\"packer_build_name\"`\n\tPackerBuilderType string `mapstructure:\"packer_builder_type\"`\n\n\ttpl *common.Template\n}\n\ntype Provisioner struct {\n\tconfig config\n}\n\ntype ExecuteCommandTemplate struct {\n\tVars string\n\tPath string\n}\n\nfunc (p *Provisioner) Prepare(raws ...interface{}) error {\n\tmd, err := common.DecodeConfig(&p.config, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.config.tpl, err = common.NewTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\n\tif p.config.ExecuteCommand == \"\" {\n\t\tp.config.ExecuteCommand = \"chmod +x {{.Path}}; {{.Vars}} {{.Path}}\"\n\t}\n\n\tif p.config.Inline != nil && len(p.config.Inline) == 0 {\n\t\tp.config.Inline = nil\n\t}\n\n\tif p.config.InlineShebang == \"\" {\n\t\tp.config.InlineShebang = \"\/bin\/sh\"\n\t}\n\n\tif p.config.RemotePath == \"\" {\n\t\tp.config.RemotePath = DefaultRemotePath\n\t}\n\n\tif p.config.Scripts == nil {\n\t\tp.config.Scripts = make([]string, 0)\n\t}\n\n\tif p.config.Vars == nil {\n\t\tp.config.Vars = make([]string, 0)\n\t}\n\n\tif p.config.Script != \"\" && len(p.config.Scripts) > 0 {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Only one of script or scripts can be specified.\"))\n\t}\n\n\tif p.config.Script != \"\" {\n\t\tp.config.Scripts = []string{p.config.Script}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"inline_shebang\": &p.config.InlineShebang,\n\t\t\"script\": &p.config.Script,\n\t\t\"remote_path\": &p.config.RemotePath,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = p.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tsliceTemplates := map[string][]string{\n\t\t\"inline\": p.config.Inline,\n\t\t\"scripts\": p.config.Scripts,\n\t\t\"environment_vars\": p.config.Vars,\n\t}\n\n\tfor n, slice := range sliceTemplates {\n\t\tfor i, elem := range slice {\n\t\t\tvar err error\n\t\t\tslice[i], err = p.config.tpl.Process(elem, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\t\terrs, fmt.Errorf(\"Error processing %s[%d]: %s\", n, i, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(p.config.Scripts) == 0 && p.config.Inline == nil {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Either a script file or inline script must be specified.\"))\n\t} else if len(p.config.Scripts) > 0 && p.config.Inline != nil {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Only a script file or an inline script can be specified, not both.\"))\n\t}\n\n\tfor _, path := range p.config.Scripts {\n\t\tif _, err := os.Stat(path); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Bad script '%s': %s\", path, err))\n\t\t}\n\t}\n\n\t\/\/ Do a check for bad environment variables, such as '=foo', 'foobar'\n\tfor _, kv := range p.config.Vars {\n\t\tvs := strings.Split(kv, \"=\")\n\t\tif len(vs) != 2 || vs[0] == \"\" {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Environment variable not in format 'key=value': %s\", kv))\n\t\t}\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}\n\nfunc (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {\n\tscripts := make([]string, len(p.config.Scripts))\n\tcopy(scripts, p.config.Scripts)\n\n\t\/\/ If we have an inline script, then turn that into a temporary\n\t\/\/ shell script and use that.\n\tif p.config.Inline != nil {\n\t\ttf, err := ioutil.TempFile(\"\", \"packer-shell\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t}\n\t\tdefer os.Remove(tf.Name())\n\n\t\t\/\/ Set the path to the temporary file\n\t\tscripts = append(scripts, tf.Name())\n\n\t\t\/\/ Write our contents to it\n\t\twriter := bufio.NewWriter(tf)\n\t\twriter.WriteString(fmt.Sprintf(\"#!%s\\n\", p.config.InlineShebang))\n\t\tfor _, command := range p.config.Inline {\n\t\t\tif _, err := writer.WriteString(command + \"\\n\"); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := writer.Flush(); err != nil {\n\t\t\treturn fmt.Errorf(\"Error preparing shell script: %s\", err)\n\t\t}\n\n\t\ttf.Close()\n\t}\n\n\t\/\/ Build our variables up by adding in the build name and builder type\n\tenvVars := make([]string, len(p.config.Vars)+2)\n\tenvVars[0] = \"PACKER_BUILD_NAME=\" + p.config.PackerBuildName\n\tenvVars[1] = \"PACKER_BUILDER_TYPE=\" + p.config.PackerBuilderType\n\tcopy(envVars[2:], p.config.Vars)\n\n\tfor _, path := range scripts {\n\t\tui.Say(fmt.Sprintf(\"Provisioning with shell script: %s\", path))\n\n\t\tlog.Printf(\"Opening %s for reading\", path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error opening shell script: %s\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tlog.Printf(\"Uploading %s => %s\", path, p.config.RemotePath)\n\t\terr = comm.Upload(p.config.RemotePath, f)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error uploading shell script: %s\", err)\n\t\t}\n\n\t\t\/\/ Close the original file since we copied it\n\t\tf.Close()\n\n\t\t\/\/ Flatten the environment variables\n\t\tflattendVars := strings.Join(envVars, \" \")\n\n\t\t\/\/ Compile the command\n\t\tcommand, err := p.config.tpl.Process(p.config.ExecuteCommand, &ExecuteCommandTemplate{\n\t\t\tVars: flattendVars,\n\t\t\tPath: p.config.RemotePath,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error processing command: %s\", err)\n\t\t}\n\n\t\tcmd := &packer.RemoteCmd{Command: command}\n\t\tlog.Printf(\"Executing command: %s\", cmd.Command)\n\t\tif err := cmd.StartWithUi(comm, ui); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed executing command: %s\", err)\n\t\t}\n\n\t\tif cmd.ExitStatus != 0 {\n\t\t\treturn fmt.Errorf(\"Script exited with non-zero exit status: %d\", cmd.ExitStatus)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resource_aws_internet_gateway_create(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Create the gateway\n\tlog.Printf(\"[DEBUG] Creating internet gateway\")\n\tresp, err := ec2conn.CreateInternetGateway(nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating subnet: %s\", err)\n\t}\n\n\t\/\/ Get the ID and store it\n\tig := &resp.InternetGateway\n\ts.ID = ig.InternetGatewayId\n\tlog.Printf(\"[INFO] InternetGateway ID: %s\", s.ID)\n\n\t\/\/ Update our attributes and return\n\treturn resource_aws_internet_gateway_update(s, d, meta)\n}\n\nfunc resource_aws_internet_gateway_update(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Merge the diff so we have the latest attributes\n\trs := s.MergeDiff(d)\n\n\t\/\/ A note on the states below: the AWS docs (as of July, 2014) say\n\t\/\/ that the states would be: attached, attaching, detached, detaching,\n\t\/\/ but when running, I noticed that the state is usually \"available\" when\n\t\/\/ it is attached.\n\n\t\/\/ If we're already attached, detach it first\n\tif err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil {\n\t\treturn s, err\n\t}\n\n\t\/\/ Set the VPC ID to empty since we're detached at this point\n\tdelete(rs.Attributes, \"vpc_id\")\n\n\tif attr, ok := d.Attributes[\"vpc_id\"]; ok && attr.New != \"\" {\n\t\terr := resource_aws_internet_gateway_attach(ec2conn, s, attr.New)\n\t\tif err != nil {\n\t\t\treturn rs, err\n\t\t}\n\n\t\trs.Attributes[\"vpc_id\"] = attr.New\n\t}\n\n\treturn resource_aws_internet_gateway_update_state(rs, nil)\n}\n\nfunc resource_aws_internet_gateway_destroy(\n\ts *terraform.InstanceState,\n\tmeta interface{}) error {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Detach if it is attached\n\tif err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"[INFO] Deleting Internet Gateway: %s\", s.ID)\n\tif _, err := ec2conn.DeleteInternetGateway(s.ID); err != nil {\n\t\tec2err, ok := err.(*ec2.Error)\n\t\tif ok && ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error deleting internet gateway: %s\", err)\n\t}\n\n\t\/\/ Wait for the internet gateway to actually delete\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to delete\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"available\"},\n\t\tTarget: \"\",\n\t\tRefresh: IGStateRefreshFunc(ec2conn, s.ID),\n\t\tTimeout: 10 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to destroy: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_refresh(\n\ts *terraform.InstanceState,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\tigRaw, _, err := IGStateRefreshFunc(ec2conn, s.ID)()\n\tif err != nil {\n\t\treturn s, err\n\t}\n\tif igRaw == nil {\n\t\treturn nil, nil\n\t}\n\n\tig := igRaw.(*ec2.InternetGateway)\n\treturn resource_aws_internet_gateway_update_state(s, ig)\n}\n\nfunc resource_aws_internet_gateway_diff(\n\ts *terraform.InstanceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.InstanceDiff, error) {\n\tb := &diff.ResourceBuilder{\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"vpc_id\": diff.AttrTypeUpdate,\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_aws_internet_gateway_attach(\n\tec2conn *ec2.EC2,\n\ts *terraform.InstanceState,\n\tvpcId string) error {\n\tlog.Printf(\n\t\t\"[INFO] Attaching Internet Gateway '%s' to VPC '%s'\",\n\t\ts.ID,\n\t\tvpcId)\n\t_, err := ec2conn.AttachInternetGateway(s.ID, vpcId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Wait for it to be fully attached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to attach\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detached\", \"attaching\"},\n\t\tTarget: \"available\",\n\t\tRefresh: IGAttachStateRefreshFunc(ec2conn, s.ID, \"available\"),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to attach: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_detach(\n\tec2conn *ec2.EC2,\n\ts *terraform.InstanceState) error {\n\tif s.Attributes[\"vpc_id\"] == \"\" {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Detaching Internet Gateway '%s' from VPC '%s'\",\n\t\ts.ID,\n\t\ts.Attributes[\"vpc_id\"])\n\twait := true\n\t_, err := ec2conn.DetachInternetGateway(s.ID, s.Attributes[\"vpc_id\"])\n\tif err != nil {\n\t\tec2err, ok := err.(*ec2.Error)\n\t\tif ok {\n\t\t\tif ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\terr = nil\n\t\t\t\twait = false\n\t\t\t} else if ec2err.Code == \"Gateway.NotAttached\" {\n\t\t\t\terr = nil\n\t\t\t\twait = false\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdelete(s.Attributes, \"vpc_id\")\n\n\tif !wait {\n\t\treturn nil\n\t}\n\n\t\/\/ Wait for it to be fully detached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to detach\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"attached\", \"detaching\", \"available\"},\n\t\tTarget: \"detached\",\n\t\tRefresh: IGAttachStateRefreshFunc(ec2conn, s.ID, \"detached\"),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to detach: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_update_state(\n\ts *terraform.InstanceState,\n\tig *ec2.InternetGateway) (*terraform.InstanceState, error) {\n\treturn s, nil\n}\n\n\/\/ IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an internet gateway.\nfunc IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tresp, err := conn.DescribeInternetGateways([]string{id}, ec2.NewFilter())\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif ok && ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := &resp.InternetGateways[0]\n\t\treturn ig, \"available\", nil\n\t}\n}\n\n\/\/ IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used\n\/\/ watch the state of an internet gateway's attachment.\nfunc IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc {\n\tvar start time.Time\n\treturn func() (interface{}, string, error) {\n\t\tif start.IsZero() {\n\t\t\tstart = time.Now()\n\t\t}\n\n\t\tresp, err := conn.DescribeInternetGateways([]string{id}, ec2.NewFilter())\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif ok && ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := &resp.InternetGateways[0]\n\n\t\tif time.Now().Sub(start) > 10*time.Second {\n\t\t\treturn ig, expected, nil\n\t\t}\n\n\t\tif len(ig.Attachments) == 0 {\n\t\t\t\/\/ No attachments, we're detached\n\t\t\treturn ig, \"detached\", nil\n\t\t}\n\n\t\treturn ig, ig.Attachments[0].State, nil\n\t}\n}\n<commit_msg>providers\/aws: retry destroying internet gateway for some time [GH-447]<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resource_aws_internet_gateway_create(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Create the gateway\n\tlog.Printf(\"[DEBUG] Creating internet gateway\")\n\tresp, err := ec2conn.CreateInternetGateway(nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating subnet: %s\", err)\n\t}\n\n\t\/\/ Get the ID and store it\n\tig := &resp.InternetGateway\n\ts.ID = ig.InternetGatewayId\n\tlog.Printf(\"[INFO] InternetGateway ID: %s\", s.ID)\n\n\t\/\/ Update our attributes and return\n\treturn resource_aws_internet_gateway_update(s, d, meta)\n}\n\nfunc resource_aws_internet_gateway_update(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Merge the diff so we have the latest attributes\n\trs := s.MergeDiff(d)\n\n\t\/\/ A note on the states below: the AWS docs (as of July, 2014) say\n\t\/\/ that the states would be: attached, attaching, detached, detaching,\n\t\/\/ but when running, I noticed that the state is usually \"available\" when\n\t\/\/ it is attached.\n\n\t\/\/ If we're already attached, detach it first\n\tif err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil {\n\t\treturn s, err\n\t}\n\n\t\/\/ Set the VPC ID to empty since we're detached at this point\n\tdelete(rs.Attributes, \"vpc_id\")\n\n\tif attr, ok := d.Attributes[\"vpc_id\"]; ok && attr.New != \"\" {\n\t\terr := resource_aws_internet_gateway_attach(ec2conn, s, attr.New)\n\t\tif err != nil {\n\t\t\treturn rs, err\n\t\t}\n\n\t\trs.Attributes[\"vpc_id\"] = attr.New\n\t}\n\n\treturn resource_aws_internet_gateway_update_state(rs, nil)\n}\n\nfunc resource_aws_internet_gateway_destroy(\n\ts *terraform.InstanceState,\n\tmeta interface{}) error {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\t\/\/ Detach if it is attached\n\tif err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"[INFO] Deleting Internet Gateway: %s\", s.ID)\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\t_, err := ec2conn.DeleteInternetGateway(s.ID)\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch ec2err.Code {\n\t\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\t\treturn nil\n\t\t\tcase \"DependencyViolation\":\n\t\t\t\treturn err \/\/ retry\n\t\t\tdefault:\n\t\t\t\treturn resource.RetryError{err}\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error deleting internet gateway: %s\", err)\n\t})\n\n\t\/\/ Wait for the internet gateway to actually delete\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to delete\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"available\"},\n\t\tTarget: \"\",\n\t\tRefresh: IGStateRefreshFunc(ec2conn, s.ID),\n\t\tTimeout: 10 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to destroy: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_refresh(\n\ts *terraform.InstanceState,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\tec2conn := p.ec2conn\n\n\tigRaw, _, err := IGStateRefreshFunc(ec2conn, s.ID)()\n\tif err != nil {\n\t\treturn s, err\n\t}\n\tif igRaw == nil {\n\t\treturn nil, nil\n\t}\n\n\tig := igRaw.(*ec2.InternetGateway)\n\treturn resource_aws_internet_gateway_update_state(s, ig)\n}\n\nfunc resource_aws_internet_gateway_diff(\n\ts *terraform.InstanceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.InstanceDiff, error) {\n\tb := &diff.ResourceBuilder{\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"vpc_id\": diff.AttrTypeUpdate,\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_aws_internet_gateway_attach(\n\tec2conn *ec2.EC2,\n\ts *terraform.InstanceState,\n\tvpcId string) error {\n\tlog.Printf(\n\t\t\"[INFO] Attaching Internet Gateway '%s' to VPC '%s'\",\n\t\ts.ID,\n\t\tvpcId)\n\t_, err := ec2conn.AttachInternetGateway(s.ID, vpcId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Wait for it to be fully attached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to attach\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detached\", \"attaching\"},\n\t\tTarget: \"available\",\n\t\tRefresh: IGAttachStateRefreshFunc(ec2conn, s.ID, \"available\"),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to attach: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_detach(\n\tec2conn *ec2.EC2,\n\ts *terraform.InstanceState) error {\n\tif s.Attributes[\"vpc_id\"] == \"\" {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Detaching Internet Gateway '%s' from VPC '%s'\",\n\t\ts.ID,\n\t\ts.Attributes[\"vpc_id\"])\n\twait := true\n\t_, err := ec2conn.DetachInternetGateway(s.ID, s.Attributes[\"vpc_id\"])\n\tif err != nil {\n\t\tec2err, ok := err.(*ec2.Error)\n\t\tif ok {\n\t\t\tif ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\terr = nil\n\t\t\t\twait = false\n\t\t\t} else if ec2err.Code == \"Gateway.NotAttached\" {\n\t\t\t\terr = nil\n\t\t\t\twait = false\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdelete(s.Attributes, \"vpc_id\")\n\n\tif !wait {\n\t\treturn nil\n\t}\n\n\t\/\/ Wait for it to be fully detached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to detach\", s.ID)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"attached\", \"detaching\", \"available\"},\n\t\tTarget: \"detached\",\n\t\tRefresh: IGAttachStateRefreshFunc(ec2conn, s.ID, \"detached\"),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to detach: %s\",\n\t\t\ts.ID, err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_internet_gateway_update_state(\n\ts *terraform.InstanceState,\n\tig *ec2.InternetGateway) (*terraform.InstanceState, error) {\n\treturn s, nil\n}\n\n\/\/ IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an internet gateway.\nfunc IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tresp, err := conn.DescribeInternetGateways([]string{id}, ec2.NewFilter())\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif ok && ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := &resp.InternetGateways[0]\n\t\treturn ig, \"available\", nil\n\t}\n}\n\n\/\/ IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used\n\/\/ watch the state of an internet gateway's attachment.\nfunc IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc {\n\tvar start time.Time\n\treturn func() (interface{}, string, error) {\n\t\tif start.IsZero() {\n\t\t\tstart = time.Now()\n\t\t}\n\n\t\tresp, err := conn.DescribeInternetGateways([]string{id}, ec2.NewFilter())\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif ok && ec2err.Code == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := &resp.InternetGateways[0]\n\n\t\tif time.Now().Sub(start) > 10*time.Second {\n\t\t\treturn ig, expected, nil\n\t\t}\n\n\t\tif len(ig.Attachments) == 0 {\n\t\t\t\/\/ No attachments, we're detached\n\t\t\treturn ig, \"detached\", nil\n\t\t}\n\n\t\treturn ig, ig.Attachments[0].State, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsInternetGateway() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsInternetGatewayCreate,\n\t\tRead: resourceAwsInternetGatewayRead,\n\t\tUpdate: resourceAwsInternetGatewayUpdate,\n\t\tDelete: resourceAwsInternetGatewayDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Create the gateway\n\tlog.Printf(\"[DEBUG] Creating internet gateway\")\n\tvar err error\n\tresp, err := conn.CreateInternetGateway(nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating internet gateway: %s\", err)\n\t}\n\n\t\/\/ Get the ID and store it\n\tig := *resp.InternetGateway\n\td.SetId(*ig.InternetGatewayId)\n\tlog.Printf(\"[INFO] InternetGateway ID: %s\", d.Id())\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\tigRaw, _, err := IGStateRefreshFunc(conn, d.Id())()\n\t\tif igRaw != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err == nil {\n\t\t\treturn resource.RetryableError(err)\n\t\t} else {\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t})\n\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"{{err}}\", err)\n\t}\n\n\terr = setTags(conn, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Attach the new gateway to the correct vpc\n\treturn resourceAwsInternetGatewayAttach(d, meta)\n}\n\nfunc resourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tigRaw, _, err := IGStateRefreshFunc(conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif igRaw == nil {\n\t\t\/\/ Seems we have lost our internet gateway\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tig := igRaw.(*ec2.InternetGateway)\n\tif len(ig.Attachments) == 0 {\n\t\t\/\/ Gateway exists but not attached to the VPC\n\t\td.Set(\"vpc_id\", \"\")\n\t} else {\n\t\td.Set(\"vpc_id\", ig.Attachments[0].VpcId)\n\t}\n\n\td.Set(\"tags\", tagsToMap(ig.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayUpdate(d *schema.ResourceData, meta interface{}) error {\n\tif d.HasChange(\"vpc_id\") {\n\t\t\/\/ If we're already attached, detach it first\n\t\tif err := resourceAwsInternetGatewayDetach(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Attach the gateway to the new vpc\n\t\tif err := resourceAwsInternetGatewayAttach(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconn := meta.(*AWSClient).ec2conn\n\n\tif err := setTags(conn, d); err != nil {\n\t\treturn err\n\t}\n\n\td.SetPartial(\"tags\")\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Detach if it is attached\n\tif err := resourceAwsInternetGatewayDetach(d, meta); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"[INFO] Deleting Internet Gateway: %s\", d.Id())\n\n\treturn resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\t_, err := conn.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(d.Id()),\n\t\t})\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn resource.RetryableError(err)\n\t\t}\n\n\t\tswitch ec2err.Code() {\n\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\treturn nil\n\t\tcase \"DependencyViolation\":\n\t\t\treturn resource.RetryableError(err) \/\/ retry\n\t\t}\n\n\t\treturn resource.NonRetryableError(err)\n\t})\n}\n\nfunc resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tif d.Get(\"vpc_id\").(string) == \"\" {\n\t\tlog.Printf(\n\t\t\t\"[DEBUG] Not attaching Internet Gateway '%s' as no VPC ID is set\",\n\t\t\td.Id())\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Attaching Internet Gateway '%s' to VPC '%s'\",\n\t\td.Id(),\n\t\td.Get(\"vpc_id\").(string))\n\n\terr := resource.Retry(2*time.Minute, func() *resource.RetryError {\n\t\t_, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(d.Id()),\n\t\t\tVpcId: aws.String(d.Get(\"vpc_id\").(string)),\n\t\t})\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif ec2err, ok := err.(awserr.Error); ok {\n\t\t\tswitch ec2err.Code() {\n\t\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\t\treturn resource.RetryableError(err) \/\/ retry\n\t\t\t}\n\t\t}\n\t\treturn resource.NonRetryableError(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A note on the states below: the AWS docs (as of July, 2014) say\n\t\/\/ that the states would be: attached, attaching, detached, detaching,\n\t\/\/ but when running, I noticed that the state is usually \"available\" when\n\t\/\/ it is attached.\n\n\t\/\/ Wait for it to be fully attached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to attach\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detached\", \"attaching\"},\n\t\tTarget: []string{\"available\"},\n\t\tRefresh: IGAttachStateRefreshFunc(conn, d.Id(), \"available\"),\n\t\tTimeout: 4 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to attach: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Get the old VPC ID to detach from\n\tvpcID, _ := d.GetChange(\"vpc_id\")\n\n\tif vpcID.(string) == \"\" {\n\t\tlog.Printf(\n\t\t\t\"[DEBUG] Not detaching Internet Gateway '%s' as no VPC ID is set\",\n\t\t\td.Id())\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Detaching Internet Gateway '%s' from VPC '%s'\",\n\t\td.Id(),\n\t\tvpcID.(string))\n\n\t\/\/ Wait for it to be fully detached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to detach\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detaching\"},\n\t\tTarget: []string{\"detached\"},\n\t\tRefresh: detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)),\n\t\tTimeout: 15 * time.Minute,\n\t\tDelay: 10 * time.Second,\n\t\tNotFoundChecks: 30,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to detach: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn nil\n}\n\n\/\/ InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an EC2 instance.\nfunc detachIGStateRefreshFunc(conn *ec2.EC2, gatewayID, vpcID string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\t_, err := conn.DetachInternetGateway(&ec2.DetachInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(gatewayID),\n\t\t\tVpcId: aws.String(vpcID),\n\t\t})\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(awserr.Error); ok {\n\t\t\t\tswitch ec2err.Code() {\n\t\t\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\t\t\tlog.Printf(\"[TRACE] Error detaching Internet Gateway '%s' from VPC '%s': %s\", gatewayID, vpcID, err)\n\t\t\t\t\treturn nil, \"Not Found\", nil\n\n\t\t\t\tcase \"Gateway.NotAttached\":\n\t\t\t\t\treturn \"detached\", \"detached\", nil\n\n\t\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\treturn nil, \"detaching\", nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ DetachInternetGateway only returns an error, so if it's nil, assume we're\n\t\t\/\/ detached\n\t\treturn \"detached\", \"detached\", nil\n\t}\n}\n\n\/\/ IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an internet gateway.\nfunc IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tresp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{\n\t\t\tInternetGatewayIds: []*string{aws.String(id)},\n\t\t})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(awserr.Error)\n\t\t\tif ok && ec2err.Code() == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := resp.InternetGateways[0]\n\t\treturn ig, \"available\", nil\n\t}\n}\n\n\/\/ IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used\n\/\/ watch the state of an internet gateway's attachment.\nfunc IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc {\n\tvar start time.Time\n\treturn func() (interface{}, string, error) {\n\t\tif start.IsZero() {\n\t\t\tstart = time.Now()\n\t\t}\n\n\t\tresp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{\n\t\t\tInternetGatewayIds: []*string{aws.String(id)},\n\t\t})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(awserr.Error)\n\t\t\tif ok && ec2err.Code() == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := resp.InternetGateways[0]\n\n\t\tif time.Now().Sub(start) > 10*time.Second {\n\t\t\treturn ig, expected, nil\n\t\t}\n\n\t\tif len(ig.Attachments) == 0 {\n\t\t\t\/\/ No attachments, we're detached\n\t\t\treturn ig, \"detached\", nil\n\t\t}\n\n\t\treturn ig, *ig.Attachments[0].State, nil\n\t}\n}\n<commit_msg>provider\/aws: Increase timeout for deleting IGW (#14705)<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsInternetGateway() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsInternetGatewayCreate,\n\t\tRead: resourceAwsInternetGatewayRead,\n\t\tUpdate: resourceAwsInternetGatewayUpdate,\n\t\tDelete: resourceAwsInternetGatewayDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Create the gateway\n\tlog.Printf(\"[DEBUG] Creating internet gateway\")\n\tvar err error\n\tresp, err := conn.CreateInternetGateway(nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating internet gateway: %s\", err)\n\t}\n\n\t\/\/ Get the ID and store it\n\tig := *resp.InternetGateway\n\td.SetId(*ig.InternetGatewayId)\n\tlog.Printf(\"[INFO] InternetGateway ID: %s\", d.Id())\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\tigRaw, _, err := IGStateRefreshFunc(conn, d.Id())()\n\t\tif igRaw != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err == nil {\n\t\t\treturn resource.RetryableError(err)\n\t\t} else {\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t})\n\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"{{err}}\", err)\n\t}\n\n\terr = setTags(conn, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Attach the new gateway to the correct vpc\n\treturn resourceAwsInternetGatewayAttach(d, meta)\n}\n\nfunc resourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tigRaw, _, err := IGStateRefreshFunc(conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif igRaw == nil {\n\t\t\/\/ Seems we have lost our internet gateway\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tig := igRaw.(*ec2.InternetGateway)\n\tif len(ig.Attachments) == 0 {\n\t\t\/\/ Gateway exists but not attached to the VPC\n\t\td.Set(\"vpc_id\", \"\")\n\t} else {\n\t\td.Set(\"vpc_id\", ig.Attachments[0].VpcId)\n\t}\n\n\td.Set(\"tags\", tagsToMap(ig.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayUpdate(d *schema.ResourceData, meta interface{}) error {\n\tif d.HasChange(\"vpc_id\") {\n\t\t\/\/ If we're already attached, detach it first\n\t\tif err := resourceAwsInternetGatewayDetach(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Attach the gateway to the new vpc\n\t\tif err := resourceAwsInternetGatewayAttach(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconn := meta.(*AWSClient).ec2conn\n\n\tif err := setTags(conn, d); err != nil {\n\t\treturn err\n\t}\n\n\td.SetPartial(\"tags\")\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Detach if it is attached\n\tif err := resourceAwsInternetGatewayDetach(d, meta); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"[INFO] Deleting Internet Gateway: %s\", d.Id())\n\n\treturn resource.Retry(10*time.Minute, func() *resource.RetryError {\n\t\t_, err := conn.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(d.Id()),\n\t\t})\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn resource.RetryableError(err)\n\t\t}\n\n\t\tswitch ec2err.Code() {\n\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\treturn nil\n\t\tcase \"DependencyViolation\":\n\t\t\treturn resource.RetryableError(err) \/\/ retry\n\t\t}\n\n\t\treturn resource.NonRetryableError(err)\n\t})\n}\n\nfunc resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tif d.Get(\"vpc_id\").(string) == \"\" {\n\t\tlog.Printf(\n\t\t\t\"[DEBUG] Not attaching Internet Gateway '%s' as no VPC ID is set\",\n\t\t\td.Id())\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Attaching Internet Gateway '%s' to VPC '%s'\",\n\t\td.Id(),\n\t\td.Get(\"vpc_id\").(string))\n\n\terr := resource.Retry(2*time.Minute, func() *resource.RetryError {\n\t\t_, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(d.Id()),\n\t\t\tVpcId: aws.String(d.Get(\"vpc_id\").(string)),\n\t\t})\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif ec2err, ok := err.(awserr.Error); ok {\n\t\t\tswitch ec2err.Code() {\n\t\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\t\treturn resource.RetryableError(err) \/\/ retry\n\t\t\t}\n\t\t}\n\t\treturn resource.NonRetryableError(err)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A note on the states below: the AWS docs (as of July, 2014) say\n\t\/\/ that the states would be: attached, attaching, detached, detaching,\n\t\/\/ but when running, I noticed that the state is usually \"available\" when\n\t\/\/ it is attached.\n\n\t\/\/ Wait for it to be fully attached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to attach\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detached\", \"attaching\"},\n\t\tTarget: []string{\"available\"},\n\t\tRefresh: IGAttachStateRefreshFunc(conn, d.Id(), \"available\"),\n\t\tTimeout: 4 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to attach: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\t\/\/ Get the old VPC ID to detach from\n\tvpcID, _ := d.GetChange(\"vpc_id\")\n\n\tif vpcID.(string) == \"\" {\n\t\tlog.Printf(\n\t\t\t\"[DEBUG] Not detaching Internet Gateway '%s' as no VPC ID is set\",\n\t\t\td.Id())\n\t\treturn nil\n\t}\n\n\tlog.Printf(\n\t\t\"[INFO] Detaching Internet Gateway '%s' from VPC '%s'\",\n\t\td.Id(),\n\t\tvpcID.(string))\n\n\t\/\/ Wait for it to be fully detached before continuing\n\tlog.Printf(\"[DEBUG] Waiting for internet gateway (%s) to detach\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"detaching\"},\n\t\tTarget: []string{\"detached\"},\n\t\tRefresh: detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)),\n\t\tTimeout: 15 * time.Minute,\n\t\tDelay: 10 * time.Second,\n\t\tNotFoundChecks: 30,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for internet gateway (%s) to detach: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn nil\n}\n\n\/\/ InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an EC2 instance.\nfunc detachIGStateRefreshFunc(conn *ec2.EC2, gatewayID, vpcID string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\t_, err := conn.DetachInternetGateway(&ec2.DetachInternetGatewayInput{\n\t\t\tInternetGatewayId: aws.String(gatewayID),\n\t\t\tVpcId: aws.String(vpcID),\n\t\t})\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(awserr.Error); ok {\n\t\t\t\tswitch ec2err.Code() {\n\t\t\t\tcase \"InvalidInternetGatewayID.NotFound\":\n\t\t\t\t\tlog.Printf(\"[TRACE] Error detaching Internet Gateway '%s' from VPC '%s': %s\", gatewayID, vpcID, err)\n\t\t\t\t\treturn nil, \"Not Found\", nil\n\n\t\t\t\tcase \"Gateway.NotAttached\":\n\t\t\t\t\treturn \"detached\", \"detached\", nil\n\n\t\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\treturn nil, \"detaching\", nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ DetachInternetGateway only returns an error, so if it's nil, assume we're\n\t\t\/\/ detached\n\t\treturn \"detached\", \"detached\", nil\n\t}\n}\n\n\/\/ IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ an internet gateway.\nfunc IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tresp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{\n\t\t\tInternetGatewayIds: []*string{aws.String(id)},\n\t\t})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(awserr.Error)\n\t\t\tif ok && ec2err.Code() == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := resp.InternetGateways[0]\n\t\treturn ig, \"available\", nil\n\t}\n}\n\n\/\/ IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used\n\/\/ watch the state of an internet gateway's attachment.\nfunc IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc {\n\tvar start time.Time\n\treturn func() (interface{}, string, error) {\n\t\tif start.IsZero() {\n\t\t\tstart = time.Now()\n\t\t}\n\n\t\tresp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{\n\t\t\tInternetGatewayIds: []*string{aws.String(id)},\n\t\t})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(awserr.Error)\n\t\t\tif ok && ec2err.Code() == \"InvalidInternetGatewayID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERROR] Error on IGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tig := resp.InternetGateways[0]\n\n\t\tif time.Now().Sub(start) > 10*time.Second {\n\t\t\treturn ig, expected, nil\n\t\t}\n\n\t\tif len(ig.Attachments) == 0 {\n\t\t\t\/\/ No attachments, we're detached\n\t\t\treturn ig, \"detached\", nil\n\t\t}\n\n\t\treturn ig, *ig.Attachments[0].State, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build unit\n\npackage strategy_test\n\nimport (\n\t\"github.com\/control-center\/serviced\/scheduler\/strategy\"\n\t\"github.com\/control-center\/serviced\/scheduler\/strategy\/mocks\"\n\t\"github.com\/control-center\/serviced\/utils\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nconst (\n\tGigabyte = 1024 * 1024 * 1024\n)\n\nfunc newHost(cores int, memgigs uint64) *mocks.Host {\n\thost := &mocks.Host{}\n\thost.On(\"TotalCores\").Return(cores)\n\thost.On(\"TotalMemory\").Return(memgigs * Gigabyte)\n\treturn host\n}\n\nfunc newService(cores int, memgigs uint64) *mocks.ServiceConfig {\n\tid, _ := utils.NewUUID36()\n\tsvc := &mocks.ServiceConfig{}\n\tsvc.On(\"RequestedCorePercent\").Return(cores * 100)\n\tsvc.On(\"RequestedMemoryBytes\").Return(memgigs * Gigabyte)\n\tsvc.On(\"GetServiceID\").Return(id)\n\treturn svc\n}\n\n\/\/ Given two identical hosts, one of which has a service running on it, verify\n\/\/ that the second host is deemed most appropriate\nfunc (s *StrategySuite) TestSimpleScoring(c *C) {\n\thostA := newHost(2, 2)\n\thostB := newHost(2, 2)\n\n\tsvc := newService(1, 1)\n\tsvc2 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{})\n\n\tunder, over := strategy.ScoreHosts(svc2, []strategy.Host{hostA, hostB})\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(under[1].Host, Equals, hostA)\n\tc.Assert(over, HasLen, 0)\n}\n\n\/\/ Given two non-identical hosts, one of which has fewer overall resources but\n\/\/ more free space, verify that the freer host is deemed most appropriate\nfunc (s *StrategySuite) TestUnbalancedScoring(c *C) {\n\thostA := newHost(3, 3)\n\thostB := newHost(2, 2)\n\n\tsvc := newService(2, 2)\n\tsvc2 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{})\n\n\tunder, over := strategy.ScoreHosts(svc2, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(under[1].Host, Equals, hostA)\n\tc.Assert(over, HasLen, 0)\n\n}\n\n\/\/ Given two identical hosts, one of which has plenty of memory but no\n\/\/ available CPU, verify that it is not selected as the appropriate host\nfunc (s *StrategySuite) TestNoCPUAvailable(c *C) {\n\thostA := newHost(6, 6)\n\thostB := newHost(6, 6)\n\n\tsvc := newService(6, 1)\n\tsvc2 := newService(4, 4)\n\tsvc3 := newService(2, 2)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(over[0].Host, Equals, hostA)\n}\n\n\/\/ Given two identical hosts, none of which has enough CPU free but one of\n\/\/ which has enough memory, make sure the host with the memory is the one\n\/\/ assigned\nfunc (s *StrategySuite) TestNotEnoughResources(c *C) {\n\thostA := newHost(5, 5)\n\thostB := newHost(5, 5)\n\n\tsvc := newService(4, 4)\n\tsvc2 := newService(4, 3)\n\tsvc3 := newService(2, 2)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under, HasLen, 0)\n\tc.Assert(over[0].Host, Equals, hostB)\n\tc.Assert(over[1].Host, Equals, hostA)\n}\n\n\/\/ Given two identical hosts, all of which are overloaded both in terms of\n\/\/ memory and CPU, make sure the one with the least memory spoken for is\n\/\/ preferred over the one with the least CPU spoken for\nfunc (s *StrategySuite) TestMemoryPreferred(c *C) {\n\n\thostA := newHost(5, 5)\n\thostB := newHost(5, 5)\n\n\tsvc := newService(7, 6)\n\tsvc2 := newService(6, 7)\n\tsvc3 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under, HasLen, 0)\n\tc.Assert(over[0].Host, Equals, hostA)\n\tc.Assert(over[1].Host, Equals, hostB)\n}\n<commit_msg>fix tests<commit_after>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build unit\n\npackage strategy_test\n\nimport (\n\t\"github.com\/control-center\/serviced\/scheduler\/strategy\"\n\t\"github.com\/control-center\/serviced\/scheduler\/strategy\/mocks\"\n\t\"github.com\/control-center\/serviced\/utils\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nconst (\n\tGigabyte = 1024 * 1024 * 1024\n)\n\nfunc newHost(cores int, memgigs uint64) *mocks.Host {\n\thost := &mocks.Host{}\n\thost.On(\"TotalCores\").Return(cores)\n\thost.On(\"TotalMemory\").Return(memgigs * Gigabyte)\n\tid, _ := utils.NewUUID36()\n\thost.On(\"HostID\").Return(id)\n\treturn host\n}\n\nfunc newService(cores int, memgigs uint64) *mocks.ServiceConfig {\n\tid, _ := utils.NewUUID36()\n\tsvc := &mocks.ServiceConfig{}\n\tsvc.On(\"RequestedCorePercent\").Return(cores * 100)\n\tsvc.On(\"RequestedMemoryBytes\").Return(memgigs * Gigabyte)\n\tsvc.On(\"GetServiceID\").Return(id)\n\treturn svc\n}\n\n\/\/ Given two identical hosts, one of which has a service running on it, verify\n\/\/ that the second host is deemed most appropriate\nfunc (s *StrategySuite) TestSimpleScoring(c *C) {\n\thostA := newHost(2, 2)\n\thostB := newHost(2, 2)\n\n\tsvc := newService(1, 1)\n\tsvc2 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{})\n\n\tunder, over := strategy.ScoreHosts(svc2, []strategy.Host{hostA, hostB})\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(under[1].Host, Equals, hostA)\n\tc.Assert(over, HasLen, 0)\n}\n\n\/\/ Given two non-identical hosts, one of which has fewer overall resources but\n\/\/ more free space, verify that the freer host is deemed most appropriate\nfunc (s *StrategySuite) TestUnbalancedScoring(c *C) {\n\thostA := newHost(3, 3)\n\thostB := newHost(2, 2)\n\n\tsvc := newService(2, 2)\n\tsvc2 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{})\n\n\tunder, over := strategy.ScoreHosts(svc2, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(under[1].Host, Equals, hostA)\n\tc.Assert(over, HasLen, 0)\n\n}\n\n\/\/ Given two identical hosts, one of which has plenty of memory but no\n\/\/ available CPU, verify that it is not selected as the appropriate host\nfunc (s *StrategySuite) TestNoCPUAvailable(c *C) {\n\thostA := newHost(6, 6)\n\thostB := newHost(6, 6)\n\n\tsvc := newService(6, 1)\n\tsvc2 := newService(4, 4)\n\tsvc3 := newService(2, 2)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under[0].Host, Equals, hostB)\n\tc.Assert(over[0].Host, Equals, hostA)\n}\n\n\/\/ Given two identical hosts, none of which has enough CPU free but one of\n\/\/ which has enough memory, make sure the host with the memory is the one\n\/\/ assigned\nfunc (s *StrategySuite) TestNotEnoughResources(c *C) {\n\thostA := newHost(5, 5)\n\thostB := newHost(5, 5)\n\n\tsvc := newService(4, 4)\n\tsvc2 := newService(4, 3)\n\tsvc3 := newService(2, 2)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under, HasLen, 0)\n\tc.Assert(over[0].Host, Equals, hostB)\n\tc.Assert(over[1].Host, Equals, hostA)\n}\n\n\/\/ Given two identical hosts, all of which are overloaded both in terms of\n\/\/ memory and CPU, make sure the one with the least memory spoken for is\n\/\/ preferred over the one with the least CPU spoken for\nfunc (s *StrategySuite) TestMemoryPreferred(c *C) {\n\n\thostA := newHost(5, 5)\n\thostB := newHost(5, 5)\n\n\tsvc := newService(7, 6)\n\tsvc2 := newService(6, 7)\n\tsvc3 := newService(1, 1)\n\n\thostA.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc})\n\thostB.On(\"RunningServices\").Return([]strategy.ServiceConfig{svc2})\n\n\tunder, over := strategy.ScoreHosts(svc3, []strategy.Host{hostA, hostB})\n\n\tc.Assert(under, HasLen, 0)\n\tc.Assert(over[0].Host, Equals, hostA)\n\tc.Assert(over[1].Host, Equals, hostB)\n}\n<|endoftext|>"} {"text":"<commit_before>package terminal\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestTerminal(t *testing.T) {\n\tvar buf bytes.Buffer\n\tterm := New(&buf)\n\tterm.SetLastLine(\"-- last line --\")\n\tterm.Refresh(\"prompt \", []rune(\"aaa\"), 2)\n\tif got := buf.String(); strings.Index(got, \"prompt aaa\") < 0 {\n\t\tt.Errorf(\"got %q, but should include %q\", got, \"prompt aaa\")\n\t}\n}\n\nfunc BenchmarkTerminal(b *testing.B) {\n\tterm := New(ioutil.Discard)\n\tterm.SetLastLine(\"-- last line --\")\n\tfor i := 0; i < b.N; i++ {\n\t\tterm.Refresh(\"prompt \", []rune(\"aaa\"), 2)\n\t}\n}\n<commit_msg>Make it simple<commit_after>package terminal\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestTerminal(t *testing.T) {\n\tvar buf bytes.Buffer\n\tterm := New(&buf)\n\tterm.SetLastLine(\"-- last line --\")\n\tterm.Refresh(\"prompt \", []rune(\"aaa\"), 2)\n\tif got := buf.String(); !strings.Contains(got, \"prompt aaa\") {\n\t\tt.Errorf(\"got %q, but should include %q\", got, \"prompt aaa\")\n\t}\n}\n\nfunc BenchmarkTerminal(b *testing.B) {\n\tterm := New(ioutil.Discard)\n\tterm.SetLastLine(\"-- last line --\")\n\tfor i := 0; i < b.N; i++ {\n\t\tterm.Refresh(\"prompt \", []rune(\"aaa\"), 2)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package githook\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/bleenco\/abstruse\/server\/core\"\n\t\"github.com\/drone\/go-scm\/scm\"\n)\n\nconst emptyCommit = \"0000000000000000000000000000000000000000\"\n\n\/\/ NewParser returns a new GitHookParser.\nfunc NewParser(client *scm.Client) core.GitHookParser {\n\treturn &parser{client}\n}\n\ntype parser struct {\n\tclient *scm.Client\n}\n\nfunc (p *parser) Parse(req *http.Request, secretFunc func(string) *core.Repository) (*core.GitHook, *core.Repository, error) {\n\tfn := func(webhook scm.Webhook) (string, error) {\n\t\tif webhook == nil {\n\t\t\treturn \"\", scm.ErrUnknownEvent\n\t\t}\n\t\trepo := webhook.Repository()\n\t\tfullname := fmt.Sprintf(\"%s\/%s\", repo.Namespace, repo.Name)\n\t\tr := secretFunc(fullname)\n\t\tif r == nil || r.Provider.Secret == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"cannot find repository\")\n\t\t}\n\t\treturn r.Provider.Secret, nil\n\t}\n\n\tpayload, err := p.client.Webhooks.Parse(req, fn)\n\tif err == scm.ErrUnknownEvent {\n\t\treturn nil, nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tswitch h := payload.(type) {\n\tcase *scm.PushHook:\n\t\treturn parsePushHook(h)\n\tcase *scm.TagHook:\n\t\treturn nil, nil, nil\n\tcase *scm.PullRequestHook:\n\t\treturn nil, nil, nil\n\tcase *scm.BranchHook:\n\t\treturn nil, nil, nil\n\tdefault:\n\t\treturn nil, nil, nil\n\t}\n}\n\nfunc parsePushHook(h *scm.PushHook) (*core.GitHook, *core.Repository, error) {\n\trepo := &core.Repository{}\n\tgithook := &core.GitHook{}\n\n\tif h.Commit.Sha == emptyCommit {\n\t\treturn nil, nil, nil\n\t}\n\n\tif strings.HasPrefix(h.Ref, \"refs\/tags\/\") {\n\t\tgithook = &core.GitHook{\n\t\t\tEvent: core.EventTag,\n\t\t\tAction: core.ActionCreate,\n\t\t\tLink: h.Commit.Link,\n\t\t\tTimestamp: h.Commit.Author.Date,\n\t\t\tMessage: h.Commit.Message,\n\t\t\tBefore: h.Before,\n\t\t\tAfter: h.After,\n\t\t\tSource: scm.TrimRef(h.BaseRef),\n\t\t\tTarget: scm.TrimRef(h.BaseRef),\n\t\t\tRef: h.Ref,\n\t\t\tAuthorEmail: h.Commit.Author.Email,\n\t\t\tAuthorAvatar: h.Commit.Author.Avatar,\n\t\t\tAuthorName: h.Commit.Author.Name,\n\t\t\tAuthorLogin: h.Commit.Author.Login,\n\t\t\tSenderEmail: h.Sender.Email,\n\t\t\tSenderAvatar: h.Sender.Avatar,\n\t\t\tSenderName: h.Sender.Name,\n\t\t\tSenderLogin: h.Sender.Login,\n\t\t}\n\t} else {\n\t\tgithook = &core.GitHook{\n\t\t\tEvent: core.EventPush,\n\t\t\tLink: h.Commit.Link,\n\t\t\tTimestamp: h.Commit.Author.Date,\n\t\t\tMessage: h.Commit.Message,\n\t\t\tBefore: h.Before,\n\t\t\tAfter: h.Commit.Sha,\n\t\t\tRef: h.Ref,\n\t\t\tSource: strings.TrimPrefix(h.Ref, \"refs\/heads\/\"),\n\t\t\tTarget: strings.TrimPrefix(h.Ref, \"refs\/heads\/\"),\n\t\t\tAuthorEmail: h.Commit.Author.Email,\n\t\t\tAuthorAvatar: h.Commit.Author.Avatar,\n\t\t\tAuthorName: h.Commit.Author.Name,\n\t\t\tAuthorLogin: h.Commit.Author.Login,\n\t\t\tSenderEmail: h.Sender.Email,\n\t\t\tSenderAvatar: h.Sender.Avatar,\n\t\t\tSenderName: h.Sender.Name,\n\t\t\tSenderLogin: h.Sender.Login,\n\t\t}\n\t}\n\n\t\/\/ TODO: check branch field!\n\trepo = &core.Repository{\n\t\tUID: h.Repo.ID,\n\t\tNamespace: h.Repo.Namespace,\n\t\tName: h.Repo.Name,\n\t\tFullName: fmt.Sprintf(\"%s\/%s\", h.Repo.Namespace, h.Repo.Name),\n\t\tURL: h.Repo.Link,\n\t\tPrivate: h.Repo.Private,\n\t\tClone: h.Repo.Clone,\n\t\tCloneSSH: h.Repo.CloneSSH,\n\t}\n\n\tif githook.AuthorAvatar == \"\" {\n\t\tgithook.AuthorAvatar = h.Sender.Avatar\n\t}\n\n\treturn githook, repo, nil\n}\n<commit_msg>feat(webhooks): PR, tag, branch parsers<commit_after>package githook\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/bleenco\/abstruse\/server\/core\"\n\t\"github.com\/drone\/go-scm\/scm\"\n)\n\nconst emptyCommit = \"0000000000000000000000000000000000000000\"\n\n\/\/ NewParser returns a new GitHookParser.\nfunc NewParser(client *scm.Client) core.GitHookParser {\n\treturn &parser{client}\n}\n\ntype parser struct {\n\tclient *scm.Client\n}\n\nfunc (p *parser) Parse(req *http.Request, secretFunc func(string) *core.Repository) (*core.GitHook, *core.Repository, error) {\n\tfn := func(webhook scm.Webhook) (string, error) {\n\t\tif webhook == nil {\n\t\t\treturn \"\", scm.ErrUnknownEvent\n\t\t}\n\t\trepo := webhook.Repository()\n\t\tfullname := fmt.Sprintf(\"%s\/%s\", repo.Namespace, repo.Name)\n\t\tr := secretFunc(fullname)\n\t\tif r == nil || r.Provider.Secret == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"cannot find repository\")\n\t\t}\n\t\treturn r.Provider.Secret, nil\n\t}\n\n\tpayload, err := p.client.Webhooks.Parse(req, fn)\n\tif err == scm.ErrUnknownEvent {\n\t\treturn nil, nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tswitch h := payload.(type) {\n\tcase *scm.PushHook:\n\t\treturn p.parsePushHook(h)\n\tcase *scm.TagHook:\n\t\treturn p.parseTagHook(h)\n\tcase *scm.PullRequestHook:\n\t\treturn p.parsePRHook(h)\n\tcase *scm.BranchHook:\n\t\treturn p.parseBranchHook(h)\n\tdefault:\n\t\treturn nil, nil, nil\n\t}\n}\n\nfunc (p *parser) parsePushHook(h *scm.PushHook) (*core.GitHook, *core.Repository, error) {\n\trepo := &core.Repository{}\n\tgithook := &core.GitHook{}\n\n\tif h.Commit.Sha == emptyCommit {\n\t\treturn nil, nil, nil\n\t}\n\n\tif strings.HasPrefix(h.Ref, \"refs\/tags\/\") {\n\t\tgithook = &core.GitHook{\n\t\t\tEvent: core.EventTag,\n\t\t\tAction: core.ActionCreate,\n\t\t\tLink: h.Commit.Link,\n\t\t\tTimestamp: h.Commit.Author.Date,\n\t\t\tMessage: h.Commit.Message,\n\t\t\tBefore: h.Before,\n\t\t\tAfter: h.After,\n\t\t\tSource: scm.TrimRef(h.BaseRef),\n\t\t\tTarget: scm.TrimRef(h.BaseRef),\n\t\t\tRef: h.Ref,\n\t\t\tAuthorEmail: h.Commit.Author.Email,\n\t\t\tAuthorAvatar: h.Commit.Author.Avatar,\n\t\t\tAuthorName: h.Commit.Author.Name,\n\t\t\tAuthorLogin: h.Commit.Author.Login,\n\t\t\tSenderEmail: h.Sender.Email,\n\t\t\tSenderAvatar: h.Sender.Avatar,\n\t\t\tSenderName: h.Sender.Name,\n\t\t\tSenderLogin: h.Sender.Login,\n\t\t}\n\t} else {\n\t\tgithook = &core.GitHook{\n\t\t\tEvent: core.EventPush,\n\t\t\tLink: h.Commit.Link,\n\t\t\tTimestamp: h.Commit.Author.Date,\n\t\t\tMessage: h.Commit.Message,\n\t\t\tBefore: h.Before,\n\t\t\tAfter: h.Commit.Sha,\n\t\t\tRef: h.Ref,\n\t\t\tSource: strings.TrimPrefix(h.Ref, \"refs\/heads\/\"),\n\t\t\tTarget: strings.TrimPrefix(h.Ref, \"refs\/heads\/\"),\n\t\t\tAuthorEmail: h.Commit.Author.Email,\n\t\t\tAuthorAvatar: h.Commit.Author.Avatar,\n\t\t\tAuthorName: h.Commit.Author.Name,\n\t\t\tAuthorLogin: h.Commit.Author.Login,\n\t\t\tSenderEmail: h.Sender.Email,\n\t\t\tSenderAvatar: h.Sender.Avatar,\n\t\t\tSenderName: h.Sender.Name,\n\t\t\tSenderLogin: h.Sender.Login,\n\t\t}\n\t}\n\n\t\/\/ TODO: check branch field!\n\trepo = &core.Repository{\n\t\tUID: h.Repo.ID,\n\t\tNamespace: h.Repo.Namespace,\n\t\tName: h.Repo.Name,\n\t\tFullName: fmt.Sprintf(\"%s\/%s\", h.Repo.Namespace, h.Repo.Name),\n\t\tURL: h.Repo.Link,\n\t\tPrivate: h.Repo.Private,\n\t\tClone: h.Repo.Clone,\n\t\tCloneSSH: h.Repo.CloneSSH,\n\t}\n\n\tif githook.AuthorAvatar == \"\" {\n\t\tgithook.AuthorAvatar = h.Sender.Avatar\n\t}\n\n\treturn githook, repo, nil\n}\n\nfunc (p *parser) parseTagHook(h *scm.TagHook) (*core.GitHook, *core.Repository, error) {\n\trepo := &core.Repository{}\n\tgithook := &core.GitHook{}\n\n\tif h.Action != scm.ActionCreate {\n\t\treturn nil, nil, nil\n\t}\n\n\t\/\/ When tag is created github, gitea and gitlab sends both a push\n\t\/\/ and a tag hook. Push hook contains more info so we use that.\n\tif p.client.Driver == scm.DriverGithub ||\n\t\tp.client.Driver == scm.DriverGitea ||\n\t\tp.client.Driver == scm.DriverGitlab {\n\t\treturn nil, nil, nil\n\t}\n\n\tgithook = &core.GitHook{\n\t\tEvent: core.EventTag,\n\t\tAction: core.ActionCreate,\n\t\tLink: \"\",\n\t\tMessage: path.Base(h.Ref.Path),\n\t\tAfter: h.Ref.Sha,\n\t\tRef: h.Ref.Name,\n\t\tSource: h.Ref.Name,\n\t\tTarget: h.Ref.Name,\n\t\tAuthorLogin: h.Sender.Login,\n\t\tAuthorName: h.Sender.Name,\n\t\tAuthorEmail: h.Sender.Email,\n\t\tAuthorAvatar: h.Sender.Avatar,\n\t\tSenderEmail: h.Sender.Email,\n\t\tSenderAvatar: h.Sender.Avatar,\n\t\tSenderName: h.Sender.Name,\n\t\tSenderLogin: h.Sender.Login,\n\t}\n\trepo = &core.Repository{\n\t\tUID: h.Repo.ID,\n\t\tNamespace: h.Repo.Namespace,\n\t\tName: h.Repo.Name,\n\t\tFullName: fmt.Sprintf(\"%s\/%s\", h.Repo.Namespace, h.Repo.Name),\n\t\tURL: h.Repo.Link,\n\t\tPrivate: h.Repo.Private,\n\t\tClone: h.Repo.Clone,\n\t\tCloneSSH: h.Repo.CloneSSH,\n\t}\n\n\tif !strings.HasPrefix(githook.Ref, \"refs\/tags\/\") {\n\t\tgithook.Ref = fmt.Sprintf(\"refs\/tags\/%s\", githook.Ref)\n\t}\n\n\treturn githook, repo, nil\n}\n\nfunc (p *parser) parsePRHook(h *scm.PullRequestHook) (*core.GitHook, *core.Repository, error) {\n\tif h.Action == scm.ActionClose {\n\t\treturn nil, nil, nil\n\t}\n\n\tgithook := &core.GitHook{\n\t\tEvent: core.EventPullRequest,\n\t\tAction: h.Action.String(),\n\t\tLink: h.PullRequest.Link,\n\t\tTimestamp: h.PullRequest.Created,\n\t\tTitle: h.PullRequest.Title,\n\t\tMessage: h.PullRequest.Body,\n\t\tBefore: h.PullRequest.Base.Sha,\n\t\tAfter: h.PullRequest.Sha,\n\t\tRef: h.PullRequest.Ref,\n\t\tFork: h.PullRequest.Fork,\n\t\tSource: h.PullRequest.Source,\n\t\tTarget: h.PullRequest.Target,\n\t\tAuthorLogin: h.PullRequest.Author.Login,\n\t\tAuthorName: h.PullRequest.Author.Name,\n\t\tAuthorEmail: h.PullRequest.Author.Email,\n\t\tAuthorAvatar: h.PullRequest.Author.Avatar,\n\t\tSenderEmail: h.Sender.Email,\n\t\tSenderAvatar: h.Sender.Avatar,\n\t\tSenderName: h.Sender.Name,\n\t\tSenderLogin: h.Sender.Login,\n\t\tPrNumber: h.PullRequest.Number,\n\t\tPrTitle: h.PullRequest.Title,\n\t\tPrBody: h.PullRequest.Body,\n\t}\n\trepo := &core.Repository{\n\t\tUID: h.Repo.ID,\n\t\tNamespace: h.Repo.Namespace,\n\t\tName: h.Repo.Name,\n\t\tFullName: fmt.Sprintf(\"%s\/%s\", h.Repo.Namespace, h.Repo.Name),\n\t\tURL: h.Repo.Link,\n\t\tPrivate: h.Repo.Private,\n\t\tClone: h.Repo.Clone,\n\t\tCloneSSH: h.Repo.CloneSSH,\n\t}\n\n\treturn githook, repo, nil\n}\n\nfunc (p *parser) parseBranchHook(h *scm.BranchHook) (*core.GitHook, *core.Repository, error) {\n\tif h.Action != scm.ActionCreate {\n\t\treturn nil, nil, nil\n\t}\n\n\tgithook := &core.GitHook{\n\t\tEvent: core.EventPush,\n\t\tLink: \"\",\n\t\tMessage: \"\",\n\t\tAfter: h.Ref.Sha,\n\t\tRef: h.Ref.Name,\n\t\tSource: h.Ref.Name,\n\t\tTarget: h.Ref.Name,\n\t\tAuthorLogin: h.Sender.Login,\n\t\tAuthorName: h.Sender.Name,\n\t\tAuthorEmail: h.Sender.Email,\n\t\tAuthorAvatar: h.Sender.Avatar,\n\t\tSenderLogin: h.Sender.Login,\n\t\tSenderName: h.Sender.Name,\n\t\tSenderEmail: h.Sender.Email,\n\t\tSenderAvatar: h.Sender.Avatar,\n\t}\n\trepo := &core.Repository{\n\t\tUID: h.Repo.ID,\n\t\tNamespace: h.Repo.Namespace,\n\t\tName: h.Repo.Name,\n\t\tFullName: fmt.Sprintf(\"%s\/%s\", h.Repo.Namespace, h.Repo.Name),\n\t\tURL: h.Repo.Link,\n\t\tPrivate: h.Repo.Private,\n\t\tClone: h.Repo.Clone,\n\t\tCloneSSH: h.Repo.CloneSSH,\n\t}\n\n\treturn githook, repo, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package validate_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/shared\/validate\"\n)\n\nfunc ExampleIsNetworkMAC() {\n\ttests := []string{\n\t\t\"00:00:5e:00:53:01\",\n\t\t\"02:00:5e:10:00:00:00:01\", \/\/ too long\n\t\t\"00-00-5e-00-53-01\", \/\/ invalid delimiter\n\t\t\"0000.5e00.5301\", \/\/ invalid delimiter\n\t\t\"invalid\",\n\t\t\"\",\n\t}\n\n\tfor _, v := range tests {\n\t\terr := validate.IsNetworkMAC(v)\n\t\tfmt.Printf(\"%s, %t\\n\", v, err == nil)\n\t}\n\n\t\/\/ Output: 00:00:5e:00:53:01, true\n\t\/\/ 02:00:5e:10:00:00:00:01, false\n\t\/\/ 00-00-5e-00-53-01, false\n\t\/\/ 0000.5e00.5301, false\n\t\/\/ invalid, false\n\t\/\/ , false\n}\n\nfunc ExampleIsPCIAddress() {\n\ttests := []string{\n\t\t\"0000:12:ab.0\", \/\/ valid\n\t\t\"0010:12:ab.0\", \/\/ valid\n\t\t\"0000:12:CD.0\", \/\/ valid\n\t\t\"12:ab.0\", \/\/ valid\n\t\t\"12:CD.0\", \/\/ valid\n\t\t\"0000:12:gh.0\", \/\/ invalid hex\n\t\t\"0000:12:GH.0\", \/\/ invalid hex\n\t\t\"12:gh.0\", \/\/ invalid hex\n\t\t\"12:GH.0\", \/\/ invalid hex\n\t\t\"000:12:CD.0\", \/\/ wrong prefix\n\t\t\"12.ab.0\", \/\/ invalid format\n\t\t\"\",\n\t}\n\n\tfor _, v := range tests {\n\t\terr := validate.IsPCIAddress(v)\n\t\tfmt.Printf(\"%s, %t\\n\", v, err == nil)\n\t}\n\n\t\/\/ Output: 0000:12:ab.0, true\n\t\/\/ 0010:12:ab.0, true\n\t\/\/ 0000:12:CD.0, true\n\t\/\/ 12:ab.0, true\n\t\/\/ 12:CD.0, true\n\t\/\/ 0000:12:gh.0, false\n\t\/\/ 0000:12:GH.0, false\n\t\/\/ 12:gh.0, false\n\t\/\/ 12:GH.0, false\n\t\/\/ 000:12:CD.0, false\n\t\/\/ 12.ab.0, false\n\t\/\/ , false\n}\n<commit_msg>shared\/validate\/validate\/test: Adds tests for Required and Optional<commit_after>package validate_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/shared\/validate\"\n)\n\nfunc ExampleIsNetworkMAC() {\n\ttests := []string{\n\t\t\"00:00:5e:00:53:01\",\n\t\t\"02:00:5e:10:00:00:00:01\", \/\/ too long\n\t\t\"00-00-5e-00-53-01\", \/\/ invalid delimiter\n\t\t\"0000.5e00.5301\", \/\/ invalid delimiter\n\t\t\"invalid\",\n\t\t\"\",\n\t}\n\n\tfor _, v := range tests {\n\t\terr := validate.IsNetworkMAC(v)\n\t\tfmt.Printf(\"%s, %t\\n\", v, err == nil)\n\t}\n\n\t\/\/ Output: 00:00:5e:00:53:01, true\n\t\/\/ 02:00:5e:10:00:00:00:01, false\n\t\/\/ 00-00-5e-00-53-01, false\n\t\/\/ 0000.5e00.5301, false\n\t\/\/ invalid, false\n\t\/\/ , false\n}\n\nfunc ExampleIsPCIAddress() {\n\ttests := []string{\n\t\t\"0000:12:ab.0\", \/\/ valid\n\t\t\"0010:12:ab.0\", \/\/ valid\n\t\t\"0000:12:CD.0\", \/\/ valid\n\t\t\"12:ab.0\", \/\/ valid\n\t\t\"12:CD.0\", \/\/ valid\n\t\t\"0000:12:gh.0\", \/\/ invalid hex\n\t\t\"0000:12:GH.0\", \/\/ invalid hex\n\t\t\"12:gh.0\", \/\/ invalid hex\n\t\t\"12:GH.0\", \/\/ invalid hex\n\t\t\"000:12:CD.0\", \/\/ wrong prefix\n\t\t\"12.ab.0\", \/\/ invalid format\n\t\t\"\",\n\t}\n\n\tfor _, v := range tests {\n\t\terr := validate.IsPCIAddress(v)\n\t\tfmt.Printf(\"%s, %t\\n\", v, err == nil)\n\t}\n\n\t\/\/ Output: 0000:12:ab.0, true\n\t\/\/ 0010:12:ab.0, true\n\t\/\/ 0000:12:CD.0, true\n\t\/\/ 12:ab.0, true\n\t\/\/ 12:CD.0, true\n\t\/\/ 0000:12:gh.0, false\n\t\/\/ 0000:12:GH.0, false\n\t\/\/ 12:gh.0, false\n\t\/\/ 12:GH.0, false\n\t\/\/ 000:12:CD.0, false\n\t\/\/ 12.ab.0, false\n\t\/\/ , false\n}\n\nfunc ExampleOptional() {\n\ttests := []string{\n\t\t\"\",\n\t\t\"foo\",\n\t\t\"true\",\n\t}\n\n\tfor _, v := range tests {\n\t\tf := validate.Optional()\n\t\tfmt.Printf(\"%v \", f(v))\n\n\t\tf = validate.Optional(validate.IsBool)\n\t\tfmt.Printf(\"%v\\n\", f(v))\n\t}\n\n\t\/\/ Output: <nil> <nil>\n\t\/\/ <nil> Invalid value for a boolean \"foo\"\n\t\/\/ <nil> <nil>\n}\n\nfunc ExampleRequired() {\n\ttests := []string{\n\t\t\"\",\n\t\t\"foo\",\n\t\t\"true\",\n\t}\n\n\tfor _, v := range tests {\n\t\tf := validate.Required()\n\t\tfmt.Printf(\"%v \", f(v))\n\n\t\tf = validate.Required(validate.IsBool)\n\t\tfmt.Printf(\"%v\\n\", f(v))\n\t}\n\n\t\/\/ Output: <nil> Invalid value for a boolean \"\"\n\t\/\/ <nil> Invalid value for a boolean \"foo\"\n\t\/\/ <nil> <nil>\n}\n<|endoftext|>"} {"text":"<commit_before>package solrmonitor\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\n\/\/ simple generator of zk.Event objects, with monotonically increasing Type field\ntype eventGenerator int32\n\nfunc (g *eventGenerator) newEvent() zk.Event {\n\te := atomic.AddInt32((*int32)(g), 1)\n\treturn zk.Event{Type: zk.EventType(e)}\n}\n\n\/\/ state used and tracked throughout each test case\ntype testState struct {\n\tt *testing.T\n\t\/\/ generator of events, for submitting an event to fire the re-registered watch\n\tgen eventGenerator\n\t\/\/ completes when the callback is done and no longer re-registering itself\n\tdone sync.WaitGroup\n\t\/\/ number of concurrent executions\n\tnumConcurrentExecutions int32\n\t\/\/ maximum number of concurrent executions observed\n\tmaxConcurrentExecutions int32\n\t\/\/ optional wait group used to let watcher go routines start\n\tready *sync.WaitGroup\n\t\/\/ optional wait group used to coordinate the start of handling\n\trun *sync.WaitGroup\n\t\/\/ if true, assume events are consumed perfectly in order (e.g. event #1 is the first\n\t\/\/ event handled, event #2 is the second, and so on)\n\tassumeEventOrder bool\n}\n\nfunc (state *testState) newCallback(maxEvents int32) *testCallback {\n\tif state.ready != nil {\n\t\tstate.ready.Add(1)\n\t}\n\treturn &testCallback{\n\t\tstate: state,\n\t\tmaxNumEvents: maxEvents,\n\t\tready: state.ready,\n\t\trun: state.run,\n\t}\n}\n\n\/\/ callback for testing dispatches\ntype testCallback struct {\n\tstate *testState\n\t\/\/ number of events processed\n\tnumEvents int32\n\t\/\/ maximum number of events to process, after which the callback no longer re-registers\n\t\/\/ itself\n\tmaxNumEvents int32\n\t\/\/ if non-nil, mark this as done at the start of first callback (to indicate that the\n\t\/\/ callback is ready to start)\n\tready *sync.WaitGroup\n\t\/\/ if non-nil, wait on this before starting first callback (to coordinate the start of\n\t\/\/ multiple callbacks)\n\trun *sync.WaitGroup\n}\n\nfunc (cb *testCallback) Handle(e zk.Event) <-chan zk.Event {\n\t\/\/ track the maximum number of concurrent executions\n\tnum := atomic.AddInt32(&cb.state.numConcurrentExecutions, 1)\n\tdefer atomic.AddInt32(&cb.state.numConcurrentExecutions, -1)\n\n\t\/\/ if these wait groups are non-nil, we are coordinating the start of handling\n\tif cb.ready != nil {\n\t\tcb.ready.Done()\n\t\tcb.ready = nil\n\t}\n\tif cb.run != nil {\n\t\tcb.run.Wait()\n\t\tcb.run = nil\n\t}\n\n\tfor {\n\t\tmax := atomic.LoadInt32(&cb.state.maxConcurrentExecutions)\n\t\tif num <= max {\n\t\t\tbreak\n\t\t}\n\t\tif num > max &&\n\t\t\tatomic.CompareAndSwapInt32(&cb.state.maxConcurrentExecutions, max, num) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tnumEvents := atomic.AddInt32(&cb.numEvents, 1)\n\tif cb.state.assumeEventOrder && e.Type != zk.EventType(numEvents) {\n\t\tcb.state.t.Errorf(\"Expecting event %d, got %d\", numEvents, int(e.Type))\n\t}\n\n\tif numEvents >= cb.maxNumEvents {\n\t\tcb.state.done.Done()\n\t\treturn nil\n\t} else {\n\t\tch := make(chan zk.Event, 1)\n\t\tch <- cb.state.gen.newEvent()\n\t\treturn ch\n\t}\n}\n\nfunc TestReregistration(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\n\tstate := testState{t: t, assumeEventOrder: true}\n\tcb := state.newCallback(100)\n\tstate.done.Add(1)\n\n\tchannel := make(chan zk.Event, 1)\n\tif err := disp.WatchEvent(channel, cb); err != nil {\n\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t}\n\n\tchannel <- state.gen.newEvent()\n\tstate.done.Wait()\n\n\tif cb.numEvents != 100 {\n\t\tt.Errorf(\"Should have processed 100 events but instead processed %d\", cb.numEvents)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToSameCallbackAreSerial(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run}\n\tcb := state.newCallback(10)\n\n\tfor i := 0; i < 10; i++ {\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tif err := disp.WatchEvent(channels[i], cb); err != nil {\n\t\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\t\/\/ give dispatch go routine(s) a chance to start\n\ttime.Sleep(200 * time.Millisecond)\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 1 {\n\t\tt.Errorf(\"Executions should be serial, but detected %d concurrent exceutions\",\n\t\t\tstate.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToSameNakedFuncAreConcurrent(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run}\n\tcb := state.newCallback(10)\n\tf := cb.Handle\n\n\tfor i := 0; i < 10; i++ {\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tdisp.Watch(channels[i], f)\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\t\/\/ give dispatch go routine(s) a chance to start\n\ttime.Sleep(200 * time.Millisecond)\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 10 {\n\t\tt.Errorf(\"Should have been 10 concurrent executions (1 for each lambda);\"+\n\t\t\t\" instead detected %d\", state.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToDifferentCallbacksAreConcurrent(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run, ready sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run, ready: &ready}\n\tvar callbacks [10]*testCallback\n\tfor i := 0; i < 10; i++ {\n\t\tcallbacks[i] = state.newCallback(10)\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tif err := disp.WatchEvent(channels[i], callbacks[i]); err != nil {\n\t\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\n\t\/\/ make sure they are all ready\n\tready.Wait()\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 10 {\n\t\tt.Errorf(\"Should have been 10 concurrent executions (1 for each callback);\"+\n\t\t\t\" instead detected %d\", state.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc tasksShouldBecomeEmpty(t *testing.T, disp *ZkDispatcher) {\n\t\/\/ make sure that map of tasks gets cleaned up and returns to empty\n\tshouldBecomeEq(t, 0, func() int32 {\n\t\tdisp.taskMu.Lock()\n\t\tdefer disp.taskMu.Unlock()\n\t\treturn int32(len(disp.tasks))\n\t})\n}\n<commit_msg>Fix race condition in zkdispatcher_test<commit_after>package solrmonitor\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\n\/\/ simple generator of zk.Event objects, with monotonically increasing Type field\ntype eventGenerator int32\n\nfunc (g *eventGenerator) newEvent() zk.Event {\n\te := atomic.AddInt32((*int32)(g), 1)\n\treturn zk.Event{Type: zk.EventType(e)}\n}\n\n\/\/ state used and tracked throughout each test case\ntype testState struct {\n\tt *testing.T\n\t\/\/ generator of events, for submitting an event to fire the re-registered watch\n\tgen eventGenerator\n\t\/\/ completes when the callback is done and no longer re-registering itself\n\tdone sync.WaitGroup\n\t\/\/ number of concurrent executions\n\tnumConcurrentExecutions int32\n\t\/\/ maximum number of concurrent executions observed\n\tmaxConcurrentExecutions int32\n\t\/\/ optional wait group used to let watcher go routines start\n\tready *sync.WaitGroup\n\t\/\/ optional wait group used to coordinate the start of handling\n\trun *sync.WaitGroup\n\t\/\/ if true, assume events are consumed perfectly in order (e.g. event #1 is the first\n\t\/\/ event handled, event #2 is the second, and so on)\n\tassumeEventOrder bool\n}\n\nfunc (state *testState) newCallback(maxEvents int32) *testCallback {\n\tif state.ready != nil {\n\t\tstate.ready.Add(1)\n\t}\n\treturn &testCallback{\n\t\tstate: state,\n\t\tmaxNumEvents: maxEvents,\n\t\tready: state.ready,\n\t\trun: state.run,\n\t}\n}\n\n\/\/ callback for testing dispatches\ntype testCallback struct {\n\tstate *testState\n\t\/\/ number of events processed\n\tnumEvents int32\n\t\/\/ maximum number of events to process, after which the callback no longer re-registers\n\t\/\/ itself\n\tmaxNumEvents int32\n\t\/\/ if non-nil, mark this as done at the start of first callback (to indicate that the\n\t\/\/ callback is ready to start)\n\tready *sync.WaitGroup\n\t\/\/ if non-nil, wait on this before starting first callback (to coordinate the start of\n\t\/\/ multiple callbacks)\n\trun *sync.WaitGroup\n}\n\nfunc (cb *testCallback) Handle(e zk.Event) <-chan zk.Event {\n\t\/\/ track the maximum number of concurrent executions\n\tnum := atomic.AddInt32(&cb.state.numConcurrentExecutions, 1)\n\tdefer atomic.AddInt32(&cb.state.numConcurrentExecutions, -1)\n\n\t\/\/ if these wait groups are non-nil, we are coordinating the start of handling\n\tif cb.ready != nil {\n\t\tcb.ready.Done()\n\t\tcb.ready = nil\n\t}\n\tif cb.run != nil {\n\t\tcb.run.Wait()\n\t}\n\n\tfor {\n\t\tmax := atomic.LoadInt32(&cb.state.maxConcurrentExecutions)\n\t\tif num <= max {\n\t\t\tbreak\n\t\t}\n\t\tif num > max &&\n\t\t\tatomic.CompareAndSwapInt32(&cb.state.maxConcurrentExecutions, max, num) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tnumEvents := atomic.AddInt32(&cb.numEvents, 1)\n\tif cb.state.assumeEventOrder && e.Type != zk.EventType(numEvents) {\n\t\tcb.state.t.Errorf(\"Expecting event %d, got %d\", numEvents, int(e.Type))\n\t}\n\n\tif numEvents >= cb.maxNumEvents {\n\t\tcb.state.done.Done()\n\t\treturn nil\n\t} else {\n\t\tch := make(chan zk.Event, 1)\n\t\tch <- cb.state.gen.newEvent()\n\t\treturn ch\n\t}\n}\n\nfunc TestReregistration(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\n\tstate := testState{t: t, assumeEventOrder: true}\n\tcb := state.newCallback(100)\n\tstate.done.Add(1)\n\n\tchannel := make(chan zk.Event, 1)\n\tif err := disp.WatchEvent(channel, cb); err != nil {\n\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t}\n\n\tchannel <- state.gen.newEvent()\n\tstate.done.Wait()\n\n\tif cb.numEvents != 100 {\n\t\tt.Errorf(\"Should have processed 100 events but instead processed %d\", cb.numEvents)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToSameCallbackAreSerial(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run}\n\tcb := state.newCallback(10)\n\n\tfor i := 0; i < 10; i++ {\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tif err := disp.WatchEvent(channels[i], cb); err != nil {\n\t\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\t\/\/ give dispatch go routine(s) a chance to start\n\ttime.Sleep(200 * time.Millisecond)\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 1 {\n\t\tt.Errorf(\"Executions should be serial, but detected %d concurrent exceutions\",\n\t\t\tstate.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToSameNakedFuncAreConcurrent(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run}\n\tcb := state.newCallback(10)\n\tf := cb.Handle\n\n\tfor i := 0; i < 10; i++ {\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tdisp.Watch(channels[i], f)\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\t\/\/ give dispatch go routine(s) a chance to start\n\ttime.Sleep(200 * time.Millisecond)\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 10 {\n\t\tt.Errorf(\"Should have been 10 concurrent executions (1 for each lambda);\"+\n\t\t\t\" instead detected %d\", state.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc TestDispatchesToDifferentCallbacksAreConcurrent(t *testing.T) {\n\tdisp := NewZkDispatcher(zk.DefaultLogger)\n\tvar channels [10]chan zk.Event\n\tvar run, ready sync.WaitGroup\n\trun.Add(1)\n\n\tstate := testState{t: t, run: &run, ready: &ready}\n\tvar callbacks [10]*testCallback\n\tfor i := 0; i < 10; i++ {\n\t\tcallbacks[i] = state.newCallback(10)\n\t\tstate.done.Add(1)\n\t\tchannels[i] = make(chan zk.Event, 1)\n\t\tif err := disp.WatchEvent(channels[i], callbacks[i]); err != nil {\n\t\t\tt.Fatalf(\"failed to watch: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ send an event to get them started\n\tfor i := 0; i < 10; i++ {\n\t\tchannels[i] <- state.gen.newEvent()\n\t}\n\n\t\/\/ make sure they are all ready\n\tready.Wait()\n\t\/\/ let 'em rip\n\trun.Done()\n\n\t\/\/ wait for them all to finish\n\tstate.done.Wait()\n\n\tif state.maxConcurrentExecutions != 10 {\n\t\tt.Errorf(\"Should have been 10 concurrent executions (1 for each callback);\"+\n\t\t\t\" instead detected %d\", state.maxConcurrentExecutions)\n\t}\n\n\ttasksShouldBecomeEmpty(t, disp)\n}\n\nfunc tasksShouldBecomeEmpty(t *testing.T, disp *ZkDispatcher) {\n\t\/\/ make sure that map of tasks gets cleaned up and returns to empty\n\tshouldBecomeEq(t, 0, func() int32 {\n\t\tdisp.taskMu.Lock()\n\t\tdefer disp.taskMu.Unlock()\n\t\treturn int32(len(disp.tasks))\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package tchannel\n\n\/\/ Copyright (c) 2015 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/uber\/tchannel\/golang\/typed\"\n\t\"golang.org\/x\/net\/context\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ PeerInfo contains nformation about a TChannel peer\ntype PeerInfo struct {\n\t\/\/ The host and port that can be used to contact the peer, as encoded by net.JoinHostPort\n\tHostPort string\n\n\t\/\/ The logical process name for the peer, used for only for logging \/ debugging\n\tProcessName string\n}\n\nfunc (p PeerInfo) String() string {\n\treturn fmt.Sprintf(\"%s(%s)\", p.HostPort, p.ProcessName)\n}\n\n\/\/ CurrentProtocolVersion is the current version of the TChannel protocol\n\/\/ supported by this stack\nconst CurrentProtocolVersion = 0x02\n\nvar (\n\t\/\/ ErrConnectionClosed is returned when a caller performs an operation\n\t\/\/ on a closed connection\n\tErrConnectionClosed = errors.New(\"connection is closed\")\n\n\t\/\/ ErrConnectionNotReady is returned when a caller attempts to send a\n\t\/\/ request through a connection which has not yet been initialized\n\tErrConnectionNotReady = errors.New(\"connection is not yet ready\")\n\n\t\/\/ ErrSendBufferFull is returned when a message cannot be sent to the\n\t\/\/ peer because the frame sending buffer has become full. Typically\n\t\/\/ this indicates that the connection is stuck and writes have become\n\t\/\/ backed up\n\tErrSendBufferFull = errors.New(\"connection send buffer is full, cannot send frame\")\n\n\terrConnectionAlreadyActive = errors.New(\"connection is already active\")\n\terrConnectionWaitingOnPeerInit = errors.New(\"connection is waiting for the peer to sent init\")\n\terrCannotHandleInitRes = errors.New(\"could not return init-res to handshake thread\")\n)\n\n\/\/ ConnectionOptions are options that control the behavior of a Connection\ntype ConnectionOptions struct {\n\t\/\/ The frame pool, allowing better management of frame buffers. Defaults to using raw heap\n\tFramePool FramePool\n\n\t\/\/ The size of receive channel buffers. Defaults to 512\n\tRecvBufferSize int\n\n\t\/\/ The size of send channel buffers. Defaults to 512\n\tSendBufferSize int\n\n\t\/\/ The type of checksum to use when sending messages\n\tChecksumType ChecksumType\n}\n\n\/\/ Connection represents a connection to a remote peer.\ntype Connection struct {\n\tlog Logger\n\tchecksumType ChecksumType\n\tframePool FramePool\n\tconn net.Conn\n\tlocalPeerInfo PeerInfo\n\tremotePeerInfo PeerInfo\n\tsendCh chan *Frame\n\tstate connectionState\n\tstateMut sync.RWMutex\n\tinbound messageExchangeSet\n\toutbound messageExchangeSet\n\thandlers *handlerMap\n\tnextMessageID uint32\n}\n\ntype connectionState int\n\nconst (\n\t\/\/ Connection initiated by peer is waiting to recv init-req from peer\n\tconnectionWaitingToRecvInitReq connectionState = iota\n\n\t\/\/ Connection initated by current process is waiting to send init-req to peer\n\tconnectionWaitingToSendInitReq\n\n\t\/\/ Connection initiated by current process has sent init-req, and is\n\t\/\/ waiting for init-req\n\tconnectionWaitingToRecvInitRes\n\n\t\/\/ Connection is fully active\n\tconnectionActive\n\n\t\/\/ Connection is starting to close; new incoming requests are rejected, outbound\n\t\/\/ requests are allowed to proceed\n\tconnectionStartClose\n\n\t\/\/ Connection has finished processing all active inbound, and is\n\t\/\/ waiting for outbound requests to complete or timeout\n\tconnectionInboundClosed\n\n\t\/\/ Connection is fully closed\n\tconnectionClosed\n)\n\n\/\/ Creates a new Connection around an outbound connection initiated to a peer\nfunc newOutboundConnection(conn net.Conn, handlers *handlerMap, log Logger, peerInfo PeerInfo,\n\topts *ConnectionOptions) (*Connection, error) {\n\treturn newConnection(conn, connectionWaitingToSendInitReq, handlers, peerInfo, log, opts), nil\n}\n\n\/\/ Creates a new Connection based on an incoming connection from a peer\nfunc newInboundConnection(conn net.Conn, handlers *handlerMap, peerInfo PeerInfo, log Logger,\n\topts *ConnectionOptions) (*Connection, error) {\n\treturn newConnection(conn, connectionWaitingToRecvInitReq, handlers, peerInfo, log, opts), nil\n}\n\n\/\/ Creates a new connection in a given initial state\nfunc newConnection(conn net.Conn, initialState connectionState, handlers *handlerMap, peerInfo PeerInfo, log Logger,\n\topts *ConnectionOptions) *Connection {\n\n\tif opts == nil {\n\t\topts = &ConnectionOptions{}\n\t}\n\n\tsendBufferSize := opts.SendBufferSize\n\tif sendBufferSize <= 0 {\n\t\tsendBufferSize = 512\n\t}\n\n\trecvBufferSize := opts.RecvBufferSize\n\tif recvBufferSize <= 0 {\n\t\trecvBufferSize = 512\n\t}\n\n\tframePool := opts.FramePool\n\tif framePool == nil {\n\t\tframePool = DefaultFramePool\n\t}\n\n\tc := &Connection{\n\t\tlog: log,\n\t\tconn: conn,\n\t\tframePool: framePool,\n\t\tstate: initialState,\n\t\tsendCh: make(chan *Frame, sendBufferSize),\n\t\tlocalPeerInfo: peerInfo,\n\t\tchecksumType: opts.ChecksumType,\n\t\tinbound: messageExchangeSet{\n\t\t\tname: messageExchangeSetInbound,\n\t\t\tlog: log,\n\t\t\texchanges: make(map[uint32]*messageExchange),\n\t\t},\n\t\toutbound: messageExchangeSet{\n\t\t\tname: messageExchangeSetOutbound,\n\t\t\tlog: log,\n\t\t\texchanges: make(map[uint32]*messageExchange),\n\t\t},\n\t\thandlers: handlers,\n\t}\n\n\tgo c.readFrames()\n\tgo c.writeFrames()\n\treturn c\n}\n\n\/\/ Initiates a handshake with a peer.\nfunc (c *Connection) sendInit(ctx context.Context) error {\n\terr := c.withStateLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToSendInitReq:\n\t\t\tc.state = connectionWaitingToRecvInitRes\n\t\t\treturn nil\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\treturn errConnectionWaitingOnPeerInit\n\t\tcase connectionClosed, connectionStartClose, connectionInboundClosed:\n\t\t\treturn ErrConnectionClosed\n\t\tcase connectionActive, connectionWaitingToRecvInitRes:\n\t\t\treturn errConnectionAlreadyActive\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"connection in unknown state %d\", c.state)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinitMsgID := c.NextMessageID()\n\treq := initReq{initMessage{id: initMsgID}}\n\treq.Version = CurrentProtocolVersion\n\treq.initParams = initParams{\n\t\tInitParamHostPort: c.localPeerInfo.HostPort,\n\t\tInitParamProcessName: c.localPeerInfo.ProcessName,\n\t}\n\n\tmex, err := c.outbound.newExchange(ctx, req.messageType(), req.ID(), 1)\n\tif err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tdefer c.outbound.removeExchange(req.ID())\n\n\tif err := c.sendMessage(&req); err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tres := initRes{initMessage{id: initMsgID}}\n\terr = c.recvMessage(ctx, &res, mex.recvCh)\n\tif err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tif res.Version != CurrentProtocolVersion {\n\t\treturn c.connectionError(fmt.Errorf(\"Unsupported protocol version %d from peer\", res.Version))\n\t}\n\n\tc.remotePeerInfo.HostPort = res.initParams[InitParamHostPort]\n\tif c.remotePeerInfo.HostPort == ephemeralHostPort || c.remotePeerInfo.HostPort == \"\" {\n\t\tc.remotePeerInfo.HostPort = c.conn.RemoteAddr().String()\n\t}\n\tc.remotePeerInfo.ProcessName = res.initParams[InitParamProcessName]\n\n\tc.withStateLock(func() error {\n\t\tif c.state == connectionWaitingToRecvInitRes {\n\t\t\tc.state = connectionActive\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn nil\n}\n\n\/\/ Handles an incoming InitReq. If we are waiting for the peer to send us an\n\/\/ InitReq, and the InitReq is valid, send a corresponding InitRes and mark\n\/\/ ourselves as active\nfunc (c *Connection) handleInitReq(frame *Frame) {\n\tif err := c.withStateRLock(func() error {\n\t\treturn nil\n\t}); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tvar req initReq\n\trbuf := typed.NewReadBuffer(frame.SizedPayload())\n\tif err := req.read(rbuf); err != nil {\n\t\t\/\/ TODO(mmihic): Technically probably a protocol error\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tif req.Version != CurrentProtocolVersion {\n\t\t\/\/ TODO(mmihic): Send protocol error\n\t\tc.connectionError(fmt.Errorf(\"Unsupported protocol version %d from peer\", req.Version))\n\t\treturn\n\t}\n\n\tc.remotePeerInfo.HostPort = req.initParams[InitParamHostPort]\n\tc.remotePeerInfo.ProcessName = req.initParams[InitParamProcessName]\n\n\tres := initRes{initMessage{id: frame.Header.ID}}\n\tres.initParams = initParams{\n\t\tInitParamHostPort: c.localPeerInfo.HostPort,\n\t\tInitParamProcessName: c.localPeerInfo.ProcessName,\n\t}\n\tres.Version = CurrentProtocolVersion\n\tif err := c.sendMessage(&res); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tc.withStateLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\tc.state = connectionActive\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n\/\/ Handles an incoming InitRes. If we are waiting for the peer to send us an\n\/\/ InitRes, forward the InitRes to the waiting goroutine\n\n\/\/ TODO(mmihic): There is a race condition here, in that the peer might start\n\/\/ sending us requests before the goroutine doing initialization has a chance\n\/\/ to process the InitRes. We probably want to move the InitRes checking to\n\/\/ here (where it will run in the receiver goroutine and thus block new\n\/\/ incoming messages), and simply signal the init goroutine that we are done\nfunc (c *Connection) handleInitRes(frame *Frame) {\n\tif err := c.withStateRLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToRecvInitRes:\n\t\t\treturn nil\n\t\tcase connectionClosed, connectionStartClose, connectionInboundClosed:\n\t\t\treturn ErrConnectionClosed\n\n\t\tcase connectionActive:\n\t\t\treturn errConnectionAlreadyActive\n\n\t\tcase connectionWaitingToSendInitReq:\n\t\t\treturn ErrConnectionNotReady\n\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\treturn errConnectionWaitingOnPeerInit\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Connection in unknown state %d\", c.state)\n\t\t}\n\t}); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tif err := c.outbound.forwardPeerFrame(frame.Header.ID, frame); err != nil {\n\t\tc.connectionError(errCannotHandleInitRes)\n\t}\n}\n\n\/\/ sendMessage sends a standalone message (typically a control message)\nfunc (c *Connection) sendMessage(msg message) error {\n\tframe := c.framePool.Get()\n\tif err := frame.write(msg); err != nil {\n\t\tc.framePool.Release(frame)\n\t\treturn err\n\t}\n\n\tselect {\n\tcase c.sendCh <- frame:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrSendBufferFull\n\t}\n}\n\n\/\/ recvMessage blocks waiting for a standalone response message (typically a\n\/\/ control message)\nfunc (c *Connection) recvMessage(ctx context.Context, msg message, resCh <-chan *Frame) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\n\tcase frame := <-resCh:\n\t\terr := frame.read(msg)\n\t\tc.framePool.Release(frame)\n\t\treturn err\n\t}\n}\n\n\/\/ NextMessageID reserves the next available message id for this connection\nfunc (c *Connection) NextMessageID() uint32 {\n\treturn atomic.AddUint32(&c.nextMessageID, 1)\n}\n\n\/\/ connectionError handles a connection level error\nfunc (c *Connection) connectionError(err error) error {\n\tdoClose := false\n\tc.withStateLock(func() error {\n\t\tif c.state != connectionClosed {\n\t\t\tc.state = connectionClosed\n\t\t\tdoClose = true\n\t\t}\n\t\treturn nil\n\t})\n\n\tif doClose {\n\t\tc.closeNetwork()\n\t}\n\n\treturn NewWrappedSystemError(ErrCodeNetwork, err)\n}\n\n\/\/ withStateLock performs an action with the connection state mutex locked\nfunc (c *Connection) withStateLock(f func() error) error {\n\tc.stateMut.Lock()\n\tdefer c.stateMut.Unlock()\n\n\treturn f()\n}\n\n\/\/ withStateRLock an action with the connection state mutex held in a read lock\nfunc (c *Connection) withStateRLock(f func() error) error {\n\tc.stateMut.RLock()\n\tdefer c.stateMut.RUnlock()\n\n\treturn f()\n}\n\n\/\/ readFrames is the loop that reads frames from the network connection and\n\/\/ dispatches to the appropriate handler. Run within its own goroutine to\n\/\/ prevent overlapping reads on the socket. Most handlers simply send the\n\/\/ incoming frame to a channel; the init handlers are a notable exception,\n\/\/ since we cannot process new frames until the initialization is complete.\nfunc (c *Connection) readFrames() {\n\tfor {\n\t\tframe := c.framePool.Get()\n\t\tif err := frame.ReadFrom(c.conn); err != nil {\n\t\t\tc.framePool.Release(frame)\n\t\t\tc.connectionError(err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch frame.Header.messageType {\n\t\tcase messageTypeCallReq:\n\t\t\tc.handleCallReq(frame)\n\t\tcase messageTypeCallReqContinue:\n\t\t\tc.handleCallReqContinue(frame)\n\t\tcase messageTypeCallRes:\n\t\t\tc.handleCallRes(frame)\n\t\tcase messageTypeCallResContinue:\n\t\t\tc.handleCallResContinue(frame)\n\t\tcase messageTypeInitReq:\n\t\t\tc.handleInitReq(frame)\n\t\tcase messageTypeInitRes:\n\t\t\tc.handleInitRes(frame)\n\t\tcase messageTypeError:\n\t\t\tc.handleError(frame)\n\t\tdefault:\n\t\t\t\/\/ TODO(mmihic): Log and close connection with protocol error\n\t\t}\n\t}\n}\n\n\/\/ writeFrames is the main loop that pulls frames from the send channel and\n\/\/ writes them to the connection.\nfunc (c *Connection) writeFrames() {\n\tfor f := range c.sendCh {\n\t\tdefer c.framePool.Release(f)\n\n\t\tif err := f.WriteTo(c.conn); err != nil {\n\t\t\tc.connectionError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Close the network after we have sent the last frame\n\tc.closeNetwork()\n}\n\n\/\/ closeNetwork closes the network connection and all network-related channels.\n\/\/ This should only be done in response to a fatal connection or protocol\n\/\/ error, or after all pending frames have been sent.\nfunc (c *Connection) closeNetwork() {\n\t\/\/ NB(mmihic): The sender goroutine will exit once the connection is\n\t\/\/ closed; no need to close the send channel (and closing the send\n\t\/\/ channel would be dangerous since other goroutine might be sending)\n\tif err := c.conn.Close(); err != nil {\n\t\tc.log.Warnf(\"could not close connection to peer %s: %v\", c.remotePeerInfo, err)\n\t}\n}\n<commit_msg>Add more logging in connection<commit_after>package tchannel\n\n\/\/ Copyright (c) 2015 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/uber\/tchannel\/golang\/typed\"\n\t\"golang.org\/x\/net\/context\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ PeerInfo contains nformation about a TChannel peer\ntype PeerInfo struct {\n\t\/\/ The host and port that can be used to contact the peer, as encoded by net.JoinHostPort\n\tHostPort string\n\n\t\/\/ The logical process name for the peer, used for only for logging \/ debugging\n\tProcessName string\n}\n\nfunc (p PeerInfo) String() string {\n\treturn fmt.Sprintf(\"%s(%s)\", p.HostPort, p.ProcessName)\n}\n\n\/\/ CurrentProtocolVersion is the current version of the TChannel protocol\n\/\/ supported by this stack\nconst CurrentProtocolVersion = 0x02\n\nvar (\n\t\/\/ ErrConnectionClosed is returned when a caller performs an operation\n\t\/\/ on a closed connection\n\tErrConnectionClosed = errors.New(\"connection is closed\")\n\n\t\/\/ ErrConnectionNotReady is returned when a caller attempts to send a\n\t\/\/ request through a connection which has not yet been initialized\n\tErrConnectionNotReady = errors.New(\"connection is not yet ready\")\n\n\t\/\/ ErrSendBufferFull is returned when a message cannot be sent to the\n\t\/\/ peer because the frame sending buffer has become full. Typically\n\t\/\/ this indicates that the connection is stuck and writes have become\n\t\/\/ backed up\n\tErrSendBufferFull = errors.New(\"connection send buffer is full, cannot send frame\")\n\n\terrConnectionAlreadyActive = errors.New(\"connection is already active\")\n\terrConnectionWaitingOnPeerInit = errors.New(\"connection is waiting for the peer to sent init\")\n\terrCannotHandleInitRes = errors.New(\"could not return init-res to handshake thread\")\n)\n\n\/\/ ConnectionOptions are options that control the behavior of a Connection\ntype ConnectionOptions struct {\n\t\/\/ The frame pool, allowing better management of frame buffers. Defaults to using raw heap\n\tFramePool FramePool\n\n\t\/\/ The size of receive channel buffers. Defaults to 512\n\tRecvBufferSize int\n\n\t\/\/ The size of send channel buffers. Defaults to 512\n\tSendBufferSize int\n\n\t\/\/ The type of checksum to use when sending messages\n\tChecksumType ChecksumType\n}\n\n\/\/ Connection represents a connection to a remote peer.\ntype Connection struct {\n\tlog Logger\n\tchecksumType ChecksumType\n\tframePool FramePool\n\tconn net.Conn\n\tlocalPeerInfo PeerInfo\n\tremotePeerInfo PeerInfo\n\tsendCh chan *Frame\n\tstate connectionState\n\tstateMut sync.RWMutex\n\tinbound messageExchangeSet\n\toutbound messageExchangeSet\n\thandlers *handlerMap\n\tnextMessageID uint32\n}\n\ntype connectionState int\n\nconst (\n\t\/\/ Connection initiated by peer is waiting to recv init-req from peer\n\tconnectionWaitingToRecvInitReq connectionState = iota\n\n\t\/\/ Connection initated by current process is waiting to send init-req to peer\n\tconnectionWaitingToSendInitReq\n\n\t\/\/ Connection initiated by current process has sent init-req, and is\n\t\/\/ waiting for init-req\n\tconnectionWaitingToRecvInitRes\n\n\t\/\/ Connection is fully active\n\tconnectionActive\n\n\t\/\/ Connection is starting to close; new incoming requests are rejected, outbound\n\t\/\/ requests are allowed to proceed\n\tconnectionStartClose\n\n\t\/\/ Connection has finished processing all active inbound, and is\n\t\/\/ waiting for outbound requests to complete or timeout\n\tconnectionInboundClosed\n\n\t\/\/ Connection is fully closed\n\tconnectionClosed\n)\n\n\/\/ Creates a new Connection around an outbound connection initiated to a peer\nfunc newOutboundConnection(conn net.Conn, handlers *handlerMap, log Logger, peerInfo PeerInfo,\n\topts *ConnectionOptions) (*Connection, error) {\n\treturn newConnection(conn, connectionWaitingToSendInitReq, handlers, peerInfo, log, opts), nil\n}\n\n\/\/ Creates a new Connection based on an incoming connection from a peer\nfunc newInboundConnection(conn net.Conn, handlers *handlerMap, peerInfo PeerInfo, log Logger,\n\topts *ConnectionOptions) (*Connection, error) {\n\treturn newConnection(conn, connectionWaitingToRecvInitReq, handlers, peerInfo, log, opts), nil\n}\n\n\/\/ Creates a new connection in a given initial state\nfunc newConnection(conn net.Conn, initialState connectionState, handlers *handlerMap, peerInfo PeerInfo, log Logger,\n\topts *ConnectionOptions) *Connection {\n\n\tif opts == nil {\n\t\topts = &ConnectionOptions{}\n\t}\n\n\tsendBufferSize := opts.SendBufferSize\n\tif sendBufferSize <= 0 {\n\t\tsendBufferSize = 512\n\t}\n\n\trecvBufferSize := opts.RecvBufferSize\n\tif recvBufferSize <= 0 {\n\t\trecvBufferSize = 512\n\t}\n\n\tframePool := opts.FramePool\n\tif framePool == nil {\n\t\tframePool = DefaultFramePool\n\t}\n\n\tc := &Connection{\n\t\tlog: log,\n\t\tconn: conn,\n\t\tframePool: framePool,\n\t\tstate: initialState,\n\t\tsendCh: make(chan *Frame, sendBufferSize),\n\t\tlocalPeerInfo: peerInfo,\n\t\tchecksumType: opts.ChecksumType,\n\t\tinbound: messageExchangeSet{\n\t\t\tname: messageExchangeSetInbound,\n\t\t\tlog: log,\n\t\t\texchanges: make(map[uint32]*messageExchange),\n\t\t},\n\t\toutbound: messageExchangeSet{\n\t\t\tname: messageExchangeSetOutbound,\n\t\t\tlog: log,\n\t\t\texchanges: make(map[uint32]*messageExchange),\n\t\t},\n\t\thandlers: handlers,\n\t}\n\n\tgo c.readFrames()\n\tgo c.writeFrames()\n\treturn c\n}\n\n\/\/ Initiates a handshake with a peer.\nfunc (c *Connection) sendInit(ctx context.Context) error {\n\terr := c.withStateLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToSendInitReq:\n\t\t\tc.state = connectionWaitingToRecvInitRes\n\t\t\treturn nil\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\treturn errConnectionWaitingOnPeerInit\n\t\tcase connectionClosed, connectionStartClose, connectionInboundClosed:\n\t\t\treturn ErrConnectionClosed\n\t\tcase connectionActive, connectionWaitingToRecvInitRes:\n\t\t\treturn errConnectionAlreadyActive\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"connection in unknown state %d\", c.state)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinitMsgID := c.NextMessageID()\n\treq := initReq{initMessage{id: initMsgID}}\n\treq.Version = CurrentProtocolVersion\n\treq.initParams = initParams{\n\t\tInitParamHostPort: c.localPeerInfo.HostPort,\n\t\tInitParamProcessName: c.localPeerInfo.ProcessName,\n\t}\n\n\tmex, err := c.outbound.newExchange(ctx, req.messageType(), req.ID(), 1)\n\tif err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tdefer c.outbound.removeExchange(req.ID())\n\n\tif err := c.sendMessage(&req); err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tres := initRes{initMessage{id: initMsgID}}\n\terr = c.recvMessage(ctx, &res, mex.recvCh)\n\tif err != nil {\n\t\treturn c.connectionError(err)\n\t}\n\n\tif res.Version != CurrentProtocolVersion {\n\t\treturn c.connectionError(fmt.Errorf(\"Unsupported protocol version %d from peer\", res.Version))\n\t}\n\n\tc.remotePeerInfo.HostPort = res.initParams[InitParamHostPort]\n\tif c.remotePeerInfo.HostPort == ephemeralHostPort || c.remotePeerInfo.HostPort == \"\" {\n\t\tc.remotePeerInfo.HostPort = c.conn.RemoteAddr().String()\n\t}\n\tc.remotePeerInfo.ProcessName = res.initParams[InitParamProcessName]\n\n\tc.withStateLock(func() error {\n\t\tif c.state == connectionWaitingToRecvInitRes {\n\t\t\tc.state = connectionActive\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn nil\n}\n\n\/\/ Handles an incoming InitReq. If we are waiting for the peer to send us an\n\/\/ InitReq, and the InitReq is valid, send a corresponding InitRes and mark\n\/\/ ourselves as active\nfunc (c *Connection) handleInitReq(frame *Frame) {\n\tif err := c.withStateRLock(func() error {\n\t\treturn nil\n\t}); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tvar req initReq\n\trbuf := typed.NewReadBuffer(frame.SizedPayload())\n\tif err := req.read(rbuf); err != nil {\n\t\t\/\/ TODO(mmihic): Technically probably a protocol error\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tif req.Version != CurrentProtocolVersion {\n\t\t\/\/ TODO(mmihic): Send protocol error\n\t\tc.connectionError(fmt.Errorf(\"Unsupported protocol version %d from peer\", req.Version))\n\t\treturn\n\t}\n\n\tc.remotePeerInfo.HostPort = req.initParams[InitParamHostPort]\n\tc.remotePeerInfo.ProcessName = req.initParams[InitParamProcessName]\n\n\tres := initRes{initMessage{id: frame.Header.ID}}\n\tres.initParams = initParams{\n\t\tInitParamHostPort: c.localPeerInfo.HostPort,\n\t\tInitParamProcessName: c.localPeerInfo.ProcessName,\n\t}\n\tres.Version = CurrentProtocolVersion\n\tif err := c.sendMessage(&res); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tc.withStateLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\tc.state = connectionActive\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n\/\/ Handles an incoming InitRes. If we are waiting for the peer to send us an\n\/\/ InitRes, forward the InitRes to the waiting goroutine\n\n\/\/ TODO(mmihic): There is a race condition here, in that the peer might start\n\/\/ sending us requests before the goroutine doing initialization has a chance\n\/\/ to process the InitRes. We probably want to move the InitRes checking to\n\/\/ here (where it will run in the receiver goroutine and thus block new\n\/\/ incoming messages), and simply signal the init goroutine that we are done\nfunc (c *Connection) handleInitRes(frame *Frame) {\n\tif err := c.withStateRLock(func() error {\n\t\tswitch c.state {\n\t\tcase connectionWaitingToRecvInitRes:\n\t\t\treturn nil\n\t\tcase connectionClosed, connectionStartClose, connectionInboundClosed:\n\t\t\treturn ErrConnectionClosed\n\n\t\tcase connectionActive:\n\t\t\treturn errConnectionAlreadyActive\n\n\t\tcase connectionWaitingToSendInitReq:\n\t\t\treturn ErrConnectionNotReady\n\n\t\tcase connectionWaitingToRecvInitReq:\n\t\t\treturn errConnectionWaitingOnPeerInit\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Connection in unknown state %d\", c.state)\n\t\t}\n\t}); err != nil {\n\t\tc.connectionError(err)\n\t\treturn\n\t}\n\n\tif err := c.outbound.forwardPeerFrame(frame.Header.ID, frame); err != nil {\n\t\tc.connectionError(errCannotHandleInitRes)\n\t}\n}\n\n\/\/ sendMessage sends a standalone message (typically a control message)\nfunc (c *Connection) sendMessage(msg message) error {\n\tframe := c.framePool.Get()\n\tif err := frame.write(msg); err != nil {\n\t\tc.framePool.Release(frame)\n\t\treturn err\n\t}\n\n\tselect {\n\tcase c.sendCh <- frame:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrSendBufferFull\n\t}\n}\n\n\/\/ recvMessage blocks waiting for a standalone response message (typically a\n\/\/ control message)\nfunc (c *Connection) recvMessage(ctx context.Context, msg message, resCh <-chan *Frame) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\n\tcase frame := <-resCh:\n\t\terr := frame.read(msg)\n\t\tc.framePool.Release(frame)\n\t\treturn err\n\t}\n}\n\n\/\/ NextMessageID reserves the next available message id for this connection\nfunc (c *Connection) NextMessageID() uint32 {\n\treturn atomic.AddUint32(&c.nextMessageID, 1)\n}\n\n\/\/ connectionError handles a connection level error\nfunc (c *Connection) connectionError(err error) error {\n\tdoClose := false\n\tc.withStateLock(func() error {\n\t\tif c.state != connectionClosed {\n\t\t\tc.state = connectionClosed\n\t\t\tdoClose = true\n\t\t}\n\t\treturn nil\n\t})\n\n\tif doClose {\n\t\tc.closeNetwork()\n\t}\n\n\treturn NewWrappedSystemError(ErrCodeNetwork, err)\n}\n\n\/\/ withStateLock performs an action with the connection state mutex locked\nfunc (c *Connection) withStateLock(f func() error) error {\n\tc.stateMut.Lock()\n\tdefer c.stateMut.Unlock()\n\n\treturn f()\n}\n\n\/\/ withStateRLock an action with the connection state mutex held in a read lock\nfunc (c *Connection) withStateRLock(f func() error) error {\n\tc.stateMut.RLock()\n\tdefer c.stateMut.RUnlock()\n\n\treturn f()\n}\n\n\/\/ readFrames is the loop that reads frames from the network connection and\n\/\/ dispatches to the appropriate handler. Run within its own goroutine to\n\/\/ prevent overlapping reads on the socket. Most handlers simply send the\n\/\/ incoming frame to a channel; the init handlers are a notable exception,\n\/\/ since we cannot process new frames until the initialization is complete.\nfunc (c *Connection) readFrames() {\n\tfor {\n\t\tframe := c.framePool.Get()\n\t\tif err := frame.ReadFrom(c.conn); err != nil {\n\t\t\tc.framePool.Release(frame)\n\t\t\tc.connectionError(err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch frame.Header.messageType {\n\t\tcase messageTypeCallReq:\n\t\t\tc.handleCallReq(frame)\n\t\tcase messageTypeCallReqContinue:\n\t\t\tc.handleCallReqContinue(frame)\n\t\tcase messageTypeCallRes:\n\t\t\tc.handleCallRes(frame)\n\t\tcase messageTypeCallResContinue:\n\t\t\tc.handleCallResContinue(frame)\n\t\tcase messageTypeInitReq:\n\t\t\tc.handleInitReq(frame)\n\t\tcase messageTypeInitRes:\n\t\t\tc.handleInitRes(frame)\n\t\tcase messageTypeError:\n\t\t\tc.handleError(frame)\n\t\tdefault:\n\t\t\t\/\/ TODO(mmihic): Log and close connection with protocol error\n\t\t\tc.log.Errorf(\"Received unexpected frame %s from %s\", frame.Header, c.remotePeerInfo)\n\t\t}\n\t}\n}\n\n\/\/ writeFrames is the main loop that pulls frames from the send channel and\n\/\/ writes them to the connection.\nfunc (c *Connection) writeFrames() {\n\tfor f := range c.sendCh {\n\t\tdefer c.framePool.Release(f)\n\n\t\tc.log.Debugf(\"Writing frame %s\", f.Header)\n\t\tif err := f.WriteTo(c.conn); err != nil {\n\t\t\tc.connectionError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Close the network after we have sent the last frame\n\tc.closeNetwork()\n}\n\n\/\/ closeNetwork closes the network connection and all network-related channels.\n\/\/ This should only be done in response to a fatal connection or protocol\n\/\/ error, or after all pending frames have been sent.\nfunc (c *Connection) closeNetwork() {\n\t\/\/ NB(mmihic): The sender goroutine will exit once the connection is\n\t\/\/ closed; no need to close the send channel (and closing the send\n\t\/\/ channel would be dangerous since other goroutine might be sending)\n\tif err := c.conn.Close(); err != nil {\n\t\tc.log.Warnf(\"could not close connection to peer %s: %v\", c.remotePeerInfo, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package apns\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"log\"\n\t\"time\"\n)\n\nvar ResponseQueueSize = 10000\nvar SentBufferSize = 10000\nvar TimeoutSeconds = 5 * time.Second\n\n\/\/a Connection represents a single connection to APNS.\ntype Connection struct {\n\tclient Client\n\tconn tls.Conn\n\tqueue chan PushNotification\n\terrors chan *BadPushNotification\n}\n\ntype Response struct {\n\tStatus uint8\n\tIdentifier uint32\n}\n\nfunc newResponse() *Response {\n\treturn new(Response)\n}\n\ntype BadPushNotification struct {\n\tPushNotification\n\tStatus uint8\n}\n\nfunc (conn *Connection) Enqueue(pn *PushNotification) {\n\tgo func(pn *PushNotification) {\n\t\tconn.queue <- *pn\n\t}(pn)\n}\n\nfunc (conn *Connection) Errors() (errors <-chan *BadPushNotification) {\n\treturn conn.errors\n}\n\nfunc (conn *Connection) Start() error {\n\t\/\/Connect to APNS. The reason this is here as well as in sender is that this probably catches any unavoidable errors in a synchronous fashion, while in sender it can reconnect after temporary errors (which should work most of the time.)\n\t\/\/Start sender goroutine\n\tsent := make(chan PushNotification)\n\tgo conn.sender(conn.queue, sent)\n\t\/\/Start reader goroutine\n\tresponses := make(chan *Response, ResponseQueueSize)\n\tgo conn.reader(responses)\n\t\/\/Start limbo goroutine\n\treturn nil\n}\n\nfunc (conn *Connection) Stop() {\n\t\/\/We can't just close the main queue channel, because retries might still need to be sent there.\n\t\/\/\n}\n\nfunc (conn *Connection) sender(queue <-chan PushNotification, sent chan PushNotification) {\n\tfor {\n\t\tpn, ok := <-conn.queue\n\t\tif !ok {\n\t\t\t\/\/That means the connection is closed, teardown the connection (should it be this routine's responsibility?) and return\n\t\t\treturn\n\t\t} else {\n\t\t\t\/\/If not connected, connect\n\t\t\t\/\/Exponential backoff up to a limit\n\t\t\t\/\/Then send the push notification\n\t\t\t\/\/TODO(@draaglom): Do buffering as per the APNS docs\n\t\t}\n\t}\n}\n\nfunc (conn *Connection) reader(responses chan<- *Response) {\n\tbuffer := make([]byte, 6)\n\tfor {\n\t\t_, err := conn.conn.Read(buffer)\n\t\tif err != nil {\n\t\t\tlog.Println(\"APNS: Error reading from connection: \", err)\n\t\t\tconn.conn.Close()\n\t\t\treturn\n\t\t}\n\t\tresp := newResponse()\n\t\tresp.Identifier = binary.BigEndian.Uint32(buffer[2:6])\n\t\tresp.Status = uint8(buffer[1])\n\t\tresponses <- resp\n\t}\n}\n\nfunc (conn *Connection) limbo(sent <-chan PushNotification, responses chan Response, errors chan PushNotification, queue chan PushNotification) {\n\tlimbo := make(chan PushNotification, SentBufferSize)\n\tticker := time.NewTicker(1 * time.Second)\n\ttimeNextNotification := true\n\tfor {\n\t\tselect {\n\t\tcase pn := <-sent:\n\t\t\t\/\/Drop it into the array\n\t\t\tlimbo <- pn\n\t\t\tif timeNextNotification {\n\t\t\t\t\/\/Is there a cleaner way of doing this?\n\t\t\t\tgo func(pn PushNotification) {\n\t\t\t\t\t<-time.After(TimeoutSeconds)\n\t\t\t\t\tsuccessResp := newResponse()\n\t\t\t\t\tsuccessResp.Identifier = pn.Identifier\n\t\t\t\t\tresponses <- *successResp\n\t\t\t\t}(pn)\n\t\t\t\ttimeNextNotification = false\n\t\t\t}\n\t\tcase resp, ok := <-responses:\n\t\t\tif !ok {\n\t\t\t\t\/\/If the responses channel is closed, that means we're shutting down the connection.\n\t\t\t\t\/\/We should\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase resp.Status == 0:\n\t\t\t\t\/\/Status 0 is a \"success\" response generated by a timeout in the library.\n\t\t\t\tfor pn := range limbo {\n\t\t\t\t\t\/\/Drop all the notifications until we get to the timed-out one.\n\t\t\t\t\t\/\/(and leave the others in limbo)\n\t\t\t\t\tif pn.Identifier == resp.Identifier {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\thit := false\n\t\t\t\tfor pn := range limbo {\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase pn.Identifier != resp.Identifier && !hit:\n\t\t\t\t\t\t\/\/We haven't seen the identified notification yet\n\t\t\t\t\t\t\/\/so these are all successful (drop silently)\n\t\t\t\t\tcase pn.Identifier == resp.Identifier:\n\t\t\t\t\t\thit = true\n\t\t\t\t\t\tif resp.Status != 10 {\n\t\t\t\t\t\t\t\/\/It was an error, we should report this on the error channel\n\t\t\t\t\t\t}\n\t\t\t\t\tcase pn.Identifier != resp.Identifier && hit:\n\t\t\t\t\t\t\/\/We've already seen the identified notification,\n\t\t\t\t\t\t\/\/so these should be requeued\n\t\t\t\t\t\tconn.Enqueue(&pn)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/Drop all of the notifications before this response (they're ok)\n\t\t\t\/\/if status != 10, return the offending notification on errors\n\t\t\t\/\/if status == 10, close the connection.\n\t\t\t\/\/requeue all the notifications after that one.\n\t\tcase <-ticker.C:\n\t\t\ttimeNextNotification = true\n\t\t}\n\t}\n}\n<commit_msg>TimeoutSeconds was redundant<commit_after>package apns\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"log\"\n\t\"time\"\n)\n\nvar ResponseQueueSize = 10000\nvar SentBufferSize = 10000\n\n\/\/a Connection represents a single connection to APNS.\ntype Connection struct {\n\tclient Client\n\tconn tls.Conn\n\tqueue chan PushNotification\n\terrors chan *BadPushNotification\n}\n\ntype Response struct {\n\tStatus uint8\n\tIdentifier uint32\n}\n\nfunc newResponse() *Response {\n\treturn new(Response)\n}\n\ntype BadPushNotification struct {\n\tPushNotification\n\tStatus uint8\n}\n\nfunc (conn *Connection) Enqueue(pn *PushNotification) {\n\tgo func(pn *PushNotification) {\n\t\tconn.queue <- *pn\n\t}(pn)\n}\n\nfunc (conn *Connection) Errors() (errors <-chan *BadPushNotification) {\n\treturn conn.errors\n}\n\nfunc (conn *Connection) Start() error {\n\t\/\/Connect to APNS. The reason this is here as well as in sender is that this probably catches any unavoidable errors in a synchronous fashion, while in sender it can reconnect after temporary errors (which should work most of the time.)\n\t\/\/Start sender goroutine\n\tsent := make(chan PushNotification)\n\tgo conn.sender(conn.queue, sent)\n\t\/\/Start reader goroutine\n\tresponses := make(chan *Response, ResponseQueueSize)\n\tgo conn.reader(responses)\n\t\/\/Start limbo goroutine\n\treturn nil\n}\n\nfunc (conn *Connection) Stop() {\n\t\/\/We can't just close the main queue channel, because retries might still need to be sent there.\n\t\/\/\n}\n\nfunc (conn *Connection) sender(queue <-chan PushNotification, sent chan PushNotification) {\n\tfor {\n\t\tpn, ok := <-conn.queue\n\t\tif !ok {\n\t\t\t\/\/That means the connection is closed, teardown the connection (should it be this routine's responsibility?) and return\n\t\t\treturn\n\t\t} else {\n\t\t\t\/\/If not connected, connect\n\t\t\t\/\/Exponential backoff up to a limit\n\t\t\t\/\/Then send the push notification\n\t\t\t\/\/TODO(@draaglom): Do buffering as per the APNS docs\n\t\t}\n\t}\n}\n\nfunc (conn *Connection) reader(responses chan<- *Response) {\n\tbuffer := make([]byte, 6)\n\tfor {\n\t\t_, err := conn.conn.Read(buffer)\n\t\tif err != nil {\n\t\t\tlog.Println(\"APNS: Error reading from connection: \", err)\n\t\t\tconn.conn.Close()\n\t\t\treturn\n\t\t}\n\t\tresp := newResponse()\n\t\tresp.Identifier = binary.BigEndian.Uint32(buffer[2:6])\n\t\tresp.Status = uint8(buffer[1])\n\t\tresponses <- resp\n\t}\n}\n\nfunc (conn *Connection) limbo(sent <-chan PushNotification, responses chan Response, errors chan PushNotification, queue chan PushNotification) {\n\tlimbo := make(chan PushNotification, SentBufferSize)\n\tticker := time.NewTicker(1 * time.Second)\n\ttimeNextNotification := true\n\tfor {\n\t\tselect {\n\t\tcase pn := <-sent:\n\t\t\t\/\/Drop it into the array\n\t\t\tlimbo <- pn\n\t\t\tif timeNextNotification {\n\t\t\t\t\/\/Is there a cleaner way of doing this?\n\t\t\t\tgo func(pn PushNotification) {\n\t\t\t\t\t<-time.After(TimeoutSeconds)\n\t\t\t\t\tsuccessResp := newResponse()\n\t\t\t\t\tsuccessResp.Identifier = pn.Identifier\n\t\t\t\t\tresponses <- *successResp\n\t\t\t\t}(pn)\n\t\t\t\ttimeNextNotification = false\n\t\t\t}\n\t\tcase resp, ok := <-responses:\n\t\t\tif !ok {\n\t\t\t\t\/\/If the responses channel is closed, that means we're shutting down the connection.\n\t\t\t\t\/\/We should\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase resp.Status == 0:\n\t\t\t\t\/\/Status 0 is a \"success\" response generated by a timeout in the library.\n\t\t\t\tfor pn := range limbo {\n\t\t\t\t\t\/\/Drop all the notifications until we get to the timed-out one.\n\t\t\t\t\t\/\/(and leave the others in limbo)\n\t\t\t\t\tif pn.Identifier == resp.Identifier {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\thit := false\n\t\t\t\tfor pn := range limbo {\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase pn.Identifier != resp.Identifier && !hit:\n\t\t\t\t\t\t\/\/We haven't seen the identified notification yet\n\t\t\t\t\t\t\/\/so these are all successful (drop silently)\n\t\t\t\t\tcase pn.Identifier == resp.Identifier:\n\t\t\t\t\t\thit = true\n\t\t\t\t\t\tif resp.Status != 10 {\n\t\t\t\t\t\t\t\/\/It was an error, we should report this on the error channel\n\t\t\t\t\t\t}\n\t\t\t\t\tcase pn.Identifier != resp.Identifier && hit:\n\t\t\t\t\t\t\/\/We've already seen the identified notification,\n\t\t\t\t\t\t\/\/so these should be requeued\n\t\t\t\t\t\tconn.Enqueue(&pn)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/Drop all of the notifications before this response (they're ok)\n\t\t\t\/\/if status != 10, return the offending notification on errors\n\t\t\t\/\/if status == 10, close the connection.\n\t\t\t\/\/requeue all the notifications after that one.\n\t\tcase <-ticker.C:\n\t\t\ttimeNextNotification = true\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-2013 Tamás Gulácsi\n\/\/ See LICENSE.txt\n\/\/ Translated from cx_Oracle ((c) Anthony Tuininga) by Tamás Gulácsi\npackage goracle\n\n\/*\n#cgo CFLAGS: -I\/usr\/include\/oracle\/11.2\/client64\n#cgo LDFLAGS: -lclntsh -L\/usr\/lib\/oracle\/11.2\/client64\/lib\n\n#include <oci.h>\n\/\/#include <datetime.h>\n\/\/#include <structmember.h>\n\/\/#include <time.h>\n\/\/#include <oci.h>\n\/\/#include <orid.h>\n\/\/#include <xa.h>\n\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"unsafe\"\n)\n\n\/\/ MakeDSN makea a data source name given the host port and SID.\nfunc MakeDSN(host string, port int, sid, serviceName string) string {\n\tvar format, conn string\n\tif sid != \"\" {\n\t\tconn = sid\n\t\tformat = (\"(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=\" +\n\t\t\t\"(PROTOCOL=TCP)(HOST=%s)(PORT=%d)))(CONNECT_DATA=(SID=%s)))\")\n\t} else {\n\t\tconn = serviceName\n\t\tformat = (\"(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=\" +\n\t\t\t\"(PROTOCOL=TCP)(HOST=%s)(PORT=%d)))(CONNECT_DATA=\" +\n\t\t\t\"(SERVICE_NAME=%s)))\")\n\t}\n\tif format == \"\" {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(format, host, port, conn)\n}\n\n\/\/ ClientVersion returns the client's version (slice of 5 int32s)\nfunc ClientVersion() []int32 {\n\tvar majorVersion, minorVersion, updateNum, patchNum, portUpdateNum C.sword\n\n\tC.OCIClientVersion(&majorVersion, &minorVersion, &updateNum,\n\t\t&patchNum, &portUpdateNum)\n\treturn []int32{int32(majorVersion), int32(minorVersion), int32(updateNum),\n\t\tint32(patchNum), int32(portUpdateNum)}\n}\n\ntype Connection struct {\n\thandle *C.OCISvcCtx \/\/connection\n\tserverHandle *C.OCIServer \/\/server's handle\n\tsessionHandle *C.OCISession \/\/session's handle\n\tenvironment *Environment \/\/environment\n\t\/\/ sessionPool *SessionPool \/\/sessionpool\n\tusername, password, dsn, version string\n\tcommitMode int64\n\tautocommit, release, attached bool\n\tsrvMtx sync.Mutex\n}\n\n\/\/ Connection_IsConnected()\n\/\/ Determines if the connection object is connected to the database.\nfunc (conn Connection) IsConnected() bool {\n\treturn conn.handle != nil\n}\n\nfunc (conn *Connection) AttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.handle), C.OCI_HTYPE_SVCCTX,\n\t\tkey, value)\n}\n\nfunc (conn *Connection) ServerAttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.serverHandle), C.OCI_HTYPE_SERVER,\n\t\tkey, value)\n}\n\nfunc (conn *Connection) SessionAttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.sessionHandle), C.OCI_HTYPE_SESSION,\n\t\tkey, value)\n}\n\n\/\/ Create a new connection object by connecting to the database.\nfunc (conn *Connection) Connect(mode int64, twophase bool \/*, newPassword string*\/) error {\n\tcredentialType := C.OCI_CRED_EXT\n\tvar (\n\t\tstatus C.sword\n\t\terr *Error\n\t)\n\n\t\/\/ allocate the server handle\n\tif ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SERVER,\n\t\t(*unsafe.Pointer)(unsafe.Pointer(&conn.serverHandle))); err != nil {\n\t\terr.At = \"Connect[allocate server handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ attach to the server\n\t\/*\n\t if (cxBuffer_FromObject(&buffer, self->dsn,\n\t self->environment->encoding) < 0)\n\t return -1;\n\t*\/\n\n\tbuffer := make([]byte, len(conn.dsn)+1, max(16, len(conn.dsn), len(conn.username), len(conn.password))+1)\n\tcopy(buffer, []byte(conn.dsn))\n\tbuffer[len(conn.dsn)] = 0\n\t\/\/ dsn := C.CString(conn.dsn)\n\t\/\/ defer C.free(unsafe.Pointer(dsn))\n\t\/\/ Py_BEGIN_ALLOW_THREADS\n\tconn.srvMtx.Lock()\n\tstatus = C.OCIServerAttach(conn.serverHandle,\n\t\tconn.environment.errorHandle, (*C.OraText)(&buffer[0]),\n\t\tC.sb4(len(buffer)), C.OCI_DEFAULT)\n\t\/\/ Py_END_ALLOW_THREADS\n\tconn.srvMtx.Unlock()\n\t\/\/ cxBuffer_Clear(&buffer);\n\tif err = CheckStatus(status); err != nil {\n\t\terr.At = \"Connect[server attach]\"\n\t\treturn err\n\t}\n\n\t\/\/ allocate the service context handle\n\tif err = ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SVCCTX, (*unsafe.Pointer)(unsafe.Pointer(&conn.handle))); err != nil {\n\t\terr.At = \"Connect[allocate service context handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set attribute for server handle\n\tif err = conn.AttrSet(C.OCI_ATTR_SERVER, unsafe.Pointer(conn.serverHandle)); err != nil {\n\t\terr.At = \"Connect[set server handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set the internal and external names; these are needed for global\n\t\/\/ transactions but are limited in terms of the lengths of the strings\n\tif twophase {\n\t\tcopy(buffer, []byte(\"goracle\"))\n\t\tbuffer[len(\"goracle\")] = 0\n\n\t\tif err = conn.ServerAttrSet(C.OCI_ATTR_INTERNAL_NAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set internal name]\"\n\t\t\treturn err\n\t\t}\n\t\tif err = conn.ServerAttrSet(C.OCI_ATTR_EXTERNAL_NAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set external name]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ allocate the session handle\n\tif err = ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SESSION,\n\t\t(*unsafe.Pointer)(unsafe.Pointer(&conn.sessionHandle))); err != nil {\n\t\terr.At = \"Connect[allocate session handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set user name in session handle\n\tif conn.username != \"\" {\n\t\tcopy(buffer, []byte(conn.username))\n\t\tbuffer[len(conn.username)] = 0\n\t\tcredentialType = C.OCI_CRED_RDBMS\n\n\t\tif err = conn.SessionAttrSet(C.OCI_ATTR_USERNAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set user name]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ set password in session handle\n\tif conn.password != \"\" {\n\t\tcopy(buffer, []byte(conn.password))\n\t\tbuffer[len(conn.password)] = 0\n\t\tcredentialType = C.OCI_CRED_RDBMS\n\t\tif err = conn.SessionAttrSet(C.OCI_ATTR_PASSWORD,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set password]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/*\n\t #ifdef OCI_ATTR_DRIVER_NAME\n\t status = OCIAttrSet(self->sessionHandle, OCI_HTYPE_SESSION,\n\t (text*) DRIVER_NAME, strlen(DRIVER_NAME), OCI_ATTR_DRIVER_NAME,\n\t self->environment->errorHandle);\n\t if (Environment_CheckForError(self->environment, status,\n\t \"Connection_Connect(): set driver name\") < 0)\n\t return -1;\n\n\t #endif\n\t*\/\n\n\t\/\/ set the session handle on the service context handle\n\tif err = conn.AttrSet(C.OCI_ATTR_SESSION,\n\t\tunsafe.Pointer(conn.sessionHandle)); err != nil {\n\t\terr.At = \"Connect[set session handle]\"\n\t\treturn err\n\t}\n\n\t\/*\n\t \/\/ if a new password has been specified, change it which will also\n\t \/\/ establish the session\n\t if (newPasswordObj)\n\t return Connection_ChangePassword(self, self->password, newPasswordObj);\n\t*\/\n\n\t\/\/ begin the session\n\t\/\/ Py_BEGIN_ALLOW_THREADS\n\tconn.srvMtx.Lock()\n\tstatus = C.OCISessionBegin(conn.handle, conn.environment.errorHandle,\n\t\tconn.sessionHandle, C.ub4(credentialType), C.ub4(mode))\n\t\/\/ Py_END_ALLOW_THREADS\n\tconn.srvMtx.Unlock()\n\tif err = CheckStatus(status); err != nil {\n\t\terr.At = \"Connect[begin session]\"\n\t\tconn.sessionHandle = nil\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (conn *Connection) Rollback() {\n\tC.OCITransRollback(conn.handle, conn.environment.errorHandle,\n\t\tC.OCI_DEFAULT)\n}\n\n\/\/ Deallocate the connection, disconnecting from the database if necessary.\nfunc (conn *Connection) Close() {\n\tif conn.release {\n\t\t\/\/ Py_BEGIN_ALLOW_THREADS\n\t\tconn.srvMtx.Lock()\n\t\tconn.Rollback()\n\t\tC.OCISessionRelease(conn.handle, conn.environment.errorHandle, nil,\n\t\t\t0, C.OCI_DEFAULT)\n\t\t\/\/ Py_END_ALLOW_THREADS\n\t\tconn.srvMtx.Unlock()\n\t} else if !conn.attached {\n\t\tif conn.sessionHandle != nil {\n\t\t\t\/\/ Py_BEGIN_ALLOW_THREADS\n\t\t\tconn.srvMtx.Lock()\n\t\t\tconn.Rollback()\n\t\t\tC.OCISessionEnd(conn.handle, conn.environment.errorHandle,\n\t\t\t\tconn.sessionHandle, C.OCI_DEFAULT)\n\t\t\t\/\/ Py_END_ALLOW_THREADS\n\t\t\tconn.srvMtx.Unlock()\n\t\t}\n\t\tif conn.serverHandle != nil {\n\t\t\tC.OCIServerDetach(conn.serverHandle,\n\t\t\t\tconn.environment.errorHandle, C.OCI_DEFAULT)\n\t\t}\n\t}\n}\n\nfunc max(numbers ...int) int {\n\tif len(numbers) == 0 {\n\t\treturn 0\n\t}\n\tm := numbers[0]\n\tfor _, x := range numbers {\n\t\tif m < x {\n\t\t\tm = x\n\t\t}\n\t}\n\treturn m\n}\n<commit_msg>add Close<commit_after>\/\/ Copyright 2012-2013 Tamás Gulácsi\n\/\/ See LICENSE.txt\n\/\/ Translated from cx_Oracle ((c) Anthony Tuininga) by Tamás Gulácsi\npackage goracle\n\n\/*\n#cgo CFLAGS: -I\/usr\/include\/oracle\/11.2\/client64\n#cgo LDFLAGS: -lclntsh -L\/usr\/lib\/oracle\/11.2\/client64\/lib\n\n#include <stdlib.h>\n#include <oci.h>\n\/\/#include <datetime.h>\n\/\/#include <structmember.h>\n\/\/#include <time.h>\n\/\/#include <oci.h>\n\/\/#include <orid.h>\n\/\/#include <xa.h>\n\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"unsafe\"\n)\n\n\/\/ MakeDSN makea a data source name given the host port and SID.\nfunc MakeDSN(host string, port int, sid, serviceName string) string {\n\tvar format, conn string\n\tif sid != \"\" {\n\t\tconn = sid\n\t\tformat = (\"(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=\" +\n\t\t\t\"(PROTOCOL=TCP)(HOST=%s)(PORT=%d)))(CONNECT_DATA=(SID=%s)))\")\n\t} else {\n\t\tconn = serviceName\n\t\tformat = (\"(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=\" +\n\t\t\t\"(PROTOCOL=TCP)(HOST=%s)(PORT=%d)))(CONNECT_DATA=\" +\n\t\t\t\"(SERVICE_NAME=%s)))\")\n\t}\n\tif format == \"\" {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(format, host, port, conn)\n}\n\n\/\/ ClientVersion returns the client's version (slice of 5 int32s)\nfunc ClientVersion() []int32 {\n\tvar majorVersion, minorVersion, updateNum, patchNum, portUpdateNum C.sword\n\n\tC.OCIClientVersion(&majorVersion, &minorVersion, &updateNum,\n\t\t&patchNum, &portUpdateNum)\n\treturn []int32{int32(majorVersion), int32(minorVersion), int32(updateNum),\n\t\tint32(patchNum), int32(portUpdateNum)}\n}\n\ntype Connection struct {\n\thandle *C.OCISvcCtx \/\/connection\n\tserverHandle *C.OCIServer \/\/server's handle\n\tsessionHandle *C.OCISession \/\/session's handle\n\tenvironment *Environment \/\/environment\n\t\/\/ sessionPool *SessionPool \/\/sessionpool\n\tusername, password, dsn, version string\n\tcommitMode int64\n\tautocommit, release, attached bool\n\tsrvMtx sync.Mutex\n}\n\n\/\/ Connection_IsConnected()\n\/\/ Determines if the connection object is connected to the database.\nfunc (conn Connection) IsConnected() bool {\n\treturn conn.handle != nil\n}\n\nfunc (conn *Connection) AttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.handle), C.OCI_HTYPE_SVCCTX,\n\t\tkey, value)\n}\n\nfunc (conn *Connection) ServerAttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.serverHandle), C.OCI_HTYPE_SERVER,\n\t\tkey, value)\n}\n\nfunc (conn *Connection) SessionAttrSet(key C.ub4, value unsafe.Pointer) *Error {\n\treturn conn.environment.AttrSet(\n\t\tunsafe.Pointer(conn.sessionHandle), C.OCI_HTYPE_SESSION,\n\t\tkey, value)\n}\n\n\/\/ Create a new connection object by connecting to the database.\nfunc (conn *Connection) Connect(mode int64, twophase bool \/*, newPassword string*\/) error {\n\tcredentialType := C.OCI_CRED_EXT\n\tvar (\n\t\tstatus C.sword\n\t\terr *Error\n\t)\n\n\t\/\/ allocate the server handle\n\tif ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SERVER,\n\t\t(*unsafe.Pointer)(unsafe.Pointer(&conn.serverHandle))); err != nil {\n\t\terr.At = \"Connect[allocate server handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ attach to the server\n\t\/*\n\t if (cxBuffer_FromObject(&buffer, self->dsn,\n\t self->environment->encoding) < 0)\n\t return -1;\n\t*\/\n\n\tbuffer := make([]byte, len(conn.dsn)+1, max(16, len(conn.dsn), len(conn.username), len(conn.password))+1)\n\tcopy(buffer, []byte(conn.dsn))\n\tbuffer[len(conn.dsn)] = 0\n\t\/\/ dsn := C.CString(conn.dsn)\n\t\/\/ defer C.free(unsafe.Pointer(dsn))\n\t\/\/ Py_BEGIN_ALLOW_THREADS\n\tconn.srvMtx.Lock()\n\tstatus = C.OCIServerAttach(conn.serverHandle,\n\t\tconn.environment.errorHandle, (*C.OraText)(&buffer[0]),\n\t\tC.sb4(len(buffer)), C.OCI_DEFAULT)\n\t\/\/ Py_END_ALLOW_THREADS\n\tconn.srvMtx.Unlock()\n\t\/\/ cxBuffer_Clear(&buffer);\n\tif err = CheckStatus(status); err != nil {\n\t\terr.At = \"Connect[server attach]\"\n\t\treturn err\n\t}\n\n\t\/\/ allocate the service context handle\n\tif err = ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SVCCTX, (*unsafe.Pointer)(unsafe.Pointer(&conn.handle))); err != nil {\n\t\terr.At = \"Connect[allocate service context handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set attribute for server handle\n\tif err = conn.AttrSet(C.OCI_ATTR_SERVER, unsafe.Pointer(conn.serverHandle)); err != nil {\n\t\terr.At = \"Connect[set server handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set the internal and external names; these are needed for global\n\t\/\/ transactions but are limited in terms of the lengths of the strings\n\tif twophase {\n\t\tcopy(buffer, []byte(\"goracle\"))\n\t\tbuffer[len(\"goracle\")] = 0\n\n\t\tif err = conn.ServerAttrSet(C.OCI_ATTR_INTERNAL_NAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set internal name]\"\n\t\t\treturn err\n\t\t}\n\t\tif err = conn.ServerAttrSet(C.OCI_ATTR_EXTERNAL_NAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set external name]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ allocate the session handle\n\tif err = ociHandleAlloc(unsafe.Pointer(conn.environment.handle),\n\t\tC.OCI_HTYPE_SESSION,\n\t\t(*unsafe.Pointer)(unsafe.Pointer(&conn.sessionHandle))); err != nil {\n\t\terr.At = \"Connect[allocate session handle]\"\n\t\treturn err\n\t}\n\n\t\/\/ set user name in session handle\n\tif conn.username != \"\" {\n\t\tcopy(buffer, []byte(conn.username))\n\t\tbuffer[len(conn.username)] = 0\n\t\tcredentialType = C.OCI_CRED_RDBMS\n\n\t\tif err = conn.SessionAttrSet(C.OCI_ATTR_USERNAME,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set user name]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ set password in session handle\n\tif conn.password != \"\" {\n\t\tcopy(buffer, []byte(conn.password))\n\t\tbuffer[len(conn.password)] = 0\n\t\tcredentialType = C.OCI_CRED_RDBMS\n\t\tif err = conn.SessionAttrSet(C.OCI_ATTR_PASSWORD,\n\t\t\tunsafe.Pointer(&buffer[0])); err != nil {\n\t\t\terr.At = \"Connect[set password]\"\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/*\n\t #ifdef OCI_ATTR_DRIVER_NAME\n\t status = OCIAttrSet(self->sessionHandle, OCI_HTYPE_SESSION,\n\t (text*) DRIVER_NAME, strlen(DRIVER_NAME), OCI_ATTR_DRIVER_NAME,\n\t self->environment->errorHandle);\n\t if (Environment_CheckForError(self->environment, status,\n\t \"Connection_Connect(): set driver name\") < 0)\n\t return -1;\n\n\t #endif\n\t*\/\n\n\t\/\/ set the session handle on the service context handle\n\tif err = conn.AttrSet(C.OCI_ATTR_SESSION,\n\t\tunsafe.Pointer(conn.sessionHandle)); err != nil {\n\t\terr.At = \"Connect[set session handle]\"\n\t\treturn err\n\t}\n\n\t\/*\n\t \/\/ if a new password has been specified, change it which will also\n\t \/\/ establish the session\n\t if (newPasswordObj)\n\t return Connection_ChangePassword(self, self->password, newPasswordObj);\n\t*\/\n\n\t\/\/ begin the session\n\t\/\/ Py_BEGIN_ALLOW_THREADS\n\tconn.srvMtx.Lock()\n\tstatus = C.OCISessionBegin(conn.handle, conn.environment.errorHandle,\n\t\tconn.sessionHandle, C.ub4(credentialType), C.ub4(mode))\n\t\/\/ Py_END_ALLOW_THREADS\n\tconn.srvMtx.Unlock()\n\tif err = CheckStatus(status); err != nil {\n\t\terr.At = \"Connect[begin session]\"\n\t\tconn.sessionHandle = nil\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (conn *Connection) Rollback() *Error {\n\treturn CheckStatus(C.OCITransRollback(conn.handle, conn.environment.errorHandle,\n\t\tC.OCI_DEFAULT))\n}\n\n\/\/ Deallocate the connection, disconnecting from the database if necessary.\nfunc (conn *Connection) Free() {\n\tif conn.release {\n\t\t\/\/ Py_BEGIN_ALLOW_THREADS\n\t\tconn.srvMtx.Lock()\n\t\tconn.Rollback()\n\t\tC.OCISessionRelease(conn.handle, conn.environment.errorHandle, nil,\n\t\t\t0, C.OCI_DEFAULT)\n\t\t\/\/ Py_END_ALLOW_THREADS\n\t\tconn.srvMtx.Unlock()\n\t} else if !conn.attached {\n\t\tif conn.sessionHandle != nil {\n\t\t\t\/\/ Py_BEGIN_ALLOW_THREADS\n\t\t\tconn.srvMtx.Lock()\n\t\t\tconn.Rollback()\n\t\t\tC.OCISessionEnd(conn.handle, conn.environment.errorHandle,\n\t\t\t\tconn.sessionHandle, C.OCI_DEFAULT)\n\t\t\t\/\/ Py_END_ALLOW_THREADS\n\t\t\tconn.srvMtx.Unlock()\n\t\t}\n\t\tif conn.serverHandle != nil {\n\t\t\tC.OCIServerDetach(conn.serverHandle,\n\t\t\t\tconn.environment.errorHandle, C.OCI_DEFAULT)\n\t\t}\n\t}\n}\n\n\/\/ Close the connection, disconnecting from the database.\nfunc (conn *Connection) Close() (err *Error) {\n\tif !conn.IsConnected() {\n\t\treturn nil \/\/?\n\t}\n\n\t\/\/ Py_BEGIN_ALLOW_THREADS\n\tconn.srvMtx.Lock()\n\tdefer conn.srvMtx.Unlock()\n\n\t\/\/ perform a rollback\n\tif err = conn.Rollback(); err != nil {\n\t\terr.At = \"Close[rollback]\"\n\t\treturn\n\t}\n\t\/\/ Py_END_ALLOW_THREADS\n\n\t\/\/ logoff of the server\n\tif conn.sessionHandle != nil {\n\t\t\/\/ Py_BEGIN_ALLOW_THREADS\n\t\tif err = CheckStatus(C.OCISessionEnd((conn.handle),\n\t\t\tconn.environment.errorHandle, conn.sessionHandle,\n\t\t\tC.OCI_DEFAULT)); err != nil {\n\t\t\terr.At = \"Close[end session]\"\n\t\t\treturn\n\t\t}\n\t\tC.OCIHandleFree(unsafe.Pointer(conn.handle), C.OCI_HTYPE_SVCCTX)\n\t}\n\tconn.handle = nil\n\tif conn.serverHandle != nil {\n\t\tif err = CheckStatus(C.OCIServerDetach(conn.serverHandle, conn.environment.errorHandle, C.OCI_DEFAULT)); err != nil {\n\t\t\terr.At = \"Close[server detach]\"\n\t\t\treturn\n\t\t}\n\t\tconn.serverHandle = nil\n\t}\n\treturn nil\n}\n\nfunc max(numbers ...int) int {\n\tif len(numbers) == 0 {\n\t\treturn 0\n\t}\n\tm := numbers[0]\n\tfor _, x := range numbers {\n\t\tif m < x {\n\t\t\tm = x\n\t\t}\n\t}\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package mqttclient\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tmqtt \"github.com\/clearblade\/mqtt_parsing\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\tCON_ACCEPT = iota\n\tCON_REFUSED_BAD_PROTO_VER\n\tCON_REFUSED_BAD_ID\n\tCON_REFUSED_DISALLOWED_ID\n\tCON_REFUSED_BAD_USERNAME_PASSWORD\n\tCON_REFUSED_NOT_AUTH\n)\n\n\/\/SendConnect does precisely what it says on the tin. It returns an error if there is a problem with the authentication\nfunc SendConnect(c *Client, lastWill, lastWillRetain bool, lastWillQOS int,\n\tlastWillBody, lastWillTopic string) error {\n\tvar username string\n\tvar password string\n\tvar clientid string\n\tif c.AuthToken != \"\" && c.SystemKey != \"\" {\n\t\tusername, password = c.AuthToken, c.SystemKey\n\t} else if c.AuthToken == \"\" && c.SystemKey != \"\" && c.SystemKey != \"\" {\n\t\tusername, password = c.SystemKey, c.SystemSecret\n\t} else {\n\t\terrFmtStr := \"You need either an auth token and systemkey, or a systemkey and systemsecret to connect. The client got\\n\\tToken: %s\\n\\tsystemkey: %s\\n\\tsystemsecret: %s\\n\"\n\t\treturn errors.New(fmt.Sprintf(errFmtStr, c.AuthToken, c.SystemKey, c.SystemSecret))\n\t}\n\tif c.Clientid == \"\" {\n\t\tclientid = randStr(c)\n\t} else {\n\t\tclientid = c.Clientid\n\t}\n\tvar lwTopic mqtt.TopicPath\n\tif lastWill {\n\t\tvar valid bool\n\t\tlwTopic, valid = mqtt.NewTopicPath(lastWillTopic)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(\"%S is an invalid topic\\n\", lastWillTopic)\n\t\t}\n\t}\n\n\tconnect := &mqtt.Connect{\n\t\tProtoName: \"MQTT\",\n\t\tUsernameFlag: true,\n\t\tUsername: username,\n\t\tPasswordFlag: true,\n\t\tPassword: password,\n\t\tCleanSeshFlag: true,\n\t\tKeepAlive: 60,\n\t\tClientId: clientid,\n\t\tWillRetainFlag: lastWillRetain,\n\t\tWillFlag: lastWill,\n\t\tWillTopic: lwTopic,\n\t\tWillMessage: lastWillBody,\n\t\tWillQOS: uint8(lastWillQOS),\n\t\tVersion: 0x4,\n\t}\n\treturn c.sendMessage(connect)\n}\n\n\/\/MakeMeABytePublish is a helper function to create a QOS 0, non retained MQTT publish without the problems of worrying about bad unicode\n\/\/if you're not into strings\nfunc MakeMeABytePublish(topic string, msg []byte, mid uint16) (*mqtt.Publish, error) {\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, fmt.Errorf(\"invalid topic path %s\\n\", topic)\n\t}\n\treturn &mqtt.Publish{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 0,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tPayload: msg,\n\t\tMessageId: mid,\n\t\tTopic: tp,\n\t}, nil\n}\n\n\/\/MakeMeAPublish is a helper function for creating a publish with a string payload\nfunc MakeMeAPublish(topic, msg string, mid uint16) (*mqtt.Publish, error) {\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, fmt.Errorf(\"invalid topic path %s\\n\", topic)\n\t}\n\treturn &mqtt.Publish{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 0,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tPayload: []byte(msg),\n\t\tMessageId: mid,\n\t\tTopic: tp,\n\t}, nil\n}\n\n\/\/PublishFlow allows one use their mqttclient to publish a message\nfunc PublishFlow(c *Client, p *mqtt.Publish) error {\n\t\/\/make sure we don't have to commit it to storage\n\t\/\/TODO:Handle high qoses\n\t\/\/also, TODO::FEATURE::should this block depending on qos?\n\t\/\/ switch p.Header.QOS {\n\t\/\/ case 0:\n\t\/\/ \treturn c.sendMessage(p)\n\t\/\/ }\n\tif p.Header.QOS > 0 && p.MessageId == 0 {\n\t\tp.MessageId = randMid(c)\n\t}\n\treturn c.sendMessage(p)\n}\n\n\/\/UnsubscribeFlow sends unsubscribes the client from a topic\nfunc UnsubscribeFlow(c *Client, topic string) error {\n\t\/\/TODO:BUG:add multiple unsubscribes at once\n\t\/\/make sure you were subscribed\n\tif !c.subscriptions.subscription_exists(topic) && !c.waiting_for_subscription.subscription_exists(\"UNSUBSCRIBE#\"+topic) {\n\t\treturn errors.New(\"Not subscribed or waiting to be subscribed to topic: \" + topic)\n\t}\n\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn errors.New(\"invalid wildcard topic\")\n\t}\n\t\/\/ugh gotta do work now\n\tunsub := &mqtt.Unsubscribe{\n\t\tMessageId: randMid(c),\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 1,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tTopics: []mqtt.TopicQOSTuple{\n\t\t\tmqtt.TopicQOSTuple{\n\t\t\t\tQos: 0, \/\/qos is irrelevant here\n\t\t\t\tTopic: tp,\n\t\t\t},\n\t\t},\n\t}\n\n\tschan, err := c.waiting_for_subscription.new_subscription(\"UNSUBSCRIBE#\" + topic)\n\tc.msg_store.add(unsub, unsub.MessageId)\n\terr = c.sendMessage(unsub)\n\tif err != nil {\n\t\treturn err\n\t}\n\tselect {\n\t\/\/WARNING: HERE WE ARE ASSUMING ONE UNSUBACK PER SUBSCRIPTION\n\tcase _ = <-schan:\n\t\tc.waiting_for_subscription.remove_subscription(\"UNSUBSCRIBE#\" + unsub.Topics[0].Topic.Whole)\n\t\treturn nil\n\tcase <-time.After(time.Minute * 5):\n\t\t\/\/THE ABOVE IS TOTALLY ARBITRARY\n\t\treturn errors.New(\"Did not recieve suback after five minutes\")\n\t}\n\n}\n\n\/\/SubscribeFlow allows the client to subscribe to an mqtt topic. it returns a channel that will contain\n\/\/the publishes recieved from that particular topic, or an error\n\/\/this call blocks until a suback is recieved\nfunc SubscribeFlow(c *Client, topic string, qos int) (<-chan *mqtt.Publish, error) {\n\t\/\/NOTE: we're only allowing singleton subscriptions right now\n\tif qos > 2 || qos < 0 {\n\t\treturn nil, errors.New(\"Invalid qos: \" + strconv.Itoa(qos) + \". Must be less than two.\")\n\t}\n\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, errors.New(\"invalid topic path\")\n\t}\n\tsub := &mqtt.Subscribe{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tDUP: false,\n\t\t\tRetain: false,\n\t\t\tQOS: 1,\n\t\t},\n\t\tMessageId: randMid(c),\n\t\tSubscriptions: []mqtt.TopicQOSTuple{\n\t\t\tmqtt.TopicQOSTuple{\n\t\t\t\tQos: uint8(qos),\n\t\t\t\tTopic: tp,\n\t\t\t},\n\t\t},\n\t}\n\n\tc.msg_store.add(sub, sub.MessageId)\n\tschan, err := c.waiting_for_subscription.new_subscription(topic)\n\terr = c.sendMessage(sub)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/it already exists\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/so we're going to retry a few times. it's another register in use, but eh\n\t\/\/better than blocking for eternity\n\tretries := 3\n\tfor {\n\t\tselect {\n\t\t\/\/WARNING: HERE WE ARE ASSUMING ONE SUBACK PER SUBSCRIPTION\n\t\tcase _ = <-schan:\n\t\t\tc.waiting_for_subscription.remove_subscription(topic)\n\t\t\treturn c.subscriptions.new_outgoing(topic)\n\t\tcase <-time.After(c.Timeout \/ 2):\n\t\t\t\/\/THE ABOVE IS TOTALLY ARBITRARY\n\t\t\tif retries == 0 {\n\t\t\t\treturn nil, errors.New(\"Did not recieve suback after 3 tries\")\n\t\t\t}\n\t\t\tretries--\n\t\t}\n\t\tif retries > 0 {\n\t\t\terr = c.sendMessage(sub)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error resending subscribe\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Sends a disconnect\nfunc SendDisconnect(c *Client) error {\n\treturn c.sendMessage(&mqtt.Disconnect{})\n}\n\n\/\/allows us to get a random string\nfunc randStr(c *Client) string {\n\tvar out string\n\tconst charset string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\torder := c.randoPerm(23)\n\t\/\/permute order of string\n\tfor _, v := range order {\n\t\tout += string(charset[v])\n\t}\n\treturn out\n}\n\nfunc randMid(c *Client) uint16 {\n\treturn uint16(c.getInt())\n}\n<commit_msg>deprecate certain forms of auth<commit_after>package mqttclient\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tmqtt \"github.com\/clearblade\/mqtt_parsing\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\tCON_ACCEPT = iota\n\tCON_REFUSED_BAD_PROTO_VER\n\tCON_REFUSED_BAD_ID\n\tCON_REFUSED_DISALLOWED_ID\n\tCON_REFUSED_BAD_USERNAME_PASSWORD\n\tCON_REFUSED_NOT_AUTH\n)\n\n\/\/SendConnect does precisely what it says on the tin. It returns an error if there is a problem with the authentication\nfunc SendConnect(c *Client, lastWill, lastWillRetain bool, lastWillQOS int,\n\tlastWillBody, lastWillTopic string) error {\n\tvar username string\n\tvar password string\n\tvar clientid string\n\n\tif c.AuthToken != \"\" && c.SystemKey != \"\" {\n\t\tusername, password = c.AuthToken, c.SystemKey\n\t} else {\n\n\t\treturn errors.New(fmt.Sprintf(\"Systemkey and auth token required, one of those was blank syskey: %s, token: %s\\n\", c.SystemKey, c.AuthToken))\n\t}\n\tif c.Clientid == \"\" {\n\t\tclientid = randStr(c)\n\t} else {\n\t\tclientid = c.Clientid\n\t}\n\tvar lwTopic mqtt.TopicPath\n\tif lastWill {\n\t\tvar valid bool\n\t\tlwTopic, valid = mqtt.NewTopicPath(lastWillTopic)\n\t\tif !valid {\n\t\t\treturn fmt.Errorf(\"%S is an invalid topic\\n\", lastWillTopic)\n\t\t}\n\t}\n\n\tconnect := &mqtt.Connect{\n\t\tProtoName: \"MQTT\",\n\t\tUsernameFlag: true,\n\t\tUsername: username,\n\t\tPasswordFlag: true,\n\t\tPassword: password,\n\t\tCleanSeshFlag: true,\n\t\tKeepAlive: 60,\n\t\tClientId: clientid,\n\t\tWillRetainFlag: lastWillRetain,\n\t\tWillFlag: lastWill,\n\t\tWillTopic: lwTopic,\n\t\tWillMessage: lastWillBody,\n\t\tWillQOS: uint8(lastWillQOS),\n\t\tVersion: 0x4,\n\t}\n\treturn c.sendMessage(connect)\n}\n\n\/\/MakeMeABytePublish is a helper function to create a QOS 0, non retained MQTT publish without the problems of worrying about bad unicode\n\/\/if you're not into strings\nfunc MakeMeABytePublish(topic string, msg []byte, mid uint16) (*mqtt.Publish, error) {\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, fmt.Errorf(\"invalid topic path %s\\n\", topic)\n\t}\n\treturn &mqtt.Publish{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 0,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tPayload: msg,\n\t\tMessageId: mid,\n\t\tTopic: tp,\n\t}, nil\n}\n\n\/\/MakeMeAPublish is a helper function for creating a publish with a string payload\nfunc MakeMeAPublish(topic, msg string, mid uint16) (*mqtt.Publish, error) {\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, fmt.Errorf(\"invalid topic path %s\\n\", topic)\n\t}\n\treturn &mqtt.Publish{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 0,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tPayload: []byte(msg),\n\t\tMessageId: mid,\n\t\tTopic: tp,\n\t}, nil\n}\n\n\/\/PublishFlow allows one use their mqttclient to publish a message\nfunc PublishFlow(c *Client, p *mqtt.Publish) error {\n\t\/\/make sure we don't have to commit it to storage\n\t\/\/TODO:Handle high qoses\n\t\/\/also, TODO::FEATURE::should this block depending on qos?\n\t\/\/ switch p.Header.QOS {\n\t\/\/ case 0:\n\t\/\/ \treturn c.sendMessage(p)\n\t\/\/ }\n\tif p.Header.QOS > 0 && p.MessageId == 0 {\n\t\tp.MessageId = randMid(c)\n\t}\n\treturn c.sendMessage(p)\n}\n\n\/\/UnsubscribeFlow sends unsubscribes the client from a topic\nfunc UnsubscribeFlow(c *Client, topic string) error {\n\t\/\/TODO:BUG:add multiple unsubscribes at once\n\t\/\/make sure you were subscribed\n\tif !c.subscriptions.subscription_exists(topic) && !c.waiting_for_subscription.subscription_exists(\"UNSUBSCRIBE#\"+topic) {\n\t\treturn errors.New(\"Not subscribed or waiting to be subscribed to topic: \" + topic)\n\t}\n\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn errors.New(\"invalid wildcard topic\")\n\t}\n\t\/\/ugh gotta do work now\n\tunsub := &mqtt.Unsubscribe{\n\t\tMessageId: randMid(c),\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tQOS: 1,\n\t\t\tRetain: false,\n\t\t\tDUP: false,\n\t\t},\n\t\tTopics: []mqtt.TopicQOSTuple{\n\t\t\tmqtt.TopicQOSTuple{\n\t\t\t\tQos: 0, \/\/qos is irrelevant here\n\t\t\t\tTopic: tp,\n\t\t\t},\n\t\t},\n\t}\n\n\tschan, err := c.waiting_for_subscription.new_subscription(\"UNSUBSCRIBE#\" + topic)\n\tc.msg_store.add(unsub, unsub.MessageId)\n\terr = c.sendMessage(unsub)\n\tif err != nil {\n\t\treturn err\n\t}\n\tselect {\n\t\/\/WARNING: HERE WE ARE ASSUMING ONE UNSUBACK PER SUBSCRIPTION\n\tcase _ = <-schan:\n\t\tc.waiting_for_subscription.remove_subscription(\"UNSUBSCRIBE#\" + unsub.Topics[0].Topic.Whole)\n\t\treturn nil\n\tcase <-time.After(time.Minute * 5):\n\t\t\/\/THE ABOVE IS TOTALLY ARBITRARY\n\t\treturn errors.New(\"Did not recieve suback after five minutes\")\n\t}\n\n}\n\n\/\/SubscribeFlow allows the client to subscribe to an mqtt topic. it returns a channel that will contain\n\/\/the publishes recieved from that particular topic, or an error\n\/\/this call blocks until a suback is recieved\nfunc SubscribeFlow(c *Client, topic string, qos int) (<-chan *mqtt.Publish, error) {\n\t\/\/NOTE: we're only allowing singleton subscriptions right now\n\tif qos > 2 || qos < 0 {\n\t\treturn nil, errors.New(\"Invalid qos: \" + strconv.Itoa(qos) + \". Must be less than two.\")\n\t}\n\n\ttp, valid := mqtt.NewTopicPath(topic)\n\tif !valid {\n\t\treturn nil, errors.New(\"invalid topic path\")\n\t}\n\tsub := &mqtt.Subscribe{\n\t\tHeader: &mqtt.StaticHeader{\n\t\t\tDUP: false,\n\t\t\tRetain: false,\n\t\t\tQOS: 1,\n\t\t},\n\t\tMessageId: randMid(c),\n\t\tSubscriptions: []mqtt.TopicQOSTuple{\n\t\t\tmqtt.TopicQOSTuple{\n\t\t\t\tQos: uint8(qos),\n\t\t\t\tTopic: tp,\n\t\t\t},\n\t\t},\n\t}\n\n\tc.msg_store.add(sub, sub.MessageId)\n\tschan, err := c.waiting_for_subscription.new_subscription(topic)\n\terr = c.sendMessage(sub)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/it already exists\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/so we're going to retry a few times. it's another register in use, but eh\n\t\/\/better than blocking for eternity\n\tretries := 3\n\tfor {\n\t\tselect {\n\t\t\/\/WARNING: HERE WE ARE ASSUMING ONE SUBACK PER SUBSCRIPTION\n\t\tcase _ = <-schan:\n\t\t\tc.waiting_for_subscription.remove_subscription(topic)\n\t\t\treturn c.subscriptions.new_outgoing(topic)\n\t\tcase <-time.After(c.Timeout \/ 2):\n\t\t\t\/\/THE ABOVE IS TOTALLY ARBITRARY\n\t\t\tif retries == 0 {\n\t\t\t\treturn nil, errors.New(\"Did not recieve suback after 3 tries\")\n\t\t\t}\n\t\t\tretries--\n\t\t}\n\t\tif retries > 0 {\n\t\t\terr = c.sendMessage(sub)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error resending subscribe\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Sends a disconnect\nfunc SendDisconnect(c *Client) error {\n\treturn c.sendMessage(&mqtt.Disconnect{})\n}\n\n\/\/allows us to get a random string\nfunc randStr(c *Client) string {\n\tvar out string\n\tconst charset string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\torder := c.randoPerm(23)\n\t\/\/permute order of string\n\tfor _, v := range order {\n\t\tout += string(charset[v])\n\t}\n\treturn out\n}\n\nfunc randMid(c *Client) uint16 {\n\treturn uint16(c.getInt())\n}\n<|endoftext|>"} {"text":"<commit_before>package gorethink\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n\t\"io\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype Connection struct {\n\t\/\/ The underlying Connection\n\tconn net.Conn\n\n\ttoken int64\n\taddress string\n\tdatabase string\n\ttimeout time.Duration\n\tauthkey string\n\ttimeFormat string\n\tclosed bool\n}\n\nfunc newConnection(args map[string]interface{}) *Connection {\n\tc := &Connection{}\n\n\tif token, ok := args[\"token\"]; ok {\n\t\tc.token = token.(int64)\n\t}\n\tif address, ok := args[\"address\"]; ok {\n\t\tc.address = address.(string)\n\t}\n\tif database, ok := args[\"database\"]; ok {\n\t\tc.database = database.(string)\n\t}\n\tif timeout, ok := args[\"timeout\"]; ok {\n\t\tc.timeout = timeout.(time.Duration)\n\t}\n\tif authkey, ok := args[\"authkey\"]; ok {\n\t\tc.authkey = authkey.(string)\n\t}\n\n\treturn c\n}\n\n\/\/ Connect opens a connection between the driver and the client\nfunc Connect(args map[string]interface{}) (*Connection, error) {\n\tc := newConnection(args)\n\terr := c.Reconnect()\n\n\treturn c, err\n}\n\n\/\/ Reconnect closes the previous connection and attempts to connect again.\nfunc (c *Connection) Reconnect() error {\n\tvar err error\n\tif err = c.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.Dial(\"tcp\", c.address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(conn, binary.LittleEndian, p.VersionDummy_V0_2); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ authorization key\n\tif err := binary.Write(conn, binary.LittleEndian, uint32(len(c.authkey))); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(conn, binary.BigEndian, []byte(c.authkey)); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ read server response to authorization key (terminated by NUL)\n\treader := bufio.NewReader(conn)\n\tline, err := reader.ReadBytes('\\x00')\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ convert to string and remove trailing NUL byte\n\tresponse := string(line[:len(line)-1])\n\tif response != \"SUCCESS\" {\n\t\t\/\/ we failed authorization or something else terrible happened\n\t\treturn RqlDriverError{fmt.Sprintf(\"Server dropped connection with message: \\\"%s\\\"\", response)}\n\t}\n\n\tc.conn = conn\n\tc.closed = false\n\n\treturn nil\n}\n\n\/\/ Close closes the connection\nfunc (c *Connection) Close() error {\n\tif c.conn == nil || c.closed {\n\t\treturn nil\n\t}\n\n\terr := c.conn.Close()\n\tc.closed = true\n\n\treturn err\n}\n\n\/\/ Use changes the default database used\nfunc (c *Connection) Use(database string) {\n\tc.database = database\n}\n\n\/\/ getToken generates the next query token, used to number requests and match\n\/\/ responses with requests.\nfunc (c *Connection) nextToken() int64 {\n\treturn atomic.AddInt64(&c.token, 1)\n}\n\n\/\/ startQuery creates a query from the term given and sends it to the server.\n\/\/ The result from the server is returned as ResultRows\nfunc (c *Connection) startQuery(t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\ttoken := c.nextToken()\n\n\t\/\/ Build query tree\n\tpt := t.build()\n\n\t\/\/ Build global options\n\tglobalOpts := []*p.Query_AssocPair{}\n\tfor k, v := range opts {\n\t\tif k == \"db\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"db\"),\n\t\t\t\tVal: Db(v).build(),\n\t\t\t})\n\t\t} else if k == \"use_outdated\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"use_outdated\"),\n\t\t\t\tVal: Expr(v).build(),\n\t\t\t})\n\t\t} else if k == \"noreply\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"noreply\"),\n\t\t\t\tVal: Expr(v).build(),\n\t\t\t})\n\t\t}\n\t}\n\n\t\/\/ Construct query\n\tquery := &p.Query{\n\t\tType: p.Query_START.Enum(),\n\t\tToken: proto.Int64(token),\n\t\tQuery: pt,\n\t\tGlobalOptargs: globalOpts,\n\t}\n\n\treturn c.send(query, t, opts)\n}\n\n\/\/ continueQuery continues a previously run query.\nfunc (c *Connection) continueQuery(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tnq := &p.Query{\n\t\tType: p.Query_CONTINUE.Enum(),\n\t\tToken: q.Token,\n\t}\n\n\treturn c.send(nq, t, opts)\n}\n\n\/\/ stopQuery sends closes a query by sending Query_STOP to the server.\nfunc (c *Connection) stopQuery(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tnq := &p.Query{\n\t\tType: p.Query_STOP.Enum(),\n\t\tToken: q.Token,\n\t}\n\n\treturn c.send(nq, t, opts)\n}\n\nfunc (c *Connection) send(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tvar data []byte\n\tvar err error\n\n\t\/\/ Ensure that the connection is not closed\n\tif c.closed {\n\t\treturn nil, RqlDriverError{\"Connection is closed\"}\n\t}\n\n\t\/\/ Set timeout\n\tif c.timeout == 0 {\n\t\tc.conn.SetDeadline(time.Time{})\n\t} else {\n\t\tc.conn.SetDeadline(time.Now().Add(c.timeout))\n\t}\n\n\t\/\/ Send query\n\tif data, err = proto.Marshal(q); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = binary.Write(c.conn, binary.LittleEndian, uint32(len(data))); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = binary.Write(c.conn, binary.BigEndian, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return immediately if the noreply option was set\n\tif noreply, ok := opts[\"noreply\"]; ok && noreply.(bool) {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Read response\n\tvar messageLength uint32\n\tif err := binary.Read(c.conn, binary.LittleEndian, &messageLength); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuffer := make([]byte, messageLength)\n\t_, err = io.ReadFull(c.conn, buffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &p.Response{}\n\terr = proto.Unmarshal(buffer, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Ensure that this is the response we were expecting\n\tif q.GetToken() != r.GetToken() {\n\t\treturn nil, RqlDriverError{\"Unexpected response received.\"}\n\t}\n\n\t\/\/ De-construct datum and return the result\n\tswitch r.GetType() {\n\tcase p.Response_CLIENT_ERROR:\n\t\treturn nil, RqlClientError{rqlResponseError{r, t}}\n\tcase p.Response_COMPILE_ERROR:\n\t\treturn nil, RqlCompileError{rqlResponseError{r, t}}\n\tcase p.Response_RUNTIME_ERROR:\n\t\treturn nil, RqlRuntimeError{rqlResponseError{r, t}}\n\tcase p.Response_SUCCESS_PARTIAL, p.Response_SUCCESS_SEQUENCE:\n\t\treturn &ResultRows{\n\t\t\tconn: c,\n\t\t\tquery: q,\n\t\t\tterm: t,\n\t\t\topts: opts,\n\t\t\tbuffer: r.GetResponse(),\n\t\t\tend: len(r.GetResponse()),\n\t\t\ttoken: q.GetToken(),\n\t\t\tresponseType: r.GetType(),\n\t\t}, nil\n\tcase p.Response_SUCCESS_ATOM:\n\t\treturn &ResultRows{\n\t\t\tconn: c,\n\t\t\tquery: q,\n\t\t\tterm: t,\n\t\t\topts: opts,\n\t\t\tbuffer: r.GetResponse(),\n\t\t\tend: len(r.GetResponse()),\n\t\t\ttoken: q.GetToken(),\n\t\t\tresponseType: r.GetType(),\n\t\t}, nil\n\tdefault:\n\t\treturn nil, RqlDriverError{fmt.Sprintf(\"Unexpected response type received: %s\", r.GetType())}\n\t}\n}\n<commit_msg>Fixed issue with DB value in the connection not being used<commit_after>package gorethink\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n\t\"io\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype Connection struct {\n\t\/\/ The underlying Connection\n\tconn net.Conn\n\n\ttoken int64\n\taddress string\n\tdatabase string\n\ttimeout time.Duration\n\tauthkey string\n\ttimeFormat string\n\tclosed bool\n}\n\nfunc newConnection(args map[string]interface{}) *Connection {\n\tc := &Connection{}\n\n\tif token, ok := args[\"token\"]; ok {\n\t\tc.token = token.(int64)\n\t}\n\tif address, ok := args[\"address\"]; ok {\n\t\tc.address = address.(string)\n\t}\n\tif database, ok := args[\"database\"]; ok {\n\t\tc.database = database.(string)\n\t}\n\tif timeout, ok := args[\"timeout\"]; ok {\n\t\tc.timeout = timeout.(time.Duration)\n\t}\n\tif authkey, ok := args[\"authkey\"]; ok {\n\t\tc.authkey = authkey.(string)\n\t}\n\n\treturn c\n}\n\n\/\/ Connect opens a connection between the driver and the client\nfunc Connect(args map[string]interface{}) (*Connection, error) {\n\tc := newConnection(args)\n\terr := c.Reconnect()\n\n\treturn c, err\n}\n\n\/\/ Reconnect closes the previous connection and attempts to connect again.\nfunc (c *Connection) Reconnect() error {\n\tvar err error\n\tif err = c.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.Dial(\"tcp\", c.address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(conn, binary.LittleEndian, p.VersionDummy_V0_2); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ authorization key\n\tif err := binary.Write(conn, binary.LittleEndian, uint32(len(c.authkey))); err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(conn, binary.BigEndian, []byte(c.authkey)); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ read server response to authorization key (terminated by NUL)\n\treader := bufio.NewReader(conn)\n\tline, err := reader.ReadBytes('\\x00')\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ convert to string and remove trailing NUL byte\n\tresponse := string(line[:len(line)-1])\n\tif response != \"SUCCESS\" {\n\t\t\/\/ we failed authorization or something else terrible happened\n\t\treturn RqlDriverError{fmt.Sprintf(\"Server dropped connection with message: \\\"%s\\\"\", response)}\n\t}\n\n\tc.conn = conn\n\tc.closed = false\n\n\treturn nil\n}\n\n\/\/ Close closes the connection\nfunc (c *Connection) Close() error {\n\tif c.conn == nil || c.closed {\n\t\treturn nil\n\t}\n\n\terr := c.conn.Close()\n\tc.closed = true\n\n\treturn err\n}\n\n\/\/ Use changes the default database used\nfunc (c *Connection) Use(database string) {\n\tc.database = database\n}\n\n\/\/ getToken generates the next query token, used to number requests and match\n\/\/ responses with requests.\nfunc (c *Connection) nextToken() int64 {\n\treturn atomic.AddInt64(&c.token, 1)\n}\n\n\/\/ startQuery creates a query from the term given and sends it to the server.\n\/\/ The result from the server is returned as ResultRows\nfunc (c *Connection) startQuery(t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\ttoken := c.nextToken()\n\n\t\/\/ Build query tree\n\tpt := t.build()\n\n\t\/\/ Build global options\n\tglobalOpts := []*p.Query_AssocPair{}\n\tfor k, v := range opts {\n\t\tif k == \"db\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"db\"),\n\t\t\t\tVal: Db(v).build(),\n\t\t\t})\n\t\t} else if k == \"use_outdated\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"use_outdated\"),\n\t\t\t\tVal: Expr(v).build(),\n\t\t\t})\n\t\t} else if k == \"noreply\" {\n\t\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\t\tKey: proto.String(\"noreply\"),\n\t\t\t\tVal: Expr(v).build(),\n\t\t\t})\n\t\t}\n\t}\n\t\/\/ If no DB option was set default to the value set in the connection\n\tif _, ok := opts[\"db\"]; !ok {\n\t\tglobalOpts = append(globalOpts, &p.Query_AssocPair{\n\t\t\tKey: proto.String(\"db\"),\n\t\t\tVal: Db(c.database).build(),\n\t\t})\n\t}\n\n\t\/\/ Construct query\n\tquery := &p.Query{\n\t\tType: p.Query_START.Enum(),\n\t\tToken: proto.Int64(token),\n\t\tQuery: pt,\n\t\tGlobalOptargs: globalOpts,\n\t}\n\n\treturn c.send(query, t, opts)\n}\n\n\/\/ continueQuery continues a previously run query.\nfunc (c *Connection) continueQuery(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tnq := &p.Query{\n\t\tType: p.Query_CONTINUE.Enum(),\n\t\tToken: q.Token,\n\t}\n\n\treturn c.send(nq, t, opts)\n}\n\n\/\/ stopQuery sends closes a query by sending Query_STOP to the server.\nfunc (c *Connection) stopQuery(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tnq := &p.Query{\n\t\tType: p.Query_STOP.Enum(),\n\t\tToken: q.Token,\n\t}\n\n\treturn c.send(nq, t, opts)\n}\n\nfunc (c *Connection) send(q *p.Query, t RqlTerm, opts map[string]interface{}) (*ResultRows, error) {\n\tvar data []byte\n\tvar err error\n\n\t\/\/ Ensure that the connection is not closed\n\tif c.closed {\n\t\treturn nil, RqlDriverError{\"Connection is closed\"}\n\t}\n\n\t\/\/ Set timeout\n\tif c.timeout == 0 {\n\t\tc.conn.SetDeadline(time.Time{})\n\t} else {\n\t\tc.conn.SetDeadline(time.Now().Add(c.timeout))\n\t}\n\n\t\/\/ Send query\n\tif data, err = proto.Marshal(q); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = binary.Write(c.conn, binary.LittleEndian, uint32(len(data))); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = binary.Write(c.conn, binary.BigEndian, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return immediately if the noreply option was set\n\tif noreply, ok := opts[\"noreply\"]; ok && noreply.(bool) {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Read response\n\tvar messageLength uint32\n\tif err := binary.Read(c.conn, binary.LittleEndian, &messageLength); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuffer := make([]byte, messageLength)\n\t_, err = io.ReadFull(c.conn, buffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &p.Response{}\n\terr = proto.Unmarshal(buffer, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Ensure that this is the response we were expecting\n\tif q.GetToken() != r.GetToken() {\n\t\treturn nil, RqlDriverError{\"Unexpected response received.\"}\n\t}\n\n\t\/\/ De-construct datum and return the result\n\tswitch r.GetType() {\n\tcase p.Response_CLIENT_ERROR:\n\t\treturn nil, RqlClientError{rqlResponseError{r, t}}\n\tcase p.Response_COMPILE_ERROR:\n\t\treturn nil, RqlCompileError{rqlResponseError{r, t}}\n\tcase p.Response_RUNTIME_ERROR:\n\t\treturn nil, RqlRuntimeError{rqlResponseError{r, t}}\n\tcase p.Response_SUCCESS_PARTIAL, p.Response_SUCCESS_SEQUENCE:\n\t\treturn &ResultRows{\n\t\t\tconn: c,\n\t\t\tquery: q,\n\t\t\tterm: t,\n\t\t\topts: opts,\n\t\t\tbuffer: r.GetResponse(),\n\t\t\tend: len(r.GetResponse()),\n\t\t\ttoken: q.GetToken(),\n\t\t\tresponseType: r.GetType(),\n\t\t}, nil\n\tcase p.Response_SUCCESS_ATOM:\n\t\treturn &ResultRows{\n\t\t\tconn: c,\n\t\t\tquery: q,\n\t\t\tterm: t,\n\t\t\topts: opts,\n\t\t\tbuffer: r.GetResponse(),\n\t\t\tend: len(r.GetResponse()),\n\t\t\ttoken: q.GetToken(),\n\t\t\tresponseType: r.GetType(),\n\t\t}, nil\n\tdefault:\n\t\treturn nil, RqlDriverError{fmt.Sprintf(\"Unexpected response type received: %s\", r.GetType())}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tarantool\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/phonkee\/godsn\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n)\n\ntype Options struct {\n\tConnectTimeout time.Duration\n\tQueryTimeout time.Duration\n\tDefaultSpace string\n\tUser string\n\tPassword string\n}\n\ntype Greeting struct {\n\tVersion []byte\n\tAuth []byte\n}\n\ntype Connection struct {\n\taddr string\n\trequestID uint32\n\trequests *requestMap\n\twriteChan chan []byte \/\/ packed messages with header\n\tcloseOnce sync.Once\n\texit chan bool\n\tclosed chan bool\n\ttcpConn net.Conn\n\t\/\/ options\n\tqueryTimeout time.Duration\n\tGreeting *Greeting\n\tpackData *packData\n}\n\nfunc Connect(addr string, options *Options) (conn *Connection, err error) {\n\tdefer func() { \/\/ close opened connection if error\n\t\tif err != nil && conn != nil {\n\t\t\tif conn.tcpConn != nil {\n\t\t\t\tconn.tcpConn.Close()\n\t\t\t}\n\t\t\tconn = nil\n\t\t}\n\t}()\n\n\tconn = &Connection{\n\t\taddr: addr,\n\t\trequests: newRequestMap(),\n\t\twriteChan: make(chan []byte, 256),\n\t\texit: make(chan bool),\n\t\tclosed: make(chan bool),\n\t}\n\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\n\topts := *options \/\/ copy to new object\n\n\tif opts.ConnectTimeout.Nanoseconds() == 0 {\n\t\topts.ConnectTimeout = time.Duration(time.Second)\n\t}\n\n\tif opts.QueryTimeout.Nanoseconds() == 0 {\n\t\topts.QueryTimeout = time.Duration(time.Second)\n\t}\n\n\tdsn, err := godsn.Parse(\"\/\/\" + addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif options.User == \"\" {\n\t\tuser := dsn.User()\n\t\tif user != nil {\n\t\t\tusername := user.Username()\n\t\t\tpass, _ := user.Password()\n\t\t\toptions.User = username\n\t\t\toptions.Password = pass\n\t\t}\n\n\t}\n\tremoteAddr := dsn.Host()\n\tpath := dsn.Path()\n\n\tif opts.DefaultSpace == \"\" {\n\t\tif len(path) > 0 {\n\t\t\tsplittedPath := strings.Split(path, \"\/\")\n\t\t\tif len(splittedPath) > 1 {\n\t\t\t\tif splittedPath[1] == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Wrong space: %s\", splittedPath[1])\n\t\t\t\t}\n\t\t\t\topts.DefaultSpace = splittedPath[1]\n\t\t\t}\n\t\t}\n\t}\n\n\td, err := newPackData(opts.DefaultSpace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn.packData = d\n\n\tconn.queryTimeout = opts.QueryTimeout\n\n\tconnectDeadline := time.Now().Add(opts.ConnectTimeout)\n\n\tconn.tcpConn, err = net.DialTimeout(\"tcp\", remoteAddr, opts.ConnectTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgreeting := make([]byte, 128)\n\n\tconn.tcpConn.SetDeadline(connectDeadline)\n\t_, err = io.ReadFull(conn.tcpConn, greeting)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.Greeting = &Greeting{\n\t\tVersion: greeting[:64],\n\t\tAuth: greeting[64:108],\n\t}\n\n\tif options.User != \"\" {\n\t\tvar authRaw []byte\n\t\tvar authResponse *Response\n\n\t\tauthRequestID := conn.nextID()\n\n\t\tauthRaw, err = (&Auth{\n\t\t\tUser: options.User,\n\t\t\tPassword: options.Password,\n\t\t\tGreetingAuth: conn.Greeting.Auth,\n\t\t}).Pack(authRequestID, conn.packData)\n\n\t\t_, err = conn.tcpConn.Write(authRaw)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tauthResponse, err = read(conn.tcpConn)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.requestID != authRequestID {\n\t\t\terr = errors.New(\"Bad auth responseID\")\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.Error != nil {\n\t\t\terr = authResponse.Error\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ select space and index schema\n\trequest := func(req Query) (*Response, error) {\n\t\tvar err error\n\t\trequestID := conn.nextID()\n\t\tpackedReq, _ := (req).Pack(requestID, conn.packData)\n\n\t\t_, err = conn.tcpConn.Write(packedReq)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres, err := read(conn.tcpConn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif res.requestID != requestID {\n\t\t\treturn nil, errors.New(\"Bad auth responseID\")\n\t\t}\n\n\t\tif res.Error != nil {\n\t\t\treturn nil, res.Error\n\t\t}\n\n\t\treturn res, nil\n\t}\n\n\tres, err := request(&Select{\n\t\tSpace: ViewSpace,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, space := range res.Data {\n\t\tconn.packData.spaceMap[space[2].(string)] = space[0].(uint64)\n\t}\n\n\tvar defSpaceBuf bytes.Buffer\n\tdefSpaceEnc := msgpack.NewEncoder(&defSpaceBuf)\n\tconn.packData.encodeSpace(opts.DefaultSpace, defSpaceEnc)\n\tconn.packData.packedDefaultSpace = defSpaceBuf.Bytes()\n\n\tres, err = request(&Select{\n\t\tSpace: ViewIndex,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, index := range res.Data {\n\t\tspaceID := index[0].(uint64)\n\t\tindexSpaceMap, exists := conn.packData.indexMap[spaceID]\n\t\tif !exists {\n\t\t\tindexSpaceMap = make(map[string]uint64)\n\t\t\tconn.packData.indexMap[spaceID] = indexSpaceMap\n\t\t}\n\t\tindexSpaceMap[index[2].(string)] = index[1].(uint64)\n\t}\n\n\t\/\/ remove deadline\n\tconn.tcpConn.SetDeadline(time.Time{})\n\n\tgo conn.worker(conn.tcpConn)\n\n\treturn\n}\n\nfunc (conn *Connection) nextID() uint32 {\n\treturn atomic.AddUint32(&conn.requestID, 1)\n}\n\nfunc (conn *Connection) stop() {\n\tconn.closeOnce.Do(func() {\n\t\t\/\/ debug.PrintStack()\n\t\tclose(conn.exit)\n\t\tconn.tcpConn.Close()\n\t})\n}\n\nfunc (conn *Connection) Close() {\n\tconn.stop()\n\t<-conn.closed\n}\n\nfunc (conn *Connection) IsClosed() bool {\n\tselect {\n\tcase <-conn.exit:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (conn *Connection) worker(tcpConn net.Conn) {\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\twriter(tcpConn, conn.writeChan, conn.exit)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tconn.reader(tcpConn)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ send error reply to all pending requests\n\tconn.requests.CleanUp(func(req *request) {\n\t\treq.replyChan <- &Response{\n\t\t\tError: ConnectionClosedError(),\n\t\t}\n\t\tclose(req.replyChan)\n\t})\n\n\tclose(conn.closed)\n}\n\nfunc writer(tcpConn net.Conn, writeChan chan []byte, stopChan chan bool) {\n\tvar err error\n\tvar n int\n\n\tw := bufio.NewWriter(tcpConn)\n\nWRITER_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase messageBody, ok := <-writeChan:\n\t\t\tif !ok {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t\tn, err = w.Write(messageBody)\n\t\t\tif err != nil || n != len(messageBody) {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\tbreak WRITER_LOOP\n\t\tdefault:\n\t\t\tif err = w.Flush(); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t\t\/\/ same without flush\n\t\t\tselect {\n\t\t\tcase messageBody, ok := <-writeChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\t\tn, err = w.Write(messageBody)\n\t\t\t\tif err != nil || n != len(messageBody) {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\tcase <-stopChan:\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t}\n\t}\n\tif err != nil {\n\t\t\/\/ @TODO\n\t\t\/\/ pp.Println(err)\n\t}\n}\n\nfunc (conn *Connection) reader(tcpConn net.Conn) {\n\tvar response *Response\n\tvar err error\n\tvar body []byte\n\tvar req *request\n\n\tr := bufio.NewReaderSize(tcpConn, 128*1024)\n\nREADER_LOOP:\n\tfor {\n\t\t\/\/ read raw bytes\n\t\tbody, err = readMessage(r)\n\t\tif err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\tresponse = &Response{\n\t\t\tbuf: bytes.NewBuffer(body),\n\t\t}\n\n\t\t\/\/ decode response header. for requestID\n\t\terr = response.decodeHeader(response.buf)\n\t\tif err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\treq = conn.requests.Pop(response.requestID)\n\t\tif req != nil {\n\t\t\treq.replyChan <- response\n\t\t\tclose(req.replyChan)\n\t\t}\n\t}\n}\n\nfunc packIproto(requestCode byte, requestID uint32, body []byte) []byte {\n\th := [...]byte{\n\t\t0xce, 0, 0, 0, 0, \/\/ length\n\t\t0x82, \/\/ 2 element map\n\t\tKeyCode, byte(requestCode), \/\/ request code\n\t\tKeySync, 0xce,\n\t\tbyte(requestID >> 24), byte(requestID >> 16),\n\t\tbyte(requestID >> 8), byte(requestID),\n\t}\n\n\tl := uint32(len(h) - 5 + len(body))\n\th[1] = byte(l >> 24)\n\th[2] = byte(l >> 16)\n\th[3] = byte(l >> 8)\n\th[4] = byte(l)\n\n\treturn append(h[:], body...)\n}\n<commit_msg>Support dsn's with connection schema specified<commit_after>package tarantool\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/phonkee\/godsn\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n)\n\ntype Options struct {\n\tConnectTimeout time.Duration\n\tQueryTimeout time.Duration\n\tDefaultSpace string\n\tUser string\n\tPassword string\n}\n\ntype Greeting struct {\n\tVersion []byte\n\tAuth []byte\n}\n\ntype Connection struct {\n\taddr string\n\trequestID uint32\n\trequests *requestMap\n\twriteChan chan []byte \/\/ packed messages with header\n\tcloseOnce sync.Once\n\texit chan bool\n\tclosed chan bool\n\ttcpConn net.Conn\n\t\/\/ options\n\tqueryTimeout time.Duration\n\tGreeting *Greeting\n\tpackData *packData\n}\n\nfunc Connect(addr string, options *Options) (conn *Connection, err error) {\n\tdefer func() { \/\/ close opened connection if error\n\t\tif err != nil && conn != nil {\n\t\t\tif conn.tcpConn != nil {\n\t\t\t\tconn.tcpConn.Close()\n\t\t\t}\n\t\t\tconn = nil\n\t\t}\n\t}()\n\n\tconn = &Connection{\n\t\taddr: addr,\n\t\trequests: newRequestMap(),\n\t\twriteChan: make(chan []byte, 256),\n\t\texit: make(chan bool),\n\t\tclosed: make(chan bool),\n\t}\n\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\n\topts := *options \/\/ copy to new object\n\n\tif opts.ConnectTimeout.Nanoseconds() == 0 {\n\t\topts.ConnectTimeout = time.Duration(time.Second)\n\t}\n\n\tif opts.QueryTimeout.Nanoseconds() == 0 {\n\t\topts.QueryTimeout = time.Duration(time.Second)\n\t}\n\n\tvar dsn *godsn.DSN\n\n\t\/\/ remove schema, if present\n\tif strings.HasPrefix(addr, \"tcp:\/\/\") {\n\t\tdsn, err = godsn.Parse(strings.Split(addr, \"tcp:\")[1])\n\t} else if strings.HasPrefix(addr, \"unix:\/\/\") {\n\t\tdsn, err = godsn.Parse(strings.Split(addr, \"unix:\")[1])\n\t} else {\n\t\tdsn, err = godsn.Parse(\"\/\/\" + addr)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif options.User == \"\" {\n\t\tuser := dsn.User()\n\t\tif user != nil {\n\t\t\tusername := user.Username()\n\t\t\tpass, _ := user.Password()\n\t\t\toptions.User = username\n\t\t\toptions.Password = pass\n\t\t}\n\n\t}\n\tremoteAddr := dsn.Host()\n\tpath := dsn.Path()\n\n\tif opts.DefaultSpace == \"\" {\n\t\tif len(path) > 0 {\n\t\t\tsplittedPath := strings.Split(path, \"\/\")\n\t\t\tif len(splittedPath) > 1 {\n\t\t\t\tif splittedPath[1] == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Wrong space: %s\", splittedPath[1])\n\t\t\t\t}\n\t\t\t\topts.DefaultSpace = splittedPath[1]\n\t\t\t}\n\t\t}\n\t}\n\n\td, err := newPackData(opts.DefaultSpace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn.packData = d\n\n\tconn.queryTimeout = opts.QueryTimeout\n\n\tconnectDeadline := time.Now().Add(opts.ConnectTimeout)\n\n\tconn.tcpConn, err = net.DialTimeout(\"tcp\", remoteAddr, opts.ConnectTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgreeting := make([]byte, 128)\n\n\tconn.tcpConn.SetDeadline(connectDeadline)\n\t_, err = io.ReadFull(conn.tcpConn, greeting)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.Greeting = &Greeting{\n\t\tVersion: greeting[:64],\n\t\tAuth: greeting[64:108],\n\t}\n\n\tif options.User != \"\" {\n\t\tvar authRaw []byte\n\t\tvar authResponse *Response\n\n\t\tauthRequestID := conn.nextID()\n\n\t\tauthRaw, err = (&Auth{\n\t\t\tUser: options.User,\n\t\t\tPassword: options.Password,\n\t\t\tGreetingAuth: conn.Greeting.Auth,\n\t\t}).Pack(authRequestID, conn.packData)\n\n\t\t_, err = conn.tcpConn.Write(authRaw)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tauthResponse, err = read(conn.tcpConn)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.requestID != authRequestID {\n\t\t\terr = errors.New(\"Bad auth responseID\")\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.Error != nil {\n\t\t\terr = authResponse.Error\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ select space and index schema\n\trequest := func(req Query) (*Response, error) {\n\t\tvar err error\n\t\trequestID := conn.nextID()\n\t\tpackedReq, _ := (req).Pack(requestID, conn.packData)\n\n\t\t_, err = conn.tcpConn.Write(packedReq)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres, err := read(conn.tcpConn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif res.requestID != requestID {\n\t\t\treturn nil, errors.New(\"Bad auth responseID\")\n\t\t}\n\n\t\tif res.Error != nil {\n\t\t\treturn nil, res.Error\n\t\t}\n\n\t\treturn res, nil\n\t}\n\n\tres, err := request(&Select{\n\t\tSpace: ViewSpace,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, space := range res.Data {\n\t\tconn.packData.spaceMap[space[2].(string)] = space[0].(uint64)\n\t}\n\n\tvar defSpaceBuf bytes.Buffer\n\tdefSpaceEnc := msgpack.NewEncoder(&defSpaceBuf)\n\tconn.packData.encodeSpace(opts.DefaultSpace, defSpaceEnc)\n\tconn.packData.packedDefaultSpace = defSpaceBuf.Bytes()\n\n\tres, err = request(&Select{\n\t\tSpace: ViewIndex,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, index := range res.Data {\n\t\tspaceID := index[0].(uint64)\n\t\tindexSpaceMap, exists := conn.packData.indexMap[spaceID]\n\t\tif !exists {\n\t\t\tindexSpaceMap = make(map[string]uint64)\n\t\t\tconn.packData.indexMap[spaceID] = indexSpaceMap\n\t\t}\n\t\tindexSpaceMap[index[2].(string)] = index[1].(uint64)\n\t}\n\n\t\/\/ remove deadline\n\tconn.tcpConn.SetDeadline(time.Time{})\n\n\tgo conn.worker(conn.tcpConn)\n\n\treturn\n}\n\nfunc (conn *Connection) nextID() uint32 {\n\treturn atomic.AddUint32(&conn.requestID, 1)\n}\n\nfunc (conn *Connection) stop() {\n\tconn.closeOnce.Do(func() {\n\t\t\/\/ debug.PrintStack()\n\t\tclose(conn.exit)\n\t\tconn.tcpConn.Close()\n\t})\n}\n\nfunc (conn *Connection) Close() {\n\tconn.stop()\n\t<-conn.closed\n}\n\nfunc (conn *Connection) IsClosed() bool {\n\tselect {\n\tcase <-conn.exit:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (conn *Connection) worker(tcpConn net.Conn) {\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\twriter(tcpConn, conn.writeChan, conn.exit)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tconn.reader(tcpConn)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ send error reply to all pending requests\n\tconn.requests.CleanUp(func(req *request) {\n\t\treq.replyChan <- &Response{\n\t\t\tError: ConnectionClosedError(),\n\t\t}\n\t\tclose(req.replyChan)\n\t})\n\n\tclose(conn.closed)\n}\n\nfunc writer(tcpConn net.Conn, writeChan chan []byte, stopChan chan bool) {\n\tvar err error\n\tvar n int\n\n\tw := bufio.NewWriter(tcpConn)\n\nWRITER_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase messageBody, ok := <-writeChan:\n\t\t\tif !ok {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t\tn, err = w.Write(messageBody)\n\t\t\tif err != nil || n != len(messageBody) {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\tbreak WRITER_LOOP\n\t\tdefault:\n\t\t\tif err = w.Flush(); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t\t\/\/ same without flush\n\t\t\tselect {\n\t\t\tcase messageBody, ok := <-writeChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\t\tn, err = w.Write(messageBody)\n\t\t\t\tif err != nil || n != len(messageBody) {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\tcase <-stopChan:\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t}\n\t}\n\tif err != nil {\n\t\t\/\/ @TODO\n\t\t\/\/ pp.Println(err)\n\t}\n}\n\nfunc (conn *Connection) reader(tcpConn net.Conn) {\n\tvar response *Response\n\tvar err error\n\tvar body []byte\n\tvar req *request\n\n\tr := bufio.NewReaderSize(tcpConn, 128*1024)\n\nREADER_LOOP:\n\tfor {\n\t\t\/\/ read raw bytes\n\t\tbody, err = readMessage(r)\n\t\tif err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\tresponse = &Response{\n\t\t\tbuf: bytes.NewBuffer(body),\n\t\t}\n\n\t\t\/\/ decode response header. for requestID\n\t\terr = response.decodeHeader(response.buf)\n\t\tif err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\treq = conn.requests.Pop(response.requestID)\n\t\tif req != nil {\n\t\t\treq.replyChan <- response\n\t\t\tclose(req.replyChan)\n\t\t}\n\t}\n}\n\nfunc packIproto(requestCode byte, requestID uint32, body []byte) []byte {\n\th := [...]byte{\n\t\t0xce, 0, 0, 0, 0, \/\/ length\n\t\t0x82, \/\/ 2 element map\n\t\tKeyCode, byte(requestCode), \/\/ request code\n\t\tKeySync, 0xce,\n\t\tbyte(requestID >> 24), byte(requestID >> 16),\n\t\tbyte(requestID >> 8), byte(requestID),\n\t}\n\n\tl := uint32(len(h) - 5 + len(body))\n\th[1] = byte(l >> 24)\n\th[2] = byte(l >> 16)\n\th[3] = byte(l >> 8)\n\th[4] = byte(l)\n\n\treturn append(h[:], body...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/camptocamp\/conplicity\/engines\"\n\t\"github.com\/camptocamp\/conplicity\/handler\"\n\t\"github.com\/camptocamp\/conplicity\/providers\"\n\t\"github.com\/camptocamp\/conplicity\/util\"\n\t\"github.com\/camptocamp\/conplicity\/volume\"\n)\n\nvar version = \"undefined\"\n\nfunc main() {\n\tvar err error\n\n\tc, err := handler.NewConplicity(version)\n\tutil.CheckErr(err, \"Failed to setup Conplicity handler: %v\", \"fatal\")\n\n\tlog.Infof(\"Conplity v%s starting backup...\", version)\n\n\tvols, err := c.GetVolumes()\n\tutil.CheckErr(err, \"Failed to get Docker volumes: %v\", \"fatal\")\n\n\tfor _, vol := range vols {\n\t\tmetrics, err := backupVolume(c, vol)\n\t\tutil.CheckErr(err, \"Failed to process volume \"+vol.Name+\": %v\", \"fatal\")\n\t\tc.Metrics = append(c.Metrics, metrics...)\n\t}\n\n\terr = c.PushToPrometheus()\n\tutil.CheckErr(err, \"Failed post data to Prometheus Pushgateway: %v\", \"fatal\")\n\n\tlog.Infof(\"End backup...\")\n}\n\nfunc backupVolume(c *handler.Conplicity, vol *volume.Volume) (metrics []string, err error) {\n\tp := providers.GetProvider(c, vol)\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"provider\": p.GetName(),\n\t}).Info(\"Found data provider\")\n\terr = providers.PrepareBackup(p)\n\tutil.CheckErr(err, \"Failed to prepare backup for volume \"+vol.Name+\": %v\", \"fatal\")\n\n\te := engines.GetEngine(c, vol)\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"engine\": e.GetName(),\n\t}).Info(\"Found backup engine\")\n\n\tmetrics, err = e.Backup()\n\tutil.CheckErr(err, \"Failed to backup volume \"+vol.Name+\": %v\", \"fatal\")\n\treturn\n}\n<commit_msg>Continue if a volume fails to backup<commit_after>package main\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/camptocamp\/conplicity\/engines\"\n\t\"github.com\/camptocamp\/conplicity\/handler\"\n\t\"github.com\/camptocamp\/conplicity\/providers\"\n\t\"github.com\/camptocamp\/conplicity\/util\"\n\t\"github.com\/camptocamp\/conplicity\/volume\"\n)\n\nvar version = \"undefined\"\n\nfunc main() {\n\tvar err error\n\n\tc, err := handler.NewConplicity(version)\n\tutil.CheckErr(err, \"Failed to setup Conplicity handler: %v\", \"fatal\")\n\n\tlog.Infof(\"Conplity v%s starting backup...\", version)\n\n\tvols, err := c.GetVolumes()\n\tutil.CheckErr(err, \"Failed to get Docker volumes: %v\", \"fatal\")\n\n\tfor _, vol := range vols {\n\t\tmetrics, err := backupVolume(c, vol)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to backup volume %s: %v\", vol.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tc.Metrics = append(c.Metrics, metrics...)\n\t}\n\n\terr = c.PushToPrometheus()\n\tutil.CheckErr(err, \"Failed post data to Prometheus Pushgateway: %v\", \"fatal\")\n\n\tlog.Infof(\"End backup...\")\n}\n\nfunc backupVolume(c *handler.Conplicity, vol *volume.Volume) (metrics []string, err error) {\n\tp := providers.GetProvider(c, vol)\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"provider\": p.GetName(),\n\t}).Info(\"Found data provider\")\n\terr = providers.PrepareBackup(p)\n\tutil.CheckErr(err, \"Failed to prepare backup for volume \"+vol.Name+\": %v\", \"fatal\")\n\n\te := engines.GetEngine(c, vol)\n\tlog.WithFields(log.Fields{\n\t\t\"volume\": vol.Name,\n\t\t\"engine\": e.GetName(),\n\t}).Info(\"Found backup engine\")\n\n\tmetrics, err = e.Backup()\n\tutil.CheckErr(err, \"Failed to backup volume \"+vol.Name+\": %v\", \"fatal\")\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n \"encoding\/json\"\n \"time\"\n)\n\n\/\/ EntryView instances represent one user's view of a password database entry.\n\/\/ Most fields are kept encrypted until they need to be accessed.\ntype EntryView struct {\n \/\/ The Id is the database row identifier.\n Id int64\n \/\/ CreatedAt is the time when the entry was created.\n CreatedAt time.Time\n \/\/ UpdatedAt is the time when the entry was last updated.\n UpdatedAt time.Time\n\n \/\/ The EntryId string is the unique identifier for the password entry, and ties together the individual views into the entry of each user.\n EntryId string\n\n \/\/ UserId is the foreign key of the owning user's database entry.\n UserId int64\n \/\/ The user field is the pointer to the owning user's object.\n user *User\n\n \/\/ The Permissions field is the encrypted string describing the permissions that the user has for this entry. The permissions are granted\n \/\/ by the associated authority.\n Permissions string\n \/\/ AuthorityId is the foreign key of the user granting the permissions for this entry.\n AuthorityId int64\n \/\/ The authority field is the pointer to the authorizing user's object.\n authority *User\n\n \/\/ The Group field is the encrypted name of the group to which the entry belongs.\n Group string\n \/\/ The Icon field is the encrypted image data or path to image file of the entry.\n Icon string\n \/\/ The Title field is the encrypted title of the entry.\n Title string\n\n \/\/ The Username field is the encrypted username stored in the entry.\n Username string\n \/\/ The Password field is the encrypted password stored in the entry.\n Password string\n \/\/ The Url field is the encrypted url stored in the entry.\n Url string\n \/\/ The Comment field is the encrypted comment stored in the entry.\n Comment string\n \/\/ The Expiry field is the encrypted expiry date of the password stored in the entry.\n Expiry string\n \/\/ The Extras field is extra encrypted JSON data associated with the entry.\n Extras string\n\n \/\/ The Userdata field is extra encrypted user-specific JSON data associated with the entry.\n Userdata string\n}\n\n\/\/ The getAuthority function finds the authority user model instance and sets the internal reference pointer.\nfunc (this *EntryView) getAuthority() *User {\n if this.authority == nil {\n var authority User\n DB.Model(this).Related(&authority, \"AuthorityId\")\n this.authority = &authority\n }\n return this.authority\n}\n\n\/\/ The getUser function finds the user model instance and sets the internal reference pointer.\nfunc (this *EntryView) getUser() *User {\n if this.user == nil {\n var user User\n DB.Model(this).Related(&user, \"UserId\")\n this.user = &user\n }\n return this.user\n}\n\n\/\/ ReadGroup reads the group field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the group field is granted to users with any permissions, since this field is necessary in order to be able\n\/\/ to display the entry properly.\nfunc (this *EntryView) ReadGroup() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Group)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"group read permission denied\"}\n}\n\n\/\/ ReadIcon reads the icon field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the icon field is granted to users with any permissions, since this field is necessary\n\/\/ in order to be able to display the entry properly.\nfunc (this *EntryView) ReadIcon() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Icon)\n if err != nil {\n return \"\", err\n }\n\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"icon read permission denied\"}\n}\n\n\/\/ ReadTitle reads the title field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the title field is granted to users with any permissions, since this field is necessary\n\/\/ in order to be able to display the entry properly.\nfunc (this *EntryView) ReadTitle() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Title)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"title read permission denied\"}\n}\n\n\/\/ ReadUsername reads the username field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadUsername() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Username)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"username read permission denied\"}\n}\n\n\/\/ ReadPassword reads the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadPassword() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Password)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"password read permission denied\"}\n}\n\n\/\/ ReadUrl reads the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadUrl() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Url)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"URL read permission denied\"}\n}\n\n\/\/ ReadComment reads the comment field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadComment() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Comment)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", &Error{ErrPermission, this.user.Name, \"comment read permission denied\"}\n}\n\n\/\/ ReadExpiry reads the expiry date field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadExpiry() (time.Time, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Expiry)\n if err != nil {\n return time.Now(), err\n }\n\n var t time.Time\n err = t.UnmarshalText(data)\n if err != nil {\n return time.Now(), WrapError(err).SetCode(ErrOther).SetUser(this.user)\n }\n return t, nil\n }\n return time.Now(), &Error{ErrPermission, this.user.Name, \"expiry date read permission denied\"}\n}\n\n\/\/ ReadExtras reads the extras field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadExtras(user string) (interface{}, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Extras)\n if err != nil {\n return nil, err\n }\n\n var extras interface{}\n err = json.Unmarshal(data, &extras)\n if err != nil {\n return nil, WrapError(err).SetCode(ErrOther).SetUser(this.user)\n }\n return extras, nil\n }\n return nil, &Error{ErrPermission, this.user.Name, \"comment read permission denied\"}\n}\n\n\/\/ ReadUserdata reads the userdata field of the entry.\n\/\/ No specific permissions are required since this field is only ever accessible by the user and is not propagated to others.\nfunc (this *EntryView) ReadUserdata() (interface{}, error) {\n data, err := this.getUser().Decrypt(this.Userdata)\n if err != nil {\n return nil, err\n }\n\n var userdata interface{}\n err = json.Unmarshal(data, &userdata)\n if err != nil {\n return nil, WrapError(err).SetCode(ErrOther).SetUser(this.user)\n }\n return userdata.(map[string]interface{}), nil\n}\n\n\/\/ WriteGroup writes the group field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteGroup(group string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(group))\n if err != nil {\n return err\n }\n this.Group = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"group write permission denied\"}\n}\n\n\/\/ WriteIcon writes the icon field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteIcon(icon string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(icon))\n if err != nil {\n return err\n }\n this.Icon = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"icon write permission denied\"}\n}\n\n\/\/ WriteTitle writes the title field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteTitle(title string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(title))\n if err != nil {\n return err\n }\n this.Title = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"title write permission denied\"}\n}\n\n\/\/ WriteUsername writes the username field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteUsername(username string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(username))\n if err != nil {\n return err\n }\n this.Username = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"username write permission denied\"}\n}\n\n\/\/ WritePassword writes the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WritePassword(password string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(password))\n if err != nil {\n return err\n }\n this.Password = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"password write permission denied\"}\n}\n\n\/\/ WriteUrl writes the url field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteUrl(url string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(url))\n if err != nil {\n return err\n }\n this.Url = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"url write permission denied\"}\n}\n\n\/\/ WriteComment writes the comment field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteComment(comment string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(comment))\n if err != nil {\n return err\n }\n this.Comment = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"comment write permission denied\"}\n}\n\n\/\/ WriteExpiry writes the expiry field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteExpiry(expiry time.Time) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(expiry.Format(time.RFC3339)))\n if err != nil {\n return err\n }\n this.Expiry = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"expiry date write permission denied\"}\n}\n\n\/\/ WriteExtras writes the extras field of the entry, provided that the user has appropriate permissions and a valid encryption key.\nfunc (this *EntryView) WriteExtras(extras interface{}) error {\n if this.getUser().Can(\"w\", this) {\n bytes, err := json.Marshal(extras)\n if err != nil {\n return WrapError(err).SetCode(ErrOther).SetUser(this.user)\n }\n\n data, e := this.getUser().Encrypt(bytes)\n if e != nil {\n return e\n }\n this.Extras = data\n return nil\n }\n return &Error{ErrPermission, this.user.Name, \"extras write permission denied\"}\n}\n\n\/\/ WriteUserdata writes the userdata field of the entry, provided that the user a valid encryption key.\nfunc (this *EntryView) WriteUserdata(userdata interface{}) error {\n bytes, err := json.Marshal(userdata)\n if err != nil {\n return WrapError(err).SetCode(ErrOther).SetUser(this.user)\n }\n\n data, e := this.getUser().Encrypt(bytes)\n if e != nil {\n return e\n }\n this.Userdata = data\n return nil\n}\n<commit_msg>Updated error handling and user access in entries<commit_after>package core\n\nimport (\n \"encoding\/json\"\n \"time\"\n)\n\n\/\/ EntryView instances represent one user's view of a password database entry.\n\/\/ Most fields are kept encrypted until they need to be accessed.\ntype EntryView struct {\n \/\/ The Id is the database row identifier.\n Id int64\n \/\/ CreatedAt is the time when the entry was created.\n CreatedAt time.Time\n \/\/ UpdatedAt is the time when the entry was last updated.\n UpdatedAt time.Time\n\n \/\/ The EntryId string is the unique identifier for the password entry, and ties together the individual views into the entry of each user.\n EntryId string\n\n \/\/ UserId is the foreign key of the owning user's database entry.\n UserId int64\n\n \/\/ The Permissions field is the signed string describing the permissions that the user has for this entry. The permissions are granted\n \/\/ by the associated authority.\n Permissions string\n \/\/ AuthorityId is the foreign key of the user granting the permissions for this entry.\n AuthorityId int64\n\n \/\/ The Group field is the encrypted name of the group to which the entry belongs.\n Group string\n \/\/ The Icon field is the encrypted image data or path to image file of the entry.\n Icon string\n \/\/ The Title field is the encrypted title of the entry.\n Title string\n\n \/\/ The Username field is the encrypted username stored in the entry.\n Username string\n \/\/ The Password field is the encrypted password stored in the entry.\n Password string\n \/\/ The Url field is the encrypted url stored in the entry.\n Url string\n \/\/ The Comment field is the encrypted comment stored in the entry.\n Comment string\n \/\/ The Expiry field is the encrypted expiry date of the password stored in the entry.\n Expiry string\n \/\/ The Extras field is extra encrypted JSON data associated with the entry.\n Extras string\n\n \/\/ The Userdata field is extra encrypted user-specific JSON data associated with the entry.\n Userdata string\n}\n\n\/\/ The getAuthority function finds the authority user model instance and sets the internal reference pointer.\nfunc (this *EntryView) getAuthority() *User {\n authority := new(User)\n DB.Model(this).Related(authority, \"AuthorityId\")\n return authority\n}\n\n\/\/ The getUser function finds the user model instance and sets the internal reference pointer.\nfunc (this *EntryView) getUser() *User {\n user := new(User)\n DB.Model(this).Related(user, \"UserId\")\n return user\n}\n\n\/\/ ReadGroup reads the group field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the group field is granted to users with any permissions, since this field is necessary in order to be able\n\/\/ to display the entry properly.\nfunc (this *EntryView) ReadGroup() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Group)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"Group read permission denied\", this.getUser())\n}\n\n\/\/ ReadIcon reads the icon field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the icon field is granted to users with any permissions, since this field is necessary\n\/\/ in order to be able to display the entry properly.\nfunc (this *EntryView) ReadIcon() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Icon)\n if err != nil {\n return \"\", err\n }\n\n return string(data), nil\n }\n return \"\", NewError(\"Icon read permission denied\", this.getUser())\n}\n\n\/\/ ReadTitle reads the title field of the entry, provided that the user has appropriate permissions.\n\/\/ Read access to the title field is granted to users with any permissions, since this field is necessary\n\/\/ in order to be able to display the entry properly.\nfunc (this *EntryView) ReadTitle() (string, error) {\n if this.getUser().Can(\"*\", this) {\n data, err := this.getUser().Decrypt(this.Title)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"Title read permission denied\", this.getUser())\n}\n\n\/\/ ReadUsername reads the username field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadUsername() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Username)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"Username read permission denied\", this.getUser())\n}\n\n\/\/ ReadPassword reads the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadPassword() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Password)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"Password read permission denied\", this.getUser())\n}\n\n\/\/ ReadUrl reads the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadUrl() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Url)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"URL read permission denied\", this.getUser())\n}\n\n\/\/ ReadComment reads the comment field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadComment() (string, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Comment)\n if err != nil {\n return \"\", err\n }\n return string(data), nil\n }\n return \"\", NewError(\"Comment read permission denied\", this.getUser())\n}\n\n\/\/ ReadExpiry reads the expiry date field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadExpiry() (time.Time, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Expiry)\n if err != nil {\n return time.Now(), err\n }\n\n var t time.Time\n err = t.UnmarshalText(data)\n if err != nil {\n return time.Now(), NewError(err, this.getUser())\n }\n return t, nil\n }\n return time.Now(), NewError(\"Expiry date read permission denied\", this.getUser())\n}\n\n\/\/ ReadExtras reads the extras field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) ReadExtras(user string) (interface{}, error) {\n if this.getUser().Can(\"r\", this) {\n data, err := this.getUser().Decrypt(this.Extras)\n if err != nil {\n return nil, err\n }\n\n var extras interface{}\n err = json.Unmarshal(data, &extras)\n if err != nil {\n return nil, NewError(err, this.getUser())\n }\n return extras, nil\n }\n return nil, NewError(\"Comment read permission denied\", this.getUser())\n}\n\n\/\/ ReadUserdata reads the userdata field of the entry.\n\/\/ No specific permissions are required since this field is only ever accessible by the user and is not propagated to others.\nfunc (this *EntryView) ReadUserdata() (interface{}, error) {\n data, err := this.getUser().Decrypt(this.Userdata)\n if err != nil {\n return nil, err\n }\n\n var userdata interface{}\n err = json.Unmarshal(data, &userdata)\n if err != nil {\n return nil, NewError(err, this.getUser())\n }\n return userdata.(map[string]interface{}), nil\n}\n\n\/\/ WriteGroup writes the group field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteGroup(group string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(group))\n if err != nil {\n return err\n }\n this.Group = data\n return nil\n }\n return NewError(\"Group write permission denied\", this.getUser())\n}\n\n\/\/ WriteIcon writes the icon field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteIcon(icon string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(icon))\n if err != nil {\n return err\n }\n this.Icon = data\n return nil\n }\n return NewError(\"Icon write permission denied\", this.getUser())\n}\n\n\/\/ WriteTitle writes the title field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteTitle(title string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(title))\n if err != nil {\n return err\n }\n this.Title = data\n return nil\n }\n return NewError(\"Title write permission denied\", this.getUser())\n}\n\n\/\/ WriteUsername writes the username field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteUsername(username string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(username))\n if err != nil {\n return err\n }\n this.Username = data\n return nil\n }\n return NewError(\"Username write permission denied\", this.getUser())\n}\n\n\/\/ WritePassword writes the password field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WritePassword(password string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(password))\n if err != nil {\n return err\n }\n this.Password = data\n return nil\n }\n return NewError(\"Password write permission denied\", this.getUser())\n}\n\n\/\/ WriteUrl writes the url field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteUrl(url string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(url))\n if err != nil {\n return err\n }\n this.Url = data\n return nil\n }\n return NewError(\"URL write permission denied\", this.getUser())\n}\n\n\/\/ WriteComment writes the comment field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteComment(comment string) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(comment))\n if err != nil {\n return err\n }\n this.Comment = data\n return nil\n }\n return NewError(\"Comment write permission denied\", this.getUser())\n}\n\n\/\/ WriteExpiry writes the expiry field of the entry, provided that the user has appropriate permissions.\nfunc (this *EntryView) WriteExpiry(expiry time.Time) error {\n if this.getUser().Can(\"w\", this) {\n data, err := this.getUser().Encrypt([]byte(expiry.Format(time.RFC3339)))\n if err != nil {\n return err\n }\n this.Expiry = data\n return nil\n }\n return NewError(\"Expiry date write permission denied\", this.getUser())\n}\n\n\/\/ WriteExtras writes the extras field of the entry, provided that the user has appropriate permissions and a valid encryption key.\nfunc (this *EntryView) WriteExtras(extras interface{}) error {\n if this.getUser().Can(\"w\", this) {\n bytes, err := json.Marshal(extras)\n if err != nil {\n return NewError(err, this.getUser())\n }\n\n data, e := this.getUser().Encrypt(bytes)\n if e != nil {\n return e\n }\n this.Extras = data\n return nil\n }\n return NewError(\"Extras write permission denied\", this.getUser())\n}\n\n\/\/ WriteUserdata writes the userdata field of the entry, provided that the user a valid encryption key.\nfunc (this *EntryView) WriteUserdata(userdata interface{}) error {\n bytes, err := json.Marshal(userdata)\n if err != nil {\n return NewError(err, this.getUser())\n }\n\n data, e := this.getUser().Encrypt(bytes)\n if e != nil {\n return e\n }\n this.Userdata = data\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"github.com\/devfeel\/dotweb\/framework\/json\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar GlobalState *ServerStateInfo\n\nconst (\n\tminuteTimeLayout = \"200601021504\"\n\tdateTimeLayout = \"2006-01-02 15:04:05\"\n\tdefaultReserveMinutes = 60\n\tdefaultCheckTimeMinutes = 10\n)\n\nfunc init() {\n\tGlobalState = &ServerStateInfo{\n\t\tServerStartTime: time.Now(),\n\t\tTotalRequestCount: 0,\n\t\tTotalErrorCount: 0,\n\t\tIntervalRequestData: NewItemContext(),\n\t\tDetailRequestPageData: NewItemContext(),\n\t\tIntervalErrorData: NewItemContext(),\n\t\tDetailErrorPageData: NewItemContext(),\n\t\tDetailErrorData: NewItemContext(),\n\t\tDetailHttpCodeData: NewItemContext(),\n\t\tdataChan_Request: make(chan *RequestInfo, 1000),\n\t\tdataChan_Error: make(chan *ErrorInfo, 1000),\n\t\tdataChan_HttpCode: make(chan *HttpCodeInfo, 1000),\n\t\tinfoPool: &pool{\n\t\t\trequestInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &RequestInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t\terrorInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &ErrorInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t\thttpCodeInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &HttpCodeInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tgo GlobalState.handleInfo()\n\tgo time.AfterFunc(time.Duration(defaultCheckTimeMinutes)*time.Minute, GlobalState.checkAndRemoveIntervalData)\n}\n\n\/\/pool定义\ntype pool struct {\n\trequestInfo sync.Pool\n\terrorInfo sync.Pool\n\thttpCodeInfo sync.Pool\n}\n\n\/\/http request count info\ntype RequestInfo struct {\n\tUrl string\n\tNum uint64\n}\n\n\/\/error count info\ntype ErrorInfo struct {\n\tUrl string\n\tErrMsg string\n\tNum uint64\n}\n\n\/\/httpcode count info\ntype HttpCodeInfo struct {\n\tUrl string\n\tCode int\n\tNum uint64\n}\n\n\/\/服务器状态信息\ntype ServerStateInfo struct {\n\t\/\/服务启动时间\n\tServerStartTime time.Time\n\t\/\/该运行期间总访问次数\n\tTotalRequestCount uint64\n\t\/\/单位时间内请求数据 - 按分钟为单位\n\tIntervalRequestData *ItemContext\n\t\/\/明细请求页面数据 - 以不带参数的访问url为key\n\tDetailRequestPageData *ItemContext\n\t\/\/该运行期间异常次数\n\tTotalErrorCount uint64\n\t\/\/单位时间内异常次数 - 按分钟为单位\n\tIntervalErrorData *ItemContext\n\t\/\/明细异常页面数据 - 以不带参数的访问url为key\n\tDetailErrorPageData *ItemContext\n\t\/\/明细异常数据 - 以不带参数的访问url为key\n\tDetailErrorData *ItemContext\n\t\/\/明细Http状态码数据 - 以HttpCode为key,例如200、500等\n\tDetailHTTPCodeData *ItemContext\n\n\tdataChan_Request chan *RequestInfo\n\tdataChan_Error chan *ErrorInfo\n\tdataChan_HttpCode chan *HttpCodeInfo\n\t\/\/对象池\n\tinfoPool *pool\n}\n\n\/\/ShowHtmlData show server state data html-string format\nfunc (state *ServerStateInfo) ShowHtmlData() string {\n\tdata := \"<html><body><div>\"\n\tdata += \"ServerStartTime : \" + state.ServerStartTime.Format(dateTimeLayout)\n\tdata += \"<br>\"\n\tdata += \"TotalRequestCount : \" + strconv.FormatUint(state.TotalRequestCount, 10)\n\tdata += \"<br>\"\n\tdata += \"TotalErrorCount : \" + strconv.FormatUint(state.TotalErrorCount, 10)\n\tdata += \"<br>\"\n\tstate.IntervalRequestData.RLock()\n\tdata += \"IntervalRequestData : \" + jsonutil.GetJsonString(state.IntervalRequestData.GetCurrentMap())\n\tstate.IntervalRequestData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailRequestPageData.RLock()\n\tdata += \"DetailRequestPageData : \" + jsonutil.GetJsonString(state.DetailRequestPageData.GetCurrentMap())\n\tstate.DetailRequestPageData.RUnlock()\n\tdata += \"<br>\"\n\tstate.IntervalErrorData.RLock()\n\tdata += \"IntervalErrorData : \" + jsonutil.GetJsonString(state.IntervalErrorData.GetCurrentMap())\n\tstate.IntervalErrorData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailErrorPageData.RLock()\n\tdata += \"DetailErrorPageData : \" + jsonutil.GetJsonString(state.DetailErrorPageData.GetCurrentMap())\n\tstate.DetailErrorPageData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailErrorData.RLock()\n\tdata += \"DetailErrorData : \" + jsonutil.GetJsonString(state.DetailErrorData.GetCurrentMap())\n\tstate.DetailErrorData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailHTTPCodeData.RLock()\n\tdata += \"DetailHttpCodeData : \" + jsonutil.GetJsonString(state.DetailHTTPCodeData.GetCurrentMap())\n\tstate.DetailHTTPCodeData.RUnlock()\n\tdata += \"<\/div><\/body><\/html>\"\n\treturn data\n}\n\n\/\/QueryIntervalRequestData query request count by query time\nfunc (state *ServerStateInfo) QueryIntervalRequestData(queryKey string) uint64 {\n\treturn state.IntervalRequestData.GetUInt64(queryKey)\n}\n\n\/\/QueryIntervalErrorData query error count by query time\nfunc (state *ServerStateInfo) QueryIntervalErrorData(queryKey string) uint64 {\n\treturn state.IntervalErrorData.GetUInt64(queryKey)\n}\n\n\/\/AddRequestCount 增加请求数\nfunc (state *ServerStateInfo) AddRequestCount(page string, num uint64) uint64 {\n\tif strings.Index(page, \"\/dotweb\/\") != 0 {\n\t\tatomic.AddUint64(&state.TotalRequestCount, num)\n\t\tstate.addRequestData(page, num)\n\t}\n\treturn state.TotalRequestCount\n}\n\n\/\/AddHttpCodeCount 增加Http状态码数据\nfunc (state *ServerStateInfo) AddTTPCodeCount(page string, code int, num uint64) uint64 {\n\tif strings.Index(page, \"\/dotweb\/\") != 0 {\n\t\tstate.addHTTPCodeData(page, code, num)\n\t}\n\treturn state.TotalErrorCount\n}\n\n\/\/AddErrorCount 增加错误数\nfunc (state *ServerStateInfo) AddErrorCount(page string, err error, num uint64) uint64 {\n\tatomic.AddUint64(&state.TotalErrorCount, num)\n\tstate.addErrorData(page, err, num)\n\treturn state.TotalErrorCount\n}\n\nfunc (state *ServerStateInfo) addRequestData(page string, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.requestInfo.Get().(*RequestInfo)\n\tinfo.Url = page\n\tinfo.Num = num\n\tstate.dataChan_Request <- info\n}\n\nfunc (state *ServerStateInfo) addErrorData(page string, err error, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.errorInfo.Get().(*ErrorInfo)\n\tinfo.Url = page\n\tinfo.ErrMsg = err.Error()\n\tinfo.Num = num\n\tstate.dataChan_Error <- info\n}\n\nfunc (state *ServerStateInfo) addHTTPCodeData(page string, code int, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.httpCodeInfo.Get().(*HttpCodeInfo)\n\tinfo.Url = page\n\tinfo.Code = code\n\tinfo.Num = num\n\tstate.dataChan_HttpCode <- info\n}\n\n\/\/处理日志内部函数\nfunc (state *ServerStateInfo) handleInfo() {\n\tfor {\n\t\tselect {\n\t\tcase info := <-state.dataChan_Request:\n\t\t\t{\n\t\t\t\t\/\/set detail page data\n\t\t\t\tkey := strings.ToLower(info.Url)\n\t\t\t\tval := state.DetailRequestPageData.GetUInt64(key)\n\t\t\t\tstate.DetailRequestPageData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set interval data\n\t\t\t\tkey = time.Now().Format(minuteTimeLayout)\n\t\t\t\tval = state.IntervalRequestData.GetUInt64(key)\n\t\t\t\tstate.IntervalRequestData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.requestInfo.Put(info)\n\t\t\t}\n\t\tcase info := <-state.dataChan_Error:\n\t\t\t{\n\t\t\t\t\/\/set detail error page data\n\t\t\t\tkey := strings.ToLower(info.Url)\n\t\t\t\tval := state.DetailErrorPageData.GetUInt64(key)\n\t\t\t\tstate.DetailErrorPageData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set detail error data\n\t\t\t\tkey = info.ErrMsg\n\t\t\t\tval = state.DetailErrorData.GetUInt64(key)\n\t\t\t\tstate.DetailErrorData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set interval data\n\t\t\t\tkey = time.Now().Format(minuteTimeLayout)\n\t\t\t\tval = state.IntervalErrorData.GetUInt64(key)\n\t\t\t\tstate.IntervalErrorData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.errorInfo.Put(info)\n\t\t\t}\n\t\tcase info := <-state.dataChan_HttpCode:\n\t\t\t{\n\t\t\t\t\/\/set detail error page data\n\t\t\t\tkey := strconv.Itoa(info.Code)\n\t\t\t\tval := state.DetailHTTPCodeData.GetUInt64(key)\n\t\t\t\tstate.DetailHTTPCodeData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.httpCodeInfo.Put(info)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/check and remove need to remove interval data with request and error\nfunc (state *ServerStateInfo) checkAndRemoveIntervalData() {\n\tvar needRemoveKey []string\n\n\t\/\/check IntervalRequestData\n\tstate.IntervalRequestData.RLock()\n\tif state.IntervalRequestData.Len() > 10 {\n\t\tfor k := range state.IntervalRequestData.GetCurrentMap() {\n\t\t\tif t, err := time.Parse(minuteTimeLayout, k); err != nil {\n\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t} else {\n\t\t\t\tif time.Now().Sub(t) > defaultReserveMinutes {\n\t\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstate.IntervalRequestData.RUnlock()\n\t\/\/remove keys\n\tfor _, v := range needRemoveKey {\n\t\tstate.IntervalRequestData.Remove(v)\n\t}\n\n\t\/\/check IntervalErrorData\n\tneedRemoveKey = []string{}\n\tstate.IntervalErrorData.RLock()\n\tif state.IntervalErrorData.Len() > 10 {\n\t\tfor k := range state.IntervalErrorData.GetCurrentMap() {\n\t\t\tif t, err := time.Parse(minuteTimeLayout, k); err != nil {\n\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t} else {\n\t\t\t\tif time.Now().Sub(t) > defaultReserveMinutes {\n\t\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/remove keys\n\tfor _, v := range needRemoveKey {\n\t\tstate.IntervalErrorData.Remove(v)\n\t}\n\n\ttime.AfterFunc(time.Duration(defaultCheckTimeMinutes)*time.Minute, state.checkAndRemoveIntervalData)\n}\n<commit_msg>#### Version 1.1 * 主要新增集成基础统计数据 * 【新增】dotweb\/state接口,提供基础统计数据,主要数据说明: * 1、ServerStartTime 服务启动时间 * 2、TotalRequestCount 服务启动以来累计请求数(排除了\"\/dotweb\/\"下系统自有页面的访问数) * 3、TotalErrorCount 服务启动以来累计错误数 * 4、IntervalRequestData 按1分钟为间隔,默认保存最近60分钟,每分钟的请求数(排除了\"\/dotweb\/\"下系统自有页面的访问数) * 5、DetailRequestPageData 服务启动以来,每个页面的累计请求数(排除了\"\/dotweb\/\"下系统自有页面的访问数) * 6、IntervalErrorData 按1分钟为间隔,默认保存最近60分钟,每分钟的错误数 * 7、DetailErrorPageData 服务启动以来,每个页面的累计错误数 * 8、DetailErrorData 服务启动以来,每个异常的累计数 * 9、DetailHttpCodeData 服务启动以来,每个Http状态码的累计数 * 10、可通过 {host}\/dotweb\/state 获取数据 * 【新增】dotweb\/state\/interval接口,提供按分钟级的基础数据查询 * 主要数据说明: * 1、Time 表示查询时间的字符串,最小单位为分钟,例如:201709251200 * 2、RequestCount 单位时间内累计请求数(排除了\"\/dotweb\/\"下系统自有页面的访问数) * 3、ErrorCount 单位时间内累计错误数 * 2017-09-25 13:00<commit_after>package core\n\nimport (\n\t\"github.com\/devfeel\/dotweb\/framework\/json\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar GlobalState *ServerStateInfo\n\nconst (\n\tminuteTimeLayout = \"200601021504\"\n\tdateTimeLayout = \"2006-01-02 15:04:05\"\n\tdefaultReserveMinutes = 60\n\tdefaultCheckTimeMinutes = 10\n)\n\nfunc init() {\n\tGlobalState = &ServerStateInfo{\n\t\tServerStartTime: time.Now(),\n\t\tTotalRequestCount: 0,\n\t\tTotalErrorCount: 0,\n\t\tIntervalRequestData: NewItemContext(),\n\t\tDetailRequestPageData: NewItemContext(),\n\t\tIntervalErrorData: NewItemContext(),\n\t\tDetailErrorPageData: NewItemContext(),\n\t\tDetailErrorData: NewItemContext(),\n\t\tDetailHTTPCodeData: NewItemContext(),\n\t\tdataChan_Request: make(chan *RequestInfo, 1000),\n\t\tdataChan_Error: make(chan *ErrorInfo, 1000),\n\t\tdataChan_HttpCode: make(chan *HttpCodeInfo, 1000),\n\t\tinfoPool: &pool{\n\t\t\trequestInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &RequestInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t\terrorInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &ErrorInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t\thttpCodeInfo: sync.Pool{\n\t\t\t\tNew: func() interface{} {\n\t\t\t\t\treturn &HttpCodeInfo{}\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tgo GlobalState.handleInfo()\n\tgo time.AfterFunc(time.Duration(defaultCheckTimeMinutes)*time.Minute, GlobalState.checkAndRemoveIntervalData)\n}\n\n\/\/pool定义\ntype pool struct {\n\trequestInfo sync.Pool\n\terrorInfo sync.Pool\n\thttpCodeInfo sync.Pool\n}\n\n\/\/http request count info\ntype RequestInfo struct {\n\tUrl string\n\tNum uint64\n}\n\n\/\/error count info\ntype ErrorInfo struct {\n\tUrl string\n\tErrMsg string\n\tNum uint64\n}\n\n\/\/httpcode count info\ntype HttpCodeInfo struct {\n\tUrl string\n\tCode int\n\tNum uint64\n}\n\n\/\/服务器状态信息\ntype ServerStateInfo struct {\n\t\/\/服务启动时间\n\tServerStartTime time.Time\n\t\/\/该运行期间总访问次数\n\tTotalRequestCount uint64\n\t\/\/单位时间内请求数据 - 按分钟为单位\n\tIntervalRequestData *ItemContext\n\t\/\/明细请求页面数据 - 以不带参数的访问url为key\n\tDetailRequestPageData *ItemContext\n\t\/\/该运行期间异常次数\n\tTotalErrorCount uint64\n\t\/\/单位时间内异常次数 - 按分钟为单位\n\tIntervalErrorData *ItemContext\n\t\/\/明细异常页面数据 - 以不带参数的访问url为key\n\tDetailErrorPageData *ItemContext\n\t\/\/明细异常数据 - 以不带参数的访问url为key\n\tDetailErrorData *ItemContext\n\t\/\/明细Http状态码数据 - 以HttpCode为key,例如200、500等\n\tDetailHTTPCodeData *ItemContext\n\n\tdataChan_Request chan *RequestInfo\n\tdataChan_Error chan *ErrorInfo\n\tdataChan_HttpCode chan *HttpCodeInfo\n\t\/\/对象池\n\tinfoPool *pool\n}\n\n\/\/ShowHtmlData show server state data html-string format\nfunc (state *ServerStateInfo) ShowHtmlData() string {\n\tdata := \"<html><body><div>\"\n\tdata += \"ServerStartTime : \" + state.ServerStartTime.Format(dateTimeLayout)\n\tdata += \"<br>\"\n\tdata += \"TotalRequestCount : \" + strconv.FormatUint(state.TotalRequestCount, 10)\n\tdata += \"<br>\"\n\tdata += \"TotalErrorCount : \" + strconv.FormatUint(state.TotalErrorCount, 10)\n\tdata += \"<br>\"\n\tstate.IntervalRequestData.RLock()\n\tdata += \"IntervalRequestData : \" + jsonutil.GetJsonString(state.IntervalRequestData.GetCurrentMap())\n\tstate.IntervalRequestData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailRequestPageData.RLock()\n\tdata += \"DetailRequestPageData : \" + jsonutil.GetJsonString(state.DetailRequestPageData.GetCurrentMap())\n\tstate.DetailRequestPageData.RUnlock()\n\tdata += \"<br>\"\n\tstate.IntervalErrorData.RLock()\n\tdata += \"IntervalErrorData : \" + jsonutil.GetJsonString(state.IntervalErrorData.GetCurrentMap())\n\tstate.IntervalErrorData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailErrorPageData.RLock()\n\tdata += \"DetailErrorPageData : \" + jsonutil.GetJsonString(state.DetailErrorPageData.GetCurrentMap())\n\tstate.DetailErrorPageData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailErrorData.RLock()\n\tdata += \"DetailErrorData : \" + jsonutil.GetJsonString(state.DetailErrorData.GetCurrentMap())\n\tstate.DetailErrorData.RUnlock()\n\tdata += \"<br>\"\n\tstate.DetailHTTPCodeData.RLock()\n\tdata += \"DetailHttpCodeData : \" + jsonutil.GetJsonString(state.DetailHTTPCodeData.GetCurrentMap())\n\tstate.DetailHTTPCodeData.RUnlock()\n\tdata += \"<\/div><\/body><\/html>\"\n\treturn data\n}\n\n\/\/QueryIntervalRequestData query request count by query time\nfunc (state *ServerStateInfo) QueryIntervalRequestData(queryKey string) uint64 {\n\treturn state.IntervalRequestData.GetUInt64(queryKey)\n}\n\n\/\/QueryIntervalErrorData query error count by query time\nfunc (state *ServerStateInfo) QueryIntervalErrorData(queryKey string) uint64 {\n\treturn state.IntervalErrorData.GetUInt64(queryKey)\n}\n\n\/\/AddRequestCount 增加请求数\nfunc (state *ServerStateInfo) AddRequestCount(page string, num uint64) uint64 {\n\tif strings.Index(page, \"\/dotweb\/\") != 0 {\n\t\tatomic.AddUint64(&state.TotalRequestCount, num)\n\t\tstate.addRequestData(page, num)\n\t}\n\treturn state.TotalRequestCount\n}\n\n\/\/AddHttpCodeCount 增加Http状态码数据\nfunc (state *ServerStateInfo) AddTTPCodeCount(page string, code int, num uint64) uint64 {\n\tif strings.Index(page, \"\/dotweb\/\") != 0 {\n\t\tstate.addHTTPCodeData(page, code, num)\n\t}\n\treturn state.TotalErrorCount\n}\n\n\/\/AddErrorCount 增加错误数\nfunc (state *ServerStateInfo) AddErrorCount(page string, err error, num uint64) uint64 {\n\tatomic.AddUint64(&state.TotalErrorCount, num)\n\tstate.addErrorData(page, err, num)\n\treturn state.TotalErrorCount\n}\n\nfunc (state *ServerStateInfo) addRequestData(page string, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.requestInfo.Get().(*RequestInfo)\n\tinfo.Url = page\n\tinfo.Num = num\n\tstate.dataChan_Request <- info\n}\n\nfunc (state *ServerStateInfo) addErrorData(page string, err error, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.errorInfo.Get().(*ErrorInfo)\n\tinfo.Url = page\n\tinfo.ErrMsg = err.Error()\n\tinfo.Num = num\n\tstate.dataChan_Error <- info\n}\n\nfunc (state *ServerStateInfo) addHTTPCodeData(page string, code int, num uint64) {\n\t\/\/get from pool\n\tinfo := state.infoPool.httpCodeInfo.Get().(*HttpCodeInfo)\n\tinfo.Url = page\n\tinfo.Code = code\n\tinfo.Num = num\n\tstate.dataChan_HttpCode <- info\n}\n\n\/\/处理日志内部函数\nfunc (state *ServerStateInfo) handleInfo() {\n\tfor {\n\t\tselect {\n\t\tcase info := <-state.dataChan_Request:\n\t\t\t{\n\t\t\t\t\/\/set detail page data\n\t\t\t\tkey := strings.ToLower(info.Url)\n\t\t\t\tval := state.DetailRequestPageData.GetUInt64(key)\n\t\t\t\tstate.DetailRequestPageData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set interval data\n\t\t\t\tkey = time.Now().Format(minuteTimeLayout)\n\t\t\t\tval = state.IntervalRequestData.GetUInt64(key)\n\t\t\t\tstate.IntervalRequestData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.requestInfo.Put(info)\n\t\t\t}\n\t\tcase info := <-state.dataChan_Error:\n\t\t\t{\n\t\t\t\t\/\/set detail error page data\n\t\t\t\tkey := strings.ToLower(info.Url)\n\t\t\t\tval := state.DetailErrorPageData.GetUInt64(key)\n\t\t\t\tstate.DetailErrorPageData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set detail error data\n\t\t\t\tkey = info.ErrMsg\n\t\t\t\tval = state.DetailErrorData.GetUInt64(key)\n\t\t\t\tstate.DetailErrorData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/set interval data\n\t\t\t\tkey = time.Now().Format(minuteTimeLayout)\n\t\t\t\tval = state.IntervalErrorData.GetUInt64(key)\n\t\t\t\tstate.IntervalErrorData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.errorInfo.Put(info)\n\t\t\t}\n\t\tcase info := <-state.dataChan_HttpCode:\n\t\t\t{\n\t\t\t\t\/\/set detail error page data\n\t\t\t\tkey := strconv.Itoa(info.Code)\n\t\t\t\tval := state.DetailHTTPCodeData.GetUInt64(key)\n\t\t\t\tstate.DetailHTTPCodeData.Set(key, val+info.Num)\n\n\t\t\t\t\/\/put info obj\n\t\t\t\tstate.infoPool.httpCodeInfo.Put(info)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/check and remove need to remove interval data with request and error\nfunc (state *ServerStateInfo) checkAndRemoveIntervalData() {\n\tvar needRemoveKey []string\n\n\t\/\/check IntervalRequestData\n\tstate.IntervalRequestData.RLock()\n\tif state.IntervalRequestData.Len() > 10 {\n\t\tfor k := range state.IntervalRequestData.GetCurrentMap() {\n\t\t\tif t, err := time.Parse(minuteTimeLayout, k); err != nil {\n\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t} else {\n\t\t\t\tif time.Now().Sub(t) > defaultReserveMinutes {\n\t\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstate.IntervalRequestData.RUnlock()\n\t\/\/remove keys\n\tfor _, v := range needRemoveKey {\n\t\tstate.IntervalRequestData.Remove(v)\n\t}\n\n\t\/\/check IntervalErrorData\n\tneedRemoveKey = []string{}\n\tstate.IntervalErrorData.RLock()\n\tif state.IntervalErrorData.Len() > 10 {\n\t\tfor k := range state.IntervalErrorData.GetCurrentMap() {\n\t\t\tif t, err := time.Parse(minuteTimeLayout, k); err != nil {\n\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t} else {\n\t\t\t\tif time.Now().Sub(t) > defaultReserveMinutes {\n\t\t\t\t\tneedRemoveKey = append(needRemoveKey, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/remove keys\n\tfor _, v := range needRemoveKey {\n\t\tstate.IntervalErrorData.Remove(v)\n\t}\n\n\ttime.AfterFunc(time.Duration(defaultCheckTimeMinutes)*time.Minute, state.checkAndRemoveIntervalData)\n}\n<|endoftext|>"} {"text":"<commit_before>package program\n\n\/\/ Version can be requested through the command line with:\n\/\/\n\/\/ c2go -v\n\/\/\n\/\/ See https:\/\/github.com\/elliotchance\/c2go\/wiki\/Release-Process\nconst Version = \"v0.25.3 Dubnium 2018-11-11\"\n<commit_msg>Bump version: v0.25.4 Dubnium 2018-11-11<commit_after>package program\n\n\/\/ Version can be requested through the command line with:\n\/\/\n\/\/ c2go -v\n\/\/\n\/\/ See https:\/\/github.com\/elliotchance\/c2go\/wiki\/Release-Process\nconst Version = \"v0.25.4 Dubnium 2018-11-11\"\n<|endoftext|>"} {"text":"<commit_before>package program\n\n\/\/ Version can be requested through the command line with:\n\/\/\n\/\/ c2go -v\n\/\/\n\/\/ See https:\/\/github.com\/elliotchance\/c2go\/wiki\/Release-Process\nconst Version = \"v0.21.0 Zinc 2018-01-11\"\n<commit_msg>Bump version: v0.21.2 Zinc 2018-01-26<commit_after>package program\n\n\/\/ Version can be requested through the command line with:\n\/\/\n\/\/ c2go -v\n\/\/\n\/\/ See https:\/\/github.com\/elliotchance\/c2go\/wiki\/Release-Process\nconst Version = \"v0.21.2 Zinc 2018-01-26\"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package properties is used to read or write or modify the properties document.\npackage properties\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"container\/list\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\ntype element struct {\n\t\/\/ # 注释行\n\t\/\/ ! 注释行\n\t\/\/ ' ' 空白行或者空行\n\t\/\/ = 等号分隔的属性行\n\t\/\/ : 冒号分隔的属性行\n\ttypo byte \/\/ 行类型\n\tvalue string \/\/ 行的内容,如果是注释注释引导符也包含在内\n\tkey string \/\/ 如果是属性行这里表示属性的key\n}\n\n\/\/ PropertiesDocument The properties document in memory.\ntype PropertiesDocument struct {\n\telems *list.List\n\tprops map[string]*list.Element\n}\n\n\/\/ New is used to create a new and empty properties document.\n\/\/ \n\/\/ It's used to generate a new document.\nfunc New() *PropertiesDocument {\n\tdoc := new(PropertiesDocument)\n\tdoc.elems = list.New()\n\tdoc.props = make(map[string]*list.Element)\n\treturn doc\n}\n\n\n\/\/ Save is used to save the doc to file or stream.\nfunc Save(doc *PropertiesDocument, writer io.Writer) error {\n\tvar err error\n\n\tdoc.Accept(func(typo byte, value string, key string) bool {\n\t\tswitch typo {\n\t\tcase '#', '!', ' ':\n\t\t\t_, err = fmt.Fprintln(writer, value)\n\t\tcase '=', ':':\n\t\t\t_, err = fmt.Fprintf(writer, \"%s%c%s\\n\", key, typo, value)\n\t\t}\n\n\t\treturn nil == err\n\t})\n\n\treturn err\n}\n\n\/\/ Load is used to create the properties document from a file or a stream.\nfunc Load(reader io.Reader) (doc *PropertiesDocument, err error) {\n\n\t\/\/ 创建一个Properties对象\n\tdoc = New()\n\n\t\/\/ 创建一个扫描器\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\t\/\/ 逐行读取\n\t\tline := scanner.Bytes()\n\n\t\t\/\/ 遇到空行\n\t\tif 0 == len(line) {\n\t\t\tdoc.elems.PushBack(&element{typo: ' ', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 找到第一个非空白字符\n\t\tpos := bytes.IndexFunc(line, func(r rune) bool {\n\t\t\treturn !unicode.IsSpace(r)\n\t\t})\n\n\t\t\/\/ 遇到空白行\n\t\tif -1 == pos {\n\t\t\tdoc.elems.PushBack(&element{typo: ' ', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 遇到注释行\n\t\tif '#' == line[pos] {\n\t\t\tdoc.elems.PushBack(&element{typo: '#', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\tif '!' == line[pos] {\n\t\t\tdoc.elems.PushBack(&element{typo: '!', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 找到第一个等号的位置\n\t\tend := bytes.IndexFunc(line[pos+1:], func(r rune) bool {\n\t\t\treturn ('=' == r) || (':' == r)\n\t\t})\n\n\t\t\/\/ 没有=,说明该配置项只有key\n\t\tkey := \"\"\n\t\tvalue := \"\"\n\t\tif -1 == end {\n\t\t\tkey = string(bytes.TrimRightFunc(line[pos:], func(r rune) bool {\n\t\t\t\treturn unicode.IsSpace(r)\n\t\t\t}))\n\t\t} else {\n\t\t\tkey = string(bytes.TrimRightFunc(line[pos:pos+1+end], func(r rune) bool {\n\t\t\t\treturn unicode.IsSpace(r)\n\t\t\t}))\n\n\t\t\tvalue = string(bytes.TrimSpace(line[pos+1+end+1:]))\n\t\t}\n\n\t\tvar typo byte = '='\n\t\tif end > 0 {\n\t\t\ttypo = line[pos+1+end]\n\t\t}\n\t\telem := &element{typo: typo, key: key, value: value}\n\t\tlistelem := doc.elems.PushBack(elem)\n\t\tdoc.props[key] = listelem\n\t}\n\n\tif err = scanner.Err(); nil != err {\n\t\treturn nil, err\n\t}\n\n\treturn doc, nil\n}\n\n\/\/ Get Retrive the value from PropertiesDocument.\n\/\/\n\/\/ If the item is not exist, the exist is false.\nfunc (p PropertiesDocument) Get(key string) (value string, exist bool) {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn \"\", ok\n\t}\n\n\treturn e.Value.(*element).value, ok\n}\n\n\/\/ Set Update the value of the item of the key.\n\/\/\n\/\/ Create a new item if the item of the key is not exist.\nfunc (p *PropertiesDocument) Set(key string, value string) {\n\te, ok := p.props[key]\n\tif !ok {\n\t\tp.props[key] = p.elems.PushBack(&element{typo: '=', key: key, value: value})\n\t\treturn\n\t}\n\n\te.Value.(*element).value = value\n\treturn\n}\n\n\/\/ Del Delete the exist item.\n\/\/\n\/\/ If the item is not exist, return false.\nfunc (p *PropertiesDocument) Del(key string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tp.Uncomment(key)\n\tp.elems.Remove(e)\n\tdelete(p.props, key)\n\treturn true\n}\n\n\/\/ Comment Append comments for the special item.\n\/\/\n\/\/ Return false if the special item is not exist.\nfunc (p *PropertiesDocument) Comment(key string, comments string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\t\/\/ 如果所有注释为空\n\tif len(comments) <= 0 {\n\t\tp.elems.InsertBefore(&element{typo: '#', value: \"#\"}, e)\n\t\treturn true\n\t}\n\n\t\/\/ 创建一个新的Scanner\n\tscanner := bufio.NewScanner(strings.NewReader(comments))\n\tfor scanner.Scan() {\n\t\tp.elems.InsertBefore(&element{typo: '#', value: \"#\" + scanner.Text()}, e)\n\t}\n\n\treturn true\n}\n\n\/\/ Uncomment Remove all of the comments for the special item.\n\/\/\n\/\/ Return false if the special item is not exist.\nfunc (p *PropertiesDocument) Uncomment(key string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor item := e.Prev(); nil != item; {\n\t\tdel := item\n\t\titem = item.Prev()\n\n\t\tif ('=' == del.Value.(*element).typo) ||\n\t\t\t(':' == del.Value.(*element).typo) ||\n\t\t\t(' ' == del.Value.(*element).typo) {\n\t\t\tbreak\n\t\t}\n\n\t\tp.elems.Remove(del)\n\t}\n\n\treturn true\n}\n\n\/\/ Accept Traverse every element of the document, include comment.\n\/\/\n\/\/ The typo parameter special the element type.\n\/\/ If typo is '#' or '!' means current element is a comment.\n\/\/ If typo is ' ' means current element is a empty or a space line.\n\/\/ If typo is '=' or ':' means current element is a key-value pair.\n\/\/ The traverse will be terminated if f return false.\nfunc (p PropertiesDocument) Accept(f func(typo byte, value string, key string) bool) {\n\tfor e := p.elems.Front(); e != nil; e = e.Next() {\n\t\telem := e.Value.(*element)\n\t\tcontinues := f(elem.typo, elem.value, elem.key)\n\t\tif !continues {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Foreach Traverse all of the key-value pairs in the document.\n\/\/ The traverse will be terminated if f return false.\nfunc (p PropertiesDocument) Foreach(f func(value string, key string) bool) {\n\tfor e := p.elems.Front(); e != nil; e = e.Next() {\n\t\telem := e.Value.(*element)\n\t\tif ('=' == elem.typo) ||\n\t\t\t(':' == elem.typo) {\n\t\t\tcontinues := f(elem.value, elem.key)\n\t\t\tif !continues {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ StringDefault Retrive the string value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) StringDefault(key string, def string) string {\n\te, ok := p.props[key]\n\tif ok {\n\t\treturn e.Value.(*element).value\n\t}\n\n\treturn def\n}\n\n\/\/ IntDefault Retrive the int64 value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) IntDefault(key string, def int64) int64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseInt(e.Value.(*element).value, 10, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ UintDefault Same as IntDefault, but the return type is uint64.\nfunc (p PropertiesDocument) UintDefault(key string, def uint64) uint64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseUint(e.Value.(*element).value, 10, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ FloatDefault Retrive the float64 value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) FloatDefault(key string, def float64) float64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseFloat(e.Value.(*element).value, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ BoolDefault Retrive the bool value by key.\n\/\/ If the element is not exist, the def will be returned.\n\/\/ This function mapping \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\" as true.\n\/\/ This function mapping \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\" as false.\n\/\/ If the element is not exist of can not map to value of bool,the def will be returned.\nfunc (p PropertiesDocument) BoolDefault(key string, def bool) bool {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseBool(e.Value.(*element).value)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ ObjectDefault Map the value of the key to any object.\n\/\/ The f is the customized mapping function.\n\/\/ Return def if the element is not exist of f have a error returned.\nfunc (p PropertiesDocument) ObjectDefault(key string, def interface{}, f func(k string, v string) (interface{}, error)) interface{} {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := f(key, e.Value.(*element).value)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ String Same as StringDefault but the def is \"\".\nfunc (p PropertiesDocument) String(key string) string {\n\treturn p.StringDefault(key, \"\")\n}\n\n\/\/ Int is ame as IntDefault but the def is 0 .\nfunc (p PropertiesDocument) Int(key string) int64 {\n\treturn p.IntDefault(key, 0)\n}\n\n\/\/ Uint Same as UintDefault but the def is 0 .\nfunc (p PropertiesDocument) Uint(key string) uint64 {\n\treturn p.UintDefault(key, 0)\n}\n\n\/\/ Float is same as FloatDefault but the def is 0.0 .\nfunc (p PropertiesDocument) Float(key string) float64 {\n\treturn p.FloatDefault(key, 0.0)\n}\n\n\/\/ Bool is same as BoolDefault but the def is false.\nfunc (p PropertiesDocument) Bool(key string) bool {\n\treturn p.BoolDefault(key, false)\n}\n\n\/\/ Object is same as ObjectDefault but the def is nil.\n\/\/\n\/\/ Notice: If the return value can not be assign to nil, this function will panic\/\nfunc (p PropertiesDocument) Object(key string, f func(k string, v string) (interface{}, error)) interface{} {\n\treturn p.ObjectDefault(key, interface{}(nil), f)\n}\n<commit_msg>准备将几个开源的构建工程利用起来<commit_after>\/\/ Package properties is used to read or write or modify the properties document.\npackage properties\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"container\/list\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n)\n\ntype element struct {\n\t\/\/ # 注释行\n\t\/\/ ! 注释行\n\t\/\/ ' ' 空白行或者空行\n\t\/\/ = 等号分隔的属性行\n\t\/\/ : 冒号分隔的属性行\n\ttypo byte \/\/ 行类型\n\tvalue string \/\/ 行的内容,如果是注释注释引导符也包含在内\n\tkey string \/\/ 如果是属性行这里表示属性的key\n}\n\n\/\/ PropertiesDocument The properties document in memory.\ntype PropertiesDocument struct {\n\telems *list.List\n\tprops map[string]*list.Element\n}\n\n\/\/ New is used to create a new and empty properties document.\n\/\/\n\/\/ It's used to generate a new document.\nfunc New() *PropertiesDocument {\n\tdoc := new(PropertiesDocument)\n\tdoc.elems = list.New()\n\tdoc.props = make(map[string]*list.Element)\n\treturn doc\n}\n\n\/\/ Save is used to save the doc to file or stream.\nfunc Save(doc *PropertiesDocument, writer io.Writer) error {\n\tvar err error\n\n\tdoc.Accept(func(typo byte, value string, key string) bool {\n\t\tswitch typo {\n\t\tcase '#', '!', ' ':\n\t\t\t_, err = fmt.Fprintln(writer, value)\n\t\tcase '=', ':':\n\t\t\t_, err = fmt.Fprintf(writer, \"%s%c%s\\n\", key, typo, value)\n\t\t}\n\n\t\treturn nil == err\n\t})\n\n\treturn err\n}\n\n\/\/ Load is used to create the properties document from a file or a stream.\nfunc Load(reader io.Reader) (doc *PropertiesDocument, err error) {\n\n\t\/\/ 创建一个Properties对象\n\tdoc = New()\n\n\t\/\/ 创建一个扫描器\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\t\/\/ 逐行读取\n\t\tline := scanner.Bytes()\n\n\t\t\/\/ 遇到空行\n\t\tif 0 == len(line) {\n\t\t\tdoc.elems.PushBack(&element{typo: ' ', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 找到第一个非空白字符\n\t\tpos := bytes.IndexFunc(line, func(r rune) bool {\n\t\t\treturn !unicode.IsSpace(r)\n\t\t})\n\n\t\t\/\/ 遇到空白行\n\t\tif -1 == pos {\n\t\t\tdoc.elems.PushBack(&element{typo: ' ', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 遇到注释行\n\t\tif '#' == line[pos] {\n\t\t\tdoc.elems.PushBack(&element{typo: '#', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\tif '!' == line[pos] {\n\t\t\tdoc.elems.PushBack(&element{typo: '!', value: string(line)})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ 找到第一个等号的位置\n\t\tend := bytes.IndexFunc(line[pos+1:], func(r rune) bool {\n\t\t\treturn ('=' == r) || (':' == r)\n\t\t})\n\n\t\t\/\/ 没有=,说明该配置项只有key\n\t\tkey := \"\"\n\t\tvalue := \"\"\n\t\tif -1 == end {\n\t\t\tkey = string(bytes.TrimRightFunc(line[pos:], func(r rune) bool {\n\t\t\t\treturn unicode.IsSpace(r)\n\t\t\t}))\n\t\t} else {\n\t\t\tkey = string(bytes.TrimRightFunc(line[pos:pos+1+end], func(r rune) bool {\n\t\t\t\treturn unicode.IsSpace(r)\n\t\t\t}))\n\n\t\t\tvalue = string(bytes.TrimSpace(line[pos+1+end+1:]))\n\t\t}\n\n\t\tvar typo byte = '='\n\t\tif end > 0 {\n\t\t\ttypo = line[pos+1+end]\n\t\t}\n\t\telem := &element{typo: typo, key: key, value: value}\n\t\tlistelem := doc.elems.PushBack(elem)\n\t\tdoc.props[key] = listelem\n\t}\n\n\tif err = scanner.Err(); nil != err {\n\t\treturn nil, err\n\t}\n\n\treturn doc, nil\n}\n\n\/\/ Get Retrive the value from PropertiesDocument.\n\/\/\n\/\/ If the item is not exist, the exist is false.\nfunc (p PropertiesDocument) Get(key string) (value string, exist bool) {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn \"\", ok\n\t}\n\n\treturn e.Value.(*element).value, ok\n}\n\n\/\/ Set Update the value of the item of the key.\n\/\/\n\/\/ Create a new item if the item of the key is not exist.\nfunc (p *PropertiesDocument) Set(key string, value string) {\n\te, ok := p.props[key]\n\tif !ok {\n\t\tp.props[key] = p.elems.PushBack(&element{typo: '=', key: key, value: value})\n\t\treturn\n\t}\n\n\te.Value.(*element).value = value\n\treturn\n}\n\n\/\/ Del Delete the exist item.\n\/\/\n\/\/ If the item is not exist, return false.\nfunc (p *PropertiesDocument) Del(key string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tp.Uncomment(key)\n\tp.elems.Remove(e)\n\tdelete(p.props, key)\n\treturn true\n}\n\n\/\/ Comment Append comments for the special item.\n\/\/\n\/\/ Return false if the special item is not exist.\nfunc (p *PropertiesDocument) Comment(key string, comments string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\t\/\/ 如果所有注释为空\n\tif len(comments) <= 0 {\n\t\tp.elems.InsertBefore(&element{typo: '#', value: \"#\"}, e)\n\t\treturn true\n\t}\n\n\t\/\/ 创建一个新的Scanner\n\tscanner := bufio.NewScanner(strings.NewReader(comments))\n\tfor scanner.Scan() {\n\t\tp.elems.InsertBefore(&element{typo: '#', value: \"#\" + scanner.Text()}, e)\n\t}\n\n\treturn true\n}\n\n\/\/ Uncomment Remove all of the comments for the special item.\n\/\/\n\/\/ Return false if the special item is not exist.\nfunc (p *PropertiesDocument) Uncomment(key string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor item := e.Prev(); nil != item; {\n\t\tdel := item\n\t\titem = item.Prev()\n\n\t\tif ('=' == del.Value.(*element).typo) ||\n\t\t\t(':' == del.Value.(*element).typo) ||\n\t\t\t(' ' == del.Value.(*element).typo) {\n\t\t\tbreak\n\t\t}\n\n\t\tp.elems.Remove(del)\n\t}\n\n\treturn true\n}\n\n\/\/ Accept Traverse every element of the document, include comment.\n\/\/\n\/\/ The typo parameter special the element type.\n\/\/ If typo is '#' or '!' means current element is a comment.\n\/\/ If typo is ' ' means current element is a empty or a space line.\n\/\/ If typo is '=' or ':' means current element is a key-value pair.\n\/\/ The traverse will be terminated if f return false.\nfunc (p PropertiesDocument) Accept(f func(typo byte, value string, key string) bool) {\n\tfor e := p.elems.Front(); e != nil; e = e.Next() {\n\t\telem := e.Value.(*element)\n\t\tcontinues := f(elem.typo, elem.value, elem.key)\n\t\tif !continues {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Foreach Traverse all of the key-value pairs in the document.\n\/\/ The traverse will be terminated if f return false.\nfunc (p PropertiesDocument) Foreach(f func(value string, key string) bool) {\n\tfor e := p.elems.Front(); e != nil; e = e.Next() {\n\t\telem := e.Value.(*element)\n\t\tif ('=' == elem.typo) ||\n\t\t\t(':' == elem.typo) {\n\t\t\tcontinues := f(elem.value, elem.key)\n\t\t\tif !continues {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ StringDefault Retrive the string value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) StringDefault(key string, def string) string {\n\te, ok := p.props[key]\n\tif ok {\n\t\treturn e.Value.(*element).value\n\t}\n\n\treturn def\n}\n\n\/\/ IntDefault Retrive the int64 value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) IntDefault(key string, def int64) int64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseInt(e.Value.(*element).value, 10, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ UintDefault Same as IntDefault, but the return type is uint64.\nfunc (p PropertiesDocument) UintDefault(key string, def uint64) uint64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseUint(e.Value.(*element).value, 10, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ FloatDefault Retrive the float64 value by key.\n\/\/ If the element is not exist, the def will be returned.\nfunc (p PropertiesDocument) FloatDefault(key string, def float64) float64 {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseFloat(e.Value.(*element).value, 64)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ BoolDefault Retrive the bool value by key.\n\/\/ If the element is not exist, the def will be returned.\n\/\/ This function mapping \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\" as true.\n\/\/ This function mapping \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\" as false.\n\/\/ If the element is not exist of can not map to value of bool,the def will be returned.\nfunc (p PropertiesDocument) BoolDefault(key string, def bool) bool {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := strconv.ParseBool(e.Value.(*element).value)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ ObjectDefault Map the value of the key to any object.\n\/\/ The f is the customized mapping function.\n\/\/ Return def if the element is not exist of f have a error returned.\nfunc (p PropertiesDocument) ObjectDefault(key string, def interface{}, f func(k string, v string) (interface{}, error)) interface{} {\n\te, ok := p.props[key]\n\tif ok {\n\t\tv, err := f(key, e.Value.(*element).value)\n\t\tif nil != err {\n\t\t\treturn def\n\t\t}\n\n\t\treturn v\n\t}\n\n\treturn def\n}\n\n\/\/ String Same as StringDefault but the def is \"\".\nfunc (p PropertiesDocument) String(key string) string {\n\treturn p.StringDefault(key, \"\")\n}\n\n\/\/ Int is ame as IntDefault but the def is 0 .\nfunc (p PropertiesDocument) Int(key string) int64 {\n\treturn p.IntDefault(key, 0)\n}\n\n\/\/ Uint Same as UintDefault but the def is 0 .\nfunc (p PropertiesDocument) Uint(key string) uint64 {\n\treturn p.UintDefault(key, 0)\n}\n\n\/\/ Float is same as FloatDefault but the def is 0.0 .\nfunc (p PropertiesDocument) Float(key string) float64 {\n\treturn p.FloatDefault(key, 0.0)\n}\n\n\/\/ Bool is same as BoolDefault but the def is false.\nfunc (p PropertiesDocument) Bool(key string) bool {\n\treturn p.BoolDefault(key, false)\n}\n\n\/\/ Object is same as ObjectDefault but the def is nil.\n\/\/\n\/\/ Notice: If the return value can not be assign to nil, this function will panic\/\nfunc (p PropertiesDocument) Object(key string, f func(k string, v string) (interface{}, error)) interface{} {\n\treturn p.ObjectDefault(key, interface{}(nil), f)\n}\n<|endoftext|>"} {"text":"<commit_before>package benchmarkbbs_test\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\"\n\t\"code.cloudfoundry.org\/bbs\/db\"\n\t\"code.cloudfoundry.org\/bbs\/db\/sqldb\"\n\t\"code.cloudfoundry.org\/bbs\/encryption\"\n\t\"code.cloudfoundry.org\/bbs\/format\"\n\t\"code.cloudfoundry.org\/bbs\/guidprovider\"\n\t\"code.cloudfoundry.org\/bbs\/models\"\n\tbenchmarkconfig \"code.cloudfoundry.org\/benchmarkbbs\/config\"\n\t\"code.cloudfoundry.org\/benchmarkbbs\/generator\"\n\t\"code.cloudfoundry.org\/benchmarkbbs\/reporter\"\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/clock\"\n\tfakes \"code.cloudfoundry.org\/diego-logging-client\/testhelpers\"\n\t\"code.cloudfoundry.org\/executor\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/locket\"\n\tlocketmodels \"code.cloudfoundry.org\/locket\/models\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3manager\"\n\t\"github.com\/zorkian\/go-datadog-api\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\texpectedLRPCount int\n\n\texpectedActualLRPCounts map[string]int\n\tconfig benchmarkconfig.BenchmarkBBSConfig\n\n\tlogger lager.Logger\n\tlocketClient locketmodels.LocketClient\n\tsqlDB *sqldb.SQLDB\n\tactiveDB db.DB\n\tbbsClient bbs.InternalClient\n\tdataDogClient *datadog.Client\n\tdataDogReporter reporter.DataDogReporter\n\treporters []Reporter\n)\n\nconst (\n\tDEBUG = \"debug\"\n\tINFO = \"info\"\n\tERROR = \"error\"\n\tFATAL = \"fatal\"\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tconfigFile := flag.String(\"config\", \"\", \"config file\")\n\tflag.Parse()\n\n\tvar err error\n\tconfig, err = benchmarkconfig.NewBenchmarkBBSConfig(*configFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif config.BBSAddress == \"\" {\n\t\tlog.Fatal(\"bbsAddress is required\")\n\t}\n\n\tBenchmarkTests(config.NumReps, config.NumTrials, config.LocalRouteEmitters)\n}\n\nfunc TestBenchmarkBbs(t *testing.T) {\n\tvar lagerLogLevel lager.LogLevel\n\tswitch config.LogLevel {\n\tcase DEBUG:\n\t\tlagerLogLevel = lager.DEBUG\n\tcase INFO:\n\t\tlagerLogLevel = lager.INFO\n\tcase ERROR:\n\t\tlagerLogLevel = lager.ERROR\n\tcase FATAL:\n\t\tlagerLogLevel = lager.FATAL\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown log level: %s\", config.LogLevel))\n\t}\n\n\tvar logWriter io.Writer\n\tif config.LogFilename == \"\" {\n\t\tlogWriter = GinkgoWriter\n\t} else {\n\t\tlogFile, err := os.OpenFile(config.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\tpanic(\"Error opening file, err: \" + err.Error())\n\t\t}\n\t\tdefer logFile.Close()\n\n\t\tlogWriter = logFile\n\t}\n\n\tlogger = lager.NewLogger(\"bbs-benchmarks-test\")\n\tlogger.RegisterSink(lager.NewWriterSink(logWriter, lagerLogLevel))\n\n\treporters = []Reporter{}\n\n\tif config.DataDogAPIKey != \"\" && config.DataDogAppKey != \"\" {\n\t\tdataDogClient = datadog.NewClient(config.DataDogAPIKey, config.DataDogAppKey)\n\t\tdataDogReporter = reporter.NewDataDogReporter(logger, config.MetricPrefix, dataDogClient)\n\t\treporters = append(reporters, &dataDogReporter)\n\t}\n\n\tif config.AwsAccessKeyID != \"\" && config.AwsSecretAccessKey != \"\" && config.AwsBucketName != \"\" {\n\t\tcreds := credentials.NewStaticCredentials(config.AwsAccessKeyID, config.AwsSecretAccessKey, \"\")\n\t\ts3Client := s3.New(&aws.Config{\n\t\t\tRegion: &config.AwsRegion,\n\t\t\tCredentials: creds,\n\t\t})\n\t\tuploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: s3Client})\n\t\treporter := reporter.NewS3Reporter(logger, config.AwsBucketName, uploader)\n\t\treporters = append(reporters, &reporter)\n\t}\n\n\tRegisterFailHandler(Fail)\n\tRunSpecsWithDefaultAndCustomReporters(t, \"Benchmark BBS Suite\", reporters)\n}\n\ntype expectedLRPCounts struct {\n\tDesiredLRPCount int\n\n\tActualLRPCounts map[string]int\n}\n\nfunc initializeActiveDB() *sql.DB {\n\tif activeDB != nil {\n\t\treturn nil\n\t}\n\n\tif config.DatabaseConnectionString == \"\" {\n\t\tlogger.Fatal(\"no-sql-configuration\", errors.New(\"no-sql-configuration\"))\n\t}\n\n\tif config.DatabaseDriver == \"postgres\" && !strings.Contains(config.DatabaseConnectionString, \"sslmode\") {\n\t\tconfig.DatabaseConnectionString = fmt.Sprintf(\"%s?sslmode=disable\", config.DatabaseConnectionString)\n\t}\n\n\tsqlConn, err := sql.Open(config.DatabaseDriver, config.DatabaseConnectionString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-open-sql\", err)\n\t}\n\tsqlConn.SetMaxOpenConns(1)\n\tsqlConn.SetMaxIdleConns(1)\n\n\terr = sqlConn.Ping()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tsqlDB = initializeSQLDB(logger, sqlConn)\n\tactiveDB = sqlDB\n\treturn sqlConn\n}\n\nvar _ = BeforeSuite(func() {\n\tbbsClient = initializeBBSClient(logger, time.Duration(config.BBSClientHTTPTimeout))\n\n\tconn := initializeActiveDB()\n\tinitializeLocketClient()\n\n\tresp, err := locketClient.FetchAll(context.Background(), &locketmodels.FetchAllRequest{TypeCode: locketmodels.PRESENCE})\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcells := make(map[string]struct{})\n\tfor i := 0; i < config.NumReps; i++ {\n\t\tcells[fmt.Sprintf(\"cell-%d\", i)] = struct{}{}\n\t}\n\n\tif config.ReseedDatabase || config.NumReps != len(resp.Resources) {\n\t\tvar err error\n\n\t\tcleanupSQLDB(conn)\n\t\tresetLocketDB(locketClient, cells, resp)\n\n\t\tif config.DesiredLRPs > 0 {\n\t\t\tdesiredLRPGenerator := generator.NewDesiredLRPGenerator(\n\t\t\t\tconfig.ErrorTolerance,\n\t\t\t\tconfig.MetricPrefix,\n\t\t\t\tconfig.NumPopulateWorkers,\n\t\t\t\tbbsClient,\n\t\t\t\tdataDogClient,\n\t\t\t)\n\n\t\t\texpectedLRPCount, expectedActualLRPCounts, err = desiredLRPGenerator.Generate(logger, config.NumReps, config.DesiredLRPs)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t} else {\n\t\tresetUnclaimedActualLRPs(conn, cells)\n\n\t\tquery := `\n\t\t\tSELECT\n\t\t\t\tCOUNT(*)\n\t\t\tFROM desired_lrps\n\t\t`\n\t\tres := conn.QueryRow(query)\n\t\terr := res.Scan(&expectedLRPCount)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\texpectedActualLRPCounts = make(map[string]int)\n\t\tquery = `\n\t\t\tSELECT\n\t\t\t\tcell_id, COUNT(*)\n\t\t\tFROM actual_lrps\n\t\t\tGROUP BY cell_id\n\t\t`\n\t\trows, err := conn.Query(query)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdefer rows.Close()\n\t\tfor rows.Next() {\n\t\t\tvar cellId string\n\t\t\tvar count int\n\t\t\terr = rows.Scan(&cellId, &count)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\texpectedActualLRPCounts[cellId] = count\n\t\t}\n\t}\n\n\tif float64(expectedLRPCount) < float64(config.DesiredLRPs)*config.ErrorTolerance {\n\t\tFail(fmt.Sprintf(\"Error rate of %.3f for actuals exceeds tolerance of %.3f\", float64(expectedLRPCount)\/float64(config.DesiredLRPs), config.ErrorTolerance))\n\t}\n})\n\nfunc initializeSQLDB(logger lager.Logger, sqlConn *sql.DB) *sqldb.SQLDB {\n\tkey, keys, err := config.EncryptionConfig.Parse()\n\tif err != nil {\n\t\tlogger.Fatal(\"cannot-setup-encryption\", err)\n\t}\n\tkeyManager, err := encryption.NewKeyManager(key, keys)\n\tif err != nil {\n\t\tlogger.Fatal(\"cannot-setup-encryption\", err)\n\t}\n\tcryptor := encryption.NewCryptor(keyManager, rand.Reader)\n\n\treturn sqldb.NewSQLDB(\n\t\tsqlConn,\n\t\t1000,\n\t\t1000,\n\t\tformat.ENCODED_PROTO,\n\t\tcryptor,\n\t\tguidprovider.DefaultGuidProvider,\n\t\tclock.NewClock(),\n\t\tconfig.DatabaseDriver,\n\t\t&fakes.FakeIngressClient{},\n\t)\n}\n\nfunc initializeLocketClient() {\n\tvar err error\n\tif config.SkipCertVerify {\n\t\tlocketClient, err = locket.NewClientSkipCertVerify(logger, config.ClientLocketConfig)\n\t} else {\n\t\tlocketClient, err = locket.NewClient(logger, config.ClientLocketConfig)\n\t}\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc initializeBBSClient(logger lager.Logger, bbsClientHTTPTimeout time.Duration) bbs.InternalClient {\n\tbbsURL, err := url.Parse(config.BBSAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(config.BBSAddress)\n\t}\n\n\tcfhttp.Initialize(bbsClientHTTPTimeout)\n\tvar bbsClient bbs.InternalClient\n\tif config.SkipCertVerify {\n\t\tbbsClient, err = bbs.NewSecureSkipVerifyClient(\n\t\t\tconfig.BBSAddress,\n\t\t\tconfig.BBSClientCert,\n\t\t\tconfig.BBSClientKey,\n\t\t\t1,\n\t\t\t25000,\n\t\t)\n\t} else {\n\t\tbbsClient, err = bbs.NewSecureClient(\n\t\t\tconfig.BBSAddress,\n\t\t\tconfig.BBSCACert,\n\t\t\tconfig.BBSClientCert,\n\t\t\tconfig.BBSClientKey,\n\t\t\t1,\n\t\t\t25000,\n\t\t)\n\t}\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc cleanupSQLDB(conn *sql.DB) {\n\t_, err := conn.Exec(\"TRUNCATE actual_lrps\")\n\tExpect(err).NotTo(HaveOccurred())\n\t_, err = conn.Exec(\"TRUNCATE desired_lrps\")\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc resetLocketDB(locketClient locketmodels.LocketClient, cells map[string]struct{}, resp *locketmodels.FetchAllResponse) {\n\tfor _, resource := range resp.Resources {\n\t\tif _, ok := cells[resource.Owner]; !ok {\n\t\t\t_, err := locketClient.Release(context.Background(), &locketmodels.ReleaseRequest{Resource: resource})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t}\n\n\tfor cellId, _ := range cells {\n\t\t_, err := locketClient.Lock(context.Background(), &locketmodels.LockRequest{Resource: lockResource(cellId), TtlInSeconds: longTermTtl})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n}\n\nfunc lockResource(cellID string) *locketmodels.Resource {\n\tresources := executor.ExecutorResources{\n\t\tMemoryMB: 1000,\n\t\tDiskMB: 2000,\n\t\tContainers: 300,\n\t}\n\tcellCapacity := models.NewCellCapacity(int32(resources.MemoryMB), int32(resources.DiskMB), int32(resources.Containers))\n\tcellPresence := models.NewCellPresence(cellID, cellID+\".address\", cellID+\".url\",\n\t\t\"z1\", cellCapacity, []string{\"providers\"},\n\t\t[]string{\"cflinuxfs9\"}, []string{}, []string{})\n\n\tpayload, err := json.Marshal(cellPresence)\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn &locketmodels.Resource{\n\t\tKey: cellID,\n\t\tOwner: cellID,\n\t\tValue: string(payload),\n\t\tType: locketmodels.PresenceType,\n\t}\n}\n\nfunc resetUnclaimedActualLRPs(conn *sql.DB, cells map[string]struct{}) {\n\texistingCellIds := make(map[string]struct{})\n\n\tquery := `\n\t\t\tSELECT\n\t\t\t\tcell_id\n\t\t\tFROM actual_lrps\n\t\t\tGROUP BY cell_id\n\t\t`\n\trows, err := conn.Query(query)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar cellId string\n\t\terr = rows.Scan(&cellId)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\texistingCellIds[cellId] = struct{}{}\n\t}\n\n\tmissingCells := findMissingCells(existingCellIds, cells)\n\n\tquery = `\n\t\t\t\tSELECT\n\t\t\t\t\tprocess_guid, domain\n\t\t\t\tFROM actual_lrps WHERE cell_id = ''\n\t\t\t`\n\n\ttype guidAndDomain struct {\n\t\tProcessGuid string\n\t\tDomain string\n\t}\n\tvar lrps []guidAndDomain\n\n\trows, err = conn.Query(query)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar guid, domain string\n\t\terr = rows.Scan(&guid, &domain)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tlrps = append(lrps, guidAndDomain{ProcessGuid: guid, Domain: domain})\n\t}\n\n\tfor i, lrp := range lrps {\n\t\tcellID := missingCells[i%len(missingCells)]\n\t\tactualLRPInstanceKey := &models.ActualLRPInstanceKey{InstanceGuid: lrp.ProcessGuid + \"-i\", CellId: cellID}\n\t\tnetInfo := models.NewActualLRPNetInfo(\"1.2.3.4\", \"2.2.2.2\", models.NewPortMapping(61999, 8080))\n\t\terr := bbsClient.StartActualLRP(logger, &models.ActualLRPKey{Domain: lrp.Domain, ProcessGuid: lrp.ProcessGuid, Index: 0}, actualLRPInstanceKey, &netInfo)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n}\n\nfunc findMissingCells(existing map[string]struct{}, expected map[string]struct{}) []string {\n\tvar missing []string\n\tfor cell, _ := range expected {\n\t\tif _, ok := existing[cell]; !ok {\n\t\t\tmissing = append(missing, cell)\n\t\t}\n\t}\n\n\treturn missing\n}\n<commit_msg>update benchmarkbbs with new SQL helpers interfaces<commit_after>package benchmarkbbs_test\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\"\n\t\"code.cloudfoundry.org\/bbs\/db\"\n\t\"code.cloudfoundry.org\/bbs\/db\/sqldb\"\n\t\"code.cloudfoundry.org\/bbs\/db\/sqldb\/helpers\"\n\t\"code.cloudfoundry.org\/bbs\/encryption\"\n\t\"code.cloudfoundry.org\/bbs\/format\"\n\t\"code.cloudfoundry.org\/bbs\/guidprovider\"\n\t\"code.cloudfoundry.org\/bbs\/models\"\n\tbenchmarkconfig \"code.cloudfoundry.org\/benchmarkbbs\/config\"\n\t\"code.cloudfoundry.org\/benchmarkbbs\/generator\"\n\t\"code.cloudfoundry.org\/benchmarkbbs\/reporter\"\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/clock\"\n\tfakes \"code.cloudfoundry.org\/diego-logging-client\/testhelpers\"\n\t\"code.cloudfoundry.org\/executor\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"code.cloudfoundry.org\/locket\"\n\tlocketmodels \"code.cloudfoundry.org\/locket\/models\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3manager\"\n\t\"github.com\/zorkian\/go-datadog-api\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\texpectedLRPCount int\n\n\texpectedActualLRPCounts map[string]int\n\tconfig benchmarkconfig.BenchmarkBBSConfig\n\n\tlogger lager.Logger\n\tlocketClient locketmodels.LocketClient\n\tsqlDB *sqldb.SQLDB\n\tactiveDB db.DB\n\tbbsClient bbs.InternalClient\n\tdataDogClient *datadog.Client\n\tdataDogReporter reporter.DataDogReporter\n\treporters []Reporter\n)\n\nconst (\n\tDEBUG = \"debug\"\n\tINFO = \"info\"\n\tERROR = \"error\"\n\tFATAL = \"fatal\"\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tconfigFile := flag.String(\"config\", \"\", \"config file\")\n\tflag.Parse()\n\n\tvar err error\n\tconfig, err = benchmarkconfig.NewBenchmarkBBSConfig(*configFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif config.BBSAddress == \"\" {\n\t\tlog.Fatal(\"bbsAddress is required\")\n\t}\n\n\tBenchmarkTests(config.NumReps, config.NumTrials, config.LocalRouteEmitters)\n}\n\nfunc TestBenchmarkBbs(t *testing.T) {\n\tvar lagerLogLevel lager.LogLevel\n\tswitch config.LogLevel {\n\tcase DEBUG:\n\t\tlagerLogLevel = lager.DEBUG\n\tcase INFO:\n\t\tlagerLogLevel = lager.INFO\n\tcase ERROR:\n\t\tlagerLogLevel = lager.ERROR\n\tcase FATAL:\n\t\tlagerLogLevel = lager.FATAL\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown log level: %s\", config.LogLevel))\n\t}\n\n\tvar logWriter io.Writer\n\tif config.LogFilename == \"\" {\n\t\tlogWriter = GinkgoWriter\n\t} else {\n\t\tlogFile, err := os.OpenFile(config.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\tpanic(\"Error opening file, err: \" + err.Error())\n\t\t}\n\t\tdefer logFile.Close()\n\n\t\tlogWriter = logFile\n\t}\n\n\tlogger = lager.NewLogger(\"bbs-benchmarks-test\")\n\tlogger.RegisterSink(lager.NewWriterSink(logWriter, lagerLogLevel))\n\n\treporters = []Reporter{}\n\n\tif config.DataDogAPIKey != \"\" && config.DataDogAppKey != \"\" {\n\t\tdataDogClient = datadog.NewClient(config.DataDogAPIKey, config.DataDogAppKey)\n\t\tdataDogReporter = reporter.NewDataDogReporter(logger, config.MetricPrefix, dataDogClient)\n\t\treporters = append(reporters, &dataDogReporter)\n\t}\n\n\tif config.AwsAccessKeyID != \"\" && config.AwsSecretAccessKey != \"\" && config.AwsBucketName != \"\" {\n\t\tcreds := credentials.NewStaticCredentials(config.AwsAccessKeyID, config.AwsSecretAccessKey, \"\")\n\t\ts3Client := s3.New(&aws.Config{\n\t\t\tRegion: &config.AwsRegion,\n\t\t\tCredentials: creds,\n\t\t})\n\t\tuploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: s3Client})\n\t\treporter := reporter.NewS3Reporter(logger, config.AwsBucketName, uploader)\n\t\treporters = append(reporters, &reporter)\n\t}\n\n\tRegisterFailHandler(Fail)\n\tRunSpecsWithDefaultAndCustomReporters(t, \"Benchmark BBS Suite\", reporters)\n}\n\ntype expectedLRPCounts struct {\n\tDesiredLRPCount int\n\n\tActualLRPCounts map[string]int\n}\n\nfunc initializeActiveDB() *sql.DB {\n\tif activeDB != nil {\n\t\treturn nil\n\t}\n\n\tif config.DatabaseConnectionString == \"\" {\n\t\tlogger.Fatal(\"no-sql-configuration\", errors.New(\"no-sql-configuration\"))\n\t}\n\n\tif config.DatabaseDriver == \"postgres\" && !strings.Contains(config.DatabaseConnectionString, \"sslmode\") {\n\t\tconfig.DatabaseConnectionString = fmt.Sprintf(\"%s?sslmode=disable\", config.DatabaseConnectionString)\n\t}\n\n\tsqlConn, err := sql.Open(config.DatabaseDriver, config.DatabaseConnectionString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-to-open-sql\", err)\n\t}\n\tsqlConn.SetMaxOpenConns(1)\n\tsqlConn.SetMaxIdleConns(1)\n\n\terr = sqlConn.Ping()\n\tExpect(err).NotTo(HaveOccurred())\n\n\twrappedDB := helpers.NewMonitoredDB(sqlConn, helpers.NewQueryMonitor())\n\tsqlDB = initializeSQLDB(logger, wrappedDB)\n\tactiveDB = sqlDB\n\treturn sqlConn\n}\n\nvar _ = BeforeSuite(func() {\n\tbbsClient = initializeBBSClient(logger, time.Duration(config.BBSClientHTTPTimeout))\n\n\tconn := initializeActiveDB()\n\tinitializeLocketClient()\n\n\tresp, err := locketClient.FetchAll(context.Background(), &locketmodels.FetchAllRequest{TypeCode: locketmodels.PRESENCE})\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcells := make(map[string]struct{})\n\tfor i := 0; i < config.NumReps; i++ {\n\t\tcells[fmt.Sprintf(\"cell-%d\", i)] = struct{}{}\n\t}\n\n\tif config.ReseedDatabase || config.NumReps != len(resp.Resources) {\n\t\tvar err error\n\n\t\tcleanupSQLDB(conn)\n\t\tresetLocketDB(locketClient, cells, resp)\n\n\t\tif config.DesiredLRPs > 0 {\n\t\t\tdesiredLRPGenerator := generator.NewDesiredLRPGenerator(\n\t\t\t\tconfig.ErrorTolerance,\n\t\t\t\tconfig.MetricPrefix,\n\t\t\t\tconfig.NumPopulateWorkers,\n\t\t\t\tbbsClient,\n\t\t\t\tdataDogClient,\n\t\t\t)\n\n\t\t\texpectedLRPCount, expectedActualLRPCounts, err = desiredLRPGenerator.Generate(logger, config.NumReps, config.DesiredLRPs)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t} else {\n\t\tresetUnclaimedActualLRPs(conn, cells)\n\n\t\tquery := `\n\t\t\tSELECT\n\t\t\t\tCOUNT(*)\n\t\t\tFROM desired_lrps\n\t\t`\n\t\tres := conn.QueryRow(query)\n\t\terr := res.Scan(&expectedLRPCount)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\texpectedActualLRPCounts = make(map[string]int)\n\t\tquery = `\n\t\t\tSELECT\n\t\t\t\tcell_id, COUNT(*)\n\t\t\tFROM actual_lrps\n\t\t\tGROUP BY cell_id\n\t\t`\n\t\trows, err := conn.Query(query)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdefer rows.Close()\n\t\tfor rows.Next() {\n\t\t\tvar cellId string\n\t\t\tvar count int\n\t\t\terr = rows.Scan(&cellId, &count)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\texpectedActualLRPCounts[cellId] = count\n\t\t}\n\t}\n\n\tif float64(expectedLRPCount) < float64(config.DesiredLRPs)*config.ErrorTolerance {\n\t\tFail(fmt.Sprintf(\"Error rate of %.3f for actuals exceeds tolerance of %.3f\", float64(expectedLRPCount)\/float64(config.DesiredLRPs), config.ErrorTolerance))\n\t}\n})\n\nfunc initializeSQLDB(logger lager.Logger, sqlConn helpers.DB) *sqldb.SQLDB {\n\tkey, keys, err := config.EncryptionConfig.Parse()\n\tif err != nil {\n\t\tlogger.Fatal(\"cannot-setup-encryption\", err)\n\t}\n\tkeyManager, err := encryption.NewKeyManager(key, keys)\n\tif err != nil {\n\t\tlogger.Fatal(\"cannot-setup-encryption\", err)\n\t}\n\tcryptor := encryption.NewCryptor(keyManager, rand.Reader)\n\n\treturn sqldb.NewSQLDB(\n\t\tsqlConn,\n\t\t1000,\n\t\t1000,\n\t\tformat.ENCODED_PROTO,\n\t\tcryptor,\n\t\tguidprovider.DefaultGuidProvider,\n\t\tclock.NewClock(),\n\t\tconfig.DatabaseDriver,\n\t\t&fakes.FakeIngressClient{},\n\t)\n}\n\nfunc initializeLocketClient() {\n\tvar err error\n\tif config.SkipCertVerify {\n\t\tlocketClient, err = locket.NewClientSkipCertVerify(logger, config.ClientLocketConfig)\n\t} else {\n\t\tlocketClient, err = locket.NewClient(logger, config.ClientLocketConfig)\n\t}\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc initializeBBSClient(logger lager.Logger, bbsClientHTTPTimeout time.Duration) bbs.InternalClient {\n\tbbsURL, err := url.Parse(config.BBSAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(config.BBSAddress)\n\t}\n\n\tcfhttp.Initialize(bbsClientHTTPTimeout)\n\tvar bbsClient bbs.InternalClient\n\tif config.SkipCertVerify {\n\t\tbbsClient, err = bbs.NewSecureSkipVerifyClient(\n\t\t\tconfig.BBSAddress,\n\t\t\tconfig.BBSClientCert,\n\t\t\tconfig.BBSClientKey,\n\t\t\t1,\n\t\t\t25000,\n\t\t)\n\t} else {\n\t\tbbsClient, err = bbs.NewSecureClient(\n\t\t\tconfig.BBSAddress,\n\t\t\tconfig.BBSCACert,\n\t\t\tconfig.BBSClientCert,\n\t\t\tconfig.BBSClientKey,\n\t\t\t1,\n\t\t\t25000,\n\t\t)\n\t}\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc cleanupSQLDB(conn *sql.DB) {\n\t_, err := conn.Exec(\"TRUNCATE actual_lrps\")\n\tExpect(err).NotTo(HaveOccurred())\n\t_, err = conn.Exec(\"TRUNCATE desired_lrps\")\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc resetLocketDB(locketClient locketmodels.LocketClient, cells map[string]struct{}, resp *locketmodels.FetchAllResponse) {\n\tfor _, resource := range resp.Resources {\n\t\tif _, ok := cells[resource.Owner]; !ok {\n\t\t\t_, err := locketClient.Release(context.Background(), &locketmodels.ReleaseRequest{Resource: resource})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t}\n\n\tfor cellId, _ := range cells {\n\t\t_, err := locketClient.Lock(context.Background(), &locketmodels.LockRequest{Resource: lockResource(cellId), TtlInSeconds: longTermTtl})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n}\n\nfunc lockResource(cellID string) *locketmodels.Resource {\n\tresources := executor.ExecutorResources{\n\t\tMemoryMB: 1000,\n\t\tDiskMB: 2000,\n\t\tContainers: 300,\n\t}\n\tcellCapacity := models.NewCellCapacity(int32(resources.MemoryMB), int32(resources.DiskMB), int32(resources.Containers))\n\tcellPresence := models.NewCellPresence(cellID, cellID+\".address\", cellID+\".url\",\n\t\t\"z1\", cellCapacity, []string{\"providers\"},\n\t\t[]string{\"cflinuxfs9\"}, []string{}, []string{})\n\n\tpayload, err := json.Marshal(cellPresence)\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn &locketmodels.Resource{\n\t\tKey: cellID,\n\t\tOwner: cellID,\n\t\tValue: string(payload),\n\t\tType: locketmodels.PresenceType,\n\t}\n}\n\nfunc resetUnclaimedActualLRPs(conn *sql.DB, cells map[string]struct{}) {\n\texistingCellIds := make(map[string]struct{})\n\n\tquery := `\n\t\t\tSELECT\n\t\t\t\tcell_id\n\t\t\tFROM actual_lrps\n\t\t\tGROUP BY cell_id\n\t\t`\n\trows, err := conn.Query(query)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar cellId string\n\t\terr = rows.Scan(&cellId)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\texistingCellIds[cellId] = struct{}{}\n\t}\n\n\tmissingCells := findMissingCells(existingCellIds, cells)\n\n\tquery = `\n\t\t\t\tSELECT\n\t\t\t\t\tprocess_guid, domain\n\t\t\t\tFROM actual_lrps WHERE cell_id = ''\n\t\t\t`\n\n\ttype guidAndDomain struct {\n\t\tProcessGuid string\n\t\tDomain string\n\t}\n\tvar lrps []guidAndDomain\n\n\trows, err = conn.Query(query)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar guid, domain string\n\t\terr = rows.Scan(&guid, &domain)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tlrps = append(lrps, guidAndDomain{ProcessGuid: guid, Domain: domain})\n\t}\n\n\tfor i, lrp := range lrps {\n\t\tcellID := missingCells[i%len(missingCells)]\n\t\tactualLRPInstanceKey := &models.ActualLRPInstanceKey{InstanceGuid: lrp.ProcessGuid + \"-i\", CellId: cellID}\n\t\tnetInfo := models.NewActualLRPNetInfo(\"1.2.3.4\", \"2.2.2.2\", models.NewPortMapping(61999, 8080))\n\t\terr := bbsClient.StartActualLRP(logger, &models.ActualLRPKey{Domain: lrp.Domain, ProcessGuid: lrp.ProcessGuid, Index: 0}, actualLRPInstanceKey, &netInfo)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n}\n\nfunc findMissingCells(existing map[string]struct{}, expected map[string]struct{}) []string {\n\tvar missing []string\n\tfor cell, _ := range expected {\n\t\tif _, ok := existing[cell]; !ok {\n\t\t\tmissing = append(missing, cell)\n\t\t}\n\t}\n\n\treturn missing\n}\n<|endoftext|>"} {"text":"<commit_before>package benchmark_bbs_test\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/bbs\"\n\tetcddb \"github.com\/cloudfoundry-incubator\/bbs\/db\/etcd\"\n\t\"github.com\/cloudfoundry-incubator\/bbs\/encryption\"\n\t\"github.com\/cloudfoundry-incubator\/benchmark-bbs\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-lager\"\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/pivotal-golang\/lager\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bbsAddress string\nvar bbsClientCert string\nvar bbsClientKey string\nvar etcdFlags *ETCDFlags\nvar desiredLRPs int\nvar actualLRPs int\nvar encryptionFlags *encryption.EncryptionFlags\n\nvar etcdClient *etcd.Client\nvar bbsClient bbs.Client\nvar logger lager.Logger\n\nfunc init() {\n\tflag.StringVar(&bbsAddress, \"bbsAddress\", \"\", \"Address of the BBS Server\")\n\tflag.StringVar(&bbsClientCert, \"bbsClientCert\", \"\", \"bbs client ssl certificate\")\n\tflag.StringVar(&bbsClientKey, \"bbsClientKey\", \"\", \"bbs client ssl key\")\n\n\tflag.IntVar(&desiredLRPs, \"desiredLRPs\", 0, \"number of DesiredLRPs to create\")\n\tflag.IntVar(&actualLRPs, \"actualLRPs\", 0, \"number of ActualLRPs to create\")\n\n\tcf_lager.AddFlags(flag.CommandLine)\n\tetcdFlags = AddETCDFlags(flag.CommandLine)\n\tencryptionFlags = encryption.AddEncryptionFlags(flag.CommandLine)\n}\n\nfunc TestBenchmarkBbs(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"BenchmarkBbs Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\tetcdOptions, err := etcdFlags.Validate()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tlogger = lager.NewLogger(\"test\")\n\tlogger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))\n\n\tetcdClient = initializeEtcdClient(logger, etcdOptions)\n\tbbsClient = initializeBBSClient(logger)\n\n\tcleanUpDesiredLRPs()\n\tcleanUpActualLRPs()\n\n\t_, err = bbsClient.Domains()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tif desiredLRPs > 0 {\n\t\tdesiredLRPGenerator := generator.NewDesiredLRPGenerator(logger, bbsClient, *etcdClient)\n\t\terr := desiredLRPGenerator.Generate(desiredLRPs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n\n\tif actualLRPs > 0 {\n\t\tactualLRPGenerator := generator.NewActualLRPGenerator(logger, bbsClient, *etcdClient)\n\t\terr := actualLRPGenerator.Generate(actualLRPs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n})\n\nvar _ = AfterSuite(func() {\n\tcleanUpDesiredLRPs()\n\tcleanUpActualLRPs()\n})\n\ntype ETCDFlags struct {\n\tetcdCertFile string\n\tetcdKeyFile string\n\tetcdCaFile string\n\tclusterUrls string\n\tclientSessionCacheSize int\n\tmaxIdleConnsPerHost int\n}\n\nfunc AddETCDFlags(flagSet *flag.FlagSet) *ETCDFlags {\n\tflags := &ETCDFlags{}\n\n\tflagSet.StringVar(\n\t\t&flags.clusterUrls,\n\t\t\"etcdCluster\",\n\t\t\"http:\/\/127.0.0.1:4001\",\n\t\t\"comma-separated list of etcd URLs (scheme:\/\/ip:port)\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdCertFile,\n\t\t\"etcdCertFile\",\n\t\t\"\",\n\t\t\"Location of the client certificate for mutual auth\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdKeyFile,\n\t\t\"etcdKeyFile\",\n\t\t\"\",\n\t\t\"Location of the client key for mutual auth\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdCaFile,\n\t\t\"etcdCaFile\",\n\t\t\"\",\n\t\t\"Location of the CA certificate for mutual auth\",\n\t)\n\n\tflagSet.IntVar(\n\t\t&flags.clientSessionCacheSize,\n\t\t\"etcdSessionCacheSize\",\n\t\t0,\n\t\t\"Capacity of the ClientSessionCache option on the TLS configuration. If zero, golang's default will be used\",\n\t)\n\tflagSet.IntVar(\n\t\t&flags.maxIdleConnsPerHost,\n\t\t\"etcdMaxIdleConnsPerHost\",\n\t\t0,\n\t\t\"Controls the maximum number of idle (keep-alive) connctions per host. If zero, golang's default will be used\",\n\t)\n\treturn flags\n}\n\nfunc (flags *ETCDFlags) Validate() (*etcddb.ETCDOptions, error) {\n\tscheme := \"\"\n\tclusterUrls := strings.Split(flags.clusterUrls, \",\")\n\tfor i, uString := range clusterUrls {\n\t\tuString = strings.TrimSpace(uString)\n\t\tclusterUrls[i] = uString\n\t\tu, err := url.Parse(uString)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid cluster URL: '%s', error: [%s]\", uString, err.Error())\n\t\t}\n\t\tif scheme == \"\" {\n\t\t\tif u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\t\t\treturn nil, errors.New(\"Invalid scheme: \" + uString)\n\t\t\t}\n\t\t\tscheme = u.Scheme\n\t\t} else if scheme != u.Scheme {\n\t\t\treturn nil, fmt.Errorf(\"Multiple url schemes provided: %s\", flags.clusterUrls)\n\t\t}\n\t}\n\n\tisSSL := false\n\tif scheme == \"https\" {\n\t\tisSSL = true\n\t\tif flags.etcdCertFile == \"\" {\n\t\t\treturn nil, errors.New(\"Cert file must be provided for https connections\")\n\t\t}\n\t\tif flags.etcdKeyFile == \"\" {\n\t\t\treturn nil, errors.New(\"Key file must be provided for https connections\")\n\t\t}\n\t}\n\n\treturn &etcddb.ETCDOptions{\n\t\tCertFile: flags.etcdCertFile,\n\t\tKeyFile: flags.etcdKeyFile,\n\t\tCAFile: flags.etcdCaFile,\n\t\tClusterUrls: clusterUrls,\n\t\tIsSSL: isSSL,\n\t\tClientSessionCacheSize: flags.clientSessionCacheSize,\n\t\tMaxIdleConnsPerHost: flags.maxIdleConnsPerHost,\n\t}, nil\n}\n\nfunc initializeEtcdClient(logger lager.Logger, etcdOptions *etcddb.ETCDOptions) *etcd.Client {\n\tvar etcdClient *etcd.Client\n\tvar tr *http.Transport\n\n\tif etcdOptions.IsSSL {\n\t\tif etcdOptions.CertFile == \"\" || etcdOptions.KeyFile == \"\" {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", errors.New(\"Require both cert and key path\"))\n\t\t}\n\n\t\tvar err error\n\t\tetcdClient, err = etcd.NewTLSClient(etcdOptions.ClusterUrls, etcdOptions.CertFile, etcdOptions.KeyFile, etcdOptions.CAFile)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", err)\n\t\t}\n\n\t\ttlsCert, err := tls.LoadX509KeyPair(etcdOptions.CertFile, etcdOptions.KeyFile)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", err)\n\t\t}\n\n\t\ttlsConfig := &tls.Config{\n\t\t\tCertificates: []tls.Certificate{tlsCert},\n\t\t\tInsecureSkipVerify: true,\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(etcdOptions.ClientSessionCacheSize),\n\t\t}\n\t\ttr = &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDial: etcdClient.DefaultDial,\n\t\t\tMaxIdleConnsPerHost: etcdOptions.MaxIdleConnsPerHost,\n\t\t}\n\t\tetcdClient.SetTransport(tr)\n\t} else {\n\t\tetcdClient = etcd.NewClient(etcdOptions.ClusterUrls)\n\t}\n\tetcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)\n\n\treturn etcdClient\n}\n\nfunc initializeBBSClient(logger lager.Logger) bbs.Client {\n\tif bbsAddress == \"\" {\n\t\tlog.Fatal(\"bbsAddress is required\")\n\t}\n\n\tbbsURL, err := url.Parse(bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(bbsAddress)\n\t}\n\n\tbbsClient, err := bbs.NewSecureSkipVerifyClient(bbsAddress, bbsClientCert, bbsClientKey, 1, 1)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc cleanUpDesiredLRPs() {\n\t_, err := etcdClient.Delete(\"\/v1\/desired_lrp\/\", true)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nfunc cleanUpActualLRPs() {\n\t_, err := etcdClient.Delete(\"\/v1\/actual\/\", true)\n\tExpect(err).ToNot(HaveOccurred())\n}\n<commit_msg>clean up methods -> purge method<commit_after>package benchmark_bbs_test\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/bbs\"\n\tetcddb \"github.com\/cloudfoundry-incubator\/bbs\/db\/etcd\"\n\t\"github.com\/cloudfoundry-incubator\/bbs\/encryption\"\n\t\"github.com\/cloudfoundry-incubator\/benchmark-bbs\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-lager\"\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/pivotal-golang\/lager\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bbsAddress string\nvar bbsClientCert string\nvar bbsClientKey string\nvar etcdFlags *ETCDFlags\nvar desiredLRPs int\nvar actualLRPs int\nvar encryptionFlags *encryption.EncryptionFlags\n\nvar etcdClient *etcd.Client\nvar bbsClient bbs.Client\nvar logger lager.Logger\n\nfunc init() {\n\tflag.StringVar(&bbsAddress, \"bbsAddress\", \"\", \"Address of the BBS Server\")\n\tflag.StringVar(&bbsClientCert, \"bbsClientCert\", \"\", \"bbs client ssl certificate\")\n\tflag.StringVar(&bbsClientKey, \"bbsClientKey\", \"\", \"bbs client ssl key\")\n\n\tflag.IntVar(&desiredLRPs, \"desiredLRPs\", 0, \"number of DesiredLRPs to create\")\n\tflag.IntVar(&actualLRPs, \"actualLRPs\", 0, \"number of ActualLRPs to create\")\n\n\tcf_lager.AddFlags(flag.CommandLine)\n\tetcdFlags = AddETCDFlags(flag.CommandLine)\n\tencryptionFlags = encryption.AddEncryptionFlags(flag.CommandLine)\n}\n\nfunc TestBenchmarkBbs(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"BenchmarkBbs Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\tetcdOptions, err := etcdFlags.Validate()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tlogger = lager.NewLogger(\"test\")\n\tlogger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))\n\n\tetcdClient = initializeEtcdClient(logger, etcdOptions)\n\tbbsClient = initializeBBSClient(logger)\n\n\tpurge(\"\/v1\/desired_lrp\")\n\tpurge(\"\/v1\/actual\")\n\n\t_, err = bbsClient.Domains()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tif desiredLRPs > 0 {\n\t\tdesiredLRPGenerator := generator.NewDesiredLRPGenerator(logger, bbsClient, *etcdClient)\n\t\terr := desiredLRPGenerator.Generate(desiredLRPs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n\n\tif actualLRPs > 0 {\n\t\tactualLRPGenerator := generator.NewActualLRPGenerator(logger, bbsClient, *etcdClient)\n\t\terr := actualLRPGenerator.Generate(actualLRPs)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t}\n})\n\nvar _ = AfterSuite(func() {\n\tpurge(\"\/v1\/desired_lrp\")\n\tpurge(\"\/v1\/actual\")\n})\n\ntype ETCDFlags struct {\n\tetcdCertFile string\n\tetcdKeyFile string\n\tetcdCaFile string\n\tclusterUrls string\n\tclientSessionCacheSize int\n\tmaxIdleConnsPerHost int\n}\n\nfunc AddETCDFlags(flagSet *flag.FlagSet) *ETCDFlags {\n\tflags := &ETCDFlags{}\n\n\tflagSet.StringVar(\n\t\t&flags.clusterUrls,\n\t\t\"etcdCluster\",\n\t\t\"http:\/\/127.0.0.1:4001\",\n\t\t\"comma-separated list of etcd URLs (scheme:\/\/ip:port)\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdCertFile,\n\t\t\"etcdCertFile\",\n\t\t\"\",\n\t\t\"Location of the client certificate for mutual auth\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdKeyFile,\n\t\t\"etcdKeyFile\",\n\t\t\"\",\n\t\t\"Location of the client key for mutual auth\",\n\t)\n\tflagSet.StringVar(\n\t\t&flags.etcdCaFile,\n\t\t\"etcdCaFile\",\n\t\t\"\",\n\t\t\"Location of the CA certificate for mutual auth\",\n\t)\n\n\tflagSet.IntVar(\n\t\t&flags.clientSessionCacheSize,\n\t\t\"etcdSessionCacheSize\",\n\t\t0,\n\t\t\"Capacity of the ClientSessionCache option on the TLS configuration. If zero, golang's default will be used\",\n\t)\n\tflagSet.IntVar(\n\t\t&flags.maxIdleConnsPerHost,\n\t\t\"etcdMaxIdleConnsPerHost\",\n\t\t0,\n\t\t\"Controls the maximum number of idle (keep-alive) connctions per host. If zero, golang's default will be used\",\n\t)\n\treturn flags\n}\n\nfunc (flags *ETCDFlags) Validate() (*etcddb.ETCDOptions, error) {\n\tscheme := \"\"\n\tclusterUrls := strings.Split(flags.clusterUrls, \",\")\n\tfor i, uString := range clusterUrls {\n\t\tuString = strings.TrimSpace(uString)\n\t\tclusterUrls[i] = uString\n\t\tu, err := url.Parse(uString)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid cluster URL: '%s', error: [%s]\", uString, err.Error())\n\t\t}\n\t\tif scheme == \"\" {\n\t\t\tif u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\t\t\treturn nil, errors.New(\"Invalid scheme: \" + uString)\n\t\t\t}\n\t\t\tscheme = u.Scheme\n\t\t} else if scheme != u.Scheme {\n\t\t\treturn nil, fmt.Errorf(\"Multiple url schemes provided: %s\", flags.clusterUrls)\n\t\t}\n\t}\n\n\tisSSL := false\n\tif scheme == \"https\" {\n\t\tisSSL = true\n\t\tif flags.etcdCertFile == \"\" {\n\t\t\treturn nil, errors.New(\"Cert file must be provided for https connections\")\n\t\t}\n\t\tif flags.etcdKeyFile == \"\" {\n\t\t\treturn nil, errors.New(\"Key file must be provided for https connections\")\n\t\t}\n\t}\n\n\treturn &etcddb.ETCDOptions{\n\t\tCertFile: flags.etcdCertFile,\n\t\tKeyFile: flags.etcdKeyFile,\n\t\tCAFile: flags.etcdCaFile,\n\t\tClusterUrls: clusterUrls,\n\t\tIsSSL: isSSL,\n\t\tClientSessionCacheSize: flags.clientSessionCacheSize,\n\t\tMaxIdleConnsPerHost: flags.maxIdleConnsPerHost,\n\t}, nil\n}\n\nfunc initializeEtcdClient(logger lager.Logger, etcdOptions *etcddb.ETCDOptions) *etcd.Client {\n\tvar etcdClient *etcd.Client\n\tvar tr *http.Transport\n\n\tif etcdOptions.IsSSL {\n\t\tif etcdOptions.CertFile == \"\" || etcdOptions.KeyFile == \"\" {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", errors.New(\"Require both cert and key path\"))\n\t\t}\n\n\t\tvar err error\n\t\tetcdClient, err = etcd.NewTLSClient(etcdOptions.ClusterUrls, etcdOptions.CertFile, etcdOptions.KeyFile, etcdOptions.CAFile)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", err)\n\t\t}\n\n\t\ttlsCert, err := tls.LoadX509KeyPair(etcdOptions.CertFile, etcdOptions.KeyFile)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"failed-to-construct-etcd-tls-client\", err)\n\t\t}\n\n\t\ttlsConfig := &tls.Config{\n\t\t\tCertificates: []tls.Certificate{tlsCert},\n\t\t\tInsecureSkipVerify: true,\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(etcdOptions.ClientSessionCacheSize),\n\t\t}\n\t\ttr = &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDial: etcdClient.DefaultDial,\n\t\t\tMaxIdleConnsPerHost: etcdOptions.MaxIdleConnsPerHost,\n\t\t}\n\t\tetcdClient.SetTransport(tr)\n\t} else {\n\t\tetcdClient = etcd.NewClient(etcdOptions.ClusterUrls)\n\t}\n\tetcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)\n\n\treturn etcdClient\n}\n\nfunc initializeBBSClient(logger lager.Logger) bbs.Client {\n\tif bbsAddress == \"\" {\n\t\tlog.Fatal(\"bbsAddress is required\")\n\t}\n\n\tbbsURL, err := url.Parse(bbsAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"Invalid BBS URL\", err)\n\t}\n\n\tif bbsURL.Scheme != \"https\" {\n\t\treturn bbs.NewClient(bbsAddress)\n\t}\n\n\tbbsClient, err := bbs.NewSecureSkipVerifyClient(bbsAddress, bbsClientCert, bbsClientKey, 1, 1)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to configure secure BBS client\", err)\n\t}\n\treturn bbsClient\n}\n\nfunc purge(key string) {\n\t_, err := etcdClient.Delete(key, true)\n\tif err != nil {\n\t\tmatches, matchErr := regexp.Match(\".*Key not found.*\", []byte(err.Error()))\n\t\tif matchErr != nil {\n\t\t\tFail(matchErr.Error())\n\t\t}\n\t\tif !matches {\n\t\t\tFail(err.Error())\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\npackage cmdimpl\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/contiv\/vpp\/plugins\/contiv\/model\/node\"\n\t\"github.com\/contiv\/vpp\/plugins\/crd\/cache\/telemetrymodel\"\n\t\"github.com\/contiv\/vpp\/plugins\/ksr\/model\/pod\"\n\t\"github.com\/contiv\/vpp\/plugins\/netctl\/http\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\/etcd\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/vpp\/model\/interfaces\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n)\n\ntype nodeData struct {\n\tipam *telemetrymodel.IPamEntry\n\tifcs telemetrymodel.NodeInterfaces\n}\ntype nodeDataCache map[string]*nodeData\n\ntype podGetter struct {\n\tndCache nodeDataCache\n\tdb *etcd.BytesConnectionEtcd\n\tpods []*pod.Pod\n}\n\n\/\/ PrintAllPods will print out all of the non local pods in a network in\n\/\/ a table format.\nfunc PrintAllPods() {\n\tw := getWriter(\"HOST-NAME\")\n\tpg := newPodGetter()\n\tpg.printAllPods(w)\n\tpg.db.Close()\n\tw.Flush()\n}\n\n\/\/PrintPodsPerNode will print out all of the non-local pods for a certain\n\/\/ pods along with their tap interface ip address\nfunc PrintPodsPerNode(input string) {\n\tw := getWriter(\"\")\n\tpg := newPodGetter()\n\tpg.printPodsPerNode(w, input, \"\")\n\tpg.db.Close()\n\tw.Flush()\n}\n\nfunc newPodGetter() *podGetter {\n\tpg := &podGetter{\n\t\tndCache: make(nodeDataCache, 0),\n\t\tdb: getEtcdBroker(),\n\t}\n\n\tpg.pods = make([]*pod.Pod, 0)\n\titr, err := pg.db.ListValues(\"\/vnf-agent\/contiv-ksr\/k8s\/pod\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to get pods from etcd, error %s\", err)\n\t\tos.Exit(2)\n\t}\n\n\tfor {\n\t\tkv, stop := itr.GetNext()\n\t\tif stop {\n\t\t\tbreak\n\t\t}\n\t\tbuf := kv.GetValue()\n\t\tpodInfo := &pod.Pod{}\n\t\tif err = json.Unmarshal(buf, podInfo); err != nil {\n\t\t\tfmt.Printf(\"Failed to unmarshall pod, error %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tpg.pods = append(pg.pods, podInfo)\n\t}\n\n\treturn pg\n}\n\nfunc (pg *podGetter) printAllPods(w *tabwriter.Writer) {\n\n\titr, err := pg.db.ListValues(\"\/vnf-agent\/contiv-ksr\/allocatedIDs\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting values\")\n\t\treturn\n\t}\n\n\tfor {\n\t\tkv, stop := itr.GetNext()\n\t\tif stop {\n\t\t\tfmt.Println()\n\t\t\tbreak\n\t\t}\n\t\tbuf := kv.GetValue()\n\t\tnodeInfo := &node.NodeInfo{}\n\t\terr = json.Unmarshal(buf, nodeInfo)\n\t\tnodeID := fmt.Sprintf(\"%s (%s):\", nodeInfo.Name, nodeInfo.ManagementIpAddress)\n\t\tfmt.Fprintf(w, \"%s\\t\\t\\t\\t\\t\\t\\t\\n\", nodeID)\n\t\tfmt.Fprintf(w, \"%s\\t\\t\\t\\t\\t\\t\\t\\n\", strings.Repeat(\"-\", len(nodeID)))\n\n\t\tpg.printPodsPerNode(w, nodeInfo.ManagementIpAddress, nodeInfo.Name)\n\t\tfmt.Fprintln(w, \"\\t\\t\\t\\t\\t\\t\\t\\t\")\n\t}\n}\n\nfunc (pg *podGetter) printPodsPerNode(w *tabwriter.Writer, nodeNameOrIP string, nodeName string) {\n\thostIP := resolveNodeOrIP(nodeNameOrIP)\n\n\tfmt.Fprintf(w, \"POD-NAME\\tNAMESPACE\\tPOD-IP\\tVPP-IP\\tIF-IDX\\tIF-NAME\\tINTERNAL-IF-NAME\\n\")\n\n\tfor _, podInfo := range pg.pods {\n\t\tif podInfo.HostIpAddress != hostIP {\n\t\t\tcontinue\n\t\t} else if podInfo.IpAddress == hostIP {\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\",\n\t\t\t\tpodInfo.Name,\n\t\t\t\tpodInfo.Namespace,\n\t\t\t\tpodInfo.IpAddress,\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t} else {\n\t\t\tipAddress, ifIndex, intName, name := pg.getTapInterfaceForPod(podInfo)\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\t%d\\t%s\\t%s\\n\",\n\t\t\t\tpodInfo.Name,\n\t\t\t\tpodInfo.Namespace,\n\t\t\t\tpodInfo.IpAddress,\n\t\t\t\tstrings.Split(ipAddress, \"\/\")[0],\n\t\t\t\tifIndex,\n\t\t\t\tintName,\n\t\t\t\tname)\n\t\t}\n\t}\n}\n\nfunc (pg *podGetter) getTapInterfaceForPod(podInfo *pod.Pod) (string, uint32, string, string) {\n\t\/\/ when we see a pod from a given node for the first time, retrieve its\n\t\/\/ IPAM and interface info\n\tif pg.ndCache[podInfo.HostIpAddress] == nil {\n\n\t\t\/\/ Get ipam data for the node where the pod is hosted\n\t\tb, err := http.GetNodeInfo(podInfo.HostIpAddress, getIpamDataCmd)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to get ipam, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\tipam := &telemetrymodel.IPamEntry{}\n\t\tif err := json.Unmarshal(b, ipam); err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to decode ipam, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\t\/\/ Get interfaces data for the node where the pod is hosted\n\t\tb, err = http.GetNodeInfo(podInfo.HostIpAddress, getInterfaceDataCmd)\n\t\tintfs := make(telemetrymodel.NodeInterfaces)\n\t\tif err := json.Unmarshal(b, &intfs); err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to get pod's interface, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\tpg.ndCache[podInfo.HostIpAddress] = &nodeData{\n\t\t\tipam: ipam,\n\t\t\tifcs: intfs,\n\t\t}\n\t}\n\n\t\/\/ Determine the tap interface on VPP that connects the pod to the VPP\n\tpodPfxLen := pg.ndCache[podInfo.HostIpAddress].ipam.Config.VppHostNetworkPrefixLen\n\tpodMask := maskLength2Mask(int(podPfxLen))\n\n\tpodIPSubnet, podIPMask, err := getIPAddressAndMask(pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodSubnetCIDR)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid PodSubnetCIDR address %s, err %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodSubnetCIDR, err)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tif podMask != podIPMask {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - vppHostNetworkPrefixLen mismatch: \"+\n\t\t\t\"PodSubnetCIDR '%s', podNetworkPrefixLen '%d'\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodSubnetCIDR,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodNetworkPrefixLen)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tpodIfIPAddress, podIfIPMask, err := getIPAddressAndMask(pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid PodIfIPCIDR address %s, err %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR, err)\n\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t}\n\n\tif podMask != podIfIPMask {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - vppHostNetworkPrefixLen mismatch: \"+\n\t\t\t\"PodIfIPCIDR '%s', podNetworkPrefixLen '%d'\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodNetworkPrefixLen)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tpodIfIPPrefix := podIfIPAddress &^ podMask\n\tpodAddr, err := ip2uint32(podInfo.IpAddress)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid podInfo.IpAddress %s, err %s\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, podInfo.IpAddress, err)\n\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t}\n\n\tpodAddrSuffix := podAddr & podMask\n\n\tif podAddr&^podMask != podIPSubnet {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - pod IP address %s not from PodSubnetCIDR %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, podInfo.IpAddress,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodSubnetCIDR)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tfor _, intf := range pg.ndCache[podInfo.HostIpAddress].ifcs {\n\t\tif intf.If.IfType == interfaces.InterfaceType_TAP_INTERFACE {\n\t\t\tfor _, ip := range intf.If.IPAddresses {\n\t\t\t\tifIPAddr, iffIPMask, err := getIPAddressAndMask(ip)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif iffIPMask != 0 {\n\t\t\t\t\t\/\/ TODO: do some error handling\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tifIPAdrPrefix := ifIPAddr &^ podMask\n\t\t\t\tifIPAdrSuffix := ifIPAddr & podMask\n\t\t\t\tif (podIfIPPrefix == ifIPAdrPrefix) && (ifIPAdrSuffix == podAddrSuffix) {\n\t\t\t\t\treturn ip, intf.IfMeta.SwIfIndex, intf.IfMeta.VppInternalName, intf.If.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n}\n\nfunc getWriter(hostName string) *tabwriter.Writer {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)\n\treturn w\n}\n<commit_msg>Fix a bug in netctl where IPAM config.podSubnetCIDR was used instead of podNetwork<commit_after>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\npackage cmdimpl\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/contiv\/vpp\/plugins\/contiv\/model\/node\"\n\t\"github.com\/contiv\/vpp\/plugins\/crd\/cache\/telemetrymodel\"\n\t\"github.com\/contiv\/vpp\/plugins\/ksr\/model\/pod\"\n\t\"github.com\/contiv\/vpp\/plugins\/netctl\/http\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\/etcd\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/vpp\/model\/interfaces\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n)\n\ntype nodeData struct {\n\tipam *telemetrymodel.IPamEntry\n\tifcs telemetrymodel.NodeInterfaces\n}\ntype nodeDataCache map[string]*nodeData\n\ntype podGetter struct {\n\tndCache nodeDataCache\n\tdb *etcd.BytesConnectionEtcd\n\tpods []*pod.Pod\n}\n\n\/\/ PrintAllPods will print out all of the non local pods in a network in\n\/\/ a table format.\nfunc PrintAllPods() {\n\tw := getWriter(\"HOST-NAME\")\n\tpg := newPodGetter()\n\tpg.printAllPods(w)\n\tpg.db.Close()\n\tw.Flush()\n}\n\n\/\/PrintPodsPerNode will print out all of the non-local pods for a certain\n\/\/ pods along with their tap interface ip address\nfunc PrintPodsPerNode(input string) {\n\tw := getWriter(\"\")\n\tpg := newPodGetter()\n\tpg.printPodsPerNode(w, input, \"\")\n\tpg.db.Close()\n\tw.Flush()\n}\n\nfunc newPodGetter() *podGetter {\n\tpg := &podGetter{\n\t\tndCache: make(nodeDataCache, 0),\n\t\tdb: getEtcdBroker(),\n\t}\n\n\tpg.pods = make([]*pod.Pod, 0)\n\titr, err := pg.db.ListValues(\"\/vnf-agent\/contiv-ksr\/k8s\/pod\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to get pods from etcd, error %s\", err)\n\t\tos.Exit(2)\n\t}\n\n\tfor {\n\t\tkv, stop := itr.GetNext()\n\t\tif stop {\n\t\t\tbreak\n\t\t}\n\t\tbuf := kv.GetValue()\n\t\tpodInfo := &pod.Pod{}\n\t\tif err = json.Unmarshal(buf, podInfo); err != nil {\n\t\t\tfmt.Printf(\"Failed to unmarshall pod, error %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tpg.pods = append(pg.pods, podInfo)\n\t}\n\n\treturn pg\n}\n\nfunc (pg *podGetter) printAllPods(w *tabwriter.Writer) {\n\n\titr, err := pg.db.ListValues(\"\/vnf-agent\/contiv-ksr\/allocatedIDs\/\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting values\")\n\t\treturn\n\t}\n\n\tfor {\n\t\tkv, stop := itr.GetNext()\n\t\tif stop {\n\t\t\tfmt.Println()\n\t\t\tbreak\n\t\t}\n\t\tbuf := kv.GetValue()\n\t\tnodeInfo := &node.NodeInfo{}\n\t\terr = json.Unmarshal(buf, nodeInfo)\n\t\tnodeID := fmt.Sprintf(\"%s (%s):\", nodeInfo.Name, nodeInfo.ManagementIpAddress)\n\t\tfmt.Fprintf(w, \"%s\\t\\t\\t\\t\\t\\t\\t\\n\", nodeID)\n\t\tfmt.Fprintf(w, \"%s\\t\\t\\t\\t\\t\\t\\t\\n\", strings.Repeat(\"-\", len(nodeID)))\n\n\t\tpg.printPodsPerNode(w, nodeInfo.ManagementIpAddress, nodeInfo.Name)\n\t\tfmt.Fprintln(w, \"\\t\\t\\t\\t\\t\\t\\t\\t\")\n\t}\n}\n\nfunc (pg *podGetter) printPodsPerNode(w *tabwriter.Writer, nodeNameOrIP string, nodeName string) {\n\thostIP := resolveNodeOrIP(nodeNameOrIP)\n\n\tfmt.Fprintf(w, \"POD-NAME\\tNAMESPACE\\tPOD-IP\\tVPP-IP\\tIF-IDX\\tIF-NAME\\tINTERNAL-IF-NAME\\n\")\n\n\tfor _, podInfo := range pg.pods {\n\t\tif podInfo.HostIpAddress != hostIP {\n\t\t\tcontinue\n\t\t} else if podInfo.IpAddress == hostIP {\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\",\n\t\t\t\tpodInfo.Name,\n\t\t\t\tpodInfo.Namespace,\n\t\t\t\tpodInfo.IpAddress,\n\t\t\t\t\"\", \"\", \"\", \"\")\n\t\t} else {\n\t\t\tipAddress, ifIndex, intName, name := pg.getTapInterfaceForPod(podInfo)\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\t%d\\t%s\\t%s\\n\",\n\t\t\t\tpodInfo.Name,\n\t\t\t\tpodInfo.Namespace,\n\t\t\t\tpodInfo.IpAddress,\n\t\t\t\tstrings.Split(ipAddress, \"\/\")[0],\n\t\t\t\tifIndex,\n\t\t\t\tintName,\n\t\t\t\tname)\n\t\t}\n\t}\n}\n\nfunc (pg *podGetter) getTapInterfaceForPod(podInfo *pod.Pod) (string, uint32, string, string) {\n\t\/\/ when we see a pod from a given node for the first time, retrieve its\n\t\/\/ IPAM and interface info\n\tif pg.ndCache[podInfo.HostIpAddress] == nil {\n\n\t\t\/\/ Get ipam data for the node where the pod is hosted\n\t\tb, err := http.GetNodeInfo(podInfo.HostIpAddress, getIpamDataCmd)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to get ipam, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\tipam := &telemetrymodel.IPamEntry{}\n\t\tif err := json.Unmarshal(b, ipam); err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to decode ipam, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\t\/\/ Get interfaces data for the node where the pod is hosted\n\t\tb, err = http.GetNodeInfo(podInfo.HostIpAddress, getInterfaceDataCmd)\n\t\tintfs := make(telemetrymodel.NodeInterfaces)\n\t\tif err := json.Unmarshal(b, &intfs); err != nil {\n\t\t\tfmt.Printf(\"Host '%s', Pod '%s' - failed to get pod's interface, err %s\\n\",\n\t\t\t\tpodInfo.HostIpAddress, podInfo.Name, err)\n\t\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t\t}\n\n\t\tpg.ndCache[podInfo.HostIpAddress] = &nodeData{\n\t\t\tipam: ipam,\n\t\t\tifcs: intfs,\n\t\t}\n\t}\n\n\t\/\/ Determine the tap interface on VPP that connects the pod to the VPP\n\tpodPfxLen := pg.ndCache[podInfo.HostIpAddress].ipam.Config.VppHostNetworkPrefixLen\n\tpodMask := maskLength2Mask(int(podPfxLen))\n\n\tpodNetwork, podIPMask, err := getIPAddressAndMask(pg.ndCache[podInfo.HostIpAddress].ipam.PodNetwork)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid PodNetwork address %s, err %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, pg.ndCache[podInfo.HostIpAddress].ipam.PodNetwork, err)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tif podMask != podIPMask {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - vppHostNetworkPrefixLen mismatch: \"+\n\t\t\t\"PodNetwork '%s', podNetworkPrefixLen '%d'\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.PodNetwork,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodNetworkPrefixLen)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tpodIfIPAddress, podIfIPMask, err := getIPAddressAndMask(pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid PodIfIPCIDR address %s, err %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, pg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR, err)\n\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t}\n\n\tif podMask != podIfIPMask {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - vppHostNetworkPrefixLen mismatch: \"+\n\t\t\t\"PodIfIPCIDR '%s', podNetworkPrefixLen '%d'\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodIfIPCIDR,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.Config.PodNetworkPrefixLen)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tpodIfIPPrefix := podIfIPAddress &^ podMask\n\tpodAddr, err := ip2uint32(podInfo.IpAddress)\n\tif err != nil {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - invalid podInfo.IpAddress %s, err %s\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, podInfo.IpAddress, err)\n\t\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n\t}\n\n\tpodAddrSuffix := podAddr & podMask\n\n\tif podAddr&^podMask != podNetwork {\n\t\tfmt.Printf(\"Host '%s', Pod '%s' - pod IP address %s not from PodNetwork subnet %s\\n\",\n\t\t\tpodInfo.HostIpAddress, podInfo.Name, podInfo.IpAddress,\n\t\t\tpg.ndCache[podInfo.HostIpAddress].ipam.PodNetwork)\n\t\t\/\/ Do not return - we can still continue if this error happens\n\t}\n\n\tfor _, intf := range pg.ndCache[podInfo.HostIpAddress].ifcs {\n\t\tif intf.If.IfType == interfaces.InterfaceType_TAP_INTERFACE {\n\t\t\tfor _, ip := range intf.If.IPAddresses {\n\t\t\t\tifIPAddr, iffIPMask, err := getIPAddressAndMask(ip)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif iffIPMask != 0 {\n\t\t\t\t\t\/\/ TODO: do some error handling\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tifIPAdrPrefix := ifIPAddr &^ podMask\n\t\t\t\tifIPAdrSuffix := ifIPAddr & podMask\n\t\t\t\tif (podIfIPPrefix == ifIPAdrPrefix) && (ifIPAdrSuffix == podAddrSuffix) {\n\t\t\t\t\treturn ip, intf.IfMeta.SwIfIndex, intf.IfMeta.VppInternalName, intf.If.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"N\/A\", 0, \"N\/A\", \"N\/A\"\n}\n\nfunc getWriter(hostName string) *tabwriter.Writer {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)\n\treturn w\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage bootkube\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/mantle\/kola\/cluster\"\n\t\"github.com\/coreos\/mantle\/pluton\"\n\t\"github.com\/coreos\/mantle\/pluton\/spawn\"\n\t\"github.com\/coreos\/mantle\/util\"\n)\n\nfunc etcdScale(tc cluster.TestCluster) error {\n\t\/\/ create cluster with self-hosted etcd\n\tc, err := spawn.MakeBootkubeCluster(tc, 1, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ add two master nodes to cluster\n\tif err := c.AddMasters(2); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ scale up etcd operator\n\tif err := resizeSelfHostedEtcd(c, 3); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ todo check that each pod runs on a different master node\n\tif err := checkEtcdPodDistribution(c, 3); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ scale back to 1\n\tif err := resizeSelfHostedEtcd(c, 1); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ run an nginx deployment and ping it\n\tif err := nginxCheck(c); err != nil {\n\t\treturn fmt.Errorf(\"nginxCheck: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ resizes self-hosted etcd and checks that the desired number of pods are in a running state\nfunc resizeSelfHostedEtcd(c *pluton.Cluster, size int) error {\n\tconst (\n\t\ttprGroup = \"etcd.coreos.com\"\n\t\tapiVersion = \"v1beta1\"\n\t\ttprKind = \"clusters\"\n\t)\n\tvar tprEndpoint = fmt.Sprintf(\"http:\/\/127.0.0.1:8080\/apis\/%s\/%s\/namespaces\/kube-system\/%s\/kube-etcd\",\n\t\ttprGroup, apiVersion, tprKind)\n\n\tscaleCmds := []string{\n\t\tfmt.Sprintf(\"curl -H 'Content-Type: application\/json' -X GET %v > body.json\", tprEndpoint),\n\t\t\/\/ delete resourceVersion field before curling back\n\t\tfmt.Sprintf(\"jq 'recurse(.metadata) |= del(.resourceVersion)' < body.json | jq .spec.size=%v > newbody.json\", size),\n\t\tfmt.Sprintf(\"curl -H 'Content-Type: application\/json' -X PUT --data @newbody.json %v\", tprEndpoint),\n\t}\n\tfor _, cmd := range scaleCmds {\n\t\tsout, serr, err := c.SSH(cmd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error in scale up command: %v:\\nSTDERR: %s\\nSTDOUT: %s\", cmd, serr, sout)\n\t\t}\n\t}\n\n\t\/\/ check that all pods are running\n\tpodsReady := func() error {\n\t\tout, err := c.Kubectl(`get po -l etcd_cluster=kube-etcd -o jsonpath='{.items[*].status.phase}' --namespace=kube-system`)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tphases := strings.Split(out, \" \")\n\t\tif len(phases) != size {\n\t\t\treturn fmt.Errorf(\"expected %d etcd pods got %d: %v\", size, len(phases), phases)\n\t\t}\n\t\tfor _, phase := range phases {\n\t\t\tif phase != \"Running\" {\n\t\t\t\treturn fmt.Errorf(\"one or more etcd pods not in a 'Running' phase\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := util.Retry(15, 10*time.Second, podsReady); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checks that self-hosted etcd pods are scheduled on different master nodes\n\/\/ when possible\nfunc checkEtcdPodDistribution(c *pluton.Cluster, etcdClusterSize int) error {\n\t\/\/ check that number of unique nodes etcd pods run on is equal to the\n\t\/\/ lesser value betweeen total number of master nodes and total number\n\t\/\/ of etcd pods\n\tout, err := c.Kubectl(`get po -l etcd_cluster=kube-etcd -o jsonpath='{.items[*].status.hostIP}' --namespace=kube-system`)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnodeIPs := strings.Split(out, \" \")\n\tnodeSet := map[string]struct{}{}\n\tfor _, node := range nodeIPs {\n\t\tnodeSet[node] = struct{}{}\n\t}\n\n\tvar expectedUniqueNodes int\n\tif len(c.Masters) > etcdClusterSize {\n\t\texpectedUniqueNodes = etcdClusterSize\n\t} else {\n\t\texpectedUniqueNodes = len(c.Masters)\n\t}\n\n\tif len(nodeSet) != expectedUniqueNodes {\n\t\treturn fmt.Errorf(\"self-hosted etcd pods not properly distributed\")\n\t}\n\n\t\/\/ check that each node in nodeSet is a master node\n\tmasterSet := map[string]struct{}{}\n\tfor _, m := range c.Masters {\n\t\tmasterSet[m.PrivateIP()] = struct{}{}\n\t}\n\n\tfor k, _ := range nodeSet {\n\t\tif _, ok := masterSet[k]; !ok {\n\t\t\t\/\/ Just warn instead of erroring until\/if supported\n\t\t\tplog.Infof(\"detected self-hosted etcd pod running on non-master node %v %v\", masterSet, nodeSet)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>pluton\/tests\/bootkube: self-hosted etcd only scales to master nodes<commit_after>\/\/ Copyright 2017 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage bootkube\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/mantle\/kola\/cluster\"\n\t\"github.com\/coreos\/mantle\/pluton\"\n\t\"github.com\/coreos\/mantle\/pluton\/spawn\"\n\t\"github.com\/coreos\/mantle\/util\"\n)\n\nfunc etcdScale(tc cluster.TestCluster) error {\n\t\/\/ create cluster with self-hosted etcd\n\tc, err := spawn.MakeBootkubeCluster(tc, 1, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ add two master nodes to cluster\n\tif err := c.AddMasters(2); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ scale up etcd operator\n\tif err := resizeSelfHostedEtcd(c, 3); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ todo check that each pod runs on a different master node\n\tif err := checkEtcdPodDistribution(c, 3); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ scale back to 1\n\tif err := resizeSelfHostedEtcd(c, 1); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ run an nginx deployment and ping it\n\tif err := nginxCheck(c); err != nil {\n\t\treturn fmt.Errorf(\"nginxCheck: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ resizes self-hosted etcd and checks that the desired number of pods are in a running state\nfunc resizeSelfHostedEtcd(c *pluton.Cluster, size int) error {\n\tconst (\n\t\ttprGroup = \"etcd.coreos.com\"\n\t\tapiVersion = \"v1beta1\"\n\t\ttprKind = \"clusters\"\n\t)\n\tvar tprEndpoint = fmt.Sprintf(\"http:\/\/127.0.0.1:8080\/apis\/%s\/%s\/namespaces\/kube-system\/%s\/kube-etcd\",\n\t\ttprGroup, apiVersion, tprKind)\n\n\tscaleCmds := []string{\n\t\tfmt.Sprintf(\"curl -H 'Content-Type: application\/json' -X GET %v > body.json\", tprEndpoint),\n\t\t\/\/ delete resourceVersion field before curling back\n\t\tfmt.Sprintf(\"jq 'recurse(.metadata) |= del(.resourceVersion)' < body.json | jq .spec.size=%v > newbody.json\", size),\n\t\tfmt.Sprintf(\"curl -H 'Content-Type: application\/json' -X PUT --data @newbody.json %v\", tprEndpoint),\n\t}\n\tfor _, cmd := range scaleCmds {\n\t\tsout, serr, err := c.SSH(cmd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error in scale up command: %v:\\nSTDERR: %s\\nSTDOUT: %s\", cmd, serr, sout)\n\t\t}\n\t}\n\n\t\/\/ check that all pods are running\n\tpodsReady := func() error {\n\t\tout, err := c.Kubectl(`get po -l etcd_cluster=kube-etcd -o jsonpath='{.items[*].status.phase}' --namespace=kube-system`)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tphases := strings.Split(out, \" \")\n\t\tif len(phases) != size {\n\t\t\treturn fmt.Errorf(\"expected %d etcd pods got %d: %v\", size, len(phases), phases)\n\t\t}\n\t\tfor _, phase := range phases {\n\t\t\tif phase != \"Running\" {\n\t\t\t\treturn fmt.Errorf(\"one or more etcd pods not in a 'Running' phase\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := util.Retry(15, 10*time.Second, podsReady); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checks that self-hosted etcd pods are scheduled on different master nodes\n\/\/ when possible\nfunc checkEtcdPodDistribution(c *pluton.Cluster, etcdClusterSize int) error {\n\t\/\/ check that number of unique nodes etcd pods run on is equal to the\n\t\/\/ lesser value betweeen total number of master nodes and total number\n\t\/\/ of etcd pods\n\tout, err := c.Kubectl(`get po -l etcd_cluster=kube-etcd -o jsonpath='{.items[*].status.hostIP}' --namespace=kube-system`)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnodeIPs := strings.Split(out, \" \")\n\tnodeSet := map[string]struct{}{}\n\tfor _, node := range nodeIPs {\n\t\tnodeSet[node] = struct{}{}\n\t}\n\n\tvar expectedUniqueNodes int\n\tif len(c.Masters) > etcdClusterSize {\n\t\texpectedUniqueNodes = etcdClusterSize\n\t} else {\n\t\texpectedUniqueNodes = len(c.Masters)\n\t}\n\n\tif len(nodeSet) != expectedUniqueNodes {\n\t\treturn fmt.Errorf(\"self-hosted etcd pods not properly distributed\")\n\t}\n\n\t\/\/ check that each node in nodeSet is a master node\n\tmasterSet := map[string]struct{}{}\n\tfor _, m := range c.Masters {\n\t\tmasterSet[m.PrivateIP()] = struct{}{}\n\t}\n\n\tfor k, _ := range nodeSet {\n\t\tif _, ok := masterSet[k]; !ok {\n\t\t\treturn fmt.Errorf(\"detected self-hosted etcd pod running on non-master node %v %v\", masterSet, nodeSet)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bootstrap\n\nimport (\n\t\"testing\"\n)\n\ntype TestAction struct {\n\tFoo string\n\tBar int\n}\n\nfunc (t *TestAction) Run(state *State) error {\n\treturn nil\n}\n\nfunc init() {\n\tRegister(\"test-action\", &TestAction{})\n}\n\nfunc TestRun(t *testing.T) {\n\tmanifest := []byte(`[{\"id\":\"1\", \"action\":\"test-action\", \"Foo\":\"bar\"}, {\"id\":\"2\", \"action\":\"test-action\", \"Bar\":2}]`)\n\terr := Run(manifest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n<commit_msg>bootstrap: Remove outdated test<commit_after><|endoftext|>"} {"text":"<commit_before>package btelegram\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n)\n\ntype Btelegram struct {\n\tc *tgbotapi.BotAPI\n\tConfig *config.Protocol\n\tRemote chan config.Message\n\tAccount string\n}\n\nvar flog *log.Entry\nvar protocol = \"telegram\"\n\nfunc init() {\n\tflog = log.WithFields(log.Fields{\"module\": protocol})\n}\n\nfunc New(cfg config.Protocol, account string, c chan config.Message) *Btelegram {\n\tb := &Btelegram{}\n\tb.Config = &cfg\n\tb.Remote = c\n\tb.Account = account\n\treturn b\n}\n\nfunc (b *Btelegram) Connect() error {\n\tvar err error\n\tflog.Info(\"Connecting\")\n\tb.c, err = tgbotapi.NewBotAPI(b.Config.Token)\n\tif err != nil {\n\t\tflog.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\tupdates, err := b.c.GetUpdatesChan(tgbotapi.NewUpdate(0))\n\tif err != nil {\n\t\tflog.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\tflog.Info(\"Connection succeeded\")\n\tgo b.handleRecv(updates)\n\treturn nil\n}\n\nfunc (b *Btelegram) Disconnect() error {\n\treturn nil\n\n}\n\nfunc (b *Btelegram) JoinChannel(channel string) error {\n\treturn nil\n}\n\nfunc (b *Btelegram) Send(msg config.Message) error {\n\tflog.Debugf(\"Receiving %#v\", msg)\n\tchatid, err := strconv.ParseInt(msg.Channel, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif b.Config.MessageFormat == \"HTML\" {\n\t\tmsg.Text = makeHTML(msg.Text)\n\t}\n\tm := tgbotapi.NewMessage(chatid, msg.Username+msg.Text)\n\tif b.Config.MessageFormat == \"HTML\" {\n\t\tm.ParseMode = tgbotapi.ModeHTML\n\t}\n\t_, err = b.c.Send(m)\n\treturn err\n}\n\nfunc (b *Btelegram) handleRecv(updates <-chan tgbotapi.Update) {\n\tfor update := range updates {\n\t\tvar message *tgbotapi.Message\n\t\tusername := \"\"\n\t\tchannel := \"\"\n\t\ttext := \"\"\n\t\t\/\/ handle channels\n\t\tif update.ChannelPost != nil {\n\t\t\tmessage = update.ChannelPost\n\t\t}\n\t\tif update.EditedChannelPost != nil && !b.Config.EditDisable {\n\t\t\tmessage = update.EditedChannelPost\n\t\t\tmessage.Text = message.Text + b.Config.EditSuffix\n\t\t}\n\t\t\/\/ handle groups\n\t\tif update.Message != nil {\n\t\t\tmessage = update.Message\n\t\t}\n\t\tif update.EditedMessage != nil && !b.Config.EditDisable {\n\t\t\tmessage = update.EditedMessage\n\t\t\tmessage.Text = message.Text + b.Config.EditSuffix\n\t\t}\n\t\tif message.From != nil {\n\t\t\tif b.Config.UseFirstName {\n\t\t\t\tusername = message.From.FirstName\n\t\t\t}\n\t\t\tif username == \"\" {\n\t\t\t\tusername = message.From.UserName\n\t\t\t\tif username == \"\" {\n\t\t\t\t\tusername = message.From.FirstName\n\t\t\t\t}\n\t\t\t}\n\t\t\ttext = message.Text\n\t\t\tchannel = strconv.FormatInt(message.Chat.ID, 10)\n\t\t}\n\n\t\tif username == \"\" {\n\t\t\tusername = \"unknown\"\n\t\t}\n\t\tif message.Sticker != nil {\n\t\t\ttext = text + \" \" + b.getFileDirectURL(message.Sticker.FileID)\n\t\t}\n\t\tif message.Video != nil {\n\t\t\ttext = text + \" \" + b.getFileDirectURL(message.Video.FileID)\n\t\t}\n\t\tif message.Photo != nil {\n\t\t\tphotos := *message.Photo\n\t\t\t\/\/ photo 3 is the biggest\n\t\t\tif len(photos) == 3 {\n\t\t\t\ttext = text + \" \" + b.getFileDirectURL(photos[2].FileID)\n\t\t\t}\n\t\t}\n\t\tif message.Document != nil {\n\t\t\ttext = text + \" \" + message.Document.FileName + \" : \" + b.getFileDirectURL(message.Document.FileID)\n\t\t}\n\t\tif text != \"\" {\n\t\t\tflog.Debugf(\"Sending message from %s on %s to gateway\", username, b.Account)\n\t\t\tb.Remote <- config.Message{Username: username, Text: text, Channel: channel, Account: b.Account, UserID: strconv.Itoa(message.From.ID)}\n\t\t}\n\t}\n}\n\nfunc (b *Btelegram) getFileDirectURL(id string) string {\n\tres, err := b.c.GetFileDirectURL(id)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn res\n}\n<commit_msg>Use the last (and biggest) photo to relay (telegram). Closes #184<commit_after>package btelegram\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n)\n\ntype Btelegram struct {\n\tc *tgbotapi.BotAPI\n\tConfig *config.Protocol\n\tRemote chan config.Message\n\tAccount string\n}\n\nvar flog *log.Entry\nvar protocol = \"telegram\"\n\nfunc init() {\n\tflog = log.WithFields(log.Fields{\"module\": protocol})\n}\n\nfunc New(cfg config.Protocol, account string, c chan config.Message) *Btelegram {\n\tb := &Btelegram{}\n\tb.Config = &cfg\n\tb.Remote = c\n\tb.Account = account\n\treturn b\n}\n\nfunc (b *Btelegram) Connect() error {\n\tvar err error\n\tflog.Info(\"Connecting\")\n\tb.c, err = tgbotapi.NewBotAPI(b.Config.Token)\n\tif err != nil {\n\t\tflog.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\tupdates, err := b.c.GetUpdatesChan(tgbotapi.NewUpdate(0))\n\tif err != nil {\n\t\tflog.Debugf(\"%#v\", err)\n\t\treturn err\n\t}\n\tflog.Info(\"Connection succeeded\")\n\tgo b.handleRecv(updates)\n\treturn nil\n}\n\nfunc (b *Btelegram) Disconnect() error {\n\treturn nil\n\n}\n\nfunc (b *Btelegram) JoinChannel(channel string) error {\n\treturn nil\n}\n\nfunc (b *Btelegram) Send(msg config.Message) error {\n\tflog.Debugf(\"Receiving %#v\", msg)\n\tchatid, err := strconv.ParseInt(msg.Channel, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif b.Config.MessageFormat == \"HTML\" {\n\t\tmsg.Text = makeHTML(msg.Text)\n\t}\n\tm := tgbotapi.NewMessage(chatid, msg.Username+msg.Text)\n\tif b.Config.MessageFormat == \"HTML\" {\n\t\tm.ParseMode = tgbotapi.ModeHTML\n\t}\n\t_, err = b.c.Send(m)\n\treturn err\n}\n\nfunc (b *Btelegram) handleRecv(updates <-chan tgbotapi.Update) {\n\tfor update := range updates {\n\t\tvar message *tgbotapi.Message\n\t\tusername := \"\"\n\t\tchannel := \"\"\n\t\ttext := \"\"\n\t\t\/\/ handle channels\n\t\tif update.ChannelPost != nil {\n\t\t\tmessage = update.ChannelPost\n\t\t}\n\t\tif update.EditedChannelPost != nil && !b.Config.EditDisable {\n\t\t\tmessage = update.EditedChannelPost\n\t\t\tmessage.Text = message.Text + b.Config.EditSuffix\n\t\t}\n\t\t\/\/ handle groups\n\t\tif update.Message != nil {\n\t\t\tmessage = update.Message\n\t\t}\n\t\tif update.EditedMessage != nil && !b.Config.EditDisable {\n\t\t\tmessage = update.EditedMessage\n\t\t\tmessage.Text = message.Text + b.Config.EditSuffix\n\t\t}\n\t\tif message.From != nil {\n\t\t\tif b.Config.UseFirstName {\n\t\t\t\tusername = message.From.FirstName\n\t\t\t}\n\t\t\tif username == \"\" {\n\t\t\t\tusername = message.From.UserName\n\t\t\t\tif username == \"\" {\n\t\t\t\t\tusername = message.From.FirstName\n\t\t\t\t}\n\t\t\t}\n\t\t\ttext = message.Text\n\t\t\tchannel = strconv.FormatInt(message.Chat.ID, 10)\n\t\t}\n\n\t\tif username == \"\" {\n\t\t\tusername = \"unknown\"\n\t\t}\n\t\tif message.Sticker != nil {\n\t\t\ttext = text + \" \" + b.getFileDirectURL(message.Sticker.FileID)\n\t\t}\n\t\tif message.Video != nil {\n\t\t\ttext = text + \" \" + b.getFileDirectURL(message.Video.FileID)\n\t\t}\n\t\tif message.Photo != nil {\n\t\t\tphotos := *message.Photo\n\t\t\t\/\/ last photo is the biggest\n\t\t\ttext = text + \" \" + b.getFileDirectURL(photos[len(photos)-1].FileID)\n\t\t}\n\t\tif message.Document != nil {\n\t\t\ttext = text + \" \" + message.Document.FileName + \" : \" + b.getFileDirectURL(message.Document.FileID)\n\t\t}\n\t\tif text != \"\" {\n\t\t\tflog.Debugf(\"Sending message from %s on %s to gateway\", username, b.Account)\n\t\t\tb.Remote <- config.Message{Username: username, Text: text, Channel: channel, Account: b.Account, UserID: strconv.Itoa(message.From.ID)}\n\t\t}\n\t}\n}\n\nfunc (b *Btelegram) getFileDirectURL(id string) string {\n\tres, err := b.c.GetFileDirectURL(id)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage kubernetes\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/set\"\n\t\"github.com\/tsuru\/tsuru\/volume\"\n\t\"github.com\/ugorji\/go\/codec\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\ntype volumeOptions struct {\n\tPlugin string\n\tStorageClass string `json:\"storage-class\"`\n\tCapacity resource.Quantity\n\tAccessModes string `json:\"access-modes\"`\n}\n\nvar allowedNonPersistentVolumes = set.FromValues(\"emptyDir\")\n\nfunc (opts *volumeOptions) isPersistent() bool {\n\treturn !allowedNonPersistentVolumes.Includes(opts.Plugin)\n}\n\nfunc createVolumesForApp(client *ClusterClient, app provision.App) ([]apiv1.Volume, []apiv1.VolumeMount, error) {\n\tvolumes, err := volume.ListByApp(app.GetName())\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\tvar kubeVolumes []apiv1.Volume\n\tvar kubeMounts []apiv1.VolumeMount\n\tfor i := range volumes {\n\t\topts, err := validateVolume(&volumes[i])\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif opts.isPersistent() {\n\t\t\terr = createVolume(client, &volumes[i], opts, app)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\tvolume, mounts, err := bindsForVolume(&volumes[i], opts, app.GetName())\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tkubeMounts = append(kubeMounts, mounts...)\n\t\tkubeVolumes = append(kubeVolumes, *volume)\n\t}\n\treturn kubeVolumes, kubeMounts, nil\n}\n\nfunc bindsForVolume(v *volume.Volume, opts *volumeOptions, appName string) (*apiv1.Volume, []apiv1.VolumeMount, error) {\n\tvar kubeMounts []apiv1.VolumeMount\n\tbinds, err := v.LoadBindsForApp(appName)\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\tallReadOnly := true\n\tfor _, b := range binds {\n\t\tkubeMounts = append(kubeMounts, apiv1.VolumeMount{\n\t\t\tName: volumeName(v.Name),\n\t\t\tMountPath: b.ID.MountPoint,\n\t\t\tReadOnly: b.ReadOnly,\n\t\t})\n\t\tif !b.ReadOnly {\n\t\t\tallReadOnly = false\n\t\t}\n\t}\n\tkubeVol := apiv1.Volume{\n\t\tName: volumeName(v.Name),\n\t}\n\tif opts.isPersistent() {\n\t\tkubeVol.VolumeSource = apiv1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: volumeClaimName(v.Name),\n\t\t\t\tReadOnly: allReadOnly,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tkubeVol.VolumeSource, err = nonPersistentVolume(v, opts)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn &kubeVol, kubeMounts, nil\n}\n\nfunc nonPersistentVolume(v *volume.Volume, opts *volumeOptions) (apiv1.VolumeSource, error) {\n\tvar volumeSrc apiv1.VolumeSource\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\topts.Plugin: v.Opts,\n\t})\n\tif err != nil {\n\t\treturn volumeSrc, errors.WithStack(err)\n\t}\n\th := &codec.JsonHandle{}\n\tdec := codec.NewDecoderBytes(data, h)\n\terr = dec.Decode(&volumeSrc)\n\tif err != nil {\n\t\treturn volumeSrc, errors.WithStack(err)\n\t}\n\treturn volumeSrc, nil\n}\n\nfunc validateVolume(v *volume.Volume) (*volumeOptions, error) {\n\tvar opts volumeOptions\n\terr := v.UnmarshalPlan(&opts)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif opts.Plugin != \"\" && opts.StorageClass != \"\" {\n\t\treturn nil, errors.New(\"both volume plan plugin and storage-class cannot be set\")\n\t}\n\tif opts.Plugin == \"\" && opts.StorageClass == \"\" {\n\t\treturn nil, errors.New(\"both volume plan plugin and storage-class are empty\")\n\t}\n\tif !opts.isPersistent() {\n\t\treturn &opts, nil\n\t}\n\tif capRaw, ok := v.Opts[\"capacity\"]; ok {\n\t\tdelete(v.Opts, \"capacity\")\n\t\topts.Capacity, err = resource.ParseQuantity(capRaw)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"unable to parse `capacity` opt\")\n\t\t}\n\t}\n\tif opts.Capacity.IsZero() {\n\t\treturn nil, errors.New(\"capacity is mandatory either in plan or as volume opts\")\n\t}\n\tif accessModesRaw, ok := v.Opts[\"access-modes\"]; ok {\n\t\tdelete(v.Opts, \"access-modes\")\n\t\topts.AccessModes = accessModesRaw\n\t}\n\tif opts.AccessModes == \"\" {\n\t\treturn nil, errors.New(\"access-modes is mandatory either in plan or as volume opts\")\n\t}\n\treturn &opts, nil\n}\n\nfunc deleteVolume(client *ClusterClient, name string) error {\n\tv, err := volume.Load(name)\n\tif err != nil {\n\t\tif err == volume.ErrVolumeNotFound {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.CoreV1().PersistentVolumes().Delete(volumeName(name), &metav1.DeleteOptions{\n\t\tPropagationPolicy: propagationPtr(metav1.DeletePropagationForeground),\n\t})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\terr = client.CoreV1().PersistentVolumeClaims(namespace).Delete(volumeClaimName(name), &metav1.DeleteOptions{\n\t\tPropagationPolicy: propagationPtr(metav1.DeletePropagationForeground),\n\t})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}\n\nfunc createVolume(client *ClusterClient, v *volume.Volume, opts *volumeOptions, app provision.App) error {\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlabelSet := provision.VolumeLabels(provision.VolumeLabelsOpts{\n\t\tName: v.Name,\n\t\tProvisioner: provisionerName,\n\t\tPrefix: tsuruLabelPrefix,\n\t\tPool: v.Pool,\n\t\tPlan: v.Plan.Name,\n\t})\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\topts.Plugin: v.Opts,\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\th := &codec.JsonHandle{}\n\tdec := codec.NewDecoderBytes(data, h)\n\tpvSpec := apiv1.PersistentVolumeSpec{}\n\terr = dec.Decode(&pvSpec)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"unable to decode as pv spec: %s\", string(data))\n\t}\n\tpvSpec.Capacity = apiv1.ResourceList{\n\t\tapiv1.ResourceStorage: opts.Capacity,\n\t}\n\tfor _, am := range strings.Split(opts.AccessModes, \",\") {\n\t\tpvSpec.AccessModes = append(pvSpec.AccessModes, apiv1.PersistentVolumeAccessMode(am))\n\t}\n\tvar volName string\n\tvar selector *metav1.LabelSelector\n\tif opts.Plugin != \"\" {\n\t\tpv := &apiv1.PersistentVolume{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: volumeName(v.Name),\n\t\t\t\tLabels: labelSet.ToLabels(),\n\t\t\t},\n\t\t\tSpec: pvSpec,\n\t\t}\n\t\t_, err = client.CoreV1().PersistentVolumes().Create(pv)\n\t\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tselector = &metav1.LabelSelector{\n\t\t\tMatchLabels: labelSet.ToVolumeSelector(),\n\t\t}\n\t\tvolName = volumeName(v.Name)\n\t}\n\tpvc := &apiv1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: volumeClaimName(v.Name),\n\t\t\tLabels: labelSet.ToLabels(),\n\t\t},\n\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\tRequests: pvSpec.Capacity,\n\t\t\t},\n\t\t\tAccessModes: pvSpec.AccessModes,\n\t\t\tSelector: selector,\n\t\t\tVolumeName: volName,\n\t\t\tStorageClassName: &opts.StorageClass,\n\t\t},\n\t}\n\t_, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(pvc)\n\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}\n\nfunc volumeExists(client *ClusterClient, name string) (bool, error) {\n\tv, err := volume.Load(name)\n\tif err != nil {\n\t\tif err == volume.ErrVolumeNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t_, pvErr := client.CoreV1().PersistentVolumes().Get(volumeName(name), metav1.GetOptions{})\n\t_, pvcErr := client.CoreV1().PersistentVolumeClaims(namespace).Get(volumeClaimName(name), metav1.GetOptions{})\n\tif k8sErrors.IsNotFound(pvErr) && k8sErrors.IsNotFound(pvcErr) {\n\t\treturn false, nil\n\t}\n\tif pvErr != nil {\n\t\treturn false, errors.WithStack(pvErr)\n\t}\n\tif pvcErr != nil {\n\t\treturn false, errors.WithStack(pvcErr)\n\t}\n\treturn true, nil\n}\n\nfunc getNamespaceForVolume(client *ClusterClient, v *volume.Volume) (string, error) {\n\tbinds, err := v.LoadBinds()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(binds) == 0 {\n\t\treturn client.PoolNamespace(v.Pool), nil\n\t}\n\tvar namespace string\n\tfor _, b := range binds {\n\t\tns, err := client.appNamespaceByName(b.ID.App)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif namespace == \"\" {\n\t\t\tnamespace = ns\n\t\t\tcontinue\n\t\t}\n\t\tif ns != namespace {\n\t\t\treturn \"\", errors.New(\"multiple namespaces for volume\")\n\t\t}\n\t}\n\treturn namespace, nil\n}\n<commit_msg>provision\/kubernetes: fix volume create when no plugin is specified<commit_after>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage kubernetes\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/set\"\n\t\"github.com\/tsuru\/tsuru\/volume\"\n\t\"github.com\/ugorji\/go\/codec\"\n\tapiv1 \"k8s.io\/api\/core\/v1\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\ntype volumeOptions struct {\n\tPlugin string\n\tStorageClass string `json:\"storage-class\"`\n\tCapacity resource.Quantity\n\tAccessModes string `json:\"access-modes\"`\n}\n\nvar allowedNonPersistentVolumes = set.FromValues(\"emptyDir\")\n\nfunc (opts *volumeOptions) isPersistent() bool {\n\treturn !allowedNonPersistentVolumes.Includes(opts.Plugin)\n}\n\nfunc createVolumesForApp(client *ClusterClient, app provision.App) ([]apiv1.Volume, []apiv1.VolumeMount, error) {\n\tvolumes, err := volume.ListByApp(app.GetName())\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\tvar kubeVolumes []apiv1.Volume\n\tvar kubeMounts []apiv1.VolumeMount\n\tfor i := range volumes {\n\t\topts, err := validateVolume(&volumes[i])\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif opts.isPersistent() {\n\t\t\terr = createVolume(client, &volumes[i], opts, app)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\tvolume, mounts, err := bindsForVolume(&volumes[i], opts, app.GetName())\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tkubeMounts = append(kubeMounts, mounts...)\n\t\tkubeVolumes = append(kubeVolumes, *volume)\n\t}\n\treturn kubeVolumes, kubeMounts, nil\n}\n\nfunc bindsForVolume(v *volume.Volume, opts *volumeOptions, appName string) (*apiv1.Volume, []apiv1.VolumeMount, error) {\n\tvar kubeMounts []apiv1.VolumeMount\n\tbinds, err := v.LoadBindsForApp(appName)\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\tallReadOnly := true\n\tfor _, b := range binds {\n\t\tkubeMounts = append(kubeMounts, apiv1.VolumeMount{\n\t\t\tName: volumeName(v.Name),\n\t\t\tMountPath: b.ID.MountPoint,\n\t\t\tReadOnly: b.ReadOnly,\n\t\t})\n\t\tif !b.ReadOnly {\n\t\t\tallReadOnly = false\n\t\t}\n\t}\n\tkubeVol := apiv1.Volume{\n\t\tName: volumeName(v.Name),\n\t}\n\tif opts.isPersistent() {\n\t\tkubeVol.VolumeSource = apiv1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: volumeClaimName(v.Name),\n\t\t\t\tReadOnly: allReadOnly,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tkubeVol.VolumeSource, err = nonPersistentVolume(v, opts)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn &kubeVol, kubeMounts, nil\n}\n\nfunc nonPersistentVolume(v *volume.Volume, opts *volumeOptions) (apiv1.VolumeSource, error) {\n\tvar volumeSrc apiv1.VolumeSource\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\topts.Plugin: v.Opts,\n\t})\n\tif err != nil {\n\t\treturn volumeSrc, errors.WithStack(err)\n\t}\n\th := &codec.JsonHandle{}\n\tdec := codec.NewDecoderBytes(data, h)\n\terr = dec.Decode(&volumeSrc)\n\tif err != nil {\n\t\treturn volumeSrc, errors.WithStack(err)\n\t}\n\treturn volumeSrc, nil\n}\n\nfunc validateVolume(v *volume.Volume) (*volumeOptions, error) {\n\tvar opts volumeOptions\n\terr := v.UnmarshalPlan(&opts)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif opts.Plugin != \"\" && opts.StorageClass != \"\" {\n\t\treturn nil, errors.New(\"both volume plan plugin and storage-class cannot be set\")\n\t}\n\tif opts.Plugin == \"\" && opts.StorageClass == \"\" {\n\t\treturn nil, errors.New(\"both volume plan plugin and storage-class are empty\")\n\t}\n\tif !opts.isPersistent() {\n\t\treturn &opts, nil\n\t}\n\tif capRaw, ok := v.Opts[\"capacity\"]; ok {\n\t\tdelete(v.Opts, \"capacity\")\n\t\topts.Capacity, err = resource.ParseQuantity(capRaw)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"unable to parse `capacity` opt\")\n\t\t}\n\t}\n\tif opts.Capacity.IsZero() {\n\t\treturn nil, errors.New(\"capacity is mandatory either in plan or as volume opts\")\n\t}\n\tif accessModesRaw, ok := v.Opts[\"access-modes\"]; ok {\n\t\tdelete(v.Opts, \"access-modes\")\n\t\topts.AccessModes = accessModesRaw\n\t}\n\tif opts.AccessModes == \"\" {\n\t\treturn nil, errors.New(\"access-modes is mandatory either in plan or as volume opts\")\n\t}\n\treturn &opts, nil\n}\n\nfunc deleteVolume(client *ClusterClient, name string) error {\n\tv, err := volume.Load(name)\n\tif err != nil {\n\t\tif err == volume.ErrVolumeNotFound {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.CoreV1().PersistentVolumes().Delete(volumeName(name), &metav1.DeleteOptions{\n\t\tPropagationPolicy: propagationPtr(metav1.DeletePropagationForeground),\n\t})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\terr = client.CoreV1().PersistentVolumeClaims(namespace).Delete(volumeClaimName(name), &metav1.DeleteOptions{\n\t\tPropagationPolicy: propagationPtr(metav1.DeletePropagationForeground),\n\t})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}\n\nfunc createVolume(client *ClusterClient, v *volume.Volume, opts *volumeOptions, app provision.App) error {\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlabelSet := provision.VolumeLabels(provision.VolumeLabelsOpts{\n\t\tName: v.Name,\n\t\tProvisioner: provisionerName,\n\t\tPrefix: tsuruLabelPrefix,\n\t\tPool: v.Pool,\n\t\tPlan: v.Plan.Name,\n\t})\n\tcapacity := apiv1.ResourceList{\n\t\tapiv1.ResourceStorage: opts.Capacity,\n\t}\n\tvar accessModes []apiv1.PersistentVolumeAccessMode\n\tfor _, am := range strings.Split(opts.AccessModes, \",\") {\n\t\taccessModes = append(accessModes, apiv1.PersistentVolumeAccessMode(am))\n\t}\n\tvar volName string\n\tvar selector *metav1.LabelSelector\n\tif opts.Plugin != \"\" {\n\t\tpvSpec := apiv1.PersistentVolumeSpec{}\n\t\tvar data []byte\n\t\tdata, err = json.Marshal(map[string]interface{}{\n\t\t\topts.Plugin: v.Opts,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\th := &codec.JsonHandle{}\n\t\tdec := codec.NewDecoderBytes(data, h)\n\t\terr = dec.Decode(&pvSpec)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"unable to decode as pv spec: %s\", string(data))\n\t\t}\n\t\tpvSpec.Capacity = capacity\n\t\tpvSpec.AccessModes = accessModes\n\t\tpv := &apiv1.PersistentVolume{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: volumeName(v.Name),\n\t\t\t\tLabels: labelSet.ToLabels(),\n\t\t\t},\n\t\t\tSpec: pvSpec,\n\t\t}\n\t\t_, err = client.CoreV1().PersistentVolumes().Create(pv)\n\t\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tselector = &metav1.LabelSelector{\n\t\t\tMatchLabels: labelSet.ToVolumeSelector(),\n\t\t}\n\t\tvolName = volumeName(v.Name)\n\t}\n\tpvc := &apiv1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: volumeClaimName(v.Name),\n\t\t\tLabels: labelSet.ToLabels(),\n\t\t},\n\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\tRequests: capacity,\n\t\t\t},\n\t\t\tAccessModes: accessModes,\n\t\t\tSelector: selector,\n\t\t\tVolumeName: volName,\n\t\t\tStorageClassName: &opts.StorageClass,\n\t\t},\n\t}\n\t_, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(pvc)\n\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}\n\nfunc volumeExists(client *ClusterClient, name string) (bool, error) {\n\tv, err := volume.Load(name)\n\tif err != nil {\n\t\tif err == volume.ErrVolumeNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tnamespace, err := getNamespaceForVolume(client, v)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t_, pvErr := client.CoreV1().PersistentVolumes().Get(volumeName(name), metav1.GetOptions{})\n\t_, pvcErr := client.CoreV1().PersistentVolumeClaims(namespace).Get(volumeClaimName(name), metav1.GetOptions{})\n\tif k8sErrors.IsNotFound(pvErr) && k8sErrors.IsNotFound(pvcErr) {\n\t\treturn false, nil\n\t}\n\tif pvErr != nil {\n\t\treturn false, errors.WithStack(pvErr)\n\t}\n\tif pvcErr != nil {\n\t\treturn false, errors.WithStack(pvcErr)\n\t}\n\treturn true, nil\n}\n\nfunc getNamespaceForVolume(client *ClusterClient, v *volume.Volume) (string, error) {\n\tbinds, err := v.LoadBinds()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(binds) == 0 {\n\t\treturn client.PoolNamespace(v.Pool), nil\n\t}\n\tvar namespace string\n\tfor _, b := range binds {\n\t\tns, err := client.appNamespaceByName(b.ID.App)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif namespace == \"\" {\n\t\t\tnamespace = ns\n\t\t\tcontinue\n\t\t}\n\t\tif ns != namespace {\n\t\t\treturn \"\", errors.New(\"multiple namespaces for volume\")\n\t\t}\n\t}\n\treturn namespace, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nfunc (b *Builder) prepTarget() error {\n\t\/\/ Create output directory if not exists\n\toutputDir := filepath.Join(b.Root, filepath.Dir(b.Bin))\n\tlogrus.Debugf(\"preparing target dir %s\", outputDir)\n\tos.RemoveAll(outputDir)\n\n\tif _, err := os.Stat(outputDir); os.IsNotExist(err) {\n\t\tos.MkdirAll(outputDir, 0776)\n\t\tlogrus.Debugf(\"creating target dir %s\", outputDir)\n\t}\n\treturn nil\n}\n<commit_msg>running `buffalo build` with `-o` flag deletes working directory fixes #827<commit_after>package build\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nfunc (b *Builder) prepTarget() error {\n\t\/\/ Create output directory if not exists\n\toutputDir := filepath.Join(b.Root, filepath.Dir(b.Bin))\n\tif _, err := os.Stat(outputDir); os.IsNotExist(err) {\n\t\tos.MkdirAll(outputDir, 0776)\n\t\tlogrus.Debugf(\"creating target dir %s\", outputDir)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Michael Lihs\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage client_test\n\nimport (\n\t\/\/. \"github.com\/michaellihs\/golab\/client\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\/\/. \"github.com\/onsi\/gomega\"\n)\n\n\nvar _ = Describe(\"GitlabClient\", func() {\n\n})\n<commit_msg>deleted unused client folder<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"bufio\"\n \"fmt\"\n \"net\/http\"\n \"strings\"\n \"time\"\n)\n\nfunc main() {\n status()\n}\n\nfunc status() {\n host := \"http:\/\/127.0.0.1:19071\"\n path := \"\/ApplicationStatus\"\n description := \"Config server\"\n \/\/response := request(host, path, description)\n\n client := &http.Client{\n\t Timeout: time.Second * 30,\n }\n resp, err := client.Get(host + path)\n if err != nil {\n error(\"Could not connect to\", strings.ToLower(description), \"at\", host)\n detail(err.Error())\n return\n }\n defer resp.Body.Close()\n\n scanner := bufio.NewScanner(resp.Body)\n\n if err := scanner.Err(); err != nil {\n error(\"Error reading data from\", strings.ToLower(description), \"at\", host)\n detail(err.Error())\n } else if resp.StatusCode != 200 {\n error(description, \"at\", host, \"is not ready\")\n detail(\"Response status:\", resp.Status)\n } else {\n success(description, \"at\", host, \"is ready\")\n }\n}\n\nfunc request(host string, path string, description string) {\n}\n\nfunc error(messages ...string) {\n print(\"\\033[31m\", messages)\n}\n\nfunc success(messages ...string) {\n print(\"\\033[32m\", messages)\n}\n\nfunc detail(messages ...string) {\n print(\"\\033[33m\", messages)\n}\n\nfunc print(prefix string, messages []string) {\n fmt.Print(prefix)\n for i := 0; i < len(messages); i++ {\n fmt.Print(messages[i])\n fmt.Print(\" \")\n }\n fmt.Println(\"\")\n}\n<commit_msg>Extract http get error handling<commit_after>package main\n\nimport (\n \"bufio\"\n \"fmt\"\n \"net\/http\"\n \"strings\"\n \"time\"\n)\n\nfunc main() {\n status()\n}\n\nfunc status() {\n host := \"http:\/\/127.0.0.1:19071\"\n path := \"\/ApplicationStatus\"\n description := \"Config server\"\n response := request(host, path, description)\n if (response == nil) {\n return\n }\n\n if response.StatusCode != 200 {\n printError(description, \"at\", host, \"is not ready\")\n printDetail(\"Response status:\", response.Status)\n } else {\n printSuccess(description, \"at\", host, \"is ready\")\n }\n}\n\nfunc request(host string, path string, description string) (response *http.Response) {\n client := &http.Client{\n\t Timeout: time.Second * 30,\n }\n response, error := client.Get(host + path)\n if error != nil {\n printError(\"Could not connect to\", strings.ToLower(description), \"at\", host)\n printDetail(error.Error())\n return\n }\n defer response.Body.Close()\n\n scanner := bufio.NewScanner(response.Body)\n\n if error := scanner.Err(); error != nil {\n printError(\"Error reading data from\", strings.ToLower(description), \"at\", host)\n printDetail(error.Error())\n return nil\n } else {\n return response\n }\n}\n\nfunc printError(messages ...string) {\n print(\"\\033[31m\", messages)\n}\n\nfunc printSuccess(messages ...string) {\n print(\"\\033[32m\", messages)\n}\n\nfunc printDetail(messages ...string) {\n print(\"\\033[33m\", messages)\n}\n\nfunc print(prefix string, messages []string) {\n fmt.Print(prefix)\n for i := 0; i < len(messages); i++ {\n fmt.Print(messages[i])\n fmt.Print(\" \")\n }\n fmt.Println(\"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kusubooru\/shimmie2-tools\/bulk\"\n)\n\nvar fns = template.FuncMap{\n\t\"last\": func(s []string) string {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn s[len(s)-1]\n\t},\n}\n\nvar templates = template.Must(template.New(\"\").Funcs(fns).ParseGlob(\"web\/*.tmpl\"))\n\nvar (\n\tdirectory = flag.String(\"dir\", \".\", \"the directory that contains the images\")\n\tcsvFilename = flag.String(\"csv\", \"bulk.csv\", \"the name of the CSV file\")\n\tpathPrefix = flag.String(\"prefix\", \"\", \"the path that should be prefixed before the directory and the image name on the CSV file\")\n\tport = flag.String(\"port\", \"8080\", \"server port\")\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: bulk-add-csv-ui [Options...]\\n\")\n\tfmt.Fprintf(os.Stderr, \"Options:\\n\")\n\tflag.PrintDefaults()\n}\n\ntype model struct {\n\tErr error\n\tPrefix string\n\tDir string\n\tCSVFilename string\n\tImages []bulk.Image\n}\n\nvar globalModel *model\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(2)\n\t}\n}\n\nfunc run() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\td, err := filepath.Abs(*directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*directory = d\n\n\tm, err := loadFromCSVFile(*directory, *csvFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglobalModel = m\n\n\tif *pathPrefix != \"\" {\n\t\tglobalModel.Prefix = *pathPrefix\n\t}\n\n\thttp.Handle(\"\/\", http.HandlerFunc(indexHandler))\n\thttp.Handle(\"\/load\", http.HandlerFunc(loadHandler))\n\thttp.Handle(\"\/update\", http.HandlerFunc(updateHandler))\n\thttp.Handle(\"\/img\/\", http.HandlerFunc(serveImage))\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(\"web\/static\"))))\n\n\tgo func() {\n\t\tlocalURL := fmt.Sprintf(\"http:\/\/localhost:%v\", *port)\n\t\tif err := browserOpen(localURL); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: could not open browser, please visit %v manually.\\n\", localURL)\n\t\t}\n\t}()\n\n\tfmt.Println(\"Starting server at :\" + *port)\n\tif err := http.ListenAndServe(\":\"+*port, nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc loadFromCSVFile(dir, csvFilename string) (*model, error) {\n\tm := &model{Dir: dir, CSVFilename: csvFilename}\n\n\t\/\/ Loading images from folder\n\timages, err := bulk.LoadImages(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf, err := os.OpenFile(filepath.Join(dir, csvFilename), os.O_RDONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil && err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\t\/\/ Loading CSV image data\n\timagesWithInfo, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Images = bulk.Combine(images, imagesWithInfo)\n\n\t\/\/ Getting current prefix\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\treturn nil, err\n\t}\n\tcp, err := bulk.CurrentPrefix(dir, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Prefix = cp\n\n\treturn m, nil\n}\n\nfunc loadHandler(w http.ResponseWriter, r *http.Request) {\n\tf, h, err := r.FormFile(\"csvFilename\")\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not parse multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close multipart file: %v\\n\", cerr)\n\t\t}\n\t}()\n\n\t\/\/ TODO: Extract load logic to function.\n\timg, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load image info from CSV File: %v\", err)\n\t\trender(w, \"index\", globalModel)\n\t\treturn\n\t}\n\tglobalModel.CSVFilename = h.Filename\n\tglobalModel.Images = bulk.Combine(globalModel.Images, img)\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not seek multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tprefix, err := bulk.CurrentPrefix(globalModel.Dir, f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read current prefix from multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = bulk.Save(globalModel.Images, globalModel.Dir, globalModel.CSVFilename, prefix)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not save file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\tm, err := loadFromCSVFile(globalModel.Dir, globalModel.CSVFilename)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load from CSV File: %v\", err)\n\t} else {\n\t\tglobalModel = m\n\t}\n\n\trender(w, \"index\", globalModel)\n}\n\nfunc render(w http.ResponseWriter, tmpl string, model interface{}) {\n\tif err := templates.ExecuteTemplate(w, tmpl+\".tmpl\", model); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, \"could not parse form\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ prefix\n\tglobalModel.Prefix = r.PostForm[\"prefix\"][0]\n\t\/\/ csvFilename\n\tglobalModel.CSVFilename = r.PostForm[\"csvFilename\"][0]\n\tfor _, img := range globalModel.Images {\n\t\t\/\/ tags\n\t\tareaTags := r.PostForm[fmt.Sprintf(\"image[%d].tags\", img.ID)]\n\t\tglobalModel.Images[img.ID].Tags = strings.Fields(areaTags[0])\n\t\t\/\/ source\n\t\tglobalModel.Images[img.ID].Source = r.PostForm[fmt.Sprintf(\"image[%d].source\", img.ID)][0]\n\t\t\/\/ rating\n\t\trating := r.PostForm[fmt.Sprintf(\"image[%d].rating\", img.ID)]\n\t\tif len(rating) != 0 {\n\t\t\tglobalModel.Images[img.ID].Rating = rating[0]\n\t\t}\n\t}\n\n\tif err := saveToCSVFile(globalModel); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save to CSV file: %v\", err)\n\t\trender(w, \"index\", globalModel)\n\t} else {\n\t\tglobalModel.Err = nil\n\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t}\n}\n\nfunc saveToCSVFile(m *model) error {\n\treturn bulk.Save(m.Images, m.Dir, m.CSVFilename, m.Prefix)\n}\n\nfunc serveImage(w http.ResponseWriter, r *http.Request) {\n\tidStr := r.URL.Path[strings.LastIndex(r.URL.Path, \"\/\")+1:]\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"%v is not a valid image ID\", idStr), http.StatusBadRequest)\n\t\treturn\n\t}\n\timg := bulk.FindByID(globalModel.Images, id)\n\tif img == nil {\n\t\thttp.Error(w, fmt.Sprintf(\"no image found with ID: %v\", id), http.StatusNotFound)\n\t\treturn\n\t}\n\tp := filepath.Join(*directory, img.Name)\n\n\tf, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not open image %v\", p), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close image file: %v\\n\", cerr)\n\t\t}\n\t}()\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read image %v\", p), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc browserOpen(input string) error {\n\tvar err error\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\terr = exec.Command(\"xdg-open\", input).Start()\n\tcase \"windows\":\n\t\terr = exec.Command(\"cmd\", \"\/C\", \"start\", \"\", input).Start()\n\tcase \"darwin\":\n\t\terr = exec.Command(\"open\", input).Start()\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported platform\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Fix http.Error message in serveImage<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kusubooru\/shimmie2-tools\/bulk\"\n)\n\nvar fns = template.FuncMap{\n\t\"last\": func(s []string) string {\n\t\tif len(s) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn s[len(s)-1]\n\t},\n}\n\nvar templates = template.Must(template.New(\"\").Funcs(fns).ParseGlob(\"web\/*.tmpl\"))\n\nvar (\n\tdirectory = flag.String(\"dir\", \".\", \"the directory that contains the images\")\n\tcsvFilename = flag.String(\"csv\", \"bulk.csv\", \"the name of the CSV file\")\n\tpathPrefix = flag.String(\"prefix\", \"\", \"the path that should be prefixed before the directory and the image name on the CSV file\")\n\tport = flag.String(\"port\", \"8080\", \"server port\")\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: bulk-add-csv-ui [Options...]\\n\")\n\tfmt.Fprintf(os.Stderr, \"Options:\\n\")\n\tflag.PrintDefaults()\n}\n\ntype model struct {\n\tErr error\n\tPrefix string\n\tDir string\n\tCSVFilename string\n\tImages []bulk.Image\n}\n\nvar globalModel *model\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(2)\n\t}\n}\n\nfunc run() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\td, err := filepath.Abs(*directory)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*directory = d\n\n\tm, err := loadFromCSVFile(*directory, *csvFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglobalModel = m\n\n\tif *pathPrefix != \"\" {\n\t\tglobalModel.Prefix = *pathPrefix\n\t}\n\n\thttp.Handle(\"\/\", http.HandlerFunc(indexHandler))\n\thttp.Handle(\"\/load\", http.HandlerFunc(loadHandler))\n\thttp.Handle(\"\/update\", http.HandlerFunc(updateHandler))\n\thttp.Handle(\"\/img\/\", http.HandlerFunc(serveImage))\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(http.Dir(\"web\/static\"))))\n\n\tgo func() {\n\t\tlocalURL := fmt.Sprintf(\"http:\/\/localhost:%v\", *port)\n\t\tif err := browserOpen(localURL); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: could not open browser, please visit %v manually.\\n\", localURL)\n\t\t}\n\t}()\n\n\tfmt.Println(\"Starting server at :\" + *port)\n\tif err := http.ListenAndServe(\":\"+*port, nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc loadFromCSVFile(dir, csvFilename string) (*model, error) {\n\tm := &model{Dir: dir, CSVFilename: csvFilename}\n\n\t\/\/ Loading images from folder\n\timages, err := bulk.LoadImages(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf, err := os.OpenFile(filepath.Join(dir, csvFilename), os.O_RDONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil && err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\t\/\/ Loading CSV image data\n\timagesWithInfo, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Images = bulk.Combine(images, imagesWithInfo)\n\n\t\/\/ Getting current prefix\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\treturn nil, err\n\t}\n\tcp, err := bulk.CurrentPrefix(dir, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Prefix = cp\n\n\treturn m, nil\n}\n\nfunc loadHandler(w http.ResponseWriter, r *http.Request) {\n\tf, h, err := r.FormFile(\"csvFilename\")\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not parse multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close multipart file: %v\\n\", cerr)\n\t\t}\n\t}()\n\n\t\/\/ TODO: Extract load logic to function.\n\timg, err := bulk.LoadCSV(f)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load image info from CSV File: %v\", err)\n\t\trender(w, \"index\", globalModel)\n\t\treturn\n\t}\n\tglobalModel.CSVFilename = h.Filename\n\tglobalModel.Images = bulk.Combine(globalModel.Images, img)\n\tif _, err = f.Seek(0, 0); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not seek multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tprefix, err := bulk.CurrentPrefix(globalModel.Dir, f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read current prefix from multipart file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = bulk.Save(globalModel.Images, globalModel.Dir, globalModel.CSVFilename, prefix)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not save file: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\tm, err := loadFromCSVFile(globalModel.Dir, globalModel.CSVFilename)\n\tif err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not load from CSV File: %v\", err)\n\t} else {\n\t\tglobalModel = m\n\t}\n\n\trender(w, \"index\", globalModel)\n}\n\nfunc render(w http.ResponseWriter, tmpl string, model interface{}) {\n\tif err := templates.ExecuteTemplate(w, tmpl+\".tmpl\", model); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc updateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, \"could not parse form\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ prefix\n\tglobalModel.Prefix = r.PostForm[\"prefix\"][0]\n\t\/\/ csvFilename\n\tglobalModel.CSVFilename = r.PostForm[\"csvFilename\"][0]\n\tfor _, img := range globalModel.Images {\n\t\t\/\/ tags\n\t\tareaTags := r.PostForm[fmt.Sprintf(\"image[%d].tags\", img.ID)]\n\t\tglobalModel.Images[img.ID].Tags = strings.Fields(areaTags[0])\n\t\t\/\/ source\n\t\tglobalModel.Images[img.ID].Source = r.PostForm[fmt.Sprintf(\"image[%d].source\", img.ID)][0]\n\t\t\/\/ rating\n\t\trating := r.PostForm[fmt.Sprintf(\"image[%d].rating\", img.ID)]\n\t\tif len(rating) != 0 {\n\t\t\tglobalModel.Images[img.ID].Rating = rating[0]\n\t\t}\n\t}\n\n\tif err := saveToCSVFile(globalModel); err != nil {\n\t\tglobalModel.Err = fmt.Errorf(\"Error: could not save to CSV file: %v\", err)\n\t\trender(w, \"index\", globalModel)\n\t} else {\n\t\tglobalModel.Err = nil\n\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t}\n}\n\nfunc saveToCSVFile(m *model) error {\n\treturn bulk.Save(m.Images, m.Dir, m.CSVFilename, m.Prefix)\n}\n\nfunc serveImage(w http.ResponseWriter, r *http.Request) {\n\tidStr := r.URL.Path[strings.LastIndex(r.URL.Path, \"\/\")+1:]\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"%v is not a valid image ID\", idStr), http.StatusBadRequest)\n\t\treturn\n\t}\n\timg := bulk.FindByID(globalModel.Images, id)\n\tif img == nil {\n\t\thttp.Error(w, fmt.Sprintf(\"no image found with ID: %v\", id), http.StatusNotFound)\n\t\treturn\n\t}\n\tp := filepath.Join(*directory, img.Name)\n\n\tf, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not open image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cerr := f.Close(); cerr != nil {\n\t\t\tlog.Printf(\"Error: could not close image file: %v\\n\", cerr)\n\t\t}\n\t}()\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not read image: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not write image bytes: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc browserOpen(input string) error {\n\tvar err error\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\terr = exec.Command(\"xdg-open\", input).Start()\n\tcase \"windows\":\n\t\terr = exec.Command(\"cmd\", \"\/C\", \"start\", \"\", input).Start()\n\tcase \"darwin\":\n\t\terr = exec.Command(\"open\", input).Start()\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported platform\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Utility scans spectrum and shows channels on which Crazyflies found\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com\/krasin\/crazyradio\"\n)\n\nfunc main() {\n\tst, err := crazyradio.Start(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\taddr, err := st.Scan()\n\tif err != nil {\n\t\tlog.Fatalf(\"Scan failed: %v\", err)\n\t}\n\tlog.Printf(\"Found crazyflies: %v\", addr)\n}\n<commit_msg>Fix nil reference panic in crazyradio-scan. Pass usb.Hub to station.Start, instead of nil<commit_after>\/\/ Utility scans spectrum and shows channels on which Crazyflies found\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com\/krasin\/crazyradio\"\n\t\"github.com\/krasin\/crazyradio\/usb\"\n)\n\nfunc main() {\n\tst, err := crazyradio.Start(usb.Hub)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\taddr, err := st.Scan()\n\tif err != nil {\n\t\tlog.Fatalf(\"Scan failed: %v\", err)\n\t}\n\tlog.Printf(\"Found crazyflies: %v\", addr)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/Akagi201\/cryptotrader\/eosforce\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\ntype AccountBalance struct {\n\tStaked string `json:\"staked\"`\n\tAvailable float64 `json:\"avalable\"`\n\tCreated string `json:\"created\"`\n\tUnstaking string `json:\"unstaking\"`\n\tName string `json:\"name\"`\n}\n\nvar balances []AccountBalance\nvar fuckingGuys []string\n\nfunc main() {\n\n\tcontent, _ := ioutil.ReadFile(Opts.AccountFile)\n\terr := json.Unmarshal(content, &balances)\n\tif err != nil {\n\t\tlog.Fatalf(\"json file parse json failed, err: %v\", err)\n\t}\n\n\tc := eosforce.New([]string{\"docker\", \"exec\", \"eosforce\", \"cleos\"}, Opts.RpcScheme, Opts.RpcHost)\n\n\tfor {\n\t\tfor i, v := range balances {\n\t\t\tavailable, err := c.GetAvailable(context.Background(), v.Name)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"eosforce get available balance failed, err: %v\", err)\n\t\t\t}\n\t\t\tif available != v.Available {\n\t\t\t\tlog.Infof(\"Fucking Guy %v is dumping! before: %v, after: %v, diff: %v\", v.Name, v.Available, available, v.Available-available)\n\t\t\t\tbalances[i].Available = available\n\t\t\t\tfuckingGuys = append(fuckingGuys, v.Name)\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t\tlog.Infof(\"Fucking Guys: %v\", fuckingGuys)\n\t\ttime.Sleep(2*time.Minute)\n\t}\n}\n<commit_msg>feat(eosforcemonitor): add callbackbot<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"time\"\n\t\"net\/http\"\n\n\t\"github.com\/Akagi201\/cryptotrader\/eosforce\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"fmt\"\n\t\"bytes\"\n)\n\ntype AccountBalance struct {\n\tStaked string `json:\"staked\"`\n\tAvailable float64 `json:\"avalable\"`\n\tCreated string `json:\"created\"`\n\tUnstaking string `json:\"unstaking\"`\n\tName string `json:\"name\"`\n}\n\nvar balances []AccountBalance\nvar fuckingGuys []string\n\nfunc main() {\n\n\tcontent, _ := ioutil.ReadFile(Opts.AccountFile)\n\terr := json.Unmarshal(content, &balances)\n\tif err != nil {\n\t\tlog.Fatalf(\"json file parse json failed, err: %v\", err)\n\t}\n\n\tc := eosforce.New([]string{\"docker\", \"exec\", \"eosforce\", \"cleos\"}, Opts.RpcScheme, Opts.RpcHost)\n\n\tfor {\n\t\tfor i, v := range balances {\n\t\t\tavailable, err := c.GetAvailable(context.Background(), v.Name)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"eosforce get available balance failed, err: %v\", err)\n\t\t\t}\n\t\t\tif available != v.Available {\n\t\t\t\tlog.Infof(\"Fucking Guy %v is dumping! before: %v, after: %v, diff: %v\", v.Name, v.Available, available, v.Available-available)\n\t\t\t\tbalances[i].Available = available\n\t\t\t\tfuckingGuys = append(fuckingGuys, v.Name)\n\t\t\t\tfunc(){\n\t\t\t\t\tpost := fmt.Sprintf(\"Fucking Guy %v is dumping! before: %v, after: %v, diff: %v\", v.Name, v.Available, available, v.Available-available)\n\t\t\t\t\treq, _ := http.NewRequest(\"POST\", \"http:\/\/dev.chainpool.io:3000\/eosforce-monitor\", bytes.NewBuffer([]byte(post)))\n\t\t\t\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t\t\t\t\tclient := &http.Client{}\n\t\t\t\t\tresp, _ := client.Do(req)\n\t\t\t\t\tdefer resp.Body.Close()\n\t\t\t\t}()\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t\tlog.Infof(\"Fucking Guys: %v\", fuckingGuys)\n\t\tfunc(){\n\t\t\tpost := fmt.Sprintf(\"Fucking Guys: %v\", fuckingGuys)\n\t\t\treq, _ := http.NewRequest(\"POST\", \"http:\/\/dev.chainpool.io:3000\/eosforce-monitor\", bytes.NewBuffer([]byte(post)))\n\t\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t\t\tclient := &http.Client{}\n\t\t\tresp, _ := client.Do(req)\n\t\t\tdefer resp.Body.Close()\n\t\t}()\n\t\ttime.Sleep(2*time.Minute)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/juju\/cmd\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\"\n\t\"gopkg.in\/juju\/charmrepo.v2-unstable\"\n\t\"gopkg.in\/juju\/charmrepo.v2-unstable\/csclient\"\n\t\"gopkg.in\/macaroon-bakery.v1\/httpbakery\"\n\t\"gopkg.in\/macaroon.v1\"\n\n\t\"github.com\/juju\/juju\/api\"\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/cmd\/envcmd\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/environs\/configstore\"\n)\n\n\/\/ destroyPreparedEnviron destroys the environment and logs an error\n\/\/ if it fails.\nvar destroyPreparedEnviron = destroyPreparedEnvironProductionFunc\n\nvar logger = loggo.GetLogger(\"juju.cmd.juju\")\n\nfunc destroyPreparedEnvironProductionFunc(\n\tctx *cmd.Context,\n\tenv environs.Environ,\n\tstore configstore.Storage,\n\taction string,\n) {\n\tctx.Infof(\"%s failed, destroying environment\", action)\n\tif err := environs.Destroy(env, store); err != nil {\n\t\tlogger.Errorf(\"the environment could not be destroyed: %v\", err)\n\t}\n}\n\nvar destroyEnvInfo = destroyEnvInfoProductionFunc\n\nfunc destroyEnvInfoProductionFunc(\n\tctx *cmd.Context,\n\tcfgName string,\n\tstore configstore.Storage,\n\taction string,\n) {\n\tctx.Infof(\"%s failed, cleaning up the environment.\", action)\n\tif err := environs.DestroyInfo(cfgName, store); err != nil {\n\t\tlogger.Errorf(\"the environment jenv file could not be cleaned up: %v\", err)\n\t}\n}\n\n\/\/ environFromName loads an existing environment or prepares a new\n\/\/ one. If there are no errors, it returns the environ and a closure to\n\/\/ clean up in case we need to further up the stack. If an error has\n\/\/ occurred, the environment and cleanup function will be nil, and the\n\/\/ error will be filled in.\nvar environFromName = environFromNameProductionFunc\n\nfunc environFromNameProductionFunc(\n\tctx *cmd.Context,\n\tenvName string,\n\taction string,\n\tensureNotBootstrapped func(environs.Environ) error,\n) (env environs.Environ, cleanup func(), err error) {\n\n\tstore, err := configstore.Default()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tenvExisted := false\n\tif environInfo, err := store.ReadInfo(envName); err == nil {\n\t\tenvExisted = true\n\t\tlogger.Warningf(\n\t\t\t\"ignoring environments.yaml: using bootstrap config in %s\",\n\t\t\tenvironInfo.Location(),\n\t\t)\n\t} else if !errors.IsNotFound(err) {\n\t\treturn nil, nil, err\n\t}\n\n\tcleanup = func() {\n\t\t\/\/ Distinguish b\/t removing the jenv file or tearing down the\n\t\t\/\/ environment. We want to remove the jenv file if preparation\n\t\t\/\/ was not successful. We want to tear down the environment\n\t\t\/\/ only in the case where the environment didn't already\n\t\t\/\/ exist.\n\t\tif env == nil {\n\t\t\tlogger.Debugf(\"Destroying environment info.\")\n\t\t\tdestroyEnvInfo(ctx, envName, store, action)\n\t\t} else if !envExisted && ensureNotBootstrapped(env) != environs.ErrAlreadyBootstrapped {\n\t\t\tlogger.Debugf(\"Destroying environment.\")\n\t\t\tdestroyPreparedEnviron(ctx, env, store, action)\n\t\t}\n\t}\n\n\tif env, err = environs.PrepareFromName(envName, envcmd.BootstrapContext(ctx), store); err != nil {\n\t\treturn nil, cleanup, err\n\t}\n\n\treturn env, cleanup, err\n}\n\ntype resolveCharmStoreEntityParams struct {\n\turlStr string\n\trequestedSeries string\n\tforceSeries bool\n\tcsParams charmrepo.NewCharmStoreParams\n\trepoPath string\n\tconf *config.Config\n}\n\n\/\/ resolveCharmStoreEntityURL resolves the given charm or bundle URL string\n\/\/ by looking it up in the appropriate charm repository.\n\/\/ If it is a charm store URL, the given csParams will\n\/\/ be used to access the charm store repository.\n\/\/ If it is a local charm or bundle URL, the local charm repository at\n\/\/ the given repoPath will be used. The given configuration\n\/\/ will be used to add any necessary attributes to the repo\n\/\/ and to return the charm's supported series if possible.\n\/\/\n\/\/ resolveCharmStoreEntityURL also returns the charm repository holding\n\/\/ the charm or bundle.\nfunc resolveCharmStoreEntityURL(args resolveCharmStoreEntityParams) (*charm.URL, []string, charmrepo.Interface, error) {\n\turl, err := charm.ParseURL(args.urlStr)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\trepo, err := charmrepo.InferRepository(url, args.csParams, args.repoPath)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\trepo = config.SpecializeCharmRepo(repo, args.conf)\n\n\tif url.Schema == \"local\" && url.Series == \"\" {\n\t\tif defaultSeries, ok := args.conf.DefaultSeries(); ok {\n\t\t\turl.Series = defaultSeries\n\t\t}\n\t\tif url.Series == \"\" {\n\t\t\tpossibleURL := *url\n\t\t\tpossibleURL.Series = config.LatestLtsSeries()\n\t\t\tlogger.Errorf(\"The series is not specified in the environment (default-series) or with the charm. Did you mean:\\n\\t%s\", &possibleURL)\n\t\t\treturn nil, nil, nil, errors.Errorf(\"cannot resolve series for charm: %q\", url)\n\t\t}\n\t}\n\tresultUrl, supportedSeries, err := repo.Resolve(url)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\treturn resultUrl, supportedSeries, repo, nil\n}\n\nfunc isSeriesSupported(requestedSeries string, supportedSeries []string) bool {\n\tfor _, series := range supportedSeries {\n\t\tif series == requestedSeries {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ termsAgreementError returns err as a *termsAgreementError\n\/\/ if it has a \"terms agreement request\" error code, otherwise\n\/\/ it returns err unchanged.\nfunc termsAgreementError(err error) error {\n\te, ok := err.(*httpbakery.DischargeError)\n\tif !ok {\n\t\treturn nil\n\t}\n\tif e.Reason == nil {\n\t\treturn err\n\t}\n\tcode := \"term agreement required\"\n\tif e.Reason.Code != httpbakery.ErrorCode(code) {\n\t\treturn err\n\t}\n\tmagicMarker := code + \":\"\n\tindex := strings.LastIndex(e.Reason.Message, magicMarker)\n\tif index == -1 {\n\t\treturn err\n\t}\n\treturn &termsRequiredError{strings.Fields(e.Reason.Message[index+len(magicMarker):])}\n}\n\ntype termsRequiredError struct {\n\tTerms []string\n}\n\nfunc (e *termsRequiredError) Error() string {\n\treturn fmt.Sprintf(\"please agree to terms %q\", strings.Join(e.Terms, \" \"))\n}\n\n\/\/ addCharmFromURL calls the appropriate client API calls to add the\n\/\/ given charm URL to state. For non-public charm URLs, this function also\n\/\/ handles the macaroon authorization process using the given csClient.\n\/\/ The resulting charm URL of the added charm is displayed on stdout.\nfunc addCharmFromURL(client *api.Client, curl *charm.URL, repo charmrepo.Interface, csclient *csClient) (*charm.URL, error) {\n\tswitch curl.Schema {\n\tcase \"local\":\n\t\tch, err := repo.Get(curl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstateCurl, err := client.AddLocalCharm(curl, ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcurl = stateCurl\n\tcase \"cs\":\n\t\tif err := client.AddCharm(curl); err != nil {\n\t\t\tif !params.IsCodeUnauthorized(err) {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tm, err := csclient.authorize(curl)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, termsAgreementError(errors.Cause(err))\n\t\t\t}\n\t\t\tif err := client.AddCharmWithAuthorization(curl, m); err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported charm URL schema: %q\", curl.Schema)\n\t}\n\treturn curl, nil\n}\n\n\/\/ csClient gives access to the charm store server and provides parameters\n\/\/ for connecting to the charm store.\ntype csClient struct {\n\tparams charmrepo.NewCharmStoreParams\n}\n\n\/\/ newCharmStoreClient is called to obtain a charm store client\n\/\/ including the parameters for connecting to the charm store, and\n\/\/ helpers to save the local authorization cookies and to authorize\n\/\/ non-public charm deployments. It is defined as a variable so it can\n\/\/ be changed for testing purposes.\nvar newCharmStoreClient = func(client *http.Client) *csClient {\n\treturn &csClient{\n\t\tparams: charmrepo.NewCharmStoreParams{\n\t\t\tHTTPClient: client,\n\t\t\tVisitWebPage: httpbakery.OpenWebBrowser,\n\t\t},\n\t}\n}\n\n\/\/ authorize acquires and return the charm store delegatable macaroon to be\n\/\/ used to add the charm corresponding to the given URL.\n\/\/ The macaroon is properly attenuated so that it can only be used to deploy\n\/\/ the given charm URL.\nfunc (c *csClient) authorize(curl *charm.URL) (*macaroon.Macaroon, error) {\n\tclient := csclient.New(csclient.Params{\n\t\tURL: c.params.URL,\n\t\tHTTPClient: c.params.HTTPClient,\n\t\tVisitWebPage: c.params.VisitWebPage,\n\t})\n\tendpoint := \"\/delegatable-macaroon\"\n\tif curl != nil {\n\t\tquery := url.Values{}\n\t\tquery.Add(\"id\", curl.String())\n\t\tendpoint = endpoint + \"?\" + query.Encode()\n\t}\n\tvar m *macaroon.Macaroon\n\tif err := client.Get(endpoint, &m); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif err := m.AddFirstPartyCaveat(\"is-entity \" + curl.String()); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn m, nil\n}\n<commit_msg>cmd\/juju\/common: No longer need to add FPC<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/juju\/cmd\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\"\n\t\"gopkg.in\/juju\/charmrepo.v2-unstable\"\n\t\"gopkg.in\/juju\/charmrepo.v2-unstable\/csclient\"\n\t\"gopkg.in\/macaroon-bakery.v1\/httpbakery\"\n\t\"gopkg.in\/macaroon.v1\"\n\n\t\"github.com\/juju\/juju\/api\"\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/cmd\/envcmd\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/environs\/configstore\"\n)\n\n\/\/ destroyPreparedEnviron destroys the environment and logs an error\n\/\/ if it fails.\nvar destroyPreparedEnviron = destroyPreparedEnvironProductionFunc\n\nvar logger = loggo.GetLogger(\"juju.cmd.juju\")\n\nfunc destroyPreparedEnvironProductionFunc(\n\tctx *cmd.Context,\n\tenv environs.Environ,\n\tstore configstore.Storage,\n\taction string,\n) {\n\tctx.Infof(\"%s failed, destroying environment\", action)\n\tif err := environs.Destroy(env, store); err != nil {\n\t\tlogger.Errorf(\"the environment could not be destroyed: %v\", err)\n\t}\n}\n\nvar destroyEnvInfo = destroyEnvInfoProductionFunc\n\nfunc destroyEnvInfoProductionFunc(\n\tctx *cmd.Context,\n\tcfgName string,\n\tstore configstore.Storage,\n\taction string,\n) {\n\tctx.Infof(\"%s failed, cleaning up the environment.\", action)\n\tif err := environs.DestroyInfo(cfgName, store); err != nil {\n\t\tlogger.Errorf(\"the environment jenv file could not be cleaned up: %v\", err)\n\t}\n}\n\n\/\/ environFromName loads an existing environment or prepares a new\n\/\/ one. If there are no errors, it returns the environ and a closure to\n\/\/ clean up in case we need to further up the stack. If an error has\n\/\/ occurred, the environment and cleanup function will be nil, and the\n\/\/ error will be filled in.\nvar environFromName = environFromNameProductionFunc\n\nfunc environFromNameProductionFunc(\n\tctx *cmd.Context,\n\tenvName string,\n\taction string,\n\tensureNotBootstrapped func(environs.Environ) error,\n) (env environs.Environ, cleanup func(), err error) {\n\n\tstore, err := configstore.Default()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tenvExisted := false\n\tif environInfo, err := store.ReadInfo(envName); err == nil {\n\t\tenvExisted = true\n\t\tlogger.Warningf(\n\t\t\t\"ignoring environments.yaml: using bootstrap config in %s\",\n\t\t\tenvironInfo.Location(),\n\t\t)\n\t} else if !errors.IsNotFound(err) {\n\t\treturn nil, nil, err\n\t}\n\n\tcleanup = func() {\n\t\t\/\/ Distinguish b\/t removing the jenv file or tearing down the\n\t\t\/\/ environment. We want to remove the jenv file if preparation\n\t\t\/\/ was not successful. We want to tear down the environment\n\t\t\/\/ only in the case where the environment didn't already\n\t\t\/\/ exist.\n\t\tif env == nil {\n\t\t\tlogger.Debugf(\"Destroying environment info.\")\n\t\t\tdestroyEnvInfo(ctx, envName, store, action)\n\t\t} else if !envExisted && ensureNotBootstrapped(env) != environs.ErrAlreadyBootstrapped {\n\t\t\tlogger.Debugf(\"Destroying environment.\")\n\t\t\tdestroyPreparedEnviron(ctx, env, store, action)\n\t\t}\n\t}\n\n\tif env, err = environs.PrepareFromName(envName, envcmd.BootstrapContext(ctx), store); err != nil {\n\t\treturn nil, cleanup, err\n\t}\n\n\treturn env, cleanup, err\n}\n\ntype resolveCharmStoreEntityParams struct {\n\turlStr string\n\trequestedSeries string\n\tforceSeries bool\n\tcsParams charmrepo.NewCharmStoreParams\n\trepoPath string\n\tconf *config.Config\n}\n\n\/\/ resolveCharmStoreEntityURL resolves the given charm or bundle URL string\n\/\/ by looking it up in the appropriate charm repository.\n\/\/ If it is a charm store URL, the given csParams will\n\/\/ be used to access the charm store repository.\n\/\/ If it is a local charm or bundle URL, the local charm repository at\n\/\/ the given repoPath will be used. The given configuration\n\/\/ will be used to add any necessary attributes to the repo\n\/\/ and to return the charm's supported series if possible.\n\/\/\n\/\/ resolveCharmStoreEntityURL also returns the charm repository holding\n\/\/ the charm or bundle.\nfunc resolveCharmStoreEntityURL(args resolveCharmStoreEntityParams) (*charm.URL, []string, charmrepo.Interface, error) {\n\turl, err := charm.ParseURL(args.urlStr)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\trepo, err := charmrepo.InferRepository(url, args.csParams, args.repoPath)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\trepo = config.SpecializeCharmRepo(repo, args.conf)\n\n\tif url.Schema == \"local\" && url.Series == \"\" {\n\t\tif defaultSeries, ok := args.conf.DefaultSeries(); ok {\n\t\t\turl.Series = defaultSeries\n\t\t}\n\t\tif url.Series == \"\" {\n\t\t\tpossibleURL := *url\n\t\t\tpossibleURL.Series = config.LatestLtsSeries()\n\t\t\tlogger.Errorf(\"The series is not specified in the environment (default-series) or with the charm. Did you mean:\\n\\t%s\", &possibleURL)\n\t\t\treturn nil, nil, nil, errors.Errorf(\"cannot resolve series for charm: %q\", url)\n\t\t}\n\t}\n\tresultUrl, supportedSeries, err := repo.Resolve(url)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.Trace(err)\n\t}\n\treturn resultUrl, supportedSeries, repo, nil\n}\n\nfunc isSeriesSupported(requestedSeries string, supportedSeries []string) bool {\n\tfor _, series := range supportedSeries {\n\t\tif series == requestedSeries {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ termsAgreementError returns err as a *termsAgreementError\n\/\/ if it has a \"terms agreement request\" error code, otherwise\n\/\/ it returns err unchanged.\nfunc termsAgreementError(err error) error {\n\te, ok := err.(*httpbakery.DischargeError)\n\tif !ok {\n\t\treturn nil\n\t}\n\tif e.Reason == nil {\n\t\treturn err\n\t}\n\tcode := \"term agreement required\"\n\tif e.Reason.Code != httpbakery.ErrorCode(code) {\n\t\treturn err\n\t}\n\tmagicMarker := code + \":\"\n\tindex := strings.LastIndex(e.Reason.Message, magicMarker)\n\tif index == -1 {\n\t\treturn err\n\t}\n\treturn &termsRequiredError{strings.Fields(e.Reason.Message[index+len(magicMarker):])}\n}\n\ntype termsRequiredError struct {\n\tTerms []string\n}\n\nfunc (e *termsRequiredError) Error() string {\n\treturn fmt.Sprintf(\"please agree to terms %q\", strings.Join(e.Terms, \" \"))\n}\n\n\/\/ addCharmFromURL calls the appropriate client API calls to add the\n\/\/ given charm URL to state. For non-public charm URLs, this function also\n\/\/ handles the macaroon authorization process using the given csClient.\n\/\/ The resulting charm URL of the added charm is displayed on stdout.\nfunc addCharmFromURL(client *api.Client, curl *charm.URL, repo charmrepo.Interface, csclient *csClient) (*charm.URL, error) {\n\tswitch curl.Schema {\n\tcase \"local\":\n\t\tch, err := repo.Get(curl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstateCurl, err := client.AddLocalCharm(curl, ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcurl = stateCurl\n\tcase \"cs\":\n\t\tif err := client.AddCharm(curl); err != nil {\n\t\t\tif !params.IsCodeUnauthorized(err) {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tm, err := csclient.authorize(curl)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, termsAgreementError(errors.Cause(err))\n\t\t\t}\n\t\t\tif err := client.AddCharmWithAuthorization(curl, m); err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported charm URL schema: %q\", curl.Schema)\n\t}\n\treturn curl, nil\n}\n\n\/\/ csClient gives access to the charm store server and provides parameters\n\/\/ for connecting to the charm store.\ntype csClient struct {\n\tparams charmrepo.NewCharmStoreParams\n}\n\n\/\/ newCharmStoreClient is called to obtain a charm store client\n\/\/ including the parameters for connecting to the charm store, and\n\/\/ helpers to save the local authorization cookies and to authorize\n\/\/ non-public charm deployments. It is defined as a variable so it can\n\/\/ be changed for testing purposes.\nvar newCharmStoreClient = func(client *http.Client) *csClient {\n\treturn &csClient{\n\t\tparams: charmrepo.NewCharmStoreParams{\n\t\t\tHTTPClient: client,\n\t\t\tVisitWebPage: httpbakery.OpenWebBrowser,\n\t\t},\n\t}\n}\n\n\/\/ authorize acquires and return the charm store delegatable macaroon to be\n\/\/ used to add the charm corresponding to the given URL.\n\/\/ The macaroon is properly attenuated so that it can only be used to deploy\n\/\/ the given charm URL.\nfunc (c *csClient) authorize(curl *charm.URL) (*macaroon.Macaroon, error) {\n\tclient := csclient.New(csclient.Params{\n\t\tURL: c.params.URL,\n\t\tHTTPClient: c.params.HTTPClient,\n\t\tVisitWebPage: c.params.VisitWebPage,\n\t})\n\tendpoint := \"\/delegatable-macaroon\"\n\tif curl != nil {\n\t\tquery := url.Values{}\n\t\tquery.Add(\"id\", curl.String())\n\t\tendpoint = endpoint + \"?\" + query.Encode()\n\t}\n\tvar m *macaroon.Macaroon\n\tif err := client.Get(endpoint, &m); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn m, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/drivers\/kic\/oci\"\n\t\"k8s.io\/minikube\/pkg\/kapi\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/browser\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/mustload\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/reason\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/service\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/tunnel\/kic\"\n)\n\nconst defaultServiceFormatTemplate = \"http:\/\/{{.IP}}:{{.Port}}\"\n\nvar (\n\tnamespace string\n\tall bool\n\thttps bool\n\tserviceURLMode bool\n\tserviceURLFormat string\n\tserviceURLTemplate *template.Template\n\twait int\n\tinterval int\n)\n\n\/\/ serviceCmd represents the service command\nvar serviceCmd = &cobra.Command{\n\tUse: \"service [flags] SERVICE\",\n\tShort: \"Returns a URL to connect to a service\",\n\tLong: `Returns the Kubernetes URL(s) for service(s) in your local cluster. In the case of multiple URLs they will be printed one at a time.`,\n\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\tt, err := template.New(\"serviceURL\").Parse(serviceURLFormat)\n\t\tif err != nil {\n\t\t\texit.Error(reason.InternalFormatUsage, \"The value passed to --format is invalid\", err)\n\t\t}\n\t\tserviceURLTemplate = t\n\n\t\tRootCmd.PersistentPreRun(cmd, args)\n\t},\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 0 && !all || (len(args) > 0 && all) {\n\t\t\texit.Message(reason.Usage, \"You must specify service name(s) or --all\")\n\t\t}\n\n\t\tsvcArgs := make(map[string]bool)\n\t\tfor _, v := range args {\n\t\t\tsvcArgs[v] = true\n\t\t}\n\n\t\tcname := ClusterFlagValue()\n\t\tco := mustload.Healthy(cname)\n\n\t\tvar services service.URLs\n\t\tservices, err := service.GetServiceURLs(co.API, co.Config.Name, namespace, serviceURLTemplate)\n\t\tif err != nil {\n\t\t\tout.FatalT(\"Failed to get service URL: {{.error}}\", out.V{\"error\": err})\n\t\t\tout.ErrT(style.Notice, \"Check that minikube is running and that you have specified the correct namespace (-n flag) if required.\")\n\t\t\tos.Exit(reason.ExSvcUnavailable)\n\t\t}\n\n\t\tif len(args) >= 1 {\n\t\t\tvar newServices service.URLs\n\t\t\tfor _, svc := range services {\n\t\t\t\tif _, ok := svcArgs[svc.Name]; ok {\n\t\t\t\t\tnewServices = append(newServices, svc)\n\t\t\t\t}\n\t\t\t}\n\t\t\tservices = newServices\n\t\t}\n\n\t\tif len(services) == 0 {\n\t\t\texit.Message(reason.SvcNotFound, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{\"service\": args[0], \"namespace\": namespace})\n\t\t}\n\n\t\tvar data [][]string\n\t\tfor _, svc := range services {\n\t\t\topenUrls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc.Name, serviceURLTemplate, serviceURLMode, https, wait, interval)\n\n\t\t\tif err != nil {\n\t\t\t\tvar s *service.SVCNotFoundError\n\t\t\t\tif errors.As(err, &s) {\n\t\t\t\t\texit.Message(reason.SvcNotFound, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{\"service\": svc, \"namespace\": namespace})\n\t\t\t\t}\n\t\t\t\texit.Error(reason.SvcTimeout, \"Error opening service\", err)\n\t\t\t}\n\n\t\t\tif len(openUrls) == 0 {\n\t\t\t\tdata = append(data, []string{svc.Namespace, svc.Name, \"No node port\"})\n\t\t\t} else {\n\t\t\t\tservicePortNames := strings.Join(svc.PortNames, \"\\n\")\n\t\t\t\tserviceURLs := strings.Join(openUrls, \"\\n\")\n\n\t\t\t\t\/\/ if we are running Docker on OSX we empty the internal service URLs\n\t\t\t\tif runtime.GOOS == \"darwin\" && co.Config.Driver == oci.Docker {\n\t\t\t\t\tserviceURLs = \"\"\n\t\t\t\t}\n\n\t\t\t\tdata = append(data, []string{svc.Namespace, svc.Name, servicePortNames, serviceURLs})\n\n\t\t\t\tif serviceURLMode && !driver.NeedsPortForward(co.Config.Driver) {\n\t\t\t\t\tout.String(fmt.Sprintf(\"%s\\n\", serviceURLs))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif driver.NeedsPortForward(co.Config.Driver) && driver.IsKIC(co.Config.Driver) && services != nil {\n\t\t\tstartKicServiceTunnel(services, cname, co.Config.Driver)\n\t\t} else if driver.NeedsPortForward(co.Config.Driver) && driver.IsQEMU(co.Config.Driver) && services != nil {\n\t\t\tstartQemuServiceTunnel(services, cname, co.Config.Driver)\n\t\t} else if !serviceURLMode {\n\t\t\topenURLs(data)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tserviceCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", \"default\", \"The service namespace\")\n\tserviceCmd.Flags().BoolVar(&serviceURLMode, \"url\", false, \"Display the Kubernetes service URL in the CLI instead of opening it in the default browser\")\n\tserviceCmd.Flags().BoolVar(&all, \"all\", false, \"Forwards all services in a namespace (defaults to \\\"false\\\")\")\n\tserviceCmd.Flags().BoolVar(&https, \"https\", false, \"Open the service URL with https instead of http (defaults to \\\"false\\\")\")\n\tserviceCmd.Flags().IntVar(&wait, \"wait\", service.DefaultWait, \"Amount of time to wait for a service in seconds\")\n\tserviceCmd.Flags().IntVar(&interval, \"interval\", service.DefaultInterval, \"The initial time interval for each check that wait performs in seconds\")\n\n\tserviceCmd.PersistentFlags().StringVar(&serviceURLFormat, \"format\", defaultServiceFormatTemplate, \"Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time.\")\n}\n\nfunc startKicServiceTunnel(services service.URLs, configName, driverName string) {\n\tctrlC := make(chan os.Signal, 1)\n\tsignal.Notify(ctrlC, os.Interrupt)\n\n\tclientset, err := kapi.Client(configName)\n\tif err != nil {\n\t\texit.Error(reason.InternalKubernetesClient, \"error creating clientset\", err)\n\t}\n\n\tvar data [][]string\n\tfor _, svc := range services {\n\t\tport, err := oci.ForwardedPort(driverName, configName, 22)\n\t\tif err != nil {\n\t\t\texit.Error(reason.DrvPortForward, \"error getting ssh port\", err)\n\t\t}\n\t\tsshPort := strconv.Itoa(port)\n\t\tsshKey := filepath.Join(localpath.MiniPath(), \"machines\", configName, \"id_rsa\")\n\n\t\tserviceTunnel := kic.NewServiceTunnel(sshPort, sshKey, clientset.CoreV1(), serviceURLMode)\n\t\turls, err := serviceTunnel.Start(svc.Name, namespace)\n\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"error starting tunnel\", err)\n\t\t}\n\t\t\/\/ mutate response urls to HTTPS if needed\n\t\turls, err = mutateURLs(svc.Name, urls)\n\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"error creatings urls\", err)\n\t\t}\n\n\t\tdefer serviceTunnel.Stop()\n\t\tsvc.URLs = urls\n\t\tdata = append(data, []string{namespace, svc.Name, \"\", strings.Join(urls, \"\\n\")})\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\n\tif !serviceURLMode {\n\t\tservice.PrintServiceList(os.Stdout, data)\n\t} else {\n\t\tfor _, row := range data {\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", row[3]))\n\t\t}\n\t}\n\n\tif !serviceURLMode {\n\t\topenURLs(data)\n\t}\n\n\tout.WarningT(\"Because you are using a Docker driver on {{.operating_system}}, the terminal needs to be open to run it.\", out.V{\"operating_system\": runtime.GOOS})\n\n\t<-ctrlC\n}\n\nfunc startQemuServiceTunnel(services service.URLs, configName, driverName string) {\n}\n\nfunc mutateURLs(serviceName string, urls []string) ([]string, error) {\n\tformattedUrls := make([]string, 0)\n\tfor _, rawURL := range urls {\n\t\tvar doc bytes.Buffer\n\t\tparsedURL, err := url.Parse(rawURL)\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"No valid URL found for tunnel.\", err)\n\t\t}\n\t\tport, err := strconv.Atoi(parsedURL.Port())\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"No valid port found for tunnel.\", err)\n\t\t}\n\t\terr = serviceURLTemplate.Execute(&doc, struct {\n\t\t\tIP string\n\t\t\tPort int32\n\t\t\tName string\n\t\t}{\n\t\t\tparsedURL.Hostname(),\n\t\t\tint32(port),\n\t\t\tserviceName,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thttpsURL, _ := service.OptionallyHTTPSFormattedURLString(doc.String(), https)\n\t\tformattedUrls = append(formattedUrls, httpsURL)\n\t}\n\n\treturn formattedUrls, nil\n}\n\nfunc openURLs(urls [][]string) {\n\tfor _, u := range urls {\n\n\t\tif len(u) < 4 {\n\t\t\tklog.Warning(\"No URL found\")\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := url.Parse(u[3])\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"failed to parse url %q: %v (will not open)\", u[3], err)\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", u))\n\t\t\tcontinue\n\t\t}\n\n\t\tif serviceURLMode {\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", u))\n\t\t\tcontinue\n\t\t}\n\n\t\tout.Styled(style.Celebrate, \"Opening service {{.namespace_name}}\/{{.service_name}} in default browser...\", out.V{\"namespace_name\": namespace, \"service_name\": u[1]})\n\t\tif err := browser.OpenURL(u[3]); err != nil {\n\t\t\texit.Error(reason.HostBrowser, fmt.Sprintf(\"open url failed: %s\", u), err)\n\t\t}\n\t}\n}\n<commit_msg>revert changes<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/drivers\/kic\/oci\"\n\t\"k8s.io\/minikube\/pkg\/kapi\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/browser\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/mustload\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/reason\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/service\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/tunnel\/kic\"\n)\n\nconst defaultServiceFormatTemplate = \"http:\/\/{{.IP}}:{{.Port}}\"\n\nvar (\n\tnamespace string\n\tall bool\n\thttps bool\n\tserviceURLMode bool\n\tserviceURLFormat string\n\tserviceURLTemplate *template.Template\n\twait int\n\tinterval int\n)\n\n\/\/ serviceCmd represents the service command\nvar serviceCmd = &cobra.Command{\n\tUse: \"service [flags] SERVICE\",\n\tShort: \"Returns a URL to connect to a service\",\n\tLong: `Returns the Kubernetes URL(s) for service(s) in your local cluster. In the case of multiple URLs they will be printed one at a time.`,\n\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\tt, err := template.New(\"serviceURL\").Parse(serviceURLFormat)\n\t\tif err != nil {\n\t\t\texit.Error(reason.InternalFormatUsage, \"The value passed to --format is invalid\", err)\n\t\t}\n\t\tserviceURLTemplate = t\n\n\t\tRootCmd.PersistentPreRun(cmd, args)\n\t},\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 0 && !all || (len(args) > 0 && all) {\n\t\t\texit.Message(reason.Usage, \"You must specify service name(s) or --all\")\n\t\t}\n\n\t\tsvcArgs := make(map[string]bool)\n\t\tfor _, v := range args {\n\t\t\tsvcArgs[v] = true\n\t\t}\n\n\t\tcname := ClusterFlagValue()\n\t\tco := mustload.Healthy(cname)\n\n\t\t\/\/ Bail cleanly for qemu2 until implemented\n\t\tif driver.IsQEMU(co.Config.Driver) {\n\t\t\texit.Message(reason.Unimplemented, \"minikube service is not currently implemented with the qemu2 driver. See https:\/\/github.com\/kubernetes\/minikube\/issues\/14146 for details.\")\n\t\t}\n\n\t\tvar services service.URLs\n\t\tservices, err := service.GetServiceURLs(co.API, co.Config.Name, namespace, serviceURLTemplate)\n\t\tif err != nil {\n\t\t\tout.FatalT(\"Failed to get service URL: {{.error}}\", out.V{\"error\": err})\n\t\t\tout.ErrT(style.Notice, \"Check that minikube is running and that you have specified the correct namespace (-n flag) if required.\")\n\t\t\tos.Exit(reason.ExSvcUnavailable)\n\t\t}\n\n\t\tif len(args) >= 1 {\n\t\t\tvar newServices service.URLs\n\t\t\tfor _, svc := range services {\n\t\t\t\tif _, ok := svcArgs[svc.Name]; ok {\n\t\t\t\t\tnewServices = append(newServices, svc)\n\t\t\t\t}\n\t\t\t}\n\t\t\tservices = newServices\n\t\t}\n\n\t\tif len(services) == 0 {\n\t\t\texit.Message(reason.SvcNotFound, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{\"service\": args[0], \"namespace\": namespace})\n\t\t}\n\n\t\tvar data [][]string\n\t\tfor _, svc := range services {\n\t\t\topenUrls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc.Name, serviceURLTemplate, serviceURLMode, https, wait, interval)\n\n\t\t\tif err != nil {\n\t\t\t\tvar s *service.SVCNotFoundError\n\t\t\t\tif errors.As(err, &s) {\n\t\t\t\t\texit.Message(reason.SvcNotFound, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{\"service\": svc, \"namespace\": namespace})\n\t\t\t\t}\n\t\t\t\texit.Error(reason.SvcTimeout, \"Error opening service\", err)\n\t\t\t}\n\n\t\t\tif len(openUrls) == 0 {\n\t\t\t\tdata = append(data, []string{svc.Namespace, svc.Name, \"No node port\"})\n\t\t\t} else {\n\t\t\t\tservicePortNames := strings.Join(svc.PortNames, \"\\n\")\n\t\t\t\tserviceURLs := strings.Join(openUrls, \"\\n\")\n\n\t\t\t\t\/\/ if we are running Docker on OSX we empty the internal service URLs\n\t\t\t\tif runtime.GOOS == \"darwin\" && co.Config.Driver == oci.Docker {\n\t\t\t\t\tserviceURLs = \"\"\n\t\t\t\t}\n\n\t\t\t\tdata = append(data, []string{svc.Namespace, svc.Name, servicePortNames, serviceURLs})\n\n\t\t\t\tif serviceURLMode && !driver.NeedsPortForward(co.Config.Driver) {\n\t\t\t\t\tout.String(fmt.Sprintf(\"%s\\n\", serviceURLs))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif driver.NeedsPortForward(co.Config.Driver) && driver.IsKIC(co.Config.Driver) && services != nil {\n\t\t\tstartKicServiceTunnel(services, cname, co.Config.Driver)\n\t\t} else if driver.NeedsPortForward(co.Config.Driver) && driver.IsQEMU(co.Config.Driver) && services != nil {\n\t\t\tstartQemuServiceTunnel(services, cname, co.Config.Driver)\n\t\t} else if !serviceURLMode {\n\t\t\topenURLs(data)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tserviceCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", \"default\", \"The service namespace\")\n\tserviceCmd.Flags().BoolVar(&serviceURLMode, \"url\", false, \"Display the Kubernetes service URL in the CLI instead of opening it in the default browser\")\n\tserviceCmd.Flags().BoolVar(&all, \"all\", false, \"Forwards all services in a namespace (defaults to \\\"false\\\")\")\n\tserviceCmd.Flags().BoolVar(&https, \"https\", false, \"Open the service URL with https instead of http (defaults to \\\"false\\\")\")\n\tserviceCmd.Flags().IntVar(&wait, \"wait\", service.DefaultWait, \"Amount of time to wait for a service in seconds\")\n\tserviceCmd.Flags().IntVar(&interval, \"interval\", service.DefaultInterval, \"The initial time interval for each check that wait performs in seconds\")\n\n\tserviceCmd.PersistentFlags().StringVar(&serviceURLFormat, \"format\", defaultServiceFormatTemplate, \"Format to output service URL in. This format will be applied to each url individually and they will be printed one at a time.\")\n}\n\nfunc startKicServiceTunnel(services service.URLs, configName, driverName string) {\n\tctrlC := make(chan os.Signal, 1)\n\tsignal.Notify(ctrlC, os.Interrupt)\n\n\tclientset, err := kapi.Client(configName)\n\tif err != nil {\n\t\texit.Error(reason.InternalKubernetesClient, \"error creating clientset\", err)\n\t}\n\n\tvar data [][]string\n\tfor _, svc := range services {\n\t\tport, err := oci.ForwardedPort(driverName, configName, 22)\n\t\tif err != nil {\n\t\t\texit.Error(reason.DrvPortForward, \"error getting ssh port\", err)\n\t\t}\n\t\tsshPort := strconv.Itoa(port)\n\t\tsshKey := filepath.Join(localpath.MiniPath(), \"machines\", configName, \"id_rsa\")\n\n\t\tserviceTunnel := kic.NewServiceTunnel(sshPort, sshKey, clientset.CoreV1(), serviceURLMode)\n\t\turls, err := serviceTunnel.Start(svc.Name, namespace)\n\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"error starting tunnel\", err)\n\t\t}\n\t\t\/\/ mutate response urls to HTTPS if needed\n\t\turls, err = mutateURLs(svc.Name, urls)\n\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"error creatings urls\", err)\n\t\t}\n\n\t\tdefer serviceTunnel.Stop()\n\t\tsvc.URLs = urls\n\t\tdata = append(data, []string{namespace, svc.Name, \"\", strings.Join(urls, \"\\n\")})\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\n\tif !serviceURLMode {\n\t\tservice.PrintServiceList(os.Stdout, data)\n\t} else {\n\t\tfor _, row := range data {\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", row[3]))\n\t\t}\n\t}\n\n\tif !serviceURLMode {\n\t\topenURLs(data)\n\t}\n\n\tout.WarningT(\"Because you are using a Docker driver on {{.operating_system}}, the terminal needs to be open to run it.\", out.V{\"operating_system\": runtime.GOOS})\n\n\t<-ctrlC\n}\n\nfunc startQemuServiceTunnel(services service.URLs, configName, driverName string) {\n}\n\nfunc mutateURLs(serviceName string, urls []string) ([]string, error) {\n\tformattedUrls := make([]string, 0)\n\tfor _, rawURL := range urls {\n\t\tvar doc bytes.Buffer\n\t\tparsedURL, err := url.Parse(rawURL)\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"No valid URL found for tunnel.\", err)\n\t\t}\n\t\tport, err := strconv.Atoi(parsedURL.Port())\n\t\tif err != nil {\n\t\t\texit.Error(reason.SvcTunnelStart, \"No valid port found for tunnel.\", err)\n\t\t}\n\t\terr = serviceURLTemplate.Execute(&doc, struct {\n\t\t\tIP string\n\t\t\tPort int32\n\t\t\tName string\n\t\t}{\n\t\t\tparsedURL.Hostname(),\n\t\t\tint32(port),\n\t\t\tserviceName,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thttpsURL, _ := service.OptionallyHTTPSFormattedURLString(doc.String(), https)\n\t\tformattedUrls = append(formattedUrls, httpsURL)\n\t}\n\n\treturn formattedUrls, nil\n}\n\nfunc openURLs(urls [][]string) {\n\tfor _, u := range urls {\n\n\t\tif len(u) < 4 {\n\t\t\tklog.Warning(\"No URL found\")\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := url.Parse(u[3])\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"failed to parse url %q: %v (will not open)\", u[3], err)\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", u))\n\t\t\tcontinue\n\t\t}\n\n\t\tif serviceURLMode {\n\t\t\tout.String(fmt.Sprintf(\"%s\\n\", u))\n\t\t\tcontinue\n\t\t}\n\n\t\tout.Styled(style.Celebrate, \"Opening service {{.namespace_name}}\/{{.service_name}} in default browser...\", out.V{\"namespace_name\": namespace, \"service_name\": u[1]})\n\t\tif err := browser.OpenURL(u[3]); err != nil {\n\t\t\texit.Error(reason.HostBrowser, fmt.Sprintf(\"open url failed: %s\", u), err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"go.uber.org\/atomic\"\n\n\tnetwork \"knative.dev\/networking\/pkg\"\n\t\"knative.dev\/serving\/pkg\/queue\/readiness\"\n)\n\nfunc TestProbeQueueInvalidPort(t *testing.T) {\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tfor _, port := range []string{\"-1\", \"0\", \"66000\"} {\n\t\tos.Setenv(queuePortEnvVar, port)\n\t\tif rv := standaloneProbeMain(1, http.DefaultTransport); rv != 1 {\n\t\t\tt.Error(\"Unexpected return code\", rv)\n\t\t}\n\t}\n}\n\nfunc TestProbeQueueConnectionFailure(t *testing.T) {\n\tif err := probeQueueHealthPath(1, 12345, http.DefaultTransport); err == nil {\n\t\tt.Error(\"Expected error, got nil\")\n\t}\n}\n\nfunc TestProbeQueueNotReady(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t})\n\n\terr := probeQueueHealthPath(100*time.Millisecond, port, http.DefaultTransport)\n\n\tif err == nil || err.Error() != \"probe returned not ready\" {\n\t\tt.Error(\"Unexpected not ready error:\", err)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeShuttingDown(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusGone)\n\t})\n\n\terr := probeQueueHealthPath(time.Second, port, http.DefaultTransport)\n\n\tif err == nil || err.Error() != \"failed to probe: failing probe deliberately for shutdown\" {\n\t\tt.Error(\"Unexpected error:\", err)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueShuttingDownFailsFast(t *testing.T) {\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusGone)\n\t})\n\n\tstart := time.Now()\n\tif err := probeQueueHealthPath(1, port, http.DefaultTransport); err == nil {\n\t\tt.Error(\"probeQueueHealthPath did not fail\")\n\t}\n\n\t\/\/ if fails due to timeout and not cancelation, then it took too long.\n\tif time.Since(start) >= 1*time.Second {\n\t\tt.Error(\"took too long to fail\")\n\t}\n}\n\nfunc TestProbeQueueReady(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tos.Setenv(queuePortEnvVar, strconv.Itoa(port))\n\n\tif rv := standaloneProbeMain(0 \/*use default*\/, nil); rv != 0 {\n\t\tt.Error(\"Unexpected return value from standaloneProbeMain:\", rv)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueTimeout(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tprobed.Store(true)\n\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\tcase <-r.Context().Done():\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tos.Setenv(queuePortEnvVar, strconv.Itoa(port))\n\n\tif rv := standaloneProbeMain(300*time.Millisecond, nil); rv == 0 {\n\t\tt.Error(\"Unexpected return value from standaloneProbeMain:\", rv)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueDelayedReady(t *testing.T) {\n\tvar count atomic.Int64\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tif count.Inc() < 3 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tif err := probeQueueHealthPath(readiness.PollTimeout, port, http.DefaultTransport); err != nil {\n\t\tt.Errorf(\"probeQueueHealthPath(%d) = %s\", port, err)\n\t}\n}\n\nfunc newProbeTestServer(t *testing.T, f func(w http.ResponseWriter, r *http.Request)) (port int) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header.Get(network.UserAgentKey) == network.QueueProxyUserAgent {\n\t\t\tf(w, r)\n\t\t}\n\t}))\n\tt.Cleanup(ts.Close)\n\n\tu, err := url.Parse(ts.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"%s is not a valid URL: %v\", ts.URL, err)\n\t}\n\n\tport, err = strconv.Atoi(u.Port())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to convert port(%s) to int: %v\", u.Port(), err)\n\t}\n\n\treturn port\n}\n<commit_msg>Bump request timeout in TestProbeQueueNotReady to fix flake (#11050)<commit_after>\/*\nCopyright 2020 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"go.uber.org\/atomic\"\n\n\tnetwork \"knative.dev\/networking\/pkg\"\n\t\"knative.dev\/serving\/pkg\/queue\/readiness\"\n)\n\nfunc TestProbeQueueInvalidPort(t *testing.T) {\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tfor _, port := range []string{\"-1\", \"0\", \"66000\"} {\n\t\tos.Setenv(queuePortEnvVar, port)\n\t\tif rv := standaloneProbeMain(1, http.DefaultTransport); rv != 1 {\n\t\t\tt.Error(\"Unexpected return code\", rv)\n\t\t}\n\t}\n}\n\nfunc TestProbeQueueConnectionFailure(t *testing.T) {\n\tif err := probeQueueHealthPath(1, 12345, http.DefaultTransport); err == nil {\n\t\tt.Error(\"Expected error, got nil\")\n\t}\n}\n\nfunc TestProbeQueueNotReady(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t})\n\n\terr := probeQueueHealthPath(300*time.Millisecond, port, http.DefaultTransport)\n\n\tif err == nil || err.Error() != \"probe returned not ready\" {\n\t\tt.Error(\"Unexpected not ready error:\", err)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeShuttingDown(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusGone)\n\t})\n\n\terr := probeQueueHealthPath(time.Second, port, http.DefaultTransport)\n\n\tif err == nil || err.Error() != \"failed to probe: failing probe deliberately for shutdown\" {\n\t\tt.Error(\"Unexpected error:\", err)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueShuttingDownFailsFast(t *testing.T) {\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusGone)\n\t})\n\n\tstart := time.Now()\n\tif err := probeQueueHealthPath(1, port, http.DefaultTransport); err == nil {\n\t\tt.Error(\"probeQueueHealthPath did not fail\")\n\t}\n\n\t\/\/ if fails due to timeout and not cancelation, then it took too long.\n\tif time.Since(start) >= 1*time.Second {\n\t\tt.Error(\"took too long to fail\")\n\t}\n}\n\nfunc TestProbeQueueReady(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tprobed.Store(true)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tos.Setenv(queuePortEnvVar, strconv.Itoa(port))\n\n\tif rv := standaloneProbeMain(0 \/*use default*\/, nil); rv != 0 {\n\t\tt.Error(\"Unexpected return value from standaloneProbeMain:\", rv)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueTimeout(t *testing.T) {\n\tvar probed atomic.Bool\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tprobed.Store(true)\n\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\tcase <-r.Context().Done():\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tt.Cleanup(func() { os.Unsetenv(queuePortEnvVar) })\n\tos.Setenv(queuePortEnvVar, strconv.Itoa(port))\n\n\tif rv := standaloneProbeMain(300*time.Millisecond, nil); rv == 0 {\n\t\tt.Error(\"Unexpected return value from standaloneProbeMain:\", rv)\n\t}\n\n\tif !probed.Load() {\n\t\tt.Error(\"Expected the queue proxy server to be probed\")\n\t}\n}\n\nfunc TestProbeQueueDelayedReady(t *testing.T) {\n\tvar count atomic.Int64\n\tport := newProbeTestServer(t, func(w http.ResponseWriter, _ *http.Request) {\n\t\tif count.Inc() < 3 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n\n\tif err := probeQueueHealthPath(readiness.PollTimeout, port, http.DefaultTransport); err != nil {\n\t\tt.Errorf(\"probeQueueHealthPath(%d) = %s\", port, err)\n\t}\n}\n\nfunc newProbeTestServer(t *testing.T, f func(w http.ResponseWriter, r *http.Request)) (port int) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header.Get(network.UserAgentKey) == network.QueueProxyUserAgent {\n\t\t\tf(w, r)\n\t\t}\n\t}))\n\tt.Cleanup(ts.Close)\n\n\tu, err := url.Parse(ts.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"%s is not a valid URL: %v\", ts.URL, err)\n\t}\n\n\tport, err = strconv.Atoi(u.Port())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to convert port(%s) to int: %v\", u.Port(), err)\n\t}\n\n\treturn port\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/brandur\/sorg\"\n\t_ \"github.com\/brandur\/sorg\/testing\"\n\t_ \"github.com\/lib\/pq\"\n\tassert \"github.com\/stretchr\/testify\/require\"\n)\n\nvar db *sql.DB\n\nfunc init() {\n\terr := sorg.CreateTargetDirs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err = sql.Open(\"postgres\", \"postgres:\/\/localhost\/sorg-test?sslmode=disable\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc TestCompileJavascripts(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"javascripts\")\n\n\tfile1 := dir + \"\/file1.js\"\n\tfile2 := dir + \"\/file2.js\"\n\tfile3 := dir + \"\/file3.js\"\n\tout := dir + \"\/app.js\"\n\n\terr = ioutil.WriteFile(file1, []byte(`function() { return \"file1\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file2, []byte(`function() { return \"file2\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file3, []byte(`function() { return \"file3\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = compileJavascripts([]string{file1, file2, file3}, out)\n\tassert.NoError(t, err)\n\n\tactual, err := ioutil.ReadFile(out)\n\tassert.NoError(t, err)\n\n\texpected := `\/* file1.js *\/\n\n(function() {\n\nfunction() { return \"file1\" }\n\n}).call(this);\n\n\/* file2.js *\/\n\n(function() {\n\nfunction() { return \"file2\" }\n\n}).call(this);\n\n\/* file3.js *\/\n\n(function() {\n\nfunction() { return \"file3\" }\n\n}).call(this);\n\n`\n\tassert.Equal(t, expected, string(actual))\n}\n\nfunc TestCompilePhotos(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\tphotos, err := compilePhotos(nil)\n\tassert.NoError(t, err)\n\tassert.Equal(t, []*Photo(nil), photos)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\tphotos, err = compilePhotos(db)\n\tassert.NoError(t, err)\n\tassert.Equal(t, []*Photo(nil), photos)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert photos\n\t\/\/photos, err = compilePhotos(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileReading(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileReading(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileReading(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert reading\n\t\/\/err = compileReading(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileRuns(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileRuns(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileRuns(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert runs\n\t\/\/err = compileRuns(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileStylesheets(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"stylesheets\")\n\n\tfile1 := dir + \"\/file1.sass\"\n\tfile2 := dir + \"\/file2.sass\"\n\tfile3 := dir + \"\/file3.css\"\n\tout := dir + \"\/app.css\"\n\n\t\/\/ The syntax of the first and second files is GCSS and the third is in\n\t\/\/ CSS.\n\terr = ioutil.WriteFile(file1, []byte(\"p\\n margin: 10px\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file2, []byte(\"p\\n padding: 10px\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file3, []byte(\"p {\\n border: 10px;\\n}\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = compileStylesheets([]string{file1, file2, file3}, out)\n\tassert.NoError(t, err)\n\n\tactual, err := ioutil.ReadFile(out)\n\tassert.NoError(t, err)\n\n\t\/\/ Note that the first two files have no spacing in the output because they\n\t\/\/ go through the GCSS compiler.\n\texpected := `\/* file1.sass *\/\n\np{margin:10px;}\n\n\/* file2.sass *\/\n\np{padding:10px;}\n\n\/* file3.css *\/\n\np {\n border: 10px;\n}\n\n`\n\tassert.Equal(t, expected, string(actual))\n}\n\nfunc TestCompileTwitter(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileTwitter(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileTwitter(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\tnow := time.Now()\n\ttweet := &Tweet{\n\t\tContent: \"Hello, world!\",\n\t\tOccurredAt: &now,\n\t\tSlug: \"1234\",\n\t}\n\tinsertTweet(t, tweet, false)\n\n\terr = compileTwitter(db)\n\tassert.NoError(t, err)\n}\n\nfunc TestEnsureSymlink(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"symlink\")\n\tassert.NoError(t, err)\n\n\tsource := path.Join(dir, \"source\")\n\terr = ioutil.WriteFile(source, []byte(\"source\"), 0755)\n\tassert.NoError(t, err)\n\n\tdest := path.Join(dir, \"symlink-dest\")\n\n\t\/\/\n\t\/\/ Case 1: Symlink does not exist\n\t\/\/\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err := os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n\n\t\/\/\n\t\/\/ Case 2: Symlink does exist\n\t\/\/\n\t\/\/ Consists solely of re-running the previous test case.\n\t\/\/\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err = os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n\n\t\/\/\n\t\/\/ Case 3: Symlink file exists, but source doesn't\n\t\/\/\n\n\terr = os.RemoveAll(dest)\n\tassert.NoError(t, err)\n\n\tsource = path.Join(dir, \"source\")\n\terr = ioutil.WriteFile(source, []byte(\"source\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err = os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n}\n\nfunc TestGetLocals(t *testing.T) {\n\tlocals := getLocals(\"Title\", map[string]interface{}{\n\t\t\"Foo\": \"Bar\",\n\t})\n\n\tassert.Equal(t, \"Bar\", locals[\"Foo\"])\n\tassert.Equal(t, sorg.Release, locals[\"Release\"])\n\tassert.Equal(t, \"Title\", locals[\"Title\"])\n}\n\nfunc TestIsHidden(t *testing.T) {\n\tassert.Equal(t, true, isHidden(\".gitkeep\"))\n\tassert.Equal(t, false, isHidden(\"article\"))\n}\n\nfunc TestSplitFrontmatter(t *testing.T) {\n\tfrontmatter, content, err := splitFrontmatter(`---\nfoo: bar\n---\n\nother`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo: bar\", frontmatter)\n\tassert.Equal(t, \"other\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`other`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"\", frontmatter)\n\tassert.Equal(t, \"other\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`---\nfoo: bar\n---\n`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo: bar\", frontmatter)\n\tassert.Equal(t, \"\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`foo: bar\n---\n`)\n\tassert.Equal(t, errBadFrontmatter, err)\n}\n\nfunc insertTweet(t *testing.T, tweet *Tweet, reply bool) {\n\t_, err := db.Exec(`\n\t\tINSERT INTO events\n\t\t\t(content, occurred_at, metadata, slug, type)\n\t\tVALUES\n\t\t\t($1, $2, hstore('reply', $3), $4, $5)\n\t`, tweet.Content, tweet.OccurredAt, reply, tweet.Slug, \"twitter\")\n\tassert.NoError(t, err)\n}\n<commit_msg>Fix test involing output directory<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/brandur\/sorg\"\n\t_ \"github.com\/brandur\/sorg\/testing\"\n\t_ \"github.com\/lib\/pq\"\n\tassert \"github.com\/stretchr\/testify\/require\"\n)\n\nvar db *sql.DB\n\nfunc init() {\n\tconf.TargetDir = \".\/public\"\n\terr := sorg.CreateOutputDirs(conf.TargetDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err = sql.Open(\"postgres\", \"postgres:\/\/localhost\/sorg-test?sslmode=disable\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc TestCompileJavascripts(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"javascripts\")\n\n\tfile1 := dir + \"\/file1.js\"\n\tfile2 := dir + \"\/file2.js\"\n\tfile3 := dir + \"\/file3.js\"\n\tout := dir + \"\/app.js\"\n\n\terr = ioutil.WriteFile(file1, []byte(`function() { return \"file1\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file2, []byte(`function() { return \"file2\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file3, []byte(`function() { return \"file3\" }`), 0755)\n\tassert.NoError(t, err)\n\n\terr = compileJavascripts([]string{file1, file2, file3}, out)\n\tassert.NoError(t, err)\n\n\tactual, err := ioutil.ReadFile(out)\n\tassert.NoError(t, err)\n\n\texpected := `\/* file1.js *\/\n\n(function() {\n\nfunction() { return \"file1\" }\n\n}).call(this);\n\n\/* file2.js *\/\n\n(function() {\n\nfunction() { return \"file2\" }\n\n}).call(this);\n\n\/* file3.js *\/\n\n(function() {\n\nfunction() { return \"file3\" }\n\n}).call(this);\n\n`\n\tassert.Equal(t, expected, string(actual))\n}\n\nfunc TestCompilePhotos(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\tphotos, err := compilePhotos(nil)\n\tassert.NoError(t, err)\n\tassert.Equal(t, []*Photo(nil), photos)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\tphotos, err = compilePhotos(db)\n\tassert.NoError(t, err)\n\tassert.Equal(t, []*Photo(nil), photos)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert photos\n\t\/\/photos, err = compilePhotos(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileReading(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileReading(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileReading(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert reading\n\t\/\/err = compileReading(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileRuns(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileRuns(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileRuns(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\t\/\/ TODO: insert runs\n\t\/\/err = compileRuns(db)\n\t\/\/assert.NoError(t, err)\n}\n\nfunc TestCompileStylesheets(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"stylesheets\")\n\n\tfile1 := dir + \"\/file1.sass\"\n\tfile2 := dir + \"\/file2.sass\"\n\tfile3 := dir + \"\/file3.css\"\n\tout := dir + \"\/app.css\"\n\n\t\/\/ The syntax of the first and second files is GCSS and the third is in\n\t\/\/ CSS.\n\terr = ioutil.WriteFile(file1, []byte(\"p\\n margin: 10px\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file2, []byte(\"p\\n padding: 10px\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ioutil.WriteFile(file3, []byte(\"p {\\n border: 10px;\\n}\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = compileStylesheets([]string{file1, file2, file3}, out)\n\tassert.NoError(t, err)\n\n\tactual, err := ioutil.ReadFile(out)\n\tassert.NoError(t, err)\n\n\t\/\/ Note that the first two files have no spacing in the output because they\n\t\/\/ go through the GCSS compiler.\n\texpected := `\/* file1.sass *\/\n\np{margin:10px;}\n\n\/* file2.sass *\/\n\np{padding:10px;}\n\n\/* file3.css *\/\n\np {\n border: 10px;\n}\n\n`\n\tassert.Equal(t, expected, string(actual))\n}\n\nfunc TestCompileTwitter(t *testing.T) {\n\t\/\/\n\t\/\/ No database\n\t\/\/\n\n\terr := compileTwitter(nil)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With empty database\n\t\/\/\n\n\terr = compileTwitter(db)\n\tassert.NoError(t, err)\n\n\t\/\/\n\t\/\/ With results\n\t\/\/\n\n\tnow := time.Now()\n\ttweet := &Tweet{\n\t\tContent: \"Hello, world!\",\n\t\tOccurredAt: &now,\n\t\tSlug: \"1234\",\n\t}\n\tinsertTweet(t, tweet, false)\n\n\terr = compileTwitter(db)\n\tassert.NoError(t, err)\n}\n\nfunc TestEnsureSymlink(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"symlink\")\n\tassert.NoError(t, err)\n\n\tsource := path.Join(dir, \"source\")\n\terr = ioutil.WriteFile(source, []byte(\"source\"), 0755)\n\tassert.NoError(t, err)\n\n\tdest := path.Join(dir, \"symlink-dest\")\n\n\t\/\/\n\t\/\/ Case 1: Symlink does not exist\n\t\/\/\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err := os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n\n\t\/\/\n\t\/\/ Case 2: Symlink does exist\n\t\/\/\n\t\/\/ Consists solely of re-running the previous test case.\n\t\/\/\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err = os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n\n\t\/\/\n\t\/\/ Case 3: Symlink file exists, but source doesn't\n\t\/\/\n\n\terr = os.RemoveAll(dest)\n\tassert.NoError(t, err)\n\n\tsource = path.Join(dir, \"source\")\n\terr = ioutil.WriteFile(source, []byte(\"source\"), 0755)\n\tassert.NoError(t, err)\n\n\terr = ensureSymlink(source, dest)\n\tassert.NoError(t, err)\n\n\tactual, err = os.Readlink(dest)\n\tassert.Equal(t, source, actual)\n}\n\nfunc TestGetLocals(t *testing.T) {\n\tlocals := getLocals(\"Title\", map[string]interface{}{\n\t\t\"Foo\": \"Bar\",\n\t})\n\n\tassert.Equal(t, \"Bar\", locals[\"Foo\"])\n\tassert.Equal(t, sorg.Release, locals[\"Release\"])\n\tassert.Equal(t, \"Title\", locals[\"Title\"])\n}\n\nfunc TestIsHidden(t *testing.T) {\n\tassert.Equal(t, true, isHidden(\".gitkeep\"))\n\tassert.Equal(t, false, isHidden(\"article\"))\n}\n\nfunc TestSplitFrontmatter(t *testing.T) {\n\tfrontmatter, content, err := splitFrontmatter(`---\nfoo: bar\n---\n\nother`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo: bar\", frontmatter)\n\tassert.Equal(t, \"other\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`other`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"\", frontmatter)\n\tassert.Equal(t, \"other\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`---\nfoo: bar\n---\n`)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo: bar\", frontmatter)\n\tassert.Equal(t, \"\", content)\n\n\tfrontmatter, content, err = splitFrontmatter(`foo: bar\n---\n`)\n\tassert.Equal(t, errBadFrontmatter, err)\n}\n\nfunc insertTweet(t *testing.T, tweet *Tweet, reply bool) {\n\t_, err := db.Exec(`\n\t\tINSERT INTO events\n\t\t\t(content, occurred_at, metadata, slug, type)\n\t\tVALUES\n\t\t\t($1, $2, hstore('reply', $3), $4, $5)\n\t`, tweet.Content, tweet.OccurredAt, reply, tweet.Slug, \"twitter\")\n\tassert.NoError(t, err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ xlsx2json package wraps the github.com\/tealag\/xlsx package (used under a BSD License) and a fork of Robert Krimen's Otto\n\/\/ Javascript engine (under an MIT License) providing an scriptable xlsx2json exporter, explorer and importer utility.\n\/\/\n\/\/\n\/\/ Overview: A command line utility designed to take a XML based Excel file\n\/\/ and turn each row into a JSON blob. The JSON blob returned for\n\/\/ each row can be processed via a JavaScript callback allowing for\n\/\/ flexible translations for spreadsheet to JSON output.\n\/\/\n\/\/ @author R. S. Doiel, <rsdoiel@gmail.com>\n\/\/\n\/\/ Copyright (c) 2016, R. S. Doiel\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n\/\/ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n\/\/ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n\/\/ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n\/\/ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n\/\/ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\/\/ 3rd Party packages\n\t\"github.com\/chzyer\/readline\"\n\n\t\/\/ My packages\n\t\"github.com\/rsdoiel\/xlsx2json\"\n)\n\nvar (\n\tshowhelp bool\n\tsheetNo int\n\tinputFilename *string\n\tjsFilename *string\n\tjsCallback *string\n)\n\nfunc usage() {\n\tfmt.Println(`\n\n USAGE: xlsx2json [OPTIONS] EXCEL_FILENAME\n\n OVERVIEW\n\n Read a .xlsx file and return each row as a JSON object (or array of objects).\n If a JavaScript file and callback name are provided then that will be used to\n generate the resulting JSON object per row.\n\n JAVASCRIPT\n\n The callback function in JavaScript should return an object that looks like\n\n {\"path\": ..., \"source\": ..., \"error\": ...}\n\n The \"path\" property should contain the desired filename to use for storing\n the JSON blob. If it is empty the output will only be displayed to standard out.\n\n The \"source\" property should be the final version of the object you want to\n turn into a JSON blob.\n\n The \"error\" property is a string and if the string is not empty it will be\n used as an error message and cause the processing to stop.\n\n A simple JavaScript Examples:\n\n \/\/ Counter i is used to name the JSON output files.\n var i = 0;\n\n \/\/ callback is the default name looked for when processing.\n \/\/ the command line option -callback lets you used a different name.\n function callback(row) {\n i += 1;\n if (i > 10) {\n \/\/ Stop if processing more than 10 rows.\n return {\"error\": \"too many rows...\"}\n }\n return {\n \"path\": \"data\/\" + i + \".json\",\n \"source\": row,\n \"error\": \"\"\n }\n }\n\n\n OPTIONS\n`)\n\tflag.PrintDefaults()\n\tfmt.Println(`\n\n Examples\n\n xlsx2json myfile.xlsx\n\n xlsx2json -js row2obj.js -callback row2obj myfile.xlsx\n\n\txlsx2json -repl myfile.xlsx\n\n`)\n\tos.Exit(0)\n}\n\nfunc init() {\n\tflag.BoolVar(&showhelp, \"h\", false, \"display this help message\")\n\tflag.BoolVar(&showhelp, \"help\", false, \"display this help message\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"display version information\")\n\tflag.BoolVar(&xlsx2json.UseRepl, \"i\", false, \"Run with an interactive repl\")\n\tflag.IntVar(&sheetNo, \"sheet\", 0, \"Specify the number of the sheet to process\")\n\tjsFilename = flag.String(\"js\", \"\", \"The name of the JavaScript file containing callback function\")\n\tjsCallback = flag.String(\"callback\", \"callback\", \"The name of the JavaScript function to use as a callback\")\n}\n\nfunc main() {\n\tvar (\n\t\tinputFilename string\n\t)\n\tflag.Parse()\n\n\tif showhelp == true {\n\t\tusage()\n\t}\n\tif showVersion == true {\n\t\tfmt.Printf(\"Version %s\\n\", xlsx2json.Version)\n\t}\n\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tinputFilename = args[0]\n\t}\n\tif inputFilename == \"\" {\n\t\t\/\/ Read Excel file from standard\n\t\tlog.Fatalf(\"Need to provide an xlsx file for input, -i\")\n\t}\n\tvm, output, err := xlsx2json.Run(inputFilename, sheetNo, *jsFilename, *jsCallback)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif xlsx2json.UseRepl == true {\n\t\t\/\/FIXME: merge output as an array\n\t\tvm.Object(fmt.Sprintf(`Spreadsheet = [%s]`, strings.Join(output, \",\")))\n\t\trl, err := readline.New(\"> \")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer rl.Close()\n\t\tfor xlsx2json.UseRepl == true {\n\t\t\tjsSrc, err := rl.Readline()\n\t\t\tif err != nil { \/\/ io.EOF, readline.ErrInterrupt\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(strings.Trim(jsSrc, \" \")) > 0 {\n\t\t\t\tif script, err := vm.Compile(\"repl\", jsSrc); err != nil {\n\t\t\t\t\tfmt.Printf(\"Compile error, %s\\n\", err)\n\t\t\t\t} else {\n\t\t\t\t\tout, err := vm.Eval(script)\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase err != nil:\n\t\t\t\t\t\tfmt.Printf(\"Runtime error, %s\\n\", err)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif xlsx2json.UseRepl == true {\n\t\t\t\t\t\t\tfmt.Println(out.String())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/FIXME: update output to reflect the contents of Spreadsheet from the JS VM\n\t\tif value, err := vm.Get(\"Spreadsheet\"); err == nil {\n\t\t\tdata, err := value.Export()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tsrc, err := json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\", src)\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tfmt.Printf(`[%s]`, strings.Join(output, \",\"))\n}\n<commit_msg>adding version number flag<commit_after>\/\/\n\/\/ xlsx2json package wraps the github.com\/tealag\/xlsx package (used under a BSD License) and a fork of Robert Krimen's Otto\n\/\/ Javascript engine (under an MIT License) providing an scriptable xlsx2json exporter, explorer and importer utility.\n\/\/\n\/\/\n\/\/ Overview: A command line utility designed to take a XML based Excel file\n\/\/ and turn each row into a JSON blob. The JSON blob returned for\n\/\/ each row can be processed via a JavaScript callback allowing for\n\/\/ flexible translations for spreadsheet to JSON output.\n\/\/\n\/\/ @author R. S. Doiel, <rsdoiel@gmail.com>\n\/\/\n\/\/ Copyright (c) 2016, R. S. Doiel\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n\/\/ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n\/\/ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n\/\/ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n\/\/ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n\/\/ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\/\/ 3rd Party packages\n\t\"github.com\/chzyer\/readline\"\n\n\t\/\/ My packages\n\t\"github.com\/rsdoiel\/xlsx2json\"\n)\n\nvar (\n\tshowhelp bool\n\tshowVersion bool\n\tsheetNo int\n\tinputFilename *string\n\tjsFilename *string\n\tjsCallback *string\n)\n\nfunc usage() {\n\tfmt.Println(`\n\n USAGE: xlsx2json [OPTIONS] EXCEL_FILENAME\n\n OVERVIEW\n\n Read a .xlsx file and return each row as a JSON object (or array of objects).\n If a JavaScript file and callback name are provided then that will be used to\n generate the resulting JSON object per row.\n\n JAVASCRIPT\n\n The callback function in JavaScript should return an object that looks like\n\n {\"path\": ..., \"source\": ..., \"error\": ...}\n\n The \"path\" property should contain the desired filename to use for storing\n the JSON blob. If it is empty the output will only be displayed to standard out.\n\n The \"source\" property should be the final version of the object you want to\n turn into a JSON blob.\n\n The \"error\" property is a string and if the string is not empty it will be\n used as an error message and cause the processing to stop.\n\n A simple JavaScript Examples:\n\n \/\/ Counter i is used to name the JSON output files.\n var i = 0;\n\n \/\/ callback is the default name looked for when processing.\n \/\/ the command line option -callback lets you used a different name.\n function callback(row) {\n i += 1;\n if (i > 10) {\n \/\/ Stop if processing more than 10 rows.\n return {\"error\": \"too many rows...\"}\n }\n return {\n \"path\": \"data\/\" + i + \".json\",\n \"source\": row,\n \"error\": \"\"\n }\n }\n\n\n OPTIONS\n`)\n\tflag.PrintDefaults()\n\tfmt.Printf(`\n\n Examples\n\n xlsx2json myfile.xlsx\n\n xlsx2json -js row2obj.js -callback row2obj myfile.xlsx\n\n\txlsx2json -repl myfile.xlsx\n\nVersion %s\n`, xlsx2json.Version)\n\tos.Exit(0)\n}\n\nfunc init() {\n\tflag.BoolVar(&showhelp, \"h\", false, \"display this help message\")\n\tflag.BoolVar(&showhelp, \"help\", false, \"display this help message\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"display version information\")\n\tflag.BoolVar(&xlsx2json.UseRepl, \"i\", false, \"Run with an interactive repl\")\n\tflag.IntVar(&sheetNo, \"sheet\", 0, \"Specify the number of the sheet to process\")\n\tjsFilename = flag.String(\"js\", \"\", \"The name of the JavaScript file containing callback function\")\n\tjsCallback = flag.String(\"callback\", \"callback\", \"The name of the JavaScript function to use as a callback\")\n}\n\nfunc main() {\n\tvar (\n\t\tinputFilename string\n\t)\n\tflag.Parse()\n\n\tif showhelp == true {\n\t\tusage()\n\t}\n\tif showVersion == true {\n\t\tfmt.Printf(\"Version %s\\n\", xlsx2json.Version)\n\t\tos.Exit(0)\n\t}\n\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tinputFilename = args[0]\n\t}\n\tif inputFilename == \"\" {\n\t\t\/\/ Read Excel file from standard\n\t\tlog.Fatalf(\"Need to provide an xlsx file for input, -i\")\n\t}\n\tvm, output, err := xlsx2json.Run(inputFilename, sheetNo, *jsFilename, *jsCallback)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif xlsx2json.UseRepl == true {\n\t\t\/\/FIXME: merge output as an array\n\t\tvm.Object(fmt.Sprintf(`Spreadsheet = [%s]`, strings.Join(output, \",\")))\n\t\trl, err := readline.New(\"> \")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer rl.Close()\n\t\tfor xlsx2json.UseRepl == true {\n\t\t\tjsSrc, err := rl.Readline()\n\t\t\tif err != nil { \/\/ io.EOF, readline.ErrInterrupt\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(strings.Trim(jsSrc, \" \")) > 0 {\n\t\t\t\tif script, err := vm.Compile(\"repl\", jsSrc); err != nil {\n\t\t\t\t\tfmt.Printf(\"Compile error, %s\\n\", err)\n\t\t\t\t} else {\n\t\t\t\t\tout, err := vm.Eval(script)\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase err != nil:\n\t\t\t\t\t\tfmt.Printf(\"Runtime error, %s\\n\", err)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif xlsx2json.UseRepl == true {\n\t\t\t\t\t\t\tfmt.Println(out.String())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/FIXME: update output to reflect the contents of Spreadsheet from the JS VM\n\t\tif value, err := vm.Get(\"Spreadsheet\"); err == nil {\n\t\t\tdata, err := value.Export()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tsrc, err := json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\", src)\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tfmt.Printf(`[%s]`, strings.Join(output, \",\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"goa.design\/goa.v2\/codegen\"\n\t\"goa.design\/goa.v2\/codegen\/files\"\n\t\"goa.design\/goa.v2\/design\"\n\t\"goa.design\/goa.v2\/design\/rest\"\n)\n\ntype (\n\t\/\/ pathData contains the data necessary to render the path template.\n\tpathData struct {\n\t\t\/\/ ServiceName\n\t\tServiceName string\n\t\t\/\/ EndpointName\n\t\tEndpointName string\n\t\t\/\/ Routes describes all the possible paths for an action\n\t\tRoutes []*pathRoute\n\t}\n\n\t\/\/ pathRoute contains the data to render a path for a specific route.\n\tpathRoute struct {\n\t\t\/\/ Path is the fullpath converted to printf compatible layout\n\t\tPath string\n\t\t\/\/ PathParams are all the path parameters in this route\n\t\tPathParams []string\n\t\t\/\/ Arguments describe the arguments used in the route\n\t\tArguments []*pathArgument\n\t}\n\n\t\/\/ pathArgument contains the name and data type of the path arguments\n\tpathArgument struct {\n\t\t\/\/ Name is the name of the argument variable\n\t\tName string\n\t\t\/\/ Type describes the datatype of the argument\n\t\tType design.DataType\n\t}\n\n\t\/\/ pathFile\n\tpathFile struct {\n\t\tsections []*codegen.Section\n\t}\n)\n\nvar pathTmpl = template.Must(template.New(\"path\").\n\tFuncs(template.FuncMap{\n\t\t\"add\": codegen.Add,\n\t\t\"goTypeRef\": codegen.GoTypeRef,\n\t\t\"goify\": codegen.Goify,\n\t}).\n\tParse(pathT))\n\n\/\/ PathFile returns the path file.\nfunc PathFile(api *design.APIExpr, r *rest.RootExpr) codegen.File {\n\ttitle := fmt.Sprintf(\"%s HTTP request path constructors\", api.Name)\n\tsections := []*codegen.Section{\n\t\tcodegen.Header(title, \"http\", []*codegen.ImportSpec{\n\t\t\t{Path: \"fmt\"},\n\t\t\t{Path: \"net\/url\"},\n\t\t\t{Path: \"strconv\"},\n\t\t\t{Path: \"strings\"},\n\t\t}),\n\t}\n\n\tfor _, res := range r.Resources {\n\t\tfor _, a := range res.Actions {\n\t\t\tsections = append(sections, Path(a))\n\t\t}\n\t}\n\n\treturn &pathFile{sections}\n}\n\n\/\/ Path returns a path section for the specified action\nfunc Path(a *rest.ActionExpr) *codegen.Section {\n\treturn &codegen.Section{\n\t\tTemplate: pathTmpl,\n\t\tData: buildPathData(a),\n\t}\n}\n\nfunc (e *pathFile) Sections(_ string) []*codegen.Section {\n\treturn e.sections\n}\n\nfunc (e *pathFile) OutputPath(reserved map[string]bool) string {\n\treturn files.UniquePath(\"gen\/transport\/http\/paths%d.go\", reserved)\n}\n\nfunc buildPathData(a *rest.ActionExpr) *pathData {\n\tpd := pathData{\n\t\tServiceName: a.Service.Name,\n\t\tEndpointName: a.Name,\n\t\tRoutes: make([]*pathRoute, len(a.Routes)),\n\t}\n\n\tfor i, r := range a.Routes {\n\t\tpd.Routes[i] = &pathRoute{\n\t\t\tPath: rest.WildcardRegex.ReplaceAllString(r.FullPath(), \"\/%v\"),\n\t\t\tPathParams: r.Params(),\n\t\t\tArguments: generatePathArguments(r),\n\t\t}\n\t}\n\treturn &pd\n}\n\nfunc generatePathArguments(r *rest.RouteExpr) []*pathArgument {\n\tparams := r.Params()\n\tobj := design.AsObject(r.Action.PathParams().Type)\n\targs := make([]*pathArgument, len(params))\n\tfor i, name := range params {\n\t\targs[i] = &pathArgument{\n\t\t\tName: name,\n\t\t\tType: obj[name].Type,\n\t\t}\n\t}\n\treturn args\n}\n\nconst pathT = `{{range $i, $route := .Routes -}}\n\/\/ {{$.EndpointName}}{{$.ServiceName}}Path{{if ne $i 0}}{{add $i 1}}{{end}} returns the URL path to the {{$.ServiceName}} service {{$.EndpointName}} HTTP endpoint.\nfunc {{$.EndpointName}}{{$.ServiceName}}Path{{if ne $i 0}}{{add $i 1}}{{end}}({{template \"arguments\" .Arguments}}) string {\n{{- if .Arguments}}\n\t{{template \"slice_conversion\" .Arguments -}}\n\treturn fmt.Sprintf(\"{{ .Path }}\"{{template \"fmt_params\" .Arguments}})\n{{- else}}\n\treturn \"{{ .Path }}\"\n{{- end}}\n}\n\n{{end}}\n\n{{- define \"arguments\" -}}\n{{range $i, $arg := . -}}\n{{if ne $i 0}}, {{end}}{{goify .Name false}} {{goTypeRef .Type}}\n{{- end}}\n{{- end}}\n\n{{- define \"fmt_params\" -}}\n{{range . -}}\n, {{if eq .Type.Name \"array\"}}strings.Join(encoded{{goify .Name true}}, \",\"){{else}}{{goify .Name false}}{{end}}\n{{- end}}\n{{- end}}\n\n{{- define \"slice_conversion\" -}}\n{{range $i, $arg := .}}\n\t{{- if eq .Type.Name \"array\" -}}\n\tencoded{{goify .Name true}} := make([]string, len({{goify .Name false}}))\n\tfor i, v := range {{goify .Name false}} {\n\t\tencoded{{goify .Name true}}[i] = {{if eq .Type.ElemType.Type.Name \"string\"}}url.QueryEscape(v)\n\t{{else if eq .Type.ElemType.Type.Name \"int\" \"int32\"}}strconv.FormatInt(int64(v), 10)\n\t{{else if eq .Type.ElemType.Type.Name \"int64\"}}strconv.FormatInt(v, 10)\n\t{{else if eq .Type.ElemType.Type.Name \"uint\" \"uint32\"}}strconv.FormatUint(uint64(v), 10)\n\t{{else if eq .Type.ElemType.Type.Name \"uint64\"}}strconv.FormatUint(v, 10)\n\t{{else if eq .Type.ElemType.Type.Name \"float32\"}}strconv.FormatFloat(float64(v), 'f', -1, 32)\n\t{{else if eq .Type.ElemType.Type.Name \"float64\"}}strconv.FormatFloat(v, 'f', -1, 64)\n\t{{else if eq .Type.ElemType.Type.Name \"boolean\"}}strconv.FormatBool(v)\n\t{{else}}url.QueryEscape(fmt.Sprintf(\"%v\", v))\n\t{{end -}}\n\t}\n\n\t{{end}}\n{{- end}}\n{{- end}}`\n<commit_msg>Make another pass at the paths file.<commit_after>package rest\n\nimport (\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"goa.design\/goa.v2\/codegen\"\n\t\"goa.design\/goa.v2\/codegen\/files\"\n\t\"goa.design\/goa.v2\/design\"\n\t\"goa.design\/goa.v2\/design\/rest\"\n)\n\ntype (\n\t\/\/ pathData contains the data necessary to render the path template.\n\tpathData struct {\n\t\t\/\/ ServiceName is the name of the service defined in the design.\n\t\tServiceName string\n\t\t\/\/ EndpointName is the name of the endpoint defined in the design.\n\t\tEndpointName string\n\t\t\/\/ Routes describes all the possible paths for an action.\n\t\tRoutes []*pathRoute\n\t}\n\n\t\/\/ pathRoute contains the data to render a path for a specific route.\n\tpathRoute struct {\n\t\t\/\/ Path is the fullpath converted to printf compatible layout.\n\t\tPath string\n\t\t\/\/ PathParams are all the path parameters in this route.\n\t\tPathParams []string\n\t\t\/\/ Arguments describe the arguments used in the route.\n\t\tArguments []*pathArgument\n\t}\n\n\t\/\/ pathArgument contains the name and data type of the path arguments.\n\tpathArgument struct {\n\t\t\/\/ Name is the name of the argument variable.\n\t\tName string\n\t\t\/\/ Type describes the datatype of the argument.\n\t\tType design.DataType\n\t}\n\n\t\/\/ pathFile is the codegen file that generates the path constructors.\n\tpathFile struct {\n\t\tsections []*codegen.Section\n\t}\n)\n\nvar pathTmpl = template.Must(template.New(\"path\").\n\tFuncs(template.FuncMap{\n\t\t\"add\": codegen.Add,\n\t\t\"goTypeRef\": codegen.GoTypeRef,\n\t\t\"goify\": codegen.Goify,\n\t\t\"isArray\": design.IsArray,\n\t}).\n\tParse(pathT))\n\n\/\/ PathFile returns the path file.\nfunc PathFile(api *design.APIExpr, r *rest.RootExpr) codegen.File {\n\ttitle := fmt.Sprintf(\"%s HTTP request path constructors\", api.Name)\n\tsections := []*codegen.Section{\n\t\tcodegen.Header(title, \"http\", []*codegen.ImportSpec{\n\t\t\t{Path: \"fmt\"},\n\t\t\t{Path: \"net\/url\"},\n\t\t\t{Path: \"strconv\"},\n\t\t\t{Path: \"strings\"},\n\t\t}),\n\t}\n\n\tfor _, res := range r.Resources {\n\t\tfor _, a := range res.Actions {\n\t\t\tsections = append(sections, Path(a))\n\t\t}\n\t}\n\n\treturn &pathFile{sections}\n}\n\n\/\/ Path returns a path section for the specified action\nfunc Path(a *rest.ActionExpr) *codegen.Section {\n\treturn &codegen.Section{\n\t\tTemplate: pathTmpl,\n\t\tData: buildPathData(a),\n\t}\n}\n\nfunc (e *pathFile) Sections(_ string) []*codegen.Section {\n\treturn e.sections\n}\n\nfunc (e *pathFile) OutputPath(reserved map[string]bool) string {\n\treturn files.UniquePath(\"gen\/transport\/http\/paths%d.go\", reserved)\n}\n\nfunc buildPathData(a *rest.ActionExpr) *pathData {\n\tpd := pathData{\n\t\tServiceName: a.Service.Name,\n\t\tEndpointName: a.Name,\n\t\tRoutes: make([]*pathRoute, len(a.Routes)),\n\t}\n\n\tfor i, r := range a.Routes {\n\t\tpd.Routes[i] = &pathRoute{\n\t\t\tPath: rest.WildcardRegex.ReplaceAllString(r.FullPath(), \"\/%v\"),\n\t\t\tPathParams: r.Params(),\n\t\t\tArguments: generatePathArguments(r),\n\t\t}\n\t}\n\treturn &pd\n}\n\nfunc generatePathArguments(r *rest.RouteExpr) []*pathArgument {\n\tparams := r.Params()\n\tobj := design.AsObject(r.Action.PathParams().Type)\n\targs := make([]*pathArgument, len(params))\n\tfor i, name := range params {\n\t\targs[i] = &pathArgument{\n\t\t\tName: name,\n\t\t\tType: obj[name].Type,\n\t\t}\n\t}\n\treturn args\n}\n\nconst pathT = `{{ range $i, $route := .Routes -}}\n\/\/ {{ goify $.EndpointName true }}{{ goify $.ServiceName true }}Path{{ if ne $i 0 }}{{ add $i 1 }}{{ end }} returns the URL path to the {{ $.ServiceName }} service {{ $.EndpointName }} HTTP endpoint.\nfunc {{ goify $.EndpointName true }}{{ goify $.ServiceName true }}Path{{ if ne $i 0 }}{{ add $i 1 }}{{ end }}({{ template \"arguments\" .Arguments }}) string {\n{{- if .Arguments }}\n\t{{ template \"slice_conversion\" .Arguments -}}\n\treturn fmt.Sprintf(\"{{ .Path }}\"{{ template \"fmt_params\" .Arguments }})\n{{- else }}\n\treturn \"{{ .Path }}\"\n{{- end }}\n}\n\n{{ end }}\n\n{{- define \"arguments\" -}}\n{{ range $i, $arg := . -}}\n{{ if ne $i 0 }}, {{ end }}{{ goify .Name false }} {{ goTypeRef .Type }}\n{{- end }}\n{{- end }}\n\n{{- define \"fmt_params\" -}}\n{{ range . -}}\n, {{ if isArray .Type }}strings.Join(encoded{{ goify .Name true }}, \",\")\n {{- else }}{{ goify .Name false }}{{ end }}\n{{- end }}\n{{- end }}\n\n{{- define \"slice_conversion\" -}}\n{{ range $i, $arg := . }}\n\t{{- if isArray .Type -}}\n\tencoded{{ goify .Name true }} := make([]string, len({{ goify .Name false }}))\n\t{{- $elemType := .Type.ElemType.Type.Name }}\n\tfor i, v := range {{ goify .Name false }} {\n\t\tencoded{{ goify .Name true }}[i] = {{ if eq $elemType \"string\" }}url.QueryEscape(v)\n\t{{ else if eq $elemType \"int\" \"int32\" }}strconv.FormatInt(int64(v), 10)\n\t{{ else if eq $elemType \"int64\" }}strconv.FormatInt(v, 10)\n\t{{ else if eq $elemType \"uint\" \"uint32\" }}strconv.FormatUint(uint64(v), 10)\n\t{{ else if eq $elemType \"uint64\" }}strconv.FormatUint(v, 10)\n\t{{ else if eq $elemType \"float32\" }}strconv.FormatFloat(float64(v), 'f', -1, 32)\n\t{{ else if eq $elemType \"float64\" }}strconv.FormatFloat(v, 'f', -1, 64)\n\t{{ else if eq $elemType \"boolean\" }}strconv.FormatBool(v)\n\t{{ else }}url.QueryEscape(fmt.Sprintf(\"%v\", v))\n\t{{ end -}}\n\t}\n\n\t{{ end }}\n{{- end }}\n{{- end }}`\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"bufio\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n)\n\nvar (\n\tuntrackCmd = &cobra.Command{\n\t\tUse: \"untrack\",\n\t\tShort: \"Remove an entry from .gitattributes\",\n\t\tRun: untrackCommand,\n\t}\n)\n\n\/\/ untrackCommand takes a list of paths as an argument, and removes each path from the\n\/\/ default attribtues file (.gitattributes), if it exists.\nfunc untrackCommand(cmd *cobra.Command, args []string) {\n\tlfs.InstallHooks(false)\n\n\tif len(args) < 1 {\n\t\tPrint(\"git lfs untrack <path> [path]*\")\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadFile(\".gitattributes\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tattributes := strings.NewReader(string(data))\n\n\tattributesFile, err := os.Create(\".gitattributes\")\n\tif err != nil {\n\t\tPrint(\"Error opening .gitattributes for writing\")\n\t\treturn\n\t}\n\n\tscanner := bufio.NewScanner(attributes)\n\n\t\/\/ Iterate through each line of the attributes file and rewrite it,\n\t\/\/ if the path was meant to be untracked, omit it, and print a message instead.\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Contains(line, \"filter=lfs\") {\n\t\t\tfields := strings.Fields(line)\n\t\t\tremoveThisPath := false\n\t\t\tfor _, t := range args {\n\t\t\t\tif t == fields[0] {\n\t\t\t\t\tremoveThisPath = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !removeThisPath {\n\t\t\t\tattributesFile.WriteString(line + \"\\n\")\n\t\t\t} else {\n\t\t\t\tPrint(\"Untracking %s\", fields[0])\n\t\t\t}\n\t\t}\n\t}\n\n\tattributesFile.Close()\n}\n\nfunc init() {\n\tRootCmd.AddCommand(untrackCmd)\n}\n<commit_msg>ラララララ ラー ウウウ フフフ<commit_after>package commands\n\nimport (\n\t\"bufio\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n)\n\nvar (\n\tuntrackCmd = &cobra.Command{\n\t\tUse: \"untrack\",\n\t\tShort: \"Remove an entry from .gitattributes\",\n\t\tRun: untrackCommand,\n\t}\n)\n\n\/\/ untrackCommand takes a list of paths as an argument, and removes each path from the\n\/\/ default attribtues file (.gitattributes), if it exists.\nfunc untrackCommand(cmd *cobra.Command, args []string) {\n\tif lfs.LocalGitDir == \"\" {\n\t\tPrint(\"Not a git repository.\")\n\t\tos.Exit(128)\n\t}\n\tif lfs.LocalWorkingDir == \"\" {\n\t\tPrint(\"This operation must be run in a work tree.\")\n\t\tos.Exit(128)\n\t}\n\n\tlfs.InstallHooks(false)\n\n\tif len(args) < 1 {\n\t\tPrint(\"git lfs untrack <path> [path]*\")\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadFile(\".gitattributes\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tattributes := strings.NewReader(string(data))\n\n\tattributesFile, err := os.Create(\".gitattributes\")\n\tif err != nil {\n\t\tPrint(\"Error opening .gitattributes for writing\")\n\t\treturn\n\t}\n\n\tscanner := bufio.NewScanner(attributes)\n\n\t\/\/ Iterate through each line of the attributes file and rewrite it,\n\t\/\/ if the path was meant to be untracked, omit it, and print a message instead.\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Contains(line, \"filter=lfs\") {\n\t\t\tfields := strings.Fields(line)\n\t\t\tremoveThisPath := false\n\t\t\tfor _, t := range args {\n\t\t\t\tif t == fields[0] {\n\t\t\t\t\tremoveThisPath = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !removeThisPath {\n\t\t\t\tattributesFile.WriteString(line + \"\\n\")\n\t\t\t} else {\n\t\t\t\tPrint(\"Untracking %s\", fields[0])\n\t\t\t}\n\t\t}\n\t}\n\n\tattributesFile.Close()\n}\n\nfunc init() {\n\tRootCmd.AddCommand(untrackCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package compilers\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/albrow\/scribble\/config\"\n\t\"github.com\/albrow\/scribble\/context\"\n\t\"github.com\/albrow\/scribble\/util\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"github.com\/wsxiaoys\/terminal\/color\"\n\t\"html\/template\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ PostsCompilerType represents a type capable of compiling post files.\ntype PostsCompilerType struct {\n\tpathMatch string\n\t\/\/ createdDirs keeps track of the directories that were created in config.DestDir.\n\t\/\/ It is used in the RemoveOld method.\n\tcreatedDirs []string\n}\n\n\/\/ PostCompiler is an instatiation of PostCompilerType\nvar PostsCompiler = PostsCompilerType{\n\tpathMatch: \"\",\n}\n\n\/\/ Post is an in-memory representation of the metadata for a given post.\n\/\/ Much of this data comes from the toml frontmatter.\ntype Post struct {\n\tTitle string `toml:\"title\"`\n\tAuthor string `toml:\"author\"`\n\tDescription string `toml:\"description\"`\n\tDate time.Time `toml:\"date\"`\n\t\/\/ the url for the post, not including protocol or domain name (useful for creating links)\n\tUrl template.URL `toml:\"-\"`\n\t\/\/ the html content for the post (parsed from markdown source)\n\tContent template.HTML `toml:\"-\"`\n\t\/\/ the full source path\n\tsrc string `toml:\"-\"`\n\t\/\/ the layout tmpl file to be used for the post\n\tLayout string `toml:\"layout\"`\n\t\/\/ the html template that can be used to render the post\n\ttemplate *template.Template\n}\n\nvar (\n\t\/\/ a slice of all posts\n\tposts = []*Post{}\n\t\/\/ a map of source path to post\n\tpostsMap = map[string]*Post{}\n)\n\n\/\/ Init should be called before any other methods. In this case, Init\n\/\/ sets up the pathMatch variable based on config.SourceDir and config.PostsDir\n\/\/ and adds the Posts helper function to FuncMap.\nfunc (p *PostsCompilerType) Init() {\n\tp.pathMatch = filepath.Join(config.PostsDir, \"*.md\")\n\t\/\/ Add the posts function to FuncMap\n\tcontext.FuncMap[\"Posts\"] = Posts\n}\n\n\/\/ CompileMatchFunc returns a MatchFunc which will return true for\n\/\/ any files which match a given pattern. In this case, the pattern\n\/\/ is any file that is inside config.PostsDir and ends in \".md\", excluding\n\/\/ hidden files and directories (which start with a \".\") but not those\n\/\/ which start with an underscore.\nfunc (p *PostsCompilerType) CompileMatchFunc() MatchFunc {\n\treturn pathMatchFunc(p.pathMatch, true, false)\n}\n\n\/\/ WatchMatchFunc returns a MatchFunc which will return true for\n\/\/ any files which match a given pattern. In this case, the pattern\n\/\/ is the same as it is for CompileMatchFunc.\nfunc (p *PostsCompilerType) WatchMatchFunc() MatchFunc {\n\t\/\/ PostsCompiler needs to watch all posts in the posts dir,\n\t\/\/ but also needs to watch all the *tmpl files in the posts\n\t\/\/ layouts dir, because if those change, it affects the way\n\t\/\/ posts are rendered.\n\tpostsMatch := pathMatchFunc(p.pathMatch, true, false)\n\tpostLayoutsMatch := pathMatchFunc(filepath.Join(config.PostLayoutsDir, \"*.tmpl\"), true, false)\n\t\/\/ unionMatchFuncs combines these two cases and returns a MatchFunc\n\t\/\/ which will return true if either matches. This allows us to watch\n\t\/\/ for changes in both the posts dir and the posts layouts dir.\n\treturn unionMatchFuncs(postsMatch, postLayoutsMatch)\n}\n\n\/\/ Compile compiles the file at srcPath. The caller will only\n\/\/ call this function for files which belong to PostsCompiler\n\/\/ according to the MatchFunc. Behavior for any other file is\n\/\/ undefined. Compile will output the compiled result to the appropriate\n\/\/ location in config.DestDir.\nfunc (p *PostsCompilerType) Compile(srcPath string) error {\n\t\/\/ Get the parsed post object and determine dest path\n\tpost := getOrCreatePostFromPath(srcPath)\n\tsrcFilename := filepath.Base(srcPath)\n\tdestPath := fmt.Sprintf(\"%s\/%s\", config.DestDir, strings.TrimSuffix(srcFilename, \".md\"))\n\tdestIndexFilePath := filepath.Join(destPath, \"index.html\")\n\tcolor.Printf(\"@g CREATE: %s -> %s\\n\", srcPath, destIndexFilePath)\n\n\t\/\/ Create the index file\n\tdestFile, err := util.CreateFileWithPath(destIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse content and frontmatter, then set the appropriate layout based on\n\t\/\/ the layout key in the frontmatter\n\tif err := post.parse(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write to destFile by executing the template\n\tpostContext := context.CopyContext()\n\tpostContext[\"Post\"] = post\n\tif err := post.template.Execute(destFile, postContext); err != nil {\n\t\treturn fmt.Errorf(\"ERROR compiling html template for posts: %s\", err.Error())\n\t}\n\n\t\/\/ Add the created dir to the list of created dirs\n\tp.createdDirs = append(p.createdDirs, destPath)\n\n\treturn nil\n}\n\n\/\/ CompileAll compiles zero or more files identified by srcPaths.\n\/\/ It works simply by calling Compile for each path. The caller is\n\/\/ responsible for only passing in files that belong to AceCompiler\n\/\/ according to the MatchFunc. Behavior for any other file is undefined.\nfunc (p *PostsCompilerType) CompileAll(srcPaths []string) error {\n\tfmt.Println(\"--> compiling posts\")\n\tfor _, srcPath := range srcPaths {\n\t\tif err := p.Compile(srcPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *PostsCompilerType) FileChanged(srcPath string, ev fsnotify.FileEvent) error {\n\t\/\/ Because of the way we set up the watcher, there are two possible\n\t\/\/ cases here.\n\t\/\/ 1) A template in the post layouts dir was changed. In this case,\n\t\/\/ we would ideally recompile all the posts that used that layout.\n\t\/\/ 2) A markdown file corresponding to a single post was changed. In this\n\t\/\/ case, ideally we only recompile the post that was changed. We need to\n\t\/\/ take into account any rename, delete, or create events and how they\n\t\/\/ affect the output files in destDir.\n\tswitch filepath.Ext(srcPath) {\n\tcase \".md\":\n\t\t\/\/ TODO: Be more intelligent here? If a single post file was midified,\n\t\t\/\/ we can simply recompile that post. We would also need to take into\n\t\t\/\/ account the subtle differences between rename, create, and delete\n\t\t\/\/ events. For now, recompile all posts.\n\t\tif err := recompileAllForCompiler(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase \".tmpl\":\n\t\t\/\/ TODO: Analyze post files and be more intelligent here?\n\t\t\/\/ When a post layout changes, only recompile the posts that\n\t\t\/\/ use that layout. For now, recompile all posts.\n\t\tif err := recompileAllForCompiler(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc (p *PostsCompilerType) RemoveOld() error {\n\t\/\/ Simply iterate through createdDirs and remove each of them\n\t\/\/ NOTE: this is different from the other compilers because each\n\t\/\/ post gets created as an index.html file inside some directory\n\t\/\/ (for prettier urls). So instead of removing files, we're removing\n\t\/\/ directories.\n\tfor _, dir := range p.createdDirs {\n\t\tif err := util.RemoveAllIfExists(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Posts returns up to limit posts, sorted by date. If limit is 0,\n\/\/ it returns all posts. If limit is greater than len(posts), it returns\n\/\/ all posts.\nfunc Posts(limit ...int) []*Post {\n\t\/\/ Sort the posts by date\n\tsortedPosts := make([]*Post, len(posts))\n\tcopy(sortedPosts, posts)\n\tsort.Sort(PostsByDate(sortedPosts))\n\n\t\/\/ Return up to limit posts\n\tif len(limit) == 0 || limit[0] == 0 || limit[0] > len(sortedPosts) {\n\t\treturn sortedPosts\n\t} else {\n\t\treturn sortedPosts[:limit[0]]\n\t}\n}\n\nfunc createPostFromPath(path string) *Post {\n\t\/\/ create post object\n\tname := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))\n\tp := &Post{\n\t\tUrl: template.URL(\"\/\" + name),\n\t\tsrc: path,\n\t}\n\tposts = append(posts, p)\n\tpostsMap[path] = p\n\treturn p\n}\n\nfunc getPostByPath(path string) *Post {\n\treturn postsMap[path]\n}\n\nfunc getOrCreatePostFromPath(path string) *Post {\n\tif p, found := postsMap[path]; found {\n\t\treturn p\n\t} else {\n\t\treturn createPostFromPath(path)\n\t}\n}\n\n\/\/ parse reads from the source file and sets the content and metadata fields\n\/\/ for the post. It also creates a new template for the post using the layout\n\/\/ field of the frontmatter.\nfunc (p *Post) parse() error {\n\t\/\/ Open the source file\n\tfile, err := os.Open(p.src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr := bufio.NewReader(file)\n\n\t\/\/ Split the file into frontmatter and markdown content\n\tfrontMatter, content, err := util.SplitFrontMatter(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Decode the frontmatter\n\tif _, err := toml.Decode(frontMatter, p); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse the markdown content and set p.Content\n\tp.Content = template.HTML(blackfriday.MarkdownCommon([]byte(content)))\n\n\t\/\/ Create a template for the post\n\tif p.Layout == \"\" {\n\t\treturn fmt.Errorf(\"Could not find layout definition in toml frontmatter for post: %s\", p.src)\n\t}\n\n\t\/\/ When we call template.ParseFiles, we want the post layout file to be first,\n\t\/\/ so it is the one that will be executed.\n\tpostLayoutFile := filepath.Join(config.PostLayoutsDir, p.Layout)\n\totherLayoutFiles, err := filepath.Glob(filepath.Join(config.LayoutsDir, \"*.tmpl\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tallFiles := append([]string{postLayoutFile}, otherLayoutFiles...)\n\t\/\/ Parse the includes files if there are any\n\tif config.IncludesDir != \"\" {\n\t\tincludeFiles, err := filepath.Glob(filepath.Join(config.IncludesDir, \"*tmpl\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tallFiles = append(allFiles, includeFiles...)\n\t}\n\ttmpl, err := template.ParseFiles(allFiles...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.template = tmpl\n\n\treturn nil\n}\n\n\/\/ The PostsByDate type is used only for sorting\ntype PostsByDate []*Post\n\nfunc (p PostsByDate) Len() int {\n\treturn len(p)\n}\n\nfunc (p PostsByDate) Less(i, j int) bool {\n\treturn p[i].Date.Before(p[j].Date)\n}\n\nfunc (p PostsByDate) Swap(i, j int) {\n\tp[i], p[j] = p[j], p[i]\n}\n<commit_msg>Watch includes and layouts dir and recompile posts when they change.<commit_after>package compilers\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/albrow\/scribble\/config\"\n\t\"github.com\/albrow\/scribble\/context\"\n\t\"github.com\/albrow\/scribble\/util\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"github.com\/wsxiaoys\/terminal\/color\"\n\t\"html\/template\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ PostsCompilerType represents a type capable of compiling post files.\ntype PostsCompilerType struct {\n\tpathMatch string\n\t\/\/ createdDirs keeps track of the directories that were created in config.DestDir.\n\t\/\/ It is used in the RemoveOld method.\n\tcreatedDirs []string\n}\n\n\/\/ PostCompiler is an instatiation of PostCompilerType\nvar PostsCompiler = PostsCompilerType{\n\tpathMatch: \"\",\n}\n\n\/\/ Post is an in-memory representation of the metadata for a given post.\n\/\/ Much of this data comes from the toml frontmatter.\ntype Post struct {\n\tTitle string `toml:\"title\"`\n\tAuthor string `toml:\"author\"`\n\tDescription string `toml:\"description\"`\n\tDate time.Time `toml:\"date\"`\n\t\/\/ the url for the post, not including protocol or domain name (useful for creating links)\n\tUrl template.URL `toml:\"-\"`\n\t\/\/ the html content for the post (parsed from markdown source)\n\tContent template.HTML `toml:\"-\"`\n\t\/\/ the full source path\n\tsrc string `toml:\"-\"`\n\t\/\/ the layout tmpl file to be used for the post\n\tLayout string `toml:\"layout\"`\n\t\/\/ the html template that can be used to render the post\n\ttemplate *template.Template\n}\n\nvar (\n\t\/\/ a slice of all posts\n\tposts = []*Post{}\n\t\/\/ a map of source path to post\n\tpostsMap = map[string]*Post{}\n)\n\n\/\/ Init should be called before any other methods. In this case, Init\n\/\/ sets up the pathMatch variable based on config.SourceDir and config.PostsDir\n\/\/ and adds the Posts helper function to FuncMap.\nfunc (p *PostsCompilerType) Init() {\n\tp.pathMatch = filepath.Join(config.PostsDir, \"*.md\")\n\t\/\/ Add the posts function to FuncMap\n\tcontext.FuncMap[\"Posts\"] = Posts\n}\n\n\/\/ CompileMatchFunc returns a MatchFunc which will return true for\n\/\/ any files which match a given pattern. In this case, the pattern\n\/\/ is any file that is inside config.PostsDir and ends in \".md\", excluding\n\/\/ hidden files and directories (which start with a \".\") but not those\n\/\/ which start with an underscore.\nfunc (p *PostsCompilerType) CompileMatchFunc() MatchFunc {\n\treturn pathMatchFunc(p.pathMatch, true, false)\n}\n\n\/\/ WatchMatchFunc returns a MatchFunc which will return true for\n\/\/ any files which match a given pattern. In this case, the pattern\n\/\/ is the same as it is for CompileMatchFunc.\nfunc (p *PostsCompilerType) WatchMatchFunc() MatchFunc {\n\t\/\/ PostsCompiler needs to watch all posts in the posts dir,\n\t\/\/ but also needs to watch all the *tmpl files in the posts\n\t\/\/ layouts dir, layouts dir, and includes dir. Because if those\n\t\/\/ change, it may affect the way posts are rendered.\n\tpostsMatch := pathMatchFunc(p.pathMatch, true, false)\n\tlayoutsMatch := pathMatchFunc(filepath.Join(config.LayoutsDir, \"*.tmpl\"), true, false)\n\tpostLayoutsMatch := pathMatchFunc(filepath.Join(config.PostLayoutsDir, \"*.tmpl\"), true, false)\n\t\/\/ unionMatchFuncs combines these two cases and returns a MatchFunc\n\t\/\/ which will return true if either matches. This allows us to watch\n\t\/\/ for changes in both the posts dir and the posts layouts dir.\n\tallMatch := unionMatchFuncs(postsMatch, layoutsMatch, postLayoutsMatch)\n\tif config.IncludesDir != \"\" {\n\t\t\/\/ We also want to watch includes if there are any\n\t\tincludesMatch := pathMatchFunc(filepath.Join(config.IncludesDir, \"*.tmpl\"), true, false)\n\t\tallMatch = unionMatchFuncs(allMatch, includesMatch)\n\t}\n\treturn allMatch\n}\n\n\/\/ Compile compiles the file at srcPath. The caller will only\n\/\/ call this function for files which belong to PostsCompiler\n\/\/ according to the MatchFunc. Behavior for any other file is\n\/\/ undefined. Compile will output the compiled result to the appropriate\n\/\/ location in config.DestDir.\nfunc (p *PostsCompilerType) Compile(srcPath string) error {\n\t\/\/ Get the parsed post object and determine dest path\n\tpost := getOrCreatePostFromPath(srcPath)\n\tsrcFilename := filepath.Base(srcPath)\n\tdestPath := fmt.Sprintf(\"%s\/%s\", config.DestDir, strings.TrimSuffix(srcFilename, \".md\"))\n\tdestIndexFilePath := filepath.Join(destPath, \"index.html\")\n\tcolor.Printf(\"@g CREATE: %s -> %s\\n\", srcPath, destIndexFilePath)\n\n\t\/\/ Create the index file\n\tdestFile, err := util.CreateFileWithPath(destIndexFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse content and frontmatter, then set the appropriate layout based on\n\t\/\/ the layout key in the frontmatter\n\tif err := post.parse(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write to destFile by executing the template\n\tpostContext := context.CopyContext()\n\tpostContext[\"Post\"] = post\n\tif err := post.template.Execute(destFile, postContext); err != nil {\n\t\treturn fmt.Errorf(\"ERROR compiling html template for posts: %s\", err.Error())\n\t}\n\n\t\/\/ Add the created dir to the list of created dirs\n\tp.createdDirs = append(p.createdDirs, destPath)\n\n\treturn nil\n}\n\n\/\/ CompileAll compiles zero or more files identified by srcPaths.\n\/\/ It works simply by calling Compile for each path. The caller is\n\/\/ responsible for only passing in files that belong to AceCompiler\n\/\/ according to the MatchFunc. Behavior for any other file is undefined.\nfunc (p *PostsCompilerType) CompileAll(srcPaths []string) error {\n\tfmt.Println(\"--> compiling posts\")\n\tfor _, srcPath := range srcPaths {\n\t\tif err := p.Compile(srcPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *PostsCompilerType) FileChanged(srcPath string, ev fsnotify.FileEvent) error {\n\t\/\/ Because of the way we set up the watcher, there are two possible\n\t\/\/ cases here.\n\t\/\/ 1) A template in the post layouts dir was changed. In this case,\n\t\/\/ we would ideally recompile all the posts that used that layout.\n\t\/\/ 2) A markdown file corresponding to a single post was changed. In this\n\t\/\/ case, ideally we only recompile the post that was changed. We need to\n\t\/\/ take into account any rename, delete, or create events and how they\n\t\/\/ affect the output files in destDir.\n\tswitch filepath.Ext(srcPath) {\n\tcase \".md\":\n\t\t\/\/ TODO: Be more intelligent here? If a single post file was midified,\n\t\t\/\/ we can simply recompile that post. We would also need to take into\n\t\t\/\/ account the subtle differences between rename, create, and delete\n\t\t\/\/ events. For now, recompile all posts.\n\t\tif err := recompileAllForCompiler(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase \".tmpl\":\n\t\t\/\/ TODO: Analyze post files and be more intelligent here?\n\t\t\/\/ When a post layout changes, only recompile the posts that\n\t\t\/\/ use that layout. For now, recompile all posts.\n\t\tif err := recompileAllForCompiler(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc (p *PostsCompilerType) RemoveOld() error {\n\t\/\/ Simply iterate through createdDirs and remove each of them\n\t\/\/ NOTE: this is different from the other compilers because each\n\t\/\/ post gets created as an index.html file inside some directory\n\t\/\/ (for prettier urls). So instead of removing files, we're removing\n\t\/\/ directories.\n\tfor _, dir := range p.createdDirs {\n\t\tif err := util.RemoveAllIfExists(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Posts returns up to limit posts, sorted by date. If limit is 0,\n\/\/ it returns all posts. If limit is greater than len(posts), it returns\n\/\/ all posts.\nfunc Posts(limit ...int) []*Post {\n\t\/\/ Sort the posts by date\n\tsortedPosts := make([]*Post, len(posts))\n\tcopy(sortedPosts, posts)\n\tsort.Sort(PostsByDate(sortedPosts))\n\n\t\/\/ Return up to limit posts\n\tif len(limit) == 0 || limit[0] == 0 || limit[0] > len(sortedPosts) {\n\t\treturn sortedPosts\n\t} else {\n\t\treturn sortedPosts[:limit[0]]\n\t}\n}\n\nfunc createPostFromPath(path string) *Post {\n\t\/\/ create post object\n\tname := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))\n\tp := &Post{\n\t\tUrl: template.URL(\"\/\" + name),\n\t\tsrc: path,\n\t}\n\tposts = append(posts, p)\n\tpostsMap[path] = p\n\treturn p\n}\n\nfunc getPostByPath(path string) *Post {\n\treturn postsMap[path]\n}\n\nfunc getOrCreatePostFromPath(path string) *Post {\n\tif p, found := postsMap[path]; found {\n\t\treturn p\n\t} else {\n\t\treturn createPostFromPath(path)\n\t}\n}\n\n\/\/ parse reads from the source file and sets the content and metadata fields\n\/\/ for the post. It also creates a new template for the post using the layout\n\/\/ field of the frontmatter.\nfunc (p *Post) parse() error {\n\t\/\/ Open the source file\n\tfile, err := os.Open(p.src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr := bufio.NewReader(file)\n\n\t\/\/ Split the file into frontmatter and markdown content\n\tfrontMatter, content, err := util.SplitFrontMatter(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Decode the frontmatter\n\tif _, err := toml.Decode(frontMatter, p); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Parse the markdown content and set p.Content\n\tp.Content = template.HTML(blackfriday.MarkdownCommon([]byte(content)))\n\n\t\/\/ Create a template for the post\n\tif p.Layout == \"\" {\n\t\treturn fmt.Errorf(\"Could not find layout definition in toml frontmatter for post: %s\", p.src)\n\t}\n\n\t\/\/ When we call template.ParseFiles, we want the post layout file to be first,\n\t\/\/ so it is the one that will be executed.\n\tpostLayoutFile := filepath.Join(config.PostLayoutsDir, p.Layout)\n\totherLayoutFiles, err := filepath.Glob(filepath.Join(config.LayoutsDir, \"*.tmpl\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tallFiles := append([]string{postLayoutFile}, otherLayoutFiles...)\n\t\/\/ Parse the includes files if there are any\n\tif config.IncludesDir != \"\" {\n\t\tincludeFiles, err := filepath.Glob(filepath.Join(config.IncludesDir, \"*tmpl\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tallFiles = append(allFiles, includeFiles...)\n\t}\n\ttmpl, err := template.ParseFiles(allFiles...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.template = tmpl\n\n\treturn nil\n}\n\n\/\/ The PostsByDate type is used only for sorting\ntype PostsByDate []*Post\n\nfunc (p PostsByDate) Len() int {\n\treturn len(p)\n}\n\nfunc (p PostsByDate) Less(i, j int) bool {\n\treturn p[i].Date.Before(p[j].Date)\n}\n\nfunc (p PostsByDate) Swap(i, j int) {\n\tp[i], p[j] = p[j], p[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package raft\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/HelloCodeMing\/raft-rocks\/pb\"\n\t\"github.com\/HelloCodeMing\/raft-rocks\/store\"\n\t\"github.com\/HelloCodeMing\/raft-rocks\/utils\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ Hold state of raft, coordinate the updating and reading\ntype raftState struct {\n\tsync.RWMutex\n\tlog *store.LogStorage\n\tpersister store.Persister\n\tapplyCh chan<- *ApplyMsg\n\n\t\/\/ once initialized, they will not be changed\n\tMe int\n\tNumPeers int\n\n\tRole pb.RaftRole\n\tCurrentTerm int32\n\tVotedFor int32\n\n\t\/\/ maintained by leader, record follower's repliate and commit state\n\tMatchIndex []int\n\tNextIndex []int\n\n\t\/\/ in raft paper, these two field is volatile\n\t\/\/ when restart, we need replay all WAL to recover state\n\t\/\/ otherwise, we have to do checkpoint, and record where has beed commited and applied to state machine\n\t\/\/ but when it comes to RocksDB like storage engine, it has its own WAL, so the storage itself is durable\n\t\/\/ but disappointing, even if reading RocksDB's WAL to get LastApplied is possible, it's too tricky\n\t\/\/ An elegant way to avoid 'double WAL' may be stop use WAL in RocksDB, but only in Raft,\n\t\/\/ and in raft we could perioidly do checkpoint\/snapshot, persist LastApplied, as a result, we just need to\n\t\/\/ restore state machine from checkpoint, and replay WAL.\n\t\/\/\n\t\/\/ All above is just my imagination, the real implementation is too naive. When LastApplied is updated, store it in persister.\n\tCommitIndex int \/\/ index of highest log entry known to be committed\n\tLastApplied int \/\/ index of highest log entry applied to state machine\n\n\treadLease time.Time\n}\n\nfunc (s *raftState) getRole() pb.RaftRole {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.Role\n}\n\n\/\/ maybe we could cache this field in Raft, to avoid lock\nfunc (s *raftState) getTerm() int32 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.CurrentTerm\n}\n\nfunc (s *raftState) getCommited() int {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.CommitIndex\n}\n\nconst (\n\tcurrentTerm = \"currentTerm\"\n\tvotedFor = \"votedFor\"\n\tlastApplied = \"lastApplied\"\n)\n\nfunc (s *raftState) persist() {\n\ts.persister.StoreInt32(currentTerm, s.CurrentTerm)\n\ts.persister.StoreInt32(votedFor, s.VotedFor)\n}\n\nfunc (s *raftState) restore() {\n\tvar ok bool\n\ts.CurrentTerm, ok = s.persister.LoadInt32(currentTerm)\n\ts.VotedFor, ok = s.persister.LoadInt32(votedFor)\n\tif !ok {\n\t\ts.VotedFor = -1\n\t}\n\tapply, ok := s.persister.LoadInt32(lastApplied)\n\tif ok {\n\t\ts.LastApplied = int(apply)\n\t}\n}\n\nfunc (s *raftState) commitUntil(index int) {\n\ts.CommitIndex = index\n}\n\nfunc (s *raftState) applyOne() {\n\ts.LastApplied++\n\ts.persister.StoreInt32(lastApplied, int32(s.LastApplied))\n}\n\nfunc (s *raftState) checkNewTerm(newTerm int32) bool {\n\tif s.getTerm() < newTerm {\n\t\ts.becomeFollower(-1, newTerm)\n\t\treturn true\n\t}\n\treturn false\n}\nfunc (s *raftState) checkFollowerCommit(leaderCommit int) bool {\n\ts.Lock()\n\tdefer s.Unlock()\n\t\/\/ Have uncommited log, but leader has commited them\n\tif s.CommitIndex < leaderCommit && s.CommitIndex < s.log.LastIndex() {\n\t\ts.commitUntil(minInt(leaderCommit, int(s.log.LastIndex())))\n\t\ts.checkApply()\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ any command to apply\nfunc (s *raftState) checkApply() {\n\told := s.LastApplied\n\tlog := s.log\n\tfor s.CommitIndex > s.LastApplied {\n\t\ts.applyOne()\n\t\tif log.LastIndex() < s.LastApplied {\n\t\t\tpanic(s)\n\t\t}\n\t\tentry := log.At(s.LastApplied)\n\t\tif entry == nil {\n\t\t\tbreak\n\t\t}\n\t\tmsg := &ApplyMsg{Command: entry}\n\t\ts.applyCh <- msg\n\t}\n\tif s.LastApplied > old {\n\t\tglog.V(utils.VDebug).Infof(\"%s Applyed commands until index=%d\", s.String(), s.LastApplied)\n\t}\n}\n\nfunc (s *raftState) changeRole(role pb.RaftRole) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = role\n\n}\n\nfunc (s *raftState) becomeFollowerUnlocked(candidateID int32, term int32) {\n\ts.CurrentTerm = term\n\ts.VotedFor = candidateID\n\ts.Role = pb.RaftRole_Follower\n\ts.persist()\n}\n\nfunc (s *raftState) becomeFollower(candidateID int32, term int32) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.CurrentTerm = term\n\ts.VotedFor = candidateID\n\ts.Role = pb.RaftRole_Follower\n\ts.MatchIndex = nil\n\ts.NextIndex = nil\n\ts.persist()\n}\n\nfunc (s *raftState) becomeCandidate() int32 {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = pb.RaftRole_Candidate\n\ts.CurrentTerm++\n\ts.VotedFor = int32(s.Me)\n\ts.MatchIndex = nil\n\ts.NextIndex = nil\n\ts.persist()\n\treturn s.CurrentTerm\n}\n\nfunc (s *raftState) becomeLeader() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = pb.RaftRole_Leader\n\ts.MatchIndex = make([]int, s.NumPeers)\n\ts.NextIndex = make([]int, s.NumPeers)\n\tlastIndex := s.log.LastIndex()\n\tfor i := range s.NextIndex {\n\t\ts.NextIndex[i] = lastIndex + 1\n\t}\n}\n\nfunc (s *raftState) replicatedToPeer(peer int, index int) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.MatchIndex[peer] = maxInt(index, s.MatchIndex[peer])\n\ts.NextIndex[peer] = s.MatchIndex[peer] + 1\n\tif s.checkLeaderCommit() {\n\t\ts.checkApply()\n\t}\n}\n\n\/\/ update commit index, use matchIndex from peers\n\/\/ NOTE: It's not thread-safe, should be synchronized by external lock\nfunc (s *raftState) checkLeaderCommit() (updated bool) {\n\tlowerIndex := s.CommitIndex + 1\n\tupperIndex := 0\n\t\/\/ find max matchIndex\n\tfor _, x := range s.MatchIndex {\n\t\tif x > upperIndex {\n\t\t\tupperIndex = x\n\t\t}\n\t}\n\t\/\/ if N > commitIndex, a majority of match[i] >= N, and log[N].term == currentTerm\n\t\/\/ set commitIndex = N\n\tfor N := upperIndex; N >= lowerIndex && s.log.At(N).Term == s.CurrentTerm; N-- {\n\t\t\/\/ count match[i] >= N\n\t\tcnt := 1\n\t\tfor i, x := range s.MatchIndex {\n\t\t\tif i != s.Me && x >= N {\n\t\t\t\tcnt++\n\t\t\t}\n\t\t}\n\t\tif cnt >= s.majority() {\n\t\t\ts.commitUntil(N)\n\t\t\tglog.V(utils.VDebug).Infof(\"%s Leader update commitIndex: %d\", s.String(), s.CommitIndex)\n\t\t\tupdated = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (s *raftState) retreatForPeer(peer int) int {\n\ts.Lock()\n\tdefer s.Unlock()\n\tidx := &s.NextIndex[peer]\n\tlog := s.log\n\t*idx = minInt(*idx, log.LastIndex())\n\toldTerm := log.At(*idx).Term\n\tfor *idx > 0 && log.At(*idx).Term == oldTerm {\n\t\t*idx--\n\t}\n\t*idx = maxInt(*idx, 1)\n\treturn *idx\n}\n\nfunc (s *raftState) toReplicate(peer int) int {\n\treturn s.NextIndex[peer]\n}\n\n\/\/ If lease is granted in this term, and later than the old one, extend the lease\nfunc (s *raftState) updateReadLease(term int32, lease time.Time) {\n\tif s.readLease.Before(time.Now()) && term == s.CurrentTerm && lease.After(s.readLease) {\n\t\ts.readLease = lease\n\t\tglog.Infof(\"%s Update read lease to %v\", s.String(), lease)\n\t}\n}\n\nfunc (s *raftState) String() string {\n\treturn fmt.Sprintf(\"Raft<%d:%d>\", s.Me, s.CurrentTerm)\n}\n\nfunc (s *raftState) dump(writer io.Writer) {\n\tstate := map[string]interface{}{\n\t\t\"meta\": s,\n\t\t\"LogLastIndex\": s.log.LastIndex(),\n\t}\n\tbuf, err := json.MarshalIndent(state, \"\", \"\\t\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twriter.Write(buf)\n}\n\nfunc (s *raftState) majority() int {\n\treturn s.NumPeers\/2 + 1\n}\n\nfunc makeRaftState(log *store.LogStorage, persister store.Persister, applyCh chan<- *ApplyMsg, numPeers int, me int) *raftState {\n\ts := &raftState{\n\t\tlog: log,\n\t\tapplyCh: applyCh,\n\t\tNumPeers: numPeers,\n\t\tMe: me,\n\t\tCurrentTerm: 0,\n\t\tVotedFor: -1,\n\t\tRole: pb.RaftRole_Follower,\n\t\treadLease: time.Now(),\n\t\tpersister: persister,\n\t}\n\ts.restore()\n\tglog.Infof(\"Restore raft state: CurrentTerm: %d, CommitIndex: %d, LastApplied: %d\", s.CurrentTerm, s.CommitIndex, s.LastApplied)\n\treturn s\n}\n<commit_msg>optimize leader commit<commit_after>package raft\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/HelloCodeMing\/raft-rocks\/pb\"\n\t\"github.com\/HelloCodeMing\/raft-rocks\/store\"\n\t\"github.com\/HelloCodeMing\/raft-rocks\/utils\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ Hold state of raft, coordinate the updating and reading\ntype raftState struct {\n\tsync.RWMutex\n\tlog *store.LogStorage\n\tpersister store.Persister\n\tapplyCh chan<- *ApplyMsg\n\n\t\/\/ once initialized, they will not be changed\n\tMe int\n\tNumPeers int\n\n\tRole pb.RaftRole\n\tCurrentTerm int32\n\tVotedFor int32\n\n\t\/\/ maintained by leader, record follower's repliate and commit state\n\tMatchIndex []int\n\tNextIndex []int\n\n\t\/\/ in raft paper, these two field is volatile\n\t\/\/ when restart, we need replay all WAL to recover state\n\t\/\/ otherwise, we have to do checkpoint, and record where has beed commited and applied to state machine\n\t\/\/ but when it comes to RocksDB like storage engine, it has its own WAL, so the storage itself is durable\n\t\/\/ but disappointing, even if reading RocksDB's WAL to get LastApplied is possible, it's too tricky\n\t\/\/ An elegant way to avoid 'double WAL' may be stop use WAL in RocksDB, but only in Raft,\n\t\/\/ and in raft we could perioidly do checkpoint\/snapshot, persist LastApplied, as a result, we just need to\n\t\/\/ restore state machine from checkpoint, and replay WAL.\n\t\/\/\n\t\/\/ All above is just my imagination, the real implementation is too naive. When LastApplied is updated, store it in persister.\n\tCommitIndex int \/\/ index of highest log entry known to be committed\n\tLastApplied int \/\/ index of highest log entry applied to state machine\n\n\treadLease time.Time\n}\n\nfunc (s *raftState) getRole() pb.RaftRole {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.Role\n}\n\n\/\/ maybe we could cache this field in Raft, to avoid lock\nfunc (s *raftState) getTerm() int32 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.CurrentTerm\n}\n\nfunc (s *raftState) getCommited() int {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.CommitIndex\n}\n\nconst (\n\tcurrentTerm = \"currentTerm\"\n\tvotedFor = \"votedFor\"\n\tlastApplied = \"lastApplied\"\n)\n\nfunc (s *raftState) persist() {\n\ts.persister.StoreInt32(currentTerm, s.CurrentTerm)\n\ts.persister.StoreInt32(votedFor, s.VotedFor)\n}\n\nfunc (s *raftState) restore() {\n\tvar ok bool\n\ts.CurrentTerm, ok = s.persister.LoadInt32(currentTerm)\n\ts.VotedFor, ok = s.persister.LoadInt32(votedFor)\n\tif !ok {\n\t\ts.VotedFor = -1\n\t}\n\tapply, ok := s.persister.LoadInt32(lastApplied)\n\tif ok {\n\t\ts.LastApplied = int(apply)\n\t}\n}\n\nfunc (s *raftState) commitUntil(index int) {\n\ts.CommitIndex = index\n}\n\nfunc (s *raftState) applyOne() {\n\ts.LastApplied++\n\ts.persister.StoreInt32(lastApplied, int32(s.LastApplied))\n}\n\nfunc (s *raftState) checkNewTerm(newTerm int32) bool {\n\tif s.getTerm() < newTerm {\n\t\ts.becomeFollower(-1, newTerm)\n\t\treturn true\n\t}\n\treturn false\n}\nfunc (s *raftState) checkFollowerCommit(leaderCommit int) bool {\n\ts.Lock()\n\tdefer s.Unlock()\n\t\/\/ Have uncommited log, but leader has commited them\n\tif s.CommitIndex < leaderCommit && s.CommitIndex < s.log.LastIndex() {\n\t\ts.commitUntil(minInt(leaderCommit, int(s.log.LastIndex())))\n\t\ts.checkApply()\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ any command to apply\nfunc (s *raftState) checkApply() {\n\told := s.LastApplied\n\tlog := s.log\n\tfor s.CommitIndex > s.LastApplied {\n\t\ts.applyOne()\n\t\tif log.LastIndex() < s.LastApplied {\n\t\t\tpanic(s)\n\t\t}\n\t\tentry := log.At(s.LastApplied)\n\t\tif entry == nil {\n\t\t\tbreak\n\t\t}\n\t\tmsg := &ApplyMsg{Command: entry}\n\t\ts.applyCh <- msg\n\t}\n\tif s.LastApplied > old {\n\t\tglog.V(utils.VDebug).Infof(\"%s Applyed commands until index=%d\", s.String(), s.LastApplied)\n\t}\n}\n\nfunc (s *raftState) changeRole(role pb.RaftRole) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = role\n\n}\n\nfunc (s *raftState) becomeFollowerUnlocked(candidateID int32, term int32) {\n\ts.CurrentTerm = term\n\ts.VotedFor = candidateID\n\ts.Role = pb.RaftRole_Follower\n\ts.persist()\n}\n\nfunc (s *raftState) becomeFollower(candidateID int32, term int32) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.CurrentTerm = term\n\ts.VotedFor = candidateID\n\ts.Role = pb.RaftRole_Follower\n\ts.MatchIndex = nil\n\ts.NextIndex = nil\n\ts.persist()\n}\n\nfunc (s *raftState) becomeCandidate() int32 {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = pb.RaftRole_Candidate\n\ts.CurrentTerm++\n\ts.VotedFor = int32(s.Me)\n\ts.MatchIndex = nil\n\ts.NextIndex = nil\n\ts.persist()\n\treturn s.CurrentTerm\n}\n\nfunc (s *raftState) becomeLeader() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.Role = pb.RaftRole_Leader\n\ts.MatchIndex = make([]int, s.NumPeers)\n\ts.NextIndex = make([]int, s.NumPeers)\n\tlastIndex := s.log.LastIndex()\n\tfor i := range s.NextIndex {\n\t\ts.NextIndex[i] = lastIndex + 1\n\t}\n}\n\nfunc (s *raftState) replicatedToPeer(peer int, index int) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.MatchIndex[peer] = maxInt(index, s.MatchIndex[peer])\n\ts.NextIndex[peer] = s.MatchIndex[peer] + 1\n\tif s.checkLeaderCommit() {\n\t\ts.checkApply()\n\t}\n}\n\n\/\/ update commit index, use matchIndex from peers\n\/\/ NOTE: It's not thread-safe, should be synchronized by external lock\nfunc (s *raftState) checkLeaderCommit() bool {\n\tmatches := make([]int, 0, len(s.MatchIndex))\n\tfor _, x := range s.MatchIndex {\n\t\tmatches = append(matches, x)\n\t}\n\tsort.Sort(sort.Reverse(sort.IntSlice(matches)))\n\tnewC := matches[s.majority()-1]\n\tif newC > s.CommitIndex {\n\t\ts.commitUntil(newC)\n\t\tglog.V(utils.VDebug).Infof(\"%s Leader update commitIndex: %d\", s.String(), newC)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *raftState) retreatForPeer(peer int) int {\n\ts.Lock()\n\tdefer s.Unlock()\n\tidx := &s.NextIndex[peer]\n\tlog := s.log\n\t*idx = minInt(*idx, log.LastIndex())\n\toldTerm := log.At(*idx).Term\n\tfor *idx > 0 && log.At(*idx).Term == oldTerm {\n\t\t*idx--\n\t}\n\t*idx = maxInt(*idx, 1)\n\treturn *idx\n}\n\nfunc (s *raftState) toReplicate(peer int) int {\n\treturn s.NextIndex[peer]\n}\n\n\/\/ If lease is granted in this term, and later than the old one, extend the lease\nfunc (s *raftState) updateReadLease(term int32, lease time.Time) {\n\tif s.readLease.Before(time.Now()) && term == s.CurrentTerm && lease.After(s.readLease) {\n\t\ts.readLease = lease\n\t\tglog.Infof(\"%s Update read lease to %v\", s.String(), lease)\n\t}\n}\n\nfunc (s *raftState) String() string {\n\treturn fmt.Sprintf(\"Raft<%d:%d>\", s.Me, s.CurrentTerm)\n}\n\nfunc (s *raftState) dump(writer io.Writer) {\n\tstate := map[string]interface{}{\n\t\t\"meta\": s,\n\t\t\"LogLastIndex\": s.log.LastIndex(),\n\t}\n\tbuf, err := json.MarshalIndent(state, \"\", \"\\t\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twriter.Write(buf)\n}\n\nfunc (s *raftState) majority() int {\n\treturn s.NumPeers\/2 + 1\n}\n\nfunc makeRaftState(log *store.LogStorage, persister store.Persister, applyCh chan<- *ApplyMsg, numPeers int, me int) *raftState {\n\ts := &raftState{\n\t\tlog: log,\n\t\tapplyCh: applyCh,\n\t\tNumPeers: numPeers,\n\t\tMe: me,\n\t\tCurrentTerm: 0,\n\t\tVotedFor: -1,\n\t\tRole: pb.RaftRole_Follower,\n\t\treadLease: time.Now(),\n\t\tpersister: persister,\n\t}\n\ts.restore()\n\tglog.Infof(\"Restore raft state: CurrentTerm: %d, CommitIndex: %d, LastApplied: %d\", s.CurrentTerm, s.CommitIndex, s.LastApplied)\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ To run these tests, set GCLOUD_TESTS_GOLANG_PROJECT_ID env var to your GCP projectID\n\npackage compute\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"cloud.google.com\/go\/internal\/testutil\"\n\t\"cloud.google.com\/go\/internal\/uid\"\n\t\"google.golang.org\/api\/iterator\"\n\tcomputepb \"google.golang.org\/genproto\/googleapis\/cloud\/compute\/v1\"\n\t\"google.golang.org\/protobuf\/proto\"\n)\n\nvar projectId = testutil.ProjID()\nvar defaultZone = \"us-central1-a\"\n\nfunc TestCreateGetListInstance(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tspace := uid.NewSpace(\"gogapic\", nil)\n\tname := space.New()\n\tctx := context.Background()\n\tc, err := NewInstancesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tzonesClient, err := NewZoneOperationsRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcreateRequest := &computepb.InsertInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstanceResource: &computepb.Instance{\n\t\t\tName: &name,\n\t\t\tDescription: proto.String(\"тест\"),\n\t\t\tMachineType: proto.String(fmt.Sprintf(\"https:\/\/www.googleapis.com\/compute\/v1\/projects\/%s\/zones\/%s\/machineTypes\/n1-standard-1\", projectId, defaultZone)),\n\t\t\tDisks: []*computepb.AttachedDisk{\n\t\t\t\t{\n\t\t\t\t\tAutoDelete: proto.Bool(true),\n\t\t\t\t\tBoot: proto.Bool(true),\n\t\t\t\t\tType: computepb.AttachedDisk_PERSISTENT.Enum(),\n\t\t\t\t\tInitializeParams: &computepb.AttachedDiskInitializeParams{\n\t\t\t\t\t\tSourceImage: proto.String(\"projects\/debian-cloud\/global\/images\/family\/debian-10\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: []*computepb.NetworkInterface{\n\t\t\t\t{\n\t\t\t\t\tAccessConfigs: []*computepb.AccessConfig{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: proto.String(\"default\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tinsert, err := c.Insert(ctx, createRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twaitZonalRequest := &computepb.WaitZoneOperationRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tOperation: insert.Proto().GetName(),\n\t}\n\t_, err = zonesClient.Wait(ctx, waitZonalRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer ForceDeleteInstance(ctx, name, c)\n\n\tgetRequest := &computepb.GetInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\tget, err := c.Get(ctx, getRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif get.GetName() != name {\n\t\tt.Fatal(fmt.Sprintf(\"expected instance name: %s, got: %s\", name, get.GetName()))\n\t}\n\tif get.GetDescription() != \"тест\" {\n\t\tt.Fatal(fmt.Sprintf(\"expected instance description: %s, got: %s\", \"тест\", get.GetDescription()))\n\t}\n\tlistRequest := &computepb.ListInstancesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t}\n\n\titr := c.List(ctx, listRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == name {\n\t\t\tfound = true\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != nil && err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the instance in list response\")\n\t}\n\n\tdeleteInstanceRequest := &computepb.DeleteInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\t_, err = c.Delete(ctx, deleteInstanceRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc ForceDeleteInstance(ctx context.Context, name string, client *InstancesClient) {\n\tdeleteInstanceRequest := &computepb.DeleteInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\tclient.Delete(ctx, deleteInstanceRequest)\n}\n\nfunc TestCreateGetRemoveSecurityPolicies(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tspace := uid.NewSpace(\"gogapic\", nil)\n\tname := space.New()\n\tctx := context.Background()\n\tc, err := NewSecurityPoliciesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tglobalCLient, err := NewGlobalOperationsRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\taction := \"allow\"\n\tmatcher := &computepb.SecurityPolicyRuleMatcher{\n\t\tConfig: &computepb.SecurityPolicyRuleMatcherConfig{\n\t\t\tSrcIpRanges: []string{\n\t\t\t\t\"*\",\n\t\t\t},\n\t\t},\n\t\tVersionedExpr: computepb.SecurityPolicyRuleMatcher_SRC_IPS_V1.Enum(),\n\t}\n\tsecurityPolicyRule := &computepb.SecurityPolicyRule{\n\t\tAction: &action,\n\t\tPriority: proto.Int32(0),\n\t\tDescription: proto.String(\"test rule\"),\n\t\tMatch: matcher,\n\t}\n\tsecurityPolicyRuleDefault := &computepb.SecurityPolicyRule{\n\t\tAction: &action,\n\t\tPriority: proto.Int32(2147483647),\n\t\tDescription: proto.String(\"default rule\"),\n\t\tMatch: matcher,\n\t}\n\tinsertRequest := &computepb.InsertSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicyResource: &computepb.SecurityPolicy{\n\t\t\tName: &name,\n\t\t\tRules: []*computepb.SecurityPolicyRule{\n\t\t\t\tsecurityPolicyRule,\n\t\t\t\tsecurityPolicyRuleDefault,\n\t\t\t},\n\t\t},\n\t}\n\tinsert, err := c.Insert(ctx, insertRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twaitGlobalRequest := &computepb.WaitGlobalOperationRequest{\n\t\tProject: projectId,\n\t\tOperation: insert.Proto().GetName(),\n\t}\n\t_, err = globalCLient.Wait(ctx, waitGlobalRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer ForceDeleteSecurityPolicy(ctx, name, c)\n\n\tremoveRuleRequest := &computepb.RemoveRuleSecurityPolicyRequest{\n\t\tPriority: proto.Int32(0),\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\n\trule, err := c.RemoveRule(ctx, removeRuleRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\twaitGlobalRequestRemove := &computepb.WaitGlobalOperationRequest{\n\t\tProject: projectId,\n\t\tOperation: rule.Proto().GetName(),\n\t}\n\t_, err = globalCLient.Wait(ctx, waitGlobalRequestRemove)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tgetRequest := &computepb.GetSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\tget, err := c.Get(ctx, getRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(get.GetRules()) != 1 {\n\t\tt.Fatal(fmt.Sprintf(\"expected count for rules: %d, got: %d\", 1, len(get.GetRules())))\n\t}\n\n\tdeleteRequest := &computepb.DeleteSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\t_, err = c.Delete(ctx, deleteRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc ForceDeleteSecurityPolicy(ctx context.Context, name string, client *SecurityPoliciesClient) {\n\tdeleteRequest := &computepb.DeleteSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\tclient.Delete(ctx, deleteRequest)\n}\n\nfunc TestPaginationWithMaxRes(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.ListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tMaxResults: proto.Uint32(1),\n\t}\n\titr := c.List(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == \"nvidia-tesla-t4\" {\n\t\t\tfound = true\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationDefault(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.ListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t}\n\titr := c.List(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == \"nvidia-tesla-t4\" {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationMapResponse(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.AggregatedListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t}\n\titr := c.AggregatedList(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.Key == \"zones\/us-central1-a\" {\n\t\t\ttypes := element.Value.GetAcceleratorTypes()\n\t\t\tfor _, item := range types {\n\t\t\t\tif item.GetName() == \"nvidia-tesla-t4\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationMapResponseMaxRes(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.AggregatedListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tMaxResults: proto.Uint32(10),\n\t}\n\titr := c.AggregatedList(ctx, req)\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.Key == \"zones\/us-central1-a\" {\n\t\t\ttypes := element.Value.GetAcceleratorTypes()\n\t\t\tfor _, item := range types {\n\t\t\t\tif item.GetName() == \"nvidia-tesla-t4\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n<commit_msg>tests(compute): fix failing test (#4549)<commit_after>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ To run these tests, set GCLOUD_TESTS_GOLANG_PROJECT_ID env var to your GCP projectID\n\npackage compute\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"cloud.google.com\/go\/internal\/testutil\"\n\t\"cloud.google.com\/go\/internal\/uid\"\n\t\"google.golang.org\/api\/iterator\"\n\tcomputepb \"google.golang.org\/genproto\/googleapis\/cloud\/compute\/v1\"\n\t\"google.golang.org\/protobuf\/proto\"\n)\n\nvar projectId = testutil.ProjID()\nvar defaultZone = \"us-central1-a\"\n\nfunc TestCreateGetListInstance(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tspace := uid.NewSpace(\"gogapic\", nil)\n\tname := space.New()\n\tctx := context.Background()\n\tc, err := NewInstancesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tzonesClient, err := NewZoneOperationsRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcreateRequest := &computepb.InsertInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstanceResource: &computepb.Instance{\n\t\t\tName: &name,\n\t\t\tDescription: proto.String(\"тест\"),\n\t\t\tMachineType: proto.String(fmt.Sprintf(\"https:\/\/www.googleapis.com\/compute\/v1\/projects\/%s\/zones\/%s\/machineTypes\/n1-standard-1\", projectId, defaultZone)),\n\t\t\tDisks: []*computepb.AttachedDisk{\n\t\t\t\t{\n\t\t\t\t\tAutoDelete: proto.Bool(true),\n\t\t\t\t\tBoot: proto.Bool(true),\n\t\t\t\t\tType: computepb.AttachedDisk_PERSISTENT.Enum(),\n\t\t\t\t\tInitializeParams: &computepb.AttachedDiskInitializeParams{\n\t\t\t\t\t\tSourceImage: proto.String(\"projects\/debian-cloud\/global\/images\/family\/debian-10\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: []*computepb.NetworkInterface{\n\t\t\t\t{\n\t\t\t\t\tAccessConfigs: []*computepb.AccessConfig{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: proto.String(\"default\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tinsert, err := c.Insert(ctx, createRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twaitZonalRequest := &computepb.WaitZoneOperationRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tOperation: insert.Proto().GetName(),\n\t}\n\t_, err = zonesClient.Wait(ctx, waitZonalRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer ForceDeleteInstance(ctx, name, c)\n\n\tgetRequest := &computepb.GetInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\tget, err := c.Get(ctx, getRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif get.GetName() != name {\n\t\tt.Fatal(fmt.Sprintf(\"expected instance name: %s, got: %s\", name, get.GetName()))\n\t}\n\tif get.GetDescription() != \"тест\" {\n\t\tt.Fatal(fmt.Sprintf(\"expected instance description: %s, got: %s\", \"тест\", get.GetDescription()))\n\t}\n\tlistRequest := &computepb.ListInstancesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t}\n\n\titr := c.List(ctx, listRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == name {\n\t\t\tfound = true\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != nil && err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the instance in list response\")\n\t}\n\n\tdeleteInstanceRequest := &computepb.DeleteInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\t_, err = c.Delete(ctx, deleteInstanceRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc ForceDeleteInstance(ctx context.Context, name string, client *InstancesClient) {\n\tdeleteInstanceRequest := &computepb.DeleteInstanceRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tInstance: name,\n\t}\n\tclient.Delete(ctx, deleteInstanceRequest)\n}\n\nfunc TestCreateGetRemoveSecurityPolicies(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tspace := uid.NewSpace(\"gogapic\", nil)\n\tname := space.New()\n\tctx := context.Background()\n\tc, err := NewSecurityPoliciesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tglobalCLient, err := NewGlobalOperationsRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\taction := \"allow\"\n\tmatcher := &computepb.SecurityPolicyRuleMatcher{\n\t\tConfig: &computepb.SecurityPolicyRuleMatcherConfig{\n\t\t\tSrcIpRanges: []string{\n\t\t\t\t\"*\",\n\t\t\t},\n\t\t},\n\t\tVersionedExpr: computepb.SecurityPolicyRuleMatcher_SRC_IPS_V1.Enum(),\n\t}\n\tsecurityPolicyRule := &computepb.SecurityPolicyRule{\n\t\tAction: &action,\n\t\tPriority: proto.Int32(0),\n\t\tDescription: proto.String(\"test rule\"),\n\t\tMatch: matcher,\n\t}\n\tsecurityPolicyRuleDefault := &computepb.SecurityPolicyRule{\n\t\tAction: &action,\n\t\tPriority: proto.Int32(2147483647),\n\t\tDescription: proto.String(\"default rule\"),\n\t\tMatch: matcher,\n\t}\n\tinsertRequest := &computepb.InsertSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicyResource: &computepb.SecurityPolicy{\n\t\t\tName: &name,\n\t\t\tRules: []*computepb.SecurityPolicyRule{\n\t\t\t\tsecurityPolicyRule,\n\t\t\t\tsecurityPolicyRuleDefault,\n\t\t\t},\n\t\t},\n\t}\n\tinsert, err := c.Insert(ctx, insertRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twaitGlobalRequest := &computepb.WaitGlobalOperationRequest{\n\t\tProject: projectId,\n\t\tOperation: insert.Proto().GetName(),\n\t}\n\t_, err = globalCLient.Wait(ctx, waitGlobalRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer ForceDeleteSecurityPolicy(ctx, name, c)\n\n\tremoveRuleRequest := &computepb.RemoveRuleSecurityPolicyRequest{\n\t\tPriority: proto.Int32(0),\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\n\trule, err := c.RemoveRule(ctx, removeRuleRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\twaitGlobalRequestRemove := &computepb.WaitGlobalOperationRequest{\n\t\tProject: projectId,\n\t\tOperation: rule.Proto().GetName(),\n\t}\n\t_, err = globalCLient.Wait(ctx, waitGlobalRequestRemove)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tgetRequest := &computepb.GetSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\tget, err := c.Get(ctx, getRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(get.GetRules()) != 1 {\n\t\tt.Fatal(fmt.Sprintf(\"expected count for rules: %d, got: %d\", 1, len(get.GetRules())))\n\t}\n\n\tdeleteRequest := &computepb.DeleteSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\t_, err = c.Delete(ctx, deleteRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc ForceDeleteSecurityPolicy(ctx context.Context, name string, client *SecurityPoliciesClient) {\n\tdeleteRequest := &computepb.DeleteSecurityPolicyRequest{\n\t\tProject: projectId,\n\t\tSecurityPolicy: name,\n\t}\n\tclient.Delete(ctx, deleteRequest)\n}\n\nfunc TestPaginationWithMaxRes(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.ListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t\tMaxResults: proto.Uint32(1),\n\t}\n\titr := c.List(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == \"nvidia-tesla-t4\" {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != nil && err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationDefault(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.ListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tZone: defaultZone,\n\t}\n\titr := c.List(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.GetName() == \"nvidia-tesla-t4\" {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != nil && err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationMapResponse(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.AggregatedListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t}\n\titr := c.AggregatedList(ctx, req)\n\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.Key == \"zones\/us-central1-a\" {\n\t\t\ttypes := element.Value.GetAcceleratorTypes()\n\t\t\tfor _, item := range types {\n\t\t\t\tif item.GetName() == \"nvidia-tesla-t4\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n\nfunc TestPaginationMapResponseMaxRes(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping smoke test in short mode\")\n\t}\n\tctx := context.Background()\n\tc, err := NewAcceleratorTypesRESTClient(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq := &computepb.AggregatedListAcceleratorTypesRequest{\n\t\tProject: projectId,\n\t\tMaxResults: proto.Uint32(10),\n\t}\n\titr := c.AggregatedList(ctx, req)\n\tfound := false\n\telement, err := itr.Next()\n\tfor err == nil {\n\t\tif element.Key == \"zones\/us-central1-a\" {\n\t\t\ttypes := element.Value.GetAcceleratorTypes()\n\t\t\tfor _, item := range types {\n\t\t\t\tif item.GetName() == \"nvidia-tesla-t4\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telement, err = itr.Next()\n\t}\n\tif err != iterator.Done {\n\t\tt.Fatal(err)\n\t}\n\tif !found {\n\t\tt.Error(\"Couldn't find the accelerator in the response\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The term package implements rendering HTML content for the terminal.\npackage term\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"runtime\"\n\t\"unicode\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"golang.org\/x\/net\/html\"\n)\n\n\/\/ Renderer implements a rendering HTML content in a stripped down format\n\/\/ suitable for the terminal.\ntype Renderer struct {\n\t\/\/ Width sets the assumed width of the terminal, which aids in wrapping\n\t\/\/ text. If == 0, then the width is treated as 80. If < 0, then the width is\n\t\/\/ treated as unbounded.\n\tWidth int\n\t\/\/ TabSize indicates the number of characters that the terminal uses to\n\t\/\/ render a tab. If <= 0, then tabs are assumed to have a size of 8.\n\tTabSize int\n}\n\nvar (\n\tstop = errors.New(\"stop\") \/\/ Stop walking without erroring.\n\tskipChildren = errors.New(\"skip children\") \/\/ Skip all child nodes.\n\tskipText = errors.New(\"skip text\") \/\/ Skip child text nodes.\n)\n\ntype walker func(node *html.Node, entering bool) error\n\nfunc walk(node *html.Node, cb walker) error {\n\terr := cb(node, true)\n\tif err != nil && err != skipChildren && err != skipText {\n\t\treturn err\n\t}\n\tif err != skipChildren {\n\t\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif c.Type == html.TextNode && err == skipText {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := walk(c, cb); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif err = cb(node, false); err != nil && err != skipChildren && err != skipText {\n\t\treturn err\n\t}\n\treturn err\n}\n\n\/\/ Facilitates wrapped text.\ntype writer struct {\n\tbytes.Buffer\n\tw io.Writer\n\n\t\/\/ If wrapping, wrap to this width.\n\twidth int\n\n\t\/\/ Size of tab character.\n\ttabSize int\n}\n\nfunc (w *writer) Flush() error {\n\tb := w.Buffer.Bytes()\n\tdefer w.Buffer.Reset()\n\n\tb = bytes.TrimSpace(b)\n\tvar paragraphs [][]rune\n\tfor i := 0; i < len(b); {\n\t\tif b[i] == '\\n' {\n\t\t\t\/\/ Bounds check unneeded; if b[i], being a \\n, was at the end, it\n\t\t\t\/\/ would have been trimmed.\n\t\t\tif b[i+1] == '\\n' {\n\t\t\t\t\/\/ New paragraph.\n\t\t\t\tparagraphs = append(paragraphs, []rune(string(b[:i])))\n\t\t\t\ti += 2\n\t\t\t\tfor b[i] == '\\n' {\n\t\t\t\t\t\/\/ Collapse extra newlines.\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t\tb = b[i:]\n\t\t\t\ti = 0\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t\/\/ Unwrap.\n\t\t\t\tb[i] = ' '\n\t\t\t}\n\t\t}\n\t\ti++\n\t}\n\tif w.width > 0 {\n\t\tfor j, p := range paragraphs {\n\t\t\tfor i := 0; i < len(p); {\n\t\t\t\tn := i + w.width\n\t\t\t\tif n+1 >= len(p) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfor n > i && !unicode.IsSpace(p[n]) {\n\t\t\t\t\tn--\n\t\t\t\t}\n\t\t\t\tif n <= i {\n\t\t\t\t\t\/\/ Long word.\n\t\t\t\t\tn = i + w.width\n\t\t\t\t\tp = append(p, 0)\n\t\t\t\t\tcopy(p[n+1:], p[n:])\n\t\t\t\t\tp[n] = '\\n'\n\t\t\t\t\ti = n + 1\n\t\t\t\t} else {\n\t\t\t\t\tp[n] = '\\n'\n\t\t\t\t\ti = n + 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tparagraphs[j] = p\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\/\/ Remove newlines that are inserted just after the edge of the\n\t\t\t\/\/ width. This prevents the terminal from incorrectly producing an\n\t\t\t\/\/ extra gap when wrapping. This removes the newline entirely, so it\n\t\t\t\/\/ is assumed that the wrapping will separate the previous word from\n\t\t\t\/\/ the next word.\n\t\t\tfor i, p := range paragraphs {\n\t\t\t\tfor h, i := 0, 0; i < len(p); i++ {\n\t\t\t\t\tif p[i] != '\\n' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif i-h >= w.width {\n\t\t\t\t\t\tcopy(p[i:], p[i+1:])\n\t\t\t\t\t\tp = p[:len(p)-1]\n\t\t\t\t\t\th = i\n\t\t\t\t\t\ti--\n\t\t\t\t\t} else {\n\t\t\t\t\t\th = i + 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tparagraphs[i] = p\n\t\t\t}\n\t\t}\n\t}\n\tfor i, p := range paragraphs {\n\t\tif i > 0 {\n\t\t\tw.w.Write([]byte{'\\n', '\\n'})\n\t\t}\n\t\tif _, err := w.w.Write([]byte(string(p))); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r Renderer) Render(w io.Writer, s *goquery.Selection) error {\n\tbuf := &writer{\n\t\tw: w,\n\t\twidth: r.Width,\n\t\ttabSize: r.TabSize,\n\t}\n\tfor _, node := range s.Nodes {\n\t\tif node.Type == html.TextNode {\n\t\t\tcontinue\n\t\t}\n\t\terr := walk(node, func(node *html.Node, entering bool) error {\n\t\t\tswitch node.Type {\n\t\t\tcase html.ErrorNode:\n\t\t\tcase html.TextNode:\n\t\t\t\tif entering {\n\t\t\t\t\tbuf.WriteString(node.Data)\n\t\t\t\t}\n\t\t\tcase html.DocumentNode:\n\t\t\tcase html.ElementNode:\n\t\t\t\th := handlers[elementMatcher{node.Data, entering}]\n\t\t\t\tif h != nil {\n\t\t\t\t\treturn h(buf, node)\n\t\t\t\t}\n\t\t\tcase html.CommentNode:\n\t\t\tcase html.DoctypeNode:\n\t\t\tcase html.RawNode:\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil && err != stop {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn buf.Flush()\n}\n\ntype elementMatcher struct {\n\tdata string\n\tentering bool\n}\n\ntype nodeHandlers map[elementMatcher]func(w *writer, node *html.Node) error\n\nfunc isElement(node *html.Node, tag string) bool {\n\treturn node.Type == html.ElementNode && node.Data == tag\n}\n\nvar handlers = nodeHandlers{\n\t{\"code\", true}: func(w *writer, node *html.Node) error {\n\t\tif isElement(node.Parent, \"pre\") { \/\/ May have syntax: `class=\"language-*\"`\n\t\t\t\/\/ Block\n\t\t\tw.WriteString(node.FirstChild.Data)\n\t\t\treturn skipChildren\n\t\t} else {\n\t\t\t\/\/ Inline\n\t\t\treturn w.WriteByte('`')\n\t\t}\n\t},\n\t{\"code\", false}: func(w *writer, node *html.Node) error {\n\t\tif isElement(node.Parent, \"pre\") {\n\t\t\t\/\/ Block\n\t\t} else {\n\t\t\t\/\/ Inline\n\t\t\treturn w.WriteByte('`')\n\t\t}\n\t\treturn nil\n\t},\n\t{\"section\", true}: func(w *writer, node *html.Node) error {\n\t\treturn skipText\n\t},\n\t{\"p\", true}: func(w *writer, node *html.Node) error {\n\t\t_, err := w.WriteString(\"\\n\\n\")\n\t\treturn err\n\t},\n\t{\"p\", false}: func(w *writer, node *html.Node) error {\n\t\t_, err := w.WriteString(\"\\n\\n\")\n\t\treturn err\n\t},\n}\n<commit_msg>Fix missing paragraph.<commit_after>\/\/ The term package implements rendering HTML content for the terminal.\npackage term\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"runtime\"\n\t\"unicode\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"golang.org\/x\/net\/html\"\n)\n\n\/\/ Renderer implements a rendering HTML content in a stripped down format\n\/\/ suitable for the terminal.\ntype Renderer struct {\n\t\/\/ Width sets the assumed width of the terminal, which aids in wrapping\n\t\/\/ text. If == 0, then the width is treated as 80. If < 0, then the width is\n\t\/\/ treated as unbounded.\n\tWidth int\n\t\/\/ TabSize indicates the number of characters that the terminal uses to\n\t\/\/ render a tab. If <= 0, then tabs are assumed to have a size of 8.\n\tTabSize int\n}\n\nvar (\n\tstop = errors.New(\"stop\") \/\/ Stop walking without erroring.\n\tskipChildren = errors.New(\"skip children\") \/\/ Skip all child nodes.\n\tskipText = errors.New(\"skip text\") \/\/ Skip child text nodes.\n)\n\ntype walker func(node *html.Node, entering bool) error\n\nfunc walk(node *html.Node, cb walker) error {\n\terr := cb(node, true)\n\tif err != nil && err != skipChildren && err != skipText {\n\t\treturn err\n\t}\n\tif err != skipChildren {\n\t\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif c.Type == html.TextNode && err == skipText {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := walk(c, cb); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif err = cb(node, false); err != nil && err != skipChildren && err != skipText {\n\t\treturn err\n\t}\n\treturn err\n}\n\n\/\/ Facilitates wrapped text.\ntype writer struct {\n\tbytes.Buffer\n\tw io.Writer\n\n\t\/\/ If wrapping, wrap to this width.\n\twidth int\n\n\t\/\/ Size of tab character.\n\ttabSize int\n}\n\nfunc (w *writer) Flush() error {\n\tb := w.Buffer.Bytes()\n\tdefer w.Buffer.Reset()\n\n\tb = bytes.TrimSpace(b)\n\tvar paragraphs [][]rune\n\tfor i := 0; i < len(b); {\n\t\tif b[i] == '\\n' {\n\t\t\t\/\/ Bounds check unneeded; if b[i], being a \\n, was at the end, it\n\t\t\t\/\/ would have been trimmed.\n\t\t\tif b[i+1] == '\\n' {\n\t\t\t\t\/\/ New paragraph.\n\t\t\t\tparagraphs = append(paragraphs, []rune(string(b[:i])))\n\t\t\t\ti += 2\n\t\t\t\tfor b[i] == '\\n' {\n\t\t\t\t\t\/\/ Collapse extra newlines.\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t\tb = b[i:]\n\t\t\t\ti = 0\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t\/\/ Unwrap.\n\t\t\t\tb[i] = ' '\n\t\t\t}\n\t\t}\n\t\ti++\n\t}\n\tparagraphs = append(paragraphs, []rune(string(b)))\n\tif w.width > 0 {\n\t\tfor j, p := range paragraphs {\n\t\t\tfor i := 0; i < len(p); {\n\t\t\t\tn := i + w.width\n\t\t\t\tif n+1 >= len(p) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfor n > i && !unicode.IsSpace(p[n]) {\n\t\t\t\t\tn--\n\t\t\t\t}\n\t\t\t\tif n <= i {\n\t\t\t\t\t\/\/ Long word.\n\t\t\t\t\tn = i + w.width\n\t\t\t\t\tp = append(p, 0)\n\t\t\t\t\tcopy(p[n+1:], p[n:])\n\t\t\t\t\tp[n] = '\\n'\n\t\t\t\t\ti = n + 1\n\t\t\t\t} else {\n\t\t\t\t\tp[n] = '\\n'\n\t\t\t\t\ti = n + 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tparagraphs[j] = p\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\/\/ Remove newlines that are inserted just after the edge of the\n\t\t\t\/\/ width. This prevents the terminal from incorrectly producing an\n\t\t\t\/\/ extra gap when wrapping. This removes the newline entirely, so it\n\t\t\t\/\/ is assumed that the wrapping will separate the previous word from\n\t\t\t\/\/ the next word.\n\t\t\tfor i, p := range paragraphs {\n\t\t\t\tfor h, i := 0, 0; i < len(p); i++ {\n\t\t\t\t\tif p[i] != '\\n' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif i-h >= w.width {\n\t\t\t\t\t\tcopy(p[i:], p[i+1:])\n\t\t\t\t\t\tp = p[:len(p)-1]\n\t\t\t\t\t\th = i\n\t\t\t\t\t\ti--\n\t\t\t\t\t} else {\n\t\t\t\t\t\th = i + 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tparagraphs[i] = p\n\t\t\t}\n\t\t}\n\t}\n\tfor i, p := range paragraphs {\n\t\tif i > 0 {\n\t\t\tw.w.Write([]byte{'\\n', '\\n'})\n\t\t}\n\t\tif _, err := w.w.Write([]byte(string(p))); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r Renderer) Render(w io.Writer, s *goquery.Selection) error {\n\tbuf := &writer{\n\t\tw: w,\n\t\twidth: r.Width,\n\t\ttabSize: r.TabSize,\n\t}\n\tfor _, node := range s.Nodes {\n\t\tif node.Type == html.TextNode {\n\t\t\tcontinue\n\t\t}\n\t\terr := walk(node, func(node *html.Node, entering bool) error {\n\t\t\tswitch node.Type {\n\t\t\tcase html.ErrorNode:\n\t\t\tcase html.TextNode:\n\t\t\t\tif entering {\n\t\t\t\t\tbuf.WriteString(node.Data)\n\t\t\t\t}\n\t\t\tcase html.DocumentNode:\n\t\t\tcase html.ElementNode:\n\t\t\t\th := handlers[elementMatcher{node.Data, entering}]\n\t\t\t\tif h != nil {\n\t\t\t\t\treturn h(buf, node)\n\t\t\t\t}\n\t\t\tcase html.CommentNode:\n\t\t\tcase html.DoctypeNode:\n\t\t\tcase html.RawNode:\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil && err != stop {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn buf.Flush()\n}\n\ntype elementMatcher struct {\n\tdata string\n\tentering bool\n}\n\ntype nodeHandlers map[elementMatcher]func(w *writer, node *html.Node) error\n\nfunc isElement(node *html.Node, tag string) bool {\n\treturn node.Type == html.ElementNode && node.Data == tag\n}\n\nvar handlers = nodeHandlers{\n\t{\"code\", true}: func(w *writer, node *html.Node) error {\n\t\tif isElement(node.Parent, \"pre\") { \/\/ May have syntax: `class=\"language-*\"`\n\t\t\t\/\/ Block\n\t\t\tw.WriteString(node.FirstChild.Data)\n\t\t\treturn skipChildren\n\t\t} else {\n\t\t\t\/\/ Inline\n\t\t\treturn w.WriteByte('`')\n\t\t}\n\t},\n\t{\"code\", false}: func(w *writer, node *html.Node) error {\n\t\tif isElement(node.Parent, \"pre\") {\n\t\t\t\/\/ Block\n\t\t} else {\n\t\t\t\/\/ Inline\n\t\t\treturn w.WriteByte('`')\n\t\t}\n\t\treturn nil\n\t},\n\t{\"section\", true}: func(w *writer, node *html.Node) error {\n\t\treturn skipText\n\t},\n\t{\"p\", true}: func(w *writer, node *html.Node) error {\n\t\t_, err := w.WriteString(\"\\n\\n\")\n\t\treturn err\n\t},\n\t{\"p\", false}: func(w *writer, node *html.Node) error {\n\t\t_, err := w.WriteString(\"\\n\\n\")\n\t\treturn err\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Yahoo Inc.\n\/\/ Licensed under the terms of the Apache version 2.0 license. See LICENSE file for terms.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"github.com\/ardielle\/ardielle-go\/rdl\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype javaClientGenerator struct {\n\tregistry rdl.TypeRegistry\n\tschema *rdl.Schema\n\tname string\n\twriter *bufio.Writer\n\terr error\n\tbanner string\n\tns string\n\tbase string\n}\n\n\/\/ GenerateJavaClient generates the client code to talk to the server\nfunc GenerateJavaClient(banner string, schema *rdl.Schema, outdir string, ns string, base string, options []string) error {\n\treg := rdl.NewTypeRegistry(schema)\n\tpackageDir, err := javaGenerationDir(outdir, schema, ns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcName := javaGenerationStringOptionSet(options, \"clientclass\")\n\tif cName == \"\" {\n\t\tcName = capitalize(string(schema.Name))\n\t}\n\n\tout, file, _, err := outputWriter(packageDir, cName, \"Client.java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tgen := &javaClientGenerator{reg, schema, cName, out, nil, banner, ns, base}\n\tgen.processTemplate(javaClientTemplate)\n\tout.Flush()\n\tfile.Close()\n\tif gen.err != nil {\n\t\treturn gen.err\n\t}\n\n\t\/\/ResourceException - the throawable wrapper for alternate return types\n\tout, file, _, err = outputWriter(packageDir, \"ResourceException\", \".java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = javaGenerateResourceException(schema, out, ns)\n\tout.Flush()\n\tfile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ResourceError - the default data object for an error\n\tout, file, _, err = outputWriter(packageDir, \"ResourceError\", \".java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = javaGenerateResourceError(schema, out, ns)\n\tout.Flush()\n\tfile.Close()\n\treturn err\n}\n\nfunc (gen *javaClientGenerator) processTemplate(templateSource string) error {\n\tcommentFun := func(s string) string {\n\t\treturn formatComment(s, 0, 80)\n\t}\n\tfuncMap := template.FuncMap{\n\t\t\"header\": func() string { return javaGenerationHeader(gen.banner) },\n\t\t\"package\": func() string { return javaGenerationPackage(gen.schema, gen.ns) },\n\t\t\"comment\": commentFun,\n\t\t\"methodSig\": func(r *rdl.Resource) string { return gen.clientMethodSignature(r) },\n\t\t\"methodBody\": func(r *rdl.Resource) string { return gen.clientMethodBody(r) },\n\t\t\"name\": func() string { return gen.name },\n\t\t\"cName\": func() string { return capitalize(gen.name) },\n\t\t\"lName\": func() string { return uncapitalize(gen.name) },\n\t}\n\tt := template.Must(template.New(gen.name).Funcs(funcMap).Parse(templateSource))\n\treturn t.Execute(gen.writer, gen.schema)\n}\n\nfunc (gen *javaClientGenerator) resourcePath(r *rdl.Resource) string {\n\tpath := r.Path\n\ti := strings.Index(path, \"?\")\n\tif i >= 0 {\n\t\tpath = path[0:i]\n\t}\n\treturn path\n}\n\nconst javaClientTemplate = `{{header}}\npackage {{package}};\nimport com.yahoo.rdl.*;\nimport javax.ws.rs.client.*;\nimport javax.ws.rs.*;\nimport javax.ws.rs.core.*;\n\npublic class {{cName}}Client {\n WebTarget base;\n String credsHeader;\n String credsToken;\n\n public {{cName}}Client(String url) {\n base = ClientBuilder.newClient().target(url);\n }\n\n public {{cName}}Client addCredentials(String header, String token) {\n credsHeader = header;\n credsToken = token;\n return this;\n }\n{{range .Resources}}\n {{methodSig .}} {\n {{methodBody .}}\n }\n{{end}}\n}\n`\n\nfunc (gen *javaClientGenerator) clientMethodSignature(r *rdl.Resource) string {\n\treg := gen.registry\n\treturnType := javaType(reg, r.Type, false, \"\", \"\")\n\tmethName, params := javaMethodName(reg, r)\n\tsparams := \"\"\n\tif len(params) > 0 {\n\t\tsparams = strings.Join(params, \", \")\n\t}\n\treturn \"public \" + returnType + \" \" + methName + \"(\" + sparams + \")\"\n}\n\nfunc (gen *javaClientGenerator) clientMethodBody(r *rdl.Resource) string {\n\treg := gen.registry\n\treturnType := javaType(reg, r.Type, false, \"\", \"\")\n\tpath := r.Path\n\ts := \"WebTarget target = base.path(\\\"\" + path + \"\\\")\"\n\tentityName := \"\"\n\tq := \"\"\n\th := \"\"\n\tfor _, in := range r.Inputs {\n\t\tiname := javaName(in.Name)\n\t\tif in.PathParam {\n\t\t\ts += \"\\n .resolveTemplate(\\\"\" + iname + \"\\\", \" + iname + \")\"\n\t\t} else if in.QueryParam != \"\" {\n\t\t\tq += \"\\n if (\" + iname + \" != null) {\"\n\t\t\tq += \"\\n target = target.queryParam(\\\"\" + in.QueryParam + \"\\\", \" + iname + \");\"\n\t\t\tq += \"\\n }\"\n\t\t} else if in.Header != \"\" {\n\t\t\th += \"\\n if (\" + iname + \" != null) {\"\n\t\t\th += \"\\n invocationBuilder = invocationBuilder.header(\\\"\" + in.Header + \"\\\", \" + iname + \");\"\n\t\t\th += \"\\n }\"\n\t\t} else { \/\/the entity\n\t\t\tentityName = iname\n\t\t}\n\t}\n\ts += \";\"\n\tif q != \"\" {\n\t\ts += q\n\t}\n\ts += \"\\n Invocation.Builder invocationBuilder = target.request(\\\"application\/json\\\");\"\n\tif h != \"\" {\n\t\ts += h\n\t}\n\ts += \"\\n\"\n\tswitch r.Method {\n\tcase \"PUT\", \"POST\":\n\t\ts += \" Response response = invocationBuilder.\" + strings.ToLower(r.Method) + \"(Entity.entity(\" + entityName + \", \\\"application\/json\\\"));\\n\"\n\tdefault:\n\t\ts += \" Response response = invocationBuilder.\" + strings.ToLower(r.Method) + \"();\\n\"\n\t}\n\ts += \" int code = response.getStatus();\\n\"\n\ts += \" String status = Response.Status.fromStatusCode(code).toString();\\n\"\n\texpected := \"status.equals(\\\"\" + r.Expected + \"\\\")\"\n\tfor _, alt := range r.Alternatives {\n\t\texpected += \"|| status.equals(\\\"\" + alt + \"\\\")\"\n\t}\n\ts += \" if (\" + expected + \") {\\n\"\n\ts += \" return response.readEntity(\" + returnType + \".class);\\n\"\n\tif r.Exceptions != nil {\n\t\tfor k, v := range r.Exceptions {\n\t\t\ts += \" } else if (status.equals(\\\"\" + k + \"\\\")) {\\n\"\n\t\t\ts += \" throw new ResourceException(code, response.readEntity(\" + v.Type + \".class));\\n\"\n\t\t}\n\t}\n\ts += \" } else {\\n\"\n\ts += \" throw new ResourceException(code, response.readEntity(Object.class));\\n\"\n\ts += \" }\\n\"\n\treturn s\n}\n<commit_msg>enhancements for java client generation<commit_after>\/\/ Copyright 2015 Yahoo Inc.\n\/\/ Licensed under the terms of the Apache version 2.0 license. See LICENSE file for terms.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"github.com\/ardielle\/ardielle-go\/rdl\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype javaClientGenerator struct {\n\tregistry rdl.TypeRegistry\n\tschema *rdl.Schema\n\tname string\n\twriter *bufio.Writer\n\terr error\n\tbanner string\n\tns string\n\tbase string\n}\n\n\/\/ GenerateJavaClient generates the client code to talk to the server\nfunc GenerateJavaClient(banner string, schema *rdl.Schema, outdir string, ns string, base string, options []string) error {\n\treg := rdl.NewTypeRegistry(schema)\n\tpackageDir, err := javaGenerationDir(outdir, schema, ns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcName := javaGenerationStringOptionSet(options, \"clientclass\")\n\tif cName == \"\" {\n\t\tcName = capitalize(string(schema.Name))\n\t}\n\n\tout, file, _, err := outputWriter(packageDir, cName, \"Client.java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tgen := &javaClientGenerator{reg, schema, cName, out, nil, banner, ns, base}\n\tgen.processTemplate(javaClientTemplate)\n\tout.Flush()\n\tfile.Close()\n\tif gen.err != nil {\n\t\treturn gen.err\n\t}\n\n\t\/\/ResourceException - the throawable wrapper for alternate return types\n\tout, file, _, err = outputWriter(packageDir, \"ResourceException\", \".java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = javaGenerateResourceException(schema, out, ns)\n\tout.Flush()\n\tfile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ResourceError - the default data object for an error\n\tout, file, _, err = outputWriter(packageDir, \"ResourceError\", \".java\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = javaGenerateResourceError(schema, out, ns)\n\tout.Flush()\n\tfile.Close()\n\treturn err\n}\n\nfunc (gen *javaClientGenerator) processTemplate(templateSource string) error {\n\tcommentFun := func(s string) string {\n\t\treturn formatComment(s, 0, 80)\n\t}\n\tfuncMap := template.FuncMap{\n\t\t\"header\": func() string { return javaGenerationHeader(gen.banner) },\n\t\t\"package\": func() string { return javaGenerationPackage(gen.schema, gen.ns) },\n\t\t\"comment\": commentFun,\n\t\t\"methodSig\": func(r *rdl.Resource) string { return gen.clientMethodSignature(r) },\n\t\t\"methodBody\": func(r *rdl.Resource) string { return gen.clientMethodBody(r) },\n\t\t\"name\": func() string { return gen.name },\n\t\t\"cName\": func() string { return capitalize(gen.name) },\n\t\t\"lName\": func() string { return uncapitalize(gen.name) },\n\t}\n\tt := template.Must(template.New(gen.name).Funcs(funcMap).Parse(templateSource))\n\treturn t.Execute(gen.writer, gen.schema)\n}\n\nfunc (gen *javaClientGenerator) resourcePath(r *rdl.Resource) string {\n\tpath := r.Path\n\ti := strings.Index(path, \"?\")\n\tif i >= 0 {\n\t\tpath = path[0:i]\n\t}\n\treturn path\n}\n\nconst javaClientTemplate = `{{header}}\npackage {{package}};\nimport com.yahoo.rdl.*;\nimport javax.ws.rs.client.*;\nimport javax.ws.rs.*;\nimport javax.ws.rs.core.*;\nimport javax.net.ssl.HostnameVerifier;\n\npublic class {{cName}}Client {\n Client client;\n WebTarget base;\n String credsHeader;\n String credsToken;\n\n public {{cName}}Client(String url) {\n client = ClientBuilder.newClient();\n base = client.target(url);\n }\n\n public {{cName}}Client(String url, HostnameVerifier hostnameVerifier) {\n client = ClientBuilder.newBuilder()\n .hostnameVerifier(hostnameVerifier)\n .build();\n base = client.target(url);\n }\n\n public void close() {\n client.close();\n }\n\n public {{cName}}Client setProperty(String name, Object value) {\n client = client.property(name, value);\n return this;\n }\n\n public {{cName}}Client addCredentials(String header, String token) {\n credsHeader = header;\n credsToken = token;\n return this;\n }\n{{range .Resources}}\n {{methodSig .}} {\n {{methodBody .}}\n }\n{{end}}\n}\n`\n\nfunc (gen *javaClientGenerator) clientMethodSignature(r *rdl.Resource) string {\n\treg := gen.registry\n\treturnType := javaType(reg, r.Type, false, \"\", \"\")\n\tmethName, params := javaMethodName(reg, r)\n\tsparams := \"\"\n\tif len(params) > 0 {\n\t\tsparams = strings.Join(params, \", \")\n\t}\n\treturn \"public \" + returnType + \" \" + methName + \"(\" + sparams + \")\"\n}\n\nfunc (gen *javaClientGenerator) clientMethodBody(r *rdl.Resource) string {\n\treg := gen.registry\n\treturnType := javaType(reg, r.Type, false, \"\", \"\")\n\tpath := r.Path\n\ts := \"WebTarget target = base.path(\\\"\" + path + \"\\\")\"\n\tentityName := \"\"\n\tq := \"\"\n\th := \"\"\n\tfor _, in := range r.Inputs {\n\t\tiname := javaName(in.Name)\n\t\tif in.PathParam {\n\t\t\ts += \"\\n .resolveTemplate(\\\"\" + iname + \"\\\", \" + iname + \")\"\n\t\t} else if in.QueryParam != \"\" {\n\t\t\tq += \"\\n if (\" + iname + \" != null) {\"\n\t\t\tq += \"\\n target = target.queryParam(\\\"\" + in.QueryParam + \"\\\", \" + iname + \");\"\n\t\t\tq += \"\\n }\"\n\t\t} else if in.Header != \"\" {\n\t\t\th += \"\\n if (\" + iname + \" != null) {\"\n\t\t\th += \"\\n invocationBuilder = invocationBuilder.header(\\\"\" + in.Header + \"\\\", \" + iname + \");\"\n\t\t\th += \"\\n }\"\n\t\t} else { \/\/the entity\n\t\t\tentityName = iname\n\t\t}\n\t}\n\ts += \";\"\n\tif q != \"\" {\n\t\ts += q\n\t}\n\ts += \"\\n Invocation.Builder invocationBuilder = target.request(\\\"application\/json\\\");\"\n\tif h != \"\" {\n\t\ts += h\n\t}\n\ts += \"\\n\"\n\tswitch r.Method {\n\tcase \"PUT\", \"POST\":\n\t\ts += \" Response response = invocationBuilder.\" + strings.ToLower(r.Method) + \"(javax.ws.rs.client.Entity.entity(\" + entityName + \", \\\"application\/json\\\"));\\n\"\n\tdefault:\n\t\ts += \" Response response = invocationBuilder.\" + strings.ToLower(r.Method) + \"();\\n\"\n\t}\n\ts += \" int code = response.getStatus();\\n\"\n\ts += \" String status = Response.Status.fromStatusCode(code).toString();\\n\"\n\texpected := \"status.equals(\\\"\" + r.Expected + \"\\\")\"\n\tfor _, alt := range r.Alternatives {\n\t\texpected += \"|| status.equals(\\\"\" + alt + \"\\\")\"\n\t}\n\ts += \" if (\" + expected + \") {\\n\"\n\ts += \" return response.readEntity(\" + returnType + \".class);\\n\"\n\tif r.Exceptions != nil {\n\t\tfor k, v := range r.Exceptions {\n\t\t\ts += \" } else if (status.equals(\\\"\" + k + \"\\\")) {\\n\"\n\t\t\ts += \" throw new ResourceException(code, response.readEntity(\" + v.Type + \".class));\\n\"\n\t\t}\n\t}\n\ts += \" } else {\\n\"\n\ts += \" throw new ResourceException(code, response.readEntity(Object.class));\\n\"\n\ts += \" }\\n\"\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\t\"github.com\/keybase\/gomounts\"\n)\n\nvar kbfusePath = fuse.OSXFUSEPaths{\n\tDevicePrefix: \"\/dev\/kbfuse\",\n\tLoad: \"\/Library\/Filesystems\/kbfuse.fs\/Contents\/Resources\/load_kbfuse\",\n\tMount: \"\/Library\/Filesystems\/kbfuse.fs\/Contents\/Resources\/mount_kbfuse\",\n\tDaemonVar: \"MOUNT_KBFUSE_DAEMON_PATH\",\n}\n\nconst (\n\tmountpointTimeout = 5 * time.Second\n\tnotRunningName = \"KBFS_NOT_RUNNING\"\n)\n\ntype symlink struct {\n\tlink string\n}\n\nfunc (s symlink) Attr(ctx context.Context, a *fuse.Attr) (err error) {\n\ta.Mode = os.ModeSymlink | a.Mode | 0555\n\ta.Valid = 0\n\treturn nil\n}\n\nfunc (s symlink) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (\n\tlink string, err error) {\n\treturn s.link, nil\n}\n\ntype cacheEntry struct {\n\tmountpoint string\n\ttime time.Time\n}\n\ntype root struct {\n\trunmodeStr string\n\trunmodeStrFancy string\n\n\tlock sync.RWMutex\n\tmountpointCache map[uint32]cacheEntry\n\n\tgetMountsLock sync.Mutex\n}\n\nfunc newRoot() *root {\n\trunmodeStr := \"keybase\"\n\trunmodeStrFancy := \"Keybase\"\n\tswitch os.Getenv(\"KEYBASE_RUN_MODE\") {\n\tcase \"staging\":\n\t\trunmodeStr = \"keybase.staging\"\n\t\trunmodeStrFancy = \"KeybaseStaging\"\n\tcase \"devel\":\n\t\trunmodeStr = \"keybase.devel\"\n\t\trunmodeStrFancy = \"KeybaseDevel\"\n\t}\n\n\treturn &root{\n\t\trunmodeStr: runmodeStr,\n\t\trunmodeStrFancy: runmodeStrFancy,\n\t\tmountpointCache: make(map[uint32]cacheEntry),\n\t}\n}\n\nfunc (r *root) Root() (fs.Node, error) {\n\treturn r, nil\n}\n\nfunc (r *root) Attr(ctx context.Context, attr *fuse.Attr) error {\n\tattr.Mode = os.ModeDir | 0555\n\treturn nil\n}\n\nfunc (r *root) getCachedMountpoint(uid uint32) string {\n\tr.lock.RLock()\n\tdefer r.lock.RUnlock()\n\tentry, ok := r.mountpointCache[uid]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tnow := time.Now()\n\tif now.Sub(entry.time) > mountpointTimeout {\n\t\t\/\/ Don't bother deleting the entry, since the caller should\n\t\t\/\/ just overwrite it.\n\t\treturn \"\"\n\t}\n\treturn entry.mountpoint\n}\n\nfunc (r *root) getMountedVolumes() ([]gomounts.Volume, error) {\n\tr.getMountsLock.Lock()\n\tdefer r.getMountsLock.Unlock()\n\treturn gomounts.GetMountedVolumes()\n}\n\n\/\/ mountpointMatchesRunmode returns true if `mp` contains `runmode` at\n\/\/ the end of a component of the path, or followed by a space.\nfunc mountpointMatchesRunmode(mp, runmode string) bool {\n\ti := strings.Index(mp, runmode)\n\tif i < 0 {\n\t\treturn false\n\t}\n\tif len(mp) == i+len(runmode) || mp[i+len(runmode)] == '\/' ||\n\t\tmp[i+len(runmode)] == ' ' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (r *root) findKBFSMount(ctx context.Context) (\n\tmountpoint string, err error) {\n\t\/\/ Get the UID, and crash intentionally if it's not set, because\n\t\/\/ that means we're not compiled against the correct version of\n\t\/\/ bazil.org\/fuse.\n\tuid := ctx.Value(fs.CtxHeaderUIDKey).(uint32)\n\t\/\/ Don't let the root see anything here; we don't want a symlink\n\t\/\/ loop back to this mount.\n\tif uid == 0 {\n\t\treturn \"\", fuse.ENOENT\n\t}\n\n\tmountpoint = r.getCachedMountpoint(uid)\n\tif mountpoint != \"\" {\n\t\treturn mountpoint, nil\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Cache the entry if we didn't hit an error.\n\t\tr.lock.Lock()\n\t\tdefer r.lock.Unlock()\n\t\tr.mountpointCache[uid] = cacheEntry{\n\t\t\tmountpoint: mountpoint,\n\t\t\ttime: time.Now(),\n\t\t}\n\t}()\n\n\tu, err := user.LookupId(strconv.FormatUint(uint64(uid), 10))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvols, err := r.getMountedVolumes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfuseType := \"fuse\"\n\tif runtime.GOOS == \"darwin\" {\n\t\tfuseType = \"kbfuse\"\n\t}\n\tvar fuseMountPoints []string\n\tfor _, v := range vols {\n\t\tif v.Type != fuseType {\n\t\t\tcontinue\n\t\t}\n\t\tif v.Owner != u.Uid {\n\t\t\tcontinue\n\t\t}\n\t\tfuseMountPoints = append(fuseMountPoints, v.Path)\n\t}\n\n\tif len(fuseMountPoints) == 0 {\n\t\treturn \"\", fuse.ENOENT\n\t}\n\n\t\/\/ Pick the first one alphabetically that has \"keybase\" in the\n\t\/\/ path.\n\tsort.Strings(fuseMountPoints)\n\tfor _, mp := range fuseMountPoints {\n\t\t\/\/ Find mountpoints like \"\/home\/user\/.local\/share\/keybase\/fs\",\n\t\t\/\/ or \"\/Volumes\/Keybase (user)\", and make sure it doesn't\n\t\t\/\/ match mounts for another run mode, say\n\t\t\/\/ \"\/Volumes\/KeybaseStaging (user)\".\n\t\tif mountpointMatchesRunmode(mp, r.runmodeStr) ||\n\t\t\tmountpointMatchesRunmode(mp, r.runmodeStrFancy) {\n\t\t\treturn mp, nil\n\t\t}\n\t}\n\n\t\/\/ Give up.\n\treturn \"\", fuse.ENOENT\n}\n\nfunc (r *root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {\n\t_, err := r.findKBFSMount(ctx)\n\tif err != nil {\n\t\tif err == fuse.ENOENT {\n\t\t\t\/\/ Put a symlink in the directory for someone who's not\n\t\t\t\/\/ logged in, so that the directory is non-empty and\n\t\t\t\/\/ future redirector calls as root won't try to mount over\n\t\t\t\/\/ us.\n\t\t\treturn []fuse.Dirent{\n\t\t\t\t{\n\t\t\t\t\tType: fuse.DT_Link,\n\t\t\t\t\tName: notRunningName,\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\treturn []fuse.Dirent{}, err\n\t}\n\n\t\/\/ TODO: show the `kbfs.error.txt\" and \"kbfs.nologin.txt\" files if\n\t\/\/ they exist? As root, it is hard to figure out if they're\n\t\/\/ there, though.\n\treturn []fuse.Dirent{\n\t\t{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"private\",\n\t\t},\n\t\t{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"public\",\n\t\t},\n\t\tfuse.Dirent{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"team\",\n\t\t},\n\t}, nil\n}\n\nfunc (r *root) Lookup(\n\tctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (\n\tn fs.Node, err error) {\n\tmountpoint, err := r.findKBFSMount(ctx)\n\tif err != nil {\n\t\tif req.Name == notRunningName {\n\t\t\treturn symlink{\"\/dev\/null\"}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tresp.EntryValid = 0\n\tswitch req.Name {\n\tcase \"private\", \"public\", \"team\", \".kbfs_error\", \".kbfs_metrics\",\n\t\t\".kbfs_profiles\", \".kbfs_reset_caches\", \".kbfs_status\",\n\t\t\"kbfs.error.txt\", \"kbfs.nologin.txt\", \".kbfs_enable_auto_journals\",\n\t\t\".kbfs_disable_auto_journals\", \".kbfs_enable_block_prefetching\",\n\t\t\".kbfs_disable_block_prefetching\", \".kbfs_enable_debug_server\",\n\t\t\".kbfs_disable_debug_server\":\n\t\treturn symlink{filepath.Join(mountpoint, req.Name)}, nil\n\t}\n\treturn nil, fuse.ENOENT\n}\n\nfunc main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s <mountpoint>\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Restrict the mountpoint to paths starting with \"\/keybase\".\n\t\/\/ Since this is a suid binary, it is dangerous to allow arbitrary\n\t\/\/ mountpoints. TODO: Read a redirector mountpoint from a\n\t\/\/ root-owned config file.\n\tr := newRoot()\n\tif os.Args[1] != fmt.Sprintf(\"\/%s\", r.runmodeStr) {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: The redirector may only mount at \"+\n\t\t\t\"\/%s; %s is an invalid mountpoint\\n\", r.runmodeStr, os.Args[1])\n\t\tos.Exit(1)\n\t}\n\n\tcurrUser, err := user.Current()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif currUser.Uid != \"0\" {\n\t\truntime.LockOSThread()\n\t\t_, _, errNo := syscall.Syscall(syscall.SYS_SETUID, 0, 0, 0)\n\t\tif errNo != 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't setuid: %+v\\n\", errNo)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\toptions := []fuse.MountOption{fuse.AllowOther()}\n\toptions = append(options, fuse.FSName(\"keybase-redirector\"))\n\toptions = append(options, fuse.ReadOnly())\n\tif runtime.GOOS == \"darwin\" {\n\t\toptions = append(options, fuse.OSXFUSELocations(kbfusePath))\n\t\toptions = append(options, fuse.VolumeName(\"keybase\"))\n\t\toptions = append(options, fuse.NoBrowse())\n\t\t\/\/ Without NoLocalCaches(), OSX will cache symlinks for a long time.\n\t\toptions = append(options, fuse.NoLocalCaches())\n\t}\n\n\tc, err := fuse.Mount(os.Args[1], options...)\n\tif err != nil {\n\t\tfmt.Printf(\"Mount error, exiting cleanly: %+v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\tinterruptChan := make(chan os.Signal, 1)\n\tsignal.Notify(interruptChan, os.Interrupt)\n\tsignal.Notify(interruptChan, syscall.SIGTERM)\n\tgo func() {\n\t\t_ = <-interruptChan\n\n\t\t\/\/ This might be a different system thread than above, so we\n\t\t\/\/ might need to setuid again.\n\t\truntime.LockOSThread()\n\t\t_, _, errNo := syscall.Syscall(syscall.SYS_SETUID, 0, 0, 0)\n\t\tif errNo != 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't setuid: %+v\\n\", errNo)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := fuse.Unmount(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Couldn't unmount cleanly: %+v\", err)\n\t\t}\n\t}()\n\n\tsrv := fs.New(c, &fs.Config{\n\t\tWithContext: func(ctx context.Context, _ fuse.Request) context.Context {\n\t\t\treturn context.Background()\n\t\t},\n\t})\n\tsrv.Serve(r)\n}\n<commit_msg>redirector: redirect \/keybase\/.kbfs_edit_history<commit_after>\/\/ Copyright 2018 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\t\"github.com\/keybase\/gomounts\"\n)\n\nvar kbfusePath = fuse.OSXFUSEPaths{\n\tDevicePrefix: \"\/dev\/kbfuse\",\n\tLoad: \"\/Library\/Filesystems\/kbfuse.fs\/Contents\/Resources\/load_kbfuse\",\n\tMount: \"\/Library\/Filesystems\/kbfuse.fs\/Contents\/Resources\/mount_kbfuse\",\n\tDaemonVar: \"MOUNT_KBFUSE_DAEMON_PATH\",\n}\n\nconst (\n\tmountpointTimeout = 5 * time.Second\n\tnotRunningName = \"KBFS_NOT_RUNNING\"\n)\n\ntype symlink struct {\n\tlink string\n}\n\nfunc (s symlink) Attr(ctx context.Context, a *fuse.Attr) (err error) {\n\ta.Mode = os.ModeSymlink | a.Mode | 0555\n\ta.Valid = 0\n\treturn nil\n}\n\nfunc (s symlink) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (\n\tlink string, err error) {\n\treturn s.link, nil\n}\n\ntype cacheEntry struct {\n\tmountpoint string\n\ttime time.Time\n}\n\ntype root struct {\n\trunmodeStr string\n\trunmodeStrFancy string\n\n\tlock sync.RWMutex\n\tmountpointCache map[uint32]cacheEntry\n\n\tgetMountsLock sync.Mutex\n}\n\nfunc newRoot() *root {\n\trunmodeStr := \"keybase\"\n\trunmodeStrFancy := \"Keybase\"\n\tswitch os.Getenv(\"KEYBASE_RUN_MODE\") {\n\tcase \"staging\":\n\t\trunmodeStr = \"keybase.staging\"\n\t\trunmodeStrFancy = \"KeybaseStaging\"\n\tcase \"devel\":\n\t\trunmodeStr = \"keybase.devel\"\n\t\trunmodeStrFancy = \"KeybaseDevel\"\n\t}\n\n\treturn &root{\n\t\trunmodeStr: runmodeStr,\n\t\trunmodeStrFancy: runmodeStrFancy,\n\t\tmountpointCache: make(map[uint32]cacheEntry),\n\t}\n}\n\nfunc (r *root) Root() (fs.Node, error) {\n\treturn r, nil\n}\n\nfunc (r *root) Attr(ctx context.Context, attr *fuse.Attr) error {\n\tattr.Mode = os.ModeDir | 0555\n\treturn nil\n}\n\nfunc (r *root) getCachedMountpoint(uid uint32) string {\n\tr.lock.RLock()\n\tdefer r.lock.RUnlock()\n\tentry, ok := r.mountpointCache[uid]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tnow := time.Now()\n\tif now.Sub(entry.time) > mountpointTimeout {\n\t\t\/\/ Don't bother deleting the entry, since the caller should\n\t\t\/\/ just overwrite it.\n\t\treturn \"\"\n\t}\n\treturn entry.mountpoint\n}\n\nfunc (r *root) getMountedVolumes() ([]gomounts.Volume, error) {\n\tr.getMountsLock.Lock()\n\tdefer r.getMountsLock.Unlock()\n\treturn gomounts.GetMountedVolumes()\n}\n\n\/\/ mountpointMatchesRunmode returns true if `mp` contains `runmode` at\n\/\/ the end of a component of the path, or followed by a space.\nfunc mountpointMatchesRunmode(mp, runmode string) bool {\n\ti := strings.Index(mp, runmode)\n\tif i < 0 {\n\t\treturn false\n\t}\n\tif len(mp) == i+len(runmode) || mp[i+len(runmode)] == '\/' ||\n\t\tmp[i+len(runmode)] == ' ' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (r *root) findKBFSMount(ctx context.Context) (\n\tmountpoint string, err error) {\n\t\/\/ Get the UID, and crash intentionally if it's not set, because\n\t\/\/ that means we're not compiled against the correct version of\n\t\/\/ bazil.org\/fuse.\n\tuid := ctx.Value(fs.CtxHeaderUIDKey).(uint32)\n\t\/\/ Don't let the root see anything here; we don't want a symlink\n\t\/\/ loop back to this mount.\n\tif uid == 0 {\n\t\treturn \"\", fuse.ENOENT\n\t}\n\n\tmountpoint = r.getCachedMountpoint(uid)\n\tif mountpoint != \"\" {\n\t\treturn mountpoint, nil\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Cache the entry if we didn't hit an error.\n\t\tr.lock.Lock()\n\t\tdefer r.lock.Unlock()\n\t\tr.mountpointCache[uid] = cacheEntry{\n\t\t\tmountpoint: mountpoint,\n\t\t\ttime: time.Now(),\n\t\t}\n\t}()\n\n\tu, err := user.LookupId(strconv.FormatUint(uint64(uid), 10))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvols, err := r.getMountedVolumes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfuseType := \"fuse\"\n\tif runtime.GOOS == \"darwin\" {\n\t\tfuseType = \"kbfuse\"\n\t}\n\tvar fuseMountPoints []string\n\tfor _, v := range vols {\n\t\tif v.Type != fuseType {\n\t\t\tcontinue\n\t\t}\n\t\tif v.Owner != u.Uid {\n\t\t\tcontinue\n\t\t}\n\t\tfuseMountPoints = append(fuseMountPoints, v.Path)\n\t}\n\n\tif len(fuseMountPoints) == 0 {\n\t\treturn \"\", fuse.ENOENT\n\t}\n\n\t\/\/ Pick the first one alphabetically that has \"keybase\" in the\n\t\/\/ path.\n\tsort.Strings(fuseMountPoints)\n\tfor _, mp := range fuseMountPoints {\n\t\t\/\/ Find mountpoints like \"\/home\/user\/.local\/share\/keybase\/fs\",\n\t\t\/\/ or \"\/Volumes\/Keybase (user)\", and make sure it doesn't\n\t\t\/\/ match mounts for another run mode, say\n\t\t\/\/ \"\/Volumes\/KeybaseStaging (user)\".\n\t\tif mountpointMatchesRunmode(mp, r.runmodeStr) ||\n\t\t\tmountpointMatchesRunmode(mp, r.runmodeStrFancy) {\n\t\t\treturn mp, nil\n\t\t}\n\t}\n\n\t\/\/ Give up.\n\treturn \"\", fuse.ENOENT\n}\n\nfunc (r *root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {\n\t_, err := r.findKBFSMount(ctx)\n\tif err != nil {\n\t\tif err == fuse.ENOENT {\n\t\t\t\/\/ Put a symlink in the directory for someone who's not\n\t\t\t\/\/ logged in, so that the directory is non-empty and\n\t\t\t\/\/ future redirector calls as root won't try to mount over\n\t\t\t\/\/ us.\n\t\t\treturn []fuse.Dirent{\n\t\t\t\t{\n\t\t\t\t\tType: fuse.DT_Link,\n\t\t\t\t\tName: notRunningName,\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\treturn []fuse.Dirent{}, err\n\t}\n\n\t\/\/ TODO: show the `kbfs.error.txt\" and \"kbfs.nologin.txt\" files if\n\t\/\/ they exist? As root, it is hard to figure out if they're\n\t\/\/ there, though.\n\treturn []fuse.Dirent{\n\t\t{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"private\",\n\t\t},\n\t\t{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"public\",\n\t\t},\n\t\tfuse.Dirent{\n\t\t\tType: fuse.DT_Link,\n\t\t\tName: \"team\",\n\t\t},\n\t}, nil\n}\n\nfunc (r *root) Lookup(\n\tctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (\n\tn fs.Node, err error) {\n\tmountpoint, err := r.findKBFSMount(ctx)\n\tif err != nil {\n\t\tif req.Name == notRunningName {\n\t\t\treturn symlink{\"\/dev\/null\"}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tresp.EntryValid = 0\n\tswitch req.Name {\n\tcase \"private\", \"public\", \"team\", \".kbfs_error\", \".kbfs_metrics\",\n\t\t\".kbfs_profiles\", \".kbfs_reset_caches\", \".kbfs_status\",\n\t\t\"kbfs.error.txt\", \"kbfs.nologin.txt\", \".kbfs_enable_auto_journals\",\n\t\t\".kbfs_disable_auto_journals\", \".kbfs_enable_block_prefetching\",\n\t\t\".kbfs_disable_block_prefetching\", \".kbfs_enable_debug_server\",\n\t\t\".kbfs_disable_debug_server\", \".kbfs_edit_history\":\n\t\treturn symlink{filepath.Join(mountpoint, req.Name)}, nil\n\t}\n\treturn nil, fuse.ENOENT\n}\n\nfunc main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s <mountpoint>\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Restrict the mountpoint to paths starting with \"\/keybase\".\n\t\/\/ Since this is a suid binary, it is dangerous to allow arbitrary\n\t\/\/ mountpoints. TODO: Read a redirector mountpoint from a\n\t\/\/ root-owned config file.\n\tr := newRoot()\n\tif os.Args[1] != fmt.Sprintf(\"\/%s\", r.runmodeStr) {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: The redirector may only mount at \"+\n\t\t\t\"\/%s; %s is an invalid mountpoint\\n\", r.runmodeStr, os.Args[1])\n\t\tos.Exit(1)\n\t}\n\n\tcurrUser, err := user.Current()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif currUser.Uid != \"0\" {\n\t\truntime.LockOSThread()\n\t\t_, _, errNo := syscall.Syscall(syscall.SYS_SETUID, 0, 0, 0)\n\t\tif errNo != 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't setuid: %+v\\n\", errNo)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\toptions := []fuse.MountOption{fuse.AllowOther()}\n\toptions = append(options, fuse.FSName(\"keybase-redirector\"))\n\toptions = append(options, fuse.ReadOnly())\n\tif runtime.GOOS == \"darwin\" {\n\t\toptions = append(options, fuse.OSXFUSELocations(kbfusePath))\n\t\toptions = append(options, fuse.VolumeName(\"keybase\"))\n\t\toptions = append(options, fuse.NoBrowse())\n\t\t\/\/ Without NoLocalCaches(), OSX will cache symlinks for a long time.\n\t\toptions = append(options, fuse.NoLocalCaches())\n\t}\n\n\tc, err := fuse.Mount(os.Args[1], options...)\n\tif err != nil {\n\t\tfmt.Printf(\"Mount error, exiting cleanly: %+v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\tinterruptChan := make(chan os.Signal, 1)\n\tsignal.Notify(interruptChan, os.Interrupt)\n\tsignal.Notify(interruptChan, syscall.SIGTERM)\n\tgo func() {\n\t\t_ = <-interruptChan\n\n\t\t\/\/ This might be a different system thread than above, so we\n\t\t\/\/ might need to setuid again.\n\t\truntime.LockOSThread()\n\t\t_, _, errNo := syscall.Syscall(syscall.SYS_SETUID, 0, 0, 0)\n\t\tif errNo != 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't setuid: %+v\\n\", errNo)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := fuse.Unmount(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Couldn't unmount cleanly: %+v\", err)\n\t\t}\n\t}()\n\n\tsrv := fs.New(c, &fs.Config{\n\t\tWithContext: func(ctx context.Context, _ fuse.Request) context.Context {\n\t\t\treturn context.Background()\n\t\t},\n\t})\n\tsrv.Serve(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/fleetspeak\/fleetspeak\/src\/e2etesting\/balancer\/proxyproto\"\n)\n\nvar (\n\tserversFile = flag.String(\"servers_file\", \"\", \"File with server hosts\")\n\tserverFrontendAddr = flag.String(\"frontend_address\", \"\", \"Frontend address for clients to connect\")\n)\n\nfunc copy(wc io.WriteCloser, r io.Reader) {\n\tdefer wc.Close()\n\tio.Copy(wc, r)\n}\n\nfunc run() error {\n\tdat, err := ioutil.ReadFile(*serversFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to read serversFile: %v\", err)\n\t}\n\tserverHosts := strings.Fields(string(dat))\n\tif len(serverHosts) == 0 {\n\t\treturn fmt.Errorf(\"No server hosts were provided\")\n\t}\n\n\tln, err := net.Listen(\"tcp\", *serverFrontendAddr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to bind: %v\", err)\n\t}\n\tlog.Printf(\"Load balancer started on %v\\n\", *serverFrontendAddr)\n\n\tfor {\n\t\tlbConn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to accept connection: %v\", err)\n\t\t}\n\t\tvar serverAddr string\n\t\tvar serverConn net.Conn\n\t\tretriesLeft := 10\n\t\tfor {\n\t\t\tserverAddr = serverHosts[rand.Int()%len(serverHosts)]\n\t\t\tserverConn, err = net.Dial(\"tcp\", serverAddr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to connect to server (%v): %v, retrying...\\n\", serverAddr, err)\n\t\t\t\tretriesLeft--\n\t\t\t\tif retriesLeft < 0 {\n\t\t\t\t\treturn fmt.Errorf(\"Maximum number of retries exceeded - no active servers were found\")\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second * 2)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Connection accepted, server: %v\\n\", serverAddr)\n\t\terr = proxyproto.WriteFirstProxyMessage(serverConn, lbConn.RemoteAddr().String(), serverAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo copy(serverConn, lbConn)\n\t\tgo copy(lbConn, serverConn)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\terr := run()\n\tif err != nil {\n\t\tfmt.Printf(\"Load balancer failed: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>In e2e testing load balancer, make usage of proxyproto optional. (#309)<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/fleetspeak\/fleetspeak\/src\/e2etesting\/balancer\/proxyproto\"\n)\n\nvar (\n\tserversFile = flag.String(\"servers_file\", \"\", \"File with server hosts\")\n\tserverFrontendAddr = flag.String(\"frontend_address\", \"\", \"Frontend address for clients to connect\")\n\tuseProxyProto = flag.Bool(\"use_proxy_proto\", true, \"Whether to forward client information using proxy proto\")\n)\n\nfunc copy(wc io.WriteCloser, r io.Reader) {\n\tdefer wc.Close()\n\tio.Copy(wc, r)\n}\n\nfunc run() error {\n\tdat, err := ioutil.ReadFile(*serversFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to read serversFile: %v\", err)\n\t}\n\tserverHosts := strings.Fields(string(dat))\n\tif len(serverHosts) == 0 {\n\t\treturn fmt.Errorf(\"No server hosts were provided\")\n\t}\n\n\tln, err := net.Listen(\"tcp\", *serverFrontendAddr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to bind: %v\", err)\n\t}\n\tlog.Printf(\"Load balancer started on %v\\n\", *serverFrontendAddr)\n\n\tfor {\n\t\tlbConn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to accept connection: %v\", err)\n\t\t}\n\t\tvar serverAddr string\n\t\tvar serverConn net.Conn\n\t\tretriesLeft := 10\n\t\tfor {\n\t\t\tserverAddr = serverHosts[rand.Int()%len(serverHosts)]\n\t\t\tserverConn, err = net.Dial(\"tcp\", serverAddr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to connect to server (%v): %v, retrying...\\n\", serverAddr, err)\n\t\t\t\tretriesLeft--\n\t\t\t\tif retriesLeft < 0 {\n\t\t\t\t\treturn fmt.Errorf(\"Maximum number of retries exceeded - no active servers were found\")\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second * 2)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Connection accepted, server: %v\\n\", serverAddr)\n\t\tif *useProxyProto {\n\t\t\terr = proxyproto.WriteFirstProxyMessage(serverConn, lbConn.RemoteAddr().String(), serverAddr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tgo copy(serverConn, lbConn)\n\t\tgo copy(lbConn, serverConn)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\terr := run()\n\tif err != nil {\n\t\tfmt.Printf(\"Load balancer failed: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package respond\r\n\r\nimport (\r\n\t\"sync\"\r\n\t\"net\/http\"\r\n\t\"log\"\r\n\t\"fmt\"\r\n\t\"crypto\/md5\"\r\n)\r\n\r\nvar (\r\n\tmutex sync.RWMutex\r\n\toptions map[*http.Request]*Options\r\n\tresponded map[*http.Request]bool\r\n)\r\n\r\nfunc init() {\r\n\toptions = make(map[*http.Request]*Options)\r\n\tresponded = make(map[*http.Request]bool)\r\n}\r\n\r\ntype Options struct {\r\n\tAllowMultiple bool\r\n\r\n\tOnError func(error)\r\n\r\n\tEncoder Encoder\r\n\r\n\tBefore func(w http.ResponseWriter, r *http.Request, status int, data interface{}) (int, interface{})\r\n\r\n\tStatusFunc func(w http.ResponseWriter, r *http.Request, status int) interface{}\r\n}\r\n\r\n\/\/ Handler wraps an HTTP handler becoming the source of options for all\r\n\/\/ containing With calls.\r\nfunc (opts *Options) Handler(handler http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tmutex.Lock()\r\n\t\toptions[r] = opts\r\n\t\tmutex.Unlock()\r\n\r\n\t\tdefer func() {\r\n\t\t\tmutex.Lock()\r\n\t\t\tdelete(options, r)\r\n\t\t\tdelete(responded, r)\r\n\t\t\tmutex.Unlock()\r\n\t\t}()\r\n\r\n\t\thandler.ServeHTTP(w, r)\r\n\t})\r\n}\r\n\r\nfunc (opts *Options) log(err error) {\r\n\tif opts.OnError == nil {\r\n\t\tlog.Println(\"respond: \" + err.Error())\r\n\t}\r\n\r\n\topts.OnError(err)\r\n}\r\n\r\nfunc about(r *http.Request) (opts *Options, multiple bool) {\r\n\tmutex.RLock()\r\n\topts = options[r]\r\n\tmultiple = responded[r]\r\n\tmutex.RUnlock()\r\n\r\n\treturn opts, multiple\r\n}\r\n\r\nfunc with(w http.ResponseWriter, r *http.Request, status int, data interface{}, opts *Options, multiple bool) {\r\n\tif opts != nil && multiple && !opts.AllowMultiple {\r\n\t\tpanic(\"respond: multiple responses\")\r\n\t}\r\n\r\n\tencoder := JSON\r\n\tif opts != nil {\r\n\t\tif opts.Before != nil {\r\n\t\t\tstatus, data = opts.Before(w, r, status, data)\r\n\t\t}\r\n\r\n\t\tif opts.Encoder != nil {\r\n\t\t\tencoder = opts.Encoder\r\n\t\t}\r\n\t}\r\n\r\n\tout, err := encoder.Encode(data)\r\n\tif err != nil {\r\n\t\topts.log(err)\r\n\t\treturn\r\n\t}\r\n\r\n\tif status >= 200 && status < 300 {\r\n\t\tetag := fmt.Sprintf(\"\\\"%x\\\"\", md5.Sum(out))\r\n\r\n\t\tw.Header().Set(\"Etag\", etag)\r\n\r\n\t\tif match := r.Header.Get(\"if-none-match\"); match == etag {\r\n\t\t\tw.WriteHeader(http.StatusNotModified)\r\n\t\t\treturn\r\n\t\t}\r\n\t}\r\n\r\n\tw.Header().Set(\"Content-Type\", encoder.ContentType())\r\n\tw.WriteHeader(status)\r\n\tw.Write(out)\r\n}\r\n\r\nfunc With(w http.ResponseWriter, r *http.Request, status int, data interface{}) {\r\n\topts, multiple := about(r)\r\n\r\n\twith(w, r, status, data, opts, multiple)\r\n}\r\n\r\nfunc WithStatus(w http.ResponseWriter, r *http.Request, status int) {\r\n\topts, multiple := about(r)\r\n\r\n\tvar data interface{}\r\n\tif opts != nil && opts.StatusFunc != nil {\r\n\t\tdata = opts.StatusFunc(w, r, status)\r\n\t} else {\r\n\t\tconst (\r\n\t\t\tfieldStatus = \"status\"\r\n\t\t\tfieldCode = \"code\"\r\n\t\t)\r\n\t\tdata = map[string]interface{}{fieldStatus: http.StatusText(status), fieldCode: status}\r\n\t}\r\n\r\n\twith(w, r, status, data, opts, multiple)\r\n}\r\n<commit_msg>readers and custom headers<commit_after>package respond\r\n\r\nimport (\r\n\t\"sync\"\r\n\t\"net\/http\"\r\n\t\"log\"\r\n\t\"fmt\"\r\n\t\"crypto\/md5\"\r\n\t\"io\"\r\n\t\"bytes\"\r\n)\r\n\r\nvar (\r\n\tmutex sync.RWMutex\r\n\toptions map[*http.Request]*Options\r\n\tresponded map[*http.Request]bool\r\n)\r\n\r\nfunc init() {\r\n\toptions = make(map[*http.Request]*Options)\r\n\tresponded = make(map[*http.Request]bool)\r\n}\r\n\r\ntype ReadHasher interface {\r\n\tio.Reader\r\n\tHash() []byte\r\n}\r\n\r\ntype rhImpl struct {\r\n\tio.Reader\r\n\thash []byte\r\n}\r\n\r\nfunc (rh *rhImpl) Hash() []byte {\r\n\treturn rh.hash\r\n}\r\n\r\ntype Options struct {\r\n\tAllowMultiple bool\r\n\r\n\tOnError func(error)\r\n\r\n\tEncoder Encoder\r\n\r\n\tBefore func(w http.ResponseWriter, r *http.Request, status int, data interface{}) (int, interface{})\r\n\r\n\tStatusFunc func(w http.ResponseWriter, r *http.Request, status int) interface{}\r\n}\r\n\r\n\/\/ Handler wraps an HTTP handler becoming the source of options for all\r\n\/\/ containing With calls.\r\nfunc (opts *Options) Handler(handler http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tmutex.Lock()\r\n\t\toptions[r] = opts\r\n\t\tmutex.Unlock()\r\n\r\n\t\tdefer func() {\r\n\t\t\tmutex.Lock()\r\n\t\t\tdelete(options, r)\r\n\t\t\tdelete(responded, r)\r\n\t\t\tmutex.Unlock()\r\n\t\t}()\r\n\r\n\t\thandler.ServeHTTP(w, r)\r\n\t})\r\n}\r\n\r\nfunc (opts *Options) log(err error) {\r\n\tif opts.OnError == nil {\r\n\t\tlog.Println(\"respond: \" + err.Error())\r\n\t}\r\n\r\n\topts.OnError(err)\r\n}\r\n\r\nfunc about(r *http.Request) (opts *Options, multiple bool) {\r\n\tmutex.RLock()\r\n\topts = options[r]\r\n\tmultiple = responded[r]\r\n\tmutex.RUnlock()\r\n\r\n\treturn opts, multiple\r\n}\r\n\r\nfunc with(w http.ResponseWriter, r *http.Request, status int, in io.Reader, headers []string, opts *Options, multiple bool) {\r\n\tif opts != nil && multiple && !opts.AllowMultiple {\r\n\t\tpanic(\"respond: multiple responses\")\r\n\t}\r\n\r\n\tif hasher, ok := in.(ReadHasher); ok {\r\n\t\tif status >= 200 && status < 300 {\r\n\t\t\tetag := fmt.Sprintf(\"\\\"%s\\\"\", hasher.Hash())\r\n\r\n\t\t\tw.Header().Set(\"Etag\", etag)\r\n\r\n\t\t\tif match := r.Header.Get(\"if-none-match\"); match == etag {\r\n\t\t\t\tw.WriteHeader(http.StatusNotModified)\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor i := 0; i < len(headers)\/2; i++ {\r\n\t\tw.Header().Set(headers[i*2], headers[i*2 + 1])\r\n\t}\r\n\r\n\tw.WriteHeader(status)\r\n\tio.Copy(w, in)\r\n}\r\n\r\nfunc With(w http.ResponseWriter, r *http.Request, status int, data interface{}, headers ...string) {\r\n\topts, multiple := about(r)\r\n\r\n\tvar out []byte\r\n\tif bs, ok := data.([]byte); ok {\r\n\t\tout = bs\r\n\t} else {\r\n\t\tencoder := JSON\r\n\t\tif opts != nil {\r\n\t\t\tif opts.Before != nil {\r\n\t\t\t\tstatus, data = opts.Before(w, r, status, data)\r\n\t\t\t}\r\n\r\n\t\t\tif opts.Encoder != nil {\r\n\t\t\t\tencoder = opts.Encoder\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tbs, err := encoder.Encode(data)\r\n\t\tif err != nil {\r\n\t\t\topts.log(err)\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\theaders = append(headers, \"Content-Type\", encoder.ContentType())\r\n\t\tout = bs\r\n\t}\r\n\r\n\twith(w, r, status, &rhImpl{bytes.NewBuffer(out), md5.New().Sum(out)}, headers, opts, multiple)\r\n}\r\n\r\nfunc WithReader(w http.ResponseWriter, r *http.Request, status int, reader io.Reader, headers ...string) {\r\n\topts, multiple := about(r)\r\n\twith(w, r, status, reader, headers, opts, multiple)\r\n}\r\n\r\nfunc WithStatus(w http.ResponseWriter, r *http.Request, status int, headers ...string) {\r\n\tconst (\r\n\t\tfieldStatus = \"status\"\r\n\t\tfieldCode = \"code\"\r\n\t)\r\n\tWith(w, r, status, map[string]interface{}{fieldStatus: http.StatusText(status), fieldCode: status}, headers...)\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2014 Jeremy Saenz\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy of\n * this software and associated documentation files (the \"Software\"), to deal in\n * the Software without restriction, including without limitation the rights to\n * use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n * the Software, and to permit persons to whom the Software is furnished to do so,\n * subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n *\/\n\npackage hador\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n)\n\n\/\/ ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about\n\/\/ the response. It is recommended that middleware handlers use this construct to wrap a responsewriter\n\/\/ if the functionality calls for it.\ntype ResponseWriter interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\t\/\/ Status returns the status code of the response or 0 if the response has not been written.\n\tStatus() int\n\t\/\/ Written returns whether or not the ResponseWriter has been written.\n\tWritten() bool\n\t\/\/ Size returns the size of the response body.\n\tSize() int\n\t\/\/ Before allows for a function to be called before the ResponseWriter has been written to. This is\n\t\/\/ useful for setting headers or any other operations that must happen before a response has been written.\n\tBefore(func(ResponseWriter))\n}\n\ntype beforeFunc func(ResponseWriter)\n\n\/\/ NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter\nfunc NewResponseWriter(rw http.ResponseWriter) ResponseWriter {\n\treturn &responseWriter{rw, 0, 0, nil}\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tstatus int\n\tsize int\n\tbeforeFuncs []beforeFunc\n}\n\nfunc (rw *responseWriter) reset(w http.ResponseWriter) {\n\trw.ResponseWriter = w\n\trw.status = 0\n\trw.size = 0\n\trw.beforeFuncs = nil\n}\n\nfunc (rw *responseWriter) WriteHeader(s int) {\n\trw.callBefore()\n\trw.ResponseWriter.WriteHeader(s)\n\trw.status = s\n}\n\nfunc (rw *responseWriter) Write(b []byte) (int, error) {\n\tif !rw.Written() {\n\t\t\/\/ The status will be StatusOK if WriteHeader has not been called yet\n\t\trw.WriteHeader(http.StatusOK)\n\t}\n\tsize, err := rw.ResponseWriter.Write(b)\n\trw.size += size\n\treturn size, err\n}\n\nfunc (rw *responseWriter) Status() int {\n\treturn rw.status\n}\n\nfunc (rw *responseWriter) Size() int {\n\treturn rw.size\n}\n\nfunc (rw *responseWriter) Written() bool {\n\treturn rw.status != 0\n}\n\nfunc (rw *responseWriter) Before(before func(ResponseWriter)) {\n\trw.beforeFuncs = append(rw.beforeFuncs, before)\n}\n\nfunc (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := rw.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"the ResponseWriter doesn't support the Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}\n\nfunc (rw *responseWriter) CloseNotify() <-chan bool {\n\treturn rw.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (rw *responseWriter) callBefore() {\n\tfor i := len(rw.beforeFuncs) - 1; i >= 0; i-- {\n\t\trw.beforeFuncs[i](rw)\n\t}\n}\n\nfunc (rw *responseWriter) Flush() {\n\tflusher, ok := rw.ResponseWriter.(http.Flusher)\n\tif ok {\n\t\tflusher.Flush()\n\t}\n}\n<commit_msg>Added WriteString method.<commit_after>\/*\n * Copyright (c) 2014 Jeremy Saenz\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy of\n * this software and associated documentation files (the \"Software\"), to deal in\n * the Software without restriction, including without limitation the rights to\n * use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\n * the Software, and to permit persons to whom the Software is furnished to do so,\n * subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n *\/\n\npackage hador\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n)\n\n\/\/ ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about\n\/\/ the response. It is recommended that middleware handlers use this construct to wrap a responsewriter\n\/\/ if the functionality calls for it.\ntype ResponseWriter interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\t\/\/ Status returns the status code of the response or 0 if the response has not been written.\n\tStatus() int\n\n\tWriteString(string) (int, error)\n\n\t\/\/ Written returns whether or not the ResponseWriter has been written.\n\tWritten() bool\n\t\/\/ Size returns the size of the response body.\n\tSize() int\n\t\/\/ Before allows for a function to be called before the ResponseWriter has been written to. This is\n\t\/\/ useful for setting headers or any other operations that must happen before a response has been written.\n\tBefore(func(ResponseWriter))\n}\n\ntype beforeFunc func(ResponseWriter)\n\n\/\/ NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter\nfunc NewResponseWriter(rw http.ResponseWriter) ResponseWriter {\n\treturn &responseWriter{rw, 0, 0, nil}\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tstatus int\n\tsize int\n\tbeforeFuncs []beforeFunc\n}\n\nfunc (rw *responseWriter) reset(w http.ResponseWriter) {\n\trw.ResponseWriter = w\n\trw.status = 0\n\trw.size = 0\n\trw.beforeFuncs = nil\n}\n\nfunc (rw *responseWriter) WriteHeader(s int) {\n\trw.callBefore()\n\trw.ResponseWriter.WriteHeader(s)\n\trw.status = s\n}\n\nfunc (rw *responseWriter) Write(b []byte) (int, error) {\n\tif !rw.Written() {\n\t\t\/\/ The status will be StatusOK if WriteHeader has not been called yet\n\t\trw.WriteHeader(http.StatusOK)\n\t}\n\tsize, err := rw.ResponseWriter.Write(b)\n\trw.size += size\n\treturn size, err\n}\n\nfunc (rw *responseWriter) WriteString(s string) (int, error) {\n\tif !rw.Written() {\n\t\t\/\/ The status will be StatusOK if WriteHeader has not been called yet\n\t\trw.WriteHeader(http.StatusOK)\n\t}\n\tsize, err := io.WriteString(rw.ResponseWriter, s)\n\trw.size += size\n\treturn size, err\n}\n\nfunc (rw *responseWriter) Status() int {\n\treturn rw.status\n}\n\nfunc (rw *responseWriter) Size() int {\n\treturn rw.size\n}\n\nfunc (rw *responseWriter) Written() bool {\n\treturn rw.status != 0\n}\n\nfunc (rw *responseWriter) Before(before func(ResponseWriter)) {\n\trw.beforeFuncs = append(rw.beforeFuncs, before)\n}\n\nfunc (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := rw.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"the ResponseWriter doesn't support the Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}\n\nfunc (rw *responseWriter) CloseNotify() <-chan bool {\n\treturn rw.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (rw *responseWriter) callBefore() {\n\tfor i := len(rw.beforeFuncs) - 1; i >= 0; i-- {\n\t\trw.beforeFuncs[i](rw)\n\t}\n}\n\nfunc (rw *responseWriter) Flush() {\n\tflusher, ok := rw.ResponseWriter.(http.Flusher)\n\tif ok {\n\t\tflusher.Flush()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rockredis\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/absolute8511\/ZanRedisDB\/common\"\n\t\"github.com\/absolute8511\/gorocksdb\"\n)\n\nvar (\n\terrExpMetaKey = errors.New(\"invalid expire meta key\")\n\terrExpTimeKey = errors.New(\"invalid expire time key\")\n)\n\nconst (\n\tlogTimeFormatStr = \"2006-01-02 15:04:05\"\n)\n\nvar errExpType = errors.New(\"invalid expire type\")\n\n\/*\nthe coded format of expire time key:\nbytes: -0-|-1-2-3-4-5-6-7-8-|----9---|-10-11--------x-|\ndata : 103| when |dataType| key |\n*\/\nfunc expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {\n\tbuf := make([]byte, len(key)+1+8+1)\n\n\tpos := 0\n\tbuf[pos] = ExpTimeType\n\tpos++\n\n\tbinary.BigEndian.PutUint64(buf[pos:], uint64(when))\n\tpos += 8\n\n\tbuf[pos] = dataType\n\tpos++\n\n\tcopy(buf[pos:], key)\n\n\treturn buf\n}\n\n\/*\nthe coded format of expire meta key:\nbytes: -0-|----1-----|-2-3----------x--|\ndata : 102| dataType | key |\n*\/\nfunc expEncodeMetaKey(dataType byte, key []byte) []byte {\n\tbuf := make([]byte, len(key)+2)\n\n\tpos := 0\n\n\tbuf[pos] = ExpMetaType\n\tpos++\n\tbuf[pos] = dataType\n\tpos++\n\n\tcopy(buf[pos:], key)\n\n\treturn buf\n}\n\n\/\/decode the expire 'meta key', the return values are: dataType, key, error\nfunc expDecodeMetaKey(mk []byte) (byte, []byte, error) {\n\tpos := 0\n\n\tif pos+2 > len(mk) || mk[pos] != ExpMetaType {\n\t\treturn 0, nil, errExpMetaKey\n\t}\n\n\treturn mk[pos+1], mk[pos+2:], nil\n}\n\n\/\/decode the expire 'time key', the return values are: dataType, key, whenToExpire, error\nfunc expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {\n\tpos := 0\n\tif pos+10 > len(tk) || tk[pos] != ExpTimeType {\n\t\treturn 0, nil, 0, errExpTimeKey\n\t}\n\n\treturn tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil\n}\n\ntype expiredMeta struct {\n\ttimeKey []byte\n\tmetaKey []byte\n\tUTC int64\n}\n\ntype expiredMetaBuffer interface {\n\tWrite(*expiredMeta) error\n}\n\ntype expiration interface {\n\trawExpireAt(byte, []byte, int64, *gorocksdb.WriteBatch) error\n\texpireAt(byte, []byte, int64) error\n\tttl(byte, []byte) (int64, error)\n\tdelExpire(byte, []byte, *gorocksdb.WriteBatch) error\n\tcheck(common.ExpiredDataBuffer, chan struct{}) error\n\tStart()\n\tStop()\n}\n\nfunc (db *RockDB) expire(dataType byte, key []byte, duration int64) error {\n\treturn db.expiration.expireAt(dataType, key, time.Now().Unix()+duration)\n}\n\nfunc (db *RockDB) KVTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(KVType, key)\n}\n\nfunc (db *RockDB) HashTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(HashType, key)\n}\n\nfunc (db *RockDB) ListTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(ListType, key)\n}\n\nfunc (db *RockDB) SetTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(SetType, key)\n}\n\nfunc (db *RockDB) ZSetTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(ZSetType, key)\n}\n\ntype TTLChecker struct {\n\tsync.Mutex\n\n\tdb *RockDB\n\twatching int32\n\twg sync.WaitGroup\n\n\t\/\/next check time\n\tnc int64\n}\n\nfunc newTTLChecker(db *RockDB) *TTLChecker {\n\tc := &TTLChecker{\n\t\tdb: db,\n\t\tnc: time.Now().Unix(),\n\t}\n\treturn c\n}\n\nfunc (c *TTLChecker) setNextCheckTime(when int64, force bool) {\n\tc.Lock()\n\tif force {\n\t\tc.nc = when\n\t} else if c.nc > when {\n\t\tc.nc = when\n\t}\n\tc.Unlock()\n}\n\nfunc (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tbuf := make([]byte, 4096)\n\t\t\tn := runtime.Stack(buf, false)\n\t\t\tbuf = buf[0:n]\n\t\t\tdbLog.Errorf(\"check ttl panic: %s:%v\", buf, e)\n\t\t}\n\t}()\n\n\tnow := time.Now().Unix()\n\n\tc.Lock()\n\tnc := c.nc\n\tc.Unlock()\n\n\tif now < nc {\n\t\treturn nil\n\t}\n\n\tnc = now + 3600\n\n\tminKey := expEncodeTimeKey(NoneType, nil, 0)\n\tmaxKey := expEncodeTimeKey(maxDataType, nil, nc)\n\n\tvar eCount int64\n\tvar scanned int64\n\tcheckStart := time.Now()\n\n\tit, err := NewDBRangeLimitIterator(c.db.eng, minKey, maxKey,\n\t\tcommon.RangeROpen, 0, -1, false)\n\tif err != nil {\n\t\tc.setNextCheckTime(now, false)\n\t\treturn err\n\t} else if it == nil {\n\t\tc.setNextCheckTime(nc, true)\n\t\treturn nil\n\t}\n\tdefer it.Close()\n\n\tfor ; it.Valid(); it.Next() {\n\t\tif scanned%100 == 0 {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\tnc = now\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\ttk := it.Key()\n\t\tmk := it.Value()\n\n\t\tif tk == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdt, k, nt, dErr := expDecodeTimeKey(tk)\n\t\tif dErr != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tscanned += 1\n\t\tif scanned == 1 {\n\t\t\t\/\/log the first scanned key\n\t\t\tdbLog.Infof(\"ttl check start at key:[%s] of type:%s whose expire time is: %s\", string(k),\n\t\t\t\tTypeName[dt], time.Unix(nt, 0).Format(logTimeFormatStr))\n\t\t}\n\n\t\tif nt > now {\n\t\t\t\/\/the next ttl check time is nt!\n\t\t\tnc = nt\n\t\t\tdbLog.Infof(\"ttl check end at key:[%s] of type:%s whose expire time is: %s\", string(k),\n\t\t\t\tTypeName[dt], time.Unix(nt, 0).Format(logTimeFormatStr))\n\t\t\tbreak\n\t\t}\n\n\t\teCount += 1\n\n\t\terr = expiredBuf.Write(&expiredMeta{timeKey: tk, metaKey: mk, UTC: nt})\n\t\tif err != nil {\n\t\t\tnc = now\n\t\t\tbreak\n\t\t}\n\t}\n\n\tc.setNextCheckTime(nc, true)\n\n\tcheckCost := time.Since(checkStart).Nanoseconds() \/ 1000\n\tdbLog.Infof(\"[%d\/%d] keys have expired during ttl checking, cost:%d us\", eCount, scanned, checkCost)\n\n\treturn err\n}\n<commit_msg>adjust log level<commit_after>package rockredis\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/absolute8511\/ZanRedisDB\/common\"\n\t\"github.com\/absolute8511\/gorocksdb\"\n)\n\nvar (\n\terrExpMetaKey = errors.New(\"invalid expire meta key\")\n\terrExpTimeKey = errors.New(\"invalid expire time key\")\n)\n\nconst (\n\tlogTimeFormatStr = \"2006-01-02 15:04:05\"\n)\n\nvar errExpType = errors.New(\"invalid expire type\")\n\n\/*\nthe coded format of expire time key:\nbytes: -0-|-1-2-3-4-5-6-7-8-|----9---|-10-11--------x-|\ndata : 103| when |dataType| key |\n*\/\nfunc expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {\n\tbuf := make([]byte, len(key)+1+8+1)\n\n\tpos := 0\n\tbuf[pos] = ExpTimeType\n\tpos++\n\n\tbinary.BigEndian.PutUint64(buf[pos:], uint64(when))\n\tpos += 8\n\n\tbuf[pos] = dataType\n\tpos++\n\n\tcopy(buf[pos:], key)\n\n\treturn buf\n}\n\n\/*\nthe coded format of expire meta key:\nbytes: -0-|----1-----|-2-3----------x--|\ndata : 102| dataType | key |\n*\/\nfunc expEncodeMetaKey(dataType byte, key []byte) []byte {\n\tbuf := make([]byte, len(key)+2)\n\n\tpos := 0\n\n\tbuf[pos] = ExpMetaType\n\tpos++\n\tbuf[pos] = dataType\n\tpos++\n\n\tcopy(buf[pos:], key)\n\n\treturn buf\n}\n\n\/\/decode the expire 'meta key', the return values are: dataType, key, error\nfunc expDecodeMetaKey(mk []byte) (byte, []byte, error) {\n\tpos := 0\n\n\tif pos+2 > len(mk) || mk[pos] != ExpMetaType {\n\t\treturn 0, nil, errExpMetaKey\n\t}\n\n\treturn mk[pos+1], mk[pos+2:], nil\n}\n\n\/\/decode the expire 'time key', the return values are: dataType, key, whenToExpire, error\nfunc expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {\n\tpos := 0\n\tif pos+10 > len(tk) || tk[pos] != ExpTimeType {\n\t\treturn 0, nil, 0, errExpTimeKey\n\t}\n\n\treturn tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil\n}\n\ntype expiredMeta struct {\n\ttimeKey []byte\n\tmetaKey []byte\n\tUTC int64\n}\n\ntype expiredMetaBuffer interface {\n\tWrite(*expiredMeta) error\n}\n\ntype expiration interface {\n\trawExpireAt(byte, []byte, int64, *gorocksdb.WriteBatch) error\n\texpireAt(byte, []byte, int64) error\n\tttl(byte, []byte) (int64, error)\n\tdelExpire(byte, []byte, *gorocksdb.WriteBatch) error\n\tcheck(common.ExpiredDataBuffer, chan struct{}) error\n\tStart()\n\tStop()\n}\n\nfunc (db *RockDB) expire(dataType byte, key []byte, duration int64) error {\n\treturn db.expiration.expireAt(dataType, key, time.Now().Unix()+duration)\n}\n\nfunc (db *RockDB) KVTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(KVType, key)\n}\n\nfunc (db *RockDB) HashTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(HashType, key)\n}\n\nfunc (db *RockDB) ListTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(ListType, key)\n}\n\nfunc (db *RockDB) SetTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(SetType, key)\n}\n\nfunc (db *RockDB) ZSetTtl(key []byte) (t int64, err error) {\n\treturn db.ttl(ZSetType, key)\n}\n\ntype TTLChecker struct {\n\tsync.Mutex\n\n\tdb *RockDB\n\twatching int32\n\twg sync.WaitGroup\n\n\t\/\/next check time\n\tnc int64\n}\n\nfunc newTTLChecker(db *RockDB) *TTLChecker {\n\tc := &TTLChecker{\n\t\tdb: db,\n\t\tnc: time.Now().Unix(),\n\t}\n\treturn c\n}\n\nfunc (c *TTLChecker) setNextCheckTime(when int64, force bool) {\n\tc.Lock()\n\tif force {\n\t\tc.nc = when\n\t} else if c.nc > when {\n\t\tc.nc = when\n\t}\n\tc.Unlock()\n}\n\nfunc (c *TTLChecker) check(expiredBuf expiredMetaBuffer, stop chan struct{}) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tbuf := make([]byte, 4096)\n\t\t\tn := runtime.Stack(buf, false)\n\t\t\tbuf = buf[0:n]\n\t\t\tdbLog.Errorf(\"check ttl panic: %s:%v\", buf, e)\n\t\t}\n\t}()\n\n\tnow := time.Now().Unix()\n\n\tc.Lock()\n\tnc := c.nc\n\tc.Unlock()\n\n\tif now < nc {\n\t\treturn nil\n\t}\n\n\tnc = now + 3600\n\n\tminKey := expEncodeTimeKey(NoneType, nil, 0)\n\tmaxKey := expEncodeTimeKey(maxDataType, nil, nc)\n\n\tvar eCount int64\n\tvar scanned int64\n\tcheckStart := time.Now()\n\n\tit, err := NewDBRangeLimitIterator(c.db.eng, minKey, maxKey,\n\t\tcommon.RangeROpen, 0, -1, false)\n\tif err != nil {\n\t\tc.setNextCheckTime(now, false)\n\t\treturn err\n\t} else if it == nil {\n\t\tc.setNextCheckTime(nc, true)\n\t\treturn nil\n\t}\n\tdefer it.Close()\n\n\tfor ; it.Valid(); it.Next() {\n\t\tif scanned%100 == 0 {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\tnc = now\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\ttk := it.Key()\n\t\tmk := it.Value()\n\n\t\tif tk == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdt, k, nt, dErr := expDecodeTimeKey(tk)\n\t\tif dErr != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tscanned += 1\n\t\tif scanned == 1 {\n\t\t\t\/\/log the first scanned key\n\t\t\tdbLog.Debugf(\"ttl check start at key:[%s] of type:%s whose expire time is: %s\", string(k),\n\t\t\t\tTypeName[dt], time.Unix(nt, 0).Format(logTimeFormatStr))\n\t\t}\n\n\t\tif nt > now {\n\t\t\t\/\/the next ttl check time is nt!\n\t\t\tnc = nt\n\t\t\tdbLog.Debugf(\"ttl check end at key:[%s] of type:%s whose expire time is: %s\", string(k),\n\t\t\t\tTypeName[dt], time.Unix(nt, 0).Format(logTimeFormatStr))\n\t\t\tbreak\n\t\t}\n\n\t\teCount += 1\n\n\t\terr = expiredBuf.Write(&expiredMeta{timeKey: tk, metaKey: mk, UTC: nt})\n\t\tif err != nil {\n\t\t\tnc = now\n\t\t\tbreak\n\t\t}\n\t}\n\n\tc.setNextCheckTime(nc, true)\n\n\tcheckCost := time.Since(checkStart).Nanoseconds() \/ 1000\n\tif dbLog.Level() >= common.LOG_DEBUG || eCount > 100 || checkCost >= time.Second.Nanoseconds()\/1000 {\n\t\tdbLog.Infof(\"[%d\/%d] keys have expired during ttl checking, cost:%d us\", eCount, scanned, checkCost)\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package routing is a wrapper on naoina\/denco router.\n\/\/ It uses request.Form for params instead of a separate Params\n\/\/ argument. So, it requires a more memory and it is a bit slower.\n\/\/ However, the downsides are an acceptable trade off for compatability\n\/\/ with the standard library.\npackage routing\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/naoina\/denco\"\n)\n\n\/\/ Router represents a multiplexer for HTTP request.\ntype Router struct {\n\tdata *denco.Router\n}\n\n\/\/ info stores information about HTTP request's handler, its\n\/\/ pattern and methods.\ntype info struct {\n\thandler http.HandlerFunc \/\/ HTTP request handler.\n\tmethods []string \/\/ A list of allowed HTTP methods (e.g. \"GET\" or \"POST\").\n\tpattern string \/\/ Pattern is a routing path for handler.\n}\n\n\/\/ New allocates and returns a new multiplexer.\nfunc New() *Router {\n\treturn &Router{}\n}\n\n\/\/ HasMethod checks whether specific method request is allowed.\nfunc (t *info) HasMethod(name string) bool {\n\t\/\/ We are iterating through a slice of method strings\n\t\/\/ rather than using a map as there are only a few possible values.\n\t\/\/ So, hash function will require more time than a simple loop.\n\tfor _, v := range t.methods {\n\t\tif v == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ServeHTTP is used to implement http.Handler interface.\n\/\/ It dispatches the request to the handler whose pattern\n\/\/ most closely matches the request URL.\nfunc (t *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n}\n\n\/\/ Handle registers the handler for the given pattern.\n\/\/ If a handler already exists for pattern, it is being overridden.\nfunc (t *Router) Handle(method, pattern string, handler http.Handler) {\n}\n\n\/\/ HandleFunc registers the handler function for the given pattern.\nfunc (t *Router) HandleFunc(method, pattern string, handler http.HandlerFunc) {\n}\n\n\/\/ Handler returns the handler to use for the given request, consulting r.Method\n\/\/ and r.URL.Path. It always returns a non-nil handler. If there is no registered handler\n\/\/ that applies to the request, Handler returns a “page not found” handler and empty pattern.\n\/\/ If there is a registered handler but requested method is not allowed,\n\/\/ \"method not allowed\" and a pattern are returned.\nfunc (t *Router) Handler(r *http.Request) (handler http.Handler, pattern string) {\n\t\/\/ Make sure we have a handler for this request.\n\tobj, params, found := t.data.Lookup(r.URL.Path)\n\tif !found {\n\t\treturn http.HandlerFunc(NotFound), \"\"\n\t}\n\n\t\/\/ Check whether requested method is allowed.\n\tdata := obj.(info)\n\tif !data.HasMethod(r.Method) {\n\t\treturn http.HandlerFunc(MethodNotAllowed), data.pattern\n\t}\n\n\t\/\/ Add parameters of request to request.Form and return a handler.\n\tfor _, param := range params {\n\t\tr.Form.Add(param.Name, param.Value)\n\t}\n\treturn data.handler, data.pattern\n}\n\n\/\/ MethodNotAllowed replies to the request with an HTTP 405 method not allowed\n\/\/ error. If you want to use your own MethodNotAllowed handler, please override\n\/\/ this variable.\nvar MethodNotAllowed = func(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"405 method not allowed\", http.StatusMethodNotAllowed)\n}\n\n\/\/ NotFound replies to the request with an HTTP 404 not found error.\n\/\/ NotFound is called when unknown HTTP method or a handler not found.\n\/\/ If you want to use the your own NotFound handler, please overwrite this variable.\nvar NotFound = func(w http.ResponseWriter, r *http.Request) {\n\thttp.NotFound(w, r)\n}\n<commit_msg>Implemented router using denco as a base<commit_after>\/\/ Package routing is a wrapper on naoina\/denco router.\n\/\/ It uses request.Form for params instead of a separate Params\n\/\/ argument. So, it requires a more memory and it is a bit slower.\n\/\/ However, the downsides are an acceptable trade off for compatability\n\/\/ with the standard library.\n\/\/\n\/\/ A sample of its usage is below:\n\/\/\n\/\/\tpackage main\n\/\/\n\/\/\timport (\n\/\/\t\t\"log\"\n\/\/\t\t\"net\/http\"\n\/\/\n\/\/\t\t\"github.com\/anonx\/sunplate\/routing\"\n\/\/\t)\n\/\/\n\/\/\tfunc main() {\n\/\/\t\tr := routing.New()\n\/\/\t\terr := r.Handle(routing.Routes{\n\/\/\t\t\tr.Get(\"\/profiles\/:username\", ShowUserHandleFunc),\n\/\/\t\t\tr.Delete(\"\/profiles\/:username\", DeleteUserHandleFunc),\n\/\/\t\t}).Build()\n\/\/\t\tif err != nil {\n\/\/\t\t\tpanic(err)\n\/\/\t\t}\n\/\/\t\tlog.Fatal(http.ListenAndServe(\":8080\", r))\n\/\/\t}\npackage routing\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/naoina\/denco\"\n)\n\n\/\/ Router represents a multiplexer for HTTP requests.\ntype Router struct {\n\tdata *denco.Router \/\/ data stores denco router.\n\tindexes map[string]int \/\/ indexes are used to check whether a record exists.\n\trecords []denco.Record \/\/ records is a list of handlers expected by denco router.\n}\n\n\/\/ Routes is an alias of []Route.\ntype Routes []Route\n\n\/\/ Route is used to store information about HTTP request's handler\n\/\/ including a list of allowed methods and pattern.\ntype Route struct {\n\thandler *http.HandlerFunc \/\/ HTTP request handler function.\n\tmethods []string \/\/ A list of allowed HTTP methods (e.g. \"GET\" or \"POST\").\n\tpattern string \/\/ Pattern is a routing path for handler.\n}\n\n\/\/ NewRouter allocates and returns a new multiplexer.\nfunc NewRouter() *Router {\n\treturn &Router{\n\t\tindexes: map[string]int{},\n\t}\n}\n\n\/\/ HasMethod checks whether specific method request is allowed.\nfunc (t *Route) HasMethod(name string) bool {\n\t\/\/ We are iterating through a slice of method strings\n\t\/\/ rather than using a map as there are only a few possible values.\n\t\/\/ So, hash function will require more time than a simple loop.\n\tfor _, v := range t.methods {\n\t\tif v == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ServeHTTP is used to implement http.Handler interface.\n\/\/ It dispatches the request to the handler whose pattern\n\/\/ most closely matches the request URL.\nfunc (t *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th, _ := t.Handler(r)\n\th.ServeHTTP(w, r)\n}\n\n\/\/ Handle registers handlers for given patterns.\n\/\/ If a handler already exists for pattern, it will be overridden.\n\/\/ If it exists but with another method, a new method will be added.\nfunc (t *Router) Handle(routes Routes) {\n\tfor _, route := range routes {\n\t\t\/\/ Check whether we have already had such route.\n\t\tindex, ok := t.indexes[route.pattern]\n\t\tif ok {\n\t\t\t\/\/ If we haven't, add it.\n\t\t\tt.records = append(t.records, denco.NewRecord(route.pattern, route))\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check whether existing route has the same handler and\n\t\t\/\/ we are just trying to add a new method.\n\t\tr := t.records[index].Value.(Route)\n\t\tif r.handler != route.handler {\n\t\t\t\/\/ If we aren't, override an old route.\n\t\t\tt.records[index] = denco.NewRecord(route.pattern, route)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Otherwise, add all methods that haven't added yet.\n\t\tfor _, v := range r.methods {\n\t\t\tif !route.HasMethod(v) {\n\t\t\t\troute.methods = append(route.methods, v)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Build compiles registered routes. Routes that are added after building will not\n\/\/ be handled. A new call to build will be required.\nfunc (t *Router) Build() error {\n\trouter := denco.New()\n\terr := router.Build(t.records)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Route allocates and returns a Route struct.\nfunc (t *Router) Route(method, pattern string, handler http.HandlerFunc) Route {\n\treturn Route{\n\t\thandler: &handler,\n\t\tmethods: []string{method},\n\t\tpattern: pattern,\n\t}\n}\n\n\/\/ Handler returns the handler to use for the given request, consulting r.Method\n\/\/ and r.URL.Path. It always returns a non-nil handler. If there is no registered handler\n\/\/ that applies to the request, Handler returns a “page not found” handler and empty pattern.\n\/\/ If there is a registered handler but requested method is not allowed,\n\/\/ \"method not allowed\" and a pattern are returned.\nfunc (t *Router) Handler(r *http.Request) (handler http.Handler, pattern string) {\n\t\/\/ Make sure we have a handler for this request.\n\tobj, params, found := t.data.Lookup(r.URL.Path)\n\tif !found {\n\t\treturn http.HandlerFunc(NotFound), \"\"\n\t}\n\n\t\/\/ Check whether requested method is allowed.\n\troute := obj.(Route)\n\tif !route.HasMethod(r.Method) {\n\t\treturn http.HandlerFunc(MethodNotAllowed), route.pattern\n\t}\n\n\t\/\/ Add parameters of request to request.Form and return a handler.\n\tfor _, param := range params {\n\t\tr.Form.Add(param.Name, param.Value)\n\t}\n\treturn route.handler, route.pattern\n}\n\n\/\/ MethodNotAllowed replies to the request with an HTTP 405 method not allowed\n\/\/ error. If you want to use your own MethodNotAllowed handler, please override\n\/\/ this variable.\nvar MethodNotAllowed = func(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"405 method not allowed\", http.StatusMethodNotAllowed)\n}\n\n\/\/ NotFound replies to the request with an HTTP 404 not found error.\n\/\/ NotFound is called when unknown HTTP method or a handler not found.\n\/\/ If you want to use the your own NotFound handler, please overwrite this variable.\nvar NotFound = func(w http.ResponseWriter, r *http.Request) {\n\thttp.NotFound(w, r)\n}\n<|endoftext|>"} {"text":"<commit_before>package runtime\n\nimport (\n\t\"fmt\"\n\t\"github.com\/golang\/glog\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nfunc SavePidFile(args ...string) (string, error) {\n\tcmd := filepath.Base(os.Args[0])\n\tpidFile, err := os.Create(fmt.Sprintf(\"%s-%s.pid\", cmd, strings.Join(args, \"-\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer pidFile.Close()\n\tfmt.Fprintf(pidFile, \"%d\", os.Getpid())\n\treturn pidFile.Name(), nil\n}\n\ntype ShutdownSequence []io.Closer\n\nfunc ShutdownHook(h func() error) closeWrapper {\n\treturn closeWrapper{run: h}\n}\n\ntype closeWrapper struct {\n\trun func() error\n}\n\nfunc (w closeWrapper) Close() error {\n\treturn w.run()\n}\n\n\/\/ Implements io.Closer\nfunc (s ShutdownSequence) Close() (err error) {\n\tfor _, cl := range s {\n\t\tif err1 := cl.Close(); err == nil && err1 != nil {\n\t\t\terr = err1\n\t\t}\n\t}\n\treturn\n}\n\nfunc exitf(pattern string, args ...interface{}) {\n\tif !strings.HasSuffix(pattern, \"\\n\") {\n\t\tpattern = pattern + \"\\n\"\n\t}\n\tfmt.Fprintf(os.Stderr, pattern, args...)\n\tos.Exit(1)\n}\n\nfunc HandleSignals(shutdownc <-chan io.Closer) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGHUP)\n\tsignal.Notify(c, syscall.SIGINT)\n\tfor {\n\t\tsig := <-c\n\t\tsysSig, ok := sig.(syscall.Signal)\n\t\tif !ok {\n\t\t\tglog.Fatal(\"Not a unix signal\")\n\t\t}\n\t\tswitch sysSig {\n\t\tcase syscall.SIGHUP:\n\t\tcase syscall.SIGINT:\n\t\t\tglog.Warningln(\"Got SIGTERM: shutting down\")\n\t\t\tdonec := make(chan bool)\n\t\t\tgo func() {\n\t\t\t\tcl := <-shutdownc\n\t\t\t\tif err := cl.Close(); err != nil {\n\t\t\t\t\texitf(\"Error shutting down: %v\", err)\n\t\t\t\t}\n\t\t\t\tdonec <- true\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-donec:\n\t\t\t\tglog.Infoln(\"Shut down completed.\")\n\t\t\t\tos.Exit(0)\n\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\texitf(\"Timeout shutting down. Exiting uncleanly.\")\n\t\t\t}\n\t\tdefault:\n\t\t\tglog.Fatal(\"Received another signal, should not happen.\")\n\t\t}\n\t}\n}\n\n\/\/ Runs the http server. This server offers more control than the standard go's default http server\n\/\/ in that when a 'true' is sent to the stop channel, the listener is closed to force a clean shutdown.\nfunc RunServer(server *http.Server, stop chan bool) (stopped chan bool) {\n\tprotocol := \"tcp\"\n\tif match, _ := regexp.MatchString(\"[a-zA-Z0-9\\\\.]*:[0-9]{4,}\", server.Addr); !match {\n\t\tprotocol = \"unix\"\n\t}\n\n\tlistener, err := net.Listen(protocol, server.Addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstopped = make(chan bool)\n\n\tglog.Infoln(\"Starting\", protocol, \"listener at\", server.Addr)\n\n\t\/\/ This will be set to true if a shutdown signal is received. This allows us to detect\n\t\/\/ if the server stop is intentional or due to some error.\n\tfromSignal := false\n\n\t\/\/ The main goroutine where the server listens on the network connection\n\tgo func(fromSignal *bool) {\n\t\t\/\/ Serve will block until an error (e.g. from shutdown, closed connection) occurs.\n\t\terr := server.Serve(listener)\n\t\tif !*fromSignal {\n\t\t\tglog.Warningln(\"Warning: server stops due to error\", err)\n\t\t}\n\t\tstopped <- true\n\t}(&fromSignal)\n\n\t\/\/ Another goroutine that listens for signal to close the network connection\n\t\/\/ on shutdown. This will cause the server.Serve() to return.\n\tgo func(fromSignal *bool) {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlistener.Close()\n\t\t\t*fromSignal = true \/\/ Intentially stopped from signal\n\t\t\treturn\n\t\t}\n\t}(&fromSignal)\n\treturn\n}\n<commit_msg>Adjust file permissions for unix domain sockets<commit_after>package runtime\n\nimport (\n\t\"fmt\"\n\t\"github.com\/golang\/glog\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nfunc SavePidFile(args ...string) (string, error) {\n\tcmd := filepath.Base(os.Args[0])\n\tpidFile, err := os.Create(fmt.Sprintf(\"%s-%s.pid\", cmd, strings.Join(args, \"-\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer pidFile.Close()\n\tfmt.Fprintf(pidFile, \"%d\", os.Getpid())\n\treturn pidFile.Name(), nil\n}\n\ntype ShutdownSequence []io.Closer\n\nfunc ShutdownHook(h func() error) closeWrapper {\n\treturn closeWrapper{run: h}\n}\n\ntype closeWrapper struct {\n\trun func() error\n}\n\nfunc (w closeWrapper) Close() error {\n\treturn w.run()\n}\n\n\/\/ Implements io.Closer\nfunc (s ShutdownSequence) Close() (err error) {\n\tfor _, cl := range s {\n\t\tif err1 := cl.Close(); err == nil && err1 != nil {\n\t\t\terr = err1\n\t\t}\n\t}\n\treturn\n}\n\nfunc exitf(pattern string, args ...interface{}) {\n\tif !strings.HasSuffix(pattern, \"\\n\") {\n\t\tpattern = pattern + \"\\n\"\n\t}\n\tfmt.Fprintf(os.Stderr, pattern, args...)\n\tos.Exit(1)\n}\n\nfunc HandleSignals(shutdownc <-chan io.Closer) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, syscall.SIGHUP)\n\tsignal.Notify(c, syscall.SIGINT)\n\tfor {\n\t\tsig := <-c\n\t\tsysSig, ok := sig.(syscall.Signal)\n\t\tif !ok {\n\t\t\tglog.Fatal(\"Not a unix signal\")\n\t\t}\n\t\tswitch sysSig {\n\t\tcase syscall.SIGHUP:\n\t\tcase syscall.SIGINT:\n\t\t\tglog.Warningln(\"Got SIGTERM: shutting down\")\n\t\t\tdonec := make(chan bool)\n\t\t\tgo func() {\n\t\t\t\tcl := <-shutdownc\n\t\t\t\tif err := cl.Close(); err != nil {\n\t\t\t\t\texitf(\"Error shutting down: %v\", err)\n\t\t\t\t}\n\t\t\t\tdonec <- true\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-donec:\n\t\t\t\tglog.Infoln(\"Shut down completed.\")\n\t\t\t\tos.Exit(0)\n\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\texitf(\"Timeout shutting down. Exiting uncleanly.\")\n\t\t\t}\n\t\tdefault:\n\t\t\tglog.Fatal(\"Received another signal, should not happen.\")\n\t\t}\n\t}\n}\n\nfunc updateDomainSocketPermissions(filename string) (err error) {\n\t_, err = os.Lstat(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn os.Chmod(filename, 0777)\n}\n\n\/\/ Runs the http server. This server offers more control than the standard go's default http server\n\/\/ in that when a 'true' is sent to the stop channel, the listener is closed to force a clean shutdown.\nfunc RunServer(server *http.Server, stop chan bool) (stopped chan bool) {\n\tprotocol := \"tcp\"\n\tif match, _ := regexp.MatchString(\"[a-zA-Z0-9\\\\.]*:[0-9]{4,}\", server.Addr); !match {\n\t\tprotocol = \"unix\"\n\t}\n\n\tlistener, err := net.Listen(protocol, server.Addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstopped = make(chan bool)\n\n\tglog.Infoln(\"Starting\", protocol, \"listener at\", server.Addr)\n\n\tif protocol == \"unix\" {\n\t\tupdateDomainSocketPermissions(server.Addr)\n\t}\n\n\t\/\/ This will be set to true if a shutdown signal is received. This allows us to detect\n\t\/\/ if the server stop is intentional or due to some error.\n\tfromSignal := false\n\n\t\/\/ The main goroutine where the server listens on the network connection\n\tgo func(fromSignal *bool) {\n\t\t\/\/ Serve will block until an error (e.g. from shutdown, closed connection) occurs.\n\t\terr := server.Serve(listener)\n\t\tif !*fromSignal {\n\t\t\tglog.Warningln(\"Warning: server stops due to error\", err)\n\t\t}\n\t\tstopped <- true\n\t}(&fromSignal)\n\n\t\/\/ Another goroutine that listens for signal to close the network connection\n\t\/\/ on shutdown. This will cause the server.Serve() to return.\n\tgo func(fromSignal *bool) {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlistener.Close()\n\t\t\t*fromSignal = true \/\/ Intentially stopped from signal\n\t\t\treturn\n\t\t}\n\t}(&fromSignal)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package slack uses Norberto Lopes' slack library to implement the bot.Connector\n\/\/ interface.\npackage slack\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/lnxjedi\/gopherbot\/bot\"\n\t\"github.com\/nlopes\/slack\"\n)\n\ntype botDefinition struct {\n\tName, ID string \/\/ e.g. 'mygit', 'BAKDBISDO'\n}\n\ntype config struct {\n\tSlackToken string \/\/ the 'bot token for connecting to Slack\n\tMaxMessageSplit int \/\/ the maximum # of ~4000 byte messages to split a large message into\n\tBotRoster []botDefinition \/\/ roster mapping BXXX IDs to a defined username\n}\n\nvar lock sync.Mutex \/\/ package var lock\nvar started bool \/\/ set when connector is started\n\nfunc init() {\n\tbot.RegisterConnector(\"slack\", Initialize)\n}\n\n\/\/ Initialize starts the connection, sets up and returns the connector object\nfunc Initialize(robot bot.Handler, l *log.Logger) bot.Connector {\n\tlock.Lock()\n\tif started {\n\t\tlock.Unlock()\n\t\treturn nil\n\t}\n\tstarted = true\n\tlock.Unlock()\n\n\tvar c config\n\tvar tok string\n\n\terr := robot.GetProtocolConfig(&c)\n\tif err != nil {\n\t\trobot.Log(bot.Fatal, fmt.Errorf(\"Unable to retrieve protocol configuration: %v\", err))\n\t}\n\n\tif c.MaxMessageSplit == 0 {\n\t\tc.MaxMessageSplit = 1\n\t}\n\n\tif len(c.SlackToken) == 0 {\n\t\ttok = os.Getenv(\"SLACK_TOKEN\")\n\t\tif len(tok) == 0 {\n\t\t\trobot.Log(bot.Fatal, \"No slack token found in config or env var 'SLACK_TOKEN'\")\n\t\t}\n\t\tos.Unsetenv(\"SLACK_TOKEN\")\n\t\trobot.Log(bot.Debug, \"Using SLACK_TOKEN environment variable\")\n\t} else {\n\t\ttok = c.SlackToken\n\t}\n\n\tapi := slack.New(tok)\n\t\/\/ This spits out a lot of extra stuff, so we only enable it when tracing\n\tif robot.GetLogLevel() == bot.Trace {\n\t\tapi.SetDebug(true)\n\t}\n\tslack.SetLogger(l)\n\n\tsc := &slackConnector{\n\t\tapi: api,\n\t\tconn: api.NewRTM(),\n\t\tmaxMessageSplit: c.MaxMessageSplit,\n\t\tname: \"slack\",\n\t}\n\tsc.botUserID = make(map[string]string)\n\tsc.botIDUser = make(map[string]string)\n\tfor _, b := range c.BotRoster {\n\t\tif len(b.ID) == 0 || len(b.Name) == 0 {\n\t\t\trobot.Log(bot.Error, \"Zero-length Name or ID in BotRoster, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tsc.botUserID[b.Name] = b.ID\n\t\tsc.botIDUser[b.ID] = b.Name\n\t}\n\tgo sc.conn.ManageConnection()\n\n\tsc.Handler = robot\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-sc.conn.IncomingEvents:\n\n\t\t\tswitch ev := msg.Data.(type) {\n\n\t\t\tcase *slack.ConnectedEvent:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Infos: %T %v\\n\", ev, *ev.Info.User))\n\t\t\t\tsc.Log(bot.Debug, \"Connection counter:\", ev.ConnectionCount)\n\t\t\t\tsc.botName = ev.Info.User.Name\n\t\t\t\tsc.SetName(sc.botName)\n\t\t\t\tsc.Log(bot.Info, \"Slack setting bot name to\", sc.botName)\n\t\t\t\tsc.botID = ev.Info.User.ID\n\t\t\t\tsc.Log(bot.Trace, \"Set bot ID to\", sc.botID)\n\t\t\t\tsc.teamID = ev.Info.Team.ID\n\t\t\t\tsc.Log(bot.Info, \"Set team ID to\", sc.teamID)\n\t\t\t\tbreak Loop\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tsc.Log(bot.Fatal, \"Invalid credentials\")\n\t\t\t}\n\t\t}\n\t}\n\n\tsc.updateChannelMaps(\"\")\n\tsc.updateUserList(\"\")\n\tsc.botFullName, _ = sc.GetProtocolUserAttribute(sc.botName, \"realname\")\n\tsc.SetFullName(sc.botFullName)\n\tsc.Log(bot.Debug, \"Set bot full name to\", sc.botFullName)\n\tgo sc.startSendLoop()\n\n\treturn bot.Connector(sc)\n}\n\nfunc (sc *slackConnector) Run(stop <-chan struct{}) {\n\tsc.Lock()\n\t\/\/ This should never happen, just a bit of defensive coding\n\tif sc.running {\n\t\tsc.Unlock()\n\t\treturn\n\t}\n\tsc.running = true\n\tsc.Unlock()\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tsc.Log(bot.Debug, \"Received stop in connector\")\n\t\t\tbreak loop\n\t\tcase msg := <-sc.conn.IncomingEvents:\n\t\t\tsc.Log(bot.Trace, fmt.Sprintf(\"Event Received (msg, data, type): %v; %v; %T\", msg, msg.Data, msg.Data))\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\t\t\tcase *slack.ChannelArchiveEvent, *slack.ChannelCreatedEvent, *slack.ChannelDeletedEvent, *slack.ChannelRenameEvent:\n\t\t\t\tsc.updateChannelMaps(\"\")\n\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\t\/\/ Message processing is done concurrently\n\t\t\t\tgo sc.processMessage(ev)\n\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Presence Change: %v\", ev))\n\n\t\t\tcase *slack.LatencyReport:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Current latency: %v\", ev.Value))\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Error: %s\\n\", ev.Error()))\n\n\t\t\tdefault:\n\n\t\t\t\t\/\/ Ignore other events..\n\t\t\t\t\/\/ robot.Debug(fmt.Sprintf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Handle more channel updates<commit_after>\/\/ Package slack uses Norberto Lopes' slack library to implement the bot.Connector\n\/\/ interface.\npackage slack\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/lnxjedi\/gopherbot\/bot\"\n\t\"github.com\/nlopes\/slack\"\n)\n\ntype botDefinition struct {\n\tName, ID string \/\/ e.g. 'mygit', 'BAKDBISDO'\n}\n\ntype config struct {\n\tSlackToken string \/\/ the 'bot token for connecting to Slack\n\tMaxMessageSplit int \/\/ the maximum # of ~4000 byte messages to split a large message into\n\tBotRoster []botDefinition \/\/ roster mapping BXXX IDs to a defined username\n}\n\nvar lock sync.Mutex \/\/ package var lock\nvar started bool \/\/ set when connector is started\n\nfunc init() {\n\tbot.RegisterConnector(\"slack\", Initialize)\n}\n\n\/\/ Initialize starts the connection, sets up and returns the connector object\nfunc Initialize(robot bot.Handler, l *log.Logger) bot.Connector {\n\tlock.Lock()\n\tif started {\n\t\tlock.Unlock()\n\t\treturn nil\n\t}\n\tstarted = true\n\tlock.Unlock()\n\n\tvar c config\n\tvar tok string\n\n\terr := robot.GetProtocolConfig(&c)\n\tif err != nil {\n\t\trobot.Log(bot.Fatal, fmt.Errorf(\"Unable to retrieve protocol configuration: %v\", err))\n\t}\n\n\tif c.MaxMessageSplit == 0 {\n\t\tc.MaxMessageSplit = 1\n\t}\n\n\tif len(c.SlackToken) == 0 {\n\t\ttok = os.Getenv(\"SLACK_TOKEN\")\n\t\tif len(tok) == 0 {\n\t\t\trobot.Log(bot.Fatal, \"No slack token found in config or env var 'SLACK_TOKEN'\")\n\t\t}\n\t\tos.Unsetenv(\"SLACK_TOKEN\")\n\t\trobot.Log(bot.Debug, \"Using SLACK_TOKEN environment variable\")\n\t} else {\n\t\ttok = c.SlackToken\n\t}\n\n\tapi := slack.New(tok)\n\t\/\/ This spits out a lot of extra stuff, so we only enable it when tracing\n\tif robot.GetLogLevel() == bot.Trace {\n\t\tapi.SetDebug(true)\n\t}\n\tslack.SetLogger(l)\n\n\tsc := &slackConnector{\n\t\tapi: api,\n\t\tconn: api.NewRTM(),\n\t\tmaxMessageSplit: c.MaxMessageSplit,\n\t\tname: \"slack\",\n\t}\n\tsc.botUserID = make(map[string]string)\n\tsc.botIDUser = make(map[string]string)\n\tfor _, b := range c.BotRoster {\n\t\tif len(b.ID) == 0 || len(b.Name) == 0 {\n\t\t\trobot.Log(bot.Error, \"Zero-length Name or ID in BotRoster, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tsc.botUserID[b.Name] = b.ID\n\t\tsc.botIDUser[b.ID] = b.Name\n\t}\n\tgo sc.conn.ManageConnection()\n\n\tsc.Handler = robot\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-sc.conn.IncomingEvents:\n\n\t\t\tswitch ev := msg.Data.(type) {\n\n\t\t\tcase *slack.ConnectedEvent:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Infos: %T %v\\n\", ev, *ev.Info.User))\n\t\t\t\tsc.Log(bot.Debug, \"Connection counter:\", ev.ConnectionCount)\n\t\t\t\tsc.botName = ev.Info.User.Name\n\t\t\t\tsc.SetName(sc.botName)\n\t\t\t\tsc.Log(bot.Info, \"Slack setting bot name to\", sc.botName)\n\t\t\t\tsc.botID = ev.Info.User.ID\n\t\t\t\tsc.Log(bot.Trace, \"Set bot ID to\", sc.botID)\n\t\t\t\tsc.teamID = ev.Info.Team.ID\n\t\t\t\tsc.Log(bot.Info, \"Set team ID to\", sc.teamID)\n\t\t\t\tbreak Loop\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tsc.Log(bot.Fatal, \"Invalid credentials\")\n\t\t\t}\n\t\t}\n\t}\n\n\tsc.updateChannelMaps(\"\")\n\tsc.updateUserList(\"\")\n\tsc.botFullName, _ = sc.GetProtocolUserAttribute(sc.botName, \"realname\")\n\tsc.SetFullName(sc.botFullName)\n\tsc.Log(bot.Debug, \"Set bot full name to\", sc.botFullName)\n\tgo sc.startSendLoop()\n\n\treturn bot.Connector(sc)\n}\n\nfunc (sc *slackConnector) Run(stop <-chan struct{}) {\n\tsc.Lock()\n\t\/\/ This should never happen, just a bit of defensive coding\n\tif sc.running {\n\t\tsc.Unlock()\n\t\treturn\n\t}\n\tsc.running = true\n\tsc.Unlock()\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tsc.Log(bot.Debug, \"Received stop in connector\")\n\t\t\tbreak loop\n\t\tcase msg := <-sc.conn.IncomingEvents:\n\t\t\tsc.Log(bot.Trace, fmt.Sprintf(\"Event Received (msg, data, type): %v; %v; %T\", msg, msg.Data, msg.Data))\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\t\t\tcase *slack.ChannelArchiveEvent, *slack.ChannelUnarchiveEvent,\n\t\t\t\t*slack.ChannelCreatedEvent, *slack.ChannelDeletedEvent,\n\t\t\t\t*slack.ChannelRenameEvent, *slack.GroupArchiveEvent,\n\t\t\t\t*slack.GroupUnarchiveEvent, *slack.GroupCreatedEvent,\n\t\t\t\t*slack.GroupRenameEvent, *slack.IMCloseEvent,\n\t\t\t\t*slack.IMCreatedEvent, *slack.IMOpenEvent:\n\t\t\t\tsc.updateChannelMaps(\"\")\n\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\t\/\/ Message processing is done concurrently\n\t\t\t\tgo sc.processMessage(ev)\n\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Presence Change: %v\", ev))\n\n\t\t\tcase *slack.LatencyReport:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Current latency: %v\", ev.Value))\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tsc.Log(bot.Debug, fmt.Sprintf(\"Error: %s\\n\", ev.Error()))\n\n\t\t\tdefault:\n\n\t\t\t\t\/\/ Ignore other events..\n\t\t\t\t\/\/ robot.Debug(fmt.Sprintf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resourceAwsSecurityGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSecurityGroupCreate,\n\t\tRead: resourceAwsSecurityGroupRead,\n\t\tUpdate: resourceAwsSecurityGroupUpdate,\n\t\tDelete: resourceAwsSecurityGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ingress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupIngressHash,\n\t\t\t},\n\n\t\t\t\"owner_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsecurityGroupOpts := ec2.SecurityGroup{\n\t\tName: d.Get(\"name\").(string),\n\t}\n\n\tif v := d.Get(\"vpc_id\"); v != nil {\n\t\tsecurityGroupOpts.VpcId = v.(string)\n\t}\n\n\tif v := d.Get(\"description\"); v != nil {\n\t\tsecurityGroupOpts.Description = v.(string)\n\t}\n\n\tlog.Printf(\n\t\t\"[DEBUG] Security Group create configuration: %#v\", securityGroupOpts)\n\tcreateResp, err := ec2conn.CreateSecurityGroup(securityGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Security Group: %s\", err)\n\t}\n\n\td.SetId(createResp.Id)\n\n\tlog.Printf(\"[INFO] Security Group ID: %s\", d.Id())\n\n\t\/\/ Wait for the security group to truly exist\n\tlog.Printf(\n\t\t\"[DEBUG] Waiting for Security Group (%s) to exist\",\n\t\td.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"\"},\n\t\tTarget: \"exists\",\n\t\tRefresh: SGStateRefreshFunc(ec2conn, d.Id()),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for Security Group (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsSecurityGroupUpdate(d, meta)\n}\n\nfunc resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tsg := sgRaw.(*ec2.SecurityGroupInfo)\n\n\t\/\/ Gather our ingress rules\n\tingressMap := make(map[string]map[string]interface{})\n\tfor _, perm := range sg.IPPerms {\n\t\tk := fmt.Sprintf(\"%s-%d-%d\", perm.Protocol, perm.FromPort, perm.ToPort)\n\t\tm, ok := ingressMap[k]\n\t\tif !ok {\n\t\t\tm = make(map[string]interface{})\n\t\t\tingressMap[k] = m\n\t\t}\n\n\t\tm[\"from_port\"] = perm.FromPort\n\t\tm[\"to_port\"] = perm.ToPort\n\t\tm[\"protocol\"] = perm.Protocol\n\n\t\tif len(perm.SourceIPs) > 0 {\n\t\t\traw, ok := m[\"cidr_blocks\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(perm.SourceIPs))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, perm.SourceIPs...)\n\t\t\tm[\"cidr_blocks\"] = list\n\t\t}\n\n\t\tvar groups []string\n\t\tif len(perm.SourceGroups) > 0 {\n\t\t\tgroups = flattenSecurityGroups(perm.SourceGroups)\n\t\t}\n\t\tfor i, id := range groups {\n\t\t\tif id == d.Id() {\n\t\t\t\tgroups[i], groups = groups[len(groups)-1], groups[:len(groups)-1]\n\t\t\t\tm[\"self\"] = true\n\t\t\t}\n\t\t}\n\n\t\tif len(groups) > 0 {\n\t\t\traw, ok := m[\"security_groups\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(groups))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, groups...)\n\t\t\tm[\"security_groups\"] = list\n\t\t}\n\t}\n\tingressRules := make([]map[string]interface{}, 0, len(ingressMap))\n\tfor _, m := range ingressMap {\n\t\tingressRules = append(ingressRules, m)\n\t}\n\n\td.Set(\"description\", sg.Description)\n\td.Set(\"name\", sg.Name)\n\td.Set(\"vpc_id\", sg.VpcId)\n\td.Set(\"owner_id\", sg.OwnerId)\n\td.Set(\"ingress\", ingressRules)\n\td.Set(\"tags\", tagsToMap(sg.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tgroup := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup\n\n\tif d.HasChange(\"ingress\") {\n\t\to, n := d.GetChange(\"ingress\")\n\t\tif o == nil {\n\t\t\to = new(schema.Set)\n\t\t}\n\t\tif n == nil {\n\t\t\tn = new(schema.Set)\n\t\t}\n\n\t\tos := o.(*schema.Set)\n\t\tns := n.(*schema.Set)\n\n\t\tremove := expandIPPerms(d.Id(), os.Difference(ns).List())\n\t\tadd := expandIPPerms(d.Id(), ns.Difference(os).List())\n\n\t\t\/\/ TODO: We need to handle partial state better in the in-between\n\t\t\/\/ in this update.\n\n\t\t\/\/ TODO: It'd be nicer to authorize before removing, but then we have\n\t\t\/\/ to deal with complicated unrolling to get individual CIDR blocks\n\t\t\/\/ to avoid authorizing already authorized sources. Removing before\n\t\t\/\/ adding is easier here, and Terraform should be fast enough to\n\t\t\/\/ not have service issues.\n\n\t\tif len(remove) > 0 {\n\t\t\t\/\/ Revoke the old rules\n\t\t\t_, err = ec2conn.RevokeSecurityGroup(group, remove)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error authorizing security group ingress rules: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif len(add) > 0 {\n\t\t\t\/\/ Authorize the new rules\n\t\t\t_, err := ec2conn.AuthorizeSecurityGroup(group, add)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error authorizing security group ingress rules: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := setTags(ec2conn, d); err != nil {\n\t\treturn err\n\t} else {\n\t\td.SetPartial(\"tags\")\n\t}\n\n\treturn resourceAwsSecurityGroupRead(d, meta)\n}\n\nfunc resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tlog.Printf(\"[DEBUG] Security Group destroy: %v\", d.Id())\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\t_, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch ec2err.Code {\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\treturn nil\n\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\/\/ If it is a dependency violation, we want to retry\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\t\/\/ Any other error, we want to quit the retry loop immediately\n\t\t\t\treturn resource.RetryError{err}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc resourceAwsSecurityGroupIngressHash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"protocol\"].(string)))\n\n\t\/\/ We need to make sure to sort the strings below so that we always\n\t\/\/ generate the same hash code no matter what is in the set.\n\tif v, ok := m[\"cidr_blocks\"]; ok {\n\t\tvs := v.([]interface{})\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\tif v, ok := m[\"security_groups\"]; ok {\n\t\tvs := v.(*schema.Set).List()\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\n\treturn hashcode.String(buf.String())\n}\n\n\/\/ SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ a security group.\nfunc SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tsgs := []ec2.SecurityGroup{ec2.SecurityGroup{Id: id}}\n\t\tresp, err := conn.SecurityGroups(sgs, nil)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(*ec2.Error); ok {\n\t\t\t\tif ec2err.Code == \"InvalidSecurityGroupID.NotFound\" ||\n\t\t\t\t\tec2err.Code == \"InvalidGroup.NotFound\" {\n\t\t\t\t\tresp = nil\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error on SGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tgroup := &resp.Groups[0]\n\t\treturn group, \"exists\", nil\n\t}\n}\n<commit_msg>aws: Making security group ingress rules optional<commit_after>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resourceAwsSecurityGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSecurityGroupCreate,\n\t\tRead: resourceAwsSecurityGroupRead,\n\t\tUpdate: resourceAwsSecurityGroupUpdate,\n\t\tDelete: resourceAwsSecurityGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ingress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupIngressHash,\n\t\t\t},\n\n\t\t\t\"owner_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsecurityGroupOpts := ec2.SecurityGroup{\n\t\tName: d.Get(\"name\").(string),\n\t}\n\n\tif v := d.Get(\"vpc_id\"); v != nil {\n\t\tsecurityGroupOpts.VpcId = v.(string)\n\t}\n\n\tif v := d.Get(\"description\"); v != nil {\n\t\tsecurityGroupOpts.Description = v.(string)\n\t}\n\n\tlog.Printf(\n\t\t\"[DEBUG] Security Group create configuration: %#v\", securityGroupOpts)\n\tcreateResp, err := ec2conn.CreateSecurityGroup(securityGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Security Group: %s\", err)\n\t}\n\n\td.SetId(createResp.Id)\n\n\tlog.Printf(\"[INFO] Security Group ID: %s\", d.Id())\n\n\t\/\/ Wait for the security group to truly exist\n\tlog.Printf(\n\t\t\"[DEBUG] Waiting for Security Group (%s) to exist\",\n\t\td.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"\"},\n\t\tTarget: \"exists\",\n\t\tRefresh: SGStateRefreshFunc(ec2conn, d.Id()),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for Security Group (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsSecurityGroupUpdate(d, meta)\n}\n\nfunc resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tsg := sgRaw.(*ec2.SecurityGroupInfo)\n\n\t\/\/ Gather our ingress rules\n\tingressMap := make(map[string]map[string]interface{})\n\tfor _, perm := range sg.IPPerms {\n\t\tk := fmt.Sprintf(\"%s-%d-%d\", perm.Protocol, perm.FromPort, perm.ToPort)\n\t\tm, ok := ingressMap[k]\n\t\tif !ok {\n\t\t\tm = make(map[string]interface{})\n\t\t\tingressMap[k] = m\n\t\t}\n\n\t\tm[\"from_port\"] = perm.FromPort\n\t\tm[\"to_port\"] = perm.ToPort\n\t\tm[\"protocol\"] = perm.Protocol\n\n\t\tif len(perm.SourceIPs) > 0 {\n\t\t\traw, ok := m[\"cidr_blocks\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(perm.SourceIPs))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, perm.SourceIPs...)\n\t\t\tm[\"cidr_blocks\"] = list\n\t\t}\n\n\t\tvar groups []string\n\t\tif len(perm.SourceGroups) > 0 {\n\t\t\tgroups = flattenSecurityGroups(perm.SourceGroups)\n\t\t}\n\t\tfor i, id := range groups {\n\t\t\tif id == d.Id() {\n\t\t\t\tgroups[i], groups = groups[len(groups)-1], groups[:len(groups)-1]\n\t\t\t\tm[\"self\"] = true\n\t\t\t}\n\t\t}\n\n\t\tif len(groups) > 0 {\n\t\t\traw, ok := m[\"security_groups\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(groups))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, groups...)\n\t\t\tm[\"security_groups\"] = list\n\t\t}\n\t}\n\tingressRules := make([]map[string]interface{}, 0, len(ingressMap))\n\tfor _, m := range ingressMap {\n\t\tingressRules = append(ingressRules, m)\n\t}\n\n\td.Set(\"description\", sg.Description)\n\td.Set(\"name\", sg.Name)\n\td.Set(\"vpc_id\", sg.VpcId)\n\td.Set(\"owner_id\", sg.OwnerId)\n\td.Set(\"ingress\", ingressRules)\n\td.Set(\"tags\", tagsToMap(sg.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tgroup := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup\n\n\tif d.HasChange(\"ingress\") {\n\t\to, n := d.GetChange(\"ingress\")\n\t\tif o == nil {\n\t\t\to = new(schema.Set)\n\t\t}\n\t\tif n == nil {\n\t\t\tn = new(schema.Set)\n\t\t}\n\n\t\tos := o.(*schema.Set)\n\t\tns := n.(*schema.Set)\n\n\t\tremove := expandIPPerms(d.Id(), os.Difference(ns).List())\n\t\tadd := expandIPPerms(d.Id(), ns.Difference(os).List())\n\n\t\t\/\/ TODO: We need to handle partial state better in the in-between\n\t\t\/\/ in this update.\n\n\t\t\/\/ TODO: It'd be nicer to authorize before removing, but then we have\n\t\t\/\/ to deal with complicated unrolling to get individual CIDR blocks\n\t\t\/\/ to avoid authorizing already authorized sources. Removing before\n\t\t\/\/ adding is easier here, and Terraform should be fast enough to\n\t\t\/\/ not have service issues.\n\n\t\tif len(remove) > 0 {\n\t\t\t\/\/ Revoke the old rules\n\t\t\t_, err = ec2conn.RevokeSecurityGroup(group, remove)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error authorizing security group ingress rules: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif len(add) > 0 {\n\t\t\t\/\/ Authorize the new rules\n\t\t\t_, err := ec2conn.AuthorizeSecurityGroup(group, add)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error authorizing security group ingress rules: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := setTags(ec2conn, d); err != nil {\n\t\treturn err\n\t} else {\n\t\td.SetPartial(\"tags\")\n\t}\n\n\treturn resourceAwsSecurityGroupRead(d, meta)\n}\n\nfunc resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tlog.Printf(\"[DEBUG] Security Group destroy: %v\", d.Id())\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\t_, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch ec2err.Code {\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\treturn nil\n\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\/\/ If it is a dependency violation, we want to retry\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\t\/\/ Any other error, we want to quit the retry loop immediately\n\t\t\t\treturn resource.RetryError{err}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc resourceAwsSecurityGroupIngressHash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"protocol\"].(string)))\n\n\t\/\/ We need to make sure to sort the strings below so that we always\n\t\/\/ generate the same hash code no matter what is in the set.\n\tif v, ok := m[\"cidr_blocks\"]; ok {\n\t\tvs := v.([]interface{})\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\tif v, ok := m[\"security_groups\"]; ok {\n\t\tvs := v.(*schema.Set).List()\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\n\treturn hashcode.String(buf.String())\n}\n\n\/\/ SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ a security group.\nfunc SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tsgs := []ec2.SecurityGroup{ec2.SecurityGroup{Id: id}}\n\t\tresp, err := conn.SecurityGroups(sgs, nil)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(*ec2.Error); ok {\n\t\t\t\tif ec2err.Code == \"InvalidSecurityGroupID.NotFound\" ||\n\t\t\t\t\tec2err.Code == \"InvalidGroup.NotFound\" {\n\t\t\t\t\tresp = nil\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error on SGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tgroup := &resp.Groups[0]\n\t\treturn group, \"exists\", nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package riak_explorer\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/basho-labs\/riak-mesos\/cepmd\/cepm\"\n\t\"github.com\/basho-labs\/riak-mesos\/process_manager\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n)\n\ntype RiakExplorer struct {\n\ttempdir string\n\tport int64\n\tpm *process_manager.ProcessManager\n}\n\ntype templateData struct {\n\tHTTPPort int64\n\tNodeName string\n}\n\ntype advancedTemplateData struct {\n\tCEPMDPort int\n}\n\nfunc (re *RiakExplorer) NewRiakExplorerClient() *RiakExplorerClient {\n\treturn NewRiakExplorerClient(fmt.Sprintf(\"localhost:%d\", re.port))\n}\nfunc (re *RiakExplorer) TearDown() {\n\tre.pm.TearDown()\n}\n\nfunc decompress() string {\n\tassetPath := fmt.Sprintf(\"riak_explorer_%s_%s.tar.gz\", runtime.GOOS, runtime.GOARCH)\n\n\tlog.Info(\"Decompressing Riak Explorer: \", assetPath)\n\n\tasset, err := Asset(assetPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbytesReader := bytes.NewReader(asset)\n\n\tgzReader, err := gzip.NewReader(bytesReader)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error encountered decompressing riak explorer: \", err)\n\t\tos.Exit(1)\n\t}\n\n\ttr := tar.NewReader(gzReader)\n\ttempdir, err := ioutil.TempDir(\".\/\", \"riak_explorer.\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tfilename := filepath.Join(\".\", tempdir, hdr.Name)\n\t\tif hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {\n\t\t\tfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, os.FileMode(hdr.Mode))\n\t\t\tio.Copy(file, tr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tif _, err := io.Copy(file, tr); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t} else if hdr.Typeflag == tar.TypeDir {\n\t\t\terr := os.Mkdir(filename, 0777)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if hdr.Typeflag == tar.TypeSymlink {\n\t\t\tif err := os.Symlink(hdr.Linkname, filename); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\t\/\/ Hard link\n\t\t} else if hdr.Typeflag == tar.TypeLink {\n\t\t\tfmt.Printf(\"Encountered hardlink: %+v\\n\", hdr)\n\t\t\tlinkdest := filepath.Join(\".\", tempdir, hdr.Linkname)\n\t\t\tif err := os.Link(linkdest, filename); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(\"Experienced unknown tar file type: \", hdr.Typeflag)\n\t\t}\n\t}\n\treturn tempdir\n}\nfunc (re *RiakExplorer) configure(port int64, nodename string) {\n\tdata, err := Asset(\"riak_explorer.conf\")\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n\ttmpl, err := template.New(\"test\").Parse(string(data))\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ Populate template data from the MesosTask\n\tvars := templateData{}\n\n\tvars.NodeName = nodename\n\tvars.HTTPPort = port\n\tconfigpath := filepath.Join(\".\", re.tempdir, \"rex_root\", \"riak_explorer\", \"etc\", \"riak_explorer.conf\")\n\tfile, err := os.OpenFile(configpath, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0)\n\n\tif err != nil {\n\t\tlog.Panic(\"Unable to open file: \", err)\n\t}\n\n\terr = tmpl.Execute(file, vars)\n\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n}\nfunc (re *RiakExplorer) configureAdvanced(cepmdPort int) {\n\tdata, err := Asset(\"advanced.config\")\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n\ttmpl, err := template.New(\"advanced\").Parse(string(data))\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ Populate template data from the MesosTask\n\tvars := advancedTemplateData{}\n\tvars.CEPMDPort = cepmdPort\n\tconfigpath := filepath.Join(\".\", re.tempdir, \"rex_root\", \"riak_explorer\", \"etc\", \"advanced.config\")\n\tfile, err := os.OpenFile(configpath, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0)\n\n\tif err != nil {\n\t\tlog.Panic(\"Unable to open file: \", err)\n\t}\n\n\terr = tmpl.Execute(file, vars)\n\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n}\n\nfunc NewRiakExplorer(port int64, nodename string, c *cepm.CEPM) (*RiakExplorer, error) {\n\ttempdir := decompress()\n\texepath := filepath.Join(\".\", tempdir, \"rex_root\", \"riak_explorer\", \"bin\", \"riak_explorer\")\n\n\targs := []string{\"console\", \"-noinput\"}\n\thealthCheckFun := func() error {\n\t\tlog.Info(\"Running healthcheck: \", port)\n\t\t_, err := NewRiakExplorerClient(fmt.Sprintf(\"localhost:%d\", port)).Ping()\n\t\tlog.Info(\"Healthcheck result \", err)\n\t\treturn err\n\t}\n\ttearDownFun := func() {\n\t\tlog.Info(\"Deleting all data in: \", tempdir)\n\t\terr := os.RemoveAll(tempdir)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n\tre := &RiakExplorer{\n\t\ttempdir: tempdir,\n\t\tport: port,\n\t}\n\tif c != nil {\n\t\t\/\/ This is gross -- we're passing \"hidden\" state by passing it through the unix environment variables.\n\t\t\/\/ Fix it -- we should convert the NewRiakExplorer into using a fluent pattern?\n\t\tlibpath := filepath.Join(\".\", tempdir, \"rex_root\", \"riak_explorer\", \"lib\", \"basho-patches\")\n\t\tos.Mkdir(libpath, 0777)\n\t\terr := cepm.InstallInto(libpath)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\targs = append(args, \"-no_epmd\")\n\t\tre.configureAdvanced(c.GetPort())\n\t}\n\tre.configure(port, nodename)\n\tlog.Debugf(\"Starting up Riak Explorer %v\", exepath)\n\tvar err error\n\tchroot := filepath.Join(\".\", tempdir, \"rex_root\", \"riak_explorer\")\n\tre.pm, err = process_manager.NewProcessManager(tearDownFun, exepath, args, healthCheckFun, &chroot)\n\tif err != nil {\n\t\tlog.Error(\"Could not start Riak Explorer: \", err)\n\t}\n\n\treturn re, err\n}\n<commit_msg>Alter rex to handle chroot relative paths<commit_after>package riak_explorer\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/basho-labs\/riak-mesos\/cepmd\/cepm\"\n\t\"github.com\/basho-labs\/riak-mesos\/process_manager\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n)\n\ntype RiakExplorer struct {\n\ttempdir string\n\tport int64\n\tpm *process_manager.ProcessManager\n}\n\ntype templateData struct {\n\tHTTPPort int64\n\tNodeName string\n}\n\ntype advancedTemplateData struct {\n\tCEPMDPort int\n}\n\nfunc (re *RiakExplorer) NewRiakExplorerClient() *RiakExplorerClient {\n\treturn NewRiakExplorerClient(fmt.Sprintf(\"localhost:%d\", re.port))\n}\nfunc (re *RiakExplorer) TearDown() {\n\tre.pm.TearDown()\n}\n\nfunc decompress() string {\n\tassetPath := fmt.Sprintf(\"riak_explorer_%s_%s.tar.gz\", runtime.GOOS, runtime.GOARCH)\n\n\tlog.Info(\"Decompressing Riak Explorer: \", assetPath)\n\n\tasset, err := Asset(assetPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbytesReader := bytes.NewReader(asset)\n\n\tgzReader, err := gzip.NewReader(bytesReader)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error encountered decompressing riak explorer: \", err)\n\t\tos.Exit(1)\n\t}\n\n\ttr := tar.NewReader(gzReader)\n\ttempdir, err := ioutil.TempDir(\".\/\", \"riak_explorer.\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tfilename := filepath.Join(\".\", tempdir, hdr.Name)\n\t\tif hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {\n\t\t\tfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, os.FileMode(hdr.Mode))\n\t\t\tio.Copy(file, tr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tif _, err := io.Copy(file, tr); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t} else if hdr.Typeflag == tar.TypeDir {\n\t\t\terr := os.Mkdir(filename, 0777)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if hdr.Typeflag == tar.TypeSymlink {\n\t\t\tif err := os.Symlink(hdr.Linkname, filename); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\t\/\/ Hard link\n\t\t} else if hdr.Typeflag == tar.TypeLink {\n\t\t\tfmt.Printf(\"Encountered hardlink: %+v\\n\", hdr)\n\t\t\tlinkdest := filepath.Join(\".\", tempdir, hdr.Linkname)\n\t\t\tif err := os.Link(linkdest, filename); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(\"Experienced unknown tar file type: \", hdr.Typeflag)\n\t\t}\n\t}\n\treturn tempdir\n}\nfunc (re *RiakExplorer) configure(port int64, nodename string) {\n\tdata, err := Asset(\"riak_explorer.conf\")\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n\ttmpl, err := template.New(\"test\").Parse(string(data))\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ Populate template data from the MesosTask\n\tvars := templateData{}\n\n\tvars.NodeName = nodename\n\tvars.HTTPPort = port\n\tconfigpath := filepath.Join(\".\", re.tempdir, \"rex_root\", \"riak_explorer\", \"etc\", \"riak_explorer.conf\")\n\tfile, err := os.OpenFile(configpath, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0)\n\n\tif err != nil {\n\t\tlog.Panic(\"Unable to open file: \", err)\n\t}\n\n\terr = tmpl.Execute(file, vars)\n\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n}\nfunc (re *RiakExplorer) configureAdvanced(cepmdPort int) {\n\tdata, err := Asset(\"advanced.config\")\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n\ttmpl, err := template.New(\"advanced\").Parse(string(data))\n\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ Populate template data from the MesosTask\n\tvars := advancedTemplateData{}\n\tvars.CEPMDPort = cepmdPort\n\tconfigpath := filepath.Join(\".\", re.tempdir, \"rex_root\", \"riak_explorer\", \"etc\", \"advanced.config\")\n\tfile, err := os.OpenFile(configpath, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0)\n\n\tif err != nil {\n\t\tlog.Panic(\"Unable to open file: \", err)\n\t}\n\n\terr = tmpl.Execute(file, vars)\n\n\tif err != nil {\n\t\tlog.Panic(\"Got error\", err)\n\t}\n}\n\nfunc NewRiakExplorer(port int64, nodename string, c *cepm.CEPM) (*RiakExplorer, error) {\n\ttempdir := decompress()\n\texepath := \"\/riak_explorer\/bin\/riak_explorer\"\n\n\targs := []string{\"console\", \"-noinput\"}\n\thealthCheckFun := func() error {\n\t\tlog.Info(\"Running healthcheck: \", port)\n\t\t_, err := NewRiakExplorerClient(fmt.Sprintf(\"localhost:%d\", port)).Ping()\n\t\tlog.Info(\"Healthcheck result \", err)\n\t\treturn err\n\t}\n\ttearDownFun := func() {\n\t\tlog.Info(\"Deleting all data in: \", tempdir)\n\t\terr := os.RemoveAll(tempdir)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n\tre := &RiakExplorer{\n\t\ttempdir: tempdir,\n\t\tport: port,\n\t}\n\tif c != nil {\n\t\t\/\/ This is gross -- we're passing \"hidden\" state by passing it through the unix environment variables.\n\t\t\/\/ Fix it -- we should convert the NewRiakExplorer into using a fluent pattern?\n\t\tlibpath := filepath.Join(\".\", tempdir, \"rex_root\", \"riak_explorer\", \"lib\", \"basho-patches\")\n\t\tos.Mkdir(libpath, 0777)\n\t\terr := cepm.InstallInto(libpath)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\targs = append(args, \"-no_epmd\")\n\t\tre.configureAdvanced(c.GetPort())\n\t}\n\tre.configure(port, nodename)\n\tlog.Debugf(\"Starting up Riak Explorer %v\", exepath)\n\tvar err error\n\tchroot := filepath.Join(\".\", tempdir, \"rex_root\")\n\tre.pm, err = process_manager.NewProcessManager(tearDownFun, exepath, args, healthCheckFun, &chroot)\n\tif err != nil {\n\t\tlog.Error(\"Could not start Riak Explorer: \", err)\n\t}\n\n\treturn re, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage filter\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/seccomp\"\n)\n\n\/\/ allowedSyscalls is the set of syscalls executed by the gofer.\nvar allowedSyscalls = seccomp.SyscallRules{\n\tsyscall.SYS_ACCEPT: {},\n\tsyscall.SYS_SOCKET: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(syscall.AF_UNIX),\n\t\t},\n\t},\n\tsyscall.SYS_CONNECT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_SETSOCKOPT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_GETSOCKNAME: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_GETPEERNAME: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n\t\t{seccomp.AllowValue(linux.ARCH_GET_FS)},\n\t\t{seccomp.AllowValue(linux.ARCH_SET_FS)},\n\t},\n\tsyscall.SYS_CLOCK_GETTIME: {},\n\tsyscall.SYS_CLONE: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(\n\t\t\t\tsyscall.CLONE_VM |\n\t\t\t\t\tsyscall.CLONE_FS |\n\t\t\t\t\tsyscall.CLONE_FILES |\n\t\t\t\t\tsyscall.CLONE_SIGHAND |\n\t\t\t\t\tsyscall.CLONE_SYSVSEM |\n\t\t\t\t\tsyscall.CLONE_THREAD),\n\t\t},\n\t},\n\tsyscall.SYS_CLOSE: {},\n\tsyscall.SYS_DUP: {},\n\tsyscall.SYS_EPOLL_CTL: {},\n\tsyscall.SYS_EPOLL_PWAIT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_EVENTFD2: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(0),\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_EXIT: {},\n\tsyscall.SYS_EXIT_GROUP: {},\n\tsyscall.SYS_FALLOCATE: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_FCHMOD: {},\n\tsyscall.SYS_FCHOWNAT: {},\n\tsyscall.SYS_FCNTL: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_GETFL),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_SETFL),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_GETFD),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_FSTAT: {},\n\tsyscall.SYS_FSTATFS: {},\n\tsyscall.SYS_FSYNC: {},\n\tsyscall.SYS_FTRUNCATE: {},\n\tsyscall.SYS_FUTEX: {\n\t\tseccomp.Rule{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t\tseccomp.Rule{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG),\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_GETDENTS64: {},\n\tsyscall.SYS_GETPID: {},\n\tunix.SYS_GETRANDOM: {},\n\tsyscall.SYS_GETTID: {},\n\tsyscall.SYS_GETTIMEOFDAY: {},\n\tsyscall.SYS_LINKAT: {},\n\tsyscall.SYS_LSEEK: {},\n\tsyscall.SYS_MADVISE: {},\n\tsyscall.SYS_MKDIRAT: {},\n\tsyscall.SYS_MMAP: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_SHARED),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),\n\t\t},\n\t},\n\tsyscall.SYS_MPROTECT: {},\n\tsyscall.SYS_MUNMAP: {},\n\tsyscall.SYS_NANOSLEEP: {},\n\tsyscall.SYS_NEWFSTATAT: {},\n\tsyscall.SYS_OPENAT: {},\n\tsyscall.SYS_PPOLL: {},\n\tsyscall.SYS_PREAD64: {},\n\tsyscall.SYS_PWRITE64: {},\n\tsyscall.SYS_READ: {},\n\tsyscall.SYS_READLINKAT: {},\n\tsyscall.SYS_RECVMSG: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),\n\t\t},\n\t},\n\tsyscall.SYS_RENAMEAT: {},\n\tsyscall.SYS_RESTART_SYSCALL: {},\n\tsyscall.SYS_RT_SIGPROCMASK: {},\n\tsyscall.SYS_SCHED_YIELD: {},\n\tsyscall.SYS_SENDMSG: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),\n\t\t},\n\t},\n\tsyscall.SYS_SHUTDOWN: []seccomp.Rule{\n\t\t{seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)},\n\t},\n\tsyscall.SYS_SIGALTSTACK: {},\n\tsyscall.SYS_SYMLINKAT: {},\n\tsyscall.SYS_TGKILL: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(uint64(os.Getpid())),\n\t\t},\n\t},\n\tsyscall.SYS_UNLINKAT: {},\n\tsyscall.SYS_UTIMENSAT: {},\n\tsyscall.SYS_WRITE: {},\n}\n<commit_msg>Restrict seccomp filters for UDS support.<commit_after>\/\/ Copyright 2018 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage filter\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/seccomp\"\n)\n\n\/\/ allowedSyscalls is the set of syscalls executed by the gofer.\nvar allowedSyscalls = seccomp.SyscallRules{\n\tsyscall.SYS_ACCEPT: {},\n\tsyscall.SYS_SOCKET: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(syscall.AF_UNIX),\n\t\t},\n\t},\n\tsyscall.SYS_CONNECT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_SETSOCKOPT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.SOL_SOCKET),\n\t\t\tseccomp.AllowValue(syscall.SO_BROADCAST),\n\t\t},\n\t},\n\tsyscall.SYS_GETSOCKNAME: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_GETPEERNAME: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t},\n\t},\n\tsyscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n\t\t{seccomp.AllowValue(linux.ARCH_GET_FS)},\n\t\t{seccomp.AllowValue(linux.ARCH_SET_FS)},\n\t},\n\tsyscall.SYS_CLOCK_GETTIME: {},\n\tsyscall.SYS_CLONE: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(\n\t\t\t\tsyscall.CLONE_VM |\n\t\t\t\t\tsyscall.CLONE_FS |\n\t\t\t\t\tsyscall.CLONE_FILES |\n\t\t\t\t\tsyscall.CLONE_SIGHAND |\n\t\t\t\t\tsyscall.CLONE_SYSVSEM |\n\t\t\t\t\tsyscall.CLONE_THREAD),\n\t\t},\n\t},\n\tsyscall.SYS_CLOSE: {},\n\tsyscall.SYS_DUP: {},\n\tsyscall.SYS_EPOLL_CTL: {},\n\tsyscall.SYS_EPOLL_PWAIT: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_EVENTFD2: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(0),\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_EXIT: {},\n\tsyscall.SYS_EXIT_GROUP: {},\n\tsyscall.SYS_FALLOCATE: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_FCHMOD: {},\n\tsyscall.SYS_FCHOWNAT: {},\n\tsyscall.SYS_FCNTL: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_GETFL),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_SETFL),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_GETFD),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.F_DUPFD_CLOEXEC),\n\t\t},\n\t},\n\tsyscall.SYS_FSTAT: {},\n\tsyscall.SYS_FSTATFS: {},\n\tsyscall.SYS_FSYNC: {},\n\tsyscall.SYS_FTRUNCATE: {},\n\tsyscall.SYS_FUTEX: {\n\t\tseccomp.Rule{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t\tseccomp.Rule{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG),\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(0),\n\t\t},\n\t},\n\tsyscall.SYS_GETDENTS64: {},\n\tsyscall.SYS_GETPID: {},\n\tunix.SYS_GETRANDOM: {},\n\tsyscall.SYS_GETTID: {},\n\tsyscall.SYS_GETTIMEOFDAY: {},\n\tsyscall.SYS_LINKAT: {},\n\tsyscall.SYS_LSEEK: {},\n\tsyscall.SYS_MADVISE: {},\n\tsyscall.SYS_MKDIRAT: {},\n\tsyscall.SYS_MMAP: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_SHARED),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),\n\t\t},\n\t},\n\tsyscall.SYS_MPROTECT: {},\n\tsyscall.SYS_MUNMAP: {},\n\tsyscall.SYS_NANOSLEEP: {},\n\tsyscall.SYS_NEWFSTATAT: {},\n\tsyscall.SYS_OPENAT: {},\n\tsyscall.SYS_PPOLL: {},\n\tsyscall.SYS_PREAD64: {},\n\tsyscall.SYS_PWRITE64: {},\n\tsyscall.SYS_READ: {},\n\tsyscall.SYS_READLINKAT: {},\n\tsyscall.SYS_RECVMSG: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),\n\t\t},\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),\n\t\t},\n\t},\n\tsyscall.SYS_RENAMEAT: {},\n\tsyscall.SYS_RESTART_SYSCALL: {},\n\tsyscall.SYS_RT_SIGPROCMASK: {},\n\tsyscall.SYS_SCHED_YIELD: {},\n\tsyscall.SYS_SENDMSG: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowAny{},\n\t\t\tseccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),\n\t\t},\n\t},\n\tsyscall.SYS_SHUTDOWN: []seccomp.Rule{\n\t\t{seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)},\n\t},\n\tsyscall.SYS_SIGALTSTACK: {},\n\tsyscall.SYS_SYMLINKAT: {},\n\tsyscall.SYS_TGKILL: []seccomp.Rule{\n\t\t{\n\t\t\tseccomp.AllowValue(uint64(os.Getpid())),\n\t\t},\n\t},\n\tsyscall.SYS_UNLINKAT: {},\n\tsyscall.SYS_UTIMENSAT: {},\n\tsyscall.SYS_WRITE: {},\n}\n<|endoftext|>"} {"text":"<commit_before>package sck\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ Define a mock Connection for testing purposes. This will read to and write from a byte\n\/\/ buffer.\ntype MockConn struct {\n\tBuffer []byte\n\tSetBuffer chan []byte\n}\n\nfunc (c *MockConn) Read(b []byte) (int, error) {\n\tdata := <-c.SetBuffer\n\tcopy(b, data)\n\treturn len(b), nil\n}\n\nfunc (c *MockConn) Write(b []byte) (int, error) {\n\tc.Buffer = make([]byte, len(b))\n\n\t\/\/ This will block until the calling thread is ready for the read to happen.\n\t<-c.SetBuffer\n\n\tcopy(c.Buffer, b)\n\treturn len(b), nil\n}\n\nfunc (c *MockConn) Close() error {\n\treturn nil\n}\n\nfunc (c *MockConn) LocalAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *MockConn) RemoteAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *MockConn) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *MockConn) SetReadDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *MockConn) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\n\/\/ TestSender tests the sck.Sender() goroutine. This test appears to have unsynchronised\n\/\/ access to a buffer but it actually doesn't, because the channel blocks the other goroutine.\nfunc TestSender(t *testing.T) {\n\t\/\/ Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t\/\/ Prepare the messages we're going to send.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":syrk!kalt@millennium.stealth.net QUIT :Gone to have lunch\"),\n\t}\n\n\t\/\/ Start the goroutine.\n\tgo Sender(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\tdataChan <- msg\n\t\tconn.SetBuffer <- msg\n\n\t\t\/\/ Sleep to give the other goroutine some time to make the copy. This slows the tests,\n\t\t\/\/ but we just have to accept that.\n\t\ttime.Sleep(200)\n\n\t\tif cmp := bytes.Compare(conn.Buffer, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to write correctly: expected %v, got %v.\",\n\t\t\t\tmsg,\n\t\t\t\tconn.Buffer)\n\t\t}\n\t}\n\n\t\/\/ Close the channel.\n\tclose(dataChan)\n}\n\n\/\/ TestReceiver tests the sck.Receiver() goroutine.\nfunc TestReceiver(t *testing.T) {\n\t\/\/ Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t\/\/ Prepare the messages we're going to receive.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":syrk!kalt@millennium.stealth.net QUIT :Gone to have lunch\"),\n\t}\n\n\t\/\/ Start the goroutine.\n\tgo Receiver(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\t\/\/ Send the message in.\n\t\tconn.SetBuffer <- msg\n\n\t\t\/\/ Receieve the message back out.\n\t\trecvMsg := <-dataChan\n\t\trecvMsg = bytes.TrimRight(recvMsg, \"\\x00\")\n\n\t\t\/\/ If they aren't the same, something horrible happened.\n\t\tif cmp := bytes.Compare(recvMsg, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to read correctly: expected %v, got %v\",\n\t\t\t\tmsg,\n\t\t\t\trecvMsg)\n\t\t}\n\t}\n\n\t\/\/ Close the channel and confirm the tests don't panic.\n\tclose(dataChan)\n\ttime.Sleep(50 * time.Millisecond)\n}\n<commit_msg>Correct previous incorrect test.<commit_after>package sck\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ Define a mock Connection for testing purposes. This will read to and write from a byte\n\/\/ buffer.\ntype MockConn struct {\n\tBuffer []byte\n\tSetBuffer chan []byte\n}\n\nfunc (c *MockConn) Read(b []byte) (int, error) {\n\tdata := <-c.SetBuffer\n\tcopy(b, data)\n\treturn len(b), nil\n}\n\nfunc (c *MockConn) Write(b []byte) (int, error) {\n\tc.Buffer = make([]byte, len(b))\n\n\t\/\/ This will block until the calling thread is ready for the read to happen.\n\t<-c.SetBuffer\n\n\tcopy(c.Buffer, b)\n\treturn len(b), nil\n}\n\nfunc (c *MockConn) Close() error {\n\treturn nil\n}\n\nfunc (c *MockConn) LocalAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *MockConn) RemoteAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *MockConn) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *MockConn) SetReadDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *MockConn) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\n\/\/ TestSender tests the sck.Sender() goroutine. This test appears to have unsynchronised\n\/\/ access to a buffer but it actually doesn't, because the channel blocks the other goroutine.\nfunc TestSender(t *testing.T) {\n\t\/\/ Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t\/\/ Prepare the messages we're going to send.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":syrk!kalt@millennium.stealth.net QUIT :Gone to have lunch\"),\n\t}\n\n\t\/\/ Start the goroutine.\n\tgo Sender(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\tdataChan <- msg\n\t\tconn.SetBuffer <- msg\n\n\t\t\/\/ Sleep to give the other goroutine some time to make the copy. This slows the tests,\n\t\t\/\/ but we just have to accept that.\n\t\ttime.Sleep(200)\n\n\t\tif cmp := bytes.Compare(conn.Buffer, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to write correctly: expected %v, got %v.\",\n\t\t\t\tmsg,\n\t\t\t\tconn.Buffer)\n\t\t}\n\t}\n\n\t\/\/ Close the channel.\n\tclose(dataChan)\n}\n\n\/\/ TestReceiver tests the sck.Receiver() goroutine.\nfunc TestReceiver(t *testing.T) {\n\t\/\/ Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t\/\/ Prepare the messages we're going to receive.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":syrk!kalt@millennium.stealth.net QUIT :Gone to have lunch\"),\n\t}\n\n\t\/\/ Start the goroutine.\n\tgo Receiver(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\t\/\/ Send the message in.\n\t\tconn.SetBuffer <- msg\n\n\t\t\/\/ Receieve the message back out.\n\t\trecvMsg := <-dataChan\n\t\trecvMsg = bytes.TrimRight(recvMsg, \"\\x00\")\n\n\t\t\/\/ If they aren't the same, something horrible happened.\n\t\tif cmp := bytes.Compare(recvMsg, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to read correctly: expected %v, got %v\",\n\t\t\t\tmsg,\n\t\t\t\trecvMsg)\n\t\t}\n\t}\n\n\t\/\/ Close the channel and confirm the tests don't panic.\n\tclose(dataChan)\n\tconn.SetBuffer <- []byte(\"Last test\")\n\ttime.Sleep(100 * time.Millisecond)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tarm\/goserial\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/stampzilla-go\/nodes\/basenode\"\n\t\"github.com\/stampzilla\/stampzilla-go\/protocol\"\n)\n\ntype SerialConnection struct {\n\tName string\n\tBaud int\n\tPort io.ReadWriteCloser\n}\n\nvar node *protocol.Node\nvar state *State = &State{}\nvar serverConnection *basenode.Connection\nvar ard *SerialConnection\nvar disp *SerialConnection\n\nfunc main() {\n\tconfig := basenode.NewConfig()\n\tbasenode.SetConfig(config)\n\n\tvar arddev string\n\tvar dispdev string\n\tflag.StringVar(&arddev, \"arduino-dev\", \"\/dev\/arduino\", \"Arduino serial port\")\n\tflag.StringVar(&dispdev, \"display-dev\", \"\/dev\/poleDisplay\", \"Display serial port\")\n\n\tflag.Parse()\n\n\tlog.Info(\"Starting Aquarium node\")\n\n\t\/\/ Create new node description\n\tnode = protocol.NewNode(\"aquarium\")\n\tnode.SetState(state)\n\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeText,\n\t\tName: \"Water temperature\",\n\t\tFeedback: `WaterTemperature`,\n\t})\n\n\tserverConnection = basenode.Connect()\n\tgo monitorState(serverConnection)\n\n\t\/\/ This worker recives all incomming commands\n\tgo serverRecv(serverConnection)\n\n\t\/\/ Setup the serial connection\n\tard = &SerialConnection{Name: arddev, Baud: 115200}\n\tard.run(serverConnection, func(data string, connection *basenode.Connection) {\n\t\tprocessArduinoData(data, connection)\n\t})\n\n\tdisp = &SerialConnection{Name: dispdev, Baud: 9600}\n\tdisp.run(serverConnection, func(data string, connection *basenode.Connection) {\n\t\tlog.Debug(\"Incomming from display\", data)\n\t})\n\n\tgo updateDisplay(serverConnection, disp)\n\n\tselect {}\n}\n\nvar updateInhibit bool = false\nvar changed bool = false\n\nfunc processArduinoData(msg string, connection *basenode.Connection) { \/\/ {{{\n\tlog.Debug(msg)\n\n\tvar prevState State = *state\n\n\tvalues := strings.Split(msg, \"|\")\n\tif len(values) != 6 {\n\t\treturn\n\t}\n\n\t\/\/ Temperature\/\/ {{{\n\tvalue, err := strconv.ParseFloat(values[0], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/if value != state.WaterTemperature {\n\t\/\/changed = true\n\t\/\/}\n\tstate.WaterTemperature = value \/\/ }}}\n\n\t\/\/ Filling stat0e \/\/ {{{\n\tvalue, err = strconv.ParseFloat(values[1], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/if state.FillingTime != value {\n\t\/\/changed = true\n\t\/\/}\n\tstate.FillingTime = value \/\/ }}}\n\n\t\/\/ Cooling %\/\/ {{{\n\tvalue, err = strconv.ParseFloat(values[2], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/if state.Cooling != value {\n\t\/\/changed = true\n\t\/\/}\n\tstate.Cooling = value \/\/ }}}\n\n\tbits := values[3][0]\n\n\tstate.CirculationPumps = bits&0x01 > 0\n\tstate.Skimmer = bits&0x02 > 0\n\tstate.Heating = bits&0x04 > 0\n\tstate.Filling = bits&0x08 == 0\n\tstate.WaterLevelOk = bits&0x0F == 0\n\tstate.FilterOk = bits&0x10 == 0\n\n\t\/\/ Check if something have changed\n\tchanged = reflect.DeepEqual(prevState, *state)\n\n\tif !updateInhibit && changed {\n\t\tchanged = false\n\t\tconnection.Send <- node.Node()\n\t\tupdateInhibit = true\n\t\t\/\/log.Warn(\"Invalid pacakge: \", msg)\n\t\tgo func() {\n\t\t\t<-time.After(time.Millisecond * 100)\n\t\t\tupdateInhibit = false\n\t\t}()\n\t}\n} \/\/ }}}\nfunc updateDisplay(connection *basenode.Connection, serial *SerialConnection) { \/\/ {{{\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Second \/ 15):\n\t\t\tif serial.Port == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar msg, row1, row2 string\n\t\t\tvar wLevel string = \"OK\"\n\n\t\t\t\/\/if !state.WaterLevelOk {\n\t\t\t\/\/wLevel = \"WLVL\"\n\t\t\t\/\/}\n\t\t\tif state.FillingTime > 0 {\n\t\t\t\twLevel = \"FILL ERR\"\n\t\t\t\tif state.FillingTime < 10000 {\n\t\t\t\t\twLevel = \"FILLING\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/msg += \"\\x1B\" \/\/ Reset\n\t\t\t\/\/msg += \"\\x0E\" \/\/ Clear\n\t\t\t\/\/msg += \"\\x13\" \/\/ Cursor on\n\t\t\tmsg += \"\\x11\" \/\/ Character over write mode\n\t\t\t\/\/msg += \"\\x12\" \/\/ Vertical scroll mode\n\t\t\t\/\/msg += \"\\x13\" \/\/ Cursor on\n\t\t\tmsg += \"\\x14\" \/\/ Cursor off\n\n\t\t\tmsg += \"\\r\"\n\n\t\t\trow1 = time.Now().Format(\"15:04:05\") + strings.Repeat(\" \", 12-len(wLevel)) + wLevel\n\t\t\trow2 = \"Temp \" + strconv.FormatFloat(state.WaterTemperature, 'f', 2, 64) + \" Cool \" + strconv.FormatFloat(state.Cooling, 'f', 2, 64)\n\n\t\t\tif 20-len(row1) > 0 {\n\t\t\t\trow1 += strings.Repeat(\" \", 20-len(row1))\n\t\t\t}\n\n\t\t\tif 20-len(row2) > 0 {\n\t\t\t\trow2 += strings.Repeat(\" \", 20-len(row2))\n\t\t\t}\n\t\t\tmsg += row1[0:20]\n\t\t\tmsg += row2[0:20]\n\n\t\t\t_, err := serial.Port.Write([]byte(msg))\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Failed write to display:\", err)\n\t\t\t}\n\t\t}\n\t}\n} \/\/ }}}\n\n\/\/ WORKER that monitors the current connection state\nfunc monitorState(connection *basenode.Connection) { \/\/ {{{\n\tfor s := range connection.State {\n\t\tswitch s {\n\t\tcase basenode.ConnectionStateConnected:\n\t\t\tconnection.Send <- node.Node()\n\t\tcase basenode.ConnectionStateDisconnected:\n\t\t}\n\t}\n} \/\/ }}}\n\n\/\/ WORKER that recives all incomming commands\nfunc serverRecv(connection *basenode.Connection) { \/\/ {{{\n\tfor d := range connection.Receive {\n\t\tif err := processCommand(d); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n} \/\/ }}}\n\nfunc processCommand(cmd protocol.Command) error { \/\/ {{{\n\tif len(cmd.Args) < 1 {\n\t\treturn fmt.Errorf(\"Missing arguments, ignoring command\")\n\t}\n\n\tswitch cmd.Cmd {\n\tcase \"CirculationPumps\":\n\t\tif cmd.Args[0] != \"\" && cmd.Args[0] != \"0\" {\n\t\t\tard.Port.Write([]byte{0x02, 0x01, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x01, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"Skimmer\":\n\t\tif cmd.Args[0] != \"\" && cmd.Args[0] != \"0\" {\n\t\t\tard.Port.Write([]byte{0x02, 0x02, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x02, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"Heating\":\n\t\tif cmd.Args[0] != \"\" && cmd.Args[0] != \"0\" {\n\t\t\tard.Port.Write([]byte{0x02, 0x03, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x03, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"CoolingP\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x04, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\tcase \"CoolingI\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x05, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\tcase \"CoolingD\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x06, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\t}\n\treturn nil\n} \/\/ }}}\n\n\/\/ Serial connection workers\nfunc (config *SerialConnection) run(connection *basenode.Connection, callback func(data string, connection *basenode.Connection)) { \/\/ {{{\n\tgo func() {\n\t\tfor {\n\t\t\tconfig.connect(connection, callback)\n\t\t\t<-time.After(time.Second * 4)\n\t\t}\n\t}()\n} \/\/ }}}\nfunc (config *SerialConnection) connect(connection *basenode.Connection, callback func(data string, connection *basenode.Connection)) { \/\/ {{{\n\n\tc := &serial.Config{Name: config.Name, Baud: config.Baud}\n\tvar err error\n\n\tconfig.Port, err = serial.OpenPort(c)\n\tif err != nil {\n\t\tlog.Error(\"Serial connect failed: \", err)\n\t\treturn\n\t}\n\n\tvar incomming string = \"\"\n\n\tfor {\n\t\tbuf := make([]byte, 128)\n\n\t\tn, err := config.Port.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Serial read failed: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tincomming += string(buf[:n])\n\n\t\tfor {\n\t\t\tn := strings.Index(incomming, \"\\r\")\n\n\t\t\tif n < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tmsg := strings.TrimSpace(incomming[:n])\n\t\t\tincomming = incomming[n+1:]\n\t\t\t\/\/log.Info(msg)\n\n\t\t\tgo callback(msg, connection)\n\t\t}\n\t}\n} \/\/ }}}\n<commit_msg>New values and added a timeout when server is not reciving data<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tarm\/goserial\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/stampzilla\/stampzilla-go\/nodes\/basenode\"\n\t\"github.com\/stampzilla\/stampzilla-go\/protocol\"\n)\n\ntype SerialConnection struct {\n\tName string\n\tBaud int\n\tPort io.ReadWriteCloser\n}\n\nvar node *protocol.Node\nvar state *State = &State{}\nvar serverConnection *basenode.Connection\nvar ard *SerialConnection\nvar disp *SerialConnection\nvar frameCount int\n\nfunc main() {\n\tconfig := basenode.NewConfig()\n\tbasenode.SetConfig(config)\n\n\tvar arddev string\n\tvar dispdev string\n\tflag.StringVar(&arddev, \"arduino-dev\", \"\/dev\/arduino\", \"Arduino serial port\")\n\tflag.StringVar(&dispdev, \"display-dev\", \"\/dev\/poleDisplay\", \"Display serial port\")\n\n\tflag.Parse()\n\n\tlog.Info(\"Starting Aquarium node\")\n\n\t\/\/ Create new node description\n\tnode = protocol.NewNode(\"aquarium\")\n\tnode.SetState(state)\n\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeText,\n\t\tName: \"Water temperature\",\n\t\tFeedback: `WaterTemperature`,\n\t})\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeText,\n\t\tName: \"Water level - OK\",\n\t\tFeedback: `WaterLevelOk`,\n\t})\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeText,\n\t\tName: \"Filter - OK\",\n\t\tFeedback: `FilterOk`,\n\t})\n\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeText,\n\t\tName: \"Cooling\",\n\t\tFeedback: `Cooling`,\n\t})\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeToggle,\n\t\tName: \"Heating\",\n\t\tCommand: &protocol.Command{\n\t\t\tCmd: \"Heating\",\n\t\t\tArgs: []string{},\n\t\t},\n\t\tFeedback: `Heating`,\n\t})\n\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeToggle,\n\t\tName: \"Skimmer\",\n\t\tCommand: &protocol.Command{\n\t\t\tCmd: \"Skimmer\",\n\t\t\tArgs: []string{},\n\t\t},\n\t\tFeedback: `Skimmer`,\n\t})\n\tnode.AddElement(&protocol.Element{\n\t\tType: protocol.ElementTypeToggle,\n\t\tName: \"Circulation pumps\",\n\t\tCommand: &protocol.Command{\n\t\t\tCmd: \"CirculationPumps\",\n\t\t\tArgs: []string{},\n\t\t},\n\t\tFeedback: `CirculationPumps`,\n\t})\n\n\tserverConnection = basenode.Connect()\n\tgo monitorState(serverConnection)\n\n\t\/\/ This worker recives all incomming commands\n\tgo serverRecv(serverConnection)\n\n\t\/\/ Setup the serial connection\n\tard = &SerialConnection{Name: arddev, Baud: 115200}\n\tard.run(serverConnection, func(data string, connection *basenode.Connection) {\n\t\tprocessArduinoData(data, connection)\n\t})\n\n\tdisp = &SerialConnection{Name: dispdev, Baud: 9600}\n\tdisp.run(serverConnection, func(data string, connection *basenode.Connection) {\n\t\tlog.Debug(\"Incomming from display\", data)\n\t})\n\n\tgo updateDisplay(serverConnection, disp)\n\n\tgo func() {\n\t\tfor {\n\t\t\tfmt.Print(\"\\r\", frameCount)\n\t\t\t<-time.After(time.Millisecond * 10)\n\t\t}\n\t}()\n\n\tselect {}\n}\n\nvar updateInhibit bool = false\nvar changed bool = false\n\nfunc processArduinoData(msg string, connection *basenode.Connection) { \/\/ {{{\n\t\/\/log.Debug(msg)\n\tframeCount++\n\n\tvar prevState State = *state\n\n\tvalues := strings.Split(msg, \"|\")\n\tif len(values) != 6 {\n\t\treturn\n\t}\n\n\t\/\/ Temperature\/\/ {{{\n\tvalue, err := strconv.ParseFloat(values[0], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\tif value != state.WaterTemperature {\n\t\tchanged = true\n\t}\n\tstate.WaterTemperature = value \/\/ }}}\n\n\t\/\/ Filling stat0e \/\/ {{{\n\tvalue, err = strconv.ParseFloat(values[1], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\tif state.FillingTime != value {\n\t\tchanged = true\n\t}\n\tstate.FillingTime = value \/\/ }}}\n\n\t\/\/ Cooling %\/\/ {{{\n\tvalue, err = strconv.ParseFloat(values[2], 32)\n\tif err != nil {\n\t\treturn\n\t}\n\tif state.Cooling != value {\n\t\tchanged = true\n\t}\n\tstate.Cooling = value \/\/ }}}\n\n\tbits := values[3][0]\n\n\tstate.CirculationPumps = bits&0x01 != 0\n\tstate.Skimmer = bits&0x02 != 0\n\tstate.Heating = bits&0x04 != 0\n\tstate.Filling = bits&0x08 == 0\n\tstate.WaterLevelOk = bits&0x0F == 0\n\tstate.FilterOk = bits&0x10 == 0\n\n\t\/\/ Check if something have changed\n\tif !reflect.DeepEqual(prevState, *state) {\n\t\tchanged = true\n\t}\n\n\tif !updateInhibit && changed {\n\t\tchanged = false\n\t\t\/\/fmt.Print(\"\\n\")\n\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase connection.Send <- node.Node():\n\t\t\tcase <-time.After(time.Second * 1):\n\t\t\t\tlog.Warn(\"TIMEOUT: Failed to send update to server\")\n\t\t\t}\n\t\t}()\n\n\t\tupdateInhibit = true\n\t\t\/\/log.Warn(\"Invalid pacakge: \", msg)\n\t\tgo func() {\n\t\t\t<-time.After(time.Millisecond * 200)\n\t\t\tupdateInhibit = false\n\t\t}()\n\t}\n} \/\/ }}}\nfunc updateDisplay(connection *basenode.Connection, serial *SerialConnection) { \/\/ {{{\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Second \/ 15):\n\t\t\tif serial.Port == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar msg, row1, row2 string\n\t\t\tvar wLevel string = \"OK\"\n\n\t\t\t\/\/if !state.WaterLevelOk {\n\t\t\t\/\/wLevel = \"WLVL\"\n\t\t\t\/\/}\n\t\t\tif state.FillingTime > 0 {\n\t\t\t\twLevel = \"FILL ERR\"\n\t\t\t\tif state.FillingTime < 20000 {\n\t\t\t\t\twLevel = \"FILLING\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/msg += \"\\x1B\" \/\/ Reset\n\t\t\t\/\/msg += \"\\x0E\" \/\/ Clear\n\t\t\t\/\/msg += \"\\x13\" \/\/ Cursor on\n\t\t\tmsg += \"\\x11\" \/\/ Character over write mode\n\t\t\t\/\/msg += \"\\x12\" \/\/ Vertical scroll mode\n\t\t\t\/\/msg += \"\\x13\" \/\/ Cursor on\n\t\t\tmsg += \"\\x14\" \/\/ Cursor off\n\n\t\t\tmsg += \"\\r\"\n\n\t\t\trow1 = time.Now().Format(\"15:04:05\") + strings.Repeat(\" \", 12-len(wLevel)) + wLevel\n\t\t\trow2 = \"Temp \" + strconv.FormatFloat(state.WaterTemperature, 'f', 2, 64) + \" Cool \" + strconv.FormatFloat(state.Cooling, 'f', 2, 64)\n\n\t\t\tif 20-len(row1) > 0 {\n\t\t\t\trow1 += strings.Repeat(\" \", 20-len(row1))\n\t\t\t}\n\n\t\t\tif 20-len(row2) > 0 {\n\t\t\t\trow2 += strings.Repeat(\" \", 20-len(row2))\n\t\t\t}\n\t\t\tmsg += row1[0:20]\n\t\t\tmsg += row2[0:20]\n\n\t\t\t_, err := serial.Port.Write([]byte(msg))\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Failed write to display:\", err)\n\t\t\t}\n\t\t}\n\t}\n} \/\/ }}}\n\n\/\/ WORKER that monitors the current connection state\nfunc monitorState(connection *basenode.Connection) { \/\/ {{{\n\tfor s := range connection.State {\n\t\tswitch s {\n\t\tcase basenode.ConnectionStateConnected:\n\t\t\tconnection.Send <- node.Node()\n\t\tcase basenode.ConnectionStateDisconnected:\n\t\t}\n\t}\n} \/\/ }}}\n\n\/\/ WORKER that recives all incomming commands\nfunc serverRecv(connection *basenode.Connection) { \/\/ {{{\n\tfor d := range connection.Receive {\n\t\tif err := processCommand(d); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n} \/\/ }}}\n\nfunc processCommand(cmd protocol.Command) error { \/\/ {{{\n\tvar target bool\n\n\tif len(cmd.Args) < 1 {\n\t\tif len(cmd.Params) < 1 {\n\t\t\treturn fmt.Errorf(\"Missing arguments, ignoring command\")\n\t\t} else {\n\t\t\ttarget = cmd.Params[0] != \"\" && cmd.Params[0] != \"false\"\n\t\t}\n\t} else {\n\t\ttarget = cmd.Args[0] != \"\" && cmd.Args[0] != \"0\"\n\t}\n\n\tswitch cmd.Cmd {\n\tcase \"CirculationPumps\":\n\t\tif target {\n\t\t\tard.Port.Write([]byte{0x02, 0x01, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x01, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"Skimmer\":\n\t\tif target {\n\t\t\tard.Port.Write([]byte{0x02, 0x02, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x02, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"Heating\":\n\t\tif target {\n\t\t\tard.Port.Write([]byte{0x02, 0x03, 0x01, 0x03}) \/\/ Turn on\n\t\t} else {\n\t\t\tard.Port.Write([]byte{0x02, 0x03, 0x00, 0x03}) \/\/ Turn off\n\t\t}\n\t\tbreak\n\tcase \"CoolingP\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x04, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\tcase \"CoolingI\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x05, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\tcase \"CoolingD\":\n\t\ti, err := strconv.Atoi(cmd.Args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to decode arg[0] to int %s %s\", err, cmd.Args[0])\n\t\t}\n\n\t\tard.Port.Write([]byte{0x02, 0x06, byte(i), 0x03}) \/\/ Turn on\n\t\tbreak\n\t}\n\treturn nil\n} \/\/ }}}\n\n\/\/ Serial connection workers\nfunc (config *SerialConnection) run(connection *basenode.Connection, callback func(data string, connection *basenode.Connection)) { \/\/ {{{\n\tgo func() {\n\t\tfor {\n\t\t\tconfig.connect(connection, callback)\n\t\t\t<-time.After(time.Second * 4)\n\t\t}\n\t}()\n} \/\/ }}}\nfunc (config *SerialConnection) connect(connection *basenode.Connection, callback func(data string, connection *basenode.Connection)) { \/\/ {{{\n\n\tc := &serial.Config{Name: config.Name, Baud: config.Baud}\n\tvar err error\n\n\tconfig.Port, err = serial.OpenPort(c)\n\tif err != nil {\n\t\tlog.Error(\"Serial connect failed: \", err)\n\t\treturn\n\t}\n\n\tvar incomming string = \"\"\n\n\tfor {\n\t\tbuf := make([]byte, 128)\n\n\t\tn, err := config.Port.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Serial read failed: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tincomming += string(buf[:n])\n\n\t\tfor {\n\t\t\tn := strings.Index(incomming, \"\\r\")\n\n\t\t\tif n < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tmsg := strings.TrimSpace(incomming[:n])\n\t\t\tincomming = incomming[n+1:]\n\t\t\tfmt.Print(msg, \"\\r\")\n\n\t\t\tgo callback(msg, connection)\n\t\t}\n\t}\n} \/\/ }}}\n<|endoftext|>"} {"text":"<commit_before>package structs\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ note: this is aliased so that it's more noticeable if someone\n\t\/\/ accidentally swaps it out for math\/rand via running goimports\n\tcryptorand \"crypto\/rand\"\n\n\t\"github.com\/hashicorp\/nomad\/helper\"\n\t\"github.com\/hashicorp\/nomad\/helper\/uuid\"\n)\n\nconst (\n\t\/\/ SecureVariablesUpsertRPCMethod is the RPC method for upserting\n\t\/\/ secure variables into Nomad state.\n\t\/\/\n\t\/\/ Args: SecureVariablesUpsertRequest\n\t\/\/ Reply: SecureVariablesUpsertResponse\n\tSecureVariablesUpsertRPCMethod = \"SecureVariables.Upsert\"\n\n\t\/\/ SecureVariablesDeleteRPCMethod is the RPC method for deleting\n\t\/\/ a secure variable by its namespace and path.\n\t\/\/\n\t\/\/ Args: SecureVariablesDeleteRequest\n\t\/\/ Reply: SecureVariablesDeleteResponse\n\tSecureVariablesDeleteRPCMethod = \"SecureVariables.Delete\"\n\n\t\/\/ SecureVariablesListRPCMethod is the RPC method for listing secure\n\t\/\/ variables within Nomad.\n\t\/\/\n\t\/\/ Args: SecureVariablesListRequest\n\t\/\/ Reply: SecureVariablesListResponse\n\tSecureVariablesListRPCMethod = \"SecureVariables.List\"\n\n\t\/\/ SecureVariablesGetServiceRPCMethod is the RPC method for fetching a\n\t\/\/ secure variable according to its namepace and path.\n\t\/\/\n\t\/\/ Args: SecureVariablesByNameRequest\n\t\/\/ Reply: SecureVariablesByNameResponse\n\tSecureVariablesReadRPCMethod = \"SecureVariables.Read\"\n)\n\n\/\/ SecureVariableMetadata is the metadata envelope for a Secure Variable, it\n\/\/ is the list object and is shared data between an SecureVariableEncrypted and\n\/\/ a SecureVariableDecrypted object.\ntype SecureVariableMetadata struct {\n\tNamespace string\n\tPath string\n\tCreateIndex uint64\n\tCreateTime int64\n\tModifyIndex uint64\n\tModifyTime int64\n}\n\n\/\/ SecureVariableEncrypted structs are returned from the Encrypter's encrypt\n\/\/ method. They are the only form that should ever be persisted to storage.\ntype SecureVariableEncrypted struct {\n\tSecureVariableMetadata\n\tSecureVariableData\n}\n\n\/\/ SecureVariableData is the secret data for a Secure Variable\ntype SecureVariableData struct {\n\tData []byte \/\/ includes nonce\n\tKeyID string \/\/ ID of root key used to encrypt this entry\n}\n\n\/\/ SecureVariableDecrypted structs are returned from the Encrypter's decrypt\n\/\/ method. Since they contains sensitive material, they should never be\n\/\/ persisted to disk.\ntype SecureVariableDecrypted struct {\n\tSecureVariableMetadata\n\tItems SecureVariableItems\n}\n\n\/\/ SecureVariableItems are the actual secrets stored in a secure variable. They\n\/\/ are always encrypted and decrypted as a single unit.\ntype SecureVariableItems map[string]string\n\n\/\/ Equals checks both the metadata and items in a SecureVariableDecrypted\n\/\/ struct\nfunc (v1 SecureVariableDecrypted) Equals(v2 SecureVariableDecrypted) bool {\n\treturn v1.SecureVariableMetadata.Equals(v2.SecureVariableMetadata) &&\n\t\tv1.Items.Equals(v2.Items)\n}\n\n\/\/ Equals is a convenience method to provide similar equality checking\n\/\/ syntax for metadata and the SecureVariablesData or SecureVariableItems\n\/\/ struct\nfunc (sv SecureVariableMetadata) Equals(sv2 SecureVariableMetadata) bool {\n\treturn sv == sv2\n}\n\n\/\/ Equals performs deep equality checking on the cleartext items\n\/\/ of a SecureVariableDecrypted. Uses reflect.DeepEqual\nfunc (i1 SecureVariableItems) Equals(i2 SecureVariableItems) bool {\n\treturn reflect.DeepEqual(i1, i2)\n}\n\n\/\/ Equals checks both the metadata and encrypted data for a\n\/\/ SecureVariableEncrypted struct\nfunc (v1 SecureVariableEncrypted) Equals(v2 SecureVariableEncrypted) bool {\n\treturn v1.SecureVariableMetadata.Equals(v2.SecureVariableMetadata) &&\n\t\tv1.SecureVariableData.Equals(v2.SecureVariableData)\n}\n\n\/\/ Equals performs deep equality checking on the encrypted data part\n\/\/ of a SecureVariableEncrypted\nfunc (d1 SecureVariableData) Equals(d2 SecureVariableData) bool {\n\treturn d1.KeyID == d2.KeyID &&\n\t\tbytes.Equal(d1.Data, d2.Data)\n}\n\nfunc (sv SecureVariableDecrypted) Copy() SecureVariableDecrypted {\n\treturn SecureVariableDecrypted{\n\t\tSecureVariableMetadata: sv.SecureVariableMetadata,\n\t\tItems: sv.Items.Copy(),\n\t}\n}\n\nfunc (sv SecureVariableItems) Copy() SecureVariableItems {\n\tout := make(SecureVariableItems, len(sv))\n\tfor k, v := range sv {\n\t\tout[k] = v\n\t}\n\treturn out\n}\n\nfunc (sv SecureVariableEncrypted) Copy() SecureVariableEncrypted {\n\treturn SecureVariableEncrypted{\n\t\tSecureVariableMetadata: sv.SecureVariableMetadata,\n\t\tSecureVariableData: sv.SecureVariableData.Copy(),\n\t}\n}\n\nfunc (sv SecureVariableData) Copy() SecureVariableData {\n\tout := make([]byte, len(sv.Data))\n\tcopy(out, sv.Data)\n\treturn SecureVariableData{\n\t\tData: out,\n\t\tKeyID: sv.KeyID,\n\t}\n}\n\nfunc (sv SecureVariableDecrypted) Validate() error {\n\n\tif len(sv.Path) == 0 {\n\t\treturn fmt.Errorf(\"variable requires path\")\n\t}\n\tparts := strings.Split(sv.Path, \"\/\")\n\tswitch {\n\tcase len(parts) == 1 && parts[0] == \"nomad\":\n\t\treturn fmt.Errorf(\"\\\"nomad\\\" is a reserved top-level directory path, but you may write variables to \\\"nomad\/jobs\\\" or below\")\n\tcase len(parts) >= 2 && parts[0] == \"nomad\" && parts[1] != \"jobs\":\n\t\treturn fmt.Errorf(\"only paths at \\\"nomad\/jobs\\\" or below are valid paths under the top-level \\\"nomad\\\" directory\")\n\t}\n\n\tif len(sv.Items) == 0 {\n\t\treturn errors.New(\"empty variables are invalid\")\n\t}\n\tif sv.Namespace == AllNamespacesSentinel {\n\t\treturn errors.New(\"can not target wildcard (\\\"*\\\")namespace\")\n\t}\n\treturn nil\n}\n\nfunc (sv *SecureVariableDecrypted) Canonicalize() {\n\tif sv.Namespace == \"\" {\n\t\tsv.Namespace = DefaultNamespace\n\t}\n}\n\n\/\/ GetNamespace returns the secure variable's namespace. Used for pagination.\nfunc (sv *SecureVariableMetadata) Copy() *SecureVariableMetadata {\n\tvar out SecureVariableMetadata = *sv\n\treturn &out\n}\n\n\/\/ GetNamespace returns the secure variable's namespace. Used for pagination.\nfunc (sv SecureVariableMetadata) GetNamespace() string {\n\treturn sv.Namespace\n}\n\n\/\/ GetID returns the secure variable's path. Used for pagination.\nfunc (sv SecureVariableMetadata) GetID() string {\n\treturn sv.Path\n}\n\n\/\/ GetCreateIndex returns the secure variable's create index. Used for pagination.\nfunc (sv SecureVariableMetadata) GetCreateIndex() uint64 {\n\treturn sv.CreateIndex\n}\n\n\/\/ SecureVariablesQuota is used to track the total size of secure\n\/\/ variables entries per namespace. The total length of\n\/\/ SecureVariable.EncryptedData will be added to the SecureVariablesQuota\n\/\/ table in the same transaction as a write, update, or delete.\ntype SecureVariablesQuota struct {\n\tNamespace string\n\tSize uint64\n\tCreateIndex uint64\n\tModifyIndex uint64\n}\n\nfunc (svq *SecureVariablesQuota) Copy() *SecureVariablesQuota {\n\tif svq == nil {\n\t\treturn nil\n\t}\n\tnq := new(SecureVariablesQuota)\n\t*nq = *svq\n\treturn nq\n}\n\ntype SecureVariablesUpsertRequest struct {\n\tData []*SecureVariableDecrypted\n\tCheckIndex *uint64\n\tWriteRequest\n}\n\nfunc (svur *SecureVariablesUpsertRequest) SetCheckIndex(ci uint64) {\n\tsvur.CheckIndex = &ci\n}\n\ntype SecureVariablesEncryptedUpsertRequest struct {\n\tData []*SecureVariableEncrypted\n\tWriteRequest\n}\n\ntype SecureVariablesUpsertResponse struct {\n\tConflicts []*SecureVariableDecrypted\n\tWriteMeta\n}\n\ntype SecureVariablesListRequest struct {\n\tQueryOptions\n}\n\ntype SecureVariablesListResponse struct {\n\tData []*SecureVariableMetadata\n\tQueryMeta\n}\n\ntype SecureVariablesReadRequest struct {\n\tPath string\n\tQueryOptions\n}\n\ntype SecureVariablesReadResponse struct {\n\tData *SecureVariableDecrypted\n\tQueryMeta\n}\n\ntype SecureVariablesDeleteRequest struct {\n\tPath string\n\tCheckIndex *uint64\n\tWriteRequest\n}\n\nfunc (svdr *SecureVariablesDeleteRequest) SetCheckIndex(ci uint64) {\n\tsvdr.CheckIndex = &ci\n}\n\ntype SecureVariablesDeleteResponse struct {\n\tConflict *SecureVariableDecrypted\n\tWriteMeta\n}\n\n\/\/ RootKey is used to encrypt and decrypt secure variables. It is\n\/\/ never stored in raft.\ntype RootKey struct {\n\tMeta *RootKeyMeta\n\tKey []byte \/\/ serialized to keystore as base64 blob\n}\n\n\/\/ NewRootKey returns a new root key and its metadata.\nfunc NewRootKey(algorithm EncryptionAlgorithm) (*RootKey, error) {\n\tmeta := NewRootKeyMeta()\n\tmeta.Algorithm = algorithm\n\n\trootKey := &RootKey{\n\t\tMeta: meta,\n\t}\n\n\tswitch algorithm {\n\tcase EncryptionAlgorithmAES256GCM:\n\t\tconst keyBytes = 32\n\t\tkey := make([]byte, keyBytes)\n\t\tn, err := cryptorand.Read(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif n < keyBytes {\n\t\t\treturn nil, fmt.Errorf(\"failed to generate key: entropy exhausted\")\n\t\t}\n\t\trootKey.Key = key\n\t}\n\n\treturn rootKey, nil\n}\n\n\/\/ RootKeyMeta is the metadata used to refer to a RootKey. It is\n\/\/ stored in raft.\ntype RootKeyMeta struct {\n\tKeyID string \/\/ UUID\n\tAlgorithm EncryptionAlgorithm\n\tCreateTime time.Time\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tState RootKeyState\n}\n\n\/\/ RootKeyState enum describes the lifecycle of a root key.\ntype RootKeyState string\n\nconst (\n\tRootKeyStateInactive RootKeyState = \"inactive\"\n\tRootKeyStateActive = \"active\"\n\tRootKeyStateRekeying = \"rekeying\"\n\tRootKeyStateDeprecated = \"deprecated\"\n)\n\n\/\/ NewRootKeyMeta returns a new RootKeyMeta with default values\nfunc NewRootKeyMeta() *RootKeyMeta {\n\treturn &RootKeyMeta{\n\t\tKeyID: uuid.Generate(),\n\t\tAlgorithm: EncryptionAlgorithmAES256GCM,\n\t\tState: RootKeyStateInactive,\n\t\tCreateTime: time.Now(),\n\t}\n}\n\n\/\/ RootKeyMetaStub is for serializing root key metadata to the\n\/\/ keystore, not for the List API. It excludes frequently-changing\n\/\/ fields such as ModifyIndex so we don't have to sync them to the\n\/\/ on-disk keystore when the fields are already in raft.\ntype RootKeyMetaStub struct {\n\tKeyID string\n\tAlgorithm EncryptionAlgorithm\n\tCreateTime time.Time\n\tState RootKeyState\n}\n\n\/\/ Active indicates his key is the one currently being used for\n\/\/ crypto operations (at most one key can be Active)\nfunc (rkm *RootKeyMeta) Active() bool {\n\treturn rkm.State == RootKeyStateActive\n}\n\nfunc (rkm *RootKeyMeta) SetActive() {\n\trkm.State = RootKeyStateActive\n}\n\n\/\/ Rekeying indicates that variables encrypted with this key should be\n\/\/ rekeyed\nfunc (rkm *RootKeyMeta) Rekeying() bool {\n\treturn rkm.State == RootKeyStateRekeying\n}\n\nfunc (rkm *RootKeyMeta) SetRekeying() {\n\trkm.State = RootKeyStateRekeying\n}\n\nfunc (rkm *RootKeyMeta) SetInactive() {\n\trkm.State = RootKeyStateInactive\n}\n\n\/\/ Deprecated indicates that variables encrypted with this key\n\/\/ have been rekeyed\nfunc (rkm *RootKeyMeta) Deprecated() bool {\n\treturn rkm.State == RootKeyStateDeprecated\n}\n\nfunc (rkm *RootKeyMeta) SetDeprecated() {\n\trkm.State = RootKeyStateDeprecated\n}\n\nfunc (rkm *RootKeyMeta) Stub() *RootKeyMetaStub {\n\tif rkm == nil {\n\t\treturn nil\n\t}\n\treturn &RootKeyMetaStub{\n\t\tKeyID: rkm.KeyID,\n\t\tAlgorithm: rkm.Algorithm,\n\t\tCreateTime: rkm.CreateTime,\n\t\tState: rkm.State,\n\t}\n\n}\nfunc (rkm *RootKeyMeta) Copy() *RootKeyMeta {\n\tif rkm == nil {\n\t\treturn nil\n\t}\n\tout := *rkm\n\treturn &out\n}\n\nfunc (rkm *RootKeyMeta) Validate() error {\n\tif rkm == nil {\n\t\treturn fmt.Errorf(\"root key metadata is required\")\n\t}\n\tif rkm.KeyID == \"\" || !helper.IsUUID(rkm.KeyID) {\n\t\treturn fmt.Errorf(\"root key UUID is required\")\n\t}\n\tif rkm.Algorithm == \"\" {\n\t\treturn fmt.Errorf(\"root key algorithm is required\")\n\t}\n\tswitch rkm.State {\n\tcase RootKeyStateInactive, RootKeyStateActive,\n\t\tRootKeyStateRekeying, RootKeyStateDeprecated:\n\tdefault:\n\t\treturn fmt.Errorf(\"root key state %q is invalid\", rkm.State)\n\t}\n\treturn nil\n}\n\n\/\/ EncryptionAlgorithm chooses which algorithm is used for\n\/\/ encrypting \/ decrypting entries with this key\ntype EncryptionAlgorithm string\n\nconst (\n\tEncryptionAlgorithmAES256GCM EncryptionAlgorithm = \"aes256-gcm\"\n)\n\ntype KeyringRotateRootKeyRequest struct {\n\tAlgorithm EncryptionAlgorithm\n\tFull bool\n\tWriteRequest\n}\n\n\/\/ KeyringRotateRootKeyResponse returns the full key metadata\ntype KeyringRotateRootKeyResponse struct {\n\tKey *RootKeyMeta\n\tWriteMeta\n}\n\ntype KeyringListRootKeyMetaRequest struct {\n\t\/\/ TODO: do we need any fields here?\n\tQueryOptions\n}\n\ntype KeyringListRootKeyMetaResponse struct {\n\tKeys []*RootKeyMeta\n\tQueryMeta\n}\n\n\/\/ KeyringUpdateRootKeyRequest is used internally for key replication\n\/\/ only and for keyring restores. The RootKeyMeta will be extracted\n\/\/ for applying to the FSM with the KeyringUpdateRootKeyMetaRequest\n\/\/ (see below)\ntype KeyringUpdateRootKeyRequest struct {\n\tRootKey *RootKey\n\tRekey bool\n\tWriteRequest\n}\n\ntype KeyringUpdateRootKeyResponse struct {\n\tWriteMeta\n}\n\n\/\/ KeyringGetRootKeyRequest is used internally for key replication\n\/\/ only and for keyring restores.\ntype KeyringGetRootKeyRequest struct {\n\tKeyID string\n\tQueryOptions\n}\n\ntype KeyringGetRootKeyResponse struct {\n\tKey *RootKey\n\tQueryMeta\n}\n\n\/\/ KeyringUpdateRootKeyMetaRequest is used internally for key\n\/\/ replication so that we have a request wrapper for writing the\n\/\/ metadata to the FSM without including the key material\ntype KeyringUpdateRootKeyMetaRequest struct {\n\tRootKeyMeta *RootKeyMeta\n\tRekey bool\n\tWriteRequest\n}\n\ntype KeyringUpdateRootKeyMetaResponse struct {\n\tWriteMeta\n}\n\ntype KeyringDeleteRootKeyRequest struct {\n\tKeyID string\n\tWriteRequest\n}\n\ntype KeyringDeleteRootKeyResponse struct {\n\tWriteMeta\n}\n<commit_msg>secure vars: limit maximum size of variable data (#13743)<commit_after>package structs\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ note: this is aliased so that it's more noticeable if someone\n\t\/\/ accidentally swaps it out for math\/rand via running goimports\n\tcryptorand \"crypto\/rand\"\n\n\t\"github.com\/hashicorp\/nomad\/helper\"\n\t\"github.com\/hashicorp\/nomad\/helper\/uuid\"\n)\n\nconst (\n\t\/\/ SecureVariablesUpsertRPCMethod is the RPC method for upserting\n\t\/\/ secure variables into Nomad state.\n\t\/\/\n\t\/\/ Args: SecureVariablesUpsertRequest\n\t\/\/ Reply: SecureVariablesUpsertResponse\n\tSecureVariablesUpsertRPCMethod = \"SecureVariables.Upsert\"\n\n\t\/\/ SecureVariablesDeleteRPCMethod is the RPC method for deleting\n\t\/\/ a secure variable by its namespace and path.\n\t\/\/\n\t\/\/ Args: SecureVariablesDeleteRequest\n\t\/\/ Reply: SecureVariablesDeleteResponse\n\tSecureVariablesDeleteRPCMethod = \"SecureVariables.Delete\"\n\n\t\/\/ SecureVariablesListRPCMethod is the RPC method for listing secure\n\t\/\/ variables within Nomad.\n\t\/\/\n\t\/\/ Args: SecureVariablesListRequest\n\t\/\/ Reply: SecureVariablesListResponse\n\tSecureVariablesListRPCMethod = \"SecureVariables.List\"\n\n\t\/\/ SecureVariablesGetServiceRPCMethod is the RPC method for fetching a\n\t\/\/ secure variable according to its namepace and path.\n\t\/\/\n\t\/\/ Args: SecureVariablesByNameRequest\n\t\/\/ Reply: SecureVariablesByNameResponse\n\tSecureVariablesReadRPCMethod = \"SecureVariables.Read\"\n\n\t\/\/ maxVariableSize is the maximum size of the unencrypted contents of\n\t\/\/ a variable. This size is deliberately set low and is not\n\t\/\/ configurable, to discourage DoS'ing the cluster\n\tmaxVariableSize = 16384\n)\n\n\/\/ SecureVariableMetadata is the metadata envelope for a Secure Variable, it\n\/\/ is the list object and is shared data between an SecureVariableEncrypted and\n\/\/ a SecureVariableDecrypted object.\ntype SecureVariableMetadata struct {\n\tNamespace string\n\tPath string\n\tCreateIndex uint64\n\tCreateTime int64\n\tModifyIndex uint64\n\tModifyTime int64\n}\n\n\/\/ SecureVariableEncrypted structs are returned from the Encrypter's encrypt\n\/\/ method. They are the only form that should ever be persisted to storage.\ntype SecureVariableEncrypted struct {\n\tSecureVariableMetadata\n\tSecureVariableData\n}\n\n\/\/ SecureVariableData is the secret data for a Secure Variable\ntype SecureVariableData struct {\n\tData []byte \/\/ includes nonce\n\tKeyID string \/\/ ID of root key used to encrypt this entry\n}\n\n\/\/ SecureVariableDecrypted structs are returned from the Encrypter's decrypt\n\/\/ method. Since they contains sensitive material, they should never be\n\/\/ persisted to disk.\ntype SecureVariableDecrypted struct {\n\tSecureVariableMetadata\n\tItems SecureVariableItems\n}\n\n\/\/ SecureVariableItems are the actual secrets stored in a secure variable. They\n\/\/ are always encrypted and decrypted as a single unit.\ntype SecureVariableItems map[string]string\n\nfunc (svi SecureVariableItems) Size() uint64 {\n\tvar out uint64\n\tfor k, v := range svi {\n\t\tout += uint64(len(k))\n\t\tout += uint64(len(v))\n\t}\n\treturn out\n}\n\n\/\/ Equals checks both the metadata and items in a SecureVariableDecrypted\n\/\/ struct\nfunc (v1 SecureVariableDecrypted) Equals(v2 SecureVariableDecrypted) bool {\n\treturn v1.SecureVariableMetadata.Equals(v2.SecureVariableMetadata) &&\n\t\tv1.Items.Equals(v2.Items)\n}\n\n\/\/ Equals is a convenience method to provide similar equality checking\n\/\/ syntax for metadata and the SecureVariablesData or SecureVariableItems\n\/\/ struct\nfunc (sv SecureVariableMetadata) Equals(sv2 SecureVariableMetadata) bool {\n\treturn sv == sv2\n}\n\n\/\/ Equals performs deep equality checking on the cleartext items\n\/\/ of a SecureVariableDecrypted. Uses reflect.DeepEqual\nfunc (i1 SecureVariableItems) Equals(i2 SecureVariableItems) bool {\n\treturn reflect.DeepEqual(i1, i2)\n}\n\n\/\/ Equals checks both the metadata and encrypted data for a\n\/\/ SecureVariableEncrypted struct\nfunc (v1 SecureVariableEncrypted) Equals(v2 SecureVariableEncrypted) bool {\n\treturn v1.SecureVariableMetadata.Equals(v2.SecureVariableMetadata) &&\n\t\tv1.SecureVariableData.Equals(v2.SecureVariableData)\n}\n\n\/\/ Equals performs deep equality checking on the encrypted data part\n\/\/ of a SecureVariableEncrypted\nfunc (d1 SecureVariableData) Equals(d2 SecureVariableData) bool {\n\treturn d1.KeyID == d2.KeyID &&\n\t\tbytes.Equal(d1.Data, d2.Data)\n}\n\nfunc (sv SecureVariableDecrypted) Copy() SecureVariableDecrypted {\n\treturn SecureVariableDecrypted{\n\t\tSecureVariableMetadata: sv.SecureVariableMetadata,\n\t\tItems: sv.Items.Copy(),\n\t}\n}\n\nfunc (sv SecureVariableItems) Copy() SecureVariableItems {\n\tout := make(SecureVariableItems, len(sv))\n\tfor k, v := range sv {\n\t\tout[k] = v\n\t}\n\treturn out\n}\n\nfunc (sv SecureVariableEncrypted) Copy() SecureVariableEncrypted {\n\treturn SecureVariableEncrypted{\n\t\tSecureVariableMetadata: sv.SecureVariableMetadata,\n\t\tSecureVariableData: sv.SecureVariableData.Copy(),\n\t}\n}\n\nfunc (sv SecureVariableData) Copy() SecureVariableData {\n\tout := make([]byte, len(sv.Data))\n\tcopy(out, sv.Data)\n\treturn SecureVariableData{\n\t\tData: out,\n\t\tKeyID: sv.KeyID,\n\t}\n}\n\nfunc (sv SecureVariableDecrypted) Validate() error {\n\n\tif len(sv.Path) == 0 {\n\t\treturn fmt.Errorf(\"variable requires path\")\n\t}\n\tparts := strings.Split(sv.Path, \"\/\")\n\tswitch {\n\tcase len(parts) == 1 && parts[0] == \"nomad\":\n\t\treturn fmt.Errorf(\"\\\"nomad\\\" is a reserved top-level directory path, but you may write variables to \\\"nomad\/jobs\\\" or below\")\n\tcase len(parts) >= 2 && parts[0] == \"nomad\" && parts[1] != \"jobs\":\n\t\treturn fmt.Errorf(\"only paths at \\\"nomad\/jobs\\\" or below are valid paths under the top-level \\\"nomad\\\" directory\")\n\t}\n\n\tif len(sv.Items) == 0 {\n\t\treturn errors.New(\"empty variables are invalid\")\n\t}\n\tif sv.Items.Size() > maxVariableSize {\n\t\treturn errors.New(\"variables are limited to 16KiB in total size\")\n\t}\n\tif sv.Namespace == AllNamespacesSentinel {\n\t\treturn errors.New(\"can not target wildcard (\\\"*\\\")namespace\")\n\t}\n\treturn nil\n}\n\nfunc (sv *SecureVariableDecrypted) Canonicalize() {\n\tif sv.Namespace == \"\" {\n\t\tsv.Namespace = DefaultNamespace\n\t}\n}\n\n\/\/ GetNamespace returns the secure variable's namespace. Used for pagination.\nfunc (sv *SecureVariableMetadata) Copy() *SecureVariableMetadata {\n\tvar out SecureVariableMetadata = *sv\n\treturn &out\n}\n\n\/\/ GetNamespace returns the secure variable's namespace. Used for pagination.\nfunc (sv SecureVariableMetadata) GetNamespace() string {\n\treturn sv.Namespace\n}\n\n\/\/ GetID returns the secure variable's path. Used for pagination.\nfunc (sv SecureVariableMetadata) GetID() string {\n\treturn sv.Path\n}\n\n\/\/ GetCreateIndex returns the secure variable's create index. Used for pagination.\nfunc (sv SecureVariableMetadata) GetCreateIndex() uint64 {\n\treturn sv.CreateIndex\n}\n\n\/\/ SecureVariablesQuota is used to track the total size of secure\n\/\/ variables entries per namespace. The total length of\n\/\/ SecureVariable.EncryptedData will be added to the SecureVariablesQuota\n\/\/ table in the same transaction as a write, update, or delete.\ntype SecureVariablesQuota struct {\n\tNamespace string\n\tSize uint64\n\tCreateIndex uint64\n\tModifyIndex uint64\n}\n\nfunc (svq *SecureVariablesQuota) Copy() *SecureVariablesQuota {\n\tif svq == nil {\n\t\treturn nil\n\t}\n\tnq := new(SecureVariablesQuota)\n\t*nq = *svq\n\treturn nq\n}\n\ntype SecureVariablesUpsertRequest struct {\n\tData []*SecureVariableDecrypted\n\tCheckIndex *uint64\n\tWriteRequest\n}\n\nfunc (svur *SecureVariablesUpsertRequest) SetCheckIndex(ci uint64) {\n\tsvur.CheckIndex = &ci\n}\n\ntype SecureVariablesEncryptedUpsertRequest struct {\n\tData []*SecureVariableEncrypted\n\tWriteRequest\n}\n\ntype SecureVariablesUpsertResponse struct {\n\tConflicts []*SecureVariableDecrypted\n\tWriteMeta\n}\n\ntype SecureVariablesListRequest struct {\n\tQueryOptions\n}\n\ntype SecureVariablesListResponse struct {\n\tData []*SecureVariableMetadata\n\tQueryMeta\n}\n\ntype SecureVariablesReadRequest struct {\n\tPath string\n\tQueryOptions\n}\n\ntype SecureVariablesReadResponse struct {\n\tData *SecureVariableDecrypted\n\tQueryMeta\n}\n\ntype SecureVariablesDeleteRequest struct {\n\tPath string\n\tCheckIndex *uint64\n\tWriteRequest\n}\n\nfunc (svdr *SecureVariablesDeleteRequest) SetCheckIndex(ci uint64) {\n\tsvdr.CheckIndex = &ci\n}\n\ntype SecureVariablesDeleteResponse struct {\n\tConflict *SecureVariableDecrypted\n\tWriteMeta\n}\n\n\/\/ RootKey is used to encrypt and decrypt secure variables. It is\n\/\/ never stored in raft.\ntype RootKey struct {\n\tMeta *RootKeyMeta\n\tKey []byte \/\/ serialized to keystore as base64 blob\n}\n\n\/\/ NewRootKey returns a new root key and its metadata.\nfunc NewRootKey(algorithm EncryptionAlgorithm) (*RootKey, error) {\n\tmeta := NewRootKeyMeta()\n\tmeta.Algorithm = algorithm\n\n\trootKey := &RootKey{\n\t\tMeta: meta,\n\t}\n\n\tswitch algorithm {\n\tcase EncryptionAlgorithmAES256GCM:\n\t\tconst keyBytes = 32\n\t\tkey := make([]byte, keyBytes)\n\t\tn, err := cryptorand.Read(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif n < keyBytes {\n\t\t\treturn nil, fmt.Errorf(\"failed to generate key: entropy exhausted\")\n\t\t}\n\t\trootKey.Key = key\n\t}\n\n\treturn rootKey, nil\n}\n\n\/\/ RootKeyMeta is the metadata used to refer to a RootKey. It is\n\/\/ stored in raft.\ntype RootKeyMeta struct {\n\tKeyID string \/\/ UUID\n\tAlgorithm EncryptionAlgorithm\n\tCreateTime time.Time\n\tCreateIndex uint64\n\tModifyIndex uint64\n\tState RootKeyState\n}\n\n\/\/ RootKeyState enum describes the lifecycle of a root key.\ntype RootKeyState string\n\nconst (\n\tRootKeyStateInactive RootKeyState = \"inactive\"\n\tRootKeyStateActive = \"active\"\n\tRootKeyStateRekeying = \"rekeying\"\n\tRootKeyStateDeprecated = \"deprecated\"\n)\n\n\/\/ NewRootKeyMeta returns a new RootKeyMeta with default values\nfunc NewRootKeyMeta() *RootKeyMeta {\n\treturn &RootKeyMeta{\n\t\tKeyID: uuid.Generate(),\n\t\tAlgorithm: EncryptionAlgorithmAES256GCM,\n\t\tState: RootKeyStateInactive,\n\t\tCreateTime: time.Now(),\n\t}\n}\n\n\/\/ RootKeyMetaStub is for serializing root key metadata to the\n\/\/ keystore, not for the List API. It excludes frequently-changing\n\/\/ fields such as ModifyIndex so we don't have to sync them to the\n\/\/ on-disk keystore when the fields are already in raft.\ntype RootKeyMetaStub struct {\n\tKeyID string\n\tAlgorithm EncryptionAlgorithm\n\tCreateTime time.Time\n\tState RootKeyState\n}\n\n\/\/ Active indicates his key is the one currently being used for\n\/\/ crypto operations (at most one key can be Active)\nfunc (rkm *RootKeyMeta) Active() bool {\n\treturn rkm.State == RootKeyStateActive\n}\n\nfunc (rkm *RootKeyMeta) SetActive() {\n\trkm.State = RootKeyStateActive\n}\n\n\/\/ Rekeying indicates that variables encrypted with this key should be\n\/\/ rekeyed\nfunc (rkm *RootKeyMeta) Rekeying() bool {\n\treturn rkm.State == RootKeyStateRekeying\n}\n\nfunc (rkm *RootKeyMeta) SetRekeying() {\n\trkm.State = RootKeyStateRekeying\n}\n\nfunc (rkm *RootKeyMeta) SetInactive() {\n\trkm.State = RootKeyStateInactive\n}\n\n\/\/ Deprecated indicates that variables encrypted with this key\n\/\/ have been rekeyed\nfunc (rkm *RootKeyMeta) Deprecated() bool {\n\treturn rkm.State == RootKeyStateDeprecated\n}\n\nfunc (rkm *RootKeyMeta) SetDeprecated() {\n\trkm.State = RootKeyStateDeprecated\n}\n\nfunc (rkm *RootKeyMeta) Stub() *RootKeyMetaStub {\n\tif rkm == nil {\n\t\treturn nil\n\t}\n\treturn &RootKeyMetaStub{\n\t\tKeyID: rkm.KeyID,\n\t\tAlgorithm: rkm.Algorithm,\n\t\tCreateTime: rkm.CreateTime,\n\t\tState: rkm.State,\n\t}\n\n}\nfunc (rkm *RootKeyMeta) Copy() *RootKeyMeta {\n\tif rkm == nil {\n\t\treturn nil\n\t}\n\tout := *rkm\n\treturn &out\n}\n\nfunc (rkm *RootKeyMeta) Validate() error {\n\tif rkm == nil {\n\t\treturn fmt.Errorf(\"root key metadata is required\")\n\t}\n\tif rkm.KeyID == \"\" || !helper.IsUUID(rkm.KeyID) {\n\t\treturn fmt.Errorf(\"root key UUID is required\")\n\t}\n\tif rkm.Algorithm == \"\" {\n\t\treturn fmt.Errorf(\"root key algorithm is required\")\n\t}\n\tswitch rkm.State {\n\tcase RootKeyStateInactive, RootKeyStateActive,\n\t\tRootKeyStateRekeying, RootKeyStateDeprecated:\n\tdefault:\n\t\treturn fmt.Errorf(\"root key state %q is invalid\", rkm.State)\n\t}\n\treturn nil\n}\n\n\/\/ EncryptionAlgorithm chooses which algorithm is used for\n\/\/ encrypting \/ decrypting entries with this key\ntype EncryptionAlgorithm string\n\nconst (\n\tEncryptionAlgorithmAES256GCM EncryptionAlgorithm = \"aes256-gcm\"\n)\n\ntype KeyringRotateRootKeyRequest struct {\n\tAlgorithm EncryptionAlgorithm\n\tFull bool\n\tWriteRequest\n}\n\n\/\/ KeyringRotateRootKeyResponse returns the full key metadata\ntype KeyringRotateRootKeyResponse struct {\n\tKey *RootKeyMeta\n\tWriteMeta\n}\n\ntype KeyringListRootKeyMetaRequest struct {\n\t\/\/ TODO: do we need any fields here?\n\tQueryOptions\n}\n\ntype KeyringListRootKeyMetaResponse struct {\n\tKeys []*RootKeyMeta\n\tQueryMeta\n}\n\n\/\/ KeyringUpdateRootKeyRequest is used internally for key replication\n\/\/ only and for keyring restores. The RootKeyMeta will be extracted\n\/\/ for applying to the FSM with the KeyringUpdateRootKeyMetaRequest\n\/\/ (see below)\ntype KeyringUpdateRootKeyRequest struct {\n\tRootKey *RootKey\n\tRekey bool\n\tWriteRequest\n}\n\ntype KeyringUpdateRootKeyResponse struct {\n\tWriteMeta\n}\n\n\/\/ KeyringGetRootKeyRequest is used internally for key replication\n\/\/ only and for keyring restores.\ntype KeyringGetRootKeyRequest struct {\n\tKeyID string\n\tQueryOptions\n}\n\ntype KeyringGetRootKeyResponse struct {\n\tKey *RootKey\n\tQueryMeta\n}\n\n\/\/ KeyringUpdateRootKeyMetaRequest is used internally for key\n\/\/ replication so that we have a request wrapper for writing the\n\/\/ metadata to the FSM without including the key material\ntype KeyringUpdateRootKeyMetaRequest struct {\n\tRootKeyMeta *RootKeyMeta\n\tRekey bool\n\tWriteRequest\n}\n\ntype KeyringUpdateRootKeyMetaResponse struct {\n\tWriteMeta\n}\n\ntype KeyringDeleteRootKeyRequest struct {\n\tKeyID string\n\tWriteRequest\n}\n\ntype KeyringDeleteRootKeyResponse struct {\n\tWriteMeta\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Marc Berhault (marc@cockroachlabs.com)\n\npackage cli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/cockroachdb\/cockroach\/security\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst defaultKeySize = 2048\n\nvar keySize int\n\n\/\/ A createCACert command generates a CA certificate and stores it\n\/\/ in the cert directory.\nvar createCACertCmd = &cobra.Command{\n\tUse: \"create-ca [options]\",\n\tShort: \"create CA cert and key\",\n\tLong: `\nGenerates a new key pair and CA certificate, writing them to\nindividual files in the directory specified by --certs (required).\n`,\n\tRun: runCreateCACert,\n}\n\n\/\/ runCreateCACert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateCACert(cmd *cobra.Command, args []string) {\n\terr := security.RunCreateCACert(Context.Certs, keySize)\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate CA certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\n\/\/ A createNodeCert command generates a node certificate and stores it\n\/\/ in the cert directory.\nvar createNodeCertCmd = &cobra.Command{\n\tUse: \"create-node [options] <host 1> <host 2> ... <host N>\",\n\tShort: \"create node cert and key\",\n\tLong: `\nGenerates server and client certificates and keys for a given node, writing them to\nindividual files in the directory specified by --certs (required).\nThe certs directory should contain a CA cert and key.\nAt least one host should be passed in (either IP address of dns name).\n`,\n\tRun: runCreateNodeCert,\n}\n\n\/\/ runCreateNodeCert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateNodeCert(cmd *cobra.Command, args []string) {\n\terr := security.RunCreateNodeCert(Context.Certs, keySize, args)\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate node certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\n\/\/ A createClientCert command generates a client certificate and stores it\n\/\/ in the cert directory under <username>.crt and key under <username>.key.\nvar createClientCertCmd = &cobra.Command{\n\tUse: \"create-client [options] username\",\n\tShort: \"create client cert and key\",\n\tLong: `\nGenerates a new key pair and client certificate, writing them to\nindividual files in the directory specified by --certs (required).\nThe certs directory should contain a CA cert and key.\n`,\n\tRun: runCreateClientCert,\n}\n\n\/\/ runCreateClientCert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateClientCert(cmd *cobra.Command, args []string) {\n\terr := security.RunCreateClientCert(Context.Certs, keySize, args[0])\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate clent certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\nvar certCmds = []*cobra.Command{\n\tcreateCACertCmd,\n\tcreateNodeCertCmd,\n\tcreateClientCertCmd,\n}\n\nvar certCmd = &cobra.Command{\n\tUse: \"cert\",\n\tShort: \"create ca, node, and client certs\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Usage()\n\t},\n}\n\nfunc init() {\n\tcertCmd.AddCommand(certCmds...)\n}\n<commit_msg>Fix NPE in cert generation command<commit_after>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Marc Berhault (marc@cockroachlabs.com)\n\npackage cli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/cockroachdb\/cockroach\/security\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst defaultKeySize = 2048\n\nvar keySize int\n\n\/\/ A createCACert command generates a CA certificate and stores it\n\/\/ in the cert directory.\nvar createCACertCmd = &cobra.Command{\n\tUse: \"create-ca [options]\",\n\tShort: \"create CA cert and key\",\n\tLong: `\nGenerates a new key pair and CA certificate, writing them to\nindividual files in the directory specified by --certs (required).\n`,\n\tRun: runCreateCACert,\n}\n\n\/\/ runCreateCACert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateCACert(cmd *cobra.Command, args []string) {\n\terr := security.RunCreateCACert(Context.Certs, keySize)\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate CA certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\n\/\/ A createNodeCert command generates a node certificate and stores it\n\/\/ in the cert directory.\nvar createNodeCertCmd = &cobra.Command{\n\tUse: \"create-node [options] <host 1> <host 2> ... <host N>\",\n\tShort: \"create node cert and key\",\n\tLong: `\nGenerates server and client certificates and keys for a given node, writing them to\nindividual files in the directory specified by --certs (required).\nThe certs directory should contain a CA cert and key.\nAt least one host should be passed in (either IP address of dns name).\n`,\n\tRun: runCreateNodeCert,\n}\n\n\/\/ runCreateNodeCert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateNodeCert(cmd *cobra.Command, args []string) {\n\terr := security.RunCreateNodeCert(Context.Certs, keySize, args)\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate node certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\n\/\/ A createClientCert command generates a client certificate and stores it\n\/\/ in the cert directory under <username>.crt and key under <username>.key.\nvar createClientCertCmd = &cobra.Command{\n\tUse: \"create-client [options] username\",\n\tShort: \"create client cert and key\",\n\tLong: `\nGenerates a new key pair and client certificate, writing them to\nindividual files in the directory specified by --certs (required).\nThe certs directory should contain a CA cert and key.\n`,\n\tRun: runCreateClientCert,\n}\n\n\/\/ runCreateClientCert generates key pair and CA certificate and writes them\n\/\/ to their corresponding files.\nfunc runCreateClientCert(cmd *cobra.Command, args []string) {\n\tif len(args) != 1 {\n\t\tcmd.Usage()\n\t\treturn\n\t}\n\terr := security.RunCreateClientCert(Context.Certs, keySize, args[0])\n\tif err != nil {\n\t\tfmt.Fprintf(osStderr, \"failed to generate clent certificate: %s\\n\", err)\n\t\tosExit(1)\n\t\treturn\n\t}\n}\n\nvar certCmds = []*cobra.Command{\n\tcreateCACertCmd,\n\tcreateNodeCertCmd,\n\tcreateClientCertCmd,\n}\n\nvar certCmd = &cobra.Command{\n\tUse: \"cert\",\n\tShort: \"create ca, node, and client certs\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Usage()\n\t},\n}\n\nfunc init() {\n\tcertCmd.AddCommand(certCmds...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package observer provides a facility for publishing progress updates and\n\/\/ state changes from parts of the daemon, an a SSE http handler for consumers\n\/\/ of these events.\npackage observer\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n)\n\ntype ctxkey string\n\n\/\/ CtxRequestID is the context WithValue key for a request id.\nvar CtxRequestID ctxkey = \"id\"\n\n\/\/ EventType represents all possible types of events that can be observed.\ntype EventType string\n\n\/\/ All values for EventType\nconst (\n\tProgress EventType = \"progress\"\n\tStarted EventType = \"started\"\n\tFinished EventType = \"finished\"\n\tErrored EventType = \"errored\"\n\tAborted EventType = \"aborted\"\n)\n\ntype event struct {\n\tID string `json:\"id\"`\n\tType EventType `json:\"-\"` \/\/ type is included in the SSE\n\tMessage string `json:\"message\"`\n\tCompleted uint `json:\"completed\"`\n\tTotal uint `json:\"total\"`\n}\n\ntype notification struct {\n\tType EventType\n\tMessage string\n\tIncrement bool\n}\n\n\/\/ Observer receives events via Notify, and publishes them as SSEs via its\n\/\/ ServeHTTP function.\ntype Observer struct {\n\tnotify chan *event\n\tclosed chan int\n\n\tobservers map[chan []byte]bool\n\n\tnewObservers chan chan []byte\n\tclosedObservers chan chan []byte\n}\n\ntype transaction struct {\n\trequestID string\n\ttotal uint\n\tcurrent uint\n\tevents chan<- *event\n\ttotalUpdates <-chan uint\n\tnotifications <-chan *notification\n\tobserverClosed <-chan int\n\tctxDone <-chan struct{}\n}\n\n\/\/ Notifier belongs to a transactions and represents one segment in a series of\n\/\/ actions. A Notifier can send many messages.\ntype Notifier struct {\n\ttotal uint\n\tcurrent uint\n\ttransaction *transaction\n\ttotalUpdates chan<- uint\n\tnotifications chan<- *notification\n\tobserverClosed <-chan int\n\tctxDone <-chan struct{}\n}\n\nfunc (t *transaction) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase notification := <-t.notifications:\n\t\t\t\tif notification.Increment {\n\t\t\t\t\tt.current++\n\t\t\t\t}\n\n\t\t\t\tevt := &event{\n\t\t\t\t\tID: t.requestID,\n\t\t\t\t\tType: notification.Type,\n\t\t\t\t\tMessage: notification.Message,\n\t\t\t\t\tCompleted: t.current,\n\t\t\t\t\tTotal: t.total,\n\t\t\t\t}\n\n\t\t\t\tt.events <- evt\n\t\t\tcase size := <-t.totalUpdates:\n\t\t\t\tt.total += size\n\t\t\tcase <-t.ctxDone: \/\/ transaction has completed\n\t\t\t\treturn\n\t\t\tcase <-t.observerClosed: \/\/ observer has shutdown\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Notifier creates a child notifier to this Notifier\nfunc (n *Notifier) Notifier(total uint) *Notifier {\n\tnotifier := &Notifier{\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttransaction: nil,\n\t\ttotalUpdates: n.totalUpdates,\n\t\tnotifications: n.notifications,\n\t\tobserverClosed: n.observerClosed,\n\t\tctxDone: n.ctxDone,\n\t}\n\n\tn.totalUpdates <- total\n\treturn notifier\n}\n\n\/\/ Notify publishes an event to all SSE observers.\nfunc (n *Notifier) Notify(eventType EventType, message string, increment bool) {\n\tnotif := ¬ification{\n\t\tType: eventType,\n\t\tMessage: message,\n\t\tIncrement: increment,\n\t}\n\n\tif increment {\n\t\tn.current++\n\t}\n\n\tif n.current > n.total {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"notifications exceed maximum %d\/%d\", n.current, n.total))\n\t}\n\n\tselect {\n\tcase n.notifications <- notif:\n\t\treturn\n\tcase <-n.observerClosed:\n\t\treturn\n\tcase <-n.ctxDone:\n\t\treturn\n\t}\n}\n\n\/\/ New returns a new initialized Observer\nfunc New() *Observer {\n\treturn &Observer{\n\t\tnotify: make(chan *event),\n\t\tclosed: make(chan int),\n\n\t\tobservers: make(map[chan []byte]bool),\n\n\t\tnewObservers: make(chan chan []byte),\n\t\tclosedObservers: make(chan chan []byte),\n\t}\n}\n\n\/\/ Notifier creates a new transaction for sending notifications\nfunc (o *Observer) Notifier(ctx context.Context, total uint) (*Notifier, error) {\n\n\tif ctx == nil {\n\t\treturn nil, errors.New(\"Context must be provided\")\n\t}\n\n\tid, ok := ctx.Value(CtxRequestID).(string)\n\tif !ok {\n\t\treturn nil, errors.New(\"Missing 'id' property in Context\")\n\t}\n\n\ttotalUpdates := make(chan uint)\n\tnotifications := make(chan *notification)\n\n\tt := &transaction{\n\t\trequestID: id,\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttotalUpdates: totalUpdates,\n\t\tevents: o.notify,\n\t\tnotifications: notifications,\n\t\tobserverClosed: o.closed,\n\t\tctxDone: ctx.Done(),\n\t}\n\n\tt.start()\n\n\tn := &Notifier{\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttransaction: t,\n\t\ttotalUpdates: totalUpdates,\n\t\tnotifications: notifications,\n\t\tobserverClosed: t.observerClosed,\n\t\tctxDone: t.ctxDone,\n\t}\n\n\treturn n, nil\n}\n\n\/\/ ServeHTTP implements the http.Handler interface for providing server-sent\n\/\/ events of observed notifications.\nfunc (o *Observer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\n\trwf := rw.(http.Flusher)\n\tclosed := rw.(http.CloseNotifier).CloseNotify()\n\n\tnotify := make(chan []byte)\n\to.newObservers <- notify\n\n\tdefer func() {\n\t\to.closedObservers <- notify\n\t}()\n\n\trw.Header().Set(\"Content-Type\", \"text\/event-stream\")\n\trw.Header().Set(\"Connection\", \"keep-alive\")\n\trwf.Flush()\n\n\tfor {\n\t\tselect {\n\t\tcase evt := <-notify:\n\t\t\t\/\/ Write the event to the client. we ignore errors here, and\n\t\t\t\/\/ let the close channel tell us when to stop writing.\n\t\t\trw.Write(evt)\n\t\t\trwf.Flush()\n\n\t\tcase <-closed: \/\/ client has disconnected. Exit.\n\t\t\treturn\n\t\tcase <-o.closed: \/\/ The Observer is shutting down. Exit.\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Start begins listening for notifications of observable events. It returns\n\/\/ after stop has been called.\nfunc (o *Observer) Start() {\n\tfor {\n\t\tselect {\n\t\tcase evt := <-o.notify: \/\/ We have an event to observe\n\t\t\tif len(o.observers) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tevtb, err := json.Marshal(evt)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error marshaling event: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsse := []byte(\"event: \" + evt.Type + \"\\ndata: \")\n\t\t\tsse = append(sse, append(evtb, []byte(\"\\n\\n\")...)...)\n\n\t\t\tfor n := range o.observers {\n\t\t\t\tn <- sse\n\t\t\t}\n\n\t\tcase n := <-o.newObservers:\n\t\t\to.observers[n] = true\n\t\tcase n := <-o.closedObservers:\n\t\t\tdelete(o.observers, n)\n\n\t\tcase <-o.closed: \/\/ The Observer has been closed.\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Stop terminates propagation of events through the observer\nfunc (o *Observer) Stop() {\n\tclose(o.closed)\n}\n<commit_msg>Observer: put a Read\/Write mutex in place.<commit_after>\/\/ Package observer provides a facility for publishing progress updates and\n\/\/ state changes from parts of the daemon, an a SSE http handler for consumers\n\/\/ of these events.\npackage observer\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n)\n\ntype ctxkey string\n\n\/\/ CtxRequestID is the context WithValue key for a request id.\nvar CtxRequestID ctxkey = \"id\"\n\n\/\/ EventType represents all possible types of events that can be observed.\ntype EventType string\n\n\/\/ All values for EventType\nconst (\n\tProgress EventType = \"progress\"\n\tStarted EventType = \"started\"\n\tFinished EventType = \"finished\"\n\tErrored EventType = \"errored\"\n\tAborted EventType = \"aborted\"\n)\n\ntype event struct {\n\tID string `json:\"id\"`\n\tType EventType `json:\"-\"` \/\/ type is included in the SSE\n\tMessage string `json:\"message\"`\n\tCompleted uint `json:\"completed\"`\n\tTotal uint `json:\"total\"`\n}\n\ntype notification struct {\n\tType EventType\n\tMessage string\n\tIncrement bool\n}\n\n\/\/ Observer receives events via Notify, and publishes them as SSEs via its\n\/\/ ServeHTTP function.\ntype Observer struct {\n\tnotify chan *event\n\tclosed chan int\n\n\tobservers map[chan []byte]bool\n\n\tnewObservers chan chan []byte\n\tclosedObservers chan chan []byte\n}\n\ntype transaction struct {\n\trequestID string\n\ttotal uint\n\tcurrent uint\n\tevents chan<- *event\n\ttotalUpdates <-chan uint\n\tnotifications <-chan *notification\n\tobserverClosed <-chan int\n\tctxDone <-chan struct{}\n}\n\n\/\/ Notifier belongs to a transactions and represents one segment in a series of\n\/\/ actions. A Notifier can send many messages.\ntype Notifier struct {\n\ttotal uint\n\tcurrent uint\n\ttransaction *transaction\n\ttotalUpdates chan<- uint\n\tnotifications chan<- *notification\n\tobserverClosed <-chan int\n\tctxDone <-chan struct{}\n\tsync.RWMutex\n}\n\nfunc (t *transaction) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase notification := <-t.notifications:\n\t\t\t\tif notification.Increment {\n\t\t\t\t\tt.current++\n\t\t\t\t}\n\n\t\t\t\tevt := &event{\n\t\t\t\t\tID: t.requestID,\n\t\t\t\t\tType: notification.Type,\n\t\t\t\t\tMessage: notification.Message,\n\t\t\t\t\tCompleted: t.current,\n\t\t\t\t\tTotal: t.total,\n\t\t\t\t}\n\n\t\t\t\tt.events <- evt\n\t\t\tcase size := <-t.totalUpdates:\n\t\t\t\tt.total += size\n\t\t\tcase <-t.ctxDone: \/\/ transaction has completed\n\t\t\t\treturn\n\t\t\tcase <-t.observerClosed: \/\/ observer has shutdown\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Notifier creates a child notifier to this Notifier\nfunc (n *Notifier) Notifier(total uint) *Notifier {\n\tnotifier := &Notifier{\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttransaction: nil,\n\t\ttotalUpdates: n.totalUpdates,\n\t\tnotifications: n.notifications,\n\t\tobserverClosed: n.observerClosed,\n\t\tctxDone: n.ctxDone,\n\t}\n\n\tn.totalUpdates <- total\n\treturn notifier\n}\n\n\/\/ Notify publishes an event to all SSE observers.\nfunc (n *Notifier) Notify(eventType EventType, message string, increment bool) {\n\tnotif := ¬ification{\n\t\tType: eventType,\n\t\tMessage: message,\n\t\tIncrement: increment,\n\t}\n\n\tif increment {\n\t\tn.Lock()\n\t\tn.current++\n\t\tn.Unlock()\n\t}\n\n\tn.RLock()\n\tif n.current > n.total {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"notifications exceed maximum %d\/%d\", n.current, n.total))\n\t}\n\tn.RUnlock()\n\n\tselect {\n\tcase n.notifications <- notif:\n\t\treturn\n\tcase <-n.observerClosed:\n\t\treturn\n\tcase <-n.ctxDone:\n\t\treturn\n\t}\n}\n\n\/\/ New returns a new initialized Observer\nfunc New() *Observer {\n\treturn &Observer{\n\t\tnotify: make(chan *event),\n\t\tclosed: make(chan int),\n\n\t\tobservers: make(map[chan []byte]bool),\n\n\t\tnewObservers: make(chan chan []byte),\n\t\tclosedObservers: make(chan chan []byte),\n\t}\n}\n\n\/\/ Notifier creates a new transaction for sending notifications\nfunc (o *Observer) Notifier(ctx context.Context, total uint) (*Notifier, error) {\n\n\tif ctx == nil {\n\t\treturn nil, errors.New(\"Context must be provided\")\n\t}\n\n\tid, ok := ctx.Value(CtxRequestID).(string)\n\tif !ok {\n\t\treturn nil, errors.New(\"Missing 'id' property in Context\")\n\t}\n\n\ttotalUpdates := make(chan uint)\n\tnotifications := make(chan *notification)\n\n\tt := &transaction{\n\t\trequestID: id,\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttotalUpdates: totalUpdates,\n\t\tevents: o.notify,\n\t\tnotifications: notifications,\n\t\tobserverClosed: o.closed,\n\t\tctxDone: ctx.Done(),\n\t}\n\n\tt.start()\n\n\tn := &Notifier{\n\t\ttotal: total,\n\t\tcurrent: 0,\n\t\ttransaction: t,\n\t\ttotalUpdates: totalUpdates,\n\t\tnotifications: notifications,\n\t\tobserverClosed: t.observerClosed,\n\t\tctxDone: t.ctxDone,\n\t}\n\n\treturn n, nil\n}\n\n\/\/ ServeHTTP implements the http.Handler interface for providing server-sent\n\/\/ events of observed notifications.\nfunc (o *Observer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\n\trwf := rw.(http.Flusher)\n\tclosed := rw.(http.CloseNotifier).CloseNotify()\n\n\tnotify := make(chan []byte)\n\to.newObservers <- notify\n\n\tdefer func() {\n\t\to.closedObservers <- notify\n\t}()\n\n\trw.Header().Set(\"Content-Type\", \"text\/event-stream\")\n\trw.Header().Set(\"Connection\", \"keep-alive\")\n\trwf.Flush()\n\n\tfor {\n\t\tselect {\n\t\tcase evt := <-notify:\n\t\t\t\/\/ Write the event to the client. we ignore errors here, and\n\t\t\t\/\/ let the close channel tell us when to stop writing.\n\t\t\trw.Write(evt)\n\t\t\trwf.Flush()\n\n\t\tcase <-closed: \/\/ client has disconnected. Exit.\n\t\t\treturn\n\t\tcase <-o.closed: \/\/ The Observer is shutting down. Exit.\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Start begins listening for notifications of observable events. It returns\n\/\/ after stop has been called.\nfunc (o *Observer) Start() {\n\tfor {\n\t\tselect {\n\t\tcase evt := <-o.notify: \/\/ We have an event to observe\n\t\t\tif len(o.observers) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tevtb, err := json.Marshal(evt)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error marshaling event: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsse := []byte(\"event: \" + evt.Type + \"\\ndata: \")\n\t\t\tsse = append(sse, append(evtb, []byte(\"\\n\\n\")...)...)\n\n\t\t\tfor n := range o.observers {\n\t\t\t\tn <- sse\n\t\t\t}\n\n\t\tcase n := <-o.newObservers:\n\t\t\to.observers[n] = true\n\t\tcase n := <-o.closedObservers:\n\t\t\tdelete(o.observers, n)\n\n\t\tcase <-o.closed: \/\/ The Observer has been closed.\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Stop terminates propagation of events through the observer\nfunc (o *Observer) Stop() {\n\tclose(o.closed)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/streadway\/amqp\"\n\n\t\"github.com\/b2aio\/typhon\/errors\"\n\t\"github.com\/b2aio\/typhon\/rabbit\"\n)\n\ntype AMQPServer struct {\n\t\/\/ this is the routing key prefix for all endpoints\n\tServiceName string\n\tServiceDescription string\n\tendpointRegistry *EndpointRegistry\n\tconnection *rabbit.RabbitConnection\n\tnotifyConnected []chan bool\n\n\tcloseChan chan struct{}\n}\n\nfunc NewAMQPServer() Server {\n\treturn &AMQPServer{\n\t\tendpointRegistry: NewEndpointRegistry(),\n\t\tconnection: rabbit.NewRabbitConnection(),\n\t\tcloseChan: make(chan struct{}),\n\t}\n}\n\nfunc (s *AMQPServer) Name() string {\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s.ServiceName\n}\n\nfunc (s *AMQPServer) Description() string {\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s.ServiceDescription\n}\n\nfunc (s *AMQPServer) Init(c *Config) {\n\ts.ServiceName = c.Name\n\ts.ServiceDescription = c.Description\n}\n\nfunc (s *AMQPServer) NotifyConnected() chan bool {\n\tch := make(chan bool)\n\ts.notifyConnected = append(s.notifyConnected, ch)\n\treturn ch\n}\n\nfunc (s *AMQPServer) RegisterEndpoint(endpoint Endpoint) {\n\ts.endpointRegistry.Register(endpoint)\n}\n\nfunc (s *AMQPServer) DeregisterEndpoint(endpointName string) {\n\ts.endpointRegistry.Deregister(endpointName)\n}\n\n\/\/ Run the server, connecting to our transport and serving requests\nfunc (s *AMQPServer) Run() {\n\n\t\/\/ Connect to AMQP\n\tselect {\n\tcase <-s.connection.Init():\n\t\tlog.Info(\"[Server] Connected to RabbitMQ\")\n\t\tfor _, notify := range s.notifyConnected {\n\t\t\tnotify <- true\n\t\t}\n\tcase <-time.After(10 * time.Second):\n\t\tlog.Critical(\"[Server] Failed to connect to RabbitMQ\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Get a delivery channel from the connection\n\tlog.Infof(\"[Server] Listening for deliveries on %s.#\", s.ServiceName)\n\tdeliveries, err := s.connection.Consume(s.ServiceName)\n\tif err != nil {\n\t\tlog.Criticalf(\"[Server] [%s] Failed to consume from Rabbit\", s.ServiceName)\n\t}\n\n\t\/\/ Handle deliveries\n\tfor {\n\t\tselect {\n\t\tcase req := <-deliveries:\n\t\t\tlog.Infof(\"[Server] [%s] Received new delivery\", s.ServiceName)\n\t\t\tgo s.handleRequest(req)\n\t\tcase <-s.closeChan:\n\t\t\t\/\/ shut down server\n\t\t\ts.connection.Close()\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Infof(\"Exiting\")\n\tlog.Flush()\n}\n\nfunc (s *AMQPServer) Close() {\n\tclose(s.closeChan)\n}\n\n\/\/ handleRequest takes a delivery from AMQP, attempts to process it and return a response\nfunc (s *AMQPServer) handleRequest(delivery amqp.Delivery) {\n\n\tlog.Infof(\"Received delivery for %+v\", delivery)\n\n\t\/\/ See if we have a matching endpoint for this request\n\tendpointName := strings.Replace(delivery.RoutingKey, fmt.Sprintf(\"%s.\", s.ServiceName), \"\", -1)\n\tendpoint := s.endpointRegistry.Get(endpointName)\n\tif endpoint == nil {\n\t\tlog.Errorf(\"[Server] Endpoint '%s' not found, cannot handle request\", endpointName)\n\t\ts.respondWithError(delivery, errors.BadRequest(\"endpoint.notfound\", \"Endpoint not found\"))\n\t\treturn\n\t}\n\n\t\/\/ Handle the delivery\n\treq := NewAMQPRequest(&delivery)\n\trsp, err := endpoint.HandleRequest(req)\n\tif err != nil {\n\t\ts.respondWithError(delivery, err)\n\t\treturn\n\t}\n\n\t\/\/ TODO deal with rsp == nil (programmer error, but still)\n\n\t\/\/ Marshal the response\n\tbody, err := rsp.Encode()\n\tif err != nil {\n\t\tlog.Errorf(\"[Server] Failed to marshal response\")\n\t\ts.respondWithError(delivery, errors.BadResponse(\"response.marshal\", err.Error()))\n\t\treturn\n\t}\n\n\t\/\/ Build return delivery, and publish\n\tmsg := amqp.Publishing{\n\t\tCorrelationId: delivery.CorrelationId,\n\t\tTimestamp: time.Now().UTC(),\n\t\tBody: body,\n\t\tHeaders: map[string]interface{}{\n\t\t\t\"Content-Encoding\": \"RESPONSE\",\n\t\t},\n\t}\n\ts.connection.Publish(\"\", delivery.ReplyTo, msg)\n}\n\n\/\/ respondWithError to a delivery, with the provided error\nfunc (s *AMQPServer) respondWithError(delivery amqp.Delivery, err error) {\n\n\t\/\/ Construct a return message with an error\n\tmsg := amqp.Publishing{\n\t\tCorrelationId: delivery.CorrelationId,\n\t\tTimestamp: time.Now().UTC(),\n\t\tBody: []byte(err.Error()),\n\t}\n\n\t\/\/ Publish the error back to the client\n\ts.connection.Publish(\"\", delivery.ReplyTo, msg)\n}\n<commit_msg>Marshal service errors to proto, and set content encoding<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/streadway\/amqp\"\n\n\t\"github.com\/b2aio\/typhon\/errors\"\n\t\"github.com\/b2aio\/typhon\/rabbit\"\n)\n\ntype AMQPServer struct {\n\t\/\/ this is the routing key prefix for all endpoints\n\tServiceName string\n\tServiceDescription string\n\tendpointRegistry *EndpointRegistry\n\tconnection *rabbit.RabbitConnection\n\tnotifyConnected []chan bool\n\n\tcloseChan chan struct{}\n}\n\nfunc NewAMQPServer() Server {\n\treturn &AMQPServer{\n\t\tendpointRegistry: NewEndpointRegistry(),\n\t\tconnection: rabbit.NewRabbitConnection(),\n\t\tcloseChan: make(chan struct{}),\n\t}\n}\n\nfunc (s *AMQPServer) Name() string {\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s.ServiceName\n}\n\nfunc (s *AMQPServer) Description() string {\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s.ServiceDescription\n}\n\nfunc (s *AMQPServer) Init(c *Config) {\n\ts.ServiceName = c.Name\n\ts.ServiceDescription = c.Description\n}\n\nfunc (s *AMQPServer) NotifyConnected() chan bool {\n\tch := make(chan bool)\n\ts.notifyConnected = append(s.notifyConnected, ch)\n\treturn ch\n}\n\nfunc (s *AMQPServer) RegisterEndpoint(endpoint Endpoint) {\n\ts.endpointRegistry.Register(endpoint)\n}\n\nfunc (s *AMQPServer) DeregisterEndpoint(endpointName string) {\n\ts.endpointRegistry.Deregister(endpointName)\n}\n\n\/\/ Run the server, connecting to our transport and serving requests\nfunc (s *AMQPServer) Run() {\n\n\t\/\/ Connect to AMQP\n\tselect {\n\tcase <-s.connection.Init():\n\t\tlog.Info(\"[Server] Connected to RabbitMQ\")\n\t\tfor _, notify := range s.notifyConnected {\n\t\t\tnotify <- true\n\t\t}\n\tcase <-time.After(10 * time.Second):\n\t\tlog.Critical(\"[Server] Failed to connect to RabbitMQ\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Get a delivery channel from the connection\n\tlog.Infof(\"[Server] Listening for deliveries on %s.#\", s.ServiceName)\n\tdeliveries, err := s.connection.Consume(s.ServiceName)\n\tif err != nil {\n\t\tlog.Criticalf(\"[Server] [%s] Failed to consume from Rabbit\", s.ServiceName)\n\t}\n\n\t\/\/ Handle deliveries\n\tfor {\n\t\tselect {\n\t\tcase req := <-deliveries:\n\t\t\tlog.Infof(\"[Server] [%s] Received new delivery\", s.ServiceName)\n\t\t\tgo s.handleRequest(req)\n\t\tcase <-s.closeChan:\n\t\t\t\/\/ shut down server\n\t\t\ts.connection.Close()\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Infof(\"Exiting\")\n\tlog.Flush()\n}\n\nfunc (s *AMQPServer) Close() {\n\tclose(s.closeChan)\n}\n\n\/\/ handleRequest takes a delivery from AMQP, attempts to process it and return a response\nfunc (s *AMQPServer) handleRequest(delivery amqp.Delivery) {\n\n\tlog.Infof(\"Received delivery for %+v\", delivery)\n\n\t\/\/ See if we have a matching endpoint for this request\n\tendpointName := strings.Replace(delivery.RoutingKey, fmt.Sprintf(\"%s.\", s.ServiceName), \"\", -1)\n\tendpoint := s.endpointRegistry.Get(endpointName)\n\tif endpoint == nil {\n\t\tlog.Errorf(\"[Server] Endpoint '%s' not found, cannot handle request\", endpointName)\n\t\ts.respondWithError(delivery, errors.BadRequest(\"endpoint.notfound\", \"Endpoint not found\"))\n\t\treturn\n\t}\n\n\t\/\/ Handle the delivery\n\treq := NewAMQPRequest(&delivery)\n\trsp, err := endpoint.HandleRequest(req)\n\tif err != nil {\n\t\ts.respondWithError(delivery, err)\n\t\treturn\n\t}\n\n\t\/\/ TODO deal with rsp == nil (programmer error, but still)\n\n\t\/\/ Marshal the response\n\tbody, err := rsp.Encode()\n\tif err != nil {\n\t\tlog.Errorf(\"[Server] Failed to marshal response\")\n\t\ts.respondWithError(delivery, errors.BadResponse(\"response.marshal\", err.Error()))\n\t\treturn\n\t}\n\n\t\/\/ Build return delivery, and publish\n\tmsg := amqp.Publishing{\n\t\tCorrelationId: delivery.CorrelationId,\n\t\tTimestamp: time.Now().UTC(),\n\t\tBody: body,\n\t\tHeaders: map[string]interface{}{\n\t\t\t\"Content-Encoding\": \"RESPONSE\",\n\t\t},\n\t}\n\ts.connection.Publish(\"\", delivery.ReplyTo, msg)\n}\n\n\/\/ respondWithError to a delivery, with the provided error\nfunc (s *AMQPServer) respondWithError(delivery amqp.Delivery, err error) {\n\n\t\/\/ Ensure we have a service error in proto form\n\t\/\/ and marshal this for transmission\n\tsvcErr := errorToServiceError(err)\n\tb, err := proto.Marshal(errors.Marshal(svcErr))\n\tif err != nil {\n\t\t\/\/ shit\n\t}\n\n\t\/\/ Construct a return message with an error\n\tmsg := amqp.Publishing{\n\t\tCorrelationId: delivery.CorrelationId,\n\t\tTimestamp: time.Now().UTC(),\n\t\tBody: b,\n\t\tHeaders: map[string]interface{}{\n\t\t\t\"Content-Encoding\": \"ERROR\",\n\t\t},\n\t}\n\n\t\/\/ Publish the error back to the client\n\ts.connection.Publish(\"\", delivery.ReplyTo, msg)\n}\n\n\/\/ errorToServiceError converts an error interface to the concrete\n\/\/ errors.ServiceError type which we pass between services (as proto)\nfunc errorToServiceError(err error) *errors.ServiceError {\n\n\t\/\/ If we already have a service error, return this\n\tif svcErr, ok := err.(*errors.ServiceError); ok {\n\t\treturn svcErr\n\t}\n\n\t\/\/ Otherwise make a generic internal service error\n\t\/\/ Encapsulating the error we were given\n\treturn errors.InternalService(\"internalservice\", err.Error()).(*errors.ServiceError)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package service provides a micro service\npackage service\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\"\n\t\"github.com\/micro\/go-micro\/proxy\"\n\t\"github.com\/micro\/go-micro\/proxy\/grpc\"\n\t\"github.com\/micro\/go-micro\/proxy\/http\"\n\t\"github.com\/micro\/go-micro\/proxy\/mucp\"\n\t\"github.com\/micro\/go-micro\/runtime\"\n\t\"github.com\/micro\/go-micro\/server\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\t\"github.com\/micro\/go-micro\/util\/mux\"\n)\n\nfunc run(ctx *cli.Context, opts ...micro.Option) {\n\tlog.Name(\"service\")\n\n\tname := ctx.String(\"name\")\n\taddress := ctx.String(\"address\")\n\tendpoint := ctx.String(\"endpoint\")\n\n\tif len(name) > 0 {\n\t\topts = append(opts, micro.Name(name))\n\t}\n\n\tif len(address) > 0 {\n\t\topts = append(opts, micro.Address(address))\n\t}\n\n\tif len(endpoint) == 0 {\n\t\tendpoint = proxy.DefaultEndpoint\n\t}\n\n\tvar p proxy.Proxy\n\n\tswitch {\n\tcase strings.HasPrefix(endpoint, \"grpc\"):\n\t\tp = grpc.NewProxy(proxy.WithEndpoint(endpoint))\n\tcase strings.HasPrefix(endpoint, \"http\"):\n\t\tp = http.NewProxy(proxy.WithEndpoint(endpoint))\n\tdefault:\n\t\tp = mucp.NewProxy(proxy.WithEndpoint(endpoint))\n\t}\n\n\t\/\/ run the service if asked to\n\tif len(ctx.Args()) > 0 {\n\t\targs := []runtime.CreateOption{\n\t\t\truntime.WithCommand(ctx.Args()[0], ctx.Args()[1:]...),\n\t\t\truntime.WithOutput(os.Stdout),\n\t\t}\n\n\t\t\/\/ register the service\n\t\truntime.Create(&runtime.Service{\n\t\t\tName: name,\n\t\t}, args...)\n\n\t\t\/\/ start the runtime\n\t\truntime.Start()\n\n\t\t\/\/ stop the runtime\n\t\tdefer func() {\n\t\t\truntime.Delete(&runtime.Service{\n\t\t\t\tName: name,\n\t\t\t})\n\t\t\truntime.Stop()\n\t\t}()\n\t}\n\n\tlog.Logf(\"Service [%s] Serving %s at endpoint %s\\n\", p.String(), name, endpoint)\n\n\t\/\/ new service\n\tservice := micro.NewService(opts...)\n\n\t\/\/ create new muxer\n\tmuxer := mux.New(name, p)\n\n\t\/\/ set the router\n\tservice.Server().Init(\n\t\tserver.WithRouter(muxer),\n\t)\n\n\t\/\/ run service\n\tservice.Run()\n}\n\nfunc Commands(options ...micro.Option) []cli.Command {\n\tcommand := cli.Command{\n\t\tName: \"service\",\n\t\tUsage: \"Run a micro service\",\n\t\tAction: func(ctx *cli.Context) {\n\t\t\trun(ctx, options...)\n\t\t},\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"name\",\n\t\t\t\tUsage: \"Name of the service\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_NAME\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"address\",\n\t\t\t\tUsage: \"Address of the service\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_ADDRESS\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"endpoint\",\n\t\t\t\tUsage: \"The local service endpoint. Defaults to localhost:9090\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_ENDPOINT\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, p := range Plugins() {\n\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\tcommand.Subcommands = append(command.Subcommands, cmds...)\n\t\t}\n\n\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\tcommand.Flags = append(command.Flags, flags...)\n\t\t}\n\t}\n\n\treturn []cli.Command{command}\n}\n<commit_msg>fix the service runtime break<commit_after>\/\/ Package service provides a micro service\npackage service\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/micro\/cli\"\n\t\"github.com\/micro\/go-micro\"\n\t\"github.com\/micro\/go-micro\/proxy\"\n\t\"github.com\/micro\/go-micro\/proxy\/grpc\"\n\t\"github.com\/micro\/go-micro\/proxy\/http\"\n\t\"github.com\/micro\/go-micro\/proxy\/mucp\"\n\t\"github.com\/micro\/go-micro\/runtime\"\n\t\"github.com\/micro\/go-micro\/server\"\n\t\"github.com\/micro\/go-micro\/util\/log\"\n\t\"github.com\/micro\/go-micro\/util\/mux\"\n)\n\nfunc run(ctx *cli.Context, opts ...micro.Option) {\n\tlog.Name(\"service\")\n\n\tname := ctx.String(\"name\")\n\taddress := ctx.String(\"address\")\n\tendpoint := ctx.String(\"endpoint\")\n\n\tif len(name) > 0 {\n\t\topts = append(opts, micro.Name(name))\n\t}\n\n\tif len(address) > 0 {\n\t\topts = append(opts, micro.Address(address))\n\t}\n\n\tif len(endpoint) == 0 {\n\t\tendpoint = proxy.DefaultEndpoint\n\t}\n\n\tvar p proxy.Proxy\n\n\tswitch {\n\tcase strings.HasPrefix(endpoint, \"grpc\"):\n\t\tp = grpc.NewProxy(proxy.WithEndpoint(endpoint))\n\tcase strings.HasPrefix(endpoint, \"http\"):\n\t\tp = http.NewProxy(proxy.WithEndpoint(endpoint))\n\tdefault:\n\t\tp = mucp.NewProxy(proxy.WithEndpoint(endpoint))\n\t}\n\n\t\/\/ run the service if asked to\n\tif len(ctx.Args()) > 0 {\n\t\targs := []runtime.CreateOption{\n\t\t\truntime.WithCommand(ctx.Args()[0], ctx.Args()[1:]...),\n\t\t\truntime.WithOutput(os.Stdout),\n\t\t}\n\n\t\tr := runtime.NewRuntime()\n\n\t\t\/\/ register the service\n\t\tr.Create(&runtime.Service{\n\t\t\tName: name,\n\t\t}, args...)\n\n\t\t\/\/ start the runtime\n\t\tr.Start()\n\n\t\t\/\/ stop the runtime\n\t\tdefer func() {\n\t\t\tr.Delete(&runtime.Service{\n\t\t\t\tName: name,\n\t\t\t})\n\t\t\tr.Stop()\n\t\t}()\n\t}\n\n\tlog.Logf(\"Service [%s] Serving %s at endpoint %s\\n\", p.String(), name, endpoint)\n\n\t\/\/ new service\n\tservice := micro.NewService(opts...)\n\n\t\/\/ create new muxer\n\tmuxer := mux.New(name, p)\n\n\t\/\/ set the router\n\tservice.Server().Init(\n\t\tserver.WithRouter(muxer),\n\t)\n\n\t\/\/ run service\n\tservice.Run()\n}\n\nfunc Commands(options ...micro.Option) []cli.Command {\n\tcommand := cli.Command{\n\t\tName: \"service\",\n\t\tUsage: \"Run a micro service\",\n\t\tAction: func(ctx *cli.Context) {\n\t\t\trun(ctx, options...)\n\t\t},\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"name\",\n\t\t\t\tUsage: \"Name of the service\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_NAME\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"address\",\n\t\t\t\tUsage: \"Address of the service\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_ADDRESS\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"endpoint\",\n\t\t\t\tUsage: \"The local service endpoint. Defaults to localhost:9090\",\n\t\t\t\tEnvVar: \"MICRO_SERVICE_ENDPOINT\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, p := range Plugins() {\n\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\tcommand.Subcommands = append(command.Subcommands, cmds...)\n\t\t}\n\n\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\tcommand.Flags = append(command.Flags, flags...)\n\t\t}\n\t}\n\n\treturn []cli.Command{command}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/cosminrentea\/gobbler\/client\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/connector\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/fcm\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/service\"\n\t\"github.com\/cosminrentea\/gobbler\/testutil\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/configstring\"\n)\n\nvar (\n\ttestHttpPort = 11000\n\ttimeoutForOneMessage = 150 * time.Millisecond\n)\n\ntype fcmMetricsMap struct {\n\tCurrentErrorsCount int `json:\"current_errors_count\"`\n\tCurrentMessagesCount int `json:\"current_messages_count\"`\n\tCurrentMessagesTotalLatencies int `json:\"current_messages_total_latencies_nanos\"`\n\tCurrentErrorsTotalLatencies int `json:\"current_errors_total_latencies_nanos\"`\n}\n\ntype fcmMetrics struct {\n\tTotalSentMessages int `json:\"fcm.total_sent_messages\"`\n\tTotalSentMessageErrors int `json:\"fcm.total_sent_message_errors\"`\n\tMinute fcmMetricsMap `json:\"fcm.minute\"`\n\tHour fcmMetricsMap `json:\"fcm.hour\"`\n\tDay fcmMetricsMap `json:\"fcm.day\"`\n}\n\ntype routerMetrics struct {\n\tCurrentRoutes int `json:\"router.current_routes\"`\n\tCurrentSubscriptions int `json:\"router.current_subscriptions\"`\n}\n\ntype expectedValues struct {\n\tZeroLatencies bool\n\tMessageCount int\n\tCurrentRoutes int\n\tCurrentSubscriptions int\n}\n\n\/\/ Test that restarting the service continues to fetch messages from store for a subscription from lastID\nfunc TestFCMRestart(t *testing.T) {\n\t\/\/defer testutil.EnableDebugForMethod()()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\treceiveC := make(chan bool)\n\ts, cleanup := serviceSetUp(t)\n\tdefer cleanup()\n\n\tassertMetrics(a, s, expectedValues{true, 0, 0, 0})\n\n\tvar fcmConn connector.ResponsiveConnector\n\tvar ok bool\n\tfor _, iface := range s.ModulesSortedByStartOrder() {\n\t\tfcmConn, ok = iface.(connector.ResponsiveConnector)\n\t\tif ok {\n\t\t\tbreak\n\t\t}\n\t}\n\ta.True(ok, \"There should be a module of type FCMConnector\")\n\n\t\/\/ add a high timeout so the messages are processed slow\n\tsender, err := fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\t\/\/ create subscription on topic\n\tsubscriptionSetUp(t, s)\n\n\tassertMetrics(a, s, expectedValues{true, 0, 1, 1})\n\n\tc := clientSetUp(t, s)\n\tdefer c.Close()\n\n\t\/\/ send 3 messages in the router but read only one and close the service\n\tfor i := 0; i < 3; i++ {\n\t\tc.Send(testTopic, \"dummy body\", `{\"Correlation-Id\": \"id\"}`)\n\t}\n\n\t\/\/ receive one message only from FCM\n\tselect {\n\tcase <-receiveC:\n\tcase <-time.After(timeoutForOneMessage):\n\t\ta.Fail(\"Initial FCM message not received\")\n\t}\n\n\tassertMetrics(a, s, expectedValues{false, 1, 1, 1})\n\tclose(receiveC)\n\t\/\/ restart the service\n\ta.NoError(s.Stop())\n\n\t\/\/ remake the sender\n\treceiveC = make(chan bool)\n\tsender, err = fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\ttime.Sleep(50 * time.Millisecond)\n\ttestutil.ResetDefaultRegistryHealthCheck()\n\ta.NoError(s.Start())\n\n\t\/\/TODO Cosmin Bogdan add 2 calls to assertMetrics before and after the next block\n\n\t\/\/ read the other 2 messages\n\tfor i := 0; i < 1; i++ {\n\t\tselect {\n\t\tcase <-receiveC:\n\t\tcase <-time.After(2 * timeoutForOneMessage):\n\t\t\ta.Fail(\"FCM message not received\")\n\t\t}\n\t}\n}\n\nfunc serviceSetUp(t *testing.T) (*service.Service, func()) {\n\tdir, errTempDir := ioutil.TempDir(\"\", \"guble_fcm_test\")\n\tassert.NoError(t, errTempDir)\n\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.Cluster.NodeID = 0\n\t*Config.StoragePath = dir\n\t*Config.MetricsEndpoint = \"\/admin\/metrics-old\"\n\t*Config.WS.Enabled = true\n\t*Config.WS.Prefix = \"\/stream\/\"\n\t*Config.FCM.Enabled = true\n\t*Config.FCM.APIKey = \"WILL BE OVERWRITTEN\"\n\t*Config.FCM.Prefix = \"\/fcm\/\"\n\t*Config.FCM.Workers = 1 \/\/ use only one worker so we can control the number of messages that go to FCM\n\t*Config.APNS.Enabled = false\n\t*Config.KafkaProducer.Brokers = configstring.List{}\n\n\tvar s *service.Service\n\tfor s == nil {\n\t\ttestHttpPort++\n\t\tlogger.WithField(\"port\", testHttpPort).Debug(\"trying to use HTTP Port\")\n\t\t*Config.HttpListen = fmt.Sprintf(\"127.0.0.1:%d\", testHttpPort)\n\t\ts = StartService()\n\t}\n\treturn s, func() {\n\t\terrRemove := os.RemoveAll(dir)\n\t\tif errRemove != nil {\n\t\t\tlogger.WithError(errRemove).WithField(\"module\", \"testing\").Error(\"Could not remove directory\")\n\t\t}\n\t}\n}\n\nfunc clientSetUp(t *testing.T, service *service.Service) client.Client {\n\twsURL := \"ws:\/\/\" + service.WebServer().GetAddr() + \"\/stream\/user\/user01\"\n\tc, err := client.Open(wsURL, \"http:\/\/localhost\/\", 1000, false)\n\tassert.NoError(t, err)\n\treturn c\n}\n\nfunc subscriptionSetUp(t *testing.T, service *service.Service) {\n\ta := assert.New(t)\n\n\turlFormat := fmt.Sprintf(\"http:\/\/%s\/fcm\/%%d\/gcmId%%d\/%%s\", service.WebServer().GetAddr())\n\t\/\/ create GCM subscription\n\tresponse, errPost := http.Post(\n\t\tfmt.Sprintf(urlFormat, 1, 1, strings.TrimPrefix(testTopic, \"\/\")),\n\t\t\"text\/plain\",\n\t\tbytes.NewBufferString(\"\"),\n\t)\n\ta.NoError(errPost)\n\ta.Equal(response.StatusCode, 200)\n\n\tbody, errReadAll := ioutil.ReadAll(response.Body)\n\ta.NoError(errReadAll)\n\ta.Equal(fmt.Sprintf(`{\"subscribed\":\"%s\"}`, testTopic), string(body))\n}\n\nfunc assertMetrics(a *assert.Assertions, s *service.Service, expected expectedValues) {\n\ttime.Sleep(50 * time.Millisecond)\n\n\thttpClient := &http.Client{}\n\tu := fmt.Sprintf(\"http:\/\/%s%s\", s.WebServer().GetAddr(), defaultMetricsEndpoint)\n\trequest, err := http.NewRequest(http.MethodGet, u, nil)\n\ta.NoError(err)\n\n\tresponse, err := httpClient.Do(request)\n\ta.NoError(err)\n\tdefer response.Body.Close()\n\n\ta.Equal(http.StatusOK, response.StatusCode)\n\tbodyBytes, err := ioutil.ReadAll(response.Body)\n\ta.NoError(err)\n\tlogger.WithField(\"body\", string(bodyBytes)).Debug(\"metrics response\")\n\n\tmFCM := &fcmMetrics{}\n\terr = json.Unmarshal(bodyBytes, mFCM)\n\ta.NoError(err)\n\n\ta.Equal(0, mFCM.TotalSentMessageErrors)\n\ta.Equal(expected.MessageCount, mFCM.TotalSentMessages)\n\n\ta.Equal(0, mFCM.Minute.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Minute.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Minute.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Minute.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Hour.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Hour.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Hour.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Hour.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Day.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Day.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Day.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Day.CurrentMessagesTotalLatencies == 0)\n\n\tmRouter := &routerMetrics{}\n\terr = json.Unmarshal(bodyBytes, mRouter)\n\ta.NoError(err)\n\n\ta.Equal(expected.CurrentRoutes, mRouter.CurrentRoutes)\n\ta.Equal(expected.CurrentSubscriptions, mRouter.CurrentSubscriptions)\n}\n<commit_msg>some test.<commit_after>package server\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/cosminrentea\/gobbler\/client\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/configstring\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/connector\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/fcm\"\n\t\"github.com\/cosminrentea\/gobbler\/server\/service\"\n\t\"github.com\/cosminrentea\/gobbler\/testutil\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\ttestHttpPort = 11000\n\ttimeoutForOneMessage = 150 * time.Millisecond\n)\n\ntype fcmMetricsMap struct {\n\tCurrentErrorsCount int `json:\"current_errors_count\"`\n\tCurrentMessagesCount int `json:\"current_messages_count\"`\n\tCurrentMessagesTotalLatencies int `json:\"current_messages_total_latencies_nanos\"`\n\tCurrentErrorsTotalLatencies int `json:\"current_errors_total_latencies_nanos\"`\n}\n\ntype fcmMetrics struct {\n\tTotalSentMessages int `json:\"fcm.total_sent_messages\"`\n\tTotalSentMessageErrors int `json:\"fcm.total_sent_message_errors\"`\n\tMinute fcmMetricsMap `json:\"fcm.minute\"`\n\tHour fcmMetricsMap `json:\"fcm.hour\"`\n\tDay fcmMetricsMap `json:\"fcm.day\"`\n}\n\ntype routerMetrics struct {\n\tCurrentRoutes int `json:\"router.current_routes\"`\n\tCurrentSubscriptions int `json:\"router.current_subscriptions\"`\n}\n\ntype expectedValues struct {\n\tZeroLatencies bool\n\tMessageCount int\n\tCurrentRoutes int\n\tCurrentSubscriptions int\n}\n\n\/\/ Test that restarting the service continues to fetch messages from store for a subscription from lastID\nfunc TestFCMRestart(t *testing.T) {\n\t\/\/defer testutil.EnableDebugForMethod()()\n\tdefer testutil.ResetDefaultRegistryHealthCheck()\n\ta := assert.New(t)\n\n\treceiveC := make(chan bool)\n\ts, cleanup := serviceSetUp(t)\n\tdefer cleanup()\n\n\tassertMetrics(a, s, expectedValues{true, 0, 0, 0})\n\n\tvar fcmConn connector.ResponsiveConnector\n\tvar ok bool\n\tfor _, iface := range s.ModulesSortedByStartOrder() {\n\t\tfcmConn, ok = iface.(connector.ResponsiveConnector)\n\t\tif ok {\n\t\t\tbreak\n\t\t}\n\t}\n\ta.True(ok, \"There should be a module of type FCMConnector\")\n\n\t\/\/ add a high timeout so the messages are processed slow\n\tsender, err := fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\t\/\/ create subscription on topic\n\tsubscriptionSetUp(t, s)\n\ttime.Sleep(time.Second)\n\tassertMetrics(a, s, expectedValues{true, 0, 1, 1})\n\n\tc := clientSetUp(t, s)\n\tdefer c.Close()\n\n\t\/\/ send 3 messages in the router but read only one and close the service\n\tfor i := 0; i < 3; i++ {\n\t\tc.Send(testTopic, \"dummy body\", `{\"Correlation-Id\": \"id\"}`)\n\t}\n\n\t\/\/ receive one message only from FCM\n\tselect {\n\tcase <-receiveC:\n\tcase <-time.After(timeoutForOneMessage):\n\t\ta.Fail(\"Initial FCM message not received\")\n\t}\n\n\ttime.Sleep(time.Second)\n\n\tassertMetrics(a, s, expectedValues{false, 1, 1, 1})\n\tclose(receiveC)\n\t\/\/ restart the service\n\ta.NoError(s.Stop())\n\n\t\/\/ remake the sender\n\treceiveC = make(chan bool)\n\tsender, err = fcm.CreateFcmSender(fcm.SuccessFCMResponse, receiveC, 10*time.Millisecond)\n\ta.NoError(err)\n\tfcmConn.SetSender(sender)\n\n\ttime.Sleep(50 * time.Millisecond)\n\ttestutil.ResetDefaultRegistryHealthCheck()\n\ta.NoError(s.Start())\n\n\t\/\/TODO Cosmin Bogdan add 2 calls to assertMetrics before and after the next block\n\n\t\/\/ read the other 2 messages\n\tfor i := 0; i < 1; i++ {\n\t\tselect {\n\t\tcase <-receiveC:\n\t\tcase <-time.After(2 * timeoutForOneMessage):\n\t\t\ta.Fail(\"FCM message not received\")\n\t\t}\n\t}\n}\n\nfunc serviceSetUp(t *testing.T) (*service.Service, func()) {\n\tdir, errTempDir := ioutil.TempDir(\"\", \"guble_fcm_test\")\n\tassert.NoError(t, errTempDir)\n\n\t*Config.KVS = \"memory\"\n\t*Config.MS = \"file\"\n\t*Config.Cluster.NodeID = 0\n\t*Config.StoragePath = dir\n\t*Config.MetricsEndpoint = \"\/admin\/metrics-old\"\n\t*Config.WS.Enabled = true\n\t*Config.WS.Prefix = \"\/stream\/\"\n\t*Config.FCM.Enabled = true\n\t*Config.FCM.APIKey = \"WILL BE OVERWRITTEN\"\n\t*Config.FCM.Prefix = \"\/fcm\/\"\n\t*Config.FCM.Workers = 1 \/\/ use only one worker so we can control the number of messages that go to FCM\n\t*Config.APNS.Enabled = false\n\t*Config.KafkaProducer.Brokers = configstring.List{}\n\n\tvar s *service.Service\n\tfor s == nil {\n\t\ttestHttpPort++\n\t\tlogger.WithField(\"port\", testHttpPort).Debug(\"trying to use HTTP Port\")\n\t\t*Config.HttpListen = fmt.Sprintf(\"127.0.0.1:%d\", testHttpPort)\n\t\ts = StartService()\n\t}\n\treturn s, func() {\n\t\terrRemove := os.RemoveAll(dir)\n\t\tif errRemove != nil {\n\t\t\tlogger.WithError(errRemove).WithField(\"module\", \"testing\").Error(\"Could not remove directory\")\n\t\t}\n\t}\n}\n\nfunc clientSetUp(t *testing.T, service *service.Service) client.Client {\n\twsURL := \"ws:\/\/\" + service.WebServer().GetAddr() + \"\/stream\/user\/user01\"\n\tc, err := client.Open(wsURL, \"http:\/\/localhost\/\", 1000, false)\n\tassert.NoError(t, err)\n\treturn c\n}\n\nfunc subscriptionSetUp(t *testing.T, service *service.Service) {\n\ta := assert.New(t)\n\n\turlFormat := fmt.Sprintf(\"http:\/\/%s\/fcm\/%%d\/gcmId%%d\/%%s\", service.WebServer().GetAddr())\n\t\/\/ create GCM subscription\n\tresponse, errPost := http.Post(\n\t\tfmt.Sprintf(urlFormat, 1, 1, strings.TrimPrefix(testTopic, \"\/\")),\n\t\t\"text\/plain\",\n\t\tbytes.NewBufferString(\"\"),\n\t)\n\ta.NoError(errPost)\n\ta.Equal(response.StatusCode, 200)\n\n\tbody, errReadAll := ioutil.ReadAll(response.Body)\n\ta.NoError(errReadAll)\n\ta.Equal(fmt.Sprintf(`{\"subscribed\":\"%s\"}`, testTopic), string(body))\n}\n\nfunc assertMetrics(a *assert.Assertions, s *service.Service, expected expectedValues) {\n\ttime.Sleep(50 * time.Millisecond)\n\n\thttpClient := &http.Client{}\n\tu := fmt.Sprintf(\"http:\/\/%s%s\", s.WebServer().GetAddr(), defaultMetricsEndpoint)\n\trequest, err := http.NewRequest(http.MethodGet, u, nil)\n\ta.NoError(err)\n\n\tresponse, err := httpClient.Do(request)\n\ta.NoError(err)\n\tdefer response.Body.Close()\n\n\ta.Equal(http.StatusOK, response.StatusCode)\n\tbodyBytes, err := ioutil.ReadAll(response.Body)\n\ta.NoError(err)\n\tlogger.WithField(\"body\", string(bodyBytes)).Debug(\"metrics response\")\n\n\tmFCM := &fcmMetrics{}\n\terr = json.Unmarshal(bodyBytes, mFCM)\n\ta.NoError(err)\n\n\ta.Equal(0, mFCM.TotalSentMessageErrors)\n\ta.Equal(expected.MessageCount, mFCM.TotalSentMessages)\n\n\ta.Equal(0, mFCM.Minute.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Minute.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Minute.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Minute.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Hour.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Hour.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Hour.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Hour.CurrentMessagesTotalLatencies == 0)\n\n\ta.Equal(0, mFCM.Day.CurrentErrorsCount)\n\ta.Equal(expected.MessageCount, mFCM.Day.CurrentMessagesCount)\n\ta.Equal(0, mFCM.Day.CurrentErrorsTotalLatencies)\n\ta.Equal(expected.ZeroLatencies, mFCM.Day.CurrentMessagesTotalLatencies == 0)\n\n\tmRouter := &routerMetrics{}\n\terr = json.Unmarshal(bodyBytes, mRouter)\n\ta.NoError(err)\n\n\ta.Equal(expected.CurrentRoutes, mRouter.CurrentRoutes)\n\ta.Equal(expected.CurrentSubscriptions, mRouter.CurrentSubscriptions)\n}\n<|endoftext|>"} {"text":"<commit_before>package services\n\nimport (\n\t\"errors\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/pufferpanel\/pufferpanel\/database\"\n\t\"github.com\/pufferpanel\/pufferpanel\/models\"\n\t\"github.com\/pufferpanel\/pufferpanel\/models\/view\"\n\to2 \"github.com\/pufferpanel\/pufferpanel\/oauth2\"\n\toauth \"gopkg.in\/oauth2.v3\"\n\toauthErrors \"gopkg.in\/oauth2.v3\/errors\"\n\t\"gopkg.in\/oauth2.v3\/manage\"\n\t\"gopkg.in\/oauth2.v3\/server\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype OAuthService interface {\n\tHandleHTTPTokenRequest(writer http.ResponseWriter, request *http.Request)\n\n\tGetInfo(token string) (info *view.OAuthTokenInfoViewModel, valid bool, err error)\n\n\tCreate(user *models.User, server *models.Server, clientId string) (clientSecret string, existing bool, err error)\n\n\tUpdateScopes(client *models.ClientInfo, server *models.Server, scopes ...string) (err error)\n\n\tDelete(clientId string) (err error)\n\n\tGetByClientId(clientId string) (client *models.ClientInfo, exists bool, err error)\n\n\tGetByUser(user *models.User) (client *models.ClientInfo, exists bool, err error)\n\n\tHasRights(accessToken string, serverId *uint, scope string) (client *models.ClientInfo, allowed bool, err error)\n\n\tHasTokenExpired(info oauth.TokenInfo) (expired bool)\n}\n\ntype oauthService struct {\n\tserver *server.Server\n}\n\nvar _oauthService *oauthService\n\nfunc GetOAuthService() (service OAuthService, err error) {\n\tif _oauthService == nil {\n\t\terr = configureServer()\n\t}\n\treturn _oauthService, err\n}\n\nfunc configureServer() error {\n\tmanager := manage.NewDefaultManager()\n\tmanager.MapClientStorage(&o2.ClientStore{})\n\tmanager.MapTokenStorage(&o2.TokenStore{})\n\n\tsrv := server.NewServer(server.NewConfig(), manager)\n\tsrv.SetClientInfoHandler(server.ClientFormHandler)\n\n\tsrv.SetInternalErrorHandler(func(err error) (re *oauthErrors .Response) {\n\t\tlog.Println(\"Internal Error:\", err.Error())\n\t\treturn\n\t})\n\n\tsrv.SetResponseErrorHandler(func(re *oauthErrors.Response) {\n\t\tlog.Println(\"Response Error:\", re.Error.Error())\n\t})\n\n\t_oauthService = &oauthService{server: srv}\n\treturn nil\n}\n\nfunc (oauth2 *oauthService) HandleHTTPTokenRequest(writer http.ResponseWriter, request *http.Request) {\n\terr := oauth2.server.HandleTokenRequest(writer, request)\n\tif err != nil {\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc (oauth2 *oauthService) GetInfo(token string) (info *view.OAuthTokenInfoViewModel, valid bool, err error) {\n\tts := &o2.TokenStore{}\n\tinfo = &view.OAuthTokenInfoViewModel{Active: false}\n\n\titem, err := ts.GetByAccess(token)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tclient := &models.ClientInfo{\n\t\tClientID: item.GetClientID(),\n\t}\n\terr = db.Set(\"gorm:auto_preload\", true).Where(client).First(client).Error\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/see if the access token expiration is after now\n\tinfo = view.FromTokenInfo(item, client)\n\tvalid = info.Active\n\n\treturn\n}\n\nfunc (oauth2 *oauthService) Create(user *models.User, server *models.Server, clientId string) (clientSecret string, existing bool, err error) {\n\treturn\n}\n\nfunc (oauth2 *oauthService) UpdateScopes(client *models.ClientInfo, server *models.Server, scopes ...string) (err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif server != nil && server.ID == 0 {\n\t\tres := db.Where(server).First(server)\n\t\tif res.Error != nil {\n\t\t\treturn res.Error\n\t\t}\n\t}\n\n\tif client.ID == 0 {\n\t\tvar query *gorm.DB\n\t\tif server != nil && server.ID != 0 {\n\t\t\tquery = db.Preload(\"ServerScopes\", \"server_id = ?\", server.ID)\n\t\t} else {\n\t\t\tquery = db.Preload(\"ServerScopes\", \"server_id IS NULL\")\n\t\t}\n\t\tres := query.Where(client).First(client)\n\t\tif res.Error != nil {\n\t\t\treturn res.Error\n\t\t}\n\t\tif client.ID == 0 {\n\t\t\treturn errors.New(\"no client with given information\")\n\t\t}\n\t}\n\n\t\/\/delete ones which don't exist on the new list\n\tfor _, v := range client.ServerScopes {\n\t\ttoDelete := true\n\t\tfor _, s := range scopes {\n\t\t\tif s == v.Scope {\n\t\t\t\ttoDelete = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif toDelete {\n\t\t\tdb.Delete(v)\n\t\t}\n\t}\n\n\t\/\/add new values\n\tfor _, v := range scopes {\n\t\ttoAdd := true\n\t\tfor _, s := range client.ServerScopes {\n\t\t\tif v == s.Scope {\n\t\t\t\ttoAdd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif toAdd {\n\t\t\treplacement := &models.ClientServerScopes{\n\t\t\t\tScope: v,\n\t\t\t\tClientInfoID: client.ID,\n\t\t\t}\n\t\t\tif server != nil && server.ID != 0{\n\t\t\t\treplacement.ServerId = &server.ID\n\t\t\t}\n\n\t\t\tdb.Create(replacement)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (oauth2 *oauthService) Delete(clientId string) (err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmodel := &models.ClientInfo{ClientID: clientId}\n\n\treturn db.Delete(model).Error\n}\n\nfunc (oauth2 *oauthService) GetByClientId(clientId string) (client *models.ClientInfo, exists bool, err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tmodel := &models.ClientInfo{ClientID: clientId}\n\n\tres := db.Where(model).First(model)\n\n\treturn model, model.ID != 0, res.Error\n}\n\nfunc (oauth2 *oauthService) GetByUser(user *models.User) (client *models.ClientInfo, exists bool, err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tmodel := &models.ClientInfo{UserID: user.ID}\n\n\tres := db.Where(model).First(model)\n\n\treturn model, model.ID != 0, res.Error\n}\n\nfunc (oauth2 *oauthService) HasRights(accessToken string, serverId *uint, scope string) (client *models.ClientInfo, allowed bool, err error) {\n\tts := o2.TokenStore{}\n\n\tvar ti oauth.TokenInfo\n\tif ti, err = ts.GetByAccess(accessToken); err != nil {\n\t\treturn\n\t}\n\tif oauth2.HasTokenExpired(ti) {\n\t\treturn\n\t}\n\n\tconverted, ok := ti.(*models.TokenInfo)\n\tif !ok {\n\t\terr = errors.New(\"token info state was invalid\")\n\t\treturn\n\t}\n\n\tfor _, v := range converted.ClientInfo.ServerScopes {\n\t\tif (v.ServerId == nil && serverId == nil) || (v.ServerId == serverId) {\n\t\t\tif v.Scope == scope {\n\t\t\t\treturn &converted.ClientInfo, true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &converted.ClientInfo, false, nil\n}\n\nfunc (oauth2 *oauthService) HasTokenExpired(info oauth.TokenInfo) (expired bool) {\n\tif info == nil {\n\t\treturn true\n\t}\n\n\tif info.GetAccessCreateAt().Add(info.GetCodeExpiresIn()).Before(time.Now()) {\n\t\treturn true\n\t}\n\n\treturn false\n}<commit_msg>Remove need for \"admins\" to have redundant scopes<commit_after>package services\n\nimport (\n\t\"errors\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/pufferpanel\/pufferpanel\/database\"\n\t\"github.com\/pufferpanel\/pufferpanel\/models\"\n\t\"github.com\/pufferpanel\/pufferpanel\/models\/view\"\n\to2 \"github.com\/pufferpanel\/pufferpanel\/oauth2\"\n\toauth \"gopkg.in\/oauth2.v3\"\n\toauthErrors \"gopkg.in\/oauth2.v3\/errors\"\n\t\"gopkg.in\/oauth2.v3\/manage\"\n\t\"gopkg.in\/oauth2.v3\/server\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype OAuthService interface {\n\tHandleHTTPTokenRequest(writer http.ResponseWriter, request *http.Request)\n\n\tGetInfo(token string) (info *view.OAuthTokenInfoViewModel, valid bool, err error)\n\n\tCreate(user *models.User, server *models.Server, clientId string) (clientSecret string, existing bool, err error)\n\n\tUpdateScopes(client *models.ClientInfo, server *models.Server, scopes ...string) (err error)\n\n\tDelete(clientId string) (err error)\n\n\tGetByClientId(clientId string) (client *models.ClientInfo, exists bool, err error)\n\n\tGetByUser(user *models.User) (client *models.ClientInfo, exists bool, err error)\n\n\tHasRights(accessToken string, serverId *uint, scope string) (client *models.ClientInfo, allowed bool, err error)\n\n\tHasTokenExpired(info oauth.TokenInfo) (expired bool)\n}\n\ntype oauthService struct {\n\tserver *server.Server\n}\n\nvar _oauthService *oauthService\n\nfunc GetOAuthService() (service OAuthService, err error) {\n\tif _oauthService == nil {\n\t\terr = configureServer()\n\t}\n\treturn _oauthService, err\n}\n\nfunc configureServer() error {\n\tmanager := manage.NewDefaultManager()\n\tmanager.MapClientStorage(&o2.ClientStore{})\n\tmanager.MapTokenStorage(&o2.TokenStore{})\n\n\tsrv := server.NewServer(server.NewConfig(), manager)\n\tsrv.SetClientInfoHandler(server.ClientFormHandler)\n\n\tsrv.SetInternalErrorHandler(func(err error) (re *oauthErrors .Response) {\n\t\tlog.Println(\"Internal Error:\", err.Error())\n\t\treturn\n\t})\n\n\tsrv.SetResponseErrorHandler(func(re *oauthErrors.Response) {\n\t\tlog.Println(\"Response Error:\", re.Error.Error())\n\t})\n\n\t_oauthService = &oauthService{server: srv}\n\treturn nil\n}\n\nfunc (oauth2 *oauthService) HandleHTTPTokenRequest(writer http.ResponseWriter, request *http.Request) {\n\terr := oauth2.server.HandleTokenRequest(writer, request)\n\tif err != nil {\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc (oauth2 *oauthService) GetInfo(token string) (info *view.OAuthTokenInfoViewModel, valid bool, err error) {\n\tts := &o2.TokenStore{}\n\tinfo = &view.OAuthTokenInfoViewModel{Active: false}\n\n\titem, err := ts.GetByAccess(token)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tclient := &models.ClientInfo{\n\t\tClientID: item.GetClientID(),\n\t}\n\terr = db.Set(\"gorm:auto_preload\", true).Where(client).First(client).Error\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/see if the access token expiration is after now\n\tinfo = view.FromTokenInfo(item, client)\n\tvalid = info.Active\n\n\treturn\n}\n\nfunc (oauth2 *oauthService) Create(user *models.User, server *models.Server, clientId string) (clientSecret string, existing bool, err error) {\n\treturn\n}\n\nfunc (oauth2 *oauthService) UpdateScopes(client *models.ClientInfo, server *models.Server, scopes ...string) (err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif server != nil && server.ID == 0 {\n\t\tres := db.Where(server).First(server)\n\t\tif res.Error != nil {\n\t\t\treturn res.Error\n\t\t}\n\t}\n\n\tif client.ID == 0 {\n\t\tvar query *gorm.DB\n\t\tif server != nil && server.ID != 0 {\n\t\t\tquery = db.Preload(\"ServerScopes\", \"server_id = ?\", server.ID)\n\t\t} else {\n\t\t\tquery = db.Preload(\"ServerScopes\", \"server_id IS NULL\")\n\t\t}\n\t\tres := query.Where(client).First(client)\n\t\tif res.Error != nil {\n\t\t\treturn res.Error\n\t\t}\n\t\tif client.ID == 0 {\n\t\t\treturn errors.New(\"no client with given information\")\n\t\t}\n\t}\n\n\t\/\/delete ones which don't exist on the new list\n\tfor _, v := range client.ServerScopes {\n\t\ttoDelete := true\n\t\tfor _, s := range scopes {\n\t\t\tif s == v.Scope {\n\t\t\t\ttoDelete = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif toDelete {\n\t\t\tdb.Delete(v)\n\t\t}\n\t}\n\n\t\/\/add new values\n\tfor _, v := range scopes {\n\t\ttoAdd := true\n\t\tfor _, s := range client.ServerScopes {\n\t\t\tif v == s.Scope {\n\t\t\t\ttoAdd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif toAdd {\n\t\t\treplacement := &models.ClientServerScopes{\n\t\t\t\tScope: v,\n\t\t\t\tClientInfoID: client.ID,\n\t\t\t}\n\t\t\tif server != nil && server.ID != 0{\n\t\t\t\treplacement.ServerId = &server.ID\n\t\t\t}\n\n\t\t\tdb.Create(replacement)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (oauth2 *oauthService) Delete(clientId string) (err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmodel := &models.ClientInfo{ClientID: clientId}\n\n\treturn db.Delete(model).Error\n}\n\nfunc (oauth2 *oauthService) GetByClientId(clientId string) (client *models.ClientInfo, exists bool, err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tmodel := &models.ClientInfo{ClientID: clientId}\n\n\tres := db.Where(model).First(model)\n\n\treturn model, model.ID != 0, res.Error\n}\n\nfunc (oauth2 *oauthService) GetByUser(user *models.User) (client *models.ClientInfo, exists bool, err error) {\n\tdb, err := database.GetConnection()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tmodel := &models.ClientInfo{UserID: user.ID}\n\n\tres := db.Where(model).First(model)\n\n\treturn model, model.ID != 0, res.Error\n}\n\nfunc (oauth2 *oauthService) HasRights(accessToken string, serverId *uint, scope string) (client *models.ClientInfo, allowed bool, err error) {\n\tts := o2.TokenStore{}\n\n\tvar ti oauth.TokenInfo\n\tif ti, err = ts.GetByAccess(accessToken); err != nil {\n\t\treturn\n\t}\n\tif oauth2.HasTokenExpired(ti) {\n\t\treturn\n\t}\n\n\tconverted, ok := ti.(*models.TokenInfo)\n\tif !ok {\n\t\terr = errors.New(\"token info state was invalid\")\n\t\treturn\n\t}\n\n\tfor _, v := range converted.ClientInfo.ServerScopes {\n\t\tif v.ServerId == nil || v.ServerId == serverId {\n\t\t\tif v.Scope == scope {\n\t\t\t\treturn &converted.ClientInfo, true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &converted.ClientInfo, false, nil\n}\n\nfunc (oauth2 *oauthService) HasTokenExpired(info oauth.TokenInfo) (expired bool) {\n\tif info == nil {\n\t\treturn true\n\t}\n\n\tif info.GetAccessCreateAt().Add(info.GetCodeExpiresIn()).Before(time.Now()) {\n\t\treturn true\n\t}\n\n\treturn false\n}<|endoftext|>"} {"text":"<commit_before>package shells\n\nimport (\n\t\"gitlab.com\/gitlab-org\/gitlab-ci-multi-runner\/common\"\n\t\"gitlab.com\/gitlab-org\/gitlab-ci-multi-runner\/helpers\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype AbstractShell struct {\n}\n\ntype ShellWriter interface {\n\tVariable(variable common.BuildVariable)\n\tCommand(command string, arguments ...string)\n\tLine(text string)\n\n\tIfDirectory(path string)\n\tIfFile(file string)\n\tElse()\n\tEndIf()\n\n\tCd(path string)\n\tRmDir(path string)\n\tRmFile(path string)\n\tAbsolute(path string) string\n\n\tPrint(fmt string, arguments ...interface{})\n\tNotice(fmt string, arguments ...interface{})\n\tWarning(fmt string, arguments ...interface{})\n\tError(fmt string, arguments ...interface{})\n\tEmptyLine()\n}\n\nfunc (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {\n\tfeatures.Artifacts = true\n\tfeatures.Cache = true\n}\n\nfunc (s *AbstractShell) GetSupportedOptions() []string {\n\treturn []string{\"artifacts\", \"cache\"}\n}\n\nfunc (b *AbstractShell) writeCdBuildDir(w ShellWriter, info common.ShellScriptInfo) {\n\tw.Cd(info.Build.FullProjectDir())\n}\n\nfunc (b *AbstractShell) writeExports(w ShellWriter, info common.ShellScriptInfo) {\n\tfor _, variable := range info.Build.GetAllVariables() {\n\t\tw.Variable(variable)\n\t}\n}\n\nfunc (b *AbstractShell) writeTLSCAInfo(w ShellWriter, build *common.Build, key string) {\n\tif build.TLSCAChain != \"\" {\n\t\tw.Variable(common.BuildVariable{\n\t\t\tKey: key,\n\t\t\tValue: build.TLSCAChain,\n\t\t\tPublic: true,\n\t\t\tInternal: true,\n\t\t\tFile: true,\n\t\t})\n\t}\n}\n\nfunc (b *AbstractShell) writeCloneCmd(w ShellWriter, build *common.Build, projectDir string) {\n\tw.Notice(\"Cloning repository...\")\n\tw.RmDir(projectDir)\n\tw.Command(\"git\", \"clone\", build.RepoURL, projectDir)\n\tw.Cd(projectDir)\n}\n\nfunc (b *AbstractShell) writeFetchCmd(w ShellWriter, build *common.Build, projectDir string, gitDir string) {\n\tw.IfDirectory(gitDir)\n\tw.Notice(\"Fetching changes...\")\n\tw.Cd(projectDir)\n\tw.Command(\"git\", \"clean\", \"-ffdx\")\n\tw.Command(\"git\", \"reset\", \"--hard\")\n\tw.Command(\"git\", \"remote\", \"set-url\", \"origin\", build.RepoURL)\n\tw.Command(\"git\", \"fetch\", \"origin\")\n\tw.Else()\n\tb.writeCloneCmd(w, build, projectDir)\n\tw.EndIf()\n}\n\nfunc (b *AbstractShell) writeCheckoutCmd(w ShellWriter, build *common.Build) {\n\tw.Notice(\"Checking out %s as %s...\", build.Sha[0:8], build.RefName)\n\tw.Command(\"git\", \"checkout\", build.Sha)\n}\n\nfunc (b *AbstractShell) GeneratePreBuild(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\tgitDir := filepath.Join(build.FullProjectDir(), \".git\")\n\n\tb.writeTLSCAInfo(w, info.Build, \"GIT_SSL_CAINFO\")\n\n\tif build.AllowGitFetch {\n\t\tb.writeFetchCmd(w, build, projectDir, gitDir)\n\t} else {\n\t\tb.writeCloneCmd(w, build, projectDir)\n\t}\n\n\tb.writeCheckoutCmd(w, build)\n\n\tcacheFile := info.Build.CacheFile()\n\tcacheFile2 := info.Build.CacheFileForRef(\"master\")\n\tif cacheFile == \"\" {\n\t\tcacheFile = cacheFile2\n\t\tcacheFile2 = \"\"\n\t}\n\n\t\/\/ Try to restore from main cache, if not found cache for master\n\tif cacheFile != \"\" {\n\t\t\/\/ If we have cache, restore it\n\t\tw.IfFile(cacheFile)\n\t\tb.extractFiles(w, info.RunnerCommand, \"cache\", cacheFile)\n\t\tif cacheFile2 != \"\" {\n\t\t\tw.Else()\n\n\t\t\t\/\/ If we have cache, restore it\n\t\t\tw.IfFile(cacheFile2)\n\t\t\tb.extractFiles(w, info.RunnerCommand, \"cache\", cacheFile2)\n\t\t\tw.EndIf()\n\t\t}\n\t\tw.EndIf()\n\t}\n}\n\nfunc (b *AbstractShell) GenerateCommands(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\tb.writeCdBuildDir(w, info)\n\n\tcommands := info.Build.Commands\n\tcommands = strings.TrimSpace(commands)\n\tfor _, command := range strings.Split(commands, \"\\n\") {\n\t\tcommand = strings.TrimSpace(command)\n\t\tif !helpers.BoolOrDefault(info.Build.Runner.DisableVerbose, false) {\n\t\t\tif command != \"\" {\n\t\t\t\tw.Notice(\"$ %s\", command)\n\t\t\t} else {\n\t\t\t\tw.EmptyLine()\n\t\t\t}\n\t\t}\n\t\tw.Line(command)\n\t}\n}\n\nfunc (b *AbstractShell) archiveFiles(w ShellWriter, list interface{}, runnerCommand, archiveType, archivePath string) {\n\thash, ok := helpers.ToConfigMap(list)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The %s is not supported in this executor.\", archiveType)\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"archive\",\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\t\/\/ Collect paths\n\tif paths, ok := hash[\"paths\"].([]interface{}); ok {\n\t\tfor _, artifactPath := range paths {\n\t\t\tif file, ok := artifactPath.(string); ok {\n\t\t\t\targs = append(args, \"--path\", file)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Archive also untracked files\n\tif untracked, ok := hash[\"untracked\"].(bool); ok && untracked {\n\t\targs = append(args, \"--untracked\")\n\t}\n\n\t\/\/ Skip creating archive\n\tif len(args) <= 3 {\n\t\treturn\n\t}\n\n\t\/\/ Execute archive command\n\tw.Notice(\"Archiving %s...\", archiveType)\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) extractFiles(w ShellWriter, runnerCommand, archiveType, archivePath string) {\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The %s is not supported in this executor.\", archiveType)\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"extract\",\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\t\/\/ Execute extract command\n\tw.Notice(\"Restoring %s...\", archiveType)\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) uploadArtifacts(w ShellWriter, build *common.Build, runnerCommand, archivePath string) {\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The artifacts uploading is not supported in this executor.\")\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"artifacts\",\n\t\t\"--url\",\n\t\tbuild.Runner.URL,\n\t\t\"--token\",\n\t\tbuild.Token,\n\t\t\"--id\",\n\t\tstrconv.Itoa(build.ID),\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\tw.Notice(\"Uploading artifacts...\")\n\tb.writeTLSCAInfo(w, build, \"CI_SERVER_TLS_CA_FILE\")\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) GeneratePostBuild(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\tb.writeCdBuildDir(w, info)\n\n\t\/\/ Find cached files and archive them\n\tif cacheFile := info.Build.CacheFile(); cacheFile != \"\" {\n\t\tb.archiveFiles(w, info.Build.Options[\"cache\"], info.RunnerCommand, \"cache\", cacheFile)\n\t}\n\n\tif info.Build.Network != nil {\n\t\t\/\/ Find artifacts\n\t\tb.archiveFiles(w, info.Build.Options[\"artifacts\"], info.RunnerCommand, \"artifacts\", \"artifacts.zip\")\n\n\t\t\/\/ If archive is created upload it\n\t\tw.IfFile(\"artifacts.zip\")\n\t\tb.uploadArtifacts(w, info.Build, info.RunnerCommand, \"artifacts.zip\")\n\t\tw.RmFile(\"aritfacts.zip\")\n\t\tw.EndIf()\n\t}\n}\n<commit_msg>Make `go fmt` happy<commit_after>package shells\n\nimport (\n\t\"gitlab.com\/gitlab-org\/gitlab-ci-multi-runner\/common\"\n\t\"gitlab.com\/gitlab-org\/gitlab-ci-multi-runner\/helpers\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype AbstractShell struct {\n}\n\ntype ShellWriter interface {\n\tVariable(variable common.BuildVariable)\n\tCommand(command string, arguments ...string)\n\tLine(text string)\n\n\tIfDirectory(path string)\n\tIfFile(file string)\n\tElse()\n\tEndIf()\n\n\tCd(path string)\n\tRmDir(path string)\n\tRmFile(path string)\n\tAbsolute(path string) string\n\n\tPrint(fmt string, arguments ...interface{})\n\tNotice(fmt string, arguments ...interface{})\n\tWarning(fmt string, arguments ...interface{})\n\tError(fmt string, arguments ...interface{})\n\tEmptyLine()\n}\n\nfunc (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {\n\tfeatures.Artifacts = true\n\tfeatures.Cache = true\n}\n\nfunc (s *AbstractShell) GetSupportedOptions() []string {\n\treturn []string{\"artifacts\", \"cache\"}\n}\n\nfunc (b *AbstractShell) writeCdBuildDir(w ShellWriter, info common.ShellScriptInfo) {\n\tw.Cd(info.Build.FullProjectDir())\n}\n\nfunc (b *AbstractShell) writeExports(w ShellWriter, info common.ShellScriptInfo) {\n\tfor _, variable := range info.Build.GetAllVariables() {\n\t\tw.Variable(variable)\n\t}\n}\n\nfunc (b *AbstractShell) writeTLSCAInfo(w ShellWriter, build *common.Build, key string) {\n\tif build.TLSCAChain != \"\" {\n\t\tw.Variable(common.BuildVariable{\n\t\t\tKey: key,\n\t\t\tValue: build.TLSCAChain,\n\t\t\tPublic: true,\n\t\t\tInternal: true,\n\t\t\tFile: true,\n\t\t})\n\t}\n}\n\nfunc (b *AbstractShell) writeCloneCmd(w ShellWriter, build *common.Build, projectDir string) {\n\tw.Notice(\"Cloning repository...\")\n\tw.RmDir(projectDir)\n\tw.Command(\"git\", \"clone\", build.RepoURL, projectDir)\n\tw.Cd(projectDir)\n}\n\nfunc (b *AbstractShell) writeFetchCmd(w ShellWriter, build *common.Build, projectDir string, gitDir string) {\n\tw.IfDirectory(gitDir)\n\tw.Notice(\"Fetching changes...\")\n\tw.Cd(projectDir)\n\tw.Command(\"git\", \"clean\", \"-ffdx\")\n\tw.Command(\"git\", \"reset\", \"--hard\")\n\tw.Command(\"git\", \"remote\", \"set-url\", \"origin\", build.RepoURL)\n\tw.Command(\"git\", \"fetch\", \"origin\")\n\tw.Else()\n\tb.writeCloneCmd(w, build, projectDir)\n\tw.EndIf()\n}\n\nfunc (b *AbstractShell) writeCheckoutCmd(w ShellWriter, build *common.Build) {\n\tw.Notice(\"Checking out %s as %s...\", build.Sha[0:8], build.RefName)\n\tw.Command(\"git\", \"checkout\", build.Sha)\n}\n\nfunc (b *AbstractShell) GeneratePreBuild(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\tgitDir := filepath.Join(build.FullProjectDir(), \".git\")\n\n\tb.writeTLSCAInfo(w, info.Build, \"GIT_SSL_CAINFO\")\n\n\tif build.AllowGitFetch {\n\t\tb.writeFetchCmd(w, build, projectDir, gitDir)\n\t} else {\n\t\tb.writeCloneCmd(w, build, projectDir)\n\t}\n\n\tb.writeCheckoutCmd(w, build)\n\n\tcacheFile := info.Build.CacheFile()\n\tcacheFile2 := info.Build.CacheFileForRef(\"master\")\n\tif cacheFile == \"\" {\n\t\tcacheFile = cacheFile2\n\t\tcacheFile2 = \"\"\n\t}\n\n\t\/\/ Try to restore from main cache, if not found cache for master\n\tif cacheFile != \"\" {\n\t\t\/\/ If we have cache, restore it\n\t\tw.IfFile(cacheFile)\n\t\tb.extractFiles(w, info.RunnerCommand, \"cache\", cacheFile)\n\t\tif cacheFile2 != \"\" {\n\t\t\tw.Else()\n\n\t\t\t\/\/ If we have cache, restore it\n\t\t\tw.IfFile(cacheFile2)\n\t\t\tb.extractFiles(w, info.RunnerCommand, \"cache\", cacheFile2)\n\t\t\tw.EndIf()\n\t\t}\n\t\tw.EndIf()\n\t}\n}\n\nfunc (b *AbstractShell) GenerateCommands(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\tb.writeCdBuildDir(w, info)\n\n\tcommands := info.Build.Commands\n\tcommands = strings.TrimSpace(commands)\n\tfor _, command := range strings.Split(commands, \"\\n\") {\n\t\tcommand = strings.TrimSpace(command)\n\t\tif !helpers.BoolOrDefault(info.Build.Runner.DisableVerbose, false) {\n\t\t\tif command != \"\" {\n\t\t\t\tw.Notice(\"$ %s\", command)\n\t\t\t} else {\n\t\t\t\tw.EmptyLine()\n\t\t\t}\n\t\t}\n\t\tw.Line(command)\n\t}\n}\n\nfunc (b *AbstractShell) archiveFiles(w ShellWriter, list interface{}, runnerCommand, archiveType, archivePath string) {\n\thash, ok := helpers.ToConfigMap(list)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The %s is not supported in this executor.\", archiveType)\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"archive\",\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\t\/\/ Collect paths\n\tif paths, ok := hash[\"paths\"].([]interface{}); ok {\n\t\tfor _, artifactPath := range paths {\n\t\t\tif file, ok := artifactPath.(string); ok {\n\t\t\t\targs = append(args, \"--path\", file)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Archive also untracked files\n\tif untracked, ok := hash[\"untracked\"].(bool); ok && untracked {\n\t\targs = append(args, \"--untracked\")\n\t}\n\n\t\/\/ Skip creating archive\n\tif len(args) <= 3 {\n\t\treturn\n\t}\n\n\t\/\/ Execute archive command\n\tw.Notice(\"Archiving %s...\", archiveType)\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) extractFiles(w ShellWriter, runnerCommand, archiveType, archivePath string) {\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The %s is not supported in this executor.\", archiveType)\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"extract\",\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\t\/\/ Execute extract command\n\tw.Notice(\"Restoring %s...\", archiveType)\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) uploadArtifacts(w ShellWriter, build *common.Build, runnerCommand, archivePath string) {\n\tif runnerCommand == \"\" {\n\t\tw.Warning(\"The artifacts uploading is not supported in this executor.\")\n\t\treturn\n\t}\n\n\targs := []string{\n\t\t\"artifacts\",\n\t\t\"--url\",\n\t\tbuild.Runner.URL,\n\t\t\"--token\",\n\t\tbuild.Token,\n\t\t\"--id\",\n\t\tstrconv.Itoa(build.ID),\n\t\t\"--file\",\n\t\tarchivePath,\n\t}\n\n\tw.Notice(\"Uploading artifacts...\")\n\tb.writeTLSCAInfo(w, build, \"CI_SERVER_TLS_CA_FILE\")\n\tw.Command(runnerCommand, args...)\n}\n\nfunc (b *AbstractShell) GeneratePostBuild(w ShellWriter, info common.ShellScriptInfo) {\n\tb.writeExports(w, info)\n\tb.writeCdBuildDir(w, info)\n\n\t\/\/ Find cached files and archive them\n\tif cacheFile := info.Build.CacheFile(); cacheFile != \"\" {\n\t\tb.archiveFiles(w, info.Build.Options[\"cache\"], info.RunnerCommand, \"cache\", cacheFile)\n\t}\n\n\tif info.Build.Network != nil {\n\t\t\/\/ Find artifacts\n\t\tb.archiveFiles(w, info.Build.Options[\"artifacts\"], info.RunnerCommand, \"artifacts\", \"artifacts.zip\")\n\n\t\t\/\/ If archive is created upload it\n\t\tw.IfFile(\"artifacts.zip\")\n\t\tb.uploadArtifacts(w, info.Build, info.RunnerCommand, \"artifacts.zip\")\n\t\tw.RmFile(\"aritfacts.zip\")\n\t\tw.EndIf()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The following enables go generate to generate the doc.go file.\n\/\/go:generate go run $JIRI_ROOT\/release\/go\/src\/v.io\/x\/lib\/cmdline\/testdata\/gendoc.go .\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/v23\/context\"\n\t\"v.io\/v23\/security\"\n\t\"v.io\/v23\/verror\"\n\t\"v.io\/x\/lib\/cmdline\"\n\t\"v.io\/x\/ref\/lib\/v23cmd\"\n\t_ \"v.io\/x\/ref\/runtime\/factories\/generic\"\n)\n\nvar (\n\tflagConfigFile string\n\tflagKubectlBin string\n\tflagGcloudBin string\n\tflagResourceFile string\n\tflagVerbose bool\n\tflagTag string\n\tflagWait bool\n)\n\nfunc main() {\n\tcmdline.HideGlobalFlagsExcept()\n\n\tcmd := &cmdline.Command{\n\t\tName: \"vkube\",\n\t\tShort: \"Manages Vanadium applications on kubernetes\",\n\t\tLong: \"Manages Vanadium applications on kubernetes\",\n\t\tChildren: []*cmdline.Command{\n\t\t\tcmdStart,\n\t\t\tcmdUpdate,\n\t\t\tcmdStop,\n\t\t\tcmdStartClusterAgent,\n\t\t\tcmdStopClusterAgent,\n\t\t\tcmdClaimClusterAgent,\n\t\t\tcmdBuildDockerImages,\n\t\t\tcmdKubectl,\n\t\t},\n\t}\n\tcmd.Flags.StringVar(&flagConfigFile, \"config\", \"vkube.cfg\", \"The 'vkube.cfg' file to use.\")\n\tcmd.Flags.StringVar(&flagKubectlBin, \"kubectl\", \"kubectl\", \"The 'kubectl' binary to use.\")\n\tcmd.Flags.StringVar(&flagGcloudBin, \"gcloud\", \"gcloud\", \"The 'gcloud' binary to use.\")\n\n\tcmdStart.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to create the kubernetes resource.\")\n\tcmdStart.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for at least one replica to be ready.\")\n\n\tcmdUpdate.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to update the kubernetes resource.\")\n\tcmdUpdate.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for at least one replica to be ready after the update.\")\n\n\tcmdStop.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to stop the kubernetes resource.\")\n\n\tcmdStartClusterAgent.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for the cluster agent to be ready.\")\n\n\tcmdBuildDockerImages.Flags.BoolVar(&flagVerbose, \"v\", false, \"When true, the output is more verbose.\")\n\tcmdBuildDockerImages.Flags.StringVar(&flagTag, \"tag\", \"\", \"The tag to add to the docker images. If empty, the current timestamp is used.\")\n\n\tcmdline.Main(cmd)\n}\n\nfunc kubeCmdRunner(kcmd func(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error) cmdline.Runner {\n\treturn v23cmd.RunnerFunc(func(ctx *context.T, env *cmdline.Env, args []string) error {\n\t\tconfig, err := readConfig(flagConfigFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf, err := ioutil.TempFile(\"\", \"kubeconfig-\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tos.Setenv(\"KUBECONFIG\", f.Name())\n\t\tdefer os.Remove(f.Name())\n\t\tf.Close()\n\n\t\tif out, err := exec.Command(flagGcloudBin, \"container\", \"clusters\", \"get-credentials\", config.Cluster, \"--project\", config.Project, \"--zone\", config.Zone).CombinedOutput(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get credentials for %q: %v: %s\", config.Cluster, err, out)\n\t\t}\n\t\treturn kcmd(ctx, env, args, config)\n\t})\n}\n\nvar cmdStart = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStart),\n\tName: \"start\",\n\tShort: \"Starts an application.\",\n\tLong: \"Starts an application.\",\n\tArgsName: \"<extension>\",\n\tArgsLong: \"<extension> The blessing name extension to give to the application.\",\n}\n\nfunc runCmdStart(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif expected, got := 1, len(args); expected != got {\n\t\treturn env.UsageErrorf(\"start: incorrect number of arguments, expected %d, got %d\", expected, got)\n\t}\n\textension := args[0]\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range []string{\"spec.template.metadata.labels.application\", \"spec.template.metadata.labels.version\"} {\n\t\tif rc.getString(v) == \"\" {\n\t\t\tfmt.Fprintf(env.Stderr, \"WARNING: %q is not set. Rolling updates will not work.\\n\", v)\n\t\t}\n\t}\n\tagentAddr, err := findClusterAgent(config, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsecretName, err := makeSecretName()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnamespace := rc.getString(\"metadata.namespace\")\n\tappName := rc.getString(\"spec.template.metadata.labels.application\")\n\tif n, err := findReplicationControllerNamesForApp(appName, namespace); err != nil {\n\t\treturn err\n\t} else if len(n) != 0 {\n\t\treturn fmt.Errorf(\"replication controller for application=%q already running: %q\", appName, n)\n\t}\n\tif err := createSecret(ctx, secretName, namespace, agentAddr, extension); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Created secret successfully.\")\n\n\tif err := createReplicationController(ctx, config, rc, secretName); err != nil {\n\t\tif err := deleteSecret(ctx, config, secretName, rootBlessings(ctx), namespace); err != nil {\n\t\t\tctx.Error(err)\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Created replication controller successfully.\")\n\tif flagWait {\n\t\tif err := waitForReadyPods(appName, namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(env.Stdout, \"Application is running.\")\n\t}\n\treturn nil\n}\n\nvar cmdUpdate = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdUpdate),\n\tName: \"update\",\n\tShort: \"Updates an application.\",\n\tLong: \"Updates an application to a new version with a rolling update, preserving the existing blessings.\",\n}\n\nfunc runCmdUpdate(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := updateReplicationController(ctx, config, rc, env.Stdout, env.Stderr); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Updated replication controller successfully.\")\n\tif flagWait {\n\t\tnamespace := rc.getString(\"metadata.namespace\")\n\t\tappName := rc.getString(\"spec.template.metadata.labels.application\")\n\t\tif err := waitForReadyPods(appName, namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(env.Stdout, \"Application is running.\")\n\t}\n\treturn nil\n}\n\nvar cmdStop = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStop),\n\tName: \"stop\",\n\tShort: \"Stops an application.\",\n\tLong: \"Stops an application.\",\n}\n\nfunc runCmdStop(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := rc.getString(\"metadata.name\")\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"metadata.name must be set\")\n\t}\n\tnamespace := rc.getString(\"metadata.namespace\")\n\tsecretName, rootBlessings, err := findPodAttributes(name, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif out, err := kubectl(\"--namespace=\"+namespace, \"stop\", \"rc\", name); err != nil {\n\t\treturn fmt.Errorf(\"failed to stop replication controller: %v: %s\", err, out)\n\t}\n\tfmt.Fprintln(env.Stdout, \"Stopping replication controller.\")\n\tif err := deleteSecret(ctx, config, secretName, rootBlessings, namespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete secret: %v\", err)\n\t}\n\tfmt.Fprintln(env.Stdout, \"Deleting secret.\")\n\treturn nil\n}\n\nvar cmdStartClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStartClusterAgent),\n\tName: \"start-cluster-agent\",\n\tShort: \"Starts the cluster agent.\",\n\tLong: \"Starts the cluster agent.\",\n}\n\nfunc runCmdStartClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif err := createClusterAgent(ctx, config); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Starting cluster agent.\")\n\tif flagWait {\n\t\tif err := waitForReadyPods(clusterAgentApplicationName, config.ClusterAgent.Namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor {\n\t\t\tif _, err := findClusterAgent(config, true); err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t\tfmt.Fprintf(env.Stdout, \"Cluster agent is ready.\\n\")\n\t}\n\treturn nil\n}\n\nvar cmdStopClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStopClusterAgent),\n\tName: \"stop-cluster-agent\",\n\tShort: \"Stops the cluster agent.\",\n\tLong: \"Stops the cluster agent.\",\n}\n\nfunc runCmdStopClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif err := stopClusterAgent(config); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Stopping cluster agent.\")\n\treturn nil\n}\n\nvar cmdClaimClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdClaimClusterAgent),\n\tName: \"claim-cluster-agent\",\n\tShort: \"Claims the cluster agent.\",\n\tLong: \"Claims the cluster agent.\",\n}\n\nfunc runCmdClaimClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tmyBlessings, _ := v23.GetPrincipal(ctx).BlessingStore().Default()\n\tclaimer := clusterAgentClaimer(config)\n\tif !myBlessings.CouldHaveNames([]string{claimer}) {\n\t\treturn fmt.Errorf(\"principal isn't the expected claimer: got %q, expected %q\", myBlessings, claimer)\n\t}\n\textension := strings.TrimPrefix(config.ClusterAgent.Blessing, claimer+security.ChainSeparator)\n\tif err := claimClusterAgent(ctx, config, extension); err != nil {\n\t\tif verror.ErrorID(err) == verror.ErrUnknownMethod.ID {\n\t\t\treturn fmt.Errorf(\"already claimed\")\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Claimed cluster agent successfully.\")\n\treturn nil\n}\n\nvar cmdBuildDockerImages = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdBuildDockerImages),\n\tName: \"build-docker-images\",\n\tShort: \"Builds the docker images for the cluster and pod agents.\",\n\tLong: \"Builds the docker images for the cluster and pod agents.\",\n}\n\nfunc runCmdBuildDockerImages(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\treturn buildDockerImages(config, flagTag, flagVerbose, env.Stdout)\n}\n\nvar cmdKubectl = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdKubectl),\n\tName: \"kubectl\",\n\tShort: \"Runs kubectl on the cluster defined in vkube.cfg.\",\n\tLong: \"Runs kubectl on the cluster defined in vkube.cfg.\",\n\tArgsName: \"-- <kubectl args>\",\n\tArgsLong: \"<kubectl args> are passed directly to the kubectl command.\",\n}\n\nfunc runCmdKubectl(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tcmd := exec.Command(flagKubectlBin, args...)\n\tcmd.Stdin = env.Stdin\n\tcmd.Stdout = env.Stdout\n\tcmd.Stderr = env.Stderr\n\treturn cmd.Run()\n}\n<commit_msg>services\/cluster\/vkube: Show the cluster<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The following enables go generate to generate the doc.go file.\n\/\/go:generate go run $JIRI_ROOT\/release\/go\/src\/v.io\/x\/lib\/cmdline\/testdata\/gendoc.go .\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/v23\/context\"\n\t\"v.io\/v23\/security\"\n\t\"v.io\/v23\/verror\"\n\t\"v.io\/x\/lib\/cmdline\"\n\t\"v.io\/x\/ref\/lib\/v23cmd\"\n\t_ \"v.io\/x\/ref\/runtime\/factories\/generic\"\n)\n\nvar (\n\tflagConfigFile string\n\tflagKubectlBin string\n\tflagGcloudBin string\n\tflagResourceFile string\n\tflagVerbose bool\n\tflagTag string\n\tflagWait bool\n)\n\nfunc main() {\n\tcmdline.HideGlobalFlagsExcept()\n\n\tcmd := &cmdline.Command{\n\t\tName: \"vkube\",\n\t\tShort: \"Manages Vanadium applications on kubernetes\",\n\t\tLong: \"Manages Vanadium applications on kubernetes\",\n\t\tChildren: []*cmdline.Command{\n\t\t\tcmdStart,\n\t\t\tcmdUpdate,\n\t\t\tcmdStop,\n\t\t\tcmdStartClusterAgent,\n\t\t\tcmdStopClusterAgent,\n\t\t\tcmdClaimClusterAgent,\n\t\t\tcmdBuildDockerImages,\n\t\t\tcmdKubectl,\n\t\t},\n\t}\n\tcmd.Flags.StringVar(&flagConfigFile, \"config\", \"vkube.cfg\", \"The 'vkube.cfg' file to use.\")\n\tcmd.Flags.StringVar(&flagKubectlBin, \"kubectl\", \"kubectl\", \"The 'kubectl' binary to use.\")\n\tcmd.Flags.StringVar(&flagGcloudBin, \"gcloud\", \"gcloud\", \"The 'gcloud' binary to use.\")\n\n\tcmdStart.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to create the kubernetes resource.\")\n\tcmdStart.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for at least one replica to be ready.\")\n\n\tcmdUpdate.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to update the kubernetes resource.\")\n\tcmdUpdate.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for at least one replica to be ready after the update.\")\n\n\tcmdStop.Flags.StringVar(&flagResourceFile, \"f\", \"\", \"Filename to use to stop the kubernetes resource.\")\n\n\tcmdStartClusterAgent.Flags.BoolVar(&flagWait, \"wait\", false, \"Wait for the cluster agent to be ready.\")\n\n\tcmdBuildDockerImages.Flags.BoolVar(&flagVerbose, \"v\", false, \"When true, the output is more verbose.\")\n\tcmdBuildDockerImages.Flags.StringVar(&flagTag, \"tag\", \"\", \"The tag to add to the docker images. If empty, the current timestamp is used.\")\n\n\tcmdline.Main(cmd)\n}\n\nfunc kubeCmdRunner(kcmd func(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error) cmdline.Runner {\n\treturn v23cmd.RunnerFunc(func(ctx *context.T, env *cmdline.Env, args []string) error {\n\t\tconfig, err := readConfig(flagConfigFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf, err := ioutil.TempFile(\"\", \"kubeconfig-\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tos.Setenv(\"KUBECONFIG\", f.Name())\n\t\tdefer os.Remove(f.Name())\n\t\tf.Close()\n\n\t\tfmt.Fprintf(env.Stderr, \"Project: %s Zone: %s Cluster: %s\\n\\n\", config.Project, config.Zone, config.Cluster)\n\t\tif out, err := exec.Command(flagGcloudBin, \"container\", \"clusters\", \"get-credentials\", config.Cluster, \"--project\", config.Project, \"--zone\", config.Zone).CombinedOutput(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get credentials for %q: %v: %s\", config.Cluster, err, out)\n\t\t}\n\t\treturn kcmd(ctx, env, args, config)\n\t})\n}\n\nvar cmdStart = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStart),\n\tName: \"start\",\n\tShort: \"Starts an application.\",\n\tLong: \"Starts an application.\",\n\tArgsName: \"<extension>\",\n\tArgsLong: \"<extension> The blessing name extension to give to the application.\",\n}\n\nfunc runCmdStart(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif expected, got := 1, len(args); expected != got {\n\t\treturn env.UsageErrorf(\"start: incorrect number of arguments, expected %d, got %d\", expected, got)\n\t}\n\textension := args[0]\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range []string{\"spec.template.metadata.labels.application\", \"spec.template.metadata.labels.version\"} {\n\t\tif rc.getString(v) == \"\" {\n\t\t\tfmt.Fprintf(env.Stderr, \"WARNING: %q is not set. Rolling updates will not work.\\n\", v)\n\t\t}\n\t}\n\tagentAddr, err := findClusterAgent(config, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsecretName, err := makeSecretName()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnamespace := rc.getString(\"metadata.namespace\")\n\tappName := rc.getString(\"spec.template.metadata.labels.application\")\n\tif n, err := findReplicationControllerNamesForApp(appName, namespace); err != nil {\n\t\treturn err\n\t} else if len(n) != 0 {\n\t\treturn fmt.Errorf(\"replication controller for application=%q already running: %q\", appName, n)\n\t}\n\tif err := createSecret(ctx, secretName, namespace, agentAddr, extension); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Created secret successfully.\")\n\n\tif err := createReplicationController(ctx, config, rc, secretName); err != nil {\n\t\tif err := deleteSecret(ctx, config, secretName, rootBlessings(ctx), namespace); err != nil {\n\t\t\tctx.Error(err)\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Created replication controller successfully.\")\n\tif flagWait {\n\t\tif err := waitForReadyPods(appName, namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(env.Stdout, \"Application is running.\")\n\t}\n\treturn nil\n}\n\nvar cmdUpdate = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdUpdate),\n\tName: \"update\",\n\tShort: \"Updates an application.\",\n\tLong: \"Updates an application to a new version with a rolling update, preserving the existing blessings.\",\n}\n\nfunc runCmdUpdate(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := updateReplicationController(ctx, config, rc, env.Stdout, env.Stderr); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Updated replication controller successfully.\")\n\tif flagWait {\n\t\tnamespace := rc.getString(\"metadata.namespace\")\n\t\tappName := rc.getString(\"spec.template.metadata.labels.application\")\n\t\tif err := waitForReadyPods(appName, namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(env.Stdout, \"Application is running.\")\n\t}\n\treturn nil\n}\n\nvar cmdStop = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStop),\n\tName: \"stop\",\n\tShort: \"Stops an application.\",\n\tLong: \"Stops an application.\",\n}\n\nfunc runCmdStop(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif flagResourceFile == \"\" {\n\t\treturn fmt.Errorf(\"-f must be specified.\")\n\t}\n\trc, err := readReplicationControllerConfig(flagResourceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := rc.getString(\"metadata.name\")\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"metadata.name must be set\")\n\t}\n\tnamespace := rc.getString(\"metadata.namespace\")\n\tsecretName, rootBlessings, err := findPodAttributes(name, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif out, err := kubectl(\"--namespace=\"+namespace, \"stop\", \"rc\", name); err != nil {\n\t\treturn fmt.Errorf(\"failed to stop replication controller: %v: %s\", err, out)\n\t}\n\tfmt.Fprintln(env.Stdout, \"Stopping replication controller.\")\n\tif err := deleteSecret(ctx, config, secretName, rootBlessings, namespace); err != nil {\n\t\treturn fmt.Errorf(\"failed to delete secret: %v\", err)\n\t}\n\tfmt.Fprintln(env.Stdout, \"Deleting secret.\")\n\treturn nil\n}\n\nvar cmdStartClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStartClusterAgent),\n\tName: \"start-cluster-agent\",\n\tShort: \"Starts the cluster agent.\",\n\tLong: \"Starts the cluster agent.\",\n}\n\nfunc runCmdStartClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif err := createClusterAgent(ctx, config); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Starting cluster agent.\")\n\tif flagWait {\n\t\tif err := waitForReadyPods(clusterAgentApplicationName, config.ClusterAgent.Namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor {\n\t\t\tif _, err := findClusterAgent(config, true); err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t\tfmt.Fprintf(env.Stdout, \"Cluster agent is ready.\\n\")\n\t}\n\treturn nil\n}\n\nvar cmdStopClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdStopClusterAgent),\n\tName: \"stop-cluster-agent\",\n\tShort: \"Stops the cluster agent.\",\n\tLong: \"Stops the cluster agent.\",\n}\n\nfunc runCmdStopClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tif err := stopClusterAgent(config); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Stopping cluster agent.\")\n\treturn nil\n}\n\nvar cmdClaimClusterAgent = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdClaimClusterAgent),\n\tName: \"claim-cluster-agent\",\n\tShort: \"Claims the cluster agent.\",\n\tLong: \"Claims the cluster agent.\",\n}\n\nfunc runCmdClaimClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tmyBlessings, _ := v23.GetPrincipal(ctx).BlessingStore().Default()\n\tclaimer := clusterAgentClaimer(config)\n\tif !myBlessings.CouldHaveNames([]string{claimer}) {\n\t\treturn fmt.Errorf(\"principal isn't the expected claimer: got %q, expected %q\", myBlessings, claimer)\n\t}\n\textension := strings.TrimPrefix(config.ClusterAgent.Blessing, claimer+security.ChainSeparator)\n\tif err := claimClusterAgent(ctx, config, extension); err != nil {\n\t\tif verror.ErrorID(err) == verror.ErrUnknownMethod.ID {\n\t\t\treturn fmt.Errorf(\"already claimed\")\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Fprintln(env.Stdout, \"Claimed cluster agent successfully.\")\n\treturn nil\n}\n\nvar cmdBuildDockerImages = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdBuildDockerImages),\n\tName: \"build-docker-images\",\n\tShort: \"Builds the docker images for the cluster and pod agents.\",\n\tLong: \"Builds the docker images for the cluster and pod agents.\",\n}\n\nfunc runCmdBuildDockerImages(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\treturn buildDockerImages(config, flagTag, flagVerbose, env.Stdout)\n}\n\nvar cmdKubectl = &cmdline.Command{\n\tRunner: kubeCmdRunner(runCmdKubectl),\n\tName: \"kubectl\",\n\tShort: \"Runs kubectl on the cluster defined in vkube.cfg.\",\n\tLong: \"Runs kubectl on the cluster defined in vkube.cfg.\",\n\tArgsName: \"-- <kubectl args>\",\n\tArgsLong: \"<kubectl args> are passed directly to the kubectl command.\",\n}\n\nfunc runCmdKubectl(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {\n\tcmd := exec.Command(flagKubectlBin, args...)\n\tcmd.Stdin = env.Stdin\n\tcmd.Stdout = env.Stdout\n\tcmd.Stderr = env.Stderr\n\treturn cmd.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package context\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\ntype MockFileIO struct {\n\tdata map[string][]byte\n}\n\nfunc (mock *MockFileIO) ReadFile(path string) ([]byte, error) {\n\tif contents, ok := mock.data[path]; ok {\n\t\treturn contents, nil\n\t}\n\treturn nil, fmt.Errorf(\"Failed to read file at path: `%s`\", path)\n}\n\nfunc (mock *MockFileIO) WriteFile(path string, data []byte, perm os.FileMode) error {\n\tmock.data[path] = data\n\treturn nil\n}\n\nfunc (mock *MockFileIO) Stat(path string) (os.FileInfo, error) {\n\tif _, ok := mock.data[path]; ok {\n\t\treturn nil, nil\n\t}\n\treturn nil, fmt.Errorf(\"Failed to stat file at path: `%s`\", path)\n}\n\nfunc TestValidateDockercfgWithValidData(t *testing.T) {\n\tmockIO := &MockFileIO{data: map[string][]byte{\"path\": []byte(`\n {\n \"https:\/\/d.ims.io\": {\n \"auth\": \"StopLookingAtMySecretsYouJerk=\",\n \"email\": \"\"\n }\n }`)}}\n\tif err := validateDockercfg(\"path\", mockIO); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestValidateDockercfgWithInvalidData(t *testing.T) {\n\tmockIO := &MockFileIO{data: map[string][]byte{\"path\": []byte(`\n {\n \"https:\/\/d.ims.io\": {\n \"auth\": \"StopLookingAtMySecretsYouJerk=\",\n \"email\": \"\"\n },\n }`)}}\n\tif err := validateDockercfg(\"path\", mockIO); err == nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestValidateDockercfgWithInvalidPath(t *testing.T) {\n\tmockIO := &MockFileIO{data: map[string][]byte{\"path\": []byte(`\n {\n \"https:\/\/d.ims.io\": {\n \"auth\": \"StopLookingAtMySecretsYouJerk=\",\n \"email\": \"\"\n }\n }`)}}\n\tif err := validateDockercfg(\"badpath\", mockIO); err == nil {\n\t\tt.Fatal(err)\n\t}\n}\n<commit_msg>Remove commands_test.go<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"log\"\n \"errors\"\n \"os\/exec\"\n \"strings\"\n)\n\ntype DependencyItem struct {\n targetRelativePath string\n originalPath string\n}\n\ntype AppDeployer struct {\n processedLibs map[string]bool\n\n libsChannel chan string\n copyChannel chan DependencyItem\n stripChannel chan string\n rpathChannel chan string\n qtChannel chan string\n\n additionalLibPaths []string\n destinationPath string\n}\n\nfunc (ad *AppDeployer) DeployApp(exePath string) {\n go func() { ad.libsChannel <- exePath }()\n ad.processLibs()\n}\n\nfunc (ad *AppDeployer) processLibs() {\n for filepath := range ad.libsChannel {\n if _, ok := ad.processedLibs[filepath]; !ok {\n dependencies, err := findLddDependencies(filepath)\n if (err != nil) {\n log.Println(err)\n continue\n }\n\n ad.processedLibs[filepath] = true\n \/\/go func() { ad.copyChannel <- &{DependencyItem{originalPath: filepath} }()\n\n for _, dependPath := range dependencies {\n if _, ok := ad.processedLibs[dependPath]; !ok {\n go func() { ad.libsChannel <- dependPath }()\n }\n }\n }\n }\n}\n\nfunc findLddDependencies(filepath string) ([]string, error) {\n log.Printf(\"Inspecting %v\", filepath)\n\n out, err := exec.Command(\"ldd\", filepath).Output()\n if err != nil { return nil, err }\n\n dependencies := make([]string, 10)\n\n output := string(out)\n lines := strings.Split(output, \"\\n\")\n for _, line := range lines {\n line = strings.TrimSpace(line)\n libpath, err := parseLddOutputLine(line)\n\n if err != nil {\n log.Printf(\"Found dependency %v\", libpath)\n dependencies = append(dependencies, libpath)\n } else {\n log.Printf(\"Cannot parse ldd line: %v\", line)\n }\n }\n\n return dependencies, nil\n}\n\nfunc parseLddOutputLine(line string) (string, error) {\n if len(line) == 0 { return \"\", errors.New(\"Empty\") }\n\n var libpath string\n\n if strings.Contains(line, \" => \") {\n parts := strings.Split(line, \" => \")\n\n if len(parts) != 2 {\n return \"\", errors.New(\"Wrong format\")\n }\n\n shortpath := strings.TrimSpace(parts[0])\n\n if parts[1] == \"not found\" { return parts[0], nil }\n if len(strings.TrimSpace(parts[1])) == 0 { return \"\", errors.New(\"vdso\") }\n\n lastUseful := strings.LastIndex(parts[1], \"(0x\")\n if lastUseful != -1 {\n libpath = strings.TrimSpace(parts[1][:lastUseful])\n } else {\n libpath = shortpath\n }\n } else {\n log.Printf(\"Skipping ldd line: %v\", line)\n }\n\n return libpath, nil\n}\n<commit_msg>Minor fixes<commit_after>package main\n\nimport (\n \"log\"\n \"errors\"\n \"os\/exec\"\n \"strings\"\n)\n\ntype DependencyItem struct {\n targetRelativePath string\n originalPath string\n}\n\ntype AppDeployer struct {\n processedLibs map[string]bool\n\n libsChannel chan string\n copyChannel chan DependencyItem\n stripChannel chan string\n rpathChannel chan string\n qtChannel chan string\n\n additionalLibPaths []string\n destinationPath string\n}\n\nfunc (ad *AppDeployer) DeployApp(exePath string) {\n go func() { ad.libsChannel <- exePath }()\n ad.processLibs()\n}\n\nfunc (ad *AppDeployer) processLibs() {\n for filepath := range ad.libsChannel {\n if _, ok := ad.processedLibs[filepath]; !ok {\n dependencies, err := findLddDependencies(filepath)\n if (err != nil) {\n log.Println(err)\n continue\n }\n\n ad.processedLibs[filepath] = true\n \/\/go func() { ad.copyChannel <- &{DependencyItem{originalPath: filepath} }()\n\n for _, dependPath := range dependencies {\n if _, ok := ad.processedLibs[dependPath]; !ok {\n go func() { ad.libsChannel <- dependPath }()\n }\n }\n }\n }\n}\n\nfunc findLddDependencies(filepath string) ([]string, error) {\n log.Printf(\"Inspecting %v\", filepath)\n\n out, err := exec.Command(\"ldd\", filepath).Output()\n if err != nil { return nil, err }\n\n dependencies := make([]string, 10)\n\n output := string(out)\n lines := strings.Split(output, \"\\n\")\n for _, line := range lines {\n line = strings.TrimSpace(line)\n libpath, err := parseLddOutputLine(line)\n\n if err == nil {\n log.Printf(\"Found dependency %v\", libpath)\n dependencies = append(dependencies, libpath)\n } else {\n log.Printf(\"Cannot parse ldd line: %v\", line)\n }\n }\n\n return dependencies, nil\n}\n\nfunc parseLddOutputLine(line string) (string, error) {\n if len(line) == 0 { return \"\", errors.New(\"Empty\") }\n\n var libpath string\n\n if strings.Contains(line, \" => \") {\n parts := strings.Split(line, \" => \")\n\n if len(parts) != 2 {\n return \"\", errors.New(\"Wrong format\")\n }\n\n shortpath := strings.TrimSpace(parts[0])\n\n if parts[1] == \"not found\" { return parts[0], nil }\n if len(strings.TrimSpace(parts[1])) == 0 { return \"\", errors.New(\"vdso\") }\n\n lastUseful := strings.LastIndex(parts[1], \"(0x\")\n if lastUseful != -1 {\n libpath = strings.TrimSpace(parts[1][:lastUseful])\n } else {\n libpath = shortpath\n }\n } else {\n log.Printf(\"Skipping ldd line: %v\", line)\n }\n\n return libpath, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\nCgo enables the creation of Go packages that call C code.\n\nUsage: cgo [compiler options] file.go\n\nThe compiler options are passed through uninterpreted when\ninvoking gcc to compile the C parts of the package.\n\nThe input file.go is a syntactically valid Go source file that imports\nthe pseudo-package \"C\" and then refers to types such as C.size_t,\nvariables such as C.stdout, or functions such as C.putchar.\n\nIf the import of \"C\" is immediately preceded by a comment, that\ncomment is used as a header when compiling the C parts of\nthe package. For example:\n\n\t\/\/ #include <stdio.h>\n\t\/\/ #include <errno.h>\n\timport \"C\"\n\nCFLAGS and LDFLAGS may be defined with pseudo #cgo directives\nwithin these comments to tweak the behavior of gcc. Values defined\nin multiple directives are concatenated together. Options prefixed\nby $GOOS, $GOARCH, or $GOOS\/$GOARCH are only defined in matching\nsystems. For example:\n\n\t\/\/ #cgo CFLAGS: -DPNG_DEBUG=1\n\t\/\/ #cgo linux CFLAGS: -DLINUX=1\n\t\/\/ #cgo LDFLAGS: -lpng\n\t\/\/ #include <png.h>\n\timport \"C\"\n\nAlternatively, CFLAGS and LDFLAGS may be obtained via the pkg-config\ntool using a '#cgo pkg-config:' directive followed by the package names.\nFor example:\n\n\t\/\/ #cgo pkg-config: png cairo\n\t\/\/ #include <png.h>\n\timport \"C\"\n\nWithin the Go file, C identifiers or field names that are keywords in Go\ncan be accessed by prefixing them with an underscore: if x points at a C\nstruct with a field named \"type\", x._type accesses the field.\n\nThe standard C numeric types are available under the names\nC.char, C.schar (signed char), C.uchar (unsigned char),\nC.short, C.ushort (unsigned short), C.int, C.uint (unsigned int),\nC.long, C.ulong (unsigned long), C.longlong (long long),\nC.ulonglong (unsigned long long), C.float, C.double.\nThe C type void* is represented by Go's unsafe.Pointer.\n\nTo access a struct, union, or enum type directly, prefix it with\nstruct_, union_, or enum_, as in C.struct_stat.\n\nAny C function that returns a value may be called in a multiple\nassignment context to retrieve both the return value and the\nC errno variable as an error. For example:\n\n\tn, err := C.atoi(\"abc\")\n\nIn C, a function argument written as a fixed size array\nactually requires a pointer to the first element of the array.\nC compilers are aware of this calling convention and adjust\nthe call accordingly, but Go cannot. In Go, you must pass\nthe pointer to the first element explicitly: C.f(&x[0]).\n\nA few special functions convert between Go and C types\nby making copies of the data. In pseudo-Go definitions:\n\n\t\/\/ Go string to C string\n\t\/\/ The C string is allocated in the C heap using malloc.\n\t\/\/ It is the caller's responsibility to arrange for it to be\n\t\/\/ freed, such as by calling C.free.\n\tfunc C.CString(string) *C.char\n\n\t\/\/ C string to Go string\n\tfunc C.GoString(*C.char) string\n\n\t\/\/ C string, length to Go string\n\tfunc C.GoStringN(*C.char, C.int) string\n\n\t\/\/ C pointer, length to Go []byte\n\tfunc C.GoBytes(unsafe.Pointer, C.int) []byte\n\nGo functions can be exported for use by C code in the following way:\n\n\t\/\/export MyFunction\n\tfunc MyFunction(arg1, arg2 int, arg3 string) int64 {...}\n\n\t\/\/export MyFunction2\n\tfunc MyFunction2(arg1, arg2 int, arg3 string) (int64, C.char*) {...}\n\nThey will be available in the C code as:\n\n\textern int64 MyFunction(int arg1, int arg2, GoString arg3);\n\textern struct MyFunction2_return MyFunction2(int arg1, int arg2, GoString arg3);\n\nfound in _cgo_export.h generated header. Functions with multiple\nreturn values are mapped to functions returning a struct.\nNot all Go types can be mapped to C types in a useful way.\n\nCgo transforms the input file into four output files: two Go source\nfiles, a C file for 6c (or 8c or 5c), and a C file for gcc.\n\nThe standard package makefile rules in Make.pkg automate the\nprocess of using cgo. See $GOROOT\/misc\/cgo\/stdio and\n$GOROOT\/misc\/cgo\/gmp for examples.\n\nCgo does not yet work with gccgo.\n\nSee \"C? Go? Cgo!\" for an introduction to using cgo:\nhttp:\/\/blog.golang.org\/2011\/03\/c-go-cgo.html\n*\/\npackage documentation\n<commit_msg>cgo: fix typo in the documentation<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\nCgo enables the creation of Go packages that call C code.\n\nUsage: cgo [compiler options] file.go\n\nThe compiler options are passed through uninterpreted when\ninvoking gcc to compile the C parts of the package.\n\nThe input file.go is a syntactically valid Go source file that imports\nthe pseudo-package \"C\" and then refers to types such as C.size_t,\nvariables such as C.stdout, or functions such as C.putchar.\n\nIf the import of \"C\" is immediately preceded by a comment, that\ncomment is used as a header when compiling the C parts of\nthe package. For example:\n\n\t\/\/ #include <stdio.h>\n\t\/\/ #include <errno.h>\n\timport \"C\"\n\nCFLAGS and LDFLAGS may be defined with pseudo #cgo directives\nwithin these comments to tweak the behavior of gcc. Values defined\nin multiple directives are concatenated together. Options prefixed\nby $GOOS, $GOARCH, or $GOOS\/$GOARCH are only defined in matching\nsystems. For example:\n\n\t\/\/ #cgo CFLAGS: -DPNG_DEBUG=1\n\t\/\/ #cgo linux CFLAGS: -DLINUX=1\n\t\/\/ #cgo LDFLAGS: -lpng\n\t\/\/ #include <png.h>\n\timport \"C\"\n\nAlternatively, CFLAGS and LDFLAGS may be obtained via the pkg-config\ntool using a '#cgo pkg-config:' directive followed by the package names.\nFor example:\n\n\t\/\/ #cgo pkg-config: png cairo\n\t\/\/ #include <png.h>\n\timport \"C\"\n\nWithin the Go file, C identifiers or field names that are keywords in Go\ncan be accessed by prefixing them with an underscore: if x points at a C\nstruct with a field named \"type\", x._type accesses the field.\n\nThe standard C numeric types are available under the names\nC.char, C.schar (signed char), C.uchar (unsigned char),\nC.short, C.ushort (unsigned short), C.int, C.uint (unsigned int),\nC.long, C.ulong (unsigned long), C.longlong (long long),\nC.ulonglong (unsigned long long), C.float, C.double.\nThe C type void* is represented by Go's unsafe.Pointer.\n\nTo access a struct, union, or enum type directly, prefix it with\nstruct_, union_, or enum_, as in C.struct_stat.\n\nAny C function that returns a value may be called in a multiple\nassignment context to retrieve both the return value and the\nC errno variable as an error. For example:\n\n\tn, err := C.atoi(\"abc\")\n\nIn C, a function argument written as a fixed size array\nactually requires a pointer to the first element of the array.\nC compilers are aware of this calling convention and adjust\nthe call accordingly, but Go cannot. In Go, you must pass\nthe pointer to the first element explicitly: C.f(&x[0]).\n\nA few special functions convert between Go and C types\nby making copies of the data. In pseudo-Go definitions:\n\n\t\/\/ Go string to C string\n\t\/\/ The C string is allocated in the C heap using malloc.\n\t\/\/ It is the caller's responsibility to arrange for it to be\n\t\/\/ freed, such as by calling C.free.\n\tfunc C.CString(string) *C.char\n\n\t\/\/ C string to Go string\n\tfunc C.GoString(*C.char) string\n\n\t\/\/ C string, length to Go string\n\tfunc C.GoStringN(*C.char, C.int) string\n\n\t\/\/ C pointer, length to Go []byte\n\tfunc C.GoBytes(unsafe.Pointer, C.int) []byte\n\nGo functions can be exported for use by C code in the following way:\n\n\t\/\/export MyFunction\n\tfunc MyFunction(arg1, arg2 int, arg3 string) int64 {...}\n\n\t\/\/export MyFunction2\n\tfunc MyFunction2(arg1, arg2 int, arg3 string) (int64, *C.char) {...}\n\nThey will be available in the C code as:\n\n\textern int64 MyFunction(int arg1, int arg2, GoString arg3);\n\textern struct MyFunction2_return MyFunction2(int arg1, int arg2, GoString arg3);\n\nfound in _cgo_export.h generated header. Functions with multiple\nreturn values are mapped to functions returning a struct.\nNot all Go types can be mapped to C types in a useful way.\n\nCgo transforms the input file into four output files: two Go source\nfiles, a C file for 6c (or 8c or 5c), and a C file for gcc.\n\nThe standard package makefile rules in Make.pkg automate the\nprocess of using cgo. See $GOROOT\/misc\/cgo\/stdio and\n$GOROOT\/misc\/cgo\/gmp for examples.\n\nCgo does not yet work with gccgo.\n\nSee \"C? Go? Cgo!\" for an introduction to using cgo:\nhttp:\/\/blog.golang.org\/2011\/03\/c-go-cgo.html\n*\/\npackage documentation\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nFix finds Go programs that use old APIs and rewrites them to use\nnewer ones. After you update to a new Go release, fix helps make\nthe necessary changes to your programs.\n\nUsage:\n\tgo tool fix [-r name,...] [path ...]\n\nWithout an explicit path, fix reads standard input and writes the\nresult to standard output.\n\nIf the named path is a file, fix rewrites the named files in place.\nIf the named path is a directory, fix rewrites all .go files in that\ndirectory tree. When fix rewrites a file, it prints a line to standard\nerror giving the name of the file and the rewrite applied.\n\nIf the -diff flag is set, no files are rewritten. Instead fix prints\nthe differences a rewrite would introduce.\n\nThe -r flag restricts the set of rewrites considered to those in the\nnamed list. By default fix considers all known rewrites. Fix's\nrewrites are idempotent, so that it is safe to apply fix to updated\nor partially updated code even without using the -r flag.\n\nFix prints the full list of fixes it can apply in its help output;\nto see them, run go tool fix -?.\n\nFix does not make backup copies of the files that it edits.\nInstead, use a version control system's ``diff'' functionality to inspect\nthe changes that fix makes before committing them.\n*\/\npackage main\n<commit_msg>cmd\/fix: mention -help instead of the non-existent -? flag<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nFix finds Go programs that use old APIs and rewrites them to use\nnewer ones. After you update to a new Go release, fix helps make\nthe necessary changes to your programs.\n\nUsage:\n\tgo tool fix [-r name,...] [path ...]\n\nWithout an explicit path, fix reads standard input and writes the\nresult to standard output.\n\nIf the named path is a file, fix rewrites the named files in place.\nIf the named path is a directory, fix rewrites all .go files in that\ndirectory tree. When fix rewrites a file, it prints a line to standard\nerror giving the name of the file and the rewrite applied.\n\nIf the -diff flag is set, no files are rewritten. Instead fix prints\nthe differences a rewrite would introduce.\n\nThe -r flag restricts the set of rewrites considered to those in the\nnamed list. By default fix considers all known rewrites. Fix's\nrewrites are idempotent, so that it is safe to apply fix to updated\nor partially updated code even without using the -r flag.\n\nFix prints the full list of fixes it can apply in its help output;\nto see them, run go tool fix -help.\n\nFix does not make backup copies of the files that it edits.\nInstead, use a version control system's ``diff'' functionality to inspect\nthe changes that fix makes before committing them.\n*\/\npackage main\n<|endoftext|>"} {"text":"<commit_before>package facebook\n\nimport (\n\t\"http\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"os\"\n\t\"strconv\"\n\t\"fmt\"\n)\n\nconst (\n\tGRAPHURL = \"http:\/\/graph.facebook.com\/\"\n)\n\ntype Object struct {\n\tID string\n\tName string\n}\n\nfunc parseObject(value map[string]interface{}) (obj Object) {\n\tobj.ID = value[\"id\"].(string)\n\tobj.Name = value[\"name\"].(string)\n\treturn\n}\n\nfunc parseObjects(value []interface{}) (objs []Object) {\n\tobjs = make([]Object, len(value))\n\tfor i, v := range value {\n\t\tobjs[i] = parseObject(v.(map[string]interface{}))\n\t}\n\treturn\n}\n\ntype Link struct {\n\tName string\n\tURL string\n}\n\nfunc parseLink(value map[string]interface{}) (link Link) {\n\tlink.Name = value[\"name\"].(string)\n\tlink.URL = value[\"link\"].(string)\n\treturn\n}\n\nfunc parseLinks(value []interface{}) (links []Link) {\n\tlinks = make([]Link, len(value))\n\tfor i, v := range value {\n\t\tlinks[i] = parseLink(v.(map[string]interface{}))\n\t}\n\treturn\n}\n\nfunc getJsonMap(body []byte) (data map[string]interface{}, err os.Error) {\n\tvar values interface{}\n\n\tif err = json.Unmarshal(body, &values); err != nil {\n\t\treturn\n\t}\n\tdata = values.(map[string]interface{})\n\treturn\n}\n\nfunc fetchBody(method string) (body []byte, err os.Error) {\n\tbody, err = fetchPage(GRAPHURL + method)\n\treturn\n}\n\nfunc fetchPage(url string) (body []byte, err os.Error) {\n\tresp, _, err := http.Get(url) \/\/ Response, final URL, error\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err = ioutil.ReadAll(resp.Body)\n\treturn\n}\n\nfunc debugInterface(value interface{}, key, funcName string) {\n\tvar str string\n\tswitch value.(type) {\n\tcase float64:\n\t\tstr = strconv.Ftoa64(value.(float64), 'e', -1)\n\t}\n\tfmt.Printf(\"%s: Unknown pair: %s : %s\\n\", funcName, key, str)\n}\n<commit_msg>Add parseTime to parse different fetched time formats.<commit_after>package facebook\n\nimport (\n\t\"http\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"os\"\n\t\"strconv\"\n\t\"fmt\"\n\t\"time\"\n)\n\nconst (\n\tGRAPHURL = \"http:\/\/graph.facebook.com\/\"\n)\n\ntype Object struct {\n\tID string\n\tName string\n}\n\nfunc parseObject(value map[string]interface{}) (obj Object) {\n\tobj.ID = value[\"id\"].(string)\n\tobj.Name = value[\"name\"].(string)\n\treturn\n}\n\nfunc parseObjects(value []interface{}) (objs []Object) {\n\tobjs = make([]Object, len(value))\n\tfor i, v := range value {\n\t\tobjs[i] = parseObject(v.(map[string]interface{}))\n\t}\n\treturn\n}\n\ntype Link struct {\n\tName string\n\tURL string\n}\n\nfunc parseLink(value map[string]interface{}) (link Link) {\n\tlink.Name = value[\"name\"].(string)\n\tlink.URL = value[\"link\"].(string)\n\treturn\n}\n\nfunc parseLinks(value []interface{}) (links []Link) {\n\tlinks = make([]Link, len(value))\n\tfor i, v := range value {\n\t\tlinks[i] = parseLink(v.(map[string]interface{}))\n\t}\n\treturn\n}\n\nfunc getJsonMap(body []byte) (data map[string]interface{}, err os.Error) {\n\tvar values interface{}\n\n\tif err = json.Unmarshal(body, &values); err != nil {\n\t\treturn\n\t}\n\tdata = values.(map[string]interface{})\n\treturn\n}\n\nfunc fetchBody(method string) (body []byte, err os.Error) {\n\tbody, err = fetchPage(GRAPHURL + method)\n\treturn\n}\n\nfunc fetchPage(url string) (body []byte, err os.Error) {\n\tresp, _, err := http.Get(url) \/\/ Response, final URL, error\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err = ioutil.ReadAll(resp.Body)\n\treturn\n}\n\nfunc parseTime(value string) (t *time.Time, err os.Error) {\n\tt, err = time.Parse(\"RFC3339\", value)\n\tif err != nil {\n\t\tt, err = time.Parse(\"January 2, 2006\", value)\n\t\tif err != nil {\n\t\t\tt, err = time.Parse(\"Jan 2006\", value)\n\t\t}\n\t}\n\treturn\n}\n\nfunc debugInterface(value interface{}, key, funcName string) {\n\tvar str string\n\tswitch value.(type) {\n\tcase float64:\n\t\tstr = strconv.Ftoa64(value.(float64), 'e', -1)\n\t}\n\tfmt.Printf(\"%s: Unknown pair: %s : %s\\n\", funcName, key, str)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\nconst (\n\tCRDResourceKind = \"EtcdCluster\"\n\tCRDResourcePlural = \"etcdclusters\"\n\tgroupName = \"etcd.database.coreos.com\"\n\n\tEtcdBackupResourceKind = \"EtcdBackup\"\n\tEtcdBackupResourcePlural = \"etcdbackups\"\n)\n\nvar (\n\tSchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)\n\tAddToScheme = SchemeBuilder.AddToScheme\n\n\tSchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: \"v1beta2\"}\n\tCRDName = CRDResourcePlural + \".\" + groupName\n\tEtcdBackupCRDName = EtcdBackupResourceKind + \".\" + groupName\n)\n\n\/\/ Resource gets an EtcdCluster GroupResource for a specified resource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\n\/\/ addKnownTypes adds the set of types defined in this package to the supplied scheme.\nfunc addKnownTypes(s *runtime.Scheme) error {\n\ts.AddKnownTypes(SchemeGroupVersion,\n\t\t&EtcdCluster{},\n\t\t&EtcdClusterList{},\n\t\t&EtcdBackup{},\n\t\t&EtcdBackupList{},\n\t)\n\tmetav1.AddToGroupVersion(s, SchemeGroupVersion)\n\treturn nil\n}\n<commit_msg>apis: add etcd restore related constants in register.go<commit_after>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\nconst (\n\tCRDResourceKind = \"EtcdCluster\"\n\tCRDResourcePlural = \"etcdclusters\"\n\tgroupName = \"etcd.database.coreos.com\"\n\n\tEtcdBackupResourceKind = \"EtcdBackup\"\n\tEtcdBackupResourcePlural = \"etcdbackups\"\n\n\tEtcdRestoreResourceKind = \"EtcdRestore\"\n\tEtcdRestoreResourcePlural = \"etcdrestores\"\n)\n\nvar (\n\tSchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)\n\tAddToScheme = SchemeBuilder.AddToScheme\n\n\tSchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: \"v1beta2\"}\n\tCRDName = CRDResourcePlural + \".\" + groupName\n\tEtcdBackupCRDName = EtcdBackupResourceKind + \".\" + groupName\n\tEtcdRestoreCRDName = EtcdRestoreResourcePlural + \".\" + groupName\n)\n\n\/\/ Resource gets an EtcdCluster GroupResource for a specified resource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\n\/\/ addKnownTypes adds the set of types defined in this package to the supplied scheme.\nfunc addKnownTypes(s *runtime.Scheme) error {\n\ts.AddKnownTypes(SchemeGroupVersion,\n\t\t&EtcdCluster{},\n\t\t&EtcdClusterList{},\n\t\t&EtcdBackup{},\n\t\t&EtcdBackupList{},\n\t\t&EtcdRestore{},\n\t\t&EtcdRestoreList{},\n\t)\n\tmetav1.AddToGroupVersion(s, SchemeGroupVersion)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1alpha1\n\nimport (\n\t\"agones.dev\/agones\/pkg\/apis\/stable\"\n\t\"github.com\/pkg\/errors\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n)\n\nconst (\n\t\/\/ PortAllocation is for when a dynamically allocating GameServer\n\t\/\/ is being created, an open port needs to be allocated\n\tPortAllocation State = \"PortAllocation\"\n\t\/\/ Creating is before the Pod for the GameServer is being created\n\tCreating State = \"Creating\"\n\t\/\/ Starting is for when the Pods for the GameServer are being\n\t\/\/ created but have yet to register themselves as Ready\n\tStarting State = \"Starting\"\n\t\/\/ RequestReady is when the GameServer has declared that it is ready\n\tRequestReady State = \"RequestReady\"\n\t\/\/ Ready is when a GameServer is ready to take connections\n\t\/\/ from Game clients\n\tReady State = \"Ready\"\n\t\/\/ Shutdown is when the GameServer has shutdown and everything needs to be\n\t\/\/ deleted from the cluster\n\tShutdown State = \"Shutdown\"\n\t\/\/ Error is when something has gone with the Gameserver and\n\t\/\/ it cannot be resolved\n\tError State = \"Error\"\n\t\/\/ Unhealthy is when the GameServer has failed its health checks\n\tUnhealthy State = \"Unhealthy\"\n\n\t\/\/ Static PortPolicy means that the user defines the hostPort to be used\n\t\/\/ in the configuration.\n\tStatic PortPolicy = \"static\"\n\t\/\/ Dynamic PortPolicy means that the system will choose an open\n\t\/\/ port for the GameServer in question\n\tDynamic PortPolicy = \"dynamic\"\n\n\t\/\/ RoleLabel is the label in which the Agones role is specified.\n\t\/\/ Pods from a GameServer will have the value \"gameserver\"\n\tRoleLabel = stable.GroupName + \"\/role\"\n\t\/\/ GameServerLabelRole is the GameServer label value for RoleLabel\n\tGameServerLabelRole = \"gameserver\"\n\t\/\/ GameServerPodLabel is the label that the name of the GameServer\n\t\/\/ is set on the Pod the GameServer controls\n\tGameServerPodLabel = stable.GroupName + \"\/gameserver\"\n\t\/\/ GameServerContainerAnnotation is the annotation that stores\n\t\/\/ which container is the container that runs the dedicated game server\n\tGameServerContainerAnnotation = stable.GroupName + \"\/container\"\n\t\/\/ SidecarServiceAccountName is the default service account for managing access to get\/update GameServers\n\tSidecarServiceAccountName = \"agones-sdk\"\n)\n\nvar (\n\t\/\/ GameServerRolePodSelector is the selector to get all GameServer Pods\n\tGameServerRolePodSelector = labels.SelectorFromSet(labels.Set{RoleLabel: GameServerLabelRole})\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ GameServer is the data structure for a gameserver resource\ntype GameServer struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\tSpec GameServerSpec `json:\"spec\"`\n\tStatus GameServerStatus `json:\"status\"`\n}\n\n\/\/ GameServerSpec is the spec for a GameServer resource\ntype GameServerSpec struct {\n\t\/\/ Container specifies which Pod container is the game server. Only required if there is more than once\n\t\/\/ container defined\n\tContainer string `json:\"container,omitempty\"`\n\t\/\/ PortPolicy defined the policy for how the HostPort is populated.\n\t\/\/ Dynamic port will allocate a HostPort within the selected MIN_PORT and MAX_PORT range passed to the controller\n\t\/\/ at installation time.\n\t\/\/ When `static` is the policy specified, `HostPort` is required, to specify the port that game clients will\n\t\/\/ connect to\n\tPortPolicy PortPolicy `json:\"PortPolicy,omitempty\"`\n\t\/\/ ContainerPort is the port that is being opened on the game server process\n\tContainerPort int32 `json:\"containerPort\"`\n\t\/\/ HostPort the port exposed on the host for clients to connect to\n\tHostPort int32 `json:\"hostPort,omitempty\"`\n\t\/\/ Protocol is the network protocol being used. Defaults to UDP. TCP is the only other option\n\tProtocol corev1.Protocol `json:\"protocol,omitempty\"`\n\t\/\/ Health configures health checking\n\tHealth Health `json:\"health,omitempty\"`\n\t\/\/ Template describes the Pod that will be created for the GameServer\n\tTemplate corev1.PodTemplateSpec `json:\"template\"`\n}\n\n\/\/ State is the state for the GameServer\ntype State string\n\n\/\/ PortPolicy is the port policy for the GameServer\ntype PortPolicy string\n\n\/\/ Health configures health checking on the GameServer\ntype Health struct {\n\t\/\/ Disabled is whether health checking is disabled or not\n\tDisabled bool `json:\"disabled,omitempty\"`\n\t\/\/ PeriodSeconds is the number of seconds each health ping has to occur in\n\tPeriodSeconds int32 `json:\"periodSeconds,omitempty\"`\n\t\/\/ FailureThreshold how many failures in a row constitutes unhealthy\n\tFailureThreshold int32 `json:\"failureThreshold,omitempty\"`\n\t\/\/ InitialDelaySeconds initial delay before checking health\n\tInitialDelaySeconds int32 `json:\"initialDelaySeconds,omitempty\"`\n}\n\n\/\/ GameServerStatus is the status for a GameServer resource\ntype GameServerStatus struct {\n\t\/\/ State is the current state of a GameServer, e.g. Creating, Starting, Ready, etc\n\tState State `json:\"state\"`\n\tPort int32 `json:\"port\"`\n\tAddress string `json:\"address\"`\n\tNodeName string `json:\"nodeName\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ GameServerList is a list of GameServer resources\ntype GameServerList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata,omitempty\"`\n\n\tItems []GameServer `json:\"items\"`\n}\n\n\/\/ ApplyDefaults applies default values to the GameServer if they are not already populated\nfunc (gs *GameServer) ApplyDefaults() {\n\tgs.ObjectMeta.Finalizers = append(gs.ObjectMeta.Finalizers, stable.GroupName)\n\n\tif len(gs.Spec.Template.Spec.Containers) == 1 {\n\t\tgs.Spec.Container = gs.Spec.Template.Spec.Containers[0].Name\n\t}\n\n\t\/\/ basic spec\n\tif gs.Spec.PortPolicy == \"\" {\n\t\tgs.Spec.PortPolicy = Dynamic\n\t}\n\n\tif gs.Spec.Protocol == \"\" {\n\t\tgs.Spec.Protocol = \"UDP\"\n\t}\n\n\tif gs.Status.State == \"\" {\n\t\tif gs.Spec.PortPolicy == Dynamic {\n\t\t\tgs.Status.State = PortAllocation\n\t\t} else {\n\t\t\tgs.Status.State = Creating\n\t\t}\n\t}\n\n\t\/\/ health\n\tif !gs.Spec.Health.Disabled {\n\t\tif gs.Spec.Health.PeriodSeconds <= 0 {\n\t\t\tgs.Spec.Health.PeriodSeconds = 5\n\t\t}\n\t\tif gs.Spec.Health.FailureThreshold <= 0 {\n\t\t\tgs.Spec.Health.FailureThreshold = 3\n\t\t}\n\t\tif gs.Spec.Health.InitialDelaySeconds <= 0 {\n\t\t\tgs.Spec.Health.InitialDelaySeconds = 5\n\t\t}\n\t}\n}\n\n\/\/ Validate validates the GameServer configuration.\n\/\/ If a GameServer is invalid there will be > 0 values in\n\/\/ the returned array\nfunc (gs *GameServer) Validate() (bool, []metav1.StatusCause) {\n\tvar causes []metav1.StatusCause\n\n\t\/\/ make sure a name is specified when there is multiple containers in the pod.\n\tif len(gs.Spec.Container) == 0 && len(gs.Spec.Template.Spec.Containers) > 1 {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"container\",\n\t\t\tMessage: \"Container is required when using multiple containers in the pod templace\",\n\t\t})\n\t}\n\n\t\/\/ no host port when using dynamic PortPolicy\n\tif gs.Spec.HostPort > 0 && gs.Spec.PortPolicy == Dynamic {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"hostPort\",\n\t\t\tMessage: \"HostPort cannot be specified with a Dynamic PortPolicy\",\n\t\t})\n\t}\n\n\t\/\/ make sure the container value points to a valid container\n\t_, _, err := gs.FindGameServerContainer()\n\tif err != nil {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"container\",\n\t\t\tMessage: err.Error(),\n\t\t})\n\t}\n\n\treturn len(causes) == 0, causes\n}\n\n\/\/ FindGameServerContainer returns the container that is specified in\n\/\/ spec.gameServer.container. Returns the index and the value.\n\/\/ Returns an error if not found\nfunc (gs *GameServer) FindGameServerContainer() (int, corev1.Container, error) {\n\tfor i, c := range gs.Spec.Template.Spec.Containers {\n\t\tif c.Name == gs.Spec.Container {\n\t\t\treturn i, c, nil\n\t\t}\n\t}\n\n\treturn -1, corev1.Container{}, errors.Errorf(\"Could not find a container named %s\", gs.Spec.Container)\n}\n\n\/\/ Pod creates a new Pod from the PodTemplateSpec\n\/\/ attached for the\nfunc (gs *GameServer) Pod(sidecars ...corev1.Container) (*corev1.Pod, error) {\n\tpod := &corev1.Pod{\n\t\tObjectMeta: *gs.Spec.Template.ObjectMeta.DeepCopy(),\n\t\tSpec: *gs.Spec.Template.Spec.DeepCopy(),\n\t}\n\t\/\/ Switch to GenerateName, so that we always get a Unique name for the Pod, and there\n\t\/\/ can be no collisions\n\tpod.ObjectMeta.GenerateName = gs.ObjectMeta.Name + \"-\"\n\tpod.ObjectMeta.Name = \"\"\n\t\/\/ Pods for GameServers need to stay in the same namespace\n\tpod.ObjectMeta.Namespace = gs.ObjectMeta.Namespace\n\t\/\/ Make sure these are blank, just in case\n\tpod.ResourceVersion = \"\"\n\tif pod.Spec.ServiceAccountName == \"\" {\n\t\tpod.Spec.ServiceAccountName = SidecarServiceAccountName\n\t}\n\tpod.UID = \"\"\n\tif pod.ObjectMeta.Labels == nil {\n\t\tpod.ObjectMeta.Labels = make(map[string]string, 2)\n\t}\n\tif pod.ObjectMeta.Annotations == nil {\n\t\tpod.ObjectMeta.Annotations = make(map[string]string, 1)\n\t}\n\tpod.ObjectMeta.Labels[RoleLabel] = GameServerLabelRole\n\t\/\/ store the GameServer name as a label, for easy lookup later on\n\tpod.ObjectMeta.Labels[GameServerPodLabel] = gs.ObjectMeta.Name\n\t\/\/ store the GameServer container as an annotation, to make lookup at a Pod level easier\n\tpod.ObjectMeta.Annotations[GameServerContainerAnnotation] = gs.Spec.Container\n\n\tref := metav1.NewControllerRef(gs, SchemeGroupVersion.WithKind(\"GameServer\"))\n\tpod.ObjectMeta.OwnerReferences = append(pod.ObjectMeta.OwnerReferences, *ref)\n\n\ti, gsContainer, err := gs.FindGameServerContainer()\n\t\/\/ this shouldn't happen, but if it does.\n\tif err != nil {\n\t\treturn pod, err\n\t}\n\n\tcp := corev1.ContainerPort{\n\t\tContainerPort: gs.Spec.ContainerPort,\n\t\tHostPort: gs.Spec.HostPort,\n\t\tProtocol: gs.Spec.Protocol,\n\t}\n\tgsContainer.Ports = append(gsContainer.Ports, cp)\n\tpod.Spec.Containers[i] = gsContainer\n\n\tpod.Spec.Containers = append(pod.Spec.Containers, sidecars...)\n\treturn pod, nil\n}\n<commit_msg>Fixed some typos.<commit_after>\/\/ Copyright 2017 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1alpha1\n\nimport (\n\t\"agones.dev\/agones\/pkg\/apis\/stable\"\n\t\"github.com\/pkg\/errors\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n)\n\nconst (\n\t\/\/ PortAllocation is for when a dynamically allocating GameServer\n\t\/\/ is being created, an open port needs to be allocated\n\tPortAllocation State = \"PortAllocation\"\n\t\/\/ Creating is before the Pod for the GameServer is being created\n\tCreating State = \"Creating\"\n\t\/\/ Starting is for when the Pods for the GameServer are being\n\t\/\/ created but have yet to register themselves as Ready\n\tStarting State = \"Starting\"\n\t\/\/ RequestReady is when the GameServer has declared that it is ready\n\tRequestReady State = \"RequestReady\"\n\t\/\/ Ready is when a GameServer is ready to take connections\n\t\/\/ from Game clients\n\tReady State = \"Ready\"\n\t\/\/ Shutdown is when the GameServer has shutdown and everything needs to be\n\t\/\/ deleted from the cluster\n\tShutdown State = \"Shutdown\"\n\t\/\/ Error is when something has gone with the Gameserver and\n\t\/\/ it cannot be resolved\n\tError State = \"Error\"\n\t\/\/ Unhealthy is when the GameServer has failed its health checks\n\tUnhealthy State = \"Unhealthy\"\n\n\t\/\/ Static PortPolicy means that the user defines the hostPort to be used\n\t\/\/ in the configuration.\n\tStatic PortPolicy = \"static\"\n\t\/\/ Dynamic PortPolicy means that the system will choose an open\n\t\/\/ port for the GameServer in question\n\tDynamic PortPolicy = \"dynamic\"\n\n\t\/\/ RoleLabel is the label in which the Agones role is specified.\n\t\/\/ Pods from a GameServer will have the value \"gameserver\"\n\tRoleLabel = stable.GroupName + \"\/role\"\n\t\/\/ GameServerLabelRole is the GameServer label value for RoleLabel\n\tGameServerLabelRole = \"gameserver\"\n\t\/\/ GameServerPodLabel is the label that the name of the GameServer\n\t\/\/ is set on the Pod the GameServer controls\n\tGameServerPodLabel = stable.GroupName + \"\/gameserver\"\n\t\/\/ GameServerContainerAnnotation is the annotation that stores\n\t\/\/ which container is the container that runs the dedicated game server\n\tGameServerContainerAnnotation = stable.GroupName + \"\/container\"\n\t\/\/ SidecarServiceAccountName is the default service account for managing access to get\/update GameServers\n\tSidecarServiceAccountName = \"agones-sdk\"\n)\n\nvar (\n\t\/\/ GameServerRolePodSelector is the selector to get all GameServer Pods\n\tGameServerRolePodSelector = labels.SelectorFromSet(labels.Set{RoleLabel: GameServerLabelRole})\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ GameServer is the data structure for a gameserver resource\ntype GameServer struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\tSpec GameServerSpec `json:\"spec\"`\n\tStatus GameServerStatus `json:\"status\"`\n}\n\n\/\/ GameServerSpec is the spec for a GameServer resource\ntype GameServerSpec struct {\n\t\/\/ Container specifies which Pod container is the game server. Only required if there is more than one\n\t\/\/ container defined\n\tContainer string `json:\"container,omitempty\"`\n\t\/\/ PortPolicy defines the policy for how the HostPort is populated.\n\t\/\/ Dynamic port will allocate a HostPort within the selected MIN_PORT and MAX_PORT range passed to the controller\n\t\/\/ at installation time.\n\t\/\/ When `static` is the policy specified, `HostPort` is required, to specify the port that game clients will\n\t\/\/ connect to\n\tPortPolicy PortPolicy `json:\"PortPolicy,omitempty\"`\n\t\/\/ ContainerPort is the port that is being opened on the game server process\n\tContainerPort int32 `json:\"containerPort\"`\n\t\/\/ HostPort the port exposed on the host for clients to connect to\n\tHostPort int32 `json:\"hostPort,omitempty\"`\n\t\/\/ Protocol is the network protocol being used. Defaults to UDP. TCP is the only other option\n\tProtocol corev1.Protocol `json:\"protocol,omitempty\"`\n\t\/\/ Health configures health checking\n\tHealth Health `json:\"health,omitempty\"`\n\t\/\/ Template describes the Pod that will be created for the GameServer\n\tTemplate corev1.PodTemplateSpec `json:\"template\"`\n}\n\n\/\/ State is the state for the GameServer\ntype State string\n\n\/\/ PortPolicy is the port policy for the GameServer\ntype PortPolicy string\n\n\/\/ Health configures health checking on the GameServer\ntype Health struct {\n\t\/\/ Disabled is whether health checking is disabled or not\n\tDisabled bool `json:\"disabled,omitempty\"`\n\t\/\/ PeriodSeconds is the number of seconds each health ping has to occur in\n\tPeriodSeconds int32 `json:\"periodSeconds,omitempty\"`\n\t\/\/ FailureThreshold how many failures in a row constitutes unhealthy\n\tFailureThreshold int32 `json:\"failureThreshold,omitempty\"`\n\t\/\/ InitialDelaySeconds initial delay before checking health\n\tInitialDelaySeconds int32 `json:\"initialDelaySeconds,omitempty\"`\n}\n\n\/\/ GameServerStatus is the status for a GameServer resource\ntype GameServerStatus struct {\n\t\/\/ State is the current state of a GameServer, e.g. Creating, Starting, Ready, etc\n\tState State `json:\"state\"`\n\tPort int32 `json:\"port\"`\n\tAddress string `json:\"address\"`\n\tNodeName string `json:\"nodeName\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ GameServerList is a list of GameServer resources\ntype GameServerList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata,omitempty\"`\n\n\tItems []GameServer `json:\"items\"`\n}\n\n\/\/ ApplyDefaults applies default values to the GameServer if they are not already populated\nfunc (gs *GameServer) ApplyDefaults() {\n\tgs.ObjectMeta.Finalizers = append(gs.ObjectMeta.Finalizers, stable.GroupName)\n\n\tif len(gs.Spec.Template.Spec.Containers) == 1 {\n\t\tgs.Spec.Container = gs.Spec.Template.Spec.Containers[0].Name\n\t}\n\n\t\/\/ basic spec\n\tif gs.Spec.PortPolicy == \"\" {\n\t\tgs.Spec.PortPolicy = Dynamic\n\t}\n\n\tif gs.Spec.Protocol == \"\" {\n\t\tgs.Spec.Protocol = \"UDP\"\n\t}\n\n\tif gs.Status.State == \"\" {\n\t\tif gs.Spec.PortPolicy == Dynamic {\n\t\t\tgs.Status.State = PortAllocation\n\t\t} else {\n\t\t\tgs.Status.State = Creating\n\t\t}\n\t}\n\n\t\/\/ health\n\tif !gs.Spec.Health.Disabled {\n\t\tif gs.Spec.Health.PeriodSeconds <= 0 {\n\t\t\tgs.Spec.Health.PeriodSeconds = 5\n\t\t}\n\t\tif gs.Spec.Health.FailureThreshold <= 0 {\n\t\t\tgs.Spec.Health.FailureThreshold = 3\n\t\t}\n\t\tif gs.Spec.Health.InitialDelaySeconds <= 0 {\n\t\t\tgs.Spec.Health.InitialDelaySeconds = 5\n\t\t}\n\t}\n}\n\n\/\/ Validate validates the GameServer configuration.\n\/\/ If a GameServer is invalid there will be > 0 values in\n\/\/ the returned array\nfunc (gs *GameServer) Validate() (bool, []metav1.StatusCause) {\n\tvar causes []metav1.StatusCause\n\n\t\/\/ make sure a name is specified when there is multiple containers in the pod.\n\tif len(gs.Spec.Container) == 0 && len(gs.Spec.Template.Spec.Containers) > 1 {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"container\",\n\t\t\tMessage: \"Container is required when using multiple containers in the pod template\",\n\t\t})\n\t}\n\n\t\/\/ no host port when using dynamic PortPolicy\n\tif gs.Spec.HostPort > 0 && gs.Spec.PortPolicy == Dynamic {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"hostPort\",\n\t\t\tMessage: \"HostPort cannot be specified with a Dynamic PortPolicy\",\n\t\t})\n\t}\n\n\t\/\/ make sure the container value points to a valid container\n\t_, _, err := gs.FindGameServerContainer()\n\tif err != nil {\n\t\tcauses = append(causes, metav1.StatusCause{\n\t\t\tType: metav1.CauseTypeFieldValueInvalid,\n\t\t\tField: \"container\",\n\t\t\tMessage: err.Error(),\n\t\t})\n\t}\n\n\treturn len(causes) == 0, causes\n}\n\n\/\/ FindGameServerContainer returns the container that is specified in\n\/\/ spec.gameServer.container. Returns the index and the value.\n\/\/ Returns an error if not found\nfunc (gs *GameServer) FindGameServerContainer() (int, corev1.Container, error) {\n\tfor i, c := range gs.Spec.Template.Spec.Containers {\n\t\tif c.Name == gs.Spec.Container {\n\t\t\treturn i, c, nil\n\t\t}\n\t}\n\n\treturn -1, corev1.Container{}, errors.Errorf(\"Could not find a container named %s\", gs.Spec.Container)\n}\n\n\/\/ Pod creates a new Pod from the PodTemplateSpec\n\/\/ attached to the GameServer resource\nfunc (gs *GameServer) Pod(sidecars ...corev1.Container) (*corev1.Pod, error) {\n\tpod := &corev1.Pod{\n\t\tObjectMeta: *gs.Spec.Template.ObjectMeta.DeepCopy(),\n\t\tSpec: *gs.Spec.Template.Spec.DeepCopy(),\n\t}\n\t\/\/ Switch to GenerateName, so that we always get a Unique name for the Pod, and there\n\t\/\/ can be no collisions\n\tpod.ObjectMeta.GenerateName = gs.ObjectMeta.Name + \"-\"\n\tpod.ObjectMeta.Name = \"\"\n\t\/\/ Pods for GameServers need to stay in the same namespace\n\tpod.ObjectMeta.Namespace = gs.ObjectMeta.Namespace\n\t\/\/ Make sure these are blank, just in case\n\tpod.ResourceVersion = \"\"\n\tif pod.Spec.ServiceAccountName == \"\" {\n\t\tpod.Spec.ServiceAccountName = SidecarServiceAccountName\n\t}\n\tpod.UID = \"\"\n\tif pod.ObjectMeta.Labels == nil {\n\t\tpod.ObjectMeta.Labels = make(map[string]string, 2)\n\t}\n\tif pod.ObjectMeta.Annotations == nil {\n\t\tpod.ObjectMeta.Annotations = make(map[string]string, 1)\n\t}\n\tpod.ObjectMeta.Labels[RoleLabel] = GameServerLabelRole\n\t\/\/ store the GameServer name as a label, for easy lookup later on\n\tpod.ObjectMeta.Labels[GameServerPodLabel] = gs.ObjectMeta.Name\n\t\/\/ store the GameServer container as an annotation, to make lookup at a Pod level easier\n\tpod.ObjectMeta.Annotations[GameServerContainerAnnotation] = gs.Spec.Container\n\n\tref := metav1.NewControllerRef(gs, SchemeGroupVersion.WithKind(\"GameServer\"))\n\tpod.ObjectMeta.OwnerReferences = append(pod.ObjectMeta.OwnerReferences, *ref)\n\n\ti, gsContainer, err := gs.FindGameServerContainer()\n\t\/\/ this shouldn't happen, but if it does.\n\tif err != nil {\n\t\treturn pod, err\n\t}\n\n\tcp := corev1.ContainerPort{\n\t\tContainerPort: gs.Spec.ContainerPort,\n\t\tHostPort: gs.Spec.HostPort,\n\t\tProtocol: gs.Spec.Protocol,\n\t}\n\tgsContainer.Ports = append(gsContainer.Ports, cp)\n\tpod.Spec.Containers[i] = gsContainer\n\n\tpod.Spec.Containers = append(pod.Spec.Containers, sidecars...)\n\treturn pod, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package authldap\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"text\/template\"\n\t\"time\"\n\n\tldap \"gopkg.in\/ldap.v2\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/aakso\/ssh-inscribe\/pkg\/auth\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tConnectModePlain int = iota\n\tConnectModeLDAPS\n\tConnectModeStartTLS\n\n\tUserBindDN = \"UserBindDN\"\n\tUserSearchFilter = \"UserSearchFilter\"\n\tGroupSearchFilter = \"GroupSearchFilter\"\n\tSubjectName = \"SubjectName\"\n\tPrincipal = \"Principal\"\n\n\tAuthLDAPUsertEntry = \"authLDAPUserEntry\"\n)\n\ntype AuthLDAP struct {\n\tlog *logrus.Entry\n\tconfig *Config\n\turl *url.URL\n\tconnectMode int\n\n\ttpls *template.Template\n}\n\nfunc (al *AuthLDAP) Authenticate(pctx *auth.AuthContext, creds *auth.Credentials) (*auth.AuthContext, bool) {\n\tvar err error\n\tlog := al.log.WithField(\"action\", \"authenticate\")\n\tif v, ok := creds.Meta[auth.MetaAuditID]; ok {\n\t\tlog = log.WithField(auth.MetaAuditID, v)\n\t}\n\n\tif creds == nil {\n\t\treturn nil, false\n\t}\n\n\tnewctx := &auth.AuthContext{\n\t\tParent: pctx,\n\t\tSubjectName: creds.UserIdentifier,\n\t\tAuthMeta: creds.Meta,\n\t\tPrincipals: al.config.Principals,\n\t\tCriticalOptions: al.config.CriticalOptions,\n\t\tExtensions: al.config.Extensions,\n\t\tAuthenticator: al.Name(),\n\t}\n\tif newctx.AuthMeta == nil {\n\t\tnewctx.AuthMeta = make(map[string]interface{})\n\t}\n\ttplCtx := map[string]interface{}{\n\t\t\"UserName\": creds.UserIdentifier,\n\t}\n\n\tvar conn *ldap.Conn\n\thost := fmt.Sprintf(\"%s:%s\", al.url.Hostname(), al.url.Port())\n\tswitch al.connectMode {\n\tcase ConnectModeLDAPS:\n\t\tconn, err = ldap.DialTLS(\"tcp\", host, &tls.Config{\n\t\t\tInsecureSkipVerify: al.config.Insecure,\n\t\t\tServerName: al.url.Hostname(),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\tcase ConnectModePlain, ConnectModeStartTLS:\n\t\tconn, err = ldap.Dial(\"tcp\", host)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\t}\n\tdefer conn.Close()\n\tconn.SetTimeout(time.Second * time.Duration(al.config.Timeout))\n\tif al.connectMode == ConnectModeStartTLS {\n\t\tif err := conn.StartTLS(&tls.Config{\n\t\t\tInsecureSkipVerify: al.config.Insecure,\n\t\t\tServerName: al.url.Hostname(),\n\t\t}); err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\tbinddn := al.RenderTpl(UserBindDN, tplCtx)\n\tif err := conn.Bind(binddn, string(creds.Secret)); err != nil {\n\t\tlog.WithError(err).Error(\"cannot bind\")\n\t\treturn nil, false\n\t}\n\n\t\/\/ Find user entry, require a single match\n\tfilter := al.RenderTpl(UserSearchFilter, tplCtx)\n\tres, err := al.search(conn, al.config.UserSearchBase, filter, al.config.UserSearchGetAttributes)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"search failure\")\n\t\treturn nil, false\n\t}\n\tif len(res.Entries) != 1 {\n\t\tlog.WithField(\"matches\", len(res.Entries)).Error(\"not a single match\")\n\t\treturn nil, false\n\t}\n\tuser := entryToMap(res.Entries[0])\n\ttplCtx[\"User\"] = user\n\tnewctx.SubjectName = al.RenderTpl(SubjectName, tplCtx)\n\tnewctx.AuthMeta[AuthLDAPUsertEntry] = user\n\tlog.WithField(\"user\", user[\"cn\"]).Debug(\"user search ok\")\n\n\t\/\/ Find groups\n\tif al.config.AddPrincipalsFromGroups {\n\t\tfilter = al.RenderTpl(GroupSearchFilter, tplCtx)\n\t\tres, err = al.search(conn, al.config.GroupSearchBase, filter, al.config.GroupSearchGetAttributes)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"search failure\")\n\t\t\treturn nil, false\n\t\t}\n\t\tfor _, entry := range res.Entries {\n\t\t\tgroup := entryToMap(entry)\n\t\t\tlog.WithField(\"group\", group[\"cn\"]).Debug(\"searched group\")\n\t\t\ttplCtx[\"Group\"] = group\n\t\t\tif principal := al.RenderTpl(Principal, tplCtx); principal != \"\" {\n\t\t\t\tnewctx.Principals = append(newctx.Principals, principal)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn newctx, true\n}\n\nfunc (al *AuthLDAP) search(conn *ldap.Conn, base, filter string, attrs []string) (*ldap.SearchResult, error) {\n\tal.log.WithFields(logrus.Fields{\n\t\t\"base\": base,\n\t\t\"filter\": filter,\n\t\t\"attrs\": attrs,\n\t}).Debug(\"search\")\n\tsr := ldap.NewSearchRequest(\n\t\tbase,\n\t\tldap.ScopeWholeSubtree,\n\t\tldap.DerefInSearching,\n\t\t0, 0, false,\n\t\tfilter,\n\t\tattrs,\n\t\tnil,\n\t)\n\treturn conn.Search(sr)\n}\n\nfunc (al *AuthLDAP) RenderTpl(name string, data interface{}) string {\n\tbuf := bytes.NewBuffer([]byte{})\n\terr := al.tpls.ExecuteTemplate(buf, name, data)\n\tif err != nil {\n\t\tal.log.WithError(err).Errorf(\"template render error: %s\", name)\n\t}\n\treturn buf.String()\n}\n\nfunc (al *AuthLDAP) Type() string {\n\treturn Type\n}\n\nfunc (al *AuthLDAP) Name() string {\n\treturn al.config.Name\n}\n\nfunc (al *AuthLDAP) Realm() string {\n\treturn al.config.Realm\n}\n\nfunc (al *AuthLDAP) CredentialType() string {\n\treturn auth.CredentialUserPassword\n}\n\nfunc New(conf *Config) (*AuthLDAP, error) {\n\tif conf == nil {\n\t\tconf = Defaults\n\t}\n\n\tvar tplError error\n\trootTpl := template.New(\"root\")\n\tparseTpl := func(name, tpl string) {\n\t\t_, err := rootTpl.New(name).Parse(tpl)\n\t\tif err != nil {\n\t\t\ttplError = errors.Wrapf(err, \"cannot parse %s\", name)\n\t\t}\n\t}\n\tparseTpl(UserBindDN, conf.UserBindDN)\n\tparseTpl(UserSearchFilter, conf.UserSearchFilter)\n\tparseTpl(GroupSearchFilter, conf.GroupSearchFilter)\n\tparseTpl(SubjectName, conf.SubjectNameTemplate)\n\tparseTpl(Principal, conf.PrincipalTemplate)\n\tif tplError != nil {\n\t\treturn nil, tplError\n\t}\n\n\turl, err := url.Parse(conf.ServerURL)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot parse ServerURL\")\n\t}\n\tconnectMode := ConnectModePlain\n\tswitch url.Scheme {\n\tcase \"ldap\":\n\t\tif url.Query().Get(\"startTLS\") != \"\" {\n\t\t\tconnectMode = ConnectModeStartTLS\n\t\t}\n\tcase \"ldaps\":\n\t\tconnectMode = ConnectModeLDAPS\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported scheme: %s\", url.Scheme)\n\t}\n\tif url.Port() == \"\" {\n\t\treturn nil, errors.New(\"missing port for the ServerURL\")\n\t}\n\n\treturn &AuthLDAP{\n\t\tlog: Log.WithField(\"realm\", conf.Realm),\n\t\tconfig: conf,\n\t\turl: url,\n\t\tconnectMode: connectMode,\n\t\ttpls: rootTpl,\n\t}, nil\n}\n\ntype EntryMap map[string]interface{}\n\nfunc (em EntryMap) DN() string {\n\treturn em.Get(\"dn\")\n}\n\nfunc (em EntryMap) Get(k string) string {\n\tswitch v := em[k].(type) {\n\tcase []string:\n\t\tif len(v) >= 1 {\n\t\t\treturn v[0]\n\t\t}\n\tcase string:\n\t\treturn v\n\t}\n\treturn \"\"\n}\n\nfunc entryToMap(entry *ldap.Entry) EntryMap {\n\tret := make(map[string]interface{})\n\tret[\"dn\"] = []string{entry.DN}\n\tfor _, v := range entry.Attributes {\n\t\tif len(v.Values) == 1 {\n\t\t\tret[v.Name] = v.Values[0]\n\t\t} else {\n\t\t\tret[v.Name] = v.Values\n\t\t}\n\t}\n\treturn EntryMap(ret)\n}\n<commit_msg>set dial timeout for authldap<commit_after>package authldap\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"text\/template\"\n\t\"time\"\n\n\tldap \"gopkg.in\/ldap.v2\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/aakso\/ssh-inscribe\/pkg\/auth\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tConnectModePlain int = iota\n\tConnectModeLDAPS\n\tConnectModeStartTLS\n\n\tUserBindDN = \"UserBindDN\"\n\tUserSearchFilter = \"UserSearchFilter\"\n\tGroupSearchFilter = \"GroupSearchFilter\"\n\tSubjectName = \"SubjectName\"\n\tPrincipal = \"Principal\"\n\n\tAuthLDAPUsertEntry = \"authLDAPUserEntry\"\n)\n\ntype AuthLDAP struct {\n\tlog *logrus.Entry\n\tconfig *Config\n\turl *url.URL\n\tconnectMode int\n\n\ttpls *template.Template\n}\n\nfunc (al *AuthLDAP) Authenticate(pctx *auth.AuthContext, creds *auth.Credentials) (*auth.AuthContext, bool) {\n\tvar err error\n\tlog := al.log.WithField(\"action\", \"authenticate\")\n\tif v, ok := creds.Meta[auth.MetaAuditID]; ok {\n\t\tlog = log.WithField(auth.MetaAuditID, v)\n\t}\n\n\tif creds == nil {\n\t\treturn nil, false\n\t}\n\n\tnewctx := &auth.AuthContext{\n\t\tParent: pctx,\n\t\tSubjectName: creds.UserIdentifier,\n\t\tAuthMeta: creds.Meta,\n\t\tPrincipals: al.config.Principals,\n\t\tCriticalOptions: al.config.CriticalOptions,\n\t\tExtensions: al.config.Extensions,\n\t\tAuthenticator: al.Name(),\n\t}\n\tif newctx.AuthMeta == nil {\n\t\tnewctx.AuthMeta = make(map[string]interface{})\n\t}\n\ttplCtx := map[string]interface{}{\n\t\t\"UserName\": creds.UserIdentifier,\n\t}\n\n\t\/\/ Set ldap package level dial timeout as it doesn't offer any other way\n\tldap.DefaultTimeout = time.Second * time.Duration(al.config.Timeout)\n\tvar conn *ldap.Conn\n\thost := fmt.Sprintf(\"%s:%s\", al.url.Hostname(), al.url.Port())\n\tswitch al.connectMode {\n\tcase ConnectModeLDAPS:\n\t\tconn, err = ldap.DialTLS(\"tcp\", host, &tls.Config{\n\t\t\tInsecureSkipVerify: al.config.Insecure,\n\t\t\tServerName: al.url.Hostname(),\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\tcase ConnectModePlain, ConnectModeStartTLS:\n\t\tconn, err = ldap.Dial(\"tcp\", host)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\t}\n\tdefer conn.Close()\n\tconn.SetTimeout(time.Second * time.Duration(al.config.Timeout))\n\tif al.connectMode == ConnectModeStartTLS {\n\t\tif err := conn.StartTLS(&tls.Config{\n\t\t\tInsecureSkipVerify: al.config.Insecure,\n\t\t\tServerName: al.url.Hostname(),\n\t\t}); err != nil {\n\t\t\tlog.WithError(err).Error(\"cannot connect to directory server\")\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\tbinddn := al.RenderTpl(UserBindDN, tplCtx)\n\tif err := conn.Bind(binddn, string(creds.Secret)); err != nil {\n\t\tlog.WithError(err).Error(\"cannot bind\")\n\t\treturn nil, false\n\t}\n\n\t\/\/ Find user entry, require a single match\n\tfilter := al.RenderTpl(UserSearchFilter, tplCtx)\n\tres, err := al.search(conn, al.config.UserSearchBase, filter, al.config.UserSearchGetAttributes)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"search failure\")\n\t\treturn nil, false\n\t}\n\tif len(res.Entries) != 1 {\n\t\tlog.WithField(\"matches\", len(res.Entries)).Error(\"not a single match\")\n\t\treturn nil, false\n\t}\n\tuser := entryToMap(res.Entries[0])\n\ttplCtx[\"User\"] = user\n\tnewctx.SubjectName = al.RenderTpl(SubjectName, tplCtx)\n\tnewctx.AuthMeta[AuthLDAPUsertEntry] = user\n\tlog.WithField(\"user\", user[\"cn\"]).Debug(\"user search ok\")\n\n\t\/\/ Find groups\n\tif al.config.AddPrincipalsFromGroups {\n\t\tfilter = al.RenderTpl(GroupSearchFilter, tplCtx)\n\t\tres, err = al.search(conn, al.config.GroupSearchBase, filter, al.config.GroupSearchGetAttributes)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"search failure\")\n\t\t\treturn nil, false\n\t\t}\n\t\tfor _, entry := range res.Entries {\n\t\t\tgroup := entryToMap(entry)\n\t\t\tlog.WithField(\"group\", group[\"cn\"]).Debug(\"searched group\")\n\t\t\ttplCtx[\"Group\"] = group\n\t\t\tif principal := al.RenderTpl(Principal, tplCtx); principal != \"\" {\n\t\t\t\tnewctx.Principals = append(newctx.Principals, principal)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn newctx, true\n}\n\nfunc (al *AuthLDAP) search(conn *ldap.Conn, base, filter string, attrs []string) (*ldap.SearchResult, error) {\n\tal.log.WithFields(logrus.Fields{\n\t\t\"base\": base,\n\t\t\"filter\": filter,\n\t\t\"attrs\": attrs,\n\t}).Debug(\"search\")\n\tsr := ldap.NewSearchRequest(\n\t\tbase,\n\t\tldap.ScopeWholeSubtree,\n\t\tldap.DerefInSearching,\n\t\t0, 0, false,\n\t\tfilter,\n\t\tattrs,\n\t\tnil,\n\t)\n\treturn conn.Search(sr)\n}\n\nfunc (al *AuthLDAP) RenderTpl(name string, data interface{}) string {\n\tbuf := bytes.NewBuffer([]byte{})\n\terr := al.tpls.ExecuteTemplate(buf, name, data)\n\tif err != nil {\n\t\tal.log.WithError(err).Errorf(\"template render error: %s\", name)\n\t}\n\treturn buf.String()\n}\n\nfunc (al *AuthLDAP) Type() string {\n\treturn Type\n}\n\nfunc (al *AuthLDAP) Name() string {\n\treturn al.config.Name\n}\n\nfunc (al *AuthLDAP) Realm() string {\n\treturn al.config.Realm\n}\n\nfunc (al *AuthLDAP) CredentialType() string {\n\treturn auth.CredentialUserPassword\n}\n\nfunc New(conf *Config) (*AuthLDAP, error) {\n\tif conf == nil {\n\t\tconf = Defaults\n\t}\n\n\tvar tplError error\n\trootTpl := template.New(\"root\")\n\tparseTpl := func(name, tpl string) {\n\t\t_, err := rootTpl.New(name).Parse(tpl)\n\t\tif err != nil {\n\t\t\ttplError = errors.Wrapf(err, \"cannot parse %s\", name)\n\t\t}\n\t}\n\tparseTpl(UserBindDN, conf.UserBindDN)\n\tparseTpl(UserSearchFilter, conf.UserSearchFilter)\n\tparseTpl(GroupSearchFilter, conf.GroupSearchFilter)\n\tparseTpl(SubjectName, conf.SubjectNameTemplate)\n\tparseTpl(Principal, conf.PrincipalTemplate)\n\tif tplError != nil {\n\t\treturn nil, tplError\n\t}\n\n\turl, err := url.Parse(conf.ServerURL)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot parse ServerURL\")\n\t}\n\tconnectMode := ConnectModePlain\n\tswitch url.Scheme {\n\tcase \"ldap\":\n\t\tif url.Query().Get(\"startTLS\") != \"\" {\n\t\t\tconnectMode = ConnectModeStartTLS\n\t\t}\n\tcase \"ldaps\":\n\t\tconnectMode = ConnectModeLDAPS\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported scheme: %s\", url.Scheme)\n\t}\n\tif url.Port() == \"\" {\n\t\treturn nil, errors.New(\"missing port for the ServerURL\")\n\t}\n\n\treturn &AuthLDAP{\n\t\tlog: Log.WithField(\"realm\", conf.Realm),\n\t\tconfig: conf,\n\t\turl: url,\n\t\tconnectMode: connectMode,\n\t\ttpls: rootTpl,\n\t}, nil\n}\n\ntype EntryMap map[string]interface{}\n\nfunc (em EntryMap) DN() string {\n\treturn em.Get(\"dn\")\n}\n\nfunc (em EntryMap) Get(k string) string {\n\tswitch v := em[k].(type) {\n\tcase []string:\n\t\tif len(v) >= 1 {\n\t\t\treturn v[0]\n\t\t}\n\tcase string:\n\t\treturn v\n\t}\n\treturn \"\"\n}\n\nfunc entryToMap(entry *ldap.Entry) EntryMap {\n\tret := make(map[string]interface{})\n\tret[\"dn\"] = []string{entry.DN}\n\tfor _, v := range entry.Attributes {\n\t\tif len(v.Values) == 1 {\n\t\t\tret[v.Name] = v.Values[0]\n\t\t} else {\n\t\t\tret[v.Name] = v.Values\n\t\t}\n\t}\n\treturn EntryMap(ret)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage loadbalancers\n\nimport (\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\tcompute \"google.golang.org\/api\/compute\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/selection\"\n\t\"k8s.io\/ingress-gce\/pkg\/flags\"\n\t\"k8s.io\/ingress-gce\/pkg\/utils\"\n)\n\nconst SslCertificateMissing = \"SslCertificateMissing\"\n\nfunc (l *L7) checkSSLCert() error {\n\tif flags.F.Features.ManagedCertificates {\n\t\t\/\/ Handle annotation managed-certificates\n\t\tmanagedSslCerts, used, err := l.getManagedCertificates()\n\t\tif used {\n\t\t\tl.sslCerts = managedSslCerts\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle annotation pre-shared-cert\n\tused, preSharedSslCerts, err := l.getPreSharedCertificates()\n\tif used {\n\t\tl.sslCerts = preSharedSslCerts\n\t\treturn err\n\t}\n\n\t\/\/ Get updated value of certificate for comparison\n\texistingSecretsSslCerts, err := l.getIngressManagedSslCerts()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.oldSSLCerts = existingSecretsSslCerts\n\tsecretsSslCerts, err := l.createSslCertificates(existingSecretsSslCerts)\n\tl.sslCerts = secretsSslCerts\n\treturn err\n}\n\n\/\/ createSslCertificates creates SslCertificates based on kubernetes secrets in Ingress configuration.\nfunc (l *L7) createSslCertificates(existingCerts []*compute.SslCertificate) ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\n\texistingCertsMap := getMapfromCertList(existingCerts)\n\n\t\/\/ mapping of currently configured certs\n\tvisitedCertMap := make(map[string]string)\n\tvar failedCerts []string\n\n\tfor _, tlsCert := range l.runtimeInfo.TLS {\n\t\tingCert := tlsCert.Cert\n\t\tingKey := tlsCert.Key\n\t\tgcpCertName := l.namer.SSLCertName(l.Name, tlsCert.CertHash)\n\n\t\tif addedBy, exists := visitedCertMap[gcpCertName]; exists {\n\t\t\tglog.V(3).Infof(\"Secret %q has a certificate already used by %v\", tlsCert.Name, addedBy)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ PrivateKey is write only, so compare certs alone. We're assuming that\n\t\t\/\/ no one will change just the key. We can remember the key and compare,\n\t\t\/\/ but a bug could end up leaking it, which feels worse.\n\t\t\/\/ If the cert contents have changed, its hash would be different, so would be the cert name. So it is enough\n\t\t\/\/ to check if this cert name exists in the map.\n\t\tif existingCertsMap != nil {\n\t\t\tif cert, ok := existingCertsMap[gcpCertName]; ok {\n\t\t\t\tglog.V(3).Infof(\"Secret %q already exists as certificate %q\", tlsCert.Name, gcpCertName)\n\t\t\t\tvisitedCertMap[gcpCertName] = fmt.Sprintf(\"certificate:%q\", gcpCertName)\n\t\t\t\tresult = append(result, cert)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t\/\/ Controller needs to create the certificate, no need to check if it exists and delete. If it did exist, it\n\t\t\/\/ would have been listed in the populateSSLCert function and matched in the check above.\n\t\tglog.V(2).Infof(\"Creating new sslCertificate %q for LB %q\", gcpCertName, l.Name)\n\t\tcert, err := l.cloud.CreateSslCertificate(&compute.SslCertificate{\n\t\t\tName: gcpCertName,\n\t\t\tCertificate: ingCert,\n\t\t\tPrivateKey: ingKey,\n\t\t})\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to create new sslCertificate %q for %q - %v\", gcpCertName, l.Name, err)\n\t\t\tfailedCerts = append(failedCerts, gcpCertName+\" Error:\"+err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tvisitedCertMap[gcpCertName] = fmt.Sprintf(\"secret:%q\", tlsCert.Name)\n\t\tresult = append(result, cert)\n\t}\n\n\t\/\/ Save the old certs for cleanup after we update the target proxy.\n\tif len(failedCerts) > 0 {\n\t\treturn result, fmt.Errorf(\"Cert creation failures - %s\", strings.Join(failedCerts, \",\"))\n\t}\n\treturn result, nil\n}\n\n\/\/ getSslCertificates fetches GCE SslCertificate resources by names.\nfunc (l *L7) getSslCertificates(names []string) ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\tvar failedCerts []string\n\tfor _, name := range names {\n\t\t\/\/ Ask GCE for the cert, checking for problems and existence.\n\t\tcert, err := l.cloud.GetSslCertificate(name)\n\t\tif err != nil {\n\t\t\tfailedCerts = append(failedCerts, name+\": \"+err.Error())\n\t\t\tl.recorder.Eventf(l.runtimeInfo.Ingress, corev1.EventTypeNormal, SslCertificateMissing, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif cert == nil {\n\t\t\tfailedCerts = append(failedCerts, name+\": unable to find existing SslCertificate\")\n\t\t\tcontinue\n\t\t}\n\n\t\tglog.V(2).Infof(\"Using existing SslCertificate %v for %v\", name, l.Name)\n\t\tresult = append(result, cert)\n\t}\n\tif len(failedCerts) != 0 {\n\t\treturn result, fmt.Errorf(\"Errors - %s\", strings.Join(failedCerts, \",\"))\n\t}\n\n\treturn result, nil\n}\n\n\/\/ getManagedCertificates fetches SslCertificates specified via managed-certificates annotation.\nfunc (l *L7) getManagedCertificates() ([]*compute.SslCertificate, bool, error) {\n\tif l.runtimeInfo.ManagedCertificates == \"\" {\n\t\treturn nil, false, nil\n\t}\n\n\tmcrtsNames := utils.SplitAnnotation(l.runtimeInfo.ManagedCertificates)\n\treq, err := labels.NewRequirement(\"metadata.name\", selection.In, mcrtsNames)\n\tif err != nil {\n\t\treturn nil, true, err\n\t}\n\n\tsel := labels.NewSelector()\n\tsel.Add(*req)\n\tmcrts, err := l.mcrt.ManagedCertificates(l.runtimeInfo.Ingress.Namespace).List(sel)\n\tif err != nil {\n\t\treturn nil, true, err\n\t}\n\n\tvar names []string\n\tfor _, mcrt := range mcrts {\n\t\tif mcrt.Status.CertificateName != \"\" {\n\t\t\tnames = append(names, mcrt.Status.CertificateName)\n\t\t}\n\t}\n\n\tsslCerts, err := l.getSslCertificates(names)\n\tif err != nil {\n\t\treturn sslCerts, true, fmt.Errorf(\"managed-certificates errors: %s\", err.Error())\n\t}\n\n\treturn sslCerts, true, nil\n}\n\n\/\/ getPreSharedCertificates fetches SslCertificates specified via pre-shared-cert annotation.\nfunc (l *L7) getPreSharedCertificates() (bool, []*compute.SslCertificate, error) {\n\tif l.runtimeInfo.TLSName == \"\" {\n\t\treturn false, nil, nil\n\t}\n\tsslCerts, err := l.getSslCertificates(utils.SplitAnnotation(l.runtimeInfo.TLSName))\n\n\tif err != nil {\n\t\treturn true, sslCerts, fmt.Errorf(\"pre-shared-cert errors: %s\", err.Error())\n\t}\n\n\treturn true, sslCerts, nil\n}\n\nfunc getMapfromCertList(certs []*compute.SslCertificate) map[string]*compute.SslCertificate {\n\tif len(certs) == 0 {\n\t\treturn nil\n\t}\n\tcertMap := make(map[string]*compute.SslCertificate)\n\tfor _, cert := range certs {\n\t\tcertMap[cert.Name] = cert\n\t}\n\treturn certMap\n}\n\n\/\/ getIngressManagedSslCerts fetches SslCertificate resources created and managed by this load balancer\n\/\/ instance. These SslCertificate resources were created based on kubernetes secrets in Ingress\n\/\/ configuration.\nfunc (l *L7) getIngressManagedSslCerts() ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\n\t\/\/ Currently we list all certs available in gcloud and filter the ones managed by this loadbalancer instance. This is\n\t\/\/ to make sure we garbage collect any old certs that this instance might have lost track of due to crashes.\n\t\/\/ Can be a performance issue if there are too many global certs, default quota is only 10.\n\tcerts, err := l.cloud.ListSslCertificates()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, c := range certs {\n\t\tif l.namer.IsCertUsedForLB(l.Name, c.Name) {\n\t\t\tglog.V(4).Infof(\"Populating ssl cert %s for l7 %s\", c.Name, l.Name)\n\t\t\tresult = append(result, c)\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\t\/\/ Check for legacy cert since that follows a different naming convention\n\t\tglog.V(4).Infof(\"Looking for legacy ssl certs\")\n\t\texpectedCertLinks, err := l.getSslCertLinkInUse()\n\t\tif err != nil {\n\t\t\t\/\/ Return nil if target proxy doesn't exist.\n\t\t\treturn nil, utils.IgnoreHTTPNotFound(err)\n\t\t}\n\t\tfor _, link := range expectedCertLinks {\n\t\t\t\/\/ Retrieve the certificate and ignore error if certificate wasn't found\n\t\t\tname, err := utils.KeyName(link)\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"error parsing cert name: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !l.namer.IsLegacySSLCert(l.Name, name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, _ := l.cloud.GetSslCertificate(name)\n\t\t\tif cert != nil {\n\t\t\t\tglog.V(4).Infof(\"Populating legacy ssl cert %s for l7 %s\", cert.Name, l.Name)\n\t\t\t\tresult = append(result, cert)\n\t\t\t}\n\t\t}\n\t}\n\treturn result, nil\n}\n\nfunc (l *L7) deleteOldSSLCerts() {\n\tif len(l.oldSSLCerts) == 0 {\n\t\treturn\n\t}\n\tcertsMap := getMapfromCertList(l.sslCerts)\n\tfor _, cert := range l.oldSSLCerts {\n\t\tif !l.namer.IsCertUsedForLB(l.Name, cert.Name) && !l.namer.IsLegacySSLCert(l.Name, cert.Name) {\n\t\t\t\/\/ retain cert if it is managed by GCE(non-ingress)\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := certsMap[cert.Name]; ok {\n\t\t\t\/\/ cert found in current map\n\t\t\tcontinue\n\t\t}\n\t\tglog.V(3).Infof(\"Cleaning up old SSL Certificate %s\", cert.Name)\n\t\tif certErr := utils.IgnoreHTTPNotFound(l.cloud.DeleteSslCertificate(cert.Name)); certErr != nil {\n\t\t\tglog.Errorf(\"Old cert delete failed - %v\", certErr)\n\t\t}\n\t}\n}\n\n\/\/ Returns true if the input array of certs is identical to the certs in the L7 config.\n\/\/ Returns false if there is any mismatch\nfunc (l *L7) compareCerts(certLinks []string) bool {\n\tcertsMap := getMapfromCertList(l.sslCerts)\n\tif len(certLinks) != len(certsMap) {\n\t\tglog.V(4).Infof(\"Loadbalancer has %d certs, target proxy has %d certs\", len(certsMap), len(certLinks))\n\t\treturn false\n\t}\n\n\tfor _, link := range certLinks {\n\t\tcertName, err := utils.KeyName(link)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Cannot get cert name: %v\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif cert, ok := certsMap[certName]; !ok {\n\t\t\tglog.V(4).Infof(\"Cannot find cert with name %s in certsMap %+v\", certName, certsMap)\n\t\t\treturn false\n\t\t} else if ok && !utils.EqualResourceIDs(link, cert.SelfLink) {\n\t\t\tglog.V(4).Infof(\"Selflink compare failed for certs - %s in loadbalancer, %s in targetproxy\", cert.SelfLink, link)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GetCertHash(contents string) string {\n\treturn fmt.Sprintf(\"%x\", sha256.Sum256([]byte(contents)))[:16]\n}\n\nfunc toCertNames(certs []*compute.SslCertificate) (names []string) {\n\tfor _, v := range certs {\n\t\tnames = append(names, v.Name)\n\t}\n\treturn names\n}\n<commit_msg>Fix wrong filtering of ManagedCertificate objects. The objects were in fact not filtered - because of a bug a catch-all selector was applied<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage loadbalancers\n\nimport (\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\tcompute \"google.golang.org\/api\/compute\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/ingress-gce\/pkg\/flags\"\n\t\"k8s.io\/ingress-gce\/pkg\/utils\"\n)\n\nconst SslCertificateMissing = \"SslCertificateMissing\"\n\nfunc (l *L7) checkSSLCert() error {\n\tif flags.F.Features.ManagedCertificates {\n\t\t\/\/ Handle annotation managed-certificates\n\t\tmanagedSslCerts, used, err := l.getManagedCertificates()\n\t\tif used {\n\t\t\tl.sslCerts = managedSslCerts\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle annotation pre-shared-cert\n\tused, preSharedSslCerts, err := l.getPreSharedCertificates()\n\tif used {\n\t\tl.sslCerts = preSharedSslCerts\n\t\treturn err\n\t}\n\n\t\/\/ Get updated value of certificate for comparison\n\texistingSecretsSslCerts, err := l.getIngressManagedSslCerts()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.oldSSLCerts = existingSecretsSslCerts\n\tsecretsSslCerts, err := l.createSslCertificates(existingSecretsSslCerts)\n\tl.sslCerts = secretsSslCerts\n\treturn err\n}\n\n\/\/ createSslCertificates creates SslCertificates based on kubernetes secrets in Ingress configuration.\nfunc (l *L7) createSslCertificates(existingCerts []*compute.SslCertificate) ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\n\texistingCertsMap := getMapfromCertList(existingCerts)\n\n\t\/\/ mapping of currently configured certs\n\tvisitedCertMap := make(map[string]string)\n\tvar failedCerts []string\n\n\tfor _, tlsCert := range l.runtimeInfo.TLS {\n\t\tingCert := tlsCert.Cert\n\t\tingKey := tlsCert.Key\n\t\tgcpCertName := l.namer.SSLCertName(l.Name, tlsCert.CertHash)\n\n\t\tif addedBy, exists := visitedCertMap[gcpCertName]; exists {\n\t\t\tglog.V(3).Infof(\"Secret %q has a certificate already used by %v\", tlsCert.Name, addedBy)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ PrivateKey is write only, so compare certs alone. We're assuming that\n\t\t\/\/ no one will change just the key. We can remember the key and compare,\n\t\t\/\/ but a bug could end up leaking it, which feels worse.\n\t\t\/\/ If the cert contents have changed, its hash would be different, so would be the cert name. So it is enough\n\t\t\/\/ to check if this cert name exists in the map.\n\t\tif existingCertsMap != nil {\n\t\t\tif cert, ok := existingCertsMap[gcpCertName]; ok {\n\t\t\t\tglog.V(3).Infof(\"Secret %q already exists as certificate %q\", tlsCert.Name, gcpCertName)\n\t\t\t\tvisitedCertMap[gcpCertName] = fmt.Sprintf(\"certificate:%q\", gcpCertName)\n\t\t\t\tresult = append(result, cert)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t\/\/ Controller needs to create the certificate, no need to check if it exists and delete. If it did exist, it\n\t\t\/\/ would have been listed in the populateSSLCert function and matched in the check above.\n\t\tglog.V(2).Infof(\"Creating new sslCertificate %q for LB %q\", gcpCertName, l.Name)\n\t\tcert, err := l.cloud.CreateSslCertificate(&compute.SslCertificate{\n\t\t\tName: gcpCertName,\n\t\t\tCertificate: ingCert,\n\t\t\tPrivateKey: ingKey,\n\t\t})\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to create new sslCertificate %q for %q - %v\", gcpCertName, l.Name, err)\n\t\t\tfailedCerts = append(failedCerts, gcpCertName+\" Error:\"+err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tvisitedCertMap[gcpCertName] = fmt.Sprintf(\"secret:%q\", tlsCert.Name)\n\t\tresult = append(result, cert)\n\t}\n\n\t\/\/ Save the old certs for cleanup after we update the target proxy.\n\tif len(failedCerts) > 0 {\n\t\treturn result, fmt.Errorf(\"Cert creation failures - %s\", strings.Join(failedCerts, \",\"))\n\t}\n\treturn result, nil\n}\n\n\/\/ getSslCertificates fetches GCE SslCertificate resources by names.\nfunc (l *L7) getSslCertificates(names []string) ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\tvar failedCerts []string\n\tfor _, name := range names {\n\t\t\/\/ Ask GCE for the cert, checking for problems and existence.\n\t\tcert, err := l.cloud.GetSslCertificate(name)\n\t\tif err != nil {\n\t\t\tfailedCerts = append(failedCerts, name+\": \"+err.Error())\n\t\t\tl.recorder.Eventf(l.runtimeInfo.Ingress, corev1.EventTypeNormal, SslCertificateMissing, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif cert == nil {\n\t\t\tfailedCerts = append(failedCerts, name+\": unable to find existing SslCertificate\")\n\t\t\tcontinue\n\t\t}\n\n\t\tglog.V(2).Infof(\"Using existing SslCertificate %v for %v\", name, l.Name)\n\t\tresult = append(result, cert)\n\t}\n\tif len(failedCerts) != 0 {\n\t\treturn result, fmt.Errorf(\"Errors - %s\", strings.Join(failedCerts, \",\"))\n\t}\n\n\treturn result, nil\n}\n\n\/\/ getManagedCertificates fetches SslCertificates specified via managed-certificates annotation.\nfunc (l *L7) getManagedCertificates() ([]*compute.SslCertificate, bool, error) {\n\tif l.runtimeInfo.ManagedCertificates == \"\" {\n\t\treturn nil, false, nil\n\t}\n\n\tmcrtsNames := utils.SplitAnnotation(l.runtimeInfo.ManagedCertificates)\n\tmcrts, err := l.mcrt.ManagedCertificates(l.runtimeInfo.Ingress.Namespace).List(labels.Everything())\n\tif err != nil {\n\t\treturn nil, true, err\n\t}\n\n\tvar sslCertsNames []string\n\tfor _, mcrt := range mcrts {\n\t\tfound := false\n\t\tfor _, mcrtName := range mcrtsNames {\n\t\t\tif mcrtName == mcrt.Name {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tcontinue\n\t\t}\n\n\t\tif mcrt.Status.CertificateName != \"\" {\n\t\t\tsslCertsNames = append(sslCertsNames, mcrt.Status.CertificateName)\n\t\t}\n\t}\n\n\tsslCerts, err := l.getSslCertificates(sslCertsNames)\n\tif err != nil {\n\t\treturn sslCerts, true, fmt.Errorf(\"managed-certificates errors: %s\", err.Error())\n\t}\n\n\treturn sslCerts, true, nil\n}\n\n\/\/ getPreSharedCertificates fetches SslCertificates specified via pre-shared-cert annotation.\nfunc (l *L7) getPreSharedCertificates() (bool, []*compute.SslCertificate, error) {\n\tif l.runtimeInfo.TLSName == \"\" {\n\t\treturn false, nil, nil\n\t}\n\tsslCerts, err := l.getSslCertificates(utils.SplitAnnotation(l.runtimeInfo.TLSName))\n\n\tif err != nil {\n\t\treturn true, sslCerts, fmt.Errorf(\"pre-shared-cert errors: %s\", err.Error())\n\t}\n\n\treturn true, sslCerts, nil\n}\n\nfunc getMapfromCertList(certs []*compute.SslCertificate) map[string]*compute.SslCertificate {\n\tif len(certs) == 0 {\n\t\treturn nil\n\t}\n\tcertMap := make(map[string]*compute.SslCertificate)\n\tfor _, cert := range certs {\n\t\tcertMap[cert.Name] = cert\n\t}\n\treturn certMap\n}\n\n\/\/ getIngressManagedSslCerts fetches SslCertificate resources created and managed by this load balancer\n\/\/ instance. These SslCertificate resources were created based on kubernetes secrets in Ingress\n\/\/ configuration.\nfunc (l *L7) getIngressManagedSslCerts() ([]*compute.SslCertificate, error) {\n\tvar result []*compute.SslCertificate\n\n\t\/\/ Currently we list all certs available in gcloud and filter the ones managed by this loadbalancer instance. This is\n\t\/\/ to make sure we garbage collect any old certs that this instance might have lost track of due to crashes.\n\t\/\/ Can be a performance issue if there are too many global certs, default quota is only 10.\n\tcerts, err := l.cloud.ListSslCertificates()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, c := range certs {\n\t\tif l.namer.IsCertUsedForLB(l.Name, c.Name) {\n\t\t\tglog.V(4).Infof(\"Populating ssl cert %s for l7 %s\", c.Name, l.Name)\n\t\t\tresult = append(result, c)\n\t\t}\n\t}\n\tif len(result) == 0 {\n\t\t\/\/ Check for legacy cert since that follows a different naming convention\n\t\tglog.V(4).Infof(\"Looking for legacy ssl certs\")\n\t\texpectedCertLinks, err := l.getSslCertLinkInUse()\n\t\tif err != nil {\n\t\t\t\/\/ Return nil if target proxy doesn't exist.\n\t\t\treturn nil, utils.IgnoreHTTPNotFound(err)\n\t\t}\n\t\tfor _, link := range expectedCertLinks {\n\t\t\t\/\/ Retrieve the certificate and ignore error if certificate wasn't found\n\t\t\tname, err := utils.KeyName(link)\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"error parsing cert name: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !l.namer.IsLegacySSLCert(l.Name, name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, _ := l.cloud.GetSslCertificate(name)\n\t\t\tif cert != nil {\n\t\t\t\tglog.V(4).Infof(\"Populating legacy ssl cert %s for l7 %s\", cert.Name, l.Name)\n\t\t\t\tresult = append(result, cert)\n\t\t\t}\n\t\t}\n\t}\n\treturn result, nil\n}\n\nfunc (l *L7) deleteOldSSLCerts() {\n\tif len(l.oldSSLCerts) == 0 {\n\t\treturn\n\t}\n\tcertsMap := getMapfromCertList(l.sslCerts)\n\tfor _, cert := range l.oldSSLCerts {\n\t\tif !l.namer.IsCertUsedForLB(l.Name, cert.Name) && !l.namer.IsLegacySSLCert(l.Name, cert.Name) {\n\t\t\t\/\/ retain cert if it is managed by GCE(non-ingress)\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := certsMap[cert.Name]; ok {\n\t\t\t\/\/ cert found in current map\n\t\t\tcontinue\n\t\t}\n\t\tglog.V(3).Infof(\"Cleaning up old SSL Certificate %s\", cert.Name)\n\t\tif certErr := utils.IgnoreHTTPNotFound(l.cloud.DeleteSslCertificate(cert.Name)); certErr != nil {\n\t\t\tglog.Errorf(\"Old cert delete failed - %v\", certErr)\n\t\t}\n\t}\n}\n\n\/\/ Returns true if the input array of certs is identical to the certs in the L7 config.\n\/\/ Returns false if there is any mismatch\nfunc (l *L7) compareCerts(certLinks []string) bool {\n\tcertsMap := getMapfromCertList(l.sslCerts)\n\tif len(certLinks) != len(certsMap) {\n\t\tglog.V(4).Infof(\"Loadbalancer has %d certs, target proxy has %d certs\", len(certsMap), len(certLinks))\n\t\treturn false\n\t}\n\n\tfor _, link := range certLinks {\n\t\tcertName, err := utils.KeyName(link)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Cannot get cert name: %v\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif cert, ok := certsMap[certName]; !ok {\n\t\t\tglog.V(4).Infof(\"Cannot find cert with name %s in certsMap %+v\", certName, certsMap)\n\t\t\treturn false\n\t\t} else if ok && !utils.EqualResourceIDs(link, cert.SelfLink) {\n\t\t\tglog.V(4).Infof(\"Selflink compare failed for certs - %s in loadbalancer, %s in targetproxy\", cert.SelfLink, link)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GetCertHash(contents string) string {\n\treturn fmt.Sprintf(\"%x\", sha256.Sum256([]byte(contents)))[:16]\n}\n\nfunc toCertNames(certs []*compute.SslCertificate) (names []string) {\n\tfor _, v := range certs {\n\t\tnames = append(names, v.Name)\n\t}\n\treturn names\n}\n<|endoftext|>"} {"text":"<commit_before>package group\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\tgroup_types \"github.com\/docker\/infrakit\/pkg\/plugin\/group\/types\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/flavor\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/group\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/instance\"\n)\n\nfunc minInt(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc isSelf(inst instance.Description, settings groupSettings) bool {\n\tif settings.self != nil {\n\t\tif inst.LogicalID != nil && *inst.LogicalID == *settings.self {\n\t\t\treturn true\n\t\t}\n\t\tif v, has := inst.Tags[instance.LogicalIDTag]; has {\n\t\t\treturn string(*settings.self) == v\n\t\t}\n\t}\n\treturn false\n}\n\nfunc doNotDestroySelf(inst instance.Description, settings groupSettings) bool {\n\tif !isSelf(inst, settings) {\n\t\treturn false\n\t}\n\tif settings.options.PolicyLeaderSelfUpdate == nil {\n\t\treturn false\n\t}\n\n\treturn *settings.options.PolicyLeaderSelfUpdate == group_types.PolicyLeaderSelfUpdateNever\n}\n\nfunc desiredAndUndesiredInstances(\n\tinstances []instance.Description, settings groupSettings) ([]instance.Description, []instance.Description) {\n\n\tdesiredHash := settings.config.InstanceHash()\n\tdesired := []instance.Description{}\n\tundesired := []instance.Description{}\n\n\tfor _, inst := range instances {\n\n\t\tactualConfig, specified := inst.Tags[group.ConfigSHATag]\n\t\tif specified && actualConfig == desiredHash || doNotDestroySelf(inst, settings) {\n\t\t\tdesired = append(desired, inst)\n\t\t} else {\n\t\t\tundesired = append(undesired, inst)\n\t\t}\n\t}\n\n\treturn desired, undesired\n}\n\ntype rollingupdate struct {\n\tdesc string\n\tscaled Scaled\n\tupdatingFrom groupSettings\n\tupdatingTo groupSettings\n\tstop chan bool\n}\n\nfunc (r rollingupdate) Explain() string {\n\treturn r.desc\n}\n\nfunc (r *rollingupdate) waitUntilQuiesced(pollInterval time.Duration, expectedNewInstances int) error {\n\t\/\/ Block until the expected number of instances in the desired state are ready. Updates are unconcerned with\n\t\/\/ the health of instances in the undesired state. This allows a user to dig out of a hole where the original\n\t\/\/ state of the group is bad, and instances are not reporting as healthy.\n\n\tticker := time.NewTicker(pollInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t\/\/ Gather instances in the scaler with the desired state\n\t\t\t\/\/ Check:\n\t\t\t\/\/ - that the scaler has the expected number of instances\n\t\t\t\/\/ - instances with the desired config are healthy\n\n\t\t\t\/\/ TODO(wfarner): Get this information from the scaler to reduce redundant network calls.\n\t\t\tinstances, err := labelAndList(r.scaled)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ The update is only concerned with instances being created in the course of the update.\n\t\t\t\/\/ The health of instances in any other state is irrelevant. This behavior is important\n\t\t\t\/\/ especially if the previous state of the group is unhealthy and the update is attempting to\n\t\t\t\/\/ restore health.\n\t\t\tmatching, _ := desiredAndUndesiredInstances(instances, r.updatingTo)\n\n\t\t\t\/\/ Now that we have isolated the instances with the new configuration, check if they are all\n\t\t\t\/\/ healthy. We do not consider an update successful until the target number of instances are\n\t\t\t\/\/ confirmed healthy.\n\t\t\t\/\/ The following design choices are currently implemented:\n\t\t\t\/\/\n\t\t\t\/\/ - the update will continue indefinitely if one or more instances are in the\n\t\t\t\/\/ flavor.UnknownHealth state. Operators must stop the update and diagnose the cause.\n\t\t\t\/\/\n\t\t\t\/\/ - the update is stopped immediately if any instance enters the flavor.Unhealthy state.\n\t\t\t\/\/\n\t\t\t\/\/ - the update will proceed with other instances immediately when the currently-expected\n\t\t\t\/\/ number of instances are observed in the flavor.Healthy state.\n\t\t\t\/\/\n\t\t\tnumHealthy := 0\n\t\t\tfor _, inst := range matching {\n\t\t\t\t\/\/ TODO(wfarner): More careful thought is needed with respect to blocking and timeouts\n\t\t\t\t\/\/ here. This might mean formalizing timeout behavior for different types of RPCs in\n\t\t\t\t\/\/ the group, and\/or documenting the expectations for plugin implementations.\n\t\t\t\tswitch r.scaled.Health(inst) {\n\t\t\t\tcase flavor.Healthy:\n\t\t\t\t\tnumHealthy++\n\t\t\t\tcase flavor.Unhealthy:\n\t\t\t\t\treturn fmt.Errorf(\"Instance %s is unhealthy\", inst.ID)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif numHealthy >= int(expectedNewInstances) {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tlog.Info(\"Waiting for scaler to quiesce\")\n\n\t\tcase <-r.stop:\n\t\t\tticker.Stop()\n\t\t\treturn errors.New(\"Update halted by user\")\n\t\t}\n\t}\n}\n\n\/\/ Run identifies instances not matching the desired state and destroys them one at a time until all instances in the\n\/\/ group match the desired state, with the desired number of instances.\n\/\/ TODO(wfarner): Make this routine more resilient to transient errors.\nfunc (r *rollingupdate) Run(pollInterval time.Duration) error {\n\n\tinstances, err := labelAndList(r.scaled)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdesired, _ := desiredAndUndesiredInstances(instances, r.updatingTo)\n\texpectedNewInstances := len(desired)\n\n\tfor {\n\t\terr := r.waitUntilQuiesced(\n\t\t\tpollInterval,\n\t\t\tminInt(expectedNewInstances, int(r.updatingTo.config.Allocation.Size)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Info(\"Scaler has quiesced\")\n\n\t\tinstances, err := labelAndList(r.scaled)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, undesiredInstances := desiredAndUndesiredInstances(instances, r.updatingTo)\n\n\t\tif len(undesiredInstances) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Info(\"Found undesired instances\", \"count\", len(undesiredInstances))\n\n\t\t\/\/ Sort instances first to ensure predictable destroy order.\n\t\tsort.Sort(sortByID{list: undesiredInstances, settings: &r.updatingFrom})\n\n\t\t\/\/ TODO(wfarner): Make the 'batch size' configurable.\n\t\tr.scaled.Destroy(undesiredInstances[0], instance.RollingUpdate)\n\n\t\texpectedNewInstances++\n\t}\n\n\treturn nil\n}\n\nfunc (r *rollingupdate) Stop() {\n\tclose(r.stop)\n}\n<commit_msg>Pace quorum group rolling updates (#844)<commit_after>package group\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\tgroup_types \"github.com\/docker\/infrakit\/pkg\/plugin\/group\/types\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/flavor\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/group\"\n\t\"github.com\/docker\/infrakit\/pkg\/spi\/instance\"\n)\n\nfunc minInt(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc isSelf(inst instance.Description, settings groupSettings) bool {\n\tif settings.self != nil {\n\t\tif inst.LogicalID != nil && *inst.LogicalID == *settings.self {\n\t\t\treturn true\n\t\t}\n\t\tif v, has := inst.Tags[instance.LogicalIDTag]; has {\n\t\t\treturn string(*settings.self) == v\n\t\t}\n\t}\n\treturn false\n}\n\nfunc doNotDestroySelf(inst instance.Description, settings groupSettings) bool {\n\tif !isSelf(inst, settings) {\n\t\treturn false\n\t}\n\tif settings.options.PolicyLeaderSelfUpdate == nil {\n\t\treturn false\n\t}\n\n\treturn *settings.options.PolicyLeaderSelfUpdate == group_types.PolicyLeaderSelfUpdateNever\n}\n\nfunc desiredAndUndesiredInstances(\n\tinstances []instance.Description, settings groupSettings) ([]instance.Description, []instance.Description) {\n\n\tdesiredHash := settings.config.InstanceHash()\n\tdesired := []instance.Description{}\n\tundesired := []instance.Description{}\n\n\tfor _, inst := range instances {\n\n\t\tactualConfig, specified := inst.Tags[group.ConfigSHATag]\n\t\tif specified && actualConfig == desiredHash || doNotDestroySelf(inst, settings) {\n\t\t\tdesired = append(desired, inst)\n\t\t} else {\n\t\t\tundesired = append(undesired, inst)\n\t\t}\n\t}\n\n\treturn desired, undesired\n}\n\ntype rollingupdate struct {\n\tdesc string\n\tscaled Scaled\n\tupdatingFrom groupSettings\n\tupdatingTo groupSettings\n\tstop chan bool\n}\n\nfunc (r rollingupdate) Explain() string {\n\treturn r.desc\n}\n\nfunc (r *rollingupdate) waitUntilQuiesced(pollInterval time.Duration, expectedNewInstances int) error {\n\t\/\/ Block until the expected number of instances in the desired state are ready. Updates are unconcerned with\n\t\/\/ the health of instances in the undesired state. This allows a user to dig out of a hole where the original\n\t\/\/ state of the group is bad, and instances are not reporting as healthy.\n\tlog.Info(\"waitUntilQuiesced\", \"expectedNewInstances\", expectedNewInstances)\n\t\/\/ TODO: start processing right away instead of waiting for first tick\n\tticker := time.NewTicker(pollInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t\/\/ Gather instances in the scaler with the desired state\n\t\t\t\/\/ Check:\n\t\t\t\/\/ - that the scaler has the expected number of instances\n\t\t\t\/\/ - instances with the desired config are healthy\n\n\t\t\t\/\/ TODO(wfarner): Get this information from the scaler to reduce redundant network calls.\n\t\t\tinstances, err := labelAndList(r.scaled)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ The update is only concerned with instances being created in the course of the update.\n\t\t\t\/\/ The health of instances in any other state is irrelevant. This behavior is important\n\t\t\t\/\/ especially if the previous state of the group is unhealthy and the update is attempting to\n\t\t\t\/\/ restore health.\n\t\t\tmatching, _ := desiredAndUndesiredInstances(instances, r.updatingTo)\n\t\t\tlog.Info(\"waitUntilQuiesced\", \"totalInstances\", len(instances), \"matchingInstances\", len(matching))\n\n\t\t\t\/\/ Now that we have isolated the instances with the new configuration, check if they are all\n\t\t\t\/\/ healthy. We do not consider an update successful until the target number of instances are\n\t\t\t\/\/ confirmed healthy.\n\t\t\t\/\/ The following design choices are currently implemented:\n\t\t\t\/\/\n\t\t\t\/\/ - the update will continue indefinitely if one or more instances are in the\n\t\t\t\/\/ flavor.UnknownHealth state. Operators must stop the update and diagnose the cause.\n\t\t\t\/\/\n\t\t\t\/\/ - the update is stopped immediately if any instance enters the flavor.Unhealthy state.\n\t\t\t\/\/\n\t\t\t\/\/ - the update will proceed with other instances immediately when the currently-expected\n\t\t\t\/\/ number of instances are observed in the flavor.Healthy state.\n\t\t\t\/\/\n\t\t\tnumHealthy := 0\n\t\t\tfor _, inst := range matching {\n\t\t\t\t\/\/ TODO(wfarner): More careful thought is needed with respect to blocking and timeouts\n\t\t\t\t\/\/ here. This might mean formalizing timeout behavior for different types of RPCs in\n\t\t\t\t\/\/ the group, and\/or documenting the expectations for plugin implementations.\n\t\t\t\tswitch r.scaled.Health(inst) {\n\t\t\t\tcase flavor.Healthy:\n\t\t\t\t\tlog.Info(\"waitUntilQuiesced\", \"health\", \"heathy\", \"nodeID\", inst.ID)\n\t\t\t\t\tnumHealthy++\n\t\t\t\tcase flavor.Unhealthy:\n\t\t\t\t\tlog.Error(\"waitUntilQuiesced\", \"health\", \"unheathy\", \"nodeID\", inst.ID)\n\t\t\t\t\treturn fmt.Errorf(\"Instance %s is unhealthy\", inst.ID)\n\t\t\t\tcase flavor.Unknown:\n\t\t\t\t\tlog.Info(\"waitUntilQuiesced\", \"health\", \"unknown\", \"nodeID\", inst.ID)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif numHealthy >= int(expectedNewInstances) {\n\t\t\t\tlog.Info(\"waitUntilQuiesced\",\n\t\t\t\t\t\"msg\", \"Scaler has quiesced, terminating loop\",\n\t\t\t\t\t\"numHealthy\", numHealthy,\n\t\t\t\t\t\"expectedNewInstances\", expectedNewInstances)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Info(\"waitUntilQuiesced\",\n\t\t\t\t\"msg\", \"Waiting for scaler to quiesce\",\n\t\t\t\t\"numHealthy\", numHealthy,\n\t\t\t\t\"expectedNewInstances\", expectedNewInstances)\n\n\t\tcase <-r.stop:\n\t\t\tticker.Stop()\n\t\t\treturn errors.New(\"Update halted by user\")\n\t\t}\n\t}\n}\n\n\/\/ Run identifies instances not matching the desired state and destroys them one at a time until all instances in the\n\/\/ group match the desired state, with the desired number of instances.\n\/\/ TODO(wfarner): Make this routine more resilient to transient errors.\nfunc (r *rollingupdate) Run(pollInterval time.Duration) error {\n\n\tinstances, err := labelAndList(r.scaled)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ First determine if any new instances should be created\n\tdesired, _ := desiredAndUndesiredInstances(instances, r.updatingTo)\n\texpectedNewInstances := len(desired)\n\tlog.Info(\"RollingUpdate-Run\", \"expectedNewInstances\", expectedNewInstances)\n\n\tfor {\n\t\t\/\/ Wait until any new nodes are healthy\n\t\tdesiredSize := len(r.updatingTo.config.Allocation.LogicalIDs)\n\t\tif desiredSize == 0 {\n\t\t\tdesiredSize = int(r.updatingTo.config.Allocation.Size)\n\t\t}\n\t\terr := r.waitUntilQuiesced(pollInterval, minInt(expectedNewInstances, desiredSize))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinstances, err := labelAndList(r.scaled)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Now check if we have instances that do not match the hash\n\t\t_, undesiredInstances := desiredAndUndesiredInstances(instances, r.updatingTo)\n\t\tlog.Info(\"RollingUpdate-Run\", \"undesiredInstances\", len(undesiredInstances))\n\t\tif len(undesiredInstances) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Sort instances first to ensure predictable destroy order.\n\t\tsort.Sort(sortByID{list: undesiredInstances, settings: &r.updatingFrom})\n\n\t\t\/\/ TODO(wfarner): Make the 'batch size' configurable.\n\t\tr.scaled.Destroy(undesiredInstances[0], instance.RollingUpdate)\n\n\t\t\/\/ Increment new instance count to replace the node that was just destroyed\n\t\texpectedNewInstances++\n\t}\n\n\treturn nil\n}\n\nfunc (r *rollingupdate) Stop() {\n\tclose(r.stop)\n}\n<|endoftext|>"} {"text":"<commit_before>package ingress\n\nimport (\n\t\"context\"\n\t\"kourier\/pkg\/config\"\n\t\"kourier\/pkg\/envoy\"\n\n\t\"knative.dev\/serving\/pkg\/apis\/networking\"\n\t\"knative.dev\/serving\/pkg\/reconciler\"\n\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tkubeclient \"knative.dev\/pkg\/client\/injection\/kube\/client\"\n\tendpointsinformer \"knative.dev\/pkg\/client\/injection\/kube\/informers\/core\/v1\/endpoints\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\t\"knative.dev\/pkg\/logging\"\n\tknativeclient \"knative.dev\/serving\/pkg\/client\/injection\/client\"\n\tingressinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/ingress\"\n)\n\nconst (\n\tcontrollerName = \"KourierController\"\n\tnodeID = \"3scale-kourier-gateway\"\n\tgatewayPort = 19001\n\tmanagementPort = 18000\n)\n\nfunc NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\tkubernetesClient := kubeclient.Get(ctx)\n\tknativeClient := knativeclient.Get(ctx)\n\n\tenvoyXdsServer := envoy.NewEnvoyXdsServer(\n\t\tgatewayPort,\n\t\tmanagementPort,\n\t\tkubernetesClient,\n\t\tknativeClient,\n\t)\n\tgo envoyXdsServer.RunManagementServer()\n\tgo envoyXdsServer.RunGateway()\n\n\tlogger := logging.FromContext(ctx)\n\n\tingressInformer := ingressinformer.Get(ctx)\n\tendpointsInformer := endpointsinformer.Get(ctx)\n\n\tc := &Reconciler{\n\t\tIngressLister: ingressInformer.Lister(),\n\t\tEndpointsLister: endpointsInformer.Lister(),\n\t\tEnvoyXDSServer: envoyXdsServer,\n\t\tkubeClient: kubernetesClient,\n\t}\n\timpl := controller.NewImpl(c, logger, controllerName)\n\n\t\/\/ Ingresses need to be filtered by ingress class, so Kourier does not\n\t\/\/ react to nor modify ingresses created by other gateways.\n\tingressInformerHandler := cache.FilteringResourceEventHandler{\n\t\tFilterFunc: reconciler.AnnotationFilterFunc(\n\t\t\tnetworking.IngressClassAnnotationKey, config.KourierIngressClassName, false,\n\t\t),\n\t\tHandler: controller.HandleAll(impl.Enqueue),\n\t}\n\tingressInformer.Informer().AddEventHandler(ingressInformerHandler)\n\n\t\/\/ In this first version, we are just refreshing the whole config for any\n\t\/\/ endpoint that we receive. So we should always enqueue the same key.\n\tenqueueFunc := func(obj interface{}) {\n\t\timpl.EnqueueKey(EndpointChange)\n\t}\n\n\t\/\/ We only want to react to endpoints that belong to a Knative serving and\n\t\/\/ are public.\n\tendpointsInformerHandler := cache.FilteringResourceEventHandler{\n\t\tFilterFunc: reconciler.LabelFilterFunc(\n\t\t\tnetworking.ServiceTypeKey, string(networking.ServiceTypePublic), false,\n\t\t),\n\t\tHandler: controller.HandleAll(enqueueFunc),\n\t}\n\n\tendpointsInformer.Informer().AddEventHandler(endpointsInformerHandler)\n\n\t\/\/ Force a first event to make sure we initialize a config. Otherwise, there\n\t\/\/ will be no config until a Knative service is deployed.\n\t\/\/ This is important because the gateway pods will not be marked as healthy\n\t\/\/ until they have been able to fetch a config.\n\timpl.EnqueueKey(FullResync)\n\n\treturn impl\n}\n<commit_msg>Use NamespacedName var instead of string<commit_after>package ingress\n\nimport (\n\t\"context\"\n\t\"kourier\/pkg\/config\"\n\t\"kourier\/pkg\/envoy\"\n\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\n\t\"knative.dev\/serving\/pkg\/apis\/networking\"\n\t\"knative.dev\/serving\/pkg\/reconciler\"\n\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tkubeclient \"knative.dev\/pkg\/client\/injection\/kube\/client\"\n\tendpointsinformer \"knative.dev\/pkg\/client\/injection\/kube\/informers\/core\/v1\/endpoints\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\t\"knative.dev\/pkg\/logging\"\n\n\tknativeclient \"knative.dev\/serving\/pkg\/client\/injection\/client\"\n\tingressinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/ingress\"\n)\n\nconst (\n\tcontrollerName = \"KourierController\"\n\tnodeID = \"3scale-kourier-gateway\"\n\tgatewayPort = 19001\n\tmanagementPort = 18000\n)\n\nfunc NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\tkubernetesClient := kubeclient.Get(ctx)\n\tknativeClient := knativeclient.Get(ctx)\n\n\tenvoyXdsServer := envoy.NewEnvoyXdsServer(\n\t\tgatewayPort,\n\t\tmanagementPort,\n\t\tkubernetesClient,\n\t\tknativeClient,\n\t)\n\tgo envoyXdsServer.RunManagementServer()\n\tgo envoyXdsServer.RunGateway()\n\n\tlogger := logging.FromContext(ctx)\n\n\tingressInformer := ingressinformer.Get(ctx)\n\tendpointsInformer := endpointsinformer.Get(ctx)\n\n\tc := &Reconciler{\n\t\tIngressLister: ingressInformer.Lister(),\n\t\tEndpointsLister: endpointsInformer.Lister(),\n\t\tEnvoyXDSServer: envoyXdsServer,\n\t\tkubeClient: kubernetesClient,\n\t}\n\timpl := controller.NewImpl(c, logger, controllerName)\n\n\t\/\/ Ingresses need to be filtered by ingress class, so Kourier does not\n\t\/\/ react to nor modify ingresses created by other gateways.\n\tingressInformerHandler := cache.FilteringResourceEventHandler{\n\t\tFilterFunc: reconciler.AnnotationFilterFunc(\n\t\t\tnetworking.IngressClassAnnotationKey, config.KourierIngressClassName, false,\n\t\t),\n\t\tHandler: controller.HandleAll(impl.Enqueue),\n\t}\n\tingressInformer.Informer().AddEventHandler(ingressInformerHandler)\n\n\t\/\/ In this first version, we are just refreshing the whole config for any\n\t\/\/ endpoint that we receive. So we should always enqueue the same key.\n\tenqueueFunc := func(obj interface{}) {\n\t\tevent := types.NamespacedName{\n\t\t\tName: EndpointChange,\n\t\t}\n\t\timpl.EnqueueKey(event)\n\t}\n\n\t\/\/ We only want to react to endpoints that belong to a Knative serving and\n\t\/\/ are public.\n\tendpointsInformerHandler := cache.FilteringResourceEventHandler{\n\t\tFilterFunc: reconciler.LabelFilterFunc(\n\t\t\tnetworking.ServiceTypeKey, string(networking.ServiceTypePublic), false,\n\t\t),\n\t\tHandler: controller.HandleAll(enqueueFunc),\n\t}\n\n\tendpointsInformer.Informer().AddEventHandler(endpointsInformerHandler)\n\n\t\/\/ Force a first event to make sure we initialize a config. Otherwise, there\n\t\/\/ will be no config until a Knative service is deployed.\n\t\/\/ This is important because the gateway pods will not be marked as healthy\n\t\/\/ until they have been able to fetch a config.\n\tevent := types.NamespacedName{\n\t\tName: FullResync,\n\t}\n\timpl.EnqueueKey(event)\n\n\treturn impl\n}\n<|endoftext|>"} {"text":"<commit_before>package podtask\n\nimport (\n\t\"container\/ring\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\tlog \"github.com\/golang\/glog\"\n\tmesos \"github.com\/mesos\/mesos-go\/mesosproto\"\n\t\"github.com\/mesosphere\/kubernetes-mesos\/pkg\/scheduler\/metrics\"\n)\n\n\/**\nHACK(jdef): we're not using etcd but k8s has implemented namespace support and\nwe're going to try to honor that by namespacing pod keys. Hence, the following\nfuncs that were stolen from:\n https:\/\/github.com\/GoogleCloudPlatform\/kubernetes\/blob\/release-0.5\/pkg\/registry\/etcd\/etcd.go\n**\/\n\nconst (\n\tPodPath = \"\/pods\"\n\tdefaultFinishedTasksSize = 1024\n)\n\ntype Registry interface {\n\tRegister(*T, error) (*T, error)\n\tUnregister(*T)\n\tGet(taskId string) (task *T, currentState StateType)\n\tTaskForPod(podID string) (taskID string, ok bool)\n\tUpdateStatus(status *mesos.TaskStatus) (*T, StateType)\n\t\/\/ return a list of task ID's that match the given filter, or all task ID's if filter == nil\n\tList(func(*T) bool) []string\n}\n\ntype inMemoryRegistry struct {\n\trw sync.RWMutex\n\ttaskRegistry map[string]*T\n\ttasksFinished *ring.Ring\n\tpodToTask map[string]string\n}\n\nfunc NewInMemoryRegistry() Registry {\n\treturn &inMemoryRegistry{\n\t\ttaskRegistry: make(map[string]*T),\n\t\ttasksFinished: ring.New(defaultFinishedTasksSize),\n\t\tpodToTask: make(map[string]string),\n\t}\n}\n\nfunc (k *inMemoryRegistry) List(accepts func(t *T) bool) (taskids []string) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\tfor id, task := range k.taskRegistry {\n\t\tif accepts == nil || accepts(task) {\n\t\t\ttaskids = append(taskids, id)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (k *inMemoryRegistry) TaskForPod(podID string) (taskID string, ok bool) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\t\/\/ assume caller is holding scheduler lock\n\ttaskID, ok = k.podToTask[podID]\n\treturn\n}\n\n\/\/ registers a pod task unless the spec'd error is not nil\nfunc (k *inMemoryRegistry) Register(task *T, err error) (*T, error) {\n\tif err == nil {\n\t\tk.rw.Lock()\n\t\tdefer k.rw.Unlock()\n\t\tk.podToTask[task.podKey] = task.ID\n\t\tk.taskRegistry[task.ID] = task\n\t}\n\treturn task, err\n}\n\nfunc (k *inMemoryRegistry) Unregister(task *T) {\n\tk.rw.Lock()\n\tdefer k.rw.Unlock()\n\tdelete(k.podToTask, task.podKey)\n\tdelete(k.taskRegistry, task.ID)\n}\n\nfunc (k *inMemoryRegistry) Get(taskId string) (*T, StateType) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\treturn k._get(taskId)\n}\n\n\/\/ assume that the caller has already locked around access to task state\nfunc (k *inMemoryRegistry) _get(taskId string) (*T, StateType) {\n\tif task, found := k.taskRegistry[taskId]; found {\n\t\treturn task, task.State\n\t}\n\treturn nil, StateUnknown\n}\n\nfunc (k *inMemoryRegistry) UpdateStatus(status *mesos.TaskStatus) (*T, StateType) {\n\ttaskId := status.GetTaskId().GetValue()\n\n\tk.rw.Lock()\n\tdefer k.rw.Unlock()\n\ttask, state := k._get(taskId)\n\n\tswitch status.GetState() {\n\tcase mesos.TaskState_TASK_STAGING:\n\t\tk.handleTaskStaging(task, state, status)\n\tcase mesos.TaskState_TASK_STARTING:\n\t\tk.handleTaskStarting(task, state, status)\n\tcase mesos.TaskState_TASK_RUNNING:\n\t\tk.handleTaskRunning(task, state, status)\n\tcase mesos.TaskState_TASK_FINISHED:\n\t\tk.handleTaskFinished(task, state, status)\n\tcase mesos.TaskState_TASK_FAILED:\n\t\tk.handleTaskFailed(task, state, status)\n\tcase mesos.TaskState_TASK_KILLED:\n\t\tk.handleTaskKilled(task, state, status)\n\tcase mesos.TaskState_TASK_LOST:\n\t\tk.handleTaskLost(task, state, status)\n\tdefault:\n\t\tlog.Warning(\"unhandled task status update: %+v\", status)\n\t}\n\treturn task, state\n}\n\nfunc (k *inMemoryRegistry) handleTaskStaging(task *T, state StateType, status *mesos.TaskStatus) {\n\t\/\/TODO(jdef) validate that update source is SOURCE_MASTER, otherwise the system is doing something wonky\n\tlog.Errorf(\"Not implemented: task staging\")\n}\n\nfunc (k *inMemoryRegistry) handleTaskStarting(task *T, state StateType, status *mesos.TaskStatus) {\n\t\/\/ we expect to receive this when a launched task is finally \"bound\"\n\t\/\/ via the API server. however, there's nothing specific for us to do\n\t\/\/ here.\n\tswitch state {\n\tcase StatePending:\n\t\ttask.UpdatedTime = time.Now()\n\t\tif !task.Has(Bound) {\n\t\t\ttask.Set(Bound)\n\t\t\ttask.bindTime = task.UpdatedTime\n\t\t\ttimeToBind := task.bindTime.Sub(task.launchTime)\n\t\t\tmetrics.BindLatency.Observe(metrics.InMicroseconds(timeToBind))\n\t\t}\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_STARTING because the the task is not pending\")\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskRunning(task *T, state StateType, status *mesos.TaskStatus) {\n\tswitch state {\n\tcase StatePending:\n\t\ttask.UpdatedTime = time.Now()\n\t\tlog.Infof(\"Received running status for pending task: %+v\", status)\n\t\tfillRunningPodInfo(task, status)\n\t\ttask.State = StateRunning\n\tcase StateRunning:\n\t\ttask.UpdatedTime = time.Now()\n\t\tlog.V(2).Info(\"Ignore status TASK_RUNNING because the the task is already running\")\n\tcase StateFinished:\n\t\tlog.Warningf(\"Ignore status TASK_RUNNING because the the task is already finished\")\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_RUNNING (%+v) because the the task is discarded\", status.GetTaskId())\n\t}\n}\n\nfunc ParsePodStatusResult(taskStatus *mesos.TaskStatus) (result api.PodStatusResult, err error) {\n\tif taskStatus.Data != nil {\n\t\terr = json.Unmarshal(taskStatus.Data, &result)\n\t} else {\n\t\terr = fmt.Errorf(\"missing TaskStatus.Data\")\n\t}\n\treturn\n}\n\nfunc fillRunningPodInfo(task *T, taskStatus *mesos.TaskStatus) {\n\tif result, err := ParsePodStatusResult(taskStatus); err != nil {\n\t\tlog.Errorf(\"invalid TaskStatus.Data for task '%v': %v\", task.ID, err)\n\t} else {\n\t\ttask.Pod.Status = result.Status\n\t\tlog.Infof(\"received pod status for task %v: %+v\", task.ID, task.Pod.Status)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskFinished(task *T, state StateType, status *mesos.TaskStatus) {\n\tswitch state {\n\tcase StatePending:\n\t\tpanic(\"Pending task finished, this couldn't happen\")\n\tcase StateRunning:\n\t\tlog.V(2).Infof(\"received finished status for running task: %+v\", status)\n\t\tdelete(k.podToTask, task.podKey)\n\t\ttask.State = StateFinished\n\t\ttask.UpdatedTime = time.Now()\n\t\tk.tasksFinished = k.recordFinishedTask(task.ID)\n\tcase StateFinished:\n\t\tlog.Warningf(\"Ignore status TASK_FINISHED because the the task is already finished\")\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_FINISHED because the the task is not running\")\n\t}\n}\n\n\/\/ record that a task has finished.\n\/\/ older record are expunged one at a time once the historical ring buffer is saturated.\n\/\/ assumes caller is holding state lock.\nfunc (k *inMemoryRegistry) recordFinishedTask(taskId string) *ring.Ring {\n\tslot := k.tasksFinished.Next()\n\tif slot.Value != nil {\n\t\t\/\/ garbage collect older finished task from the registry\n\t\tgctaskId := slot.Value.(string)\n\t\tif gctask, found := k.taskRegistry[gctaskId]; found && gctask.State == StateFinished {\n\t\t\tdelete(k.taskRegistry, gctaskId)\n\t\t}\n\t}\n\tslot.Value = taskId\n\treturn slot\n}\n\nfunc (k *inMemoryRegistry) handleTaskFailed(task *T, state StateType, status *mesos.TaskStatus) {\n\tlog.Errorf(\"task failed: %+v\", status)\n\tswitch state {\n\tcase StatePending:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\tcase StateRunning:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskKilled(task *T, state StateType, status *mesos.TaskStatus) {\n\tdefer func() {\n\t\tmsg := fmt.Sprintf(\"task killed: %+v, task %+v\", status, task)\n\t\tif task != nil && task.Has(Deleted) {\n\t\t\t\/\/ we were expecting this, nothing out of the ordinary\n\t\t\tlog.V(2).Infoln(msg)\n\t\t} else {\n\t\t\tlog.Errorln(msg)\n\t\t}\n\t}()\n\tswitch state {\n\tcase StatePending, StateRunning:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskLost(task *T, state StateType, status *mesos.TaskStatus) {\n\tlog.Warningf(\"task lost: %+v\", status)\n\tswitch state {\n\tcase StateRunning, StatePending:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n<commit_msg>resolve TODO for staging tasks, check that source == MASTER<commit_after>package podtask\n\nimport (\n\t\"container\/ring\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\tlog \"github.com\/golang\/glog\"\n\tmesos \"github.com\/mesos\/mesos-go\/mesosproto\"\n\t\"github.com\/mesosphere\/kubernetes-mesos\/pkg\/scheduler\/metrics\"\n)\n\n\/**\nHACK(jdef): we're not using etcd but k8s has implemented namespace support and\nwe're going to try to honor that by namespacing pod keys. Hence, the following\nfuncs that were stolen from:\n https:\/\/github.com\/GoogleCloudPlatform\/kubernetes\/blob\/release-0.5\/pkg\/registry\/etcd\/etcd.go\n**\/\n\nconst (\n\tPodPath = \"\/pods\"\n\tdefaultFinishedTasksSize = 1024\n)\n\ntype Registry interface {\n\tRegister(*T, error) (*T, error)\n\tUnregister(*T)\n\tGet(taskId string) (task *T, currentState StateType)\n\tTaskForPod(podID string) (taskID string, ok bool)\n\tUpdateStatus(status *mesos.TaskStatus) (*T, StateType)\n\t\/\/ return a list of task ID's that match the given filter, or all task ID's if filter == nil\n\tList(func(*T) bool) []string\n}\n\ntype inMemoryRegistry struct {\n\trw sync.RWMutex\n\ttaskRegistry map[string]*T\n\ttasksFinished *ring.Ring\n\tpodToTask map[string]string\n}\n\nfunc NewInMemoryRegistry() Registry {\n\treturn &inMemoryRegistry{\n\t\ttaskRegistry: make(map[string]*T),\n\t\ttasksFinished: ring.New(defaultFinishedTasksSize),\n\t\tpodToTask: make(map[string]string),\n\t}\n}\n\nfunc (k *inMemoryRegistry) List(accepts func(t *T) bool) (taskids []string) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\tfor id, task := range k.taskRegistry {\n\t\tif accepts == nil || accepts(task) {\n\t\t\ttaskids = append(taskids, id)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (k *inMemoryRegistry) TaskForPod(podID string) (taskID string, ok bool) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\t\/\/ assume caller is holding scheduler lock\n\ttaskID, ok = k.podToTask[podID]\n\treturn\n}\n\n\/\/ registers a pod task unless the spec'd error is not nil\nfunc (k *inMemoryRegistry) Register(task *T, err error) (*T, error) {\n\tif err == nil {\n\t\tk.rw.Lock()\n\t\tdefer k.rw.Unlock()\n\t\tk.podToTask[task.podKey] = task.ID\n\t\tk.taskRegistry[task.ID] = task\n\t}\n\treturn task, err\n}\n\nfunc (k *inMemoryRegistry) Unregister(task *T) {\n\tk.rw.Lock()\n\tdefer k.rw.Unlock()\n\tdelete(k.podToTask, task.podKey)\n\tdelete(k.taskRegistry, task.ID)\n}\n\nfunc (k *inMemoryRegistry) Get(taskId string) (*T, StateType) {\n\tk.rw.RLock()\n\tdefer k.rw.RUnlock()\n\treturn k._get(taskId)\n}\n\n\/\/ assume that the caller has already locked around access to task state\nfunc (k *inMemoryRegistry) _get(taskId string) (*T, StateType) {\n\tif task, found := k.taskRegistry[taskId]; found {\n\t\treturn task, task.State\n\t}\n\treturn nil, StateUnknown\n}\n\nfunc (k *inMemoryRegistry) UpdateStatus(status *mesos.TaskStatus) (*T, StateType) {\n\ttaskId := status.GetTaskId().GetValue()\n\n\tk.rw.Lock()\n\tdefer k.rw.Unlock()\n\ttask, state := k._get(taskId)\n\n\tswitch status.GetState() {\n\tcase mesos.TaskState_TASK_STAGING:\n\t\tk.handleTaskStaging(task, state, status)\n\tcase mesos.TaskState_TASK_STARTING:\n\t\tk.handleTaskStarting(task, state, status)\n\tcase mesos.TaskState_TASK_RUNNING:\n\t\tk.handleTaskRunning(task, state, status)\n\tcase mesos.TaskState_TASK_FINISHED:\n\t\tk.handleTaskFinished(task, state, status)\n\tcase mesos.TaskState_TASK_FAILED:\n\t\tk.handleTaskFailed(task, state, status)\n\tcase mesos.TaskState_TASK_KILLED:\n\t\tk.handleTaskKilled(task, state, status)\n\tcase mesos.TaskState_TASK_LOST:\n\t\tk.handleTaskLost(task, state, status)\n\tdefault:\n\t\tlog.Warning(\"unhandled task status update: %+v\", status)\n\t}\n\treturn task, state\n}\n\nfunc (k *inMemoryRegistry) handleTaskStaging(task *T, state StateType, status *mesos.TaskStatus) {\n\tif status.GetSource() != mesos.TaskStatus_SOURCE_MASTER {\n\t\tlog.Errorf(\"received STAGING for task %v with unexpected source: %v\",\n\t\t\tstatus.GetTaskId().GetValue(), status.GetSource())\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskStarting(task *T, state StateType, status *mesos.TaskStatus) {\n\t\/\/ we expect to receive this when a launched task is finally \"bound\"\n\t\/\/ via the API server. however, there's nothing specific for us to do\n\t\/\/ here.\n\tswitch state {\n\tcase StatePending:\n\t\ttask.UpdatedTime = time.Now()\n\t\tif !task.Has(Bound) {\n\t\t\ttask.Set(Bound)\n\t\t\ttask.bindTime = task.UpdatedTime\n\t\t\ttimeToBind := task.bindTime.Sub(task.launchTime)\n\t\t\tmetrics.BindLatency.Observe(metrics.InMicroseconds(timeToBind))\n\t\t}\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_STARTING because the the task is not pending\")\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskRunning(task *T, state StateType, status *mesos.TaskStatus) {\n\tswitch state {\n\tcase StatePending:\n\t\ttask.UpdatedTime = time.Now()\n\t\tlog.Infof(\"Received running status for pending task: %+v\", status)\n\t\tfillRunningPodInfo(task, status)\n\t\ttask.State = StateRunning\n\tcase StateRunning:\n\t\ttask.UpdatedTime = time.Now()\n\t\tlog.V(2).Info(\"Ignore status TASK_RUNNING because the the task is already running\")\n\tcase StateFinished:\n\t\tlog.Warningf(\"Ignore status TASK_RUNNING because the the task is already finished\")\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_RUNNING (%+v) because the the task is discarded\", status.GetTaskId())\n\t}\n}\n\nfunc ParsePodStatusResult(taskStatus *mesos.TaskStatus) (result api.PodStatusResult, err error) {\n\tif taskStatus.Data != nil {\n\t\terr = json.Unmarshal(taskStatus.Data, &result)\n\t} else {\n\t\terr = fmt.Errorf(\"missing TaskStatus.Data\")\n\t}\n\treturn\n}\n\nfunc fillRunningPodInfo(task *T, taskStatus *mesos.TaskStatus) {\n\tif result, err := ParsePodStatusResult(taskStatus); err != nil {\n\t\tlog.Errorf(\"invalid TaskStatus.Data for task '%v': %v\", task.ID, err)\n\t} else {\n\t\ttask.Pod.Status = result.Status\n\t\tlog.Infof(\"received pod status for task %v: %+v\", task.ID, task.Pod.Status)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskFinished(task *T, state StateType, status *mesos.TaskStatus) {\n\tswitch state {\n\tcase StatePending:\n\t\tpanic(\"Pending task finished, this couldn't happen\")\n\tcase StateRunning:\n\t\tlog.V(2).Infof(\"received finished status for running task: %+v\", status)\n\t\tdelete(k.podToTask, task.podKey)\n\t\ttask.State = StateFinished\n\t\ttask.UpdatedTime = time.Now()\n\t\tk.tasksFinished = k.recordFinishedTask(task.ID)\n\tcase StateFinished:\n\t\tlog.Warningf(\"Ignore status TASK_FINISHED because the the task is already finished\")\n\tdefault:\n\t\tlog.Warningf(\"Ignore status TASK_FINISHED because the the task is not running\")\n\t}\n}\n\n\/\/ record that a task has finished.\n\/\/ older record are expunged one at a time once the historical ring buffer is saturated.\n\/\/ assumes caller is holding state lock.\nfunc (k *inMemoryRegistry) recordFinishedTask(taskId string) *ring.Ring {\n\tslot := k.tasksFinished.Next()\n\tif slot.Value != nil {\n\t\t\/\/ garbage collect older finished task from the registry\n\t\tgctaskId := slot.Value.(string)\n\t\tif gctask, found := k.taskRegistry[gctaskId]; found && gctask.State == StateFinished {\n\t\t\tdelete(k.taskRegistry, gctaskId)\n\t\t}\n\t}\n\tslot.Value = taskId\n\treturn slot\n}\n\nfunc (k *inMemoryRegistry) handleTaskFailed(task *T, state StateType, status *mesos.TaskStatus) {\n\tlog.Errorf(\"task failed: %+v\", status)\n\tswitch state {\n\tcase StatePending:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\tcase StateRunning:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskKilled(task *T, state StateType, status *mesos.TaskStatus) {\n\tdefer func() {\n\t\tmsg := fmt.Sprintf(\"task killed: %+v, task %+v\", status, task)\n\t\tif task != nil && task.Has(Deleted) {\n\t\t\t\/\/ we were expecting this, nothing out of the ordinary\n\t\t\tlog.V(2).Infoln(msg)\n\t\t} else {\n\t\t\tlog.Errorln(msg)\n\t\t}\n\t}()\n\tswitch state {\n\tcase StatePending, StateRunning:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n\nfunc (k *inMemoryRegistry) handleTaskLost(task *T, state StateType, status *mesos.TaskStatus) {\n\tlog.Warningf(\"task lost: %+v\", status)\n\tswitch state {\n\tcase StateRunning, StatePending:\n\t\tdelete(k.taskRegistry, task.ID)\n\t\tdelete(k.podToTask, task.podKey)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package controller\n<commit_msg>Add controller placeholder\/notes<commit_after>package controller\n\n\/\/import (\n\/\/\t. \"github.com\/Aptomi\/aptomi\/pkg\/slinga\/object\"\n\/\/)\n\ntype RevisionController interface {\n\t\/*\n\t\trouter.GET(\"\/api\/v1\/revision\/:rev\/policy\", h.handleGetPolicy) \/\/ get full policy from specific revision\n\t\trouter.GET(\"\/api\/v1\/revision\/:rev\/policy\/key\/:key\", h.handleGetPolicy) \/\/ get by key from specific revision\n\t\trouter.GET(\"\/api\/v1\/revision\/:rev\/policy\/namespace\/:ns\", h.handleGetPolicy) \/\/ get policy for namespace from specific revision\n\n\t\trouter.POST(\"\/api\/v1\/revision\", h.handleNewRevision)\n\t*\/\n\n\t\/\/GetRevision(generation Generation)\n}\n\ntype RevisionControllerImpl struct {\n}\n\nfunc NewRevisionController() RevisionController {\n\treturn &RevisionControllerImpl{}\n}\n\n\/*\n\nfunc (reg *Registry) LoadPolicy(gen Generation) (*PolicyNamespace, error) {\n\tpolicyObj, err := reg.store.GetNewestOne(\"system\", PolicyNamespaceDataObject.Kind, \"main\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpolicyData, ok := policyObj.(*PolicyNamespaceData)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Can't cast object from store to PolicyData: %v\", policyObj)\n\t}\n\n\tpolicy := NewPolicyNamespace()\n\n\tkeys := make([]Key, 0, len(policyData.Objects))\n\tfor _, key := range policyData.Objects {\n\t\tkeys = append(keys, key)\n\t}\n\n\tobjects, err := reg.store.GetManyByKeys(keys)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Can't load objects for policy data %s: %s\", policyData.GetKey(), err)\n\t}\n\n\tfor _, obj := range objects {\n\t\tfmt.Println(\"Loaded object\")\n\t\tfmt.Println(obj)\n\t\tpolicy.AddObject(obj)\n\t}\n\n\treturn policy, nil\n}\n\n*\/\n<|endoftext|>"} {"text":"<commit_before>package libvirt\n\nimport (\n lv \"github.com\/libvirt\/libvirt-go\"\n \"github.com\/influxdata\/telegraf\"\n \"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n)\n\nconst sampleConfig = `\n# specify a libvirt connection uri\nuri = \"qemu:\/\/\/system\"\n`\n\ntype Libvirt struct {\n Uri string\n}\n\nfunc (l *Libvirt) SampleConfig() string {\n return sampleConfig\n}\n\nfunc (l *Libvirt) Description() string {\n return \"Read domain infos from a libvirt deamon\"\n}\n\nfunc (l *Libvirt) Gather(acc telegraf.Accumulator) error {\n connection, err := lv.NewConnectReadOnly(l.Uri)\n if err != nil {\n return err\n }\n defer connection.Close()\n\n domains, err := connection.ListAllDomains(lv.CONNECT_LIST_DOMAINS_ACTIVE)\n if err != nil {\n return err\n }\n\n for _, domain := range domains {\n domainInfo, err := domain.GetInfo()\n if err != nil {\n return err\n }\n\n uid, err := domain.GetUUIDString()\n if err != nil {\n return err\n }\n tags := map[string]string{\"vm\": uid, \"cloud\": \"new\"}\n acc.AddFields(\"vm.cpu_time\", map[string]interface{}{\"value\": float64(domainInfo.CpuTime)} , tags)\n acc.AddFields(\"vm.max_mem\", map[string]interface{}{\"value\": float64(domainInfo.MaxMem)}, tags)\n acc.AddFields(\"vm.memory\", map[string]interface{}{\"value\": float64(domainInfo.Memory)}, tags)\n acc.AddFields(\"vm.nr_virt_cpu\", map[string]interface{}{\"value\": float64(domainInfo.NrVirtCpu)}, tags)\n\n GatherInterfaces(*connection, domain, acc, tags)\n\n GatherDisks(*connection, domain, acc, tags)\n domain.Free()\n }\n\n return nil\n}\n\nfunc GatherInterfaces(c lv.Connect, d lv.Domain, acc telegraf.Accumulator , tags map[string]string) error {\n domStat, err := c.GetAllDomainStats(\n []*lv.Domain{&d},\n lv.DOMAIN_STATS_INTERFACE,\n lv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n )\n if err != nil {\n return err\n }\n defer domStat[0].Domain.Free()\n for _, iface := range domStat[0].Net {\n tags[\"name\"] = iface.Name\n acc.AddFields(\"vm.interface.rx_bytes\", map[string]interface{}{\"value\": float64(iface.RxBytes)}, tags)\n acc.AddFields(\"vm.interface.rx_packets\", map[string]interface{}{\"value\": float64(iface.RxPkts)}, tags)\n acc.AddFields(\"vm.interface.rx_errs\", map[string]interface{}{\"value\": float64(iface.RxErrs)}, tags)\n acc.AddFields(\"vm.interface.rx_drop\", map[string]interface{}{\"value\": float64(iface.RxDrop)}, tags)\n acc.AddFields(\"vm.interface.tx_bytes\", map[string]interface{}{\"value\": float64(iface.TxBytes)}, tags)\n acc.AddFields(\"vm.interface.tx_packets\", map[string]interface{}{\"value\": float64(iface.TxPkts)}, tags)\n acc.AddFields(\"vm.interface.tx_errs\", map[string]interface{}{\"value\": float64(iface.TxErrs)}, tags)\n acc.AddFields(\"vm.interface.tx_drop\", map[string]interface{}{\"value\": float64(iface.TxDrop)}, tags)\n delete(tags, \"name\")\n }\n return nil\n}\n\nfunc GatherDisks(c lv.Connect, d lv.Domain, acc telegraf.Accumulator , tags map[string]string) error {\n domStats, err := c.GetAllDomainStats(\n []*lv.Domain{&d},\n lv.DOMAIN_STATS_BLOCK,\n lv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n )\n if err != nil {\n return err\n }\n defer domStats[0].Domain.Free()\n for _, disk := range domStats[0].Block {\n tags[\"name\"] = disk.Name\n acc.AddFields(\"vm.disk.rd_req\", map[string]interface{}{\"value\": float64(disk.RdReqs)}, tags)\n acc.AddFields(\"vm.disk.rd_bytes\", map[string]interface{}{\"value\": float64(disk.RdBytes)}, tags)\n acc.AddFields(\"vm.disk.wr_req\", map[string]interface{}{\"value\": float64(disk.WrReqs)}, tags)\n acc.AddFields(\"vm.disk.wr_bytes\", map[string]interface{}{\"value\": float64(disk.WrBytes)}, tags)\n acc.AddFields(\"vm.disk.errs\", map[string]interface{}{\"value\": float64(disk.Errors)}, tags)\n delete(tags, \"name\")\n }\n return nil\n}\n\n\nfunc init() {\n inputs.Add(\"libvirt\", func() telegraf.Input {\n return &Libvirt{}\n })\n}\n<commit_msg>different measurements<commit_after>package libvirt\n\nimport (\n lv \"github.com\/libvirt\/libvirt-go\"\n \"github.com\/influxdata\/telegraf\"\n \"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n)\n\nconst sampleConfig = `\n# specify a libvirt connection uri\nuri = \"qemu:\/\/\/system\"\n`\n\ntype Libvirt struct {\n Uri string\n}\n\nfunc (l *Libvirt) SampleConfig() string {\n return sampleConfig\n}\n\nfunc (l *Libvirt) Description() string {\n return \"Read domain infos from a libvirt deamon\"\n}\n\nfunc (l *Libvirt) Gather(acc telegraf.Accumulator) error {\n connection, err := lv.NewConnectReadOnly(l.Uri)\n if err != nil {\n return err\n }\n defer connection.Close()\n\n domains, err := connection.ListAllDomains(lv.CONNECT_LIST_DOMAINS_ACTIVE)\n if err != nil {\n return err\n }\n\n for _, domain := range domains {\n domainInfo, err := domain.GetInfo()\n if err != nil {\n return err\n }\n\n uuid, err := domain.GetUUIDString()\n if err != nil {\n return err\n }\n fields := map[string]interface{}{\n \"vm\": uuid,\n }\n tags := make(map[string]string)\n fields[\"cpu_time\"] = domainInfo.CpuTime\n fields[\"nr_virt_cpu\"] = domainInfo.NrVirtCpu\n\n stats, err := domain.MemoryStats(10,0)\n if err != nil {\n return err\n }\n m := map[int32]string{\n int32(lv.DOMAIN_MEMORY_STAT_AVAILABLE): \"mem_max\",\n int32(lv.DOMAIN_MEMORY_STAT_USABLE): \"mem_free\",\n }\n for _, stat := range stats {\n if val, ok := m[stat.Tag]; ok {\n fields[val] = stat.Val\n }\n }\n acc.AddFields(\"vm.data\", fields, tags)\n\n GatherInterfaces(*connection, domain, acc, uuid)\n\n GatherDisks(*connection, domain, acc, uuid)\n\n domain.Free()\n }\n\n return nil\n}\n\nfunc GatherInterfaces(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, uuid string) error {\n domStat, err := c.GetAllDomainStats(\n []*lv.Domain{&d},\n lv.DOMAIN_STATS_INTERFACE,\n lv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n )\n if err != nil {\n return err\n }\n defer domStat[0].Domain.Free()\n for _, iface := range domStat[0].Net {\n fields := map[string]interface{}{\n \"vm\" : uuid,\n \"rx_bytes\" : iface.RxBytes,\n \"rx_packets\" : iface.RxPkts,\n \"rx_errs\" : iface.RxErrs,\n \"rx_drop\" : iface.RxDrop,\n \"tx_bytes\" : iface.TxBytes,\n \"tx_packets\" : iface.TxPkts,\n \"tx_errs\" : iface.TxErrs,\n \"tx_drop\" : iface.TxDrop,\n }\n acc.AddFields(\"vm.data\", fields, map[string]string{\"interface\": iface.Name})\n }\n return nil\n}\n\nfunc GatherDisks(c lv.Connect, d lv.Domain, acc telegraf.Accumulator, uuid string) error {\n domStats, err := c.GetAllDomainStats(\n []*lv.Domain{&d},\n lv.DOMAIN_STATS_BLOCK,\n lv.CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE,\n )\n if err != nil {\n return err\n }\n defer domStats[0].Domain.Free()\n for _, disk := range domStats[0].Block {\n fields := map[string]interface{}{\n \"vm\" : uuid,\n \"rd_req\" : disk.RdReqs,\n \"rd_bytes\" : disk.RdBytes,\n \"wr_req\" : disk.WrReqs,\n \"wr_bytes\" : disk.WrBytes,\n \"errs\" : disk.Errors,\n }\n acc.AddFields(\"vm.data\", fields, map[string]string{\"disk\": disk.Name})\n }\n return nil\n}\n\nfunc init() {\n inputs.Add(\"libvirt\", func() telegraf.Input {\n return &Libvirt{}\n })\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"math\"\n \"net\"\n \"net\/http\"\n \"os\"\n \"strconv\"\n \"sync\/atomic\"\n \"time\"\n\n \"amproxy\/envparse\"\n \"amproxy\/message\"\n)\n\ntype counters struct {\n Connections uint64 `json:\"connections\"`\n BadSkew uint64 `json:\"badskew\"`\n BadSig uint64 `json:\"badsig\"`\n BadKeyundef uint64 `json:\"badkeyundef\"`\n BadDecompose uint64 `json:\"baddecompose\"`\n BadMetric uint64 `json:\"badmetric\"`\n BadCarbonconn uint64 `json:\"badcarbonconn\"`\n BadCarbonwrite uint64 `json:\"badcarbonwrite\"`\n GoodMetric uint64 `json:\"goodmetric\"`\n}\n\nvar cServerAddr *net.TCPAddr\nvar skew float64\nvar authMap map[string]Creds\nvar c counters\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n b, err := json.Marshal(c)\n if err == nil {\n fmt.Fprintf(w, \"%s\", b)\n }\n}\n\nfunc main() {\n var err error\n\n c.Connections = 0\n c.BadSkew = 0\n c.BadSig = 0\n c.BadKeyundef = 0\n c.BadDecompose = 0\n c.BadMetric = 0\n c.BadCarbonwrite = 0\n c.BadCarbonconn = 0\n c.GoodMetric = 0\n\n \/\/ Read config from the environment\n var bInterface = envparse.GetSettingStr(\"BIND_INTERFACE\", \"127.0.0.1\")\n var bPort = envparse.GetSettingInt(\"BIND_PORT\", 2005)\n var cServer = envparse.GetSettingStr(\"CARBON_SERVER\", \"localhost\")\n var cPort = envparse.GetSettingInt(\"CARBON_PORT\", 2003)\n var authFile = envparse.GetSettingStr(\"AUTH_FILE\", \"\")\n skew = float64(envparse.GetSettingInt(\"SKEW\", 300))\n\n if(authFile == \"\") {\n println(\"No auth file passed\")\n os.Exit(1)\n }\n\n authMap = loadUserConfigFile(authFile)\n\n cServerAddr, err = net.ResolveTCPAddr(\"tcp\", cServer + \":\" + strconv.Itoa(cPort))\n if err != nil {\n println(\"Unable to resolve carbon server: \", err.Error())\n os.Exit(1)\n }\n fmt.Printf(\"Carbon server: %s:%d\\n\", cServer, cPort)\n\n \/\/ Set up the metrics http server\n go func() {\n http.HandleFunc(\"\/\", handler)\n http.ListenAndServe(\":8080\", nil)\n }()\n\n \/\/ Listen for incoming connections.\n l, err := net.Listen(\"tcp\", bInterface + \":\" + strconv.Itoa(bPort))\n if err != nil {\n fmt.Println(\"Error listening:\", err.Error())\n os.Exit(1)\n }\n \/\/ Close the listener when the application closes.\n defer l.Close()\n fmt.Println(\"Listening on \" + bInterface + \":\" + strconv.Itoa(bPort))\n\n for {\n \/\/ Listen for an incoming connection.\n conn, err := l.Accept()\n if err != nil {\n fmt.Println(\"Error accepting: \", err.Error())\n os.Exit(1)\n }\n \/\/ Handle connections in a new goroutine.\n atomic.AddUint64(&c.Connections, 1)\n go handleRequest(conn)\n }\n}\n\nfunc handleRequest(conn net.Conn) {\n defer conn.Close()\n\n fmt.Println(\"Connection from: \", conn.RemoteAddr())\n\n \/\/ connect to carbon server\n carbon_conn, err := net.DialTCP(\"tcp\", nil, cServerAddr)\n if err != nil {\n atomic.AddUint64(&c.BadCarbonconn, 1)\n println(\"Connection to carbon server failed:\", err.Error())\n return\n }\n defer carbon_conn.Close()\n\n var buf [1024]byte\n for {\n n, err := conn.Read(buf[0:])\n if err != nil {\n return\n }\n\n fmt.Println(string(buf[:n]))\n msg := new(message.Message)\n e := msg.Decompose(string(buf[:n]))\n if e != nil {\n atomic.AddUint64(&c.BadDecompose, 1)\n fmt.Printf(\"Error decomposing message %q - %s\\n\", string(buf[:n]), e.Error())\n return\n }\n\n creds, ok := authMap[msg.Public_key]\n\n if !ok {\n atomic.AddUint64(&c.BadKeyundef, 1)\n fmt.Printf(\"key not defined for %s\\n\", msg.Public_key)\n return\n }\n\n sig := msg.ComputeSignature(creds.SecretKey)\n\n if sig != msg.Signature {\n atomic.AddUint64(&c.BadSig, 1)\n fmt.Printf(\"Computed signature %s doesn't match provided signature %s\\n\", sig, msg.Signature)\n return\n }\n\n delta := math.Abs(float64(time.Now().Unix() - int64(msg.Timestamp)))\n if delta > skew {\n atomic.AddUint64(&c.BadSkew, 1)\n fmt.Printf(\"delta = %.0f, max skew set to %.0f\\n\", delta, skew)\n return\n }\n\n \/\/ validate the metric is on the approved list\n _, ok = creds.Metrics[msg.Name]\n if !ok {\n atomic.AddUint64(&c.BadMetric, 1)\n fmt.Printf(\"not an approved metric: %s\", msg.Name)\n return\n }\n\n fmt.Println(msg.Public_key)\n fmt.Println(sig)\n\n _, err = carbon_conn.Write([]byte(msg.MetricStr() + \"\\n\"))\n if err != nil {\n atomic.AddUint64(&c.BadCarbonwrite, 1)\n println(\"Write to carbon server failed:\", err.Error())\n return\n }\n atomic.AddUint64(&c.GoodMetric, 1)\n\n \/\/ write the n bytes read\n _, err2 := conn.Write(buf[0:n])\n if err2 != nil {\n return\n }\n }\n}\n<commit_msg>set content type header<commit_after>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"math\"\n \"net\"\n \"net\/http\"\n \"os\"\n \"strconv\"\n \"sync\/atomic\"\n \"time\"\n\n \"amproxy\/envparse\"\n \"amproxy\/message\"\n)\n\ntype counters struct {\n Connections uint64 `json:\"connections\"`\n BadSkew uint64 `json:\"badskew\"`\n BadSig uint64 `json:\"badsig\"`\n BadKeyundef uint64 `json:\"badkeyundef\"`\n BadDecompose uint64 `json:\"baddecompose\"`\n BadMetric uint64 `json:\"badmetric\"`\n BadCarbonconn uint64 `json:\"badcarbonconn\"`\n BadCarbonwrite uint64 `json:\"badcarbonwrite\"`\n GoodMetric uint64 `json:\"goodmetric\"`\n}\n\nvar cServerAddr *net.TCPAddr\nvar skew float64\nvar authMap map[string]Creds\nvar c counters\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n b, err := json.Marshal(c)\n\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n w.Header().Set(\"Content-Type\", \"application\/json\")\n w.Write(b)\n}\n\nfunc main() {\n var err error\n\n c.Connections = 0\n c.BadSkew = 0\n c.BadSig = 0\n c.BadKeyundef = 0\n c.BadDecompose = 0\n c.BadMetric = 0\n c.BadCarbonwrite = 0\n c.BadCarbonconn = 0\n c.GoodMetric = 0\n\n \/\/ Read config from the environment\n var bInterface = envparse.GetSettingStr(\"BIND_INTERFACE\", \"127.0.0.1\")\n var bPort = envparse.GetSettingInt(\"BIND_PORT\", 2005)\n var cServer = envparse.GetSettingStr(\"CARBON_SERVER\", \"localhost\")\n var cPort = envparse.GetSettingInt(\"CARBON_PORT\", 2003)\n var authFile = envparse.GetSettingStr(\"AUTH_FILE\", \"\")\n skew = float64(envparse.GetSettingInt(\"SKEW\", 300))\n\n if(authFile == \"\") {\n println(\"No auth file passed\")\n os.Exit(1)\n }\n\n authMap = loadUserConfigFile(authFile)\n\n cServerAddr, err = net.ResolveTCPAddr(\"tcp\", cServer + \":\" + strconv.Itoa(cPort))\n if err != nil {\n println(\"Unable to resolve carbon server: \", err.Error())\n os.Exit(1)\n }\n fmt.Printf(\"Carbon server: %s:%d\\n\", cServer, cPort)\n\n \/\/ Set up the metrics http server\n go func() {\n http.HandleFunc(\"\/\", handler)\n http.ListenAndServe(\":8080\", nil)\n }()\n\n \/\/ Listen for incoming connections.\n l, err := net.Listen(\"tcp\", bInterface + \":\" + strconv.Itoa(bPort))\n if err != nil {\n fmt.Println(\"Error listening:\", err.Error())\n os.Exit(1)\n }\n \/\/ Close the listener when the application closes.\n defer l.Close()\n fmt.Println(\"Listening on \" + bInterface + \":\" + strconv.Itoa(bPort))\n\n for {\n \/\/ Listen for an incoming connection.\n conn, err := l.Accept()\n if err != nil {\n fmt.Println(\"Error accepting: \", err.Error())\n os.Exit(1)\n }\n \/\/ Handle connections in a new goroutine.\n atomic.AddUint64(&c.Connections, 1)\n go handleRequest(conn)\n }\n}\n\nfunc handleRequest(conn net.Conn) {\n defer conn.Close()\n\n fmt.Println(\"Connection from: \", conn.RemoteAddr())\n\n \/\/ connect to carbon server\n carbon_conn, err := net.DialTCP(\"tcp\", nil, cServerAddr)\n if err != nil {\n atomic.AddUint64(&c.BadCarbonconn, 1)\n println(\"Connection to carbon server failed:\", err.Error())\n return\n }\n defer carbon_conn.Close()\n\n var buf [1024]byte\n for {\n n, err := conn.Read(buf[0:])\n if err != nil {\n return\n }\n\n fmt.Println(string(buf[:n]))\n msg := new(message.Message)\n e := msg.Decompose(string(buf[:n]))\n if e != nil {\n atomic.AddUint64(&c.BadDecompose, 1)\n fmt.Printf(\"Error decomposing message %q - %s\\n\", string(buf[:n]), e.Error())\n return\n }\n\n creds, ok := authMap[msg.Public_key]\n\n if !ok {\n atomic.AddUint64(&c.BadKeyundef, 1)\n fmt.Printf(\"key not defined for %s\\n\", msg.Public_key)\n return\n }\n\n sig := msg.ComputeSignature(creds.SecretKey)\n\n if sig != msg.Signature {\n atomic.AddUint64(&c.BadSig, 1)\n fmt.Printf(\"Computed signature %s doesn't match provided signature %s\\n\", sig, msg.Signature)\n return\n }\n\n delta := math.Abs(float64(time.Now().Unix() - int64(msg.Timestamp)))\n if delta > skew {\n atomic.AddUint64(&c.BadSkew, 1)\n fmt.Printf(\"delta = %.0f, max skew set to %.0f\\n\", delta, skew)\n return\n }\n\n \/\/ validate the metric is on the approved list\n _, ok = creds.Metrics[msg.Name]\n if !ok {\n atomic.AddUint64(&c.BadMetric, 1)\n fmt.Printf(\"not an approved metric: %s\", msg.Name)\n return\n }\n\n fmt.Println(msg.Public_key)\n fmt.Println(sig)\n\n _, err = carbon_conn.Write([]byte(msg.MetricStr() + \"\\n\"))\n if err != nil {\n atomic.AddUint64(&c.BadCarbonwrite, 1)\n println(\"Write to carbon server failed:\", err.Error())\n return\n }\n atomic.AddUint64(&c.GoodMetric, 1)\n\n \/\/ write the n bytes read\n _, err2 := conn.Write(buf[0:n])\n if err2 != nil {\n return\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package boshinit\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/enaml-ops\/enaml\"\n\t\"github.com\/enaml-ops\/enaml\/cloudproperties\/aws\"\n\t\"github.com\/enaml-ops\/omg-cli\/plugins\/products\/bosh-init\/enaml-gen\/aws_cpi\"\n)\n\nconst (\n\tawsCPIJobName = \"aws_cpi\"\n\tawsCPIReleaseName = \"bosh-aws-cpi\"\n)\n\ntype AWSInitConfig struct {\n\tAWSAvailabilityZone string\n\tAWSInstanceSize string\n\tAWSSubnet string\n\tAWSPEMFilePath string\n\tAWSAccessKeyID string\n\tAWSSecretKey string\n\tAWSRegion string\n\tAWSSecurityGroups []string\n\tAWSKeyName string\n}\n\ntype AWSBosh struct {\n\tcfg AWSInitConfig\n\tboshbase *BoshBase\n}\n\nfunc NewAWSIaaSProvider(cfg AWSInitConfig, boshBase *BoshBase) IAASManifestProvider {\n\tboshBase.CPIJobName = awsCPIJobName\n\treturn &AWSBosh{\n\t\tcfg: cfg,\n\t\tboshbase: boshBase,\n\t}\n}\n\nfunc GetAWSBoshBase() *BoshBase {\n\treturn &BoshBase{\n\t\tNetworkCIDR: \"10.0.0.0\/24\",\n\t\tNetworkGateway: \"10.0.0.1\",\n\t\tNetworkDNS: []string{\"10.0.0.2\"},\n\t\tBoshReleaseURL: \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry\/bosh?v=257.3\",\n\t\tBoshReleaseSHA: \"e4442afcc64123e11f2b33cc2be799a0b59207d0\",\n\t\tCPIReleaseURL: \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry-incubator\/bosh-aws-cpi-release?v=57\",\n\t\tCPIReleaseSHA: \"cbc7ed758f4a41063e9aee881bfc164292664b84\",\n\t\tGOAgentReleaseURL: \"https:\/\/bosh.io\/d\/stemcells\/bosh-aws-xen-hvm-ubuntu-trusty-go_agent?v=3262.7\",\n\t\tGOAgentSHA: \"bf44a5f81d29346af6a309199d2e012237dd222c\",\n\t\tPrivateIP: \"10.0.0.6\",\n\t\tNtpServers: []string{\"0.amazon.pool.ntp.org\", \"1.amazon.pool.ntp.org\", \"2.amazon.pool.ntp.org\", \"3.amazon.pool.ntp.org\"},\n\t\tCPIJobName: awsCPIJobName,\n\t\tPersistentDiskSize: 51200,\n\t}\n}\n\nfunc (s *AWSBosh) CreateDeploymentManifest() (*enaml.DeploymentManifest, error) {\n\tmanifest := s.boshbase.CreateDeploymentManifest()\n\tmanifest.AddRelease(s.CreateCPIRelease())\n\tif rp, err := s.CreateResourcePool(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tmanifest.AddResourcePool(*rp)\n\t}\n\n\tmanifest.AddDiskPool(s.CreateDiskPool())\n\tmanifest.AddNetwork(s.CreateManualNetwork())\n\tmanifest.AddNetwork(s.CreateVIPNetwork())\n\tboshJob := manifest.Jobs[0]\n\tboshJob.AddTemplate(s.CreateCPITemplate())\n\tn := s.CreateJobNetwork()\n\tif n != nil {\n\t\tboshJob.AddNetwork(*n)\n\t}\n\tfor name, val := range s.CreateCPIJobProperties() {\n\t\tboshJob.AddProperty(name, val)\n\t}\n\tmanifest.Jobs[0] = boshJob\n\tmanifest.SetCloudProvider(s.CreateCloudProvider())\n\treturn manifest, nil\n}\n\nfunc (s *AWSBosh) resourcePoolCloudProperties() interface{} {\n\treturn awscloudproperties.ResourcePool{\n\t\tInstanceType: s.cfg.AWSInstanceSize,\n\t\tEphemeralDisk: awscloudproperties.EphemeralDisk{\n\t\t\tSize: s.boshbase.PersistentDiskSize,\n\t\t\tDiskType: \"gp2\",\n\t\t},\n\t\tAvailabilityZone: s.cfg.AWSAvailabilityZone,\n\t}\n}\nfunc (s *AWSBosh) CreateResourcePool() (*enaml.ResourcePool, error) {\n\treturn s.boshbase.CreateResourcePool(s.resourcePoolCloudProperties)\n}\n\nfunc (s *AWSBosh) CreateCPIRelease() enaml.Release {\n\treturn enaml.Release{\n\t\tName: awsCPIReleaseName,\n\t\tURL: s.boshbase.CPIReleaseURL,\n\t\tSHA1: s.boshbase.CPIReleaseSHA,\n\t}\n}\n\nfunc (s *AWSBosh) CreateDiskPool() enaml.DiskPool {\n\treturn enaml.DiskPool{\n\t\tName: \"disks\",\n\t\tDiskSize: s.boshbase.PersistentDiskSize,\n\t\tCloudProperties: awscloudproperties.RootDisk{\n\t\t\tDiskType: \"gp2\",\n\t\t},\n\t}\n}\n\nfunc (s *AWSBosh) CreateManualNetwork() (net enaml.ManualNetwork) {\n\tnet = enaml.NewManualNetwork(\"private\")\n\tnet.AddSubnet(enaml.Subnet{\n\t\tRange: s.boshbase.NetworkCIDR,\n\t\tGateway: s.boshbase.NetworkGateway,\n\t\tDNS: s.boshbase.NetworkDNS,\n\t\tStatic: []string{\n\t\t\ts.boshbase.PrivateIP,\n\t\t},\n\t\tCloudProperties: awscloudproperties.Network{\n\t\t\tSubnet: s.cfg.AWSSubnet,\n\t\t},\n\t})\n\treturn\n}\n\nfunc (s *AWSBosh) CreateJobNetwork() *enaml.Network {\n\tif s.boshbase.PublicIP != \"\" {\n\t\treturn &enaml.Network{\n\t\t\tName: \"public\",\n\t\t\tStaticIPs: []string{s.boshbase.PublicIP},\n\t\t}\n\t}\n\treturn &enaml.Network{\n\t\tName: \"public\",\n\t}\n}\n\nfunc (s *AWSBosh) CreateVIPNetwork() (net enaml.VIPNetwork) {\n\treturn enaml.NewVIPNetwork(\"public\")\n}\n\nfunc (s *AWSBosh) CreateCPIJobProperties() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"aws\": &aws_cpi.Aws{\n\t\t\tAccessKeyId: s.cfg.AWSAccessKeyID,\n\t\t\tSecretAccessKey: s.cfg.AWSSecretKey,\n\t\t\tDefaultKeyName: s.cfg.AWSKeyName,\n\t\t\tDefaultSecurityGroups: s.cfg.AWSSecurityGroups,\n\t\t\tRegion: s.cfg.AWSRegion,\n\t\t},\n\t\t\"agent\": &aws_cpi.Agent{\n\t\t\tMbus: fmt.Sprintf(\"nats:\/\/nats:%s@%s:4222\", s.boshbase.NatsPassword, s.boshbase.PrivateIP),\n\t\t},\n\t}\n}\n\nfunc (s *AWSBosh) CreateCPITemplate() (template enaml.Template) {\n\treturn enaml.Template{\n\t\tName: s.boshbase.CPIJobName,\n\t\tRelease: awsCPIReleaseName,\n\t}\n}\n\nfunc (s *AWSBosh) CreateCloudProvider() (provider enaml.CloudProvider) {\n\treturn enaml.CloudProvider{\n\t\tTemplate: enaml.Template{\n\t\t\tName: s.boshbase.CPIJobName,\n\t\t\tRelease: awsCPIReleaseName,\n\t\t},\n\t\tMBus: fmt.Sprintf(\"https:\/\/mbus:%s@%s:6868\", s.boshbase.MBusPassword, s.boshbase.GetRoutableIP()),\n\t\tSSHTunnel: enaml.SSHTunnel{\n\t\t\tHost: s.boshbase.GetRoutableIP(),\n\t\t\tPort: 22,\n\t\t\tUser: \"vcap\",\n\t\t\tPrivateKeyPath: s.cfg.AWSPEMFilePath,\n\t\t},\n\t\tProperties: &aws_cpi.AwsCpiJob{\n\t\t\tAws: &aws_cpi.Aws{\n\t\t\t\tAccessKeyId: s.cfg.AWSAccessKeyID,\n\t\t\t\tSecretAccessKey: s.cfg.AWSSecretKey,\n\t\t\t\tDefaultKeyName: s.cfg.AWSKeyName,\n\t\t\t\tDefaultSecurityGroups: s.cfg.AWSSecurityGroups,\n\t\t\t\tRegion: s.cfg.AWSRegion,\n\t\t\t},\n\t\t\tNtp: s.boshbase.NtpServers,\n\t\t\tAgent: &aws_cpi.Agent{\n\t\t\t\tMbus: fmt.Sprintf(\"https:\/\/mbus:%s@0.0.0.0:6868\", s.boshbase.MBusPassword),\n\t\t\t},\n\t\t\tBlobstore: &aws_cpi.Blobstore{\n\t\t\t\tProvider: \"local\",\n\t\t\t\tPath: \"\/var\/vcap\/micro_bosh\/data\/cache\",\n\t\t\t},\n\t\t},\n\t}\n}\n<commit_msg>[#134103585] bosh release, cpi and stemcell versions<commit_after>package boshinit\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/enaml-ops\/enaml\"\n\t\"github.com\/enaml-ops\/enaml\/cloudproperties\/aws\"\n\t\"github.com\/enaml-ops\/omg-cli\/plugins\/products\/bosh-init\/enaml-gen\/aws_cpi\"\n)\n\nconst (\n\tawsCPIJobName = \"aws_cpi\"\n\tawsCPIReleaseName = \"bosh-aws-cpi\"\n)\n\ntype AWSInitConfig struct {\n\tAWSAvailabilityZone string\n\tAWSInstanceSize string\n\tAWSSubnet string\n\tAWSPEMFilePath string\n\tAWSAccessKeyID string\n\tAWSSecretKey string\n\tAWSRegion string\n\tAWSSecurityGroups []string\n\tAWSKeyName string\n}\n\ntype AWSBosh struct {\n\tcfg AWSInitConfig\n\tboshbase *BoshBase\n}\n\nfunc NewAWSIaaSProvider(cfg AWSInitConfig, boshBase *BoshBase) IAASManifestProvider {\n\tboshBase.CPIJobName = awsCPIJobName\n\treturn &AWSBosh{\n\t\tcfg: cfg,\n\t\tboshbase: boshBase,\n\t}\n}\n\nfunc GetAWSBoshBase() *BoshBase {\n\treturn &BoshBase{\n\t\tNetworkCIDR: \"10.0.0.0\/24\",\n\t\tNetworkGateway: \"10.0.0.1\",\n\t\tNetworkDNS: []string{\"10.0.0.2\"},\n\t\tBoshReleaseURL: \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry\/bosh?v=260\",\n\t\tBoshReleaseSHA: \"f8f086974d9769263078fb6cb7927655744dacbc\",\n\t\tCPIReleaseURL: \"https:\/\/bosh.io\/d\/github.com\/cloudfoundry-incubator\/bosh-aws-cpi-release?v=60\",\n\t\tCPIReleaseSHA: \"8e40a9ff892204007889037f094a1b0d23777058\",\n\t\tGOAgentReleaseURL: \"https:\/\/bosh.io\/d\/stemcells\/bosh-aws-xen-hvm-ubuntu-trusty-go_agent?v=3263.10\",\n\t\tGOAgentSHA: \"42ea4577caec4aec463bed951cfdffb935961270\",\n\t\tPrivateIP: \"10.0.0.6\",\n\t\tNtpServers: []string{\"0.amazon.pool.ntp.org\", \"1.amazon.pool.ntp.org\", \"2.amazon.pool.ntp.org\", \"3.amazon.pool.ntp.org\"},\n\t\tCPIJobName: awsCPIJobName,\n\t\tPersistentDiskSize: 51200,\n\t}\n}\n\nfunc (s *AWSBosh) CreateDeploymentManifest() (*enaml.DeploymentManifest, error) {\n\tmanifest := s.boshbase.CreateDeploymentManifest()\n\tmanifest.AddRelease(s.CreateCPIRelease())\n\tif rp, err := s.CreateResourcePool(); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tmanifest.AddResourcePool(*rp)\n\t}\n\n\tmanifest.AddDiskPool(s.CreateDiskPool())\n\tmanifest.AddNetwork(s.CreateManualNetwork())\n\tmanifest.AddNetwork(s.CreateVIPNetwork())\n\tboshJob := manifest.Jobs[0]\n\tboshJob.AddTemplate(s.CreateCPITemplate())\n\tn := s.CreateJobNetwork()\n\tif n != nil {\n\t\tboshJob.AddNetwork(*n)\n\t}\n\tfor name, val := range s.CreateCPIJobProperties() {\n\t\tboshJob.AddProperty(name, val)\n\t}\n\tmanifest.Jobs[0] = boshJob\n\tmanifest.SetCloudProvider(s.CreateCloudProvider())\n\treturn manifest, nil\n}\n\nfunc (s *AWSBosh) resourcePoolCloudProperties() interface{} {\n\treturn awscloudproperties.ResourcePool{\n\t\tInstanceType: s.cfg.AWSInstanceSize,\n\t\tEphemeralDisk: awscloudproperties.EphemeralDisk{\n\t\t\tSize: s.boshbase.PersistentDiskSize,\n\t\t\tDiskType: \"gp2\",\n\t\t},\n\t\tAvailabilityZone: s.cfg.AWSAvailabilityZone,\n\t}\n}\nfunc (s *AWSBosh) CreateResourcePool() (*enaml.ResourcePool, error) {\n\treturn s.boshbase.CreateResourcePool(s.resourcePoolCloudProperties)\n}\n\nfunc (s *AWSBosh) CreateCPIRelease() enaml.Release {\n\treturn enaml.Release{\n\t\tName: awsCPIReleaseName,\n\t\tURL: s.boshbase.CPIReleaseURL,\n\t\tSHA1: s.boshbase.CPIReleaseSHA,\n\t}\n}\n\nfunc (s *AWSBosh) CreateDiskPool() enaml.DiskPool {\n\treturn enaml.DiskPool{\n\t\tName: \"disks\",\n\t\tDiskSize: s.boshbase.PersistentDiskSize,\n\t\tCloudProperties: awscloudproperties.RootDisk{\n\t\t\tDiskType: \"gp2\",\n\t\t},\n\t}\n}\n\nfunc (s *AWSBosh) CreateManualNetwork() (net enaml.ManualNetwork) {\n\tnet = enaml.NewManualNetwork(\"private\")\n\tnet.AddSubnet(enaml.Subnet{\n\t\tRange: s.boshbase.NetworkCIDR,\n\t\tGateway: s.boshbase.NetworkGateway,\n\t\tDNS: s.boshbase.NetworkDNS,\n\t\tStatic: []string{\n\t\t\ts.boshbase.PrivateIP,\n\t\t},\n\t\tCloudProperties: awscloudproperties.Network{\n\t\t\tSubnet: s.cfg.AWSSubnet,\n\t\t},\n\t})\n\treturn\n}\n\nfunc (s *AWSBosh) CreateJobNetwork() *enaml.Network {\n\tif s.boshbase.PublicIP != \"\" {\n\t\treturn &enaml.Network{\n\t\t\tName: \"public\",\n\t\t\tStaticIPs: []string{s.boshbase.PublicIP},\n\t\t}\n\t}\n\treturn &enaml.Network{\n\t\tName: \"public\",\n\t}\n}\n\nfunc (s *AWSBosh) CreateVIPNetwork() (net enaml.VIPNetwork) {\n\treturn enaml.NewVIPNetwork(\"public\")\n}\n\nfunc (s *AWSBosh) CreateCPIJobProperties() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"aws\": &aws_cpi.Aws{\n\t\t\tAccessKeyId: s.cfg.AWSAccessKeyID,\n\t\t\tSecretAccessKey: s.cfg.AWSSecretKey,\n\t\t\tDefaultKeyName: s.cfg.AWSKeyName,\n\t\t\tDefaultSecurityGroups: s.cfg.AWSSecurityGroups,\n\t\t\tRegion: s.cfg.AWSRegion,\n\t\t},\n\t\t\"agent\": &aws_cpi.Agent{\n\t\t\tMbus: fmt.Sprintf(\"nats:\/\/nats:%s@%s:4222\", s.boshbase.NatsPassword, s.boshbase.PrivateIP),\n\t\t},\n\t}\n}\n\nfunc (s *AWSBosh) CreateCPITemplate() (template enaml.Template) {\n\treturn enaml.Template{\n\t\tName: s.boshbase.CPIJobName,\n\t\tRelease: awsCPIReleaseName,\n\t}\n}\n\nfunc (s *AWSBosh) CreateCloudProvider() (provider enaml.CloudProvider) {\n\treturn enaml.CloudProvider{\n\t\tTemplate: enaml.Template{\n\t\t\tName: s.boshbase.CPIJobName,\n\t\t\tRelease: awsCPIReleaseName,\n\t\t},\n\t\tMBus: fmt.Sprintf(\"https:\/\/mbus:%s@%s:6868\", s.boshbase.MBusPassword, s.boshbase.GetRoutableIP()),\n\t\tSSHTunnel: enaml.SSHTunnel{\n\t\t\tHost: s.boshbase.GetRoutableIP(),\n\t\t\tPort: 22,\n\t\t\tUser: \"vcap\",\n\t\t\tPrivateKeyPath: s.cfg.AWSPEMFilePath,\n\t\t},\n\t\tProperties: &aws_cpi.AwsCpiJob{\n\t\t\tAws: &aws_cpi.Aws{\n\t\t\t\tAccessKeyId: s.cfg.AWSAccessKeyID,\n\t\t\t\tSecretAccessKey: s.cfg.AWSSecretKey,\n\t\t\t\tDefaultKeyName: s.cfg.AWSKeyName,\n\t\t\t\tDefaultSecurityGroups: s.cfg.AWSSecurityGroups,\n\t\t\t\tRegion: s.cfg.AWSRegion,\n\t\t\t},\n\t\t\tNtp: s.boshbase.NtpServers,\n\t\t\tAgent: &aws_cpi.Agent{\n\t\t\t\tMbus: fmt.Sprintf(\"https:\/\/mbus:%s@0.0.0.0:6868\", s.boshbase.MBusPassword),\n\t\t\t},\n\t\t\tBlobstore: &aws_cpi.Blobstore{\n\t\t\t\tProvider: \"local\",\n\t\t\t\tPath: \"\/var\/vcap\/micro_bosh\/data\/cache\",\n\t\t\t},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lxc\n\nimport (\n\t\"bytes\"\n\t\"github.com\/globocom\/commandmocker\"\n\t\"github.com\/globocom\/config\"\n\tetesting \"github.com\/globocom\/tsuru\/exec\/testing\"\n\tfstesting \"github.com\/globocom\/tsuru\/fs\/testing\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\trtesting \"github.com\/globocom\/tsuru\/router\/testing\"\n\t\"github.com\/globocom\/tsuru\/testing\"\n\t\"io\/ioutil\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"launchpad.net\/gocheck\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nfunc (s *S) TestShouldBeRegistered(c *gocheck.C) {\n\tp, err := provision.Get(\"lxc\")\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(p, gocheck.FitsTypeOf, &LXCProvisioner{})\n}\n\nfunc (s *S) TestProvisionerProvision(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tln, err := net.Listen(\"tcp\", \":2222\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer ln.Close()\n\tformulasPath := \"\/home\/ubuntu\/formulas\"\n\tconfig.Set(\"lxc:formulas-path\", formulasPath)\n\tconfig.Set(\"lxc:ip-timeout\", 5)\n\tconfig.Set(\"lxc:ssh-port\", 2222)\n\tconfig.Set(\"lxc:authorized-key-path\", \"somepath\")\n\trfs := &fstesting.RecordingFs{}\n\tfsystem = rfs\n\tdefer func() {\n\t\tfsystem = nil\n\t}()\n\tf, _ := os.Open(\"testdata\/dnsmasq.leases\")\n\tdata, err := ioutil.ReadAll(f)\n\tc.Assert(err, gocheck.IsNil)\n\tfile, err := rfs.Create(\"\/var\/lib\/misc\/dnsmasq.leases\")\n\tc.Assert(err, gocheck.IsNil)\n\t_, err = file.Write(data)\n\tc.Assert(err, gocheck.IsNil)\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tdefer p.collection().Remove(bson.M{\"name\": \"myapp\"})\n\tc.Assert(p.Provision(app), gocheck.IsNil)\n\tok := make(chan bool, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tcoll := s.conn.Collection(s.collName)\n\t\t\tct, err := coll.Find(bson.M{\"name\": \"myapp\", \"status\": provision.StatusStarted}).Count()\n\t\t\tif err != nil {\n\t\t\t\tc.Fatal(err)\n\t\t\t}\n\t\t\tif ct > 0 {\n\t\t\t\tok <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\tselect {\n\tcase <-ok:\n\tcase <-time.After(10e9):\n\t\tc.Fatal(\"Timed out waiting for the container to be provisioned (10 seconds)\")\n\t}\n\targs := []string{\"lxc-create\", \"-t\", \"ubuntu-cloud\", \"-n\", \"myapp\", \"--\", \"-S\", \"somepath\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\targs = []string{\"lxc-start\", \"--daemon\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\tr, err := p.router()\n\tc.Assert(err, gocheck.IsNil)\n\tfk := r.(*rtesting.FakeRouter)\n\tc.Assert(fk.HasRoute(\"myapp\"), gocheck.Equals, true)\n\tvar unit provision.Unit\n\terr = s.conn.Collection(s.collName).Find(bson.M{\"name\": \"myapp\"}).One(&unit)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(unit.Ip, gocheck.Equals, \"127.0.0.1\")\n}\n\nfunc (s *S) TestProvisionerRestart(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"almah\", \"static\", 1)\n\terr := p.Restart(app)\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"-l\", \"ubuntu\", \"-q\", \"-o\", \"StrictHostKeyChecking no\", \"10.10.10.1\", \"\/var\/lib\/tsuru\/hooks\/restart\"}\n\tc.Assert(fexec.ExecutedCmd(\"ssh\", args), gocheck.Equals, true)\n}\n\n\/\/ func (s *S) TestProvisionerRestartFailure(c *gocheck.C) {\n\/\/ \tapp := testing.NewFakeApp(\"cribcaged\", \"python\", 1)\n\/\/ \tp := LXCProvisioner{}\n\/\/ \terr := p.Restart(app)\n\/\/ \tc.Assert(err, gocheck.NotNil)\n\/\/ \t_, ok := err.(*provision.Error)\n\/\/ \tc.Assert(ok, gocheck.Equals, true)\n\/\/ }\n\nfunc (s *S) TestDeploy(c *gocheck.C) {\n\ttmpdir, err := commandmocker.Add(\"ssh\", \"\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer commandmocker.Remove(tmpdir)\n\tconfig.Set(\"git:unit-repo\", \"test\/dir\")\n\tconfig.Set(\"git:host\", \"gandalf.com\")\n\tdefer func() {\n\t\tconfig.Unset(\"git:unit-repo\")\n\t\tconfig.Unset(\"git:host\")\n\t}()\n\tapp := testing.NewFakeApp(\"cribcaged\", \"python\", 1)\n\tw := &bytes.Buffer{}\n\tp := LXCProvisioner{}\n\terr = p.Deploy(app, w)\n\tc.Assert(err, gocheck.IsNil)\n}\n\nfunc (s *S) TestProvisionerDestroy(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 1)\n\tu := provision.Unit{\n\t\tName: \"myapp\",\n\t\tStatus: provision.StatusStarted,\n\t}\n\terr := s.conn.Collection(s.collName).Insert(&u)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(p.Destroy(app), gocheck.IsNil)\n\tok := make(chan bool, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tcoll := s.conn.Collection(s.collName)\n\t\t\tct, err := coll.Find(bson.M{\"name\": \"myapp\", \"status\": provision.StatusStarted}).Count()\n\t\t\tif err != nil {\n\t\t\t\tc.Fatal(err)\n\t\t\t}\n\t\t\tif ct == 0 {\n\t\t\t\tok <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\tselect {\n\tcase <-ok:\n\tcase <-time.After(10e9):\n\t\tc.Fatal(\"Timed out waiting for the container to be provisioned (10 seconds)\")\n\t}\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"lxc-stop\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\targs = []string{\"lxc-destroy\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\tlength, err := p.collection().Find(bson.M{\"name\": \"myapp\"}).Count()\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(length, gocheck.Equals, 0)\n}\n\nfunc (s *S) TestProvisionerAddr(c *gocheck.C) {\n\tconfig.Set(\"router\", \"fake\")\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 1)\n\tr, err := p.router()\n\tc.Assert(err, gocheck.IsNil)\n\tr.AddRoute(\"myapp\", \"http:\/\/10.10.10.10\")\n\ta, err := r.Addr(app.GetName())\n\tc.Assert(err, gocheck.IsNil)\n\taddr, err := p.Addr(app)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(addr, gocheck.Equals, a)\n}\n\nfunc (s *S) TestProvisionerAddUnits(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tunits, err := p.AddUnits(app, 2)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(units, gocheck.DeepEquals, []provision.Unit{})\n}\n\nfunc (s *S) TestProvisionerRemoveUnit(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\terr := p.RemoveUnit(app, \"\")\n\tc.Assert(err, gocheck.IsNil)\n}\n\nfunc (s *S) TestProvisionerInstallDepsExecutesHook(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tw := &bytes.Buffer{}\n\terr := p.InstallDeps(app, w)\n\tc.Assert(err, gocheck.IsNil)\n\texpected := []string{\"ran \/var\/lib\/tsuru\/hooks\/dependencies\"}\n\tc.Assert(app.Commands, gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestProvisionerExecuteCommand(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tvar buf bytes.Buffer\n\tapp := testing.NewFakeApp(\"almah\", \"static\", 2)\n\terr := p.ExecuteCommand(&buf, &buf, app, \"ls\", \"-lh\")\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"-l\", \"ubuntu\", \"-q\", \"-o\", \"StrictHostKeyChecking no\", \"10.10.10.1\", \"ls\", \"-lh\"}\n\tc.Assert(fexec.ExecutedCmd(\"ssh\", args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestCollectStatus(c *gocheck.C) {\n\tvar p LXCProvisioner\n\texpected := []provision.Unit{\n\t\t{\n\t\t\tName: \"vm1\",\n\t\t\tAppName: \"vm1\",\n\t\t\tType: \"django\",\n\t\t\tMachine: 0,\n\t\t\tInstanceId: \"vm1\",\n\t\t\tIp: \"10.10.10.9\",\n\t\t\tStatus: provision.StatusStarted,\n\t\t},\n\t\t{\n\t\t\tName: \"vm2\",\n\t\t\tAppName: \"vm2\",\n\t\t\tType: \"gunicorn\",\n\t\t\tMachine: 0,\n\t\t\tInstanceId: \"vm2\",\n\t\t\tIp: \"10.10.10.10\",\n\t\t\tStatus: provision.StatusInstalling,\n\t\t},\n\t}\n\tfor _, u := range expected {\n\t\terr := p.collection().Insert(u)\n\t\tc.Assert(err, gocheck.IsNil)\n\t}\n\tunits, err := p.CollectStatus()\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(units, gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestProvisionCollection(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tcollection := p.collection()\n\tc.Assert(collection.Name, gocheck.Equals, s.collName)\n}\n\nfunc (s *S) TestProvisionInstall(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\terr := p.install(\"10.10.10.10\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"ssh\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo \/var\/lib\/tsuru\/hooks\/install\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestProvisionStart(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\terr := p.start(\"10.10.10.10\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"ssh\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo \/var\/lib\/tsuru\/hooks\/start\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestProvisionSetup(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\tformulasPath := \"\/home\/ubuntu\/formulas\"\n\tconfig.Set(\"lxc:formulas-path\", formulasPath)\n\terr := p.setup(\"10.10.10.10\", \"static\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"scp\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-r\",\n\t\tformulasPath + \"\/static\/hooks\",\n\t\t\"ubuntu@10.10.10.10:\/var\/lib\/tsuru\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n\tcmd = \"ssh\"\n\targs = []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo mkdir -p \/var\/lib\/tsuru\/hooks\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n\targs = []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo chown -R ubuntu \/var\/lib\/tsuru\/hooks\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n<commit_msg>provision\/lxc: remove commented test<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lxc\n\nimport (\n\t\"bytes\"\n\t\"github.com\/globocom\/commandmocker\"\n\t\"github.com\/globocom\/config\"\n\tetesting \"github.com\/globocom\/tsuru\/exec\/testing\"\n\tfstesting \"github.com\/globocom\/tsuru\/fs\/testing\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\trtesting \"github.com\/globocom\/tsuru\/router\/testing\"\n\t\"github.com\/globocom\/tsuru\/testing\"\n\t\"io\/ioutil\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"launchpad.net\/gocheck\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nfunc (s *S) TestShouldBeRegistered(c *gocheck.C) {\n\tp, err := provision.Get(\"lxc\")\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(p, gocheck.FitsTypeOf, &LXCProvisioner{})\n}\n\nfunc (s *S) TestProvisionerProvision(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tln, err := net.Listen(\"tcp\", \":2222\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer ln.Close()\n\tformulasPath := \"\/home\/ubuntu\/formulas\"\n\tconfig.Set(\"lxc:formulas-path\", formulasPath)\n\tconfig.Set(\"lxc:ip-timeout\", 5)\n\tconfig.Set(\"lxc:ssh-port\", 2222)\n\tconfig.Set(\"lxc:authorized-key-path\", \"somepath\")\n\trfs := &fstesting.RecordingFs{}\n\tfsystem = rfs\n\tdefer func() {\n\t\tfsystem = nil\n\t}()\n\tf, _ := os.Open(\"testdata\/dnsmasq.leases\")\n\tdata, err := ioutil.ReadAll(f)\n\tc.Assert(err, gocheck.IsNil)\n\tfile, err := rfs.Create(\"\/var\/lib\/misc\/dnsmasq.leases\")\n\tc.Assert(err, gocheck.IsNil)\n\t_, err = file.Write(data)\n\tc.Assert(err, gocheck.IsNil)\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tdefer p.collection().Remove(bson.M{\"name\": \"myapp\"})\n\tc.Assert(p.Provision(app), gocheck.IsNil)\n\tok := make(chan bool, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tcoll := s.conn.Collection(s.collName)\n\t\t\tct, err := coll.Find(bson.M{\"name\": \"myapp\", \"status\": provision.StatusStarted}).Count()\n\t\t\tif err != nil {\n\t\t\t\tc.Fatal(err)\n\t\t\t}\n\t\t\tif ct > 0 {\n\t\t\t\tok <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\tselect {\n\tcase <-ok:\n\tcase <-time.After(10e9):\n\t\tc.Fatal(\"Timed out waiting for the container to be provisioned (10 seconds)\")\n\t}\n\targs := []string{\"lxc-create\", \"-t\", \"ubuntu-cloud\", \"-n\", \"myapp\", \"--\", \"-S\", \"somepath\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\targs = []string{\"lxc-start\", \"--daemon\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\tr, err := p.router()\n\tc.Assert(err, gocheck.IsNil)\n\tfk := r.(*rtesting.FakeRouter)\n\tc.Assert(fk.HasRoute(\"myapp\"), gocheck.Equals, true)\n\tvar unit provision.Unit\n\terr = s.conn.Collection(s.collName).Find(bson.M{\"name\": \"myapp\"}).One(&unit)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(unit.Ip, gocheck.Equals, \"127.0.0.1\")\n}\n\nfunc (s *S) TestProvisionerRestart(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"almah\", \"static\", 1)\n\terr := p.Restart(app)\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"-l\", \"ubuntu\", \"-q\", \"-o\", \"StrictHostKeyChecking no\", \"10.10.10.1\", \"\/var\/lib\/tsuru\/hooks\/restart\"}\n\tc.Assert(fexec.ExecutedCmd(\"ssh\", args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestDeploy(c *gocheck.C) {\n\ttmpdir, err := commandmocker.Add(\"ssh\", \"\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer commandmocker.Remove(tmpdir)\n\tconfig.Set(\"git:unit-repo\", \"test\/dir\")\n\tconfig.Set(\"git:host\", \"gandalf.com\")\n\tdefer func() {\n\t\tconfig.Unset(\"git:unit-repo\")\n\t\tconfig.Unset(\"git:host\")\n\t}()\n\tapp := testing.NewFakeApp(\"cribcaged\", \"python\", 1)\n\tw := &bytes.Buffer{}\n\tp := LXCProvisioner{}\n\terr = p.Deploy(app, w)\n\tc.Assert(err, gocheck.IsNil)\n}\n\nfunc (s *S) TestProvisionerDestroy(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 1)\n\tu := provision.Unit{\n\t\tName: \"myapp\",\n\t\tStatus: provision.StatusStarted,\n\t}\n\terr := s.conn.Collection(s.collName).Insert(&u)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(p.Destroy(app), gocheck.IsNil)\n\tok := make(chan bool, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tcoll := s.conn.Collection(s.collName)\n\t\t\tct, err := coll.Find(bson.M{\"name\": \"myapp\", \"status\": provision.StatusStarted}).Count()\n\t\t\tif err != nil {\n\t\t\t\tc.Fatal(err)\n\t\t\t}\n\t\t\tif ct == 0 {\n\t\t\t\tok <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\tselect {\n\tcase <-ok:\n\tcase <-time.After(10e9):\n\t\tc.Fatal(\"Timed out waiting for the container to be provisioned (10 seconds)\")\n\t}\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"lxc-stop\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\targs = []string{\"lxc-destroy\", \"-n\", \"myapp\"}\n\tc.Assert(fexec.ExecutedCmd(\"sudo\", args), gocheck.Equals, true)\n\tlength, err := p.collection().Find(bson.M{\"name\": \"myapp\"}).Count()\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(length, gocheck.Equals, 0)\n}\n\nfunc (s *S) TestProvisionerAddr(c *gocheck.C) {\n\tconfig.Set(\"router\", \"fake\")\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 1)\n\tr, err := p.router()\n\tc.Assert(err, gocheck.IsNil)\n\tr.AddRoute(\"myapp\", \"http:\/\/10.10.10.10\")\n\ta, err := r.Addr(app.GetName())\n\tc.Assert(err, gocheck.IsNil)\n\taddr, err := p.Addr(app)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(addr, gocheck.Equals, a)\n}\n\nfunc (s *S) TestProvisionerAddUnits(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tunits, err := p.AddUnits(app, 2)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(units, gocheck.DeepEquals, []provision.Unit{})\n}\n\nfunc (s *S) TestProvisionerRemoveUnit(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\terr := p.RemoveUnit(app, \"\")\n\tc.Assert(err, gocheck.IsNil)\n}\n\nfunc (s *S) TestProvisionerInstallDepsExecutesHook(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tapp := testing.NewFakeApp(\"myapp\", \"python\", 0)\n\tw := &bytes.Buffer{}\n\terr := p.InstallDeps(app, w)\n\tc.Assert(err, gocheck.IsNil)\n\texpected := []string{\"ran \/var\/lib\/tsuru\/hooks\/dependencies\"}\n\tc.Assert(app.Commands, gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestProvisionerExecuteCommand(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tvar p LXCProvisioner\n\tvar buf bytes.Buffer\n\tapp := testing.NewFakeApp(\"almah\", \"static\", 2)\n\terr := p.ExecuteCommand(&buf, &buf, app, \"ls\", \"-lh\")\n\tc.Assert(err, gocheck.IsNil)\n\targs := []string{\"-l\", \"ubuntu\", \"-q\", \"-o\", \"StrictHostKeyChecking no\", \"10.10.10.1\", \"ls\", \"-lh\"}\n\tc.Assert(fexec.ExecutedCmd(\"ssh\", args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestCollectStatus(c *gocheck.C) {\n\tvar p LXCProvisioner\n\texpected := []provision.Unit{\n\t\t{\n\t\t\tName: \"vm1\",\n\t\t\tAppName: \"vm1\",\n\t\t\tType: \"django\",\n\t\t\tMachine: 0,\n\t\t\tInstanceId: \"vm1\",\n\t\t\tIp: \"10.10.10.9\",\n\t\t\tStatus: provision.StatusStarted,\n\t\t},\n\t\t{\n\t\t\tName: \"vm2\",\n\t\t\tAppName: \"vm2\",\n\t\t\tType: \"gunicorn\",\n\t\t\tMachine: 0,\n\t\t\tInstanceId: \"vm2\",\n\t\t\tIp: \"10.10.10.10\",\n\t\t\tStatus: provision.StatusInstalling,\n\t\t},\n\t}\n\tfor _, u := range expected {\n\t\terr := p.collection().Insert(u)\n\t\tc.Assert(err, gocheck.IsNil)\n\t}\n\tunits, err := p.CollectStatus()\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(units, gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestProvisionCollection(c *gocheck.C) {\n\tvar p LXCProvisioner\n\tcollection := p.collection()\n\tc.Assert(collection.Name, gocheck.Equals, s.collName)\n}\n\nfunc (s *S) TestProvisionInstall(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\terr := p.install(\"10.10.10.10\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"ssh\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo \/var\/lib\/tsuru\/hooks\/install\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestProvisionStart(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\terr := p.start(\"10.10.10.10\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"ssh\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo \/var\/lib\/tsuru\/hooks\/start\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n\nfunc (s *S) TestProvisionSetup(c *gocheck.C) {\n\tfexec := &etesting.FakeExecutor{}\n\texecut = fexec\n\tdefer func() {\n\t\texecut = nil\n\t}()\n\tp := LXCProvisioner{}\n\tformulasPath := \"\/home\/ubuntu\/formulas\"\n\tconfig.Set(\"lxc:formulas-path\", formulasPath)\n\terr := p.setup(\"10.10.10.10\", \"static\")\n\tc.Assert(err, gocheck.IsNil)\n\tcmd := \"scp\"\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-r\",\n\t\tformulasPath + \"\/static\/hooks\",\n\t\t\"ubuntu@10.10.10.10:\/var\/lib\/tsuru\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n\tcmd = \"ssh\"\n\targs = []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo mkdir -p \/var\/lib\/tsuru\/hooks\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n\targs = []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\t\"StrictHostKeyChecking no\",\n\t\t\"-l\",\n\t\t\"ubuntu\",\n\t\t\"10.10.10.10\",\n\t\t\"sudo chown -R ubuntu \/var\/lib\/tsuru\/hooks\",\n\t}\n\tc.Assert(fexec.ExecutedCmd(cmd, args), gocheck.Equals, true)\n}\n<|endoftext|>"} {"text":"<commit_before>package rbt\n\nimport \"fmt\"\n\ntype color bool\n\nconst (\n\tred color = true\n\tblack color = false\n)\n\ntype Ordered interface {\n\tLess(Ordered) bool\n}\n\ntype node struct {\n\tcolor color\n\tvalue Ordered\n\tleft, right *node\n}\n\nfunc newNode(c color, o Ordered, l, r *node) *node {\n\treturn &node{\n\t\tcolor: c,\n\t\tvalue: o,\n\t\tleft: l,\n\t\tright: r,\n\t}\n}\n\nfunc (n *node) insert(o Ordered) *node {\n\tm := *n.insertRed(o)\n\tm.color = black\n\treturn &m\n}\n\nfunc (n *node) insertRed(o Ordered) *node {\n\tif n == nil {\n\t\treturn newNode(red, o, nil, nil)\n\t}\n\n\tm := *n\n\n\tif n.value.Less(o) {\n\t\tm.left = m.left.insertRed(o)\n\t} else if o.Less(n.value) {\n\t\tm.right = m.right.insertRed(o)\n\t} else {\n\t\treturn n\n\t}\n\n\treturn m.balance()\n}\n\nfunc (n *node) balance() *node {\n\tif n.color == red {\n\t\treturn n\n\t}\n\n\tnewN := func(\n\t\to Ordered,\n\t\tlo Ordered, ll, lr *node,\n\t\tro Ordered, rl, rr *node) *node {\n\t\treturn newNode(red, o, newNode(black, lo, ll, lr), newNode(black, ro, rl, rr))\n\t}\n\n\tl := n.left\n\tr := n.right\n\n\tif l != nil && l.color == red {\n\t\tll := l.left\n\t\tlr := l.right\n\n\t\tnewLN := func(o, lo Ordered, ll, lr, rl *node) *node {\n\t\t\treturn newN(o, lo, ll, lr, n.value, rl, r)\n\t\t}\n\n\t\tif ll != nil && ll.color == red {\n\t\t\treturn newLN(l.value, ll.value, ll.left, ll.right, lr)\n\t\t} else if lr != nil && lr.color == red {\n\t\t\treturn newLN(lr.value, l.value, ll, lr.left, lr.right)\n\t\t}\n\t} else if r != nil && r.color == red {\n\t\trl := r.left\n\t\trr := r.right\n\n\t\tnewRN := func(o, ro Ordered, lr, rl, rr *node) *node {\n\t\t\treturn newN(o, n.value, l, lr, ro, rl, rr)\n\t\t}\n\n\t\tif rl != nil && rl.color == red {\n\t\t\treturn newRN(r.value, rr.value, rl, rr.left, rr.right)\n\t\t} else if rr != nil && rr.color == red {\n\t\t\treturn newRN(rl.value, r.value, rl.left, rl.right, rr)\n\t\t}\n\t}\n\n\treturn n\n}\n\nfunc (n *node) search(o Ordered) (Ordered, bool) {\n\tif n == nil {\n\t\treturn nil, false\n\t} else if n.value.Less(o) {\n\t\treturn n.left.search(o)\n\t} else if o.Less(n.value) {\n\t\treturn n.right.search(o)\n\t}\n\n\treturn n.value, true\n}\n\nfunc (n *node) remove(o Ordered) (*node, bool) {\n\t_, ok := n.search(o)\n\n\tif !ok {\n\t\treturn n, false\n\t}\n\n\tn, _ = n.removeOne(o)\n\tm := *n\n\tm.color = black\n\treturn &m, true\n}\n\nfunc (n *node) removeOne(o Ordered) (*node, bool) {\n\tif n == nil {\n\t\treturn nil, true\n\t} else if n.value.Less(o) {\n\t\tl, balanced := n.left.removeOne(o)\n\t\tm := *n\n\t\tm.left = l\n\n\t\tif balanced {\n\t\t\treturn &m, true\n\t\t}\n\n\t\treturn m.balanceLeft()\n\t} else if o.Less(n.value) {\n\t\tr, balanced := n.right.removeOne(o)\n\t\tm := *n\n\t\tm.right = r\n\n\t\tif balanced {\n\t\t\treturn &m, true\n\t\t}\n\n\t\treturn m.balanceRight()\n\t}\n\n\tif n.left == nil {\n\t\treturn n.right, n.color == red\n\t}\n\n\to, l, balanced := n.takeMax()\n\n\tm := newNode(n.color, o, l, n.right)\n\n\tif balanced {\n\t\treturn m, true\n\t}\n\n\treturn m.balanceLeft()\n}\n\nfunc (n *node) takeMax() (Ordered, *node, bool) {\n\tif n.right == nil {\n\t\treturn n.value, n.left, n.color == red\n\t}\n\n\to, r, balanced := n.right.takeMax()\n\n\tm := *n\n\tm.right = r\n\n\tif balanced {\n\t\treturn o, &m, true\n\t}\n\n\tn, balanced = m.balanceRight()\n\treturn o, n, balanced\n}\n\nfunc (n *node) balanceLeft() (*node, bool) {\n\tif n.right.color == red {\n\t\tl, _ := newNode(red, n.value, n.left, n.right.left).balanceLeft()\n\t\treturn newNode(black, n.right.value, l, n.right.right), true\n\t}\n\n\tif n.right.left != nil && n.right.left.color == red {\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.right.left.value,\n\t\t\tnewNode(black, n.value, n.left, n.right.left.left),\n\t\t\tnewNode(black, n.right.value, n.right.left.right, n.right.right)), true\n\t} else if n.right.right != nil && n.right.right.color == red {\n\t\tr := *n.right.right\n\t\tr.color = black\n\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.right.value,\n\t\t\tnewNode(black, n.value, n.left, n.right.left),\n\t\t\t&r), true\n\t}\n\n\tr := *n.right\n\tr.color = red\n\n\tm := *n\n\tm.color = black\n\tm.right = &r\n\n\treturn &m, n.color == black\n}\n\nfunc (n *node) balanceRight() (*node, bool) {\n\tif n.left.color == red {\n\t\tr, _ := newNode(red, n.value, n.left.right, n.right).balanceRight()\n\t\treturn newNode(black, n.left.value, n.left.left, r), true\n\t}\n\n\tif n.left.right != nil && n.left.right.color == red {\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.left.right.value,\n\t\t\tnewNode(black, n.left.value, n.left.left, n.left.right.left),\n\t\t\tnewNode(black, n.value, n.left.right.right, n.right)), true\n\t} else if n.left.left != nil && n.left.left.color == red {\n\t\tl := *n.left.left\n\t\tl.color = black\n\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.left.value,\n\t\t\t&l,\n\t\t\tnewNode(black, n.value, n.left.right, n.right)), true\n\t}\n\n\tl := *n.left\n\tl.color = red\n\n\tm := *n\n\tm.color = black\n\tm.left = &l\n\n\treturn &m, n.color == black\n}\n\nfunc (n *node) dump() {\n\tn.dumpWithIndent(0)\n}\n\nfunc (n *node) dumpWithIndent(i int) {\n\tfor j := 0; j < i; j++ {\n\t\tfmt.Printf(\" \")\n\t}\n\n\tif n == nil {\n\t\tfmt.Println(nil)\n\t\treturn\n\t}\n\n\tfmt.Println(n.color, n.value)\n\n\tk := i + 2\n\tn.left.dumpWithIndent(k)\n\tn.right.dumpWithIndent(k)\n}\n<commit_msg>Change dump order<commit_after>package rbt\n\nimport \"fmt\"\n\ntype color bool\n\nconst (\n\tred color = true\n\tblack color = false\n)\n\ntype Ordered interface {\n\tLess(Ordered) bool\n}\n\ntype node struct {\n\tcolor color\n\tvalue Ordered\n\tleft, right *node\n}\n\nfunc newNode(c color, o Ordered, l, r *node) *node {\n\treturn &node{\n\t\tcolor: c,\n\t\tvalue: o,\n\t\tleft: l,\n\t\tright: r,\n\t}\n}\n\nfunc (n *node) insert(o Ordered) *node {\n\tm := *n.insertRed(o)\n\tm.color = black\n\treturn &m\n}\n\nfunc (n *node) insertRed(o Ordered) *node {\n\tif n == nil {\n\t\treturn newNode(red, o, nil, nil)\n\t}\n\n\tm := *n\n\n\tif n.value.Less(o) {\n\t\tm.left = m.left.insertRed(o)\n\t} else if o.Less(n.value) {\n\t\tm.right = m.right.insertRed(o)\n\t} else {\n\t\treturn n\n\t}\n\n\treturn m.balance()\n}\n\nfunc (n *node) balance() *node {\n\tif n.color == red {\n\t\treturn n\n\t}\n\n\tnewN := func(\n\t\to Ordered,\n\t\tlo Ordered, ll, lr *node,\n\t\tro Ordered, rl, rr *node) *node {\n\t\treturn newNode(red, o, newNode(black, lo, ll, lr), newNode(black, ro, rl, rr))\n\t}\n\n\tl := n.left\n\tr := n.right\n\n\tif l != nil && l.color == red {\n\t\tll := l.left\n\t\tlr := l.right\n\n\t\tnewLN := func(o, lo Ordered, ll, lr, rl *node) *node {\n\t\t\treturn newN(o, lo, ll, lr, n.value, rl, r)\n\t\t}\n\n\t\tif ll != nil && ll.color == red {\n\t\t\treturn newLN(l.value, ll.value, ll.left, ll.right, lr)\n\t\t} else if lr != nil && lr.color == red {\n\t\t\treturn newLN(lr.value, l.value, ll, lr.left, lr.right)\n\t\t}\n\t} else if r != nil && r.color == red {\n\t\trl := r.left\n\t\trr := r.right\n\n\t\tnewRN := func(o, ro Ordered, lr, rl, rr *node) *node {\n\t\t\treturn newN(o, n.value, l, lr, ro, rl, rr)\n\t\t}\n\n\t\tif rl != nil && rl.color == red {\n\t\t\treturn newRN(r.value, rr.value, rl, rr.left, rr.right)\n\t\t} else if rr != nil && rr.color == red {\n\t\t\treturn newRN(rl.value, r.value, rl.left, rl.right, rr)\n\t\t}\n\t}\n\n\treturn n\n}\n\nfunc (n *node) search(o Ordered) (Ordered, bool) {\n\tif n == nil {\n\t\treturn nil, false\n\t} else if n.value.Less(o) {\n\t\treturn n.left.search(o)\n\t} else if o.Less(n.value) {\n\t\treturn n.right.search(o)\n\t}\n\n\treturn n.value, true\n}\n\nfunc (n *node) remove(o Ordered) (*node, bool) {\n\t_, ok := n.search(o)\n\n\tif !ok {\n\t\treturn n, false\n\t}\n\n\tn, _ = n.removeOne(o)\n\tm := *n\n\tm.color = black\n\treturn &m, true\n}\n\nfunc (n *node) removeOne(o Ordered) (*node, bool) {\n\tif n == nil {\n\t\treturn nil, true\n\t} else if n.value.Less(o) {\n\t\tl, balanced := n.left.removeOne(o)\n\t\tm := *n\n\t\tm.left = l\n\n\t\tif balanced {\n\t\t\treturn &m, true\n\t\t}\n\n\t\treturn m.balanceLeft()\n\t} else if o.Less(n.value) {\n\t\tr, balanced := n.right.removeOne(o)\n\t\tm := *n\n\t\tm.right = r\n\n\t\tif balanced {\n\t\t\treturn &m, true\n\t\t}\n\n\t\treturn m.balanceRight()\n\t}\n\n\tif n.left == nil {\n\t\treturn n.right, n.color == red\n\t}\n\n\to, l, balanced := n.takeMax()\n\n\tm := newNode(n.color, o, l, n.right)\n\n\tif balanced {\n\t\treturn m, true\n\t}\n\n\treturn m.balanceLeft()\n}\n\nfunc (n *node) takeMax() (Ordered, *node, bool) {\n\tif n.right == nil {\n\t\treturn n.value, n.left, n.color == red\n\t}\n\n\to, r, balanced := n.right.takeMax()\n\n\tm := *n\n\tm.right = r\n\n\tif balanced {\n\t\treturn o, &m, true\n\t}\n\n\tn, balanced = m.balanceRight()\n\treturn o, n, balanced\n}\n\nfunc (n *node) balanceLeft() (*node, bool) {\n\tif n.right.color == red {\n\t\tl, _ := newNode(red, n.value, n.left, n.right.left).balanceLeft()\n\t\treturn newNode(black, n.right.value, l, n.right.right), true\n\t}\n\n\tif n.right.left != nil && n.right.left.color == red {\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.right.left.value,\n\t\t\tnewNode(black, n.value, n.left, n.right.left.left),\n\t\t\tnewNode(black, n.right.value, n.right.left.right, n.right.right)), true\n\t} else if n.right.right != nil && n.right.right.color == red {\n\t\tr := *n.right.right\n\t\tr.color = black\n\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.right.value,\n\t\t\tnewNode(black, n.value, n.left, n.right.left),\n\t\t\t&r), true\n\t}\n\n\tr := *n.right\n\tr.color = red\n\n\tm := *n\n\tm.color = black\n\tm.right = &r\n\n\treturn &m, n.color == black\n}\n\nfunc (n *node) balanceRight() (*node, bool) {\n\tif n.left.color == red {\n\t\tr, _ := newNode(red, n.value, n.left.right, n.right).balanceRight()\n\t\treturn newNode(black, n.left.value, n.left.left, r), true\n\t}\n\n\tif n.left.right != nil && n.left.right.color == red {\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.left.right.value,\n\t\t\tnewNode(black, n.left.value, n.left.left, n.left.right.left),\n\t\t\tnewNode(black, n.value, n.left.right.right, n.right)), true\n\t} else if n.left.left != nil && n.left.left.color == red {\n\t\tl := *n.left.left\n\t\tl.color = black\n\n\t\treturn newNode(\n\t\t\tn.color,\n\t\t\tn.left.value,\n\t\t\t&l,\n\t\t\tnewNode(black, n.value, n.left.right, n.right)), true\n\t}\n\n\tl := *n.left\n\tl.color = red\n\n\tm := *n\n\tm.color = black\n\tm.left = &l\n\n\treturn &m, n.color == black\n}\n\nfunc (n *node) dump() {\n\tn.dumpWithIndent(0)\n}\n\nfunc (n *node) dumpWithIndent(i int) {\n\tfor j := 0; j < i; j++ {\n\t\tfmt.Printf(\" \")\n\t}\n\n\tif n == nil {\n\t\tfmt.Println(nil)\n\t\treturn\n\t}\n\n\tfmt.Println(n.color, n.value)\n\n\tk := i + 2\n\tn.right.dumpWithIndent(k)\n\tn.left.dumpWithIndent(k)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testing\n\nexport type Test struct {\n\tname string;\n\tf *() bool;\n}\n\nexport func Main(tests *[]Test) {\n\tok := true;\n\tfor i := 0; i < len(tests); i++ {\n\t\tok1 := tests[i].f();\n\t\tstatus := \"FAIL\";\n\t\tif ok1 {\n\t\t\tstatus = \"PASS\"\n\t\t}\n\t\tok = ok && ok1;\n\t\tprintln(status, tests[i].name);\n\t}\n\tif !ok {\n\t\tsys.exit(1);\n\t}\n}\n<commit_msg>add -chatty flag to test. was supposed to be in some other cl but got dropped.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testing\n\nimport (\n\t\"flag\"\n)\n\nvar chatty bool;\nfunc init() {\n\tflag.Bool(\"chatty\", false, &chatty, \"chatty\");\n}\n\nexport type Test struct {\n\tname string;\n\tf *() bool;\n}\n\nexport func Main(tests *[]Test) {\n\tflag.Parse();\n\tok := true;\n\tfor i := 0; i < len(tests); i++ {\n\t\tif chatty {\n\t\t\tprintln(\"=== RUN \", tests[i].name);\n\t\t}\n\t\tok1 := tests[i].f();\n\t\tif !ok1 {\n\t\t\tok = false;\n\t\t\tprintln(\"--- FAIL\", tests[i].name);\n\t\t} else if chatty {\n\t\t\tprintln(\"--- PASS\", tests[i].name);\n\t\t}\n\t}\n\tif !ok {\n\t\tsys.exit(1);\n\t}\n\tprintln(\"PASS\");\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements the knownFormats map which records the valid\n\/\/ formats for a given type. The valid formats must correspond to\n\/\/ supported compiler formats implemented in fmt.go, or whatever\n\/\/ other format verbs are implemented for the given type. The map may\n\/\/ also be used to change the use of a format verb across all compiler\n\/\/ sources automatically (for instance, if the implementation of fmt.go\n\/\/ changes), by using the -r option together with the new formats in the\n\/\/ map. To generate this file automatically from the existing source,\n\/\/ run: go test -run Formats -u.\n\/\/\n\/\/ See the package comment in fmt_test.go for additional information.\n\npackage main_test\n\n\/\/ knownFormats entries are of the form \"typename format\" -> \"newformat\".\n\/\/ An absent entry means that the format is not recognized as valid.\n\/\/ An empty new format means that the format should remain unchanged.\nvar knownFormats = map[string]string{\n\t\"*bytes.Buffer %s\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Mpflt %v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Mpint %v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %#v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %+S\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %+v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %0j\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %L\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %S\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %j\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %p\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Block %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Block %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Func %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Func %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Register %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Register %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.SparseTreeNode %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Value %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.sparseTreeMapEntry %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Field %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Field %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %0S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %#L\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %#v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %+v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %-S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %0S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %L\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %s\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %v\": \"\",\n\t\"*cmd\/internal\/obj.Addr %v\": \"\",\n\t\"*cmd\/internal\/obj.LSym %v\": \"\",\n\t\"*math\/big.Float %f\": \"\",\n\t\"*math\/big.Int %#x\": \"\",\n\t\"*math\/big.Int %s\": \"\",\n\t\"*math\/big.Int %v\": \"\",\n\t\"[16]byte %x\": \"\",\n\t\"[]*cmd\/compile\/internal\/gc.Node %v\": \"\",\n\t\"[]*cmd\/compile\/internal\/ssa.Block %v\": \"\",\n\t\"[]*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"[][]string %q\": \"\",\n\t\"[]byte %s\": \"\",\n\t\"[]byte %x\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.Edge %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.ID %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.posetNode %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.posetUndo %v\": \"\",\n\t\"[]cmd\/compile\/internal\/syntax.token %s\": \"\",\n\t\"[]string %v\": \"\",\n\t\"[]uint32 %v\": \"\",\n\t\"bool %v\": \"\",\n\t\"byte %08b\": \"\",\n\t\"byte %c\": \"\",\n\t\"byte %q\": \"\",\n\t\"byte %v\": \"\",\n\t\"cmd\/compile\/internal\/arm.shift %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %s\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Ctype %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Ctype %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Level %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Level %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %+v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %.v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Op %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Op %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %T\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.fmtMode %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.initKind %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.itag %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.BranchPrediction %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Edge %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.GCNode %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ID %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ID %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocPair %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocalSlot %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocalSlot %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Location %T\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Location %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Op %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Op %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ValAndOff %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.domain %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.posetNode %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.posetTestOp %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.rbrank %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.regMask %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.register %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.relation %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Error %q\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Expr %#v\": \"\",\n\t\"cmd\/compile\/internal\/syntax.LitKind %d\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Node %T\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Operator %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Pos %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Pos %v\": \"\",\n\t\"cmd\/compile\/internal\/syntax.position %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.token %q\": \"\",\n\t\"cmd\/compile\/internal\/syntax.token %s\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %d\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %s\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %v\": \"\",\n\t\"cmd\/internal\/obj.ABI %v\": \"\",\n\t\"error %v\": \"\",\n\t\"float64 %.2f\": \"\",\n\t\"float64 %.3f\": \"\",\n\t\"float64 %.6g\": \"\",\n\t\"float64 %g\": \"\",\n\t\"int %-12d\": \"\",\n\t\"int %-6d\": \"\",\n\t\"int %-8o\": \"\",\n\t\"int %02d\": \"\",\n\t\"int %6d\": \"\",\n\t\"int %c\": \"\",\n\t\"int %d\": \"\",\n\t\"int %v\": \"\",\n\t\"int %x\": \"\",\n\t\"int16 %d\": \"\",\n\t\"int16 %x\": \"\",\n\t\"int32 %d\": \"\",\n\t\"int32 %v\": \"\",\n\t\"int32 %x\": \"\",\n\t\"int64 %+d\": \"\",\n\t\"int64 %-10d\": \"\",\n\t\"int64 %.5d\": \"\",\n\t\"int64 %X\": \"\",\n\t\"int64 %d\": \"\",\n\t\"int64 %v\": \"\",\n\t\"int64 %x\": \"\",\n\t\"int8 %d\": \"\",\n\t\"int8 %x\": \"\",\n\t\"interface{} %#v\": \"\",\n\t\"interface{} %T\": \"\",\n\t\"interface{} %p\": \"\",\n\t\"interface{} %q\": \"\",\n\t\"interface{} %s\": \"\",\n\t\"interface{} %v\": \"\",\n\t\"map[*cmd\/compile\/internal\/gc.Node]*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"map[cmd\/compile\/internal\/ssa.ID]uint32 %v\": \"\",\n\t\"math\/big.Accuracy %s\": \"\",\n\t\"reflect.Type %s\": \"\",\n\t\"rune %#U\": \"\",\n\t\"rune %c\": \"\",\n\t\"rune %q\": \"\",\n\t\"string %-*s\": \"\",\n\t\"string %-16s\": \"\",\n\t\"string %-6s\": \"\",\n\t\"string %.*s\": \"\",\n\t\"string %q\": \"\",\n\t\"string %s\": \"\",\n\t\"string %v\": \"\",\n\t\"time.Duration %d\": \"\",\n\t\"time.Duration %v\": \"\",\n\t\"uint %04x\": \"\",\n\t\"uint %5d\": \"\",\n\t\"uint %d\": \"\",\n\t\"uint %x\": \"\",\n\t\"uint16 %d\": \"\",\n\t\"uint16 %v\": \"\",\n\t\"uint16 %x\": \"\",\n\t\"uint32 %#x\": \"\",\n\t\"uint32 %d\": \"\",\n\t\"uint32 %v\": \"\",\n\t\"uint32 %x\": \"\",\n\t\"uint64 %08x\": \"\",\n\t\"uint64 %d\": \"\",\n\t\"uint64 %x\": \"\",\n\t\"uint8 %d\": \"\",\n\t\"uint8 %x\": \"\",\n\t\"uintptr %d\": \"\",\n}\n<commit_msg>cmd\/compile: fix fmt_test.go after CL 170062<commit_after>\/\/ Copyright 2018 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements the knownFormats map which records the valid\n\/\/ formats for a given type. The valid formats must correspond to\n\/\/ supported compiler formats implemented in fmt.go, or whatever\n\/\/ other format verbs are implemented for the given type. The map may\n\/\/ also be used to change the use of a format verb across all compiler\n\/\/ sources automatically (for instance, if the implementation of fmt.go\n\/\/ changes), by using the -r option together with the new formats in the\n\/\/ map. To generate this file automatically from the existing source,\n\/\/ run: go test -run Formats -u.\n\/\/\n\/\/ See the package comment in fmt_test.go for additional information.\n\npackage main_test\n\n\/\/ knownFormats entries are of the form \"typename format\" -> \"newformat\".\n\/\/ An absent entry means that the format is not recognized as valid.\n\/\/ An empty new format means that the format should remain unchanged.\nvar knownFormats = map[string]string{\n\t\"*bytes.Buffer %s\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Mpflt %v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Mpint %v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %#v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %+S\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %+v\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %0j\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %L\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %S\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %j\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %p\": \"\",\n\t\"*cmd\/compile\/internal\/gc.Node %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Block %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Block %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Func %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Func %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Register %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Register %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.SparseTreeNode %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Value %s\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"*cmd\/compile\/internal\/ssa.sparseTreeMapEntry %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Field %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Field %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %0S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Sym %v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %#L\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %#v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %+v\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %-S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %0S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %L\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %S\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %p\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %s\": \"\",\n\t\"*cmd\/compile\/internal\/types.Type %v\": \"\",\n\t\"*cmd\/internal\/obj.Addr %v\": \"\",\n\t\"*cmd\/internal\/obj.LSym %v\": \"\",\n\t\"*math\/big.Float %f\": \"\",\n\t\"*math\/big.Int %#x\": \"\",\n\t\"*math\/big.Int %s\": \"\",\n\t\"*math\/big.Int %v\": \"\",\n\t\"[16]byte %x\": \"\",\n\t\"[]*cmd\/compile\/internal\/ssa.Block %v\": \"\",\n\t\"[]*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"[][]string %q\": \"\",\n\t\"[]byte %s\": \"\",\n\t\"[]byte %x\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.Edge %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.ID %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.posetNode %v\": \"\",\n\t\"[]cmd\/compile\/internal\/ssa.posetUndo %v\": \"\",\n\t\"[]cmd\/compile\/internal\/syntax.token %s\": \"\",\n\t\"[]string %v\": \"\",\n\t\"[]uint32 %v\": \"\",\n\t\"bool %v\": \"\",\n\t\"byte %08b\": \"\",\n\t\"byte %c\": \"\",\n\t\"byte %q\": \"\",\n\t\"byte %v\": \"\",\n\t\"cmd\/compile\/internal\/arm.shift %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %s\": \"\",\n\t\"cmd\/compile\/internal\/gc.Class %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Ctype %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Ctype %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Level %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.Level %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %+v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %.v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Nodes %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Op %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Op %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %#v\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %T\": \"\",\n\t\"cmd\/compile\/internal\/gc.Val %v\": \"\",\n\t\"cmd\/compile\/internal\/gc.fmtMode %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.initKind %d\": \"\",\n\t\"cmd\/compile\/internal\/gc.itag %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.BranchPrediction %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Edge %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.GCNode %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ID %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ID %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocPair %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocalSlot %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.LocalSlot %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Location %T\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Location %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Op %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.Op %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.ValAndOff %s\": \"\",\n\t\"cmd\/compile\/internal\/ssa.domain %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.posetNode %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.posetTestOp %v\": \"\",\n\t\"cmd\/compile\/internal\/ssa.rbrank %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.regMask %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.register %d\": \"\",\n\t\"cmd\/compile\/internal\/ssa.relation %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Error %q\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Expr %#v\": \"\",\n\t\"cmd\/compile\/internal\/syntax.LitKind %d\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Node %T\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Operator %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Pos %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.Pos %v\": \"\",\n\t\"cmd\/compile\/internal\/syntax.position %s\": \"\",\n\t\"cmd\/compile\/internal\/syntax.token %q\": \"\",\n\t\"cmd\/compile\/internal\/syntax.token %s\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %d\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %s\": \"\",\n\t\"cmd\/compile\/internal\/types.EType %v\": \"\",\n\t\"cmd\/internal\/obj.ABI %v\": \"\",\n\t\"error %v\": \"\",\n\t\"float64 %.2f\": \"\",\n\t\"float64 %.3f\": \"\",\n\t\"float64 %.6g\": \"\",\n\t\"float64 %g\": \"\",\n\t\"int %-12d\": \"\",\n\t\"int %-6d\": \"\",\n\t\"int %-8o\": \"\",\n\t\"int %02d\": \"\",\n\t\"int %6d\": \"\",\n\t\"int %c\": \"\",\n\t\"int %d\": \"\",\n\t\"int %v\": \"\",\n\t\"int %x\": \"\",\n\t\"int16 %d\": \"\",\n\t\"int16 %x\": \"\",\n\t\"int32 %d\": \"\",\n\t\"int32 %v\": \"\",\n\t\"int32 %x\": \"\",\n\t\"int64 %+d\": \"\",\n\t\"int64 %-10d\": \"\",\n\t\"int64 %.5d\": \"\",\n\t\"int64 %X\": \"\",\n\t\"int64 %d\": \"\",\n\t\"int64 %v\": \"\",\n\t\"int64 %x\": \"\",\n\t\"int8 %d\": \"\",\n\t\"int8 %x\": \"\",\n\t\"interface{} %#v\": \"\",\n\t\"interface{} %T\": \"\",\n\t\"interface{} %p\": \"\",\n\t\"interface{} %q\": \"\",\n\t\"interface{} %s\": \"\",\n\t\"interface{} %v\": \"\",\n\t\"map[*cmd\/compile\/internal\/gc.Node]*cmd\/compile\/internal\/ssa.Value %v\": \"\",\n\t\"map[*cmd\/compile\/internal\/gc.Node][]*cmd\/compile\/internal\/gc.Node %v\": \"\",\n\t\"map[cmd\/compile\/internal\/ssa.ID]uint32 %v\": \"\",\n\t\"math\/big.Accuracy %s\": \"\",\n\t\"reflect.Type %s\": \"\",\n\t\"rune %#U\": \"\",\n\t\"rune %c\": \"\",\n\t\"rune %q\": \"\",\n\t\"string %-*s\": \"\",\n\t\"string %-16s\": \"\",\n\t\"string %-6s\": \"\",\n\t\"string %.*s\": \"\",\n\t\"string %q\": \"\",\n\t\"string %s\": \"\",\n\t\"string %v\": \"\",\n\t\"time.Duration %d\": \"\",\n\t\"time.Duration %v\": \"\",\n\t\"uint %04x\": \"\",\n\t\"uint %5d\": \"\",\n\t\"uint %d\": \"\",\n\t\"uint %x\": \"\",\n\t\"uint16 %d\": \"\",\n\t\"uint16 %v\": \"\",\n\t\"uint16 %x\": \"\",\n\t\"uint32 %#x\": \"\",\n\t\"uint32 %d\": \"\",\n\t\"uint32 %v\": \"\",\n\t\"uint32 %x\": \"\",\n\t\"uint64 %08x\": \"\",\n\t\"uint64 %d\": \"\",\n\t\"uint64 %x\": \"\",\n\t\"uint8 %d\": \"\",\n\t\"uint8 %v\": \"\",\n\t\"uint8 %x\": \"\",\n\t\"uintptr %d\": \"\",\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"github.com\/awslabs\/aws-sdk-go\/aws\"\n\t\"github.com\/awslabs\/aws-sdk-go\/service\/autoscaling\"\n)\n\nfunc resourceAwsAutoscalingGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsAutoscalingGroupCreate,\n\t\tRead: resourceAwsAutoscalingGroupRead,\n\t\tUpdate: resourceAwsAutoscalingGroupUpdate,\n\t\tDelete: resourceAwsAutoscalingGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"launch_configuration\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"desired_capacity\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"min_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"max_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"default_cooldown\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"force_delete\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"health_check_grace_period\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"health_check_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"availability_zones\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"load_balancers\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"vpc_zone_identifier\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"termination_policies\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"tag\": autoscalingTagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\tvar autoScalingGroupOpts autoscaling.CreateAutoScalingGroupInput\n\tautoScalingGroupOpts.AutoScalingGroupName = aws.String(d.Get(\"name\").(string))\n\tautoScalingGroupOpts.LaunchConfigurationName = aws.String(d.Get(\"launch_configuration\").(string))\n\tautoScalingGroupOpts.MinSize = aws.Long(int64(d.Get(\"min_size\").(int)))\n\tautoScalingGroupOpts.MaxSize = aws.Long(int64(d.Get(\"max_size\").(int)))\n\tautoScalingGroupOpts.AvailabilityZones = expandStringList(\n\t\td.Get(\"availability_zones\").(*schema.Set).List())\n\n\tif v, ok := d.GetOk(\"tag\"); ok {\n\t\tautoScalingGroupOpts.Tags = autoscalingTagsFromMap(\n\t\t\tsetToMapByKey(v.(*schema.Set), \"key\"), d.Get(\"name\").(string))\n\t}\n\n\tif v, ok := d.GetOk(\"default_cooldown\"); ok {\n\t\tautoScalingGroupOpts.DefaultCooldown = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"health_check_type\"); ok && v.(string) != \"\" {\n\t\tautoScalingGroupOpts.HealthCheckType = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"desired_capacity\"); ok {\n\t\tautoScalingGroupOpts.DesiredCapacity = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"health_check_grace_period\"); ok {\n\t\tautoScalingGroupOpts.HealthCheckGracePeriod = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"load_balancers\"); ok && v.(*schema.Set).Len() > 0 {\n\t\tautoScalingGroupOpts.LoadBalancerNames = expandStringList(\n\t\t\tv.(*schema.Set).List())\n\t}\n\n\tif v, ok := d.GetOk(\"vpc_zone_identifier\"); ok && v.(*schema.Set).Len() > 0 {\n\t\texp := expandStringList(v.(*schema.Set).List())\n\t\tstrs := make([]string, len(exp))\n\t\tfor _, s := range exp {\n\t\t\tstrs = append(strs, *s)\n\t\t}\n\t\tautoScalingGroupOpts.VPCZoneIdentifier = aws.String(strings.Join(strs, \",\"))\n\t}\n\n\tif v, ok := d.GetOk(\"termination_policies\"); ok && v.(*schema.Set).Len() > 0 {\n\t\tautoScalingGroupOpts.TerminationPolicies = expandStringList(\n\t\t\tv.(*schema.Set).List())\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group create configuration: %#v\", autoScalingGroupOpts)\n\t_, err := autoscalingconn.CreateAutoScalingGroup(&autoScalingGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Autoscaling Group: %s\", err)\n\t}\n\n\td.SetId(d.Get(\"name\").(string))\n\tlog.Printf(\"[INFO] AutoScaling Group ID: %s\", d.Id())\n\n\treturn resourceAwsAutoscalingGroupRead(d, meta)\n}\n\nfunc resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tg, err := getAwsAutoscalingGroup(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif g == nil {\n\t\treturn nil\n\t}\n\n\td.Set(\"availability_zones\", g.AvailabilityZones)\n\td.Set(\"default_cooldown\", g.DefaultCooldown)\n\td.Set(\"desired_capacity\", g.DesiredCapacity)\n\td.Set(\"health_check_grace_period\", g.HealthCheckGracePeriod)\n\td.Set(\"health_check_type\", g.HealthCheckType)\n\td.Set(\"launch_configuration\", g.LaunchConfigurationName)\n\td.Set(\"load_balancers\", g.LoadBalancerNames)\n\td.Set(\"min_size\", g.MinSize)\n\td.Set(\"max_size\", g.MaxSize)\n\td.Set(\"name\", g.AutoScalingGroupName)\n\td.Set(\"tag\", g.Tags)\n\td.Set(\"vpc_zone_identifier\", strings.Split(*g.VPCZoneIdentifier, \",\"))\n\td.Set(\"termination_policies\", g.TerminationPolicies)\n\n\treturn nil\n}\n\nfunc resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\topts := autoscaling.UpdateAutoScalingGroupInput{\n\t\tAutoScalingGroupName: aws.String(d.Id()),\n\t}\n\n\tif d.HasChange(\"desired_capacity\") {\n\t\topts.DesiredCapacity = aws.Long(int64(d.Get(\"desired_capacity\").(int)))\n\t}\n\n\tif d.HasChange(\"launch_configuration\") {\n\t\topts.LaunchConfigurationName = aws.String(d.Get(\"launch_configuration\").(string))\n\t}\n\n\tif d.HasChange(\"min_size\") {\n\t\topts.MinSize = aws.Long(int64(d.Get(\"min_size\").(int)))\n\t}\n\n\tif d.HasChange(\"max_size\") {\n\t\topts.MaxSize = aws.Long(int64(d.Get(\"max_size\").(int)))\n\t}\n\n\tif err := setAutoscalingTags(autoscalingconn, d); err != nil {\n\t\treturn err\n\t} else {\n\t\td.SetPartial(\"tag\")\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group update configuration: %#v\", opts)\n\t_, err := autoscalingconn.UpdateAutoScalingGroup(&opts)\n\tif err != nil {\n\t\td.Partial(true)\n\t\treturn fmt.Errorf(\"Error updating Autoscaling group: %s\", err)\n\t}\n\n\treturn resourceAwsAutoscalingGroupRead(d, meta)\n}\n\nfunc resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\t\/\/ Read the autoscaling group first. If it doesn't exist, we're done.\n\t\/\/ We need the group in order to check if there are instances attached.\n\t\/\/ If so, we need to remove those first.\n\tg, err := getAwsAutoscalingGroup(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif g == nil {\n\t\treturn nil\n\t}\n\tif len(g.Instances) > 0 || *g.DesiredCapacity > 0 {\n\t\tif err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group destroy: %v\", d.Id())\n\tdeleteopts := autoscaling.DeleteAutoScalingGroupInput{AutoScalingGroupName: aws.String(d.Id())}\n\n\t\/\/ You can force an autoscaling group to delete\n\t\/\/ even if it's in the process of scaling a resource.\n\t\/\/ Normally, you would set the min-size and max-size to 0,0\n\t\/\/ and then delete the group. This bypasses that and leaves\n\t\/\/ resources potentially dangling.\n\tif d.Get(\"force_delete\").(bool) {\n\t\tdeleteopts.ForceDelete = aws.Boolean(true)\n\t}\n\n\tif _, err := autoscalingconn.DeleteAutoScalingGroup(&deleteopts); err != nil {\n\t\tautoscalingerr, ok := err.(aws.APIError)\n\t\tif ok && autoscalingerr.Code == \"InvalidGroup.NotFound\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\tif g, _ = getAwsAutoscalingGroup(d, meta); g != nil {\n\t\t\treturn fmt.Errorf(\"Auto Scaling Group still exists\")\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc getAwsAutoscalingGroup(\n\td *schema.ResourceData,\n\tmeta interface{}) (*autoscaling.AutoScalingGroup, error) {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\tdescribeOpts := autoscaling.DescribeAutoScalingGroupsInput{\n\t\tAutoScalingGroupNames: []*string{aws.String(d.Id())},\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group describe configuration: %#v\", describeOpts)\n\tdescribeGroups, err := autoscalingconn.DescribeAutoScalingGroups(&describeOpts)\n\tif err != nil {\n\t\tautoscalingerr, ok := err.(aws.APIError)\n\t\tif ok && autoscalingerr.Code == \"InvalidGroup.NotFound\" {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Error retrieving AutoScaling groups: %s\", err)\n\t}\n\n\t\/\/ Search for the autoscaling group\n\tfor idx, asc := range describeGroups.AutoScalingGroups {\n\t\tif *asc.AutoScalingGroupName == d.Id() {\n\t\t\treturn describeGroups.AutoScalingGroups[idx], nil\n\t\t}\n\t}\n\n\t\/\/ ASG not found\n\td.SetId(\"\")\n\treturn nil, nil\n}\n\nfunc resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\t\/\/ First, set the capacity to zero so the group will drain\n\tlog.Printf(\"[DEBUG] Reducing autoscaling group capacity to zero\")\n\topts := autoscaling.UpdateAutoScalingGroupInput{\n\t\tAutoScalingGroupName: aws.String(d.Id()),\n\t\tDesiredCapacity: aws.Long(0),\n\t\tMinSize: aws.Long(0),\n\t\tMaxSize: aws.Long(0),\n\t}\n\tif _, err := autoscalingconn.UpdateAutoScalingGroup(&opts); err != nil {\n\t\treturn fmt.Errorf(\"Error setting capacity to zero to drain: %s\", err)\n\t}\n\n\t\/\/ Next, wait for the autoscale group to drain\n\tlog.Printf(\"[DEBUG] Waiting for group to have zero instances\")\n\treturn resource.Retry(10*time.Minute, func() error {\n\t\tg, err := getAwsAutoscalingGroup(d, meta)\n\t\tif err != nil {\n\t\t\treturn resource.RetryError{Err: err}\n\t\t}\n\t\tif g == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(g.Instances) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"group still has %d instances\", len(g.Instances))\n\t})\n}\n<commit_msg>don't need to create a new ASG for this change<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"github.com\/awslabs\/aws-sdk-go\/aws\"\n\t\"github.com\/awslabs\/aws-sdk-go\/service\/autoscaling\"\n)\n\nfunc resourceAwsAutoscalingGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsAutoscalingGroupCreate,\n\t\tRead: resourceAwsAutoscalingGroupRead,\n\t\tUpdate: resourceAwsAutoscalingGroupUpdate,\n\t\tDelete: resourceAwsAutoscalingGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"launch_configuration\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"desired_capacity\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"min_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"max_size\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"default_cooldown\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"force_delete\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"health_check_grace_period\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"health_check_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"availability_zones\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"load_balancers\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"vpc_zone_identifier\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"termination_policies\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"tag\": autoscalingTagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\tvar autoScalingGroupOpts autoscaling.CreateAutoScalingGroupInput\n\tautoScalingGroupOpts.AutoScalingGroupName = aws.String(d.Get(\"name\").(string))\n\tautoScalingGroupOpts.LaunchConfigurationName = aws.String(d.Get(\"launch_configuration\").(string))\n\tautoScalingGroupOpts.MinSize = aws.Long(int64(d.Get(\"min_size\").(int)))\n\tautoScalingGroupOpts.MaxSize = aws.Long(int64(d.Get(\"max_size\").(int)))\n\tautoScalingGroupOpts.AvailabilityZones = expandStringList(\n\t\td.Get(\"availability_zones\").(*schema.Set).List())\n\n\tif v, ok := d.GetOk(\"tag\"); ok {\n\t\tautoScalingGroupOpts.Tags = autoscalingTagsFromMap(\n\t\t\tsetToMapByKey(v.(*schema.Set), \"key\"), d.Get(\"name\").(string))\n\t}\n\n\tif v, ok := d.GetOk(\"default_cooldown\"); ok {\n\t\tautoScalingGroupOpts.DefaultCooldown = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"health_check_type\"); ok && v.(string) != \"\" {\n\t\tautoScalingGroupOpts.HealthCheckType = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"desired_capacity\"); ok {\n\t\tautoScalingGroupOpts.DesiredCapacity = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"health_check_grace_period\"); ok {\n\t\tautoScalingGroupOpts.HealthCheckGracePeriod = aws.Long(int64(v.(int)))\n\t}\n\n\tif v, ok := d.GetOk(\"load_balancers\"); ok && v.(*schema.Set).Len() > 0 {\n\t\tautoScalingGroupOpts.LoadBalancerNames = expandStringList(\n\t\t\tv.(*schema.Set).List())\n\t}\n\n\tif v, ok := d.GetOk(\"vpc_zone_identifier\"); ok && v.(*schema.Set).Len() > 0 {\n\t\texp := expandStringList(v.(*schema.Set).List())\n\t\tstrs := make([]string, len(exp))\n\t\tfor _, s := range exp {\n\t\t\tstrs = append(strs, *s)\n\t\t}\n\t\tautoScalingGroupOpts.VPCZoneIdentifier = aws.String(strings.Join(strs, \",\"))\n\t}\n\n\tif v, ok := d.GetOk(\"termination_policies\"); ok && v.(*schema.Set).Len() > 0 {\n\t\tautoScalingGroupOpts.TerminationPolicies = expandStringList(\n\t\t\tv.(*schema.Set).List())\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group create configuration: %#v\", autoScalingGroupOpts)\n\t_, err := autoscalingconn.CreateAutoScalingGroup(&autoScalingGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Autoscaling Group: %s\", err)\n\t}\n\n\td.SetId(d.Get(\"name\").(string))\n\tlog.Printf(\"[INFO] AutoScaling Group ID: %s\", d.Id())\n\n\treturn resourceAwsAutoscalingGroupRead(d, meta)\n}\n\nfunc resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tg, err := getAwsAutoscalingGroup(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif g == nil {\n\t\treturn nil\n\t}\n\n\td.Set(\"availability_zones\", g.AvailabilityZones)\n\td.Set(\"default_cooldown\", g.DefaultCooldown)\n\td.Set(\"desired_capacity\", g.DesiredCapacity)\n\td.Set(\"health_check_grace_period\", g.HealthCheckGracePeriod)\n\td.Set(\"health_check_type\", g.HealthCheckType)\n\td.Set(\"launch_configuration\", g.LaunchConfigurationName)\n\td.Set(\"load_balancers\", g.LoadBalancerNames)\n\td.Set(\"min_size\", g.MinSize)\n\td.Set(\"max_size\", g.MaxSize)\n\td.Set(\"name\", g.AutoScalingGroupName)\n\td.Set(\"tag\", g.Tags)\n\td.Set(\"vpc_zone_identifier\", strings.Split(*g.VPCZoneIdentifier, \",\"))\n\td.Set(\"termination_policies\", g.TerminationPolicies)\n\n\treturn nil\n}\n\nfunc resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\topts := autoscaling.UpdateAutoScalingGroupInput{\n\t\tAutoScalingGroupName: aws.String(d.Id()),\n\t}\n\n\tif d.HasChange(\"desired_capacity\") {\n\t\topts.DesiredCapacity = aws.Long(int64(d.Get(\"desired_capacity\").(int)))\n\t}\n\n\tif d.HasChange(\"launch_configuration\") {\n\t\topts.LaunchConfigurationName = aws.String(d.Get(\"launch_configuration\").(string))\n\t}\n\n\tif d.HasChange(\"min_size\") {\n\t\topts.MinSize = aws.Long(int64(d.Get(\"min_size\").(int)))\n\t}\n\n\tif d.HasChange(\"max_size\") {\n\t\topts.MaxSize = aws.Long(int64(d.Get(\"max_size\").(int)))\n\t}\n\n\tif err := setAutoscalingTags(autoscalingconn, d); err != nil {\n\t\treturn err\n\t} else {\n\t\td.SetPartial(\"tag\")\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group update configuration: %#v\", opts)\n\t_, err := autoscalingconn.UpdateAutoScalingGroup(&opts)\n\tif err != nil {\n\t\td.Partial(true)\n\t\treturn fmt.Errorf(\"Error updating Autoscaling group: %s\", err)\n\t}\n\n\treturn resourceAwsAutoscalingGroupRead(d, meta)\n}\n\nfunc resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\t\/\/ Read the autoscaling group first. If it doesn't exist, we're done.\n\t\/\/ We need the group in order to check if there are instances attached.\n\t\/\/ If so, we need to remove those first.\n\tg, err := getAwsAutoscalingGroup(d, meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif g == nil {\n\t\treturn nil\n\t}\n\tif len(g.Instances) > 0 || *g.DesiredCapacity > 0 {\n\t\tif err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group destroy: %v\", d.Id())\n\tdeleteopts := autoscaling.DeleteAutoScalingGroupInput{AutoScalingGroupName: aws.String(d.Id())}\n\n\t\/\/ You can force an autoscaling group to delete\n\t\/\/ even if it's in the process of scaling a resource.\n\t\/\/ Normally, you would set the min-size and max-size to 0,0\n\t\/\/ and then delete the group. This bypasses that and leaves\n\t\/\/ resources potentially dangling.\n\tif d.Get(\"force_delete\").(bool) {\n\t\tdeleteopts.ForceDelete = aws.Boolean(true)\n\t}\n\n\tif _, err := autoscalingconn.DeleteAutoScalingGroup(&deleteopts); err != nil {\n\t\tautoscalingerr, ok := err.(aws.APIError)\n\t\tif ok && autoscalingerr.Code == \"InvalidGroup.NotFound\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\tif g, _ = getAwsAutoscalingGroup(d, meta); g != nil {\n\t\t\treturn fmt.Errorf(\"Auto Scaling Group still exists\")\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc getAwsAutoscalingGroup(\n\td *schema.ResourceData,\n\tmeta interface{}) (*autoscaling.AutoScalingGroup, error) {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\tdescribeOpts := autoscaling.DescribeAutoScalingGroupsInput{\n\t\tAutoScalingGroupNames: []*string{aws.String(d.Id())},\n\t}\n\n\tlog.Printf(\"[DEBUG] AutoScaling Group describe configuration: %#v\", describeOpts)\n\tdescribeGroups, err := autoscalingconn.DescribeAutoScalingGroups(&describeOpts)\n\tif err != nil {\n\t\tautoscalingerr, ok := err.(aws.APIError)\n\t\tif ok && autoscalingerr.Code == \"InvalidGroup.NotFound\" {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Error retrieving AutoScaling groups: %s\", err)\n\t}\n\n\t\/\/ Search for the autoscaling group\n\tfor idx, asc := range describeGroups.AutoScalingGroups {\n\t\tif *asc.AutoScalingGroupName == d.Id() {\n\t\t\treturn describeGroups.AutoScalingGroups[idx], nil\n\t\t}\n\t}\n\n\t\/\/ ASG not found\n\td.SetId(\"\")\n\treturn nil, nil\n}\n\nfunc resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {\n\tautoscalingconn := meta.(*AWSClient).autoscalingconn\n\n\t\/\/ First, set the capacity to zero so the group will drain\n\tlog.Printf(\"[DEBUG] Reducing autoscaling group capacity to zero\")\n\topts := autoscaling.UpdateAutoScalingGroupInput{\n\t\tAutoScalingGroupName: aws.String(d.Id()),\n\t\tDesiredCapacity: aws.Long(0),\n\t\tMinSize: aws.Long(0),\n\t\tMaxSize: aws.Long(0),\n\t}\n\tif _, err := autoscalingconn.UpdateAutoScalingGroup(&opts); err != nil {\n\t\treturn fmt.Errorf(\"Error setting capacity to zero to drain: %s\", err)\n\t}\n\n\t\/\/ Next, wait for the autoscale group to drain\n\tlog.Printf(\"[DEBUG] Waiting for group to have zero instances\")\n\treturn resource.Retry(10*time.Minute, func() error {\n\t\tg, err := getAwsAutoscalingGroup(d, meta)\n\t\tif err != nil {\n\t\t\treturn resource.RetryError{Err: err}\n\t\t}\n\t\tif g == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(g.Instances) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"group still has %d instances\", len(g.Instances))\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The StudyGolang Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ http:\/\/studygolang.com\n\/\/ Author: polaris\tpolaris@studygolang.com\n\npackage controller\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\n\t\"http\/middleware\"\n\t\"logic\"\n\t\"model\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/polaris1119\/goutils\"\n)\n\ntype MessageController struct{}\n\n\/\/ 注册路由\nfunc (self MessageController) RegisterRoute(g *echo.Group) {\n\tmessageG := g.Group(\"\/message\/\", middleware.NeedLogin())\n\n\tmessageG.GET(\":msgtype\", self.ReadList)\n\tmessageG.GET(\"system\", self.ReadList)\n\tmessageG.Match([]string{\"GET\", \"POST\"}, \"\/send\", self.Send)\n\tmessageG.POST(\"delete\", self.Delete)\n\n\t\/\/ g.GET(\"\/message\/:msgtype\", self.ReadList, middleware.NeedLogin())\n\t\/\/ g.GET(\"\/message\/system\", self.ReadList, middleware.NeedLogin())\n\t\/\/ g.Match([]string{\"GET\", \"POST\"}, \"\/message\/send\", self.Send, middleware.NeedLogin())\n\t\/\/ g.POST(\"\/message\/delete\", self.Delete, middleware.NeedLogin())\n}\n\n\/\/ Send 发短消息\nfunc (MessageController) Send(ctx echo.Context) error {\n\tcontent := ctx.FormValue(\"content\")\n\t\/\/ 请求发送消息页面\n\tif content == \"\" || ctx.Request().Method() != \"POST\" {\n\t\tusername := ctx.FormValue(\"username\")\n\t\tif username == \"\" {\n\t\t\treturn ctx.Redirect(http.StatusSeeOther, \"\/\")\n\t\t}\n\t\tuser := logic.DefaultUser.FindOne(ctx, \"username\", username)\n\t\treturn render(ctx, \"messages\/send.html\", map[string]interface{}{\"user\": user})\n\t}\n\n\tuser := ctx.Get(\"user\").(*model.Me)\n\tto := goutils.MustInt(ctx.FormValue(\"to\"))\n\tok := logic.DefaultMessage.SendMessageTo(ctx, user.Uid, to, content)\n\tif !ok {\n\t\treturn fail(ctx, 1, \"对不起,发送失败,请稍候再试!\")\n\t}\n\n\treturn success(ctx, nil)\n}\n\n\/\/ 消息列表\nfunc (MessageController) ReadList(ctx echo.Context) error {\n\tuser := ctx.Get(\"user\").(*model.Me)\n\tmsgtype := ctx.Param(\"msgtype\")\n\tif msgtype == \"\" {\n\t\tmsgtype = \"system\"\n\t}\n\n\tcurPage := goutils.MustInt(ctx.QueryParam(\"p\"), 1)\n\tpaginator := logic.NewPaginator(curPage)\n\n\tvar (\n\t\tmessages []map[string]interface{}\n\t\ttotal int64\n\t)\n\tswitch msgtype {\n\tcase \"system\":\n\t\tmessages = logic.DefaultMessage.FindSysMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.SysMsgCount(ctx, user.Uid)\n\tcase \"inbox\":\n\t\tmessages = logic.DefaultMessage.FindToMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.ToMsgCount(ctx, user.Uid)\n\tcase \"outbox\":\n\t\tmessages = logic.DefaultMessage.FindFromMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.FromMsgCount(ctx, user.Uid)\n\tdefault:\n\t\treturn ctx.Redirect(http.StatusSeeOther, \"\/\")\n\t}\n\n\tpageHtml := paginator.SetTotal(total).GetPageHtml(fmt.Sprintf(\"\/system\/%s\", msgtype))\n\n\treturn render(ctx, \"messages\/list.html\", map[string]interface{}{\"messages\": messages, \"msgtype\": msgtype, \"page\": template.HTML(pageHtml)})\n}\n\n\/\/ 删除消息\nfunc (MessageController) Delete(ctx echo.Context) error {\n\tid := ctx.FormValue(\"id\")\n\tmsgtype := ctx.FormValue(\"msgtype\")\n\tif !logic.DefaultMessage.DeleteMessage(ctx, id, msgtype) {\n\t\treturn fail(ctx, 1, \"对不起,删除失败,请稍候再试!\")\n\t}\n\n\treturn success(ctx, nil)\n}\n<commit_msg>send msessage bugfix<commit_after>\/\/ Copyright 2013 The StudyGolang Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ http:\/\/studygolang.com\n\/\/ Author: polaris\tpolaris@studygolang.com\n\npackage controller\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\n\t\"http\/middleware\"\n\t\"logic\"\n\t\"model\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/polaris1119\/goutils\"\n)\n\ntype MessageController struct{}\n\n\/\/ 注册路由\nfunc (self MessageController) RegisterRoute(g *echo.Group) {\n\tmessageG := g.Group(\"\/message\/\", middleware.NeedLogin())\n\n\tmessageG.GET(\":msgtype\", self.ReadList)\n\tmessageG.GET(\"system\", self.ReadList)\n\tmessageG.Match([]string{\"GET\", \"POST\"}, \"send\", self.Send)\n\tmessageG.POST(\"delete\", self.Delete)\n\n\t\/\/ g.GET(\"\/message\/:msgtype\", self.ReadList, middleware.NeedLogin())\n\t\/\/ g.GET(\"\/message\/system\", self.ReadList, middleware.NeedLogin())\n\t\/\/ g.Match([]string{\"GET\", \"POST\"}, \"\/message\/send\", self.Send, middleware.NeedLogin())\n\t\/\/ g.POST(\"\/message\/delete\", self.Delete, middleware.NeedLogin())\n}\n\n\/\/ Send 发短消息\nfunc (MessageController) Send(ctx echo.Context) error {\n\tcontent := ctx.FormValue(\"content\")\n\t\/\/ 请求发送消息页面\n\tif content == \"\" || ctx.Request().Method() != \"POST\" {\n\t\tusername := ctx.FormValue(\"username\")\n\t\tif username == \"\" {\n\t\t\treturn ctx.Redirect(http.StatusSeeOther, \"\/\")\n\t\t}\n\t\tuser := logic.DefaultUser.FindOne(ctx, \"username\", username)\n\t\treturn render(ctx, \"messages\/send.html\", map[string]interface{}{\"user\": user})\n\t}\n\n\tuser := ctx.Get(\"user\").(*model.Me)\n\tto := goutils.MustInt(ctx.FormValue(\"to\"))\n\tok := logic.DefaultMessage.SendMessageTo(ctx, user.Uid, to, content)\n\tif !ok {\n\t\treturn fail(ctx, 1, \"对不起,发送失败,请稍候再试!\")\n\t}\n\n\treturn success(ctx, nil)\n}\n\n\/\/ 消息列表\nfunc (MessageController) ReadList(ctx echo.Context) error {\n\tuser := ctx.Get(\"user\").(*model.Me)\n\tmsgtype := ctx.Param(\"msgtype\")\n\tif msgtype == \"\" {\n\t\tmsgtype = \"system\"\n\t}\n\n\tcurPage := goutils.MustInt(ctx.QueryParam(\"p\"), 1)\n\tpaginator := logic.NewPaginator(curPage)\n\n\tvar (\n\t\tmessages []map[string]interface{}\n\t\ttotal int64\n\t)\n\tswitch msgtype {\n\tcase \"system\":\n\t\tmessages = logic.DefaultMessage.FindSysMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.SysMsgCount(ctx, user.Uid)\n\tcase \"inbox\":\n\t\tmessages = logic.DefaultMessage.FindToMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.ToMsgCount(ctx, user.Uid)\n\tcase \"outbox\":\n\t\tmessages = logic.DefaultMessage.FindFromMsgsByUid(ctx, user.Uid, paginator)\n\t\ttotal = logic.DefaultMessage.FromMsgCount(ctx, user.Uid)\n\tdefault:\n\t\treturn ctx.Redirect(http.StatusSeeOther, \"\/\")\n\t}\n\n\tpageHtml := paginator.SetTotal(total).GetPageHtml(fmt.Sprintf(\"\/system\/%s\", msgtype))\n\n\treturn render(ctx, \"messages\/list.html\", map[string]interface{}{\"messages\": messages, \"msgtype\": msgtype, \"page\": template.HTML(pageHtml)})\n}\n\n\/\/ 删除消息\nfunc (MessageController) Delete(ctx echo.Context) error {\n\tid := ctx.FormValue(\"id\")\n\tmsgtype := ctx.FormValue(\"msgtype\")\n\tif !logic.DefaultMessage.DeleteMessage(ctx, id, msgtype) {\n\t\treturn fail(ctx, 1, \"对不起,删除失败,请稍候再试!\")\n\t}\n\n\treturn success(ctx, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package kdtree\n\nimport (\n\t\"alg\"\n\t\"ellipsoid\"\n\t\"errors\"\n\t\"fmt\"\n\t\"geo\"\n\t\"graph\"\n\t\"log\"\n\t\"math\"\n\t\"mm\"\n\t\"path\"\n)\n\nvar (\n\te ellipsoid.Ellipsoid\n\tclusterKdTree ClusterKdTree\n)\n\nfunc init() {\n\te = ellipsoid.Init(\"WGS84\", ellipsoid.Degrees, ellipsoid.Meter, ellipsoid.Longitude_is_symmetric, ellipsoid.Bearing_is_symmetric)\n}\n\nfunc LoadKdTree(clusterGraph *graph.ClusterGraph, base string) error {\n\t\/\/ Load the k-d tree of all clusters\n\tclusterKdTrees := make([]*KdTree, len(clusterGraph.Cluster))\n\tfor i, g := range clusterGraph.Cluster {\n\t\tclusterDir := path.Join(base, fmt.Sprintf(\"\/cluster%d\", i+1))\n\t\tvar encodedSteps []uint64\n\t\tvar encodedCoordinates []int32\n\t\terr := mm.Open(path.Join(clusterDir, \"kdtree.ftf\"), &encodedSteps)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = mm.Open(path.Join(clusterDir, \"coordinates.ftf\"), &encodedCoordinates)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclusterKdTrees[i] = &KdTree{Graph: g, EncodedSteps: encodedSteps, EncodedCoordinates: encodedCoordinates}\n\t}\n\n\t\/\/ Load the k-d tree of the overlay graph\n\tvar encodedSteps []uint64\n\tvar encodedCoordinates []int32\n\terr := mm.Open(path.Join(base, \"\/overlay\/kdtree.ftf\"), &encodedSteps)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mm.Open(path.Join(base, \"\/overlay\/coordinates.ftf\"), &encodedCoordinates)\n\tif err != nil {\n\t\treturn err\n\t}\n\toverlayKdTree := &KdTree{Graph: clusterGraph.Overlay, EncodedSteps: encodedSteps, EncodedCoordinates: encodedCoordinates}\n\n\t\/\/ Load the bounding boxes of the clusters\n\tvar bboxesFile []int32\n\terr = mm.Open(path.Join(base, \"bboxes.ftf\"), &bboxesFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(bboxesFile)\/4 != clusterGraph.Overlay.ClusterCount() {\n\t\treturn errors.New(\"size of bboxes file does not match cluster count\")\n\t}\n\tbboxes := make([]geo.BBox, len(bboxesFile)\/4)\n\tfor i, _ := range bboxes {\n\t\tbboxes[i] = geo.DecodeBBox(bboxesFile[4*i : 4*i+4])\n\t}\n\n\tclusterKdTree = ClusterKdTree{Overlay: overlayKdTree, Cluster: clusterKdTrees, BBoxes: bboxes}\n\treturn nil\n}\n\n\/\/ NearestNeighbor returns -1 if the way is on the overlay graph\n\/\/ No fail strategy: a nearest point on the overlay graph is always returned if no point\n\/\/ is found in the clusters.\nfunc NearestNeighbor(x geo.Coordinate, forward bool, trans graph.Transport) (int, []graph.Way) {\n\tedges := []graph.Edge(nil)\n\tsteps := []geo.Coordinate(nil)\n\n\t\/\/ first search on the overlay graph\n\toverlay := clusterKdTree.Overlay\n\tbestStepIndex, coordOverlay, foundPoint := binarySearch(overlay, x, 0, overlay.EncodedStepLen()-1,\n\t\ttrue \/* compareLat *\/, trans, &edges)\n\tminDistance, _ := e.To(x.Lat, x.Lng, coordOverlay.Lat, coordOverlay.Lng)\n\n\t\/\/ for debugging\n\tbestCoord := coordOverlay\n\n\t\/\/ then search on all clusters where the point is inside the bounding box of the cluster\n\tclusterIndex := -1\n\tfor i, b := range clusterKdTree.BBoxes {\n\t\tif b.Contains(x) {\n\t\t\tkdTree := clusterKdTree.Cluster[i]\n\t\t\tstepIndex, coord, ok := binarySearch(kdTree, x, 0, kdTree.EncodedStepLen()-1, true \/* compareLat *\/, trans, &edges)\n\t\t\tdist, _ := e.To(x.Lat, x.Lng, coord.Lat, coord.Lng)\n\n\t\t\tif ok && (!foundPoint || dist < minDistance) {\n\t\t\t\tfoundPoint = true\n\t\t\t\tminDistance = dist\n\t\t\t\tbestStepIndex = stepIndex\n\t\t\t\tclusterIndex = i\n\n\t\t\t\t\/\/ for debugging\n\t\t\t\tbestCoord = coord\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ for debugging: linear search\n\tminD := math.Inf(1)\n\tminI := -1\n\tminCluster := -1\n\tvar minC geo.Coordinate\n\tfor i, b := range clusterKdTree.BBoxes {\n\t\tif b.Contains(x) {\n\t\t\tkdTree := clusterKdTree.Cluster[i]\n\t\t\tfor j := 0; j < kdTree.EncodedStepLen(); j++ {\n\t\t\t\tc, oki := decodeCoordinate(kdTree, j, trans, &edges)\n\t\t\t\tdd, _ := e.To(x.Lat, x.Lng, c.Lat, c.Lng)\n\t\t\t\tif oki && dd < minD {\n\t\t\t\t\tminD = dd\n\t\t\t\t\tminI = j\n\t\t\t\t\tminC = c\n\t\t\t\t\tminCluster = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Printf(\"nearest neighbor for %v\\n\", x)\n\tfmt.Printf(\"binary search: min distance: %v at %v in cluster %v %v\\n\", minDistance, bestStepIndex, clusterIndex, bestCoord)\n\tfmt.Printf(\"linear search: min distance: %v at %v in cluster %v %v\\n\", minD, minI, minCluster, minC)\n\n\tif clusterIndex >= 0 {\n\t\tkdTree := clusterKdTree.Cluster[clusterIndex]\n\t\treturn clusterIndex, decodeWays(kdTree.Graph, kdTree.EncodedStep(bestStepIndex), forward, trans, &edges, &steps)\n\t}\n\tlog.Printf(\"no matching bounding box found for (%v, %v)\", x.Lat, x.Lng)\n\treturn clusterIndex, decodeWays(overlay.Graph, overlay.EncodedStep(bestStepIndex), forward, trans, &edges, &steps)\n}\n\n\/\/ binarySearch in one k-d tree. The index, the coordinate, and if the returned step\/vertex\n\/\/ is accessible are returned.\nfunc binarySearch(kdTree *KdTree, x geo.Coordinate, start, end int, compareLat bool,\n\ttrans graph.Transport, edges *[]graph.Edge) (int, geo.Coordinate, bool) {\n\n\tif end-start <= 0 {\n\t\tstartCoord, startAccessible := decodeCoordinate(kdTree, start, trans, edges)\n\t\treturn start, startCoord, startAccessible\n\t}\n\n\tmiddle := (end-start)\/2 + start\n\n\t\/\/ exact hit\n\tmiddleCoord, middleAccessible := decodeCoordinate(kdTree, middle, trans, edges)\n\tif middleAccessible && x.Lat == middleCoord.Lat && x.Lng == middleCoord.Lng {\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\n\t\/\/ corner case where the nearest point can be on both sides of the middle\n\tif !middleAccessible || (compareLat && x.Lat == middleCoord.Lat) || (!compareLat && x.Lng == middleCoord.Lng) {\n\t\t\/\/ recursion on both halfs\n\t\tleftRecIndex, leftCoord, leftAccessible := binarySearch(kdTree, x, start, middle-1, !compareLat, trans, edges)\n\t\trightRecIndex, rightCoord, rightAccessible := binarySearch(kdTree, x, middle+1, end, !compareLat, trans, edges)\n\n\t\tif !middleAccessible && !leftAccessible && !rightAccessible {\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t}\n\n\t\t\/\/ Infinity is used if a vertex\/step it is not accessible as we know that at least one is accessible.\n\t\tdistMiddle := math.Inf(1)\n\t\tdistRecursionLeft := math.Inf(1)\n\t\tdistRecursionRight := math.Inf(1)\n\t\tif middleAccessible {\n\t\t\tdistMiddle, _ = e.To(x.Lat, x.Lng, middleCoord.Lat, middleCoord.Lng)\n\t\t}\n\t\tif leftAccessible {\n\t\t\tdistRecursionLeft, _ = e.To(x.Lat, x.Lng, leftCoord.Lat, leftCoord.Lng)\n\t\t}\n\t\tif rightAccessible {\n\t\t\tdistRecursionRight, _ = e.To(x.Lat, x.Lng, rightCoord.Lat, rightCoord.Lng)\n\t\t}\n\n\t\tif distRecursionLeft < distRecursionRight {\n\t\t\tif distRecursionLeft < distMiddle {\n\t\t\t\treturn leftRecIndex, leftCoord, leftAccessible\n\t\t\t}\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t}\n\t\tif distRecursionRight < distMiddle {\n\t\t\treturn rightRecIndex, rightCoord, rightAccessible\n\t\t}\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\n\tvar left bool\n\tif compareLat {\n\t\tleft = x.Lat < middleCoord.Lat\n\t} else {\n\t\tleft = x.Lng < middleCoord.Lng\n\t}\n\tif left {\n\t\t\/\/ stop if there is nothing left of the middle\n\t\tif middle == start {\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t}\n\n\t\t\/\/ recursion on the left half\n\t\trecIndex, recCoord, recAccessible := binarySearch(kdTree, x, start, middle-1, !compareLat, trans, edges)\n\n\t\t\/\/ compare middle and result from the left\n\t\tdistMiddle, _ := e.To(x.Lat, x.Lng, middleCoord.Lat, middleCoord.Lng)\n\t\tdistRecursion, _ := e.To(x.Lat, x.Lng, recCoord.Lat, recCoord.Lng)\n\t\tif !recAccessible || distMiddle < distRecursion {\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t}\n\t\treturn recIndex, recCoord, recAccessible\n\t}\n\t\/\/ stop if there is nothing right of the middle\n\tif middle == end {\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\n\t\/\/ recursion on the right half\n\trecIndex, recCoord, recAccessible := binarySearch(kdTree, x, middle+1, end, !compareLat, trans, edges)\n\n\t\/\/ compare middle and result from the right\n\tdistMiddle, _ := e.To(x.Lat, x.Lng, middleCoord.Lat, middleCoord.Lng)\n\tdistRecursion, _ := e.To(x.Lat, x.Lng, recCoord.Lat, recCoord.Lng)\n\tif !recAccessible || distMiddle < distRecursion {\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\treturn recIndex, recCoord, recAccessible\n}\n\n\/\/ decodeCoordinate returns the coordinate of the encoded vertex\/step and if it is accessible by the\n\/\/ given transport mode\nfunc decodeCoordinate(t *KdTree, i int, trans graph.Transport, edges *[]graph.Edge) (geo.Coordinate, bool) {\n\tg := t.Graph\n\tec := t.EncodedStep(i)\n\tcoord := geo.DecodeCoordinate(t.EncodedCoordinates[2*i], t.EncodedCoordinates[2*i+1])\n\n\tvertexIndex := ec >> (EdgeOffsetBits + StepOffsetBits)\n\tedgeOffset := (ec >> StepOffsetBits) & MaxEdgeOffset\n\tstepOffset := ec & MaxStepOffset\n\tvertex := graph.Vertex(vertexIndex)\n\n\tif edgeOffset == MaxEdgeOffset && stepOffset == MaxStepOffset {\n\t\t\/\/ it is a vertex and not a step\n\t\treturn coord, g.VertexAccessible(vertex, trans)\n\t}\n\n\tvar edge graph.Edge\n\tvar edgeAccessible bool\n\tswitch t := g.(type) {\n\tcase *graph.GraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\tedgeAccessible = t.EdgeAccessible(edge, trans)\n\tcase *graph.OverlayGraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\tedgeAccessible = t.EdgeAccessible(edge, trans)\n\tdefault:\n\t\tpanic(\"unexpected graph implementation\")\n\t}\n\n\treturn coord, edgeAccessible\n}\n\n\/\/ decodeWays returns one or two partital edges that are therefore called ways. Even for a vertex a\n\/\/ way is returned so that later code only has to consider ways. The other cases are explained below.\nfunc decodeWays(g graph.Graph, ec uint64, forward bool, trans graph.Transport, edges *[]graph.Edge, steps *[]geo.Coordinate) []graph.Way {\n\tvertexIndex := ec >> (EdgeOffsetBits + StepOffsetBits)\n\tedgeOffset := (ec >> StepOffsetBits) & MaxEdgeOffset\n\toffset := ec & MaxStepOffset\n\tvertex := graph.Vertex(vertexIndex)\n\n\tif edgeOffset == MaxEdgeOffset && offset == MaxStepOffset {\n\t\t\/\/ The easy case, where we hit some vertex exactly.\n\t\tw := make([]graph.Way, 1)\n\t\ttarget := g.VertexCoordinate(vertex)\n\t\tw[0] = graph.Way{Length: 0, Vertex: vertex, Steps: nil, Target: target}\n\t\treturn w\n\t}\n\n\tvar edge graph.Edge\n\toneway := false\n\tswitch t := g.(type) {\n\tcase *graph.GraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\toneway = alg.GetBit(t.Oneway, uint(edge))\n\tcase *graph.OverlayGraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\toneway = alg.GetBit(t.Oneway, uint(edge))\n\tdefault:\n\t\tpanic(\"unexpected graph implementation\")\n\t}\n\tif trans == graph.Foot {\n\t\toneway = false\n\t}\n\n\tt1 := vertex \/\/ start vertex\n\tt2 := g.EdgeOpposite(edge, vertex) \/\/ end vertex\n\n\t\/\/ now we can allocate the way corresponding to (edge,offset),\n\t\/\/ but there are three cases to consider:\n\t\/\/ - if the way is bidirectional we have to compute both directions,\n\t\/\/ if forward == true the from the offset two both endpoints,\n\t\/\/ and the reverse otherwise\n\t\/\/ - if the way is unidirectional then we have to compute the way\n\t\/\/ from the StartPoint to offset if forward == false\n\t\/\/ - otherwise we have to compute the way from offset to the EndPoint\n\t\/\/ Strictly speaking only the second case needs an additional binary\n\t\/\/ search in the form of edge.StartPoint, but let's keep this simple\n\t\/\/ for now.\n\t(*steps) = g.EdgeSteps(edge, vertex, *steps)\n\ts := *steps\n\n\tb1 := make([]geo.Coordinate, len(s[:offset]))\n\tb2 := make([]geo.Coordinate, len(s[offset+1:]))\n\tcopy(b1, s[:offset])\n\tcopy(b2, s[offset+1:])\n\tl1 := geo.StepLength(s[:offset+1])\n\tl2 := geo.StepLength(s[offset:])\n\tt1Coord := g.VertexCoordinate(t1)\n\tt2Coord := g.VertexCoordinate(t2)\n\td1, _ := e.To(t1Coord.Lat, t1Coord.Lng, s[0].Lat, s[0].Lng)\n\td2, _ := e.To(t2Coord.Lat, t2Coord.Lng, s[len(s)-1].Lat, s[len(s)-1].Lng)\n\tl1 += d1\n\tl2 += d2\n\ttarget := s[offset]\n\n\tif !forward {\n\t\treverse(b2)\n\t} else {\n\t\treverse(b1)\n\t}\n\n\tvar w []graph.Way\n\tif !oneway {\n\t\tw = make([]graph.Way, 2) \/\/ bidirectional\n\t\tw[0] = graph.Way{Length: l1, Vertex: t1, Steps: b1, Forward: forward, Target: target}\n\t\tw[1] = graph.Way{Length: l2, Vertex: t2, Steps: b2, Forward: forward, Target: target}\n\t} else {\n\t\tw = make([]graph.Way, 1) \/\/ one way\n\t\tif forward {\n\t\t\tw[0] = graph.Way{Length: l2, Vertex: t2, Steps: b2, Forward: forward, Target: target}\n\t\t} else {\n\t\t\tw[0] = graph.Way{Length: l1, Vertex: t1, Steps: b1, Forward: forward, Target: target}\n\t\t}\n\t}\n\treturn w\n}\n\nfunc reverse(steps []geo.Coordinate) {\n\tfor i, j := 0, len(steps)-1; i < j; i, j = i+1, j-1 {\n\t\tsteps[i], steps[j] = steps[j], steps[i]\n\t}\n}\n<commit_msg>better nearest neighbor search<commit_after>package kdtree\n\nimport (\n\t\"alg\"\n\t\"ellipsoid\"\n\t\"errors\"\n\t\"fmt\"\n\t\"geo\"\n\t\"graph\"\n\t\"log\"\n\t\"math\"\n\t\"mm\"\n\t\"path\"\n)\n\nvar (\n\te ellipsoid.Ellipsoid\n\tclusterKdTree ClusterKdTree\n)\n\nfunc init() {\n\te = ellipsoid.Init(\"WGS84\", ellipsoid.Degrees, ellipsoid.Meter, ellipsoid.Longitude_is_symmetric, ellipsoid.Bearing_is_symmetric)\n}\n\nfunc LoadKdTree(clusterGraph *graph.ClusterGraph, base string) error {\n\t\/\/ Load the k-d tree of all clusters\n\tclusterKdTrees := make([]*KdTree, len(clusterGraph.Cluster))\n\tfor i, g := range clusterGraph.Cluster {\n\t\tclusterDir := path.Join(base, fmt.Sprintf(\"\/cluster%d\", i+1))\n\t\tvar encodedSteps []uint64\n\t\tvar encodedCoordinates []int32\n\t\terr := mm.Open(path.Join(clusterDir, \"kdtree.ftf\"), &encodedSteps)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = mm.Open(path.Join(clusterDir, \"coordinates.ftf\"), &encodedCoordinates)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclusterKdTrees[i] = &KdTree{Graph: g, EncodedSteps: encodedSteps, EncodedCoordinates: encodedCoordinates}\n\t}\n\n\t\/\/ Load the k-d tree of the overlay graph\n\tvar encodedSteps []uint64\n\tvar encodedCoordinates []int32\n\terr := mm.Open(path.Join(base, \"\/overlay\/kdtree.ftf\"), &encodedSteps)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mm.Open(path.Join(base, \"\/overlay\/coordinates.ftf\"), &encodedCoordinates)\n\tif err != nil {\n\t\treturn err\n\t}\n\toverlayKdTree := &KdTree{Graph: clusterGraph.Overlay, EncodedSteps: encodedSteps, EncodedCoordinates: encodedCoordinates}\n\n\t\/\/ Load the bounding boxes of the clusters\n\tvar bboxesFile []int32\n\terr = mm.Open(path.Join(base, \"bboxes.ftf\"), &bboxesFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(bboxesFile)\/4 != clusterGraph.Overlay.ClusterCount() {\n\t\treturn errors.New(\"size of bboxes file does not match cluster count\")\n\t}\n\tbboxes := make([]geo.BBox, len(bboxesFile)\/4)\n\tfor i, _ := range bboxes {\n\t\tbboxes[i] = geo.DecodeBBox(bboxesFile[4*i : 4*i+4])\n\t}\n\n\tclusterKdTree = ClusterKdTree{Overlay: overlayKdTree, Cluster: clusterKdTrees, BBoxes: bboxes}\n\treturn nil\n}\n\n\/\/ NearestNeighbor returns -1 if the way is on the overlay graph\n\/\/ No fail strategy: a nearest point on the overlay graph is always returned if no point\n\/\/ is found in the clusters.\nfunc NearestNeighbor(x geo.Coordinate, forward bool, trans graph.Transport) (int, []graph.Way) {\n\tedges := []graph.Edge(nil)\n\tsteps := []geo.Coordinate(nil)\n\n\t\/\/ first search on the overlay graph\n\toverlay := clusterKdTree.Overlay\n\tbestStepIndex, coordOverlay, foundPoint := binarySearch(overlay, x, 0, overlay.EncodedStepLen()-1,\n\t\ttrue \/* compareLat *\/, trans, &edges)\n\tminDistance, _ := e.To(x.Lat, x.Lng, coordOverlay.Lat, coordOverlay.Lng)\n\n\t\/\/ for debugging\n\t\/\/bestCoord := coordOverlay\n\n\t\/\/ then search on all clusters where the point is inside the bounding box of the cluster\n\tclusterIndex := -1\n\tfor i, b := range clusterKdTree.BBoxes {\n\t\tif b.Contains(x) {\n\t\t\tkdTree := clusterKdTree.Cluster[i]\n\t\t\tstepIndex, coord, ok := binarySearch(kdTree, x, 0, kdTree.EncodedStepLen()-1, true \/* compareLat *\/, trans, &edges)\n\t\t\tdist, _ := e.To(x.Lat, x.Lng, coord.Lat, coord.Lng)\n\n\t\t\tif ok && (!foundPoint || dist < minDistance) {\n\t\t\t\tfoundPoint = true\n\t\t\t\tminDistance = dist\n\t\t\t\tbestStepIndex = stepIndex\n\t\t\t\tclusterIndex = i\n\n\t\t\t\t\/\/ for debugging\n\t\t\t\t\/\/bestCoord = coord\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ for debugging: linear search\n\t\/*minD := math.Inf(1)\n\tminI := -1\n\tminCluster := -1\n\tvar minC geo.Coordinate\n\tfor i, b := range clusterKdTree.BBoxes {\n\t\tif b.Contains(x) {\n\t\t\tkdTree := clusterKdTree.Cluster[i]\n\t\t\tfor j := 0; j < kdTree.EncodedStepLen(); j++ {\n\t\t\t\tc, oki := decodeCoordinate(kdTree, j, trans, &edges)\n\t\t\t\tdd, _ := e.To(x.Lat, x.Lng, c.Lat, c.Lng)\n\t\t\t\tif oki && dd < minD {\n\t\t\t\t\tminD = dd\n\t\t\t\t\tminI = j\n\t\t\t\t\tminC = c\n\t\t\t\t\tminCluster = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Printf(\"nearest neighbor for %v\\n\", x)\n\tfmt.Printf(\"binary search: min distance: %v at %v in cluster %v %v\\n\", minDistance, bestStepIndex, clusterIndex, bestCoord)\n\tfmt.Printf(\"linear search: min distance: %v at %v in cluster %v %v\\n\", minD, minI, minCluster, minC)*\/\n\n\tif clusterIndex >= 0 {\n\t\tkdTree := clusterKdTree.Cluster[clusterIndex]\n\t\treturn clusterIndex, decodeWays(kdTree.Graph, kdTree.EncodedStep(bestStepIndex), forward, trans, &edges, &steps)\n\t}\n\tlog.Printf(\"no matching bounding box found for (%v, %v)\", x.Lat, x.Lng)\n\treturn clusterIndex, decodeWays(overlay.Graph, overlay.EncodedStep(bestStepIndex), forward, trans, &edges, &steps)\n}\n\n\/\/ binarySearch in one k-d tree. The index, the coordinate, and if the returned step\/vertex\n\/\/ is accessible are returned.\nfunc binarySearch(kdTree *KdTree, x geo.Coordinate, start, end int, compareLat bool,\n\ttrans graph.Transport, edges *[]graph.Edge) (int, geo.Coordinate, bool) {\n\n\tif end <= start {\n\t\t\/\/ end of the recursion\n\t\tstartCoord, startAccessible := decodeCoordinate(kdTree, start, trans, edges)\n\t\treturn start, startCoord, startAccessible\n\t}\n\n\tmiddle := (end-start)\/2 + start\n\tmiddleCoord, middleAccessible := decodeCoordinate(kdTree, middle, trans, edges)\n\n\t\/\/ exact hit\n\tif middleAccessible && x.Lat == middleCoord.Lat && x.Lng == middleCoord.Lng {\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\n\tmiddleDist := math.Inf(1)\n\tif middleAccessible {\n\t\tmiddleDist, _ = e.To(x.Lat, x.Lng, middleCoord.Lat, middleCoord.Lng)\n\t}\n\n\t\/\/ recursion one half and if no accessible point is returned also on the other half\n\tvar left bool\n\tif compareLat {\n\t\tleft = x.Lat < middleCoord.Lat\n\t} else {\n\t\tleft = x.Lng < middleCoord.Lng\n\t}\n\tvar recIndex int\n\tvar recCoord geo.Coordinate\n\tvar recAccessible bool\n\tbothHalfs := false\n\tif left {\n\t\t\/\/ left\n\t\trecIndex, recCoord, recAccessible = binarySearch(kdTree, x, start, middle-1, !compareLat, trans, edges)\n\t\tif !recAccessible {\n\t\t\t\/\/ other half -> right\n\t\t\trecIndex, recCoord, recAccessible = binarySearch(kdTree, x, middle+1, end, !compareLat, trans, edges)\n\t\t\tbothHalfs = true\n\t\t}\n\t} else {\n\t\t\/\/ right\n\t\trecIndex, recCoord, recAccessible = binarySearch(kdTree, x, middle+1, end, !compareLat, trans, edges)\n\t\tif !recAccessible {\n\t\t\t\/\/ other half -> left\n\t\t\trecIndex, recCoord, recAccessible = binarySearch(kdTree, x, start, middle-1, !compareLat, trans, edges)\n\t\t\tbothHalfs = true\n\t\t}\n\t}\n\tbestDistance, _ := e.To(x.Lat, x.Lng, recCoord.Lat, recCoord.Lng)\n\n\t\/\/ we are finished if both have already been searched\n\tif bothHalfs {\n\t\tif middleDist < bestDistance {\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t} else {\n\t\t\treturn recIndex, recCoord, recAccessible\n\t\t}\n\t}\n\n\tdistToPlane := 0.0\n\tif compareLat {\n\t\tdistToPlane, _ = e.To(middleCoord.Lat, x.Lng, x.Lat, x.Lng)\n\t} else {\n\t\tdistToPlane, _ = e.To(x.Lat, middleCoord.Lng, x.Lat, x.Lng)\n\t}\n\n\tvar recIndex2 int\n\tvar recCoord2 geo.Coordinate\n\trecAccessible2 := false\n\t\/\/ test whether the current best distance circle crosses the plane\n\t\/\/distToPlane -= 0.002\n\tif bestDistance >= distToPlane {\n\t\t\/\/ search on the other half\n\t\tif !left {\n\t\t\t\/\/ left\n\t\t\trecIndex2, recCoord2, recAccessible2 = binarySearch(kdTree, x, start, middle-1, !compareLat, trans, edges)\n\t\t} else {\n\t\t\t\/\/ right\n\t\t\trecIndex2, recCoord2, recAccessible2 = binarySearch(kdTree, x, middle+1, end, !compareLat, trans, edges)\n\t\t}\n\t}\n\n\tbestDistance2 := math.Inf(1)\n\tif recAccessible2 {\n\t\tbestDistance2, _ = e.To(x.Lat, x.Lng, recCoord2.Lat, recCoord2.Lng)\n\t}\n\n\tif bestDistance < bestDistance2 {\n\t\tif middleDist < bestDistance {\n\t\t\treturn middle, middleCoord, middleAccessible\n\t\t} else {\n\t\t\treturn recIndex, recCoord, recAccessible\n\t\t}\n\t}\n\tif middleDist < bestDistance2 {\n\t\treturn middle, middleCoord, middleAccessible\n\t}\n\treturn recIndex2, recCoord2, recAccessible2\n}\n\n\/\/ decodeCoordinate returns the coordinate of the encoded vertex\/step and if it is accessible by the\n\/\/ given transport mode\nfunc decodeCoordinate(t *KdTree, i int, trans graph.Transport, edges *[]graph.Edge) (geo.Coordinate, bool) {\n\tg := t.Graph\n\tec := t.EncodedStep(i)\n\tcoord := geo.DecodeCoordinate(t.EncodedCoordinates[2*i], t.EncodedCoordinates[2*i+1])\n\n\tvertexIndex := ec >> (EdgeOffsetBits + StepOffsetBits)\n\tedgeOffset := (ec >> StepOffsetBits) & MaxEdgeOffset\n\tstepOffset := ec & MaxStepOffset\n\tvertex := graph.Vertex(vertexIndex)\n\n\tif edgeOffset == MaxEdgeOffset && stepOffset == MaxStepOffset {\n\t\t\/\/ it is a vertex and not a step\n\t\treturn coord, g.VertexAccessible(vertex, trans)\n\t}\n\n\tvar edge graph.Edge\n\tvar edgeAccessible bool\n\tswitch t := g.(type) {\n\tcase *graph.GraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\tedgeAccessible = t.EdgeAccessible(edge, trans)\n\tcase *graph.OverlayGraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\tedgeAccessible = t.EdgeAccessible(edge, trans)\n\tdefault:\n\t\tpanic(\"unexpected graph implementation\")\n\t}\n\n\treturn coord, edgeAccessible\n}\n\n\/\/ decodeWays returns one or two partital edges that are therefore called ways. Even for a vertex a\n\/\/ way is returned so that later code only has to consider ways. The other cases are explained below.\nfunc decodeWays(g graph.Graph, ec uint64, forward bool, trans graph.Transport, edges *[]graph.Edge, steps *[]geo.Coordinate) []graph.Way {\n\tvertexIndex := ec >> (EdgeOffsetBits + StepOffsetBits)\n\tedgeOffset := (ec >> StepOffsetBits) & MaxEdgeOffset\n\toffset := ec & MaxStepOffset\n\tvertex := graph.Vertex(vertexIndex)\n\n\tif edgeOffset == MaxEdgeOffset && offset == MaxStepOffset {\n\t\t\/\/ The easy case, where we hit some vertex exactly.\n\t\tw := make([]graph.Way, 1)\n\t\ttarget := g.VertexCoordinate(vertex)\n\t\tw[0] = graph.Way{Length: 0, Vertex: vertex, Steps: nil, Target: target}\n\t\treturn w\n\t}\n\n\tvar edge graph.Edge\n\toneway := false\n\tswitch t := g.(type) {\n\tcase *graph.GraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\toneway = alg.GetBit(t.Oneway, uint(edge))\n\tcase *graph.OverlayGraphFile:\n\t\t(*edges) = t.VertexRawEdges(vertex, *edges)\n\t\tedge = (*edges)[edgeOffset]\n\t\toneway = alg.GetBit(t.Oneway, uint(edge))\n\tdefault:\n\t\tpanic(\"unexpected graph implementation\")\n\t}\n\tif trans == graph.Foot {\n\t\toneway = false\n\t}\n\n\tt1 := vertex \/\/ start vertex\n\tt2 := g.EdgeOpposite(edge, vertex) \/\/ end vertex\n\n\t\/\/ now we can allocate the way corresponding to (edge,offset),\n\t\/\/ but there are three cases to consider:\n\t\/\/ - if the way is bidirectional we have to compute both directions,\n\t\/\/ if forward == true the from the offset two both endpoints,\n\t\/\/ and the reverse otherwise\n\t\/\/ - if the way is unidirectional then we have to compute the way\n\t\/\/ from the StartPoint to offset if forward == false\n\t\/\/ - otherwise we have to compute the way from offset to the EndPoint\n\t\/\/ Strictly speaking only the second case needs an additional binary\n\t\/\/ search in the form of edge.StartPoint, but let's keep this simple\n\t\/\/ for now.\n\t(*steps) = g.EdgeSteps(edge, vertex, *steps)\n\ts := *steps\n\n\tb1 := make([]geo.Coordinate, len(s[:offset]))\n\tb2 := make([]geo.Coordinate, len(s[offset+1:]))\n\tcopy(b1, s[:offset])\n\tcopy(b2, s[offset+1:])\n\tl1 := geo.StepLength(s[:offset+1])\n\tl2 := geo.StepLength(s[offset:])\n\tt1Coord := g.VertexCoordinate(t1)\n\tt2Coord := g.VertexCoordinate(t2)\n\td1, _ := e.To(t1Coord.Lat, t1Coord.Lng, s[0].Lat, s[0].Lng)\n\td2, _ := e.To(t2Coord.Lat, t2Coord.Lng, s[len(s)-1].Lat, s[len(s)-1].Lng)\n\tl1 += d1\n\tl2 += d2\n\ttarget := s[offset]\n\n\tif !forward {\n\t\treverse(b2)\n\t} else {\n\t\treverse(b1)\n\t}\n\n\tvar w []graph.Way\n\tif !oneway {\n\t\tw = make([]graph.Way, 2) \/\/ bidirectional\n\t\tw[0] = graph.Way{Length: l1, Vertex: t1, Steps: b1, Forward: forward, Target: target}\n\t\tw[1] = graph.Way{Length: l2, Vertex: t2, Steps: b2, Forward: forward, Target: target}\n\t} else {\n\t\tw = make([]graph.Way, 1) \/\/ one way\n\t\tif forward {\n\t\t\tw[0] = graph.Way{Length: l2, Vertex: t2, Steps: b2, Forward: forward, Target: target}\n\t\t} else {\n\t\t\tw[0] = graph.Way{Length: l1, Vertex: t1, Steps: b1, Forward: forward, Target: target}\n\t\t}\n\t}\n\treturn w\n}\n\nfunc reverse(steps []geo.Coordinate) {\n\tfor i, j := 0, len(steps)-1; i < j; i, j = i+1, j-1 {\n\t\tsteps[i], steps[j] = steps[j], steps[i]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage net\n\nimport (\n\t\"net\";\n\t\"flag\";\n\t\"io\";\n\t\"os\";\n\t\"testing\";\n)\n\n\/\/ If an IPv6 tunnel is running (see go\/stubl), we can try dialing a real IPv6 address.\nvar ipv6 = flag.Bool(\"ipv6\", false, \"assume ipv6 tunnel is present\")\n\n\/\/ fd is already connected to www.google.com port 80.\n\/\/ Run an HTTP request to fetch the main page.\nfunc fetchGoogle(t *testing.T, fd net.Conn, network, addr string) {\n\treq := io.StringBytes(\"GET \/ HTTP\/1.0\\r\\nHost: www.google.com\\r\\n\\r\\n\");\n\tn, errno := fd.Write(req);\n\n\tbuf := make([]byte, 1000);\n\tn, errno = io.Readn(fd, buf);\n\n\tif n < 1000 {\n\t\tt.Errorf(\"fetchGoogle: short HTTP read from %s %s\", network, addr);\n\t\treturn\n\t}\n}\n\nfunc doDial(t *testing.T, network, addr string) {\n\tfd, err := net.Dial(network, \"\", addr);\n\tif err != nil {\n\t\tt.Errorf(\"net.Dial(%q, %q, %q) = _, %v\", network, \"\", addr, err);\n\t\treturn\n\t}\n\tfetchGoogle(t, fd, network, addr);\n\tfd.Close()\n}\n\nfunc doDialTCP(t *testing.T, network, addr string) {\n\tfd, err := net.DialTCP(network, \"\", addr);\n\tif err != nil {\n\t\tt.Errorf(\"net.DialTCP(%q, %q, %q) = _, %v\", network, \"\", addr, err);\n\t} else {\n\t\tfetchGoogle(t, fd, network, addr);\n\t}\n\tfd.Close()\n}\n\nvar googleaddrs = []string (\n\t\"74.125.19.99:80\",\n\t\"www.google.com:80\",\n\t\"74.125.19.99:http\",\n\t\"www.google.com:http\",\n\t\"074.125.019.099:0080\",\n\t\"[::ffff:74.125.19.99]:80\",\n\t\"[::ffff:4a7d:1363]:80\",\n\t\"[0:0:0:0:0000:ffff:74.125.19.99]:80\",\n\t\"[0:0:0:0:000000:ffff:74.125.19.99]:80\",\n\t\"[0:0:0:0:0:ffff::74.125.19.99]:80\",\n\t\"[2001:4860:0:2001::68]:80\"\t\/\/ ipv6.google.com; removed if ipv6 flag not set\n)\n\nfunc TestDialGoogle(t *testing.T) {\n\t\/\/ If no ipv6 tunnel, don't try the last address.\n\tif !*ipv6 {\n\t\tgoogleaddrs[len(googleaddrs)-1] = \"\"\n\t}\n\n\tfor i := 0; i < len(googleaddrs); i++ {\n\t\taddr := googleaddrs[i];\n\t\tif addr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tt.Logf(\"-- %s --\", addr);\n\t\tdoDial(t, \"tcp\", addr);\n\t\tdoDialTCP(t, \"tcp\", addr);\n\t\tif addr[0] != '[' {\n\t\t\tdoDial(t, \"tcp4\", addr);\n\t\t\tdoDialTCP(t, \"tcp4\", addr)\n\t\t}\n\t\tdoDial(t, \"tcp6\", addr);\n\t\tdoDialTCP(t, \"tcp6\", addr)\n\t}\n}\n<commit_msg>change the URL in the test to avoid a redirection that breaks it in sydney.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage net\n\nimport (\n\t\"net\";\n\t\"flag\";\n\t\"io\";\n\t\"os\";\n\t\"testing\";\n)\n\n\/\/ If an IPv6 tunnel is running (see go\/stubl), we can try dialing a real IPv6 address.\nvar ipv6 = flag.Bool(\"ipv6\", false, \"assume ipv6 tunnel is present\")\n\n\/\/ fd is already connected to the destination, port 80.\n\/\/ Run an HTTP request to fetch the appropriate page.\nfunc fetchGoogle(t *testing.T, fd net.Conn, network, addr string) {\n\treq := io.StringBytes(\"GET \/intl\/en\/privacy.html HTTP\/1.0\\r\\nHost: www.google.com\\r\\n\\r\\n\");\n\tn, errno := fd.Write(req);\n\n\tbuf := make([]byte, 1000);\n\tn, errno = io.Readn(fd, buf);\n\n\tif n < 1000 {\n\t\tt.Errorf(\"fetchGoogle: short HTTP read from %s %s\", network, addr);\n\t\treturn\n\t}\n}\n\nfunc doDial(t *testing.T, network, addr string) {\n\tfd, err := net.Dial(network, \"\", addr);\n\tif err != nil {\n\t\tt.Errorf(\"net.Dial(%q, %q, %q) = _, %v\", network, \"\", addr, err);\n\t\treturn\n\t}\n\tfetchGoogle(t, fd, network, addr);\n\tfd.Close()\n}\n\nfunc doDialTCP(t *testing.T, network, addr string) {\n\tfd, err := net.DialTCP(network, \"\", addr);\n\tif err != nil {\n\t\tt.Errorf(\"net.DialTCP(%q, %q, %q) = _, %v\", network, \"\", addr, err);\n\t} else {\n\t\tfetchGoogle(t, fd, network, addr);\n\t}\n\tfd.Close()\n}\n\nvar googleaddrs = []string (\n\t\"74.125.19.99:80\",\n\t\"www.google.com:80\",\n\t\"74.125.19.99:http\",\n\t\"www.google.com:http\",\n\t\"074.125.019.099:0080\",\n\t\"[::ffff:74.125.19.99]:80\",\n\t\"[::ffff:4a7d:1363]:80\",\n\t\"[0:0:0:0:0000:ffff:74.125.19.99]:80\",\n\t\"[0:0:0:0:000000:ffff:74.125.19.99]:80\",\n\t\"[0:0:0:0:0:ffff::74.125.19.99]:80\",\n\t\"[2001:4860:0:2001::68]:80\"\t\/\/ ipv6.google.com; removed if ipv6 flag not set\n)\n\nfunc TestDialGoogle(t *testing.T) {\n\t\/\/ If no ipv6 tunnel, don't try the last address.\n\tif !*ipv6 {\n\t\tgoogleaddrs[len(googleaddrs)-1] = \"\"\n\t}\n\n\tfor i := 0; i < len(googleaddrs); i++ {\n\t\taddr := googleaddrs[i];\n\t\tif addr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tt.Logf(\"-- %s --\", addr);\n\t\tdoDial(t, \"tcp\", addr);\n\t\tdoDialTCP(t, \"tcp\", addr);\n\t\tif addr[0] != '[' {\n\t\t\tdoDial(t, \"tcp4\", addr);\n\t\t\tdoDialTCP(t, \"tcp4\", addr)\n\t\t}\n\t\tdoDial(t, \"tcp6\", addr);\n\t\tdoDialTCP(t, \"tcp6\", addr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package liblumberjack\n\nimport (\n \"bytes\"\n \"encoding\/json\"\n zmq \"github.com\/alecthomas\/gozmq\"\n \"log\"\n \"math\/big\"\n \"syscall\"\n \"time\"\n \/\/\"syscall\"\n \"compress\/zlib\"\n \/\/\"compress\/flate\"\n \"crypto\/rand\"\n)\n\nvar context *zmq.Context\n\nfunc init() {\n context, _ = zmq.NewContext()\n}\n\n\/\/ Forever Faithful Socket\ntype FFS struct {\n Endpoints []string \/\/ set of endpoints available to ship to\n\n \/\/ Socket type; zmq.REQ, etc\n SocketType zmq.SocketType\n\n \/\/ Various timeout values\n SendTimeout time.Duration\n RecvTimeout time.Duration\n\n endpoint string \/\/ the current endpoint in use\n socket *zmq.Socket \/\/ the current zmq socket\n connected bool \/\/ are we connected?\n}\n\nfunc (s *FFS) Send(data []byte, flags zmq.SendRecvOption) (err error) {\n for {\n s.ensure_connect()\n\n pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLOUT}}\n count, err := zmq.Poll(pi, s.SendTimeout)\n if count == 0 {\n \/\/ not ready in time, fail the socket and try again.\n log.Printf(\"%s: timed out waiting to Send(): %s\\n\",\n s.endpoint, err)\n s.fail_socket()\n } else {\n \/\/log.Printf(\"%s: sending %d payload\\n\", s.endpoint, len(data))\n err = s.socket.Send(data, flags)\n if err != nil {\n log.Printf(\"%s: Failed to Send() %d byte message: %s\\n\",\n s.endpoint, len(data), err)\n s.fail_socket()\n } else {\n \/\/ Success!\n break\n }\n }\n }\n return\n}\n\nfunc (s *FFS) Recv(flags zmq.SendRecvOption) (data []byte, err error) {\n s.ensure_connect()\n\n pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLIN}}\n count, err := zmq.Poll(pi, s.RecvTimeout)\n if count == 0 {\n \/\/ not ready in time, fail the socket and try again.\n s.fail_socket()\n\n err = syscall.ETIMEDOUT\n log.Printf(\"%s: timed out waiting to Recv(): %s\\n\",\n s.endpoint, err)\n return nil, err\n } else {\n data, err = s.socket.Recv(flags)\n if err != nil {\n log.Printf(\"%s: Failed to Recv() %d byte message: %s\\n\",\n s.endpoint, len(data), err)\n s.fail_socket()\n return nil, err\n } else {\n \/\/ Success!\n }\n }\n return\n}\n\nfunc (s *FFS) Close() (err error) {\n err = s.socket.Close()\n if err != nil {\n return\n }\n\n s.socket = nil\n s.connected = false\n return nil\n}\n\nfunc (s *FFS) ensure_connect() {\n if s.connected {\n return\n }\n\n if s.SendTimeout == 0 {\n s.SendTimeout = 1 * time.Second\n }\n if s.RecvTimeout == 0 {\n s.RecvTimeout = 1 * time.Second\n }\n\n if s.SocketType == 0 {\n log.Panicf(\"No socket type set on zmq socket\")\n }\n if s.socket != nil {\n s.socket.Close()\n s.socket = nil\n }\n\n var err error\n s.socket, err = context.NewSocket(s.SocketType)\n if err != nil {\n log.Panicf(\"zmq.NewSocket(%d) failed: %s\\n\", s.SocketType, err)\n }\n\n \/\/s.socket.SetSockOptUInt64(zmq.HWM, 1)\n \/\/s.socket.SetSockOptInt(zmq.RCVTIMEO, int(s.RecvTimeout.Nanoseconds() \/ 1000000))\n \/\/s.socket.SetSockOptInt(zmq.SNDTIMEO, int(s.SendTimeout.Nanoseconds() \/ 1000000))\n\n \/\/ Abort anything in-flight on a socket that's closed.\n s.socket.SetSockOptInt(zmq.LINGER, 0)\n\n for !s.connected {\n var max *big.Int = big.NewInt(int64(len(s.Endpoints)))\n i, _ := rand.Int(rand.Reader, max)\n s.endpoint = s.Endpoints[i.Int64()]\n err := s.socket.Connect(s.endpoint)\n if err != nil {\n log.Printf(\"%s: Error connecting: %s\\n\", s.endpoint, err)\n time.Sleep(500 * time.Millisecond)\n continue\n }\n\n \/\/ No error, we're connected.\n s.connected = true\n }\n}\n\nfunc (s *FFS) fail_socket() {\n if !s.connected {\n return\n }\n s.Close()\n}\n\nfunc Publish(input chan []*FileEvent, server_list []string,\n server_timeout time.Duration) {\n var buffer bytes.Buffer\n \/\/key := \"abcdefghijklmnop\"\n \/\/cipher, err := aes.NewCipher([]byte(key))\n\n socket := FFS{\n Endpoints: server_list,\n SocketType: zmq.REQ,\n RecvTimeout: server_timeout,\n SendTimeout: server_timeout,\n }\n \/\/defer socket.Close()\n\n for events := range input {\n \/\/ got a bunch of events, ship them out.\n log.Printf(\"Spooler gave me %d events\\n\", len(events))\n\n \/\/ Serialize with msgpack\n data, err := json.Marshal(events)\n \/\/ TODO(sissel): chefk error\n _ = err\n \/\/log.Printf(\"json serialized %d bytes\\n\", len(data))\n\n \/\/ Compress it\n \/\/ A new compressor is used for every payload of events so\n \/\/ that any individual payload can be decompressed alone.\n \/\/ TODO(sissel): Make compression level tunable\n compressor, _ := zlib.NewWriterLevel(&buffer, 3)\n buffer.Truncate(0)\n _, err = compressor.Write(data)\n err = compressor.Flush()\n compressor.Close()\n \/\/log.Printf(\"compressed %d bytes\\n\", buffer.Len())\n \/\/ TODO(sissel): check err\n\n \/\/ TODO(sissel): implement security\/encryption\/etc\n\n \/\/ Send full payload over zeromq REQ\/REP\n \/\/ TODO(sissel): check error\n \/\/buffer.Write(data)\n\n \/\/ Loop forever trying to send.\n \/\/ This will cause reconnects\/etc on failures automatically\n for {\n err = socket.Send(buffer.Bytes(), 0)\n data, err = socket.Recv(0)\n if err == nil {\n \/\/ success!\n break\n }\n }\n \/\/ TODO(sissel): Check data value of reply?\n\n \/\/ TODO(sissel): retry on failure or timeout\n \/\/ TODO(sissel): notify registrar of success\n } \/* for each event payload *\/\n} \/\/ Publish\n<commit_msg>- get the publisher using sodium for crypto<commit_after>package liblumberjack\n\nimport (\n \"bytes\"\n \"encoding\/json\"\n zmq \"github.com\/alecthomas\/gozmq\"\n \"log\"\n \"math\/big\"\n \"syscall\"\n \"time\"\n \"compress\/zlib\"\n \"crypto\/rand\"\n \"sodium\"\n)\n\nvar context *zmq.Context\n\nfunc init() {\n context, _ = zmq.NewContext()\n}\n\n\/\/ Forever Faithful Socket\ntype FFS struct {\n Endpoints []string \/\/ set of endpoints available to ship to\n\n \/\/ Socket type; zmq.REQ, etc\n SocketType zmq.SocketType\n\n \/\/ Various timeout values\n SendTimeout time.Duration\n RecvTimeout time.Duration\n\n endpoint string \/\/ the current endpoint in use\n socket *zmq.Socket \/\/ the current zmq socket\n connected bool \/\/ are we connected?\n}\n\nfunc (s *FFS) Send(data []byte, flags zmq.SendRecvOption) (err error) {\n for {\n s.ensure_connect()\n\n pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLOUT}}\n count, err := zmq.Poll(pi, s.SendTimeout)\n if count == 0 {\n \/\/ not ready in time, fail the socket and try again.\n log.Printf(\"%s: timed out waiting to Send(): %s\\n\",\n s.endpoint, err)\n s.fail_socket()\n } else {\n \/\/log.Printf(\"%s: sending %d payload\\n\", s.endpoint, len(data))\n err = s.socket.Send(data, flags)\n if err != nil {\n log.Printf(\"%s: Failed to Send() %d byte message: %s\\n\",\n s.endpoint, len(data), err)\n s.fail_socket()\n } else {\n \/\/ Success!\n break\n }\n }\n }\n return\n}\n\nfunc (s *FFS) Recv(flags zmq.SendRecvOption) (data []byte, err error) {\n s.ensure_connect()\n\n pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLIN}}\n count, err := zmq.Poll(pi, s.RecvTimeout)\n if count == 0 {\n \/\/ not ready in time, fail the socket and try again.\n s.fail_socket()\n\n err = syscall.ETIMEDOUT\n log.Printf(\"%s: timed out waiting to Recv(): %s\\n\",\n s.endpoint, err)\n return nil, err\n } else {\n data, err = s.socket.Recv(flags)\n if err != nil {\n log.Printf(\"%s: Failed to Recv() %d byte message: %s\\n\",\n s.endpoint, len(data), err)\n s.fail_socket()\n return nil, err\n } else {\n \/\/ Success!\n }\n }\n return\n}\n\nfunc (s *FFS) Close() (err error) {\n err = s.socket.Close()\n if err != nil {\n return\n }\n\n s.socket = nil\n s.connected = false\n return nil\n}\n\nfunc (s *FFS) ensure_connect() {\n if s.connected {\n return\n }\n\n if s.SendTimeout == 0 {\n s.SendTimeout = 1 * time.Second\n }\n if s.RecvTimeout == 0 {\n s.RecvTimeout = 1 * time.Second\n }\n\n if s.SocketType == 0 {\n log.Panicf(\"No socket type set on zmq socket\")\n }\n if s.socket != nil {\n s.socket.Close()\n s.socket = nil\n }\n\n var err error\n s.socket, err = context.NewSocket(s.SocketType)\n if err != nil {\n log.Panicf(\"zmq.NewSocket(%d) failed: %s\\n\", s.SocketType, err)\n }\n\n \/\/s.socket.SetSockOptUInt64(zmq.HWM, 1)\n \/\/s.socket.SetSockOptInt(zmq.RCVTIMEO, int(s.RecvTimeout.Nanoseconds() \/ 1000000))\n \/\/s.socket.SetSockOptInt(zmq.SNDTIMEO, int(s.SendTimeout.Nanoseconds() \/ 1000000))\n\n \/\/ Abort anything in-flight on a socket that's closed.\n s.socket.SetSockOptInt(zmq.LINGER, 0)\n\n for !s.connected {\n var max *big.Int = big.NewInt(int64(len(s.Endpoints)))\n i, _ := rand.Int(rand.Reader, max)\n s.endpoint = s.Endpoints[i.Int64()]\n err := s.socket.Connect(s.endpoint)\n if err != nil {\n log.Printf(\"%s: Error connecting: %s\\n\", s.endpoint, err)\n time.Sleep(500 * time.Millisecond)\n continue\n }\n\n \/\/ No error, we're connected.\n s.connected = true\n }\n}\n\nfunc (s *FFS) fail_socket() {\n if !s.connected {\n return\n }\n s.Close()\n}\n\nfunc Publish(input chan []*FileEvent,\n server_list []string,\n public_key [sodium.PUBLICKEYBYTES]byte,\n secret_key [sodium.SECRETKEYBYTES]byte,\n server_timeout time.Duration) {\n var buffer bytes.Buffer\n session := sodium.NewSession(public_key, secret_key)\n\n socket := FFS{\n Endpoints: server_list,\n SocketType: zmq.REQ,\n RecvTimeout: server_timeout,\n SendTimeout: server_timeout,\n }\n \/\/defer socket.Close()\n\n for events := range input {\n \/\/ got a bunch of events, ship them out.\n log.Printf(\"Publisher received %d events\\n\", len(events))\n\n data, err := json.Marshal(events)\n \/\/ TODO(sissel): check error\n _ = err\n\n \/\/ Compress it\n \/\/ A new compressor is used for every payload of events so\n \/\/ that any individual payload can be decompressed alone.\n \/\/ TODO(sissel): Make compression level tunable\n compressor, _ := zlib.NewWriterLevel(&buffer, 3)\n buffer.Truncate(0)\n _, err = compressor.Write(data)\n err = compressor.Flush()\n compressor.Close()\n\n \/\/log.Printf(\"compressed %d bytes\\n\", buffer.Len())\n \/\/ TODO(sissel): check err\n \/\/ TODO(sissel): implement security\/encryption\/etc\n\n \/\/ Send full payload over zeromq REQ\/REP\n \/\/ TODO(sissel): check error\n \/\/buffer.Write(data)\n ciphertext, nonce := session.Box(buffer.Bytes())\n\n log.Printf(\"plaintext: %d\\n\", len(data))\n log.Printf(\"compressed: %d\\n\", buffer.Len())\n log.Printf(\"ciphertext: %d\\n\", len(ciphertext))\n log.Printf(\"nonce: %d\\n\", len(nonce))\n\n \/\/ TODO(sissel): figure out encoding for ciphertext + nonce\n \/\/ TODO(sissel): Figure out the protocol for envelopes\n \/\/ - compression envelope:\n \/\/ - what compression system was used\n \/\/ - any parameters?\n \/\/ - payload\n \/\/ - encryption envelope:\n \/\/ - what encryption system was used\n \/\/ - any public parameters necessary to decrypt (public key, nonce)\n \/\/ - payload\n\n\n \/\/ Loop forever trying to send.\n \/\/ This will cause reconnects\/etc on failures automatically\n for {\n err = socket.Send(buffer.Bytes(), 0)\n data, err = socket.Recv(0)\n if err == nil {\n \/\/ success!\n break\n }\n }\n \/\/ TODO(sissel): Check data value of reply?\n\n \/\/ TODO(sissel): retry on failure or timeout\n \/\/ TODO(sissel): notify registrar of success\n } \/* for each event payload *\/\n} \/\/ Publish\n<|endoftext|>"} {"text":"<commit_before>package mcstore\n\nimport (\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/ws\/rest\"\n\t\"github.com\/materials-commons\/mcstore\/server\/mcstore\/pkg\/filters\"\n\t\"gopkg.in\/olivere\/elastic.v2\"\n)\n\ntype searchResource struct {\n\tlog *app.Logger\n}\n\ntype Query struct {\n\tQString string `json:\"query_string\"`\n}\n\nfunc newSearchResource() rest.Service {\n\treturn &searchResource{\n\t\tlog: app.NewLog(\"resource\", \"search\"),\n\t}\n}\n\nfunc (r *searchResource) WebService() *restful.WebService {\n\tws := new(restful.WebService)\n\n\tws.Path(\"\/search\").Produces(restful.MIME_JSON).Consumes(restful.MIME_JSON)\n\n\tws.Route(ws.POST(\"\/project\/{project}\").\n\t\tFilter(filters.ProjectAccess).\n\t\tFilter(filters.SearchClient).\n\t\tTo(rest.RouteHandler(r.searchProject)).\n\t\tParam(ws.PathParameter(\"project\", \"project id\").DataType(\"string\")).\n\t\tDoc(\"Searches all project items - files, processes, samples, notes, users\").\n\t\tReads(Query{}).\n\t\tWrites(elastic.SearchHits{}))\n\n\tws.Route(ws.POST(\"\/project\/{project}\/files\").\n\t\tFilter(filters.ProjectAccess).\n\t\tFilter(filters.SearchClient).\n\t\tTo(rest.RouteHandler(r.searchProjectFiles)).\n\t\tParam(ws.PathParameter(\"project\", \"project id\").DataType(\"string\")).\n\t\tDoc(\"Searches files in project\").\n\t\tReads(Query{}).\n\t\tWrites(elastic.SearchHits{}))\n\n\treturn ws\n}\n\nfunc (r *searchResource) searchProject(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\tvar query Query\n\tif err := request.ReadEntity(&query); err != nil {\n\t\tr.log.Debugf(\"Failed reading query: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tproject := request.Attribute(\"project\").(schema.Project)\n\tclient := request.Attribute(\"searchclient\").(*elastic.Client)\n\tq := createQuery(query, project.ID)\n\tresults, err := client.Search().Index(\"mc\").Query(q).Size(20).Do()\n\tif err != nil {\n\t\tr.log.Infof(\"Query failed: %s\", err)\n\t\treturn nil, err\n\t}\n\treturn results.Hits, nil\n}\n\n\/\/func (r *searchResource) searchUserProjects(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\/\/\treturn nil, nil\n\/\/}\n\nfunc (r *searchResource) searchProjectFiles(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\tvar query Query\n\n\tif err := request.ReadEntity(&query); err != nil {\n\t\tr.log.Debugf(\"Failed reading query: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tproject := request.Attribute(\"project\").(schema.Project)\n\tclient := request.Attribute(\"searchclient\").(*elastic.Client)\n\tq := createQuery(query, project.ID)\n\tresults, err := client.Search().Index(\"mc\").Type(\"files\").Query(q).Size(20).Do()\n\tif err != nil {\n\t\tr.log.Infof(\"Query failed: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn results.Hits, nil\n}\n\nfunc createQuery(query Query, projectID string) elastic.Query {\n\ttermQueryProj := elastic.NewTermQuery(\"project_id\", projectID)\n\tuserQuery := elastic.NewQueryStringQuery(query.QString)\n\tboolQuery := elastic.NewBoolQuery()\n\tboolQuery = boolQuery.Must(termQueryProj, userQuery)\n\treturn &boolQuery\n}\n<commit_msg>Send back top 100.<commit_after>package mcstore\n\nimport (\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/ws\/rest\"\n\t\"github.com\/materials-commons\/mcstore\/server\/mcstore\/pkg\/filters\"\n\t\"gopkg.in\/olivere\/elastic.v2\"\n)\n\ntype searchResource struct {\n\tlog *app.Logger\n}\n\ntype Query struct {\n\tQString string `json:\"query_string\"`\n}\n\nfunc newSearchResource() rest.Service {\n\treturn &searchResource{\n\t\tlog: app.NewLog(\"resource\", \"search\"),\n\t}\n}\n\nfunc (r *searchResource) WebService() *restful.WebService {\n\tws := new(restful.WebService)\n\n\tws.Path(\"\/search\").Produces(restful.MIME_JSON).Consumes(restful.MIME_JSON)\n\n\tws.Route(ws.POST(\"\/project\/{project}\").\n\t\tFilter(filters.ProjectAccess).\n\t\tFilter(filters.SearchClient).\n\t\tTo(rest.RouteHandler(r.searchProject)).\n\t\tParam(ws.PathParameter(\"project\", \"project id\").DataType(\"string\")).\n\t\tDoc(\"Searches all project items - files, processes, samples, notes, users\").\n\t\tReads(Query{}).\n\t\tWrites(elastic.SearchHits{}))\n\n\tws.Route(ws.POST(\"\/project\/{project}\/files\").\n\t\tFilter(filters.ProjectAccess).\n\t\tFilter(filters.SearchClient).\n\t\tTo(rest.RouteHandler(r.searchProjectFiles)).\n\t\tParam(ws.PathParameter(\"project\", \"project id\").DataType(\"string\")).\n\t\tDoc(\"Searches files in project\").\n\t\tReads(Query{}).\n\t\tWrites(elastic.SearchHits{}))\n\n\treturn ws\n}\n\nfunc (r *searchResource) searchProject(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\tvar query Query\n\tif err := request.ReadEntity(&query); err != nil {\n\t\tr.log.Debugf(\"Failed reading query: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tproject := request.Attribute(\"project\").(schema.Project)\n\tclient := request.Attribute(\"searchclient\").(*elastic.Client)\n\tq := createQuery(query, project.ID)\n\tresults, err := client.Search().Index(\"mc\").Query(q).Size(100).Do()\n\tif err != nil {\n\t\tr.log.Infof(\"Query failed: %s\", err)\n\t\treturn nil, err\n\t}\n\treturn results.Hits, nil\n}\n\n\/\/func (r *searchResource) searchUserProjects(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\/\/\treturn nil, nil\n\/\/}\n\nfunc (r *searchResource) searchProjectFiles(request *restful.Request, response *restful.Response, user schema.User) (interface{}, error) {\n\tvar query Query\n\n\tif err := request.ReadEntity(&query); err != nil {\n\t\tr.log.Debugf(\"Failed reading query: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tproject := request.Attribute(\"project\").(schema.Project)\n\tclient := request.Attribute(\"searchclient\").(*elastic.Client)\n\tq := createQuery(query, project.ID)\n\tresults, err := client.Search().Index(\"mc\").Type(\"files\").Query(q).Size(100).Do()\n\tif err != nil {\n\t\tr.log.Infof(\"Query failed: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn results.Hits, nil\n}\n\nfunc createQuery(query Query, projectID string) elastic.Query {\n\ttermQueryProj := elastic.NewTermQuery(\"project_id\", projectID)\n\tuserQuery := elastic.NewQueryStringQuery(query.QString)\n\tboolQuery := elastic.NewBoolQuery()\n\tboolQuery = boolQuery.Must(termQueryProj, userQuery)\n\treturn &boolQuery\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 - 2017 Huawei Technologies Co., Ltd. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage template\n\nimport (\n\t\"regexp\"\n)\n\n\/\/getTemplateContent get the template content by given version, if no content\n\/\/for the version, try its parenet version(like, 1.7.8 -> 1.7)\nfunc getTemplateContent(templates map[string]string, version string) string {\n\ttplContent := templates[version]\n\tif tplContent == \"\" {\n\t\tparentVersion := parentVersionPattern.FindString(version)\n\t\t\/\/ TODO Log the Warnning\n\t\t\/\/ objects.WriteLog(fmt.Sprintf(\"No ca template for version %s, trying to use parent version %s\", version, parentVersion), stdout, timestamp, d)\n\n\t\ttplContent = templates[parentVersion]\n\t\tif tplContent == \"\" {\n\t\t\t\/\/ return files, fmt.Errorf(\"No template for version %s or %s\", version, parentVersion)\n\t\t\t\/\/ TODO Log the error\n\t\t}\n\t}\n\treturn tplContent\n}\n\nvar parentVersionPattern *regexp.Regexp = regexp.MustCompile(`[a-z]+-\\d{1}\\.\\d{1}`)\n\nfunc getParentVersion(version string) string {\n\tparentVersion := parentVersionPattern.FindString(version)\n\treturn parentVersion\n}\n<commit_msg>Update the regex to check k8s version. Fix the bug that number greather than 10 cannot be parsed correctly.<commit_after>\/*\nCopyright 2016 - 2017 Huawei Technologies Co., Ltd. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage template\n\nimport (\n\t\"regexp\"\n)\n\n\/\/getTemplateContent get the template content by given version, if no content\n\/\/for the version, try its parenet version(like, 1.7.8 -> 1.7)\nfunc getTemplateContent(templates map[string]string, version string) string {\n\ttplContent := templates[version]\n\tif tplContent == \"\" {\n\t\tparentVersion := parentVersionPattern.FindString(version)\n\t\t\/\/ TODO Log the Warnning\n\t\t\/\/ objects.WriteLog(fmt.Sprintf(\"No ca template for version %s, trying to use parent version %s\", version, parentVersion), stdout, timestamp, d)\n\n\t\ttplContent = templates[parentVersion]\n\t\tif tplContent == \"\" {\n\t\t\t\/\/ return files, fmt.Errorf(\"No template for version %s or %s\", version, parentVersion)\n\t\t\t\/\/ TODO Log the error\n\t\t}\n\t}\n\treturn tplContent\n}\n\nvar parentVersionPattern *regexp.Regexp = regexp.MustCompile(`[a-z]+-\\d+\\.\\d+`)\n\nfunc getParentVersion(version string) string {\n\tparentVersion := parentVersionPattern.FindString(version)\n\treturn parentVersion\n}\n<|endoftext|>"} {"text":"<commit_before>package main \n\nimport (\n \".\/comms\"\n \".\/EncryptionEngine\"\n\n \"flag\"\n\n \"net\"\n \"fmt\"\n \"io\"\n\n \"net\/http\"\n \"net\/url\"\n \"html\"\n \"log\"\n \"encoding\/binary\"\n )\n\ntype State struct {\n Kc [16]byte\n clk [4]byte\n BD_ADDR [6]byte\n}\n\n\nfunc server_main(s *State) net.Conn {\n ln, err := net.Listen(\"tcp\", \":8080\")\n \n if err != nil {\n fmt.Println(\"net.Listen failed:\", err)\n }\n\n conn, err := ln.Accept()\n \n fmt.Printf(\"Accepted Connection\\n\")\n \n if err != nil {\n fmt.Println(\"ln.Accept failed:\", err)\n }\n\n s.BD_ADDR = [6]byte{}\n s.Kc = [16]byte{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}\n s.clk = [4]byte{}\n\n comms.Send_neg(conn, [6]byte{})\n comms.Send_init(conn, 0, [16]byte{}, [16]byte{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0})\n\n return conn\n}\n\nfunc client_main() net.Conn {\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:8080\")\n\n if err != nil {\n fmt.Println(\"net.Dial failed:\", err)\n }\n\n return conn\n}\n\n\nfunc receiver(conn io.Reader, s *State) {\n LOOP:\n for {\n packet_type := comms.Recv_packet(conn)\n\n switch packet_type {\n case 0: \n s.BD_ADDR = comms.Recv_neg(conn)\n case 1: \n var clock uint32\n clock, _, s.Kc = comms.Recv_init(conn)\n binary.BigEndian.PutUint32(s.clk[:], clock)\n case 2: \n var clock uint32\n clock, msg := comms.Recv_data(conn)\n binary.BigEndian.PutUint32(s.clk[:], clock)\n\n keyStream := EncryptionEngine.GetKeyStream(s.Kc, s.BD_ADDR, s.clk, len(msg)) \n msg = EncryptionEngine.Encrypt(msg, keyStream) \n\n _, err := http.PostForm(\"http:\/\/127.0.0.1:9999\/bar\", \n url.Values{\"msg\": {string(msg)}})\n \n if err != nil {\n fmt.Println(\"There was an http error: \", err)\n }\n\n case 99: break LOOP\n }\n }\n}\n\nfunc main() {\n isServerPtr := flag.Bool(\"server\", false, \"Run in server mode?\")\n flag.Parse()\n fmt.Println(\"Is Server: \", *isServerPtr)\n\n var conn net.Conn\n var p string\n\n var state State\n \n if *isServerPtr {\n conn = server_main(&state)\n p = \":8888\"\n } else {\n conn = client_main()\n p = \":6666\"\n }\n\n\n go receiver(conn, &state)\n \n http.HandleFunc(\"\/bar\", func(w http.ResponseWriter, r *http.Request) {\n \n if r.Method == \"POST\" {\n r.ParseForm()\n \n fmt.Println(\"Requested to send: \", r.PostForm[\"msg\"][0])\n \n fmt.Fprintf(w, html.EscapeString(\"Sending packet\"))\n\n pt := []byte(r.PostForm[\"msg\"][0]) \n \n keyStream := EncryptionEngine.GetKeyStream(\n state.Kc, state.BD_ADDR, state.clk, len(pt)) \n msg := EncryptionEngine.Encrypt(pt, keyStream) \n\n clock := binary.BigEndian.Uint32(state.clk[:])\n \n comms.Send_data(conn, clock, msg)\n }\n })\n\n log.Fatal(http.ListenAndServe(p, nil))\n\n conn.Close()\n}\n<commit_msg>Added some prints to show what's going on<commit_after>package main \n\nimport (\n \".\/comms\"\n \".\/EncryptionEngine\"\n\n \"flag\"\n\n \"net\"\n \"fmt\"\n \"io\"\n\n \"net\/http\"\n \"net\/url\"\n \"html\"\n \"log\"\n \"encoding\/binary\"\n )\n\ntype State struct {\n Kc [16]byte\n clk [4]byte\n BD_ADDR [6]byte\n}\n\n\nfunc server_main(s *State) net.Conn {\n ln, err := net.Listen(\"tcp\", \":8080\")\n \n if err != nil {\n fmt.Println(\"net.Listen failed:\", err)\n }\n\n conn, err := ln.Accept()\n \n fmt.Printf(\"Accepted Connection\\n\")\n \n if err != nil {\n fmt.Println(\"ln.Accept failed:\", err)\n }\n\n s.BD_ADDR = [6]byte{}\n s.Kc = [16]byte{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}\n s.clk = [4]byte{}\n\n comms.Send_neg(conn, [6]byte{})\n comms.Send_init(conn, 0, [16]byte{}, [16]byte{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0})\n\n return conn\n}\n\nfunc client_main() net.Conn {\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:8080\")\n\n if err != nil {\n fmt.Println(\"net.Dial failed:\", err)\n }\n\n return conn\n}\n\n\nfunc receiver(conn io.Reader, s *State) {\n LOOP:\n for {\n packet_type := comms.Recv_packet(conn)\n\n switch packet_type {\n case 0: \n s.BD_ADDR = comms.Recv_neg(conn)\n case 1: \n var clock uint32\n clock, _, s.Kc = comms.Recv_init(conn)\n binary.BigEndian.PutUint32(s.clk[:], clock)\n case 2: \n var clock uint32\n clock, msg := comms.Recv_data(conn)\n binary.BigEndian.PutUint32(s.clk[:], clock)\n\n fmt.Println(\"Recieved: \", string(msg))\n\n keyStream := EncryptionEngine.GetKeyStream(s.Kc, s.BD_ADDR, s.clk, len(msg)) \n msg = EncryptionEngine.Encrypt(msg, keyStream) \n\n fmt.Println(\"Decypted as: \", string(msg))\n\n _, err := http.PostForm(\"http:\/\/127.0.0.1:9999\/bar\", \n url.Values{\"msg\": {string(msg)}})\n \n if err != nil {\n fmt.Println(\"There was an http error: \", err)\n }\n\n case 99: break LOOP\n }\n }\n}\n\nfunc main() {\n isServerPtr := flag.Bool(\"server\", false, \"Run in server mode?\")\n flag.Parse()\n fmt.Println(\"Is Server: \", *isServerPtr)\n\n var conn net.Conn\n var p string\n\n var state State\n \n if *isServerPtr {\n conn = server_main(&state)\n p = \":8888\"\n } else {\n conn = client_main()\n p = \":6666\"\n }\n\n\n go receiver(conn, &state)\n \n http.HandleFunc(\"\/bar\", func(w http.ResponseWriter, r *http.Request) {\n \n if r.Method == \"POST\" {\n r.ParseForm()\n \n fmt.Println(\"Sending: \", r.PostForm[\"msg\"][0])\n \n fmt.Fprintf(w, html.EscapeString(\"Sending packet\"))\n\n pt := []byte(r.PostForm[\"msg\"][0]) \n \n keyStream := EncryptionEngine.GetKeyStream(\n state.Kc, state.BD_ADDR, state.clk, len(pt)) \n msg := EncryptionEngine.Encrypt(pt, keyStream) \n\n clock := binary.BigEndian.Uint32(state.clk[:])\n \n comms.Send_data(conn, clock, msg)\n \n fmt.Println(\"Encrypted as: \", string(msg))\n }\n })\n\n log.Fatal(http.ListenAndServe(p, nil))\n\n conn.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Luke Shumaker\n\npackage store\n\nimport (\t\n\t\"encoding\/json\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\ntype TwilioNumber struct {\n\tId int64\n\tNumber string\n\t\/\/ TODO\n}\n\nfunc (o TwilioNumber) dbSchema(db *gorm.DB) error {\n\treturn db.CreateTable(&o).Error\n}\n\ntype TwilioPool struct {\n\tUserId string\n\tGroupId string\n\tNumberId string\n}\n\ntype Incoming_numbers struct {\n\tPhone_numbers []Incoming_number `json:\"incoming_phone_numbers\"`\n}\n\ntype Incoming_number struct {\n\tNumber string `json:\"phone_number\"`\n}\n\nfunc (o TwilioPool) dbSchema(db *gorm.DB) error {\n\treturn db.CreateTable(&o).\n\t\tAddForeignKey(\"user_id\", \"users(id)\", \"CASCADE\", \"RESTRICT\").\n\t\tAddForeignKey(\"group_id\", \"groups(id)\", \"CASCADE\", \"RESTRICT\").\n\t\tAddForeignKey(\"number_id\", \"twilio_numbers(id)\", \"RESTRICT\", \"RESTRICT\").\n\t\tError\n}\n\nfunc GetAllTwilioNumbers(db *gorm.DB) (ret []TwilioNumber) {\n\tpanic(\"TODO\")\n}\n\nfunc GetAllExistingTwilioNumbers() []string {\n\n\t\/\/ account SID for Twilio account\n\taccount_sid := os.Getenv(\"TWILIO_ACCOUNTID\")\n\n\t\/\/ Authorization token for Twilio account\n\tauth_token := os.Getenv(\"TWILIO_TOKEN\")\n\n\t\/\/ gets url for the numbers we own in the Twilio Account\n\tincoming_num_url := \"https:\/\/api.twilio.com\/2010-04-01\/Accounts\/\" + account_sid + \"\/IncomingPhoneNumbers.json\"\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"GET\", incoming_num_url, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\treq.SetBasicAuth(account_sid, auth_token)\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\t\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Println(resp.Status)\n\t\treturn nil\n\t}\n\n\tnumbers := Incoming_numbers{}\n\tif err := json.Unmarshal(body, &numbers); err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tif len(numbers.Phone_numbers) > 0 {\t\n\n\t\texisting_numbers := make([]string, len(numbers.Phone_numbers))\n\n\t\tfor i, num := range numbers.Phone_numbers {\n\t\t\texisting_numbers[i] = num.Number\n\t\t}\n\n\t\treturn existing_numbers\n\n\t} else {\n\t\tlog.Println(\"You do not have a number in your Twilio account\")\n\t\treturn nil\n\t}\t\n\n}\n<commit_msg>added functions for twilopool<commit_after>\/\/ Copyright 2015 Luke Shumaker\n\/\/ Copyright 2015 Zhandos Suleimenov\npackage store\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\ntype TwilioNumber struct {\n\tId int64 `json:\"number_id\"`\n\tNumber string `json:\"-\"`\n\t\/\/ TODO\n}\n\nfunc (o TwilioNumber) dbSchema(db *gorm.DB) error {\n\treturn db.CreateTable(&o).Error\n}\n\ntype TwilioPool struct {\n\tUserId string `json:\"user_id\"`\n\tGroupId string `json:\"group_id\"`\n\tNumberId int64 `json:\"number_id\"`\n}\n\ntype Incoming_numbers struct {\n\tPhone_numbers []Incoming_number `json:\"incoming_phone_numbers\"`\n}\n\ntype Incoming_number struct {\n\tNumber string `json:\"phone_number\"`\n}\n\nfunc (o TwilioPool) dbSchema(db *gorm.DB) error {\n\treturn db.CreateTable(&o).\n\t\tAddForeignKey(\"user_id\", \"users(id)\", \"CASCADE\", \"RESTRICT\").\n\t\tAddForeignKey(\"group_id\", \"groups(id)\", \"CASCADE\", \"RESTRICT\").\n\t\tAddForeignKey(\"number_id\", \"twilio_numbers(id)\", \"RESTRICT\", \"RESTRICT\").\n\t\tError\n}\n\nfunc GetAllTwilioNumbers(db *gorm.DB) (ret []TwilioNumber) {\n\tvar twilio_num []TwilioNumber\n\tif result := db.Find(&twilio_num); result.Error != nil {\n\t\tif result.RecordNotFound() {\n\t\t\treturn nil\n\t\t}\n\t\tpanic(result.Error)\n\t}\n\treturn twilio_num\n\n}\n\nfunc GetTwilioNumberByUserAndGroup(db *gorm.DB, username string, groupname string) string {\n\n\tvar o TwilioPool\n\tif result := db.Where(&TwilioPool{UserId: username, GroupId: groupname}).First(&o); result.Error != nil {\n\t\tif result.RecordNotFound() {\n\t\t\tlog.Println(\"RecordNotFound\")\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(result.Error)\n\t}\n\n\tvar twilio_num TwilioNumber\n\tif result := db.Where(\"number_id = ?\", o.NumberId).First(&twilio_num); result.Error != nil {\n\t\tif result.RecordNotFound() {\n\t\t\tlog.Println(\"RecordNotFound\")\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(result.Error)\n\t}\n\n\treturn twilio_num.Number\n}\n\nfunc GetAllExistingTwilioNumbers() []string {\n\n\t\/\/ account SID for Twilio account\n\taccount_sid := os.Getenv(\"TWILIO_ACCOUNTID\")\n\n\t\/\/ Authorization token for Twilio account\n\tauth_token := os.Getenv(\"TWILIO_TOKEN\")\n\n\t\/\/ gets url for the numbers we own in the Twilio Account\n\tincoming_num_url := \"https:\/\/api.twilio.com\/2010-04-01\/Accounts\/\" + account_sid + \"\/IncomingPhoneNumbers.json\"\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"GET\", incoming_num_url, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\treq.SetBasicAuth(account_sid, auth_token)\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Println(resp.Status)\n\t\treturn nil\n\t}\n\n\tnumbers := Incoming_numbers{}\n\tif err := json.Unmarshal(body, &numbers); err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tif len(numbers.Phone_numbers) > 0 {\n\n\t\texisting_numbers := make([]string, len(numbers.Phone_numbers))\n\n\t\tfor i, num := range numbers.Phone_numbers {\n\t\t\texisting_numbers[i] = num.Number\n\t\t}\n\n\t\treturn existing_numbers\n\n\t} else {\n\t\tlog.Println(\"You do not have a number in your Twilio account\")\n\t\treturn nil\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package discovery\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrNetwork = errors.New(\"pachyderm: network failure\")\n\tErrNotFound = errors.New(\"pachyderm: Not found\")\n\tErrIsDir = errors.New(\"pachyderm: Found dir\")\n\tErrWrongOldValue = errors.New(\"pachyderm: Wrong old value\")\n\tErrExists = errors.New(\"pachyderm: Key already exists\")\n)\n\ntype Client interface {\n\t\/\/ Close closes the underlying connection\n\t\/\/ Errors: ErrNetwork\n\tClose() error\n\t\/\/ Get gets the value of a key\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrIsDir\n\tGet(key string) (string, error)\n\t\/\/ GetAll returns all of the keys in a directory and its subdirectories as\n\t\/\/ a map from absolute keys to values.\n\t\/\/ Errors: ErrNetwork, ErrNotFound\n\tGetAll(key string) (map[string]string, error)\n\t\/\/ Set sets the value for a key\n\t\/\/ Errors: ErrNetwork, ErrIsDir\n\tSet(key string, value string) error\n\t\/\/ Delete deletes a key.\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrIsDir\n\tDelete(key string) error\n\t\/\/ Create is like Set but only succeeds if the key doesn't already exist\n\t\/\/ Errors: ErrNetwork, ErrExists\n\tCreate(key string, value string) error\n\t\/\/ CheckAndSet is like Set but only succeeds if the key is already set to oldValue\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrIsDir, ErrWrongOldValue\n\tCheckAndSet(key string, value string, oldValue string) error\n}\n\nfunc NewEtcdClient(addresses ...string) Client {\n\treturn newEtcdClient(addresses...)\n}\n\nfunc NewMockClient() *mockClient {\n\treturn newMockClient()\n}\n<commit_msg>Adds a mock implementation of discovery.mock_client<commit_after>package discovery\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrNetwork = errors.New(\"pachyderm: network failure\")\n\tErrNotFound = errors.New(\"pachyderm: Not found\")\n\tErrDirectory = errors.New(\"pachyderm: Found directory instead of value\")\n\tErrExists = errors.New(\"pachyderm: Key already exists\")\n\tErrPrecondition = errors.New(\"pachyderm: Precondition not met\")\n)\n\ntype Client interface {\n\t\/\/ Close closes the underlying connection\n\t\/\/ Errors: ErrNetwork\n\tClose() error\n\t\/\/ Get gets the value of a key\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrDirectory\n\tGet(key string) (string, error)\n\t\/\/ GetAll returns all of the keys in a directory and its subdirectories as\n\t\/\/ a map from absolute keys to values.\n\t\/\/ Errors: ErrNetwork\n\tGetAll(key string) (map[string]string, error)\n\t\/\/ Set sets the value for a key\n\t\/\/ Errors: ErrNetwork, ErrDirectory\n\tSet(key string, value string) error\n\t\/\/ Delete deletes a key.\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrDirectory\n\tDelete(key string) error\n\t\/\/ Create is like Set but only succeeds if the key doesn't already exist\n\t\/\/ Errors: ErrNetwork, ErrExists\n\tCreate(key string, value string) error\n\t\/\/ CheckAndSet is like Set but only succeeds if the key is already set to oldValue\n\t\/\/ Errors: ErrNetwork, ErrNotFound, ErrDirectory, ErrPrecondition\n\tCheckAndSet(key string, value string, oldValue string) error\n}\n\nfunc NewEtcdClient(addresses ...string) Client {\n\treturn newEtcdClient(addresses...)\n}\n\nfunc NewMockClient() *mockClient {\n\treturn newMockClient()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage build\n\nimport (\n\t\"exec\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc sortstr(x []string) []string {\n\tsort.Strings(x)\n\treturn x\n}\n\nvar buildPkgs = []struct {\n\tdir string\n\tinfo *DirInfo\n}{\n\t{\n\t\t\"go\/build\/pkgtest\",\n\t\t&DirInfo{\n\t\t\tGoFiles: []string{\"pkgtest.go\"},\n\t\t\tSFiles: []string{\"sqrt_\" + runtime.GOARCH + \".s\"},\n\t\t\tPkgName: \"pkgtest\",\n\t\t\tImports: []string{\"fmt\", \"os\", \"pkgtest\"},\n\t\t\tTestGoFiles: sortstr([]string{\"sqrt_test.go\", \"sqrt_\" + runtime.GOARCH + \"_test.go\"}),\n\t\t\tXTestGoFiles: []string{\"xsqrt_test.go\"},\n\t\t},\n\t},\n\t{\n\t\t\"go\/build\/cmdtest\",\n\t\t&DirInfo{\n\t\t\tGoFiles: []string{\"main.go\"},\n\t\t\tPkgName: \"main\",\n\t\t\tImports: []string{\"go\/build\/pkgtest\"},\n\t\t},\n\t},\n\t{\n\t\t\"go\/build\/cgotest\",\n\t\t&DirInfo{\n\t\t\tCgoFiles: []string{\"cgotest.go\"},\n\t\t\tCFiles: []string{\"cgotest.c\"},\n\t\t\tImports: []string{\"C\", \"unsafe\"},\n\t\t\tPkgName: \"cgotest\",\n\t\t},\n\t},\n}\n\nconst cmdtestOutput = \"3\"\n\nfunc TestBuild(t *testing.T) {\n\tfor _, tt := range buildPkgs {\n\t\ttree := Path[0] \/\/ Goroot\n\t\tdir := filepath.Join(tree.SrcDir(), tt.dir)\n\n\t\tinfo, err := ScanDir(dir, true)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ScanDir(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(info, tt.info) {\n\t\t\tt.Errorf(\"ScanDir(%#q) = %#v, want %#v\\n\", tt.dir, info, tt.info)\n\t\t\tcontinue\n\t\t}\n\n\t\ts, err := Build(tree, tt.dir, info)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Build(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := s.Run(); err != nil {\n\t\t\tt.Errorf(\"Run(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif tt.dir == \"go\/build\/cmdtest\" {\n\t\t\tbin := s.Output[0]\n\t\t\tb, err := exec.Command(bin).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"exec %s: %v\", bin, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif string(b) != cmdtestOutput {\n\t\t\t\tt.Errorf(\"cmdtest output: %s want: %s\", b, cmdtestOutput)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Deferred because cmdtest depends on pkgtest.\n\t\tdefer func(s *Script) {\n\t\t\tif err := s.Nuke(); err != nil {\n\t\t\t\tt.Errorf(\"nuking: %v\", err)\n\t\t\t}\n\t\t}(s)\n\t}\n}\n<commit_msg>Changed test to the behavior we want.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage build\n\nimport (\n\t\"exec\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc sortstr(x []string) []string {\n\tsort.Strings(x)\n\treturn x\n}\n\nvar buildPkgs = []struct {\n\tdir string\n\tinfo *DirInfo\n}{\n\t{\n\t\t\"go\/build\/pkgtest\",\n\t\t&DirInfo{\n\t\t\tGoFiles: []string{\"pkgtest.go\"},\n\t\t\tSFiles: []string{\"sqrt_\" + runtime.GOARCH + \".s\"},\n\t\t\tPkgName: \"pkgtest\",\n\t\t\tImports: []string{\"pkgtest\"},\n\t\t\tTestImports: []string{\"fmt\", \"os\"},\n\t\t\tTestGoFiles: sortstr([]string{\"sqrt_test.go\", \"sqrt_\" + runtime.GOARCH + \"_test.go\"}),\n\t\t\tXTestGoFiles: []string{\"xsqrt_test.go\"},\n\t\t},\n\t},\n\t{\n\t\t\"go\/build\/cmdtest\",\n\t\t&DirInfo{\n\t\t\tGoFiles: []string{\"main.go\"},\n\t\t\tPkgName: \"main\",\n\t\t\tImports: []string{\"go\/build\/pkgtest\"},\n\t\t},\n\t},\n\t{\n\t\t\"go\/build\/cgotest\",\n\t\t&DirInfo{\n\t\t\tCgoFiles: []string{\"cgotest.go\"},\n\t\t\tCFiles: []string{\"cgotest.c\"},\n\t\t\tImports: []string{\"C\", \"unsafe\"},\n\t\t\tPkgName: \"cgotest\",\n\t\t},\n\t},\n}\n\nconst cmdtestOutput = \"3\"\n\nfunc TestBuild(t *testing.T) {\n\tfor _, tt := range buildPkgs {\n\t\ttree := Path[0] \/\/ Goroot\n\t\tdir := filepath.Join(tree.SrcDir(), tt.dir)\n\n\t\tinfo, err := ScanDir(dir, true)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ScanDir(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(info, tt.info) {\n\t\t\tt.Errorf(\"ScanDir(%#q) = %#v, want %#v\\n\", tt.dir, info, tt.info)\n\t\t\tcontinue\n\t\t}\n\n\t\ts, err := Build(tree, tt.dir, info)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Build(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := s.Run(); err != nil {\n\t\t\tt.Errorf(\"Run(%#q): %v\", tt.dir, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif tt.dir == \"go\/build\/cmdtest\" {\n\t\t\tbin := s.Output[0]\n\t\t\tb, err := exec.Command(bin).CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"exec %s: %v\", bin, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif string(b) != cmdtestOutput {\n\t\t\t\tt.Errorf(\"cmdtest output: %s want: %s\", b, cmdtestOutput)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Deferred because cmdtest depends on pkgtest.\n\t\tdefer func(s *Script) {\n\t\t\tif err := s.Nuke(); err != nil {\n\t\t\t\tt.Errorf(\"nuking: %v\", err)\n\t\t\t}\n\t\t}(s)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package debug contains facilities for programs to debug themselves while\n\/\/ they are running.\npackage debug\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\"\n)\n\nvar (\n\tdunno = []byte(\"???\")\n\tcenterDot = []byte(\"·\")\n\tdot = []byte(\".\")\n)\n\n\/\/ PrintStack prints to standard error the stack trace returned by Stack.\nfunc PrintStack() {\n\tos.Stderr.Write(stack())\n}\n\n\/\/ Stack returns a formatted stack trace of the goroutine that calls it.\n\/\/ For each routine, it includes the source line information and PC value,\n\/\/ then attempts to discover, for Go functions, the calling function or\n\/\/ method and the text of the line containing the invocation.\nfunc Stack() []byte {\n\treturn stack()\n}\n\n\/\/ stack implements Stack, skipping 2 frames\nfunc stack() []byte {\n\tbuf := new(bytes.Buffer) \/\/ the returned data\n\t\/\/ As we loop, we open files and read them. These variables record the currently\n\t\/\/ loaded file.\n\tvar lines [][]byte\n\tvar lastFile string\n\tfor i := 2; ; i++ { \/\/ Caller we care about is the user, 2 frames up\n\t\tpc, file, line, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Print this much at least. If we can't find the source, it won't show.\n\t\tfmt.Fprintf(buf, \"%s:%d (0x%x)\\n\", file, line, pc)\n\t\tif file != lastFile {\n\t\t\tdata, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlines = bytes.Split(data, []byte{'\\n'})\n\t\t\tlastFile = file\n\t\t}\n\t\tline-- \/\/ in stack trace, lines are 1-indexed but our array is 0-indexed\n\t\tfmt.Fprintf(buf, \"\\t%s: %s\\n\", function(pc), source(lines, line))\n\t}\n\treturn buf.Bytes()\n}\n\n\/\/ source returns a space-trimmed slice of the n'th line.\nfunc source(lines [][]byte, n int) []byte {\n\tif n < 0 || n >= len(lines) {\n\t\treturn dunno\n\t}\n\treturn bytes.Trim(lines[n], \" \\t\")\n}\n\n\/\/ function returns, if possible, the name of the function containing the PC.\nfunc function(pc uintptr) []byte {\n\tfn := runtime.FuncForPC(pc)\n\tif fn == nil {\n\t\treturn dunno\n\t}\n\tname := []byte(fn.Name())\n\t\/\/ The name includes the path name to the package, which is unnecessary\n\t\/\/ since the file name is already included. Plus, it has center dots.\n\t\/\/ That is, we see\n\t\/\/\truntime\/debug.*T·ptrmethod\n\t\/\/ and want\n\t\/\/\t*T.ptrmethod\n\tif period := bytes.Index(name, dot); period >= 0 {\n\t\tname = name[period+1:]\n\t}\n\tname = bytes.Replace(name, centerDot, dot, -1)\n\treturn name\n}\n<commit_msg>runtime\/debug: document that Stack is deprecated<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package debug contains facilities for programs to debug themselves while\n\/\/ they are running.\npackage debug\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"runtime\"\n)\n\nvar (\n\tdunno = []byte(\"???\")\n\tcenterDot = []byte(\"·\")\n\tdot = []byte(\".\")\n)\n\n\/\/ PrintStack prints to standard error the stack trace returned by Stack.\nfunc PrintStack() {\n\tos.Stderr.Write(stack())\n}\n\n\/\/ Stack returns a formatted stack trace of the goroutine that calls it.\n\/\/ For each routine, it includes the source line information and PC value,\n\/\/ then attempts to discover, for Go functions, the calling function or\n\/\/ method and the text of the line containing the invocation.\n\/\/\n\/\/ This function is deprecated. Use package runtime's Stack instead.\nfunc Stack() []byte {\n\treturn stack()\n}\n\n\/\/ stack implements Stack, skipping 2 frames\nfunc stack() []byte {\n\tbuf := new(bytes.Buffer) \/\/ the returned data\n\t\/\/ As we loop, we open files and read them. These variables record the currently\n\t\/\/ loaded file.\n\tvar lines [][]byte\n\tvar lastFile string\n\tfor i := 2; ; i++ { \/\/ Caller we care about is the user, 2 frames up\n\t\tpc, file, line, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Print this much at least. If we can't find the source, it won't show.\n\t\tfmt.Fprintf(buf, \"%s:%d (0x%x)\\n\", file, line, pc)\n\t\tif file != lastFile {\n\t\t\tdata, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlines = bytes.Split(data, []byte{'\\n'})\n\t\t\tlastFile = file\n\t\t}\n\t\tline-- \/\/ in stack trace, lines are 1-indexed but our array is 0-indexed\n\t\tfmt.Fprintf(buf, \"\\t%s: %s\\n\", function(pc), source(lines, line))\n\t}\n\treturn buf.Bytes()\n}\n\n\/\/ source returns a space-trimmed slice of the n'th line.\nfunc source(lines [][]byte, n int) []byte {\n\tif n < 0 || n >= len(lines) {\n\t\treturn dunno\n\t}\n\treturn bytes.Trim(lines[n], \" \\t\")\n}\n\n\/\/ function returns, if possible, the name of the function containing the PC.\nfunc function(pc uintptr) []byte {\n\tfn := runtime.FuncForPC(pc)\n\tif fn == nil {\n\t\treturn dunno\n\t}\n\tname := []byte(fn.Name())\n\t\/\/ The name includes the path name to the package, which is unnecessary\n\t\/\/ since the file name is already included. Plus, it has center dots.\n\t\/\/ That is, we see\n\t\/\/\truntime\/debug.*T·ptrmethod\n\t\/\/ and want\n\t\/\/\t*T.ptrmethod\n\tif period := bytes.Index(name, dot); period >= 0 {\n\t\tname = name[period+1:]\n\t}\n\tname = bytes.Replace(name, centerDot, dot, -1)\n\treturn name\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package websocket implements a client and server for the WebSocket protocol.\n\/\/ The protocol is defined at http:\/\/tools.ietf.org\/html\/draft-ietf-hybi-thewebsocketprotocol\npackage websocket\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"http\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"net\"\n\t\"os\"\n\t\"url\"\n)\n\nconst (\n\tProtocolVersionHixie75 = -75\n\tProtocolVersionHixie76 = -76\n\tProtocolVersionHybi00 = 0\n\tProtocolVersionHybi = 8\n\n\tContinuationFrame = 0\n\tTextFrame = 1\n\tBinaryFrame = 2\n\tCloseFrame = 8\n\tPingFrame = 9\n\tPongFrame = 10\n\tUnknownFrame = 255\n)\n\n\/\/ WebSocket protocol errors.\ntype ProtocolError struct {\n\tErrorString string\n}\n\nfunc (err ProtocolError) String() string { return err.ErrorString }\n\nvar (\n\tErrBadProtocolVersion = ProtocolError{\"bad protocol version\"}\n\tErrBadScheme = ProtocolError{\"bad scheme\"}\n\tErrBadStatus = ProtocolError{\"bad status\"}\n\tErrBadUpgrade = ProtocolError{\"missing or bad upgrade\"}\n\tErrBadWebSocketOrigin = ProtocolError{\"missing or bad WebSocket-Origin\"}\n\tErrBadWebSocketLocation = ProtocolError{\"missing or bad WebSocket-Location\"}\n\tErrBadWebSocketProtocol = ProtocolError{\"missing or bad WebSocket-Protocol\"}\n\tErrBadWebSocketVersion = ProtocolError{\"missing or bad WebSocket Version\"}\n\tErrChallengeResponse = ProtocolError{\"mismatch challenge\/response\"}\n\tErrBadFrame = ProtocolError{\"bad frame\"}\n\tErrBadFrameBoundary = ProtocolError{\"not on frame boundary\"}\n\tErrNotWebSocket = ProtocolError{\"not websocket protocol\"}\n\tErrBadRequestMethod = ProtocolError{\"bad method\"}\n\tErrNotSupported = ProtocolError{\"not supported\"}\n)\n\n\/\/ WebSocketAddr is an implementation of net.Addr for WebSocket.\ntype WebSocketAddr struct {\n\t*url.URL\n}\n\n\/\/ Network returns the network type for a WebSocket, \"websocket\".\nfunc (addr WebSocketAddr) Network() string { return \"websocket\" }\n\n\/\/ Config is a WebSocket configuration\ntype Config struct {\n\t\/\/ A WebSocket server address.\n\tLocation *url.URL\n\n\t\/\/ A Websocket client origin.\n\tOrigin *url.URL\n\n\t\/\/ WebSocket subprotocols.\n\tProtocol []string\n\n\t\/\/ WebSocket protocol version.\n\tVersion int\n\n\t\/\/ TLS config for secure WebSocket (wss).\n\tTlsConfig *tls.Config\n\n\thandshakeData map[string]string\n}\n\n\/\/ serverHandshaker is an interface to handle WebSocket server side handshake.\ntype serverHandshaker interface {\n\t\/\/ ReadHandshake reads handshake request message from client.\n\t\/\/ Returns http response code and error if any.\n\tReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err os.Error)\n\n\t\/\/ AcceptHandshake accepts the client handshake request and sends\n\t\/\/ handshake response back to client.\n\tAcceptHandshake(buf *bufio.Writer) (err os.Error)\n\n\t\/\/ NewServerConn creates a new WebSocket connection.\n\tNewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)\n}\n\n\/\/ frameReader is an interface to read a WebSocket frame.\ntype frameReader interface {\n\t\/\/ Reader is to read payload of the frame.\n\tio.Reader\n\n\t\/\/ PayloadType returns payload type.\n\tPayloadType() byte\n\n\t\/\/ HeaderReader returns a reader to read header of the frame.\n\tHeaderReader() io.Reader\n\n\t\/\/ TrailerReader returns a reader to read trailer of the frame.\n\t\/\/ If it returns nil, there is no trailer in the frame.\n\tTrailerReader() io.Reader\n\n\t\/\/ Len returns total length of the frame, including header and trailer.\n\tLen() int\n}\n\n\/\/ frameReaderFactory is an interface to creates new frame reader.\ntype frameReaderFactory interface {\n\tNewFrameReader() (r frameReader, err os.Error)\n}\n\n\/\/ frameWriter is an interface to write a WebSocket frame.\ntype frameWriter interface {\n\t\/\/ Writer is to write playload of the frame.\n\tio.WriteCloser\n}\n\n\/\/ frameWriterFactory is an interface to create new frame writer.\ntype frameWriterFactory interface {\n\tNewFrameWriter(payloadType byte) (w frameWriter, err os.Error)\n}\n\ntype frameHandler interface {\n\tHandleFrame(frame frameReader) (r frameReader, err os.Error)\n\tWriteClose(status int) (err os.Error)\n}\n\n\/\/ Conn represents a WebSocket connection.\ntype Conn struct {\n\tconfig *Config\n\trequest *http.Request\n\n\tbuf *bufio.ReadWriter\n\trwc io.ReadWriteCloser\n\n\tframeReaderFactory\n\tframeReader\n\n\tframeWriterFactory\n\n\tframeHandler\n\tPayloadType byte\n\tdefaultCloseStatus int\n}\n\n\/\/ Read implements the io.Reader interface:\n\/\/ it reads data of a frame from the WebSocket connection.\n\/\/ if msg is not large enough for the frame data, it fills the msg and next Read\n\/\/ will read the rest of the frame data.\n\/\/ it reads Text frame or Binary frame.\nfunc (ws *Conn) Read(msg []byte) (n int, err os.Error) {\nagain:\n\tif ws.frameReader == nil {\n\t\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tws.frameReader, err = ws.frameHandler.HandleFrame(frame)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif ws.frameReader == nil {\n\t\t\tgoto again\n\t\t}\n\t}\n\tn, err = ws.frameReader.Read(msg)\n\tif err == os.EOF {\n\t\tif trailer := ws.frameReader.TrailerReader(); trailer != nil {\n\t\t\tio.Copy(ioutil.Discard, trailer)\n\t\t}\n\t\tws.frameReader = nil\n\t\tgoto again\n\t}\n\treturn n, err\n}\n\n\/\/ Write implements the io.Writer interface:\n\/\/ it writes data as a frame to the WebSocket connection.\nfunc (ws *Conn) Write(msg []byte) (n int, err os.Error) {\n\tw, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err = w.Write(msg)\n\tw.Close()\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, err\n}\n\n\/\/ Close implements the io.Closer interface.\nfunc (ws *Conn) Close() os.Error {\n\terr := ws.frameHandler.WriteClose(ws.defaultCloseStatus)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ws.rwc.Close()\n}\n\nfunc (ws *Conn) IsClientConn() bool { return ws.request == nil }\nfunc (ws *Conn) IsServerConn() bool { return ws.request != nil }\n\n\/\/ LocalAddr returns the WebSocket Origin for the connection for client, or\n\/\/ the WebSocket location for server.\nfunc (ws *Conn) LocalAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn WebSocketAddr{ws.config.Origin}\n\t}\n\treturn WebSocketAddr{ws.config.Location}\n}\n\n\/\/ RemoteAddr returns the WebSocket location for the connection for client, or\n\/\/ the Websocket Origin for server.\nfunc (ws *Conn) RemoteAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn WebSocketAddr{ws.config.Location}\n\t}\n\treturn WebSocketAddr{ws.config.Origin}\n}\n\n\/\/ SetTimeout sets the connection's network timeout in nanoseconds.\nfunc (ws *Conn) SetTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ SetReadTimeout sets the connection's network read timeout in nanoseconds.\nfunc (ws *Conn) SetReadTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetReadTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ SetWriteTimeout sets the connection's network write timeout in nanoseconds.\nfunc (ws *Conn) SetWriteTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetWriteTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ Config returns the WebSocket config.\nfunc (ws *Conn) Config() *Config { return ws.config }\n\n\/\/ Request returns the http request upgraded to the WebSocket.\n\/\/ It is nil for client side.\nfunc (ws *Conn) Request() *http.Request { return ws.request }\n\n\/\/ Codec represents a symmetric pair of functions that implement a codec.\ntype Codec struct {\n\tMarshal func(v interface{}) (data []byte, payloadType byte, err os.Error)\n\tUnmarshal func(data []byte, payloadType byte, v interface{}) (err os.Error)\n}\n\n\/\/ Send sends v marshaled by cd.Marshal as single frame to ws.\nfunc (cd Codec) Send(ws *Conn, v interface{}) (err os.Error) {\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, payloadType, err := cd.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw, err := ws.frameWriterFactory.NewFrameWriter(payloadType)\n\t_, err = w.Write(data)\n\tw.Close()\n\treturn err\n}\n\n\/\/ Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v.\nfunc (cd Codec) Receive(ws *Conn, v interface{}) (err os.Error) {\n\tif ws.frameReader != nil {\n\t\t_, err = io.Copy(ioutil.Discard, ws.frameReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tws.frameReader = nil\n\t}\nagain:\n\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tframe, err = ws.frameHandler.HandleFrame(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frame == nil {\n\t\tgoto again\n\t}\n\tpayloadType := frame.PayloadType()\n\tdata, err := ioutil.ReadAll(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cd.Unmarshal(data, payloadType, v)\n}\n\nfunc marshal(v interface{}) (msg []byte, payloadType byte, err os.Error) {\n\tswitch data := v.(type) {\n\tcase string:\n\t\treturn []byte(data), TextFrame, nil\n\tcase []byte:\n\t\treturn data, BinaryFrame, nil\n\t}\n\treturn nil, UnknownFrame, ErrNotSupported\n}\n\nfunc unmarshal(msg []byte, payloadType byte, v interface{}) (err os.Error) {\n\tswitch data := v.(type) {\n\tcase *string:\n\t\t*data = string(msg)\n\t\treturn nil\n\tcase *[]byte:\n\t\t*data = msg\n\t\treturn nil\n\t}\n\treturn ErrNotSupported\n}\n\n\/*\nMessage is a codec to send\/receive text\/binary data in a frame on WebSocket connection.\nTo send\/receive text frame, use string type.\nTo send\/receive binary frame, use []byte type.\n\nTrivial usage:\n\n\timport \"websocket\"\n\n\t\/\/ receive text frame\n\tvar message string\n\twebsocket.Message.Receive(ws, &message)\n\n\t\/\/ send text frame\n\tmessage = \"hello\"\n\twebsocket.Message.Send(ws, message)\n\n\t\/\/ receive binary frame\n\tvar data []byte\n\twebsocket.Message.Receive(ws, &data)\n\n\t\/\/ send binary frame\n\tdata = []byte{0, 1, 2}\n\twebsocket.Message.Send(ws, data)\n\n*\/\nvar Message = Codec{marshal, unmarshal}\n\nfunc jsonMarshal(v interface{}) (msg []byte, payloadType byte, err os.Error) {\n\tmsg, err = json.Marshal(v)\n\treturn msg, TextFrame, err\n}\n\nfunc jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err os.Error) {\n\treturn json.Unmarshal(msg, v)\n}\n\n\/*\nJSON is a codec to send\/receive JSON data in a frame from a WebSocket connection.\n\nTrival usage:\n\n\timport \"websocket\"\n\n\ttype T struct {\n\t\tMsg string\n\t\tCount int\n\t}\n\n\t\/\/ receive JSON type T\n\tvar data T\n\twebsocket.JSON.Receive(ws, &data)\n\n\t\/\/ send JSON type T\n\twebsocket.JSON.Send(ws, data)\n*\/\nvar JSON = Codec{jsonMarshal, jsonUnmarshal}\n<commit_msg>websocket: rename websocket.WebSocketAddr to *websocket.Addr.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package websocket implements a client and server for the WebSocket protocol.\n\/\/ The protocol is defined at http:\/\/tools.ietf.org\/html\/draft-ietf-hybi-thewebsocketprotocol\npackage websocket\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"http\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"net\"\n\t\"os\"\n\t\"url\"\n)\n\nconst (\n\tProtocolVersionHixie75 = -75\n\tProtocolVersionHixie76 = -76\n\tProtocolVersionHybi00 = 0\n\tProtocolVersionHybi = 8\n\n\tContinuationFrame = 0\n\tTextFrame = 1\n\tBinaryFrame = 2\n\tCloseFrame = 8\n\tPingFrame = 9\n\tPongFrame = 10\n\tUnknownFrame = 255\n)\n\n\/\/ WebSocket protocol errors.\ntype ProtocolError struct {\n\tErrorString string\n}\n\nfunc (err ProtocolError) String() string { return err.ErrorString }\n\nvar (\n\tErrBadProtocolVersion = ProtocolError{\"bad protocol version\"}\n\tErrBadScheme = ProtocolError{\"bad scheme\"}\n\tErrBadStatus = ProtocolError{\"bad status\"}\n\tErrBadUpgrade = ProtocolError{\"missing or bad upgrade\"}\n\tErrBadWebSocketOrigin = ProtocolError{\"missing or bad WebSocket-Origin\"}\n\tErrBadWebSocketLocation = ProtocolError{\"missing or bad WebSocket-Location\"}\n\tErrBadWebSocketProtocol = ProtocolError{\"missing or bad WebSocket-Protocol\"}\n\tErrBadWebSocketVersion = ProtocolError{\"missing or bad WebSocket Version\"}\n\tErrChallengeResponse = ProtocolError{\"mismatch challenge\/response\"}\n\tErrBadFrame = ProtocolError{\"bad frame\"}\n\tErrBadFrameBoundary = ProtocolError{\"not on frame boundary\"}\n\tErrNotWebSocket = ProtocolError{\"not websocket protocol\"}\n\tErrBadRequestMethod = ProtocolError{\"bad method\"}\n\tErrNotSupported = ProtocolError{\"not supported\"}\n)\n\n\/\/ Addr is an implementation of net.Addr for WebSocket.\ntype Addr struct {\n\t*url.URL\n}\n\n\/\/ Network returns the network type for a WebSocket, \"websocket\".\nfunc (addr *Addr) Network() string { return \"websocket\" }\n\n\/\/ Config is a WebSocket configuration\ntype Config struct {\n\t\/\/ A WebSocket server address.\n\tLocation *url.URL\n\n\t\/\/ A Websocket client origin.\n\tOrigin *url.URL\n\n\t\/\/ WebSocket subprotocols.\n\tProtocol []string\n\n\t\/\/ WebSocket protocol version.\n\tVersion int\n\n\t\/\/ TLS config for secure WebSocket (wss).\n\tTlsConfig *tls.Config\n\n\thandshakeData map[string]string\n}\n\n\/\/ serverHandshaker is an interface to handle WebSocket server side handshake.\ntype serverHandshaker interface {\n\t\/\/ ReadHandshake reads handshake request message from client.\n\t\/\/ Returns http response code and error if any.\n\tReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err os.Error)\n\n\t\/\/ AcceptHandshake accepts the client handshake request and sends\n\t\/\/ handshake response back to client.\n\tAcceptHandshake(buf *bufio.Writer) (err os.Error)\n\n\t\/\/ NewServerConn creates a new WebSocket connection.\n\tNewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)\n}\n\n\/\/ frameReader is an interface to read a WebSocket frame.\ntype frameReader interface {\n\t\/\/ Reader is to read payload of the frame.\n\tio.Reader\n\n\t\/\/ PayloadType returns payload type.\n\tPayloadType() byte\n\n\t\/\/ HeaderReader returns a reader to read header of the frame.\n\tHeaderReader() io.Reader\n\n\t\/\/ TrailerReader returns a reader to read trailer of the frame.\n\t\/\/ If it returns nil, there is no trailer in the frame.\n\tTrailerReader() io.Reader\n\n\t\/\/ Len returns total length of the frame, including header and trailer.\n\tLen() int\n}\n\n\/\/ frameReaderFactory is an interface to creates new frame reader.\ntype frameReaderFactory interface {\n\tNewFrameReader() (r frameReader, err os.Error)\n}\n\n\/\/ frameWriter is an interface to write a WebSocket frame.\ntype frameWriter interface {\n\t\/\/ Writer is to write playload of the frame.\n\tio.WriteCloser\n}\n\n\/\/ frameWriterFactory is an interface to create new frame writer.\ntype frameWriterFactory interface {\n\tNewFrameWriter(payloadType byte) (w frameWriter, err os.Error)\n}\n\ntype frameHandler interface {\n\tHandleFrame(frame frameReader) (r frameReader, err os.Error)\n\tWriteClose(status int) (err os.Error)\n}\n\n\/\/ Conn represents a WebSocket connection.\ntype Conn struct {\n\tconfig *Config\n\trequest *http.Request\n\n\tbuf *bufio.ReadWriter\n\trwc io.ReadWriteCloser\n\n\tframeReaderFactory\n\tframeReader\n\n\tframeWriterFactory\n\n\tframeHandler\n\tPayloadType byte\n\tdefaultCloseStatus int\n}\n\n\/\/ Read implements the io.Reader interface:\n\/\/ it reads data of a frame from the WebSocket connection.\n\/\/ if msg is not large enough for the frame data, it fills the msg and next Read\n\/\/ will read the rest of the frame data.\n\/\/ it reads Text frame or Binary frame.\nfunc (ws *Conn) Read(msg []byte) (n int, err os.Error) {\nagain:\n\tif ws.frameReader == nil {\n\t\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tws.frameReader, err = ws.frameHandler.HandleFrame(frame)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif ws.frameReader == nil {\n\t\t\tgoto again\n\t\t}\n\t}\n\tn, err = ws.frameReader.Read(msg)\n\tif err == os.EOF {\n\t\tif trailer := ws.frameReader.TrailerReader(); trailer != nil {\n\t\t\tio.Copy(ioutil.Discard, trailer)\n\t\t}\n\t\tws.frameReader = nil\n\t\tgoto again\n\t}\n\treturn n, err\n}\n\n\/\/ Write implements the io.Writer interface:\n\/\/ it writes data as a frame to the WebSocket connection.\nfunc (ws *Conn) Write(msg []byte) (n int, err os.Error) {\n\tw, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err = w.Write(msg)\n\tw.Close()\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, err\n}\n\n\/\/ Close implements the io.Closer interface.\nfunc (ws *Conn) Close() os.Error {\n\terr := ws.frameHandler.WriteClose(ws.defaultCloseStatus)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ws.rwc.Close()\n}\n\nfunc (ws *Conn) IsClientConn() bool { return ws.request == nil }\nfunc (ws *Conn) IsServerConn() bool { return ws.request != nil }\n\n\/\/ LocalAddr returns the WebSocket Origin for the connection for client, or\n\/\/ the WebSocket location for server.\nfunc (ws *Conn) LocalAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn &Addr{ws.config.Origin}\n\t}\n\treturn &Addr{ws.config.Location}\n}\n\n\/\/ RemoteAddr returns the WebSocket location for the connection for client, or\n\/\/ the Websocket Origin for server.\nfunc (ws *Conn) RemoteAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn &Addr{ws.config.Location}\n\t}\n\treturn &Addr{ws.config.Origin}\n}\n\n\/\/ SetTimeout sets the connection's network timeout in nanoseconds.\nfunc (ws *Conn) SetTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ SetReadTimeout sets the connection's network read timeout in nanoseconds.\nfunc (ws *Conn) SetReadTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetReadTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ SetWriteTimeout sets the connection's network write timeout in nanoseconds.\nfunc (ws *Conn) SetWriteTimeout(nsec int64) os.Error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetWriteTimeout(nsec)\n\t}\n\treturn os.EINVAL\n}\n\n\/\/ Config returns the WebSocket config.\nfunc (ws *Conn) Config() *Config { return ws.config }\n\n\/\/ Request returns the http request upgraded to the WebSocket.\n\/\/ It is nil for client side.\nfunc (ws *Conn) Request() *http.Request { return ws.request }\n\n\/\/ Codec represents a symmetric pair of functions that implement a codec.\ntype Codec struct {\n\tMarshal func(v interface{}) (data []byte, payloadType byte, err os.Error)\n\tUnmarshal func(data []byte, payloadType byte, v interface{}) (err os.Error)\n}\n\n\/\/ Send sends v marshaled by cd.Marshal as single frame to ws.\nfunc (cd Codec) Send(ws *Conn, v interface{}) (err os.Error) {\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, payloadType, err := cd.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw, err := ws.frameWriterFactory.NewFrameWriter(payloadType)\n\t_, err = w.Write(data)\n\tw.Close()\n\treturn err\n}\n\n\/\/ Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v.\nfunc (cd Codec) Receive(ws *Conn, v interface{}) (err os.Error) {\n\tif ws.frameReader != nil {\n\t\t_, err = io.Copy(ioutil.Discard, ws.frameReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tws.frameReader = nil\n\t}\nagain:\n\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tframe, err = ws.frameHandler.HandleFrame(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frame == nil {\n\t\tgoto again\n\t}\n\tpayloadType := frame.PayloadType()\n\tdata, err := ioutil.ReadAll(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cd.Unmarshal(data, payloadType, v)\n}\n\nfunc marshal(v interface{}) (msg []byte, payloadType byte, err os.Error) {\n\tswitch data := v.(type) {\n\tcase string:\n\t\treturn []byte(data), TextFrame, nil\n\tcase []byte:\n\t\treturn data, BinaryFrame, nil\n\t}\n\treturn nil, UnknownFrame, ErrNotSupported\n}\n\nfunc unmarshal(msg []byte, payloadType byte, v interface{}) (err os.Error) {\n\tswitch data := v.(type) {\n\tcase *string:\n\t\t*data = string(msg)\n\t\treturn nil\n\tcase *[]byte:\n\t\t*data = msg\n\t\treturn nil\n\t}\n\treturn ErrNotSupported\n}\n\n\/*\nMessage is a codec to send\/receive text\/binary data in a frame on WebSocket connection.\nTo send\/receive text frame, use string type.\nTo send\/receive binary frame, use []byte type.\n\nTrivial usage:\n\n\timport \"websocket\"\n\n\t\/\/ receive text frame\n\tvar message string\n\twebsocket.Message.Receive(ws, &message)\n\n\t\/\/ send text frame\n\tmessage = \"hello\"\n\twebsocket.Message.Send(ws, message)\n\n\t\/\/ receive binary frame\n\tvar data []byte\n\twebsocket.Message.Receive(ws, &data)\n\n\t\/\/ send binary frame\n\tdata = []byte{0, 1, 2}\n\twebsocket.Message.Send(ws, data)\n\n*\/\nvar Message = Codec{marshal, unmarshal}\n\nfunc jsonMarshal(v interface{}) (msg []byte, payloadType byte, err os.Error) {\n\tmsg, err = json.Marshal(v)\n\treturn msg, TextFrame, err\n}\n\nfunc jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err os.Error) {\n\treturn json.Unmarshal(msg, v)\n}\n\n\/*\nJSON is a codec to send\/receive JSON data in a frame from a WebSocket connection.\n\nTrival usage:\n\n\timport \"websocket\"\n\n\ttype T struct {\n\t\tMsg string\n\t\tCount int\n\t}\n\n\t\/\/ receive JSON type T\n\tvar data T\n\twebsocket.JSON.Receive(ws, &data)\n\n\t\/\/ send JSON type T\n\twebsocket.JSON.Send(ws, data)\n*\/\nvar JSON = Codec{jsonMarshal, jsonUnmarshal}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage runtime_test\n\nimport (\n\t\"bytes\"\n\t\"internal\/testenv\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestCrashDumpsAllThreads(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"dragonfly\", \"freebsd\", \"linux\", \"netbsd\", \"openbsd\", \"solaris\":\n\tdefault:\n\t\tt.Skipf(\"skipping; not supported on %v\", runtime.GOOS)\n\t}\n\n\t\/\/ We don't use executeTest because we need to kill the\n\t\/\/ program while it is running.\n\n\ttestenv.MustHaveGoBuild(t)\n\n\tcheckStaleRuntime(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tif err := ioutil.WriteFile(filepath.Join(dir, \"main.go\"), []byte(crashDumpsAllThreadsSource), 0666); err != nil {\n\t\tt.Fatalf(\"failed to create Go file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := testEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source: %v\\n%s\", err, out)\n\t}\n\n\tcmd = exec.Command(filepath.Join(dir, \"a.exe\"))\n\tcmd = testEnv(cmd)\n\tcmd.Env = append(cmd.Env, \"GOTRACEBACK=crash\")\n\tvar outbuf bytes.Buffer\n\tcmd.Stdout = &outbuf\n\tcmd.Stderr = &outbuf\n\n\trp, wp, err := os.Pipe()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcmd.ExtraFiles = []*os.File{wp}\n\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"starting program: %v\", err)\n\t}\n\n\tif err := wp.Close(); err != nil {\n\t\tt.Logf(\"closing write pipe: %v\", err)\n\t}\n\tif _, err := rp.Read(make([]byte, 1)); err != nil {\n\t\tt.Fatalf(\"reading from pipe: %v\", err)\n\t}\n\n\tif err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {\n\t\tt.Fatalf(\"signal: %v\", err)\n\t}\n\n\t\/\/ No point in checking the error return from Wait--we expect\n\t\/\/ it to fail.\n\tcmd.Wait()\n\n\t\/\/ We want to see a stack trace for each thread.\n\t\/\/ Before https:\/\/golang.org\/cl\/2811 running threads would say\n\t\/\/ \"goroutine running on other thread; stack unavailable\".\n\tout = outbuf.Bytes()\n\tn := bytes.Count(out, []byte(\"main.loop(\"))\n\tif n != 4 {\n\t\tt.Errorf(\"found %d instances of main.loop; expected 4\", n)\n\t\tt.Logf(\"%s\", out)\n\t}\n}\n\nconst crashDumpsAllThreadsSource = `\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n)\n\nfunc main() {\n\tconst count = 4\n\truntime.GOMAXPROCS(count + 1)\n\n\tchans := make([]chan bool, count)\n\tfor i := range chans {\n\t\tchans[i] = make(chan bool)\n\t\tgo loop(i, chans[i])\n\t}\n\n\t\/\/ Wait for all the goroutines to start executing.\n\tfor _, c := range chans {\n\t\t<-c\n\t}\n\n\t\/\/ Tell our parent that all the goroutines are executing.\n\tif _, err := os.NewFile(3, \"pipe\").WriteString(\"x\"); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"write to pipe failed: %v\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tselect {}\n}\n\nfunc loop(i int, c chan bool) {\n\tclose(c)\n\tfor {\n\t\tfor j := 0; j < 0x7fffffff; j++ {\n\t\t}\n\t}\n}\n`\n\nfunc TestSignalExitStatus(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tswitch runtime.GOOS {\n\tcase \"netbsd\", \"solaris\":\n\t\tt.Skipf(\"skipping on %s; see https:\/\/golang.org\/issue\/14063\", runtime.GOOS)\n\t}\n\texe, err := buildTestProg(t, \"testprog\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = testEnv(exec.Command(exe, \"SignalExitStatus\")).Run()\n\tif err == nil {\n\t\tt.Error(\"test program succeeded unexpectedly\")\n\t} else if ee, ok := err.(*exec.ExitError); !ok {\n\t\tt.Errorf(\"error (%v) has type %T; expected exec.ExitError\", err, err)\n\t} else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {\n\t\tt.Errorf(\"error.Sys (%v) has type %T; expected syscall.WaitStatus\", ee.Sys(), ee.Sys())\n\t} else if !ws.Signaled() || ws.Signal() != syscall.SIGTERM {\n\t\tt.Errorf(\"got %v; expected SIGTERM\", ee)\n\t}\n}\n\nfunc TestSignalIgnoreSIGTRAP(t *testing.T) {\n\toutput := runTestProg(t, \"testprognet\", \"SignalIgnoreSIGTRAP\")\n\twant := \"OK\\n\"\n\tif output != want {\n\t\tt.Fatalf(\"want %s, got %s\\n\", want, output)\n\t}\n}\n<commit_msg>runtime: fix deadlock in TestCrashDumpsAllThreads<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage runtime_test\n\nimport (\n\t\"bytes\"\n\t\"internal\/testenv\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestCrashDumpsAllThreads(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"dragonfly\", \"freebsd\", \"linux\", \"netbsd\", \"openbsd\", \"solaris\":\n\tdefault:\n\t\tt.Skipf(\"skipping; not supported on %v\", runtime.GOOS)\n\t}\n\n\t\/\/ We don't use executeTest because we need to kill the\n\t\/\/ program while it is running.\n\n\ttestenv.MustHaveGoBuild(t)\n\n\tcheckStaleRuntime(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tif err := ioutil.WriteFile(filepath.Join(dir, \"main.go\"), []byte(crashDumpsAllThreadsSource), 0666); err != nil {\n\t\tt.Fatalf(\"failed to create Go file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := testEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source: %v\\n%s\", err, out)\n\t}\n\n\tcmd = exec.Command(filepath.Join(dir, \"a.exe\"))\n\tcmd = testEnv(cmd)\n\tcmd.Env = append(cmd.Env, \"GOTRACEBACK=crash\")\n\n\t\/\/ Set GOGC=off. Because of golang.org\/issue\/10958, the tight\n\t\/\/ loops in the test program are not preemptible. If GC kicks\n\t\/\/ in, it may lock up and prevent main from saying it's ready.\n\tnewEnv := []string{}\n\tfor _, s := range cmd.Env {\n\t\tif !strings.HasPrefix(s, \"GOGC=\") {\n\t\t\tnewEnv = append(newEnv, s)\n\t\t}\n\t}\n\tcmd.Env = append(newEnv, \"GOGC=off\")\n\n\tvar outbuf bytes.Buffer\n\tcmd.Stdout = &outbuf\n\tcmd.Stderr = &outbuf\n\n\trp, wp, err := os.Pipe()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcmd.ExtraFiles = []*os.File{wp}\n\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"starting program: %v\", err)\n\t}\n\n\tif err := wp.Close(); err != nil {\n\t\tt.Logf(\"closing write pipe: %v\", err)\n\t}\n\tif _, err := rp.Read(make([]byte, 1)); err != nil {\n\t\tt.Fatalf(\"reading from pipe: %v\", err)\n\t}\n\n\tif err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {\n\t\tt.Fatalf(\"signal: %v\", err)\n\t}\n\n\t\/\/ No point in checking the error return from Wait--we expect\n\t\/\/ it to fail.\n\tcmd.Wait()\n\n\t\/\/ We want to see a stack trace for each thread.\n\t\/\/ Before https:\/\/golang.org\/cl\/2811 running threads would say\n\t\/\/ \"goroutine running on other thread; stack unavailable\".\n\tout = outbuf.Bytes()\n\tn := bytes.Count(out, []byte(\"main.loop(\"))\n\tif n != 4 {\n\t\tt.Errorf(\"found %d instances of main.loop; expected 4\", n)\n\t\tt.Logf(\"%s\", out)\n\t}\n}\n\nconst crashDumpsAllThreadsSource = `\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n)\n\nfunc main() {\n\tconst count = 4\n\truntime.GOMAXPROCS(count + 1)\n\n\tchans := make([]chan bool, count)\n\tfor i := range chans {\n\t\tchans[i] = make(chan bool)\n\t\tgo loop(i, chans[i])\n\t}\n\n\t\/\/ Wait for all the goroutines to start executing.\n\tfor _, c := range chans {\n\t\t<-c\n\t}\n\n\t\/\/ Tell our parent that all the goroutines are executing.\n\tif _, err := os.NewFile(3, \"pipe\").WriteString(\"x\"); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"write to pipe failed: %v\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tselect {}\n}\n\nfunc loop(i int, c chan bool) {\n\tclose(c)\n\tfor {\n\t\tfor j := 0; j < 0x7fffffff; j++ {\n\t\t}\n\t}\n}\n`\n\nfunc TestSignalExitStatus(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tswitch runtime.GOOS {\n\tcase \"netbsd\", \"solaris\":\n\t\tt.Skipf(\"skipping on %s; see https:\/\/golang.org\/issue\/14063\", runtime.GOOS)\n\t}\n\texe, err := buildTestProg(t, \"testprog\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = testEnv(exec.Command(exe, \"SignalExitStatus\")).Run()\n\tif err == nil {\n\t\tt.Error(\"test program succeeded unexpectedly\")\n\t} else if ee, ok := err.(*exec.ExitError); !ok {\n\t\tt.Errorf(\"error (%v) has type %T; expected exec.ExitError\", err, err)\n\t} else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {\n\t\tt.Errorf(\"error.Sys (%v) has type %T; expected syscall.WaitStatus\", ee.Sys(), ee.Sys())\n\t} else if !ws.Signaled() || ws.Signal() != syscall.SIGTERM {\n\t\tt.Errorf(\"got %v; expected SIGTERM\", ee)\n\t}\n}\n\nfunc TestSignalIgnoreSIGTRAP(t *testing.T) {\n\toutput := runTestProg(t, \"testprognet\", \"SignalIgnoreSIGTRAP\")\n\twant := \"OK\\n\"\n\tif output != want {\n\t\tt.Fatalf(\"want %s, got %s\\n\", want, output)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime\n\nimport \"unsafe\"\n\n\/\/ Solaris runtime-integrated network poller.\n\/\/\n\/\/ Solaris uses event ports for scalable network I\/O. Event\n\/\/ ports are level-triggered, unlike epoll and kqueue which\n\/\/ can be configured in both level-triggered and edge-triggered\n\/\/ mode. Level triggering means we have to keep track of a few things\n\/\/ ourselves. After we receive an event for a file descriptor,\n\/\/ it's our responsibility to ask again to be notified for future\n\/\/ events for that descriptor. When doing this we must keep track of\n\/\/ what kind of events the goroutines are currently interested in,\n\/\/ for example a fd may be open both for reading and writing.\n\/\/\n\/\/ A description of the high level operation of this code\n\/\/ follows. Networking code will get a file descriptor by some means\n\/\/ and will register it with the netpolling mechanism by a code path\n\/\/ that eventually calls runtime·netpollopen. runtime·netpollopen\n\/\/ calls port_associate with an empty event set. That means that we\n\/\/ will not receive any events at this point. The association needs\n\/\/ to be done at this early point because we need to process the I\/O\n\/\/ readiness notification at some point in the future. If I\/O becomes\n\/\/ ready when nobody is listening, when we finally care about it,\n\/\/ nobody will tell us anymore.\n\/\/\n\/\/ Beside calling runtime·netpollopen, the networking code paths\n\/\/ will call runtime·netpollarm each time goroutines are interested\n\/\/ in doing network I\/O. Because now we know what kind of I\/O we\n\/\/ are interested in (reading\/writing), we can call port_associate\n\/\/ passing the correct type of event set (POLLIN\/POLLOUT). As we made\n\/\/ sure to have already associated the file descriptor with the port,\n\/\/ when we now call port_associate, we will unblock the main poller\n\/\/ loop (in runtime·netpoll) right away if the socket is actually\n\/\/ ready for I\/O.\n\/\/\n\/\/ The main poller loop runs in its own thread waiting for events\n\/\/ using port_getn. When an event happens, it will tell the scheduler\n\/\/ about it using runtime·netpollready. Besides doing this, it must\n\/\/ also re-associate the events that were not part of this current\n\/\/ notification with the file descriptor. Failing to do this would\n\/\/ mean each notification will prevent concurrent code using the\n\/\/ same file descriptor in parallel.\n\/\/\n\/\/ The logic dealing with re-associations is encapsulated in\n\/\/ runtime·netpollupdate. This function takes care to associate the\n\/\/ descriptor only with the subset of events that were previously\n\/\/ part of the association, except the one that just happened. We\n\/\/ can't re-associate with that right away, because event ports\n\/\/ are level triggered so it would cause a busy loop. Instead, that\n\/\/ association is effected only by the runtime·netpollarm code path,\n\/\/ when Go code actually asks for I\/O.\n\/\/\n\/\/ The open and arming mechanisms are serialized using the lock\n\/\/ inside PollDesc. This is required because the netpoll loop runs\n\/\/ asynchronously in respect to other Go code and by the time we get\n\/\/ to call port_associate to update the association in the loop, the\n\/\/ file descriptor might have been closed and reopened already. The\n\/\/ lock allows runtime·netpollupdate to be called synchronously from\n\/\/ the loop thread while preventing other threads operating to the\n\/\/ same PollDesc, so once we unblock in the main loop, until we loop\n\/\/ again we know for sure we are always talking about the same file\n\/\/ descriptor and can safely access the data we want (the event set).\n\n\/\/go:cgo_import_dynamic libc_port_create port_create \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_associate port_associate \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_dissociate port_dissociate \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_getn port_getn \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_alert port_alert \"libc.so\"\n\n\/\/go:linkname libc_port_create libc_port_create\n\/\/go:linkname libc_port_associate libc_port_associate\n\/\/go:linkname libc_port_dissociate libc_port_dissociate\n\/\/go:linkname libc_port_getn libc_port_getn\n\/\/go:linkname libc_port_alert libc_port_alert\n\nvar (\n\tlibc_port_create,\n\tlibc_port_associate,\n\tlibc_port_dissociate,\n\tlibc_port_getn,\n\tlibc_port_alert libcFunc\n)\n\nfunc errno() int32 {\n\treturn *getg().m.perrno\n}\n\nfunc fcntl(fd, cmd, arg int32) int32 {\n\treturn int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))\n}\n\nfunc port_create() int32 {\n\treturn int32(sysvicall0(&libc_port_create))\n}\n\nfunc port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {\n\treturn int32(sysvicall5(&libc_port_associate, uintptr(port), uintptr(source), object, uintptr(events), user))\n}\n\nfunc port_dissociate(port, source int32, object uintptr) int32 {\n\treturn int32(sysvicall3(&libc_port_dissociate, uintptr(port), uintptr(source), object))\n}\n\nfunc port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32 {\n\treturn int32(sysvicall5(&libc_port_getn, uintptr(port), uintptr(unsafe.Pointer(evs)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout))))\n}\n\nfunc port_alert(port int32, flags, events uint32, user uintptr) int32 {\n\treturn int32(sysvicall4(&libc_port_alert, uintptr(port), uintptr(flags), uintptr(events), user))\n}\n\nvar portfd int32 = -1\n\nfunc netpollinit() {\n\tportfd = port_create()\n\tif portfd >= 0 {\n\t\tfcntl(portfd, _F_SETFD, _FD_CLOEXEC)\n\t\treturn\n\t}\n\n\tprint(\"runtime: port_create failed (errno=\", errno(), \")\\n\")\n\tthrow(\"runtime: netpollinit failed\")\n}\n\nfunc netpollIsPollDescriptor(fd uintptr) bool {\n\treturn fd == uintptr(portfd)\n}\n\nfunc netpollopen(fd uintptr, pd *pollDesc) int32 {\n\tlock(&pd.lock)\n\t\/\/ We don't register for any specific type of events yet, that's\n\t\/\/ netpollarm's job. We merely ensure we call port_associate before\n\t\/\/ asynchronous connect\/accept completes, so when we actually want\n\t\/\/ to do any I\/O, the call to port_associate (from netpollarm,\n\t\/\/ with the interested event set) will unblock port_getn right away\n\t\/\/ because of the I\/O readiness notification.\n\tpd.user = 0\n\tr := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))\n\tunlock(&pd.lock)\n\treturn r\n}\n\nfunc netpollclose(fd uintptr) int32 {\n\treturn port_dissociate(portfd, _PORT_SOURCE_FD, fd)\n}\n\n\/\/ Updates the association with a new set of interested events. After\n\/\/ this call, port_getn will return one and only one event for that\n\/\/ particular descriptor, so this function needs to be called again.\nfunc netpollupdate(pd *pollDesc, set, clear uint32) {\n\tif pd.closing {\n\t\treturn\n\t}\n\n\told := pd.user\n\tevents := (old & ^clear) | set\n\tif old == events {\n\t\treturn\n\t}\n\n\tif events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {\n\t\tprint(\"runtime: port_associate failed (errno=\", errno(), \")\\n\")\n\t\tthrow(\"runtime: netpollupdate failed\")\n\t}\n\tpd.user = events\n}\n\n\/\/ subscribe the fd to the port such that port_getn will return one event.\nfunc netpollarm(pd *pollDesc, mode int) {\n\tlock(&pd.lock)\n\tswitch mode {\n\tcase 'r':\n\t\tnetpollupdate(pd, _POLLIN, 0)\n\tcase 'w':\n\t\tnetpollupdate(pd, _POLLOUT, 0)\n\tdefault:\n\t\tthrow(\"runtime: bad mode\")\n\t}\n\tunlock(&pd.lock)\n}\n\n\/\/ netpollBreak interrupts a port_getn wait.\nfunc netpollBreak() {\n\t\/\/ Use port_alert to put portfd into alert mode.\n\t\/\/ This will wake up all threads sleeping in port_getn on portfd,\n\t\/\/ and cause their calls to port_getn to return immediately.\n\t\/\/ Further, until portfd is taken out of alert mode,\n\t\/\/ all calls to port_getn will return immediately.\n\tif port_alert(portfd, _PORT_ALERT_UPDATE, _POLLHUP, uintptr(unsafe.Pointer(&portfd))) < 0 {\n\t\tif e := errno(); e != _EBUSY {\n\t\t\tprintln(\"runtime: port_alert failed with\", e)\n\t\t\tthrow(\"runtime: netpoll: port_alert failed\")\n\t\t}\n\t}\n}\n\n\/\/ netpoll checks for ready network connections.\n\/\/ Returns list of goroutines that become runnable.\n\/\/ delay < 0: blocks indefinitely\n\/\/ delay == 0: does not block, just polls\n\/\/ delay > 0: block for up to that many nanoseconds\nfunc netpoll(delay int64) gList {\n\tif portfd == -1 {\n\t\treturn gList{}\n\t}\n\n\tvar wait *timespec\n\tvar ts timespec\n\tif delay < 0 {\n\t\twait = nil\n\t} else if delay == 0 {\n\t\twait = &ts\n\t} else {\n\t\tts.setNsec(delay)\n\t\tif ts.tv_sec > 1e6 {\n\t\t\t\/\/ An arbitrary cap on how long to wait for a timer.\n\t\t\t\/\/ 1e6 s == ~11.5 days.\n\t\t\tts.tv_sec = 1e6\n\t\t}\n\t\twait = &ts\n\t}\n\n\tvar events [128]portevent\nretry:\n\tvar n uint32 = 1\n\tif port_getn(portfd, &events[0], uint32(len(events)), &n, wait) < 0 {\n\t\tif e := errno(); e != _EINTR && e != _ETIME {\n\t\t\tprint(\"runtime: port_getn on fd \", portfd, \" failed (errno=\", e, \")\\n\")\n\t\t\tthrow(\"runtime: netpoll failed\")\n\t\t}\n\t\t\/\/ If a timed sleep was interrupted, just return to\n\t\t\/\/ recalculate how long we should sleep now.\n\t\tif delay > 0 {\n\t\t\treturn gList{}\n\t\t}\n\t\tgoto retry\n\t}\n\n\tvar toRun gList\n\tfor i := 0; i < int(n); i++ {\n\t\tev := &events[i]\n\n\t\tif ev.portev_source == _PORT_SOURCE_ALERT {\n\t\t\tif ev.portev_events != _POLLHUP || unsafe.Pointer(ev.portev_user) != unsafe.Pointer(&portfd) {\n\t\t\t\tthrow(\"runtime: netpoll: bad port_alert wakeup\")\n\t\t\t}\n\t\t\tif delay != 0 {\n\t\t\t\t\/\/ Now that a blocking call to netpoll\n\t\t\t\t\/\/ has seen the alert, take portfd\n\t\t\t\t\/\/ back out of alert mode.\n\t\t\t\t\/\/ See the comment in netpollBreak.\n\t\t\t\tif port_alert(portfd, 0, 0, 0) < 0 {\n\t\t\t\t\te := errno()\n\t\t\t\t\tprintln(\"runtime: port_alert failed with\", e)\n\t\t\t\t\tthrow(\"runtime: netpoll: port_alert failed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif ev.portev_events == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tpd := (*pollDesc)(unsafe.Pointer(ev.portev_user))\n\n\t\tvar mode, clear int32\n\t\tif (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {\n\t\t\tmode += 'r'\n\t\t\tclear |= _POLLIN\n\t\t}\n\t\tif (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {\n\t\t\tmode += 'w'\n\t\t\tclear |= _POLLOUT\n\t\t}\n\t\t\/\/ To effect edge-triggered events, we need to be sure to\n\t\t\/\/ update our association with whatever events were not\n\t\t\/\/ set with the event. For example if we are registered\n\t\t\/\/ for POLLIN|POLLOUT, and we get POLLIN, besides waking\n\t\t\/\/ the goroutine interested in POLLIN we have to not forget\n\t\t\/\/ about the one interested in POLLOUT.\n\t\tif clear != 0 {\n\t\t\tlock(&pd.lock)\n\t\t\tnetpollupdate(pd, 0, uint32(clear))\n\t\t\tunlock(&pd.lock)\n\t\t}\n\n\t\tif mode != 0 {\n\t\t\t\/\/ TODO(mikio): Consider implementing event\n\t\t\t\/\/ scanning error reporting once we are sure\n\t\t\t\/\/ about the event port on SmartOS.\n\t\t\t\/\/\n\t\t\t\/\/ See golang.org\/x\/issue\/30840.\n\t\t\tnetpollready(&toRun, pd, mode)\n\t\t}\n\t}\n\n\treturn toRun\n}\n<commit_msg>runtime: check for events when port_getn fails with ETIME<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime\n\nimport \"unsafe\"\n\n\/\/ Solaris runtime-integrated network poller.\n\/\/\n\/\/ Solaris uses event ports for scalable network I\/O. Event\n\/\/ ports are level-triggered, unlike epoll and kqueue which\n\/\/ can be configured in both level-triggered and edge-triggered\n\/\/ mode. Level triggering means we have to keep track of a few things\n\/\/ ourselves. After we receive an event for a file descriptor,\n\/\/ it's our responsibility to ask again to be notified for future\n\/\/ events for that descriptor. When doing this we must keep track of\n\/\/ what kind of events the goroutines are currently interested in,\n\/\/ for example a fd may be open both for reading and writing.\n\/\/\n\/\/ A description of the high level operation of this code\n\/\/ follows. Networking code will get a file descriptor by some means\n\/\/ and will register it with the netpolling mechanism by a code path\n\/\/ that eventually calls runtime·netpollopen. runtime·netpollopen\n\/\/ calls port_associate with an empty event set. That means that we\n\/\/ will not receive any events at this point. The association needs\n\/\/ to be done at this early point because we need to process the I\/O\n\/\/ readiness notification at some point in the future. If I\/O becomes\n\/\/ ready when nobody is listening, when we finally care about it,\n\/\/ nobody will tell us anymore.\n\/\/\n\/\/ Beside calling runtime·netpollopen, the networking code paths\n\/\/ will call runtime·netpollarm each time goroutines are interested\n\/\/ in doing network I\/O. Because now we know what kind of I\/O we\n\/\/ are interested in (reading\/writing), we can call port_associate\n\/\/ passing the correct type of event set (POLLIN\/POLLOUT). As we made\n\/\/ sure to have already associated the file descriptor with the port,\n\/\/ when we now call port_associate, we will unblock the main poller\n\/\/ loop (in runtime·netpoll) right away if the socket is actually\n\/\/ ready for I\/O.\n\/\/\n\/\/ The main poller loop runs in its own thread waiting for events\n\/\/ using port_getn. When an event happens, it will tell the scheduler\n\/\/ about it using runtime·netpollready. Besides doing this, it must\n\/\/ also re-associate the events that were not part of this current\n\/\/ notification with the file descriptor. Failing to do this would\n\/\/ mean each notification will prevent concurrent code using the\n\/\/ same file descriptor in parallel.\n\/\/\n\/\/ The logic dealing with re-associations is encapsulated in\n\/\/ runtime·netpollupdate. This function takes care to associate the\n\/\/ descriptor only with the subset of events that were previously\n\/\/ part of the association, except the one that just happened. We\n\/\/ can't re-associate with that right away, because event ports\n\/\/ are level triggered so it would cause a busy loop. Instead, that\n\/\/ association is effected only by the runtime·netpollarm code path,\n\/\/ when Go code actually asks for I\/O.\n\/\/\n\/\/ The open and arming mechanisms are serialized using the lock\n\/\/ inside PollDesc. This is required because the netpoll loop runs\n\/\/ asynchronously in respect to other Go code and by the time we get\n\/\/ to call port_associate to update the association in the loop, the\n\/\/ file descriptor might have been closed and reopened already. The\n\/\/ lock allows runtime·netpollupdate to be called synchronously from\n\/\/ the loop thread while preventing other threads operating to the\n\/\/ same PollDesc, so once we unblock in the main loop, until we loop\n\/\/ again we know for sure we are always talking about the same file\n\/\/ descriptor and can safely access the data we want (the event set).\n\n\/\/go:cgo_import_dynamic libc_port_create port_create \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_associate port_associate \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_dissociate port_dissociate \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_getn port_getn \"libc.so\"\n\/\/go:cgo_import_dynamic libc_port_alert port_alert \"libc.so\"\n\n\/\/go:linkname libc_port_create libc_port_create\n\/\/go:linkname libc_port_associate libc_port_associate\n\/\/go:linkname libc_port_dissociate libc_port_dissociate\n\/\/go:linkname libc_port_getn libc_port_getn\n\/\/go:linkname libc_port_alert libc_port_alert\n\nvar (\n\tlibc_port_create,\n\tlibc_port_associate,\n\tlibc_port_dissociate,\n\tlibc_port_getn,\n\tlibc_port_alert libcFunc\n)\n\nfunc errno() int32 {\n\treturn *getg().m.perrno\n}\n\nfunc fcntl(fd, cmd, arg int32) int32 {\n\treturn int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))\n}\n\nfunc port_create() int32 {\n\treturn int32(sysvicall0(&libc_port_create))\n}\n\nfunc port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {\n\treturn int32(sysvicall5(&libc_port_associate, uintptr(port), uintptr(source), object, uintptr(events), user))\n}\n\nfunc port_dissociate(port, source int32, object uintptr) int32 {\n\treturn int32(sysvicall3(&libc_port_dissociate, uintptr(port), uintptr(source), object))\n}\n\nfunc port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32 {\n\treturn int32(sysvicall5(&libc_port_getn, uintptr(port), uintptr(unsafe.Pointer(evs)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout))))\n}\n\nfunc port_alert(port int32, flags, events uint32, user uintptr) int32 {\n\treturn int32(sysvicall4(&libc_port_alert, uintptr(port), uintptr(flags), uintptr(events), user))\n}\n\nvar portfd int32 = -1\n\nfunc netpollinit() {\n\tportfd = port_create()\n\tif portfd >= 0 {\n\t\tfcntl(portfd, _F_SETFD, _FD_CLOEXEC)\n\t\treturn\n\t}\n\n\tprint(\"runtime: port_create failed (errno=\", errno(), \")\\n\")\n\tthrow(\"runtime: netpollinit failed\")\n}\n\nfunc netpollIsPollDescriptor(fd uintptr) bool {\n\treturn fd == uintptr(portfd)\n}\n\nfunc netpollopen(fd uintptr, pd *pollDesc) int32 {\n\tlock(&pd.lock)\n\t\/\/ We don't register for any specific type of events yet, that's\n\t\/\/ netpollarm's job. We merely ensure we call port_associate before\n\t\/\/ asynchronous connect\/accept completes, so when we actually want\n\t\/\/ to do any I\/O, the call to port_associate (from netpollarm,\n\t\/\/ with the interested event set) will unblock port_getn right away\n\t\/\/ because of the I\/O readiness notification.\n\tpd.user = 0\n\tr := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))\n\tunlock(&pd.lock)\n\treturn r\n}\n\nfunc netpollclose(fd uintptr) int32 {\n\treturn port_dissociate(portfd, _PORT_SOURCE_FD, fd)\n}\n\n\/\/ Updates the association with a new set of interested events. After\n\/\/ this call, port_getn will return one and only one event for that\n\/\/ particular descriptor, so this function needs to be called again.\nfunc netpollupdate(pd *pollDesc, set, clear uint32) {\n\tif pd.closing {\n\t\treturn\n\t}\n\n\told := pd.user\n\tevents := (old & ^clear) | set\n\tif old == events {\n\t\treturn\n\t}\n\n\tif events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {\n\t\tprint(\"runtime: port_associate failed (errno=\", errno(), \")\\n\")\n\t\tthrow(\"runtime: netpollupdate failed\")\n\t}\n\tpd.user = events\n}\n\n\/\/ subscribe the fd to the port such that port_getn will return one event.\nfunc netpollarm(pd *pollDesc, mode int) {\n\tlock(&pd.lock)\n\tswitch mode {\n\tcase 'r':\n\t\tnetpollupdate(pd, _POLLIN, 0)\n\tcase 'w':\n\t\tnetpollupdate(pd, _POLLOUT, 0)\n\tdefault:\n\t\tthrow(\"runtime: bad mode\")\n\t}\n\tunlock(&pd.lock)\n}\n\n\/\/ netpollBreak interrupts a port_getn wait.\nfunc netpollBreak() {\n\t\/\/ Use port_alert to put portfd into alert mode.\n\t\/\/ This will wake up all threads sleeping in port_getn on portfd,\n\t\/\/ and cause their calls to port_getn to return immediately.\n\t\/\/ Further, until portfd is taken out of alert mode,\n\t\/\/ all calls to port_getn will return immediately.\n\tif port_alert(portfd, _PORT_ALERT_UPDATE, _POLLHUP, uintptr(unsafe.Pointer(&portfd))) < 0 {\n\t\tif e := errno(); e != _EBUSY {\n\t\t\tprintln(\"runtime: port_alert failed with\", e)\n\t\t\tthrow(\"runtime: netpoll: port_alert failed\")\n\t\t}\n\t}\n}\n\n\/\/ netpoll checks for ready network connections.\n\/\/ Returns list of goroutines that become runnable.\n\/\/ delay < 0: blocks indefinitely\n\/\/ delay == 0: does not block, just polls\n\/\/ delay > 0: block for up to that many nanoseconds\nfunc netpoll(delay int64) gList {\n\tif portfd == -1 {\n\t\treturn gList{}\n\t}\n\n\tvar wait *timespec\n\tvar ts timespec\n\tif delay < 0 {\n\t\twait = nil\n\t} else if delay == 0 {\n\t\twait = &ts\n\t} else {\n\t\tts.setNsec(delay)\n\t\tif ts.tv_sec > 1e6 {\n\t\t\t\/\/ An arbitrary cap on how long to wait for a timer.\n\t\t\t\/\/ 1e6 s == ~11.5 days.\n\t\t\tts.tv_sec = 1e6\n\t\t}\n\t\twait = &ts\n\t}\n\n\tvar events [128]portevent\nretry:\n\tvar n uint32 = 1\n\tr := port_getn(portfd, &events[0], uint32(len(events)), &n, wait)\n\te := errno()\n\tif r < 0 && e == _ETIME && n > 0 {\n\t\t\/\/ As per port_getn(3C), an ETIME failure does not preclude the\n\t\t\/\/ delivery of some number of events. Treat a timeout failure\n\t\t\/\/ with delivered events as a success.\n\t\tr = 0\n\t}\n\tif r < 0 {\n\t\tif e != _EINTR && e != _ETIME {\n\t\t\tprint(\"runtime: port_getn on fd \", portfd, \" failed (errno=\", e, \")\\n\")\n\t\t\tthrow(\"runtime: netpoll failed\")\n\t\t}\n\t\t\/\/ If a timed sleep was interrupted and there are no events,\n\t\t\/\/ just return to recalculate how long we should sleep now.\n\t\tif delay > 0 {\n\t\t\treturn gList{}\n\t\t}\n\t\tgoto retry\n\t}\n\n\tvar toRun gList\n\tfor i := 0; i < int(n); i++ {\n\t\tev := &events[i]\n\n\t\tif ev.portev_source == _PORT_SOURCE_ALERT {\n\t\t\tif ev.portev_events != _POLLHUP || unsafe.Pointer(ev.portev_user) != unsafe.Pointer(&portfd) {\n\t\t\t\tthrow(\"runtime: netpoll: bad port_alert wakeup\")\n\t\t\t}\n\t\t\tif delay != 0 {\n\t\t\t\t\/\/ Now that a blocking call to netpoll\n\t\t\t\t\/\/ has seen the alert, take portfd\n\t\t\t\t\/\/ back out of alert mode.\n\t\t\t\t\/\/ See the comment in netpollBreak.\n\t\t\t\tif port_alert(portfd, 0, 0, 0) < 0 {\n\t\t\t\t\te := errno()\n\t\t\t\t\tprintln(\"runtime: port_alert failed with\", e)\n\t\t\t\t\tthrow(\"runtime: netpoll: port_alert failed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif ev.portev_events == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tpd := (*pollDesc)(unsafe.Pointer(ev.portev_user))\n\n\t\tvar mode, clear int32\n\t\tif (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {\n\t\t\tmode += 'r'\n\t\t\tclear |= _POLLIN\n\t\t}\n\t\tif (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {\n\t\t\tmode += 'w'\n\t\t\tclear |= _POLLOUT\n\t\t}\n\t\t\/\/ To effect edge-triggered events, we need to be sure to\n\t\t\/\/ update our association with whatever events were not\n\t\t\/\/ set with the event. For example if we are registered\n\t\t\/\/ for POLLIN|POLLOUT, and we get POLLIN, besides waking\n\t\t\/\/ the goroutine interested in POLLIN we have to not forget\n\t\t\/\/ about the one interested in POLLOUT.\n\t\tif clear != 0 {\n\t\t\tlock(&pd.lock)\n\t\t\tnetpollupdate(pd, 0, uint32(clear))\n\t\t\tunlock(&pd.lock)\n\t\t}\n\n\t\tif mode != 0 {\n\t\t\t\/\/ TODO(mikio): Consider implementing event\n\t\t\t\/\/ scanning error reporting once we are sure\n\t\t\t\/\/ about the event port on SmartOS.\n\t\t\t\/\/\n\t\t\t\/\/ See golang.org\/x\/issue\/30840.\n\t\t\tnetpollready(&toRun, pd, mode)\n\t\t}\n\t}\n\n\treturn toRun\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api\n\nimport (\n\t\"launchpad.net\/juju-core\/state\/api\/machiner\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n)\n\n\/\/ Login authenticates as the entity with the given name and password.\n\/\/ Subsequent requests on the state will act as that entity.\n\/\/ This method is usually called automatically by Open.\nfunc (st *State) Login(tag, password string) error {\n\treturn st.Call(\"Admin\", \"\", \"Login\", ¶ms.Creds{\n\t\tAuthTag: tag,\n\t\tPassword: password,\n\t}, nil)\n}\n\n\/\/ Client returns an object that can be used\n\/\/ to access client-specific functionality.\nfunc (st *State) Client() *Client {\n\treturn &Client{st}\n}\n\n\/\/ Machiner returns an object that can be used to access the Machiner\n\/\/ API facade.\nfunc (st *State) Machiner(version string) (*machiner.Machiner, error) {\n\t\/\/ Just verify we're allowed to access it.\n\targs := params.Machines{\n\t\tIds: []string{},\n\t}\n\tvar result params.MachinesLifeResults\n\terr := st.Call(\"Machiner\", version, \"Life\", args, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn machiner.New(st), nil\n}\n<commit_msg>Renamed version to id<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api\n\nimport (\n\t\"launchpad.net\/juju-core\/state\/api\/machiner\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n)\n\n\/\/ Login authenticates as the entity with the given name and password.\n\/\/ Subsequent requests on the state will act as that entity.\n\/\/ This method is usually called automatically by Open.\nfunc (st *State) Login(tag, password string) error {\n\treturn st.Call(\"Admin\", \"\", \"Login\", ¶ms.Creds{\n\t\tAuthTag: tag,\n\t\tPassword: password,\n\t}, nil)\n}\n\n\/\/ Client returns an object that can be used\n\/\/ to access client-specific functionality.\nfunc (st *State) Client() *Client {\n\treturn &Client{st}\n}\n\n\/\/ Machiner returns an object that can be used to access the Machiner\n\/\/ API facade. The id argument is reserved for possible future use and\n\/\/ should be empty.\nfunc (st *State) Machiner(id string) (*machiner.Machiner, error) {\n\t\/\/ Just verify we're allowed to access it.\n\targs := params.Machines{\n\t\tIds: []string{},\n\t}\n\tvar result params.MachinesLifeResults\n\terr := st.Call(\"Machiner\", id, \"Life\", args, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn machiner.New(st), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\t\"gopkg.in\/juju\/charm.v5\"\n\n\t\"github.com\/juju\/juju\/process\"\n)\n\n\/\/ TODO(ericsnow) Track juju-level status in the status collection.\n\n\/\/ UnitProcesses exposes high-level interaction with workload processes\n\/\/ for a unit.\ntype UnitProcesses interface {\n\t\/\/ Register registers a workload process in state.\n\tRegister(info process.Info) error\n\t\/\/ SetStatus sets the raw status of a workload process.\n\tSetStatus(id string, status process.Status) error\n\t\/\/ List builds the list of workload processes registered for\n\t\/\/ the given unit and IDs. If no IDs are provided then all\n\t\/\/ registered processes for the unit are returned.\n\tList(ids ...string) ([]process.Info, error)\n\t\/\/ Unregister marks the identified process as unregistered.\n\tUnregister(id string) error\n}\n\n\/\/ ProcessDefiniitions provides the state functionality related to\n\/\/ workload process definitions.\ntype ProcessDefinitions interface {\n\t\/\/ EnsureDefined adds the definitions to state if they aren't there\n\t\/\/ already. If they are there then it verfies that the existing\n\t\/\/ definitions match the provided ones.\n\tEnsureDefined(definitions ...charm.Process) error\n}\n\n\/\/ TODO(ericsnow) Use a more generic component registration mechanism?\n\ntype newUnitProcessesFunc func(persist Persistence, unit names.UnitTag, charm names.CharmTag) (UnitProcesses, error)\n\ntype newProcessDefinitionsFunc func(persist Persistence, charm names.CharmTag) (ProcessDefinitions, error)\n\nvar (\n\tnewUnitProcesses newUnitProcessesFunc\n\tnewProcessDefinitions newProcessDefinitionsFunc\n)\n\n\/\/ SetProcessesComponent registers the functions that provide the state\n\/\/ functionality related to workload processes.\nfunc SetProcessesComponent(upFunc newUnitProcessesFunc, pdFunc newProcessDefinitionsFunc) {\n\tnewUnitProcesses = upFunc\n\tnewProcessDefinitions = pdFunc\n}\n\ntype unitProcesses struct {\n\tUnitProcesses\n\tcharm *Charm\n\tst *State\n}\n\n\/\/ UnitProcesses exposes interaction with workload processes in state\n\/\/ for a the given unit.\nfunc (st *State) UnitProcesses(unit *Unit) (UnitProcesses, error) {\n\tif newUnitProcesses == nil {\n\t\treturn nil, errors.Errorf(\"unit processes not supported\")\n\t}\n\n\t\/\/ TODO(ericsnow) unit.charm is sometimes wrong...\n\tcharm, err := unit.charm()\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t\/\/ TODO(ericsnow) Do we really need the charm tag?\n\tcharmTag := charm.Tag().(names.CharmTag)\n\n\tpersist := st.newPersistence()\n\tunitProcs, err := newUnitProcesses(persist, unit.UnitTag(), charmTag)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn &unitProcesses{\n\t\tUnitProcesses: unitProcs,\n\t\tcharm: charm,\n\t\tst: st,\n\t}, nil\n}\n\n\/\/ List implements UnitProcesses. It also ensures that all of the\n\/\/ process definitions in the charm's metadata are added to state.\nfunc (up *unitProcesses) List(ids ...string) ([]process.Info, error) {\n\tif len(ids) == 0 {\n\t\t\/\/ TODO(ericsnow) Instead call st.defineProcesses when a charm is added?\n\t\tif err := up.st.defineProcesses(up.charm.Tag().(names.CharmTag), *up.charm.Meta()); err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\n\treturn up.UnitProcesses.List(ids...)\n}\n\n\/\/ TODO(ericsnow) DestroyProcess: Mark the proc as Dying.\n\n\/\/ defineProcesses adds the workload process definitions from the provided\n\/\/ charm metadata to state.\nfunc (st *State) defineProcesses(charmTag names.CharmTag, meta charm.Meta) error {\n\tif newProcessDefinitions == nil {\n\t\treturn errors.Errorf(\"process definitions not supported\")\n\t}\n\n\tvar definitions []charm.Process\n\tfor _, definition := range meta.Processes {\n\t\tdefinitions = append(definitions, definition)\n\t}\n\n\tpersist := st.newPersistence()\n\tpd, err := newProcessDefinitions(persist, charmTag)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := pd.EnsureDefined(definitions...); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n<commit_msg>Drop a couple TODO comments.<commit_after>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\t\"gopkg.in\/juju\/charm.v5\"\n\n\t\"github.com\/juju\/juju\/process\"\n)\n\n\/\/ TODO(ericsnow) Track juju-level status in the status collection.\n\n\/\/ UnitProcesses exposes high-level interaction with workload processes\n\/\/ for a unit.\ntype UnitProcesses interface {\n\t\/\/ Register registers a workload process in state.\n\tRegister(info process.Info) error\n\t\/\/ SetStatus sets the raw status of a workload process.\n\tSetStatus(id string, status process.Status) error\n\t\/\/ List builds the list of workload processes registered for\n\t\/\/ the given unit and IDs. If no IDs are provided then all\n\t\/\/ registered processes for the unit are returned.\n\tList(ids ...string) ([]process.Info, error)\n\t\/\/ Unregister marks the identified process as unregistered.\n\tUnregister(id string) error\n}\n\n\/\/ ProcessDefiniitions provides the state functionality related to\n\/\/ workload process definitions.\ntype ProcessDefinitions interface {\n\t\/\/ EnsureDefined adds the definitions to state if they aren't there\n\t\/\/ already. If they are there then it verfies that the existing\n\t\/\/ definitions match the provided ones.\n\tEnsureDefined(definitions ...charm.Process) error\n}\n\n\/\/ TODO(ericsnow) Use a more generic component registration mechanism?\n\ntype newUnitProcessesFunc func(persist Persistence, unit names.UnitTag, charm names.CharmTag) (UnitProcesses, error)\n\ntype newProcessDefinitionsFunc func(persist Persistence, charm names.CharmTag) (ProcessDefinitions, error)\n\nvar (\n\tnewUnitProcesses newUnitProcessesFunc\n\tnewProcessDefinitions newProcessDefinitionsFunc\n)\n\n\/\/ SetProcessesComponent registers the functions that provide the state\n\/\/ functionality related to workload processes.\nfunc SetProcessesComponent(upFunc newUnitProcessesFunc, pdFunc newProcessDefinitionsFunc) {\n\tnewUnitProcesses = upFunc\n\tnewProcessDefinitions = pdFunc\n}\n\ntype unitProcesses struct {\n\tUnitProcesses\n\tcharm *Charm\n\tst *State\n}\n\n\/\/ UnitProcesses exposes interaction with workload processes in state\n\/\/ for a the given unit.\nfunc (st *State) UnitProcesses(unit *Unit) (UnitProcesses, error) {\n\tif newUnitProcesses == nil {\n\t\treturn nil, errors.Errorf(\"unit processes not supported\")\n\t}\n\n\tcharm, err := unit.charm()\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tcharmTag := charm.Tag().(names.CharmTag)\n\n\tpersist := st.newPersistence()\n\tunitProcs, err := newUnitProcesses(persist, unit.UnitTag(), charmTag)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn &unitProcesses{\n\t\tUnitProcesses: unitProcs,\n\t\tcharm: charm,\n\t\tst: st,\n\t}, nil\n}\n\n\/\/ List implements UnitProcesses. It also ensures that all of the\n\/\/ process definitions in the charm's metadata are added to state.\nfunc (up *unitProcesses) List(ids ...string) ([]process.Info, error) {\n\tif len(ids) == 0 {\n\t\t\/\/ TODO(ericsnow) Instead call st.defineProcesses when a charm is added?\n\t\tif err := up.st.defineProcesses(up.charm.Tag().(names.CharmTag), *up.charm.Meta()); err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t}\n\n\treturn up.UnitProcesses.List(ids...)\n}\n\n\/\/ TODO(ericsnow) DestroyProcess: Mark the proc as Dying.\n\n\/\/ defineProcesses adds the workload process definitions from the provided\n\/\/ charm metadata to state.\nfunc (st *State) defineProcesses(charmTag names.CharmTag, meta charm.Meta) error {\n\tif newProcessDefinitions == nil {\n\t\treturn errors.Errorf(\"process definitions not supported\")\n\t}\n\n\tvar definitions []charm.Process\n\tfor _, definition := range meta.Processes {\n\t\tdefinitions = append(definitions, definition)\n\t}\n\n\tpersist := st.newPersistence()\n\tpd, err := newProcessDefinitions(persist, charmTag)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := pd.EnsureDefined(definitions...); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package worker\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/travis-ci\/worker\/backend\"\n\t\"github.com\/travis-ci\/worker\/context\"\n\tgocontext \"golang.org\/x\/net\/context\"\n)\n\ntype runScriptReturn struct {\n\tresult *backend.RunResult\n\terr error\n}\n\ntype stepRunScript struct {\n\tlogTimeout time.Duration\n\thardTimeout time.Duration\n\tskipShutdownOnLogTimeout bool\n}\n\nfunc (s *stepRunScript) Run(state multistep.StateBag) multistep.StepAction {\n\tctx := state.Get(\"ctx\").(gocontext.Context)\n\tbuildJob := state.Get(\"buildJob\").(Job)\n\tinstance := state.Get(\"instance\").(backend.Instance)\n\tlogWriter := state.Get(\"logWriter\").(LogWriter)\n\tcancelChan := state.Get(\"cancelChan\").(<-chan struct{})\n\n\tcontext.LoggerFromContext(ctx).Info(\"running script\")\n\tdefer context.LoggerFromContext(ctx).Info(\"finished script\")\n\n\tresultChan := make(chan runScriptReturn, 1)\n\tgo func() {\n\t\tresult, err := instance.RunScript(ctx, logWriter)\n\t\tresultChan <- runScriptReturn{\n\t\t\tresult: result,\n\t\t\terr: err,\n\t\t}\n\t}()\n\n\tselect {\n\tcase r := <-resultChan:\n\t\tif r.err == ErrWrotePastMaxLogLength {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"wrote past maximum log length\")\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\t\/\/ We need to check for this since it's possible that the RunScript\n\t\t\/\/ implementation returns with the error too quickly for the ctx.Done()\n\t\t\/\/ case branch below to catch it.\n\t\tif r.err == gocontext.DeadlineExceeded {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"hard timeout exceeded, terminating\")\n\t\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, \"\\n\\nThe job exceeded the maxmimum time limit for jobs, and has been terminated.\\n\\n\")\n\t\t}\n\n\t\tif r.err != nil {\n\t\t\tif !r.result.Completed {\n\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", r.err).WithField(\"completed\", r.result.Completed).Error(\"couldn't run script, attempting requeue\")\n\t\t\t\terr := buildJob.Requeue()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).Error(\"couldn't requeue job\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", r.err).WithField(\"completed\", r.result.Completed).Error(\"couldn't run script\")\n\t\t\t}\n\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tstate.Put(\"scriptResult\", r.result)\n\n\t\treturn multistep.ActionContinue\n\tcase <-ctx.Done():\n\t\tif ctx.Err() == gocontext.DeadlineExceeded {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"hard timeout exceeded, terminating\")\n\t\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, \"\\n\\nThe job exceeded the maxmimum time limit for jobs, and has been terminated.\\n\\n\")\n\t\t} else {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"context was cancelled, stopping job\")\n\t\t}\n\n\t\treturn multistep.ActionHalt\n\tcase <-cancelChan:\n\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateCancelled, \"\\n\\nDone: Job Cancelled\\n\\n\")\n\n\t\treturn multistep.ActionHalt\n\tcase <-logWriter.Timeout():\n\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, fmt.Sprintf(\"\\n\\nNo output has been received in the last %v, this potentially indicates a stalled build or something wrong with the build itself.\\n\\nThe build has been terminated\\n\\n\", s.logTimeout))\n\n\t\tif s.skipShutdownOnLogTimeout {\n\t\t\tstate.Put(\"skipShutdown\", true)\n\t\t}\n\n\t\treturn multistep.ActionHalt\n\t}\n}\n\nfunc (s *stepRunScript) writeLogAndFinishWithState(ctx gocontext.Context, logWriter LogWriter, buildJob Job, state FinishState, logMessage string) {\n\t_, err := logWriter.WriteAndClose([]byte(logMessage))\n\tif err != nil {\n\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).Error(\"couldn't write final log message\")\n\t}\n\n\terr = buildJob.Finish(state)\n\tif err != nil {\n\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).WithField(\"state\", state).Error(\"couldn't update job state\")\n\t}\n}\n\nfunc (s *stepRunScript) Cleanup(state multistep.StateBag) {\n\t\/\/ Nothing to clean up\n}\n<commit_msg>Mark the job as errored if it goes past the max log length<commit_after>package worker\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/travis-ci\/worker\/backend\"\n\t\"github.com\/travis-ci\/worker\/context\"\n\tgocontext \"golang.org\/x\/net\/context\"\n)\n\ntype runScriptReturn struct {\n\tresult *backend.RunResult\n\terr error\n}\n\ntype stepRunScript struct {\n\tlogTimeout time.Duration\n\thardTimeout time.Duration\n\tskipShutdownOnLogTimeout bool\n}\n\nfunc (s *stepRunScript) Run(state multistep.StateBag) multistep.StepAction {\n\tctx := state.Get(\"ctx\").(gocontext.Context)\n\tbuildJob := state.Get(\"buildJob\").(Job)\n\tinstance := state.Get(\"instance\").(backend.Instance)\n\tlogWriter := state.Get(\"logWriter\").(LogWriter)\n\tcancelChan := state.Get(\"cancelChan\").(<-chan struct{})\n\n\tcontext.LoggerFromContext(ctx).Info(\"running script\")\n\tdefer context.LoggerFromContext(ctx).Info(\"finished script\")\n\n\tresultChan := make(chan runScriptReturn, 1)\n\tgo func() {\n\t\tresult, err := instance.RunScript(ctx, logWriter)\n\t\tresultChan <- runScriptReturn{\n\t\t\tresult: result,\n\t\t\terr: err,\n\t\t}\n\t}()\n\n\tselect {\n\tcase r := <-resultChan:\n\t\tif r.err == ErrWrotePastMaxLogLength {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"wrote past maximum log length\")\n\t\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, \"\\n\\nThe job exceeded the maxmimum log length, and has been terminated.\\n\\n\")\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\t\/\/ We need to check for this since it's possible that the RunScript\n\t\t\/\/ implementation returns with the error too quickly for the ctx.Done()\n\t\t\/\/ case branch below to catch it.\n\t\tif r.err == gocontext.DeadlineExceeded {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"hard timeout exceeded, terminating\")\n\t\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, \"\\n\\nThe job exceeded the maxmimum time limit for jobs, and has been terminated.\\n\\n\")\n\t\t}\n\n\t\tif r.err != nil {\n\t\t\tif !r.result.Completed {\n\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", r.err).WithField(\"completed\", r.result.Completed).Error(\"couldn't run script, attempting requeue\")\n\t\t\t\terr := buildJob.Requeue()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).Error(\"couldn't requeue job\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", r.err).WithField(\"completed\", r.result.Completed).Error(\"couldn't run script\")\n\t\t\t}\n\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tstate.Put(\"scriptResult\", r.result)\n\n\t\treturn multistep.ActionContinue\n\tcase <-ctx.Done():\n\t\tif ctx.Err() == gocontext.DeadlineExceeded {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"hard timeout exceeded, terminating\")\n\t\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, \"\\n\\nThe job exceeded the maxmimum time limit for jobs, and has been terminated.\\n\\n\")\n\t\t} else {\n\t\t\tcontext.LoggerFromContext(ctx).Info(\"context was cancelled, stopping job\")\n\t\t}\n\n\t\treturn multistep.ActionHalt\n\tcase <-cancelChan:\n\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateCancelled, \"\\n\\nDone: Job Cancelled\\n\\n\")\n\n\t\treturn multistep.ActionHalt\n\tcase <-logWriter.Timeout():\n\t\ts.writeLogAndFinishWithState(ctx, logWriter, buildJob, FinishStateErrored, fmt.Sprintf(\"\\n\\nNo output has been received in the last %v, this potentially indicates a stalled build or something wrong with the build itself.\\n\\nThe build has been terminated\\n\\n\", s.logTimeout))\n\n\t\tif s.skipShutdownOnLogTimeout {\n\t\t\tstate.Put(\"skipShutdown\", true)\n\t\t}\n\n\t\treturn multistep.ActionHalt\n\t}\n}\n\nfunc (s *stepRunScript) writeLogAndFinishWithState(ctx gocontext.Context, logWriter LogWriter, buildJob Job, state FinishState, logMessage string) {\n\t_, err := logWriter.WriteAndClose([]byte(logMessage))\n\tif err != nil {\n\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).Error(\"couldn't write final log message\")\n\t}\n\n\terr = buildJob.Finish(state)\n\tif err != nil {\n\t\tcontext.LoggerFromContext(ctx).WithField(\"err\", err).WithField(\"state\", state).Error(\"couldn't update job state\")\n\t}\n}\n\nfunc (s *stepRunScript) Cleanup(state multistep.StateBag) {\n\t\/\/ Nothing to clean up\n}\n<|endoftext|>"} {"text":"<commit_before>package streams\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/GitbookIO\/go-gitbook-api\/utils\"\n)\n\ntype StreamFunc func(path string) (io.ReadCloser, error)\n\nfunc PickStream(p string) (io.ReadCloser, error) {\n\tbasepath := filepath.Base(p)\n\n\tif !exists(p) {\n\t\treturn nil, fmt.Errorf(\"PickStream: Path '%s' does not exist\", p)\n\t}\n\n\t\/\/ Tar.gz\n\tif strings.HasSuffix(basepath, \".tar.gz\") || strings.HasSuffix(basepath, \".tgz\") {\n\t\treturn File(p)\n\t}\n\n\t\/\/ Git repo\n\tif isGitDir(p) {\n\t\treturn GitHead(p)\n\t} else if dir := path.Join(p, \".git\"); isGitDir(dir) {\n\t\treturn GitHead(dir)\n\t}\n\n\t\/\/ Standard folder\n\treturn Folder(p)\n}\n\nfunc GitHead(p string) (io.ReadCloser, error) {\n\treturn Git(p, \"HEAD\")\n}\n\n\/\/ Git returns an io.ReadCloser of a repo as a tar.gz\nfunc Git(p, ref string) (io.ReadCloser, error) {\n\treturn utils.GitTarGz(p, ref)\n}\n\nfunc Folder(p string) (io.ReadCloser, error) {\n\treturn utils.TarGzExclude(\n\t\tp,\n\n\t\t\/\/ Excluded files & folders\n\t\t\".git\",\n\t\t\"node_modules\",\n\t\t\"bower\",\n\t\t\"_book\",\n\t\t\"book.pdf\",\n\t\t\"book.mobi\",\n\t\t\"book.epub\",\n\t)\n}\n\nfunc File(p string) (io.ReadCloser, error) {\n\treturn os.Open(p)\n}\n\nfunc isGitDir(dirpath string) bool {\n\treturn (exists(path.Join(dirpath, \"HEAD\")) &&\n\t\texists(path.Join(dirpath, \"objects\")) &&\n\t\texists(path.Join(dirpath, \"refs\")))\n}\n\n\/\/ Does a file exist on disk ?\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}\n<commit_msg>Add GitRef func to streams\/streams.go<commit_after>package streams\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/GitbookIO\/go-gitbook-api\/utils\"\n)\n\ntype StreamFunc func(path string) (io.ReadCloser, error)\n\nfunc PickStream(p string) (io.ReadCloser, error) {\n\tbasepath := filepath.Base(p)\n\n\tif !exists(p) {\n\t\treturn nil, fmt.Errorf(\"PickStream: Path '%s' does not exist\", p)\n\t}\n\n\t\/\/ Tar.gz\n\tif strings.HasSuffix(basepath, \".tar.gz\") || strings.HasSuffix(basepath, \".tgz\") {\n\t\treturn File(p)\n\t}\n\n\t\/\/ Git repo\n\tif isGitDir(p) {\n\t\treturn GitHead(p)\n\t} else if dir := path.Join(p, \".git\"); isGitDir(dir) {\n\t\treturn GitHead(dir)\n\t}\n\n\t\/\/ Standard folder\n\treturn Folder(p)\n}\n\nfunc GitHead(p string) (io.ReadCloser, error) {\n\treturn Git(p, \"HEAD\")\n}\n\n\/\/ Git returns an io.ReadCloser of a repo as a tar.gz\nfunc Git(p, ref string) (io.ReadCloser, error) {\n\treturn utils.GitTarGz(p, ref)\n}\n\nfunc GitRef(ref string) StreamFunc {\n\treturn func(p string) (io.ReadCloser, error) {\n\t\treturn Git(p, ref)\n\t}\n}\n\nfunc Folder(p string) (io.ReadCloser, error) {\n\treturn utils.TarGzExclude(\n\t\tp,\n\n\t\t\/\/ Excluded files & folders\n\t\t\".git\",\n\t\t\"node_modules\",\n\t\t\"bower\",\n\t\t\"_book\",\n\t\t\"book.pdf\",\n\t\t\"book.mobi\",\n\t\t\"book.epub\",\n\t)\n}\n\nfunc File(p string) (io.ReadCloser, error) {\n\treturn os.Open(p)\n}\n\nfunc isGitDir(dirpath string) bool {\n\treturn (exists(path.Join(dirpath, \"HEAD\")) &&\n\t\texists(path.Join(dirpath, \"objects\")) &&\n\t\texists(path.Join(dirpath, \"refs\")))\n}\n\n\/\/ Does a file exist on disk ?\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package rpcd\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/triggers\"\n\t\"github.com\/Symantec\/Dominator\/proto\/sub\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (t *rpcType) Update(request sub.UpdateRequest,\n\treply *sub.UpdateResponse) error {\n\trwLock.Lock()\n\tdefer rwLock.Unlock()\n\tfs := fileSystemHistory.FileSystem()\n\tif fs == nil {\n\t\treturn errors.New(\"No file-system history yet\")\n\t}\n\tlogger.Printf(\"Update()\\n\")\n\tif fetchInProgress {\n\t\tlogger.Println(\"Error: fetch already in progress\")\n\t\treturn errors.New(\"fetch already in progress\")\n\t}\n\tif updateInProgress {\n\t\tlogger.Println(\"Error: update progress\")\n\t\treturn errors.New(\"update in progress\")\n\t}\n\tupdateInProgress = true\n\tgo doUpdate(request, fs.RootDirectoryName())\n\treturn nil\n}\n\nfunc doUpdate(request sub.UpdateRequest, rootDirectoryName string) {\n\tdefer clearUpdateInProgress()\n\tvar oldTriggers triggers.Triggers\n\tfile, err := os.Open(oldTriggersFilename)\n\tif err == nil {\n\t\tdecoder := json.NewDecoder(file)\n\t\tvar trig triggers.Triggers\n\t\terr = decoder.Decode(&trig.Triggers)\n\t\tfile.Close()\n\t\tif err == nil {\n\t\t\toldTriggers = trig\n\t\t} else {\n\t\t\tlogger.Printf(\"Error decoding old triggers: %s\", err.Error())\n\t\t}\n\t}\n\tif len(oldTriggers.Triggers) > 0 {\n\t\tprocessMakeInodes(request.InodesToMake, rootDirectoryName,\n\t\t\trequest.MultiplyUsedObjects, &oldTriggers, false)\n\t\tprocessHardlinksToMake(request.HardlinksToMake, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,\n\t\t\tfalse)\n\t\tprocessMakeDirectories(request.DirectoriesToMake, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessChangeDirectories(request.DirectoriesToChange, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessChangeInodes(request.InodesToChange, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tmatchedOldTriggers := oldTriggers.GetMatchedTriggers()\n\t\trunTriggers(matchedOldTriggers, \"stop\")\n\t}\n\tprocessFilesToCopyToCache(request.FilesToCopyToCache, rootDirectoryName)\n\tprocessMakeInodes(request.InodesToMake, rootDirectoryName,\n\t\trequest.MultiplyUsedObjects, request.Triggers, true)\n\tprocessHardlinksToMake(request.HardlinksToMake, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,\n\t\ttrue)\n\tprocessMakeDirectories(request.DirectoriesToMake, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessChangeDirectories(request.DirectoriesToChange, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessChangeInodes(request.InodesToChange, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tmatchedNewTriggers := request.Triggers.GetMatchedTriggers()\n\tfile, err = os.Create(oldTriggersFilename)\n\tif err == nil {\n\t\tb, err := json.Marshal(request.Triggers.Triggers)\n\t\tif err == nil {\n\t\t\tvar out bytes.Buffer\n\t\t\tjson.Indent(&out, b, \"\", \" \")\n\t\t\tout.WriteTo(file)\n\t\t} else {\n\t\t\tlogger.Printf(\"Error marshaling triggers: %s\", err.Error())\n\t\t}\n\t\tfile.Close()\n\t}\n\trunTriggers(matchedNewTriggers, \"start\")\n\t\/\/ TODO(rgooch): Remove debugging hack and implement.\n\ttime.Sleep(time.Second * 15)\n\tlogger.Printf(\"Update() complete\\n\")\n}\n\nfunc clearUpdateInProgress() {\n\trwLock.Lock()\n\tdefer rwLock.Unlock()\n\tupdateInProgress = false\n}\n\nfunc processFilesToCopyToCache(filesToCopyToCache []sub.FileToCopyToCache,\n\trootDirectoryName string) {\n\tfor _, fileToCopy := range filesToCopyToCache {\n\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\tfmt.Printf(\"Copy: %s to cache\\n\", fileToCopy.Name)\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n\nfunc processMakeInodes(inodesToMake []sub.Inode, rootDirectoryName string,\n\tmultiplyUsedObjects map[hash.Hash]uint64, triggers *triggers.Triggers,\n\ttakeAction bool) {\n\tfor _, inode := range inodesToMake {\n\t\tfullPathname := path.Join(rootDirectoryName, inode.Name)\n\t\ttriggers.Match(inode.Name)\n\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\tfmt.Printf(\"Make inode: %s\\n\", fullPathname)\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n\nfunc processHardlinksToMake(hardlinksToMake []sub.Hardlink,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, hardlink := range hardlinksToMake {\n\t\ttriggers.Match(hardlink.NewLink)\n\t\tfmt.Printf(\"Link: %s => %s\\n\", hardlink.NewLink, hardlink.Target)\n\t\t\/\/ TODO(rgooch): Implement.\n\t\t\/\/ err := os.Link(path.Join(rootDirectoryName, hardlink.Target),\n\t\t\/\/\tpath.Join(rootDirectoryName, hardlink.NewLink))\n\t}\n}\n\nfunc processDeletes(pathsToDelete []string, rootDirectoryName string,\n\ttriggers *triggers.Triggers, takeAction bool) {\n\tfor _, pathname := range pathsToDelete {\n\t\tfullPathname := path.Join(rootDirectoryName, pathname)\n\t\ttriggers.Match(pathname)\n\t\tif takeAction {\n\t\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\t\tfmt.Printf(\"Delete: %s\\n\", fullPathname)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processMakeDirectories(directoriesToMake []sub.Directory,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, newdir := range directoriesToMake {\n\t\tif skipPath(newdir.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tfullPathname := path.Join(rootDirectoryName, newdir.Name)\n\t\ttriggers.Match(newdir.Name)\n\t\tif takeAction {\n\t\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\t\tfmt.Printf(\"Mkdir: %s\\n\", fullPathname)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processChangeDirectories(directoriesToChange []sub.Directory,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, directory := range directoriesToChange {\n\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\tfmt.Printf(\"Change directory: %s\\n\", directory.Name)\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n\nfunc processChangeInodes(inodesToChange []sub.Inode,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, inode := range inodesToChange {\n\t\t\/\/ TODO(rgooch): Remove debugging.\n\t\tfmt.Printf(\"Change inode: %s\\n\", inode.Name)\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n\nfunc skipPath(pathname string) bool {\n\tif scannerConfiguration.ScanFilter.Match(pathname) {\n\t\treturn true\n\t}\n\tif pathname == \"\/.subd\" {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(pathname, \"\/.subd\/\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runTriggers(triggers []*triggers.Trigger, action string) {\n\t\/\/ For \"start\" action, if there is a reboot trigger, just do that one.\n\tif action == \"start\" {\n\t\tfor _, trigger := range triggers {\n\t\t\tif trigger.Service == \"reboot\" {\n\t\t\t\tlogger.Print(\"Rebooting\")\n\t\t\t\t\/\/ TODO(rgooch): Remove debugging output.\n\t\t\t\tcmd := exec.Command(\"echo\", \"reboot\")\n\t\t\t\tcmd.Stdout = os.Stdout\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\tlogger.Print(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tppid := fmt.Sprint(os.Getppid())\n\tfor _, trigger := range triggers {\n\t\tif trigger.Service == \"reboot\" && action == \"stop\" {\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Printf(\"Action: service %s %s\\n\", trigger.Service, action)\n\t\t\/\/ TODO(rgooch): Remove debugging output.\n\t\tcmd := exec.Command(\"run-in-mntns\", ppid, \"echo\", \"service\", action,\n\t\t\ttrigger.Service)\n\t\tcmd.Stdout = os.Stdout\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlogger.Print(err)\n\t\t}\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n<commit_msg>Move debugging output to logger in subd.Update() RPC. Show RPC duration.<commit_after>package rpcd\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/triggers\"\n\t\"github.com\/Symantec\/Dominator\/proto\/sub\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (t *rpcType) Update(request sub.UpdateRequest,\n\treply *sub.UpdateResponse) error {\n\trwLock.Lock()\n\tdefer rwLock.Unlock()\n\tfs := fileSystemHistory.FileSystem()\n\tif fs == nil {\n\t\treturn errors.New(\"No file-system history yet\")\n\t}\n\tlogger.Printf(\"Update()\\n\")\n\tif fetchInProgress {\n\t\tlogger.Println(\"Error: fetch already in progress\")\n\t\treturn errors.New(\"fetch already in progress\")\n\t}\n\tif updateInProgress {\n\t\tlogger.Println(\"Error: update progress\")\n\t\treturn errors.New(\"update in progress\")\n\t}\n\tupdateInProgress = true\n\tgo doUpdate(request, fs.RootDirectoryName())\n\treturn nil\n}\n\nfunc doUpdate(request sub.UpdateRequest, rootDirectoryName string) {\n\tdefer clearUpdateInProgress()\n\tstartTime := time.Now()\n\tvar oldTriggers triggers.Triggers\n\tfile, err := os.Open(oldTriggersFilename)\n\tif err == nil {\n\t\tdecoder := json.NewDecoder(file)\n\t\tvar trig triggers.Triggers\n\t\terr = decoder.Decode(&trig.Triggers)\n\t\tfile.Close()\n\t\tif err == nil {\n\t\t\toldTriggers = trig\n\t\t} else {\n\t\t\tlogger.Printf(\"Error decoding old triggers: %s\", err.Error())\n\t\t}\n\t}\n\tif len(oldTriggers.Triggers) > 0 {\n\t\tprocessMakeInodes(request.InodesToMake, rootDirectoryName,\n\t\t\trequest.MultiplyUsedObjects, &oldTriggers, false)\n\t\tprocessHardlinksToMake(request.HardlinksToMake, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessDeletes(request.PathsToDelete, rootDirectoryName, &oldTriggers,\n\t\t\tfalse)\n\t\tprocessMakeDirectories(request.DirectoriesToMake, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessChangeDirectories(request.DirectoriesToChange, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tprocessChangeInodes(request.InodesToChange, rootDirectoryName,\n\t\t\t&oldTriggers, false)\n\t\tmatchedOldTriggers := oldTriggers.GetMatchedTriggers()\n\t\trunTriggers(matchedOldTriggers, \"stop\")\n\t}\n\tprocessFilesToCopyToCache(request.FilesToCopyToCache, rootDirectoryName)\n\tprocessMakeInodes(request.InodesToMake, rootDirectoryName,\n\t\trequest.MultiplyUsedObjects, request.Triggers, true)\n\tprocessHardlinksToMake(request.HardlinksToMake, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessDeletes(request.PathsToDelete, rootDirectoryName, request.Triggers,\n\t\ttrue)\n\tprocessMakeDirectories(request.DirectoriesToMake, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessChangeDirectories(request.DirectoriesToChange, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tprocessChangeInodes(request.InodesToChange, rootDirectoryName,\n\t\trequest.Triggers, true)\n\tmatchedNewTriggers := request.Triggers.GetMatchedTriggers()\n\tfile, err = os.Create(oldTriggersFilename)\n\tif err == nil {\n\t\tb, err := json.Marshal(request.Triggers.Triggers)\n\t\tif err == nil {\n\t\t\tvar out bytes.Buffer\n\t\t\tjson.Indent(&out, b, \"\", \" \")\n\t\t\tout.WriteTo(file)\n\t\t} else {\n\t\t\tlogger.Printf(\"Error marshaling triggers: %s\", err.Error())\n\t\t}\n\t\tfile.Close()\n\t}\n\trunTriggers(matchedNewTriggers, \"start\")\n\ttimeTaken := time.Since(startTime)\n\tlogger.Printf(\"Update() completed in %s\\n\", timeTaken)\n\t\/\/ TODO(rgooch): Remove debugging hack and implement.\n\ttime.Sleep(time.Second * 15)\n\tlogger.Printf(\"Post-Update() debugging sleep complete\\n\")\n}\n\nfunc clearUpdateInProgress() {\n\trwLock.Lock()\n\tdefer rwLock.Unlock()\n\tupdateInProgress = false\n}\n\nfunc processFilesToCopyToCache(filesToCopyToCache []sub.FileToCopyToCache,\n\trootDirectoryName string) {\n\tfor _, fileToCopy := range filesToCopyToCache {\n\t\tlogger.Printf(\"Copy: %s to cache\\n\", fileToCopy.Name)\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n\nfunc processMakeInodes(inodesToMake []sub.Inode, rootDirectoryName string,\n\tmultiplyUsedObjects map[hash.Hash]uint64, triggers *triggers.Triggers,\n\ttakeAction bool) {\n\tfor _, inode := range inodesToMake {\n\t\tfullPathname := path.Join(rootDirectoryName, inode.Name)\n\t\ttriggers.Match(inode.Name)\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Make inode: %s\\n\", fullPathname)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processHardlinksToMake(hardlinksToMake []sub.Hardlink,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, hardlink := range hardlinksToMake {\n\t\ttriggers.Match(hardlink.NewLink)\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Link: %s => %s\\n\", hardlink.NewLink, hardlink.Target)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t\t\/\/ err := os.Link(path.Join(rootDirectoryName, hardlink.Target),\n\t\t\t\/\/\tpath.Join(rootDirectoryName, hardlink.NewLink))\n\t\t}\n\t}\n}\n\nfunc processDeletes(pathsToDelete []string, rootDirectoryName string,\n\ttriggers *triggers.Triggers, takeAction bool) {\n\tfor _, pathname := range pathsToDelete {\n\t\tfullPathname := path.Join(rootDirectoryName, pathname)\n\t\ttriggers.Match(pathname)\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Delete: %s\\n\", fullPathname)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processMakeDirectories(directoriesToMake []sub.Directory,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, newdir := range directoriesToMake {\n\t\tif skipPath(newdir.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tfullPathname := path.Join(rootDirectoryName, newdir.Name)\n\t\ttriggers.Match(newdir.Name)\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Mkdir: %s\\n\", fullPathname)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processChangeDirectories(directoriesToChange []sub.Directory,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, directory := range directoriesToChange {\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Change directory: %s\\n\", directory.Name)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc processChangeInodes(inodesToChange []sub.Inode,\n\trootDirectoryName string, triggers *triggers.Triggers, takeAction bool) {\n\tfor _, inode := range inodesToChange {\n\t\tif takeAction {\n\t\t\tlogger.Printf(\"Change inode: %s\\n\", inode.Name)\n\t\t\t\/\/ TODO(rgooch): Implement.\n\t\t}\n\t}\n}\n\nfunc skipPath(pathname string) bool {\n\tif scannerConfiguration.ScanFilter.Match(pathname) {\n\t\treturn true\n\t}\n\tif pathname == \"\/.subd\" {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(pathname, \"\/.subd\/\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runTriggers(triggers []*triggers.Trigger, action string) {\n\t\/\/ For \"start\" action, if there is a reboot trigger, just do that one.\n\tif action == \"start\" {\n\t\tfor _, trigger := range triggers {\n\t\t\tif trigger.Service == \"reboot\" {\n\t\t\t\tlogger.Print(\"Rebooting\")\n\t\t\t\t\/\/ TODO(rgooch): Remove debugging output.\n\t\t\t\tcmd := exec.Command(\"echo\", \"reboot\")\n\t\t\t\tcmd.Stdout = os.Stdout\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\tlogger.Print(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tppid := fmt.Sprint(os.Getppid())\n\tfor _, trigger := range triggers {\n\t\tif trigger.Service == \"reboot\" && action == \"stop\" {\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Printf(\"Action: service %s %s\\n\", trigger.Service, action)\n\t\t\/\/ TODO(rgooch): Remove debugging output.\n\t\tcmd := exec.Command(\"run-in-mntns\", ppid, \"echo\", \"service\", action,\n\t\t\ttrigger.Service)\n\t\tcmd.Stdout = os.Stdout\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlogger.Print(err)\n\t\t}\n\t\t\/\/ TODO(rgooch): Implement.\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"log\"\n\n\t\"github.com\/ponzu-cms\/ponzu\/system\/item\"\n\n\t\"github.com\/tidwall\/sjson\"\n)\n\nfunc omit(it interface{}, data []byte) ([]byte, error) {\n\t\/\/ is it Omittable\n\tom, ok := it.(item.Omittable)\n\tif !ok {\n\t\treturn data, nil\n\t}\n\n\treturn omitFields(om, data, \"data.0.\")\n}\n\nfunc omitFields(om item.Omittable, data []byte, pathPrefix string) ([]byte, error) {\n\t\/\/ get fields to omit from json data\n\tfields := om.Omit()\n\n\t\/\/ remove each field from json, all responses contain json object(s) in top-level \"data\" array\n\tvar omitted = data\n\tfor i := range fields {\n\t\tvar err error\n\t\tomitted, err = sjson.DeleteBytes(omitted, pathPrefix+fields[i])\n\t\tif err != nil {\n\t\t\tlog.Println(\"Erorr omitting field:\", fields[i], \"from item.Omittable:\", om)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn omitted, nil\n}\n<commit_msg>adding updated version of omit<commit_after>package api\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/ponzu-cms\/ponzu\/system\/item\"\n\n\t\"github.com\/tidwall\/gjson\"\n\t\"github.com\/tidwall\/sjson\"\n)\n\nfunc omit(it interface{}, data []byte) ([]byte, error) {\n\t\/\/ is it Omittable\n\tom, ok := it.(item.Omittable)\n\tif !ok {\n\t\treturn data, nil\n\t}\n\n\treturn omitFields(om, data, \"data\")\n}\n\nfunc omitFields(om item.Omittable, data []byte, pathPrefix string) ([]byte, error) {\n\t\/\/ get fields to omit from json data\n\tfields := om.Omit()\n\n\t\/\/ remove each field from json, all responses contain json object(s) in top-level \"data\" array\n\tn := int(gjson.GetBytes(data, pathPrefix+\".#\").Int())\n\tfor i := 0; i < n; i++ {\n\t\tfor k := range fields {\n\t\t\tvar err error\n\t\t\tdata, err = sjson.DeleteBytes(data, fmt.Sprintf(\"%s.%d.%s\", pathPrefix, i, fields[k]))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Erorr omitting field:\", fields[k], \"from item.Omittable:\", om)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn data, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nvar (\n\t\/\/ ErrNoAddonExists indicates that there was not addon found in the db\n\tErrNoAddonExists = errors.New(\"No addon exists.\")\n)\n\n\/\/ Addon looks for an addon by its addon_reverse_dns as the key and returns\n\/\/ the url.Values representation of an addon\nfunc Addon(key string) (url.Values, error) {\n\tbuf := &bytes.Buffer{}\n\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\n\t\tval := b.Get([]byte(key))\n\n\t\tif val == nil {\n\t\t\treturn ErrNoAddonExists\n\t\t}\n\n\t\t_, err := buf.Write(val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := url.ParseQuery(buf.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ SetAddon stores the values of an addon into the __addons bucket with a the\n\/\/ addon_reverse_dns field used as the key\nfunc SetAddon(data url.Values) error {\n\t\/\/ we don't know the structure of the addon type from a addon developer, so\n\t\/\/ encoding to json before it's stored in the db is difficult. Instead, we\n\t\/\/ can just encode the url.Values to a query string using the Encode() method.\n\t\/\/ The downside is that we will have to parse the values out of the query\n\t\/\/ string when loading it from the db\n\tv := data.Encode()\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\t\tk := data.Get(\"addon_reverse_dns\")\n\t\tif k == \"\" {\n\t\t\tname := data.Get(\"addon_name\")\n\t\t\treturn fmt.Errorf(`Addon \"%s\" has no identifier to use as key.`, name)\n\t\t}\n\n\t\terr := b.Put([]byte(k), []byte(v))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AddonAll returns all registered addons as a [][]byte\nfunc AddonAll() []url.Values {\n\tvar all []url.Values\n\tbuf := &bytes.Buffer{}\n\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\t\terr := b.ForEach(func(k, v []byte) error {\n\t\t\t_, err := buf.Write(v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tdata, err := url.ParseQuery(buf.String())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tall = append(all, data)\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error finding addons in db with db.AddonAll:\", err)\n\t\treturn nil\n\t}\n\n\treturn all\n}\n\n\/\/ DeleteAddon removes an addon from the db by its key, the addon_reverse_dns\nfunc DeleteAddon(key string) error {\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\n\t\tif err := b.Delete([]byte(key)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AddonExists checks if there is an existing addon stored. The key is an the\n\/\/ value at addon_reverse_dns\nfunc AddonExists(key string) bool {\n\tvar exists bool\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\t\tif b.Get([]byte(key)) == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\texists = true\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error checking existence of addon with key:\", key, \"-\", err)\n\t\treturn false\n\t}\n\n\treturn exists\n}\n<commit_msg>adding bucket in advance in case it doesnt exist and addon init is called before db Init<commit_after>package db\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nvar (\n\t\/\/ ErrNoAddonExists indicates that there was not addon found in the db\n\tErrNoAddonExists = errors.New(\"No addon exists.\")\n)\n\n\/\/ Addon looks for an addon by its addon_reverse_dns as the key and returns\n\/\/ the url.Values representation of an addon\nfunc Addon(key string) (url.Values, error) {\n\tbuf := &bytes.Buffer{}\n\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\n\t\tval := b.Get([]byte(key))\n\n\t\tif val == nil {\n\t\t\treturn ErrNoAddonExists\n\t\t}\n\n\t\t_, err := buf.Write(val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := url.ParseQuery(buf.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ SetAddon stores the values of an addon into the __addons bucket with a the\n\/\/ addon_reverse_dns field used as the key\nfunc SetAddon(data url.Values) error {\n\t\/\/ we don't know the structure of the addon type from a addon developer, so\n\t\/\/ encoding to json before it's stored in the db is difficult. Instead, we\n\t\/\/ can just encode the url.Values to a query string using the Encode() method.\n\t\/\/ The downside is that we will have to parse the values out of the query\n\t\/\/ string when loading it from the db\n\tv := data.Encode()\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\t\tk := data.Get(\"addon_reverse_dns\")\n\t\tif k == \"\" {\n\t\t\tname := data.Get(\"addon_name\")\n\t\t\treturn fmt.Errorf(`Addon \"%s\" has no identifier to use as key.`, name)\n\t\t}\n\n\t\terr := b.Put([]byte(k), []byte(v))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AddonAll returns all registered addons as a [][]byte\nfunc AddonAll() []url.Values {\n\tvar all []url.Values\n\tbuf := &bytes.Buffer{}\n\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\t\terr := b.ForEach(func(k, v []byte) error {\n\t\t\t_, err := buf.Write(v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tdata, err := url.ParseQuery(buf.String())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tall = append(all, data)\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error finding addons in db with db.AddonAll:\", err)\n\t\treturn nil\n\t}\n\n\treturn all\n}\n\n\/\/ DeleteAddon removes an addon from the db by its key, the addon_reverse_dns\nfunc DeleteAddon(key string) error {\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"__addons\"))\n\n\t\tif err := b.Delete([]byte(key)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AddonExists checks if there is an existing addon stored. The key is an the\n\/\/ value at addon_reverse_dns\nfunc AddonExists(key string) bool {\n\tvar exists bool\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(\"__addons\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif b.Get([]byte(key)) == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\texists = true\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error checking existence of addon with key:\", key, \"-\", err)\n\t\treturn false\n\t}\n\n\treturn exists\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/cover\"\n\t\"github.com\/google\/syzkaller\/pkg\/log\"\n\t\"github.com\/google\/syzkaller\/pkg\/rpctype\"\n\t\"github.com\/google\/syzkaller\/pkg\/signal\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t\"github.com\/google\/syzkaller\/sys\"\n)\n\ntype RPCServer struct {\n\tmgr RPCManagerView\n\ttarget *prog.Target\n\tenabledSyscalls []int\n\tstats *Stats\n\tbatchSize int\n\n\tmu sync.Mutex\n\tfuzzers map[string]*Fuzzer\n\tcheckResult *rpctype.CheckArgs\n\tmaxSignal signal.Signal\n\tcorpusSignal signal.Signal\n\tcorpusCover cover.Cover\n}\n\ntype Fuzzer struct {\n\tname string\n\tinputs []rpctype.RPCInput\n\tnewMaxSignal signal.Signal\n}\n\n\/\/ RPCManagerView restricts interface between RPCServer and Manager.\ntype RPCManagerView interface {\n\tfuzzerConnect() ([]rpctype.RPCInput, []string)\n\tmachineChecked(result *rpctype.CheckArgs)\n\tnewInput(inp rpctype.RPCInput, sign signal.Signal)\n\tcandidateBatch(size int) []rpctype.RPCCandidate\n}\n\nfunc startRPCServer(mgr *Manager) (int, error) {\n\tserv := &RPCServer{\n\t\tmgr: mgr,\n\t\ttarget: mgr.target,\n\t\tenabledSyscalls: mgr.enabledSyscalls,\n\t\tstats: mgr.stats,\n\t\tfuzzers: make(map[string]*Fuzzer),\n\t}\n\tserv.batchSize = 5\n\tif serv.batchSize < mgr.cfg.Procs {\n\t\tserv.batchSize = mgr.cfg.Procs\n\t}\n\ts, err := rpctype.NewRPCServer(mgr.cfg.RPC, \"Manager\", serv)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tlog.Logf(0, \"serving rpc on tcp:\/\/%v\", s.Addr())\n\tport := s.Addr().(*net.TCPAddr).Port\n\tgo s.Serve()\n\treturn port, nil\n}\n\nfunc (serv *RPCServer) Connect(a *rpctype.ConnectArgs, r *rpctype.ConnectRes) error {\n\tlog.Logf(1, \"fuzzer %v connected\", a.Name)\n\tserv.stats.vmRestarts.inc()\n\n\tcorpus, memoryLeakFrames := serv.mgr.fuzzerConnect()\n\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tserv.fuzzers[a.Name] = &Fuzzer{\n\t\tname: a.Name,\n\t\tinputs: corpus,\n\t\tnewMaxSignal: serv.maxSignal.Copy(),\n\t}\n\tr.MemoryLeakFrames = memoryLeakFrames\n\tr.EnabledCalls = serv.enabledSyscalls\n\tr.CheckResult = serv.checkResult\n\tr.GitRevision = sys.GitRevision\n\tr.TargetRevision = serv.target.Revision\n\treturn nil\n}\n\nfunc (serv *RPCServer) Check(a *rpctype.CheckArgs, r *int) error {\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tif serv.checkResult != nil {\n\t\treturn nil\n\t}\n\tserv.mgr.machineChecked(a)\n\ta.DisabledCalls = nil\n\tserv.checkResult = a\n\treturn nil\n}\n\nfunc (serv *RPCServer) NewInput(a *rpctype.NewInputArgs, r *int) error {\n\tinputSignal := a.Signal.Deserialize()\n\tlog.Logf(4, \"new input from %v for syscall %v (signal=%v, cover=%v)\",\n\t\ta.Name, a.Call, inputSignal.Len(), len(a.Cover))\n\tif _, err := serv.target.Deserialize(a.RPCInput.Prog, prog.NonStrict); err != nil {\n\t\t\/\/ This should not happen, but we see such cases episodically, reason unknown.\n\t\tlog.Logf(0, \"failed to deserialize program from fuzzer: %v\\n%s\", err, a.RPCInput.Prog)\n\t\treturn nil\n\t}\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tif serv.corpusSignal.Diff(inputSignal).Empty() {\n\t\treturn nil\n\t}\n\tserv.mgr.newInput(a.RPCInput, inputSignal)\n\n\tserv.stats.newInputs.inc()\n\tserv.corpusSignal.Merge(inputSignal)\n\tserv.stats.corpusSignal.set(serv.corpusSignal.Len())\n\tserv.corpusCover.Merge(a.Cover)\n\tserv.stats.corpusCover.set(len(serv.corpusCover))\n\n\ta.RPCInput.Cover = nil \/\/ Don't send coverage back to all fuzzers.\n\tfor _, f := range serv.fuzzers {\n\t\tif f.name == a.Name {\n\t\t\tcontinue\n\t\t}\n\t\tf.inputs = append(f.inputs, a.RPCInput)\n\t}\n\treturn nil\n}\n\nfunc (serv *RPCServer) Poll(a *rpctype.PollArgs, r *rpctype.PollRes) error {\n\tserv.stats.mergeNamed(a.Stats)\n\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tf := serv.fuzzers[a.Name]\n\tif f == nil {\n\t\tlog.Fatalf(\"fuzzer %v is not connected\", a.Name)\n\t}\n\tnewMaxSignal := serv.maxSignal.Diff(a.MaxSignal.Deserialize())\n\tif !newMaxSignal.Empty() {\n\t\tserv.maxSignal.Merge(newMaxSignal)\n\t\tfor _, f1 := range serv.fuzzers {\n\t\t\tif f1 == f {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf1.newMaxSignal.Merge(newMaxSignal)\n\t\t}\n\t}\n\tr.MaxSignal = f.newMaxSignal.Split(500).Serialize()\n\tif a.NeedCandidates {\n\t\tr.Candidates = serv.mgr.candidateBatch(serv.batchSize)\n\t}\n\tif len(r.Candidates) == 0 {\n\t\tfor i := 0; i < serv.batchSize && len(f.inputs) > 0; i++ {\n\t\t\tlast := len(f.inputs) - 1\n\t\t\tr.NewInputs = append(r.NewInputs, f.inputs[last])\n\t\t\tf.inputs[last] = rpctype.RPCInput{}\n\t\t\tf.inputs = f.inputs[:last]\n\t\t}\n\t\tif len(f.inputs) == 0 {\n\t\t\tf.inputs = nil\n\t\t}\n\t}\n\tlog.Logf(4, \"poll from %v: candidates=%v inputs=%v maxsignal=%v\",\n\t\ta.Name, len(r.Candidates), len(r.NewInputs), len(r.MaxSignal.Elems))\n\treturn nil\n}\n<commit_msg>syz-manager: increase initial poll batch size<commit_after>\/\/ Copyright 2018 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/cover\"\n\t\"github.com\/google\/syzkaller\/pkg\/log\"\n\t\"github.com\/google\/syzkaller\/pkg\/rpctype\"\n\t\"github.com\/google\/syzkaller\/pkg\/signal\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t\"github.com\/google\/syzkaller\/sys\"\n)\n\ntype RPCServer struct {\n\tmgr RPCManagerView\n\ttarget *prog.Target\n\tenabledSyscalls []int\n\tstats *Stats\n\tbatchSize int\n\n\tmu sync.Mutex\n\tfuzzers map[string]*Fuzzer\n\tcheckResult *rpctype.CheckArgs\n\tmaxSignal signal.Signal\n\tcorpusSignal signal.Signal\n\tcorpusCover cover.Cover\n}\n\ntype Fuzzer struct {\n\tname string\n\tinputs []rpctype.RPCInput\n\tnewMaxSignal signal.Signal\n}\n\n\/\/ RPCManagerView restricts interface between RPCServer and Manager.\ntype RPCManagerView interface {\n\tfuzzerConnect() ([]rpctype.RPCInput, []string)\n\tmachineChecked(result *rpctype.CheckArgs)\n\tnewInput(inp rpctype.RPCInput, sign signal.Signal)\n\tcandidateBatch(size int) []rpctype.RPCCandidate\n}\n\nfunc startRPCServer(mgr *Manager) (int, error) {\n\tserv := &RPCServer{\n\t\tmgr: mgr,\n\t\ttarget: mgr.target,\n\t\tenabledSyscalls: mgr.enabledSyscalls,\n\t\tstats: mgr.stats,\n\t\tfuzzers: make(map[string]*Fuzzer),\n\t}\n\tserv.batchSize = 5\n\tif serv.batchSize < mgr.cfg.Procs {\n\t\tserv.batchSize = mgr.cfg.Procs\n\t}\n\ts, err := rpctype.NewRPCServer(mgr.cfg.RPC, \"Manager\", serv)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tlog.Logf(0, \"serving rpc on tcp:\/\/%v\", s.Addr())\n\tport := s.Addr().(*net.TCPAddr).Port\n\tgo s.Serve()\n\treturn port, nil\n}\n\nfunc (serv *RPCServer) Connect(a *rpctype.ConnectArgs, r *rpctype.ConnectRes) error {\n\tlog.Logf(1, \"fuzzer %v connected\", a.Name)\n\tserv.stats.vmRestarts.inc()\n\n\tcorpus, memoryLeakFrames := serv.mgr.fuzzerConnect()\n\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tserv.fuzzers[a.Name] = &Fuzzer{\n\t\tname: a.Name,\n\t\tinputs: corpus,\n\t\tnewMaxSignal: serv.maxSignal.Copy(),\n\t}\n\tr.MemoryLeakFrames = memoryLeakFrames\n\tr.EnabledCalls = serv.enabledSyscalls\n\tr.CheckResult = serv.checkResult\n\tr.GitRevision = sys.GitRevision\n\tr.TargetRevision = serv.target.Revision\n\treturn nil\n}\n\nfunc (serv *RPCServer) Check(a *rpctype.CheckArgs, r *int) error {\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tif serv.checkResult != nil {\n\t\treturn nil\n\t}\n\tserv.mgr.machineChecked(a)\n\ta.DisabledCalls = nil\n\tserv.checkResult = a\n\treturn nil\n}\n\nfunc (serv *RPCServer) NewInput(a *rpctype.NewInputArgs, r *int) error {\n\tinputSignal := a.Signal.Deserialize()\n\tlog.Logf(4, \"new input from %v for syscall %v (signal=%v, cover=%v)\",\n\t\ta.Name, a.Call, inputSignal.Len(), len(a.Cover))\n\tif _, err := serv.target.Deserialize(a.RPCInput.Prog, prog.NonStrict); err != nil {\n\t\t\/\/ This should not happen, but we see such cases episodically, reason unknown.\n\t\tlog.Logf(0, \"failed to deserialize program from fuzzer: %v\\n%s\", err, a.RPCInput.Prog)\n\t\treturn nil\n\t}\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tif serv.corpusSignal.Diff(inputSignal).Empty() {\n\t\treturn nil\n\t}\n\tserv.mgr.newInput(a.RPCInput, inputSignal)\n\n\tserv.stats.newInputs.inc()\n\tserv.corpusSignal.Merge(inputSignal)\n\tserv.stats.corpusSignal.set(serv.corpusSignal.Len())\n\tserv.corpusCover.Merge(a.Cover)\n\tserv.stats.corpusCover.set(len(serv.corpusCover))\n\n\ta.RPCInput.Cover = nil \/\/ Don't send coverage back to all fuzzers.\n\tfor _, f := range serv.fuzzers {\n\t\tif f.name == a.Name {\n\t\t\tcontinue\n\t\t}\n\t\tf.inputs = append(f.inputs, a.RPCInput)\n\t}\n\treturn nil\n}\n\nfunc (serv *RPCServer) Poll(a *rpctype.PollArgs, r *rpctype.PollRes) error {\n\tserv.stats.mergeNamed(a.Stats)\n\n\tserv.mu.Lock()\n\tdefer serv.mu.Unlock()\n\n\tf := serv.fuzzers[a.Name]\n\tif f == nil {\n\t\tlog.Fatalf(\"fuzzer %v is not connected\", a.Name)\n\t}\n\tnewMaxSignal := serv.maxSignal.Diff(a.MaxSignal.Deserialize())\n\tif !newMaxSignal.Empty() {\n\t\tserv.maxSignal.Merge(newMaxSignal)\n\t\tfor _, f1 := range serv.fuzzers {\n\t\t\tif f1 == f {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf1.newMaxSignal.Merge(newMaxSignal)\n\t\t}\n\t}\n\tr.MaxSignal = f.newMaxSignal.Split(500).Serialize()\n\tif a.NeedCandidates {\n\t\tr.Candidates = serv.mgr.candidateBatch(serv.batchSize)\n\t}\n\tif len(r.Candidates) == 0 {\n\t\tbatchSize := serv.batchSize\n\t\t\/\/ When the fuzzer starts, it pumps the whole corpus.\n\t\t\/\/ If we do it using the final batchSize, it can be very slow\n\t\t\/\/ (batch of size 6 can take more than 10 mins for 50K corpus and slow kernel).\n\t\t\/\/ So use a larger batch initially (we use no stats as approximation of initial pump).\n\t\tconst initialBatch = 30\n\t\tif len(a.Stats) == 0 && batchSize < initialBatch {\n\t\t\tbatchSize = initialBatch\n\t\t}\n\t\tfor i := 0; i < batchSize && len(f.inputs) > 0; i++ {\n\t\t\tlast := len(f.inputs) - 1\n\t\t\tr.NewInputs = append(r.NewInputs, f.inputs[last])\n\t\t\tf.inputs[last] = rpctype.RPCInput{}\n\t\t\tf.inputs = f.inputs[:last]\n\t\t}\n\t\tif len(f.inputs) == 0 {\n\t\t\tf.inputs = nil\n\t\t}\n\t}\n\tlog.Logf(4, \"poll from %v: candidates=%v inputs=%v maxsignal=%v\",\n\t\ta.Name, len(r.Candidates), len(r.NewInputs), len(r.MaxSignal.Elems))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\npackage tap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org\/x\/sys\/windows\/registry\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar (\n\tIfceNameNotFound = errors.New(\"Failed to find the name of interface.\")\n\tTapDeviceNotFound = errors.New(\"Failed to find the tap device in registry.\")\n\t\/\/ Device Control Codes\n\ttap_win_ioctl_get_mac = tap_control_code(1, 0)\n\ttap_win_ioctl_get_version = tap_control_code(2, 0)\n\ttap_win_ioctl_get_mtu = tap_control_code(3, 0)\n\ttap_win_ioctl_get_info = tap_control_code(4, 0)\n\ttap_ioctl_config_point_to_point = tap_control_code(5, 0)\n\ttap_ioctl_set_media_status = tap_control_code(6, 0)\n\ttap_win_ioctl_config_dhcp_masq = tap_control_code(7, 0)\n\ttap_win_ioctl_get_log_line = tap_control_code(8, 0)\n\ttap_win_ioctl_config_dhcp_set_opt = tap_control_code(9, 0)\n\ttap_ioctl_config_tun = tap_control_code(10, 0)\n\t\/\/ w32 api\n\tfile_device_unknown = uint32(0x00000022)\n)\n\nfunc ctl_code(device_type, function, method, access uint32) uint32 {\n\treturn (device_type << 16) | (access << 14) | (function << 2) | method\n}\n\nfunc tap_control_code(request, method uint32) uint32 {\n\treturn ctl_code(file_device_unknown, request, method, 0)\n}\n\n\/\/ GetDeviceId finds out a TAP device from registry, it requires privileged right.\nfunc getdeviceid() (string, error) {\n\t\/\/ TAP driver key location\n\tregkey := `SYSTEM\\CurrentControlSet\\Control\\Class\\{4D36E972-E325-11CE-BFC1-08002BE10318}`\n\tk, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey, registry.ALL_ACCESS)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer k.Close()\n\t\/\/ read all subkeys\n\tkeys, err := k.ReadSubKeyNames(-1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ find the one with ComponentId == \"tap0901\"\n\tfor _, v := range keys {\n\t\tkey, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey+\"\\\\\"+v, registry.ALL_ACCESS)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tval, _, err := key.GetStringValue(\"ComponentId\")\n\t\tif err != nil {\n\t\t\tgoto next\n\t\t}\n\t\tif val == \"tap0901\" {\n\t\t\tval, _, err = key.GetStringValue(\"NetCfgInstanceId\")\n\t\t\tif err != nil {\n\t\t\t\tgoto next\n\t\t\t}\n\t\t\tkey.Close()\n\t\t\treturn val, nil\n\t\t}\n\tnext:\n\t\tkey.Close()\n\t}\n\treturn \"\", TapDeviceNotFound\n}\n\n\/\/ NewTAP find and open a TAP device.\nfunc newTAP() (ifce *Interface, err error) {\n\tdeviceid, err := getdeviceid()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath := \"\\\\\\\\.\\\\Global\\\\\" + deviceid + \".tap\"\n\tpathp, err := syscall.UTF16PtrFromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ type Handle uintptr\n\tfile, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_SYSTEM, 0)\n\t\/\/ if err hanppens, close the interface.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tsyscall.Close(file)\n\t\t}\n\t\tif err := recover(); err != nil {\n\t\t\tsyscall.Close(file)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bytesReturned uint32\n\t\/\/ find the mac address of tap device.\n\tmac := make([]byte, 6)\n\terr = syscall.DeviceIoControl(file, tap_win_ioctl_get_mac, &mac[0], uint32(len(mac)), &mac[0], uint32(len(mac)), &bytesReturned, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ bring up device.\n\trdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)\n\tcode := []byte{0x01, 0x00, 0x00, 0x00}\n\terr = syscall.DeviceIoControl(file, tap_ioctl_set_media_status, &code[0], uint32(4), &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/TUN\n\t\/\/code2 := []byte{0x0a, 0x03, 0x00, 0x01, 0x0a, 0x03, 0x00, 0x00, 0xff, 0xff, 0xff, 0x00}\n\t\/\/err = syscall.DeviceIoControl(file, tap_ioctl_config_tun, &code2[0], uint32(12), &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)\n\t\/\/if err != nil {\n\t\/\/\tlog.Fatalln(\"code2 err:\", err)\n\t\/\/}\n\tfd := os.NewFile(uintptr(file), path)\n\tifce = &Interface{tap: true, file: fd}\n\tcopy(ifce.mac[:6], mac[:6])\n\n\t\/\/ find the name of tap interface(to set the ip)\n\thwaddr_equal := func(a net.HardwareAddr, b []byte) bool {\n\t\tfor i := 0; i < 6; i++ {\n\t\t\tif a[i] != b[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\tifces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, v := range ifces {\n\t\tif hwaddr_equal(v.HardwareAddr[:6], mac[:6]) {\n\t\t\tifce.name = v.Name\n\t\t\treturn\n\t\t}\n\t}\n\terr = IfceNameNotFound\n\treturn\n}\n\nfunc (ifce *Interface) setIP(ip_mask *net.IPNet) (err error) {\n\tsargs := fmt.Sprintf(\"netsh interface ip set address name=REPLACE_ME source=static addr=REPLACE_ME mask=REPLACE_ME gateway=none\")\n\targs := strings.Split(sargs, \" \")\n\targs[5] = fmt.Sprintf(\"name=\\\"%s\\\"\", ifce.Name)\n\targs[7] = fmt.Sprintf(\"addr=%d.%d.%d.%d\", ip_mask.IP[0], ip_mask.IP[1], ip_mask.IP[2], ip_mask.IP[3])\n\targs[8] = fmt.Sprintf(\"mask=%d.%d.%d.%d\", ip_mask.Mask[0], ip_mask.Mask[1], ip_mask.Mask[2], ip_mask.Mask[3])\n\tcmd := exec.Command(\"netsh\", args...)\n\terr = cmd.Run()\n\treturn\n}\n<commit_msg>Fix set ip on windows.<commit_after>\/\/ +build windows\npackage tap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"golang.org\/x\/sys\/windows\/registry\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar (\n\tIfceNameNotFound = errors.New(\"Failed to find the name of interface.\")\n\tTapDeviceNotFound = errors.New(\"Failed to find the tap device in registry.\")\n\t\/\/ Device Control Codes\n\ttap_win_ioctl_get_mac = tap_control_code(1, 0)\n\ttap_win_ioctl_get_version = tap_control_code(2, 0)\n\ttap_win_ioctl_get_mtu = tap_control_code(3, 0)\n\ttap_win_ioctl_get_info = tap_control_code(4, 0)\n\ttap_ioctl_config_point_to_point = tap_control_code(5, 0)\n\ttap_ioctl_set_media_status = tap_control_code(6, 0)\n\ttap_win_ioctl_config_dhcp_masq = tap_control_code(7, 0)\n\ttap_win_ioctl_get_log_line = tap_control_code(8, 0)\n\ttap_win_ioctl_config_dhcp_set_opt = tap_control_code(9, 0)\n\ttap_ioctl_config_tun = tap_control_code(10, 0)\n\t\/\/ w32 api\n\tfile_device_unknown = uint32(0x00000022)\n)\n\nfunc ctl_code(device_type, function, method, access uint32) uint32 {\n\treturn (device_type << 16) | (access << 14) | (function << 2) | method\n}\n\nfunc tap_control_code(request, method uint32) uint32 {\n\treturn ctl_code(file_device_unknown, request, method, 0)\n}\n\n\/\/ GetDeviceId finds out a TAP device from registry, it requires privileged right.\nfunc getdeviceid() (string, error) {\n\t\/\/ TAP driver key location\n\tregkey := `SYSTEM\\CurrentControlSet\\Control\\Class\\{4D36E972-E325-11CE-BFC1-08002BE10318}`\n\tk, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey, registry.ALL_ACCESS)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer k.Close()\n\t\/\/ read all subkeys\n\tkeys, err := k.ReadSubKeyNames(-1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ find the one with ComponentId == \"tap0901\"\n\tfor _, v := range keys {\n\t\tkey, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey+\"\\\\\"+v, registry.ALL_ACCESS)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tval, _, err := key.GetStringValue(\"ComponentId\")\n\t\tif err != nil {\n\t\t\tgoto next\n\t\t}\n\t\tif val == \"tap0901\" {\n\t\t\tval, _, err = key.GetStringValue(\"NetCfgInstanceId\")\n\t\t\tif err != nil {\n\t\t\t\tgoto next\n\t\t\t}\n\t\t\tkey.Close()\n\t\t\treturn val, nil\n\t\t}\n\tnext:\n\t\tkey.Close()\n\t}\n\treturn \"\", TapDeviceNotFound\n}\n\n\/\/ NewTAP find and open a TAP device.\nfunc newTAP() (ifce *Interface, err error) {\n\tdeviceid, err := getdeviceid()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath := \"\\\\\\\\.\\\\Global\\\\\" + deviceid + \".tap\"\n\tpathp, err := syscall.UTF16PtrFromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ type Handle uintptr\n\tfile, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_SYSTEM, 0)\n\t\/\/ if err hanppens, close the interface.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tsyscall.Close(file)\n\t\t}\n\t\tif err := recover(); err != nil {\n\t\t\tsyscall.Close(file)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bytesReturned uint32\n\t\/\/ find the mac address of tap device.\n\tmac := make([]byte, 6)\n\terr = syscall.DeviceIoControl(file, tap_win_ioctl_get_mac, &mac[0], uint32(len(mac)), &mac[0], uint32(len(mac)), &bytesReturned, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ bring up device.\n\trdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)\n\tcode := []byte{0x01, 0x00, 0x00, 0x00}\n\terr = syscall.DeviceIoControl(file, tap_ioctl_set_media_status, &code[0], uint32(4), &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/TUN\n\t\/\/code2 := []byte{0x0a, 0x03, 0x00, 0x01, 0x0a, 0x03, 0x00, 0x00, 0xff, 0xff, 0xff, 0x00}\n\t\/\/err = syscall.DeviceIoControl(file, tap_ioctl_config_tun, &code2[0], uint32(12), &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)\n\t\/\/if err != nil {\n\t\/\/\tlog.Fatalln(\"code2 err:\", err)\n\t\/\/}\n\tfd := os.NewFile(uintptr(file), path)\n\tifce = &Interface{tap: true, file: fd}\n\tcopy(ifce.mac[:6], mac[:6])\n\n\t\/\/ find the name of tap interface(to set the ip)\n\thwaddr_equal := func(a net.HardwareAddr, b []byte) bool {\n\t\tfor i := 0; i < 6; i++ {\n\t\t\tif a[i] != b[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\tifces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, v := range ifces {\n\t\tif hwaddr_equal(v.HardwareAddr[:6], mac[:6]) {\n\t\t\tifce.name = v.Name\n\t\t\treturn\n\t\t}\n\t}\n\terr = IfceNameNotFound\n\treturn\n}\n\nfunc (ifce *Interface) setIP(ip_mask *net.IPNet) (err error) {\n\tsargs := fmt.Sprintf(\"interface ip set address name=REPLACE_ME source=static addr=REPLACE_ME mask=REPLACE_ME gateway=none\")\n\targs := strings.Split(sargs, \" \")\n\targs[4] = fmt.Sprintf(\"name=\\\"%s\\\"\", ifce.Name())\n\targs[6] = fmt.Sprintf(\"addr=%s\", ip_mask.IP)\n\targs[7] = fmt.Sprintf(\"mask=%d.%d.%d.%d\", ip_mask.Mask[0], ip_mask.Mask[1], ip_mask.Mask[2], ip_mask.Mask[3])\n\tcmd := exec.Command(\"netsh\", args...)\n\terr = cmd.Run()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"mig\"\n\t\"mig\/modules\/filechecker\"\n\t\"os\/exec\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar AMQPBROKER string = \"amqp:\/\/guest:guest@172.21.1.1:5672\/\"\nvar HEARTBEATFREQ string = \"600s\"\n\nfunc getCommands(messages <-chan amqp.Delivery, actions chan []byte, terminate chan bool) error {\n\t\/\/ range waits on the channel and returns all incoming messages\n\t\/\/ range will exit when the channel closes\n\tfor m := range messages {\n\t\tlog.Printf(\"getCommands: received '%s'\", m.Body)\n\t\t\/\/ Ack this message only\n\t\terr := m.Ack(true)\n\t\tif err != nil { panic(err) }\n\t\tactions <- m.Body\n\t\tlog.Printf(\"getCommands: queued in pos. %d\", len(actions))\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc parseCommands(commands <-chan []byte, fCommandChan chan mig.Command, terminate chan bool) error {\n\tvar cmd mig.Command\n\tfor a := range commands {\n\t\terr := json.Unmarshal(a, &cmd)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"parseCommand - json.Unmarshal:\", err)\n\t\t}\n\t\tlog.Printf(\"ParseCommand: Check '%s' Arguments '%s'\",\n\t\t\t cmd.Action.Check, cmd.Action.Arguments)\n\t\tswitch cmd.Action.Check{\n\t\tcase \"filechecker\":\n\t\t\tfCommandChan <- cmd\n\t\t\tlog.Println(\"parseCommands: queued into filechecker\",\n\t\t\t\t \"in pos.\", len(fCommandChan))\n\t\t}\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc runFilechecker(fCommandChan <-chan mig.Command, alertChan chan mig.Alert, resultChan chan mig.Command, terminate chan bool) error {\n\tfor migCmd := range fCommandChan {\n\t\tlog.Printf(\"RunFilechecker: running with args '%s'\", migCmd.Action.Arguments)\n\t\tvar cmdArg string\n\t\tfor _, arg := range migCmd.Action.Arguments {\n\t\t\tcmdArg += arg\n\t\t}\n\t\trunCmd := exec.Command(\".\/filechecker\", cmdArg)\n\t\tcmdout, err := runCmd.StdoutPipe()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tst := time.Now()\n\t\terr = runCmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tresults := make(map[string] mig.FileCheckerResult)\n\t\terr = json.NewDecoder(cmdout).Decode(&results)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tcmdDone := make(chan error)\n\t\tgo func() {\n\t\t\tcmdDone <-runCmd.Wait()\n\t\t}()\n\t\tselect {\n\t\t\/\/ kill the process when timeout expires\n\t\tcase <-time.After(30 * time.Second):\n\t\t\tif err := runCmd.Process.Kill(); err != nil {\n\t\t\t\tlog.Fatal(\"failed to kill:\", err)\n\t\t\t}\n\t\t\tlog.Fatal(\"runFileChecker: command '%s' timed out\", migCmd)\n\t\t\/\/ exit normally\n\t\tcase err := <-cmdDone:\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t\tfor _, r := range results {\n\t\t\tlog.Println(\"runFileChecker: command\", migCmd,\"tested\",\n\t\t\t\t r.TestedFiles, \"files in\", time.Now().Sub(st))\n\t\t\tif r.ResultCount > 0 {\n\t\t\t\tfor _, f := range r.Files {\n\t\t\t\t\talertChan <- mig.Alert{\n\t\t\t\t\t\tArguments: migCmd.Action.Arguments,\n\t\t\t\t\t\tItem: f,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tmigCmd.FCResults = append(migCmd.FCResults, r)\n\t\t}\n\t\tresultChan <- migCmd\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc raiseAlerts(alertChan chan mig.Alert, terminate chan bool) error {\n\tfor a := range alertChan {\n\t\tlog.Printf(\"raiseAlerts: IOC '%s' positive match on '%s'\",\n\t\t\t a.Arguments, a.Item)\n\t}\n\treturn nil\n}\n\nfunc sendResults(c *amqp.Channel, agtQueueLoc string, resultChan <-chan mig.Command, terminate chan bool) error {\n\trKey := fmt.Sprintf(\"mig.agents.%s\", agtQueueLoc)\n\tfor r := range resultChan {\n\t\tr.AgentQueueLoc = agtQueueLoc\n\t\tbody, err := json.Marshal(r)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"sendResults - json.Marshal: %v\", err)\n\t\t}\n\t\tmsgXchange(c, \"mig\", rKey, body)\n\t}\n\treturn nil\n}\n\nfunc keepAliveAgent(c *amqp.Channel, regMsg mig.KeepAlive) error {\n\tsleepTime, err := time.ParseDuration(HEARTBEATFREQ)\n\tif err != nil {\n\t\tlog.Fatal(\"sendHeartbeat - time.ParseDuration():\", err)\n\t}\n\tfor {\n\t\tbody, err := json.Marshal(regMsg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"sendHeartbeat - json.Marshal:\", err)\n\t\t}\n\t\tmsgXchange(c, \"mig\", \"mig.keepalive\", body)\n\t\ttime.Sleep(sleepTime)\n\t}\n\treturn nil\n}\n\nfunc msgXchange(c *amqp.Channel, excName, routingKey string, body []byte) error {\n\tmsg := amqp.Publishing{\n\t DeliveryMode: amqp.Persistent,\n\t Timestamp: time.Now(),\n\t ContentType: \"text\/plain\",\n\t Body: []byte(body),\n\t}\n\terr := c.Publish(excName,\n\t\t\troutingKey,\n\t\t\ttrue,\t\/\/ is mandatory\n\t\t\tfalse,\t\/\/ is immediate\n\t\t\tmsg)\t\/\/ AMQP message\n\tif err != nil {\n\t\tlog.Fatalf(\"msgXchange - ChannelPublish: %v\", err)\n\t}\n\tlog.Printf(\"msgXchange: published '%s'\\n\", msg.Body)\n\treturn nil\n}\n\nfunc main() {\n\t\/\/ parse command line argument\n\t\/\/ -m selects the mode {agent, filechecker, ...}\n\tvar mode = flag.String(\"m\", \"agent\", \"module to run (eg. agent, filechecker)\")\n\tflag.Parse()\n\tswitch *mode {\n\tcase \"filechecker\":\n\t\tvar tmparg string\n\t\tfor _, arg := range flag.Args(){\n\t\t\ttmparg = tmparg + arg\n\t\t}\n\t\targs := []byte(tmparg)\n\t\tfmt.Printf(filechecker.Run(args))\n\t\tos.Exit(0)\n\tcase \"agent\":\n\t\tfmt.Printf(\"Launching MIG Agent\")\n\t}\n\n\t\/\/ termChan is used to exit the program\n\ttermChan\t:= make(chan bool)\n\tactionsChan\t:= make(chan []byte, 10)\n\tfCommandChan\t:= make(chan mig.Command, 10)\n\talertChan\t:= make(chan mig.Alert, 10)\n\tresultChan\t:= make(chan mig.Command, 10)\n\thostname, err\t:= os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Hostname(): %v\", err)\n\t}\n\tregMsg := mig.KeepAlive{\n\t\tName: hostname,\n\t\tOS: runtime.GOOS,\n\t\tQueueLoc: fmt.Sprintf(\"%s.%s\", runtime.GOOS, hostname),\n\t\tLastKeepAlive: time.Now(),\n\t}\n\tagentQueue := fmt.Sprintf(\"mig.agt.%s\", regMsg.QueueLoc)\n\tbindings := []mig.Binding{\n\t\tmig.Binding{agentQueue, agentQueue},\n\t\tmig.Binding{agentQueue, \"mig.all\"},\n\t}\n\n\tlog.Println(\"MIG agent starting on\", hostname)\n\n\t\/\/ Connects opens an AMQP connection from the credentials in the URL.\n\tconn, err := amqp.Dial(AMQPBROKER)\n\tif err != nil {\n\t\tlog.Fatalf(\"amqp.Dial(): %v\", err)\n\t}\n\tdefer conn.Close()\n\tc, err := conn.Channel()\n\tif err != nil {\n\t\tlog.Fatalf(\"conn.Channel(): %v\", err)\n\t}\n\tfor _, b := range bindings {\n\t\t_, err = c.QueueDeclare(b.Queue,\t\/\/ Queue name\n\t\t\t\t\ttrue,\t\t\/\/ is durable\n\t\t\t\t\tfalse,\t\t\/\/ is autoDelete\n\t\t\t\t\tfalse,\t\t\/\/ is exclusive\n\t\t\t\t\tfalse,\t\t\/\/ is noWait\n\t\t\t\t\tnil)\t\t\/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"QueueDeclare: %v\", err)\n\t\t}\n\t\terr = c.QueueBind(b.Queue,\t\/\/ Queue name\n\t\t\t\tb.Key,\t\t\/\/ Routing key name\n\t\t\t\t\"mig\",\t\t\/\/ Exchange name\n\t\t\t\tfalse,\t\t\/\/ is noWait\n\t\t\t\tnil)\t\t\/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"QueueBind: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Limit the number of message the channel will receive\n\terr = c.Qos(2,\t\t\/\/ prefetch count (in # of msg)\n\t\t 0,\t\t\/\/ prefetch size (in bytes)\n\t\t false)\t\/\/ is global\n\tif err != nil {\n\t\tlog.Fatalf(\"ChannelQoS: %v\", err)\n\t}\n\tfor _, b := range bindings {\n\t\tmsgChan, err := c.Consume(b.Queue, \/\/ queue name\n\t\t\t\t\t\"\",\t\/\/ some tag\n\t\t\t\t\tfalse,\t\/\/ is autoAck\n\t\t\t\t\tfalse,\t\/\/ is exclusive\n\t\t\t\t\tfalse,\t\/\/ is noLocal\n\t\t\t\t\tfalse,\t\/\/ is noWait\n\t\t\t\t\tnil)\t\/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ChannelConsume: %v\", err)\n\t\t}\n\t\tgo getCommands(msgChan, actionsChan, termChan)\n\t}\n\tgo parseCommands(actionsChan, fCommandChan, termChan)\n\tgo runFilechecker(fCommandChan, alertChan, resultChan, termChan)\n\tgo raiseAlerts(alertChan, termChan)\n\tgo sendResults(c, regMsg.QueueLoc, resultChan, termChan)\n\n\t\/\/ All set, ready to keepAlive\n\tgo keepAliveAgent(c, regMsg)\n\n\t\/\/ block until terminate chan is called\n\t<-termChan\n}\n<commit_msg>Agent formatting<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"mig\"\n\t\"mig\/modules\/filechecker\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar AMQPBROKER string = \"amqp:\/\/guest:guest@172.21.1.1:5672\/\"\nvar HEARTBEATFREQ string = \"600s\"\n\nfunc main() {\n\t\/\/ parse command line argument\n\t\/\/ -m selects the mode {agent, filechecker, ...}\n\tvar mode = flag.String(\"m\", \"agent\", \"module to run (eg. agent, filechecker)\")\n\tflag.Parse()\n\tswitch *mode {\n\tcase \"filechecker\":\n\t\tvar tmparg string\n\t\tfor _, arg := range flag.Args() {\n\t\t\ttmparg = tmparg + arg\n\t\t}\n\t\targs := []byte(tmparg)\n\t\tfmt.Printf(filechecker.Run(args))\n\t\tos.Exit(0)\n\tcase \"agent\":\n\t\tfmt.Printf(\"Launching MIG Agent\")\n\t}\n\n\t\/\/ termChan is used to exit the program\n\ttermChan := make(chan bool)\n\tactionsChan := make(chan []byte, 10)\n\tfCommandChan := make(chan mig.Command, 10)\n\talertChan := make(chan mig.Alert, 10)\n\tresultChan := make(chan mig.Command, 10)\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Hostname(): %v\", err)\n\t}\n\t\/\/ declare a keepalive message to initiate registration\n\tregMsg := mig.KeepAlive{\n\t\tName: hostname,\n\t\tOS: runtime.GOOS,\n\t\tQueueLoc: fmt.Sprintf(\"%s.%s\", runtime.GOOS, hostname),\n\t\tLastKeepAlive: time.Now(),\n\t}\n\t\/\/ define two bindings to receive msg from\n\t\/\/ mig.agt.<OS>.<hostname> is for agent specific messages\n\t\/\/ mig.all is for broadcasts\n\tagentQueue := fmt.Sprintf(\"mig.agt.%s\", regMsg.QueueLoc)\n\tbindings := []mig.Binding{\n\t\tmig.Binding{agentQueue, agentQueue},\n\t\tmig.Binding{agentQueue, \"mig.all\"},\n\t}\n\n\tlog.Println(\"MIG agent starting on\", hostname)\n\n\t\/\/ Connects opens an AMQP connection from the credentials in the URL.\n\tconn, err := amqp.Dial(AMQPBROKER)\n\tif err != nil {\n\t\tlog.Fatalf(\"amqp.Dial(): %v\", err)\n\t}\n\tdefer conn.Close()\n\tc, err := conn.Channel()\n\tif err != nil {\n\t\tlog.Fatalf(\"conn.Channel(): %v\", err)\n\t}\n\tfor _, b := range bindings {\n\t\t_, err = c.QueueDeclare(b.Queue, \/\/ Queue name\n\t\t\ttrue, \/\/ is durable\n\t\t\tfalse, \/\/ is autoDelete\n\t\t\tfalse, \/\/ is exclusive\n\t\t\tfalse, \/\/ is noWait\n\t\t\tnil) \/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"QueueDeclare: %v\", err)\n\t\t}\n\t\terr = c.QueueBind(b.Queue, \/\/ Queue name\n\t\t\tb.Key, \/\/ Routing key name\n\t\t\t\"mig\", \/\/ Exchange name\n\t\t\tfalse, \/\/ is noWait\n\t\t\tnil) \/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"QueueBind: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Limit the number of message the channel will receive\n\terr = c.Qos(2, \/\/ prefetch count (in # of msg)\n\t\t0, \/\/ prefetch size (in bytes)\n\t\tfalse) \/\/ is global\n\tif err != nil {\n\t\tlog.Fatalf(\"ChannelQoS: %v\", err)\n\t}\n\tfor _, b := range bindings {\n\t\tmsgChan, err := c.Consume(b.Queue, \/\/ queue name\n\t\t\t\"\", \/\/ some tag\n\t\t\tfalse, \/\/ is autoAck\n\t\t\tfalse, \/\/ is exclusive\n\t\t\tfalse, \/\/ is noLocal\n\t\t\tfalse, \/\/ is noWait\n\t\t\tnil) \/\/ AMQP args\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ChannelConsume: %v\", err)\n\t\t}\n\t\tgo getCommands(msgChan, actionsChan, termChan)\n\t}\n\tgo parseCommands(actionsChan, fCommandChan, termChan)\n\tgo runFilechecker(fCommandChan, alertChan, resultChan, termChan)\n\tgo raiseAlerts(alertChan, termChan)\n\tgo sendResults(c, regMsg.QueueLoc, resultChan, termChan)\n\n\t\/\/ All set, ready to keepAlive\n\tgo keepAliveAgent(c, regMsg)\n\n\t\/\/ block until terminate chan is called\n\t<-termChan\n}\n\nfunc getCommands(messages <-chan amqp.Delivery, actions chan []byte, terminate chan bool) error {\n\t\/\/ range waits on the channel and returns all incoming messages\n\t\/\/ range will exit when the channel closes\n\tfor m := range messages {\n\t\tlog.Printf(\"getCommands: received '%s'\", m.Body)\n\t\t\/\/ Ack this message only\n\t\terr := m.Ack(true)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tactions <- m.Body\n\t\tlog.Printf(\"getCommands: queued in pos. %d\", len(actions))\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc parseCommands(commands <-chan []byte, fCommandChan chan mig.Command, terminate chan bool) error {\n\tvar cmd mig.Command\n\tfor a := range commands {\n\t\terr := json.Unmarshal(a, &cmd)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"parseCommand - json.Unmarshal:\", err)\n\t\t}\n\t\tlog.Printf(\"ParseCommand: Check '%s' Arguments '%s'\",\n\t\t\tcmd.Action.Check, cmd.Action.Arguments)\n\t\tswitch cmd.Action.Check {\n\t\tcase \"filechecker\":\n\t\t\tfCommandChan <- cmd\n\t\t\tlog.Println(\"parseCommands: queued into filechecker\",\n\t\t\t\t\"in pos.\", len(fCommandChan))\n\t\t}\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc runFilechecker(fCommandChan <-chan mig.Command, alertChan chan mig.Alert, resultChan chan mig.Command, terminate chan bool) error {\n\tfor migCmd := range fCommandChan {\n\t\tlog.Printf(\"RunFilechecker: running with args '%s'\", migCmd.Action.Arguments)\n\t\tvar cmdArg string\n\t\tfor _, arg := range migCmd.Action.Arguments {\n\t\t\tcmdArg += arg\n\t\t}\n\t\trunCmd := exec.Command(\".\/filechecker\", cmdArg)\n\t\tcmdout, err := runCmd.StdoutPipe()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tst := time.Now()\n\t\terr = runCmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tresults := make(map[string]mig.FileCheckerResult)\n\t\terr = json.NewDecoder(cmdout).Decode(&results)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tcmdDone := make(chan error)\n\t\tgo func() {\n\t\t\tcmdDone <- runCmd.Wait()\n\t\t}()\n\t\tselect {\n\t\t\/\/ kill the process when timeout expires\n\t\tcase <-time.After(30 * time.Second):\n\t\t\tif err := runCmd.Process.Kill(); err != nil {\n\t\t\t\tlog.Fatal(\"failed to kill:\", err)\n\t\t\t}\n\t\t\tlog.Fatal(\"runFileChecker: command '%s' timed out\", migCmd)\n\t\t\/\/ exit normally\n\t\tcase err := <-cmdDone:\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t\tfor _, r := range results {\n\t\t\tlog.Println(\"runFileChecker: command\", migCmd, \"tested\",\n\t\t\t\tr.TestedFiles, \"files in\", time.Now().Sub(st))\n\t\t\tif r.ResultCount > 0 {\n\t\t\t\tfor _, f := range r.Files {\n\t\t\t\t\talertChan <- mig.Alert{\n\t\t\t\t\t\tArguments: migCmd.Action.Arguments,\n\t\t\t\t\t\tItem: f,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tmigCmd.FCResults = append(migCmd.FCResults, r)\n\t\t}\n\t\tresultChan <- migCmd\n\t}\n\tterminate <- true\n\treturn nil\n}\n\nfunc raiseAlerts(alertChan chan mig.Alert, terminate chan bool) error {\n\tfor a := range alertChan {\n\t\tlog.Printf(\"raiseAlerts: IOC '%s' positive match on '%s'\",\n\t\t\ta.Arguments, a.Item)\n\t}\n\treturn nil\n}\n\nfunc sendResults(c *amqp.Channel, agtQueueLoc string, resultChan <-chan mig.Command, terminate chan bool) error {\n\trKey := fmt.Sprintf(\"mig.sched.%s\", agtQueueLoc)\n\tfor r := range resultChan {\n\t\tr.AgentQueueLoc = agtQueueLoc\n\t\tbody, err := json.Marshal(r)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"sendResults - json.Marshal: %v\", err)\n\t\t}\n\t\tmsgXchange(c, \"mig\", rKey, body)\n\t}\n\treturn nil\n}\n\nfunc keepAliveAgent(c *amqp.Channel, regMsg mig.KeepAlive) error {\n\tsleepTime, err := time.ParseDuration(HEARTBEATFREQ)\n\tif err != nil {\n\t\tlog.Fatal(\"sendHeartbeat - time.ParseDuration():\", err)\n\t}\n\tfor {\n\t\tbody, err := json.Marshal(regMsg)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"sendHeartbeat - json.Marshal:\", err)\n\t\t}\n\t\tmsgXchange(c, \"mig\", \"mig.keepalive\", body)\n\t\ttime.Sleep(sleepTime)\n\t}\n\treturn nil\n}\n\nfunc msgXchange(c *amqp.Channel, excName, routingKey string, body []byte) error {\n\tmsg := amqp.Publishing{\n\t\tDeliveryMode: amqp.Persistent,\n\t\tTimestamp: time.Now(),\n\t\tContentType: \"text\/plain\",\n\t\tBody: []byte(body),\n\t}\n\terr := c.Publish(excName,\n\t\troutingKey,\n\t\ttrue, \/\/ is mandatory\n\t\tfalse, \/\/ is immediate\n\t\tmsg) \/\/ AMQP message\n\tif err != nil {\n\t\tlog.Fatalf(\"msgXchange - ChannelPublish: %v\", err)\n\t}\n\tlog.Printf(\"msgXchange: published '%s'\\n\", msg.Body)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudwatch\n\nimport (\n\t\"encoding\/json\"\n\t\"sort\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/middleware\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\"\n)\n\nvar metricsMap map[string][]string\nvar dimensionsMap map[string][]string\n\nfunc init() {\n\tmetricsMap = map[string][]string{\n\t\t\"AWS\/AutoScaling\": {\"GroupMinSize\", \"GroupMaxSize\", \"GroupDesiredCapacity\", \"GroupInServiceInstances\", \"GroupPendingInstances\", \"GroupStandbyInstances\", \"GroupTerminatingInstances\", \"GroupTotalInstances\"},\n\t\t\"AWS\/Billing\": {\"EstimatedCharges\"},\n\t\t\"AWS\/CloudFront\": {\"Requests\", \"BytesDownloaded\", \"BytesUploaded\", \"TotalErrorRate\", \"4xxErrorRate\", \"5xxErrorRate\"},\n\t\t\"AWS\/CloudSearch\": {\"SuccessfulRequests\", \"SearchableDocuments\", \"IndexUtilization\", \"Partitions\"},\n\t\t\"AWS\/DynamoDB\": {\"ConditionalCheckFailedRequests\", \"ConsumedReadCapacityUnits\", \"ConsumedWriteCapacityUnits\", \"OnlineIndexConsumedWriteCapacity\", \"OnlineIndexPercentageProgress\", \"OnlineIndexThrottleEvents\", \"ProvisionedReadCapacityUnits\", \"ProvisionedWriteCapacityUnits\", \"ReadThrottleEvents\", \"ReturnedItemCount\", \"SuccessfulRequestLatency\", \"SystemErrors\", \"ThrottledRequests\", \"UserErrors\", \"WriteThrottleEvents\"},\n\t\t\"AWS\/ECS\": {\"CPUUtilization\", \"MemoryUtilization\"},\n\t\t\"AWS\/ElastiCache\": {\n\t\t\t\"CPUUtilization\", \"FreeableMemory\", \"NetworkBytesIn\", \"NetworkBytesOut\", \"SwapUsage\",\n\t\t\t\"BytesUsedForCacheItems\", \"BytesReadIntoMemcached\", \"BytesWrittenOutFromMemcached\", \"CasBadval\", \"CasHits\", \"CasMisses\", \"CmdFlush\", \"CmdGet\", \"CmdSet\", \"CurrConnections\", \"CurrItems\", \"DecrHits\", \"DecrMisses\", \"DeleteHits\", \"DeleteMisses\", \"Evictions\", \"GetHits\", \"GetMisses\", \"IncrHits\", \"IncrMisses\", \"Reclaimed\",\n\t\t\t\"BytesUsedForHash\", \"CmdConfigGet\", \"CmdConfigSet\", \"CmdTouch\", \"CurrConfig\", \"EvictedUnfetched\", \"ExpiredUnfetched\", \"SlabsMoved\", \"TouchHits\", \"TouchMisses\",\n\t\t\t\"NewConnections\", \"NewItems\", \"UnusedMemory\",\n\t\t\t\"BytesUsedForCache\", \"CacheHits\", \"CacheMisses\", \"CurrConnections\", \"Evictions\", \"HyperLogLogBasedCmds\", \"NewConnections\", \"Reclaimed\", \"ReplicationBytes\", \"ReplicationLag\", \"SaveInProgress\",\n\t\t\t\"CurrItems\", \"GetTypeCmds\", \"HashBasedCmds\", \"KeyBasedCmds\", \"ListBasedCmds\", \"SetBasedCmds\", \"SetTypeCmds\", \"SortedSetBasedCmds\", \"StringBasedCmds\",\n\t\t},\n\t\t\"AWS\/EBS\": {\"VolumeReadBytes\", \"VolumeWriteBytes\", \"VolumeReadOps\", \"VolumeWriteOps\", \"VolumeTotalReadTime\", \"VolumeTotalWriteTime\", \"VolumeIdleTime\", \"VolumeQueueLength\", \"VolumeThroughputPercentage\", \"VolumeConsumedReadWriteOps\"},\n\t\t\"AWS\/EC2\": {\"CPUCreditUsage\", \"CPUCreditBalance\", \"CPUUtilization\", \"DiskReadOps\", \"DiskWriteOps\", \"DiskReadBytes\", \"DiskWriteBytes\", \"NetworkIn\", \"NetworkOut\", \"StatusCheckFailed\", \"StatusCheckFailed_Instance\", \"StatusCheckFailed_System\"},\n\t\t\"AWS\/ELB\": {\"HealthyHostCount\", \"UnHealthyHostCount\", \"RequestCount\", \"Latency\", \"HTTPCode_ELB_4XX\", \"HTTPCode_ELB_5XX\", \"HTTPCode_Backend_2XX\", \"HTTPCode_Backend_3XX\", \"HTTPCode_Backend_4XX\", \"HTTPCode_Backend_5XX\", \"BackendConnectionErrors\", \"SurgeQueueLength\", \"SpilloverCount\"},\n\t\t\"AWS\/ElasticMapReduce\": {\"IsIdle\", \"JobsRunning\", \"JobsFailed\",\n\t\t\t\"MapTasksRunning\", \"MapTasksRemaining\", \"MapSlotsOpen\", \"RemainingMapTasksPerSlot\", \"ReduceTasksRunning\", \"ReduceTasksRemaining\", \"ReduceSlotsOpen\",\n\t\t\t\"CoreNodesRunning\", \"CoreNodesPending\", \"LiveDataNodes\", \"TaskNodesRunning\", \"TaskNodesPending\", \"LiveTaskTrackers\",\n\t\t\t\"S3BytesWritten\", \"S3BytesRead\", \"HDFSUtilization\", \"HDFSBytesRead\", \"HDFSBytesWritten\", \"MissingBlocks\", \"TotalLoad\",\n\t\t\t\"BackupFailed\", \"MostRecentBackupDuration\", \"TimeSinceLastSuccessfulBackup\",\n\t\t\t\"IsIdle\", \"ContainerAllocated\", \"ContainerReserved\", \"ContainerPending\", \"AppsCompleted\", \"AppsFailed\", \"AppsKilled\", \"AppsPending\", \"AppsRunning\", \"AppsSubmitted\",\n\t\t\t\"CoreNodesRunning\", \"CoreNodesPending\", \"LiveDataNodes\", \"MRTotalNodes\", \"MRActiveNodes\", \"MRLostNodes\", \"MRUnhealthyNodes\", \"MRDecommissionedNodes\", \"MRRebootedNodes\",\n\t\t\t\"S3BytesWritten\", \"S3BytesRead\", \"HDFSUtilization\", \"HDFSBytesRead\", \"HDFSBytesWritten\", \"MissingBlocks\", \"CorruptBlocks\", \"TotalLoad\", \"MemoryTotalMB\", \"MemoryReservedMB\", \"MemoryAvailableMB\", \"MemoryAllocatedMB\", \"PendingDeletionBlocks\", \"UnderReplicatedBlocks\", \"DfsPendingReplicationBlocks\", \"CapacityRemainingGB\",\n\t\t\t\"HbaseBackupFailed\", \"MostRecentBackupDuration\", \"TimeSinceLastSuccessfulBackup\"},\n\t\t\"AWS\/ES\": {\"ClusterStatus.green\", \"ClusterStatus.yellow\", \"ClusterStatus.red\", \"Nodes\", \"SearchableDocuments\", \"DeletedDocuments\", \"CPUUtilization\", \"FreeStorageSpace\", \"JVMMemoryPressure\", \"AutomatedSnapshotFailure\", \"MasterCPUUtilization\", \"MasterFreeStorageSpace\", \"MasterJVMMemoryPressure\", \"ReadLatency\", \"WriteLatency\", \"ReadThroughput\", \"WriteThroughput\", \"DiskQueueLength\", \"ReadIOPS\", \"WriteIOPS\"},\n\t\t\"AWS\/Kinesis\": {\"PutRecord.Bytes\", \"PutRecord.Latency\", \"PutRecord.Success\", \"PutRecords.Bytes\", \"PutRecords.Latency\", \"PutRecords.Records\", \"PutRecords.Success\", \"IncomingBytes\", \"IncomingRecords\", \"GetRecords.Bytes\", \"GetRecords.IteratorAgeMilliseconds\", \"GetRecords.Latency\", \"GetRecords.Success\"},\n\t\t\"AWS\/Lambda\": {\"Invocations\", \"Errors\", \"Duration\", \"Throttles\"},\n\t\t\"AWS\/ML\": {\"PredictCount\", \"PredictFailureCount\"},\n\t\t\"AWS\/OpsWorks\": {\"cpu_idle\", \"cpu_nice\", \"cpu_system\", \"cpu_user\", \"cpu_waitio\", \"load_1\", \"load_5\", \"load_15\", \"memory_buffers\", \"memory_cached\", \"memory_free\", \"memory_swap\", \"memory_total\", \"memory_used\", \"procs\"},\n\t\t\"AWS\/Redshift\": {\"CPUUtilization\", \"DatabaseConnections\", \"HealthStatus\", \"MaintenanceMode\", \"NetworkReceiveThroughput\", \"NetworkTransmitThroughput\", \"PercentageDiskSpaceUsed\", \"ReadIOPS\", \"ReadLatency\", \"ReadThroughput\", \"WriteIOPS\", \"WriteLatency\", \"WriteThroughput\"},\n\t\t\"AWS\/RDS\": {\"BinLogDiskUsage\", \"CPUUtilization\", \"CPUCreditUsage\", \"CPUCreditBalance\", \"DatabaseConnections\", \"DiskQueueDepth\", \"FreeableMemory\", \"FreeStorageSpace\", \"ReplicaLag\", \"SwapUsage\", \"ReadIOPS\", \"WriteIOPS\", \"ReadLatency\", \"WriteLatency\", \"ReadThroughput\", \"WriteThroughput\", \"NetworkReceiveThroughput\", \"NetworkTransmitThroughput\"},\n\t\t\"AWS\/Route53\": {\"HealthCheckStatus\", \"HealthCheckPercentageHealthy\"},\n\t\t\"AWS\/SNS\": {\"NumberOfMessagesPublished\", \"PublishSize\", \"NumberOfNotificationsDelivered\", \"NumberOfNotificationsFailed\"},\n\t\t\"AWS\/SQS\": {\"NumberOfMessagesSent\", \"SentMessageSize\", \"NumberOfMessagesReceived\", \"NumberOfEmptyReceives\", \"NumberOfMessagesDeleted\", \"ApproximateNumberOfMessagesDelayed\", \"ApproximateNumberOfMessagesVisible\", \"ApproximateNumberOfMessagesNotVisible\"},\n\t\t\"AWS\/S3\": {\"BucketSizeBytes\", \"NumberOfObjects\"},\n\t\t\"AWS\/SWF\": {\"DecisionTaskScheduleToStartTime\", \"DecisionTaskStartToCloseTime\", \"DecisionTasksCompleted\", \"StartedDecisionTasksTimedOutOnClose\", \"WorkflowStartToCloseTime\", \"WorkflowsCanceled\", \"WorkflowsCompleted\", \"WorkflowsContinuedAsNew\", \"WorkflowsFailed\", \"WorkflowsTerminated\", \"WorkflowsTimedOut\",\n\t\t\t\"ActivityTaskScheduleToCloseTime\", \"ActivityTaskScheduleToStartTime\", \"ActivityTaskStartToCloseTime\", \"ActivityTasksCanceled\", \"ActivityTasksCompleted\", \"ActivityTasksFailed\", \"ScheduledActivityTasksTimedOutOnClose\", \"ScheduledActivityTasksTimedOutOnStart\", \"StartedActivityTasksTimedOutOnClose\", \"StartedActivityTasksTimedOutOnHeartbeat\"},\n\t\t\"AWS\/StorageGateway\": {\"CacheHitPercent\", \"CachePercentUsed\", \"CachePercentDirty\", \"CloudBytesDownloaded\", \"CloudDownloadLatency\", \"CloudBytesUploaded\", \"UploadBufferFree\", \"UploadBufferPercentUsed\", \"UploadBufferUsed\", \"QueuedWrites\", \"ReadBytes\", \"ReadTime\", \"TotalCacheSize\", \"WriteBytes\", \"WriteTime\", \"TimeSinceLastRecoveryPoint\", \"WorkingStorageFree\", \"WorkingStoragePercentUsed\", \"WorkingStorageUsed\",\n\t\t\t\"CacheHitPercent\", \"CachePercentUsed\", \"CachePercentDirty\", \"ReadBytes\", \"ReadTime\", \"WriteBytes\", \"WriteTime\", \"QueuedWrites\"},\n\t\t\"AWS\/WAF\": {\"AllowedRequests\", \"BlockedRequests\", \"CountedRequests\"},\n\t\t\"AWS\/WorkSpaces\": {\"Available\", \"Unhealthy\", \"ConnectionAttempt\", \"ConnectionSuccess\", \"ConnectionFailure\", \"SessionLaunchTime\", \"InSessionLatency\", \"SessionDisconnect\"},\n\t}\n\tdimensionsMap = map[string][]string{\n\t\t\"AWS\/AutoScaling\": {\"AutoScalingGroupName\"},\n\t\t\"AWS\/Billing\": {\"ServiceName\", \"LinkedAccount\", \"Currency\"},\n\t\t\"AWS\/CloudFront\": {\"DistributionId\", \"Region\"},\n\t\t\"AWS\/CloudSearch\": {},\n\t\t\"AWS\/DynamoDB\": {\"TableName\", \"GlobalSecondaryIndexName\", \"Operation\"},\n\t\t\"AWS\/ECS\": {\"ClusterName\", \"ServiceName\"},\n\t\t\"AWS\/ElastiCache\": {\"CacheClusterId\", \"CacheNodeId\"},\n\t\t\"AWS\/EBS\": {\"VolumeId\"},\n\t\t\"AWS\/EC2\": {\"AutoScalingGroupName\", \"ImageId\", \"InstanceId\", \"InstanceType\"},\n\t\t\"AWS\/ELB\": {\"LoadBalancerName\", \"AvailabilityZone\"},\n\t\t\"AWS\/ElasticMapReduce\": {\"ClusterId\", \"JobFlowId\", \"JobId\"},\n\t\t\"AWS\/ES\": {},\n\t\t\"AWS\/Kinesis\": {\"StreamName\"},\n\t\t\"AWS\/Lambda\": {\"FunctionName\"},\n\t\t\"AWS\/ML\": {\"MLModelId\", \"RequestMode\"},\n\t\t\"AWS\/OpsWorks\": {\"StackId\", \"LayerId\", \"InstanceId\"},\n\t\t\"AWS\/Redshift\": {\"NodeID\", \"ClusterIdentifier\"},\n\t\t\"AWS\/RDS\": {\"DBInstanceIdentifier\", \"DatabaseClass\", \"EngineName\"},\n\t\t\"AWS\/Route53\": {\"HealthCheckId\"},\n\t\t\"AWS\/SNS\": {\"Application\", \"Platform\", \"TopicName\"},\n\t\t\"AWS\/SQS\": {\"QueueName\"},\n\t\t\"AWS\/S3\": {\"BucketName\", \"StorageType\"},\n\t\t\"AWS\/SWF\": {\"Domain\", \"WorkflowTypeName\", \"WorkflowTypeVersion\", \"ActivityTypeName\", \"ActivityTypeVersion\"},\n\t\t\"AWS\/StorageGateway\": {\"GatewayId\", \"GatewayName\", \"VolumeId\"},\n\t\t\"AWS\/WAF\": {\"Rule\", \"WebACL\"},\n\t\t\"AWS\/WorkSpaces\": {\"DirectoryId\", \"WorkspaceId\"},\n\t}\n}\n\n\/\/ Whenever this list is updated, frontend list should also be updated.\n\/\/ Please update the region list in public\/app\/plugins\/datasource\/cloudwatch\/partials\/config.html\nfunc handleGetRegions(req *cwRequest, c *middleware.Context) {\n\tregions := []string{\n\t\t\"ap-northeast-1\", \"ap-southeast-1\", \"ap-southeast-2\", \"cn-north-1\",\n\t\t\"eu-central-1\", \"eu-west-1\", \"sa-east-1\", \"us-east-1\", \"us-west-1\", \"us-west-2\",\n\t}\n\n\tresult := []interface{}{}\n\tfor _, region := range regions {\n\t\tresult = append(result, util.DynMap{\"text\": region, \"value\": region})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetNamespaces(req *cwRequest, c *middleware.Context) {\n\tkeys := []string{}\n\tfor key := range metricsMap {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Sort(sort.StringSlice(keys))\n\n\tresult := []interface{}{}\n\tfor _, key := range keys {\n\t\tresult = append(result, util.DynMap{\"text\": key, \"value\": key})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetMetrics(req *cwRequest, c *middleware.Context) {\n\treqParam := &struct {\n\t\tParameters struct {\n\t\t\tNamespace string `json:\"namespace\"`\n\t\t} `json:\"parameters\"`\n\t}{}\n\n\tjson.Unmarshal(req.Body, reqParam)\n\n\tnamespaceMetrics, exists := metricsMap[reqParam.Parameters.Namespace]\n\tif !exists {\n\t\tc.JsonApiErr(404, \"Unable to find namespace \"+reqParam.Parameters.Namespace, nil)\n\t\treturn\n\t}\n\n\tresult := []interface{}{}\n\tfor _, name := range namespaceMetrics {\n\t\tresult = append(result, util.DynMap{\"text\": name, \"value\": name})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetDimensions(req *cwRequest, c *middleware.Context) {\n\treqParam := &struct {\n\t\tParameters struct {\n\t\t\tNamespace string `json:\"namespace\"`\n\t\t} `json:\"parameters\"`\n\t}{}\n\n\tjson.Unmarshal(req.Body, reqParam)\n\n\tdimensionValues, exists := dimensionsMap[reqParam.Parameters.Namespace]\n\tif !exists {\n\t\tc.JsonApiErr(404, \"Unable to find dimension \"+reqParam.Parameters.Namespace, nil)\n\t\treturn\n\t}\n\n\tresult := []interface{}{}\n\tfor _, name := range dimensionValues {\n\t\tresult = append(result, util.DynMap{\"text\": name, \"value\": name})\n\t}\n\n\tc.JSON(200, result)\n}\n<commit_msg>sort metrics and dimensions<commit_after>package cloudwatch\n\nimport (\n\t\"encoding\/json\"\n\t\"sort\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/middleware\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\"\n)\n\nvar metricsMap map[string][]string\nvar dimensionsMap map[string][]string\n\nfunc init() {\n\tmetricsMap = map[string][]string{\n\t\t\"AWS\/AutoScaling\": {\"GroupMinSize\", \"GroupMaxSize\", \"GroupDesiredCapacity\", \"GroupInServiceInstances\", \"GroupPendingInstances\", \"GroupStandbyInstances\", \"GroupTerminatingInstances\", \"GroupTotalInstances\"},\n\t\t\"AWS\/Billing\": {\"EstimatedCharges\"},\n\t\t\"AWS\/CloudFront\": {\"Requests\", \"BytesDownloaded\", \"BytesUploaded\", \"TotalErrorRate\", \"4xxErrorRate\", \"5xxErrorRate\"},\n\t\t\"AWS\/CloudSearch\": {\"SuccessfulRequests\", \"SearchableDocuments\", \"IndexUtilization\", \"Partitions\"},\n\t\t\"AWS\/DynamoDB\": {\"ConditionalCheckFailedRequests\", \"ConsumedReadCapacityUnits\", \"ConsumedWriteCapacityUnits\", \"OnlineIndexConsumedWriteCapacity\", \"OnlineIndexPercentageProgress\", \"OnlineIndexThrottleEvents\", \"ProvisionedReadCapacityUnits\", \"ProvisionedWriteCapacityUnits\", \"ReadThrottleEvents\", \"ReturnedItemCount\", \"SuccessfulRequestLatency\", \"SystemErrors\", \"ThrottledRequests\", \"UserErrors\", \"WriteThrottleEvents\"},\n\t\t\"AWS\/ECS\": {\"CPUUtilization\", \"MemoryUtilization\"},\n\t\t\"AWS\/ElastiCache\": {\n\t\t\t\"CPUUtilization\", \"FreeableMemory\", \"NetworkBytesIn\", \"NetworkBytesOut\", \"SwapUsage\",\n\t\t\t\"BytesUsedForCacheItems\", \"BytesReadIntoMemcached\", \"BytesWrittenOutFromMemcached\", \"CasBadval\", \"CasHits\", \"CasMisses\", \"CmdFlush\", \"CmdGet\", \"CmdSet\", \"CurrConnections\", \"CurrItems\", \"DecrHits\", \"DecrMisses\", \"DeleteHits\", \"DeleteMisses\", \"Evictions\", \"GetHits\", \"GetMisses\", \"IncrHits\", \"IncrMisses\", \"Reclaimed\",\n\t\t\t\"BytesUsedForHash\", \"CmdConfigGet\", \"CmdConfigSet\", \"CmdTouch\", \"CurrConfig\", \"EvictedUnfetched\", \"ExpiredUnfetched\", \"SlabsMoved\", \"TouchHits\", \"TouchMisses\",\n\t\t\t\"NewConnections\", \"NewItems\", \"UnusedMemory\",\n\t\t\t\"BytesUsedForCache\", \"CacheHits\", \"CacheMisses\", \"CurrConnections\", \"Evictions\", \"HyperLogLogBasedCmds\", \"NewConnections\", \"Reclaimed\", \"ReplicationBytes\", \"ReplicationLag\", \"SaveInProgress\",\n\t\t\t\"CurrItems\", \"GetTypeCmds\", \"HashBasedCmds\", \"KeyBasedCmds\", \"ListBasedCmds\", \"SetBasedCmds\", \"SetTypeCmds\", \"SortedSetBasedCmds\", \"StringBasedCmds\",\n\t\t},\n\t\t\"AWS\/EBS\": {\"VolumeReadBytes\", \"VolumeWriteBytes\", \"VolumeReadOps\", \"VolumeWriteOps\", \"VolumeTotalReadTime\", \"VolumeTotalWriteTime\", \"VolumeIdleTime\", \"VolumeQueueLength\", \"VolumeThroughputPercentage\", \"VolumeConsumedReadWriteOps\"},\n\t\t\"AWS\/EC2\": {\"CPUCreditUsage\", \"CPUCreditBalance\", \"CPUUtilization\", \"DiskReadOps\", \"DiskWriteOps\", \"DiskReadBytes\", \"DiskWriteBytes\", \"NetworkIn\", \"NetworkOut\", \"StatusCheckFailed\", \"StatusCheckFailed_Instance\", \"StatusCheckFailed_System\"},\n\t\t\"AWS\/ELB\": {\"HealthyHostCount\", \"UnHealthyHostCount\", \"RequestCount\", \"Latency\", \"HTTPCode_ELB_4XX\", \"HTTPCode_ELB_5XX\", \"HTTPCode_Backend_2XX\", \"HTTPCode_Backend_3XX\", \"HTTPCode_Backend_4XX\", \"HTTPCode_Backend_5XX\", \"BackendConnectionErrors\", \"SurgeQueueLength\", \"SpilloverCount\"},\n\t\t\"AWS\/ElasticMapReduce\": {\"IsIdle\", \"JobsRunning\", \"JobsFailed\",\n\t\t\t\"MapTasksRunning\", \"MapTasksRemaining\", \"MapSlotsOpen\", \"RemainingMapTasksPerSlot\", \"ReduceTasksRunning\", \"ReduceTasksRemaining\", \"ReduceSlotsOpen\",\n\t\t\t\"CoreNodesRunning\", \"CoreNodesPending\", \"LiveDataNodes\", \"TaskNodesRunning\", \"TaskNodesPending\", \"LiveTaskTrackers\",\n\t\t\t\"S3BytesWritten\", \"S3BytesRead\", \"HDFSUtilization\", \"HDFSBytesRead\", \"HDFSBytesWritten\", \"MissingBlocks\", \"TotalLoad\",\n\t\t\t\"BackupFailed\", \"MostRecentBackupDuration\", \"TimeSinceLastSuccessfulBackup\",\n\t\t\t\"IsIdle\", \"ContainerAllocated\", \"ContainerReserved\", \"ContainerPending\", \"AppsCompleted\", \"AppsFailed\", \"AppsKilled\", \"AppsPending\", \"AppsRunning\", \"AppsSubmitted\",\n\t\t\t\"CoreNodesRunning\", \"CoreNodesPending\", \"LiveDataNodes\", \"MRTotalNodes\", \"MRActiveNodes\", \"MRLostNodes\", \"MRUnhealthyNodes\", \"MRDecommissionedNodes\", \"MRRebootedNodes\",\n\t\t\t\"S3BytesWritten\", \"S3BytesRead\", \"HDFSUtilization\", \"HDFSBytesRead\", \"HDFSBytesWritten\", \"MissingBlocks\", \"CorruptBlocks\", \"TotalLoad\", \"MemoryTotalMB\", \"MemoryReservedMB\", \"MemoryAvailableMB\", \"MemoryAllocatedMB\", \"PendingDeletionBlocks\", \"UnderReplicatedBlocks\", \"DfsPendingReplicationBlocks\", \"CapacityRemainingGB\",\n\t\t\t\"HbaseBackupFailed\", \"MostRecentBackupDuration\", \"TimeSinceLastSuccessfulBackup\"},\n\t\t\"AWS\/ES\": {\"ClusterStatus.green\", \"ClusterStatus.yellow\", \"ClusterStatus.red\", \"Nodes\", \"SearchableDocuments\", \"DeletedDocuments\", \"CPUUtilization\", \"FreeStorageSpace\", \"JVMMemoryPressure\", \"AutomatedSnapshotFailure\", \"MasterCPUUtilization\", \"MasterFreeStorageSpace\", \"MasterJVMMemoryPressure\", \"ReadLatency\", \"WriteLatency\", \"ReadThroughput\", \"WriteThroughput\", \"DiskQueueLength\", \"ReadIOPS\", \"WriteIOPS\"},\n\t\t\"AWS\/Kinesis\": {\"PutRecord.Bytes\", \"PutRecord.Latency\", \"PutRecord.Success\", \"PutRecords.Bytes\", \"PutRecords.Latency\", \"PutRecords.Records\", \"PutRecords.Success\", \"IncomingBytes\", \"IncomingRecords\", \"GetRecords.Bytes\", \"GetRecords.IteratorAgeMilliseconds\", \"GetRecords.Latency\", \"GetRecords.Success\"},\n\t\t\"AWS\/Lambda\": {\"Invocations\", \"Errors\", \"Duration\", \"Throttles\"},\n\t\t\"AWS\/ML\": {\"PredictCount\", \"PredictFailureCount\"},\n\t\t\"AWS\/OpsWorks\": {\"cpu_idle\", \"cpu_nice\", \"cpu_system\", \"cpu_user\", \"cpu_waitio\", \"load_1\", \"load_5\", \"load_15\", \"memory_buffers\", \"memory_cached\", \"memory_free\", \"memory_swap\", \"memory_total\", \"memory_used\", \"procs\"},\n\t\t\"AWS\/Redshift\": {\"CPUUtilization\", \"DatabaseConnections\", \"HealthStatus\", \"MaintenanceMode\", \"NetworkReceiveThroughput\", \"NetworkTransmitThroughput\", \"PercentageDiskSpaceUsed\", \"ReadIOPS\", \"ReadLatency\", \"ReadThroughput\", \"WriteIOPS\", \"WriteLatency\", \"WriteThroughput\"},\n\t\t\"AWS\/RDS\": {\"BinLogDiskUsage\", \"CPUUtilization\", \"CPUCreditUsage\", \"CPUCreditBalance\", \"DatabaseConnections\", \"DiskQueueDepth\", \"FreeableMemory\", \"FreeStorageSpace\", \"ReplicaLag\", \"SwapUsage\", \"ReadIOPS\", \"WriteIOPS\", \"ReadLatency\", \"WriteLatency\", \"ReadThroughput\", \"WriteThroughput\", \"NetworkReceiveThroughput\", \"NetworkTransmitThroughput\"},\n\t\t\"AWS\/Route53\": {\"HealthCheckStatus\", \"HealthCheckPercentageHealthy\"},\n\t\t\"AWS\/SNS\": {\"NumberOfMessagesPublished\", \"PublishSize\", \"NumberOfNotificationsDelivered\", \"NumberOfNotificationsFailed\"},\n\t\t\"AWS\/SQS\": {\"NumberOfMessagesSent\", \"SentMessageSize\", \"NumberOfMessagesReceived\", \"NumberOfEmptyReceives\", \"NumberOfMessagesDeleted\", \"ApproximateNumberOfMessagesDelayed\", \"ApproximateNumberOfMessagesVisible\", \"ApproximateNumberOfMessagesNotVisible\"},\n\t\t\"AWS\/S3\": {\"BucketSizeBytes\", \"NumberOfObjects\"},\n\t\t\"AWS\/SWF\": {\"DecisionTaskScheduleToStartTime\", \"DecisionTaskStartToCloseTime\", \"DecisionTasksCompleted\", \"StartedDecisionTasksTimedOutOnClose\", \"WorkflowStartToCloseTime\", \"WorkflowsCanceled\", \"WorkflowsCompleted\", \"WorkflowsContinuedAsNew\", \"WorkflowsFailed\", \"WorkflowsTerminated\", \"WorkflowsTimedOut\",\n\t\t\t\"ActivityTaskScheduleToCloseTime\", \"ActivityTaskScheduleToStartTime\", \"ActivityTaskStartToCloseTime\", \"ActivityTasksCanceled\", \"ActivityTasksCompleted\", \"ActivityTasksFailed\", \"ScheduledActivityTasksTimedOutOnClose\", \"ScheduledActivityTasksTimedOutOnStart\", \"StartedActivityTasksTimedOutOnClose\", \"StartedActivityTasksTimedOutOnHeartbeat\"},\n\t\t\"AWS\/StorageGateway\": {\"CacheHitPercent\", \"CachePercentUsed\", \"CachePercentDirty\", \"CloudBytesDownloaded\", \"CloudDownloadLatency\", \"CloudBytesUploaded\", \"UploadBufferFree\", \"UploadBufferPercentUsed\", \"UploadBufferUsed\", \"QueuedWrites\", \"ReadBytes\", \"ReadTime\", \"TotalCacheSize\", \"WriteBytes\", \"WriteTime\", \"TimeSinceLastRecoveryPoint\", \"WorkingStorageFree\", \"WorkingStoragePercentUsed\", \"WorkingStorageUsed\",\n\t\t\t\"CacheHitPercent\", \"CachePercentUsed\", \"CachePercentDirty\", \"ReadBytes\", \"ReadTime\", \"WriteBytes\", \"WriteTime\", \"QueuedWrites\"},\n\t\t\"AWS\/WAF\": {\"AllowedRequests\", \"BlockedRequests\", \"CountedRequests\"},\n\t\t\"AWS\/WorkSpaces\": {\"Available\", \"Unhealthy\", \"ConnectionAttempt\", \"ConnectionSuccess\", \"ConnectionFailure\", \"SessionLaunchTime\", \"InSessionLatency\", \"SessionDisconnect\"},\n\t}\n\tdimensionsMap = map[string][]string{\n\t\t\"AWS\/AutoScaling\": {\"AutoScalingGroupName\"},\n\t\t\"AWS\/Billing\": {\"ServiceName\", \"LinkedAccount\", \"Currency\"},\n\t\t\"AWS\/CloudFront\": {\"DistributionId\", \"Region\"},\n\t\t\"AWS\/CloudSearch\": {},\n\t\t\"AWS\/DynamoDB\": {\"TableName\", \"GlobalSecondaryIndexName\", \"Operation\"},\n\t\t\"AWS\/ECS\": {\"ClusterName\", \"ServiceName\"},\n\t\t\"AWS\/ElastiCache\": {\"CacheClusterId\", \"CacheNodeId\"},\n\t\t\"AWS\/EBS\": {\"VolumeId\"},\n\t\t\"AWS\/EC2\": {\"AutoScalingGroupName\", \"ImageId\", \"InstanceId\", \"InstanceType\"},\n\t\t\"AWS\/ELB\": {\"LoadBalancerName\", \"AvailabilityZone\"},\n\t\t\"AWS\/ElasticMapReduce\": {\"ClusterId\", \"JobFlowId\", \"JobId\"},\n\t\t\"AWS\/ES\": {},\n\t\t\"AWS\/Kinesis\": {\"StreamName\"},\n\t\t\"AWS\/Lambda\": {\"FunctionName\"},\n\t\t\"AWS\/ML\": {\"MLModelId\", \"RequestMode\"},\n\t\t\"AWS\/OpsWorks\": {\"StackId\", \"LayerId\", \"InstanceId\"},\n\t\t\"AWS\/Redshift\": {\"NodeID\", \"ClusterIdentifier\"},\n\t\t\"AWS\/RDS\": {\"DBInstanceIdentifier\", \"DatabaseClass\", \"EngineName\"},\n\t\t\"AWS\/Route53\": {\"HealthCheckId\"},\n\t\t\"AWS\/SNS\": {\"Application\", \"Platform\", \"TopicName\"},\n\t\t\"AWS\/SQS\": {\"QueueName\"},\n\t\t\"AWS\/S3\": {\"BucketName\", \"StorageType\"},\n\t\t\"AWS\/SWF\": {\"Domain\", \"WorkflowTypeName\", \"WorkflowTypeVersion\", \"ActivityTypeName\", \"ActivityTypeVersion\"},\n\t\t\"AWS\/StorageGateway\": {\"GatewayId\", \"GatewayName\", \"VolumeId\"},\n\t\t\"AWS\/WAF\": {\"Rule\", \"WebACL\"},\n\t\t\"AWS\/WorkSpaces\": {\"DirectoryId\", \"WorkspaceId\"},\n\t}\n}\n\n\/\/ Whenever this list is updated, frontend list should also be updated.\n\/\/ Please update the region list in public\/app\/plugins\/datasource\/cloudwatch\/partials\/config.html\nfunc handleGetRegions(req *cwRequest, c *middleware.Context) {\n\tregions := []string{\n\t\t\"ap-northeast-1\", \"ap-southeast-1\", \"ap-southeast-2\", \"cn-north-1\",\n\t\t\"eu-central-1\", \"eu-west-1\", \"sa-east-1\", \"us-east-1\", \"us-west-1\", \"us-west-2\",\n\t}\n\n\tresult := []interface{}{}\n\tfor _, region := range regions {\n\t\tresult = append(result, util.DynMap{\"text\": region, \"value\": region})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetNamespaces(req *cwRequest, c *middleware.Context) {\n\tkeys := []string{}\n\tfor key := range metricsMap {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Sort(sort.StringSlice(keys))\n\n\tresult := []interface{}{}\n\tfor _, key := range keys {\n\t\tresult = append(result, util.DynMap{\"text\": key, \"value\": key})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetMetrics(req *cwRequest, c *middleware.Context) {\n\treqParam := &struct {\n\t\tParameters struct {\n\t\t\tNamespace string `json:\"namespace\"`\n\t\t} `json:\"parameters\"`\n\t}{}\n\n\tjson.Unmarshal(req.Body, reqParam)\n\n\tnamespaceMetrics, exists := metricsMap[reqParam.Parameters.Namespace]\n\tif !exists {\n\t\tc.JsonApiErr(404, \"Unable to find namespace \"+reqParam.Parameters.Namespace, nil)\n\t\treturn\n\t}\n\tsort.Sort(sort.StringSlice(namespaceMetrics))\n\n\tresult := []interface{}{}\n\tfor _, name := range namespaceMetrics {\n\t\tresult = append(result, util.DynMap{\"text\": name, \"value\": name})\n\t}\n\n\tc.JSON(200, result)\n}\n\nfunc handleGetDimensions(req *cwRequest, c *middleware.Context) {\n\treqParam := &struct {\n\t\tParameters struct {\n\t\t\tNamespace string `json:\"namespace\"`\n\t\t} `json:\"parameters\"`\n\t}{}\n\n\tjson.Unmarshal(req.Body, reqParam)\n\n\tdimensionValues, exists := dimensionsMap[reqParam.Parameters.Namespace]\n\tif !exists {\n\t\tc.JsonApiErr(404, \"Unable to find dimension \"+reqParam.Parameters.Namespace, nil)\n\t\treturn\n\t}\n\tsort.Sort(sort.StringSlice(dimensionValues))\n\n\tresult := []interface{}{}\n\tfor _, name := range dimensionValues {\n\t\tresult = append(result, util.DynMap{\"text\": name, \"value\": name})\n\t}\n\n\tc.JSON(200, result)\n}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/databus23\/keystone\"\n\t\"github.com\/databus23\/keystone\/cache\/memory\"\n\terrors \"github.com\/go-openapi\/errors\"\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/sapcc\/kubernikus\/pkg\/api\/models\"\n)\n\nvar authURL string\n\nfunc init() {\n\tflag.StringVar(&authURL, \"auth-url\", \"\", \"Openstack identity v3 auth url\")\n}\n\nfunc keystoneAuth() func(token string) (*models.Principal, error) {\n\n\tif !(strings.HasSuffix(\"\/v3\") || strings.HasSuffix(\"\/v3\/\")) {\n\t\tauthURL = path.Join(authURL, \"\/v3\")\n\t}\n\n\tauth := keystone.New(authURL)\n\tauth.TokenCache = memory.New(10 * time.Minute)\n\n\treturn func(token string) (*models.Principal, error) {\n\t\tt, err := auth.Validate(token)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(401, fmt.Sprintf(\"Authentication failed: %s\", err))\n\t\t}\n\t\tif t.Project == nil {\n\t\t\treturn nil, errors.New(401, \"Auth token isn't project scoped\")\n\t\t}\n\t\troles := make([]string, 0, len(t.Roles))\n\t\tfor _, role := range t.Roles {\n\t\t\troles = append(roles, role.Name)\n\t\t}\n\t\treturn &models.Principal{ID: t.User.ID, Name: t.User.Name, Account: t.Project.ID, Roles: roles}, nil\n\t}\n}\n<commit_msg>oh boy<commit_after>package rest\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/databus23\/keystone\"\n\t\"github.com\/databus23\/keystone\/cache\/memory\"\n\terrors \"github.com\/go-openapi\/errors\"\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/sapcc\/kubernikus\/pkg\/api\/models\"\n)\n\nvar authURL string\n\nfunc init() {\n\tflag.StringVar(&authURL, \"auth-url\", \"\", \"Openstack identity v3 auth url\")\n}\n\nfunc keystoneAuth() func(token string) (*models.Principal, error) {\n\n\tif !(strings.HasSuffix(authURL, \"\/v3\") || strings.HasSuffix(authURL, \"\/v3\/\")) {\n\t\tauthURL = path.Join(authURL, \"\/v3\")\n\t}\n\n\tauth := keystone.New(authURL)\n\tauth.TokenCache = memory.New(10 * time.Minute)\n\n\treturn func(token string) (*models.Principal, error) {\n\t\tt, err := auth.Validate(token)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(401, fmt.Sprintf(\"Authentication failed: %s\", err))\n\t\t}\n\t\tif t.Project == nil {\n\t\t\treturn nil, errors.New(401, \"Auth token isn't project scoped\")\n\t\t}\n\t\troles := make([]string, 0, len(t.Roles))\n\t\tfor _, role := range t.Roles {\n\t\t\troles = append(roles, role.Name)\n\t\t}\n\t\treturn &models.Principal{ID: t.User.ID, Name: t.User.Name, Account: t.Project.ID, Roles: roles}, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Camlistore Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\nPackage mongo registers the \"mongo\" blobserver storage type, storing\nblobs using MongoDB.\n\nSample (low-level) config:\n\"\/bs\/\": {\n \"handler\": \"storage-mongo\",\n \"handlerArgs\": {\n \"host\": \"172.17.0.2\",\n \"database\": \"camlitest\"\n }\n},\n\nPossible parameters:\nhost (optional, defaults to localhost)\ndatabase (required)\ncollection (optional, defaults to blobs)\nuser (optional)\npassword (optional)\n*\/\npackage mongo\n\nimport (\n\t\"camlistore.org\/pkg\/blobserver\"\n\t\"camlistore.org\/pkg\/jsonconfig\"\n\t\"camlistore.org\/third_party\/labix.org\/v2\/mgo\"\n)\n\ntype mongoStorage struct {\n\tc *mgo.Collection\n}\n\n\/\/ blobDoc is the document that gets inserted in the MongoDB database\n\/\/ Its fields are exported because they need to be for the mgo driver to pick them up\ntype blobDoc struct {\n\t\/\/ Key contains the string representation of a blob reference (e.g. sha1-200d278aa6dd347f494407385ceab316440d5fba).\n\tKey string\n\t\/\/ Size contains the total size of a blob.\n\tSize uint32\n\t\/\/ Blob contains the raw blob data of the blob the above Key refers to.\n\tBlob []byte\n}\n\nfunc init() {\n\tblobserver.RegisterStorageConstructor(\"mongo\", blobserver.StorageConstructor(newFromConfig))\n}\n\nfunc newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {\n\tcfg, err := configFromJSON(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newMongoStorage(cfg)\n}\n\nfunc newMongoStorage(cfg config) (blobserver.Storage, error) {\n\tsession, err := getConnection(cfg.url())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := session.DB(cfg.database).C(cfg.collection)\n\n\treturn blobserver.Storage(&mongoStorage{c: c}), nil\n\n}\n\n\/\/ Config holds the parameters used to connect to MongoDB.\ntype config struct {\n\tserver string \/\/ Required. Defaults to \"localhost\" in ConfigFromJSON.\n\tdatabase string \/\/ Required.\n\tcollection string \/\/ Required. Defaults to \"blobs\" in ConfigFromJSON.\n\tuser string \/\/ Optional, unless the server was configured with auth on.\n\tpassword string \/\/ Optional, unless the server was configured with auth on.\n}\n\nfunc (cfg *config) url() string {\n\tif cfg.user == \"\" || cfg.password == \"\" {\n\t\treturn cfg.server\n\t}\n\treturn cfg.user + \":\" + cfg.password + \"@\" + cfg.server + \"\/\" + cfg.database\n}\n\n\/\/ ConfigFromJSON populates Config from cfg, and validates\n\/\/ cfg. It returns an error if cfg fails to validate.\nfunc configFromJSON(cfg jsonconfig.Obj) (config, error) {\n\tconf := config{\n\t\tserver: cfg.OptionalString(\"host\", \"localhost\"),\n\t\tdatabase: cfg.RequiredString(\"database\"),\n\t\tcollection: cfg.OptionalString(\"collection\", \"blobs\"),\n\t\tuser: cfg.OptionalString(\"user\", \"\"),\n\t\tpassword: cfg.OptionalString(\"password\", \"\"),\n\t}\n\tif err := cfg.Validate(); err != nil {\n\t\treturn config{}, err\n\t}\n\treturn conf, nil\n}\n\nfunc getConnection(url string) (*mgo.Session, error) {\n\tsession, err := mgo.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession.SetMode(mgo.Monotonic, true)\n\tsession.SetSafe(&mgo.Safe{}) \/\/ so we get an ErrNotFound error when deleting an absent key\n\treturn session, nil\n}\n<commit_msg>blobserver\/mongo, missing index on \"key\"<commit_after>\/*\nCopyright 2014 The Camlistore Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\nPackage mongo registers the \"mongo\" blobserver storage type, storing\nblobs using MongoDB.\n\nSample (low-level) config:\n\"\/bs\/\": {\n \"handler\": \"storage-mongo\",\n \"handlerArgs\": {\n \"host\": \"172.17.0.2\",\n \"database\": \"camlitest\"\n }\n},\n\nPossible parameters:\nhost (optional, defaults to localhost)\ndatabase (required)\ncollection (optional, defaults to blobs)\nuser (optional)\npassword (optional)\n*\/\npackage mongo\n\nimport (\n\t\"camlistore.org\/pkg\/blobserver\"\n\t\"camlistore.org\/pkg\/jsonconfig\"\n\t\"camlistore.org\/third_party\/labix.org\/v2\/mgo\"\n)\n\ntype mongoStorage struct {\n\tc *mgo.Collection\n}\n\n\/\/ blobDoc is the document that gets inserted in the MongoDB database\n\/\/ Its fields are exported because they need to be for the mgo driver to pick them up\ntype blobDoc struct {\n\t\/\/ Key contains the string representation of a blob reference (e.g. sha1-200d278aa6dd347f494407385ceab316440d5fba).\n\tKey string\n\t\/\/ Size contains the total size of a blob.\n\tSize uint32\n\t\/\/ Blob contains the raw blob data of the blob the above Key refers to.\n\tBlob []byte\n}\n\nfunc init() {\n\tblobserver.RegisterStorageConstructor(\"mongo\", blobserver.StorageConstructor(newFromConfig))\n}\n\nfunc newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {\n\tcfg, err := configFromJSON(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newMongoStorage(cfg)\n}\n\nvar uniqueKeyIndex = mgo.Index{\n\tKey: []string{\"key\"},\n\tUnique: true,\n\tDropDups: false,\n\tBackground: false,\n\tSparse: false,\n}\n\nfunc newMongoStorage(cfg config) (blobserver.Storage, error) {\n\tsession, err := getConnection(cfg.url())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := session.DB(cfg.database).C(cfg.collection)\n\terr = c.EnsureIndex(uniqueKeyIndex)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn blobserver.Storage(&mongoStorage{c: c}), nil\n\n}\n\n\/\/ Config holds the parameters used to connect to MongoDB.\ntype config struct {\n\tserver string \/\/ Required. Defaults to \"localhost\" in ConfigFromJSON.\n\tdatabase string \/\/ Required.\n\tcollection string \/\/ Required. Defaults to \"blobs\" in ConfigFromJSON.\n\tuser string \/\/ Optional, unless the server was configured with auth on.\n\tpassword string \/\/ Optional, unless the server was configured with auth on.\n}\n\nfunc (cfg *config) url() string {\n\tif cfg.user == \"\" || cfg.password == \"\" {\n\t\treturn cfg.server\n\t}\n\treturn cfg.user + \":\" + cfg.password + \"@\" + cfg.server + \"\/\" + cfg.database\n}\n\n\/\/ ConfigFromJSON populates Config from cfg, and validates\n\/\/ cfg. It returns an error if cfg fails to validate.\nfunc configFromJSON(cfg jsonconfig.Obj) (config, error) {\n\tconf := config{\n\t\tserver: cfg.OptionalString(\"host\", \"localhost\"),\n\t\tdatabase: cfg.RequiredString(\"database\"),\n\t\tcollection: cfg.OptionalString(\"collection\", \"blobs\"),\n\t\tuser: cfg.OptionalString(\"user\", \"\"),\n\t\tpassword: cfg.OptionalString(\"password\", \"\"),\n\t}\n\tif err := cfg.Validate(); err != nil {\n\t\treturn config{}, err\n\t}\n\treturn conf, nil\n}\n\nfunc getConnection(url string) (*mgo.Session, error) {\n\tsession, err := mgo.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession.SetMode(mgo.Monotonic, true)\n\tsession.SetSafe(&mgo.Safe{}) \/\/ so we get an ErrNotFound error when deleting an absent key\n\treturn session, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage features\n\nimport (\n\t\"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\n\tutilfeature \"k8s.io\/apiserver\/pkg\/util\/feature\"\n\t\"k8s.io\/component-base\/featuregate\"\n)\n\nconst (\n\t\/\/ Every feature gate should add method here following this template:\n\t\/\/\n\t\/\/ \/\/ owner: @username\n\t\/\/ \/\/ alpha: v1.4\n\t\/\/ MyFeature featuregate.Feature = \"MyFeature\"\n\t\/\/\n\t\/\/ Feature gates should be listed in alphabetical, case-sensitive\n\t\/\/ (upper before any lower case character) order. This reduces the risk\n\t\/\/ of code conflicts because changes are more likely to be scattered\n\t\/\/ across the file.\n\n\t\/\/ owner: @jefftree @alexzielenski\n\t\/\/ alpha: v1.26\n\t\/\/\n\t\/\/ Enables an single HTTP endpoint \/discovery\/<version> which supports native HTTP\n\t\/\/ caching with ETags containing all APIResources known to the apiserver.\n\tAggregatedDiscoveryEndpoint featuregate.Feature = \"AggregatedDiscoveryEndpoint\"\n\n\t\/\/ owner: @smarterclayton\n\t\/\/ alpha: v1.8\n\t\/\/ beta: v1.9\n\t\/\/\n\t\/\/ Allow API clients to retrieve resource lists in chunks rather than\n\t\/\/ all at once.\n\tAPIListChunking featuregate.Feature = \"APIListChunking\"\n\n\t\/\/ owner: @MikeSpreitzer @yue9944882\n\t\/\/ alpha: v1.18\n\t\/\/ beta: v1.20\n\t\/\/\n\t\/\/ Enables managing request concurrency with prioritization and fairness at each server.\n\t\/\/ The FeatureGate was introduced in release 1.15 but the feature\n\t\/\/ was not really implemented before 1.18.\n\tAPIPriorityAndFairness featuregate.Feature = \"APIPriorityAndFairness\"\n\n\t\/\/ owner: @ilackams\n\t\/\/ alpha: v1.7\n\t\/\/ beta: v1.16\n\t\/\/\n\t\/\/ Enables compression of REST responses (GET and LIST only)\n\tAPIResponseCompression featuregate.Feature = \"APIResponseCompression\"\n\n\t\/\/ owner: @roycaihw\n\t\/\/ alpha: v1.20\n\t\/\/\n\t\/\/ Assigns each kube-apiserver an ID in a cluster.\n\tAPIServerIdentity featuregate.Feature = \"APIServerIdentity\"\n\n\t\/\/ owner: @dashpole\n\t\/\/ alpha: v1.22\n\t\/\/\n\t\/\/ Add support for distributed tracing in the API Server\n\tAPIServerTracing featuregate.Feature = \"APIServerTracing\"\n\n\t\/\/ owner: @tallclair\n\t\/\/ alpha: v1.7\n\t\/\/ beta: v1.8\n\t\/\/ GA: v1.12\n\t\/\/\n\t\/\/ AdvancedAuditing enables a much more general API auditing pipeline, which includes support for\n\t\/\/ pluggable output backends and an audit policy specifying how different requests should be\n\t\/\/ audited.\n\tAdvancedAuditing featuregate.Feature = \"AdvancedAuditing\"\n\n\t\/\/ owner: @cici37\n\t\/\/ kep: https:\/\/kep.k8s.io\/2876\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.25\n\t\/\/\n\t\/\/ Enables expression validation for Custom Resource\n\tCustomResourceValidationExpressions featuregate.Feature = \"CustomResourceValidationExpressions\"\n\n\t\/\/ owner: @apelisse\n\t\/\/ alpha: v1.12\n\t\/\/ beta: v1.13\n\t\/\/ stable: v1.18\n\t\/\/\n\t\/\/ Allow requests to be processed but not stored, so that\n\t\/\/ validation, merging, mutation can be tested without\n\t\/\/ committing.\n\tDryRun featuregate.Feature = \"DryRun\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.20\n\t\/\/ beta: v1.21\n\t\/\/ GA: v1.24\n\t\/\/\n\t\/\/ Allows for updating watchcache resource version with progress notify events.\n\tEfficientWatchResumption featuregate.Feature = \"EfficientWatchResumption\"\n\n\t\/\/ owner: @aramase\n\t\/\/ kep: https:\/\/kep.k8s.io\/3299\n\t\/\/ alpha: v1.25\n\t\/\/\n\t\/\/ Enables KMS v2 API for encryption at rest.\n\tKMSv2 featuregate.Feature = \"KMSv2\"\n\n\t\/\/ owner: @jiahuif\n\t\/\/ kep: https:\/\/kep.k8s.io\/2887\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables populating \"enum\" field of OpenAPI schemas\n\t\/\/ in the spec returned from kube-apiserver.\n\tOpenAPIEnums featuregate.Feature = \"OpenAPIEnums\"\n\n\t\/\/ owner: @jefftree\n\t\/\/ kep: https:\/\/kep.k8s.io\/2896\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables kubernetes to publish OpenAPI v3\n\tOpenAPIV3 featuregate.Feature = \"OpenAPIV3\"\n\n\t\/\/ owner: @caesarxuchao\n\t\/\/ alpha: v1.15\n\t\/\/ beta: v1.16\n\t\/\/\n\t\/\/ Allow apiservers to show a count of remaining items in the response\n\t\/\/ to a chunking list request.\n\tRemainingItemCount featuregate.Feature = \"RemainingItemCount\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.16\n\t\/\/ beta: v1.20\n\t\/\/ GA: v1.24\n\t\/\/\n\t\/\/ Deprecates and removes SelfLink from ObjectMeta and ListMeta.\n\tRemoveSelfLink featuregate.Feature = \"RemoveSelfLink\"\n\n\t\/\/ owner: @apelisse, @lavalamp\n\t\/\/ alpha: v1.14\n\t\/\/ beta: v1.16\n\t\/\/ stable: v1.22\n\t\/\/\n\t\/\/ Server-side apply. Merging happens on the server.\n\tServerSideApply featuregate.Feature = \"ServerSideApply\"\n\n\t\/\/ owner: @kevindelgado\n\t\/\/ kep: https:\/\/kep.k8s.io\/2885\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables server-side field validation.\n\tServerSideFieldValidation featuregate.Feature = \"ServerSideFieldValidation\"\n\n\t\/\/ owner: @caesarxuchao @roycaihw\n\t\/\/ alpha: v1.20\n\t\/\/\n\t\/\/ Enable the storage version API.\n\tStorageVersionAPI featuregate.Feature = \"StorageVersionAPI\"\n\n\t\/\/ owner: @caesarxuchao\n\t\/\/ alpha: v1.14\n\t\/\/ beta: v1.15\n\t\/\/\n\t\/\/ Allow apiservers to expose the storage version hash in the discovery\n\t\/\/ document.\n\tStorageVersionHash featuregate.Feature = \"StorageVersionHash\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.15\n\t\/\/ beta: v1.16\n\t\/\/ GA: v1.17\n\t\/\/\n\t\/\/ Enables support for watch bookmark events.\n\tWatchBookmark featuregate.Feature = \"WatchBookmark\"\n)\n\nfunc init() {\n\truntime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))\n}\n\n\/\/ defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.\n\/\/ To add a new feature, define a key for it above and add it here. The features will be\n\/\/ available throughout Kubernetes binaries.\nvar defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{\n\tAggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAPIListChunking: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIResponseCompression: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIServerIdentity: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAPIServerTracing: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAdvancedAuditing: {Default: true, PreRelease: featuregate.GA},\n\n\tCustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta},\n\n\tDryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, \/\/ remove in 1.28\n\n\tEfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n\n\tKMSv2: {Default: false, PreRelease: featuregate.Alpha},\n\n\tOpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},\n\n\tOpenAPIV3: {Default: true, PreRelease: featuregate.Beta},\n\n\tRemainingItemCount: {Default: true, PreRelease: featuregate.Beta},\n\n\tRemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n\n\tServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, \/\/ remove in 1.29\n\n\tServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta},\n\n\tStorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha},\n\n\tStorageVersionHash: {Default: true, PreRelease: featuregate.Beta},\n\n\tWatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n}\n<commit_msg>Add feature gate CelValidatingAdmissionExtensibility<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage features\n\nimport (\n\t\"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\n\tutilfeature \"k8s.io\/apiserver\/pkg\/util\/feature\"\n\t\"k8s.io\/component-base\/featuregate\"\n)\n\nconst (\n\t\/\/ Every feature gate should add method here following this template:\n\t\/\/\n\t\/\/ \/\/ owner: @username\n\t\/\/ \/\/ alpha: v1.4\n\t\/\/ MyFeature featuregate.Feature = \"MyFeature\"\n\t\/\/\n\t\/\/ Feature gates should be listed in alphabetical, case-sensitive\n\t\/\/ (upper before any lower case character) order. This reduces the risk\n\t\/\/ of code conflicts because changes are more likely to be scattered\n\t\/\/ across the file.\n\n\t\/\/ owner: @jefftree @alexzielenski\n\t\/\/ alpha: v1.26\n\t\/\/\n\t\/\/ Enables an single HTTP endpoint \/discovery\/<version> which supports native HTTP\n\t\/\/ caching with ETags containing all APIResources known to the apiserver.\n\tAggregatedDiscoveryEndpoint featuregate.Feature = \"AggregatedDiscoveryEndpoint\"\n\n\t\/\/ owner: @smarterclayton\n\t\/\/ alpha: v1.8\n\t\/\/ beta: v1.9\n\t\/\/\n\t\/\/ Allow API clients to retrieve resource lists in chunks rather than\n\t\/\/ all at once.\n\tAPIListChunking featuregate.Feature = \"APIListChunking\"\n\n\t\/\/ owner: @MikeSpreitzer @yue9944882\n\t\/\/ alpha: v1.18\n\t\/\/ beta: v1.20\n\t\/\/\n\t\/\/ Enables managing request concurrency with prioritization and fairness at each server.\n\t\/\/ The FeatureGate was introduced in release 1.15 but the feature\n\t\/\/ was not really implemented before 1.18.\n\tAPIPriorityAndFairness featuregate.Feature = \"APIPriorityAndFairness\"\n\n\t\/\/ owner: @ilackams\n\t\/\/ alpha: v1.7\n\t\/\/ beta: v1.16\n\t\/\/\n\t\/\/ Enables compression of REST responses (GET and LIST only)\n\tAPIResponseCompression featuregate.Feature = \"APIResponseCompression\"\n\n\t\/\/ owner: @roycaihw\n\t\/\/ alpha: v1.20\n\t\/\/\n\t\/\/ Assigns each kube-apiserver an ID in a cluster.\n\tAPIServerIdentity featuregate.Feature = \"APIServerIdentity\"\n\n\t\/\/ owner: @dashpole\n\t\/\/ alpha: v1.22\n\t\/\/\n\t\/\/ Add support for distributed tracing in the API Server\n\tAPIServerTracing featuregate.Feature = \"APIServerTracing\"\n\n\t\/\/ owner: @tallclair\n\t\/\/ alpha: v1.7\n\t\/\/ beta: v1.8\n\t\/\/ GA: v1.12\n\t\/\/\n\t\/\/ AdvancedAuditing enables a much more general API auditing pipeline, which includes support for\n\t\/\/ pluggable output backends and an audit policy specifying how different requests should be\n\t\/\/ audited.\n\tAdvancedAuditing featuregate.Feature = \"AdvancedAuditing\"\n\n\t\/\/ owner: @cici37 @jpbetz\n\t\/\/ kep: http:\/\/kep.k8s.io\/3488\n\t\/\/ alpha: v1.26\n\t\/\/\n\t\/\/ Enables expression validation in Admission Control\n\tCelValidatingAdmissionExtensibility featuregate.Feature = \"CelValidatingAdmissionExtensibility\"\n\n\t\/\/ owner: @cici37\n\t\/\/ kep: https:\/\/kep.k8s.io\/2876\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.25\n\t\/\/\n\t\/\/ Enables expression validation for Custom Resource\n\tCustomResourceValidationExpressions featuregate.Feature = \"CustomResourceValidationExpressions\"\n\n\t\/\/ owner: @apelisse\n\t\/\/ alpha: v1.12\n\t\/\/ beta: v1.13\n\t\/\/ stable: v1.18\n\t\/\/\n\t\/\/ Allow requests to be processed but not stored, so that\n\t\/\/ validation, merging, mutation can be tested without\n\t\/\/ committing.\n\tDryRun featuregate.Feature = \"DryRun\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.20\n\t\/\/ beta: v1.21\n\t\/\/ GA: v1.24\n\t\/\/\n\t\/\/ Allows for updating watchcache resource version with progress notify events.\n\tEfficientWatchResumption featuregate.Feature = \"EfficientWatchResumption\"\n\n\t\/\/ owner: @aramase\n\t\/\/ kep: https:\/\/kep.k8s.io\/3299\n\t\/\/ alpha: v1.25\n\t\/\/\n\t\/\/ Enables KMS v2 API for encryption at rest.\n\tKMSv2 featuregate.Feature = \"KMSv2\"\n\n\t\/\/ owner: @jiahuif\n\t\/\/ kep: https:\/\/kep.k8s.io\/2887\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables populating \"enum\" field of OpenAPI schemas\n\t\/\/ in the spec returned from kube-apiserver.\n\tOpenAPIEnums featuregate.Feature = \"OpenAPIEnums\"\n\n\t\/\/ owner: @jefftree\n\t\/\/ kep: https:\/\/kep.k8s.io\/2896\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables kubernetes to publish OpenAPI v3\n\tOpenAPIV3 featuregate.Feature = \"OpenAPIV3\"\n\n\t\/\/ owner: @caesarxuchao\n\t\/\/ alpha: v1.15\n\t\/\/ beta: v1.16\n\t\/\/\n\t\/\/ Allow apiservers to show a count of remaining items in the response\n\t\/\/ to a chunking list request.\n\tRemainingItemCount featuregate.Feature = \"RemainingItemCount\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.16\n\t\/\/ beta: v1.20\n\t\/\/ GA: v1.24\n\t\/\/\n\t\/\/ Deprecates and removes SelfLink from ObjectMeta and ListMeta.\n\tRemoveSelfLink featuregate.Feature = \"RemoveSelfLink\"\n\n\t\/\/ owner: @apelisse, @lavalamp\n\t\/\/ alpha: v1.14\n\t\/\/ beta: v1.16\n\t\/\/ stable: v1.22\n\t\/\/\n\t\/\/ Server-side apply. Merging happens on the server.\n\tServerSideApply featuregate.Feature = \"ServerSideApply\"\n\n\t\/\/ owner: @kevindelgado\n\t\/\/ kep: https:\/\/kep.k8s.io\/2885\n\t\/\/ alpha: v1.23\n\t\/\/ beta: v1.24\n\t\/\/\n\t\/\/ Enables server-side field validation.\n\tServerSideFieldValidation featuregate.Feature = \"ServerSideFieldValidation\"\n\n\t\/\/ owner: @caesarxuchao @roycaihw\n\t\/\/ alpha: v1.20\n\t\/\/\n\t\/\/ Enable the storage version API.\n\tStorageVersionAPI featuregate.Feature = \"StorageVersionAPI\"\n\n\t\/\/ owner: @caesarxuchao\n\t\/\/ alpha: v1.14\n\t\/\/ beta: v1.15\n\t\/\/\n\t\/\/ Allow apiservers to expose the storage version hash in the discovery\n\t\/\/ document.\n\tStorageVersionHash featuregate.Feature = \"StorageVersionHash\"\n\n\t\/\/ owner: @wojtek-t\n\t\/\/ alpha: v1.15\n\t\/\/ beta: v1.16\n\t\/\/ GA: v1.17\n\t\/\/\n\t\/\/ Enables support for watch bookmark events.\n\tWatchBookmark featuregate.Feature = \"WatchBookmark\"\n)\n\nfunc init() {\n\truntime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))\n}\n\n\/\/ defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.\n\/\/ To add a new feature, define a key for it above and add it here. The features will be\n\/\/ available throughout Kubernetes binaries.\nvar defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{\n\tAggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAPIListChunking: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIResponseCompression: {Default: true, PreRelease: featuregate.Beta},\n\n\tAPIServerIdentity: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAPIServerTracing: {Default: false, PreRelease: featuregate.Alpha},\n\n\tAdvancedAuditing: {Default: true, PreRelease: featuregate.GA},\n\n\tCelValidatingAdmissionExtensibility: {Default: false, PreRelease: featuregate.Alpha},\n\n\tCustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta},\n\n\tDryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, \/\/ remove in 1.28\n\n\tEfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n\n\tKMSv2: {Default: false, PreRelease: featuregate.Alpha},\n\n\tOpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},\n\n\tOpenAPIV3: {Default: true, PreRelease: featuregate.Beta},\n\n\tRemainingItemCount: {Default: true, PreRelease: featuregate.Beta},\n\n\tRemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n\n\tServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, \/\/ remove in 1.29\n\n\tServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta},\n\n\tStorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha},\n\n\tStorageVersionHash: {Default: true, PreRelease: featuregate.Beta},\n\n\tWatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The rkt Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fileutil\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/rkt\/pkg\/uid\"\n)\n\nconst tstprefix = \"fileutil-test\"\n\nfunc touch(t *testing.T, name string) {\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := f.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\ntype tree struct {\n\tpath string\n\tdir bool\n}\n\nfunc createTree(t *testing.T, dir string, tr []tree) {\n\tfor _, f := range tr {\n\t\tif f.dir {\n\t\t\tif err := os.MkdirAll(filepath.Join(dir, f.path), 0755); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\ttouch(t, filepath.Join(dir, f.path))\n\t\t}\n\t}\n}\n\nfunc checkTree(t *testing.T, dir string, tr []tree) {\n\tfor _, f := range tr {\n\t\tif _, err := os.Stat(filepath.Join(dir, f.path)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestCopyTree(t *testing.T) {\n\ttd, err := ioutil.TempDir(\"\", tstprefix)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(td)\n\n\tsrc := filepath.Join(td, \"src\")\n\tdst := filepath.Join(td, \"dst\")\n\tif err := os.MkdirAll(filepath.Join(td, \"src\"), 0755); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttr := []tree{\n\t\t{\n\t\t\tpath: \"dir1\",\n\t\t\tdir: true,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir2\",\n\t\t\tdir: true,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir1\/foo\",\n\t\t\tdir: false,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir1\/bar\",\n\t\t\tdir: false,\n\t\t},\n\t}\n\n\tcreateTree(t, src, tr)\n\n\t\/\/ absolute paths\n\tif err := CopyTree(src, dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n\n\t\/\/ relative paths\n\tif err := os.Chdir(td); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdst = \"dst-rel1\"\n\tif err := CopyTree(\".\/.\/src\/\", dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n\n\tdst = \".\/dst-rel2\"\n\tif err := CopyTree(\".\/src\", dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n}\n<commit_msg>pkg\/fileutil: test IsExecutable utility function<commit_after>\/\/ Copyright 2015 The rkt Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fileutil\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/rkt\/pkg\/uid\"\n)\n\nconst tstprefix = \"fileutil-test\"\n\nfunc touch(t *testing.T, name string) {\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := f.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\ntype tree struct {\n\tpath string\n\tdir bool\n}\n\nfunc createTree(t *testing.T, dir string, tr []tree) {\n\tfor _, f := range tr {\n\t\tif f.dir {\n\t\t\tif err := os.MkdirAll(filepath.Join(dir, f.path), 0755); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\ttouch(t, filepath.Join(dir, f.path))\n\t\t}\n\t}\n}\n\nfunc checkTree(t *testing.T, dir string, tr []tree) {\n\tfor _, f := range tr {\n\t\tif _, err := os.Stat(filepath.Join(dir, f.path)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestCopyTree(t *testing.T) {\n\ttd, err := ioutil.TempDir(\"\", tstprefix)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(td)\n\n\tsrc := filepath.Join(td, \"src\")\n\tdst := filepath.Join(td, \"dst\")\n\tif err := os.MkdirAll(filepath.Join(td, \"src\"), 0755); err != nil {\n\t\tpanic(err)\n\t}\n\n\ttr := []tree{\n\t\t{\n\t\t\tpath: \"dir1\",\n\t\t\tdir: true,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir2\",\n\t\t\tdir: true,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir1\/foo\",\n\t\t\tdir: false,\n\t\t},\n\t\t{\n\t\t\tpath: \"dir1\/bar\",\n\t\t\tdir: false,\n\t\t},\n\t}\n\n\tcreateTree(t, src, tr)\n\n\t\/\/ absolute paths\n\tif err := CopyTree(src, dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n\n\t\/\/ relative paths\n\tif err := os.Chdir(td); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdst = \"dst-rel1\"\n\tif err := CopyTree(\".\/.\/src\/\", dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n\n\tdst = \".\/dst-rel2\"\n\tif err := CopyTree(\".\/src\", dst, uid.NewBlankUidRange()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcheckTree(t, dst, tr)\n}\n\nfunc TestFileIsExecutable(t *testing.T) {\n\ttempDir, err := ioutil.TempDir(\"\", tstprefix)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tempDir)\n\n\ttestCases := []struct {\n\t\tPermission os.FileMode\n\t\tIsExecutable bool\n\t}{\n\t\t{0200, false},\n\t\t{0400, false},\n\t\t{0600, false},\n\t\t{0100, true},\n\t\t{0300, true},\n\t\t{0500, true},\n\t\t{0700, true},\n\n\t\t{0002, false},\n\t\t{0004, false},\n\t\t{0006, false},\n\t\t{0001, true},\n\t\t{0003, true},\n\t\t{0005, true},\n\t\t{0007, true},\n\n\t\t{0020, false},\n\t\t{0040, false},\n\t\t{0060, false},\n\t\t{0010, true},\n\t\t{0030, true},\n\t\t{0050, true},\n\t\t{0070, true},\n\n\t\t{0000, false},\n\t\t{0222, false},\n\t\t{0444, false},\n\t\t{0666, false},\n\n\t\t{0146, true},\n\t\t{0661, true},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tf, err := ioutil.TempFile(tempDir, \"\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif err := f.Chmod(tc.Permission); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif err := f.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tpath := f.Name()\n\n\t\tif tc.IsExecutable != IsExecutable(path) {\n\t\t\tt.Errorf(\"fileutil.IsExecutable(%q) with permissions %q, expected %v\", path, tc.Permission, tc.IsExecutable)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gcc\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pion\/interceptor\"\n\t\"github.com\/pion\/logging\"\n\t\"github.com\/pion\/rtp\"\n)\n\ntype item struct {\n\theader *rtp.Header\n\tpayload *[]byte\n\tsize int\n\tattributes interceptor.Attributes\n}\n\n\/\/ LeakyBucketPacer implements a leaky bucket pacing algorithm\ntype LeakyBucketPacer struct {\n\tlog logging.LeveledLogger\n\n\tf float64\n\ttargetBitrate int\n\ttargetBitrateLock sync.Mutex\n\n\tpacingInterval time.Duration\n\n\tqLock sync.RWMutex\n\tqueue *list.List\n\tdone chan struct{}\n\n\tssrcToWriter map[uint32]interceptor.RTPWriter\n\twriterLock sync.Mutex\n\n\tpool *sync.Pool\n}\n\n\/\/ NewLeakyBucketPacer initializes a new LeakyBucketPacer\nfunc NewLeakyBucketPacer(initialBitrate int) *LeakyBucketPacer {\n\tp := &LeakyBucketPacer{\n\t\tlog: logging.NewDefaultLoggerFactory().NewLogger(\"pacer\"),\n\t\tf: 1.5,\n\t\ttargetBitrate: initialBitrate,\n\t\tpacingInterval: 5 * time.Millisecond,\n\t\tqLock: sync.RWMutex{},\n\t\tqueue: list.New(),\n\t\tdone: make(chan struct{}),\n\t\tssrcToWriter: map[uint32]interceptor.RTPWriter{},\n\t\tpool: &sync.Pool{},\n\t}\n\tp.pool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tb := make([]byte, 1460)\n\t\t\treturn &b\n\t\t},\n\t}\n\n\tgo p.Run()\n\treturn p\n}\n\n\/\/ AddStream adds a new stream and its corresponding writer to the pacer\nfunc (p *LeakyBucketPacer) AddStream(ssrc uint32, writer interceptor.RTPWriter) {\n\tp.writerLock.Lock()\n\tdefer p.writerLock.Unlock()\n\tp.ssrcToWriter[ssrc] = writer\n}\n\n\/\/ SetTargetBitrate updates the target bitrate at which the pacer is allowed to\n\/\/ send packets. The pacer may exceed this limit by p.f\nfunc (p *LeakyBucketPacer) SetTargetBitrate(rate int) {\n\tp.targetBitrateLock.Lock()\n\tdefer p.targetBitrateLock.Unlock()\n\tp.targetBitrate = int(p.f * float64(rate))\n}\n\nfunc (p *LeakyBucketPacer) getTargetBitrate() int {\n\tp.targetBitrateLock.Lock()\n\tdefer p.targetBitrateLock.Unlock()\n\n\treturn p.targetBitrate\n}\n\n\/\/ Write sends a packet with header and payload the a previously registered\n\/\/ stream.\nfunc (p *LeakyBucketPacer) Write(header *rtp.Header, payload []byte, attributes interceptor.Attributes) (int, error) {\n\tbuf := p.pool.Get().(*[]byte)\n\tcopy(*buf, payload)\n\thdr := header.Clone()\n\n\tp.qLock.Lock()\n\tp.queue.PushBack(&item{\n\t\theader: &hdr,\n\t\tpayload: buf,\n\t\tsize: len(payload),\n\t\tattributes: attributes,\n\t})\n\tp.qLock.Unlock()\n\n\treturn header.MarshalSize() + len(payload), nil\n}\n\n\/\/ Run starts the LeakyBucketPacer\nfunc (p *LeakyBucketPacer) Run() {\n\tticker := time.NewTicker(p.pacingInterval)\n\n\tlastSent := time.Now()\n\tfor {\n\t\tselect {\n\t\tcase <-p.done:\n\t\t\treturn\n\t\tcase now := <-ticker.C:\n\t\t\tbudget := int(float64(now.Sub(lastSent).Milliseconds()) * float64(p.getTargetBitrate()) \/ 8000.0)\n\t\t\tp.qLock.Lock()\n\t\t\tfor p.queue.Len() != 0 && budget > 0 {\n\t\t\t\tp.log.Infof(\"budget=%v, len(queue)=%v, targetBitrate=%v\", budget, p.queue.Len(), p.getTargetBitrate())\n\t\t\t\tnext := p.queue.Remove(p.queue.Front()).(*item)\n\t\t\t\tp.qLock.Unlock()\n\n\t\t\t\twriter, ok := p.ssrcToWriter[next.header.SSRC]\n\t\t\t\tif !ok {\n\t\t\t\t\tp.log.Warnf(\"no writer found for ssrc: %v\", next.header.SSRC)\n\t\t\t\t\tp.pool.Put(next.payload)\n\t\t\t\t\tp.qLock.Lock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tn, err := writer.Write(next.header, (*next.payload)[:next.size], next.attributes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.log.Errorf(\"failed to write packet: %v\", err)\n\t\t\t\t}\n\t\t\t\tlastSent = now\n\t\t\t\tbudget -= n\n\n\t\t\t\tp.pool.Put(next.payload)\n\t\t\t\tp.qLock.Lock()\n\t\t\t}\n\t\t\tp.qLock.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ Close closes the LeakyBucketPacer\nfunc (p *LeakyBucketPacer) Close() error {\n\tclose(p.done)\n\treturn nil\n}\n<commit_msg>Fix race in GCC LeakyBucketPacer<commit_after>package gcc\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pion\/interceptor\"\n\t\"github.com\/pion\/logging\"\n\t\"github.com\/pion\/rtp\"\n)\n\ntype item struct {\n\theader *rtp.Header\n\tpayload *[]byte\n\tsize int\n\tattributes interceptor.Attributes\n}\n\n\/\/ LeakyBucketPacer implements a leaky bucket pacing algorithm\ntype LeakyBucketPacer struct {\n\tlog logging.LeveledLogger\n\n\tf float64\n\ttargetBitrate int\n\ttargetBitrateLock sync.Mutex\n\n\tpacingInterval time.Duration\n\n\tqLock sync.RWMutex\n\tqueue *list.List\n\tdone chan struct{}\n\n\tssrcToWriter map[uint32]interceptor.RTPWriter\n\twriterLock sync.RWMutex\n\n\tpool *sync.Pool\n}\n\n\/\/ NewLeakyBucketPacer initializes a new LeakyBucketPacer\nfunc NewLeakyBucketPacer(initialBitrate int) *LeakyBucketPacer {\n\tp := &LeakyBucketPacer{\n\t\tlog: logging.NewDefaultLoggerFactory().NewLogger(\"pacer\"),\n\t\tf: 1.5,\n\t\ttargetBitrate: initialBitrate,\n\t\tpacingInterval: 5 * time.Millisecond,\n\t\tqLock: sync.RWMutex{},\n\t\tqueue: list.New(),\n\t\tdone: make(chan struct{}),\n\t\tssrcToWriter: map[uint32]interceptor.RTPWriter{},\n\t\tpool: &sync.Pool{},\n\t}\n\tp.pool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tb := make([]byte, 1460)\n\t\t\treturn &b\n\t\t},\n\t}\n\n\tgo p.Run()\n\treturn p\n}\n\n\/\/ AddStream adds a new stream and its corresponding writer to the pacer\nfunc (p *LeakyBucketPacer) AddStream(ssrc uint32, writer interceptor.RTPWriter) {\n\tp.writerLock.Lock()\n\tdefer p.writerLock.Unlock()\n\tp.ssrcToWriter[ssrc] = writer\n}\n\n\/\/ SetTargetBitrate updates the target bitrate at which the pacer is allowed to\n\/\/ send packets. The pacer may exceed this limit by p.f\nfunc (p *LeakyBucketPacer) SetTargetBitrate(rate int) {\n\tp.targetBitrateLock.Lock()\n\tdefer p.targetBitrateLock.Unlock()\n\tp.targetBitrate = int(p.f * float64(rate))\n}\n\nfunc (p *LeakyBucketPacer) getTargetBitrate() int {\n\tp.targetBitrateLock.Lock()\n\tdefer p.targetBitrateLock.Unlock()\n\n\treturn p.targetBitrate\n}\n\n\/\/ Write sends a packet with header and payload the a previously registered\n\/\/ stream.\nfunc (p *LeakyBucketPacer) Write(header *rtp.Header, payload []byte, attributes interceptor.Attributes) (int, error) {\n\tbuf := p.pool.Get().(*[]byte)\n\tcopy(*buf, payload)\n\thdr := header.Clone()\n\n\tp.qLock.Lock()\n\tp.queue.PushBack(&item{\n\t\theader: &hdr,\n\t\tpayload: buf,\n\t\tsize: len(payload),\n\t\tattributes: attributes,\n\t})\n\tp.qLock.Unlock()\n\n\treturn header.MarshalSize() + len(payload), nil\n}\n\n\/\/ Run starts the LeakyBucketPacer\nfunc (p *LeakyBucketPacer) Run() {\n\tticker := time.NewTicker(p.pacingInterval)\n\n\tlastSent := time.Now()\n\tfor {\n\t\tselect {\n\t\tcase <-p.done:\n\t\t\treturn\n\t\tcase now := <-ticker.C:\n\t\t\tbudget := int(float64(now.Sub(lastSent).Milliseconds()) * float64(p.getTargetBitrate()) \/ 8000.0)\n\t\t\tp.qLock.Lock()\n\t\t\tfor p.queue.Len() != 0 && budget > 0 {\n\t\t\t\tp.log.Infof(\"budget=%v, len(queue)=%v, targetBitrate=%v\", budget, p.queue.Len(), p.getTargetBitrate())\n\t\t\t\tnext := p.queue.Remove(p.queue.Front()).(*item)\n\t\t\t\tp.qLock.Unlock()\n\n\t\t\t\tp.writerLock.RLock()\n\t\t\t\twriter, ok := p.ssrcToWriter[next.header.SSRC]\n\t\t\t\tp.writerLock.RUnlock()\n\t\t\t\tif !ok {\n\t\t\t\t\tp.log.Warnf(\"no writer found for ssrc: %v\", next.header.SSRC)\n\t\t\t\t\tp.pool.Put(next.payload)\n\t\t\t\t\tp.qLock.Lock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tn, err := writer.Write(next.header, (*next.payload)[:next.size], next.attributes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.log.Errorf(\"failed to write packet: %v\", err)\n\t\t\t\t}\n\t\t\t\tlastSent = now\n\t\t\t\tbudget -= n\n\n\t\t\t\tp.pool.Put(next.payload)\n\t\t\t\tp.qLock.Lock()\n\t\t\t}\n\t\t\tp.qLock.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ Close closes the LeakyBucketPacer\nfunc (p *LeakyBucketPacer) Close() error {\n\tclose(p.done)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package resolver\n\nimport (\n\t\"crypto\/rand\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"nimona.io\/pkg\/context\"\n\t\"nimona.io\/pkg\/crypto\"\n\t\"nimona.io\/pkg\/eventbus\"\n\t\"nimona.io\/pkg\/exchange\"\n\t\"nimona.io\/pkg\/keychain\"\n\t\"nimona.io\/pkg\/net\"\n\t\"nimona.io\/pkg\/object\"\n\t\"nimona.io\/pkg\/peer\"\n)\n\nfunc TestResolver_TwoPeersCanFindEachOther(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\n\td0 := New(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t_, k1, kc1, eb1, n1, x1, ctx1 := newPeer(t, \"peer1\")\n\n\td1 := New(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\n\tpeersChan, err := d1.Lookup(ctx, LookupByOwner(k0.PublicKey()))\n\n\tpeers := gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n0.Addresses(), peers[0].Addresses)\n\n\tctxR2 := context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\tpeersChan, err = d0.Lookup(ctxR2, LookupByOwner(k1.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n1.Addresses(), peers[0].Addresses)\n}\n\nfunc TestResolver_TwoPeersAndOneBootstrapCanFindEachOther(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\n\t\/\/ bootstrap node\n\tNew(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t_, k1, kc1, eb1, n1, x1, ctx1 := newPeer(t, \"peer1\")\n\t_, k2, kc2, eb2, n2, x2, ctx2 := newPeer(t, \"peer2\")\n\n\t\/\/ bootstrap address\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\t\/\/ node 1\n\td1 := New(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ node 2\n\td2 := New(\n\t\tctx2,\n\t\tWithKeychain(kc2),\n\t\tWithEventbus(eb2),\n\t\tWithExchange(x2),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find bootstrap from node1\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err := d1.Lookup(ctx, LookupByOwner(k0.PublicKey()))\n\tpeers := gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n0.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 1 from node 2\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d2.Lookup(ctx, LookupByOwner(k1.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n1.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 2 from node 1\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req3\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\n\tpeersChan, err = d1.Lookup(ctx, LookupByOwner(k2.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n2.Addresses(), peers[0].Addresses)\n\n\t\/\/ add extra peer\n\t_, k3, kc3, eb3, n3, x3, ctx3 := newPeer(t, \"peer3\")\n\n\t\/\/ setup node 3\n\tNew(\n\t\tctx3,\n\t\tWithKeychain(kc3),\n\t\tWithEventbus(eb3),\n\t\tWithExchange(x3),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\tfmt.Println(\"peer0\", k0)\n\tfmt.Println(\"peer1\", k1)\n\tfmt.Println(\"peer2\", k2)\n\tfmt.Println(\"peer3\", k3)\n\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\n\t\/\/ allow bootstraping to settle\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find node 3 from node 1 from it's identity\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req4\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d1.Lookup(\n\t\tctx,\n\t\tLookupByCertificateSigner(\n\t\t\tkc3.ListPublicKeys(keychain.IdentityKey)[0],\n\t\t),\n\t)\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n3.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 3 from node 2 from it's identity\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req5\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d2.Lookup(\n\t\tctx,\n\t\tLookupByCertificateSigner(\n\t\t\tkc3.ListPublicKeys(keychain.IdentityKey)[0],\n\t\t),\n\t)\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n3.Addresses(), peers[0].Addresses)\n}\n\nfunc TestResolver_TwoPeersAndOneBootstrapCanProvide(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\t_, k1, kc1, eb1, _, x1, ctx1 := newPeer(t, \"peer1\")\n\t_, k2, kc2, eb2, _, x2, ctx2 := newPeer(t, \"peer2\")\n\n\t\/\/ make peer 1 a provider\n\ttoken := make([]byte, 32)\n\trand.Read(token) \/\/ nolint: errcheck\n\tch := object.HashFromBytes(token)\n\teb1.Publish(\n\t\teventbus.ObjectPinned{\n\t\t\tHash: ch,\n\t\t},\n\t)\n\n\t\/\/ print peer info\n\tfmt.Println(\"k0\", k0)\n\tfmt.Println(\"k1\", k1)\n\tfmt.Println(\"k2\", k2)\n\n\t\/\/ bootstrap peer\n\td0 := New(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ bootstrap address\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\t\/\/ peer 1\n\tNew(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ peer 2\n\td2 := New(\n\t\tctx2,\n\t\tWithKeychain(kc2),\n\t\tWithEventbus(eb2),\n\t\tWithExchange(x2),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find peer 1 from peer 2\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\tprovidersChan, err := d2.Lookup(ctx, LookupByContentHash(ch))\n\tproviders := gatherPeers(providersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, providers, 1)\n\trequire.Equal(t, k1.PublicKey(), providers[0].PublicKey())\n\n\t\/\/ find peer 1 from bootstrap\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tprovidersChan, err = d0.Lookup(ctx, LookupByContentHash(ch))\n\tproviders = gatherPeers(providersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, providers, 1)\n\trequire.Equal(t, k1.PublicKey(), providers[0].PublicKey())\n}\n\n\/\/ nolint: gocritic\nfunc newPeer(\n\tt *testing.T,\n\tname string,\n) (\n\tcrypto.PrivateKey,\n\tcrypto.PrivateKey,\n\tkeychain.Keychain,\n\teventbus.Eventbus,\n\tnet.Network,\n\texchange.Exchange,\n\tcontext.Context,\n) {\n\tctx := context.New(context.WithCorrelationID(name))\n\n\teb := eventbus.New()\n\n\t\/\/ identity key\n\topk, err := crypto.GenerateEd25519PrivateKey()\n\tassert.NoError(t, err)\n\n\t\/\/ peer key\n\tpk, err := crypto.GenerateEd25519PrivateKey()\n\tassert.NoError(t, err)\n\n\t\/\/ peer certificate\n\tc := peer.NewCertificate(\n\t\tpk.PublicKey(),\n\t\topk,\n\t)\n\n\tkc := keychain.New()\n\tkc.Put(keychain.PrimaryPeerKey, pk)\n\tkc.Put(keychain.IdentityKey, opk)\n\tkc.PutCertificate(&c)\n\n\tn := net.New(\n\t\tnet.WithEventBus(eb),\n\t\tnet.WithKeychain(kc),\n\t)\n\n\t_, err = n.Listen(context.Background(), \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tx := exchange.New(\n\t\tctx,\n\t\texchange.WithEventbus(eb),\n\t\texchange.WithKeychain(kc),\n\t\texchange.WithNet(n),\n\t)\n\n\treturn opk, pk, kc, eb, n, x, ctx\n}\n\nfunc tempSqlite3(t *testing.T) *sql.DB {\n\tdirPath, err := ioutil.TempDir(\"\", \"nimona-store-sql\")\n\trequire.NoError(t, err)\n\tdb, err := sql.Open(\"sqlite3\", path.Join(dirPath, \"sqlite3.db\"))\n\trequire.NoError(t, err)\n\treturn db\n}\n\nfunc gatherPeers(p <-chan *peer.Peer) []*peer.Peer {\n\tps := []*peer.Peer{}\n\tfor p := range p {\n\t\tp := p\n\t\tps = append(ps, p)\n\t}\n\treturn peer.Unique(ps)\n}\n<commit_msg>chore(resolver): remove unsused tempSqlite3 func<commit_after>package resolver\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"nimona.io\/pkg\/context\"\n\t\"nimona.io\/pkg\/crypto\"\n\t\"nimona.io\/pkg\/eventbus\"\n\t\"nimona.io\/pkg\/exchange\"\n\t\"nimona.io\/pkg\/keychain\"\n\t\"nimona.io\/pkg\/net\"\n\t\"nimona.io\/pkg\/object\"\n\t\"nimona.io\/pkg\/peer\"\n)\n\nfunc TestResolver_TwoPeersCanFindEachOther(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\n\td0 := New(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t_, k1, kc1, eb1, n1, x1, ctx1 := newPeer(t, \"peer1\")\n\n\td1 := New(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\n\tpeersChan, err := d1.Lookup(ctx, LookupByOwner(k0.PublicKey()))\n\n\tpeers := gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n0.Addresses(), peers[0].Addresses)\n\n\tctxR2 := context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\tpeersChan, err = d0.Lookup(ctxR2, LookupByOwner(k1.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n1.Addresses(), peers[0].Addresses)\n}\n\nfunc TestResolver_TwoPeersAndOneBootstrapCanFindEachOther(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\n\t\/\/ bootstrap node\n\tNew(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t_, k1, kc1, eb1, n1, x1, ctx1 := newPeer(t, \"peer1\")\n\t_, k2, kc2, eb2, n2, x2, ctx2 := newPeer(t, \"peer2\")\n\n\t\/\/ bootstrap address\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\t\/\/ node 1\n\td1 := New(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ node 2\n\td2 := New(\n\t\tctx2,\n\t\tWithKeychain(kc2),\n\t\tWithEventbus(eb2),\n\t\tWithExchange(x2),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find bootstrap from node1\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err := d1.Lookup(ctx, LookupByOwner(k0.PublicKey()))\n\tpeers := gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n0.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 1 from node 2\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d2.Lookup(ctx, LookupByOwner(k1.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n1.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 2 from node 1\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req3\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\n\tpeersChan, err = d1.Lookup(ctx, LookupByOwner(k2.PublicKey()))\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n2.Addresses(), peers[0].Addresses)\n\n\t\/\/ add extra peer\n\t_, k3, kc3, eb3, n3, x3, ctx3 := newPeer(t, \"peer3\")\n\n\t\/\/ setup node 3\n\tNew(\n\t\tctx3,\n\t\tWithKeychain(kc3),\n\t\tWithEventbus(eb3),\n\t\tWithExchange(x3),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\tfmt.Println(\"peer0\", k0)\n\tfmt.Println(\"peer1\", k1)\n\tfmt.Println(\"peer2\", k2)\n\tfmt.Println(\"peer3\", k3)\n\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\tfmt.Println(\"-------------------\")\n\n\t\/\/ allow bootstraping to settle\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find node 3 from node 1 from it's identity\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req4\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d1.Lookup(\n\t\tctx,\n\t\tLookupByCertificateSigner(\n\t\t\tkc3.ListPublicKeys(keychain.IdentityKey)[0],\n\t\t),\n\t)\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n3.Addresses(), peers[0].Addresses)\n\n\t\/\/ find node 3 from node 2 from it's identity\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req5\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tpeersChan, err = d2.Lookup(\n\t\tctx,\n\t\tLookupByCertificateSigner(\n\t\t\tkc3.ListPublicKeys(keychain.IdentityKey)[0],\n\t\t),\n\t)\n\tpeers = gatherPeers(peersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, peers, 1)\n\trequire.Equal(t, n3.Addresses(), peers[0].Addresses)\n}\n\nfunc TestResolver_TwoPeersAndOneBootstrapCanProvide(t *testing.T) {\n\t_, k0, kc0, eb0, n0, x0, ctx0 := newPeer(t, \"peer0\")\n\t_, k1, kc1, eb1, _, x1, ctx1 := newPeer(t, \"peer1\")\n\t_, k2, kc2, eb2, _, x2, ctx2 := newPeer(t, \"peer2\")\n\n\t\/\/ make peer 1 a provider\n\ttoken := make([]byte, 32)\n\trand.Read(token) \/\/ nolint: errcheck\n\tch := object.HashFromBytes(token)\n\teb1.Publish(\n\t\teventbus.ObjectPinned{\n\t\t\tHash: ch,\n\t\t},\n\t)\n\n\t\/\/ print peer info\n\tfmt.Println(\"k0\", k0)\n\tfmt.Println(\"k1\", k1)\n\tfmt.Println(\"k2\", k2)\n\n\t\/\/ bootstrap peer\n\td0 := New(\n\t\tctx0,\n\t\tWithKeychain(kc0),\n\t\tWithEventbus(eb0),\n\t\tWithExchange(x0),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ bootstrap address\n\tba := []*peer.Peer{\n\t\t{\n\t\t\tAddresses: n0.Addresses(),\n\t\t\tOwners: kc0.ListPublicKeys(keychain.PeerKey),\n\t\t},\n\t}\n\n\t\/\/ peer 1\n\tNew(\n\t\tctx1,\n\t\tWithKeychain(kc1),\n\t\tWithEventbus(eb1),\n\t\tWithExchange(x1),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ peer 2\n\td2 := New(\n\t\tctx2,\n\t\tWithKeychain(kc2),\n\t\tWithEventbus(eb2),\n\t\tWithExchange(x2),\n\t\tWithBoostrapPeers(ba),\n\t)\n\n\ttime.Sleep(time.Millisecond * 250)\n\n\t\/\/ find peer 1 from peer 2\n\tctx := context.New(\n\t\tcontext.WithCorrelationID(\"req1\"),\n\t\tcontext.WithTimeout(time.Second),\n\t)\n\tprovidersChan, err := d2.Lookup(ctx, LookupByContentHash(ch))\n\tproviders := gatherPeers(providersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, providers, 1)\n\trequire.Equal(t, k1.PublicKey(), providers[0].PublicKey())\n\n\t\/\/ find peer 1 from bootstrap\n\tctx = context.New(\n\t\tcontext.WithCorrelationID(\"req2\"),\n\t\tcontext.WithTimeout(time.Second*2),\n\t)\n\tprovidersChan, err = d0.Lookup(ctx, LookupByContentHash(ch))\n\tproviders = gatherPeers(providersChan)\n\trequire.NoError(t, err)\n\trequire.Len(t, providers, 1)\n\trequire.Equal(t, k1.PublicKey(), providers[0].PublicKey())\n}\n\n\/\/ nolint: gocritic\nfunc newPeer(\n\tt *testing.T,\n\tname string,\n) (\n\tcrypto.PrivateKey,\n\tcrypto.PrivateKey,\n\tkeychain.Keychain,\n\teventbus.Eventbus,\n\tnet.Network,\n\texchange.Exchange,\n\tcontext.Context,\n) {\n\tctx := context.New(context.WithCorrelationID(name))\n\n\teb := eventbus.New()\n\n\t\/\/ identity key\n\topk, err := crypto.GenerateEd25519PrivateKey()\n\tassert.NoError(t, err)\n\n\t\/\/ peer key\n\tpk, err := crypto.GenerateEd25519PrivateKey()\n\tassert.NoError(t, err)\n\n\t\/\/ peer certificate\n\tc := peer.NewCertificate(\n\t\tpk.PublicKey(),\n\t\topk,\n\t)\n\n\tkc := keychain.New()\n\tkc.Put(keychain.PrimaryPeerKey, pk)\n\tkc.Put(keychain.IdentityKey, opk)\n\tkc.PutCertificate(&c)\n\n\tn := net.New(\n\t\tnet.WithEventBus(eb),\n\t\tnet.WithKeychain(kc),\n\t)\n\n\t_, err = n.Listen(context.Background(), \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tx := exchange.New(\n\t\tctx,\n\t\texchange.WithEventbus(eb),\n\t\texchange.WithKeychain(kc),\n\t\texchange.WithNet(n),\n\t)\n\n\treturn opk, pk, kc, eb, n, x, ctx\n}\n\nfunc gatherPeers(p <-chan *peer.Peer) []*peer.Peer {\n\tps := []*peer.Peer{}\n\tfor p := range p {\n\t\tp := p\n\t\tps = append(ps, p)\n\t}\n\treturn peer.Unique(ps)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/testutil\"\n)\n\nfunc TestCreateTar(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \".\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarWithParents(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTarWithParents(&b, \".\", paths, 1000, 1000, time.Now())\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\": \"\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}, tarFiles)\n\t})\n}\n\nfunc TestCreateTarGz(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTarGz(&b, \".\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\tgzr, err := gzip.NewReader(&b)\n\t\tt.CheckNoError(err)\n\t\ttr := tar.NewReader(gzr)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarSubDirectory(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"sub\/foo\": \"baz1\",\n\t\t\t\"sub\/bar\/bat\": \"baz2\",\n\t\t\t\"sub\/bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \"sub\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[\"sub\/\"+hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarEmptyFolder(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tt.NewTempDir().\n\t\t\tMkdir(\"empty\").\n\t\t\tChdir()\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \".\", []string{\"empty\"})\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\tvar tarFolders []string\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tif hdr.FileInfo().IsDir() {\n\t\t\t\ttarFolders = append(tarFolders, hdr.Name)\n\t\t\t}\n\t\t}\n\n\t\tt.CheckNoError(err)\n\t\tt.CheckDeepEqual([]string{\"empty\"}, tarFolders)\n\t})\n}\n\nfunc TestCreateTarWithAbsolutePaths(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\ttmpDir, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, tmpDir.Root(), tmpDir.Paths(paths...))\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestAddFileToTarSymlinks(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"creating symlinks requires extra privileges on Windows\")\n\t}\n\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\ttmpDir, paths := prepareFiles(t, files)\n\n\t\tlinks := map[string]string{\n\t\t\t\"foo.link\": \"foo\",\n\t\t\t\"bat.link\": \"bar\/bat\",\n\t\t\t\"bat\/baz.link\": \"bar\/baz\",\n\t\t}\n\t\tfor link, file := range links {\n\t\t\ttmpDir.Symlink(file, link)\n\t\t\tpaths = append(paths, link)\n\t\t}\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, tmpDir.Root(), tmpDir.Paths(paths...))\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the links match.\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tif _, isFile := files[hdr.Name]; isFile {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlink, isLink := links[hdr.Name]\n\t\t\tif !isLink {\n\t\t\t\tt.Errorf(\"Unexpected file\/link in tar: %s\", hdr.Name)\n\t\t\t}\n\t\t\tif hdr.Linkname != link {\n\t\t\t\tt.Errorf(\"Link destination doesn't match. %s != %s.\", link, hdr.Linkname)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc prepareFiles(t *testutil.T, files map[string]string) (*testutil.TempDir, []string) {\n\ttmpDir := t.NewTempDir().Chdir()\n\n\tvar paths []string\n\tfor path, content := range files {\n\t\ttmpDir.Write(path, content)\n\t\tpaths = append(paths, path)\n\t}\n\n\treturn tmpDir, paths\n}\n<commit_msg>More assertions<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/testutil\"\n)\n\nfunc TestCreateTar(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \".\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarWithParents(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTarWithParents(&b, \".\", paths, 10, 100, time.Now())\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\t\t\tt.CheckDeepEqual(10, hdr.Uid)\n\t\t\tt.CheckDeepEqual(100, hdr.Gid)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\": \"\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}, tarFiles)\n\t})\n}\n\nfunc TestCreateTarGz(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTarGz(&b, \".\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\tgzr, err := gzip.NewReader(&b)\n\t\tt.CheckNoError(err)\n\t\ttr := tar.NewReader(gzr)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarSubDirectory(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"sub\/foo\": \"baz1\",\n\t\t\t\"sub\/bar\/bat\": \"baz2\",\n\t\t\t\"sub\/bar\/baz\": \"baz3\",\n\t\t}\n\t\t_, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \"sub\", paths)\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[\"sub\/\"+hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestCreateTarEmptyFolder(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tt.NewTempDir().\n\t\t\tMkdir(\"empty\").\n\t\t\tChdir()\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, \".\", []string{\"empty\"})\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\tvar tarFolders []string\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tif hdr.FileInfo().IsDir() {\n\t\t\t\ttarFolders = append(tarFolders, hdr.Name)\n\t\t\t}\n\t\t}\n\n\t\tt.CheckNoError(err)\n\t\tt.CheckDeepEqual([]string{\"empty\"}, tarFolders)\n\t})\n}\n\nfunc TestCreateTarWithAbsolutePaths(t *testing.T) {\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\ttmpDir, paths := prepareFiles(t, files)\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, tmpDir.Root(), tmpDir.Paths(paths...))\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the contents match.\n\t\ttarFiles := make(map[string]string)\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tcontent, err := ioutil.ReadAll(tr)\n\t\t\tt.CheckNoError(err)\n\n\t\t\ttarFiles[hdr.Name] = string(content)\n\t\t}\n\n\t\tt.CheckDeepEqual(files, tarFiles)\n\t})\n}\n\nfunc TestAddFileToTarSymlinks(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"creating symlinks requires extra privileges on Windows\")\n\t}\n\n\ttestutil.Run(t, \"\", func(t *testutil.T) {\n\t\tfiles := map[string]string{\n\t\t\t\"foo\": \"baz1\",\n\t\t\t\"bar\/bat\": \"baz2\",\n\t\t\t\"bar\/baz\": \"baz3\",\n\t\t}\n\t\ttmpDir, paths := prepareFiles(t, files)\n\n\t\tlinks := map[string]string{\n\t\t\t\"foo.link\": \"foo\",\n\t\t\t\"bat.link\": \"bar\/bat\",\n\t\t\t\"bat\/baz.link\": \"bar\/baz\",\n\t\t}\n\t\tfor link, file := range links {\n\t\t\ttmpDir.Symlink(file, link)\n\t\t\tpaths = append(paths, link)\n\t\t}\n\n\t\tvar b bytes.Buffer\n\t\terr := CreateTar(&b, tmpDir.Root(), tmpDir.Paths(paths...))\n\t\tt.CheckNoError(err)\n\n\t\t\/\/ Make sure the links match.\n\t\ttr := tar.NewReader(&b)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.CheckNoError(err)\n\n\t\t\tif _, isFile := files[hdr.Name]; isFile {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlink, isLink := links[hdr.Name]\n\t\t\tif !isLink {\n\t\t\t\tt.Errorf(\"Unexpected file\/link in tar: %s\", hdr.Name)\n\t\t\t}\n\t\t\tif hdr.Linkname != link {\n\t\t\t\tt.Errorf(\"Link destination doesn't match. %s != %s.\", link, hdr.Linkname)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc prepareFiles(t *testutil.T, files map[string]string) (*testutil.TempDir, []string) {\n\ttmpDir := t.NewTempDir().Chdir()\n\n\tvar paths []string\n\tfor path, content := range files {\n\t\ttmpDir.Write(path, content)\n\t\tpaths = append(paths, path)\n\t}\n\n\treturn tmpDir, paths\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kafka\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"golang.org\/x\/time\/rate\"\n\n\tctrl \"knative.dev\/control-protocol\/pkg\"\n\tctrlnetwork \"knative.dev\/control-protocol\/pkg\/network\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"knative.dev\/pkg\/logging\"\n\tpkgsource \"knative.dev\/pkg\/source\"\n\n\t\"knative.dev\/eventing\/pkg\/adapter\/v2\"\n\t\"knative.dev\/eventing\/pkg\/kncloudevents\"\n\n\t\"knative.dev\/eventing-kafka\/pkg\/common\/consumer\"\n\t\"knative.dev\/eventing-kafka\/pkg\/source\/client\"\n\tkafkasourcecontrol \"knative.dev\/eventing-kafka\/pkg\/source\/control\"\n)\n\nconst (\n\tresourceGroup = \"kafkasources.sources.knative.dev\"\n)\n\ntype AdapterConfig struct {\n\tadapter.EnvConfig\n\tclient.KafkaEnvConfig\n\n\tTopics []string `envconfig:\"KAFKA_TOPICS\" required:\"true\"`\n\tConsumerGroup string `envconfig:\"KAFKA_CONSUMER_GROUP\" required:\"true\"`\n\tName string `envconfig:\"NAME\" required:\"true\"`\n\tKeyType string `envconfig:\"KEY_TYPE\" required:\"false\"`\n\n\t\/\/ Turn off the control server.\n\tDisableControlServer bool\n}\n\nfunc NewEnvConfig() adapter.EnvConfigAccessor {\n\treturn &AdapterConfig{}\n}\n\ntype Adapter struct {\n\tconfig *AdapterConfig\n\tcontrolServer *ctrlnetwork.ControlServer\n\tsaramaConfig *sarama.Config\n\n\thttpMessageSender *kncloudevents.HTTPMessageSender\n\treporter pkgsource.StatsReporter\n\tlogger *zap.SugaredLogger\n\tkeyTypeMapper func([]byte) interface{}\n\trateLimiter *rate.Limiter\n}\n\nvar _ adapter.MessageAdapter = (*Adapter)(nil)\nvar _ consumer.KafkaConsumerHandler = (*Adapter)(nil)\nvar _ consumer.SaramaConsumerLifecycleListener = (*Adapter)(nil)\nvar _ adapter.MessageAdapterConstructor = NewAdapter\n\nfunc NewAdapter(ctx context.Context, processed adapter.EnvConfigAccessor, httpMessageSender *kncloudevents.HTTPMessageSender, reporter pkgsource.StatsReporter) adapter.MessageAdapter {\n\tlogger := logging.FromContext(ctx)\n\tconfig := processed.(*AdapterConfig)\n\n\treturn &Adapter{\n\t\tconfig: config,\n\t\thttpMessageSender: httpMessageSender,\n\t\treporter: reporter,\n\t\tlogger: logger,\n\t\tkeyTypeMapper: getKeyTypeMapper(config.KeyType),\n\t}\n}\nfunc (a *Adapter) GetConsumerGroup() string {\n\treturn a.config.ConsumerGroup\n}\n\nfunc (a *Adapter) Start(ctx context.Context) (err error) {\n\ta.logger.Infow(\"Starting with config: \",\n\t\tzap.String(\"Topics\", strings.Join(a.config.Topics, \",\")),\n\t\tzap.String(\"ConsumerGroup\", a.config.ConsumerGroup),\n\t\tzap.String(\"SinkURI\", a.config.Sink),\n\t\tzap.String(\"Name\", a.config.Name),\n\t\tzap.String(\"Namespace\", a.config.Namespace),\n\t)\n\n\t\/\/ Init control service\n\tif !a.config.DisableControlServer {\n\t\ta.controlServer, err = ctrlnetwork.StartInsecureControlServer(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ta.controlServer.MessageHandler(a)\n\t}\n\n\t\/\/ init consumer group\n\taddrs, config, err := client.NewConfigWithEnv(context.Background(), &a.config.KafkaEnvConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create the config: %w\", err)\n\t}\n\ta.saramaConfig = config\n\n\toptions := []consumer.SaramaConsumerHandlerOption{consumer.WithSaramaConsumerLifecycleListener(a)}\n\tconsumerGroupFactory := consumer.NewConsumerGroupFactory(addrs, config)\n\tgroup, err := consumerGroupFactory.StartConsumerGroup(\n\t\ta.config.ConsumerGroup,\n\t\ta.config.Topics,\n\t\ta.logger,\n\t\ta,\n\t\toptions...,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start consumer group: %w\", err)\n\t}\n\tdefer func() {\n\t\terr := group.Close()\n\t\tif err != nil {\n\t\t\ta.logger.Errorw(\"Failed to close consumer group\", zap.Error(err))\n\t\t}\n\t}()\n\n\t\/\/ Track errors\n\tgo func() {\n\t\tfor err := range group.Errors() {\n\t\t\ta.logger.Errorw(\"Error while consuming messages\", zap.Error(err))\n\t\t}\n\t}()\n\n\t<-ctx.Done()\n\ta.logger.Info(\"Shutting down...\")\n\treturn nil\n}\n\nfunc (a *Adapter) SetReady(int32, bool) {}\n\nfunc (a *Adapter) Handle(ctx context.Context, msg *sarama.ConsumerMessage) (bool, error) {\n\tif a.rateLimiter != nil {\n\t\ta.rateLimiter.Wait(ctx)\n\t}\n\n\tctx, span := trace.StartSpan(ctx, \"kafka-source\")\n\tdefer span.End()\n\n\treq, err := a.httpMessageSender.NewCloudEventRequest(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\terr = a.ConsumerMessageToHttpRequest(ctx, msg, req)\n\tif err != nil {\n\t\ta.logger.Debug(\"failed to create request\", zap.Error(err))\n\t\treturn true, err\n\t}\n\n\tres, err := a.httpMessageSender.Send(req)\n\n\tif err != nil {\n\t\ta.logger.Debug(\"Error while sending the message\", zap.Error(err))\n\t\treturn false, err \/\/ Error while sending, don't commit offset\n\t}\n\t\/\/ Always try to read and close body so the connection can be reused afterwards\n\tif res.Body != nil {\n\t\tio.Copy(ioutil.Discard, res.Body)\n\t\tres.Body.Close()\n\t}\n\n\tif res.StatusCode\/100 != 2 {\n\t\ta.logger.Debug(\"Unexpected status code\", zap.Int(\"status code\", res.StatusCode))\n\t\treturn false, fmt.Errorf(\"%d %s\", res.StatusCode, http.StatusText(res.StatusCode))\n\t}\n\n\treportArgs := &pkgsource.ReportArgs{\n\t\tNamespace: a.config.Namespace,\n\t\tName: a.config.Name,\n\t\tResourceGroup: resourceGroup,\n\t}\n\n\t_ = a.reporter.ReportEventCount(reportArgs, res.StatusCode)\n\treturn true, nil\n}\n\n\/\/ SetRateLimiter sets the global consumer rate limiter\nfunc (a *Adapter) SetRateLimits(r rate.Limit, b int) {\n\ta.rateLimiter = rate.NewLimiter(r, b)\n}\n\nfunc (a *Adapter) HandleServiceMessage(ctx context.Context, message ctrl.ServiceMessage) {\n\t\/\/ In this first PR, there is only the RA sending messages to control plane,\n\t\/\/ there is no message the control plane should send to the RA\n\ta.logger.Info(\"Received unexpected control message\")\n\tmessage.Ack()\n}\n\nfunc (a *Adapter) Setup(sess sarama.ConsumerGroupSession) {\n\tif a.controlServer != nil {\n\t\tif err := a.controlServer.SendAndWaitForAck(kafkasourcecontrol.NotifySetupClaimsOpCode, kafkasourcecontrol.Claims(sess.Claims())); err != nil {\n\t\t\ta.logger.Warnf(\"Cannot send the claims update: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Preemptively initialize consumer group offsets to be able to mark the source as ready\n\t\/\/ as soon as possible.\n\tif err := a.InitOffsets(sess); err != nil {\n\t\ta.logger.Warnf(\"Cannot initialized consumer group offsets: %v\", err)\n\t}\n}\n\nfunc (a *Adapter) Cleanup(sess sarama.ConsumerGroupSession) {\n\tif a.controlServer != nil {\n\t\tif err := a.controlServer.SendAndWaitForAck(kafkasourcecontrol.NotifyCleanupClaimsOpCode, kafkasourcecontrol.Claims(sess.Claims())); err != nil {\n\t\t\ta.logger.Warnf(\"Cannot send the claims update: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ InitOffsets makes sure all consumer group offsets are set.\nfunc (a *Adapter) InitOffsets(session sarama.ConsumerGroupSession) error {\n\tif a.saramaConfig.Consumer.Offsets.Initial == sarama.OffsetNewest {\n\t\t\/\/ We want to make sure that ALL consumer group offsets are set to avoid\n\t\t\/\/ losing events in case the consumer group session is closed before at least one message is\n\t\t\/\/ consumed from ALL partitions.\n\t\t\/\/ If not, an event sent to a partition with an uninitialized offset\n\t\t\/\/ will not be forwarded when the session is closed (or a rebalancing is in progress).\n\t\tkafkaClient, err := sarama.NewClient(a.config.BootstrapServers, a.saramaConfig)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create a Kafka client: %w\", err)\n\t\t}\n\t\tdefer kafkaClient.Close()\n\n\t\tkafkaAdminClient, err := sarama.NewClusterAdminFromClient(kafkaClient)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create a Kafka admin client: %w\", err)\n\t\t}\n\t\tdefer kafkaAdminClient.Close()\n\n\t\t\/\/ Retrieve all partitions\n\t\ttopicPartitions := make(map[string][]int32)\n\t\tfor _, topic := range a.config.Topics {\n\t\t\tpartitions, err := kafkaClient.Partitions(topic)\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get partitions for topic %s: %w\", topic, err)\n\t\t\t}\n\n\t\t\ttopicPartitions[topic] = partitions\n\t\t}\n\n\t\t\/\/ Look for uninitialized offset (-1)\n\t\toffsets, err := kafkaAdminClient.ListConsumerGroupOffsets(a.config.ConsumerGroup, topicPartitions)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdirty := false\n\t\tfor topic, partitions := range offsets.Blocks {\n\t\t\tfor partition, block := range partitions {\n\t\t\t\tif block.Offset == -1 { \/\/ not initialized?\n\n\t\t\t\t\t\/\/ Fetch the newest offset in the topic\/partition and set it in the consumer group\n\t\t\t\t\toffset, err := kafkaClient.GetOffset(topic, partition, sarama.OffsetNewest)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"failed to get the offset for topic %s and partition %d: %w\", topic, partition, err)\n\t\t\t\t\t}\n\n\t\t\t\t\ta.logger.Infow(\"initializing offset\", zap.String(\"topic\", topic), zap.Int32(\"partition\", partition), zap.Int64(\"offset\", offset))\n\n\t\t\t\t\tsession.MarkOffset(topic, partition, offset, \"\")\n\t\t\t\t\tdirty = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif dirty {\n\t\t\tsession.Commit()\n\n\t\t\ta.logger.Infow(\"consumer group offsets committed\", zap.String(\"consumergroup\", a.config.ConsumerGroup))\n\t\t}\n\t}\n\n\t\/\/ At this stage the KafkaSource instance is considered Ready (TODO: update KafkaSource status)\n\treturn nil\n}\n<commit_msg>KafkaSource retries on error (#666)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kafka\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/time\/rate\"\n\tctrl \"knative.dev\/control-protocol\/pkg\"\n\tctrlnetwork \"knative.dev\/control-protocol\/pkg\/network\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"knative.dev\/pkg\/logging\"\n\tpkgsource \"knative.dev\/pkg\/source\"\n\n\t\"knative.dev\/eventing\/pkg\/adapter\/v2\"\n\t\"knative.dev\/eventing\/pkg\/kncloudevents\"\n\n\t\"knative.dev\/eventing-kafka\/pkg\/common\/consumer\"\n\t\"knative.dev\/eventing-kafka\/pkg\/source\/client\"\n\tkafkasourcecontrol \"knative.dev\/eventing-kafka\/pkg\/source\/control\"\n)\n\nconst (\n\tresourceGroup = \"kafkasources.sources.knative.dev\"\n)\n\ntype AdapterConfig struct {\n\tadapter.EnvConfig\n\tclient.KafkaEnvConfig\n\n\tTopics []string `envconfig:\"KAFKA_TOPICS\" required:\"true\"`\n\tConsumerGroup string `envconfig:\"KAFKA_CONSUMER_GROUP\" required:\"true\"`\n\tName string `envconfig:\"NAME\" required:\"true\"`\n\tKeyType string `envconfig:\"KEY_TYPE\" required:\"false\"`\n\n\t\/\/ Turn off the control server.\n\tDisableControlServer bool\n}\n\nfunc NewEnvConfig() adapter.EnvConfigAccessor {\n\treturn &AdapterConfig{}\n}\n\ntype Adapter struct {\n\tconfig *AdapterConfig\n\tcontrolServer *ctrlnetwork.ControlServer\n\tsaramaConfig *sarama.Config\n\n\thttpMessageSender *kncloudevents.HTTPMessageSender\n\treporter pkgsource.StatsReporter\n\tlogger *zap.SugaredLogger\n\tkeyTypeMapper func([]byte) interface{}\n\trateLimiter *rate.Limiter\n}\n\nvar (\n\t_ adapter.MessageAdapter = (*Adapter)(nil)\n\t_ consumer.KafkaConsumerHandler = (*Adapter)(nil)\n\t_ consumer.SaramaConsumerLifecycleListener = (*Adapter)(nil)\n\t_ adapter.MessageAdapterConstructor = NewAdapter\n\tretryConfig = defaultRetryConfig()\n)\n\nfunc NewAdapter(ctx context.Context, processed adapter.EnvConfigAccessor, httpMessageSender *kncloudevents.HTTPMessageSender, reporter pkgsource.StatsReporter) adapter.MessageAdapter {\n\tlogger := logging.FromContext(ctx)\n\tconfig := processed.(*AdapterConfig)\n\n\treturn &Adapter{\n\t\tconfig: config,\n\t\thttpMessageSender: httpMessageSender,\n\t\treporter: reporter,\n\t\tlogger: logger,\n\t\tkeyTypeMapper: getKeyTypeMapper(config.KeyType),\n\t}\n}\nfunc (a *Adapter) GetConsumerGroup() string {\n\treturn a.config.ConsumerGroup\n}\n\nfunc (a *Adapter) Start(ctx context.Context) (err error) {\n\ta.logger.Infow(\"Starting with config: \",\n\t\tzap.String(\"Topics\", strings.Join(a.config.Topics, \",\")),\n\t\tzap.String(\"ConsumerGroup\", a.config.ConsumerGroup),\n\t\tzap.String(\"SinkURI\", a.config.Sink),\n\t\tzap.String(\"Name\", a.config.Name),\n\t\tzap.String(\"Namespace\", a.config.Namespace),\n\t)\n\n\t\/\/ Init control service\n\tif !a.config.DisableControlServer {\n\t\ta.controlServer, err = ctrlnetwork.StartInsecureControlServer(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ta.controlServer.MessageHandler(a)\n\t}\n\n\t\/\/ init consumer group\n\taddrs, config, err := client.NewConfigWithEnv(context.Background(), &a.config.KafkaEnvConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create the config: %w\", err)\n\t}\n\ta.saramaConfig = config\n\n\toptions := []consumer.SaramaConsumerHandlerOption{consumer.WithSaramaConsumerLifecycleListener(a)}\n\tconsumerGroupFactory := consumer.NewConsumerGroupFactory(addrs, config)\n\tgroup, err := consumerGroupFactory.StartConsumerGroup(\n\t\ta.config.ConsumerGroup,\n\t\ta.config.Topics,\n\t\ta.logger,\n\t\ta,\n\t\toptions...,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start consumer group: %w\", err)\n\t}\n\tdefer func() {\n\t\terr := group.Close()\n\t\tif err != nil {\n\t\t\ta.logger.Errorw(\"Failed to close consumer group\", zap.Error(err))\n\t\t}\n\t}()\n\n\t\/\/ Track errors\n\tgo func() {\n\t\tfor err := range group.Errors() {\n\t\t\ta.logger.Errorw(\"Error while consuming messages\", zap.Error(err))\n\t\t}\n\t}()\n\n\t<-ctx.Done()\n\ta.logger.Info(\"Shutting down...\")\n\treturn nil\n}\n\nfunc (a *Adapter) SetReady(int32, bool) {}\n\nfunc (a *Adapter) Handle(ctx context.Context, msg *sarama.ConsumerMessage) (bool, error) {\n\tif a.rateLimiter != nil {\n\t\ta.rateLimiter.Wait(ctx)\n\t}\n\n\tctx, span := trace.StartSpan(ctx, \"kafka-source\")\n\tdefer span.End()\n\n\treq, err := a.httpMessageSender.NewCloudEventRequest(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\terr = a.ConsumerMessageToHttpRequest(ctx, msg, req)\n\tif err != nil {\n\t\ta.logger.Debug(\"failed to create request\", zap.Error(err))\n\t\treturn true, err\n\t}\n\n\tres, err := a.httpMessageSender.SendWithRetries(req, retryConfig)\n\n\tif err != nil {\n\t\ta.logger.Debug(\"Error while sending the message\", zap.Error(err))\n\t\treturn false, err \/\/ Error while sending, don't commit offset\n\t}\n\t\/\/ Always try to read and close body so the connection can be reused afterwards\n\tif res.Body != nil {\n\t\tio.Copy(ioutil.Discard, res.Body)\n\t\tres.Body.Close()\n\t}\n\n\tif res.StatusCode\/100 != 2 {\n\t\ta.logger.Debug(\"Unexpected status code\", zap.Int(\"status code\", res.StatusCode))\n\t\treturn false, fmt.Errorf(\"%d %s\", res.StatusCode, http.StatusText(res.StatusCode))\n\t}\n\n\treportArgs := &pkgsource.ReportArgs{\n\t\tNamespace: a.config.Namespace,\n\t\tName: a.config.Name,\n\t\tResourceGroup: resourceGroup,\n\t}\n\n\t_ = a.reporter.ReportEventCount(reportArgs, res.StatusCode)\n\treturn true, nil\n}\n\n\/\/ SetRateLimiter sets the global consumer rate limiter\nfunc (a *Adapter) SetRateLimits(r rate.Limit, b int) {\n\ta.rateLimiter = rate.NewLimiter(r, b)\n}\n\nfunc (a *Adapter) HandleServiceMessage(ctx context.Context, message ctrl.ServiceMessage) {\n\t\/\/ In this first PR, there is only the RA sending messages to control plane,\n\t\/\/ there is no message the control plane should send to the RA\n\ta.logger.Info(\"Received unexpected control message\")\n\tmessage.Ack()\n}\n\nfunc (a *Adapter) Setup(sess sarama.ConsumerGroupSession) {\n\tif a.controlServer != nil {\n\t\tif err := a.controlServer.SendAndWaitForAck(kafkasourcecontrol.NotifySetupClaimsOpCode, kafkasourcecontrol.Claims(sess.Claims())); err != nil {\n\t\t\ta.logger.Warnf(\"Cannot send the claims update: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Preemptively initialize consumer group offsets to be able to mark the source as ready\n\t\/\/ as soon as possible.\n\tif err := a.InitOffsets(sess); err != nil {\n\t\ta.logger.Warnf(\"Cannot initialized consumer group offsets: %v\", err)\n\t}\n}\n\nfunc (a *Adapter) Cleanup(sess sarama.ConsumerGroupSession) {\n\tif a.controlServer != nil {\n\t\tif err := a.controlServer.SendAndWaitForAck(kafkasourcecontrol.NotifyCleanupClaimsOpCode, kafkasourcecontrol.Claims(sess.Claims())); err != nil {\n\t\t\ta.logger.Warnf(\"Cannot send the claims update: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ InitOffsets makes sure all consumer group offsets are set.\nfunc (a *Adapter) InitOffsets(session sarama.ConsumerGroupSession) error {\n\tif a.saramaConfig.Consumer.Offsets.Initial == sarama.OffsetNewest {\n\t\t\/\/ We want to make sure that ALL consumer group offsets are set to avoid\n\t\t\/\/ losing events in case the consumer group session is closed before at least one message is\n\t\t\/\/ consumed from ALL partitions.\n\t\t\/\/ If not, an event sent to a partition with an uninitialized offset\n\t\t\/\/ will not be forwarded when the session is closed (or a rebalancing is in progress).\n\t\tkafkaClient, err := sarama.NewClient(a.config.BootstrapServers, a.saramaConfig)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create a Kafka client: %w\", err)\n\t\t}\n\t\tdefer kafkaClient.Close()\n\n\t\tkafkaAdminClient, err := sarama.NewClusterAdminFromClient(kafkaClient)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create a Kafka admin client: %w\", err)\n\t\t}\n\t\tdefer kafkaAdminClient.Close()\n\n\t\t\/\/ Retrieve all partitions\n\t\ttopicPartitions := make(map[string][]int32)\n\t\tfor _, topic := range a.config.Topics {\n\t\t\tpartitions, err := kafkaClient.Partitions(topic)\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get partitions for topic %s: %w\", topic, err)\n\t\t\t}\n\n\t\t\ttopicPartitions[topic] = partitions\n\t\t}\n\n\t\t\/\/ Look for uninitialized offset (-1)\n\t\toffsets, err := kafkaAdminClient.ListConsumerGroupOffsets(a.config.ConsumerGroup, topicPartitions)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdirty := false\n\t\tfor topic, partitions := range offsets.Blocks {\n\t\t\tfor partition, block := range partitions {\n\t\t\t\tif block.Offset == -1 { \/\/ not initialized?\n\n\t\t\t\t\t\/\/ Fetch the newest offset in the topic\/partition and set it in the consumer group\n\t\t\t\t\toffset, err := kafkaClient.GetOffset(topic, partition, sarama.OffsetNewest)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"failed to get the offset for topic %s and partition %d: %w\", topic, partition, err)\n\t\t\t\t\t}\n\n\t\t\t\t\ta.logger.Infow(\"initializing offset\", zap.String(\"topic\", topic), zap.Int32(\"partition\", partition), zap.Int64(\"offset\", offset))\n\n\t\t\t\t\tsession.MarkOffset(topic, partition, offset, \"\")\n\t\t\t\t\tdirty = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif dirty {\n\t\t\tsession.Commit()\n\n\t\t\ta.logger.Infow(\"consumer group offsets committed\", zap.String(\"consumergroup\", a.config.ConsumerGroup))\n\t\t}\n\t}\n\n\t\/\/ At this stage the KafkaSource instance is considered Ready (TODO: update KafkaSource status)\n\treturn nil\n}\n\n\/\/ Default retry configuration, 5 retries, exponential backoff with 50ms delay\nfunc defaultRetryConfig() *kncloudevents.RetryConfig {\n\treturn &kncloudevents.RetryConfig{\n\t\tCheckRetry: kncloudevents.SelectiveRetry,\n\t\tRetryMax: 5,\n\t\tBackoff: func(attemptNum int, resp *http.Response) time.Duration {\n\t\t\treturn 50 * time.Millisecond * time.Duration(math.Exp2(float64(attemptNum)))\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage timeutil\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc mustParseTime(t string) time.Time {\n\tref, err := time.Parse(\"Mon Jan 2 15:04:05 MST 2006\", t)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"parsing time %s failed: %s\", t, err))\n\t}\n\treturn ref\n}\n\nfunc TestPeriodicParse(t *testing.T) {\n\ttests := []struct {\n\t\tstart string\n\t\tduration string\n\t\terr bool\n\t}{\n\t\t{ \/\/ Valid start time\n\t\t\tstart: \"14:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: false,\n\t\t},\n\t\t{ \/\/ Start hour out of range\n\t\t\tstart: \"25:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Start minute out of range\n\t\t\tstart: \"23:61\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad time of day\n\t\t\tstart: \"14\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad day of week\n\t\t\tstart: \"foo 14:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad duration\n\t\t\tstart: \"sat 14:00\",\n\t\t\tduration: \"1j\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Negative duration\n\t\t\tstart: \"sat 14:00\",\n\t\t\tduration: \"-1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Too many start fields\n\t\t\tstart: \"sat 14:00 1234\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Specified just a day\n\t\t\tstart: \"sat\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t_, err := ParsePeriodic(tt.start, tt.duration)\n\t\tif err != nil && tt.err == false {\n\t\t\tt.Errorf(\"#%d: unexpected error: %q\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && tt.err == true {\n\t\t\tt.Errorf(\"#%d: expected error and did not get it\", i)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestToStart(t *testing.T) {\n\ttests := []struct {\n\t\tstart string\n\t\tduration string\n\t\ttime string\n\t\ttoStart time.Duration\n\t}{\n\t\t{ \/\/ Daily window in 15 minutes.\n\t\t\tstart: \"00:15\",\n\t\t\tduration: \"10s\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 15 * time.Minute,\n\t\t},\n\t\t{ \/\/ Daily window now.\n\t\t\tstart: \"00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 0,\n\t\t},\n\t\t{ \/\/ Daily window started 1 second ago.\n\t\t\tstart: \"00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:01 PDT 2015\",\n\t\t\ttoStart: -1 * time.Second,\n\t\t},\n\t\t{ \/\/ Daily window started 10 hours ago but closing edge.\n\t\t\tstart: \"02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Thu May 21 12:33:00 PDT 2015\",\n\t\t\ttoStart: -10 * time.Hour,\n\t\t},\n\t\t{ \/\/ Daily window started 10 hours ago but _just_ closed, expect next.\n\t\t\tstart: \"02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Thu May 21 12:33:01 PDT 2015\",\n\t\t\ttoStart: 13*time.Hour + 59*time.Minute + 59*time.Second,\n\t\t},\n\t\t{ \/\/ Daily window started last night but extends into now on the next day.\n\t\t\tstart: \"23:05\",\n\t\t\tduration: \"11h\",\n\t\t\ttime: \"Thu May 21 09:33:01 PDT 2015\",\n\t\t\ttoStart: -(10*time.Hour + 28*time.Minute + 1*time.Second),\n\t\t},\n\t\t{ \/\/ Weekly window started in 15 minutes.\n\t\t\tstart: \"Thu 00:15\",\n\t\t\tduration: \"10s\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 15 * time.Minute,\n\t\t},\n\t\t{ \/\/ Weekly window now.\n\t\t\tstart: \"Thu 00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 0,\n\t\t},\n\t\t{ \/\/ Weekly window started 1 second ago.\n\t\t\tstart: \"Sun 00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Sun May 17 00:00:01 PDT 2015\",\n\t\t\ttoStart: -1 * time.Second,\n\t\t},\n\t\t{ \/\/ Weekly window started 10 hours ago but closing edge.\n\t\t\tstart: \"Sun 02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Sun May 17 12:33:00 PDT 2015\",\n\t\t\ttoStart: -10 * time.Hour,\n\t\t},\n\t\t{ \/\/ Weekly window started last night in previous week but extends into now on the next day.\n\t\t\tstart: \"Sat 23:05\",\n\t\t\tduration: \"11h\",\n\t\t\ttime: \"Sun May 17 09:33:01 PDT 2015\",\n\t\t\ttoStart: -(10*time.Hour + 28*time.Minute + 1*time.Second),\n\t\t},\n\t\t{ \/\/ Weekly window where next period begins next month\n\t\t\tstart: \"Mon 9:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Sat May 30 23:00:00 PDT 2015\",\n\t\t\ttoStart: 34 * time.Hour,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tp, err := ParsePeriodic(tt.start, tt.duration)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: periodic parse failed: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tref := mustParseTime(tt.time)\n\t\tif dts := p.DurationToStart(ref); dts != tt.toStart {\n\t\t\tt.Errorf(\"#%d: got %v, want %v\", i, dts, tt.toStart)\n\t\t}\n\t}\n}\n<commit_msg>pkg\/timeutil: add test for a period that began on the last day of the previous month<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage timeutil\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc mustParseTime(t string) time.Time {\n\tref, err := time.Parse(\"Mon Jan 2 15:04:05 MST 2006\", t)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"parsing time %s failed: %s\", t, err))\n\t}\n\treturn ref\n}\n\nfunc TestPeriodicParse(t *testing.T) {\n\ttests := []struct {\n\t\tstart string\n\t\tduration string\n\t\terr bool\n\t}{\n\t\t{ \/\/ Valid start time\n\t\t\tstart: \"14:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: false,\n\t\t},\n\t\t{ \/\/ Start hour out of range\n\t\t\tstart: \"25:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Start minute out of range\n\t\t\tstart: \"23:61\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad time of day\n\t\t\tstart: \"14\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad day of week\n\t\t\tstart: \"foo 14:00\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Bad duration\n\t\t\tstart: \"sat 14:00\",\n\t\t\tduration: \"1j\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Negative duration\n\t\t\tstart: \"sat 14:00\",\n\t\t\tduration: \"-1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Too many start fields\n\t\t\tstart: \"sat 14:00 1234\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t\t{ \/\/ Specified just a day\n\t\t\tstart: \"sat\",\n\t\t\tduration: \"1h\",\n\t\t\terr: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t_, err := ParsePeriodic(tt.start, tt.duration)\n\t\tif err != nil && tt.err == false {\n\t\t\tt.Errorf(\"#%d: unexpected error: %q\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && tt.err == true {\n\t\t\tt.Errorf(\"#%d: expected error and did not get it\", i)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestToStart(t *testing.T) {\n\ttests := []struct {\n\t\tstart string\n\t\tduration string\n\t\ttime string\n\t\ttoStart time.Duration\n\t}{\n\t\t{ \/\/ Daily window in 15 minutes.\n\t\t\tstart: \"00:15\",\n\t\t\tduration: \"10s\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 15 * time.Minute,\n\t\t},\n\t\t{ \/\/ Daily window now.\n\t\t\tstart: \"00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 0,\n\t\t},\n\t\t{ \/\/ Daily window started 1 second ago.\n\t\t\tstart: \"00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:01 PDT 2015\",\n\t\t\ttoStart: -1 * time.Second,\n\t\t},\n\t\t{ \/\/ Daily window started 10 hours ago but closing edge.\n\t\t\tstart: \"02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Thu May 21 12:33:00 PDT 2015\",\n\t\t\ttoStart: -10 * time.Hour,\n\t\t},\n\t\t{ \/\/ Daily window started 10 hours ago but _just_ closed, expect next.\n\t\t\tstart: \"02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Thu May 21 12:33:01 PDT 2015\",\n\t\t\ttoStart: 13*time.Hour + 59*time.Minute + 59*time.Second,\n\t\t},\n\t\t{ \/\/ Daily window started last night but extends into now on the next day.\n\t\t\tstart: \"23:05\",\n\t\t\tduration: \"11h\",\n\t\t\ttime: \"Thu May 21 09:33:01 PDT 2015\",\n\t\t\ttoStart: -(10*time.Hour + 28*time.Minute + 1*time.Second),\n\t\t},\n\t\t{ \/\/ Weekly window started in 15 minutes.\n\t\t\tstart: \"Thu 00:15\",\n\t\t\tduration: \"10s\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 15 * time.Minute,\n\t\t},\n\t\t{ \/\/ Weekly window now.\n\t\t\tstart: \"Thu 00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Thu May 21 00:00:00 PDT 2015\",\n\t\t\ttoStart: 0,\n\t\t},\n\t\t{ \/\/ Weekly window started 1 second ago.\n\t\t\tstart: \"Sun 00:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Sun May 17 00:00:01 PDT 2015\",\n\t\t\ttoStart: -1 * time.Second,\n\t\t},\n\t\t{ \/\/ Weekly window started 10 hours ago but closing edge.\n\t\t\tstart: \"Sun 02:33\",\n\t\t\tduration: \"10h\",\n\t\t\ttime: \"Sun May 17 12:33:00 PDT 2015\",\n\t\t\ttoStart: -10 * time.Hour,\n\t\t},\n\t\t{ \/\/ Weekly window started last night in previous week but extends into now on the next day.\n\t\t\tstart: \"Sat 23:05\",\n\t\t\tduration: \"11h\",\n\t\t\ttime: \"Sun May 17 09:33:01 PDT 2015\",\n\t\t\ttoStart: -(10*time.Hour + 28*time.Minute + 1*time.Second),\n\t\t},\n\t\t{ \/\/ Weekly window where next period begins next month\n\t\t\tstart: \"Mon 9:00\",\n\t\t\tduration: \"1h\",\n\t\t\ttime: \"Sat May 30 23:00:00 PDT 2015\",\n\t\t\ttoStart: 34 * time.Hour,\n\t\t},\n\t\t{ \/\/ Weekly window where the period started on the last day of the previous month\n\t\t\tstart: \"Sun 23:00\",\n\t\t\tduration: \"4h\",\n\t\t\ttime: \"Mon Jun 1 01:00:00 PDT 2015\",\n\t\t\ttoStart: -(2 * time.Hour),\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tp, err := ParsePeriodic(tt.start, tt.duration)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: periodic parse failed: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tref := mustParseTime(tt.time)\n\t\tif dts := p.DurationToStart(ref); dts != tt.toStart {\n\t\t\tt.Errorf(\"#%d: got %v, want %v\", i, dts, tt.toStart)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package exec\n\nimport (\n\t\"archive\/tar\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/apps\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/realtime\"\n\n\t\"github.com\/spf13\/afero\"\n)\n\n\/\/ KonnectorOptions contains the options to execute a konnector.\ntype KonnectorOptions struct {\n\tKonnector string `json:\"konnector\"`\n\tAccount string `json:\"account\"`\n\tFolderToSave string `json:\"folder_to_save\"`\n}\n\ntype konnectorWorker struct {\n\topts *KonnectorOptions\n\tman *apps.KonnManifest\n\tmessages []konnectorMsg\n}\n\n\/\/ result stores the result of a konnector execution.\ntype result struct {\n\tDocID string `json:\"_id,omitempty\"`\n\tDocRev string `json:\"_rev,omitempty\"`\n\tCreatedAt time.Time `json:\"last_execution\"`\n\tLastSuccess time.Time `json:\"last_success\"`\n\tAccount string `json:\"account\"`\n\tState string `json:\"state\"`\n\tError string `json:\"error\"`\n}\n\nfunc (r *result) ID() string { return r.DocID }\nfunc (r *result) Rev() string { return r.DocRev }\nfunc (r *result) DocType() string { return consts.KonnectorResults }\nfunc (r *result) Clone() couchdb.Doc { c := *r; return &c }\nfunc (r *result) SetID(id string) { r.DocID = id }\nfunc (r *result) SetRev(rev string) { r.DocRev = rev }\n\nconst konnectorMsgTypeError string = \"error\"\n\n\/\/ const konnectorMsgTypeDebug string = \"debug\"\n\/\/ const konnectorMsgTypeWarning string = \"warning\"\n\/\/ const konnectorMsgTypeProgress string = \"progress\"\n\ntype konnectorMsg struct {\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n}\n\ntype konnectorLogs struct {\n\tSlug string `json:\"_id,omitempty\"`\n\tDocRev string `json:\"_rev,omitempty\"`\n\tMessages []konnectorMsg `json:\"logs\"`\n}\n\nfunc (kl *konnectorLogs) ID() string { return kl.Slug }\nfunc (kl *konnectorLogs) Rev() string { return kl.DocRev }\nfunc (kl *konnectorLogs) DocType() string { return consts.KonnectorLogs }\nfunc (kl *konnectorLogs) Clone() couchdb.Doc {\n\tcloned := *kl\n\tcloned.Messages = make([]konnectorMsg, len(kl.Messages))\n\tcopy(cloned.Messages, kl.Messages)\n\treturn &cloned\n}\nfunc (kl *konnectorLogs) SetID(id string) {}\nfunc (kl *konnectorLogs) SetRev(rev string) { kl.DocRev = rev }\n\nfunc (w *konnectorWorker) PrepareWorkDir(i *instance.Instance, m *jobs.Message) (workDir string, err error) {\n\topts := &KonnectorOptions{}\n\tif err = m.Unmarshal(&opts); err != nil {\n\t\treturn\n\t}\n\n\tslug := opts.Konnector\n\n\tman, err := apps.GetKonnectorBySlug(i, slug)\n\tif err != nil {\n\t\treturn\n\t}\n\tif man.State() != apps.Ready {\n\t\terr = errors.New(\"Konnector is not ready\")\n\t\treturn\n\t}\n\n\tw.opts = opts\n\tw.man = man\n\n\tosFS := afero.NewOsFs()\n\tworkDir, err = afero.TempDir(osFS, \"\", \"konnector-\"+slug)\n\tif err != nil {\n\t\treturn\n\t}\n\tworkFS := afero.NewBasePathFs(osFS, workDir)\n\n\tfileServer := i.KonnectorsFileServer()\n\ttarFile, err := fileServer.Open(slug, man.Version(), apps.KonnectorArchiveName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttr := tar.NewReader(tarFile)\n\tfor {\n\t\tvar hdr *tar.Header\n\t\thdr, err = tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdirname := path.Dir(hdr.Name)\n\t\tif dirname != \".\" {\n\t\t\tif err = workFS.MkdirAll(dirname, 0755); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tvar f afero.File\n\t\tf, err = workFS.OpenFile(hdr.Name, os.O_CREATE|os.O_WRONLY, 0640)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, err = io.Copy(f, tr)\n\t\terrc := f.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif errc != nil {\n\t\t\terr = errc\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn workDir, nil\n}\n\nfunc (w *konnectorWorker) PrepareCmdEnv(i *instance.Instance, m *jobs.Message) (cmd string, env []string, jobID string, err error) {\n\tjobID = fmt.Sprintf(\"konnector\/%s\/%s\", w.opts.Konnector, i.Domain)\n\n\tfields := struct {\n\t\tAccount string `json:\"account\"`\n\t\tFolderToSave string `json:\"folder_to_save,omitempty\"`\n\t}{\n\t\tAccount: w.opts.Account,\n\t\tFolderToSave: w.opts.FolderToSave,\n\t}\n\n\tfieldsJSON, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tparamsJSON, err := json.Marshal(w.man.Parameters)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttoken := i.BuildKonnectorToken(w.man)\n\n\tcmd = config.GetConfig().Konnectors.Cmd\n\tenv = []string{\n\t\t\"COZY_URL=\" + i.PageURL(\"\/\", nil),\n\t\t\"COZY_CREDENTIALS=\" + token,\n\t\t\"COZY_FIELDS=\" + string(fieldsJSON),\n\t\t\"COZY_PARAMETERS=\" + string(paramsJSON),\n\t\t\"COZY_TYPE=\" + w.man.Type,\n\t\t\"COZY_LOCALE=\" + i.Locale,\n\t\t\"COZY_JOB_ID=\" + jobID,\n\t}\n\treturn\n}\n\nfunc (w *konnectorWorker) ScanOuput(i *instance.Instance, line []byte) error {\n\tvar msg konnectorMsg\n\tif err := json.Unmarshal(line, &msg); err != nil {\n\t\treturn fmt.Errorf(\"Could not parse stdout as JSON: %q\", string(line))\n\t}\n\t\/\/ TODO: filter some of the messages\n\tw.messages = append(w.messages, msg)\n\trealtime.GetHub().Publish(&realtime.Event{\n\t\tVerb: realtime.EventCreate,\n\t\tDoc: couchdb.JSONDoc{Type: consts.JobEvents, M: map[string]interface{}{\n\t\t\t\"type\": msg.Type,\n\t\t\t\"message\": msg.Message,\n\t\t}},\n\t\tDomain: i.Domain,\n\t})\n\treturn nil\n}\n\nfunc (w *konnectorWorker) Error(i *instance.Instance, err error) error {\n\terrLogs := couchdb.Upsert(i, &konnectorLogs{\n\t\tSlug: w.opts.Konnector,\n\t\tMessages: w.messages,\n\t})\n\tif errLogs != nil {\n\t\tfmt.Println(\"Failed to save konnector logs\", errLogs)\n\t}\n\n\tfor _, msg := range w.messages {\n\t\tif msg.Type == konnectorMsgTypeError {\n\t\t\t\/\/ konnector err is more explicit\n\t\t\treturn errors.New(msg.Message)\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (w *konnectorWorker) Commit(ctx context.Context, msg *jobs.Message, errjob error) error {\n\tif w.opts == nil {\n\t\treturn nil\n\t}\n\n\tslug := w.opts.Konnector\n\tdomain := ctx.Value(jobs.ContextDomainKey).(string)\n\n\tinst, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlastResult := &result{}\n\terr = couchdb.GetDoc(inst, consts.KonnectorResults, slug, lastResult)\n\tif err != nil {\n\t\tif !couchdb.IsNotFoundError(err) {\n\t\t\treturn err\n\t\t}\n\t\tlastResult = nil\n\t}\n\n\tvar state, errstr string\n\tvar lastSuccess time.Time\n\tif errjob != nil {\n\t\tif lastResult != nil {\n\t\t\tlastSuccess = lastResult.LastSuccess\n\t\t}\n\t\terrstr = errjob.Error()\n\t\tstate = jobs.Errored\n\t} else {\n\t\tlastSuccess = time.Now()\n\t\tstate = jobs.Done\n\t}\n\tresult := &result{\n\t\tDocID: slug,\n\t\tAccount: w.opts.Account,\n\t\tCreatedAt: time.Now(),\n\t\tLastSuccess: lastSuccess,\n\t\tState: state,\n\t\tError: errstr,\n\t}\n\tif lastResult == nil {\n\t\terr = couchdb.CreateNamedDocWithDB(inst, result)\n\t} else {\n\t\tresult.SetRev(lastResult.Rev())\n\t\terr = couchdb.UpdateDoc(inst, result)\n\t}\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\n\treturn err\n\t\/\/ \/\/ if it is the first try we do not take into account an error, we bail.\n\t\/\/ if lastResult == nil {\n\t\/\/ \treturn nil\n\t\/\/ }\n\t\/\/ \/\/ if the job has not errored, or the last one was already errored, we bail.\n\t\/\/ if state != jobs.Errored || lastResult.State == jobs.Errored {\n\t\/\/ \treturn nil\n\t\/\/ }\n\n\t\/\/ konnectorURL := inst.SubDomain(consts.CollectSlug)\n\t\/\/ konnectorURL.Fragment = \"\/category\/all\/\" + slug\n\t\/\/ mail := mails.Options{\n\t\/\/ \tMode: mails.ModeNoReply,\n\t\/\/ \tSubject: inst.Translate(\"Error Konnector execution\", domain),\n\t\/\/ \tTemplateName: \"konnector_error_\" + inst.Locale,\n\t\/\/ \tTemplateValues: map[string]string{\n\t\/\/ \t\t\"KonnectorName\": slug,\n\t\/\/ \t\t\"KonnectorPage\": konnectorURL.String(),\n\t\/\/ \t},\n\t\/\/ }\n\t\/\/ msg, err := jobs.NewMessage(jobs.JSONEncoding, &mail)\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\t\/\/ log := logger.WithDomain(domain)\n\t\/\/ log.Info(\"Konnector has failed definitively, should send mail.\", mail)\n\t\/\/ _, err = globals.GetBroker().PushJob(&jobs.JobRequest{\n\t\/\/ \tDomain: domain,\n\t\/\/ \tWorkerType: \"sendmail\",\n\t\/\/ \tMessage: msg,\n\t\/\/ })\n\t\/\/ return err\n}\n<commit_msg>Add a created_at for konnector logs<commit_after>package exec\n\nimport (\n\t\"archive\/tar\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/apps\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/jobs\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/realtime\"\n\n\t\"github.com\/spf13\/afero\"\n)\n\n\/\/ KonnectorOptions contains the options to execute a konnector.\ntype KonnectorOptions struct {\n\tKonnector string `json:\"konnector\"`\n\tAccount string `json:\"account\"`\n\tFolderToSave string `json:\"folder_to_save\"`\n}\n\ntype konnectorWorker struct {\n\topts *KonnectorOptions\n\tman *apps.KonnManifest\n\tmessages []konnectorMsg\n}\n\n\/\/ result stores the result of a konnector execution.\ntype result struct {\n\tDocID string `json:\"_id,omitempty\"`\n\tDocRev string `json:\"_rev,omitempty\"`\n\tCreatedAt time.Time `json:\"last_execution\"`\n\tLastSuccess time.Time `json:\"last_success\"`\n\tAccount string `json:\"account\"`\n\tState string `json:\"state\"`\n\tError string `json:\"error\"`\n}\n\nfunc (r *result) ID() string { return r.DocID }\nfunc (r *result) Rev() string { return r.DocRev }\nfunc (r *result) DocType() string { return consts.KonnectorResults }\nfunc (r *result) Clone() couchdb.Doc { c := *r; return &c }\nfunc (r *result) SetID(id string) { r.DocID = id }\nfunc (r *result) SetRev(rev string) { r.DocRev = rev }\n\nconst konnectorMsgTypeError string = \"error\"\n\n\/\/ const konnectorMsgTypeDebug string = \"debug\"\n\/\/ const konnectorMsgTypeWarning string = \"warning\"\n\/\/ const konnectorMsgTypeProgress string = \"progress\"\n\ntype konnectorMsg struct {\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n}\n\ntype konnectorLogs struct {\n\tSlug string `json:\"_id,omitempty\"`\n\tDocRev string `json:\"_rev,omitempty\"`\n\tMessages []konnectorMsg `json:\"logs\"`\n\tCreatedAt time.Time `json:\"created_at\"`\n}\n\nfunc (kl *konnectorLogs) ID() string { return kl.Slug }\nfunc (kl *konnectorLogs) Rev() string { return kl.DocRev }\nfunc (kl *konnectorLogs) DocType() string { return consts.KonnectorLogs }\nfunc (kl *konnectorLogs) Clone() couchdb.Doc {\n\tcloned := *kl\n\tcloned.Messages = make([]konnectorMsg, len(kl.Messages))\n\tcopy(cloned.Messages, kl.Messages)\n\treturn &cloned\n}\nfunc (kl *konnectorLogs) SetID(id string) {}\nfunc (kl *konnectorLogs) SetRev(rev string) { kl.DocRev = rev }\n\nfunc (w *konnectorWorker) PrepareWorkDir(i *instance.Instance, m *jobs.Message) (workDir string, err error) {\n\topts := &KonnectorOptions{}\n\tif err = m.Unmarshal(&opts); err != nil {\n\t\treturn\n\t}\n\n\tslug := opts.Konnector\n\n\tman, err := apps.GetKonnectorBySlug(i, slug)\n\tif err != nil {\n\t\treturn\n\t}\n\tif man.State() != apps.Ready {\n\t\terr = errors.New(\"Konnector is not ready\")\n\t\treturn\n\t}\n\n\tw.opts = opts\n\tw.man = man\n\n\tosFS := afero.NewOsFs()\n\tworkDir, err = afero.TempDir(osFS, \"\", \"konnector-\"+slug)\n\tif err != nil {\n\t\treturn\n\t}\n\tworkFS := afero.NewBasePathFs(osFS, workDir)\n\n\tfileServer := i.KonnectorsFileServer()\n\ttarFile, err := fileServer.Open(slug, man.Version(), apps.KonnectorArchiveName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttr := tar.NewReader(tarFile)\n\tfor {\n\t\tvar hdr *tar.Header\n\t\thdr, err = tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdirname := path.Dir(hdr.Name)\n\t\tif dirname != \".\" {\n\t\t\tif err = workFS.MkdirAll(dirname, 0755); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tvar f afero.File\n\t\tf, err = workFS.OpenFile(hdr.Name, os.O_CREATE|os.O_WRONLY, 0640)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, err = io.Copy(f, tr)\n\t\terrc := f.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif errc != nil {\n\t\t\terr = errc\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn workDir, nil\n}\n\nfunc (w *konnectorWorker) PrepareCmdEnv(i *instance.Instance, m *jobs.Message) (cmd string, env []string, jobID string, err error) {\n\tjobID = fmt.Sprintf(\"konnector\/%s\/%s\", w.opts.Konnector, i.Domain)\n\n\tfields := struct {\n\t\tAccount string `json:\"account\"`\n\t\tFolderToSave string `json:\"folder_to_save,omitempty\"`\n\t}{\n\t\tAccount: w.opts.Account,\n\t\tFolderToSave: w.opts.FolderToSave,\n\t}\n\n\tfieldsJSON, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tparamsJSON, err := json.Marshal(w.man.Parameters)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttoken := i.BuildKonnectorToken(w.man)\n\n\tcmd = config.GetConfig().Konnectors.Cmd\n\tenv = []string{\n\t\t\"COZY_URL=\" + i.PageURL(\"\/\", nil),\n\t\t\"COZY_CREDENTIALS=\" + token,\n\t\t\"COZY_FIELDS=\" + string(fieldsJSON),\n\t\t\"COZY_PARAMETERS=\" + string(paramsJSON),\n\t\t\"COZY_TYPE=\" + w.man.Type,\n\t\t\"COZY_LOCALE=\" + i.Locale,\n\t\t\"COZY_JOB_ID=\" + jobID,\n\t}\n\treturn\n}\n\nfunc (w *konnectorWorker) ScanOuput(i *instance.Instance, line []byte) error {\n\tvar msg konnectorMsg\n\tif err := json.Unmarshal(line, &msg); err != nil {\n\t\treturn fmt.Errorf(\"Could not parse stdout as JSON: %q\", string(line))\n\t}\n\t\/\/ TODO: filter some of the messages\n\tw.messages = append(w.messages, msg)\n\trealtime.GetHub().Publish(&realtime.Event{\n\t\tVerb: realtime.EventCreate,\n\t\tDoc: couchdb.JSONDoc{Type: consts.JobEvents, M: map[string]interface{}{\n\t\t\t\"type\": msg.Type,\n\t\t\t\"message\": msg.Message,\n\t\t}},\n\t\tDomain: i.Domain,\n\t})\n\treturn nil\n}\n\nfunc (w *konnectorWorker) Error(i *instance.Instance, err error) error {\n\terrLogs := couchdb.Upsert(i, &konnectorLogs{\n\t\tSlug: w.opts.Konnector,\n\t\tMessages: w.messages,\n\t\tCreatedAt: time.Now(),\n\t})\n\tif errLogs != nil {\n\t\tfmt.Println(\"Failed to save konnector logs\", errLogs)\n\t}\n\n\tfor _, msg := range w.messages {\n\t\tif msg.Type == konnectorMsgTypeError {\n\t\t\t\/\/ konnector err is more explicit\n\t\t\treturn errors.New(msg.Message)\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (w *konnectorWorker) Commit(ctx context.Context, msg *jobs.Message, errjob error) error {\n\tif w.opts == nil {\n\t\treturn nil\n\t}\n\n\tslug := w.opts.Konnector\n\tdomain := ctx.Value(jobs.ContextDomainKey).(string)\n\n\tinst, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlastResult := &result{}\n\terr = couchdb.GetDoc(inst, consts.KonnectorResults, slug, lastResult)\n\tif err != nil {\n\t\tif !couchdb.IsNotFoundError(err) {\n\t\t\treturn err\n\t\t}\n\t\tlastResult = nil\n\t}\n\n\tvar state, errstr string\n\tvar lastSuccess time.Time\n\tif errjob != nil {\n\t\tif lastResult != nil {\n\t\t\tlastSuccess = lastResult.LastSuccess\n\t\t}\n\t\terrstr = errjob.Error()\n\t\tstate = jobs.Errored\n\t} else {\n\t\tlastSuccess = time.Now()\n\t\tstate = jobs.Done\n\t}\n\tresult := &result{\n\t\tDocID: slug,\n\t\tAccount: w.opts.Account,\n\t\tCreatedAt: time.Now(),\n\t\tLastSuccess: lastSuccess,\n\t\tState: state,\n\t\tError: errstr,\n\t}\n\tif lastResult == nil {\n\t\terr = couchdb.CreateNamedDocWithDB(inst, result)\n\t} else {\n\t\tresult.SetRev(lastResult.Rev())\n\t\terr = couchdb.UpdateDoc(inst, result)\n\t}\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\n\treturn err\n\t\/\/ \/\/ if it is the first try we do not take into account an error, we bail.\n\t\/\/ if lastResult == nil {\n\t\/\/ \treturn nil\n\t\/\/ }\n\t\/\/ \/\/ if the job has not errored, or the last one was already errored, we bail.\n\t\/\/ if state != jobs.Errored || lastResult.State == jobs.Errored {\n\t\/\/ \treturn nil\n\t\/\/ }\n\n\t\/\/ konnectorURL := inst.SubDomain(consts.CollectSlug)\n\t\/\/ konnectorURL.Fragment = \"\/category\/all\/\" + slug\n\t\/\/ mail := mails.Options{\n\t\/\/ \tMode: mails.ModeNoReply,\n\t\/\/ \tSubject: inst.Translate(\"Error Konnector execution\", domain),\n\t\/\/ \tTemplateName: \"konnector_error_\" + inst.Locale,\n\t\/\/ \tTemplateValues: map[string]string{\n\t\/\/ \t\t\"KonnectorName\": slug,\n\t\/\/ \t\t\"KonnectorPage\": konnectorURL.String(),\n\t\/\/ \t},\n\t\/\/ }\n\t\/\/ msg, err := jobs.NewMessage(jobs.JSONEncoding, &mail)\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\t\/\/ log := logger.WithDomain(domain)\n\t\/\/ log.Info(\"Konnector has failed definitively, should send mail.\", mail)\n\t\/\/ _, err = globals.GetBroker().PushJob(&jobs.JobRequest{\n\t\/\/ \tDomain: domain,\n\t\/\/ \tWorkerType: \"sendmail\",\n\t\/\/ \tMessage: msg,\n\t\/\/ })\n\t\/\/ return err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/starkandwayne\/shield\/plugin\"\n\t\"time\"\n)\n\nfunc main() {\n\tp := RedisBrokerPlugin{\n\t\tName: \"Pivotal Redis Broker Backup Plugin\",\n\t\tAuthor: \"Stark & Wayne\",\n\t\tVersion: \"0.0.1\",\n\t\tFeatures: plugin.PluginFeatures{\n\t\t\tTarget: \"yes\",\n\t\t\tStore: \"no\",\n\t\t},\n\t}\n\n\tplugin.Run(p)\n}\n\ntype RedisBrokerPlugin plugin.PluginInfo\n\ntype RedisEndpoint struct {\n\tMode string\n}\n\nfunc (p RedisBrokerPlugin) Meta() plugin.PluginInfo {\n\treturn plugin.PluginInfo(p)\n}\n\nfunc (p RedisBrokerPlugin) Backup(endpoint plugin.ShieldEndpoint) error {\n\terr := plugin.Exec(\"tar -c -C \/var\/vcap\/store .\", plugin.STDOUT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p RedisBrokerPlugin) Restore(endpoint plugin.ShieldEndpoint) error {\n\tredis, err := getRedisEndpoint(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar services = []string{\"cf-redis-broker\"}\n\tif redis.Mode == \"dedicated\" {\n\t\tservices = []string{\"redis\", \"redis-agent\"}\n\t}\n\n\tfor _, svc := range services {\n\t\terr = plugin.Exec(fmt.Sprintf(\"\/var\/vcap\/bosh\/bin\/monit stop %s\", svc), plugin.STDOUT)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, svc := range services {\n\t\terr = plugin.Exec(fmt.Sprintf(\"bash -c \\\"while [[ $(\/var\/vcap\/bosh\/bin\/monit summary %s | grep running) ]]; do sleep 1; done\\\"\", svc), plugin.STDOUT)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Don't look for errors here, because pkill will return non-zero if there\n\t\/\/ were no processes to kill in the first place.\n\t\/\/ FIXME: handle this better, so we know we're pkilling properly\n\tplugin.Exec(\"pkill redis-server\", plugin.STDOUT)\n\ttime.Sleep(2 * time.Second)\n\tplugin.Exec(\"pkill -9 redis-server\", plugin.STDOUT)\n\ttime.Sleep(1 * time.Second)\n\n\terr = plugin.Exec(\"tar -x -C \/var\/vcap\/store . \", plugin.STDIN)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = plugin.Exec(\"bash -c 'yes | find \/var\/vcap\/store -name appendonly.aof -exec \/var\/vcap\/packages\/redis\/bin\/redis-check-aof --fix {} \\\\;'\", plugin.STDOUT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, svc := range services {\n\t\terr = plugin.Exec(fmt.Sprintf(\"\/var\/vcap\/bosh\/bin\/monit start %s\", svc), plugin.STDOUT)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p RedisBrokerPlugin) Store(endpoint plugin.ShieldEndpoint) (string, error) {\n\treturn \"\", plugin.UNIMPLEMENTED\n}\n\nfunc (p RedisBrokerPlugin) Retrieve(endpoint plugin.ShieldEndpoint, file string) error {\n\treturn plugin.UNIMPLEMENTED\n}\n\nfunc (p RedisBrokerPlugin) Purge(endpoint plugin.ShieldEndpoint, file string) error {\n\treturn plugin.UNIMPLEMENTED\n}\n\nfunc getRedisEndpoint(endpoint plugin.ShieldEndpoint) (RedisEndpoint, error) {\n\tmode, err := endpoint.StringValue(\"redis_type\")\n\tif err != nil {\n\t\treturn RedisEndpoint{}, err\n\t}\n\treturn RedisEndpoint{Mode: mode}, nil\n}\n<commit_msg>redis-broker plugin monit fix<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/starkandwayne\/shield\/plugin\"\n\t\"time\"\n)\n\nfunc main() {\n\tp := RedisBrokerPlugin{\n\t\tName: \"Pivotal Redis Broker Backup Plugin\",\n\t\tAuthor: \"Stark & Wayne\",\n\t\tVersion: \"0.0.1\",\n\t\tFeatures: plugin.PluginFeatures{\n\t\t\tTarget: \"yes\",\n\t\t\tStore: \"no\",\n\t\t},\n\t}\n\n\tplugin.Run(p)\n}\n\ntype RedisBrokerPlugin plugin.PluginInfo\n\ntype RedisEndpoint struct {\n\tMode string\n}\n\nfunc (p RedisBrokerPlugin) Meta() plugin.PluginInfo {\n\treturn plugin.PluginInfo(p)\n}\n\nfunc (p RedisBrokerPlugin) Backup(endpoint plugin.ShieldEndpoint) error {\n\terr := plugin.Exec(\"tar -c -C \/var\/vcap\/store .\", plugin.STDOUT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p RedisBrokerPlugin) Restore(endpoint plugin.ShieldEndpoint) error {\n\tredis, err := getRedisEndpoint(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar services = []string{\"cf-redis-broker\"}\n\tif redis.Mode == \"dedicated\" {\n\t\tservices = []string{\"redis\", \"redis-agent\"}\n\t}\n\n\tfor _, svc := range services {\n\t\terr = plugin.Exec(fmt.Sprintf(\"\/var\/vcap\/bosh\/bin\/monit stop %s\", svc), plugin.STDOUT)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = plugin.Exec(fmt.Sprintf(\"bash -c \\\"while [[ $(\/var\/vcap\/bosh\/bin\/monit summary | grep redis | grep running) ]]; do sleep 1; done\\\"\", svc), plugin.STDOUT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Don't look for errors here, because pkill will return non-zero if there\n\t\/\/ were no processes to kill in the first place.\n\t\/\/ FIXME: handle this better, so we know we're pkilling properly\n\tplugin.Exec(\"pkill redis-server\", plugin.STDOUT)\n\ttime.Sleep(2 * time.Second)\n\tplugin.Exec(\"pkill -9 redis-server\", plugin.STDOUT)\n\ttime.Sleep(1 * time.Second)\n\n\terr = plugin.Exec(\"tar -x -C \/var\/vcap\/store . \", plugin.STDIN)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = plugin.Exec(\"bash -c 'yes | find \/var\/vcap\/store -name appendonly.aof -exec \/var\/vcap\/packages\/redis\/bin\/redis-check-aof --fix {} \\\\;'\", plugin.STDOUT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, svc := range services {\n\t\terr = plugin.Exec(fmt.Sprintf(\"\/var\/vcap\/bosh\/bin\/monit start %s\", svc), plugin.STDOUT)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p RedisBrokerPlugin) Store(endpoint plugin.ShieldEndpoint) (string, error) {\n\treturn \"\", plugin.UNIMPLEMENTED\n}\n\nfunc (p RedisBrokerPlugin) Retrieve(endpoint plugin.ShieldEndpoint, file string) error {\n\treturn plugin.UNIMPLEMENTED\n}\n\nfunc (p RedisBrokerPlugin) Purge(endpoint plugin.ShieldEndpoint, file string) error {\n\treturn plugin.UNIMPLEMENTED\n}\n\nfunc getRedisEndpoint(endpoint plugin.ShieldEndpoint) (RedisEndpoint, error) {\n\tmode, err := endpoint.StringValue(\"redis_type\")\n\tif err != nil {\n\t\treturn RedisEndpoint{}, err\n\t}\n\treturn RedisEndpoint{Mode: mode}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package comb\n\nfunc (s *State) Char(r rune) Parser {\n\treturn func() (interface{}, error) {\n\t\tif s.currentRune() != r {\n\t\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t\t}\n\n\t\ts.increment()\n\n\t\treturn r, nil\n\t}\n}\n\nfunc (s *State) NotChar(r rune) Parser {\n\treturn func() (interface{}, error) {\n\t\tif s.currentRune() == r {\n\t\t\treturn nil, NewError(\"Should not be '%c'.\", r)\n\t\t}\n\n\t\tdefer s.increment()\n\n\t\treturn s.currentRune(), nil\n\t}\n}\n\nfunc (s *State) String(str string) Parser {\n\trs := ([]rune)(str)\n\tps := make([]Parser, len(rs))\n\n\tfor i, r := range rs {\n\t\tps[i] = s.Char(r)\n\t}\n\n\treturn s.And(ps...)\n}\n\nfunc (s *State) InString(str string) Parser {\n\trs := stringToRuneSet(str)\n\n\treturn func() (interface{}, error) {\n\t\tif _, ok := rs[s.currentRune()]; ok {\n\t\t\tdefer s.increment()\n\t\t\treturn s.currentRune(), nil\n\t\t}\n\n\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t}\n}\n\nfunc (s *State) NotInString(str string) Parser {\n\trs := stringToRuneSet(str)\n\n\treturn func() (interface{}, error) {\n\t\tif _, ok := rs[s.currentRune()]; !ok {\n\t\t\tdefer s.increment()\n\t\t\treturn s.currentRune(), nil\n\t\t}\n\n\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t}\n}\n\nfunc (s *State) Wrap(l, m, r Parser) Parser {\n\tp := s.And(l, m, r)\n\n\treturn func() (interface{}, error) {\n\t\tresults, err := p()\n\n\t\tif results, ok := results.([]interface{}); ok {\n\t\t\treturn results[1], err\n\t\t}\n\n\t\treturn results, err\n\t}\n}\n\nfunc (s *State) Many(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tresults, err := s.Many1(p)()\n\n\t\tif err != nil {\n\t\t\treturn []interface{}{}, nil\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (s *State) Many1(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tvar results []interface{}\n\n\t\tfor i := 0; ; i++ {\n\t\t\tresult, err := p()\n\n\t\t\tif err != nil && i == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else if err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tresults = append(results, result)\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (s *State) Or(ps ...Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tvar err error\n\t\told := *s\n\n\t\tfor _, p := range ps {\n\t\t\tvar result interface{}\n\n\t\t\tresult, err = p()\n\n\t\t\tif err == nil {\n\t\t\t\treturn result, nil\n\t\t\t}\n\n\t\t\t*s = old\n\t\t}\n\n\t\treturn nil, err\n\t}\n}\n\nfunc (State) And(ps ...Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tresults := make([]interface{}, len(ps))\n\n\t\tfor i, p := range ps {\n\t\t\tresult, err := p()\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tresults[i] = result\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (State) Void(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\t_, err := p()\n\t\treturn nil, err\n\t}\n}\n\nfunc stringToRuneSet(s string) map[rune]bool {\n\trs := make(map[rune]bool)\n\n\tfor _, r := range s {\n\t\trs[r] = true\n\t}\n\n\treturn rs\n}\n<commit_msg>Make And backtrack<commit_after>package comb\n\nfunc (s *State) Char(r rune) Parser {\n\treturn func() (interface{}, error) {\n\t\tif s.currentRune() != r {\n\t\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t\t}\n\n\t\ts.increment()\n\n\t\treturn r, nil\n\t}\n}\n\nfunc (s *State) NotChar(r rune) Parser {\n\treturn func() (interface{}, error) {\n\t\tif s.currentRune() == r {\n\t\t\treturn nil, NewError(\"Should not be '%c'.\", r)\n\t\t}\n\n\t\tdefer s.increment()\n\n\t\treturn s.currentRune(), nil\n\t}\n}\n\nfunc (s *State) String(str string) Parser {\n\trs := ([]rune)(str)\n\tps := make([]Parser, len(rs))\n\n\tfor i, r := range rs {\n\t\tps[i] = s.Char(r)\n\t}\n\n\treturn s.And(ps...)\n}\n\nfunc (s *State) InString(str string) Parser {\n\trs := stringToRuneSet(str)\n\n\treturn func() (interface{}, error) {\n\t\tif _, ok := rs[s.currentRune()]; ok {\n\t\t\tdefer s.increment()\n\t\t\treturn s.currentRune(), nil\n\t\t}\n\n\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t}\n}\n\nfunc (s *State) NotInString(str string) Parser {\n\trs := stringToRuneSet(str)\n\n\treturn func() (interface{}, error) {\n\t\tif _, ok := rs[s.currentRune()]; !ok {\n\t\t\tdefer s.increment()\n\t\t\treturn s.currentRune(), nil\n\t\t}\n\n\t\treturn nil, NewError(\"Invalid character. '%c'\", s.currentRune())\n\t}\n}\n\nfunc (s *State) Wrap(l, m, r Parser) Parser {\n\tp := s.And(l, m, r)\n\n\treturn func() (interface{}, error) {\n\t\tresults, err := p()\n\n\t\tif results, ok := results.([]interface{}); ok {\n\t\t\treturn results[1], err\n\t\t}\n\n\t\treturn results, err\n\t}\n}\n\nfunc (s *State) Many(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tresults, err := s.Many1(p)()\n\n\t\tif err != nil {\n\t\t\treturn []interface{}{}, nil\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (s *State) Many1(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tvar results []interface{}\n\n\t\tfor i := 0; ; i++ {\n\t\t\tresult, err := p()\n\n\t\t\tif err != nil && i == 0 {\n\t\t\t\treturn nil, err\n\t\t\t} else if err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tresults = append(results, result)\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (s *State) Or(ps ...Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tvar err error\n\t\told := *s\n\n\t\tfor _, p := range ps {\n\t\t\tvar result interface{}\n\n\t\t\tresult, err = p()\n\n\t\t\tif err == nil {\n\t\t\t\treturn result, nil\n\t\t\t}\n\n\t\t\t*s = old\n\t\t}\n\n\t\treturn nil, err\n\t}\n}\n\nfunc (s *State) And(ps ...Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\tresults := make([]interface{}, len(ps))\n\t\told := *s\n\n\t\tfor i, p := range ps {\n\t\t\tresult, err := p()\n\n\t\t\tif err != nil {\n\t\t\t\t*s = old\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tresults[i] = result\n\t\t}\n\n\t\treturn results, nil\n\t}\n}\n\nfunc (State) Void(p Parser) Parser {\n\treturn func() (interface{}, error) {\n\t\t_, err := p()\n\t\treturn nil, err\n\t}\n}\n\nfunc stringToRuneSet(s string) map[rune]bool {\n\trs := make(map[rune]bool)\n\n\tfor _, r := range s {\n\t\trs[r] = true\n\t}\n\n\treturn rs\n}\n<|endoftext|>"} {"text":"<commit_before>package pipeline\n\nimport (\n\t\"context\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/pipeline\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/pipelineexecution\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/project\"\n\t\"github.com\/rancher\/rancher\/pkg\/types\/config\"\n)\n\nfunc Register(ctx context.Context, cluster *config.UserContext) {\n\tpipeline.Register(ctx, cluster)\n\tpipelineexecution.Register(ctx, cluster)\n\tproject.Register(ctx, cluster)\n}\n<commit_msg>Only start pipeline user controller if v3.Pipeline exists<commit_after>package pipeline\n\nimport (\n\t\"context\"\n\n\tv3 \"github.com\/rancher\/rancher\/pkg\/apis\/project.cattle.io\/v3\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/pipeline\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/pipelineexecution\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/managementuserlegacy\/pipeline\/controller\/project\"\n\t\"github.com\/rancher\/rancher\/pkg\/types\/config\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\nfunc Register(ctx context.Context, cluster *config.UserContext) {\n\tstarter := cluster.DeferredStart(ctx, func(ctx context.Context) error {\n\t\tregisterDeferred(ctx, cluster)\n\t\treturn nil\n\t})\n\tAddStarter(ctx, cluster, starter)\n}\n\nfunc AddStarter(ctx context.Context, cluster *config.UserContext, starter func() error) {\n\tpipelines := cluster.Management.Project.Pipelines(\"\")\n\tpipelines.AddClusterScopedHandler(ctx, \"pipeline-deferred\", cluster.ClusterName, func(key string, obj *v3.Pipeline) (runtime.Object, error) {\n\t\treturn obj, starter()\n\t})\n}\n\nfunc registerDeferred(ctx context.Context, cluster *config.UserContext) {\n\tpipeline.Register(ctx, cluster)\n\tpipelineexecution.Register(ctx, cluster)\n\tproject.Register(ctx, cluster)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreedto in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage workflow\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/logutil\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\n\tworkflowpb \"vitess.io\/vitess\/go\/vt\/proto\/workflow\"\n)\n\nconst (\n\ttestWorkflowFactoryName = \"test_workflow\"\n\n\tphaseSimple PhaseType = \"simple\"\n\n\terrMessage = \"fake error for testing retry\"\n)\n\nfunc createTestTaskID(phase PhaseType, count int) string {\n\treturn fmt.Sprintf(\"%s\/%v\", phase, count)\n}\n\nfunc init() {\n\tRegister(testWorkflowFactoryName, &TestWorkflowFactory{})\n}\n\n\/\/ TestWorkflowFactory is the factory to create a test workflow.\ntype TestWorkflowFactory struct{}\n\n\/\/ Init is part of the workflow.Factory interface.\nfunc (*TestWorkflowFactory) Init(_ *Manager, w *workflowpb.Workflow, args []string) error {\n\tsubFlags := flag.NewFlagSet(testWorkflowFactoryName, flag.ContinueOnError)\n\tretryFlag := subFlags.Bool(\"retry\", false, \"The retry flag should be true if the retry action should be tested\")\n\tcount := subFlags.Int(\"count\", 0, \"The number of simple tasks\")\n\tenableApprovals := subFlags.Bool(\"enable_approvals\", false, \"If true, executions of tasks require user's approvals on the UI.\")\n\tsequential := subFlags.Bool(\"sequential\", false, \"If true, executions of tasks are sequential\")\n\tif err := subFlags.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize the checkpoint.\n\ttaskMap := make(map[string]*workflowpb.Task)\n\tfor i := 0; i < *count; i++ {\n\t\ttaskID := createTestTaskID(phaseSimple, i)\n\t\ttaskMap[taskID] = &workflowpb.Task{\n\t\t\tId: taskID,\n\t\t\tState: workflowpb.TaskState_TaskNotStarted,\n\t\t\tAttributes: map[string]string{\"number\": fmt.Sprintf(\"%v\", i)},\n\t\t}\n\t}\n\tcheckpoint := &workflowpb.WorkflowCheckpoint{\n\t\tCodeVersion: 0,\n\t\tTasks: taskMap,\n\t\tSettings: map[string]string{\"count\": fmt.Sprintf(\"%v\", *count), \"retry\": fmt.Sprintf(\"%v\", *retryFlag), \"enable_approvals\": fmt.Sprintf(\"%v\", *enableApprovals), \"sequential\": fmt.Sprintf(\"%v\", *sequential)},\n\t}\n\tvar err error\n\tw.Data, err = proto.Marshal(checkpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Instantiate is part the workflow.Factory interface.\nfunc (*TestWorkflowFactory) Instantiate(m *Manager, w *workflowpb.Workflow, rootNode *Node) (Workflow, error) {\n\tcheckpoint := &workflowpb.WorkflowCheckpoint{}\n\tif err := proto.Unmarshal(w.Data, checkpoint); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the retry flags for all tasks from the checkpoint.\n\tretry, err := strconv.ParseBool(checkpoint.Settings[\"retry\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting retry in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"retry\"])\n\t\treturn nil, err\n\t}\n\tretryFlags := make(map[string]bool)\n\tfor _, task := range checkpoint.Tasks {\n\t\tretryFlags[task.Id] = retry\n\t}\n\n\t\/\/ Get the user control flags from the checkpoint.\n\tenableApprovals, err := strconv.ParseBool(checkpoint.Settings[\"enable_approvals\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting enable_approvals in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"user_control\"])\n\t\treturn nil, err\n\t}\n\n\tsequential, err := strconv.ParseBool(checkpoint.Settings[\"sequential\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting sequential in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"user_control\"])\n\t\treturn nil, err\n\t}\n\n\ttw := &TestWorkflow{\n\t\ttopoServer: m.TopoServer(),\n\t\tmanager: m,\n\t\tcheckpoint: checkpoint,\n\t\trootUINode: rootNode,\n\t\tlogger: logutil.NewMemoryLogger(),\n\t\tretryFlags: retryFlags,\n\t\tenableApprovals: enableApprovals,\n\t\tsequential: sequential,\n\t}\n\n\tcount, err := strconv.Atoi(checkpoint.Settings[\"count\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting count in checkpoint.Settings to int fails: %v\", checkpoint.Settings[\"count\"])\n\t\treturn nil, err\n\t}\n\n\tphaseNode := &Node{\n\t\tName: string(phaseSimple),\n\t\tPathName: string(phaseSimple),\n\t}\n\ttw.rootUINode.Children = append(tw.rootUINode.Children, phaseNode)\n\n\tfor i := 0; i < count; i++ {\n\t\ttaskName := fmt.Sprintf(\"%v\", i)\n\t\ttaskUINode := &Node{\n\t\t\tName: taskName,\n\t\t\tPathName: taskName,\n\t\t}\n\t\tphaseNode.Children = append(phaseNode.Children, taskUINode)\n\t}\n\treturn tw, nil\n}\n\n\/\/ TestWorkflow is used to unit test the ParallelRunner object. It is a\n\/\/ simplified workflow of one phase. To test the ParallelRunner's retry\n\/\/ behavior, we can let the tasks explicitly fail initially and succeed\n\/\/ after a retry.\ntype TestWorkflow struct {\n\tctx context.Context\n\tmanager *Manager\n\ttopoServer *topo.Server\n\twi *topo.WorkflowInfo\n\tlogger *logutil.MemoryLogger\n\n\tretryMu sync.Mutex\n\t\/\/ retryFlags stores the retry flag for all tasks.\n\tretryFlags map[string]bool\n\n\trootUINode *Node\n\n\tcheckpoint *workflowpb.WorkflowCheckpoint\n\tcheckpointWriter *CheckpointWriter\n\n\tenableApprovals bool\n\tsequential bool\n}\n\n\/\/ Run implements the workflow.Workflow interface.\nfunc (tw *TestWorkflow) Run(ctx context.Context, manager *Manager, wi *topo.WorkflowInfo) error {\n\ttw.ctx = ctx\n\ttw.wi = wi\n\ttw.checkpointWriter = NewCheckpointWriter(tw.topoServer, tw.checkpoint, tw.wi)\n\n\ttw.rootUINode.Display = NodeDisplayDeterminate\n\ttw.rootUINode.BroadcastChanges(true \/* updateChildren *\/)\n\n\tsimpleTasks := tw.getTasks(phaseSimple)\n\tconcurrencyLevel := Parallel\n\tif tw.sequential {\n\t\tconcurrencyLevel = Sequential\n\t}\n\tsimpleRunner := NewParallelRunner(tw.ctx, tw.rootUINode, tw.checkpointWriter, simpleTasks, tw.runSimple, concurrencyLevel, tw.enableApprovals)\n\tsimpleRunner.Run()\n\treturn nil\n}\n\nfunc (tw *TestWorkflow) getTasks(phaseName PhaseType) []*workflowpb.Task {\n\tcount, err := strconv.Atoi(tw.checkpoint.Settings[\"count\"])\n\tif err != nil {\n\t\tlog.Info(\"converting count in checkpoint.Settings to int failed: %v\", tw.checkpoint.Settings[\"count\"])\n\t\treturn nil\n\t}\n\tvar tasks []*workflowpb.Task\n\tfor i := 0; i < count; i++ {\n\t\ttaskID := createTestTaskID(phaseName, i)\n\t\ttasks = append(tasks, tw.checkpoint.Tasks[taskID])\n\t}\n\treturn tasks\n}\n\nfunc (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error {\n\tlog.Info(\"The number passed to me is %v\", t.Attributes[\"number\"])\n\n\ttw.retryMu.Lock()\n\tdefer tw.retryMu.Unlock()\n\tif tw.retryFlags[t.Id] {\n\t\tlog.Info(\"I will fail at this time since retry flag is true.\")\n\t\ttw.retryFlags[t.Id] = false\n\t\treturn errors.New(errMessage)\n\t}\n\treturn nil\n}\n<commit_msg>Fix potential issue with return<commit_after>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreedto in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage workflow\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/logutil\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\n\tworkflowpb \"vitess.io\/vitess\/go\/vt\/proto\/workflow\"\n)\n\nconst (\n\ttestWorkflowFactoryName = \"test_workflow\"\n\n\tphaseSimple PhaseType = \"simple\"\n\n\terrMessage = \"fake error for testing retry\"\n)\n\nfunc createTestTaskID(phase PhaseType, count int) string {\n\treturn fmt.Sprintf(\"%s\/%v\", phase, count)\n}\n\nfunc init() {\n\tRegister(testWorkflowFactoryName, &TestWorkflowFactory{})\n}\n\n\/\/ TestWorkflowFactory is the factory to create a test workflow.\ntype TestWorkflowFactory struct{}\n\n\/\/ Init is part of the workflow.Factory interface.\nfunc (*TestWorkflowFactory) Init(_ *Manager, w *workflowpb.Workflow, args []string) error {\n\tsubFlags := flag.NewFlagSet(testWorkflowFactoryName, flag.ContinueOnError)\n\tretryFlag := subFlags.Bool(\"retry\", false, \"The retry flag should be true if the retry action should be tested\")\n\tcount := subFlags.Int(\"count\", 0, \"The number of simple tasks\")\n\tenableApprovals := subFlags.Bool(\"enable_approvals\", false, \"If true, executions of tasks require user's approvals on the UI.\")\n\tsequential := subFlags.Bool(\"sequential\", false, \"If true, executions of tasks are sequential\")\n\tif err := subFlags.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize the checkpoint.\n\ttaskMap := make(map[string]*workflowpb.Task)\n\tfor i := 0; i < *count; i++ {\n\t\ttaskID := createTestTaskID(phaseSimple, i)\n\t\ttaskMap[taskID] = &workflowpb.Task{\n\t\t\tId: taskID,\n\t\t\tState: workflowpb.TaskState_TaskNotStarted,\n\t\t\tAttributes: map[string]string{\"number\": fmt.Sprintf(\"%v\", i)},\n\t\t}\n\t}\n\tcheckpoint := &workflowpb.WorkflowCheckpoint{\n\t\tCodeVersion: 0,\n\t\tTasks: taskMap,\n\t\tSettings: map[string]string{\"count\": fmt.Sprintf(\"%v\", *count), \"retry\": fmt.Sprintf(\"%v\", *retryFlag), \"enable_approvals\": fmt.Sprintf(\"%v\", *enableApprovals), \"sequential\": fmt.Sprintf(\"%v\", *sequential)},\n\t}\n\tvar err error\n\tw.Data, err = proto.Marshal(checkpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Instantiate is part the workflow.Factory interface.\nfunc (*TestWorkflowFactory) Instantiate(m *Manager, w *workflowpb.Workflow, rootNode *Node) (Workflow, error) {\n\tcheckpoint := &workflowpb.WorkflowCheckpoint{}\n\tif err := proto.Unmarshal(w.Data, checkpoint); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Get the retry flags for all tasks from the checkpoint.\n\tretry, err := strconv.ParseBool(checkpoint.Settings[\"retry\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting retry in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"retry\"])\n\t\treturn nil, err\n\t}\n\tretryFlags := make(map[string]bool)\n\tfor _, task := range checkpoint.Tasks {\n\t\tretryFlags[task.Id] = retry\n\t}\n\n\t\/\/ Get the user control flags from the checkpoint.\n\tenableApprovals, err := strconv.ParseBool(checkpoint.Settings[\"enable_approvals\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting enable_approvals in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"user_control\"])\n\t\treturn nil, err\n\t}\n\n\tsequential, err := strconv.ParseBool(checkpoint.Settings[\"sequential\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting sequential in checkpoint.Settings to bool fails: %v\", checkpoint.Settings[\"user_control\"])\n\t\treturn nil, err\n\t}\n\n\ttw := &TestWorkflow{\n\t\ttopoServer: m.TopoServer(),\n\t\tmanager: m,\n\t\tcheckpoint: checkpoint,\n\t\trootUINode: rootNode,\n\t\tlogger: logutil.NewMemoryLogger(),\n\t\tretryFlags: retryFlags,\n\t\tenableApprovals: enableApprovals,\n\t\tsequential: sequential,\n\t}\n\n\tcount, err := strconv.Atoi(checkpoint.Settings[\"count\"])\n\tif err != nil {\n\t\tlog.Errorf(\"converting count in checkpoint.Settings to int fails: %v\", checkpoint.Settings[\"count\"])\n\t\treturn nil, err\n\t}\n\n\tphaseNode := &Node{\n\t\tName: string(phaseSimple),\n\t\tPathName: string(phaseSimple),\n\t}\n\ttw.rootUINode.Children = append(tw.rootUINode.Children, phaseNode)\n\n\tfor i := 0; i < count; i++ {\n\t\ttaskName := fmt.Sprintf(\"%v\", i)\n\t\ttaskUINode := &Node{\n\t\t\tName: taskName,\n\t\t\tPathName: taskName,\n\t\t}\n\t\tphaseNode.Children = append(phaseNode.Children, taskUINode)\n\t}\n\treturn tw, nil\n}\n\n\/\/ TestWorkflow is used to unit test the ParallelRunner object. It is a\n\/\/ simplified workflow of one phase. To test the ParallelRunner's retry\n\/\/ behavior, we can let the tasks explicitly fail initially and succeed\n\/\/ after a retry.\ntype TestWorkflow struct {\n\tctx context.Context\n\tmanager *Manager\n\ttopoServer *topo.Server\n\twi *topo.WorkflowInfo\n\tlogger *logutil.MemoryLogger\n\n\tretryMu sync.Mutex\n\t\/\/ retryFlags stores the retry flag for all tasks.\n\tretryFlags map[string]bool\n\n\trootUINode *Node\n\n\tcheckpoint *workflowpb.WorkflowCheckpoint\n\tcheckpointWriter *CheckpointWriter\n\n\tenableApprovals bool\n\tsequential bool\n}\n\n\/\/ Run implements the workflow.Workflow interface.\nfunc (tw *TestWorkflow) Run(ctx context.Context, manager *Manager, wi *topo.WorkflowInfo) error {\n\ttw.ctx = ctx\n\ttw.wi = wi\n\ttw.checkpointWriter = NewCheckpointWriter(tw.topoServer, tw.checkpoint, tw.wi)\n\n\ttw.rootUINode.Display = NodeDisplayDeterminate\n\ttw.rootUINode.BroadcastChanges(true \/* updateChildren *\/)\n\n\tsimpleTasks := tw.getTasks(phaseSimple)\n\tconcurrencyLevel := Parallel\n\tif tw.sequential {\n\t\tconcurrencyLevel = Sequential\n\t}\n\tsimpleRunner := NewParallelRunner(tw.ctx, tw.rootUINode, tw.checkpointWriter, simpleTasks, tw.runSimple, concurrencyLevel, tw.enableApprovals)\n\treturn simpleRunner.Run()\n}\n\nfunc (tw *TestWorkflow) getTasks(phaseName PhaseType) []*workflowpb.Task {\n\tcount, err := strconv.Atoi(tw.checkpoint.Settings[\"count\"])\n\tif err != nil {\n\t\tlog.Info(\"converting count in checkpoint.Settings to int failed: %v\", tw.checkpoint.Settings[\"count\"])\n\t\treturn nil\n\t}\n\tvar tasks []*workflowpb.Task\n\tfor i := 0; i < count; i++ {\n\t\ttaskID := createTestTaskID(phaseName, i)\n\t\ttasks = append(tasks, tw.checkpoint.Tasks[taskID])\n\t}\n\treturn tasks\n}\n\nfunc (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error {\n\tlog.Info(\"The number passed to me is %v\", t.Attributes[\"number\"])\n\n\ttw.retryMu.Lock()\n\tdefer tw.retryMu.Unlock()\n\tif tw.retryFlags[t.Id] {\n\t\tlog.Info(\"I will fail at this time since retry flag is true.\")\n\t\ttw.retryFlags[t.Id] = false\n\t\treturn errors.New(errMessage)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\tboilerplatedir = flag.String(\"boilerplate-dir\", \".\", \"Boilerplate directory for boilerplate files\")\n\trootdir = flag.String(\"rootdir\", \"..\/..\/\", \"Root directory to examine\")\n)\n\nfunc main() {\n\tflag.Parse()\n\trefs := boilerplateRefs(*boilerplatedir)\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles := filesToCheck(*rootdir, refs)\n\tfor _, file := range files {\n\t\tif !filePasses(file, refs[fileExtension(file)]) {\n\t\t\tfmt.Println(file)\n\t\t}\n\t}\n\n}\n\n\/*\nThis function is to populate the refs variable with the\ndifferent boilerplate\/template for different extension.\n*\/\nfunc boilerplateRefs(dir string) map[string][]byte {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\tre := regexp.MustCompile(`\\.txt`)\n\t\textension := strings.ToLower(fileExtension(re.ReplaceAllString(filename, \"\")))\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tre = regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\treturn refs\n}\n\n\/*\nFunction to check whether the processed file\nis valid.\nReturning false means that the file does not the\nproper boilerplate template\n*\/\nfunc filePasses(filename string, ref []byte) bool {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := fileExtension(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \"go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \"sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(ref) {\n\t\treturn false\n\t}\n\n\tdata = data[:len(ref)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, ref)\n}\n\n\/**\nFunction to get the file extensin or the filename if the file has no extension\n*\/\nfunc fileExtension(filename string) string {\n\tsplitted := strings.Split(filepath.Base(filename), \".\")\n\treturn strings.ToLower(splitted[len(splitted)-1])\n}\n\n\/**\nFunction to get all the files from the directory that heeds to be checked.\n*\/\nfunc filesToCheck(rootDir string, extensions map[string][]byte) []string {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\/\/ remove current workdir from the beginig of the path in case it matches the skipped path\n\t\tcwd, _ := os.Getwd()\n\t\t\/\/ replace \"\\\" with \"\\\\\" for windows style path\n\t\tre := regexp.MustCompile(`\\\\`)\n\t\tre = regexp.MustCompile(`^` + re.ReplaceAllString(cwd, `\\\\`))\n\t\tif !info.IsDir() && !skippedPaths.MatchString(re.ReplaceAllString(filepath.Dir(path), \"\")) {\n\t\t\tif extensions[strings.ToLower(fileExtension(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn outFiles\n}\n<commit_msg>Comments refactored in boilerplate.go<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\tboilerplatedir = flag.String(\"boilerplate-dir\", \".\", \"Boilerplate directory for boilerplate files\")\n\trootdir = flag.String(\"rootdir\", \"..\/..\/\", \"Root directory to examine\")\n)\n\nfunc main() {\n\tflag.Parse()\n\trefs := extensionToBoilerplate(*boilerplatedir)\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles := filesToCheck(*rootdir, refs)\n\tfor _, file := range files {\n\t\tif !filePasses(file, refs[fileExtension(file)]) {\n\t\t\tfmt.Println(file)\n\t\t}\n\t}\n\n}\n\n\/\/ extensionToBoilerplate returns a map of file extension to required boilerplate text\nfunc extensionToBoilerplate(dir string) map[string][]byte {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\tre := regexp.MustCompile(`\\.txt`)\n\t\textension := strings.ToLower(fileExtension(re.ReplaceAllString(filename, \"\")))\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tre = regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\treturn refs\n}\n\n\/\/ filePasses checks whether the processed file is valid. Returning false means that the file does not the proper boilerplate template.\nfunc filePasses(filename string, expectedBoilerplate []byte) bool {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := fileExtension(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \"go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \"sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(expectedBoilerplate) {\n\t\treturn false\n\t}\n\n\tdata = data[:len(expectedBoilerplate)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, expectedBoilerplate)\n}\n\n\/**\nFunction to get the file extensin or the filename if the file has no extension\n*\/\nfunc fileExtension(filename string) string {\n\tsplitted := strings.Split(filepath.Base(filename), \".\")\n\treturn strings.ToLower(splitted[len(splitted)-1])\n}\n\n\/\/ filesToCheck returns the list of the filers that will be checked for the boilerplate.\nfunc filesToCheck(rootDir string, extensions map[string][]byte) []string {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\/\/ remove current workdir from the beginig of the path in case it matches the skipped path\n\t\tcwd, _ := os.Getwd()\n\t\t\/\/ replace \"\\\" with \"\\\\\" for windows style path\n\t\tre := regexp.MustCompile(`\\\\`)\n\t\tre = regexp.MustCompile(`^` + re.ReplaceAllString(cwd, `\\\\`))\n\t\tif !info.IsDir() && !skippedPaths.MatchString(re.ReplaceAllString(filepath.Dir(path), \"\")) {\n\t\t\tif extensions[strings.ToLower(fileExtension(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn outFiles\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The cmdline provider fetches a remote configuration from the URL specified\n\/\/ in the kernel boot option \"coreos.config.url\".\n\npackage cmdline\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/ignition\/config\"\n\t\"github.com\/coreos\/ignition\/src\/log\"\n\t\"github.com\/coreos\/ignition\/src\/providers\"\n\t\"github.com\/coreos\/ignition\/src\/providers\/util\"\n\t\"github.com\/coreos\/ignition\/src\/systemd\"\n)\n\nconst (\n\tname = \"cmdline\"\n\tinitialBackoff = 100 * time.Millisecond\n\tmaxBackoff = 30 * time.Second\n\tcmdlinePath = \"\/proc\/cmdline\"\n\tcmdlineUrlFlag = \"coreos.config.url\"\n\toemDevicePath = \"\/dev\/disk\/by-label\/OEM\" \/\/ Device link where oem partition is found.\n\toemDirPath = \"\/sysroot\/usr\/share\/oem\" \/\/ OEM dir within root fs to consider for pxe scenarios.\n\toemMountPath = \"\/mnt\/oem\" \/\/ Mountpoint where oem partition is mounted when present.\n)\n\nfunc init() {\n\tproviders.Register(creator{})\n}\n\ntype creator struct{}\n\nfunc (creator) Name() string {\n\treturn name\n}\n\nfunc (creator) Create(logger log.Logger) providers.Provider {\n\treturn &provider{\n\t\tlogger: logger,\n\t\tbackoff: initialBackoff,\n\t\tpath: cmdlinePath,\n\t\tclient: &http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}\n\ntype provider struct {\n\tlogger log.Logger\n\tbackoff time.Duration\n\tpath string\n\tclient *http.Client\n\tconfigUrl string\n\trawConfig []byte\n}\n\nfunc (provider) Name() string {\n\treturn name\n}\n\nfunc (p provider) FetchConfig() (config.Config, error) {\n\tif p.rawConfig == nil {\n\t\treturn config.Config{}, nil\n\t} else {\n\t\treturn config.Parse(p.rawConfig)\n\t}\n}\n\nfunc (p *provider) IsOnline() bool {\n\tif p.configUrl == \"\" {\n\t\targs, err := ioutil.ReadFile(p.path)\n\t\tif err != nil {\n\t\t\tp.logger.Err(\"couldn't read cmdline\")\n\t\t\treturn false\n\t\t}\n\n\t\tp.configUrl = parseCmdline(args)\n\t\tp.logger.Debug(\"parsed url from cmdline: %q\", p.configUrl)\n\t\tif p.configUrl == \"\" {\n\t\t\t\/\/ If the cmdline flag wasn't provided, just no-op.\n\t\t\tp.logger.Info(\"no config URL provided\")\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn p.getRawConfig()\n\n}\n\nfunc (p provider) ShouldRetry() bool {\n\treturn true\n}\n\nfunc (p *provider) BackoffDuration() time.Duration {\n\treturn util.ExpBackoff(&p.backoff, maxBackoff)\n}\n\nfunc parseCmdline(cmdline []byte) (url string) {\n\tfor _, arg := range strings.Split(string(cmdline), \" \") {\n\t\tparts := strings.SplitN(strings.TrimSpace(arg), \"=\", 2)\n\t\tkey := parts[0]\n\n\t\tif key != cmdlineUrlFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(parts) == 2 {\n\t\t\turl = parts[1]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ getRawConfig gets the raw configuration data from p.configUrl.\n\/\/ Supported URL schemes are:\n\/\/ http:\/\/\tremote resource accessed via http\n\/\/ oem:\/\/\tlocal file in \/sysroot\/usr\/share\/oem or \/mnt\/oem\nfunc (p *provider) getRawConfig() bool {\n\turl, err := url.Parse(p.configUrl)\n\tif err != nil {\n\t\tp.logger.Err(\"failed to parse url: %v\", err)\n\t\treturn false\n\t}\n\n\tswitch url.Scheme {\n\tcase \"http\":\n\t\tif resp, err := p.client.Get(p.configUrl); err == nil {\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tswitch resp.StatusCode {\n\t\t\tcase http.StatusOK, http.StatusNoContent:\n\t\t\tdefault:\n\t\t\t\tp.logger.Debug(\"failed fetching: HTTP status: %s\",\n\t\t\t\t\tresp.Status)\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tp.logger.Debug(\"successfully fetched\")\n\t\t\tp.rawConfig, err = ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Err(\"failed to read body: %v\", err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tp.logger.Warning(\"failed fetching: %v\", err)\n\t\t\treturn false\n\t\t}\n\tcase \"oem\":\n\t\tpath := filepath.Clean(url.Path)\n\t\tif !filepath.IsAbs(path) {\n\t\t\tp.logger.Err(\"oem path is not absolute: %q\", url.Path)\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ check if present under oemDirPath, if so use it.\n\t\tabsPath := filepath.Join(oemDirPath, path)\n\t\tp.rawConfig, err = ioutil.ReadFile(absPath)\n\t\tif os.IsNotExist(err) {\n\t\t\tp.logger.Info(\"oem config not found in %q, trying %q\",\n\t\t\t\toemDirPath, oemMountPath)\n\n\t\t\t\/\/ try oemMountPath, requires mounting it.\n\t\t\terr = p.mountOEM()\n\t\t\tif err == nil {\n\t\t\t\tabsPath := filepath.Join(oemMountPath, path)\n\t\t\t\tp.rawConfig, err = ioutil.ReadFile(absPath)\n\t\t\t\tp.umountOEM()\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tp.logger.Err(\"failed to read oem config: %v\", err)\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tp.logger.Err(\"unsupported url scheme: %q\", url.Scheme)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ mountOEM waits for the presence of and mounts the oem partition @ oemMountPath.\nfunc (p *provider) mountOEM() error {\n\tdev := []string{oemDevicePath}\n\tif err := systemd.WaitOnDevices(dev, \"oem-cmdline\"); err != nil {\n\t\tp.logger.Err(\"failed to wait for oem device: %v\", err)\n\t\treturn err\n\t}\n\n\tif err := os.MkdirAll(oemMountPath, 0700); err != nil {\n\t\tp.logger.Err(\"failed to create oem mount point: %v\", err)\n\t\treturn err\n\t}\n\n\tif err := p.logger.LogOp(\n\t\tfunc() error {\n\t\t\treturn syscall.Mount(dev[0], oemMountPath, \"ext4\", 0, \"\")\n\t\t},\n\t\t\"mounting %q at %q\", oemDevicePath, oemMountPath,\n\t); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount device %q at %q: %v\",\n\t\t\toemDevicePath, oemMountPath, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ umountOEM unmounts the oem partition @ oemMountPath.\nfunc (p *provider) umountOEM() {\n\tp.logger.LogOp(\n\t\tfunc() error { return syscall.Unmount(oemMountPath, 0) },\n\t\t\"unmounting %q\", oemMountPath,\n\t)\n}\n<commit_msg>providers\/cmdline: fix OEM root path for PXE image<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The cmdline provider fetches a remote configuration from the URL specified\n\/\/ in the kernel boot option \"coreos.config.url\".\n\npackage cmdline\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/ignition\/config\"\n\t\"github.com\/coreos\/ignition\/src\/log\"\n\t\"github.com\/coreos\/ignition\/src\/providers\"\n\t\"github.com\/coreos\/ignition\/src\/providers\/util\"\n\t\"github.com\/coreos\/ignition\/src\/systemd\"\n)\n\nconst (\n\tname = \"cmdline\"\n\tinitialBackoff = 100 * time.Millisecond\n\tmaxBackoff = 30 * time.Second\n\tcmdlinePath = \"\/proc\/cmdline\"\n\tcmdlineUrlFlag = \"coreos.config.url\"\n\toemDevicePath = \"\/dev\/disk\/by-label\/OEM\" \/\/ Device link where oem partition is found.\n\toemDirPath = \"\/usr\/share\/oem\" \/\/ OEM dir within root fs to consider for pxe scenarios.\n\toemMountPath = \"\/mnt\/oem\" \/\/ Mountpoint where oem partition is mounted when present.\n)\n\nfunc init() {\n\tproviders.Register(creator{})\n}\n\ntype creator struct{}\n\nfunc (creator) Name() string {\n\treturn name\n}\n\nfunc (creator) Create(logger log.Logger) providers.Provider {\n\treturn &provider{\n\t\tlogger: logger,\n\t\tbackoff: initialBackoff,\n\t\tpath: cmdlinePath,\n\t\tclient: &http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}\n\ntype provider struct {\n\tlogger log.Logger\n\tbackoff time.Duration\n\tpath string\n\tclient *http.Client\n\tconfigUrl string\n\trawConfig []byte\n}\n\nfunc (provider) Name() string {\n\treturn name\n}\n\nfunc (p provider) FetchConfig() (config.Config, error) {\n\tif p.rawConfig == nil {\n\t\treturn config.Config{}, nil\n\t} else {\n\t\treturn config.Parse(p.rawConfig)\n\t}\n}\n\nfunc (p *provider) IsOnline() bool {\n\tif p.configUrl == \"\" {\n\t\targs, err := ioutil.ReadFile(p.path)\n\t\tif err != nil {\n\t\t\tp.logger.Err(\"couldn't read cmdline\")\n\t\t\treturn false\n\t\t}\n\n\t\tp.configUrl = parseCmdline(args)\n\t\tp.logger.Debug(\"parsed url from cmdline: %q\", p.configUrl)\n\t\tif p.configUrl == \"\" {\n\t\t\t\/\/ If the cmdline flag wasn't provided, just no-op.\n\t\t\tp.logger.Info(\"no config URL provided\")\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn p.getRawConfig()\n\n}\n\nfunc (p provider) ShouldRetry() bool {\n\treturn true\n}\n\nfunc (p *provider) BackoffDuration() time.Duration {\n\treturn util.ExpBackoff(&p.backoff, maxBackoff)\n}\n\nfunc parseCmdline(cmdline []byte) (url string) {\n\tfor _, arg := range strings.Split(string(cmdline), \" \") {\n\t\tparts := strings.SplitN(strings.TrimSpace(arg), \"=\", 2)\n\t\tkey := parts[0]\n\n\t\tif key != cmdlineUrlFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(parts) == 2 {\n\t\t\turl = parts[1]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ getRawConfig gets the raw configuration data from p.configUrl.\n\/\/ Supported URL schemes are:\n\/\/ http:\/\/\tremote resource accessed via http\n\/\/ oem:\/\/\tlocal file in \/sysroot\/usr\/share\/oem or \/mnt\/oem\nfunc (p *provider) getRawConfig() bool {\n\turl, err := url.Parse(p.configUrl)\n\tif err != nil {\n\t\tp.logger.Err(\"failed to parse url: %v\", err)\n\t\treturn false\n\t}\n\n\tswitch url.Scheme {\n\tcase \"http\":\n\t\tif resp, err := p.client.Get(p.configUrl); err == nil {\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tswitch resp.StatusCode {\n\t\t\tcase http.StatusOK, http.StatusNoContent:\n\t\t\tdefault:\n\t\t\t\tp.logger.Debug(\"failed fetching: HTTP status: %s\",\n\t\t\t\t\tresp.Status)\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tp.logger.Debug(\"successfully fetched\")\n\t\t\tp.rawConfig, err = ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Err(\"failed to read body: %v\", err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tp.logger.Warning(\"failed fetching: %v\", err)\n\t\t\treturn false\n\t\t}\n\tcase \"oem\":\n\t\tpath := filepath.Clean(url.Path)\n\t\tif !filepath.IsAbs(path) {\n\t\t\tp.logger.Err(\"oem path is not absolute: %q\", url.Path)\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ check if present under oemDirPath, if so use it.\n\t\tabsPath := filepath.Join(oemDirPath, path)\n\t\tp.rawConfig, err = ioutil.ReadFile(absPath)\n\t\tif os.IsNotExist(err) {\n\t\t\tp.logger.Info(\"oem config not found in %q, trying %q\",\n\t\t\t\toemDirPath, oemMountPath)\n\n\t\t\t\/\/ try oemMountPath, requires mounting it.\n\t\t\terr = p.mountOEM()\n\t\t\tif err == nil {\n\t\t\t\tabsPath := filepath.Join(oemMountPath, path)\n\t\t\t\tp.rawConfig, err = ioutil.ReadFile(absPath)\n\t\t\t\tp.umountOEM()\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tp.logger.Err(\"failed to read oem config: %v\", err)\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tp.logger.Err(\"unsupported url scheme: %q\", url.Scheme)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ mountOEM waits for the presence of and mounts the oem partition @ oemMountPath.\nfunc (p *provider) mountOEM() error {\n\tdev := []string{oemDevicePath}\n\tif err := systemd.WaitOnDevices(dev, \"oem-cmdline\"); err != nil {\n\t\tp.logger.Err(\"failed to wait for oem device: %v\", err)\n\t\treturn err\n\t}\n\n\tif err := os.MkdirAll(oemMountPath, 0700); err != nil {\n\t\tp.logger.Err(\"failed to create oem mount point: %v\", err)\n\t\treturn err\n\t}\n\n\tif err := p.logger.LogOp(\n\t\tfunc() error {\n\t\t\treturn syscall.Mount(dev[0], oemMountPath, \"ext4\", 0, \"\")\n\t\t},\n\t\t\"mounting %q at %q\", oemDevicePath, oemMountPath,\n\t); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount device %q at %q: %v\",\n\t\t\toemDevicePath, oemMountPath, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ umountOEM unmounts the oem partition @ oemMountPath.\nfunc (p *provider) umountOEM() {\n\tp.logger.LogOp(\n\t\tfunc() error { return syscall.Unmount(oemMountPath, 0) },\n\t\t\"unmounting %q\", oemMountPath,\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Cutehacks AS. All rights reserved.\n\/\/ License can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"qpm.io\/common\"\n\t\"qpm.io\/qpm\/core\"\n\t\"strings\"\n)\n\ntype CheckCommand struct {\n\tBaseCommand\n\tpkg *common.PackageWrapper\n}\n\nfunc NewCheckCommand(ctx core.Context) *CheckCommand {\n\treturn &CheckCommand{\n\t\tBaseCommand: BaseCommand{\n\t\t\tCtx: ctx,\n\t\t},\n\t}\n}\n\nfunc (c CheckCommand) Description() string {\n\treturn \"Checks the package for common errors\"\n}\n\nfunc (c *CheckCommand) RegisterFlags(flags *flag.FlagSet) {\n}\n\nfunc (c *CheckCommand) Run() error {\n\n\t\/\/ check the package file\n\tvar err error\n\tc.pkg, err = common.LoadPackage(\"\")\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\tc.pkg.Validate()\n\n\t\/\/ check the LICENSE file\n\t_, err = os.Stat(core.License)\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/ check the .pri file\n\t_, err = os.Stat(c.pkg.PriFile())\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/ check the .qrc file\n\t_, err = os.Stat(c.pkg.QrcFile())\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\tvar prefix string\n\tprefix, err = c.qrc()\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\tif prefix != c.pkg.QrcPrefix() {\n\t\tc.Error(fmt.Errorf(\"the QRC prefix (%s) does not equal (%s)\", prefix, c.pkg.QrcPrefix()))\n\t}\n\n\t\/\/ check the qmldir file\n\t_, err = os.Stat(\"qmldir\")\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\tvar module string\n\tmodule, err = c.qmldir()\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\tif module != c.pkg.Name {\n\t\tc.Error(fmt.Errorf(\"the qmldir module (%s) does not equal (%s)\", module, c.pkg.Name))\n\t}\n\n\tfmt.Printf(\"OK!\\n\")\n\n\treturn nil\n}\n\ntype QRC_File struct {\n\tXMLName xml.Name `xml:\"file\"`\n\tContent string `xml:\",chardata\"`\n}\n\ntype QRC_Resource struct {\n\tXMLName xml.Name `xml:\"qresource\"`\n\tPrefix string `xml:\"prefix,attr\"`\n\tFiles []QRC_File `xml:\"file\"`\n}\n\ntype QRC_RCC struct {\n\tXMLName xml.Name `xml:\"RCC\"`\n\tResources []QRC_Resource `xml:\"qresource\"`\n}\n\nfunc (c *CheckCommand) qrc() (prefix string, err error) {\n\n\tfile, err := os.Open(c.pkg.QrcFile())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tdata, _ := ioutil.ReadAll(file)\n\n\tvar rcc QRC_RCC\n\txml.Unmarshal(data, &rcc)\n\n\tif rcc.Resources != nil {\n\t\treturn rcc.Resources[0].Prefix, nil\n\t}\n\n\treturn \"\", err\n}\n\nfunc (c *CheckCommand) qmldir() (prefix string, err error) {\n\n\tfile, err := os.Open(\"qmldir\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tconst key = \"module \"\n\tconst key_len = len(key)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\ti := strings.Index(line, key)\n\t\tif i != -1 {\n\t\t\treturn line[i+key_len : len(line)], nil\n\t\t}\n\t}\n\n\treturn \"\", err\n}\n<commit_msg>Don't require .qrc and qmldir files in a package, but give warnings.<commit_after>\/\/ Copyright 2015 Cutehacks AS. All rights reserved.\n\/\/ License can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"qpm.io\/common\"\n\t\"qpm.io\/qpm\/core\"\n\t\"strings\"\n)\n\ntype CheckCommand struct {\n\tBaseCommand\n\tpkg *common.PackageWrapper\n}\n\nfunc NewCheckCommand(ctx core.Context) *CheckCommand {\n\treturn &CheckCommand{\n\t\tBaseCommand: BaseCommand{\n\t\t\tCtx: ctx,\n\t\t},\n\t}\n}\n\nfunc (c CheckCommand) Description() string {\n\treturn \"Checks the package for common errors\"\n}\n\nfunc (c *CheckCommand) RegisterFlags(flags *flag.FlagSet) {\n}\n\nfunc (c *CheckCommand) Run() error {\n\n\t\/\/ check the package file\n\tvar err error\n\tc.pkg, err = common.LoadPackage(\"\")\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\tc.pkg.Validate()\n\n\t\/\/ check the LICENSE file\n\t_, err = os.Stat(core.License)\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/ check the .pri file\n\t_, err = os.Stat(c.pkg.PriFile())\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/ check the .qrc file\n\t_, err = os.Stat(c.pkg.QrcFile())\n\tif err != nil {\n\t\tc.Warning(err.Error())\n\t}\n\n\tvar prefix string\n\tprefix, err = c.qrc()\n\tif err != nil {\n\t\tc.Warning(err.Error())\n\t} else if prefix != c.pkg.QrcPrefix() {\n\t\tc.Error(fmt.Errorf(\"the QRC prefix (%s) does not equal (%s)\", prefix, c.pkg.QrcPrefix()))\n\t}\n\n\t\/\/ check the qmldir file\n\t_, err = os.Stat(\"qmldir\")\n\tif err != nil {\n\t\tc.Warning(err.Error())\n\t} else {\n\t\tvar module string\n\t\tmodule, err = c.qmldir()\n\t\tif err != nil {\n\t\t\tc.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif module != c.pkg.Name {\n\t\t\tc.Error(fmt.Errorf(\"the qmldir module (%s) does not equal (%s)\", module, c.pkg.Name))\n\t\t}\n\t}\n\n\tfmt.Printf(\"OK!\\n\")\n\n\treturn nil\n}\n\ntype QRC_File struct {\n\tXMLName xml.Name `xml:\"file\"`\n\tContent string `xml:\",chardata\"`\n}\n\ntype QRC_Resource struct {\n\tXMLName xml.Name `xml:\"qresource\"`\n\tPrefix string `xml:\"prefix,attr\"`\n\tFiles []QRC_File `xml:\"file\"`\n}\n\ntype QRC_RCC struct {\n\tXMLName xml.Name `xml:\"RCC\"`\n\tResources []QRC_Resource `xml:\"qresource\"`\n}\n\nfunc (c *CheckCommand) qrc() (prefix string, err error) {\n\n\tfile, err := os.Open(c.pkg.QrcFile())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tdata, _ := ioutil.ReadAll(file)\n\n\tvar rcc QRC_RCC\n\txml.Unmarshal(data, &rcc)\n\n\tif rcc.Resources != nil {\n\t\treturn rcc.Resources[0].Prefix, nil\n\t}\n\n\treturn \"\", err\n}\n\nfunc (c *CheckCommand) qmldir() (prefix string, err error) {\n\n\tfile, err := os.Open(\"qmldir\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tconst key = \"module \"\n\tconst key_len = len(key)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\ti := strings.Index(line, key)\n\t\tif i != -1 {\n\t\t\treturn line[i+key_len : len(line)], nil\n\t\t}\n\t}\n\n\treturn \"\", err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t\"internal\/testenv\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar lldbPath string\n\nfunc checkLldbPython(t *testing.T) {\n\tcmd := exec.Command(\"lldb\", \"-P\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running lldb: %v\\n%s\", err, out)\n\t}\n\tlldbPath = strings.TrimSpace(string(out))\n\n\tcmd = exec.Command(\"\/usr\/bin\/python2.7\", \"-c\", \"import sys;sys.path.append(sys.argv[1]);import lldb; print('go lldb python support')\", lldbPath)\n\tout, err = cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running python: %v\\n%s\", err, out)\n\t}\n\tif string(out) != \"go lldb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python lldb support: %s\", out)\n\t}\n\n\tif runtime.GOOS == \"darwin\" {\n\t\t\/\/ Try to see if we have debugging permissions.\n\t\tcmd = exec.Command(\"\/usr\/sbin\/DevToolsSecurity\", \"-status\")\n\t\tout, err = cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Skipf(\"DevToolsSecurity failed: %v\", err)\n\t\t} else if !strings.Contains(string(out), \"enabled\") {\n\t\t\tt.Skip(string(out))\n\t\t}\n\t\tcmd = exec.Command(\"\/usr\/bin\/groups\")\n\t\tout, err = cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Skipf(\"groups failed: %v\", err)\n\t\t} else if !strings.Contains(string(out), \"_developer\") {\n\t\t\tt.Skip(\"Not in _developer group\")\n\t\t}\n\t}\n}\n\nconst lldbHelloSource = `\npackage main\nimport \"fmt\"\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tintvar := 42\n\tptrvar := &intvar\n\tfmt.Println(\"hi\") \/\/ line 10\n\t_ = ptrvar\n}\n`\n\nconst lldbScriptSource = `\nimport sys\nsys.path.append(sys.argv[1])\nimport lldb\nimport os\n\nTIMEOUT_SECS = 5\n\ndebugger = lldb.SBDebugger.Create()\ndebugger.SetAsync(True)\ntarget = debugger.CreateTargetWithFileAndArch(\"a.exe\", None)\nif target:\n print \"Created target\"\n main_bp = target.BreakpointCreateByLocation(\"main.go\", 10)\n if main_bp:\n print \"Created breakpoint\"\n process = target.LaunchSimple(None, None, os.getcwd())\n if process:\n print \"Process launched\"\n listener = debugger.GetListener()\n process.broadcaster.AddListener(listener, lldb.SBProcess.eBroadcastBitStateChanged)\n while True:\n event = lldb.SBEvent()\n if listener.WaitForEvent(TIMEOUT_SECS, event):\n if lldb.SBProcess.GetRestartedFromEvent(event):\n continue\n state = process.GetState()\n if state in [lldb.eStateUnloaded, lldb.eStateLaunching, lldb.eStateRunning]:\n continue\n else:\n print \"Timeout launching\"\n break\n if state == lldb.eStateStopped:\n for t in process.threads:\n if t.GetStopReason() == lldb.eStopReasonBreakpoint:\n print \"Hit breakpoint\"\n frame = t.GetFrameAtIndex(0)\n if frame:\n if frame.line_entry:\n print \"Stopped at %s:%d\" % (frame.line_entry.file.basename, frame.line_entry.line)\n if frame.function:\n print \"Stopped in %s\" % (frame.function.name,)\n var = frame.FindVariable('intvar')\n if var:\n print \"intvar = %s\" % (var.GetValue(),)\n else:\n print \"no intvar\"\n else:\n print \"Process state\", state\n process.Destroy()\nelse:\n print \"Failed to create target a.exe\"\n\nlldb.SBDebugger.Destroy(debugger)\nsys.exit()\n`\n\nconst expectedLldbOutput = `Created target\nCreated breakpoint\nProcess launched\nHit breakpoint\nStopped at main.go:10\nStopped in main.main\nintvar = 42\n`\n\nfunc TestLldbPython(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tif final := os.Getenv(\"GOROOT_FINAL\"); final != \"\" && runtime.GOROOT() != final {\n\t\tt.Skip(\"gdb test can fail with GOROOT_FINAL pending\")\n\t}\n\n\tcheckLldbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(lldbHelloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create file: %v\", err)\n\t}\n\n\t\/\/ As of 2018-07-17, lldb doesn't support compressed DWARF, so\n\t\/\/ disable it for this test.\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-gcflags=all=-N -l\", \"-ldflags=-compressdwarf=false\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tsrc = filepath.Join(dir, \"script.py\")\n\terr = ioutil.WriteFile(src, []byte(lldbScriptSource), 0755)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create script: %v\", err)\n\t}\n\n\tcmd = exec.Command(\"\/usr\/bin\/python2.7\", \"script.py\", lldbPath)\n\tcmd.Dir = dir\n\tgot, _ := cmd.CombinedOutput()\n\n\tif string(got) != expectedLldbOutput {\n\t\tif strings.Contains(string(got), \"Timeout launching\") {\n\t\t\tt.Skip(\"Timeout launching\")\n\t\t}\n\t\tt.Fatalf(\"Unexpected lldb output:\\n%s\", got)\n\t}\n}\n<commit_msg>runtime: fix TestLldbPython test with modules enabled<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t\"internal\/testenv\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar lldbPath string\n\nfunc checkLldbPython(t *testing.T) {\n\tcmd := exec.Command(\"lldb\", \"-P\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running lldb: %v\\n%s\", err, out)\n\t}\n\tlldbPath = strings.TrimSpace(string(out))\n\n\tcmd = exec.Command(\"\/usr\/bin\/python2.7\", \"-c\", \"import sys;sys.path.append(sys.argv[1]);import lldb; print('go lldb python support')\", lldbPath)\n\tout, err = cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running python: %v\\n%s\", err, out)\n\t}\n\tif string(out) != \"go lldb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python lldb support: %s\", out)\n\t}\n\n\tif runtime.GOOS == \"darwin\" {\n\t\t\/\/ Try to see if we have debugging permissions.\n\t\tcmd = exec.Command(\"\/usr\/sbin\/DevToolsSecurity\", \"-status\")\n\t\tout, err = cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Skipf(\"DevToolsSecurity failed: %v\", err)\n\t\t} else if !strings.Contains(string(out), \"enabled\") {\n\t\t\tt.Skip(string(out))\n\t\t}\n\t\tcmd = exec.Command(\"\/usr\/bin\/groups\")\n\t\tout, err = cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Skipf(\"groups failed: %v\", err)\n\t\t} else if !strings.Contains(string(out), \"_developer\") {\n\t\t\tt.Skip(\"Not in _developer group\")\n\t\t}\n\t}\n}\n\nconst lldbHelloSource = `\npackage main\nimport \"fmt\"\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tintvar := 42\n\tptrvar := &intvar\n\tfmt.Println(\"hi\") \/\/ line 10\n\t_ = ptrvar\n}\n`\n\nconst lldbScriptSource = `\nimport sys\nsys.path.append(sys.argv[1])\nimport lldb\nimport os\n\nTIMEOUT_SECS = 5\n\ndebugger = lldb.SBDebugger.Create()\ndebugger.SetAsync(True)\ntarget = debugger.CreateTargetWithFileAndArch(\"a.exe\", None)\nif target:\n print \"Created target\"\n main_bp = target.BreakpointCreateByLocation(\"main.go\", 10)\n if main_bp:\n print \"Created breakpoint\"\n process = target.LaunchSimple(None, None, os.getcwd())\n if process:\n print \"Process launched\"\n listener = debugger.GetListener()\n process.broadcaster.AddListener(listener, lldb.SBProcess.eBroadcastBitStateChanged)\n while True:\n event = lldb.SBEvent()\n if listener.WaitForEvent(TIMEOUT_SECS, event):\n if lldb.SBProcess.GetRestartedFromEvent(event):\n continue\n state = process.GetState()\n if state in [lldb.eStateUnloaded, lldb.eStateLaunching, lldb.eStateRunning]:\n continue\n else:\n print \"Timeout launching\"\n break\n if state == lldb.eStateStopped:\n for t in process.threads:\n if t.GetStopReason() == lldb.eStopReasonBreakpoint:\n print \"Hit breakpoint\"\n frame = t.GetFrameAtIndex(0)\n if frame:\n if frame.line_entry:\n print \"Stopped at %s:%d\" % (frame.line_entry.file.basename, frame.line_entry.line)\n if frame.function:\n print \"Stopped in %s\" % (frame.function.name,)\n var = frame.FindVariable('intvar')\n if var:\n print \"intvar = %s\" % (var.GetValue(),)\n else:\n print \"no intvar\"\n else:\n print \"Process state\", state\n process.Destroy()\nelse:\n print \"Failed to create target a.exe\"\n\nlldb.SBDebugger.Destroy(debugger)\nsys.exit()\n`\n\nconst expectedLldbOutput = `Created target\nCreated breakpoint\nProcess launched\nHit breakpoint\nStopped at main.go:10\nStopped in main.main\nintvar = 42\n`\n\nfunc TestLldbPython(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tif final := os.Getenv(\"GOROOT_FINAL\"); final != \"\" && runtime.GOROOT() != final {\n\t\tt.Skip(\"gdb test can fail with GOROOT_FINAL pending\")\n\t}\n\n\tcheckLldbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(lldbHelloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create src file: %v\", err)\n\t}\n\n\tmod := filepath.Join(dir, \"go.mod\")\n\terr = ioutil.WriteFile(mod, []byte(\"module lldbtest\"), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create mod file: %v\", err)\n\t}\n\n\t\/\/ As of 2018-07-17, lldb doesn't support compressed DWARF, so\n\t\/\/ disable it for this test.\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-gcflags=all=-N -l\", \"-ldflags=-compressdwarf=false\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tsrc = filepath.Join(dir, \"script.py\")\n\terr = ioutil.WriteFile(src, []byte(lldbScriptSource), 0755)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create script: %v\", err)\n\t}\n\n\tcmd = exec.Command(\"\/usr\/bin\/python2.7\", \"script.py\", lldbPath)\n\tcmd.Dir = dir\n\tgot, _ := cmd.CombinedOutput()\n\n\tif string(got) != expectedLldbOutput {\n\t\tif strings.Contains(string(got), \"Timeout launching\") {\n\t\t\tt.Skip(\"Timeout launching\")\n\t\t}\n\t\tt.Fatalf(\"Unexpected lldb output:\\n%s\", got)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package testutil\n\nimport \"sync\/atomic\"\n\nvar nextPort uint32 = 40000\n\n\/\/ UniquePort generates a likely unique port, so that multiple servers can run\n\/\/ concurrently. Note that it does not actually check that the port is free,\n\/\/ but uses atomics and a fairly highly port range to maximize the likelihood\n\/\/ that the port is available.\nfunc UniquePort() uint16 {\n\tport := uint16(atomic.AddUint32(&nextPort, 1))\n\n\tif port == 0 {\n\t\tpanic(\"ran out of ports!\")\n\t}\n\n\treturn port\n}\n<commit_msg>Up the test port range<commit_after>package testutil\n\nimport \"sync\/atomic\"\n\nvar nextPort uint32 = 41300\n\n\/\/ UniquePort generates a likely unique port, so that multiple servers can run\n\/\/ concurrently. Note that it does not actually check that the port is free,\n\/\/ but uses atomics and a fairly highly port range to maximize the likelihood\n\/\/ that the port is available.\nfunc UniquePort() uint16 {\n\tport := uint16(atomic.AddUint32(&nextPort, 1))\n\n\tif port == 0 {\n\t\tpanic(\"ran out of ports!\")\n\t}\n\n\treturn port\n}\n<|endoftext|>"} {"text":"<commit_before>package mock_godless\n\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n\tlib \"github.com\/johnny-morrice\/godless\"\n)\n\nfunc TestRunQueryJoinSuccess(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmock := NewMockNamespaceTree(ctrl)\n\n\tquery := &lib.Query{\n\t\tOpCode: lib.JOIN,\n\t\tTableKey: mainTableKey,\n\t\tJoin: lib.QueryJoin{\n\t\t\tRows: []lib.QueryRowJoin{\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row A\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry A\": \"Value A\",\n\t\t\t\t\t\t\"Entry B\": \"Value B\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row B\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry C\": \"Value C\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row A\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry A\": \"Value D\",\n\t\t\t\t\t\t\"Entry D\": \"Value E\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\n\ttable := lib.Table{\n\t\tRows: map[string]lib.Row{\n\t\t\t\"Row A\": lib.Row{\n\t\t\t\tEntries: map[string][]string {\n\t\t\t\t\t\"Entry A\": []string{\"Value A\", \"Value D\"},\n\t\t\t\t\t\"Entry B\": []string{\"Value B\"},\n\t\t\t\t\t\"Entry D\": []string{\"Value E\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Row B\": lib.Row{\n\t\t\t\tEntries: map[string][]string {\n\t\t\t\t\t\"Entry C\": []string{\"Value C\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tmock.EXPECT().JoinTable(mainTableKey, mtchtable(table)).Return(nil)\n\n\tjoiner := lib.MakeNamespaceTreeJoin(mock)\n\tquery.Visit(joiner)\n\tresp := joiner.RunQuery()\n\n\tif !apiResponseEq(lib.RESPONSE_OK, resp) {\n\t\tt.Error(\"Expected\", lib.RESPONSE_OK, \"but was\", resp)\n\t}\n}\n\nfunc TestRunQueryJoinInvalid(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmock := NewMockNamespaceTree(ctrl)\n\n\tinvalidQueries := []*lib.Query{\n\t\t\/\/ Basically wrong.\n\t\t&lib.Query{},\n\t\t&lib.Query{OpCode: lib.SELECT},\n\t}\n\n\tfor _, q := range invalidQueries {\n\t\tjoiner := lib.MakeNamespaceTreeJoin(mock)\n\t\tq.Visit(joiner)\n\t\tresp := joiner.RunQuery()\n\n\t\tif resp.Msg != \"error\" {\n\t\t\tt.Error(\"Expected Msg error but received\", resp.Msg)\n\t\t}\n\n\t\tif resp.Err == nil {\n\t\t\tt.Error(\"Expected response Err\")\n\t\t}\n\t}\n}\n\nfunc mtchtable(t lib.Table) gomock.Matcher {\n\treturn tablematcher{t}\n}\n\ntype tablematcher struct {\n\tt lib.Table\n}\n\nfunc (tm tablematcher) String() string {\n\treturn \"is matching Table\"\n}\n\nfunc (tm tablematcher) Matches(v interface{}) bool {\n\tother, ok := v.(lib.Table)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn tableEq(tm.t, other)\n}\n\nfunc tableEq(a, b lib.Table) bool {\n\tif len(a.Rows) != len(b.Rows) {\n\t\treturn false\n\t}\n\n\t\/\/ TODO funky.\n\tsame := true\n\ta.Foreachrow(func (rkey string, arow lib.Row) {\n\t\tbrow, present := b.Rows[rkey]\n\n\t\tif !present {\n\t\t\tsame = false\n\t\t}\n\n\t\tif !rowEq(arow, brow) {\n\t\t\tsame = false\n\t\t}\n\t})\n\n\treturn same\n}\n<commit_msg>another join mock<commit_after>package mock_godless\n\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/golang\/mock\/gomock\"\n\tlib \"github.com\/johnny-morrice\/godless\"\n)\n\nfunc TestRunQueryJoinSuccess(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmock := NewMockNamespaceTree(ctrl)\n\n\tquery := &lib.Query{\n\t\tOpCode: lib.JOIN,\n\t\tTableKey: mainTableKey,\n\t\tJoin: lib.QueryJoin{\n\t\t\tRows: []lib.QueryRowJoin{\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row A\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry A\": \"Value A\",\n\t\t\t\t\t\t\"Entry B\": \"Value B\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row B\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry C\": \"Value C\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row A\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry A\": \"Value D\",\n\t\t\t\t\t\t\"Entry D\": \"Value E\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\n\ttable := lib.Table{\n\t\tRows: map[string]lib.Row{\n\t\t\t\"Row A\": lib.Row{\n\t\t\t\tEntries: map[string][]string {\n\t\t\t\t\t\"Entry A\": []string{\"Value A\", \"Value D\"},\n\t\t\t\t\t\"Entry B\": []string{\"Value B\"},\n\t\t\t\t\t\"Entry D\": []string{\"Value E\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Row B\": lib.Row{\n\t\t\t\tEntries: map[string][]string {\n\t\t\t\t\t\"Entry C\": []string{\"Value C\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tmock.EXPECT().JoinTable(mainTableKey, mtchtable(table)).Return(nil)\n\n\tjoiner := lib.MakeNamespaceTreeJoin(mock)\n\tquery.Visit(joiner)\n\tresp := joiner.RunQuery()\n\n\tif !apiResponseEq(lib.RESPONSE_OK, resp) {\n\t\tt.Error(\"Expected\", lib.RESPONSE_OK, \"but was\", resp)\n\t}\n}\n\nfunc TestRunQueryJoinFailure(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmock := NewMockNamespaceTree(ctrl)\n\n\tfailQuery := &lib.Query{\n\t\tOpCode: lib.JOIN,\n\t\tTableKey: mainTableKey,\n\t\tJoin: lib.QueryJoin{\n\t\t\tRows: []lib.QueryRowJoin{\n\t\t\t\tlib.QueryRowJoin{\n\t\t\t\t\tRowKey: \"Row A\",\n\t\t\t\t\tEntries: map[string]string{\n\t\t\t\t\t\t\"Entry A\": \"Value A\",\n\t\t\t\t\t\t\"Entry B\": \"Value B\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\ttable := lib.Table{\n\t\tRows: map[string]lib.Row{\n\t\t\t\"Row A\": lib.Row{\n\t\t\t\tEntries: map[string][]string {\n\t\t\t\t\t\"Entry A\": []string{\"Value A\"},\n\t\t\t\t\t\"Entry B\": []string{\"Value B\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tmock.EXPECT().JoinTable(mainTableKey, mtchtable(table)).Return(errors.New(\"Expected error\"))\n\n\tjoiner := lib.MakeNamespaceTreeJoin(mock)\n\tfailQuery.Visit(joiner)\n\tresp := joiner.RunQuery()\n\n\tif resp.Msg != \"error\" {\n\t\tt.Error(\"Expected Msg error but received\", resp.Msg)\n\t}\n\n\tif resp.Err == nil {\n\t\tt.Error(\"Expected response Err\")\n\t}\n}\n\nfunc TestRunQueryJoinInvalid(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmock := NewMockNamespaceTree(ctrl)\n\n\tinvalidQueries := []*lib.Query{\n\t\t\/\/ Basically wrong.\n\t\t&lib.Query{},\n\t\t&lib.Query{OpCode: lib.SELECT},\n\t}\n\n\tfor _, q := range invalidQueries {\n\t\tjoiner := lib.MakeNamespaceTreeJoin(mock)\n\t\tq.Visit(joiner)\n\t\tresp := joiner.RunQuery()\n\n\t\tif resp.Msg != \"error\" {\n\t\t\tt.Error(\"Expected Msg error but received\", resp.Msg)\n\t\t}\n\n\t\tif resp.Err == nil {\n\t\t\tt.Error(\"Expected response Err\")\n\t\t}\n\t}\n}\n\nfunc mtchtable(t lib.Table) gomock.Matcher {\n\treturn tablematcher{t}\n}\n\ntype tablematcher struct {\n\tt lib.Table\n}\n\nfunc (tm tablematcher) String() string {\n\treturn \"is matching Table\"\n}\n\nfunc (tm tablematcher) Matches(v interface{}) bool {\n\tother, ok := v.(lib.Table)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn tableEq(tm.t, other)\n}\n\nfunc tableEq(a, b lib.Table) bool {\n\tif len(a.Rows) != len(b.Rows) {\n\t\treturn false\n\t}\n\n\t\/\/ TODO funky.\n\tsame := true\n\ta.Foreachrow(func (rkey string, arow lib.Row) {\n\t\tbrow, present := b.Rows[rkey]\n\n\t\tif !present {\n\t\t\tsame = false\n\t\t}\n\n\t\tif !rowEq(arow, brow) {\n\t\t\tsame = false\n\t\t}\n\t})\n\n\treturn same\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n)\n\ntype Schedule struct {\n\tTeacher string \/\/ 先生のID\n\tDate []time.Time \/\/ 予約可能日時\n\tUpdated time.Time\n}\n\nconst (\n\tmaxDays = 2\n\tform = \"2006-01-02 15:04:05\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/check\", handler)\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\n\tctx := appengine.NewContext(r)\n\tteacher := os.Getenv(\"teacher\")\n\tif teacher == \"\" {\n\t\tlog.Debugf(ctx, \"invalid teacher id: %v\", teacher)\n\t\treturn\n\t}\n\n\tclient := urlfetch.Client(ctx)\n\tsite := fmt.Sprintf(\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/%s\/\", teacher)\n\tresp, err := client.Get(site)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ yyyy-mm-dd HH:MM:ss\n\tre := regexp.MustCompile(\"[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) ([01][0-9]|2[0-3]):[03]0:00\")\n\n\tavailable := []time.Time{}\n\n\tdoc, _ := goquery.NewDocumentFromResponse(resp)\n\t\/\/ get all schedule\n\n\t\/\/ teacher's name: Second(last) element of document.getElementsByTagName('h1')\n\tname := doc.Find(\"h1\").Last().Text()\n\tlog.Debugf(ctx, \"name : %v\", name)\n\n\t\/\/ teacher's image: document.getElementsByClassName('profile-pic')\n\timage, _ := doc.Find(\".profile-pic\").First().Attr(\"src\")\n\tlog.Debugf(ctx, \"image : %v\", image)\n\n\tdoc.Find(\".oneday\").EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\t\/\/ 直近のmaxDays日分の予約可能情報を対象とする\n\t\tlog.Debugf(ctx, \"i = %v\", i)\n\t\tif i >= maxDays {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ TODO 受講日情報要らない予感\n\t\tdate := s.Find(\".date\").Text() \/\/ 受講日\n\t\tlog.Debugf(ctx, \"-----%v-----\", date)\n\n\t\ts.Find(\".bt-open\").Each(func(_ int, s *goquery.Selection) {\n\n\t\t\ts2, _ := s.Attr(\"id\") \/\/ 受講可能時刻\n\t\t\tlog.Debugf(ctx, \"%v\", s2)\n\t\t\tdateString := re.FindString(s2)\n\t\t\tlog.Debugf(ctx, \"%v\", dateString)\n\n\t\t\tday, _ := time.ParseInLocation(form, dateString, time.FixedZone(\"Asia\/Tokyo\", 9*60*60))\n\t\t\tlog.Debugf(ctx, \"%v\", day)\n\n\t\t\tavailable = append(available, day)\n\t\t})\n\t\treturn true\n\t})\n\n\t\/\/ キーでデータ作ってデータベースに格納してみる\n\t\/\/ とりあえず、対象教師のIDをstringIDに放り込んでみる\n\tkey := datastore.NewKey(ctx, \"Schedule\", teacher, 0, nil)\n\n\tvar old Schedule\n\tif err := datastore.Get(ctx, key, &old); err != nil {\n\t\t\/\/ Entityが空の場合は見逃す\n\t\tif err.Error() != \"datastore: no such entity\" {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnew := Schedule {\n\t\tteacher,\n\t\tavailable,\n\t\ttime.Now().In(time.FixedZone(\"Asia\/Tokyo\", 9*60*60)),\n\t}\n\n\tif _, err := datastore.Put(ctx, key, &new); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnotifications := []string{}\n\tfor _, newVal := range available {\n\t\tvar notify = true\n\t\tfor _, oldVal := range old.Date {\n\t\t\tif newVal.Equal(oldVal) {\n\t\t\t\tnotify = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif notify {\n\t\t\tnotifications = append(notifications, newVal.Format(form))\n\t\t}\n\t}\n\tlog.Debugf(ctx, \"notification data: %v, %v\", len(notifications), notifications)\n\n\tif len(notifications) == 0 {\n\t\treturn\n\t}\n\n\ttoken := os.Getenv(\"slack_token\")\n\tif token != \"\" {\n\t\tvalues := url.Values{}\n\t\tvalues.Add(\"token\", token)\n\t\tvalues.Add(\"channel\", \"#general\")\n\t\tvalues.Add(\"as_user\", \"false\")\n\t\tvalues.Add(\"username\", fmt.Sprintf(\"%s from DMM Eikaiwa\", name))\n\t\tvalues.Add(\"icon_url\", image)\n\t\tvalues.Add(\"text\", fmt.Sprintf(messageFormat, strings.Join(notifications, \"\\n\"), site))\n\n\t\tres, error := client.PostForm(\"https:\/\/slack.com\/api\/chat.postMessage\", values)\n\t\tif error != nil {\n\t\t\tlog.Debugf(ctx, \"senderror %v\", error)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tb, err := ioutil.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\tlog.Debugf(ctx, \"response: %v\", string(b))\n\t\t}\n\t}\n}\n\nconst messageFormat = `\nHi, you can have a lesson below!\n%s\n\nAccess to <%s>\n`\n<commit_msg>複数の先生に対応。一旦逐次処理で実装。<commit_after>package app\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n)\n\ntype Schedule struct {\n\tTeacher string \/\/ 先生のID\n\tDate []time.Time \/\/ 予約可能日時\n\tUpdated time.Time\n}\n\nconst (\n\tmaxDays = 2\n\tform = \"2006-01-02 15:04:05\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/check\", handler)\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\n\tctx := appengine.NewContext(r)\n\tteach := os.Getenv(\"teacher\")\n\tif teach == \"\" {\n\t\tlog.Debugf(ctx, \"invalid teacher id: %v\", teach)\n\t\treturn\n\t}\n\n\tteachers := strings.Split(teach, \",\")\n\tlog.Debugf(ctx, \"teachers: %v\", teachers)\n\tfor _, teacher := range teachers {\n\t\terr := search(ctx, teacher)\n\t\tif err != nil {\n\t\t\tlog.Warningf(ctx, \"err: %v\", err)\n\t\t}\n\t}\n}\n\nfunc search(ctx context.Context, teacher string) error {\n\n\tclient := urlfetch.Client(ctx)\n\tsite := fmt.Sprintf(\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/%s\/\", teacher)\n\tresp, err := client.Get(site)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"access error: %s, context: %v\", site, err)\n\t}\n\n\tdoc, _ := goquery.NewDocumentFromResponse(resp)\n\t\/\/ get all schedule\n\n\t\/\/ teacher's name: Second(last) element of document.getElementsByTagName('h1')\n\tname := doc.Find(\"h1\").Last().Text()\n\tlog.Debugf(ctx, \"name : %v\", name)\n\n\t\/\/ teacher's image: document.getElementsByClassName('profile-pic')\n\timage, _ := doc.Find(\".profile-pic\").First().Attr(\"src\")\n\tlog.Debugf(ctx, \"image : %v\", image)\n\n\tavailable := []time.Time{}\n\t\/\/ yyyy-mm-dd HH:MM:ss\n\tre := regexp.MustCompile(\"[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) ([01][0-9]|2[0-3]):[03]0:00\")\n\n\tdoc.Find(\".oneday\").EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\t\/\/ 直近のmaxDays日分の予約可能情報を対象とする\n\t\tlog.Debugf(ctx, \"i = %v\", i)\n\t\tif i >= maxDays {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ TODO 受講日情報要らない予感\n\t\tdate := s.Find(\".date\").Text() \/\/ 受講日\n\t\tlog.Debugf(ctx, \"-----%v-----\", date)\n\n\t\ts.Find(\".bt-open\").Each(func(_ int, s *goquery.Selection) {\n\n\t\t\ts2, _ := s.Attr(\"id\") \/\/ 受講可能時刻\n\t\t\tlog.Debugf(ctx, \"%v\", s2)\n\t\t\tdateString := re.FindString(s2)\n\t\t\tlog.Debugf(ctx, \"%v\", dateString)\n\n\t\t\tday, _ := time.ParseInLocation(form, dateString, time.FixedZone(\"Asia\/Tokyo\", 9*60*60))\n\t\t\tlog.Debugf(ctx, \"%v\", day)\n\n\t\t\tavailable = append(available, day)\n\t\t})\n\t\treturn true\n\t})\n\n\tkey := datastore.NewKey(ctx, \"Schedule\", teacher, 0, nil)\n\n\tvar old Schedule\n\tif err := datastore.Get(ctx, key, &old); err != nil {\n\t\t\/\/ Entityが空の場合は見逃す\n\t\tif err.Error() != \"datastore: no such entity\" {\n\t\t\treturn fmt.Errorf(\"datastore access error: %s, context: %v\", teacher, err)\n\t\t}\n\t}\n\n\tnew := Schedule {\n\t\tteacher,\n\t\tavailable,\n\t\ttime.Now().In(time.FixedZone(\"Asia\/Tokyo\", 9*60*60)),\n\t}\n\n\tif _, err := datastore.Put(ctx, key, &new); err != nil {\n\t\treturn fmt.Errorf(\"datastore access error: %s, context: %v\", new.Teacher, err)\n\t}\n\n\tnotifications := []string{}\n\tfor _, newVal := range available {\n\t\tvar notify = true\n\t\tfor _, oldVal := range old.Date {\n\t\t\tif newVal.Equal(oldVal) {\n\t\t\t\tnotify = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif notify {\n\t\t\tnotifications = append(notifications, newVal.Format(form))\n\t\t}\n\t}\n\tlog.Debugf(ctx, \"notification data: %v, %v\", len(notifications), notifications)\n\n\tif len(notifications) == 0 {\n\t\treturn nil\n\t}\n\n\ttoken := os.Getenv(\"slack_token\")\n\tif token != \"\" {\n\t\tvalues := url.Values{}\n\t\tvalues.Add(\"token\", token)\n\t\tvalues.Add(\"channel\", \"#general\")\n\t\tvalues.Add(\"as_user\", \"false\")\n\t\tvalues.Add(\"username\", fmt.Sprintf(\"%s from DMM Eikaiwa\", name))\n\t\tvalues.Add(\"icon_url\", image)\n\t\tvalues.Add(\"text\", fmt.Sprintf(messageFormat, strings.Join(notifications, \"\\n\"), site))\n\n\t\tres, err := client.PostForm(\"https:\/\/slack.com\/api\/chat.postMessage\", values)\n\t\tif err != nil {\n\t\t\tlog.Debugf(ctx, \"senderror %v\", err)\n\t\t\treturn fmt.Errorf(\"noti send error: %s, context: %v\", teacher, err)\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tb, err := ioutil.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\tlog.Debugf(ctx, \"response: %v\", string(b))\n\t\t}\n\t}\n\treturn nil\n}\n\nconst messageFormat = `\nHi, you can have a lesson below!\n%s\n\nAccess to <%s>\n`\n<|endoftext|>"} {"text":"<commit_before>package inmemory\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/distribution\/storagedriver\"\n\t\"github.com\/docker\/distribution\/storagedriver\/factory\"\n)\n\nconst driverName = \"inmemory\"\n\nfunc init() {\n\tfactory.Register(driverName, &inMemoryDriverFactory{})\n}\n\n\/\/ inMemoryDriverFacotry implements the factory.StorageDriverFactory interface.\ntype inMemoryDriverFactory struct{}\n\nfunc (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {\n\treturn New(), nil\n}\n\n\/\/ Driver is a storagedriver.StorageDriver implementation backed by a local map.\n\/\/ Intended solely for example and testing purposes.\ntype Driver struct {\n\troot *dir\n\tmutex sync.RWMutex\n}\n\n\/\/ New constructs a new Driver.\nfunc New() *Driver {\n\treturn &Driver{root: &dir{\n\t\tcommon: common{\n\t\t\tp: \"\/\",\n\t\t\tmod: time.Now(),\n\t\t},\n\t}}\n}\n\n\/\/ Implement the storagedriver.StorageDriver interface.\n\n\/\/ GetContent retrieves the content stored at \"path\" as a []byte.\nfunc (d *Driver) GetContent(path string) ([]byte, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\trc, err := d.ReadStream(path, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rc.Close()\n\n\treturn ioutil.ReadAll(rc)\n}\n\n\/\/ PutContent stores the []byte content at a location designated by \"path\".\nfunc (d *Driver) PutContent(p string, contents []byte) error {\n\tif !storagedriver.PathRegexp.MatchString(p) {\n\t\treturn storagedriver.InvalidPathError{Path: p}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tf, err := d.root.mkfile(p)\n\tif err != nil {\n\t\t\/\/ TODO(stevvooe): Again, we need to clarify when this is not a\n\t\t\/\/ directory in StorageDriver API.\n\t\treturn fmt.Errorf(\"not a file\")\n\t}\n\n\tf.truncate()\n\tf.WriteAt(contents, 0)\n\n\treturn nil\n}\n\n\/\/ ReadStream retrieves an io.ReadCloser for the content stored at \"path\" with a\n\/\/ given byte offset.\nfunc (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tif offset < 0 {\n\t\treturn nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}\n\t}\n\n\tpath = normalize(path)\n\tfound := d.root.find(path)\n\n\tif found.path() != path {\n\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t}\n\n\tif found.isdir() {\n\t\treturn nil, fmt.Errorf(\"%q is a directory\", path)\n\t}\n\n\treturn ioutil.NopCloser(found.(*file).sectionReader(offset)), nil\n}\n\n\/\/ WriteStream stores the contents of the provided io.ReadCloser at a location\n\/\/ designated by the given path.\nfunc (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn 0, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tif offset < 0 {\n\t\treturn 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}\n\t}\n\n\tnormalized := normalize(path)\n\n\tf, err := d.root.mkfile(normalized)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"not a file\")\n\t}\n\n\t\/\/ Unlock while we are reading from the source, in case we are reading\n\t\/\/ from the same mfs instance. This can be fixed by a more granular\n\t\/\/ locking model.\n\td.mutex.Unlock()\n\td.mutex.RLock() \/\/ Take the readlock to block other writers.\n\tvar buf bytes.Buffer\n\n\tnn, err = buf.ReadFrom(reader)\n\tif err != nil {\n\t\t\/\/ TODO(stevvooe): This condition is odd and we may need to clarify:\n\t\t\/\/ we've read nn bytes from reader but have written nothing to the\n\t\t\/\/ backend. What is the correct return value? Really, the caller needs\n\t\t\/\/ to know that the reader has been advanced and reattempting the\n\t\t\/\/ operation is incorrect.\n\t\td.mutex.RUnlock()\n\t\td.mutex.Lock()\n\t\treturn nn, err\n\t}\n\n\td.mutex.RUnlock()\n\td.mutex.Lock()\n\tf.WriteAt(buf.Bytes(), offset)\n\treturn nn, err\n}\n\n\/\/ Stat returns info about the provided path.\nfunc (d *Driver) Stat(path string) (storagedriver.FileInfo, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tnormalized := normalize(path)\n\tfound := d.root.find(path)\n\n\tif found.path() != normalized {\n\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t}\n\n\tfi := storagedriver.FileInfoFields{\n\t\tPath: path,\n\t\tIsDir: found.isdir(),\n\t\tModTime: found.modtime(),\n\t}\n\n\tif !fi.IsDir {\n\t\tfi.Size = int64(len(found.(*file).data))\n\t}\n\n\treturn storagedriver.FileInfoInternal{FileInfoFields: fi}, nil\n}\n\n\/\/ List returns a list of the objects that are direct descendants of the given\n\/\/ path.\nfunc (d *Driver) List(path string) ([]string, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) && path != \"\/\" {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\tnormalized := normalize(path)\n\n\tfound := d.root.find(normalized)\n\n\tif !found.isdir() {\n\t\treturn nil, fmt.Errorf(\"not a directory\") \/\/ TODO(stevvooe): Need error type for this...\n\t}\n\n\tentries, err := found.(*dir).list(normalized)\n\n\tif err != nil {\n\t\tswitch err {\n\t\tcase errNotExists:\n\t\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t\tcase errIsNotDir:\n\t\t\treturn nil, fmt.Errorf(\"not a directory\")\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn entries, nil\n}\n\n\/\/ Move moves an object stored at sourcePath to destPath, removing the original\n\/\/ object.\nfunc (d *Driver) Move(sourcePath string, destPath string) error {\n\tif !storagedriver.PathRegexp.MatchString(sourcePath) {\n\t\treturn storagedriver.InvalidPathError{Path: sourcePath}\n\t} else if !storagedriver.PathRegexp.MatchString(destPath) {\n\t\treturn storagedriver.InvalidPathError{Path: destPath}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tnormalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath)\n\n\terr := d.root.move(normalizedSrc, normalizedDst)\n\tswitch err {\n\tcase errNotExists:\n\t\treturn storagedriver.PathNotFoundError{Path: destPath}\n\tdefault:\n\t\treturn err\n\t}\n}\n\n\/\/ Delete recursively deletes all objects stored at \"path\" and its subpaths.\nfunc (d *Driver) Delete(path string) error {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tnormalized := normalize(path)\n\n\terr := d.root.delete(normalized)\n\tswitch err {\n\tcase errNotExists:\n\t\treturn storagedriver.PathNotFoundError{Path: path}\n\tdefault:\n\t\treturn err\n\t}\n}\n\n\/\/ URLFor returns a URL which may be used to retrieve the content stored at the given path.\n\/\/ May return an UnsupportedMethodErr in certain StorageDriver implementations.\nfunc (d *Driver) URLFor(path string, options map[string]interface{}) (string, error) {\n\treturn \"\", storagedriver.ErrUnsupportedMethod\n}\n<commit_msg>[InMemory] Add missing mutex.RLock\/RUnlock in List to protect internal map<commit_after>package inmemory\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/distribution\/storagedriver\"\n\t\"github.com\/docker\/distribution\/storagedriver\/factory\"\n)\n\nconst driverName = \"inmemory\"\n\nfunc init() {\n\tfactory.Register(driverName, &inMemoryDriverFactory{})\n}\n\n\/\/ inMemoryDriverFacotry implements the factory.StorageDriverFactory interface.\ntype inMemoryDriverFactory struct{}\n\nfunc (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {\n\treturn New(), nil\n}\n\n\/\/ Driver is a storagedriver.StorageDriver implementation backed by a local map.\n\/\/ Intended solely for example and testing purposes.\ntype Driver struct {\n\troot *dir\n\tmutex sync.RWMutex\n}\n\n\/\/ New constructs a new Driver.\nfunc New() *Driver {\n\treturn &Driver{root: &dir{\n\t\tcommon: common{\n\t\t\tp: \"\/\",\n\t\t\tmod: time.Now(),\n\t\t},\n\t}}\n}\n\n\/\/ Implement the storagedriver.StorageDriver interface.\n\n\/\/ GetContent retrieves the content stored at \"path\" as a []byte.\nfunc (d *Driver) GetContent(path string) ([]byte, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\trc, err := d.ReadStream(path, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rc.Close()\n\n\treturn ioutil.ReadAll(rc)\n}\n\n\/\/ PutContent stores the []byte content at a location designated by \"path\".\nfunc (d *Driver) PutContent(p string, contents []byte) error {\n\tif !storagedriver.PathRegexp.MatchString(p) {\n\t\treturn storagedriver.InvalidPathError{Path: p}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tf, err := d.root.mkfile(p)\n\tif err != nil {\n\t\t\/\/ TODO(stevvooe): Again, we need to clarify when this is not a\n\t\t\/\/ directory in StorageDriver API.\n\t\treturn fmt.Errorf(\"not a file\")\n\t}\n\n\tf.truncate()\n\tf.WriteAt(contents, 0)\n\n\treturn nil\n}\n\n\/\/ ReadStream retrieves an io.ReadCloser for the content stored at \"path\" with a\n\/\/ given byte offset.\nfunc (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tif offset < 0 {\n\t\treturn nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}\n\t}\n\n\tpath = normalize(path)\n\tfound := d.root.find(path)\n\n\tif found.path() != path {\n\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t}\n\n\tif found.isdir() {\n\t\treturn nil, fmt.Errorf(\"%q is a directory\", path)\n\t}\n\n\treturn ioutil.NopCloser(found.(*file).sectionReader(offset)), nil\n}\n\n\/\/ WriteStream stores the contents of the provided io.ReadCloser at a location\n\/\/ designated by the given path.\nfunc (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn 0, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tif offset < 0 {\n\t\treturn 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset}\n\t}\n\n\tnormalized := normalize(path)\n\n\tf, err := d.root.mkfile(normalized)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"not a file\")\n\t}\n\n\t\/\/ Unlock while we are reading from the source, in case we are reading\n\t\/\/ from the same mfs instance. This can be fixed by a more granular\n\t\/\/ locking model.\n\td.mutex.Unlock()\n\td.mutex.RLock() \/\/ Take the readlock to block other writers.\n\tvar buf bytes.Buffer\n\n\tnn, err = buf.ReadFrom(reader)\n\tif err != nil {\n\t\t\/\/ TODO(stevvooe): This condition is odd and we may need to clarify:\n\t\t\/\/ we've read nn bytes from reader but have written nothing to the\n\t\t\/\/ backend. What is the correct return value? Really, the caller needs\n\t\t\/\/ to know that the reader has been advanced and reattempting the\n\t\t\/\/ operation is incorrect.\n\t\td.mutex.RUnlock()\n\t\td.mutex.Lock()\n\t\treturn nn, err\n\t}\n\n\td.mutex.RUnlock()\n\td.mutex.Lock()\n\tf.WriteAt(buf.Bytes(), offset)\n\treturn nn, err\n}\n\n\/\/ Stat returns info about the provided path.\nfunc (d *Driver) Stat(path string) (storagedriver.FileInfo, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tnormalized := normalize(path)\n\tfound := d.root.find(path)\n\n\tif found.path() != normalized {\n\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t}\n\n\tfi := storagedriver.FileInfoFields{\n\t\tPath: path,\n\t\tIsDir: found.isdir(),\n\t\tModTime: found.modtime(),\n\t}\n\n\tif !fi.IsDir {\n\t\tfi.Size = int64(len(found.(*file).data))\n\t}\n\n\treturn storagedriver.FileInfoInternal{FileInfoFields: fi}, nil\n}\n\n\/\/ List returns a list of the objects that are direct descendants of the given\n\/\/ path.\nfunc (d *Driver) List(path string) ([]string, error) {\n\tif !storagedriver.PathRegexp.MatchString(path) && path != \"\/\" {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tnormalized := normalize(path)\n\n\tfound := d.root.find(normalized)\n\n\tif !found.isdir() {\n\t\treturn nil, fmt.Errorf(\"not a directory\") \/\/ TODO(stevvooe): Need error type for this...\n\t}\n\n\tentries, err := found.(*dir).list(normalized)\n\n\tif err != nil {\n\t\tswitch err {\n\t\tcase errNotExists:\n\t\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t\tcase errIsNotDir:\n\t\t\treturn nil, fmt.Errorf(\"not a directory\")\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn entries, nil\n}\n\n\/\/ Move moves an object stored at sourcePath to destPath, removing the original\n\/\/ object.\nfunc (d *Driver) Move(sourcePath string, destPath string) error {\n\tif !storagedriver.PathRegexp.MatchString(sourcePath) {\n\t\treturn storagedriver.InvalidPathError{Path: sourcePath}\n\t} else if !storagedriver.PathRegexp.MatchString(destPath) {\n\t\treturn storagedriver.InvalidPathError{Path: destPath}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tnormalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath)\n\n\terr := d.root.move(normalizedSrc, normalizedDst)\n\tswitch err {\n\tcase errNotExists:\n\t\treturn storagedriver.PathNotFoundError{Path: destPath}\n\tdefault:\n\t\treturn err\n\t}\n}\n\n\/\/ Delete recursively deletes all objects stored at \"path\" and its subpaths.\nfunc (d *Driver) Delete(path string) error {\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn storagedriver.InvalidPathError{Path: path}\n\t}\n\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\n\tnormalized := normalize(path)\n\n\terr := d.root.delete(normalized)\n\tswitch err {\n\tcase errNotExists:\n\t\treturn storagedriver.PathNotFoundError{Path: path}\n\tdefault:\n\t\treturn err\n\t}\n}\n\n\/\/ URLFor returns a URL which may be used to retrieve the content stored at the given path.\n\/\/ May return an UnsupportedMethodErr in certain StorageDriver implementations.\nfunc (d *Driver) URLFor(path string, options map[string]interface{}) (string, error) {\n\treturn \"\", storagedriver.ErrUnsupportedMethod\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package redigostore offers Redis-based store implementation for throttled using redigo.\npackage redigostore \/\/ import \"github.com\/throttled\/throttled\/v2\/store\/redigostore\"\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n)\n\nconst (\n\tredisCASMissingKey = \"key does not exist\"\n\tredisCASScript = `\nlocal v = redis.call('get', KEYS[1])\nif v == false then\n return redis.error_reply(\"key does not exist\")\nend\nif v ~= ARGV[1] then\n return 0\nend\nredis.call('setex', KEYS[1], ARGV[3], ARGV[2])\nreturn 1\n`\n)\n\n\/\/ RedigoStore implements a Redis-based store using redigo.\ntype RedigoStore struct {\n\tpool *redis.Pool\n\tprefix string\n\tdb int\n}\n\n\/\/ New creates a new Redis-based store, using the provided pool to get\n\/\/ its connections. The keys will have the specified keyPrefix, which\n\/\/ may be an empty string, and the database index specified by db will\n\/\/ be selected to store the keys. Any updating operations will reset\n\/\/ the key TTL to the provided value rounded down to the nearest\n\/\/ second. Depends on Redis 2.6+ for EVAL support.\nfunc New(pool *redis.Pool, keyPrefix string, db int) (*RedigoStore, error) {\n\treturn &RedigoStore{\n\t\tpool: pool,\n\t\tprefix: keyPrefix,\n\t\tdb: db,\n\t}, nil\n}\n\n\/\/ GetWithTime returns the value of the key if it is in the store\n\/\/ or -1 if it does not exist. It also returns the current time at\n\/\/ the redis server to microsecond precision.\nfunc (r *RedigoStore) GetWithTime(key string) (int64, time.Time, error) {\n\tvar now time.Time\n\n\tkey = r.prefix + key\n\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn 0, now, err\n\t}\n\tdefer conn.Close()\n\n\tconn.Send(\"TIME\")\n\tconn.Send(\"GET\", key)\n\tconn.Flush()\n\ttimeReply, err := redis.Values(conn.Receive())\n\tif err != nil {\n\t\treturn 0, now, err\n\t}\n\n\tvar s, us int64\n\tif _, err := redis.Scan(timeReply, &s, &us); err != nil {\n\t\treturn 0, now, err\n\t}\n\tnow = time.Unix(s, us*int64(time.Microsecond))\n\n\tv, err := redis.Int64(conn.Receive())\n\tif err == redis.ErrNil {\n\t\treturn -1, now, nil\n\t} else if err != nil {\n\t\treturn 0, now, err\n\t}\n\n\treturn v, now, nil\n}\n\n\/\/ SetIfNotExistsWithTTL sets the value of key only if it is not\n\/\/ already set in the store it returns whether a new value was set.\n\/\/ If a new value was set, the ttl in the key is also set, though this\n\/\/ operation is not performed atomically.\nfunc (r *RedigoStore) SetIfNotExistsWithTTL(key string, value int64, ttl time.Duration) (bool, error) {\n\tkey = r.prefix + key\n\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer conn.Close()\n\n\tv, err := redis.Int64(conn.Do(\"SETNX\", key, value))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tupdated := v == 1\n\n\tttlSeconds := int(ttl.Seconds())\n\n\t\/\/ An `EXPIRE 0` will delete the key immediately, so make sure that we set\n\t\/\/ expiry for a minimum of one second out so that our results stay in the\n\t\/\/ store.\n\tif ttlSeconds < 1 {\n\t\tttlSeconds = 1\n\t}\n\n\tif _, err := conn.Do(\"EXPIRE\", key, ttlSeconds); err != nil {\n\t\treturn updated, err\n\t}\n\n\treturn updated, nil\n}\n\n\/\/ CompareAndSwapWithTTL atomically compares the value at key to the\n\/\/ old value. If it matches, it sets it to the new value and returns\n\/\/ true. Otherwise, it returns false. If the key does not exist in the\n\/\/ store, it returns false with no error. If the swap succeeds, the\n\/\/ ttl for the key is updated atomically.\nfunc (r *RedigoStore) CompareAndSwapWithTTL(key string, old, new int64, ttl time.Duration) (bool, error) {\n\tkey = r.prefix + key\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer conn.Close()\n\n\tttlSeconds := int(ttl.Seconds())\n\n\t\/\/ An `EXPIRE 0` will delete the key immediately, so make sure that we set\n\t\/\/ expiry for a minimum of one second out so that our results stay in the\n\t\/\/ store.\n\tif ttlSeconds < 1 {\n\t\tttlSeconds = 1\n\t}\n\n\tswapped, err := redis.Bool(conn.Do(\"EVAL\", redisCASScript, 1, key, old, new, ttlSeconds))\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), redisCASMissingKey) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn swapped, nil\n}\n\n\/\/ Select the specified database index.\nfunc (r *RedigoStore) getConn() (redis.Conn, error) {\n\tconn := r.pool.Get()\n\n\t\/\/ Select the specified database\n\tif _, err := redis.String(conn.Do(\"SELECT\", r.db)); err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n<commit_msg>Add support for redisc to redigostore<commit_after>\/\/ Package redigostore offers Redis-based store implementation for throttled using redigo.\npackage redigostore \/\/ import \"github.com\/throttled\/throttled\/v2\/store\/redigostore\"\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n)\n\nconst (\n\tredisCASMissingKey = \"key does not exist\"\n\tredisCASScript = `\nlocal v = redis.call('get', KEYS[1])\nif v == false then\n return redis.error_reply(\"key does not exist\")\nend\nif v ~= ARGV[1] then\n return 0\nend\nredis.call('setex', KEYS[1], ARGV[3], ARGV[2])\nreturn 1\n`\n)\n\n\/\/ RedigoPool is the interface for retrieving a Redis connection from a Redigo\n\/\/ pool. This same interface is supported by the cluster client in\n\/\/ https:\/\/github.com\/mna\/redisc.\ntype RedigoPool interface {\n\tGet() redis.Conn\n}\n\n\/\/ RedigoStore implements a Redis-based store using redigo.\ntype RedigoStore struct {\n\tpool RedigoPool\n\tprefix string\n\tdb int\n}\n\n\/\/ New creates a new Redis-based store, using the provided pool to get\n\/\/ its connections. The keys will have the specified keyPrefix, which\n\/\/ may be an empty string, and the database index specified by db will\n\/\/ be selected to store the keys. Any updating operations will reset\n\/\/ the key TTL to the provided value rounded down to the nearest\n\/\/ second. Depends on Redis 2.6+ for EVAL support.\nfunc New(pool RedigoPool, keyPrefix string, db int) (*RedigoStore, error) {\n\treturn &RedigoStore{\n\t\tpool: pool,\n\t\tprefix: keyPrefix,\n\t\tdb: db,\n\t}, nil\n}\n\n\/\/ GetWithTime returns the value of the key if it is in the store\n\/\/ or -1 if it does not exist. It also returns the current time at\n\/\/ the redis server to microsecond precision.\nfunc (r *RedigoStore) GetWithTime(key string) (int64, time.Time, error) {\n\tvar now time.Time\n\n\tkey = r.prefix + key\n\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn 0, now, err\n\t}\n\tdefer conn.Close()\n\n\tconn.Send(\"TIME\")\n\tconn.Send(\"GET\", key)\n\tconn.Flush()\n\ttimeReply, err := redis.Values(conn.Receive())\n\tif err != nil {\n\t\treturn 0, now, err\n\t}\n\n\tvar s, us int64\n\tif _, err := redis.Scan(timeReply, &s, &us); err != nil {\n\t\treturn 0, now, err\n\t}\n\tnow = time.Unix(s, us*int64(time.Microsecond))\n\n\tv, err := redis.Int64(conn.Receive())\n\tif err == redis.ErrNil {\n\t\treturn -1, now, nil\n\t} else if err != nil {\n\t\treturn 0, now, err\n\t}\n\n\treturn v, now, nil\n}\n\n\/\/ SetIfNotExistsWithTTL sets the value of key only if it is not\n\/\/ already set in the store it returns whether a new value was set.\n\/\/ If a new value was set, the ttl in the key is also set, though this\n\/\/ operation is not performed atomically.\nfunc (r *RedigoStore) SetIfNotExistsWithTTL(key string, value int64, ttl time.Duration) (bool, error) {\n\tkey = r.prefix + key\n\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer conn.Close()\n\n\tv, err := redis.Int64(conn.Do(\"SETNX\", key, value))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tupdated := v == 1\n\n\tttlSeconds := int(ttl.Seconds())\n\n\t\/\/ An `EXPIRE 0` will delete the key immediately, so make sure that we set\n\t\/\/ expiry for a minimum of one second out so that our results stay in the\n\t\/\/ store.\n\tif ttlSeconds < 1 {\n\t\tttlSeconds = 1\n\t}\n\n\tif _, err := conn.Do(\"EXPIRE\", key, ttlSeconds); err != nil {\n\t\treturn updated, err\n\t}\n\n\treturn updated, nil\n}\n\n\/\/ CompareAndSwapWithTTL atomically compares the value at key to the\n\/\/ old value. If it matches, it sets it to the new value and returns\n\/\/ true. Otherwise, it returns false. If the key does not exist in the\n\/\/ store, it returns false with no error. If the swap succeeds, the\n\/\/ ttl for the key is updated atomically.\nfunc (r *RedigoStore) CompareAndSwapWithTTL(key string, old, new int64, ttl time.Duration) (bool, error) {\n\tkey = r.prefix + key\n\tconn, err := r.getConn()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer conn.Close()\n\n\tttlSeconds := int(ttl.Seconds())\n\n\t\/\/ An `EXPIRE 0` will delete the key immediately, so make sure that we set\n\t\/\/ expiry for a minimum of one second out so that our results stay in the\n\t\/\/ store.\n\tif ttlSeconds < 1 {\n\t\tttlSeconds = 1\n\t}\n\n\tswapped, err := redis.Bool(conn.Do(\"EVAL\", redisCASScript, 1, key, old, new, ttlSeconds))\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), redisCASMissingKey) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn swapped, nil\n}\n\n\/\/ Select the specified database index.\nfunc (r *RedigoStore) getConn() (redis.Conn, error) {\n\tconn := r.pool.Get()\n\n\t\/\/ Select the specified database\n\tif _, err := redis.String(conn.Do(\"SELECT\", r.db)); err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"log\"\n)\n\n\/\/ NodeApplyableResource represents a resource that is \"applyable\":\n\/\/ it may need to have its record in the state adjusted to match configuration.\n\/\/\n\/\/ Unlike in the plan walk, this resource node does not DynamicExpand. Instead,\n\/\/ it should be inserted into the same graph as any instances of the nodes\n\/\/ with dependency edges ensuring that the resource is evaluated before any\n\/\/ of its instances, which will turn ensure that the whole-resource record\n\/\/ in the state is suitably prepared to receive any updates to instances.\ntype NodeApplyableResource struct {\n\t*NodeAbstractResource\n}\n\nvar (\n\t_ GraphNodeResource = (*NodeApplyableResource)(nil)\n\t_ GraphNodeEvalable = (*NodeApplyableResource)(nil)\n\t_ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil)\n\t_ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil)\n)\n\nfunc (n *NodeApplyableResource) Name() string {\n\treturn n.NodeAbstractResource.Name() + \" (prepare state)\"\n}\n\n\/\/ GraphNodeEvalable\nfunc (n *NodeApplyableResource) EvalTree() EvalNode {\n\taddr := n.ResourceAddr()\n\tconfig := n.Config\n\tproviderAddr := n.ResolvedProvider\n\n\tif config == nil {\n\t\t\/\/ Nothing to do, then.\n\t\tlog.Printf(\"[TRACE] NodeApplyableResource: no configuration present for %s\", addr)\n\t\treturn &EvalNoop{}\n\t}\n\n\treturn &EvalWriteResourceState{\n\t\tAddr: addr.Resource,\n\t\tConfig: config,\n\t\tProviderAddr: providerAddr,\n\t}\n}\n<commit_msg>core: NodeApplyableResource only depends on count and for_each<commit_after>package terraform\n\nimport (\n\t\"log\"\n\n\t\"github.com\/hashicorp\/terraform\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/dag\"\n\t\"github.com\/hashicorp\/terraform\/lang\"\n)\n\n\/\/ NodeApplyableResource represents a resource that is \"applyable\":\n\/\/ it may need to have its record in the state adjusted to match configuration.\n\/\/\n\/\/ Unlike in the plan walk, this resource node does not DynamicExpand. Instead,\n\/\/ it should be inserted into the same graph as any instances of the nodes\n\/\/ with dependency edges ensuring that the resource is evaluated before any\n\/\/ of its instances, which will turn ensure that the whole-resource record\n\/\/ in the state is suitably prepared to receive any updates to instances.\ntype NodeApplyableResource struct {\n\t*NodeAbstractResource\n}\n\nvar (\n\t_ GraphNodeResource = (*NodeApplyableResource)(nil)\n\t_ GraphNodeEvalable = (*NodeApplyableResource)(nil)\n\t_ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil)\n\t_ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil)\n\t_ GraphNodeReferencer = (*NodeApplyableResource)(nil)\n)\n\nfunc (n *NodeApplyableResource) Name() string {\n\treturn n.NodeAbstractResource.Name() + \" (prepare state)\"\n}\n\nfunc (n *NodeApplyableResource) References() []*addrs.Reference {\n\tif n.Config == nil {\n\t\tlog.Printf(\"[WARN] NodeApplyableResource %q: no configuration, so can't determine References\", dag.VertexName(n))\n\t\treturn nil\n\t}\n\n\tvar result []*addrs.Reference\n\n\t\/\/ Since this node type only updates resource-level metadata, we only\n\t\/\/ need to worry about the parts of the configuration that affect\n\t\/\/ our \"each mode\": the count and for_each meta-arguments.\n\trefs, _ := lang.ReferencesInExpr(n.Config.Count)\n\tresult = append(result, refs...)\n\trefs, _ = lang.ReferencesInExpr(n.Config.ForEach)\n\tresult = append(result, refs...)\n\n\treturn result\n}\n\n\/\/ GraphNodeEvalable\nfunc (n *NodeApplyableResource) EvalTree() EvalNode {\n\taddr := n.ResourceAddr()\n\tconfig := n.Config\n\tproviderAddr := n.ResolvedProvider\n\n\tif config == nil {\n\t\t\/\/ Nothing to do, then.\n\t\tlog.Printf(\"[TRACE] NodeApplyableResource: no configuration present for %s\", addr)\n\t\treturn &EvalNoop{}\n\t}\n\n\treturn &EvalWriteResourceState{\n\t\tAddr: addr.Resource,\n\t\tConfig: config,\n\t\tProviderAddr: providerAddr,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package builds\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/jenkins\"\n)\n\nfunc debugAnyJenkinsFailure(br *exutil.BuildResult, name string, oc *exutil.CLI, dumpMaster bool) {\n\tif !br.BuildSuccess {\n\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n\\n START debugAnyJenkinsFailure\\n\\n\")\n\t\tj := jenkins.NewRef(oc)\n\t\tjobLog, err := j.GetLastJobConsoleLogs(name)\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n %s job log:\\n%s\", name, jobLog)\n\t\t} else {\n\t\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n error getting %s job log: %#v\", name, err)\n\t\t}\n\t\tif dumpMaster {\n\t\t\texutil.DumpDeploymentLogs(\"jenkins\", oc)\n\t\t}\n\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n\\n END debugAnyJenkinsFailure\\n\\n\")\n\t}\n}\n\nvar _ = g.Describe(\"[builds][Slow] openshift pipeline build\", func() {\n\tdefer g.GinkgoRecover()\n\tvar (\n\t\tjenkinsTemplatePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"jenkins-ephemeral-template.json\")\n\t\tmavenSlavePipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"maven-pipeline.yaml\")\n\t\t\/\/orchestrationPipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"mapsapp-pipeline.yaml\")\n\t\tblueGreenPipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"bluegreen-pipeline.yaml\")\n\n\t\toc = exutil.NewCLI(\"jenkins-pipeline\", exutil.KubeConfigPath())\n\t\tticker *time.Ticker\n\t\tj *jenkins.JenkinsRef\n\t\tdcLogFollow *exec.Cmd\n\t\tdcLogStdOut, dcLogStdErr *bytes.Buffer\n\t\tsetupJenkins = func() {\n\t\t\t\/\/ Deploy Jenkins\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", jenkinsTemplatePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", jenkinsTemplatePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"waiting for jenkins deployment\")\n\t\t\terr = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), \"jenkins\", oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tj = jenkins.NewRef(oc)\n\n\t\t\tg.By(\"wait for jenkins to come up\")\n\t\t\t_, err = j.WaitForContent(\"\", 200, 10*time.Minute, \"\")\n\n\t\t\tif err != nil {\n\t\t\t\texutil.DumpDeploymentLogs(\"jenkins\", oc)\n\t\t\t}\n\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ Start capturing logs from this deployment config.\n\t\t\t\/\/ This command will terminate if the Jenkins instance crashes. This\n\t\t\t\/\/ ensures that even if the Jenkins DC restarts, we should capture\n\t\t\t\/\/ logs from the crash.\n\t\t\tdcLogFollow, dcLogStdOut, dcLogStdErr, err = oc.Run(\"logs\").Args(\"-f\", \"dc\/jenkins\").Background()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t}\n\t)\n\n\tg.AfterEach(func() {\n\t\tif os.Getenv(jenkins.DisableJenkinsGCSTats) == \"\" {\n\t\t\tg.By(\"stop jenkins gc tracking\")\n\t\t\tticker.Stop()\n\t\t}\n\t})\n\n\tg.BeforeEach(func() {\n\t\tsetupJenkins()\n\n\t\tif os.Getenv(jenkins.DisableJenkinsGCSTats) == \"\" {\n\t\t\tg.By(\"start jenkins gc tracking\")\n\t\t\tticker = jenkins.StartJenkinsGCTracking(oc, oc.Namespace())\n\t\t}\n\n\t\tg.By(\"waiting for builder service account\")\n\t\terr := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t})\n\n\tg.Context(\"Pipeline with maven slave\", func() {\n\t\tg.It(\"should build and complete successfully\", func() {\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", mavenSlavePipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", mavenSlavePipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ start the build\n\t\t\tg.By(\"starting the pipeline build and waiting for it to complete\")\n\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"openshift-jee-sample\")\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-openshift-jee-sample\", oc, true)\n\t\t\tbr.AssertSuccess()\n\n\t\t\t\/\/ wait for the service to be running\n\t\t\tg.By(\"expecting the openshift-jee-sample service to be deployed and running\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"openshift-jee-sample\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t})\n\t})\n\n\t\/*g.Context(\"Orchestration pipeline\", func() {\n\t\tg.It(\"should build and complete successfully\", func() {\n\t\t\tsetupJenkins()\n\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", orchestrationPipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", orchestrationPipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ start the build\n\t\t\tg.By(\"starting the pipeline build and waiting for it to complete\")\n\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"mapsapp-pipeline\")\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-mapsapp-pipeline\", oc, true)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-mlbparks-pipeline\", oc, false)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-nationalparks-pipeline\", oc, false)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-parksmap-pipeline\", oc, false)\n\t\t\tbr.AssertSuccess()\n\n\t\t\t\/\/ wait for the service to be running\n\t\t\tg.By(\"expecting the parksmap-web service to be deployed and running\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"parksmap\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t})\n\t})*\/\n\n\tg.Context(\"Blue-green pipeline\", func() {\n\t\tg.It(\"Blue-green pipeline should build and complete successfully\", func() {\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", blueGreenPipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", blueGreenPipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\twg := &sync.WaitGroup{}\n\t\t\twg.Add(1)\n\n\t\t\t\/\/ start the build\n\t\t\tgo func() {\n\t\t\t\tdefer g.GinkgoRecover()\n\t\t\t\tg.By(\"starting the bluegreen pipeline build and waiting for it to complete\")\n\t\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"bluegreen-pipeline\")\n\t\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-bluegreen-pipeline\", oc, true)\n\t\t\t\tbr.AssertSuccess()\n\n\t\t\t\tg.By(\"verifying that the main route has been switched to green\")\n\t\t\t\tvalue, err := oc.Run(\"get\").Args(\"route\", \"nodejs-mongodb-example\", \"-o\", \"jsonpath={ .spec.to.name }\").Output()\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\tactiveRoute := strings.TrimSpace(value)\n\t\t\t\tg.By(\"verifying that the active route is 'nodejs-mongodb-example-green'\")\n\t\t\t\to.Expect(activeRoute).To(o.Equal(\"nodejs-mongodb-example-green\"))\n\t\t\t\twg.Done()\n\t\t\t}()\n\n\t\t\t\/\/ wait for the green service to be available\n\t\t\tg.By(\"waiting for the nodejs-mongodb-example-green service to be available\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"nodejs-mongodb-example-green\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ approve the Jenkins pipeline\n\t\t\tg.By(\"Waiting for the approval prompt\")\n\t\t\tjobName := oc.Namespace() + \"-bluegreen-pipeline\"\n\t\t\t_, err = j.WaitForContent(\"Approve?\", 200, 10*time.Minute, \"job\/%s\/lastBuild\/consoleText\", jobName)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Approving the current build\")\n\t\t\t_, _, err = j.Post(nil, fmt.Sprintf(\"job\/%s\/lastBuild\/input\/Approval\/proceedEmpty\", jobName), \"\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ wait for first build completion and verification\n\t\t\tg.By(\"Waiting for the build to complete successfully\")\n\t\t\twg.Wait()\n\n\t\t\twg = &sync.WaitGroup{}\n\t\t\twg.Add(1)\n\n\t\t\t\/\/ start the build again\n\t\t\tgo func() {\n\t\t\t\tdefer g.GinkgoRecover()\n\t\t\t\tg.By(\"starting the bluegreen pipeline build and waiting for it to complete\")\n\t\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"bluegreen-pipeline\")\n\t\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-bluegreen-pipeline\", oc, true)\n\t\t\t\tbr.AssertSuccess()\n\n\t\t\t\tg.By(\"verifying that the main route has been switched to blue\")\n\t\t\t\tvalue, err := oc.Run(\"get\").Args(\"route\", \"nodejs-mongodb-example\", \"-o\", \"jsonpath={ .spec.to.name }\").Output()\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\tactiveRoute := strings.TrimSpace(value)\n\t\t\t\tg.By(\"verifying that the active route is 'nodejs-mongodb-example-blue'\")\n\t\t\t\to.Expect(activeRoute).To(o.Equal(\"nodejs-mongodb-example-blue\"))\n\t\t\t\twg.Done()\n\t\t\t}()\n\n\t\t\t\/\/ wait for the blue service to be available\n\t\t\tg.By(\"waiting for the nodejs-mongodb-example-blue service to be available\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"nodejs-mongodb-example-blue\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ approve the Jenkins pipeline\n\t\t\tg.By(\"Waiting for the approval prompt\")\n\t\t\t_, err = j.WaitForContent(\"Approve?\", 200, 10*time.Minute, \"job\/%s\/lastBuild\/consoleText\", jobName)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Approving the current build\")\n\t\t\t_, _, err = j.Post(nil, fmt.Sprintf(\"job\/%s\/lastBuild\/input\/Approval\/proceedEmpty\", jobName), \"\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\twg.Wait()\n\t\t})\n\t})\n\n})\n<commit_msg>fix jenkins blue-green ext test so it does not block forever on build\/deploy error<commit_after>package builds\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/jenkins\"\n\tsutil \"github.com\/openshift\/source-to-image\/pkg\/util\"\n)\n\nfunc debugAnyJenkinsFailure(br *exutil.BuildResult, name string, oc *exutil.CLI, dumpMaster bool) {\n\tif !br.BuildSuccess {\n\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n\\n START debugAnyJenkinsFailure\\n\\n\")\n\t\tj := jenkins.NewRef(oc)\n\t\tjobLog, err := j.GetLastJobConsoleLogs(name)\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n %s job log:\\n%s\", name, jobLog)\n\t\t} else {\n\t\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n error getting %s job log: %#v\", name, err)\n\t\t}\n\t\tif dumpMaster {\n\t\t\texutil.DumpDeploymentLogs(\"jenkins\", oc)\n\t\t}\n\t\tfmt.Fprintf(g.GinkgoWriter, \"\\n\\n END debugAnyJenkinsFailure\\n\\n\")\n\t}\n}\n\nvar _ = g.Describe(\"[builds][Slow] openshift pipeline build\", func() {\n\tdefer g.GinkgoRecover()\n\tvar (\n\t\tjenkinsTemplatePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"jenkins-ephemeral-template.json\")\n\t\tmavenSlavePipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"maven-pipeline.yaml\")\n\t\t\/\/orchestrationPipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"mapsapp-pipeline.yaml\")\n\t\tblueGreenPipelinePath = exutil.FixturePath(\"..\", \"..\", \"examples\", \"jenkins\", \"pipeline\", \"bluegreen-pipeline.yaml\")\n\n\t\toc = exutil.NewCLI(\"jenkins-pipeline\", exutil.KubeConfigPath())\n\t\tticker *time.Ticker\n\t\tj *jenkins.JenkinsRef\n\t\tdcLogFollow *exec.Cmd\n\t\tdcLogStdOut, dcLogStdErr *bytes.Buffer\n\t\tsetupJenkins = func() {\n\t\t\t\/\/ Deploy Jenkins\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", jenkinsTemplatePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", jenkinsTemplatePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"waiting for jenkins deployment\")\n\t\t\terr = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), \"jenkins\", oc)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tj = jenkins.NewRef(oc)\n\n\t\t\tg.By(\"wait for jenkins to come up\")\n\t\t\t_, err = j.WaitForContent(\"\", 200, 10*time.Minute, \"\")\n\n\t\t\tif err != nil {\n\t\t\t\texutil.DumpDeploymentLogs(\"jenkins\", oc)\n\t\t\t}\n\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ Start capturing logs from this deployment config.\n\t\t\t\/\/ This command will terminate if the Jenkins instance crashes. This\n\t\t\t\/\/ ensures that even if the Jenkins DC restarts, we should capture\n\t\t\t\/\/ logs from the crash.\n\t\t\tdcLogFollow, dcLogStdOut, dcLogStdErr, err = oc.Run(\"logs\").Args(\"-f\", \"dc\/jenkins\").Background()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t}\n\t)\n\n\tg.AfterEach(func() {\n\t\tif os.Getenv(jenkins.DisableJenkinsGCSTats) == \"\" {\n\t\t\tg.By(\"stop jenkins gc tracking\")\n\t\t\tticker.Stop()\n\t\t}\n\t})\n\n\tg.BeforeEach(func() {\n\t\tsetupJenkins()\n\n\t\tif os.Getenv(jenkins.DisableJenkinsGCSTats) == \"\" {\n\t\t\tg.By(\"start jenkins gc tracking\")\n\t\t\tticker = jenkins.StartJenkinsGCTracking(oc, oc.Namespace())\n\t\t}\n\n\t\tg.By(\"waiting for builder service account\")\n\t\terr := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t})\n\n\tg.Context(\"Pipeline with maven slave\", func() {\n\t\tg.It(\"should build and complete successfully\", func() {\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", mavenSlavePipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", mavenSlavePipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ start the build\n\t\t\tg.By(\"starting the pipeline build and waiting for it to complete\")\n\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"openshift-jee-sample\")\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-openshift-jee-sample\", oc, true)\n\t\t\tbr.AssertSuccess()\n\n\t\t\t\/\/ wait for the service to be running\n\t\t\tg.By(\"expecting the openshift-jee-sample service to be deployed and running\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"openshift-jee-sample\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t})\n\t})\n\n\t\/*g.Context(\"Orchestration pipeline\", func() {\n\t\tg.It(\"should build and complete successfully\", func() {\n\t\t\tsetupJenkins()\n\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", orchestrationPipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", orchestrationPipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ start the build\n\t\t\tg.By(\"starting the pipeline build and waiting for it to complete\")\n\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"mapsapp-pipeline\")\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-mapsapp-pipeline\", oc, true)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-mlbparks-pipeline\", oc, false)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-nationalparks-pipeline\", oc, false)\n\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-parksmap-pipeline\", oc, false)\n\t\t\tbr.AssertSuccess()\n\n\t\t\t\/\/ wait for the service to be running\n\t\t\tg.By(\"expecting the parksmap-web service to be deployed and running\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"parksmap\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t})\n\t})*\/\n\n\tg.Context(\"Blue-green pipeline\", func() {\n\t\tg.It(\"Blue-green pipeline should build and complete successfully\", func() {\n\t\t\t\/\/ instantiate the template\n\t\t\tg.By(fmt.Sprintf(\"calling oc new-app -f %q\", blueGreenPipelinePath))\n\t\t\terr := oc.Run(\"new-app\").Args(\"-f\", blueGreenPipelinePath).Execute()\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\twg := &sync.WaitGroup{}\n\t\t\twg.Add(1)\n\n\t\t\t\/\/ start the build\n\t\t\tgo func() {\n\t\t\t\tdefer g.GinkgoRecover()\n\t\t\t\tdefer wg.Done()\n\t\t\t\tg.By(\"starting the bluegreen pipeline build and waiting for it to complete\")\n\t\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"bluegreen-pipeline\")\n\t\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-bluegreen-pipeline\", oc, true)\n\t\t\t\tbr.AssertSuccess()\n\n\t\t\t\tg.By(\"verifying that the main route has been switched to green\")\n\t\t\t\tvalue, err := oc.Run(\"get\").Args(\"route\", \"nodejs-mongodb-example\", \"-o\", \"jsonpath={ .spec.to.name }\").Output()\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\tactiveRoute := strings.TrimSpace(value)\n\t\t\t\tg.By(\"verifying that the active route is 'nodejs-mongodb-example-green'\")\n\t\t\t\to.Expect(activeRoute).To(o.Equal(\"nodejs-mongodb-example-green\"))\n\t\t\t}()\n\n\t\t\t\/\/ wait for the green service to be available\n\t\t\tg.By(\"waiting for the nodejs-mongodb-example-green service to be available\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"nodejs-mongodb-example-green\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ approve the Jenkins pipeline\n\t\t\tg.By(\"Waiting for the approval prompt\")\n\t\t\tjobName := oc.Namespace() + \"-bluegreen-pipeline\"\n\t\t\t_, err = j.WaitForContent(\"Approve?\", 200, 10*time.Minute, \"job\/%s\/lastBuild\/consoleText\", jobName)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Approving the current build\")\n\t\t\t_, _, err = j.Post(nil, fmt.Sprintf(\"job\/%s\/lastBuild\/input\/Approval\/proceedEmpty\", jobName), \"\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ wait for first build completion and verification\n\t\t\tg.By(\"Waiting for the first build to complete successfully\")\n\t\t\terr = sutil.TimeoutAfter(time.Minute*10, \"first blue-green build timed out before WaitGroup quit blocking\", func(timeoutTimer *time.Timer) error {\n\t\t\t\tg.By(\"start wg.Wait() for build completion and verification\")\n\t\t\t\twg.Wait()\n\t\t\t\tg.By(\"build completion and verification good to go\")\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\twg = &sync.WaitGroup{}\n\t\t\twg.Add(1)\n\n\t\t\t\/\/ start the build again\n\t\t\tgo func() {\n\t\t\t\tdefer g.GinkgoRecover()\n\t\t\t\tdefer wg.Done()\n\t\t\t\tg.By(\"starting the bluegreen pipeline build and waiting for it to complete\")\n\t\t\t\tbr, _ := exutil.StartBuildAndWait(oc, \"bluegreen-pipeline\")\n\t\t\t\tdebugAnyJenkinsFailure(br, oc.Namespace()+\"-bluegreen-pipeline\", oc, true)\n\t\t\t\tbr.AssertSuccess()\n\n\t\t\t\tg.By(\"verifying that the main route has been switched to blue\")\n\t\t\t\tvalue, err := oc.Run(\"get\").Args(\"route\", \"nodejs-mongodb-example\", \"-o\", \"jsonpath={ .spec.to.name }\").Output()\n\t\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t\t\tactiveRoute := strings.TrimSpace(value)\n\t\t\t\tg.By(\"verifying that the active route is 'nodejs-mongodb-example-blue'\")\n\t\t\t\to.Expect(activeRoute).To(o.Equal(\"nodejs-mongodb-example-blue\"))\n\t\t\t}()\n\n\t\t\t\/\/ wait for the blue service to be available\n\t\t\tg.By(\"waiting for the nodejs-mongodb-example-blue service to be available\")\n\t\t\t_, err = exutil.GetEndpointAddress(oc, \"nodejs-mongodb-example-blue\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ approve the Jenkins pipeline\n\t\t\tg.By(\"Waiting for the approval prompt\")\n\t\t\t_, err = j.WaitForContent(\"Approve?\", 200, 10*time.Minute, \"job\/%s\/lastBuild\/consoleText\", jobName)\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\tg.By(\"Approving the current build\")\n\t\t\t_, _, err = j.Post(nil, fmt.Sprintf(\"job\/%s\/lastBuild\/input\/Approval\/proceedEmpty\", jobName), \"\")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\t\t\/\/ wait for second build completion and verification\n\t\t\tg.By(\"Waiting for the second build to complete successfully\")\n\t\t\terr = sutil.TimeoutAfter(time.Minute*10, \"second blue-green build timed out before WaitGroup quit blocking\", func(timeoutTimer *time.Timer) error {\n\t\t\t\tg.By(\"start wg.Wait() for build completion and verification\")\n\t\t\t\twg.Wait()\n\t\t\t\tg.By(\"build completion and verification good to go\")\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\t})\n\t})\n\n})\n<|endoftext|>"} {"text":"<commit_before>package integration\n\nimport (\n\t\"bytes\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libcompose\/docker\"\n\t\"github.com\/samalba\/dockerclient\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar random = rand.New(rand.NewSource(time.Now().Unix()))\n\nfunc RandStr(n int) string {\n\tletters := []rune(\"abcdefghijklmnopqrstuvwxyz\")\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[random.Intn(len(letters))]\n\t}\n\treturn string(b)\n}\n\ntype RunSuite struct {\n\tcommand string\n\tprojects []string\n}\n\nvar _ = Suite(&RunSuite{\n\tcommand: \"..\/bundles\/libcompose-cli_linux-amd64\",\n})\n\nfunc (s *RunSuite) CreateProjectFromText(c *C, input string) string {\n\treturn s.ProjectFromText(c, \"create\", input)\n}\n\nfunc (s *RunSuite) RandomProject() string {\n\treturn \"test-project-\" + RandStr(7)\n}\n\nfunc (s *RunSuite) ProjectFromText(c *C, command, input string) string {\n\tprojectName := s.RandomProject()\n\treturn s.FromText(c, projectName, command, input)\n}\n\nfunc (s *RunSuite) FromText(c *C, projectName, command string, argsAndInput ...string) string {\n\targs := []string{\"--verbose\", \"-p\", projectName, \"-f\", \"-\", command}\n\targs = append(args, argsAndInput[0:len(argsAndInput)-1]...)\n\n\tinput := argsAndInput[len(argsAndInput)-1]\n\n\tif command == \"up\" {\n\t\targs = append(args, \"-d\")\n\t} else if command == \"down\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t} else if command == \"restart\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t} else if command == \"stop\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t}\n\n\tlogrus.Infof(\"Running %s %v\", command, args)\n\n\tcmd := exec.Command(s.command, args...)\n\tcmd.Stdin = bytes.NewBufferString(strings.Replace(input, \"\\t\", \" \", -1))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stdout\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to run %s %v: %v\\n with input:\\n%s\", s.command, err, args, input)\n\t}\n\n\tc.Assert(err, IsNil)\n\n\treturn projectName\n}\n\nfunc GetClient(c *C) dockerclient.Client {\n\tclient, err := docker.CreateClient(docker.ClientOpts{})\n\n\tc.Assert(err, IsNil)\n\n\treturn client\n}\n\nfunc (s *RunSuite) GetContainerByName(c *C, name string) *dockerclient.ContainerInfo {\n\tclient := GetClient(c)\n\tcontainer, err := docker.GetContainerByName(client, name)\n\n\tc.Assert(err, IsNil)\n\n\tif container == nil {\n\t\treturn nil\n\t}\n\n\tinfo, err := client.InspectContainer(container.Id)\n\n\tc.Assert(err, IsNil)\n\n\treturn info\n}\n\nfunc (s *RunSuite) GetContainersByProject(c *C, project string) []dockerclient.Container {\n\tclient := GetClient(c)\n\tcontainers, err := docker.GetContainersByFilter(client, docker.PROJECT.Eq(project))\n\n\tc.Assert(err, IsNil)\n\n\treturn containers\n}\n<commit_msg>Clean containers after each tests.<commit_after>package integration\n\nimport (\n\t\"bytes\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libcompose\/docker\"\n\t\"github.com\/samalba\/dockerclient\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\nvar random = rand.New(rand.NewSource(time.Now().Unix()))\n\nfunc RandStr(n int) string {\n\tletters := []rune(\"abcdefghijklmnopqrstuvwxyz\")\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[random.Intn(len(letters))]\n\t}\n\treturn string(b)\n}\n\ntype RunSuite struct {\n\tcommand string\n\tprojects []string\n}\n\nfunc (r *RunSuite) TearDownTest(c *C) {\n\t\/\/ Delete all containers\n\tclient := GetClient(c)\n\tcontainers, err := client.ListContainers(true, false, \"\")\n\tc.Assert(err, IsNil)\n\tfor _, container := range containers {\n\t\terr := client.RemoveContainer(container.Id, true, true)\n\t\tc.Assert(err, IsNil)\n\t}\n}\n\nvar _ = Suite(&RunSuite{\n\tcommand: \"..\/bundles\/libcompose-cli_linux-amd64\",\n})\n\nfunc (s *RunSuite) CreateProjectFromText(c *C, input string) string {\n\treturn s.ProjectFromText(c, \"create\", input)\n}\n\nfunc (s *RunSuite) RandomProject() string {\n\treturn \"test-project-\" + RandStr(7)\n}\n\nfunc (s *RunSuite) ProjectFromText(c *C, command, input string) string {\n\tprojectName := s.RandomProject()\n\treturn s.FromText(c, projectName, command, input)\n}\n\nfunc (s *RunSuite) FromText(c *C, projectName, command string, argsAndInput ...string) string {\n\targs := []string{\"--verbose\", \"-p\", projectName, \"-f\", \"-\", command}\n\targs = append(args, argsAndInput[0:len(argsAndInput)-1]...)\n\n\tinput := argsAndInput[len(argsAndInput)-1]\n\n\tif command == \"up\" {\n\t\targs = append(args, \"-d\")\n\t} else if command == \"down\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t} else if command == \"restart\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t} else if command == \"stop\" {\n\t\targs = append(args, \"--timeout\", \"0\")\n\t}\n\n\tlogrus.Infof(\"Running %s %v\", command, args)\n\n\tcmd := exec.Command(s.command, args...)\n\tcmd.Stdin = bytes.NewBufferString(strings.Replace(input, \"\\t\", \" \", -1))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stdout\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to run %s %v: %v\\n with input:\\n%s\", s.command, err, args, input)\n\t}\n\n\tc.Assert(err, IsNil)\n\n\treturn projectName\n}\n\nfunc GetClient(c *C) dockerclient.Client {\n\tclient, err := docker.CreateClient(docker.ClientOpts{})\n\n\tc.Assert(err, IsNil)\n\n\treturn client\n}\n\nfunc (s *RunSuite) GetContainerByName(c *C, name string) *dockerclient.ContainerInfo {\n\tclient := GetClient(c)\n\tcontainer, err := docker.GetContainerByName(client, name)\n\n\tc.Assert(err, IsNil)\n\n\tif container == nil {\n\t\treturn nil\n\t}\n\n\tinfo, err := client.InspectContainer(container.Id)\n\n\tc.Assert(err, IsNil)\n\n\treturn info\n}\n\nfunc (s *RunSuite) GetContainersByProject(c *C, project string) []dockerclient.Container {\n\tclient := GetClient(c)\n\tcontainers, err := docker.GetContainersByFilter(client, docker.PROJECT.Eq(project))\n\n\tc.Assert(err, IsNil)\n\n\treturn containers\n}\n<|endoftext|>"} {"text":"<commit_before>package spotify\n\nimport (\n\t\"errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"net\/http\"\n\t\"testing\"\n)\n\nfunc TestClient_NextPage(t *testing.T) {\n\ttestTable := []struct {\n\t\tName string\n\t\tInput *basePage\n\t\tExpectedPath string\n\t\tErr error\n\t}{\n\t\t{\n\t\t\t\"success\",\n\t\t\t&basePage{\n\t\t\t\tNext: \"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\t\tTotal: 600,\n\t\t\t},\n\t\t\t\"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"no more pages\",\n\t\t\t&basePage{\n\t\t\t\tNext: \"\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\tErrNoMorePages,\n\t\t},\n\t\t{\n\t\t\t\"nil pointer error\",\n\t\t\tnil,\n\t\t\t\"\",\n\t\t\terrors.New(\"spotify: p must be a non-nil pointer to a page\"),\n\t\t},\n\t}\n\n\tfor _, tt := range testTable {\n\t\tt.Run(tt.Name, func(t *testing.T) {\n\t\t\twasCalled := false\n\t\t\tclient, server := testClientString(200, `{\"total\": 100}`, func(request *http.Request) {\n\t\t\t\twasCalled = true\n\t\t\t\tassert.Equal(t, tt.ExpectedPath, request.URL.RequestURI())\n\t\t\t})\n\t\t\tif tt.Input != nil && tt.Input.Next != \"\" {\n\t\t\t\ttt.Input.Next = server.URL + tt.Input.Next \/\/ add fake server url so we intercept the message\n\t\t\t}\n\n\t\t\terr := client.NextPage(tt.Input)\n\t\t\tassert.Equal(t, tt.ExpectedPath != \"\", wasCalled)\n\t\t\tif tt.Err == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, 100, tt.Input.Total) \/\/ value should be from original 600\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, tt.Err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_PreviousPage(t *testing.T) {\n\ttestTable := []struct {\n\t\tName string\n\t\tInput *basePage\n\t\tExpectedPath string\n\t\tErr error\n\t}{\n\t\t{\n\t\t\t\"success\",\n\t\t\t&basePage{\n\t\t\t\tPrevious: \"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\t\tTotal: 600,\n\t\t\t},\n\t\t\t\"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"no more pages\",\n\t\t\t&basePage{\n\t\t\t\tPrevious: \"\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\tErrNoMorePages,\n\t\t},\n\t\t{\n\t\t\t\"nil pointer error\",\n\t\t\tnil,\n\t\t\t\"\",\n\t\t\terrors.New(\"spotify: p must be a non-nil pointer to a page\"),\n\t\t},\n\t}\n\n\tfor _, tt := range testTable {\n\t\tt.Run(tt.Name, func(t *testing.T) {\n\t\t\twasCalled := false\n\t\t\tclient, server := testClientString(200, `{\"total\": 100}`, func(request *http.Request) {\n\t\t\t\twasCalled = true\n\t\t\t\tassert.Equal(t, tt.ExpectedPath, request.URL.RequestURI())\n\t\t\t})\n\t\t\tif tt.Input != nil && tt.Input.Previous != \"\" {\n\t\t\t\ttt.Input.Previous = server.URL + tt.Input.Previous \/\/ add fake server url so we intercept the message\n\t\t\t}\n\n\t\t\terr := client.PreviousPage(tt.Input)\n\t\t\tassert.Equal(t, tt.ExpectedPath != \"\", wasCalled)\n\t\t\tif tt.Err == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, 100, tt.Input.Total) \/\/ value should be from original 600\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, tt.Err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Fix tests from broken merge<commit_after>package spotify\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"net\/http\"\n\t\"testing\"\n)\n\nfunc TestClient_NextPage(t *testing.T) {\n\ttestTable := []struct {\n\t\tName string\n\t\tInput *basePage\n\t\tExpectedPath string\n\t\tErr error\n\t}{\n\t\t{\n\t\t\t\"success\",\n\t\t\t&basePage{\n\t\t\t\tNext: \"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\t\tTotal: 600,\n\t\t\t},\n\t\t\t\"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"no more pages\",\n\t\t\t&basePage{\n\t\t\t\tNext: \"\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\tErrNoMorePages,\n\t\t},\n\t\t{\n\t\t\t\"nil pointer error\",\n\t\t\tnil,\n\t\t\t\"\",\n\t\t\terrors.New(\"spotify: p must be a non-nil pointer to a page\"),\n\t\t},\n\t}\n\n\tfor _, tt := range testTable {\n\t\tt.Run(tt.Name, func(t *testing.T) {\n\t\t\twasCalled := false\n\t\t\tclient, server := testClientString(200, `{\"total\": 100}`, func(request *http.Request) {\n\t\t\t\twasCalled = true\n\t\t\t\tassert.Equal(t, tt.ExpectedPath, request.URL.RequestURI())\n\t\t\t})\n\t\t\tif tt.Input != nil && tt.Input.Next != \"\" {\n\t\t\t\ttt.Input.Next = server.URL + tt.Input.Next \/\/ add fake server url so we intercept the message\n\t\t\t}\n\n\t\t\terr := client.NextPage(context.Background(), tt.Input)\n\t\t\tassert.Equal(t, tt.ExpectedPath != \"\", wasCalled)\n\t\t\tif tt.Err == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, 100, tt.Input.Total) \/\/ value should be from original 600\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, tt.Err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_PreviousPage(t *testing.T) {\n\ttestTable := []struct {\n\t\tName string\n\t\tInput *basePage\n\t\tExpectedPath string\n\t\tErr error\n\t}{\n\t\t{\n\t\t\t\"success\",\n\t\t\t&basePage{\n\t\t\t\tPrevious: \"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\t\tTotal: 600,\n\t\t\t},\n\t\t\t\"\/v1\/albums\/0sNOF9WDwhWunNAHPD3Baj\/tracks\",\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"no more pages\",\n\t\t\t&basePage{\n\t\t\t\tPrevious: \"\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\tErrNoMorePages,\n\t\t},\n\t\t{\n\t\t\t\"nil pointer error\",\n\t\t\tnil,\n\t\t\t\"\",\n\t\t\terrors.New(\"spotify: p must be a non-nil pointer to a page\"),\n\t\t},\n\t}\n\n\tfor _, tt := range testTable {\n\t\tt.Run(tt.Name, func(t *testing.T) {\n\t\t\twasCalled := false\n\t\t\tclient, server := testClientString(200, `{\"total\": 100}`, func(request *http.Request) {\n\t\t\t\twasCalled = true\n\t\t\t\tassert.Equal(t, tt.ExpectedPath, request.URL.RequestURI())\n\t\t\t})\n\t\t\tif tt.Input != nil && tt.Input.Previous != \"\" {\n\t\t\t\ttt.Input.Previous = server.URL + tt.Input.Previous \/\/ add fake server url so we intercept the message\n\t\t\t}\n\n\t\t\terr := client.PreviousPage(context.Background(), tt.Input)\n\t\t\tassert.Equal(t, tt.ExpectedPath != \"\", wasCalled)\n\t\t\tif tt.Err == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, 100, tt.Input.Total) \/\/ value should be from original 600\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, tt.Err.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/hyperhq\/libcompose\/config\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype composeConfigWrapper struct {\n\tServiceConfigs *config.ServiceConfigs `json:\"ServiceConfigs\"`\n\tVolumeConfigs map[string]*config.VolumeConfig `json:\"VolumeConfigs\"`\n\tNetworkConfigs map[string]*config.NetworkConfig `json:\"NetworkConfigs\"`\n\tAuthConfigs map[string]types.AuthConfig `json:\"auths\"`\n}\n\nfunc (cli *Client) ComposeUp(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif forcerecreate {\n\t\tquery.Set(\"forcerecreate\", \"true\")\n\t}\n\tif norecreate {\n\t\tquery.Set(\"norecreate\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tbody := composeConfigWrapper{\n\t\tServiceConfigs: c,\n\t\tVolumeConfigs: vc,\n\t\tNetworkConfigs: nc,\n\t\tAuthConfigs: auth,\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/up\", query, body, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeDown(project string, services []string, rmi string, vol, rmorphans bool) (io.ReadCloser, error) {\n\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif rmi != \"\" {\n\t\tquery.Set(\"rmi\", rmi)\n\t}\n\tif vol {\n\t\tquery.Set(\"vol\", \"true\")\n\t}\n\tif rmorphans {\n\t\tquery.Set(\"rmorphans\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/down\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeCreate(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif forcerecreate {\n\t\tquery.Set(\"forcerecreate\", \"true\")\n\t}\n\tif norecreate {\n\t\tquery.Set(\"norecreate\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tbody := composeConfigWrapper{\n\t\tServiceConfigs: c,\n\t\tVolumeConfigs: vc,\n\t\tNetworkConfigs: nc,\n\t\tAuthConfigs: auth,\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/create\", query, body, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeRm(project string, services []string, rmVol bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif rmVol {\n\t\tquery.Set(\"rmvol\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/rm\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeStart(project string, services []string) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/start\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeStop(project string, services []string, timeout int) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tquery.Set(\"seconds\", fmt.Sprintf(\"%d\", timeout))\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/stop\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeKill(project string, services []string, signal string) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tquery.Set(\"signal\", signal)\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/kill\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n<commit_msg>compose down parameter is rmvol<commit_after>package client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/hyperhq\/libcompose\/config\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype composeConfigWrapper struct {\n\tServiceConfigs *config.ServiceConfigs `json:\"ServiceConfigs\"`\n\tVolumeConfigs map[string]*config.VolumeConfig `json:\"VolumeConfigs\"`\n\tNetworkConfigs map[string]*config.NetworkConfig `json:\"NetworkConfigs\"`\n\tAuthConfigs map[string]types.AuthConfig `json:\"auths\"`\n}\n\nfunc (cli *Client) ComposeUp(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif forcerecreate {\n\t\tquery.Set(\"forcerecreate\", \"true\")\n\t}\n\tif norecreate {\n\t\tquery.Set(\"norecreate\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tbody := composeConfigWrapper{\n\t\tServiceConfigs: c,\n\t\tVolumeConfigs: vc,\n\t\tNetworkConfigs: nc,\n\t\tAuthConfigs: auth,\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/up\", query, body, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeDown(project string, services []string, rmi string, vol, rmorphans bool) (io.ReadCloser, error) {\n\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif rmi != \"\" {\n\t\tquery.Set(\"rmi\", rmi)\n\t}\n\tif vol {\n\t\tquery.Set(\"rmvol\", \"true\")\n\t}\n\tif rmorphans {\n\t\tquery.Set(\"rmorphans\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/down\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeCreate(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif forcerecreate {\n\t\tquery.Set(\"forcerecreate\", \"true\")\n\t}\n\tif norecreate {\n\t\tquery.Set(\"norecreate\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tbody := composeConfigWrapper{\n\t\tServiceConfigs: c,\n\t\tVolumeConfigs: vc,\n\t\tNetworkConfigs: nc,\n\t\tAuthConfigs: auth,\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/create\", query, body, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeRm(project string, services []string, rmVol bool) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif rmVol {\n\t\tquery.Set(\"rmvol\", \"true\")\n\t}\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/rm\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeStart(project string, services []string) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/start\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeStop(project string, services []string, timeout int) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tquery.Set(\"seconds\", fmt.Sprintf(\"%d\", timeout))\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/stop\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n\nfunc (cli *Client) ComposeKill(project string, services []string, signal string) (io.ReadCloser, error) {\n\tquery := url.Values{}\n\tquery.Set(\"project\", project)\n\tquery.Set(\"signal\", signal)\n\tif len(services) > 0 {\n\t\tquery.Set(\"services\", strings.Join(services, \"}{\"))\n\t}\n\tresp, err := cli.post(context.Background(), \"\/compose\/kill\", query, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.body, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/state\"\n\t\"github.com\/jacobsa\/syncutil\"\n\t\"github.com\/jacobsa\/timeutil\"\n)\n\n\/\/ Save a backup of the given directory, applying the supplied exclusions and\n\/\/ using the supplied score map to avoid reading file content when possible.\n\/\/ Return a score for the root of the backup.\nfunc Save(\n\tctx context.Context,\n\tdir string,\n\texclusions []*regexp.Regexp,\n\tscoreMap state.ScoreMap,\n\tblobStore blob.Store,\n\tlogger *log.Logger,\n\tclock timeutil.Clock) (score blob.Score, err error) {\n\tb := syncutil.NewBundle(ctx)\n\n\t\/\/ List the directory hierarchy, applying exclusions.\n\tlistedNodes := make(chan *fsNode, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(listedNodes)\n\t\terr = listNodes(ctx, dir, exclusions, listedNodes)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"listNodes: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Fill in scores for files that don't appear to have changed since the last\n\t\/\/ run.\n\tpostScoreMap := make(chan *fsNode, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(postScoreMap)\n\t\terr = consultScoreMap(ctx, scoreMap, clock, listedNodes, postScoreMap)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"consultScoreMap: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Fill in scores for those nodes that didn't hit in the cache. Do this\n\t\/\/ safely, respecting dependency order (children complete before parents\n\t\/\/ start).\n\tpostDAGTraversal := make(chan *fsNode, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(postDAGTraversal)\n\t\terr = fillInScores(\n\t\t\tctx,\n\t\t\tdir,\n\t\t\tblobStore,\n\t\t\tlogger,\n\t\t\tpostScoreMap,\n\t\t\tpostDAGTraversal)\n\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"fillInScores: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Update the score map with the results of the previous stage.\n\tpostScoreMapUpdate := make(chan *fsNode, 100)\n\t{\n\t\t\/\/ Tee the channel; updateScoreMap doesn't give output, nor does it modify\n\t\t\/\/ nodes.\n\t\ttmp := make(chan *fsNode, 100)\n\t\tb.Add(func(ctx context.Context) (err error) {\n\t\t\tdefer close(tmp)\n\t\t\tdefer close(postScoreMapUpdate)\n\n\t\t\terr = teeNodes(ctx, postDAGTraversal, tmp, postScoreMapUpdate)\n\t\t\treturn\n\t\t})\n\n\t\t\/\/ Run updateScoreMap.\n\t\tb.Add(func(ctx context.Context) (err error) {\n\t\t\terr = updateScoreMap(ctx, scoreMap, tmp)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"updateScoreMap: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treturn\n\t\t})\n\t}\n\n\t\/\/ Find the root score.\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tscore, err = findRootScore(postScoreMapUpdate)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"findRootScore: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\terr = b.Join()\n\treturn\n}\n\nfunc findRootScore(nodes <-chan *fsNode) (score blob.Score, err error) {\n\tfound := false\n\tfor n := range nodes {\n\t\t\/\/ Skip non-root nodes.\n\t\tif n.Parent != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Is this a duplicate?\n\t\tif found {\n\t\t\terr = fmt.Errorf(\"Found a duplicate root node: %#v\", n)\n\t\t\treturn\n\t\t}\n\n\t\tfound = true\n\n\t\t\/\/ We expect directory nodes to have exactly one score.\n\t\tif len(n.Info.Scores) != 1 {\n\t\t\terr = fmt.Errorf(\"Unexpected score count for rooT: %#v\", n)\n\t\t\treturn\n\t\t}\n\n\t\tscore = n.Info.Scores[0]\n\t}\n\n\tif !found {\n\t\terr = errors.New(\"No root node found\")\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc teeNodes(\n\tctx context.Context,\n\tin <-chan *fsNode,\n\tout1 chan<- *fsNode,\n\tout2 chan<- *fsNode) (err error) {\n\tfor n := range in {\n\t\t\/\/ Write to first output.\n\t\tselect {\n\t\tcase out1 <- n:\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ And the second.\n\t\tselect {\n\t\tcase out2 <- n:\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>Fixed save.go.<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/state\"\n\t\"github.com\/jacobsa\/syncutil\"\n\t\"github.com\/jacobsa\/timeutil\"\n)\n\n\/\/ Save a backup of the given directory, applying the supplied exclusions and\n\/\/ using the supplied score map to avoid reading file content when possible.\n\/\/ Return a score for the root of the backup.\nfunc Save(\n\tctx context.Context,\n\tdir string,\n\texclusions []*regexp.Regexp,\n\tscoreMap state.ScoreMap,\n\tblobStore blob.Store,\n\tlogger *log.Logger,\n\tclock timeutil.Clock) (score blob.Score, err error) {\n\tb := syncutil.NewBundle(ctx)\n\n\t\/\/ Visit each node in the graph, writing the processed nodes to a channel.\n\tprocessedNodes := make(chan *fsNode, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(processedNodes)\n\t\terr = errors.New(\"TODO\")\n\t\treturn\n\t})\n\n\t\/\/ Find the root score.\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tscore, err = findRootScore(processedNodes)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"findRootScore: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\terr = b.Join()\n\treturn\n}\n\nfunc findRootScore(nodes <-chan *fsNode) (score blob.Score, err error) {\n\tfound := false\n\tfor n := range nodes {\n\t\t\/\/ Skip non-root nodes.\n\t\tif n.Parent != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Is this a duplicate?\n\t\tif found {\n\t\t\terr = fmt.Errorf(\"Found a duplicate root node: %#v\", n)\n\t\t\treturn\n\t\t}\n\n\t\tfound = true\n\n\t\t\/\/ We expect directory nodes to have exactly one score.\n\t\tif len(n.Info.Scores) != 1 {\n\t\t\terr = fmt.Errorf(\"Unexpected score count for rooT: %#v\", n)\n\t\t\treturn\n\t\t}\n\n\t\tscore = n.Info.Scores[0]\n\t}\n\n\tif !found {\n\t\terr = errors.New(\"No root node found\")\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package interpreter\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"..\/dos\"\n)\n\ntype StatementT struct {\n\tArgv []string\n\tRedirect []*Redirecter\n\tTerm string\n}\n\nvar prefix []string = []string{\" 0<\", \" 1>\", \" 2>\"}\n\nvar PercentFunc = map[string]func() string{\n\t\"CD\": func() string {\n\t\twd, err := os.Getwd()\n\t\tif err == nil {\n\t\t\treturn wd\n\t\t} else {\n\t\t\treturn \"\"\n\t\t}\n\t},\n\t\"ERRORLEVEL\": func() string {\n\t\treturn ErrorLevel\n\t},\n}\n\nvar rxUnicode = regexp.MustCompile(\"^[uU]\\\\+?([0-9a-fA-F]+)$\")\n\nfunc chomp(buffer *bytes.Buffer) {\n\toriginal := buffer.String()\n\tbuffer.Reset()\n\tvar lastchar rune\n\tfor i, ch := range original {\n\t\tif i > 0 {\n\t\t\tbuffer.WriteRune(lastchar)\n\t\t}\n\t\tlastchar = ch\n\t}\n}\n\nfunc dequote(source *bytes.Buffer) string {\n\tvar buffer bytes.Buffer\n\n\tlastchar := ' '\n\tquote := false\n\tfor {\n\t\tch, _, err := source.ReadRune()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif ch == '~' && unicode.IsSpace(lastchar) {\n\t\t\tif home := dos.GetHome(); home != \"\" {\n\t\t\t\tbuffer.WriteString(home)\n\t\t\t} else {\n\t\t\t\tbuffer.WriteRune('~')\n\t\t\t}\n\t\t\tlastchar = '~'\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '%' {\n\t\t\tvar nameBuf bytes.Buffer\n\t\t\tfor {\n\t\t\t\tch, _, err = source.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\t\tbuffer.WriteString(nameBuf.String())\n\t\t\t\t\treturn buffer.String()\n\t\t\t\t}\n\t\t\t\tif ch == '%' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnameBuf.WriteRune(ch)\n\t\t\t}\n\t\t\tnameStr := nameBuf.String()\n\t\t\tvalue := os.Getenv(nameStr)\n\t\t\tif value != \"\" {\n\t\t\t\tbuffer.WriteString(value)\n\t\t\t} else if m := rxUnicode.FindStringSubmatch(nameStr); m != nil {\n\t\t\t\tucode, _ := strconv.ParseInt(m[1], 16, 32)\n\t\t\t\tbuffer.WriteRune(rune(ucode))\n\t\t\t} else if f, ok := PercentFunc[nameStr]; ok {\n\t\t\t\tbuffer.WriteString(f())\n\t\t\t} else {\n\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\tbuffer.WriteString(nameStr)\n\t\t\t\tbuffer.WriteRune('%')\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '\"' {\n\t\t\tquote = !quote\n\t\t\tif lastchar == '\"' && quote {\n\t\t\t\tbuffer.WriteRune('\"')\n\t\t\t\tlastchar = '\\000'\n\t\t\t}\n\t\t} else {\n\t\t\tbuffer.WriteRune(ch)\n\t\t}\n\t\tlastchar = ch\n\t}\n\treturn buffer.String()\n}\n\nfunc terminate(statements *[]StatementT,\n\tisRedirected *bool,\n\tredirect *[]*Redirecter,\n\tbuffer *bytes.Buffer,\n\targv *[]string,\n\tterm string) {\n\n\tvar statement1 StatementT\n\tif buffer.Len() > 0 {\n\t\tif *isRedirected && len(*redirect) > 0 {\n\t\t\t(*redirect)[len(*redirect)-1].SetPath(dequote(buffer))\n\t\t\t*isRedirected = false\n\t\t\tstatement1.Argv = *argv\n\t\t} else {\n\t\t\tstatement1.Argv = append(*argv, dequote(buffer))\n\t\t}\n\t\tbuffer.Reset()\n\t} else if len(*argv) <= 0 {\n\t\treturn\n\t} else {\n\t\tstatement1.Argv = *argv\n\t}\n\tstatement1.Redirect = *redirect\n\t*redirect = make([]*Redirecter, 0, 3)\n\t*argv = make([]string, 0)\n\tstatement1.Term = term\n\t*statements = append(*statements, statement1)\n}\n\nfunc parse1(text string) ([]StatementT, error) {\n\tisQuoted := false\n\tstatements := make([]StatementT, 0)\n\targv := make([]string, 0)\n\tlastchar := ' '\n\tvar buffer bytes.Buffer\n\tisNextRedirect := false\n\tredirect := make([]*Redirecter, 0, 3)\n\n\treader := strings.NewReader(text)\n\tfor reader.Len() > 0 {\n\t\tch, chSize, chErr := reader.ReadRune()\n\t\tif chSize <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tif chErr != nil {\n\t\t\treturn nil, chErr\n\t\t}\n\t\tif ch == '\"' {\n\t\t\tisQuoted = !isQuoted\n\t\t}\n\t\tif isQuoted {\n\t\t\tbuffer.WriteRune(ch)\n\t\t} else if ch == ' ' {\n\t\t\tif buffer.Len() > 0 {\n\t\t\t\tif isNextRedirect && len(redirect) > 0 {\n\t\t\t\t\tredirect[len(redirect)-1].SetPath(dequote(&buffer))\n\t\t\t\t} else {\n\t\t\t\t\targv = append(argv, dequote(&buffer))\n\t\t\t\t}\n\t\t\t\tbuffer.Reset()\n\t\t\t\tisNextRedirect = false\n\t\t\t}\n\t\t} else if lastchar == ' ' && ch == ';' {\n\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \";\")\n\t\t} else if ch == '|' {\n\t\t\tif lastchar == '|' {\n\t\t\t\tstatements[len(statements)-1].Term = \"||\"\n\t\t\t} else {\n\t\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \"|\")\n\t\t\t}\n\t\t} else if ch == '&' {\n\t\t\tswitch lastchar {\n\t\t\tcase '&':\n\t\t\t\tstatements[len(statements)-1].Term = \"&&\"\n\t\t\tcase '|':\n\t\t\t\tstatements[len(statements)-1].Term = \"|&\"\n\t\t\tcase '>':\n\t\t\t\t\/\/ >&[n]\n\t\t\t\tch2, ch2siz, ch2err := reader.ReadRune()\n\t\t\t\tif ch2err != nil {\n\t\t\t\t\treturn nil, ch2err\n\t\t\t\t}\n\t\t\t\tif ch2siz <= 0 {\n\t\t\t\t\treturn nil, errors.New(\"Too Near EOF for >&\")\n\t\t\t\t}\n\t\t\t\tred := redirect[len(redirect)-1]\n\t\t\t\tswitch ch2 {\n\t\t\t\tcase '1':\n\t\t\t\t\tred.DupFrom(1)\n\t\t\t\tcase '2':\n\t\t\t\t\tred.DupFrom(2)\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errors.New(\"Syntax error after >&\")\n\t\t\t\t}\n\t\t\t\tisNextRedirect = false\n\t\t\tdefault:\n\t\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \"&\")\n\t\t\t}\n\t\t} else if ch == '>' {\n\t\t\tswitch lastchar {\n\t\t\tcase '1':\n\t\t\t\t\/\/ 1>\n\t\t\t\tchomp(&buffer)\n\t\t\t\tredirect = append(redirect, NewRedirecter(1))\n\t\t\tcase '2':\n\t\t\t\t\/\/ 2>\n\t\t\t\tchomp(&buffer)\n\t\t\t\tredirect = append(redirect, NewRedirecter(2))\n\t\t\tcase '>':\n\t\t\t\t\/\/ >>\n\t\t\t\tif len(redirect) >= 0 {\n\t\t\t\t\tredirect[len(redirect)-1].SetAppend()\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t\/\/ >\n\t\t\t\tredirect = append(redirect, NewRedirecter(1))\n\t\t\t}\n\t\t\tisNextRedirect = true\n\t\t} else if ch == '<' {\n\t\t\tredirect = append(redirect, NewRedirecter(0))\n\t\t\tisNextRedirect = true\n\t\t} else {\n\t\t\tbuffer.WriteRune(ch)\n\t\t}\n\t\tlastchar = ch\n\t}\n\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \" \")\n\treturn statements, nil\n}\n\n\/\/ Make arrays whose elements are pipelines\nfunc parse2(statements []StatementT) [][]StatementT {\n\tresult := make([][]StatementT, 1)\n\tfor _, statement1 := range statements {\n\t\tresult[len(result)-1] = append(result[len(result)-1], statement1)\n\t\tswitch statement1.Term {\n\t\tcase \"|\", \"|&\":\n\n\t\tdefault:\n\t\t\tresult = append(result, make([]StatementT, 0))\n\t\t}\n\t}\n\tif len(result[len(result)-1]) <= 0 {\n\t\tresult = result[0 : len(result)-1]\n\t}\n\treturn result\n}\n\nfunc Parse(text string) ([][]StatementT, error) {\n\tresult1, err := parse1(text)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult2 := parse2(result1)\n\treturn result2, nil\n}\n<commit_msg>Fix #57 - Miss a double-quotation after a percent<commit_after>package interpreter\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"..\/dos\"\n)\n\ntype StatementT struct {\n\tArgv []string\n\tRedirect []*Redirecter\n\tTerm string\n}\n\nvar prefix []string = []string{\" 0<\", \" 1>\", \" 2>\"}\n\nvar PercentFunc = map[string]func() string{\n\t\"CD\": func() string {\n\t\twd, err := os.Getwd()\n\t\tif err == nil {\n\t\t\treturn wd\n\t\t} else {\n\t\t\treturn \"\"\n\t\t}\n\t},\n\t\"ERRORLEVEL\": func() string {\n\t\treturn ErrorLevel\n\t},\n}\n\nvar rxUnicode = regexp.MustCompile(\"^[uU]\\\\+?([0-9a-fA-F]+)$\")\n\nfunc chomp(buffer *bytes.Buffer) {\n\toriginal := buffer.String()\n\tbuffer.Reset()\n\tvar lastchar rune\n\tfor i, ch := range original {\n\t\tif i > 0 {\n\t\t\tbuffer.WriteRune(lastchar)\n\t\t}\n\t\tlastchar = ch\n\t}\n}\n\nfunc dequote(source *bytes.Buffer) string {\n\tvar buffer bytes.Buffer\n\n\tlastchar := ' '\n\tquote := false\n\tfor {\n\t\tch, _, err := source.ReadRune()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif ch == '~' && unicode.IsSpace(lastchar) {\n\t\t\tif home := dos.GetHome(); home != \"\" {\n\t\t\t\tbuffer.WriteString(home)\n\t\t\t} else {\n\t\t\t\tbuffer.WriteRune('~')\n\t\t\t}\n\t\t\tlastchar = '~'\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '%' {\n\t\t\tvar nameBuf bytes.Buffer\n\t\t\tfor {\n\t\t\t\tch, _, err = source.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\t\tbuffer.WriteString(nameBuf.String())\n\t\t\t\t\treturn buffer.String()\n\t\t\t\t}\n\t\t\t\tif ch == '%' {\n\t\t\t\t\tnameStr := nameBuf.String()\n\t\t\t\t\tvalue := os.Getenv(nameStr)\n\t\t\t\t\tif value != \"\" {\n\t\t\t\t\t\tbuffer.WriteString(value)\n\t\t\t\t\t} else if m := rxUnicode.FindStringSubmatch(nameStr); m != nil {\n\t\t\t\t\t\tucode, _ := strconv.ParseInt(m[1], 16, 32)\n\t\t\t\t\t\tbuffer.WriteRune(rune(ucode))\n\t\t\t\t\t} else if f, ok := PercentFunc[nameStr]; ok {\n\t\t\t\t\t\tbuffer.WriteString(f())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\t\t\tbuffer.WriteString(nameStr)\n\t\t\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !unicode.IsLower(ch) && !unicode.IsUpper(ch) && !unicode.IsNumber(ch) && ch != '_' {\n\t\t\t\t\tsource.UnreadRune()\n\t\t\t\t\tbuffer.WriteRune('%')\n\t\t\t\t\tbuffer.WriteString(nameBuf.String())\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnameBuf.WriteRune(ch)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '\"' {\n\t\t\tquote = !quote\n\t\t\tif lastchar == '\"' && quote {\n\t\t\t\tbuffer.WriteRune('\"')\n\t\t\t\tlastchar = '\\000'\n\t\t\t}\n\t\t} else {\n\t\t\tbuffer.WriteRune(ch)\n\t\t}\n\t\tlastchar = ch\n\t}\n\treturn buffer.String()\n}\n\nfunc terminate(statements *[]StatementT,\n\tisRedirected *bool,\n\tredirect *[]*Redirecter,\n\tbuffer *bytes.Buffer,\n\targv *[]string,\n\tterm string) {\n\n\tvar statement1 StatementT\n\tif buffer.Len() > 0 {\n\t\tif *isRedirected && len(*redirect) > 0 {\n\t\t\t(*redirect)[len(*redirect)-1].SetPath(dequote(buffer))\n\t\t\t*isRedirected = false\n\t\t\tstatement1.Argv = *argv\n\t\t} else {\n\t\t\tstatement1.Argv = append(*argv, dequote(buffer))\n\t\t}\n\t\tbuffer.Reset()\n\t} else if len(*argv) <= 0 {\n\t\treturn\n\t} else {\n\t\tstatement1.Argv = *argv\n\t}\n\tstatement1.Redirect = *redirect\n\t*redirect = make([]*Redirecter, 0, 3)\n\t*argv = make([]string, 0)\n\tstatement1.Term = term\n\t*statements = append(*statements, statement1)\n}\n\nfunc parse1(text string) ([]StatementT, error) {\n\tisQuoted := false\n\tstatements := make([]StatementT, 0)\n\targv := make([]string, 0)\n\tlastchar := ' '\n\tvar buffer bytes.Buffer\n\tisNextRedirect := false\n\tredirect := make([]*Redirecter, 0, 3)\n\n\treader := strings.NewReader(text)\n\tfor reader.Len() > 0 {\n\t\tch, chSize, chErr := reader.ReadRune()\n\t\tif chSize <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tif chErr != nil {\n\t\t\treturn nil, chErr\n\t\t}\n\t\tif ch == '\"' {\n\t\t\tisQuoted = !isQuoted\n\t\t}\n\t\tif isQuoted {\n\t\t\tbuffer.WriteRune(ch)\n\t\t} else if ch == ' ' {\n\t\t\tif buffer.Len() > 0 {\n\t\t\t\tif isNextRedirect && len(redirect) > 0 {\n\t\t\t\t\tredirect[len(redirect)-1].SetPath(dequote(&buffer))\n\t\t\t\t} else {\n\t\t\t\t\targv = append(argv, dequote(&buffer))\n\t\t\t\t}\n\t\t\t\tbuffer.Reset()\n\t\t\t\tisNextRedirect = false\n\t\t\t}\n\t\t} else if lastchar == ' ' && ch == ';' {\n\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \";\")\n\t\t} else if ch == '|' {\n\t\t\tif lastchar == '|' {\n\t\t\t\tstatements[len(statements)-1].Term = \"||\"\n\t\t\t} else {\n\t\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \"|\")\n\t\t\t}\n\t\t} else if ch == '&' {\n\t\t\tswitch lastchar {\n\t\t\tcase '&':\n\t\t\t\tstatements[len(statements)-1].Term = \"&&\"\n\t\t\tcase '|':\n\t\t\t\tstatements[len(statements)-1].Term = \"|&\"\n\t\t\tcase '>':\n\t\t\t\t\/\/ >&[n]\n\t\t\t\tch2, ch2siz, ch2err := reader.ReadRune()\n\t\t\t\tif ch2err != nil {\n\t\t\t\t\treturn nil, ch2err\n\t\t\t\t}\n\t\t\t\tif ch2siz <= 0 {\n\t\t\t\t\treturn nil, errors.New(\"Too Near EOF for >&\")\n\t\t\t\t}\n\t\t\t\tred := redirect[len(redirect)-1]\n\t\t\t\tswitch ch2 {\n\t\t\t\tcase '1':\n\t\t\t\t\tred.DupFrom(1)\n\t\t\t\tcase '2':\n\t\t\t\t\tred.DupFrom(2)\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errors.New(\"Syntax error after >&\")\n\t\t\t\t}\n\t\t\t\tisNextRedirect = false\n\t\t\tdefault:\n\t\t\t\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \"&\")\n\t\t\t}\n\t\t} else if ch == '>' {\n\t\t\tswitch lastchar {\n\t\t\tcase '1':\n\t\t\t\t\/\/ 1>\n\t\t\t\tchomp(&buffer)\n\t\t\t\tredirect = append(redirect, NewRedirecter(1))\n\t\t\tcase '2':\n\t\t\t\t\/\/ 2>\n\t\t\t\tchomp(&buffer)\n\t\t\t\tredirect = append(redirect, NewRedirecter(2))\n\t\t\tcase '>':\n\t\t\t\t\/\/ >>\n\t\t\t\tif len(redirect) >= 0 {\n\t\t\t\t\tredirect[len(redirect)-1].SetAppend()\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t\/\/ >\n\t\t\t\tredirect = append(redirect, NewRedirecter(1))\n\t\t\t}\n\t\t\tisNextRedirect = true\n\t\t} else if ch == '<' {\n\t\t\tredirect = append(redirect, NewRedirecter(0))\n\t\t\tisNextRedirect = true\n\t\t} else {\n\t\t\tbuffer.WriteRune(ch)\n\t\t}\n\t\tlastchar = ch\n\t}\n\tterminate(&statements, &isNextRedirect, &redirect, &buffer, &argv, \" \")\n\treturn statements, nil\n}\n\n\/\/ Make arrays whose elements are pipelines\nfunc parse2(statements []StatementT) [][]StatementT {\n\tresult := make([][]StatementT, 1)\n\tfor _, statement1 := range statements {\n\t\tresult[len(result)-1] = append(result[len(result)-1], statement1)\n\t\tswitch statement1.Term {\n\t\tcase \"|\", \"|&\":\n\n\t\tdefault:\n\t\t\tresult = append(result, make([]StatementT, 0))\n\t\t}\n\t}\n\tif len(result[len(result)-1]) <= 0 {\n\t\tresult = result[0 : len(result)-1]\n\t}\n\treturn result\n}\n\nfunc Parse(text string) ([][]StatementT, error) {\n\tresult1, err := parse1(text)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult2 := parse2(result1)\n\treturn result2, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\tmph \"github.com\/mackerelio\/go-mackerel-plugin-helper\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar graphdef map[string](mph.Graphs) = map[string](mph.Graphs){\n\t\"memcached.connections\": mph.Graphs{\n\t\tLabel: \"Memcached Connections\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"curr_connections\", Label: \"Connections\", Diff: false},\n\t\t},\n\t},\n\t\"memcached.cmd\": mph.Graphs{\n\t\tLabel: \"Memcached Command\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"cmd_get\", Label: \"Get\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_set\", Label: \"Set\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_flush\", Label: \"Flush\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_touch\", Label: \"Touch\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.hitmiss\": mph.Graphs{\n\t\tLabel: \"Memcached Hits\/Misses\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"get_hits\", Label: \"Get Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"get_misses\", Label: \"Get Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"delete_hits\", Label: \"Delete Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"delete_misses\", Label: \"Delete Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"incr_hits\", Label: \"Incr Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"incr_misses\", Label: \"Incr Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cas_hits\", Label: \"Cas Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cas_misses\", Label: \"Cas Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"touch_hits\", Label: \"Touch Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"touch_misses\", Label: \"Touch Misses\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.evictions\": mph.Graphs{\n\t\tLabel: \"Memcached Evictions\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"evictions\", Label: \"Evictions\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.unfetched\": mph.Graphs{\n\t\tLabel: \"Memcached Unfetched\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"expired_unfetched\", Label: \"Expired unfetched\", Diff: true},\n\t\t\tmph.Metrics{Key: \"evicted_unfetched\", Label: \"Evicted unfetched\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.rusage\": mph.Graphs{\n\t\tLabel: \"Memcached Resouce Usage\",\n\t\tUnit: \"float\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"rusage_user\", Label: \"User\", Diff: true},\n\t\t\tmph.Metrics{Key: \"rusage_system\", Label: \"System\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.bytes\": mph.Graphs{\n\t\tLabel: \"Memcached Traffics\",\n\t\tUnit: \"bytes\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"bytes_read\", Label: \"Read\", Diff: true},\n\t\t\tmph.Metrics{Key: \"bytes_written\", Label: \"Write\", Diff: true},\n\t\t},\n\t},\n}\n\ntype MemcachedPlugin struct {\n\tTarget string\n\tTempfile string\n}\n\nfunc (m MemcachedPlugin) FetchData() (map[string]float64, error) {\n\tconn, err := net.Dial(\"tcp\", m.Target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Fprintln(conn, \"stats\")\n\tr := bufio.NewReader(conn)\n\tline, isPrefix, err := r.ReadLine()\n\n\tstat := make(map[string]float64)\n\tfor err == nil && !isPrefix {\n\t\ts := string(line)\n\t\tif s == \"END\" {\n\t\t\treturn stat, nil\n\t\t}\n\t\tres := strings.Split(s, \" \")\n\t\tif res[0] == \"STAT\" {\n\t\t\tstat[res[1]], err = strconv.ParseFloat(res[2], 64)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"readStat:\", err)\n\t\t\t}\n\t\t}\n\t\tline, isPrefix, err = r.ReadLine()\n\t}\n\tif isPrefix {\n\t\treturn nil, errors.New(\"buffer size too small\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, nil\n}\n\nfunc (m MemcachedPlugin) GetGraphDefinition() map[string](mph.Graphs) {\n\treturn graphdef\n}\n\nfunc (m MemcachedPlugin) GetTempfilename() string {\n\treturn m.Tempfile\n}\n\nfunc main() {\n\toptHost := flag.String(\"host\", \"localhost\", \"Hostname\")\n\toptPort := flag.String(\"port\", \"11211\", \"Port\")\n\toptTempfile := flag.String(\"tempfile\", \"\", \"Temp file name\")\n\tflag.Parse()\n\n\tvar memcached MemcachedPlugin\n\n\tmemcached.Target = fmt.Sprintf(\"%s:%s\", *optHost, *optPort)\n\tif *optTempfile != \"\" {\n\t\tmemcached.Tempfile = *optTempfile\n\t} else {\n\t\tmemcached.Tempfile = fmt.Sprintf(\"\/tmp\/mackerel-plugin-memcached-%s-%s\", *optHost, *optPort)\n\t}\n\n\thelper := mph.MackerelPluginHelper{memcached}\n\n\tif os.Getenv(\"MACKEREL_AGENT_PLUGIN_META\") != \"\" {\n\t\thelper.OutputDefinitions()\n\t} else {\n\t\thelper.OutputValues()\n\t}\n}\n<commit_msg>use Scanner<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\tmph \"github.com\/mackerelio\/go-mackerel-plugin-helper\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar graphdef map[string](mph.Graphs) = map[string](mph.Graphs){\n\t\"memcached.connections\": mph.Graphs{\n\t\tLabel: \"Memcached Connections\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"curr_connections\", Label: \"Connections\", Diff: false},\n\t\t},\n\t},\n\t\"memcached.cmd\": mph.Graphs{\n\t\tLabel: \"Memcached Command\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"cmd_get\", Label: \"Get\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_set\", Label: \"Set\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_flush\", Label: \"Flush\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cmd_touch\", Label: \"Touch\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.hitmiss\": mph.Graphs{\n\t\tLabel: \"Memcached Hits\/Misses\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"get_hits\", Label: \"Get Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"get_misses\", Label: \"Get Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"delete_hits\", Label: \"Delete Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"delete_misses\", Label: \"Delete Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"incr_hits\", Label: \"Incr Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"incr_misses\", Label: \"Incr Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cas_hits\", Label: \"Cas Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"cas_misses\", Label: \"Cas Misses\", Diff: true},\n\t\t\tmph.Metrics{Key: \"touch_hits\", Label: \"Touch Hits\", Diff: true},\n\t\t\tmph.Metrics{Key: \"touch_misses\", Label: \"Touch Misses\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.evictions\": mph.Graphs{\n\t\tLabel: \"Memcached Evictions\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"evictions\", Label: \"Evictions\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.unfetched\": mph.Graphs{\n\t\tLabel: \"Memcached Unfetched\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"expired_unfetched\", Label: \"Expired unfetched\", Diff: true},\n\t\t\tmph.Metrics{Key: \"evicted_unfetched\", Label: \"Evicted unfetched\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.rusage\": mph.Graphs{\n\t\tLabel: \"Memcached Resouce Usage\",\n\t\tUnit: \"float\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"rusage_user\", Label: \"User\", Diff: true},\n\t\t\tmph.Metrics{Key: \"rusage_system\", Label: \"System\", Diff: true},\n\t\t},\n\t},\n\t\"memcached.bytes\": mph.Graphs{\n\t\tLabel: \"Memcached Traffics\",\n\t\tUnit: \"bytes\",\n\t\tMetrics: [](mph.Metrics){\n\t\t\tmph.Metrics{Key: \"bytes_read\", Label: \"Read\", Diff: true},\n\t\t\tmph.Metrics{Key: \"bytes_written\", Label: \"Write\", Diff: true},\n\t\t},\n\t},\n}\n\ntype MemcachedPlugin struct {\n\tTarget string\n\tTempfile string\n}\n\nfunc (m MemcachedPlugin) FetchData() (map[string]float64, error) {\n\tconn, err := net.Dial(\"tcp\", m.Target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Fprintln(conn, \"stats\")\n\tscanner := bufio.NewScanner(conn)\n\tstat := make(map[string]float64)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\ts := string(line)\n\t\tif s == \"END\" {\n\t\t\treturn stat, nil\n\t\t}\n\n\t\tres := strings.Split(s, \" \")\n\t\tif res[0] == \"STAT\" {\n\t\t\tstat[res[1]], err = strconv.ParseFloat(res[2], 64)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"readStat:\", err)\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn stat, err\n\t}\n\treturn nil, nil\n}\n\nfunc (m MemcachedPlugin) GetGraphDefinition() map[string](mph.Graphs) {\n\treturn graphdef\n}\n\nfunc (m MemcachedPlugin) GetTempfilename() string {\n\treturn m.Tempfile\n}\n\nfunc main() {\n\toptHost := flag.String(\"host\", \"localhost\", \"Hostname\")\n\toptPort := flag.String(\"port\", \"11211\", \"Port\")\n\toptTempfile := flag.String(\"tempfile\", \"\", \"Temp file name\")\n\tflag.Parse()\n\n\tvar memcached MemcachedPlugin\n\n\tmemcached.Target = fmt.Sprintf(\"%s:%s\", *optHost, *optPort)\n\tif *optTempfile != \"\" {\n\t\tmemcached.Tempfile = *optTempfile\n\t} else {\n\t\tmemcached.Tempfile = fmt.Sprintf(\"\/tmp\/mackerel-plugin-memcached-%s-%s\", *optHost, *optPort)\n\t}\n\n\thelper := mph.MackerelPluginHelper{memcached}\n\n\tif os.Getenv(\"MACKEREL_AGENT_PLUGIN_META\") != \"\" {\n\t\thelper.OutputDefinitions()\n\t} else {\n\t\thelper.OutputValues()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ go-rst - A reStructuredText parser for Go\n\/\/ 2014 (c) The go-rst Authors\n\/\/ MIT Licensed. See LICENSE for details.\npackage parse\n\nimport (\n\t\"fmt\"\n\t\"github.com\/demizer\/go-elog\"\n\t\"unicode\/utf8\"\n)\n\ntype itemElement int\n\nconst (\n\titemEOF itemElement = iota\n\titemError\n\titemTitle\n\titemSectionAdornment\n\titemParagraph\n\titemBlockquote\n\titemLiteralBlock\n\titemSystemMessage\n\titemSpace\n\titemBlankLine\n)\n\nvar elements = [...]string{\n\t\"itemEOF\",\n\t\"itemError\",\n\t\"itemTitle\",\n\t\"itemSectionAdornment\",\n\t\"itemParagraph\",\n\t\"itemBlockquote\",\n\t\"itemLiteralBlock\",\n\t\"itemSystemMessage\",\n\t\"itemSpace\",\n\t\"itemBlankLine\",\n}\n\nfunc (t itemElement) String() string { return elements[t] }\n\nvar sectionAdornments = []rune{'!', '\"', '#', '$', '\\'', '%', '&', '(', ')', '*',\n\t'+', ',', '-', '.', '\/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\',\n\t']', '^', '_', '`', '{', '|', '}', '~'}\n\nconst EOF rune = -1\n\ntype stateFn func(*lexer) stateFn\n\ntype item struct {\n\tElementName string\n\tElementType itemElement\n\tPosition Pos\n\tLine int\n\tValue interface{}\n}\n\ntype systemMessageLevel int\n\nconst (\n\tlevelInfo systemMessageLevel = iota\n\tlevelWarning\n\tlevelError\n\tlevelSevere\n)\n\nvar systemMessageLevels = [...]string{\n\t\"INFO\",\n\t\"WARNING\",\n\t\"ERROR\",\n\t\"SEVERE\",\n}\n\nfunc (s systemMessageLevel) String() string { return systemMessageLevels[s] }\n\ntype systemMessage struct {\n\tlevel systemMessageLevel\n\tline int\n\tsource string\n\titems []item\n}\n\ntype lexer struct {\n\tname string\n\tinput string\n\tstate stateFn\n\tpos Pos\n\tstart Pos\n\twidth Pos\n\tlastPos Pos\n\titems chan item\n\tline int\n}\n\nfunc lex(name, input string) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\tline: 1,\n\t\titems: make(chan item),\n\t}\n\tgo l.run()\n\treturn l\n}\n\n\/\/ emit passes an item back to the client.\nfunc (l *lexer) emit(t itemElement) {\n\tl.items <- item{ElementType: t, ElementName: fmt.Sprint(t),\n\t\tPosition: l.start, Line: l.line, Value: l.input[l.start:l.pos]}\n\tlog.Debugf(\"%s: %q\\n\", t, l.input[l.start:l.pos])\n\tl.start = l.pos\n}\n\nfunc (l *lexer) backup() {\n\tl.pos -= l.width\n}\n\nfunc (l *lexer) current() rune {\n\tr, _ := utf8.DecodeRuneInString(l.input[l.pos:])\n\treturn r\n}\n\nfunc (l *lexer) previous() rune {\n\tl.backup()\n\tr := l.current()\n\tl.next()\n\treturn r\n}\n\nfunc (l *lexer) peek() rune {\n\tr := l.next()\n\tl.backup()\n\treturn r\n}\n\nfunc (l *lexer) skip() {\n\tl.start += 1\n l.next()\n}\n\nfunc (l *lexer) advance(to rune) {\n\tfor {\n\t\tif l.next() == EOF || to == l.current() {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (l *lexer) next() rune {\n\tif int(l.pos) >= len(l.input) {\n\t\tlog.Debugln(\"Reached EOF!\")\n\t\tl.width = 0\n\t\treturn EOF\n\t}\n\tr, w := utf8.DecodeRuneInString(l.input[l.pos:])\n\tl.width = Pos(w)\n\tl.pos += l.width\n\treturn r\n}\n\n\/\/ nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n\titem := <-l.items\n\tl.lastPos = item.Position\n\treturn item\n\n}\n\nfunc (l *lexer) run() {\n\tfor l.state = lexStart; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}\n\n\/\/ isWhiteSpace reports whether r is a space character.\nfunc isWhiteSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n' || r == '\\r'\n}\n\n\/\/ isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n\treturn r == '\\r' || r == '\\n'\n}\n\nfunc lexStart(l *lexer) stateFn {\n\tlog.Debugln(\"\\nTransition...\")\n\tfor {\n\t\tif len(l.input) == 0 {\n\t\t\tl.emit(itemEOF)\n\t\t\treturn nil\n\t\t}\n\n\t\tif l.pos > l.start {\n\t\t\tlog.Debugf(\"%q, Current: %q, Start: %d, Pos: %d, Line: %d\\n\",\n\t\t\t\tl.input[l.start:l.pos], l.current(), l.start, l.pos, l.line)\n\t\t}\n\n\t\tisStartOfToken := l.start == l.pos-l.width\n\n\t\tswitch r := l.current(); {\n\t\tcase isStartOfToken:\n\t\t\tif isWhiteSpace(r) {\n\t\t\t\tlexWhiteSpace(l)\n\t\t\t}\n\t\t\tif isSection(l) {\n\t\t\t\treturn lexSection\n\t\t\t}\n\t\t\tl.next()\n\t\tcase isEndOfLine(r):\n\t\t\tif l.pos > l.start {\n\t\t\t\tl.emit(itemParagraph)\n\t\t\t}\n\t\t\tl.line += 1\n\t\t\tl.skip() \/\/ Skip the newline\n\t\t}\n\t\tif l.next() == EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Correctly reached EOF.\n\tif l.pos > l.start {\n\t\tl.emit(itemParagraph)\n\t}\n\n\tl.emit(itemEOF)\n\treturn nil\n}\n\nfunc lexSection(l *lexer) stateFn {\n\tif len(l.input) > 0 {\n\t\tlog.Debugf(\"\\tlexSection: %q, Pos: %d\\n\",\n\t\t\tl.input[l.start:l.pos], l.pos)\n\t}\n\n\tif isEndOfLine(l.peek()) {\n\t\tl.emit(itemTitle)\n\t\tl.ignore()\n\t}\n\nLoop:\n\tfor {\n\t\tswitch r := l.next(); {\n\t\tcase isSectionAdornment(r):\n\t\t\tif len(l.input) > 0 {\n\t\t\t\tlog.Debugf(\"\\tlexSection: %q, Pos: %d\\n\",\n\t\t\t\t\tl.input[l.start:l.pos], l.pos)\n\t\t\t}\n\t\tcase isEndOfLine(r):\n\t\t\tl.backup()\n\t\t\tl.emit(itemSectionAdornment)\n\t\t\tl.line += 1\n\t\t\tl.ignore()\n\t\t\tbreak Loop\n\t\t}\n\t}\n\treturn lexStart\n}\n\nfunc isSectionAdornment(r rune) bool {\n\tfor _, a := range sectionAdornments {\n\t\tif a == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>lex.go: Refactor lexSection()<commit_after>\/\/ go-rst - A reStructuredText parser for Go\n\/\/ 2014 (c) The go-rst Authors\n\/\/ MIT Licensed. See LICENSE for details.\npackage parse\n\nimport (\n\t\"fmt\"\n\t\"github.com\/demizer\/go-elog\"\n\t\"unicode\/utf8\"\n)\n\ntype itemElement int\n\nconst (\n\titemEOF itemElement = iota\n\titemError\n\titemTitle\n\titemSectionAdornment\n\titemParagraph\n\titemBlockquote\n\titemLiteralBlock\n\titemSystemMessage\n\titemSpace\n\titemBlankLine\n)\n\nvar elements = [...]string{\n\t\"itemEOF\",\n\t\"itemError\",\n\t\"itemTitle\",\n\t\"itemSectionAdornment\",\n\t\"itemParagraph\",\n\t\"itemBlockquote\",\n\t\"itemLiteralBlock\",\n\t\"itemSystemMessage\",\n\t\"itemSpace\",\n\t\"itemBlankLine\",\n}\n\nfunc (t itemElement) String() string { return elements[t] }\n\nvar sectionAdornments = []rune{'!', '\"', '#', '$', '\\'', '%', '&', '(', ')', '*',\n\t'+', ',', '-', '.', '\/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\',\n\t']', '^', '_', '`', '{', '|', '}', '~'}\n\nconst EOF rune = -1\n\ntype stateFn func(*lexer) stateFn\n\ntype item struct {\n\tElementName string\n\tElementType itemElement\n\tPosition Pos\n\tLine int\n\tValue interface{}\n}\n\ntype systemMessageLevel int\n\nconst (\n\tlevelInfo systemMessageLevel = iota\n\tlevelWarning\n\tlevelError\n\tlevelSevere\n)\n\nvar systemMessageLevels = [...]string{\n\t\"INFO\",\n\t\"WARNING\",\n\t\"ERROR\",\n\t\"SEVERE\",\n}\n\nfunc (s systemMessageLevel) String() string { return systemMessageLevels[s] }\n\ntype systemMessage struct {\n\tlevel systemMessageLevel\n\tline int\n\tsource string\n\titems []item\n}\n\ntype lexer struct {\n\tname string\n\tinput string\n\tstate stateFn\n\tpos Pos\n\tstart Pos\n\twidth Pos\n\tlastPos Pos\n\titems chan item\n\tline int\n}\n\nfunc lex(name, input string) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\tline: 1,\n\t\titems: make(chan item),\n\t}\n\tgo l.run()\n\treturn l\n}\n\n\/\/ emit passes an item back to the client.\nfunc (l *lexer) emit(t itemElement) {\n\tl.items <- item{ElementType: t, ElementName: fmt.Sprint(t),\n\t\tPosition: l.start, Line: l.line, Value: l.input[l.start:l.pos]}\n\tlog.Debugf(\"%s: %q\\n\", t, l.input[l.start:l.pos])\n\tl.start = l.pos\n}\n\nfunc (l *lexer) backup() {\n\tl.pos -= l.width\n}\n\nfunc (l *lexer) current() rune {\n\tr, _ := utf8.DecodeRuneInString(l.input[l.pos:])\n\treturn r\n}\n\nfunc (l *lexer) previous() rune {\n\tl.backup()\n\tr := l.current()\n\tl.next()\n\treturn r\n}\n\nfunc (l *lexer) peek() rune {\n\tr := l.next()\n\tl.backup()\n\treturn r\n}\n\nfunc (l *lexer) skip() {\n\tl.start += 1\n l.next()\n}\n\nfunc (l *lexer) advance(to rune) {\n\tfor {\n\t\tif l.next() == EOF || to == l.current() {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (l *lexer) next() rune {\n\tif int(l.pos) >= len(l.input) {\n\t\tlog.Debugln(\"Reached EOF!\")\n\t\tl.width = 0\n\t\treturn EOF\n\t}\n\tr, w := utf8.DecodeRuneInString(l.input[l.pos:])\n\tl.width = Pos(w)\n\tl.pos += l.width\n\treturn r\n}\n\n\/\/ nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n\titem := <-l.items\n\tl.lastPos = item.Position\n\treturn item\n\n}\n\nfunc (l *lexer) run() {\n\tfor l.state = lexStart; l.state != nil; {\n\t\tl.state = l.state(l)\n\t}\n}\n\n\/\/ isWhiteSpace reports whether r is a space character.\nfunc isWhiteSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n' || r == '\\r'\n}\n\n\/\/ isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n\treturn r == '\\r' || r == '\\n'\n}\n\nfunc lexStart(l *lexer) stateFn {\n\tlog.Debugln(\"\\nTransition...\")\n\tfor {\n\t\tif len(l.input) == 0 {\n\t\t\tl.emit(itemEOF)\n\t\t\treturn nil\n\t\t}\n\n\t\tif l.pos > l.start {\n\t\t\tlog.Debugf(\"%q, Current: %q, Start: %d, Pos: %d, Line: %d\\n\",\n\t\t\t\tl.input[l.start:l.pos], l.current(), l.start, l.pos, l.line)\n\t\t}\n\n\t\tisStartOfToken := l.start == l.pos-l.width\n\n\t\tswitch r := l.current(); {\n\t\tcase isStartOfToken:\n\t\t\tif isWhiteSpace(r) {\n\t\t\t\tlexWhiteSpace(l)\n\t\t\t}\n\t\t\tif isSection(l) {\n\t\t\t\treturn lexSection\n\t\t\t}\n\t\t\tl.next()\n\t\tcase isEndOfLine(r):\n\t\t\tif l.pos > l.start {\n\t\t\t\tl.emit(itemParagraph)\n\t\t\t}\n\t\t\tl.line += 1\n\t\t\tl.skip() \/\/ Skip the newline\n\t\t}\n\t\tif l.next() == EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Correctly reached EOF.\n\tif l.pos > l.start {\n\t\tl.emit(itemParagraph)\n\t}\n\n\tl.emit(itemEOF)\n\treturn nil\n}\n\nfunc lexSection(l *lexer) stateFn {\n\tvar lexTitle bool\n\tlog.Debugln(\"\\nTransition...\")\n\n\tif !isSectionAdornment(l.current()) {\n\t\tlexTitle = true\n\t}\n\n\tfor {\n\t\tif len(l.input) > 0 {\n\t\t\tlog.Debugf(\"%q, Start: %d, Pos: %d\\n\", l.input[l.start:l.pos], l.start, l.pos)\n\t\t}\n\t\tswitch r := l.next(); {\n\t\tcase isEndOfLine(r):\n\t\t\tl.backup()\n\t\t\tif lexTitle {\n\t\t\t\tl.emit(itemTitle)\n\t\t\t\tlexTitle = false\n\t\t\t\tl.line += 1\n\t\t\t\tl.skip()\n\t\t\t\tl.next()\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tl.emit(itemSectionAdornment)\n\t\t\t\treturn lexStart\n\t\t\t}\n\t\tcase isWhiteSpace(r):\n\t\t\tlexWhiteSpace(l)\n\t\t}\n\t}\n\treturn lexStart\n}\n\nfunc isSectionAdornment(r rune) bool {\n\tfor _, a := range sectionAdornments {\n\t\tif a == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package processors\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/rehabstudio\/oneill\/oneill\"\n)\n\nconst (\n\tnginxTemplate string = `\n upstream {{.Subdomain}} {\n server localhost:{{.Port}};\n }\n\n server {\n listen *:80;\n server_name {{.Subdomain}}.{{.ServingDomain}};\n return 301 https:\/\/$server_name$request_uri;\n }\n\n server {\n listen 443;\n server_name {{.Subdomain}}.{{.ServingDomain}};\n\n ssl on;\n ssl_certificate \/etc\/ssl\/certs\/labs-server.crt;\n ssl_certificate_key \/etc\/ssl\/private\/labs-server.pem;\n ssl_protocols TLSv1 TLSv1.1 TLSv1.2;\n ssl_session_timeout 5m;\n ssl_session_cache shared:SSL:5m;\n\n client_max_body_size 0; # disable any limits to avoid HTTP 413 for large image uploads\n\n # required to avoid HTTP 411: see Issue #1486 (https:\/\/github.com\/docker\/docker\/issues\/1486)\n chunked_transfer_encoding on;\n\n location \/ {\n proxy_pass http:\/\/{{.Subdomain}};\n proxy_set_header Host $http_host; # required for docker client's sake\n proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP\n proxy_read_timeout 900;\n }\n\n }\n`\n)\n\nfunc ensureFreshOutputDir() {\n\toneill.LogDebug(\"Recreating empty configuration directory\")\n\toutDir := oneill.Config.NginxConfigDirectory\n\terr := os.RemoveAll(outDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = os.Mkdir(outDir, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc reloadNginxConfig() error {\n\trunCmd := exec.Command(\"service\", \"nginx\", \"reload\")\n\treturn runCmd.Run()\n}\n\ntype templateContext struct {\n\tSubdomain string\n\tServingDomain string\n\tPort int64\n}\n\nfunc writeTemplateToDisk(siteConfig *oneill.SiteConfig, container docker.APIContainers) {\n\ttmpl, err := template.New(\"nginx-config\").Parse(nginxTemplate)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to load nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n\n\tvar b bytes.Buffer\n\tcontext := templateContext{\n\t\tSubdomain: siteConfig.Subdomain,\n\t\tServingDomain: oneill.Config.ServingDomain,\n\t\tPort: container.Ports[0].PublicPort,\n\t}\n\terr = tmpl.Execute(&b, context)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to execute nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n\toutDir := oneill.Config.NginxConfigDirectory\n\toutFile := path.Join(outDir, fmt.Sprintf(\"%s.conf\", siteConfig.Subdomain))\n\terr = ioutil.WriteFile(outFile, b.Bytes(), 0644)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to write nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n}\n\nfunc ConfigureNginx(siteConfigs []*oneill.SiteConfig) []*oneill.SiteConfig {\n\toneill.LogInfo(\"## Configuring Nginx\")\n\n\tensureFreshOutputDir()\n\n\tfor _, sc := range siteConfigs {\n\t\tfor _, container := range oneill.ListContainers() {\n\t\t\tcontainerName := strings.TrimPrefix(container.Names[0], \"\/\")\n\t\t\tif containerName == sc.Subdomain {\n\t\t\t\twriteTemplateToDisk(sc, container)\n\t\t\t\toneill.LogDebug(fmt.Sprintf(\"Configured nginx proxy for container: %s\", sc.Subdomain))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif err := reloadNginxConfig(); err != nil {\n\t\toneill.LogWarning(\"Unable to reload nginx configuration\")\n\t\treturn siteConfigs\n\t}\n\toneill.LogDebug(\"Reloaded nginx configuration\")\n\treturn siteConfigs\n}\n<commit_msg>update nginx config so that websocket connections will work<commit_after>package processors\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/rehabstudio\/oneill\/oneill\"\n)\n\nconst (\n\tnginxTemplate string = `\n upstream {{.Subdomain}} {\n server localhost:{{.Port}};\n }\n\n map $http_upgrade $connection_upgrade {\n default upgrade;\n '' close;\n }\n\n server {\n listen *:80;\n server_name {{.Subdomain}}.{{.ServingDomain}};\n return 301 https:\/\/$server_name$request_uri;\n }\n\n server {\n listen 443;\n server_name {{.Subdomain}}.{{.ServingDomain}};\n\n ssl on;\n ssl_certificate \/etc\/ssl\/certs\/labs-server.crt;\n ssl_certificate_key \/etc\/ssl\/private\/labs-server.pem;\n ssl_protocols TLSv1 TLSv1.1 TLSv1.2;\n ssl_session_timeout 5m;\n ssl_session_cache shared:SSL:5m;\n\n client_max_body_size 0; # disable any limits to avoid HTTP 413 for large image uploads\n\n # required to avoid HTTP 411: see Issue #1486 (https:\/\/github.com\/docker\/docker\/issues\/1486)\n chunked_transfer_encoding on;\n\n location \/ {\n proxy_pass http:\/\/{{.Subdomain}};\n proxy_set_header Host $http_host; # required for docker client's sake\n proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP\n proxy_read_timeout 900;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection $connection_upgrade;\n }\n\n }\n`\n)\n\nfunc ensureFreshOutputDir() {\n\toneill.LogDebug(\"Recreating empty configuration directory\")\n\toutDir := oneill.Config.NginxConfigDirectory\n\terr := os.RemoveAll(outDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = os.Mkdir(outDir, 0755)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc reloadNginxConfig() error {\n\trunCmd := exec.Command(\"service\", \"nginx\", \"reload\")\n\treturn runCmd.Run()\n}\n\ntype templateContext struct {\n\tSubdomain string\n\tServingDomain string\n\tPort int64\n}\n\nfunc writeTemplateToDisk(siteConfig *oneill.SiteConfig, container docker.APIContainers) {\n\ttmpl, err := template.New(\"nginx-config\").Parse(nginxTemplate)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to load nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n\n\tvar b bytes.Buffer\n\tcontext := templateContext{\n\t\tSubdomain: siteConfig.Subdomain,\n\t\tServingDomain: oneill.Config.ServingDomain,\n\t\tPort: container.Ports[0].PublicPort,\n\t}\n\terr = tmpl.Execute(&b, context)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to execute nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n\toutDir := oneill.Config.NginxConfigDirectory\n\toutFile := path.Join(outDir, fmt.Sprintf(\"%s.conf\", siteConfig.Subdomain))\n\terr = ioutil.WriteFile(outFile, b.Bytes(), 0644)\n\tif err != nil {\n\t\toneill.LogWarning(fmt.Sprintf(\"Unable to write nginx config template: %s\", siteConfig.Subdomain))\n\t\treturn\n\t}\n}\n\nfunc ConfigureNginx(siteConfigs []*oneill.SiteConfig) []*oneill.SiteConfig {\n\toneill.LogInfo(\"## Configuring Nginx\")\n\n\tensureFreshOutputDir()\n\n\tfor _, sc := range siteConfigs {\n\t\tfor _, container := range oneill.ListContainers() {\n\t\t\tcontainerName := strings.TrimPrefix(container.Names[0], \"\/\")\n\t\t\tif containerName == sc.Subdomain {\n\t\t\t\twriteTemplateToDisk(sc, container)\n\t\t\t\toneill.LogDebug(fmt.Sprintf(\"Configured nginx proxy for container: %s\", sc.Subdomain))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif err := reloadNginxConfig(); err != nil {\n\t\toneill.LogWarning(\"Unable to reload nginx configuration\")\n\t\treturn siteConfigs\n\t}\n\toneill.LogDebug(\"Reloaded nginx configuration\")\n\treturn siteConfigs\n}\n<|endoftext|>"} {"text":"<commit_before>package doozer\n\nimport (\n\t\"doozer\/client\"\n\t\"github.com\/bmizerany\/assert\"\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n\n\/\/ Upper bound on number of leaked goroutines.\n\/\/ Our goal is to reduce this to zero.\nconst leaked = 23\n\n\nfunc mustListen() net.Listener {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn l\n}\n\n\nfunc mustListenPacket(addr string) net.PacketConn {\n\tc, err := net.ListenPacket(\"udp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\nfunc TestDoozerSimple(t *testing.T) {\n\tl := mustListen()\n\tdefer l.Close()\n\tu := mustListenPacket(l.Addr().String())\n\tdefer u.Close()\n\n\tgo Main(\"a\", \"\", u, l, nil)\n\n\tcl, err := client.Dial(l.Addr().String())\n\tassert.Equal(t, nil, err)\n\tassert.Equal(t, nil, cl.Noop())\n}\n\n\nfunc TestDoozerWatchSimple(t *testing.T) {\n\tl := mustListen()\n\tdefer l.Close()\n\tu := mustListenPacket(l.Addr().String())\n\tdefer u.Close()\n\n\tgo Main(\"a\", \"\", u, l, nil)\n\n\tcl, err := client.Dial(l.Addr().String())\n\tassert.Equal(t, nil, err)\n\n\tch, err := cl.Watch(\"\/test\/**\")\n\tassert.Equal(t, nil, err, err)\n\tdefer close(ch)\n\n\tcl.Set(\"\/test\/foo\", \"bar\", \"\")\n\tev := <-ch\n\tassert.Equal(t, \"\/test\/foo\", ev.Path)\n\tassert.Equal(t, \"bar\", ev.Body)\n\tassert.NotEqual(t, \"\", ev.Cas)\n\n\tcl.Set(\"\/test\/fun\", \"house\", \"\")\n\tev = <-ch\n\tassert.Equal(t, \"\/test\/fun\", ev.Path)\n\tassert.Equal(t, \"house\", ev.Body)\n\tassert.NotEqual(t, \"\", ev.Cas)\n}\n\n\nfunc TestDoozerGoroutines(t *testing.T) {\n\tgs := runtime.Goroutines()\n\n\tfunc() {\n\t\tl := mustListen()\n\t\tdefer l.Close()\n\t\tu := mustListenPacket(l.Addr().String())\n\t\tdefer u.Close()\n\n\t\tgo Main(\"a\", \"\", u, l, nil)\n\n\t\tcl, err := client.Dial(l.Addr().String())\n\t\tassert.Equal(t, nil, err)\n\t\tcl.Noop()\n\t}()\n\n\tassert.T(t, gs+leaked >= runtime.Goroutines(), gs+leaked)\n}\n<commit_msg>add a basic benchmark<commit_after>package doozer\n\nimport (\n\t\"doozer\/client\"\n\t\"doozer\/store\"\n\t\"github.com\/bmizerany\/assert\"\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n\n\/\/ Upper bound on number of leaked goroutines.\n\/\/ Our goal is to reduce this to zero.\nconst leaked = 23\n\n\nfunc mustListen() net.Listener {\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn l\n}\n\n\nfunc mustListenPacket(addr string) net.PacketConn {\n\tc, err := net.ListenPacket(\"udp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n\nfunc TestDoozerSimple(t *testing.T) {\n\tl := mustListen()\n\tdefer l.Close()\n\tu := mustListenPacket(l.Addr().String())\n\tdefer u.Close()\n\n\tgo Main(\"a\", \"\", u, l, nil)\n\n\tcl, err := client.Dial(l.Addr().String())\n\tassert.Equal(t, nil, err)\n\tassert.Equal(t, nil, cl.Noop())\n}\n\n\nfunc TestDoozerWatchSimple(t *testing.T) {\n\tl := mustListen()\n\tdefer l.Close()\n\tu := mustListenPacket(l.Addr().String())\n\tdefer u.Close()\n\n\tgo Main(\"a\", \"\", u, l, nil)\n\n\tcl, err := client.Dial(l.Addr().String())\n\tassert.Equal(t, nil, err)\n\n\tch, err := cl.Watch(\"\/test\/**\")\n\tassert.Equal(t, nil, err, err)\n\tdefer close(ch)\n\n\tcl.Set(\"\/test\/foo\", \"bar\", \"\")\n\tev := <-ch\n\tassert.Equal(t, \"\/test\/foo\", ev.Path)\n\tassert.Equal(t, \"bar\", ev.Body)\n\tassert.NotEqual(t, \"\", ev.Cas)\n\n\tcl.Set(\"\/test\/fun\", \"house\", \"\")\n\tev = <-ch\n\tassert.Equal(t, \"\/test\/fun\", ev.Path)\n\tassert.Equal(t, \"house\", ev.Body)\n\tassert.NotEqual(t, \"\", ev.Cas)\n}\n\n\nfunc TestDoozerGoroutines(t *testing.T) {\n\tgs := runtime.Goroutines()\n\n\tfunc() {\n\t\tl := mustListen()\n\t\tdefer l.Close()\n\t\tu := mustListenPacket(l.Addr().String())\n\t\tdefer u.Close()\n\n\t\tgo Main(\"a\", \"\", u, l, nil)\n\n\t\tcl, err := client.Dial(l.Addr().String())\n\t\tassert.Equal(t, nil, err)\n\t\tcl.Noop()\n\t}()\n\n\tassert.T(t, gs+leaked >= runtime.Goroutines(), gs+leaked)\n}\n\n\nfunc BenchmarkDoozerClientSet(b *testing.B) {\n\tb.StopTimer()\n\tl := mustListen()\n\tdefer l.Close()\n\ta := l.Addr().String()\n\tu := mustListenPacket(a)\n\tdefer u.Close()\n\n\tgo Main(\"a\", \"\", u, l, nil)\n\tgo Main(\"a\", a, mustListenPacket(\":0\"), mustListen(), nil)\n\tgo Main(\"a\", a, mustListenPacket(\":0\"), mustListen(), nil)\n\tgo Main(\"a\", a, mustListenPacket(\":0\"), mustListen(), nil)\n\tgo Main(\"a\", a, mustListenPacket(\":0\"), mustListen(), nil)\n\n\tcl, err := client.Dial(l.Addr().String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tcl.Set(\"\/test\", \"\", store.Clobber)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Primitive HTTP client. See RFC 2616.\n\npackage http\n\nimport (\n\t\"bufio\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ Given a string of the form \"host\", \"host:port\", or \"[ipv6::address]:port\",\n\/\/ return true if the string includes a port.\nfunc hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }\n\n\/\/ Used in Send to implement io.ReadCloser by bundling together the\n\/\/ io.BufReader through which we read the response, and the underlying\n\/\/ network connection.\ntype readClose struct {\n\tio.Reader\n\tio.Closer\n}\n\n\/\/ Send issues an HTTP request. Caller should close resp.Body when done reading it.\n\/\/\n\/\/ TODO: support persistent connections (multiple requests on a single connection).\n\/\/ send() method is nonpublic because, when we refactor the code for persistent\n\/\/ connections, it may no longer make sense to have a method with this signature.\nfunc send(req *Request) (resp *Response, err os.Error) {\n\tif req.URL.Scheme != \"http\" {\n\t\treturn nil, &badStringError{\"unsupported protocol scheme\", req.URL.Scheme}\n\t}\n\n\taddr := req.URL.Host\n\tif !hasPort(addr) {\n\t\taddr += \":http\"\n\t}\n\tinfo := req.URL.Userinfo\n\tif len(info) > 0 {\n\t\tenc := base64.URLEncoding\n\t\tencoded := make([]byte, enc.EncodedLen(len(info)))\n\t\tenc.Encode(encoded, strings.Bytes(info))\n\t\tif req.Header == nil {\n\t\t\treq.Header = make(map[string]string)\n\t\t}\n\t\treq.Header[\"Authorization\"] = \"Basic \" + string(encoded)\n\t}\n\tconn, err := net.Dial(\"tcp\", \"\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = req.Write(conn)\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treader := bufio.NewReader(conn)\n\tresp, err = ReadResponse(reader, req.Method)\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\tresp.Body = readClose{resp.Body, conn}\n\n\treturn\n}\n\n\/\/ True if the specified HTTP status code is one for which the Get utility should\n\/\/ automatically redirect.\nfunc shouldRedirect(statusCode int) bool {\n\tswitch statusCode {\n\tcase StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Get issues a GET to the specified URL. If the response is one of the following\n\/\/ redirect codes, it follows the redirect, up to a maximum of 10 redirects:\n\/\/\n\/\/ 301 (Moved Permanently)\n\/\/ 302 (Found)\n\/\/ 303 (See Other)\n\/\/ 307 (Temporary Redirect)\n\/\/\n\/\/ finalURL is the URL from which the response was fetched -- identical to the\n\/\/ input URL unless redirects were followed.\n\/\/\n\/\/ Caller should close r.Body when done reading it.\nfunc Get(url string) (r *Response, finalURL string, err os.Error) {\n\t\/\/ TODO: if\/when we add cookie support, the redirected request shouldn't\n\t\/\/ necessarily supply the same cookies as the original.\n\t\/\/ TODO: set referrer header on redirects.\n\tfor redirect := 0; ; redirect++ {\n\t\tif redirect >= 10 {\n\t\t\terr = os.ErrorString(\"stopped after 10 redirects\")\n\t\t\tbreak\n\t\t}\n\n\t\tvar req Request\n\t\tif req.URL, err = ParseURL(url); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif r, err = send(&req); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif shouldRedirect(r.StatusCode) {\n\t\t\tr.Body.Close()\n\t\t\tif url = r.GetHeader(\"Location\"); url == \"\" {\n\t\t\t\terr = os.ErrorString(fmt.Sprintf(\"%d response missing Location header\", r.StatusCode))\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfinalURL = url\n\t\treturn\n\t}\n\n\terr = &URLError{\"Get\", url, err}\n\treturn\n}\n\n\n\/\/ Post issues a POST to the specified URL.\n\/\/\n\/\/ Caller should close r.Body when done reading it.\nfunc Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) {\n\tvar req Request\n\treq.Method = \"POST\"\n\treq.Body = nopCloser{body}\n\treq.Header = map[string]string{\n\t\t\"Content-Type\": bodyType,\n\t\t\"Transfer-Encoding\": \"chunked\",\n\t}\n\n\treq.URL, err = ParseURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn send(&req)\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n<commit_msg>http: fix bug in Post<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Primitive HTTP client. See RFC 2616.\n\npackage http\n\nimport (\n\t\"bufio\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ Given a string of the form \"host\", \"host:port\", or \"[ipv6::address]:port\",\n\/\/ return true if the string includes a port.\nfunc hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }\n\n\/\/ Used in Send to implement io.ReadCloser by bundling together the\n\/\/ io.BufReader through which we read the response, and the underlying\n\/\/ network connection.\ntype readClose struct {\n\tio.Reader\n\tio.Closer\n}\n\n\/\/ Send issues an HTTP request. Caller should close resp.Body when done reading it.\n\/\/\n\/\/ TODO: support persistent connections (multiple requests on a single connection).\n\/\/ send() method is nonpublic because, when we refactor the code for persistent\n\/\/ connections, it may no longer make sense to have a method with this signature.\nfunc send(req *Request) (resp *Response, err os.Error) {\n\tif req.URL.Scheme != \"http\" {\n\t\treturn nil, &badStringError{\"unsupported protocol scheme\", req.URL.Scheme}\n\t}\n\n\taddr := req.URL.Host\n\tif !hasPort(addr) {\n\t\taddr += \":http\"\n\t}\n\tinfo := req.URL.Userinfo\n\tif len(info) > 0 {\n\t\tenc := base64.URLEncoding\n\t\tencoded := make([]byte, enc.EncodedLen(len(info)))\n\t\tenc.Encode(encoded, strings.Bytes(info))\n\t\tif req.Header == nil {\n\t\t\treq.Header = make(map[string]string)\n\t\t}\n\t\treq.Header[\"Authorization\"] = \"Basic \" + string(encoded)\n\t}\n\tconn, err := net.Dial(\"tcp\", \"\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = req.Write(conn)\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treader := bufio.NewReader(conn)\n\tresp, err = ReadResponse(reader, req.Method)\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\tresp.Body = readClose{resp.Body, conn}\n\n\treturn\n}\n\n\/\/ True if the specified HTTP status code is one for which the Get utility should\n\/\/ automatically redirect.\nfunc shouldRedirect(statusCode int) bool {\n\tswitch statusCode {\n\tcase StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect:\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Get issues a GET to the specified URL. If the response is one of the following\n\/\/ redirect codes, it follows the redirect, up to a maximum of 10 redirects:\n\/\/\n\/\/ 301 (Moved Permanently)\n\/\/ 302 (Found)\n\/\/ 303 (See Other)\n\/\/ 307 (Temporary Redirect)\n\/\/\n\/\/ finalURL is the URL from which the response was fetched -- identical to the\n\/\/ input URL unless redirects were followed.\n\/\/\n\/\/ Caller should close r.Body when done reading it.\nfunc Get(url string) (r *Response, finalURL string, err os.Error) {\n\t\/\/ TODO: if\/when we add cookie support, the redirected request shouldn't\n\t\/\/ necessarily supply the same cookies as the original.\n\t\/\/ TODO: set referrer header on redirects.\n\tfor redirect := 0; ; redirect++ {\n\t\tif redirect >= 10 {\n\t\t\terr = os.ErrorString(\"stopped after 10 redirects\")\n\t\t\tbreak\n\t\t}\n\n\t\tvar req Request\n\t\tif req.URL, err = ParseURL(url); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif r, err = send(&req); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif shouldRedirect(r.StatusCode) {\n\t\t\tr.Body.Close()\n\t\t\tif url = r.GetHeader(\"Location\"); url == \"\" {\n\t\t\t\terr = os.ErrorString(fmt.Sprintf(\"%d response missing Location header\", r.StatusCode))\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfinalURL = url\n\t\treturn\n\t}\n\n\terr = &URLError{\"Get\", url, err}\n\treturn\n}\n\n\n\/\/ Post issues a POST to the specified URL.\n\/\/\n\/\/ Caller should close r.Body when done reading it.\nfunc Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) {\n\tvar req Request\n\treq.Method = \"POST\"\n\treq.ProtoMajor = 1\n\treq.ProtoMinor = 1\n\treq.Body = nopCloser{body}\n\treq.Header = map[string]string{\n\t\t\"Content-Type\": bodyType,\n\t}\n\treq.TransferEncoding = []string{\"chunked\"}\n\n\treq.URL, err = ParseURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn send(&req)\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Daniel Oaks <daniel@danieloaks.net>\n\/\/ released under the MIT license\n\npackage caps\n\n\/\/ Capability represents an optional feature that a client may request from the server.\ntype Capability string\n\nconst (\n\t\/\/ AccountNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/account-notify-3.1.html\n\tAccountNotify Capability = \"account-notify\"\n\t\/\/ AccountTag is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/account-tag-3.2.html\n\tAccountTag Capability = \"account-tag\"\n\t\/\/ AwayNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/away-notify-3.1.html\n\tAwayNotify Capability = \"away-notify\"\n\t\/\/ Batch is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/batch-3.2.html\n\tBatch Capability = \"batch\"\n\t\/\/ CapNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/cap-notify-3.2.html\n\tCapNotify Capability = \"cap-notify\"\n\t\/\/ ChgHost is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/chghost-3.2.html\n\tChgHost Capability = \"chghost\"\n\t\/\/ EchoMessage is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/echo-message-3.2.html\n\tEchoMessage Capability = \"echo-message\"\n\t\/\/ ExtendedJoin is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/extended-join-3.1.html\n\tExtendedJoin Capability = \"extended-join\"\n\t\/\/ InviteNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/invite-notify-3.2.html\n\tInviteNotify Capability = \"invite-notify\"\n\t\/\/ LabeledResponse is this draft IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/labeled-response.html\n\tLabeledResponse Capability = \"draft\/labeled-response\"\n\t\/\/ MaxLine is this proposed capability: https:\/\/github.com\/DanielOaks\/ircv3-specifications\/blob\/master+line-lengths\/extensions\/line-lengths.md\n\tMaxLine Capability = \"draft\/maxline\"\n\t\/\/ MessageTags is this draft IRCv3 capability: http:\/\/ircv3.net\/specs\/core\/message-tags-3.3.html\n\tMessageTags Capability = \"draft\/message-tags-0.2\"\n\t\/\/ MultiPrefix is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/multi-prefix-3.1.html\n\tMultiPrefix Capability = \"multi-prefix\"\n\t\/\/ Rename is this proposed capability: https:\/\/github.com\/SaberUK\/ircv3-specifications\/blob\/rename\/extensions\/rename.md\n\tRename Capability = \"draft\/rename\"\n\t\/\/ SASL is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/sasl-3.2.html\n\tSASL Capability = \"sasl\"\n\t\/\/ ServerTime is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/server-time-3.2.html\n\tServerTime Capability = \"server-time\"\n\t\/\/ STS is this draft IRCv3 capability: http:\/\/ircv3.net\/specs\/core\/sts-3.3.html\n\tSTS Capability = \"draft\/sts\"\n\t\/\/ UserhostInNames is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/userhost-in-names-3.2.html\n\tUserhostInNames Capability = \"userhost-in-names\"\n)\n\n\/\/ Name returns the name of the given capability.\nfunc (capability Capability) Name() string {\n\treturn string(capability)\n}\n\n\/\/ Version is used to select which max version of CAP the client supports.\ntype Version uint\n\nconst (\n\t\/\/ Cap301 refers to the base CAP spec.\n\tCap301 Version = 301\n\t\/\/ Cap302 refers to the IRCv3.2 CAP spec.\n\tCap302 Version = 302\n)\n<commit_msg>draft\/sts -> sts<commit_after>\/\/ Copyright (c) 2017 Daniel Oaks <daniel@danieloaks.net>\n\/\/ released under the MIT license\n\npackage caps\n\n\/\/ Capability represents an optional feature that a client may request from the server.\ntype Capability string\n\nconst (\n\t\/\/ AccountNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/account-notify-3.1.html\n\tAccountNotify Capability = \"account-notify\"\n\t\/\/ AccountTag is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/account-tag-3.2.html\n\tAccountTag Capability = \"account-tag\"\n\t\/\/ AwayNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/away-notify-3.1.html\n\tAwayNotify Capability = \"away-notify\"\n\t\/\/ Batch is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/batch-3.2.html\n\tBatch Capability = \"batch\"\n\t\/\/ CapNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/cap-notify-3.2.html\n\tCapNotify Capability = \"cap-notify\"\n\t\/\/ ChgHost is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/chghost-3.2.html\n\tChgHost Capability = \"chghost\"\n\t\/\/ EchoMessage is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/echo-message-3.2.html\n\tEchoMessage Capability = \"echo-message\"\n\t\/\/ ExtendedJoin is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/extended-join-3.1.html\n\tExtendedJoin Capability = \"extended-join\"\n\t\/\/ InviteNotify is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/invite-notify-3.2.html\n\tInviteNotify Capability = \"invite-notify\"\n\t\/\/ LabeledResponse is this draft IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/labeled-response.html\n\tLabeledResponse Capability = \"draft\/labeled-response\"\n\t\/\/ MaxLine is this proposed capability: https:\/\/github.com\/DanielOaks\/ircv3-specifications\/blob\/master+line-lengths\/extensions\/line-lengths.md\n\tMaxLine Capability = \"draft\/maxline\"\n\t\/\/ MessageTags is this draft IRCv3 capability: http:\/\/ircv3.net\/specs\/core\/message-tags-3.3.html\n\tMessageTags Capability = \"draft\/message-tags-0.2\"\n\t\/\/ MultiPrefix is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/multi-prefix-3.1.html\n\tMultiPrefix Capability = \"multi-prefix\"\n\t\/\/ Rename is this proposed capability: https:\/\/github.com\/SaberUK\/ircv3-specifications\/blob\/rename\/extensions\/rename.md\n\tRename Capability = \"draft\/rename\"\n\t\/\/ SASL is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/sasl-3.2.html\n\tSASL Capability = \"sasl\"\n\t\/\/ ServerTime is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/server-time-3.2.html\n\tServerTime Capability = \"server-time\"\n\t\/\/ STS is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/sts.html\n\tSTS Capability = \"sts\"\n\t\/\/ UserhostInNames is this IRCv3 capability: http:\/\/ircv3.net\/specs\/extensions\/userhost-in-names-3.2.html\n\tUserhostInNames Capability = \"userhost-in-names\"\n)\n\n\/\/ Name returns the name of the given capability.\nfunc (capability Capability) Name() string {\n\treturn string(capability)\n}\n\n\/\/ Version is used to select which max version of CAP the client supports.\ntype Version uint\n\nconst (\n\t\/\/ Cap301 refers to the base CAP spec.\n\tCap301 Version = 301\n\t\/\/ Cap302 refers to the IRCv3.2 CAP spec.\n\tCap302 Version = 302\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/brutella\/hc\/db\"\n\t\"github.com\/brutella\/hc\/hap\"\n\t\"github.com\/brutella\/hc\/hap\/pair\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc pairSetup(b io.Reader) (io.Reader, error) {\n\treturn sendTLV8(b, \"pair-setup\")\n}\n\nfunc pairVerify(b io.Reader) (io.Reader, error) {\n\treturn sendTLV8(b, \"pair-verify\")\n}\n\nfunc sendTLV8(b io.Reader, endpoint string) (io.Reader, error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:64521\/%s\", endpoint)\n\tresp, err := http.Post(url, hap.HTTPContentTypePairingTLV8, b)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Invalid status code %v\", resp.StatusCode)\n\t}\n\treturn resp.Body, err\n}\n\nfunc main() {\n\tdatabase, _ := db.NewDatabase(\".\/data\")\n\tc, _ := hap.NewDevice(\"Golang Client\", database)\n\tclient := pair.NewSetupClientController(\"336-02-620\", c, database)\n\tpairStartRequest := client.InitialPairingRequest()\n\n\tpairStartResponse, err := pairSetup(pairStartRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 2) S -> C\n\tpairVerifyRequest, err := pair.HandleReaderForHandler(pairStartResponse, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 3) C -> S\n\tpairVerifyResponse, err := pairSetup(pairVerifyRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 4) S -> C\n\tpairKeyRequest, err := pair.HandleReaderForHandler(pairVerifyResponse, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 5) C -> S\n\tpairKeyRespond, err := pairSetup(pairKeyRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 6) S -> C\n\trequest, err := pair.HandleReaderForHandler(pairKeyRespond, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\tif request != nil {\n\t\tlog.Println(request)\n\t}\n\n\tlog.Println(\"*** Pairing done ***\")\n\n\tverify := pair.NewVerifyClientController(c, database)\n\n\tverifyStartRequest := verify.InitialKeyVerifyRequest()\n\t\/\/ 1) C -> S\n\tverifyStartResponse, err := pairVerify(verifyStartRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 2) S -> C\n\tverifyFinishRequest, err := pair.HandleReaderForHandler(verifyStartResponse, verify)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 3) C -> S\n\tverifyFinishResponse, err := pairVerify(verifyFinishRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 4) S -> C\n\tlast_request, err := pair.HandleReaderForHandler(verifyFinishResponse, verify)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\tif last_request != nil {\n\t\tlog.Println(last_request)\n\t}\n\n\tlog.Println(\"*** Key Verification done ***\")\n}\n<commit_msg>Use local log package<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/brutella\/hc\/db\"\n\t\"github.com\/brutella\/hc\/hap\"\n\t\"github.com\/brutella\/hc\/hap\/pair\"\n\t\"github.com\/brutella\/hc\/log\"\n\t\"io\"\n\t\"net\/http\"\n)\n\nfunc pairSetup(b io.Reader) (io.Reader, error) {\n\treturn sendTLV8(b, \"pair-setup\")\n}\n\nfunc pairVerify(b io.Reader) (io.Reader, error) {\n\treturn sendTLV8(b, \"pair-verify\")\n}\n\nfunc sendTLV8(b io.Reader, endpoint string) (io.Reader, error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:64521\/%s\", endpoint)\n\tresp, err := http.Post(url, hap.HTTPContentTypePairingTLV8, b)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Invalid status code %v\", resp.StatusCode)\n\t}\n\treturn resp.Body, err\n}\n\nfunc main() {\n\tdatabase, _ := db.NewDatabase(\".\/data\")\n\tc, _ := hap.NewDevice(\"Golang Client\", database)\n\tclient := pair.NewSetupClientController(\"336-02-620\", c, database)\n\tpairStartRequest := client.InitialPairingRequest()\n\n\tpairStartResponse, err := pairSetup(pairStartRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 2) S -> C\n\tpairVerifyRequest, err := pair.HandleReaderForHandler(pairStartResponse, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 3) C -> S\n\tpairVerifyResponse, err := pairSetup(pairVerifyRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 4) S -> C\n\tpairKeyRequest, err := pair.HandleReaderForHandler(pairVerifyResponse, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 5) C -> S\n\tpairKeyRespond, err := pairSetup(pairKeyRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 6) S -> C\n\trequest, err := pair.HandleReaderForHandler(pairKeyRespond, client)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\tif request != nil {\n\t\tlog.Info.Println(request)\n\t}\n\n\tlog.Info.Println(\"*** Pairing done ***\")\n\n\tverify := pair.NewVerifyClientController(c, database)\n\n\tverifyStartRequest := verify.InitialKeyVerifyRequest()\n\t\/\/ 1) C -> S\n\tverifyStartResponse, err := pairVerify(verifyStartRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 2) S -> C\n\tverifyFinishRequest, err := pair.HandleReaderForHandler(verifyStartResponse, verify)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 3) C -> S\n\tverifyFinishResponse, err := pairVerify(verifyFinishRequest)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\t\/\/ 4) S -> C\n\tlast_request, err := pair.HandleReaderForHandler(verifyFinishResponse, verify)\n\tif err != nil {\n\t\tlog.Info.Panic(err)\n\t}\n\n\tif last_request != nil {\n\t\tlog.Info.Println(last_request)\n\t}\n\n\tlog.Info.Println(\"*** Key Verification done ***\")\n}\n<|endoftext|>"} {"text":"<commit_before>package ircserver\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/sorcix\/irc\"\n)\n\n\/\/ Each handler function must have the “cmd” prefix and return either a single\n\/\/ *irc.Message or a slice of *irc.Message.\n\nfunc init() {\n\t\/\/ Keep this list ordered the same way the functions below are ordered.\n\tcommands[\"PING\"] = &ircCommand{Func: cmdPing}\n\tcommands[\"NICK\"] = &ircCommand{\n\t\tFunc: cmdNick,\n\t\tInteresting: func(s *Session, msg *irc.Message) bool {\n\t\t\t\/\/ TODO(secure): does it make sense to restrict this to Sessions which\n\t\t\t\/\/ have a channel in common? noting this because it doesn’t handle the\n\t\t\t\/\/ query-only use-case. if there’s no downside (except for the privacy\n\t\t\t\/\/ aspect), perhaps leave it as-is?\n\t\t\treturn true\n\t\t},\n\t}\n\tcommands[\"USER\"] = &ircCommand{Func: cmdUser, MinParams: 3}\n\tcommands[\"JOIN\"] = &ircCommand{\n\t\tFunc: cmdJoin,\n\t\tMinParams: 1,\n\t\tInteresting: interestJoin,\n\t}\n\tcommands[\"PART\"] = &ircCommand{\n\t\tFunc: cmdPart,\n\t\tMinParams: 1,\n\t\tInteresting: interestPart,\n\t}\n\tcommands[\"QUIT\"] = &ircCommand{\n\t\tFunc: cmdQuit,\n\t\tInteresting: func(s *Session, msg *irc.Message) bool {\n\t\t\t\/\/ TODO(secure): does it make sense to restrict this to Sessions which\n\t\t\t\/\/ have a channel in common? noting this because it doesn’t handle the\n\t\t\t\/\/ query-only use-case. if there’s no downside (except for the privacy\n\t\t\t\/\/ aspect), perhaps leave it as-is?\n\t\t\treturn true\n\t\t},\n\t}\n\tcommands[\"PRIVMSG\"] = &ircCommand{Func: cmdPrivmsg, Interesting: interestPrivmsg}\n\tcommands[\"MODE\"] = &ircCommand{\n\t\tFunc: cmdMode,\n\t\tMinParams: 1,\n\t\tInteresting: commonChannelOrDirect,\n\t}\n\tcommands[\"WHO\"] = &ircCommand{Func: cmdWho}\n\tcommands[\"OPER\"] = &ircCommand{Func: cmdOper, MinParams: 2}\n\tcommands[\"KILL\"] = &ircCommand{Func: cmdKill, MinParams: 1}\n}\n\n\/\/ commonChannelOrDirect returns true when msg’s first parameter is a channel\n\/\/ name of 's' or when the first parameter is the nickname of 's'.\nfunc commonChannelOrDirect(s *Session, msg *irc.Message) bool {\n\treturn s.Channels[msg.Params[0]] || msg.Params[0] == s.Nick\n}\n\nfunc cmdPing(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOORIGIN,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No origin specified\",\n\t\t}\n\t}\n\treturn &irc.Message{\n\t\tCommand: irc.PONG,\n\t\tParams: []string{msg.Params[0]},\n\t}\n}\n\nfunc cmdNick(s *Session, msg *irc.Message) interface{} {\n\toldPrefix := s.ircPrefix\n\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NONICKNAMEGIVEN,\n\t\t\tTrailing: \"No nickname given\",\n\t\t}\n\t}\n\n\tif !IsValidNickname(msg.Params[0]) {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_ERRONEUSNICKNAME,\n\t\t\tParams: []string{\"*\", msg.Params[0]},\n\t\t\tTrailing: \"Erroneus nickname.\",\n\t\t}\n\t}\n\n\tinuse := false\n\tfor _, session := range Sessions {\n\t\tif NickToLower(session.Nick) == NickToLower(msg.Params[0]) {\n\t\t\tinuse = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif inuse {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NICKNAMEINUSE,\n\t\t\tParams: []string{\"*\", msg.Params[0]},\n\t\t\tTrailing: \"Nickname is already in use.\",\n\t\t}\n\t}\n\ts.Nick = msg.Params[0]\n\ts.updateIrcPrefix()\n\tlog.Printf(\"nickname now: %+v\\n\", s)\n\tif oldPrefix.String() != \"\" {\n\t\treturn &irc.Message{\n\t\t\tPrefix: &oldPrefix,\n\t\t\tCommand: irc.NICK,\n\t\t\tTrailing: msg.Params[0],\n\t\t}\n\t}\n\n\tvar replies []*irc.Message\n\n\t\/\/ TODO(secure): send 002, 003, 004, 251, 252, 254, 255, 265, 266, [motd = 375, 372, 376]\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_WELCOME,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"Welcome to RobustIRC!\",\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_YOURHOST,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"Your host is \" + ServerPrefix.Name,\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_CREATED,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"This server was created \" + serverCreation.String(),\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_MYINFO,\n\t\tParams: []string{s.Nick},\n\t\t\/\/ TODO(secure): actually support these modes.\n\t\tTrailing: ServerPrefix.Name + \" v1 i nst\",\n\t})\n\n\t\/\/ send ISUPPORT as per http:\/\/www.irc.org\/tech_docs\/draft-brocklesby-irc-isupport-03.txt\n\treplies = append(replies, &irc.Message{\n\t\tCommand: \"005\",\n\t\tParams: []string{\n\t\t\t\"CHANTYPES=#\",\n\t\t\t\"CHANNELLEN=\" + maxChannelLen,\n\t\t\t\"NICKLEN=\" + maxNickLen,\n\t\t\t\"MODES=1\",\n\t\t\t\"PREFIX=\",\n\t\t},\n\t\tTrailing: \"are supported by this server\",\n\t})\n\n\treturn replies\n}\n\nfunc cmdUser(s *Session, msg *irc.Message) interface{} {\n\t\/\/ We don’t need any information from the USER message.\n\treturn []*irc.Message{}\n}\n\nfunc interestJoin(s *Session, msg *irc.Message) bool {\n\treturn s.Channels[msg.Trailing]\n}\n\nfunc cmdJoin(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): strictly speaking, RFC1459 says one can join multiple channels at once.\n\tchannel := msg.Params[0]\n\tif !IsValidChannel(channel) {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOSUCHCHANNEL,\n\t\t\tParams: []string{s.Nick, channel},\n\t\t\tTrailing: \"No such channel\",\n\t\t}\n\t}\n\ts.Channels[channel] = true\n\tvar nicks []string\n\t\/\/ TODO(secure): a separate map for quick lookup may be worthwhile for big channels.\n\tfor _, session := range Sessions {\n\t\tif !session.Channels[channel] {\n\t\t\tcontinue\n\t\t}\n\t\tnicks = append(nicks, session.Nick)\n\t}\n\n\tvar replies []*irc.Message\n\n\treplies = append(replies, &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.JOIN,\n\t\tTrailing: channel,\n\t})\n\t\/\/replies = append(replies, irc.Message{\n\t\/\/\tPrefix: &irc.Prefix{Name: *network},\n\t\/\/\tCommand: irc.RPL_NOTOPIC,\n\t\/\/\tParams: []string{channel},\n\t\/\/})\n\t\/\/ TODO(secure): why the = param?\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_NAMREPLY,\n\t\tParams: []string{s.Nick, \"=\", channel},\n\t\tTrailing: strings.Join(nicks, \" \"),\n\t})\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_ENDOFNAMES,\n\t\tParams: []string{s.Nick, channel},\n\t\tTrailing: \"End of \/NAMES list.\",\n\t})\n\n\treturn replies\n}\n\nfunc interestPart(s *Session, msg *irc.Message) bool {\n\t\/\/ Do send PART messages back to the sender (who, by now, is not in the\n\t\/\/ channel anymore).\n\treturn s.ircPrefix == *msg.Prefix || s.Channels[msg.Params[0]]\n}\n\nfunc cmdPart(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): strictly speaking, RFC1459 says one can join multiple channels at once.\n\tchannel := msg.Params[0]\n\tdelete(s.Channels, channel)\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.PART,\n\t\tParams: []string{channel},\n\t}\n}\n\nfunc cmdQuit(s *Session, msg *irc.Message) interface{} {\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.QUIT,\n\t\tTrailing: msg.Trailing,\n\t}\n}\n\nfunc interestPrivmsg(s *Session, msg *irc.Message) bool {\n\t\/\/ Don’t send messages back to the sender.\n\tif s.ircPrefix == *msg.Prefix {\n\t\treturn false\n\t}\n\n\treturn commonChannelOrDirect(s, msg)\n}\n\nfunc cmdPrivmsg(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NORECIPIENT,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No recipient given (PRIVMSG)\",\n\t\t}\n\t}\n\n\tif msg.Trailing == \"\" {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOTEXTTOSEND,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No text to send\",\n\t\t}\n\t}\n\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.PRIVMSG,\n\t\tParams: []string{msg.Params[0]},\n\t\tTrailing: msg.Trailing,\n\t}\n}\n\nfunc cmdMode(s *Session, msg *irc.Message) interface{} {\n\tchannel := msg.Params[0]\n\t\/\/ TODO(secure): properly distinguish between users and channels\n\tif s.Channels[channel] {\n\t\tif len(msg.Params) > 1 && msg.Params[1] == \"b\" {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.RPL_ENDOFBANLIST,\n\t\t\t\tParams: []string{s.Nick, channel},\n\t\t\t\tTrailing: \"End of Channel Ban List\",\n\t\t\t}\n\t\t} else {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.RPL_CHANNELMODEIS,\n\t\t\t\tParams: []string{s.Nick, channel, \"+\"},\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif channel == s.Nick {\n\t\t\treturn &irc.Message{\n\t\t\t\tPrefix: &s.ircPrefix,\n\t\t\t\tCommand: irc.MODE,\n\t\t\t\tParams: []string{s.Nick},\n\t\t\t\tTrailing: \"+\",\n\t\t\t}\n\t\t} else {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.ERR_NOTONCHANNEL,\n\t\t\t\tParams: []string{s.Nick, channel},\n\t\t\t\tTrailing: \"You're not on that channel\",\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc cmdWho(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.RPL_ENDOFWHO,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"End of \/WHO list\",\n\t\t}\n\t}\n\n\tvar replies []*irc.Message\n\n\tchannel := msg.Params[0]\n\t\/\/ TODO(secure): a separate map for quick lookup may be worthwhile for big channels.\n\tfor _, session := range Sessions {\n\t\tif !session.Channels[channel] {\n\t\t\tcontinue\n\t\t}\n\t\tprefix := session.ircPrefix\n\t\treplies = append(replies, &irc.Message{\n\t\t\tCommand: irc.RPL_WHOREPLY,\n\t\t\tParams: []string{s.Nick, channel, prefix.User, prefix.Host, ServerPrefix.Name, prefix.Name, \"H\"},\n\t\t\tTrailing: \"0 Unknown\",\n\t\t})\n\t}\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_ENDOFWHO,\n\t\tParams: []string{s.Nick, channel},\n\t\tTrailing: \"End of \/WHO list\",\n\t})\n\n\treturn replies\n}\n\nfunc cmdOper(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): implement restriction to certain hosts once we have a\n\t\/\/ configuration file. (ERR_NOOPERHOST)\n\n\tif msg.Params[1] != NetworkPassword {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_PASSWDMISMATCH,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"Password incorrect\",\n\t\t}\n\t}\n\n\ts.Operator = true\n\n\treturn &irc.Message{\n\t\tCommand: irc.RPL_YOUREOPER,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"You are now an IRC operator\",\n\t}\n}\n\nfunc cmdKill(s *Session, msg *irc.Message) interface{} {\n\tif strings.TrimSpace(msg.Trailing) == \"\" {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NEEDMOREPARAMS,\n\t\t\tParams: []string{s.Nick, msg.Command},\n\t\t\tTrailing: \"Not enough parameters\",\n\t\t}\n\t}\n\n\tif !s.Operator {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOPRIVILEGES,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"Permission Denied - You're not an IRC operator\",\n\t\t}\n\t}\n\n\tfor _, session := range Sessions {\n\t\tif NickToLower(session.Nick) != NickToLower(msg.Params[0]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprefix := session.ircPrefix\n\t\tDeleteSession(session.Id)\n\t\treturn &irc.Message{\n\t\t\tPrefix: &prefix,\n\t\t\tCommand: irc.QUIT,\n\t\t\tTrailing: \"Killed by \" + s.Nick + \": \" + msg.Trailing,\n\t\t}\n\t}\n\n\treturn &irc.Message{\n\t\tCommand: irc.ERR_NOSUCHNICK,\n\t\tParams: []string{s.Nick, msg.Params[0]},\n\t\tTrailing: \"No such nick\/channel\",\n\t}\n}\n<commit_msg>return directly<commit_after>package ircserver\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/sorcix\/irc\"\n)\n\n\/\/ Each handler function must have the “cmd” prefix and return either a single\n\/\/ *irc.Message or a slice of *irc.Message.\n\nfunc init() {\n\t\/\/ Keep this list ordered the same way the functions below are ordered.\n\tcommands[\"PING\"] = &ircCommand{Func: cmdPing}\n\tcommands[\"NICK\"] = &ircCommand{\n\t\tFunc: cmdNick,\n\t\tInteresting: func(s *Session, msg *irc.Message) bool {\n\t\t\t\/\/ TODO(secure): does it make sense to restrict this to Sessions which\n\t\t\t\/\/ have a channel in common? noting this because it doesn’t handle the\n\t\t\t\/\/ query-only use-case. if there’s no downside (except for the privacy\n\t\t\t\/\/ aspect), perhaps leave it as-is?\n\t\t\treturn true\n\t\t},\n\t}\n\tcommands[\"USER\"] = &ircCommand{Func: cmdUser, MinParams: 3}\n\tcommands[\"JOIN\"] = &ircCommand{\n\t\tFunc: cmdJoin,\n\t\tMinParams: 1,\n\t\tInteresting: interestJoin,\n\t}\n\tcommands[\"PART\"] = &ircCommand{\n\t\tFunc: cmdPart,\n\t\tMinParams: 1,\n\t\tInteresting: interestPart,\n\t}\n\tcommands[\"QUIT\"] = &ircCommand{\n\t\tFunc: cmdQuit,\n\t\tInteresting: func(s *Session, msg *irc.Message) bool {\n\t\t\t\/\/ TODO(secure): does it make sense to restrict this to Sessions which\n\t\t\t\/\/ have a channel in common? noting this because it doesn’t handle the\n\t\t\t\/\/ query-only use-case. if there’s no downside (except for the privacy\n\t\t\t\/\/ aspect), perhaps leave it as-is?\n\t\t\treturn true\n\t\t},\n\t}\n\tcommands[\"PRIVMSG\"] = &ircCommand{Func: cmdPrivmsg, Interesting: interestPrivmsg}\n\tcommands[\"MODE\"] = &ircCommand{\n\t\tFunc: cmdMode,\n\t\tMinParams: 1,\n\t\tInteresting: commonChannelOrDirect,\n\t}\n\tcommands[\"WHO\"] = &ircCommand{Func: cmdWho}\n\tcommands[\"OPER\"] = &ircCommand{Func: cmdOper, MinParams: 2}\n\tcommands[\"KILL\"] = &ircCommand{Func: cmdKill, MinParams: 1}\n}\n\n\/\/ commonChannelOrDirect returns true when msg’s first parameter is a channel\n\/\/ name of 's' or when the first parameter is the nickname of 's'.\nfunc commonChannelOrDirect(s *Session, msg *irc.Message) bool {\n\treturn s.Channels[msg.Params[0]] || msg.Params[0] == s.Nick\n}\n\nfunc cmdPing(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOORIGIN,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No origin specified\",\n\t\t}\n\t}\n\treturn &irc.Message{\n\t\tCommand: irc.PONG,\n\t\tParams: []string{msg.Params[0]},\n\t}\n}\n\nfunc cmdNick(s *Session, msg *irc.Message) interface{} {\n\toldPrefix := s.ircPrefix\n\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NONICKNAMEGIVEN,\n\t\t\tTrailing: \"No nickname given\",\n\t\t}\n\t}\n\n\tif !IsValidNickname(msg.Params[0]) {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_ERRONEUSNICKNAME,\n\t\t\tParams: []string{\"*\", msg.Params[0]},\n\t\t\tTrailing: \"Erroneus nickname.\",\n\t\t}\n\t}\n\n\tfor _, session := range Sessions {\n\t\tif NickToLower(session.Nick) == NickToLower(msg.Params[0]) {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.ERR_NICKNAMEINUSE,\n\t\t\t\tParams: []string{\"*\", msg.Params[0]},\n\t\t\t\tTrailing: \"Nickname is already in use.\",\n\t\t\t}\n\t\t}\n\t}\n\ts.Nick = msg.Params[0]\n\ts.updateIrcPrefix()\n\tlog.Printf(\"nickname now: %+v\\n\", s)\n\tif oldPrefix.String() != \"\" {\n\t\treturn &irc.Message{\n\t\t\tPrefix: &oldPrefix,\n\t\t\tCommand: irc.NICK,\n\t\t\tTrailing: msg.Params[0],\n\t\t}\n\t}\n\n\tvar replies []*irc.Message\n\n\t\/\/ TODO(secure): send 002, 003, 004, 251, 252, 254, 255, 265, 266, [motd = 375, 372, 376]\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_WELCOME,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"Welcome to RobustIRC!\",\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_YOURHOST,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"Your host is \" + ServerPrefix.Name,\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_CREATED,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"This server was created \" + serverCreation.String(),\n\t})\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_MYINFO,\n\t\tParams: []string{s.Nick},\n\t\t\/\/ TODO(secure): actually support these modes.\n\t\tTrailing: ServerPrefix.Name + \" v1 i nst\",\n\t})\n\n\t\/\/ send ISUPPORT as per http:\/\/www.irc.org\/tech_docs\/draft-brocklesby-irc-isupport-03.txt\n\treplies = append(replies, &irc.Message{\n\t\tCommand: \"005\",\n\t\tParams: []string{\n\t\t\t\"CHANTYPES=#\",\n\t\t\t\"CHANNELLEN=\" + maxChannelLen,\n\t\t\t\"NICKLEN=\" + maxNickLen,\n\t\t\t\"MODES=1\",\n\t\t\t\"PREFIX=\",\n\t\t},\n\t\tTrailing: \"are supported by this server\",\n\t})\n\n\treturn replies\n}\n\nfunc cmdUser(s *Session, msg *irc.Message) interface{} {\n\t\/\/ We don’t need any information from the USER message.\n\treturn []*irc.Message{}\n}\n\nfunc interestJoin(s *Session, msg *irc.Message) bool {\n\treturn s.Channels[msg.Trailing]\n}\n\nfunc cmdJoin(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): strictly speaking, RFC1459 says one can join multiple channels at once.\n\tchannel := msg.Params[0]\n\tif !IsValidChannel(channel) {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOSUCHCHANNEL,\n\t\t\tParams: []string{s.Nick, channel},\n\t\t\tTrailing: \"No such channel\",\n\t\t}\n\t}\n\ts.Channels[channel] = true\n\tvar nicks []string\n\t\/\/ TODO(secure): a separate map for quick lookup may be worthwhile for big channels.\n\tfor _, session := range Sessions {\n\t\tif !session.Channels[channel] {\n\t\t\tcontinue\n\t\t}\n\t\tnicks = append(nicks, session.Nick)\n\t}\n\n\tvar replies []*irc.Message\n\n\treplies = append(replies, &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.JOIN,\n\t\tTrailing: channel,\n\t})\n\t\/\/replies = append(replies, irc.Message{\n\t\/\/\tPrefix: &irc.Prefix{Name: *network},\n\t\/\/\tCommand: irc.RPL_NOTOPIC,\n\t\/\/\tParams: []string{channel},\n\t\/\/})\n\t\/\/ TODO(secure): why the = param?\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_NAMREPLY,\n\t\tParams: []string{s.Nick, \"=\", channel},\n\t\tTrailing: strings.Join(nicks, \" \"),\n\t})\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_ENDOFNAMES,\n\t\tParams: []string{s.Nick, channel},\n\t\tTrailing: \"End of \/NAMES list.\",\n\t})\n\n\treturn replies\n}\n\nfunc interestPart(s *Session, msg *irc.Message) bool {\n\t\/\/ Do send PART messages back to the sender (who, by now, is not in the\n\t\/\/ channel anymore).\n\treturn s.ircPrefix == *msg.Prefix || s.Channels[msg.Params[0]]\n}\n\nfunc cmdPart(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): strictly speaking, RFC1459 says one can join multiple channels at once.\n\tchannel := msg.Params[0]\n\tdelete(s.Channels, channel)\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.PART,\n\t\tParams: []string{channel},\n\t}\n}\n\nfunc cmdQuit(s *Session, msg *irc.Message) interface{} {\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.QUIT,\n\t\tTrailing: msg.Trailing,\n\t}\n}\n\nfunc interestPrivmsg(s *Session, msg *irc.Message) bool {\n\t\/\/ Don’t send messages back to the sender.\n\tif s.ircPrefix == *msg.Prefix {\n\t\treturn false\n\t}\n\n\treturn commonChannelOrDirect(s, msg)\n}\n\nfunc cmdPrivmsg(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NORECIPIENT,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No recipient given (PRIVMSG)\",\n\t\t}\n\t}\n\n\tif msg.Trailing == \"\" {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOTEXTTOSEND,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"No text to send\",\n\t\t}\n\t}\n\n\treturn &irc.Message{\n\t\tPrefix: &s.ircPrefix,\n\t\tCommand: irc.PRIVMSG,\n\t\tParams: []string{msg.Params[0]},\n\t\tTrailing: msg.Trailing,\n\t}\n}\n\nfunc cmdMode(s *Session, msg *irc.Message) interface{} {\n\tchannel := msg.Params[0]\n\t\/\/ TODO(secure): properly distinguish between users and channels\n\tif s.Channels[channel] {\n\t\tif len(msg.Params) > 1 && msg.Params[1] == \"b\" {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.RPL_ENDOFBANLIST,\n\t\t\t\tParams: []string{s.Nick, channel},\n\t\t\t\tTrailing: \"End of Channel Ban List\",\n\t\t\t}\n\t\t} else {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.RPL_CHANNELMODEIS,\n\t\t\t\tParams: []string{s.Nick, channel, \"+\"},\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif channel == s.Nick {\n\t\t\treturn &irc.Message{\n\t\t\t\tPrefix: &s.ircPrefix,\n\t\t\t\tCommand: irc.MODE,\n\t\t\t\tParams: []string{s.Nick},\n\t\t\t\tTrailing: \"+\",\n\t\t\t}\n\t\t} else {\n\t\t\treturn &irc.Message{\n\t\t\t\tCommand: irc.ERR_NOTONCHANNEL,\n\t\t\t\tParams: []string{s.Nick, channel},\n\t\t\t\tTrailing: \"You're not on that channel\",\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc cmdWho(s *Session, msg *irc.Message) interface{} {\n\tif len(msg.Params) < 1 {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.RPL_ENDOFWHO,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"End of \/WHO list\",\n\t\t}\n\t}\n\n\tvar replies []*irc.Message\n\n\tchannel := msg.Params[0]\n\t\/\/ TODO(secure): a separate map for quick lookup may be worthwhile for big channels.\n\tfor _, session := range Sessions {\n\t\tif !session.Channels[channel] {\n\t\t\tcontinue\n\t\t}\n\t\tprefix := session.ircPrefix\n\t\treplies = append(replies, &irc.Message{\n\t\t\tCommand: irc.RPL_WHOREPLY,\n\t\t\tParams: []string{s.Nick, channel, prefix.User, prefix.Host, ServerPrefix.Name, prefix.Name, \"H\"},\n\t\t\tTrailing: \"0 Unknown\",\n\t\t})\n\t}\n\n\treplies = append(replies, &irc.Message{\n\t\tCommand: irc.RPL_ENDOFWHO,\n\t\tParams: []string{s.Nick, channel},\n\t\tTrailing: \"End of \/WHO list\",\n\t})\n\n\treturn replies\n}\n\nfunc cmdOper(s *Session, msg *irc.Message) interface{} {\n\t\/\/ TODO(secure): implement restriction to certain hosts once we have a\n\t\/\/ configuration file. (ERR_NOOPERHOST)\n\n\tif msg.Params[1] != NetworkPassword {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_PASSWDMISMATCH,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"Password incorrect\",\n\t\t}\n\t}\n\n\ts.Operator = true\n\n\treturn &irc.Message{\n\t\tCommand: irc.RPL_YOUREOPER,\n\t\tParams: []string{s.Nick},\n\t\tTrailing: \"You are now an IRC operator\",\n\t}\n}\n\nfunc cmdKill(s *Session, msg *irc.Message) interface{} {\n\tif strings.TrimSpace(msg.Trailing) == \"\" {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NEEDMOREPARAMS,\n\t\t\tParams: []string{s.Nick, msg.Command},\n\t\t\tTrailing: \"Not enough parameters\",\n\t\t}\n\t}\n\n\tif !s.Operator {\n\t\treturn &irc.Message{\n\t\t\tCommand: irc.ERR_NOPRIVILEGES,\n\t\t\tParams: []string{s.Nick},\n\t\t\tTrailing: \"Permission Denied - You're not an IRC operator\",\n\t\t}\n\t}\n\n\tfor _, session := range Sessions {\n\t\tif NickToLower(session.Nick) != NickToLower(msg.Params[0]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprefix := session.ircPrefix\n\t\tDeleteSession(session.Id)\n\t\treturn &irc.Message{\n\t\t\tPrefix: &prefix,\n\t\t\tCommand: irc.QUIT,\n\t\t\tTrailing: \"Killed by \" + s.Nick + \": \" + msg.Trailing,\n\t\t}\n\t}\n\n\treturn &irc.Message{\n\t\tCommand: irc.ERR_NOSUCHNICK,\n\t\tParams: []string{s.Nick, msg.Params[0]},\n\t\tTrailing: \"No such nick\/channel\",\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/api\/cloudwatch\"\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\t\"github.com\/grafana\/grafana\/pkg\/metrics\"\n\t\"github.com\/grafana\/grafana\/pkg\/middleware\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\"\n)\n\nvar (\n\tauditLogger log.Logger = log.New(\"data-proxy-log\")\n)\n\nfunc NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *httputil.ReverseProxy {\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = targetUrl.Scheme\n\t\treq.URL.Host = targetUrl.Host\n\t\treq.Host = targetUrl.Host\n\n\t\treqQueryVals := req.URL.Query()\n\n\t\tif ds.Type == m.DS_INFLUXDB_08 {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, \"db\/\"+ds.Database+\"\/\"+proxyPath)\n\t\t\treqQueryVals.Add(\"u\", ds.User)\n\t\t\treqQueryVals.Add(\"p\", ds.Password)\n\t\t\treq.URL.RawQuery = reqQueryVals.Encode()\n\t\t} else if ds.Type == m.DS_INFLUXDB {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)\n\t\t\treq.URL.RawQuery = reqQueryVals.Encode()\n\t\t\tif !ds.BasicAuth {\n\t\t\t\treq.Header.Del(\"Authorization\")\n\t\t\t\treq.Header.Add(\"Authorization\", util.GetBasicAuthHeader(ds.User, ds.Password))\n\t\t\t}\n\t\t} else {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)\n\t\t}\n\n\t\tif ds.BasicAuth {\n\t\t\treq.Header.Del(\"Authorization\")\n\t\t\treq.Header.Add(\"Authorization\", util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword))\n\t\t}\n\n\t\tdsAuth := req.Header.Get(\"X-DS-Authorization\")\n\t\tif len(dsAuth) > 0 {\n\t\t\treq.Header.Del(\"X-DS-Authorization\")\n\t\t\treq.Header.Del(\"Authorization\")\n\t\t\treq.Header.Add(\"Authorization\", dsAuth)\n\t\t}\n\n\t\t\/\/ clear cookie headers\n\t\treq.Header.Del(\"Cookie\")\n\t\treq.Header.Del(\"Set-Cookie\")\n\t}\n\n\treturn &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200}\n}\n\nfunc getDatasource(id int64, orgId int64) (*m.DataSource, error) {\n\tquery := m.GetDataSourceByIdQuery{Id: id, OrgId: orgId}\n\tif err := bus.Dispatch(&query); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn query.Result, nil\n}\n\nfunc ProxyDataSourceRequest(c *middleware.Context) {\n\tc.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)\n\n\tds, err := getDatasource(c.ParamsInt64(\":id\"), c.OrgId)\n\n\tif err != nil {\n\t\tc.JsonApiErr(500, \"Unable to load datasource meta data\", err)\n\t\treturn\n\t}\n\n\tif ds.Type == m.DS_CLOUDWATCH {\n\t\tcloudwatch.HandleRequest(c, ds)\n\t\treturn\n\t}\n\n\tif ds.Type == m.DS_INFLUXDB {\n\t\tif c.Query(\"db\") != ds.Database {\n\t\t\tc.JsonApiErr(403, \"Datasource is not configured to allow this database\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttargetUrl, _ := url.Parse(ds.Url)\n\tif len(setting.DataProxyWhiteList) > 0 {\n\t\tif _, exists := setting.DataProxyWhiteList[targetUrl.Host]; !exists {\n\t\t\tc.JsonApiErr(403, \"Data proxy hostname and ip are not included in whitelist\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tproxyPath := c.Params(\"*\")\n\n\tif ds.Type == m.DS_ES {\n\t\tif c.Req.Request.Method == \"DELETE\" {\n\t\t\tc.JsonApiErr(403, \"Deletes not allowed on proxied Elasticsearch datasource\", nil)\n\t\t\treturn\n\t\t}\n\t\tif c.Req.Request.Method == \"PUT\" {\n\t\t\tc.JsonApiErr(403, \"Puts not allowed on proxied Elasticsearch datasource\", nil)\n\t\t\treturn\n\t\t}\n\t\tif c.Req.Request.Method == \"POST\" && proxyPath != \"_msearch\" {\n\t\t\tc.JsonApiErr(403, \"Posts not allowed on proxied Elasticsearch datasource except on \/_msearch\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tproxy := NewReverseProxy(ds, proxyPath, targetUrl)\n\tproxy.Transport, err = ds.GetHttpTransport()\n\tif err != nil {\n\t\tc.JsonApiErr(400, \"Unable to load TLS certificate\", err)\n\t\treturn\n\t}\n\n\tproxyLog(ds.Type, c)\n\n\tproxy.ServeHTTP(c.Resp, c.Req.Request)\n\tc.Resp.Header().Del(\"Set-Cookie\")\n}\n\nfunc proxyLog(dataSourceType string, c *middleware.Context) {\n\tif setting.DataProxyLogging {\n\n\t\tvar body string\n\t\tif c.Req.Request.Body != nil {\n\t\t\tbuffer, _ := ioutil.ReadAll(c.Req.Request.Body)\n\t\t\tc.Req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(buffer))\n\t\t\tbody = string(buffer)\n\t\t}\n\n\t\tauditLogger.Info(\"Proxying incoming request\",\n\t\t\t\"userid\", c.UserId,\n\t\t\t\"orgid\", c.OrgId,\n\t\t\t\"username\", c.Login,\n\t\t\t\"datasource\", dataSourceType,\n\t\t\t\"uri\", c.Req.RequestURI,\n\t\t\t\"method\", c.Req.Request.Method,\n\t\t\t\"body\", body)\n\t}\n}\n<commit_msg>style(dataproxy): renames log functions<commit_after>package api\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/api\/cloudwatch\"\n\t\"github.com\/grafana\/grafana\/pkg\/bus\"\n\t\"github.com\/grafana\/grafana\/pkg\/log\"\n\t\"github.com\/grafana\/grafana\/pkg\/metrics\"\n\t\"github.com\/grafana\/grafana\/pkg\/middleware\"\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n\t\"github.com\/grafana\/grafana\/pkg\/util\"\n)\n\nvar (\n\tdataproxyLogger log.Logger = log.New(\"data-proxy-log\")\n)\n\nfunc NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *httputil.ReverseProxy {\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = targetUrl.Scheme\n\t\treq.URL.Host = targetUrl.Host\n\t\treq.Host = targetUrl.Host\n\n\t\treqQueryVals := req.URL.Query()\n\n\t\tif ds.Type == m.DS_INFLUXDB_08 {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, \"db\/\"+ds.Database+\"\/\"+proxyPath)\n\t\t\treqQueryVals.Add(\"u\", ds.User)\n\t\t\treqQueryVals.Add(\"p\", ds.Password)\n\t\t\treq.URL.RawQuery = reqQueryVals.Encode()\n\t\t} else if ds.Type == m.DS_INFLUXDB {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)\n\t\t\treq.URL.RawQuery = reqQueryVals.Encode()\n\t\t\tif !ds.BasicAuth {\n\t\t\t\treq.Header.Del(\"Authorization\")\n\t\t\t\treq.Header.Add(\"Authorization\", util.GetBasicAuthHeader(ds.User, ds.Password))\n\t\t\t}\n\t\t} else {\n\t\t\treq.URL.Path = util.JoinUrlFragments(targetUrl.Path, proxyPath)\n\t\t}\n\n\t\tif ds.BasicAuth {\n\t\t\treq.Header.Del(\"Authorization\")\n\t\t\treq.Header.Add(\"Authorization\", util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword))\n\t\t}\n\n\t\tdsAuth := req.Header.Get(\"X-DS-Authorization\")\n\t\tif len(dsAuth) > 0 {\n\t\t\treq.Header.Del(\"X-DS-Authorization\")\n\t\t\treq.Header.Del(\"Authorization\")\n\t\t\treq.Header.Add(\"Authorization\", dsAuth)\n\t\t}\n\n\t\t\/\/ clear cookie headers\n\t\treq.Header.Del(\"Cookie\")\n\t\treq.Header.Del(\"Set-Cookie\")\n\t}\n\n\treturn &httputil.ReverseProxy{Director: director, FlushInterval: time.Millisecond * 200}\n}\n\nfunc getDatasource(id int64, orgId int64) (*m.DataSource, error) {\n\tquery := m.GetDataSourceByIdQuery{Id: id, OrgId: orgId}\n\tif err := bus.Dispatch(&query); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn query.Result, nil\n}\n\nfunc ProxyDataSourceRequest(c *middleware.Context) {\n\tc.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)\n\n\tds, err := getDatasource(c.ParamsInt64(\":id\"), c.OrgId)\n\n\tif err != nil {\n\t\tc.JsonApiErr(500, \"Unable to load datasource meta data\", err)\n\t\treturn\n\t}\n\n\tif ds.Type == m.DS_CLOUDWATCH {\n\t\tcloudwatch.HandleRequest(c, ds)\n\t\treturn\n\t}\n\n\tif ds.Type == m.DS_INFLUXDB {\n\t\tif c.Query(\"db\") != ds.Database {\n\t\t\tc.JsonApiErr(403, \"Datasource is not configured to allow this database\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttargetUrl, _ := url.Parse(ds.Url)\n\tif len(setting.DataProxyWhiteList) > 0 {\n\t\tif _, exists := setting.DataProxyWhiteList[targetUrl.Host]; !exists {\n\t\t\tc.JsonApiErr(403, \"Data proxy hostname and ip are not included in whitelist\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tproxyPath := c.Params(\"*\")\n\n\tif ds.Type == m.DS_ES {\n\t\tif c.Req.Request.Method == \"DELETE\" {\n\t\t\tc.JsonApiErr(403, \"Deletes not allowed on proxied Elasticsearch datasource\", nil)\n\t\t\treturn\n\t\t}\n\t\tif c.Req.Request.Method == \"PUT\" {\n\t\t\tc.JsonApiErr(403, \"Puts not allowed on proxied Elasticsearch datasource\", nil)\n\t\t\treturn\n\t\t}\n\t\tif c.Req.Request.Method == \"POST\" && proxyPath != \"_msearch\" {\n\t\t\tc.JsonApiErr(403, \"Posts not allowed on proxied Elasticsearch datasource except on \/_msearch\", nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tproxy := NewReverseProxy(ds, proxyPath, targetUrl)\n\tproxy.Transport, err = ds.GetHttpTransport()\n\tif err != nil {\n\t\tc.JsonApiErr(400, \"Unable to load TLS certificate\", err)\n\t\treturn\n\t}\n\n\tlogProxyRequest(ds.Type, c)\n\tproxy.ServeHTTP(c.Resp, c.Req.Request)\n\tc.Resp.Header().Del(\"Set-Cookie\")\n}\n\nfunc logProxyRequest(dataSourceType string, c *middleware.Context) {\n\tif !setting.DataProxyLogging {\n\t\treturn\n\t}\n\n\t\tvar body string\n\t\tif c.Req.Request.Body != nil {\n\t\t\tbuffer, _ := ioutil.ReadAll(c.Req.Request.Body)\n\t\t\tc.Req.Request.Body = ioutil.NopCloser(bytes.NewBuffer(buffer))\n\t\t\tbody = string(buffer)\n\t\t}\n\t}\n\n\tdataproxyLogger.Info(\"Proxying incoming request\",\n\t\t\"userid\", c.UserId,\n\t\t\"orgid\", c.OrgId,\n\t\t\"username\", c.Login,\n\t\t\"datasource\", dataSourceType,\n\t\t\"uri\", c.Req.RequestURI,\n\t\t\"method\", c.Req.Request.Method,\n\t\t\"body\", body)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Istio Authors. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage build\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"istio.io\/release-builder\/pkg\/model\"\n\t\"istio.io\/release-builder\/pkg\/util\"\n)\n\n\/\/ Archive creates the release archive that users will download. This includes the installation templates,\n\/\/ istioctl, and various tools.\nfunc Archive(manifest model.Manifest) error {\n\t\/\/ First, build all variants of istioctl (linux, osx, windows)\n\tif err := util.RunMake(manifest, \"istio\", nil, \"istioctl-all\", \"istioctl.completion\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to make istioctl: %v\", err)\n\t}\n\t\/\/ Generate the demo rendered yaml file\n\tif err := util.RunMake(manifest, \"istio\", nil, \"istio-demo.yaml\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to make istio-demo.yaml: %v\", err)\n\t}\n\n\t\/\/ We build archives for each arch. These contain the same thing except arch specific istioctl\n\tfor _, arch := range []string{\"linux\", \"osx\", \"win\"} {\n\t\tout := path.Join(manifest.Directory, \"work\", \"archive\", arch, fmt.Sprintf(\"istio-%s\", manifest.Version))\n\t\tif err := os.MkdirAll(out, 0750); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Some files we just directly copy into the release archive\n\t\tdirectCopies := []string{\n\t\t\t\"LICENSE\",\n\t\t\t\"README.md\",\n\n\t\t\t\/\/ Setup tools. The tools\/ folder contains a bunch of extra junk, so just select exactly what we want\n\t\t\t\"tools\/convert_RbacConfig_to_ClusterRbacConfig.sh\",\n\t\t\t\"tools\/dump_kubernetes.sh\",\n\t\t}\n\t\tfor _, file := range directCopies {\n\t\t\tif err := util.CopyFile(path.Join(manifest.RepoDir(\"istio\"), file), path.Join(out, file)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Set up install and samples. We filter down to only some file patterns\n\t\t\/\/ TODO - clean this up. We probably include files we don't want and exclude files we do want.\n\t\tincludePatterns := []string{\"*.yaml\", \"*.md\", \"cleanup.sh\", \"*.txt\", \"*.pem\", \"*.conf\", \"*.tpl\", \"*.json\"}\n\t\tif err := util.CopyDirFiltered(path.Join(manifest.RepoDir(\"istio\"), \"samples\"), path.Join(out, \"samples\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := util.CopyDirFiltered(path.Join(manifest.RepoDir(\"istio\"), \"install\"), path.Join(out, \"install\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcniSource := path.Join(manifest.RepoDir(\"cni\"), \"deployments\/kubernetes\/install\/helm\/istio-cni\")\n\t\tif err := util.CopyDirFiltered(cniSource, path.Join(out, \"install\/kubernetes\/helm\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := util.VerboseCommand(\".\/release\/create_release_charts.sh\", \"-o\", path.Join(out, \"install\/kubernetes\/operator\"))\n\t\tcmd.Dir = manifest.RepoDir(\"operator\")\n\t\tcmd.Env = util.StandardEnv(manifest)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sanitizeTemplate(manifest, path.Join(out, \"install\/kubernetes\/operator\/profiles\/default.yaml\")); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to sanitize charts\")\n\t\t}\n\t\tif err := util.CopyDir(path.Join(manifest.RepoDir(\"operator\"), \"deploy\"), path.Join(out, \"install\/kubernetes\/operator\/deploy\")); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sanitizeTemplate(manifest, path.Join(out, \"install\/kubernetes\/operator\/deploy\/operator.yaml\")); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to sanitize operator manifest\")\n\t\t}\n\t\tif err := util.CopyDir(path.Join(manifest.RepoDir(\"operator\"), \"samples\"), path.Join(out, \"samples\/operator\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Write manifest\n\t\tif err := writeManifest(manifest, out); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write manifest: %v\", err)\n\t\t}\n\n\t\t\/\/ Copy the istioctl binary over\n\t\tistioctlBinary := fmt.Sprintf(\"istioctl-%s\", arch)\n\t\tistioctlDest := \"istioctl\"\n\t\tif arch == \"win\" {\n\t\t\tistioctlBinary += \".exe\"\n\t\t\tistioctlDest += \".exe\"\n\t\t}\n\t\tif err := util.CopyFile(path.Join(manifest.GoOutDir(), istioctlBinary), path.Join(out, \"bin\", istioctlDest)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chmod(path.Join(out, \"bin\", istioctlDest), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Copy the istioctl completions files to the tools directory\n\t\tcompletionFiles := []string{\"istioctl.bash\", \"_istioctl\"}\n\t\tfor _, file := range completionFiles {\n\t\t\tif err := util.CopyFile(path.Join(manifest.GoOutDir(), file), path.Join(out, \"tools\", file)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := createArchive(arch, manifest, out); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := createStandaloneIstioctl(arch, manifest, out); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createStandaloneIstioctl(arch string, manifest model.Manifest, out string) error {\n\tvar istioctlArchive string\n\t\/\/ Create a stand alone archive for istioctl\n\t\/\/ Windows should use zip, linux and osx tar\n\tif arch == \"win\" {\n\t\tistioctlArchive = fmt.Sprintf(\"istioctl-%s-%s.zip\", manifest.Version, arch)\n\t\tif err := util.ZipFolder(path.Join(out, \"bin\", \"istioctl.exe\"), path.Join(out, \"bin\", istioctlArchive)); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to zip istioctl: %v\", err)\n\t\t}\n\t} else {\n\t\tistioctlArchive = fmt.Sprintf(\"istioctl-%s-%s.tar.gz\", manifest.Version, arch)\n\t\ticmd := util.VerboseCommand(\"tar\", \"-czf\", istioctlArchive, fmt.Sprintf(\"istioctl\"))\n\t\ticmd.Dir = path.Join(out, \"bin\")\n\t\tif err := icmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to tar istioctl: %v\", err)\n\t\t}\n\t}\n\t\/\/ Copy files over to the output directory\n\tarchivePath := path.Join(out, \"bin\", istioctlArchive)\n\tdest := path.Join(manifest.OutDir(), istioctlArchive)\n\tif err := util.CopyFile(archivePath, dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v release archive: %v\", arch, err)\n\t}\n\n\t\/\/ Create a SHA of the archive\n\tif err := util.CreateSha(dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v: %v\", dest, err)\n\t}\n\treturn nil\n}\n\nfunc createArchive(arch string, manifest model.Manifest, out string) error {\n\tvar archive string\n\t\/\/ Create the archive from all the above files\n\t\/\/ Windows should use zip, linux and osx tar\n\tif arch == \"win\" {\n\t\tarchive = fmt.Sprintf(\"istio-%s-%s.zip\", manifest.Version, arch)\n\t\tif err := util.ZipFolder(path.Join(out, \"..\", fmt.Sprintf(\"istio-%s\", manifest.Version)), path.Join(out, \"..\", archive)); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to zip istioctl: %v\", err)\n\t\t}\n\t} else {\n\t\tarchive = fmt.Sprintf(\"istio-%s-%s.tar.gz\", manifest.Version, arch)\n\t\tcmd := util.VerboseCommand(\"tar\", \"-czf\", archive, fmt.Sprintf(\"istio-%s\", manifest.Version))\n\t\tcmd.Dir = path.Join(out, \"..\")\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Copy files over to the output directory\n\tarchivePath := path.Join(manifest.WorkDir(), \"archive\", arch, archive)\n\tdest := path.Join(manifest.OutDir(), archive)\n\tif err := util.CopyFile(archivePath, dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v release archive: %v\", arch, err)\n\t}\n\t\/\/ Create a SHA of the archive\n\tif err := util.CreateSha(dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v: %v\", dest, err)\n\t}\n\treturn nil\n}\n<commit_msg>Package istioctl in new makefile model (#106)<commit_after>\/\/ Copyright Istio Authors. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage build\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"istio.io\/release-builder\/pkg\/model\"\n\t\"istio.io\/release-builder\/pkg\/util\"\n)\n\n\/\/ Archive creates the release archive that users will download. This includes the installation templates,\n\/\/ istioctl, and various tools.\nfunc Archive(manifest model.Manifest) error {\n\t\/\/ First, build all variants of istioctl (linux, osx, windows)\n\tif err := util.RunMake(manifest, \"istio\", nil, \"istioctl-all\", \"istioctl.completion\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to make istioctl: %v\", err)\n\t}\n\t\/\/ Generate the demo rendered yaml file\n\tif err := util.RunMake(manifest, \"istio\", nil, \"istio-demo.yaml\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to make istio-demo.yaml: %v\", err)\n\t}\n\n\t\/\/ We build archives for each arch. These contain the same thing except arch specific istioctl\n\tfor _, arch := range []string{\"linux\", \"osx\", \"win\"} {\n\t\tout := path.Join(manifest.Directory, \"work\", \"archive\", arch, fmt.Sprintf(\"istio-%s\", manifest.Version))\n\t\tif err := os.MkdirAll(out, 0750); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Some files we just directly copy into the release archive\n\t\tdirectCopies := []string{\n\t\t\t\"LICENSE\",\n\t\t\t\"README.md\",\n\n\t\t\t\/\/ Setup tools. The tools\/ folder contains a bunch of extra junk, so just select exactly what we want\n\t\t\t\"tools\/convert_RbacConfig_to_ClusterRbacConfig.sh\",\n\t\t\t\"tools\/dump_kubernetes.sh\",\n\t\t}\n\t\tfor _, file := range directCopies {\n\t\t\tif err := util.CopyFile(path.Join(manifest.RepoDir(\"istio\"), file), path.Join(out, file)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Set up install and samples. We filter down to only some file patterns\n\t\t\/\/ TODO - clean this up. We probably include files we don't want and exclude files we do want.\n\t\tincludePatterns := []string{\"*.yaml\", \"*.md\", \"cleanup.sh\", \"*.txt\", \"*.pem\", \"*.conf\", \"*.tpl\", \"*.json\"}\n\t\tif err := util.CopyDirFiltered(path.Join(manifest.RepoDir(\"istio\"), \"samples\"), path.Join(out, \"samples\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := util.CopyDirFiltered(path.Join(manifest.RepoDir(\"istio\"), \"install\"), path.Join(out, \"install\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcniSource := path.Join(manifest.RepoDir(\"cni\"), \"deployments\/kubernetes\/install\/helm\/istio-cni\")\n\t\tif err := util.CopyDirFiltered(cniSource, path.Join(out, \"install\/kubernetes\/helm\"), includePatterns); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := util.VerboseCommand(\".\/release\/create_release_charts.sh\", \"-o\", path.Join(out, \"install\/kubernetes\/operator\"))\n\t\tcmd.Dir = manifest.RepoDir(\"operator\")\n\t\tcmd.Env = util.StandardEnv(manifest)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sanitizeTemplate(manifest, path.Join(out, \"install\/kubernetes\/operator\/profiles\/default.yaml\")); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to sanitize charts\")\n\t\t}\n\t\tif err := util.CopyDir(path.Join(manifest.RepoDir(\"operator\"), \"deploy\"), path.Join(out, \"install\/kubernetes\/operator\/deploy\")); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sanitizeTemplate(manifest, path.Join(out, \"install\/kubernetes\/operator\/deploy\/operator.yaml\")); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to sanitize operator manifest\")\n\t\t}\n\t\tif err := util.CopyDir(path.Join(manifest.RepoDir(\"operator\"), \"samples\"), path.Join(out, \"samples\/operator\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Write manifest\n\t\tif err := writeManifest(manifest, out); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write manifest: %v\", err)\n\t\t}\n\n\t\t\/\/ Copy the istioctl binary over\n\t\tistioctlBinary := fmt.Sprintf(\"istioctl-%s\", arch)\n\t\tistioctlDest := \"istioctl\"\n\t\tif arch == \"win\" {\n\t\t\tistioctlBinary += \".exe\"\n\t\t\tistioctlDest += \".exe\"\n\t\t}\n\t\tif err := util.CopyFile(path.Join(manifest.RepoOutDir(\"istio\"), istioctlBinary), path.Join(out, \"bin\", istioctlDest)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Chmod(path.Join(out, \"bin\", istioctlDest), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Copy the istioctl completions files to the tools directory\n\t\tcompletionFiles := []string{\"istioctl.bash\", \"_istioctl\"}\n\t\tfor _, file := range completionFiles {\n\t\t\tif err := util.CopyFile(path.Join(manifest.GoOutDir(), file), path.Join(out, \"tools\", file)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := createArchive(arch, manifest, out); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := createStandaloneIstioctl(arch, manifest, out); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createStandaloneIstioctl(arch string, manifest model.Manifest, out string) error {\n\tvar istioctlArchive string\n\t\/\/ Create a stand alone archive for istioctl\n\t\/\/ Windows should use zip, linux and osx tar\n\tif arch == \"win\" {\n\t\tistioctlArchive = fmt.Sprintf(\"istioctl-%s-%s.zip\", manifest.Version, arch)\n\t\tif err := util.ZipFolder(path.Join(out, \"bin\", \"istioctl.exe\"), path.Join(out, \"bin\", istioctlArchive)); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to zip istioctl: %v\", err)\n\t\t}\n\t} else {\n\t\tistioctlArchive = fmt.Sprintf(\"istioctl-%s-%s.tar.gz\", manifest.Version, arch)\n\t\ticmd := util.VerboseCommand(\"tar\", \"-czf\", istioctlArchive, fmt.Sprintf(\"istioctl\"))\n\t\ticmd.Dir = path.Join(out, \"bin\")\n\t\tif err := icmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to tar istioctl: %v\", err)\n\t\t}\n\t}\n\t\/\/ Copy files over to the output directory\n\tarchivePath := path.Join(out, \"bin\", istioctlArchive)\n\tdest := path.Join(manifest.OutDir(), istioctlArchive)\n\tif err := util.CopyFile(archivePath, dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v release archive: %v\", arch, err)\n\t}\n\n\t\/\/ Create a SHA of the archive\n\tif err := util.CreateSha(dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v: %v\", dest, err)\n\t}\n\treturn nil\n}\n\nfunc createArchive(arch string, manifest model.Manifest, out string) error {\n\tvar archive string\n\t\/\/ Create the archive from all the above files\n\t\/\/ Windows should use zip, linux and osx tar\n\tif arch == \"win\" {\n\t\tarchive = fmt.Sprintf(\"istio-%s-%s.zip\", manifest.Version, arch)\n\t\tif err := util.ZipFolder(path.Join(out, \"..\", fmt.Sprintf(\"istio-%s\", manifest.Version)), path.Join(out, \"..\", archive)); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to zip istioctl: %v\", err)\n\t\t}\n\t} else {\n\t\tarchive = fmt.Sprintf(\"istio-%s-%s.tar.gz\", manifest.Version, arch)\n\t\tcmd := util.VerboseCommand(\"tar\", \"-czf\", archive, fmt.Sprintf(\"istio-%s\", manifest.Version))\n\t\tcmd.Dir = path.Join(out, \"..\")\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Copy files over to the output directory\n\tarchivePath := path.Join(manifest.WorkDir(), \"archive\", arch, archive)\n\tdest := path.Join(manifest.OutDir(), archive)\n\tif err := util.CopyFile(archivePath, dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v release archive: %v\", arch, err)\n\t}\n\t\/\/ Create a SHA of the archive\n\tif err := util.CreateSha(dest); err != nil {\n\t\treturn fmt.Errorf(\"failed to package %v: %v\", dest, err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"github.com\/openshift\/source-to-image\/pkg\/api\"\n\t\"github.com\/openshift\/source-to-image\/pkg\/docker\"\n\t\"github.com\/openshift\/source-to-image\/pkg\/util\"\n\tutilglog \"github.com\/openshift\/source-to-image\/pkg\/util\/glog\"\n)\n\nvar glog = utilglog.StderrLog\n\n\/\/ DefaultCleaner provides a cleaner for most STI build use-cases. It cleans the\n\/\/ temporary directories created by STI build and it also cleans the temporary\n\/\/ Docker images produced by LayeredBuild\ntype DefaultCleaner struct {\n\tfs util.FileSystem\n\tdocker docker.Docker\n}\n\n\/\/ NewDefaultCleaner creates a new instance of the default Cleaner implementation\nfunc NewDefaultCleaner(fs util.FileSystem, docker docker.Docker) Cleaner {\n\treturn &DefaultCleaner{\n\t\tfs: fs,\n\t\tdocker: docker,\n\t}\n}\n\n\/\/ Cleanup removes the temporary directories where the sources were stored for build.\nfunc (c *DefaultCleaner) Cleanup(config *api.Config) {\n\tif config.PreserveWorkingDir {\n\t\tglog.Infof(\"Temporary directory %q will be saved, not deleted\", config.WorkingDir)\n\t} else {\n\t\tglog.V(2).Infof(\"Removing temporary directory %s\", config.WorkingDir)\n\t\tif err := c.fs.RemoveDirectory(config.WorkingDir); err != nil {\n\t\t\tglog.Warningf(\"Error removing temporary directory %q: %v\", config.WorkingDir, err)\n\t\t}\n\t}\n\tif config.LayeredBuild {\n\t\t\/\/ config.LayeredBuild is true only when layered build was finished successfully.\n\t\t\/\/ Also in this case config.BuilderImage contains name of the new just built image,\n\t\t\/\/ not the original one that was specified by the user.\n\t\tglog.V(2).Infof(\"Removing temporary image %s\", config.BuilderImage)\n\t\tif err := c.docker.RemoveImage(config.BuilderImage); err != nil {\n\t\t\tglog.Warningf(\"Error removing temporary image %s: %v\", config.BuilderImage, err)\n\t\t}\n\t}\n}\n<commit_msg>reduce loglevel of temp directory cleanup message<commit_after>package build\n\nimport (\n\t\"github.com\/openshift\/source-to-image\/pkg\/api\"\n\t\"github.com\/openshift\/source-to-image\/pkg\/docker\"\n\t\"github.com\/openshift\/source-to-image\/pkg\/util\"\n\tutilglog \"github.com\/openshift\/source-to-image\/pkg\/util\/glog\"\n)\n\nvar glog = utilglog.StderrLog\n\n\/\/ DefaultCleaner provides a cleaner for most STI build use-cases. It cleans the\n\/\/ temporary directories created by STI build and it also cleans the temporary\n\/\/ Docker images produced by LayeredBuild\ntype DefaultCleaner struct {\n\tfs util.FileSystem\n\tdocker docker.Docker\n}\n\n\/\/ NewDefaultCleaner creates a new instance of the default Cleaner implementation\nfunc NewDefaultCleaner(fs util.FileSystem, docker docker.Docker) Cleaner {\n\treturn &DefaultCleaner{\n\t\tfs: fs,\n\t\tdocker: docker,\n\t}\n}\n\n\/\/ Cleanup removes the temporary directories where the sources were stored for build.\nfunc (c *DefaultCleaner) Cleanup(config *api.Config) {\n\tif config.PreserveWorkingDir {\n\t\tglog.V(2).Infof(\"Temporary directory %q will be saved, not deleted\", config.WorkingDir)\n\t} else {\n\t\tglog.V(2).Infof(\"Removing temporary directory %s\", config.WorkingDir)\n\t\tif err := c.fs.RemoveDirectory(config.WorkingDir); err != nil {\n\t\t\tglog.Warningf(\"Error removing temporary directory %q: %v\", config.WorkingDir, err)\n\t\t}\n\t}\n\tif config.LayeredBuild {\n\t\t\/\/ config.LayeredBuild is true only when layered build was finished successfully.\n\t\t\/\/ Also in this case config.BuilderImage contains name of the new just built image,\n\t\t\/\/ not the original one that was specified by the user.\n\t\tglog.V(2).Infof(\"Removing temporary image %s\", config.BuilderImage)\n\t\tif err := c.docker.RemoveImage(config.BuilderImage); err != nil {\n\t\t\tglog.Warningf(\"Error removing temporary image %s: %v\", config.BuilderImage, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage build\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n)\n\ntype freebsd struct{}\n\nfunc (ctx freebsd) build(params *Params) error {\n\tconfDir := fmt.Sprintf(\"%v\/sys\/%v\/conf\/\", params.KernelDir, params.TargetArch)\n\tconfFile := \"SYZKALLER\"\n\n\tconfig := params.Config\n\tif config == nil {\n\t\tconfig = []byte(`\ninclude \".\/GENERIC\"\n\nident\t\tSYZKALLER\noptions \tCOVERAGE\noptions \tKCOV\n\noptions \tTCPHPTS\noptions \tRATELIMIT\n\noptions \tDEBUG_VFS_LOCKS\noptions \tDIAGNOSTIC\noptions \tDEBUG_REDZONE\n`)\n\t}\n\tif err := osutil.WriteFile(filepath.Join(confDir, confFile), config); err != nil {\n\t\treturn err\n\t}\n\n\tobjPrefix := filepath.Join(params.KernelDir, \"obj\")\n\tif err := ctx.make(params.KernelDir, objPrefix, \"kernel-toolchain\", \"-DNO_CLEAN\"); err != nil {\n\t\treturn err\n\t}\n\tif err := ctx.make(params.KernelDir, objPrefix, \"buildkernel\", \"WITH_EXTRA_TCP_STACKS=\",\n\t\tfmt.Sprintf(\"KERNCONF=%v\", confFile)); err != nil {\n\t\treturn err\n\t}\n\n\tkernelObjDir := filepath.Join(objPrefix, params.KernelDir,\n\t\tfmt.Sprintf(\"%v.%v\", params.TargetArch, params.TargetArch), \"sys\", confFile)\n\tfor _, s := range []struct{ dir, src, dst string }{\n\t\t{params.UserspaceDir, \"image\", \"image\"},\n\t\t{params.UserspaceDir, \"key\", \"key\"},\n\t\t{kernelObjDir, \"kernel.full\", \"obj\/kernel.full\"},\n\t} {\n\t\tfullSrc := filepath.Join(s.dir, s.src)\n\t\tfullDst := filepath.Join(params.OutputDir, s.dst)\n\t\tif err := osutil.CopyFile(fullSrc, fullDst); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to copy %v -> %v: %v\", fullSrc, fullDst, err)\n\t\t}\n\t}\n\n\tscript := fmt.Sprintf(`\nset -eux\nmd=$(sudo mdconfig -a -t vnode image)\npartn=$(gpart show \/dev\/${md} | awk '\/freebsd-ufs\/{print $3}' | head -n 1)\ntmpdir=$(mktemp -d)\nsudo mount \/dev\/${md}p${partn} $tmpdir\n\nsudo MAKEOBJDIRPREFIX=%s make -C %s installkernel WITH_EXTRA_TCP_STACKS= KERNCONF=%s DESTDIR=$tmpdir\n\ncat | sudo tee ${tmpdir}\/boot\/loader.conf.local <<__EOF__\nipsec_load=\"YES\"\npf_load=\"YES\"\nsctp_load=\"YES\"\ntcp_bbr_load=\"YES\"\ntcp_rack_load=\"YES\"\nsem_load=\"YES\"\nmqueuefs_load=\"YES\"\ncryptodev_load=\"YES\"\ncc_cdg_load=\"YES\"\ncc_chd_load=\"YES\"\ncc_cubic_load=\"YES\"\ncc_dctcp_load=\"YES\"\ncc_hd_load=\"YES\"\ncc_htcp_load=\"YES\"\ncc_vegas_load=\"YES\"\n\nkern.ipc.tls.enable=\"1\"\nvm.panic_on_oom=\"1\"\n__EOF__\n\ncat | sudo tee -a ${tmpdir}\/etc\/sysctl.conf <<__EOF__\nnet.inet.sctp.udp_tunneling_port=9899\nnet.inet.tcp.udp_tunneling_port=9811\nvm.redzone.panic=1\n__EOF__\n\nsudo umount $tmpdir\nsudo mdconfig -d -u ${md#md}\n`, objPrefix, params.KernelDir, confFile)\n\n\tif debugOut, err := osutil.RunCmd(10*time.Minute, params.OutputDir, \"\/bin\/sh\", \"-c\", script); err != nil {\n\t\treturn fmt.Errorf(\"error copying kernel: %v\\n%v\", err, debugOut)\n\t}\n\treturn nil\n}\n\nfunc (ctx freebsd) clean(kernelDir, targetArch string) error {\n\tobjPrefix := filepath.Join(kernelDir, \"obj\")\n\treturn ctx.make(kernelDir, objPrefix, \"cleanworld\")\n}\n\nfunc (ctx freebsd) make(kernelDir, objPrefix string, makeArgs ...string) error {\n\targs := append([]string{\n\t\tfmt.Sprintf(\"MAKEOBJDIRPREFIX=%v\", objPrefix),\n\t\t\"make\",\n\t\t\"-C\", kernelDir,\n\t\t\"-j\", strconv.Itoa(runtime.NumCPU()),\n\t}, makeArgs...)\n\t_, err := osutil.RunCmd(3*time.Hour, kernelDir, \"sh\", \"-c\", strings.Join(args, \" \"))\n\treturn err\n}\n<commit_msg>pkg\/build: enable KASAN in the default FreeBSD kernel config<commit_after>\/\/ Copyright 2019 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage build\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n)\n\ntype freebsd struct{}\n\nfunc (ctx freebsd) build(params *Params) error {\n\tconfDir := fmt.Sprintf(\"%v\/sys\/%v\/conf\/\", params.KernelDir, params.TargetArch)\n\tconfFile := \"SYZKALLER\"\n\n\tconfig := params.Config\n\tif config == nil {\n\t\tconfig = []byte(`\ninclude \".\/GENERIC\"\n\nident\t\tSYZKALLER\noptions \tCOVERAGE\noptions \tKCOV\noptions \tKASAN\n\noptions \tTCPHPTS\noptions \tRATELIMIT\n\noptions \tDEBUG_VFS_LOCKS\noptions \tDIAGNOSTIC\noptions \tDEBUG_REDZONE\n`)\n\t}\n\tif err := osutil.WriteFile(filepath.Join(confDir, confFile), config); err != nil {\n\t\treturn err\n\t}\n\n\tobjPrefix := filepath.Join(params.KernelDir, \"obj\")\n\tif err := ctx.make(params.KernelDir, objPrefix, \"kernel-toolchain\", \"-DNO_CLEAN\"); err != nil {\n\t\treturn err\n\t}\n\tif err := ctx.make(params.KernelDir, objPrefix, \"buildkernel\", \"WITH_EXTRA_TCP_STACKS=\",\n\t\tfmt.Sprintf(\"KERNCONF=%v\", confFile)); err != nil {\n\t\treturn err\n\t}\n\n\tkernelObjDir := filepath.Join(objPrefix, params.KernelDir,\n\t\tfmt.Sprintf(\"%v.%v\", params.TargetArch, params.TargetArch), \"sys\", confFile)\n\tfor _, s := range []struct{ dir, src, dst string }{\n\t\t{params.UserspaceDir, \"image\", \"image\"},\n\t\t{params.UserspaceDir, \"key\", \"key\"},\n\t\t{kernelObjDir, \"kernel.full\", \"obj\/kernel.full\"},\n\t} {\n\t\tfullSrc := filepath.Join(s.dir, s.src)\n\t\tfullDst := filepath.Join(params.OutputDir, s.dst)\n\t\tif err := osutil.CopyFile(fullSrc, fullDst); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to copy %v -> %v: %v\", fullSrc, fullDst, err)\n\t\t}\n\t}\n\n\tscript := fmt.Sprintf(`\nset -eux\nmd=$(sudo mdconfig -a -t vnode image)\npartn=$(gpart show \/dev\/${md} | awk '\/freebsd-ufs\/{print $3}' | head -n 1)\ntmpdir=$(mktemp -d)\nsudo mount \/dev\/${md}p${partn} $tmpdir\n\nsudo MAKEOBJDIRPREFIX=%s make -C %s installkernel WITH_EXTRA_TCP_STACKS= KERNCONF=%s DESTDIR=$tmpdir\n\ncat | sudo tee ${tmpdir}\/boot\/loader.conf.local <<__EOF__\nipsec_load=\"YES\"\npf_load=\"YES\"\nsctp_load=\"YES\"\ntcp_bbr_load=\"YES\"\ntcp_rack_load=\"YES\"\nsem_load=\"YES\"\nmqueuefs_load=\"YES\"\ncryptodev_load=\"YES\"\ncc_cdg_load=\"YES\"\ncc_chd_load=\"YES\"\ncc_cubic_load=\"YES\"\ncc_dctcp_load=\"YES\"\ncc_hd_load=\"YES\"\ncc_htcp_load=\"YES\"\ncc_vegas_load=\"YES\"\n\nkern.ipc.tls.enable=\"1\"\nvm.panic_on_oom=\"1\"\n__EOF__\n\ncat | sudo tee -a ${tmpdir}\/etc\/sysctl.conf <<__EOF__\nnet.inet.sctp.udp_tunneling_port=9899\nnet.inet.tcp.udp_tunneling_port=9811\nvm.redzone.panic=1\n__EOF__\n\nsudo umount $tmpdir\nsudo mdconfig -d -u ${md#md}\n`, objPrefix, params.KernelDir, confFile)\n\n\tif debugOut, err := osutil.RunCmd(10*time.Minute, params.OutputDir, \"\/bin\/sh\", \"-c\", script); err != nil {\n\t\treturn fmt.Errorf(\"error copying kernel: %v\\n%v\", err, debugOut)\n\t}\n\treturn nil\n}\n\nfunc (ctx freebsd) clean(kernelDir, targetArch string) error {\n\tobjPrefix := filepath.Join(kernelDir, \"obj\")\n\treturn ctx.make(kernelDir, objPrefix, \"cleanworld\")\n}\n\nfunc (ctx freebsd) make(kernelDir, objPrefix string, makeArgs ...string) error {\n\targs := append([]string{\n\t\tfmt.Sprintf(\"MAKEOBJDIRPREFIX=%v\", objPrefix),\n\t\t\"make\",\n\t\t\"-C\", kernelDir,\n\t\t\"-j\", strconv.Itoa(runtime.NumCPU()),\n\t}, makeArgs...)\n\t_, err := osutil.RunCmd(3*time.Hour, kernelDir, \"sh\", \"-c\", strings.Join(args, \" \"))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/mobingilabs\/mocli\/pkg\/iohelper\"\n\t\"github.com\/mobingilabs\/mocli\/pkg\/pretty\"\n)\n\ntype cliConfig struct {\n\tRunEnv string `json:\"runenv\"`\n\tVerbose bool `json:\"verbose\"`\n}\n\nfunc SetDefaultCliConfig(f string) error {\n\tdefcfg := cliConfig{RunEnv: \"prod\"}\n\tcontents, err := json.MarshalIndent(defcfg, \"\", pretty.Indent(3))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn iohelper.WriteToFile(f, contents)\n}\n<commit_msg>Make config public.<commit_after>package cli\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/mobingilabs\/mocli\/pkg\/iohelper\"\n\t\"github.com\/mobingilabs\/mocli\/pkg\/pretty\"\n)\n\ntype CliConfig struct {\n\tRunEnv string `json:\"runenv\"`\n\tVerbose bool `json:\"verbose\"`\n}\n\nfunc SetDefaultCliConfig(f string) error {\n\tdefcfg := CliConfig{RunEnv: \"prod\"}\n\tcontents, err := json.MarshalIndent(defcfg, \"\", pretty.Indent(3))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn iohelper.WriteToFile(f, contents)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage command\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"github.com\/neovim\/go-client\/nvim\"\n\t\"github.com\/pkg\/errors\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/zchee\/nvim-go\/pkg\/config\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/fs\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/logger\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/monitoring\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/nvimutil\"\n)\n\n\/\/ CmdBuildEval struct type for Eval of GoBuild command.\ntype CmdBuildEval struct {\n\tCwd string `msgpack:\",array\"`\n\tFile string\n}\n\nfunc (c *Command) cmdBuild(ctx context.Context, args []string, bang bool, eval *CmdBuildEval) {\n\terrch := make(chan interface{}, 1)\n\tgo func() {\n\t\terrch <- c.Build(ctx, args, bang, eval)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase err := <-errch:\n\t\tswitch e := err.(type) {\n\t\tcase error:\n\t\t\tnvimutil.ErrorWrap(c.Nvim, e)\n\t\tcase []*nvim.QuickfixError:\n\t\t\tc.errs.Store(\"Build\", e)\n\t\t\terrlist := make(map[string][]*nvim.QuickfixError)\n\t\t\tc.errs.Range(func(ki, vi interface{}) bool {\n\t\t\t\tk, v := ki.(string), vi.([]*nvim.QuickfixError)\n\t\t\t\terrlist[k] = append(errlist[k], v...)\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tnvimutil.ErrorList(c.Nvim, errlist, true)\n\t\tcase nil:\n\t\t\t\/\/ nothing to do\n\t\t}\n\t}\n}\n\n\/\/ Build builds the current buffers package use compile tool that determined\n\/\/ from the package directory structure.\nfunc (c *Command) Build(pctx context.Context, args []string, bang bool, eval *CmdBuildEval) interface{} {\n\tctx, span := monitoring.StartSpan(pctx, \"Build\")\n\tdefer span.End()\n\n\tlog := logger.FromContext(ctx).With(zap.Strings(\"args\", args), zap.Bool(\"bang\", bang), zap.Any(\"CmdBuildEval\", eval))\n\tif !bang {\n\t\tbang = config.BuildForce\n\t}\n\n\tcmd, err := c.compileCmd(ctx, args, bang, eval.Cwd)\n\tif err != nil {\n\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: err.Error()})\n\t\treturn errors.WithStack(err)\n\t}\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\tlog.Debug(\"command.Build\",\n\t\tzap.Strings(\"cmd.Args\", cmd.Args),\n\t\tzap.String(\"cmd.Path\", cmd.Path),\n\t\tzap.String(\"cmd.Dir\", cmd.Dir),\n\t\tzap.Any(\"cmd.SysProcAttr\", cmd.SysProcAttr),\n\t\tzap.Any(\"cmd.Process\", cmd.Process),\n\t\tzap.Any(\"cmd.ProcessState\", cmd.ProcessState))\n\n\tos.Unsetenv(\"GO111MODULE\")\n\n\tif buildErr := cmd.Run(); buildErr != nil {\n\t\tif err, ok := buildErr.(*exec.ExitError); ok && err != nil {\n\t\t\terrlist, err := nvimutil.ParseError(ctx, stderr.Bytes(), eval.Cwd, &c.buildContext.Build, nil)\n\t\t\tif err != nil {\n\t\t\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: err.Error()})\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t\treturn errlist\n\t\t}\n\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: buildErr.Error()})\n\t\treturn errors.WithStack(buildErr)\n\t}\n\n\treturn nvimutil.EchoSuccess(c.Nvim, \"GoBuild\", fmt.Sprintf(\"compiler: %s\", c.buildContext.Build.Tool))\n}\n\n\/\/ compileCmd returns the *exec.Cmd corresponding to the compile tool.\nfunc (c *Command) compileCmd(pctx context.Context, args []string, bang bool, dir string) (*exec.Cmd, error) {\n\tctx, span := monitoring.StartSpan(pctx, \"compileCmd\")\n\tdefer span.End()\n\n\tbin, err := exec.LookPath(c.buildContext.Build.Tool)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tcmd := exec.CommandContext(ctx, bin, \"build\")\n\tcmd.Env = os.Environ()\n\n\tif len(config.BuildFlags) > 0 {\n\t\targs = append(args, config.BuildFlags...)\n\t}\n\tswitch c.buildContext.Build.Tool {\n\tcase \"go\":\n\t\tcmd.Dir = dir\n\n\t\t\/\/ Outputs the binary to DevNull if without bang\n\t\tif !bang || !matchSlice(\"-o\", args) {\n\t\t\targs = append(args, \"-o\", os.DevNull)\n\t\t}\n\t\t\/\/ add \"app\" suffix to binary name if enable app-engine build\n\t\tif config.BuildAppengine {\n\t\t\tcmd.Args[0] += \"app\"\n\t\t}\n\tcase \"gb\":\n\t\tcmd.Dir = c.buildContext.Build.ProjectRoot\n\n\t\tif config.BuildAppengine {\n\t\t\tcmd.Args = append([]string{cmd.Args[0], \"gae\"}, cmd.Args[1:]...)\n\t\t\tpkgs, err := fs.GbPackages(cmd.Dir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\t\/\/ \"gb gae build\" doesn't compatible \"gb build\" arg. actually, \"goapp build ...\"\n\t\t\t\tcmd.Args = append(cmd.Args, pkg+string(filepath.Separator)+\"...\")\n\t\t\t}\n\t\t}\n\t}\n\n\targs = append(args, \".\/...\")\n\tcmd.Args = append(cmd.Args, args...)\n\n\treturn cmd, nil\n}\n\nfunc matchSlice(s string, ss []string) bool {\n\tfor _, str := range ss {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>pkg\/command: move os.Unsetenv GO111MODULE to compileCmd<commit_after>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage command\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"github.com\/neovim\/go-client\/nvim\"\n\t\"github.com\/pkg\/errors\"\n\t\"go.opencensus.io\/trace\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/zchee\/nvim-go\/pkg\/config\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/fs\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/logger\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/monitoring\"\n\t\"github.com\/zchee\/nvim-go\/pkg\/nvimutil\"\n)\n\n\/\/ CmdBuildEval struct type for Eval of GoBuild command.\ntype CmdBuildEval struct {\n\tCwd string `msgpack:\",array\"`\n\tFile string\n}\n\nfunc (c *Command) cmdBuild(ctx context.Context, args []string, bang bool, eval *CmdBuildEval) {\n\terrch := make(chan interface{}, 1)\n\tgo func() {\n\t\terrch <- c.Build(ctx, args, bang, eval)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase err := <-errch:\n\t\tswitch e := err.(type) {\n\t\tcase error:\n\t\t\tnvimutil.ErrorWrap(c.Nvim, e)\n\t\tcase []*nvim.QuickfixError:\n\t\t\tc.errs.Store(\"Build\", e)\n\t\t\terrlist := make(map[string][]*nvim.QuickfixError)\n\t\t\tc.errs.Range(func(ki, vi interface{}) bool {\n\t\t\t\tk, v := ki.(string), vi.([]*nvim.QuickfixError)\n\t\t\t\terrlist[k] = append(errlist[k], v...)\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tnvimutil.ErrorList(c.Nvim, errlist, true)\n\t\tcase nil:\n\t\t\t\/\/ nothing to do\n\t\t}\n\t}\n}\n\n\/\/ Build builds the current buffers package use compile tool that determined\n\/\/ from the package directory structure.\nfunc (c *Command) Build(pctx context.Context, args []string, bang bool, eval *CmdBuildEval) interface{} {\n\tctx, span := monitoring.StartSpan(pctx, \"Build\")\n\tdefer span.End()\n\n\tlog := logger.FromContext(ctx).With(zap.Strings(\"args\", args), zap.Bool(\"bang\", bang), zap.Any(\"CmdBuildEval\", eval))\n\tif !bang {\n\t\tbang = config.BuildForce\n\t}\n\n\tcmd, err := c.compileCmd(ctx, args, bang, eval.Cwd)\n\tif err != nil {\n\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: err.Error()})\n\t\treturn errors.WithStack(err)\n\t}\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\tlog.Debug(\"command.Build\",\n\t\tzap.Strings(\"cmd.Args\", cmd.Args),\n\t\tzap.String(\"cmd.Path\", cmd.Path),\n\t\tzap.String(\"cmd.Dir\", cmd.Dir),\n\t\tzap.Any(\"cmd.SysProcAttr\", cmd.SysProcAttr),\n\t\tzap.Any(\"cmd.Process\", cmd.Process),\n\t\tzap.Any(\"cmd.ProcessState\", cmd.ProcessState))\n\n\tif buildErr := cmd.Run(); buildErr != nil {\n\t\tif err, ok := buildErr.(*exec.ExitError); ok && err != nil {\n\t\t\terrlist, err := nvimutil.ParseError(ctx, stderr.Bytes(), eval.Cwd, &c.buildContext.Build, nil)\n\t\t\tif err != nil {\n\t\t\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: err.Error()})\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t\treturn errlist\n\t\t}\n\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: buildErr.Error()})\n\t\treturn errors.WithStack(buildErr)\n\t}\n\n\treturn nvimutil.EchoSuccess(c.Nvim, \"GoBuild\", fmt.Sprintf(\"compiler: %s\", c.buildContext.Build.Tool))\n}\n\n\/\/ compileCmd returns the *exec.Cmd corresponding to the compile tool.\nfunc (c *Command) compileCmd(pctx context.Context, args []string, bang bool, dir string) (*exec.Cmd, error) {\n\tctx, span := monitoring.StartSpan(pctx, \"compileCmd\")\n\tdefer span.End()\n\n\tbin, err := exec.LookPath(c.buildContext.Build.Tool)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tcmd := exec.CommandContext(ctx, bin, \"build\")\n\tcmd.Env = os.Environ()\n\n\tif len(config.BuildFlags) > 0 {\n\t\targs = append(args, config.BuildFlags...)\n\t}\n\tswitch c.buildContext.Build.Tool {\n\tcase \"go\":\n\t\tcmd.Dir = dir\n\n\t\t\/\/ Outputs the binary to DevNull if without bang\n\t\tif !bang || !matchSlice(\"-o\", args) {\n\t\t\targs = append(args, \"-o\", os.DevNull)\n\t\t}\n\t\t\/\/ add \"app\" suffix to binary name if enable app-engine build\n\t\tif config.BuildAppengine {\n\t\t\tcmd.Args[0] += \"app\"\n\t\t}\n\t\tos.Unsetenv(\"GO111MODULE\")\n\tcase \"gb\":\n\t\tcmd.Dir = c.buildContext.Build.ProjectRoot\n\n\t\tif config.BuildAppengine {\n\t\t\tcmd.Args = append([]string{cmd.Args[0], \"gae\"}, cmd.Args[1:]...)\n\t\t\tpkgs, err := fs.GbPackages(cmd.Dir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\t\/\/ \"gb gae build\" doesn't compatible \"gb build\" arg. actually, \"goapp build ...\"\n\t\t\t\tcmd.Args = append(cmd.Args, pkg+string(filepath.Separator)+\"...\")\n\t\t\t}\n\t\t}\n\t}\n\n\targs = append(args, \".\/...\")\n\tcmd.Args = append(cmd.Args, args...)\n\n\treturn cmd, nil\n}\n\nfunc matchSlice(s string, ss []string) bool {\n\tfor _, str := range ss {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package config manages configuration of the whole application.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/compute\/metadata\"\n\t\"github.com\/golang\/glog\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\tcompute \"google.golang.org\/api\/compute\/v0.beta\"\n\tgcfg \"gopkg.in\/gcfg.v1\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\/providers\/gce\"\n)\n\nconst (\n\tmanagedActive = \"Active\"\n\tmanagedEmpty = \"\"\n\tmanagedFailedCaaChecking = \"FailedCaaChecking\"\n\tmanagedFailedCaaForbidden = \"FailedCaaForbidden\"\n\tmanagedFailedNotVisible = \"FailedNotVisible\"\n\tmanagedFailedRateLimited = \"FailedRateLimited\"\n\tmanagedProvisioning = \"Provisioning\"\n\tmanagedProvisioningFailed = \"ProvisioningFailed\"\n\tmanagedProvisioningFailedPermanently = \"ProvisioningFailedPermanently\"\n\tmanagedRenewalFailed = \"RenewalFailed\"\n\n\tSslCertificateNamePrefix = \"mcrt-\"\n\n\tsslActive = \"ACTIVE\"\n\tsslEmpty = \"\"\n\tsslFailedCaaChecking = \"FAILED_CAA_CHECKING\"\n\tsslFailedCaaForbidden = \"FAILED_CAA_FORBIDDEN\"\n\tsslFailedNotVisible = \"FAILED_NOT_VISIBLE\"\n\tsslFailedRateLimited = \"FAILED_RATE_LIMITED\"\n\tsslManagedCertificateStatusUnspecified = \"MANAGED_CERTIFICATE_STATUS_UNSPECIFIED\"\n\tsslProvisioning = \"PROVISIONING\"\n\tsslProvisioningFailed = \"PROVISIONING_FAILED\"\n\tsslProvisioningFailedPermanently = \"PROVISIONING_FAILED_PERMANENTLY\"\n\tsslRenewalFailed = \"RENEWAL_FAILED\"\n)\n\ntype computeConfig struct {\n\tTokenSource oauth2.TokenSource\n\tProjectID string\n\tTimeout time.Duration\n}\n\ntype certificateStatusConfig struct {\n\t\/\/ Certificate is a mapping from SslCertificate status to ManagedCertificate status\n\tCertificate map[string]string\n\t\/\/ Domain is a mapping from SslCertificate domain status to ManagedCertificate domain status\n\tDomain map[string]string\n}\n\ntype Config struct {\n\t\/\/ CertificateStatus holds mappings of SslCertificate statuses to ManagedCertificate statuses\n\tCertificateStatus certificateStatusConfig\n\t\/\/ Compute is GCP-specific configuration\n\tCompute computeConfig\n\t\/\/ SslCertificateNamePrefix is a prefix prepended to SslCertificate resources created by the controller\n\tSslCertificateNamePrefix string\n}\n\nfunc New(gceConfigFilePath string) (*Config, error) {\n\ttokenSource, projectID, err := getTokenSourceAndProjectID(gceConfigFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.Infof(\"TokenSource: %#v, projectID: %s\", tokenSource, projectID)\n\n\tdomainStatuses := make(map[string]string, 0)\n\tdomainStatuses[sslActive] = managedActive\n\tdomainStatuses[sslFailedCaaChecking] = managedFailedCaaChecking\n\tdomainStatuses[sslFailedCaaForbidden] = managedFailedCaaForbidden\n\tdomainStatuses[sslFailedNotVisible] = managedFailedNotVisible\n\tdomainStatuses[sslFailedRateLimited] = managedFailedRateLimited\n\tdomainStatuses[sslProvisioning] = managedProvisioning\n\n\tcertificateStatuses := make(map[string]string, 0)\n\tcertificateStatuses[sslActive] = managedActive\n\tcertificateStatuses[sslEmpty] = managedEmpty\n\tcertificateStatuses[sslManagedCertificateStatusUnspecified] = managedEmpty\n\tcertificateStatuses[sslProvisioning] = managedProvisioning\n\tcertificateStatuses[sslProvisioningFailed] = managedProvisioningFailed\n\tcertificateStatuses[sslProvisioningFailedPermanently] = managedProvisioningFailedPermanently\n\tcertificateStatuses[sslRenewalFailed] = managedRenewalFailed\n\n\treturn &Config{\n\t\tCertificateStatus: certificateStatusConfig{\n\t\t\tCertificate: certificateStatuses,\n\t\t\tDomain: domainStatuses,\n\t\t},\n\t\tCompute: computeConfig{\n\t\t\tTokenSource: tokenSource,\n\t\t\tProjectID: projectID,\n\t\t\tTimeout: 30 * time.Second,\n\t\t},\n\t\tSslCertificateNamePrefix: SslCertificateNamePrefix,\n\t}, nil\n}\n\nfunc getTokenSourceAndProjectID(gceConfigFilePath string) (oauth2.TokenSource, string, error) {\n\tif gceConfigFilePath != \"\" {\n\t\tglog.V(1).Info(\"In a GKE cluster\")\n\n\t\tconfig, err := os.Open(gceConfigFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"Could not open cloud provider configuration %s: %v\", gceConfigFilePath, err)\n\t\t}\n\t\tdefer config.Close()\n\n\t\tvar cfg gce.ConfigFile\n\t\tif err := gcfg.ReadInto(&cfg, config); err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"Could not read config %v\", err)\n\t\t}\n\t\tglog.Infof(\"Using GCE provider config %+v\", cfg)\n\n\t\treturn gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody), cfg.Global.ProjectID, nil\n\t}\n\n\tprojectID, err := metadata.ProjectID()\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"Could not fetch project id: %v\", err)\n\t}\n\n\tif len(os.Getenv(\"GOOGLE_APPLICATION_CREDENTIALS\")) > 0 {\n\t\tglog.V(1).Info(\"In a GCP cluster\")\n\t\ttokenSource, err := google.DefaultTokenSource(oauth2.NoContext, compute.ComputeScope)\n\t\treturn tokenSource, projectID, err\n\t} else {\n\t\tglog.V(1).Info(\"Using default TokenSource\")\n\t\treturn google.ComputeTokenSource(\"\"), projectID, nil\n\t}\n}\n<commit_msg>Only fail on fatal errors from gcfg parser (fix support for regional clusters)<commit_after>\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package config manages configuration of the whole application.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/compute\/metadata\"\n\t\"github.com\/golang\/glog\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\tcompute \"google.golang.org\/api\/compute\/v0.beta\"\n\tgcfg \"gopkg.in\/gcfg.v1\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\/providers\/gce\"\n)\n\nconst (\n\tmanagedActive = \"Active\"\n\tmanagedEmpty = \"\"\n\tmanagedFailedCaaChecking = \"FailedCaaChecking\"\n\tmanagedFailedCaaForbidden = \"FailedCaaForbidden\"\n\tmanagedFailedNotVisible = \"FailedNotVisible\"\n\tmanagedFailedRateLimited = \"FailedRateLimited\"\n\tmanagedProvisioning = \"Provisioning\"\n\tmanagedProvisioningFailed = \"ProvisioningFailed\"\n\tmanagedProvisioningFailedPermanently = \"ProvisioningFailedPermanently\"\n\tmanagedRenewalFailed = \"RenewalFailed\"\n\n\tSslCertificateNamePrefix = \"mcrt-\"\n\n\tsslActive = \"ACTIVE\"\n\tsslEmpty = \"\"\n\tsslFailedCaaChecking = \"FAILED_CAA_CHECKING\"\n\tsslFailedCaaForbidden = \"FAILED_CAA_FORBIDDEN\"\n\tsslFailedNotVisible = \"FAILED_NOT_VISIBLE\"\n\tsslFailedRateLimited = \"FAILED_RATE_LIMITED\"\n\tsslManagedCertificateStatusUnspecified = \"MANAGED_CERTIFICATE_STATUS_UNSPECIFIED\"\n\tsslProvisioning = \"PROVISIONING\"\n\tsslProvisioningFailed = \"PROVISIONING_FAILED\"\n\tsslProvisioningFailedPermanently = \"PROVISIONING_FAILED_PERMANENTLY\"\n\tsslRenewalFailed = \"RENEWAL_FAILED\"\n)\n\ntype computeConfig struct {\n\tTokenSource oauth2.TokenSource\n\tProjectID string\n\tTimeout time.Duration\n}\n\ntype certificateStatusConfig struct {\n\t\/\/ Certificate is a mapping from SslCertificate status to ManagedCertificate status\n\tCertificate map[string]string\n\t\/\/ Domain is a mapping from SslCertificate domain status to ManagedCertificate domain status\n\tDomain map[string]string\n}\n\ntype Config struct {\n\t\/\/ CertificateStatus holds mappings of SslCertificate statuses to ManagedCertificate statuses\n\tCertificateStatus certificateStatusConfig\n\t\/\/ Compute is GCP-specific configuration\n\tCompute computeConfig\n\t\/\/ SslCertificateNamePrefix is a prefix prepended to SslCertificate resources created by the controller\n\tSslCertificateNamePrefix string\n}\n\nfunc New(gceConfigFilePath string) (*Config, error) {\n\ttokenSource, projectID, err := getTokenSourceAndProjectID(gceConfigFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tglog.Infof(\"TokenSource: %#v, projectID: %s\", tokenSource, projectID)\n\n\tdomainStatuses := make(map[string]string, 0)\n\tdomainStatuses[sslActive] = managedActive\n\tdomainStatuses[sslFailedCaaChecking] = managedFailedCaaChecking\n\tdomainStatuses[sslFailedCaaForbidden] = managedFailedCaaForbidden\n\tdomainStatuses[sslFailedNotVisible] = managedFailedNotVisible\n\tdomainStatuses[sslFailedRateLimited] = managedFailedRateLimited\n\tdomainStatuses[sslProvisioning] = managedProvisioning\n\n\tcertificateStatuses := make(map[string]string, 0)\n\tcertificateStatuses[sslActive] = managedActive\n\tcertificateStatuses[sslEmpty] = managedEmpty\n\tcertificateStatuses[sslManagedCertificateStatusUnspecified] = managedEmpty\n\tcertificateStatuses[sslProvisioning] = managedProvisioning\n\tcertificateStatuses[sslProvisioningFailed] = managedProvisioningFailed\n\tcertificateStatuses[sslProvisioningFailedPermanently] = managedProvisioningFailedPermanently\n\tcertificateStatuses[sslRenewalFailed] = managedRenewalFailed\n\n\treturn &Config{\n\t\tCertificateStatus: certificateStatusConfig{\n\t\t\tCertificate: certificateStatuses,\n\t\t\tDomain: domainStatuses,\n\t\t},\n\t\tCompute: computeConfig{\n\t\t\tTokenSource: tokenSource,\n\t\t\tProjectID: projectID,\n\t\t\tTimeout: 30 * time.Second,\n\t\t},\n\t\tSslCertificateNamePrefix: SslCertificateNamePrefix,\n\t}, nil\n}\n\nfunc getTokenSourceAndProjectID(gceConfigFilePath string) (oauth2.TokenSource, string, error) {\n\tif gceConfigFilePath != \"\" {\n\t\tglog.V(1).Info(\"In a GKE cluster\")\n\n\t\tconfig, err := os.Open(gceConfigFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"Could not open cloud provider configuration %s: %v\", gceConfigFilePath, err)\n\t\t}\n\t\tdefer config.Close()\n\n\t\tvar cfg gce.ConfigFile\n\t\tif err := gcfg.FatalOnly(gcfg.ReadInto(&cfg, config)); err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"Could not read config %v\", err)\n\t\t}\n\t\tglog.Infof(\"Using GCE provider config %+v\", cfg)\n\n\t\treturn gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody), cfg.Global.ProjectID, nil\n\t}\n\n\tprojectID, err := metadata.ProjectID()\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"Could not fetch project id: %v\", err)\n\t}\n\n\tif len(os.Getenv(\"GOOGLE_APPLICATION_CREDENTIALS\")) > 0 {\n\t\tglog.V(1).Info(\"In a GCP cluster\")\n\t\ttokenSource, err := google.DefaultTokenSource(oauth2.NoContext, compute.ComputeScope)\n\t\treturn tokenSource, projectID, err\n\t} else {\n\t\tglog.V(1).Info(\"Using default TokenSource\")\n\t\treturn google.ComputeTokenSource(\"\"), projectID, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package kates\n\nimport (\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tnetv1beta1 \"k8s.io\/api\/networking\/v1beta1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\txv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/apimachinery\/pkg\/version\"\n\t\"k8s.io\/cli-runtime\/pkg\/genericclioptions\"\n\tmetrics \"k8s.io\/metrics\/pkg\/apis\/metrics\/v1beta1\"\n)\n\n\/\/ The kubernetes client libraries and core protobufs are split across so many different packages\n\/\/ that it is extremely difficult to keep them straight. The naming conventions are also very poorly\n\/\/ chosen resulting in frequent name collisions under even simple uses. Case in point, there are a\n\/\/ whole lot of v1 packages (core\/v1, apiextensions\/v1, meta\/v1, apps\/v1) just to name a few, and\n\/\/ you need to use at least 3 of these together in order to accomplish almost anything. The types\n\/\/ within packages are often horribly named as well (e.g. dynamic.Interface, rest.Config,\n\/\/ version.Info are not super description when you omit the packages).\n\/\/\n\/\/ The aliases in this file are intended to do several things:\n\/\/\n\/\/ 1. Make our kubernetes code easier to read by providing a standard set of aliases instead of\n\/\/ requiring developers to make up potentially different aliases at the point of use.\n\/\/\n\/\/ 2. Make for a simpler and easier Quickstart for our kubernetes library by providing a single\n\/\/ entry point.\n\/\/\n\/\/ 3. Allow us to build a set of simpler and easier to use APIs on top of client-go while using\n\/\/ types that are compatible with client-go so we have a good escape hatch for directly using\n\/\/ client-go.\n\/\/\n\/\/ 4. Provide a single file that helps me (rhs@datawire.io) remember where the hell everything is.\n\n\/\/ type related aliases\n\ntype TypeMeta = metav1.TypeMeta\ntype ObjectMeta = metav1.ObjectMeta\ntype APIResource = metav1.APIResource\n\ntype Namespace = corev1.Namespace\n\ntype LocalObjectReference = corev1.LocalObjectReference\n\ntype Event = corev1.Event\ntype ConfigMap = corev1.ConfigMap\n\ntype Secret = corev1.Secret\n\nconst SecretTypeServiceAccountToken = corev1.SecretTypeServiceAccountToken\nconst SecretTypeTLS = corev1.SecretTypeTLS\n\ntype Ingress = netv1beta1.Ingress\ntype IngressClass = netv1beta1.IngressClass\n\ntype Service = corev1.Service\ntype ServiceSpec = corev1.ServiceSpec\ntype ServicePort = corev1.ServicePort\ntype Endpoints = corev1.Endpoints\ntype EndpointSubset = corev1.EndpointSubset\ntype EndpointAddress = corev1.EndpointAddress\ntype EndpointPort = corev1.EndpointPort\n\ntype Protocol = corev1.Protocol\n\nvar ProtocolTCP = corev1.ProtocolTCP\nvar ProtocolUDP = corev1.ProtocolUDP\nvar ProtocolSCTP = corev1.ProtocolSCTP\n\nvar ServiceTypeLoadBalancer = corev1.ServiceTypeLoadBalancer\nvar ServiceTypeClusterIP = corev1.ServiceTypeClusterIP\n\ntype ServiceAccount = corev1.ServiceAccount\n\ntype Role = rbacv1.Role\ntype RoleBinding = rbacv1.RoleBinding\ntype ClusterRole = rbacv1.ClusterRole\ntype ClusterRoleBinding = rbacv1.ClusterRoleBinding\n\ntype Pod = corev1.Pod\ntype PodSpec = corev1.PodSpec\ntype Container = corev1.Container\ntype EnvVar = corev1.EnvVar\ntype SecurityContext = corev1.SecurityContext\ntype PodCondition = corev1.PodCondition\ntype PodLogOptions = corev1.PodLogOptions\ntype Toleration = corev1.Toleration\ntype TolerationOperator = corev1.TolerationOperator\n\nvar TolerationOpExists = corev1.TolerationOpExists\nvar TolerationOpEqual = corev1.TolerationOpEqual\n\nvar PodSucceeded = corev1.PodSucceeded\nvar PodFailed = corev1.PodFailed\nvar PodReady = corev1.PodReady\nvar CoreConditionTrue = corev1.ConditionTrue\n\ntype Node = corev1.Node\n\nconst NodeUnreachablePodReason = \"NodeLost\" \/\/ k8s.io\/kubernetes\/pkg\/util\/node.NodeUnreachablePodReason\n\ntype Volume = corev1.Volume\ntype VolumeSource = corev1.VolumeSource\ntype PersistentVolumeClaimVolumeSource = corev1.PersistentVolumeClaimVolumeSource\ntype VolumeMount = corev1.VolumeMount\n\ntype ResourceRequirements = corev1.ResourceRequirements\ntype ResourceList = corev1.ResourceList\n\nconst ResourceCPU = corev1.ResourceCPU\nconst ResourceMemory = corev1.ResourceMemory\n\ntype PersistentVolumeClaim = corev1.PersistentVolumeClaim\n\ntype Deployment = appsv1.Deployment\n\ntype CustomResourceDefinition = xv1.CustomResourceDefinition\n\nvar NamesAccepted = xv1.NamesAccepted\nvar Established = xv1.Established\nvar ConditionTrue = xv1.ConditionTrue\n\ntype NodeMetrics = metrics.NodeMetrics\ntype PodMetrics = metrics.PodMetrics\ntype ContainerMetrics = metrics.ContainerMetrics\n\ntype Unstructured = unstructured.Unstructured\n\nvar MustParseQuantity = resource.MustParse\n\ntype Quantity = resource.Quantity\ntype IntOrString = intstr.IntOrString\ntype Time = metav1.Time\n\nvar Int = intstr.Int\n\n\/\/ client related aliases\n\ntype ConfigFlags = genericclioptions.ConfigFlags\n\nvar NewConfigFlags = genericclioptions.NewConfigFlags\n\ntype VersionInfo = version.Info\n\ntype PatchType = types.PatchType\n\nvar (\n\tJSONPatchType = types.JSONPatchType\n\tMergePatchType = types.MergePatchType\n\tStrategicMergePatchType = types.StrategicMergePatchType\n\tApplyPatchType = types.ApplyPatchType\n)\n\ntype GetOptions = metav1.GetOptions\ntype ListOptions = metav1.ListOptions\ntype CreateOptions = metav1.CreateOptions\ntype UpdateOptions = metav1.UpdateOptions\ntype PatchOptions = metav1.PatchOptions\ntype DeleteOptions = metav1.DeleteOptions\n\nvar NamespaceAll = metav1.NamespaceAll\nvar NamespaceNone = metav1.NamespaceNone\n\ntype LabelSelector = metav1.LabelSelector\n\ntype Selector = labels.Selector\ntype LabelSet = labels.Set\n\nvar ParseSelector = labels.Parse\n\n\/\/ error related aliases\n\nvar IsNotFound = apierrors.IsNotFound\nvar IsConflict = apierrors.IsConflict\n\n\/\/\n\ntype Object interface {\n\truntime.Object\n\tmetav1.Object\n}\n<commit_msg>Adding ReplicaSet + PodTemplateSpec aliases to kates<commit_after>package kates\n\nimport (\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tnetv1beta1 \"k8s.io\/api\/networking\/v1beta1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\txv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/resource\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\t\"k8s.io\/apimachinery\/pkg\/version\"\n\t\"k8s.io\/cli-runtime\/pkg\/genericclioptions\"\n\tmetrics \"k8s.io\/metrics\/pkg\/apis\/metrics\/v1beta1\"\n)\n\n\/\/ The kubernetes client libraries and core protobufs are split across so many different packages\n\/\/ that it is extremely difficult to keep them straight. The naming conventions are also very poorly\n\/\/ chosen resulting in frequent name collisions under even simple uses. Case in point, there are a\n\/\/ whole lot of v1 packages (core\/v1, apiextensions\/v1, meta\/v1, apps\/v1) just to name a few, and\n\/\/ you need to use at least 3 of these together in order to accomplish almost anything. The types\n\/\/ within packages are often horribly named as well (e.g. dynamic.Interface, rest.Config,\n\/\/ version.Info are not super description when you omit the packages).\n\/\/\n\/\/ The aliases in this file are intended to do several things:\n\/\/\n\/\/ 1. Make our kubernetes code easier to read by providing a standard set of aliases instead of\n\/\/ requiring developers to make up potentially different aliases at the point of use.\n\/\/\n\/\/ 2. Make for a simpler and easier Quickstart for our kubernetes library by providing a single\n\/\/ entry point.\n\/\/\n\/\/ 3. Allow us to build a set of simpler and easier to use APIs on top of client-go while using\n\/\/ types that are compatible with client-go so we have a good escape hatch for directly using\n\/\/ client-go.\n\/\/\n\/\/ 4. Provide a single file that helps me (rhs@datawire.io) remember where the hell everything is.\n\n\/\/ type related aliases\n\ntype TypeMeta = metav1.TypeMeta\ntype ObjectMeta = metav1.ObjectMeta\ntype APIResource = metav1.APIResource\n\ntype Namespace = corev1.Namespace\n\ntype LocalObjectReference = corev1.LocalObjectReference\n\ntype Event = corev1.Event\ntype ConfigMap = corev1.ConfigMap\n\ntype Secret = corev1.Secret\n\nconst SecretTypeServiceAccountToken = corev1.SecretTypeServiceAccountToken\nconst SecretTypeTLS = corev1.SecretTypeTLS\n\ntype Ingress = netv1beta1.Ingress\ntype IngressClass = netv1beta1.IngressClass\n\ntype Service = corev1.Service\ntype ServiceSpec = corev1.ServiceSpec\ntype ServicePort = corev1.ServicePort\ntype Endpoints = corev1.Endpoints\ntype EndpointSubset = corev1.EndpointSubset\ntype EndpointAddress = corev1.EndpointAddress\ntype EndpointPort = corev1.EndpointPort\n\ntype Protocol = corev1.Protocol\n\nvar ProtocolTCP = corev1.ProtocolTCP\nvar ProtocolUDP = corev1.ProtocolUDP\nvar ProtocolSCTP = corev1.ProtocolSCTP\n\nvar ServiceTypeLoadBalancer = corev1.ServiceTypeLoadBalancer\nvar ServiceTypeClusterIP = corev1.ServiceTypeClusterIP\n\ntype ServiceAccount = corev1.ServiceAccount\n\ntype Role = rbacv1.Role\ntype RoleBinding = rbacv1.RoleBinding\ntype ClusterRole = rbacv1.ClusterRole\ntype ClusterRoleBinding = rbacv1.ClusterRoleBinding\n\ntype Pod = corev1.Pod\ntype PodSpec = corev1.PodSpec\ntype PodTemplateSpec = corev1.PodTemplateSpec\ntype Container = corev1.Container\ntype EnvVar = corev1.EnvVar\ntype SecurityContext = corev1.SecurityContext\ntype PodCondition = corev1.PodCondition\ntype PodLogOptions = corev1.PodLogOptions\ntype Toleration = corev1.Toleration\ntype TolerationOperator = corev1.TolerationOperator\n\nvar TolerationOpExists = corev1.TolerationOpExists\nvar TolerationOpEqual = corev1.TolerationOpEqual\n\nvar PodSucceeded = corev1.PodSucceeded\nvar PodFailed = corev1.PodFailed\nvar PodReady = corev1.PodReady\nvar CoreConditionTrue = corev1.ConditionTrue\n\ntype Node = corev1.Node\n\nconst NodeUnreachablePodReason = \"NodeLost\" \/\/ k8s.io\/kubernetes\/pkg\/util\/node.NodeUnreachablePodReason\n\ntype Volume = corev1.Volume\ntype VolumeSource = corev1.VolumeSource\ntype PersistentVolumeClaimVolumeSource = corev1.PersistentVolumeClaimVolumeSource\ntype VolumeMount = corev1.VolumeMount\n\ntype ResourceRequirements = corev1.ResourceRequirements\ntype ResourceList = corev1.ResourceList\n\nconst ResourceCPU = corev1.ResourceCPU\nconst ResourceMemory = corev1.ResourceMemory\n\ntype PersistentVolumeClaim = corev1.PersistentVolumeClaim\n\ntype Deployment = appsv1.Deployment\ntype ReplicaSet = appsv1.ReplicaSet\n\ntype CustomResourceDefinition = xv1.CustomResourceDefinition\n\nvar NamesAccepted = xv1.NamesAccepted\nvar Established = xv1.Established\nvar ConditionTrue = xv1.ConditionTrue\n\ntype NodeMetrics = metrics.NodeMetrics\ntype PodMetrics = metrics.PodMetrics\ntype ContainerMetrics = metrics.ContainerMetrics\n\ntype Unstructured = unstructured.Unstructured\n\nvar MustParseQuantity = resource.MustParse\n\ntype Quantity = resource.Quantity\ntype IntOrString = intstr.IntOrString\ntype Time = metav1.Time\n\nvar Int = intstr.Int\n\n\/\/ client related aliases\n\ntype ConfigFlags = genericclioptions.ConfigFlags\n\nvar NewConfigFlags = genericclioptions.NewConfigFlags\n\ntype VersionInfo = version.Info\n\ntype PatchType = types.PatchType\n\nvar (\n\tJSONPatchType = types.JSONPatchType\n\tMergePatchType = types.MergePatchType\n\tStrategicMergePatchType = types.StrategicMergePatchType\n\tApplyPatchType = types.ApplyPatchType\n)\n\ntype GetOptions = metav1.GetOptions\ntype ListOptions = metav1.ListOptions\ntype CreateOptions = metav1.CreateOptions\ntype UpdateOptions = metav1.UpdateOptions\ntype PatchOptions = metav1.PatchOptions\ntype DeleteOptions = metav1.DeleteOptions\n\nvar NamespaceAll = metav1.NamespaceAll\nvar NamespaceNone = metav1.NamespaceNone\n\ntype LabelSelector = metav1.LabelSelector\n\ntype Selector = labels.Selector\ntype LabelSet = labels.Set\n\nvar ParseSelector = labels.Parse\n\n\/\/ error related aliases\n\nvar IsNotFound = apierrors.IsNotFound\nvar IsConflict = apierrors.IsConflict\n\n\/\/\n\ntype Object interface {\n\truntime.Object\n\tmetav1.Object\n}\n<|endoftext|>"} {"text":"<commit_before>\npackage paxos\n\nimport (\n \"fmt\"\n \"strconv\"\n \"strings\"\n)\n\nconst (\n\tmSeqn = iota\n\tmFrom\n\tmTo\n\tmCmd\n\tmBody\n\tmNumParts\n)\n\nconst (\n\tNop = iota\n\tInvite\n\tRsvp\n\tNominate\n\tVote\n)\n\ntype Message interface {\n From() int\n Cmd() int\n Seqn() uint64\n Body() string \/\/ soon to be []byte\n\n\tSetFrom(byte)\n\tSetSeqn(uint64)\n}\n\nfunc NewMessage(s string) Message {\n\tparts := strings.Split(s, \":\", mNumParts)\n\tif len(parts) != mNumParts {\n\t\tpanic(s)\n\t}\n\n\tseqn, err := strconv.Btoui64(parts[mSeqn], 10)\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\tfrom, err := strconv.Atoi(parts[mFrom])\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\tcmd, err := strconv.Atoi(parts[mCmd])\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\treturn &Msg{byte(from), cmd, seqn, parts[mBody]}\n}\n\nfunc NewInvite(crnd uint64) Message {\n\treturn &Msg{\n\t\tcmd: Invite,\n\t\tbody: fmt.Sprintf(\"%d\", crnd),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not an invite, the result is undefined.\nfunc InviteParts(m Message) (crnd uint64) {\n\tcrnd, _ = strconv.Atoui64(m.Body())\n\treturn\n}\n\nfunc NewNominate(crnd uint64, v string) Message {\n\treturn &Msg{\n\t\tcmd: Nominate,\n\t\tbody: fmt.Sprintf(\"%d:%s\", crnd, v),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not a nominate, the result is undefined.\nfunc NominateParts(m Message) (crnd uint64, v string) {\n\tparts := strings.Split(m.Body(), \":\", 2)\n\tcrnd, _ = strconv.Atoui64(parts[0])\n\tv = parts[1]\n\treturn\n}\n\nfunc NewRsvp(i, vrnd uint64, vval string) Message {\n\treturn &Msg{\n\t\tcmd: Rsvp,\n\t\tbody: fmt.Sprintf(\"%d:%d:%s\", i, vrnd, vval),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not an rsvp, the result is undefined.\nfunc RsvpParts(m Message) (i, vrnd uint64, vval string) {\n\tparts := strings.Split(m.Body(), \":\", 3)\n\ti, _ = strconv.Atoui64(parts[0])\n\tvrnd, _ = strconv.Atoui64(parts[1])\n\tvval = parts[2]\n\treturn\n}\n\nfunc NewVote(i uint64, vval string) Message {\n\treturn &Msg{\n\t\tcmd: Vote,\n\t\tbody: fmt.Sprintf(\"%d:%s\", i, vval),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not a vote, the result is undefined.\nfunc VoteParts(m Message) (i uint64, vval string) {\n\tparts := strings.Split(m.Body(), \":\", 2)\n\ti, _ = strconv.Atoui64(parts[0])\n\tvval = parts[1]\n\treturn\n}\n\n\/\/ In-memory format:\n\/\/\n\/\/ 0 -- index of sender\n\/\/ 1 -- cmd\n\/\/ 2..9 -- seqn\n\/\/ 10.. -- body -- format depends on command\n\/\/\n\/\/ Wire format is same as in-memory format, but without the first byte (the\n\/\/ sender index). Here it is for clarity:\n\/\/\n\/\/ 0 -- cmd\n\/\/ 1..8 -- seqn\n\/\/ 9.. -- body -- format depends on command\n\/\/type Msg []byte\n\ntype Msg struct {\n\tfrom byte\n\tcmd int\n\tseqn uint64\n\tbody string\n}\n\nfunc (m Msg) Seqn() uint64 {\n\treturn m.seqn\n}\n\nfunc (m Msg) From() int {\n\treturn int(m.from)\n}\n\nfunc (m Msg) Cmd() int {\n\treturn m.cmd\n}\n\nfunc (m Msg) Body() string {\n\treturn m.body\n}\n\nfunc (m *Msg) SetFrom(from byte) {\n\tm.from = from\n}\n\nfunc (m *Msg) SetSeqn(seqn uint64) {\n\tm.seqn = seqn\n}\n<commit_msg>tweak numeric types<commit_after>\npackage paxos\n\nimport (\n \"fmt\"\n \"strconv\"\n \"strings\"\n)\n\nconst (\n\tmSeqn = iota\n\tmFrom\n\tmTo\n\tmCmd\n\tmBody\n\tmNumParts\n)\n\nconst (\n\tNop = iota\n\tInvite\n\tRsvp\n\tNominate\n\tVote\n)\n\ntype Message interface {\n From() int\n Cmd() int\n Seqn() uint64\n Body() string \/\/ soon to be []byte\n\n\tSetFrom(byte)\n\tSetSeqn(uint64)\n}\n\nfunc NewMessage(s string) Message {\n\tparts := strings.Split(s, \":\", mNumParts)\n\tif len(parts) != mNumParts {\n\t\tpanic(s)\n\t}\n\n\tseqn, err := strconv.Btoui64(parts[mSeqn], 10)\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\tfrom, err := strconv.Atoi(parts[mFrom])\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\tcmd, err := strconv.Atoi(parts[mCmd])\n\tif err != nil {\n\t\tpanic(s)\n\t}\n\n\treturn &Msg{byte(from), byte(cmd), seqn, parts[mBody]}\n}\n\nfunc NewInvite(crnd uint64) Message {\n\treturn &Msg{\n\t\tcmd: Invite,\n\t\tbody: fmt.Sprintf(\"%d\", crnd),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not an invite, the result is undefined.\nfunc InviteParts(m Message) (crnd uint64) {\n\tcrnd, _ = strconv.Atoui64(m.Body())\n\treturn\n}\n\nfunc NewNominate(crnd uint64, v string) Message {\n\treturn &Msg{\n\t\tcmd: Nominate,\n\t\tbody: fmt.Sprintf(\"%d:%s\", crnd, v),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not a nominate, the result is undefined.\nfunc NominateParts(m Message) (crnd uint64, v string) {\n\tparts := strings.Split(m.Body(), \":\", 2)\n\tcrnd, _ = strconv.Atoui64(parts[0])\n\tv = parts[1]\n\treturn\n}\n\nfunc NewRsvp(i, vrnd uint64, vval string) Message {\n\treturn &Msg{\n\t\tcmd: Rsvp,\n\t\tbody: fmt.Sprintf(\"%d:%d:%s\", i, vrnd, vval),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not an rsvp, the result is undefined.\nfunc RsvpParts(m Message) (i, vrnd uint64, vval string) {\n\tparts := strings.Split(m.Body(), \":\", 3)\n\ti, _ = strconv.Atoui64(parts[0])\n\tvrnd, _ = strconv.Atoui64(parts[1])\n\tvval = parts[2]\n\treturn\n}\n\nfunc NewVote(i uint64, vval string) Message {\n\treturn &Msg{\n\t\tcmd: Vote,\n\t\tbody: fmt.Sprintf(\"%d:%s\", i, vval),\n\t}\n}\n\n\/\/ Returns the info for `m`. If `m` is not a vote, the result is undefined.\nfunc VoteParts(m Message) (i uint64, vval string) {\n\tparts := strings.Split(m.Body(), \":\", 2)\n\ti, _ = strconv.Atoui64(parts[0])\n\tvval = parts[1]\n\treturn\n}\n\n\/\/ In-memory format:\n\/\/\n\/\/ 0 -- index of sender\n\/\/ 1 -- cmd\n\/\/ 2..9 -- seqn\n\/\/ 10.. -- body -- format depends on command\n\/\/\n\/\/ Wire format is same as in-memory format, but without the first byte (the\n\/\/ sender index). Here it is for clarity:\n\/\/\n\/\/ 0 -- cmd\n\/\/ 1..8 -- seqn\n\/\/ 9.. -- body -- format depends on command\n\/\/type Msg []byte\n\ntype Msg struct {\n\tfrom byte\n\tcmd byte\n\tseqn uint64\n\tbody string\n}\n\nfunc (m Msg) Seqn() uint64 {\n\treturn m.seqn\n}\n\nfunc (m Msg) From() int {\n\treturn int(m.from)\n}\n\nfunc (m Msg) Cmd() int {\n\treturn int(m.cmd)\n}\n\nfunc (m Msg) Body() string {\n\treturn m.body\n}\n\nfunc (m *Msg) SetFrom(from byte) {\n\tm.from = from\n}\n\nfunc (m *Msg) SetSeqn(seqn uint64) {\n\tm.seqn = seqn\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\t\"crypto\"\n\n\t\"nimona.io\/pkg\/object\"\n\t\"nimona.io\/pkg\/peer\"\n)\n\ntype (\n\tEnvelope struct {\n\t\tSender crypto.PublicKey\n\t\tPayload object.Object\n\t}\n\tRouter interface {\n\t\tSend(*peer.Peer, object.Object) error\n\t\tHandle() <-chan Envelope\n\t}\n)\n\nfunc New() Router {\n\treturn nil\n}\n<commit_msg>chore(router): remove unused<commit_after><|endoftext|>"} {"text":"<commit_before>package search\n\nimport (\n\t\"fmt\"\n\n\t\"log\"\n\n\t\"github.com\/cridenour\/go-postgis\"\n\t\"github.com\/devinmcgloin\/clr\/clr\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/handler\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/model\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/retrieval\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/lib\/pq\"\n)\n\nfunc Color(state *handler.State, color clr.Color, pixelFraction float64, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\tl, a, b := color.CIELAB()\n\tcube := fmt.Sprintf(\"(%f, %f, %f)\", l, a, b)\n\n\tlog.Println(cube)\n\terr := state.DB.Select(&ids, `\n\t\t\tSELECT bridge.image_id\n\t\t\tFROM content.colors AS colors\n \t\t\t\tINNER JOIN content.image_color_bridge AS bridge ON colors.id = bridge.color_id\n \t\t\t\tINNER JOIN permissions.can_view AS view ON view.o_id = bridge.image_id\n \t\t\t\tWHERE view.user_id = -1 AND bridge.pixel_fraction >= $4\n\t\t\t\tAND ($1::cube <-> cielab) < 50\n\t\t\tORDER BY $1::cube <-> cielab\n\t\t\tOFFSET $2 LIMIT $3;`, cube, offset, limit, pixelFraction)\n\tif err != nil {\n\t\treturn []model.Image{}, err\n\t}\n\n\treturn retrieval.GetImages(state, ids)\n}\n\nfunc Text(state *handler.State, query string, limit, offset int) ([]model.Image, error) {\n\tids := []struct {\n\t\tId int64 `db:\"image_id\"`\n\t\tRank float64\n\t}{}\n\n\t\/\/images := []struct {\n\t\/\/\tImage model.Image\n\t\/\/\tRank float64\n\t\/\/}{}\n\n\terr := state.DB.Select(&ids, `\nSELECT\n scores.image_id,\n sum(scores.rank) AS rank\n FROM (SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(landmark.description), to_tsquery($1)) AS rank\n FROM content.landmarks AS landmark\n JOIN content.image_landmark_bridge AS bridge ON landmark.id = bridge.landmark_id\n WHERE to_tsvector(landmark.description) @@ to_tsquery($1)\n UNION ALL\n SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(labels.description), to_tsquery($1)) AS rank\n FROM content.labels AS labels\n JOIN content.image_label_bridge AS bridge ON labels.id = bridge.label_id\n WHERE to_tsvector(labels.description) @@ to_tsquery($1)\n UNION ALL\n SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(tags.description), to_tsquery($1)) AS rank\n FROM content.image_tags AS tags\n JOIN content.image_tag_bridge AS bridge ON tags.id = bridge.tag_id\n WHERE to_tsvector(tags.description) @@ to_tsquery($1)) AS scores\n GROUP BY scores.image_id\n ORDER BY rank DESC\n OFFSET $2\n LIMIT $3;\n\t`, query, offset, limit)\n\tif err != nil {\n\t\treturn []model.Image{}, err\n\t}\n\n\timageIds := make([]int64, len(ids))\n\tfor i, v := range ids {\n\t\timageIds[i] = v.Id\n\t\t\/\/images[i].Rank = v.Rank\n\t}\n\n\treturn retrieval.GetImages(state, imageIds)\n\n}\n\nfunc GeoRadius(state *handler.State, point postgis.PointS, radius float64, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\terr := state.DB.Select(&ids, `\n\tSELECT geo.image_id\n\tFROM content.image_geo AS geo\n\tWHERE ST_Distance($1, geo.loc) < $2\n\tORDER BY $1 <-> geo.loc\n\tOFFSET $3 LIMIT $4\n\t`, point, radius, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, ids)\n}\n\nfunc Hot(state *handler.State, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\terr := state.DB.Select(&ids, `\n\tSELECT id FROM content.images\n\tORDER BY ranking(id, views + favorites , featured::int + 3) DESC\n\tOFFSET $1 LIMIT $2\n\t`, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, ids)\n\n}\n\nfunc FeaturedImages(state *handler.State, userId int64, limit, offset int) ([]model.Image, error) {\n\timgs := []int64{}\n\tvar stmt *sqlx.Stmt\n\tvar err error\n\tif userId == 0 {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE (view.user_id = -1 OR view.user_id = $1) AND images.featured = TRUE\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3 \n\t\t`)\n\t} else {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE view.user_id = -1 AND images.featured = TRUE\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t}\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\terr = stmt.Select(&imgs,\n\t\tuserId, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, imgs)\n}\n\nfunc RecentImages(state *handler.State, userId int64, limit, offset int) ([]model.Image, error) {\n\timageIds := []int64{}\n\tvar stmt *sqlx.Stmt\n\tvar err error\n\tif userId == 0 {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE view.user_id = -1 OR view.user_id = $1\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t} else {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE (view.user_id = -1 OR view.user_id = $1) AND images.user_id = $1\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t}\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\terr = stmt.Select(&imageIds,\n\t\tuserId, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, imageIds)\n\n\treturn retrieval.GetImages(state, imageIds)\n}\n<commit_msg>Improved Color Search Query<commit_after>package search\n\nimport (\n\t\"fmt\"\n\n\t\"log\"\n\n\t\"github.com\/cridenour\/go-postgis\"\n\t\"github.com\/devinmcgloin\/clr\/clr\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/handler\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/model\"\n\t\"github.com\/devinmcgloin\/fokal\/pkg\/retrieval\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/lib\/pq\"\n)\n\nfunc Color(state *handler.State, color clr.Color, pixelFraction float64, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\tl, a, b := color.CIELAB()\n\tcube := fmt.Sprintf(\"(%f, %f, %f)\", l, a, b)\n\n\tlog.Println(cube)\n\terr := state.DB.Select(&ids, `\n\tSELECT\n\t id\n\tFROM (SELECT\n\t\t\tbridge.image_id AS id,\n\t\t\t$1 :: CUBE <-> cielab AS score\n\t\t FROM content.colors AS COLORS\n\t\t\tINNER JOIN content.image_color_bridge AS bridge ON COLORS.id = bridge.color_id\n\t\t\tINNER JOIN permissions.can_view AS view ON view.o_id = bridge.image_id\n\t\t WHERE view.user_id = -1 AND bridge.pixel_fraction >= $4\n\t\t ORDER BY score\n\t\t OFFSET $2\n\t\t LIMIT $3) AS scores\n\tWHERE score < 50\n\tGROUP BY id\n\tORDER BY min(score);\n\t`, cube, offset, limit, pixelFraction)\n\tif err != nil {\n\t\treturn []model.Image{}, err\n\t}\n\n\treturn retrieval.GetImages(state, ids)\n}\n\nfunc Text(state *handler.State, query string, limit, offset int) ([]model.Image, error) {\n\tids := []struct {\n\t\tId int64 `db:\"image_id\"`\n\t\tRank float64\n\t}{}\n\n\t\/\/images := []struct {\n\t\/\/\tImage model.Image\n\t\/\/\tRank float64\n\t\/\/}{}\n\n\terr := state.DB.Select(&ids, `\nSELECT\n scores.image_id,\n sum(scores.rank) AS rank\n FROM (SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(landmark.description), to_tsquery($1)) AS rank\n FROM content.landmarks AS landmark\n JOIN content.image_landmark_bridge AS bridge ON landmark.id = bridge.landmark_id\n WHERE to_tsvector(landmark.description) @@ to_tsquery($1)\n UNION ALL\n SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(labels.description), to_tsquery($1)) AS rank\n FROM content.labels AS labels\n JOIN content.image_label_bridge AS bridge ON labels.id = bridge.label_id\n WHERE to_tsvector(labels.description) @@ to_tsquery($1)\n UNION ALL\n SELECT\n bridge.image_id,\n ts_rank_cd(to_tsvector(tags.description), to_tsquery($1)) AS rank\n FROM content.image_tags AS tags\n JOIN content.image_tag_bridge AS bridge ON tags.id = bridge.tag_id\n WHERE to_tsvector(tags.description) @@ to_tsquery($1)) AS scores\n GROUP BY scores.image_id\n ORDER BY rank DESC\n OFFSET $2\n LIMIT $3;\n\t`, query, offset, limit)\n\tif err != nil {\n\t\treturn []model.Image{}, err\n\t}\n\n\timageIds := make([]int64, len(ids))\n\tfor i, v := range ids {\n\t\timageIds[i] = v.Id\n\t\t\/\/images[i].Rank = v.Rank\n\t}\n\n\treturn retrieval.GetImages(state, imageIds)\n\n}\n\nfunc GeoRadius(state *handler.State, point postgis.PointS, radius float64, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\terr := state.DB.Select(&ids, `\n\tSELECT geo.image_id\n\tFROM content.image_geo AS geo\n\tWHERE ST_Distance($1, geo.loc) < $2\n\tORDER BY $1 <-> geo.loc\n\tOFFSET $3 LIMIT $4\n\t`, point, radius, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, ids)\n}\n\nfunc Hot(state *handler.State, limit, offset int) ([]model.Image, error) {\n\tids := []int64{}\n\n\terr := state.DB.Select(&ids, `\n\tSELECT id FROM content.images\n\tORDER BY ranking(id, views + favorites , featured::int + 3) DESC\n\tOFFSET $1 LIMIT $2\n\t`, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, ids)\n\n}\n\nfunc FeaturedImages(state *handler.State, userId int64, limit, offset int) ([]model.Image, error) {\n\timgs := []int64{}\n\tvar stmt *sqlx.Stmt\n\tvar err error\n\tif userId == 0 {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE (view.user_id = -1 OR view.user_id = $1) AND images.featured = TRUE\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3 \n\t\t`)\n\t} else {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE view.user_id = -1 AND images.featured = TRUE\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t}\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\terr = stmt.Select(&imgs,\n\t\tuserId, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\treturn retrieval.GetImages(state, imgs)\n}\n\nfunc RecentImages(state *handler.State, userId int64, limit, offset int) ([]model.Image, error) {\n\timageIds := []int64{}\n\tvar stmt *sqlx.Stmt\n\tvar err error\n\tif userId == 0 {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE view.user_id = -1 OR view.user_id = $1\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t} else {\n\t\tstmt, err = state.DB.Preparex(`\n\t\tSELECT images.id\n\t\tFROM content.images AS images\n\t\tINNER JOIN permissions.can_view AS view ON view.o_id = images.id\n\t\tWHERE (view.user_id = -1 OR view.user_id = $1) AND images.user_id = $1\n\t\tORDER BY publish_time DESC\n\t\tOFFSET $2 LIMIT $3\n\t\t`)\n\t}\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\terr = stmt.Select(&imageIds,\n\t\tuserId, offset, limit)\n\tif err != nil {\n\t\tif err, ok := err.(*pq.Error); ok {\n\t\t\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, err)\n\t\t}\n\t\treturn []model.Image{}, err\n\t}\n\tlog.Printf(\"GetRecentImages userId: %d limit: %d offset: %d %+v\", userId, limit, offset, imageIds)\n\n\treturn retrieval.GetImages(state, imageIds)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/api\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/api\/middleware\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/config\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\/secrets\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\/users\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/lang\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/fake\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/helm\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/k8s\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/k8sraw\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\/core\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\/generic\/bolt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/server\/ui\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/pprof\"\n\t\"runtime\/trace\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Server is Aptomi server. It serves UI front-end, API calls, as well as does policy resolution & continuous state enforcement\ntype Server struct {\n\tcfg *config.Server\n\tbackgroundErrors chan string\n\n\texternalData *external.Data\n\tstore store.Core\n\tpluginRegistryFactory plugin.RegistryFactory\n\n\thttpServer *http.Server\n\n\trunEnforcement chan bool\n\tenforcementIdx uint\n}\n\n\/\/ NewServer creates a new Aptomi Server\nfunc NewServer(cfg *config.Server) *Server {\n\ts := &Server{\n\t\tcfg: cfg,\n\t\tbackgroundErrors: make(chan string),\n\t\trunEnforcement: make(chan bool, 2048),\n\t}\n\n\treturn s\n}\n\n\/\/ Start initializes Aptomi server, starts API & UI processing, and as well as runs the required background jobs for\n\/\/ continuous policy resolution and state enforcement\nfunc (server *Server) Start() {\n\t\/\/ Init server\n\tserver.initProfiling()\n\tserver.initStore()\n\tserver.initExternalData()\n\tserver.initPluginRegistryFactory()\n\tserver.initPolicyOnFirstRun()\n\n\t\/\/ Start API, UI and Enforcer\n\tserver.startHTTPServer()\n\tserver.startEnforcer()\n\n\t\/\/ Wait for jobs to complete (it essentially hangs forever)\n\tserver.wait()\n}\n\nfunc (server *Server) initPolicyOnFirstRun() {\n\tpolicy, _, err := server.store.GetPolicy(runtime.LastGen)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error while getting latest policy: %s\", err))\n\t}\n\n\t\/\/ if policy does not exist, let's create the first version (it should be created here, before we start the server)\n\tif policy == nil {\n\t\tlog.Infof(\"Policy not found in the store (likely, it's a first run of Aptomi server). Creating empty policy\")\n\t\tinitErr := server.store.InitPolicy()\n\t\tif initErr != nil {\n\t\t\tpanic(fmt.Sprintf(\"error while creating empty policy: %s\", initErr))\n\t\t}\n\t}\n}\n\nfunc (server *Server) initExternalData() {\n\tvar userLoaders []users.UserLoader\n\tfor _, ldap := range server.cfg.Users.LDAP {\n\t\tuserLoaders = append(userLoaders, users.NewUserLoaderFromLDAP(ldap, server.cfg.DomainAdminOverrides))\n\t}\n\tfor _, file := range server.cfg.Users.File {\n\t\tuserLoaders = append(userLoaders, users.NewUserLoaderFromFile(file, server.cfg.DomainAdminOverrides))\n\t}\n\tserver.externalData = external.NewData(\n\t\tusers.NewUserLoaderMultipleSources(userLoaders),\n\t\tsecrets.NewSecretLoaderFromDir(server.cfg.SecretsDir),\n\t)\n}\n\nfunc (server *Server) initProfiling() {\n\tif len(server.cfg.Profile.CPU) > 0 {\n\t\t\/\/ initiate CPU profiler\n\t\tf, err := os.Create(server.cfg.Profile.CPU)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't create file to write CPU profiling information: %s\", server.cfg.Profile.CPU))\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\n\t\t\/\/ CPU profiler needs to be stopped when server exits\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)\n\t\tgo func() {\n\t\t\tfor sig := range c {\n\t\t\t\tlog.Printf(\"captured %v, stopping CPU profiler\", sig)\n\t\t\t\tpprof.StopCPUProfile()\n\t\t\t}\n\t\t}()\n\t}\n\n\tif len(server.cfg.Profile.Trace) > 0 {\n\t\t\/\/ start tracing\n\t\tf, err := os.Create(server.cfg.Profile.Trace)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't create file to write tracing information: %s\", server.cfg.Profile.Trace))\n\t\t}\n\t\ttrace.Start(f)\n\n\t\t\/\/ Tracing needs to be stopped when server exits\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)\n\t\tgo func() {\n\t\t\tfor sig := range c {\n\t\t\t\tlog.Printf(\"captured %v, stopping tracing\", sig)\n\t\t\t\ttrace.Stop()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (server *Server) initStore() {\n\tregistry := runtime.NewRegistry().Append(store.Objects...)\n\tb := bolt.NewGenericStore(registry)\n\terr := b.Open(server.cfg.DB)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Can't open object store: %s\", err))\n\t}\n\tserver.store = core.NewStore(b)\n}\n\nfunc (server *Server) initPluginRegistryFactory() {\n\tserver.pluginRegistryFactory = func() plugin.Registry {\n\t\tclusterTypes := make(map[string]plugin.ClusterPluginConstructor)\n\t\tcodeTypes := make(map[string]map[string]plugin.CodePluginConstructor)\n\n\t\tif !server.cfg.Enforcer.Noop {\n\t\t\tclusterTypes[\"kubernetes\"] = func(cluster *lang.Cluster, cfg config.Plugins) (plugin.ClusterPlugin, error) {\n\t\t\t\treturn k8s.New(cluster, cfg)\n\t\t\t}\n\n\t\t\tcodeTypes[\"kubernetes\"] = make(map[string]plugin.CodePluginConstructor)\n\t\t\tcodeTypes[\"kubernetes\"][\"helm\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn helm.New(cluster, cfg)\n\t\t\t}\n\t\t\tcodeTypes[\"kubernetes\"][\"raw\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn k8sraw.New(cluster, cfg)\n\t\t\t}\n\t\t} else {\n\t\t\tsleepTime := server.cfg.Enforcer.NoopSleep\n\n\t\t\tclusterTypes[\"kubernetes\"] = func(cluster *lang.Cluster, cfg config.Plugins) (plugin.ClusterPlugin, error) {\n\t\t\t\treturn fake.NewNoOpClusterPlugin(sleepTime), nil\n\t\t\t}\n\n\t\t\tcodeTypes[\"kubernetes\"] = make(map[string]plugin.CodePluginConstructor)\n\t\t\tcodeTypes[\"kubernetes\"][\"helm\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn fake.NewNoOpCodePlugin(sleepTime), nil\n\t\t\t}\n\t\t}\n\n\t\treturn plugin.NewRegistry(server.cfg.Plugins, clusterTypes, codeTypes)\n\t}\n}\n\nfunc (server *Server) startHTTPServer() {\n\trouter := httprouter.New()\n\n\tif len(server.cfg.Auth.Secret) == 0 {\n\t\t\/\/ todo better handle it\n\t\t\/\/ set some default insecure secret\n\t\tserver.cfg.Auth.Secret = \"Shahsh4e cohp8aeT Ifaic3ah ohs4eiSh vee7Qua7 eiCh2iLo eiroh3Ie oeg2ruPu\"\n\t\tlog.Warnf(\"The auth.secret not specified in config, using insecure default one\")\n\t}\n\n\tapi.Serve(router, server.store, server.externalData, server.pluginRegistryFactory, server.cfg.Auth.Secret, server.runEnforcement)\n\tserver.serveUI(router)\n\n\tvar handler http.Handler = router\n\n\t\/\/ todo write to logrus\n\thandler = handlers.CombinedLoggingHandler(os.Stdout, handler) \/\/ todo(slukjanov): make it at least somehow configurable - for example, select file to write to with rotation\n\thandler = middleware.NewPanicHandler(handler)\n\t\/\/ todo(slukjanov): add configurable handlers.ProxyHeaders to f behind the nginx or any other proxy\n\t\/\/ todo(slukjanov): add compression handler and compress by default in client\n\n\tserver.httpServer = &http.Server{\n\t\tHandler: handler,\n\t\tAddr: server.cfg.API.ListenAddr(),\n\t\tWriteTimeout: 30 * time.Second,\n\t\tReadTimeout: 30 * time.Second,\n\t}\n\n\t\/\/ Start HTTP server\n\tserver.runInBackground(\"HTTP Server \/ API\", true, func() {\n\t\tpanic(server.httpServer.ListenAndServe())\n\t})\n}\n\nfunc (server *Server) serveUI(router *httprouter.Router) {\n\tif !server.cfg.UI.Enable {\n\t\tlog.Infof(\"UI isn't enabled. UI will not be served\")\n\t\treturn\n\t}\n\n\tfileServer := http.FileServer(ui.HTTP)\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/\"\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n\n\trouter.GET(\"\/index.html\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/\"\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n\n\trouter.GET(\"\/static\/*filepath\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/static\/\" + ps.ByName(\"filepath\")\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n}\n\nfunc (server *Server) startEnforcer() {\n\t\/\/ Start policy enforcement job\n\tif !server.cfg.Enforcer.Disabled {\n\t\tserver.runInBackground(\"Policy Enforcer\", true, func() {\n\t\t\tpanic(server.enforceLoop())\n\t\t})\n\t}\n}\n<commit_msg>fix linter<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/api\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/api\/middleware\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/config\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\/secrets\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/external\/users\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/lang\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/fake\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/helm\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/k8s\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/plugin\/k8sraw\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\/core\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/runtime\/store\/generic\/bolt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/server\/ui\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\/pprof\"\n\t\"runtime\/trace\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Server is Aptomi server. It serves UI front-end, API calls, as well as does policy resolution & continuous state enforcement\ntype Server struct {\n\tcfg *config.Server\n\tbackgroundErrors chan string\n\n\texternalData *external.Data\n\tstore store.Core\n\tpluginRegistryFactory plugin.RegistryFactory\n\n\thttpServer *http.Server\n\n\trunEnforcement chan bool\n\tenforcementIdx uint\n}\n\n\/\/ NewServer creates a new Aptomi Server\nfunc NewServer(cfg *config.Server) *Server {\n\ts := &Server{\n\t\tcfg: cfg,\n\t\tbackgroundErrors: make(chan string),\n\t\trunEnforcement: make(chan bool, 2048),\n\t}\n\n\treturn s\n}\n\n\/\/ Start initializes Aptomi server, starts API & UI processing, and as well as runs the required background jobs for\n\/\/ continuous policy resolution and state enforcement\nfunc (server *Server) Start() {\n\t\/\/ Init server\n\tserver.initProfiling()\n\tserver.initStore()\n\tserver.initExternalData()\n\tserver.initPluginRegistryFactory()\n\tserver.initPolicyOnFirstRun()\n\n\t\/\/ Start API, UI and Enforcer\n\tserver.startHTTPServer()\n\tserver.startEnforcer()\n\n\t\/\/ Wait for jobs to complete (it essentially hangs forever)\n\tserver.wait()\n}\n\nfunc (server *Server) initPolicyOnFirstRun() {\n\tpolicy, _, err := server.store.GetPolicy(runtime.LastGen)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error while getting latest policy: %s\", err))\n\t}\n\n\t\/\/ if policy does not exist, let's create the first version (it should be created here, before we start the server)\n\tif policy == nil {\n\t\tlog.Infof(\"Policy not found in the store (likely, it's a first run of Aptomi server). Creating empty policy\")\n\t\tinitErr := server.store.InitPolicy()\n\t\tif initErr != nil {\n\t\t\tpanic(fmt.Sprintf(\"error while creating empty policy: %s\", initErr))\n\t\t}\n\t}\n}\n\nfunc (server *Server) initExternalData() {\n\tvar userLoaders []users.UserLoader\n\tfor _, ldap := range server.cfg.Users.LDAP {\n\t\tuserLoaders = append(userLoaders, users.NewUserLoaderFromLDAP(ldap, server.cfg.DomainAdminOverrides))\n\t}\n\tfor _, file := range server.cfg.Users.File {\n\t\tuserLoaders = append(userLoaders, users.NewUserLoaderFromFile(file, server.cfg.DomainAdminOverrides))\n\t}\n\tserver.externalData = external.NewData(\n\t\tusers.NewUserLoaderMultipleSources(userLoaders),\n\t\tsecrets.NewSecretLoaderFromDir(server.cfg.SecretsDir),\n\t)\n}\n\nfunc (server *Server) initProfiling() {\n\tif len(server.cfg.Profile.CPU) > 0 {\n\t\t\/\/ initiate CPU profiler\n\t\tf, err := os.Create(server.cfg.Profile.CPU)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't create file to write CPU profiling information: %s\", server.cfg.Profile.CPU))\n\t\t}\n\t\terr = pprof.StartCPUProfile(f)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't start CPU profiling: %s\", err))\n\t\t}\n\n\t\t\/\/ CPU profiler needs to be stopped when server exits\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)\n\t\tgo func() {\n\t\t\tfor sig := range c {\n\t\t\t\tlog.Printf(\"captured %v, stopping CPU profiler\", sig)\n\t\t\t\tpprof.StopCPUProfile()\n\t\t\t}\n\t\t}()\n\t}\n\n\tif len(server.cfg.Profile.Trace) > 0 {\n\t\t\/\/ start tracing\n\t\tf, err := os.Create(server.cfg.Profile.Trace)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't create file to write tracing information: %s\", server.cfg.Profile.Trace))\n\t\t}\n\t\terr = trace.Start(f)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"can't start tracing: %s\", err))\n\t\t}\n\n\t\t\/\/ Tracing needs to be stopped when server exits\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)\n\t\tgo func() {\n\t\t\tfor sig := range c {\n\t\t\t\tlog.Printf(\"captured %v, stopping tracing\", sig)\n\t\t\t\ttrace.Stop()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (server *Server) initStore() {\n\tregistry := runtime.NewRegistry().Append(store.Objects...)\n\tb := bolt.NewGenericStore(registry)\n\terr := b.Open(server.cfg.DB)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Can't open object store: %s\", err))\n\t}\n\tserver.store = core.NewStore(b)\n}\n\nfunc (server *Server) initPluginRegistryFactory() {\n\tserver.pluginRegistryFactory = func() plugin.Registry {\n\t\tclusterTypes := make(map[string]plugin.ClusterPluginConstructor)\n\t\tcodeTypes := make(map[string]map[string]plugin.CodePluginConstructor)\n\n\t\tif !server.cfg.Enforcer.Noop {\n\t\t\tclusterTypes[\"kubernetes\"] = func(cluster *lang.Cluster, cfg config.Plugins) (plugin.ClusterPlugin, error) {\n\t\t\t\treturn k8s.New(cluster, cfg)\n\t\t\t}\n\n\t\t\tcodeTypes[\"kubernetes\"] = make(map[string]plugin.CodePluginConstructor)\n\t\t\tcodeTypes[\"kubernetes\"][\"helm\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn helm.New(cluster, cfg)\n\t\t\t}\n\t\t\tcodeTypes[\"kubernetes\"][\"raw\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn k8sraw.New(cluster, cfg)\n\t\t\t}\n\t\t} else {\n\t\t\tsleepTime := server.cfg.Enforcer.NoopSleep\n\n\t\t\tclusterTypes[\"kubernetes\"] = func(cluster *lang.Cluster, cfg config.Plugins) (plugin.ClusterPlugin, error) {\n\t\t\t\treturn fake.NewNoOpClusterPlugin(sleepTime), nil\n\t\t\t}\n\n\t\t\tcodeTypes[\"kubernetes\"] = make(map[string]plugin.CodePluginConstructor)\n\t\t\tcodeTypes[\"kubernetes\"][\"helm\"] = func(cluster plugin.ClusterPlugin, cfg config.Plugins) (plugin.CodePlugin, error) {\n\t\t\t\treturn fake.NewNoOpCodePlugin(sleepTime), nil\n\t\t\t}\n\t\t}\n\n\t\treturn plugin.NewRegistry(server.cfg.Plugins, clusterTypes, codeTypes)\n\t}\n}\n\nfunc (server *Server) startHTTPServer() {\n\trouter := httprouter.New()\n\n\tif len(server.cfg.Auth.Secret) == 0 {\n\t\t\/\/ todo better handle it\n\t\t\/\/ set some default insecure secret\n\t\tserver.cfg.Auth.Secret = \"Shahsh4e cohp8aeT Ifaic3ah ohs4eiSh vee7Qua7 eiCh2iLo eiroh3Ie oeg2ruPu\"\n\t\tlog.Warnf(\"The auth.secret not specified in config, using insecure default one\")\n\t}\n\n\tapi.Serve(router, server.store, server.externalData, server.pluginRegistryFactory, server.cfg.Auth.Secret, server.runEnforcement)\n\tserver.serveUI(router)\n\n\tvar handler http.Handler = router\n\n\t\/\/ todo write to logrus\n\thandler = handlers.CombinedLoggingHandler(os.Stdout, handler) \/\/ todo(slukjanov): make it at least somehow configurable - for example, select file to write to with rotation\n\thandler = middleware.NewPanicHandler(handler)\n\t\/\/ todo(slukjanov): add configurable handlers.ProxyHeaders to f behind the nginx or any other proxy\n\t\/\/ todo(slukjanov): add compression handler and compress by default in client\n\n\tserver.httpServer = &http.Server{\n\t\tHandler: handler,\n\t\tAddr: server.cfg.API.ListenAddr(),\n\t\tWriteTimeout: 30 * time.Second,\n\t\tReadTimeout: 30 * time.Second,\n\t}\n\n\t\/\/ Start HTTP server\n\tserver.runInBackground(\"HTTP Server \/ API\", true, func() {\n\t\tpanic(server.httpServer.ListenAndServe())\n\t})\n}\n\nfunc (server *Server) serveUI(router *httprouter.Router) {\n\tif !server.cfg.UI.Enable {\n\t\tlog.Infof(\"UI isn't enabled. UI will not be served\")\n\t\treturn\n\t}\n\n\tfileServer := http.FileServer(ui.HTTP)\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/\"\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n\n\trouter.GET(\"\/index.html\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/\"\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n\n\trouter.GET(\"\/static\/*filepath\", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {\n\t\treq.URL.Path = \"\/static\/\" + ps.ByName(\"filepath\")\n\t\tfileServer.ServeHTTP(w, req)\n\t})\n}\n\nfunc (server *Server) startEnforcer() {\n\t\/\/ Start policy enforcement job\n\tif !server.cfg.Enforcer.Disabled {\n\t\tserver.runInBackground(\"Policy Enforcer\", true, func() {\n\t\t\tpanic(server.enforceLoop())\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Check for active team membership when fetching s.UserInfo<commit_after><|endoftext|>"} {"text":"<commit_before>package true_git\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n)\n\nconst (\n\tMinGitVersionConstraint = \"1.9.0\"\n\tMinGitVersionWithSubmodulesConstraint = \"2.14.0\"\n)\n\nvar (\n\tGitVersion string\n\tRequiredGitVersionMsg = fmt.Sprintf(\"Git version >= %s required! To use submodules install git >= %s.\", MinGitVersionConstraint, MinGitVersionWithSubmodulesConstraint)\n\n\tgitVersionObj *semver.Version\n\tminVersionConstraintObj *semver.Constraints\n\tsubmoduleVersionConstraintObj *semver.Constraints\n\n\toutStream, errStream io.Writer\n)\n\ntype Options struct {\n\tOut, Err io.Writer\n}\n\nfunc Init(opts Options) error {\n\toutStream = os.Stdout\n\terrStream = os.Stderr\n\tif opts.Out != nil {\n\t\toutStream = opts.Out\n\t}\n\tif opts.Err != nil {\n\t\terrStream = opts.Err\n\t}\n\n\tvar err error\n\n\tv, err := getGitCliVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tGitVersion = v\n\n\tvObj, err := semver.NewVersion(GitVersion)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected `git --version` spec `%s`: %s\\n%s\\nYour git version is %s.\", GitVersion, err, RequiredGitVersionMsg, GitVersion)\n\t}\n\tgitVersionObj = vObj\n\n\tvar c *semver.Constraints\n\n\tc, err = semver.NewConstraint(fmt.Sprintf(\">= %s\", MinGitVersionConstraint))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tminVersionConstraintObj = c\n\n\tif !minVersionConstraintObj.Check(gitVersionObj) {\n\t\treturn fmt.Errorf(\"%s\\nYour git version is %s.\", RequiredGitVersionMsg, GitVersion)\n\t}\n\n\tc, err = semver.NewConstraint(fmt.Sprintf(\">= %s\", MinGitVersionWithSubmodulesConstraint))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsubmoduleVersionConstraintObj = c\n\n\treturn nil\n}\n\nfunc getGitCliVersion() (string, error) {\n\tcmd := exec.Command(\"git\", \"--version\")\n\n\tout := bytes.Buffer{}\n\tcmd.Stdout = &out\n\tcmd.Stderr = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"git command is not available!\\n%s\", RequiredGitVersionMsg)\n\t}\n\n\tversionParts := strings.Split(out.String(), \" \")\n\tif len(versionParts) != 3 {\n\t\treturn \"\", fmt.Errorf(\"unexpected `git --version` output:\\n\\n```\\n%s\\n```\\n\\n%s\", strings.TrimSpace(out.String()), RequiredGitVersionMsg)\n\t}\n\trawVersion := strings.TrimSpace(versionParts[2])\n\n\treturn rawVersion, nil\n}\n\nfunc checkSubmoduleConstraint() error {\n\tif !submoduleVersionConstraintObj.Check(gitVersionObj) {\n\t\treturn fmt.Errorf(\"To use submodules install git >= %s! Your git version is %s.\", MinGitVersionWithSubmodulesConstraint, GitVersion)\n\t}\n\treturn nil\n}\n<commit_msg>Fix git version validation * check only MAJOR.MINOR.PATCH version part<commit_after>package true_git\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n)\n\nconst (\n\tMinGitVersionConstraintValue = \"1.9\"\n\tMinGitVersionWithSubmodulesConstraintValue = \"2.14\"\n)\n\nvar (\n\tgitVersion *semver.Version\n\n\tminGitVersionErrorMsg = fmt.Sprintf(\"Git version >= %s required\", MinGitVersionConstraintValue)\n\tsubmodulesVersionErrorMsg = fmt.Sprintf(\"To use git submodules install git >= %s\", MinGitVersionWithSubmodulesConstraintValue)\n\n\toutStream, errStream io.Writer\n)\n\ntype Options struct {\n\tOut, Err io.Writer\n}\n\nfunc Init(opts Options) error {\n\toutStream = os.Stdout\n\terrStream = os.Stderr\n\tif opts.Out != nil {\n\t\toutStream = opts.Out\n\t}\n\tif opts.Err != nil {\n\t\terrStream = opts.Err\n\t}\n\n\tvar err error\n\n\tv, err := getGitCliVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvObj, err := semver.NewVersion(v)\n\tif err != nil {\n\t\terrMsg := strings.Join([]string{\n\t\t\tfmt.Sprintf(\"unexpected git version spec %s\", v),\n\t\t\tminGitVersionErrorMsg,\n\t\t\tsubmodulesVersionErrorMsg,\n\t\t}, \".\\n\")\n\n\t\treturn errors.New(errMsg)\n\t}\n\tgitVersion = vObj\n\n\tif err := checkMinVersionConstraint(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc getGitCliVersion() (string, error) {\n\tcmd := exec.Command(\"git\", \"version\")\n\n\tout := bytes.Buffer{}\n\tcmd.Stdout = &out\n\tcmd.Stderr = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\terrMsg := strings.Join([]string{\n\t\t\tfmt.Sprintf(\"git version command failed: %s\", err),\n\t\t\tminGitVersionErrorMsg,\n\t\t\tsubmodulesVersionErrorMsg,\n\t\t}, \".\\n\")\n\n\t\treturn \"\", errors.New(errMsg)\n\t}\n\n\tstrippedOut := strings.TrimSpace(out.String())\n\trightPart := strings.TrimLeft(strippedOut, \"git version \")\n\tfullVersion := strings.Split(rightPart, \" \")[0]\n\tfullVersionParts := strings.Split(fullVersion, \".\")\n\n\tlowestVersionPartInd := 3\n\tif len(fullVersionParts) < lowestVersionPartInd {\n\t\tlowestVersionPartInd = len(fullVersionParts)\n\t}\n\n\tversion := strings.Join(fullVersionParts[0:lowestVersionPartInd], \".\")\n\n\treturn version, nil\n}\n\nfunc checkMinVersionConstraint() error {\n\tconstraint, err := semver.NewConstraint(fmt.Sprintf(\">= %s\", MinGitVersionConstraintValue))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif !constraint.Check(gitVersion) {\n\t\terrMsg := strings.Join([]string{\n\t\t\tstrings.ToLower(minGitVersionErrorMsg),\n\t\t\tsubmodulesVersionErrorMsg,\n\t\t\tfmt.Sprintf(\"Your git version is %s\", gitVersion.String()),\n\t\t}, \".\\n\")\n\n\t\treturn errors.New(errMsg)\n\t}\n\n\treturn nil\n}\n\nfunc checkSubmoduleConstraint() error {\n\tconstraint, err := semver.NewConstraint(fmt.Sprintf(\">= %s\", MinGitVersionWithSubmodulesConstraintValue))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif !constraint.Check(gitVersion) {\n\t\terrMsg := strings.Join([]string{\n\t\t\tstrings.ToLower(submodulesVersionErrorMsg),\n\t\t\tfmt.Sprintf(\"Your git version is %s\", gitVersion.String()),\n\t\t}, \".\\n\")\n\n\t\treturn errors.New(errMsg)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 caicloud authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage worker\n\nimport (\n\t\"fmt\"\n\n\tlog \"github.com\/golang\/glog\"\n\n\t\"github.com\/caicloud\/cyclone\/cmd\/worker\/options\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/api\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/docker\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/scm\"\n\t_ \"github.com\/caicloud\/cyclone\/pkg\/scm\/provider\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/worker\/cycloneserver\"\n\t_ \"github.com\/caicloud\/cyclone\/pkg\/worker\/scm\/provider\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/worker\/stage\"\n)\n\n\/\/ Worker ...\ntype Worker struct {\n\tClient cycloneserver.CycloneServerClient\n\tOptions *options.WorkerOptions\n}\n\n\/\/ NewWorker returns a new APIServer with config\nfunc NewWorker(opts *options.WorkerOptions) *Worker {\n\tclient := cycloneserver.NewClient(opts.CycloneServer)\n\n\treturn &Worker{\n\t\tClient: client,\n\t\tOptions: opts,\n\t}\n}\n\n\/\/ Run starts the worker\nfunc (worker *Worker) Run() error {\n\tlog.Info(\"worker start with options: %v\", worker.Options)\n\n\t\/\/ Get event for cyclone server\n\teventID := worker.Options.EventID\n\tevent, err := worker.Client.GetEvent(eventID)\n\tif err != nil {\n\t\tlog.Errorf(\"fail to get event %s as %s\", eventID, err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ Handle event\n\tlog.Infof(\"start to handle event %s\", eventID)\n\tworker.HandleEvent(event)\n\n\treturn nil\n}\n\n\/\/ Steps:\n\/\/ 1. Git clone code\n\/\/ 2. New Docker manager\n\/\/ 3. Run pipeline stages\n\/\/ 4. Create the tag in SCM\nfunc (worker *Worker) HandleEvent(event *api.Event) {\n\tproject := event.Project\n\tpipeline := event.Pipeline\n\tperformParams := event.PipelineRecord.PerformParams\n\tperformStages := performParams.Stages\n\tstageSet := convertPerformStageSet(performStages)\n\tbuild := pipeline.Build\n\n\topts := worker.Options\n\tregistryLocation := opts.RegistryLocation\n\tregistryUsername := opts.RegistryUsername\n\tregistryPassword := opts.RegistryPassword\n\n\tdockerManager, err := docker.NewDockerManager(opts.DockerHost, registryLocation, registryUsername, registryPassword)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\t\/\/ Init StageStatus\n\tif event.PipelineRecord.StageStatus == nil {\n\t\tevent.PipelineRecord.StageStatus = &api.StageStatus{}\n\t}\n\n\t\/\/ TODO(robin) Seperate unit test and package stage.\n\tstageManager := stage.NewStageManager(dockerManager, worker.Client, project.Registry, performParams)\n\tstageManager.SetRecordInfo(project.Name, pipeline.Name, event.ID)\n\tstageManager.SetEvent(event)\n\n\t\/\/ Execute the code checkout stage.\n\terr = stageManager.ExecCodeCheckout(project.SCM.Token, build.Stages.CodeCheckout)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\t\/\/ Execute the package stage if necessary.\n\tif _, ok := stageSet[api.PackageStageName]; ok {\n\t\terr = stageManager.ExecPackage(build.BuilderImage, build.BuildInfo, build.Stages.UnitTest, build.Stages.Package)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ The built images from image build stage.\n\tvar builtImages []string\n\n\t\/\/ Execute the image build stage if necessary.\n\tif _, ok := stageSet[api.ImageBuildStageName]; ok {\n\t\tbuiltImages, err = stageManager.ExecImageBuild(build.Stages.ImageBuild)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Execute the integration test stage if necessary.\n\tif _, ok := stageSet[api.IntegrationTestStageName]; ok {\n\t\terr = stageManager.ExecIntegrationTest(builtImages, build.Stages.IntegrationTest)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Execute the image release stage if necessary.\n\tif _, ok := stageSet[api.ImageReleaseStageName]; ok {\n\t\terr = stageManager.ExecImageRelease(builtImages, build.Stages.ImageRelease)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tif event.PipelineRecord.PerformParams.CreateSCMTag {\n\t\tcodesource := event.Pipeline.Build.Stages.CodeCheckout.MainRepo\n\t\terr = scm.NewTagFromLatest(codesource, project.SCM, event.PipelineRecord.Name, event.PipelineRecord.PerformParams.Description)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"new tag from latest fail : %v\", err)\n\t\t\tevent.PipelineRecord.Status = api.Failed\n\t\t\tevent.PipelineRecord.ErrorMessage = fmt.Sprintf(\"generate tag fail: %v\", err)\n\t\t\terr = worker.Client.SendEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"set event result err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Info(\"success: \")\n\n\t\/\/ update event.PipelineRecord.Status from running to success\n\tevent.PipelineRecord.Status = api.Success\n\n\t\/\/ Sent event for cyclone server\n\terr = worker.Client.SendEvent(event)\n\tif err != nil {\n\t\tlog.Errorf(\"set event result err: %v\", err)\n\t\treturn\n\t}\n\tlog.Infof(\"send event %s to server\", event)\n}\n\nfunc convertPerformStageSet(stages []api.PipelineStageName) map[api.PipelineStageName]struct{} {\n\tstageSet := make(map[api.PipelineStageName]struct{})\n\tfor _, stage := range stages {\n\t\tstageSet[stage] = struct{}{}\n\t}\n\n\treturn stageSet\n}\n<commit_msg>fix: add some prompt message when create scm tag failed (#558)<commit_after>\/*\nCopyright 2016 caicloud authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage worker\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tlog \"github.com\/golang\/glog\"\n\n\t\"github.com\/caicloud\/cyclone\/cmd\/worker\/options\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/api\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/docker\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/scm\"\n\t_ \"github.com\/caicloud\/cyclone\/pkg\/scm\/provider\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/worker\/cycloneserver\"\n\t_ \"github.com\/caicloud\/cyclone\/pkg\/worker\/scm\/provider\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/worker\/stage\"\n)\n\n\/\/ Worker ...\ntype Worker struct {\n\tClient cycloneserver.CycloneServerClient\n\tOptions *options.WorkerOptions\n}\n\n\/\/ NewWorker returns a new APIServer with config\nfunc NewWorker(opts *options.WorkerOptions) *Worker {\n\tclient := cycloneserver.NewClient(opts.CycloneServer)\n\n\treturn &Worker{\n\t\tClient: client,\n\t\tOptions: opts,\n\t}\n}\n\n\/\/ Run starts the worker\nfunc (worker *Worker) Run() error {\n\tlog.Info(\"worker start with options: %v\", worker.Options)\n\n\t\/\/ Get event for cyclone server\n\teventID := worker.Options.EventID\n\tevent, err := worker.Client.GetEvent(eventID)\n\tif err != nil {\n\t\tlog.Errorf(\"fail to get event %s as %s\", eventID, err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ Handle event\n\tlog.Infof(\"start to handle event %s\", eventID)\n\tworker.HandleEvent(event)\n\n\treturn nil\n}\n\n\/\/ Steps:\n\/\/ 1. Git clone code\n\/\/ 2. New Docker manager\n\/\/ 3. Run pipeline stages\n\/\/ 4. Create the tag in SCM\nfunc (worker *Worker) HandleEvent(event *api.Event) {\n\tproject := event.Project\n\tpipeline := event.Pipeline\n\tperformParams := event.PipelineRecord.PerformParams\n\tperformStages := performParams.Stages\n\tstageSet := convertPerformStageSet(performStages)\n\tbuild := pipeline.Build\n\n\topts := worker.Options\n\tregistryLocation := opts.RegistryLocation\n\tregistryUsername := opts.RegistryUsername\n\tregistryPassword := opts.RegistryPassword\n\n\tdockerManager, err := docker.NewDockerManager(opts.DockerHost, registryLocation, registryUsername, registryPassword)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\t\/\/ Init StageStatus\n\tif event.PipelineRecord.StageStatus == nil {\n\t\tevent.PipelineRecord.StageStatus = &api.StageStatus{}\n\t}\n\n\t\/\/ TODO(robin) Seperate unit test and package stage.\n\tstageManager := stage.NewStageManager(dockerManager, worker.Client, project.Registry, performParams)\n\tstageManager.SetRecordInfo(project.Name, pipeline.Name, event.ID)\n\tstageManager.SetEvent(event)\n\n\t\/\/ Execute the code checkout stage.\n\terr = stageManager.ExecCodeCheckout(project.SCM.Token, build.Stages.CodeCheckout)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\t\/\/ Execute the package stage if necessary.\n\tif _, ok := stageSet[api.PackageStageName]; ok {\n\t\terr = stageManager.ExecPackage(build.BuilderImage, build.BuildInfo, build.Stages.UnitTest, build.Stages.Package)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ The built images from image build stage.\n\tvar builtImages []string\n\n\t\/\/ Execute the image build stage if necessary.\n\tif _, ok := stageSet[api.ImageBuildStageName]; ok {\n\t\tbuiltImages, err = stageManager.ExecImageBuild(build.Stages.ImageBuild)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Execute the integration test stage if necessary.\n\tif _, ok := stageSet[api.IntegrationTestStageName]; ok {\n\t\terr = stageManager.ExecIntegrationTest(builtImages, build.Stages.IntegrationTest)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Execute the image release stage if necessary.\n\tif _, ok := stageSet[api.ImageReleaseStageName]; ok {\n\t\terr = stageManager.ExecImageRelease(builtImages, build.Stages.ImageRelease)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tif event.PipelineRecord.PerformParams.CreateSCMTag {\n\t\tcodesource := event.Pipeline.Build.Stages.CodeCheckout.MainRepo\n\t\terr = scm.NewTagFromLatest(codesource, project.SCM, event.PipelineRecord.Name, event.PipelineRecord.PerformParams.Description)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"new tag from latest fail : %v\", err)\n\t\t\tevent.PipelineRecord.Status = api.Failed\n\t\t\tscmType := event.Pipeline.Build.Stages.CodeCheckout.MainRepo.Type\n\t\t\tif (scmType == api.Gitlab && strings.Contains(err.Error(), \"403\")) ||\n\t\t\t\t(scmType == api.Github && strings.Contains(err.Error(), \"404\")) {\n\t\t\t\tevent.PipelineRecord.ErrorMessage = \"Create SCM tag fails, please check your account permissions.\"\n\t\t\t} else {\n\t\t\t\tevent.PipelineRecord.ErrorMessage = fmt.Sprintf(\"Create SCM tag fails : %v\", err)\n\t\t\t}\n\n\t\t\terr = worker.Client.SendEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"set event result err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Info(\"success: \")\n\n\t\/\/ update event.PipelineRecord.Status from running to success\n\tevent.PipelineRecord.Status = api.Success\n\n\t\/\/ Sent event for cyclone server\n\terr = worker.Client.SendEvent(event)\n\tif err != nil {\n\t\tlog.Errorf(\"set event result err: %v\", err)\n\t\treturn\n\t}\n\tlog.Infof(\"send event %s to server\", event)\n}\n\nfunc convertPerformStageSet(stages []api.PipelineStageName) map[api.PipelineStageName]struct{} {\n\tstageSet := make(map[api.PipelineStageName]struct{})\n\tfor _, stage := range stages {\n\t\tstageSet[stage] = struct{}{}\n\t}\n\n\treturn stageSet\n}\n<|endoftext|>"} {"text":"<commit_before>package txtdirect\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestParsePlaceholders(t *testing.T) {\n\ttests := []struct {\n\t\turl string\n\t\trequested string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\t\"example.com{uri}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{~test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{>Test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test-header\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{?test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{dir}\",\n\t\t\t\"https:\/\/example.com\/directory\/test\",\n\t\t\t\"example.com\/directory\/\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label1}\",\n\t\t\t\"https:\/\/subdomain.example.com\/subdomain\",\n\t\t\t\"subdomain.example.com\/subdomain\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label2}\",\n\t\t\t\"https:\/\/subdomain.example.com\/example\",\n\t\t\t\"subdomain.example.com\/example\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label3}\",\n\t\t\t\"https:\/\/subdomain.example.com\/com\",\n\t\t\t\"subdomain.example.com\/com\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\treq := httptest.NewRequest(\"GET\", test.requested, nil)\n\t\treq.AddCookie(&http.Cookie{Name: \"test\", Value: \"test\"})\n\t\treq.Header.Add(\"Test\", \"test-header\")\n\t\tresult, err := parsePlaceholders(test.url, req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif result != test.expected {\n\t\t\tt.Errorf(\"Expected %s, got %s\", test.expected, result)\n\t\t}\n\t}\n}\n\nfunc TestParsePlaceholdersFails(t *testing.T) {\n\ttests := []struct {\n\t\turl string\n\t\trequested string\n\t}{\n\t\t{\n\t\t\t\"example.com\/{label0}\",\n\t\t\t\"https:\/\/example.com\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com\/{label9000}\",\n\t\t\t\"https:\/\/example.com\/test\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\treq := httptest.NewRequest(\"GET\", test.requested, nil)\n\t\t_, err := parsePlaceholders(test.url, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil\")\n\t\t}\n\t}\n}\n<commit_msg>Add {file} placeholder test<commit_after>package txtdirect\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestParsePlaceholders(t *testing.T) {\n\ttests := []struct {\n\t\turl string\n\t\trequested string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\t\"example.com{uri}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{~test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{>Test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test-header\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{uri}\/{?test}\",\n\t\t\t\"https:\/\/example.com\/?test=test\",\n\t\t\t\"example.com\/?test=test\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com{dir}\",\n\t\t\t\"https:\/\/example.com\/directory\/test\",\n\t\t\t\"example.com\/directory\/\",\n\t\t},\n\t\t{\n\t\t\t\"example.com\/directory\/{file}\",\n\t\t\t\"https:\/\/example.com\/directory\/file.pdf\",\n\t\t\t\"example.com\/directory\/file.pdf\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label1}\",\n\t\t\t\"https:\/\/subdomain.example.com\/subdomain\",\n\t\t\t\"subdomain.example.com\/subdomain\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label2}\",\n\t\t\t\"https:\/\/subdomain.example.com\/example\",\n\t\t\t\"subdomain.example.com\/example\",\n\t\t},\n\t\t{\n\t\t\t\"subdomain.example.com\/{label3}\",\n\t\t\t\"https:\/\/subdomain.example.com\/com\",\n\t\t\t\"subdomain.example.com\/com\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\treq := httptest.NewRequest(\"GET\", test.requested, nil)\n\t\treq.AddCookie(&http.Cookie{Name: \"test\", Value: \"test\"})\n\t\treq.Header.Add(\"Test\", \"test-header\")\n\t\tresult, err := parsePlaceholders(test.url, req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif result != test.expected {\n\t\t\tt.Errorf(\"Expected %s, got %s\", test.expected, result)\n\t\t}\n\t}\n}\n\nfunc TestParsePlaceholdersFails(t *testing.T) {\n\ttests := []struct {\n\t\turl string\n\t\trequested string\n\t}{\n\t\t{\n\t\t\t\"example.com\/{label0}\",\n\t\t\t\"https:\/\/example.com\/test\",\n\t\t},\n\t\t{\n\t\t\t\"example.com\/{label9000}\",\n\t\t\t\"https:\/\/example.com\/test\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\treq := httptest.NewRequest(\"GET\", test.requested, nil)\n\t\t_, err := parsePlaceholders(test.url, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package meshutil\n\nimport (\n\t\"encoding\/binary\"\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Vector3 struct {\n\tx, y, z float64\n}\n\ntype FaceVert struct {\n\tpositionIndex, texcoIndex, normalIndex int\n}\n\ntype Polygon struct {\n\tverts []FaceVert\n}\n\ntype Mesh struct {\n\tName string\n\tPositions, Texcos, Normals []Vector3\n\tPolygons []Polygon\n}\n\ntype vertex struct {\n\tposition, normal Vector3\n\tcolor0, color1 Vector3\n\ttexco0, texco1 Vector3\n\ttangent, binormal Vector3\n}\n\ntype triangle struct {\n\tvertices [3]vertex\n}\n\ntype triangleMesh struct {\n\tvertexAttribNames []VertexAttribName\n\ttriangles []triangle\n}\n\nfunc (m *Mesh) toTriangleMesh() *triangleMesh {\n\ttrimesh := new(triangleMesh)\n\thas_position := len(m.Positions) > 0\n\tif has_position {\n\t\ttrimesh.vertexAttribNames = append(trimesh.vertexAttribNames, Position)\n\t}\n\thas_normal := len(m.Normals) > 0\n\tif has_position {\n\t\ttrimesh.vertexAttribNames = append(trimesh.vertexAttribNames, Normal)\n\t}\n\n\tfor _, p := range m.Polygons {\n\t\tvar poly []vertex\n\t\tfor _, f := range p.verts {\n\t\t\tvar newVert vertex\n\t\t\tif has_position {\n\t\t\t\tnewVert.position = m.Positions[f.positionIndex]\n\t\t\t}\n\t\t\tif has_normal {\n\t\t\t\tnewVert.normal = m.Normals[f.normalIndex]\n\t\t\t}\n\t\t\tpoly = append(poly, newVert)\n\t\t}\n\n\t\tswitch (len(poly)) {\n\t\tcase 4:\n\t\t\tvar t, u triangle\n\t\t\tt.vertices[0] = poly[0]\n\t\t\tt.vertices[1] = poly[1]\n\t\t\tt.vertices[2] = poly[2]\n\t\t\tu.vertices[0] = poly[0]\n\t\t\tu.vertices[1] = poly[2]\n\t\t\tu.vertices[2] = poly[3]\n\t\t\ttrimesh.triangles = append(trimesh.triangles, t, u)\n\t\tcase 3:\n\t\t\tvar t triangle\n\t\t\tt.vertices[0] = poly[0]\n\t\t\tt.vertices[1] = poly[1]\n\t\t\tt.vertices[2] = poly[2]\n\t\t\ttrimesh.triangles = append(trimesh.triangles, t)\n\t\t}\n\t}\n\n\treturn trimesh\n}\n\/*\nfunc (m *Mesh) parseObjLine(line string) {\n}\n*\/\nfunc contains(vertices []vertex, vertex vertex) (bool, int) {\n for i, v := range vertices {\n\t if v == vertex { return true, i }\n }\n return false, len(vertices)\n}\n\nfunc (m *triangleMesh) makeBuffers() (vertices []vertex, indices []int) {\n\tfor _, tri := range m.triangles {\n\t\tfor _, vert := range tri.vertices {\n\t\t\talreadyIn, idx := contains(vertices, vert)\n\t\t\tif !alreadyIn {\n\t\t\t\tvertices = append(vertices, vert)\n\t\t\t}\n\t\t\tindices = append(indices, idx)\n\t\t}\n\t}\n\n\treturn\n}\n\ntype glHeader struct {\n\tname [16]byte\n\tvertAttribCount, vertAttribOffset uint32\n\tsurfDescCount, surfDescOffset uint32\n\tvertCount, vertDataOffset, vertDataSize uint32\n\tindCount, indDataOffset, indDataSize uint32\n\taabbCenter, aabbExtent [3]float32\n}\n\ntype VertexAttribName uint32\nconst (\n\tPosition VertexAttribName = iota\n Normal\n Color\n\tTexco0\n\tTexco1\n\tTangentDet\n)\n\ntype VertexAttribType uint32\nconst (\n\tFloat32 VertexAttribType = iota\n\tInt32\n\tInt16\n\tUint16\n\tInt8\n\tUint8\n)\n\n\/*\ntype VertexAttrib struct {\n\tName VertexAttribName\n\tCount int\n}\n*\/\n\ntype VertexAttribFormat struct {\n\tType VertexAttribType\n\tByteSize int\n\tMaxValue uint \/\/ used for normalized attributes\n}\n\nvar attribFormat = [...]VertexAttribFormat{\n\t{Float32, 4, uint(^uint32(0))},\n\t{Int32, 4, uint(^uint32(0) >> 1) - 1},\n\t{Int16, 2, uint(^uint16(0) >> 1) - 1},\n\t{Uint16, 2, uint(^uint16(0)) - 1},\n\t{Int8, 1, uint(^uint8(0) >> 1) -1},\n\t{Uint8, 1, uint(^uint8(0)) - 1},\n}\n\n\/\/ TODO: rename to VertexAttribDesc\ntype VertexAttribDesc struct {\n\tName VertexAttribName\n\tCount int\n\tType VertexAttribType\n\tNormalized bool\n}\n\nvar attribsGL3 = [...]VertexAttribDesc{\n\t{Position, 3, Float32, false},\n\t{Normal, 3, Int16, true},\n\t{Color, 3, Uint8, true},\n\t{Texco0, 2, Float32, false},\n}\n\n\/\/ struct describing what composes a vertex\ntype glVertexAttrib struct {\n\tIndex, Count, Type, Normalized uint32\n\tStride, Offset uint32\n}\n\nfunc (v *Vector3) format(format VertexAttribDesc) []byte {\n\tbuf := new(bytes.Buffer)\n\tswitch (format.Type) {\n\tcase Float32:\n\t\tp := [3]float32{ float32(v.x), float32(v.y), float32(v.z), }\n\t\tbinary.Write(buf, binary.LittleEndian, p)\n\tcase Int16:\n\t\tvar l float64\n\t\tif format.Normalized { l = 4 } else { l = 1 }\n\t\tp := [3]int16{ int16(v.x\/l), int16(v.y\/l), int16(v.z\/l) }\n\t\tbinary.Write(buf, binary.LittleEndian, p)\n\tdefault:\n\t\tpanic(\"oops\")\n\t}\n\treturn buf.Bytes()\n}\n\nfunc (m *Mesh) WriteOpenGL(filename string) {\n\tfile, _ := os.Create(filename)\n\tdefer file.Close()\n\n\ttrimesh := m.toTriangleMesh()\n\n\tfmt.Println(\"Vertex attributes: \", trimesh.vertexAttribNames)\n\toffset := 0\n\tvar glAttribs []glVertexAttrib\n\tfor _, va := range trimesh.vertexAttribNames {\n\t\tdesc := attribsGL3[va]\n\t\tfmt.Printf(\"%+v\\n\", attribsGL3[va])\n\t\tnormalized := 0\n\t\tif desc.Normalized {\n\t\t\tnormalized = 1\n\t\t}\n\t\tglDesc := glVertexAttrib{\n\t\t\tIndex: uint32(desc.Name),\n\t\t\tCount: uint32(desc.Count),\n\t\t\tType: uint32(desc.Type),\n\t\t\tNormalized: uint32(normalized),\n\t\t\tOffset: uint32(offset),\n\t\t}\n\t\toffset += binary.Size(glDesc)\n\t\tglAttribs = append(glAttribs, glDesc)\n\t}\n\n\tvadata := new(bytes.Buffer)\n\tfor _, va := range glAttribs {\n\t\t\/\/ Stride was not computable at first\n\t\tva.Stride = uint32(offset)\n\t\tfmt.Printf(\"%+v\\n\", va)\n\t\tbinary.Write(vadata, binary.LittleEndian, va)\n\t}\n\n\tvertices, indices := trimesh.makeBuffers()\n\tvdata := new(bytes.Buffer)\n\tfor _, v := range vertices {\n\t\tbinary.Write(vdata, binary.LittleEndian, v.position.format(attribsGL3[Position]))\n\t\tbinary.Write(vdata, binary.LittleEndian, v.normal.format(attribsGL3[Normal]))\n\t}\n\n\tidata := new(bytes.Buffer)\n\tfor _, i := range indices {\n\t\tbinary.Write(idata, binary.LittleEndian, uint16(i))\n\t}\n\n\tvar header glHeader\n\theaderSize := binary.Size(header)\n\tcopy(header.name[:], \"untitled\")\n\theader.vertAttribCount = uint32(len(trimesh.vertexAttribNames))\n\theader.vertAttribOffset = uint32(headerSize)\n\theader.surfDescCount = 0\n\theader.surfDescOffset = header.vertAttribOffset + 0\n\theader.vertCount = uint32(len(vertices))\n\theader.vertDataOffset = uint32(headerSize)\n\theader.vertDataSize = uint32(vdata.Len())\n\theader.indCount = uint32(len(indices))\n\theader.indDataOffset = uint32(headerSize + vdata.Len())\n\theader.indDataSize = uint32(idata.Len())\n\n\tfmt.Printf(\"%+v\\n\", header)\n\n\thdata := new(bytes.Buffer)\n\tbinary.Write(hdata, binary.LittleEndian, header)\n\n\tfile.Write(hdata.Bytes())\n\tfile.Write(vadata.Bytes())\n\tfile.Write(vdata.Bytes())\n\tfile.Write(idata.Bytes())\n}\n\nfunc ParseObj(filename string) Mesh {\n\tfile, _ := os.Open(filename)\n\tdefer file.Close()\n\n\tmesh := Mesh{\n\t\tName: \"woot\",\n\t}\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := strings.Split(scanner.Text(), \" \")\n\t\tident, val := line[0], line[1:]\n\t\tswitch (ident) {\n\t\tcase \"v\", \"vn\":\n\t\t\tx, _ := strconv.ParseFloat(val[0], 64)\n\t\t\ty, _ := strconv.ParseFloat(val[1], 64)\n\t\t\tz, _ := strconv.ParseFloat(val[2], 64)\n\t\t\tv := Vector3{ x, y, z }\n\t\t\tif (ident == \"v\") {\n\t\t\t\tmesh.Positions = append(mesh.Positions, v)\n\t\t\t} else {\n\t\t\t\tmesh.Normals = append(mesh.Normals, v)\n\t\t\t}\n\t\tcase \"f\":\n\t\t\tvar p Polygon\n\t\t\tfor _, s := range val {\n\t\t\t\tvar pos, texco, norm int64\n\t\t\t\tidx := strings.Split(s, \"\/\")\n\t\t\t\tpos, _ = strconv.ParseInt(idx[0], 0, 0)\n\t\t\t\tif len(idx) > 2 {\n\t\t\t\t\ttexco, _ = strconv.ParseInt(idx[1], 0, 0)\n\t\t\t\t}\n\t\t\t\tnorm, _ = strconv.ParseInt(idx[len(idx)-1], 0, 0)\n\n\t\t\t\t\/\/ compensate for indices from obj file starting at 1\n\t\t\t\tv := FaceVert{ int(pos)-1, int(texco)-1, int(norm)-1 }\n\t\t\t\tp.verts = append(p.verts, v)\n\t\t\t}\n\t\t\tmesh.Polygons = append(mesh.Polygons, p)\n\t\tdefault:\n\t\t\tfmt.Println(ident + \" not parsed yet\")\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn mesh\n}\n<commit_msg>Parse texture coordinates<commit_after>package meshutil\n\nimport (\n\t\"encoding\/binary\"\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ An intermediate representation for 3d vectors\ntype Vector3 struct {\n\tx, y, z float64\n}\n\ntype FaceVert struct {\n\tpositionIndex, texcoIndex, normalIndex int\n}\n\ntype Polygon struct {\n\tverts []FaceVert\n}\n\ntype Mesh struct {\n\tName string\n\tPositions, Texcos, Normals []Vector3\n\tPolygons []Polygon\n}\n\n\/\/\ntype vertex struct {\n\tposition, normal Vector3\n\tcolor0, color1 Vector3\n\ttexco0, texco1 Vector3\n\ttangent, binormal Vector3\n}\n\ntype triangle struct {\n\tvertices [3]vertex\n}\n\ntype triangleMesh struct {\n\tvertexAttribNames []VertexAttribName\n\ttriangles []triangle\n}\n\nfunc (m *Mesh) toTriangleMesh() *triangleMesh {\n\ttrimesh := new(triangleMesh)\n\thas_position := len(m.Positions) > 0\n\tif has_position {\n\t\ttrimesh.vertexAttribNames = append(trimesh.vertexAttribNames, Position)\n\t}\n\thas_normal := len(m.Normals) > 0\n\tif has_position {\n\t\ttrimesh.vertexAttribNames = append(trimesh.vertexAttribNames, Normal)\n\t}\n\thas_texco := len(m.Texcos) > 0\n\tif has_texco {\n\t\ttrimesh.vertexAttribNames = append(trimesh.vertexAttribNames, Texco0)\n\t}\n\n\tfor _, p := range m.Polygons {\n\t\tvar poly []vertex\n\t\tfor _, f := range p.verts {\n\t\t\tvar newVert vertex\n\t\t\tif has_position {\n\t\t\t\tnewVert.position = m.Positions[f.positionIndex]\n\t\t\t}\n\t\t\tif has_normal {\n\t\t\t\tnewVert.normal = m.Normals[f.normalIndex]\n\t\t\t}\n\t\t\tif has_texco {\n\t\t\t\tnewVert.texco0 = m.Texcos[f.texcoIndex]\n\t\t\t}\n\t\t\tpoly = append(poly, newVert)\n\t\t}\n\n\t\tswitch (len(poly)) {\n\t\tcase 4:\n\t\t\tvar t, u triangle\n\t\t\tt.vertices[0] = poly[0]\n\t\t\tt.vertices[1] = poly[1]\n\t\t\tt.vertices[2] = poly[2]\n\t\t\tu.vertices[0] = poly[0]\n\t\t\tu.vertices[1] = poly[2]\n\t\t\tu.vertices[2] = poly[3]\n\t\t\ttrimesh.triangles = append(trimesh.triangles, t, u)\n\t\tcase 3:\n\t\t\tvar t triangle\n\t\t\tt.vertices[0] = poly[0]\n\t\t\tt.vertices[1] = poly[1]\n\t\t\tt.vertices[2] = poly[2]\n\t\t\ttrimesh.triangles = append(trimesh.triangles, t)\n\t\t}\n\t}\n\n\treturn trimesh\n}\n\nfunc contains(vertices []vertex, vertex vertex) (bool, int) {\n for i, v := range vertices {\n\t if v == vertex { return true, i }\n }\n return false, len(vertices)\n}\n\nfunc (m *triangleMesh) makeBuffers() (vertices []vertex, indices []int) {\n\tfor _, tri := range m.triangles {\n\t\tfor _, vert := range tri.vertices {\n\t\t\talreadyIn, idx := contains(vertices, vert)\n\t\t\tif !alreadyIn {\n\t\t\t\tvertices = append(vertices, vert)\n\t\t\t}\n\t\t\tindices = append(indices, idx)\n\t\t}\n\t}\n\n\treturn\n}\n\ntype glHeader struct {\n\tname [16]byte\n\tvertAttribCount, vertAttribOffset uint32\n\tsurfDescCount, surfDescOffset uint32\n\tvertCount, vertDataOffset, vertDataSize uint32\n\tindCount, indDataOffset, indDataSize uint32\n\taabbCenter, aabbExtent [3]float32\n}\n\ntype VertexAttribName uint32\nconst (\n\tPosition VertexAttribName = iota\n Normal\n Color\n\tTexco0\n\tTexco1\n\tTangentDet\n)\n\ntype VertexAttribType uint32\nconst (\n\tFloat32 VertexAttribType = iota\n\tInt32\n\tInt16\n\tUint16\n\tInt8\n\tUint8\n)\n\n\/*\ntype VertexAttrib struct {\n\tName VertexAttribName\n\tCount int\n}\n*\/\n\ntype VertexAttribFormat struct {\n\tType VertexAttribType\n\tByteSize int\n\tMaxValue uint \/\/ used for normalized attributes\n}\n\nvar attribFormat = [...]VertexAttribFormat{\n\t{Float32, 4, uint(^uint32(0))},\n\t{Int32, 4, uint(^uint32(0) >> 1) - 1},\n\t{Int16, 2, uint(^uint16(0) >> 1) - 1},\n\t{Uint16, 2, uint(^uint16(0)) - 1},\n\t{Int8, 1, uint(^uint8(0) >> 1) -1},\n\t{Uint8, 1, uint(^uint8(0)) - 1},\n}\n\n\/\/ TODO: rename to VertexAttribDesc\ntype VertexAttribDesc struct {\n\tName VertexAttribName\n\tCount int\n\tType VertexAttribType\n\tNormalized bool\n}\n\nvar attribsGL3 = [...]VertexAttribDesc{\n\t{Position, 3, Float32, false},\n\t{Normal, 3, Int16, true},\n\t{Color, 3, Uint8, true},\n\t{Texco0, 2, Float32, false},\n}\n\n\/\/ struct describing what composes a vertex\ntype glVertexAttrib struct {\n\tIndex, Count, Type, Normalized uint32\n\tStride, Offset uint32\n}\n\nfunc (v *Vector3) format(format VertexAttribDesc) []byte {\n\tbuf := new(bytes.Buffer)\n\tswitch (format.Type) {\n\tcase Float32:\n\t\tp := [3]float32{ float32(v.x), float32(v.y), float32(v.z), }\n\t\tbinary.Write(buf, binary.LittleEndian, p)\n\tcase Int16:\n\t\tvar l float64\n\t\tif format.Normalized { l = 4 } else { l = 1 }\n\t\tp := [3]int16{ int16(v.x\/l), int16(v.y\/l), int16(v.z\/l) }\n\t\tbinary.Write(buf, binary.LittleEndian, p)\n\tdefault:\n\t\tpanic(\"oops\")\n\t}\n\treturn buf.Bytes()\n}\n\nfunc (m *Mesh) WriteOpenGL(filename string) {\n\tfile, _ := os.Create(filename)\n\tdefer file.Close()\n\n\ttrimesh := m.toTriangleMesh()\n\n\tfmt.Println(\"Vertex attributes: \", trimesh.vertexAttribNames)\n\toffset := 0\n\tvar glAttribs []glVertexAttrib\n\tfor _, va := range trimesh.vertexAttribNames {\n\t\tdesc := attribsGL3[va]\n\t\tfmt.Printf(\"%+v\\n\", attribsGL3[va])\n\t\tnormalized := 0\n\t\tif desc.Normalized {\n\t\t\tnormalized = 1\n\t\t}\n\t\tglDesc := glVertexAttrib{\n\t\t\tIndex: uint32(desc.Name),\n\t\t\tCount: uint32(desc.Count),\n\t\t\tType: uint32(desc.Type),\n\t\t\tNormalized: uint32(normalized),\n\t\t\tOffset: uint32(offset),\n\t\t}\n\t\toffset += binary.Size(glDesc)\n\t\tglAttribs = append(glAttribs, glDesc)\n\t}\n\n\tvadata := new(bytes.Buffer)\n\tfor _, va := range glAttribs {\n\t\t\/\/ Stride was not computable at first\n\t\tva.Stride = uint32(offset)\n\t\tfmt.Printf(\"%+v\\n\", va)\n\t\tbinary.Write(vadata, binary.LittleEndian, va)\n\t}\n\n\tvertices, indices := trimesh.makeBuffers()\n\tvdata := new(bytes.Buffer)\n\tfor _, v := range vertices {\n\t\tbinary.Write(vdata, binary.LittleEndian, v.position.format(attribsGL3[Position]))\n\t\tbinary.Write(vdata, binary.LittleEndian, v.normal.format(attribsGL3[Normal]))\n\t}\n\n\tidata := new(bytes.Buffer)\n\tfor _, i := range indices {\n\t\tbinary.Write(idata, binary.LittleEndian, uint16(i))\n\t}\n\n\tvar header glHeader\n\theaderSize := binary.Size(header)\n\tcopy(header.name[:len(header.name)-1], m.Name)\n\theader.vertAttribCount = uint32(len(trimesh.vertexAttribNames))\n\theader.vertAttribOffset = uint32(headerSize)\n\theader.surfDescCount = 0\n\theader.surfDescOffset = header.vertAttribOffset + 0\n\theader.vertCount = uint32(len(vertices))\n\theader.vertDataOffset = uint32(headerSize)\n\theader.vertDataSize = uint32(vdata.Len())\n\theader.indCount = uint32(len(indices))\n\theader.indDataOffset = uint32(headerSize + vdata.Len())\n\theader.indDataSize = uint32(idata.Len())\n\n\tfmt.Printf(\"%+v\\n\", header)\n\n\thdata := new(bytes.Buffer)\n\tbinary.Write(hdata, binary.LittleEndian, header)\n\n\tfile.Write(hdata.Bytes())\n\tfile.Write(vadata.Bytes())\n\tfile.Write(vdata.Bytes())\n\tfile.Write(idata.Bytes())\n}\n\nfunc ParseObj(filename string) Mesh {\n\tfile, _ := os.Open(filename)\n\tdefer file.Close()\n\n\tmesh := Mesh{ Name: \"untitled\" }\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := strings.Split(scanner.Text(), \" \")\n\t\tident, val := line[0], line[1:]\n\t\tswitch (ident) {\n\t\tcase \"v\", \"vn\":\n\t\t\tx, _ := strconv.ParseFloat(val[0], 64)\n\t\t\ty, _ := strconv.ParseFloat(val[1], 64)\n\t\t\tz, _ := strconv.ParseFloat(val[2], 64)\n\t\t\tv := Vector3{ x, y, z }\n\t\t\tif (ident == \"v\") {\n\t\t\t\tmesh.Positions = append(mesh.Positions, v)\n\t\t\t} else {\n\t\t\t\tmesh.Normals = append(mesh.Normals, v)\n\t\t\t}\n\t\tcase \"vt\":\n\t\t\ts, _ := strconv.ParseFloat(val[0], 64)\n\t\t\tt, _ := strconv.ParseFloat(val[1], 64)\n\t\t\tv := Vector3{ s, t, 0 }\n\t\t\tmesh.Texcos = append(mesh.Texcos, v)\n\t\tcase \"f\":\n\t\t\tvar p Polygon\n\t\t\tfor _, s := range val {\n\t\t\t\tvar pos, texco, norm int64\n\t\t\t\tidx := strings.Split(s, \"\/\")\n\t\t\t\tpos, _ = strconv.ParseInt(idx[0], 0, 0)\n\t\t\t\tif len(idx) > 2 {\n\t\t\t\t\ttexco, _ = strconv.ParseInt(idx[1], 0, 0)\n\t\t\t\t}\n\t\t\t\tnorm, _ = strconv.ParseInt(idx[len(idx)-1], 0, 0)\n\n\t\t\t\t\/\/ compensate for indices from obj file starting at 1\n\t\t\t\tv := FaceVert{ int(pos)-1, int(texco)-1, int(norm)-1 }\n\t\t\t\tp.verts = append(p.verts, v)\n\t\t\t}\n\t\t\tmesh.Polygons = append(mesh.Polygons, p)\n\t\tdefault:\n\t\t\tfmt.Println(ident + \" not parsed yet\")\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn mesh\n}\n<|endoftext|>"} {"text":"<commit_before>package bugsnag\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com\/bugsnag\/bugsnag-go\/errors\"\n)\n\ntype _account struct {\n\tID string\n\tName string\n\tPlan struct {\n\t\tPremium bool\n\t}\n\tPassword string\n\tsecret string\n\tEmail string `json:\"email\"`\n\tEmptyEmail string `json:\"emptyemail,omitempty\"`\n\tNotEmptyEmail string `json:\"not_empty_email,omitempty\"`\n}\n\ntype _broken struct {\n\tMe *_broken\n\tData string\n}\n\nvar account = _account{}\nvar notifier = New(Configuration{})\n\nfunc TestMetaDataAdd(t *testing.T) {\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": false,\n\t\t}}\n\n\tm.Add(\"one\", \"override\", true)\n\tm.Add(\"one\", \"new\", \"key\")\n\tm.Add(\"new\", \"tab\", account)\n\n\tm.AddStruct(\"lol\", \"not really a struct\")\n\tm.AddStruct(\"account\", account)\n\n\tif !reflect.DeepEqual(m, MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t\t\"Extra data\": {\n\t\t\t\"lol\": \"not really a struct\",\n\t\t},\n\t\t\"account\": {\n\t\t\t\"ID\": \"\",\n\t\t\t\"Name\": \"\",\n\t\t\t\"Plan\": map[string]interface{}{\n\t\t\t\t\"Premium\": false,\n\t\t\t},\n\t\t\t\"Password\": \"\",\n\t\t\t\"email\": \"\",\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Add didn't work: %#v\", m)\n\t}\n}\n\nfunc TestMetaDataUpdate(t *testing.T) {\n\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": false,\n\t\t}}\n\n\tm.Update(MetaData{\n\t\t\"one\": {\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t})\n\n\tif !reflect.DeepEqual(m, MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Update didn't work: %#v\", m)\n\t}\n}\n\nfunc TestMetaDataSanitize(t *testing.T) {\n\n\tvar broken = _broken{}\n\tbroken.Me = &broken\n\tbroken.Data = \"ohai\"\n\taccount.Name = \"test\"\n\taccount.ID = \"test\"\n\taccount.secret = \"hush\"\n\taccount.Email = \"example@example.com\"\n\taccount.EmptyEmail = \"\"\n\taccount.NotEmptyEmail = \"not_empty_email@example.com\"\n\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"bool\": true,\n\t\t\t\"int\": 7,\n\t\t\t\"float\": 7.1,\n\t\t\t\"complex\": complex(1, 1),\n\t\t\t\"func\": func() {},\n\t\t\t\"unsafe\": unsafe.Pointer(broken.Me),\n\t\t\t\"string\": \"string\",\n\t\t\t\"password\": \"secret\",\n\t\t\t\"array\": []hash{{\n\t\t\t\t\"creditcard\": \"1234567812345678\",\n\t\t\t\t\"broken\": broken,\n\t\t\t}},\n\t\t\t\"broken\": broken,\n\t\t\t\"account\": account,\n\t\t},\n\t}\n\n\tn := m.sanitize([]string{\"password\", \"creditcard\"})\n\n\tif !reflect.DeepEqual(n, map[string]interface{}{\n\t\t\"one\": map[string]interface{}{\n\t\t\t\"bool\": true,\n\t\t\t\"int\": 7,\n\t\t\t\"float\": 7.1,\n\t\t\t\"complex\": \"[complex128]\",\n\t\t\t\"string\": \"string\",\n\t\t\t\"unsafe\": \"[unsafe.Pointer]\",\n\t\t\t\"func\": \"[func()]\",\n\t\t\t\"password\": \"[REDACTED]\",\n\t\t\t\"array\": []interface{}{map[string]interface{}{\n\t\t\t\t\"creditcard\": \"[REDACTED]\",\n\t\t\t\t\"broken\": map[string]interface{}{\n\t\t\t\t\t\"Me\": \"[RECURSION]\",\n\t\t\t\t\t\"Data\": \"ohai\",\n\t\t\t\t},\n\t\t\t}},\n\t\t\t\"broken\": map[string]interface{}{\n\t\t\t\t\"Me\": \"[RECURSION]\",\n\t\t\t\t\"Data\": \"ohai\",\n\t\t\t},\n\t\t\t\"account\": map[string]interface{}{\n\t\t\t\t\"ID\": \"test\",\n\t\t\t\t\"Name\": \"test\",\n\t\t\t\t\"Plan\": map[string]interface{}{\n\t\t\t\t\t\"Premium\": false,\n\t\t\t\t},\n\t\t\t\t\"Password\": \"[REDACTED]\",\n\t\t\t\t\"email\": \"example@example.com\",\n\t\t\t\t\"not_empty_email\": \"not_empty_email@example.com\",\n\t\t\t},\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Sanitize didn't work: %#v\", n)\n\t}\n\n}\n\nfunc TestSanitizerSanitize(t *testing.T) {\n\tvar (\n\t\tnilPointer *int\n\t)\n\n\tfor n, tc := range []struct {\n\t\tinput interface{}\n\t\twant interface{}\n\t}{\n\t\t{nilPointer, \"<nil>\"},\n\t} {\n\t\ts := &sanitizer{}\n\t\tgotValue := s.Sanitize(tc.input)\n\n\t\tif got, want := gotValue, tc.want; got != want {\n\t\t\tt.Errorf(\"[%d] got %v, want %v\", n, got, want)\n\t\t}\n\t}\n}\n\nfunc ExampleMetaData() {\n\tnotifier.Notify(errors.Errorf(\"hi world\"),\n\t\tMetaData{\"Account\": {\n\t\t\t\"id\": account.ID,\n\t\t\t\"name\": account.Name,\n\t\t\t\"paying?\": account.Plan.Premium,\n\t\t}})\n}\n<commit_msg>Add test for sanitizing nil interfaces<commit_after>package bugsnag\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com\/bugsnag\/bugsnag-go\/errors\"\n)\n\ntype _account struct {\n\tID string\n\tName string\n\tPlan struct {\n\t\tPremium bool\n\t}\n\tPassword string\n\tsecret string\n\tEmail string `json:\"email\"`\n\tEmptyEmail string `json:\"emptyemail,omitempty\"`\n\tNotEmptyEmail string `json:\"not_empty_email,omitempty\"`\n}\n\ntype _broken struct {\n\tMe *_broken\n\tData string\n}\n\nvar account = _account{}\nvar notifier = New(Configuration{})\n\nfunc TestMetaDataAdd(t *testing.T) {\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": false,\n\t\t}}\n\n\tm.Add(\"one\", \"override\", true)\n\tm.Add(\"one\", \"new\", \"key\")\n\tm.Add(\"new\", \"tab\", account)\n\n\tm.AddStruct(\"lol\", \"not really a struct\")\n\tm.AddStruct(\"account\", account)\n\n\tif !reflect.DeepEqual(m, MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t\t\"Extra data\": {\n\t\t\t\"lol\": \"not really a struct\",\n\t\t},\n\t\t\"account\": {\n\t\t\t\"ID\": \"\",\n\t\t\t\"Name\": \"\",\n\t\t\t\"Plan\": map[string]interface{}{\n\t\t\t\t\"Premium\": false,\n\t\t\t},\n\t\t\t\"Password\": \"\",\n\t\t\t\"email\": \"\",\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Add didn't work: %#v\", m)\n\t}\n}\n\nfunc TestMetaDataUpdate(t *testing.T) {\n\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": false,\n\t\t}}\n\n\tm.Update(MetaData{\n\t\t\"one\": {\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t})\n\n\tif !reflect.DeepEqual(m, MetaData{\n\t\t\"one\": {\n\t\t\t\"key\": \"value\",\n\t\t\t\"override\": true,\n\t\t\t\"new\": \"key\",\n\t\t},\n\t\t\"new\": {\n\t\t\t\"tab\": account,\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Update didn't work: %#v\", m)\n\t}\n}\n\nfunc TestMetaDataSanitize(t *testing.T) {\n\n\tvar broken = _broken{}\n\tbroken.Me = &broken\n\tbroken.Data = \"ohai\"\n\taccount.Name = \"test\"\n\taccount.ID = \"test\"\n\taccount.secret = \"hush\"\n\taccount.Email = \"example@example.com\"\n\taccount.EmptyEmail = \"\"\n\taccount.NotEmptyEmail = \"not_empty_email@example.com\"\n\n\tm := MetaData{\n\t\t\"one\": {\n\t\t\t\"bool\": true,\n\t\t\t\"int\": 7,\n\t\t\t\"float\": 7.1,\n\t\t\t\"complex\": complex(1, 1),\n\t\t\t\"func\": func() {},\n\t\t\t\"unsafe\": unsafe.Pointer(broken.Me),\n\t\t\t\"string\": \"string\",\n\t\t\t\"password\": \"secret\",\n\t\t\t\"array\": []hash{{\n\t\t\t\t\"creditcard\": \"1234567812345678\",\n\t\t\t\t\"broken\": broken,\n\t\t\t}},\n\t\t\t\"broken\": broken,\n\t\t\t\"account\": account,\n\t\t},\n\t}\n\n\tn := m.sanitize([]string{\"password\", \"creditcard\"})\n\n\tif !reflect.DeepEqual(n, map[string]interface{}{\n\t\t\"one\": map[string]interface{}{\n\t\t\t\"bool\": true,\n\t\t\t\"int\": 7,\n\t\t\t\"float\": 7.1,\n\t\t\t\"complex\": \"[complex128]\",\n\t\t\t\"string\": \"string\",\n\t\t\t\"unsafe\": \"[unsafe.Pointer]\",\n\t\t\t\"func\": \"[func()]\",\n\t\t\t\"password\": \"[REDACTED]\",\n\t\t\t\"array\": []interface{}{map[string]interface{}{\n\t\t\t\t\"creditcard\": \"[REDACTED]\",\n\t\t\t\t\"broken\": map[string]interface{}{\n\t\t\t\t\t\"Me\": \"[RECURSION]\",\n\t\t\t\t\t\"Data\": \"ohai\",\n\t\t\t\t},\n\t\t\t}},\n\t\t\t\"broken\": map[string]interface{}{\n\t\t\t\t\"Me\": \"[RECURSION]\",\n\t\t\t\t\"Data\": \"ohai\",\n\t\t\t},\n\t\t\t\"account\": map[string]interface{}{\n\t\t\t\t\"ID\": \"test\",\n\t\t\t\t\"Name\": \"test\",\n\t\t\t\t\"Plan\": map[string]interface{}{\n\t\t\t\t\t\"Premium\": false,\n\t\t\t\t},\n\t\t\t\t\"Password\": \"[REDACTED]\",\n\t\t\t\t\"email\": \"example@example.com\",\n\t\t\t\t\"not_empty_email\": \"not_empty_email@example.com\",\n\t\t\t},\n\t\t},\n\t}) {\n\t\tt.Errorf(\"metadata.Sanitize didn't work: %#v\", n)\n\t}\n\n}\n\nfunc TestSanitizerSanitize(t *testing.T) {\n\tvar (\n\t\tnilPointer *int\n\t\tnilInterface = interface{}(nil)\n\t)\n\n\tfor n, tc := range []struct {\n\t\tinput interface{}\n\t\twant interface{}\n\t}{\n\t\t{nilPointer, \"<nil>\"},\n\t\t{nilInterface, \"<nil>\"},\n\t} {\n\t\ts := &sanitizer{}\n\t\tgotValue := s.Sanitize(tc.input)\n\n\t\tif got, want := gotValue, tc.want; got != want {\n\t\t\tt.Errorf(\"[%d] got %v, want %v\", n, got, want)\n\t\t}\n\t}\n}\n\nfunc ExampleMetaData() {\n\tnotifier.Notify(errors.Errorf(\"hi world\"),\n\t\tMetaData{\"Account\": {\n\t\t\t\"id\": account.ID,\n\t\t\t\"name\": account.Name,\n\t\t\t\"paying?\": account.Plan.Premium,\n\t\t}})\n}\n<|endoftext|>"} {"text":"<commit_before>package native\n\nimport (\n\t\"math\"\n\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with\n\/\/ matrix-vector products provided externally.\n\/\/\n\/\/ Dlacn2 is called sequentially. In between calls, x should be overwritten by\n\/\/ A * X if kase == 1\n\/\/ A^T * X if kase == 2\n\/\/ all other prameters should be unchanged during sequential calls, and the updated\n\/\/ values of est and kase should be used. On the final return (when kase is returned\n\/\/ as 0), V = A * W, where est = norm(V) \/ norm(W).\n\/\/\n\/\/ isign, v, and x must all have length n and will panic otherwise. isave is used\n\/\/ for temporary storage.\nfunc (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave [3]int) (float64, int) {\n\tcheckVector(n, x, 1)\n\tcheckVector(n, v, 1)\n\tif len(isgn) < n {\n\t\tpanic(\"lapack: insufficient isgn length\")\n\t}\n\tif isave[0] < 1 || isave[0] > 5 {\n\t\tpanic(\"lapack: bad isave value\")\n\t}\n\titmax := 5\n\tbi := blas64.Implementation()\n\tif kase == 0 {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = 1 \/ float64(n)\n\t\t}\n\t\tkase = 1\n\t\tisave[0] = 1\n\t\treturn est, kase\n\t}\n\tswitch isave[0] {\n\tdefault:\n\t\tpanic(\"unknown case\")\n\tcase 1:\n\t\tif n == 1 {\n\t\t\tv[0] = x[0]\n\t\t\test = math.Abs(v[0])\n\t\t\tkase = 0\n\t\t\treturn est, kase\n\t\t}\n\t\test = bi.Dasum(n, x, 1)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = math.Copysign(1, x[i])\n\t\t\tisgn[i] = int(x[i])\n\t\t}\n\t\tkase = 2\n\t\tisave[0] = 2\n\t\treturn est, kase\n\tcase 2:\n\t\tisave[1] = bi.Idamax(n, x, 1)\n\t\tisave[2] = 2\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = 0\n\t\t}\n\t\tx[isave[1]] = 1\n\t\tkase = 1\n\t\tisave[0] = 3\n\t\treturn est, kase\n\tcase 3:\n\t\tbi.Dcopy(n, x, 1, v, 1)\n\t\testold := est\n\t\test = bi.Dasum(n, v, 1)\n\t\tsameSigns := true\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif int(math.Copysign(1, x[i])) != isgn[i] {\n\t\t\t\tsameSigns = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sameSigns && est > estold {\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx[i] = math.Copysign(1, x[i])\n\t\t\t\tisgn[i] = int(x[i])\n\t\t\t}\n\t\t\tkase = 2\n\t\t\tisave[0] = 4\n\t\t\treturn est, kase\n\t\t}\n\tcase 4:\n\t\tjlast := isave[1]\n\t\tisave[1] = bi.Idamax(n, x, 1)\n\t\tif x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax {\n\t\t\tisave[2] += 1\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx[i] = 0\n\t\t\t}\n\t\t\tx[isave[1]] = 1\n\t\t\tkase = 1\n\t\t\tisave[0] = 3\n\t\t\treturn est, kase\n\t\t}\n\tcase 5:\n\t\ttmp := 2 * (bi.Dasum(n, x, 1)) \/ float64(3*n)\n\t\tif tmp > est {\n\t\t\tbi.Dcopy(n, x, 1, v, 1)\n\t\t\test = tmp\n\t\t}\n\t\tkase = 0\n\t\treturn est, kase\n\t}\n\t\/\/ Iteration complete. Final stage\n\taltsgn := 1.0\n\tfor i := 0; i < n; i++ {\n\t\tx[i] = altsgn * (1 + float64(i)\/float64(n-1))\n\t\taltsgn *= -1\n\t}\n\tkase = 1\n\tisave[0] = 5\n\treturn est, kase\n}\n<commit_msg>Make isave a pointer so it is modified during the call<commit_after>package native\n\nimport (\n\t\"math\"\n\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with\n\/\/ matrix-vector products provided externally.\n\/\/\n\/\/ Dlacn2 is called sequentially. In between calls, x should be overwritten by\n\/\/ A * X if kase == 1\n\/\/ A^T * X if kase == 2\n\/\/ all other prameters should be unchanged during sequential calls, and the updated\n\/\/ values of est and kase should be used. On the final return (when kase is returned\n\/\/ as 0), V = A * W, where est = norm(V) \/ norm(W).\n\/\/\n\/\/ isign, v, and x must all have length n and will panic otherwise. isave is used\n\/\/ for temporary storage.\nfunc (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave *[3]int) (float64, int) {\n\tcheckVector(n, x, 1)\n\tcheckVector(n, v, 1)\n\tif len(isgn) < n {\n\t\tpanic(\"lapack: insufficient isgn length\")\n\t}\n\tif isave[0] < 1 || isave[0] > 5 {\n\t\tpanic(\"lapack: bad isave value\")\n\t}\n\titmax := 5\n\tbi := blas64.Implementation()\n\tif kase == 0 {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = 1 \/ float64(n)\n\t\t}\n\t\tkase = 1\n\t\tisave[0] = 1\n\t\treturn est, kase\n\t}\n\tswitch isave[0] {\n\tdefault:\n\t\tpanic(\"unknown case\")\n\tcase 1:\n\t\tif n == 1 {\n\t\t\tv[0] = x[0]\n\t\t\test = math.Abs(v[0])\n\t\t\tkase = 0\n\t\t\treturn est, kase\n\t\t}\n\t\test = bi.Dasum(n, x, 1)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = math.Copysign(1, x[i])\n\t\t\tisgn[i] = int(x[i])\n\t\t}\n\t\tkase = 2\n\t\tisave[0] = 2\n\t\treturn est, kase\n\tcase 2:\n\t\tisave[1] = bi.Idamax(n, x, 1)\n\t\tisave[2] = 2\n\t\tfor i := 0; i < n; i++ {\n\t\t\tx[i] = 0\n\t\t}\n\t\tx[isave[1]] = 1\n\t\tkase = 1\n\t\tisave[0] = 3\n\t\treturn est, kase\n\tcase 3:\n\t\tbi.Dcopy(n, x, 1, v, 1)\n\t\testold := est\n\t\test = bi.Dasum(n, v, 1)\n\t\tsameSigns := true\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif int(math.Copysign(1, x[i])) != isgn[i] {\n\t\t\t\tsameSigns = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !sameSigns && est > estold {\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx[i] = math.Copysign(1, x[i])\n\t\t\t\tisgn[i] = int(x[i])\n\t\t\t}\n\t\t\tkase = 2\n\t\t\tisave[0] = 4\n\t\t\treturn est, kase\n\t\t}\n\tcase 4:\n\t\tjlast := isave[1]\n\t\tisave[1] = bi.Idamax(n, x, 1)\n\t\tif x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax {\n\t\t\tisave[2] += 1\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx[i] = 0\n\t\t\t}\n\t\t\tx[isave[1]] = 1\n\t\t\tkase = 1\n\t\t\tisave[0] = 3\n\t\t\treturn est, kase\n\t\t}\n\tcase 5:\n\t\ttmp := 2 * (bi.Dasum(n, x, 1)) \/ float64(3*n)\n\t\tif tmp > est {\n\t\t\tbi.Dcopy(n, x, 1, v, 1)\n\t\t\test = tmp\n\t\t}\n\t\tkase = 0\n\t\treturn est, kase\n\t}\n\t\/\/ Iteration complete. Final stage\n\taltsgn := 1.0\n\tfor i := 0; i < n; i++ {\n\t\tx[i] = altsgn * (1 + float64(i)\/float64(n-1))\n\t\taltsgn *= -1\n\t}\n\tkase = 1\n\tisave[0] = 5\n\treturn est, kase\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\npackage dht\n\nimport \"syscall\"\n\nfunc curFileLimit() uint64 {\n\tvar n syscall.Rlimit\n\tsyscall.Getrlimit(syscall.RLIMIT_NOFILE, &n)\n\treturn n.Cur\n}\n<commit_msg>test: fix tests on freebsd<commit_after>\/\/ +build !windows\n\npackage dht\n\nimport \"syscall\"\n\nfunc curFileLimit() uint64 {\n\tvar n syscall.Rlimit\n\tsyscall.Getrlimit(syscall.RLIMIT_NOFILE, &n)\n\t\/\/ cast because some platforms use int64 (e.g., freebsd)\n\treturn uint64(n.Cur)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rafthttp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\tpioutil \"github.com\/coreos\/etcd\/pkg\/ioutil\"\n\t\"github.com\/coreos\/etcd\/pkg\/types\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/version\"\n)\n\nconst (\n\t\/\/ connReadLimitByte limits the number of bytes\n\t\/\/ a single read can read out.\n\t\/\/\n\t\/\/ 64KB should be large enough for not causing\n\t\/\/ throughput bottleneck as well as small enough\n\t\/\/ for not causing a read timeout.\n\tconnReadLimitByte = 64 * 1024\n)\n\nvar (\n\tRaftPrefix = \"\/raft\"\n\tProbingPrefix = path.Join(RaftPrefix, \"probing\")\n\tRaftStreamPrefix = path.Join(RaftPrefix, \"stream\")\n\tRaftSnapshotPrefix = path.Join(RaftPrefix, \"snapshot\")\n\n\terrIncompatibleVersion = errors.New(\"incompatible version\")\n\terrClusterIDMismatch = errors.New(\"cluster ID mismatch\")\n)\n\ntype peerGetter interface {\n\tGet(id types.ID) Peer\n}\n\ntype writerToResponse interface {\n\tWriteTo(w http.ResponseWriter)\n}\n\ntype pipelineHandler struct {\n\tr Raft\n\tcid types.ID\n}\n\n\/\/ newPipelineHandler returns a handler for handling raft messages\n\/\/ from pipeline for RaftPrefix.\n\/\/\n\/\/ The handler reads out the raft message from request body,\n\/\/ and forwards it to the given raft state machine for processing.\nfunc newPipelineHandler(r Raft, cid types.ID) http.Handler {\n\treturn &pipelineHandler{\n\t\tr: r,\n\t\tcid: cid,\n\t}\n}\n\nfunc (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\t\/\/ Limit the data size that could be read from the request body, which ensures that read from\n\t\/\/ connection will not time out accidentally due to possible blocking in underlying implementation.\n\tlimitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)\n\tb, err := ioutil.ReadAll(limitedr)\n\tif err != nil {\n\t\tplog.Errorf(\"failed to read raft message (%v)\", err)\n\t\thttp.Error(w, \"error reading raft message\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar m raftpb.Message\n\tif err := m.Unmarshal(b); err != nil {\n\t\tplog.Errorf(\"failed to unmarshal raft message (%v)\", err)\n\t\thttp.Error(w, \"error unmarshaling raft message\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err := h.r.Process(context.TODO(), m); err != nil {\n\t\tswitch v := err.(type) {\n\t\tcase writerToResponse:\n\t\t\tv.WriteTo(w)\n\t\tdefault:\n\t\t\tplog.Warningf(\"failed to process raft message (%v)\", err)\n\t\t\thttp.Error(w, \"error processing raft message\", http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t\/\/ Write StatusNoContet header after the message has been processed by\n\t\/\/ raft, which facilitates the client to report MsgSnap status.\n\tw.WriteHeader(http.StatusNoContent)\n}\n\ntype snapshotHandler struct {\n\tr Raft\n\tsnapshotter *snap.Snapshotter\n\tcid types.ID\n}\n\nfunc newSnapshotHandler(r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {\n\treturn &snapshotHandler{\n\t\tr: r,\n\t\tsnapshotter: snapshotter,\n\t\tcid: cid,\n\t}\n}\n\n\/\/ ServeHTTP serves HTTP request to receive and process snapshot message.\n\/\/\n\/\/ If request sender dies without closing underlying TCP connection,\n\/\/ the handler will keep waiting for the request body until TCP keepalive\n\/\/ finds out that the connection is broken after several minutes.\n\/\/ This is acceptable because\n\/\/ 1. snapshot messages sent through other TCP connections could still be\n\/\/ received and processed.\n\/\/ 2. this case should happen rarely, so no further optimization is done.\nfunc (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tdec := &messageDecoder{r: r.Body}\n\tm, err := dec.decode()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed to decode raft message (%v)\", err)\n\t\tplog.Errorf(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tif m.Type != raftpb.MsgSnap {\n\t\tplog.Errorf(\"unexpected raft message type %s on snapshot path\", m.Type)\n\t\thttp.Error(w, \"wrong raft message type\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ save incoming database snapshot.\n\tif err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index); err != nil {\n\t\tmsg := fmt.Sprintf(\"failed to save KV snapshot (%v)\", err)\n\t\tplog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tplog.Infof(\"received and saved database snapshot [index: %d, from: %s] successfully\", m.Snapshot.Metadata.Index, types.ID(m.From))\n\n\tif err := h.r.Process(context.TODO(), m); err != nil {\n\t\tswitch v := err.(type) {\n\t\t\/\/ Process may return writerToResponse error when doing some\n\t\t\/\/ additional checks before calling raft.Node.Step.\n\t\tcase writerToResponse:\n\t\t\tv.WriteTo(w)\n\t\tdefault:\n\t\t\tmsg := fmt.Sprintf(\"failed to process raft message (%v)\", err)\n\t\t\tplog.Warningf(msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t\/\/ Write StatusNoContet header after the message has been processed by\n\t\/\/ raft, which facilitates the client to report MsgSnap status.\n\tw.WriteHeader(http.StatusNoContent)\n}\n\ntype streamHandler struct {\n\tpeerGetter peerGetter\n\tr Raft\n\tid types.ID\n\tcid types.ID\n}\n\nfunc newStreamHandler(peerGetter peerGetter, r Raft, id, cid types.ID) http.Handler {\n\treturn &streamHandler{\n\t\tpeerGetter: peerGetter,\n\t\tr: r,\n\t\tid: id,\n\t\tcid: cid,\n\t}\n}\n\nfunc (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\tw.Header().Set(\"Allow\", \"GET\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Server-Version\", version.Version)\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tvar t streamType\n\tswitch path.Dir(r.URL.Path) {\n\tcase streamTypeMsgAppV2.endpoint():\n\t\tt = streamTypeMsgAppV2\n\tcase streamTypeMessage.endpoint():\n\t\tt = streamTypeMessage\n\tdefault:\n\t\tplog.Debugf(\"ignored unexpected streaming request path %s\", r.URL.Path)\n\t\thttp.Error(w, \"invalid path\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tfromStr := path.Base(r.URL.Path)\n\tfrom, err := types.IDFromString(fromStr)\n\tif err != nil {\n\t\tplog.Errorf(\"failed to parse from %s into ID (%v)\", fromStr, err)\n\t\thttp.Error(w, \"invalid from\", http.StatusNotFound)\n\t\treturn\n\t}\n\tif h.r.IsIDRemoved(uint64(from)) {\n\t\tplog.Warningf(\"rejected the stream from peer %s since it was removed\", from)\n\t\thttp.Error(w, \"removed member\", http.StatusGone)\n\t\treturn\n\t}\n\tp := h.peerGetter.Get(from)\n\tif p == nil {\n\t\t\/\/ This may happen in following cases:\n\t\t\/\/ 1. user starts a remote peer that belongs to a different cluster\n\t\t\/\/ with the same cluster ID.\n\t\t\/\/ 2. local etcd falls behind of the cluster, and cannot recognize\n\t\t\/\/ the members that joined after its current progress.\n\t\tplog.Errorf(\"failed to find member %s in cluster %s\", from, h.cid)\n\t\thttp.Error(w, \"error sender not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\twto := h.id.String()\n\tif gto := r.Header.Get(\"X-Raft-To\"); gto != wto {\n\t\tplog.Errorf(\"streaming request ignored (ID mismatch got %s want %s)\", gto, wto)\n\t\thttp.Error(w, \"to field mismatch\", http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.(http.Flusher).Flush()\n\n\tc := newCloseNotifier()\n\tconn := &outgoingConn{\n\t\tt: t,\n\t\tWriter: w,\n\t\tFlusher: w.(http.Flusher),\n\t\tCloser: c,\n\t}\n\tp.attachOutgoingConn(conn)\n\t<-c.closeNotify()\n}\n\n\/\/ checkClusterCompatibilityFromHeader checks the cluster compatibility of\n\/\/ the local member from the given header.\n\/\/ It checks whether the version of local member is compatible with\n\/\/ the versions in the header, and whether the cluster ID of local member\n\/\/ matches the one in the header.\nfunc checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {\n\tif err := checkVersionCompability(header.Get(\"X-Server-From\"), serverVersion(header), minClusterVersion(header)); err != nil {\n\t\tplog.Errorf(\"request version incompatibility (%v)\", err)\n\t\treturn errIncompatibleVersion\n\t}\n\tif gcid := header.Get(\"X-Etcd-Cluster-ID\"); gcid != cid.String() {\n\t\tplog.Errorf(\"request cluster ID mismatch (got %s want %s)\", gcid, cid)\n\t\treturn errClusterIDMismatch\n\t}\n\treturn nil\n}\n\ntype closeNotifier struct {\n\tdone chan struct{}\n}\n\nfunc newCloseNotifier() *closeNotifier {\n\treturn &closeNotifier{\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (n *closeNotifier) Close() error {\n\tclose(n.done)\n\treturn nil\n}\n\nfunc (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }\n<commit_msg>rafthttp: log before receiving snapshot<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rafthttp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\tpioutil \"github.com\/coreos\/etcd\/pkg\/ioutil\"\n\t\"github.com\/coreos\/etcd\/pkg\/types\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/version\"\n)\n\nconst (\n\t\/\/ connReadLimitByte limits the number of bytes\n\t\/\/ a single read can read out.\n\t\/\/\n\t\/\/ 64KB should be large enough for not causing\n\t\/\/ throughput bottleneck as well as small enough\n\t\/\/ for not causing a read timeout.\n\tconnReadLimitByte = 64 * 1024\n)\n\nvar (\n\tRaftPrefix = \"\/raft\"\n\tProbingPrefix = path.Join(RaftPrefix, \"probing\")\n\tRaftStreamPrefix = path.Join(RaftPrefix, \"stream\")\n\tRaftSnapshotPrefix = path.Join(RaftPrefix, \"snapshot\")\n\n\terrIncompatibleVersion = errors.New(\"incompatible version\")\n\terrClusterIDMismatch = errors.New(\"cluster ID mismatch\")\n)\n\ntype peerGetter interface {\n\tGet(id types.ID) Peer\n}\n\ntype writerToResponse interface {\n\tWriteTo(w http.ResponseWriter)\n}\n\ntype pipelineHandler struct {\n\tr Raft\n\tcid types.ID\n}\n\n\/\/ newPipelineHandler returns a handler for handling raft messages\n\/\/ from pipeline for RaftPrefix.\n\/\/\n\/\/ The handler reads out the raft message from request body,\n\/\/ and forwards it to the given raft state machine for processing.\nfunc newPipelineHandler(r Raft, cid types.ID) http.Handler {\n\treturn &pipelineHandler{\n\t\tr: r,\n\t\tcid: cid,\n\t}\n}\n\nfunc (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\t\/\/ Limit the data size that could be read from the request body, which ensures that read from\n\t\/\/ connection will not time out accidentally due to possible blocking in underlying implementation.\n\tlimitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)\n\tb, err := ioutil.ReadAll(limitedr)\n\tif err != nil {\n\t\tplog.Errorf(\"failed to read raft message (%v)\", err)\n\t\thttp.Error(w, \"error reading raft message\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar m raftpb.Message\n\tif err := m.Unmarshal(b); err != nil {\n\t\tplog.Errorf(\"failed to unmarshal raft message (%v)\", err)\n\t\thttp.Error(w, \"error unmarshaling raft message\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif err := h.r.Process(context.TODO(), m); err != nil {\n\t\tswitch v := err.(type) {\n\t\tcase writerToResponse:\n\t\t\tv.WriteTo(w)\n\t\tdefault:\n\t\t\tplog.Warningf(\"failed to process raft message (%v)\", err)\n\t\t\thttp.Error(w, \"error processing raft message\", http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t\/\/ Write StatusNoContet header after the message has been processed by\n\t\/\/ raft, which facilitates the client to report MsgSnap status.\n\tw.WriteHeader(http.StatusNoContent)\n}\n\ntype snapshotHandler struct {\n\tr Raft\n\tsnapshotter *snap.Snapshotter\n\tcid types.ID\n}\n\nfunc newSnapshotHandler(r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {\n\treturn &snapshotHandler{\n\t\tr: r,\n\t\tsnapshotter: snapshotter,\n\t\tcid: cid,\n\t}\n}\n\n\/\/ ServeHTTP serves HTTP request to receive and process snapshot message.\n\/\/\n\/\/ If request sender dies without closing underlying TCP connection,\n\/\/ the handler will keep waiting for the request body until TCP keepalive\n\/\/ finds out that the connection is broken after several minutes.\n\/\/ This is acceptable because\n\/\/ 1. snapshot messages sent through other TCP connections could still be\n\/\/ received and processed.\n\/\/ 2. this case should happen rarely, so no further optimization is done.\nfunc (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tdec := &messageDecoder{r: r.Body}\n\tm, err := dec.decode()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed to decode raft message (%v)\", err)\n\t\tplog.Errorf(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tif m.Type != raftpb.MsgSnap {\n\t\tplog.Errorf(\"unexpected raft message type %s on snapshot path\", m.Type)\n\t\thttp.Error(w, \"wrong raft message type\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tplog.Infof(\"receiving database snapshot [index:%d, from %s] ...\", m.Snapshot.Metadata.Index, types.ID(m.From))\n\t\/\/ save incoming database snapshot.\n\tif err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index); err != nil {\n\t\tmsg := fmt.Sprintf(\"failed to save KV snapshot (%v)\", err)\n\t\tplog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tplog.Infof(\"received and saved database snapshot [index: %d, from: %s] successfully\", m.Snapshot.Metadata.Index, types.ID(m.From))\n\n\tif err := h.r.Process(context.TODO(), m); err != nil {\n\t\tswitch v := err.(type) {\n\t\t\/\/ Process may return writerToResponse error when doing some\n\t\t\/\/ additional checks before calling raft.Node.Step.\n\t\tcase writerToResponse:\n\t\t\tv.WriteTo(w)\n\t\tdefault:\n\t\t\tmsg := fmt.Sprintf(\"failed to process raft message (%v)\", err)\n\t\t\tplog.Warningf(msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t\/\/ Write StatusNoContet header after the message has been processed by\n\t\/\/ raft, which facilitates the client to report MsgSnap status.\n\tw.WriteHeader(http.StatusNoContent)\n}\n\ntype streamHandler struct {\n\tpeerGetter peerGetter\n\tr Raft\n\tid types.ID\n\tcid types.ID\n}\n\nfunc newStreamHandler(peerGetter peerGetter, r Raft, id, cid types.ID) http.Handler {\n\treturn &streamHandler{\n\t\tpeerGetter: peerGetter,\n\t\tr: r,\n\t\tid: id,\n\t\tcid: cid,\n\t}\n}\n\nfunc (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\tw.Header().Set(\"Allow\", \"GET\")\n\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Server-Version\", version.Version)\n\tw.Header().Set(\"X-Etcd-Cluster-ID\", h.cid.String())\n\n\tif err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tvar t streamType\n\tswitch path.Dir(r.URL.Path) {\n\tcase streamTypeMsgAppV2.endpoint():\n\t\tt = streamTypeMsgAppV2\n\tcase streamTypeMessage.endpoint():\n\t\tt = streamTypeMessage\n\tdefault:\n\t\tplog.Debugf(\"ignored unexpected streaming request path %s\", r.URL.Path)\n\t\thttp.Error(w, \"invalid path\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tfromStr := path.Base(r.URL.Path)\n\tfrom, err := types.IDFromString(fromStr)\n\tif err != nil {\n\t\tplog.Errorf(\"failed to parse from %s into ID (%v)\", fromStr, err)\n\t\thttp.Error(w, \"invalid from\", http.StatusNotFound)\n\t\treturn\n\t}\n\tif h.r.IsIDRemoved(uint64(from)) {\n\t\tplog.Warningf(\"rejected the stream from peer %s since it was removed\", from)\n\t\thttp.Error(w, \"removed member\", http.StatusGone)\n\t\treturn\n\t}\n\tp := h.peerGetter.Get(from)\n\tif p == nil {\n\t\t\/\/ This may happen in following cases:\n\t\t\/\/ 1. user starts a remote peer that belongs to a different cluster\n\t\t\/\/ with the same cluster ID.\n\t\t\/\/ 2. local etcd falls behind of the cluster, and cannot recognize\n\t\t\/\/ the members that joined after its current progress.\n\t\tplog.Errorf(\"failed to find member %s in cluster %s\", from, h.cid)\n\t\thttp.Error(w, \"error sender not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\twto := h.id.String()\n\tif gto := r.Header.Get(\"X-Raft-To\"); gto != wto {\n\t\tplog.Errorf(\"streaming request ignored (ID mismatch got %s want %s)\", gto, wto)\n\t\thttp.Error(w, \"to field mismatch\", http.StatusPreconditionFailed)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.(http.Flusher).Flush()\n\n\tc := newCloseNotifier()\n\tconn := &outgoingConn{\n\t\tt: t,\n\t\tWriter: w,\n\t\tFlusher: w.(http.Flusher),\n\t\tCloser: c,\n\t}\n\tp.attachOutgoingConn(conn)\n\t<-c.closeNotify()\n}\n\n\/\/ checkClusterCompatibilityFromHeader checks the cluster compatibility of\n\/\/ the local member from the given header.\n\/\/ It checks whether the version of local member is compatible with\n\/\/ the versions in the header, and whether the cluster ID of local member\n\/\/ matches the one in the header.\nfunc checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {\n\tif err := checkVersionCompability(header.Get(\"X-Server-From\"), serverVersion(header), minClusterVersion(header)); err != nil {\n\t\tplog.Errorf(\"request version incompatibility (%v)\", err)\n\t\treturn errIncompatibleVersion\n\t}\n\tif gcid := header.Get(\"X-Etcd-Cluster-ID\"); gcid != cid.String() {\n\t\tplog.Errorf(\"request cluster ID mismatch (got %s want %s)\", gcid, cid)\n\t\treturn errClusterIDMismatch\n\t}\n\treturn nil\n}\n\ntype closeNotifier struct {\n\tdone chan struct{}\n}\n\nfunc newCloseNotifier() *closeNotifier {\n\treturn &closeNotifier{\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (n *closeNotifier) Close() error {\n\tclose(n.done)\n\treturn nil\n}\n\nfunc (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ This program can be used as go_android_GOARCH_exec by the Go tool.\n\/\/ It executes binaries on an android device using adb.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc run(args ...string) string {\n\tif flags := os.Getenv(\"GOANDROID_ADB_FLAGS\"); flags != \"\" {\n\t\targs = append(strings.Split(flags, \" \"), args...)\n\t}\n\tbuf := new(bytes.Buffer)\n\tcmd := exec.Command(\"adb\", args...)\n\tcmd.Stdout = io.MultiWriter(os.Stdout, buf)\n\t\/\/ If the adb subprocess somehow hangs, go test will kill this wrapper\n\t\/\/ and wait for our os.Stderr (and os.Stdout) to close as a result.\n\t\/\/ However, if the os.Stderr (or os.Stdout) file descriptors are\n\t\/\/ passed on, the hanging adb subprocess will hold them open and\n\t\/\/ go test will hang forever.\n\t\/\/\n\t\/\/ Avoid that by wrapping stderr, breaking the short circuit and\n\t\/\/ forcing cmd.Run to use another pipe and goroutine to pass\n\t\/\/ along stderr from adb.\n\tcmd.Stderr = struct{ io.Writer }{os.Stderr}\n\tlog.Printf(\"adb %s\", strings.Join(args, \" \"))\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"adb %s: %v\", strings.Join(args, \" \"), err)\n\t}\n\treturn buf.String()\n}\n\nconst (\n\t\/\/ Directory structure on the target device androidtest.bash assumes.\n\tdeviceGoroot = \"\/data\/local\/tmp\/goroot\"\n\tdeviceGopath = \"\/data\/local\/tmp\/gopath\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"go_android_exec: \")\n\n\t\/\/ Concurrent use of adb is flaky, so serialize adb commands.\n\t\/\/ See https:\/\/github.com\/golang\/go\/issues\/23795 or\n\t\/\/ https:\/\/issuetracker.google.com\/issues\/73230216.\n\tlockPath := filepath.Join(os.TempDir(), \"go_android_exec-adb-lock\")\n\tlock, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer lock.Close()\n\tif err := syscall.Flock(int(lock.Fd()), syscall.LOCK_EX); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ In case we're booting a device or emulator alongside androidtest.bash\n\t\/\/ wait for it to be ready. adb wait-for-device is not enough, we have to\n\t\/\/ wait for sys.boot_completed.\n\trun(\"wait-for-device\", \"shell\", \"while [[ -z $(getprop sys.boot_completed) ]]; do sleep 1; done;\")\n\n\t\/\/ Prepare a temporary directory that will be cleaned up at the end.\n\tdeviceGotmp := fmt.Sprintf(\"\/data\/local\/tmp\/%s-%d\",\n\t\tfilepath.Base(os.Args[1]), os.Getpid())\n\trun(\"shell\", \"mkdir\", \"-p\", deviceGotmp)\n\n\t\/\/ Determine the package by examining the current working\n\t\/\/ directory, which will look something like\n\t\/\/ \"$GOROOT\/src\/mime\/multipart\" or \"$GOPATH\/src\/golang.org\/x\/mobile\".\n\t\/\/ We extract everything after the $GOROOT or $GOPATH to run on the\n\t\/\/ same relative directory on the target device.\n\tsubdir, inGoRoot := subdir()\n\tdeviceCwd := filepath.Join(deviceGoroot, subdir)\n\tif !inGoRoot {\n\t\tdeviceCwd = filepath.Join(deviceGopath, subdir)\n\t} else {\n\t\tadbSyncGoroot()\n\t}\n\trun(\"shell\", \"mkdir\", \"-p\", deviceCwd)\n\n\t\/\/ Binary names can conflict.\n\t\/\/ E.g. template.test from the {html,text}\/template packages.\n\tbinName := fmt.Sprintf(\"%s-%d\", filepath.Base(os.Args[1]), os.Getpid())\n\tdeviceBin := fmt.Sprintf(\"%s\/%s\", deviceGotmp, binName)\n\trun(\"push\", os.Args[1], deviceBin)\n\n\tif _, err := os.Stat(\"testdata\"); err == nil {\n\t\trun(\"push\", \"testdata\", deviceCwd)\n\t}\n\n\t\/\/ Forward SIGQUIT from the go command to show backtraces from\n\t\/\/ the binary instead of from this wrapper.\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, syscall.SIGQUIT)\n\tgo func() {\n\t\tfor range quit {\n\t\t\t\/\/ We don't have the PID of the running process; use the\n\t\t\t\/\/ binary name instead.\n\t\t\trun(\"shell\", \"killall -QUIT \"+binName)\n\t\t}\n\t}()\n\t\/\/ The adb shell command will return an exit code of 0 regardless\n\t\/\/ of the command run. E.g.\n\t\/\/ $ adb shell false\n\t\/\/ $ echo $?\n\t\/\/ 0\n\t\/\/ https:\/\/code.google.com\/p\/android\/issues\/detail?id=3254\n\t\/\/ So we append the exitcode to the output and parse it from there.\n\tconst exitstr = \"exitcode=\"\n\tcmd := `export TMPDIR=\"` + deviceGotmp + `\"` +\n\t\t`; export GOROOT=\"` + deviceGoroot + `\"` +\n\t\t`; export GOPATH=\"` + deviceGopath + `\"` +\n\t\t`; cd \"` + deviceCwd + `\"` +\n\t\t\"; '\" + deviceBin + \"' \" + strings.Join(os.Args[2:], \" \") +\n\t\t\"; echo -n \" + exitstr + \"$?\"\n\toutput := run(\"shell\", cmd)\n\tsignal.Reset(syscall.SIGQUIT)\n\tclose(quit)\n\n\trun(\"shell\", \"rm\", \"-rf\", deviceGotmp) \/\/ Clean up.\n\n\texitIdx := strings.LastIndex(output, exitstr)\n\tif exitIdx == -1 {\n\t\tlog.Fatalf(\"no exit code: %q\", output)\n\t}\n\tcode, err := strconv.Atoi(output[exitIdx+len(exitstr):])\n\tif err != nil {\n\t\tlog.Fatalf(\"bad exit code: %v\", err)\n\t}\n\tos.Exit(code)\n}\n\n\/\/ subdir determines the package based on the current working directory,\n\/\/ and returns the path to the package source relative to $GOROOT (or $GOPATH).\nfunc subdir() (pkgpath string, underGoRoot bool) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tgoroot, err := filepath.EvalSymlinks(runtime.GOROOT())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif strings.HasPrefix(cwd, goroot) {\n\t\tsubdir, err := filepath.Rel(goroot, cwd)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn subdir, true\n\t}\n\n\tfor _, p := range filepath.SplitList(build.Default.GOPATH) {\n\t\tpabs, err := filepath.EvalSymlinks(p)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !strings.HasPrefix(cwd, pabs) {\n\t\t\tcontinue\n\t\t}\n\t\tsubdir, err := filepath.Rel(pabs, cwd)\n\t\tif err == nil {\n\t\t\treturn subdir, false\n\t\t}\n\t}\n\tlog.Fatalf(\"the current path %q is not in either GOROOT(%q) or GOPATH(%q)\",\n\t\tcwd, runtime.GOROOT(), build.Default.GOPATH)\n\treturn \"\", false\n}\n\n\/\/ adbSyncGoroot ensures that files necessary for testing the Go standard\n\/\/ packages are present on the attached device.\nfunc adbSyncGoroot() {\n\t\/\/ Also known by cmd\/dist. The bootstrap command deletes the file.\n\tstatPath := filepath.Join(os.TempDir(), \"go_android_exec-adb-sync-status\")\n\tstat, err := os.OpenFile(statPath, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stat.Close()\n\t\/\/ Serialize check and syncing.\n\tif err := syscall.Flock(int(stat.Fd()), syscall.LOCK_EX); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts, err := ioutil.ReadAll(stat)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif string(s) == \"done\" {\n\t\treturn\n\t}\n\tdevRoot := \"\/data\/local\/tmp\/goroot\"\n\trun(\"shell\", \"rm\", \"-rf\", devRoot)\n\trun(\"shell\", \"mkdir\", \"-p\", devRoot+\"\/pkg\")\n\tgoroot := runtime.GOROOT()\n\tgoCmd := filepath.Join(goroot, \"bin\", \"go\")\n\truntimea, err := exec.Command(goCmd, \"list\", \"-f\", \"{{.Target}}\", \"runtime\").Output()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpkgdir := filepath.Dir(string(runtimea))\n\tif pkgdir == \"\" {\n\t\tlog.Fatal(\"could not find android pkg dir\")\n\t}\n\tfor _, dir := range []string{\"src\", \"test\", \"lib\"} {\n\t\trun(\"push\", filepath.Join(goroot, dir), filepath.Join(devRoot))\n\t}\n\trun(\"push\", filepath.Join(pkgdir), filepath.Join(devRoot, \"pkg\/\"))\n\tif _, err := stat.Write([]byte(\"done\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>misc\/android: adb push --sync testdata<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ This program can be used as go_android_GOARCH_exec by the Go tool.\n\/\/ It executes binaries on an android device using adb.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc run(args ...string) string {\n\tif flags := os.Getenv(\"GOANDROID_ADB_FLAGS\"); flags != \"\" {\n\t\targs = append(strings.Split(flags, \" \"), args...)\n\t}\n\tbuf := new(bytes.Buffer)\n\tcmd := exec.Command(\"adb\", args...)\n\tcmd.Stdout = io.MultiWriter(os.Stdout, buf)\n\t\/\/ If the adb subprocess somehow hangs, go test will kill this wrapper\n\t\/\/ and wait for our os.Stderr (and os.Stdout) to close as a result.\n\t\/\/ However, if the os.Stderr (or os.Stdout) file descriptors are\n\t\/\/ passed on, the hanging adb subprocess will hold them open and\n\t\/\/ go test will hang forever.\n\t\/\/\n\t\/\/ Avoid that by wrapping stderr, breaking the short circuit and\n\t\/\/ forcing cmd.Run to use another pipe and goroutine to pass\n\t\/\/ along stderr from adb.\n\tcmd.Stderr = struct{ io.Writer }{os.Stderr}\n\tlog.Printf(\"adb %s\", strings.Join(args, \" \"))\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"adb %s: %v\", strings.Join(args, \" \"), err)\n\t}\n\treturn buf.String()\n}\n\nconst (\n\t\/\/ Directory structure on the target device androidtest.bash assumes.\n\tdeviceGoroot = \"\/data\/local\/tmp\/goroot\"\n\tdeviceGopath = \"\/data\/local\/tmp\/gopath\"\n)\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"go_android_exec: \")\n\n\t\/\/ Concurrent use of adb is flaky, so serialize adb commands.\n\t\/\/ See https:\/\/github.com\/golang\/go\/issues\/23795 or\n\t\/\/ https:\/\/issuetracker.google.com\/issues\/73230216.\n\tlockPath := filepath.Join(os.TempDir(), \"go_android_exec-adb-lock\")\n\tlock, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer lock.Close()\n\tif err := syscall.Flock(int(lock.Fd()), syscall.LOCK_EX); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ In case we're booting a device or emulator alongside androidtest.bash\n\t\/\/ wait for it to be ready. adb wait-for-device is not enough, we have to\n\t\/\/ wait for sys.boot_completed.\n\trun(\"wait-for-device\", \"shell\", \"while [[ -z $(getprop sys.boot_completed) ]]; do sleep 1; done;\")\n\n\t\/\/ Prepare a temporary directory that will be cleaned up at the end.\n\tdeviceGotmp := fmt.Sprintf(\"\/data\/local\/tmp\/%s-%d\",\n\t\tfilepath.Base(os.Args[1]), os.Getpid())\n\trun(\"shell\", \"mkdir\", \"-p\", deviceGotmp)\n\n\t\/\/ Determine the package by examining the current working\n\t\/\/ directory, which will look something like\n\t\/\/ \"$GOROOT\/src\/mime\/multipart\" or \"$GOPATH\/src\/golang.org\/x\/mobile\".\n\t\/\/ We extract everything after the $GOROOT or $GOPATH to run on the\n\t\/\/ same relative directory on the target device.\n\tsubdir, inGoRoot := subdir()\n\tdeviceCwd := filepath.Join(deviceGoroot, subdir)\n\tif !inGoRoot {\n\t\tdeviceCwd = filepath.Join(deviceGopath, subdir)\n\t} else {\n\t\tadbSyncGoroot()\n\t}\n\trun(\"shell\", \"mkdir\", \"-p\", deviceCwd)\n\n\t\/\/ Binary names can conflict.\n\t\/\/ E.g. template.test from the {html,text}\/template packages.\n\tbinName := fmt.Sprintf(\"%s-%d\", filepath.Base(os.Args[1]), os.Getpid())\n\tdeviceBin := fmt.Sprintf(\"%s\/%s\", deviceGotmp, binName)\n\trun(\"push\", os.Args[1], deviceBin)\n\n\tif _, err := os.Stat(\"testdata\"); err == nil {\n\t\trun(\"push\", \"--sync\", \"testdata\", deviceCwd)\n\t}\n\n\t\/\/ Forward SIGQUIT from the go command to show backtraces from\n\t\/\/ the binary instead of from this wrapper.\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, syscall.SIGQUIT)\n\tgo func() {\n\t\tfor range quit {\n\t\t\t\/\/ We don't have the PID of the running process; use the\n\t\t\t\/\/ binary name instead.\n\t\t\trun(\"shell\", \"killall -QUIT \"+binName)\n\t\t}\n\t}()\n\t\/\/ The adb shell command will return an exit code of 0 regardless\n\t\/\/ of the command run. E.g.\n\t\/\/ $ adb shell false\n\t\/\/ $ echo $?\n\t\/\/ 0\n\t\/\/ https:\/\/code.google.com\/p\/android\/issues\/detail?id=3254\n\t\/\/ So we append the exitcode to the output and parse it from there.\n\tconst exitstr = \"exitcode=\"\n\tcmd := `export TMPDIR=\"` + deviceGotmp + `\"` +\n\t\t`; export GOROOT=\"` + deviceGoroot + `\"` +\n\t\t`; export GOPATH=\"` + deviceGopath + `\"` +\n\t\t`; cd \"` + deviceCwd + `\"` +\n\t\t\"; '\" + deviceBin + \"' \" + strings.Join(os.Args[2:], \" \") +\n\t\t\"; echo -n \" + exitstr + \"$?\"\n\toutput := run(\"shell\", cmd)\n\tsignal.Reset(syscall.SIGQUIT)\n\tclose(quit)\n\n\trun(\"shell\", \"rm\", \"-rf\", deviceGotmp) \/\/ Clean up.\n\n\texitIdx := strings.LastIndex(output, exitstr)\n\tif exitIdx == -1 {\n\t\tlog.Fatalf(\"no exit code: %q\", output)\n\t}\n\tcode, err := strconv.Atoi(output[exitIdx+len(exitstr):])\n\tif err != nil {\n\t\tlog.Fatalf(\"bad exit code: %v\", err)\n\t}\n\tos.Exit(code)\n}\n\n\/\/ subdir determines the package based on the current working directory,\n\/\/ and returns the path to the package source relative to $GOROOT (or $GOPATH).\nfunc subdir() (pkgpath string, underGoRoot bool) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tgoroot, err := filepath.EvalSymlinks(runtime.GOROOT())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif strings.HasPrefix(cwd, goroot) {\n\t\tsubdir, err := filepath.Rel(goroot, cwd)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn subdir, true\n\t}\n\n\tfor _, p := range filepath.SplitList(build.Default.GOPATH) {\n\t\tpabs, err := filepath.EvalSymlinks(p)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !strings.HasPrefix(cwd, pabs) {\n\t\t\tcontinue\n\t\t}\n\t\tsubdir, err := filepath.Rel(pabs, cwd)\n\t\tif err == nil {\n\t\t\treturn subdir, false\n\t\t}\n\t}\n\tlog.Fatalf(\"the current path %q is not in either GOROOT(%q) or GOPATH(%q)\",\n\t\tcwd, runtime.GOROOT(), build.Default.GOPATH)\n\treturn \"\", false\n}\n\n\/\/ adbSyncGoroot ensures that files necessary for testing the Go standard\n\/\/ packages are present on the attached device.\nfunc adbSyncGoroot() {\n\t\/\/ Also known by cmd\/dist. The bootstrap command deletes the file.\n\tstatPath := filepath.Join(os.TempDir(), \"go_android_exec-adb-sync-status\")\n\tstat, err := os.OpenFile(statPath, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stat.Close()\n\t\/\/ Serialize check and syncing.\n\tif err := syscall.Flock(int(stat.Fd()), syscall.LOCK_EX); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts, err := ioutil.ReadAll(stat)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif string(s) == \"done\" {\n\t\treturn\n\t}\n\tdevRoot := \"\/data\/local\/tmp\/goroot\"\n\trun(\"shell\", \"rm\", \"-rf\", devRoot)\n\trun(\"shell\", \"mkdir\", \"-p\", devRoot+\"\/pkg\")\n\tgoroot := runtime.GOROOT()\n\tgoCmd := filepath.Join(goroot, \"bin\", \"go\")\n\truntimea, err := exec.Command(goCmd, \"list\", \"-f\", \"{{.Target}}\", \"runtime\").Output()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpkgdir := filepath.Dir(string(runtimea))\n\tif pkgdir == \"\" {\n\t\tlog.Fatal(\"could not find android pkg dir\")\n\t}\n\tfor _, dir := range []string{\"src\", \"test\", \"lib\"} {\n\t\trun(\"push\", filepath.Join(goroot, dir), filepath.Join(devRoot))\n\t}\n\trun(\"push\", filepath.Join(pkgdir), filepath.Join(devRoot, \"pkg\/\"))\n\tif _, err := stat.Write([]byte(\"done\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage convert\n\nimport (\n\t\"net\/url\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/models\/perm\"\n\tapi \"code.gitea.io\/gitea\/modules\/structs\"\n)\n\n\/\/ ToNotificationThread convert a Notification to api.NotificationThread\nfunc ToNotificationThread(n *models.Notification) *api.NotificationThread {\n\tresult := &api.NotificationThread{\n\t\tID: n.ID,\n\t\tUnread: !(n.Status == models.NotificationStatusRead || n.Status == models.NotificationStatusPinned),\n\t\tPinned: n.Status == models.NotificationStatusPinned,\n\t\tUpdatedAt: n.UpdatedUnix.AsTime(),\n\t\tURL: n.APIURL(),\n\t}\n\n\t\/\/ since user only get notifications when he has access to use minimal access mode\n\tif n.Repository != nil {\n\t\tresult.Repository = ToRepo(n.Repository, perm.AccessModeRead)\n\t}\n\n\t\/\/ handle Subject\n\tswitch n.Source {\n\tcase models.NotificationSourceIssue:\n\t\tresult.Subject = &api.NotificationSubject{Type: api.NotifySubjectIssue}\n\t\tif n.Issue != nil {\n\t\t\tresult.Subject.Title = n.Issue.Title\n\t\t\tresult.Subject.URL = n.Issue.APIURL()\n\t\t\tresult.Subject.HTMLURL = n.Issue.HTMLURL()\n\t\t\tresult.Subject.State = n.Issue.State()\n\t\t\tcomment, err := n.Issue.GetLastComment()\n\t\t\tif err == nil && comment != nil {\n\t\t\t\tresult.Subject.LatestCommentURL = comment.APIURL()\n\t\t\t\tresult.Subject.LatestCommentHTMLURL = comment.HTMLURL()\n\t\t\t}\n\t\t}\n\tcase models.NotificationSourcePullRequest:\n\t\tresult.Subject = &api.NotificationSubject{Type: api.NotifySubjectPull}\n\t\tif n.Issue != nil {\n\t\t\tresult.Subject.Title = n.Issue.Title\n\t\t\tresult.Subject.URL = n.Issue.APIURL()\n\t\t\tresult.Subject.HTMLURL = n.Issue.HTMLURL()\n\t\t\tresult.Subject.State = n.Issue.State()\n\t\t\tcomment, err := n.Issue.GetLastComment()\n\t\t\tif err == nil && comment != nil {\n\t\t\t\tresult.Subject.LatestCommentURL = comment.APIURL()\n\t\t\t\tresult.Subject.LatestCommentHTMLURL = comment.HTMLURL()\n\t\t\t}\n\n\t\t\tpr, _ := n.Issue.GetPullRequest()\n\t\t\tif pr != nil && pr.HasMerged {\n\t\t\t\tresult.Subject.State = \"merged\"\n\t\t\t}\n\t\t}\n\tcase models.NotificationSourceCommit:\n\t\turl := n.Repository.HTMLURL() + \"\/commit\/\" + url.PathEscape(n.CommitID)\n\t\tresult.Subject = &api.NotificationSubject{\n\t\t\tType: api.NotifySubjectCommit,\n\t\t\tTitle: n.CommitID,\n\t\t\tURL: url,\n\t\t\tHTMLURL: url,\n\t\t}\n\tcase models.NotificationSourceRepository:\n\t\tresult.Subject = &api.NotificationSubject{\n\t\t\tType: api.NotifySubjectRepository,\n\t\t\tTitle: n.Repository.FullName(),\n\t\t\t\/\/ FIXME: this is a relative URL, rather useless and inconsistent, but keeping for backwards compat\n\t\t\tURL: n.Repository.Link(),\n\t\t\tHTMLURL: n.Repository.HTMLURL(),\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ToNotifications convert list of Notification to api.NotificationThread list\nfunc ToNotifications(nl models.NotificationList) []*api.NotificationThread {\n\tresult := make([]*api.NotificationThread, 0, len(nl))\n\tfor _, n := range nl {\n\t\tresult = append(result, ToNotificationThread(n))\n\t}\n\treturn result\n}\n<commit_msg>Nuke the incorrect permission report on \/api\/v1\/notifications (#19761)<commit_after>\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage convert\n\nimport (\n\t\"net\/url\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/models\/perm\"\n\tapi \"code.gitea.io\/gitea\/modules\/structs\"\n)\n\n\/\/ ToNotificationThread convert a Notification to api.NotificationThread\nfunc ToNotificationThread(n *models.Notification) *api.NotificationThread {\n\tresult := &api.NotificationThread{\n\t\tID: n.ID,\n\t\tUnread: !(n.Status == models.NotificationStatusRead || n.Status == models.NotificationStatusPinned),\n\t\tPinned: n.Status == models.NotificationStatusPinned,\n\t\tUpdatedAt: n.UpdatedUnix.AsTime(),\n\t\tURL: n.APIURL(),\n\t}\n\n\t\/\/ since user only get notifications when he has access to use minimal access mode\n\tif n.Repository != nil {\n\t\tresult.Repository = ToRepo(n.Repository, perm.AccessModeRead)\n\n\t\t\/\/ This permission is not correct and we should not be reporting it\n\t\tfor repository := result.Repository; repository != nil; repository = repository.Parent {\n\t\t\trepository.Permissions = nil\n\t\t}\n\t}\n\n\t\/\/ handle Subject\n\tswitch n.Source {\n\tcase models.NotificationSourceIssue:\n\t\tresult.Subject = &api.NotificationSubject{Type: api.NotifySubjectIssue}\n\t\tif n.Issue != nil {\n\t\t\tresult.Subject.Title = n.Issue.Title\n\t\t\tresult.Subject.URL = n.Issue.APIURL()\n\t\t\tresult.Subject.HTMLURL = n.Issue.HTMLURL()\n\t\t\tresult.Subject.State = n.Issue.State()\n\t\t\tcomment, err := n.Issue.GetLastComment()\n\t\t\tif err == nil && comment != nil {\n\t\t\t\tresult.Subject.LatestCommentURL = comment.APIURL()\n\t\t\t\tresult.Subject.LatestCommentHTMLURL = comment.HTMLURL()\n\t\t\t}\n\t\t}\n\tcase models.NotificationSourcePullRequest:\n\t\tresult.Subject = &api.NotificationSubject{Type: api.NotifySubjectPull}\n\t\tif n.Issue != nil {\n\t\t\tresult.Subject.Title = n.Issue.Title\n\t\t\tresult.Subject.URL = n.Issue.APIURL()\n\t\t\tresult.Subject.HTMLURL = n.Issue.HTMLURL()\n\t\t\tresult.Subject.State = n.Issue.State()\n\t\t\tcomment, err := n.Issue.GetLastComment()\n\t\t\tif err == nil && comment != nil {\n\t\t\t\tresult.Subject.LatestCommentURL = comment.APIURL()\n\t\t\t\tresult.Subject.LatestCommentHTMLURL = comment.HTMLURL()\n\t\t\t}\n\n\t\t\tpr, _ := n.Issue.GetPullRequest()\n\t\t\tif pr != nil && pr.HasMerged {\n\t\t\t\tresult.Subject.State = \"merged\"\n\t\t\t}\n\t\t}\n\tcase models.NotificationSourceCommit:\n\t\turl := n.Repository.HTMLURL() + \"\/commit\/\" + url.PathEscape(n.CommitID)\n\t\tresult.Subject = &api.NotificationSubject{\n\t\t\tType: api.NotifySubjectCommit,\n\t\t\tTitle: n.CommitID,\n\t\t\tURL: url,\n\t\t\tHTMLURL: url,\n\t\t}\n\tcase models.NotificationSourceRepository:\n\t\tresult.Subject = &api.NotificationSubject{\n\t\t\tType: api.NotifySubjectRepository,\n\t\t\tTitle: n.Repository.FullName(),\n\t\t\t\/\/ FIXME: this is a relative URL, rather useless and inconsistent, but keeping for backwards compat\n\t\t\tURL: n.Repository.Link(),\n\t\t\tHTMLURL: n.Repository.HTMLURL(),\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/ ToNotifications convert list of Notification to api.NotificationThread list\nfunc ToNotifications(nl models.NotificationList) []*api.NotificationThread {\n\tresult := make([]*api.NotificationThread, 0, len(nl))\n\tfor _, n := range nl {\n\t\tresult = append(result, ToNotificationThread(n))\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package plugins\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Seklfreak\/Robyul2\/cache\"\n\t\"github.com\/Seklfreak\/Robyul2\/helpers\"\n\t\"github.com\/Seklfreak\/Robyul2\/models\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\trethink \"github.com\/gorethink\/gorethink\"\n)\n\ntype Troublemaker struct{}\n\nfunc (t *Troublemaker) Commands() []string {\n\treturn []string{\n\t\t\"troublemaker\",\n\t\t\"troublemakers\",\n\t\t\"tm\",\n\t}\n}\n\ntype DB_Troublemaker_Entry struct {\n\tID string `gorethink:\"id,omitempty\"`\n\tUserID string `gorethink:\"userid\"`\n\tReason string `gorethink:\"reason\"`\n\tCreatedAt time.Time `gorethink:\"createdat\"`\n\tReportedByGuildID string `gorethink:\"reportedby_guildid\"`\n\tReportedByUserID string `gorethink:\"reportedby_userid\"`\n}\n\nfunc (t *Troublemaker) Init(session *discordgo.Session) {\n\n}\n\nfunc (t *Troublemaker) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n\tif !helpers.ModuleIsAllowed(msg.ChannelID, msg.ID, msg.Author.ID, helpers.ModulePermTroublemaker) {\n\t\treturn\n\t}\n\n\targs := strings.Fields(content)\n\tif len(args) >= 1 {\n\t\tswitch args[0] {\n\t\tcase \"participate\":\n\t\t\thelpers.RequireAdmin(msg, func() {\n\t\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\t\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\t\t\t\thelpers.Relax(err)\n\n\t\t\t\tsettings := helpers.GuildSettingsGetCached(channel.GuildID)\n\n\t\t\t\tif len(args) >= 2 {\n\t\t\t\t\t\/\/ Set new log channel\n\t\t\t\t\ttargetChannel, err := helpers.GetChannelFromMention(msg, args[1])\n\t\t\t\t\tif err != nil || targetChannel.ID == \"\" {\n\t\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tpreviousChannel := settings.TroublemakerLogChannel\n\n\t\t\t\t\tsettings.TroublemakerIsParticipating = true\n\t\t\t\t\tsettings.TroublemakerLogChannel = targetChannel.ID\n\t\t\t\t\terr = helpers.GuildSettingsSet(channel.GuildID, settings)\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\tchanges := make([]models.ElasticEventlogChange, 0)\n\t\t\t\t\tif previousChannel != \"\" {\n\t\t\t\t\t\tchanges = []models.ElasticEventlogChange{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participate_channel\",\n\t\t\t\t\t\t\t\tOldValue: previousChannel,\n\t\t\t\t\t\t\t\tNewValue: settings.TroublemakerLogChannel,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tchanges = append(changes, models.ElasticEventlogChange{\n\t\t\t\t\t\tKey: \"troublemaker_participating\",\n\t\t\t\t\t\tOldValue: helpers.StoreBoolAsString(false),\n\t\t\t\t\t\tNewValue: helpers.StoreBoolAsString(true),\n\t\t\t\t\t})\n\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, channel.GuildID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeGuild, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerParticipate, \"\",\n\t\t\t\t\t\tchanges,\n\t\t\t\t\t\t[]models.ElasticEventlogOption{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participate_channel\",\n\t\t\t\t\t\t\t\tValue: targetChannel.ID,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\t_, err = helpers.SendMessage(msg.ChannelID, helpers.GetText(\"plugins.troublemaker.participation-enabled\"))\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Disable logging\n\t\t\t\t\tsettings.TroublemakerIsParticipating = false\n\t\t\t\t\terr = helpers.GuildSettingsSet(channel.GuildID, settings)\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, channel.GuildID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeGuild, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerParticipate, \"\",\n\t\t\t\t\t\t[]models.ElasticEventlogChange{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participating\",\n\t\t\t\t\t\t\t\tOldValue: helpers.StoreBoolAsString(true),\n\t\t\t\t\t\t\t\tNewValue: helpers.StoreBoolAsString(false),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\t_, err = helpers.SendMessage(msg.ChannelID, helpers.GetText(\"plugins.troublemaker.participation-disabled\"))\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t\tbreak\n\t\tcase \"list\":\n\t\t\thelpers.RequireMod(msg, func() {\n\t\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttargetUser, err := helpers.GetUserFromMention(args[1])\n\t\t\t\tif err != nil || targetUser.ID == \"\" {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttroublemakerReports := t.getTroublemakerReports(targetUser)\n\n\t\t\t\tif len(troublemakerReports) <= 0 {\n\t\t\t\t\t_, err := helpers.SendMessage(msg.ChannelID, helpers.GetTextF(\"plugins.troublemaker.list-no-reports\", targetUser.Username))\n\t\t\t\t\thelpers.Relax(err)\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\ttroublemakerText := fmt.Sprintf(\"I found %d report(s) for `%s#%s` `#%s` <@%s>:\\n\",\n\t\t\t\t\t\tlen(troublemakerReports), targetUser.Username, targetUser.Discriminator, targetUser.ID, targetUser.ID,\n\t\t\t\t\t)\n\n\t\t\t\t\tfor _, troublemakerReport := range troublemakerReports {\n\t\t\t\t\t\treportedByUser, err := helpers.GetUser(troublemakerReport.ReportedByUserID)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treportedByUser = new(discordgo.User)\n\t\t\t\t\t\t\treportedByUser.ID = troublemakerReport.ReportedByUserID\n\t\t\t\t\t\t\treportedByUser.Username = \"N\/A\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\treportedByGuild, err := helpers.GetGuild(troublemakerReport.ReportedByGuildID)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treportedByGuild = new(discordgo.Guild)\n\t\t\t\t\t\t\treportedByGuild.ID = troublemakerReport.ReportedByGuildID\n\t\t\t\t\t\t\treportedByGuild.Name = \"N\/A\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttroublemakerText += fmt.Sprintf(\"At: `%s`, Reason: `%s`, Reported By: `%s` (`#%s`) On: `%s` (`#%s`)\\n\",\n\t\t\t\t\t\t\ttroublemakerReport.CreatedAt.Format(time.ANSIC), troublemakerReport.Reason,\n\t\t\t\t\t\t\treportedByUser.Username, reportedByUser.ID, reportedByGuild.Name, reportedByGuild.ID,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, page := range helpers.Pagify(troublemakerText, \"\\n\") {\n\t\t\t\t\t\t_, err := helpers.SendMessage(msg.ChannelID, page)\n\t\t\t\t\t\thelpers.Relax(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\tdefault:\n\t\t\thelpers.RequireMod(msg, func() {\n\t\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\t\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\t\t\t\thelpers.Relax(err)\n\t\t\t\tguild, err := helpers.GetGuild(channel.GuildID)\n\t\t\t\thelpers.Relax(err)\n\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttargetUser, err := helpers.GetUserFromMention(args[0])\n\t\t\t\tif err != nil || targetUser.ID == \"\" {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\treasonText := strings.TrimSpace(strings.Replace(content, strings.Join(args[:1], \" \"), \"\", 1))\n\n\t\t\t\tif helpers.ConfirmEmbed(msg.ChannelID, msg.Author, helpers.GetTextF(\"plugins.troublemaker.report-confirm\",\n\t\t\t\t\ttargetUser.Username, targetUser.Discriminator, targetUser.ID, targetUser.ID, reasonText,\n\t\t\t\t), \"✅\", \"🚫\") == true {\n\t\t\t\t\t\/\/ Save to log DB\n\t\t\t\t\ttroublemakerLogEntry := t.getEntryByOrCreateEmpty(\"id\", \"\")\n\t\t\t\t\ttroublemakerLogEntry.UserID = targetUser.ID\n\t\t\t\t\ttroublemakerLogEntry.Reason = reasonText\n\t\t\t\t\ttroublemakerLogEntry.CreatedAt = time.Now()\n\t\t\t\t\ttroublemakerLogEntry.ReportedByGuildID = guild.ID\n\t\t\t\t\ttroublemakerLogEntry.ReportedByUserID = msg.Author.ID\n\t\t\t\t\tt.setEntry(troublemakerLogEntry)\n\n\t\t\t\t\tcache.GetLogger().WithField(\"module\", \"troublemaker\").Info(fmt.Sprintf(\"will notify about troublemaker %s (#%s) by %s (#%s) on %s (#%s) reason %s\",\n\t\t\t\t\t\ttargetUser.Username, targetUser.ID,\n\t\t\t\t\t\tmsg.Author.Username, msg.Author.ID,\n\t\t\t\t\t\tguild.Name, guild.ID,\n\t\t\t\t\t\treasonText,\n\t\t\t\t\t))\n\n\t\t\t\t\tguildsToNotify := make([]*discordgo.Guild, 0)\n\n\t\t\t\t\tfor _, guildToNotify := range session.State.Guilds {\n\t\t\t\t\t\tif guildToNotify.ID != guild.ID {\n\t\t\t\t\t\t\tguildToNotifySettings := helpers.GuildSettingsGetCached(guildToNotify.ID)\n\t\t\t\t\t\t\tif guildToNotifySettings.TroublemakerIsParticipating == true && guildToNotifySettings.TroublemakerLogChannel != \"\" {\n\t\t\t\t\t\t\t\tguildsToNotify = append(guildsToNotify, guildToNotify)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, targetUser.ID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeUser, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerReport, reasonText,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\tsuccessMessages, _ := helpers.SendMessage(msg.ChannelID, helpers.GetTextF(\"plugins.troublemaker.report-successful\", len(guildsToNotify)))\n\n\t\t\t\t\t\/\/ Send notifications out\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer helpers.Recover()\n\n\t\t\t\t\t\tfor _, guildToNotify := range guildsToNotify {\n\t\t\t\t\t\t\tguildToNotifySettings := helpers.GuildSettingsGetCached(guildToNotify.ID)\n\t\t\t\t\t\t\tif guildToNotifySettings.TroublemakerIsParticipating == true && guildToNotifySettings.TroublemakerLogChannel != \"\" {\n\t\t\t\t\t\t\t\ttargetUserIsOnServer := false\n\t\t\t\t\t\t\t\t_, err := helpers.GetGuildMember(guildToNotify.ID, targetUser.ID)\n\t\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\t\ttargetUserIsOnServer = true\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treportEmbed := &discordgo.MessageEmbed{\n\t\t\t\t\t\t\t\t\tTitle: helpers.GetTextF(\"plugins.troublemaker.report-embed-title\", targetUser.Username, targetUser.Discriminator),\n\t\t\t\t\t\t\t\t\tDescription: helpers.GetTextF(\"plugins.troublemaker.report-embed-description\", targetUser.ID, targetUser.ID),\n\t\t\t\t\t\t\t\t\tURL: helpers.GetAvatarUrl(targetUser),\n\t\t\t\t\t\t\t\t\tThumbnail: &discordgo.MessageEmbedThumbnail{URL: helpers.GetAvatarUrl(targetUser)},\n\t\t\t\t\t\t\t\t\tFooter: &discordgo.MessageEmbedFooter{Text: helpers.GetTextF(\"plugins.troublemaker.report-embed-footer\", len(guildsToNotify))},\n\t\t\t\t\t\t\t\t\tColor: 0x0FADED,\n\t\t\t\t\t\t\t\t\tFields: []*discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\t{Name: \"Reason stated\", Value: reasonText, Inline: false},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif targetUserIsOnServer == true {\n\t\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\tName: \"Member status\", Value: \":warning: User is on this server\", Inline: false,\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\tName: \"Member status\", Value: \":white_check_mark: User is not on this server\", Inline: false,\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\tName: \"Reported by\", Value: fmt.Sprintf(\"**%s** on **%s**\",\n\t\t\t\t\t\t\t\t\t\tmsg.Author.Username, guild.Name,\n\t\t\t\t\t\t\t\t\t), Inline: false})\n\n\t\t\t\t\t\t\t\t_, err = helpers.SendEmbed(guildToNotifySettings.TroublemakerLogChannel, reportEmbed)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tcache.GetLogger().WithField(\"module\", \"troublemaker\").Warnf(\"Failed to send troublemaker report to channel #%s on guild #%s: %s\"+\n\t\t\t\t\t\t\t\t\t\tguildToNotifySettings.TroublemakerLogChannel, guildToNotifySettings.Guild, err.Error())\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(successMessages) > 0 {\n\t\t\t\t\t\t\tsession.MessageReactionAdd(msg.ChannelID, successMessages[0].ID, \"👌\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (t *Troublemaker) getTroublemakerReports(user *discordgo.User) []DB_Troublemaker_Entry {\n\tvar entryBucket []DB_Troublemaker_Entry\n\tlistCursor, err := rethink.Table(\"troublemakerlog\").Filter(\n\t\trethink.Row.Field(\"userid\").Eq(user.ID),\n\t).Run(helpers.GetDB())\n\thelpers.Relax(err)\n\tdefer listCursor.Close()\n\tlistCursor.All(&entryBucket)\n\n\treturn entryBucket\n}\n\nfunc (t *Troublemaker) getEntryByOrCreateEmpty(key string, id string) DB_Troublemaker_Entry {\n\tvar entryBucket DB_Troublemaker_Entry\n\tlistCursor, err := rethink.Table(\"troublemakerlog\").Filter(\n\t\trethink.Row.Field(key).Eq(id),\n\t).Run(helpers.GetDB())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer listCursor.Close()\n\terr = listCursor.One(&entryBucket)\n\n\t\/\/ If user has no DB entries create an empty document\n\tif err == rethink.ErrEmptyResult {\n\t\tinsert := rethink.Table(\"troublemakerlog\").Insert(DB_Troublemaker_Entry{})\n\t\tres, e := insert.RunWrite(helpers.GetDB())\n\t\t\/\/ If the creation was successful read the document\n\t\tif e != nil {\n\t\t\tpanic(e)\n\t\t} else {\n\t\t\treturn t.getEntryByOrCreateEmpty(\"id\", res.GeneratedKeys[0])\n\t\t}\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn entryBucket\n}\n\nfunc (t *Troublemaker) setEntry(entry DB_Troublemaker_Entry) {\n\t_, err := rethink.Table(\"troublemakerlog\").Update(entry).Run(helpers.GetDB())\n\thelpers.Relax(err)\n}\n<commit_msg>[troublemaker] allows list for basic and extended inspect mods<commit_after>package plugins\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Seklfreak\/Robyul2\/cache\"\n\t\"github.com\/Seklfreak\/Robyul2\/helpers\"\n\t\"github.com\/Seklfreak\/Robyul2\/models\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\trethink \"github.com\/gorethink\/gorethink\"\n)\n\ntype Troublemaker struct{}\n\nfunc (t *Troublemaker) Commands() []string {\n\treturn []string{\n\t\t\"troublemaker\",\n\t\t\"troublemakers\",\n\t\t\"tm\",\n\t}\n}\n\ntype DB_Troublemaker_Entry struct {\n\tID string `gorethink:\"id,omitempty\"`\n\tUserID string `gorethink:\"userid\"`\n\tReason string `gorethink:\"reason\"`\n\tCreatedAt time.Time `gorethink:\"createdat\"`\n\tReportedByGuildID string `gorethink:\"reportedby_guildid\"`\n\tReportedByUserID string `gorethink:\"reportedby_userid\"`\n}\n\nfunc (t *Troublemaker) Init(session *discordgo.Session) {\n\n}\n\nfunc (t *Troublemaker) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n\tif !helpers.ModuleIsAllowed(msg.ChannelID, msg.ID, msg.Author.ID, helpers.ModulePermTroublemaker) {\n\t\treturn\n\t}\n\n\targs := strings.Fields(content)\n\tif len(args) >= 1 {\n\t\tswitch args[0] {\n\t\tcase \"participate\":\n\t\t\thelpers.RequireAdmin(msg, func() {\n\t\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\t\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\t\t\t\thelpers.Relax(err)\n\n\t\t\t\tsettings := helpers.GuildSettingsGetCached(channel.GuildID)\n\n\t\t\t\tif len(args) >= 2 {\n\t\t\t\t\t\/\/ Set new log channel\n\t\t\t\t\ttargetChannel, err := helpers.GetChannelFromMention(msg, args[1])\n\t\t\t\t\tif err != nil || targetChannel.ID == \"\" {\n\t\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tpreviousChannel := settings.TroublemakerLogChannel\n\n\t\t\t\t\tsettings.TroublemakerIsParticipating = true\n\t\t\t\t\tsettings.TroublemakerLogChannel = targetChannel.ID\n\t\t\t\t\terr = helpers.GuildSettingsSet(channel.GuildID, settings)\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\tchanges := make([]models.ElasticEventlogChange, 0)\n\t\t\t\t\tif previousChannel != \"\" {\n\t\t\t\t\t\tchanges = []models.ElasticEventlogChange{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participate_channel\",\n\t\t\t\t\t\t\t\tOldValue: previousChannel,\n\t\t\t\t\t\t\t\tNewValue: settings.TroublemakerLogChannel,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tchanges = append(changes, models.ElasticEventlogChange{\n\t\t\t\t\t\tKey: \"troublemaker_participating\",\n\t\t\t\t\t\tOldValue: helpers.StoreBoolAsString(false),\n\t\t\t\t\t\tNewValue: helpers.StoreBoolAsString(true),\n\t\t\t\t\t})\n\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, channel.GuildID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeGuild, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerParticipate, \"\",\n\t\t\t\t\t\tchanges,\n\t\t\t\t\t\t[]models.ElasticEventlogOption{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participate_channel\",\n\t\t\t\t\t\t\t\tValue: targetChannel.ID,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\t_, err = helpers.SendMessage(msg.ChannelID, helpers.GetText(\"plugins.troublemaker.participation-enabled\"))\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Disable logging\n\t\t\t\t\tsettings.TroublemakerIsParticipating = false\n\t\t\t\t\terr = helpers.GuildSettingsSet(channel.GuildID, settings)\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, channel.GuildID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeGuild, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerParticipate, \"\",\n\t\t\t\t\t\t[]models.ElasticEventlogChange{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: \"troublemaker_participating\",\n\t\t\t\t\t\t\t\tOldValue: helpers.StoreBoolAsString(true),\n\t\t\t\t\t\t\t\tNewValue: helpers.StoreBoolAsString(false),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\t_, err = helpers.SendMessage(msg.ChannelID, helpers.GetText(\"plugins.troublemaker.participation-disabled\"))\n\t\t\t\t\thelpers.Relax(err)\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t\tbreak\n\t\tcase \"list\":\n\t\t\tif !helpers.IsMod(msg) && !helpers.CanInspectExtended(msg) && !helpers.CanInspectBasic(msg) {\n\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"mod.no_permission\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\tif len(args) < 2 {\n\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetUser, err := helpers.GetUserFromMention(args[1])\n\t\t\tif err != nil || targetUser.ID == \"\" {\n\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttroublemakerReports := t.getTroublemakerReports(targetUser)\n\n\t\t\tif len(troublemakerReports) <= 0 {\n\t\t\t\t_, err := helpers.SendMessage(msg.ChannelID, helpers.GetTextF(\"plugins.troublemaker.list-no-reports\", targetUser.Username))\n\t\t\t\thelpers.Relax(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\ttroublemakerText := fmt.Sprintf(\"I found %d report(s) for `%s#%s` `#%s` <@%s>:\\n\",\n\t\t\t\t\tlen(troublemakerReports), targetUser.Username, targetUser.Discriminator, targetUser.ID, targetUser.ID,\n\t\t\t\t)\n\n\t\t\t\tfor _, troublemakerReport := range troublemakerReports {\n\t\t\t\t\treportedByUser, err := helpers.GetUser(troublemakerReport.ReportedByUserID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treportedByUser = new(discordgo.User)\n\t\t\t\t\t\treportedByUser.ID = troublemakerReport.ReportedByUserID\n\t\t\t\t\t\treportedByUser.Username = \"N\/A\"\n\t\t\t\t\t}\n\t\t\t\t\treportedByGuild, err := helpers.GetGuild(troublemakerReport.ReportedByGuildID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treportedByGuild = new(discordgo.Guild)\n\t\t\t\t\t\treportedByGuild.ID = troublemakerReport.ReportedByGuildID\n\t\t\t\t\t\treportedByGuild.Name = \"N\/A\"\n\t\t\t\t\t}\n\t\t\t\t\ttroublemakerText += fmt.Sprintf(\"At: `%s`, Reason: `%s`, Reported By: `%s` (`#%s`) On: `%s` (`#%s`)\\n\",\n\t\t\t\t\t\ttroublemakerReport.CreatedAt.Format(time.ANSIC), troublemakerReport.Reason,\n\t\t\t\t\t\treportedByUser.Username, reportedByUser.ID, reportedByGuild.Name, reportedByGuild.ID,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tfor _, page := range helpers.Pagify(troublemakerText, \"\\n\") {\n\t\t\t\t\t_, err := helpers.SendMessage(msg.ChannelID, page)\n\t\t\t\t\thelpers.Relax(err)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\thelpers.RequireMod(msg, func() {\n\t\t\t\tsession.ChannelTyping(msg.ChannelID)\n\n\t\t\t\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\t\t\t\thelpers.Relax(err)\n\t\t\t\tguild, err := helpers.GetGuild(channel.GuildID)\n\t\t\t\thelpers.Relax(err)\n\n\t\t\t\tif len(args) < 2 {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttargetUser, err := helpers.GetUserFromMention(args[0])\n\t\t\t\tif err != nil || targetUser.ID == \"\" {\n\t\t\t\t\thelpers.SendMessage(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\treasonText := strings.TrimSpace(strings.Replace(content, strings.Join(args[:1], \" \"), \"\", 1))\n\n\t\t\t\tif helpers.ConfirmEmbed(msg.ChannelID, msg.Author, helpers.GetTextF(\"plugins.troublemaker.report-confirm\",\n\t\t\t\t\ttargetUser.Username, targetUser.Discriminator, targetUser.ID, targetUser.ID, reasonText,\n\t\t\t\t), \"✅\", \"🚫\") == true {\n\t\t\t\t\t\/\/ Save to log DB\n\t\t\t\t\ttroublemakerLogEntry := t.getEntryByOrCreateEmpty(\"id\", \"\")\n\t\t\t\t\ttroublemakerLogEntry.UserID = targetUser.ID\n\t\t\t\t\ttroublemakerLogEntry.Reason = reasonText\n\t\t\t\t\ttroublemakerLogEntry.CreatedAt = time.Now()\n\t\t\t\t\ttroublemakerLogEntry.ReportedByGuildID = guild.ID\n\t\t\t\t\ttroublemakerLogEntry.ReportedByUserID = msg.Author.ID\n\t\t\t\t\tt.setEntry(troublemakerLogEntry)\n\n\t\t\t\t\tcache.GetLogger().WithField(\"module\", \"troublemaker\").Info(fmt.Sprintf(\"will notify about troublemaker %s (#%s) by %s (#%s) on %s (#%s) reason %s\",\n\t\t\t\t\t\ttargetUser.Username, targetUser.ID,\n\t\t\t\t\t\tmsg.Author.Username, msg.Author.ID,\n\t\t\t\t\t\tguild.Name, guild.ID,\n\t\t\t\t\t\treasonText,\n\t\t\t\t\t))\n\n\t\t\t\t\tguildsToNotify := make([]*discordgo.Guild, 0)\n\n\t\t\t\t\tfor _, guildToNotify := range session.State.Guilds {\n\t\t\t\t\t\tif guildToNotify.ID != guild.ID {\n\t\t\t\t\t\t\tguildToNotifySettings := helpers.GuildSettingsGetCached(guildToNotify.ID)\n\t\t\t\t\t\t\tif guildToNotifySettings.TroublemakerIsParticipating == true && guildToNotifySettings.TroublemakerLogChannel != \"\" {\n\t\t\t\t\t\t\t\tguildsToNotify = append(guildsToNotify, guildToNotify)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t_, err = helpers.EventlogLog(time.Now(), channel.GuildID, targetUser.ID,\n\t\t\t\t\t\tmodels.EventlogTargetTypeUser, msg.Author.ID,\n\t\t\t\t\t\tmodels.EventlogTypeRobyulTroublemakerReport, reasonText,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil, false)\n\t\t\t\t\thelpers.RelaxLog(err)\n\n\t\t\t\t\tsuccessMessages, _ := helpers.SendMessage(msg.ChannelID, helpers.GetTextF(\"plugins.troublemaker.report-successful\", len(guildsToNotify)))\n\n\t\t\t\t\t\/\/ Send notifications out\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer helpers.Recover()\n\n\t\t\t\t\t\tfor _, guildToNotify := range guildsToNotify {\n\t\t\t\t\t\t\tguildToNotifySettings := helpers.GuildSettingsGetCached(guildToNotify.ID)\n\t\t\t\t\t\t\tif guildToNotifySettings.TroublemakerIsParticipating == true && guildToNotifySettings.TroublemakerLogChannel != \"\" {\n\t\t\t\t\t\t\t\ttargetUserIsOnServer := false\n\t\t\t\t\t\t\t\t_, err := helpers.GetGuildMember(guildToNotify.ID, targetUser.ID)\n\t\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\t\ttargetUserIsOnServer = true\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treportEmbed := &discordgo.MessageEmbed{\n\t\t\t\t\t\t\t\t\tTitle: helpers.GetTextF(\"plugins.troublemaker.report-embed-title\", targetUser.Username, targetUser.Discriminator),\n\t\t\t\t\t\t\t\t\tDescription: helpers.GetTextF(\"plugins.troublemaker.report-embed-description\", targetUser.ID, targetUser.ID),\n\t\t\t\t\t\t\t\t\tURL: helpers.GetAvatarUrl(targetUser),\n\t\t\t\t\t\t\t\t\tThumbnail: &discordgo.MessageEmbedThumbnail{URL: helpers.GetAvatarUrl(targetUser)},\n\t\t\t\t\t\t\t\t\tFooter: &discordgo.MessageEmbedFooter{Text: helpers.GetTextF(\"plugins.troublemaker.report-embed-footer\", len(guildsToNotify))},\n\t\t\t\t\t\t\t\t\tColor: 0x0FADED,\n\t\t\t\t\t\t\t\t\tFields: []*discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\t{Name: \"Reason stated\", Value: reasonText, Inline: false},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif targetUserIsOnServer == true {\n\t\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\tName: \"Member status\", Value: \":warning: User is on this server\", Inline: false,\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\t\tName: \"Member status\", Value: \":white_check_mark: User is not on this server\", Inline: false,\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treportEmbed.Fields = append(reportEmbed.Fields, &discordgo.MessageEmbedField{\n\t\t\t\t\t\t\t\t\tName: \"Reported by\", Value: fmt.Sprintf(\"**%s** on **%s**\",\n\t\t\t\t\t\t\t\t\t\tmsg.Author.Username, guild.Name,\n\t\t\t\t\t\t\t\t\t), Inline: false})\n\n\t\t\t\t\t\t\t\t_, err = helpers.SendEmbed(guildToNotifySettings.TroublemakerLogChannel, reportEmbed)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tcache.GetLogger().WithField(\"module\", \"troublemaker\").Warnf(\"Failed to send troublemaker report to channel #%s on guild #%s: %s\"+\n\t\t\t\t\t\t\t\t\t\tguildToNotifySettings.TroublemakerLogChannel, guildToNotifySettings.Guild, err.Error())\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(successMessages) > 0 {\n\t\t\t\t\t\t\tsession.MessageReactionAdd(msg.ChannelID, successMessages[0].ID, \"👌\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (t *Troublemaker) getTroublemakerReports(user *discordgo.User) []DB_Troublemaker_Entry {\n\tvar entryBucket []DB_Troublemaker_Entry\n\tlistCursor, err := rethink.Table(\"troublemakerlog\").Filter(\n\t\trethink.Row.Field(\"userid\").Eq(user.ID),\n\t).Run(helpers.GetDB())\n\thelpers.Relax(err)\n\tdefer listCursor.Close()\n\tlistCursor.All(&entryBucket)\n\n\treturn entryBucket\n}\n\nfunc (t *Troublemaker) getEntryByOrCreateEmpty(key string, id string) DB_Troublemaker_Entry {\n\tvar entryBucket DB_Troublemaker_Entry\n\tlistCursor, err := rethink.Table(\"troublemakerlog\").Filter(\n\t\trethink.Row.Field(key).Eq(id),\n\t).Run(helpers.GetDB())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer listCursor.Close()\n\terr = listCursor.One(&entryBucket)\n\n\t\/\/ If user has no DB entries create an empty document\n\tif err == rethink.ErrEmptyResult {\n\t\tinsert := rethink.Table(\"troublemakerlog\").Insert(DB_Troublemaker_Entry{})\n\t\tres, e := insert.RunWrite(helpers.GetDB())\n\t\t\/\/ If the creation was successful read the document\n\t\tif e != nil {\n\t\t\tpanic(e)\n\t\t} else {\n\t\t\treturn t.getEntryByOrCreateEmpty(\"id\", res.GeneratedKeys[0])\n\t\t}\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn entryBucket\n}\n\nfunc (t *Troublemaker) setEntry(entry DB_Troublemaker_Entry) {\n\t_, err := rethink.Table(\"troublemakerlog\").Update(entry).Run(helpers.GetDB())\n\thelpers.Relax(err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage clnt\n\nimport \"plan9\/p\"\nimport \"strings\"\n\n\/\/ Opens the file associated with the fid. Returns nil if\n\/\/ the operation is successful.\nfunc (clnt *Clnt) Open(fid *Fid, mode uint8) *p.Error {\n\ttc := p.NewFcall(clnt.Msize);\n\terr := p.PackTopen(tc, fid.Fid, mode);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc, err := clnt.rpc(tc);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfid.Qid = rc.Qid;\n\tfid.Iounit = rc.Iounit;\n\tif fid.Iounit == 0 || fid.Iounit > clnt.Msize-p.IOHDRSZ {\n\t\tfid.Iounit = clnt.Msize - p.IOHDRSZ\n\t}\n\tfid.Mode = mode;\n\treturn nil;\n}\n\n\/\/ Creates a file in the directory associated with the fid. Returns nil\n\/\/ if the operation is successful.\nfunc (clnt *Clnt) Create(fid *Fid, name string, perm uint32, mode uint8, ext string) *p.Error {\n\ttc := p.NewFcall(clnt.Msize);\n\terr := p.PackTcreate(tc, fid.Fid, name, perm, mode, ext, clnt.Dotu);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc, err := clnt.rpc(tc);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfid.Qid = rc.Qid;\n\tfid.Iounit = rc.Iounit;\n\tif fid.Iounit == 0 || fid.Iounit > clnt.Msize-p.IOHDRSZ {\n\t\tfid.Iounit = clnt.Msize - p.IOHDRSZ\n\t}\n\tfid.Mode = mode;\n\treturn nil;\n}\n\n\/\/ Creates and opens a named file.\n\/\/ Returns the file if the operation is successful, or an Error.\nfunc (clnt *Clnt) FCreate(path string, perm uint32, mode uint8) (*File, *p.Error) {\n\tn := strings.LastIndex(path, \"\/\");\n\tif n < 0 {\n\t\tn = 0\n\t}\n\n\tfid, err := clnt.FWalk(path[0:n]);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = clnt.Create(fid, path[n:len(path)], perm, mode, \"\");\n\tif err != nil {\n\t\tclnt.Clunk(fid);\n\t\treturn nil, err;\n\t}\n\n\treturn &File{fid, 0}, nil;\n}\n\n\/\/ Opens a named file. Returns the opened file, or an Error.\nfunc (clnt *Clnt) FOpen(path string, mode uint8) (*File, *p.Error) {\n\tfid, err := clnt.FWalk(path);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = clnt.Open(fid, mode);\n\tif err != nil {\n\t\tclnt.Clunk(fid);\n\t\treturn nil, err;\n\t}\n\n\treturn &File{fid, 0}, nil;\n}\n<commit_msg>fix bug during create (basename index was off by one). should perhaps use path.Split()<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage clnt\n\nimport \"plan9\/p\"\nimport \"strings\"\n\n\/\/ Opens the file associated with the fid. Returns nil if\n\/\/ the operation is successful.\nfunc (clnt *Clnt) Open(fid *Fid, mode uint8) *p.Error {\n\ttc := p.NewFcall(clnt.Msize);\n\terr := p.PackTopen(tc, fid.Fid, mode);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc, err := clnt.rpc(tc);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfid.Qid = rc.Qid;\n\tfid.Iounit = rc.Iounit;\n\tif fid.Iounit == 0 || fid.Iounit > clnt.Msize-p.IOHDRSZ {\n\t\tfid.Iounit = clnt.Msize - p.IOHDRSZ\n\t}\n\tfid.Mode = mode;\n\treturn nil;\n}\n\n\/\/ Creates a file in the directory associated with the fid. Returns nil\n\/\/ if the operation is successful.\nfunc (clnt *Clnt) Create(fid *Fid, name string, perm uint32, mode uint8, ext string) *p.Error {\n\ttc := p.NewFcall(clnt.Msize);\n\terr := p.PackTcreate(tc, fid.Fid, name, perm, mode, ext, clnt.Dotu);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc, err := clnt.rpc(tc);\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfid.Qid = rc.Qid;\n\tfid.Iounit = rc.Iounit;\n\tif fid.Iounit == 0 || fid.Iounit > clnt.Msize-p.IOHDRSZ {\n\t\tfid.Iounit = clnt.Msize - p.IOHDRSZ\n\t}\n\tfid.Mode = mode;\n\treturn nil;\n}\n\n\/\/ Creates and opens a named file.\n\/\/ Returns the file if the operation is successful, or an Error.\nfunc (clnt *Clnt) FCreate(path string, perm uint32, mode uint8) (*File, *p.Error) {\n\tn := strings.LastIndex(path, \"\/\");\n\tif n < 0 {\n\t\tn = 0\n\t}\n\n\tfid, err := clnt.FWalk(path[0:n]);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = clnt.Create(fid, path[n+1:len(path)], perm, mode, \"\");\n\tif err != nil {\n\t\tclnt.Clunk(fid);\n\t\treturn nil, err;\n\t}\n\n\treturn &File{fid, 0}, nil;\n}\n\n\/\/ Opens a named file. Returns the opened file, or an Error.\nfunc (clnt *Clnt) FOpen(path string, mode uint8) (*File, *p.Error) {\n\tfid, err := clnt.FWalk(path);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = clnt.Open(fid, mode);\n\tif err != nil {\n\t\tclnt.Clunk(fid);\n\t\treturn nil, err;\n\t}\n\n\treturn &File{fid, 0}, nil;\n}\n<|endoftext|>"} {"text":"<commit_before>package the_platinum_searcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/monochromegane\/conflag\"\n\t\"github.com\/monochromegane\/go-home\"\n\t\"github.com\/monochromegane\/terminal\"\n)\n\nconst version = \"2.0.2\"\n\nconst (\n\tExitCodeOK = iota\n\tExitCodeError\n)\n\nvar opts Option\n\ntype PlatinumSearcher struct {\n\tOut, Err io.Writer\n}\n\nfunc (p PlatinumSearcher) Run(args []string) int {\n\n\tparser := newOptionParser(&opts)\n\n\tconflag.LongHyphen = true\n\tconflag.BoolValue = false\n\tfor _, c := range []string{filepath.Join(home.Dir(), \".ptconfig.toml\"), \".ptconfig.toml\"} {\n\t\tif args, err := conflag.ArgsFrom(c); err == nil {\n\t\t\tparser.ParseArgs(args)\n\t\t}\n\t}\n\n\targs, err := parser.ParseArgs(args)\n\tif err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\tif opts.Version {\n\t\tfmt.Printf(\"pt version %s\\n\", version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif len(args) == 0 && !opts.SearchOption.EnableFilesWithRegexp {\n\t\tparser.WriteHelp(p.Err)\n\t\treturn ExitCodeError\n\t}\n\n\tif !terminal.IsTerminal(os.Stdout) {\n\t\topts.OutputOption.EnableColor = false\n\t\topts.OutputOption.EnableGroup = false\n\t}\n\n\tif p.givenStdin() {\n\t\topts.SearchOption.SearchStream = true\n\t}\n\n\tif opts.SearchOption.EnableFilesWithRegexp {\n\t\targs = append([]string{\"\"}, args...)\n\t}\n\n\tsearch := search{\n\t\troots: p.rootsFrom(args),\n\t\tout: p.Out,\n\t}\n\tif err = search.start(p.patternFrom(args)); err != nil {\n\t\tfmt.Fprintf(p.Err, \"%s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\treturn ExitCodeOK\n}\n\nfunc (p PlatinumSearcher) patternFrom(args []string) string {\n\treturn args[0]\n}\n\nfunc (p PlatinumSearcher) rootsFrom(args []string) []string {\n\tif len(args) > 1 {\n\t\treturn args[1:]\n\t} else {\n\t\treturn []string{\".\"}\n\t}\n}\n\nfunc (p PlatinumSearcher) givenStdin() bool {\n\tfi, err := os.Stdin.Stat()\n\tif runtime.GOOS == \"windows\" {\n\t\tif err == nil {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tmode := fi.Mode()\n\t\tif (mode&os.ModeNamedPipe != 0) || mode.IsRegular() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>Bumped version to 2.0.3.<commit_after>package the_platinum_searcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/monochromegane\/conflag\"\n\t\"github.com\/monochromegane\/go-home\"\n\t\"github.com\/monochromegane\/terminal\"\n)\n\nconst version = \"2.0.3\"\n\nconst (\n\tExitCodeOK = iota\n\tExitCodeError\n)\n\nvar opts Option\n\ntype PlatinumSearcher struct {\n\tOut, Err io.Writer\n}\n\nfunc (p PlatinumSearcher) Run(args []string) int {\n\n\tparser := newOptionParser(&opts)\n\n\tconflag.LongHyphen = true\n\tconflag.BoolValue = false\n\tfor _, c := range []string{filepath.Join(home.Dir(), \".ptconfig.toml\"), \".ptconfig.toml\"} {\n\t\tif args, err := conflag.ArgsFrom(c); err == nil {\n\t\t\tparser.ParseArgs(args)\n\t\t}\n\t}\n\n\targs, err := parser.ParseArgs(args)\n\tif err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\tif opts.Version {\n\t\tfmt.Printf(\"pt version %s\\n\", version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif len(args) == 0 && !opts.SearchOption.EnableFilesWithRegexp {\n\t\tparser.WriteHelp(p.Err)\n\t\treturn ExitCodeError\n\t}\n\n\tif !terminal.IsTerminal(os.Stdout) {\n\t\topts.OutputOption.EnableColor = false\n\t\topts.OutputOption.EnableGroup = false\n\t}\n\n\tif p.givenStdin() {\n\t\topts.SearchOption.SearchStream = true\n\t}\n\n\tif opts.SearchOption.EnableFilesWithRegexp {\n\t\targs = append([]string{\"\"}, args...)\n\t}\n\n\tsearch := search{\n\t\troots: p.rootsFrom(args),\n\t\tout: p.Out,\n\t}\n\tif err = search.start(p.patternFrom(args)); err != nil {\n\t\tfmt.Fprintf(p.Err, \"%s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\treturn ExitCodeOK\n}\n\nfunc (p PlatinumSearcher) patternFrom(args []string) string {\n\treturn args[0]\n}\n\nfunc (p PlatinumSearcher) rootsFrom(args []string) []string {\n\tif len(args) > 1 {\n\t\treturn args[1:]\n\t} else {\n\t\treturn []string{\".\"}\n\t}\n}\n\nfunc (p PlatinumSearcher) givenStdin() bool {\n\tfi, err := os.Stdin.Stat()\n\tif runtime.GOOS == \"windows\" {\n\t\tif err == nil {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tmode := fi.Mode()\n\t\tif (mode&os.ModeNamedPipe != 0) || mode.IsRegular() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package arn\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/nano\"\n\t\"github.com\/animenotifier\/arn\/autocorrect\"\n\t\"github.com\/fatih\/color\"\n)\n\n\/\/ SoundTrack ...\ntype SoundTrack struct {\n\tID string `json:\"id\"`\n\tTitle string `json:\"title\" editable:\"true\"`\n\tMedia []*ExternalMedia `json:\"media\" editable:\"true\"`\n\tTags []string `json:\"tags\" editable:\"true\" tooltip:\"<ul><li><strong>anime:ID<\/strong> to connect it with anime<\/li><li><strong>opening<\/strong> for openings<\/li><li><strong>ending<\/strong> for endings<\/li><li><strong>cover<\/strong> for covers<\/li><li><strong>remix<\/strong> for remixes<\/li><\/ul>\"`\n\tIsDraft bool `json:\"isDraft\" editable:\"true\"`\n\tFile string `json:\"file\"`\n\tCreated string `json:\"created\"`\n\tCreatedBy string `json:\"createdBy\"`\n\tEdited string `json:\"edited\"`\n\tEditedBy string `json:\"editedBy\"`\n\tLikeableImplementation\n}\n\n\/\/ Link returns the permalink for the track.\nfunc (track *SoundTrack) Link() string {\n\treturn \"\/soundtrack\/\" + track.ID\n}\n\n\/\/ MediaByService ...\nfunc (track *SoundTrack) MediaByService(service string) []*ExternalMedia {\n\tfiltered := []*ExternalMedia{}\n\n\tfor _, media := range track.Media {\n\t\tif media.Service == service {\n\t\t\tfiltered = append(filtered, media)\n\t\t}\n\t}\n\n\treturn filtered\n}\n\n\/\/ HasTag returns true if it contains the given tag.\nfunc (track *SoundTrack) HasTag(search string) bool {\n\tfor _, tag := range track.Tags {\n\t\tif tag == search {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Anime fetches all tagged anime of the sound track.\nfunc (track *SoundTrack) Anime() []*Anime {\n\tvar animeList []*Anime\n\n\tfor _, tag := range track.Tags {\n\t\tif strings.HasPrefix(tag, \"anime:\") {\n\t\t\tanimeID := strings.TrimPrefix(tag, \"anime:\")\n\t\t\tanime, err := GetAnime(animeID)\n\n\t\t\tif err != nil {\n\t\t\t\tcolor.Red(\"Error fetching anime: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tanimeList = append(animeList, anime)\n\t\t}\n\t}\n\n\treturn animeList\n}\n\n\/\/ Beatmaps returns all osu beatmap IDs of the sound track.\nfunc (track *SoundTrack) Beatmaps() []string {\n\tvar beatmaps []string\n\n\tfor _, tag := range track.Tags {\n\t\tif strings.HasPrefix(tag, \"osu-beatmap:\") {\n\t\t\tosuID := strings.TrimPrefix(tag, \"osu-beatmap:\")\n\t\t\tbeatmaps = append(beatmaps, osuID)\n\t\t}\n\t}\n\n\treturn beatmaps\n}\n\n\/\/ MainAnime ...\nfunc (track *SoundTrack) MainAnime() *Anime {\n\tallAnime := track.Anime()\n\n\tif len(allAnime) == 0 {\n\t\treturn nil\n\t}\n\n\treturn allAnime[0]\n}\n\n\/\/ Creator returns the user who created this track.\nfunc (track *SoundTrack) Creator() *User {\n\tuser, _ := GetUser(track.CreatedBy)\n\treturn user\n}\n\n\/\/ EditedByUser returns the user who edited this track last.\nfunc (track *SoundTrack) EditedByUser() *User {\n\tuser, _ := GetUser(track.EditedBy)\n\treturn user\n}\n\n\/\/ OnLike is called when the soundtrack receives a like.\nfunc (track *SoundTrack) OnLike(likedBy *User) {\n\tif likedBy.ID == track.CreatedBy {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\ttrack.Creator().SendNotification(&PushNotification{\n\t\t\tTitle: likedBy.Nick + \" liked your soundtrack \" + track.Title,\n\t\t\tMessage: likedBy.Nick + \" liked your soundtrack \" + track.Title + \".\",\n\t\t\tIcon: \"https:\" + likedBy.AvatarLink(\"large\"),\n\t\t\tLink: \"https:\/\/notify.moe\" + likedBy.Link(),\n\t\t\tType: NotificationTypeLike,\n\t\t})\n\t}()\n}\n\n\/\/ Publish ...\nfunc (track *SoundTrack) Publish() error {\n\t\/\/ No draft\n\tif !track.IsDraft {\n\t\treturn errors.New(\"Not a draft\")\n\t}\n\n\t\/\/ No media added\n\tif len(track.Media) == 0 {\n\t\treturn errors.New(\"No media specified (at least 1 media source is required)\")\n\t}\n\n\tanimeFound := false\n\n\tfor _, tag := range track.Tags {\n\t\ttag = autocorrect.FixTag(tag)\n\n\t\tif strings.HasPrefix(tag, \"anime:\") {\n\t\t\tanimeID := strings.TrimPrefix(tag, \"anime:\")\n\t\t\t_, err := GetAnime(animeID)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Invalid anime ID\")\n\t\t\t}\n\n\t\t\tanimeFound = true\n\t\t}\n\t}\n\n\t\/\/ No anime found\n\tif !animeFound {\n\t\treturn errors.New(\"Need to specify at least one anime\")\n\t}\n\n\t\/\/ No tags\n\tif len(track.Tags) < 1 {\n\t\treturn errors.New(\"Need to specify at least one tag\")\n\t}\n\n\tdraftIndex, err := GetDraftIndex(track.CreatedBy)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif draftIndex.SoundTrackID == \"\" {\n\t\treturn errors.New(\"Soundtrack draft doesn't exist in the user draft index\")\n\t}\n\n\ttrack.IsDraft = false\n\tdraftIndex.SoundTrackID = \"\"\n\tdraftIndex.Save()\n\treturn nil\n}\n\n\/\/ Unpublish ...\nfunc (track *SoundTrack) Unpublish() error {\n\tdraftIndex, err := GetDraftIndex(track.CreatedBy)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif draftIndex.SoundTrackID != \"\" {\n\t\treturn errors.New(\"You still have an unfinished draft\")\n\t}\n\n\ttrack.IsDraft = true\n\tdraftIndex.SoundTrackID = track.ID\n\tdraftIndex.Save()\n\treturn nil\n}\n\n\/\/ Download downloads the track.\nfunc (track *SoundTrack) Download() error {\n\tyoutubeVideos := track.MediaByService(\"Youtube\")\n\n\tif len(youtubeVideos) == 0 {\n\t\treturn errors.New(\"No Youtube ID\")\n\t}\n\n\tyoutubeID := youtubeVideos[0].ServiceID\n\n\t\/\/ Check for existing file\n\tif track.File != \"\" {\n\t\tstat, err := os.Stat(path.Join(Root, \"audio\", track.File))\n\n\t\tif err == nil && !stat.IsDir() && stat.Size() > 0 {\n\t\t\treturn errors.New(\"Already downloaded\")\n\t\t}\n\t}\n\n\taudioDirectory := path.Join(Root, \"audio\")\n\tbaseName := track.ID + \"|\" + youtubeID\n\tfilePath := path.Join(audioDirectory, baseName)\n\n\tcmd := exec.Command(\"youtube-dl\", \"--extract-audio\", \"--audio-quality\", \"0\", \"--output\", filePath+\".%(ext)s\", youtubeID)\n\terr := cmd.Start()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.Wait()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullPath := FindFileWithExtension(baseName, audioDirectory, []string{\n\t\t\".opus\",\n\t\t\".webm\",\n\t\t\".ogg\",\n\t\t\".m4a\",\n\t\t\".mp3\",\n\t\t\".flac\",\n\t\t\".wav\",\n\t})\n\n\textension := path.Ext(fullPath)\n\ttrack.File = baseName + extension\n\n\treturn nil\n}\n\n\/\/ String implements the default string serialization.\nfunc (track *SoundTrack) String() string {\n\treturn track.Title\n}\n\n\/\/ SortSoundTracksLatestFirst ...\nfunc SortSoundTracksLatestFirst(tracks []*SoundTrack) {\n\tsort.Slice(tracks, func(i, j int) bool {\n\t\treturn tracks[i].Created > tracks[j].Created\n\t})\n}\n\n\/\/ SortSoundTracksPopularFirst ...\nfunc SortSoundTracksPopularFirst(tracks []*SoundTrack) {\n\tsort.Slice(tracks, func(i, j int) bool {\n\t\taLikes := len(tracks[i].Likes)\n\t\tbLikes := len(tracks[j].Likes)\n\n\t\tif aLikes == bLikes {\n\t\t\treturn tracks[i].Created > tracks[j].Created\n\t\t}\n\n\t\treturn aLikes > bLikes\n\t})\n}\n\n\/\/ GetSoundTrack ...\nfunc GetSoundTrack(id string) (*SoundTrack, error) {\n\ttrack, err := DB.Get(\"SoundTrack\", id)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn track.(*SoundTrack), nil\n}\n\n\/\/ StreamSoundTracks returns a stream of all soundtracks.\nfunc StreamSoundTracks() chan *SoundTrack {\n\tchannel := make(chan *SoundTrack, nano.ChannelBufferSize)\n\n\tgo func() {\n\t\tfor obj := range DB.All(\"SoundTrack\") {\n\t\t\tchannel <- obj.(*SoundTrack)\n\t\t}\n\n\t\tclose(channel)\n\t}()\n\n\treturn channel\n}\n\n\/\/ AllSoundTracks ...\nfunc AllSoundTracks() ([]*SoundTrack, error) {\n\tvar all []*SoundTrack\n\n\tfor obj := range StreamSoundTracks() {\n\t\tall = append(all, obj)\n\t}\n\n\treturn all, nil\n}\n\n\/\/ FilterSoundTracks filters all soundtracks by a custom function.\nfunc FilterSoundTracks(filter func(*SoundTrack) bool) []*SoundTrack {\n\tvar filtered []*SoundTrack\n\n\tfor obj := range StreamSoundTracks() {\n\t\tif filter(obj) {\n\t\t\tfiltered = append(filtered, obj)\n\t\t}\n\t}\n\n\treturn filtered\n}\n<commit_msg>Fixed AllSoundTracks() signature<commit_after>package arn\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/nano\"\n\t\"github.com\/animenotifier\/arn\/autocorrect\"\n\t\"github.com\/fatih\/color\"\n)\n\n\/\/ SoundTrack ...\ntype SoundTrack struct {\n\tID string `json:\"id\"`\n\tTitle string `json:\"title\" editable:\"true\"`\n\tMedia []*ExternalMedia `json:\"media\" editable:\"true\"`\n\tTags []string `json:\"tags\" editable:\"true\" tooltip:\"<ul><li><strong>anime:ID<\/strong> to connect it with anime<\/li><li><strong>opening<\/strong> for openings<\/li><li><strong>ending<\/strong> for endings<\/li><li><strong>cover<\/strong> for covers<\/li><li><strong>remix<\/strong> for remixes<\/li><\/ul>\"`\n\tIsDraft bool `json:\"isDraft\" editable:\"true\"`\n\tFile string `json:\"file\"`\n\tCreated string `json:\"created\"`\n\tCreatedBy string `json:\"createdBy\"`\n\tEdited string `json:\"edited\"`\n\tEditedBy string `json:\"editedBy\"`\n\tLikeableImplementation\n}\n\n\/\/ Link returns the permalink for the track.\nfunc (track *SoundTrack) Link() string {\n\treturn \"\/soundtrack\/\" + track.ID\n}\n\n\/\/ MediaByService ...\nfunc (track *SoundTrack) MediaByService(service string) []*ExternalMedia {\n\tfiltered := []*ExternalMedia{}\n\n\tfor _, media := range track.Media {\n\t\tif media.Service == service {\n\t\t\tfiltered = append(filtered, media)\n\t\t}\n\t}\n\n\treturn filtered\n}\n\n\/\/ HasTag returns true if it contains the given tag.\nfunc (track *SoundTrack) HasTag(search string) bool {\n\tfor _, tag := range track.Tags {\n\t\tif tag == search {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Anime fetches all tagged anime of the sound track.\nfunc (track *SoundTrack) Anime() []*Anime {\n\tvar animeList []*Anime\n\n\tfor _, tag := range track.Tags {\n\t\tif strings.HasPrefix(tag, \"anime:\") {\n\t\t\tanimeID := strings.TrimPrefix(tag, \"anime:\")\n\t\t\tanime, err := GetAnime(animeID)\n\n\t\t\tif err != nil {\n\t\t\t\tcolor.Red(\"Error fetching anime: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tanimeList = append(animeList, anime)\n\t\t}\n\t}\n\n\treturn animeList\n}\n\n\/\/ Beatmaps returns all osu beatmap IDs of the sound track.\nfunc (track *SoundTrack) Beatmaps() []string {\n\tvar beatmaps []string\n\n\tfor _, tag := range track.Tags {\n\t\tif strings.HasPrefix(tag, \"osu-beatmap:\") {\n\t\t\tosuID := strings.TrimPrefix(tag, \"osu-beatmap:\")\n\t\t\tbeatmaps = append(beatmaps, osuID)\n\t\t}\n\t}\n\n\treturn beatmaps\n}\n\n\/\/ MainAnime ...\nfunc (track *SoundTrack) MainAnime() *Anime {\n\tallAnime := track.Anime()\n\n\tif len(allAnime) == 0 {\n\t\treturn nil\n\t}\n\n\treturn allAnime[0]\n}\n\n\/\/ Creator returns the user who created this track.\nfunc (track *SoundTrack) Creator() *User {\n\tuser, _ := GetUser(track.CreatedBy)\n\treturn user\n}\n\n\/\/ EditedByUser returns the user who edited this track last.\nfunc (track *SoundTrack) EditedByUser() *User {\n\tuser, _ := GetUser(track.EditedBy)\n\treturn user\n}\n\n\/\/ OnLike is called when the soundtrack receives a like.\nfunc (track *SoundTrack) OnLike(likedBy *User) {\n\tif likedBy.ID == track.CreatedBy {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\ttrack.Creator().SendNotification(&PushNotification{\n\t\t\tTitle: likedBy.Nick + \" liked your soundtrack \" + track.Title,\n\t\t\tMessage: likedBy.Nick + \" liked your soundtrack \" + track.Title + \".\",\n\t\t\tIcon: \"https:\" + likedBy.AvatarLink(\"large\"),\n\t\t\tLink: \"https:\/\/notify.moe\" + likedBy.Link(),\n\t\t\tType: NotificationTypeLike,\n\t\t})\n\t}()\n}\n\n\/\/ Publish ...\nfunc (track *SoundTrack) Publish() error {\n\t\/\/ No draft\n\tif !track.IsDraft {\n\t\treturn errors.New(\"Not a draft\")\n\t}\n\n\t\/\/ No media added\n\tif len(track.Media) == 0 {\n\t\treturn errors.New(\"No media specified (at least 1 media source is required)\")\n\t}\n\n\tanimeFound := false\n\n\tfor _, tag := range track.Tags {\n\t\ttag = autocorrect.FixTag(tag)\n\n\t\tif strings.HasPrefix(tag, \"anime:\") {\n\t\t\tanimeID := strings.TrimPrefix(tag, \"anime:\")\n\t\t\t_, err := GetAnime(animeID)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Invalid anime ID\")\n\t\t\t}\n\n\t\t\tanimeFound = true\n\t\t}\n\t}\n\n\t\/\/ No anime found\n\tif !animeFound {\n\t\treturn errors.New(\"Need to specify at least one anime\")\n\t}\n\n\t\/\/ No tags\n\tif len(track.Tags) < 1 {\n\t\treturn errors.New(\"Need to specify at least one tag\")\n\t}\n\n\tdraftIndex, err := GetDraftIndex(track.CreatedBy)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif draftIndex.SoundTrackID == \"\" {\n\t\treturn errors.New(\"Soundtrack draft doesn't exist in the user draft index\")\n\t}\n\n\ttrack.IsDraft = false\n\tdraftIndex.SoundTrackID = \"\"\n\tdraftIndex.Save()\n\treturn nil\n}\n\n\/\/ Unpublish ...\nfunc (track *SoundTrack) Unpublish() error {\n\tdraftIndex, err := GetDraftIndex(track.CreatedBy)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif draftIndex.SoundTrackID != \"\" {\n\t\treturn errors.New(\"You still have an unfinished draft\")\n\t}\n\n\ttrack.IsDraft = true\n\tdraftIndex.SoundTrackID = track.ID\n\tdraftIndex.Save()\n\treturn nil\n}\n\n\/\/ Download downloads the track.\nfunc (track *SoundTrack) Download() error {\n\tyoutubeVideos := track.MediaByService(\"Youtube\")\n\n\tif len(youtubeVideos) == 0 {\n\t\treturn errors.New(\"No Youtube ID\")\n\t}\n\n\tyoutubeID := youtubeVideos[0].ServiceID\n\n\t\/\/ Check for existing file\n\tif track.File != \"\" {\n\t\tstat, err := os.Stat(path.Join(Root, \"audio\", track.File))\n\n\t\tif err == nil && !stat.IsDir() && stat.Size() > 0 {\n\t\t\treturn errors.New(\"Already downloaded\")\n\t\t}\n\t}\n\n\taudioDirectory := path.Join(Root, \"audio\")\n\tbaseName := track.ID + \"|\" + youtubeID\n\tfilePath := path.Join(audioDirectory, baseName)\n\n\tcmd := exec.Command(\"youtube-dl\", \"--extract-audio\", \"--audio-quality\", \"0\", \"--output\", filePath+\".%(ext)s\", youtubeID)\n\terr := cmd.Start()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.Wait()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullPath := FindFileWithExtension(baseName, audioDirectory, []string{\n\t\t\".opus\",\n\t\t\".webm\",\n\t\t\".ogg\",\n\t\t\".m4a\",\n\t\t\".mp3\",\n\t\t\".flac\",\n\t\t\".wav\",\n\t})\n\n\textension := path.Ext(fullPath)\n\ttrack.File = baseName + extension\n\n\treturn nil\n}\n\n\/\/ String implements the default string serialization.\nfunc (track *SoundTrack) String() string {\n\treturn track.Title\n}\n\n\/\/ SortSoundTracksLatestFirst ...\nfunc SortSoundTracksLatestFirst(tracks []*SoundTrack) {\n\tsort.Slice(tracks, func(i, j int) bool {\n\t\treturn tracks[i].Created > tracks[j].Created\n\t})\n}\n\n\/\/ SortSoundTracksPopularFirst ...\nfunc SortSoundTracksPopularFirst(tracks []*SoundTrack) {\n\tsort.Slice(tracks, func(i, j int) bool {\n\t\taLikes := len(tracks[i].Likes)\n\t\tbLikes := len(tracks[j].Likes)\n\n\t\tif aLikes == bLikes {\n\t\t\treturn tracks[i].Created > tracks[j].Created\n\t\t}\n\n\t\treturn aLikes > bLikes\n\t})\n}\n\n\/\/ GetSoundTrack ...\nfunc GetSoundTrack(id string) (*SoundTrack, error) {\n\ttrack, err := DB.Get(\"SoundTrack\", id)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn track.(*SoundTrack), nil\n}\n\n\/\/ StreamSoundTracks returns a stream of all soundtracks.\nfunc StreamSoundTracks() chan *SoundTrack {\n\tchannel := make(chan *SoundTrack, nano.ChannelBufferSize)\n\n\tgo func() {\n\t\tfor obj := range DB.All(\"SoundTrack\") {\n\t\t\tchannel <- obj.(*SoundTrack)\n\t\t}\n\n\t\tclose(channel)\n\t}()\n\n\treturn channel\n}\n\n\/\/ AllSoundTracks ...\nfunc AllSoundTracks() []*SoundTrack {\n\tvar all []*SoundTrack\n\n\tfor obj := range StreamSoundTracks() {\n\t\tall = append(all, obj)\n\t}\n\n\treturn all\n}\n\n\/\/ FilterSoundTracks filters all soundtracks by a custom function.\nfunc FilterSoundTracks(filter func(*SoundTrack) bool) []*SoundTrack {\n\tvar filtered []*SoundTrack\n\n\tfor obj := range StreamSoundTracks() {\n\t\tif filter(obj) {\n\t\t\tfiltered = append(filtered, obj)\n\t\t}\n\t}\n\n\treturn filtered\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\n\nimport (\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraform_utils\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/firehose\"\n)\n\n\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\ntype FirehoseGenerator struct {\n\tAWSService\n}\n\nfunc (g FirehoseGenerator) createResources(sess *session.Session, streamNames []*string, region string) []terraform_utils.Resource {\n\n\tvar resources []terraform_utils.Resource\n\tfor _, streamName := range streamNames {\n\t\tresourceName := aws.StringValue(streamName)\n\n\t\tresources = append(resources, terraform_utils.NewResource(\n\t\t\tresourceName,\n\t\t\tresourceName,\n\t\t\t\"aws_kinesis_firehose_delivery_stream\",\n\t\t\t\"aws\",\n\t\t\tmap[string]string{\"name\": resourceName},\n\t\t\t[]string{\".tags\"},\n\t\t\tmap[string]string{}))\n\t}\n\treturn resources\n}\n\n\/\/ Generate TerraformResources from AWS API,\n\/\/ Need deliver stream name for terraform resource\nfunc (g *FirehoseGenerator) InitResources() error {\n\tsess := g.generateSession()\n\tsvc := firehose.New(sess)\n\tvar streamNames []*string\n\tfor {\n\t\toutput, err := svc.ListDeliveryStreams(&firehose.ListDeliveryStreamsInput{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstreamNames = append(streamNames, output.DeliveryStreamNames...)\n\t\tif *output.HasMoreDeliveryStreams == false {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tg.Resources = g.createResources(sess, streamNames, g.GetArgs()[\"region\"].(string))\n\n\tg.PopulateIgnoreKeys()\n\treturn nil\n}\n<commit_msg>Fix some codes<commit_after>\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aws\n\nimport (\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraform_utils\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/firehose\"\n)\n\ntype FirehoseGenerator struct {\n\tAWSService\n}\n\nfunc (g FirehoseGenerator) createResources(sess *session.Session, streamNames []*string) []terraform_utils.Resource {\n\tvar resources []terraform_utils.Resource\n\tfor _, streamName := range streamNames {\n\t\tresourceName := aws.StringValue(streamName)\n\n\t\tresources = append(resources, terraform_utils.NewResource(\n\t\t\tresourceName,\n\t\t\tresourceName,\n\t\t\t\"aws_kinesis_firehose_delivery_stream\",\n\t\t\t\"aws\",\n\t\t\tmap[string]string{\"name\": resourceName},\n\t\t\t[]string{\".tags\"},\n\t\t\tmap[string]string{}))\n\t}\n\treturn resources\n}\n\n\/\/ Generate TerraformResources from AWS API,\n\/\/ Need deliver stream name for terraform resource\nfunc (g *FirehoseGenerator) InitResources() error {\n\tsess := g.generateSession()\n\tsvc := firehose.New(sess)\n\tvar streamNames []*string\n\tfor {\n\t\toutput, err := svc.ListDeliveryStreams(&firehose.ListDeliveryStreamsInput{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstreamNames = append(streamNames, output.DeliveryStreamNames...)\n\t\tif *output.HasMoreDeliveryStreams == false {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tg.Resources = g.createResources(sess, streamNames)\n\n\tg.PopulateIgnoreKeys()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage kde\n\nimport (\n\t\"bytes\"\n\t\"compress\/bzip2\"\n\t\"fmt\"\n\t\"github.com\/DataDrake\/cuppa\/results\"\n \"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ ListingURL is the location of the KDE FTP file listing\n\tListingURL = \"https:\/\/download.kde.org\/ls-lR.bz2\"\n\t\/\/ ListingPrefix is the prefix of all paths in the KDE listing that is hidden by HTTP\n\tListingPrefix = \"\/srv\/archives\/ftp\/\"\n\t\/\/ SourceFormat4 is the string format for KDE sources with 4 pieces\n\tSourceFormat4 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n\t\/\/ SourceFormat5 is the string format for KDE sources with 5 pieces\n\tSourceFormat5 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n\t\/\/ SourceFormat6 is the string format for KDE sources with 6 pieces\n\tSourceFormat6 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n)\n\n\/\/ TarballRegex matches KDE sources\nvar TarballRegex = regexp.MustCompile(\"https?:\/\/.*download.kde.org\/(.+)\")\n\n\/\/ Provider is the upstream provider interface for KDE\ntype Provider struct{}\n\nvar listing []byte\n\nfunc getListing() {\n\t\/\/ Query the API\n\tresp, err := http.Get(ListingURL)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\t\/\/ Translate Status Code\n if resp.StatusCode != 200 {\n return\n }\n\tbody := bzip2.NewReader(resp.Body)\n\tlisting, _ = ioutil.ReadAll(body)\n}\n\n\/\/ Latest finds the newest release for a KDE package\nfunc (c Provider) Latest(name string) (r *results.Result, s results.Status) {\n\trs, s := c.Releases(name)\n\tif s != results.OK {\n\t\treturn\n\t}\n\tr = rs.Last()\n\treturn\n}\n\n\/\/ Match checks to see if this provider can handle this kind of query\nfunc (c Provider) Match(query string) string {\n\tsm := TarballRegex.FindStringSubmatch(query)\n\tif len(sm) != 2 {\n\t\treturn \"\"\n\t}\n\tpieces := strings.Split(sm[1], \"\/\")\n\tif len(pieces) < 4 || len(pieces) > 6 {\n\t\treturn \"\"\n\t}\n\treturn sm[1]\n}\n\n\/\/ Name gives the name of this provider\nfunc (c Provider) Name() string {\n\treturn \"KDE\"\n}\n\n\/\/ Releases finds all matching releases for a KDE package\nfunc (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n if len(listing) == 0 {\n getListing()\n }\n buff := bytes.NewBuffer(listing)\n\tpieces := strings.Split(name, \"\/\")\n\tname = strings.Split(pieces[len(pieces)-1], \"-\")[0]\n\tvar searchPrefix string\n\tswitch len(pieces) {\n\tcase 4:\n\t\tsearchPrefix = ListingPrefix + strings.Join(pieces[0:len(pieces)-2], \"\/\") + \":\\n\"\n\tcase 5,6:\n\t\tsearchPrefix = ListingPrefix + strings.Join(pieces[0:len(pieces)-3], \"\/\") + \":\\n\"\n\t}\n\trs = results.NewResultSet(name)\n\tfor {\n\t\tline, err := buff.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif line != searchPrefix {\n\t\t\tcontinue\n\t\t}\n\t\tfor line != \"\\n\" {\n\t\t\tline, err = buff.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif line == \"\\n\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfields := strings.Fields(line)\n\t\t\tversion := fields[len(fields)-1]\n\t\t\tupdated, _ := time.Parse(\"2006-01-02 15:04\", strings.Join(fields[len(fields)-3:len(fields)-2], \" \"))\n\t\t\tvar location string\n\t\t\tswitch len(pieces) {\n\t\t\tcase 4:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat4, pieces[0], pieces[1], version, name, version)\n\t\t\tcase 5:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat5, pieces[0], pieces[1], version, pieces[3], name, version)\n\t\t\tcase 6:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat6, pieces[0], pieces[1], version, pieces[3], pieces[4], name, version)\n\t\t\t}\n\t\t\tr := &results.Result{\n\t\t\t\tName: name,\n\t\t\t\tVersion: version,\n\t\t\t\tLocation: location,\n\t\t\t\tPublished: updated,\n\t\t\t}\n\t\t\trs.AddResult(r)\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n<commit_msg>Fix a couple KDE derpers by checking for non-numeric version number<commit_after>\/\/\n\/\/ Copyright 2016-2018 Bryan T. Meyers <bmeyers@datadrake.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage kde\n\nimport (\n\t\"bytes\"\n\t\"compress\/bzip2\"\n\t\"fmt\"\n\t\"github.com\/DataDrake\/cuppa\/results\"\n \"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ ListingURL is the location of the KDE FTP file listing\n\tListingURL = \"https:\/\/download.kde.org\/ls-lR.bz2\"\n\t\/\/ ListingPrefix is the prefix of all paths in the KDE listing that is hidden by HTTP\n\tListingPrefix = \"\/srv\/archives\/ftp\/\"\n\t\/\/ SourceFormat4 is the string format for KDE sources with 4 pieces\n\tSourceFormat4 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n\t\/\/ SourceFormat5 is the string format for KDE sources with 5 pieces\n\tSourceFormat5 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n\t\/\/ SourceFormat6 is the string format for KDE sources with 6 pieces\n\tSourceFormat6 = \"https:\/\/download.kde.org\/%s\/%s\/%s\/%s\/%s\/%s-%s.tar.bz2\"\n)\n\n\/\/ TarballRegex matches KDE sources\nvar TarballRegex = regexp.MustCompile(\"https?:\/\/.*download.kde.org\/(.+)\")\n\n\/\/ Provider is the upstream provider interface for KDE\ntype Provider struct{}\n\nvar listing []byte\n\nfunc getListing() {\n\t\/\/ Query the API\n\tresp, err := http.Get(ListingURL)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\t\/\/ Translate Status Code\n if resp.StatusCode != 200 {\n return\n }\n\tbody := bzip2.NewReader(resp.Body)\n\tlisting, _ = ioutil.ReadAll(body)\n}\n\n\/\/ Latest finds the newest release for a KDE package\nfunc (c Provider) Latest(name string) (r *results.Result, s results.Status) {\n\trs, s := c.Releases(name)\n\tif s != results.OK {\n\t\treturn\n\t}\n\tr = rs.Last()\n\treturn\n}\n\n\/\/ Match checks to see if this provider can handle this kind of query\nfunc (c Provider) Match(query string) string {\n\tsm := TarballRegex.FindStringSubmatch(query)\n\tif len(sm) != 2 {\n\t\treturn \"\"\n\t}\n\tpieces := strings.Split(sm[1], \"\/\")\n\tif len(pieces) < 4 || len(pieces) > 6 {\n\t\treturn \"\"\n\t}\n\treturn sm[1]\n}\n\n\/\/ Name gives the name of this provider\nfunc (c Provider) Name() string {\n\treturn \"KDE\"\n}\n\n\/\/ Releases finds all matching releases for a KDE package\nfunc (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n if len(listing) == 0 {\n getListing()\n }\n buff := bytes.NewBuffer(listing)\n\tpieces := strings.Split(name, \"\/\")\n\tname = strings.Split(pieces[len(pieces)-1], \"-\")[0]\n\tvar searchPrefix string\n\tswitch len(pieces) {\n\tcase 4:\n\t\tsearchPrefix = ListingPrefix + strings.Join(pieces[0:len(pieces)-2], \"\/\") + \":\\n\"\n\tcase 5,6:\n\t\tsearchPrefix = ListingPrefix + strings.Join(pieces[0:len(pieces)-3], \"\/\") + \":\\n\"\n\t}\n\trs = results.NewResultSet(name)\n\tfor {\n\t\tline, err := buff.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif line != searchPrefix {\n\t\t\tcontinue\n\t\t}\n\t\tfor line != \"\\n\" {\n\t\t\tline, err = buff.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif line == \"\\n\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfields := strings.Fields(line)\n\t\t\tversion := fields[len(fields)-1]\n if version[0] > 57 || version[0] < 48 {\n continue\n }\n\t\t\tupdated, _ := time.Parse(\"2006-01-02 15:04\", strings.Join(fields[len(fields)-3:len(fields)-2], \" \"))\n\t\t\tvar location string\n\t\t\tswitch len(pieces) {\n\t\t\tcase 4:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat4, pieces[0], pieces[1], version, name, version)\n\t\t\tcase 5:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat5, pieces[0], pieces[1], version, pieces[3], name, version)\n\t\t\tcase 6:\n\t\t\t\tlocation = fmt.Sprintf(SourceFormat6, pieces[0], pieces[1], version, pieces[3], pieces[4], name, version)\n\t\t\t}\n\t\t\tr := &results.Result{\n\t\t\t\tName: name,\n\t\t\t\tVersion: version,\n\t\t\t\tLocation: location,\n\t\t\t\tPublished: updated,\n\t\t\t}\n\t\t\trs.AddResult(r)\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package pq\n\nimport \"sync\"\n\ntype Element struct {\n\tValue interface{}\n\tPriority int\n}\n\nfunc NewElement(value interface{}, priority int) (e *Element) {\n\treturn &Element{\n\t\tValue: value,\n\t\tPriority: priority,\n\t}\n}\n\nfunc (e Element) LessThan(hel Element) bool {\n\treturn e.Priority < hel.Priority\n}\n\ntype PQ struct {\n\tsync.Mutex\n\theap []Element\n\tlen int\n\tisMin bool\n}\n\nfunc MinPQConstructor() (pq *PQ) {\n\treturn &PQ{heap: make([]Element, 0), isMin: true}\n}\n\nfunc MaxPQConstructor() (pq *PQ) {\n\treturn &PQ{heap: make([]Element, 0), isMin: false}\n}\n\nfunc (pq *PQ) Len() int {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\treturn pq.len\n}\n\nfunc (pq *PQ) Insert(el Element) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tpq.heap = append(pq.heap, el)\n\tpq.len++\n\tpq.precolateUp(pq.len - 1)\n}\n\nfunc (pq *PQ) Extract() (el Element) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.len == 0 {\n\t\tpanic(\"Empty pq, cannot Extract.\")\n\t}\n\n\tel, pq.heap[0] = pq.heap[0], pq.heap[pq.len-1]\n\tpq.heap = pq.heap[:pq.len-1]\n\tpq.len--\n\tpq.precolateDown(0)\n\treturn\n}\n\nfunc (pq *PQ) Peek() (el Element) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.len == 0 {\n\t\tpanic(\"Empty pq, cannot Peek.\")\n\t}\n\treturn pq.heap[0]\n}\n\nfunc (pq *PQ) ChangePriority(index, priority int) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\toldEl := pq.heap[index]\n\tpq.heap[index].Priority = priority\n\tif pq.less(Element{Priority: priority}, oldEl) {\n\t\tpq.precolateUp(index)\n\t} else {\n\t\tpq.precolateDown(index)\n\t}\n}\n\nfunc (pq *PQ) precolateUp(index int) {\n\t\/\/ 上滤,新元素在堆中上滤直到找出正确位置\n\tneedUp, parent := index, index>>1\n\tfor needUp > 0 && pq.less(pq.heap[needUp], pq.heap[parent]) {\n\t\tpq.heap[parent], pq.heap[needUp] = pq.heap[needUp], pq.heap[parent]\n\t\tneedUp, parent = parent, parent>>1\n\t}\n}\n\nfunc (pq *PQ) precolateDown(index int) {\n\t\/\/ 下滤\n\tneedDown, child := index, index<<1+1\n\tfor needDown < pq.len && child < pq.len {\n\t\t\/\/ find min(leftChild, rightChild)\n\t\tif child+1 < pq.len && pq.less(pq.heap[child+1], pq.heap[child]) {\n\t\t\tchild++\n\t\t}\n\n\t\tif pq.less(pq.heap[needDown], pq.heap[child]) {\n\t\t\tbreak\n\t\t}\n\n\t\tpq.heap[needDown], pq.heap[child] = pq.heap[child], pq.heap[needDown]\n\t\tneedDown, child = child, needDown<<1+1\n\t}\n}\n\nfunc (pq *PQ) less(a, b Element) bool {\n\tif pq.isMin {\n\t\treturn a.LessThan(b)\n\t} else {\n\t\treturn b.LessThan(a)\n\t}\n}\n<commit_msg>use RWMutex instead of Mutex<commit_after>package pq\n\nimport \"sync\"\n\ntype Element struct {\n\tValue interface{}\n\tPriority int\n}\n\nfunc NewElement(value interface{}, priority int) (e *Element) {\n\treturn &Element{\n\t\tValue: value,\n\t\tPriority: priority,\n\t}\n}\n\nfunc (e Element) LessThan(hel Element) bool {\n\treturn e.Priority < hel.Priority\n}\n\ntype PQ struct {\n\tsync.RWMutex\n\theap []Element\n\tlen int\n\tisMin bool\n}\n\nfunc MinPQConstructor() (pq *PQ) {\n\treturn &PQ{heap: make([]Element, 0), isMin: true}\n}\n\nfunc MaxPQConstructor() (pq *PQ) {\n\treturn &PQ{heap: make([]Element, 0), isMin: false}\n}\n\nfunc (pq *PQ) Len() int {\n\tpq.RLock()\n\tdefer pq.RUnlock()\n\n\treturn pq.len\n}\n\nfunc (pq *PQ) Insert(el Element) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tpq.heap = append(pq.heap, el)\n\tpq.len++\n\tpq.precolateUp(pq.len - 1)\n}\n\nfunc (pq *PQ) Extract() (el Element) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.len == 0 {\n\t\tpanic(\"Empty pq, cannot Extract.\")\n\t}\n\n\tel, pq.heap[0] = pq.heap[0], pq.heap[pq.len-1]\n\tpq.heap = pq.heap[:pq.len-1]\n\tpq.len--\n\tpq.precolateDown(0)\n\treturn\n}\n\nfunc (pq *PQ) Peek() (el Element) {\n\tpq.RLock()\n\tdefer pq.RUnlock()\n\n\tif pq.len == 0 {\n\t\tpanic(\"Empty pq, cannot Peek.\")\n\t}\n\treturn pq.heap[0]\n}\n\nfunc (pq *PQ) ChangePriority(index, priority int) {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\toldEl := pq.heap[index]\n\tpq.heap[index].Priority = priority\n\tif pq.less(Element{Priority: priority}, oldEl) {\n\t\tpq.precolateUp(index)\n\t} else {\n\t\tpq.precolateDown(index)\n\t}\n}\n\nfunc (pq *PQ) precolateUp(index int) {\n\t\/\/ 上滤,新元素在堆中上滤直到找出正确位置\n\tneedUp, parent := index, index>>1\n\tfor needUp > 0 && pq.less(pq.heap[needUp], pq.heap[parent]) {\n\t\tpq.heap[parent], pq.heap[needUp] = pq.heap[needUp], pq.heap[parent]\n\t\tneedUp, parent = parent, parent>>1\n\t}\n}\n\nfunc (pq *PQ) precolateDown(index int) {\n\t\/\/ 下滤\n\tneedDown, child := index, index<<1+1\n\tfor needDown < pq.len && child < pq.len {\n\t\t\/\/ find min(leftChild, rightChild)\n\t\tif child+1 < pq.len && pq.less(pq.heap[child+1], pq.heap[child]) {\n\t\t\tchild++\n\t\t}\n\n\t\tif pq.less(pq.heap[needDown], pq.heap[child]) {\n\t\t\tbreak\n\t\t}\n\n\t\tpq.heap[needDown], pq.heap[child] = pq.heap[child], pq.heap[needDown]\n\t\tneedDown, child = child, needDown<<1+1\n\t}\n}\n\nfunc (pq *PQ) less(a, b Element) bool {\n\tif pq.isMin {\n\t\treturn a.LessThan(b)\n\t} else {\n\t\treturn b.LessThan(a)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package availability\n\nimport \"time\"\n\n\/\/ Year in nanoseconds.\nconst yearNs = 3.154e16 * time.Nanosecond\n\n\/\/ ByTime holds time based availability data.\n\/\/\n\/\/ formula: availability = uptime \/ ( uptime + downtime )\ntype ByTime struct {\n\tpercent float64\n\tuptime time.Duration\n\tdowntime time.Duration\n}\n\n\/\/ NewByTime creates time based availability.\nfunc NewByTime() *ByTime {\n\treturn &ByTime{downtime: yearNs}\n}\n\n\/\/ ByRequests holds aggregate based availability.\n\/\/\n\/\/ formula: availability = success \/ total\ntype ByRequests struct {\n\tpercent float64\n\tsuccess int\n\ttotal int\n}\n\n\/\/ NewByRequests creates aggregate based availability.\nfunc NewByRequests() *ByRequests {\n\treturn &ByRequests{}\n}\n<commit_msg>availability: add SetPercent to ByRequests<commit_after>package availability\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ Year in nanoseconds.\nconst yearNs = 3.154e16 * time.Nanosecond\n\n\/\/ ByTime holds time based availability data.\n\/\/\n\/\/ formula: availability = uptime \/ ( uptime + downtime )\ntype ByTime struct {\n\tpercent float64\n\tuptime time.Duration\n\tdowntime time.Duration\n}\n\n\/\/ NewByTime creates time based availability.\nfunc NewByTime() *ByTime {\n\treturn &ByTime{downtime: yearNs}\n}\n\n\/\/ ByRequests holds aggregate based availability.\n\/\/\n\/\/ formula: availability = success \/ total\ntype ByRequests struct {\n\tpercent float64\n\tsuccess int\n\ttotal int\n}\n\n\/\/ NewByRequests creates aggregate based availability.\nfunc NewByRequests() *ByRequests {\n\treturn &ByRequests{}\n}\n\n\/\/ SetPercent sets percent value.\nfunc (a *ByRequests) SetPercent(v float64) error {\n\n\tif v <= 0 || v > 100 {\n\n\t\treturn errors.New(\"invalid percent\")\n\t}\n\n\ta.percent = v\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmds\n\n\/\/ Tests to add:\n\/\/ basic login test\n\/\/ login with no auth service deployed\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/require\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\ttu \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/testutil\"\n)\n\nvar activateMut sync.Mutex\n\nfunc activateAuth(t *testing.T) {\n\tt.Helper()\n\tactivateMut.Lock()\n\tdefer activateMut.Unlock()\n\t\/\/ TODO(msteffen): Make sure client & server have the same version\n\n\t\/\/ Check if Pachyderm Enterprise is active -- if not, activate it\n\tcmd := tu.Cmd(\"pachctl\", \"enterprise\", \"get-state\")\n\tout, err := cmd.Output()\n\trequire.NoError(t, err)\n\tif string(out) != \"ACTIVE\" {\n\t\tcmd = tu.Cmd(\"pachctl\", \"enterprise\", \"activate\", tu.GetTestEnterpriseCode())\n\t\trequire.NoError(t, cmd.Run())\n\t}\n\n\t\/\/ Logout (to clear any expired tokens) and activate Pachyderm auth\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"logout\").Run())\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"activate\", \"--user=admin\").Run())\n}\n\nfunc deactivateAuth(t *testing.T) {\n\tt.Helper()\n\tactivateMut.Lock()\n\tdefer activateMut.Unlock()\n\n\t\/\/ Check if Pachyderm Auth is active -- if so, deactivate it\n\tif err := tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run(); err == nil {\n\t\trequire.NoError(t, tu.BashCmd(\"yes | pachctl auth deactivate\").Run())\n\t}\n}\n\nfunc TestAuthBasic(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\trequire.NoError(t, tu.BashCmd(`\n\t\techo \"\\n\" | pachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl list-repo \\\n\t\t\t| match {{.repo}}\n\t\tpachctl inspect-repo {{.repo}}\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"repo\", tu.UniqueString(\"TestAuthBasic-repo\"),\n\t).Run())\n}\n\nfunc TestWhoAmI(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl auth whoami | match {{.alice}}\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t).Run())\n}\n\nfunc TestCheckGetSet(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\t\/\/ Test both forms of the 'pachctl auth get' command, as well as 'pachctl auth check'\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl auth check owner {{.repo}}\n\t\tpachctl auth get {{.repo}} \\\n\t\t\t| match {{.alice}}\n\t\tpachctl auth get {{.bob}} {{.repo}} \\\n\t\t\t| match NONE\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"bob\", tu.UniqueString(\"bob\"),\n\t\t\"repo\", tu.UniqueString(\"TestGet-repo\"),\n\t).Run())\n\n\t\/\/ Test 'pachctl auth set'\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl auth set {{.bob}} reader {{.repo}}\n\t\tpachctl auth get {{.bob}} {{.repo}} \\\n\t\t\t| match READER\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"bob\", tu.UniqueString(\"bob\"),\n\t\t\"repo\", tu.UniqueString(\"TestGet-repo\"),\n\t).Run())\n}\n\nfunc TestAdmins(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\n\t\/\/ Modify the list of admins to replace 'admin' with 'admin2'\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin\"\n\t\tpachctl auth modify-admins --add admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin2\"\n\t\tpachctl auth modify-admins --remove admin\n\n\t\t# as 'admin' is a substr of 'admin2', use '^admin$' regex...\n\t\tpachctl auth list-admins \\\n\t\t\t| match -v \"^admin$\" \\\n\t\t\t| match \"^admin2$\"\n\t\t`).Run())\n\n\t\/\/ Now 'admin2' is the only admin. Login as admin2, and swap 'admin' back in\n\t\/\/ (so that deactivateAuth() runs), and call 'list-admin' (to make sure it\n\t\/\/ works for non-admins)\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin2\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth modify-admins --add admin --remove admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match -v \"admin2\" \\\n\t\t\t| match \"admin\"\n\t\t`).Run())\n}\n\nfunc TestModifyAdminsPropagateError(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\n\t\/\/ Add admin2, and then try to remove it along with a fake admin. Make sure we\n\t\/\/ get an error\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin\"\n\t\tpachctl auth modify-admins --add admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin2\"\n\n \t\t# cmd should fail\n\t\t! pachctl auth modify-admins --remove admin1,not_in_list\n\t\t`).Run())\n}\n\nfunc TestMain(m *testing.M) {\n\t\/\/ Preemptively deactivate Pachyderm auth (to avoid errors in early tests)\n\tif err := tu.BashCmd(\"pachctl auth login -u admin\").Run(); err == nil {\n\t\tif err := tu.BashCmd(\"yes | pachctl auth deactivate\").Run(); err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}\n\ttime.Sleep(5 * time.Second)\n\tbackoff.Retry(func() error {\n\t\tcmd := tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\")\n\t\tif cmd.Run() != nil {\n\t\t\treturn nil \/\/ success -- auth is deactivated\n\t\t}\n\t\treturn errors.New(\"auth not deactivated yet\")\n\t}, backoff.NewTestingBackOff())\n\tos.Exit(m.Run())\n}\n<commit_msg>Fix auth\/cmds\/cmds_test.go#TestAdmins<commit_after>package cmds\n\n\/\/ Tests to add:\n\/\/ basic login test\n\/\/ login with no auth service deployed\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/require\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\ttu \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/testutil\"\n)\n\nvar activateMut sync.Mutex\n\nfunc activateAuth(t *testing.T) {\n\tt.Helper()\n\tactivateMut.Lock()\n\tdefer activateMut.Unlock()\n\t\/\/ TODO(msteffen): Make sure client & server have the same version\n\n\t\/\/ Check if Pachyderm Enterprise is active -- if not, activate it\n\tcmd := tu.Cmd(\"pachctl\", \"enterprise\", \"get-state\")\n\tout, err := cmd.Output()\n\trequire.NoError(t, err)\n\tif string(out) != \"ACTIVE\" {\n\t\tcmd = tu.Cmd(\"pachctl\", \"enterprise\", \"activate\", tu.GetTestEnterpriseCode())\n\t\trequire.NoError(t, cmd.Run())\n\t}\n\n\t\/\/ Logout (to clear any expired tokens) and activate Pachyderm auth\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"logout\").Run())\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"activate\", \"--user=admin\").Run())\n}\n\nfunc deactivateAuth(t *testing.T) {\n\tt.Helper()\n\tactivateMut.Lock()\n\tdefer activateMut.Unlock()\n\n\t\/\/ Check if Pachyderm Auth is active -- if so, deactivate it\n\tif err := tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run(); err == nil {\n\t\trequire.NoError(t, tu.BashCmd(\"yes | pachctl auth deactivate\").Run())\n\t}\n}\n\nfunc TestAuthBasic(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\trequire.NoError(t, tu.BashCmd(`\n\t\techo \"\\n\" | pachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl list-repo \\\n\t\t\t| match {{.repo}}\n\t\tpachctl inspect-repo {{.repo}}\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"repo\", tu.UniqueString(\"TestAuthBasic-repo\"),\n\t).Run())\n}\n\nfunc TestWhoAmI(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl auth whoami | match {{.alice}}\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t).Run())\n}\n\nfunc TestCheckGetSet(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\t\/\/ Test both forms of the 'pachctl auth get' command, as well as 'pachctl auth check'\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl auth check owner {{.repo}}\n\t\tpachctl auth get {{.repo}} \\\n\t\t\t| match {{.alice}}\n\t\tpachctl auth get {{.bob}} {{.repo}} \\\n\t\t\t| match NONE\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"bob\", tu.UniqueString(\"bob\"),\n\t\t\"repo\", tu.UniqueString(\"TestGet-repo\"),\n\t).Run())\n\n\t\/\/ Test 'pachctl auth set'\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth login -u {{.alice}}\n\t\tpachctl create-repo {{.repo}}\n\t\tpachctl auth set {{.bob}} reader {{.repo}}\n\t\tpachctl auth get {{.bob}} {{.repo}} \\\n\t\t\t| match READER\n\t\t`,\n\t\t\"alice\", tu.UniqueString(\"alice\"),\n\t\t\"bob\", tu.UniqueString(\"bob\"),\n\t\t\"repo\", tu.UniqueString(\"TestGet-repo\"),\n\t).Run())\n}\n\nfunc TestAdmins(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\n\t\/\/ Modify the list of admins to replace 'admin' with 'admin2'\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin\"\n\t\tpachctl auth modify-admins --add admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin2\"\n\t\tpachctl auth modify-admins --remove admin\n\n\t\t# as 'admin' is a substr of 'admin2', use '^admin$' regex...\n\t\tpachctl auth list-admins \\\n\t\t\t| match -v \"^github:admin$\" \\\n\t\t\t| match \"^github:admin2$\"\n\t\t`).Run())\n\n\t\/\/ Now 'admin2' is the only admin. Login as admin2, and swap 'admin' back in\n\t\/\/ (so that deactivateAuth() runs), and call 'list-admin' (to make sure it\n\t\/\/ works for non-admins)\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin2\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth modify-admins --add admin --remove admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match -v \"admin2\" \\\n\t\t\t| match \"admin\"\n\t\t`).Run())\n}\n\nfunc TestModifyAdminsPropagateError(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tactivateAuth(t)\n\tdefer deactivateAuth(t)\n\n\t\/\/ Add admin2, and then try to remove it along with a fake admin. Make sure we\n\t\/\/ get an error\n\trequire.NoError(t, tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\").Run())\n\trequire.NoError(t, tu.BashCmd(`\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin\"\n\t\tpachctl auth modify-admins --add admin2\n\t\tpachctl auth list-admins \\\n\t\t\t| match \"admin2\"\n\n \t\t# cmd should fail\n\t\t! pachctl auth modify-admins --remove admin1,not_in_list\n\t\t`).Run())\n}\n\nfunc TestMain(m *testing.M) {\n\t\/\/ Preemptively deactivate Pachyderm auth (to avoid errors in early tests)\n\tif err := tu.BashCmd(\"pachctl auth login -u admin\").Run(); err == nil {\n\t\tif err := tu.BashCmd(\"yes | pachctl auth deactivate\").Run(); err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}\n\ttime.Sleep(5 * time.Second)\n\tbackoff.Retry(func() error {\n\t\tcmd := tu.Cmd(\"pachctl\", \"auth\", \"login\", \"-u\", \"admin\")\n\t\tif cmd.Run() != nil {\n\t\t\treturn nil \/\/ success -- auth is deactivated\n\t\t}\n\t\treturn errors.New(\"auth not deactivated yet\")\n\t}, backoff.NewTestingBackOff())\n\tos.Exit(m.Run())\n}\n<|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/config\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/version\"\n\tdb \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/db\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"github.com\/segmentio\/analytics-go\"\n\t\"go.pedge.io\/lion\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/metadata\"\n\tkube \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n)\n\n\/\/Reporter is used to submit user & cluster metrics to segment\ntype Reporter struct {\n\tsegmentClient *analytics.Client\n\tclusterID string\n\tkubeClient *kube.Client\n\tdbClient *gorethink.Session\n\tpfsDbName string\n\tppsDbName string\n}\n\n\/\/ NewReporter creates a new reporter and kicks off the loop to report cluster\n\/\/ metrics\nfunc NewReporter(clusterID string, kubeClient *kube.Client, address string, pfsDbName string, ppsDbName string) *Reporter {\n\n\tdbClient, err := db.DbConnect(address)\n\tif err != nil {\n\t\tlion.Errorf(\"error connected to DB when reporting metrics: %v\\n\", err)\n\t\treturn nil\n\t}\n\treporter := &Reporter{\n\t\tsegmentClient: newPersistentClient(),\n\t\tclusterID: clusterID,\n\t\tkubeClient: kubeClient,\n\t\tdbClient: dbClient,\n\t\tpfsDbName: pfsDbName,\n\t\tppsDbName: ppsDbName,\n\t}\n\tgo reporter.reportClusterMetrics()\n\treturn reporter\n}\n\n\/\/ReportUserAction pushes the action into a queue for reporting,\n\/\/ and reports the start, finish, and error conditions\nfunc ReportUserAction(ctx context.Context, r *Reporter, action string) func(time.Time, error) {\n\tif r == nil {\n\t\treturn func(time.Time, error) {}\n\t}\n\tr.reportUserAction(ctx, fmt.Sprintf(\"%vStarted\", action), nil)\n\treturn func(start time.Time, err error) {\n\t\tif err == nil {\n\t\t\tr.reportUserAction(ctx, fmt.Sprintf(\"%vFinished\", action), time.Since(start).Seconds())\n\t\t} else {\n\t\t\tr.reportUserAction(ctx, fmt.Sprintf(\"%vErrored\", action), err.Error())\n\t\t}\n\t}\n}\n\nfunc getKeyFromMD(md metadata.MD, key string) (string, error) {\n\tif md[key] != nil && len(md[key]) > 0 {\n\t\treturn md[key][0], nil\n\t}\n\treturn \"\", fmt.Errorf(\"error extracting userid from metadata. userid is empty\\n\")\n}\n\nfunc (r *Reporter) reportUserAction(ctx context.Context, action string, value interface{}) {\n\tmd, ok := metadata.FromContext(ctx)\n\tif ok {\n\t\t\/\/ metadata API downcases all the key names\n\t\tuserID, err := getKeyFromMD(md, \"userid\")\n\t\tif err != nil {\n\t\t\tlion.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\tprefix, err := getKeyFromMD(md, \"prefix\")\n\t\tif err != nil {\n\t\t\tlion.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\treportUserMetricsToSegment(\n\t\t\tr.segmentClient,\n\t\t\tuserID,\n\t\t\tprefix,\n\t\t\taction,\n\t\t\tvalue,\n\t\t\tr.clusterID,\n\t\t)\n\t} else {\n\t\tlion.Errorf(\"Error extracting userid metadata from context: %v\\n\", ctx)\n\t}\n}\n\n\/\/ ReportAndFlushUserAction immediately reports the metric\n\/\/ It is used in the few places we need to report metrics from the client.\n\/\/ It handles reporting the start, finish, and error conditions of the action\nfunc ReportAndFlushUserAction(action string) func(time.Time, error) {\n\treportAndFlushUserAction(fmt.Sprintf(\"%vStarted\", action), nil)\n\treturn func(start time.Time, err error) {\n\t\tif err == nil {\n\t\t\treportAndFlushUserAction(fmt.Sprintf(\"%vFinished\", action), time.Since(start).Seconds())\n\t\t} else {\n\t\t\treportAndFlushUserAction(fmt.Sprintf(\"%vErrored\", action), err.Error())\n\t\t}\n\t}\n}\n\nfunc reportAndFlushUserAction(action string, value interface{}) {\n\tclient := newSegmentClient()\n\tdefer client.Close()\n\tcfg, err := config.Read()\n\tif err != nil {\n\t\tlion.Errorf(\"Error reading userid from ~\/.pachyderm\/config: %v\\n\", err)\n\t\t\/\/ metrics errors are non fatal\n\t\treturn\n\t}\n\treportUserMetricsToSegment(client, cfg.UserID, \"user\", action, value, \"\")\n}\n\nfunc (r *Reporter) dbMetrics(metrics *Metrics) {\n\tcursor, err := gorethink.Object(\n\t\t\"Repos\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Repos\").Count(),\n\t\t\"Commits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Count(),\n\t\t\"ArchivedCommits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Filter(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"Archived\": true,\n\t\t\t},\n\t\t).Count(),\n\t\t\"CancelledCommits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Filter(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"Cancelled\": true,\n\t\t\t},\n\t\t).Count(),\n\t\t\"Files\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Diffs\").Group(\"Path\").Ungroup().Count(),\n\t\t\"Jobs\",\n\t\tgorethink.DB(r.ppsDbName).Table(\"JobInfos\").Count(),\n\t\t\"Pipelines\",\n\t\tgorethink.DB(r.ppsDbName).Table(\"PipelineInfos\").Count(),\n\t).Run(r.dbClient)\n\tif err != nil {\n\t\tlion.Errorf(\"Error Fetching Metrics:%+v\", err)\n\t}\n\tcursor.One(&metrics)\n}\n\nfunc (r *Reporter) reportClusterMetrics() {\n\tfor {\n\t\ttime.Sleep(reportingInterval)\n\t\tmetrics := &Metrics{}\n\t\tr.dbMetrics(metrics)\n\t\texternalMetrics(r.kubeClient, metrics)\n\t\tmetrics.ClusterID = r.clusterID\n\t\tmetrics.PodID = uuid.NewWithoutDashes()\n\t\tmetrics.Version = version.PrettyPrintVersion(version.Version)\n\t\treportClusterMetricsToSegment(r.segmentClient, metrics)\n\t}\n}\n<commit_msg>Report 0 value for started actions<commit_after>package metrics\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/config\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/version\"\n\tdb \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/db\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"github.com\/segmentio\/analytics-go\"\n\t\"go.pedge.io\/lion\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/metadata\"\n\tkube \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n)\n\n\/\/Reporter is used to submit user & cluster metrics to segment\ntype Reporter struct {\n\tsegmentClient *analytics.Client\n\tclusterID string\n\tkubeClient *kube.Client\n\tdbClient *gorethink.Session\n\tpfsDbName string\n\tppsDbName string\n}\n\n\/\/ NewReporter creates a new reporter and kicks off the loop to report cluster\n\/\/ metrics\nfunc NewReporter(clusterID string, kubeClient *kube.Client, address string, pfsDbName string, ppsDbName string) *Reporter {\n\n\tdbClient, err := db.DbConnect(address)\n\tif err != nil {\n\t\tlion.Errorf(\"error connected to DB when reporting metrics: %v\\n\", err)\n\t\treturn nil\n\t}\n\treporter := &Reporter{\n\t\tsegmentClient: newPersistentClient(),\n\t\tclusterID: clusterID,\n\t\tkubeClient: kubeClient,\n\t\tdbClient: dbClient,\n\t\tpfsDbName: pfsDbName,\n\t\tppsDbName: ppsDbName,\n\t}\n\tgo reporter.reportClusterMetrics()\n\treturn reporter\n}\n\n\/\/ReportUserAction pushes the action into a queue for reporting,\n\/\/ and reports the start, finish, and error conditions\nfunc ReportUserAction(ctx context.Context, r *Reporter, action string) func(time.Time, error) {\n\tif r == nil {\n\t\treturn func(time.Time, error) {}\n\t}\n\t\/\/ If we report nil, segment sees it, but mixpanel omits the field\n\tr.reportUserAction(ctx, fmt.Sprintf(\"%vStarted\", action), 0)\n\treturn func(start time.Time, err error) {\n\t\tif err == nil {\n\t\t\tr.reportUserAction(ctx, fmt.Sprintf(\"%vFinished\", action), time.Since(start).Seconds())\n\t\t} else {\n\t\t\tr.reportUserAction(ctx, fmt.Sprintf(\"%vErrored\", action), err.Error())\n\t\t}\n\t}\n}\n\nfunc getKeyFromMD(md metadata.MD, key string) (string, error) {\n\tif md[key] != nil && len(md[key]) > 0 {\n\t\treturn md[key][0], nil\n\t}\n\treturn \"\", fmt.Errorf(\"error extracting userid from metadata. userid is empty\\n\")\n}\n\nfunc (r *Reporter) reportUserAction(ctx context.Context, action string, value interface{}) {\n\tmd, ok := metadata.FromContext(ctx)\n\tif ok {\n\t\t\/\/ metadata API downcases all the key names\n\t\tuserID, err := getKeyFromMD(md, \"userid\")\n\t\tif err != nil {\n\t\t\tlion.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\tprefix, err := getKeyFromMD(md, \"prefix\")\n\t\tif err != nil {\n\t\t\tlion.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\treportUserMetricsToSegment(\n\t\t\tr.segmentClient,\n\t\t\tuserID,\n\t\t\tprefix,\n\t\t\taction,\n\t\t\tvalue,\n\t\t\tr.clusterID,\n\t\t)\n\t} else {\n\t\tlion.Errorf(\"Error extracting userid metadata from context: %v\\n\", ctx)\n\t}\n}\n\n\/\/ ReportAndFlushUserAction immediately reports the metric\n\/\/ It is used in the few places we need to report metrics from the client.\n\/\/ It handles reporting the start, finish, and error conditions of the action\nfunc ReportAndFlushUserAction(action string) func(time.Time, error) {\n\t\/\/ If we report nil, segment sees it, but mixpanel omits the field\n\treportAndFlushUserAction(fmt.Sprintf(\"%vStarted\", action), 0)\n\treturn func(start time.Time, err error) {\n\t\tif err == nil {\n\t\t\treportAndFlushUserAction(fmt.Sprintf(\"%vFinished\", action), time.Since(start).Seconds())\n\t\t} else {\n\t\t\treportAndFlushUserAction(fmt.Sprintf(\"%vErrored\", action), err.Error())\n\t\t}\n\t}\n}\n\nfunc reportAndFlushUserAction(action string, value interface{}) {\n\tclient := newSegmentClient()\n\tdefer client.Close()\n\tcfg, err := config.Read()\n\tif err != nil {\n\t\tlion.Errorf(\"Error reading userid from ~\/.pachyderm\/config: %v\\n\", err)\n\t\t\/\/ metrics errors are non fatal\n\t\treturn\n\t}\n\treportUserMetricsToSegment(client, cfg.UserID, \"user\", action, value, \"\")\n}\n\nfunc (r *Reporter) dbMetrics(metrics *Metrics) {\n\tcursor, err := gorethink.Object(\n\t\t\"Repos\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Repos\").Count(),\n\t\t\"Commits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Count(),\n\t\t\"ArchivedCommits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Filter(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"Archived\": true,\n\t\t\t},\n\t\t).Count(),\n\t\t\"CancelledCommits\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Commits\").Filter(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"Cancelled\": true,\n\t\t\t},\n\t\t).Count(),\n\t\t\"Files\",\n\t\tgorethink.DB(r.pfsDbName).Table(\"Diffs\").Group(\"Path\").Ungroup().Count(),\n\t\t\"Jobs\",\n\t\tgorethink.DB(r.ppsDbName).Table(\"JobInfos\").Count(),\n\t\t\"Pipelines\",\n\t\tgorethink.DB(r.ppsDbName).Table(\"PipelineInfos\").Count(),\n\t).Run(r.dbClient)\n\tif err != nil {\n\t\tlion.Errorf(\"Error Fetching Metrics:%+v\", err)\n\t}\n\tcursor.One(&metrics)\n}\n\nfunc (r *Reporter) reportClusterMetrics() {\n\tfor {\n\t\ttime.Sleep(reportingInterval)\n\t\tmetrics := &Metrics{}\n\t\tr.dbMetrics(metrics)\n\t\texternalMetrics(r.kubeClient, metrics)\n\t\tmetrics.ClusterID = r.clusterID\n\t\tmetrics.PodID = uuid.NewWithoutDashes()\n\t\tmetrics.Version = version.PrettyPrintVersion(version.Version)\n\t\treportClusterMetricsToSegment(r.segmentClient, metrics)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package node\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/Dataman-Cloud\/swan\/src\/agent\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/apiserver\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/config\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/event\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/manager\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/swancontext\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/types\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/utils\/httpclient\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/coreos\/etcd\/pkg\/fileutil\"\n\t\"github.com\/twinj\/uuid\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tNodeIDFileName = \"\/ID\"\n\tJoinRetryInterval = 5\n)\n\ntype Node struct {\n\tID string\n\tagent *agent.Agent \/\/ hold reference to agent, take function when in agent mode\n\tmanager *manager.Manager \/\/ hold a instance of manager, make logic taking place\n\tctx context.Context\n\tjoinRetryInterval time.Duration\n\tRaftID uint64\n\tstopC chan struct{}\n}\n\nfunc NewNode(config config.SwanConfig) (*Node, error) {\n\tnodeID, err := loadOrCreateNodeID(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ init swanconfig instance\n\tconfig.NodeID = nodeID\n\t_ = swancontext.NewSwanContext(config, event.New())\n\n\tif !swancontext.IsManager() && !swancontext.IsAgent() {\n\t\treturn nil, errors.New(\"node must be started with at least one role in [manager,agent]\")\n\t}\n\n\tnode := &Node{\n\t\tID: nodeID,\n\t\tjoinRetryInterval: time.Second * JoinRetryInterval,\n\t\tstopC: make(chan struct{}, 1),\n\t}\n\n\terr = os.MkdirAll(config.DataDir+\"\/\"+nodeID, 0644)\n\tif err != nil {\n\t\tlogrus.Errorf(\"os.MkdirAll got error: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tdb, err := bolt.Open(config.DataDir+\"\/\"+nodeID+\"\/swan.db\", 0644, nil)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Init bolt store failed:%s\", err)\n\t\treturn nil, err\n\t}\n\n\traftID, err := loadOrCreateRaftID(db)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Init raft ID failed:%s\", err)\n\t\treturn nil, err\n\t}\n\tnode.RaftID = raftID\n\n\tif swancontext.IsManager() {\n\t\tm, err := manager.New(db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode.manager = m\n\t}\n\n\tif swancontext.IsAgent() {\n\t\ta, err := agent.New()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode.agent = a\n\t}\n\n\tnodeApi := &NodeApi{node}\n\tapiserver.Install(swancontext.Instance().ApiServer, nodeApi)\n\n\treturn node, nil\n}\n\nfunc loadOrCreateNodeID(swanConfig config.SwanConfig) (string, error) {\n\tnodeIDFile := swanConfig.DataDir + NodeIDFileName\n\tif !fileutil.Exist(nodeIDFile) {\n\t\tos.MkdirAll(swanConfig.DataDir, 0700)\n\n\t\tnodeID := uuid.NewV4().String()\n\t\tidFile, err := os.OpenFile(nodeIDFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif _, err = idFile.WriteString(nodeID); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlogrus.Infof(\"starting swan node, ID file was not found started with new ID: %s\", nodeID)\n\t\treturn nodeID, nil\n\n\t} else {\n\t\tidFile, err := os.Open(nodeIDFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnodeID, err := ioutil.ReadAll(idFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlogrus.Infof(\"starting swan node, ID file was found started with ID: %s\", string(nodeID))\n\t\treturn string(nodeID), nil\n\t}\n}\n\n\/\/ node start from here\n\/\/ - 1, start manager if needed\n\/\/ - 2, start agent if needed\n\/\/ - 3, agent join to managers if needed\n\/\/ - 4, start the API server, both for agent and client\n\/\/ - 5, enter loop, wait for error or ctx.Done\nfunc (n *Node) Start(ctx context.Context) error {\n\terrChan := make(chan error, 1)\n\n\tswanConfig := swancontext.Instance().Config\n\tnodeInfo := types.Node{\n\t\tID: n.ID,\n\t\tAdvertiseAddr: swanConfig.AdvertiseAddr,\n\t\tListenAddr: swanConfig.ListenAddr,\n\t\tRaftListenAddr: swanConfig.RaftListenAddr,\n\t\tRaftAdvertiseAddr: swanConfig.RaftAdvertiseAddr,\n\t\tRole: types.NodeRole(swanConfig.Mode),\n\t\tRaftID: n.RaftID,\n\t}\n\n\tif swancontext.IsManager() {\n\t\tgo func() {\n\t\t\tif swancontext.IsNewCluster() {\n\t\t\t\terrChan <- n.runManager(ctx, n.RaftID, []types.Node{nodeInfo}, true)\n\t\t\t} else {\n\t\t\t\texistedNodes, err := n.JoinAsManager(nodeInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t}\n\n\t\t\t\terrChan <- n.runManager(ctx, n.RaftID, existedNodes, false)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif swancontext.IsAgent() {\n\t\tgo func() {\n\t\t\terrChan <- n.runAgent(ctx)\n\t\t}()\n\n\t\tgo func() {\n\t\t\terr := n.JoinAsAgent(nodeInfo)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\terrChan <- swancontext.Instance().ApiServer.Start()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase err := <-errChan:\n\t\t\treturn err\n\t\tcase <-n.stopC:\n\t\t\tn.Stop()\n\t\t\treturn nil\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\nfunc (n *Node) runAgent(ctx context.Context) error {\n\tagentCtx, cancel := context.WithCancel(ctx)\n\tn.agent.CancelFunc = cancel\n\treturn n.agent.Start(agentCtx)\n}\n\nfunc (n *Node) runManager(ctx context.Context, raftID uint64, peers []types.Node, isNewCluster bool) error {\n\tmanagerCtx, cancel := context.WithCancel(ctx)\n\tn.manager.CancelFunc = cancel\n\treturn n.manager.Start(managerCtx, raftID, peers, isNewCluster)\n}\n\n\/\/ node stop\nfunc (n *Node) Stop() {\n\tn.agent.Stop()\n\tn.manager.Stop()\n}\n\nfunc (n *Node) JoinAsAgent(nodeInfo types.Node) error {\n\tswanConfig := swancontext.Instance().Config\n\tif len(swanConfig.JoinAddrs) == 0 {\n\t\treturn errors.New(\"start agent failed. Error: joinAddrs must be no empty\")\n\t}\n\n\tfor _, managerAddr := range swanConfig.JoinAddrs {\n\t\tregisterAddr := \"http:\/\/\" + managerAddr + config.API_PREFIX + \"\/nodes\"\n\t\t_, err := httpclient.NewDefaultClient().POST(context.TODO(), registerAddr, nil, nodeInfo, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"register to %s got error: %s, try again after %d seconds\", registerAddr, err.Error(), JoinRetryInterval)\n\t\t}\n\n\t\tif err == nil {\n\t\t\tlogrus.Infof(\"agent register to manager success with managerAddr: %s\", managerAddr)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\ttime.Sleep(n.joinRetryInterval)\n\tn.JoinAsAgent(nodeInfo)\n\treturn nil\n}\n\nfunc (n *Node) JoinAsManager(nodeInfo types.Node) ([]types.Node, error) {\n\tswanConfig := swancontext.Instance().Config\n\tif len(swanConfig.JoinAddrs) == 0 {\n\t\treturn nil, errors.New(\"start agent failed. Error: joinAddrs must be no empty\")\n\t}\n\n\tfor _, managerAddr := range swanConfig.JoinAddrs {\n\t\tregisterAddr := \"http:\/\/\" + managerAddr + config.API_PREFIX + \"\/nodes\"\n\t\tresp, err := httpclient.NewDefaultClient().POST(context.TODO(), registerAddr, nil, nodeInfo, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"register to %s got error: %s\", registerAddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tvar nodes []types.Node\n\t\tif err := json.Unmarshal(resp, &nodes); err != nil {\n\t\t\tlogrus.Errorf(\"register to %s got error: %s\", registerAddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tvar managerNodes []types.Node\n\t\tfor _, existedNode := range nodes {\n\t\t\tif existedNode.IsManager() {\n\t\t\t\tmanagerNodes = append(managerNodes, existedNode)\n\t\t\t}\n\t\t}\n\n\t\treturn managerNodes, nil\n\t}\n\n\treturn nil, errors.New(\"add manager failed\")\n}\n\nfunc loadOrCreateRaftID(db *bolt.DB) (uint64, error) {\n\tvar raftID uint64\n\ttx, err := db.Begin(true)\n\tif err != nil {\n\t\treturn raftID, err\n\t}\n\tdefer tx.Commit()\n\n\tvar (\n\t\traftIDBukctName = []byte(\"raftnode\")\n\t\traftIDDataKey = []byte(\"raftid\")\n\t)\n\traftIDBkt := tx.Bucket(raftIDBukctName)\n\tif raftIDBkt == nil {\n\t\traftIDBkt, err = tx.CreateBucketIfNotExists(raftIDBukctName)\n\t\tif err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\n\t\traftID = uint64(rand.Int63()) + 1\n\t\tif err := raftIDBkt.Put(raftIDDataKey, []byte(strconv.FormatUint(raftID, 10))); err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\t\tlogrus.Infof(\"raft ID was not found create a new raftID %x\", raftID)\n\t\treturn raftID, nil\n\t} else {\n\t\traftID_ := raftIDBkt.Get(raftIDDataKey)\n\t\traftID, err = strconv.ParseUint(string(raftID_), 10, 64)\n\t\tif err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\n\t\treturn raftID, nil\n\t}\n}\n<commit_msg>fix bug: panic when remove manager node<commit_after>package node\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/Dataman-Cloud\/swan\/src\/agent\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/apiserver\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/config\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/event\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/manager\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/swancontext\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/types\"\n\t\"github.com\/Dataman-Cloud\/swan\/src\/utils\/httpclient\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/coreos\/etcd\/pkg\/fileutil\"\n\t\"github.com\/twinj\/uuid\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tNodeIDFileName = \"\/ID\"\n\tJoinRetryInterval = 5\n)\n\ntype Node struct {\n\tID string\n\tagent *agent.Agent \/\/ hold reference to agent, take function when in agent mode\n\tmanager *manager.Manager \/\/ hold a instance of manager, make logic taking place\n\tctx context.Context\n\tjoinRetryInterval time.Duration\n\tRaftID uint64\n\tstopC chan struct{}\n}\n\nfunc NewNode(config config.SwanConfig) (*Node, error) {\n\tnodeID, err := loadOrCreateNodeID(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ init swanconfig instance\n\tconfig.NodeID = nodeID\n\t_ = swancontext.NewSwanContext(config, event.New())\n\n\tif !swancontext.IsManager() && !swancontext.IsAgent() {\n\t\treturn nil, errors.New(\"node must be started with at least one role in [manager,agent]\")\n\t}\n\n\tnode := &Node{\n\t\tID: nodeID,\n\t\tjoinRetryInterval: time.Second * JoinRetryInterval,\n\t\tstopC: make(chan struct{}, 1),\n\t}\n\n\terr = os.MkdirAll(config.DataDir+\"\/\"+nodeID, 0644)\n\tif err != nil {\n\t\tlogrus.Errorf(\"os.MkdirAll got error: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tdb, err := bolt.Open(config.DataDir+\"\/\"+nodeID+\"\/swan.db\", 0644, nil)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Init bolt store failed:%s\", err)\n\t\treturn nil, err\n\t}\n\n\traftID, err := loadOrCreateRaftID(db)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Init raft ID failed:%s\", err)\n\t\treturn nil, err\n\t}\n\tnode.RaftID = raftID\n\n\tif swancontext.IsManager() {\n\t\tm, err := manager.New(db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode.manager = m\n\t}\n\n\tif swancontext.IsAgent() {\n\t\ta, err := agent.New()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode.agent = a\n\t}\n\n\tnodeApi := &NodeApi{node}\n\tapiserver.Install(swancontext.Instance().ApiServer, nodeApi)\n\n\treturn node, nil\n}\n\nfunc loadOrCreateNodeID(swanConfig config.SwanConfig) (string, error) {\n\tnodeIDFile := swanConfig.DataDir + NodeIDFileName\n\tif !fileutil.Exist(nodeIDFile) {\n\t\tos.MkdirAll(swanConfig.DataDir, 0700)\n\n\t\tnodeID := uuid.NewV4().String()\n\t\tidFile, err := os.OpenFile(nodeIDFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif _, err = idFile.WriteString(nodeID); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlogrus.Infof(\"starting swan node, ID file was not found started with new ID: %s\", nodeID)\n\t\treturn nodeID, nil\n\n\t} else {\n\t\tidFile, err := os.Open(nodeIDFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnodeID, err := ioutil.ReadAll(idFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlogrus.Infof(\"starting swan node, ID file was found started with ID: %s\", string(nodeID))\n\t\treturn string(nodeID), nil\n\t}\n}\n\n\/\/ node start from here\n\/\/ - 1, start manager if needed\n\/\/ - 2, start agent if needed\n\/\/ - 3, agent join to managers if needed\n\/\/ - 4, start the API server, both for agent and client\n\/\/ - 5, enter loop, wait for error or ctx.Done\nfunc (n *Node) Start(ctx context.Context) error {\n\terrChan := make(chan error, 1)\n\n\tswanConfig := swancontext.Instance().Config\n\tnodeInfo := types.Node{\n\t\tID: n.ID,\n\t\tAdvertiseAddr: swanConfig.AdvertiseAddr,\n\t\tListenAddr: swanConfig.ListenAddr,\n\t\tRaftListenAddr: swanConfig.RaftListenAddr,\n\t\tRaftAdvertiseAddr: swanConfig.RaftAdvertiseAddr,\n\t\tRole: types.NodeRole(swanConfig.Mode),\n\t\tRaftID: n.RaftID,\n\t}\n\n\tif swancontext.IsManager() {\n\t\tgo func() {\n\t\t\tif swancontext.IsNewCluster() {\n\t\t\t\terrChan <- n.runManager(ctx, n.RaftID, []types.Node{nodeInfo}, true)\n\t\t\t} else {\n\t\t\t\texistedNodes, err := n.JoinAsManager(nodeInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t}\n\n\t\t\t\terrChan <- n.runManager(ctx, n.RaftID, existedNodes, false)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif swancontext.IsAgent() {\n\t\tgo func() {\n\t\t\terrChan <- n.runAgent(ctx)\n\t\t}()\n\n\t\tgo func() {\n\t\t\terr := n.JoinAsAgent(nodeInfo)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\terrChan <- swancontext.Instance().ApiServer.Start()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase err := <-errChan:\n\t\t\treturn err\n\t\tcase <-n.stopC:\n\t\t\tn.Stop()\n\t\t\treturn nil\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n}\n\nfunc (n *Node) runAgent(ctx context.Context) error {\n\tagentCtx, cancel := context.WithCancel(ctx)\n\tn.agent.CancelFunc = cancel\n\treturn n.agent.Start(agentCtx)\n}\n\nfunc (n *Node) runManager(ctx context.Context, raftID uint64, peers []types.Node, isNewCluster bool) error {\n\tmanagerCtx, cancel := context.WithCancel(ctx)\n\tn.manager.CancelFunc = cancel\n\treturn n.manager.Start(managerCtx, raftID, peers, isNewCluster)\n}\n\n\/\/ node stop\nfunc (n *Node) Stop() {\n\tif swancontext.IsAgent() {\n\t\tn.agent.Stop()\n\t}\n\n\tif swancontext.IsManager() {\n\t\tn.manager.Stop()\n\t}\n}\n\nfunc (n *Node) JoinAsAgent(nodeInfo types.Node) error {\n\tswanConfig := swancontext.Instance().Config\n\tif len(swanConfig.JoinAddrs) == 0 {\n\t\treturn errors.New(\"start agent failed. Error: joinAddrs must be no empty\")\n\t}\n\n\tfor _, managerAddr := range swanConfig.JoinAddrs {\n\t\tregisterAddr := \"http:\/\/\" + managerAddr + config.API_PREFIX + \"\/nodes\"\n\t\t_, err := httpclient.NewDefaultClient().POST(context.TODO(), registerAddr, nil, nodeInfo, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"register to %s got error: %s, try again after %d seconds\", registerAddr, err.Error(), JoinRetryInterval)\n\t\t}\n\n\t\tif err == nil {\n\t\t\tlogrus.Infof(\"agent register to manager success with managerAddr: %s\", managerAddr)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\ttime.Sleep(n.joinRetryInterval)\n\tn.JoinAsAgent(nodeInfo)\n\treturn nil\n}\n\nfunc (n *Node) JoinAsManager(nodeInfo types.Node) ([]types.Node, error) {\n\tswanConfig := swancontext.Instance().Config\n\tif len(swanConfig.JoinAddrs) == 0 {\n\t\treturn nil, errors.New(\"start agent failed. Error: joinAddrs must be no empty\")\n\t}\n\n\tfor _, managerAddr := range swanConfig.JoinAddrs {\n\t\tregisterAddr := \"http:\/\/\" + managerAddr + config.API_PREFIX + \"\/nodes\"\n\t\tresp, err := httpclient.NewDefaultClient().POST(context.TODO(), registerAddr, nil, nodeInfo, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"register to %s got error: %s\", registerAddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tvar nodes []types.Node\n\t\tif err := json.Unmarshal(resp, &nodes); err != nil {\n\t\t\tlogrus.Errorf(\"register to %s got error: %s\", registerAddr, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tvar managerNodes []types.Node\n\t\tfor _, existedNode := range nodes {\n\t\t\tif existedNode.IsManager() {\n\t\t\t\tmanagerNodes = append(managerNodes, existedNode)\n\t\t\t}\n\t\t}\n\n\t\treturn managerNodes, nil\n\t}\n\n\treturn nil, errors.New(\"add manager failed\")\n}\n\nfunc loadOrCreateRaftID(db *bolt.DB) (uint64, error) {\n\tvar raftID uint64\n\ttx, err := db.Begin(true)\n\tif err != nil {\n\t\treturn raftID, err\n\t}\n\tdefer tx.Commit()\n\n\tvar (\n\t\traftIDBukctName = []byte(\"raftnode\")\n\t\traftIDDataKey = []byte(\"raftid\")\n\t)\n\traftIDBkt := tx.Bucket(raftIDBukctName)\n\tif raftIDBkt == nil {\n\t\traftIDBkt, err = tx.CreateBucketIfNotExists(raftIDBukctName)\n\t\tif err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\n\t\traftID = uint64(rand.Int63()) + 1\n\t\tif err := raftIDBkt.Put(raftIDDataKey, []byte(strconv.FormatUint(raftID, 10))); err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\t\tlogrus.Infof(\"raft ID was not found create a new raftID %x\", raftID)\n\t\treturn raftID, nil\n\t} else {\n\t\traftID_ := raftIDBkt.Get(raftIDDataKey)\n\t\traftID, err = strconv.ParseUint(string(raftID_), 10, 64)\n\t\tif err != nil {\n\t\t\treturn raftID, err\n\t\t}\n\n\t\treturn raftID, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/ TODO: rename to SchemaMan Type?\ntype DatamanType string\n\n\/\/ DatamanType is a method for describing the golang type in schema\n\/\/ This allows us to treat everything as interfaces{} in most of the code yet\n\/\/ still be in a strongly typed language\n\nconst (\n\tDocument DatamanType = \"document\"\n\tString = \"string\" \/\/ max len 4096\n\tText = \"text\"\n\t\/\/ We should support converting anything to an int that doesn't lose data\n\tInt = \"int\"\n\t\/\/ TODO: int64\n\t\/\/ TODO: uint\n\t\/\/ TODO: uint64\n\tBool = \"bool\"\n\t\/\/ TODO: actually implement\n\tDateTime = \"datetime\"\n)\n\n\/\/ TODO: have this register the type? Right now this assumes this is in-sync with field_type_internal.go (which is bad to do)\nfunc (f DatamanType) ToFieldType() *FieldType {\n\treturn &FieldType{\n\t\tName: \"_\" + string(f),\n\t\tDatamanType: f,\n\t}\n}\n\n\/\/ Normalize the given interface into what we want\/expect\nfunc (f DatamanType) Normalize(val interface{}) (interface{}, error) {\n\tswitch f {\n\tcase Document:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase map[string]interface{}:\n\t\t\treturn typedVal, nil\n\t\tcase string:\n\t\t\tmapVal := make(map[string]interface{})\n\t\t\tif err := json.Unmarshal([]byte(typedVal), mapVal); err == nil {\n\t\t\t\treturn mapVal, nil\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a document\")\n\t\t}\n\n\tcase String:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase string:\n\t\t\t\/\/ TODO: default, code this out somewhere\n\t\t\tif len(typedVal) > 4096 {\n\t\t\t\treturn nil, fmt.Errorf(\"String too long!\")\n\t\t\t}\n\t\t\treturn typedVal, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a string\")\n\t\t}\n\n\tcase Text:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase string:\n\t\t\treturn typedVal, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not text\")\n\t\t}\n\tcase Int:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\t\/\/ TODO: remove? Or error if we would lose precision\n\t\tcase int32:\n\t\t\treturn int(typedVal), nil\n\t\tcase int64:\n\t\t\treturn int(typedVal), nil\n\t\tcase int:\n\t\t\treturn typedVal, nil\n\t\tcase float64:\n\t\t\treturn int(typedVal), nil\n\t\tcase string:\n\t\t\tif typedVal == \"\" {\n\t\t\t\treturn nil, nil\n\t\t\t} else {\n\t\t\t\treturn strconv.ParseInt(typedVal, 10, 64)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Unknown Int type: %s\", reflect.TypeOf(val))\n\t\t}\n\tcase Bool:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase bool:\n\t\t\treturn typedVal, nil\n\t\tcase string:\n\t\t\treturn strconv.ParseBool(typedVal)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a bool\")\n\t\t}\n\t\/\/ TODO: implement\n\tcase DateTime:\n\t\treturn nil, fmt.Errorf(\"DateTime currently unimplemented\")\n\t}\n\treturn nil, fmt.Errorf(\"Unknown type \\\"%s\\\" defined\", f)\n}\n\n\/\/ TODO: have method which will reflect type to determine dataman type\n\/\/ then we can have the datasources just call the method with the largest thing\n\/\/ they can store in a given field type to determine the closest dataman_type\n<commit_msg>Add TODO<commit_after>package metadata\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/ TODO: rename to SchemaMan Type?\ntype DatamanType string\n\n\/\/ DatamanType is a method for describing the golang type in schema\n\/\/ This allows us to treat everything as interfaces{} in most of the code yet\n\/\/ still be in a strongly typed language\n\nconst (\n\tDocument DatamanType = \"document\"\n\tString = \"string\" \/\/ max len 4096\n\tText = \"text\"\n\t\/\/ We should support converting anything to an int that doesn't lose data\n\tInt = \"int\"\n\t\/\/ TODO: int64\n\t\/\/ TODO: uint\n\t\/\/ TODO: uint64\n\tBool = \"bool\"\n\t\/\/ TODO: actually implement\n\tDateTime = \"datetime\"\n)\n\n\/\/ TODO: have this register the type? Right now this assumes this is in-sync with field_type_internal.go (which is bad to do)\nfunc (f DatamanType) ToFieldType() *FieldType {\n\treturn &FieldType{\n\t\tName: \"_\" + string(f),\n\t\tDatamanType: f,\n\t}\n}\n\n\/\/ Normalize the given interface into what we want\/expect\nfunc (f DatamanType) Normalize(val interface{}) (interface{}, error) {\n\tswitch f {\n\tcase Document:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase map[string]interface{}:\n\t\t\treturn typedVal, nil\n\t\t\/\/ TODO: put this behind a switch to enforce strictness\n\t\tcase string:\n\t\t\tmapVal := make(map[string]interface{})\n\t\t\tif err := json.Unmarshal([]byte(typedVal), mapVal); err == nil {\n\t\t\t\treturn mapVal, nil\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a document\")\n\t\t}\n\n\tcase String:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase string:\n\t\t\t\/\/ TODO: default, code this out somewhere\n\t\t\tif len(typedVal) > 4096 {\n\t\t\t\treturn nil, fmt.Errorf(\"String too long!\")\n\t\t\t}\n\t\t\treturn typedVal, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a string\")\n\t\t}\n\n\tcase Text:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase string:\n\t\t\treturn typedVal, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not text\")\n\t\t}\n\tcase Int:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\t\/\/ TODO: remove? Or error if we would lose precision\n\t\tcase int32:\n\t\t\treturn int(typedVal), nil\n\t\tcase int64:\n\t\t\treturn int(typedVal), nil\n\t\tcase int:\n\t\t\treturn typedVal, nil\n\t\tcase float64:\n\t\t\treturn int(typedVal), nil\n\t\tcase string:\n\t\t\tif typedVal == \"\" {\n\t\t\t\treturn nil, nil\n\t\t\t} else {\n\t\t\t\treturn strconv.ParseInt(typedVal, 10, 64)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Unknown Int type: %s\", reflect.TypeOf(val))\n\t\t}\n\tcase Bool:\n\t\tswitch typedVal := val.(type) {\n\t\tcase nil:\n\t\t\treturn nil, nil\n\t\tcase bool:\n\t\t\treturn typedVal, nil\n\t\tcase string:\n\t\t\treturn strconv.ParseBool(typedVal)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Not a bool\")\n\t\t}\n\t\/\/ TODO: implement\n\tcase DateTime:\n\t\treturn nil, fmt.Errorf(\"DateTime currently unimplemented\")\n\t}\n\treturn nil, fmt.Errorf(\"Unknown type \\\"%s\\\" defined\", f)\n}\n\n\/\/ TODO: have method which will reflect type to determine dataman type\n\/\/ then we can have the datasources just call the method with the largest thing\n\/\/ they can store in a given field type to determine the closest dataman_type\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage sync\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/logger\"\n)\n\nconst (\n\tlogThreshold = 100 * time.Millisecond\n\tshortWait = 5 * time.Millisecond\n\tlongWait = 125 * time.Millisecond\n)\n\nfunc TestTypes(t *testing.T) {\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n\n\tif _, ok := NewMutex().(*sync.Mutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewRWMutex().(*sync.RWMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewWaitGroup().(*sync.WaitGroup); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\n\tif _, ok := NewMutex().(*loggedMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewRWMutex().(*loggedRWMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewWaitGroup().(*loggedWaitGroup); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestMutex(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\tmut := NewMutex()\n\tmut.Lock()\n\tclock.wind(shortWait)\n\tmut.Unlock()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tmut.Lock()\n\tclock.wind(longWait)\n\tmut.Unlock()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestRWMutex(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\tmut := NewRWMutex()\n\tmut.Lock()\n\tclock.wind(shortWait)\n\tmut.Unlock()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tmut.Lock()\n\tclock.wind(longWait)\n\tmut.Unlock()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\t\/\/ Testing rlocker logging\n\twait := make(chan struct{})\n\tlocking := make(chan struct{})\n\n\tmut.RLock()\n\tgo func() {\n\t\tclose(locking)\n\t\tmut.Lock()\n\t\tclose(wait)\n\t}()\n\n\t<-locking\n\tclock.wind(longWait)\n\tmut.RUnlock()\n\t<-wait\n\n\tmut.Unlock()\n\n\tif len(messages) != 2 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\tif !strings.Contains(messages[1], \"RUnlockers while locking:\\nat sync\") || !strings.Contains(messages[1], \"sync_test.go:\") {\n\t\tt.Error(\"Unexpected message\")\n\t}\n\n\t\/\/ Testing multiple rlockers\n\tmut.RLock()\n\tmut.RLock()\n\tmut.RLock()\n\t_ = 1 \/\/ skip empty critical section check\n\tmut.RUnlock()\n\tmut.RUnlock()\n\tmut.RUnlock()\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestWaitGroup(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\twg := NewWaitGroup()\n\twg.Add(1)\n\twaiting := make(chan struct{})\n\n\tgo func() {\n\t\t<-waiting\n\t\tclock.wind(shortWait)\n\t\twg.Done()\n\t}()\n\n\tclose(waiting)\n\twg.Wait()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\twg = NewWaitGroup()\n\twaiting = make(chan struct{})\n\n\twg.Add(1)\n\tgo func() {\n\t\t<-waiting\n\t\tclock.wind(longWait)\n\t\twg.Done()\n\t}()\n\n\tclose(waiting)\n\twg.Wait()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestTimeoutCond(t *testing.T) {\n\t\/\/ WARNING this test relies heavily on threads not being stalled at particular points.\n\t\/\/ As such, it's pretty unstable on the build server. It has been left in as it still\n\t\/\/ exercises the deadlock detector, and one of the two things it tests is still functional.\n\t\/\/ See the comments in runLocks\n\n\tconst (\n\t\t\/\/ Low values to avoid being intrusive in continuous testing. Can be\n\t\t\/\/ increased significantly for stress testing.\n\t\titerations = 100\n\t\troutines = 10\n\n\t\ttimeMult = 2\n\t)\n\n\tc := NewTimeoutCond(NewMutex())\n\n\t\/\/ Start a routine to periodically broadcast on the cond.\n\n\tgo func() {\n\t\td := time.Duration(routines) * timeMult * time.Millisecond \/ 2\n\t\tt.Log(\"Broadcasting every\", d)\n\t\tfor i := 0; i < iterations; i++ {\n\t\t\ttime.Sleep(d)\n\n\t\t\tc.L.Lock()\n\t\t\tc.Broadcast()\n\t\t\tc.L.Unlock()\n\t\t}\n\t}()\n\n\t\/\/ Start several routines that wait on it with different timeouts.\n\n\tvar results [routines][2]int\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < routines; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\td := time.Duration(i) * timeMult * time.Millisecond\n\t\t\tt.Logf(\"Routine %d waits for %v\\n\", i, d)\n\t\t\tsucc, fail := runLocks(t, iterations, c, d)\n\t\t\tresults[i][0] = succ\n\t\t\tresults[i][1] = fail\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\t\/\/ Print a table of routine number: successes, failures.\n\n\tfor i, v := range results {\n\t\tt.Logf(\"%4d: %4d %4d\\n\", i, v[0], v[1])\n\t}\n}\n\nfunc runLocks(t *testing.T, iterations int, c *TimeoutCond, d time.Duration) (succ, fail int) {\n\tfor i := 0; i < iterations; i++ {\n\t\tc.L.Lock()\n\n\t\t\/\/ The thread may be stalled, so we can't test the 'succeeded late' case reliably.\n\t\t\/\/ Therefore make sure that we start t0 before starting the timeout, and only test\n\t\t\/\/ the 'failed early' case.\n\n\t\tt0 := time.Now()\n\t\tw := c.SetupWait(d)\n\n\t\tres := w.Wait()\n\t\twaited := time.Since(t0)\n\n\t\t\/\/ Allow 20% slide in either direction, and a five milliseconds of\n\t\t\/\/ scheduling delay... In tweaking these it was clear that things\n\t\t\/\/ worked like the should, so if this becomes a spurious failure\n\t\t\/\/ kind of thing feel free to remove or give significantly more\n\t\t\/\/ slack.\n\n\t\tif !res && waited < d*8\/10 {\n\t\t\tt.Errorf(\"Wait failed early, %v < %v\", waited, d)\n\t\t}\n\t\tif res && waited > d*11\/10+5*time.Millisecond {\n\t\t\t\/\/ Ideally this would be t.Errorf\n\t\t\tt.Logf(\"WARNING: Wait succeeded late, %v > %v. This is probably a thread scheduling issue\", waited, d)\n\t\t}\n\n\t\tw.Stop()\n\n\t\tif res {\n\t\t\tsucc++\n\t\t} else {\n\t\t\tfail++\n\t\t}\n\t\tc.L.Unlock()\n\t}\n\treturn\n}\n\ntype testClock struct {\n\ttime time.Time\n\tmut sync.Mutex\n}\n\nfunc newTestClock() *testClock {\n\treturn &testClock{\n\t\ttime: time.Now(),\n\t}\n}\n\nfunc (t *testClock) Now() time.Time {\n\tt.mut.Lock()\n\tnow := t.time\n\tt.time = t.time.Add(time.Nanosecond)\n\tt.mut.Unlock()\n\treturn now\n}\n\nfunc (t *testClock) wind(d time.Duration) {\n\tt.mut.Lock()\n\tt.time = t.time.Add(d)\n\tt.mut.Unlock()\n}\n<commit_msg>lib\/sync: Cleanly fail instead of panic in tests (#6088)<commit_after>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage sync\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/logger\"\n)\n\nconst (\n\tlogThreshold = 100 * time.Millisecond\n\tshortWait = 5 * time.Millisecond\n\tlongWait = 125 * time.Millisecond\n)\n\nfunc TestTypes(t *testing.T) {\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n\n\tif _, ok := NewMutex().(*sync.Mutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewRWMutex().(*sync.RWMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewWaitGroup().(*sync.WaitGroup); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\n\tif _, ok := NewMutex().(*loggedMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewRWMutex().(*loggedRWMutex); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tif _, ok := NewWaitGroup().(*loggedWaitGroup); !ok {\n\t\tt.Error(\"Wrong type\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestMutex(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\tmut := NewMutex()\n\tmut.Lock()\n\tclock.wind(shortWait)\n\tmut.Unlock()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tmut.Lock()\n\tclock.wind(longWait)\n\tmut.Unlock()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestRWMutex(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\tmut := NewRWMutex()\n\tmut.Lock()\n\tclock.wind(shortWait)\n\tmut.Unlock()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tmut.Lock()\n\tclock.wind(longWait)\n\tmut.Unlock()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\t\/\/ Testing rlocker logging\n\twait := make(chan struct{})\n\tlocking := make(chan struct{})\n\n\tmut.RLock()\n\tgo func() {\n\t\tclose(locking)\n\t\tmut.Lock()\n\t\tclose(wait)\n\t}()\n\n\t<-locking\n\tclock.wind(longWait)\n\tmut.RUnlock()\n\t<-wait\n\n\tmut.Unlock()\n\n\tif len(messages) != 2 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t} else if !strings.Contains(messages[1], \"RUnlockers while locking:\\nat sync\") || !strings.Contains(messages[1], \"sync_test.go:\") {\n\t\tt.Error(\"Unexpected message\")\n\t}\n\n\t\/\/ Testing multiple rlockers\n\tmut.RLock()\n\tmut.RLock()\n\tmut.RLock()\n\t_ = 1 \/\/ skip empty critical section check\n\tmut.RUnlock()\n\tmut.RUnlock()\n\tmut.RUnlock()\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestWaitGroup(t *testing.T) {\n\toldClock := defaultClock\n\tclock := newTestClock()\n\tdefaultClock = clock\n\tdefer func() { defaultClock = oldClock }()\n\n\tdebug = true\n\tl.SetDebug(\"sync\", true)\n\tthreshold = logThreshold\n\n\tmsgmut := sync.Mutex{}\n\tvar messages []string\n\n\tl.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {\n\t\tmsgmut.Lock()\n\t\tmessages = append(messages, message)\n\t\tmsgmut.Unlock()\n\t})\n\n\twg := NewWaitGroup()\n\twg.Add(1)\n\twaiting := make(chan struct{})\n\n\tgo func() {\n\t\t<-waiting\n\t\tclock.wind(shortWait)\n\t\twg.Done()\n\t}()\n\n\tclose(waiting)\n\twg.Wait()\n\n\tif len(messages) > 0 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\twg = NewWaitGroup()\n\twaiting = make(chan struct{})\n\n\twg.Add(1)\n\tgo func() {\n\t\t<-waiting\n\t\tclock.wind(longWait)\n\t\twg.Done()\n\t}()\n\n\tclose(waiting)\n\twg.Wait()\n\n\tif len(messages) != 1 {\n\t\tt.Errorf(\"Unexpected message count\")\n\t}\n\n\tdebug = false\n\tl.SetDebug(\"sync\", false)\n}\n\nfunc TestTimeoutCond(t *testing.T) {\n\t\/\/ WARNING this test relies heavily on threads not being stalled at particular points.\n\t\/\/ As such, it's pretty unstable on the build server. It has been left in as it still\n\t\/\/ exercises the deadlock detector, and one of the two things it tests is still functional.\n\t\/\/ See the comments in runLocks\n\n\tconst (\n\t\t\/\/ Low values to avoid being intrusive in continuous testing. Can be\n\t\t\/\/ increased significantly for stress testing.\n\t\titerations = 100\n\t\troutines = 10\n\n\t\ttimeMult = 2\n\t)\n\n\tc := NewTimeoutCond(NewMutex())\n\n\t\/\/ Start a routine to periodically broadcast on the cond.\n\n\tgo func() {\n\t\td := time.Duration(routines) * timeMult * time.Millisecond \/ 2\n\t\tt.Log(\"Broadcasting every\", d)\n\t\tfor i := 0; i < iterations; i++ {\n\t\t\ttime.Sleep(d)\n\n\t\t\tc.L.Lock()\n\t\t\tc.Broadcast()\n\t\t\tc.L.Unlock()\n\t\t}\n\t}()\n\n\t\/\/ Start several routines that wait on it with different timeouts.\n\n\tvar results [routines][2]int\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < routines; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\td := time.Duration(i) * timeMult * time.Millisecond\n\t\t\tt.Logf(\"Routine %d waits for %v\\n\", i, d)\n\t\t\tsucc, fail := runLocks(t, iterations, c, d)\n\t\t\tresults[i][0] = succ\n\t\t\tresults[i][1] = fail\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\t\/\/ Print a table of routine number: successes, failures.\n\n\tfor i, v := range results {\n\t\tt.Logf(\"%4d: %4d %4d\\n\", i, v[0], v[1])\n\t}\n}\n\nfunc runLocks(t *testing.T, iterations int, c *TimeoutCond, d time.Duration) (succ, fail int) {\n\tfor i := 0; i < iterations; i++ {\n\t\tc.L.Lock()\n\n\t\t\/\/ The thread may be stalled, so we can't test the 'succeeded late' case reliably.\n\t\t\/\/ Therefore make sure that we start t0 before starting the timeout, and only test\n\t\t\/\/ the 'failed early' case.\n\n\t\tt0 := time.Now()\n\t\tw := c.SetupWait(d)\n\n\t\tres := w.Wait()\n\t\twaited := time.Since(t0)\n\n\t\t\/\/ Allow 20% slide in either direction, and a five milliseconds of\n\t\t\/\/ scheduling delay... In tweaking these it was clear that things\n\t\t\/\/ worked like the should, so if this becomes a spurious failure\n\t\t\/\/ kind of thing feel free to remove or give significantly more\n\t\t\/\/ slack.\n\n\t\tif !res && waited < d*8\/10 {\n\t\t\tt.Errorf(\"Wait failed early, %v < %v\", waited, d)\n\t\t}\n\t\tif res && waited > d*11\/10+5*time.Millisecond {\n\t\t\t\/\/ Ideally this would be t.Errorf\n\t\t\tt.Logf(\"WARNING: Wait succeeded late, %v > %v. This is probably a thread scheduling issue\", waited, d)\n\t\t}\n\n\t\tw.Stop()\n\n\t\tif res {\n\t\t\tsucc++\n\t\t} else {\n\t\t\tfail++\n\t\t}\n\t\tc.L.Unlock()\n\t}\n\treturn\n}\n\ntype testClock struct {\n\ttime time.Time\n\tmut sync.Mutex\n}\n\nfunc newTestClock() *testClock {\n\treturn &testClock{\n\t\ttime: time.Now(),\n\t}\n}\n\nfunc (t *testClock) Now() time.Time {\n\tt.mut.Lock()\n\tnow := t.time\n\tt.time = t.time.Add(time.Nanosecond)\n\tt.mut.Unlock()\n\treturn now\n}\n\nfunc (t *testClock) wind(d time.Duration) {\n\tt.mut.Lock()\n\tt.time = t.time.Add(d)\n\tt.mut.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package middleware\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/DeedleFake\/Go-PhysicsFS\/physfs\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/glue\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/helpers\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/scheduler\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"github.com\/vifino\/golua\/lua\"\n\t\"github.com\/vifino\/luar\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Cache\nvar kvstore *cache.Cache\nvar cbc *cache.Cache\nvar cfe *cache.Cache\nvar LDumper *lua.State\n\nfunc cacheDump(file string) (string, error, bool) {\n\tdata_tmp, found := cbc.Get(file)\n\tif found == false {\n\t\tdata, err := fileRead(file)\n\t\tif err != nil {\n\t\t\treturn \"\", err, false\n\t\t}\n\t\tres, err := bcdump(data)\n\t\tif err != nil {\n\t\t\treturn \"\", err, true\n\t\t}\n\t\tcbc.Set(file, res, cache.DefaultExpiration)\n\t\treturn res, nil, false\n\t} else {\n\t\t\/\/debug(\"Using Bytecode-cache for \" + file)\n\t\treturn data_tmp.(string), nil, false\n\t}\n}\nfunc bcdump(data string) (string, error) {\n\tif LDumper.LoadString(data) != 0 {\n\t\treturn \"\", errors.New(LDumper.ToString(-1))\n\t}\n\tdefer LDumper.Pop(1)\n\treturn LDumper.FDump(), nil\n}\n\n\/\/ FS\nvar filesystem http.FileSystem\n\nfunc fileExists(file string) bool {\n\tdata_tmp, found := cfe.Get(file)\n\tif found == false {\n\t\texists := physfs.Exists(file)\n\t\tcfe.Set(file, exists, cache.DefaultExpiration)\n\t\treturn exists\n\t} else {\n\t\treturn data_tmp.(bool)\n\t}\n}\n\nfunc fileRead(file string) (string, error) {\n\tf, err := filesystem.Open(file)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr := bufio.NewReader(f)\n\tbuf := make([]byte, fi.Size())\n\t_, err = r.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), err\n}\n\n\/\/ Preloader\/Starter\nvar jobs int\nvar Preloaded chan *lua.State\n\nfunc Preloader() {\n\tPreloaded = make(chan *lua.State, jobs)\n\tfor {\n\t\t\/\/fmt.Println(\"preloading\")\n\t\tL := luar.Init()\n\t\tBind(L)\n\t\terr := L.DoString(glue.MainGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = L.DoString(glue.RouteGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tPreloaded <- L\n\t}\n}\nfunc GetInstance() *lua.State {\n\t\/\/fmt.Println(\"grabbing instance\")\n\tL := <-Preloaded\n\t\/\/fmt.Println(\"Done\")\n\treturn L\n}\n\n\/\/ Init\nfunc Init(j int, cfe_new *cache.Cache, kvstore_new *cache.Cache) {\n\tcfe = cfe_new\n\tkvstore = kvstore_new\n\tjobs = j\n\tfilesystem = physfs.FileSystem()\n\tcbc = cache.New(5*time.Minute, 30*time.Second) \/\/ Initialize cache with 5 minute lifetime and purge every 30 seconds\n\tLDumper = luar.Init()\n}\n\n\/\/ PHP-like lua scripts\nfunc Lua() func(*gin.Context) {\n\t\/\/LDumper := luar.Init()\n\treturn func(context *gin.Context) {\n\t\tfile := context.Request.URL.Path\n\t\tif fileExists(file) {\n\t\t\t\/\/fmt.Println(\"start\")\n\t\t\tL := GetInstance()\n\t\t\t\/\/fmt.Println(\"after start\")\n\t\t\tdefer scheduler.Add(func() {\n\t\t\t\tL.Close()\n\t\t\t})\n\t\t\t\/\/fmt.Println(\"after after start\")\n\t\t\tBindContext(L, context)\n\t\t\t\/\/fmt.Println(\"before cache\")\n\t\t\tcode, err, lerr := cacheDump(file)\n\t\t\t\/\/fmt.Println(\"after cache\")\n\t\t\tif err != nil {\n\t\t\t\tif lerr == false {\n\t\t\t\t\tcontext.Next()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t\t<body>\n\t\t\t\t\t\t<h1>Syntax Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t\t<code>`+string(err.Error())+`<\/code>\n\t\t\t\t\t<\/body>\n\t\t\t\t\t<\/html>`)\n\t\t\t\t\tcontext.Abort()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\tL.LoadBuffer(code, len(code), file) \/\/ This shouldn't error, was checked earlier.\n\t\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t\t<head><title>Runtime Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t<body>\n\t\t\t\t\t<h1>Runtime Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t\t<\/body>\n\t\t\t\t<\/html>`)\n\t\t\t\tcontext.Abort()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/*L.DoString(\"return CONTENT_TO_RETURN\")\n\t\t\tv := luar.CopyTableToMap(L, nil, -1)\n\t\t\tm := v.(map[string]interface{})\n\t\t\ti := int(m[\"code\"].(float64))\n\t\t\tif err != nil {\n\t\t\t\ti = http.StatusOK\n\t\t\t}*\/\n\t\t\t\/\/helpers.HTMLString(context, i, m[\"content\"].(string))\n\t\t} else {\n\t\t\tcontext.Next()\n\t\t}\n\t}\n}\n\n\/\/ Route creation by lua\nfunc DLR_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) {\n\t\/*code, err := bcdump(code)\n\tif err != nil {\n\t\treturn func(*gin.Context) {}, err\n\t}*\/\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tdefer scheduler.Add(func() {\n\t\t\tL.Close()\n\t\t})\n\t\tBindContext(L, context)\n\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\/*if L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Syntax Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}*\/\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t}, nil\n}\nfunc DLR_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reuses states. Much faster. Higher memory use though, because more states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tif L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\treturn func(context *gin.Context) {}, errors.New(L.ToString(-1))\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}, nil\n}\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n}\n\nfunc DLRWS_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but for websockets. Not working because?!\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tif L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\treturn func(context *gin.Context) {}, errors.New(L.ToString(-1))\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}, nil\n}\n<commit_msg>A little bit inefficient, but it works!<commit_after>package middleware\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/DeedleFake\/Go-PhysicsFS\/physfs\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/glue\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/helpers\"\n\t\"github.com\/carbonsrv\/carbon\/modules\/scheduler\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/pmylund\/go-cache\"\n\t\"github.com\/vifino\/golua\/lua\"\n\t\"github.com\/vifino\/luar\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Cache\nvar kvstore *cache.Cache\nvar cbc *cache.Cache\nvar cfe *cache.Cache\nvar LDumper *lua.State\n\nfunc cacheDump(file string) (string, error, bool) {\n\tdata_tmp, found := cbc.Get(file)\n\tif found == false {\n\t\tdata, err := fileRead(file)\n\t\tif err != nil {\n\t\t\treturn \"\", err, false\n\t\t}\n\t\tres, err := bcdump(data)\n\t\tif err != nil {\n\t\t\treturn \"\", err, true\n\t\t}\n\t\tcbc.Set(file, res, cache.DefaultExpiration)\n\t\treturn res, nil, false\n\t} else {\n\t\t\/\/debug(\"Using Bytecode-cache for \" + file)\n\t\treturn data_tmp.(string), nil, false\n\t}\n}\nfunc bcdump(data string) (string, error) {\n\tif LDumper.LoadString(data) != 0 {\n\t\treturn \"\", errors.New(LDumper.ToString(-1))\n\t}\n\tdefer LDumper.Pop(1)\n\treturn LDumper.FDump(), nil\n}\n\n\/\/ FS\nvar filesystem http.FileSystem\n\nfunc fileExists(file string) bool {\n\tdata_tmp, found := cfe.Get(file)\n\tif found == false {\n\t\texists := physfs.Exists(file)\n\t\tcfe.Set(file, exists, cache.DefaultExpiration)\n\t\treturn exists\n\t} else {\n\t\treturn data_tmp.(bool)\n\t}\n}\n\nfunc fileRead(file string) (string, error) {\n\tf, err := filesystem.Open(file)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr := bufio.NewReader(f)\n\tbuf := make([]byte, fi.Size())\n\t_, err = r.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), err\n}\n\n\/\/ Preloader\/Starter\nvar jobs int\nvar Preloaded chan *lua.State\n\nfunc Preloader() {\n\tPreloaded = make(chan *lua.State, jobs)\n\tfor {\n\t\t\/\/fmt.Println(\"preloading\")\n\t\tL := luar.Init()\n\t\tBind(L)\n\t\terr := L.DoString(glue.MainGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = L.DoString(glue.RouteGlue())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tPreloaded <- L\n\t}\n}\nfunc GetInstance() *lua.State {\n\t\/\/fmt.Println(\"grabbing instance\")\n\tL := <-Preloaded\n\t\/\/fmt.Println(\"Done\")\n\treturn L\n}\n\n\/\/ Init\nfunc Init(j int, cfe_new *cache.Cache, kvstore_new *cache.Cache) {\n\tcfe = cfe_new\n\tkvstore = kvstore_new\n\tjobs = j\n\tfilesystem = physfs.FileSystem()\n\tcbc = cache.New(5*time.Minute, 30*time.Second) \/\/ Initialize cache with 5 minute lifetime and purge every 30 seconds\n\tLDumper = luar.Init()\n}\n\n\/\/ PHP-like lua scripts\nfunc Lua() func(*gin.Context) {\n\t\/\/LDumper := luar.Init()\n\treturn func(context *gin.Context) {\n\t\tfile := context.Request.URL.Path\n\t\tif fileExists(file) {\n\t\t\t\/\/fmt.Println(\"start\")\n\t\t\tL := GetInstance()\n\t\t\t\/\/fmt.Println(\"after start\")\n\t\t\tdefer scheduler.Add(func() {\n\t\t\t\tL.Close()\n\t\t\t})\n\t\t\t\/\/fmt.Println(\"after after start\")\n\t\t\tBindContext(L, context)\n\t\t\t\/\/fmt.Println(\"before cache\")\n\t\t\tcode, err, lerr := cacheDump(file)\n\t\t\t\/\/fmt.Println(\"after cache\")\n\t\t\tif err != nil {\n\t\t\t\tif lerr == false {\n\t\t\t\t\tcontext.Next()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t\t<body>\n\t\t\t\t\t\t<h1>Syntax Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t\t<code>`+string(err.Error())+`<\/code>\n\t\t\t\t\t<\/body>\n\t\t\t\t\t<\/html>`)\n\t\t\t\t\tcontext.Abort()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\tL.LoadBuffer(code, len(code), file) \/\/ This shouldn't error, was checked earlier.\n\t\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t\t<head><title>Runtime Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t\t<body>\n\t\t\t\t\t<h1>Runtime Error in file `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t\t<\/body>\n\t\t\t\t<\/html>`)\n\t\t\t\tcontext.Abort()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/*L.DoString(\"return CONTENT_TO_RETURN\")\n\t\t\tv := luar.CopyTableToMap(L, nil, -1)\n\t\t\tm := v.(map[string]interface{})\n\t\t\ti := int(m[\"code\"].(float64))\n\t\t\tif err != nil {\n\t\t\t\ti = http.StatusOK\n\t\t\t}*\/\n\t\t\t\/\/helpers.HTMLString(context, i, m[\"content\"].(string))\n\t\t} else {\n\t\t\tcontext.Next()\n\t\t}\n\t}\n}\n\n\/\/ Route creation by lua\nfunc DLR_NS(bcode string, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) {\n\t\/*code, err := bcdump(code)\n\tif err != nil {\n\t\treturn func(*gin.Context) {}, err\n\t}*\/\n\treturn func(context *gin.Context) {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tdefer scheduler.Add(func() {\n\t\t\tL.Close()\n\t\t})\n\t\tBindContext(L, context)\n\t\t\/\/fmt.Println(\"before loadbuffer\")\n\t\t\/*if L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Syntax Error in `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Syntax Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}*\/\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t}, nil\n}\nfunc DLR_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but reuses states. Much faster. Higher memory use though, because more states.\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tif L.LoadBuffer(bcode, len(bcode), \"route\") != 0 {\n\t\t\treturn func(context *gin.Context) {}, errors.New(L.ToString(-1))\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\thelpers.HTMLString(context, http.StatusInternalServerError, `<html>\n\t\t\t<head><title>Runtime Error on `+context.Request.URL.Path+`<\/title>\n\t\t\t<body>\n\t\t\t\t<h1>Runtime Error in Lua Route on `+context.Request.URL.Path+`:<\/h1>\n\t\t\t\t<code>`+L.ToString(-1)+`<\/code>\n\t\t\t<\/body>\n\t\t\t<\/html>`)\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tL.PushValue(-1)\n\t\tschan <- L\n\t}, nil\n}\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n}\n\nfunc DLRWS_RUS(bcode string, instances int, dobind bool, vals map[string]interface{}) (func(*gin.Context), error) { \/\/ Same as above, but for websockets. Not working because?!\n\tinsts := instances\n\tif instances < 0 {\n\t\tinsts = 2\n\t\tif jobs\/2 > 1 {\n\t\t\tinsts = jobs\n\t\t}\n\t}\n\tschan := make(chan *lua.State, insts)\n\tfor i := 0; i < jobs\/2; i++ {\n\t\tL := GetInstance()\n\t\tif dobind {\n\t\t\tluar.Register(L, \"\", vals)\n\t\t}\n\t\tschan <- L\n\t}\n\treturn func(context *gin.Context) {\n\t\tL := <-schan\n\t\tBindContext(L, context)\n\t\tconn, err := upgrader.Upgrade(context.Writer, context.Request, nil)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Websocket error: \" + err.Error()) \/\/ silent error.\n\t\t}\n\t\tluar.Register(L, \"ws\", luar.Map{\n\t\t\t\"BinaryMessage\": websocket.BinaryMessage,\n\t\t\t\"TextMessage\": websocket.TextMessage,\n\t\t\t\/\/\"read\": conn.ReadMessage,\n\t\t\t\/\/\"send\": conn.SendMessage,\n\t\t\t\"read\": (func() (int, string, error) {\n\t\t\t\tmessageType, p, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn messageType, string(p), nil\n\t\t\t}),\n\t\t\t\"send\": (func(t int, cnt string) error {\n\t\t\t\treturn conn.WriteMessage(t, []byte(cnt))\n\t\t\t}),\n\t\t})\n\t\tL.LoadBuffer(bcode, len(bcode), \"route\")\n\t\tif L.Pcall(0, 0, 0) != 0 { \/\/ != 0 means error in execution\n\t\t\tfmt.Println(\"Websocket Lua error: \" + L.ToString(-1))\n\t\t\tcontext.Abort()\n\t\t\treturn\n\t\t}\n\t\tschan <- L\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package renter\n\nimport (\n\t\"time\"\n)\n\n\/\/ dropChunk will remove a worker from the responsibility of tracking a chunk.\nfunc (w *worker) dropChunk(uc *unfinishedChunk) {\n\tuc.mu.Lock()\n\tuc.workersRemaining--\n\tuc.mu.Unlock()\n\tw.renter.managedReleaseIdleChunkPieces(uc)\n\tw.renter.heapWG.Done()\n}\n\n\/\/ dropUploadChunks will release all of the upload chunks that the worker has\n\/\/ received.\nfunc (w *worker) dropUploadChunks() {\n\tfor i := 0; i < len(w.unprocessedChunks); i++ {\n\t\tw.dropChunk(w.unprocessedChunks[i])\n\t}\n\tw.unprocessedChunks = w.unprocessedChunks[:0]\n}\n\n\/\/ managedKillUploading will disable all uploading for the worker.\nfunc (w *worker) managedKillUploading() {\n\tw.mu.Lock()\n\tw.dropUploadChunks()\n\tw.uploadTerminated = true\n\tw.mu.Unlock()\n}\n\n\/\/ managedNextUploadChunk will pull the next potential chunk out of the worker's\n\/\/ work queue for uploading.\nfunc (w *worker) managedNextUploadChunk() (nextChunk *unfinishedChunk, pieceIndex uint64) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\t\/\/ Loop through the unprocessed chunks and find some work to do.\n\tfor range w.unprocessedChunks {\n\t\t\/\/ Pull a chunk off of the unprocessed chunks stack.\n\t\tchunk := w.unprocessedChunks[0]\n\t\tw.unprocessedChunks = w.unprocessedChunks[1:]\n\t\tnextChunk, pieceIndex := w.processUploadChunk(chunk)\n\t\tif nextChunk != nil {\n\t\t\treturn nextChunk, pieceIndex\n\t\t}\n\t}\n\treturn nil, 0 \/\/ no work found\n}\n\n\/\/ managedQueueUploadChunk will take a chunk and add it to the worker's repair\n\/\/ stack.\nfunc (w *worker) managedQueueUploadChunk(uc *unfinishedChunk) {\n\t\/\/ Check that the worker is allowed to be uploading before grabbing the\n\t\/\/ worker lock.\n\tutility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)\n\tgoodForUpload := exists && utility.GoodForUpload\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif !goodForUpload || w.uploadTerminated || w.onUploadCooldown() {\n\t\t\/\/ The worker should not be uploading, remove the chunk.\n\t\tw.dropChunk(uc)\n\t\treturn\n\t}\n\tw.unprocessedChunks = append(w.unprocessedChunks, uc)\n\n\t\/\/ Send a signal informing the work thread that there is work.\n\tselect {\n\tcase w.uploadChan <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ managedUpload will perform some upload work.\nfunc (w *worker) managedUpload(uc *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Open an editing connection to the host.\n\te, err := w.renter.hostContractor.Editor(w.contract.ID, w.renter.tg.StopChan())\n\tif err != nil {\n\t\tw.renter.log.Debugln(\"Worker failed to acquire an editor:\", err)\n\t\tw.mu.Lock()\n\t\tw.uploadFailed(uc, pieceIndex)\n\t\tw.mu.Unlock()\n\t\treturn\n\t}\n\tdefer e.Close()\n\n\t\/\/ Perform the upload, and update the failure stats based on the success of\n\t\/\/ the upload attempt.\n\troot, err := e.Upload(uc.physicalChunkData[pieceIndex])\n\tif err != nil {\n\t\tw.renter.log.Debugln(\"Worker failed to upload via the editor:\", err)\n\t\tw.mu.Lock()\n\t\tw.uploadFailed(uc, pieceIndex)\n\t\tw.mu.Unlock()\n\t\treturn\n\t}\n\tw.mu.Lock()\n\tw.uploadConsecutiveFailures = 0\n\tw.mu.Unlock()\n\n\t\/\/ Update the renter metadata.\n\taddr := e.Address()\n\tendHeight := e.EndHeight()\n\tid := w.renter.mu.Lock()\n\tuc.renterFile.mu.Lock()\n\tcontract, exists := uc.renterFile.contracts[w.contract.ID]\n\tif !exists {\n\t\tcontract = fileContract{\n\t\t\tID: w.contract.ID,\n\t\t\tIP: addr,\n\t\t\tWindowStart: endHeight,\n\t\t}\n\t}\n\tcontract.Pieces = append(contract.Pieces, pieceData{\n\t\tChunk: uc.index,\n\t\tPiece: pieceIndex,\n\t\tMerkleRoot: root,\n\t})\n\tuc.renterFile.contracts[w.contract.ID] = contract\n\tw.renter.saveFile(uc.renterFile)\n\tuc.renterFile.mu.Unlock()\n\tw.renter.mu.Unlock(id)\n\n\t\/\/ Upload is complete. Update the state of the chunk and the renter's memory\n\t\/\/ available to reflect the completed upload.\n\tuc.mu.Lock()\n\treleaseSize := len(uc.physicalChunkData[pieceIndex])\n\tuc.piecesRegistered--\n\tuc.piecesCompleted++\n\tuc.physicalChunkData[pieceIndex] = nil\n\tuc.memoryReleased += uint64(releaseSize)\n\tuc.mu.Unlock()\n\tw.renter.memoryManager.Return(uint64(releaseSize))\n\tw.dropChunk(uc)\n}\n\n\/\/ onUploadCooldown returns true if the worker is on cooldown from failed\n\/\/ uploads.\nfunc (w *worker) onUploadCooldown() bool {\n\trequiredCooldown := uploadFailureCooldown\n\tfor i := 0; i < w.uploadConsecutiveFailures && i < maxConsecutivePenalty; i++ {\n\t\trequiredCooldown *= 2\n\t}\n\treturn time.Now().Before(w.uploadRecentFailure.Add(requiredCooldown))\n}\n\n\/\/ processUploadChunk will process a chunk from the worker chunk queue.\nfunc (w *worker) processUploadChunk(uc *unfinishedChunk) (nextChunk *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Determine the usability value of this worker.\n\tutility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)\n\tif !exists {\n\t\treturn nil, 0\n\t}\n\tgoodForUpload := utility.GoodForUpload\n\n\t\/\/ Determine what sort of help this chunk needs.\n\tuc.mu.Lock()\n\t\/\/ Determine that this worker is not on cooldonw.\n\t_, candidateHost := uc.unusedHosts[w.hostPubKey.String()]\n\tchunkComplete := uc.piecesNeeded <= uc.piecesCompleted\n\tneedsHelp := uc.piecesNeeded > uc.piecesCompleted+uc.piecesRegistered\n\n\t\/\/ If the chunk does not need help from this worker, release the chunk.\n\tif chunkComplete || !candidateHost || !goodForUpload || w.onUploadCooldown() {\n\t\t\/\/ This worker no longer needs to track this chunk.\n\t\tuc.mu.Unlock()\n\t\tw.dropChunk(uc)\n\t\treturn nil, 0\n\t}\n\n\t\/\/ If the chunk needs help from this worker, find a piece to upload and\n\t\/\/ return the stats for that piece.\n\tindex := 0\n\tif needsHelp {\n\t\t\/\/ Select a piece and mark that a piece has been selected.\n\t\tfor i := 0; i < len(uc.pieceUsage); i++ {\n\t\t\tif !uc.pieceUsage[i] {\n\t\t\t\tindex = i\n\t\t\t\tuc.pieceUsage[i] = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdelete(uc.unusedHosts, w.hostPubKey.String())\n\t\tuc.piecesRegistered++\n\t\tuc.mu.Unlock()\n\t\treturn uc, uint64(index)\n\t}\n\t\/\/ Add this worker to the set of standby workers for this chunk.\n\tuc.workersStandby = append(uc.workersStandby, w)\n\tuc.mu.Unlock()\n\n\t\/\/ The chunk could need help from this worker, but only if other workers who\n\t\/\/ are performing uploads experience failures. Put this chunk on standby.\n\treturn nil, 0\n}\n\n\/\/ uploadFailed is called if a worker failed to upload part of an unfinished\n\/\/ chunk.\nfunc (w *worker) uploadFailed(uc *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Mark the failure in the worker if the gateway says we are online. It's\n\t\/\/ not the worker's fault if we are offline.\n\tif w.renter.g.Online() {\n\t\tw.uploadRecentFailure = time.Now()\n\t\tw.uploadConsecutiveFailures++\n\t}\n\tuc.mu.Lock()\n\tuc.piecesRegistered--\n\tuc.pieceUsage[pieceIndex] = false\n\tuc.notifyStandbyWorkers()\n\tuc.mu.Unlock()\n\tw.dropChunk(uc)\n\tw.dropUploadChunks()\n}\n<commit_msg>fix up upload concurrency<commit_after>package renter\n\nimport (\n\t\"time\"\n)\n\n\/\/ managedDropChunk will remove a worker from the responsibility of tracking a chunk.\n\/\/\n\/\/ This function is managed instead of static because it is against convention\n\/\/ to be calling functions on other objects (in this case, the renter) while\n\/\/ holding a lock.\nfunc (w *worker) managedDropChunk(uc *unfinishedChunk) {\n\tuc.mu.Lock()\n\tuc.workersRemaining--\n\tuc.mu.Unlock()\n\tw.renter.managedReleaseIdleChunkPieces(uc)\n\tw.renter.heapWG.Done()\n}\n\n\/\/ managedDropUploadChunks will release all of the upload chunks that the worker\n\/\/ has received.\nfunc (w *worker) managedDropUploadChunks() {\n\t\/\/ Make a copy of the slice under lock, clear the slice, then drop the\n\t\/\/ chunks without a lock (managed function).\n\tvar chunksToDrop []*unfinishedChunk\n\tw.mu.Lock()\n\tfor i := 0; i < len(w.unprocessedChunks); i++ {\n\t\tchunksToDrop = append(chunksToDrop, w.unprocessedChunks[i])\n\t}\n\tw.unprocessedChunks = w.unprocessedChunks[:0]\n\tw.mu.Unlock()\n\n\tfor i := 0; i < len(chunksToDrop); i++ {\n\t\tw.managedDropChunk(chunksToDrop[i])\n\t}\n}\n\n\/\/ managedKillUploading will disable all uploading for the worker.\nfunc (w *worker) managedKillUploading() {\n\t\/\/ Mark the worker as disabled so that incoming chunks are rejected.\n\tw.mu.Lock()\n\tw.uploadTerminated = true\n\tw.mu.Unlock()\n\n\t\/\/ After the worker is marked as disabled, clear out all of the chunks.\n\tw.managedDropUploadChunks()\n}\n\n\/\/ managedNextUploadChunk will pull the next potential chunk out of the worker's\n\/\/ work queue for uploading.\nfunc (w *worker) managedNextUploadChunk() (nextChunk *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Loop through the unprocessed chunks and find some work to do.\n\tfor {\n\t\t\/\/ Pull a chunk off of the unprocessed chunks stack.\n\t\tw.mu.Lock()\n\t\tif len(w.unprocessedChunks) <= 0 {\n\t\t\tw.mu.Unlock()\n\t\t\tbreak\n\t\t}\n\t\tchunk := w.unprocessedChunks[0]\n\t\tw.unprocessedChunks = w.unprocessedChunks[1:]\n\t\tw.mu.Unlock()\n\n\t\t\/\/ Process the chunk and return it if valid.\n\t\tnextChunk, pieceIndex := w.managedProcessUploadChunk(chunk)\n\t\tif nextChunk != nil {\n\t\t\treturn nextChunk, pieceIndex\n\t\t}\n\t}\n\treturn nil, 0 \/\/ no work found\n}\n\n\/\/ managedQueueUploadChunk will take a chunk and add it to the worker's repair\n\/\/ stack.\nfunc (w *worker) managedQueueUploadChunk(uc *unfinishedChunk) {\n\t\/\/ Check that the worker is allowed to be uploading before grabbing the\n\t\/\/ worker lock.\n\tutility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)\n\tgoodForUpload := exists && utility.GoodForUpload\n\tw.mu.Lock()\n\tif !goodForUpload || w.uploadTerminated || w.onUploadCooldown() {\n\t\t\/\/ The worker should not be uploading, remove the chunk.\n\t\tw.mu.Unlock()\n\t\tw.managedDropChunk(uc)\n\t\treturn\n\t}\n\tw.unprocessedChunks = append(w.unprocessedChunks, uc)\n\tw.mu.Unlock()\n\n\t\/\/ Send a signal informing the work thread that there is work.\n\tselect {\n\tcase w.uploadChan <- struct{}{}:\n\tdefault:\n\t}\n}\n\n\/\/ managedUpload will perform some upload work.\nfunc (w *worker) managedUpload(uc *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Open an editing connection to the host.\n\te, err := w.renter.hostContractor.Editor(w.contract.ID, w.renter.tg.StopChan())\n\tif err != nil {\n\t\tw.renter.log.Debugln(\"Worker failed to acquire an editor:\", err)\n\t\tw.managedUploadFailed(uc, pieceIndex)\n\t\treturn\n\t}\n\tdefer e.Close()\n\n\t\/\/ Perform the upload, and update the failure stats based on the success of\n\t\/\/ the upload attempt.\n\troot, err := e.Upload(uc.physicalChunkData[pieceIndex])\n\tif err != nil {\n\t\tw.renter.log.Debugln(\"Worker failed to upload via the editor:\", err)\n\t\tw.managedUploadFailed(uc, pieceIndex)\n\t\treturn\n\t}\n\tw.mu.Lock()\n\tw.uploadConsecutiveFailures = 0\n\tw.mu.Unlock()\n\n\t\/\/ Update the renter metadata.\n\taddr := e.Address()\n\tendHeight := e.EndHeight()\n\tid := w.renter.mu.Lock()\n\tuc.renterFile.mu.Lock()\n\tcontract, exists := uc.renterFile.contracts[w.contract.ID]\n\tif !exists {\n\t\tcontract = fileContract{\n\t\t\tID: w.contract.ID,\n\t\t\tIP: addr,\n\t\t\tWindowStart: endHeight,\n\t\t}\n\t}\n\tcontract.Pieces = append(contract.Pieces, pieceData{\n\t\tChunk: uc.index,\n\t\tPiece: pieceIndex,\n\t\tMerkleRoot: root,\n\t})\n\tuc.renterFile.contracts[w.contract.ID] = contract\n\tw.renter.saveFile(uc.renterFile)\n\tuc.renterFile.mu.Unlock()\n\tw.renter.mu.Unlock(id)\n\n\t\/\/ Upload is complete. Update the state of the chunk and the renter's memory\n\t\/\/ available to reflect the completed upload.\n\tuc.mu.Lock()\n\treleaseSize := len(uc.physicalChunkData[pieceIndex])\n\tuc.piecesRegistered--\n\tuc.piecesCompleted++\n\tuc.physicalChunkData[pieceIndex] = nil\n\tuc.memoryReleased += uint64(releaseSize)\n\tuc.mu.Unlock()\n\tw.renter.memoryManager.Return(uint64(releaseSize))\n\tw.managedDropChunk(uc)\n}\n\n\/\/ onUploadCooldown returns true if the worker is on cooldown from failed\n\/\/ uploads.\nfunc (w *worker) onUploadCooldown() bool {\n\trequiredCooldown := uploadFailureCooldown\n\tfor i := 0; i < w.uploadConsecutiveFailures && i < maxConsecutivePenalty; i++ {\n\t\trequiredCooldown *= 2\n\t}\n\treturn time.Now().Before(w.uploadRecentFailure.Add(requiredCooldown))\n}\n\n\/\/ managedProcessUploadChunk will process a chunk from the worker chunk queue.\nfunc (w *worker) managedProcessUploadChunk(uc *unfinishedChunk) (nextChunk *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Determine the usability value of this worker.\n\tutility, exists := w.renter.hostContractor.ContractUtility(w.contract.ID)\n\tif !exists {\n\t\treturn nil, 0\n\t}\n\tgoodForUpload := utility.GoodForUpload\n\n\t\/\/ Determine what sort of help this chunk needs.\n\tw.mu.Lock()\n\tonCooldown := w.onUploadCooldown()\n\tw.mu.Unlock()\n\tuc.mu.Lock()\n\t_, candidateHost := uc.unusedHosts[w.hostPubKey.String()]\n\tchunkComplete := uc.piecesNeeded <= uc.piecesCompleted\n\tneedsHelp := uc.piecesNeeded > uc.piecesCompleted+uc.piecesRegistered\n\t\/\/ If the chunk does not need help from this worker, release the chunk.\n\tif chunkComplete || !candidateHost || !goodForUpload || onCooldown {\n\t\t\/\/ This worker no longer needs to track this chunk.\n\t\tuc.mu.Unlock()\n\t\tw.managedDropChunk(uc)\n\t\treturn nil, 0\n\t}\n\n\t\/\/ If the chunk needs help from this worker, find a piece to upload and\n\t\/\/ return the stats for that piece.\n\tindex := 0\n\tif needsHelp {\n\t\t\/\/ Select a piece and mark that a piece has been selected.\n\t\tfor i := 0; i < len(uc.pieceUsage); i++ {\n\t\t\tif !uc.pieceUsage[i] {\n\t\t\t\tindex = i\n\t\t\t\tuc.pieceUsage[i] = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdelete(uc.unusedHosts, w.hostPubKey.String())\n\t\tuc.piecesRegistered++\n\t\tuc.mu.Unlock()\n\t\treturn uc, uint64(index)\n\t}\n\t\/\/ Add this worker to the set of standby workers for this chunk.\n\tuc.workersStandby = append(uc.workersStandby, w)\n\tuc.mu.Unlock()\n\n\t\/\/ The chunk could need help from this worker, but only if other workers who\n\t\/\/ are performing uploads experience failures. Put this chunk on standby.\n\treturn nil, 0\n}\n\n\/\/ managedUploadFailed is called if a worker failed to upload part of an unfinished\n\/\/ chunk.\nfunc (w *worker) managedUploadFailed(uc *unfinishedChunk, pieceIndex uint64) {\n\t\/\/ Mark the failure in the worker if the gateway says we are online. It's\n\t\/\/ not the worker's fault if we are offline.\n\tif w.renter.g.Online() {\n\t\tw.mu.Lock()\n\t\tw.uploadRecentFailure = time.Now()\n\t\tw.uploadConsecutiveFailures++\n\t\tw.mu.Unlock()\n\t}\n\n\t\/\/ Unregister the piece from the chunk and hunt for a replacement.\n\tuc.mu.Lock()\n\tuc.piecesRegistered--\n\tuc.pieceUsage[pieceIndex] = false\n\tuc.notifyStandbyWorkers()\n\tuc.mu.Unlock()\n\n\t\/\/ Drop this chunk from the worker. Because the worker is currently on\n\t\/\/ cooldown, drop all remaining chunks from the worker as well.\n\tw.managedDropChunk(uc)\n\tw.managedDropUploadChunks()\n}\n<|endoftext|>"} {"text":"<commit_before>package queue\n\nimport (\n\t\"log\"\n\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype QueueConnection struct {\n\tConnection *amqp.Connection\n\tChannel *amqp.Channel\n\n\tack chan uint64\n\tnack chan uint64\n}\n\nfunc NewQueueConnection(amqpURI string) (*QueueConnection, error) {\n\tconnection, err := amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = channel.Confirm(false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tack, nack := channel.NotifyConfirm(make(chan uint64, 1), make(chan uint64, 1))\n\n\treturn &QueueConnection{\n\t\tConnection: connection,\n\t\tChannel: channel,\n\t\tack: ack,\n\t\tnack: nack,\n\t}, nil\n}\n\nfunc (c *QueueConnection) Close() error {\n\terr := c.Channel.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Connection.Close()\n}\n\nfunc (c *QueueConnection) Consume(queueName string) (<-chan amqp.Delivery, error) {\n\treturn c.Channel.Consume(\n\t\tqueueName,\n\t\t\"\",\n\t\tfalse, \/\/ autoAck\n\t\tfalse, \/\/ this won't be the sole consumer\n\t\ttrue, \/\/ don't deliver messages from same connection\n\t\tfalse, \/\/ the broker owns when consumption can begin\n\t\tnil) \/\/ arguments\n}\n\nfunc (c *QueueConnection) ExchangeDeclare(exchangeName string, exchangeType string) error {\n\treturn c.Channel.ExchangeDeclare(\n\t\texchangeName, \/\/ name of the exchange\n\t\texchangeType, \/\/ type\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when complete\n\t\tfalse, \/\/ internal\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n}\n\nfunc (c *QueueConnection) QueueDeclare(queueName string) (amqp.Queue, error) {\n\tqueue, err := c.Channel.QueueDeclare(\n\t\tqueueName, \/\/ name of the queue\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when usused\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil) \/\/ arguments\n\tif err != nil {\n\t\treturn amqp.Queue{\n\t\t\tName: queueName,\n\t\t}, err\n\t}\n\n\treturn queue, nil\n}\n\nfunc (c *QueueConnection) BindQueueToExchange(queueName string, exchangeName string) error {\n\treturn c.Channel.QueueBind(\n\t\tqueueName,\n\t\t\"#\", \/\/ key to marshall with\n\t\texchangeName,\n\t\ttrue, \/\/ noWait\n\t\tnil) \/\/ arguments\n}\n\nfunc (c *QueueConnection) Publish(exchangeName string, routingKey string, contentType string, body string) error {\n\tdefer publisherConfirm(c)\n\n\treturn c.Channel.Publish(\n\t\texchangeName, \/\/ publish to an exchange\n\t\troutingKey, \/\/ routing to 0 or more queues\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table{},\n\t\t\tContentType: contentType,\n\t\t\tContentEncoding: \"\",\n\t\t\tBody: []byte(body),\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\tPriority: 0, \/\/ 0-9\n\t\t})\n}\n\nfunc publisherConfirm(c *QueueConnection) {\n\tselect {\n\tcase tag := <-c.ack:\n\t\tlog.Println(\"Acknowledge message publish:\", tag)\n\t\terr := c.Channel.Ack(tag, false)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Couldn't ack:\", tag, err)\n\t\t}\n\tcase tag := <-c.nack:\n\t\tlog.Println(\"Couldn't acknowledge message publish:\", tag)\n\t\terr := c.Channel.Nack(tag, false, true)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Couldn't nack:\", tag, err)\n\t\t}\n\t}\n}\n<commit_msg>Remove publisher confirms for now<commit_after>package queue\n\nimport (\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype QueueConnection struct {\n\tConnection *amqp.Connection\n\tChannel *amqp.Channel\n}\n\nfunc NewQueueConnection(amqpURI string) (*QueueConnection, error) {\n\tconnection, err := amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &QueueConnection{\n\t\tConnection: connection,\n\t\tChannel: channel,\n\t}, nil\n}\n\nfunc (c *QueueConnection) Close() error {\n\terr := c.Channel.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Connection.Close()\n}\n\nfunc (c *QueueConnection) Consume(queueName string) (<-chan amqp.Delivery, error) {\n\treturn c.Channel.Consume(\n\t\tqueueName,\n\t\t\"\",\n\t\tfalse, \/\/ autoAck\n\t\tfalse, \/\/ this won't be the sole consumer\n\t\ttrue, \/\/ don't deliver messages from same connection\n\t\tfalse, \/\/ the broker owns when consumption can begin\n\t\tnil) \/\/ arguments\n}\n\nfunc (c *QueueConnection) ExchangeDeclare(exchangeName string, exchangeType string) error {\n\treturn c.Channel.ExchangeDeclare(\n\t\texchangeName, \/\/ name of the exchange\n\t\texchangeType, \/\/ type\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when complete\n\t\tfalse, \/\/ internal\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n}\n\nfunc (c *QueueConnection) QueueDeclare(queueName string) (amqp.Queue, error) {\n\tqueue, err := c.Channel.QueueDeclare(\n\t\tqueueName, \/\/ name of the queue\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when usused\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil) \/\/ arguments\n\tif err != nil {\n\t\treturn amqp.Queue{\n\t\t\tName: queueName,\n\t\t}, err\n\t}\n\n\treturn queue, nil\n}\n\nfunc (c *QueueConnection) BindQueueToExchange(queueName string, exchangeName string) error {\n\treturn c.Channel.QueueBind(\n\t\tqueueName,\n\t\t\"#\", \/\/ key to marshall with\n\t\texchangeName,\n\t\ttrue, \/\/ noWait\n\t\tnil) \/\/ arguments\n}\n\nfunc (c *QueueConnection) Publish(exchangeName string, routingKey string, contentType string, body string) error {\n\treturn c.Channel.Publish(\n\t\texchangeName, \/\/ publish to an exchange\n\t\troutingKey, \/\/ routing to 0 or more queues\n\t\tfalse, \/\/ mandatory\n\t\tfalse, \/\/ immediate\n\t\tamqp.Publishing{\n\t\t\tHeaders: amqp.Table{},\n\t\t\tContentType: contentType,\n\t\t\tContentEncoding: \"\",\n\t\t\tBody: []byte(body),\n\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\tPriority: 0, \/\/ 0-9\n\t\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mackerelio\/mackerel-client-go\"\n\t\"github.com\/mackerelio\/mkr\/logger\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar commandDashboards = cli.Command{\n\tName: \"dashboards\",\n\tSubcommands: []cli.Command{\n\t\t{\n\t\t\tName: \"generate\",\n\t\t\tUsage: \"Generate custom dashboard\",\n\t\t\tDescription: `\n A custom dashboard is registered from a yaml file..\n Requests \"POST \/api\/v0\/dashboards\". See https:\/\/mackerel.io\/ja\/api-docs\/entry\/dashboards#create.\n`,\n\t\t\tAction: doGenerateDashboards,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"print, p\", Usage: \"markdown is output in standard output.\"},\n\t\t\t},\n\t\t},\n\t},\n}\n\ntype graphsConfig struct {\n\tTitle string `yaml:\"title\"`\n\tURLPath string `yaml:\"url_path\"`\n\tGraphType string `yaml:\"graph_type\"`\n\tHeight int `yaml:\"height\"`\n\tWidth int `yaml:\"width\"`\n\tHostGraphFormat []*hostGraphFormat `yaml:\"host_graphs\"`\n\tGraphFormat []*graphFormat `yaml:\"graphs\"`\n}\n\ntype hostGraphFormat struct {\n\tHeadline string `yaml:\"headline\"`\n\tHostIDs []string `yaml:\"host_ids\"`\n\tGraphNames []string `yaml:\"graph_names\"`\n\tPeriod string `yaml:\"period\"`\n}\ntype graphFormat struct {\n\tHeadline string `yaml:\"headline\"`\n\tColumnCount int `yaml:\"column_count\"`\n\tGraphDefs []*graphDef `yaml:\"graph_def\"`\n}\ntype graphDef struct {\n\tHostID string `yaml:\"host_id\"`\n\tServiceName string `yaml:\"service_name\"`\n\tRoleName string `yaml:\"role_name\"`\n\tQuery string `yaml:\"query\"`\n\tGraphName string `yaml:\"graph_name\"`\n\tPeriod string `yaml:\"period\"`\n\tStacked bool `yaml:\"stacked\"`\n\tSimplified bool `yaml:\"simplified\"`\n}\n\nfunc (g graphDef) isHostGraph() bool {\n\treturn g.HostID != \"\"\n}\nfunc (g graphDef) isServiceGraph() bool {\n\treturn g.ServiceName != \"\" && g.RoleName == \"\"\n}\nfunc (g graphDef) isRoleGraph() bool {\n\treturn g.ServiceName != \"\" && g.RoleName != \"\"\n}\nfunc (g graphDef) isExpressionGraph() bool {\n\treturn g.Query != \"\"\n}\n\ntype baseGraph interface {\n\tgetURL(string, bool) string\n\tgetHeight() int\n\tgetWidth() int\n}\n\ntype hostGraph struct {\n\tHostID string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (h hostGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/hosts\/%s\"+extension, orgName, h.HostID))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", h.Graph)\n\tparam.Add(\"period\", h.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (h hostGraph) generateGraphString(orgName string) string {\n\tif h.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, h)\n\t}\n\treturn makeImageMarkdown(orgName, h)\n}\nfunc (h hostGraph) getHeight() int {\n\treturn h.height\n}\nfunc (h hostGraph) getWidth() int {\n\treturn h.width\n}\n\ntype serviceGraph struct {\n\tServiceName string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (s serviceGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/services\/%s\"+extension, orgName, s.ServiceName))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", s.Graph)\n\tparam.Add(\"period\", s.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (s serviceGraph) generateGraphString(orgName string) string {\n\tif s.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, s)\n\t}\n\treturn makeImageMarkdown(orgName, s)\n}\nfunc (s serviceGraph) getHeight() int {\n\treturn s.height\n}\nfunc (s serviceGraph) getWidth() int {\n\treturn s.width\n}\n\ntype roleGraph struct {\n\tServiceName string\n\tRoleName string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\tStacked bool\n\tSimplified bool\n\theight int\n\twidth int\n}\n\nfunc (r roleGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/services\/%s\/%s\"+extension, orgName, r.ServiceName, r.RoleName))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", r.Graph)\n\tparam.Add(\"stacked\", strconv.FormatBool(r.Stacked))\n\tparam.Add(\"simplified\", strconv.FormatBool(r.Simplified))\n\tparam.Add(\"period\", r.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (r roleGraph) generateGraphString(orgName string) string {\n\tif r.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, r)\n\t}\n\treturn makeImageMarkdown(orgName, r)\n}\nfunc (r roleGraph) getHeight() int {\n\treturn r.height\n}\nfunc (r roleGraph) getWidth() int {\n\treturn r.width\n}\n\ntype expressionGraph struct {\n\tQuery string\n\tGraphType string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (e expressionGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/advanced-graph\"+extension, orgName))\n\tparam := url.Values{}\n\tparam.Add(\"query\", e.Query)\n\tparam.Add(\"period\", e.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (e expressionGraph) generateGraphString(orgName string) string {\n\tif e.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, e)\n\t}\n\treturn makeImageMarkdown(orgName, e)\n}\nfunc (e expressionGraph) getHeight() int {\n\treturn e.height\n}\nfunc (e expressionGraph) getWidth() int {\n\treturn e.width\n}\n\nfunc makeIframeTag(orgName string, g baseGraph) string {\n\treturn fmt.Sprintf(`<iframe src=\"%s\" height=\"%d\" width=\"%d\" frameborder=\"0\"><\/iframe>`, g.getURL(orgName, false), g.getHeight(), g.getWidth())\n}\n\nfunc makeImageMarkdown(orgName string, g baseGraph) string {\n\treturn fmt.Sprintf(\"[![graph](%s)](%s)\", g.getURL(orgName, true), g.getURL(orgName, true))\n}\n\nfunc doGenerateDashboards(c *cli.Context) error {\n\tconffile := c.GlobalString(\"conf\")\n\n\tisStdout := c.Bool(\"print\")\n\n\targFilePath := c.Args()\n\tif len(argFilePath) < 1 {\n\t\tlogger.Log(\"error\", \"at least one argumet is required.\")\n\t\tcli.ShowCommandHelp(c, \"generate\")\n\t\tos.Exit(1)\n\t}\n\n\tbuf, err := ioutil.ReadFile(argFilePath[0])\n\tlogger.DieIf(err)\n\n\tyml := graphsConfig{}\n\terr = yaml.Unmarshal(buf, &yml)\n\tlogger.DieIf(err)\n\n\tclient := newMackerel(conffile)\n\n\torg, err := client.GetOrg()\n\tlogger.DieIf(err)\n\n\tif yml.Title == \"\" {\n\t\tlogger.Log(\"error\", \"title is required in yaml.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.URLPath == \"\" {\n\t\tlogger.Log(\"error\", \"url_path is required in yaml.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.GraphType == \"\" {\n\t\tyml.GraphType = \"iframe\"\n\t}\n\tif yml.GraphType != \"iframe\" && yml.GraphType != \"image\" {\n\t\tlogger.Log(\"error\", \"graph_type should be 'iframe' or 'image'.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.Height == 0 {\n\t\tyml.Height = 200\n\t}\n\tif yml.Width == 0 {\n\t\tyml.Width = 400\n\t}\n\n\tif yml.HostGraphFormat != nil && yml.GraphFormat != nil {\n\t\tlogger.Log(\"error\", \"you cannot specify both 'graphs' and host_graphs'.\")\n\t}\n\n\tvar markdown string\n\tfor _, h := range yml.HostGraphFormat {\n\t\tmarkdown += generateHostGraphsMarkdown(org.Name, h, yml.GraphType, yml.Height, yml.Width, client)\n\t}\n\tfor _, g := range yml.GraphFormat {\n\t\tmarkdown += generateGraphsMarkdown(org.Name, g, yml.GraphType, yml.Height, yml.Width)\n\t}\n\n\tif isStdout {\n\t\tfmt.Println(markdown)\n\t} else {\n\t\tupdateDashboard := &mackerel.Dashboard{\n\t\t\tTitle: yml.Title,\n\t\t\tBodyMarkDown: markdown,\n\t\t\tURLPath: yml.URLPath,\n\t\t}\n\n\t\tdashboards, fetchError := client.FindDashboards()\n\t\tlogger.DieIf(fetchError)\n\n\t\tdashboardID := \"\"\n\t\tfor _, ds := range dashboards {\n\t\t\tif ds.URLPath == yml.URLPath {\n\t\t\t\tdashboardID = ds.ID\n\t\t\t}\n\t\t}\n\n\t\tif dashboardID == \"\" {\n\t\t\t_, createError := client.CreateDashboard(updateDashboard)\n\t\t\tlogger.DieIf(createError)\n\t\t} else {\n\t\t\t_, updateError := client.UpdateDashboard(dashboardID, updateDashboard)\n\t\t\tlogger.DieIf(updateError)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateHostGraphsMarkdown(orgName string, hostGraphs *hostGraphFormat, graphType string, height int, width int, client *mackerel.Client) string {\n\n\tvar markdown string\n\n\tif hostGraphs.Headline != \"\" {\n\t\tmarkdown += fmt.Sprintf(\"## %s\\n\", hostGraphs.Headline)\n\t}\n\tmarkdown += generateHostGraphsTableHeader(hostGraphs.HostIDs, client)\n\n\tif hostGraphs.Period == \"\" {\n\t\thostGraphs.Period = \"1h\"\n\t}\n\n\tfor _, graphName := range hostGraphs.GraphNames {\n\t\tcurrentColumnCount := 0\n\t\tfor _, hostID := range hostGraphs.HostIDs {\n\t\t\th := &hostGraph{\n\t\t\t\thostID,\n\t\t\t\tgraphType,\n\t\t\t\tgraphName,\n\t\t\t\thostGraphs.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName), len(hostGraphs.HostIDs))\n\t\t\tcurrentColumnCount++\n\t\t\tif currentColumnCount >= len(hostGraphs.HostIDs) {\n\t\t\t\tmarkdown += \"|\\n\"\n\t\t\t\tcurrentColumnCount = 0\n\t\t\t}\n\t\t}\n\t}\n\n\treturn markdown\n}\n\nfunc generateHostGraphsTableHeader(hostIDs []string, client *mackerel.Client) string {\n\tvar header string\n\tfor _, hostID := range hostIDs {\n\t\thost, err := client.FindHost(hostID)\n\t\tlogger.DieIf(err)\n\n\t\tvar hostName string\n\t\tif host.DisplayName != \"\" {\n\t\t\thostName = host.DisplayName\n\t\t} else {\n\t\t\thostName = host.Name\n\t\t}\n\n\t\theader += \"|\" + hostName\n\t}\n\n\theader += \"|\\n\" + generateGraphTableHeader(len(hostIDs))\n\n\treturn header\n}\n\nfunc generateGraphsMarkdown(orgName string, graphs *graphFormat, graphType string, height int, width int) string {\n\n\tvar markdown string\n\tif graphs.ColumnCount == 0 {\n\t\tgraphs.ColumnCount = 1\n\t}\n\tcurrentColumnCount := 0\n\n\tif graphs.Headline != \"\" {\n\t\tmarkdown += fmt.Sprintf(\"## %s\\n\", graphs.Headline)\n\t}\n\tmarkdown += generateGraphTableHeader(graphs.ColumnCount)\n\n\tfor i, gd := range graphs.GraphDefs {\n\t\tvar graphDefCount = 0\n\t\tif gd.isHostGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isServiceGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isRoleGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isExpressionGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif graphDefCount != 1 {\n\t\t\tlogger.Log(\"error\", \"at least one between hostId, service_name and query is required.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif gd.Period == \"\" {\n\t\t\tgd.Period = \"1h\"\n\t\t}\n\n\t\tif gd.isHostGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for host graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\th := &hostGraph{\n\t\t\t\tgd.HostID,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName), graphs.ColumnCount)\n\t\t}\n\n\t\tif gd.isServiceGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for service graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\th := &serviceGraph{\n\t\t\t\tgd.ServiceName,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName), graphs.ColumnCount)\n\t\t}\n\n\t\tif gd.isRoleGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for role graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tr := &roleGraph{\n\t\t\t\tgd.ServiceName,\n\t\t\t\tgd.RoleName,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\tgd.Stacked,\n\t\t\t\tgd.Simplified,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, r.generateGraphString(orgName), graphs.ColumnCount)\n\t\t}\n\n\t\tif gd.isExpressionGraph() {\n\t\t\te := &expressionGraph{\n\t\t\t\tgd.Query,\n\t\t\t\tgraphType,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, e.generateGraphString(orgName), graphs.ColumnCount)\n\t\t}\n\n\t\tcurrentColumnCount++\n\t\tif currentColumnCount >= graphs.ColumnCount || i >= len(graphs.GraphDefs)-1 {\n\t\t\tmarkdown += \"|\\n\"\n\t\t\tcurrentColumnCount = 0\n\t\t}\n\t}\n\n\treturn markdown\n}\n\nfunc generateGraphTableHeader(confColumnCount int) string {\n\treturn strings.Repeat(\"|:-:\", confColumnCount) + \"|\\n\"\n}\n\nfunc appendMarkdown(markdown string, addItem string, confColumnCount int) string {\n\treturn markdown + \"|\" + addItem\n}\n<commit_msg>remove unused parameter<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mackerelio\/mackerel-client-go\"\n\t\"github.com\/mackerelio\/mkr\/logger\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar commandDashboards = cli.Command{\n\tName: \"dashboards\",\n\tSubcommands: []cli.Command{\n\t\t{\n\t\t\tName: \"generate\",\n\t\t\tUsage: \"Generate custom dashboard\",\n\t\t\tDescription: `\n A custom dashboard is registered from a yaml file..\n Requests \"POST \/api\/v0\/dashboards\". See https:\/\/mackerel.io\/ja\/api-docs\/entry\/dashboards#create.\n`,\n\t\t\tAction: doGenerateDashboards,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"print, p\", Usage: \"markdown is output in standard output.\"},\n\t\t\t},\n\t\t},\n\t},\n}\n\ntype graphsConfig struct {\n\tTitle string `yaml:\"title\"`\n\tURLPath string `yaml:\"url_path\"`\n\tGraphType string `yaml:\"graph_type\"`\n\tHeight int `yaml:\"height\"`\n\tWidth int `yaml:\"width\"`\n\tHostGraphFormat []*hostGraphFormat `yaml:\"host_graphs\"`\n\tGraphFormat []*graphFormat `yaml:\"graphs\"`\n}\n\ntype hostGraphFormat struct {\n\tHeadline string `yaml:\"headline\"`\n\tHostIDs []string `yaml:\"host_ids\"`\n\tGraphNames []string `yaml:\"graph_names\"`\n\tPeriod string `yaml:\"period\"`\n}\ntype graphFormat struct {\n\tHeadline string `yaml:\"headline\"`\n\tColumnCount int `yaml:\"column_count\"`\n\tGraphDefs []*graphDef `yaml:\"graph_def\"`\n}\ntype graphDef struct {\n\tHostID string `yaml:\"host_id\"`\n\tServiceName string `yaml:\"service_name\"`\n\tRoleName string `yaml:\"role_name\"`\n\tQuery string `yaml:\"query\"`\n\tGraphName string `yaml:\"graph_name\"`\n\tPeriod string `yaml:\"period\"`\n\tStacked bool `yaml:\"stacked\"`\n\tSimplified bool `yaml:\"simplified\"`\n}\n\nfunc (g graphDef) isHostGraph() bool {\n\treturn g.HostID != \"\"\n}\nfunc (g graphDef) isServiceGraph() bool {\n\treturn g.ServiceName != \"\" && g.RoleName == \"\"\n}\nfunc (g graphDef) isRoleGraph() bool {\n\treturn g.ServiceName != \"\" && g.RoleName != \"\"\n}\nfunc (g graphDef) isExpressionGraph() bool {\n\treturn g.Query != \"\"\n}\n\ntype baseGraph interface {\n\tgetURL(string, bool) string\n\tgetHeight() int\n\tgetWidth() int\n}\n\ntype hostGraph struct {\n\tHostID string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (h hostGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/hosts\/%s\"+extension, orgName, h.HostID))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", h.Graph)\n\tparam.Add(\"period\", h.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (h hostGraph) generateGraphString(orgName string) string {\n\tif h.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, h)\n\t}\n\treturn makeImageMarkdown(orgName, h)\n}\nfunc (h hostGraph) getHeight() int {\n\treturn h.height\n}\nfunc (h hostGraph) getWidth() int {\n\treturn h.width\n}\n\ntype serviceGraph struct {\n\tServiceName string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (s serviceGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/services\/%s\"+extension, orgName, s.ServiceName))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", s.Graph)\n\tparam.Add(\"period\", s.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (s serviceGraph) generateGraphString(orgName string) string {\n\tif s.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, s)\n\t}\n\treturn makeImageMarkdown(orgName, s)\n}\nfunc (s serviceGraph) getHeight() int {\n\treturn s.height\n}\nfunc (s serviceGraph) getWidth() int {\n\treturn s.width\n}\n\ntype roleGraph struct {\n\tServiceName string\n\tRoleName string\n\tGraphType string\n\tGraph string\n\tPeriod string\n\tStacked bool\n\tSimplified bool\n\theight int\n\twidth int\n}\n\nfunc (r roleGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/services\/%s\/%s\"+extension, orgName, r.ServiceName, r.RoleName))\n\tparam := url.Values{}\n\tparam.Add(\"graph\", r.Graph)\n\tparam.Add(\"stacked\", strconv.FormatBool(r.Stacked))\n\tparam.Add(\"simplified\", strconv.FormatBool(r.Simplified))\n\tparam.Add(\"period\", r.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (r roleGraph) generateGraphString(orgName string) string {\n\tif r.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, r)\n\t}\n\treturn makeImageMarkdown(orgName, r)\n}\nfunc (r roleGraph) getHeight() int {\n\treturn r.height\n}\nfunc (r roleGraph) getWidth() int {\n\treturn r.width\n}\n\ntype expressionGraph struct {\n\tQuery string\n\tGraphType string\n\tPeriod string\n\theight int\n\twidth int\n}\n\nfunc (e expressionGraph) getURL(orgName string, isImage bool) string {\n\textension := \"\"\n\tif isImage {\n\t\textension = \".png\"\n\t}\n\tu, _ := url.Parse(fmt.Sprintf(\"https:\/\/mackerel.io\/embed\/orgs\/%s\/advanced-graph\"+extension, orgName))\n\tparam := url.Values{}\n\tparam.Add(\"query\", e.Query)\n\tparam.Add(\"period\", e.Period)\n\tu.RawQuery = param.Encode()\n\treturn u.String()\n}\nfunc (e expressionGraph) generateGraphString(orgName string) string {\n\tif e.GraphType == \"iframe\" {\n\t\treturn makeIframeTag(orgName, e)\n\t}\n\treturn makeImageMarkdown(orgName, e)\n}\nfunc (e expressionGraph) getHeight() int {\n\treturn e.height\n}\nfunc (e expressionGraph) getWidth() int {\n\treturn e.width\n}\n\nfunc makeIframeTag(orgName string, g baseGraph) string {\n\treturn fmt.Sprintf(`<iframe src=\"%s\" height=\"%d\" width=\"%d\" frameborder=\"0\"><\/iframe>`, g.getURL(orgName, false), g.getHeight(), g.getWidth())\n}\n\nfunc makeImageMarkdown(orgName string, g baseGraph) string {\n\treturn fmt.Sprintf(\"[![graph](%s)](%s)\", g.getURL(orgName, true), g.getURL(orgName, true))\n}\n\nfunc doGenerateDashboards(c *cli.Context) error {\n\tconffile := c.GlobalString(\"conf\")\n\n\tisStdout := c.Bool(\"print\")\n\n\targFilePath := c.Args()\n\tif len(argFilePath) < 1 {\n\t\tlogger.Log(\"error\", \"at least one argumet is required.\")\n\t\tcli.ShowCommandHelp(c, \"generate\")\n\t\tos.Exit(1)\n\t}\n\n\tbuf, err := ioutil.ReadFile(argFilePath[0])\n\tlogger.DieIf(err)\n\n\tyml := graphsConfig{}\n\terr = yaml.Unmarshal(buf, &yml)\n\tlogger.DieIf(err)\n\n\tclient := newMackerel(conffile)\n\n\torg, err := client.GetOrg()\n\tlogger.DieIf(err)\n\n\tif yml.Title == \"\" {\n\t\tlogger.Log(\"error\", \"title is required in yaml.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.URLPath == \"\" {\n\t\tlogger.Log(\"error\", \"url_path is required in yaml.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.GraphType == \"\" {\n\t\tyml.GraphType = \"iframe\"\n\t}\n\tif yml.GraphType != \"iframe\" && yml.GraphType != \"image\" {\n\t\tlogger.Log(\"error\", \"graph_type should be 'iframe' or 'image'.\")\n\t\tos.Exit(1)\n\t}\n\tif yml.Height == 0 {\n\t\tyml.Height = 200\n\t}\n\tif yml.Width == 0 {\n\t\tyml.Width = 400\n\t}\n\n\tif yml.HostGraphFormat != nil && yml.GraphFormat != nil {\n\t\tlogger.Log(\"error\", \"you cannot specify both 'graphs' and host_graphs'.\")\n\t}\n\n\tvar markdown string\n\tfor _, h := range yml.HostGraphFormat {\n\t\tmarkdown += generateHostGraphsMarkdown(org.Name, h, yml.GraphType, yml.Height, yml.Width, client)\n\t}\n\tfor _, g := range yml.GraphFormat {\n\t\tmarkdown += generateGraphsMarkdown(org.Name, g, yml.GraphType, yml.Height, yml.Width)\n\t}\n\n\tif isStdout {\n\t\tfmt.Println(markdown)\n\t} else {\n\t\tupdateDashboard := &mackerel.Dashboard{\n\t\t\tTitle: yml.Title,\n\t\t\tBodyMarkDown: markdown,\n\t\t\tURLPath: yml.URLPath,\n\t\t}\n\n\t\tdashboards, fetchError := client.FindDashboards()\n\t\tlogger.DieIf(fetchError)\n\n\t\tdashboardID := \"\"\n\t\tfor _, ds := range dashboards {\n\t\t\tif ds.URLPath == yml.URLPath {\n\t\t\t\tdashboardID = ds.ID\n\t\t\t}\n\t\t}\n\n\t\tif dashboardID == \"\" {\n\t\t\t_, createError := client.CreateDashboard(updateDashboard)\n\t\t\tlogger.DieIf(createError)\n\t\t} else {\n\t\t\t_, updateError := client.UpdateDashboard(dashboardID, updateDashboard)\n\t\t\tlogger.DieIf(updateError)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc generateHostGraphsMarkdown(orgName string, hostGraphs *hostGraphFormat, graphType string, height int, width int, client *mackerel.Client) string {\n\n\tvar markdown string\n\n\tif hostGraphs.Headline != \"\" {\n\t\tmarkdown += fmt.Sprintf(\"## %s\\n\", hostGraphs.Headline)\n\t}\n\tmarkdown += generateHostGraphsTableHeader(hostGraphs.HostIDs, client)\n\n\tif hostGraphs.Period == \"\" {\n\t\thostGraphs.Period = \"1h\"\n\t}\n\n\tfor _, graphName := range hostGraphs.GraphNames {\n\t\tcurrentColumnCount := 0\n\t\tfor _, hostID := range hostGraphs.HostIDs {\n\t\t\th := &hostGraph{\n\t\t\t\thostID,\n\t\t\t\tgraphType,\n\t\t\t\tgraphName,\n\t\t\t\thostGraphs.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName))\n\t\t\tcurrentColumnCount++\n\t\t\tif currentColumnCount >= len(hostGraphs.HostIDs) {\n\t\t\t\tmarkdown += \"|\\n\"\n\t\t\t\tcurrentColumnCount = 0\n\t\t\t}\n\t\t}\n\t}\n\n\treturn markdown\n}\n\nfunc generateHostGraphsTableHeader(hostIDs []string, client *mackerel.Client) string {\n\tvar header string\n\tfor _, hostID := range hostIDs {\n\t\thost, err := client.FindHost(hostID)\n\t\tlogger.DieIf(err)\n\n\t\tvar hostName string\n\t\tif host.DisplayName != \"\" {\n\t\t\thostName = host.DisplayName\n\t\t} else {\n\t\t\thostName = host.Name\n\t\t}\n\n\t\theader += \"|\" + hostName\n\t}\n\n\theader += \"|\\n\" + generateGraphTableHeader(len(hostIDs))\n\n\treturn header\n}\n\nfunc generateGraphsMarkdown(orgName string, graphs *graphFormat, graphType string, height int, width int) string {\n\n\tvar markdown string\n\tif graphs.ColumnCount == 0 {\n\t\tgraphs.ColumnCount = 1\n\t}\n\tcurrentColumnCount := 0\n\n\tif graphs.Headline != \"\" {\n\t\tmarkdown += fmt.Sprintf(\"## %s\\n\", graphs.Headline)\n\t}\n\tmarkdown += generateGraphTableHeader(graphs.ColumnCount)\n\n\tfor i, gd := range graphs.GraphDefs {\n\t\tvar graphDefCount = 0\n\t\tif gd.isHostGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isServiceGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isRoleGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif gd.isExpressionGraph() {\n\t\t\tgraphDefCount++\n\t\t}\n\t\tif graphDefCount != 1 {\n\t\t\tlogger.Log(\"error\", \"at least one between hostId, service_name and query is required.\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tif gd.Period == \"\" {\n\t\t\tgd.Period = \"1h\"\n\t\t}\n\n\t\tif gd.isHostGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for host graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\th := &hostGraph{\n\t\t\t\tgd.HostID,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName))\n\t\t}\n\n\t\tif gd.isServiceGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for service graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\th := &serviceGraph{\n\t\t\t\tgd.ServiceName,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, h.generateGraphString(orgName))\n\t\t}\n\n\t\tif gd.isRoleGraph() {\n\t\t\tif gd.GraphName == \"\" {\n\t\t\t\tlogger.Log(\"error\", \"graph_name is required for role graph.\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tr := &roleGraph{\n\t\t\t\tgd.ServiceName,\n\t\t\t\tgd.RoleName,\n\t\t\t\tgraphType,\n\t\t\t\tgd.GraphName,\n\t\t\t\tgd.Period,\n\t\t\t\tgd.Stacked,\n\t\t\t\tgd.Simplified,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, r.generateGraphString(orgName))\n\t\t}\n\n\t\tif gd.isExpressionGraph() {\n\t\t\te := &expressionGraph{\n\t\t\t\tgd.Query,\n\t\t\t\tgraphType,\n\t\t\t\tgd.Period,\n\t\t\t\theight,\n\t\t\t\twidth,\n\t\t\t}\n\t\t\tmarkdown = appendMarkdown(markdown, e.generateGraphString(orgName))\n\t\t}\n\n\t\tcurrentColumnCount++\n\t\tif currentColumnCount >= graphs.ColumnCount || i >= len(graphs.GraphDefs)-1 {\n\t\t\tmarkdown += \"|\\n\"\n\t\t\tcurrentColumnCount = 0\n\t\t}\n\t}\n\n\treturn markdown\n}\n\nfunc generateGraphTableHeader(confColumnCount int) string {\n\treturn strings.Repeat(\"|:-:\", confColumnCount) + \"|\\n\"\n}\n\nfunc appendMarkdown(markdown string, addItem string) string {\n\treturn markdown + \"|\" + addItem\n}\n<|endoftext|>"} {"text":"<commit_before>package data\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n)\n\ntype Block struct {\n\t\/*\n\t magic - 6\n\t block_len - 2\n\t num_links - 2\n\t offset - 4\n\t content_len - 4\n\t links - num_links * 768\n\t content - content_len\n\t padding\n\t*\/\n\tmagic [6]byte\n\tblock_len int8\n\tnum_links int8\n\toffset int32\n\tcontent_len int32\n\tlinks []Link\n\tcontent []byte\n\tpadding []byte\n}\n\nfunc NewBlock() *Block {\n\treturn &Block{}\n}\n\nfunc (blk *Block) Create(block_size int, links []Link, content []byte) {\n\n\tblk.magic = [6]byte{0xF0, 0x07, 0xDA, 0x7A, '\\r', '\\n'}\n\tblk.block_len = int8(math.Log2(float64(block_size)))\n\tblk.num_links = int8(len(links))\n\tblk.offset = int32(blk.num_links)*96 + 16\n\tblk.content_len = int32(len(content))\n\n\tblk.links = links\n\tblk.content = content\n\n\tpadding_len := block_size - 16 - int(blk.content_len) - (int(blk.num_links) * 96)\n\n\tpadding := make([]byte, padding_len)\n\t_, err := rand.Read(padding)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc (blk *Block) GetBytes() []byte {\n\tvar bin_buf bytes.Buffer\n\tbinary.Write(&bin_buf, binary.BigEndian, blk)\n\n\treturn bin_buf.Bytes()\n}\n\nfunc (blk *Block) GetKey() []byte {\n\n\tbin_blk := blk.GetBytes()\n\n\t\/\/ Create hash and feed data into ongoing hash\n\thash := sha256.New()\n\thash.Write(bin_blk)\n\n\t\/\/ Return the hash and append to nil string\n\treturn hash.Sum(nil)\n}\n\nfunc (blk *Block) Encrypt(key []byte) (ciphertext []byte) {\n\n\tplaintext := blk.GetBytes()\n\n\tblock, err := aes.NewCipher(key[16:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tciphertext = make([]byte, len(plaintext))\n\tiv := key[:16]\n\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext, plaintext)\n\n\treturn\n}\n\nfunc (blk *Block) ContentSize() int32 {\n\treturn blk.content_len\n}\n\nfunc (blk *Block) Offset() int32 {\n\treturn blk.offset\n}\n\nfunc (blk *Block) NumLinks() int8 {\n\treturn blk.num_links\n}\n<commit_msg>Cleans up naming.<commit_after>package data\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n)\n\ntype Block struct {\n\t\/*\n\t magic - 6\n\t block_len - 2\n\t num_links - 2\n\t offset - 4\n\t content_len - 4\n\t links - num_links * 768\n\t content - content_len\n\t padding\n\t*\/\n\tmagic [6]byte\n\tblock_len int8\n\tnum_links int8\n\toffset int32\n\tcontent_len int32\n\tlinks []Link\n\tcontent []byte\n\tpadding []byte\n}\n\nfunc NewBlock() *Block {\n\treturn &Block{}\n}\n\nfunc (blk *Block) Create(block_size int, links []Link, content []byte) {\n\n\tblk.magic = [6]byte{0xF0, 0x07, 0xDA, 0x7A, '\\r', '\\n'}\n\tblk.block_len = int8(math.Log2(float64(block_size)))\n\tblk.num_links = int8(len(links))\n\tblk.offset = int32(blk.num_links)*96 + 16\n\tblk.content_len = int32(len(content))\n\n\tblk.links = links\n\tblk.content = content\n\n\tpadding_len := block_size - 16 - int(blk.content_len) - (int(blk.num_links) * 96)\n\n\tpadding := make([]byte, padding_len)\n\t_, err := rand.Read(padding)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc (blk *Block) GetBytes() []byte {\n\tvar bin_buf bytes.Buffer\n\tbinary.Write(&bin_buf, binary.BigEndian, blk)\n\n\treturn bin_buf.Bytes()\n}\n\nfunc (blk *Block) GetHash() []byte {\n\n\tbin_blk := blk.GetBytes()\n\n\t\/\/ Create hash and feed data into ongoing hash\n\thash := sha256.New()\n\thash.Write(bin_blk)\n\n\t\/\/ Return the hash and append to nil string\n\treturn hash.Sum(nil)\n}\n\nfunc (blk *Block) Encrypt(hash []byte) (ciphertext []byte) {\n\n\tplaintext := blk.GetBytes()\n\tkey := hash[16:]\n\tiv := hash[:16]\n\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tciphertext = make([]byte, len(plaintext))\n\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext, plaintext)\n\n\treturn\n}\n\nfunc (blk *Block) ContentSize() int32 {\n\treturn blk.content_len\n}\n\nfunc (blk *Block) Offset() int32 {\n\treturn blk.offset\n}\n\nfunc (blk *Block) NumLinks() int8 {\n\treturn blk.num_links\n}\n<|endoftext|>"} {"text":"<commit_before>package pollyanna\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar baseDOM = `<div class=\"pollyanna\"><div class=\"wrap\">%s<\/div><\/div>`\nvar shardDOM = `<div class=\"shard-wrap\"><div class=\"shard\"><\/div><\/div>`\nvar baseCSS = `.pollyanna{position:absolute;width:100%;height:100%;top:0;left:0}.wrap{width:800;height:600px;top:5%;left:5%;position:absolute}.shard-wrap,.shard-wrap .shard,.shard-wrap .shard::before{width:100%;height:100%;position:absolute}.shard-wrap{z-index:2}.shard-wrap .shard{background-color:#fff}.shard-wrap .shard::before{content:\"\";background:rgba(255,255,255,0);top:0;left:0}%s`\nvar shardCSS = `.shard-wrap:nth-child(%d) .shard{clip-path: polygon(%s);background-color:%s;}`\n\n\/\/ Svg is the top level <svg\/> node\ntype Svg struct {\n\tXMLName xml.Name `xml:\"svg\"`\n\tVersion string `xml:\"version,attr\"`\n\tWidth string `xml:\"width,attr\"`\n\tHeight string `xml:\"height,attr\"`\n\tViewBox string `xml:\"viewBox,attr\"`\n\tPolygons []Polygon `xml:\"polygon\"`\n}\n\n\/\/ Polygon is a single <polygon\/> node\ntype Polygon struct {\n\tFill string `xml:\"fill,attr\"`\n\tRawPoints string `xml:\"points,attr\"`\n}\n\n\/\/ Output represents the associated HTML Dom and CSS markup for the SVG image\ntype Output struct {\n\tHTML string\n\tCSS string\n}\n\ntype cssNode struct {\n\tselector string\n\trules []cssRule\n}\n\ntype cssRule struct {\n\tproperty string\n\tvalue string\n}\n\n\/\/ ParseSVG will parse the incoming SVG document bytes\nfunc ParseSVG(bytes []byte) (Svg, error) {\n\tvar svg Svg\n\n\terr := xml.Unmarshal(bytes, &svg)\n\tif err != nil {\n\t\treturn svg, err\n\t}\n\n\tif 0 == len(svg.Polygons) {\n\t\treturn svg, errors.New(\"No <polygon\/> nodes were found in the SVG data.\")\n\t}\n\n\treturn svg, nil\n}\n\n\/\/ GenerateOutput converts the raw SVG data into HTML DOM nodes and associated CSS rules\nfunc (s Svg) GenerateOutput() (Output, error) {\n\tvar output Output\n\toutput.HTML = fmt.Sprintf(baseDOM, strings.Repeat(shardDOM, len(s.Polygons)))\n\toutput.CSS = baseCSS + s.cssShardChildren()\n\treturn output, nil\n}\n\nfunc (s Svg) cssShardChildren() string {\n\tcss := make([]string, len(s.Polygons))\n\n\tfor i, p := range s.Polygons {\n\t\tcss[i] = fmt.Sprintf(shardCSS, i+1, p.FormattedCSSPolygonPoints(), p.Fill)\n\t}\n\n\treturn strings.Join(css, ``)\n}\n\nfunc (p Polygon) FormattedCSSPolygonPoints() string {\n\treturn cssPolygonBuilder(p.Points()[0], p.Points()[1:len(p.Points())])\n}\n\nfunc (p Polygon) Points() [][]string {\n\tvar points [][]string\n\tpointsStr := strings.Split(strings.Trim(p.RawPoints, ` `), ` `)\n\n\tfor _, pointStr := range pointsStr {\n\t\tpoints = append(points, strings.Split(pointStr, `,`))\n\t}\n\n\treturn points\n}\n\nfunc (s Svg) String() string {\n\treturn fmt.Sprintf(\"SVG: version - %s, width - %s, height - %s, box - %s\", s.Version, s.Width, s.Height, s.ViewBox)\n}\n\nfunc (p Polygon) String() string {\n\treturn fmt.Sprintf(\"Polygon: fill - %s, points - %s\", p.Fill, p.RawPoints)\n}\n\nfunc cssPolygonBuilder(points []string, rest [][]string) string {\n\tif len(points) == 0 {\n\t\treturn ``\n\t}\n\n\tif len(rest) == 0 {\n\t\treturn cssFormatSinglePoint(points)\n\t}\n\n\tfmtString := \"%s, %s\"\n\tif len(rest) == 1 {\n\t\treturn fmt.Sprintf(fmtString, cssFormatSinglePoint(points), cssFormatSinglePoint(rest[0]))\n\t}\n\n\treturn fmt.Sprintf(fmtString, cssFormatSinglePoint(points), cssPolygonBuilder(rest[0], rest[1:len(rest)]))\n}\n\nfunc cssFormatSinglePoint(p []string) string {\n\treturn fmt.Sprintf(\"%spx %spx\", p[0], p[1])\n}\n<commit_msg>Need to use -webkit prefix currently<commit_after>package pollyanna\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar baseDOM = `<div class=\"pollyanna\"><div class=\"wrap\">%s<\/div><\/div>`\nvar shardDOM = `<div class=\"shard-wrap\"><div class=\"shard\"><\/div><\/div>`\nvar baseCSS = `.pollyanna{position:absolute;width:100%;height:100%;top:0;left:0}.wrap{width:800;height:600px;top:5%;left:5%;position:absolute}.shard-wrap,.shard-wrap .shard,.shard-wrap .shard::before{width:100%;height:100%;position:absolute}.shard-wrap{z-index:2}.shard-wrap .shard{background-color:#fff}.shard-wrap .shard::before{content:\"\";background:rgba(255,255,255,0);top:0;left:0}%s`\nvar shardCSS = `.shard-wrap:nth-child(%d) .shard{-webkit-clip-path: polygon(%s);background-color:%s;}`\n\n\/\/ Svg is the top level <svg\/> node\ntype Svg struct {\n\tXMLName xml.Name `xml:\"svg\"`\n\tVersion string `xml:\"version,attr\"`\n\tWidth string `xml:\"width,attr\"`\n\tHeight string `xml:\"height,attr\"`\n\tViewBox string `xml:\"viewBox,attr\"`\n\tPolygons []Polygon `xml:\"polygon\"`\n}\n\n\/\/ Polygon is a single <polygon\/> node\ntype Polygon struct {\n\tFill string `xml:\"fill,attr\"`\n\tRawPoints string `xml:\"points,attr\"`\n}\n\n\/\/ Output represents the associated HTML Dom and CSS markup for the SVG image\ntype Output struct {\n\tHTML string\n\tCSS string\n}\n\ntype cssNode struct {\n\tselector string\n\trules []cssRule\n}\n\ntype cssRule struct {\n\tproperty string\n\tvalue string\n}\n\n\/\/ ParseSVG will parse the incoming SVG document bytes\nfunc ParseSVG(bytes []byte) (Svg, error) {\n\tvar svg Svg\n\n\terr := xml.Unmarshal(bytes, &svg)\n\tif err != nil {\n\t\treturn svg, err\n\t}\n\n\tif 0 == len(svg.Polygons) {\n\t\treturn svg, errors.New(\"No <polygon\/> nodes were found in the SVG data.\")\n\t}\n\n\treturn svg, nil\n}\n\n\/\/ GenerateOutput converts the raw SVG data into HTML DOM nodes and associated CSS rules\nfunc (s Svg) GenerateOutput() (Output, error) {\n\tvar output Output\n\toutput.HTML = fmt.Sprintf(baseDOM, strings.Repeat(shardDOM, len(s.Polygons)))\n\toutput.CSS = baseCSS + s.cssShardChildren()\n\treturn output, nil\n}\n\nfunc (s Svg) cssShardChildren() string {\n\tcss := make([]string, len(s.Polygons))\n\n\tfor i, p := range s.Polygons {\n\t\tcss[i] = fmt.Sprintf(shardCSS, i+1, p.FormattedCSSPolygonPoints(), p.Fill)\n\t}\n\n\treturn strings.Join(css, ``)\n}\n\nfunc (p Polygon) FormattedCSSPolygonPoints() string {\n\treturn cssPolygonBuilder(p.Points()[0], p.Points()[1:len(p.Points())])\n}\n\nfunc (p Polygon) Points() [][]string {\n\tvar points [][]string\n\tpointsStr := strings.Split(strings.Trim(p.RawPoints, ` `), ` `)\n\n\tfor _, pointStr := range pointsStr {\n\t\tpoints = append(points, strings.Split(pointStr, `,`))\n\t}\n\n\treturn points\n}\n\nfunc (s Svg) String() string {\n\treturn fmt.Sprintf(\"SVG: version - %s, width - %s, height - %s, box - %s\", s.Version, s.Width, s.Height, s.ViewBox)\n}\n\nfunc (p Polygon) String() string {\n\treturn fmt.Sprintf(\"Polygon: fill - %s, points - %s\", p.Fill, p.RawPoints)\n}\n\nfunc cssPolygonBuilder(points []string, rest [][]string) string {\n\tif len(points) == 0 {\n\t\treturn ``\n\t}\n\n\tif len(rest) == 0 {\n\t\treturn cssFormatSinglePoint(points)\n\t}\n\n\tfmtString := \"%s, %s\"\n\tif len(rest) == 1 {\n\t\treturn fmt.Sprintf(fmtString, cssFormatSinglePoint(points), cssFormatSinglePoint(rest[0]))\n\t}\n\n\treturn fmt.Sprintf(fmtString, cssFormatSinglePoint(points), cssPolygonBuilder(rest[0], rest[1:len(rest)]))\n}\n\nfunc cssFormatSinglePoint(p []string) string {\n\treturn fmt.Sprintf(\"%spx %spx\", p[0], p[1])\n}\n<|endoftext|>"} {"text":"<commit_before>package optmem\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/chihaya\/chihaya\"\n\t\"github.com\/chihaya\/chihaya\/pkg\/stopper\"\n\t\"github.com\/chihaya\/chihaya\/server\/store\"\n)\n\nfunc init() {\n\tstore.RegisterPeerStoreDriver(\"optmem\", &peerStoreDriver{})\n}\n\n\/\/ ErrInvalidIP is returned if a peer with an invalid IP was specified.\nvar ErrInvalidIP = errors.New(\"invalid IP\")\n\ntype peerStoreDriver struct{}\n\nfunc (p *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, error) {\n\tcfg, err := newPeerStoreConfig(storecfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcInterval, err := time.ParseDuration(cfg.GCInterval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcCutoff, err := time.ParseDuration(cfg.GCCutoff)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tps := &peerStore{\n\t\tshards: newShardContainer(cfg.ShardCountBits),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ps.closed:\n\t\t\t\treturn\n\t\t\tcase <-time.After(gcInterval):\n\t\t\t\tcutoffTime := time.Now().Add(gcCutoff * -1)\n\t\t\t\tlogln(\"collecting garbage. Cutoff time: \" + cutoffTime.String())\n\t\t\t\terr := ps.CollectGarbage(cutoffTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogln(\"failed to collect garbage: \" + err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlogln(\"finished collecting garbage\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ps, nil\n}\n\ntype peerStore struct {\n\tshards *shardContainer\n\tclosed chan struct{}\n}\n\nfunc (s *peerStore) collectGarbage(cutoff time.Time) {\n\tinternalCutoff := uint16(cutoff.Unix())\n\tmaxDiff := uint16(time.Now().Unix() - cutoff.Unix())\n\tlogf(\"running GC. internal cutoff: %d, maxDiff: %d\\n\", internalCutoff, maxDiff)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tdeltaTorrents := 0\n\t\tshard := s.shards.lockShard(i)\n\n\t\tfor ih, s := range shard.swarms {\n\t\t\tgc := s.peers.collectGarbage(internalCutoff, maxDiff)\n\t\t\tif gc {\n\t\t\t\ts.peers.rebalanceBuckets()\n\t\t\t}\n\t\t\tif s.peers.numPeers == 0 {\n\t\t\t\tdelete(shard.swarms, ih)\n\t\t\t\tdeltaTorrents--\n\t\t\t}\n\t\t}\n\n\t\ts.shards.unlockShard(i, deltaTorrents)\n\t\truntime.Gosched()\n\t}\n}\n\nfunc (s *peerStore) CollectGarbage(cutoff time.Time) error {\n\ts.collectGarbage(cutoff)\n\treturn nil\n}\n\nfunc (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagSeeder)\n\tpeer.setPeerTime(uint16(time.Now().Unix()))\n\tvar created bool\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer func() {\n\t\tif created {\n\t\t\ts.shards.unlockShardByHash(ih, 1)\n\t\t} else {\n\t\t\ts.shards.unlockShardByHash(ih, 0)\n\t\t}\n\t}()\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\tcreated = true\n\t\tpl = swarm{peers: newPeerList()}\n\t\tshard.swarms[ih] = pl\n\t}\n\n\tpl.peers.putPeer(peer)\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagSeeder)\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\tfound := pl.peers.removePeer(peer)\n\tif !found {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\n\tif pl.peers.numPeers == 0 {\n\t\tdelete(shard.swarms, ih)\n\t\treturn nil\n\t}\n\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagLeecher)\n\tpeer.setPeerTime(uint16(time.Now().Unix()))\n\tvar created bool\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer func() {\n\t\tif created {\n\t\t\ts.shards.unlockShardByHash(ih, 1)\n\t\t} else {\n\t\t\ts.shards.unlockShardByHash(ih, 0)\n\t\t}\n\t}()\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\tcreated = true\n\t\tpl = swarm{peers: newPeerList()}\n\t\tshard.swarms[ih] = pl\n\t}\n\n\tpl.peers.putPeer(peer)\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagLeecher)\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\tfound := pl.peers.removePeer(peer)\n\tif !found {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\n\tif pl.peers.numPeers == 0 {\n\t\tdelete(shard.swarms, ih)\n\t\treturn nil\n\t}\n\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\t\/\/ we can just overwrite any leecher we already have, so\n\treturn s.PutSeeder(infoHash, p)\n}\n\nfunc (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers4, peers6 []chihaya.Peer, err error) {\n\tpeer := &peer{}\n\tif peer4.IP != nil && (len(peer4.IP) == 4 || len(peer4.IP) == 16) {\n\t\tpeer.setIP(peer4.IP.To16())\n\t\tpeer.setPort(peer4.Port)\n\t} else if peer6.IP != nil && len(peer6.IP) == 16 {\n\t\tpeer.setIP(peer6.IP.To16())\n\t\tpeer.setPort(peer6.Port)\n\t} else {\n\t\treturn nil, nil, ErrInvalidIP\n\t}\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAnnouncePeers(numWant, seeder, peer)\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int {\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn 0\n\t}\n\treturn pl.peers.numSeeders\n}\n\nfunc (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int {\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn 0\n\t}\n\treturn pl.peers.numPeers - pl.peers.numSeeders\n}\n\nvar v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}\n\nfunc (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers4, peers6 []chihaya.Peer, err error) {\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAllSeeders()\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers4, peers6 []chihaya.Peer, err error) {\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAllLeechers()\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) Stop() <-chan error {\n\treturn stopper.AlreadyStopped\n}\n\n\/\/ NumSwarms returns the total number of swarms tracked by the PeerStore.\n\/\/ This is the same as the amount of infohashes tracked.\n\/\/ Runs in constant time.\nfunc (s *peerStore) NumSwarms() uint64 {\n\treturn s.shards.getTorrentCount()\n}\n\n\/\/ NumTotalSeeders returns the total number of seeders tracked by the PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalSeeders() uint64 {\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numSeeders)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n\n\/\/ NumTotalLeechers returns the total number of leechers tracked by the\n\/\/ PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalLeechers() uint64 {\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numPeers - s.peers.numSeeders)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n\n\/\/ NumTotalPeers returns the total number of peers tracked by the PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalPeers() uint64 {\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numPeers)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n<commit_msg>add stopping<commit_after>package optmem\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/chihaya\/chihaya\"\n\t\"github.com\/chihaya\/chihaya\/pkg\/stopper\"\n\t\"github.com\/chihaya\/chihaya\/server\/store\"\n)\n\nfunc init() {\n\tstore.RegisterPeerStoreDriver(\"optmem\", &peerStoreDriver{})\n}\n\n\/\/ ErrInvalidIP is returned if a peer with an invalid IP was specified.\nvar ErrInvalidIP = errors.New(\"invalid IP\")\n\ntype peerStoreDriver struct{}\n\nfunc (p *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, error) {\n\tcfg, err := newPeerStoreConfig(storecfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcInterval, err := time.ParseDuration(cfg.GCInterval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcCutoff, err := time.ParseDuration(cfg.GCCutoff)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tps := &peerStore{\n\t\tshards: newShardContainer(cfg.ShardCountBits),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ps.closed:\n\t\t\t\treturn\n\t\t\tcase <-time.After(gcInterval):\n\t\t\t\tcutoffTime := time.Now().Add(gcCutoff * -1)\n\t\t\t\tlogln(\"collecting garbage. Cutoff time: \" + cutoffTime.String())\n\t\t\t\terr := ps.CollectGarbage(cutoffTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogln(\"failed to collect garbage: \" + err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlogln(\"finished collecting garbage\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ps, nil\n}\n\ntype peerStore struct {\n\tshards *shardContainer\n\tclosed chan struct{}\n}\n\nfunc (s *peerStore) collectGarbage(cutoff time.Time) {\n\tinternalCutoff := uint16(cutoff.Unix())\n\tmaxDiff := uint16(time.Now().Unix() - cutoff.Unix())\n\tlogf(\"running GC. internal cutoff: %d, maxDiff: %d\\n\", internalCutoff, maxDiff)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tdeltaTorrents := 0\n\t\tshard := s.shards.lockShard(i)\n\n\t\tfor ih, s := range shard.swarms {\n\t\t\tgc := s.peers.collectGarbage(internalCutoff, maxDiff)\n\t\t\tif gc {\n\t\t\t\ts.peers.rebalanceBuckets()\n\t\t\t}\n\t\t\tif s.peers.numPeers == 0 {\n\t\t\t\tdelete(shard.swarms, ih)\n\t\t\t\tdeltaTorrents--\n\t\t\t}\n\t\t}\n\n\t\ts.shards.unlockShard(i, deltaTorrents)\n\t\truntime.Gosched()\n\t}\n}\n\nfunc (s *peerStore) CollectGarbage(cutoff time.Time) error {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\ts.collectGarbage(cutoff)\n\treturn nil\n}\n\nfunc (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagSeeder)\n\tpeer.setPeerTime(uint16(time.Now().Unix()))\n\tvar created bool\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer func() {\n\t\tif created {\n\t\t\ts.shards.unlockShardByHash(ih, 1)\n\t\t} else {\n\t\t\ts.shards.unlockShardByHash(ih, 0)\n\t\t}\n\t}()\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\tcreated = true\n\t\tpl = swarm{peers: newPeerList()}\n\t\tshard.swarms[ih] = pl\n\t}\n\n\tpl.peers.putPeer(peer)\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagSeeder)\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\tfound := pl.peers.removePeer(peer)\n\tif !found {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\n\tif pl.peers.numPeers == 0 {\n\t\tdelete(shard.swarms, ih)\n\t\treturn nil\n\t}\n\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagLeecher)\n\tpeer.setPeerTime(uint16(time.Now().Unix()))\n\tvar created bool\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer func() {\n\t\tif created {\n\t\t\ts.shards.unlockShardByHash(ih, 1)\n\t\t} else {\n\t\t\ts.shards.unlockShardByHash(ih, 0)\n\t\t}\n\t}()\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\tcreated = true\n\t\tpl = swarm{peers: newPeerList()}\n\t\tshard.swarms[ih] = pl\n\t}\n\n\tpl.peers.putPeer(peer)\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tpeer := &peer{}\n\tv6 := p.IP.To16()\n\tif v6 == nil {\n\t\treturn ErrInvalidIP\n\t}\n\tpeer.setIP(v6)\n\tpeer.setPort(p.Port)\n\tpeer.setPeerFlag(peerFlagLeecher)\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\tfound := pl.peers.removePeer(peer)\n\tif !found {\n\t\treturn store.ErrResourceDoesNotExist\n\t}\n\n\tif pl.peers.numPeers == 0 {\n\t\tdelete(shard.swarms, ih)\n\t\treturn nil\n\t}\n\n\tpl.peers.rebalanceBuckets()\n\n\treturn nil\n}\n\nfunc (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {\n\t\/\/ we can just overwrite any leecher we already have, so\n\treturn s.PutSeeder(infoHash, p)\n}\n\nfunc (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers4, peers6 []chihaya.Peer, err error) {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tpeer := &peer{}\n\tif peer4.IP != nil && (len(peer4.IP) == 4 || len(peer4.IP) == 16) {\n\t\tpeer.setIP(peer4.IP.To16())\n\t\tpeer.setPort(peer4.Port)\n\t} else if peer6.IP != nil && len(peer6.IP) == 16 {\n\t\tpeer.setIP(peer6.IP.To16())\n\t\tpeer.setPort(peer6.Port)\n\t} else {\n\t\treturn nil, nil, ErrInvalidIP\n\t}\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAnnouncePeers(numWant, seeder, peer)\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn 0\n\t}\n\treturn pl.peers.numSeeders\n}\n\nfunc (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn 0\n\t}\n\treturn pl.peers.numPeers - pl.peers.numSeeders\n}\n\nvar v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}\n\nfunc (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers4, peers6 []chihaya.Peer, err error) {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAllSeeders()\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers4, peers6 []chihaya.Peer, err error) {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tih := infohash(infoHash)\n\n\tshard := s.shards.lockShardByHash(ih)\n\tdefer s.shards.unlockShardByHash(ih, 0)\n\n\tvar pl swarm\n\tvar ok bool\n\tif pl, ok = shard.swarms[ih]; !ok {\n\t\treturn nil, nil, store.ErrResourceDoesNotExist\n\t}\n\n\tpeers := pl.peers.getAllLeechers()\n\n\tfor _, p := range peers {\n\t\tif bytes.Equal(p.data[:12], v4InV6Prefix) {\n\t\t\tpeers4 = append(peers4, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t} else {\n\t\t\tpeers6 = append(peers6, chihaya.Peer{IP: net.IP(p.ip()), Port: p.port()})\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (s *peerStore) Stop() <-chan error {\n\tselect {\n\tcase <-s.closed:\n\t\treturn stopper.AlreadyStopped\n\tdefault:\n\t}\n\tclose(s.closed)\n\treturn stopper.AlreadyStopped\n}\n\n\/\/ NumSwarms returns the total number of swarms tracked by the PeerStore.\n\/\/ This is the same as the amount of infohashes tracked.\n\/\/ Runs in constant time.\nfunc (s *peerStore) NumSwarms() uint64 {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\treturn s.shards.getTorrentCount()\n}\n\n\/\/ NumTotalSeeders returns the total number of seeders tracked by the PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalSeeders() uint64 {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numSeeders)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n\n\/\/ NumTotalLeechers returns the total number of leechers tracked by the\n\/\/ PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalLeechers() uint64 {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numPeers - s.peers.numSeeders)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n\n\/\/ NumTotalPeers returns the total number of peers tracked by the PeerStore.\n\/\/ Runs in linear time in regards to the number of swarms tracked.\nfunc (s *peerStore) NumTotalPeers() uint64 {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tn := uint64(0)\n\n\tfor i := 0; i < len(s.shards.shards); i++ {\n\t\tshard := s.shards.lockShard(i)\n\t\tfor _, s := range shard.swarms {\n\t\t\tn += uint64(s.peers.numPeers)\n\t\t}\n\t\ts.shards.unlockShard(i, 0)\n\t\truntime.Gosched()\n\t}\n\n\treturn n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\tprojecthelpers \"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc doProfileUpdate(d *Daemon, project, name string, id int64, profile *api.Profile, req api.ProfilePut) error {\n\t\/\/ Check project limits.\n\terr := d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\treturn projecthelpers.AllowProfileUpdate(tx, project, name, req)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Sanity checks\n\terr = instance.ValidConfig(d.os, req.Config, true, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ At this point we don't know the instance type, so just use instancetype.Any type for validation.\n\terr = instance.ValidDevices(d.State(), d.cluster, instancetype.Any, deviceConfig.NewDevices(req.Devices), false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainers, err := getProfileContainersInfo(d.cluster, project, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to query instances associated with profile '%s'\", name)\n\t}\n\n\t\/\/ Check if the root device is supposed to be changed or removed.\n\toldProfileRootDiskDeviceKey, oldProfileRootDiskDevice, _ := shared.GetRootDiskDevice(profile.Devices)\n\t_, newProfileRootDiskDevice, _ := shared.GetRootDiskDevice(req.Devices)\n\tif len(containers) > 0 && oldProfileRootDiskDevice[\"pool\"] != \"\" && newProfileRootDiskDevice[\"pool\"] == \"\" || (oldProfileRootDiskDevice[\"pool\"] != newProfileRootDiskDevice[\"pool\"]) {\n\t\t\/\/ Check for containers using the device\n\t\tfor _, container := range containers {\n\t\t\t\/\/ Check if the device is locally overridden\n\t\t\tk, v, _ := shared.GetRootDiskDevice(container.Devices.CloneNative())\n\t\t\tif k != \"\" && v[\"pool\"] != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Check what profile the device comes from\n\t\t\tprofiles := container.Profiles\n\t\t\tfor i := len(profiles) - 1; i >= 0; i-- {\n\t\t\t\t_, profile, err := d.cluster.GetProfile(projecthelpers.Default, profiles[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we find a match for the device\n\t\t\t\t_, ok := profile.Devices[oldProfileRootDiskDeviceKey]\n\t\t\t\tif ok {\n\t\t\t\t\t\/\/ Found the profile\n\t\t\t\t\tif profiles[i] == name {\n\t\t\t\t\t\t\/\/ If it's the current profile, then we can't modify that root device\n\t\t\t\t\t\treturn fmt.Errorf(\"At least one instance relies on this profile's root disk device\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ If it's not, then move on to the next container\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Update the database\n\terr = d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\treturn tx.UpdateProfile(project, name, db.Profile{\n\t\t\tProject: project,\n\t\t\tName: name,\n\t\t\tDescription: req.Description,\n\t\t\tConfig: req.Config,\n\t\t\tDevices: req.Devices,\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update all the containers on this node using the profile. Must be\n\t\/\/ done after db.TxCommit due to DB lock.\n\tnodeName := \"\"\n\terr = d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tnodeName, err = tx.GetLocalNodeName()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to query local node name\")\n\t}\n\tfailures := map[string]error{}\n\tfor _, args := range containers {\n\t\terr := doProfileUpdateContainer(d, name, profile.ProfilePut, nodeName, args)\n\t\tif err != nil {\n\t\t\tfailures[args.Name] = err\n\t\t}\n\t}\n\n\tif len(failures) != 0 {\n\t\tmsg := \"The following containers failed to update (profile change still saved):\\n\"\n\t\tfor cname, err := range failures {\n\t\t\tmsg += fmt.Sprintf(\" - %s: %s\\n\", cname, err)\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", msg)\n\t}\n\n\treturn nil\n}\n\n\/\/ Like doProfileUpdate but does not update the database, since it was already\n\/\/ updated by doProfileUpdate itself, called on the notifying node.\nfunc doProfileUpdateCluster(d *Daemon, project, name string, old api.ProfilePut) error {\n\tnodeName := \"\"\n\terr := d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tnodeName, err = tx.GetLocalNodeName()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to query local node name\")\n\t}\n\n\tcontainers, err := getProfileContainersInfo(d.cluster, project, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to query instances associated with profile '%s'\", name)\n\t}\n\n\tfailures := map[string]error{}\n\tfor _, args := range containers {\n\t\terr := doProfileUpdateContainer(d, name, old, nodeName, args)\n\t\tif err != nil {\n\t\t\tfailures[args.Name] = err\n\t\t}\n\t}\n\n\tif len(failures) != 0 {\n\t\tmsg := \"The following containers failed to update (profile change still saved):\\n\"\n\t\tfor cname, err := range failures {\n\t\t\tmsg += fmt.Sprintf(\" - %s: %s\\n\", cname, err)\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", msg)\n\t}\n\n\treturn nil\n}\n\n\/\/ Profile update of a single container.\nfunc doProfileUpdateContainer(d *Daemon, name string, old api.ProfilePut, nodeName string, args db.InstanceArgs) error {\n\tif args.Node != \"\" && args.Node != nodeName {\n\t\t\/\/ No-op, this container does not belong to this node.\n\t\treturn nil\n\t}\n\n\tprofiles, err := d.cluster.GetProfiles(args.Project, args.Profiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, profileName := range args.Profiles {\n\t\tif profileName == name {\n\t\t\t\/\/ Overwrite the new config from the database with the old config and devices.\n\t\t\tprofiles[i].Config = old.Config\n\t\t\tprofiles[i].Devices = old.Devices\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Load the instance using the old profile config.\n\tinst, err := instance.Load(d.State(), args, profiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update will internally load the new profile configs and detect the changes to apply.\n\treturn inst.Update(db.InstanceArgs{\n\t\tArchitecture: inst.Architecture(),\n\t\tConfig: inst.LocalConfig(),\n\t\tDescription: inst.Description(),\n\t\tDevices: inst.LocalDevices(),\n\t\tEphemeral: inst.IsEphemeral(),\n\t\tProfiles: inst.Profiles(),\n\t\tProject: inst.Project(),\n\t\tType: inst.Type(),\n\t\tSnapshot: inst.IsSnapshot(),\n\t}, true)\n}\n\n\/\/ Query the db for information about containers associated with the given\n\/\/ profile.\nfunc getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]db.InstanceArgs, error) {\n\t\/\/ Query the db for information about containers associated with the\n\t\/\/ given profile.\n\tnames, err := cluster.GetInstancesWithProfile(project, profile)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to query instances with profile '%s'\", profile)\n\t}\n\n\tcontainers := []db.InstanceArgs{}\n\terr = cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tfor ctProject, ctNames := range names {\n\t\t\tfor _, ctName := range ctNames {\n\t\t\t\tcontainer, err := tx.GetInstance(ctProject, ctName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcontainers = append(containers, db.InstanceToArgs(container))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to fetch instances\")\n\t}\n\n\treturn containers, nil\n}\n<commit_msg>lxd\/profiles\/utils: Renames project arg to projectName in doProfileUpdate<commit_after>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\tprojecthelpers \"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc doProfileUpdate(d *Daemon, projectName string, name string, id int64, profile *api.Profile, req api.ProfilePut) error {\n\t\/\/ Check project limits.\n\terr := d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\treturn projecthelpers.AllowProfileUpdate(tx, projectName, name, req)\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Sanity checks\n\terr = instance.ValidConfig(d.os, req.Config, true, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ At this point we don't know the instance type, so just use instancetype.Any type for validation.\n\terr = instance.ValidDevices(d.State(), d.cluster, projectName, instancetype.Any, deviceConfig.NewDevices(req.Devices), false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainers, err := getProfileContainersInfo(d.cluster, projectName, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to query instances associated with profile '%s'\", name)\n\t}\n\n\t\/\/ Check if the root device is supposed to be changed or removed.\n\toldProfileRootDiskDeviceKey, oldProfileRootDiskDevice, _ := shared.GetRootDiskDevice(profile.Devices)\n\t_, newProfileRootDiskDevice, _ := shared.GetRootDiskDevice(req.Devices)\n\tif len(containers) > 0 && oldProfileRootDiskDevice[\"pool\"] != \"\" && newProfileRootDiskDevice[\"pool\"] == \"\" || (oldProfileRootDiskDevice[\"pool\"] != newProfileRootDiskDevice[\"pool\"]) {\n\t\t\/\/ Check for containers using the device\n\t\tfor _, container := range containers {\n\t\t\t\/\/ Check if the device is locally overridden\n\t\t\tk, v, _ := shared.GetRootDiskDevice(container.Devices.CloneNative())\n\t\t\tif k != \"\" && v[\"pool\"] != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Check what profile the device comes from\n\t\t\tprofiles := container.Profiles\n\t\t\tfor i := len(profiles) - 1; i >= 0; i-- {\n\t\t\t\t_, profile, err := d.cluster.GetProfile(projecthelpers.Default, profiles[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check if we find a match for the device\n\t\t\t\t_, ok := profile.Devices[oldProfileRootDiskDeviceKey]\n\t\t\t\tif ok {\n\t\t\t\t\t\/\/ Found the profile\n\t\t\t\t\tif profiles[i] == name {\n\t\t\t\t\t\t\/\/ If it's the current profile, then we can't modify that root device\n\t\t\t\t\t\treturn fmt.Errorf(\"At least one instance relies on this profile's root disk device\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\/\/ If it's not, then move on to the next container\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Update the database\n\terr = d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\treturn tx.UpdateProfile(projectName, name, db.Profile{\n\t\t\tProject: projectName,\n\t\t\tName: name,\n\t\t\tDescription: req.Description,\n\t\t\tConfig: req.Config,\n\t\t\tDevices: req.Devices,\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update all the containers on this node using the profile. Must be\n\t\/\/ done after db.TxCommit due to DB lock.\n\tnodeName := \"\"\n\terr = d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tnodeName, err = tx.GetLocalNodeName()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to query local node name\")\n\t}\n\tfailures := map[string]error{}\n\tfor _, args := range containers {\n\t\terr := doProfileUpdateContainer(d, name, profile.ProfilePut, nodeName, args)\n\t\tif err != nil {\n\t\t\tfailures[args.Name] = err\n\t\t}\n\t}\n\n\tif len(failures) != 0 {\n\t\tmsg := \"The following containers failed to update (profile change still saved):\\n\"\n\t\tfor cname, err := range failures {\n\t\t\tmsg += fmt.Sprintf(\" - %s: %s\\n\", cname, err)\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", msg)\n\t}\n\n\treturn nil\n}\n\n\/\/ Like doProfileUpdate but does not update the database, since it was already\n\/\/ updated by doProfileUpdate itself, called on the notifying node.\nfunc doProfileUpdateCluster(d *Daemon, project, name string, old api.ProfilePut) error {\n\tnodeName := \"\"\n\terr := d.cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tnodeName, err = tx.GetLocalNodeName()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to query local node name\")\n\t}\n\n\tcontainers, err := getProfileContainersInfo(d.cluster, project, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to query instances associated with profile '%s'\", name)\n\t}\n\n\tfailures := map[string]error{}\n\tfor _, args := range containers {\n\t\terr := doProfileUpdateContainer(d, name, old, nodeName, args)\n\t\tif err != nil {\n\t\t\tfailures[args.Name] = err\n\t\t}\n\t}\n\n\tif len(failures) != 0 {\n\t\tmsg := \"The following containers failed to update (profile change still saved):\\n\"\n\t\tfor cname, err := range failures {\n\t\t\tmsg += fmt.Sprintf(\" - %s: %s\\n\", cname, err)\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", msg)\n\t}\n\n\treturn nil\n}\n\n\/\/ Profile update of a single container.\nfunc doProfileUpdateContainer(d *Daemon, name string, old api.ProfilePut, nodeName string, args db.InstanceArgs) error {\n\tif args.Node != \"\" && args.Node != nodeName {\n\t\t\/\/ No-op, this container does not belong to this node.\n\t\treturn nil\n\t}\n\n\tprofiles, err := d.cluster.GetProfiles(args.Project, args.Profiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, profileName := range args.Profiles {\n\t\tif profileName == name {\n\t\t\t\/\/ Overwrite the new config from the database with the old config and devices.\n\t\t\tprofiles[i].Config = old.Config\n\t\t\tprofiles[i].Devices = old.Devices\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Load the instance using the old profile config.\n\tinst, err := instance.Load(d.State(), args, profiles)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update will internally load the new profile configs and detect the changes to apply.\n\treturn inst.Update(db.InstanceArgs{\n\t\tArchitecture: inst.Architecture(),\n\t\tConfig: inst.LocalConfig(),\n\t\tDescription: inst.Description(),\n\t\tDevices: inst.LocalDevices(),\n\t\tEphemeral: inst.IsEphemeral(),\n\t\tProfiles: inst.Profiles(),\n\t\tProject: inst.Project(),\n\t\tType: inst.Type(),\n\t\tSnapshot: inst.IsSnapshot(),\n\t}, true)\n}\n\n\/\/ Query the db for information about containers associated with the given\n\/\/ profile.\nfunc getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]db.InstanceArgs, error) {\n\t\/\/ Query the db for information about containers associated with the\n\t\/\/ given profile.\n\tnames, err := cluster.GetInstancesWithProfile(project, profile)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to query instances with profile '%s'\", profile)\n\t}\n\n\tcontainers := []db.InstanceArgs{}\n\terr = cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tfor ctProject, ctNames := range names {\n\t\t\tfor _, ctName := range ctNames {\n\t\t\t\tcontainer, err := tx.GetInstance(ctProject, ctName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcontainers = append(containers, db.InstanceToArgs(container))\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to fetch instances\")\n\t}\n\n\treturn containers, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/kubernetes\/deployment-manager\/expandybird\/expander\"\n\t\"github.com\/kubernetes\/deployment-manager\/manager\/manager\"\n\t\"github.com\/kubernetes\/deployment-manager\/registry\"\n\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tstdin = flag.Bool(\"stdin\", false, \"Reads a configuration from the standard input\")\n\tproperties = flag.String(\"properties\", \"\", \"Properties to use when deploying a template (e.g., --properties k1=v1,k2=v2)\")\n\ttemplate_registry = flag.String(\"registry\", \"kubernetes\/deployment-manager\/templates\", \"Github based template registry (owner\/repo[\/path])\")\n\tservice = flag.String(\"service\", \"http:\/\/localhost:8001\/api\/v1\/proxy\/namespaces\/dm\/services\/manager-service:manager\", \"URL for deployment manager\")\n\tbinary = flag.String(\"binary\", \"..\/expandybird\/expansion\/expansion.py\", \"Path to template expansion binary\")\n)\n\nvar commands = []string{\n\t\"expand \\t\\t\\t Expands the supplied configuration(s)\",\n\t\"deploy \\t\\t\\t Deploys the named template or the supplied configuration(s)\",\n\t\"list \\t\\t\\t Lists the deployments in the cluster\",\n\t\"get \\t\\t\\t Retrieves the supplied deployment\",\n\t\"delete \\t\\t\\t Deletes the supplied deployment\",\n\t\"update \\t\\t\\t Updates a deployment using the supplied configuration(s)\",\n\t\"deployed-types \\t\\t Lists the types deployed in the cluster\",\n\t\"deployed-instances \\t Lists the instances of the named type deployed in the cluster\",\n\t\"templates \\t\\t Lists the templates in a given template registry\",\n\t\"describe \\t\\t Describes the named template in a given template registry\",\n}\n\nvar usage = func() {\n\tmessage := \"Usage: %s [<flags>] <command> (<template-name> | <deployment-name> | (<configuration> [<import1>...<importN>]))\\n\"\n\tfmt.Fprintf(os.Stderr, message, os.Args[0])\n\tfmt.Fprintln(os.Stderr, \"Commands:\")\n\tfor _, command := range commands {\n\t\tfmt.Fprintln(os.Stderr, command)\n\t}\n\n\tfmt.Fprintln(os.Stderr)\n\tfmt.Fprintln(os.Stderr, \"Flags:\")\n\tflag.PrintDefaults()\n\tfmt.Fprintln(os.Stderr)\n\tos.Exit(1)\n}\n\nfunc getGitRegistry() *registry.GithubRegistry {\n\ts := strings.Split(*template_registry, \"\/\")\n\tif len(s) < 2 {\n\t\tlog.Fatalf(\"invalid template registry: %s\", *template_registry)\n\t}\n\n\tvar path = \"\"\n\tif len(s) > 2 {\n\t\tpath = strings.Join(s[2:], \"\/\")\n\t}\n\n\treturn registry.NewGithubRegistry(s[0], s[1], path)\n}\n\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) < 1 {\n\t\tfmt.Fprintln(os.Stderr, \"No command supplied\")\n\t\tusage()\n\t}\n\n\tif *stdin {\n\t\tfmt.Printf(\"reading from stdin is not yet implemented\")\n\t\tos.Exit(0)\n\t}\n\n\tcommand := args[0]\n\tswitch command {\n\tcase \"templates\":\n\t\tgit := getGitRegistry()\n\t\ttemplates, err := git.List()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Cannot list %v\", err)\n\t\t}\n\n\t\tfmt.Printf(\"Templates:\\n\")\n\t\tfor _, t := range templates {\n\t\t\tfmt.Printf(\"%s:%s\\n\", t.Name, t.Version)\n\t\t\tdownloadURL, err := git.GetURL(t)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to get download URL for template %s:%s\", t.Name, t.Version)\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\tdownload URL: %s\\n\", downloadURL)\n\t\t}\n\tcase \"describe\":\n\t\tdescribeType(args)\n\tcase \"expand\":\n\t\ttemplate := loadTemplate(args)\n\t\tcallService(\"expand\", \"POST\", \"expand configuration\", marshalTemplate(template))\n\tcase \"deploy\":\n\t\ttemplate := loadTemplate(args)\n\t\taction := fmt.Sprintf(\"deploy configuration named %s\", template.Name)\n\t\tcallService(\"deployments\", \"POST\", action, marshalTemplate(template))\n\tcase \"list\":\n\t\tcallService(\"deployments\", \"GET\", \"list deployments\", nil)\n\tcase \"get\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No deployment name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", args[1])\n\t\taction := fmt.Sprintf(\"get deployment named %s\", args[1])\n\t\tcallService(path, \"GET\", action, nil)\n\tcase \"delete\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No deployment name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", args[1])\n\t\taction := fmt.Sprintf(\"delete deployment named %s\", args[1])\n\t\tcallService(path, \"DELETE\", action, nil)\n\tcase \"update\":\n\t\ttemplate := loadTemplate(args)\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", template.Name)\n\t\taction := fmt.Sprintf(\"delete deployment named %s\", template.Name)\n\t\tcallService(path, \"PUT\", action, marshalTemplate(template))\n\tcase \"deployed-types\":\n\t\tcallService(\"types\", \"GET\", \"list deployed types\", nil)\n\tcase \"deployed-instances\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No type name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"types\/%s\/instances\", url.QueryEscape(args[1]))\n\t\taction := fmt.Sprintf(\"list deployed instances of type %s\", args[1])\n\t\tcallService(path, \"GET\", action, nil)\n\tdefault:\n\t\tusage()\n\t}\n}\n\nfunc callService(path, method, action string, reader io.ReadCloser) {\n\tu := fmt.Sprintf(\"%s\/%s\", *service, path)\n\n\tresp := callHttp(u, method, action, reader)\n\tvar prettyJSON bytes.Buffer\n\tif err := json.Indent(&prettyJSON, []byte(resp), \"\", \" \"); err != nil {\n\t\tlog.Fatalf(\"Failed to parse JSON response from service: %s\", resp)\n\t}\n\tfmt.Println(prettyJSON.String())\n}\n\nfunc callHttp(path, method, action string, reader io.ReadCloser) string {\n\trequest, err := http.NewRequest(method, path, reader)\n\trequest.Header.Add(\"Content-Type\", \"application\/json\")\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, err)\n\t}\n\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, err)\n\t}\n\n\tif response.StatusCode < http.StatusOK ||\n\t\tresponse.StatusCode >= http.StatusMultipleChoices {\n\t\tmessage := fmt.Sprintf(\"status code: %d status: %s : %s\", response.StatusCode, response.Status, body)\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, message)\n\t}\n\n\treturn string(body)\n}\n\n\/\/ describeType prints the schema for a type specified by either a\n\/\/ template URL or a fully qualified registry type name (e.g.,\n\/\/ <type-name>:<version>)\nfunc describeType(args []string) {\n\tif len(args) != 2 {\n\t\tfmt.Fprintln(os.Stderr, \"No type name or URL supplied\")\n\t\tusage()\n\t}\n\n\tvar tUrl string\n\n\tif strings.HasPrefix(args[1], \"http:\/\/\") || strings.HasPrefix(args[1], \"https:\/\/\") {\n\t\t\/\/ User can pass raw URL to template.\n\t\ttUrl = args[1]\n\t} else {\n\t\t\/\/ User can pass registry type.\n\t\tt := getRegistryType(args[1])\n\t\tif t == nil {\n\t\t\tlog.Fatalf(\"Invalid type name, must be in the form \\\"<type-name>:<version>\\\": %s\", args[1])\n\t\t}\n\n\t\tgit := getGitRegistry()\n\t\turl, err := git.GetURL(*t)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to fetch type information for %s: %s\", args[1], err)\n\t\t}\n\t\ttUrl = url\n\t}\n\n\tschemaUrl := tUrl + \".schema\"\n\tfmt.Println(callHttp(schemaUrl, \"GET\", \"get schema for type (\"+tUrl+\")\", nil))\n}\n\nfunc loadTemplate(args []string) *expander.Template {\n\tvar template *expander.Template\n\tvar err error\n\tif len(args) < 2 {\n\t\tfmt.Fprintln(os.Stderr, \"No template name or configuration(s) supplied\")\n\t\tusage()\n\t}\n\n\tif len(args) < 3 {\n\t\tif t := getRegistryType(args[1]); t != nil {\n\t\t\ttemplate = buildTemplateFromType(*t)\n\t\t} else {\n\t\t\ttemplate, err = expander.NewTemplateFromRootTemplate(args[1])\n\t\t}\n\t} else {\n\t\ttemplate, err = expander.NewTemplateFromFileNames(args[1], args[2:])\n\t}\n\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot create configuration from supplied arguments: %s\\n\", err)\n\t}\n\n\treturn template\n}\n\n\/\/ TODO: needs better validation that this is actually a registry type.\nfunc getRegistryType(fullType string) *registry.Type {\n\ttList := strings.Split(fullType, \":\")\n\tif len(tList) != 2 {\n\t\treturn nil\n\t}\n\n\treturn ®istry.Type{\n\t\tName: tList[0],\n\t\tVersion: tList[1],\n\t}\n}\n\nfunc buildTemplateFromType(t registry.Type) *expander.Template {\n\tgit := getGitRegistry()\n\tdownloadURL, err := git.GetURL(t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to get download URL for type %s:%s\\n%s\\n\", t.Name, t.Version, err)\n\t}\n\n\tprops := make(map[string]interface{})\n\tif *properties != \"\" {\n\t\tplist := strings.Split(*properties, \",\")\n\t\tfor _, p := range plist {\n\t\t\tppair := strings.Split(p, \"=\")\n\t\t\tif len(ppair) != 2 {\n\t\t\t\tlog.Fatalf(\"--properties must be in the form \\\"p1=v1,p2=v2,...\\\": %s\\n\", p)\n\t\t\t}\n\n\t\t\t\/\/ support ints\n\t\t\t\/\/ TODO: needs to support other types.\n\t\t\ti, err := strconv.Atoi(ppair[1])\n\t\t\tif err != nil {\n\t\t\t\tprops[ppair[0]] = ppair[1]\n\t\t\t} else {\n\t\t\t\tprops[ppair[0]] = i\n\t\t\t}\n\t\t}\n\t}\n\n\tname := fmt.Sprintf(\"%s-%s\", t.Name, t.Version)\n\tconfig := manager.Configuration{Resources: []*manager.Resource{&manager.Resource{\n\t\tName: name,\n\t\tType: downloadURL,\n\t\tProperties: props,\n\t}}}\n\n\ty, err := yaml.Marshal(config)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %s\\ncannot create configuration for deployment: %v\\n\", err, config)\n\t}\n\n\treturn &expander.Template{\n\t\tName: name,\n\t\tContent: string(y),\n\t\t\/\/ No imports, as this is a single type from repository.\n\t}\n}\n\nfunc marshalTemplate(template *expander.Template) io.ReadCloser {\n\tj, err := json.Marshal(template)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot deploy configuration %s: %s\\n\", template.Name, err)\n\t}\n\n\treturn ioutil.NopCloser(bytes.NewReader(j))\n}\n\nfunc getRandomName() string {\n\treturn fmt.Sprintf(\"manifest-%d\", time.Now().UTC().UnixNano())\n}\n<commit_msg>Add flag for timeout for talking to service<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/kubernetes\/deployment-manager\/expandybird\/expander\"\n\t\"github.com\/kubernetes\/deployment-manager\/manager\/manager\"\n\t\"github.com\/kubernetes\/deployment-manager\/registry\"\n\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tstdin = flag.Bool(\"stdin\", false, \"Reads a configuration from the standard input\")\n\tproperties = flag.String(\"properties\", \"\", \"Properties to use when deploying a template (e.g., --properties k1=v1,k2=v2)\")\n\ttemplate_registry = flag.String(\"registry\", \"kubernetes\/deployment-manager\/templates\", \"Github based template registry (owner\/repo[\/path])\")\n\tservice = flag.String(\"service\", \"http:\/\/localhost:8001\/api\/v1\/proxy\/namespaces\/dm\/services\/manager-service:manager\", \"URL for deployment manager\")\n\tbinary = flag.String(\"binary\", \"..\/expandybird\/expansion\/expansion.py\", \"Path to template expansion binary\")\n\ttimeout = flag.Int(\"timeout\", 10, \"Time in seconds to wait for response\")\n)\n\nvar commands = []string{\n\t\"expand \\t\\t\\t Expands the supplied configuration(s)\",\n\t\"deploy \\t\\t\\t Deploys the named template or the supplied configuration(s)\",\n\t\"list \\t\\t\\t Lists the deployments in the cluster\",\n\t\"get \\t\\t\\t Retrieves the supplied deployment\",\n\t\"delete \\t\\t\\t Deletes the supplied deployment\",\n\t\"update \\t\\t\\t Updates a deployment using the supplied configuration(s)\",\n\t\"deployed-types \\t\\t Lists the types deployed in the cluster\",\n\t\"deployed-instances \\t Lists the instances of the named type deployed in the cluster\",\n\t\"templates \\t\\t Lists the templates in a given template registry\",\n\t\"describe \\t\\t Describes the named template in a given template registry\",\n}\n\nvar usage = func() {\n\tmessage := \"Usage: %s [<flags>] <command> (<template-name> | <deployment-name> | (<configuration> [<import1>...<importN>]))\\n\"\n\tfmt.Fprintf(os.Stderr, message, os.Args[0])\n\tfmt.Fprintln(os.Stderr, \"Commands:\")\n\tfor _, command := range commands {\n\t\tfmt.Fprintln(os.Stderr, command)\n\t}\n\n\tfmt.Fprintln(os.Stderr)\n\tfmt.Fprintln(os.Stderr, \"Flags:\")\n\tflag.PrintDefaults()\n\tfmt.Fprintln(os.Stderr)\n\tos.Exit(1)\n}\n\nfunc getGitRegistry() *registry.GithubRegistry {\n\ts := strings.Split(*template_registry, \"\/\")\n\tif len(s) < 2 {\n\t\tlog.Fatalf(\"invalid template registry: %s\", *template_registry)\n\t}\n\n\tvar path = \"\"\n\tif len(s) > 2 {\n\t\tpath = strings.Join(s[2:], \"\/\")\n\t}\n\n\treturn registry.NewGithubRegistry(s[0], s[1], path)\n}\n\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) < 1 {\n\t\tfmt.Fprintln(os.Stderr, \"No command supplied\")\n\t\tusage()\n\t}\n\n\tif *stdin {\n\t\tfmt.Printf(\"reading from stdin is not yet implemented\")\n\t\tos.Exit(0)\n\t}\n\n\tcommand := args[0]\n\tswitch command {\n\tcase \"templates\":\n\t\tgit := getGitRegistry()\n\t\ttemplates, err := git.List()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Cannot list %v\", err)\n\t\t}\n\n\t\tfmt.Printf(\"Templates:\\n\")\n\t\tfor _, t := range templates {\n\t\t\tfmt.Printf(\"%s:%s\\n\", t.Name, t.Version)\n\t\t\tdownloadURL, err := git.GetURL(t)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to get download URL for template %s:%s\", t.Name, t.Version)\n\t\t\t}\n\n\t\t\tfmt.Printf(\"\\tdownload URL: %s\\n\", downloadURL)\n\t\t}\n\tcase \"describe\":\n\t\tdescribeType(args)\n\tcase \"expand\":\n\t\ttemplate := loadTemplate(args)\n\t\tcallService(\"expand\", \"POST\", \"expand configuration\", marshalTemplate(template))\n\tcase \"deploy\":\n\t\ttemplate := loadTemplate(args)\n\t\taction := fmt.Sprintf(\"deploy configuration named %s\", template.Name)\n\t\tcallService(\"deployments\", \"POST\", action, marshalTemplate(template))\n\tcase \"list\":\n\t\tcallService(\"deployments\", \"GET\", \"list deployments\", nil)\n\tcase \"get\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No deployment name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", args[1])\n\t\taction := fmt.Sprintf(\"get deployment named %s\", args[1])\n\t\tcallService(path, \"GET\", action, nil)\n\tcase \"delete\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No deployment name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", args[1])\n\t\taction := fmt.Sprintf(\"delete deployment named %s\", args[1])\n\t\tcallService(path, \"DELETE\", action, nil)\n\tcase \"update\":\n\t\ttemplate := loadTemplate(args)\n\t\tpath := fmt.Sprintf(\"deployments\/%s\", template.Name)\n\t\taction := fmt.Sprintf(\"delete deployment named %s\", template.Name)\n\t\tcallService(path, \"PUT\", action, marshalTemplate(template))\n\tcase \"deployed-types\":\n\t\tcallService(\"types\", \"GET\", \"list deployed types\", nil)\n\tcase \"deployed-instances\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Fprintln(os.Stderr, \"No type name supplied\")\n\t\t\tusage()\n\t\t}\n\n\t\tpath := fmt.Sprintf(\"types\/%s\/instances\", url.QueryEscape(args[1]))\n\t\taction := fmt.Sprintf(\"list deployed instances of type %s\", args[1])\n\t\tcallService(path, \"GET\", action, nil)\n\tdefault:\n\t\tusage()\n\t}\n}\n\nfunc callService(path, method, action string, reader io.ReadCloser) {\n\tu := fmt.Sprintf(\"%s\/%s\", *service, path)\n\n\tresp := callHttp(u, method, action, reader)\n\tvar prettyJSON bytes.Buffer\n\tif err := json.Indent(&prettyJSON, []byte(resp), \"\", \" \"); err != nil {\n\t\tlog.Fatalf(\"Failed to parse JSON response from service: %s\", resp)\n\t}\n\tfmt.Println(prettyJSON.String())\n}\n\nfunc callHttp(path, method, action string, reader io.ReadCloser) string {\n\trequest, err := http.NewRequest(method, path, reader)\n\trequest.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tclient := http.Client{\n\t\tTimeout: time.Duration(time.Duration(*timeout) * time.Second),\n\t}\n\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, err)\n\t}\n\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, err)\n\t}\n\n\tif response.StatusCode < http.StatusOK ||\n\t\tresponse.StatusCode >= http.StatusMultipleChoices {\n\t\tmessage := fmt.Sprintf(\"status code: %d status: %s : %s\", response.StatusCode, response.Status, body)\n\t\tlog.Fatalf(\"cannot %s: %s\\n\", action, message)\n\t}\n\n\treturn string(body)\n}\n\n\/\/ describeType prints the schema for a type specified by either a\n\/\/ template URL or a fully qualified registry type name (e.g.,\n\/\/ <type-name>:<version>)\nfunc describeType(args []string) {\n\tif len(args) != 2 {\n\t\tfmt.Fprintln(os.Stderr, \"No type name or URL supplied\")\n\t\tusage()\n\t}\n\n\tvar tUrl string\n\n\tif strings.HasPrefix(args[1], \"http:\/\/\") || strings.HasPrefix(args[1], \"https:\/\/\") {\n\t\t\/\/ User can pass raw URL to template.\n\t\ttUrl = args[1]\n\t} else {\n\t\t\/\/ User can pass registry type.\n\t\tt := getRegistryType(args[1])\n\t\tif t == nil {\n\t\t\tlog.Fatalf(\"Invalid type name, must be in the form \\\"<type-name>:<version>\\\": %s\", args[1])\n\t\t}\n\n\t\tgit := getGitRegistry()\n\t\turl, err := git.GetURL(*t)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to fetch type information for %s: %s\", args[1], err)\n\t\t}\n\t\ttUrl = url\n\t}\n\n\tschemaUrl := tUrl + \".schema\"\n\tfmt.Println(callHttp(schemaUrl, \"GET\", \"get schema for type (\"+tUrl+\")\", nil))\n}\n\nfunc loadTemplate(args []string) *expander.Template {\n\tvar template *expander.Template\n\tvar err error\n\tif len(args) < 2 {\n\t\tfmt.Fprintln(os.Stderr, \"No template name or configuration(s) supplied\")\n\t\tusage()\n\t}\n\n\tif len(args) < 3 {\n\t\tif t := getRegistryType(args[1]); t != nil {\n\t\t\ttemplate = buildTemplateFromType(*t)\n\t\t} else {\n\t\t\ttemplate, err = expander.NewTemplateFromRootTemplate(args[1])\n\t\t}\n\t} else {\n\t\ttemplate, err = expander.NewTemplateFromFileNames(args[1], args[2:])\n\t}\n\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot create configuration from supplied arguments: %s\\n\", err)\n\t}\n\n\treturn template\n}\n\n\/\/ TODO: needs better validation that this is actually a registry type.\nfunc getRegistryType(fullType string) *registry.Type {\n\ttList := strings.Split(fullType, \":\")\n\tif len(tList) != 2 {\n\t\treturn nil\n\t}\n\n\treturn ®istry.Type{\n\t\tName: tList[0],\n\t\tVersion: tList[1],\n\t}\n}\n\nfunc buildTemplateFromType(t registry.Type) *expander.Template {\n\tgit := getGitRegistry()\n\tdownloadURL, err := git.GetURL(t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to get download URL for type %s:%s\\n%s\\n\", t.Name, t.Version, err)\n\t}\n\n\tprops := make(map[string]interface{})\n\tif *properties != \"\" {\n\t\tplist := strings.Split(*properties, \",\")\n\t\tfor _, p := range plist {\n\t\t\tppair := strings.Split(p, \"=\")\n\t\t\tif len(ppair) != 2 {\n\t\t\t\tlog.Fatalf(\"--properties must be in the form \\\"p1=v1,p2=v2,...\\\": %s\\n\", p)\n\t\t\t}\n\n\t\t\t\/\/ support ints\n\t\t\t\/\/ TODO: needs to support other types.\n\t\t\ti, err := strconv.Atoi(ppair[1])\n\t\t\tif err != nil {\n\t\t\t\tprops[ppair[0]] = ppair[1]\n\t\t\t} else {\n\t\t\t\tprops[ppair[0]] = i\n\t\t\t}\n\t\t}\n\t}\n\n\tname := fmt.Sprintf(\"%s-%s\", t.Name, t.Version)\n\tconfig := manager.Configuration{Resources: []*manager.Resource{&manager.Resource{\n\t\tName: name,\n\t\tType: downloadURL,\n\t\tProperties: props,\n\t}}}\n\n\ty, err := yaml.Marshal(config)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %s\\ncannot create configuration for deployment: %v\\n\", err, config)\n\t}\n\n\treturn &expander.Template{\n\t\tName: name,\n\t\tContent: string(y),\n\t\t\/\/ No imports, as this is a single type from repository.\n\t}\n}\n\nfunc marshalTemplate(template *expander.Template) io.ReadCloser {\n\tj, err := json.Marshal(template)\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot deploy configuration %s: %s\\n\", template.Name, err)\n\t}\n\n\treturn ioutil.NopCloser(bytes.NewReader(j))\n}\n\nfunc getRandomName() string {\n\treturn fmt.Sprintf(\"manifest-%d\", time.Now().UTC().UnixNano())\n}\n<|endoftext|>"} {"text":"<commit_before>package gcache\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/drive\/v3\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nvar (\n\toauth2TokenSource oauth2.TokenSource \/\/ The token is valid for 30 minutes.\n)\n\n\/\/ GetGDriveService returns the API service of Google Drive.\nfunc GetGDriveService(\n\tr *http.Request,\n) (\n\t*drive.Service,\n\terror,\n) {\n\tn := 1\nretry:\n\tservice, err := drive.New(createGDriveClient(r))\n\tif err == nil {\n\t\treturn service, nil\n\t}\n\t_, n, err = Triable(n, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgoto retry\n}\n\nfunc getGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t*drive.Service,\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = MinimumField\n\t}\n\n\tvar refreshToken bool\n\tn := 1\nrefresh:\n\tservice, err := GetGDriveService(r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\nretry:\n\t<-tokenBucketGDriveAPI\n\tfileList, err := service.Files.List().PageSize(1).Spaces(\"drive\").Q(fmt.Sprintf(\"name='%s'\", name)).Fields(field).Do()\n\n\tif err != nil {\n\t\trefreshToken, n, err = Triable(n, err)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif refreshToken {\n\t\t\tgoto refresh\n\t\t}\n\t\tgoto retry\n\t}\n\n\tif len(fileList.Files) <= 0 {\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\treturn fileList.Files[0], service, nil\n}\n\n\/\/ GetGDriveFile returns a file on Google Drive.\nfunc GetGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = defaultField\n\t}\n\n\tfile, _, err := getGDriveFile(r, name, field)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\n\/\/ GetGDriveFileContent returns a file with content on Google Drive.\nfunc GetGDriveFileContent(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t[]byte, \/\/ payload\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = defaultField\n\t}\n\n\tfile, service, err := getGDriveFile(r, name, field)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfileID := file.Id\n\n\tvar payload []byte\n\n\tvar refreshToken bool\n\tn := 1\nretry:\n\thttpResponse, err := service.Files.Export(fileID, mimeTxt).Download()\n\tif IsFileNotExportableError(err) {\n\t\t<-tokenBucketGDriveAPI\n\t\terr = service.Files.Delete(fileID).Do()\n\t\tif err != nil {\n\t\t\t\/\/ pass\n\t\t}\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tif err != nil {\n\t\trefreshToken, n, err = Triable(n, err)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif refreshToken {\n\t\t\tservice, err = GetGDriveService(r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\tgoto retry\n\t}\n\n\tdefer httpResponse.Body.Close()\n\tpayload, err = ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn file, payload, err\n}\n\n\/\/ Triable returns whether it can retry or not.\nfunc Triable(\n\tretries int,\n\terr error,\n) (\n\tbool, \/\/refreshToken\n\tint, \/\/retries\n\terror,\n) {\n\tif err == nil {\n\t\treturn false, retries, nil\n\t}\n\tif retries < 1 {\n\t\tretries = 1\n\t}\n\tswitch {\n\tcase IsInvalidSecurityTicket(err):\n\t\toauth2TokenSource = nil\n\t\treturn true, retries, nil\n\tcase IsServerError(err):\n\t\tretries, err = sleeping(retries)\n\t\tif err != nil {\n\t\t\treturn false, retries, err\n\t\t}\n\t\treturn false, retries, nil\n\t}\n\treturn false, retries, err\n}\n\nfunc sleeping(\n\tn int,\n) (\n\tint,\n\terror,\n) {\n\tif n > 16 {\n\t\treturn 0, errors.New(\"Sleeping Timeout\")\n\t}\n\ttime.Sleep(time.Duration(n)*time.Second + time.Duration(random.Intn(1000))*time.Millisecond)\n\treturn n * 2, nil\n}\n<commit_msg>refactor<commit_after>package gcache\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/drive\/v3\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nvar (\n\toauth2TokenSource oauth2.TokenSource \/\/ The token is valid for 30 minutes.\n)\n\n\/\/ GetGDriveService returns the API service of Google Drive.\nfunc GetGDriveService(\n\tr *http.Request,\n) (\n\t*drive.Service,\n\terror,\n) {\n\tn := 1\nretry:\n\tservice, err := drive.New(createGDriveClient(r))\n\tif err == nil {\n\t\treturn service, nil\n\t}\n\t_, n, err = Triable(n, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgoto retry\n}\n\nfunc getGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t*drive.Service,\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = MinimumField\n\t}\n\n\tvar refreshToken bool\n\tn := 1\nrefresh:\n\tservice, err := GetGDriveService(r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\nretry:\n\t<-tokenBucketGDriveAPI\n\tfileList, err := service.Files.List().PageSize(1).Spaces(\"drive\").Q(fmt.Sprintf(\"name='%s'\", name)).Fields(field).Do()\n\n\tif err != nil {\n\t\trefreshToken, n, err = Triable(n, err)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif refreshToken {\n\t\t\tgoto refresh\n\t\t}\n\t\tgoto retry\n\t}\n\n\tif len(fileList.Files) <= 0 {\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\treturn fileList.Files[0], service, nil\n}\n\n\/\/ GetGDriveFile returns a file on Google Drive.\nfunc GetGDriveFile(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = defaultField\n\t}\n\n\tfile, _, err := getGDriveFile(r, name, field)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\n\/\/ GetGDriveFileContent returns a file with content on Google Drive.\nfunc GetGDriveFileContent(\n\tr *http.Request,\n\tname string,\n\tfield googleapi.Field,\n) (\n\t*drive.File,\n\t[]byte, \/\/ payload\n\terror,\n) {\n\tif field == \"\" {\n\t\tfield = defaultField\n\t}\n\n\tfile, service, err := getGDriveFile(r, name, field)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfileID := file.Id\n\n\tvar payload []byte\n\n\tvar refreshToken bool\n\tn := 1\nretry:\n\thttpResponse, err := service.Files.Export(fileID, mimeTxt).Download()\n\tif IsFileNotExportableError(err) {\n\t\t<-tokenBucketGDriveAPI\n\t\terr = service.Files.Delete(fileID).Do()\n\t\tif err != nil {\n\t\t\t\/\/ pass\n\t\t}\n\t\treturn nil, nil, &DriveFileDoesNotExistError{}\n\t}\n\tif err != nil {\n\t\trefreshToken, n, err = Triable(n, err)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif refreshToken {\n\t\t\tservice, err = GetGDriveService(r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\tgoto retry\n\t}\n\n\tdefer httpResponse.Body.Close()\n\tpayload, err = ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn file, payload, err\n}\n\n\/\/ Triable returns whether it can retry or not.\nfunc Triable(\n\tretries int,\n\terr error,\n) (\n\tbool, \/\/refreshToken\n\tint, \/\/retries\n\terror,\n) {\n\tif err == nil {\n\t\treturn false, retries, nil\n\t}\n\tswitch {\n\tcase IsInvalidSecurityTicket(err):\n\t\toauth2TokenSource = nil\n\t\treturn true, retries, nil\n\tcase IsServerError(err):\n\t\tretries, err = sleeping(retries)\n\t\tif err != nil {\n\t\t\treturn false, retries, err\n\t\t}\n\t\treturn false, retries, nil\n\t}\n\treturn false, retries, err\n}\n\nfunc sleeping(\n\tn int,\n) (\n\tint,\n\terror,\n) {\n\tswitch {\n\tcase n > 16:\n\t\treturn 0, errors.New(\"Sleeping Timeout\")\n\tcase n < 1:\n\t\tn = 1\n\t}\n\ttime.Sleep(time.Duration(n)*time.Second + time.Duration(random.Intn(1000))*time.Millisecond)\n\treturn n * 2, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\n\trecord is a package to open, read, and save game records stored in\n\tfilesystem's format.\n\n*\/\npackage record\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst randomStringChars = \"ABCDEF0123456789\"\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\n\/\/Record is a record of moves, states, and game. Get a new one based on the\n\/\/contents of a file with New(). Instantiate directly if you'll set the\n\/\/starter state yourself.\ntype Record struct {\n\tGame *boardgame.GameStorageRecord\n\tMoves []*boardgame.MoveStorageRecord\n\t\/\/StatePatches are diffs from the state before. Get the actual state for a\n\t\/\/version with State().\n\tStatePatches []json.RawMessage\n\tstates []json.RawMessage\n\texpandedStates []map[string]interface{}\n}\n\nfunc New(filename string) (*Record, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't read file: \" + err.Error())\n\t}\n\n\tvar result Record\n\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn nil, errors.New(\"Couldn't decode json: \" + err.Error())\n\t}\n\n\treturn &result, nil\n}\n\n\/\/randomString returns a random string of the given length.\nfunc randomString(length int) string {\n\tvar result = \"\"\n\n\tfor len(result) < length {\n\t\tresult += string(randomStringChars[rand.Intn(len(randomStringChars))])\n\t}\n\n\treturn result\n}\n\nfunc safeOvewritefile(path string, blob []byte) error {\n\n\t\/\/Check for the easy case where the file doesn't exist yet\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn ioutil.WriteFile(path, blob, 0644)\n\t}\n\n\tdir, name := filepath.Split(path)\n\n\text := filepath.Ext(name)\n\n\tnameWithoutExt := strings.TrimSuffix(name, ext)\n\n\ttempFileName := filepath.Join(dir, nameWithoutExt+\".TEMP.\"+randomString(6)+ext)\n\n\tif err := ioutil.WriteFile(tempFileName, blob, 0644); err != nil {\n\t\treturn errors.New(\"Couldn't write temp file: \" + err.Error())\n\t}\n\n\tif err := os.Remove(path); err != nil {\n\t\treturn errors.New(\"Couldn't delete the original file: \" + err.Error())\n\t}\n\n\tif err := os.Rename(tempFileName, path); err != nil {\n\t\treturn errors.New(\"Couldn't rename the new file: \" + err.Error())\n\t}\n\n\treturn nil\n\n}\n\n\/\/Save saves to the given path.\nfunc (r *Record) Save(filename string) error {\n\n\tblob, err := json.MarshalIndent(r, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't marshal blob: \" + err.Error())\n\t}\n\n\treturn safeOvewritefile(filename, blob)\n}\n\n\/\/AddGameAndCurrentState adds the game, state, and move (if non-nil), ready\n\/\/for saving. Designed to be used in a SaveGameAndCurrentState method.\nfunc (r *Record) AddGameAndCurrentState(game *boardgame.GameStorageRecord, state boardgame.StateStorageRecord, move *boardgame.MoveStorageRecord) error {\n\n\tr.Game = game\n\n\tif move != nil {\n\t\tr.Moves = append(r.Moves, move)\n\t}\n\n\tlastState, err := r.State(len(r.StatePatches) - 1)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't fetch last state: \" + err.Error())\n\t}\n\n\tdiffer := gojsondiff.New()\n\n\tpatch, err := differ.Compare(lastState, state)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf := formatter.NewDeltaFormatter()\n\n\tjs, err := f.FormatAsJson(patch)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't format patch as json: \" + err.Error())\n\t}\n\n\tformattedPatch, err := json.Marshal(js)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't format patch json to byte: \" + err.Error())\n\t}\n\n\tr.states = append(r.states, json.RawMessage(state))\n\tr.StatePatches = append(r.StatePatches, formattedPatch)\n\n\treturn nil\n\n}\n\n\/\/State is a method, not just a normal array, because we actually serialize to\n\/\/disk state patches, given that states are big objects and they rarely\n\/\/change. So when you request a State we have to derive what it is by applying\n\/\/all of the patches up in order.\nfunc (r *Record) State(version int) (json.RawMessage, error) {\n\n\tif version < 0 {\n\t\t\/\/The base object that version 0 is diffed against is the empty object\n\t\treturn json.RawMessage(`{}`), nil\n\t}\n\n\tif len(r.states) > version {\n\t\treturn r.states[version], nil\n\t}\n\n\t\/\/Otherwise, derive forward, recursively.\n\n\tlastStateBlob, err := r.State(version - 1)\n\n\tif err != nil {\n\t\t\/\/Don't decorate the error because it will likely stack\n\t\treturn nil, err\n\t}\n\n\tunmarshaller := gojsondiff.NewUnmarshaller()\n\n\tpatch, err := unmarshaller.UnmarshalBytes(r.StatePatches[version])\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdiffer := gojsondiff.New()\n\n\tvar state map[string]interface{}\n\n\tif err := json.Unmarshal(lastStateBlob, &state); err != nil {\n\t\treturn nil, errors.New(\"Couldn't unmarshal last blob: \" + err.Error())\n\t}\n\n\tdiffer.ApplyPatch(state, patch)\n\n\tblob, err := json.MarshalIndent(state, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't marshal modified blob: \" + err.Error())\n\t}\n\n\tr.states = append(r.states, blob)\n\n\treturn blob, nil\n\n}\n<commit_msg>Get rid of unused cache in record. Part of #648.<commit_after>\/*\n\n\trecord is a package to open, read, and save game records stored in\n\tfilesystem's format.\n\n*\/\npackage record\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst randomStringChars = \"ABCDEF0123456789\"\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\n\/\/Record is a record of moves, states, and game. Get a new one based on the\n\/\/contents of a file with New(). Instantiate directly if you'll set the\n\/\/starter state yourself.\ntype Record struct {\n\tGame *boardgame.GameStorageRecord\n\tMoves []*boardgame.MoveStorageRecord\n\t\/\/StatePatches are diffs from the state before. Get the actual state for a\n\t\/\/version with State().\n\tStatePatches []json.RawMessage\n\tstates []json.RawMessage\n}\n\nfunc New(filename string) (*Record, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't read file: \" + err.Error())\n\t}\n\n\tvar result Record\n\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn nil, errors.New(\"Couldn't decode json: \" + err.Error())\n\t}\n\n\treturn &result, nil\n}\n\n\/\/randomString returns a random string of the given length.\nfunc randomString(length int) string {\n\tvar result = \"\"\n\n\tfor len(result) < length {\n\t\tresult += string(randomStringChars[rand.Intn(len(randomStringChars))])\n\t}\n\n\treturn result\n}\n\nfunc safeOvewritefile(path string, blob []byte) error {\n\n\t\/\/Check for the easy case where the file doesn't exist yet\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn ioutil.WriteFile(path, blob, 0644)\n\t}\n\n\tdir, name := filepath.Split(path)\n\n\text := filepath.Ext(name)\n\n\tnameWithoutExt := strings.TrimSuffix(name, ext)\n\n\ttempFileName := filepath.Join(dir, nameWithoutExt+\".TEMP.\"+randomString(6)+ext)\n\n\tif err := ioutil.WriteFile(tempFileName, blob, 0644); err != nil {\n\t\treturn errors.New(\"Couldn't write temp file: \" + err.Error())\n\t}\n\n\tif err := os.Remove(path); err != nil {\n\t\treturn errors.New(\"Couldn't delete the original file: \" + err.Error())\n\t}\n\n\tif err := os.Rename(tempFileName, path); err != nil {\n\t\treturn errors.New(\"Couldn't rename the new file: \" + err.Error())\n\t}\n\n\treturn nil\n\n}\n\n\/\/Save saves to the given path.\nfunc (r *Record) Save(filename string) error {\n\n\tblob, err := json.MarshalIndent(r, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't marshal blob: \" + err.Error())\n\t}\n\n\treturn safeOvewritefile(filename, blob)\n}\n\n\/\/AddGameAndCurrentState adds the game, state, and move (if non-nil), ready\n\/\/for saving. Designed to be used in a SaveGameAndCurrentState method.\nfunc (r *Record) AddGameAndCurrentState(game *boardgame.GameStorageRecord, state boardgame.StateStorageRecord, move *boardgame.MoveStorageRecord) error {\n\n\tr.Game = game\n\n\tif move != nil {\n\t\tr.Moves = append(r.Moves, move)\n\t}\n\n\tlastState, err := r.State(len(r.StatePatches) - 1)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't fetch last state: \" + err.Error())\n\t}\n\n\tdiffer := gojsondiff.New()\n\n\tpatch, err := differ.Compare(lastState, state)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf := formatter.NewDeltaFormatter()\n\n\tjs, err := f.FormatAsJson(patch)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't format patch as json: \" + err.Error())\n\t}\n\n\tformattedPatch, err := json.Marshal(js)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't format patch json to byte: \" + err.Error())\n\t}\n\n\tr.states = append(r.states, json.RawMessage(state))\n\tr.StatePatches = append(r.StatePatches, formattedPatch)\n\n\treturn nil\n\n}\n\n\/\/State is a method, not just a normal array, because we actually serialize to\n\/\/disk state patches, given that states are big objects and they rarely\n\/\/change. So when you request a State we have to derive what it is by applying\n\/\/all of the patches up in order.\nfunc (r *Record) State(version int) (json.RawMessage, error) {\n\n\tif version < 0 {\n\t\t\/\/The base object that version 0 is diffed against is the empty object\n\t\treturn json.RawMessage(`{}`), nil\n\t}\n\n\tif len(r.states) > version {\n\t\treturn r.states[version], nil\n\t}\n\n\t\/\/Otherwise, derive forward, recursively.\n\n\tlastStateBlob, err := r.State(version - 1)\n\n\tif err != nil {\n\t\t\/\/Don't decorate the error because it will likely stack\n\t\treturn nil, err\n\t}\n\n\tunmarshaller := gojsondiff.NewUnmarshaller()\n\n\tpatch, err := unmarshaller.UnmarshalBytes(r.StatePatches[version])\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdiffer := gojsondiff.New()\n\n\tvar state map[string]interface{}\n\n\tif err := json.Unmarshal(lastStateBlob, &state); err != nil {\n\t\treturn nil, errors.New(\"Couldn't unmarshal last blob: \" + err.Error())\n\t}\n\n\tdiffer.ApplyPatch(state, patch)\n\n\tblob, err := json.MarshalIndent(state, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't marshal modified blob: \" + err.Error())\n\t}\n\n\tr.states = append(r.states, blob)\n\n\treturn blob, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) Alex Ellis 2017. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage proxy\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"testing\"\n\n\t\"regexp\"\n\n\t\"github.com\/openfaas\/faas-cli\/test\"\n)\n\ntype DeployProxyTest struct {\n\ttitle string\n\tmockServerResponses []int\n\treplace bool\n\tupdate bool\n\texpectedOutput string\n}\n\nvar DeployProxyTests = []DeployProxyTest{\n\t{\n\t\ttitle: \"200_Deploy\",\n\t\tmockServerResponses: []int{http.StatusOK, http.StatusOK},\n\t\treplace: true,\n\t\tupdate: false,\n\t\texpectedOutput: `(?m:Deployed.)`,\n\t},\n\t{\n\t\ttitle: \"404_Deploy\",\n\t\tmockServerResponses: []int{http.StatusOK, http.StatusNotFound},\n\t\treplace: true,\n\t\tupdate: false,\n\t\texpectedOutput: `(?m:Unexpected status: 404)`,\n\t},\n\t{\n\t\ttitle: \"UpdateFailedDeployed\",\n\t\tmockServerResponses: []int{http.StatusNotFound, http.StatusOK},\n\t\treplace: false,\n\t\tupdate: true,\n\t\texpectedOutput: `(?m:Deployed.)`,\n\t},\n}\n\nfunc runDeployProxyTest(t *testing.T, deployTest DeployProxyTest) {\n\ts := test.MockHttpServerStatus(\n\t\tt,\n\t\tdeployTest.mockServerResponses...,\n\t)\n\tdefer s.Close()\n\n\tstdout := test.CaptureStdout(func() {\n\t\tDeployFunction(\n\t\t\t\"fproces\",\n\t\t\ts.URL,\n\t\t\t\"function\",\n\t\t\t\"image\",\n\t\t\t\"language\",\n\t\t\tdeployTest.replace,\n\t\t\tnil,\n\t\t\t\"network\",\n\t\t\t[]string{},\n\t\t\tdeployTest.update,\n\t\t\t[]string{},\n\t\t\tmap[string]string{},\n\t\t\tFunctionResourceRequest{},\n\t\t)\n\t})\n\n\tr := regexp.MustCompile(deployTest.expectedOutput)\n\tif !r.MatchString(stdout) {\n\t\tt.Fatalf(\"Output not matched: %s\", stdout)\n\t}\n}\n\nfunc Test_RunDeployProxyTests(t *testing.T) {\n\tfor _, tst := range DeployProxyTests {\n\t\tt.Run(tst.title, func(t *testing.T) {\n\t\t\trunDeployProxyTest(t, tst)\n\t\t})\n\t}\n}\n\nfunc Test_DeployFunction_MissingURLPrefix(t *testing.T) {\n\turl := \"127.0.0.1:8080\"\n\n\tstdout := test.CaptureStdout(func() {\n\t\tDeployFunction(\n\t\t\t\"fprocess\",\n\t\t\turl,\n\t\t\t\"function\",\n\t\t\t\"image\",\n\t\t\t\"language\",\n\t\t\tfalse,\n\t\t\tnil,\n\t\t\t\"network\",\n\t\t\t[]string{},\n\t\t\tfalse,\n\t\t\t[]string{},\n\t\t\tmap[string]string{},\n\t\t\tFunctionResourceRequest{},\n\t\t)\n\t})\n\n\texpectedErrMsg := \"first path segment in URL cannot contain colon\"\n\tr := regexp.MustCompile(fmt.Sprintf(\"(?m:%s)\", expectedErrMsg))\n\tif !r.MatchString(stdout) {\n\t\tt.Fatalf(\"Want: %s\\nGot: %s\", expectedErrMsg, stdout)\n\t}\n}\n<commit_msg>Refactor proxy\/deploy_test.go test cases<commit_after>\/\/ Copyright (c) Alex Ellis 2017. All rights reserved.\n\/\/ Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\npackage proxy\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"testing\"\n\n\t\"regexp\"\n\n\t\"github.com\/openfaas\/faas-cli\/test\"\n)\n\ntype deployProxyTest struct {\n\ttitle string\n\tmockServerResponses []int\n\treplace bool\n\tupdate bool\n\texpectedOutput string\n}\n\nfunc runDeployProxyTest(t *testing.T, deployTest deployProxyTest) {\n\ts := test.MockHttpServerStatus(\n\t\tt,\n\t\tdeployTest.mockServerResponses...,\n\t)\n\tdefer s.Close()\n\n\tstdout := test.CaptureStdout(func() {\n\t\tDeployFunction(\n\t\t\t\"fproces\",\n\t\t\ts.URL,\n\t\t\t\"function\",\n\t\t\t\"image\",\n\t\t\t\"language\",\n\t\t\tdeployTest.replace,\n\t\t\tnil,\n\t\t\t\"network\",\n\t\t\t[]string{},\n\t\t\tdeployTest.update,\n\t\t\t[]string{},\n\t\t\tmap[string]string{},\n\t\t\tFunctionResourceRequest{},\n\t\t)\n\t})\n\n\tr := regexp.MustCompile(deployTest.expectedOutput)\n\tif !r.MatchString(stdout) {\n\t\tt.Fatalf(\"Output not matched: %s\", stdout)\n\t}\n}\n\nfunc Test_RunDeployProxyTests(t *testing.T) {\n\tvar deployProxyTests = []deployProxyTest{\n\t\t{\n\t\t\ttitle: \"200_Deploy\",\n\t\t\tmockServerResponses: []int{http.StatusOK, http.StatusOK},\n\t\t\treplace: true,\n\t\t\tupdate: false,\n\t\t\texpectedOutput: `(?m:Deployed.)`,\n\t\t},\n\t\t{\n\t\t\ttitle: \"404_Deploy\",\n\t\t\tmockServerResponses: []int{http.StatusOK, http.StatusNotFound},\n\t\t\treplace: true,\n\t\t\tupdate: false,\n\t\t\texpectedOutput: `(?m:Unexpected status: 404)`,\n\t\t},\n\t\t{\n\t\t\ttitle: \"UpdateFailedDeployed\",\n\t\t\tmockServerResponses: []int{http.StatusNotFound, http.StatusOK},\n\t\t\treplace: false,\n\t\t\tupdate: true,\n\t\t\texpectedOutput: `(?m:Deployed.)`,\n\t\t},\n\t}\n\tfor _, tst := range deployProxyTests {\n\t\tt.Run(tst.title, func(t *testing.T) {\n\t\t\trunDeployProxyTest(t, tst)\n\t\t})\n\t}\n}\n\nfunc Test_DeployFunction_MissingURLPrefix(t *testing.T) {\n\turl := \"127.0.0.1:8080\"\n\n\tstdout := test.CaptureStdout(func() {\n\t\tDeployFunction(\n\t\t\t\"fprocess\",\n\t\t\turl,\n\t\t\t\"function\",\n\t\t\t\"image\",\n\t\t\t\"language\",\n\t\t\tfalse,\n\t\t\tnil,\n\t\t\t\"network\",\n\t\t\t[]string{},\n\t\t\tfalse,\n\t\t\t[]string{},\n\t\t\tmap[string]string{},\n\t\t\tFunctionResourceRequest{},\n\t\t)\n\t})\n\n\texpectedErrMsg := \"first path segment in URL cannot contain colon\"\n\tr := regexp.MustCompile(fmt.Sprintf(\"(?m:%s)\", expectedErrMsg))\n\tif !r.MatchString(stdout) {\n\t\tt.Fatalf(\"Want: %s\\nGot: %s\", expectedErrMsg, stdout)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build freebsd,!cgo\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage console\n\n\/\/\n\/\/ Implementing the functions below requires cgo support. Non-cgo stubs\n\/\/ versions are defined below to enable cross-compilation of source code\n\/\/ that depends on these functions, but the resultant cross-compiled\n\/\/ binaries cannot actually be used. If the stub function(s) below are\n\/\/ actually invoked they will display an error message and cause the\n\/\/ calling process to exit.\n\/\/\n\nfunc openpt() (*os.File, error) {\n\tpanic(\"openpt() support requires cgo.\")\n}\n<commit_msg>Add missing import in freebsd nocgo stub<commit_after>\/\/ +build freebsd,!cgo\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage console\n\nimport (\n\t\"os\"\n)\n\n\/\/\n\/\/ Implementing the functions below requires cgo support. Non-cgo stubs\n\/\/ versions are defined below to enable cross-compilation of source code\n\/\/ that depends on these functions, but the resultant cross-compiled\n\/\/ binaries cannot actually be used. If the stub function(s) below are\n\/\/ actually invoked they will display an error message and cause the\n\/\/ calling process to exit.\n\/\/\n\nfunc openpt() (*os.File, error) {\n\tpanic(\"openpt() support requires cgo.\")\n}\n<|endoftext|>"} {"text":"<commit_before>package lexer\n\ntype lookaheadPair struct {\n\tLexeme Lexeme\n\tError error\n}\n\n\/\/ Lexer takes a stream of bytes and provides a stream of lexemes\ntype Lexer struct {\n\tStream RuneStream\n\n\tlookahead []lookaheadPair\n}\n\n\/\/ EOF returns true if there is no more lookahead and if the\n\/\/ byte stream has been consumed\nfunc (l *Lexer) EOF() bool {\n\treturn len(l.lookahead) == 0 && l.Stream.EOF()\n}\n\n\/\/ Get returns the very next lexeme from the byte stream\nfunc (l *Lexer) Get(n int) ([]Lexeme, []error) {\n\tlexemes := make([]Lexeme, n)\n\terrors := make([]error, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tlexeme, err := l.Next()\n\t\tlexemes[i] = lexeme\n\t\terrors[i] = err\n\t}\n\n\treturn lexemes, errors\n}\n\n\/\/ Peek does a lookahead at the very next lexeme\nfunc (l *Lexer) Peek() (Lexeme, error) {\n\tlexemes, errors := l.Lookahead(1)\n\treturn lexemes[0], errors[0]\n}\n\n\/\/ Lookahead peeks at the next n lexemes\nfunc (l *Lexer) Lookahead(n int) ([]Lexeme, []error) {\n\trem := n - len(l.lookahead)\n\n\tif rem > 0 {\n\t\tlexemes, errors := l.getN(rem)\n\n\t\tfor i := 0; i < rem; i++ {\n\t\t\tl.lookahead = append(l.lookahead, lookaheadPair{\n\t\t\t\tLexeme: lexemes[i],\n\t\t\t\tError: errors[i],\n\t\t\t})\n\t\t}\n\t}\n\n\tlookahead := l.lookahead[0:n]\n\tlexemes := make([]Lexeme, n)\n\terrors := make([]error, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tlexemes[i] = lookahead[i].Lexeme\n\t\terrors[i] = lookahead[i].Error\n\t}\n\n\treturn lexemes, errors\n}\n\n\/\/ Next returns the very next lexeme from the byte stream\nfunc (l *Lexer) Next() (Lexeme, error) {\n\tif len(l.lookahead) > 0 {\n\t\tpair := l.lookahead[0]\n\t\tl.lookahead = l.lookahead[1:]\n\t\treturn pair.Lexeme, pair.Error\n\t}\n\n\treturn l.lex()\n}\n\nfunc (l *Lexer) getN(n int) ([]Lexeme, []error) {\n\tlexemes := make([]Lexeme, n)\n\terrors := []error{}\n\n\tfor i := 0; i < n; i++ {\n\t\tlexeme, err := l.lex()\n\t\tlexemes[i] = lexeme\n\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn lexemes, errors\n}\n<commit_msg>Fix lexer to not return a slice of nil errors<commit_after>package lexer\n\ntype lookaheadPair struct {\n\tLexeme Lexeme\n\tError error\n}\n\n\/\/ Lexer takes a stream of bytes and provides a stream of lexemes\ntype Lexer struct {\n\tStream RuneStream\n\n\tlookahead []lookaheadPair\n}\n\n\/\/ EOF returns true if there is no more lookahead and if the\n\/\/ byte stream has been consumed\nfunc (l *Lexer) EOF() bool {\n\treturn len(l.lookahead) == 0 && l.Stream.EOF()\n}\n\n\/\/ Get returns the very next lexeme from the byte stream\nfunc (l *Lexer) Get(n int) ([]Lexeme, []error) {\n\tlexemes := make([]Lexeme, n)\n\terrors := []error{}\n\n\tfor i := 0; i < n; i++ {\n\t\tlexeme, err := l.Next()\n\t\tlexemes[i] = lexeme\n\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn lexemes, errors\n}\n\n\/\/ Peek does a lookahead at the very next lexeme\nfunc (l *Lexer) Peek() (Lexeme, error) {\n\tlexemes, errors := l.Lookahead(1)\n\n\tvar err error\n\tif len(errors) > 0 {\n\t\terr = errors[0]\n\t}\n\n\treturn lexemes[0], err\n}\n\n\/\/ Lookahead peeks at the next n lexemes\nfunc (l *Lexer) Lookahead(n int) ([]Lexeme, []error) {\n\trem := n - len(l.lookahead)\n\n\tif rem > 0 {\n\t\tlexemes, errors := l.getN(rem)\n\n\t\tfor i := 0; i < rem; i++ {\n\t\t\tl.lookahead = append(l.lookahead, lookaheadPair{\n\t\t\t\tLexeme: lexemes[i],\n\t\t\t\tError: errors[i],\n\t\t\t})\n\t\t}\n\t}\n\n\tlookahead := l.lookahead[0:n]\n\tlexemes := make([]Lexeme, n)\n\terrors := []error{}\n\n\tfor i := 0; i < n; i++ {\n\t\tlexemes[i] = lookahead[i].Lexeme\n\n\t\tif lookahead[i].Error != nil {\n\t\t\terrors = append(errors, lookahead[i].Error)\n\t\t}\n\t}\n\n\treturn lexemes, errors\n}\n\n\/\/ Next returns the very next lexeme from the byte stream\nfunc (l *Lexer) Next() (Lexeme, error) {\n\tif len(l.lookahead) > 0 {\n\t\tpair := l.lookahead[0]\n\t\tl.lookahead = l.lookahead[1:]\n\t\treturn pair.Lexeme, pair.Error\n\t}\n\n\treturn l.lex()\n}\n\nfunc (l *Lexer) getN(n int) ([]Lexeme, []error) {\n\tlexemes := make([]Lexeme, n)\n\terrors := make([]error, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tlexeme, err := l.lex()\n\t\tlexemes[i] = lexeme\n\t\terrors[i] = err\n\t}\n\n\treturn lexemes, errors\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"encoding\/base64\"\n\t\"strings\"\n\tstdtesting \"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\tcoretesting \"launchpad.net\/juju-core\/testing\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n\tsshtesting \"launchpad.net\/juju-core\/utils\/ssh\/testing\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\ntype AuthorisedKeysKeysSuite struct {\n\ttestbase.LoggingSuite\n}\n\nconst (\n\t\/\/ We'll use the current user for ssh tests.\n\ttestSSHUser = \"\"\n)\n\nvar _ = gc.Suite(&AuthorisedKeysKeysSuite{})\n\nfunc (s *AuthorisedKeysKeysSuite) SetUpTest(c *gc.C) {\n\ts.LoggingSuite.SetUpTest(c)\n\tfakeHome := coretesting.MakeEmptyFakeHomeWithoutJuju(c)\n\ts.AddCleanup(func(*gc.C) { fakeHome.Restore() })\n}\n\nfunc writeAuthKeysFile(c *gc.C, keys []string) {\n\terr := ssh.WriteAuthorisedKeys(testSSHUser, keys)\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeys(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\twriteAuthKeysFile(c, keys)\n\tkeys, err := ssh.ListKeys(testSSHUser, ssh.Fingerprints)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(\n\t\tkeys, gc.DeepEquals,\n\t\t[]string{sshtesting.ValidKeyOne.Fingerprint + \" (user@host)\", sshtesting.ValidKeyTwo.Fingerprint})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeysFull(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, keys)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddNewKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddMoreKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr := ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, append([]string{firstKey}, moreKeys...))\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add duplicate ssh key: \"+sshtesting.ValidKeyOne.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateComment(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" user@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key with duplicate comment: user@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeyWithoutComment(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\terr := ssh.AddKeys(testSSHUser, keys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key without comment\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"})\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.AddKeys(testSSHUser, anotherKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\tthirdKey := sshtesting.ValidKeyThree.Key + \" anotheruser@host\"\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey, thirdKey})\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{thirdKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeysKeepsUnrecognised(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey, sshtesting.ValidKeyTwo.Key, \"invalid-key\"})\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\")\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", sshtesting.ValidKeyTwo.Key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentComment(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\terr := ssh.DeleteKeys(testSSHUser, \"someone@host\")\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: someone@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentFingerprint(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\terr := ssh.DeleteKeys(testSSHUser, sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: \"+sshtesting.ValidKeyTwo.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteLastKeyForbidden(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys)\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete all keys\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey})\n\n\treplaceKey := sshtesting.ValidKeyThree.Key\n\terr := ssh.ReplaceKeys(testSSHUser, replaceKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{replaceKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"})\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.ReplaceKeys(testSSHUser, anotherKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestEnsureJujuComment(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkey string\n\t\texpected string\n\t}{\n\t\t{\"invalid-key\", \"invalid-key\"},\n\t\t{sshKey, sshKey + \" Juju:sshkey\"},\n\t\t{sshKey + \" user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" Juju:user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" \" + sshKey[3:5], sshKey + \" Juju:\" + sshKey[3:5]},\n\t} {\n\t\tactual := ssh.EnsureJujuComment(test.key)\n\t\tc.Assert(actual, gc.Equals, test.expected)\n\t}\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestSplitAuthorisedKeys(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkeyData string\n\t\texpected []string\n\t}{\n\t\t{\"\", nil},\n\t\t{sshKey, []string{sshKey}},\n\t\t{sshKey + \"\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n#comment\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n #comment\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\ninvalid\\n\", []string{sshKey, \"invalid\"}},\n\t} {\n\t\tactual := ssh.SplitAuthorisedKeys(test.keyData)\n\t\tc.Assert(actual, gc.DeepEquals, test.expected)\n\t}\n}\n\nfunc b64decode(c *gc.C, s string) []byte {\n\tb, err := base64.StdEncoding.DecodeString(s)\n\tc.Assert(err, gc.IsNil)\n\treturn b\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestParseAuthorisedKey(c *gc.C) {\n\tfor i, test := range []struct {\n\t\tline string\n\t\tkey []byte\n\t\tcomment string\n\t\terr string\n\t}{{\n\t\tline: sshtesting.ValidKeyOne.Key,\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline: sshtesting.ValidKeyOne.Key + \" a b c\",\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t\tcomment: \"a b c\",\n\t}, {\n\t\tline: \"ssh-xsa blah\",\n\t\terr: \"invalid authorized_key \\\"ssh-xsa blah\\\"\",\n\t}, {\n\t\t\/\/ options should be skipped\n\t\tline: `no-pty,principals=\"\\\"\",command=\"\\!\" ` + sshtesting.ValidKeyOne.Key,\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline: \"ssh-rsa\",\n\t\terr: \"invalid authorized_key \\\"ssh-rsa\\\"\",\n\t}} {\n\t\tc.Logf(\"test %d: %s\", i, test.line)\n\t\tak, err := ssh.ParseAuthorisedKey(test.line)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(ak, gc.Not(gc.IsNil))\n\t\t\tc.Assert(ak.Key, gc.DeepEquals, test.key)\n\t\t\tc.Assert(ak.Comment, gc.Equals, test.comment)\n\t\t}\n\t}\n}\n<commit_msg>Add comment<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"encoding\/base64\"\n\t\"strings\"\n\tstdtesting \"testing\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\tcoretesting \"launchpad.net\/juju-core\/testing\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n\tsshtesting \"launchpad.net\/juju-core\/utils\/ssh\/testing\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\ntype AuthorisedKeysKeysSuite struct {\n\ttestbase.LoggingSuite\n}\n\nconst (\n\t\/\/ We'll use the current user for ssh tests.\n\ttestSSHUser = \"\"\n)\n\nvar _ = gc.Suite(&AuthorisedKeysKeysSuite{})\n\nfunc (s *AuthorisedKeysKeysSuite) SetUpTest(c *gc.C) {\n\ts.LoggingSuite.SetUpTest(c)\n\tfakeHome := coretesting.MakeEmptyFakeHomeWithoutJuju(c)\n\ts.AddCleanup(func(*gc.C) { fakeHome.Restore() })\n}\n\nfunc writeAuthKeysFile(c *gc.C, keys []string) {\n\terr := ssh.WriteAuthorisedKeys(testSSHUser, keys)\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeys(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\twriteAuthKeysFile(c, keys)\n\tkeys, err := ssh.ListKeys(testSSHUser, ssh.Fingerprints)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(\n\t\tkeys, gc.DeepEquals,\n\t\t[]string{sshtesting.ValidKeyOne.Fingerprint + \" (user@host)\", sshtesting.ValidKeyTwo.Fingerprint})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeysFull(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, keys)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddNewKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddMoreKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr := ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, append([]string{firstKey}, moreKeys...))\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add duplicate ssh key: \"+sshtesting.ValidKeyOne.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateComment(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, gc.IsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" user@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key with duplicate comment: user@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeyWithoutComment(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\terr := ssh.AddKeys(testSSHUser, keys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key without comment\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"})\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.AddKeys(testSSHUser, anotherKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\tthirdKey := sshtesting.ValidKeyThree.Key + \" anotheruser@host\"\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey, thirdKey})\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{thirdKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeysKeepsUnrecognised(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey, sshtesting.ValidKeyTwo.Key, \"invalid-key\"})\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\")\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", sshtesting.ValidKeyTwo.Key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentComment(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\terr := ssh.DeleteKeys(testSSHUser, \"someone@host\")\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: someone@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentFingerprint(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey})\n\terr := ssh.DeleteKeys(testSSHUser, sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: \"+sshtesting.ValidKeyTwo.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteLastKeyForbidden(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys)\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete all keys\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey})\n\n\t\/\/ replaceKey is created without a comment so test that\n\t\/\/ ReplaceKeys handles keys without comments. This is\n\t\/\/ because existing keys may not have a comment and\n\t\/\/ ReplaceKeys is used to rewrite the entire authorized_keys\n\t\/\/ file when adding new keys.\n\treplaceKey := sshtesting.ValidKeyThree.Key\n\terr := ssh.ReplaceKeys(testSSHUser, replaceKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{replaceKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"})\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.ReplaceKeys(testSSHUser, anotherKey)\n\tc.Assert(err, gc.IsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestEnsureJujuComment(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkey string\n\t\texpected string\n\t}{\n\t\t{\"invalid-key\", \"invalid-key\"},\n\t\t{sshKey, sshKey + \" Juju:sshkey\"},\n\t\t{sshKey + \" user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" Juju:user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" \" + sshKey[3:5], sshKey + \" Juju:\" + sshKey[3:5]},\n\t} {\n\t\tactual := ssh.EnsureJujuComment(test.key)\n\t\tc.Assert(actual, gc.Equals, test.expected)\n\t}\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestSplitAuthorisedKeys(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkeyData string\n\t\texpected []string\n\t}{\n\t\t{\"\", nil},\n\t\t{sshKey, []string{sshKey}},\n\t\t{sshKey + \"\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n#comment\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n #comment\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\ninvalid\\n\", []string{sshKey, \"invalid\"}},\n\t} {\n\t\tactual := ssh.SplitAuthorisedKeys(test.keyData)\n\t\tc.Assert(actual, gc.DeepEquals, test.expected)\n\t}\n}\n\nfunc b64decode(c *gc.C, s string) []byte {\n\tb, err := base64.StdEncoding.DecodeString(s)\n\tc.Assert(err, gc.IsNil)\n\treturn b\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestParseAuthorisedKey(c *gc.C) {\n\tfor i, test := range []struct {\n\t\tline string\n\t\tkey []byte\n\t\tcomment string\n\t\terr string\n\t}{{\n\t\tline: sshtesting.ValidKeyOne.Key,\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline: sshtesting.ValidKeyOne.Key + \" a b c\",\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t\tcomment: \"a b c\",\n\t}, {\n\t\tline: \"ssh-xsa blah\",\n\t\terr: \"invalid authorized_key \\\"ssh-xsa blah\\\"\",\n\t}, {\n\t\t\/\/ options should be skipped\n\t\tline: `no-pty,principals=\"\\\"\",command=\"\\!\" ` + sshtesting.ValidKeyOne.Key,\n\t\tkey: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline: \"ssh-rsa\",\n\t\terr: \"invalid authorized_key \\\"ssh-rsa\\\"\",\n\t}} {\n\t\tc.Logf(\"test %d: %s\", i, test.line)\n\t\tak, err := ssh.ParseAuthorisedKey(test.line)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(ak, gc.Not(gc.IsNil))\n\t\t\tc.Assert(ak.Key, gc.DeepEquals, test.key)\n\t\t\tc.Assert(ak.Comment, gc.Equals, test.comment)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package checkawscloudwatchlogs\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\/cloudwatchlogsiface\"\n\t\"github.com\/jessevdk\/go-flags\"\n\n\t\"github.com\/mackerelio\/checkers\"\n\t\"github.com\/mackerelio\/golib\/pluginutil\"\n\t\"github.com\/natefinch\/atomic\"\n)\n\ntype logOpts struct {\n\tLogGroupName string `long:\"log-group-name\" required:\"true\" value-name:\"LOG-GROUP-NAME\" description:\"Log group name\" unquote:\"false\"`\n\tLogStreamNamePrefix string `long:\"log-stream-name-prefix\" value-name:\"LOG-STREAM-NAME-PREFIX\" description:\"Log stream name prefix\" unquote:\"false\"`\n\n\tPattern string `short:\"p\" long:\"pattern\" required:\"true\" value-name:\"PATTERN\" description:\"Pattern to search for. The value is recognized as the pattern syntax of CloudWatch Logs.\" unquote:\"false\"`\n\tWarningOver int `short:\"w\" long:\"warning-over\" value-name:\"WARNING\" description:\"Trigger a warning if matched lines is over a number\"`\n\tCriticalOver int `short:\"c\" long:\"critical-over\" value-name:\"CRITICAL\" description:\"Trigger a critical if matched lines is over a number\"`\n\tStateDir string `short:\"s\" long:\"state-dir\" value-name:\"DIR\" description:\"Dir to keep state files under\" unquote:\"false\"`\n\tReturnContent bool `short:\"r\" long:\"return\" description:\"Output matched lines\"`\n\tMaxRetries int `short:\"t\" long:\"max-retries\" value-name:\"MAX-RETRIES\" description:\"Maximum number of retries to call the AWS API\"`\n}\n\n\/\/ Do the plugin\nfunc Do() {\n\tckr := run(os.Args[1:])\n\tckr.Name = \"CloudWatch Logs\"\n\tckr.Exit()\n}\n\ntype awsCloudwatchLogsPlugin struct {\n\tService cloudwatchlogsiface.CloudWatchLogsAPI\n\tStateFile string\n\t*logOpts\n}\n\nfunc newCloudwatchLogsPlugin(opts *logOpts, args []string) (*awsCloudwatchLogsPlugin, error) {\n\tvar err error\n\tp := &awsCloudwatchLogsPlugin{logOpts: opts}\n\tp.Service, err = createService(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p.StateDir == \"\" {\n\t\tworkdir := pluginutil.PluginWorkDir()\n\t\tp.StateDir = filepath.Join(workdir, \"check-cloudwatch-logs\")\n\t}\n\tp.StateFile = getStateFile(p.StateDir, opts.LogGroupName, opts.LogStreamNamePrefix, args)\n\treturn p, nil\n}\n\nvar stateRe = regexp.MustCompile(`[^-a-zA-Z0-9_.]`)\n\nfunc getStateFile(stateDir, logGroupName, logStreamNamePrefix string, args []string) string {\n\treturn filepath.Join(\n\t\tstateDir,\n\t\tfmt.Sprintf(\n\t\t\t\"%s-%x.json\",\n\t\t\tstrings.TrimLeft(stateRe.ReplaceAllString(logGroupName+\"_\"+logStreamNamePrefix, \"_\"), \"_\"),\n\t\t\tmd5.Sum([]byte(\n\t\t\t\tstrings.Join(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\tos.Getenv(\"AWS_PROFILE\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_ACCESS_KEY_ID\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_SECRET_ACCESS_KEY\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_REGION\"),\n\t\t\t\t\t\tstrings.Join(args, \" \"),\n\t\t\t\t\t},\n\t\t\t\t\t\" \",\n\t\t\t\t)),\n\t\t\t),\n\t\t),\n\t)\n}\n\nfunc createAWSConfig(opts *logOpts) *aws.Config {\n\tconf := aws.NewConfig()\n\tif opts.MaxRetries > 0 {\n\t\treturn conf.WithMaxRetries(opts.MaxRetries)\n\t}\n\treturn conf\n}\n\nfunc createService(opts *logOpts) (*cloudwatchlogs.CloudWatchLogs, error) {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchlogs.New(sess, createAWSConfig(opts)), nil\n}\n\ntype logState struct {\n\tNextToken *string\n\tStartTime *int64\n}\n\nfunc (p *awsCloudwatchLogsPlugin) collect(now time.Time) ([]string, error) {\n\tvar nextToken *string\n\tvar startTime *int64\n\tif s, err := p.loadState(); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif s.StartTime != nil && *s.StartTime > now.Add(-time.Hour).Unix()*1000 {\n\t\t\tnextToken = s.NextToken\n\t\t\tstartTime = s.StartTime\n\t\t}\n\t}\n\tif startTime == nil {\n\t\tstartTime = aws.Int64(now.Add(-1*time.Minute).Unix() * 1000)\n\t}\n\tvar messages []string\n\tinput := &cloudwatchlogs.FilterLogEventsInput{\n\t\tStartTime: startTime,\n\t\tLogGroupName: aws.String(p.LogGroupName),\n\t\tNextToken: nextToken,\n\t\tFilterPattern: aws.String(p.Pattern),\n\t}\n\tif p.LogStreamNamePrefix != \"\" {\n\t\tinput.LogStreamNamePrefix = aws.String(p.LogStreamNamePrefix)\n\t}\n\terr := p.Service.FilterLogEventsPages(input, func(output *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {\n\t\tfor _, event := range output.Events {\n\t\t\tmessages = append(messages, *event.Message)\n\t\t\tif startTime == nil || *startTime <= *event.Timestamp {\n\t\t\t\tstartTime = aws.Int64(*event.Timestamp + 1)\n\t\t\t}\n\t\t}\n\t\tnextToken = output.NextToken\n\t\ttime.Sleep(150 * time.Millisecond)\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := p.saveState(&logState{nextToken, startTime}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}\n\nfunc (p *awsCloudwatchLogsPlugin) loadState() (*logState, error) {\n\tf, err := os.Open(p.StateFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tvar s logState\n\terr = json.NewDecoder(f).Decode(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &s, nil\n}\n\nfunc (p *awsCloudwatchLogsPlugin) saveState(s *logState) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(s); err != nil {\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(filepath.Dir(p.StateFile), 0755); err != nil {\n\t\treturn err\n\t}\n\treturn atomic.WriteFile(p.StateFile, &buf)\n}\n\nfunc (p *awsCloudwatchLogsPlugin) check(messages []string) *checkers.Checker {\n\tstatus := checkers.OK\n\tmsg := fmt.Sprint(len(messages))\n\tif len(messages) > p.CriticalOver {\n\t\tstatus = checkers.CRITICAL\n\t\tmsg += \" > \" + fmt.Sprint(p.CriticalOver)\n\t} else if len(messages) > p.WarningOver {\n\t\tstatus = checkers.WARNING\n\t\tmsg += \" > \" + fmt.Sprint(p.WarningOver)\n\t}\n\tmsg += \" messages for pattern \/\" + p.Pattern + \"\/\"\n\tif status != checkers.OK && p.ReturnContent {\n\t\tmsg += \"\\n\" + strings.Join(messages, \"\")\n\t}\n\treturn checkers.NewChecker(status, msg)\n}\n\nfunc (p *awsCloudwatchLogsPlugin) run(now time.Time) *checkers.Checker {\n\tmessages, err := p.collect(now)\n\tif err != nil {\n\t\treturn checkers.Unknown(fmt.Sprint(err))\n\t}\n\treturn p.check(messages)\n}\n\nfunc run(args []string) *checkers.Checker {\n\topts := &logOpts{}\n\t_, err := flags.ParseArgs(opts, args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\tp, err := newCloudwatchLogsPlugin(opts, args)\n\tif err != nil {\n\t\treturn checkers.Unknown(fmt.Sprint(err))\n\t}\n\treturn p.run(time.Now())\n}\n<commit_msg>refactor collect<commit_after>package checkawscloudwatchlogs\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\/cloudwatchlogsiface\"\n\t\"github.com\/jessevdk\/go-flags\"\n\n\t\"github.com\/mackerelio\/checkers\"\n\t\"github.com\/mackerelio\/golib\/pluginutil\"\n\t\"github.com\/natefinch\/atomic\"\n)\n\ntype logOpts struct {\n\tLogGroupName string `long:\"log-group-name\" required:\"true\" value-name:\"LOG-GROUP-NAME\" description:\"Log group name\" unquote:\"false\"`\n\tLogStreamNamePrefix string `long:\"log-stream-name-prefix\" value-name:\"LOG-STREAM-NAME-PREFIX\" description:\"Log stream name prefix\" unquote:\"false\"`\n\n\tPattern string `short:\"p\" long:\"pattern\" required:\"true\" value-name:\"PATTERN\" description:\"Pattern to search for. The value is recognized as the pattern syntax of CloudWatch Logs.\" unquote:\"false\"`\n\tWarningOver int `short:\"w\" long:\"warning-over\" value-name:\"WARNING\" description:\"Trigger a warning if matched lines is over a number\"`\n\tCriticalOver int `short:\"c\" long:\"critical-over\" value-name:\"CRITICAL\" description:\"Trigger a critical if matched lines is over a number\"`\n\tStateDir string `short:\"s\" long:\"state-dir\" value-name:\"DIR\" description:\"Dir to keep state files under\" unquote:\"false\"`\n\tReturnContent bool `short:\"r\" long:\"return\" description:\"Output matched lines\"`\n\tMaxRetries int `short:\"t\" long:\"max-retries\" value-name:\"MAX-RETRIES\" description:\"Maximum number of retries to call the AWS API\"`\n}\n\n\/\/ Do the plugin\nfunc Do() {\n\tckr := run(os.Args[1:])\n\tckr.Name = \"CloudWatch Logs\"\n\tckr.Exit()\n}\n\ntype awsCloudwatchLogsPlugin struct {\n\tService cloudwatchlogsiface.CloudWatchLogsAPI\n\tStateFile string\n\t*logOpts\n}\n\nfunc newCloudwatchLogsPlugin(opts *logOpts, args []string) (*awsCloudwatchLogsPlugin, error) {\n\tvar err error\n\tp := &awsCloudwatchLogsPlugin{logOpts: opts}\n\tp.Service, err = createService(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p.StateDir == \"\" {\n\t\tworkdir := pluginutil.PluginWorkDir()\n\t\tp.StateDir = filepath.Join(workdir, \"check-cloudwatch-logs\")\n\t}\n\tp.StateFile = getStateFile(p.StateDir, opts.LogGroupName, opts.LogStreamNamePrefix, args)\n\treturn p, nil\n}\n\nvar stateRe = regexp.MustCompile(`[^-a-zA-Z0-9_.]`)\n\nfunc getStateFile(stateDir, logGroupName, logStreamNamePrefix string, args []string) string {\n\treturn filepath.Join(\n\t\tstateDir,\n\t\tfmt.Sprintf(\n\t\t\t\"%s-%x.json\",\n\t\t\tstrings.TrimLeft(stateRe.ReplaceAllString(logGroupName+\"_\"+logStreamNamePrefix, \"_\"), \"_\"),\n\t\t\tmd5.Sum([]byte(\n\t\t\t\tstrings.Join(\n\t\t\t\t\t[]string{\n\t\t\t\t\t\tos.Getenv(\"AWS_PROFILE\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_ACCESS_KEY_ID\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_SECRET_ACCESS_KEY\"),\n\t\t\t\t\t\tos.Getenv(\"AWS_REGION\"),\n\t\t\t\t\t\tstrings.Join(args, \" \"),\n\t\t\t\t\t},\n\t\t\t\t\t\" \",\n\t\t\t\t)),\n\t\t\t),\n\t\t),\n\t)\n}\n\nfunc createAWSConfig(opts *logOpts) *aws.Config {\n\tconf := aws.NewConfig()\n\tif opts.MaxRetries > 0 {\n\t\treturn conf.WithMaxRetries(opts.MaxRetries)\n\t}\n\treturn conf\n}\n\nfunc createService(opts *logOpts) (*cloudwatchlogs.CloudWatchLogs, error) {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchlogs.New(sess, createAWSConfig(opts)), nil\n}\n\ntype logState struct {\n\tNextToken *string\n\tStartTime *int64\n}\n\nfunc (p *awsCloudwatchLogsPlugin) collect(now time.Time) ([]string, error) {\n\ts, err := p.loadState()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif s.StartTime == nil || *s.StartTime <= now.Add(-time.Hour).Unix()*1000 {\n\t\ts.StartTime = aws.Int64(now.Add(-1*time.Minute).Unix() * 1000)\n\t}\n\tvar messages []string\n\tinput := &cloudwatchlogs.FilterLogEventsInput{\n\t\tStartTime: s.StartTime,\n\t\tLogGroupName: aws.String(p.LogGroupName),\n\t\tNextToken: s.NextToken,\n\t\tFilterPattern: aws.String(p.Pattern),\n\t}\n\tif p.LogStreamNamePrefix != \"\" {\n\t\tinput.LogStreamNamePrefix = aws.String(p.LogStreamNamePrefix)\n\t}\n\terr = p.Service.FilterLogEventsPages(input, func(output *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {\n\t\tfor _, event := range output.Events {\n\t\t\tmessages = append(messages, *event.Message)\n\t\t\tif s.StartTime == nil || *s.StartTime <= *event.Timestamp {\n\t\t\t\ts.StartTime = aws.Int64(*event.Timestamp + 1)\n\t\t\t}\n\t\t}\n\t\ts.NextToken = output.NextToken\n\t\ttime.Sleep(150 * time.Millisecond)\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.saveState(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}\n\nfunc (p *awsCloudwatchLogsPlugin) loadState() (*logState, error) {\n\tvar s logState\n\tf, err := os.Open(p.StateFile)\n\tdefer f.Close()\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn &s, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\terr = json.NewDecoder(f).Decode(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &s, nil\n}\n\nfunc (p *awsCloudwatchLogsPlugin) saveState(s *logState) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(s); err != nil {\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(filepath.Dir(p.StateFile), 0755); err != nil {\n\t\treturn err\n\t}\n\treturn atomic.WriteFile(p.StateFile, &buf)\n}\n\nfunc (p *awsCloudwatchLogsPlugin) check(messages []string) *checkers.Checker {\n\tstatus := checkers.OK\n\tmsg := fmt.Sprint(len(messages))\n\tif len(messages) > p.CriticalOver {\n\t\tstatus = checkers.CRITICAL\n\t\tmsg += \" > \" + fmt.Sprint(p.CriticalOver)\n\t} else if len(messages) > p.WarningOver {\n\t\tstatus = checkers.WARNING\n\t\tmsg += \" > \" + fmt.Sprint(p.WarningOver)\n\t}\n\tmsg += \" messages for pattern \/\" + p.Pattern + \"\/\"\n\tif status != checkers.OK && p.ReturnContent {\n\t\tmsg += \"\\n\" + strings.Join(messages, \"\")\n\t}\n\treturn checkers.NewChecker(status, msg)\n}\n\nfunc (p *awsCloudwatchLogsPlugin) run(now time.Time) *checkers.Checker {\n\tmessages, err := p.collect(now)\n\tif err != nil {\n\t\treturn checkers.Unknown(fmt.Sprint(err))\n\t}\n\treturn p.check(messages)\n}\n\nfunc run(args []string) *checkers.Checker {\n\topts := &logOpts{}\n\t_, err := flags.ParseArgs(opts, args)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\tp, err := newCloudwatchLogsPlugin(opts, args)\n\tif err != nil {\n\t\treturn checkers.Unknown(fmt.Sprint(err))\n\t}\n\treturn p.run(time.Now())\n}\n<|endoftext|>"} {"text":"<commit_before>package pixy\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/mailgun\/kafka-pixy\/Godeps\/_workspace\/src\/github.com\/Shopify\/sarama\"\n\t. \"github.com\/mailgun\/kafka-pixy\/Godeps\/_workspace\/src\/gopkg.in\/check.v1\"\n)\n\nconst (\n\ttestSocket = \"kafka-pixy.sock\"\n)\n\ntype ServiceSuite struct {\n\tserviceCfg *ServiceCfg\n\ttkc *TestKafkaClient\n\tunixClient *http.Client\n\ttcpClient *http.Client\n}\n\nvar _ = Suite(&ServiceSuite{})\n\nfunc (s *ServiceSuite) SetUpSuite(c *C) {\n\tInitTestLog()\n}\n\nfunc (s *ServiceSuite) SetUpTest(c *C) {\n\ts.serviceCfg = &ServiceCfg{\n\t\tUnixAddr: path.Join(os.TempDir(), testSocket),\n\t\tBrokerAddrs: testKafkaPeers,\n\t}\n\tos.Remove(s.serviceCfg.UnixAddr)\n\n\ts.tkc = NewTestKafkaClient(s.serviceCfg.BrokerAddrs)\n\ts.unixClient = NewUDSHTTPClient(s.serviceCfg.UnixAddr)\n\ts.tcpClient = &http.Client{}\n}\n\nfunc (s *ServiceSuite) TestStartAndStop(c *C) {\n\tsvc, err := SpawnService(s.serviceCfg)\n\tc.Assert(err, IsNil)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n\nfunc (s *ServiceSuite) TestInvalidUnixAddr(c *C) {\n\t\/\/ Given\n\ts.serviceCfg.UnixAddr = \"\/tmp\"\n\t\/\/ When\n\tsvc, err := SpawnService(s.serviceCfg)\n\t\/\/ Then\n\tc.Assert(err.Error(), Equals,\n\t\t\"failed to start Unix socket based HTTP API, cause=(failed to create listener, cause=(listen unix \/tmp: bind: address already in use))\")\n\tc.Assert(svc, IsNil)\n}\n\nfunc (s *ServiceSuite) TestInvalidBrokers(c *C) {\n\t\/\/ Given\n\ts.serviceCfg.BrokerAddrs = []string{\"localhost:12345\"}\n\t\/\/ When\n\tsvc, err := SpawnService(s.serviceCfg)\n\t\/\/ Then\n\tc.Assert(err.Error(), Equals,\n\t\t\"failed to spawn Kafka client, cause=(failed to create sarama client, cause=(kafka: Client has run out of available brokers to talk to. Is your cluster reachable?))\")\n\tc.Assert(svc, IsNil)\n}\n\n\/\/ If `key` is not `nil` then produced messages are deterministically\n\/\/ distributed between partitions based on the `key` hash.\nfunc (s *ServiceSuite) TestProduce(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 10; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=1\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=2\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=3\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=4\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=5\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+20)\n\tc.Assert(offsetsAfter[1], Equals, offsetsBefore[1]+10)\n\tc.Assert(offsetsAfter[2], Equals, offsetsBefore[2]+10)\n\tc.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)\n}\n\n\/\/ If `key` of a produced message is `nil` then it is submitted to a random\n\/\/ partition. Therefore a batch of such messages is evenly distributed among\n\/\/ all available partitions.\nfunc (s *ServiceSuite) TestProduceNilKey(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 100; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tdelta0 := offsetsAfter[0] - offsetsBefore[0]\n\tdelta1 := offsetsAfter[1] - offsetsBefore[1]\n\timbalance := int(math.Abs(float64(delta1 - delta0)))\n\tif imbalance > 20 {\n\t\tpanic(fmt.Errorf(\"Too high imbalance: %v != %v\", delta0, delta1))\n\t}\n}\n\n\/\/ If `key` of a produced message is empty then it is deterministically\n\/\/ submitted to a particular partition determined by the empty key hash.\nfunc (s *ServiceSuite) TestProduceEmptyKey(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 10; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0])\n\tc.Assert(offsetsAfter[1], Equals, offsetsBefore[1])\n\tc.Assert(offsetsAfter[2], Equals, offsetsBefore[2])\n\tc.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)\n}\n\n\/\/ Utf8 messages are submitted without a problem.\nfunc (s *ServiceSuite) TestUtf8Message(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Превед Медвед\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmsgs := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\tc.Assert(msgs, DeepEquals,\n\t\t[][]string{[]string(nil), []string{\"Превед Медвед\"}, []string(nil), []string(nil)})\n}\n\n\/\/ TCP API is not started by default.\nfunc (s *ServiceSuite) TestTCPDoesNotWork(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr, err := s.tcpClient.Post(\"http:\/\/localhost:55501\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Hello Kitty\"))\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\tc.Assert(err.Error(), Equals,\n\t\t\"Post http:\/\/localhost:55501\/topics\/test.4?key=foo: dial tcp 127.0.0.1:55501: connection refused\")\n\tc.Assert(r, IsNil)\n}\n\n\/\/ API is served on a TCP socket if it is explicitly configured.\nfunc (s *ServiceSuite) TestBothAPI(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55502\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\t_, err1 := s.tcpClient.Post(\"http:\/\/localhost:55502\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Превед\"))\n\t_, err2 := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Kitty\"))\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\tc.Assert(err1, IsNil)\n\tc.Assert(err2, IsNil)\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmsgs := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\tc.Assert(msgs, DeepEquals,\n\t\t[][]string{[]string(nil), []string{\"Превед\", \"Kitty\"}, []string(nil), []string(nil)})\n}\n\nfunc (s *ServiceSuite) TestStoppedServerCall(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t_, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Hello\"))\n\tc.Assert(err, IsNil)\n\t\/\/ When\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Kitty\"))\n\tc.Assert(err.Error(), Equals, \"Post http:\/\/_\/topics\/test.4?key=foo: EOF\")\n\tc.Assert(r, IsNil)\n}\n\n\/\/ If the TCP API Server crashes then the service terminates gracefully.\nfunc (s *ServiceSuite) TestTCPServerCrash(c *C) {\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55502\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tsvc.tcpServer.errorCh <- fmt.Errorf(\"Kaboom!\")\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n\n\/\/ Messages that have maximum possible size indeed go through. Note that we\n\/\/ assume that the broker's limit is the same as the producer's one or higher.\nfunc (s *ServiceSuite) TestLargestMessage(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\tmaxMsgSize := sarama.NewConfig().Producer.MaxMessageBytes - ProdMsgMetadataSize([]byte(\"foo\"))\n\tmsg := GenMessage(maxMsgSize)\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55503\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr := PostChunked(s.tcpClient, \"http:\/\/127.0.0.1:55503\/topics\/test.4?key=foo\", msg)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmessages := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\treadMsg := messages[1][0]\n\tc.Assert(readMsg, Equals, msg)\n}\n\n\/\/ Messages that are larger then producer's MaxMessageBytes size are silently\n\/\/ dropped. Note that we assume that the broker's limit is the same as the\n\/\/ producer's one or higher.\nfunc (s *ServiceSuite) TestMessageTooLarge(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\tmaxMsgSize := sarama.NewConfig().Producer.MaxMessageBytes - ProdMsgMetadataSize([]byte(\"foo\")) + 1\n\tmsg := GenMessage(maxMsgSize)\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55504\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr := PostChunked(s.tcpClient, \"http:\/\/127.0.0.1:55504\/topics\/test.4?key=foo\", msg)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tc.Assert(offsetsAfter, DeepEquals, offsetsBefore)\n}\n\nfunc (s *ServiceSuite) TestSyncProduce(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=1&sync\",\n\t\t\"text\/plain\", strings.NewReader(\"Foo\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(err, IsNil)\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+1)\n}\n\nfunc (s *ServiceSuite) TestSyncProduceInvalidTopic(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/no-such-topic?sync=true\",\n\t\t\"text\/plain\", strings.NewReader(\"Foo\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tc.Assert(err, IsNil)\n\tAssertHTTPResp(c, r, http.StatusNotFound,\n\t\tfmt.Sprintf(\"%s\\n\", sarama.ErrUnknownTopicOrPartition.Error()))\n}\n\n\/\/ If the Unix API Server crashes then the service terminates gracefully.\nfunc (s *ServiceSuite) TestUnixServerCrash(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tsvc.unixServer.errorCh <- fmt.Errorf(\"Kaboom!\")\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n<commit_msg>Fix unit tests failing under golang tip<commit_after>package pixy\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/mailgun\/kafka-pixy\/Godeps\/_workspace\/src\/github.com\/Shopify\/sarama\"\n\t. \"github.com\/mailgun\/kafka-pixy\/Godeps\/_workspace\/src\/gopkg.in\/check.v1\"\n)\n\nconst (\n\ttestSocket = \"kafka-pixy.sock\"\n)\n\ntype ServiceSuite struct {\n\tserviceCfg *ServiceCfg\n\ttkc *TestKafkaClient\n\tunixClient *http.Client\n\ttcpClient *http.Client\n}\n\nvar _ = Suite(&ServiceSuite{})\n\nfunc (s *ServiceSuite) SetUpSuite(c *C) {\n\tInitTestLog()\n}\n\nfunc (s *ServiceSuite) SetUpTest(c *C) {\n\ts.serviceCfg = &ServiceCfg{\n\t\tUnixAddr: path.Join(os.TempDir(), testSocket),\n\t\tBrokerAddrs: testKafkaPeers,\n\t}\n\tos.Remove(s.serviceCfg.UnixAddr)\n\n\ts.tkc = NewTestKafkaClient(s.serviceCfg.BrokerAddrs)\n\ts.unixClient = NewUDSHTTPClient(s.serviceCfg.UnixAddr)\n\ts.tcpClient = &http.Client{}\n}\n\nfunc (s *ServiceSuite) TestStartAndStop(c *C) {\n\tsvc, err := SpawnService(s.serviceCfg)\n\tc.Assert(err, IsNil)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n\nfunc (s *ServiceSuite) TestInvalidUnixAddr(c *C) {\n\t\/\/ Given\n\ts.serviceCfg.UnixAddr = \"\/tmp\"\n\t\/\/ When\n\tsvc, err := SpawnService(s.serviceCfg)\n\t\/\/ Then\n\tc.Assert(err.Error(), Equals,\n\t\t\"failed to start Unix socket based HTTP API, cause=(failed to create listener, cause=(listen unix \/tmp: bind: address already in use))\")\n\tc.Assert(svc, IsNil)\n}\n\nfunc (s *ServiceSuite) TestInvalidBrokers(c *C) {\n\t\/\/ Given\n\ts.serviceCfg.BrokerAddrs = []string{\"localhost:12345\"}\n\t\/\/ When\n\tsvc, err := SpawnService(s.serviceCfg)\n\t\/\/ Then\n\tc.Assert(err.Error(), Equals,\n\t\t\"failed to spawn Kafka client, cause=(failed to create sarama client, cause=(kafka: Client has run out of available brokers to talk to. Is your cluster reachable?))\")\n\tc.Assert(svc, IsNil)\n}\n\n\/\/ If `key` is not `nil` then produced messages are deterministically\n\/\/ distributed between partitions based on the `key` hash.\nfunc (s *ServiceSuite) TestProduce(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 10; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=1\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=2\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=3\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=4\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=5\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+20)\n\tc.Assert(offsetsAfter[1], Equals, offsetsBefore[1]+10)\n\tc.Assert(offsetsAfter[2], Equals, offsetsBefore[2]+10)\n\tc.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)\n}\n\n\/\/ If `key` of a produced message is `nil` then it is submitted to a random\n\/\/ partition. Therefore a batch of such messages is evenly distributed among\n\/\/ all available partitions.\nfunc (s *ServiceSuite) TestProduceNilKey(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 100; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tdelta0 := offsetsAfter[0] - offsetsBefore[0]\n\tdelta1 := offsetsAfter[1] - offsetsBefore[1]\n\timbalance := int(math.Abs(float64(delta1 - delta0)))\n\tif imbalance > 20 {\n\t\tpanic(fmt.Errorf(\"Too high imbalance: %v != %v\", delta0, delta1))\n\t}\n}\n\n\/\/ If `key` of a produced message is empty then it is deterministically\n\/\/ submitted to a particular partition determined by the empty key hash.\nfunc (s *ServiceSuite) TestProduceEmptyKey(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tfor i := 0; i < 10; i++ {\n\t\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=\",\n\t\t\t\"text\/plain\", strings.NewReader(strconv.Itoa(i)))\n\t}\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0])\n\tc.Assert(offsetsAfter[1], Equals, offsetsBefore[1])\n\tc.Assert(offsetsAfter[2], Equals, offsetsBefore[2])\n\tc.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)\n}\n\n\/\/ Utf8 messages are submitted without a problem.\nfunc (s *ServiceSuite) TestUtf8Message(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\ts.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Превед Медвед\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmsgs := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\tc.Assert(msgs, DeepEquals,\n\t\t[][]string{[]string(nil), []string{\"Превед Медвед\"}, []string(nil), []string(nil)})\n}\n\n\/\/ TCP API is not started by default.\nfunc (s *ServiceSuite) TestTCPDoesNotWork(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr, err := s.tcpClient.Post(\"http:\/\/localhost:55501\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Hello Kitty\"))\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\tc.Assert(err.Error(), Matches,\n\t\t\"Post http:\/\/localhost:55501\/topics\/test.4\\\\?key=foo: .* connection refused\")\n\tc.Assert(r, IsNil)\n}\n\n\/\/ API is served on a TCP socket if it is explicitly configured.\nfunc (s *ServiceSuite) TestBothAPI(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55502\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\t_, err1 := s.tcpClient.Post(\"http:\/\/localhost:55502\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Превед\"))\n\t_, err2 := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Kitty\"))\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\tc.Assert(err1, IsNil)\n\tc.Assert(err2, IsNil)\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmsgs := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\tc.Assert(msgs, DeepEquals,\n\t\t[][]string{[]string(nil), []string{\"Превед\", \"Kitty\"}, []string(nil), []string(nil)})\n}\n\nfunc (s *ServiceSuite) TestStoppedServerCall(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t_, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Hello\"))\n\tc.Assert(err, IsNil)\n\t\/\/ When\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=foo\",\n\t\t\"text\/plain\", strings.NewReader(\"Kitty\"))\n\tc.Assert(err.Error(), Equals, \"Post http:\/\/_\/topics\/test.4?key=foo: EOF\")\n\tc.Assert(r, IsNil)\n}\n\n\/\/ If the TCP API Server crashes then the service terminates gracefully.\nfunc (s *ServiceSuite) TestTCPServerCrash(c *C) {\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55502\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tsvc.tcpServer.errorCh <- fmt.Errorf(\"Kaboom!\")\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n\n\/\/ Messages that have maximum possible size indeed go through. Note that we\n\/\/ assume that the broker's limit is the same as the producer's one or higher.\nfunc (s *ServiceSuite) TestLargestMessage(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\tmaxMsgSize := sarama.NewConfig().Producer.MaxMessageBytes - ProdMsgMetadataSize([]byte(\"foo\"))\n\tmsg := GenMessage(maxMsgSize)\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55503\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr := PostChunked(s.tcpClient, \"http:\/\/127.0.0.1:55503\/topics\/test.4?key=foo\", msg)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tmessages := s.tkc.getMessages(\"test.4\", offsetsBefore, offsetsAfter)\n\treadMsg := messages[1][0]\n\tc.Assert(readMsg, Equals, msg)\n}\n\n\/\/ Messages that are larger then producer's MaxMessageBytes size are silently\n\/\/ dropped. Note that we assume that the broker's limit is the same as the\n\/\/ producer's one or higher.\nfunc (s *ServiceSuite) TestMessageTooLarge(c *C) {\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\tmaxMsgSize := sarama.NewConfig().Producer.MaxMessageBytes - ProdMsgMetadataSize([]byte(\"foo\")) + 1\n\tmsg := GenMessage(maxMsgSize)\n\ts.serviceCfg.TCPAddr = \"127.0.0.1:55504\"\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr := PostChunked(s.tcpClient, \"http:\/\/127.0.0.1:55504\/topics\/test.4?key=foo\", msg)\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\tc.Assert(offsetsAfter, DeepEquals, offsetsBefore)\n}\n\nfunc (s *ServiceSuite) TestSyncProduce(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\toffsetsBefore := s.tkc.getOffsets(\"test.4\")\n\t\/\/ When\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/test.4?key=1&sync\",\n\t\t\"text\/plain\", strings.NewReader(\"Foo\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\toffsetsAfter := s.tkc.getOffsets(\"test.4\")\n\t\/\/ Then\n\tc.Assert(err, IsNil)\n\tAssertHTTPResp(c, r, http.StatusOK, \"\")\n\tc.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+1)\n}\n\nfunc (s *ServiceSuite) TestSyncProduceInvalidTopic(c *C) {\n\t\/\/ Given\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tr, err := s.unixClient.Post(\"http:\/\/_\/topics\/no-such-topic?sync=true\",\n\t\t\"text\/plain\", strings.NewReader(\"Foo\"))\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n\t\/\/ Then\n\tc.Assert(err, IsNil)\n\tAssertHTTPResp(c, r, http.StatusNotFound,\n\t\tfmt.Sprintf(\"%s\\n\", sarama.ErrUnknownTopicOrPartition.Error()))\n}\n\n\/\/ If the Unix API Server crashes then the service terminates gracefully.\nfunc (s *ServiceSuite) TestUnixServerCrash(c *C) {\n\tsvc, _ := SpawnService(s.serviceCfg)\n\t\/\/ When\n\tsvc.unixServer.errorCh <- fmt.Errorf(\"Kaboom!\")\n\t\/\/ Then\n\tsvc.Stop()\n\tsvc.Wait4Stop()\n}\n<|endoftext|>"} {"text":"<commit_before>package image\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"github.com\/containerum\/chkit\/pkg\/cli\/prerun\"\n\t\"github.com\/containerum\/chkit\/pkg\/context\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/activekit\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/validation\"\n\t\"github.com\/containerum\/kube-client\/pkg\/model\"\n\t\"github.com\/go-siris\/siris\/core\/errors\"\n\t\"github.com\/octago\/sflags\/gen\/gpflag\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar setAliases = []string{\"imgs\", \"img\", \"im\", \"images\"}\n\nfunc Set(ctx *context.Context) *cobra.Command {\n\tvar flags struct {\n\t\tForce bool `desc:\"suppress confirmation\" flag:\"force f\"`\n\t\tDeployment string `desc:\"deployment name\"`\n\t\tContainer string `desc:\"container name\"`\n\t\tImage string `desc:\"new image\"`\n\t}\n\tvar buildImage = func() (deployment string, image model.UpdateImage, err error) {\n\t\tvar errs []string\n\t\tif validation.ValidateImageName(flags.Image) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid image name %q\", flags.Image))\n\t\t}\n\t\tif validation.ValidateContainerName(flags.Container) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid container name %q\", flags.Container))\n\t\t}\n\t\tif validation.ValidateLabel(flags.Deployment) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid deployment name %q\", flags.Deployment))\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn \"\", model.UpdateImage{}, errors.New(strings.Join(errs, \"\\n\"))\n\t\t}\n\t\treturn flags.Deployment, model.UpdateImage{\n\t\t\tImage: flags.Image,\n\t\t\tContainer: flags.Container,\n\t\t}, nil\n\t}\n\tcommand := &cobra.Command{\n\t\tUse: \"image\",\n\t\tAliases: setAliases,\n\t\tShort: \"set container image in specific deployment\",\n\t\tLong: \"Sets container image in specific deployment.\\n\" +\n\t\t\t\"If deployment contains only one container, then uses that container by default.\",\n\t\tPreRun: prerun.PreRunFunc(ctx),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tvar logger = ctx.Log.Command(\"set image\")\n\t\t\tlogger.Debugf(\"START\")\n\t\t\tdefer logger.Debugf(\"END\")\n\t\t\tlogger.StructFields(flags)\n\t\t\tif flags.Force {\n\t\t\t\tlogger.Debugf(\"run command with forcr\")\n\t\t\t\tvar depl, image, err = buildImage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif err := ctx.Client.SetContainerImage(ctx.Namespace.ID, depl, image); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"OK\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif flags.Deployment == \"\" {\n\t\t\t\tvar deplList, err = ctx.Client.GetDeploymentList(ctx.Namespace.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\t(&activekit.Menu{\n\t\t\t\t\tTitle: \"Select deployment\",\n\t\t\t\t\tItems: activekit.StringSelector(deplList.Names(), func(s string) error {\n\t\t\t\t\t\tflags.Deployment = s\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}),\n\t\t\t\t}).Run()\n\t\t\t}\n\t\t\tif flags.Container == \"\" {\n\t\t\t\tvar depl, err = ctx.Client.GetDeployment(ctx.Namespace.ID, flags.Deployment)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif len(depl.Containers) == 1 {\n\t\t\t\t\tflags.Container = depl.Containers[0].Name\n\t\t\t\t} else {\n\t\t\t\t\t(&activekit.Menu{\n\t\t\t\t\t\tTitle: \"Select container\",\n\t\t\t\t\t\tItems: activekit.StringSelector(depl.Containers.Names(), func(s string) error {\n\t\t\t\t\t\t\tflags.Container = s\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}),\n\t\t\t\t\t}).Run()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif flags.Image == \"\" {\n\t\t\t\tfor {\n\t\t\t\t\tvar image = activekit.Promt(\"Type new image: \")\n\t\t\t\t\timage = strings.TrimSpace(image)\n\t\t\t\t\tif validation.ValidateImageName(image) != nil {\n\t\t\t\t\t\tfmt.Printf(\"%q is invalid image name!\\n\", image)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tflags.Image = image\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif activekit.YesNo(\"Are you sure you want to update image to %q of container %q in deployment %s\/%s?\",\n\t\t\t\tflags.Image, flags.Container, ctx.Namespace, flags.Deployment) {\n\t\t\t\tif err := ctx.Client.SetContainerImage(ctx.Namespace.ID, flags.Deployment, model.UpdateImage{\n\t\t\t\t\tImage: flags.Image,\n\t\t\t\t\tContainer: flags.Container,\n\t\t\t\t}); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tif err := gpflag.ParseTo(&flags, command.PersistentFlags()); err != nil {\n\t\tpanic(err)\n\t}\n\treturn command\n}\n<commit_msg>add single container case in force mode<commit_after>package image\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"github.com\/containerum\/chkit\/pkg\/cli\/prerun\"\n\t\"github.com\/containerum\/chkit\/pkg\/context\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/activekit\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/validation\"\n\t\"github.com\/containerum\/kube-client\/pkg\/model\"\n\t\"github.com\/go-siris\/siris\/core\/errors\"\n\t\"github.com\/octago\/sflags\/gen\/gpflag\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar setAliases = []string{\"imgs\", \"img\", \"im\", \"images\"}\n\nfunc Set(ctx *context.Context) *cobra.Command {\n\tvar flags struct {\n\t\tForce bool `desc:\"suppress confirmation\" flag:\"force f\"`\n\t\tDeployment string `desc:\"deployment name\"`\n\t\tContainer string `desc:\"container name\"`\n\t\tImage string `desc:\"new image\"`\n\t}\n\tvar buildImage = func() (deployment string, image model.UpdateImage, err error) {\n\t\tvar errs []string\n\t\tif validation.ValidateImageName(flags.Image) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid image name %q\", flags.Image))\n\t\t}\n\t\tif validation.ValidateContainerName(flags.Container) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid container name %q\", flags.Container))\n\t\t}\n\t\tif validation.ValidateLabel(flags.Deployment) != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid deployment name %q\", flags.Deployment))\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn \"\", model.UpdateImage{}, errors.New(strings.Join(errs, \"\\n\"))\n\t\t}\n\t\treturn flags.Deployment, model.UpdateImage{\n\t\t\tImage: flags.Image,\n\t\t\tContainer: flags.Container,\n\t\t}, nil\n\t}\n\tcommand := &cobra.Command{\n\t\tUse: \"image\",\n\t\tAliases: setAliases,\n\t\tShort: \"set container image in specific deployment\",\n\t\tLong: \"Sets container image in specific deployment.\\n\" +\n\t\t\t\"If deployment contains only one container, then uses that container by default.\",\n\t\tPreRun: prerun.PreRunFunc(ctx),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tvar logger = ctx.Log.Command(\"set image\")\n\t\t\tlogger.Debugf(\"START\")\n\t\t\tdefer logger.Debugf(\"END\")\n\t\t\tlogger.StructFields(flags)\n\t\t\tif flags.Force {\n\t\t\t\tlogger.Debugf(\"run command with force\")\n\n\t\t\t\tif flags.Container == \"\" {\n\t\t\t\t\tvar depl, err = ctx.Client.GetDeployment(ctx.Namespace.ID, flags.Deployment)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tif len(depl.Containers) == 1 {\n\t\t\t\t\t\tflags.Container = depl.Containers[0].Name\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar depl, image, err = buildImage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif err := ctx.Client.SetContainerImage(ctx.Namespace.ID, depl, image); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"OK\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif flags.Deployment == \"\" {\n\t\t\t\tvar deplList, err = ctx.Client.GetDeploymentList(ctx.Namespace.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\t(&activekit.Menu{\n\t\t\t\t\tTitle: \"Select deployment\",\n\t\t\t\t\tItems: activekit.StringSelector(deplList.Names(), func(s string) error {\n\t\t\t\t\t\tflags.Deployment = s\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}),\n\t\t\t\t}).Run()\n\t\t\t}\n\t\t\tif flags.Container == \"\" {\n\t\t\t\tvar depl, err = ctx.Client.GetDeployment(ctx.Namespace.ID, flags.Deployment)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tif len(depl.Containers) == 1 {\n\t\t\t\t\tflags.Container = depl.Containers[0].Name\n\t\t\t\t} else {\n\t\t\t\t\t(&activekit.Menu{\n\t\t\t\t\t\tTitle: \"Select container\",\n\t\t\t\t\t\tItems: activekit.StringSelector(depl.Containers.Names(), func(s string) error {\n\t\t\t\t\t\t\tflags.Container = s\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}),\n\t\t\t\t\t}).Run()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif flags.Image == \"\" {\n\t\t\t\tfor {\n\t\t\t\t\tvar image = activekit.Promt(\"Type new image: \")\n\t\t\t\t\timage = strings.TrimSpace(image)\n\t\t\t\t\tif validation.ValidateImageName(image) != nil {\n\t\t\t\t\t\tfmt.Printf(\"%q is invalid image name!\\n\", image)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tflags.Image = image\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif activekit.YesNo(\"Are you sure you want to update image to %q of container %q in deployment %s\/%s?\",\n\t\t\t\tflags.Image, flags.Container, ctx.Namespace, flags.Deployment) {\n\t\t\t\tif err := ctx.Client.SetContainerImage(ctx.Namespace.ID, flags.Deployment, model.UpdateImage{\n\t\t\t\t\tImage: flags.Image,\n\t\t\t\t\tContainer: flags.Container,\n\t\t\t\t}); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tif err := gpflag.ParseTo(&flags, command.PersistentFlags()); err != nil {\n\t\tpanic(err)\n\t}\n\treturn command\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ Common types and constants used by the importer and controller.\n\/\/ TODO: maybe the vm cloner can use these common values\n\nconst (\n\t\/\/ CDILabelKey provides a constant for CDI PVC labels\n\tCDILabelKey = \"app\"\n\t\/\/ CDILabelValue provides a constant for CDI PVC label values\n\tCDILabelValue = \"containerized-data-importer\"\n\t\/\/ CDILabelSelector provides a constant to use for the selector to identify CDI objects in list\n\tCDILabelSelector = CDILabelKey + \"=\" + CDILabelValue\n\n\t\/\/ CDIComponentLabel can be added to all CDI resources\n\tCDIComponentLabel = \"cdi.kubevirt.io\"\n\n\t\/\/ PrometheusLabel provides the label to indicate prometheus metrics are available in the pods.\n\tPrometheusLabel = \"prometheus.kubevirt.io\"\n\n\t\/\/ ImporterVolumePath provides a constant for the directory where the PV is mounted.\n\tImporterVolumePath = \"\/data\"\n\t\/\/ DiskImageName provides a constant for our importer\/datastream_ginkgo_test and to build ImporterWritePath\n\tDiskImageName = \"disk.img\"\n\t\/\/ ImporterWritePath provides a constant for the cmd\/cdi-importer\/importer.go executable\n\tImporterWritePath = ImporterVolumePath + \"\/\" + DiskImageName\n\t\/\/ ImporterWriteBlockPath provides a constant for the path where the PV is mounted.\n\tImporterWriteBlockPath = \"\/dev\/blockDevice\"\n\t\/\/ ImporterPodName provides a constant to use as a prefix for Pods created by CDI (controller only)\n\tImporterPodName = \"importer\"\n\t\/\/ ImporterDataDir provides a constant for the controller pkg to use as a hardcoded path to where content is transferred to\/from (controller only)\n\tImporterDataDir = \"\/data\"\n\t\/\/ ScratchDataDir provides a constant for the controller pkg to use as a hardcoded path to where scratch space is located.\n\tScratchDataDir = \"\/scratch\"\n\t\/\/ ImporterS3Host provides an S3 string used by importer\/dataStream.go only\n\tImporterS3Host = \"s3.amazonaws.com\"\n\t\/\/ ImporterCertDir is where the configmap containg certs will be mounted\n\tImporterCertDir = \"\/certs\"\n\t\/\/ DefaultPullPolicy imports k8s \"IfNotPresent\" string for the import_controller_gingko_test and the cdi-controller executable\n\tDefaultPullPolicy = string(v1.PullIfNotPresent)\n\n\t\/\/ PullPolicy provides a constant to capture our env variable \"PULL_POLICY\" (only used by cmd\/cdi-controller\/controller.go)\n\tPullPolicy = \"PULL_POLICY\"\n\t\/\/ ImporterSource provides a constant to capture our env variable \"IMPORTER_SOURCE\"\n\tImporterSource = \"IMPORTER_SOURCE\"\n\t\/\/ ImporterContentType provides a constant to capture our env variable \"IMPORTER_CONTENTTYPE\"\n\tImporterContentType = \"IMPORTER_CONTENTTYPE\"\n\t\/\/ ImporterEndpoint provides a constant to capture our env variable \"IMPORTER_ENDPOINT\"\n\tImporterEndpoint = \"IMPORTER_ENDPOINT\"\n\t\/\/ ImporterAccessKeyID provides a constant to capture our env variable \"IMPORTER_ACCES_KEY_ID\"\n\tImporterAccessKeyID = \"IMPORTER_ACCESS_KEY_ID\"\n\t\/\/ ImporterSecretKey provides a constant to capture our env variable \"IMPORTER_SECRET_KEY\"\n\tImporterSecretKey = \"IMPORTER_SECRET_KEY\"\n\t\/\/ ImporterImageSize provides a constant to capture our env variable \"IMPORTER_IMAGE_SIZE\"\n\tImporterImageSize = \"IMPORTER_IMAGE_SIZE\"\n\t\/\/ ImporterCertDirVar provides a constant to capture our env variable \"IMPORTER_CERT_DIR\"\n\tImporterCertDirVar = \"IMPORTER_CERT_DIR\"\n\t\/\/ InsecureTLSVar provides a constant to capture our env variable \"INSECURE_TLS\"\n\tInsecureTLSVar = \"INSECURE_TLS\"\n\n\t\/\/ CloningLabelKey provides a constant to use as a label name for pod affinity (controller pkg only)\n\tCloningLabelKey = \"cloning\"\n\t\/\/ CloningLabelValue provides a constant to use as a label value for pod affinity (controller pkg only)\n\tCloningLabelValue = \"host-assisted-cloning\"\n\t\/\/ CloningTopologyKey (controller pkg only)\n\tCloningTopologyKey = \"kubernetes.io\/hostname\"\n\t\/\/ ClonerSourcePodName (controller pkg only)\n\tClonerSourcePodName = \"clone-source-pod\"\n\t\/\/ ClonerTargetPodName (controller pkg only)\n\tClonerTargetPodName = \"clone-target-pod\"\n\t\/\/ ClonerImagePath (controller pkg only)\n\tClonerImagePath = \"\/tmp\/clone\/image\"\n\t\/\/ ClonerSocketPath (controller pkg only)\n\tClonerSocketPath = \"\/tmp\/clone\/socket\"\n\n\t\/\/ UploadServerCDILabel is the label applied to upload server resources\n\tUploadServerCDILabel = \"cdi-upload-server\"\n\t\/\/ UploadServerPodname is name of the upload server pod container\n\tUploadServerPodname = UploadServerCDILabel\n\t\/\/ UploadServerDataDir is the destination directoryfor uploads\n\tUploadServerDataDir = ImporterDataDir\n\t\/\/ UploadServerServiceLabel is the label selector for upload server services\n\tUploadServerServiceLabel = \"service\"\n\t\/\/ UploadImageSize provides a constant to capture our env variable \"UPLOAD_IMAGE_SIZE\"\n\tUploadImageSize = \"UPLOAD_IMAGE_SIZE\"\n\n\t\/\/ ConfigName is the name of default CDI Config\n\tConfigName = \"config\"\n\n\t\/\/ OwnerUID provides the UID of the owner entity (either PVC or DV)\n\tOwnerUID = \"OWNER_UID\"\n\n\t\/\/ KeyAccess provides a constant to the accessKeyId label using in controller pkg and transport_test.go\n\tKeyAccess = \"accessKeyId\"\n\t\/\/ KeySecret provides a constant to the secretKey label using in controller pkg and transport_test.go\n\tKeySecret = \"secretKey\"\n\n\t\/\/ DefaultResyncPeriod sets a 10 minute resync period, used in the controller pkg and the controller cmd executable\n\tDefaultResyncPeriod = 10 * time.Minute\n\t\/\/ InsecureRegistryConfigMap is the name of the ConfigMap for insecure registries\n\tInsecureRegistryConfigMap = \"cdi-insecure-registries\"\n\n\t\/\/ ScratchSpaceNeededExitCode is the exit code that indicates the importer pod requires scratch space to function properly.\n\tScratchSpaceNeededExitCode = 42\n)\n<commit_msg>Change the prometheus label used by cdi.<commit_after>package common\n\nimport (\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ Common types and constants used by the importer and controller.\n\/\/ TODO: maybe the vm cloner can use these common values\n\nconst (\n\t\/\/ CDILabelKey provides a constant for CDI PVC labels\n\tCDILabelKey = \"app\"\n\t\/\/ CDILabelValue provides a constant for CDI PVC label values\n\tCDILabelValue = \"containerized-data-importer\"\n\t\/\/ CDILabelSelector provides a constant to use for the selector to identify CDI objects in list\n\tCDILabelSelector = CDILabelKey + \"=\" + CDILabelValue\n\n\t\/\/ CDIComponentLabel can be added to all CDI resources\n\tCDIComponentLabel = \"cdi.kubevirt.io\"\n\n\t\/\/ PrometheusLabel provides the label to indicate prometheus metrics are available in the pods.\n\tPrometheusLabel = \"prometheus.cdi.kubevirt.io\"\n\n\t\/\/ ImporterVolumePath provides a constant for the directory where the PV is mounted.\n\tImporterVolumePath = \"\/data\"\n\t\/\/ DiskImageName provides a constant for our importer\/datastream_ginkgo_test and to build ImporterWritePath\n\tDiskImageName = \"disk.img\"\n\t\/\/ ImporterWritePath provides a constant for the cmd\/cdi-importer\/importer.go executable\n\tImporterWritePath = ImporterVolumePath + \"\/\" + DiskImageName\n\t\/\/ ImporterWriteBlockPath provides a constant for the path where the PV is mounted.\n\tImporterWriteBlockPath = \"\/dev\/blockDevice\"\n\t\/\/ ImporterPodName provides a constant to use as a prefix for Pods created by CDI (controller only)\n\tImporterPodName = \"importer\"\n\t\/\/ ImporterDataDir provides a constant for the controller pkg to use as a hardcoded path to where content is transferred to\/from (controller only)\n\tImporterDataDir = \"\/data\"\n\t\/\/ ScratchDataDir provides a constant for the controller pkg to use as a hardcoded path to where scratch space is located.\n\tScratchDataDir = \"\/scratch\"\n\t\/\/ ImporterS3Host provides an S3 string used by importer\/dataStream.go only\n\tImporterS3Host = \"s3.amazonaws.com\"\n\t\/\/ ImporterCertDir is where the configmap containg certs will be mounted\n\tImporterCertDir = \"\/certs\"\n\t\/\/ DefaultPullPolicy imports k8s \"IfNotPresent\" string for the import_controller_gingko_test and the cdi-controller executable\n\tDefaultPullPolicy = string(v1.PullIfNotPresent)\n\n\t\/\/ PullPolicy provides a constant to capture our env variable \"PULL_POLICY\" (only used by cmd\/cdi-controller\/controller.go)\n\tPullPolicy = \"PULL_POLICY\"\n\t\/\/ ImporterSource provides a constant to capture our env variable \"IMPORTER_SOURCE\"\n\tImporterSource = \"IMPORTER_SOURCE\"\n\t\/\/ ImporterContentType provides a constant to capture our env variable \"IMPORTER_CONTENTTYPE\"\n\tImporterContentType = \"IMPORTER_CONTENTTYPE\"\n\t\/\/ ImporterEndpoint provides a constant to capture our env variable \"IMPORTER_ENDPOINT\"\n\tImporterEndpoint = \"IMPORTER_ENDPOINT\"\n\t\/\/ ImporterAccessKeyID provides a constant to capture our env variable \"IMPORTER_ACCES_KEY_ID\"\n\tImporterAccessKeyID = \"IMPORTER_ACCESS_KEY_ID\"\n\t\/\/ ImporterSecretKey provides a constant to capture our env variable \"IMPORTER_SECRET_KEY\"\n\tImporterSecretKey = \"IMPORTER_SECRET_KEY\"\n\t\/\/ ImporterImageSize provides a constant to capture our env variable \"IMPORTER_IMAGE_SIZE\"\n\tImporterImageSize = \"IMPORTER_IMAGE_SIZE\"\n\t\/\/ ImporterCertDirVar provides a constant to capture our env variable \"IMPORTER_CERT_DIR\"\n\tImporterCertDirVar = \"IMPORTER_CERT_DIR\"\n\t\/\/ InsecureTLSVar provides a constant to capture our env variable \"INSECURE_TLS\"\n\tInsecureTLSVar = \"INSECURE_TLS\"\n\n\t\/\/ CloningLabelKey provides a constant to use as a label name for pod affinity (controller pkg only)\n\tCloningLabelKey = \"cloning\"\n\t\/\/ CloningLabelValue provides a constant to use as a label value for pod affinity (controller pkg only)\n\tCloningLabelValue = \"host-assisted-cloning\"\n\t\/\/ CloningTopologyKey (controller pkg only)\n\tCloningTopologyKey = \"kubernetes.io\/hostname\"\n\t\/\/ ClonerSourcePodName (controller pkg only)\n\tClonerSourcePodName = \"clone-source-pod\"\n\t\/\/ ClonerTargetPodName (controller pkg only)\n\tClonerTargetPodName = \"clone-target-pod\"\n\t\/\/ ClonerImagePath (controller pkg only)\n\tClonerImagePath = \"\/tmp\/clone\/image\"\n\t\/\/ ClonerSocketPath (controller pkg only)\n\tClonerSocketPath = \"\/tmp\/clone\/socket\"\n\n\t\/\/ UploadServerCDILabel is the label applied to upload server resources\n\tUploadServerCDILabel = \"cdi-upload-server\"\n\t\/\/ UploadServerPodname is name of the upload server pod container\n\tUploadServerPodname = UploadServerCDILabel\n\t\/\/ UploadServerDataDir is the destination directoryfor uploads\n\tUploadServerDataDir = ImporterDataDir\n\t\/\/ UploadServerServiceLabel is the label selector for upload server services\n\tUploadServerServiceLabel = \"service\"\n\t\/\/ UploadImageSize provides a constant to capture our env variable \"UPLOAD_IMAGE_SIZE\"\n\tUploadImageSize = \"UPLOAD_IMAGE_SIZE\"\n\n\t\/\/ ConfigName is the name of default CDI Config\n\tConfigName = \"config\"\n\n\t\/\/ OwnerUID provides the UID of the owner entity (either PVC or DV)\n\tOwnerUID = \"OWNER_UID\"\n\n\t\/\/ KeyAccess provides a constant to the accessKeyId label using in controller pkg and transport_test.go\n\tKeyAccess = \"accessKeyId\"\n\t\/\/ KeySecret provides a constant to the secretKey label using in controller pkg and transport_test.go\n\tKeySecret = \"secretKey\"\n\n\t\/\/ DefaultResyncPeriod sets a 10 minute resync period, used in the controller pkg and the controller cmd executable\n\tDefaultResyncPeriod = 10 * time.Minute\n\t\/\/ InsecureRegistryConfigMap is the name of the ConfigMap for insecure registries\n\tInsecureRegistryConfigMap = \"cdi-insecure-registries\"\n\n\t\/\/ ScratchSpaceNeededExitCode is the exit code that indicates the importer pod requires scratch space to function properly.\n\tScratchSpaceNeededExitCode = 42\n)\n<|endoftext|>"} {"text":"<commit_before>package couchdb\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/realtime\"\n\t\"github.com\/cozy\/echo\"\n)\n\n\/\/ Proxy generate a httputil.ReverseProxy which forwards the request to the\n\/\/ correct route.\nfunc Proxy(db Database, doctype, path string) *httputil.ReverseProxy {\n\tcouchURL := config.CouchURL()\n\tcouchAuth := config.GetConfig().CouchDB.Auth\n\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = couchURL.Scheme\n\t\treq.URL.Host = couchURL.Host\n\t\treq.Header.Del(echo.HeaderAuthorization) \/\/ drop stack auth\n\t\treq.Header.Del(echo.HeaderCookie)\n\t\treq.URL.RawPath = \"\/\" + makeDBName(db, doctype) + \"\/\" + path\n\t\treq.URL.Path, _ = url.PathUnescape(req.URL.RawPath)\n\t\tif couchAuth != nil {\n\t\t\tif p, ok := couchAuth.Password(); ok {\n\t\t\t\treq.SetBasicAuth(couchAuth.Username(), p)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &httputil.ReverseProxy{\n\t\tDirector: director,\n\t}\n}\n\n\/\/ ProxyBulkDocs generates a httputil.ReverseProxy to forward the couchdb\n\/\/ request on the _bulk_docs endpoint. This endpoint is specific since it will\n\/\/ mutate many document in database, the stack has to read the response from\n\/\/ couch to emit the correct realtime events.\nfunc ProxyBulkDocs(db Database, doctype string, req *http.Request) (*httputil.ReverseProxy, *http.Request, error) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar reqValue struct {\n\t\tDocs []JSONDoc `json:\"docs\"`\n\t\tNewEdits *bool `json:\"new_edits\"`\n\t}\n\n\tif err = json.Unmarshal(body, &reqValue); err != nil {\n\t\treturn nil, nil, echo.NewHTTPError(http.StatusBadRequest,\n\t\t\t\"request body is not valid JSON\")\n\t}\n\n\tdocs := make(map[string]JSONDoc)\n\tfor _, d := range reqValue.Docs {\n\t\tdocs[d.ID()] = d\n\t}\n\n\t\/\/ reset body to proxy\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\tvar transport http.RoundTripper\n\tif client := config.GetConfig().CouchDB.Client; client != nil {\n\t\ttransport = client.Transport\n\t}\n\n\tp := Proxy(db, doctype, \"\/_bulk_docs\")\n\tp.Transport = &bulkTransport{\n\t\tRoundTripper: transport,\n\t\tOnResponseRead: func(data []byte) {\n\t\t\ttype respValue struct {\n\t\t\t\tID string `json:\"id\"`\n\t\t\t\tRev string `json:\"rev\"`\n\t\t\t\tOK bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}\n\n\t\t\t\/\/ When using the 'new_edits' flag (like pouchdb), the couchdb response\n\t\t\t\/\/ does not contain any value. We only rely on the request data and\n\t\t\t\/\/ expect no error.\n\t\t\tif reqValue.NewEdits != nil && !*reqValue.NewEdits {\n\t\t\t\tfor _, doc := range reqValue.Docs {\n\t\t\t\t\trev := doc.Rev()\n\t\t\t\t\tvar event string\n\t\t\t\t\tif strings.HasPrefix(rev, \"1-\") {\n\t\t\t\t\t\tevent = realtime.EventCreate\n\t\t\t\t\t} else {\n\t\t\t\t\t\tevent = realtime.EventUpdate\n\t\t\t\t\t}\n\t\t\t\t\tRTEvent(db, event, doc, nil)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tvar respValues []*respValue\n\t\t\t\tif err = json.Unmarshal(data, &respValues); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdocs := make(map[string]JSONDoc)\n\t\t\t\tfor _, doc := range reqValue.Docs {\n\t\t\t\t\tdocs[doc.ID()] = doc\n\t\t\t\t}\n\n\t\t\t\tfor _, r := range respValues {\n\t\t\t\t\tif r.Error != \"\" || !r.OK {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tdoc, ok := docs[r.ID]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tvar event string\n\t\t\t\t\tif doc.Rev() == \"\" {\n\t\t\t\t\t\tevent = realtime.EventCreate\n\t\t\t\t\t} else if doc.Get(\"_deleted\") == true {\n\t\t\t\t\t\tevent = realtime.EventDelete\n\t\t\t\t\t} else {\n\t\t\t\t\t\tevent = realtime.EventUpdate\n\t\t\t\t\t}\n\t\t\t\t\tdoc.SetRev(r.Rev)\n\t\t\t\t\tRTEvent(db, event, doc, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\treturn p, req, nil\n}\n\ntype bulkTransport struct {\n\thttp.RoundTripper\n\tOnResponseRead func([]byte)\n}\n\nfunc (t *bulkTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, newConnectionError(err)\n\t}\n\tdefer func() {\n\t\tif errc := resp.Body.Close(); err == nil && errc != nil {\n\t\t\terr = errc\n\t\t}\n\t}()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == http.StatusCreated {\n\t\tgo t.OnResponseRead(b)\n\t}\n\tresp.Body = ioutil.NopCloser(bytes.NewReader(b))\n\treturn resp, nil\n}\n<commit_msg>Fix transport for couch Proxy<commit_after>package couchdb\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/realtime\"\n\t\"github.com\/cozy\/echo\"\n)\n\n\/\/ Proxy generate a httputil.ReverseProxy which forwards the request to the\n\/\/ correct route.\nfunc Proxy(db Database, doctype, path string) *httputil.ReverseProxy {\n\tcouchURL := config.CouchURL()\n\tcouchAuth := config.GetConfig().CouchDB.Auth\n\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = couchURL.Scheme\n\t\treq.URL.Host = couchURL.Host\n\t\treq.Header.Del(echo.HeaderAuthorization) \/\/ drop stack auth\n\t\treq.Header.Del(echo.HeaderCookie)\n\t\treq.URL.RawPath = \"\/\" + makeDBName(db, doctype) + \"\/\" + path\n\t\treq.URL.Path, _ = url.PathUnescape(req.URL.RawPath)\n\t\tif couchAuth != nil {\n\t\t\tif p, ok := couchAuth.Password(); ok {\n\t\t\t\treq.SetBasicAuth(couchAuth.Username(), p)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar transport http.RoundTripper\n\tif client := config.GetConfig().CouchDB.Client; client != nil {\n\t\ttransport = client.Transport\n\t} else {\n\t\ttransport = http.DefaultTransport\n\t}\n\n\treturn &httputil.ReverseProxy{\n\t\tDirector: director,\n\t\tTransport: transport,\n\t}\n}\n\n\/\/ ProxyBulkDocs generates a httputil.ReverseProxy to forward the couchdb\n\/\/ request on the _bulk_docs endpoint. This endpoint is specific since it will\n\/\/ mutate many document in database, the stack has to read the response from\n\/\/ couch to emit the correct realtime events.\nfunc ProxyBulkDocs(db Database, doctype string, req *http.Request) (*httputil.ReverseProxy, *http.Request, error) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar reqValue struct {\n\t\tDocs []JSONDoc `json:\"docs\"`\n\t\tNewEdits *bool `json:\"new_edits\"`\n\t}\n\n\tif err = json.Unmarshal(body, &reqValue); err != nil {\n\t\treturn nil, nil, echo.NewHTTPError(http.StatusBadRequest,\n\t\t\t\"request body is not valid JSON\")\n\t}\n\n\tdocs := make(map[string]JSONDoc)\n\tfor _, d := range reqValue.Docs {\n\t\tdocs[d.ID()] = d\n\t}\n\n\t\/\/ reset body to proxy\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\tvar transport http.RoundTripper\n\tif client := config.GetConfig().CouchDB.Client; client != nil {\n\t\ttransport = client.Transport\n\t}\n\n\tp := Proxy(db, doctype, \"\/_bulk_docs\")\n\tp.Transport = &bulkTransport{\n\t\tRoundTripper: transport,\n\t\tOnResponseRead: func(data []byte) {\n\t\t\ttype respValue struct {\n\t\t\t\tID string `json:\"id\"`\n\t\t\t\tRev string `json:\"rev\"`\n\t\t\t\tOK bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}\n\n\t\t\t\/\/ When using the 'new_edits' flag (like pouchdb), the couchdb response\n\t\t\t\/\/ does not contain any value. We only rely on the request data and\n\t\t\t\/\/ expect no error.\n\t\t\tif reqValue.NewEdits != nil && !*reqValue.NewEdits {\n\t\t\t\tfor _, doc := range reqValue.Docs {\n\t\t\t\t\trev := doc.Rev()\n\t\t\t\t\tvar event string\n\t\t\t\t\tif strings.HasPrefix(rev, \"1-\") {\n\t\t\t\t\t\tevent = realtime.EventCreate\n\t\t\t\t\t} else {\n\t\t\t\t\t\tevent = realtime.EventUpdate\n\t\t\t\t\t}\n\t\t\t\t\tRTEvent(db, event, doc, nil)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tvar respValues []*respValue\n\t\t\t\tif err = json.Unmarshal(data, &respValues); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdocs := make(map[string]JSONDoc)\n\t\t\t\tfor _, doc := range reqValue.Docs {\n\t\t\t\t\tdocs[doc.ID()] = doc\n\t\t\t\t}\n\n\t\t\t\tfor _, r := range respValues {\n\t\t\t\t\tif r.Error != \"\" || !r.OK {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tdoc, ok := docs[r.ID]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tvar event string\n\t\t\t\t\tif doc.Rev() == \"\" {\n\t\t\t\t\t\tevent = realtime.EventCreate\n\t\t\t\t\t} else if doc.Get(\"_deleted\") == true {\n\t\t\t\t\t\tevent = realtime.EventDelete\n\t\t\t\t\t} else {\n\t\t\t\t\t\tevent = realtime.EventUpdate\n\t\t\t\t\t}\n\t\t\t\t\tdoc.SetRev(r.Rev)\n\t\t\t\t\tRTEvent(db, event, doc, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n\treturn p, req, nil\n}\n\ntype bulkTransport struct {\n\thttp.RoundTripper\n\tOnResponseRead func([]byte)\n}\n\nfunc (t *bulkTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tresp, err = t.RoundTripper.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, newConnectionError(err)\n\t}\n\tdefer func() {\n\t\tif errc := resp.Body.Close(); err == nil && errc != nil {\n\t\t\terr = errc\n\t\t}\n\t}()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == http.StatusCreated {\n\t\tgo t.OnResponseRead(b)\n\t}\n\tresp.Body = ioutil.NopCloser(bytes.NewReader(b))\n\treturn resp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage csource\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t\"github.com\/google\/syzkaller\/sys\/targets\"\n)\n\n\/\/ Build builds a C\/C++ program from source src and returns name of the resulting binary.\n\/\/ lang can be \"c\" or \"c++\".\nfunc Build(target *prog.Target, lang, src string) (string, error) {\n\tbin, err := ioutil.TempFile(\"\", \"syzkaller\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create temp file: %v\", err)\n\t}\n\tbin.Close()\n\tsysTarget := targets.List[target.OS][target.Arch]\n\tcompiler := sysTarget.CCompilerPrefix + \"gcc\"\n\tif _, err := exec.LookPath(compiler); err != nil {\n\t\treturn \"\", ErrNoCompiler\n\t}\n\tflags := []string{\n\t\t\"-x\", lang, \"-Wall\", \"-Werror\", \"-O1\", \"-g\", \"-o\", bin.Name(),\n\t\tsrc, \"-pthread\",\n\t}\n\tflags = append(flags, sysTarget.CrossCFlags...)\n\tif sysTarget.PtrSize == 4 {\n\t\t\/\/ We do generate uint64's for syscall arguments that overflow longs on 32-bit archs.\n\t\tflags = append(flags, \"-Wno-overflow\")\n\t}\n\tout, err := osutil.Command(compiler, append(flags, \"-static\")...).CombinedOutput()\n\tif err != nil {\n\t\t\/\/ Some distributions don't have static libraries.\n\t\tout, err = osutil.Command(compiler, flags...).CombinedOutput()\n\t}\n\tif err != nil {\n\t\tos.Remove(bin.Name())\n\t\tdata, _ := ioutil.ReadFile(src)\n\t\treturn \"\", fmt.Errorf(\"failed to build program:\\n%s\\n%s\\ncompiler invocation: %v %v\",\n\t\t\tdata, out, compiler, flags)\n\t}\n\treturn bin.Name(), nil\n}\n\nvar ErrNoCompiler = errors.New(\"no target compiler\")\n\n\/\/ Format reformats C source using clang-format.\nfunc Format(src []byte) ([]byte, error) {\n\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\tcmd := osutil.Command(\"clang-format\", \"-assume-filename=\/src.c\", \"-style\", style)\n\tcmd.Stdin = bytes.NewReader(src)\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn src, fmt.Errorf(\"failed to format source: %v\\n%v\", err, stderr.String())\n\t}\n\treturn stdout.Bytes(), nil\n}\n\n\/\/ Something acceptable for kernel developers and email-friendly.\nvar style = `{\nBasedOnStyle: LLVM,\nIndentWidth: 2,\nUseTab: Never,\nBreakBeforeBraces: Linux,\nIndentCaseLabels: false,\nDerivePointerAlignment: false,\nPointerAlignment: Left,\nAlignTrailingComments: true,\nAllowShortBlocksOnASingleLine: false,\nAllowShortCaseLabelsOnASingleLine: false,\nAllowShortFunctionsOnASingleLine: false,\nAllowShortIfStatementsOnASingleLine: false,\nAllowShortLoopsOnASingleLine: false,\nColumnLimit: 80,\n}`\n<commit_msg>pkg\/scource: rename compiled binary to syz-executor<commit_after>\/\/ Copyright 2017 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\npackage csource\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/prog\"\n\t\"github.com\/google\/syzkaller\/sys\/targets\"\n)\n\n\/\/ Build builds a C\/C++ program from source src and returns name of the resulting binary.\n\/\/ lang can be \"c\" or \"c++\".\nfunc Build(target *prog.Target, lang, src string) (string, error) {\n\t\/\/ We call the binary syz-executor because it sometimes shows in bug titles,\n\t\/\/ and we don't want 2 different bugs for when a crash is triggered during fuzzing and during repro.\n\tbin, err := ioutil.TempFile(\"\", \"syz-executor\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create temp file: %v\", err)\n\t}\n\tbin.Close()\n\tsysTarget := targets.List[target.OS][target.Arch]\n\tcompiler := sysTarget.CCompilerPrefix + \"gcc\"\n\tif _, err := exec.LookPath(compiler); err != nil {\n\t\treturn \"\", ErrNoCompiler\n\t}\n\tflags := []string{\n\t\t\"-x\", lang, \"-Wall\", \"-Werror\", \"-O1\", \"-g\", \"-o\", bin.Name(),\n\t\tsrc, \"-pthread\",\n\t}\n\tflags = append(flags, sysTarget.CrossCFlags...)\n\tif sysTarget.PtrSize == 4 {\n\t\t\/\/ We do generate uint64's for syscall arguments that overflow longs on 32-bit archs.\n\t\tflags = append(flags, \"-Wno-overflow\")\n\t}\n\tout, err := osutil.Command(compiler, append(flags, \"-static\")...).CombinedOutput()\n\tif err != nil {\n\t\t\/\/ Some distributions don't have static libraries.\n\t\tout, err = osutil.Command(compiler, flags...).CombinedOutput()\n\t}\n\tif err != nil {\n\t\tos.Remove(bin.Name())\n\t\tdata, _ := ioutil.ReadFile(src)\n\t\treturn \"\", fmt.Errorf(\"failed to build program:\\n%s\\n%s\\ncompiler invocation: %v %v\",\n\t\t\tdata, out, compiler, flags)\n\t}\n\treturn bin.Name(), nil\n}\n\nvar ErrNoCompiler = errors.New(\"no target compiler\")\n\n\/\/ Format reformats C source using clang-format.\nfunc Format(src []byte) ([]byte, error) {\n\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\tcmd := osutil.Command(\"clang-format\", \"-assume-filename=\/src.c\", \"-style\", style)\n\tcmd.Stdin = bytes.NewReader(src)\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn src, fmt.Errorf(\"failed to format source: %v\\n%v\", err, stderr.String())\n\t}\n\treturn stdout.Bytes(), nil\n}\n\n\/\/ Something acceptable for kernel developers and email-friendly.\nvar style = `{\nBasedOnStyle: LLVM,\nIndentWidth: 2,\nUseTab: Never,\nBreakBeforeBraces: Linux,\nIndentCaseLabels: false,\nDerivePointerAlignment: false,\nPointerAlignment: Left,\nAlignTrailingComments: true,\nAllowShortBlocksOnASingleLine: false,\nAllowShortCaseLabelsOnASingleLine: false,\nAllowShortFunctionsOnASingleLine: false,\nAllowShortIfStatementsOnASingleLine: false,\nAllowShortLoopsOnASingleLine: false,\nColumnLimit: 80,\n}`\n<|endoftext|>"} {"text":"<commit_before>package domain\n\nimport (\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/dai\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n)\n\n\/\/ TODO: Add Redis as a store for the permissions\n\ntype Access interface {\n\tAllowedByOwner(projectID, user string) bool\n\tGetFile(apikey, fileID string) (*schema.File, error)\n}\n\n\/\/ access validates access to data. It checks if a user\n\/\/ has been given permission to access a particular item.\ntype access struct {\n\tprojects dai.Projects\n\tfiles dai.Files\n\tusers dai.Users\n}\n\n\/\/ NewAccess creates a new Access.\nfunc NewAccess(projects dai.Projects, files dai.Files, users dai.Users) *access {\n\treturn &access{\n\t\tprojects: projects,\n\t\tfiles: files,\n\t\tusers: users,\n\t}\n}\n\n\/\/ AllowedByOwner checks to see if the user making the request has access to the\n\/\/ particular item.\nfunc (a *access) AllowedByOwner(projectID, user string) bool {\n\tu, err := a.users.ByID(user)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif u.Admin {\n\t\treturn true\n\t}\n\n\taccessList, err := a.projects.AccessList(projectID)\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, entry := range accessList {\n\t\tif user == entry.UserID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetFile will validate access to a file. Rather than taking a user,\n\/\/ it takes an apikey and looks up the user. It returns the file if\n\/\/ access has been granted, otherwise it returns the erro ErrNoAccess.\nfunc (a *access) GetFile(apikey, fileID string) (*schema.File, error) {\n\tuser, err := a.users.ByAPIKey(apikey)\n\tif err != nil {\n\t\t\/\/ log error here\n\t\tapp.Log.Error(\"User lookup failed\", \"error\", err, \"apikey\", apikey)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tfile, err := a.files.ByID(fileID)\n\tif err != nil {\n\t\tapp.Log.Error(\"File lookup failed\", \"error\", err, \"fileid\", fileID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tif a.isInPublishedDataset(fileID) {\n\t\treturn file, nil\n\t}\n\n\tproject, err := a.files.GetProject(fileID)\n\tif err != nil {\n\t\tapp.Log.Error(\"Project lookup for file failed\", \"error\", err, \"fileid\", fileID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tif !a.AllowedByOwner(project.ID, user.ID) {\n\t\tapp.Log.Info(\"Access denied\", \"fileid\", file.ID, \"user\", user.ID, \"projectid\", project.ID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\treturn file, nil\n}\n\n\/\/ isInPublishedDataset checks if the file is in a published dataset. If it is then the file is accessible.\nfunc (a *access) isInPublishedDataset(fileID string) bool {\n\tif datasets, err := a.files.FileDatasets(fileID); err != nil {\n\t\treturn false\n\t} else if datasets == nil {\n\t\treturn false\n\t} else {\n\t\tfor _, ds := range datasets {\n\t\t\tif ds.Published {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n}\n<commit_msg>Simplify isInPublishedDataset<commit_after>package domain\n\nimport (\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/dai\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n)\n\n\/\/ TODO: Add Redis as a store for the permissions\n\ntype Access interface {\n\tAllowedByOwner(projectID, user string) bool\n\tGetFile(apikey, fileID string) (*schema.File, error)\n}\n\n\/\/ access validates access to data. It checks if a user\n\/\/ has been given permission to access a particular item.\ntype access struct {\n\tprojects dai.Projects\n\tfiles dai.Files\n\tusers dai.Users\n}\n\n\/\/ NewAccess creates a new Access.\nfunc NewAccess(projects dai.Projects, files dai.Files, users dai.Users) *access {\n\treturn &access{\n\t\tprojects: projects,\n\t\tfiles: files,\n\t\tusers: users,\n\t}\n}\n\n\/\/ AllowedByOwner checks to see if the user making the request has access to the\n\/\/ particular item.\nfunc (a *access) AllowedByOwner(projectID, user string) bool {\n\tu, err := a.users.ByID(user)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif u.Admin {\n\t\treturn true\n\t}\n\n\taccessList, err := a.projects.AccessList(projectID)\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, entry := range accessList {\n\t\tif user == entry.UserID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetFile will validate access to a file. Rather than taking a user,\n\/\/ it takes an apikey and looks up the user. It returns the file if\n\/\/ access has been granted, otherwise it returns the erro ErrNoAccess.\nfunc (a *access) GetFile(apikey, fileID string) (*schema.File, error) {\n\tuser, err := a.users.ByAPIKey(apikey)\n\tif err != nil {\n\t\t\/\/ log error here\n\t\tapp.Log.Error(\"User lookup failed\", \"error\", err, \"apikey\", apikey)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tfile, err := a.files.ByID(fileID)\n\tif err != nil {\n\t\tapp.Log.Error(\"File lookup failed\", \"error\", err, \"fileid\", fileID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tif a.isInPublishedDataset(fileID) {\n\t\treturn file, nil\n\t}\n\n\tproject, err := a.files.GetProject(fileID)\n\tif err != nil {\n\t\tapp.Log.Error(\"Project lookup for file failed\", \"error\", err, \"fileid\", fileID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\tif !a.AllowedByOwner(project.ID, user.ID) {\n\t\tapp.Log.Info(\"Access denied\", \"fileid\", file.ID, \"user\", user.ID, \"projectid\", project.ID)\n\t\treturn nil, app.ErrNoAccess\n\t}\n\n\treturn file, nil\n}\n\n\/\/ isInPublishedDataset checks if the file is in a published dataset. If it is then the file is accessible.\nfunc (a *access) isInPublishedDataset(fileID string) bool {\n\tdatasets, err := a.files.FileDatasets(fileID)\n\tif err != nil || datasets == nil {\n\t\treturn false\n\t}\n\n\tfor _, ds := range datasets {\n\t\tif ds.Published {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package render\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/\/ Options type to configure rendering\ntype Options struct {\n\tStatus int\n\tData interface{}\n\tCache bool\n}\n\n\/\/ Renders JSON and send it to the HTTP client, supports caching\nfunc JSON(w http.ResponseWriter, opts Options) error {\n\tif &w == nil {\n\t\treturn fmt.Errorf(\"You must provide a http.ResponseWriter\")\n\t}\n\n\theaders := w.Header()\n\theaders.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tif opts.Cache {\n\t\theaders.Set(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\t\theaders.Set(\"Pragma\", \"no-cache\")\n\t\theaders.Set(\"Expires\", \"0\")\n\t}\n\n\tjsonbytes, err := json.Marshal(opts.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theaders.Set(\"Content-Length\", fmt.Sprintf(\"%d\", len(jsonbytes)))\n\tif opts.Status <= 0 {\n\t\topts.Status = http.StatusOK\n\t}\n\tw.WriteHeader(opts.Status)\n\n\t_, err = w.Write(jsonbytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Cleans up code to make it look more idiomatic.<commit_after>package render\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/ Options type to configure rendering\ntype Options struct {\n\tStatus int\n\tData interface{}\n\tCache bool\n}\n\n\/\/ Renders JSON and send it to the HTTP client, supports caching\nfunc JSON(w http.ResponseWriter, opts Options) error {\n\tif &w == nil {\n\t\treturn fmt.Errorf(\"You must provide a http.ResponseWriter\")\n\t}\n\n\theaders := w.Header()\n\theaders.Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tif opts.Cache {\n\t\theaders.Set(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\t\theaders.Set(\"Pragma\", \"no-cache\")\n\t\theaders.Set(\"Expires\", \"0\")\n\t}\n\n\tjsonBytes, err := json.Marshal(opts.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theaders.Set(\"Content-Length\", strconv.Itoa(len(jsonBytes)))\n\tif opts.Status <= 0 {\n\t\topts.Status = http.StatusOK\n\t}\n\tw.WriteHeader(opts.Status)\n\tw.Write(jsonBytes)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage runtime\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/conversion\/queryparams\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\n\/\/ codec binds an encoder and decoder.\ntype codec struct {\n\tEncoder\n\tDecoder\n}\n\n\/\/ NewCodec creates a Codec from an Encoder and Decoder.\nfunc NewCodec(e Encoder, d Decoder) Codec {\n\treturn codec{e, d}\n}\n\n\/\/ Encode is a convenience wrapper for encoding to a []byte from an Encoder\nfunc Encode(e Encoder, obj Object) ([]byte, error) {\n\t\/\/ TODO: reuse buffer\n\tbuf := &bytes.Buffer{}\n\tif err := e.Encode(obj, buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Decode is a convenience wrapper for decoding data into an Object.\nfunc Decode(d Decoder, data []byte) (Object, error) {\n\tobj, _, err := d.Decode(data, nil, nil)\n\treturn obj, err\n}\n\n\/\/ DecodeInto performs a Decode into the provided object.\nfunc DecodeInto(d Decoder, data []byte, into Object) error {\n\tout, gvk, err := d.Decode(data, nil, into)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif out != into {\n\t\treturn fmt.Errorf(\"unable to decode %s into %v\", gvk, reflect.TypeOf(into))\n\t}\n\treturn nil\n}\n\n\/\/ EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests.\nfunc EncodeOrDie(e Encoder, obj Object) string {\n\tbytes, err := Encode(e, obj)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(bytes)\n}\n\n\/\/ DefaultingSerializer invokes defaulting after decoding.\ntype DefaultingSerializer struct {\n\tDefaulter ObjectDefaulter\n\tDecoder Decoder\n\t\/\/ Encoder is optional to allow this type to be used as both a Decoder and an Encoder\n\tEncoder\n}\n\n\/\/ Decode performs a decode and then allows the defaulter to act on the provided object.\nfunc (d DefaultingSerializer) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {\n\tobj, gvk, err := d.Decoder.Decode(data, defaultGVK, into)\n\tif err != nil {\n\t\treturn obj, gvk, err\n\t}\n\td.Defaulter.Default(obj)\n\treturn obj, gvk, nil\n}\n\n\/\/ UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or\n\/\/ invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object.\nfunc UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {\n\tif obj != nil {\n\t\tkinds, _, err := t.ObjectKinds(obj)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, kind := range kinds {\n\t\t\tif gvk == kind {\n\t\t\t\treturn obj, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn c.New(gvk)\n}\n\n\/\/ NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding.\ntype NoopEncoder struct {\n\tDecoder\n}\n\nvar _ Serializer = NoopEncoder{}\n\nfunc (n NoopEncoder) Encode(obj Object, w io.Writer) error {\n\treturn fmt.Errorf(\"encoding is not allowed for this codec: %v\", reflect.TypeOf(n.Decoder))\n}\n\n\/\/ NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.\ntype NoopDecoder struct {\n\tEncoder\n}\n\nvar _ Serializer = NoopDecoder{}\n\nfunc (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {\n\treturn nil, nil, fmt.Errorf(\"decoding is not allowed for this codec: %v\", reflect.TypeOf(n.Encoder))\n}\n\n\/\/ NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.\nfunc NewParameterCodec(scheme *Scheme) ParameterCodec {\n\treturn ¶meterCodec{\n\t\ttyper: scheme,\n\t\tconvertor: scheme,\n\t\tcreator: scheme,\n\t\tdefaulter: scheme,\n\t}\n}\n\n\/\/ parameterCodec implements conversion to and from query parameters and objects.\ntype parameterCodec struct {\n\ttyper ObjectTyper\n\tconvertor ObjectConvertor\n\tcreator ObjectCreater\n\tdefaulter ObjectDefaulter\n}\n\nvar _ ParameterCodec = ¶meterCodec{}\n\n\/\/ DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then\n\/\/ converts that object to into (if necessary). Returns an error if the operation cannot be completed.\nfunc (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error {\n\tif len(parameters) == 0 {\n\t\treturn nil\n\t}\n\ttargetGVKs, _, err := c.typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range targetGVKs {\n\t\tif targetGVKs[i].GroupVersion() == from {\n\t\t\tif err := c.convertor.Convert(¶meters, into, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ in the case where we going into the same object we're receiving, default on the outbound object\n\t\t\tif c.defaulter != nil {\n\t\t\t\tc.defaulter.Default(into)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tinput, err := c.creator.New(from.WithKind(targetGVKs[0].Kind))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.convertor.Convert(¶meters, input, nil); err != nil {\n\t\treturn err\n\t}\n\t\/\/ if we have defaulter, default the input before converting to output\n\tif c.defaulter != nil {\n\t\tc.defaulter.Default(input)\n\t}\n\treturn c.convertor.Convert(input, into, nil)\n}\n\n\/\/ EncodeParameters converts the provided object into the to version, then converts that object to url.Values.\n\/\/ Returns an error if conversion is not possible.\nfunc (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) {\n\tgvks, _, err := c.typer.ObjectKinds(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgvk := gvks[0]\n\tif to != gvk.GroupVersion() {\n\t\tout, err := c.convertor.ConvertToVersion(obj, to)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tobj = out\n\t}\n\treturn queryparams.Convert(obj)\n}\n\ntype base64Serializer struct {\n\tEncoder\n\tDecoder\n}\n\nfunc NewBase64Serializer(e Encoder, d Decoder) Serializer {\n\treturn &base64Serializer{e, d}\n}\n\nfunc (s base64Serializer) Encode(obj Object, stream io.Writer) error {\n\te := base64.NewEncoder(base64.StdEncoding, stream)\n\terr := s.Encoder.Encode(obj, e)\n\te.Close()\n\treturn err\n}\n\nfunc (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {\n\tout := make([]byte, base64.StdEncoding.DecodedLen(len(data)))\n\tn, err := base64.StdEncoding.Decode(out, data)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn s.Decoder.Decode(out[:n], defaults, into)\n}\n\n\/\/ SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot\n\/\/ include media-type parameters), or the first info with an empty media type, or false if no type matches.\nfunc SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) {\n\tfor _, info := range types {\n\t\tif info.MediaType == mediaType {\n\t\t\treturn info, true\n\t\t}\n\t}\n\tfor _, info := range types {\n\t\tif len(info.MediaType) == 0 {\n\t\t\treturn info, true\n\t\t}\n\t}\n\treturn SerializerInfo{}, false\n}\n\nvar (\n\t\/\/ InternalGroupVersioner will always prefer the internal version for a given group version kind.\n\tInternalGroupVersioner GroupVersioner = internalGroupVersioner{}\n\t\/\/ DisabledGroupVersioner will reject all kinds passed to it.\n\tDisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}\n)\n\ntype internalGroupVersioner struct{}\n\n\/\/ KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.\nfunc (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, kind := range kinds {\n\t\tif kind.Version == APIVersionInternal {\n\t\t\treturn kind, true\n\t\t}\n\t}\n\tfor _, kind := range kinds {\n\t\treturn schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n\ntype disabledGroupVersioner struct{}\n\n\/\/ KindForGroupVersionKinds returns false for any input.\nfunc (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\treturn schema.GroupVersionKind{}, false\n}\n\n\/\/ GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind.\ntype GroupVersioners []GroupVersioner\n\n\/\/ KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred.\nfunc (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, gv := range gvs {\n\t\ttarget, ok := gv.KindForGroupVersionKinds(kinds)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\treturn target, true\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n\n\/\/ Assert that schema.GroupVersion and GroupVersions implement GroupVersioner\nvar _ GroupVersioner = schema.GroupVersion{}\nvar _ GroupVersioner = schema.GroupVersions{}\nvar _ GroupVersioner = multiGroupVersioner{}\n\ntype multiGroupVersioner struct {\n\ttarget schema.GroupVersion\n\tacceptedGroupKinds []schema.GroupKind\n}\n\n\/\/ NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.\n\/\/ Kind may be empty in the provided group kind, in which case any kind will match.\nfunc NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {\n\tif len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) {\n\t\treturn gv\n\t}\n\treturn multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}\n}\n\n\/\/ KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will\n\/\/ use the originating kind where possible.\nfunc (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, src := range kinds {\n\t\tfor _, kind := range v.acceptedGroupKinds {\n\t\t\tif kind.Group != src.Group {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(kind.Kind) > 0 && kind.Kind != src.Kind {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn v.target.WithKind(src.Kind), true\n\t\t}\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n<commit_msg>Remove DefaultingSerializer as it is not being used<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage runtime\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/url\"\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/conversion\/queryparams\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\n\/\/ codec binds an encoder and decoder.\ntype codec struct {\n\tEncoder\n\tDecoder\n}\n\n\/\/ NewCodec creates a Codec from an Encoder and Decoder.\nfunc NewCodec(e Encoder, d Decoder) Codec {\n\treturn codec{e, d}\n}\n\n\/\/ Encode is a convenience wrapper for encoding to a []byte from an Encoder\nfunc Encode(e Encoder, obj Object) ([]byte, error) {\n\t\/\/ TODO: reuse buffer\n\tbuf := &bytes.Buffer{}\n\tif err := e.Encode(obj, buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Decode is a convenience wrapper for decoding data into an Object.\nfunc Decode(d Decoder, data []byte) (Object, error) {\n\tobj, _, err := d.Decode(data, nil, nil)\n\treturn obj, err\n}\n\n\/\/ DecodeInto performs a Decode into the provided object.\nfunc DecodeInto(d Decoder, data []byte, into Object) error {\n\tout, gvk, err := d.Decode(data, nil, into)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif out != into {\n\t\treturn fmt.Errorf(\"unable to decode %s into %v\", gvk, reflect.TypeOf(into))\n\t}\n\treturn nil\n}\n\n\/\/ EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests.\nfunc EncodeOrDie(e Encoder, obj Object) string {\n\tbytes, err := Encode(e, obj)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(bytes)\n}\n\n\/\/ UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or\n\/\/ invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object.\nfunc UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {\n\tif obj != nil {\n\t\tkinds, _, err := t.ObjectKinds(obj)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, kind := range kinds {\n\t\t\tif gvk == kind {\n\t\t\t\treturn obj, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn c.New(gvk)\n}\n\n\/\/ NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding.\ntype NoopEncoder struct {\n\tDecoder\n}\n\nvar _ Serializer = NoopEncoder{}\n\nfunc (n NoopEncoder) Encode(obj Object, w io.Writer) error {\n\treturn fmt.Errorf(\"encoding is not allowed for this codec: %v\", reflect.TypeOf(n.Decoder))\n}\n\n\/\/ NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.\ntype NoopDecoder struct {\n\tEncoder\n}\n\nvar _ Serializer = NoopDecoder{}\n\nfunc (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {\n\treturn nil, nil, fmt.Errorf(\"decoding is not allowed for this codec: %v\", reflect.TypeOf(n.Encoder))\n}\n\n\/\/ NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.\nfunc NewParameterCodec(scheme *Scheme) ParameterCodec {\n\treturn ¶meterCodec{\n\t\ttyper: scheme,\n\t\tconvertor: scheme,\n\t\tcreator: scheme,\n\t\tdefaulter: scheme,\n\t}\n}\n\n\/\/ parameterCodec implements conversion to and from query parameters and objects.\ntype parameterCodec struct {\n\ttyper ObjectTyper\n\tconvertor ObjectConvertor\n\tcreator ObjectCreater\n\tdefaulter ObjectDefaulter\n}\n\nvar _ ParameterCodec = ¶meterCodec{}\n\n\/\/ DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then\n\/\/ converts that object to into (if necessary). Returns an error if the operation cannot be completed.\nfunc (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error {\n\tif len(parameters) == 0 {\n\t\treturn nil\n\t}\n\ttargetGVKs, _, err := c.typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range targetGVKs {\n\t\tif targetGVKs[i].GroupVersion() == from {\n\t\t\tif err := c.convertor.Convert(¶meters, into, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ in the case where we going into the same object we're receiving, default on the outbound object\n\t\t\tif c.defaulter != nil {\n\t\t\t\tc.defaulter.Default(into)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tinput, err := c.creator.New(from.WithKind(targetGVKs[0].Kind))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.convertor.Convert(¶meters, input, nil); err != nil {\n\t\treturn err\n\t}\n\t\/\/ if we have defaulter, default the input before converting to output\n\tif c.defaulter != nil {\n\t\tc.defaulter.Default(input)\n\t}\n\treturn c.convertor.Convert(input, into, nil)\n}\n\n\/\/ EncodeParameters converts the provided object into the to version, then converts that object to url.Values.\n\/\/ Returns an error if conversion is not possible.\nfunc (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) {\n\tgvks, _, err := c.typer.ObjectKinds(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgvk := gvks[0]\n\tif to != gvk.GroupVersion() {\n\t\tout, err := c.convertor.ConvertToVersion(obj, to)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tobj = out\n\t}\n\treturn queryparams.Convert(obj)\n}\n\ntype base64Serializer struct {\n\tEncoder\n\tDecoder\n}\n\nfunc NewBase64Serializer(e Encoder, d Decoder) Serializer {\n\treturn &base64Serializer{e, d}\n}\n\nfunc (s base64Serializer) Encode(obj Object, stream io.Writer) error {\n\te := base64.NewEncoder(base64.StdEncoding, stream)\n\terr := s.Encoder.Encode(obj, e)\n\te.Close()\n\treturn err\n}\n\nfunc (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {\n\tout := make([]byte, base64.StdEncoding.DecodedLen(len(data)))\n\tn, err := base64.StdEncoding.Decode(out, data)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn s.Decoder.Decode(out[:n], defaults, into)\n}\n\n\/\/ SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot\n\/\/ include media-type parameters), or the first info with an empty media type, or false if no type matches.\nfunc SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) {\n\tfor _, info := range types {\n\t\tif info.MediaType == mediaType {\n\t\t\treturn info, true\n\t\t}\n\t}\n\tfor _, info := range types {\n\t\tif len(info.MediaType) == 0 {\n\t\t\treturn info, true\n\t\t}\n\t}\n\treturn SerializerInfo{}, false\n}\n\nvar (\n\t\/\/ InternalGroupVersioner will always prefer the internal version for a given group version kind.\n\tInternalGroupVersioner GroupVersioner = internalGroupVersioner{}\n\t\/\/ DisabledGroupVersioner will reject all kinds passed to it.\n\tDisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}\n)\n\ntype internalGroupVersioner struct{}\n\n\/\/ KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.\nfunc (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, kind := range kinds {\n\t\tif kind.Version == APIVersionInternal {\n\t\t\treturn kind, true\n\t\t}\n\t}\n\tfor _, kind := range kinds {\n\t\treturn schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n\ntype disabledGroupVersioner struct{}\n\n\/\/ KindForGroupVersionKinds returns false for any input.\nfunc (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\treturn schema.GroupVersionKind{}, false\n}\n\n\/\/ GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind.\ntype GroupVersioners []GroupVersioner\n\n\/\/ KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred.\nfunc (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, gv := range gvs {\n\t\ttarget, ok := gv.KindForGroupVersionKinds(kinds)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\treturn target, true\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n\n\/\/ Assert that schema.GroupVersion and GroupVersions implement GroupVersioner\nvar _ GroupVersioner = schema.GroupVersion{}\nvar _ GroupVersioner = schema.GroupVersions{}\nvar _ GroupVersioner = multiGroupVersioner{}\n\ntype multiGroupVersioner struct {\n\ttarget schema.GroupVersion\n\tacceptedGroupKinds []schema.GroupKind\n}\n\n\/\/ NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.\n\/\/ Kind may be empty in the provided group kind, in which case any kind will match.\nfunc NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {\n\tif len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) {\n\t\treturn gv\n\t}\n\treturn multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}\n}\n\n\/\/ KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will\n\/\/ use the originating kind where possible.\nfunc (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {\n\tfor _, src := range kinds {\n\t\tfor _, kind := range v.acceptedGroupKinds {\n\t\t\tif kind.Group != src.Group {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(kind.Kind) > 0 && kind.Kind != src.Kind {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn v.target.WithKind(src.Kind), true\n\t\t}\n\t}\n\treturn schema.GroupVersionKind{}, false\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"os\"\n\t\"net\"\n\t\n\t\"junta\/util\"\n\t\"junta\/paxos\"\n\t\"junta\/proto\"\n\t\"junta\/store\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype ReadFromWriteToer interface {\n\tReadFrom([]byte) (int, net.Addr, os.Error)\n\tWriteTo([]byte, net.Addr) (int, os.Error)\n\tLocalAddr() net.Addr\n}\n\nconst packetSize = 3000\n\ntype conn struct {\n\tnet.Conn\n\ts *Server\n}\n\ntype Manager interface {\n\tPutFrom(string, paxos.Msg)\n\tPropose(string) (uint64, string, os.Error)\n\tAlpha() int\n}\n\ntype Server struct {\n\tAddr string\n\tSt *store.Store\n\tMg Manager\n\tSelf string\n}\n\nfunc (sv *Server) ListenAndServe() os.Error {\n\tlogger := util.NewLogger(\"server %s\", sv.Addr)\n\n\tlogger.Log(\"binding\")\n\tl, err := net.Listen(\"tcp\", sv.Addr)\n\tif err != nil {\n\t\tlogger.Log(err)\n\t\treturn err\n\t}\n\tdefer l.Close()\n\tlogger.Log(\"listening\")\n\n\terr = sv.Serve(l)\n\tif err != nil {\n\t\tlogger.Logf(\"%s: %s\", l, err)\n}\n\treturn err\n}\n\nfunc (sv *Server) ListenAndServeUdp(outs chan paxos.Packet) os.Error {\n\tlogger := util.NewLogger(\"udp server %s\", sv.Addr)\n\n\tlogger.Log(\"binding\")\n\tu, err := net.ListenPacket(\"udp\", sv.Addr)\n\tif err != nil {\n\t\tlogger.Log(err)\n\t\treturn err\n\t}\n\tdefer u.Close()\n\tlogger.Log(\"listening\")\n\n\terr = sv.ServeUdp(u, outs)\n\tif err != nil {\n\t\tlogger.Logf(\"%s: %s\", u, err)\n\t}\n\treturn err\n}\n\nfunc (sv *Server) ServeUdp(u ReadFromWriteToer, outs chan paxos.Packet) os.Error {\n\trecvd := make(chan paxos.Packet)\n\tsent := make(chan paxos.Packet)\n\n\tlogger := util.NewLogger(\"udp server %s\", u.LocalAddr())\n\tgo func() {\n\t\tlogger.Log(\"reading messages...\")\n\t\tfor {\n\t\t\tmsg, addr, err := paxos.ReadMsg(u, packetSize)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogger.Logf(\"read %v from %s\", msg, addr)\n\t\t\trecvd <- paxos.Packet{msg, addr}\n\t\t\tsv.Mg.PutFrom(addr, msg)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tlogger.Log(\"sending messages...\")\n\t\tfor pk := range outs {\n\t\t\tlogger.Logf(\"sending %v\", pk)\n\t\t\tudpAddr, err := net.ResolveUDPAddr(pk.Addr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, err = u.WriteTo(pk.Msg.WireBytes(), udpAddr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsent <- paxos.Packet{pk.Msg, pk.Addr}\n\t\t}\n\t}()\n\n\tneedsAck := make(map[string]bool)\n\tresend := make(chan paxos.Packet)\n\tfor {\n\t\tselect {\n\t\tcase pk := <-recvd:\n\t\t\tif pk.Msg.HasFlags(paxos.Ack) {\n\t\t\t\tlogger.Logf(\"got ack %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tneedsAck[pk.Id()] = false\n\t\t\t} else {\n\t\t\t\tlogger.Logf(\"sending ack %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tudpAddr, err := net.ResolveUDPAddr(pk.Addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tack := pk.Msg.Dup().SetFlags(paxos.Ack)\n\t\t\t\tu.WriteTo(ack.WireBytes(), udpAddr)\n\t\t\t}\n\t\tcase pk := <-sent:\n\t\t\tneedsAck[pk.Id()] = true\n\t\t\tlogger.Logf(\"needs ack %s %v\", pk.Addr, pk.Msg)\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(100000000) \/\/ ns == 0.1s\n\t\t\t\tresend <- pk\n\t\t\t}()\n\t\tcase pk := <-resend:\n\t\t\tif needsAck[pk.Id()] {\n\t\t\t\tlogger.Logf(\"resending %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tgo func() {\n\t\t\t\t\touts <- pk\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tneedsAck[pk.Id()] = false, false\n\t\t\t}\n\t\t}\n\t}\n\n\tpanic(\"not reached\")\n}\n\nfunc (s *Server) Serve(l net.Listener) os.Error {\n\tfor {\n\t\trw, e := l.Accept()\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tc := &conn{rw, s}\n\t\tgo c.serve()\n\t}\n\n\tpanic(\"not reached\")\n}\n\nfunc (sv *Server) leader() string {\n\tparts, cas := sv.St.Lookup(\"\/j\/junta\/leader\")\n\tif cas == store.Dir && cas == store.Missing {\n\t\treturn \"\"\n\t}\n\treturn parts[0]\n}\n\nfunc (sv *Server) setOnce(path, body, cas string) (uint64, os.Error) {\n\tmut, err := store.EncodeSet(path, body, cas)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tseqn, v, err := sv.Mg.Propose(mut)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ We failed, but only because of a competing proposal. The client should\n\t\/\/ retry.\n\tif v != mut {\n\t\treturn 0, os.EAGAIN\n\t}\n\n\treturn seqn, nil\n}\n\nfunc (sv *Server) Set(path, body, cas string) (seqn uint64, err os.Error) {\n\terr = os.EAGAIN\n\tfor err == os.EAGAIN {\n\t\tseqn, err = sv.setOnce(path, body, cas)\n\t}\n\treturn\n}\n\nfunc (sv *Server) delOnce(path, cas string) (uint64, os.Error) {\n\tmut, err := store.EncodeDel(path, cas)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tseqn, v, err := sv.Mg.Propose(mut)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ We failed, but only because of a competing proposal. The client should\n\t\/\/ retry.\n\tif v != mut {\n\t\treturn 0, os.EAGAIN\n\t}\n\n\treturn seqn, nil\n}\n\nfunc (sv *Server) Del(path, cas string) (seqn uint64, err os.Error) {\n\terr = os.EAGAIN\n\tfor err == os.EAGAIN {\n\t\tseqn, err = sv.delOnce(path, cas)\n\t}\n\treturn\n}\n\nfunc (sv *Server) WaitForPathSet(path string) (body string, err os.Error) {\n\tevs := make(chan store.Event)\n\tdefer close(evs)\n\tsv.St.Watch(path, evs)\n\n\tparts, cas := sv.St.Lookup(path)\n\tif cas != store.Dir && cas != store.Missing {\n\t\treturn parts[0], nil\n\t}\n\n\tfor ev := range evs {\n\t\tif ev.IsSet() {\n\t\t\treturn ev.Body, nil\n\t\t}\n\t}\n\n\tpanic(\"not reached\")\n}\n\n\/\/ Repeatedly propose nop values until a successful read from `done`.\nfunc (sv *Server) AdvanceUntil(done chan int) {\n\tfor _, ok := <-done; !ok; _, ok = <-done {\n\t\tsv.Mg.Propose(store.Nop)\n\t}\n}\n\nfunc (c *conn) serve() {\n\tpc := proto.NewConn(c)\n\tlogger := util.NewLogger(\"%v\", c.RemoteAddr())\n\tlogger.Log(\"accepted connection\")\n\tfor {\n\t\trid, parts, err := pc.ReadRequest()\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\tlogger.Log(\"connection closed by peer\")\n\t\t\t} else {\n\t\t\t\tlogger.Log(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\trlogger := util.NewLogger(\"%v - req [%d]\", c.RemoteAddr(), rid)\n\t\trlogger.Logf(\"received <%v>\", parts)\n\n\t\tif len(parts) == 0 {\n\t\t\trlogger.Log(\"zero parts supplied\")\n\t\t\tpc.SendError(rid, proto.InvalidCommand + \": no command\")\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch parts[0] {\n\t\tdefault:\n\t\t\trlogger.Logf(\"unknown command <%s>\", parts[0])\n\t\t\tpc.SendError(rid, proto.InvalidCommand + \" \" + parts[0])\n\t\tcase \"set\":\n\t\t\tif len(parts) != 4 {\n\t\t\t\trlogger.Logf(\"invalid set command: %#v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\trlogger.Logf(\"redirect to %s\", leader)\n\t\t\t\tpc.SendRedirect(rid, leader)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trlogger.Logf(\"set %q=%q (cas %q)\", parts[1], parts[2], parts[3])\n\t\t\tseqn, err := c.s.Set(parts[1], parts[2], parts[3])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tpc.SendResponse(rid, strconv.Uitoa64(seqn))\n\t\t\t}\n\t\tcase \"del\":\n\t\t\tif len(parts) != 3 {\n\t\t\t\trlogger.Logf(\"invalid del command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\trlogger.Logf(\"redirect to %s\", leader)\n\t\t\t\tpc.SendRedirect(rid, leader)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trlogger.Logf(\"del %q (cas %q)\", parts[1], parts[2])\n\t\t\t_, err := c.s.Del(parts[1], parts[2])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tpc.SendResponse(rid, \"true\")\n\t\t\t}\n\t\tcase \"wait-for-path-set\": \/\/ TODO this is for demo purposes only\n\t\t\tif len(parts) != 2 {\n\t\t\t\trlogger.Logf(\"invalid wait-for-path-set command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trlogger.Logf(\"wait-for-path-set %q\", parts[1])\n\t\t\tbody, err := c.s.WaitForPathSet(parts[1])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good %q\", body)\n\t\t\t\tpc.SendResponse(rid, body)\n\t\t\t}\n\t\tcase \"join\":\n\t\t\t\/\/ join abc123 1.2.3.4:999\n\t\t\tif len(parts) != 3 {\n\t\t\t\trlogger.Logf(\"invalid join command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\trlogger.Logf(\"redirect to %s\", leader)\n\t\t\t\tpc.SendRedirect(rid, leader)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\twho, addr := parts[1], parts[2]\n\t\t\trlogger.Logf(\"membership requested for %s at %s\", who, addr)\n\n\t\t\tkey := \"\/j\/junta\/members\/\" + who\n\n\t\t\tseqn, err := c.s.Set(key, addr, store.Missing)\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tdone := make(chan int)\n\t\t\t\tgo c.s.AdvanceUntil(done)\n\t\t\t\tc.s.St.Sync(seqn + uint64(c.s.Mg.Alpha()))\n\t\t\t\tclose(done)\n\t\t\t\tseqn, snap := c.s.St.Snapshot()\n\t\t\t\tpc.SendResponse(rid, strconv.Uitoa64(seqn), snap)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>redirect to address, not id<commit_after>package server\n\nimport (\n\t\"os\"\n\t\"net\"\n\t\n\t\"junta\/util\"\n\t\"junta\/paxos\"\n\t\"junta\/proto\"\n\t\"junta\/store\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype ReadFromWriteToer interface {\n\tReadFrom([]byte) (int, net.Addr, os.Error)\n\tWriteTo([]byte, net.Addr) (int, os.Error)\n\tLocalAddr() net.Addr\n}\n\nconst packetSize = 3000\n\ntype conn struct {\n\tnet.Conn\n\ts *Server\n}\n\ntype Manager interface {\n\tPutFrom(string, paxos.Msg)\n\tPropose(string) (uint64, string, os.Error)\n\tAlpha() int\n}\n\ntype Server struct {\n\tAddr string\n\tSt *store.Store\n\tMg Manager\n\tSelf string\n}\n\nfunc (sv *Server) ListenAndServe() os.Error {\n\tlogger := util.NewLogger(\"server %s\", sv.Addr)\n\n\tlogger.Log(\"binding\")\n\tl, err := net.Listen(\"tcp\", sv.Addr)\n\tif err != nil {\n\t\tlogger.Log(err)\n\t\treturn err\n\t}\n\tdefer l.Close()\n\tlogger.Log(\"listening\")\n\n\terr = sv.Serve(l)\n\tif err != nil {\n\t\tlogger.Logf(\"%s: %s\", l, err)\n}\n\treturn err\n}\n\nfunc (sv *Server) ListenAndServeUdp(outs chan paxos.Packet) os.Error {\n\tlogger := util.NewLogger(\"udp server %s\", sv.Addr)\n\n\tlogger.Log(\"binding\")\n\tu, err := net.ListenPacket(\"udp\", sv.Addr)\n\tif err != nil {\n\t\tlogger.Log(err)\n\t\treturn err\n\t}\n\tdefer u.Close()\n\tlogger.Log(\"listening\")\n\n\terr = sv.ServeUdp(u, outs)\n\tif err != nil {\n\t\tlogger.Logf(\"%s: %s\", u, err)\n\t}\n\treturn err\n}\n\nfunc (sv *Server) ServeUdp(u ReadFromWriteToer, outs chan paxos.Packet) os.Error {\n\trecvd := make(chan paxos.Packet)\n\tsent := make(chan paxos.Packet)\n\n\tlogger := util.NewLogger(\"udp server %s\", u.LocalAddr())\n\tgo func() {\n\t\tlogger.Log(\"reading messages...\")\n\t\tfor {\n\t\t\tmsg, addr, err := paxos.ReadMsg(u, packetSize)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogger.Logf(\"read %v from %s\", msg, addr)\n\t\t\trecvd <- paxos.Packet{msg, addr}\n\t\t\tsv.Mg.PutFrom(addr, msg)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tlogger.Log(\"sending messages...\")\n\t\tfor pk := range outs {\n\t\t\tlogger.Logf(\"sending %v\", pk)\n\t\t\tudpAddr, err := net.ResolveUDPAddr(pk.Addr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, err = u.WriteTo(pk.Msg.WireBytes(), udpAddr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsent <- paxos.Packet{pk.Msg, pk.Addr}\n\t\t}\n\t}()\n\n\tneedsAck := make(map[string]bool)\n\tresend := make(chan paxos.Packet)\n\tfor {\n\t\tselect {\n\t\tcase pk := <-recvd:\n\t\t\tif pk.Msg.HasFlags(paxos.Ack) {\n\t\t\t\tlogger.Logf(\"got ack %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tneedsAck[pk.Id()] = false\n\t\t\t} else {\n\t\t\t\tlogger.Logf(\"sending ack %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tudpAddr, err := net.ResolveUDPAddr(pk.Addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tack := pk.Msg.Dup().SetFlags(paxos.Ack)\n\t\t\t\tu.WriteTo(ack.WireBytes(), udpAddr)\n\t\t\t}\n\t\tcase pk := <-sent:\n\t\t\tneedsAck[pk.Id()] = true\n\t\t\tlogger.Logf(\"needs ack %s %v\", pk.Addr, pk.Msg)\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(100000000) \/\/ ns == 0.1s\n\t\t\t\tresend <- pk\n\t\t\t}()\n\t\tcase pk := <-resend:\n\t\t\tif needsAck[pk.Id()] {\n\t\t\t\tlogger.Logf(\"resending %s %v\", pk.Addr, pk.Msg)\n\t\t\t\tgo func() {\n\t\t\t\t\touts <- pk\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tneedsAck[pk.Id()] = false, false\n\t\t\t}\n\t\t}\n\t}\n\n\tpanic(\"not reached\")\n}\n\nfunc (s *Server) Serve(l net.Listener) os.Error {\n\tfor {\n\t\trw, e := l.Accept()\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tc := &conn{rw, s}\n\t\tgo c.serve()\n\t}\n\n\tpanic(\"not reached\")\n}\n\nfunc (sv *Server) leader() string {\n\tparts, cas := sv.St.Lookup(\"\/j\/junta\/leader\")\n\tif cas == store.Dir && cas == store.Missing {\n\t\treturn \"\"\n\t}\n\treturn parts[0]\n}\n\nfunc (sv *Server) addrFor(id string) string {\n\tparts, cas := sv.St.Lookup(\"\/j\/junta\/members\/\"+id)\n\tif cas == store.Dir && cas == store.Missing {\n\t\treturn \"\"\n\t}\n\treturn parts[0]\n}\n\nfunc (sv *Server) setOnce(path, body, cas string) (uint64, os.Error) {\n\tmut, err := store.EncodeSet(path, body, cas)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tseqn, v, err := sv.Mg.Propose(mut)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ We failed, but only because of a competing proposal. The client should\n\t\/\/ retry.\n\tif v != mut {\n\t\treturn 0, os.EAGAIN\n\t}\n\n\treturn seqn, nil\n}\n\nfunc (sv *Server) Set(path, body, cas string) (seqn uint64, err os.Error) {\n\terr = os.EAGAIN\n\tfor err == os.EAGAIN {\n\t\tseqn, err = sv.setOnce(path, body, cas)\n\t}\n\treturn\n}\n\nfunc (sv *Server) delOnce(path, cas string) (uint64, os.Error) {\n\tmut, err := store.EncodeDel(path, cas)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tseqn, v, err := sv.Mg.Propose(mut)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ We failed, but only because of a competing proposal. The client should\n\t\/\/ retry.\n\tif v != mut {\n\t\treturn 0, os.EAGAIN\n\t}\n\n\treturn seqn, nil\n}\n\nfunc (sv *Server) Del(path, cas string) (seqn uint64, err os.Error) {\n\terr = os.EAGAIN\n\tfor err == os.EAGAIN {\n\t\tseqn, err = sv.delOnce(path, cas)\n\t}\n\treturn\n}\n\nfunc (sv *Server) WaitForPathSet(path string) (body string, err os.Error) {\n\tevs := make(chan store.Event)\n\tdefer close(evs)\n\tsv.St.Watch(path, evs)\n\n\tparts, cas := sv.St.Lookup(path)\n\tif cas != store.Dir && cas != store.Missing {\n\t\treturn parts[0], nil\n\t}\n\n\tfor ev := range evs {\n\t\tif ev.IsSet() {\n\t\t\treturn ev.Body, nil\n\t\t}\n\t}\n\n\tpanic(\"not reached\")\n}\n\n\/\/ Repeatedly propose nop values until a successful read from `done`.\nfunc (sv *Server) AdvanceUntil(done chan int) {\n\tfor _, ok := <-done; !ok; _, ok = <-done {\n\t\tsv.Mg.Propose(store.Nop)\n\t}\n}\n\nfunc (c *conn) serve() {\n\tpc := proto.NewConn(c)\n\tlogger := util.NewLogger(\"%v\", c.RemoteAddr())\n\tlogger.Log(\"accepted connection\")\n\tfor {\n\t\trid, parts, err := pc.ReadRequest()\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\tlogger.Log(\"connection closed by peer\")\n\t\t\t} else {\n\t\t\t\tlogger.Log(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\trlogger := util.NewLogger(\"%v - req [%d]\", c.RemoteAddr(), rid)\n\t\trlogger.Logf(\"received <%v>\", parts)\n\n\t\tif len(parts) == 0 {\n\t\t\trlogger.Log(\"zero parts supplied\")\n\t\t\tpc.SendError(rid, proto.InvalidCommand + \": no command\")\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch parts[0] {\n\t\tdefault:\n\t\t\trlogger.Logf(\"unknown command <%s>\", parts[0])\n\t\t\tpc.SendError(rid, proto.InvalidCommand + \" \" + parts[0])\n\t\tcase \"set\":\n\t\t\tif len(parts) != 4 {\n\t\t\t\trlogger.Logf(\"invalid set command: %#v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\taddr := c.s.addrFor(leader)\n\t\t\t\tif addr == \"\" {\n\t\t\t\t\trlogger.Logf(\"unknown address for leader: %s\", leader)\n\t\t\t\t\tpc.SendError(rid, \"unknown address for leader\")\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\trlogger.Logf(\"redirect to %s\", addr)\n\t\t\t\tpc.SendRedirect(rid, addr)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trlogger.Logf(\"set %q=%q (cas %q)\", parts[1], parts[2], parts[3])\n\t\t\tseqn, err := c.s.Set(parts[1], parts[2], parts[3])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tpc.SendResponse(rid, strconv.Uitoa64(seqn))\n\t\t\t}\n\t\tcase \"del\":\n\t\t\tif len(parts) != 3 {\n\t\t\t\trlogger.Logf(\"invalid del command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\trlogger.Logf(\"redirect to %s\", leader)\n\t\t\t\tpc.SendRedirect(rid, leader)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\trlogger.Logf(\"del %q (cas %q)\", parts[1], parts[2])\n\t\t\t_, err := c.s.Del(parts[1], parts[2])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tpc.SendResponse(rid, \"true\")\n\t\t\t}\n\t\tcase \"wait-for-path-set\": \/\/ TODO this is for demo purposes only\n\t\t\tif len(parts) != 2 {\n\t\t\t\trlogger.Logf(\"invalid wait-for-path-set command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trlogger.Logf(\"wait-for-path-set %q\", parts[1])\n\t\t\tbody, err := c.s.WaitForPathSet(parts[1])\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good %q\", body)\n\t\t\t\tpc.SendResponse(rid, body)\n\t\t\t}\n\t\tcase \"join\":\n\t\t\t\/\/ join abc123 1.2.3.4:999\n\t\t\tif len(parts) != 3 {\n\t\t\t\trlogger.Logf(\"invalid join command: %v\", parts)\n\t\t\t\tpc.SendError(rid, \"wrong number of parts\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tleader := c.s.leader()\n\t\t\tif c.s.Self != leader {\n\t\t\t\trlogger.Logf(\"redirect to %s\", leader)\n\t\t\t\tpc.SendRedirect(rid, leader)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\twho, addr := parts[1], parts[2]\n\t\t\trlogger.Logf(\"membership requested for %s at %s\", who, addr)\n\n\t\t\tkey := \"\/j\/junta\/members\/\" + who\n\n\t\t\tseqn, err := c.s.Set(key, addr, store.Missing)\n\t\t\tif err != nil {\n\t\t\t\trlogger.Logf(\"bad: %s\", err)\n\t\t\t\tpc.SendError(rid, err.String())\n\t\t\t} else {\n\t\t\t\trlogger.Logf(\"good\")\n\t\t\t\tdone := make(chan int)\n\t\t\t\tgo c.s.AdvanceUntil(done)\n\t\t\t\tc.s.St.Sync(seqn + uint64(c.s.Mg.Alpha()))\n\t\t\t\tclose(done)\n\t\t\t\tseqn, snap := c.s.St.Snapshot()\n\t\t\t\tpc.SendResponse(rid, strconv.Uitoa64(seqn), snap)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tnet2 \"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rancher\/dynamiclistener\"\n\t\"github.com\/rancher\/helm-controller\/pkg\/helm\"\n\t\"github.com\/rancher\/k3s\/pkg\/clientaccess\"\n\t\"github.com\/rancher\/k3s\/pkg\/daemons\/config\"\n\t\"github.com\/rancher\/k3s\/pkg\/daemons\/control\"\n\t\"github.com\/rancher\/k3s\/pkg\/datadir\"\n\t\"github.com\/rancher\/k3s\/pkg\/deploy\"\n\t\"github.com\/rancher\/k3s\/pkg\/node\"\n\t\"github.com\/rancher\/k3s\/pkg\/rootlessports\"\n\t\"github.com\/rancher\/k3s\/pkg\/servicelb\"\n\t\"github.com\/rancher\/k3s\/pkg\/static\"\n\t\"github.com\/rancher\/k3s\/pkg\/tls\"\n\t\"github.com\/rancher\/wrangler\/pkg\/leader\"\n\t\"github.com\/rancher\/wrangler\/pkg\/resolvehome\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/net\"\n)\n\nfunc resolveDataDir(dataDir string) (string, error) {\n\tdataDir, err := datadir.Resolve(dataDir)\n\treturn filepath.Join(dataDir, \"server\"), err\n}\n\nfunc StartServer(ctx context.Context, config *Config) (string, error) {\n\tif err := setupDataDirAndChdir(&config.ControlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := setNoProxyEnv(&config.ControlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := control.Server(ctx, &config.ControlConfig); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"starting kubernetes\")\n\t}\n\n\tcerts, err := startWrangler(ctx, config)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"starting tls server\")\n\t}\n\n\tip := net2.ParseIP(config.TLSConfig.BindAddress)\n\tif ip == nil {\n\t\tip, err = net.ChooseHostInterface()\n\t\tif err != nil {\n\t\t\tip = net2.ParseIP(\"127.0.0.1\")\n\t\t}\n\t}\n\tprintTokens(certs, ip.String(), &config.TLSConfig, &config.ControlConfig)\n\n\twriteKubeConfig(certs, &config.TLSConfig, config)\n\n\treturn certs, nil\n}\n\nfunc startWrangler(ctx context.Context, config *Config) (string, error) {\n\tvar (\n\t\terr error\n\t\ttlsServer dynamiclistener.ServerInterface\n\t\ttlsConfig = &config.TLSConfig\n\t\tcontrolConfig = &config.ControlConfig\n\t)\n\n\tcaBytes, err := ioutil.ReadFile(controlConfig.Runtime.ServerCA)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcaKeyBytes, err := ioutil.ReadFile(controlConfig.Runtime.ServerCAKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttlsConfig.CACerts = string(caBytes)\n\ttlsConfig.CAKey = string(caKeyBytes)\n\n\ttlsConfig.Handler = router(controlConfig, controlConfig.Runtime.Tunnel, func() (string, error) {\n\t\tif tlsServer == nil {\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn tlsServer.CACert()\n\t})\n\n\tsc, err := newContext(ctx, controlConfig.Runtime.KubeConfigAdmin)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := stageFiles(ctx, sc, controlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttlsServer, err = tls.NewServer(ctx, sc.K3s.K3s().V1().ListenerConfig(), *tlsConfig)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := sc.Start(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcerts := \"\"\n\tfor certs == \"\" {\n\t\tcerts, err = tlsServer.CACert()\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"waiting to generate CA certs\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tgo leader.RunOrDie(ctx, \"\", \"k3s\", sc.K8s, func(ctx context.Context) {\n\t\tif err := masterControllers(ctx, sc, config); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif err := sc.Start(ctx); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\n\treturn certs, nil\n}\n\nfunc masterControllers(ctx context.Context, sc *Context, config *Config) error {\n\tif err := node.Register(ctx, sc.Core.Core().V1().ConfigMap(), sc.Core.Core().V1().Node()); err != nil {\n\t\treturn err\n\t}\n\n\thelm.Register(ctx, sc.Apply,\n\t\tsc.Helm.Helm().V1().HelmChart(),\n\t\tsc.Batch.Batch().V1().Job(),\n\t\tsc.Auth.Rbac().V1().ClusterRoleBinding(),\n\t\tsc.Core.Core().V1().ServiceAccount(),\n\t\tsc.Core.Core().V1().ConfigMap())\n\tif err := servicelb.Register(ctx,\n\t\tsc.K8s,\n\t\tsc.Apply,\n\t\tsc.Apps.Apps().V1().DaemonSet(),\n\t\tsc.Apps.Apps().V1().Deployment(),\n\t\tsc.Core.Core().V1().Node(),\n\t\tsc.Core.Core().V1().Pod(),\n\t\tsc.Core.Core().V1().Service(),\n\t\tsc.Core.Core().V1().Endpoints(),\n\t\t!config.DisableServiceLB, config.Rootless); err != nil {\n\t\treturn err\n\t}\n\n\tif !config.DisableServiceLB && config.Rootless {\n\t\treturn rootlessports.Register(ctx, sc.Core.Core().V1().Service(), config.TLSConfig.HTTPSPort)\n\t}\n\n\treturn nil\n}\n\nfunc stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) error {\n\tdataDir := filepath.Join(controlConfig.DataDir, \"static\")\n\tif err := static.Stage(dataDir); err != nil {\n\t\treturn err\n\t}\n\n\tdataDir = filepath.Join(controlConfig.DataDir, \"manifests\")\n\ttemplateVars := map[string]string{\n\t\t\"%{CLUSTER_DNS}%\": controlConfig.ClusterDNS.String(),\n\t\t\"%{CLUSTER_DOMAIN}%\": controlConfig.ClusterDomain,\n\t}\n\n\tif err := deploy.Stage(dataDir, templateVars, controlConfig.Skips); err != nil {\n\t\treturn err\n\t}\n\n\treturn deploy.WatchFiles(ctx, sc.Apply, sc.K3s.K3s().V1().Addon(), dataDir)\n}\n\nfunc HomeKubeConfig(write, rootless bool) (string, error) {\n\tif write {\n\t\tif os.Getuid() == 0 && !rootless {\n\t\t\treturn datadir.GlobalConfig, nil\n\t\t}\n\t\treturn resolvehome.Resolve(datadir.HomeConfig)\n\t}\n\n\tif _, err := os.Stat(datadir.GlobalConfig); err == nil {\n\t\treturn datadir.GlobalConfig, nil\n\t}\n\n\treturn resolvehome.Resolve(datadir.HomeConfig)\n}\n\nfunc printTokens(certs, advertiseIP string, tlsConfig *dynamiclistener.UserConfig, config *config.Control) {\n\tvar (\n\t\tnodeFile string\n\t)\n\n\tif advertiseIP == \"\" {\n\t\tadvertiseIP = \"localhost\"\n\t}\n\n\tif len(config.Runtime.NodeToken) > 0 {\n\t\tp := filepath.Join(config.DataDir, \"node-token\")\n\t\tif err := writeToken(config.Runtime.NodeToken, p, certs); err == nil {\n\t\t\tlogrus.Infof(\"Node token is available at %s\", p)\n\t\t\tnodeFile = p\n\t\t}\n\t}\n\n\tif len(nodeFile) > 0 {\n\t\tprintToken(tlsConfig.HTTPSPort, advertiseIP, \"To join node to cluster:\", \"agent\")\n\t}\n}\n\nfunc writeKubeConfig(certs string, tlsConfig *dynamiclistener.UserConfig, config *Config) {\n\tclientToken := FormatToken(config.ControlConfig.Runtime.ClientToken, certs)\n\tip := tlsConfig.BindAddress\n\tif ip == \"\" {\n\t\tip = \"localhost\"\n\t}\n\turl := fmt.Sprintf(\"https:\/\/%s:%d\", ip, tlsConfig.HTTPSPort)\n\tkubeConfig, err := HomeKubeConfig(true, config.Rootless)\n\tdef := true\n\tif err != nil {\n\t\tkubeConfig = filepath.Join(config.ControlConfig.DataDir, \"kubeconfig-k3s.yaml\")\n\t\tdef = false\n\t}\n\tkubeConfigSymlink := kubeConfig\n\tif config.ControlConfig.KubeConfigOutput != \"\" {\n\t\tkubeConfig = config.ControlConfig.KubeConfigOutput\n\t}\n\n\tif isSymlink(kubeConfigSymlink) {\n\t\tif err := os.Remove(kubeConfigSymlink); err != nil {\n\t\t\tlogrus.Errorf(\"failed to remove kubeconfig symlink\")\n\t\t}\n\t}\n\n\tif err = clientaccess.AgentAccessInfoToKubeConfig(kubeConfig, url, clientToken); err != nil {\n\t\tlogrus.Errorf(\"Failed to generate kubeconfig: %v\", err)\n\t}\n\n\tif config.ControlConfig.KubeConfigMode != \"\" {\n\t\tmode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0)\n\t\tif err == nil {\n\t\t\tos.Chmod(kubeConfig, os.FileMode(mode))\n\t\t} else {\n\t\t\tlogrus.Errorf(\"failed to set %s to mode %s: %v\", kubeConfig, os.FileMode(mode), err)\n\t\t}\n\t} else {\n\t\tos.Chmod(kubeConfig, os.FileMode(0600))\n\t}\n\n\tif kubeConfigSymlink != kubeConfig {\n\t\tif err := writeConfigSymlink(kubeConfig, kubeConfigSymlink); err != nil {\n\t\t\tlogrus.Errorf(\"failed to write kubeconfig symlink: %v\", err)\n\t\t}\n\t}\n\n\tlogrus.Infof(\"Wrote kubeconfig %s\", kubeConfig)\n\tif def {\n\t\tlogrus.Infof(\"Run: %s kubectl\", filepath.Base(os.Args[0]))\n\t}\n}\n\nfunc setupDataDirAndChdir(config *config.Control) error {\n\tvar (\n\t\terr error\n\t)\n\n\tconfig.DataDir, err = resolveDataDir(config.DataDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdataDir := config.DataDir\n\n\tif err := os.MkdirAll(dataDir, 0700); err != nil {\n\t\treturn errors.Wrapf(err, \"can not mkdir %s\", dataDir)\n\t}\n\n\tif err := os.Chdir(dataDir); err != nil {\n\t\treturn errors.Wrapf(err, \"can not chdir %s\", dataDir)\n\t}\n\n\treturn nil\n}\n\nfunc printToken(httpsPort int, advertiseIP, prefix, cmd string) {\n\tip := advertiseIP\n\tif ip == \"\" {\n\t\thostIP, err := net.ChooseHostInterface()\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t}\n\t\tip = hostIP.String()\n\t}\n\n\tlogrus.Infof(\"%s k3s %s -s https:\/\/%s:%d -t ${NODE_TOKEN}\", prefix, cmd, ip, httpsPort)\n}\n\nfunc FormatToken(token string, certs string) string {\n\tif len(token) == 0 {\n\t\treturn token\n\t}\n\n\tprefix := \"K10\"\n\tif len(certs) > 0 {\n\t\tdigest := sha256.Sum256([]byte(certs))\n\t\tprefix = \"K10\" + hex.EncodeToString(digest[:]) + \"::\"\n\t}\n\n\treturn prefix + token\n}\n\nfunc writeToken(token, file, certs string) error {\n\tif len(token) == 0 {\n\t\treturn nil\n\t}\n\n\ttoken = FormatToken(token, certs)\n\treturn ioutil.WriteFile(file, []byte(token+\"\\n\"), 0600)\n}\n\nfunc setNoProxyEnv(config *config.Control) error {\n\tenvList := strings.Join([]string{\n\t\tos.Getenv(\"NO_PROXY\"),\n\t\tconfig.ClusterIPRange.String(),\n\t\tconfig.ServiceIPRange.String(),\n\t}, \",\")\n\treturn os.Setenv(\"NO_PROXY\", envList)\n}\n\nfunc writeConfigSymlink(kubeconfig, kubeconfigSymlink string) error {\n\tif err := os.Remove(kubeconfigSymlink); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"failed to remove %s file: %v\", kubeconfigSymlink, err)\n\t}\n\tif err := os.MkdirAll(filepath.Dir(kubeconfigSymlink), 0755); err != nil {\n\t\treturn fmt.Errorf(\"failed to create path for symlink: %v\", err)\n\t}\n\tif err := os.Symlink(kubeconfig, kubeconfigSymlink); err != nil {\n\t\treturn fmt.Errorf(\"failed to create symlink: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc isSymlink(config string) bool {\n\tif fi, err := os.Lstat(config); err == nil && (fi.Mode()&os.ModeSymlink == os.ModeSymlink) {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Simplify startWrangler a bit<commit_after>package server\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tnet2 \"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rancher\/dynamiclistener\"\n\t\"github.com\/rancher\/helm-controller\/pkg\/helm\"\n\t\"github.com\/rancher\/k3s\/pkg\/clientaccess\"\n\t\"github.com\/rancher\/k3s\/pkg\/daemons\/config\"\n\t\"github.com\/rancher\/k3s\/pkg\/daemons\/control\"\n\t\"github.com\/rancher\/k3s\/pkg\/datadir\"\n\t\"github.com\/rancher\/k3s\/pkg\/deploy\"\n\t\"github.com\/rancher\/k3s\/pkg\/node\"\n\t\"github.com\/rancher\/k3s\/pkg\/rootlessports\"\n\t\"github.com\/rancher\/k3s\/pkg\/servicelb\"\n\t\"github.com\/rancher\/k3s\/pkg\/static\"\n\t\"github.com\/rancher\/k3s\/pkg\/tls\"\n\t\"github.com\/rancher\/wrangler\/pkg\/leader\"\n\t\"github.com\/rancher\/wrangler\/pkg\/resolvehome\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/net\"\n)\n\nfunc resolveDataDir(dataDir string) (string, error) {\n\tdataDir, err := datadir.Resolve(dataDir)\n\treturn filepath.Join(dataDir, \"server\"), err\n}\n\nfunc StartServer(ctx context.Context, config *Config) (string, error) {\n\tif err := setupDataDirAndChdir(&config.ControlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := setNoProxyEnv(&config.ControlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := control.Server(ctx, &config.ControlConfig); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"starting kubernetes\")\n\t}\n\n\tcerts, err := startWrangler(ctx, config)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"starting tls server\")\n\t}\n\n\tip := net2.ParseIP(config.TLSConfig.BindAddress)\n\tif ip == nil {\n\t\tip, err = net.ChooseHostInterface()\n\t\tif err != nil {\n\t\t\tip = net2.ParseIP(\"127.0.0.1\")\n\t\t}\n\t}\n\tprintTokens(certs, ip.String(), &config.TLSConfig, &config.ControlConfig)\n\n\twriteKubeConfig(certs, &config.TLSConfig, config)\n\n\treturn certs, nil\n}\n\nfunc startWrangler(ctx context.Context, config *Config) (string, error) {\n\tvar (\n\t\terr error\n\t\ttlsConfig = &config.TLSConfig\n\t\tcontrolConfig = &config.ControlConfig\n\t)\n\n\tcaBytes, err := ioutil.ReadFile(controlConfig.Runtime.ServerCA)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcaKeyBytes, err := ioutil.ReadFile(controlConfig.Runtime.ServerCAKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcerts := string(caBytes)\n\ttlsConfig.CACerts = certs\n\ttlsConfig.CAKey = string(caKeyBytes)\n\n\ttlsConfig.Handler = router(controlConfig, controlConfig.Runtime.Tunnel, func() (string, error) {\n\t\treturn certs, nil\n\t})\n\n\tsc, err := newContext(ctx, controlConfig.Runtime.KubeConfigAdmin)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := stageFiles(ctx, sc, controlConfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t_, err = tls.NewServer(ctx, sc.K3s.K3s().V1().ListenerConfig(), *tlsConfig)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := sc.Start(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tgo leader.RunOrDie(ctx, \"\", \"k3s\", sc.K8s, func(ctx context.Context) {\n\t\tif err := masterControllers(ctx, sc, config); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif err := sc.Start(ctx); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\n\treturn certs, nil\n}\n\nfunc masterControllers(ctx context.Context, sc *Context, config *Config) error {\n\tif err := node.Register(ctx, sc.Core.Core().V1().ConfigMap(), sc.Core.Core().V1().Node()); err != nil {\n\t\treturn err\n\t}\n\n\thelm.Register(ctx, sc.Apply,\n\t\tsc.Helm.Helm().V1().HelmChart(),\n\t\tsc.Batch.Batch().V1().Job(),\n\t\tsc.Auth.Rbac().V1().ClusterRoleBinding(),\n\t\tsc.Core.Core().V1().ServiceAccount(),\n\t\tsc.Core.Core().V1().ConfigMap())\n\tif err := servicelb.Register(ctx,\n\t\tsc.K8s,\n\t\tsc.Apply,\n\t\tsc.Apps.Apps().V1().DaemonSet(),\n\t\tsc.Apps.Apps().V1().Deployment(),\n\t\tsc.Core.Core().V1().Node(),\n\t\tsc.Core.Core().V1().Pod(),\n\t\tsc.Core.Core().V1().Service(),\n\t\tsc.Core.Core().V1().Endpoints(),\n\t\t!config.DisableServiceLB, config.Rootless); err != nil {\n\t\treturn err\n\t}\n\n\tif !config.DisableServiceLB && config.Rootless {\n\t\treturn rootlessports.Register(ctx, sc.Core.Core().V1().Service(), config.TLSConfig.HTTPSPort)\n\t}\n\n\treturn nil\n}\n\nfunc stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) error {\n\tdataDir := filepath.Join(controlConfig.DataDir, \"static\")\n\tif err := static.Stage(dataDir); err != nil {\n\t\treturn err\n\t}\n\n\tdataDir = filepath.Join(controlConfig.DataDir, \"manifests\")\n\ttemplateVars := map[string]string{\n\t\t\"%{CLUSTER_DNS}%\": controlConfig.ClusterDNS.String(),\n\t\t\"%{CLUSTER_DOMAIN}%\": controlConfig.ClusterDomain,\n\t}\n\n\tif err := deploy.Stage(dataDir, templateVars, controlConfig.Skips); err != nil {\n\t\treturn err\n\t}\n\n\treturn deploy.WatchFiles(ctx, sc.Apply, sc.K3s.K3s().V1().Addon(), dataDir)\n}\n\nfunc HomeKubeConfig(write, rootless bool) (string, error) {\n\tif write {\n\t\tif os.Getuid() == 0 && !rootless {\n\t\t\treturn datadir.GlobalConfig, nil\n\t\t}\n\t\treturn resolvehome.Resolve(datadir.HomeConfig)\n\t}\n\n\tif _, err := os.Stat(datadir.GlobalConfig); err == nil {\n\t\treturn datadir.GlobalConfig, nil\n\t}\n\n\treturn resolvehome.Resolve(datadir.HomeConfig)\n}\n\nfunc printTokens(certs, advertiseIP string, tlsConfig *dynamiclistener.UserConfig, config *config.Control) {\n\tvar (\n\t\tnodeFile string\n\t)\n\n\tif advertiseIP == \"\" {\n\t\tadvertiseIP = \"localhost\"\n\t}\n\n\tif len(config.Runtime.NodeToken) > 0 {\n\t\tp := filepath.Join(config.DataDir, \"node-token\")\n\t\tif err := writeToken(config.Runtime.NodeToken, p, certs); err == nil {\n\t\t\tlogrus.Infof(\"Node token is available at %s\", p)\n\t\t\tnodeFile = p\n\t\t}\n\t}\n\n\tif len(nodeFile) > 0 {\n\t\tprintToken(tlsConfig.HTTPSPort, advertiseIP, \"To join node to cluster:\", \"agent\")\n\t}\n}\n\nfunc writeKubeConfig(certs string, tlsConfig *dynamiclistener.UserConfig, config *Config) {\n\tclientToken := FormatToken(config.ControlConfig.Runtime.ClientToken, certs)\n\tip := tlsConfig.BindAddress\n\tif ip == \"\" {\n\t\tip = \"localhost\"\n\t}\n\turl := fmt.Sprintf(\"https:\/\/%s:%d\", ip, tlsConfig.HTTPSPort)\n\tkubeConfig, err := HomeKubeConfig(true, config.Rootless)\n\tdef := true\n\tif err != nil {\n\t\tkubeConfig = filepath.Join(config.ControlConfig.DataDir, \"kubeconfig-k3s.yaml\")\n\t\tdef = false\n\t}\n\tkubeConfigSymlink := kubeConfig\n\tif config.ControlConfig.KubeConfigOutput != \"\" {\n\t\tkubeConfig = config.ControlConfig.KubeConfigOutput\n\t}\n\n\tif isSymlink(kubeConfigSymlink) {\n\t\tif err := os.Remove(kubeConfigSymlink); err != nil {\n\t\t\tlogrus.Errorf(\"failed to remove kubeconfig symlink\")\n\t\t}\n\t}\n\n\tif err = clientaccess.AgentAccessInfoToKubeConfig(kubeConfig, url, clientToken); err != nil {\n\t\tlogrus.Errorf(\"Failed to generate kubeconfig: %v\", err)\n\t}\n\n\tif config.ControlConfig.KubeConfigMode != \"\" {\n\t\tmode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0)\n\t\tif err == nil {\n\t\t\tos.Chmod(kubeConfig, os.FileMode(mode))\n\t\t} else {\n\t\t\tlogrus.Errorf(\"failed to set %s to mode %s: %v\", kubeConfig, os.FileMode(mode), err)\n\t\t}\n\t} else {\n\t\tos.Chmod(kubeConfig, os.FileMode(0600))\n\t}\n\n\tif kubeConfigSymlink != kubeConfig {\n\t\tif err := writeConfigSymlink(kubeConfig, kubeConfigSymlink); err != nil {\n\t\t\tlogrus.Errorf(\"failed to write kubeconfig symlink: %v\", err)\n\t\t}\n\t}\n\n\tlogrus.Infof(\"Wrote kubeconfig %s\", kubeConfig)\n\tif def {\n\t\tlogrus.Infof(\"Run: %s kubectl\", filepath.Base(os.Args[0]))\n\t}\n}\n\nfunc setupDataDirAndChdir(config *config.Control) error {\n\tvar (\n\t\terr error\n\t)\n\n\tconfig.DataDir, err = resolveDataDir(config.DataDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdataDir := config.DataDir\n\n\tif err := os.MkdirAll(dataDir, 0700); err != nil {\n\t\treturn errors.Wrapf(err, \"can not mkdir %s\", dataDir)\n\t}\n\n\tif err := os.Chdir(dataDir); err != nil {\n\t\treturn errors.Wrapf(err, \"can not chdir %s\", dataDir)\n\t}\n\n\treturn nil\n}\n\nfunc printToken(httpsPort int, advertiseIP, prefix, cmd string) {\n\tip := advertiseIP\n\tif ip == \"\" {\n\t\thostIP, err := net.ChooseHostInterface()\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t}\n\t\tip = hostIP.String()\n\t}\n\n\tlogrus.Infof(\"%s k3s %s -s https:\/\/%s:%d -t ${NODE_TOKEN}\", prefix, cmd, ip, httpsPort)\n}\n\nfunc FormatToken(token string, certs string) string {\n\tif len(token) == 0 {\n\t\treturn token\n\t}\n\n\tprefix := \"K10\"\n\tif len(certs) > 0 {\n\t\tdigest := sha256.Sum256([]byte(certs))\n\t\tprefix = \"K10\" + hex.EncodeToString(digest[:]) + \"::\"\n\t}\n\n\treturn prefix + token\n}\n\nfunc writeToken(token, file, certs string) error {\n\tif len(token) == 0 {\n\t\treturn nil\n\t}\n\n\ttoken = FormatToken(token, certs)\n\treturn ioutil.WriteFile(file, []byte(token+\"\\n\"), 0600)\n}\n\nfunc setNoProxyEnv(config *config.Control) error {\n\tenvList := strings.Join([]string{\n\t\tos.Getenv(\"NO_PROXY\"),\n\t\tconfig.ClusterIPRange.String(),\n\t\tconfig.ServiceIPRange.String(),\n\t}, \",\")\n\treturn os.Setenv(\"NO_PROXY\", envList)\n}\n\nfunc writeConfigSymlink(kubeconfig, kubeconfigSymlink string) error {\n\tif err := os.Remove(kubeconfigSymlink); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"failed to remove %s file: %v\", kubeconfigSymlink, err)\n\t}\n\tif err := os.MkdirAll(filepath.Dir(kubeconfigSymlink), 0755); err != nil {\n\t\treturn fmt.Errorf(\"failed to create path for symlink: %v\", err)\n\t}\n\tif err := os.Symlink(kubeconfig, kubeconfigSymlink); err != nil {\n\t\treturn fmt.Errorf(\"failed to create symlink: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc isSymlink(config string) bool {\n\tif fi, err := os.Lstat(config); err == nil && (fi.Mode()&os.ModeSymlink == os.ModeSymlink) {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package social\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype BasicUserInfo struct {\n\tIdentity string\n\tName string\n\tEmail string\n\tLogin string\n\tCompany string\n}\n\ntype SocialConnector interface {\n\tType() int\n\tUserInfo(token *oauth2.Token) (*BasicUserInfo, error)\n\tIsEmailAllowed(email string) bool\n\tIsSignupAllowed() bool\n\n\tAuthCodeURL(state string, opts ...oauth2.AuthCodeOption) string\n\tExchange(ctx context.Context, code string) (*oauth2.Token, error)\n}\n\nvar (\n\tSocialBaseUrl = \"\/login\/\"\n\tSocialMap = make(map[string]SocialConnector)\n)\n\nfunc NewOAuthService() {\n\tsetting.OAuthService = &setting.OAuther{}\n\tsetting.OAuthService.OAuthInfos = make(map[string]*setting.OAuthInfo)\n\n\tallOauthes := []string{\"github\", \"google\"}\n\n\tfor _, name := range allOauthes {\n\t\tsec := setting.Cfg.Section(\"auth.\" + name)\n\t\tinfo := &setting.OAuthInfo{\n\t\t\tClientId: sec.Key(\"client_id\").String(),\n\t\t\tClientSecret: sec.Key(\"client_secret\").String(),\n\t\t\tScopes: sec.Key(\"scopes\").Strings(\" \"),\n\t\t\tAuthUrl: sec.Key(\"auth_url\").String(),\n\t\t\tTokenUrl: sec.Key(\"token_url\").String(),\n\t\t\tApiUrl: sec.Key(\"api_url\").String(),\n\t\t\tEnabled: sec.Key(\"enabled\").MustBool(),\n\t\t\tAllowedDomains: sec.Key(\"allowed_domains\").Strings(\" \"),\n\t\t\tAllowSignup: sec.Key(\"allow_sign_up\").MustBool(),\n\t\t}\n\n\t\tif !info.Enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tsetting.OAuthService.OAuthInfos[name] = info\n\t\tconfig := oauth2.Config{\n\t\t\tClientID: info.ClientId,\n\t\t\tClientSecret: info.ClientSecret,\n\t\t\tEndpoint: oauth2.Endpoint{\n\t\t\t\tAuthURL: info.AuthUrl,\n\t\t\t\tTokenURL: info.TokenUrl,\n\t\t\t},\n\t\t\tRedirectURL: strings.TrimSuffix(setting.AppUrl, \"\/\") + SocialBaseUrl + name,\n\t\t\tScopes: info.Scopes,\n\t\t}\n\n\t\t\/\/ GitHub.\n\t\tif name == \"github\" {\n\t\t\tsetting.OAuthService.GitHub = true\n\t\t\tteamIds := sec.Key(\"team_ids\").Ints(\",\")\n\t\t\tallowedOrganizations := sec.Key(\"allowed_organizations\").Strings(\" \")\n\t\t\tSocialMap[\"github\"] = &SocialGithub{\n\t\t\t\tConfig: &config,\n\t\t\t\tallowedDomains: info.AllowedDomains,\n\t\t\t\tapiUrl: info.ApiUrl,\n\t\t\t\tallowSignup: info.AllowSignup,\n\t\t\t\tteamIds: teamIds,\n\t\t\t\tallowedOrganizations: allowedOrganizations,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Google.\n\t\tif name == \"google\" {\n\t\t\tsetting.OAuthService.Google = true\n\t\t\tSocialMap[\"google\"] = &SocialGoogle{\n\t\t\t\tConfig: &config, allowedDomains: info.AllowedDomains,\n\t\t\t\tapiUrl: info.ApiUrl,\n\t\t\t\tallowSignup: info.AllowSignup,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc isEmailAllowed(email string, allowedDomains []string) bool {\n\tif len(allowedDomains) == 0 {\n\t\treturn true\n\t}\n\n\tvalid := false\n\tfor _, domain := range allowedDomains {\n\t\temailSuffix := fmt.Sprintf(\"@%s\", domain)\n\t\tvalid = valid || strings.HasSuffix(email, emailSuffix)\n\t}\n\n\treturn valid\n}\n\ntype SocialGithub struct {\n\t*oauth2.Config\n\tallowedDomains []string\n\tallowedOrganizations []string\n\tapiUrl string\n\tallowSignup bool\n\tteamIds []int\n}\n\nvar (\n\tErrMissingTeamMembership = errors.New(\"User not a member of one of the required teams\")\n)\n\nvar (\n\tErrMissingOrganizationMembership = errors.New(\"User not a member of one of the required organizations\")\n)\n\nfunc (s *SocialGithub) Type() int {\n\treturn int(models.GITHUB)\n}\n\nfunc (s *SocialGithub) IsEmailAllowed(email string) bool {\n\treturn isEmailAllowed(email, s.allowedDomains)\n}\n\nfunc (s *SocialGithub) IsSignupAllowed() bool {\n\treturn s.allowSignup\n}\n\nfunc (s *SocialGithub) IsTeamMember(client *http.Client) bool {\n\tif len(s.teamIds) == 0 {\n\t\treturn true\n\t}\n\n\tteamMemberships, err := s.FetchTeamMemberships(client)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, teamId := range s.teamIds {\n\t\tfor _, membershipId := range teamMemberships {\n\t\t\tif teamId == membershipId {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SocialGithub) IsOrganizationMember(client *http.Client) bool {\n\tif len(s.allowedOrganizations) == 0 {\n\t\treturn true\n\t}\n\n\torganizations, err := s.FetchOrganizations(client)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, allowedOrganization := range s.allowedOrganizations {\n\t\tfor _, organization := range organizations {\n\t\t\tif organization == allowedOrganization {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SocialGithub) FetchPrivateEmail(client *http.Client) (string, error) {\n\ttype Record struct {\n\t\tEmail string `json:\"email\"`\n\t\tPrimary bool `json:\"primary\"`\n\t\tVerified bool `json:\"verified\"`\n\t}\n\n\temailsUrl := fmt.Sprintf(\"https:\/\/api.github.com\/user\/emails\")\n\tr, err := client.Get(emailsUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar email = \"\"\n\tfor _, record := range records {\n\t\tif record.Primary {\n\t\t\temail = record.Email\n\t\t}\n\t}\n\n\treturn email, nil\n}\n\nfunc (s *SocialGithub) FetchTeamMemberships(client *http.Client) ([]int, error) {\n\ttype Record struct {\n\t\tId int `json:\"id\"`\n\t}\n\n\tmembershipUrl := fmt.Sprintf(\"https:\/\/api.github.com\/user\/teams\")\n\tr, err := client.Get(membershipUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ids = make([]int, len(records))\n\tfor i, record := range records {\n\t\tids[i] = record.Id\n\t}\n\n\treturn ids, nil\n}\n\nfunc (s *SocialGithub) FetchOrganizations(client *http.Client) ([]string, error) {\n\ttype Record struct {\n\t\tLogin string `json:\"login\"`\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/api.github.com\/user\/orgs\")\n\tr, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar logins = make([]string, len(records))\n\tfor i, record := range records {\n\t\tlogins[i] = record.Login\n\t}\n\n\treturn logins, nil\n}\n\nfunc (s *SocialGithub) UserInfo(token *oauth2.Token) (*BasicUserInfo, error) {\n\tvar data struct {\n\t\tId int `json:\"id\"`\n\t\tName string `json:\"login\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\n\tvar err error\n\tclient := s.Client(oauth2.NoContext, token)\n\tr, err := client.Get(s.apiUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tif err = json.NewDecoder(r.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserInfo := &BasicUserInfo{\n\t\tIdentity: strconv.Itoa(data.Id),\n\t\tName: data.Name,\n\t\tEmail: data.Email,\n\t}\n\n\tif !s.IsTeamMember(client) {\n\t\treturn nil, ErrMissingTeamMembership\n\t}\n\n\tif !s.IsOrganizationMember(client) {\n\t\treturn nil, ErrMissingOrganizationMembership\n\t}\n\n\tif userInfo.Email == \"\" {\n\t\tuserInfo.Email, err = s.FetchPrivateEmail(client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn userInfo, nil\n}\n\n\/\/ ________ .__\n\/\/ \/ _____\/ ____ ____ ____ | | ____\n\/\/ \/ \\ ___ \/ _ \\ \/ _ \\ \/ ___\\| | _\/ __ \\\n\/\/ \\ \\_\\ ( <_> | <_> ) \/_\/ > |_\\ ___\/\n\/\/ \\______ \/\\____\/ \\____\/\\___ \/|____\/\\___ >\n\/\/ \\\/ \/_____\/ \\\/\n\ntype SocialGoogle struct {\n\t*oauth2.Config\n\tallowedDomains []string\n\tapiUrl string\n\tallowSignup bool\n}\n\nfunc (s *SocialGoogle) Type() int {\n\treturn int(models.GOOGLE)\n}\n\nfunc (s *SocialGoogle) IsEmailAllowed(email string) bool {\n\treturn isEmailAllowed(email, s.allowedDomains)\n}\n\nfunc (s *SocialGoogle) IsSignupAllowed() bool {\n\treturn s.allowSignup\n}\n\nfunc (s *SocialGoogle) UserInfo(token *oauth2.Token) (*BasicUserInfo, error) {\n\tvar data struct {\n\t\tId string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\tvar err error\n\n\tclient := s.Client(oauth2.NoContext, token)\n\tr, err := client.Get(s.apiUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\tif err = json.NewDecoder(r.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &BasicUserInfo{\n\t\tIdentity: data.Id,\n\t\tName: data.Name,\n\t\tEmail: data.Email,\n\t}, nil\n}\n<commit_msg>use [auth.github] -> api_url property; supports git enterprise<commit_after>package social\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\ntype BasicUserInfo struct {\n\tIdentity string\n\tName string\n\tEmail string\n\tLogin string\n\tCompany string\n}\n\ntype SocialConnector interface {\n\tType() int\n\tUserInfo(token *oauth2.Token) (*BasicUserInfo, error)\n\tIsEmailAllowed(email string) bool\n\tIsSignupAllowed() bool\n\n\tAuthCodeURL(state string, opts ...oauth2.AuthCodeOption) string\n\tExchange(ctx context.Context, code string) (*oauth2.Token, error)\n}\n\nvar (\n\tSocialBaseUrl = \"\/login\/\"\n\tSocialMap = make(map[string]SocialConnector)\n)\n\nfunc NewOAuthService() {\n\tsetting.OAuthService = &setting.OAuther{}\n\tsetting.OAuthService.OAuthInfos = make(map[string]*setting.OAuthInfo)\n\n\tallOauthes := []string{\"github\", \"google\"}\n\n\tfor _, name := range allOauthes {\n\t\tsec := setting.Cfg.Section(\"auth.\" + name)\n\t\tinfo := &setting.OAuthInfo{\n\t\t\tClientId: sec.Key(\"client_id\").String(),\n\t\t\tClientSecret: sec.Key(\"client_secret\").String(),\n\t\t\tScopes: sec.Key(\"scopes\").Strings(\" \"),\n\t\t\tAuthUrl: sec.Key(\"auth_url\").String(),\n\t\t\tTokenUrl: sec.Key(\"token_url\").String(),\n\t\t\tApiUrl: sec.Key(\"api_url\").String(),\n\t\t\tEnabled: sec.Key(\"enabled\").MustBool(),\n\t\t\tAllowedDomains: sec.Key(\"allowed_domains\").Strings(\" \"),\n\t\t\tAllowSignup: sec.Key(\"allow_sign_up\").MustBool(),\n\t\t}\n\n\t\tif !info.Enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tsetting.OAuthService.OAuthInfos[name] = info\n\t\tconfig := oauth2.Config{\n\t\t\tClientID: info.ClientId,\n\t\t\tClientSecret: info.ClientSecret,\n\t\t\tEndpoint: oauth2.Endpoint{\n\t\t\t\tAuthURL: info.AuthUrl,\n\t\t\t\tTokenURL: info.TokenUrl,\n\t\t\t},\n\t\t\tRedirectURL: strings.TrimSuffix(setting.AppUrl, \"\/\") + SocialBaseUrl + name,\n\t\t\tScopes: info.Scopes,\n\t\t}\n\n\t\t\/\/ GitHub.\n\t\tif name == \"github\" {\n\t\t\tsetting.OAuthService.GitHub = true\n\t\t\tteamIds := sec.Key(\"team_ids\").Ints(\",\")\n\t\t\tallowedOrganizations := sec.Key(\"allowed_organizations\").Strings(\" \")\n\t\t\tSocialMap[\"github\"] = &SocialGithub{\n\t\t\t\tConfig: &config,\n\t\t\t\tallowedDomains: info.AllowedDomains,\n\t\t\t\tapiUrl: info.ApiUrl,\n\t\t\t\tallowSignup: info.AllowSignup,\n\t\t\t\tteamIds: teamIds,\n\t\t\t\tallowedOrganizations: allowedOrganizations,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Google.\n\t\tif name == \"google\" {\n\t\t\tsetting.OAuthService.Google = true\n\t\t\tSocialMap[\"google\"] = &SocialGoogle{\n\t\t\t\tConfig: &config, allowedDomains: info.AllowedDomains,\n\t\t\t\tapiUrl: info.ApiUrl,\n\t\t\t\tallowSignup: info.AllowSignup,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc isEmailAllowed(email string, allowedDomains []string) bool {\n\tif len(allowedDomains) == 0 {\n\t\treturn true\n\t}\n\n\tvalid := false\n\tfor _, domain := range allowedDomains {\n\t\temailSuffix := fmt.Sprintf(\"@%s\", domain)\n\t\tvalid = valid || strings.HasSuffix(email, emailSuffix)\n\t}\n\n\treturn valid\n}\n\ntype SocialGithub struct {\n\t*oauth2.Config\n\tallowedDomains []string\n\tallowedOrganizations []string\n\tapiUrl string\n\tallowSignup bool\n\tteamIds []int\n}\n\nvar (\n\tErrMissingTeamMembership = errors.New(\"User not a member of one of the required teams\")\n)\n\nvar (\n\tErrMissingOrganizationMembership = errors.New(\"User not a member of one of the required organizations\")\n)\n\nfunc (s *SocialGithub) Type() int {\n\treturn int(models.GITHUB)\n}\n\nfunc (s *SocialGithub) IsEmailAllowed(email string) bool {\n\treturn isEmailAllowed(email, s.allowedDomains)\n}\n\nfunc (s *SocialGithub) IsSignupAllowed() bool {\n\treturn s.allowSignup\n}\n\nfunc (s *SocialGithub) IsTeamMember(client *http.Client) bool {\n\tif len(s.teamIds) == 0 {\n\t\treturn true\n\t}\n\n\tteamMemberships, err := s.FetchTeamMemberships(client)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, teamId := range s.teamIds {\n\t\tfor _, membershipId := range teamMemberships {\n\t\t\tif teamId == membershipId {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SocialGithub) IsOrganizationMember(client *http.Client) bool {\n\tif len(s.allowedOrganizations) == 0 {\n\t\treturn true\n\t}\n\n\torganizations, err := s.FetchOrganizations(client)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, allowedOrganization := range s.allowedOrganizations {\n\t\tfor _, organization := range organizations {\n\t\t\tif organization == allowedOrganization {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SocialGithub) FetchPrivateEmail(client *http.Client) (string, error) {\n\ttype Record struct {\n\t\tEmail string `json:\"email\"`\n\t\tPrimary bool `json:\"primary\"`\n\t\tVerified bool `json:\"verified\"`\n\t}\n\n\temailsUrl := fmt.Sprintf(s.apiUrl + \"\/emails\")\n\tr, err := client.Get(emailsUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar email = \"\"\n\tfor _, record := range records {\n\t\tif record.Primary {\n\t\t\temail = record.Email\n\t\t}\n\t}\n\n\treturn email, nil\n}\n\nfunc (s *SocialGithub) FetchTeamMemberships(client *http.Client) ([]int, error) {\n\ttype Record struct {\n\t\tId int `json:\"id\"`\n\t}\n\n membershipUrl := fmt.Sprintf(s.apiUrl + \"\/teams\")\n\tr, err := client.Get(membershipUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ids = make([]int, len(records))\n\tfor i, record := range records {\n\t\tids[i] = record.Id\n\t}\n\n\treturn ids, nil\n}\n\nfunc (s *SocialGithub) FetchOrganizations(client *http.Client) ([]string, error) {\n\ttype Record struct {\n\t\tLogin string `json:\"login\"`\n\t}\n\n\turl := fmt.Sprintf(s.apiUrl + \"\/orgs\")\n\tr, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tvar records []Record\n\n\tif err = json.NewDecoder(r.Body).Decode(&records); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar logins = make([]string, len(records))\n\tfor i, record := range records {\n\t\tlogins[i] = record.Login\n\t}\n\n\treturn logins, nil\n}\n\nfunc (s *SocialGithub) UserInfo(token *oauth2.Token) (*BasicUserInfo, error) {\n\tvar data struct {\n\t\tId int `json:\"id\"`\n\t\tName string `json:\"login\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\n\tvar err error\n\tclient := s.Client(oauth2.NoContext, token)\n\tr, err := client.Get(s.apiUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer r.Body.Close()\n\n\tif err = json.NewDecoder(r.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserInfo := &BasicUserInfo{\n\t\tIdentity: strconv.Itoa(data.Id),\n\t\tName: data.Name,\n\t\tEmail: data.Email,\n\t}\n\n\tif !s.IsTeamMember(client) {\n\t\treturn nil, ErrMissingTeamMembership\n\t}\n\n\tif !s.IsOrganizationMember(client) {\n\t\treturn nil, ErrMissingOrganizationMembership\n\t}\n\n\tif userInfo.Email == \"\" {\n\t\tuserInfo.Email, err = s.FetchPrivateEmail(client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn userInfo, nil\n}\n\n\/\/ ________ .__\n\/\/ \/ _____\/ ____ ____ ____ | | ____\n\/\/ \/ \\ ___ \/ _ \\ \/ _ \\ \/ ___\\| | _\/ __ \\\n\/\/ \\ \\_\\ ( <_> | <_> ) \/_\/ > |_\\ ___\/\n\/\/ \\______ \/\\____\/ \\____\/\\___ \/|____\/\\___ >\n\/\/ \\\/ \/_____\/ \\\/\n\ntype SocialGoogle struct {\n\t*oauth2.Config\n\tallowedDomains []string\n\tapiUrl string\n\tallowSignup bool\n}\n\nfunc (s *SocialGoogle) Type() int {\n\treturn int(models.GOOGLE)\n}\n\nfunc (s *SocialGoogle) IsEmailAllowed(email string) bool {\n\treturn isEmailAllowed(email, s.allowedDomains)\n}\n\nfunc (s *SocialGoogle) IsSignupAllowed() bool {\n\treturn s.allowSignup\n}\n\nfunc (s *SocialGoogle) UserInfo(token *oauth2.Token) (*BasicUserInfo, error) {\n\tvar data struct {\n\t\tId string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\tvar err error\n\n\tclient := s.Client(oauth2.NoContext, token)\n\tr, err := client.Get(s.apiUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\tif err = json.NewDecoder(r.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &BasicUserInfo{\n\t\tIdentity: data.Id,\n\t\tName: data.Name,\n\t\tEmail: data.Email,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package plans\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/states\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n)\n\n\/\/ ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.\n\/\/ Pass the associated resource type's schema type to method Decode to\n\/\/ obtain a ResourceInstancChange.\ntype ResourceInstanceChangeSrc struct {\n\t\/\/ Addr is the absolute address of the resource instance that the change\n\t\/\/ will apply to.\n\tAddr addrs.AbsResourceInstance\n\n\t\/\/ DeposedKey is the identifier for a deposed object associated with the\n\t\/\/ given instance, or states.NotDeposed if this change applies to the\n\t\/\/ current object.\n\t\/\/\n\t\/\/ A Replace change for a resource with create_before_destroy set will\n\t\/\/ create a new DeposedKey temporarily during replacement. In that case,\n\t\/\/ DeposedKey in the plan is always states.NotDeposed, representing that\n\t\/\/ the current object is being replaced with the deposed.\n\tDeposedKey states.DeposedKey\n\n\t\/\/ Provider is the address of the provider configuration that was used\n\t\/\/ to plan this change, and thus the configuration that must also be\n\t\/\/ used to apply it.\n\tProviderAddr addrs.AbsProviderConfig\n\n\t\/\/ ChangeSrc is an embedded description of the not-yet-decoded change.\n\tChangeSrc\n\n\t\/\/ RequiredReplace is a set of paths that caused the change action to be\n\t\/\/ Replace rather than Update. Always nil if the change action is not\n\t\/\/ Replace.\n\t\/\/\n\t\/\/ This is retained only for UI-plan-rendering purposes and so it does not\n\t\/\/ currently survive a round-trip through a saved plan file.\n\tRequiredReplace cty.PathSet\n\n\t\/\/ Private allows a provider to stash any extra data that is opaque to\n\t\/\/ Terraform that relates to this change. Terraform will save this\n\t\/\/ byte-for-byte and return it to the provider in the apply call.\n\tPrivate []byte\n}\n\n\/\/ Decode unmarshals the raw representation of the instance object being\n\/\/ changed. Pass the implied type of the corresponding resource type schema\n\/\/ for correct operation.\nfunc (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) {\n\tchange, err := rcs.ChangeSrc.Decode(ty)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ResourceInstanceChange{\n\t\tAddr: rcs.Addr,\n\t\tDeposedKey: rcs.DeposedKey,\n\t\tProviderAddr: rcs.ProviderAddr,\n\t\tChange: *change,\n\t\tRequiredReplace: rcs.RequiredReplace,\n\t\tPrivate: rcs.Private,\n\t}, nil\n}\n\n\/\/ DeepCopy creates a copy of the receiver where any pointers to nested mutable\n\/\/ values are also copied, thus ensuring that future mutations of the receiver\n\/\/ will not affect the copy.\n\/\/\n\/\/ Some types used within a resource change are immutable by convention even\n\/\/ though the Go language allows them to be mutated, such as the types from\n\/\/ the addrs package. These are _not_ copied by this method, under the\n\/\/ assumption that callers will behave themselves.\nfunc (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc {\n\tif rcs == nil {\n\t\treturn nil\n\t}\n\tret := *rcs\n\n\tret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...)\n\n\tif len(ret.Private) != 0 {\n\t\tprivate := make([]byte, len(ret.Private))\n\t\tcopy(private, ret.Private)\n\t\tret.Private = private\n\t}\n\n\tret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()\n\tret.ChangeSrc.After = ret.ChangeSrc.After.Copy()\n\n\treturn &ret\n}\n\n\/\/ OutputChangeSrc describes a change to an output value.\ntype OutputChangeSrc struct {\n\t\/\/ Addr is the absolute address of the output value that the change\n\t\/\/ will apply to.\n\tAddr addrs.AbsOutputValue\n\n\t\/\/ ChangeSrc is an embedded description of the not-yet-decoded change.\n\t\/\/\n\t\/\/ For output value changes, the type constraint for the DynamicValue\n\t\/\/ instances is always cty.DynamicPseudoType.\n\tChangeSrc\n\n\t\/\/ Sensitive, if true, indicates that either the old or new value in the\n\t\/\/ change is sensitive and so a rendered version of the plan in the UI\n\t\/\/ should elide the actual values while still indicating the action of the\n\t\/\/ change.\n\tSensitive bool\n}\n\n\/\/ Decode unmarshals the raw representation of the output value being\n\/\/ changed.\nfunc (ocs *OutputChangeSrc) Decode() (*OutputChange, error) {\n\tchange, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &OutputChange{\n\t\tAddr: ocs.Addr,\n\t\tChange: *change,\n\t\tSensitive: ocs.Sensitive,\n\t}, nil\n}\n\n\/\/ DeepCopy creates a copy of the receiver where any pointers to nested mutable\n\/\/ values are also copied, thus ensuring that future mutations of the receiver\n\/\/ will not affect the copy.\n\/\/\n\/\/ Some types used within a resource change are immutable by convention even\n\/\/ though the Go language allows them to be mutated, such as the types from\n\/\/ the addrs package. These are _not_ copied by this method, under the\n\/\/ assumption that callers will behave themselves.\nfunc (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc {\n\tif ocs == nil {\n\t\treturn nil\n\t}\n\tret := *ocs\n\n\tret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()\n\tret.ChangeSrc.After = ret.ChangeSrc.After.Copy()\n\n\treturn &ret\n}\n\n\/\/ ChangeSrc is a not-yet-decoded Change.\ntype ChangeSrc struct {\n\t\/\/ Action defines what kind of change is being made.\n\tAction Action\n\n\t\/\/ Before and After correspond to the fields of the same name in Change,\n\t\/\/ but have not yet been decoded from the serialized value used for\n\t\/\/ storage.\n\tBefore, After DynamicValue\n}\n\n\/\/ Decode unmarshals the raw representations of the before and after values\n\/\/ to produce a Change object. Pass the type constraint that the result must\n\/\/ conform to.\n\/\/\n\/\/ Where a ChangeSrc is embedded in some other struct, it's generally better\n\/\/ to call the corresponding Decode method of that struct rather than working\n\/\/ directly with its embedded Change.\nfunc (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) {\n\tvar err error\n\tbefore := cty.NullVal(ty)\n\tafter := cty.NullVal(ty)\n\n\tif len(cs.Before) > 0 {\n\t\tbefore, err = cs.Before.Decode(ty)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error decoding 'before' value: %s\", err)\n\t\t}\n\t}\n\tif len(cs.After) > 0 {\n\t\tafter, err = cs.After.Decode(ty)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error decoding 'after' value: %s\", err)\n\t\t}\n\t}\n\treturn &Change{\n\t\tAction: cs.Action,\n\t\tBefore: before,\n\t\tAfter: after,\n\t}, nil\n}\n<commit_msg>fix typo ResourceInstancChange to ResourceInstanceChange<commit_after>package plans\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/states\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n)\n\n\/\/ ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.\n\/\/ Pass the associated resource type's schema type to method Decode to\n\/\/ obtain a ResourceInstanceChange.\ntype ResourceInstanceChangeSrc struct {\n\t\/\/ Addr is the absolute address of the resource instance that the change\n\t\/\/ will apply to.\n\tAddr addrs.AbsResourceInstance\n\n\t\/\/ DeposedKey is the identifier for a deposed object associated with the\n\t\/\/ given instance, or states.NotDeposed if this change applies to the\n\t\/\/ current object.\n\t\/\/\n\t\/\/ A Replace change for a resource with create_before_destroy set will\n\t\/\/ create a new DeposedKey temporarily during replacement. In that case,\n\t\/\/ DeposedKey in the plan is always states.NotDeposed, representing that\n\t\/\/ the current object is being replaced with the deposed.\n\tDeposedKey states.DeposedKey\n\n\t\/\/ Provider is the address of the provider configuration that was used\n\t\/\/ to plan this change, and thus the configuration that must also be\n\t\/\/ used to apply it.\n\tProviderAddr addrs.AbsProviderConfig\n\n\t\/\/ ChangeSrc is an embedded description of the not-yet-decoded change.\n\tChangeSrc\n\n\t\/\/ RequiredReplace is a set of paths that caused the change action to be\n\t\/\/ Replace rather than Update. Always nil if the change action is not\n\t\/\/ Replace.\n\t\/\/\n\t\/\/ This is retained only for UI-plan-rendering purposes and so it does not\n\t\/\/ currently survive a round-trip through a saved plan file.\n\tRequiredReplace cty.PathSet\n\n\t\/\/ Private allows a provider to stash any extra data that is opaque to\n\t\/\/ Terraform that relates to this change. Terraform will save this\n\t\/\/ byte-for-byte and return it to the provider in the apply call.\n\tPrivate []byte\n}\n\n\/\/ Decode unmarshals the raw representation of the instance object being\n\/\/ changed. Pass the implied type of the corresponding resource type schema\n\/\/ for correct operation.\nfunc (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) {\n\tchange, err := rcs.ChangeSrc.Decode(ty)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ResourceInstanceChange{\n\t\tAddr: rcs.Addr,\n\t\tDeposedKey: rcs.DeposedKey,\n\t\tProviderAddr: rcs.ProviderAddr,\n\t\tChange: *change,\n\t\tRequiredReplace: rcs.RequiredReplace,\n\t\tPrivate: rcs.Private,\n\t}, nil\n}\n\n\/\/ DeepCopy creates a copy of the receiver where any pointers to nested mutable\n\/\/ values are also copied, thus ensuring that future mutations of the receiver\n\/\/ will not affect the copy.\n\/\/\n\/\/ Some types used within a resource change are immutable by convention even\n\/\/ though the Go language allows them to be mutated, such as the types from\n\/\/ the addrs package. These are _not_ copied by this method, under the\n\/\/ assumption that callers will behave themselves.\nfunc (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc {\n\tif rcs == nil {\n\t\treturn nil\n\t}\n\tret := *rcs\n\n\tret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...)\n\n\tif len(ret.Private) != 0 {\n\t\tprivate := make([]byte, len(ret.Private))\n\t\tcopy(private, ret.Private)\n\t\tret.Private = private\n\t}\n\n\tret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()\n\tret.ChangeSrc.After = ret.ChangeSrc.After.Copy()\n\n\treturn &ret\n}\n\n\/\/ OutputChangeSrc describes a change to an output value.\ntype OutputChangeSrc struct {\n\t\/\/ Addr is the absolute address of the output value that the change\n\t\/\/ will apply to.\n\tAddr addrs.AbsOutputValue\n\n\t\/\/ ChangeSrc is an embedded description of the not-yet-decoded change.\n\t\/\/\n\t\/\/ For output value changes, the type constraint for the DynamicValue\n\t\/\/ instances is always cty.DynamicPseudoType.\n\tChangeSrc\n\n\t\/\/ Sensitive, if true, indicates that either the old or new value in the\n\t\/\/ change is sensitive and so a rendered version of the plan in the UI\n\t\/\/ should elide the actual values while still indicating the action of the\n\t\/\/ change.\n\tSensitive bool\n}\n\n\/\/ Decode unmarshals the raw representation of the output value being\n\/\/ changed.\nfunc (ocs *OutputChangeSrc) Decode() (*OutputChange, error) {\n\tchange, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &OutputChange{\n\t\tAddr: ocs.Addr,\n\t\tChange: *change,\n\t\tSensitive: ocs.Sensitive,\n\t}, nil\n}\n\n\/\/ DeepCopy creates a copy of the receiver where any pointers to nested mutable\n\/\/ values are also copied, thus ensuring that future mutations of the receiver\n\/\/ will not affect the copy.\n\/\/\n\/\/ Some types used within a resource change are immutable by convention even\n\/\/ though the Go language allows them to be mutated, such as the types from\n\/\/ the addrs package. These are _not_ copied by this method, under the\n\/\/ assumption that callers will behave themselves.\nfunc (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc {\n\tif ocs == nil {\n\t\treturn nil\n\t}\n\tret := *ocs\n\n\tret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()\n\tret.ChangeSrc.After = ret.ChangeSrc.After.Copy()\n\n\treturn &ret\n}\n\n\/\/ ChangeSrc is a not-yet-decoded Change.\ntype ChangeSrc struct {\n\t\/\/ Action defines what kind of change is being made.\n\tAction Action\n\n\t\/\/ Before and After correspond to the fields of the same name in Change,\n\t\/\/ but have not yet been decoded from the serialized value used for\n\t\/\/ storage.\n\tBefore, After DynamicValue\n}\n\n\/\/ Decode unmarshals the raw representations of the before and after values\n\/\/ to produce a Change object. Pass the type constraint that the result must\n\/\/ conform to.\n\/\/\n\/\/ Where a ChangeSrc is embedded in some other struct, it's generally better\n\/\/ to call the corresponding Decode method of that struct rather than working\n\/\/ directly with its embedded Change.\nfunc (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) {\n\tvar err error\n\tbefore := cty.NullVal(ty)\n\tafter := cty.NullVal(ty)\n\n\tif len(cs.Before) > 0 {\n\t\tbefore, err = cs.Before.Decode(ty)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error decoding 'before' value: %s\", err)\n\t\t}\n\t}\n\tif len(cs.After) > 0 {\n\t\tafter, err = cs.After.Decode(ty)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error decoding 'after' value: %s\", err)\n\t\t}\n\t}\n\treturn &Change{\n\t\tAction: cs.Action,\n\t\tBefore: before,\n\t\tAfter: after,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"gopkg.in\/Shopify\/sarama.v1\"\n)\n\nfunc HttpStatConsumerAction(w http.ResponseWriter, r *http.Request) {\n\ttype consumerStat struct {\n\t\tTopic string\n\t\tUrl string\n\t}\n\n\tmanagers := server.managers\n\tvar res []consumerStat\n\tfor _, mgr := range managers {\n\t\tres = append(res, consumerStat{Topic: mgr.Topic, Url: mgr.Url})\n\t}\n\n\techo2client(w, r, res)\n}\n\nfunc HttpStatWorkerAction(w http.ResponseWriter, r *http.Request) {\n\ttype workerStat struct {\n\t\tTopic string\n\t\tUrl string\n\t\tPartition int32\n\t\tUUID string\n\t\tOffset int64\n\t}\n\tvar res []workerStat\n\tfor _, mgr := range server.managers {\n\t\tfor _, worker := range mgr.workers {\n\t\t\toffset, err := worker.Consumer.OffsetManager().Offsets(worker.Topics[0])\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor k, v := range offset {\n\t\t\t\tres = append(res, workerStat{\n\t\t\t\t\tTopic: worker.Topics[0],\n\t\t\t\t\tUrl: worker.Callback.Url,\n\t\t\t\t\tPartition: k,\n\t\t\t\t\tUUID: worker.Consumer.Instance().ID,\n\t\t\t\t\tOffset: v,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\techo2client(w, r, res)\n}\n\nfunc HttpAdminSkipAction(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query()\n\n\ttopic := query.Get(\"topic\")\n\tpartition := query.Get(\"partition\")\n\tgroup := query.Get(\"group\")\n\toffset := query.Get(\"offset\")\n\n\tif topic == \"\" || partition == \"\" || group == \"\" || offset == \"\" {\n\t\techo2client(w, r, \"invalid param\")\n\t\treturn\n\t}\n\n\ti_partition, _ := strconv.Atoi(partition)\n\ti_offset, _ := strconv.ParseInt(offset, 10, 64)\n\n\tmgr, err := server.Find(topic, group)\n\tif err != nil {\n\t\techo2client(w, r, \"invalid topic\/group\")\n\t\treturn\n\t}\n\n\tworker, err := mgr.Find(i_partition)\n\tif err != nil {\n\t\techo2client(w, r, \"invalid topic\/partition\")\n\t\treturn\n\t}\n\n\tmsg := &sarama.ConsumerMessage{\n\t\tTopic: topic,\n\t\tPartition: int32(i_partition),\n\t\tOffset: i_offset,\n\t}\n\tworker.Consumer.CommitUpto(msg)\n\tworker.Close()\n\techo2client(w, r, \"\")\n}\n\nfunc echo2client(w http.ResponseWriter, r *http.Request, data interface{}) {\n\tres := map[string]interface{}{\n\t\t\"errno\": 0,\n\t\t\"errmsg\": \"success\",\n\t\t\"data\": data,\n\t}\n\n\tif b, e := json.Marshal(res); e != nil {\n\t\tlog.Printf(\"marshal http response error, %v\", e)\n\t} else {\n\t\tio.WriteString(w, string(b))\n\t}\n\n\treturn\n}\n<commit_msg>remove dulplicate partition<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"gopkg.in\/Shopify\/sarama.v1\"\n)\n\nfunc HttpStatConsumerAction(w http.ResponseWriter, r *http.Request) {\n\ttype consumerStat struct {\n\t\tTopic string\n\t\tUrl string\n\t}\n\n\tmanagers := server.managers\n\tvar res []consumerStat\n\tfor _, mgr := range managers {\n\t\tres = append(res, consumerStat{Topic: mgr.Topic, Url: mgr.Url})\n\t}\n\n\techo2client(w, r, res)\n}\n\nfunc HttpStatWorkerAction(w http.ResponseWriter, r *http.Request) {\n\ttype OffsetData struct {\n\t\tUUID string\n\t\tOffset int64\n\t}\n\tpusherDataMap := map[string]map[string]map[int32]OffsetData{}\n\tfor _, mgr := range server.managers {\n\t\tfor _, worker := range mgr.workers {\n\t\t\toffset, err := worker.Consumer.OffsetManager().Offsets(worker.Topics[0])\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor k, v := range offset {\n\t\t\t\tg, ok := pusherDataMap[worker.Callback.Url]\n\t\t\t\tif !ok {\n\t\t\t\t\tg = map[string]map[int32]OffsetData{}\n\t\t\t\t\tpusherDataMap[worker.Callback.Url] = g\n\t\t\t\t}\n\t\t\t\tt, ok := g[worker.Topics[0]]\n\t\t\t\tif !ok {\n\t\t\t\t\tt = map[int32]OffsetData{}\n\t\t\t\t\tg[worker.Topics[0]] = t\n\t\t\t\t}\n\t\t\t\tvalue, ok := t[k]\n\t\t\t\tif !ok {\n\t\t\t\t\tt[k] = OffsetData{worker.Consumer.Instance().ID, v}\n\t\t\t\t} else {\n\t\t\t\t\tif v > value.Offset {\n\t\t\t\t\t\tt[k] = OffsetData{worker.Consumer.Instance().ID, v}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttype workerStat struct {\n\t\tTopic string\n\t\tUrl string\n\t\tPartition int32\n\t\tUUID string\n\t\tOffset int64\n\t}\n\n\tvar res []workerStat\n\tfor gUrl, group := range pusherDataMap {\n\t\tfor topicKey, topic := range group {\n\t\t\tfor partitionId, part := range topic {\n\t\t\t\tres = append(res, workerStat{\n\t\t\t\t\tTopic: topicKey,\n\t\t\t\t\tUrl: gUrl,\n\t\t\t\t\tPartition: partitionId,\n\t\t\t\t\tUUID: part.UUID,\n\t\t\t\t\tOffset: part.Offset,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\techo2client(w, r, res)\n}\n\nfunc HttpAdminSkipAction(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query()\n\n\ttopic := query.Get(\"topic\")\n\tpartition := query.Get(\"partition\")\n\tgroup := query.Get(\"group\")\n\toffset := query.Get(\"offset\")\n\n\tif topic == \"\" || partition == \"\" || group == \"\" || offset == \"\" {\n\t\techo2client(w, r, \"invalid param\")\n\t\treturn\n\t}\n\n\ti_partition, _ := strconv.Atoi(partition)\n\ti_offset, _ := strconv.ParseInt(offset, 10, 64)\n\n\tmgr, err := server.Find(topic, group)\n\tif err != nil {\n\t\techo2client(w, r, \"invalid topic\/group\")\n\t\treturn\n\t}\n\n\tworker, err := mgr.Find(i_partition)\n\tif err != nil {\n\t\techo2client(w, r, \"invalid topic\/partition\")\n\t\treturn\n\t}\n\n\tmsg := &sarama.ConsumerMessage{\n\t\tTopic: topic,\n\t\tPartition: int32(i_partition),\n\t\tOffset: i_offset,\n\t}\n\tworker.Consumer.CommitUpto(msg)\n\tworker.Close()\n\techo2client(w, r, \"\")\n}\n\nfunc echo2client(w http.ResponseWriter, r *http.Request, data interface{}) {\n\tres := map[string]interface{}{\n\t\t\"errno\": 0,\n\t\t\"errmsg\": \"success\",\n\t\t\"data\": data,\n\t}\n\n\tif b, e := json.Marshal(res); e != nil {\n\t\tlog.Printf(\"marshal http response error, %v\", e)\n\t} else {\n\t\tio.WriteString(w, string(b))\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fatih\/color\"\n\tmkr \"github.com\/mackerelio\/mackerel-client-go\"\n\t\"github.com\/mackerelio\/mkr\/logger\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar commandAlerts = cli.Command{\n\tName: \"alerts\",\n\tUsage: \"Retrieve\/Close alerts\",\n\tDescription: `\n Retrieve\/Close alerts. With no subcommand specified, this will show all alerts.\n Requests APIs under \"\/api\/v0\/alerts\". See https:\/\/mackerel.io\/api-docs\/entry\/alerts .\n`,\n\tAction: doAlertsRetrieve,\n\tSubcommands: []cli.Command{\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tUsage: \"list alerts\",\n\t\t\tDescription: \"Shows alerts in human-readable format.\",\n\t\t\tAction: doAlertsList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringSliceFlag{\n\t\t\t\t\tName: \"service, s\",\n\t\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t\t\tUsage: \"Filters alerts by service. Multiple choices are allowed.\",\n\t\t\t\t},\n\t\t\t\tcli.StringSliceFlag{\n\t\t\t\t\tName: \"host-status, S\",\n\t\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t\t\tUsage: \"Filters alerts by status of each host. Multiple choices are allowed.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolTFlag{Name: \"color, c\", Usage: \"Colorize output. default: true\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"close\",\n\t\t\tUsage: \"close alerts\",\n\t\t\tDescription: \"Closes alerts. Multiple alert IDs can be specified.\",\n\t\t\tAction: doAlertsClose,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{Name: \"reason, r\", Value: \"\", Usage: \"Reason of closing alert.\"},\n\t\t\t\tcli.BoolFlag{Name: \"verbose, v\", Usage: \"Verbose output mode\"},\n\t\t\t},\n\t\t},\n\t},\n}\n\ntype alertSet struct {\n\tAlert *mkr.Alert\n\tHost *mkr.Host\n\tMonitor mkr.Monitor\n}\n\nfunc joinMonitorsAndHosts(client *mkr.Client, alerts []*mkr.Alert) []*alertSet {\n\thostsJSON, err := client.FindHosts(&mkr.FindHostsParam{\n\t\tStatuses: []string{\"working\", \"standby\", \"poweroff\", \"maintenance\"},\n\t})\n\tlogger.DieIf(err)\n\n\thosts := map[string]*mkr.Host{}\n\tfor _, host := range hostsJSON {\n\t\thosts[host.ID] = host\n\t}\n\n\tmonitorsJSON, err := client.FindMonitors()\n\tlogger.DieIf(err)\n\n\tmonitors := map[string]mkr.Monitor{}\n\tfor _, monitor := range monitorsJSON {\n\t\tmonitors[monitor.MonitorID()] = monitor\n\t}\n\n\talertSets := []*alertSet{}\n\tfor _, alert := range alerts {\n\t\talertSets = append(\n\t\t\talertSets,\n\t\t\t&alertSet{Alert: alert, Host: hosts[alert.HostID], Monitor: monitors[alert.MonitorID]},\n\t\t)\n\t}\n\treturn alertSets\n}\n\nfunc formatJoinedAlert(alertSet *alertSet, colorize bool) string {\n\tconst layout = \"2006-01-02 15:04:05\"\n\n\thost := alertSet.Host\n\tmonitor := alertSet.Monitor\n\talert := alertSet.Alert\n\n\thostMsg := \"\"\n\tif host != nil {\n\t\tstatusMsg := host.Status\n\t\tif host.IsRetired == true {\n\t\t\tstatusMsg = \"retired\"\n\t\t}\n\t\tif colorize {\n\t\t\tswitch statusMsg {\n\t\t\tcase \"working\":\n\t\t\t\tstatusMsg = color.BlueString(\"working\")\n\t\t\tcase \"standby\":\n\t\t\t\tstatusMsg = color.GreenString(\"standby\")\n\t\t\tcase \"poweroff\":\n\t\t\t\tstatusMsg = \"poweroff\"\n\t\t\tcase \"maintenance\":\n\t\t\t\tstatusMsg = color.YellowString(\"maintenance\")\n\t\t\t}\n\t\t}\n\t\thostMsg = fmt.Sprintf(\" %s %s\", host.Name, statusMsg)\n\t\troleMsgs := []string{}\n\t\tfor service, roles := range host.Roles {\n\t\t\troleMsgs = append(roleMsgs, fmt.Sprintf(\"%s:%s\", service, strings.Join(roles, \",\")))\n\t\t}\n\t\thostMsg += \" [\" + strings.Join(roleMsgs, \", \") + \"]\"\n\t}\n\n\tmonitorMsg := \"\"\n\tif monitor != nil {\n\t\tswitch m := monitor.(type) {\n\t\tcase *mkr.MonitorConnectivity:\n\t\t\tmonitorMsg = \"\"\n\t\tcase *mkr.MonitorHostMetric:\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Warning)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\t}\n\t\tcase *mkr.MonitorServiceMetric:\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Warning)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\t}\n\t\tcase *mkr.MonitorExternalHTTP:\n\t\t\tstatusRegexp, _ := regexp.Compile(\"^[2345][0-9][0-9]$\")\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tif statusRegexp.MatchString(alert.Message) {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f > %.2f msec, status:%s\", m.URL, alert.Value, m.ResponseTimeCritical, alert.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f msec, %s\", m.URL, alert.Value, alert.Message)\n\t\t\t\t}\n\t\t\tcase \"WARNING\":\n\t\t\t\tif statusRegexp.MatchString(alert.Message) {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f > %.2f msec, status:%s\", alert.Value, m.ResponseTimeWarning, alert.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f msec, %s\", alert.Value, alert.Message)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f > %.2f msec, status:%s\", alert.Value, m.ResponseTimeCritical, alert.Message)\n\t\t\t}\n\t\tcase *mkr.MonitorExpression:\n\t\t\texpression := formatExpressionOneline(m.Expression)\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", expression, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", expression, alert.Value, m.Operator, m.Warning)\n\t\t\tcase \"UNKNOWN\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s\", expression)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f\", expression, alert.Value)\n\t\t\t}\n\t\tdefault:\n\t\t\tmonitorMsg = fmt.Sprintf(\"%s\", monitor.MonitorType())\n\t\t}\n\t\tif monitorMsg == \"\" {\n\t\t\tmonitorMsg = monitor.MonitorName()\n\t\t} else {\n\t\t\tmonitorMsg = monitor.MonitorName() + \" \" + monitorMsg\n\t\t}\n\t}\n\tstatusMsg := alert.Status\n\tif colorize {\n\t\tswitch alert.Status {\n\t\tcase \"CRITICAL\":\n\t\t\tstatusMsg = color.RedString(\"CRITICAL\")\n\t\tcase \"WARNING\":\n\t\t\tstatusMsg = color.YellowString(\"WARNING \")\n\t\tcase \"UNKNOWN\":\n\t\t\tstatusMsg = \"UNKNOWN \"\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s %s %s %s%s\", alert.ID, time.Unix(alert.OpenedAt, 0).Format(layout), statusMsg, monitorMsg, hostMsg)\n}\n\nvar expressionTrimmer = regexp.MustCompile(`[\\r\\n]+\\s*`)\n\nfunc formatExpressionOneline(expr string) string {\n\treturn strings.Trim(expressionTrimmer.ReplaceAllString(expr, \" \"), \" \")\n}\n\nfunc doAlertsRetrieve(c *cli.Context) error {\n\tclient := newMackerelFromContext(c)\n\n\talerts, err := client.FindAlerts()\n\tlogger.DieIf(err)\n\tPrettyPrintJSON(alerts)\n\treturn nil\n}\n\nfunc doAlertsList(c *cli.Context) error {\n\tfilterServices := c.StringSlice(\"service\")\n\tfilterStatuses := c.StringSlice(\"host-status\")\n\tclient := newMackerelFromContext(c)\n\n\talerts, err := client.FindAlerts()\n\tlogger.DieIf(err)\n\tjoinedAlerts := joinMonitorsAndHosts(client, alerts)\n\n\tfor _, joinAlert := range joinedAlerts {\n\t\tif len(filterServices) > 0 {\n\t\t\tfound := false\n\t\t\tfor _, filterService := range filterServices {\n\t\t\t\tif joinAlert.Host != nil {\n\t\t\t\t\tif _, ok := joinAlert.Host.Roles[filterService]; ok {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar service string\n\t\t\t\t\tif m, ok := joinAlert.Monitor.(*mkr.MonitorServiceMetric); ok {\n\t\t\t\t\t\tservice = m.Service\n\t\t\t\t\t} else if m, ok := joinAlert.Monitor.(*mkr.MonitorExternalHTTP); ok {\n\t\t\t\t\t\tservice = m.Service\n\t\t\t\t\t}\n\t\t\t\t\tfound = service == filterService\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(filterStatuses) > 0 {\n\t\t\tfound := false\n\t\t\tfor _, filterStatus := range filterStatuses {\n\t\t\t\tif joinAlert.Host != nil && joinAlert.Host.Status == filterStatus {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfmt.Println(formatJoinedAlert(joinAlert, c.BoolT(\"color\")))\n\t}\n\treturn nil\n}\n\nfunc doAlertsClose(c *cli.Context) error {\n\tisVerbose := c.Bool(\"verbose\")\n\targAlertIDs := c.Args()\n\treason := c.String(\"reason\")\n\n\tif len(argAlertIDs) < 1 {\n\t\tcli.ShowCommandHelp(c, \"alerts\")\n\t\tos.Exit(1)\n\t}\n\n\tclient := newMackerelFromContext(c)\n\tfor _, alertID := range argAlertIDs {\n\t\talert, err := client.CloseAlert(alertID, reason)\n\t\tlogger.DieIf(err)\n\n\t\tlogger.Log(\"Alert closed\", alertID)\n\t\tif isVerbose == true {\n\t\t\tPrettyPrintJSON(alert)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>replace spaces in the expression while printing alerts list<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fatih\/color\"\n\tmkr \"github.com\/mackerelio\/mackerel-client-go\"\n\t\"github.com\/mackerelio\/mkr\/logger\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar commandAlerts = cli.Command{\n\tName: \"alerts\",\n\tUsage: \"Retrieve\/Close alerts\",\n\tDescription: `\n Retrieve\/Close alerts. With no subcommand specified, this will show all alerts.\n Requests APIs under \"\/api\/v0\/alerts\". See https:\/\/mackerel.io\/api-docs\/entry\/alerts .\n`,\n\tAction: doAlertsRetrieve,\n\tSubcommands: []cli.Command{\n\t\t{\n\t\t\tName: \"list\",\n\t\t\tUsage: \"list alerts\",\n\t\t\tDescription: \"Shows alerts in human-readable format.\",\n\t\t\tAction: doAlertsList,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringSliceFlag{\n\t\t\t\t\tName: \"service, s\",\n\t\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t\t\tUsage: \"Filters alerts by service. Multiple choices are allowed.\",\n\t\t\t\t},\n\t\t\t\tcli.StringSliceFlag{\n\t\t\t\t\tName: \"host-status, S\",\n\t\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t\t\tUsage: \"Filters alerts by status of each host. Multiple choices are allowed.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolTFlag{Name: \"color, c\", Usage: \"Colorize output. default: true\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"close\",\n\t\t\tUsage: \"close alerts\",\n\t\t\tDescription: \"Closes alerts. Multiple alert IDs can be specified.\",\n\t\t\tAction: doAlertsClose,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{Name: \"reason, r\", Value: \"\", Usage: \"Reason of closing alert.\"},\n\t\t\t\tcli.BoolFlag{Name: \"verbose, v\", Usage: \"Verbose output mode\"},\n\t\t\t},\n\t\t},\n\t},\n}\n\ntype alertSet struct {\n\tAlert *mkr.Alert\n\tHost *mkr.Host\n\tMonitor mkr.Monitor\n}\n\nfunc joinMonitorsAndHosts(client *mkr.Client, alerts []*mkr.Alert) []*alertSet {\n\thostsJSON, err := client.FindHosts(&mkr.FindHostsParam{\n\t\tStatuses: []string{\"working\", \"standby\", \"poweroff\", \"maintenance\"},\n\t})\n\tlogger.DieIf(err)\n\n\thosts := map[string]*mkr.Host{}\n\tfor _, host := range hostsJSON {\n\t\thosts[host.ID] = host\n\t}\n\n\tmonitorsJSON, err := client.FindMonitors()\n\tlogger.DieIf(err)\n\n\tmonitors := map[string]mkr.Monitor{}\n\tfor _, monitor := range monitorsJSON {\n\t\tmonitors[monitor.MonitorID()] = monitor\n\t}\n\n\talertSets := []*alertSet{}\n\tfor _, alert := range alerts {\n\t\talertSets = append(\n\t\t\talertSets,\n\t\t\t&alertSet{Alert: alert, Host: hosts[alert.HostID], Monitor: monitors[alert.MonitorID]},\n\t\t)\n\t}\n\treturn alertSets\n}\n\nfunc formatJoinedAlert(alertSet *alertSet, colorize bool) string {\n\tconst layout = \"2006-01-02 15:04:05\"\n\n\thost := alertSet.Host\n\tmonitor := alertSet.Monitor\n\talert := alertSet.Alert\n\n\thostMsg := \"\"\n\tif host != nil {\n\t\tstatusMsg := host.Status\n\t\tif host.IsRetired == true {\n\t\t\tstatusMsg = \"retired\"\n\t\t}\n\t\tif colorize {\n\t\t\tswitch statusMsg {\n\t\t\tcase \"working\":\n\t\t\t\tstatusMsg = color.BlueString(\"working\")\n\t\t\tcase \"standby\":\n\t\t\t\tstatusMsg = color.GreenString(\"standby\")\n\t\t\tcase \"poweroff\":\n\t\t\t\tstatusMsg = \"poweroff\"\n\t\t\tcase \"maintenance\":\n\t\t\t\tstatusMsg = color.YellowString(\"maintenance\")\n\t\t\t}\n\t\t}\n\t\thostMsg = fmt.Sprintf(\" %s %s\", host.Name, statusMsg)\n\t\troleMsgs := []string{}\n\t\tfor service, roles := range host.Roles {\n\t\t\troleMsgs = append(roleMsgs, fmt.Sprintf(\"%s:%s\", service, strings.Join(roles, \",\")))\n\t\t}\n\t\thostMsg += \" [\" + strings.Join(roleMsgs, \", \") + \"]\"\n\t}\n\n\tmonitorMsg := \"\"\n\tif monitor != nil {\n\t\tswitch m := monitor.(type) {\n\t\tcase *mkr.MonitorConnectivity:\n\t\t\tmonitorMsg = \"\"\n\t\tcase *mkr.MonitorHostMetric:\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Warning)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\t}\n\t\tcase *mkr.MonitorServiceMetric:\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Warning)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %s %.2f %s %.2f\", m.Service, m.Metric, alert.Value, m.Operator, m.Critical)\n\t\t\t}\n\t\tcase *mkr.MonitorExternalHTTP:\n\t\t\tstatusRegexp, _ := regexp.Compile(\"^[2345][0-9][0-9]$\")\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tif statusRegexp.MatchString(alert.Message) {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f > %.2f msec, status:%s\", m.URL, alert.Value, m.ResponseTimeCritical, alert.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f msec, %s\", m.URL, alert.Value, alert.Message)\n\t\t\t\t}\n\t\t\tcase \"WARNING\":\n\t\t\t\tif statusRegexp.MatchString(alert.Message) {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f > %.2f msec, status:%s\", alert.Value, m.ResponseTimeWarning, alert.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f msec, %s\", alert.Value, alert.Message)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%.2f > %.2f msec, status:%s\", alert.Value, m.ResponseTimeCritical, alert.Message)\n\t\t\t}\n\t\tcase *mkr.MonitorExpression:\n\t\t\texpression := formatExpressionOneline(m.Expression)\n\t\t\tswitch alert.Status {\n\t\t\tcase \"CRITICAL\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", expression, alert.Value, m.Operator, m.Critical)\n\t\t\tcase \"WARNING\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f %s %.2f\", expression, alert.Value, m.Operator, m.Warning)\n\t\t\tcase \"UNKNOWN\":\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s\", expression)\n\t\t\tdefault:\n\t\t\t\tmonitorMsg = fmt.Sprintf(\"%s %.2f\", expression, alert.Value)\n\t\t\t}\n\t\tdefault:\n\t\t\tmonitorMsg = fmt.Sprintf(\"%s\", monitor.MonitorType())\n\t\t}\n\t\tif monitorMsg == \"\" {\n\t\t\tmonitorMsg = monitor.MonitorName()\n\t\t} else {\n\t\t\tmonitorMsg = monitor.MonitorName() + \" \" + monitorMsg\n\t\t}\n\t}\n\tstatusMsg := alert.Status\n\tif colorize {\n\t\tswitch alert.Status {\n\t\tcase \"CRITICAL\":\n\t\t\tstatusMsg = color.RedString(\"CRITICAL\")\n\t\tcase \"WARNING\":\n\t\t\tstatusMsg = color.YellowString(\"WARNING \")\n\t\tcase \"UNKNOWN\":\n\t\t\tstatusMsg = \"UNKNOWN \"\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s %s %s %s%s\", alert.ID, time.Unix(alert.OpenedAt, 0).Format(layout), statusMsg, monitorMsg, hostMsg)\n}\n\nvar expressionTrimmer = regexp.MustCompile(`[\\r\\n]+\\s*`)\n\nfunc formatExpressionOneline(expr string) string {\n\texpr = strings.Trim(expressionTrimmer.ReplaceAllString(expr, \" \"), \" \")\n\treturn strings.Replace(strings.Replace(expr, \"( \", \"(\", -1), \" )\", \")\", -1)\n}\n\nfunc doAlertsRetrieve(c *cli.Context) error {\n\tclient := newMackerelFromContext(c)\n\n\talerts, err := client.FindAlerts()\n\tlogger.DieIf(err)\n\tPrettyPrintJSON(alerts)\n\treturn nil\n}\n\nfunc doAlertsList(c *cli.Context) error {\n\tfilterServices := c.StringSlice(\"service\")\n\tfilterStatuses := c.StringSlice(\"host-status\")\n\tclient := newMackerelFromContext(c)\n\n\talerts, err := client.FindAlerts()\n\tlogger.DieIf(err)\n\tjoinedAlerts := joinMonitorsAndHosts(client, alerts)\n\n\tfor _, joinAlert := range joinedAlerts {\n\t\tif len(filterServices) > 0 {\n\t\t\tfound := false\n\t\t\tfor _, filterService := range filterServices {\n\t\t\t\tif joinAlert.Host != nil {\n\t\t\t\t\tif _, ok := joinAlert.Host.Roles[filterService]; ok {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar service string\n\t\t\t\t\tif m, ok := joinAlert.Monitor.(*mkr.MonitorServiceMetric); ok {\n\t\t\t\t\t\tservice = m.Service\n\t\t\t\t\t} else if m, ok := joinAlert.Monitor.(*mkr.MonitorExternalHTTP); ok {\n\t\t\t\t\t\tservice = m.Service\n\t\t\t\t\t}\n\t\t\t\t\tfound = service == filterService\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(filterStatuses) > 0 {\n\t\t\tfound := false\n\t\t\tfor _, filterStatus := range filterStatuses {\n\t\t\t\tif joinAlert.Host != nil && joinAlert.Host.Status == filterStatus {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfmt.Println(formatJoinedAlert(joinAlert, c.BoolT(\"color\")))\n\t}\n\treturn nil\n}\n\nfunc doAlertsClose(c *cli.Context) error {\n\tisVerbose := c.Bool(\"verbose\")\n\targAlertIDs := c.Args()\n\treason := c.String(\"reason\")\n\n\tif len(argAlertIDs) < 1 {\n\t\tcli.ShowCommandHelp(c, \"alerts\")\n\t\tos.Exit(1)\n\t}\n\n\tclient := newMackerelFromContext(c)\n\tfor _, alertID := range argAlertIDs {\n\t\talert, err := client.CloseAlert(alertID, reason)\n\t\tlogger.DieIf(err)\n\n\t\tlogger.Log(\"Alert closed\", alertID)\n\t\tif isVerbose == true {\n\t\t\tPrettyPrintJSON(alert)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"os\"\n\/\/\t\"time\"\n)\n\nvar upgrader websocket.Upgrader\n\nfunc HomeHandler(w http.ResponseWriter, r *http.Request) {\n\tt, err := template.ParseFiles(\"templates\/index.go.html\")\n\tensureNil(err)\n\terr = t.Execute(w, nil)\n\tensureNil(err)\n}\n\ntype SimilarPostRequest struct {\n\tPostUri string\n}\n\ntype TumblrCredentials struct {\n\tKey string\n\tSecret string\n}\n\nfunc getCredentials() *TumblrCredentials {\n\treturn &TumblrCredentials{\n\t\tKey: os.Getenv(\"ALIKER_KEY\"),\n\t\tSecret: os.Getenv(\"ALIKER_SECRET\"),\n\t}\n}\n\ntype beginNotification struct {\n\tBaseHostname string\n\tPostID int64\n\tMsgType string\n}\n\nfunc sendBeginNotification(c *websocket.Conn, bh string, pid int64) error {\n\tmsg := &beginNotification{\n\t\tBaseHostname: bh,\n\t\tPostID: pid,\n\t\tMsgType: \"begin-notification\",\n\t}\n\treturn c.WriteJSON(msg)\n}\n\nfunc sendErrorNotification(c *websocket.Conn, err error) error {\n\treturn c.WriteJSON(&struct{\n\t\tMsgType string\n\t\tMessage string\n\t}{\n\t\t\"error\",\n\t\terr.Error(),\n\t})\n}\n\nfunc sendBlogsLikingPostData(c *websocket.Conn, blogs []string) error {\n\treturn c.WriteJSON(&struct{\n\t\tMsgType string\n\t\tBlogs []string\n\t}{\n\t\t\"blogs-liking-post\",\n\t\tblogs,\n\t})\n}\n\nfunc sendBlogLikesData(c *websocket.Conn, likes []string) error {\n\treturn c.WriteJSON(&struct{\n\t\tMsgType string\n\t\tLikes []string\n\t}{\n\t\t\"blog-likes\",\n\t\tlikes,\n\t})\n}\n\nfunc SimilarHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tensureNil(err)\n\tdefer conn.Close()\n\n\t\/\/ Figure out what post they want and extract the details we need\n\tspr := &SimilarPostRequest{}\n\terr = conn.ReadJSON(spr)\n\tensureNil(err)\n\tbh, pid, err := extractPostId(spr.PostUri)\n\tif err != nil {\n\t\tmsg := fmt.Errorf(\"invalid post uri: %s\", spr.PostUri)\n\t\tsendErrorNotification(conn, msg)\n\t\treturn\n\t}\n\n\terr = sendBeginNotification(conn, bh, pid)\n\tensureNil(err)\n\n\tlikingBlogs := blogsLikingPost(bh, pid)\n\n\terr = sendBlogsLikingPostData(conn, likingBlogs)\n\tensureNil(err)\n\n\tfor _, blogName := range likingBlogs {\n\t\tfmt.Println(blogName)\n\t\t\/\/ TODO Get page one of that blogs' likes\n\t\t\/\/ TODO Request the other pages in parallel\n\t\t\/\/ TODO Send all of the post data at once for this blog\n\t\t\/\/ message type: blog-likes\n\t\t\/\/ { \"blog-name\": \"some dude\", \"likes\": [...] }\n\t}\n}\n\nfunc ensureNil(x interface{}) {\n\tif x != nil {\n\t\tpanic(x)\n\t}\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/\", HomeHandler)\n\trouter.HandleFunc(\"\/post\", SimilarHandler)\n\n\tn := negroni.New()\n\tn.UseHandler(router)\n\tn.Run(fmt.Sprintf(\":%s\", os.Getenv(\"PORT\")))\n}\n<commit_msg>gofmt, don't duplicate credentials struct from my tumblr client, move that to appropriate file<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar upgrader websocket.Upgrader\n\nfunc HomeHandler(w http.ResponseWriter, r *http.Request) {\n\tt, err := template.ParseFiles(\"templates\/index.go.html\")\n\tensureNil(err)\n\terr = t.Execute(w, nil)\n\tensureNil(err)\n}\n\ntype SimilarPostRequest struct {\n\tPostUri string\n}\n\ntype beginNotification struct {\n\tBaseHostname string\n\tPostID int64\n\tMsgType string\n}\n\nfunc sendBeginNotification(c *websocket.Conn, bh string, pid int64) error {\n\tmsg := &beginNotification{\n\t\tBaseHostname: bh,\n\t\tPostID: pid,\n\t\tMsgType: \"begin-notification\",\n\t}\n\treturn c.WriteJSON(msg)\n}\n\nfunc sendErrorNotification(c *websocket.Conn, err error) error {\n\treturn c.WriteJSON(&struct {\n\t\tMsgType string\n\t\tMessage string\n\t}{\n\t\t\"error\",\n\t\terr.Error(),\n\t})\n}\n\nfunc sendBlogsLikingPostData(c *websocket.Conn, blogs []string) error {\n\treturn c.WriteJSON(&struct {\n\t\tMsgType string\n\t\tBlogs []string\n\t}{\n\t\t\"blogs-liking-post\",\n\t\tblogs,\n\t})\n}\n\nfunc sendBlogLikesData(c *websocket.Conn, likes []string) error {\n\treturn c.WriteJSON(&struct {\n\t\tMsgType string\n\t\tLikes []string\n\t}{\n\t\t\"blog-likes\",\n\t\tlikes,\n\t})\n}\n\nfunc SimilarHandler(w http.ResponseWriter, r *http.Request) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tensureNil(err)\n\tdefer conn.Close()\n\n\t\/\/ Figure out what post they want and extract the details we need\n\tspr := &SimilarPostRequest{}\n\terr = conn.ReadJSON(spr)\n\tensureNil(err)\n\tbh, pid, err := extractPostId(spr.PostUri)\n\tif err != nil {\n\t\tmsg := fmt.Errorf(\"invalid post uri: %s\", spr.PostUri)\n\t\tsendErrorNotification(conn, msg)\n\t\treturn\n\t}\n\n\terr = sendBeginNotification(conn, bh, pid)\n\tensureNil(err)\n\n\tlikingBlogs := blogsLikingPost(bh, pid)\n\n\terr = sendBlogsLikingPostData(conn, likingBlogs)\n\tensureNil(err)\n\n\tfor _, blogName := range likingBlogs {\n\t\tfmt.Println(blogName)\n\t\t\/\/ TODO Get page one of that blogs' likes\n\t\t\/\/ TODO Request the other pages in parallel\n\t\t\/\/ TODO Send all of the post data at once for this blog\n\t\t\/\/ message type: blog-likes\n\t\t\/\/ { \"blog-name\": \"some dude\", \"likes\": [...] }\n\t}\n}\n\nfunc ensureNil(x interface{}) {\n\tif x != nil {\n\t\tpanic(x)\n\t}\n}\n\nfunc main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"\/\", HomeHandler)\n\trouter.HandleFunc(\"\/post\", SimilarHandler)\n\n\tn := negroni.New()\n\tn.UseHandler(router)\n\tn.Run(fmt.Sprintf(\":%s\", os.Getenv(\"PORT\")))\n}\n<|endoftext|>"} {"text":"<commit_before>package dns\n\nimport (\n\t\"encoding\/xml\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ Interally used for parsing from and to the XML\ntype XMLKeyDigest struct {\n\tId string `xml:\"id,attr\"`\n\tValidFrom string `xml:\"validFrom,attr\"`\n\tValidUntil string `xml:\"validUntil,attr,omitempty\"`\n\tKeyTag uint16 `xml:\"KeyTag\"`\n\tAlgorithm uint8 `xml:\"Algorithm\"`\n\tDigestType uint8 `xml:\"DigestType\"`\n\tDigest string `xml:\"Digest\"`\n}\n\n\/\/ Interally used for parsing from and to the XML\ntype XMLTrustAnchor struct {\n\tId string `xml:\"id,attr,omitempty\"`\n\tSource string `xml:\"source,attr,omitempty\"`\n\tZone string `xml:\"Zone\"`\n\tKeyDigest []*XMLKeyDigest `xml:\"KeyDigest\"`\n}\n\n\/\/ A TrustAnchor represents the trust anchors used in the DNS root.\ntype TrustAnchor struct {\n\tId string \/\/ TrustAnchor id attribute\n\tSource string \/\/ TrustAnchor source attribute\n\tAnchorId string \/\/ KeyDigest id \n\tAnchor *RR_DS \/\/ The digest encoded as an DS record\n\tValidFrom time.Time \/\/ Validity specification\n\tValidUntil time.Time \/\/ Validaty specification\n}\n\n\/\/ TrustAnchorString convert a TrustAnchor to a string encoded as XML.\nfunc TrustAnchorString(t []*TrustAnchor) string {\n\txta := new(XMLTrustAnchor)\n\txta.KeyDigest = make([]*XMLKeyDigest, 0)\n\tfor _, ta := range t {\n\t\txta.Id = ta.Id \/\/ Sets the everytime, but that is OK.\n\t\txta.Source = ta.Source\n\t\txta.Zone = ta.Anchor.Hdr.Name\n\t\txkd := new(XMLKeyDigest)\n\t\txkd.Id = ta.AnchorId\n\t\txkd.ValidFrom = ta.ValidFrom.Format(\"2006-01-02T15:04:05-07:00\")\n\t\tif !ta.ValidUntil.IsZero() {\n\t\t\txkd.ValidUntil = ta.ValidUntil.Format(\"2006-01-02T15:04:05-07:00\")\n\t\t}\n\t\txkd.KeyTag = ta.Anchor.KeyTag\n\t\txkd.Algorithm = ta.Anchor.Algorithm\n\t\txkd.DigestType = ta.Anchor.DigestType\n\t\txkd.Digest = ta.Anchor.Digest\n\t\txta.KeyDigest = append(xta.KeyDigest, xkd)\n\t}\n\tb, _ := xml.MarshalIndent(xta, \"\", \"\\t\")\n\treturn string(b)\n}\n\n\/\/ ReadTrustAnchor reads a root trust anchor from: http:\/\/data.iana.org\/root-anchors\/root-anchors.xml\n\/\/ and returns the data or an error.\nfunc ReadTrustAnchor(q io.Reader) ([]*TrustAnchor, error) {\n\td := xml.NewDecoder(q)\n\tt := new(XMLTrustAnchor)\n\tif e := d.Decode(t); e != nil {\n\t\treturn nil, e\n\t}\n\tta := make([]*TrustAnchor, 0)\n\tvar err error\n\tfor _, digest := range t.KeyDigest {\n\t\tt1 := new(TrustAnchor)\n\t\tt1.Id = t.Id\n\t\tt1.Source = t.Source\n\t\tt1.AnchorId = digest.Id\n\t\tif t1.ValidFrom, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidFrom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif digest.ValidUntil != \"\" {\n\t\t\tif t1.ValidUntil, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidUntil); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td := new(RR_DS)\n\t\td.Hdr = RR_Header{Name: t.Zone, Class: ClassINET, Rrtype: TypeDS}\n\t\td.KeyTag = digest.KeyTag\n\t\td.Algorithm = digest.Algorithm\n\t\td.DigestType = digest.DigestType\n\t\td.Digest = digest.Digest\n\t\tt1.Anchor = d\n\t\t\/\/ Some checks here too?\n\t\tta = append(ta, t1)\n\t}\n\treturn ta, nil\n}\n\nvar (\n\t\/\/ This the root anchor in XML format.\n\trootAnchorXML = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<TrustAnchor id=\"AD42165F-3B1A-4778-8F42-D34A1D41FD93\" source=\"http:\/\/data.iana.org\/root-anchors\/root-anchors.xml\">\n<Zone>.<\/Zone>\n<KeyDigest id=\"Kjqmt7v\" validFrom=\"2010-07-15T00:00:00+00:00\">\n<KeyTag>19036<\/KeyTag>\n<Algorithm>8<\/Algorithm>\n<DigestType>2<\/DigestType>\n<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5<\/Digest>\n<\/KeyDigest>\n<\/TrustAnchor>`\n\t\/\/ This is the root zone used for priming a resolver.\n\tnamedRoot = `; This file holds the information on root name servers needed to\n; initialize cache of Internet domain name servers\n; (e.g. reference this file in the \"cache . <file>\"\n; configuration file of BIND domain name servers).\n;\n; This file is made available by InterNIC \n; under anonymous FTP as\n; file \/domain\/named.cache\n; on server FTP.INTERNIC.NET\n; -OR- RS.INTERNIC.NET\n;\n; last update: Jun 8, 2011\n; related version of root zone: 2011060800\n;\n; formerly NS.INTERNIC.NET\n;\n. 3600000 IN NS A.ROOT-SERVERS.NET.\nA.ROOT-SERVERS.NET. 3600000 A 198.41.0.4\nA.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:BA3E::2:30\n;\n; FORMERLY NS1.ISI.EDU\n;\n. 3600000 NS B.ROOT-SERVERS.NET.\nB.ROOT-SERVERS.NET. 3600000 A 192.228.79.201\n;\n; FORMERLY C.PSI.NET\n;\n. 3600000 NS C.ROOT-SERVERS.NET.\nC.ROOT-SERVERS.NET. 3600000 A 192.33.4.12\n;\n; FORMERLY TERP.UMD.EDU\n;\n. 3600000 NS D.ROOT-SERVERS.NET.\nD.ROOT-SERVERS.NET. 3600000 A 128.8.10.90\nD.ROOT-SERVERS.NET.\t 3600000 AAAA 2001:500:2D::D\n;\n; FORMERLY NS.NASA.GOV\n;\n. 3600000 NS E.ROOT-SERVERS.NET.\nE.ROOT-SERVERS.NET. 3600000 A 192.203.230.10\n;\n; FORMERLY NS.ISC.ORG\n;\n. 3600000 NS F.ROOT-SERVERS.NET.\nF.ROOT-SERVERS.NET. 3600000 A 192.5.5.241\nF.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2F::F\n;\n; FORMERLY NS.NIC.DDN.MIL\n;\n. 3600000 NS G.ROOT-SERVERS.NET.\nG.ROOT-SERVERS.NET. 3600000 A 192.112.36.4\n;\n; FORMERLY AOS.ARL.ARMY.MIL\n;\n. 3600000 NS H.ROOT-SERVERS.NET.\nH.ROOT-SERVERS.NET. 3600000 A 128.63.2.53\nH.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:1::803F:235\n;\n; FORMERLY NIC.NORDU.NET\n;\n. 3600000 NS I.ROOT-SERVERS.NET.\nI.ROOT-SERVERS.NET. 3600000 A 192.36.148.17\nI.ROOT-SERVERS.NET. 3600000 AAAA 2001:7FE::53\n;\n; OPERATED BY VERISIGN, INC.\n;\n. 3600000 NS J.ROOT-SERVERS.NET.\nJ.ROOT-SERVERS.NET. 3600000 A 192.58.128.30\nJ.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:C27::2:30\n;\n; OPERATED BY RIPE NCC\n;\n. 3600000 NS K.ROOT-SERVERS.NET.\nK.ROOT-SERVERS.NET. 3600000 A 193.0.14.129\nK.ROOT-SERVERS.NET. 3600000 AAAA 2001:7FD::1\n;\n; OPERATED BY ICANN\n;\n. 3600000 NS L.ROOT-SERVERS.NET.\nL.ROOT-SERVERS.NET. 3600000 A 199.7.83.42\nL.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:3::42\n;\n; OPERATED BY WIDE\n;\n. 3600000 NS M.ROOT-SERVERS.NET.\nM.ROOT-SERVERS.NET. 3600000 A 202.12.27.33\nM.ROOT-SERVERS.NET. 3600000 AAAA 2001:DC3::35\n; End of File`\n)\n<commit_msg>kill this - does not belong in a library<commit_after>package dns\n\nimport (\n\t\"encoding\/xml\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ Interally used for parsing from and to the XML\ntype XMLKeyDigest struct {\n\tId string `xml:\"id,attr\"`\n\tValidFrom string `xml:\"validFrom,attr\"`\n\tValidUntil string `xml:\"validUntil,attr,omitempty\"`\n\tKeyTag uint16 `xml:\"KeyTag\"`\n\tAlgorithm uint8 `xml:\"Algorithm\"`\n\tDigestType uint8 `xml:\"DigestType\"`\n\tDigest string `xml:\"Digest\"`\n}\n\n\/\/ Interally used for parsing from and to the XML\ntype XMLTrustAnchor struct {\n\tId string `xml:\"id,attr,omitempty\"`\n\tSource string `xml:\"source,attr,omitempty\"`\n\tZone string `xml:\"Zone\"`\n\tKeyDigest []*XMLKeyDigest `xml:\"KeyDigest\"`\n}\n\n\/\/ A TrustAnchor represents the trust anchors used in the DNS root.\ntype TrustAnchor struct {\n\tId string \/\/ TrustAnchor id attribute\n\tSource string \/\/ TrustAnchor source attribute\n\tAnchorId string \/\/ KeyDigest id \n\tAnchor *RR_DS \/\/ The digest encoded as an DS record\n\tValidFrom time.Time \/\/ Validity specification\n\tValidUntil time.Time \/\/ Validaty specification\n}\n\n\/\/ TrustAnchorString convert a TrustAnchor to a string encoded as XML.\nfunc TrustAnchorString(t []*TrustAnchor) string {\n\txta := new(XMLTrustAnchor)\n\txta.KeyDigest = make([]*XMLKeyDigest, 0)\n\tfor _, ta := range t {\n\t\txta.Id = ta.Id \/\/ Sets the everytime, but that is OK.\n\t\txta.Source = ta.Source\n\t\txta.Zone = ta.Anchor.Hdr.Name\n\t\txkd := new(XMLKeyDigest)\n\t\txkd.Id = ta.AnchorId\n\t\txkd.ValidFrom = ta.ValidFrom.Format(\"2006-01-02T15:04:05-07:00\")\n\t\tif !ta.ValidUntil.IsZero() {\n\t\t\txkd.ValidUntil = ta.ValidUntil.Format(\"2006-01-02T15:04:05-07:00\")\n\t\t}\n\t\txkd.KeyTag = ta.Anchor.KeyTag\n\t\txkd.Algorithm = ta.Anchor.Algorithm\n\t\txkd.DigestType = ta.Anchor.DigestType\n\t\txkd.Digest = ta.Anchor.Digest\n\t\txta.KeyDigest = append(xta.KeyDigest, xkd)\n\t}\n\tb, _ := xml.MarshalIndent(xta, \"\", \"\\t\")\n\treturn string(b)\n}\n\n\/\/ ReadTrustAnchor reads a root trust anchor from: http:\/\/data.iana.org\/root-anchors\/root-anchors.xml\n\/\/ and returns the data or an error.\nfunc ReadTrustAnchor(q io.Reader) ([]*TrustAnchor, error) {\n\td := xml.NewDecoder(q)\n\tt := new(XMLTrustAnchor)\n\tif e := d.Decode(t); e != nil {\n\t\treturn nil, e\n\t}\n\tta := make([]*TrustAnchor, 0)\n\tvar err error\n\tfor _, digest := range t.KeyDigest {\n\t\tt1 := new(TrustAnchor)\n\t\tt1.Id = t.Id\n\t\tt1.Source = t.Source\n\t\tt1.AnchorId = digest.Id\n\t\tif t1.ValidFrom, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidFrom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif digest.ValidUntil != \"\" {\n\t\t\tif t1.ValidUntil, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidUntil); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td := new(RR_DS)\n\t\td.Hdr = RR_Header{Name: t.Zone, Class: ClassINET, Rrtype: TypeDS}\n\t\td.KeyTag = digest.KeyTag\n\t\td.Algorithm = digest.Algorithm\n\t\td.DigestType = digest.DigestType\n\t\td.Digest = digest.Digest\n\t\tt1.Anchor = d\n\t\t\/\/ Some checks here too?\n\t\tta = append(ta, t1)\n\t}\n\treturn ta, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package postgres\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-sql\"\n\t_ \"github.com\/flynn\/pq\"\n)\n\nfunc Open(service, dsn string) (*DB, error) {\n\tset, err := discoverd.NewServiceSet(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb := &DB{set: set, dsn: dsn}\n\tfirstErr := make(chan error)\n\tgo db.followLeader(firstErr)\n\treturn db, <-firstErr\n}\n\ntype DB struct {\n\t*sql.DB\n\n\tset discoverd.ServiceSet\n\tdsn string\n}\n\nvar ErrNoServers = errors.New(\"postgres: no servers found\")\n\nfunc (db *DB) followLeader(firstErr chan<- error) {\n\tfor update := range db.set.Watch(true) {\n\t\tleader := db.set.Leader()\n\t\tif leader == nil || leader.Attrs[\"up\"] != \"true\" {\n\t\t\tif firstErr != nil {\n\t\t\t\tfirstErr <- ErrNoServers\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif !update.Online || update.Addr != leader.Addr {\n\t\t\tcontinue\n\t\t}\n\t\tdsn := fmt.Sprintf(\"host=%s port=%s %s\", leader.Host, leader.Port, db.dsn)\n\t\tif db.DB == nil {\n\t\t\tvar err error\n\t\t\tdb.DB, err = sql.Open(\"postgres\", dsn)\n\t\t\tfirstErr <- err\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tdb.DB.SetDSN(dsn)\n\t\t}\n\t}\n\t\/\/ TODO: reconnect to discoverd here\n}\n\nfunc (db *DB) Close() error {\n\tdb.set.Close()\n\treturn db.DB.Close()\n}\n<commit_msg>pkg\/postgres: Add default for service name<commit_after>package postgres\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-sql\"\n\t_ \"github.com\/flynn\/pq\"\n)\n\nfunc Open(service, dsn string) (*DB, error) {\n\tif service == \"\" {\n\t\tservice = os.Getenv(\"PGSERVICE\")\n\t}\n\tset, err := discoverd.NewServiceSet(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb := &DB{set: set, dsn: dsn}\n\tfirstErr := make(chan error)\n\tgo db.followLeader(firstErr)\n\treturn db, <-firstErr\n}\n\ntype DB struct {\n\t*sql.DB\n\n\tset discoverd.ServiceSet\n\tdsn string\n}\n\nvar ErrNoServers = errors.New(\"postgres: no servers found\")\n\nfunc (db *DB) followLeader(firstErr chan<- error) {\n\tfor update := range db.set.Watch(true) {\n\t\tleader := db.set.Leader()\n\t\tif leader == nil || leader.Attrs[\"up\"] != \"true\" {\n\t\t\tif firstErr != nil {\n\t\t\t\tfirstErr <- ErrNoServers\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif !update.Online || update.Addr != leader.Addr {\n\t\t\tcontinue\n\t\t}\n\t\tdsn := fmt.Sprintf(\"host=%s port=%s %s\", leader.Host, leader.Port, db.dsn)\n\t\tif db.DB == nil {\n\t\t\tvar err error\n\t\t\tdb.DB, err = sql.Open(\"postgres\", dsn)\n\t\t\tfirstErr <- err\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tdb.DB.SetDSN(dsn)\n\t\t}\n\t}\n\t\/\/ TODO: reconnect to discoverd here\n}\n\nfunc (db *DB) Close() error {\n\tdb.set.Close()\n\treturn db.DB.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package postailer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\ntype Postailer struct {\n\tfilePath string\n\tposFile string\n\n\tpos *position\n\toldfile bool\n\trcloser io.ReadCloser\n}\n\ntype position struct {\n\tInode uint `json:\"inode\"`\n\tPos int64 `json:\"pos\"`\n}\n\nfunc Open(filepath, posfile string) (*Postailer, error) {\n\tpt := &Postailer{\n\t\tfilePath: filepath,\n\t\tposFile: posfile,\n\t}\n\terr := pt.open()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pt, nil\n}\n\nfunc loadPos(fname string) (*position, error) {\n\tpos := &position{}\n\t_, err := os.Stat(fname)\n\tif err == nil { \/\/ posfile exists\n\t\tb, err := ioutil.ReadFile(fname)\n\t\tif err == nil {\n\t\t\terr = json.Unmarshal(b, pos)\n\t\t}\n\t\treturn pos, err\n\t}\n\treturn pos, nil\n}\n\nvar errFileNotFoundByInode = fmt.Errorf(\"old file not found\")\n\nfunc (pt *Postailer) open() error {\n\tpt.pos, _ = loadPos(pt.posFile)\n\tfi, err := os.Stat(pt.filePath)\n\t\/\/ XXX may be missing the file for a moment while rotating...\n\tif err != nil {\n\t\treturn err\n\t}\n\tinode := detectInode(fi)\n\tif pt.pos.Inode > 0 && inode != pt.pos.Inode {\n\t\tif oldFile, err := findFileByInode(pt.pos.Inode, filepath.Dir(pt.filePath)); err == nil {\n\t\t\toldf, err := os.Open(oldFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toldfi, _ := oldf.Stat()\n\t\t\tif oldfi.Size() > pt.pos.Pos {\n\t\t\t\toldf.Seek(pt.pos.Pos, 0)\n\t\t\t\tpt.oldfile = true\n\t\t\t\tpt.rcloser = oldf\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else if err != errFileNotFoundByInode {\n\t\t\treturn err\n\t\t}\n\t\tpt.pos.Pos = 0\n\t}\n\tpt.pos.Inode = inode\n\tf, err := os.Open(pt.filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif pt.pos.Pos < fi.Size() {\n\t\tf.Seek(pt.pos.Pos, 0)\n\t} else {\n\t\tpt.pos.Pos = 0\n\t}\n\tpt.rcloser = f\n\treturn nil\n}\n\nfunc (pt *Postailer) Read(p []byte) (int, error) {\n\tn, err := pt.rcloser.Read(p)\n\tpt.pos.Pos += int64(n)\n\tif !(pt.oldfile && ((err == nil && n < len(p)) || (err == io.EOF))) {\n\t\treturn n, err\n\t}\n\tpt.rcloser.Close()\n\tf, err := os.Open(pt.filePath)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tpt.pos = &position{Inode: detectInode(fi)}\n\tpt.oldfile = false\n\tpt.rcloser = f\n\n\tif n == len(p) {\n\t\treturn n, err\n\t}\n\tbuf := make([]byte, len(p)-n)\n\tnn, err := pt.rcloser.Read(buf)\n\tfor i := 0; i < nn; i++ {\n\t\tp[n+i] = buf[i]\n\t}\n\tpt.pos.Pos = int64(nn)\n\treturn n + nn, err\n}\n\nfunc (pt *Postailer) Close() error {\n\tdefer pt.rcloser.Close()\n\treturn savePos(pt.posFile, pt.pos)\n}\n\nfunc savePos(posfile string, pos *position) error {\n\tb, _ := json.Marshal(pos)\n\treturn writeFileAtomically(posfile, b)\n}\n\nfunc detectInode(fi os.FileInfo) uint {\n\tif stat, ok := fi.Sys().(*syscall.Stat_t); ok {\n\t\treturn uint(stat.Ino)\n\t}\n\treturn 0\n}\n\nfunc findFileByInode(inode uint, dir string) (string, error) {\n\tfis, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, fi := range fis {\n\t\tif detectInode(fi) == inode {\n\t\t\treturn filepath.Join(dir, fi.Name()), nil\n\t\t}\n\t}\n\treturn \"\", errFileNotFoundByInode\n}\n\nfunc writeFileAtomically(f string, contents []byte) error {\n\t\/\/ MUST be located on same disk partition\n\ttmpf, err := ioutil.TempFile(filepath.Dir(f), \"tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ os.Remove here works successfully when tmpf.Write fails or os.Rename fails.\n\t\/\/ In successful case, os.Remove fails because the temporary file is already renamed.\n\tdefer os.Remove(tmpf.Name())\n\t_, err = tmpf.Write(contents)\n\ttmpf.Close() \/\/ should be called before rename\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Rename(tmpf.Name(), f)\n}\n<commit_msg>mkdirall before savePos<commit_after>package postailer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\ntype Postailer struct {\n\tfilePath string\n\tposFile string\n\n\tpos *position\n\toldfile bool\n\trcloser io.ReadCloser\n}\n\ntype position struct {\n\tInode uint `json:\"inode\"`\n\tPos int64 `json:\"pos\"`\n}\n\nfunc Open(filepath, posfile string) (*Postailer, error) {\n\tpt := &Postailer{\n\t\tfilePath: filepath,\n\t\tposFile: posfile,\n\t}\n\terr := pt.open()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pt, nil\n}\n\nfunc loadPos(fname string) (*position, error) {\n\tpos := &position{}\n\t_, err := os.Stat(fname)\n\tif err == nil { \/\/ posfile exists\n\t\tb, err := ioutil.ReadFile(fname)\n\t\tif err == nil {\n\t\t\terr = json.Unmarshal(b, pos)\n\t\t}\n\t\treturn pos, err\n\t}\n\treturn pos, nil\n}\n\nvar errFileNotFoundByInode = fmt.Errorf(\"old file not found\")\n\nfunc (pt *Postailer) open() error {\n\tpt.pos, _ = loadPos(pt.posFile)\n\tfi, err := os.Stat(pt.filePath)\n\t\/\/ XXX may be missing the file for a moment while rotating...\n\tif err != nil {\n\t\treturn err\n\t}\n\tinode := detectInode(fi)\n\tif pt.pos.Inode > 0 && inode != pt.pos.Inode {\n\t\tif oldFile, err := findFileByInode(pt.pos.Inode, filepath.Dir(pt.filePath)); err == nil {\n\t\t\toldf, err := os.Open(oldFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toldfi, _ := oldf.Stat()\n\t\t\tif oldfi.Size() > pt.pos.Pos {\n\t\t\t\toldf.Seek(pt.pos.Pos, 0)\n\t\t\t\tpt.oldfile = true\n\t\t\t\tpt.rcloser = oldf\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else if err != errFileNotFoundByInode {\n\t\t\treturn err\n\t\t}\n\t\tpt.pos.Pos = 0\n\t}\n\tpt.pos.Inode = inode\n\tf, err := os.Open(pt.filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif pt.pos.Pos < fi.Size() {\n\t\tf.Seek(pt.pos.Pos, 0)\n\t} else {\n\t\tpt.pos.Pos = 0\n\t}\n\tpt.rcloser = f\n\treturn nil\n}\n\nfunc (pt *Postailer) Read(p []byte) (int, error) {\n\tn, err := pt.rcloser.Read(p)\n\tpt.pos.Pos += int64(n)\n\tif !(pt.oldfile && ((err == nil && n < len(p)) || (err == io.EOF))) {\n\t\treturn n, err\n\t}\n\tpt.rcloser.Close()\n\tf, err := os.Open(pt.filePath)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tpt.pos = &position{Inode: detectInode(fi)}\n\tpt.oldfile = false\n\tpt.rcloser = f\n\n\tif n == len(p) {\n\t\treturn n, err\n\t}\n\tbuf := make([]byte, len(p)-n)\n\tnn, err := pt.rcloser.Read(buf)\n\tfor i := 0; i < nn; i++ {\n\t\tp[n+i] = buf[i]\n\t}\n\tpt.pos.Pos = int64(nn)\n\treturn n + nn, err\n}\n\nfunc (pt *Postailer) Close() error {\n\tdefer pt.rcloser.Close()\n\treturn savePos(pt.posFile, pt.pos)\n}\n\nfunc savePos(posfile string, pos *position) error {\n\tb, _ := json.Marshal(pos)\n\tif err := os.MkdirAll(filepath.Dir(posfile), 0755); err != nil {\n\t\treturn nil\n\t}\n\treturn writeFileAtomically(posfile, b)\n}\n\nfunc detectInode(fi os.FileInfo) uint {\n\tif stat, ok := fi.Sys().(*syscall.Stat_t); ok {\n\t\treturn uint(stat.Ino)\n\t}\n\treturn 0\n}\n\nfunc findFileByInode(inode uint, dir string) (string, error) {\n\tfis, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, fi := range fis {\n\t\tif detectInode(fi) == inode {\n\t\t\treturn filepath.Join(dir, fi.Name()), nil\n\t\t}\n\t}\n\treturn \"\", errFileNotFoundByInode\n}\n\nfunc writeFileAtomically(f string, contents []byte) error {\n\t\/\/ MUST be located on same disk partition\n\ttmpf, err := ioutil.TempFile(filepath.Dir(f), \"tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ os.Remove here works successfully when tmpf.Write fails or os.Rename fails.\n\t\/\/ In successful case, os.Remove fails because the temporary file is already renamed.\n\tdefer os.Remove(tmpf.Name())\n\t_, err = tmpf.Write(contents)\n\ttmpf.Close() \/\/ should be called before rename\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Rename(tmpf.Name(), f)\n}\n<|endoftext|>"} {"text":"<commit_before>package mandrill\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/------------------------------------------------------------\n\/\/ Model - template based email\n\/\/------------------------------------------------------------\n\ntype Email struct {\n\tKey string `json:\"key\"`\n\tTplName string `json:\"template_name\"`\n\tTplContent []KeyVal `json:\"template_content,omitempty\"`\n\tMessage Message `json:\"message\"`\n}\n\ntype KeyVal struct {\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\n\ntype Person struct {\n\tEmail string `json:\"email\"`\n\tName string `json:\"name\"`\n}\n\ntype Message struct {\n\tHtml string `json:\"html\"`\n\tText string `json:\"text\"`\n\tSubject string `json:\"subject\"`\n\tFromEmail string `json:\"from_email\"`\n\tFromName string `json:\"from_name\"`\n\tTo []Person `json:\"to\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tBcc string `json:\"bcc_address,omitempty\"`\n\tTrackOpens bool `json:\"track_opens\"`\n\tTrackClicks bool `json:\"track_clicks\"`\n\tAutoText bool `json:\"auto_text\"`\n\tPreserveRecipients bool `json:\"preserve_recipients\"`\n\tVarsGlob []KeyVal `json:\"global_merge_vars,omitempty\"`\n\tVars []RcptVars `json:\"merge_vars,omitempty\"`\n}\n\ntype RcptVars struct {\n\tRcpt string `json:\"rcpt\"`\n\tVars []KeyVal `json:\"vars\"`\n}\n\n\/\/------------------------------------------------------------\n\/\/ Templated Mail\n\/\/------------------------------------------------------------\n\nfunc NewEmail(tpl, subj string) *Email {\n\treturn &Email{\n\t\tTplName: tpl,\n Message: Message{Subject: subj, AutoText: true},\n\t}\n}\n\n\/\/------------------------------------------------------------\n\/\/ Sending methods\n\/\/------------------------------------------------------------\n\nfunc (m *Email) SetSender(params map[string]string) {\n\tm.Message.FromEmail = params[\"email\"]\n\tm.Message.FromName = params[\"identity\"]\n}\n\nfunc (m *Email) AddTo(params map[string]string) {\n\tm.Message.To = append(\n\t\tm.Message.To,\n\t\tPerson{params[\"email\"], params[\"identity\"]})\n}\n\nfunc (m *Email) ClearTo() {\n\tm.Message.To = []Person{}\n}\n\nfunc (m *Email) SetReplyTo(params map[string]string) {\n\tif m.Message.Headers == nil {\n\t\tm.Message.Headers = map[string]string{}\n\t}\n\tm.Message.Headers[\"Reply-To\"] = params[\"email\"]\n}\n\nfunc (m *Email) SetBcc(params map[string]string) {\n\tm.Message.Bcc = params[\"email\"]\n}\n\n\/\/------------------------------------------------------------\n\/\/ Template methods\n\/\/------------------------------------------------------------\n\nfunc (m *Email) AddTplContent(key, val string) {\n\tm.TplContent = append(m.TplContent, KeyVal{key, val})\n}\n\nfunc (m *Email) AddGlobalVar(key, val string) {\n\tm.Message.VarsGlob = append(\n\t\tm.Message.VarsGlob,\n\t\tKeyVal{key, val})\n}\n\nfunc (m *Email) AddVar(rcpt map[string]string, key, val string) {\n\temail := rcpt[\"email\"]\n\t\/\/ Check if this recipient's values already exist\n\tfor i, v := range m.Message.Vars {\n\t\tif v.Rcpt == email {\n\t\t\tm.Message.Vars[i].Vars = append(v.Vars, KeyVal{key, val})\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Add new recipient\n\tm.Message.Vars = append(\n\t\tm.Message.Vars,\n\t\tRcptVars{\n\t\t\tRcpt: email,\n\t\t\tVars: []KeyVal{KeyVal{key, val}}})\n}\n\n\/\/------------------------------------------------------------\n\/\/ Sending\n\/\/------------------------------------------------------------\n\n\/\/ Sends email.\nfunc (m *Email) Send(apikey string) (err error) {\n\tm.Key = apikey\n\n\t\/\/ DEBUG\n\td, err := json.Marshal(m)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(string(d))\n\tfmt.Println(\"------ Sending ------\")\n\n\tresp, err := post(\n\t\tMNDRL_MESSAGES_TEMPLATE,\n\t\tm)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(resp)\n\treturn\n}\n<commit_msg>template_content must be present<commit_after>package mandrill\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/------------------------------------------------------------\n\/\/ Model - template based email\n\/\/------------------------------------------------------------\n\ntype Email struct {\n\tKey string `json:\"key\"`\n\tTplName string `json:\"template_name\"`\n\tTplContent []KeyVal `json:\"template_content\"`\n\tMessage Message `json:\"message\"`\n}\n\ntype KeyVal struct {\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\n\ntype Person struct {\n\tEmail string `json:\"email\"`\n\tName string `json:\"name\"`\n}\n\ntype Message struct {\n\tHtml string `json:\"html\"`\n\tText string `json:\"text\"`\n\tSubject string `json:\"subject\"`\n\tFromEmail string `json:\"from_email\"`\n\tFromName string `json:\"from_name\"`\n\tTo []Person `json:\"to\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tBcc string `json:\"bcc_address,omitempty\"`\n\tTrackOpens bool `json:\"track_opens\"`\n\tTrackClicks bool `json:\"track_clicks\"`\n\tAutoText bool `json:\"auto_text\"`\n\tPreserveRecipients bool `json:\"preserve_recipients\"`\n\tVarsGlob []KeyVal `json:\"global_merge_vars,omitempty\"`\n\tVars []RcptVars `json:\"merge_vars,omitempty\"`\n}\n\ntype RcptVars struct {\n\tRcpt string `json:\"rcpt\"`\n\tVars []KeyVal `json:\"vars\"`\n}\n\n\/\/------------------------------------------------------------\n\/\/ Templated Mail\n\/\/------------------------------------------------------------\n\nfunc NewEmail(tpl, subj string) *Email {\n\treturn &Email{\n\t\tTplName: tpl,\n Message: Message{Subject: subj, AutoText: true},\n\t}\n}\n\n\/\/------------------------------------------------------------\n\/\/ Sending methods\n\/\/------------------------------------------------------------\n\nfunc (m *Email) SetSender(params map[string]string) {\n\tm.Message.FromEmail = params[\"email\"]\n\tm.Message.FromName = params[\"identity\"]\n}\n\nfunc (m *Email) AddTo(params map[string]string) {\n\tm.Message.To = append(\n\t\tm.Message.To,\n\t\tPerson{params[\"email\"], params[\"identity\"]})\n}\n\nfunc (m *Email) ClearTo() {\n\tm.Message.To = []Person{}\n}\n\nfunc (m *Email) SetReplyTo(params map[string]string) {\n\tif m.Message.Headers == nil {\n\t\tm.Message.Headers = map[string]string{}\n\t}\n\tm.Message.Headers[\"Reply-To\"] = params[\"email\"]\n}\n\nfunc (m *Email) SetBcc(params map[string]string) {\n\tm.Message.Bcc = params[\"email\"]\n}\n\n\/\/------------------------------------------------------------\n\/\/ Template methods\n\/\/------------------------------------------------------------\n\nfunc (m *Email) AddTplContent(key, val string) {\n\tm.TplContent = append(m.TplContent, KeyVal{key, val})\n}\n\nfunc (m *Email) AddGlobalVar(key, val string) {\n\tm.Message.VarsGlob = append(\n\t\tm.Message.VarsGlob,\n\t\tKeyVal{key, val})\n}\n\nfunc (m *Email) AddVar(rcpt map[string]string, key, val string) {\n\temail := rcpt[\"email\"]\n\t\/\/ Check if this recipient's values already exist\n\tfor i, v := range m.Message.Vars {\n\t\tif v.Rcpt == email {\n\t\t\tm.Message.Vars[i].Vars = append(v.Vars, KeyVal{key, val})\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ Add new recipient\n\tm.Message.Vars = append(\n\t\tm.Message.Vars,\n\t\tRcptVars{\n\t\t\tRcpt: email,\n\t\t\tVars: []KeyVal{KeyVal{key, val}}})\n}\n\n\/\/------------------------------------------------------------\n\/\/ Sending\n\/\/------------------------------------------------------------\n\n\/\/ Sends email.\nfunc (m *Email) Send(apikey string) (err error) {\n\tm.Key = apikey\n\n\t\/\/ DEBUG\n\td, err := json.Marshal(m)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(string(d))\n\tfmt.Println(\"------ Sending ------\")\n\n\tresp, err := post(\n\t\tMNDRL_MESSAGES_TEMPLATE,\n\t\tm)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(resp)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage mapper\n\nimport (\n\t\"github.com\/ernestio\/aws-definition-mapper\/definition\"\n\t\"github.com\/ernestio\/aws-definition-mapper\/output\"\n)\n\n\/\/ MapRDSInstances : Maps the rds instances for the input payload on a ernest internal format\nfunc MapRDSInstances(d definition.Definition) []output.RDSInstance {\n\tvar instances []output.RDSInstance\n\n\tfor _, instance := range d.RDSInstances {\n\t\tvar sgroups []string\n\t\tvar networks []string\n\n\t\tfor _, sg := range instance.SecurityGroups {\n\t\t\tsgroups = append(sgroups, d.GeneratedName()+sg)\n\t\t}\n\n\t\tfor _, nw := range instance.Networks {\n\t\t\tnetworks = append(networks, d.GeneratedName()+nw)\n\t\t}\n\n\t\tname := d.GeneratedName() + instance.Name\n\n\t\ti := output.RDSInstance{\n\t\t\tName: name,\n\t\t\tSize: instance.Size,\n\t\t\tEngine: instance.Engine,\n\t\t\tEngineVersion: instance.EngineVersion,\n\t\t\tPort: instance.Port,\n\t\t\tCluster: instance.Cluster,\n\t\t\tPublic: instance.Public,\n\t\t\tMultiAZ: instance.MultiAZ,\n\t\t\tPromotionTier: instance.PromotionTier,\n\t\t\tStorageType: instance.Storage.Type,\n\t\t\tStorageSize: instance.Storage.Size,\n\t\t\tStorageIops: instance.Storage.Iops,\n\t\t\tAvailabilityZone: instance.AvailabilityZone,\n\t\t\tSecurityGroups: instance.SecurityGroups,\n\t\t\tSecurityGroupAWSIDs: mapRDSSecurityGroupIDs(sgroups),\n\t\t\tNetworks: instance.Networks,\n\t\t\tNetworkAWSIDs: mapRDSNetworkIDs(networks),\n\t\t\tDatabaseName: instance.DatabaseName,\n\t\t\tDatabaseUsername: instance.DatabaseUsername,\n\t\t\tDatabasePassword: instance.DatabasePassword,\n\t\t\tAutoUpgrade: instance.AutoUpgrade,\n\t\t\tBackupRetention: instance.Backups.Retention,\n\t\t\tBackupWindow: instance.Backups.Window,\n\t\t\tMaintenanceWindow: instance.MaintenanceWindow,\n\t\t\tReplicationSource: instance.ReplicationSource,\n\t\t\tFinalSnapshot: instance.FinalSnapshot,\n\t\t\tLicense: instance.License,\n\t\t\tTimezone: instance.Timezone,\n\t\t\tTags: mapTagsServiceOnly(d.Name),\n\t\t\tProviderType: \"$(datacenters.items.0.type)\",\n\t\t\tVpcID: \"$(vpcs.items.0.vpc_id)\",\n\t\t\tSecretAccessKey: \"$(datacenters.items.0.aws_secret_access_key)\",\n\t\t\tAccessKeyID: \"$(datacenters.items.0.aws_access_key_id)\",\n\t\t\tDatacenterRegion: \"$(datacenters.items.0.region)\",\n\t\t}\n\n\t\tcluster := d.FindRDSCluster(instance.Cluster)\n\t\tif cluster != nil {\n\t\t\ti.Engine = cluster.Engine\n\t\t\ti.Cluster = d.GeneratedName() + instance.Cluster\n\t\t}\n\n\t\tinstances = append(instances, i)\n\t}\n\treturn instances\n}\n\n\/\/ MapDefinitionRDSInstances : Maps the rds instances from the internal format to the input definition format\nfunc MapDefinitionRDSInstances(m *output.FSMMessage) []definition.RDSInstance {\n\tvar instances []definition.RDSInstance\n\n\tprefix := m.Datacenters.Items[0].Name + \"-\" + m.ServiceName + \"-\"\n\n\tfor _, instance := range m.RDSInstances.Items {\n\t\tsgroups := ComponentNamesFromIDs(m.Firewalls.Items, instance.SecurityGroupAWSIDs)\n\t\tsubnets := ComponentNamesFromIDs(m.Networks.Items, instance.NetworkAWSIDs)\n\n\t\ti := definition.RDSInstance{\n\t\t\tName: instance.Name,\n\t\t\tSize: instance.Size,\n\t\t\tEngine: instance.Engine,\n\t\t\tEngineVersion: instance.EngineVersion,\n\t\t\tPort: instance.Port,\n\t\t\tCluster: instance.Cluster,\n\t\t\tPublic: instance.Public,\n\t\t\tMultiAZ: instance.MultiAZ,\n\t\t\tPromotionTier: instance.PromotionTier,\n\t\t\tAvailabilityZone: instance.AvailabilityZone,\n\t\t\tSecurityGroups: ShortNames(sgroups, prefix),\n\t\t\tNetworks: ShortNames(subnets, prefix),\n\t\t\tDatabaseName: instance.DatabaseName,\n\t\t\tDatabaseUsername: instance.DatabaseUsername,\n\t\t\tDatabasePassword: instance.DatabasePassword,\n\t\t\tAutoUpgrade: instance.AutoUpgrade,\n\t\t\tMaintenanceWindow: instance.MaintenanceWindow,\n\t\t\tReplicationSource: instance.ReplicationSource,\n\t\t\tFinalSnapshot: instance.FinalSnapshot,\n\t\t\tLicense: instance.License,\n\t\t\tTimezone: instance.Timezone,\n\t\t}\n\n\t\ti.Storage.Type = instance.StorageType\n\t\ti.Storage.Size = instance.StorageSize\n\t\ti.Storage.Iops = instance.StorageIops\n\t\ti.Backups.Retention = instance.BackupRetention\n\t\ti.Backups.Window = instance.BackupWindow\n\n\t\tinstances = append(instances, i)\n\t}\n\treturn instances\n}\n\n\/\/ UpdateRDSInstanceValues corrects missing values after an import\nfunc UpdateRDSInstanceValues(m *output.FSMMessage) {\n\tfor i := 0; i < len(m.RDSClusters.Items); i++ {\n\t\tm.RDSInstances.Items[i].ProviderType = \"$(datacenters.items.0.type)\"\n\t\tm.RDSInstances.Items[i].AccessKeyID = \"$(datacenters.items.0.aws_access_key_id)\"\n\t\tm.RDSInstances.Items[i].SecretAccessKey = \"$(datacenters.items.0.aws_secret_access_key)\"\n\t\tm.RDSInstances.Items[i].DatacenterRegion = \"$(datacenters.items.0.region)\"\n\t\tm.RDSInstances.Items[i].VpcID = \"$(vpcs.items.0.vpc_id)\"\n\t\tm.RDSInstances.Items[i].SecurityGroups = ComponentNamesFromIDs(m.Firewalls.Items, m.RDSInstances.Items[i].SecurityGroupAWSIDs)\n\t\tm.RDSInstances.Items[i].Networks = ComponentNamesFromIDs(m.Networks.Items, m.RDSInstances.Items[i].NetworkAWSIDs)\n\t}\n}\n<commit_msg>correctly looping over rds instances<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage mapper\n\nimport (\n\t\"github.com\/ernestio\/aws-definition-mapper\/definition\"\n\t\"github.com\/ernestio\/aws-definition-mapper\/output\"\n)\n\n\/\/ MapRDSInstances : Maps the rds instances for the input payload on a ernest internal format\nfunc MapRDSInstances(d definition.Definition) []output.RDSInstance {\n\tvar instances []output.RDSInstance\n\n\tfor _, instance := range d.RDSInstances {\n\t\tvar sgroups []string\n\t\tvar networks []string\n\n\t\tfor _, sg := range instance.SecurityGroups {\n\t\t\tsgroups = append(sgroups, d.GeneratedName()+sg)\n\t\t}\n\n\t\tfor _, nw := range instance.Networks {\n\t\t\tnetworks = append(networks, d.GeneratedName()+nw)\n\t\t}\n\n\t\tname := d.GeneratedName() + instance.Name\n\n\t\ti := output.RDSInstance{\n\t\t\tName: name,\n\t\t\tSize: instance.Size,\n\t\t\tEngine: instance.Engine,\n\t\t\tEngineVersion: instance.EngineVersion,\n\t\t\tPort: instance.Port,\n\t\t\tCluster: instance.Cluster,\n\t\t\tPublic: instance.Public,\n\t\t\tMultiAZ: instance.MultiAZ,\n\t\t\tPromotionTier: instance.PromotionTier,\n\t\t\tStorageType: instance.Storage.Type,\n\t\t\tStorageSize: instance.Storage.Size,\n\t\t\tStorageIops: instance.Storage.Iops,\n\t\t\tAvailabilityZone: instance.AvailabilityZone,\n\t\t\tSecurityGroups: instance.SecurityGroups,\n\t\t\tSecurityGroupAWSIDs: mapRDSSecurityGroupIDs(sgroups),\n\t\t\tNetworks: instance.Networks,\n\t\t\tNetworkAWSIDs: mapRDSNetworkIDs(networks),\n\t\t\tDatabaseName: instance.DatabaseName,\n\t\t\tDatabaseUsername: instance.DatabaseUsername,\n\t\t\tDatabasePassword: instance.DatabasePassword,\n\t\t\tAutoUpgrade: instance.AutoUpgrade,\n\t\t\tBackupRetention: instance.Backups.Retention,\n\t\t\tBackupWindow: instance.Backups.Window,\n\t\t\tMaintenanceWindow: instance.MaintenanceWindow,\n\t\t\tReplicationSource: instance.ReplicationSource,\n\t\t\tFinalSnapshot: instance.FinalSnapshot,\n\t\t\tLicense: instance.License,\n\t\t\tTimezone: instance.Timezone,\n\t\t\tTags: mapTagsServiceOnly(d.Name),\n\t\t\tProviderType: \"$(datacenters.items.0.type)\",\n\t\t\tVpcID: \"$(vpcs.items.0.vpc_id)\",\n\t\t\tSecretAccessKey: \"$(datacenters.items.0.aws_secret_access_key)\",\n\t\t\tAccessKeyID: \"$(datacenters.items.0.aws_access_key_id)\",\n\t\t\tDatacenterRegion: \"$(datacenters.items.0.region)\",\n\t\t}\n\n\t\tcluster := d.FindRDSCluster(instance.Cluster)\n\t\tif cluster != nil {\n\t\t\ti.Engine = cluster.Engine\n\t\t\ti.Cluster = d.GeneratedName() + instance.Cluster\n\t\t}\n\n\t\tinstances = append(instances, i)\n\t}\n\treturn instances\n}\n\n\/\/ MapDefinitionRDSInstances : Maps the rds instances from the internal format to the input definition format\nfunc MapDefinitionRDSInstances(m *output.FSMMessage) []definition.RDSInstance {\n\tvar instances []definition.RDSInstance\n\n\tprefix := m.Datacenters.Items[0].Name + \"-\" + m.ServiceName + \"-\"\n\n\tfor _, instance := range m.RDSInstances.Items {\n\t\tsgroups := ComponentNamesFromIDs(m.Firewalls.Items, instance.SecurityGroupAWSIDs)\n\t\tsubnets := ComponentNamesFromIDs(m.Networks.Items, instance.NetworkAWSIDs)\n\n\t\ti := definition.RDSInstance{\n\t\t\tName: instance.Name,\n\t\t\tSize: instance.Size,\n\t\t\tEngine: instance.Engine,\n\t\t\tEngineVersion: instance.EngineVersion,\n\t\t\tPort: instance.Port,\n\t\t\tCluster: instance.Cluster,\n\t\t\tPublic: instance.Public,\n\t\t\tMultiAZ: instance.MultiAZ,\n\t\t\tPromotionTier: instance.PromotionTier,\n\t\t\tAvailabilityZone: instance.AvailabilityZone,\n\t\t\tSecurityGroups: ShortNames(sgroups, prefix),\n\t\t\tNetworks: ShortNames(subnets, prefix),\n\t\t\tDatabaseName: instance.DatabaseName,\n\t\t\tDatabaseUsername: instance.DatabaseUsername,\n\t\t\tDatabasePassword: instance.DatabasePassword,\n\t\t\tAutoUpgrade: instance.AutoUpgrade,\n\t\t\tMaintenanceWindow: instance.MaintenanceWindow,\n\t\t\tReplicationSource: instance.ReplicationSource,\n\t\t\tFinalSnapshot: instance.FinalSnapshot,\n\t\t\tLicense: instance.License,\n\t\t\tTimezone: instance.Timezone,\n\t\t}\n\n\t\ti.Storage.Type = instance.StorageType\n\t\ti.Storage.Size = instance.StorageSize\n\t\ti.Storage.Iops = instance.StorageIops\n\t\ti.Backups.Retention = instance.BackupRetention\n\t\ti.Backups.Window = instance.BackupWindow\n\n\t\tinstances = append(instances, i)\n\t}\n\treturn instances\n}\n\n\/\/ UpdateRDSInstanceValues corrects missing values after an import\nfunc UpdateRDSInstanceValues(m *output.FSMMessage) {\n\tfor i := 0; i < len(m.RDSInstances.Items); i++ {\n\t\tm.RDSInstances.Items[i].ProviderType = \"$(datacenters.items.0.type)\"\n\t\tm.RDSInstances.Items[i].AccessKeyID = \"$(datacenters.items.0.aws_access_key_id)\"\n\t\tm.RDSInstances.Items[i].SecretAccessKey = \"$(datacenters.items.0.aws_secret_access_key)\"\n\t\tm.RDSInstances.Items[i].DatacenterRegion = \"$(datacenters.items.0.region)\"\n\t\tm.RDSInstances.Items[i].VpcID = \"$(vpcs.items.0.vpc_id)\"\n\t\tm.RDSInstances.Items[i].SecurityGroups = ComponentNamesFromIDs(m.Firewalls.Items, m.RDSInstances.Items[i].SecurityGroupAWSIDs)\n\t\tm.RDSInstances.Items[i].Networks = ComponentNamesFromIDs(m.Networks.Items, m.RDSInstances.Items[i].NetworkAWSIDs)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package subsumption_test\n\nimport (\n\t. \".\"\n\t\"errors\"\n\tgomock \"github.com\/golang\/mock\/gomock\"\n\t\"testing\"\n)\n\nfunc TestCreate(t *testing.T) {\n\tagent := Agent{}\n\n\tif agent.Size() != 0 {\n\t\tt.Errorf(\"agent.Size() should be 0\")\n\t}\n}\n\nfunc TestAddBehavior(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tagent := Agent{}\n\n\tagent.AddBehavior(behabior)\n\n\tif agent.Size() != 1 {\n\t\tt.Errorf(\"agent.Size() should be 1\")\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Init()\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tagent.Init()\n}\n\nfunc TestPerform(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(true, nil)\n\tbehabior.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be true because behabior is active\")\n\t}\n}\n\nfunc TestPerformMulti(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(true, nil)\n\tsecond.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be false because [second] behabior is active\")\n\t}\n}\n\nfunc TestSenceError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(false, errors.New(\"unknown error\"))\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n\nfunc TestPerformNoActive(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(false, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n<commit_msg>fix to use avsolute path with import<commit_after>package subsumption_test\n\nimport (\n\t\"errors\"\n\t. \"github.com\/cad-san\/go-subsumption\"\n\tgomock \"github.com\/golang\/mock\/gomock\"\n\t\"testing\"\n)\n\nfunc TestCreate(t *testing.T) {\n\tagent := Agent{}\n\n\tif agent.Size() != 0 {\n\t\tt.Errorf(\"agent.Size() should be 0\")\n\t}\n}\n\nfunc TestAddBehavior(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tagent := Agent{}\n\n\tagent.AddBehavior(behabior)\n\n\tif agent.Size() != 1 {\n\t\tt.Errorf(\"agent.Size() should be 1\")\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Init()\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tagent.Init()\n}\n\nfunc TestPerform(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(true, nil)\n\tbehabior.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be true because behabior is active\")\n\t}\n}\n\nfunc TestPerformMulti(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(true, nil)\n\tsecond.EXPECT().Perform().Return(true, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != true {\n\t\tt.Errorf(\"agent.Perform() should be false because [second] behabior is active\")\n\t}\n}\n\nfunc TestSenceError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tbehabior := NewMockBehavior(ctrl)\n\tbehabior.EXPECT().Sense().Return(false, errors.New(\"unknown error\"))\n\n\tagent := Agent{}\n\tagent.AddBehavior(behabior)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"unexpected agent.Perform() fail with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n\nfunc TestPerformNoActive(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tfirst := NewMockBehavior(ctrl)\n\tfirst.EXPECT().Sense().Return(false, nil)\n\n\tsecond := NewMockBehavior(ctrl)\n\tsecond.EXPECT().Sense().Return(false, nil)\n\n\tagent := Agent{}\n\tagent.AddBehavior(first)\n\tagent.AddBehavior(second)\n\n\tret, err := agent.Perform()\n\n\tif err != nil {\n\t\tt.Errorf(\"agent.Perform() failed with : %v\", err)\n\t}\n\tif ret != false {\n\t\tt.Errorf(\"agent.Perform() should be false because no behavior is active\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"exp\/norm\"\n\t\"exp\/regexp\"\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n\t\"utf8\"\n)\n\nfunc main() {\n\tflag.Parse()\n\tloadTestData()\n\tCharacterByCharacterTests()\n\tStandardTests()\n\tPerformanceTest()\n\tif errorCount == 0 {\n\t\tfmt.Println(\"PASS\")\n\t}\n}\n\nconst file = \"NormalizationTest.txt\"\n\nvar url = flag.String(\"url\",\n\t\"http:\/\/www.unicode.org\/Public\/6.0.0\/ucd\/\"+file,\n\t\"URL of Unicode database directory\")\nvar localFiles = flag.Bool(\"local\",\n\tfalse,\n\t\"data files have been copied to the current directory; for debugging only\")\n\nvar logger = log.New(os.Stderr, \"\", log.Lshortfile)\n\n\/\/ This regression test runs the test set in NormalizationTest.txt\n\/\/ (taken from http:\/\/www.unicode.org\/Public\/6.0.0\/ucd\/).\n\/\/\n\/\/ NormalizationTest.txt has form:\n\/\/ @Part0 # Specific cases\n\/\/ #\n\/\/ 1E0A;1E0A;0044 0307;1E0A;0044 0307; # (Ḋ; Ḋ; D◌̇; Ḋ; D◌̇; ) LATIN CAPITAL LETTER D WITH DOT ABOVE\n\/\/ 1E0C;1E0C;0044 0323;1E0C;0044 0323; # (Ḍ; Ḍ; D◌̣; Ḍ; D◌̣; ) LATIN CAPITAL LETTER D WITH DOT BELOW\n\/\/\n\/\/ Each test has 5 columns (c1, c2, c3, c4, c5), where \n\/\/ (c1, c2, c3, c4, c5) == (c1, NFC(c1), NFD(c1), NFKC(c1), NFKD(c1))\n\/\/\n\/\/ CONFORMANCE:\n\/\/ 1. The following invariants must be true for all conformant implementations\n\/\/\n\/\/ NFC\n\/\/ c2 == NFC(c1) == NFC(c2) == NFC(c3)\n\/\/ c4 == NFC(c4) == NFC(c5)\n\/\/\n\/\/ NFD\n\/\/ c3 == NFD(c1) == NFD(c2) == NFD(c3)\n\/\/ c5 == NFD(c4) == NFD(c5)\n\/\/\n\/\/ NFKC\n\/\/ c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)\n\/\/\n\/\/ NFKD\n\/\/ c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)\n\/\/\n\/\/ 2. For every code point X assigned in this version of Unicode that is not\n\/\/ specifically listed in Part 1, the following invariants must be true\n\/\/ for all conformant implementations:\n\/\/\n\/\/ X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X)\n\/\/\n\n\/\/ Column types.\nconst (\n\tcRaw = iota\n\tcNFC\n\tcNFD\n\tcNFKC\n\tcNFKD\n\tcMaxColumns\n)\n\n\/\/ Holds data from NormalizationTest.txt\nvar part []Part\n\ntype Part struct {\n\tname string\n\tnumber int\n\ttests []Test\n}\n\ntype Test struct {\n\tname string\n\tpartnr int\n\tnumber int\n\trune int \/\/ used for character by character test\n\tcols [cMaxColumns]string \/\/ Each has 5 entries, see below.\n}\n\nfunc (t Test) Name() string {\n\tif t.number < 0 {\n\t\treturn part[t.partnr].name\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", part[t.partnr].name, t.number)\n}\n\nvar partRe = regexp.MustCompile(`@Part(\\d) # (.*)\\n`) \/\/ TODO: using $ iso \\n does not work\nvar testRe = regexp.MustCompile(`^` + strings.Repeat(`([\\dA-F ]+);`, 5) + ` # (.*)\\n`)\n\nvar counter int\n\n\/\/ Load the data form NormalizationTest.txt\nfunc loadTestData() {\n\tif *localFiles {\n\t\tpwd, _ := os.Getwd()\n\t\t*url = \"file:\/\/\" + path.Join(pwd, file)\n\t}\n\tt := &http.Transport{}\n\tt.RegisterProtocol(\"file\", http.NewFileTransport(http.Dir(\"\/\")))\n\tc := &http.Client{Transport: t}\n\tresp, err := c.Get(*url)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tlogger.Fatal(\"bad GET status for \"+file, resp.Status)\n\t}\n\tf := resp.Body\n\tdefer f.Close()\n\tinput := bufio.NewReader(f)\n\tfor {\n\t\tline, err := input.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t\tif len(line) == 0 || line[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tm := partRe.FindStringSubmatch(line)\n\t\tif m != nil {\n\t\t\tif len(m) < 3 {\n\t\t\t\tlogger.Fatal(\"Failed to parse Part: \", line)\n\t\t\t}\n\t\t\ti, err := strconv.Atoi(m[1])\n\t\t\tif err != nil {\n\t\t\t\tlogger.Fatal(err)\n\t\t\t}\n\t\t\tname := m[2]\n\t\t\tpart = append(part, Part{name: name[:len(name)-1], number: i})\n\t\t\tcontinue\n\t\t}\n\t\tm = testRe.FindStringSubmatch(line)\n\t\tif m == nil || len(m) < 7 {\n\t\t\tlogger.Fatalf(`Failed to parse: \"%s\" result: %#v`, line, m)\n\t\t}\n\t\ttest := Test{name: m[6], partnr: len(part) - 1, number: counter}\n\t\tcounter++\n\t\tfor j := 1; j < len(m)-1; j++ {\n\t\t\tfor _, split := range strings.Split(m[j], \" \") {\n\t\t\t\tr, err := strconv.Btoui64(split, 16)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif test.rune == 0 {\n\t\t\t\t\t\/\/ save for CharacterByCharacterTests\n\t\t\t\t\ttest.rune = int(r)\n\t\t\t\t}\n\t\t\t\tvar buf [utf8.UTFMax]byte\n\t\t\t\tsz := utf8.EncodeRune(buf[:], int(r))\n\t\t\t\ttest.cols[j-1] += string(buf[:sz])\n\t\t\t}\n\t\t}\n\t\tpart := &part[len(part)-1]\n\t\tpart.tests = append(part.tests, test)\n\t}\n}\n\nvar fstr = []string{\"NFC\", \"NFD\", \"NFKC\", \"NFKD\"}\n\nvar errorCount int\n\nfunc cmpResult(t *Test, name string, f norm.Form, gold, test, result string) {\n\tif gold != result {\n\t\terrorCount++\n\t\tif errorCount > 20 {\n\t\t\treturn\n\t\t}\n\t\tst, sr, sg := []int(test), []int(result), []int(gold)\n\t\tlogger.Printf(\"%s:%s: %s(%X)=%X; want:%X: %s\",\n\t\t\tt.Name(), name, fstr[f], st, sr, sg, t.name)\n\t}\n}\n\nfunc cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bool) {\n\tif result != want {\n\t\terrorCount++\n\t\tif errorCount > 20 {\n\t\t\treturn\n\t\t}\n\t\tlogger.Printf(\"%s:%s: %s(%X)=%v; want: %v\", t.Name(), name, fstr[f], []int(test), result, want)\n\t}\n}\n\nfunc doTest(t *Test, f norm.Form, gold, test string) {\n\tresult := f.Bytes([]byte(test))\n\tcmpResult(t, \"Bytes\", f, gold, test, string(result))\n\tfor i := range test {\n\t\tout := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)\n\t\tcmpResult(t, fmt.Sprintf(\":Append:%d\", i), f, gold, test, string(out))\n\t}\n\tcmpIsNormal(t, \"IsNormal\", f, test, f.IsNormal([]byte(test)), test == gold)\n}\n\nfunc doConformanceTests(t *Test, partn int) {\n\tfor i := 0; i <= 2; i++ {\n\t\tdoTest(t, norm.NFC, t.cols[1], t.cols[i])\n\t\tdoTest(t, norm.NFD, t.cols[2], t.cols[i])\n\t\tdoTest(t, norm.NFKC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFKD, t.cols[4], t.cols[i])\n\t}\n\tfor i := 3; i <= 4; i++ {\n\t\tdoTest(t, norm.NFC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFD, t.cols[4], t.cols[i])\n\t\tdoTest(t, norm.NFKC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFKD, t.cols[4], t.cols[i])\n\t}\n}\n\nfunc CharacterByCharacterTests() {\n\ttests := part[1].tests\n\tlast := 0\n\tfor i := 0; i <= len(tests); i++ { \/\/ last one is special case\n\t\tvar rune int\n\t\tif i == len(tests) {\n\t\t\trune = 0x2FA1E \/\/ Don't have to go to 0x10FFFF\n\t\t} else {\n\t\t\trune = tests[i].rune\n\t\t}\n\t\tfor last++; last < rune; last++ {\n\t\t\t\/\/ Check all characters that were not explicitly listed in the test.\n\t\t\tt := &Test{partnr: 1, number: -1}\n\t\t\tchar := string(last)\n\t\t\tdoTest(t, norm.NFC, char, char)\n\t\t\tdoTest(t, norm.NFD, char, char)\n\t\t\tdoTest(t, norm.NFKC, char, char)\n\t\t\tdoTest(t, norm.NFKD, char, char)\n\t\t}\n\t\tif i < len(tests) {\n\t\t\tdoConformanceTests(&tests[i], 1)\n\t\t}\n\t}\n}\n\nfunc StandardTests() {\n\tfor _, j := range []int{0, 2, 3} {\n\t\tfor _, test := range part[j].tests {\n\t\t\tdoConformanceTests(&test, j)\n\t\t}\n\t}\n}\n\n\/\/ PerformanceTest verifies that normalization is O(n). If any of the\n\/\/ code does not properly check for maxCombiningChars, normalization\n\/\/ may exhibit O(n**2) behavior.\nfunc PerformanceTest() {\n\truntime.GOMAXPROCS(2)\n\tsuccess := make(chan bool, 1)\n\tgo func() {\n\t\tbuf := bytes.Repeat([]byte(\"\\u035D\"), 1024*1024)\n\t\tbuf = append(buf, []byte(\"\\u035B\")...)\n\t\tnorm.NFC.Append(nil, buf...)\n\t\tsuccess <- true\n\t}()\n\ttimeout := time.After(1e9)\n\tselect {\n\tcase <-success:\n\t\t\/\/ test completed before the timeout\n\tcase <-timeout:\n\t\terrorCount++\n\t\tlogger.Printf(`unexpectedly long time to complete PerformanceTest`)\n\t}\n}\n<commit_msg>exp\/norm: Adopt regexp to exp\/regexp semantics.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"exp\/norm\"\n\t\"exp\/regexp\"\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n\t\"utf8\"\n)\n\nfunc main() {\n\tflag.Parse()\n\tloadTestData()\n\tCharacterByCharacterTests()\n\tStandardTests()\n\tPerformanceTest()\n\tif errorCount == 0 {\n\t\tfmt.Println(\"PASS\")\n\t}\n}\n\nconst file = \"NormalizationTest.txt\"\n\nvar url = flag.String(\"url\",\n\t\"http:\/\/www.unicode.org\/Public\/6.0.0\/ucd\/\"+file,\n\t\"URL of Unicode database directory\")\nvar localFiles = flag.Bool(\"local\",\n\tfalse,\n\t\"data files have been copied to the current directory; for debugging only\")\n\nvar logger = log.New(os.Stderr, \"\", log.Lshortfile)\n\n\/\/ This regression test runs the test set in NormalizationTest.txt\n\/\/ (taken from http:\/\/www.unicode.org\/Public\/6.0.0\/ucd\/).\n\/\/\n\/\/ NormalizationTest.txt has form:\n\/\/ @Part0 # Specific cases\n\/\/ #\n\/\/ 1E0A;1E0A;0044 0307;1E0A;0044 0307; # (Ḋ; Ḋ; D◌̇; Ḋ; D◌̇; ) LATIN CAPITAL LETTER D WITH DOT ABOVE\n\/\/ 1E0C;1E0C;0044 0323;1E0C;0044 0323; # (Ḍ; Ḍ; D◌̣; Ḍ; D◌̣; ) LATIN CAPITAL LETTER D WITH DOT BELOW\n\/\/\n\/\/ Each test has 5 columns (c1, c2, c3, c4, c5), where \n\/\/ (c1, c2, c3, c4, c5) == (c1, NFC(c1), NFD(c1), NFKC(c1), NFKD(c1))\n\/\/\n\/\/ CONFORMANCE:\n\/\/ 1. The following invariants must be true for all conformant implementations\n\/\/\n\/\/ NFC\n\/\/ c2 == NFC(c1) == NFC(c2) == NFC(c3)\n\/\/ c4 == NFC(c4) == NFC(c5)\n\/\/\n\/\/ NFD\n\/\/ c3 == NFD(c1) == NFD(c2) == NFD(c3)\n\/\/ c5 == NFD(c4) == NFD(c5)\n\/\/\n\/\/ NFKC\n\/\/ c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)\n\/\/\n\/\/ NFKD\n\/\/ c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)\n\/\/\n\/\/ 2. For every code point X assigned in this version of Unicode that is not\n\/\/ specifically listed in Part 1, the following invariants must be true\n\/\/ for all conformant implementations:\n\/\/\n\/\/ X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X)\n\/\/\n\n\/\/ Column types.\nconst (\n\tcRaw = iota\n\tcNFC\n\tcNFD\n\tcNFKC\n\tcNFKD\n\tcMaxColumns\n)\n\n\/\/ Holds data from NormalizationTest.txt\nvar part []Part\n\ntype Part struct {\n\tname string\n\tnumber int\n\ttests []Test\n}\n\ntype Test struct {\n\tname string\n\tpartnr int\n\tnumber int\n\trune int \/\/ used for character by character test\n\tcols [cMaxColumns]string \/\/ Each has 5 entries, see below.\n}\n\nfunc (t Test) Name() string {\n\tif t.number < 0 {\n\t\treturn part[t.partnr].name\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", part[t.partnr].name, t.number)\n}\n\nvar partRe = regexp.MustCompile(`@Part(\\d) # (.*)\\n$`)\nvar testRe = regexp.MustCompile(`^` + strings.Repeat(`([\\dA-F ]+);`, 5) + ` # (.*)\\n?$`)\n\nvar counter int\n\n\/\/ Load the data form NormalizationTest.txt\nfunc loadTestData() {\n\tif *localFiles {\n\t\tpwd, _ := os.Getwd()\n\t\t*url = \"file:\/\/\" + path.Join(pwd, file)\n\t}\n\tt := &http.Transport{}\n\tt.RegisterProtocol(\"file\", http.NewFileTransport(http.Dir(\"\/\")))\n\tc := &http.Client{Transport: t}\n\tresp, err := c.Get(*url)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tlogger.Fatal(\"bad GET status for \"+file, resp.Status)\n\t}\n\tf := resp.Body\n\tdefer f.Close()\n\tinput := bufio.NewReader(f)\n\tfor {\n\t\tline, err := input.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == os.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t\tif len(line) == 0 || line[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tm := partRe.FindStringSubmatch(line)\n\t\tif m != nil {\n\t\t\tif len(m) < 3 {\n\t\t\t\tlogger.Fatal(\"Failed to parse Part: \", line)\n\t\t\t}\n\t\t\ti, err := strconv.Atoi(m[1])\n\t\t\tif err != nil {\n\t\t\t\tlogger.Fatal(err)\n\t\t\t}\n\t\t\tname := m[2]\n\t\t\tpart = append(part, Part{name: name[:len(name)-1], number: i})\n\t\t\tcontinue\n\t\t}\n\t\tm = testRe.FindStringSubmatch(line)\n\t\tif m == nil || len(m) < 7 {\n\t\t\tlogger.Fatalf(`Failed to parse: \"%s\" result: %#v`, line, m)\n\t\t}\n\t\ttest := Test{name: m[6], partnr: len(part) - 1, number: counter}\n\t\tcounter++\n\t\tfor j := 1; j < len(m)-1; j++ {\n\t\t\tfor _, split := range strings.Split(m[j], \" \") {\n\t\t\t\tr, err := strconv.Btoui64(split, 16)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif test.rune == 0 {\n\t\t\t\t\t\/\/ save for CharacterByCharacterTests\n\t\t\t\t\ttest.rune = int(r)\n\t\t\t\t}\n\t\t\t\tvar buf [utf8.UTFMax]byte\n\t\t\t\tsz := utf8.EncodeRune(buf[:], int(r))\n\t\t\t\ttest.cols[j-1] += string(buf[:sz])\n\t\t\t}\n\t\t}\n\t\tpart := &part[len(part)-1]\n\t\tpart.tests = append(part.tests, test)\n\t}\n}\n\nvar fstr = []string{\"NFC\", \"NFD\", \"NFKC\", \"NFKD\"}\n\nvar errorCount int\n\nfunc cmpResult(t *Test, name string, f norm.Form, gold, test, result string) {\n\tif gold != result {\n\t\terrorCount++\n\t\tif errorCount > 20 {\n\t\t\treturn\n\t\t}\n\t\tst, sr, sg := []int(test), []int(result), []int(gold)\n\t\tlogger.Printf(\"%s:%s: %s(%X)=%X; want:%X: %s\",\n\t\t\tt.Name(), name, fstr[f], st, sr, sg, t.name)\n\t}\n}\n\nfunc cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bool) {\n\tif result != want {\n\t\terrorCount++\n\t\tif errorCount > 20 {\n\t\t\treturn\n\t\t}\n\t\tlogger.Printf(\"%s:%s: %s(%X)=%v; want: %v\", t.Name(), name, fstr[f], []int(test), result, want)\n\t}\n}\n\nfunc doTest(t *Test, f norm.Form, gold, test string) {\n\tresult := f.Bytes([]byte(test))\n\tcmpResult(t, \"Bytes\", f, gold, test, string(result))\n\tfor i := range test {\n\t\tout := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)\n\t\tcmpResult(t, fmt.Sprintf(\":Append:%d\", i), f, gold, test, string(out))\n\t}\n\tcmpIsNormal(t, \"IsNormal\", f, test, f.IsNormal([]byte(test)), test == gold)\n}\n\nfunc doConformanceTests(t *Test, partn int) {\n\tfor i := 0; i <= 2; i++ {\n\t\tdoTest(t, norm.NFC, t.cols[1], t.cols[i])\n\t\tdoTest(t, norm.NFD, t.cols[2], t.cols[i])\n\t\tdoTest(t, norm.NFKC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFKD, t.cols[4], t.cols[i])\n\t}\n\tfor i := 3; i <= 4; i++ {\n\t\tdoTest(t, norm.NFC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFD, t.cols[4], t.cols[i])\n\t\tdoTest(t, norm.NFKC, t.cols[3], t.cols[i])\n\t\tdoTest(t, norm.NFKD, t.cols[4], t.cols[i])\n\t}\n}\n\nfunc CharacterByCharacterTests() {\n\ttests := part[1].tests\n\tlast := 0\n\tfor i := 0; i <= len(tests); i++ { \/\/ last one is special case\n\t\tvar rune int\n\t\tif i == len(tests) {\n\t\t\trune = 0x2FA1E \/\/ Don't have to go to 0x10FFFF\n\t\t} else {\n\t\t\trune = tests[i].rune\n\t\t}\n\t\tfor last++; last < rune; last++ {\n\t\t\t\/\/ Check all characters that were not explicitly listed in the test.\n\t\t\tt := &Test{partnr: 1, number: -1}\n\t\t\tchar := string(last)\n\t\t\tdoTest(t, norm.NFC, char, char)\n\t\t\tdoTest(t, norm.NFD, char, char)\n\t\t\tdoTest(t, norm.NFKC, char, char)\n\t\t\tdoTest(t, norm.NFKD, char, char)\n\t\t}\n\t\tif i < len(tests) {\n\t\t\tdoConformanceTests(&tests[i], 1)\n\t\t}\n\t}\n}\n\nfunc StandardTests() {\n\tfor _, j := range []int{0, 2, 3} {\n\t\tfor _, test := range part[j].tests {\n\t\t\tdoConformanceTests(&test, j)\n\t\t}\n\t}\n}\n\n\/\/ PerformanceTest verifies that normalization is O(n). If any of the\n\/\/ code does not properly check for maxCombiningChars, normalization\n\/\/ may exhibit O(n**2) behavior.\nfunc PerformanceTest() {\n\truntime.GOMAXPROCS(2)\n\tsuccess := make(chan bool, 1)\n\tgo func() {\n\t\tbuf := bytes.Repeat([]byte(\"\\u035D\"), 1024*1024)\n\t\tbuf = append(buf, []byte(\"\\u035B\")...)\n\t\tnorm.NFC.Append(nil, buf...)\n\t\tsuccess <- true\n\t}()\n\ttimeout := time.After(1e9)\n\tselect {\n\tcase <-success:\n\t\t\/\/ test completed before the timeout\n\tcase <-timeout:\n\t\terrorCount++\n\t\tlogger.Printf(`unexpectedly long time to complete PerformanceTest`)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage netchan\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\ntype value struct {\n\ti int\n\ts string\n}\n\nconst count = 10\n\nfunc exportSend(exp *Exporter, t *testing.T) {\n\tch := make(chan value)\n\terr := exp.Export(\"exportedSend\", ch, Send, new(value))\n\tif err != nil {\n\t\tt.Fatal(\"exportSend:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tch <- value{23 + i, \"hello\"}\n\t}\n}\n\nfunc exportReceive(exp *Exporter, t *testing.T) {\n\tch := make(chan value)\n\terr := exp.Export(\"exportedRecv\", ch, Recv, new(value))\n\tif err != nil {\n\t\tt.Fatal(\"exportReceive:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tv := <-ch\n\t\tfmt.Printf(\"%v\\n\", v)\n\t\tif v.i != 45+i || v.s != \"hello\" {\n\t\t\tt.Errorf(\"export Receive: bad value: expected 4%d, hello; got %+v\", 45+i, v)\n\t\t}\n\t}\n}\n\nfunc importReceive(imp *Importer, t *testing.T) {\n\tch := make(chan value)\n\terr := imp.ImportNValues(\"exportedSend\", ch, Recv, new(value), count)\n\tif err != nil {\n\t\tt.Fatal(\"importReceive:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tv := <-ch\n\t\tfmt.Printf(\"%v\\n\", v)\n\t\tif v.i != 23+i || v.s != \"hello\" {\n\t\t\tt.Errorf(\"importReceive: bad value: expected %d, hello; got %+v\", 23+i, v)\n\t\t}\n\t}\n}\n\nfunc importSend(imp *Importer, t *testing.T) {\n\tch := make(chan value)\n\terr := imp.ImportNValues(\"exportedRecv\", ch, Send, new(value), count)\n\tif err != nil {\n\t\tt.Fatal(\"importSend:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tch <- value{45 + i, \"hello\"}\n\t}\n}\n\nfunc TestExportSendImportReceive(t *testing.T) {\n\texp, err := NewExporter(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatal(\"new exporter:\", err)\n\t}\n\timp, err := NewImporter(\"tcp\", exp.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"new importer:\", err)\n\t}\n\tgo exportSend(exp, t)\n\timportReceive(imp, t)\n}\n\nfunc TestExportReceiveImportSend(t *testing.T) {\n\texp, err := NewExporter(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatal(\"new exporter:\", err)\n\t}\n\timp, err := NewImporter(\"tcp\", exp.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"new importer:\", err)\n\t}\n\tgo importSend(imp, t)\n\texportReceive(exp, t)\n}\n<commit_msg>netchan: be less chatty during gotest<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage netchan\n\nimport \"testing\"\n\ntype value struct {\n\ti int\n\ts string\n}\n\nconst count = 10\n\nfunc exportSend(exp *Exporter, t *testing.T) {\n\tch := make(chan value)\n\terr := exp.Export(\"exportedSend\", ch, Send, new(value))\n\tif err != nil {\n\t\tt.Fatal(\"exportSend:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tch <- value{23 + i, \"hello\"}\n\t}\n}\n\nfunc exportReceive(exp *Exporter, t *testing.T) {\n\tch := make(chan value)\n\terr := exp.Export(\"exportedRecv\", ch, Recv, new(value))\n\tif err != nil {\n\t\tt.Fatal(\"exportReceive:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tv := <-ch\n\t\tif v.i != 45+i || v.s != \"hello\" {\n\t\t\tt.Errorf(\"export Receive: bad value: expected 4%d, hello; got %+v\", 45+i, v)\n\t\t}\n\t}\n}\n\nfunc importReceive(imp *Importer, t *testing.T) {\n\tch := make(chan value)\n\terr := imp.ImportNValues(\"exportedSend\", ch, Recv, new(value), count)\n\tif err != nil {\n\t\tt.Fatal(\"importReceive:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tv := <-ch\n\t\tif v.i != 23+i || v.s != \"hello\" {\n\t\t\tt.Errorf(\"importReceive: bad value: expected %d, hello; got %+v\", 23+i, v)\n\t\t}\n\t}\n}\n\nfunc importSend(imp *Importer, t *testing.T) {\n\tch := make(chan value)\n\terr := imp.ImportNValues(\"exportedRecv\", ch, Send, new(value), count)\n\tif err != nil {\n\t\tt.Fatal(\"importSend:\", err)\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tch <- value{45 + i, \"hello\"}\n\t}\n}\n\nfunc TestExportSendImportReceive(t *testing.T) {\n\texp, err := NewExporter(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatal(\"new exporter:\", err)\n\t}\n\timp, err := NewImporter(\"tcp\", exp.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"new importer:\", err)\n\t}\n\tgo exportSend(exp, t)\n\timportReceive(imp, t)\n}\n\nfunc TestExportReceiveImportSend(t *testing.T) {\n\texp, err := NewExporter(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatal(\"new exporter:\", err)\n\t}\n\timp, err := NewImporter(\"tcp\", exp.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"new importer:\", err)\n\t}\n\tgo importSend(imp, t)\n\texportReceive(exp, t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !cgo,!windows,!plan9\n\npackage user\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nfunc init() {\n\timplemented = false\n}\n\nfunc current() (*User, error) {\n\treturn nil, fmt.Errorf(\"user: Current not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n\nfunc lookup(username string) (*User, error) {\n\treturn nil, fmt.Errorf(\"user: Lookup not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n\nfunc lookupId(uid string) (*User, error) {\n\treturn nil, fmt.Errorf(\"user: LookupId not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n<commit_msg>Need to add !akaros to list in lookup_stubs.go<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !cgo,!windows,!plan9,!akaros\n\npackage user\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nfunc init() {\n\timplemented = false\n}\n\nfunc current() (*User, error) {\n\treturn nil, fmt.Errorf(\"user: Current not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n\nfunc lookup(username string) (*User, error) {\n\treturn nil, fmt.Errorf(\"user: Lookup not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n\nfunc lookupId(uid string) (*User, error) {\n\treturn nil, fmt.Errorf(\"user: LookupId not implemented on %s\/%s\", runtime.GOOS, runtime.GOARCH)\n}\n<|endoftext|>"} {"text":"<commit_before>package printer\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"unicode\"\n\n\t\"github.com\/8byt\/gox\/ast\"\n\t\"github.com\/8byt\/gox\/token\"\n)\n\n\/\/ Map html-style to actual js event names\nvar eventMap = map[string]string{\n\t\"onAbort\": \"abort\",\n\t\"onCancel\": \"cancel\",\n\t\"onCanPlay\": \"canplay\",\n\t\"onCanPlaythrough\": \"canplaythrough\",\n\t\"onChange\": \"change\",\n\t\"onClick\": \"click\",\n\t\"onCueChange\": \"cuechange\",\n\t\"onDblClick\": \"dblclick\",\n\t\"onDurationChange\": \"durationchange\",\n\t\"onEmptied\": \"emptied\",\n\t\"onEnded\": \"ended\",\n\t\"onInput\": \"input\",\n\t\"onInvalid\": \"invalid\",\n\t\"onKeyDown\": \"keydown\",\n\t\"onKeyPress\": \"keypress\",\n\t\"onKeyUp\": \"keyup\",\n\t\"onLoadedData\": \"loadeddata\",\n\t\"onLoadedMetadata\": \"loadedmetadata\",\n\t\"onLoadStart\": \"loadstart\",\n\t\"onMouseDown\": \"mousedown\",\n\t\"onMouseEnter\": \"mouseenter\",\n\t\"onMouseleave\": \"mouseleave\",\n\t\"onMouseMove\": \"mousemove\",\n\t\"onMouseOut\": \"mouseout\",\n\t\"onMouseOver\": \"mouseover\",\n\t\"onMouseUp\": \"mouseup\",\n\t\"onMouseWheel\": \"mousewheel\",\n\t\"onPause\": \"pause\",\n\t\"onPlay\": \"play\",\n\t\"onPlaying\": \"playing\",\n\t\"onProgress\": \"progress\",\n\t\"onRateChange\": \"ratechange\",\n\t\"onReset\": \"reset\",\n\t\"onSeeked\": \"seeked\",\n\t\"onSeeking\": \"seeking\",\n\t\"onSelect\": \"select\",\n\t\"onShow\": \"show\",\n\t\"onStalled\": \"stalled\",\n\t\"onSubmit\": \"submit\",\n\t\"onSuspend\": \"suspend\",\n\t\"onTimeUpdate\": \"timeupdate\",\n\t\"onToggle\": \"toggle\",\n\t\"onVolumeChange\": \"volumechange\",\n\t\"onWaiting\": \"waiting\",\n}\n\nvar attrMap = map[string]string{\n\t\"autofocus\": \"autofocus\",\n\t\"checked\": \"checked\",\n\t\"class\": \"className\",\n\t\"for\": \"htmlFor\",\n\t\"href\": \"href\",\n\t\"id\": \"id\",\n\t\"placeholder\": \"placeholder\",\n\t\"src\": \"src\",\n\t\"type\": \"type\",\n\t\"value\": \"value\",\n}\n\nfunc goxToVecty(gox *ast.GoxExpr) ast.Expr {\n\tisComponent := unicode.IsUpper(rune(gox.TagName.Name[0]))\n\n\tif isComponent {\n\t\treturn newComponent(gox)\n\t} else {\n\t\targs := []ast.Expr{\n\t\t\t&ast.BasicLit{\n\t\t\t\tKind: token.STRING,\n\t\t\t\tValue: strconv.Quote(gox.TagName.Name),\n\t\t\t}}\n\n\t\t\/\/ Add the attributes\n\t\targs = append(args, mapProps(gox.Attrs)...)\n\n\t\t\/\/ Add the contents\n\t\tfor _, expr := range gox.X {\n\t\t\tswitch expr := expr.(type) {\n\t\t\t\/\/ TODO figure out what's a better thing to do here\n\t\t\t\/\/ do we want to error on compile or figure out what to do based on context?\n\t\t\t\/\/ (I think the latter)\n\t\t\t\/\/ Fallback to regular behavior, don't wrap this yet\n\t\t\t\/\/case *ast.GoExpr:\n\t\t\t\/\/\te := newCallExpr(\n\t\t\t\/\/\t\tnewSelectorExpr(\"vecty\", \"Text\"),\n\t\t\t\/\/\t\t[]ast.Expr{expr},\n\t\t\t\/\/\t)\n\t\t\t\/\/\targs = append(args, e)\n\n\t\t\tcase *ast.BareWordsExpr:\n\t\t\t\tif len(strings.TrimSpace(expr.Value)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\te := newCallExpr(\n\t\t\t\t\tnewSelectorExpr(\"vecty\", \"Text\"),\n\t\t\t\t\t[]ast.Expr{expr},\n\t\t\t\t)\n\t\t\t\targs = append(args, e)\n\t\t\tdefault:\n\t\t\t\targs = append(args, expr)\n\t\t\t}\n\t\t}\n\n\t\treturn newCallExpr(\n\t\t\tnewSelectorExpr(\"vecty\", \"Tag\"),\n\t\t\targs,\n\t\t)\n\t}\n}\n\nfunc newSelectorExpr(x, sel string) *ast.SelectorExpr {\n\treturn &ast.SelectorExpr{\n\t\tX: ast.NewIdent(x),\n\t\tSel: ast.NewIdent(sel)}\n}\n\nfunc newCallExpr(fun ast.Expr, args []ast.Expr) *ast.CallExpr {\n\treturn &ast.CallExpr{\n\t\tFun: fun,\n\t\tArgs: args,\n\t\tEllipsis: token.NoPos, Lparen: token.NoPos, Rparen: token.NoPos}\n}\n\nfunc newComponent(gox *ast.GoxExpr) *ast.UnaryExpr {\n\tvar args []ast.Expr\n\tfor _, attr := range gox.Attrs {\n\t\tif attr.Rhs == nil { \/\/ default to true like JSX\n\t\t\tattr.Rhs = ast.NewIdent(\"true\")\n\t\t}\n\t\texpr := &ast.KeyValueExpr{\n\t\t\tKey: ast.NewIdent(attr.Lhs.Name),\n\t\t\tColon: token.NoPos,\n\t\t\tValue: attr.Rhs,\n\t\t}\n\n\t\targs = append(args, expr)\n\t}\n\n\treturn &ast.UnaryExpr{\n\t\tOpPos: token.NoPos,\n\t\tOp: token.AND,\n\t\tX: &ast.CompositeLit{\n\t\t\tType: ast.NewIdent(gox.TagName.Name),\n\t\t\tLbrace: token.NoPos,\n\t\t\tElts: args,\n\t\t\tRbrace: token.NoPos,\n\t\t},\n\t}\n}\n\nfunc mapProps(goxAttrs []*ast.GoxAttrStmt) []ast.Expr {\n\tvar mapped = []ast.Expr{}\n\tfor _, attr := range goxAttrs {\n\t\t\/\/ set default of Rhs to true if none provided\n\t\tif attr.Rhs == nil { \/\/ default to true like JSX\n\t\t\tattr.Rhs = ast.NewIdent(\"true\")\n\t\t}\n\n\t\tvar expr ast.Expr\n\n\t\t\/\/ if prop is an event listener (e.g. \"onClick\")\n\t\tif _, ok := eventMap[attr.Lhs.Name]; ok {\n\t\t\texpr = newEventListener(attr)\n\t\t} else if mappedName, ok := attrMap[attr.Lhs.Name]; ok {\n\t\t\t\/\/ if it's a vecty controlled prop\n\t\t\texpr = newCallExpr(\n\t\t\t\tnewSelectorExpr(\"vecty\", \"Property\"),\n\t\t\t\t[]ast.Expr{\n\t\t\t\t\t&ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(mappedName)},\n\t\t\t\t\tattr.Rhs,\n\t\t\t\t},\n\t\t\t)\n\t\t} else {\n\t\t\t\/\/ if prop is a normal attribute\n\t\t\texpr = newCallExpr(\n\t\t\t\tnewSelectorExpr(\"vecty\", \"Attribute\"),\n\t\t\t\t[]ast.Expr{\n\t\t\t\t\t&ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(attr.Lhs.Name)},\n\t\t\t\t\tattr.Rhs,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\tmapped = append(mapped, expr)\n\t}\n\n\treturn mapped\n}\n\nfunc newEventListener(goxAttr *ast.GoxAttrStmt) ast.Expr {\n\treturn &ast.UnaryExpr{\n\t\tOpPos: token.NoPos,\n\t\tOp: token.AND,\n\t\tX: &ast.CompositeLit{\n\t\t\tType: newSelectorExpr(\"vecty\", \"EventListener\"),\n\t\t\tLbrace: token.NoPos,\n\t\t\tElts: []ast.Expr{\n\t\t\t\t&ast.KeyValueExpr{\n\t\t\t\t\tKey: ast.NewIdent(\"Name\"),\n\t\t\t\t\tValue: &ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(eventMap[goxAttr.Lhs.Name]),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&ast.KeyValueExpr{\n\t\t\t\t\tKey: ast.NewIdent(\"Listener\"),\n\t\t\t\t\tValue: goxAttr.Rhs,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRbrace: token.NoPos,\n\t\t},\n\t}\n}\n<commit_msg>Wrap attributes in vecty.Markup<commit_after>package printer\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"unicode\"\n\n\t\"github.com\/8byt\/gox\/ast\"\n\t\"github.com\/8byt\/gox\/token\"\n)\n\n\/\/ Map html-style to actual js event names\nvar eventMap = map[string]string{\n\t\"onAbort\": \"abort\",\n\t\"onCancel\": \"cancel\",\n\t\"onCanPlay\": \"canplay\",\n\t\"onCanPlaythrough\": \"canplaythrough\",\n\t\"onChange\": \"change\",\n\t\"onClick\": \"click\",\n\t\"onCueChange\": \"cuechange\",\n\t\"onDblClick\": \"dblclick\",\n\t\"onDurationChange\": \"durationchange\",\n\t\"onEmptied\": \"emptied\",\n\t\"onEnded\": \"ended\",\n\t\"onInput\": \"input\",\n\t\"onInvalid\": \"invalid\",\n\t\"onKeyDown\": \"keydown\",\n\t\"onKeyPress\": \"keypress\",\n\t\"onKeyUp\": \"keyup\",\n\t\"onLoadedData\": \"loadeddata\",\n\t\"onLoadedMetadata\": \"loadedmetadata\",\n\t\"onLoadStart\": \"loadstart\",\n\t\"onMouseDown\": \"mousedown\",\n\t\"onMouseEnter\": \"mouseenter\",\n\t\"onMouseleave\": \"mouseleave\",\n\t\"onMouseMove\": \"mousemove\",\n\t\"onMouseOut\": \"mouseout\",\n\t\"onMouseOver\": \"mouseover\",\n\t\"onMouseUp\": \"mouseup\",\n\t\"onMouseWheel\": \"mousewheel\",\n\t\"onPause\": \"pause\",\n\t\"onPlay\": \"play\",\n\t\"onPlaying\": \"playing\",\n\t\"onProgress\": \"progress\",\n\t\"onRateChange\": \"ratechange\",\n\t\"onReset\": \"reset\",\n\t\"onSeeked\": \"seeked\",\n\t\"onSeeking\": \"seeking\",\n\t\"onSelect\": \"select\",\n\t\"onShow\": \"show\",\n\t\"onStalled\": \"stalled\",\n\t\"onSubmit\": \"submit\",\n\t\"onSuspend\": \"suspend\",\n\t\"onTimeUpdate\": \"timeupdate\",\n\t\"onToggle\": \"toggle\",\n\t\"onVolumeChange\": \"volumechange\",\n\t\"onWaiting\": \"waiting\",\n}\n\nvar attrMap = map[string]string{\n\t\"autofocus\": \"autofocus\",\n\t\"checked\": \"checked\",\n\t\"class\": \"className\",\n\t\"for\": \"htmlFor\",\n\t\"href\": \"href\",\n\t\"id\": \"id\",\n\t\"placeholder\": \"placeholder\",\n\t\"src\": \"src\",\n\t\"type\": \"type\",\n\t\"value\": \"value\",\n}\n\nfunc goxToVecty(gox *ast.GoxExpr) ast.Expr {\n\tisComponent := unicode.IsUpper(rune(gox.TagName.Name[0]))\n\n\tif isComponent {\n\t\treturn newComponent(gox)\n\t} else {\n\t\targs := []ast.Expr{\n\t\t\t&ast.BasicLit{\n\t\t\t\tKind: token.STRING,\n\t\t\t\tValue: strconv.Quote(gox.TagName.Name),\n\t\t\t}}\n\n\t\tif len(gox.Attrs) > 0 {\n\t\t\t\/\/ Create markup expr and add attributes\n\t\t\tmarkup := newCallExpr(\n\t\t\t\tnewSelectorExpr(\"vecty\", \"Markup\"),\n\t\t\t\tmapProps(gox.Attrs),\n\t\t\t)\n\n\t\t\t\/\/ Add the markup\n\t\t\targs = append(args, markup)\n\t\t}\n\n\t\t\/\/ Add the contents\n\t\tfor _, expr := range gox.X {\n\t\t\tswitch expr := expr.(type) {\n\t\t\t\/\/ TODO figure out what's a better thing to do here\n\t\t\t\/\/ do we want to error on compile or figure out what to do based on context?\n\t\t\t\/\/ (I think the latter)\n\t\t\t\/\/ Fallback to regular behavior, don't wrap this yet\n\t\t\t\/\/case *ast.GoExpr:\n\t\t\t\/\/\te := newCallExpr(\n\t\t\t\/\/\t\tnewSelectorExpr(\"vecty\", \"Text\"),\n\t\t\t\/\/\t\t[]ast.Expr{expr},\n\t\t\t\/\/\t)\n\t\t\t\/\/\targs = append(args, e)\n\n\t\t\tcase *ast.BareWordsExpr:\n\t\t\t\tif len(strings.TrimSpace(expr.Value)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\te := newCallExpr(\n\t\t\t\t\tnewSelectorExpr(\"vecty\", \"Text\"),\n\t\t\t\t\t[]ast.Expr{expr},\n\t\t\t\t)\n\t\t\t\targs = append(args, e)\n\t\t\tdefault:\n\t\t\t\targs = append(args, expr)\n\t\t\t}\n\t\t}\n\n\t\treturn newCallExpr(\n\t\t\tnewSelectorExpr(\"vecty\", \"Tag\"),\n\t\t\targs,\n\t\t)\n\t}\n}\n\nfunc newSelectorExpr(x, sel string) *ast.SelectorExpr {\n\treturn &ast.SelectorExpr{\n\t\tX: ast.NewIdent(x),\n\t\tSel: ast.NewIdent(sel)}\n}\n\nfunc newCallExpr(fun ast.Expr, args []ast.Expr) *ast.CallExpr {\n\treturn &ast.CallExpr{\n\t\tFun: fun,\n\t\tArgs: args,\n\t\tEllipsis: token.NoPos, Lparen: token.NoPos, Rparen: token.NoPos}\n}\n\nfunc newComponent(gox *ast.GoxExpr) *ast.UnaryExpr {\n\tvar args []ast.Expr\n\tfor _, attr := range gox.Attrs {\n\t\tif attr.Rhs == nil { \/\/ default to true like JSX\n\t\t\tattr.Rhs = ast.NewIdent(\"true\")\n\t\t}\n\t\texpr := &ast.KeyValueExpr{\n\t\t\tKey: ast.NewIdent(attr.Lhs.Name),\n\t\t\tColon: token.NoPos,\n\t\t\tValue: attr.Rhs,\n\t\t}\n\n\t\targs = append(args, expr)\n\t}\n\n\treturn &ast.UnaryExpr{\n\t\tOpPos: token.NoPos,\n\t\tOp: token.AND,\n\t\tX: &ast.CompositeLit{\n\t\t\tType: ast.NewIdent(gox.TagName.Name),\n\t\t\tLbrace: token.NoPos,\n\t\t\tElts: args,\n\t\t\tRbrace: token.NoPos,\n\t\t},\n\t}\n}\n\nfunc mapProps(goxAttrs []*ast.GoxAttrStmt) []ast.Expr {\n\tvar mapped = []ast.Expr{}\n\tfor _, attr := range goxAttrs {\n\t\t\/\/ set default of Rhs to true if none provided\n\t\tif attr.Rhs == nil { \/\/ default to true like JSX\n\t\t\tattr.Rhs = ast.NewIdent(\"true\")\n\t\t}\n\n\t\tvar expr ast.Expr\n\n\t\t\/\/ if prop is an event listener (e.g. \"onClick\")\n\t\tif _, ok := eventMap[attr.Lhs.Name]; ok {\n\t\t\texpr = newEventListener(attr)\n\t\t} else if mappedName, ok := attrMap[attr.Lhs.Name]; ok {\n\t\t\t\/\/ if it's a vecty controlled prop\n\t\t\texpr = newCallExpr(\n\t\t\t\tnewSelectorExpr(\"vecty\", \"Property\"),\n\t\t\t\t[]ast.Expr{\n\t\t\t\t\t&ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(mappedName)},\n\t\t\t\t\tattr.Rhs,\n\t\t\t\t},\n\t\t\t)\n\t\t} else {\n\t\t\t\/\/ if prop is a normal attribute\n\t\t\texpr = newCallExpr(\n\t\t\t\tnewSelectorExpr(\"vecty\", \"Attribute\"),\n\t\t\t\t[]ast.Expr{\n\t\t\t\t\t&ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(attr.Lhs.Name)},\n\t\t\t\t\tattr.Rhs,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\tmapped = append(mapped, expr)\n\t}\n\n\treturn mapped\n}\n\nfunc newEventListener(goxAttr *ast.GoxAttrStmt) ast.Expr {\n\treturn &ast.UnaryExpr{\n\t\tOpPos: token.NoPos,\n\t\tOp: token.AND,\n\t\tX: &ast.CompositeLit{\n\t\t\tType: newSelectorExpr(\"vecty\", \"EventListener\"),\n\t\t\tLbrace: token.NoPos,\n\t\t\tElts: []ast.Expr{\n\t\t\t\t&ast.KeyValueExpr{\n\t\t\t\t\tKey: ast.NewIdent(\"Name\"),\n\t\t\t\t\tValue: &ast.BasicLit{\n\t\t\t\t\t\tKind: token.STRING,\n\t\t\t\t\t\tValue: strconv.Quote(eventMap[goxAttr.Lhs.Name]),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&ast.KeyValueExpr{\n\t\t\t\t\tKey: ast.NewIdent(\"Listener\"),\n\t\t\t\t\tValue: goxAttr.Rhs,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRbrace: token.NoPos,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/labstack\/echo\"\n)\n\ntype ArcGISLOD struct {\n\tLevel int `json:\"level\"`\n\tResolution float64 `json:\"resolution\"`\n\tScale float64 `json:\"scale\"`\n}\n\ntype ArcGISSpatialReference struct {\n\tWkid uint16 `json:\"wkid\"`\n}\n\ntype ArcGISExtent struct {\n\tXmin float64 `json:\"xmin\"`\n\tYmin float64 `json:\"ymin\"`\n\tXmax float64 `json:\"xmax\"`\n\tYmax float64 `json:\"ymax\"`\n\tSpatialReference ArcGISSpatialReference `json:\"spatialReference\"`\n}\n\ntype ArcGISLayerStub struct {\n\tId uint8 `json:\"id\"`\n\tName string `json:\"name\"`\n\tParentLayerId int16 `json:\"parentLayerId\"`\n\tDefaultVisibility bool `json:\"defaultVisibility\"`\n\tSubLayerIds []uint8 `json:\"subLayerIds\"`\n\tMinScale float64 `json:\"minScale\"`\n\tMaxScale float64 `json:\"maxScale\"`\n}\n\ntype ArcGISLayer struct {\n\tId uint8 `json:\"id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tDescription string `json:\"description\"`\n\tGeometryType string `json:\"geometryType\"`\n\tCopyrightText string `json:\"copyrightText\"`\n\tParentLayer interface{} `json:\"parentLayer\"`\n\tSubLayers []ArcGISLayerStub `json:\"subLayers\"`\n\tMinScale float64 `json:\"minScale\"`\n\tMaxScale float64 `json:\"maxScale\"`\n\tDefaultVisibility bool `json:\"defaultVisibility\"`\n\tExtent ArcGISExtent `json:\"extent\"`\n\tHasAttachments bool `json:\"hasAttachments\"`\n\tHtmlPopupType string `json:\"htmlPopupType\"`\n\tDrawingInfo interface{} `json:\"drawingInfo\"`\n\tDisplayField interface{} `json:\"displayField\"`\n\tFields []interface{} `json:\"fields\"`\n\tTypeIdField interface{} `json:\"typeIdField\"`\n\tTypes interface{} `json:\"types\"`\n\tRelationships []interface{} `json:\"relationships\"`\n\tCapabilities string `json:\"capabilities\"`\n\tCurrentVersion float32 `json:\"currentVersion\"`\n}\n\nvar WebMercatorSR = ArcGISSpatialReference{Wkid: 3857}\nvar GeographicSR = ArcGISSpatialReference{Wkid: 4326}\n\nfunc jsonOrJsonP(c echo.Context, out map[string]interface{}) error {\n\tcallback := c.QueryParam(\"callback\")\n\tif callback != \"\" {\n\t\treturn c.JSONP(http.StatusOK, callback, out)\n\t}\n\treturn c.JSON(http.StatusOK, out)\n}\n\nfunc GetArcGISService(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\timgFormat := TileFormatStr[tileset.tileformat]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\tname := toString(metadata[\"name\"])\n\tdescription := toString(metadata[\"description\"])\n\tattribution := toString(metadata[\"attribution\"])\n\n\t\/\/ TODO: make sure that min and max zoom always populated\n\tminZoom := metadata[\"minzoom\"].(int)\n\tmaxZoom := metadata[\"maxzoom\"].(int)\n\tdpi := 96 \/\/ TODO: extract dpi from the image instead\n\tvar lods []ArcGISLOD\n\tfor i := minZoom; i <= maxZoom; i++ {\n\t\tscale, resolution := calcScaleResolution(i, dpi)\n\t\tlods = append(lods, ArcGISLOD{\n\t\t\tLevel: i,\n\t\t\tResolution: resolution,\n\t\t\tScale: scale,\n\t\t})\n\t}\n\n\tminScale := lods[0].Scale\n\tmaxScale := lods[len(lods)-1].Scale\n\n\tbounds := metadata[\"bounds\"].([]float32) \/\/ TODO: make sure this is always present\n\textent := geoBoundsToWMExtent(bounds)\n\n\ttileInfo := map[string]interface{}{\n\t\t\"rows\": 256,\n\t\t\"cols\": 256,\n\t\t\"dpi\": dpi,\n\t\t\"origin\": map[string]float32{\n\t\t\t\"x\": -20037508.342787,\n\t\t\t\"y\": 20037508.342787,\n\t\t},\n\t\t\"spatialReference\": WebMercatorSR,\n\t\t\"lods\": lods,\n\t}\n\n\tdocumentInfo := map[string]string{\n\t\t\"Title\": name,\n\t\t\"Author\": attribution,\n\t\t\"Comments\": \"\",\n\t\t\"Subject\": \"\",\n\t\t\"Category\": \"\",\n\t\t\"Keywords\": toString(metadata[\"tags\"]),\n\t\t\"Credits\": toString(metadata[\"credits\"]),\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"currentVersion\": \"10.4\",\n\t\t\"id\": id,\n\t\t\"name\": name,\n\t\t\"mapName\": name,\n\t\t\"capabilities\": \"Map,TilesOnly\",\n\t\t\"description\": description,\n\t\t\"serviceDescription\": description,\n\t\t\"copyrightText\": attribution,\n\t\t\"singleFusedMapCache\": true,\n\t\t\"supportedImageFormatTypes\": strings.ToUpper(imgFormat),\n\t\t\"units\": \"esriMeters\",\n\t\t\"layers\": []ArcGISLayerStub{\n\t\t\tArcGISLayerStub{\n\t\t\t\tId: 0,\n\t\t\t\tName: name,\n\t\t\t\tParentLayerId: -1,\n\t\t\t\tDefaultVisibility: true,\n\t\t\t\tSubLayerIds: nil,\n\t\t\t\tMinScale: minScale,\n\t\t\t\tMaxScale: maxScale,\n\t\t\t},\n\t\t},\n\t\t\"tables\": []string{},\n\t\t\"spatialReference\": WebMercatorSR,\n\t\t\"minScale\": minScale,\n\t\t\"maxScale\": maxScale,\n\t\t\"tileInfo\": tileInfo,\n\t\t\"documentInfo\": documentInfo,\n\t\t\"initialExtent\": extent,\n\t\t\"fullExtent\": extent,\n\t\t\"exportTilesAllowed\": false,\n\t\t\"maxExportTilesCount\": 0,\n\t\t\"resampling\": false,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISServiceLayers(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\n\tbounds := metadata[\"bounds\"].([]float32) \/\/ TODO: make sure this is always present\n\textent := geoBoundsToWMExtent(bounds)\n\n\tminZoom := metadata[\"minzoom\"].(int)\n\tmaxZoom := metadata[\"maxzoom\"].(int)\n\tminScale, _ := calcScaleResolution(minZoom, 96)\n\tmaxScale, _ := calcScaleResolution(maxZoom, 96)\n\n\t\/\/ for now, just create a placeholder root layer\n\temptyArray := []interface{}{}\n\temptyLayerArray := []ArcGISLayerStub{}\n\n\tvar layers [1]ArcGISLayer\n\tlayers[0] = ArcGISLayer{\n\t\tId: 0,\n\t\tDefaultVisibility: true,\n\t\tParentLayer: nil,\n\t\tName: toString(metadata[\"name\"]),\n\t\tDescription: toString(metadata[\"description\"]),\n\t\tExtent: extent,\n\t\tMinScale: minScale,\n\t\tMaxScale: maxScale,\n\t\tCopyrightText: toString(metadata[\"attribution\"]),\n\t\tHtmlPopupType: \"esriServerHTMLPopupTypeAsHTMLText\",\n\t\tFields: emptyArray,\n\t\tRelationships: emptyArray,\n\t\tSubLayers: emptyLayerArray,\n\t\tCurrentVersion: 10.4,\n\t\tCapabilities: \"Map\",\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"layers\": layers,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISServiceLegend(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\n\t\/\/ TODO: pull the legend from ArcGIS specific metadata tables\n\tvar elements [0]interface{}\n\tvar layers [1]map[string]interface{}\n\n\tlayers[0] = map[string]interface{}{\n\t\t\"layerId\": 0,\n\t\t\"layerName\": toString(metadata[\"name\"]),\n\t\t\"layerType\": \"\",\n\t\t\"minScale\": 0,\n\t\t\"maxScale\": 0,\n\t\t\"legend\": elements,\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"layers\": layers,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISTile(c echo.Context) error {\n\tvar (\n\t\tdata []byte\n\t\tcontentType string\n\t)\n\t\/\/TODO: validate x, y, z\n\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkey := strings.Join([]string{id, c.Param(\"z\"), c.Param(\"x\"), c.Param(\"y\")}, \"\/\")\n\n\terr = cache.Get(nil, key, groupcache.AllocatingByteSliceSink(&data))\n\tif err != nil {\n\t\tlog.Println(\"Error fetching key\", key)\n\t\t\/\/ TODO: log\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf(\"Cache get failed for key: %s\", key))\n\t}\n\n\ttileset := tilesets[id]\n\n\tif len(data) <= 1 {\n\t\tif tileset.tileformat == PBF {\n\t\t\t\/\/ If pbf, return 404 w\/ json, consistent w\/ mapbox\n\t\t\treturn c.JSON(http.StatusNotFound, struct {\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t}{\"Tile does not exist\"})\n\t\t}\n\n\t\tdata = blankPNG\n\t\tcontentType = \"image\/png\"\n\t} else {\n\t\tcontentType = TileContentType[tileset.tileformat]\n\t}\n\n\tres := c.Response()\n\tres.Header().Add(\"Content-Type\", contentType)\n\n\tif tileset.tileformat == PBF {\n\t\tres.Header().Add(\"Content-Encoding\", \"gzip\")\n\t}\n\n\tres.WriteHeader(http.StatusOK)\n\t_, err = io.Copy(res, bytes.NewReader(data))\n\treturn err\n}\n\nfunc geoBoundsToWMExtent(bounds []float32) ArcGISExtent {\n\txmin, ymin := geoToMercator(float64(bounds[0]), float64(bounds[1]))\n\txmax, ymax := geoToMercator(float64(bounds[2]), float64(bounds[3]))\n\treturn ArcGISExtent{\n\t\tXmin: xmin,\n\t\tYmin: ymin,\n\t\tXmax: xmax,\n\t\tYmax: ymax,\n\t\tSpatialReference: WebMercatorSR,\n\t}\n}\n\nfunc calcScaleResolution(zoomLevel int, dpi int) (float64, float64) {\n\tresolution := 156543.033928 \/ math.Pow(2, float64(zoomLevel))\n\tscale := float64(dpi) * 39.37 * resolution \/\/ 39.37 in\/m\n\treturn scale, resolution\n}\n<commit_msg>resolves #35<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/labstack\/echo\"\n)\n\ntype ArcGISLOD struct {\n\tLevel int `json:\"level\"`\n\tResolution float64 `json:\"resolution\"`\n\tScale float64 `json:\"scale\"`\n}\n\ntype ArcGISSpatialReference struct {\n\tWkid uint16 `json:\"wkid\"`\n}\n\ntype ArcGISExtent struct {\n\tXmin float64 `json:\"xmin\"`\n\tYmin float64 `json:\"ymin\"`\n\tXmax float64 `json:\"xmax\"`\n\tYmax float64 `json:\"ymax\"`\n\tSpatialReference ArcGISSpatialReference `json:\"spatialReference\"`\n}\n\ntype ArcGISLayerStub struct {\n\tId uint8 `json:\"id\"`\n\tName string `json:\"name\"`\n\tParentLayerId int16 `json:\"parentLayerId\"`\n\tDefaultVisibility bool `json:\"defaultVisibility\"`\n\tSubLayerIds []uint8 `json:\"subLayerIds\"`\n\tMinScale float64 `json:\"minScale\"`\n\tMaxScale float64 `json:\"maxScale\"`\n}\n\ntype ArcGISLayer struct {\n\tId uint8 `json:\"id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tDescription string `json:\"description\"`\n\tGeometryType string `json:\"geometryType\"`\n\tCopyrightText string `json:\"copyrightText\"`\n\tParentLayer interface{} `json:\"parentLayer\"`\n\tSubLayers []ArcGISLayerStub `json:\"subLayers\"`\n\tMinScale float64 `json:\"minScale\"`\n\tMaxScale float64 `json:\"maxScale\"`\n\tDefaultVisibility bool `json:\"defaultVisibility\"`\n\tExtent ArcGISExtent `json:\"extent\"`\n\tHasAttachments bool `json:\"hasAttachments\"`\n\tHtmlPopupType string `json:\"htmlPopupType\"`\n\tDrawingInfo interface{} `json:\"drawingInfo\"`\n\tDisplayField interface{} `json:\"displayField\"`\n\tFields []interface{} `json:\"fields\"`\n\tTypeIdField interface{} `json:\"typeIdField\"`\n\tTypes interface{} `json:\"types\"`\n\tRelationships []interface{} `json:\"relationships\"`\n\tCapabilities string `json:\"capabilities\"`\n\tCurrentVersion float32 `json:\"currentVersion\"`\n}\n\nvar WebMercatorSR = ArcGISSpatialReference{Wkid: 3857}\nvar GeographicSR = ArcGISSpatialReference{Wkid: 4326}\n\nfunc jsonOrJsonP(c echo.Context, out map[string]interface{}) error {\n\tcallback := c.QueryParam(\"callback\")\n\tif callback != \"\" {\n\t\treturn c.JSONP(http.StatusOK, callback, out)\n\t}\n\treturn c.JSON(http.StatusOK, out)\n}\n\nfunc GetArcGISService(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\timgFormat := TileFormatStr[tileset.tileformat]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\tname := toString(metadata[\"name\"])\n\tdescription := toString(metadata[\"description\"])\n\tattribution := toString(metadata[\"attribution\"])\n\n\t\/\/ TODO: make sure that min and max zoom always populated\n\tminZoom := metadata[\"minzoom\"].(int)\n\tmaxZoom := metadata[\"maxzoom\"].(int)\n\tdpi := 96 \/\/ TODO: extract dpi from the image instead\n\tvar lods []ArcGISLOD\n\tfor i := minZoom; i <= maxZoom; i++ {\n\t\tscale, resolution := calcScaleResolution(i, dpi)\n\t\tlods = append(lods, ArcGISLOD{\n\t\t\tLevel: i,\n\t\t\tResolution: resolution,\n\t\t\tScale: scale,\n\t\t})\n\t}\n\n\tminScale := lods[0].Scale\n\tmaxScale := lods[len(lods)-1].Scale\n\n\tbounds := metadata[\"bounds\"].([]float32) \/\/ TODO: make sure this is always present\n\textent := geoBoundsToWMExtent(bounds)\n\n\ttileInfo := map[string]interface{}{\n\t\t\"rows\": 256,\n\t\t\"cols\": 256,\n\t\t\"dpi\": dpi,\n\t\t\"origin\": map[string]float32{\n\t\t\t\"x\": -20037508.342787,\n\t\t\t\"y\": 20037508.342787,\n\t\t},\n\t\t\"spatialReference\": WebMercatorSR,\n\t\t\"lods\": lods,\n\t}\n\n\tdocumentInfo := map[string]string{\n\t\t\"Title\": name,\n\t\t\"Author\": attribution,\n\t\t\"Comments\": \"\",\n\t\t\"Subject\": \"\",\n\t\t\"Category\": \"\",\n\t\t\"Keywords\": toString(metadata[\"tags\"]),\n\t\t\"Credits\": toString(metadata[\"credits\"]),\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"currentVersion\": \"10.4\",\n\t\t\"id\": id,\n\t\t\"name\": name,\n\t\t\"mapName\": name,\n\t\t\"capabilities\": \"Map,TilesOnly\",\n\t\t\"description\": description,\n\t\t\"serviceDescription\": description,\n\t\t\"copyrightText\": attribution,\n\t\t\"singleFusedMapCache\": true,\n\t\t\"supportedImageFormatTypes\": strings.ToUpper(imgFormat),\n\t\t\"units\": \"esriMeters\",\n\t\t\"layers\": []ArcGISLayerStub{\n\t\t\tArcGISLayerStub{\n\t\t\t\tId: 0,\n\t\t\t\tName: name,\n\t\t\t\tParentLayerId: -1,\n\t\t\t\tDefaultVisibility: true,\n\t\t\t\tSubLayerIds: nil,\n\t\t\t\tMinScale: minScale,\n\t\t\t\tMaxScale: maxScale,\n\t\t\t},\n\t\t},\n\t\t\"tables\": []string{},\n\t\t\"spatialReference\": WebMercatorSR,\n\t\t\"minScale\": minScale,\n\t\t\"maxScale\": maxScale,\n\t\t\"tileInfo\": tileInfo,\n\t\t\"documentInfo\": documentInfo,\n\t\t\"initialExtent\": extent,\n\t\t\"fullExtent\": extent,\n\t\t\"exportTilesAllowed\": false,\n\t\t\"maxExportTilesCount\": 0,\n\t\t\"resampling\": false,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISServiceLayers(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\n\tbounds := metadata[\"bounds\"].([]float32) \/\/ TODO: make sure this is always present\n\textent := geoBoundsToWMExtent(bounds)\n\n\tminZoom := metadata[\"minzoom\"].(int)\n\tmaxZoom := metadata[\"maxzoom\"].(int)\n\tminScale, _ := calcScaleResolution(minZoom, 96)\n\tmaxScale, _ := calcScaleResolution(maxZoom, 96)\n\n\t\/\/ for now, just create a placeholder root layer\n\temptyArray := []interface{}{}\n\temptyLayerArray := []ArcGISLayerStub{}\n\n\tvar layers [1]ArcGISLayer\n\tlayers[0] = ArcGISLayer{\n\t\tId: 0,\n\t\tDefaultVisibility: true,\n\t\tParentLayer: nil,\n\t\tName: toString(metadata[\"name\"]),\n\t\tDescription: toString(metadata[\"description\"]),\n\t\tExtent: extent,\n\t\tMinScale: minScale,\n\t\tMaxScale: maxScale,\n\t\tCopyrightText: toString(metadata[\"attribution\"]),\n\t\tHtmlPopupType: \"esriServerHTMLPopupTypeAsHTMLText\",\n\t\tFields: emptyArray,\n\t\tRelationships: emptyArray,\n\t\tSubLayers: emptyLayerArray,\n\t\tCurrentVersion: 10.4,\n\t\tCapabilities: \"Map\",\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"layers\": layers,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISServiceLegend(c echo.Context) error {\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttileset := tilesets[id]\n\tmetadata, err := tileset.ReadMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not read metadata for tileset %v\", id)\n\t\treturn err\n\t}\n\n\t\/\/ TODO: pull the legend from ArcGIS specific metadata tables\n\tvar elements [0]interface{}\n\tvar layers [1]map[string]interface{}\n\n\tlayers[0] = map[string]interface{}{\n\t\t\"layerId\": 0,\n\t\t\"layerName\": toString(metadata[\"name\"]),\n\t\t\"layerType\": \"\",\n\t\t\"minScale\": 0,\n\t\t\"maxScale\": 0,\n\t\t\"legend\": elements,\n\t}\n\n\tout := map[string]interface{}{\n\t\t\"layers\": layers,\n\t}\n\n\treturn jsonOrJsonP(c, out)\n}\n\nfunc GetArcGISTile(c echo.Context) error {\n\tvar (\n\t\tdata []byte\n\t\tcontentType string\n\t)\n\t\/\/TODO: validate x, y, z\n\n\tid, err := getServiceOr404(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkey := strings.Join([]string{id, \"tile\", c.Param(\"z\"), c.Param(\"x\"), c.Param(\"y\")}, \"|\")\n\n\terr = cache.Get(nil, key, groupcache.AllocatingByteSliceSink(&data))\n\tif err != nil {\n\t\tlog.Println(\"Error fetching key\", key)\n\t\t\/\/ TODO: log\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf(\"Cache get failed for key: %s\", key))\n\t}\n\n\ttileset := tilesets[id]\n\n\tif len(data) <= 1 {\n\t\tif tileset.tileformat == PBF {\n\t\t\t\/\/ If pbf, return 404 w\/ json, consistent w\/ mapbox\n\t\t\treturn c.JSON(http.StatusNotFound, struct {\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t}{\"Tile does not exist\"})\n\t\t}\n\n\t\tdata = blankPNG\n\t\tcontentType = \"image\/png\"\n\t} else {\n\t\tcontentType = TileContentType[tileset.tileformat]\n\t}\n\n\tres := c.Response()\n\tres.Header().Add(\"Content-Type\", contentType)\n\n\tif tileset.tileformat == PBF {\n\t\tres.Header().Add(\"Content-Encoding\", \"gzip\")\n\t}\n\n\tres.WriteHeader(http.StatusOK)\n\t_, err = io.Copy(res, bytes.NewReader(data))\n\treturn err\n}\n\nfunc geoBoundsToWMExtent(bounds []float32) ArcGISExtent {\n\txmin, ymin := geoToMercator(float64(bounds[0]), float64(bounds[1]))\n\txmax, ymax := geoToMercator(float64(bounds[2]), float64(bounds[3]))\n\treturn ArcGISExtent{\n\t\tXmin: xmin,\n\t\tYmin: ymin,\n\t\tXmax: xmax,\n\t\tYmax: ymax,\n\t\tSpatialReference: WebMercatorSR,\n\t}\n}\n\nfunc calcScaleResolution(zoomLevel int, dpi int) (float64, float64) {\n\tresolution := 156543.033928 \/ math.Pow(2, float64(zoomLevel))\n\tscale := float64(dpi) * 39.37 * resolution \/\/ 39.37 in\/m\n\treturn scale, resolution\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ Version tells us the app version string\nconst Version = \"3.1.0-rc3\"\n<commit_msg>Bump to v3.1.1<commit_after>package version\n\n\/\/ Version tells us the app version string\nconst Version = \"3.1.1\"\n<|endoftext|>"} {"text":"<commit_before>package cameradar\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tcurl \"github.com\/Ullaakut\/go-curl\"\n)\n\n\/\/ HTTP responses.\nconst (\n\thttpOK = 200\n\thttpUnauthorized = 401\n\thttpForbidden = 403\n\thttpNotFound = 404\n)\n\n\/\/ CURL RTSP request types.\nconst (\n\trtspDescribe = 2\n\trtspSetup = 4\n)\n\n\/\/ Attack attacks the given targets and returns the accessed streams.\nfunc (s *Scanner) Attack(targets []Stream) ([]Stream, error) {\n\tif len(targets) == 0 {\n\t\treturn nil, fmt.Errorf(\"unable to attack empty list of targets\")\n\t}\n\n\t\/\/ Most cameras will be accessed successfully with these two attacks.\n\ts.term.StartStepf(\"Attacking routes of %d streams\", len(targets))\n\tstreams := s.AttackRoute(targets)\n\n\ts.term.StartStepf(\"Attempting to detect authentication methods of %d streams\", len(targets))\n\tstreams = s.DetectAuthMethods(streams)\n\n\ts.term.StartStepf(\"Attacking credentials of %d streams\", len(targets))\n\tstreams = s.AttackCredentials(streams)\n\n\ts.term.StartStep(\"Validating that streams are accessible\")\n\tstreams = s.ValidateStreams(streams)\n\n\t\/\/ But some cameras run GST RTSP Server which prioritizes 401 over 404 contrary to most cameras.\n\t\/\/ For these cameras, running another route attack will solve the problem.\n\tfor _, stream := range streams {\n\t\tif !stream.RouteFound || !stream.CredentialsFound || !stream.Available {\n\t\t\ts.term.StartStepf(\"Second round of attacks\")\n\t\t\tstreams = s.AttackRoute(streams)\n\n\t\t\ts.term.StartStep(\"Validating that streams are accessible\")\n\t\t\tstreams = s.ValidateStreams(streams)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.term.EndStep()\n\n\treturn streams, nil\n}\n\n\/\/ ValidateStreams tries to setup the stream to validate whether or not it is available.\nfunc (s *Scanner) ValidateStreams(targets []Stream) []Stream {\n\tfor i := range targets {\n\t\ttargets[i].Available = s.validateStream(targets[i])\n\t\ttime.Sleep(s.attackInterval)\n\t}\n\n\treturn targets\n}\n\n\/\/ AttackCredentials attempts to guess the provided targets' credentials using the given\n\/\/ dictionary or the default dictionary if none was provided by the user.\nfunc (s *Scanner) AttackCredentials(targets []Stream) []Stream {\n\tresChan := make(chan Stream)\n\tdefer close(resChan)\n\n\tfor i := range targets {\n\t\t\/\/ TODO: Perf Improvement: Skip cameras with no auth type detected, and set their\n\t\t\/\/ CredentialsFound value to true.\n\t\tgo s.attackCameraCredentials(targets[i], resChan)\n\t}\n\n\tattackResults := []Stream{}\n\t\/\/ TODO: Change this into a for+select and make a successful result close the chan.\n\tfor range targets {\n\t\tattackResults = append(attackResults, <-resChan)\n\t}\n\n\tfor i := range attackResults {\n\t\tif attackResults[i].CredentialsFound {\n\t\t\ttargets = replace(targets, attackResults[i])\n\t\t}\n\t}\n\n\treturn targets\n}\n\n\/\/ AttackRoute attempts to guess the provided targets' streaming routes using the given\n\/\/ dictionary or the default dictionary if none was provided by the user.\nfunc (s *Scanner) AttackRoute(targets []Stream) []Stream {\n\tresChan := make(chan Stream)\n\tdefer close(resChan)\n\n\tfor i := range targets {\n\t\tgo s.attackCameraRoute(targets[i], resChan)\n\t}\n\n\tattackResults := []Stream{}\n\t\/\/ TODO: Change this into a for+select and make a successful result close the chan.\n\tfor range targets {\n\t\tattackResults = append(attackResults, <-resChan)\n\t}\n\n\tfor i := range attackResults {\n\t\tif attackResults[i].RouteFound {\n\t\t\ttargets = replace(targets, attackResults[i])\n\t\t}\n\t}\n\n\treturn targets\n}\n\n\/\/ DetectAuthMethods attempts to guess the provided targets' authentication types, between\n\/\/ digest, basic auth or none at all.\nfunc (s *Scanner) DetectAuthMethods(targets []Stream) []Stream {\n\tfor i := range targets {\n\t\ttargets[i].AuthenticationType = s.detectAuthMethod(targets[i])\n\t\ttime.Sleep(s.attackInterval)\n\n\t\tvar authMethod string\n\t\tswitch targets[i].AuthenticationType {\n\t\tcase 0:\n\t\t\tauthMethod = \"no\"\n\t\tcase 1:\n\t\t\tauthMethod = \"basic\"\n\t\tcase 2:\n\t\t\tauthMethod = \"digest\"\n\t\t}\n\n\t\ts.term.Debugf(\"Stream %s uses %s authentication method\\n\", GetCameraRTSPURL(targets[i]), authMethod)\n\t}\n\n\treturn targets\n}\n\nfunc (s *Scanner) attackCameraCredentials(target Stream, resChan chan<- Stream) {\n\tfor _, username := range s.credentials.Usernames {\n\t\tfor _, password := range s.credentials.Passwords {\n\t\t\tok := s.credAttack(target, username, password)\n\t\t\tif ok {\n\t\t\t\ttarget.CredentialsFound = true\n\t\t\t\ttarget.Username = username\n\t\t\t\ttarget.Password = password\n\t\t\t\tresChan <- target\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttime.Sleep(s.attackInterval)\n\t\t}\n\t}\n\n\ttarget.CredentialsFound = false\n\tresChan <- target\n}\n\nfunc (s *Scanner) attackCameraRoute(target Stream, resChan chan<- Stream) {\n\tfor _, route := range s.routes {\n\t\tok := s.routeAttack(target, route)\n\t\tif ok {\n\t\t\ttarget.RouteFound = true\n\t\t\ttarget.Route = route\n\t\t\tresChan <- target\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(s.attackInterval)\n\t}\n\n\ttarget.RouteFound = false\n\tresChan <- target\n}\n\nfunc (s *Scanner) detectAuthMethod(stream Stream) int {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%d\/%s\",\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t_ = c.Setopt(curl.OPT_VERBOSE, 1)\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn -1\n\t}\n\n\tauthType, err := c.Getinfo(curl.INFO_HTTPAUTH_AVAIL)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn -1\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", authType)\n\t}\n\n\treturn authType.(int)\n}\n\nfunc (s *Scanner) routeAttack(stream Stream, route string) bool {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tstream.Username,\n\t\tstream.Password,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\troute,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t_ = c.Setopt(curl.OPT_VERBOSE, 1)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(stream.Username, \":\", stream.Password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\t\/\/ If it's a 401 or 403, it means that the credentials are wrong but the route might be okay.\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK || rc == httpUnauthorized || rc == httpForbidden {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) credAttack(stream Stream, username string, password string) bool {\n\tfmt.Println()\n\tfmt.Println()\n\tfmt.Println()\n\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tusername,\n\t\tpassword,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t_ = c.Setopt(curl.OPT_VERBOSE, 1)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(username, \":\", password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\n\t\/\/ If it's a 404, it means that the route is incorrect but the credentials might be okay.\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK || rc == httpNotFound {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) validateStream(stream Stream) bool {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tstream.Username,\n\t\tstream.Password,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(stream.Username, \":\", stream.Password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspSetup)\n\n\t_ = c.Setopt(curl.OPT_RTSP_TRANSPORT, \"RTP\/AVP;unicast;client_port=33332-33333\")\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"SETUP\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) setCurlOptions(c Curler) {\n\t\/\/ Do not write sdp in stdout\n\t_ = c.Setopt(curl.OPT_WRITEFUNCTION, doNotWrite)\n\t\/\/ Do not use signals (would break multithreading).\n\t_ = c.Setopt(curl.OPT_NOSIGNAL, 1)\n\t\/\/ Do not send a body in the describe request.\n\t_ = c.Setopt(curl.OPT_NOBODY, 1)\n\t\/\/ Set custom timeout.\n\t_ = c.Setopt(curl.OPT_TIMEOUT_MS, int(s.timeout\/time.Millisecond))\n}\n\n\/\/ HACK: See https:\/\/stackoverflow.com\/questions\/3572397\/lib-curl-in-c-disable-printing\nfunc doNotWrite([]uint8, interface{}) bool {\n\treturn true\n}\n<commit_msg>Remove spam from curl verbose mode (#257)<commit_after>package cameradar\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tcurl \"github.com\/Ullaakut\/go-curl\"\n)\n\n\/\/ HTTP responses.\nconst (\n\thttpOK = 200\n\thttpUnauthorized = 401\n\thttpForbidden = 403\n\thttpNotFound = 404\n)\n\n\/\/ CURL RTSP request types.\nconst (\n\trtspDescribe = 2\n\trtspSetup = 4\n)\n\n\/\/ Attack attacks the given targets and returns the accessed streams.\nfunc (s *Scanner) Attack(targets []Stream) ([]Stream, error) {\n\tif len(targets) == 0 {\n\t\treturn nil, fmt.Errorf(\"unable to attack empty list of targets\")\n\t}\n\n\t\/\/ Most cameras will be accessed successfully with these two attacks.\n\ts.term.StartStepf(\"Attacking routes of %d streams\", len(targets))\n\tstreams := s.AttackRoute(targets)\n\n\ts.term.StartStepf(\"Attempting to detect authentication methods of %d streams\", len(targets))\n\tstreams = s.DetectAuthMethods(streams)\n\n\ts.term.StartStepf(\"Attacking credentials of %d streams\", len(targets))\n\tstreams = s.AttackCredentials(streams)\n\n\ts.term.StartStep(\"Validating that streams are accessible\")\n\tstreams = s.ValidateStreams(streams)\n\n\t\/\/ But some cameras run GST RTSP Server which prioritizes 401 over 404 contrary to most cameras.\n\t\/\/ For these cameras, running another route attack will solve the problem.\n\tfor _, stream := range streams {\n\t\tif !stream.RouteFound || !stream.CredentialsFound || !stream.Available {\n\t\t\ts.term.StartStepf(\"Second round of attacks\")\n\t\t\tstreams = s.AttackRoute(streams)\n\n\t\t\ts.term.StartStep(\"Validating that streams are accessible\")\n\t\t\tstreams = s.ValidateStreams(streams)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.term.EndStep()\n\n\treturn streams, nil\n}\n\n\/\/ ValidateStreams tries to setup the stream to validate whether or not it is available.\nfunc (s *Scanner) ValidateStreams(targets []Stream) []Stream {\n\tfor i := range targets {\n\t\ttargets[i].Available = s.validateStream(targets[i])\n\t\ttime.Sleep(s.attackInterval)\n\t}\n\n\treturn targets\n}\n\n\/\/ AttackCredentials attempts to guess the provided targets' credentials using the given\n\/\/ dictionary or the default dictionary if none was provided by the user.\nfunc (s *Scanner) AttackCredentials(targets []Stream) []Stream {\n\tresChan := make(chan Stream)\n\tdefer close(resChan)\n\n\tfor i := range targets {\n\t\t\/\/ TODO: Perf Improvement: Skip cameras with no auth type detected, and set their\n\t\t\/\/ CredentialsFound value to true.\n\t\tgo s.attackCameraCredentials(targets[i], resChan)\n\t}\n\n\tattackResults := []Stream{}\n\t\/\/ TODO: Change this into a for+select and make a successful result close the chan.\n\tfor range targets {\n\t\tattackResults = append(attackResults, <-resChan)\n\t}\n\n\tfor i := range attackResults {\n\t\tif attackResults[i].CredentialsFound {\n\t\t\ttargets = replace(targets, attackResults[i])\n\t\t}\n\t}\n\n\treturn targets\n}\n\n\/\/ AttackRoute attempts to guess the provided targets' streaming routes using the given\n\/\/ dictionary or the default dictionary if none was provided by the user.\nfunc (s *Scanner) AttackRoute(targets []Stream) []Stream {\n\tresChan := make(chan Stream)\n\tdefer close(resChan)\n\n\tfor i := range targets {\n\t\tgo s.attackCameraRoute(targets[i], resChan)\n\t}\n\n\tattackResults := []Stream{}\n\t\/\/ TODO: Change this into a for+select and make a successful result close the chan.\n\tfor range targets {\n\t\tattackResults = append(attackResults, <-resChan)\n\t}\n\n\tfor i := range attackResults {\n\t\tif attackResults[i].RouteFound {\n\t\t\ttargets = replace(targets, attackResults[i])\n\t\t}\n\t}\n\n\treturn targets\n}\n\n\/\/ DetectAuthMethods attempts to guess the provided targets' authentication types, between\n\/\/ digest, basic auth or none at all.\nfunc (s *Scanner) DetectAuthMethods(targets []Stream) []Stream {\n\tfor i := range targets {\n\t\ttargets[i].AuthenticationType = s.detectAuthMethod(targets[i])\n\t\ttime.Sleep(s.attackInterval)\n\n\t\tvar authMethod string\n\t\tswitch targets[i].AuthenticationType {\n\t\tcase 0:\n\t\t\tauthMethod = \"no\"\n\t\tcase 1:\n\t\t\tauthMethod = \"basic\"\n\t\tcase 2:\n\t\t\tauthMethod = \"digest\"\n\t\t}\n\n\t\ts.term.Debugf(\"Stream %s uses %s authentication method\\n\", GetCameraRTSPURL(targets[i]), authMethod)\n\t}\n\n\treturn targets\n}\n\nfunc (s *Scanner) attackCameraCredentials(target Stream, resChan chan<- Stream) {\n\tfor _, username := range s.credentials.Usernames {\n\t\tfor _, password := range s.credentials.Passwords {\n\t\t\tok := s.credAttack(target, username, password)\n\t\t\tif ok {\n\t\t\t\ttarget.CredentialsFound = true\n\t\t\t\ttarget.Username = username\n\t\t\t\ttarget.Password = password\n\t\t\t\tresChan <- target\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttime.Sleep(s.attackInterval)\n\t\t}\n\t}\n\n\ttarget.CredentialsFound = false\n\tresChan <- target\n}\n\nfunc (s *Scanner) attackCameraRoute(target Stream, resChan chan<- Stream) {\n\tfor _, route := range s.routes {\n\t\tok := s.routeAttack(target, route)\n\t\tif ok {\n\t\t\ttarget.RouteFound = true\n\t\t\ttarget.Route = route\n\t\t\tresChan <- target\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(s.attackInterval)\n\t}\n\n\ttarget.RouteFound = false\n\tresChan <- target\n}\n\nfunc (s *Scanner) detectAuthMethod(stream Stream) int {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%d\/%s\",\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn -1\n\t}\n\n\tauthType, err := c.Getinfo(curl.INFO_HTTPAUTH_AVAIL)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn -1\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", authType)\n\t}\n\n\treturn authType.(int)\n}\n\nfunc (s *Scanner) routeAttack(stream Stream, route string) bool {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tstream.Username,\n\t\tstream.Password,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\troute,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(stream.Username, \":\", stream.Password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\t\/\/ If it's a 401 or 403, it means that the credentials are wrong but the route might be okay.\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK || rc == httpUnauthorized || rc == httpForbidden {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) credAttack(stream Stream, username string, password string) bool {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tusername,\n\t\tpassword,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(username, \":\", password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"DESCRIBE\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\n\t\/\/ If it's a 404, it means that the route is incorrect but the credentials might be okay.\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK || rc == httpNotFound {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) validateStream(stream Stream) bool {\n\tc := s.curl.Duphandle()\n\n\tattackURL := fmt.Sprintf(\n\t\t\"rtsp:\/\/%s:%s@%s:%d\/%s\",\n\t\tstream.Username,\n\t\tstream.Password,\n\t\tstream.Address,\n\t\tstream.Port,\n\t\tstream.Route,\n\t)\n\n\ts.setCurlOptions(c)\n\n\t\/\/ Set proper authentication type.\n\t_ = c.Setopt(curl.OPT_HTTPAUTH, stream.AuthenticationType)\n\t_ = c.Setopt(curl.OPT_USERPWD, fmt.Sprint(stream.Username, \":\", stream.Password))\n\n\t\/\/ Send a request to the URL of the stream we want to attack.\n\t_ = c.Setopt(curl.OPT_URL, attackURL)\n\t\/\/ Set the RTSP STREAM URI as the stream URL.\n\t_ = c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)\n\t_ = c.Setopt(curl.OPT_RTSP_REQUEST, rtspSetup)\n\n\t_ = c.Setopt(curl.OPT_RTSP_TRANSPORT, \"RTP\/AVP;unicast;client_port=33332-33333\")\n\n\t\/\/ Perform the request.\n\terr := c.Perform()\n\tif err != nil {\n\t\ts.term.Errorf(\"Perform failed for %q (auth %d): %v\", attackURL, stream.AuthenticationType, err)\n\t\treturn false\n\t}\n\n\t\/\/ Get return code for the request.\n\trc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)\n\tif err != nil {\n\t\ts.term.Errorf(\"Getinfo failed: %v\", err)\n\t\treturn false\n\t}\n\n\tif s.verbose {\n\t\ts.term.Debugln(\"SETUP\", attackURL, \"RTSP\/1.0 >\", rc)\n\t}\n\t\/\/ If it's a 200, the stream is accessed successfully.\n\tif rc == httpOK {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *Scanner) setCurlOptions(c Curler) {\n\t\/\/ Do not write sdp in stdout\n\t_ = c.Setopt(curl.OPT_WRITEFUNCTION, doNotWrite)\n\t\/\/ Do not use signals (would break multithreading).\n\t_ = c.Setopt(curl.OPT_NOSIGNAL, 1)\n\t\/\/ Do not send a body in the describe request.\n\t_ = c.Setopt(curl.OPT_NOBODY, 1)\n\t\/\/ Set custom timeout.\n\t_ = c.Setopt(curl.OPT_TIMEOUT_MS, int(s.timeout\/time.Millisecond))\n}\n\n\/\/ HACK: See https:\/\/stackoverflow.com\/questions\/3572397\/lib-curl-in-c-disable-printing\nfunc doNotWrite([]uint8, interface{}) bool {\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/josledp\/termcolor\"\n\tgit2go \"gopkg.in\/libgit2\/git2go.v26\"\n)\n\nconst (\n\tsDownArrow = \"↓\"\n\tsUpArrow = \"↑\"\n\tsThreeDots = \"…\"\n\tsDot = \"●\"\n\tsCheck = \"✔\"\n\tsFlag = \"⚑\"\n\tsAsterisk = \"⭑\"\n\tsCross = \"✖\"\n)\n\n\/\/Git is the plugin struct\ntype Git struct {\n\tconflicted int\n\tdetached bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\thasUpstream bool\n}\n\n\/\/Name returns the plugin name\nfunc (Git) Name() string {\n\treturn \"git\"\n}\n\n\/\/Load is the load function of the plugin\nfunc (g *Git) Load(pr Prompter) error {\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\n\t\t\/\/Get current tracked & untracked files status\n\t\tstatusOpts := git2go.StatusOptions{\n\t\t\tFlags: git2go.StatusOptIncludeUntracked | git2go.StatusOptRenamesHeadToIndex,\n\t\t}\n\t\trepostate, err := repository.StatusList(&statusOpts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tgot := false\n\t\t\tif entry.Status&git2go.StatusCurrent > 0 {\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexNew > 0 {\n\t\t\t\tg.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexModified > 0 {\n\t\t\t\tg.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexDeleted > 0 {\n\t\t\t\tg.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexRenamed > 0 {\n\t\t\t\tg.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexTypeChange > 0 {\n\t\t\t\tg.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtNew > 0 {\n\t\t\t\tg.untracked++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtModified > 0 {\n\t\t\t\tg.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtDeleted > 0 {\n\t\t\t\tg.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtTypeChange > 0 {\n\t\t\t\tg.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtRenamed > 0 {\n\t\t\t\tg.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIgnored > 0 {\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusConflicted > 0 {\n\t\t\t\tg.conflicted++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif !got {\n\t\t\t\tlog.Println(\"Git plugin. Unknown: \", entry.Status)\n\t\t\t}\n\t\t}\n\t\t\/\/Get current branch name\n\t\tlocalRef, err := repository.Head()\n\t\tif err != nil {\n\t\t\t\/\/Probably there are no commits yet. How to know the current branch??\n\t\t\tg.branch = \"No_Commits\"\n\t\t\treturn nil\n\t\t}\n\t\tdefer localRef.Free()\n\n\t\tlocalBranch := localRef.Branch()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting local branch: %v\", err)\n\t\t}\n\n\t\tif isHead, _ := localBranch.IsHead(); isHead {\n\t\t\tg.branch = localRef.Shorthand()\n\t\t} else {\n\t\t\tg.branch = \":\" + localRef.Target().String()[:7]\n\t\t\tg.detached = true\n\t\t}\n\n\t\tremoteRef, err := localBranch.Upstream()\n\n\t\tif err == nil {\n\t\t\tdefer remoteRef.Free()\n\n\t\t\tg.hasUpstream = true\n\t\t\tpwd, err := os.Getwd()\n\t\t\tif err == nil {\n\t\t\t\tkey := fmt.Sprintf(\"git-%s-fetch\", pwd)\n\t\t\t\tlast, ok := pr.GetCache(key)\n\t\t\t\tvar lastTime time.Time\n\t\t\t\tif last != nil {\n\t\t\t\t\tlastTime, err = time.Parse(time.RFC3339, last.(string))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Error loading git last fetch time: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !ok || time.Since(lastTime) > 300*time.Second {\n\t\t\t\t\tpa := syscall.ProcAttr{}\n\t\t\t\t\tpa.Env = os.Environ()\n\t\t\t\t\tpa.Dir = pwd\n\t\t\t\t\tgitcommand, err := exec.LookPath(\"git\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"git command not found: %v\", err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t_, err = syscall.ForkExec(gitcommand, []string{gitcommand, \"fetch\"}, &pa)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\/\/Silently fail?\n\t\t\t\t\t\t\tlog.Printf(\"Error fetching: %v\", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tpr.Cache(key, time.Now())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !remoteRef.Target().Equal(localRef.Target()) {\n\t\t\t\tg.commitsAhead, g.commitsBehind, err = repository.AheadBehind(localRef.Target(), remoteRef.Target())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Error getting commitsAhead\/Behing: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ only works if libgit >= 0.25\n\t\trepository.Stashes.Foreach(func(i int, m string, o *git2go.Oid) error {\n\t\t\tg.stashed = i + 1\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn nil\n}\n\n\/\/Get returns the string to use in the prompt\nfunc (g Git) Get(format func(string, ...termcolor.Mode) string) (string, []termcolor.Mode) {\n\tvar gitPromptInfo string\n\tif g.branch != \"\" {\n\t\tgitPromptInfo = format(g.branch, termcolor.FgMagenta)\n\t\tspace := \" \"\n\t\tif g.commitsBehind > 0 {\n\t\t\tgitPromptInfo += space + sDownArrow + \"·\" + strconv.Itoa(g.commitsBehind)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif g.commitsAhead > 0 {\n\t\t\tgitPromptInfo += space + sUpArrow + \"·\" + strconv.Itoa(g.commitsAhead)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif !g.hasUpstream {\n\t\t\tgitPromptInfo += space + sAsterisk\n\t\t\tspace = \"\"\n\t\t}\n\t\tgitPromptInfo += \"|\"\n\t\tsynced := true\n\t\tif g.conflicted > 0 {\n\t\t\tgitPromptInfo += format(sCross+strconv.Itoa(g.conflicted), termcolor.FgRed)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.staged > 0 {\n\t\t\tgitPromptInfo += format(sDot+strconv.Itoa(g.staged), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.changed > 0 {\n\t\t\tgitPromptInfo += format(\"+\"+strconv.Itoa(g.changed), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.untracked > 0 {\n\t\t\tgitPromptInfo += format(sThreeDots+strconv.Itoa(g.untracked), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif synced {\n\t\t\tgitPromptInfo += format(sCheck, termcolor.FgHiGreen)\n\t\t}\n\t\tif g.stashed > 0 {\n\t\t\tgitPromptInfo += format(sFlag+strconv.Itoa(g.stashed), termcolor.FgHiMagenta)\n\t\t}\n\t}\n\treturn gitPromptInfo, []termcolor.Mode{termcolor.FgMagenta}\n}\n<commit_msg>getting rid of libgit2 and cgo! it is a great day :) (btw it does not work as well, but common! no cgogit merge --squash dev_remove_libgit2)<commit_after>package plugin\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\/object\"\n\n\t\"github.com\/josledp\/termcolor\"\n\t\"gopkg.in\/src-d\/go-billy.v3\/osfs\"\n\tgit \"gopkg.in\/src-d\/go-git.v4\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n\t\"gopkg.in\/src-d\/go-git.v4\/storage\/filesystem\"\n)\n\nconst (\n\tsDownArrow = \"↓\"\n\tsUpArrow = \"↑\"\n\tsThreeDots = \"…\"\n\tsDot = \"●\"\n\tsCheck = \"✔\"\n\tsFlag = \"⚑\"\n\tsAsterisk = \"*\"\n\tsCross = \"✖\"\n)\n\n\/\/Git is the plugin struct\ntype Git struct {\n\tconflicted int\n\tdetached bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\thasUpstream bool\n}\n\n\/\/Name returns the plugin name\nfunc (Git) Name() string {\n\treturn \"git\"\n}\n\nfunc findGitRepository() (string, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error determining current pwd: %v\", err)\n\t}\n\tp := strings.Split(pwd, \"\/\")\n\tfor i := range p {\n\t\tpath := strings.Join(p[:len(p)-i], \"\/\")\n\t\tif info, err := os.Stat(path + \"\/.git\"); os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t} else if err == nil && info.IsDir() {\n\t\t\treturn path, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"unable to find .git directory\")\n}\n\nfunc lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}\n\n\/\/Load is the load function of the plugin\nfunc (g *Git) Load(pr Prompter) error {\n\tgitPwd, err := findGitRepository()\n\tif err != nil {\n\t\treturn nil \/\/Unable to find valid git repo, so good so far\n\t}\n\tfs := osfs.New(gitPwd + \"\/.git\")\n\tstorage, err := filesystem.NewStorage(fs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get storer: %v\", err)\n\t}\n\trepository, err := git.Open(storage, osfs.New(gitPwd))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open repository: %v\", err)\n\t}\n\twt, err := repository.Worktree()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting worktree: %v\", err)\n\t}\n\tst, err := wt.Status()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting worktree status: %v\", err)\n\t}\n\tfor _, status := range st {\n\t\tswitch status.Staging {\n\t\tcase git.Unmodified:\n\t\tcase git.Untracked:\n\t\tdefault:\n\t\t\tg.staged++\n\t\t}\n\n\t\tswitch status.Worktree {\n\t\tcase git.Unmodified:\n\t\tcase git.Untracked:\n\t\t\tg.untracked++\n\t\tdefault:\n\t\t\tg.changed++\n\t\t}\n\t}\n\tmergeMsg, err := os.Open(gitPwd + \"\/.git\/MERGE_MSG\")\n\tif err == nil {\n\t\tdefer mergeMsg.Close()\n\t\tmmReader := bufio.NewReader(mergeMsg)\n\t\tvar line string\n\t\tcountLines := false\n\t\tfor err = nil; err == nil; line, err = mmReader.ReadString('\\n') {\n\t\t\tif countLines {\n\t\t\t\tg.conflicted++\n\t\t\t\tg.changed--\n\t\t\t\tg.staged--\n\t\t\t} else if strings.Contains(line, \"Conflicts\") {\n\t\t\t\tcountLines = true\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif fstash, err := os.Open(gitPwd + \"\/.git\/logs\/refs\/stash\"); err == nil {\n\t\tdefer fstash.Close()\n\t\tg.stashed, err = lineCounter(fstash)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to count stashes:%v\", err)\n\t\t}\n\t}\n\tlocalRef, err := repository.Head()\n\tif err != nil {\n\t\tg.branch = \"No_Commits\"\n\t\treturn nil\n\t}\n\tlocalName := strings.Split(localRef.Name().String(), \"\/\")\n\tif len(localName) == 1 {\n\t\tg.branch = \":\" + localRef.Hash().String()[:7]\n\t\tg.detached = true\n\t} else {\n\t\tg.branch = localName[len(localName)-1]\n\t}\n\tc, err := repository.Config()\n\tremote := c.Raw.Section(\"branch\").Subsection(g.branch).Option(\"remote\")\n\tif remote != \"\" {\n\t\tg.hasUpstream = true\n\t\tg.fetchIfNeeded(pr)\n\t\tremoteRef, err := repository.Reference(plumbing.ReferenceName(\"refs\/remotes\/\"+remote+\"\/\"+g.branch), false)\n\t\tif localRef.Hash() != remoteRef.Hash() && err == nil {\n\t\t\tlocalCo, err := repository.CommitObject(localRef.Hash())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to get local commit from local reference: %v\", err)\n\t\t\t}\n\t\t\tremoteCo, err := repository.CommitObject(remoteRef.Hash())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to get local commit from remote reference: %v\", err)\n\t\t\t}\n\t\t\tg.commitsAhead, g.commitsBehind = aheadBehind(repository, localCo, remoteCo)\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc fillMap(r *git.Repository, co *object.Commit, m map[string]struct{}) {\n\tm[co.Hash.String()] = struct{}{}\n\tfor _, _p := range co.ParentHashes {\n\t\tp, _ := r.CommitObject(_p)\n\t\tfillMap(r, p, m)\n\t}\n}\nfunc count(r *git.Repository, co *object.Commit, m map[string]struct{}) int {\n\tif _, ok := m[co.Hash.String()]; ok {\n\t\treturn 0\n\t}\n\tc := 1\n\tfor _, _p := range co.ParentHashes {\n\t\tp, _ := r.CommitObject(_p)\n\t\tc += count(r, p, m)\n\t}\n\treturn c\n}\nfunc aheadBehind(repository *git.Repository, local *object.Commit, remote *object.Commit) (ahead, behind int) {\n\tlocalMap := make(map[string]struct{})\n\tremoteMap := make(map[string]struct{})\n\tfillMap(repository, local, localMap)\n\tfillMap(repository, remote, remoteMap)\n\tahead = count(repository, local, remoteMap)\n\tbehind = count(repository, remote, localMap)\n\treturn ahead, behind\n}\n\n\/\/Get returns the string to use in the prompt\nfunc (g Git) Get(format func(string, ...termcolor.Mode) string) (string, []termcolor.Mode) {\n\tvar gitPromptInfo string\n\tif g.branch != \"\" {\n\t\tgitPromptInfo = format(g.branch, termcolor.FgMagenta)\n\t\tspace := \" \"\n\t\tif g.commitsBehind > 0 {\n\t\t\tgitPromptInfo += space + sDownArrow + \"·\" + strconv.Itoa(g.commitsBehind)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif g.commitsAhead > 0 {\n\t\t\tgitPromptInfo += space + sUpArrow + \"·\" + strconv.Itoa(g.commitsAhead)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif !g.hasUpstream {\n\t\t\tgitPromptInfo += space + sAsterisk\n\t\t\tspace = \"\"\n\t\t}\n\t\tgitPromptInfo += \"|\"\n\t\tsynced := true\n\t\tif g.conflicted > 0 {\n\t\t\tgitPromptInfo += format(sCross+strconv.Itoa(g.conflicted), termcolor.FgRed)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.staged > 0 {\n\t\t\tgitPromptInfo += format(sDot+strconv.Itoa(g.staged), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.changed > 0 {\n\t\t\tgitPromptInfo += format(\"+\"+strconv.Itoa(g.changed), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif g.untracked > 0 {\n\t\t\tgitPromptInfo += format(sThreeDots+strconv.Itoa(g.untracked), termcolor.FgCyan)\n\t\t\tsynced = false\n\t\t}\n\t\tif synced {\n\t\t\tgitPromptInfo += format(sCheck, termcolor.FgHiGreen)\n\t\t}\n\t\tif g.stashed > 0 {\n\t\t\tgitPromptInfo += format(sFlag+strconv.Itoa(g.stashed), termcolor.FgHiMagenta)\n\t\t}\n\t}\n\treturn gitPromptInfo, []termcolor.Mode{termcolor.FgMagenta}\n}\n\nfunc (g *Git) fetchIfNeeded(pr Prompter) {\n\tpwd, err := os.Getwd()\n\tif err == nil {\n\t\tkey := fmt.Sprintf(\"git-%s-fetch\", pwd)\n\t\tlast, ok := pr.GetCache(key)\n\t\tvar lastTime time.Time\n\t\tif last != nil {\n\t\t\tlastTime, err = time.Parse(time.RFC3339, last.(string))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error loading git last fetch time: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif !ok || time.Since(lastTime) > 300*time.Second {\n\t\t\tpa := syscall.ProcAttr{}\n\t\t\tpa.Env = os.Environ()\n\t\t\tpa.Dir = pwd\n\t\t\tgitcommand, err := exec.LookPath(\"git\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"git command not found: %v\", err)\n\t\t\t} else {\n\t\t\t\t_, err = syscall.ForkExec(gitcommand, []string{gitcommand, \"fetch\"}, &pa)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/Silently fail?\n\t\t\t\t\tlog.Printf(\"Error fetching: %v\", err)\n\t\t\t\t} else {\n\t\t\t\t\tpr.Cache(key, time.Now())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package autoio\n\nimport \"os\"\nimport \"io\"\nimport \"bufio\"\n\nimport \"errors\"\n\nimport \"compress\/gzip\"\nimport \"compress\/bzip2\"\n\n\n\/\/ Wrap common stream file types into one for ease of scanning\n\/\/\ntype AutoioHandle struct {\n Fp *os.File\n Scanner *bufio.Scanner\n Writer *bufio.Writer\n Reader *bufio.Reader\n\n ReadScanValid bool\n\n Bz2Reader io.Reader\n GzReader *gzip.Reader\n FileType string\n\n Error error\n\n ByteLine []byte\n ByteBuf []byte\n}\n\n\n\/\/ Magic strings we look for at the beginning of the file to determine file type.\n\/\/\nvar magicmap map[string]string =\n map[string]string{ \"\\x1f\\x8b\" : \".gz\" ,\n \"\\x1f\\x9d\" : \".Z\",\n \"\\x42\\x5a\" : \".bz2\" ,\n \"\\x50\\x4b\\x03\\x04\" : \".zip\" }\n\nfunc OpenReadScanner( fn string ) ( h AutoioHandle, err error ) {\n\n h.ByteLine = make( []byte, 4096 )\n h.ByteBuf = make( []byte, 4096 )\n h.ReadScanValid = true\n\n if fn == \"-\" {\n h.Fp = os.Stdin\n h.Reader = bufio.NewReader( h.Fp )\n return h, nil\n }\n\n var sentinalfp *os.File\n\n sentinalfp,err = os.Open( fn )\n if err != nil { return h, err }\n defer sentinalfp.Close()\n\n\n b := make( []byte, 2, 2 )\n n,err := sentinalfp.Read(b)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Reader = bufio.NewReader( h.Fp )\n return h, err\n }\n\n if typ,ok := magicmap[string(b)] ; ok {\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n if typ == \".gz\" {\n h.FileType = \"gz\"\n\n h.GzReader,err = gzip.NewReader( h.Fp )\n if err != nil {\n h.Fp.Close()\n return h, err\n }\n h.Reader = bufio.NewReader( h.GzReader )\n } else if typ == \".bz2\" {\n\n h.FileType = \"bz2\"\n\n h.Bz2Reader = bzip2.NewReader( h.Fp )\n h.Reader = bufio.NewReader( h.Bz2Reader )\n } else {\n err = errors.New(typ + \"extension not supported\")\n }\n\n return h, err\n }\n\n\n b2 := make( []byte, 2, 2)\n n,err = sentinalfp.Read(b2)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Reader = bufio.NewReader( h.Fp )\n return h, err\n }\n\n s := string(b) + string(b2)\n if typ,ok := magicmap[s]; ok {\n if typ == \".zip\" {\n err = errors.New(\"zip extension not supported\")\n return h, err\n }\n err = errors.New(typ + \"extension not supported\")\n return h, err\n }\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Reader = bufio.NewReader( h.Fp )\n\n return h, err\n}\n\nfunc ( h *AutoioHandle ) Err() error { return h.Error }\n\nfunc ( h *AutoioHandle ) ReadScan() bool { return h.ReadScanValid }\n\nfunc ( h *AutoioHandle ) ReadText() string {\n var isprefix bool\n var lerr error\n\n h.ByteLine = h.ByteLine[0:0]\n h.ByteBuf,isprefix,lerr = h.Reader.ReadLine()\n\n if lerr!=nil {\n h.ReadScanValid = false\n h.Error = lerr\n return \"\"\n }\n\n h.ByteLine = append(h.ByteLine, h.ByteBuf...)\n for isprefix {\n h.ByteBuf,isprefix,lerr = h.Reader.ReadLine()\n if lerr!=nil {\n h.ReadScanValid = false\n h.Error = lerr\n return \"\"\n }\n h.ByteLine = append(h.ByteLine,h.ByteBuf...)\n }\n\n return string(h.ByteLine)\n\n}\n\nfunc OpenScanner( fn string ) ( h AutoioHandle, err error ) {\n\n if fn == \"-\" {\n h.Fp = os.Stdin\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, nil\n }\n\n var sentinalfp *os.File\n\n sentinalfp,err = os.Open( fn )\n if err != nil { return h, err }\n defer sentinalfp.Close()\n\n\n b := make( []byte, 2, 2 )\n n,err := sentinalfp.Read(b)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, err\n }\n\n if typ,ok := magicmap[string(b)] ; ok {\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n if typ == \".gz\" {\n h.FileType = \"gz\"\n\n h.GzReader,err = gzip.NewReader( h.Fp )\n if err != nil {\n h.Fp.Close()\n return h, err\n }\n h.Scanner = bufio.NewScanner( h.GzReader )\n } else if typ == \".bz2\" {\n\n h.FileType = \"bz2\"\n\n h.Bz2Reader = bzip2.NewReader( h.Fp )\n h.Scanner = bufio.NewScanner( h.Bz2Reader )\n } else {\n err = errors.New(typ + \"extension not supported\")\n }\n\n return h, err\n }\n\n\n b2 := make( []byte, 2, 2)\n n,err = sentinalfp.Read(b2)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, err\n }\n\n s := string(b) + string(b2)\n if typ,ok := magicmap[s]; ok {\n if typ == \".zip\" {\n err = errors.New(\"zip extension not supported\")\n return h, err\n }\n err = errors.New(typ + \"extension not supported\")\n return h, err\n }\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n\n return h, err\n\n}\n\nfunc CreateWriter( fn string ) ( h AutoioHandle, err error ) {\n\n if fn == \"-\" {\n h.Fp = os.Stdout\n } else {\n h.Fp,err = os.Create( fn )\n if err != nil { return h, err }\n }\n\n h.Writer = bufio.NewWriter( h.Fp )\n return h, nil\n}\n\nfunc (h *AutoioHandle) Flush() {\n\n if h.Writer != nil {\n h.Writer.Flush()\n }\n\n}\n\nfunc ( h *AutoioHandle) Close() error {\n\n if h.FileType == \"gz\" {\n e := h.GzReader.Close()\n if e!=nil { return e }\n }\n\n e := h.Fp.Close()\n return e\n\n}\n<commit_msg>adding simple read scanner<commit_after>package autoio\n\nimport \"os\"\nimport \"io\"\nimport \"bufio\"\n\nimport \"errors\"\n\nimport \"compress\/gzip\"\nimport \"compress\/bzip2\"\n\n\/\/ Wrap common stream file types into one for ease of scanning\n\/\/\ntype AutoioHandle struct {\n Fp *os.File\n Scanner *bufio.Scanner\n Writer *bufio.Writer\n Reader *bufio.Reader\n\n ReadScanValid bool\n\n Bz2Reader io.Reader\n GzReader *gzip.Reader\n FileType string\n\n Error error\n\n ByteLine []byte\n ByteBuf []byte\n}\n\n\n\/\/ Magic strings we look for at the beginning of the file to determine file type.\n\/\/\nvar magicmap map[string]string =\n map[string]string{ \"\\x1f\\x8b\" : \".gz\" ,\n \"\\x1f\\x9d\" : \".Z\",\n \"\\x42\\x5a\" : \".bz2\" ,\n \"\\x50\\x4b\\x03\\x04\" : \".zip\" }\n\nfunc OpenReadScannerSimple( fn string ) ( h AutoioHandle, err error ) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Reader = bufio.NewReader( h.Fp )\n return h, err\n}\n\nfunc OpenReadScanner( fn string ) ( h AutoioHandle, err error ) {\n\n h.ByteLine = make( []byte, 4096 )\n h.ByteBuf = make( []byte, 4096 )\n h.ReadScanValid = true\n\n if fn == \"-\" {\n h.Fp = os.Stdin\n h.Reader = bufio.NewReader( h.Fp )\n return h, nil\n }\n\n var sentinalfp *os.File\n\n sentinalfp,err = os.Open( fn )\n if err != nil { return h, err }\n defer sentinalfp.Close()\n\n\n b := make( []byte, 2, 2 )\n n,err := sentinalfp.Read(b)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n h.Reader = bufio.NewReader( h.Fp )\n return h, err\n }\n\n if typ,ok := magicmap[string(b)] ; ok {\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n if typ == \".gz\" {\n h.FileType = \"gz\"\n\n h.GzReader,err = gzip.NewReader( h.Fp )\n if err != nil {\n h.Fp.Close()\n return h, err\n }\n h.Reader = bufio.NewReader( h.GzReader )\n } else if typ == \".bz2\" {\n\n h.FileType = \"bz2\"\n\n h.Bz2Reader = bzip2.NewReader( h.Fp )\n h.Reader = bufio.NewReader( h.Bz2Reader )\n } else {\n err = errors.New(typ + \"extension not supported\")\n }\n\n return h, err\n }\n\n\n b2 := make( []byte, 2, 2)\n n,err = sentinalfp.Read(b2)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Reader = bufio.NewReader( h.Fp )\n return h, err\n }\n\n s := string(b) + string(b2)\n if typ,ok := magicmap[s]; ok {\n if typ == \".zip\" {\n err = errors.New(\"zip extension not supported\")\n return h, err\n }\n err = errors.New(typ + \"extension not supported\")\n return h, err\n }\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n h.Reader = bufio.NewReader( h.Fp )\n\n return h, err\n}\n\nfunc ( h *AutoioHandle ) Err() error { return h.Error }\n\nfunc ( h *AutoioHandle ) ReadScan() bool { return h.ReadScanValid }\n\nfunc ( h *AutoioHandle ) ReadText() string {\n var isprefix bool\n var lerr error\n\n h.ByteLine = h.ByteLine[0:0]\n h.ByteBuf,isprefix,lerr = h.Reader.ReadLine()\n\n if lerr!=nil {\n h.ReadScanValid = false\n h.Error = lerr\n return \"\"\n }\n\n\n h.ByteLine = append(h.ByteLine, h.ByteBuf...)\n\n for isprefix {\n h.ByteBuf,isprefix,lerr = h.Reader.ReadLine()\n if lerr!=nil {\n h.ReadScanValid = false\n h.Error = lerr\n return \"\"\n }\n h.ByteLine = append(h.ByteLine,h.ByteBuf...)\n }\n\n return string(h.ByteLine)\n\n}\n\nfunc OpenScanner( fn string ) ( h AutoioHandle, err error ) {\n\n if fn == \"-\" {\n h.Fp = os.Stdin\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, nil\n }\n\n var sentinalfp *os.File\n\n sentinalfp,err = os.Open( fn )\n if err != nil { return h, err }\n defer sentinalfp.Close()\n\n\n b := make( []byte, 2, 2 )\n n,err := sentinalfp.Read(b)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, err\n }\n\n if typ,ok := magicmap[string(b)] ; ok {\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n\n if typ == \".gz\" {\n h.FileType = \"gz\"\n\n h.GzReader,err = gzip.NewReader( h.Fp )\n if err != nil {\n h.Fp.Close()\n return h, err\n }\n h.Scanner = bufio.NewScanner( h.GzReader )\n } else if typ == \".bz2\" {\n\n h.FileType = \"bz2\"\n\n h.Bz2Reader = bzip2.NewReader( h.Fp )\n h.Scanner = bufio.NewScanner( h.Bz2Reader )\n } else {\n err = errors.New(typ + \"extension not supported\")\n }\n\n return h, err\n }\n\n\n b2 := make( []byte, 2, 2)\n n,err = sentinalfp.Read(b2)\n if (n<2) || (err != nil) {\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n return h, err\n }\n\n s := string(b) + string(b2)\n if typ,ok := magicmap[s]; ok {\n if typ == \".zip\" {\n err = errors.New(\"zip extension not supported\")\n return h, err\n }\n err = errors.New(typ + \"extension not supported\")\n return h, err\n }\n\n h.Fp,err = os.Open( fn )\n if err != nil { return h, err }\n h.Scanner = bufio.NewScanner( h.Fp )\n\n return h, err\n\n}\n\nfunc CreateWriter( fn string ) ( h AutoioHandle, err error ) {\n\n if fn == \"-\" {\n h.Fp = os.Stdout\n } else {\n h.Fp,err = os.Create( fn )\n if err != nil { return h, err }\n }\n\n h.Writer = bufio.NewWriter( h.Fp )\n return h, nil\n}\n\nfunc (h *AutoioHandle) Flush() {\n\n if h.Writer != nil {\n h.Writer.Flush()\n }\n\n}\n\nfunc ( h *AutoioHandle) Close() error {\n\n if h.FileType == \"gz\" {\n e := h.GzReader.Close()\n if e!=nil { return e }\n }\n\n e := h.Fp.Close()\n return e\n\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"net\/http\"\n\n\t\"google.golang.org\/api\/iterator\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"cloud.google.com\/go\/datastore\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\nvar (\n\tpostListRoute = Route{\n\t\tPath: \"\/posts\",\n\t\tDescription: \"REST handler for listing and creating posts\",\n\t\tActions: map[string]http.HandlerFunc{\n\t\t\t\"GET\": listPosts,\n\t\t\t\"POST\": createPost,\n\t\t},\n\t}\n\n\tpostRoute = Route{\n\t\tPath: \"\/posts\/{postID:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}}\",\n\t\tDescription: \"REST handler getting, updating, and deleting posts\",\n\t\tActions: map[string]http.HandlerFunc{\n\t\t\t\"GET\": getPost,\n\t\t\t\"PUT\": updatePost,\n\t\t},\n\t}\n)\n\nfunc listPosts(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tq := datastore.NewQuery(\"Post\")\n\titer := datastoreClient.Run(ctx, q)\n\tposts := []Post{}\n\n\tfor {\n\t\tvar post Post\n\t\t_, err := iter.Next(&post)\n\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching post: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post)\n\t}\n\tserveJSON(w, posts)\n}\n\nfunc getPost(w http.ResponseWriter, r *http.Request) {\n\tpostID := mux.Vars(r)[\"postID\"]\n\n\tctx := context.Background()\n\tkey := datastore.NameKey(\"Post\", postID, nil)\n\tvar post Post\n\tdatastoreClient.Get(ctx, key, &post)\n\tserveJSON(w, post)\n}\n\nfunc updatePost(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tpostID := mux.Vars(r)[\"postID\"]\n\tpost := parseBody(r, new(Post)).(*Post)\n\tpost.ID = postID\n\n\tif err := post.Validate(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tkey := datastore.NameKey(\"Post\", postID, nil)\n\tdatastoreClient.Put(ctx, key, post)\n\tserveJSON(w, post)\n}\n\nfunc createPost(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tpost := parseBody(r, new(Post)).(*Post)\n\tpost.ID = uuid.NewV4().String()\n\n\tif err := post.Validate(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tkey := datastore.NameKey(\"Post\", post.ID, nil)\n\tdatastoreClient.Put(ctx, key, post)\n\tserveJSON(w, post)\n}\n<commit_msg>Make api work and setting up frontend routes<commit_after>package backend\n\nimport (\n\t\"net\/http\"\n\n\t\"google.golang.org\/api\/iterator\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"cloud.google.com\/go\/datastore\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\nvar (\n\tuuidRegex = \"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\"\n\tpostListRoute = Route{\n\t\tPath: \"\/posts\",\n\t\tDescription: \"REST handler for listing and creating posts\",\n\t\tActions: map[string]http.HandlerFunc{\n\t\t\t\"GET\": listPosts,\n\t\t\t\"POST\": createPost,\n\t\t},\n\t}\n\n\tpostRoute = Route{\n\t\tPath: \"\/posts\/{postID:\" + uuidRegex + \"}\",\n\t\tDescription: \"REST handler getting, updating, and deleting posts\",\n\t\tActions: map[string]http.HandlerFunc{\n\t\t\t\"GET\": getPost,\n\t\t\t\"PUT\": updatePost,\n\t\t},\n\t}\n)\n\nfunc listPosts(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tq := datastore.NewQuery(\"Post\")\n\titer := datastoreClient.Run(ctx, q)\n\tposts := []Post{}\n\n\tfor {\n\t\tvar post Post\n\t\t_, err := iter.Next(&post)\n\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching post: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post)\n\t}\n\tserveJSON(w, posts)\n}\n\nfunc getPost(w http.ResponseWriter, r *http.Request) {\n\tpostID := mux.Vars(r)[\"postID\"]\n\n\tctx := context.Background()\n\tkey := datastore.NameKey(\"Post\", postID, nil)\n\tvar post Post\n\tdatastoreClient.Get(ctx, key, &post)\n\tserveJSON(w, post)\n}\n\nfunc updatePost(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tpostID := mux.Vars(r)[\"postID\"]\n\tpost := parseBody(r, new(Post)).(*Post)\n\tpost.ID = postID\n\n\tif err := post.Validate(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tkey := datastore.NameKey(\"Post\", postID, nil)\n\tdatastoreClient.Put(ctx, key, post)\n\tserveJSON(w, post)\n}\n\nfunc createPost(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tpost := parseBody(r, new(Post)).(*Post)\n\tpost.ID = uuid.NewV4().String()\n\n\tif err := post.Validate(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tkey := datastore.NameKey(\"Post\", post.ID, nil)\n\tdatastoreClient.Put(ctx, key, post)\n\tserveJSON(w, post)\n}\n<|endoftext|>"} {"text":"<commit_before>package goutils\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/ErikDubbelboer\/gspt\"\n)\n\nvar Process Processes\nvar Command Processes \/\/ deprecated\n\ntype Processes struct {\n}\n\nfunc (c *Processes) Execute(timeout int, stdin []byte, parms ...string) ([]byte, []byte, error) {\n\tif len(parms) < 1 {\n\t\treturn nil, nil, errors.New(\"execute: command missing\")\n\t}\n\tcmd := exec.Command(parms[0], parms[1:]...)\n\tvar bout bytes.Buffer\n\tvar berr bytes.Buffer\n\tcmd.Stdout = &bout\n\tcmd.Stderr = &berr\n\tin, ierr := cmd.StdinPipe()\n\tif ierr != nil {\n\t\treturn nil, nil, errors.New(\"execute: stdin failed\")\n\t}\n\tcmd.Start()\n\tdone := make(chan error)\n\tgo func() {\n\t\tin.Write(stdin)\n\t\tin.Close()\n\t\tdone <- cmd.Wait()\n\t}()\n\tselect {\n\tcase <-time.After(time.Second * time.Duration(timeout)):\n\t\tgo func() { <-done }() \/\/ allow goroutine to exit\n\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\treturn bout.Bytes(), berr.Bytes(), errors.New(ErrorTimeoutKill)\n\t\t}\n\t\treturn bout.Bytes(), berr.Bytes(), errors.New(ErrorTimeout)\n\tcase status := <-done:\n\t\tout := berr.Bytes()\n\t\tif status != nil {\n\t\t\treturn bout.Bytes(), out, status\n\t\t} else {\n\t\t\treturn bout.Bytes(), out, nil\n\t\t}\n\t}\n}\n\nfunc (c *Processes) SetProcTitle(title string) {\n\tgspt.SetProcTitle(title)\n}\n\nfunc (c *Processes) AppendProcTitleAppend(title string) {\n\tgspt.SetProcTitle(fmt.Sprintf(\"%s%s\", os.Args[0], title))\n}\n<commit_msg>bugfix till upstream bugfix of SetProcTitle<commit_after>package goutils\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ErikDubbelboer\/gspt\"\n)\n\nvar Process Processes\nvar Command Processes \/\/ deprecated\n\ntype Processes struct {\n}\n\nfunc (c *Processes) Execute(timeout int, stdin []byte, parms ...string) ([]byte, []byte, error) {\n\tif len(parms) < 1 {\n\t\treturn nil, nil, errors.New(\"execute: command missing\")\n\t}\n\tcmd := exec.Command(parms[0], parms[1:]...)\n\tvar bout bytes.Buffer\n\tvar berr bytes.Buffer\n\tcmd.Stdout = &bout\n\tcmd.Stderr = &berr\n\tin, ierr := cmd.StdinPipe()\n\tif ierr != nil {\n\t\treturn nil, nil, errors.New(\"execute: stdin failed\")\n\t}\n\tcmd.Start()\n\tdone := make(chan error)\n\tgo func() {\n\t\tin.Write(stdin)\n\t\tin.Close()\n\t\tdone <- cmd.Wait()\n\t}()\n\tselect {\n\tcase <-time.After(time.Second * time.Duration(timeout)):\n\t\tgo func() { <-done }() \/\/ allow goroutine to exit\n\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\treturn bout.Bytes(), berr.Bytes(), errors.New(ErrorTimeoutKill)\n\t\t}\n\t\treturn bout.Bytes(), berr.Bytes(), errors.New(ErrorTimeout)\n\tcase status := <-done:\n\t\tout := berr.Bytes()\n\t\tif status != nil {\n\t\t\treturn bout.Bytes(), out, status\n\t\t} else {\n\t\t\treturn bout.Bytes(), out, nil\n\t\t}\n\t}\n}\n\nfunc (c *Processes) SetProcTitle(title string) {\n\tgspt.SetProcTitle(title)\n}\n\nfunc (c *Processes) AppendProcTitleAppend(title string) {\n\tgspt.SetProcTitle(fmt.Sprintf(\"%s%s\", strings.Join(os.Args, \" \"), title))\n}\n\nfunc init() {\n\tgo func() {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tProcess.AppendProcTitleAppend(\"\")\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package nakadi\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ProcessorOptions contains optional parameters that are used to create a Processor.\ntype ProcessorOptions struct {\n\t\/\/ The maximum number of Events in each chunk (and therefore per partition) of the stream (default: 1)\n\tBatchLimit uint\n\t\/\/ Maximum time in seconds to wait for the flushing of each chunk (per partition).(default: 30)\n\tFlushTimeout uint\n\t\/\/ The number of parallel streams the Processor will use to consume events (default: 1)\n\tStreamCount uint\n\t\/\/ Limits the number of events that the processor will handle per minute. This value represents an\n\t\/\/ upper bound, if some streams are not healthy e.g. if StreamCount exceeds the number of partitions,\n\t\/\/ or the actual batch size is lower than BatchLimit the actual number of processed events can be\n\t\/\/ much lower. 0 is interpreted as no limit at all (default: no limit)\n\tEventsPerMinute uint\n\t\/\/ The amount of uncommitted events Nakadi will stream before pausing the stream. When in paused\n\t\/\/ state and commit comes - the stream will resume. If MaxUncommittedEvents is lower than BatchLimit,\n\t\/\/ effective batch size will be upperbound by MaxUncommittedEvents. (default: 10, minimum: 1)\n\tMaxUncommittedEvents uint\n\t\/\/ The initial (minimal) retry interval used for the exponential backoff. This value is applied for\n\t\/\/ stream initialization as well as for cursor commits.\n\tInitialRetryInterval time.Duration\n\t\/\/ MaxRetryInterval the maximum retry interval. Once the exponential backoff reaches this value\n\t\/\/ the retry intervals remain constant. This value is applied for stream initialization as well as\n\t\/\/ for cursor commits.\n\tMaxRetryInterval time.Duration\n\t\/\/ CommitMaxElapsedTime is the maximum time spent on retries when committing a cursor. Once this value\n\t\/\/ was reached the exponential backoff is halted and the cursor will not be committed.\n\tCommitMaxElapsedTime time.Duration\n\t\/\/ NotifyErr is called when an error occurs that leads to a retry. This notify function can be used to\n\t\/\/ detect unhealthy streams. The first parameter indicates the stream No that encountered the error.\n\tNotifyErr func(uint, error, time.Duration)\n\t\/\/ NotifyOK is called whenever a successful operation was completed. This notify function can be used\n\t\/\/ to detect that a stream is healthy again. The first parameter indicates the stream No that just\n\t\/\/ regained health.\n\tNotifyOK func(uint)\n}\n\nfunc (o *ProcessorOptions) withDefaults() *ProcessorOptions {\n\tvar copyOptions ProcessorOptions\n\tif o != nil {\n\t\tcopyOptions = *o\n\t}\n\tif copyOptions.BatchLimit == 0 {\n\t\tcopyOptions.BatchLimit = 1\n\t}\n\tif copyOptions.FlushTimeout == 0 {\n\t\tcopyOptions.FlushTimeout = 30\n\t}\n\tif copyOptions.StreamCount == 0 {\n\t\tcopyOptions.StreamCount = 1\n\t}\n\tif copyOptions.EventsPerMinute == 0 {\n\t\tcopyOptions.EventsPerMinute = 0\n\t}\n\tif copyOptions.InitialRetryInterval == 0 {\n\t\tcopyOptions.InitialRetryInterval = defaultInitialRetryInterval\n\t}\n\tif copyOptions.MaxRetryInterval == 0 {\n\t\tcopyOptions.MaxRetryInterval = defaultMaxRetryInterval\n\t}\n\tif copyOptions.CommitMaxElapsedTime == 0 {\n\t\tcopyOptions.CommitMaxElapsedTime = defaultMaxElapsedTime\n\t}\n\tif copyOptions.NotifyErr == nil {\n\t\tcopyOptions.NotifyErr = func(_ uint, _ error, _ time.Duration) {}\n\t}\n\tif copyOptions.NotifyOK == nil {\n\t\tcopyOptions.NotifyOK = func(_ uint) {}\n\t}\n\tif copyOptions.MaxUncommittedEvents == 0 {\n\t\tcopyOptions.MaxUncommittedEvents = 10\n\t}\n\treturn ©Options\n}\n\n\/\/ streamAPI is a contract that is used internally in order to be able to mock StreamAPI\ntype streamAPI interface {\n\tNextEvents() (Cursor, []byte, error)\n\tCommitCursor(cursor Cursor) error\n\tClose() error\n}\n\n\/\/ NewProcessor creates a new processor for a given subscription ID. The constructor receives a\n\/\/ configured Nakadi client as first parameter. Furthermore a valid subscription ID must be\n\/\/ provided. The last parameter is a struct containing only optional parameters. The options may be\n\/\/ nil, in this case the processor falls back to the defaults defined in the ProcessorOptions.\nfunc NewProcessor(client *Client, subscriptionID string, options *ProcessorOptions) *Processor {\n\toptions = options.withDefaults()\n\n\tvar timePerBatchPerStream int64\n\tif options.EventsPerMinute > 0 {\n\t\ttimePerBatchPerStream = int64(time.Minute) \/ int64(options.EventsPerMinute) * int64(options.StreamCount) * int64(options.BatchLimit)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tprocessor := &Processor{\n\t\tclient: client,\n\t\tsubscriptionID: subscriptionID,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tnewStream: func(client *Client, id string, options *StreamOptions) streamAPI {\n\t\t\treturn NewStream(client, id, options)\n\t\t},\n\t\ttimePerBatchPerStream: time.Duration(timePerBatchPerStream),\n\t\tcloseErrorCh: make(chan error)}\n\n\tfor i := uint(0); i < options.StreamCount; i++ {\n\t\tstreamNo := i\n\t\tstreamOptions := StreamOptions{\n\t\t\tBatchLimit: options.BatchLimit,\n\t\t\tFlushTimeout: options.FlushTimeout,\n\t\t\tMaxUncommittedEvents: options.MaxUncommittedEvents,\n\t\t\tInitialRetryInterval: options.InitialRetryInterval,\n\t\t\tMaxRetryInterval: options.MaxRetryInterval,\n\t\t\tCommitMaxElapsedTime: options.CommitMaxElapsedTime,\n\t\t\tNotifyErr: func(err error, duration time.Duration) { options.NotifyErr(streamNo, err, duration) },\n\t\t\tNotifyOK: func() { options.NotifyOK(streamNo) },\n\t\t}\n\t\tprocessor.streamOptions = append(processor.streamOptions, streamOptions)\n\t}\n\n\treturn processor\n}\n\n\/\/ A Processor for nakadi events. The Processor is a high level API for consuming events from\n\/\/ Nakadi. It can process event batches from multiple partitions (streams) and can be configured\n\/\/ with a certain event rate, that limits the number of processed events per minute. The cursors of\n\/\/ event batches that were successfully processed are automatically committed.\ntype Processor struct {\n\tsync.Mutex\n\tclient *Client\n\tsubscriptionID string\n\tstreamOptions []StreamOptions\n\tnewStream func(*Client, string, *StreamOptions) streamAPI\n\ttimePerBatchPerStream time.Duration\n\tisStarted bool\n\tctx context.Context\n\tcancel context.CancelFunc\n\tcloseErrorCh chan error\n}\n\n\/\/ Operation defines a certain procedure that consumes the event data from a processor. An operation,\n\/\/ can either be successful or may fail with an error. Operation receives three parameters: the stream\n\/\/ number or position in the processor, the Nakadi stream id of the underlying stream, and the json\n\/\/ encoded event payload.\ntype Operation func(int, string, []byte) error\n\n\/\/ Start begins event processing. All event batches received from the underlying streams are passed to\n\/\/ the operation function. If the operation function terminates without error the respective cursor will\n\/\/ be automatically committed to Nakadi. If the operations terminates with an error, the underlying stream\n\/\/ will be halted and a new stream will continue to pass event batches to the operation function.\n\/\/\n\/\/ Event processing will go on indefinitely unless the processor is stopped via its Stop method. Star will\n\/\/ return an error if the processor is already running.\nfunc (p *Processor) Start(operation Operation) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.isStarted {\n\t\treturn errors.New(\"processor was already started\")\n\t}\n\tp.isStarted = true\n\n\tfor streamNo, options := range p.streamOptions {\n\t\tgo p.startSingleStream(operation, streamNo, options)\n\t}\n\n\treturn nil\n}\n\n\/\/ startSingleStream starts a single stream with a given stream number \/ position. After the stream has been\n\/\/ started it consumes events. In cases of errors the stream is closed and a new stream will be opened.\nfunc (p *Processor) startSingleStream(operation Operation, streamNo int, options StreamOptions) {\n\tstream := p.newStream(p.client, p.subscriptionID, &options)\n\n\tif p.timePerBatchPerStream > 0 {\n\t\tinitialWait := rand.Int63n(int64(p.timePerBatchPerStream))\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\treturn\n\t\tcase <-time.After(time.Duration(initialWait)):\n\t\t\t\/\/ nothing\n\t\t}\n\t}\n\n\tfor {\n\t\tstart := time.Now()\n\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\treturn\n\t\tdefault:\n\t\t\tcursor, events, err := stream.NextEvents()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = operation(streamNo, cursor.NakadiStreamID, events)\n\t\t\tif err != nil {\n\t\t\t\terr = stream.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\toptions.NotifyErr(err, 0)\n\t\t\t\t}\n\t\t\t\tstream = p.newStream(p.client, p.subscriptionID, &options)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstream.CommitCursor(cursor)\n\t\t}\n\n\t\telapsed := time.Since(start)\n\t\tif elapsed < p.timePerBatchPerStream {\n\t\t\tselect {\n\t\t\tcase <-p.ctx.Done():\n\t\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\t\treturn\n\t\t\tcase <-time.After(p.timePerBatchPerStream - elapsed):\n\t\t\t\t\/\/ nothing\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Stop halts all steams and terminates event processing. Stop will return with an error if the processor\n\/\/ is not running.\nfunc (p *Processor) Stop() error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif !p.isStarted {\n\t\treturn errors.New(\"processor is not running\")\n\t}\n\n\tp.cancel()\n\n\tvar errCount int\n\tfor i := 0; i < len(p.streamOptions); i++ {\n\t\terr := <-p.closeErrorCh\n\t\tif err != nil {\n\t\t\terrCount++\n\t\t}\n\t}\n\tif errCount > 0 {\n\t\treturn errors.Errorf(\"%d streams had errors while closing the stream\", errCount)\n\t}\n\n\treturn nil\n}\n<commit_msg>issue-46 fixed calls of NotifyErr callback<commit_after>package nakadi\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ProcessorOptions contains optional parameters that are used to create a Processor.\ntype ProcessorOptions struct {\n\t\/\/ The maximum number of Events in each chunk (and therefore per partition) of the stream (default: 1)\n\tBatchLimit uint\n\t\/\/ Maximum time in seconds to wait for the flushing of each chunk (per partition).(default: 30)\n\tFlushTimeout uint\n\t\/\/ The number of parallel streams the Processor will use to consume events (default: 1)\n\tStreamCount uint\n\t\/\/ Limits the number of events that the processor will handle per minute. This value represents an\n\t\/\/ upper bound, if some streams are not healthy e.g. if StreamCount exceeds the number of partitions,\n\t\/\/ or the actual batch size is lower than BatchLimit the actual number of processed events can be\n\t\/\/ much lower. 0 is interpreted as no limit at all (default: no limit)\n\tEventsPerMinute uint\n\t\/\/ The amount of uncommitted events Nakadi will stream before pausing the stream. When in paused\n\t\/\/ state and commit comes - the stream will resume. If MaxUncommittedEvents is lower than BatchLimit,\n\t\/\/ effective batch size will be upperbound by MaxUncommittedEvents. (default: 10, minimum: 1)\n\tMaxUncommittedEvents uint\n\t\/\/ The initial (minimal) retry interval used for the exponential backoff. This value is applied for\n\t\/\/ stream initialization as well as for cursor commits.\n\tInitialRetryInterval time.Duration\n\t\/\/ MaxRetryInterval the maximum retry interval. Once the exponential backoff reaches this value\n\t\/\/ the retry intervals remain constant. This value is applied for stream initialization as well as\n\t\/\/ for cursor commits.\n\tMaxRetryInterval time.Duration\n\t\/\/ CommitMaxElapsedTime is the maximum time spent on retries when committing a cursor. Once this value\n\t\/\/ was reached the exponential backoff is halted and the cursor will not be committed.\n\tCommitMaxElapsedTime time.Duration\n\t\/\/ NotifyErr is called when an error occurs that leads to a retry. This notify function can be used to\n\t\/\/ detect unhealthy streams. The first parameter indicates the stream No that encountered the error.\n\tNotifyErr func(uint, error, time.Duration)\n\t\/\/ NotifyOK is called whenever a successful operation was completed. This notify function can be used\n\t\/\/ to detect that a stream is healthy again. The first parameter indicates the stream No that just\n\t\/\/ regained health.\n\tNotifyOK func(uint)\n}\n\nfunc (o *ProcessorOptions) withDefaults() *ProcessorOptions {\n\tvar copyOptions ProcessorOptions\n\tif o != nil {\n\t\tcopyOptions = *o\n\t}\n\tif copyOptions.BatchLimit == 0 {\n\t\tcopyOptions.BatchLimit = 1\n\t}\n\tif copyOptions.FlushTimeout == 0 {\n\t\tcopyOptions.FlushTimeout = 30\n\t}\n\tif copyOptions.StreamCount == 0 {\n\t\tcopyOptions.StreamCount = 1\n\t}\n\tif copyOptions.EventsPerMinute == 0 {\n\t\tcopyOptions.EventsPerMinute = 0\n\t}\n\tif copyOptions.InitialRetryInterval == 0 {\n\t\tcopyOptions.InitialRetryInterval = defaultInitialRetryInterval\n\t}\n\tif copyOptions.MaxRetryInterval == 0 {\n\t\tcopyOptions.MaxRetryInterval = defaultMaxRetryInterval\n\t}\n\tif copyOptions.CommitMaxElapsedTime == 0 {\n\t\tcopyOptions.CommitMaxElapsedTime = defaultMaxElapsedTime\n\t}\n\tif copyOptions.NotifyErr == nil {\n\t\tcopyOptions.NotifyErr = func(_ uint, _ error, _ time.Duration) {}\n\t}\n\tif copyOptions.NotifyOK == nil {\n\t\tcopyOptions.NotifyOK = func(_ uint) {}\n\t}\n\tif copyOptions.MaxUncommittedEvents == 0 {\n\t\tcopyOptions.MaxUncommittedEvents = 10\n\t}\n\treturn ©Options\n}\n\n\/\/ streamAPI is a contract that is used internally in order to be able to mock StreamAPI\ntype streamAPI interface {\n\tNextEvents() (Cursor, []byte, error)\n\tCommitCursor(cursor Cursor) error\n\tClose() error\n}\n\n\/\/ NewProcessor creates a new processor for a given subscription ID. The constructor receives a\n\/\/ configured Nakadi client as first parameter. Furthermore a valid subscription ID must be\n\/\/ provided. The last parameter is a struct containing only optional parameters. The options may be\n\/\/ nil, in this case the processor falls back to the defaults defined in the ProcessorOptions.\nfunc NewProcessor(client *Client, subscriptionID string, options *ProcessorOptions) *Processor {\n\toptions = options.withDefaults()\n\n\tvar timePerBatchPerStream int64\n\tif options.EventsPerMinute > 0 {\n\t\ttimePerBatchPerStream = int64(time.Minute) \/ int64(options.EventsPerMinute) * int64(options.StreamCount) * int64(options.BatchLimit)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tprocessor := &Processor{\n\t\tclient: client,\n\t\tsubscriptionID: subscriptionID,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tnewStream: func(client *Client, id string, options *StreamOptions) streamAPI {\n\t\t\treturn NewStream(client, id, options)\n\t\t},\n\t\ttimePerBatchPerStream: time.Duration(timePerBatchPerStream),\n\t\tcloseErrorCh: make(chan error)}\n\n\tfor i := uint(0); i < options.StreamCount; i++ {\n\t\tstreamNo := i\n\t\tstreamOptions := StreamOptions{\n\t\t\tBatchLimit: options.BatchLimit,\n\t\t\tFlushTimeout: options.FlushTimeout,\n\t\t\tMaxUncommittedEvents: options.MaxUncommittedEvents,\n\t\t\tInitialRetryInterval: options.InitialRetryInterval,\n\t\t\tMaxRetryInterval: options.MaxRetryInterval,\n\t\t\tCommitMaxElapsedTime: options.CommitMaxElapsedTime,\n\t\t\tNotifyErr: func(err error, duration time.Duration) { options.NotifyErr(streamNo, err, duration) },\n\t\t\tNotifyOK: func() { options.NotifyOK(streamNo) },\n\t\t}\n\t\tprocessor.streamOptions = append(processor.streamOptions, streamOptions)\n\t}\n\n\treturn processor\n}\n\n\/\/ A Processor for nakadi events. The Processor is a high level API for consuming events from\n\/\/ Nakadi. It can process event batches from multiple partitions (streams) and can be configured\n\/\/ with a certain event rate, that limits the number of processed events per minute. The cursors of\n\/\/ event batches that were successfully processed are automatically committed.\ntype Processor struct {\n\tsync.Mutex\n\tclient *Client\n\tsubscriptionID string\n\tstreamOptions []StreamOptions\n\tnewStream func(*Client, string, *StreamOptions) streamAPI\n\ttimePerBatchPerStream time.Duration\n\tisStarted bool\n\tctx context.Context\n\tcancel context.CancelFunc\n\tcloseErrorCh chan error\n}\n\n\/\/ Operation defines a certain procedure that consumes the event data from a processor. An operation,\n\/\/ can either be successful or may fail with an error. Operation receives three parameters: the stream\n\/\/ number or position in the processor, the Nakadi stream id of the underlying stream, and the json\n\/\/ encoded event payload.\ntype Operation func(int, string, []byte) error\n\n\/\/ Start begins event processing. All event batches received from the underlying streams are passed to\n\/\/ the operation function. If the operation function terminates without error the respective cursor will\n\/\/ be automatically committed to Nakadi. If the operations terminates with an error, the underlying stream\n\/\/ will be halted and a new stream will continue to pass event batches to the operation function.\n\/\/\n\/\/ Event processing will go on indefinitely unless the processor is stopped via its Stop method. Star will\n\/\/ return an error if the processor is already running.\nfunc (p *Processor) Start(operation Operation) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.isStarted {\n\t\treturn errors.New(\"processor was already started\")\n\t}\n\tp.isStarted = true\n\n\tfor streamNo, options := range p.streamOptions {\n\t\tgo p.startSingleStream(operation, streamNo, options)\n\t}\n\n\treturn nil\n}\n\n\/\/ startSingleStream starts a single stream with a given stream number \/ position. After the stream has been\n\/\/ started it consumes events. In cases of errors the stream is closed and a new stream will be opened.\nfunc (p *Processor) startSingleStream(operation Operation, streamNo int, options StreamOptions) {\n\tstream := p.newStream(p.client, p.subscriptionID, &options)\n\n\tif p.timePerBatchPerStream > 0 {\n\t\tinitialWait := rand.Int63n(int64(p.timePerBatchPerStream))\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\treturn\n\t\tcase <-time.After(time.Duration(initialWait)):\n\t\t\t\/\/ nothing\n\t\t}\n\t}\n\n\tfor {\n\t\tstart := time.Now()\n\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\treturn\n\t\tdefault:\n\t\t\tcursor, events, err := stream.NextEvents()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = operation(streamNo, cursor.NakadiStreamID, events)\n\t\t\tif err != nil {\n\t\t\t\toptions.NotifyErr(err, 0)\n\t\t\t\tstream.Close()\n\t\t\t\tstream = p.newStream(p.client, p.subscriptionID, &options)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = stream.CommitCursor(cursor)\n\t\t\tif err != nil {\n\t\t\t\toptions.NotifyErr(err, 0)\n\t\t\t\tstream.Close()\n\t\t\t}\n\t\t}\n\n\t\telapsed := time.Since(start)\n\t\tif elapsed < p.timePerBatchPerStream {\n\t\t\tselect {\n\t\t\tcase <-p.ctx.Done():\n\t\t\t\tp.closeErrorCh <- stream.Close()\n\t\t\t\treturn\n\t\t\tcase <-time.After(p.timePerBatchPerStream - elapsed):\n\t\t\t\t\/\/ nothing\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Stop halts all steams and terminates event processing. Stop will return with an error if the processor\n\/\/ is not running.\nfunc (p *Processor) Stop() error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif !p.isStarted {\n\t\treturn errors.New(\"processor is not running\")\n\t}\n\n\tp.cancel()\n\n\tvar errCount int\n\tfor i := 0; i < len(p.streamOptions); i++ {\n\t\terr := <-p.closeErrorCh\n\t\tif err != nil {\n\t\t\terrCount++\n\t\t}\n\t}\n\tif errCount > 0 {\n\t\treturn errors.Errorf(\"%d streams had errors while closing the stream\", errCount)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package services\n\nimport (\n\tjcontext \"context\"\n\t\"hash\/crc32\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/JREAMLU\/core\/com\"\n\t\"github.com\/JREAMLU\/core\/db\/mysql\"\n\t\"github.com\/JREAMLU\/core\/global\"\n\tio \"github.com\/JREAMLU\/core\/inout\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mentity\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mmysql\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mredis\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/services\/atom\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/services\/entity\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/astaxie\/beego\/validation\"\n\t\"github.com\/beego\/i18n\"\n\t\"github.com\/pquerna\/ffjson\/ffjson\"\n)\n\n\/\/ type Url struct {\n\/\/ \tMeta struct {\n\/\/ \t\tAuth string\n\/\/ \t} `json:\"meta\" valid:\"Required\"`\n\/\/ \tData struct {\n\/\/ \t\tUrls []struct {\n\/\/ \t\t\tLongURL string `json:\"long_url\" valid:\"Required\"`\n\/\/ \t\t\tIP string `json:\"ip\" valid:\"IP\"`\n\/\/ \t\t} `json:\"urls\" valid:\"Required\"`\n\/\/ \t} `json:\"data\" valid:\"Required\"`\n\/\/ }\n\ntype Url struct {\n\tMeta struct {\n\t\tAuth string\n\t} `json:\"meta\" valid:\"Required\"`\n\tData struct {\n\t\tUrls []interface{} `json:\"urls\" valid:\"Required\"`\n\t\tIP string `json:\"ip\" valid:\"IP\"`\n\t} `json:\"data\" valid:\"Required\"`\n}\n\ntype UrlExpand struct {\n\tShorten []string `json:\"shorten\" valid:\"Required\"`\n}\n\nvar ip string\n\nconst (\n\tDELETE = 0\n\tNORMAL = 1\n)\n\nfunc GetParams(url Url) Url {\n\treturn url\n}\n\nfunc (r *Url) Valid(v *validation.Validation) {}\n\nfunc (r *Url) GoShorten(jctx jcontext.Context, data map[string]interface{}) (httpStatus int, output io.Output) {\n\tffjson.Unmarshal(data[\"body\"].([]byte), r)\n\tip = data[\"headermap\"].(http.Header)[\"X-Forwarded-For\"][0]\n\tch, err := io.InputParamsCheck(jctx, data, &r.Data)\n\tif err != nil {\n\t\treturn http.StatusExpectationFailed, io.Fail(ch.Message, \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tif len(r.Data.Urls) > 10 {\n\t\treturn http.StatusExpectationFailed, io.Fail(i18n.Tr(global.Lang, \"url.NUMBERLIMIT\"), \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tlist, err := shorten(r)\n\tif err != nil {\n\t\tbeego.Info(jctx.Value(\"requestID\").(string), \":\", \"shorten error: \", err)\n\t\treturn http.StatusExpectationFailed, io.Fail(i18n.Tr(global.Lang, \"url.SHORTENILLEGAL\"), \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tvar datalist entity.DataList\n\tdatalist.List = list\n\tdatalist.Total = len(list)\n\n\treturn http.StatusCreated, io.Suc(datalist, ch.RequestID)\n}\n\nfunc shorten(r *Url) (map[string]interface{}, error) {\n\tlist, err := setDB(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}\n\nfunc setDB(r *Url) (map[string]interface{}, error) {\n\tvar shortenMap = make(map[string]interface{})\n\treply, err := mredis.ShortenHMGet(r.Data.Urls)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texist, notExistLongCRCList, notExistMapList := splitExistOrNot(r, reply)\n\tif len(notExistLongCRCList) == 0 && len(notExistMapList) == 0 {\n\t\treturn exist, nil\n\t}\n\n\texistShortListInDB, err := mmysql.GetShortens(notExistLongCRCList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texistQueue, existQueueShortenList, existQueueExpandList, notExistMapList := getAllData(existShortListInDB, notExistMapList)\n\tif len(existQueue) == 0 {\n\t\treturn exist, nil\n\t}\n\n\ttx := mysql.X.Begin()\n\tif len(notExistMapList) > 0 {\n\t\terr = mmysql.ShortenInBatch(notExistMapList, tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(existQueueShortenList) > 0 {\n\t\t_, err = mredis.ShortenHMSet(existQueueShortenList)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(existQueueExpandList) > 0 {\n\t\t_, err = mredis.ExpandHMSet(existQueueExpandList)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ttx.Commit()\n\n\tshortenMap = com.MapMerge(exist, existQueue)\n\n\treturn shortenMap, nil\n}\n\nfunc splitExistOrNot(r *Url, reply []string) (map[string]interface{}, []uint64, []mentity.Redirect) {\n\texist := make(map[string]interface{})\n\tvar notExistLongCRCList []uint64\n\tvar notExistMapList []mentity.Redirect\n\tvar redirect mentity.Redirect\n\tfor key, url := range r.Data.Urls {\n\t\tatom.Mu.Lock()\n\t\tif reply[key] != \"\" {\n\t\t\texist[url.(string)] = reply[key]\n\t\t} else {\n\t\t\tlongCrc := uint64(crc32.ChecksumIEEE([]byte(url.(string))))\n\t\t\tshortUrl := atom.GetShortenUrl(url.(string))\n\t\t\tshortCrc := uint64(crc32.ChecksumIEEE([]byte(shortUrl)))\n\t\t\tnotExistLongCRCList = append(notExistLongCRCList, longCrc)\n\t\t\tredirect.LongUrl = url.(string)\n\t\t\tredirect.ShortUrl = shortUrl\n\t\t\tredirect.LongCrc = longCrc\n\t\t\tredirect.ShortCrc = shortCrc\n\t\t\tredirect.Status = NORMAL\n\t\t\tredirect.CreatedByIP = uint64(com.Ip2Int(ip))\n\t\t\tredirect.UpdateByIP = uint64(com.Ip2Int(ip))\n\t\t\tredirect.CreateAT = uint64(time.Now().Unix())\n\t\t\tredirect.UpdateAT = uint64(time.Now().Unix())\n\t\t\tnotExistMapList = append(notExistMapList, redirect)\n\t\t}\n\t\tatom.Mu.Unlock()\n\t}\n\treturn exist, notExistLongCRCList, notExistMapList\n}\n\nfunc getAllData(existShortListInDB []mentity.Redirect, notExistMapList []mentity.Redirect) (map[string]interface{}, []interface{}, []interface{}, []mentity.Redirect) {\n\texistQueue := make(map[string]interface{})\n\tvar existQueueShortenList []interface{}\n\tvar existQueueExpandList []interface{}\n\tfor _, existShortListInDBVal := range existShortListInDB {\n\t\texistQueue[existShortListInDBVal.LongUrl] = existShortListInDBVal.ShortUrl\n\t\texistQueueShortenList = append(existQueueShortenList, existShortListInDBVal.LongUrl)\n\t\texistQueueShortenList = append(existQueueShortenList, existShortListInDBVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, existShortListInDBVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, existShortListInDBVal.LongUrl)\n\t\tfor k, notExistMapListVal := range notExistMapList {\n\t\t\tif existShortListInDBVal.LongUrl == notExistMapListVal.LongUrl {\n\t\t\t\tnotExistMapList = append(notExistMapList[:k], notExistMapList[k+1:]...)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, notExistMapListVal := range notExistMapList {\n\t\texistQueue[notExistMapListVal.LongUrl] = notExistMapListVal.ShortUrl\n\t\texistQueueShortenList = append(existQueueShortenList, notExistMapListVal.LongUrl)\n\t\texistQueueShortenList = append(existQueueShortenList, notExistMapListVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, notExistMapListVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, notExistMapListVal.LongUrl)\n\t}\n\treturn existQueue, existQueueShortenList, existQueueExpandList, notExistMapList\n}\n\nfunc (r *Url) GoExpand(jctx jcontext.Context, data map[string]interface{}) (httpStatus int, output io.Output) {\n\tvar ue UrlExpand\n\tffjson.Unmarshal([]byte(data[\"querystrjson\"].(string)), &ue)\n\n\tch, err := io.InputParamsCheck(jctx, data, ue)\n\tif err != nil {\n\t\treturn http.StatusExpectationFailed, io.Fail(ch.Message, \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tlist := expand(jctx, &ue)\n\n\tvar datalist entity.DataList\n\tdatalist.List = list\n\tdatalist.Total = len(list)\n\n\treturn http.StatusCreated, io.Suc(datalist, ch.RequestID)\n}\n\nfunc expand(jctx jcontext.Context, ue *UrlExpand) map[string]interface{} {\n\tlist := make(map[string]interface{})\n\tshortens := ue.Shorten[0]\n\tfor _, shorten := range strings.Split(shortens, \",\") {\n\t\treply, err := mredis.ExpandHGet(shorten)\n\t\tif err != nil {\n\t\t\tbeego.Info(jctx.Value(\"requestID\").(string), \":\", \"expand error: \", err)\n\t\t}\n\t\tatom.Mu.Lock()\n\t\tlist[shorten] = reply\n\t\tatom.Mu.Unlock()\n\t}\n\treturn list\n}\n<commit_msg>del note<commit_after>package services\n\nimport (\n\tjcontext \"context\"\n\t\"hash\/crc32\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/JREAMLU\/core\/com\"\n\t\"github.com\/JREAMLU\/core\/db\/mysql\"\n\t\"github.com\/JREAMLU\/core\/global\"\n\tio \"github.com\/JREAMLU\/core\/inout\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mentity\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mmysql\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/models\/mredis\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/services\/atom\"\n\t\"github.com\/JREAMLU\/jkernel\/base\/services\/entity\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/astaxie\/beego\/validation\"\n\t\"github.com\/beego\/i18n\"\n\t\"github.com\/pquerna\/ffjson\/ffjson\"\n)\n\ntype Url struct {\n\tMeta struct {\n\t\tAuth string\n\t} `json:\"meta\" valid:\"Required\"`\n\tData struct {\n\t\tUrls []interface{} `json:\"urls\" valid:\"Required\"`\n\t\tIP string `json:\"ip\" valid:\"IP\"`\n\t} `json:\"data\" valid:\"Required\"`\n}\n\ntype UrlExpand struct {\n\tShorten []string `json:\"shorten\" valid:\"Required\"`\n}\n\nvar ip string\n\nconst (\n\tDELETE = 0\n\tNORMAL = 1\n)\n\nfunc GetParams(url Url) Url {\n\treturn url\n}\n\nfunc (r *Url) Valid(v *validation.Validation) {}\n\nfunc (r *Url) GoShorten(jctx jcontext.Context, data map[string]interface{}) (httpStatus int, output io.Output) {\n\tffjson.Unmarshal(data[\"body\"].([]byte), r)\n\tip = data[\"headermap\"].(http.Header)[\"X-Forwarded-For\"][0]\n\tch, err := io.InputParamsCheck(jctx, data, &r.Data)\n\tif err != nil {\n\t\treturn http.StatusExpectationFailed, io.Fail(ch.Message, \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tif len(r.Data.Urls) > 10 {\n\t\treturn http.StatusExpectationFailed, io.Fail(i18n.Tr(global.Lang, \"url.NUMBERLIMIT\"), \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tlist, err := shorten(r)\n\tif err != nil {\n\t\tbeego.Info(jctx.Value(\"requestID\").(string), \":\", \"shorten error: \", err)\n\t\treturn http.StatusExpectationFailed, io.Fail(i18n.Tr(global.Lang, \"url.SHORTENILLEGAL\"), \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tvar datalist entity.DataList\n\tdatalist.List = list\n\tdatalist.Total = len(list)\n\n\treturn http.StatusCreated, io.Suc(datalist, ch.RequestID)\n}\n\nfunc shorten(r *Url) (map[string]interface{}, error) {\n\tlist, err := setDB(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}\n\nfunc setDB(r *Url) (map[string]interface{}, error) {\n\tvar shortenMap = make(map[string]interface{})\n\treply, err := mredis.ShortenHMGet(r.Data.Urls)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texist, notExistLongCRCList, notExistMapList := splitExistOrNot(r, reply)\n\tif len(notExistLongCRCList) == 0 && len(notExistMapList) == 0 {\n\t\treturn exist, nil\n\t}\n\n\texistShortListInDB, err := mmysql.GetShortens(notExistLongCRCList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texistQueue, existQueueShortenList, existQueueExpandList, notExistMapList := getAllData(existShortListInDB, notExistMapList)\n\tif len(existQueue) == 0 {\n\t\treturn exist, nil\n\t}\n\n\ttx := mysql.X.Begin()\n\tif len(notExistMapList) > 0 {\n\t\terr = mmysql.ShortenInBatch(notExistMapList, tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(existQueueShortenList) > 0 {\n\t\t_, err = mredis.ShortenHMSet(existQueueShortenList)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(existQueueExpandList) > 0 {\n\t\t_, err = mredis.ExpandHMSet(existQueueExpandList)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ttx.Commit()\n\n\tshortenMap = com.MapMerge(exist, existQueue)\n\n\treturn shortenMap, nil\n}\n\nfunc splitExistOrNot(r *Url, reply []string) (map[string]interface{}, []uint64, []mentity.Redirect) {\n\texist := make(map[string]interface{})\n\tvar notExistLongCRCList []uint64\n\tvar notExistMapList []mentity.Redirect\n\tvar redirect mentity.Redirect\n\tfor key, url := range r.Data.Urls {\n\t\tatom.Mu.Lock()\n\t\tif reply[key] != \"\" {\n\t\t\texist[url.(string)] = reply[key]\n\t\t} else {\n\t\t\tlongCrc := uint64(crc32.ChecksumIEEE([]byte(url.(string))))\n\t\t\tshortUrl := atom.GetShortenUrl(url.(string))\n\t\t\tshortCrc := uint64(crc32.ChecksumIEEE([]byte(shortUrl)))\n\t\t\tnotExistLongCRCList = append(notExistLongCRCList, longCrc)\n\t\t\tredirect.LongUrl = url.(string)\n\t\t\tredirect.ShortUrl = shortUrl\n\t\t\tredirect.LongCrc = longCrc\n\t\t\tredirect.ShortCrc = shortCrc\n\t\t\tredirect.Status = NORMAL\n\t\t\tredirect.CreatedByIP = uint64(com.Ip2Int(ip))\n\t\t\tredirect.UpdateByIP = uint64(com.Ip2Int(ip))\n\t\t\tredirect.CreateAT = uint64(time.Now().Unix())\n\t\t\tredirect.UpdateAT = uint64(time.Now().Unix())\n\t\t\tnotExistMapList = append(notExistMapList, redirect)\n\t\t}\n\t\tatom.Mu.Unlock()\n\t}\n\treturn exist, notExistLongCRCList, notExistMapList\n}\n\nfunc getAllData(existShortListInDB []mentity.Redirect, notExistMapList []mentity.Redirect) (map[string]interface{}, []interface{}, []interface{}, []mentity.Redirect) {\n\texistQueue := make(map[string]interface{})\n\tvar existQueueShortenList []interface{}\n\tvar existQueueExpandList []interface{}\n\tfor _, existShortListInDBVal := range existShortListInDB {\n\t\texistQueue[existShortListInDBVal.LongUrl] = existShortListInDBVal.ShortUrl\n\t\texistQueueShortenList = append(existQueueShortenList, existShortListInDBVal.LongUrl)\n\t\texistQueueShortenList = append(existQueueShortenList, existShortListInDBVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, existShortListInDBVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, existShortListInDBVal.LongUrl)\n\t\tfor k, notExistMapListVal := range notExistMapList {\n\t\t\tif existShortListInDBVal.LongUrl == notExistMapListVal.LongUrl {\n\t\t\t\tnotExistMapList = append(notExistMapList[:k], notExistMapList[k+1:]...)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, notExistMapListVal := range notExistMapList {\n\t\texistQueue[notExistMapListVal.LongUrl] = notExistMapListVal.ShortUrl\n\t\texistQueueShortenList = append(existQueueShortenList, notExistMapListVal.LongUrl)\n\t\texistQueueShortenList = append(existQueueShortenList, notExistMapListVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, notExistMapListVal.ShortUrl)\n\t\texistQueueExpandList = append(existQueueExpandList, notExistMapListVal.LongUrl)\n\t}\n\treturn existQueue, existQueueShortenList, existQueueExpandList, notExistMapList\n}\n\nfunc (r *Url) GoExpand(jctx jcontext.Context, data map[string]interface{}) (httpStatus int, output io.Output) {\n\tvar ue UrlExpand\n\tffjson.Unmarshal([]byte(data[\"querystrjson\"].(string)), &ue)\n\n\tch, err := io.InputParamsCheck(jctx, data, ue)\n\tif err != nil {\n\t\treturn http.StatusExpectationFailed, io.Fail(ch.Message, \"DATAPARAMSILLEGAL\", jctx.Value(\"requestID\").(string))\n\t}\n\n\tlist := expand(jctx, &ue)\n\n\tvar datalist entity.DataList\n\tdatalist.List = list\n\tdatalist.Total = len(list)\n\n\treturn http.StatusCreated, io.Suc(datalist, ch.RequestID)\n}\n\nfunc expand(jctx jcontext.Context, ue *UrlExpand) map[string]interface{} {\n\tlist := make(map[string]interface{})\n\tshortens := ue.Shorten[0]\n\tfor _, shorten := range strings.Split(shortens, \",\") {\n\t\treply, err := mredis.ExpandHGet(shorten)\n\t\tif err != nil {\n\t\t\tbeego.Info(jctx.Value(\"requestID\").(string), \":\", \"expand error: \", err)\n\t\t}\n\t\tatom.Mu.Lock()\n\t\tlist[shorten] = reply\n\t\tatom.Mu.Unlock()\n\t}\n\treturn list\n}\n<|endoftext|>"} {"text":"<commit_before>package middleware\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/webx-top\/echo\"\n)\n\ntype (\n\t\/\/ RewriteConfig defines the config for Rewrite middleware.\n\tRewriteConfig struct {\n\t\t\/\/ Skipper defines a function to skip middleware.\n\t\tSkipper echo.Skipper `json:\"-\"`\n\n\t\t\/\/ Rules defines the URL path rewrite rules. The values captured in asterisk can be\n\t\t\/\/ retrieved by index e.g. $1, $2 and so on.\n\t\t\/\/ Example:\n\t\t\/\/ \"\/old\": \"\/new\",\n\t\t\/\/ \"\/api\/*\": \"\/$1\",\n\t\t\/\/ \"\/js\/*\": \"\/public\/javascripts\/$1\",\n\t\t\/\/ \"\/users\/*\/orders\/*\": \"\/user\/$1\/order\/$2\",\n\t\t\/\/ Required.\n\t\tRules map[string]string `json:\"rules\"`\n\n\t\taddresses map[string]*regexp.Regexp \/\/\"\/old\": *regexp.Regexp\n\t\trulesRegex map[*regexp.Regexp]string \/\/*regexp.Regexp: \"\/new\"\n\t}\n)\n\n\/\/ Init Initialize\nfunc (c *RewriteConfig) Init() *RewriteConfig {\n\tc.rulesRegex = map[*regexp.Regexp]string{}\n\n\tfor k, v := range c.Rules {\n\t\tc.Set(k, v)\n\t}\n\treturn c\n}\n\n\/\/ Set rule\nfunc (c *RewriteConfig) Set(urlPath, newPath string) *RewriteConfig {\n\tre, ok := c.addresses[urlPath]\n\tif ok {\n\t\tdelete(c.rulesRegex, re)\n\t\tr := strings.Replace(urlPath, \"*\", \"(\\\\S*)\", -1)\n\t\tre = regexp.MustCompile(r)\n\t\tc.rulesRegex[re] = newPath\n\t\tc.addresses[urlPath] = re\n\t} else {\n\t\tc.Add(urlPath, newPath)\n\t}\n\treturn c\n}\n\n\/\/ Delete rule\nfunc (c *RewriteConfig) Delete(urlPath string) *RewriteConfig {\n\tre, ok := c.addresses[urlPath]\n\tif ok {\n\t\tdelete(c.rulesRegex, re)\n\t\tdelete(c.addresses, urlPath)\n\t}\n\treturn c\n}\n\n\/\/ Add rule\nfunc (c *RewriteConfig) Add(urlPath, newPath string) *RewriteConfig {\n\tr := strings.Replace(urlPath, \"*\", \"(\\\\S*)\", -1)\n\tre := regexp.MustCompile(r)\n\tc.rulesRegex[re] = newPath\n\tc.addresses[urlPath] = re\n\treturn c\n}\n\n\/\/ Rewrite url\nfunc (c *RewriteConfig) Rewrite(urlPath string) string {\n\tfor k, v := range c.rulesRegex {\n\t\treplacer := echo.CaptureTokens(k, urlPath)\n\t\tif replacer != nil {\n\t\t\turlPath = replacer.Replace(v)\n\t\t}\n\t}\n\treturn urlPath\n}\n\nvar (\n\t\/\/ DefaultRewriteConfig is the default Rewrite middleware config.\n\tDefaultRewriteConfig = RewriteConfig{\n\t\tSkipper: echo.DefaultSkipper,\n\t}\n)\n\n\/\/ Rewrite returns a Rewrite middleware.\n\/\/\n\/\/ Rewrite middleware rewrites the URL path based on the provided rules.\nfunc Rewrite(rules map[string]string) echo.MiddlewareFuncd {\n\tc := DefaultRewriteConfig\n\tc.Rules = rules\n\treturn RewriteWithConfig(c)\n}\n\n\/\/ RewriteWithConfig returns a Rewrite middleware with config.\n\/\/ See: `Rewrite()`.\nfunc RewriteWithConfig(config RewriteConfig) echo.MiddlewareFuncd {\n\t\/\/ Defaults\n\tif config.Rules == nil {\n\t\tpanic(\"echo: rewrite middleware requires url path rewrite rules\")\n\t}\n\tif config.Skipper == nil {\n\t\tconfig.Skipper = DefaultRewriteConfig.Skipper\n\t}\n\tconfig.Init()\n\treturn func(next echo.Handler) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\t\t\tif config.Skipper(c) {\n\t\t\t\treturn next.Handle(c)\n\t\t\t}\n\n\t\t\treq := c.Request()\n\t\t\treq.URL().SetPath(config.Rewrite(req.URL().Path()))\n\t\t\treturn next.Handle(c)\n\t\t}\n\t}\n}\n<commit_msg>improved<commit_after>package middleware\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/webx-top\/echo\"\n)\n\ntype (\n\t\/\/ RewriteConfig defines the config for Rewrite middleware.\n\tRewriteConfig struct {\n\t\t\/\/ Skipper defines a function to skip middleware.\n\t\tSkipper echo.Skipper `json:\"-\"`\n\n\t\t\/\/ Rules defines the URL path rewrite rules. The values captured in asterisk can be\n\t\t\/\/ retrieved by index e.g. $1, $2 and so on.\n\t\t\/\/ Example:\n\t\t\/\/ \"\/old\": \"\/new\",\n\t\t\/\/ \"\/api\/*\": \"\/$1\",\n\t\t\/\/ \"\/js\/*\": \"\/public\/javascripts\/$1\",\n\t\t\/\/ \"\/users\/*\/orders\/*\": \"\/user\/$1\/order\/$2\",\n\t\t\/\/ Required.\n\t\tRules map[string]string `json:\"rules\"`\n\n\t\taddresses map[string]*regexp.Regexp \/\/\"\/old\": *regexp.Regexp\n\t\trulesRegex map[*regexp.Regexp]string \/\/*regexp.Regexp: \"\/new\"\n\t}\n)\n\n\/\/ Init Initialize\nfunc (c *RewriteConfig) Init() *RewriteConfig {\n\tc.rulesRegex = map[*regexp.Regexp]string{}\n\tc.addresses = map[string]*regexp.Regexp{}\n\n\tfor k, v := range c.Rules {\n\t\tc.Set(k, v)\n\t}\n\treturn c\n}\n\n\/\/ Set rule\nfunc (c *RewriteConfig) Set(urlPath, newPath string) *RewriteConfig {\n\tre, ok := c.addresses[urlPath]\n\tif ok {\n\t\tdelete(c.rulesRegex, re)\n\t\tr := strings.Replace(urlPath, \"*\", \"(\\\\S*)\", -1)\n\t\tre = regexp.MustCompile(r)\n\t\tc.rulesRegex[re] = newPath\n\t\tc.addresses[urlPath] = re\n\t} else {\n\t\tc.Add(urlPath, newPath)\n\t}\n\treturn c\n}\n\n\/\/ Delete rule\nfunc (c *RewriteConfig) Delete(urlPath string) *RewriteConfig {\n\tre, ok := c.addresses[urlPath]\n\tif ok {\n\t\tdelete(c.rulesRegex, re)\n\t\tdelete(c.addresses, urlPath)\n\t}\n\treturn c\n}\n\n\/\/ Add rule\nfunc (c *RewriteConfig) Add(urlPath, newPath string) *RewriteConfig {\n\tr := strings.Replace(urlPath, \"*\", \"(\\\\S*)\", -1)\n\tre := regexp.MustCompile(r)\n\tc.rulesRegex[re] = newPath\n\tc.addresses[urlPath] = re\n\treturn c\n}\n\n\/\/ Rewrite url\nfunc (c *RewriteConfig) Rewrite(urlPath string) string {\n\tfor k, v := range c.rulesRegex {\n\t\treplacer := echo.CaptureTokens(k, urlPath)\n\t\tif replacer != nil {\n\t\t\turlPath = replacer.Replace(v)\n\t\t}\n\t}\n\treturn urlPath\n}\n\nvar (\n\t\/\/ DefaultRewriteConfig is the default Rewrite middleware config.\n\tDefaultRewriteConfig = RewriteConfig{\n\t\tSkipper: echo.DefaultSkipper,\n\t}\n)\n\n\/\/ Rewrite returns a Rewrite middleware.\n\/\/\n\/\/ Rewrite middleware rewrites the URL path based on the provided rules.\nfunc Rewrite(rules map[string]string) echo.MiddlewareFuncd {\n\tc := DefaultRewriteConfig\n\tc.Rules = rules\n\treturn RewriteWithConfig(c)\n}\n\n\/\/ RewriteWithConfig returns a Rewrite middleware with config.\n\/\/ See: `Rewrite()`.\nfunc RewriteWithConfig(config RewriteConfig) echo.MiddlewareFuncd {\n\t\/\/ Defaults\n\tif config.Rules == nil {\n\t\tpanic(\"echo: rewrite middleware requires url path rewrite rules\")\n\t}\n\tif config.Skipper == nil {\n\t\tconfig.Skipper = DefaultRewriteConfig.Skipper\n\t}\n\tconfig.Init()\n\treturn func(next echo.Handler) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\t\t\tif config.Skipper(c) {\n\t\t\t\treturn next.Handle(c)\n\t\t\t}\n\n\t\t\treq := c.Request()\n\t\t\treq.URL().SetPath(config.Rewrite(req.URL().Path()))\n\t\t\treturn next.Handle(c)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package analytic\n\nimport (\n\t\"github.com\/ready-steady\/linear\/matrix\"\n)\n\n\/\/ Compute calculates the temperature profile corresponding to the given power\n\/\/ profile. The sc parameter controls the number of samples that the temperature\n\/\/ profile will contain; if the power profile contains more samples than needed,\n\/\/ it will be truncated accordingly.\nfunc (t *Temperature) Compute(P []float64, sc uint) []float64 {\n\tcc, nc := t.Cores, t.Nodes\n\n\tQ := make([]float64, cc*sc)\n\tS := make([]float64, nc*sc)\n\n\tmatrix.Multiply(t.system.F, P, S, nc, cc, sc)\n\n\tfor i, j, k := uint(1), uint(0), nc; i < sc; i++ {\n\t\tmatrix.MultiplyAdd(t.system.E, S[j:k], S[k:k+nc], S[k:k+nc], nc, nc, 1)\n\t\tj += nc\n\t\tk += nc\n\t}\n\n\tfor i := uint(0); i < cc; i++ {\n\t\tfor j := uint(0); j < sc; j++ {\n\t\t\tQ[cc*j+i] = t.system.D[i]*S[nc*j+i] + t.system.Qamb\n\t\t}\n\t}\n\n\treturn Q\n}\n<commit_msg>A cosmetic change in analytic.Compute<commit_after>package analytic\n\nimport (\n\t\"github.com\/ready-steady\/linear\/matrix\"\n)\n\n\/\/ Compute calculates the temperature profile corresponding to the given power\n\/\/ profile. The sc parameter controls the number of samples that the temperature\n\/\/ profile will contain; if the power profile contains more samples than needed,\n\/\/ it will be truncated accordingly.\nfunc (t *Temperature) Compute(P []float64, sc uint) []float64 {\n\tcc, nc := t.Cores, t.Nodes\n\n\tS := make([]float64, nc*sc)\n\tmatrix.Multiply(t.system.F, P, S, nc, cc, sc)\n\n\tfor i, j, k := uint(1), uint(0), nc; i < sc; i++ {\n\t\tmatrix.MultiplyAdd(t.system.E, S[j:k], S[k:k+nc], S[k:k+nc], nc, nc, 1)\n\t\tj += nc\n\t\tk += nc\n\t}\n\n\tQ := make([]float64, cc*sc)\n\tfor i := uint(0); i < cc; i++ {\n\t\tfor j := uint(0); j < sc; j++ {\n\t\t\tQ[j*cc+i] = t.system.D[i]*S[j*nc+i] + t.system.Qamb\n\t\t}\n\t}\n\n\treturn Q\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Adapters\n\npackage adapter\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/tsinghua-io\/api-server\/resource\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype Adapter interface {\n\tProfile(username string, params map[string]string) (user *resource.User, status int)\n\tAttended(username string, params map[string]string) (courses []*resource.Course, status int)\n\n\tCourseAnnouncements(courseId string, params map[string]string) (announcements []*resource.Announcement, status int)\n\tCourseFiles(courseId string, params map[string]string) (files []*resource.File, status int)\n\tCourseHomework(courseId string, params map[string]string) (homework []*resource.Homework, status int)\n}\n\ntype Parser interface {\n\tParse(reader io.Reader, info interface{}) error\n}\n\n\/\/ FetchInfo fetches info from url using HTTP GET\/POST, and then parse it\n\/\/ using the given parser. A HTTP status code is returned to indicate the\n\/\/ result.\nfunc FetchInfo(client *http.Client, url string, method string, p Parser, info interface{}) (status int) {\n\t\/\/ Fetch data from url.\n\tglog.Infof(\"Fetching data from %s\", url)\n\n\tvar resp *http.Response\n\tvar err error\n\tt_send := time.Now()\n\n\tswitch method {\n\tcase \"GET\":\n\t\tresp, err = client.Get(url)\n\tcase \"POST\":\n\t\tresp, err = client.Post(url, \"application\/x-www-form-urlencoded\", nil)\n\tdefault:\n\t\tglog.Errorf(\"Unknown method to fetch info: %s\", method)\n\t\treturn http.StatusInternalServerError\n\t}\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to fetch info from %s: %s\", url, err)\n\t\treturn http.StatusBadGateway\n\t}\n\tdefer resp.Body.Close()\n\n\tt_receive := time.Now()\n\tglog.Infof(\"Fetched data from %s (%s)\", url, t_receive.Sub(t_send))\n\n\t\/\/ Parse the data.\n\tif err := p.Parse(resp.Body, info); err != nil {\n\t\tglog.Errorf(\"Unable to parse data received from %s: %s\", url, err)\n\t\treturn http.StatusInternalServerError\n\t}\n\n\tglog.Infof(\"Parsed data from %s (%s)\", url, time.Since(t_receive))\n\n\t\/\/ We are safe.\n\treturn http.StatusOK\n}\n<commit_msg>Only record total time in adapter.FetchInfo<commit_after>\/\/ Adapters\n\npackage adapter\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/tsinghua-io\/api-server\/resource\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype Adapter interface {\n\tProfile(username string, params map[string]string) (user *resource.User, status int)\n\tAttended(username string, params map[string]string) (courses []*resource.Course, status int)\n\n\tCourseAnnouncements(courseId string, params map[string]string) (announcements []*resource.Announcement, status int)\n\tCourseFiles(courseId string, params map[string]string) (files []*resource.File, status int)\n\tCourseHomework(courseId string, params map[string]string) (homework []*resource.Homework, status int)\n}\n\ntype Parser interface {\n\tParse(reader io.Reader, info interface{}) error\n}\n\n\/\/ FetchInfo fetches info from url using HTTP GET\/POST, and then parse it\n\/\/ using the given parser. A HTTP status code is returned to indicate the\n\/\/ result.\nfunc FetchInfo(client *http.Client, url string, method string, p Parser, info interface{}) (status int) {\n\t\/\/ Fetch data from url.\n\tglog.Infof(\"Fetching data from %s\", url)\n\n\tvar resp *http.Response\n\tvar err error\n\tt_send := time.Now()\n\n\tswitch method {\n\tcase \"GET\":\n\t\tresp, err = client.Get(url)\n\tcase \"POST\":\n\t\tresp, err = client.Post(url, \"application\/x-www-form-urlencoded\", nil)\n\tdefault:\n\t\tglog.Errorf(\"Unknown method to fetch info: %s\", method)\n\t\treturn http.StatusInternalServerError\n\t}\n\tif err != nil {\n\t\tglog.Errorf(\"Unable to fetch info from %s: %s\", url, err)\n\t\treturn http.StatusBadGateway\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ Parse the data.\n\tif err := p.Parse(resp.Body, info); err != nil {\n\t\tglog.Errorf(\"Unable to parse data received from %s: %s\", url, err)\n\t\treturn http.StatusInternalServerError\n\t}\n\n\tglog.Infof(\"Parsed data from %s (%s)\", url, time.Since(t_send))\n\n\t\/\/ We are safe.\n\treturn http.StatusOK\n}\n<|endoftext|>"} {"text":"<commit_before>package stun\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestAgent_Process(t *testing.T) {\n\ta := NewAgent(AgentOptions{})\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestAgent_Start(t *testing.T) {\n\ta := NewAgent(AgentOptions{})\n\tid := NewTransactionID()\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tif err := a.Start(id, deadline, noopHandler); err != nil {\n\t\tt.Errorf(\"failed to statt transaction: %s\", err)\n\t}\n\tif err := a.Start(id, deadline, noopHandler); err != ErrTransactionExists {\n\t\tt.Errorf(\"duplicate start should return <%s>, got <%s>\",\n\t\t\tErrTransactionExists, err,\n\t\t)\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\tid = NewTransactionID()\n\tif err := a.Start(id, deadline, noopHandler); err != ErrAgentClosed {\n\t\tt.Errorf(\"start on closed agent should return <%s>, got <%s>\",\n\t\t\tErrAgentClosed, err,\n\t\t)\n\t}\n}\n\nfunc TestAgent_Stop(t *testing.T) {\n\ta := NewAgent(AgentOptions{})\n\tif err := a.Stop(transactionID{}); err != ErrTransactionNotExists {\n\t\tt.Fatalf(\"unexpected error: %s, should be %s\", err, ErrTransactionNotExists)\n\t}\n\tid := NewTransactionID()\n\tcalled := make(chan AgentEvent, 1)\n\ttimeout := time.Millisecond * 200\n\tif err := a.Start(id, time.Now().Add(timeout), func(e AgentEvent) {\n\t\tcalled <- e\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Stop(id); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tselect {\n\tcase e := <-called:\n\t\tif e.Error != ErrTransactionStopped {\n\t\t\tt.Fatalf(\"unexpected error: %s, should be %s\",\n\t\t\t\te.Error, ErrTransactionStopped,\n\t\t\t)\n\t\t}\n\tcase <-time.After(timeout * 2):\n\t\tt.Fatal(\"timed out\")\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Stop(transactionID{}); err != ErrAgentClosed {\n\t\tt.Fatalf(\"unexpected error: %s, should be %s\", err, ErrAgentClosed)\n\t}\n}\n\nvar noopHandler = func(e AgentEvent) {}\n\nfunc TestAgent_GC(t *testing.T) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tshouldTimeOut := func(e AgentEvent) {\n\t\tif e.Error != ErrTransactionTimeOut {\n\t\t\tt.Errorf(\"should time out, but got <%s>\", e.Error)\n\t\t}\n\t}\n\tshouldNotTimeOut := func(e AgentEvent) {\n\t\tif e.Error == ErrTransactionTimeOut {\n\t\t\tt.Error(\"should not time out\")\n\t\t}\n\t}\n\tdeadline := time.Date(2027, time.November, 21,\n\t\t23, 13, 34, 120021,\n\t\ttime.UTC,\n\t)\n\tgcDeadline := deadline.Add(time.Second)\n\tdeadlineNotGC := gcDeadline.AddDate(0, 0, 1)\n\tfor i := 0; i < 5; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, shouldTimeOut); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tfor i := 0; i < 5; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadlineNotGC, shouldNotTimeOut); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\ta.garbageCollect(gcDeadline)\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\t\/\/ Should not panic:\n\ta.garbageCollect(gcDeadline)\n}\n\nfunc BenchmarkAgent_GC(b *testing.B) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tfor i := 0; i < agentGCInitCap; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, noopHandler); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\tdefer func() {\n\t\tif err := a.Close(); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}()\n\tb.ReportAllocs()\n\tgcDeadline := deadline.Add(-time.Second)\n\tfor i := 0; i < b.N; i++ {\n\t\ta.garbageCollect(gcDeadline)\n\t}\n}\n\nfunc BenchmarkAgent_Process(b *testing.B) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tfor i := 0; i < 1000; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, noopHandler); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\tdefer func() {\n\t\tif err := a.Close(); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}()\n\tb.ReportAllocs()\n\tev := AgentProcessArgs{\n\t\tMessage: MustBuild(\n\t\t\tTransactionID,\n\t\t),\n\t}\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := a.Process(ev); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n<commit_msg>agent: add tests<commit_after>package stun\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestAgent_ProcessInTransaction(t *testing.T) {\n\tm := New()\n\ta := NewAgent(AgentOptions{\n\t\tHandler: func(e AgentEvent) {\n\t\t\tt.Error(\"should not be called\")\n\t\t},\n\t})\n\tif err := m.NewTransactionID(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Start(m.TransactionID, time.Time{}, func(e AgentEvent) {\n\t\tif e.Error != nil {\n\t\t\tt.Errorf(\"got error: %s\", e.Error)\n\t\t}\n\t\tif !e.Message.Equal(m) {\n\t\t\tt.Errorf(\"%s (got) != %s (expected)\", e.Message, m)\n\t\t}\n\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Process(AgentProcessArgs{\n\t\tMessage: m,\n\t}); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestAgent_Process(t *testing.T) {\n\tm := New()\n\ta := NewAgent(AgentOptions{\n\t\tHandler: func(e AgentEvent) {\n\t\t\tif e.Error != nil {\n\t\t\t\tt.Errorf(\"got error: %s\", e.Error)\n\t\t\t}\n\t\t\tif !e.Message.Equal(m) {\n\t\t\t\tt.Errorf(\"%s (got) != %s (expected)\", e.Message, m)\n\t\t\t}\n\t\t},\n\t})\n\tif err := m.NewTransactionID(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Process(AgentProcessArgs{\n\t\tMessage: m,\n\t}); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := a.Process(AgentProcessArgs{\n\t\tMessage: m,\n\t}); err != ErrAgentClosed {\n\t\tt.Errorf(\"closed agent should return <%s>, but got <%s>\",\n\t\t\tErrAgentClosed, err,\n\t\t)\n\t}\n}\n\nfunc TestAgent_Start(t *testing.T) {\n\ta := NewAgent(AgentOptions{})\n\tid := NewTransactionID()\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tif err := a.Start(id, deadline, noopHandler); err != nil {\n\t\tt.Errorf(\"failed to statt transaction: %s\", err)\n\t}\n\tif err := a.Start(id, deadline, noopHandler); err != ErrTransactionExists {\n\t\tt.Errorf(\"duplicate start should return <%s>, got <%s>\",\n\t\t\tErrTransactionExists, err,\n\t\t)\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\tid = NewTransactionID()\n\tif err := a.Start(id, deadline, noopHandler); err != ErrAgentClosed {\n\t\tt.Errorf(\"start on closed agent should return <%s>, got <%s>\",\n\t\t\tErrAgentClosed, err,\n\t\t)\n\t}\n}\n\nfunc TestAgent_Stop(t *testing.T) {\n\ta := NewAgent(AgentOptions{})\n\tif err := a.Stop(transactionID{}); err != ErrTransactionNotExists {\n\t\tt.Fatalf(\"unexpected error: %s, should be %s\", err, ErrTransactionNotExists)\n\t}\n\tid := NewTransactionID()\n\tcalled := make(chan AgentEvent, 1)\n\ttimeout := time.Millisecond * 200\n\tif err := a.Start(id, time.Now().Add(timeout), func(e AgentEvent) {\n\t\tcalled <- e\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Stop(id); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tselect {\n\tcase e := <-called:\n\t\tif e.Error != ErrTransactionStopped {\n\t\t\tt.Fatalf(\"unexpected error: %s, should be %s\",\n\t\t\t\te.Error, ErrTransactionStopped,\n\t\t\t)\n\t\t}\n\tcase <-time.After(timeout * 2):\n\t\tt.Fatal(\"timed out\")\n\t}\n\tif err := a.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := a.Stop(transactionID{}); err != ErrAgentClosed {\n\t\tt.Fatalf(\"unexpected error: %s, should be %s\", err, ErrAgentClosed)\n\t}\n}\n\nvar noopHandler = func(e AgentEvent) {}\n\nfunc TestAgent_GC(t *testing.T) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tshouldTimeOut := func(e AgentEvent) {\n\t\tif e.Error != ErrTransactionTimeOut {\n\t\t\tt.Errorf(\"should time out, but got <%s>\", e.Error)\n\t\t}\n\t}\n\tshouldNotTimeOut := func(e AgentEvent) {\n\t\tif e.Error == ErrTransactionTimeOut {\n\t\t\tt.Error(\"should not time out\")\n\t\t}\n\t}\n\tdeadline := time.Date(2027, time.November, 21,\n\t\t23, 13, 34, 120021,\n\t\ttime.UTC,\n\t)\n\tgcDeadline := deadline.Add(time.Second)\n\tdeadlineNotGC := gcDeadline.AddDate(0, 0, 1)\n\tfor i := 0; i < 5; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, shouldTimeOut); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tfor i := 0; i < 5; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadlineNotGC, shouldNotTimeOut); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\ta.garbageCollect(gcDeadline)\n\tif err := a.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\t\/\/ Should not panic:\n\ta.garbageCollect(gcDeadline)\n}\n\nfunc BenchmarkAgent_GC(b *testing.B) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tfor i := 0; i < agentGCInitCap; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, noopHandler); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\tdefer func() {\n\t\tif err := a.Close(); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}()\n\tb.ReportAllocs()\n\tgcDeadline := deadline.Add(-time.Second)\n\tfor i := 0; i < b.N; i++ {\n\t\ta.garbageCollect(gcDeadline)\n\t}\n}\n\nfunc BenchmarkAgent_Process(b *testing.B) {\n\ta := NewAgent(AgentOptions{\n\t\tHandler: noopHandler,\n\t})\n\tdeadline := time.Now().AddDate(0, 0, 1)\n\tfor i := 0; i < 1000; i++ {\n\t\tif err := a.Start(NewTransactionID(), deadline, noopHandler); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\tdefer func() {\n\t\tif err := a.Close(); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}()\n\tb.ReportAllocs()\n\tev := AgentProcessArgs{\n\t\tMessage: MustBuild(\n\t\t\tTransactionID,\n\t\t),\n\t}\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := a.Process(ev); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package postgis\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gost\/godata\"\n\tgostErrors \"github.com\/gost\/server\/errors\"\n\tgostLog \"github.com\/gost\/server\/log\"\n\t\"github.com\/gost\/server\/sensorthings\/models\"\n\t_ \"github.com\/lib\/pq\" \/\/ postgres driver\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/TimeFormat describes the format in which we want our DateTime to display\n\tTimeFormat = \"YYYY-MM-DD\\\"T\\\"HH24:MI:SS.MSZ\"\n)\n\nvar logger *log.Entry\n\n\/\/ GostDatabase implementation\ntype GostDatabase struct {\n\tHost string\n\tPort int\n\tUser string\n\tPassword string\n\tDatabase string\n\tSchema string\n\tSsl bool\n\tMaxIdeConns int\n\tMaxOpenConns int\n\tDb *sql.DB\n\tQueryBuilder *QueryBuilder\n}\n\nfunc setupLogger() {\n\tl, err := gostLog.GetLoggerInstance()\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tlogger = l.WithFields(log.Fields{\"package\": \"gost.server.database.postgis\"})\n}\n\n\/\/ NewDatabase initialises the PostgreSQL database\n\/\/\thost = TCP host:port or Unix socket depending on Network.\n\/\/\tuser = database user\n\/\/\tpassword = database password\n\/\/\tdatabase = name of database\n\/\/\tssl = Whether to use secure TCP\/IP connections (TLS).\nfunc NewDatabase(host string, port int, user string, password string, database string, schema string, ssl bool, maxIdeConns int, maxOpenConns int, maxTop int) models.Database {\n\tsetupLogger()\n\treturn &GostDatabase{\n\t\tHost: host,\n\t\tPort: port,\n\t\tUser: user,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tSchema: schema,\n\t\tSsl: ssl,\n\t\tMaxIdeConns: maxIdeConns,\n\t\tMaxOpenConns: maxOpenConns,\n\t\tQueryBuilder: CreateQueryBuilder(schema, maxTop),\n\t}\n}\n\n\/\/ Start the database\nfunc (gdb *GostDatabase) Start() {\n\t\/\/ToDo: implement SSL\n\tlogger.Infof(\"Creating database connection, host: %v, port: %v user: %v, database: %v, schema: %v ssl: %v\", gdb.Host, gdb.Port, gdb.User, gdb.Database, gdb.Schema, gdb.Ssl)\n\tdbInfo := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=disable\", gdb.Host, gdb.User, gdb.Password, gdb.Database)\n\tdb, err := sql.Open(\"postgres\", dbInfo)\n\tdb.SetMaxIdleConns(gdb.MaxIdeConns)\n\tdb.SetMaxOpenConns(gdb.MaxOpenConns)\n\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tgdb.Db = db\n\tlogger.Infof(\"Connected to database\")\n}\n\n\/\/ CreateSchema creates the needed schema in the database\nfunc (gdb *GostDatabase) CreateSchema(location string) error {\n\tcreate, err := GetCreateDatabaseQuery(location, gdb.Schema)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := *create\n\t_, err2 := gdb.Db.Exec(c)\n\treturn err2\n}\n\n\/\/ GetCreateDatabaseQuery returns the database creation script for PostgreSQL\nfunc GetCreateDatabaseQuery(location string, schema string) (*string, error) {\n\tbytes, err := ioutil.ReadFile(location)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent := string(bytes[:])\n\tformatted := fmt.Sprintf(content, schema, schema, schema, schema)\n\treturn &formatted, nil\n}\n\n\/\/ ContainsToLower checks a string array, array and given string are set to lower-case\nfunc ContainsToLower(s []*godata.SelectItem, e string) bool {\n\tfor _, a := range s {\n\t\tfor _, b := range a.Segments {\n\t\t\tif strings.ToLower(b.Value) == strings.ToLower(e) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ EntityExists checks if entity exists in database\nfunc EntityExists(gdb *GostDatabase, id interface{}, entityName string) bool {\n\tvar result bool\n\tsql := fmt.Sprintf(\"SELECT exists (SELECT 1 FROM %s.%s WHERE id = $1 LIMIT 1)\", gdb.Schema, entityName)\n\terr := gdb.Db.QueryRow(sql, id).Scan(&result)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn result\n}\n\n\/\/ DeleteEntity deletes a record from database for entity\nfunc DeleteEntity(gdb *GostDatabase, id interface{}, entityName string) error {\n\tintID, ok := ToIntID(id)\n\tif !ok {\n\t\terrorMessage := fmt.Sprintf(\"%s does not exist\", entityName)\n\t\treturn gostErrors.NewRequestNotFound(errors.New(errorMessage))\n\t}\n\n\tr, err := gdb.Db.Exec(fmt.Sprintf(\"DELETE FROM %s.%s WHERE id = $1\", gdb.Schema, entityName), intID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c, _ := r.RowsAffected(); c == 0 {\n\t\terrorMessage := fmt.Sprintf(\"%s not found\", entityName)\n\t\treturn gostErrors.NewRequestNotFound(errors.New(errorMessage))\n\t}\n\treturn nil\n}\n\n\/\/ JSONToMap converts a string of json into a map\nfunc JSONToMap(data *string) (map[string]interface{}, error) {\n\tvar p map[string]interface{}\n\tif data == nil || len(*data) == 0 {\n\t\treturn p, nil\n\t}\n\n\terr := json.Unmarshal([]byte(*data), &p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}\n\n\/\/ ToIntID converts an interface to int id used for the id's in the database\nfunc ToIntID(id interface{}) (int, bool) {\n\tswitch t := id.(type) {\n\tcase string:\n\t\tintID, err := strconv.Atoi(t)\n\t\tif err != nil {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn intID, true\n\tcase float64:\n\t\treturn int(t), true\n\t}\n\n\tintID, err := strconv.Atoi(fmt.Sprintf(\"%v\", id))\n\tif err != nil {\n\t\t\/\/ why not return: 0, err\n\t\treturn 0, false\n\t}\n\n\t\/\/ why not return: intID, nil\n\treturn intID, true\n}\n\nfunc (gdb *GostDatabase) updateEntityColumns(table string, updates map[string]interface{}, entityID int) error {\n\tif len(updates) == 0 {\n\t\treturn nil\n\t}\n\n\tcolumns := \"\"\n\tprefix := \"\"\n\tfor k, v := range updates {\n\t\tswitch t := v.(type) {\n\t\tcase string:\n\t\t\t\/\/ do not format when value contains ST_ at the start\n\t\t\tif !strings.HasPrefix(t, \"ST_\") {\n\t\t\t\tv = fmt.Sprintf(\"'%s'\", t)\n\t\t\t}\n\t\t}\n\n\t\tcolumns += fmt.Sprintf(\"%s%s=%v\", prefix, k, v)\n\t\tif prefix != \", \" {\n\t\t\tprefix = \", \"\n\t\t}\n\t}\n\n\tsql := fmt.Sprintf(\"update %s.%s set %s where id = $1\", gdb.Schema, table, columns)\n\t_, err := gdb.Db.Exec(sql, entityID)\n\treturn err\n}\n<commit_msg>Update postgis.go<commit_after>package postgis\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/gost\/godata\"\n\tgostErrors \"github.com\/gost\/server\/errors\"\n\tgostLog \"github.com\/gost\/server\/log\"\n\t\"github.com\/gost\/server\/sensorthings\/models\"\n\t_ \"github.com\/lib\/pq\" \/\/ postgres driver\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/TimeFormat describes the format in which we want our DateTime to display\n\tTimeFormat = \"YYYY-MM-DD\\\"T\\\"HH24:MI:SS.MSZ\"\n)\n\nvar logger *log.Entry\n\n\/\/ GostDatabase implementation\ntype GostDatabase struct {\n\tHost string\n\tPort int\n\tUser string\n\tPassword string\n\tDatabase string\n\tSchema string\n\tSsl bool\n\tMaxIdeConns int\n\tMaxOpenConns int\n\tDb *sql.DB\n\tQueryBuilder *QueryBuilder\n}\n\nfunc setupLogger() {\n\tl, err := gostLog.GetLoggerInstance()\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tlogger = l.WithFields(log.Fields{\"package\": \"gost.server.database.postgis\"})\n}\n\n\/\/ NewDatabase initialises the PostgreSQL database\n\/\/\thost = TCP host:port or Unix socket depending on Network.\n\/\/\tuser = database user\n\/\/\tpassword = database password\n\/\/\tdatabase = name of database\n\/\/\tssl = Whether to use secure TCP\/IP connections (TLS).\nfunc NewDatabase(host string, port int, user string, password string, database string, schema string, ssl bool, maxIdeConns int, maxOpenConns int, maxTop int) models.Database {\n\tsetupLogger()\n\treturn &GostDatabase{\n\t\tHost: host,\n\t\tPort: port,\n\t\tUser: user,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tSchema: schema,\n\t\tSsl: ssl,\n\t\tMaxIdeConns: maxIdeConns,\n\t\tMaxOpenConns: maxOpenConns,\n\t\tQueryBuilder: CreateQueryBuilder(schema, maxTop),\n\t}\n}\n\n\/\/ Start the database\nfunc (gdb *GostDatabase) Start() {\n\tssl := \"disable\"\n\tif gdb.Ssl {\n\t\tssl = \"require\"\n\t}\n\n\tlogger.Infof(\"Creating database connection, host: %v, port: %v user: %v, database: %v, schema: %v ssl: %v\", gdb.Host, gdb.Port, gdb.User, gdb.Database, gdb.Schema, gdb.Ssl)\n\tdbInfo := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=%s\", gdb.Host, gdb.User, gdb.Password, gdb.Database, ssl)\n\tdb, err := sql.Open(\"postgres\", dbInfo)\n\tdb.SetMaxIdleConns(gdb.MaxIdeConns)\n\tdb.SetMaxOpenConns(gdb.MaxOpenConns)\n\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tgdb.Db = db\n\tlogger.Infof(\"Connected to database\")\n}\n\n\/\/ CreateSchema creates the needed schema in the database\nfunc (gdb *GostDatabase) CreateSchema(location string) error {\n\tcreate, err := GetCreateDatabaseQuery(location, gdb.Schema)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := *create\n\t_, err2 := gdb.Db.Exec(c)\n\treturn err2\n}\n\n\/\/ GetCreateDatabaseQuery returns the database creation script for PostgreSQL\nfunc GetCreateDatabaseQuery(location string, schema string) (*string, error) {\n\tbytes, err := ioutil.ReadFile(location)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent := string(bytes[:])\n\tformatted := fmt.Sprintf(content, schema, schema, schema, schema)\n\treturn &formatted, nil\n}\n\n\/\/ ContainsToLower checks a string array, array and given string are set to lower-case\nfunc ContainsToLower(s []*godata.SelectItem, e string) bool {\n\tfor _, a := range s {\n\t\tfor _, b := range a.Segments {\n\t\t\tif strings.ToLower(b.Value) == strings.ToLower(e) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ EntityExists checks if entity exists in database\nfunc EntityExists(gdb *GostDatabase, id interface{}, entityName string) bool {\n\tvar result bool\n\tsql := fmt.Sprintf(\"SELECT exists (SELECT 1 FROM %s.%s WHERE id = $1 LIMIT 1)\", gdb.Schema, entityName)\n\terr := gdb.Db.QueryRow(sql, id).Scan(&result)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn result\n}\n\n\/\/ DeleteEntity deletes a record from database for entity\nfunc DeleteEntity(gdb *GostDatabase, id interface{}, entityName string) error {\n\tintID, ok := ToIntID(id)\n\tif !ok {\n\t\terrorMessage := fmt.Sprintf(\"%s does not exist\", entityName)\n\t\treturn gostErrors.NewRequestNotFound(errors.New(errorMessage))\n\t}\n\n\tr, err := gdb.Db.Exec(fmt.Sprintf(\"DELETE FROM %s.%s WHERE id = $1\", gdb.Schema, entityName), intID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c, _ := r.RowsAffected(); c == 0 {\n\t\terrorMessage := fmt.Sprintf(\"%s not found\", entityName)\n\t\treturn gostErrors.NewRequestNotFound(errors.New(errorMessage))\n\t}\n\treturn nil\n}\n\n\/\/ JSONToMap converts a string of json into a map\nfunc JSONToMap(data *string) (map[string]interface{}, error) {\n\tvar p map[string]interface{}\n\tif data == nil || len(*data) == 0 {\n\t\treturn p, nil\n\t}\n\n\terr := json.Unmarshal([]byte(*data), &p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}\n\n\/\/ ToIntID converts an interface to int id used for the id's in the database\nfunc ToIntID(id interface{}) (int, bool) {\n\tswitch t := id.(type) {\n\tcase string:\n\t\tintID, err := strconv.Atoi(t)\n\t\tif err != nil {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn intID, true\n\tcase float64:\n\t\treturn int(t), true\n\t}\n\n\tintID, err := strconv.Atoi(fmt.Sprintf(\"%v\", id))\n\tif err != nil {\n\t\t\/\/ why not return: 0, err\n\t\treturn 0, false\n\t}\n\n\t\/\/ why not return: intID, nil\n\treturn intID, true\n}\n\nfunc (gdb *GostDatabase) updateEntityColumns(table string, updates map[string]interface{}, entityID int) error {\n\tif len(updates) == 0 {\n\t\treturn nil\n\t}\n\n\tcolumns := \"\"\n\tprefix := \"\"\n\tfor k, v := range updates {\n\t\tswitch t := v.(type) {\n\t\tcase string:\n\t\t\t\/\/ do not format when value contains ST_ at the start\n\t\t\tif !strings.HasPrefix(t, \"ST_\") {\n\t\t\t\tv = fmt.Sprintf(\"'%s'\", t)\n\t\t\t}\n\t\t}\n\n\t\tcolumns += fmt.Sprintf(\"%s%s=%v\", prefix, k, v)\n\t\tif prefix != \", \" {\n\t\t\tprefix = \", \"\n\t\t}\n\t}\n\n\tsql := fmt.Sprintf(\"update %s.%s set %s where id = $1\", gdb.Schema, table, columns)\n\t_, err := gdb.Db.Exec(sql, entityID)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"github.com\/subutai-io\/Subutai\/agent\/config\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/lib\/container\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/lib\/gpg\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/log\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc LxcClone(parent, child, envId, addr, token string) {\n\tif !container.IsTemplate(parent) {\n\t\tLxcImport(parent, \"\", token)\n\t}\n\tif container.IsContainer(child) {\n\t\tlog.Error(\"Container \" + child + \" already exist\")\n\t}\n\n\tcontainer.Clone(parent, child)\n\tgpg.GenerateKey(child)\n\n\tif len(token) != 0 {\n\t\tgpg.ExchageAndEncrypt(child, token)\n\t}\n\n\tif len(envId) != 0 {\n\t\tsetEnvironmentId(child, envId)\n\t}\n\n\tif len(addr) != 0 {\n\t\taddNetConf(child, addr)\n\t}\n\n\tcontainer.SetContainerUid(child)\n\tsetDns(child)\n\tLxcStart(child)\n\n\tcontainer.AptUpdate(child)\n\t\/\/ container.Start(child)\n\tlog.Info(child + \" with ID \" + gpg.GetFingerprint(child) + \" successfully cloned \")\n\n}\n\nfunc setEnvironmentId(name, envId string) {\n\terr := os.MkdirAll(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/subutai\", 755)\n\tlog.Check(log.FatalLevel, \"Creating etc\/subutai directory\", err)\n\n\tconfig, err := os.Create(config.Agent.LxcPrefix + name + \"\/rootfs\/etc\/subutai\/lxc-config\")\n\tlog.Check(log.FatalLevel, \"Creating lxc-config file\", err)\n\tdefer config.Close()\n\n\t_, err = config.WriteString(\"[Subutai-Agent]\\n\" + envId + \"\\n\")\n\tlog.Check(log.FatalLevel, \"Writing environment id to config\", err)\n\n\tconfig.Sync()\n}\n\nfunc setDns(name string) {\n\tdns := container.GetConfigItem(config.Agent.LxcPrefix+name+\"\/config\", \"lxc.network.ipv4.gateway\")\n\tif len(dns) == 0 {\n\t\tdns = \"10.10.0.254\"\n\t}\n\n\tresolv := []byte(\"domain\\tintra.lan\\nsearch\\tintra.lan\\nnameserver\\t\" + dns)\n\tlog.Check(log.ErrorLevel, \"Writing resolv.conf\",\n\t\tioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/resolvconf\/resolv.conf.d\/original\", resolv, 0644))\n\n\tresolv2 := []byte(\"domain\\tintra.lan\\nsearch\\tintra.lan\\nnameserver\\t\" + dns)\n\tlog.Check(log.ErrorLevel, \"Writing resolv.conf\",\n\t\tioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/resolv.conf\", resolv2, 0644))\n}\n\nfunc setStaticNetwork(name string) {\n\tdata, err := ioutil.ReadFile(config.Agent.LxcPrefix + name + \"\/rootfs\/etc\/network\/interfaces\")\n\tlog.Check(log.WarnLevel, \"Opening \/etc\/network\/interfaces\", err)\n\n\terr = ioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/network\/interfaces\",\n\t\t[]byte(strings.Replace(string(data), \"dhcp\", \"manual\", 1)), 0644)\n\tlog.Check(log.WarnLevel, \"Setting internal eth0 interface to manual\", err)\n}\n\nfunc addNetConf(name, addr string) {\n\tipvlan := strings.Fields(addr)\n\t_, network, _ := net.ParseCIDR(ipvlan[0])\n\tgw := []byte(network.IP)\n\tgw[3]++\n\tcontainer.SetContainerConf(name, [][]string{\n\t\t{\"lxc.network.ipv4\", ipvlan[0]},\n\t\t{\"lxc.network.ipv4.gateway\", net.IP(gw).String()},\n\t\t{\"lxc.network.mtu\", \"\"},\n\t\t{\"#vlan_id\", ipvlan[1]},\n\t})\n\tsetStaticNetwork(name)\n}\n<commit_msg>Fixed #154. Now clone operation finishing normally<commit_after>package lib\n\nimport (\n\t\"github.com\/subutai-io\/Subutai\/agent\/config\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/lib\/container\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/lib\/gpg\"\n\t\"github.com\/subutai-io\/Subutai\/agent\/log\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc LxcClone(parent, child, envId, addr, token string) {\n\tif !container.IsTemplate(parent) {\n\t\tLxcImport(parent, \"\", token)\n\t}\n\tif container.IsContainer(child) {\n\t\tlog.Error(\"Container \" + child + \" already exist\")\n\t}\n\n\tcontainer.Clone(parent, child)\n\tgpg.GenerateKey(child)\n\n\tif len(token) != 0 {\n\t\tgpg.ExchageAndEncrypt(child, token)\n\t}\n\n\tif len(envId) != 0 {\n\t\tsetEnvironmentId(child, envId)\n\t}\n\n\tif len(addr) != 0 {\n\t\taddNetConf(child, addr)\n\t}\n\n\tcontainer.SetContainerUid(child)\n\tsetDns(child)\n\tLxcStart(child)\n\n\tcontainer.AptUpdate(child)\n\t\/\/ container.Start(child)\n\tlog.Info(child + \" with ID \" + gpg.GetFingerprint(child) + \" successfully cloned \")\n\n}\n\nfunc setEnvironmentId(name, envId string) {\n\terr := os.MkdirAll(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/subutai\", 755)\n\tlog.Check(log.FatalLevel, \"Creating etc\/subutai directory\", err)\n\n\tconfig, err := os.Create(config.Agent.LxcPrefix + name + \"\/rootfs\/etc\/subutai\/lxc-config\")\n\tlog.Check(log.FatalLevel, \"Creating lxc-config file\", err)\n\tdefer config.Close()\n\n\t_, err = config.WriteString(\"[Subutai-Agent]\\n\" + envId + \"\\n\")\n\tlog.Check(log.FatalLevel, \"Writing environment id to config\", err)\n\n\tconfig.Sync()\n}\n\nfunc setDns(name string) {\n\tdns := container.GetConfigItem(config.Agent.LxcPrefix+name+\"\/config\", \"lxc.network.ipv4.gateway\")\n\tif len(dns) == 0 {\n\t\tdns = \"10.10.0.254\"\n\t}\n\n\tresolv := []byte(\"domain\\tintra.lan\\nsearch\\tintra.lan\\nnameserver\\t\" + dns)\n\tlog.Check(log.DebugLevel, \"Writing resolv.conf\",\n\t\tioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/resolvconf\/resolv.conf.d\/original\", resolv, 0644))\n\n\tresolv2 := []byte(\"domain\\tintra.lan\\nsearch\\tintra.lan\\nnameserver\\t\" + dns)\n\tlog.Check(log.DebugLevel, \"Writing resolv.conf\",\n\t\tioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/resolv.conf\", resolv2, 0644))\n}\n\nfunc setStaticNetwork(name string) {\n\tdata, err := ioutil.ReadFile(config.Agent.LxcPrefix + name + \"\/rootfs\/etc\/network\/interfaces\")\n\tlog.Check(log.WarnLevel, \"Opening \/etc\/network\/interfaces\", err)\n\n\terr = ioutil.WriteFile(config.Agent.LxcPrefix+name+\"\/rootfs\/etc\/network\/interfaces\",\n\t\t[]byte(strings.Replace(string(data), \"dhcp\", \"manual\", 1)), 0644)\n\tlog.Check(log.WarnLevel, \"Setting internal eth0 interface to manual\", err)\n}\n\nfunc addNetConf(name, addr string) {\n\tipvlan := strings.Fields(addr)\n\t_, network, _ := net.ParseCIDR(ipvlan[0])\n\tgw := []byte(network.IP)\n\tgw[3]++\n\tcontainer.SetContainerConf(name, [][]string{\n\t\t{\"lxc.network.ipv4\", ipvlan[0]},\n\t\t{\"lxc.network.ipv4.gateway\", net.IP(gw).String()},\n\t\t{\"lxc.network.mtu\", \"\"},\n\t\t{\"#vlan_id\", ipvlan[1]},\n\t})\n\tsetStaticNetwork(name)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/* This test check that setHostnameAsFQDN PodSpec field works as\n * expected.\n *\/\n\npackage node\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"math\/big\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nfunc generatePodName(base string) string {\n\tid, err := rand.Int(rand.Reader, big.NewInt(214748))\n\tif err != nil {\n\t\treturn base\n\t}\n\treturn fmt.Sprintf(\"%s-%d\", base, id)\n}\n\nfunc testPod() *v1.Pod {\n\tpodName := generatePodName(\"hostfqdn\")\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: podName,\n\t\t\tLabels: map[string]string{\"name\": podName},\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-container\",\n\t\t\t\t\tImage: imageutils.GetE2EImage(imageutils.BusyBox),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t},\n\t}\n\n\treturn pod\n}\n\nvar _ = SIGDescribe(\"Hostname of Pod [Feature:SetHostnameAsFQDN]\", func() {\n\tf := framework.NewDefaultFramework(\"hostfqdn\")\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod without fully qualified domain name (FQDN)\n\t Description: A Pod that does not define the subdomain field in it spec, does not have FQDN.\n\t*\/\n\tginkgo.It(\"a pod without subdomain field does not have FQDN [Feature:SetHostnameAsFQDN]\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname only\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod without FQDN, setHostnameAsFQDN field set to true\n\t Description: A Pod that does not define the subdomain field in it spec, does not have FQDN.\n\t Hence, SetHostnameAsFQDN feature has no effect.\n\t*\/\n\tginkgo.It(\"a pod without FQDN is not affected by SetHostnameAsFQDN field [Feature:SetHostnameAsFQDN]\", func() {\n\t\tpod := testPod()\n\t\t\/\/ Setting setHostnameAsFQDN field to true should have no effect.\n\t\tsetHostnameAsFQDN := true\n\t\tpod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname only\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod with FQDN, setHostnameAsFQDN field not defined.\n\t Description: A Pod that defines the subdomain field in it spec has FQDN.\n\t hostname command returns shortname (pod name in this case), and hostname -f returns FQDN.\n\t*\/\n\tginkgo.It(\"a pod with subdomain field has FQDN, hostname is shortname [Feature:SetHostnameAsFQDN]\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\tsubdomain := \"t\"\n\t\t\/\/ Set PodSpec subdomain field to generate FQDN for pod\n\t\tpod.Spec.Subdomain = subdomain\n\t\t\/\/ Expected Pod FQDN\n\t\thostFQDN := fmt.Sprintf(\"%s.%s.%s.svc.%s\", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, hostFQDN)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname and fqdn\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod with FQDN, setHostnameAsFQDN field set to true.\n\t Description: A Pod that defines the subdomain field in it spec has FQDN. When setHostnameAsFQDN: true, the\n\t hostname is set to be the FQDN. In this case, both commands hostname and hostname -f return the FQDN of the Pod.\n\t*\/\n\tginkgo.It(\"a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname [Feature:SetHostnameAsFQDN]\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\tsubdomain := \"t\"\n\t\t\/\/ Set PodSpec subdomain field to generate FQDN for pod\n\t\tpod.Spec.Subdomain = subdomain\n\t\t\/\/ Set PodSpec setHostnameAsFQDN to set FQDN as hostname\n\t\tsetHostnameAsFQDN := true\n\t\tpod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN\n\t\t\/\/ Expected Pod FQDN\n\t\thostFQDN := fmt.Sprintf(\"%s.%s.%s.svc.%s\", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)\n\t\t\/\/ Fail if FQDN is longer than 64 characters, otherwise the Pod will remain pending until test timeout.\n\t\t\/\/ In Linux, 64 characters is the limit of the hostname kernel field, which this test sets to the pod FQDN.\n\t\tframework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf(\"The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.\", hostFQDN, len(hostFQDN)))\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", hostFQDN, hostFQDN)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"fqdn and fqdn\", pod, 0, output)\n\t})\n\n})\n<commit_msg>Adding label NodeAlphaFeature to include tests in Node Testgrid<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/* This test check that setHostnameAsFQDN PodSpec field works as\n * expected.\n *\/\n\npackage node\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"math\/big\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\timageutils \"k8s.io\/kubernetes\/test\/utils\/image\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nfunc generatePodName(base string) string {\n\tid, err := rand.Int(rand.Reader, big.NewInt(214748))\n\tif err != nil {\n\t\treturn base\n\t}\n\treturn fmt.Sprintf(\"%s-%d\", base, id)\n}\n\nfunc testPod() *v1.Pod {\n\tpodName := generatePodName(\"hostfqdn\")\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: podName,\n\t\t\tLabels: map[string]string{\"name\": podName},\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-container\",\n\t\t\t\t\tImage: imageutils.GetE2EImage(imageutils.BusyBox),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t},\n\t}\n\n\treturn pod\n}\n\nvar _ = SIGDescribe(\"Hostname of Pod [Feature:SetHostnameAsFQDN][NodeAlphaFeature:SetHostnameAsFQDN]\", func() {\n\tf := framework.NewDefaultFramework(\"hostfqdn\")\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod without fully qualified domain name (FQDN)\n\t Description: A Pod that does not define the subdomain field in it spec, does not have FQDN.\n\t*\/\n\tginkgo.It(\"a pod without subdomain field does not have FQDN\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname only\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod without FQDN, setHostnameAsFQDN field set to true\n\t Description: A Pod that does not define the subdomain field in it spec, does not have FQDN.\n\t Hence, SetHostnameAsFQDN feature has no effect.\n\t*\/\n\tginkgo.It(\"a pod without FQDN is not affected by SetHostnameAsFQDN field\", func() {\n\t\tpod := testPod()\n\t\t\/\/ Setting setHostnameAsFQDN field to true should have no effect.\n\t\tsetHostnameAsFQDN := true\n\t\tpod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname only\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod with FQDN, setHostnameAsFQDN field not defined.\n\t Description: A Pod that defines the subdomain field in it spec has FQDN.\n\t hostname command returns shortname (pod name in this case), and hostname -f returns FQDN.\n\t*\/\n\tginkgo.It(\"a pod with subdomain field has FQDN, hostname is shortname\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\tsubdomain := \"t\"\n\t\t\/\/ Set PodSpec subdomain field to generate FQDN for pod\n\t\tpod.Spec.Subdomain = subdomain\n\t\t\/\/ Expected Pod FQDN\n\t\thostFQDN := fmt.Sprintf(\"%s.%s.%s.svc.%s\", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", pod.ObjectMeta.Name, hostFQDN)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"shotname and fqdn\", pod, 0, output)\n\t})\n\n\t\/*\n\t Release: v1.19\n\t Testname: Create Pod with FQDN, setHostnameAsFQDN field set to true.\n\t Description: A Pod that defines the subdomain field in it spec has FQDN. When setHostnameAsFQDN: true, the\n\t hostname is set to be the FQDN. In this case, both commands hostname and hostname -f return the FQDN of the Pod.\n\t*\/\n\tginkgo.It(\"a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname\", func() {\n\t\tpod := testPod()\n\t\tpod.Spec.Containers[0].Command = []string{\"sh\", \"-c\", \"echo $(hostname)';'$(hostname -f)';'\"}\n\t\tsubdomain := \"t\"\n\t\t\/\/ Set PodSpec subdomain field to generate FQDN for pod\n\t\tpod.Spec.Subdomain = subdomain\n\t\t\/\/ Set PodSpec setHostnameAsFQDN to set FQDN as hostname\n\t\tsetHostnameAsFQDN := true\n\t\tpod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN\n\t\t\/\/ Expected Pod FQDN\n\t\thostFQDN := fmt.Sprintf(\"%s.%s.%s.svc.%s\", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)\n\t\t\/\/ Fail if FQDN is longer than 64 characters, otherwise the Pod will remain pending until test timeout.\n\t\t\/\/ In Linux, 64 characters is the limit of the hostname kernel field, which this test sets to the pod FQDN.\n\t\tframework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf(\"The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.\", hostFQDN, len(hostFQDN)))\n\t\toutput := []string{fmt.Sprintf(\"%s;%s;\", hostFQDN, hostFQDN)}\n\t\t\/\/ Create Pod\n\t\tf.TestContainerOutput(\"fqdn and fqdn\", pod, 0, output)\n\t})\n\n})\n<|endoftext|>"} {"text":"<commit_before>package swagger2\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/getkin\/kin-openapi\/openapi2\"\n\t\"github.com\/getkin\/kin-openapi\/openapi2conv\"\n\t\"github.com\/grokify\/gotilla\/io\/ioutilmore\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc ConvertSwaggmanOAS2ToKinOAS2(smSpec *Specification) (*openapi2.Swagger, error) {\n\tbytes, err := json.Marshal(smSpec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar kinSpec openapi2.Swagger\n\terr = json.Unmarshal(bytes, &kinSpec)\n\treturn &kinSpec, err\n}\n\nfunc ConvertOAS2FileToOAS3File(oas2file, oas3file string, perm os.FileMode, pretty bool) error {\n\toas2, err := ReadOpenAPI2KinSpecFile(oas2file)\n\tif err != nil {\n\t\treturn err\n\t}\n\toas3, err := openapi2conv.ToV3Swagger(oas2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif FilenameIsYAML(oas3file) {\n\t\tbytes, err := yaml.Marshal(oas3)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ioutil.WriteFile(oas3file, bytes, perm)\n\t}\n\tindent := \"\"\n\tif pretty {\n\t\tindent = \" \"\n\t}\n\treturn ioutilmore.WriteFileJSON(oas3file, oas3, perm, \"\", indent)\n}\n<commit_msg>fix JSON output<commit_after>package swagger2\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/getkin\/kin-openapi\/openapi2\"\n\t\"github.com\/getkin\/kin-openapi\/openapi2conv\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc ConvertSwaggmanOAS2ToKinOAS2(smSpec *Specification) (*openapi2.Swagger, error) {\n\tbytes, err := json.Marshal(smSpec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar kinSpec openapi2.Swagger\n\terr = json.Unmarshal(bytes, &kinSpec)\n\treturn &kinSpec, err\n}\n\nfunc ConvertOAS2FileToOAS3File(oas2file, oas3file string, perm os.FileMode, pretty bool) error {\n\toas2, err := ReadOpenAPI2KinSpecFile(oas2file)\n\tif err != nil {\n\t\treturn err\n\t}\n\toas3, err := openapi2conv.ToV3Swagger(oas2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif FilenameIsYAML(oas3file) {\n\t\tbytes, err := yaml.Marshal(oas3)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ioutil.WriteFile(oas3file, bytes, perm)\n\t}\n\tbytes, err := oas3.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(oas3file, bytes, perm)\n}\n<|endoftext|>"} {"text":"<commit_before>package notable\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\tmandrill \"github.com\/harvesthq\/notable\/Godeps\/_workspace\/src\/github.com\/keighl\/mandrill\"\n\t\"log\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype Variables struct {\n\tToday string\n\tNotesByCategory []CategoryNotes\n}\n\ntype CategoryNotes struct {\n\tName string\n\tNotes []Note\n}\n\nfunc (categoryNotes *CategoryNotes) Title() string {\n\tcount := len(categoryNotes.Notes)\n\tannouncements := pluralize(count, \"Announcement\")\n\treturn fmt.Sprintf(\"#%s — %d %s\", categoryNotes.Name, count, announcements)\n}\n\nfunc Email() string {\n\tvar html bytes.Buffer\n\n\tnotesTemplate, err := template.ParseFiles(\"template.html\")\n\tcheck(err)\n\n\ttoday := time.Now().Format(\"Monday January 2, 2006\")\n\tvariables := Variables{today, notesByCategory()}\n\terr = notesTemplate.Execute(&html, variables)\n\tcheck(err)\n\n\treturn html.String()\n}\n\nfunc SendEmail(apiKey string) {\n\tclient := mandrill.ClientWithKey(apiKey)\n\tsubject := pluralize(len(Notes()), \"Notable Announcement\")\n\n\tmessage := &mandrill.Message{}\n\tmessage.AddRecipient(\"harvest.team@getharvest.com\", \"Harvest Team\", \"to\")\n\tmessage.FromEmail = \"notable@getharvest.com\"\n\tmessage.FromName = \"Harvest Notables\"\n\tmessage.Subject = subject\n\tmessage.HTML = Email()\n\n\t_, err := client.MessagesSend(message)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc notesByCategory() []CategoryNotes {\n\tvar category string\n\tgrouped := make(map[string]*CategoryNotes, 0)\n\n\tfor _, note := range Notes() {\n\t\tcategory = note.Category\n\n\t\tif _, found := grouped[category]; !found {\n\t\t\tgrouped[category] = &CategoryNotes{Name: category, Notes: make([]Note, 0)}\n\t\t}\n\n\t\tgrouped[category].Notes = append(grouped[category].Notes, note)\n\t}\n\n\tcategoryNotes := make([]CategoryNotes, 0)\n\n\tfor _, value := range grouped {\n\t\tcategoryNotes = append(categoryNotes, *value)\n\t}\n\n\treturn categoryNotes\n}\n\nfunc pluralize(count int, singularForm string) string {\n\tpluralForm := fmt.Sprintf(\"%s%s\", singularForm, \"s\")\n\n\tif count == 1 {\n\t\treturn fmt.Sprintf(\"1 %s\", singularForm)\n\t} else {\n\t\treturn fmt.Sprintf(\"%d %s\", count, pluralForm)\n\t}\n}\n<commit_msg>Fixing duplicative announcement count<commit_after>package notable\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\tmandrill \"github.com\/harvesthq\/notable\/Godeps\/_workspace\/src\/github.com\/keighl\/mandrill\"\n\t\"log\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype Variables struct {\n\tToday string\n\tNotesByCategory []CategoryNotes\n}\n\ntype CategoryNotes struct {\n\tName string\n\tNotes []Note\n}\n\nfunc (categoryNotes *CategoryNotes) Title() string {\n\tcount := len(categoryNotes.Notes)\n\tannouncements := pluralize(count, \"Announcement\")\n\treturn fmt.Sprintf(\"#%s — %s\", categoryNotes.Name, announcements)\n}\n\nfunc Email() string {\n\tvar html bytes.Buffer\n\n\tnotesTemplate, err := template.ParseFiles(\"template.html\")\n\tcheck(err)\n\n\ttoday := time.Now().Format(\"Monday January 2, 2006\")\n\tvariables := Variables{today, notesByCategory()}\n\terr = notesTemplate.Execute(&html, variables)\n\tcheck(err)\n\n\treturn html.String()\n}\n\nfunc SendEmail(apiKey string) {\n\tclient := mandrill.ClientWithKey(apiKey)\n\tsubject := pluralize(len(Notes()), \"Notable Announcement\")\n\n\tmessage := &mandrill.Message{}\n\tmessage.AddRecipient(\"harvest.team@getharvest.com\", \"Harvest Team\", \"to\")\n\tmessage.FromEmail = \"notable@getharvest.com\"\n\tmessage.FromName = \"Harvest Notables\"\n\tmessage.Subject = subject\n\tmessage.HTML = Email()\n\n\t_, err := client.MessagesSend(message)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc notesByCategory() []CategoryNotes {\n\tvar category string\n\tgrouped := make(map[string]*CategoryNotes, 0)\n\n\tfor _, note := range Notes() {\n\t\tcategory = note.Category\n\n\t\tif _, found := grouped[category]; !found {\n\t\t\tgrouped[category] = &CategoryNotes{Name: category, Notes: make([]Note, 0)}\n\t\t}\n\n\t\tgrouped[category].Notes = append(grouped[category].Notes, note)\n\t}\n\n\tcategoryNotes := make([]CategoryNotes, 0)\n\n\tfor _, value := range grouped {\n\t\tcategoryNotes = append(categoryNotes, *value)\n\t}\n\n\treturn categoryNotes\n}\n\nfunc pluralize(count int, singularForm string) string {\n\tpluralForm := fmt.Sprintf(\"%s%s\", singularForm, \"s\")\n\n\tif count == 1 {\n\t\treturn fmt.Sprintf(\"1 %s\", singularForm)\n\t} else {\n\t\treturn fmt.Sprintf(\"%d %s\", count, pluralForm)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Santiago Corredoira\n\/\/ Distributed under a BSD-like license.\npackage email\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Attachment struct {\n\tFilename string\n\tData []byte\n\tInline bool\n}\n\ntype Message struct {\n\tFrom string\n\tTo []string\n\tCc []string\n\tBcc []string\n\tSubject string\n\tBody string\n\tBodyContentType string\n\tAttachments map[string]*Attachment\n}\n\nfunc (m *Message) attach(file string, inline bool) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, filename := filepath.Split(file)\n\n\tm.Attachments[filename] = &Attachment{\n\t\tFilename: filename,\n\t\tData: data,\n\t\tInline: inline,\n\t}\n\n\treturn nil\n}\n\nfunc (m *Message) Attach(file string) error {\n\treturn m.attach(file, false)\n}\n\nfunc (m *Message) Inline(file string) error {\n\treturn m.attach(file, true)\n}\n\nfunc newMessage(subject string, body string, bodyContentType string) *Message {\n\tm := &Message{Subject: subject, Body: body, BodyContentType: bodyContentType}\n\n\tm.Attachments = make(map[string]*Attachment)\n\n\treturn m\n}\n\n\/\/ NewMessage returns a new Message that can compose an email with attachments\nfunc NewMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/plain\")\n}\n\n\/\/ NewMessage returns a new Message that can compose an HTML email with attachments\nfunc NewHTMLMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/html\")\n}\n\n\/\/ ToList returns all the recipients of the email\nfunc (m *Message) Tolist() []string {\n\ttolist := m.To\n\n\tfor _, cc := range m.Cc {\n\t\ttolist = append(tolist, cc)\n\t}\n\n\tfor _, bcc := range m.Bcc {\n\t\ttolist = append(tolist, bcc)\n\t}\n\n\treturn tolist\n}\n\n\/\/ Bytes returns the mail data\nfunc (m *Message) Bytes() []byte {\n\tbuf := bytes.NewBuffer(nil)\n\n\tbuf.WriteString(\"From: \" + m.From + \"\\r\\n\")\n\n\tt := time.Now()\n\tbuf.WriteString(\"Date: \" + t.Format(time.RFC822) + \"\\r\\n\")\n\n\tbuf.WriteString(\"To: \" + strings.Join(m.To, \",\") + \"\\r\\n\")\n\tif len(m.Cc) > 0 {\n\t\tbuf.WriteString(\"Cc: \" + strings.Join(m.Cc, \",\") + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"Subject: \" + m.Subject + \"\\r\\n\")\n\tbuf.WriteString(\"MIME-Version: 1.0\\r\\n\")\n\n\tboundary := \"f46d043c813270fc6b04c2d223da\"\n\n\tif len(m.Attachments) > 0 {\n\t\tbuf.WriteString(\"Content-Type: multipart\/mixed; boundary=\" + boundary + \"\\r\\n\")\n\t\tbuf.WriteString(\"--\" + boundary + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(fmt.Sprintf(\"Content-Type: %s; charset=utf-8\\r\\n\\r\\n\", m.BodyContentType))\n\tbuf.WriteString(m.Body)\n\tbuf.WriteString(\"\\r\\n\")\n\n\tif len(m.Attachments) > 0 {\n\t\tfor _, attachment := range m.Attachments {\n\t\t\tbuf.WriteString(\"\\r\\n\\r\\n--\" + boundary + \"\\r\\n\")\n\n\t\t\tif attachment.Inline {\n\t\t\t\tbuf.WriteString(\"Content-Type: message\/rfc822\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: inline; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tbuf.Write(attachment.Data)\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\"Content-Type: application\/octet-stream\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Transfer-Encoding: base64\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: attachment; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tb := make([]byte, base64.StdEncoding.EncodedLen(len(attachment.Data)))\n\t\t\t\tbase64.StdEncoding.Encode(b, attachment.Data)\n\n\t\t\t\t\/\/ write base64 content in lines of up to 76 chars\n\t\t\t\tfor i, l := 0, len(b); i < l; i++ {\n\t\t\t\t\tbuf.WriteByte(b[i])\n\t\t\t\t\tif (i+1)%76 == 0 {\n\t\t\t\t\t\tbuf.WriteString(\"\\r\\n\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuf.WriteString(\"\\r\\n--\" + boundary)\n\t\t}\n\n\t\tbuf.WriteString(\"--\")\n\t}\n\n\treturn buf.Bytes()\n}\n\nfunc Send(addr string, auth smtp.Auth, m *Message) error {\n\treturn smtp.SendMail(addr, auth, m.From, m.Tolist(), m.Bytes())\n}\n<commit_msg>Add Reply-To field<commit_after>\/\/ Copyright 2012 Santiago Corredoira\n\/\/ Distributed under a BSD-like license.\npackage email\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Attachment struct {\n\tFilename string\n\tData []byte\n\tInline bool\n}\n\ntype Message struct {\n\tFrom string\n\tTo []string\n\tCc []string\n\tBcc []string\n\tReplyTo string\n\tSubject string\n\tBody string\n\tBodyContentType string\n\tAttachments map[string]*Attachment\n}\n\nfunc (m *Message) attach(file string, inline bool) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, filename := filepath.Split(file)\n\n\tm.Attachments[filename] = &Attachment{\n\t\tFilename: filename,\n\t\tData: data,\n\t\tInline: inline,\n\t}\n\n\treturn nil\n}\n\nfunc (m *Message) Attach(file string) error {\n\treturn m.attach(file, false)\n}\n\nfunc (m *Message) Inline(file string) error {\n\treturn m.attach(file, true)\n}\n\nfunc newMessage(subject string, body string, bodyContentType string) *Message {\n\tm := &Message{Subject: subject, Body: body, BodyContentType: bodyContentType}\n\n\tm.Attachments = make(map[string]*Attachment)\n\n\treturn m\n}\n\n\/\/ NewMessage returns a new Message that can compose an email with attachments\nfunc NewMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/plain\")\n}\n\n\/\/ NewMessage returns a new Message that can compose an HTML email with attachments\nfunc NewHTMLMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/html\")\n}\n\n\/\/ ToList returns all the recipients of the email\nfunc (m *Message) Tolist() []string {\n\ttolist := m.To\n\n\tfor _, cc := range m.Cc {\n\t\ttolist = append(tolist, cc)\n\t}\n\n\tfor _, bcc := range m.Bcc {\n\t\ttolist = append(tolist, bcc)\n\t}\n\n\treturn tolist\n}\n\n\/\/ Bytes returns the mail data\nfunc (m *Message) Bytes() []byte {\n\tbuf := bytes.NewBuffer(nil)\n\n\tbuf.WriteString(\"From: \" + m.From + \"\\r\\n\")\n\n\tt := time.Now()\n\tbuf.WriteString(\"Date: \" + t.Format(time.RFC822) + \"\\r\\n\")\n\n\tbuf.WriteString(\"To: \" + strings.Join(m.To, \",\") + \"\\r\\n\")\n\tif len(m.Cc) > 0 {\n\t\tbuf.WriteString(\"Cc: \" + strings.Join(m.Cc, \",\") + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"Subject: \" + m.Subject + \"\\r\\n\")\n\n\tif len(m.ReplyTo) > 0 {\n\t\tbuf.WriteString(\"Reply-To: \" + m.ReplyTo + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"MIME-Version: 1.0\\r\\n\")\n\n\tboundary := \"f46d043c813270fc6b04c2d223da\"\n\n\tif len(m.Attachments) > 0 {\n\t\tbuf.WriteString(\"Content-Type: multipart\/mixed; boundary=\" + boundary + \"\\r\\n\")\n\t\tbuf.WriteString(\"--\" + boundary + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(fmt.Sprintf(\"Content-Type: %s; charset=utf-8\\r\\n\\r\\n\", m.BodyContentType))\n\tbuf.WriteString(m.Body)\n\tbuf.WriteString(\"\\r\\n\")\n\n\tif len(m.Attachments) > 0 {\n\t\tfor _, attachment := range m.Attachments {\n\t\t\tbuf.WriteString(\"\\r\\n\\r\\n--\" + boundary + \"\\r\\n\")\n\n\t\t\tif attachment.Inline {\n\t\t\t\tbuf.WriteString(\"Content-Type: message\/rfc822\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: inline; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tbuf.Write(attachment.Data)\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\"Content-Type: application\/octet-stream\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Transfer-Encoding: base64\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: attachment; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tb := make([]byte, base64.StdEncoding.EncodedLen(len(attachment.Data)))\n\t\t\t\tbase64.StdEncoding.Encode(b, attachment.Data)\n\n\t\t\t\t\/\/ write base64 content in lines of up to 76 chars\n\t\t\t\tfor i, l := 0, len(b); i < l; i++ {\n\t\t\t\t\tbuf.WriteByte(b[i])\n\t\t\t\t\tif (i+1)%76 == 0 {\n\t\t\t\t\t\tbuf.WriteString(\"\\r\\n\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuf.WriteString(\"\\r\\n--\" + boundary)\n\t\t}\n\n\t\tbuf.WriteString(\"--\")\n\t}\n\n\treturn buf.Bytes()\n}\n\nfunc Send(addr string, auth smtp.Auth, m *Message) error {\n\treturn smtp.SendMail(addr, auth, m.From, m.Tolist(), m.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>package glfw3\n\n\/\/#include \"glfw\/include\/GLFW\/glfw3.h\"\n\/\/void glfwSetErrorCallbackCB();\nimport \"C\"\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ ErrorCode corresponds to an error code.\ntype ErrorCode int\n\n\/\/ Error codes that are translated to panics and the programmer should not\n\/\/ expect to handle.\nconst (\n\tnotInitialized ErrorCode = C.GLFW_NOT_INITIALIZED \/\/ GLFW has not been initialized.\n\tnoCurrentContext ErrorCode = C.GLFW_NO_CURRENT_CONTEXT \/\/ No context is current.\n\tinvalidEnum ErrorCode = C.GLFW_INVALID_ENUM \/\/ One of the enum parameters for the function was given an invalid enum.\n\tinvalidValue ErrorCode = C.GLFW_INVALID_VALUE \/\/ One of the parameters for the function was given an invalid value.\n\toutOfMemory ErrorCode = C.GLFW_OUT_OF_MEMORY \/\/ A memory allocation failed.\n\tplatformError ErrorCode = C.GLFW_PLATFORM_ERROR \/\/ A platform-specific error occurred that does not match any of the more specific categories.\n)\n\nconst (\n\t\/\/ APIUnavailable is the error code used when GLFW could not find support\n\t\/\/ for the requested client API on the system.\n\t\/\/\n\t\/\/ The installed graphics driver does not support the requested client API,\n\t\/\/ or does not support it via the chosen context creation backend. Below\n\t\/\/ are a few examples.\n\t\/\/\n\t\/\/ Some pre-installed Windows graphics drivers do not support OpenGL. AMD\n\t\/\/ only supports OpenGL ES via EGL, while Nvidia and Intel only supports it\n\t\/\/ via a WGL or GLX extension. OS X does not provide OpenGL ES at all. The\n\t\/\/ Mesa EGL, OpenGL and OpenGL ES libraries do not interface with the\n\t\/\/ Nvidia binary driver.\n\tAPIUnavailable ErrorCode = C.GLFW_API_UNAVAILABLE\n\n\t\/\/ VersionUnavailable is the error code used when the requested OpenGL or\n\t\/\/ OpenGL ES (including any requested profile or context option) is not\n\t\/\/ available on this machine.\n\t\/\/\n\t\/\/ The machine does not support your requirements. If your application is\n\t\/\/ sufficiently flexible, downgrade your requirements and try again.\n\t\/\/ Otherwise, inform the user that their machine does not match your\n\t\/\/ requirements.\n\t\/\/\n\t\/\/ Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if\n\t\/\/ 5.0 comes out before the 4.x series gets that far, also fail with this\n\t\/\/ error and not GLFW_INVALID_VALUE, because GLFW cannot know what future\n\t\/\/ versions will exist.\n\tVersionUnavailable ErrorCode = C.GLFW_VERSION_UNAVAILABLE\n\n\t\/\/ FormatUnavailable is the error code used for both window creation and\n\t\/\/ clipboard querying format errors.\n\t\/\/\n\t\/\/ If emitted during window creation, the requested pixel format is not\n\t\/\/ supported. This means one or more hard constraints did not match any of\n\t\/\/ the available pixel formats. If your application is sufficiently\n\t\/\/ flexible, downgrade your requirements and try again. Otherwise, inform\n\t\/\/ the user that their machine does not match your requirements.\n\t\/\/\n\t\/\/ If emitted when querying the clipboard, the contents of the clipboard\n\t\/\/ could not be converted to the requested format. You should ignore the\n\t\/\/ error or report it to the user, as appropriate.\n\tFormatUnavailable ErrorCode = C.GLFW_FORMAT_UNAVAILABLE\n)\n\nfunc (e ErrorCode) String() string {\n\tswitch e {\n\tcase notInitialized:\n\t\treturn \"NotInitialized\"\n\tcase noCurrentContext:\n\t\treturn \"NoCurrentContext\"\n\tcase invalidEnum:\n\t\treturn \"InvalidEnum\"\n\tcase invalidValue:\n\t\treturn \"InvalidValue\"\n\tcase outOfMemory:\n\t\treturn \"OutOfMemory\"\n\tcase platformError:\n\t\treturn \"PlatformError\"\n\tcase APIUnavailable:\n\t\treturn \"APIUnavailable\"\n\tcase VersionUnavailable:\n\t\treturn \"VersionUnavailable\"\n\tcase FormatUnavailable:\n\t\treturn \"FormatUnavailable\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"ErrorCode(%d)\", e)\n\t}\n}\n\n\/\/ Error holds error code and description.\ntype Error struct {\n\tCode ErrorCode\n\tDesc string\n}\n\n\/\/ Error prints the error code and description in a readable format.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Code.String(), e.Desc)\n}\n\n\/\/ Note: There are many cryptic caveats to proper error handling here.\n\/\/ See: https:\/\/github.com\/go-gl\/glfw3\/pull\/86\n\n\/\/ Holds the value of the last error.\nvar lastError = make(chan *Error, 1)\n\n\/\/export goErrorCB\nfunc goErrorCB(code C.int, desc *C.char) {\n\tflushErrors()\n\terr := &Error{ErrorCode(code), C.GoString(desc)}\n\tselect {\n\tcase lastError <- err:\n\tdefault:\n\t\tfmt.Println(\"GLFW: An uncaught error has occurred:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t}\n}\n\n\/\/ Set the glfw callback internally\nfunc init() {\n\tC.glfwSetErrorCallbackCB()\n}\n\n\/\/ flushErrors is called by Terminate before it actually calls C.glfwTerminate,\n\/\/ this ensures that any uncaught errors buffered in lastError are printed\n\/\/ before the program exits.\nfunc flushErrors() {\n\terr := fetchError()\n\tif err != nil {\n\t\tfmt.Println(\"GLFW: An uncaught error has occurred:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t}\n}\n\n\/\/ acceptError fetches the next error from the error channel, it accepts only\n\/\/ errors with one of the given error codes. If any other error is encountered,\n\/\/ a panic will occur.\nfunc acceptError(codes ...ErrorCode) error {\n\t\/\/ Grab the next error, if there is one.\n\terr := fetchError()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Only if the error has the specific error code accepted by the caller, do\n\t\/\/ we return the error.\n\tfor _, code := range codes {\n\t\tif err.Code == code {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ The error isn't accepted by the caller. If the error code is not a code\n\t\/\/ defined in the GLFW C documentation as a programmer error, then the\n\t\/\/ caller should have accepted it. This is effectively a bug in this\n\t\/\/ package.\n\tswitch err.Code {\n\tcase notInitialized, noCurrentContext, invalidEnum, invalidValue, outOfMemory, platformError:\n\t\tpanic(err)\n\tdefault:\n\t\tfmt.Println(\"GLFW: A invalid error was not accepted by the caller:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t\tpanic(err)\n\t}\n}\n\n\/\/ panicError is a helper used by functions which expect no errors (except\n\/\/ programmer errors) to occur. It will panic if it finds any such error.\nfunc panicError() {\n\terr := acceptError()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ fetchError fetches the next error from the error channel, it does not block\n\/\/ and returns nil if there is no error present.\nfunc fetchError() *Error {\n\tselect {\n\tcase err := <-lastError:\n\t\treturn err\n\tdefault:\n\t\treturn nil\n\t}\n}\n<commit_msg>Fix grammer mistake.<commit_after>package glfw3\n\n\/\/#include \"glfw\/include\/GLFW\/glfw3.h\"\n\/\/void glfwSetErrorCallbackCB();\nimport \"C\"\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ ErrorCode corresponds to an error code.\ntype ErrorCode int\n\n\/\/ Error codes that are translated to panics and the programmer should not\n\/\/ expect to handle.\nconst (\n\tnotInitialized ErrorCode = C.GLFW_NOT_INITIALIZED \/\/ GLFW has not been initialized.\n\tnoCurrentContext ErrorCode = C.GLFW_NO_CURRENT_CONTEXT \/\/ No context is current.\n\tinvalidEnum ErrorCode = C.GLFW_INVALID_ENUM \/\/ One of the enum parameters for the function was given an invalid enum.\n\tinvalidValue ErrorCode = C.GLFW_INVALID_VALUE \/\/ One of the parameters for the function was given an invalid value.\n\toutOfMemory ErrorCode = C.GLFW_OUT_OF_MEMORY \/\/ A memory allocation failed.\n\tplatformError ErrorCode = C.GLFW_PLATFORM_ERROR \/\/ A platform-specific error occurred that does not match any of the more specific categories.\n)\n\nconst (\n\t\/\/ APIUnavailable is the error code used when GLFW could not find support\n\t\/\/ for the requested client API on the system.\n\t\/\/\n\t\/\/ The installed graphics driver does not support the requested client API,\n\t\/\/ or does not support it via the chosen context creation backend. Below\n\t\/\/ are a few examples.\n\t\/\/\n\t\/\/ Some pre-installed Windows graphics drivers do not support OpenGL. AMD\n\t\/\/ only supports OpenGL ES via EGL, while Nvidia and Intel only supports it\n\t\/\/ via a WGL or GLX extension. OS X does not provide OpenGL ES at all. The\n\t\/\/ Mesa EGL, OpenGL and OpenGL ES libraries do not interface with the\n\t\/\/ Nvidia binary driver.\n\tAPIUnavailable ErrorCode = C.GLFW_API_UNAVAILABLE\n\n\t\/\/ VersionUnavailable is the error code used when the requested OpenGL or\n\t\/\/ OpenGL ES (including any requested profile or context option) is not\n\t\/\/ available on this machine.\n\t\/\/\n\t\/\/ The machine does not support your requirements. If your application is\n\t\/\/ sufficiently flexible, downgrade your requirements and try again.\n\t\/\/ Otherwise, inform the user that their machine does not match your\n\t\/\/ requirements.\n\t\/\/\n\t\/\/ Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if\n\t\/\/ 5.0 comes out before the 4.x series gets that far, also fail with this\n\t\/\/ error and not GLFW_INVALID_VALUE, because GLFW cannot know what future\n\t\/\/ versions will exist.\n\tVersionUnavailable ErrorCode = C.GLFW_VERSION_UNAVAILABLE\n\n\t\/\/ FormatUnavailable is the error code used for both window creation and\n\t\/\/ clipboard querying format errors.\n\t\/\/\n\t\/\/ If emitted during window creation, the requested pixel format is not\n\t\/\/ supported. This means one or more hard constraints did not match any of\n\t\/\/ the available pixel formats. If your application is sufficiently\n\t\/\/ flexible, downgrade your requirements and try again. Otherwise, inform\n\t\/\/ the user that their machine does not match your requirements.\n\t\/\/\n\t\/\/ If emitted when querying the clipboard, the contents of the clipboard\n\t\/\/ could not be converted to the requested format. You should ignore the\n\t\/\/ error or report it to the user, as appropriate.\n\tFormatUnavailable ErrorCode = C.GLFW_FORMAT_UNAVAILABLE\n)\n\nfunc (e ErrorCode) String() string {\n\tswitch e {\n\tcase notInitialized:\n\t\treturn \"NotInitialized\"\n\tcase noCurrentContext:\n\t\treturn \"NoCurrentContext\"\n\tcase invalidEnum:\n\t\treturn \"InvalidEnum\"\n\tcase invalidValue:\n\t\treturn \"InvalidValue\"\n\tcase outOfMemory:\n\t\treturn \"OutOfMemory\"\n\tcase platformError:\n\t\treturn \"PlatformError\"\n\tcase APIUnavailable:\n\t\treturn \"APIUnavailable\"\n\tcase VersionUnavailable:\n\t\treturn \"VersionUnavailable\"\n\tcase FormatUnavailable:\n\t\treturn \"FormatUnavailable\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"ErrorCode(%d)\", e)\n\t}\n}\n\n\/\/ Error holds error code and description.\ntype Error struct {\n\tCode ErrorCode\n\tDesc string\n}\n\n\/\/ Error prints the error code and description in a readable format.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Code.String(), e.Desc)\n}\n\n\/\/ Note: There are many cryptic caveats to proper error handling here.\n\/\/ See: https:\/\/github.com\/go-gl\/glfw3\/pull\/86\n\n\/\/ Holds the value of the last error.\nvar lastError = make(chan *Error, 1)\n\n\/\/export goErrorCB\nfunc goErrorCB(code C.int, desc *C.char) {\n\tflushErrors()\n\terr := &Error{ErrorCode(code), C.GoString(desc)}\n\tselect {\n\tcase lastError <- err:\n\tdefault:\n\t\tfmt.Println(\"GLFW: An uncaught error has occurred:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t}\n}\n\n\/\/ Set the glfw callback internally\nfunc init() {\n\tC.glfwSetErrorCallbackCB()\n}\n\n\/\/ flushErrors is called by Terminate before it actually calls C.glfwTerminate,\n\/\/ this ensures that any uncaught errors buffered in lastError are printed\n\/\/ before the program exits.\nfunc flushErrors() {\n\terr := fetchError()\n\tif err != nil {\n\t\tfmt.Println(\"GLFW: An uncaught error has occurred:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t}\n}\n\n\/\/ acceptError fetches the next error from the error channel, it accepts only\n\/\/ errors with one of the given error codes. If any other error is encountered,\n\/\/ a panic will occur.\nfunc acceptError(codes ...ErrorCode) error {\n\t\/\/ Grab the next error, if there is one.\n\terr := fetchError()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Only if the error has the specific error code accepted by the caller, do\n\t\/\/ we return the error.\n\tfor _, code := range codes {\n\t\tif err.Code == code {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ The error isn't accepted by the caller. If the error code is not a code\n\t\/\/ defined in the GLFW C documentation as a programmer error, then the\n\t\/\/ caller should have accepted it. This is effectively a bug in this\n\t\/\/ package.\n\tswitch err.Code {\n\tcase notInitialized, noCurrentContext, invalidEnum, invalidValue, outOfMemory, platformError:\n\t\tpanic(err)\n\tdefault:\n\t\tfmt.Println(\"GLFW: An invalid error was not accepted by the caller:\", err)\n\t\tfmt.Println(\"GLFW: Please report this bug in the Go package immediately.\")\n\t\tpanic(err)\n\t}\n}\n\n\/\/ panicError is a helper used by functions which expect no errors (except\n\/\/ programmer errors) to occur. It will panic if it finds any such error.\nfunc panicError() {\n\terr := acceptError()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ fetchError fetches the next error from the error channel, it does not block\n\/\/ and returns nil if there is no error present.\nfunc fetchError() *Error {\n\tselect {\n\tcase err := <-lastError:\n\t\treturn err\n\tdefault:\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage shim\n\nimport (\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/sys\"\n\trunc \"github.com\/containerd\/go-runc\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ErrNoSuchProcess is returned when the process no longer exists\nvar ErrNoSuchProcess = errors.New(\"no such process\")\n\nconst bufferSize = 2048\n\n\/\/ Reap should be called when the process receives an SIGCHLD. Reap will reap\n\/\/ all exited processes and close their wait channels\nfunc Reap() error {\n\tnow := time.Now()\n\texits, err := sys.Reap(false)\n\tDefault.Lock()\n\tfor c := range Default.subscribers {\n\t\tfor _, e := range exits {\n\t\t\tselect {\n\t\t\tcase c <- runc.Exit{\n\t\t\t\tTimestamp: now,\n\t\t\t\tPid: e.Pid,\n\t\t\t\tStatus: e.Status,\n\t\t\t}:\n\t\t\tdefault:\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"subscriber\": c,\n\t\t\t\t\t\"pid\": e.Pid,\n\t\t\t\t\t\"status\": e.Status,\n\t\t\t\t}).Warn(\"failed to send exit to subscriber\")\n\t\t\t}\n\t\t}\n\t}\n\tDefault.Unlock()\n\treturn err\n}\n\n\/\/ Default is the default monitor initialized for the package\nvar Default = &Monitor{\n\tsubscribers: make(map[chan runc.Exit]struct{}),\n}\n\n\/\/ Monitor monitors the underlying system for process status changes\ntype Monitor struct {\n\tsync.Mutex\n\n\tsubscribers map[chan runc.Exit]struct{}\n}\n\n\/\/ Start starts the command a registers the process with the reaper\nfunc (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) {\n\tec := m.Subscribe()\n\tif err := c.Start(); err != nil {\n\t\tm.Unsubscribe(ec)\n\t\treturn nil, err\n\t}\n\treturn ec, nil\n}\n\n\/\/ Wait blocks until a process is signal as dead.\n\/\/ User should rely on the value of the exit status to determine if the\n\/\/ command was successful or not.\nfunc (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) {\n\tfor e := range ec {\n\t\tif e.Pid == c.Process.Pid {\n\t\t\t\/\/ make sure we flush all IO\n\t\t\tc.Wait()\n\t\t\tm.Unsubscribe(ec)\n\t\t\treturn e.Status, nil\n\t\t}\n\t}\n\t\/\/ return no such process if the ec channel is closed and no more exit\n\t\/\/ events will be sent\n\treturn -1, ErrNoSuchProcess\n}\n\n\/\/ Subscribe to process exit changes\nfunc (m *Monitor) Subscribe() chan runc.Exit {\n\tc := make(chan runc.Exit, bufferSize)\n\tm.Lock()\n\tm.subscribers[c] = struct{}{}\n\tm.Unlock()\n\treturn c\n}\n\n\/\/ Unsubscribe to process exit changes\nfunc (m *Monitor) Unsubscribe(c chan runc.Exit) {\n\tm.Lock()\n\tdelete(m.subscribers, c)\n\tclose(c)\n\tm.Unlock()\n}\n<commit_msg>Partially revert the event discard change in #2748.<commit_after>\/\/ +build !windows\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage shim\n\nimport (\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/sys\"\n\trunc \"github.com\/containerd\/go-runc\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ErrNoSuchProcess is returned when the process no longer exists\nvar ErrNoSuchProcess = errors.New(\"no such process\")\n\nconst bufferSize = 2048\n\n\/\/ Reap should be called when the process receives an SIGCHLD. Reap will reap\n\/\/ all exited processes and close their wait channels\nfunc Reap() error {\n\tnow := time.Now()\n\texits, err := sys.Reap(false)\n\tDefault.Lock()\n\tfor c := range Default.subscribers {\n\t\tfor _, e := range exits {\n\t\t\tc <- runc.Exit{\n\t\t\t\tTimestamp: now,\n\t\t\t\tPid: e.Pid,\n\t\t\t\tStatus: e.Status,\n\t\t\t}\n\t\t}\n\t}\n\tDefault.Unlock()\n\treturn err\n}\n\n\/\/ Default is the default monitor initialized for the package\nvar Default = &Monitor{\n\tsubscribers: make(map[chan runc.Exit]struct{}),\n}\n\n\/\/ Monitor monitors the underlying system for process status changes\ntype Monitor struct {\n\tsync.Mutex\n\n\tsubscribers map[chan runc.Exit]struct{}\n}\n\n\/\/ Start starts the command a registers the process with the reaper\nfunc (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) {\n\tec := m.Subscribe()\n\tif err := c.Start(); err != nil {\n\t\tm.Unsubscribe(ec)\n\t\treturn nil, err\n\t}\n\treturn ec, nil\n}\n\n\/\/ Wait blocks until a process is signal as dead.\n\/\/ User should rely on the value of the exit status to determine if the\n\/\/ command was successful or not.\nfunc (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) {\n\tfor e := range ec {\n\t\tif e.Pid == c.Process.Pid {\n\t\t\t\/\/ make sure we flush all IO\n\t\t\tc.Wait()\n\t\t\tm.Unsubscribe(ec)\n\t\t\treturn e.Status, nil\n\t\t}\n\t}\n\t\/\/ return no such process if the ec channel is closed and no more exit\n\t\/\/ events will be sent\n\treturn -1, ErrNoSuchProcess\n}\n\n\/\/ Subscribe to process exit changes\nfunc (m *Monitor) Subscribe() chan runc.Exit {\n\tc := make(chan runc.Exit, bufferSize)\n\tm.Lock()\n\tm.subscribers[c] = struct{}{}\n\tm.Unlock()\n\treturn c\n}\n\n\/\/ Unsubscribe to process exit changes\nfunc (m *Monitor) Unsubscribe(c chan runc.Exit) {\n\tm.Lock()\n\tdelete(m.subscribers, c)\n\tclose(c)\n\tm.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package operation\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/security\"\n)\n\ntype UploadResult struct {\n\tName string `json:\"name,omitempty\"`\n\tSize uint32 `json:\"size,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n\tETag string `json:\"error,omitempty\"`\n}\n\nvar (\n\tclient *http.Client\n)\n\nfunc init() {\n\tclient = &http.Client{Transport: &http.Transport{\n\t\tMaxIdleConnsPerHost: 1024,\n\t}}\n}\n\nvar fileNameEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", \"\\\"\", \"\\\\\\\"\")\n\n\/\/ Upload sends a POST request to a volume server to upload the content\nfunc Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {\n\treturn upload_content(uploadUrl, func(w io.Writer) (err error) {\n\t\t_, err = io.Copy(w, reader)\n\t\treturn\n\t}, filename, isGzipped, mtype, pairMap, jwt)\n}\nfunc upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {\n\tbody_buf := bytes.NewBufferString(\"\")\n\tbody_writer := multipart.NewWriter(body_buf)\n\th := make(textproto.MIMEHeader)\n\th.Set(\"Content-Disposition\", fmt.Sprintf(`form-data; name=\"file\"; filename=\"%s\"`, fileNameEscaper.Replace(filename)))\n\tif mtype == \"\" {\n\t\tmtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename)))\n\t}\n\tif mtype != \"\" {\n\t\th.Set(\"Content-Type\", mtype)\n\t}\n\tif isGzipped {\n\t\th.Set(\"Content-Encoding\", \"gzip\")\n\t}\n\tif jwt != \"\" {\n\t\th.Set(\"Authorization\", \"BEARER \"+string(jwt))\n\t}\n\n\tfile_writer, cp_err := body_writer.CreatePart(h)\n\tif cp_err != nil {\n\t\tglog.V(0).Infoln(\"error creating form file\", cp_err.Error())\n\t\treturn nil, cp_err\n\t}\n\tif err := fillBufferFunction(file_writer); err != nil {\n\t\tglog.V(0).Infoln(\"error copying data\", err)\n\t\treturn nil, err\n\t}\n\tcontent_type := body_writer.FormDataContentType()\n\tif err := body_writer.Close(); err != nil {\n\t\tglog.V(0).Infoln(\"error closing body\", err)\n\t\treturn nil, err\n\t}\n\n\treq, postErr := http.NewRequest(\"POST\", uploadUrl, body_buf)\n\tif postErr != nil {\n\t\tglog.V(0).Infoln(\"failing to upload to\", uploadUrl, postErr.Error())\n\t\treturn nil, postErr\n\t}\n\treq.Header.Set(\"Content-Type\", content_type)\n\tfor k, v := range pairMap {\n\t\treq.Header.Set(k, v)\n\t}\n\tresp, post_err := client.Do(req)\n\tif post_err != nil {\n\t\tglog.V(0).Infoln(\"failing to upload to\", uploadUrl, post_err.Error())\n\t\treturn nil, post_err\n\t}\n\tdefer resp.Body.Close()\n\tetag := getEtag(resp)\n\tresp_body, ra_err := ioutil.ReadAll(resp.Body)\n\tif ra_err != nil {\n\t\treturn nil, ra_err\n\t}\n\tvar ret UploadResult\n\tunmarshal_err := json.Unmarshal(resp_body, &ret)\n\tif unmarshal_err != nil {\n\t\tglog.V(0).Infoln(\"failing to read upload response\", uploadUrl, string(resp_body))\n\t\treturn nil, unmarshal_err\n\t}\n\tif ret.Error != \"\" {\n\t\treturn nil, errors.New(ret.Error)\n\t}\n\tret.ETag = etag\n\treturn &ret, nil\n}\n\nfunc getEtag(r *http.Response) (etag string) {\n\tetag = r.Header.Get(\"ETag\")\n\tif strings.HasPrefix(etag, \"\\\"\") && strings.HasSuffix(etag, \"\\\"\") {\n\t\tetag = etag[1 : len(etag)-1]\n\t}\n\treturn\n}\n<commit_msg>fix https:\/\/github.com\/chrislusf\/seaweedfs\/issues\/780<commit_after>package operation\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/security\"\n)\n\ntype UploadResult struct {\n\tName string `json:\"name,omitempty\"`\n\tSize uint32 `json:\"size,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n\tETag string `json:\"error,omitempty\"`\n}\n\nvar (\n\tclient *http.Client\n)\n\nfunc init() {\n\tclient = &http.Client{Transport: &http.Transport{\n\t\tMaxIdleConnsPerHost: 1024,\n\t}}\n}\n\nvar fileNameEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", \"\\\"\", \"\\\\\\\"\")\n\n\/\/ Upload sends a POST request to a volume server to upload the content\nfunc Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {\n\treturn upload_content(uploadUrl, func(w io.Writer) (err error) {\n\t\t_, err = io.Copy(w, reader)\n\t\treturn\n\t}, filename, isGzipped, mtype, pairMap, jwt)\n}\nfunc upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {\n\tbody_buf := bytes.NewBufferString(\"\")\n\tbody_writer := multipart.NewWriter(body_buf)\n\th := make(textproto.MIMEHeader)\n\th.Set(\"Content-Disposition\", fmt.Sprintf(`form-data; name=\"file\"; filename=\"%s\"`, fileNameEscaper.Replace(filename)))\n\tif mtype == \"\" {\n\t\tmtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename)))\n\t}\n\tif mtype != \"\" {\n\t\th.Set(\"Content-Type\", mtype)\n\t}\n\tif isGzipped {\n\t\th.Set(\"Content-Encoding\", \"gzip\")\n\t}\n\tif jwt != \"\" {\n\t\th.Set(\"Authorization\", \"BEARER \"+string(jwt))\n\t}\n\n\tfile_writer, cp_err := body_writer.CreatePart(h)\n\tif cp_err != nil {\n\t\tglog.V(0).Infoln(\"error creating form file\", cp_err.Error())\n\t\treturn nil, cp_err\n\t}\n\tif err := fillBufferFunction(file_writer); err != nil {\n\t\tglog.V(0).Infoln(\"error copying data\", err)\n\t\treturn nil, err\n\t}\n\tcontent_type := body_writer.FormDataContentType()\n\tif err := body_writer.Close(); err != nil {\n\t\tglog.V(0).Infoln(\"error closing body\", err)\n\t\treturn nil, err\n\t}\n\n\treq, postErr := http.NewRequest(\"POST\", uploadUrl, body_buf)\n\tif postErr != nil {\n\t\tglog.V(0).Infoln(\"failing to upload to\", uploadUrl, postErr.Error())\n\t\treturn nil, postErr\n\t}\n\treq.Header.Set(\"Content-Type\", content_type)\n\tfor k, v := range pairMap {\n\t\treq.Header.Set(k, v)\n\t}\n\tresp, post_err := client.Do(req)\n\tif post_err != nil {\n\t\tglog.V(0).Infoln(\"failing to upload to\", uploadUrl, post_err.Error())\n\t\treturn nil, post_err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode < http.StatusOK ||\n\t\tresp.StatusCode > http.StatusIMUsed {\n\t\treturn nil, errors.New(http.StatusText(resp.StatusCode))\n\t}\n\n\tetag := getEtag(resp)\n\tresp_body, ra_err := ioutil.ReadAll(resp.Body)\n\tif ra_err != nil {\n\t\treturn nil, ra_err\n\t}\n\tvar ret UploadResult\n\tunmarshal_err := json.Unmarshal(resp_body, &ret)\n\tif unmarshal_err != nil {\n\t\tglog.V(0).Infoln(\"failing to read upload response\", uploadUrl, string(resp_body))\n\t\treturn nil, unmarshal_err\n\t}\n\tif ret.Error != \"\" {\n\t\treturn nil, errors.New(ret.Error)\n\t}\n\tret.ETag = etag\n\treturn &ret, nil\n}\n\nfunc getEtag(r *http.Response) (etag string) {\n\tetag = r.Header.Get(\"ETag\")\n\tif strings.HasPrefix(etag, \"\\\"\") && strings.HasSuffix(etag, \"\\\"\") {\n\t\tetag = etag[1 : len(etag)-1]\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage migrationminion\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\n\t\"github.com\/juju\/juju\/agent\"\n\t\"github.com\/juju\/juju\/api\"\n\t\"github.com\/juju\/juju\/api\/base\"\n\t\"github.com\/juju\/juju\/core\/migration\"\n\t\"github.com\/juju\/juju\/network\"\n\t\"github.com\/juju\/juju\/watcher\"\n\t\"github.com\/juju\/juju\/worker\"\n\t\"github.com\/juju\/juju\/worker\/catacomb\"\n\t\"github.com\/juju\/juju\/worker\/fortress\"\n)\n\nvar logger = loggo.GetLogger(\"juju.worker.migrationminion\")\n\n\/\/ Facade exposes controller functionality to a Worker.\ntype Facade interface {\n\tWatch() (watcher.MigrationStatusWatcher, error)\n\tReport(migrationId string, phase migration.Phase, success bool) error\n}\n\n\/\/ Config defines the operation of a Worker.\ntype Config struct {\n\tAgent agent.Agent\n\tFacade Facade\n\tGuard fortress.Guard\n\tAPIOpen func(*api.Info, api.DialOpts) (api.Connection, error)\n\tValidateMigration func(base.APICaller) error\n}\n\n\/\/ Validate returns an error if config cannot drive a Worker.\nfunc (config Config) Validate() error {\n\tif config.Agent == nil {\n\t\treturn errors.NotValidf(\"nil Agent\")\n\t}\n\tif config.Facade == nil {\n\t\treturn errors.NotValidf(\"nil Facade\")\n\t}\n\tif config.Guard == nil {\n\t\treturn errors.NotValidf(\"nil Guard\")\n\t}\n\tif config.APIOpen == nil {\n\t\treturn errors.NotValidf(\"nil APIOpen\")\n\t}\n\tif config.ValidateMigration == nil {\n\t\treturn errors.NotValidf(\"nil ValidateMigration\")\n\t}\n\treturn nil\n}\n\n\/\/ New returns a Worker backed by config, or an error.\nfunc New(config Config) (worker.Worker, error) {\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tw := &Worker{config: config}\n\terr := catacomb.Invoke(catacomb.Plan{\n\t\tSite: &w.catacomb,\n\t\tWork: w.loop,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn w, nil\n}\n\n\/\/ Worker waits for a model migration to be active, then locks down the\n\/\/ configured fortress and implements the migration.\ntype Worker struct {\n\tcatacomb catacomb.Catacomb\n\tconfig Config\n}\n\n\/\/ Kill implements worker.Worker.\nfunc (w *Worker) Kill() {\n\tw.catacomb.Kill(nil)\n}\n\n\/\/ Wait implements worker.Worker.\nfunc (w *Worker) Wait() error {\n\treturn w.catacomb.Wait()\n}\n\nfunc (w *Worker) loop() error {\n\twatcher, err := w.config.Facade.Watch()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"setting up watcher\")\n\t}\n\tif err := w.catacomb.Add(watcher); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-w.catacomb.Dying():\n\t\t\treturn w.catacomb.ErrDying()\n\t\tcase status, ok := <-watcher.Changes():\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"watcher channel closed\")\n\t\t\t}\n\t\t\tif err := w.handle(status); err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *Worker) handle(status watcher.MigrationStatus) error {\n\tlogger.Infof(\"migration phase is now: %s\", status.Phase)\n\n\tif !status.Phase.IsRunning() {\n\t\treturn w.config.Guard.Unlock()\n\t}\n\n\t\/\/ Ensure that all workers related to migration fortress have\n\t\/\/ stopped and aren't allowed to restart.\n\terr := w.config.Guard.Lockdown(w.catacomb.Dying())\n\tif errors.Cause(err) == fortress.ErrAborted {\n\t\treturn w.catacomb.ErrDying()\n\t} else if err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tswitch status.Phase {\n\tcase migration.QUIESCE:\n\t\terr = w.doQUIESCE(status)\n\tcase migration.VALIDATION:\n\t\terr = w.doVALIDATION(status)\n\tcase migration.SUCCESS:\n\t\terr = w.doSUCCESS(status)\n\tdefault:\n\t\t\/\/ The minion doesn't need to do anything for other\n\t\t\/\/ migration phases.\n\t}\n\treturn errors.Trace(err)\n}\n\nfunc (w *Worker) doQUIESCE(status watcher.MigrationStatus) error {\n\t\/\/ Report that the minion is ready and that all workers that\n\t\/\/ should be shut down have done so.\n\treturn w.report(status, true)\n}\n\nfunc (w *Worker) doVALIDATION(status watcher.MigrationStatus) error {\n\terr := w.validate(status)\n\tif err != nil {\n\t\t\/\/ Don't return this error just log it and report to the\n\t\t\/\/ migrationmaster that things didn't work out.\n\t\tlogger.Errorf(\"validation failed: %v\", err)\n\t}\n\treturn w.report(status, err == nil)\n}\n\nfunc (w *Worker) validate(status watcher.MigrationStatus) error {\n\tagentConf := w.config.Agent.CurrentConfig()\n\tapiInfo, ok := agentConf.APIInfo()\n\tif !ok {\n\t\treturn errors.New(\"no API connection details\")\n\t}\n\tapiInfo.Addrs = status.TargetAPIAddrs\n\tapiInfo.CACert = status.TargetCACert\n\n\t\/\/ Use zero DialOpts (no retries) because the worker must stay\n\t\/\/ responsive to Kill requests. We don't want it to be blocked by\n\t\/\/ a long set of retry attempts.\n\tconn, err := w.config.APIOpen(apiInfo, api.DialOpts{})\n\tif err != nil {\n\t\t\/\/ Don't return this error just log it and report to the\n\t\t\/\/ migrationmaster that things didn't work out.\n\t\treturn errors.Annotate(err, \"failed to open API to target controller\")\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Ask the agent to confirm that things look ok.\n\terr = w.config.ValidateMigration(conn)\n\treturn errors.Trace(err)\n}\n\nfunc (w *Worker) doSUCCESS(status watcher.MigrationStatus) error {\n\thps, err := apiAddrsToHostPorts(status.TargetAPIAddrs)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"converting API addresses\")\n\t}\n\n\t\/\/ Report first because the config update that's about to happen\n\t\/\/ will cause the API connection to drop. The SUCCESS phase is the\n\t\/\/ point of no return anyway.\n\tif err := w.report(status, true); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\terr = w.config.Agent.ChangeConfig(func(conf agent.ConfigSetter) error {\n\t\tconf.SetAPIHostPorts(hps)\n\t\tconf.SetCACert(status.TargetCACert)\n\t\treturn nil\n\t})\n\treturn errors.Annotate(err, \"setting agent config\")\n}\n\nfunc (w *Worker) report(status watcher.MigrationStatus, success bool) error {\n\terr := w.config.Facade.Report(status.MigrationId, status.Phase, success)\n\treturn errors.Annotate(err, \"failed to report phase progress\")\n}\n\nfunc apiAddrsToHostPorts(addrs []string) ([][]network.HostPort, error) {\n\thps, err := network.ParseHostPorts(addrs...)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn [][]network.HostPort{hps}, nil\n}\n<commit_msg>worker\/migrationminion: Log when reporting back<commit_after>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage migrationminion\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\n\t\"github.com\/juju\/juju\/agent\"\n\t\"github.com\/juju\/juju\/api\"\n\t\"github.com\/juju\/juju\/api\/base\"\n\t\"github.com\/juju\/juju\/core\/migration\"\n\t\"github.com\/juju\/juju\/network\"\n\t\"github.com\/juju\/juju\/watcher\"\n\t\"github.com\/juju\/juju\/worker\"\n\t\"github.com\/juju\/juju\/worker\/catacomb\"\n\t\"github.com\/juju\/juju\/worker\/fortress\"\n)\n\nvar logger = loggo.GetLogger(\"juju.worker.migrationminion\")\n\n\/\/ Facade exposes controller functionality to a Worker.\ntype Facade interface {\n\tWatch() (watcher.MigrationStatusWatcher, error)\n\tReport(migrationId string, phase migration.Phase, success bool) error\n}\n\n\/\/ Config defines the operation of a Worker.\ntype Config struct {\n\tAgent agent.Agent\n\tFacade Facade\n\tGuard fortress.Guard\n\tAPIOpen func(*api.Info, api.DialOpts) (api.Connection, error)\n\tValidateMigration func(base.APICaller) error\n}\n\n\/\/ Validate returns an error if config cannot drive a Worker.\nfunc (config Config) Validate() error {\n\tif config.Agent == nil {\n\t\treturn errors.NotValidf(\"nil Agent\")\n\t}\n\tif config.Facade == nil {\n\t\treturn errors.NotValidf(\"nil Facade\")\n\t}\n\tif config.Guard == nil {\n\t\treturn errors.NotValidf(\"nil Guard\")\n\t}\n\tif config.APIOpen == nil {\n\t\treturn errors.NotValidf(\"nil APIOpen\")\n\t}\n\tif config.ValidateMigration == nil {\n\t\treturn errors.NotValidf(\"nil ValidateMigration\")\n\t}\n\treturn nil\n}\n\n\/\/ New returns a Worker backed by config, or an error.\nfunc New(config Config) (worker.Worker, error) {\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tw := &Worker{config: config}\n\terr := catacomb.Invoke(catacomb.Plan{\n\t\tSite: &w.catacomb,\n\t\tWork: w.loop,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn w, nil\n}\n\n\/\/ Worker waits for a model migration to be active, then locks down the\n\/\/ configured fortress and implements the migration.\ntype Worker struct {\n\tcatacomb catacomb.Catacomb\n\tconfig Config\n}\n\n\/\/ Kill implements worker.Worker.\nfunc (w *Worker) Kill() {\n\tw.catacomb.Kill(nil)\n}\n\n\/\/ Wait implements worker.Worker.\nfunc (w *Worker) Wait() error {\n\treturn w.catacomb.Wait()\n}\n\nfunc (w *Worker) loop() error {\n\twatcher, err := w.config.Facade.Watch()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"setting up watcher\")\n\t}\n\tif err := w.catacomb.Add(watcher); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-w.catacomb.Dying():\n\t\t\treturn w.catacomb.ErrDying()\n\t\tcase status, ok := <-watcher.Changes():\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"watcher channel closed\")\n\t\t\t}\n\t\t\tif err := w.handle(status); err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *Worker) handle(status watcher.MigrationStatus) error {\n\tlogger.Infof(\"migration phase is now: %s\", status.Phase)\n\n\tif !status.Phase.IsRunning() {\n\t\treturn w.config.Guard.Unlock()\n\t}\n\n\t\/\/ Ensure that all workers related to migration fortress have\n\t\/\/ stopped and aren't allowed to restart.\n\terr := w.config.Guard.Lockdown(w.catacomb.Dying())\n\tif errors.Cause(err) == fortress.ErrAborted {\n\t\treturn w.catacomb.ErrDying()\n\t} else if err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tswitch status.Phase {\n\tcase migration.QUIESCE:\n\t\terr = w.doQUIESCE(status)\n\tcase migration.VALIDATION:\n\t\terr = w.doVALIDATION(status)\n\tcase migration.SUCCESS:\n\t\terr = w.doSUCCESS(status)\n\tdefault:\n\t\t\/\/ The minion doesn't need to do anything for other\n\t\t\/\/ migration phases.\n\t}\n\treturn errors.Trace(err)\n}\n\nfunc (w *Worker) doQUIESCE(status watcher.MigrationStatus) error {\n\t\/\/ Report that the minion is ready and that all workers that\n\t\/\/ should be shut down have done so.\n\treturn w.report(status, true)\n}\n\nfunc (w *Worker) doVALIDATION(status watcher.MigrationStatus) error {\n\terr := w.validate(status)\n\tif err != nil {\n\t\t\/\/ Don't return this error just log it and report to the\n\t\t\/\/ migrationmaster that things didn't work out.\n\t\tlogger.Errorf(\"validation failed: %v\", err)\n\t}\n\treturn w.report(status, err == nil)\n}\n\nfunc (w *Worker) validate(status watcher.MigrationStatus) error {\n\tagentConf := w.config.Agent.CurrentConfig()\n\tapiInfo, ok := agentConf.APIInfo()\n\tif !ok {\n\t\treturn errors.New(\"no API connection details\")\n\t}\n\tapiInfo.Addrs = status.TargetAPIAddrs\n\tapiInfo.CACert = status.TargetCACert\n\n\t\/\/ Use zero DialOpts (no retries) because the worker must stay\n\t\/\/ responsive to Kill requests. We don't want it to be blocked by\n\t\/\/ a long set of retry attempts.\n\tconn, err := w.config.APIOpen(apiInfo, api.DialOpts{})\n\tif err != nil {\n\t\t\/\/ Don't return this error just log it and report to the\n\t\t\/\/ migrationmaster that things didn't work out.\n\t\treturn errors.Annotate(err, \"failed to open API to target controller\")\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Ask the agent to confirm that things look ok.\n\terr = w.config.ValidateMigration(conn)\n\treturn errors.Trace(err)\n}\n\nfunc (w *Worker) doSUCCESS(status watcher.MigrationStatus) error {\n\thps, err := apiAddrsToHostPorts(status.TargetAPIAddrs)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"converting API addresses\")\n\t}\n\n\t\/\/ Report first because the config update that's about to happen\n\t\/\/ will cause the API connection to drop. The SUCCESS phase is the\n\t\/\/ point of no return anyway.\n\tif err := w.report(status, true); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\terr = w.config.Agent.ChangeConfig(func(conf agent.ConfigSetter) error {\n\t\tconf.SetAPIHostPorts(hps)\n\t\tconf.SetCACert(status.TargetCACert)\n\t\treturn nil\n\t})\n\treturn errors.Annotate(err, \"setting agent config\")\n}\n\nfunc (w *Worker) report(status watcher.MigrationStatus, success bool) error {\n\tlogger.Debugf(\"reporting back for phase %s: %v\", status.Phase, success)\n\terr := w.config.Facade.Report(status.MigrationId, status.Phase, success)\n\treturn errors.Annotate(err, \"failed to report phase progress\")\n}\n\nfunc apiAddrsToHostPorts(addrs []string) ([][]network.HostPort, error) {\n\thps, err := network.ParseHostPorts(addrs...)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn [][]network.HostPort{hps}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage obsreport\n\nimport (\n\t\"context\"\n\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/tag\"\n\t\"go.opencensus.io\/trace\"\n\n\t\"go.opentelemetry.io\/collector\/config\/configmodels\"\n)\n\nconst (\n\t\/\/ Key used to identify receivers in metrics and traces.\n\tReceiverKey = \"receiver\"\n\t\/\/ Key used to identify the transport used to received the data.\n\tTransportKey = \"transport\"\n\t\/\/ Key used to identify the format of the data received.\n\tFormatKey = \"format\"\n\n\t\/\/ Key used to identify spans accepted by the Collector.\n\tAcceptedSpansKey = \"accepted_spans\"\n\t\/\/ Key used to identify spans refused (ie.: not ingested) by the Collector.\n\tRefusedSpansKey = \"refused_spans\"\n\n\t\/\/ Key used to identify metric points accepted by the Collector.\n\tAcceptedMetricPointsKey = \"accepted_metric_points\"\n\t\/\/ Key used to identify metric points refused (ie.: not ingested) by the\n\t\/\/ Collector.\n\tRefusedMetricPointsKey = \"refused_metric_points\"\n\n\t\/\/ Key used to identify log records accepted by the Collector.\n\tAcceptedLogRecordsKey = \"accepted_log_records\"\n\t\/\/ Key used to identify log records refused (ie.: not ingested) by the\n\t\/\/ Collector.\n\tRefusedLogRecordsKey = \"refused_log_records\"\n)\n\nvar (\n\ttagKeyReceiver, _ = tag.NewKey(ReceiverKey)\n\ttagKeyTransport, _ = tag.NewKey(TransportKey)\n\n\treceiverPrefix = ReceiverKey + nameSep\n\treceiveTraceDataOperationSuffix = nameSep + \"TraceDataReceived\"\n\treceiverMetricsOperationSuffix = nameSep + \"MetricsReceived\"\n\n\t\/\/ Receiver metrics. Any count of data items below is in the original format\n\t\/\/ that they were received, reasoning: reconciliation is easier if measurements\n\t\/\/ on clients and receiver are expected to be the same. Translation issues\n\t\/\/ that result in a different number of elements should be reported in a\n\t\/\/ separate way.\n\tmReceiverAcceptedSpans = stats.Int64(\n\t\treceiverPrefix+AcceptedSpansKey,\n\t\t\"Number of spans successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedSpans = stats.Int64(\n\t\treceiverPrefix+RefusedSpansKey,\n\t\t\"Number of spans that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverAcceptedMetricPoints = stats.Int64(\n\t\treceiverPrefix+AcceptedMetricPointsKey,\n\t\t\"Number of metric points successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedMetricPoints = stats.Int64(\n\t\treceiverPrefix+RefusedMetricPointsKey,\n\t\t\"Number of metric points that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverAcceptedLogRecords = stats.Int64(\n\t\treceiverPrefix+AcceptedLogRecordsKey,\n\t\t\"Number of log records successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedLogRecords = stats.Int64(\n\t\treceiverPrefix+RefusedLogRecordsKey,\n\t\t\"Number of log records that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n)\n\n\/\/ StartReceiveOptions has the options related to starting a receive operation.\ntype StartReceiveOptions struct {\n\t\/\/ LongLivedCtx when true indicates that the context passed in the call\n\t\/\/ outlives the individual receive operation. See WithLongLivedCtx() for\n\t\/\/ more information.\n\tLongLivedCtx bool\n}\n\n\/\/ StartReceiveOption function applues changes to StartReceiveOptions.\ntype StartReceiveOption func(*StartReceiveOptions)\n\n\/\/ WithLongLivedCtx indicates that the context passed in the call outlives the\n\/\/ receive operation at hand. Typically the long lived context is associated\n\/\/ to a connection, eg.: a gRPC stream or a TCP connection, for which many\n\/\/ batches of data are received in individual operations without a corresponding\n\/\/ new context per operation.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ func (r *receiver) ClientConnect(ctx context.Context, rcvChan <-chan consumerdata.TraceData) {\n\/\/ longLivedCtx := obsreport.ReceiverContext(ctx, r.config.Name(), r.transport, \"\")\n\/\/ for {\n\/\/ \/\/ Since the context outlives the individual receive operations call obsreport using\n\/\/ \/\/ WithLongLivedCtx().\n\/\/ ctx := obsreport.StartTraceDataReceiveOp(\n\/\/ longLivedCtx,\n\/\/ r.config.Name(),\n\/\/ r.transport,\n\/\/ obsreport.WithLongLivedCtx())\n\/\/\n\/\/ td, ok := <-rcvChan\n\/\/ var err error\n\/\/ if ok {\n\/\/ err = r.nextConsumer.ConsumeTraceData(ctx, td)\n\/\/ }\n\/\/ obsreport.EndTraceDataReceiveOp(\n\/\/ ctx,\n\/\/ r.format,\n\/\/ len(td.Spans),\n\/\/ err)\n\/\/ if !ok {\n\/\/ break\n\/\/ }\n\/\/ }\n\/\/ }\n\/\/\nfunc WithLongLivedCtx() StartReceiveOption {\n\treturn func(opts *StartReceiveOptions) {\n\t\topts.LongLivedCtx = true\n\t}\n}\n\n\/\/ StartTraceDataReceiveOp is called when a \\request is received from a client.\n\/\/ The returned context should be used in other calls to the obsreport functions\n\/\/ dealing with the same receive operation.\nfunc StartTraceDataReceiveOp(\n\toperationCtx context.Context,\n\treceiver string,\n\ttransport string,\n\topt ...StartReceiveOption,\n) context.Context {\n\treturn traceReceiveOp(\n\t\toperationCtx,\n\t\treceiver,\n\t\ttransport,\n\t\treceiveTraceDataOperationSuffix,\n\t\topt...)\n}\n\n\/\/ EndTraceDataReceiveOp completes the receive operation that was started with\n\/\/ StartTraceDataReceiveOp.\nfunc EndTraceDataReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedSpans int,\n\terr error,\n) {\n\tif useLegacy {\n\t\tnumReceivedLegacy := numReceivedSpans\n\t\tnumDroppedSpans := 0\n\t\tif err != nil {\n\t\t\tnumDroppedSpans = numReceivedSpans\n\t\t\tnumReceivedLegacy = 0\n\t\t}\n\t\tstats.Record(receiverCtx, mReceiverReceivedSpans.M(int64(numReceivedLegacy)), mReceiverDroppedSpans.M(int64(numDroppedSpans)))\n\t}\n\n\tendReceiveOp(\n\t\treceiverCtx,\n\t\tformat,\n\t\tnumReceivedSpans,\n\t\terr,\n\t\tconfigmodels.TracesDataType,\n\t)\n}\n\n\/\/ StartMetricsReceiveOp is called when a \\request is received from a client.\n\/\/ The returned context should be used in other calls to the obsreport functions\n\/\/ dealing with the same receive operation.\nfunc StartMetricsReceiveOp(\n\toperationCtx context.Context,\n\treceiver string,\n\ttransport string,\n\topt ...StartReceiveOption,\n) context.Context {\n\treturn traceReceiveOp(\n\t\toperationCtx,\n\t\treceiver,\n\t\ttransport,\n\t\treceiverMetricsOperationSuffix,\n\t\topt...)\n}\n\n\/\/ EndMetricsReceiveOp completes the receive operation that was started with\n\/\/ StartMetricsReceiveOp.\nfunc EndMetricsReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedPoints int,\n\tnumReceivedTimeSeries int, \/\/ For legacy measurements.\n\terr error,\n) {\n\tif useLegacy {\n\t\tnumDroppedTimeSeries := 0\n\t\tif err != nil {\n\t\t\tnumDroppedTimeSeries = numReceivedTimeSeries\n\t\t\tnumReceivedTimeSeries = 0\n\t\t}\n\t\tstats.Record(receiverCtx, mReceiverReceivedTimeSeries.M(int64(numReceivedTimeSeries)), mReceiverDroppedTimeSeries.M(int64(numDroppedTimeSeries)))\n\t}\n\n\tendReceiveOp(\n\t\treceiverCtx,\n\t\tformat,\n\t\tnumReceivedPoints,\n\t\terr,\n\t\tconfigmodels.MetricsDataType,\n\t)\n}\n\n\/\/ ReceiverContext adds the keys used when recording observability metrics to\n\/\/ the given context returning the newly created context. This context should\n\/\/ be used in related calls to the obsreport functions so metrics are properly\n\/\/ recorded.\nfunc ReceiverContext(\n\tctx context.Context,\n\treceiver string,\n\ttransport string,\n\tlegacyName string,\n) context.Context {\n\tif useLegacy {\n\t\tname := receiver\n\t\tif legacyName != \"\" {\n\t\t\tname = legacyName\n\t\t}\n\t\tctx, _ = tag.New(ctx, tag.Upsert(LegacyTagKeyReceiver, name, tag.WithTTL(tag.TTLNoPropagation)))\n\t}\n\n\tctx, _ = tag.New(ctx,\n\t\ttag.Upsert(tagKeyReceiver, receiver, tag.WithTTL(tag.TTLNoPropagation)),\n\t\ttag.Upsert(tagKeyTransport, transport, tag.WithTTL(tag.TTLNoPropagation)))\n\n\treturn ctx\n}\n\n\/\/ traceReceiveOp creates the span used to trace the operation. Returning\n\/\/ the updated context with the created span.\nfunc traceReceiveOp(\n\treceiverCtx context.Context,\n\treceiverName string,\n\ttransport string,\n\toperationSuffix string,\n\topt ...StartReceiveOption,\n) context.Context {\n\tvar opts StartReceiveOptions\n\tfor _, o := range opt {\n\t\to(&opts)\n\t}\n\n\tvar ctx context.Context\n\tvar span *trace.Span\n\tspanName := receiverPrefix + receiverName + operationSuffix\n\tif !opts.LongLivedCtx {\n\t\tctx, span = trace.StartSpan(receiverCtx, spanName)\n\t} else {\n\t\t\/\/ Since the receiverCtx is long lived do not use it to start the span.\n\t\t\/\/ This way this trace ends when the EndTraceDataReceiveOp is called.\n\t\t\/\/ Here is safe to ignore the returned context since it is not used below.\n\t\t_, span = trace.StartSpan(context.Background(), spanName)\n\n\t\t\/\/ If the long lived context has a parent span, then add it as a parent link.\n\t\tsetParentLink(receiverCtx, span)\n\n\t\tctx = trace.NewContext(receiverCtx, span)\n\t}\n\n\tif transport != \"\" {\n\t\tspan.AddAttributes(trace.StringAttribute(TransportKey, transport))\n\t}\n\treturn ctx\n}\n\n\/\/ endReceiveOp records the observability signals at the end of an operation.\nfunc endReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedItems int,\n\terr error,\n\tdataType configmodels.DataType,\n) {\n\tnumAccepted := numReceivedItems\n\tnumRefused := 0\n\tif err != nil {\n\t\tnumAccepted = 0\n\t\tnumRefused = numReceivedItems\n\t}\n\n\tspan := trace.FromContext(receiverCtx)\n\n\tif useNew {\n\t\tvar acceptedMeasure, refusedMeasure *stats.Int64Measure\n\t\tswitch dataType {\n\t\tcase configmodels.TracesDataType:\n\t\t\tacceptedMeasure = mReceiverAcceptedSpans\n\t\t\trefusedMeasure = mReceiverRefusedSpans\n\t\tcase configmodels.MetricsDataType:\n\t\t\tacceptedMeasure = mReceiverAcceptedMetricPoints\n\t\t\trefusedMeasure = mReceiverRefusedMetricPoints\n\t\t}\n\n\t\tstats.Record(\n\t\t\treceiverCtx,\n\t\t\tacceptedMeasure.M(int64(numAccepted)),\n\t\t\trefusedMeasure.M(int64(numRefused)))\n\t}\n\n\t\/\/ end span according to errors\n\tif span.IsRecordingEvents() {\n\t\tvar acceptedItemsKey, refusedItemsKey string\n\t\tswitch dataType {\n\t\tcase configmodels.TracesDataType:\n\t\t\tacceptedItemsKey = AcceptedSpansKey\n\t\t\trefusedItemsKey = RefusedSpansKey\n\t\tcase configmodels.MetricsDataType:\n\t\t\tacceptedItemsKey = AcceptedMetricPointsKey\n\t\t\trefusedItemsKey = RefusedMetricPointsKey\n\t\t}\n\n\t\tspan.AddAttributes(\n\t\t\ttrace.StringAttribute(\n\t\t\t\tFormatKey, format),\n\t\t\ttrace.Int64Attribute(\n\t\t\t\tacceptedItemsKey, int64(numAccepted)),\n\t\t\ttrace.Int64Attribute(\n\t\t\t\trefusedItemsKey, int64(numRefused)),\n\t\t)\n\t\tspan.SetStatus(errToStatus(err))\n\t}\n\tspan.End()\n}\n<commit_msg>Remove backslash from godoc (#1345)<commit_after>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage obsreport\n\nimport (\n\t\"context\"\n\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/tag\"\n\t\"go.opencensus.io\/trace\"\n\n\t\"go.opentelemetry.io\/collector\/config\/configmodels\"\n)\n\nconst (\n\t\/\/ Key used to identify receivers in metrics and traces.\n\tReceiverKey = \"receiver\"\n\t\/\/ Key used to identify the transport used to received the data.\n\tTransportKey = \"transport\"\n\t\/\/ Key used to identify the format of the data received.\n\tFormatKey = \"format\"\n\n\t\/\/ Key used to identify spans accepted by the Collector.\n\tAcceptedSpansKey = \"accepted_spans\"\n\t\/\/ Key used to identify spans refused (ie.: not ingested) by the Collector.\n\tRefusedSpansKey = \"refused_spans\"\n\n\t\/\/ Key used to identify metric points accepted by the Collector.\n\tAcceptedMetricPointsKey = \"accepted_metric_points\"\n\t\/\/ Key used to identify metric points refused (ie.: not ingested) by the\n\t\/\/ Collector.\n\tRefusedMetricPointsKey = \"refused_metric_points\"\n\n\t\/\/ Key used to identify log records accepted by the Collector.\n\tAcceptedLogRecordsKey = \"accepted_log_records\"\n\t\/\/ Key used to identify log records refused (ie.: not ingested) by the\n\t\/\/ Collector.\n\tRefusedLogRecordsKey = \"refused_log_records\"\n)\n\nvar (\n\ttagKeyReceiver, _ = tag.NewKey(ReceiverKey)\n\ttagKeyTransport, _ = tag.NewKey(TransportKey)\n\n\treceiverPrefix = ReceiverKey + nameSep\n\treceiveTraceDataOperationSuffix = nameSep + \"TraceDataReceived\"\n\treceiverMetricsOperationSuffix = nameSep + \"MetricsReceived\"\n\n\t\/\/ Receiver metrics. Any count of data items below is in the original format\n\t\/\/ that they were received, reasoning: reconciliation is easier if measurements\n\t\/\/ on clients and receiver are expected to be the same. Translation issues\n\t\/\/ that result in a different number of elements should be reported in a\n\t\/\/ separate way.\n\tmReceiverAcceptedSpans = stats.Int64(\n\t\treceiverPrefix+AcceptedSpansKey,\n\t\t\"Number of spans successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedSpans = stats.Int64(\n\t\treceiverPrefix+RefusedSpansKey,\n\t\t\"Number of spans that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverAcceptedMetricPoints = stats.Int64(\n\t\treceiverPrefix+AcceptedMetricPointsKey,\n\t\t\"Number of metric points successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedMetricPoints = stats.Int64(\n\t\treceiverPrefix+RefusedMetricPointsKey,\n\t\t\"Number of metric points that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverAcceptedLogRecords = stats.Int64(\n\t\treceiverPrefix+AcceptedLogRecordsKey,\n\t\t\"Number of log records successfully pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n\tmReceiverRefusedLogRecords = stats.Int64(\n\t\treceiverPrefix+RefusedLogRecordsKey,\n\t\t\"Number of log records that could not be pushed into the pipeline.\",\n\t\tstats.UnitDimensionless)\n)\n\n\/\/ StartReceiveOptions has the options related to starting a receive operation.\ntype StartReceiveOptions struct {\n\t\/\/ LongLivedCtx when true indicates that the context passed in the call\n\t\/\/ outlives the individual receive operation. See WithLongLivedCtx() for\n\t\/\/ more information.\n\tLongLivedCtx bool\n}\n\n\/\/ StartReceiveOption function applues changes to StartReceiveOptions.\ntype StartReceiveOption func(*StartReceiveOptions)\n\n\/\/ WithLongLivedCtx indicates that the context passed in the call outlives the\n\/\/ receive operation at hand. Typically the long lived context is associated\n\/\/ to a connection, eg.: a gRPC stream or a TCP connection, for which many\n\/\/ batches of data are received in individual operations without a corresponding\n\/\/ new context per operation.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ func (r *receiver) ClientConnect(ctx context.Context, rcvChan <-chan consumerdata.TraceData) {\n\/\/ longLivedCtx := obsreport.ReceiverContext(ctx, r.config.Name(), r.transport, \"\")\n\/\/ for {\n\/\/ \/\/ Since the context outlives the individual receive operations call obsreport using\n\/\/ \/\/ WithLongLivedCtx().\n\/\/ ctx := obsreport.StartTraceDataReceiveOp(\n\/\/ longLivedCtx,\n\/\/ r.config.Name(),\n\/\/ r.transport,\n\/\/ obsreport.WithLongLivedCtx())\n\/\/\n\/\/ td, ok := <-rcvChan\n\/\/ var err error\n\/\/ if ok {\n\/\/ err = r.nextConsumer.ConsumeTraceData(ctx, td)\n\/\/ }\n\/\/ obsreport.EndTraceDataReceiveOp(\n\/\/ ctx,\n\/\/ r.format,\n\/\/ len(td.Spans),\n\/\/ err)\n\/\/ if !ok {\n\/\/ break\n\/\/ }\n\/\/ }\n\/\/ }\n\/\/\nfunc WithLongLivedCtx() StartReceiveOption {\n\treturn func(opts *StartReceiveOptions) {\n\t\topts.LongLivedCtx = true\n\t}\n}\n\n\/\/ StartTraceDataReceiveOp is called when a request is received from a client.\n\/\/ The returned context should be used in other calls to the obsreport functions\n\/\/ dealing with the same receive operation.\nfunc StartTraceDataReceiveOp(\n\toperationCtx context.Context,\n\treceiver string,\n\ttransport string,\n\topt ...StartReceiveOption,\n) context.Context {\n\treturn traceReceiveOp(\n\t\toperationCtx,\n\t\treceiver,\n\t\ttransport,\n\t\treceiveTraceDataOperationSuffix,\n\t\topt...)\n}\n\n\/\/ EndTraceDataReceiveOp completes the receive operation that was started with\n\/\/ StartTraceDataReceiveOp.\nfunc EndTraceDataReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedSpans int,\n\terr error,\n) {\n\tif useLegacy {\n\t\tnumReceivedLegacy := numReceivedSpans\n\t\tnumDroppedSpans := 0\n\t\tif err != nil {\n\t\t\tnumDroppedSpans = numReceivedSpans\n\t\t\tnumReceivedLegacy = 0\n\t\t}\n\t\tstats.Record(receiverCtx, mReceiverReceivedSpans.M(int64(numReceivedLegacy)), mReceiverDroppedSpans.M(int64(numDroppedSpans)))\n\t}\n\n\tendReceiveOp(\n\t\treceiverCtx,\n\t\tformat,\n\t\tnumReceivedSpans,\n\t\terr,\n\t\tconfigmodels.TracesDataType,\n\t)\n}\n\n\/\/ StartMetricsReceiveOp is called when a \\request is received from a client.\n\/\/ The returned context should be used in other calls to the obsreport functions\n\/\/ dealing with the same receive operation.\nfunc StartMetricsReceiveOp(\n\toperationCtx context.Context,\n\treceiver string,\n\ttransport string,\n\topt ...StartReceiveOption,\n) context.Context {\n\treturn traceReceiveOp(\n\t\toperationCtx,\n\t\treceiver,\n\t\ttransport,\n\t\treceiverMetricsOperationSuffix,\n\t\topt...)\n}\n\n\/\/ EndMetricsReceiveOp completes the receive operation that was started with\n\/\/ StartMetricsReceiveOp.\nfunc EndMetricsReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedPoints int,\n\tnumReceivedTimeSeries int, \/\/ For legacy measurements.\n\terr error,\n) {\n\tif useLegacy {\n\t\tnumDroppedTimeSeries := 0\n\t\tif err != nil {\n\t\t\tnumDroppedTimeSeries = numReceivedTimeSeries\n\t\t\tnumReceivedTimeSeries = 0\n\t\t}\n\t\tstats.Record(receiverCtx, mReceiverReceivedTimeSeries.M(int64(numReceivedTimeSeries)), mReceiverDroppedTimeSeries.M(int64(numDroppedTimeSeries)))\n\t}\n\n\tendReceiveOp(\n\t\treceiverCtx,\n\t\tformat,\n\t\tnumReceivedPoints,\n\t\terr,\n\t\tconfigmodels.MetricsDataType,\n\t)\n}\n\n\/\/ ReceiverContext adds the keys used when recording observability metrics to\n\/\/ the given context returning the newly created context. This context should\n\/\/ be used in related calls to the obsreport functions so metrics are properly\n\/\/ recorded.\nfunc ReceiverContext(\n\tctx context.Context,\n\treceiver string,\n\ttransport string,\n\tlegacyName string,\n) context.Context {\n\tif useLegacy {\n\t\tname := receiver\n\t\tif legacyName != \"\" {\n\t\t\tname = legacyName\n\t\t}\n\t\tctx, _ = tag.New(ctx, tag.Upsert(LegacyTagKeyReceiver, name, tag.WithTTL(tag.TTLNoPropagation)))\n\t}\n\n\tctx, _ = tag.New(ctx,\n\t\ttag.Upsert(tagKeyReceiver, receiver, tag.WithTTL(tag.TTLNoPropagation)),\n\t\ttag.Upsert(tagKeyTransport, transport, tag.WithTTL(tag.TTLNoPropagation)))\n\n\treturn ctx\n}\n\n\/\/ traceReceiveOp creates the span used to trace the operation. Returning\n\/\/ the updated context with the created span.\nfunc traceReceiveOp(\n\treceiverCtx context.Context,\n\treceiverName string,\n\ttransport string,\n\toperationSuffix string,\n\topt ...StartReceiveOption,\n) context.Context {\n\tvar opts StartReceiveOptions\n\tfor _, o := range opt {\n\t\to(&opts)\n\t}\n\n\tvar ctx context.Context\n\tvar span *trace.Span\n\tspanName := receiverPrefix + receiverName + operationSuffix\n\tif !opts.LongLivedCtx {\n\t\tctx, span = trace.StartSpan(receiverCtx, spanName)\n\t} else {\n\t\t\/\/ Since the receiverCtx is long lived do not use it to start the span.\n\t\t\/\/ This way this trace ends when the EndTraceDataReceiveOp is called.\n\t\t\/\/ Here is safe to ignore the returned context since it is not used below.\n\t\t_, span = trace.StartSpan(context.Background(), spanName)\n\n\t\t\/\/ If the long lived context has a parent span, then add it as a parent link.\n\t\tsetParentLink(receiverCtx, span)\n\n\t\tctx = trace.NewContext(receiverCtx, span)\n\t}\n\n\tif transport != \"\" {\n\t\tspan.AddAttributes(trace.StringAttribute(TransportKey, transport))\n\t}\n\treturn ctx\n}\n\n\/\/ endReceiveOp records the observability signals at the end of an operation.\nfunc endReceiveOp(\n\treceiverCtx context.Context,\n\tformat string,\n\tnumReceivedItems int,\n\terr error,\n\tdataType configmodels.DataType,\n) {\n\tnumAccepted := numReceivedItems\n\tnumRefused := 0\n\tif err != nil {\n\t\tnumAccepted = 0\n\t\tnumRefused = numReceivedItems\n\t}\n\n\tspan := trace.FromContext(receiverCtx)\n\n\tif useNew {\n\t\tvar acceptedMeasure, refusedMeasure *stats.Int64Measure\n\t\tswitch dataType {\n\t\tcase configmodels.TracesDataType:\n\t\t\tacceptedMeasure = mReceiverAcceptedSpans\n\t\t\trefusedMeasure = mReceiverRefusedSpans\n\t\tcase configmodels.MetricsDataType:\n\t\t\tacceptedMeasure = mReceiverAcceptedMetricPoints\n\t\t\trefusedMeasure = mReceiverRefusedMetricPoints\n\t\t}\n\n\t\tstats.Record(\n\t\t\treceiverCtx,\n\t\t\tacceptedMeasure.M(int64(numAccepted)),\n\t\t\trefusedMeasure.M(int64(numRefused)))\n\t}\n\n\t\/\/ end span according to errors\n\tif span.IsRecordingEvents() {\n\t\tvar acceptedItemsKey, refusedItemsKey string\n\t\tswitch dataType {\n\t\tcase configmodels.TracesDataType:\n\t\t\tacceptedItemsKey = AcceptedSpansKey\n\t\t\trefusedItemsKey = RefusedSpansKey\n\t\tcase configmodels.MetricsDataType:\n\t\t\tacceptedItemsKey = AcceptedMetricPointsKey\n\t\t\trefusedItemsKey = RefusedMetricPointsKey\n\t\t}\n\n\t\tspan.AddAttributes(\n\t\t\ttrace.StringAttribute(\n\t\t\t\tFormatKey, format),\n\t\t\ttrace.Int64Attribute(\n\t\t\t\tacceptedItemsKey, int64(numAccepted)),\n\t\t\ttrace.Int64Attribute(\n\t\t\t\trefusedItemsKey, int64(numRefused)),\n\t\t)\n\t\tspan.SetStatus(errToStatus(err))\n\t}\n\tspan.End()\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/peterstace\/grayt\/colour\"\n)\n\ntype render struct {\n\tscene string\n\tpxWide int\n\tpxHigh int\n\tcreated time.Time\n\n\tcnd *sync.Cond\n\tdesiredWorkers int\n\tactualWorkers int\n\n\tacc *accumulator\n\n\tbackoffMu sync.Mutex\n\tbackoff time.Duration\n}\n\nfunc (r *render) orchestrateWork() {\n\t\/\/ TODO: Could just get lock above loop and then never release?\n\t\/\/ TODO: Add some sort of backoff.\n\tfor {\n\t\tr.cnd.L.Lock()\n\t\tfor r.actualWorkers >= r.desiredWorkers {\n\t\t\tr.cnd.Wait()\n\t\t}\n\t\tr.sleepForBackoff()\n\t\tr.actualWorkers++\n\t\tgo r.work()\n\t\tr.cnd.L.Unlock()\n\t}\n}\n\nfunc (r *render) sleepForBackoff() {\n\tr.backoffMu.Lock()\n\tb := r.backoff\n\tr.backoffMu.Unlock()\n\ttime.Sleep(b)\n}\n\nfunc (r *render) work() {\n\tdefer func() {\n\t\tr.cnd.L.Lock()\n\t\tr.actualWorkers--\n\t\tr.cnd.L.Unlock()\n\t\tr.cnd.Signal()\n\t}()\n\n\t\/\/ TODO: allow URL base to be configurable\n\turl := fmt.Sprintf(\n\t\t\"http:\/\/worker:80\/trace?scene_name=%s&px_wide=%d&px_high=%d\",\n\t\tr.scene, r.pxWide, r.pxHigh,\n\t)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Printf(\"error requesting work: %v\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode == http.StatusTooManyRequests {\n\t\tr.backoffMu.Lock()\n\t\tr.backoff = 2 * (time.Millisecond + r.backoff)\n\t\tbackoff := r.backoff\n\t\tr.backoffMu.Unlock()\n\t\tlog.Printf(\"too many requests, backoff: %s\", backoff)\n\t\treturn\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tbody = []byte(\"couldn't ready body\")\n\t\t}\n\t\tlog.Printf(\"worker result with non-200 status: %s\", string(body))\n\t\treturn\n\t}\n\n\tr.backoffMu.Lock()\n\tr.backoff = 0\n\tr.backoffMu.Unlock()\n\n\t\/\/ TODO: should be able to reuse the pixel grid to save on allocations\n\tpixels := r.pxWide * r.pxHigh\n\tunitOfWork := pixelGrid{\n\t\tr.pxWide, r.pxHigh,\n\t\tmake([]colour.Colour, pixels),\n\t}\n\tif err := binary.Read(resp.Body, binary.BigEndian, &unitOfWork.pixels); err != nil {\n\t\tlog.Printf(\"could not read from worker response body: %v\", err)\n\t\treturn\n\t}\n\n\tr.acc.merge(&unitOfWork)\n}\n<commit_msg>Remove backoff<commit_after>package api\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/peterstace\/grayt\/colour\"\n)\n\ntype render struct {\n\tscene string\n\tpxWide int\n\tpxHigh int\n\tcreated time.Time\n\n\tcnd *sync.Cond\n\tdesiredWorkers int\n\tactualWorkers int\n\n\tacc *accumulator\n}\n\nfunc (r *render) orchestrateWork() {\n\tfor {\n\t\tr.cnd.L.Lock()\n\t\tfor r.actualWorkers >= r.desiredWorkers {\n\t\t\tr.cnd.Wait()\n\t\t}\n\t\tr.actualWorkers++\n\t\tgo r.work()\n\t\tr.cnd.L.Unlock()\n\t}\n}\n\nfunc (r *render) work() {\n\tdefer func() {\n\t\tr.cnd.L.Lock()\n\t\tr.actualWorkers--\n\t\tr.cnd.L.Unlock()\n\t\tr.cnd.Signal()\n\t}()\n\n\t\/\/ TODO: allow URL base to be configurable\n\turl := fmt.Sprintf(\n\t\t\"http:\/\/worker:80\/trace?scene_name=%s&px_wide=%d&px_high=%d\",\n\t\tr.scene, r.pxWide, r.pxHigh,\n\t)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Printf(\"error requesting work: %v\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode == http.StatusTooManyRequests {\n\t\treturn\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tbody = []byte(\"couldn't ready body\")\n\t\t}\n\t\tlog.Printf(\"worker result with non-200 status: %s\", string(body))\n\t\treturn\n\t}\n\n\t\/\/ TODO: should be able to reuse the pixel grid to save on allocations\n\tpixels := r.pxWide * r.pxHigh\n\tunitOfWork := pixelGrid{\n\t\tr.pxWide, r.pxHigh,\n\t\tmake([]colour.Colour, pixels),\n\t}\n\tif err := binary.Read(resp.Body, binary.BigEndian, &unitOfWork.pixels); err != nil {\n\t\tlog.Printf(\"could not read from worker response body: %v\", err)\n\t\treturn\n\t}\n\n\tr.acc.merge(&unitOfWork)\n}\n<|endoftext|>"} {"text":"<commit_before>package wrpendpoint\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/tracing\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n)\n\n\/\/ serviceFanout takes a single WRP request and dispatches it concurrently to zero\n\/\/ or more go-kit endpoints.\ntype serviceFanout struct {\n\tservices map[string]Service\n\tspanner tracing.Spanner\n}\n\n\/\/ fanoutResponse is the internal tuple used to communicate the results of an asynchronously\n\/\/ invoked service\ntype fanoutResponse struct {\n\tname string\n\tspans []tracing.Span\n\tresponse Response\n\terr error\n}\n\n\/\/ NewServiceFanout produces a WRP service which invokes each of a set of services concurrently for each WRP request.\n\/\/ The first service which returns a valid response becomes the response of the fanout service. If the context is\n\/\/ cancelled, then ctx.Err() is returned. Finally, if all services fail then a tracing.SpanError is returned with\n\/\/ the last error set as the causal error.\nfunc NewServiceFanout(services map[string]Service) Service {\n\tif len(services) == 0 {\n\t\treturn ServiceFunc(func(context.Context, Request) (Response, error) {\n\t\t\treturn nil, errors.New(\"No configured services\")\n\t\t})\n\t}\n\n\tcopyOf := make(map[string]Service, len(services))\n\tfor k, v := range services {\n\t\tcopyOf[k] = v\n\t}\n\n\treturn &serviceFanout{\n\t\tservices: copyOf,\n\t\tspanner: tracing.NewSpanner(),\n\t}\n}\n\nfunc (sf *serviceFanout) ServeWRP(ctx context.Context, request Request) (Response, error) {\n\tresults := make(chan fanoutResponse, len(sf.services))\n\tfor name, s := range sf.services {\n\t\tgo func(name string, s Service) {\n\t\t\tvar (\n\t\t\t\tfinisher = sf.spanner.Start(name)\n\t\t\t\tresponse, err = s.ServeWRP(ctx, request)\n\t\t\t\tspan = finisher(err)\n\t\t\t)\n\n\t\t\tif err != nil {\n\t\t\t\tresults <- fanoutResponse{\n\t\t\t\t\tname: name,\n\t\t\t\t\tspans: []tracing.Span{span},\n\t\t\t\t\terr: err,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresults <- fanoutResponse{\n\t\t\t\t\tname: name,\n\t\t\t\t\tspans: []tracing.Span{span},\n\t\t\t\t\tresponse: response.AddSpans(span),\n\t\t\t\t}\n\t\t\t}\n\t\t}(name, s)\n\t}\n\n\tvar (\n\t\tlastError error\n\t\tspans []tracing.Span\n\t)\n\n\tfor r := 0; r < len(sf.services); r++ {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\trequest.Logger().Log(level.Key(), level.WarnValue(), logging.MessageKey(), \"timed out\")\n\t\t\treturn nil, tracing.NewSpanError(ctx.Err(), spans...)\n\t\tcase fr := <-results:\n\t\t\tif fr.err != nil {\n\t\t\t\tlastError = fr.err\n\t\t\t\tspans = append(spans, fr.spans...)\n\t\t\t\trequest.Logger().Log(level.Key(), level.DebugValue(), \"service\", fr.name, logging.ErrorKey(), fr.err, logging.MessageKey(), \"failed\")\n\t\t\t} else {\n\t\t\t\trequest.Logger().Log(level.Key(), level.DebugValue(), \"service\", fr.name, logging.MessageKey(), \"success\")\n\t\t\t\treturn fr.response.AddSpans(spans...), nil\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ use the last error as the causal error\n\trequest.Logger().Log(level.Key(), level.ErrorValue(), logging.ErrorKey(), lastError, logging.MessageKey(), \"all services failed\")\n\treturn nil, tracing.NewSpanError(lastError, spans...)\n}\n<commit_msg>simplified the span management in the boss goroutine<commit_after>package wrpendpoint\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/tracing\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n)\n\n\/\/ serviceFanout takes a single WRP request and dispatches it concurrently to zero\n\/\/ or more go-kit endpoints.\ntype serviceFanout struct {\n\tservices map[string]Service\n\tspanner tracing.Spanner\n}\n\n\/\/ fanoutResponse is the internal tuple used to communicate the results of an asynchronously\n\/\/ invoked service\ntype fanoutResponse struct {\n\tname string\n\tspan tracing.Span\n\tresponse Response\n\terr error\n}\n\n\/\/ NewServiceFanout produces a WRP service which invokes each of a set of services concurrently for each WRP request.\n\/\/ The first service which returns a valid response becomes the response of the fanout service. If the context is\n\/\/ cancelled, then ctx.Err() is returned. Finally, if all services fail then a tracing.SpanError is returned with\n\/\/ the last error set as the causal error.\nfunc NewServiceFanout(services map[string]Service) Service {\n\tif len(services) == 0 {\n\t\treturn ServiceFunc(func(context.Context, Request) (Response, error) {\n\t\t\treturn nil, errors.New(\"No configured services\")\n\t\t})\n\t}\n\n\tcopyOf := make(map[string]Service, len(services))\n\tfor k, v := range services {\n\t\tcopyOf[k] = v\n\t}\n\n\treturn &serviceFanout{\n\t\tservices: copyOf,\n\t\tspanner: tracing.NewSpanner(),\n\t}\n}\n\nfunc (sf *serviceFanout) ServeWRP(ctx context.Context, request Request) (Response, error) {\n\tresults := make(chan fanoutResponse, len(sf.services))\n\tfor name, s := range sf.services {\n\t\tgo func(name string, s Service) {\n\t\t\tvar (\n\t\t\t\tfinisher = sf.spanner.Start(name)\n\t\t\t\tresponse, err = s.ServeWRP(ctx, request)\n\t\t\t)\n\n\t\t\tresults <- fanoutResponse{\n\t\t\t\tname: name,\n\t\t\t\tspan: finisher(err),\n\t\t\t\tresponse: response,\n\t\t\t\terr: err,\n\t\t\t}\n\t\t}(name, s)\n\t}\n\n\tvar (\n\t\tlastError error\n\t\tspans []tracing.Span\n\t)\n\n\tfor r := 0; r < len(sf.services); r++ {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\trequest.Logger().Log(level.Key(), level.WarnValue(), logging.MessageKey(), \"timed out\")\n\t\t\treturn nil, tracing.NewSpanError(ctx.Err(), spans...)\n\t\tcase fr := <-results:\n\t\t\tspans = append(spans, fr.span)\n\t\t\tif fr.err != nil {\n\t\t\t\tlastError = fr.err\n\t\t\t\trequest.Logger().Log(level.Key(), level.DebugValue(), \"service\", fr.name, logging.ErrorKey(), fr.err, logging.MessageKey(), \"failed\")\n\t\t\t} else {\n\t\t\t\trequest.Logger().Log(level.Key(), level.DebugValue(), \"service\", fr.name, logging.MessageKey(), \"success\")\n\t\t\t\treturn fr.response.AddSpans(spans...), nil\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ use the last error as the causal error\n\trequest.Logger().Log(level.Key(), level.ErrorValue(), logging.ErrorKey(), lastError, logging.MessageKey(), \"all services failed\")\n\treturn nil, tracing.NewSpanError(lastError, spans...)\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"github.com\/kirinlabs\/HttpRequest\"\n\t\"github.com\/mjarkk\/wotnlclans\/other\"\n)\n\nvar routePrefix = \"https:\/\/api.worldoftanks.eu\"\n\n\/\/ Routes are all the possible routes to make\nvar Routes = map[string]string{\n\t\"nicknameAndClan\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}&fields=nickname%2Cclan_id\",\n\t\"clanIcon\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clan}}&fields=tag%2Ccolor%2Cemblems.x195\",\n\t\"playerInfoLogedIn\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}&access_token={{playerAccessToken}}\",\n\t\"playerInfo\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}\",\n\t\"loginLink\": \"\/wot\/auth\/login\/?display=popup&nofollow=1&application_id={{key}}&redirect_uri={{redirectURL}}\",\n\t\"topClans\": \"\/wgn\/clans\/list\/?application_id={{key}}&limit=100&game=wot&fields=clan_id&page_no={{pageNum}}\",\n\t\"clanDiscription\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&fields=description%2Ctag\",\n\t\"clanTag\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&fields=tag\",\n\t\"clanData\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&game=wot\",\n\t\"clanRating\": \"\/wot\/clanratings\/clans\/?application_id={{key}}&clan_id={{clanID}}\",\n}\n\n\/\/ GetAPIRoute returns a route with the inputs parsed\nfunc GetAPIRoute(what string, inputs map[string]string) (string, error) {\n\tif _, hasKey := inputs[\"key\"]; !hasKey {\n\t\tinputs[\"key\"] = other.Flags.WGKey\n\t}\n\tselectedRoute, check := Routes[what]\n\tif !check {\n\t\treturn \"\", errors.New(\"Api route not found\")\n\t}\n\tfor id, value := range inputs {\n\t\tselectedRoute = strings.Replace(selectedRoute, \"{{\"+id+\"}}\", value, -1)\n\t}\n\tif strings.Contains(selectedRoute, \"{{\") || strings.Contains(selectedRoute, \"}}\") {\n\t\treturn \"\", errors.New(\"Not all inputs are replaced\")\n\t}\n\treturn routePrefix + selectedRoute, nil\n}\n\n\/\/ Get returns the response data (string) of a url\nfunc Get(uri string) (string, error) {\n\tout, err := RawGet(uri)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out), nil\n}\n\n\/\/ RawGet retruns the literal response data (byte data) of a response\nfunc RawGet(uri string) ([]byte, error) {\n\treq := HttpRequest.NewRequest()\n\tres, err := req.Get(uri, nil)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tout, err := res.Body()\n\treturn out, err\n}\n\n\/\/ Post returns the data of a post request to a url\nfunc Post(uri string, postData map[string]interface{}) (string, error) {\n\treq := HttpRequest.NewRequest()\n\tres, err := req.Post(uri, postData)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tout, err := res.Body()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out), nil\n}\n\n\/\/ CallRoute makes a network request\nfunc CallRoute(what string, inputs map[string]string) (string, error) {\n\turl, err := GetAPIRoute(what, inputs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Get(url)\n}\n<commit_msg>Fixed error in cd\/ci<commit_after>package api\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"github.com\/mjarkk\/HttpRequest\"\n\t\"github.com\/mjarkk\/wotnlclans\/other\"\n)\n\nvar routePrefix = \"https:\/\/api.worldoftanks.eu\"\n\n\/\/ Routes are all the possible routes to make\nvar Routes = map[string]string{\n\t\"nicknameAndClan\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}&fields=nickname%2Cclan_id\",\n\t\"clanIcon\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clan}}&fields=tag%2Ccolor%2Cemblems.x195\",\n\t\"playerInfoLogedIn\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}&access_token={{playerAccessToken}}\",\n\t\"playerInfo\": \"\/wot\/account\/info\/?application_id={{key}}&account_id={{playerID}}\",\n\t\"loginLink\": \"\/wot\/auth\/login\/?display=popup&nofollow=1&application_id={{key}}&redirect_uri={{redirectURL}}\",\n\t\"topClans\": \"\/wgn\/clans\/list\/?application_id={{key}}&limit=100&game=wot&fields=clan_id&page_no={{pageNum}}\",\n\t\"clanDiscription\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&fields=description%2Ctag\",\n\t\"clanTag\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&fields=tag\",\n\t\"clanData\": \"\/wgn\/clans\/info\/?application_id={{key}}&clan_id={{clanID}}&game=wot\",\n\t\"clanRating\": \"\/wot\/clanratings\/clans\/?application_id={{key}}&clan_id={{clanID}}\",\n}\n\n\/\/ GetAPIRoute returns a route with the inputs parsed\nfunc GetAPIRoute(what string, inputs map[string]string) (string, error) {\n\tif _, hasKey := inputs[\"key\"]; !hasKey {\n\t\tinputs[\"key\"] = other.Flags.WGKey\n\t}\n\tselectedRoute, check := Routes[what]\n\tif !check {\n\t\treturn \"\", errors.New(\"Api route not found\")\n\t}\n\tfor id, value := range inputs {\n\t\tselectedRoute = strings.Replace(selectedRoute, \"{{\"+id+\"}}\", value, -1)\n\t}\n\tif strings.Contains(selectedRoute, \"{{\") || strings.Contains(selectedRoute, \"}}\") {\n\t\treturn \"\", errors.New(\"Not all inputs are replaced\")\n\t}\n\treturn routePrefix + selectedRoute, nil\n}\n\n\/\/ Get returns the response data (string) of a url\nfunc Get(uri string) (string, error) {\n\tout, err := RawGet(uri)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out), nil\n}\n\n\/\/ RawGet retruns the literal response data (byte data) of a response\nfunc RawGet(uri string) ([]byte, error) {\n\treq := HttpRequest.NewRequest()\n\tres, err := req.Get(uri, nil)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tout, err := res.Body()\n\treturn out, err\n}\n\n\/\/ Post returns the data of a post request to a url\nfunc Post(uri string, postData map[string]interface{}) (string, error) {\n\treq := HttpRequest.NewRequest()\n\tres, err := req.Post(uri, postData)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tout, err := res.Body()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out), nil\n}\n\n\/\/ CallRoute makes a network request\nfunc CallRoute(what string, inputs map[string]string) (string, error) {\n\turl, err := GetAPIRoute(what, inputs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Get(url)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate stringer -type=CommandStatusType,CommandIDType -output header_data_string.go\n\npackage data\n\n\/\/ CommandStatusType is type of command status\ntype CommandStatusType int32\n\n\/\/ CommandIDType is type of command id.\ntype CommandIDType int32\n\n\/\/nolint\nconst (\n\t\/\/ SMPP Command ID Set\n\tGENERIC_NACK = CommandIDType(-2147483648)\n\tBIND_RECEIVER = CommandIDType(0x00000001)\n\tBIND_RECEIVER_RESP = CommandIDType(-2147483647)\n\tBIND_TRANSMITTER = CommandIDType(0x00000002)\n\tBIND_TRANSMITTER_RESP = CommandIDType(-2147483646)\n\tQUERY_SM = CommandIDType(0x00000003)\n\tQUERY_SM_RESP = CommandIDType(-2147483645)\n\tSUBMIT_SM = CommandIDType(0x00000004)\n\tSUBMIT_SM_RESP = CommandIDType(-2147483644)\n\tDELIVER_SM = CommandIDType(0x00000005)\n\tDELIVER_SM_RESP = CommandIDType(-2147483643)\n\tUNBIND = CommandIDType(0x00000006)\n\tUNBIND_RESP = CommandIDType(-2147483642)\n\tREPLACE_SM = CommandIDType(0x00000007)\n\tREPLACE_SM_RESP = CommandIDType(-2147483641)\n\tCANCEL_SM = CommandIDType(0x00000008)\n\tCANCEL_SM_RESP = CommandIDType(-2147483640)\n\tBIND_TRANSCEIVER = CommandIDType(0x00000009)\n\tBIND_TRANSCEIVER_RESP = CommandIDType(-2147483639)\n\tOUTBIND = CommandIDType(0x0000000B)\n\tENQUIRE_LINK = CommandIDType(0x00000015)\n\tENQUIRE_LINK_RESP = CommandIDType(-2147483627)\n\tSUBMIT_MULTI = CommandIDType(0x00000021)\n\tSUBMIT_MULTI_RESP = CommandIDType(-2147483615)\n\tALERT_NOTIFICATION = CommandIDType(0x00000102)\n\tDATA_SM = CommandIDType(0x00000103)\n\tDATA_SM_RESP = CommandIDType(-2147483389)\n)\n\nconst (\n\n\t\/\/ Command_Status Error Codes\n\tESME_ROK = CommandStatusType(0x00000000)\n\tESME_RINVMSGLEN = CommandStatusType(0x00000001)\n\tESME_RINVCMDLEN = CommandStatusType(0x00000002)\n\tESME_RINVCMDID = CommandStatusType(0x00000003)\n\tESME_RINVBNDSTS = CommandStatusType(0x00000004)\n\tESME_RALYBND = CommandStatusType(0x00000005)\n\tESME_RINVPRTFLG = CommandStatusType(0x00000006)\n\tESME_RINVREGDLVFLG = CommandStatusType(0x00000007)\n\tESME_RSYSERR = CommandStatusType(0x00000008)\n\tESME_RINVSRCADR = CommandStatusType(0x0000000A)\n\tESME_RINVDSTADR = CommandStatusType(0x0000000B)\n\tESME_RINVMSGID = CommandStatusType(0x0000000C)\n\tESME_RBINDFAIL = CommandStatusType(0x0000000D)\n\tESME_RINVPASWD = CommandStatusType(0x0000000E)\n\tESME_RINVSYSID = CommandStatusType(0x0000000F)\n\tESME_RCANCELFAIL = CommandStatusType(0x00000011)\n\tESME_RREPLACEFAIL = CommandStatusType(0x00000013)\n\tESME_RMSGQFUL = CommandStatusType(0x00000014)\n\tESME_RINVSERTYP = CommandStatusType(0x00000015)\n\n\tESME_RADDCUSTFAIL = CommandStatusType(0x00000019) \/\/ Failed to Add Customer\n\tESME_RDELCUSTFAIL = CommandStatusType(0x0000001A) \/\/ Failed to delete Customer\n\tESME_RMODCUSTFAIL = CommandStatusType(0x0000001B) \/\/ Failed to modify customer\n\tESME_RENQCUSTFAIL = CommandStatusType(0x0000001C) \/\/ Failed to Enquire Customer\n\tESME_RINVCUSTID = CommandStatusType(0x0000001D) \/\/ Invalid Customer ID\n\tESME_RINVCUSTNAME = CommandStatusType(0x0000001F) \/\/ Invalid Customer Name\n\tESME_RINVCUSTADR = CommandStatusType(0x00000021) \/\/ Invalid Customer Address\n\tESME_RINVADR = CommandStatusType(0x00000022) \/\/ Invalid Address\n\tESME_RCUSTEXIST = CommandStatusType(0x00000023) \/\/ Customer Exists\n\tESME_RCUSTNOTEXIST = CommandStatusType(0x00000024) \/\/ Customer does not exist\n\tESME_RADDDLFAIL = CommandStatusType(0x00000026) \/\/ Failed to Add DL\n\tESME_RMODDLFAIL = CommandStatusType(0x00000027) \/\/ Failed to modify DL\n\tESME_RDELDLFAIL = CommandStatusType(0x00000028) \/\/ Failed to Delete DL\n\tESME_RVIEWDLFAIL = CommandStatusType(0x00000029) \/\/ Failed to View DL\n\tESME_RLISTDLSFAIL = CommandStatusType(0x00000030) \/\/ Failed to list DLs\n\tESME_RPARAMRETFAIL = CommandStatusType(0x00000031) \/\/ Param Retrieve Failed\n\tESME_RINVPARAM = CommandStatusType(0x00000032) \/\/ Invalid Param\n\n\tESME_RINVNUMDESTS = CommandStatusType(0x00000033)\n\tESME_RINVDLNAME = CommandStatusType(0x00000034)\n\n\tESME_RINVDLMEMBDESC = CommandStatusType(0x00000035) \/\/ Invalid DL Member Description\n\tESME_RINVDLMEMBTYP = CommandStatusType(0x00000038) \/\/ Invalid DL Member Type\n\tESME_RINVDLMODOPT = CommandStatusType(0x00000039) \/\/ Invalid DL Modify Option\n\n\tESME_RINVDESTFLAG = CommandStatusType(0x00000040)\n\tESME_RINVSUBREP = CommandStatusType(0x00000042)\n\tESME_RINVESMCLASS = CommandStatusType(0x00000043)\n\tESME_RCNTSUBDL = CommandStatusType(0x00000044)\n\tESME_RSUBMITFAIL = CommandStatusType(0x00000045)\n\tESME_RINVSRCTON = CommandStatusType(0x00000048)\n\tESME_RINVSRCNPI = CommandStatusType(0x00000049)\n\tESME_RINVDSTTON = CommandStatusType(0x00000050)\n\tESME_RINVDSTNPI = CommandStatusType(0x00000051)\n\tESME_RINVSYSTYP = CommandStatusType(0x00000053)\n\tESME_RINVREPFLAG = CommandStatusType(0x00000054)\n\tESME_RINVNUMMSGS = CommandStatusType(0x00000055)\n\tESME_RTHROTTLED = CommandStatusType(0x00000058)\n\n\tESME_RPROVNOTALLWD = CommandStatusType(0x00000059) \/\/ Provisioning Not Allowed\n\n\tESME_RINVSCHED = CommandStatusType(0x00000061)\n\tESME_RINVEXPIRY = CommandStatusType(0x00000062)\n\tESME_RINVDFTMSGID = CommandStatusType(0x00000063)\n\tESME_RX_T_APPN = CommandStatusType(0x00000064)\n\tESME_RX_P_APPN = CommandStatusType(0x00000065)\n\tESME_RX_R_APPN = CommandStatusType(0x00000066)\n\tESME_RQUERYFAIL = CommandStatusType(0x00000067)\n\n\tESME_RINVPGCUSTID = CommandStatusType(0x00000080) \/\/ Paging Customer ID Invalid No such subscriber\n\tESME_RINVPGCUSTIDLEN = CommandStatusType(0x00000081) \/\/ Paging Customer ID length Invalid\n\tESME_RINVCITYLEN = CommandStatusType(0x00000082) \/\/ City Length Invalid\n\tESME_RINVSTATELEN = CommandStatusType(0x00000083) \/\/ State Length Invalid\n\tESME_RINVZIPPREFIXLEN = CommandStatusType(0x00000084) \/\/ Zip Prefix Length Invalid\n\tESME_RINVZIPPOSTFIXLEN = CommandStatusType(0x00000085) \/\/ Zip Postfix Length Invalid\n\tESME_RINVMINLEN = CommandStatusType(0x00000086) \/\/ MIN Length Invalid\n\tESME_RINVMIN = CommandStatusType(0x00000087) \/\/ MIN Invalid (i.e. No such MIN)\n\tESME_RINVPINLEN = CommandStatusType(0x00000088) \/\/ PIN Length Invalid\n\tESME_RINVTERMCODELEN = CommandStatusType(0x00000089) \/\/ Terminal Code Length Invalid\n\tESME_RINVCHANNELLEN = CommandStatusType(0x0000008A) \/\/ Channel Length Invalid\n\tESME_RINVCOVREGIONLEN = CommandStatusType(0x0000008B) \/\/ Coverage Region Length Invalid\n\tESME_RINVCAPCODELEN = CommandStatusType(0x0000008C) \/\/ Cap Code Length Invalid\n\tESME_RINVMDTLEN = CommandStatusType(0x0000008D) \/\/ Message delivery time Length Invalid\n\tESME_RINVPRIORMSGLEN = CommandStatusType(0x0000008E) \/\/ Priority Message Length Invalid\n\tESME_RINVPERMSGLEN = CommandStatusType(0x0000008F) \/\/ Periodic Messages Length Invalid\n\tESME_RINVPGALERTLEN = CommandStatusType(0x00000090) \/\/ Paging Alerts Length Invalid\n\tESME_RINVSMUSERLEN = CommandStatusType(0x00000091) \/\/ int16 Message User Group Length Invalid\n\tESME_RINVRTDBLEN = CommandStatusType(0x00000092) \/\/ Real Time Data broadcasts Length Invalid\n\tESME_RINVREGDELLEN = CommandStatusType(0x00000093) \/\/ Registered Delivery Length Invalid\n\tESME_RINVMSGDISTLEN = CommandStatusType(0x00000094) \/\/ Message Distribution Length Invalid\n\tESME_RINVPRIORMSG = CommandStatusType(0x00000095) \/\/ Priority Message Length Invalid\n\tESME_RINVMDT = CommandStatusType(0x00000096) \/\/ Message delivery time Invalid\n\tESME_RINVPERMSG = CommandStatusType(0x00000097) \/\/ Periodic Messages Invalid\n\tESME_RINVMSGDIST = CommandStatusType(0x00000098) \/\/ Message Distribution Invalid\n\tESME_RINVPGALERT = CommandStatusType(0x00000099) \/\/ Paging Alerts Invalid\n\tESME_RINVSMUSER = CommandStatusType(0x0000009A) \/\/ int16 Message User Group Invalid\n\tESME_RINVRTDB = CommandStatusType(0x0000009B) \/\/ Real Time Data broadcasts Invalid\n\tESME_RINVREGDEL = CommandStatusType(0x0000009C) \/\/ Registered Delivery Invalid\n\t\/\/ public static final ESME_RINVOPTPARSTREAM = CommandStatusType(0x0000009D) \/\/ KIF IW Field out of data\n\t\/\/ public static final ESME_ROPTPARNOTALLWD = CommandStatusType(0x0000009E) \/\/ Optional Parameter not allowed\n\tESME_RINVOPTPARLEN = CommandStatusType(0x0000009F) \/\/ Invalid Optional Parameter Length\n\n\tESME_RINVOPTPARSTREAM = CommandStatusType(0x000000C0)\n\tESME_ROPTPARNOTALLWD = CommandStatusType(0x000000C1)\n\tESME_RINVPARLEN = CommandStatusType(0x000000C2)\n\tESME_RMISSINGOPTPARAM = CommandStatusType(0x000000C3)\n\tESME_RINVOPTPARAMVAL = CommandStatusType(0x000000C4)\n\tESME_RDELIVERYFAILURE = CommandStatusType(0x000000FE)\n\tESME_RUNKNOWNERR = CommandStatusType(0x000000FF)\n\n\tESME_LAST_ERROR = CommandStatusType(0x0000012C) \/\/ THE VALUE OF THE LAST ERROR CODE\n)\n<commit_msg>Fix linting<commit_after>\/\/go:generate stringer -type=CommandStatusType,CommandIDType -output header_data_string.go\n\npackage data\n\n\/\/ CommandStatusType is type of command status\ntype CommandStatusType int32\n\n\/\/ CommandIDType is type of command id.\ntype CommandIDType int32\n\n\/\/nolint\nconst (\n\t\/\/ SMPP Command ID Set\n\tGENERIC_NACK = CommandIDType(-2147483648)\n\tBIND_RECEIVER = CommandIDType(0x00000001)\n\tBIND_RECEIVER_RESP = CommandIDType(-2147483647)\n\tBIND_TRANSMITTER = CommandIDType(0x00000002)\n\tBIND_TRANSMITTER_RESP = CommandIDType(-2147483646)\n\tQUERY_SM = CommandIDType(0x00000003)\n\tQUERY_SM_RESP = CommandIDType(-2147483645)\n\tSUBMIT_SM = CommandIDType(0x00000004)\n\tSUBMIT_SM_RESP = CommandIDType(-2147483644)\n\tDELIVER_SM = CommandIDType(0x00000005)\n\tDELIVER_SM_RESP = CommandIDType(-2147483643)\n\tUNBIND = CommandIDType(0x00000006)\n\tUNBIND_RESP = CommandIDType(-2147483642)\n\tREPLACE_SM = CommandIDType(0x00000007)\n\tREPLACE_SM_RESP = CommandIDType(-2147483641)\n\tCANCEL_SM = CommandIDType(0x00000008)\n\tCANCEL_SM_RESP = CommandIDType(-2147483640)\n\tBIND_TRANSCEIVER = CommandIDType(0x00000009)\n\tBIND_TRANSCEIVER_RESP = CommandIDType(-2147483639)\n\tOUTBIND = CommandIDType(0x0000000B)\n\tENQUIRE_LINK = CommandIDType(0x00000015)\n\tENQUIRE_LINK_RESP = CommandIDType(-2147483627)\n\tSUBMIT_MULTI = CommandIDType(0x00000021)\n\tSUBMIT_MULTI_RESP = CommandIDType(-2147483615)\n\tALERT_NOTIFICATION = CommandIDType(0x00000102)\n\tDATA_SM = CommandIDType(0x00000103)\n\tDATA_SM_RESP = CommandIDType(-2147483389)\n)\n\n\/\/nolint\nconst (\n\t\/\/ Command_Status Error Codes\n\tESME_ROK = CommandStatusType(0x00000000)\n\tESME_RINVMSGLEN = CommandStatusType(0x00000001)\n\tESME_RINVCMDLEN = CommandStatusType(0x00000002)\n\tESME_RINVCMDID = CommandStatusType(0x00000003)\n\tESME_RINVBNDSTS = CommandStatusType(0x00000004)\n\tESME_RALYBND = CommandStatusType(0x00000005)\n\tESME_RINVPRTFLG = CommandStatusType(0x00000006)\n\tESME_RINVREGDLVFLG = CommandStatusType(0x00000007)\n\tESME_RSYSERR = CommandStatusType(0x00000008)\n\tESME_RINVSRCADR = CommandStatusType(0x0000000A)\n\tESME_RINVDSTADR = CommandStatusType(0x0000000B)\n\tESME_RINVMSGID = CommandStatusType(0x0000000C)\n\tESME_RBINDFAIL = CommandStatusType(0x0000000D)\n\tESME_RINVPASWD = CommandStatusType(0x0000000E)\n\tESME_RINVSYSID = CommandStatusType(0x0000000F)\n\tESME_RCANCELFAIL = CommandStatusType(0x00000011)\n\tESME_RREPLACEFAIL = CommandStatusType(0x00000013)\n\tESME_RMSGQFUL = CommandStatusType(0x00000014)\n\tESME_RINVSERTYP = CommandStatusType(0x00000015)\n\n\tESME_RADDCUSTFAIL = CommandStatusType(0x00000019) \/\/ Failed to Add Customer\n\tESME_RDELCUSTFAIL = CommandStatusType(0x0000001A) \/\/ Failed to delete Customer\n\tESME_RMODCUSTFAIL = CommandStatusType(0x0000001B) \/\/ Failed to modify customer\n\tESME_RENQCUSTFAIL = CommandStatusType(0x0000001C) \/\/ Failed to Enquire Customer\n\tESME_RINVCUSTID = CommandStatusType(0x0000001D) \/\/ Invalid Customer ID\n\tESME_RINVCUSTNAME = CommandStatusType(0x0000001F) \/\/ Invalid Customer Name\n\tESME_RINVCUSTADR = CommandStatusType(0x00000021) \/\/ Invalid Customer Address\n\tESME_RINVADR = CommandStatusType(0x00000022) \/\/ Invalid Address\n\tESME_RCUSTEXIST = CommandStatusType(0x00000023) \/\/ Customer Exists\n\tESME_RCUSTNOTEXIST = CommandStatusType(0x00000024) \/\/ Customer does not exist\n\tESME_RADDDLFAIL = CommandStatusType(0x00000026) \/\/ Failed to Add DL\n\tESME_RMODDLFAIL = CommandStatusType(0x00000027) \/\/ Failed to modify DL\n\tESME_RDELDLFAIL = CommandStatusType(0x00000028) \/\/ Failed to Delete DL\n\tESME_RVIEWDLFAIL = CommandStatusType(0x00000029) \/\/ Failed to View DL\n\tESME_RLISTDLSFAIL = CommandStatusType(0x00000030) \/\/ Failed to list DLs\n\tESME_RPARAMRETFAIL = CommandStatusType(0x00000031) \/\/ Param Retrieve Failed\n\tESME_RINVPARAM = CommandStatusType(0x00000032) \/\/ Invalid Param\n\n\tESME_RINVNUMDESTS = CommandStatusType(0x00000033)\n\tESME_RINVDLNAME = CommandStatusType(0x00000034)\n\n\tESME_RINVDLMEMBDESC = CommandStatusType(0x00000035) \/\/ Invalid DL Member Description\n\tESME_RINVDLMEMBTYP = CommandStatusType(0x00000038) \/\/ Invalid DL Member Type\n\tESME_RINVDLMODOPT = CommandStatusType(0x00000039) \/\/ Invalid DL Modify Option\n\n\tESME_RINVDESTFLAG = CommandStatusType(0x00000040)\n\tESME_RINVSUBREP = CommandStatusType(0x00000042)\n\tESME_RINVESMCLASS = CommandStatusType(0x00000043)\n\tESME_RCNTSUBDL = CommandStatusType(0x00000044)\n\tESME_RSUBMITFAIL = CommandStatusType(0x00000045)\n\tESME_RINVSRCTON = CommandStatusType(0x00000048)\n\tESME_RINVSRCNPI = CommandStatusType(0x00000049)\n\tESME_RINVDSTTON = CommandStatusType(0x00000050)\n\tESME_RINVDSTNPI = CommandStatusType(0x00000051)\n\tESME_RINVSYSTYP = CommandStatusType(0x00000053)\n\tESME_RINVREPFLAG = CommandStatusType(0x00000054)\n\tESME_RINVNUMMSGS = CommandStatusType(0x00000055)\n\tESME_RTHROTTLED = CommandStatusType(0x00000058)\n\n\tESME_RPROVNOTALLWD = CommandStatusType(0x00000059) \/\/ Provisioning Not Allowed\n\n\tESME_RINVSCHED = CommandStatusType(0x00000061)\n\tESME_RINVEXPIRY = CommandStatusType(0x00000062)\n\tESME_RINVDFTMSGID = CommandStatusType(0x00000063)\n\tESME_RX_T_APPN = CommandStatusType(0x00000064)\n\tESME_RX_P_APPN = CommandStatusType(0x00000065)\n\tESME_RX_R_APPN = CommandStatusType(0x00000066)\n\tESME_RQUERYFAIL = CommandStatusType(0x00000067)\n\n\tESME_RINVPGCUSTID = CommandStatusType(0x00000080) \/\/ Paging Customer ID Invalid No such subscriber\n\tESME_RINVPGCUSTIDLEN = CommandStatusType(0x00000081) \/\/ Paging Customer ID length Invalid\n\tESME_RINVCITYLEN = CommandStatusType(0x00000082) \/\/ City Length Invalid\n\tESME_RINVSTATELEN = CommandStatusType(0x00000083) \/\/ State Length Invalid\n\tESME_RINVZIPPREFIXLEN = CommandStatusType(0x00000084) \/\/ Zip Prefix Length Invalid\n\tESME_RINVZIPPOSTFIXLEN = CommandStatusType(0x00000085) \/\/ Zip Postfix Length Invalid\n\tESME_RINVMINLEN = CommandStatusType(0x00000086) \/\/ MIN Length Invalid\n\tESME_RINVMIN = CommandStatusType(0x00000087) \/\/ MIN Invalid (i.e. No such MIN)\n\tESME_RINVPINLEN = CommandStatusType(0x00000088) \/\/ PIN Length Invalid\n\tESME_RINVTERMCODELEN = CommandStatusType(0x00000089) \/\/ Terminal Code Length Invalid\n\tESME_RINVCHANNELLEN = CommandStatusType(0x0000008A) \/\/ Channel Length Invalid\n\tESME_RINVCOVREGIONLEN = CommandStatusType(0x0000008B) \/\/ Coverage Region Length Invalid\n\tESME_RINVCAPCODELEN = CommandStatusType(0x0000008C) \/\/ Cap Code Length Invalid\n\tESME_RINVMDTLEN = CommandStatusType(0x0000008D) \/\/ Message delivery time Length Invalid\n\tESME_RINVPRIORMSGLEN = CommandStatusType(0x0000008E) \/\/ Priority Message Length Invalid\n\tESME_RINVPERMSGLEN = CommandStatusType(0x0000008F) \/\/ Periodic Messages Length Invalid\n\tESME_RINVPGALERTLEN = CommandStatusType(0x00000090) \/\/ Paging Alerts Length Invalid\n\tESME_RINVSMUSERLEN = CommandStatusType(0x00000091) \/\/ int16 Message User Group Length Invalid\n\tESME_RINVRTDBLEN = CommandStatusType(0x00000092) \/\/ Real Time Data broadcasts Length Invalid\n\tESME_RINVREGDELLEN = CommandStatusType(0x00000093) \/\/ Registered Delivery Length Invalid\n\tESME_RINVMSGDISTLEN = CommandStatusType(0x00000094) \/\/ Message Distribution Length Invalid\n\tESME_RINVPRIORMSG = CommandStatusType(0x00000095) \/\/ Priority Message Length Invalid\n\tESME_RINVMDT = CommandStatusType(0x00000096) \/\/ Message delivery time Invalid\n\tESME_RINVPERMSG = CommandStatusType(0x00000097) \/\/ Periodic Messages Invalid\n\tESME_RINVMSGDIST = CommandStatusType(0x00000098) \/\/ Message Distribution Invalid\n\tESME_RINVPGALERT = CommandStatusType(0x00000099) \/\/ Paging Alerts Invalid\n\tESME_RINVSMUSER = CommandStatusType(0x0000009A) \/\/ int16 Message User Group Invalid\n\tESME_RINVRTDB = CommandStatusType(0x0000009B) \/\/ Real Time Data broadcasts Invalid\n\tESME_RINVREGDEL = CommandStatusType(0x0000009C) \/\/ Registered Delivery Invalid\n\t\/\/ public static final ESME_RINVOPTPARSTREAM = CommandStatusType(0x0000009D) \/\/ KIF IW Field out of data\n\t\/\/ public static final ESME_ROPTPARNOTALLWD = CommandStatusType(0x0000009E) \/\/ Optional Parameter not allowed\n\tESME_RINVOPTPARLEN = CommandStatusType(0x0000009F) \/\/ Invalid Optional Parameter Length\n\n\tESME_RINVOPTPARSTREAM = CommandStatusType(0x000000C0)\n\tESME_ROPTPARNOTALLWD = CommandStatusType(0x000000C1)\n\tESME_RINVPARLEN = CommandStatusType(0x000000C2)\n\tESME_RMISSINGOPTPARAM = CommandStatusType(0x000000C3)\n\tESME_RINVOPTPARAMVAL = CommandStatusType(0x000000C4)\n\tESME_RDELIVERYFAILURE = CommandStatusType(0x000000FE)\n\tESME_RUNKNOWNERR = CommandStatusType(0x000000FF)\n\n\tESME_LAST_ERROR = CommandStatusType(0x0000012C) \/\/ THE VALUE OF THE LAST ERROR CODE\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/calendar\/v3\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"time\"\n)\n\nconst (\n\tCalendarId = \"qsqrk2emvnnvu45debac9dugr8@group.calendar.google.com\"\n\tLinkUrl = \"https:\/\/calendar.google.com\/calendar\/embed?src=qsqrk2emvnnvu45debac9dugr8@group.calendar.google.com\"\n)\n\nfunc GetSanrioEventsCalendar() (*feeds.Feed, error) {\n\tjson, err := ioutil.ReadFile(\"google_client_credentials.json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := google.JWTConfigFromJSON(json, calendar.CalendarReadonlyScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := config.Client(oauth2.NoContext)\n\n\tservice, err := calendar.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tevents, err := service.Events.List(CalendarId).OrderBy(\"updated\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn GetSanrioEventsCalendarFromEvents(events)\n}\n\nfunc GetSanrioEventsCalendarFromEvents(events *calendar.Events) (*feeds.Feed, error) {\n\tvar items []*feeds.Item\n\titems = make([]*feeds.Item, len(events.Items))\n\tfor i, event := range events.Items {\n\t\tcreated, err := time.Parse(time.RFC3339, event.Created)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tupdated, err := time.Parse(time.RFC3339, event.Updated)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar duration string\n\n\t\tswitch {\n\t\tcase event.Start.Date != \"\" && event.End.Date != \"\":\n\t\t\tstartLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstart, err := time.ParseInLocation(\"2006-01-02\", event.Start.Date, startLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tend, err := time.ParseInLocation(\"2006-01-02\", event.End.Date, endLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif start.Format(\"2006-01-02\") == end.Format(\"2006-01-02\") {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon)\")\n\t\t\t} else {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon)\") + \" - \" + end.Format(\"2006-01-02 (Mon)\")\n\t\t\t}\n\n\t\tcase event.Start.DateTime != \"\" && event.End.DateTime != \"\":\n\t\t\tstartLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstart, err := time.ParseInLocation(time.RFC3339, event.Start.DateTime, startLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tend, err := time.ParseInLocation(time.RFC3339, event.End.DateTime, endLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif start.Format(\"2006-01-02\") == end.Format(\"2006-01-02\") {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon) 15:04\") + \" - \" + end.Format(\"15:04\")\n\t\t\t} else {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon) 15:04\") + \" - \" + end.Format(\"2006-01-02 (Mon) 15:04\")\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"must not happen\")\n\t\t}\n\n\t\tdescription := fmt.Sprintf(\"Location: %s<br \/>Duration: %s<br \/>Description: %s\", html.EscapeString(event.Location), html.EscapeString(duration), html.EscapeString(event.Description))\n\n\t\titems[i] = &feeds.Item{\n\t\t\tId: event.Etag,\n\t\t\tTitle: event.Summary,\n\t\t\tDescription: description,\n\t\t\tLink: &feeds.Link{Href: event.HtmlLink},\n\t\t\tAuthor: &feeds.Author{event.Creator.DisplayName, event.Creator.Email},\n\t\t\tCreated: created,\n\t\t\tUpdated: updated,\n\t\t}\n\t}\n\n\tupdated, err := time.Parse(time.RFC3339, events.Updated)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfeed := &feeds.Feed{\n\t\tId: events.Etag,\n\t\tTitle: events.Summary,\n\t\tDescription: events.Description,\n\t\tLink: &feeds.Link{Href: LinkUrl},\n\t\tUpdated: updated,\n\t\tItems: items,\n\t}\n\treturn feed, nil\n}\n<commit_msg>Improve the format of description<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/calendar\/v3\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tCalendarId = \"qsqrk2emvnnvu45debac9dugr8@group.calendar.google.com\"\n\tLinkUrl = \"https:\/\/calendar.google.com\/calendar\/embed?src=qsqrk2emvnnvu45debac9dugr8@group.calendar.google.com\"\n)\n\nfunc GetSanrioEventsCalendar() (*feeds.Feed, error) {\n\tjson, err := ioutil.ReadFile(\"google_client_credentials.json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := google.JWTConfigFromJSON(json, calendar.CalendarReadonlyScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := config.Client(oauth2.NoContext)\n\n\tservice, err := calendar.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tevents, err := service.Events.List(CalendarId).OrderBy(\"updated\").Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn GetSanrioEventsCalendarFromEvents(events)\n}\n\nfunc GetSanrioEventsCalendarFromEvents(events *calendar.Events) (*feeds.Feed, error) {\n\tdescriptionReplacer := strings.NewReplacer(\"\\n\", \"<br \/>\")\n\n\tvar items []*feeds.Item\n\titems = make([]*feeds.Item, len(events.Items))\n\tfor i, event := range events.Items {\n\t\tcreated, err := time.Parse(time.RFC3339, event.Created)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tupdated, err := time.Parse(time.RFC3339, event.Updated)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar duration string\n\n\t\tswitch {\n\t\tcase event.Start.Date != \"\" && event.End.Date != \"\":\n\t\t\tstartLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstart, err := time.ParseInLocation(\"2006-01-02\", event.Start.Date, startLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tend, err := time.ParseInLocation(\"2006-01-02\", event.End.Date, endLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif start.Format(\"2006-01-02\") == end.Format(\"2006-01-02\") {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon)\")\n\t\t\t} else {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon)\") + \" - \" + end.Format(\"2006-01-02 (Mon)\")\n\t\t\t}\n\n\t\tcase event.Start.DateTime != \"\" && event.End.DateTime != \"\":\n\t\t\tstartLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstart, err := time.ParseInLocation(time.RFC3339, event.Start.DateTime, startLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendLoc, err := time.LoadLocation(event.Start.TimeZone)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tend, err := time.ParseInLocation(time.RFC3339, event.End.DateTime, endLoc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif start.Format(\"2006-01-02\") == end.Format(\"2006-01-02\") {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon) 15:04\") + \" - \" + end.Format(\"15:04\")\n\t\t\t} else {\n\t\t\t\tduration = start.Format(\"2006-01-02 (Mon) 15:04\") + \" - \" + end.Format(\"2006-01-02 (Mon) 15:04\")\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"must not happen\")\n\t\t}\n\n\t\tdescription := fmt.Sprintf(\"Location: %s<br \/>Duration: %s<br \/><br \/>%s\", html.EscapeString(event.Location), html.EscapeString(duration), descriptionReplacer.Replace(html.EscapeString(event.Description)))\n\n\t\titems[i] = &feeds.Item{\n\t\t\tId: event.Etag,\n\t\t\tTitle: event.Summary,\n\t\t\tDescription: description,\n\t\t\tLink: &feeds.Link{Href: event.HtmlLink},\n\t\t\tAuthor: &feeds.Author{event.Creator.DisplayName, event.Creator.Email},\n\t\t\tCreated: created,\n\t\t\tUpdated: updated,\n\t\t}\n\t}\n\n\tupdated, err := time.Parse(time.RFC3339, events.Updated)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfeed := &feeds.Feed{\n\t\tId: events.Etag,\n\t\tTitle: events.Summary,\n\t\tDescription: events.Description,\n\t\tLink: &feeds.Link{Href: LinkUrl},\n\t\tUpdated: updated,\n\t\tItems: items,\n\t}\n\treturn feed, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package persist\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\tpfsclient \"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/require\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/version\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/db\/persist\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/drive\"\n\tpfsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/server\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"go.pedge.io\/pb\/go\/google\/protobuf\"\n\t\"go.pedge.io\/proto\/server\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar (\n\tport int32 = 30651\n)\n\n\/*\n\tCommitBranchIndex\n\n\tGiven a repo and a clock, returns a commit\n\n\tIs used in several places to:\n\n\t- find a parent commit given the parent's id in the form of an alias (e.g. \"master\/0\")\n\t- getHeadOfBranch() -- by doing a range query of the form \"branchName\/0\" to \"branchName\/max\" and returning the last result (in this case the head)\n\t- getIDOfParentcommit() -- by decrementing this commit's clock value, and searching for that new clock\n\t- getCommitByAmbmiguousID() -- if the commit ID is in the form of an alias, find the commit using the index\n\n*\/\n\nfunc TestCommitBranchIndexBasic(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tterm := gorethink.DB(dbName).Table(commitTable)\n\t\tcursor, err := term.GetAll(commitID).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\tc := &persist.Commit{}\n\t\terr = cursor.One(c)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, commitID, c.ID)\n\n\t\tclock := &persist.Clock{Branch: \"master\", Clock: 0}\n\t\tclockID := getClockID(repo.Name, clock).ID\n\t\tcursor, err = gorethink.DB(dbName).Table(clockTable).GetAll(clockID).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\treturnedClock := &persist.Clock{}\n\t\trequire.NoError(t, cursor.One(returnedClock))\n\t\trequire.Equal(t, \"master\", returnedClock.Branch)\n\t\trequire.Equal(t, uint64(0), returnedClock.Clock)\n\n\t\tkey := []interface{}{repo.Name, clock.Branch, clock.Clock}\n\t\tcursor, err = term.GetAllByIndex(commitBranchIndex, key).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\treturnedCommit := &persist.Commit{}\n\t\trequire.NoError(t, cursor.One(returnedCommit))\n\t\trequire.Equal(t, commitID, returnedCommit.ID)\n\t})\n}\n\nfunc TestCommitBranchIndexHeadOfBranch(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit := &pfs.Commit{Repo: repo, ID: commitID}\n\t\trequire.NoError(t, d.FinishCommit(commit, timestampNow(), false, nil))\n\n\t\tcommitID2 := uuid.NewWithoutDashes()\n\t\terr = d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID2,\n\t\t\tcommitID,\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit2 := &pfs.Commit{Repo: repo, ID: commitID2}\n\t\trequire.NoError(t, d.FinishCommit(commit2, timestampNow(), false, nil))\n\n\t\tcommitID3 := uuid.NewWithoutDashes()\n\t\terr = d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID3,\n\t\t\tcommitID2,\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit3 := &pfs.Commit{Repo: repo, ID: commitID3}\n\t\trequire.NoError(t, d.FinishCommit(commit3, timestampNow(), false, nil))\n\n\t\t\/\/ Now that the commits are chained together,\n\t\t\/\/ Grab the head of master branch using the index\n\t\thead := &persist.Commit{}\n\t\tterm := gorethink.DB(dbName).Table(commitTable)\n\t\tcursor, err := term.OrderBy(gorethink.OrderByOpts{\n\t\t\tIndex: gorethink.Desc(commitBranchIndex),\n\t\t}).Between([]interface{}{repo.Name, \"master\", 0}, []interface{}{repo.Name, \"master\", gorethink.MaxVal}, gorethink.BetweenOpts{\n\t\t\tRightBound: \"open\",\n\t\t}).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, cursor.One(head))\n\t\trequire.Equal(t, commitID3, head.ID)\n\t})\n}\n\n\/*\n\tdiffPathIndex\n\n\tused nowhere?\n\tused in ListFile() to list diffs by path\n*\/\n\n\/* diffCommitIndex\n\nIndexed on commitID field in diff row\n\nUsed to:\n\n- in FinishCommit() to gather all of the diffs for this commit\n\n*\/\n\nfunc TestDiffCommitIndexBasic(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tfile := &pfs.File{\n\t\t\tCommit: &pfs.Commit{\n\t\t\t\tRepo: repo,\n\t\t\t\tID: commitID,\n\t\t\t},\n\t\t}\n\t\td.PutFile(file, \"\", pfs.Delimiter_LINE, 0, strings.NewReader(\"foo\\n\"))\n\n\t\tcommit := &pfs.Commit{Repo: repo, ID: commitID}\n\t\trequire.NoError(t, d.FinishCommit(commit, timestampNow(), false, nil))\n\t})\n}\n\n\/* commitModifiedPathsIndex\n\nIndexed on: repo \/ path \/ branchclock\n\nUsed to:\n\n- in GetFile() to gather all commits within a range that modified a given path\n\n*\/\n\n\/* commitDeletedPathsIndex\n\nIndexed on :\n\nUsed to:\n\n- in GetFile() given a range of commits, find the commits that deleted a path, return the leftmost one\n\n*\/\n\n\/* clockBranchIndex\n\nIndexed on:\n\n- repo && branchIndex\n\nUsed to:\n\n- in ListBranch() to query the clocks table and return the branches\n\n*\/\n\nfunc timestampNow() *google_protobuf.Timestamp {\n\treturn &google_protobuf.Timestamp{Seconds: time.Now().Unix()}\n}\n\nfunc testSetup(t *testing.T, testCode func(drive.Driver, string, *gorethink.Session)) {\n\tdbName := \"pachyderm_test_\" + uuid.NewWithoutDashes()[0:12]\n\tif err := InitDB(RethinkAddress, dbName); err != nil {\n\t\trequire.NoError(t, err)\n\t\treturn\n\t}\n\tdbClient, err := gorethink.Connect(gorethink.ConnectOpts{\n\t\tAddress: RethinkAddress,\n\t\tTimeout: connectTimeoutSeconds * time.Second,\n\t})\n\trequire.NoError(t, err)\n\td, err := NewDriver(\"localhost:30652\", RethinkAddress, dbName)\n\trequire.NoError(t, err)\n\n\tstartBlockServer(t, dbName, d)\n\n\ttestCode(d, dbName, dbClient)\n\n\tif err := RemoveDB(RethinkAddress, dbName); err != nil {\n\t\trequire.NoError(t, err)\n\t\treturn\n\t}\n}\n\nfunc runServers(t *testing.T, port int32, blockAPIServer pfsclient.BlockAPIServer) {\n\tready := make(chan bool)\n\tgo func() {\n\t\terr := protoserver.Serve(\n\t\t\tfunc(s *grpc.Server) {\n\t\t\t\tpfsclient.RegisterBlockAPIServer(s, blockAPIServer)\n\t\t\t\tclose(ready)\n\t\t\t},\n\t\t\tprotoserver.ServeOptions{Version: version.Version},\n\t\t\tprotoserver.ServeEnv{GRPCPort: uint16(port)},\n\t\t)\n\t\trequire.NoError(t, err)\n\t}()\n\t<-ready\n}\n\nfunc startBlockServer(t *testing.T, dbName string, driver drive.Driver) {\n\n\troot := uniqueString(\"\/tmp\/pach_test\/run\")\n\tvar ports []int32\n\tports = append(ports, atomic.AddInt32(&port, 1))\n\tvar addresses []string\n\tfor _, port := range ports {\n\t\taddresses = append(addresses, fmt.Sprintf(\"localhost:%d\", port))\n\t}\n\tfor _, port := range ports {\n\t\tblockAPIServer, err := pfsserver.NewLocalBlockAPIServer(root)\n\t\trequire.NoError(t, err)\n\t\trunServers(t, port, blockAPIServer)\n\t}\n\treturn\n}\n\nfunc uniqueString(prefix string) string {\n\treturn prefix + \".\" + uuid.NewWithoutDashes()[0:12]\n}\n<commit_msg>Add failing test TestDiffCommitIndexBasic<commit_after>package persist\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\tpclient \"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\tpfsclient \"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/require\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/version\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/db\/persist\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/drive\"\n\tpfsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\/server\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"go.pedge.io\/pb\/go\/google\/protobuf\"\n\t\"go.pedge.io\/proto\/server\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar (\n\tport int32 = 30651\n)\n\n\/*\n\tCommitBranchIndex\n\n\tGiven a repo and a clock, returns a commit\n\n\tIs used in several places to:\n\n\t- find a parent commit given the parent's id in the form of an alias (e.g. \"master\/0\")\n\t- getHeadOfBranch() -- by doing a range query of the form \"branchName\/0\" to \"branchName\/max\" and returning the last result (in this case the head)\n\t- getIDOfParentcommit() -- by decrementing this commit's clock value, and searching for that new clock\n\t- getCommitByAmbmiguousID() -- if the commit ID is in the form of an alias, find the commit using the index\n\n*\/\n\nfunc TestCommitBranchIndexBasic(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session, client pclient.APIClient) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tterm := gorethink.DB(dbName).Table(commitTable)\n\t\tcursor, err := term.GetAll(commitID).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\tc := &persist.Commit{}\n\t\terr = cursor.One(c)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, commitID, c.ID)\n\n\t\tclock := &persist.Clock{Branch: \"master\", Clock: 0}\n\t\tclockID := getClockID(repo.Name, clock).ID\n\t\tcursor, err = gorethink.DB(dbName).Table(clockTable).GetAll(clockID).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\treturnedClock := &persist.Clock{}\n\t\trequire.NoError(t, cursor.One(returnedClock))\n\t\trequire.Equal(t, \"master\", returnedClock.Branch)\n\t\trequire.Equal(t, uint64(0), returnedClock.Clock)\n\n\t\tkey := []interface{}{repo.Name, clock.Branch, clock.Clock}\n\t\tcursor, err = term.GetAllByIndex(commitBranchIndex, key).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\treturnedCommit := &persist.Commit{}\n\t\trequire.NoError(t, cursor.One(returnedCommit))\n\t\trequire.Equal(t, commitID, returnedCommit.ID)\n\t})\n}\n\nfunc TestCommitBranchIndexHeadOfBranch(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session, client pclient.APIClient) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit := &pfs.Commit{Repo: repo, ID: commitID}\n\t\trequire.NoError(t, d.FinishCommit(commit, timestampNow(), false, nil))\n\n\t\tcommitID2 := uuid.NewWithoutDashes()\n\t\terr = d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID2,\n\t\t\tcommitID,\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit2 := &pfs.Commit{Repo: repo, ID: commitID2}\n\t\trequire.NoError(t, d.FinishCommit(commit2, timestampNow(), false, nil))\n\n\t\tcommitID3 := uuid.NewWithoutDashes()\n\t\terr = d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID3,\n\t\t\tcommitID2,\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tcommit3 := &pfs.Commit{Repo: repo, ID: commitID3}\n\t\trequire.NoError(t, d.FinishCommit(commit3, timestampNow(), false, nil))\n\n\t\t\/\/ Now that the commits are chained together,\n\t\t\/\/ Grab the head of master branch using the index\n\t\thead := &persist.Commit{}\n\t\tterm := gorethink.DB(dbName).Table(commitTable)\n\t\tcursor, err := term.OrderBy(gorethink.OrderByOpts{\n\t\t\tIndex: gorethink.Desc(commitBranchIndex),\n\t\t}).Between([]interface{}{repo.Name, \"master\", 0}, []interface{}{repo.Name, \"master\", gorethink.MaxVal}, gorethink.BetweenOpts{\n\t\t\tRightBound: \"open\",\n\t\t}).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, cursor.One(head))\n\t\trequire.Equal(t, commitID3, head.ID)\n\t})\n}\n\n\/*\n\tdiffPathIndex\n\n\tused nowhere?\n\tused in ListFile() to list diffs by path\n*\/\n\n\/* diffCommitIndex\n\nIndexed on commitID field in diff row\n\nUsed to:\n\n- in FinishCommit() to gather all of the diffs for this commit\n\n*\/\n\nfunc TestDiffCommitIndexBasic(t *testing.T) {\n\ttestSetup(t, func(d drive.Driver, dbName string, dbClient *gorethink.Session, client pclient.APIClient) {\n\n\t\trepo := &pfs.Repo{Name: \"foo\"}\n\t\trequire.NoError(t, d.CreateRepo(repo, timestampNow(), nil, nil))\n\t\tcommitID := uuid.NewWithoutDashes()\n\t\terr := d.StartCommit(\n\t\t\trepo,\n\t\t\tcommitID,\n\t\t\t\"\",\n\t\t\t\"master\",\n\t\t\ttimestampNow(),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tfile := &pfs.File{\n\t\t\tCommit: &pfs.Commit{\n\t\t\t\tRepo: repo,\n\t\t\t\tID: commitID,\n\t\t\t},\n\t\t\tPath: \"file\",\n\t\t}\n\t\td.PutFile(file, \"\", pfs.Delimiter_LINE, 0, strings.NewReader(\"foo\\n\"))\n\n\t\tcommit := &pfs.Commit{Repo: repo, ID: commitID}\n\t\trequire.NoError(t, d.FinishCommit(commit, timestampNow(), false, nil))\n\n\t\tcursor, err := gorethink.DB(dbName).Table(diffTable).GetAllByIndex(diffCommitIndex, commitID).Run(dbClient)\n\t\trequire.NoError(t, err)\n\t\tdiff := &persist.Diff{}\n\t\trequire.NoError(t, cursor.One(diff))\n\t\tfmt.Printf(\"got first diff: %v\\n\", diff)\n\t\trequire.Equal(t, \"file\", diff.Path)\n\t\trequire.Equal(t, 1, len(diff.BlockRefs))\n\n\t\tblock := diff.BlockRefs[0]\n\t\tfmt.Printf(\"block: %v\\n\", block)\n\t\tblockSize := block.Upper - block.Lower\n\n\t\t\/\/ Was trying to check on a per block level ...\n\t\t\/\/ But even GetFile() doesn't seem to return the correct results\n\t\t\/\/ reader, err := d.GetFile(file, nil, 0,\n\t\t\/\/\tint64(blockSize), &pfs.Commit{Repo: repo, ID: commitID}, 0, false, \"\")\n\n\t\treader, err := client.GetBlock(block.Hash, uint64(0), uint64(blockSize))\n\t\trequire.NoError(t, err)\n\t\tvar data []byte\n\t\tsize, err := reader.Read(data)\n\t\tfmt.Printf(\"data=%v, err=%v\\n\", string(data), err)\n\t\tfmt.Printf(\"size=%v\\n\", size)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"foo\\n\", string(data))\n\n\t})\n}\n\n\/* commitModifiedPathsIndex\n\nIndexed on: repo \/ path \/ branchclock\n\nUsed to:\n\n- in GetFile() to gather all commits within a range that modified a given path\n\n*\/\n\n\/* commitDeletedPathsIndex\n\nIndexed on :\n\nUsed to:\n\n- in GetFile() given a range of commits, find the commits that deleted a path, return the leftmost one\n\n*\/\n\n\/* clockBranchIndex\n\nIndexed on:\n\n- repo && branchIndex\n\nUsed to:\n\n- in ListBranch() to query the clocks table and return the branches\n\n*\/\n\nfunc timestampNow() *google_protobuf.Timestamp {\n\treturn &google_protobuf.Timestamp{Seconds: time.Now().Unix()}\n}\n\nfunc testSetup(t *testing.T, testCode func(drive.Driver, string, *gorethink.Session, pclient.APIClient)) {\n\tdbName := \"pachyderm_test_\" + uuid.NewWithoutDashes()[0:12]\n\tif err := InitDB(RethinkAddress, dbName); err != nil {\n\t\trequire.NoError(t, err)\n\t\treturn\n\t}\n\tdbClient, err := gorethink.Connect(gorethink.ConnectOpts{\n\t\tAddress: RethinkAddress,\n\t\tTimeout: connectTimeoutSeconds * time.Second,\n\t})\n\trequire.NoError(t, err)\n\t_client, d := startBlockServerAndGetClient(t, dbName)\n\n\ttestCode(d, dbName, dbClient, _client)\n\n\tif err := RemoveDB(RethinkAddress, dbName); err != nil {\n\t\trequire.NoError(t, err)\n\t\treturn\n\t}\n}\n\nfunc runServers(t *testing.T, port int32, blockAPIServer pfsclient.BlockAPIServer) {\n\tready := make(chan bool)\n\tgo func() {\n\t\terr := protoserver.Serve(\n\t\t\tfunc(s *grpc.Server) {\n\t\t\t\tpfsclient.RegisterBlockAPIServer(s, blockAPIServer)\n\t\t\t\tclose(ready)\n\t\t\t},\n\t\t\tprotoserver.ServeOptions{Version: version.Version},\n\t\t\tprotoserver.ServeEnv{GRPCPort: uint16(port)},\n\t\t)\n\t\trequire.NoError(t, err)\n\t}()\n\t<-ready\n}\n\nfunc startBlockServerAndGetClient(t *testing.T, dbName string) (pclient.APIClient, drive.Driver) {\n\n\troot := uniqueString(\"\/tmp\/pach_test\/run\")\n\tvar ports []int32\n\tports = append(ports, atomic.AddInt32(&port, 1))\n\tvar addresses []string\n\tfor _, port := range ports {\n\t\taddresses = append(addresses, fmt.Sprintf(\"localhost:%d\", port))\n\t}\n\tvar driver drive.Driver\n\tfor i, port := range ports {\n\t\taddress := addresses[i]\n\t\t_driver, err := NewDriver(address, RethinkAddress, dbName)\n\t\tdriver = _driver\n\t\trequire.NoError(t, err)\n\t\tblockAPIServer, err := pfsserver.NewLocalBlockAPIServer(root)\n\t\trequire.NoError(t, err)\n\t\trunServers(t, port, blockAPIServer)\n\t}\n\tclientConn, err := grpc.Dial(addresses[0], grpc.WithInsecure())\n\trequire.NoError(t, err)\n\treturn pclient.APIClient{BlockAPIClient: pfsclient.NewBlockAPIClient(clientConn)}, driver\n}\n\nfunc uniqueString(prefix string) string {\n\treturn prefix + \".\" + uuid.NewWithoutDashes()[0:12]\n}\n<|endoftext|>"} {"text":"<commit_before>package horizon\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/stellar\/go-horizon\/db\"\n\t\"github.com\/stellar\/go-horizon\/render\"\n\t\"github.com\/stellar\/go-horizon\/render\/hal\"\n\t\"github.com\/stellar\/go-horizon\/render\/problem\"\n\t\"github.com\/stellar\/go-horizon\/render\/sse\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\nfunc ledgerIndexAction(c web.C, w http.ResponseWriter, r *http.Request) {\n\tah := &ActionHelper{c: c, r: r}\n\tctx := ah.Context()\n\n\t\/\/ construct query\n\tah.ValidateInt64(ParamCursor)\n\tquery := db.LedgerPageQuery{\n\t\tSqlQuery: ah.App().HistoryQuery(),\n\t\tPageQuery: ah.GetPageQuery(),\n\t}\n\n\tif ah.Err() != nil {\n\t\tproblem.Render(ctx, w, ah.Err())\n\t\treturn\n\t}\n\n\tvar records []db.LedgerRecord\n\n\trender := render.Renderer{}\n\trender.JSON = func() {\n\t\t\/\/ load records\n\t\terr := db.Select(ctx, query, &records)\n\t\tif err != nil {\n\t\t\tproblem.Render(ctx, w, err)\n\t\t}\n\n\t\tpage, err := NewLedgerPageResource(records, query.PageQuery)\n\t\tif err != nil {\n\t\t\tproblem.Render(ctx, w, err)\n\t\t}\n\n\t\thal.RenderPage(w, page)\n\t}\n\n\trender.SSE = func(stream sse.Stream) {\n\t\terr := db.Select(ctx, query, &records)\n\t\tif err != nil {\n\t\t\tstream.Err(err)\n\t\t\treturn\n\t\t}\n\n\t\trecords = records[stream.SentCount():]\n\n\t\tfor _, record := range records {\n\t\t\tstream.Send(sse.Event{\n\t\t\t\tID: record.PagingToken(),\n\t\t\t\tData: NewLedgerResource(record),\n\t\t\t})\n\t\t}\n\n\t\tif stream.SentCount() >= int(query.Limit) {\n\t\t\tstream.Done()\n\t\t}\n\t}\n\n\trender.Render(ctx, w, r)\n}\n\nfunc ledgerShowAction(c web.C, w http.ResponseWriter, r *http.Request) {\n\tah := &ActionHelper{c: c, r: r}\n\tapp := ah.App()\n\tsequence := ah.GetInt32(\"id\")\n\n\tif ah.Err() != nil {\n\t\tproblem.Render(ah.Context(), w, problem.NotFound)\n\t\treturn\n\t}\n\n\tquery := db.LedgerBySequenceQuery{\n\t\tSqlQuery: app.HistoryQuery(),\n\t\tSequence: sequence,\n\t}\n\n\trender.Single(ah.Context(), w, r, query, ledgerRecordToResource)\n}\n\nfunc ledgerRecordToResource(record db.Record) (render.Resource, error) {\n\treturn NewLedgerResource(record.(db.LedgerRecord)), nil\n}\n<commit_msg>Update \/ledgers\/:id action to new renderer<commit_after>package horizon\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/stellar\/go-horizon\/db\"\n\t\"github.com\/stellar\/go-horizon\/render\"\n\t\"github.com\/stellar\/go-horizon\/render\/hal\"\n\t\"github.com\/stellar\/go-horizon\/render\/problem\"\n\t\"github.com\/stellar\/go-horizon\/render\/sse\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\nfunc ledgerIndexAction(c web.C, w http.ResponseWriter, r *http.Request) {\n\tah := &ActionHelper{c: c, r: r}\n\tctx := ah.Context()\n\n\t\/\/ construct query\n\tah.ValidateInt64(ParamCursor)\n\tquery := db.LedgerPageQuery{\n\t\tSqlQuery: ah.App().HistoryQuery(),\n\t\tPageQuery: ah.GetPageQuery(),\n\t}\n\n\tif ah.Err() != nil {\n\t\tproblem.Render(ctx, w, ah.Err())\n\t\treturn\n\t}\n\n\tvar records []db.LedgerRecord\n\n\trender := render.Renderer{}\n\trender.JSON = func() {\n\t\t\/\/ load records\n\t\terr := db.Select(ctx, query, &records)\n\t\tif err != nil {\n\t\t\tproblem.Render(ctx, w, err)\n\t\t}\n\n\t\tpage, err := NewLedgerPageResource(records, query.PageQuery)\n\t\tif err != nil {\n\t\t\tproblem.Render(ctx, w, err)\n\t\t}\n\n\t\thal.RenderPage(w, page)\n\t}\n\n\trender.SSE = func(stream sse.Stream) {\n\t\terr := db.Select(ctx, query, &records)\n\t\tif err != nil {\n\t\t\tstream.Err(err)\n\t\t\treturn\n\t\t}\n\n\t\trecords = records[stream.SentCount():]\n\n\t\tfor _, record := range records {\n\t\t\tstream.Send(sse.Event{\n\t\t\t\tID: record.PagingToken(),\n\t\t\t\tData: NewLedgerResource(record),\n\t\t\t})\n\t\t}\n\n\t\tif stream.SentCount() >= int(query.Limit) {\n\t\t\tstream.Done()\n\t\t}\n\t}\n\n\trender.Render(ctx, w, r)\n}\n\nfunc ledgerShowAction(c web.C, w http.ResponseWriter, r *http.Request) {\n\tah := &ActionHelper{c: c, r: r}\n\tapp := ah.App()\n\n\t\/\/ construct query\n\tquery := db.LedgerBySequenceQuery{\n\t\tSqlQuery: app.HistoryQuery(),\n\t\tSequence: ah.GetInt32(\"id\"),\n\t}\n\tif ah.Err() != nil {\n\t\tproblem.Render(ah.Context(), w, ah.Err())\n\t\treturn\n\t}\n\n\t\/\/ find ledger\n\tfound, err := db.First(ah.Context(), query)\n\tif err != nil {\n\t\tproblem.Render(ah.Context(), w, ah.Err())\n\t\treturn\n\t}\n\n\tledger, ok := found.(db.LedgerRecord)\n\tif !ok {\n\t\tproblem.Render(ah.Context(), w, problem.NotFound)\n\t\treturn\n\t}\n\n\trender := render.Renderer{}\n\trender.JSON = func() {\n\t\thal.Render(w, NewLedgerResource(ledger))\n\t}\n\n\trender.Render(ah.Context(), w, r)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package etree provides XML services through an Element Tree\n\/\/ abstraction.\npackage etree\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\"\n)\n\nconst (\n\tNoIndent = -1\n)\n\nvar (\n\tErrInvalidFormat = errors.New(\"etree: invalid XML format\")\n)\n\n\/\/ A Token is an empty interface that represents an Element,\n\/\/ Comment, CharData, Directive or ProcInst.\ntype Token interface {\n\twriteTo(w *bufio.Writer)\n}\n\n\/\/ A Document is the root level object in an etree. It represents the\n\/\/ XML document as a whole. It embeds an Element type but only uses the\n\/\/ Element type's Child tokens.\ntype Document struct {\n\tElement\n}\n\n\/\/ An Element represents an XML element, its attributes, and its child tokens.\ntype Element struct {\n\tTag string \/\/ The element tag\n\tAttr []Attr \/\/ The element's list of key-value attribute pairs\n\tChild []Token \/\/ The element's child tokens (elements, comments, etc.)\n}\n\n\/\/ An Attr represents a key-value attribute of an XML element.\ntype Attr struct {\n\tKey string\n\tValue string\n}\n\n\/\/ A Comment represents an XML comment.\ntype Comment struct {\n\tData string\n}\n\n\/\/ CharData represents character data within XML.\ntype CharData struct {\n\tData string\n\twhitespace bool\n}\n\n\/\/ A ProcInst represents an XML processing instruction.\ntype ProcInst struct {\n\tTarget string\n\tInst string\n}\n\n\/\/ NewDocument creates an empty XML document and returns it.\nfunc NewDocument() *Document {\n\td := new(Document)\n\td.Child = make([]Token, 0)\n\treturn d\n}\n\n\/\/ ReadFrom reads XML from the reader r and adds the result as\n\/\/ a new child of the receiving document.\nfunc (d *Document) ReadFrom(r io.Reader) error {\n\treturn d.Element.ReadFrom(r)\n}\n\n\/\/ WriteTo serializes an XML document into the writer w.\nfunc (d *Document) WriteTo(w io.Writer) error {\n\tb := bufio.NewWriter(w)\n\tfor _, c := range d.Child {\n\t\tc.writeTo(b)\n\t}\n\treturn b.Flush()\n}\n\n\/\/ Indent modifies the document's element tree by inserting\n\/\/ CharData entities containing carriage returns and indentation.\n\/\/ The amount of indenting per depth level is equal to spaces.\n\/\/ Use etree.NoIndent for spaces if you want no indentation at all.\nfunc (d *Document) Indent(spaces int) {\n\td.stripIndent()\n\tn := len(d.Child)\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tnewChild := make([]Token, n*2-1)\n\tfor i, c := range d.Child {\n\t\tj := i * 2\n\t\tnewChild[j] = c\n\t\tif j+1 < len(newChild) {\n\t\t\tnewChild[j+1] = newIndentCharData(0, spaces)\n\t\t}\n\t\tif e, ok := c.(*Element); ok {\n\t\t\te.indent(1, spaces)\n\t\t}\n\t}\n\td.Child = newChild\n}\n\n\/\/ NewElement creates an XML element with the specified name.\n\/\/ In most cases, you should use NewDocument and create elements\n\/\/ with the CreateElement function.\nfunc NewElement(tag string) *Element {\n\treturn &Element{\n\t\tTag: tag,\n\t\tAttr: make([]Attr, 0),\n\t\tChild: make([]Token, 0),\n\t}\n}\n\n\/\/ Text returns the characters immediately following the element's\n\/\/ opening tag.\nfunc (e *Element) Text() string {\n\tif len(e.Child) == 0 {\n\t\treturn \"\"\n\t}\n\tif cd, ok := e.Child[0].(*CharData); ok {\n\t\treturn cd.Data\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\n\/\/ SetText replaces an element's subsidiary CharData text with a new\n\/\/ string.\nfunc (e *Element) SetText(text string) {\n\tif len(e.Child) > 0 {\n\t\tif cd, ok := e.Child[0].(*CharData); ok {\n\t\t\tcd.Data = text\n\t\t\treturn\n\t\t}\n\t}\n\te.Child = append(e.Child, nil)\n\tcopy(e.Child[1:], e.Child[0:])\n\tc := newCharData(text, false)\n\te.Child[0] = c\n}\n\n\/\/ CreateElement creates a child element of the receiving element and\n\/\/ gives it the specified name.\nfunc (e *Element) CreateElement(name string) *Element {\n\tc := NewElement(name)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ An element stack is a simple stack of elements used by ReadFrom.\ntype elementStack []*Element\n\nfunc (s *elementStack) push(e *Element) {\n\t*s = append(*s, e)\n}\n\nfunc (s *elementStack) pop() {\n\t(*s)[len(*s)-1] = nil\n\t*s = (*s)[:len(*s)-1]\n}\n\nfunc (s *elementStack) peek() *Element {\n\treturn (*s)[len(*s)-1]\n}\n\n\/\/ ReadFrom reads XML from the reader r and stores the result as\n\/\/ a new child of the receiving element.\nfunc (e *Element) ReadFrom(r io.Reader) error {\n\tstack := elementStack{e}\n\tdec := xml.NewDecoder(r)\n\tfor {\n\t\tt, err := dec.RawToken()\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn nil\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase len(stack) == 0:\n\t\t\treturn ErrInvalidFormat\n\t\t}\n\n\t\ttop := stack.peek()\n\n\t\tswitch t := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\te := top.CreateElement(t.Name.Local)\n\t\t\tfor _, a := range t.Attr {\n\t\t\t\te.CreateAttr(a.Name.Local, a.Value)\n\t\t\t}\n\t\t\tstack.push(e)\n\t\tcase xml.EndElement:\n\t\t\tstack.pop()\n\t\tcase xml.CharData:\n\t\t\tdata := string(t)\n\t\t\ttop.createCharData(data, isWhitespace(data))\n\t\tcase xml.Comment:\n\t\t\ttop.CreateComment(string(t))\n\t\tcase xml.ProcInst:\n\t\t\ttop.CreateProcInst(t.Target, string(t.Inst))\n\t\t}\n\t}\n}\n\n\/\/ WriteTo serializes the element and its children as XML into\n\/\/ the writer w.\nfunc (e *Element) WriteTo(w io.Writer) error {\n\tb := bufio.NewWriter(w)\n\te.writeTo(b)\n\treturn b.Flush()\n}\n\n\/\/ ChildElements returns all elements that are children of the\n\/\/ receiving element.\nfunc (e *Element) ChildElements() []*Element {\n\telements := make([]*Element, 0)\n\tfor _, c := range e.Child {\n\t\tif e, ok := c.(*Element); ok {\n\t\t\telements = append(elements, e)\n\t\t}\n\t}\n\treturn elements\n}\n\n\/\/ Indent modifies the element's element tree by inserting\n\/\/ CharData entities containing carriage returns and indentation.\n\/\/ The amount of indenting per depth level is equal to spaces.\n\/\/ Use etree.NoIndent for spaces if you want no indentation at all.\nfunc (e *Element) Indent(spaces int) {\n\te.indent(1, spaces)\n}\n\n\/\/ indent recursively inserts proper indentation between an\n\/\/ XML element's child tokens.\nfunc (e *Element) indent(depth, spaces int) {\n\te.stripIndent()\n\tn := len(e.Child)\n\tif n == 0 {\n\t\treturn\n\t}\n\n\toldChild := e.Child\n\te.Child = make([]Token, 0, n*2+1)\n\tisCharData := false\n\tfor _, c := range oldChild {\n\t\t_, isCharData = c.(*CharData)\n\t\tif !isCharData && spaces >= 0 {\n\t\t\te.addChild(newIndentCharData(depth, spaces))\n\t\t}\n\t\te.addChild(c)\n\t\tif ce, ok := c.(*Element); ok {\n\t\t\tce.indent(depth+1, spaces)\n\t\t}\n\t}\n\tif !isCharData && spaces >= 0 {\n\t\te.addChild(newIndentCharData(depth-1, spaces))\n\t}\n}\n\n\/\/ stripIndent removes any previously inserted indentation.\nfunc (e *Element) stripIndent() {\n\t\/\/ Count the number of non-indent child tokens\n\tn := len(e.Child)\n\tfor _, c := range e.Child {\n\t\tif cd, ok := c.(*CharData); ok && cd.whitespace {\n\t\t\tn--\n\t\t}\n\t}\n\tif n == len(e.Child) {\n\t\treturn\n\t}\n\n\t\/\/ Strip out indent CharData\n\tnewChild := make([]Token, n)\n\tj := 0\n\tfor _, c := range e.Child {\n\t\tif cd, ok := c.(*CharData); ok && cd.whitespace {\n\t\t\tcontinue\n\t\t}\n\t\tnewChild[j] = c\n\t\tj++\n\t}\n\te.Child = newChild\n}\n\n\/\/ writeTo serializes the element to the writer w.\nfunc (e *Element) writeTo(w *bufio.Writer) {\n\tw.WriteByte('<')\n\tw.WriteString(e.Tag)\n\tfor _, a := range e.Attr {\n\t\tw.WriteByte(' ')\n\t\ta.writeTo(w)\n\t}\n\tif len(e.Child) > 0 {\n\t\tw.WriteString(\">\")\n\t\tfor _, c := range e.Child {\n\t\t\tc.writeTo(w)\n\t\t}\n\t\tw.Write([]byte{'<', '\/'})\n\t\tw.WriteString(e.Tag)\n\t\tw.WriteByte('>')\n\t} else {\n\t\tw.Write([]byte{'\/', '>'})\n\t}\n}\n\n\/\/ addChild adds a child token to the receiving element.\nfunc (e *Element) addChild(t Token) {\n\te.Child = append(e.Child, t)\n}\n\n\/\/ CreateAttr creates an attribute and adds it to the receiving element.\nfunc (e *Element) CreateAttr(key, value string) Attr {\n\ta := Attr{key, value}\n\te.Attr = append(e.Attr, a)\n\treturn a\n}\n\n\/\/ writeTo serializes the attribute to the writer.\nfunc (a *Attr) writeTo(w *bufio.Writer) {\n\tw.WriteString(a.Key)\n\tw.WriteString(`=\"`)\n\tw.WriteString(a.Value)\n\tw.WriteByte('\"')\n}\n\n\/\/ newCharData creates an XML character data entity.\nfunc newCharData(data string, whitespace bool) *CharData {\n\treturn &CharData{Data: data, whitespace: whitespace}\n}\n\n\/\/ CreateCharData creates an XML character data entity and adds it\n\/\/ as a child of the receiving element.\nfunc (e *Element) CreateCharData(data string) *CharData {\n\treturn e.createCharData(data, false)\n}\n\n\/\/ CreateCharData creates an XML character data entity and adds it\n\/\/ as a child of the receiving element.\nfunc (e *Element) createCharData(data string, whitespace bool) *CharData {\n\tc := newCharData(data, whitespace)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ writeTo serializes the character data entity to the writer.\nfunc (c *CharData) writeTo(w *bufio.Writer) {\n\tw.WriteString(escape(c.Data))\n}\n\n\/\/ NewComment creates an XML comment.\nfunc newComment(comment string) *Comment {\n\treturn &Comment{Data: comment}\n}\n\n\/\/ CreateComment creates an XML comment and adds it as a child of the\n\/\/ receiving element.\nfunc (e *Element) CreateComment(comment string) *Comment {\n\tc := newComment(comment)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ writeTo serialies the comment to the writer.\nfunc (c *Comment) writeTo(w *bufio.Writer) {\n\tw.WriteString(\"<!--\")\n\tw.WriteString(c.Data)\n\tw.WriteString(\"-->\")\n}\n\n\/\/ newProcInst creates a new processing instruction.\nfunc newProcInst(target, inst string) *ProcInst {\n\treturn &ProcInst{Target: target, Inst: inst}\n}\n\n\/\/ CreateProcInst creates a processing instruction and adds it as a\n\/\/ child of the receiving element\nfunc (e *Element) CreateProcInst(target, inst string) *ProcInst {\n\tp := newProcInst(target, inst)\n\te.addChild(p)\n\treturn p\n}\n\n\/\/ writeTo serializes the processing instruction to the writer.\nfunc (p *ProcInst) writeTo(w *bufio.Writer) {\n\tw.WriteString(\"<?\")\n\tw.WriteString(p.Target)\n\tw.WriteByte(' ')\n\tw.WriteString(p.Inst)\n\tw.WriteString(\"?>\")\n}\n\n\/\/ newIndentCharData returns the indentation CharData token for the given\n\/\/ depth level with the given number of spaces per level.\nfunc newIndentCharData(depth, spaces int) *CharData {\n\treturn newCharData(crSpaces(depth*spaces), true)\n}\n<commit_msg>add SelectElement, SelectElements, SelectAttr and CreateElementIterator<commit_after>\/\/ Package etree provides XML services through an Element Tree\n\/\/ abstraction.\npackage etree\n\nimport (\n\t\"bufio\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\"\n)\n\nconst (\n\tNoIndent = -1\n)\n\nvar (\n\tErrInvalidFormat = errors.New(\"etree: invalid XML format\")\n)\n\n\/\/ A Token is an empty interface that represents an Element,\n\/\/ Comment, CharData, Directive or ProcInst.\ntype Token interface {\n\twriteTo(w *bufio.Writer)\n}\n\n\/\/ A Document is the root level object in an etree. It represents the\n\/\/ XML document as a whole. It embeds an Element type but only uses the\n\/\/ Element type's Child tokens.\ntype Document struct {\n\tElement\n}\n\n\/\/ An Element represents an XML element, its attributes, and its child tokens.\ntype Element struct {\n\tTag string \/\/ The element tag\n\tAttr []Attr \/\/ The element's list of key-value attribute pairs\n\tChild []Token \/\/ The element's child tokens (elements, comments, etc.)\n}\n\n\/\/ An Attr represents a key-value attribute of an XML element.\ntype Attr struct {\n\tKey string\n\tValue string\n}\n\n\/\/ A Comment represents an XML comment.\ntype Comment struct {\n\tData string\n}\n\n\/\/ CharData represents character data within XML.\ntype CharData struct {\n\tData string\n\twhitespace bool\n}\n\n\/\/ A ProcInst represents an XML processing instruction.\ntype ProcInst struct {\n\tTarget string\n\tInst string\n}\n\n\/\/ NewDocument creates an empty XML document and returns it.\nfunc NewDocument() *Document {\n\td := new(Document)\n\td.Child = make([]Token, 0)\n\treturn d\n}\n\n\/\/ ReadFrom reads XML from the reader r and adds the result as\n\/\/ a new child of the receiving document.\nfunc (d *Document) ReadFrom(r io.Reader) error {\n\treturn d.Element.ReadFrom(r)\n}\n\n\/\/ WriteTo serializes an XML document into the writer w.\nfunc (d *Document) WriteTo(w io.Writer) error {\n\tb := bufio.NewWriter(w)\n\tfor _, c := range d.Child {\n\t\tc.writeTo(b)\n\t}\n\treturn b.Flush()\n}\n\n\/\/ Indent modifies the document's element tree by inserting\n\/\/ CharData entities containing carriage returns and indentation.\n\/\/ The amount of indenting per depth level is equal to spaces.\n\/\/ Use etree.NoIndent for spaces if you want no indentation at all.\nfunc (d *Document) Indent(spaces int) {\n\td.stripIndent()\n\tn := len(d.Child)\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tnewChild := make([]Token, n*2-1)\n\tfor i, c := range d.Child {\n\t\tj := i * 2\n\t\tnewChild[j] = c\n\t\tif j+1 < len(newChild) {\n\t\t\tnewChild[j+1] = newIndentCharData(0, spaces)\n\t\t}\n\t\tif e, ok := c.(*Element); ok {\n\t\t\te.indent(1, spaces)\n\t\t}\n\t}\n\td.Child = newChild\n}\n\n\/\/ NewElement creates an XML element with the specified name.\n\/\/ In most cases, you should use NewDocument and create elements\n\/\/ with the CreateElement function.\nfunc NewElement(tag string) *Element {\n\treturn &Element{\n\t\tTag: tag,\n\t\tAttr: make([]Attr, 0),\n\t\tChild: make([]Token, 0),\n\t}\n}\n\n\/\/ Text returns the characters immediately following the element's\n\/\/ opening tag.\nfunc (e *Element) Text() string {\n\tif len(e.Child) == 0 {\n\t\treturn \"\"\n\t}\n\tif cd, ok := e.Child[0].(*CharData); ok {\n\t\treturn cd.Data\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\n\/\/ SetText replaces an element's subsidiary CharData text with a new\n\/\/ string.\nfunc (e *Element) SetText(text string) {\n\tif len(e.Child) > 0 {\n\t\tif cd, ok := e.Child[0].(*CharData); ok {\n\t\t\tcd.Data = text\n\t\t\treturn\n\t\t}\n\t}\n\te.Child = append(e.Child, nil)\n\tcopy(e.Child[1:], e.Child[0:])\n\tc := newCharData(text, false)\n\te.Child[0] = c\n}\n\n\/\/ CreateElement creates a child element of the receiving element and\n\/\/ gives it the specified name.\nfunc (e *Element) CreateElement(name string) *Element {\n\tc := NewElement(name)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ An element stack is a simple stack of elements used by ReadFrom.\ntype elementStack []*Element\n\nfunc (s *elementStack) push(e *Element) {\n\t*s = append(*s, e)\n}\n\nfunc (s *elementStack) pop() {\n\t(*s)[len(*s)-1] = nil\n\t*s = (*s)[:len(*s)-1]\n}\n\nfunc (s *elementStack) peek() *Element {\n\treturn (*s)[len(*s)-1]\n}\n\n\/\/ ReadFrom reads XML from the reader r and stores the result as\n\/\/ a new child of the receiving element.\nfunc (e *Element) ReadFrom(r io.Reader) error {\n\tstack := elementStack{e}\n\tdec := xml.NewDecoder(r)\n\tfor {\n\t\tt, err := dec.RawToken()\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn nil\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase len(stack) == 0:\n\t\t\treturn ErrInvalidFormat\n\t\t}\n\n\t\ttop := stack.peek()\n\n\t\tswitch t := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\te := top.CreateElement(t.Name.Local)\n\t\t\tfor _, a := range t.Attr {\n\t\t\t\te.CreateAttr(a.Name.Local, a.Value)\n\t\t\t}\n\t\t\tstack.push(e)\n\t\tcase xml.EndElement:\n\t\t\tstack.pop()\n\t\tcase xml.CharData:\n\t\t\tdata := string(t)\n\t\t\ttop.createCharData(data, isWhitespace(data))\n\t\tcase xml.Comment:\n\t\t\ttop.CreateComment(string(t))\n\t\tcase xml.ProcInst:\n\t\t\ttop.CreateProcInst(t.Target, string(t.Inst))\n\t\t}\n\t}\n}\n\n\/\/ WriteTo serializes the element and its children as XML into\n\/\/ the writer w.\nfunc (e *Element) WriteTo(w io.Writer) error {\n\tb := bufio.NewWriter(w)\n\te.writeTo(b)\n\treturn b.Flush()\n}\n\n\/\/ SelectAttr finds an element attribute matching the requested key\n\/\/ and returns its value if found.\nfunc (e *Element) SelectAttr(key string) (value string, found bool) {\n\tfor _, a := range e.Attr {\n\t\tif a.Key == key {\n\t\t\treturn a.Value, true\n\t\t}\n\t}\n\treturn \"\", false\n}\n\n\/\/ SelectElement returns the first child element with the given tag.\nfunc (e *Element) SelectElement(tag string) *Element {\n\tfor _, ce := range e.Child {\n\t\tif c, ok := ce.(*Element); ok && c.Tag == tag {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SelectElements returns a slice of all child elements with the given tag.\nfunc (e *Element) SelectElements(tag string) []*Element {\n\telements := make([]*Element, 0)\n\tfor _, ce := range e.Child {\n\t\tif c, ok := ce.(*Element); ok && c.Tag == tag {\n\t\t\telements = append(elements, c)\n\t\t}\n\t}\n\treturn elements\n}\n\n\/\/ ChildElements returns all elements that are children of the\n\/\/ receiving element.\nfunc (e *Element) ChildElements() []*Element {\n\telements := make([]*Element, 0)\n\tfor _, c := range e.Child {\n\t\tif e, ok := c.(*Element); ok {\n\t\t\telements = append(elements, e)\n\t\t}\n\t}\n\treturn elements\n}\n\n\/\/ Indent modifies the element's element tree by inserting\n\/\/ CharData entities containing carriage returns and indentation.\n\/\/ The amount of indenting per depth level is equal to spaces.\n\/\/ Use etree.NoIndent for spaces if you want no indentation at all.\nfunc (e *Element) Indent(spaces int) {\n\te.indent(1, spaces)\n}\n\n\/\/ indent recursively inserts proper indentation between an\n\/\/ XML element's child tokens.\nfunc (e *Element) indent(depth, spaces int) {\n\te.stripIndent()\n\tn := len(e.Child)\n\tif n == 0 {\n\t\treturn\n\t}\n\n\toldChild := e.Child\n\te.Child = make([]Token, 0, n*2+1)\n\tisCharData := false\n\tfor _, c := range oldChild {\n\t\t_, isCharData = c.(*CharData)\n\t\tif !isCharData && spaces >= 0 {\n\t\t\te.addChild(newIndentCharData(depth, spaces))\n\t\t}\n\t\te.addChild(c)\n\t\tif ce, ok := c.(*Element); ok {\n\t\t\tce.indent(depth+1, spaces)\n\t\t}\n\t}\n\tif !isCharData && spaces >= 0 {\n\t\te.addChild(newIndentCharData(depth-1, spaces))\n\t}\n}\n\n\/\/ stripIndent removes any previously inserted indentation.\nfunc (e *Element) stripIndent() {\n\t\/\/ Count the number of non-indent child tokens\n\tn := len(e.Child)\n\tfor _, c := range e.Child {\n\t\tif cd, ok := c.(*CharData); ok && cd.whitespace {\n\t\t\tn--\n\t\t}\n\t}\n\tif n == len(e.Child) {\n\t\treturn\n\t}\n\n\t\/\/ Strip out indent CharData\n\tnewChild := make([]Token, n)\n\tj := 0\n\tfor _, c := range e.Child {\n\t\tif cd, ok := c.(*CharData); ok && cd.whitespace {\n\t\t\tcontinue\n\t\t}\n\t\tnewChild[j] = c\n\t\tj++\n\t}\n\te.Child = newChild\n}\n\n\/\/ writeTo serializes the element to the writer w.\nfunc (e *Element) writeTo(w *bufio.Writer) {\n\tw.WriteByte('<')\n\tw.WriteString(e.Tag)\n\tfor _, a := range e.Attr {\n\t\tw.WriteByte(' ')\n\t\ta.writeTo(w)\n\t}\n\tif len(e.Child) > 0 {\n\t\tw.WriteString(\">\")\n\t\tfor _, c := range e.Child {\n\t\t\tc.writeTo(w)\n\t\t}\n\t\tw.Write([]byte{'<', '\/'})\n\t\tw.WriteString(e.Tag)\n\t\tw.WriteByte('>')\n\t} else {\n\t\tw.Write([]byte{'\/', '>'})\n\t}\n}\n\n\/\/ addChild adds a child token to the receiving element.\nfunc (e *Element) addChild(t Token) {\n\te.Child = append(e.Child, t)\n}\n\n\/\/ CreateAttr creates an attribute and adds it to the receiving element.\nfunc (e *Element) CreateAttr(key, value string) Attr {\n\ta := Attr{key, value}\n\te.Attr = append(e.Attr, a)\n\treturn a\n}\n\n\/\/ CreateElementIterator creates a child element iterator that iterates\n\/\/ through all child elements with the given tag.\nfunc (e *Element) CreateElementIterator(tag string) *ElementIterator {\n\ti := &ElementIterator{\n\t\tparent: e,\n\t\ttag: tag,\n\t\tindex: -1,\n\t}\n\ti.Next()\n\treturn i\n}\n\n\/\/ writeTo serializes the attribute to the writer.\nfunc (a *Attr) writeTo(w *bufio.Writer) {\n\tw.WriteString(a.Key)\n\tw.WriteString(`=\"`)\n\tw.WriteString(a.Value)\n\tw.WriteByte('\"')\n}\n\n\/\/ newCharData creates an XML character data entity.\nfunc newCharData(data string, whitespace bool) *CharData {\n\treturn &CharData{Data: data, whitespace: whitespace}\n}\n\n\/\/ CreateCharData creates an XML character data entity and adds it\n\/\/ as a child of the receiving element.\nfunc (e *Element) CreateCharData(data string) *CharData {\n\treturn e.createCharData(data, false)\n}\n\n\/\/ CreateCharData creates an XML character data entity and adds it\n\/\/ as a child of the receiving element.\nfunc (e *Element) createCharData(data string, whitespace bool) *CharData {\n\tc := newCharData(data, whitespace)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ writeTo serializes the character data entity to the writer.\nfunc (c *CharData) writeTo(w *bufio.Writer) {\n\tw.WriteString(escape(c.Data))\n}\n\n\/\/ NewComment creates an XML comment.\nfunc newComment(comment string) *Comment {\n\treturn &Comment{Data: comment}\n}\n\n\/\/ CreateComment creates an XML comment and adds it as a child of the\n\/\/ receiving element.\nfunc (e *Element) CreateComment(comment string) *Comment {\n\tc := newComment(comment)\n\te.addChild(c)\n\treturn c\n}\n\n\/\/ writeTo serialies the comment to the writer.\nfunc (c *Comment) writeTo(w *bufio.Writer) {\n\tw.WriteString(\"<!--\")\n\tw.WriteString(c.Data)\n\tw.WriteString(\"-->\")\n}\n\n\/\/ newProcInst creates a new processing instruction.\nfunc newProcInst(target, inst string) *ProcInst {\n\treturn &ProcInst{Target: target, Inst: inst}\n}\n\n\/\/ CreateProcInst creates a processing instruction and adds it as a\n\/\/ child of the receiving element\nfunc (e *Element) CreateProcInst(target, inst string) *ProcInst {\n\tp := newProcInst(target, inst)\n\te.addChild(p)\n\treturn p\n}\n\n\/\/ writeTo serializes the processing instruction to the writer.\nfunc (p *ProcInst) writeTo(w *bufio.Writer) {\n\tw.WriteString(\"<?\")\n\tw.WriteString(p.Target)\n\tw.WriteByte(' ')\n\tw.WriteString(p.Inst)\n\tw.WriteString(\"?>\")\n}\n\n\/\/ newIndentCharData returns the indentation CharData token for the given\n\/\/ depth level with the given number of spaces per level.\nfunc newIndentCharData(depth, spaces int) *CharData {\n\treturn newCharData(crSpaces(depth*spaces), true)\n}\n\n\/\/ An ElementIterator allows the caller to iterate through the child elements\n\/\/ of an element.\ntype ElementIterator struct {\n\tElement *Element\n\tparent *Element\n\ttag string\n\tindex int\n}\n\n\/\/ Valid returns true if the ElementIterator currently points to a valid element.\nfunc (i *ElementIterator) Valid() bool {\n\treturn i.Element != nil\n}\n\n\/\/ Next advances to the next child element with the appropriate tag.\nfunc (i *ElementIterator) Next() {\n\tfor {\n\t\ti.index++\n\t\tif i.index == len(i.parent.Child) {\n\t\t\ti.Element = nil\n\t\t\treturn\n\t\t}\n\t\tc := i.parent.Child[i.index]\n\t\tif n, ok := c.(*Element); ok && n.Tag == i.tag {\n\t\t\ti.Element = n\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package redisserver\n\nimport (\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"Go-Redis-Admin\/src\/common\/request\"\n\t\"Go-Redis-Admin\/src\/common\/response\"\n)\n\ntype redisapi struct {\n\tinput request.Input\n\toutput response.Output\n\tRedisconn redis.Conn\n}\n\n\nfunc (re *redisapi) Connect() {\n\tif re.input.Request.Method!=\"POST\"{\n\t\tfmt.Println(\"请求错误\")\n\t}\n\tbody,_:=re.input.InputBody()\n\tfmt.Println(body)\n\tre.input.Request.ParseForm()\n\tvar network string=re.input.Request.FormValue(\"network\")\n\tvar address string=re.input.Request.FormValue(\"address\")\n\tvar password string=re.input.Request.FormValue(\"password\")\n\toption:=redis.DialOption{}\n\tif password!=\"\"{\n\t\toption=redis.DialPassword(password)\n\t}\n\tconn, err := redis.Dial(network, address,option)\n\tif err != nil {\n\t\tfmt.Println(\"connect error:\", err.Error())\n\t} else {\n\t\tfmt.Println(\"connect redis success\")\n\t}\n\tre.Redisconn = conn\n\tre.output.ResponseWriter.WriteHeader(200)\n\t\/\/c:=NewConnection(conn)\n\t\/\/conn.Close()\n}\nfunc (re *redisapi) Ping() {\n\treply,err:=re.Redisconn.Do(\"PING\",\"test\")\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(redis.String(reply,err))\n\t}\n}\nfunc (re *redisapi)Set(){\n\tvar args []interface{}\n\treply,err:=re.Redisconn.Do(\"SET\",args)\n\tredis.String(reply,err)\n\tif err!=nil {\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n}\n\nfunc (re *redisapi) Get(){\n\tvar key string\n\treply,err:=re.Redisconn.Do(\"GET\",key)\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n\n}\n\nfunc (re *redisapi) Delete(){\n\tvar args []interface{}\n\treply,err:=re.Redisconn.Do(\"DEL\",args)\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n}\n<commit_msg>modify redisapi<commit_after>package redisserver\n\nimport (\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"Go-Redis-Admin\/src\/common\/request\"\n\t\"Go-Redis-Admin\/src\/common\/response\"\n)\n\ntype redisapi struct {\n\tinput request.Input\n\toutput response.Output\n\tRedisconn redis.Conn\n}\n\n\nfunc (re *redisapi) Connect() {\n\tif re.input.Request.Method!=\"POST\"{\n\t\tfmt.Println(\"请求错误\")\n\t}\n\tbody,_:=re.input.InputBody()\n\tfmt.Println(body)\n\tre.input.Request.ParseForm()\n\tvar network string=re.input.Request.FormValue(\"network\")\n\tvar address string=re.input.Request.FormValue(\"address\")\n\tvar password string=re.input.Request.FormValue(\"password\")\n\toption:=redis.DialOption{}\n\tif password!=\"\"{\n\t\toption=redis.DialPassword(password)\n\t}\n\tconn, err := redis.Dial(network, address,option)\n\tif err != nil {\n\t\tfmt.Println(\"connect error:\", err.Error())\n\t} else {\n\t\tfmt.Println(\"connect redis success\")\n\t}\n\tre.Redisconn = conn\n\tre.output.ResponseWriter.WriteHeader(200)\n\t\/\/c:=NewConnection(conn)\n\t\/\/conn.Close()\n}\nfunc (re *redisapi) Ping() {\n\treply,err:=re.Redisconn.Do(\"PING\",\"test\")\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(redis.String(reply,err))\n\t}\n\tresult,_:=redis.String(reply,err)\n\tre.output.WriteString(result)\n}\nfunc (re *redisapi)Set(){\n\tvar args []interface{}\n\treply,err:=re.Redisconn.Do(\"SET\",args)\n\tredis.String(reply,err)\n\tif err!=nil {\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n\tresult,_:=redis.String(reply,err)\n\tre.output.WriteString(result)\n}\n\nfunc (re *redisapi) Get(){\n\tvar key string\n\treply,err:=re.Redisconn.Do(\"GET\",key)\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n\n}\n\nfunc (re *redisapi) Delete(){\n\tvar args []interface{}\n\treply,err:=re.Redisconn.Do(\"DEL\",args)\n\tif err!=nil{\n\t\tfmt.Println(err.Error())\n\t}else {\n\t\tfmt.Println(reply)\n\t}\n}\nfunc(re *redisapi)Pool(){\n\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst (\n\tnamespace = \"apache\" \/\/ For Prometheus metrics.\n)\n\nvar (\n\tlisteningAddress = flag.String(\"telemetry.address\", \":9117\", \"Address on which to expose metrics.\")\n\tmetricsEndpoint = flag.String(\"telemetry.endpoint\", \"\/metrics\", \"Path under which to expose metrics.\")\n\tscrapeURI = flag.String(\"scrape_uri\", \"http:\/\/localhost\/server-status\/?auto\", \"URI to apache stub status page.\")\n\tinsecure = flag.Bool(\"insecure\", false, \"Ignore server certificate if using https.\")\n)\n\ntype Exporter struct {\n\tURI string\n\tmutex sync.Mutex\n\tclient *http.Client\n\n\tup prometheus.Gauge\n\tscrapeFailures prometheus.Counter\n\taccessesTotal prometheus.Counter\n\tkBytesTotal prometheus.Counter\n\tuptime prometheus.Counter\n\tworkers *prometheus.GaugeVec\n\tscoreboard *prometheus.GaugeVec\n\tconnections *prometheus.GaugeVec\n}\n\nfunc NewExporter(uri string) *Exporter {\n\treturn &Exporter{\n\t\tURI: uri,\n\t\tup: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"up\",\n\t\t\tHelp: \"Could the apache server be reached.\",\n\t\t}),\n\t\tscrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_scrape_failures_total\",\n\t\t\tHelp: \"Number of errors while scraping apache.\",\n\t\t}),\n\t\taccessesTotal: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"accesses_total\",\n\t\t\tHelp: \"Current total apache accesses\",\n\t\t}),\n\t\tkBytesTotal: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"sent_kilobytes_total\",\n\t\t\tHelp: \"Current total kbytes sent\",\n\t\t}),\n\t\tuptime: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"uptime_seconds_total\",\n\t\t\tHelp: \"Current uptime in seconds\",\n\t\t}),\n\t\tworkers: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"workers\",\n\t\t\tHelp: \"Apache worker statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tscoreboard: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"scoreboard\",\n\t\t\tHelp: \"Apache scoreboard statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tconnections: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"connections\",\n\t\t\tHelp: \"Apache connection statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: *insecure},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.up.Describe(ch)\n\te.scrapeFailures.Describe(ch)\n\te.accessesTotal.Describe(ch)\n\te.kBytesTotal.Describe(ch)\n\te.uptime.Describe(ch)\n\te.workers.Describe(ch)\n\te.scoreboard.Describe(ch)\n\te.connections.Describe(ch)\n}\n\n\/\/ Split colon separated string into two fields\nfunc splitkv(s string) (string, string) {\n\n\tif len(s) == 0 {\n\t\treturn s, s\n\t}\n\n\tslice := strings.SplitN(s, \":\", 2)\n\n\tif len(slice) == 1 {\n\t\treturn slice[0], \"\"\n\t}\n\n\treturn strings.TrimSpace(slice[0]), strings.TrimSpace(slice[1])\n}\n\nfunc (e *Exporter) updateScoreboard(scoreboard string) {\n\te.scoreboard.Reset()\n\tfor _, worker_status := range scoreboard {\n\t\ts := string(worker_status)\n\t\tswitch {\n\t\tcase s == \"_\":\n\t\t\te.scoreboard.WithLabelValues(\"idle\").Inc()\n\t\tcase s == \"S\":\n\t\t\te.scoreboard.WithLabelValues(\"startup\").Inc()\n\t\tcase s == \"R\":\n\t\t\te.scoreboard.WithLabelValues(\"read\").Inc()\n\t\tcase s == \"W\":\n\t\t\te.scoreboard.WithLabelValues(\"reply\").Inc()\n\t\tcase s == \"K\":\n\t\t\te.scoreboard.WithLabelValues(\"keepalive\").Inc()\n\t\tcase s == \"D\":\n\t\t\te.scoreboard.WithLabelValues(\"dns\").Inc()\n\t\tcase s == \"C\":\n\t\t\te.scoreboard.WithLabelValues(\"closing\").Inc()\n\t\tcase s == \"L\":\n\t\t\te.scoreboard.WithLabelValues(\"logging\").Inc()\n\t\tcase s == \"G\":\n\t\t\te.scoreboard.WithLabelValues(\"graceful_stop\").Inc()\n\t\tcase s == \"I\":\n\t\t\te.scoreboard.WithLabelValues(\"idle_cleanup\").Inc()\n\t\tcase s == \".\":\n\t\t\te.scoreboard.WithLabelValues(\"open_slot\").Inc()\n\t\t}\n\t}\n}\n\nfunc (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\te.up.Collect(ch)\n\t\treturn fmt.Errorf(\"Error scraping apache: %v\", err)\n\t}\n\te.up.Set(1)\n\te.up.Collect(ch)\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tif err != nil {\n\t\t\tdata = []byte(err.Error())\n\t\t}\n\t\treturn fmt.Errorf(\"Status %s (%d): %s\", resp.Status, resp.StatusCode, data)\n\t}\n\n\tlines := strings.Split(string(data), \"\\n\")\n\n\tconnectionInfo := false\n\n\tfor _, l := range lines {\n\t\tkey, v := splitkv(l)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch {\n\t\tcase key == \"Total Accesses\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.accessesTotal.Set(val)\n\t\t\te.accessesTotal.Collect(ch)\n\t\tcase key == \"Total kBytes\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.kBytesTotal.Set(val)\n\t\t\te.kBytesTotal.Collect(ch)\n\t\tcase key == \"Uptime\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.uptime.Set(val)\n\t\t\te.uptime.Collect(ch)\n\t\tcase key == \"BusyWorkers\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.workers.WithLabelValues(\"busy\").Set(val)\n\t\tcase key == \"IdleWorkers\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.workers.WithLabelValues(\"idle\").Set(val)\n\t\tcase key == \"Scoreboard\":\n\t\t\te.updateScoreboard(v)\n\t\t\te.scoreboard.Collect(ch)\n\t\tcase key == \"ConnsTotal\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.connections.WithLabelValues(\"total\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncWriting\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.connections.WithLabelValues(\"writing\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncKeepAlive\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.connections.WithLabelValues(\"keepalive\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncClosing\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.connections.WithLabelValues(\"closing\").Set(val)\n\t\t\tconnectionInfo = true\n\t\t}\n\n\t}\n\n\te.workers.Collect(ch)\n\tif connectionInfo {\n\t\te.connections.Collect(ch)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() \/\/ To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Printf(\"Error scraping apache: %s\", err)\n\t\te.scrapeFailures.Inc()\n\t\te.scrapeFailures.Collect(ch)\n\t}\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\n\texporter := NewExporter(*scrapeURI)\n\tprometheus.MustRegister(exporter)\n\n\tlog.Printf(\"Starting Server: %s\", *listeningAddress)\n\thttp.Handle(*metricsEndpoint, prometheus.Handler())\n\tlog.Fatal(http.ListenAndServe(*listeningAddress, nil))\n}\n<commit_msg>Change `NewCounter` to `MustNewConstMetric` for some metrics<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst (\n\tnamespace = \"apache\" \/\/ For Prometheus metrics.\n)\n\nvar (\n\tlisteningAddress = flag.String(\"telemetry.address\", \":9117\", \"Address on which to expose metrics.\")\n\tmetricsEndpoint = flag.String(\"telemetry.endpoint\", \"\/metrics\", \"Path under which to expose metrics.\")\n\tscrapeURI = flag.String(\"scrape_uri\", \"http:\/\/localhost\/server-status\/?auto\", \"URI to apache stub status page.\")\n\tinsecure = flag.Bool(\"insecure\", false, \"Ignore server certificate if using https.\")\n)\n\ntype Exporter struct {\n\tURI string\n\tmutex sync.Mutex\n\tclient *http.Client\n\n\tup *prometheus.Desc\n\tscrapeFailures prometheus.Counter\n\taccessesTotal *prometheus.Desc\n\tkBytesTotal *prometheus.Desc\n\tuptime *prometheus.Desc\n\tworkers *prometheus.GaugeVec\n\tscoreboard *prometheus.GaugeVec\n\tconnections *prometheus.GaugeVec\n}\n\nfunc NewExporter(uri string) *Exporter {\n\treturn &Exporter{\n\t\tURI: uri,\n\t\tup: prometheus.NewDesc(\n prometheus.BuildFQName(namespace, \"\", \"up\"),\n \"Could the apache server be reached\",\n nil,\n\t\t\tnil),\n scrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{\n Namespace: namespace,\n Name: \"exporter_scrape_failures_total\",\n Help: \"Number of errors while scraping apache.\",\n }),\n accessesTotal: prometheus.NewDesc(\n prometheus.BuildFQName(namespace, \"\", \"accesses_total\"),\n \"Current total apache accesses\",\n nil,\n nil),\n kBytesTotal: prometheus.NewDesc(\n prometheus.BuildFQName(namespace, \"\", \"sent_kilobytes_total\"),\n \"Current total kbytes sent\",\n nil,\n nil),\n uptime: prometheus.NewDesc(\n prometheus.BuildFQName(namespace, \"\", \"uptime_seconds_total\"),\n \"Current uptime in seconds\",\n nil,\n nil),\n\t\tworkers: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"workers\",\n\t\t\tHelp: \"Apache worker statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tscoreboard: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"scoreboard\",\n\t\t\tHelp: \"Apache scoreboard statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tconnections: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"connections\",\n\t\t\tHelp: \"Apache connection statuses\",\n\t\t},\n\t\t\t[]string{\"state\"},\n\t\t),\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: *insecure},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n ch <- e.up\n ch <- e.accessesTotal\n ch <- e.kBytesTotal\n ch <- e.uptime\n e.scrapeFailures.Describe(ch)\n e.workers.Describe(ch)\n e.scoreboard.Describe(ch)\n e.connections.Describe(ch)\n}\n\n\/\/ Split colon separated string into two fields\nfunc splitkv(s string) (string, string) {\n\n\tif len(s) == 0 {\n\t\treturn s, s\n\t}\n\n\tslice := strings.SplitN(s, \":\", 2)\n\n\tif len(slice) == 1 {\n\t\treturn slice[0], \"\"\n\t}\n\n\treturn strings.TrimSpace(slice[0]), strings.TrimSpace(slice[1])\n}\n\nfunc (e *Exporter) updateScoreboard(scoreboard string) {\n\te.scoreboard.Reset()\n\tfor _, worker_status := range scoreboard {\n\t\ts := string(worker_status)\n\t\tswitch {\n\t\tcase s == \"_\":\n\t\t\te.scoreboard.WithLabelValues(\"idle\").Inc()\n\t\tcase s == \"S\":\n\t\t\te.scoreboard.WithLabelValues(\"startup\").Inc()\n\t\tcase s == \"R\":\n\t\t\te.scoreboard.WithLabelValues(\"read\").Inc()\n\t\tcase s == \"W\":\n\t\t\te.scoreboard.WithLabelValues(\"reply\").Inc()\n\t\tcase s == \"K\":\n\t\t\te.scoreboard.WithLabelValues(\"keepalive\").Inc()\n\t\tcase s == \"D\":\n\t\t\te.scoreboard.WithLabelValues(\"dns\").Inc()\n\t\tcase s == \"C\":\n\t\t\te.scoreboard.WithLabelValues(\"closing\").Inc()\n\t\tcase s == \"L\":\n\t\t\te.scoreboard.WithLabelValues(\"logging\").Inc()\n\t\tcase s == \"G\":\n\t\t\te.scoreboard.WithLabelValues(\"graceful_stop\").Inc()\n\t\tcase s == \"I\":\n\t\t\te.scoreboard.WithLabelValues(\"idle_cleanup\").Inc()\n\t\tcase s == \".\":\n\t\t\te.scoreboard.WithLabelValues(\"open_slot\").Inc()\n\t\t}\n\t}\n}\n\nfunc (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 0)\n\t\treturn fmt.Errorf(\"Error scraping apache: %v\", err)\n\t}\n ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 1)\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tif err != nil {\n\t\t\tdata = []byte(err.Error())\n\t\t}\n\t\treturn fmt.Errorf(\"Status %s (%d): %s\", resp.Status, resp.StatusCode, data)\n\t}\n\n\tlines := strings.Split(string(data), \"\\n\")\n\n\tconnectionInfo := false\n\n\tfor _, l := range lines {\n\t\tkey, v := splitkv(l)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch {\n\t\tcase key == \"Total Accesses\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n ch <- prometheus.MustNewConstMetric(e.accessesTotal, prometheus.CounterValue, val)\n\t\tcase key == \"Total kBytes\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n ch <- prometheus.MustNewConstMetric(e.kBytesTotal, prometheus.CounterValue, val)\n\t\tcase key == \"Uptime\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n ch <- prometheus.MustNewConstMetric(e.uptime, prometheus.CounterValue, val)\n\t\tcase key == \"BusyWorkers\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.workers.WithLabelValues(\"busy\").Set(val)\n\t\tcase key == \"IdleWorkers\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.workers.WithLabelValues(\"idle\").Set(val)\n\t\tcase key == \"Scoreboard\":\n\t\t\te.updateScoreboard(v)\n\t\t\te.scoreboard.Collect(ch)\n\t\tcase key == \"ConnsTotal\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.connections.WithLabelValues(\"total\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncWriting\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.connections.WithLabelValues(\"writing\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncKeepAlive\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.connections.WithLabelValues(\"keepalive\").Set(val)\n\t\t\tconnectionInfo = true\n\t\tcase key == \"ConnsAsyncClosing\":\n\t\t\tval, err := strconv.ParseFloat(v, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.connections.WithLabelValues(\"closing\").Set(val)\n\t\t\tconnectionInfo = true\n\t\t}\n\n\t}\n\n\te.workers.Collect(ch)\n\tif connectionInfo {\n\t\te.connections.Collect(ch)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() \/\/ To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Printf(\"Error scraping apache: %s\", err)\n\t\te.scrapeFailures.Inc()\n\t\te.scrapeFailures.Collect(ch)\n\t}\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\n\texporter := NewExporter(*scrapeURI)\n\tprometheus.MustRegister(exporter)\n\n\tlog.Printf(\"Starting Server: %s\", *listeningAddress)\n\thttp.Handle(*metricsEndpoint, prometheus.Handler())\n\tlog.Fatal(http.ListenAndServe(*listeningAddress, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package zest\n\nimport \"fmt\"\n\n\/\/ APIError defines the format of Zest API errors.\ntype APIError struct {\n\t\/\/ The status code.\n\tStatus int `json:\"status\"`\n\t\/\/ The description of the API error.\n\tDescription string `json:\"description\"`\n\t\/\/ A raw description of what triggered the API error.\n\tRaw string `json:\"raw\"`\n\t\/\/ The token uniquely identifying the API error.\n\tErrorCode string `json:\"errorCode\"`\n}\n\nfunc (e APIError) Error() string {\n\treturn fmt.Sprintf(\"%s : %s\", e.ErrorCode, e.Description)\n}\n<commit_msg>Params added in Zest api errors.<commit_after>package zest\n\nimport \"fmt\"\n\n\/\/ APIError defines the format of Zest API errors.\ntype APIError struct {\n\t\/\/ The status code.\n\tStatus int `json:\"status\"`\n\t\/\/ The description of the API error.\n\tDescription string `json:\"description\"`\n\t\/\/ A raw description of what triggered the API error.\n\tRaw string `json:\"raw\"`\n\t\/\/ The token uniquely identifying the API error.\n\tErrorCode string `json:\"errorCode\"`\n\t\/\/ Additional infos.\n\tParams map[string]interface{} `json:\"params\"`\n}\n\nfunc (e APIError) Error() string {\n\treturn fmt.Sprintf(\"%s : %s\", e.ErrorCode, e.Description)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package awsfaker supports the creation of test doubles for AWS APIs.\n\/\/\n\/\/ To use, build a \"backend\" for each AWS service that you use.\n\/\/ The backend should implement the subset of that API that you need.\n\/\/ Each API call should be implemented as a backend method like this\n\/\/\tfunc (b *MyBackend) SomeAction(input *service.SomeActionInput) (*service.SomeActionOutput, error)\n\/\/ Then initialize an HTTP server for each backend\n\/\/\tmyBackend := &MyBackend{ ... }\n\/\/\tfakeServer := httptest.NewServer(awsfaker.New(myBackend))\n\/\/ Finally, initialize your code under test, overriding the default AWS endpoint\n\/\/ to instead use your fake:\n\/\/\tapp := myapp.App{ AWSEndpointOverride: fakeServer.URL }\n\/\/\tapp.Run()\n\/\/\n\/\/ The method signatures of the backend match those of the service\n\/\/ interfaces of the package github.com\/aws\/aws-sdk-go\n\/\/ For example, a complete implementation of AWS CloudFormation would match the\n\/\/ CloudFormationAPI interface here: https:\/\/godoc.org\/github.com\/aws\/aws-sdk-go\/service\/cloudformation\/cloudformationiface\n\/\/ But your backend need only implement those methods used by your code under test.\npackage awsfaker\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/rosenhouse\/awsfaker\/protocols\/query\"\n)\n\n\/\/ New returns a new FakeHandler that will dispatch incoming requests to\n\/\/ the given service backend.\n\/\/\n\/\/ A service backend should represent one AWS service, e.g. EC2\n\/\/ A backend should implement some or all of the actions of the service.\n\/\/ The methods on the backend should have signatures like\n\/\/\tfunc (b *MyBackend) SomeAction(input *service.SomeActionInput) (*service.SomeActionOutput, error)\n\/\/ where the input and output types are those in github.com\/aws\/aws-sdk-go\n\/\/ When returning an error from a backend method, use the ErrorResponse type.\nfunc New(serviceBackend interface{}) http.Handler {\n\treturn query.New(serviceBackend)\n}\n\n\/\/ An ErrorResponse represents an error from a backend method\n\/\/\n\/\/ If a backend method returns an instance of ErrorResponse, then ServeHTTP\n\/\/ will respond with the given HTTPStatusCode and marshal the AWSErrorCode and\n\/\/ AWSErrorMessage fields appropriately in the HTTP response body.\ntype ErrorResponse struct {\n\tAWSErrorCode string\n\tAWSErrorMessage string\n\tHTTPStatusCode int\n}\n\nfunc (e *ErrorResponse) Error() string {\n\treturn fmt.Sprintf(\"%T: %+v\", e, *e)\n}\n<commit_msg>Godoc updates<commit_after>\/\/ Package awsfaker supports the creation of test doubles for AWS APIs.\n\/\/\n\/\/ To use, build a \"backend\" for each AWS service that you use.\n\/\/ The backend should implement the subset of that API that you need.\n\/\/ Each API call should be implemented as a backend method like this\n\/\/\tfunc (b *MyBackend) SomeAction(input *service.SomeActionInput) (*service.SomeActionOutput, error)\n\/\/ Then initialize an HTTP server for each backend\n\/\/\tmyBackend := &MyBackend{ ... }\n\/\/\tfakeServer := httptest.NewServer(awsfaker.New(myBackend))\n\/\/ Finally, initialize your code under test, overriding the default AWS endpoint\n\/\/ to instead use your fake:\n\/\/\tapp := myapp.App{ AWSEndpointOverride: fakeServer.URL }\n\/\/\tapp.Run()\n\/\/\n\/\/ The method signatures of the backend match those of the service\n\/\/ interfaces of the package github.com\/aws\/aws-sdk-go\n\/\/ For example, a complete implementation of AWS CloudFormation would match the\n\/\/ CloudFormationAPI interface here: https:\/\/godoc.org\/github.com\/aws\/aws-sdk-go\/service\/cloudformation\/cloudformationiface\n\/\/ But your backend need only implement those methods used by your code under test.\npackage awsfaker\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/rosenhouse\/awsfaker\/protocols\/query\"\n)\n\n\/\/ New returns a new http.Handler that will dispatch incoming requests to\n\/\/ the given service backend, decoding requests and encoding responses in the\n\/\/ format used by that service.\n\/\/\n\/\/ A backend should represent one AWS service, e.g. EC2, and implement the subset\n\/\/ of API actions required by the code under test.\n\/\/ The methods on the backend should have signatures like\n\/\/\tfunc (b *MyBackend) SomeAction(input *service.SomeActionInput) (*service.SomeActionOutput, error)\n\/\/ where the input and output types are those in github.com\/aws\/aws-sdk-go\n\/\/ When returning an error from a backend method, use the ErrorResponse type.\nfunc New(serviceBackend interface{}) http.Handler {\n\treturn query.New(serviceBackend)\n}\n\n\/\/ An ErrorResponse represents an error from a backend method\n\/\/\n\/\/ If a backend method returns an instance of ErrorResponse, then the handler\n\/\/ will respond with the given HTTPStatusCode and marshal the AWSErrorCode and\n\/\/ AWSErrorMessage fields appropriately in the HTTP response body.\ntype ErrorResponse struct {\n\tAWSErrorCode string\n\tAWSErrorMessage string\n\tHTTPStatusCode int\n}\n\nfunc (e *ErrorResponse) Error() string {\n\treturn fmt.Sprintf(\"%T: %+v\", e, *e)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\tgax \"github.com\/googleapis\/gax-go\"\n\n\t\"cloud.google.com\/go\/internal\"\n\t\"cloud.google.com\/go\/internal\/version\"\n\n\t\"google.golang.org\/api\/googleapi\"\n\t\"google.golang.org\/api\/option\"\n\thtransport \"google.golang.org\/api\/transport\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\tbq \"google.golang.org\/api\/bigquery\/v2\"\n)\n\nconst (\n\tprodAddr = \"https:\/\/www.googleapis.com\/bigquery\/v2\/\"\n\tScope = \"https:\/\/www.googleapis.com\/auth\/bigquery\"\n\tuserAgent = \"gcloud-golang-bigquery\/20160429\"\n)\n\nvar xGoogHeader = fmt.Sprintf(\"gl-go\/%s gccl\/%s\", version.Go(), version.Repo)\n\nfunc setClientHeader(headers http.Header) {\n\theaders.Set(\"x-goog-api-client\", xGoogHeader)\n}\n\n\/\/ Client may be used to perform BigQuery operations.\ntype Client struct {\n\tprojectID string\n\tbqs *bq.Service\n}\n\n\/\/ NewClient constructs a new Client which can perform BigQuery operations.\n\/\/ Operations performed via the client are billed to the specified GCP project.\nfunc NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {\n\to := []option.ClientOption{\n\t\toption.WithEndpoint(prodAddr),\n\t\toption.WithScopes(Scope),\n\t\toption.WithUserAgent(userAgent),\n\t}\n\to = append(o, opts...)\n\thttpClient, endpoint, err := htransport.NewClient(ctx, o...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bigquery: dialing: %v\", err)\n\t}\n\tbqs, err := bq.New(httpClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bigquery: constructing client: %v\", err)\n\t}\n\tbqs.BasePath = endpoint\n\tc := &Client{\n\t\tprojectID: projectID,\n\t\tbqs: bqs,\n\t}\n\treturn c, nil\n}\n\n\/\/ Close closes any resources held by the client.\n\/\/ Close should be called when the client is no longer needed.\n\/\/ It need not be called at program exit.\nfunc (c *Client) Close() error {\n\treturn nil\n}\n\n\/\/ Calls the Jobs.Insert RPC and returns a Job.\nfunc (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {\n\tcall := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)\n\tsetClientHeader(call.Header())\n\tif media != nil {\n\t\tcall.Media(media)\n\t}\n\tvar res *bq.Job\n\tvar err error\n\tinvoke := func() error {\n\t\tres, err = call.Do()\n\t\treturn err\n\t}\n\t\/\/ A job with a client-generated ID can be retried; the presence of the\n\t\/\/ ID makes the insert operation idempotent.\n\t\/\/ We don't retry if there is media, because it is an io.Reader. We'd\n\t\/\/ have to read the contents and keep it in memory, and that could be expensive.\n\t\/\/ TODO(jba): Look into retrying if media != nil.\n\tif job.JobReference != nil && media == nil {\n\t\terr = runWithRetry(ctx, invoke)\n\t} else {\n\t\terr = invoke()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bqToJob(res, c)\n}\n\n\/\/ Convert a number of milliseconds since the Unix epoch to a time.Time.\n\/\/ Treat an input of zero specially: convert it to the zero time,\n\/\/ rather than the start of the epoch.\nfunc unixMillisToTime(m int64) time.Time {\n\tif m == 0 {\n\t\treturn time.Time{}\n\t}\n\treturn time.Unix(0, m*1e6)\n}\n\n\/\/ runWithRetry calls the function until it returns nil or a non-retryable error, or\n\/\/ the context is done.\n\/\/ See the similar function in ..\/storage\/invoke.go. The main difference is the\n\/\/ reason for retrying.\nfunc runWithRetry(ctx context.Context, call func() error) error {\n\t\/\/ These parameters match the suggestions in https:\/\/cloud.google.com\/bigquery\/sla.\n\tbackoff := gax.Backoff{\n\t\tInitial: 1 * time.Second,\n\t\tMax: 32 * time.Second,\n\t\tMultiplier: 2,\n\t}\n\treturn internal.Retry(ctx, backoff, func() (stop bool, err error) {\n\t\terr = call()\n\t\tif err == nil {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn !retryableError(err), err\n\t})\n}\n\n\/\/ This is the correct definition of retryable according to the BigQuery team.\nfunc retryableError(err error) bool {\n\te, ok := err.(*googleapi.Error)\n\tif !ok {\n\t\treturn false\n\t}\n\tvar reason string\n\tif len(e.Errors) > 0 {\n\t\treason = e.Errors[0].Reason\n\t}\n\treturn reason == \"backendError\" || reason == \"rateLimitExceeded\"\n}\n<commit_msg>bigquery: retry on status code 502<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage bigquery\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\tgax \"github.com\/googleapis\/gax-go\"\n\n\t\"cloud.google.com\/go\/internal\"\n\t\"cloud.google.com\/go\/internal\/version\"\n\n\t\"google.golang.org\/api\/googleapi\"\n\t\"google.golang.org\/api\/option\"\n\thtransport \"google.golang.org\/api\/transport\/http\"\n\n\t\"golang.org\/x\/net\/context\"\n\tbq \"google.golang.org\/api\/bigquery\/v2\"\n)\n\nconst (\n\tprodAddr = \"https:\/\/www.googleapis.com\/bigquery\/v2\/\"\n\tScope = \"https:\/\/www.googleapis.com\/auth\/bigquery\"\n\tuserAgent = \"gcloud-golang-bigquery\/20160429\"\n)\n\nvar xGoogHeader = fmt.Sprintf(\"gl-go\/%s gccl\/%s\", version.Go(), version.Repo)\n\nfunc setClientHeader(headers http.Header) {\n\theaders.Set(\"x-goog-api-client\", xGoogHeader)\n}\n\n\/\/ Client may be used to perform BigQuery operations.\ntype Client struct {\n\tprojectID string\n\tbqs *bq.Service\n}\n\n\/\/ NewClient constructs a new Client which can perform BigQuery operations.\n\/\/ Operations performed via the client are billed to the specified GCP project.\nfunc NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {\n\to := []option.ClientOption{\n\t\toption.WithEndpoint(prodAddr),\n\t\toption.WithScopes(Scope),\n\t\toption.WithUserAgent(userAgent),\n\t}\n\to = append(o, opts...)\n\thttpClient, endpoint, err := htransport.NewClient(ctx, o...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bigquery: dialing: %v\", err)\n\t}\n\tbqs, err := bq.New(httpClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bigquery: constructing client: %v\", err)\n\t}\n\tbqs.BasePath = endpoint\n\tc := &Client{\n\t\tprojectID: projectID,\n\t\tbqs: bqs,\n\t}\n\treturn c, nil\n}\n\n\/\/ Close closes any resources held by the client.\n\/\/ Close should be called when the client is no longer needed.\n\/\/ It need not be called at program exit.\nfunc (c *Client) Close() error {\n\treturn nil\n}\n\n\/\/ Calls the Jobs.Insert RPC and returns a Job.\nfunc (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {\n\tcall := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)\n\tsetClientHeader(call.Header())\n\tif media != nil {\n\t\tcall.Media(media)\n\t}\n\tvar res *bq.Job\n\tvar err error\n\tinvoke := func() error {\n\t\tres, err = call.Do()\n\t\treturn err\n\t}\n\t\/\/ A job with a client-generated ID can be retried; the presence of the\n\t\/\/ ID makes the insert operation idempotent.\n\t\/\/ We don't retry if there is media, because it is an io.Reader. We'd\n\t\/\/ have to read the contents and keep it in memory, and that could be expensive.\n\t\/\/ TODO(jba): Look into retrying if media != nil.\n\tif job.JobReference != nil && media == nil {\n\t\terr = runWithRetry(ctx, invoke)\n\t} else {\n\t\terr = invoke()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bqToJob(res, c)\n}\n\n\/\/ Convert a number of milliseconds since the Unix epoch to a time.Time.\n\/\/ Treat an input of zero specially: convert it to the zero time,\n\/\/ rather than the start of the epoch.\nfunc unixMillisToTime(m int64) time.Time {\n\tif m == 0 {\n\t\treturn time.Time{}\n\t}\n\treturn time.Unix(0, m*1e6)\n}\n\n\/\/ runWithRetry calls the function until it returns nil or a non-retryable error, or\n\/\/ the context is done.\n\/\/ See the similar function in ..\/storage\/invoke.go. The main difference is the\n\/\/ reason for retrying.\nfunc runWithRetry(ctx context.Context, call func() error) error {\n\t\/\/ These parameters match the suggestions in https:\/\/cloud.google.com\/bigquery\/sla.\n\tbackoff := gax.Backoff{\n\t\tInitial: 1 * time.Second,\n\t\tMax: 32 * time.Second,\n\t\tMultiplier: 2,\n\t}\n\treturn internal.Retry(ctx, backoff, func() (stop bool, err error) {\n\t\terr = call()\n\t\tif err == nil {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn !retryableError(err), err\n\t})\n}\n\n\/\/ This is the correct definition of retryable according to the BigQuery team.\nfunc retryableError(err error) bool {\n\te, ok := err.(*googleapi.Error)\n\tif !ok {\n\t\treturn false\n\t}\n\tvar reason string\n\tif len(e.Errors) > 0 {\n\t\treason = e.Errors[0].Reason\n\t}\n\treturn e.Code == http.StatusBadGateway || reason == \"backendError\" || reason == \"rateLimitExceeded\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015-2018 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n\n\t\"github.com\/coreos\/mantle\/kola\"\n\t\"github.com\/coreos\/mantle\/platform\"\n\t\"github.com\/coreos\/mantle\/platform\/conf\"\n\t\"github.com\/coreos\/mantle\/platform\/machine\/qemu\"\n)\n\nvar (\n\tcmdSpawn = &cobra.Command{\n\t\tRun: runSpawn,\n\t\tPreRun: preRun,\n\t\tUse: \"spawn\",\n\t\tShort: \"spawn a CoreOS instance\",\n\t}\n\n\tspawnNodeCount int\n\tspawnUserData string\n\tspawnDetach bool\n\tspawnShell bool\n\tspawnRemove bool\n\tspawnVerbose bool\n\tspawnMachineOptions string\n\tspawnSetSSHKeys bool\n\tspawnSSHKeys []string\n)\n\nfunc init() {\n\tcmdSpawn.Flags().IntVarP(&spawnNodeCount, \"nodecount\", \"c\", 1, \"number of nodes to spawn\")\n\tcmdSpawn.Flags().StringVarP(&spawnUserData, \"userdata\", \"u\", \"\", \"file containing userdata to pass to the instances\")\n\tcmdSpawn.Flags().BoolVarP(&spawnDetach, \"detach\", \"t\", false, \"-kv --shell=false --remove=false\")\n\tcmdSpawn.Flags().BoolVarP(&spawnShell, \"shell\", \"s\", true, \"spawn a shell in an instance before exiting\")\n\tcmdSpawn.Flags().BoolVarP(&spawnRemove, \"remove\", \"r\", true, \"remove instances after shell exits\")\n\tcmdSpawn.Flags().BoolVarP(&spawnVerbose, \"verbose\", \"v\", false, \"output information about spawned instances\")\n\tcmdSpawn.Flags().StringVar(&spawnMachineOptions, \"qemu-options\", \"\", \"experimental: path to QEMU machine options json\")\n\tcmdSpawn.Flags().BoolVarP(&spawnSetSSHKeys, \"keys\", \"k\", false, \"add SSH keys from --key options\")\n\tcmdSpawn.Flags().StringSliceVar(&spawnSSHKeys, \"key\", nil, \"path to SSH public key (default: SSH agent + ~\/.ssh\/id_{rsa,dsa,ecdsa,ed25519}.pub)\")\n\troot.AddCommand(cmdSpawn)\n}\n\nfunc runSpawn(cmd *cobra.Command, args []string) {\n\tif err := doSpawn(cmd, args); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc doSpawn(cmd *cobra.Command, args []string) error {\n\tvar err error\n\n\tif spawnDetach {\n\t\tspawnSetSSHKeys = true\n\t\tspawnVerbose = true\n\t\tspawnShell = false\n\t\tspawnRemove = false\n\t}\n\n\tif spawnNodeCount <= 0 {\n\t\treturn fmt.Errorf(\"Cluster Failed: nodecount must be one or more\")\n\t}\n\n\tvar userdata *conf.UserData\n\tif spawnUserData != \"\" {\n\t\tuserbytes, err := ioutil.ReadFile(spawnUserData)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Reading userdata failed: %v\", err)\n\t\t}\n\t\tuserdata = conf.Unknown(string(userbytes))\n\t}\n\tif spawnSetSSHKeys {\n\t\tif userdata == nil {\n\t\t\tuserdata = conf.Ignition(`{\"ignition\": {\"version\": \"2.0.0\"}}`)\n\t\t}\n\t\t\/\/ If the user explicitly passed empty userdata, the userdata\n\t\t\/\/ will be non-nil but Empty, and adding SSH keys will\n\t\t\/\/ silently fail.\n\t\tuserdata, err = addSSHKeys(userdata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\toutputDir, err = kola.SetupOutputDir(outputDir, kolaPlatform)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Setup failed: %v\", err)\n\t}\n\n\tcluster, err := kola.NewCluster(kolaPlatform, &platform.RuntimeConfig{\n\t\tOutputDir: outputDir,\n\t\tAllowFailedUnits: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cluster failed: %v\", err)\n\t}\n\n\tif spawnRemove {\n\t\tdefer cluster.Destroy()\n\t}\n\n\tvar someMach platform.Machine\n\tfor i := 0; i < spawnNodeCount; i++ {\n\t\tvar mach platform.Machine\n\t\tvar err error\n\t\tif kolaPlatform == \"qemu\" && spawnMachineOptions != \"\" {\n\t\t\tvar b []byte\n\t\t\tb, err = ioutil.ReadFile(spawnMachineOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not read machine options: %v\", err)\n\t\t\t}\n\n\t\t\tvar machineOpts qemu.MachineOptions\n\t\t\terr = json.Unmarshal(b, &machineOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not unmarshal machine options: %v\", err)\n\t\t\t}\n\n\t\t\tmach, err = cluster.(*qemu.Cluster).NewMachineWithOptions(userdata, machineOpts)\n\t\t} else {\n\t\t\tmach, err = cluster.NewMachine(userdata)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Spawning instance failed: %v\", err)\n\t\t}\n\n\t\tif spawnVerbose {\n\t\t\tfmt.Printf(\"Machine %v spawned at %v\\n\", mach.ID(), mach.IP())\n\t\t}\n\n\t\tsomeMach = mach\n\t}\n\n\tif spawnShell {\n\t\tif spawnRemove {\n\t\t\treader := strings.NewReader(`PS1=\"\\033[0;31m[bound]\\033[0m $PS1\"` + \"\\n\")\n\t\t\tif err := platform.InstallFile(reader, someMach, \"\/etc\/profile.d\/kola-spawn-bound.sh\"); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Setting shell prompt failed: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif err := platform.Manhole(someMach); err != nil {\n\t\t\treturn fmt.Errorf(\"Manhole failed: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc addSSHKeys(userdata *conf.UserData) (*conf.UserData, error) {\n\t\/\/ if no keys specified, use keys from agent plus ~\/.ssh\/id_{rsa,dsa,ecdsa,ed25519}.pub\n\tif len(spawnSSHKeys) == 0 {\n\t\t\/\/ add keys directly from the agent\n\t\tagentEnv := os.Getenv(\"SSH_AUTH_SOCK\")\n\t\tif agentEnv != \"\" {\n\t\t\tf, err := net.Dial(\"unix\", agentEnv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Couldn't connect to unix socket %q: %v\", agentEnv, err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tagent := agent.NewClient(f)\n\t\t\tkeys, err := agent.List()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Couldn't talk to ssh-agent: %v\", err)\n\t\t\t}\n\t\t\tfor _, key := range keys {\n\t\t\t\tuserdata = userdata.AddKey(*key)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ populate list of key files\n\t\tuserInfo, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, name := range []string{\"id_rsa.pub\", \"id_dsa.pub\", \"id_ecdsa.pub\", \"id_ed25519.pub\"} {\n\t\t\tpath := filepath.Join(userInfo.HomeDir, \".ssh\", name)\n\t\t\tif _, err := os.Stat(path); err == nil {\n\t\t\t\tspawnSSHKeys = append(spawnSSHKeys, path)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ read key files, failing if any are missing\n\tfor _, path := range spawnSSHKeys {\n\t\tkeybytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpkey, comment, _, _, err := ssh.ParseAuthorizedKey(keybytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey := agent.Key{\n\t\t\tFormat: pkey.Type(),\n\t\t\tBlob: pkey.Marshal(),\n\t\t\tComment: comment,\n\t\t}\n\t\tuserdata = userdata.AddKey(key)\n\t}\n\treturn userdata, nil\n}\n<commit_msg>cmd\/kola: fix command line wrapping with [bound] in PS1<commit_after>\/\/ Copyright 2015-2018 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n\n\t\"github.com\/coreos\/mantle\/kola\"\n\t\"github.com\/coreos\/mantle\/platform\"\n\t\"github.com\/coreos\/mantle\/platform\/conf\"\n\t\"github.com\/coreos\/mantle\/platform\/machine\/qemu\"\n)\n\nvar (\n\tcmdSpawn = &cobra.Command{\n\t\tRun: runSpawn,\n\t\tPreRun: preRun,\n\t\tUse: \"spawn\",\n\t\tShort: \"spawn a CoreOS instance\",\n\t}\n\n\tspawnNodeCount int\n\tspawnUserData string\n\tspawnDetach bool\n\tspawnShell bool\n\tspawnRemove bool\n\tspawnVerbose bool\n\tspawnMachineOptions string\n\tspawnSetSSHKeys bool\n\tspawnSSHKeys []string\n)\n\nfunc init() {\n\tcmdSpawn.Flags().IntVarP(&spawnNodeCount, \"nodecount\", \"c\", 1, \"number of nodes to spawn\")\n\tcmdSpawn.Flags().StringVarP(&spawnUserData, \"userdata\", \"u\", \"\", \"file containing userdata to pass to the instances\")\n\tcmdSpawn.Flags().BoolVarP(&spawnDetach, \"detach\", \"t\", false, \"-kv --shell=false --remove=false\")\n\tcmdSpawn.Flags().BoolVarP(&spawnShell, \"shell\", \"s\", true, \"spawn a shell in an instance before exiting\")\n\tcmdSpawn.Flags().BoolVarP(&spawnRemove, \"remove\", \"r\", true, \"remove instances after shell exits\")\n\tcmdSpawn.Flags().BoolVarP(&spawnVerbose, \"verbose\", \"v\", false, \"output information about spawned instances\")\n\tcmdSpawn.Flags().StringVar(&spawnMachineOptions, \"qemu-options\", \"\", \"experimental: path to QEMU machine options json\")\n\tcmdSpawn.Flags().BoolVarP(&spawnSetSSHKeys, \"keys\", \"k\", false, \"add SSH keys from --key options\")\n\tcmdSpawn.Flags().StringSliceVar(&spawnSSHKeys, \"key\", nil, \"path to SSH public key (default: SSH agent + ~\/.ssh\/id_{rsa,dsa,ecdsa,ed25519}.pub)\")\n\troot.AddCommand(cmdSpawn)\n}\n\nfunc runSpawn(cmd *cobra.Command, args []string) {\n\tif err := doSpawn(cmd, args); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc doSpawn(cmd *cobra.Command, args []string) error {\n\tvar err error\n\n\tif spawnDetach {\n\t\tspawnSetSSHKeys = true\n\t\tspawnVerbose = true\n\t\tspawnShell = false\n\t\tspawnRemove = false\n\t}\n\n\tif spawnNodeCount <= 0 {\n\t\treturn fmt.Errorf(\"Cluster Failed: nodecount must be one or more\")\n\t}\n\n\tvar userdata *conf.UserData\n\tif spawnUserData != \"\" {\n\t\tuserbytes, err := ioutil.ReadFile(spawnUserData)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Reading userdata failed: %v\", err)\n\t\t}\n\t\tuserdata = conf.Unknown(string(userbytes))\n\t}\n\tif spawnSetSSHKeys {\n\t\tif userdata == nil {\n\t\t\tuserdata = conf.Ignition(`{\"ignition\": {\"version\": \"2.0.0\"}}`)\n\t\t}\n\t\t\/\/ If the user explicitly passed empty userdata, the userdata\n\t\t\/\/ will be non-nil but Empty, and adding SSH keys will\n\t\t\/\/ silently fail.\n\t\tuserdata, err = addSSHKeys(userdata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\toutputDir, err = kola.SetupOutputDir(outputDir, kolaPlatform)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Setup failed: %v\", err)\n\t}\n\n\tcluster, err := kola.NewCluster(kolaPlatform, &platform.RuntimeConfig{\n\t\tOutputDir: outputDir,\n\t\tAllowFailedUnits: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cluster failed: %v\", err)\n\t}\n\n\tif spawnRemove {\n\t\tdefer cluster.Destroy()\n\t}\n\n\tvar someMach platform.Machine\n\tfor i := 0; i < spawnNodeCount; i++ {\n\t\tvar mach platform.Machine\n\t\tvar err error\n\t\tif kolaPlatform == \"qemu\" && spawnMachineOptions != \"\" {\n\t\t\tvar b []byte\n\t\t\tb, err = ioutil.ReadFile(spawnMachineOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not read machine options: %v\", err)\n\t\t\t}\n\n\t\t\tvar machineOpts qemu.MachineOptions\n\t\t\terr = json.Unmarshal(b, &machineOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not unmarshal machine options: %v\", err)\n\t\t\t}\n\n\t\t\tmach, err = cluster.(*qemu.Cluster).NewMachineWithOptions(userdata, machineOpts)\n\t\t} else {\n\t\t\tmach, err = cluster.NewMachine(userdata)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Spawning instance failed: %v\", err)\n\t\t}\n\n\t\tif spawnVerbose {\n\t\t\tfmt.Printf(\"Machine %v spawned at %v\\n\", mach.ID(), mach.IP())\n\t\t}\n\n\t\tsomeMach = mach\n\t}\n\n\tif spawnShell {\n\t\tif spawnRemove {\n\t\t\treader := strings.NewReader(`PS1=\"\\[\\033[0;31m\\][bound]\\[\\033[0m\\] $PS1\"` + \"\\n\")\n\t\t\tif err := platform.InstallFile(reader, someMach, \"\/etc\/profile.d\/kola-spawn-bound.sh\"); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Setting shell prompt failed: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif err := platform.Manhole(someMach); err != nil {\n\t\t\treturn fmt.Errorf(\"Manhole failed: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc addSSHKeys(userdata *conf.UserData) (*conf.UserData, error) {\n\t\/\/ if no keys specified, use keys from agent plus ~\/.ssh\/id_{rsa,dsa,ecdsa,ed25519}.pub\n\tif len(spawnSSHKeys) == 0 {\n\t\t\/\/ add keys directly from the agent\n\t\tagentEnv := os.Getenv(\"SSH_AUTH_SOCK\")\n\t\tif agentEnv != \"\" {\n\t\t\tf, err := net.Dial(\"unix\", agentEnv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Couldn't connect to unix socket %q: %v\", agentEnv, err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tagent := agent.NewClient(f)\n\t\t\tkeys, err := agent.List()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Couldn't talk to ssh-agent: %v\", err)\n\t\t\t}\n\t\t\tfor _, key := range keys {\n\t\t\t\tuserdata = userdata.AddKey(*key)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ populate list of key files\n\t\tuserInfo, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, name := range []string{\"id_rsa.pub\", \"id_dsa.pub\", \"id_ecdsa.pub\", \"id_ed25519.pub\"} {\n\t\t\tpath := filepath.Join(userInfo.HomeDir, \".ssh\", name)\n\t\t\tif _, err := os.Stat(path); err == nil {\n\t\t\t\tspawnSSHKeys = append(spawnSSHKeys, path)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ read key files, failing if any are missing\n\tfor _, path := range spawnSSHKeys {\n\t\tkeybytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpkey, comment, _, _, err := ssh.ParseAuthorizedKey(keybytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey := agent.Key{\n\t\t\tFormat: pkey.Type(),\n\t\t\tBlob: pkey.Marshal(),\n\t\t\tComment: comment,\n\t\t}\n\t\tuserdata = userdata.AddKey(key)\n\t}\n\treturn userdata, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The OPA Authors. All rights reserved.\n\/\/ Use of this source code is governed by an Apache2\n\/\/ license that can be found in the LICENSE file.\n\npackage tester\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/open-policy-agent\/opa\/topdown\"\n\n\t\"github.com\/open-policy-agent\/opa\/ast\"\n\t\"github.com\/open-policy-agent\/opa\/cover\"\n)\n\n\/\/ Reporter defines the interface for reporting test results.\ntype Reporter interface {\n\n\t\/\/ Report is called with a channel that will contain test results.\n\tReport(ch chan *Result) error\n}\n\n\/\/ PrettyReporter reports test results in a simple human readable format.\ntype PrettyReporter struct {\n\tOutput io.Writer\n\tVerbose bool\n}\n\n\/\/ Report prints the test report to the reporter's output.\nfunc (r PrettyReporter) Report(ch chan *Result) error {\n\n\tdirty := false\n\tvar pass, fail, errs int\n\n\tvar results, failures []*Result\n\tfor tr := range ch {\n\t\tif tr.Pass() {\n\t\t\tpass++\n\t\t} else if tr.Error != nil {\n\t\t\terrs++\n\t\t} else if tr.Fail {\n\t\t\tfail++\n\t\t\tfailures = append(failures, tr)\n\t\t}\n\t\tresults = append(results, tr)\n\t}\n\n\tif fail > 0 && r.Verbose {\n\t\tfmt.Fprintln(r.Output, \"FAILURES\")\n\t\tr.hl()\n\n\t\tfor _, failure := range failures {\n\t\t\tfmt.Fprintln(r.Output, failure)\n\t\t\tfmt.Fprintln(r.Output)\n\t\t\ttopdown.PrettyTrace(newIndentingWriter(r.Output), failure.Trace)\n\t\t\tfmt.Fprintln(r.Output)\n\t\t}\n\n\t\tfmt.Fprintln(r.Output, \"SUMMARY\")\n\t\tr.hl()\n\t}\n\n\t\/\/ Report individual tests.\n\tfor _, tr := range results {\n\t\tif !tr.Pass() || r.Verbose {\n\t\t\tfmt.Fprintln(r.Output, tr)\n\t\t\tdirty = true\n\t\t}\n\t\tif tr.Error != nil {\n\t\t\tfmt.Fprintf(r.Output, \" %v\\n\", tr.Error)\n\t\t}\n\t}\n\n\t\/\/ Report summary of test.\n\tif dirty {\n\t\tr.hl()\n\t}\n\n\ttotal := pass + fail + errs\n\n\tif pass != 0 {\n\t\tfmt.Fprintln(r.Output, \"PASS:\", fmt.Sprintf(\"%d\/%d\", pass, total))\n\t}\n\n\tif fail != 0 {\n\t\tfmt.Fprintln(r.Output, \"FAIL:\", fmt.Sprintf(\"%d\/%d\", fail, total))\n\t}\n\n\tif errs != 0 {\n\t\tfmt.Fprintln(r.Output, \"ERROR:\", fmt.Sprintf(\"%d\/%d\", errs, total))\n\t}\n\n\treturn nil\n}\n\nfunc (r PrettyReporter) hl() {\n\tfmt.Fprintln(r.Output, strings.Repeat(\"-\", 80))\n}\n\n\/\/ JSONReporter reports test results as array of JSON objects.\ntype JSONReporter struct {\n\tOutput io.Writer\n}\n\n\/\/ Report prints the test report to the reporter's output.\nfunc (r JSONReporter) Report(ch chan *Result) error {\n\tvar report []*Result\n\tfor tr := range ch {\n\t\treport = append(report, tr)\n\t}\n\n\tbs, err := json.MarshalIndent(report, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(r.Output, string(bs))\n\treturn nil\n}\n\n\/\/ JSONCoverageReporter reports coverage as a JSON structure.\ntype JSONCoverageReporter struct {\n\tCover *cover.Cover\n\tModules map[string]*ast.Module\n\tOutput io.Writer\n}\n\n\/\/ Report prints the test report to the reporter's output. If any tests fail or\n\/\/ encounter errors, this function returns an error.\nfunc (r JSONCoverageReporter) Report(ch chan *Result) error {\n\tfor tr := range ch {\n\t\tif !tr.Pass() {\n\t\t\tif tr.Error != nil {\n\t\t\t\treturn tr.Error\n\t\t\t}\n\t\t\treturn errors.New(tr.String())\n\t\t}\n\t}\n\treport := r.Cover.Report(r.Modules)\n\tencoder := json.NewEncoder(r.Output)\n\tencoder.SetIndent(\"\", \" \")\n\treturn encoder.Encode(report)\n}\n\ntype indentingWriter struct {\n\tw io.Writer\n}\n\nfunc newIndentingWriter(w io.Writer) indentingWriter {\n\treturn indentingWriter{\n\t\tw: w,\n\t}\n}\n\nfunc (w indentingWriter) Write(bs []byte) (int, error) {\n\tvar written int\n\t\/\/ insert indentation at the start of every line.\n\tindent := true\n\tfor _, b := range bs {\n\t\tif indent {\n\t\t\twrote, err := w.w.Write([]byte(\" \"))\n\t\t\tif err != nil {\n\t\t\t\treturn written, err\n\t\t\t}\n\t\t\twritten += wrote\n\t\t\tindent = false\n\t\t}\n\t\twrote, err := w.w.Write([]byte{b})\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t\twritten += wrote\n\t\tindent = b == '\\n'\n\t}\n\treturn written, nil\n}\n<commit_msg>tester\/reporter: remove ineffectual assignment<commit_after>\/\/ Copyright 2017 The OPA Authors. All rights reserved.\n\/\/ Use of this source code is governed by an Apache2\n\/\/ license that can be found in the LICENSE file.\n\npackage tester\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/open-policy-agent\/opa\/topdown\"\n\n\t\"github.com\/open-policy-agent\/opa\/ast\"\n\t\"github.com\/open-policy-agent\/opa\/cover\"\n)\n\n\/\/ Reporter defines the interface for reporting test results.\ntype Reporter interface {\n\n\t\/\/ Report is called with a channel that will contain test results.\n\tReport(ch chan *Result) error\n}\n\n\/\/ PrettyReporter reports test results in a simple human readable format.\ntype PrettyReporter struct {\n\tOutput io.Writer\n\tVerbose bool\n}\n\n\/\/ Report prints the test report to the reporter's output.\nfunc (r PrettyReporter) Report(ch chan *Result) error {\n\n\tdirty := false\n\tvar pass, fail, errs int\n\n\tvar results, failures []*Result\n\tfor tr := range ch {\n\t\tif tr.Pass() {\n\t\t\tpass++\n\t\t} else if tr.Error != nil {\n\t\t\terrs++\n\t\t} else if tr.Fail {\n\t\t\tfail++\n\t\t\tfailures = append(failures, tr)\n\t\t}\n\t\tresults = append(results, tr)\n\t}\n\n\tif fail > 0 && r.Verbose {\n\t\tfmt.Fprintln(r.Output, \"FAILURES\")\n\t\tr.hl()\n\n\t\tfor _, failure := range failures {\n\t\t\tfmt.Fprintln(r.Output, failure)\n\t\t\tfmt.Fprintln(r.Output)\n\t\t\ttopdown.PrettyTrace(newIndentingWriter(r.Output), failure.Trace)\n\t\t\tfmt.Fprintln(r.Output)\n\t\t}\n\n\t\tfmt.Fprintln(r.Output, \"SUMMARY\")\n\t\tr.hl()\n\t}\n\n\t\/\/ Report individual tests.\n\tfor _, tr := range results {\n\t\tif !tr.Pass() || r.Verbose {\n\t\t\tfmt.Fprintln(r.Output, tr)\n\t\t\tdirty = true\n\t\t}\n\t\tif tr.Error != nil {\n\t\t\tfmt.Fprintf(r.Output, \" %v\\n\", tr.Error)\n\t\t}\n\t}\n\n\t\/\/ Report summary of test.\n\tif dirty {\n\t\tr.hl()\n\t}\n\n\ttotal := pass + fail + errs\n\n\tif pass != 0 {\n\t\tfmt.Fprintln(r.Output, \"PASS:\", fmt.Sprintf(\"%d\/%d\", pass, total))\n\t}\n\n\tif fail != 0 {\n\t\tfmt.Fprintln(r.Output, \"FAIL:\", fmt.Sprintf(\"%d\/%d\", fail, total))\n\t}\n\n\tif errs != 0 {\n\t\tfmt.Fprintln(r.Output, \"ERROR:\", fmt.Sprintf(\"%d\/%d\", errs, total))\n\t}\n\n\treturn nil\n}\n\nfunc (r PrettyReporter) hl() {\n\tfmt.Fprintln(r.Output, strings.Repeat(\"-\", 80))\n}\n\n\/\/ JSONReporter reports test results as array of JSON objects.\ntype JSONReporter struct {\n\tOutput io.Writer\n}\n\n\/\/ Report prints the test report to the reporter's output.\nfunc (r JSONReporter) Report(ch chan *Result) error {\n\tvar report []*Result\n\tfor tr := range ch {\n\t\treport = append(report, tr)\n\t}\n\n\tbs, err := json.MarshalIndent(report, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintln(r.Output, string(bs))\n\treturn nil\n}\n\n\/\/ JSONCoverageReporter reports coverage as a JSON structure.\ntype JSONCoverageReporter struct {\n\tCover *cover.Cover\n\tModules map[string]*ast.Module\n\tOutput io.Writer\n}\n\n\/\/ Report prints the test report to the reporter's output. If any tests fail or\n\/\/ encounter errors, this function returns an error.\nfunc (r JSONCoverageReporter) Report(ch chan *Result) error {\n\tfor tr := range ch {\n\t\tif !tr.Pass() {\n\t\t\tif tr.Error != nil {\n\t\t\t\treturn tr.Error\n\t\t\t}\n\t\t\treturn errors.New(tr.String())\n\t\t}\n\t}\n\treport := r.Cover.Report(r.Modules)\n\tencoder := json.NewEncoder(r.Output)\n\tencoder.SetIndent(\"\", \" \")\n\treturn encoder.Encode(report)\n}\n\ntype indentingWriter struct {\n\tw io.Writer\n}\n\nfunc newIndentingWriter(w io.Writer) indentingWriter {\n\treturn indentingWriter{\n\t\tw: w,\n\t}\n}\n\nfunc (w indentingWriter) Write(bs []byte) (int, error) {\n\tvar written int\n\t\/\/ insert indentation at the start of every line.\n\tindent := true\n\tfor _, b := range bs {\n\t\tif indent {\n\t\t\twrote, err := w.w.Write([]byte(\" \"))\n\t\t\tif err != nil {\n\t\t\t\treturn written, err\n\t\t\t}\n\t\t\twritten += wrote\n\t\t}\n\t\twrote, err := w.w.Write([]byte{b})\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t\twritten += wrote\n\t\tindent = b == '\\n'\n\t}\n\treturn written, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Title:表格,及其下属控件\n\/\/\n\/\/ Description:\n\/\/\n\/\/ Author:black\n\/\/\n\/\/ Createtime:2013-08-08 09:29\n\/\/\n\/\/ Version:1.0\n\/\/\n\/\/ 修改历史:版本号 修改日期 修改人 修改说明\n\/\/\n\/\/ 1.0 2013-08-08 09:29 black 创建文档\npackage lessgo\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n)\n\ntype gridPanel struct {\n\tEntity string `xml:\"entity,attr\"`\n\tPageSize int `xml:\"pageSize,attr\"`\n\tLoadUrl string `xml:\"loadUrl,attr\"`\n\tId string `xml:\"id,attr\"`\n\tTitle string `xml:\"title,attr\"`\n\tWidth string `xml:\"width,attr\"`\n\tHeight string `xml:\"height,attr\"`\n\tColumns []column `xml:\"column\"`\n\tActions []action `xml:\"action\"`\n\tSearchs []search `xml:\"search\"`\n}\n\n\/\/link目前可以支持,直接跳转,打开浏览器新窗口跳转,iframe弹窗,询问提示窗\n\/\/linkType=currentPage,\n\/\/以下为通用配置\n\/\/url 必填\n\/\/iconUrl 选填 如果有配置iconUrl,则会生成一个可点击的图标\n\/\/loadParamName 选填,不填就不带参数\n\/\/loadParamValue 如果loadParamName有值,则此配置必填,可取值为id 或者 this\ntype column struct {\n\tField string `xml:\"field,attr\"`\n\tDesc string `xml:\"desc,attr\"`\n\tLinkType string `xml:\"linkType,attr\"`\n\tUrl string `xml:\"url,attr\"`\n\tIconUrl string `xml:\"iconUrl,attr\"`\n\tLoadParamName string `xml:\"loadParamName,attr\"`\n\tLoadParamValue string `xml:\"loadParamValue,attr\"`\n\tFormat string `xml:\"format,attr\"`\n\tHidden string `xml:\"hidden,attr\"`\n}\n\ntype action struct {\n\tDesc string `xml:\"desc,attr\"`\n\tUrl string `xml:\"url,attr\"`\n\tActionParams string `xml:\"actionParams,attr\"`\n\tLinkType string `xml:\"linkType,attr\"`\n}\n\ntype search struct {\n\tField string `xml:\"field,attr\"`\n\tSearchType string `xml:\"searchType,attr\"`\n\tInputType string `xml:\"inputType,attr\"`\n\tLocalData string `xml:\"localData,attr\"`\n\tDesc string `xml:\"desc,attr\"`\n\tUrl string `xml:\"url,attr\"`\n\tValueField string `xml:\"valueField,attr\"`\n\tDescField string `xml:\"descField,attr\"`\n\t\/\/存储实际的搜索值\n\tValue string\n}\n\nfunc (gridpanel gridPanel) generate(entity Entity, terminal, packageName string) []byte {\n\n\tvar t *template.Template\n\n\tvar buf bytes.Buffer\n\n\tgridpanel.Id = packageName + \".\" + gridpanel.Id\n\n\truntimeComponentContain[gridpanel.Id] = gridpanel\n\n\tt = template.New(\"gridpanel.html\")\n\n\tt = t.Funcs(template.FuncMap{\n\t\t\"getComponentId\": getComponentId,\n\t\t\"compareInt\": CompareInt,\n\t\t\"compareString\": CompareString,\n\t})\n\n\tt, err := t.ParseFiles(\"..\/lessgo\/template\/component\/\" + terminal + \"\/gridpanel.html\")\n\n\tif err != nil {\n\t\tLog.Error(err.Error())\n\t\treturn []byte{}\n\t}\n\n\tdata := make(map[string]interface{})\n\n\tdata[\"Gridpanel\"] = gridpanel\n\tdata[\"Entity\"] = entity\n\tdata[\"Terminal\"] = terminal\n\tdata[\"SearchLength\"] = len(gridpanel.Searchs)\n\tdata[\"ActionLength\"] = len(gridpanel.Actions)\n\n\terr = t.Execute(&buf, data)\n\n\tif err != nil {\n\t\tLog.Error(err.Error())\n\t\treturn []byte{}\n\t}\n\n\treturn buf.Bytes()\n\n}\n<commit_msg>把列渲染暴露出来<commit_after>\/\/ Title:表格,及其下属控件\n\/\/\n\/\/ Description:\n\/\/\n\/\/ Author:black\n\/\/\n\/\/ Createtime:2013-08-08 09:29\n\/\/\n\/\/ Version:1.0\n\/\/\n\/\/ 修改历史:版本号 修改日期 修改人 修改说明\n\/\/\n\/\/ 1.0 2013-08-08 09:29 black 创建文档\npackage lessgo\n\nimport (\n\t\"bytes\"\n\t\"text\/template\"\n)\n\ntype gridPanel struct {\n\tEntity string `xml:\"entity,attr\"`\n\tPageSize int `xml:\"pageSize,attr\"`\n\tLoadUrl string `xml:\"loadUrl,attr\"`\n\tId string `xml:\"id,attr\"`\n\tTitle string `xml:\"title,attr\"`\n\tWidth string `xml:\"width,attr\"`\n\tHeight string `xml:\"height,attr\"`\n\tColumns []column `xml:\"column\"`\n\tActions []action `xml:\"action\"`\n\tSearchs []search `xml:\"search\"`\n}\n\n\/\/link目前可以支持,直接跳转,打开浏览器新窗口跳转,iframe弹窗,询问提示窗\n\/\/linkType=currentPage,\n\/\/以下为通用配置\n\/\/url 必填\n\/\/iconUrl 选填 如果有配置iconUrl,则会生成一个可点击的图标\n\/\/loadParamName 选填,不填就不带参数\n\/\/loadParamValue 如果loadParamName有值,则此配置必填,可取值为id 或者 this\ntype column struct {\n\tField string `xml:\"field,attr\"`\n\tDesc string `xml:\"desc,attr\"`\n\tHidden string `xml:\"hidden,attr\"`\n\tFormatter string `xml:\"formatter\"`\n}\n\ntype action struct {\n\tDesc string `xml:\"desc,attr\"`\n\tUrl string `xml:\"url,attr\"`\n\tActionParams string `xml:\"actionParams,attr\"`\n\tLinkType string `xml:\"linkType,attr\"`\n}\n\ntype search struct {\n\tField string `xml:\"field,attr\"`\n\tSearchType string `xml:\"searchType,attr\"`\n\tInputType string `xml:\"inputType,attr\"`\n\tLocalData string `xml:\"localData,attr\"`\n\tDesc string `xml:\"desc,attr\"`\n\tUrl string `xml:\"url,attr\"`\n\tValueField string `xml:\"valueField,attr\"`\n\tDescField string `xml:\"descField,attr\"`\n\t\/\/存储实际的搜索值\n\tValue string\n}\n\nfunc (gridpanel gridPanel) generate(entity Entity, terminal, packageName string) []byte {\n\n\tvar t *template.Template\n\n\tvar buf bytes.Buffer\n\n\tgridpanel.Id = packageName + \".\" + gridpanel.Id\n\n\truntimeComponentContain[gridpanel.Id] = gridpanel\n\n\tt = template.New(\"gridpanel.html\")\n\n\tt = t.Funcs(template.FuncMap{\n\t\t\"getComponentId\": getComponentId,\n\t\t\"compareInt\": CompareInt,\n\t\t\"compareString\": CompareString,\n\t})\n\n\tt, err := t.ParseFiles(\"..\/lessgo\/template\/component\/\" + terminal + \"\/gridpanel.html\")\n\n\tif err != nil {\n\t\tLog.Error(err.Error())\n\t\treturn []byte{}\n\t}\n\n\tdata := make(map[string]interface{})\n\n\tdata[\"Gridpanel\"] = gridpanel\n\tdata[\"Entity\"] = entity\n\tdata[\"Terminal\"] = terminal\n\tdata[\"SearchLength\"] = len(gridpanel.Searchs)\n\tdata[\"ActionLength\"] = len(gridpanel.Actions)\n\n\terr = t.Execute(&buf, data)\n\n\tif err != nil {\n\t\tLog.Error(err.Error())\n\t\treturn []byte{}\n\t}\n\n\treturn buf.Bytes()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package asm\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"hash\/crc64\"\n\t\"io\"\n\n\t\"github.com\/vbatts\/tar-split\/tar\/storage\"\n)\n\n\/\/ NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive\n\/\/ stream.\n\/\/\n\/\/ It takes a storage.FileGetter, for mapping the file payloads that are to be read in,\n\/\/ and a storage.Unpacker, which has access to the rawbytes and file order\n\/\/ metadata. With the combination of these two items, a precise assembled Tar\n\/\/ archive is possible.\nfunc NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {\n\t\/\/ ... Since these are interfaces, this is possible, so let's not have a nil pointer\n\tif fg == nil || up == nil {\n\t\treturn nil\n\t}\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\tentry, err := up.Next()\n\t\t\tif err != nil {\n\t\t\t\tpw.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch entry.Type {\n\t\t\tcase storage.SegmentType:\n\t\t\t\tif _, err := pw.Write(entry.Payload); err != nil {\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\tcase storage.FileType:\n\t\t\t\tif entry.Size == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfh, err := fg.Get(entry.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tdefer fh.Close()\n\t\t\t\tc := crc64.New(storage.CRCTable)\n\t\t\t\ttRdr := io.TeeReader(fh, c)\n\t\t\t\tif _, err := io.Copy(pw, tRdr); err != nil {\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !bytes.Equal(c.Sum(nil), entry.Payload) {\n\t\t\t\t\t\/\/ I would rather this be a comparable ErrInvalidChecksum or such,\n\t\t\t\t\t\/\/ but since it's coming through the PipeReader, the context of\n\t\t\t\t\t\/\/ _which_ file would be lost...\n\t\t\t\t\tpw.CloseWithError(fmt.Errorf(\"file integrity checksum failed for %q\", entry.Name))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpw.Close()\n\t}()\n\treturn pr\n}\n<commit_msg>tar\/asm: don't defer file closing<commit_after>package asm\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"hash\/crc64\"\n\t\"io\"\n\n\t\"github.com\/vbatts\/tar-split\/tar\/storage\"\n)\n\n\/\/ NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive\n\/\/ stream.\n\/\/\n\/\/ It takes a storage.FileGetter, for mapping the file payloads that are to be read in,\n\/\/ and a storage.Unpacker, which has access to the rawbytes and file order\n\/\/ metadata. With the combination of these two items, a precise assembled Tar\n\/\/ archive is possible.\nfunc NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {\n\t\/\/ ... Since these are interfaces, this is possible, so let's not have a nil pointer\n\tif fg == nil || up == nil {\n\t\treturn nil\n\t}\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\tentry, err := up.Next()\n\t\t\tif err != nil {\n\t\t\t\tpw.CloseWithError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch entry.Type {\n\t\t\tcase storage.SegmentType:\n\t\t\t\tif _, err := pw.Write(entry.Payload); err != nil {\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\tcase storage.FileType:\n\t\t\t\tif entry.Size == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfh, err := fg.Get(entry.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tc := crc64.New(storage.CRCTable)\n\t\t\t\ttRdr := io.TeeReader(fh, c)\n\t\t\t\tif _, err := io.Copy(pw, tRdr); err != nil {\n\t\t\t\t\tfh.Close()\n\t\t\t\t\tpw.CloseWithError(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !bytes.Equal(c.Sum(nil), entry.Payload) {\n\t\t\t\t\t\/\/ I would rather this be a comparable ErrInvalidChecksum or such,\n\t\t\t\t\t\/\/ but since it's coming through the PipeReader, the context of\n\t\t\t\t\t\/\/ _which_ file would be lost...\n\t\t\t\t\tfh.Close()\n\t\t\t\t\tpw.CloseWithError(fmt.Errorf(\"file integrity checksum failed for %q\", entry.Name))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfh.Close()\n\t\t\t}\n\t\t}\n\t\tpw.Close()\n\t}()\n\treturn pr\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage stringkey is an implementation of the \"github.com\/lleo\/go-hamt\/key\"\ninterface.\n\nBoth of these packages are are in separate package namespaces to prevent circular\nreferences between \"github.com\/lleo\/go-hamt\" and \"github.com\/lleo\/go-hamt\/hamt32\"\nand\/or \"github.com\/lleo\/go-hamt\/hamt64\".\n\nAdditionally, \"github.com\/lleo\/go-hamt\/stringKey\" is used by the functional\nvariation of the Hash Array Mapped Trie in \"github.com\/lleo\/go-hamt-fuctional\".\n*\/\npackage stringkey\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lleo\/go-hamt\/key\"\n)\n\n\/\/ StringKey is a simple string implementing the key.Key interface.\ntype StringKey struct {\n\tkey.Base\n\tstr string\n}\n\n\/\/ New allocates and initializes a StringKey data structure.\nfunc New(str string) *StringKey {\n\tvar k = new(StringKey)\n\n\t\/\/Bad for straight assignment all strings are pointers to structs.\n\t\/\/But thats ok, cuz all strings are immutable.\n\tk.str = str\n\n\tk.Initialize([]byte(k.str))\n\n\treturn k\n}\n\n\/\/ String return a string representation of StringKey data structure.\nfunc (sk StringKey) String() string {\n\treturn fmt.Sprintf(\"StringKey{%s, str:%q}\", sk.Base.String(), sk.str)\n}\n\nfunc (sk StringKey) String30() string {\n\treturn fmt.Sprintf(\"{%s, %q}\", sk.Base.String30(), sk.str)\n}\n\nfunc (sk StringKey) String60() string {\n\treturn fmt.Sprintf(\"{%s, %q}\", sk.Base.String60(), sk.str)\n}\n\n\/\/ Equals returns true iff the StringKey exactly matches the key passed it. If\n\/\/ The key.Key passed as an argument is not also a StringKey Equals()\n\/\/ automatically returns false.\nfunc (sk StringKey) Equals(key key.Key) bool {\n\tvar k, isStrinKey = key.(*StringKey)\n\tif !isStrinKey {\n\t\tpanic(\"type mismatch\")\n\t}\n\treturn sk.str == k.str\n}\n\nfunc (sk StringKey) toByteSlice() []byte {\n\treturn []byte(sk.str)\n}\n\nfunc toByteSlice(str string) []byte {\n\treturn []byte(str)\n\n\t\/\/\/\/BROKEN: This does not work because StringKey is not a fixed size Struct.\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, *sk)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/\/\/Variation on above: works!\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, uint32(0) \/* Hash30() *\/)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/err = binary.Write(&bytebuf, binary.BigEndian, []byte(str))\n\t\/\/\/\/err = binary.Write(&bytebuf, binary.BigEndian, str)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/\/\/ANOTHER variation: does not work, binary.Write() does not like\n\t\/\/\/\/ struct with []byte\n\t\/\/var d = struct {\n\t\/\/\thash30 uint32\n\t\/\/\ts []byte\n\t\/\/}{0 \/*Hash30()*\/, []byte(str)}\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, d)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/ANOTHER IDEA: use encoding\/gob; but I don't know that api well enough.\n\n\t\/\/ANOThER IDEA: use gopkg.in\/mgo.v2\/bson; but it requires a third party lib.\n}\n\n\/\/ Str returns the internal string of StringKey. This allows for read-only\n\/\/ access to the string field of StringKey.\nfunc (sk StringKey) Str() string {\n\treturn sk.str\n}\n<commit_msg>removed useless comment<commit_after>\/*\nPackage stringkey is an implementation of the \"github.com\/lleo\/go-hamt\/key\"\ninterface.\n\nBoth of these packages are are in separate package namespaces to prevent circular\nreferences between \"github.com\/lleo\/go-hamt\" and \"github.com\/lleo\/go-hamt\/hamt32\"\nand\/or \"github.com\/lleo\/go-hamt\/hamt64\".\n\nAdditionally, \"github.com\/lleo\/go-hamt\/stringKey\" is used by the functional\nvariation of the Hash Array Mapped Trie in \"github.com\/lleo\/go-hamt-fuctional\".\n*\/\npackage stringkey\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/lleo\/go-hamt\/key\"\n)\n\n\/\/ StringKey is a simple string implementing the key.Key interface.\ntype StringKey struct {\n\tkey.Base\n\tstr string\n}\n\n\/\/ New allocates and initializes a StringKey data structure.\nfunc New(str string) *StringKey {\n\tvar k = new(StringKey)\n\n\tk.str = str\n\n\tk.Initialize([]byte(k.str))\n\n\treturn k\n}\n\n\/\/ String return a string representation of StringKey data structure.\nfunc (sk StringKey) String() string {\n\treturn fmt.Sprintf(\"StringKey{%s, str:%q}\", sk.Base.String(), sk.str)\n}\n\nfunc (sk StringKey) String30() string {\n\treturn fmt.Sprintf(\"{%s, %q}\", sk.Base.String30(), sk.str)\n}\n\nfunc (sk StringKey) String60() string {\n\treturn fmt.Sprintf(\"{%s, %q}\", sk.Base.String60(), sk.str)\n}\n\n\/\/ Equals returns true iff the StringKey exactly matches the key passed it. If\n\/\/ The key.Key passed as an argument is not also a StringKey Equals()\n\/\/ automatically returns false.\nfunc (sk StringKey) Equals(key key.Key) bool {\n\tvar k, isStrinKey = key.(*StringKey)\n\tif !isStrinKey {\n\t\tpanic(\"type mismatch\")\n\t}\n\treturn sk.str == k.str\n}\n\nfunc (sk StringKey) toByteSlice() []byte {\n\treturn []byte(sk.str)\n}\n\nfunc toByteSlice(str string) []byte {\n\treturn []byte(str)\n\n\t\/\/\/\/BROKEN: This does not work because StringKey is not a fixed size Struct.\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, *sk)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/\/\/Variation on above: works!\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, uint32(0) \/* Hash30() *\/)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/err = binary.Write(&bytebuf, binary.BigEndian, []byte(str))\n\t\/\/\/\/err = binary.Write(&bytebuf, binary.BigEndian, str)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/\/\/ANOTHER variation: does not work, binary.Write() does not like\n\t\/\/\/\/ struct with []byte\n\t\/\/var d = struct {\n\t\/\/\thash30 uint32\n\t\/\/\ts []byte\n\t\/\/}{0 \/*Hash30()*\/, []byte(str)}\n\t\/\/var bytebuf bytes.Buffer\n\t\/\/err := binary.Write(&bytebuf, binary.BigEndian, d)\n\t\/\/if err != nil {\n\t\/\/\tpanic(err)\n\t\/\/}\n\t\/\/return bytebuf.Bytes()\n\n\t\/\/ANOTHER IDEA: use encoding\/gob; but I don't know that api well enough.\n\n\t\/\/ANOThER IDEA: use gopkg.in\/mgo.v2\/bson; but it requires a third party lib.\n}\n\n\/\/ Str returns the internal string of StringKey. This allows for read-only\n\/\/ access to the string field of StringKey.\nfunc (sk StringKey) Str() string {\n\treturn sk.str\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cryptix\/exp\/git\"\n\n\t\"gopkg.in\/errgo.v1\"\n)\n\n\/\/ \"fetch $sha1 $ref\" method 1 - unpacking loose objects\n\/\/ - look for it in \".git\/objects\/substr($sha1, 0, 2)\/substr($sha, 2)\"\n\/\/ - if found, download it and put it in place. (there may be a command for this)\n\/\/ - done \\o\/\nfunc fetchObject(sha1 string) error {\n\tp := filepath.Join(ipfsRepoPath, \"objects\", sha1[:2], sha1[2:])\n\tobjF, err := ipfsShell.Cat(p)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"shell.Cat(%q) failed\", p)\n\t}\n\tobj, err := git.DecodeObject(objF)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"git.DecodeObject() failed\")\n\t}\n\tlog.WithField(\"obj\", obj).Warning(\"TODO: assert(obj.type==commit)\")\n\t\/\/ assert(typ=commit)\n\t\/\/ >recurese parent?\n\t\/\/ >recurse tree & store blobs\n\n\treturn errgo.Newf(\"TODO: unsupported - please see issue #1\")\n}\n\n\/\/ \"fetch $sha1 $ref\" method 2 - unpacking packed objects\n\/\/ - look for it in packfiles by fetching \".git\/objects\/pack\/*.idx\"\n\/\/ and looking at each idx with cat <idx> | git show-index (alternatively can learn to read the format in go)\n\/\/ - if found in an <idx>, download the relevant .pack file,\n\/\/ and feed it into `git index-pack --stdin --fix-thin` which will put it into place.\n\/\/ - done \\o\/\nfunc fetchPackedObject(sha1 string) error {\n\t\/\/ search for all index files\n\tpackPath := filepath.Join(ipfsRepoPath, \"objects\", \"pack\")\n\tlsobj, err := ipfsShell.FileList(packPath)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"shell FileList(%q) failed\", packPath)\n\t}\n\tvar indexes []string\n\tfor _, lnk := range lsobj.Links {\n\t\tif lnk.Type == \"File\" && strings.HasSuffix(lnk.Name, \".idx\") {\n\t\t\tindexes = append(indexes, filepath.Join(packPath, lnk.Name))\n\t\t}\n\t}\n\tif len(indexes) == 0 {\n\t\treturn errgo.New(\"fetchPackedObject: no idx files found\")\n\t}\n\tfor _, idx := range indexes {\n\t\tidxF, err := ipfsShell.Cat(idx)\n\t\tif err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: idx<%s> cat(%s) failed\", sha1, idx)\n\t\t}\n\n\t\t\/\/ using external git show-index < idxF for now\n\t\t\/\/ TODO: parse index file in go to make this portable\n\t\tvar b bytes.Buffer\n\t\tshowIdx := exec.Command(\"git\", \"show-index\")\n\t\tshowIdx.Stdin = idxF\n\t\tshowIdx.Stdout = &b\n\t\tshowIdx.Stderr = &b\n\t\tif err := showIdx.Run(); err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: idx<%s> show-index start failed\", sha1)\n\t\t}\n\t\tcmdOut := b.String()\n\t\tif !strings.Contains(cmdOut, sha1) {\n\t\t\tlog.WithField(\"idx\", filepath.Base(idx)).Debug(\"git show-index: sha1 not in index, next idx file\")\n\t\t\tcontinue\n\t\t}\n\t\t\/\/log.Debug(\"git show-index:\", cmdOut)\n\n\t\t\/\/ we found an index with our hash inside\n\t\tpack := strings.Replace(idx, \".idx\", \".pack\", 1)\n\t\tlog.Debug(\"unpacking:\", pack)\n\t\tpackF, err := ipfsShell.Cat(pack)\n\t\tif err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: pack<%s> open() failed\", sha1)\n\t\t}\n\t\tb.Reset()\n\t\tunpackIdx := exec.Command(\"git\", \"unpack-objects\")\n\t\tunpackIdx.Dir = thisGitRepo \/\/ GIT_DIR\n\t\tunpackIdx.Stdin = packF\n\t\tunpackIdx.Stdout = &b\n\t\tunpackIdx.Stderr = &b\n\t\tif err := unpackIdx.Run(); err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: pack<%s> 'git unpack-objects' failed\\nOutput: %s\", sha1, b.String())\n\t\t}\n\t\tlog.Debug(\"git unpack-objects ...:\", b.String())\n\t\t\/\/ found and unpacked - done\n\t\t\/\/ TODO(cryptix): somehow git doesnt checkout now..?\n\t\t\/\/ 'warning: remote HEAD refers to nonexistent ref, unable to checkout.'\n\t\t\/\/b.Reset()\n\t\t\/\/symRef := exec.Command(\"git\", \"symbolic-ref\", \"HEAD\", \"ref\/heads\/master\")\n\t\t\/\/symRef.Dir = thisGitRepo \/\/ GIT_DIR\n\t\t\/\/symRef.Stdout = &b\n\t\t\/\/symRef.Stderr = &b\n\t\t\/\/if err := symRef.Run(); err != nil {\n\t\t\/\/\treturn errgo.Notef(err, \"fetchPackedObject: 'git symbolic-ref HEAD ref\/heads\/master' failed\\nOutput: %s\", b.String())\n\t\t\/\/}\n\t\treturn nil\n\t}\n\treturn errgo.Newf(\"did not find sha1<%s> in %d index files\", sha1, len(indexes))\n}\n<commit_msg>fetchObject: begin decoding git objects<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cryptix\/exp\/git\"\n\n\t\"gopkg.in\/errgo.v1\"\n)\n\n\/\/ \"fetch $sha1 $ref\" method 1 - unpacking loose objects\n\/\/ - look for it in \".git\/objects\/substr($sha1, 0, 2)\/substr($sha, 2)\"\n\/\/ - if found, download it and put it in place. (there may be a command for this)\n\/\/ - done \\o\/\nfunc fetchObject(sha1 string) error {\n\tp := filepath.Join(ipfsRepoPath, \"objects\", sha1[:2], sha1[2:])\n\tobjF, err := ipfsShell.Cat(p)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"shell.Cat(%q) commit failed\", p)\n\t}\n\tobj, err := git.DecodeObject(objF)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"git.DecodeObject(commit) failed\")\n\t}\n\tcommit, ok := obj.Commit()\n\tif !ok {\n\t\treturn errgo.Newf(\"sha1 is not a git commit object\")\n\t}\n\t\/\/ >recurese parent?\n\n\t\/\/ >recurse tree & store blobs\n\tp = filepath.Join(ipfsRepoPath, \"objects\", commit.Tree[:2], commit.Tree[2:])\n\tobjF, err = ipfsShell.Cat(p)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"shell.Cat(%q) tree failed\", p)\n\t}\n\tobj, err = git.DecodeObject(objF)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"git.DecodeObject(tree) failed\")\n\t}\n\n\ttree, ok := obj.Tree()\n\tif !ok {\n\t\treturn errgo.Newf(\"sha1 is not a git tree object\")\n\t}\n\n\tlog.Warning(\"Trees!\")\n\tlog.Warning(tree)\n\treturn errgo.Newf(\"TODO: unsupported - please see issue #1\")\n}\n\n\/\/ \"fetch $sha1 $ref\" method 2 - unpacking packed objects\n\/\/ - look for it in packfiles by fetching \".git\/objects\/pack\/*.idx\"\n\/\/ and looking at each idx with cat <idx> | git show-index (alternatively can learn to read the format in go)\n\/\/ - if found in an <idx>, download the relevant .pack file,\n\/\/ and feed it into `git index-pack --stdin --fix-thin` which will put it into place.\n\/\/ - done \\o\/\nfunc fetchPackedObject(sha1 string) error {\n\t\/\/ search for all index files\n\tpackPath := filepath.Join(ipfsRepoPath, \"objects\", \"pack\")\n\tlsobj, err := ipfsShell.FileList(packPath)\n\tif err != nil {\n\t\treturn errgo.Notef(err, \"shell FileList(%q) failed\", packPath)\n\t}\n\tvar indexes []string\n\tfor _, lnk := range lsobj.Links {\n\t\tif lnk.Type == \"File\" && strings.HasSuffix(lnk.Name, \".idx\") {\n\t\t\tindexes = append(indexes, filepath.Join(packPath, lnk.Name))\n\t\t}\n\t}\n\tif len(indexes) == 0 {\n\t\treturn errgo.New(\"fetchPackedObject: no idx files found\")\n\t}\n\tfor _, idx := range indexes {\n\t\tidxF, err := ipfsShell.Cat(idx)\n\t\tif err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: idx<%s> cat(%s) failed\", sha1, idx)\n\t\t}\n\n\t\t\/\/ using external git show-index < idxF for now\n\t\t\/\/ TODO: parse index file in go to make this portable\n\t\tvar b bytes.Buffer\n\t\tshowIdx := exec.Command(\"git\", \"show-index\")\n\t\tshowIdx.Stdin = idxF\n\t\tshowIdx.Stdout = &b\n\t\tshowIdx.Stderr = &b\n\t\tif err := showIdx.Run(); err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: idx<%s> show-index start failed\", sha1)\n\t\t}\n\t\tcmdOut := b.String()\n\t\tif !strings.Contains(cmdOut, sha1) {\n\t\t\tlog.WithField(\"idx\", filepath.Base(idx)).Debug(\"git show-index: sha1 not in index, next idx file\")\n\t\t\tcontinue\n\t\t}\n\t\t\/\/log.Debug(\"git show-index:\", cmdOut)\n\n\t\t\/\/ we found an index with our hash inside\n\t\tpack := strings.Replace(idx, \".idx\", \".pack\", 1)\n\t\tlog.Debug(\"unpacking:\", pack)\n\t\tpackF, err := ipfsShell.Cat(pack)\n\t\tif err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: pack<%s> open() failed\", sha1)\n\t\t}\n\t\tb.Reset()\n\t\tunpackIdx := exec.Command(\"git\", \"unpack-objects\")\n\t\tunpackIdx.Dir = thisGitRepo \/\/ GIT_DIR\n\t\tunpackIdx.Stdin = packF\n\t\tunpackIdx.Stdout = &b\n\t\tunpackIdx.Stderr = &b\n\t\tif err := unpackIdx.Run(); err != nil {\n\t\t\treturn errgo.Notef(err, \"fetchPackedObject: pack<%s> 'git unpack-objects' failed\\nOutput: %s\", sha1, b.String())\n\t\t}\n\t\tlog.Debug(\"git unpack-objects ...:\", b.String())\n\t\treturn nil\n\t}\n\treturn errgo.Newf(\"did not find sha1<%s> in %d index files\", sha1, len(indexes))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\/\/\"errors\"\n\t\"container\/list\"\n\t\"sync\"\n\n\t\"github.com\/reiver\/go-porterstemmer\"\n)\n\nvar seperators []byte = []byte{10, 32, 44, 46}\nvar stopWords map[string]bool\n\n\/\/var dictionary map[string]*list.List\nvar groupLen int = 50\n\nvar dictionary = struct {\n\tsync.RWMutex\n\tm map[string]*list.List\n}{m: make(map[string]*list.List)}\n\nfunc main() {\n\tvar corpusPath string\n\tflag.StringVar(&corpusPath, \"corpus\", \"data\/corpus\", \"File path of the corpus.\")\n\tflag.Parse()\n\tdispatcher(corpusPath)\n}\n\nfunc dispatcher(corpusPath string) {\n\tfile, err := os.Open(corpusPath)\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcollectStopWords()\n\t\/\/dictionary = make(map[string]*list.List)\n\tscanner := bufio.NewScanner(io.Reader(file))\n\n\tscanner.Split(splitTokens)\n\n\tvar (\n\t\ttoken string\n\t\tpos uint32 = 0\n\t\twg sync.WaitGroup\n\t)\n\tfor scanner.Scan() {\n\t\ttoken = scanner.Text()\n\t\tif len(token) > 0 {\n\t\t\twg.Add(1)\n\t\t\tgo addToken(token, pos, &wg)\n\t\t\tpos += 1\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tpanic(err)\n\t}\n\twg.Wait()\n\n\toutFile, err := os.Create(\"res\")\n\tdefer outFile.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twriter := bufio.NewWriter(io.Writer(outFile))\n\twriteIndex(writer)\n}\n\nfunc addToken(token string, pos uint32, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\t\/\/ Check to see if token is a stop word\n\t_, ok := stopWords[token]\n\tif !ok {\n\t\t\/\/ Stem the token\n\t\ttoken = porterstemmer.StemString(token)\n\t\tdictionary.RLock()\n\t\tpostingsList, ok := dictionary.m[token]\n\t\tdictionary.RUnlock()\n\t\tif ok {\n\t\t\tlastEl := postingsList.Back()\n\t\t\tlastGroup := lastEl.Value.([]uint32)\n\t\t\tif len(lastGroup) == groupLen {\n\t\t\t\tnewGroup := []uint32{pos}\n\t\t\t\tpostingsList.PushBack(newGroup)\n\t\t\t} else {\n\t\t\t\tlastGroup = append(lastGroup, pos)\n\t\t\t\tpostingsList.Remove(lastEl)\n\t\t\t\tpostingsList.PushBack(lastGroup)\n\t\t\t}\n\t\t} else {\n\t\t\tl := list.New()\n\t\t\tnewGroup := []uint32{pos}\n\t\t\tl.PushBack(newGroup)\n\t\t\tdictionary.Lock()\n\t\t\tdictionary.m[token] = l\n\t\t\tdictionary.Unlock()\n\t\t}\n\t}\n}\n\nfunc writeIndex(writer io.Writer) {\n\tfor k, v := range dictionary.m {\n\t\tfmt.Println(k, v.Len(), len(v.Front().Value.([]uint32)))\n\t}\n}\n\nfunc splitTokens(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tvar (\n\t\tfound = false\n\t\tstarted = false\n\t\toffset = 0\n\t)\n\tfor i := 0; i < len(data); i++ {\n\t\tfound = false\n\t\tfor s := 0; s < len(seperators); s++ {\n\t\t\tif data[i] == seperators[s] {\n\t\t\t\tadvance = i + 1\n\t\t\t\ttoken = data[offset:i]\n\t\t\t\tif !started {\n\t\t\t\t\toffset += 1\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\tif started {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif !started {\n\t\t\tstarted = true\n\t\t}\n\t}\n\treturn\n}\n\nfunc collectStopWords() {\n\tfile, err := os.Open(\"data\/stopwords\")\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstopWords = make(map[string]bool)\n\tscanner := bufio.NewScanner(io.Reader(file))\n\n\tvar word string\n\tfor scanner.Scan() {\n\t\tword = scanner.Text()\n\t\tif len(word) > 0 {\n\t\t\tstopWords[word] = true\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Actually write the index to disk.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\/\/\"errors\"\n\t\"container\/list\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/reiver\/go-porterstemmer\"\n)\n\nvar seperators []byte = []byte{10, 32, 44, 46}\nvar stopWords map[string]bool\n\n\/\/var dictionary map[string]*list.List\nvar groupLen int = 50\n\nvar dictionary = struct {\n\tsync.RWMutex\n\tm map[string]*list.List\n}{m: make(map[string]*list.List)}\n\nfunc main() {\n\tvar corpusPath string\n\tflag.StringVar(&corpusPath, \"corpus\", \"data\/corpus\", \"File path of the corpus.\")\n\tflag.Parse()\n\tdispatcher(corpusPath)\n}\n\nfunc dispatcher(corpusPath string) {\n\tfile, err := os.Open(corpusPath)\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcollectStopWords()\n\t\/\/dictionary = make(map[string]*list.List)\n\tscanner := bufio.NewScanner(io.Reader(file))\n\n\tscanner.Split(splitTokens)\n\n\tvar (\n\t\ttoken string\n\t\tpos uint32 = 0\n\t\twg sync.WaitGroup\n\t)\n\tfor scanner.Scan() {\n\t\ttoken = scanner.Text()\n\t\tif len(token) > 0 {\n\t\t\twg.Add(1)\n\t\t\tgo addToken(token, pos, &wg)\n\t\t\tpos += 1\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tpanic(err)\n\t}\n\twg.Wait()\n\n\toutFile, err := os.Create(\"res\")\n\tdefer outFile.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twriter := bufio.NewWriter(io.Writer(outFile))\n\twriteIndex(writer)\n}\n\nfunc addToken(token string, pos uint32, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\t\/\/ Check to see if token is a stop word\n\t_, ok := stopWords[token]\n\tif !ok {\n\t\t\/\/ Stem the token\n\t\ttoken = porterstemmer.StemString(token)\n\t\tdictionary.RLock()\n\t\tpostingsList, ok := dictionary.m[token]\n\t\tdictionary.RUnlock()\n\t\tif ok {\n\t\t\tlastEl := postingsList.Back()\n\t\t\tlastGroup := lastEl.Value.([]uint32)\n\t\t\tif len(lastGroup) == groupLen {\n\t\t\t\tnewGroup := []uint32{pos}\n\t\t\t\tpostingsList.PushBack(newGroup)\n\t\t\t} else {\n\t\t\t\tlastGroup = append(lastGroup, pos)\n\t\t\t\tpostingsList.Remove(lastEl)\n\t\t\t\tpostingsList.PushBack(lastGroup)\n\t\t\t}\n\t\t} else {\n\t\t\tl := list.New()\n\t\t\tnewGroup := []uint32{pos}\n\t\t\tl.PushBack(newGroup)\n\t\t\tdictionary.Lock()\n\t\t\tdictionary.m[token] = l\n\t\t\tdictionary.Unlock()\n\t\t}\n\t}\n}\n\nfunc writeIndex(writer *bufio.Writer) {\n\tfor k, v := range dictionary.m {\n\t\tfmt.Println(k, v.Len(), len(v.Front().Value.([]uint32)))\n\t\twriter.WriteString(\"#\" + k)\n\t\tvar group []uint32\n\t\tfor el := v.Front(); el != nil; el = el.Next() {\n\t\t\tgroup = el.Value.([]uint32)\n\t\t\tfor _, posting := range group {\n\t\t\t\tval := strconv.FormatUint(uint64(posting), 10)\n\t\t\t\twriter.WriteString(\",\" + string(val))\n\t\t\t}\n\t\t}\n\t}\n\twriter.Flush()\n}\n\nfunc splitTokens(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tvar (\n\t\tfound = false\n\t\tstarted = false\n\t\toffset = 0\n\t)\n\tfor i := 0; i < len(data); i++ {\n\t\tfound = false\n\t\tfor s := 0; s < len(seperators); s++ {\n\t\t\tif data[i] == seperators[s] {\n\t\t\t\tadvance = i + 1\n\t\t\t\ttoken = data[offset:i]\n\t\t\t\tif !started {\n\t\t\t\t\toffset += 1\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\tif started {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif !started {\n\t\t\tstarted = true\n\t\t}\n\t}\n\treturn\n}\n\nfunc collectStopWords() {\n\tfile, err := os.Open(\"data\/stopwords\")\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstopWords = make(map[string]bool)\n\tscanner := bufio.NewScanner(io.Reader(file))\n\n\tvar word string\n\tfor scanner.Scan() {\n\t\tword = scanner.Text()\n\t\tif len(word) > 0 {\n\t\t\tstopWords[word] = true\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nAsciidocgo implements an AsciiDoc renderer in Go.\n\nMethods for parsing Asciidoc input files and rendering documents using eRuby\ntemplates.\n\nAsciidoc documents comprise a header followed by zero or more sections.\nSections are composed of blocks of content. For example:\n\n = Doc Title\n\n == Section 1\n\n This is a paragraph block in the first section.\n\n == Section 2\n\n This section has a paragraph block and an olist block.\n\n . Item 1\n . Item 2\n\nExamples:\n\nUse built-in templates:\n\n lines = File.readlines(\"your_file.asc\")\n doc = Asciidoctor::Document.new(lines)\n html = doc.render\n File.open(\"your_file.html\", \"w+\") do |file|\n file.puts html\n end\n\nUse custom (Tilt-supported) templates:\n\n lines = File.readlines(\"your_file.asc\")\n doc = Asciidoctor::Document.new(lines, :template_dir => 'templates')\n html = doc.render\n File.open(\"your_file.html\", \"w+\") do |file|\n file.puts html\n end\n\n*\/\npackage asciidocgo\n\nimport \"io\"\n\n\/\/ Accepts input as a string\nfunc LoadString(input string) *Document {\n\treturn nil\n}\n\n\/\/ Accepts input as an array of strings\nfunc LoadStrings(inputs ...string) *Document {\n\treturn nil\n}\n\n\/\/ Accepts input as an IO.\n\/\/ If the input is a File, information about the file is stored in attributes on\n\/\/ the Document object.\nfunc Load(input io.Reader) *Document {\n\treturn nil\n}\n<commit_msg>Add REGEXP on Asciidocgo.<commit_after>\/*\nAsciidocgo implements an AsciiDoc renderer in Go.\n\nMethods for parsing Asciidoc input files and rendering documents using eRuby\ntemplates.\n\nAsciidoc documents comprise a header followed by zero or more sections.\nSections are composed of blocks of content. For example:\n\n = Doc Title\n\n == Section 1\n\n This is a paragraph block in the first section.\n\n == Section 2\n\n This section has a paragraph block and an olist block.\n\n . Item 1\n . Item 2\n\nExamples:\n\nUse built-in templates:\n\n lines = File.readlines(\"your_file.asc\")\n doc = Asciidoctor::Document.new(lines)\n html = doc.render\n File.open(\"your_file.html\", \"w+\") do |file|\n file.puts html\n end\n\nUse custom (Tilt-supported) templates:\n\n lines = File.readlines(\"your_file.asc\")\n doc = Asciidoctor::Document.new(lines, :template_dir => 'templates')\n html = doc.render\n File.open(\"your_file.html\", \"w+\") do |file|\n file.puts html\n end\n\n*\/\npackage asciidocgo\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\n\t\"github.com\/VonC\/asciidocgo\/utils\"\n)\n\n\/\/ Accepts input as a string\nfunc LoadString(input string) *Document {\n\treturn nil\n}\n\n\/\/ Accepts input as an array of strings\nfunc LoadStrings(inputs ...string) *Document {\n\treturn nil\n}\n\n\/\/ Accepts input as an IO.\n\/\/ If the input is a File, information about the file is stored in attributes on\n\/\/ the Document object.\nfunc Load(input io.Reader) *Document {\n\treturn nil\n}\n\nconst (\n\tCC_ALPHA = `a-zA-Z`\n\tCC_ALNUM = `a-zA-Z0-9`\n\tCC_BLANK = `[ \\t]`\n\t\/\/ non-blank character\n\tCC_GRAPH = `[\\x21-\\x7E]`\n\tCC_EOL = `(?=\\n|$)`\n)\n\nvar ADMONITION_STYLES utils.Arr = []string{\"NOTE\", \"TIP\", \"IMPORTANT\", \"WARNING\", \"CAUTION\"}\n\n\/* The following pattern, which appears frequently, captures the contents\nbetween square brackets, ignoring escaped closing brackets\n(closing brackets prefixed with a backslash '\\' character)\n\n\tPattern:\n\t(?:\\[((?:\\\\\\]|[^\\]])*?)\\])\n\tMatches:\n\t[enclosed text here] or [enclosed [text\\] here]\n*\/\nvar REGEXP_STRING = map[string]string{\n\t\/\/:strip_line_wise => \/\\A(?:\\s*\\n)?(.*?)\\s*\\z\/m,\n\n\t\/\/ # NOTE: this is a inline admonition note\n\t\/\/\t:admonition_inline => \/^(#{ADMONITION_STYLES.to_a * '|'}):#{CC_BLANK}\/,\n\t\":admonition_inline\": fmt.Sprintf(\"^(%v):%v\", ADMONITION_STYLES.Mult(\"|\"), CC_BLANK),\n\n\t\/\/\thttp:\/\/domain\n\t\/\/\thttps:\/\/domain\n\t\/\/\tdata:info\n\t\/\/\t:uri_sniff => %r{^[#{CC_ALPHA}][#{CC_ALNUM}.+-]*:\/{0,2}},\n\t\":uri_sniff\": fmt.Sprintf(\"^[%v][%v.+-]*:\/{0,2}.*\", CC_ALPHA, CC_ALNUM),\n}\n\nfunc iniREGEXP() map[string]*regexp.Regexp {\n\tres := map[string]*regexp.Regexp{}\n\tfor key, regexpString := range REGEXP_STRING {\n\t\tregexp, err := regexp.Compile(regexpString)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"iniREGEXP should compile all REGEXP_STRING like %v: %v\", regexpString, err))\n\t\t}\n\t\tres[key] = regexp\n\t}\n\treturn res\n}\n\n\/* The following pattern, which appears frequently, captures the contents\nbetween square brackets, ignoring escaped closing brackets\n(closing brackets prefixed with a backslash '\\' character)\n\n\tPattern:\n\t(?:\\[((?:\\\\\\]|[^\\]])*?)\\])\n\tMatches:\n\t[enclosed text here] or [enclosed [text\\] here]\n*\/\nvar REGEXP = iniREGEXP()\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ global variables\nvar (\n\tregion string\n\tproject string\n\tstackname string\n\tstacks map[string]*stack\n\tcfvars map[string]interface{}\n)\n\n\/\/ fetchContent - checks the source type, url\/s3\/file and calls the corresponding function\nfunc fetchContent(source string) (string, error) {\n\tswitch strings.Split(strings.ToLower(source), \":\")[0] {\n\tcase \"http\":\n\t\tLog(fmt.Sprintln(\"Source Type: [http] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := Get(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tcase \"https\":\n\t\tLog(fmt.Sprintln(\"Source Type: [https] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := Get(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tcase \"s3\":\n\t\tLog(fmt.Sprintln(\"Source Type: [s3] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := S3Read(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tdefault:\n\t\tLog(fmt.Sprintln(\"Source Type: [file] Detected, Fetching Source: \", source), level.debug)\n\t\tb, err := ioutil.ReadFile(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(b), nil\n\t}\n}\n\n\/\/ getName - Checks if arg is url or file and returns stack name and filepath\/url\nfunc getSource(s string) (string, string, error) {\n\tif strings.Contains(s, \"::\") {\n\t\tvals := strings.Split(s, \"::\")\n\t\tif len(vals) < 2 {\n\t\t\treturn \"\", \"\", errors.New(`Error, invalid url format --> Example: stackname::http:\/\/someurl OR stackname::s3:\/\/bucket\/key`)\n\t\t}\n\n\t\treturn vals[0], vals[1], nil\n\n\t}\n\n\tname := filepath.Base(strings.Replace(s, filepath.Ext(s), \"\", -1))\n\treturn name, s, nil\n\n}\n\n\/\/ configReader parses the config YAML file with Viper\nfunc configReader(conf string) error {\n\tviper.SetConfigType(\"yaml\")\n\n\tcfg, err := fetchContent(conf)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tLog(fmt.Sprintln(\"Reading Config String into viper:\", cfg), level.debug)\n\terr = viper.ReadConfig(bytes.NewBuffer([]byte(cfg)))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get basic settings\n\tregion = viper.GetString(\"region\")\n\tproject = viper.GetString(\"project\")\n\n\t\/\/ Get all the values of variables under cloudformation and put them into a map\n\tcfvars = make(map[string]interface{})\n\tstacks = make(map[string]*stack)\n\n\t\/\/ Get Global values\n\tcfvars[\"global\"] = viper.Get(\"global\")\n\n\tLog(fmt.Sprintln(\"Keys identified in Config:\", viper.AllKeys()), level.debug)\n\n\t\/\/ if stacks is 0 at this point, All Stacks are assumed.\n\tif len(job.stacks) == 0 {\n\t\tjob.stacks = make(map[string]string)\n\t\tfor _, stk := range viper.Sub(\"stacks\").AllKeys() {\n\t\t\tjob.stacks[strings.Split(stk, \".\")[0]] = \"\"\n\t\t}\n\t}\n\n\tfor s := range job.stacks {\n\t\tstacks[s] = &stack{}\n\t\tkey := \"stacks.\" + s\n\t\tLog(fmt.Sprintf(\"Evaluating: [%s] in config\"+\"\\n\", s), level.debug)\n\n\t\tLog(fmt.Sprintf(\"Checking if [%s] exists in [%s]\", key, strings.Join(viper.AllKeys(), \", \")), level.debug)\n\t\tif !strings.Contains(strings.Join(viper.AllKeys(), \"\"), key) {\n\t\t\treturn fmt.Errorf(\"Key not found in config: %s\", key)\n\t\t}\n\n\t\tfor _, v := range viper.Sub(key).AllKeys() {\n\t\t\tLog(fmt.Sprintln(\"Processing: \", v), level.debug)\n\t\t\t\/\/ Using case statement for setting keyword values, added for scalability later.\n\t\t\tswitch v {\n\t\t\tcase \"depends_on\":\n\t\t\t\tdept := viper.Get(fmt.Sprintf(\"stacks.%s.%s\", s, v)).([]interface{})\n\t\t\t\tLog(fmt.Sprintf(\"Found Dependency for [%s]: %s\", s, dept), level.debug)\n\t\t\t\tstacks[s].dependsOn = dept\n\t\t\tdefault:\n\t\t\t\t\/\/ TODO: Nothing for now - more built-in values to come... maybe\n\t\t\t}\n\n\t\t\t\/\/ Get Cloudformation values\n\t\t\tcfvars[s] = viper.Get(fmt.Sprintf(\"stacks.%s.cf\", s))\n\t\t}\n\n\t\tstacks[s].name = s\n\t\tstacks[s].setStackName()\n\t}\n\n\treturn nil\n}\n\n\/\/ genTimeParser - Parses templates before deploying them...\nfunc genTimeParser(source string) (string, error) {\n\n\ttempl, err := fetchContent(source)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Create template\n\tt, err := template.New(\"template\").Funcs(genTimeFunctions).Parse(templ)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ so that we can write to string\n\tvar doc bytes.Buffer\n\tt.Execute(&doc, cfvars)\n\treturn doc.String(), nil\n}\n\n\/\/ deployTimeParser - Parses templates during deployment to resolve specfic Dependency functions like stackout...\nfunc (s *stack) deployTimeParser() error {\n\n\t\/\/ Create template\n\tt, err := template.New(\"template\").Delims(\"%\", \"%\").Funcs(deployTimeFunctions).Parse(s.template)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ so that we can write to string\n\tvar doc bytes.Buffer\n\tt.Execute(&doc, cfvars)\n\ts.template = doc.String()\n\tLog(fmt.Sprintf(\"Deploy Time Template Generate:\\n%s\", s.template), level.debug)\n\n\treturn nil\n}\n<commit_msg>updated DeployTime function delimiter<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ global variables\nvar (\n\tregion string\n\tproject string\n\tstackname string\n\tstacks map[string]*stack\n\tcfvars map[string]interface{}\n)\n\n\/\/ fetchContent - checks the source type, url\/s3\/file and calls the corresponding function\nfunc fetchContent(source string) (string, error) {\n\tswitch strings.Split(strings.ToLower(source), \":\")[0] {\n\tcase \"http\":\n\t\tLog(fmt.Sprintln(\"Source Type: [http] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := Get(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tcase \"https\":\n\t\tLog(fmt.Sprintln(\"Source Type: [https] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := Get(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tcase \"s3\":\n\t\tLog(fmt.Sprintln(\"Source Type: [s3] Detected, Fetching Source: \", source), level.debug)\n\t\tresp, err := S3Read(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp, nil\n\tdefault:\n\t\tLog(fmt.Sprintln(\"Source Type: [file] Detected, Fetching Source: \", source), level.debug)\n\t\tb, err := ioutil.ReadFile(source)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(b), nil\n\t}\n}\n\n\/\/ getName - Checks if arg is url or file and returns stack name and filepath\/url\nfunc getSource(s string) (string, string, error) {\n\tif strings.Contains(s, \"::\") {\n\t\tvals := strings.Split(s, \"::\")\n\t\tif len(vals) < 2 {\n\t\t\treturn \"\", \"\", errors.New(`Error, invalid url format --> Example: stackname::http:\/\/someurl OR stackname::s3:\/\/bucket\/key`)\n\t\t}\n\n\t\treturn vals[0], vals[1], nil\n\n\t}\n\n\tname := filepath.Base(strings.Replace(s, filepath.Ext(s), \"\", -1))\n\treturn name, s, nil\n\n}\n\n\/\/ configReader parses the config YAML file with Viper\nfunc configReader(conf string) error {\n\tviper.SetConfigType(\"yaml\")\n\n\tcfg, err := fetchContent(conf)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tLog(fmt.Sprintln(\"Reading Config String into viper:\", cfg), level.debug)\n\terr = viper.ReadConfig(bytes.NewBuffer([]byte(cfg)))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get basic settings\n\tregion = viper.GetString(\"region\")\n\tproject = viper.GetString(\"project\")\n\n\t\/\/ Get all the values of variables under cloudformation and put them into a map\n\tcfvars = make(map[string]interface{})\n\tstacks = make(map[string]*stack)\n\n\t\/\/ Get Global values\n\tcfvars[\"global\"] = viper.Get(\"global\")\n\n\tLog(fmt.Sprintln(\"Keys identified in Config:\", viper.AllKeys()), level.debug)\n\n\tfor _, val := range viper.Sub(\"stacks\").AllKeys() {\n\t\ts := strings.Split(val, \".\")[0]\n\t\tstacks[s] = &stack{}\n\t\tkey := \"stacks.\" + s\n\t\tLog(fmt.Sprintf(\"Evaluating: [%s] in config\"+\"\\n\", s), level.debug)\n\n\t\tLog(fmt.Sprintf(\"Checking if [%s] exists in [%s]\", key, strings.Join(viper.AllKeys(), \", \")), level.debug)\n\t\tif !strings.Contains(strings.Join(viper.AllKeys(), \"\"), key) {\n\t\t\treturn fmt.Errorf(\"Key not found in config: %s\", key)\n\t\t}\n\n\t\tfor _, v := range viper.Sub(key).AllKeys() {\n\t\t\tLog(fmt.Sprintln(\"Processing: \", v), level.debug)\n\t\t\t\/\/ Using case statement for setting keyword values, added for scalability later.\n\t\t\tswitch v {\n\t\t\tcase \"depends_on\":\n\t\t\t\tdept := viper.Get(fmt.Sprintf(\"stacks.%s.%s\", s, v)).([]interface{})\n\t\t\t\tLog(fmt.Sprintf(\"Found Dependency for [%s]: %s\", s, dept), level.debug)\n\t\t\t\tstacks[s].dependsOn = dept\n\t\t\tdefault:\n\t\t\t\t\/\/ TODO: Nothing for now - more built-in values to come... maybe\n\t\t\t}\n\n\t\t\t\/\/ Get Cloudformation values\n\t\t\tcfvars[s] = viper.Get(fmt.Sprintf(\"stacks.%s.cf\", s))\n\t\t}\n\n\t\tstacks[s].name = s\n\t\tstacks[s].setStackName()\n\t}\n\n\treturn nil\n}\n\n\/\/ genTimeParser - Parses templates before deploying them...\nfunc genTimeParser(source string) (string, error) {\n\n\ttempl, err := fetchContent(source)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Create template\n\tt, err := template.New(\"template\").Funcs(genTimeFunctions).Parse(templ)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ so that we can write to string\n\tvar doc bytes.Buffer\n\tt.Execute(&doc, cfvars)\n\treturn doc.String(), nil\n}\n\n\/\/ deployTimeParser - Parses templates during deployment to resolve specfic Dependency functions like stackout...\nfunc (s *stack) deployTimeParser() error {\n\n\t\/\/ Create template\n\tt, err := template.New(\"template\").Delims(\"<<\", \">>\").Funcs(deployTimeFunctions).Parse(s.template)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ so that we can write to string\n\tvar doc bytes.Buffer\n\tt.Execute(&doc, cfvars)\n\ts.template = doc.String()\n\tLog(fmt.Sprintf(\"Deploy Time Template Generate:\\n%s\", s.template), level.debug)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build go1.2,!go1.4\n\n\/\/ Copyright 2015 Robert S. Gerus. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar emptyContext map[string]string\n\n\/\/ func Lookup(context map[string]string, key string) interface{}\nvar testsLookup = []struct {\n\tkey string\n\texpectedValue string\n}{\n\t{\n\t\tkey: \"TestLookup\",\n\t\texpectedValue: \"this can be anything you can imagine, but now it's just a string\",\n\t},\n\t{\n\t\tkey: \"NotExisting\",\n\t\texpectedValue: \"<nil>\",\n\t},\n}\n\nfunc TestLookup(t *testing.T) {\n\tfor i, e := range testsLookup {\n\t\tv := Lookup(emptyContext, e.key)\n\t\tif fmt.Sprintf(\"%+v\", v) != fmt.Sprintf(\"%+v\", e.expectedValue) {\n\t\t\tt.Log(\"test#\", i, \"Lookup key\", e.key)\n\t\t\tt.Logf(\"expected: %+v\\n\", e.expectedValue)\n\t\t\tt.Logf(\"result : %+v\\n\", v)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\n\/\/ func LookupString(context map[string]string, key string) string\nvar testsLookupString = []struct {\n\tkey string\n\texpectedValue string\n}{\n\t{\n\t\tkey: \"TestLookupString\",\n\t\texpectedValue: \"this should be a string\",\n\t},\n\t{\n\t\tkey: \"NotExisting\",\n\t\texpectedValue: \"\",\n\t},\n}\n\nfunc TestLookupString(t *testing.T) {\n\tfor i, e := range testsLookupString {\n\t\tv := LookupString(emptyContext, e.key)\n\t\tif fmt.Sprintf(\"%+v\", v) != fmt.Sprintf(\"%+v\", e.expectedValue) {\n\t\t\tt.Log(\"test#\", i+1, \"Lookup key\", e.key)\n\t\t\tt.Logf(\"expected: %+v\\n\", e.expectedValue)\n\t\t\tt.Logf(\"result : %+v\\n\", v)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\n\/\/ func LookupInt(context map[string]string, key string) int\nvar testsLookupInt = []struct {\n\tkey string\n\texpectedValue int\n}{\n\t{\n\t\tkey: \"ThisWillBeAnInt\",\n\t\texpectedValue: 42,\n\t},\n\t{\n\t\tkey: \"NotExisting\",\n\t\texpectedValue: -1,\n\t},\n}\n\nfunc TestLookupInt(t *testing.T) {\n\tfor i, e := range testsLookupInt {\n\t\tv := LookupInt(emptyContext, e.key)\n\t\tif fmt.Sprintf(\"%+v\", v) != fmt.Sprintf(\"%+v\", e.expectedValue) {\n\t\t\tt.Log(\"test#\", i+1, \"Lookup key\", e.key)\n\t\t\tt.Logf(\"expected: %+v\\n\", e.expectedValue)\n\t\t\tt.Logf(\"result : %+v\\n\", v)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\n\/\/ func LookupStringSlice(context map[string]string, key string) []string\nvar testsLookupStringSlice = []struct {\n\tkey string\n\texpectedValue []string\n}{\n\t{\n\t\t\/\/ TestLookupStringSlice return slice is ordered\n\t\tkey: \"TestLookupStringSlice\",\n\t\texpectedValue: []string{\"a\", \"am\", \"be\", \"going\", \"i\", \"slice\", \"to\"},\n\t},\n\t{\n\t\tkey: \"NotExisting\",\n\t\texpectedValue: []string{\"\"},\n\t},\n}\n\nfunc TestLookupStringSlice(t *testing.T) {\n\tfor i, e := range testsLookupStringSlice {\n\t\tv := LookupStringSlice(emptyContext, e.key)\n\t\tif fmt.Sprintf(\"%+v\", v) != fmt.Sprintf(\"%+v\", e.expectedValue) {\n\t\t\tt.Log(\"test#\", i+1, \"Lookup key\", e.key)\n\t\t\tt.Logf(\"expected: %+v\\n\", e.expectedValue)\n\t\t\tt.Logf(\"result : %+v\\n\", v)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\n\/\/ func LookupStringMap(context map[string]string, key string) map[string]bool\nvar testsLookupStringMap = []struct {\n\tkey string\n\texpectedValue map[string]bool\n}{\n\t{\n\t\tkey: \"TestLookupStringMap\",\n\t\texpectedValue: map[string]bool{\n\t\t\t\"to\": true,\n\t\t\t\"be\": true,\n\t\t\t\"a\": true,\n\t\t\t\"map[string]bool\": true,\n\t\t\t\"I\": true,\n\t\t\t\"want\": true,\n\t\t},\n\t},\n\t{\n\t\tkey: \"NotExisting\",\n\t\texpectedValue: map[string]bool{},\n\t},\n}\n\nfunc TestLookupStringMap(t *testing.T) {\n\tfor i, e := range testsLookupStringMap {\n\t\tv := LookupStringMap(emptyContext, e.key)\n\t\tif !reflect.DeepEqual(v, e.expectedValue) {\n\t\t\tt.Log(\"test#\", i+1, \"Lookup key\", e.key)\n\t\t\tt.Logf(\"expected: %+v\\n\", e.expectedValue)\n\t\t\tt.Logf(\"result : %+v\\n\", v)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc configLookupHelper(map[string]string) []string {\n\treturn []string{\"this\/thing\/does\/not\/exist.json\", \".testconfig.json\"}\n}\n\nfunc init() {\n\tSetFileListBuilder(configLookupHelper)\n\tlog.SetOutput(ioutil.Discard)\n}\n<commit_msg>We only use go1.5 anyway now.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package authcookie implements creation and verification of signed\n\/\/ authentication cookies.\n\/\/\n\/\/ Cookie is a Base64 encoded (using URLEncoding, from RFC 4648) string, which\n\/\/ consists of concatenation of expiration time, login, and signature:\n\/\/\n\/\/ \texpiration time || login || signature\n\/\/\n\/\/ where expiration time is the number of seconds since Unix epoch UTC\n\/\/ indicating when this cookie must expire (4 bytes, big-endian, uint32), login\n\/\/ is a byte string of arbitrary length (at least 1 byte, not null-terminated),\n\/\/ and signature is 32 bytes of HMAC-SHA256(expiration_time || login, k), where\n\/\/ k = HMAC-SHA256(expiration_time || login, secret key).\n\/\/\n\/\/ Example:\n\/\/\n\/\/\tsecret := []byte(\"my secret key\")\n\/\/\n\/\/\t\/\/ Generate cookie valid for 24 hours for user \"bender\"\n\/\/\tcookie := authcookie.NewSinceNow(\"bender\", 24 * time.Hour, secret)\n\/\/\n\/\/\t\/\/ cookie is now:\n\/\/\t\/\/ Tajh02JlbmRlcskYMxowgwPj5QZ94jaxhDoh3n0Yp4hgGtUpeO0YbMTY\n\/\/\t\/\/ send it to user's browser..\n\/\/\t\n\/\/\t\/\/ To authenticate a user later, receive cookie and:\n\/\/\tlogin := authcookie.Login(cookie, secret)\n\/\/\tif login != \"\" {\n\/\/\t\t\/\/ access for login granted\n\/\/\t} else {\n\/\/\t\t\/\/ access denied\n\/\/\t}\n\/\/\n\/\/ Note that login and expiration time are not encrypted, they are only signed\n\/\/ and Base64 encoded.\n\/\/\n\/\/ For safety, the maximum length of base64-decoded cookie is limited to 1024\n\/\/ bytes.\npackage authcookie\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/subtle\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"time\"\n)\n\nconst (\n\tdecodedMinLength = 4 \/*expiration*\/ + 1 \/*login*\/ + 32 \/*signature*\/\n\tdecodedMaxLength = 1024 \/* maximum decoded length, for safety *\/\n)\n\n\/\/ MinLength is the minimum allowed length of cookie string.\n\/\/\n\/\/ It is useful for avoiding DoS attacks with too long cookies: before passing\n\/\/ a cookie to Parse or Login functions, check that it has length less than the\n\/\/ [maximum login length allowed in your application] + MinLength.\nvar MinLength = base64.URLEncoding.EncodedLen(decodedMinLength)\n\nfunc getSignature(b []byte, secret []byte) []byte {\n\tkeym := hmac.NewSHA256(secret)\n\tkeym.Write(b)\n\tm := hmac.NewSHA256(keym.Sum(nil))\n\tm.Write(b)\n\treturn m.Sum(nil)\n}\n\nvar (\n\tErrMalformedCookie = errors.New(\"malformed cookie\")\n\tErrWrongSignature = errors.New(\"wrong cookie signature\")\n)\n\n\/\/ New returns a signed authentication cookie for the given login,\n\/\/ expiration time, and secret key.\n\/\/ If the login is empty, the function returns an empty string.\nfunc New(login string, expires time.Time, secret []byte) string {\n\tif login == \"\" {\n\t\treturn \"\"\n\t}\n\tllen := len(login)\n\tb := make([]byte, llen+4+32)\n\t\/\/ Put expiration time.\n\tbinary.BigEndian.PutUint32(b, uint32(expires.Unix()))\n\t\/\/ Put login.\n\tcopy(b[4:], []byte(login))\n\t\/\/ Calculate and put signature.\n\tsig := getSignature([]byte(b[:4+llen]), secret)\n\tcopy(b[4+llen:], sig)\n\t\/\/ Base64-encode.\n\treturn base64.URLEncoding.EncodeToString(b)\n}\n\n\/\/ NewSinceNow returns a signed authetication cookie for the given login,\n\/\/ duration since current time, and secret key.\nfunc NewSinceNow(login string, dur time.Duration, secret []byte) string {\n\treturn New(login, time.Now().Add(dur), secret)\n}\n\n\/\/ Parse verifies the given cookie with the secret key and returns login and\n\/\/ expiration time extracted from the cookie. If the cookie fails verification\n\/\/ or is not well-formed, the function returns an error.\n\/\/\n\/\/ Callers must: \n\/\/\n\/\/ 1. Check for the returned error and deny access if it's present.\n\/\/\n\/\/ 2. Check the returned expiration time and deny access if it's in the past.\n\/\/\nfunc Parse(cookie string, secret []byte) (login string, expires time.Time, err error) {\n\tblen := base64.URLEncoding.DecodedLen(len(cookie))\n\t\/\/ Avoid allocation if cookie is too short or too long.\n\tif blen < decodedMinLength || blen > decodedMaxLength {\n\t\terr = ErrMalformedCookie\n\t\treturn\n\t}\n\tb, err := base64.URLEncoding.DecodeString(cookie)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Decoded length may be different from max length, which\n\t\/\/ we allocated, so check it, and set new length for b.\n\tblen = len(b)\n\tif blen < decodedMinLength {\n\t\terr = ErrMalformedCookie\n\t\treturn\n\t}\n\tb = b[:blen]\n\n\tsig := b[blen-32:]\n\tdata := b[:blen-32]\n\n\trealSig := getSignature(data, secret)\n\tif subtle.ConstantTimeCompare(realSig, sig) != 1 {\n\t\terr = ErrWrongSignature\n\t\treturn\n\t}\n\texpires = time.Unix(int64(binary.BigEndian.Uint32(data[:4])), 0)\n\tlogin = string(data[4:])\n\treturn\n}\n\n\/\/ Login returns a valid login extracted from the given cookie and verified\n\/\/ using the given secret key. If verification fails or the cookie expired,\n\/\/ the function returns an empty string.\nfunc Login(cookie string, secret []byte) string {\n\tl, exp, err := Parse(cookie, secret)\n\tif err != nil || exp.Before(time.Now()) {\n\t\treturn \"\"\n\t}\n\treturn l\n}\n<commit_msg>Pass hash to hmac.New instead of using NewSHA256.<commit_after>\/\/ Package authcookie implements creation and verification of signed\n\/\/ authentication cookies.\n\/\/\n\/\/ Cookie is a Base64 encoded (using URLEncoding, from RFC 4648) string, which\n\/\/ consists of concatenation of expiration time, login, and signature:\n\/\/\n\/\/ \texpiration time || login || signature\n\/\/\n\/\/ where expiration time is the number of seconds since Unix epoch UTC\n\/\/ indicating when this cookie must expire (4 bytes, big-endian, uint32), login\n\/\/ is a byte string of arbitrary length (at least 1 byte, not null-terminated),\n\/\/ and signature is 32 bytes of HMAC-SHA256(expiration_time || login, k), where\n\/\/ k = HMAC-SHA256(expiration_time || login, secret key).\n\/\/\n\/\/ Example:\n\/\/\n\/\/\tsecret := []byte(\"my secret key\")\n\/\/\n\/\/\t\/\/ Generate cookie valid for 24 hours for user \"bender\"\n\/\/\tcookie := authcookie.NewSinceNow(\"bender\", 24 * time.Hour, secret)\n\/\/\n\/\/\t\/\/ cookie is now:\n\/\/\t\/\/ Tajh02JlbmRlcskYMxowgwPj5QZ94jaxhDoh3n0Yp4hgGtUpeO0YbMTY\n\/\/\t\/\/ send it to user's browser..\n\/\/\t\n\/\/\t\/\/ To authenticate a user later, receive cookie and:\n\/\/\tlogin := authcookie.Login(cookie, secret)\n\/\/\tif login != \"\" {\n\/\/\t\t\/\/ access for login granted\n\/\/\t} else {\n\/\/\t\t\/\/ access denied\n\/\/\t}\n\/\/\n\/\/ Note that login and expiration time are not encrypted, they are only signed\n\/\/ and Base64 encoded.\n\/\/\n\/\/ For safety, the maximum length of base64-decoded cookie is limited to 1024\n\/\/ bytes.\npackage authcookie\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"crypto\/subtle\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"time\"\n)\n\nconst (\n\tdecodedMinLength = 4 \/*expiration*\/ + 1 \/*login*\/ + 32 \/*signature*\/\n\tdecodedMaxLength = 1024 \/* maximum decoded length, for safety *\/\n)\n\n\/\/ MinLength is the minimum allowed length of cookie string.\n\/\/\n\/\/ It is useful for avoiding DoS attacks with too long cookies: before passing\n\/\/ a cookie to Parse or Login functions, check that it has length less than the\n\/\/ [maximum login length allowed in your application] + MinLength.\nvar MinLength = base64.URLEncoding.EncodedLen(decodedMinLength)\n\nfunc getSignature(b []byte, secret []byte) []byte {\n\tkeym := hmac.New(sha256.New, secret)\n\tkeym.Write(b)\n\tm := hmac.New(sha256.New, keym.Sum(nil))\n\tm.Write(b)\n\treturn m.Sum(nil)\n}\n\nvar (\n\tErrMalformedCookie = errors.New(\"malformed cookie\")\n\tErrWrongSignature = errors.New(\"wrong cookie signature\")\n)\n\n\/\/ New returns a signed authentication cookie for the given login,\n\/\/ expiration time, and secret key.\n\/\/ If the login is empty, the function returns an empty string.\nfunc New(login string, expires time.Time, secret []byte) string {\n\tif login == \"\" {\n\t\treturn \"\"\n\t}\n\tllen := len(login)\n\tb := make([]byte, llen+4+32)\n\t\/\/ Put expiration time.\n\tbinary.BigEndian.PutUint32(b, uint32(expires.Unix()))\n\t\/\/ Put login.\n\tcopy(b[4:], []byte(login))\n\t\/\/ Calculate and put signature.\n\tsig := getSignature([]byte(b[:4+llen]), secret)\n\tcopy(b[4+llen:], sig)\n\t\/\/ Base64-encode.\n\treturn base64.URLEncoding.EncodeToString(b)\n}\n\n\/\/ NewSinceNow returns a signed authetication cookie for the given login,\n\/\/ duration since current time, and secret key.\nfunc NewSinceNow(login string, dur time.Duration, secret []byte) string {\n\treturn New(login, time.Now().Add(dur), secret)\n}\n\n\/\/ Parse verifies the given cookie with the secret key and returns login and\n\/\/ expiration time extracted from the cookie. If the cookie fails verification\n\/\/ or is not well-formed, the function returns an error.\n\/\/\n\/\/ Callers must: \n\/\/\n\/\/ 1. Check for the returned error and deny access if it's present.\n\/\/\n\/\/ 2. Check the returned expiration time and deny access if it's in the past.\n\/\/\nfunc Parse(cookie string, secret []byte) (login string, expires time.Time, err error) {\n\tblen := base64.URLEncoding.DecodedLen(len(cookie))\n\t\/\/ Avoid allocation if cookie is too short or too long.\n\tif blen < decodedMinLength || blen > decodedMaxLength {\n\t\terr = ErrMalformedCookie\n\t\treturn\n\t}\n\tb, err := base64.URLEncoding.DecodeString(cookie)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Decoded length may be different from max length, which\n\t\/\/ we allocated, so check it, and set new length for b.\n\tblen = len(b)\n\tif blen < decodedMinLength {\n\t\terr = ErrMalformedCookie\n\t\treturn\n\t}\n\tb = b[:blen]\n\n\tsig := b[blen-32:]\n\tdata := b[:blen-32]\n\n\trealSig := getSignature(data, secret)\n\tif subtle.ConstantTimeCompare(realSig, sig) != 1 {\n\t\terr = ErrWrongSignature\n\t\treturn\n\t}\n\texpires = time.Unix(int64(binary.BigEndian.Uint32(data[:4])), 0)\n\tlogin = string(data[4:])\n\treturn\n}\n\n\/\/ Login returns a valid login extracted from the given cookie and verified\n\/\/ using the given secret key. If verification fails or the cookie expired,\n\/\/ the function returns an empty string.\nfunc Login(cookie string, secret []byte) string {\n\tl, exp, err := Parse(cookie, secret)\n\tif err != nil || exp.Before(time.Now()) {\n\t\treturn \"\"\n\t}\n\treturn l\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The go-python Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bind\n\nimport (\n\t\"fmt\"\n)\n\nfunc (g *pyGen) genConst(c *Const) {\n\tif isPyCompatVar(c.sym) != nil {\n\t\treturn\n\t}\n\tif c.sym.isSignature() {\n\t\treturn\n\t}\n\tg.genConstValue(c)\n}\n\nfunc (g *pyGen) genVar(v *Var) {\n\tif isPyCompatVar(v.sym) != nil {\n\t\treturn\n\t}\n\tif v.sym.isSignature() {\n\t\treturn\n\t}\n\tg.genVarGetter(v)\n\tif !v.sym.isArray() {\n\t\tg.genVarSetter(v)\n\t}\n}\n\nfunc (g *pyGen) genVarGetter(v *Var) {\n\tgopkg := g.pkg.Name()\n\tpkgname := g.outname\n\tcgoFn := v.Name() \/\/ plain name is the getter\n\tqCgoFn := gopkg + \"_\" + cgoFn\n\tqFn := \"_\" + pkgname + \".\" + qCgoFn\n\tqVn := gopkg + \".\" + v.Name()\n\n\tg.pywrap.Printf(\"def %s():\\n\", cgoFn)\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s\\n%s Gets Go Variable: %s\\n%s\\n%s\\n\", `\"\"\"`, cgoFn, qVn, v.doc, `\"\"\"`)\n\tif v.sym.hasHandle() {\n\t\tcvnm := v.sym.pyPkgId(g.pkg.pkg)\n\t\tg.pywrap.Printf(\"return %s(handle=%s())\\n\", cvnm, qFn)\n\t} else {\n\t\tg.pywrap.Printf(\"return %s()\\n\", qFn)\n\t}\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"\\n\")\n\n\tg.gofile.Printf(\"\/\/export %s\\n\", qCgoFn)\n\tg.gofile.Printf(\"func %s() %s {\\n\", qCgoFn, v.sym.cgoname)\n\tg.gofile.Indent()\n\tg.gofile.Printf(\"return \")\n\tif v.sym.go2py != \"\" {\n\t\tif v.sym.hasHandle() && !v.sym.isPtrOrIface() {\n\t\t\tg.gofile.Printf(\"%s(&%s)%s\", v.sym.go2py, qVn, v.sym.go2pyParenEx)\n\t\t} else {\n\t\t\tg.gofile.Printf(\"%s(%s)%s\", v.sym.go2py, qVn, v.sym.go2pyParenEx)\n\t\t}\n\t} else {\n\t\tg.gofile.Printf(\"%s\", qVn)\n\t}\n\tg.gofile.Printf(\"\\n\")\n\tg.gofile.Outdent()\n\tg.gofile.Printf(\"}\\n\\n\")\n\n\tg.pybuild.Printf(\"mod.add_function('%s', retval('%s'), [])\\n\", qCgoFn, v.sym.cpyname)\n}\n\nfunc (g *pyGen) genVarSetter(v *Var) {\n\tgopkg := g.pkg.Name()\n\tpkgname := g.outname\n\tcgoFn := fmt.Sprintf(\"Set_%s\", v.Name())\n\tqCgoFn := gopkg + \"_\" + cgoFn\n\tqFn := \"_\" + pkgname + \".\" + qCgoFn\n\tqVn := gopkg + \".\" + v.Name()\n\n\tg.pywrap.Printf(\"def %s(value):\\n\", cgoFn)\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s\\n%s Sets Go Variable: %s\\n%s\\n%s\\n\", `\"\"\"`, cgoFn, qVn, v.doc, `\"\"\"`)\n\tg.pywrap.Printf(\"if isinstance(value, go.GoClass):\\n\")\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s(value.handle)\\n\", qFn)\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"else:\\n\")\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s(value)\\n\", qFn)\n\tg.pywrap.Outdent()\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"\\n\")\n\n\tg.gofile.Printf(\"\/\/export %s\\n\", qCgoFn)\n\tg.gofile.Printf(\"func %s(val %s) {\\n\", qCgoFn, v.sym.cgoname)\n\tg.gofile.Indent()\n\tif v.sym.py2go != \"\" {\n\t\tg.gofile.Printf(\"%s = %s(val)%s\", qVn, v.sym.py2go, v.sym.py2goParenEx)\n\t} else {\n\t\tg.gofile.Printf(\"%s = val\", qVn)\n\t}\n\tg.gofile.Printf(\"\\n\")\n\tg.gofile.Outdent()\n\tg.gofile.Printf(\"}\\n\\n\")\n\n\tg.pybuild.Printf(\"mod.add_function('%s', None, [param('%s', 'val')])\\n\", qCgoFn, v.sym.cpyname)\n}\n\nfunc (g *pyGen) genConstValue(c *Const) {\n\t\/\/ constants go directly into wrapper as-is\n\tg.pywrap.Printf(\"%s = %s\\n\", c.GoName(), c.obj.Val().ExactString())\n}\n<commit_msg>bind: use python case for booleans<commit_after>\/\/ Copyright 2019 The go-python Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bind\n\nimport (\n\t\"fmt\"\n)\n\nfunc (g *pyGen) genConst(c *Const) {\n\tif isPyCompatVar(c.sym) != nil {\n\t\treturn\n\t}\n\tif c.sym.isSignature() {\n\t\treturn\n\t}\n\tg.genConstValue(c)\n}\n\nfunc (g *pyGen) genVar(v *Var) {\n\tif isPyCompatVar(v.sym) != nil {\n\t\treturn\n\t}\n\tif v.sym.isSignature() {\n\t\treturn\n\t}\n\tg.genVarGetter(v)\n\tif !v.sym.isArray() {\n\t\tg.genVarSetter(v)\n\t}\n}\n\nfunc (g *pyGen) genVarGetter(v *Var) {\n\tgopkg := g.pkg.Name()\n\tpkgname := g.outname\n\tcgoFn := v.Name() \/\/ plain name is the getter\n\tqCgoFn := gopkg + \"_\" + cgoFn\n\tqFn := \"_\" + pkgname + \".\" + qCgoFn\n\tqVn := gopkg + \".\" + v.Name()\n\n\tg.pywrap.Printf(\"def %s():\\n\", cgoFn)\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s\\n%s Gets Go Variable: %s\\n%s\\n%s\\n\", `\"\"\"`, cgoFn, qVn, v.doc, `\"\"\"`)\n\tif v.sym.hasHandle() {\n\t\tcvnm := v.sym.pyPkgId(g.pkg.pkg)\n\t\tg.pywrap.Printf(\"return %s(handle=%s())\\n\", cvnm, qFn)\n\t} else {\n\t\tg.pywrap.Printf(\"return %s()\\n\", qFn)\n\t}\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"\\n\")\n\n\tg.gofile.Printf(\"\/\/export %s\\n\", qCgoFn)\n\tg.gofile.Printf(\"func %s() %s {\\n\", qCgoFn, v.sym.cgoname)\n\tg.gofile.Indent()\n\tg.gofile.Printf(\"return \")\n\tif v.sym.go2py != \"\" {\n\t\tif v.sym.hasHandle() && !v.sym.isPtrOrIface() {\n\t\t\tg.gofile.Printf(\"%s(&%s)%s\", v.sym.go2py, qVn, v.sym.go2pyParenEx)\n\t\t} else {\n\t\t\tg.gofile.Printf(\"%s(%s)%s\", v.sym.go2py, qVn, v.sym.go2pyParenEx)\n\t\t}\n\t} else {\n\t\tg.gofile.Printf(\"%s\", qVn)\n\t}\n\tg.gofile.Printf(\"\\n\")\n\tg.gofile.Outdent()\n\tg.gofile.Printf(\"}\\n\\n\")\n\n\tg.pybuild.Printf(\"mod.add_function('%s', retval('%s'), [])\\n\", qCgoFn, v.sym.cpyname)\n}\n\nfunc (g *pyGen) genVarSetter(v *Var) {\n\tgopkg := g.pkg.Name()\n\tpkgname := g.outname\n\tcgoFn := fmt.Sprintf(\"Set_%s\", v.Name())\n\tqCgoFn := gopkg + \"_\" + cgoFn\n\tqFn := \"_\" + pkgname + \".\" + qCgoFn\n\tqVn := gopkg + \".\" + v.Name()\n\n\tg.pywrap.Printf(\"def %s(value):\\n\", cgoFn)\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s\\n%s Sets Go Variable: %s\\n%s\\n%s\\n\", `\"\"\"`, cgoFn, qVn, v.doc, `\"\"\"`)\n\tg.pywrap.Printf(\"if isinstance(value, go.GoClass):\\n\")\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s(value.handle)\\n\", qFn)\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"else:\\n\")\n\tg.pywrap.Indent()\n\tg.pywrap.Printf(\"%s(value)\\n\", qFn)\n\tg.pywrap.Outdent()\n\tg.pywrap.Outdent()\n\tg.pywrap.Printf(\"\\n\")\n\n\tg.gofile.Printf(\"\/\/export %s\\n\", qCgoFn)\n\tg.gofile.Printf(\"func %s(val %s) {\\n\", qCgoFn, v.sym.cgoname)\n\tg.gofile.Indent()\n\tif v.sym.py2go != \"\" {\n\t\tg.gofile.Printf(\"%s = %s(val)%s\", qVn, v.sym.py2go, v.sym.py2goParenEx)\n\t} else {\n\t\tg.gofile.Printf(\"%s = val\", qVn)\n\t}\n\tg.gofile.Printf(\"\\n\")\n\tg.gofile.Outdent()\n\tg.gofile.Printf(\"}\\n\\n\")\n\n\tg.pybuild.Printf(\"mod.add_function('%s', None, [param('%s', 'val')])\\n\", qCgoFn, v.sym.cpyname)\n}\n\nfunc (g *pyGen) genConstValue(c *Const) {\n\t\/\/ constants go directly into wrapper as-is\n\tval := c.obj.Val().ExactString()\n\tswitch val {\n\tcase \"true\":\n\t\tval = \"True\"\n\tcase \"false\":\n\t\tval = \"False\"\n\t}\n\tg.pywrap.Printf(\"%s = %s\\n\", c.GoName(), val)\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ses\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsSesTemplate() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSesTemplateCreate,\n\t\tRead: resourceAwsSesTemplateRead,\n\t\tUpdate: resourceAwsSesTemplateUpdate,\n\t\tDelete: resourceAwsSesTemplateDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateSesTemplateName,\n\t\t\t},\n\t\t\t\"html\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateSesTemplateHtml,\n\t\t\t},\n\t\t\t\"subject\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"text\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateSesTemplateText,\n\t\t\t},\n\t\t},\n\t}\n}\nfunc resourceAwsSesTemplateCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\n\ttemplateName := d.Get(\"name\").(string)\n\n\ttemplate := ses.Template{\n\t\tTemplateName: aws.String(templateName),\n\t}\n\n\tif v, ok := d.GetOk(\"html\"); ok {\n\t\ttemplate.HtmlPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"subject\"); ok {\n\t\ttemplate.SubjectPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"text\"); ok {\n\t\ttemplate.TextPart = aws.String(v.(string))\n\t}\n\n\tinput := ses.CreateTemplateInput{\n\t\tTemplate: &template,\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating SES template: %#v\", input)\n\t_, err := conn.CreateTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Creating SES template failed: %s\", err.Error())\n\t}\n\td.SetId(templateName)\n\n\treturn resourceAwsSesTemplateRead(d, meta)\n}\n\nfunc resourceAwsSesTemplateRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\tinput := ses.GetTemplateInput{\n\t\tTemplateName: aws.String(d.Id()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Reading SES template: %#v\", input)\n\tgto, err := conn.GetTemplate(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, \"TemplateDoesNotExist\", \"\") {\n\t\t\tlog.Printf(\"[WARN] SES template %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Reading SES template '%s' failed: %s\", *input.TemplateName, err.Error())\n\t}\n\n\td.Set(\"html\", gto.Template.HtmlPart)\n\td.Set(\"name\", gto.Template.TemplateName)\n\td.Set(\"subject\", gto.Template.SubjectPart)\n\td.Set(\"text\", gto.Template.TextPart)\n\n\treturn nil\n}\n\nfunc resourceAwsSesTemplateUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\n\ttemplateName := d.Id()\n\n\ttemplate := ses.Template{\n\t\tHtmlPart: aws.String(d.Get(\"html\").(string)),\n\t\tTemplateName: aws.String(templateName),\n\t\tSubjectPart: aws.String(d.Get(\"subject\").(string)),\n\t\tTextPart: aws.String(d.Get(\"text\").(string)),\n\t}\n\n\tinput := ses.UpdateTemplateInput{\n\t\tTemplate: &template,\n\t}\n\n\tlog.Printf(\"[DEBUG] Update SES template: %#v\", input)\n\t_, err := conn.UpdateTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Updating SES template '%s' failed: %s\", templateName, err.Error())\n\t}\n\n\treturn resourceAwsSesTemplateRead(d, meta)\n}\n\nfunc resourceAwsSesTemplateDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\tinput := ses.DeleteTemplateInput{\n\t\tTemplateName: aws.String(d.Id()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Delete SES template: %#v\", input)\n\t_, err := conn.DeleteTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Deleting SES template '%s' failed: %s\", *input.TemplateName, err.Error())\n\t}\n\treturn nil\n}\n<commit_msg>resource\/aws_ses_template: Send only specified attributes for update<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ses\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsSesTemplate() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSesTemplateCreate,\n\t\tRead: resourceAwsSesTemplateRead,\n\t\tUpdate: resourceAwsSesTemplateUpdate,\n\t\tDelete: resourceAwsSesTemplateDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateSesTemplateName,\n\t\t\t},\n\t\t\t\"html\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateSesTemplateHtml,\n\t\t\t},\n\t\t\t\"subject\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"text\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateSesTemplateText,\n\t\t\t},\n\t\t},\n\t}\n}\nfunc resourceAwsSesTemplateCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\n\ttemplateName := d.Get(\"name\").(string)\n\n\ttemplate := ses.Template{\n\t\tTemplateName: aws.String(templateName),\n\t}\n\n\tif v, ok := d.GetOk(\"html\"); ok {\n\t\ttemplate.HtmlPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"subject\"); ok {\n\t\ttemplate.SubjectPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"text\"); ok {\n\t\ttemplate.TextPart = aws.String(v.(string))\n\t}\n\n\tinput := ses.CreateTemplateInput{\n\t\tTemplate: &template,\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating SES template: %#v\", input)\n\t_, err := conn.CreateTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Creating SES template failed: %s\", err.Error())\n\t}\n\td.SetId(templateName)\n\n\treturn resourceAwsSesTemplateRead(d, meta)\n}\n\nfunc resourceAwsSesTemplateRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\tinput := ses.GetTemplateInput{\n\t\tTemplateName: aws.String(d.Id()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Reading SES template: %#v\", input)\n\tgto, err := conn.GetTemplate(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, \"TemplateDoesNotExist\", \"\") {\n\t\t\tlog.Printf(\"[WARN] SES template %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Reading SES template '%s' failed: %s\", *input.TemplateName, err.Error())\n\t}\n\n\td.Set(\"html\", gto.Template.HtmlPart)\n\td.Set(\"name\", gto.Template.TemplateName)\n\td.Set(\"subject\", gto.Template.SubjectPart)\n\td.Set(\"text\", gto.Template.TextPart)\n\n\treturn nil\n}\n\nfunc resourceAwsSesTemplateUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\n\ttemplateName := d.Id()\n\n\ttemplate := ses.Template{\n\t\tTemplateName: aws.String(templateName),\n\t}\n\n\tif v, ok := d.GetOk(\"html\"); ok {\n\t\ttemplate.HtmlPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"subject\"); ok {\n\t\ttemplate.SubjectPart = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"text\"); ok {\n\t\ttemplate.TextPart = aws.String(v.(string))\n\t}\n\n\tinput := ses.UpdateTemplateInput{\n\t\tTemplate: &template,\n\t}\n\n\tlog.Printf(\"[DEBUG] Update SES template: %#v\", input)\n\t_, err := conn.UpdateTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Updating SES template '%s' failed: %s\", templateName, err.Error())\n\t}\n\n\treturn resourceAwsSesTemplateRead(d, meta)\n}\n\nfunc resourceAwsSesTemplateDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).sesConn\n\tinput := ses.DeleteTemplateInput{\n\t\tTemplateName: aws.String(d.Id()),\n\t}\n\n\tlog.Printf(\"[DEBUG] Delete SES template: %#v\", input)\n\t_, err := conn.DeleteTemplate(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Deleting SES template '%s' failed: %s\", *input.TemplateName, err.Error())\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tzmq \"github.com\/pebbe\/zmq4\"\n\t\"log\"\n\t\"time\"\n)\n\nfunc main() {\n\t\/\/ create connection socket\n\tcontext, err := zmq.NewContext()\n\n\tsocket, err := context.NewSocket(zmq.ROUTER)\n\tdefer socket.Close()\n\n\tlocation := \"tcp:\/\/127.0.0.1:7171\"\n\n\tsocket.Bind(location)\n\n\tpoller := zmq.NewPoller()\n\tpoller.Add(socket, zmq.POLLIN)\n\n\tif err != nil {\n\t\tlog.Printf(\"Could not start broker: %v\\n\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Now listening at: \" + location)\n\n\tworkers := map[string]string{}\n\n\tfor {\n\t\tpolled, err := poller.Poll(time.Second * 10)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"E: Interrupted\")\n\t\t\tbreak \/\/ Interrupted\n\t\t}\n\n\t\tif len(polled) > 0 {\n\t\t\tmsg, err := socket.RecvMessage(0)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"E: Interrupted\")\n\t\t\t\tbreak \/\/ Interrupted\n\t\t\t}\n\n\t\t\tsender := msg[0]\n\t\t\tprotocol := msg[1]\n\t\t\tcommand := msg[2]\n\t\t\tdata := msg[3:]\n\n\t\t\tif protocol == \"MDPW02\" && command == \"1\" {\n\t\t\t\tlog.Printf(\"I: %s is a worker\\n\", sender)\n\t\t\t\tworkers[protocol] = sender\n\t\t\t\tlog.Printf(\"I: workers - %q\\n\", workers)\n\t\t\t}\n\n\t\t\tif protocol == \"MDPW02\" && command == \"2\" {\n\t\t\t\tlog.Printf(\"I: %s replying\\n\", sender)\n\t\t\t\tcorrelationID := data[1]\n\t\t\t\tresponse := data[2:]\n\t\t\t\tsocket.SendMessage(data[0], correlationID, \"SUCCESS\", response)\n\t\t\t}\n\n\t\t\tif protocol == \"MDPC02\" && command == \"1\" {\n\t\t\t\tlog.Printf(\"I: %s is a client\\n\", sender)\n\t\t\t\tif len(workers) > 0 {\n\t\t\t\t\t\/\/ send message to worker, don't care about services yet\n\t\t\t\t\tlog.Printf(\"I: sending data to worker %s\", workers[\"MDPW02\"])\n\t\t\t\t\tmsgCount, err := socket.SendMessage(workers[\"MDPW02\"], sender, data)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"! %x\", err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"I: sent %i bytes\", msgCount)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlog.Printf(\"I: received message: %q\\n\", msg)\n\t\t\tlog.Printf(\"I: sender: %q\\n\", sender)\n\t\t\tlog.Printf(\"I: protocol: %q\\n\", protocol)\n\t\t\tlog.Printf(\"I: command: %q\\n\", command)\n\t\t\tlog.Printf(\"I: data: %q\\n\", data)\n\t\t}\n\t}\n}\n<commit_msg>Add service directory to broker.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\tzmq \"github.com\/pebbe\/zmq4\"\n\t\"log\"\n\t\"time\"\n)\n\ntype ServiceDirectory struct {\n\tservices map[string]*ServiceType\n\tindex map[string][]string\n\tworkers map[string]*Worker\n}\n\ntype ServiceType struct {\n\tname string\n\tinstances map[string]*Worker\n}\n\ntype Worker struct {\n\tname string \/\/ human readable id\n\tidentity string \/\/ routing frame\n}\n\nfunc (serviceDirectory *ServiceDirectory) AddWorker(identity string, serviceTypeName string, serviceInstanceName string) {\n\t\/\/ create or return worker\n\tworker, exists := serviceDirectory.workers[identity]\n\n\tif exists == false {\n\t\tname := fmt.Sprintf(\"%q\", identity)\n\t\tworker = &Worker{\n\t\t\tidentity: identity,\n\t\t\tname: name,\n\t\t}\n\n\t\tserviceDirectory.workers[identity] = worker\n\t}\n\n\tlog.Printf(\"?: Worker - %q\", worker)\n\n\t\/\/ create or return serviceType\n\tserviceType, exists := serviceDirectory.services[serviceTypeName]\n\n\tif exists == false {\n\t\tserviceType = &ServiceType{\n\t\t\tname: serviceTypeName,\n\t\t\tinstances: make(map[string]*Worker),\n\t\t}\n\n\t\tserviceDirectory.services[serviceTypeName] = serviceType\n\t}\n\n\t\/\/ register serviceInstance, or err if already found\n\t_, exists = serviceType.instances[serviceInstanceName]\n\n\tif exists == false {\n\t\tlog.Printf(\"Adding worker %q to %s.%s\", worker, serviceTypeName, serviceInstanceName)\n\t\tserviceType.instances[serviceInstanceName] = worker\n\t}\n\n\t\/\/ add to index\n\tserviceDirectory.index[serviceTypeName] = append(serviceDirectory.index[serviceTypeName], serviceInstanceName)\n}\n\nfunc (serviceDirectory *ServiceDirectory) WorkerForService(serviceTypeName string, serviceInstanceName string) (serviceWorker *Worker, err error) {\n\t\/\/ check for serviceType\n\tserviceType, exists := serviceDirectory.index[serviceTypeName]\n\n\tif !exists {\n\t\terr = errors.New(\"Unknown serviceType\")\n\t\treturn\n\t}\n\n\tlog.Printf(\"Found serviceType %q\", serviceType)\n\n\tfor _, serviceInstance := range serviceType {\n\t\tif serviceInstance == serviceInstanceName {\n\t\t\tlog.Printf(\"Match: %q\", serviceDirectory.services[serviceTypeName])\n\t\t\tserviceWorker, exists = serviceDirectory.services[serviceTypeName].instances[serviceInstanceName]\n\t\t\tif exists {\n\t\t\t\tlog.Printf(\"Found serviceWorker: %q\", serviceWorker)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\terr = errors.New(\"Unknown serviceInstance\")\n\treturn\n}\n\nfunc main() {\n\t\/\/ create connection socket\n\tcontext, err := zmq.NewContext()\n\n\tsocket, err := context.NewSocket(zmq.ROUTER)\n\tdefer socket.Close()\n\n\tlocation := \"tcp:\/\/127.0.0.1:7171\"\n\n\tsocket.Bind(location)\n\n\tpoller := zmq.NewPoller()\n\tpoller.Add(socket, zmq.POLLIN)\n\n\tif err != nil {\n\t\tlog.Printf(\"Could not start broker: %v\\n\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Now listening at: \" + location)\n\n\tserviceDirectory := ServiceDirectory{\n\t\tservices: make(map[string]*ServiceType),\n\t\tworkers: make(map[string]*Worker),\n\t\tindex: make(map[string][]string),\n\t}\n\n\tfor {\n\t\tpolled, err := poller.Poll(time.Second * 10)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"E: Interrupted\")\n\t\t\tbreak \/\/ Interrupted\n\t\t}\n\n\t\tif len(polled) > 0 {\n\t\t\tmsg, err := socket.RecvMessage(0)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"E: Interrupted\")\n\t\t\t\tbreak \/\/ Interrupted\n\t\t\t}\n\n\t\t\tsender := msg[0]\n\t\t\tprotocol := msg[1]\n\t\t\tcommand := msg[2]\n\t\t\tdata := msg[3:]\n\n\t\t\tif protocol == \"MDPW02\" && command == \"1\" {\n\t\t\t\tlog.Printf(\"I: %s is a worker\\n\", sender)\n\t\t\t\tserviceType := data[0]\n\t\t\t\tfor _, serviceInstance := range data[1:] {\n\t\t\t\t\tlog.Printf(\"?: %q\\n\", serviceInstance)\n\t\t\t\t\tserviceDirectory.AddWorker(sender, serviceType, serviceInstance)\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"I: services - %q\\n\", serviceDirectory.index)\n\t\t\t}\n\n\t\t\tif protocol == \"MDPW02\" && command == \"2\" {\n\t\t\t\tlog.Printf(\"I: %s replying\\n\", sender)\n\t\t\t\tcorrelationID := data[1]\n\t\t\t\tresponse := data[2:]\n\t\t\t\tsocket.SendMessage(data[0], correlationID, \"SUCCESS\", response)\n\t\t\t}\n\n\t\t\tif protocol == \"MDPC02\" && command == \"1\" {\n\t\t\t\tlog.Printf(\"I: %s is a client\\n\", sender)\n\t\t\t\tlog.Printf(\"I: data - %q\", data)\n\n\t\t\t\t\/\/correlationId := data[0]\n\t\t\t\tserviceType := data[1]\n\t\t\t\tserviceInstance := data[2]\n\t\t\t\t\/\/msg := data[3:]\n\n\t\t\t\tworker, err := serviceDirectory.WorkerForService(serviceType, serviceInstance)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"I: No worker for %s.%s\", serviceType, serviceInstance)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"I: sending data to worker %s\", worker.name)\n\t\t\t\tmsgCount, err := socket.SendMessage(worker.identity, sender, data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"! %x\", err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"I: sent %i bytes\", msgCount)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/log.Printf(\"I: received message: %q\\n\", msg)\n\t\t\t\/\/log.Printf(\"I: sender: %q\\n\", sender)\n\t\t\t\/\/log.Printf(\"I: protocol: %q\\n\", protocol)\n\t\t\t\/\/log.Printf(\"I: command: %q\\n\", command)\n\t\t\t\/\/log.Printf(\"I: data: %q\\n\", data)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ \n\/\/ go-sonos\n\/\/ ========\n\/\/ \n\/\/ Copyright (c) 2012, Ian T. Richards <ianr@panix.com>\n\/\/ All rights reserved.\n\/\/ \n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions\n\/\/ are met:\n\/\/ \n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/ \n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n\/\/ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n\/\/ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n\/\/ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n\/\/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/ \n\npackage sonos\n\nimport (\n\t\"github.com\/ianr0bkny\/go-sonos\/model\"\n\t\"github.com\/ianr0bkny\/go-sonos\/upnp\"\n\t_ \"log\"\n\t\"strings\"\n)\n\nconst (\n\tObjectID_Attributes = \"A:\"\n\tObjectID_MusicShares = \"S:\"\n\tObjectID_Queues = \"Q:\"\n\tObjectID_SavedQueues = \"SQ:\"\n\tObjectID_InternetRadio = \"R:\"\n\tObjectID_EntireNetwork = \"EN:\"\n\t\/\/\n\tObjectID_Queue_AVT_Instance_0 = \"Q:0\"\n\t\/\/\n\tObjectID_Attribute_Genres = \"A:GENRE\"\n\tObjectID_Attribute_Album = \"A:ALBUM\"\n\tObjectID_Attribute_Artist = \"A:ARTIST\"\n)\n\nfunc (this *Sonos) GetRootLevelChildren() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tupnp.BrowseObjectID_Root,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListQueues() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Queues,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListSavedQueues() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_SavedQueues,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListInternetRadio() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_InternetRadio,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListAttributes() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Attributes,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListMusicShares() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_MusicShares,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetAllGenres() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Attribute_Genres,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc objectIDForGenre(genre string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Genres, genre}, \"\/\")\n}\n\nfunc objectIDForAlbum(album string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Album, album}, \"\/\")\n}\n\nfunc objectIDForArtist(artist string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Artist, artist}, \"\/\")\n}\n\nfunc (this *Sonos) GetGenreArtists(genre string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForGenre(genre),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListChildren(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetMetadata(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseMetadata,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetDirectChildren(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetQueueContents() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Queue_AVT_Instance_0,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetAlbumTracks(album string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForAlbum(album),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetTrackFromAlbum(album, track string) ([]model.Object, error) {\n\tif tracks, err := this.GetAlbumTracks(album); nil != err {\n\t\treturn nil, err\n\t} else {\n\t\tvar track_objs []model.Object\n\t\tfor _, track_obj := range tracks {\n\t\t\tif track_obj.Title() == track {\n\t\t\t\ttrack_objs = append(track_objs, track_obj)\n\t\t\t}\n\t\t}\n\t\treturn track_objs, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (this *Sonos) GetArtistAlbums(artist string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForArtist(artist),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n<commit_msg>Checkpoint<commit_after>\/\/ \n\/\/ go-sonos\n\/\/ ========\n\/\/ \n\/\/ Copyright (c) 2012, Ian T. Richards <ianr@panix.com>\n\/\/ All rights reserved.\n\/\/ \n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions\n\/\/ are met:\n\/\/ \n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/ \n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n\/\/ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n\/\/ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n\/\/ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n\/\/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/ \n\npackage sonos\n\nimport (\n\t\"github.com\/ianr0bkny\/go-sonos\/model\"\n\t\"github.com\/ianr0bkny\/go-sonos\/upnp\"\n\t\"log\"\n\t\"strings\"\n)\n\nconst (\n\tObjectID_Attributes = \"A:\"\n\tObjectID_MusicShares = \"S:\"\n\tObjectID_Queues = \"Q:\"\n\tObjectID_SavedQueues = \"SQ:\"\n\tObjectID_InternetRadio = \"R:\"\n\tObjectID_EntireNetwork = \"EN:\"\n\t\/\/\n\tObjectID_Queue_AVT_Instance_0 = \"Q:0\"\n\t\/\/\n\tObjectID_Attribute_Genres = \"A:GENRE\"\n\tObjectID_Attribute_Album = \"A:ALBUM\"\n\tObjectID_Attribute_Artist = \"A:ARTIST\"\n)\n\nfunc (this *Sonos) GetRootLevelChildren() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tupnp.BrowseObjectID_Root,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListQueues() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Queues,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListSavedQueues() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_SavedQueues,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListInternetRadio() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_InternetRadio,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListAttributes() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Attributes,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) ListMusicShares() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_MusicShares,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetAllGenres() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Attribute_Genres,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc objectIDForGenre(genre string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Genres, genre}, \"\/\")\n}\n\nfunc objectIDForAlbum(album string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Album, album}, \"\/\")\n}\n\nfunc objectIDForArtist(artist string) string {\n\treturn strings.Join([]string{ObjectID_Attribute_Artist, artist}, \"\/\")\n}\n\n\nfunc (this *Sonos) GetGenreArtists(genre string) ([]model.Object, error) {\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForGenre(genre),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err := this.Browse(req); nil != err {\n\t\tlog.Printf(\"Could not browse artists for genre `%s': %v\", genre, err)\n\t\treturn nil, err\n\t} else {\n\t\treturn model.ObjectStream(result.Doc), nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (this *Sonos) ListChildren(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetMetadata(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseMetadata,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetDirectChildren(objectId string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectId,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetQueueContents() (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tObjectID_Queue_AVT_Instance_0,\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n\nfunc (this *Sonos) GetAlbumTracks(album string) ([]model.Object, error) {\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForAlbum(album),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tlog.Printf(\"Browsing tracks for album `%s'\", album)\n\tif result, err := this.Browse(req); nil != err {\n\t\tlog.Printf(\"Could not browse tracks for album `%s': %v\", album, err)\n\t\treturn nil, err\n\t} else {\n\t\treturn model.ObjectStream(result.Doc), nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (this *Sonos) GetTrackFromAlbum(album, track string) ([]model.Object, error) {\n\tif tracks, err := this.GetAlbumTracks(album); nil != err {\n\t\treturn nil, err\n\t} else {\n\t\tvar track_objs []model.Object\n\t\tfor _, track_obj := range tracks {\n\t\t\tif track_obj.Title() == track {\n\t\t\t\ttrack_objs = append(track_objs, track_obj)\n\t\t\t}\n\t\t}\n\t\treturn track_objs, nil\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (this *Sonos) GetArtistAlbums(artist string) (objects []model.Object, err error) {\n\tvar result *upnp.BrowseResult\n\treq := &upnp.BrowseRequest{\n\t\tobjectIDForArtist(artist),\n\t\tupnp.BrowseFlag_BrowseDirectChildren,\n\t\tupnp.BrowseFilter_All,\n\t\t0, \/*StartingIndex*\/\n\t\t0, \/*RequestCount*\/\n\t\tupnp.BrowseSortCriteria_None,\n\t}\n\tif result, err = this.Browse(req); nil != err {\n\t\treturn\n\t} else {\n\t\tobjects = model.ObjectStream(result.Doc)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nvar ROOT = \"\"\n\ntype FileTypeMapJSON struct {\n\tMIME string `json:\"mime\"`\n\tIsDirectory bool `json:\"is_directory\"`\n\tIsHidden bool `json:\"is_hidden\"`\n\tIsAudio bool `json:\"is_audio\"`\n\tIsImage bool `json:\"is_image\"`\n\tIsVideo bool `json:\"is_video\"`\n}\n\ntype FileInfoJSON struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tModifiedAt string `json:\"modified_at\"`\n\tType FileTypeMapJSON `json:\"type\"`\n}\n\n\/\/ returns a pair of (filename, MIME type) strings given a `file` output line\nfunc parseMIMEType(fileOutputLine string) (string, string, error) {\n\t\/\/ parse the file program output into a bare MIME type\n\tmimeString := strings.TrimSpace(fileOutputLine)\n\tsplitIndex := strings.LastIndex(mimeString, \":\")\n\n\tif len(fileOutputLine) <= 1 || splitIndex <= 0 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Invalid MIME string: '%s'\", fileOutputLine)\n\t}\n\n\treturn mimeString[0:splitIndex], strings.TrimSpace(mimeString[splitIndex+1:]), nil\n}\n\nfunc writeJSONResponse(w http.ResponseWriter, data interface{}) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\n\tjson, err := json.Marshal(data)\n\tif err != nil {\n\t\thttp.Error(w, \"Failed to generate JSON response\", 500)\n\t\treturn\n\t}\n\tw.Write(json)\n}\n\n\/\/ given a path, returns a map of child name to MIME type\nfunc getChildMIMETypes(parentPath string) map[string]string {\n\tresult := make(map[string]string)\n\n\t\/\/ get all the children in the given directory\n\tchildren, err := filepath.Glob(path.Join(parentPath, \"*\"))\n\tif err != nil {\n\t\treturn result\n\t}\n\n\targs := []string{\"--mime-type\", \"--dereference\", \"--preserve-date\"}\n\targs = append(args, children...)\n\n\t\/\/ call `file` for a newline-delimited list of \"filename: MIME-type\" pairs\n\tfileOutput, err := exec.Command(\"file\", args...).Output()\n\n\tif err != nil {\n\t\treturn result\n\t}\n\n\tfor _, line := range strings.Split(string(fileOutput), \"\\n\") {\n\t\tfileName, mimeType, err := parseMIMEType(line)\n\t\tif err == nil {\n\t\t\tresult[fileName] = mimeType\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc getMIMEType(filePath string) string {\n\tfileOutput, err := exec.Command(\n\t\t\"file\",\n\t\t\"--mime-type\",\n\t\t\"--dereference\",\n\t\t\"--preserve-date\",\n\t\t\"--brief\",\n\t\tfilePath,\n\t).Output()\n\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimSpace(string(fileOutput))\n}\n\n\/\/ normalizes the path using a root, and returns it. if the path exits the root\n\/\/ or is otherwise invalid, returns an error.\nfunc normalizePathToRoot(root, child string) (string, error) {\n\t\/\/ clean the path, resolving any \"..\"s in it\n\trequestPath := path.Clean(path.Join(root, child))\n\n\t\/\/ if the path exited the root directory, fail\n\trelPath, err := filepath.Rel(root, requestPath)\n\tif err != nil || strings.Index(relPath, \"..\") >= 0 {\n\t\t\/\/ keep things vague since someone's probably trying to be sneaky anyway\n\t\treturn \"\", fmt.Errorf(\"Invalid path\")\n\t}\n\n\treturn requestPath, nil\n}\n\n\/\/ this returns the info for the specified files _or_ directory, not just files\nfunc getInfo(w http.ResponseWriter, r *http.Request) {\n\t\/\/ make sure our path is valid\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathToRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ stat the file so we can return its info\n\tfileInfo, err := os.Stat(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\tmimeType := getMIMEType(normalizedPath)\n\n\twriteJSONResponse(w, FileInfoJSON{\n\t\tfileInfo.Name(),\n\t\tfileInfo.Size(),\n\t\tfileInfo.ModTime().Format(\"2006-01-02T15:04:05Z\"), \/\/ ISO 8601\n\t\tFileTypeMapJSON{\n\t\t\tmimeType,\n\t\t\tfileInfo.IsDir(),\n\t\t\tstrings.HasPrefix(fileInfo.Name(), \".\"),\n\t\t\tstrings.HasPrefix(\"audio\/\", mimeType),\n\t\t\tstrings.HasPrefix(\"image\/\", mimeType),\n\t\t\tstrings.HasPrefix(\"video\/\", mimeType),\n\t\t},\n\t})\n}\n\nfunc download(w http.ResponseWriter, r *http.Request) {\n\t\/\/ make sure our path is valid\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathToRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ stat the file so we can set appropriate response headers, and so we can\n\t\/\/ ensure it's a regular file and not a directory.\n\tfile, err := os.Stat(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\t\/\/ return different responses depending on file type\n\tif file.IsDir() {\n\t\t\/\/ TODO: zip up the directory contents and serve it up\n\t} else {\n\t\tdownloadFile(w, r, normalizedPath, file)\n\t}\n}\n\nfunc downloadFile(w http.ResponseWriter, r *http.Request, filePath string, file os.FileInfo) {\n\tmimeType := getMIMEType(filePath)\n\tw.Header().Add(\"Content-Type\", mimeType)\n\tw.Header().Add(\"Content-Disposition\", file.Name())\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\n\thttp.ServeFile(w, r, filePath)\n}\n\nfunc getDirectory(w http.ResponseWriter, r *http.Request) {\n\t\/\/ ensure the directory actually exists\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathToRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tchildren, err := ioutil.ReadDir(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\t\/\/ get a map of all the MIME types for the directory\n\tmimeTypes := getChildMIMETypes(normalizedPath)\n\n\t\/\/ list the directory to a JSON response\n\tvar files []FileInfoJSON\n\tfor _, file := range children {\n\t\tfileName := file.Name()\n\t\tmimeType := mimeTypes[path.Join(normalizedPath, fileName)]\n\n\t\tfiles = append(files, FileInfoJSON{\n\t\t\tfileName,\n\t\t\tfile.Size(),\n\t\t\tfile.ModTime().Format(\"2006-01-02T15:04:05Z\"), \/\/ ISO 8601\n\t\t\tFileTypeMapJSON{\n\t\t\t\tmimeType,\n\t\t\t\tfile.IsDir(),\n\t\t\t\tstrings.HasPrefix(fileName, \".\"), \/\/ hidden?\n\t\t\t\tstrings.HasPrefix(\"audio\/\", mimeType), \/\/ audio?\n\t\t\t\tstrings.HasPrefix(\"image\/\", mimeType), \/\/ image?\n\t\t\t\tstrings.HasPrefix(\"video\/\", mimeType), \/\/ video?\n\t\t\t},\n\t\t})\n\t}\n\n\twriteJSONResponse(w, files)\n}\n\nfunc downloadDirectory(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO: zip up the whole directory and offer it for download\n}\n\n\/\/ retrieves\/caches\/updates a thumbnail file given a path, or returns an error\n\/\/ if no thumbnail could be geneated.\nfunc getThumbnail(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO:\n\t\/\/ * look up the existing file to get its modtime and ensure it exists\n\t\/\/ * see if we have a cached file with the same modtime\n\t\/\/ ** if so, use it\n\t\/\/ ** otherwise, generate a preview\n\t\/\/ *** use graphicsmagick\/ffmpeg to generate a preview thumbnail\n\t\/\/ *** store the new file to a mirroed path with the filename plus the modtime\n\t\/\/ * read the cached file and return its contents\n\n\t\/\/ cache preview thumbnails for a good while to lower load on this tiny\n\t\/\/ server, even if we are caching the preview thumbnails on-disk too.\n\tw.Header().Add(\"Cache-Control\", \"max-age=3600\")\n\tw.Header().Add(\"Content-Type\", \"image\/jpeg\")\n}\n\nfunc main() {\n\t\/\/ ensure we have all the binaries we need\n\trequiredBinaries := []string{\"file\"}\n\tfor _, binary := range requiredBinaries {\n\t\tif _, err := exec.LookPath(binary); err != nil {\n\t\t\tlog.Panicf(\"'%s' must be installed and in the PATH\\n\", binary)\n\t\t}\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tpanic(\"A root directory argument is required\")\n\t}\n\n\tROOT = os.Args[1]\n\n\trouter := mux.NewRouter()\n\n\t\/\/ \/files\n\t\/\/ anything with a trailing `\/` indicates a directory; anything that ends\n\t\/\/ without a trailing slash indicates a file.\n\tfilesJSON := router.Headers(\"Content-Type\", \"application\/json\").Subrouter()\n\tfilesJSON.HandleFunc(\"\/files\/{path:.*[^\/]$}\", getInfo).\n\t\tHeaders(\"Content-Type\", \"application\/json\").\n\t\tMethods(\"GET\")\n\tfilesJSON.HandleFunc(\"\/files{path:.*}\/\", getDirectory).\n\t\tHeaders(\"Content-Type\", \"application\/json\").\n\t\tMethods(\"GET\")\n\n\trouter.HandleFunc(\"\/files\/{path:.*}\", download).\n\t\tMethods(\"GET\")\n\n\t\/\/ \/thumbnails\n\trouter.HandleFunc(\"\/thumbnails\/{path:.*[^\/]$}\", getThumbnail).\n\t\tMethods(\"GET\")\n\n\thttp.ListenAndServe(\":3000\", router)\n}\n<commit_msg>Refactor response format, misc.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nvar ROOT = \"\"\n\ntype FileInfoJSON struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tModifiedAt string `json:\"modified_at\"`\n\tMIMEType string `json:\"mime_type\"`\n\tIsDirectory bool `json:\"is_directory\"`\n\tIsHidden bool `json:\"is_hidden\"`\n}\n\n\/\/ returns a pair of (filename, MIME type) strings given a `file` output line\nfunc parseMIMEType(fileOutputLine string) (string, string, error) {\n\t\/\/ parse the file program output into a bare MIME type\n\tmimeString := strings.TrimSpace(fileOutputLine)\n\tsplitIndex := strings.LastIndex(mimeString, \":\")\n\n\tif len(fileOutputLine) <= 1 || splitIndex <= 0 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Invalid MIME string: '%s'\", fileOutputLine)\n\t}\n\n\treturn mimeString[0:splitIndex], strings.TrimSpace(mimeString[splitIndex+1:]), nil\n}\n\nfunc writeJSONResponse(w http.ResponseWriter, data interface{}) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\n\tjson, err := json.Marshal(data)\n\tif err != nil {\n\t\thttp.Error(w, \"Failed to generate JSON response\", 500)\n\t\treturn\n\t}\n\tw.Write(json)\n}\n\n\/\/ given a path, returns a map of child name to MIME type\nfunc getChildMIMETypes(parentPath string) map[string]string {\n\tresult := make(map[string]string)\n\n\t\/\/ get all the children in the given directory\n\tchildren, err := filepath.Glob(path.Join(parentPath, \"*\"))\n\tif err != nil {\n\t\treturn result\n\t}\n\n\targs := []string{\"--mime-type\", \"--dereference\", \"--preserve-date\"}\n\targs = append(args, children...)\n\n\t\/\/ call `file` for a newline-delimited list of \"filename: MIME-type\" pairs\n\tfileOutput, err := exec.Command(\"file\", args...).Output()\n\n\tif err != nil {\n\t\treturn result\n\t}\n\n\tfor _, line := range strings.Split(string(fileOutput), \"\\n\") {\n\t\tfileName, mimeType, err := parseMIMEType(line)\n\t\tif err == nil {\n\t\t\tresult[fileName] = mimeType\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc getMIMEType(filePath string) string {\n\tfileOutput, err := exec.Command(\n\t\t\"file\",\n\t\t\"--mime-type\",\n\t\t\"--dereference\",\n\t\t\"--preserve-date\",\n\t\t\"--brief\",\n\t\tfilePath,\n\t).Output()\n\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimSpace(string(fileOutput))\n}\n\n\/\/ given a root and a relative child path, returns the normalized, absolute path\n\/\/ of the child. if the path is not a child of the root or is otherwise invalid,\n\/\/ returns an error.\nfunc normalizePathUnderRoot(root, child string) (string, error) {\n\t\/\/ clean the path, resolving any \"..\"s in it\n\trequestPath := path.Clean(path.Join(root, child))\n\n\t\/\/ if the path exited the root directory, fail\n\trelPath, err := filepath.Rel(root, requestPath)\n\tif err != nil || strings.Index(relPath, \"..\") >= 0 {\n\t\t\/\/ keep things vague since someone's probably trying to be sneaky anyway\n\t\treturn \"\", fmt.Errorf(\"Invalid path\")\n\t}\n\n\treturn requestPath, nil\n}\n\n\/\/ this returns the info for the specified files _or_ directory, not just files\nfunc getInfo(w http.ResponseWriter, r *http.Request) {\n\t\/\/ make sure our path is valid\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathUnderRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ stat the file so we can return its info\n\tfileInfo, err := os.Stat(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\tmimeType := getMIMEType(normalizedPath)\n\n\twriteJSONResponse(w, FileInfoJSON{\n\t\tfileInfo.Name(),\n\t\tfileInfo.Size(),\n\t\tfileInfo.ModTime().Format(\"2006-01-02T15:04:05Z\"), \/\/ ISO 8601\n\t\tmimeType,\n\t\tfileInfo.IsDir(),\n\t\tstrings.HasPrefix(fileInfo.Name(), \".\"),\n\t})\n}\n\nfunc download(w http.ResponseWriter, r *http.Request) {\n\t\/\/ make sure our path is valid\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathUnderRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ stat the file so we can set appropriate response headers, and so we can\n\t\/\/ ensure it's a regular file and not a directory.\n\tfile, err := os.Stat(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\t\/\/ return different responses depending on file type\n\tif file.IsDir() {\n\t\t\/\/ TODO: zip up the directory contents and serve it up\n\t} else {\n\t\tdownloadFile(w, r, normalizedPath, file)\n\t}\n}\n\nfunc downloadFile(w http.ResponseWriter, r *http.Request, filePath string, file os.FileInfo) {\n\tmimeType := getMIMEType(filePath)\n\tw.Header().Add(\"Content-Type\", mimeType)\n\tw.Header().Add(\"Content-Disposition\", file.Name())\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\n\thttp.ServeFile(w, r, filePath)\n}\n\nfunc getDirectory(w http.ResponseWriter, r *http.Request) {\n\t\/\/ ensure the directory actually exists\n\trawPath := mux.Vars(r)[\"path\"]\n\tnormalizedPath, err := normalizePathUnderRoot(ROOT, rawPath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tchildren, err := ioutil.ReadDir(normalizedPath)\n\tif err != nil {\n\t\t\/\/ don't report the raw error in case we leak server directory information\n\t\thttp.Error(w, \"\", 404)\n\t\treturn\n\t}\n\n\t\/\/ get a map of all the MIME types for the directory\n\tmimeTypes := getChildMIMETypes(normalizedPath)\n\n\t\/\/ list the directory to a JSON response\n\tvar files []FileInfoJSON\n\tfor _, file := range children {\n\t\tfileName := file.Name()\n\t\tmimeType := mimeTypes[path.Join(normalizedPath, fileName)]\n\n\t\tfiles = append(files, FileInfoJSON{\n\t\t\tfileName,\n\t\t\tfile.Size(),\n\t\t\tfile.ModTime().Format(\"2006-01-02T15:04:05Z\"), \/\/ ISO 8601\n\t\t\tFileTypeMapJSON{\n\t\t\t\tmimeType,\n\t\t\t\tfile.IsDir(),\n\t\t\t\tstrings.HasPrefix(fileName, \".\"), \/\/ hidden?\n\t\t\t\tstrings.HasPrefix(\"audio\/\", mimeType), \/\/ audio?\n\t\t\t\tstrings.HasPrefix(\"image\/\", mimeType), \/\/ image?\n\t\t\t\tstrings.HasPrefix(\"video\/\", mimeType), \/\/ video?\n\t\t\t},\n\t\t})\n\t}\n\n\twriteJSONResponse(w, files)\n}\n\nfunc downloadDirectory(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO: zip up the whole directory and offer it for download\n}\n\n\/\/ retrieves\/caches\/updates a thumbnail file given a path, or returns an error\n\/\/ if no thumbnail could be geneated.\nfunc getThumbnail(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO:\n\t\/\/ * look up the existing file to get its modtime and ensure it exists\n\t\/\/ * see if we have a cached file with the same modtime\n\t\/\/ ** if so, use it\n\t\/\/ ** otherwise, generate a preview\n\t\/\/ *** use graphicsmagick\/ffmpeg to generate a preview thumbnail\n\t\/\/ *** store the new file to a mirroed path with the filename plus the modtime\n\t\/\/ * read the cached file and return its contents\n\n\t\/\/ cache preview thumbnails for a good while to lower load on this tiny\n\t\/\/ server, even if we are caching the preview thumbnails on-disk too.\n\tw.Header().Add(\"Cache-Control\", \"max-age=3600\")\n\tw.Header().Add(\"Content-Type\", \"image\/jpeg\")\n}\n\nfunc main() {\n\t\/\/ ensure we have all the binaries we need\n\trequiredBinaries := []string{\"file\"}\n\tfor _, binary := range requiredBinaries {\n\t\tif _, err := exec.LookPath(binary); err != nil {\n\t\t\tlog.Panicf(\"'%s' must be installed and in the PATH\\n\", binary)\n\t\t}\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tpanic(\"A root directory argument is required\")\n\t}\n\n\tROOT = path.Clean(os.Args[1])\n\n\trouter := mux.NewRouter()\n\n\t\/\/ \/files\n\t\/\/ anything with a trailing `\/` indicates a directory; anything that ends\n\t\/\/ without a trailing slash indicates a file.\n\tfilesJSON := router.Headers(\"Content-Type\", \"application\/json\").Subrouter()\n\tfilesJSON.HandleFunc(\"\/files\/{path:.*[^\/]$}\", getInfo).\n\t\tHeaders(\"Content-Type\", \"application\/json\").\n\t\tMethods(\"GET\")\n\tfilesJSON.HandleFunc(\"\/files{path:.*}\/\", getDirectory).\n\t\tHeaders(\"Content-Type\", \"application\/json\").\n\t\tMethods(\"GET\")\n\n\trouter.HandleFunc(\"\/files\/{path:.*}\", download).\n\t\tMethods(\"GET\")\n\n\t\/\/ \/thumbnails\n\trouter.HandleFunc(\"\/thumbnails\/{path:.*[^\/]$}\", getThumbnail).\n\t\tMethods(\"GET\")\n\n\taddr := \"127.0.0.1:3000\"\n\tfmt.Printf(\"Serving %s to %s...\\n\", ROOT, addr)\n\thttp.ListenAndServe(addr, router)\n}\n<|endoftext|>"} {"text":"<commit_before>package clw11\n\n\/*\n#define CL_USE_DEPRECATED_OPENCL_1_1_APIS\n#ifdef __APPLE__\n#include \"OpenCL\/opencl.h\"\n#else\n#include \"CL\/opencl.h\"\n#endif\n\nextern void bufferCallback(cl_mem memobj, void *user_data);\n\nvoid callBufferCallback(cl_mem memobj, void *user_data)\n{\n\tbufferCallback(memobj, user_data);\n}\n*\/\nimport \"C\"\nimport (\n\t\"unsafe\"\n)\n\ntype (\n\tMem C.cl_mem\n\tMemFlags C.cl_mem_flags\n\tMapFlags C.cl_map_flags\n\tBufferRegion C.cl_buffer_region\n\tBufferCreateType C.cl_buffer_create_type\n)\n\n\/\/ Bitfield.\nconst (\n\tMemReadWrite MemFlags = C.CL_MEM_READ_WRITE\n\tMemWriteOnly MemFlags = C.CL_MEM_WRITE_ONLY\n\tMemReadOnly MemFlags = C.CL_MEM_READ_ONLY\n\tMemUseHostPointer MemFlags = C.CL_MEM_USE_HOST_PTR\n\tMemAllocHostPointer MemFlags = C.CL_MEM_ALLOC_HOST_PTR\n\tMemCopyHostPointer MemFlags = C.CL_MEM_COPY_HOST_PTR\n)\n\n\/\/ Bitfield.\nconst (\n\tMapRead MapFlags = C.CL_MAP_READ\n\tMapWrite MapFlags = C.CL_MAP_WRITE\n)\n\nconst (\n\tBufferCreateTypeRegion BufferCreateType = C.CL_BUFFER_CREATE_TYPE_REGION\n)\n\n\/\/ Creates a buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clCreateBuffer.html\nfunc CreateBuffer(context Context, flags MemFlags, size Size, host_ptr unsafe.Pointer) (Mem, error) {\n\n\tvar err C.cl_int\n\tmemory := C.clCreateBuffer(context, C.cl_mem_flags(flags), C.size_t(size), host_ptr, &err)\n\n\treturn Mem(memory), toError(err)\n}\n\n\/\/ Creates a buffer object (referred to as a sub-buffer object) from an existing\n\/\/ buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clCreateSubBuffer.html\nfunc CreateSubBuffer(buffer Mem, flags MemFlags, buffer_create_type BufferCreateType, buffer_create_info unsafe.Pointer,\n\terrcode_ret *Int) (Mem, error) {\n\n\tvar err C.cl_int\n\tmemory := C.clCreateSubBuffer(buffer, C.cl_mem_flags(flags), C.cl_buffer_create_type(buffer_create_type),\n\t\tbuffer_create_info, &err)\n\n\treturn Mem(memory), toError(err)\n}\n\n\/\/ Increments the memory object reference count.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clRetainMemObject.html\nfunc RetainMemObject(memobj Mem) error {\n\treturn toError(C.clRetainMemObject(memobj))\n}\n\n\/\/ Decrements the memory object reference count.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clReleaseMemObject.html\nfunc ReleaseMemObject(memobj Mem) error {\n\treturn toError(C.clReleaseMemObject(memobj))\n}\n\n\/\/ Enqueue commands to read from a buffer object to host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueReadBuffer.html\nfunc EnqueueReadBuffer(command_queue CommandQueue, buffer Mem, blocking_read Bool, offset, cb Size,\n\tptr unsafe.Pointer, wait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueReadBuffer(command_queue, buffer, C.cl_bool(blocking_read), C.size_t(offset),\n\t\tC.size_t(cb), ptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to write to a buffer object from host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueWriteBuffer.html\nfunc EnqueueWriteBuffer(command_queue CommandQueue, buffer Mem, blocking_read Bool, offset, cb Size,\n\tptr unsafe.Pointer, wait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueWriteBuffer(command_queue, buffer, C.cl_bool(blocking_read), C.size_t(offset),\n\t\tC.size_t(cb), ptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to read from a rectangular region from a buffer object to\n\/\/ host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueReadBufferRect.html\nfunc EnqueueReadBufferRect(command_queue CommandQueue, buffer Mem, blocking_read Bool, buffer_origin, host_origin,\n\tregion [3]Size, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch Size, ptr unsafe.Pointer,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueReadBufferRect(command_queue, buffer, C.cl_bool(blocking_read),\n\t\t(*C.size_t)(&buffer_origin[0]), (*C.size_t)(&host_origin[0]), (*C.size_t)(®ion[0]),\n\t\tC.size_t(buffer_row_pitch), C.size_t(buffer_slice_pitch), C.size_t(host_row_pitch), C.size_t(host_slice_pitch),\n\t\tptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to write a rectangular region to a buffer object from host\n\/\/ memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueWriteBufferRect.html\nfunc EnqueueWriteBufferRect(command_queue CommandQueue, buffer Mem, blocking_read Bool, buffer_origin, host_origin,\n\tregion [3]Size, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch Size, ptr unsafe.Pointer,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueWriteBufferRect(command_queue, buffer, C.cl_bool(blocking_read),\n\t\t(*C.size_t)(&buffer_origin[0]), (*C.size_t)(&host_origin[0]), (*C.size_t)(®ion[0]),\n\t\tC.size_t(buffer_row_pitch), C.size_t(buffer_slice_pitch), C.size_t(host_row_pitch), C.size_t(host_slice_pitch),\n\t\tptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to copy from one buffer object to another.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueCopyBuffer.html\nfunc EnqueueCopyBuffer(command_queue CommandQueue, src_buffer, dst_buffer Mem, src_offset, dst_offset, cb Size,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueCopyBuffer(command_queue, src_buffer, dst_buffer, C.size_t(src_offset),\n\t\tC.size_t(dst_offset), C.size_t(cb), num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to copy a rectangular region from the buffer object to\n\/\/ another buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueCopyBufferRect.html\nfunc EnqueueCopyBufferRect(command_queue CommandQueue, src_buffer, dst_buffer Mem, src_origin, dst_origin,\n\tregion [3]Size, src_row_pitch, src_slice_pitch, dst_row_pitch, dst_slice_pitch Size, wait_list []Event,\n\tevent *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueCopyBufferRect(command_queue, src_buffer, dst_buffer, (*C.size_t)(&src_origin[0]),\n\t\t(*C.size_t)(&dst_origin[0]), (*C.size_t)(®ion[0]), C.size_t(src_row_pitch), C.size_t(src_slice_pitch),\n\t\tC.size_t(dst_row_pitch), C.size_t(dst_slice_pitch), num_events_in_wait_list, event_wait_list,\n\t\t(*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to map a region of the buffer object given by buffer into\n\/\/ the host address space and returns a pointer to this mapped region.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueMapBuffer.html\nfunc EnqueueMapBuffer(command_queue CommandQueue, buffer Mem, blocking_map Bool, map_flags MapFlags, offset, cb Size,\n\twait_list []Event, event *Event) (unsafe.Pointer, error) {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\tvar err C.cl_int\n\tmapped := C.clEnqueueMapBuffer(command_queue, buffer, C.cl_bool(blocking_map), C.cl_map_flags(map_flags),\n\t\tC.size_t(offset), C.size_t(cb), num_events_in_wait_list, event_wait_list, (*C.cl_event)(event), &err)\n\n\treturn mapped, toError(err)\n}\n\n\/\/ Enqueues a command to unmap a previously mapped region of a memory object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueUnmapMemObject.html\nfunc EnqueueUnmapMemObject(command_queue CommandQueue, memobj Mem, mapped_ptr unsafe.Pointer, wait_list []Event,\n\tevent *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueUnmapMemObject(command_queue, memobj, mapped_ptr, num_events_in_wait_list,\n\t\tevent_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Registers a user callback function that will be called when the memory object\n\/\/ is deleted and its resources freed.\nfunc SetMemObjectDestructorCallback(memobj Mem, callback BufferCallbackFunc, user_data interface{}) error {\n\n\tkey := bufferCallbacks.add(callback, user_data)\n\n\terr := toError(C.clSetMemObjectDestructorCallback(C.cl_mem(memobj), (*[0]byte)(C.callBufferCallback),\n\t\tunsafe.Pointer(key)))\n\n\tif err != nil {\n\t\t\/\/ If the C side setting of the callback failed GetCallback will remove\n\t\t\/\/ the callback from the map.\n\t\tbufferCallbacks.get(key)\n\t}\n\n\treturn err\n}\n<commit_msg>Added fetching of memory object information.<commit_after>package clw11\n\n\/*\n#define CL_USE_DEPRECATED_OPENCL_1_1_APIS\n#ifdef __APPLE__\n#include \"OpenCL\/opencl.h\"\n#else\n#include \"CL\/opencl.h\"\n#endif\n\nextern void bufferCallback(cl_mem memobj, void *user_data);\n\nvoid callBufferCallback(cl_mem memobj, void *user_data)\n{\n\tbufferCallback(memobj, user_data);\n}\n*\/\nimport \"C\"\nimport (\n\t\"unsafe\"\n)\n\ntype (\n\tMem C.cl_mem\n\tMemFlags C.cl_mem_flags\n\tMemInfo C.cl_mem_info\n\tMapFlags C.cl_map_flags\n\tBufferRegion C.cl_buffer_region\n\tBufferCreateType C.cl_buffer_create_type\n)\n\n\/\/ Bitfield.\nconst (\n\tMemReadWrite MemFlags = C.CL_MEM_READ_WRITE\n\tMemWriteOnly MemFlags = C.CL_MEM_WRITE_ONLY\n\tMemReadOnly MemFlags = C.CL_MEM_READ_ONLY\n\tMemUseHostPointer MemFlags = C.CL_MEM_USE_HOST_PTR\n\tMemAllocHostPointer MemFlags = C.CL_MEM_ALLOC_HOST_PTR\n\tMemCopyHostPointer MemFlags = C.CL_MEM_COPY_HOST_PTR\n)\n\n\/\/ Bitfield.\nconst (\n\tMapRead MapFlags = C.CL_MAP_READ\n\tMapWrite MapFlags = C.CL_MAP_WRITE\n)\n\nconst (\n\tBufferCreateTypeRegion BufferCreateType = C.CL_BUFFER_CREATE_TYPE_REGION\n)\n\nconst (\n\tMemType MemInfo = C.CL_MEM_TYPE\n\tMemFlagsInfo MemInfo = C.CL_MEM_FLAGS \/\/ Appended \"Info\" due to conflict with type.\n\tMemSize MemInfo = C.CL_MEM_SIZE\n\tMemHostPtr MemInfo = C.CL_MEM_HOST_PTR\n\tMemMapCount MemInfo = C.CL_MEM_MAP_COUNT\n\tMemReferenceCount MemInfo = C.CL_MEM_REFERENCE_COUNT\n\tMemContext MemInfo = C.CL_MEM_CONTEXT\n\tMemAssociatedMemobject MemInfo = C.CL_MEM_ASSOCIATED_MEMOBJECT\n\tMemOffset MemInfo = C.CL_MEM_OFFSET\n)\n\n\/\/ Creates a buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clCreateBuffer.html\nfunc CreateBuffer(context Context, flags MemFlags, size Size, host_ptr unsafe.Pointer) (Mem, error) {\n\n\tvar err C.cl_int\n\tmemory := C.clCreateBuffer(context, C.cl_mem_flags(flags), C.size_t(size), host_ptr, &err)\n\n\treturn Mem(memory), toError(err)\n}\n\n\/\/ Creates a buffer object (referred to as a sub-buffer object) from an existing\n\/\/ buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clCreateSubBuffer.html\nfunc CreateSubBuffer(buffer Mem, flags MemFlags, buffer_create_type BufferCreateType, buffer_create_info unsafe.Pointer,\n\terrcode_ret *Int) (Mem, error) {\n\n\tvar err C.cl_int\n\tmemory := C.clCreateSubBuffer(buffer, C.cl_mem_flags(flags), C.cl_buffer_create_type(buffer_create_type),\n\t\tbuffer_create_info, &err)\n\n\treturn Mem(memory), toError(err)\n}\n\n\/\/ Increments the memory object reference count.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clRetainMemObject.html\nfunc RetainMemObject(memobj Mem) error {\n\treturn toError(C.clRetainMemObject(memobj))\n}\n\n\/\/ Decrements the memory object reference count.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clReleaseMemObject.html\nfunc ReleaseMemObject(memobj Mem) error {\n\treturn toError(C.clReleaseMemObject(memobj))\n}\n\n\/\/ Enqueue commands to read from a buffer object to host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueReadBuffer.html\nfunc EnqueueReadBuffer(command_queue CommandQueue, buffer Mem, blocking_read Bool, offset, cb Size,\n\tptr unsafe.Pointer, wait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueReadBuffer(command_queue, buffer, C.cl_bool(blocking_read), C.size_t(offset),\n\t\tC.size_t(cb), ptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to write to a buffer object from host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueWriteBuffer.html\nfunc EnqueueWriteBuffer(command_queue CommandQueue, buffer Mem, blocking_read Bool, offset, cb Size,\n\tptr unsafe.Pointer, wait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueWriteBuffer(command_queue, buffer, C.cl_bool(blocking_read), C.size_t(offset),\n\t\tC.size_t(cb), ptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to read from a rectangular region from a buffer object to\n\/\/ host memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueReadBufferRect.html\nfunc EnqueueReadBufferRect(command_queue CommandQueue, buffer Mem, blocking_read Bool, buffer_origin, host_origin,\n\tregion [3]Size, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch Size, ptr unsafe.Pointer,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueReadBufferRect(command_queue, buffer, C.cl_bool(blocking_read),\n\t\t(*C.size_t)(&buffer_origin[0]), (*C.size_t)(&host_origin[0]), (*C.size_t)(®ion[0]),\n\t\tC.size_t(buffer_row_pitch), C.size_t(buffer_slice_pitch), C.size_t(host_row_pitch), C.size_t(host_slice_pitch),\n\t\tptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueue commands to write a rectangular region to a buffer object from host\n\/\/ memory.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueWriteBufferRect.html\nfunc EnqueueWriteBufferRect(command_queue CommandQueue, buffer Mem, blocking_read Bool, buffer_origin, host_origin,\n\tregion [3]Size, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch Size, ptr unsafe.Pointer,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueWriteBufferRect(command_queue, buffer, C.cl_bool(blocking_read),\n\t\t(*C.size_t)(&buffer_origin[0]), (*C.size_t)(&host_origin[0]), (*C.size_t)(®ion[0]),\n\t\tC.size_t(buffer_row_pitch), C.size_t(buffer_slice_pitch), C.size_t(host_row_pitch), C.size_t(host_slice_pitch),\n\t\tptr, num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to copy from one buffer object to another.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueCopyBuffer.html\nfunc EnqueueCopyBuffer(command_queue CommandQueue, src_buffer, dst_buffer Mem, src_offset, dst_offset, cb Size,\n\twait_list []Event, event *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueCopyBuffer(command_queue, src_buffer, dst_buffer, C.size_t(src_offset),\n\t\tC.size_t(dst_offset), C.size_t(cb), num_events_in_wait_list, event_wait_list, (*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to copy a rectangular region from the buffer object to\n\/\/ another buffer object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueCopyBufferRect.html\nfunc EnqueueCopyBufferRect(command_queue CommandQueue, src_buffer, dst_buffer Mem, src_origin, dst_origin,\n\tregion [3]Size, src_row_pitch, src_slice_pitch, dst_row_pitch, dst_slice_pitch Size, wait_list []Event,\n\tevent *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueCopyBufferRect(command_queue, src_buffer, dst_buffer, (*C.size_t)(&src_origin[0]),\n\t\t(*C.size_t)(&dst_origin[0]), (*C.size_t)(®ion[0]), C.size_t(src_row_pitch), C.size_t(src_slice_pitch),\n\t\tC.size_t(dst_row_pitch), C.size_t(dst_slice_pitch), num_events_in_wait_list, event_wait_list,\n\t\t(*C.cl_event)(event)))\n}\n\n\/\/ Enqueues a command to map a region of the buffer object given by buffer into\n\/\/ the host address space and returns a pointer to this mapped region.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueMapBuffer.html\nfunc EnqueueMapBuffer(command_queue CommandQueue, buffer Mem, blocking_map Bool, map_flags MapFlags, offset, cb Size,\n\twait_list []Event, event *Event) (unsafe.Pointer, error) {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\tvar err C.cl_int\n\tmapped := C.clEnqueueMapBuffer(command_queue, buffer, C.cl_bool(blocking_map), C.cl_map_flags(map_flags),\n\t\tC.size_t(offset), C.size_t(cb), num_events_in_wait_list, event_wait_list, (*C.cl_event)(event), &err)\n\n\treturn mapped, toError(err)\n}\n\n\/\/ Enqueues a command to unmap a previously mapped region of a memory object.\n\/\/ http:\/\/www.khronos.org\/registry\/cl\/sdk\/1.1\/docs\/man\/xhtml\/clEnqueueUnmapMemObject.html\nfunc EnqueueUnmapMemObject(command_queue CommandQueue, memobj Mem, mapped_ptr unsafe.Pointer, wait_list []Event,\n\tevent *Event) error {\n\n\tevent_wait_list, num_events_in_wait_list := toEventList(wait_list)\n\n\treturn toError(C.clEnqueueUnmapMemObject(command_queue, memobj, mapped_ptr, num_events_in_wait_list,\n\t\tevent_wait_list, (*C.cl_event)(event)))\n}\n\nfunc SetMemObjectDestructorCallback(memobj Mem, callback BufferCallbackFunc, user_data interface{}) error {\n\n\tkey := bufferCallbacks.add(callback, user_data)\n\n\terr := toError(C.clSetMemObjectDestructorCallback(C.cl_mem(memobj), (*[0]byte)(C.callBufferCallback),\n\t\tunsafe.Pointer(key)))\n\n\tif err != nil {\n\t\t\/\/ If the C side setting of the callback failed GetCallback will remove\n\t\t\/\/ the callback from the map.\n\t\tbufferCallbacks.get(key)\n\t}\n\n\treturn err\n}\n\nfunc GetMemObjectInfo(memobj Mem, param_name MemInfo, param_value_size Size, param_value unsafe.Pointer,\n\tparam_value_size_return *Size) error {\n\n\treturn toError(C.clGetMemObjectInfo(memobj, C.cl_mem_info(param_name), C.size_t(param_value_size), param_value,\n\t\t(*C.size_t)(param_value_size_return)))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"time\"\n\n\t\"strings\"\n\n\t. \"github.com\/claudetech\/loggo\/default\"\n\t\"github.com\/orcaman\/concurrent-map\"\n)\n\nvar instances cmap.ConcurrentMap\nvar chunkPath string\nvar chunkSize int64\nvar chunkDirMaxSize int64\n\nfunc init() {\n\tinstances = cmap.New()\n}\n\n\/\/ Buffer is a buffered stream\ntype Buffer struct {\n\tnumberOfInstances int\n\tclient *http.Client\n\tobject *APIObject\n\ttempDir string\n\tpreload bool\n\tchunks cmap.ConcurrentMap\n}\n\n\/\/ GetBufferInstance gets a singleton instance of buffer\nfunc GetBufferInstance(client *http.Client, object *APIObject) (*Buffer, error) {\n\tif !instances.Has(object.ObjectID) {\n\t\ti, err := newBuffer(client, object)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tinstances.Set(object.ObjectID, i)\n\t}\n\n\tinstance, ok := instances.Get(object.ObjectID)\n\t\/\/ if buffer allocation failed due to race conditions it will try to fetch a new one\n\tif !ok {\n\t\ti, err := GetBufferInstance(client, object)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tinstance = i\n\t}\n\tinstance.(*Buffer).numberOfInstances++\n\treturn instance.(*Buffer), nil\n}\n\n\/\/ SetChunkPath sets the global chunk path\nfunc SetChunkPath(path string) {\n\tchunkPath = path\n}\n\n\/\/ SetChunkSize sets the global chunk size\nfunc SetChunkSize(size int64) {\n\tchunkSize = size\n}\n\n\/\/ SetChunkDirMaxSize sets the maximum size of the chunk directory\nfunc SetChunkDirMaxSize(size int64) {\n\tchunkDirMaxSize = size\n}\n\n\/\/ NewBuffer creates a new buffer instance\nfunc newBuffer(client *http.Client, object *APIObject) (*Buffer, error) {\n\tLog.Infof(\"Starting playback of %v\", object.Name)\n\tLog.Debugf(\"Creating buffer for object %v\", object.ObjectID)\n\n\ttempDir := filepath.Join(chunkPath, object.ObjectID)\n\tif err := os.MkdirAll(tempDir, 0777); nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not create temp path for object %v\", object.ObjectID)\n\t}\n\n\tif 0 == chunkSize {\n\t\tLog.Debugf(\"ChunkSize was 0, setting to default (5 MB)\")\n\t\tchunkSize = 5 * 1024 * 1024\n\t}\n\n\tbuffer := Buffer{\n\t\tnumberOfInstances: 0,\n\t\tclient: client,\n\t\tobject: object,\n\t\ttempDir: tempDir,\n\t\tpreload: true,\n\t\tchunks: cmap.New(),\n\t}\n\n\treturn &buffer, nil\n}\n\n\/\/ Close all handles\nfunc (b *Buffer) Close() error {\n\tb.numberOfInstances--\n\tif 0 == b.numberOfInstances {\n\t\tLog.Infof(\"Stopping playback of %v\", b.object.Name)\n\t\tLog.Debugf(\"Stop buffering for object %v\", b.object.ObjectID)\n\n\t\tb.preload = false\n\t\tinstances.Remove(b.object.ObjectID)\n\t}\n\treturn nil\n}\n\n\/\/ ReadBytes on a specific location\nfunc (b *Buffer) ReadBytes(start, size int64, preload bool, delay int32) ([]byte, error) {\n\tfOffset := start % chunkSize\n\toffset := start - fOffset\n\toffsetEnd := offset + chunkSize\n\n\tLog.Tracef(\"Getting object %v - chunk %v - offset %v for %v bytes\",\n\t\tb.object.ObjectID, strconv.Itoa(int(offset)), fOffset, size)\n\n\tfilename := filepath.Join(b.tempDir, strconv.Itoa(int(offset)))\n\tif f, err := os.Open(filename); nil == err {\n\t\tdefer f.Close()\n\n\t\tbuf := make([]byte, size)\n\t\tif n, err := f.ReadAt(buf, fOffset); n > 0 && (nil == err || io.EOF == err) {\n\t\t\tLog.Tracef(\"Found file %s bytes %v - %v in cache\", filename, offset, offsetEnd)\n\n\t\t\t\/\/ update the last modified time for files that are often in use\n\t\t\tif err := os.Chtimes(filename, time.Now(), time.Now()); nil != err {\n\t\t\t\tLog.Warningf(\"Could not update last modified time for %v\", filename)\n\t\t\t}\n\n\t\t\treturn buf[:size], nil\n\t\t}\n\n\t\tLog.Debugf(\"%v\", err)\n\t\tLog.Debugf(\"Could not read file %s at %v\", filename, fOffset)\n\t}\n\n\tif chunkDirMaxSize > 0 {\n\t\tgo func() {\n\t\t\tif err := cleanChunkDir(chunkPath); nil != err {\n\t\t\t\tLog.Debugf(\"%v\", err)\n\t\t\t\tLog.Warningf(\"Could not delete oldest chunk\")\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ sleep if request is throttled\n\tif delay > 0 {\n\t\ttime.Sleep(time.Duration(delay) * time.Second)\n\t}\n\n\tLog.Debugf(\"Requesting object %v bytes %v - %v from API\", b.object.ObjectID, offset, offsetEnd)\n\treq, err := http.NewRequest(\"GET\", b.object.DownloadURL, nil)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not create request object %v from API\", b.object.ObjectID)\n\t}\n\n\treq.Header.Add(\"Range\", fmt.Sprintf(\"bytes=%v-%v\", offset, offsetEnd))\n\n\tLog.Tracef(\"Sending HTTP Request %v\", req)\n\n\tres, err := b.client.Do(req)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not request object %v from API\", b.object.ObjectID)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 206 {\n\t\tif res.StatusCode != 403 {\n\t\t\treturn nil, fmt.Errorf(\"Wrong status code %v\", res)\n\t\t}\n\n\t\t\/\/ throttle requests\n\t\tif delay > 8 {\n\t\t\treturn nil, fmt.Errorf(\"Maximum throttle interval has been reached\")\n\t\t}\n\t\tbytes, err := ioutil.ReadAll(res.Body)\n\t\tif nil != err {\n\t\t\tLog.Debugf(\"%v\", err)\n\t\t\treturn nil, fmt.Errorf(\"Could not read body of 403 error\")\n\t\t}\n\t\tbody := string(bytes)\n\t\tif strings.Contains(body, \"dailyLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"userRateLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"rateLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"backendError\") {\n\t\t\tif 0 == delay {\n\t\t\t\tdelay = 1\n\t\t\t} else {\n\t\t\t\tdelay = delay * 2\n\t\t\t}\n\t\t\treturn b.ReadBytes(start, size, false, delay)\n\t\t}\n\t}\n\n\tbytes, err := ioutil.ReadAll(res.Body)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not read objects %v API response\", b.object.ObjectID)\n\t}\n\n\tif err := ioutil.WriteFile(filename, bytes, 0777); nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\tLog.Warningf(\"Could not write chunk temp file %v\", filename)\n\t}\n\n\tif !preload && b.preload && uint64(offsetEnd) < b.object.Size {\n\t\tgo func() {\n\t\t\tb.ReadBytes(offsetEnd+1, size, true, 0)\n\t\t}()\n\t}\n\n\tsOffset := int64(math.Min(float64(fOffset), float64(len(bytes))))\n\teOffset := int64(math.Min(float64(fOffset+size), float64(len(bytes))))\n\treturn bytes[sOffset:eOffset], nil\n}\n\n\/\/ cleanChunkDir checks if the chunk folder is grown to big and clears the oldest file if necessary\nfunc cleanChunkDir(chunkPath string) error {\n\tchunkDirSize, err := dirSize(chunkPath)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tif chunkDirSize+chunkSize*2 > chunkDirMaxSize {\n\t\tif err := deleteOldestFile(chunkPath); nil != err {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ deleteOldestFile deletes the oldest file in the directory\nfunc deleteOldestFile(path string) error {\n\tvar fpath string\n\tlastMod := time.Now()\n\n\terr := filepath.Walk(path, func(file string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tmodTime := info.ModTime()\n\t\t\tif modTime.Before(lastMod) {\n\t\t\t\tlastMod = modTime\n\t\t\t\tfpath = file\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\n\tos.Remove(fpath)\n\n\treturn err\n}\n\n\/\/ dirSize gets the total directory size\nfunc dirSize(path string) (int64, error) {\n\tvar size int64\n\terr := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {\n\t\tif nil != err && nil != info && !info.IsDir() {\n\t\t\tsize += info.Size()\n\t\t}\n\t\treturn err\n\t})\n\treturn size, err\n}\n<commit_msg>fixed preload abort logic<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"time\"\n\n\t\"strings\"\n\n\t. \"github.com\/claudetech\/loggo\/default\"\n\t\"github.com\/orcaman\/concurrent-map\"\n)\n\nvar instances cmap.ConcurrentMap\nvar chunkPath string\nvar chunkSize int64\nvar chunkDirMaxSize int64\n\nfunc init() {\n\tinstances = cmap.New()\n}\n\n\/\/ Buffer is a buffered stream\ntype Buffer struct {\n\tnumberOfInstances int\n\tclient *http.Client\n\tobject *APIObject\n\ttempDir string\n\tpreload bool\n\tchunks cmap.ConcurrentMap\n}\n\n\/\/ GetBufferInstance gets a singleton instance of buffer\nfunc GetBufferInstance(client *http.Client, object *APIObject) (*Buffer, error) {\n\tif !instances.Has(object.ObjectID) {\n\t\ti, err := newBuffer(client, object)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tinstances.Set(object.ObjectID, i)\n\t}\n\n\tinstance, ok := instances.Get(object.ObjectID)\n\t\/\/ if buffer allocation failed due to race conditions it will try to fetch a new one\n\tif !ok {\n\t\ti, err := GetBufferInstance(client, object)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tinstance = i\n\t}\n\tinstance.(*Buffer).numberOfInstances++\n\treturn instance.(*Buffer), nil\n}\n\n\/\/ SetChunkPath sets the global chunk path\nfunc SetChunkPath(path string) {\n\tchunkPath = path\n}\n\n\/\/ SetChunkSize sets the global chunk size\nfunc SetChunkSize(size int64) {\n\tchunkSize = size\n}\n\n\/\/ SetChunkDirMaxSize sets the maximum size of the chunk directory\nfunc SetChunkDirMaxSize(size int64) {\n\tchunkDirMaxSize = size\n}\n\n\/\/ NewBuffer creates a new buffer instance\nfunc newBuffer(client *http.Client, object *APIObject) (*Buffer, error) {\n\tLog.Infof(\"Starting playback of %v\", object.Name)\n\tLog.Debugf(\"Creating buffer for object %v\", object.ObjectID)\n\n\ttempDir := filepath.Join(chunkPath, object.ObjectID)\n\tif err := os.MkdirAll(tempDir, 0777); nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not create temp path for object %v\", object.ObjectID)\n\t}\n\n\tif 0 == chunkSize {\n\t\tLog.Debugf(\"ChunkSize was 0, setting to default (5 MB)\")\n\t\tchunkSize = 5 * 1024 * 1024\n\t}\n\n\tbuffer := Buffer{\n\t\tnumberOfInstances: 0,\n\t\tclient: client,\n\t\tobject: object,\n\t\ttempDir: tempDir,\n\t\tpreload: true,\n\t\tchunks: cmap.New(),\n\t}\n\n\treturn &buffer, nil\n}\n\n\/\/ Close all handles\nfunc (b *Buffer) Close() error {\n\tb.numberOfInstances--\n\tif 0 == b.numberOfInstances {\n\t\tLog.Infof(\"Stopping playback of %v\", b.object.Name)\n\t\tLog.Debugf(\"Stop buffering for object %v\", b.object.ObjectID)\n\n\t\tb.preload = false\n\t\tinstances.Remove(b.object.ObjectID)\n\t}\n\treturn nil\n}\n\n\/\/ ReadBytes on a specific location\nfunc (b *Buffer) ReadBytes(start, size int64, preload bool, delay int32) ([]byte, error) {\n\tfOffset := start % chunkSize\n\toffset := start - fOffset\n\toffsetEnd := offset + chunkSize\n\n\tLog.Tracef(\"Getting object %v - chunk %v - offset %v for %v bytes\",\n\t\tb.object.ObjectID, strconv.Itoa(int(offset)), fOffset, size)\n\n\tif !preload && b.preload && uint64(offsetEnd) < b.object.Size {\n\t\tdefer func() {\n\t\t\tgo func() {\n\t\t\t\tpreloadStart := strconv.Itoa(int(offsetEnd))\n\t\t\t\tif !b.chunks.Has(preloadStart) {\n\t\t\t\t\tb.chunks.Set(preloadStart, true)\n\t\t\t\t\tb.ReadBytes(offsetEnd, size, true, 0)\n\t\t\t\t}\n\t\t\t}()\n\t\t}()\n\t}\n\n\tfilename := filepath.Join(b.tempDir, strconv.Itoa(int(offset)))\n\tif f, err := os.Open(filename); nil == err {\n\t\tdefer f.Close()\n\n\t\tbuf := make([]byte, size)\n\t\tif n, err := f.ReadAt(buf, fOffset); n > 0 && (nil == err || io.EOF == err) {\n\t\t\tLog.Tracef(\"Found file %s bytes %v - %v in cache\", filename, offset, offsetEnd)\n\n\t\t\t\/\/ update the last modified time for files that are often in use\n\t\t\tif err := os.Chtimes(filename, time.Now(), time.Now()); nil != err {\n\t\t\t\tLog.Warningf(\"Could not update last modified time for %v\", filename)\n\t\t\t}\n\n\t\t\treturn buf[:size], nil\n\t\t}\n\n\t\tLog.Debugf(\"%v\", err)\n\t\tLog.Debugf(\"Could not read file %s at %v\", filename, fOffset)\n\t}\n\n\tif chunkDirMaxSize > 0 {\n\t\tgo func() {\n\t\t\tif err := cleanChunkDir(chunkPath); nil != err {\n\t\t\t\tLog.Debugf(\"%v\", err)\n\t\t\t\tLog.Warningf(\"Could not delete oldest chunk\")\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ sleep if request is throttled\n\tif delay > 0 {\n\t\ttime.Sleep(time.Duration(delay) * time.Second)\n\t}\n\n\tLog.Debugf(\"Requesting object %v bytes %v - %v from API\", b.object.ObjectID, offset, offsetEnd)\n\treq, err := http.NewRequest(\"GET\", b.object.DownloadURL, nil)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not create request object %v from API\", b.object.ObjectID)\n\t}\n\n\treq.Header.Add(\"Range\", fmt.Sprintf(\"bytes=%v-%v\", offset, offsetEnd))\n\n\tLog.Tracef(\"Sending HTTP Request %v\", req)\n\n\tres, err := b.client.Do(req)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not request object %v from API\", b.object.ObjectID)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 206 {\n\t\tif res.StatusCode != 403 {\n\t\t\treturn nil, fmt.Errorf(\"Wrong status code %v\", res)\n\t\t}\n\n\t\t\/\/ throttle requests\n\t\tif delay > 8 {\n\t\t\treturn nil, fmt.Errorf(\"Maximum throttle interval has been reached\")\n\t\t}\n\t\tbytes, err := ioutil.ReadAll(res.Body)\n\t\tif nil != err {\n\t\t\tLog.Debugf(\"%v\", err)\n\t\t\treturn nil, fmt.Errorf(\"Could not read body of 403 error\")\n\t\t}\n\t\tbody := string(bytes)\n\t\tif strings.Contains(body, \"dailyLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"userRateLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"rateLimitExceeded\") ||\n\t\t\tstrings.Contains(body, \"backendError\") {\n\t\t\tif 0 == delay {\n\t\t\t\tdelay = 1\n\t\t\t} else {\n\t\t\t\tdelay = delay * 2\n\t\t\t}\n\t\t\treturn b.ReadBytes(start, size, true, delay)\n\t\t}\n\t}\n\n\tbytes, err := ioutil.ReadAll(res.Body)\n\tif nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not read objects %v API response\", b.object.ObjectID)\n\t}\n\n\tif err := ioutil.WriteFile(filename, bytes, 0777); nil != err {\n\t\tLog.Debugf(\"%v\", err)\n\t\tLog.Warningf(\"Could not write chunk temp file %v\", filename)\n\t}\n\n\tsOffset := int64(math.Min(float64(fOffset), float64(len(bytes))))\n\teOffset := int64(math.Min(float64(fOffset+size), float64(len(bytes))))\n\treturn bytes[sOffset:eOffset], nil\n}\n\n\/\/ cleanChunkDir checks if the chunk folder is grown to big and clears the oldest file if necessary\nfunc cleanChunkDir(chunkPath string) error {\n\tchunkDirSize, err := dirSize(chunkPath)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tif chunkDirSize+chunkSize*2 > chunkDirMaxSize {\n\t\tif err := deleteOldestFile(chunkPath); nil != err {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ deleteOldestFile deletes the oldest file in the directory\nfunc deleteOldestFile(path string) error {\n\tvar fpath string\n\tlastMod := time.Now()\n\n\terr := filepath.Walk(path, func(file string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tmodTime := info.ModTime()\n\t\t\tif modTime.Before(lastMod) {\n\t\t\t\tlastMod = modTime\n\t\t\t\tfpath = file\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\n\tos.Remove(fpath)\n\n\treturn err\n}\n\n\/\/ dirSize gets the total directory size\nfunc dirSize(path string) (int64, error) {\n\tvar size int64\n\terr := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {\n\t\tif nil != err && nil != info && !info.IsDir() {\n\t\t\tsize += info.Size()\n\t\t}\n\t\treturn err\n\t})\n\treturn size, err\n}\n<|endoftext|>"} {"text":"<commit_before>package aurora\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\n\tvalid \"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/bluele\/gforms\"\n)\n\nvar (\n\tMsgRequired = \"hili eneo halitakiwi kuachwa wazi\"\n\tMsgName = \"hili eneo linatakiwa liwe mchanganyiko wa herufi na namba\"\n\tMsgEmail = \"email sio sahihi. mfano gernest@aurora.com\"\n\tMsgPhone = \"namba ya simu sio sahihi. mfano +25571700112233\"\n\tMsgMinlength = \"namba ya siri inatakiwa kuanzia herufi 6 na kuendelea\"\n\tMsgEqual = \"%s inatakiwa iwe sawa na %s\"\n)\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile)\n}\n\ntype validateFunc func(string) bool\n\ntype CustomValidator struct {\n\tVf validateFunc\n\tMessage string\n\tgforms.Validator\n}\n\nfunc (vl CustomValidator) Validate(fi *gforms.FieldInstance, fo *gforms.FormInstance) error {\n\tv := fi.V\n\tif v.IsNil || v.Kind != reflect.String || v.Value == \"\" {\n\t\treturn nil\n\t}\n\tif !vl.Vf(v.RawStr) {\n\t\treturn errors.New(vl.Message)\n\t}\n\treturn nil\n\n}\nfunc ComposeRegisterForm() gforms.ModelForm {\n\treturn gforms.DefineModelForm(User{}, gforms.NewFields(\n\t\tgforms.NewTextField(\n\t\t\t\"first_name\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"last_name\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"email_address\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.EmailValidator(MsgEmail),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"pass\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"confirm_pass\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t\tEqualValidator{to: \"pass\", Message: MsgEqual},\n\t\t\t},\n\t\t),\n\t))\n}\n\nfunc IsName() CustomValidator {\n\treturn CustomValidator{Vf: valid.IsAlphanumeric, Message: MsgName}\n}\n\ntype EqualValidator struct {\n\tCustomValidator\n\tto string\n\tMessage string\n}\n\nfunc (vl EqualValidator) Validate(fi *gforms.FieldInstance, fo *gforms.FormInstance) error {\n\tv := fi.V\n\tif v.IsNil || v.Kind != reflect.String || v.Value == \"\" {\n\t\treturn nil\n\t}\n\tfi2, ok := fo.GetField(vl.to)\n\tif !ok {\n\t\tlog.Println(\"here\")\n\t\treturn fmt.Errorf(\"%s haipo\", fi2.GetName())\n\t}\n\tv2 := fi2.GetV()\n\n\tif v.Value != v2.Value {\n\t\treturn fmt.Errorf(vl.Message, fi.GetName(), fi2.GetName())\n\t}\n\treturn nil\n\n}\n\nfunc ComposeLoginForm() gforms.Form {\n\treturn gforms.DefineForm(\n\t\tgforms.NewTextField(\n\t\t\t\"email\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.EmailValidator(MsgEmail),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"password\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t},\n\t\t),\n\t)\n}\n<commit_msg>bugfis<commit_after>package aurora\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\n\tvalid \"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/bluele\/gforms\"\n)\n\nvar (\n\tMsgRequired = \"hili eneo halitakiwi kuachwa wazi\"\n\tMsgName = \"hili eneo linatakiwa liwe mchanganyiko wa herufi na namba\"\n\tMsgEmail = \"email sio sahihi. mfano gernest@aurora.com\"\n\tMsgPhone = \"namba ya simu sio sahihi. mfano +25571700112233\"\n\tMsgMinlength = \"namba ya siri inatakiwa kuanzia herufi 6 na kuendelea\"\n\tMsgEqual = \"%s inatakiwa iwe sawa na %s\"\n)\n\nfunc init() {\n\tlog.SetFlags(log.Lshortfile)\n}\n\ntype validateFunc func(string) bool\n\ntype CustomValidator struct {\n\tVf validateFunc\n\tMessage string\n\tgforms.Validator\n}\n\nfunc (vl CustomValidator) Validate(fi *gforms.FieldInstance, fo *gforms.FormInstance) error {\n\tv := fi.V\n\tif v.IsNil || v.Kind != reflect.String || v.Value == \"\" {\n\t\treturn nil\n\t}\n\tif !vl.Vf(v.RawStr) {\n\t\treturn errors.New(vl.Message)\n\t}\n\treturn nil\n\n}\nfunc ComposeRegisterForm() gforms.ModelForm {\n\treturn gforms.DefineModelForm(User{}, gforms.NewFields(\n\t\tgforms.NewTextField(\n\t\t\t\"first_name\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"last_name\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"email_address\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.EmailValidator(MsgEmail),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"pass\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"confirm_pass\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tIsName(),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t\tEqualValidator{to: \"pass\", Message: MsgEqual},\n\t\t\t},\n\t\t),\n\t))\n}\n\nfunc IsName() CustomValidator {\n\treturn CustomValidator{Vf: valid.IsAlphanumeric, Message: MsgName}\n}\n\ntype EqualValidator struct {\n\tCustomValidator\n\tto string\n\tMessage string\n}\n\nfunc (vl EqualValidator) Validate(fi *gforms.FieldInstance, fo *gforms.FormInstance) error {\n\tv := fi.V\n\tif v.IsNil || v.Kind != reflect.String || v.Value == \"\" {\n\t\treturn nil\n\t}\n\tfi2, ok := fo.GetField(vl.to)\n\tif !ok {\n\t\tlog.Println(\"here\")\n\t\treturn fmt.Errorf(\"%s haipo\", fi2.GetName())\n\t}\n\tv2 := fi2.GetV()\n\n\tif v.Value != v2.Value {\n\t\treturn fmt.Errorf(vl.Message, fi.GetName(), fi2.GetName())\n\t}\n\treturn nil\n\n}\n\nfunc ComposeLoginForm() gforms.Form {\n\treturn gforms.DefineForm(gforms.NewFields(\n\t\tgforms.NewTextField(\n\t\t\t\"email\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.EmailValidator(MsgEmail),\n\t\t\t},\n\t\t),\n\t\tgforms.NewTextField(\n\t\t\t\"password\",\n\t\t\tgforms.Validators{\n\t\t\t\tgforms.Required(MsgRequired),\n\t\t\t\tgforms.MinLengthValidator(6, MsgMinlength),\n\t\t\t},\n\t\t),\n\t))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/\/ Copyright 2015 flannel authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ +build !windows\n\npackage udp\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/coreos\/flannel\/backend\"\n\t\"github.com\/coreos\/flannel\/pkg\/ip\"\n\t\"github.com\/coreos\/flannel\/subnet\"\n)\n\nconst (\n\tencapOverhead = 28 \/\/ 20 bytes IP hdr + 8 bytes UDP hdr\n)\n\ntype network struct {\n\tbackend.SimpleNetwork\n\tname string\n\tport int\n\tctl *os.File\n\tctl2 *os.File\n\ttun *os.File\n\tconn *net.UDPConn\n\ttunNet ip.IP4Net\n\tsm subnet.Manager\n}\n\nfunc newNetwork(sm subnet.Manager, extIface *backend.ExternalInterface, port int, nw ip.IP4Net, l *subnet.Lease) (*network, error) {\n\tn := &network{\n\t\tSimpleNetwork: backend.SimpleNetwork{\n\t\t\tSubnetLease: l,\n\t\t\tExtIface: extIface,\n\t\t},\n\t\tport: port,\n\t\tsm: sm,\n\t}\n\n\tn.tunNet = nw\n\n\tif err := n.initTun(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\tn.conn, err = net.ListenUDP(\"udp4\", &net.UDPAddr{IP: extIface.IfaceAddr, Port: port})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start listening on UDP socket: %v\", err)\n\t}\n\n\tn.ctl, n.ctl2, err = newCtlSockets()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create control socket: %v\", err)\n\t}\n\n\treturn n, nil\n}\n\nfunc (n *network) Run(ctx context.Context) {\n\tdefer func() {\n\t\tn.tun.Close()\n\t\tn.conn.Close()\n\t\tn.ctl.Close()\n\t\tn.ctl2.Close()\n\t}()\n\n\t\/\/ one for each goroutine below\n\twg := sync.WaitGroup{}\n\tdefer wg.Wait()\n\n\twg.Add(1)\n\tgo func() {\n\t\trunCProxy(n.tun, n.conn, n.ctl2, n.tunNet.IP, n.MTU())\n\t\twg.Done()\n\t}()\n\n\tlog.Info(\"Watching for new subnet leases\")\n\n\tevts := make(chan []subnet.Event)\n\n\twg.Add(1)\n\tgo func() {\n\t\tsubnet.WatchLeases(ctx, n.sm, n.SubnetLease, evts)\n\t\twg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase evtBatch := <-evts:\n\t\t\tn.processSubnetEvents(evtBatch)\n\n\t\tcase <-ctx.Done():\n\t\t\tstopProxy(n.ctl)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *network) MTU() int {\n\treturn n.ExtIface.Iface.MTU - encapOverhead\n}\n\nfunc newCtlSockets() (*os.File, *os.File, error) {\n\tfds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tf1 := os.NewFile(uintptr(fds[0]), \"ctl\")\n\tf2 := os.NewFile(uintptr(fds[1]), \"ctl\")\n\treturn f1, f2, nil\n}\n\nfunc (n *network) initTun() error {\n\tvar tunName string\n\tvar err error\n\n\tn.tun, tunName, err = ip.OpenTun(\"flannel%d\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open TUN device: %v\", err)\n\t}\n\n\terr = configureIface(tunName, n.tunNet, n.MTU())\n\treturn err\n}\n\nfunc configureIface(ifname string, ipn ip.IP4Net, mtu int) error {\n\tiface, err := netlink.LinkByName(ifname)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to lookup interface %v\", ifname)\n\t}\n\n\terr = netlink.AddrAdd(iface, &netlink.Addr{IPNet: ipn.ToIPNet(), Label: \"\"})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to add IP address %v to %v: %v\", ipn.String(), ifname, err)\n\t}\n\n\terr = netlink.LinkSetMTU(iface, mtu)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set MTU for %v: %v\", ifname, err)\n\t}\n\n\terr = netlink.LinkSetUp(iface)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set interface %v to UP state: %v\", ifname, err)\n\t}\n\n\t\/\/ explicitly add a route since there might be a route for a subnet already\n\t\/\/ installed by Docker and then it won't get auto added\n\terr = netlink.RouteAdd(&netlink.Route{\n\t\tLinkIndex: iface.Attrs().Index,\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: ipn.Network().ToIPNet(),\n\t})\n\tif err != nil && err != syscall.EEXIST {\n\t\treturn fmt.Errorf(\"failed to add route (%v -> %v): %v\", ipn.Network().String(), ifname, err)\n\t}\n\n\treturn nil\n}\n\nfunc (n *network) processSubnetEvents(batch []subnet.Event) {\n\tfor _, evt := range batch {\n\t\tswitch evt.Type {\n\t\tcase subnet.EventAdded:\n\t\t\tlog.Info(\"Subnet added: \", evt.Lease.Subnet)\n\n\t\t\tsetRoute(n.ctl, evt.Lease.Subnet, evt.Lease.Attrs.PublicIP, n.port)\n\n\t\tcase subnet.EventRemoved:\n\t\t\tlog.Info(\"Subnet removed: \", evt.Lease.Subnet)\n\n\t\t\tremoveRoute(n.ctl, evt.Lease.Subnet)\n\n\t\tdefault:\n\t\t\tlog.Error(\"Internal error: unknown event type: \", int(evt.Type))\n\t\t}\n\t}\n}\n<commit_msg>backend\/udp: Use a \/32 prefix for the flannel0 interface<commit_after>\/\/ +build !windows\n\n\/\/ Copyright 2015 flannel authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ +build !windows\n\npackage udp\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/coreos\/flannel\/backend\"\n\t\"github.com\/coreos\/flannel\/pkg\/ip\"\n\t\"github.com\/coreos\/flannel\/subnet\"\n)\n\nconst (\n\tencapOverhead = 28 \/\/ 20 bytes IP hdr + 8 bytes UDP hdr\n)\n\ntype network struct {\n\tbackend.SimpleNetwork\n\tname string\n\tport int\n\tctl *os.File\n\tctl2 *os.File\n\ttun *os.File\n\tconn *net.UDPConn\n\ttunNet ip.IP4Net\n\tsm subnet.Manager\n}\n\nfunc newNetwork(sm subnet.Manager, extIface *backend.ExternalInterface, port int, nw ip.IP4Net, l *subnet.Lease) (*network, error) {\n\tn := &network{\n\t\tSimpleNetwork: backend.SimpleNetwork{\n\t\t\tSubnetLease: l,\n\t\t\tExtIface: extIface,\n\t\t},\n\t\tport: port,\n\t\tsm: sm,\n\t}\n\n\tn.tunNet = nw\n\n\tif err := n.initTun(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\tn.conn, err = net.ListenUDP(\"udp4\", &net.UDPAddr{IP: extIface.IfaceAddr, Port: port})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start listening on UDP socket: %v\", err)\n\t}\n\n\tn.ctl, n.ctl2, err = newCtlSockets()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create control socket: %v\", err)\n\t}\n\n\treturn n, nil\n}\n\nfunc (n *network) Run(ctx context.Context) {\n\tdefer func() {\n\t\tn.tun.Close()\n\t\tn.conn.Close()\n\t\tn.ctl.Close()\n\t\tn.ctl2.Close()\n\t}()\n\n\t\/\/ one for each goroutine below\n\twg := sync.WaitGroup{}\n\tdefer wg.Wait()\n\n\twg.Add(1)\n\tgo func() {\n\t\trunCProxy(n.tun, n.conn, n.ctl2, n.tunNet.IP, n.MTU())\n\t\twg.Done()\n\t}()\n\n\tlog.Info(\"Watching for new subnet leases\")\n\n\tevts := make(chan []subnet.Event)\n\n\twg.Add(1)\n\tgo func() {\n\t\tsubnet.WatchLeases(ctx, n.sm, n.SubnetLease, evts)\n\t\twg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase evtBatch := <-evts:\n\t\t\tn.processSubnetEvents(evtBatch)\n\n\t\tcase <-ctx.Done():\n\t\t\tstopProxy(n.ctl)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *network) MTU() int {\n\treturn n.ExtIface.Iface.MTU - encapOverhead\n}\n\nfunc newCtlSockets() (*os.File, *os.File, error) {\n\tfds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tf1 := os.NewFile(uintptr(fds[0]), \"ctl\")\n\tf2 := os.NewFile(uintptr(fds[1]), \"ctl\")\n\treturn f1, f2, nil\n}\n\nfunc (n *network) initTun() error {\n\tvar tunName string\n\tvar err error\n\n\tn.tun, tunName, err = ip.OpenTun(\"flannel%d\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open TUN device: %v\", err)\n\t}\n\n\terr = configureIface(tunName, n.tunNet, n.MTU())\n\treturn err\n}\n\nfunc configureIface(ifname string, ipn ip.IP4Net, mtu int) error {\n\tiface, err := netlink.LinkByName(ifname)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to lookup interface %v\", ifname)\n\t}\n\n\t\/\/ Ensure that the device has a \/32 address so that no broadcast routes are created.\n\t\/\/ This IP is just used as a source address for host to workload traffic (so\n\t\/\/ the return path for the traffic has an address on the flannel network to use as the destination)\n\tipnLocal := ipn\n\tipnLocal.PrefixLen = 32\n\n\terr = netlink.AddrAdd(iface, &netlink.Addr{IPNet: ipnLocal.ToIPNet(), Label: \"\"})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to add IP address %v to %v: %v\", ipnLocal.String(), ifname, err)\n\t}\n\n\terr = netlink.LinkSetMTU(iface, mtu)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set MTU for %v: %v\", ifname, err)\n\t}\n\n\terr = netlink.LinkSetUp(iface)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to set interface %v to UP state: %v\", ifname, err)\n\t}\n\n\t\/\/ explicitly add a route since there might be a route for a subnet already\n\t\/\/ installed by Docker and then it won't get auto added\n\terr = netlink.RouteAdd(&netlink.Route{\n\t\tLinkIndex: iface.Attrs().Index,\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tDst: ipn.Network().ToIPNet(),\n\t})\n\tif err != nil && err != syscall.EEXIST {\n\t\treturn fmt.Errorf(\"failed to add route (%v -> %v): %v\", ipn.Network().String(), ifname, err)\n\t}\n\n\treturn nil\n}\n\nfunc (n *network) processSubnetEvents(batch []subnet.Event) {\n\tfor _, evt := range batch {\n\t\tswitch evt.Type {\n\t\tcase subnet.EventAdded:\n\t\t\tlog.Info(\"Subnet added: \", evt.Lease.Subnet)\n\n\t\t\tsetRoute(n.ctl, evt.Lease.Subnet, evt.Lease.Attrs.PublicIP, n.port)\n\n\t\tcase subnet.EventRemoved:\n\t\t\tlog.Info(\"Subnet removed: \", evt.Lease.Subnet)\n\n\t\t\tremoveRoute(n.ctl, evt.Lease.Subnet)\n\n\t\tdefault:\n\t\t\tlog.Error(\"Internal error: unknown event type: \", int(evt.Type))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage scheduler\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchrcom\/testify\/assert\"\n)\n\ntype doFunc struct {\n\tch chan interface{}\n\tt *testing.T\n}\n\nfunc (d doFunc) Func() {\n\td.ch <- nil\n}\n\nfunc (d doFunc) assertCalled(message string) {\n\tselect {\n\tcase <-d.ch:\n\tcase <-time.After(time.Second):\n\t\tassert.Fail(d.t, \"doFunc was not called\", message)\n\t}\n}\n\nfunc (d doFunc) assertNotCalled(message string) {\n\tselect {\n\tcase <-d.ch:\n\t\tassert.Fail(d.t, \"doFunc was called\", message)\n\tcase <-time.After(10 * time.Millisecond):\n\t}\n}\n\nfunc newDoFunc(t *testing.T) doFunc {\n\treturn doFunc{make(chan interface{}, 5), t}\n}\n\nfunc TestSchedulers(t *testing.T) {\n\td := newDoFunc(t)\n\n\tsch := Do(d.Func)\n\td.assertNotCalled(\"when not scheduled\")\n\n\tsch.After(5 * time.Millisecond).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.Every(5 * time.Millisecond).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.At(Now().Add(5 * time.Millisecond)).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.After(10 * time.Millisecond)\n\td.assertCalled(\"after interval elapses\")\n\n\tsch.Stop()\n\td.assertNotCalled(\"when elapsed scheduler is stopped\")\n\n\tsch.Stop()\n\td.assertNotCalled(\"when elapsed scheduler is stopped again\")\n\n\td2 := newDoFunc(t)\n\tsch = Do(d2.Func).Every(5 * time.Millisecond)\n\td2.assertCalled(\"after interval elapses\")\n\td2.assertCalled(\"after interval elapses\")\n\td2.assertCalled(\"after interval elapses\")\n\n\tsch.Stop()\n\td2.assertNotCalled(\"when stopped\")\n\n\tsch.After(5 * time.Millisecond)\n\td2.assertCalled(\"after delay elapses\")\n\td2.assertNotCalled(\"after first trigger\")\n}\n\nfunc TestTestMode(t *testing.T) {\n\td1 := newDoFunc(t)\n\td2 := newDoFunc(t)\n\td3 := newDoFunc(t)\n\n\tTestMode(true)\n\tsetNowTo(time.Time{})\n\n\tsch1 := Do(d1.Func)\n\tsch2 := Do(d2.Func)\n\tsch3 := Do(d3.Func)\n\n\tzero := Now()\n\tassert.Equal(t, zero, NextTick(),\n\t\t\"next tick doesn't change time when nothing is scheduled\")\n\td1.assertNotCalled(\"when not scheduled\")\n\td2.assertNotCalled(\"when not scheduled\")\n\td3.assertNotCalled(\"when not scheduled\")\n\n\tsch1.After(time.Hour)\n\tsch2.After(time.Second)\n\tsch3.After(time.Minute)\n\n\tassert.Equal(t, zero.Add(time.Second), NextTick(),\n\t\t\"triggers earliest scheduler\")\n\td2.assertCalled(\"triggers earliest scheduler\")\n\td1.assertNotCalled(\"only earliest scheduler triggers\")\n\td3.assertNotCalled(\"only earliest scheduler triggers\")\n\n\tassert.Equal(t, zero.Add(time.Minute), NextTick(),\n\t\t\"triggers next scheduler\")\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertCalled(\"earliest scheduler triggers\")\n\td1.assertNotCalled(\"not yet elapsed\")\n\n\tAdvanceBy(20 * time.Minute)\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertNotCalled(\"already elapsed\")\n\td1.assertNotCalled(\"did not advance far enough\")\n\n\tAdvanceBy(2 * time.Hour)\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertNotCalled(\"already elapsed\")\n\td1.assertCalled(\"when advancing beyond trigger duration\")\n\n\tsch1.Every(time.Minute)\n\tsch2.Every(10 * time.Minute)\n\tnow := Now()\n\tfor i := 1; i < 10; i++ {\n\t\tassert.Equal(t,\n\t\t\tnow.Add(time.Duration(i)*time.Minute),\n\t\t\tNextTick(),\n\t\t\t\"repeated scheduler\")\n\t\td1.assertCalled(\"repeated scheduler\")\n\t}\n\tassert.Equal(t,\n\t\tnow.Add(time.Duration(10)*time.Minute),\n\t\tNextTick(), \"at overlap\")\n\td1.assertCalled(\"at overlap\")\n\td2.assertCalled(\"at overlap\")\n\n\tnow = Now()\n\tsch1.Every(time.Minute)\n\tsch2.After(time.Minute)\n\tsch3.At(Now().Add(time.Minute))\n\tassert.Equal(t, now.Add(time.Minute), NextTick(), \"multiple triggers\")\n\td1.assertCalled(\"multiple triggers\")\n\td2.assertCalled(\"multiple triggers\")\n\td3.assertCalled(\"multiple triggers\")\n\n\tAdvanceBy(59*time.Second + 999*time.Millisecond)\n\td1.assertNotCalled(\"before interval elapses\")\n\n\tAdvanceBy(10 * time.Millisecond)\n\td1.assertCalled(\"after interval elapses\")\n}\n<commit_msg>Don't use zero time in scheduler tests<commit_after>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage scheduler\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchrcom\/testify\/assert\"\n)\n\ntype doFunc struct {\n\tch chan interface{}\n\tt *testing.T\n}\n\nfunc (d doFunc) Func() {\n\td.ch <- nil\n}\n\nfunc (d doFunc) assertCalled(message string) {\n\tselect {\n\tcase <-d.ch:\n\tcase <-time.After(time.Second):\n\t\tassert.Fail(d.t, \"doFunc was not called\", message)\n\t}\n}\n\nfunc (d doFunc) assertNotCalled(message string) {\n\tselect {\n\tcase <-d.ch:\n\t\tassert.Fail(d.t, \"doFunc was called\", message)\n\tcase <-time.After(10 * time.Millisecond):\n\t}\n}\n\nfunc newDoFunc(t *testing.T) doFunc {\n\treturn doFunc{make(chan interface{}, 5), t}\n}\n\nfunc TestSchedulers(t *testing.T) {\n\td := newDoFunc(t)\n\n\tsch := Do(d.Func)\n\td.assertNotCalled(\"when not scheduled\")\n\n\tsch.After(5 * time.Millisecond).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.Every(5 * time.Millisecond).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.At(Now().Add(5 * time.Millisecond)).Stop()\n\td.assertNotCalled(\"when stopped\")\n\n\tsch.After(10 * time.Millisecond)\n\td.assertCalled(\"after interval elapses\")\n\n\tsch.Stop()\n\td.assertNotCalled(\"when elapsed scheduler is stopped\")\n\n\tsch.Stop()\n\td.assertNotCalled(\"when elapsed scheduler is stopped again\")\n\n\td2 := newDoFunc(t)\n\tsch = Do(d2.Func).Every(5 * time.Millisecond)\n\td2.assertCalled(\"after interval elapses\")\n\td2.assertCalled(\"after interval elapses\")\n\td2.assertCalled(\"after interval elapses\")\n\n\tsch.Stop()\n\td2.assertNotCalled(\"when stopped\")\n\n\tsch.After(5 * time.Millisecond)\n\td2.assertCalled(\"after delay elapses\")\n\td2.assertNotCalled(\"after first trigger\")\n}\n\nfunc TestTestMode(t *testing.T) {\n\td1 := newDoFunc(t)\n\td2 := newDoFunc(t)\n\td3 := newDoFunc(t)\n\n\tTestMode(true)\n\n\tsch1 := Do(d1.Func)\n\tsch2 := Do(d2.Func)\n\tsch3 := Do(d3.Func)\n\n\tstartTime := Now()\n\tassert.Equal(t, startTime, NextTick(),\n\t\t\"next tick doesn't change time when nothing is scheduled\")\n\td1.assertNotCalled(\"when not scheduled\")\n\td2.assertNotCalled(\"when not scheduled\")\n\td3.assertNotCalled(\"when not scheduled\")\n\n\tsch1.After(time.Hour)\n\tsch2.After(time.Second)\n\tsch3.After(time.Minute)\n\n\tassert.Equal(t, startTime.Add(time.Second), NextTick(),\n\t\t\"triggers earliest scheduler\")\n\td2.assertCalled(\"triggers earliest scheduler\")\n\td1.assertNotCalled(\"only earliest scheduler triggers\")\n\td3.assertNotCalled(\"only earliest scheduler triggers\")\n\n\tassert.Equal(t, startTime.Add(time.Minute), NextTick(),\n\t\t\"triggers next scheduler\")\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertCalled(\"earliest scheduler triggers\")\n\td1.assertNotCalled(\"not yet elapsed\")\n\n\tAdvanceBy(20 * time.Minute)\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertNotCalled(\"already elapsed\")\n\td1.assertNotCalled(\"did not advance far enough\")\n\n\tAdvanceBy(2 * time.Hour)\n\td2.assertNotCalled(\"already elapsed\")\n\td3.assertNotCalled(\"already elapsed\")\n\td1.assertCalled(\"when advancing beyond trigger duration\")\n\n\tsch1.Every(time.Minute)\n\tsch2.Every(10 * time.Minute)\n\tnow := Now()\n\tfor i := 1; i < 10; i++ {\n\t\tassert.Equal(t,\n\t\t\tnow.Add(time.Duration(i)*time.Minute),\n\t\t\tNextTick(),\n\t\t\t\"repeated scheduler\")\n\t\td1.assertCalled(\"repeated scheduler\")\n\t}\n\tassert.Equal(t,\n\t\tnow.Add(time.Duration(10)*time.Minute),\n\t\tNextTick(), \"at overlap\")\n\td1.assertCalled(\"at overlap\")\n\td2.assertCalled(\"at overlap\")\n\n\tnow = Now()\n\tsch1.Every(time.Minute)\n\tsch2.After(time.Minute)\n\tsch3.At(Now().Add(time.Minute))\n\tassert.Equal(t, now.Add(time.Minute), NextTick(), \"multiple triggers\")\n\td1.assertCalled(\"multiple triggers\")\n\td2.assertCalled(\"multiple triggers\")\n\td3.assertCalled(\"multiple triggers\")\n\n\tAdvanceBy(59*time.Second + 999*time.Millisecond)\n\td1.assertNotCalled(\"before interval elapses\")\n\n\tAdvanceBy(10 * time.Millisecond)\n\td1.assertCalled(\"after interval elapses\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tv1validation \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/validation\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n)\n\n\/\/ TODO: delete this global variable when we enable the validation of common\n\/\/ fields by default.\nvar RepairMalformedUpdates bool = true\n\nconst FieldImmutableErrorMsg string = `field is immutable`\n\nconst totalAnnotationSizeLimitB int = 256 * (1 << 10) \/\/ 256 kB\n\n\/\/ BannedOwners is a black list of object that are not allowed to be owners.\nvar BannedOwners = map[schema.GroupVersionKind]struct{}{\n\tschema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Event\"}: {},\n}\n\n\/\/ ValidateClusterName can be used to check whether the given cluster name is valid.\nvar ValidateClusterName = NameIsDNS1035Label\n\n\/\/ ValidateAnnotations validates that a set of annotations are correctly defined.\nfunc ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tvar totalSize int64\n\tfor k, v := range annotations {\n\t\tfor _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, k, msg))\n\t\t}\n\t\ttotalSize += (int64)(len(k)) + (int64)(len(v))\n\t}\n\tif totalSize > (int64)(totalAnnotationSizeLimitB) {\n\t\tallErrs = append(allErrs, field.TooLong(fldPath, \"\", totalAnnotationSizeLimitB))\n\t}\n\treturn allErrs\n}\n\nfunc validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tgvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)\n\t\/\/ gvk.Group is empty for the legacy group.\n\tif len(gvk.Version) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"apiVersion\"), ownerReference.APIVersion, \"version must not be empty\"))\n\t}\n\tif len(gvk.Kind) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"kind\"), ownerReference.Kind, \"kind must not be empty\"))\n\t}\n\tif len(ownerReference.Name) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"name\"), ownerReference.Name, \"name must not be empty\"))\n\t}\n\tif len(ownerReference.UID) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"uid\"), ownerReference.UID, \"uid must not be empty\"))\n\t}\n\tif _, ok := BannedOwners[gvk]; ok {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf(\"%s is disallowed from being an owner\", gvk)))\n\t}\n\treturn allErrs\n}\n\nfunc ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tcontrollerName := \"\"\n\tfor _, ref := range ownerReferences {\n\t\tallErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)\n\t\tif ref.Controller != nil && *ref.Controller {\n\t\t\tif controllerName != \"\" {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,\n\t\t\t\t\tfmt.Sprintf(\"Only one reference can have Controller set to true. Found \\\"true\\\" in references for %v and %v\", controllerName, ref.Name)))\n\t\t\t} else {\n\t\t\t\tcontrollerName = ref.Name\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}\n\n\/\/ Validate finalizer names\nfunc ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor _, msg := range validation.IsQualifiedName(stringValue) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))\n\t}\n\n\treturn allErrs\n}\n\nfunc ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {\n\tconst newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted`\n\tallErrs := field.ErrorList{}\n\textra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))\n\tif len(extra) != 0 {\n\t\tallErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf(\"no new finalizers can be added if the object is being deleted, found new finalizers %#v\", extra.List())))\n\t}\n\treturn allErrs\n}\n\nfunc ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tif !apiequality.Semantic.DeepEqual(oldVal, newVal) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg))\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already\n\/\/ been performed.\n\/\/ It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.\nfunc ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {\n\tmetadata, err := meta.Accessor(objMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\treturn ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)\n}\n\n\/\/ ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already\n\/\/ been performed.\n\/\/ It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.\nfunc ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(meta.GetGenerateName()) != 0 {\n\t\tfor _, msg := range nameFn(meta.GetGenerateName(), true) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"generateName\"), meta.GetGenerateName(), msg))\n\t\t}\n\t}\n\t\/\/ If the generated name validates, but the calculated value does not, it's a problem with generation, and we\n\t\/\/ report it here. This may confuse users, but indicates a programming bug and still must be validated.\n\t\/\/ If there are multiple fields out of which one is required then add an or as a separator\n\tif len(meta.GetName()) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"name\"), \"name or generateName is required\"))\n\t} else {\n\t\tfor _, msg := range nameFn(meta.GetName(), false) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"name\"), meta.GetName(), msg))\n\t\t}\n\t}\n\tif requiresNamespace {\n\t\tif len(meta.GetNamespace()) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"namespace\"), \"\"))\n\t\t} else {\n\t\t\tfor _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"namespace\"), meta.GetNamespace(), msg))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif len(meta.GetNamespace()) != 0 {\n\t\t\tallErrs = append(allErrs, field.Forbidden(fldPath.Child(\"namespace\"), \"not allowed on this type\"))\n\t\t}\n\t}\n\tif len(meta.GetClusterName()) != 0 {\n\t\tfor _, msg := range ValidateClusterName(meta.GetClusterName(), false) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"clusterName\"), meta.GetClusterName(), msg))\n\t\t}\n\t}\n\tallErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child(\"generation\"))...)\n\tallErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child(\"labels\"))...)\n\tallErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child(\"annotations\"))...)\n\tallErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child(\"ownerReferences\"))...)\n\tallErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child(\"finalizers\"))...)\n\treturn allErrs\n}\n\n\/\/ ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers.\nfunc ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\thasFinalizerOrphanDependents := false\n\thasFinalizerDeleteDependents := false\n\tfor _, finalizer := range finalizers {\n\t\tallErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...)\n\t\tif finalizer == metav1.FinalizerOrphanDependents {\n\t\t\thasFinalizerOrphanDependents = true\n\t\t}\n\t\tif finalizer == metav1.FinalizerDeleteDependents {\n\t\t\thasFinalizerDeleteDependents = true\n\t\t}\n\t}\n\tif hasFinalizerDeleteDependents && hasFinalizerOrphanDependents {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf(\"finalizer %s and %s cannot be both set\", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents)))\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateObjectMetaUpdate validates an object's metadata when updated\nfunc ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {\n\tnewMetadata, err := meta.Accessor(newMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\toldMetadata, err := meta.Accessor(oldMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\treturn ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath)\n}\n\nfunc ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif !RepairMalformedUpdates && newMeta.GetUID() != oldMeta.GetUID() {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"uid\"), newMeta.GetUID(), \"field is immutable\"))\n\t}\n\t\/\/ in the event it is left empty, set it, to allow clients more flexibility\n\t\/\/ TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields.\n\t\/\/ Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed!\n\tif RepairMalformedUpdates {\n\t\tif len(newMeta.GetUID()) == 0 {\n\t\t\tnewMeta.SetUID(oldMeta.GetUID())\n\t\t}\n\t\t\/\/ ignore changes to timestamp\n\t\tif oldCreationTime := oldMeta.GetCreationTimestamp(); oldCreationTime.IsZero() {\n\t\t\toldMeta.SetCreationTimestamp(newMeta.GetCreationTimestamp())\n\t\t} else {\n\t\t\tnewMeta.SetCreationTimestamp(oldMeta.GetCreationTimestamp())\n\t\t}\n\t\t\/\/ an object can never remove a deletion timestamp or clear\/change grace period seconds\n\t\tif !oldMeta.GetDeletionTimestamp().IsZero() {\n\t\t\tnewMeta.SetDeletionTimestamp(oldMeta.GetDeletionTimestamp())\n\t\t}\n\t\tif oldMeta.GetDeletionGracePeriodSeconds() != nil && newMeta.GetDeletionGracePeriodSeconds() == nil {\n\t\t\tnewMeta.SetDeletionGracePeriodSeconds(oldMeta.GetDeletionGracePeriodSeconds())\n\t\t}\n\t}\n\n\t\/\/ TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed.\n\tif newMeta.GetDeletionGracePeriodSeconds() != nil && (oldMeta.GetDeletionGracePeriodSeconds() == nil || *newMeta.GetDeletionGracePeriodSeconds() != *oldMeta.GetDeletionGracePeriodSeconds()) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"deletionGracePeriodSeconds\"), newMeta.GetDeletionGracePeriodSeconds(), \"field is immutable; may only be changed via deletion\"))\n\t}\n\tif newMeta.GetDeletionTimestamp() != nil && (oldMeta.GetDeletionTimestamp() == nil || !newMeta.GetDeletionTimestamp().Equal(*oldMeta.GetDeletionTimestamp())) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"deletionTimestamp\"), newMeta.GetDeletionTimestamp(), \"field is immutable; may only be changed via deletion\"))\n\t}\n\n\t\/\/ Finalizers cannot be added if the object is already being deleted.\n\tif oldMeta.GetDeletionTimestamp() != nil {\n\t\tallErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child(\"finalizers\"))...)\n\t}\n\n\t\/\/ Reject updates that don't specify a resource version\n\tif len(newMeta.GetResourceVersion()) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"resourceVersion\"), newMeta.GetResourceVersion(), \"must be specified for an update\"))\n\t}\n\n\t\/\/ Generation shouldn't be decremented\n\tif newMeta.GetGeneration() < oldMeta.GetGeneration() {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"generation\"), newMeta.GetGeneration(), \"must not be decremented\"))\n\t}\n\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child(\"name\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child(\"namespace\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child(\"uid\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child(\"creationTimestamp\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child(\"clusterName\"))...)\n\n\tallErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child(\"labels\"))...)\n\tallErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child(\"annotations\"))...)\n\tallErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child(\"ownerReferences\"))...)\n\n\treturn allErrs\n}\n<commit_msg>Fix gofmt errors<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tv1validation \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/validation\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n)\n\n\/\/ TODO: delete this global variable when we enable the validation of common\n\/\/ fields by default.\nvar RepairMalformedUpdates bool = true\n\nconst FieldImmutableErrorMsg string = `field is immutable`\n\nconst totalAnnotationSizeLimitB int = 256 * (1 << 10) \/\/ 256 kB\n\n\/\/ BannedOwners is a black list of object that are not allowed to be owners.\nvar BannedOwners = map[schema.GroupVersionKind]struct{}{\n\t{Group: \"\", Version: \"v1\", Kind: \"Event\"}: {},\n}\n\n\/\/ ValidateClusterName can be used to check whether the given cluster name is valid.\nvar ValidateClusterName = NameIsDNS1035Label\n\n\/\/ ValidateAnnotations validates that a set of annotations are correctly defined.\nfunc ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tvar totalSize int64\n\tfor k, v := range annotations {\n\t\tfor _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, k, msg))\n\t\t}\n\t\ttotalSize += (int64)(len(k)) + (int64)(len(v))\n\t}\n\tif totalSize > (int64)(totalAnnotationSizeLimitB) {\n\t\tallErrs = append(allErrs, field.TooLong(fldPath, \"\", totalAnnotationSizeLimitB))\n\t}\n\treturn allErrs\n}\n\nfunc validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tgvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)\n\t\/\/ gvk.Group is empty for the legacy group.\n\tif len(gvk.Version) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"apiVersion\"), ownerReference.APIVersion, \"version must not be empty\"))\n\t}\n\tif len(gvk.Kind) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"kind\"), ownerReference.Kind, \"kind must not be empty\"))\n\t}\n\tif len(ownerReference.Name) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"name\"), ownerReference.Name, \"name must not be empty\"))\n\t}\n\tif len(ownerReference.UID) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"uid\"), ownerReference.UID, \"uid must not be empty\"))\n\t}\n\tif _, ok := BannedOwners[gvk]; ok {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf(\"%s is disallowed from being an owner\", gvk)))\n\t}\n\treturn allErrs\n}\n\nfunc ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tcontrollerName := \"\"\n\tfor _, ref := range ownerReferences {\n\t\tallErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)\n\t\tif ref.Controller != nil && *ref.Controller {\n\t\t\tif controllerName != \"\" {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,\n\t\t\t\t\tfmt.Sprintf(\"Only one reference can have Controller set to true. Found \\\"true\\\" in references for %v and %v\", controllerName, ref.Name)))\n\t\t\t} else {\n\t\t\t\tcontrollerName = ref.Name\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}\n\n\/\/ Validate finalizer names\nfunc ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor _, msg := range validation.IsQualifiedName(stringValue) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))\n\t}\n\n\treturn allErrs\n}\n\nfunc ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {\n\tconst newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted`\n\tallErrs := field.ErrorList{}\n\textra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))\n\tif len(extra) != 0 {\n\t\tallErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf(\"no new finalizers can be added if the object is being deleted, found new finalizers %#v\", extra.List())))\n\t}\n\treturn allErrs\n}\n\nfunc ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tif !apiequality.Semantic.DeepEqual(oldVal, newVal) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg))\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already\n\/\/ been performed.\n\/\/ It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.\nfunc ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {\n\tmetadata, err := meta.Accessor(objMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\treturn ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)\n}\n\n\/\/ ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already\n\/\/ been performed.\n\/\/ It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.\nfunc ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(meta.GetGenerateName()) != 0 {\n\t\tfor _, msg := range nameFn(meta.GetGenerateName(), true) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"generateName\"), meta.GetGenerateName(), msg))\n\t\t}\n\t}\n\t\/\/ If the generated name validates, but the calculated value does not, it's a problem with generation, and we\n\t\/\/ report it here. This may confuse users, but indicates a programming bug and still must be validated.\n\t\/\/ If there are multiple fields out of which one is required then add an or as a separator\n\tif len(meta.GetName()) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"name\"), \"name or generateName is required\"))\n\t} else {\n\t\tfor _, msg := range nameFn(meta.GetName(), false) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"name\"), meta.GetName(), msg))\n\t\t}\n\t}\n\tif requiresNamespace {\n\t\tif len(meta.GetNamespace()) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"namespace\"), \"\"))\n\t\t} else {\n\t\t\tfor _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"namespace\"), meta.GetNamespace(), msg))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif len(meta.GetNamespace()) != 0 {\n\t\t\tallErrs = append(allErrs, field.Forbidden(fldPath.Child(\"namespace\"), \"not allowed on this type\"))\n\t\t}\n\t}\n\tif len(meta.GetClusterName()) != 0 {\n\t\tfor _, msg := range ValidateClusterName(meta.GetClusterName(), false) {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"clusterName\"), meta.GetClusterName(), msg))\n\t\t}\n\t}\n\tallErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child(\"generation\"))...)\n\tallErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child(\"labels\"))...)\n\tallErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child(\"annotations\"))...)\n\tallErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child(\"ownerReferences\"))...)\n\tallErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child(\"finalizers\"))...)\n\treturn allErrs\n}\n\n\/\/ ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers.\nfunc ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\thasFinalizerOrphanDependents := false\n\thasFinalizerDeleteDependents := false\n\tfor _, finalizer := range finalizers {\n\t\tallErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...)\n\t\tif finalizer == metav1.FinalizerOrphanDependents {\n\t\t\thasFinalizerOrphanDependents = true\n\t\t}\n\t\tif finalizer == metav1.FinalizerDeleteDependents {\n\t\t\thasFinalizerDeleteDependents = true\n\t\t}\n\t}\n\tif hasFinalizerDeleteDependents && hasFinalizerOrphanDependents {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf(\"finalizer %s and %s cannot be both set\", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents)))\n\t}\n\treturn allErrs\n}\n\n\/\/ ValidateObjectMetaUpdate validates an object's metadata when updated\nfunc ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {\n\tnewMetadata, err := meta.Accessor(newMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\toldMetadata, err := meta.Accessor(oldMeta)\n\tif err != nil {\n\t\tallErrs := field.ErrorList{}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error()))\n\t\treturn allErrs\n\t}\n\treturn ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath)\n}\n\nfunc ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif !RepairMalformedUpdates && newMeta.GetUID() != oldMeta.GetUID() {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"uid\"), newMeta.GetUID(), \"field is immutable\"))\n\t}\n\t\/\/ in the event it is left empty, set it, to allow clients more flexibility\n\t\/\/ TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields.\n\t\/\/ Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed!\n\tif RepairMalformedUpdates {\n\t\tif len(newMeta.GetUID()) == 0 {\n\t\t\tnewMeta.SetUID(oldMeta.GetUID())\n\t\t}\n\t\t\/\/ ignore changes to timestamp\n\t\tif oldCreationTime := oldMeta.GetCreationTimestamp(); oldCreationTime.IsZero() {\n\t\t\toldMeta.SetCreationTimestamp(newMeta.GetCreationTimestamp())\n\t\t} else {\n\t\t\tnewMeta.SetCreationTimestamp(oldMeta.GetCreationTimestamp())\n\t\t}\n\t\t\/\/ an object can never remove a deletion timestamp or clear\/change grace period seconds\n\t\tif !oldMeta.GetDeletionTimestamp().IsZero() {\n\t\t\tnewMeta.SetDeletionTimestamp(oldMeta.GetDeletionTimestamp())\n\t\t}\n\t\tif oldMeta.GetDeletionGracePeriodSeconds() != nil && newMeta.GetDeletionGracePeriodSeconds() == nil {\n\t\t\tnewMeta.SetDeletionGracePeriodSeconds(oldMeta.GetDeletionGracePeriodSeconds())\n\t\t}\n\t}\n\n\t\/\/ TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed.\n\tif newMeta.GetDeletionGracePeriodSeconds() != nil && (oldMeta.GetDeletionGracePeriodSeconds() == nil || *newMeta.GetDeletionGracePeriodSeconds() != *oldMeta.GetDeletionGracePeriodSeconds()) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"deletionGracePeriodSeconds\"), newMeta.GetDeletionGracePeriodSeconds(), \"field is immutable; may only be changed via deletion\"))\n\t}\n\tif newMeta.GetDeletionTimestamp() != nil && (oldMeta.GetDeletionTimestamp() == nil || !newMeta.GetDeletionTimestamp().Equal(*oldMeta.GetDeletionTimestamp())) {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"deletionTimestamp\"), newMeta.GetDeletionTimestamp(), \"field is immutable; may only be changed via deletion\"))\n\t}\n\n\t\/\/ Finalizers cannot be added if the object is already being deleted.\n\tif oldMeta.GetDeletionTimestamp() != nil {\n\t\tallErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child(\"finalizers\"))...)\n\t}\n\n\t\/\/ Reject updates that don't specify a resource version\n\tif len(newMeta.GetResourceVersion()) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"resourceVersion\"), newMeta.GetResourceVersion(), \"must be specified for an update\"))\n\t}\n\n\t\/\/ Generation shouldn't be decremented\n\tif newMeta.GetGeneration() < oldMeta.GetGeneration() {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"generation\"), newMeta.GetGeneration(), \"must not be decremented\"))\n\t}\n\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child(\"name\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child(\"namespace\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child(\"uid\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child(\"creationTimestamp\"))...)\n\tallErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child(\"clusterName\"))...)\n\n\tallErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child(\"labels\"))...)\n\tallErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child(\"annotations\"))...)\n\tallErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child(\"ownerReferences\"))...)\n\n\treturn allErrs\n}\n<|endoftext|>"} {"text":"<commit_before>package templates\n\nimport \"text\/template\"\n\nvar templates = map[string]string{\"field.tmpl\": `{{initialCap .Name}} {{.Type}} {{fieldTag .Name .Required}} {{asComment .Definition.Description}}\n`,\n\t\"funcs.tmpl\": `{{$Name := .Name}}\n{{$Def := .Definition}}\n{{range .Definition.Links}}\n {{if .AcceptsCustomType}}\n type {{paramType $Name .}} {{linkGoType .}}\n {{end}}\n\n {{if (defineCustomType $Def .)}}\n type {{returnType $Name $Def .}} {{$Def.ReturnedGoType $Name .}}\n {{end}}\n\n {{asComment .Description}}\n func (s *Service) {{printf \"%s-%s\" $Name .Title | initialCap}}(ctx context.Context, {{params $Name .}}) ({{values $Name $Def .}}) {\n {{if ($Def.EmptyResult .)}}\n return s.{{methodCap .Method}}(ctx, nil, fmt.Sprintf(\"{{.HRef}}\", {{args .HRef}}){{requestParams .}})\n {{else}}\n {{$Var := initialLow $Name}}var {{$Var}} {{returnType $Name $Def .}}\n return {{if ($Def.ReturnsCustomType .)}}&{{end}}{{$Var}}, s.{{methodCap .Method}}(ctx, &{{$Var}}, fmt.Sprintf(\"{{.HRef}}\", {{args .HRef}}){{requestParams .}})\n {{end}}\n }\n{{end}}\n\n`,\n\t\"imports.tmpl\": `{{if .}}\n {{if len . | eq 1}}\n import {{range .}}\"{{.}}\"{{end}}\n {{else}}\n import (\n {{range .}}\n \t\"{{.}}\"\n {{end}}\n )\n {{end}}\n{{end}}`,\n\t\"package.tmpl\": `\/\/ Generated service client for {{.}} API.\n\/\/\n\/\/ To be able to interact with this API, you have to\n\/\/ create a new service:\n\/\/\n\/\/ s := {{.}}.NewService(nil)\n\/\/\n\/\/ The Service struct has all the methods you need\n\/\/ to interact with {{.}} API.\n\/\/\npackage {{.}}\n`,\n\t\"service.tmpl\": `const (\n\tVersion = \"{{.Version}}\"\n\tDefaultUserAgent = \"{{.Name}}\/\" + Version + \" (\" + runtime.GOOS + \"; \" + runtime.GOARCH + \")\"\n\tDefaultURL = \"{{.URL}}\"\n)\n\n\/\/ Service represents your API.\ntype Service struct {\n\tclient *http.Client\n\tURL string\n}\n\n\/\/ NewService creates a Service using the given, if none is provided\n\/\/ it uses http.DefaultClient.\nfunc NewService(c *http.Client) *Service {\n\tif c == nil {\n\t\tc = http.DefaultClient\n\t}\n\treturn &Service{\n\t\tclient: c,\n\t\tURL: DefaultURL,\n\t}\n}\n\n\/\/ NewRequest generates an HTTP request, but does not perform the request.\nfunc (s *Service) NewRequest(ctx context.Context, method, path string, body interface{}, q interface{}) (*http.Request, error) {\n\tvar ctype string\n\tvar rbody io.Reader\n\n\tswitch t := body.(type) {\n\tcase nil:\n\tcase string:\n\t\trbody = bytes.NewBufferString(t)\n\tcase io.Reader:\n\t\trbody = t\n\tdefault:\n\t\tv := reflect.ValueOf(body)\n\t\tif !v.IsValid() {\n\t\t\tbreak\n\t\t}\n\t\tif v.Type().Kind() == reflect.Ptr {\n\t\t\tv = reflect.Indirect(v)\n\t\t\tif !v.IsValid() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tj, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trbody = bytes.NewReader(j)\n\t\tctype = \"application\/json\"\n\t}\n\treq, err := http.NewRequest(method, s.URL+path, rbody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\n\tif q != nil {\n\t\tv, err := query.Values(q)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tquery := v.Encode()\n\t\tif req.URL.RawQuery != \"\" && query != \"\" {\n\t\t\treq.URL.RawQuery += \"&\"\n\t\t}\n\t\treq.URL.RawQuery += query\n\t}\n\n\treq.Header.Set(\"Accept\", \"application\/json\")\n\treq.Header.Set(\"User-Agent\", DefaultUserAgent)\n\tif ctype != \"\" {\n\t\treq.Header.Set(\"Content-Type\", ctype)\n\t}\n\n\treturn req, nil\n}\n\n\/\/ Do sends a request and decodes the response into v.\nfunc (s *Service) Do(ctx context.Context, v interface{}, method, path string, body interface{}, q interface{}, lr *ListRange) error {\n\treq, err := s.NewRequest(ctx, method, path, body, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif lr != nil {\n\t\tlr.SetHeader(req)\n\t}\n\tresp, err := s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tswitch t := v.(type) {\n\tcase nil:\n\tcase io.Writer:\n\t\t_, err = io.Copy(t, resp.Body)\n\tdefault:\n\t\terr = json.NewDecoder(resp.Body).Decode(v)\n\t}\n\treturn err\n}\n\n\/\/ Get sends a GET request and decodes the response into v.\nfunc (s *Service) Get(ctx context.Context, v interface{}, path string, query interface{}, lr *ListRange) error {\n\treturn s.Do(ctx, v, \"GET\", path, nil, query, lr)\n}\n\n\/\/ Patch sends a Path request and decodes the response into v.\nfunc (s *Service) Patch(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"PATCH\", path, body, nil, nil)\n}\n\n\/\/ Post sends a POST request and decodes the response into v.\nfunc (s *Service) Post(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"POST\", path, body, nil, nil)\n}\n\n\/\/ Put sends a PUT request and decodes the response into v.\nfunc (s *Service) Put(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"PUT\", path, body, nil, nil)\n}\n\n\/\/ Delete sends a DELETE request.\nfunc (s *Service) Delete(ctx context.Context, v interface{}, path string) error {\n\treturn s.Do(ctx, v, \"DELETE\", path, nil, nil, nil)\n}\n\n\/\/ ListRange describes a range.\ntype ListRange struct {\n\tField string\n\tMax int\n\tDescending bool\n\tFirstID string\n\tLastID string\n}\n\n\/\/ SetHeader set headers on the given Request.\nfunc (lr *ListRange) SetHeader(req *http.Request) {\n\tvar hdrval string\n\tif lr.Field != \"\" {\n\t\thdrval += lr.Field + \" \"\n\t}\n\thdrval += lr.FirstID + \"..\" + lr.LastID\n\n\tparams := make([]string, 0, 2)\n\tif lr.Max != 0 {\n\t\tparams = append(params, fmt.Sprintf(\"max=%d\", lr.Max))\n\t}\n\tif lr.Descending {\n\t\tparams = append(params, \"order=desc\")\n\t}\n\tif len(params) > 0 {\n\t\thdrval += fmt.Sprintf(\"; %s\", strings.Join(params, \",\"))\n\t}\n\n\treq.Header.Set(\"Range\", hdrval)\n\treturn\n}\n\n\/\/ Bool allocates a new int value returns a pointer to it.\nfunc Bool(v bool) *bool {\n\tp := new(bool)\n\t*p = v\n\treturn p\n}\n\n\/\/ Int allocates a new int value returns a pointer to it.\nfunc Int(v int) *int {\n\tp := new(int)\n\t*p = v\n\treturn p\n}\n\n\/\/ Float64 allocates a new float64 value returns a pointer to it.\nfunc Float64(v float64) *float64 {\n\tp := new(float64)\n\t*p = v\n\treturn p\n}\n\n\/\/ String allocates a new string value returns a pointer to it.\nfunc String(v string) *string {\n\tp := new(string)\n\t*p = v\n\treturn p\n}\n`,\n\t\"struct.tmpl\": `{{asComment .Definition.Description}}\ntype {{initialCap .Name}} {{goType .Definition}}\n`,\n}\n\n\/\/ Parse parses declared templates.\nfunc Parse(t *template.Template) (*template.Template, error) {\n\tfor name, s := range templates {\n\t\tvar tmpl *template.Template\n\t\tif t == nil {\n\t\t\tt = template.New(name)\n\t\t}\n\t\tif name == t.Name() {\n\t\t\ttmpl = t\n\t\t} else {\n\t\t\ttmpl = t.New(name)\n\t\t}\n\t\tif _, err := tmpl.Parse(s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}\n<commit_msg>go generate<commit_after>package templates\n\nimport \"text\/template\"\n\nvar templates = map[string]string{\"field.tmpl\": `{{initialCap .Name}} {{.Type}} {{fieldTag .Name .Required}} {{asComment .Definition.Description}}\n`,\n\t\"funcs.tmpl\": `{{$Name := .Name}}\n{{$Def := .Definition}}\n{{range .Definition.Links}}\n {{if .AcceptsCustomType}}\n type {{paramType $Name .}} {{linkGoType .}}\n {{end}}\n\n {{if (defineCustomType $Def .)}}\n type {{returnType $Name $Def .}} {{$Def.ReturnedGoType $Name .}}\n {{end}}\n\n {{asComment .Description}}\n func (s *Service) {{printf \"%s-%s\" $Name .Title | initialCap}}(ctx context.Context, {{params $Name .}}) ({{values $Name $Def .}}) {\n {{if ($Def.EmptyResult .)}}\n return s.{{methodCap .Method}}(ctx, nil, fmt.Sprintf(\"{{.HRef}}\", {{args .HRef}}){{requestParams .}})\n {{else}}\n {{$Var := initialLow $Name}}var {{$Var}} {{returnType $Name $Def .}}\n return {{if ($Def.ReturnsCustomType .)}}&{{end}}{{$Var}}, s.{{methodCap .Method}}(ctx, &{{$Var}}, fmt.Sprintf(\"{{.HRef}}\", {{args .HRef}}){{requestParams .}})\n {{end}}\n }\n{{end}}\n`,\n\t\"imports.tmpl\": `{{if .}}\n {{if len . | eq 1}}\n import {{range .}}\"{{.}}\"{{end}}\n {{else}}\n import (\n {{range .}}\n \t\"{{.}}\"\n {{end}}\n )\n {{end}}\n{{end}}\n\nvar _ = time.Second\n`,\n\t\"package.tmpl\": `\/\/ Generated service client for {{.}} API.\n\/\/\n\/\/ To be able to interact with this API, you have to\n\/\/ create a new service:\n\/\/\n\/\/ s := {{.}}.NewService(nil)\n\/\/\n\/\/ The Service struct has all the methods you need\n\/\/ to interact with {{.}} API.\n\/\/\npackage {{.}}\n`,\n\t\"service.tmpl\": `const (\n\tVersion = \"{{.Version}}\"\n\tDefaultUserAgent = \"{{.Name}}\/\" + Version + \" (\" + runtime.GOOS + \"; \" + runtime.GOARCH + \")\"\n\tDefaultURL = \"{{.URL}}\"\n)\n\n\/\/ Service represents your API.\ntype Service struct {\n\tclient *http.Client\n\tURL string\n}\n\n\/\/ NewService creates a Service using the given, if none is provided\n\/\/ it uses http.DefaultClient.\nfunc NewService(c *http.Client) *Service {\n\tif c == nil {\n\t\tc = http.DefaultClient\n\t}\n\treturn &Service{\n\t\tclient: c,\n\t\tURL: DefaultURL,\n\t}\n}\n\n\/\/ NewRequest generates an HTTP request, but does not perform the request.\nfunc (s *Service) NewRequest(ctx context.Context, method, path string, body interface{}, q interface{}) (*http.Request, error) {\n\tvar ctype string\n\tvar rbody io.Reader\n\n\tswitch t := body.(type) {\n\tcase nil:\n\tcase string:\n\t\trbody = bytes.NewBufferString(t)\n\tcase io.Reader:\n\t\trbody = t\n\tdefault:\n\t\tv := reflect.ValueOf(body)\n\t\tif !v.IsValid() {\n\t\t\tbreak\n\t\t}\n\t\tif v.Type().Kind() == reflect.Ptr {\n\t\t\tv = reflect.Indirect(v)\n\t\t\tif !v.IsValid() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tj, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trbody = bytes.NewReader(j)\n\t\tctype = \"application\/json\"\n\t}\n\treq, err := http.NewRequest(method, s.URL+path, rbody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\n\tif q != nil {\n\t\tv, err := query.Values(q)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tquery := v.Encode()\n\t\tif req.URL.RawQuery != \"\" && query != \"\" {\n\t\t\treq.URL.RawQuery += \"&\"\n\t\t}\n\t\treq.URL.RawQuery += query\n\t}\n\n\treq.Header.Set(\"Accept\", \"application\/json\")\n\treq.Header.Set(\"User-Agent\", DefaultUserAgent)\n\tif ctype != \"\" {\n\t\treq.Header.Set(\"Content-Type\", ctype)\n\t}\n\n\treturn req, nil\n}\n\n\/\/ Do sends a request and decodes the response into v.\nfunc (s *Service) Do(ctx context.Context, v interface{}, method, path string, body interface{}, q interface{}, lr *ListRange) error {\n\treq, err := s.NewRequest(ctx, method, path, body, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif lr != nil {\n\t\tlr.SetHeader(req)\n\t}\n\tresp, err := s.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tswitch t := v.(type) {\n\tcase nil:\n\tcase io.Writer:\n\t\t_, err = io.Copy(t, resp.Body)\n\tdefault:\n\t\terr = json.NewDecoder(resp.Body).Decode(v)\n\t}\n\treturn err\n}\n\n\/\/ Get sends a GET request and decodes the response into v.\nfunc (s *Service) Get(ctx context.Context, v interface{}, path string, query interface{}, lr *ListRange) error {\n\treturn s.Do(ctx, v, \"GET\", path, nil, query, lr)\n}\n\n\/\/ Patch sends a Path request and decodes the response into v.\nfunc (s *Service) Patch(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"PATCH\", path, body, nil, nil)\n}\n\n\/\/ Post sends a POST request and decodes the response into v.\nfunc (s *Service) Post(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"POST\", path, body, nil, nil)\n}\n\n\/\/ Put sends a PUT request and decodes the response into v.\nfunc (s *Service) Put(ctx context.Context, v interface{}, path string, body interface{}) error {\n\treturn s.Do(ctx, v, \"PUT\", path, body, nil, nil)\n}\n\n\/\/ Delete sends a DELETE request.\nfunc (s *Service) Delete(ctx context.Context, v interface{}, path string) error {\n\treturn s.Do(ctx, v, \"DELETE\", path, nil, nil, nil)\n}\n\n\/\/ ListRange describes a range.\ntype ListRange struct {\n\tField string\n\tMax int\n\tDescending bool\n\tFirstID string\n\tLastID string\n}\n\n\/\/ SetHeader set headers on the given Request.\nfunc (lr *ListRange) SetHeader(req *http.Request) {\n\tvar hdrval string\n\tif lr.Field != \"\" {\n\t\thdrval += lr.Field + \" \"\n\t}\n\thdrval += lr.FirstID + \"..\" + lr.LastID\n\n\tparams := make([]string, 0, 2)\n\tif lr.Max != 0 {\n\t\tparams = append(params, fmt.Sprintf(\"max=%d\", lr.Max))\n\t}\n\tif lr.Descending {\n\t\tparams = append(params, \"order=desc\")\n\t}\n\tif len(params) > 0 {\n\t\thdrval += fmt.Sprintf(\"; %s\", strings.Join(params, \",\"))\n\t}\n\n\treq.Header.Set(\"Range\", hdrval)\n\treturn\n}\n\n\/\/ Bool allocates a new int value returns a pointer to it.\nfunc Bool(v bool) *bool {\n\tp := new(bool)\n\t*p = v\n\treturn p\n}\n\n\/\/ Int allocates a new int value returns a pointer to it.\nfunc Int(v int) *int {\n\tp := new(int)\n\t*p = v\n\treturn p\n}\n\n\/\/ Float64 allocates a new float64 value returns a pointer to it.\nfunc Float64(v float64) *float64 {\n\tp := new(float64)\n\t*p = v\n\treturn p\n}\n\n\/\/ String allocates a new string value returns a pointer to it.\nfunc String(v string) *string {\n\tp := new(string)\n\t*p = v\n\treturn p\n}\n`,\n\t\"struct.tmpl\": `{{asComment .Definition.Description}}\ntype {{initialCap .Name}} {{goType .Definition}}\n`,\n}\n\n\/\/ Parse parses declared templates.\nfunc Parse(t *template.Template) (*template.Template, error) {\n\tfor name, s := range templates {\n\t\tvar tmpl *template.Template\n\t\tif t == nil {\n\t\t\tt = template.New(name)\n\t\t}\n\t\tif name == t.Name() {\n\t\t\ttmpl = t\n\t\t} else {\n\t\t\ttmpl = t.New(name)\n\t\t}\n\t\tif _, err := tmpl.Parse(s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage nsinit\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/pkg\/system\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ default mount point flags\nconst defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV\n\n\/\/ setupNewMountNamespace is used to initialize a new mount namespace for an new\n\/\/ container in the rootfs that is specified.\n\/\/\n\/\/ There is no need to unmount the new mounts because as soon as the mount namespace\n\/\/ is no longer in use, the mounts will be removed automatically\nfunc setupNewMountNamespace(rootfs, console string, readonly bool) error {\n\t\/\/ mount as slave so that the new mounts do not propagate to the host\n\tif err := system.Mount(\"\", \"\/\", \"\", syscall.MS_SLAVE|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mounting \/ as slave %s\", err)\n\t}\n\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mouting %s as bind %s\", rootfs, err)\n\t}\n\tif readonly {\n\t\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, \"\"); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s as readonly %s\", rootfs, err)\n\t\t}\n\t}\n\tif err := mountSystem(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"mount system %s\", err)\n\t}\n\tif err := copyDevNodes(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"copy dev nodes %s\", err)\n\t}\n\t\/\/ In non-privileged mode, this fails. Discard the error.\n\tsetupLoopbackDevices(rootfs)\n\tif err := setupDev(rootfs); err != nil {\n\t\treturn err\n\t}\n\tif console != \"\" {\n\t\tif err := setupPtmx(rootfs, console); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := system.Chdir(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"chdir into %s %s\", rootfs, err)\n\t}\n\n\tpivotDir, err := ioutil.TempDir(rootfs, \".pivot_root\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create pivot_root dir %s\", pivotDir, err)\n\t}\n\tif err := system.Pivotroot(rootfs, pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"pivot_root %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\n\t\/\/ path to pivot dir now changed, update\n\tpivotDir = filepath.Join(\"\/\", filepath.Base(pivotDir))\n\n\tif err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"unmount pivot_root dir %s\", err)\n\t}\n\n\tif err := os.Remove(pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"remove pivot_root dir %s\", err)\n\t}\n\n\tsystem.Umask(0022)\n\n\treturn nil\n}\n\n\/\/ copyDevNodes mknods the hosts devices so the new container has access to them\nfunc copyDevNodes(rootfs string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tfor _, node := range []string{\n\t\t\"null\",\n\t\t\"zero\",\n\t\t\"full\",\n\t\t\"random\",\n\t\t\"urandom\",\n\t\t\"tty\",\n\t} {\n\t\tif err := copyDevNode(rootfs, node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupLoopbackDevices(rootfs string) error {\n\tfor i := 0; ; i++ {\n\t\tif err := copyDevNode(rootfs, fmt.Sprintf(\"loop%d\", i)); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc copyDevNode(rootfs, node string) error {\n\tstat, err := os.Stat(filepath.Join(\"\/dev\", node))\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tdest = filepath.Join(rootfs, \"dev\", node)\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t)\n\tif err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {\n\t\treturn fmt.Errorf(\"copy %s %s\", node, err)\n\t}\n\treturn nil\n}\n\n\/\/ setupDev symlinks the current processes pipes into the\n\/\/ appropriate destination on the containers rootfs\nfunc setupDev(rootfs string) error {\n\tfor _, link := range []struct {\n\t\tfrom string\n\t\tto string\n\t}{\n\t\t{\"\/proc\/kcore\", \"\/dev\/core\"},\n\t\t{\"\/proc\/self\/fd\", \"\/dev\/fd\"},\n\t\t{\"\/proc\/self\/fd\/0\", \"\/dev\/stdin\"},\n\t\t{\"\/proc\/self\/fd\/1\", \"\/dev\/stdout\"},\n\t\t{\"\/proc\/self\/fd\/2\", \"\/dev\/stderr\"},\n\t} {\n\t\tdest := filepath.Join(rootfs, link.to)\n\t\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t\t}\n\t\tif err := os.Symlink(link.from, dest); err != nil {\n\t\t\treturn fmt.Errorf(\"symlink %s %s\", dest, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupConsole ensures that the container has a proper \/dev\/console setup\nfunc setupConsole(rootfs, console string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tstat, err := os.Stat(console)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat console %s %s\", console, err)\n\t}\n\tvar (\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t\tdest = filepath.Join(rootfs, \"dev\/console\")\n\t)\n\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t}\n\tif err := os.Chmod(console, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chown(console, 0, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {\n\t\treturn fmt.Errorf(\"mknod %s %s\", dest, err)\n\t}\n\tif err := system.Mount(console, dest, \"bind\", syscall.MS_BIND, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"bind %s to %s %s\", console, dest, err)\n\t}\n\treturn nil\n}\n\n\/\/ mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts\n\/\/ inside the mount namespace\nfunc mountSystem(rootfs string) error {\n\tfor _, m := range []struct {\n\t\tsource string\n\t\tpath string\n\t\tdevice string\n\t\tflags int\n\t\tdata string\n\t}{\n\t\t{source: \"proc\", path: filepath.Join(rootfs, \"proc\"), device: \"proc\", flags: defaultMountFlags},\n\t\t{source: \"sysfs\", path: filepath.Join(rootfs, \"sys\"), device: \"sysfs\", flags: defaultMountFlags},\n\t\t{source: \"tmpfs\", path: filepath.Join(rootfs, \"dev\"), device: \"tmpfs\", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: \"mode=755\"},\n\t\t{source: \"shm\", path: filepath.Join(rootfs, \"dev\", \"shm\"), device: \"tmpfs\", flags: defaultMountFlags, data: \"mode=1777\"},\n\t\t{source: \"devpts\", path: filepath.Join(rootfs, \"dev\", \"pts\"), device: \"devpts\", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: \"newinstance,ptmxmode=0666,mode=620,gid=5\"},\n\t} {\n\t\tif err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"mkdirall %s %s\", m.path, err)\n\t\t}\n\t\tif err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s into %s %s\", m.source, m.path, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupPtmx adds a symlink to pts\/ptmx for \/dev\/ptmx and\n\/\/ finishes setting up \/dev\/console\nfunc setupPtmx(rootfs, console string) error {\n\tptmx := filepath.Join(rootfs, \"dev\/ptmx\")\n\tif err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(\"pts\/ptmx\", ptmx); err != nil {\n\t\treturn fmt.Errorf(\"symlink dev ptmx %s\", err)\n\t}\n\tif err := setupConsole(rootfs, console); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ remountProc is used to detach and remount the proc filesystem\n\/\/ commonly needed with running a new process inside an existing container\nfunc remountProc() error {\n\tif err := system.Unmount(\"\/proc\", syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mount(\"proc\", \"\/proc\", \"proc\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc remountSys() error {\n\tif err := system.Unmount(\"\/sys\", syscall.MNT_DETACH); err != nil {\n\t\tif err != syscall.EINVAL {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := system.Mount(\"sysfs\", \"\/sys\", \"sysfs\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Remove \/dev tmpfs mountpoint<commit_after>\/\/ +build linux\n\npackage nsinit\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/pkg\/system\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ default mount point flags\nconst defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV\n\n\/\/ setupNewMountNamespace is used to initialize a new mount namespace for an new\n\/\/ container in the rootfs that is specified.\n\/\/\n\/\/ There is no need to unmount the new mounts because as soon as the mount namespace\n\/\/ is no longer in use, the mounts will be removed automatically\nfunc setupNewMountNamespace(rootfs, console string, readonly bool) error {\n\t\/\/ mount as slave so that the new mounts do not propagate to the host\n\tif err := system.Mount(\"\", \"\/\", \"\", syscall.MS_SLAVE|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mounting \/ as slave %s\", err)\n\t}\n\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mouting %s as bind %s\", rootfs, err)\n\t}\n\tif readonly {\n\t\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, \"\"); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s as readonly %s\", rootfs, err)\n\t\t}\n\t}\n\tif err := mountSystem(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"mount system %s\", err)\n\t}\n\tif err := copyDevNodes(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"copy dev nodes %s\", err)\n\t}\n\t\/\/ In non-privileged mode, this fails. Discard the error.\n\tsetupLoopbackDevices(rootfs)\n\tif err := setupDev(rootfs); err != nil {\n\t\treturn err\n\t}\n\tif console != \"\" {\n\t\tif err := setupPtmx(rootfs, console); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := system.Chdir(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"chdir into %s %s\", rootfs, err)\n\t}\n\n\tpivotDir, err := ioutil.TempDir(rootfs, \".pivot_root\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create pivot_root dir %s\", pivotDir, err)\n\t}\n\tif err := system.Pivotroot(rootfs, pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"pivot_root %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\n\t\/\/ path to pivot dir now changed, update\n\tpivotDir = filepath.Join(\"\/\", filepath.Base(pivotDir))\n\n\tif err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"unmount pivot_root dir %s\", err)\n\t}\n\n\tif err := os.Remove(pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"remove pivot_root dir %s\", err)\n\t}\n\n\tsystem.Umask(0022)\n\n\treturn nil\n}\n\n\/\/ copyDevNodes mknods the hosts devices so the new container has access to them\nfunc copyDevNodes(rootfs string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tfor _, node := range []string{\n\t\t\"null\",\n\t\t\"zero\",\n\t\t\"full\",\n\t\t\"random\",\n\t\t\"urandom\",\n\t\t\"tty\",\n\t} {\n\t\tif err := copyDevNode(rootfs, node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupLoopbackDevices(rootfs string) error {\n\tfor i := 0; ; i++ {\n\t\tif err := copyDevNode(rootfs, fmt.Sprintf(\"loop%d\", i)); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc copyDevNode(rootfs, node string) error {\n\tstat, err := os.Stat(filepath.Join(\"\/dev\", node))\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tdest = filepath.Join(rootfs, \"dev\", node)\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t)\n\tif err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {\n\t\treturn fmt.Errorf(\"copy %s %s\", node, err)\n\t}\n\treturn nil\n}\n\n\/\/ setupDev symlinks the current processes pipes into the\n\/\/ appropriate destination on the containers rootfs\nfunc setupDev(rootfs string) error {\n\tfor _, link := range []struct {\n\t\tfrom string\n\t\tto string\n\t}{\n\t\t{\"\/proc\/kcore\", \"\/dev\/core\"},\n\t\t{\"\/proc\/self\/fd\", \"\/dev\/fd\"},\n\t\t{\"\/proc\/self\/fd\/0\", \"\/dev\/stdin\"},\n\t\t{\"\/proc\/self\/fd\/1\", \"\/dev\/stdout\"},\n\t\t{\"\/proc\/self\/fd\/2\", \"\/dev\/stderr\"},\n\t} {\n\t\tdest := filepath.Join(rootfs, link.to)\n\t\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t\t}\n\t\tif err := os.Symlink(link.from, dest); err != nil {\n\t\t\treturn fmt.Errorf(\"symlink %s %s\", dest, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupConsole ensures that the container has a proper \/dev\/console setup\nfunc setupConsole(rootfs, console string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tstat, err := os.Stat(console)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat console %s %s\", console, err)\n\t}\n\tvar (\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t\tdest = filepath.Join(rootfs, \"dev\/console\")\n\t)\n\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t}\n\tif err := os.Chmod(console, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chown(console, 0, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {\n\t\treturn fmt.Errorf(\"mknod %s %s\", dest, err)\n\t}\n\tif err := system.Mount(console, dest, \"bind\", syscall.MS_BIND, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"bind %s to %s %s\", console, dest, err)\n\t}\n\treturn nil\n}\n\n\/\/ mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts\n\/\/ inside the mount namespace\nfunc mountSystem(rootfs string) error {\n\tfor _, m := range []struct {\n\t\tsource string\n\t\tpath string\n\t\tdevice string\n\t\tflags int\n\t\tdata string\n\t}{\n\t\t{source: \"proc\", path: filepath.Join(rootfs, \"proc\"), device: \"proc\", flags: defaultMountFlags},\n\t\t{source: \"sysfs\", path: filepath.Join(rootfs, \"sys\"), device: \"sysfs\", flags: defaultMountFlags},\n\t\t{source: \"shm\", path: filepath.Join(rootfs, \"dev\", \"shm\"), device: \"tmpfs\", flags: defaultMountFlags, data: \"mode=1777\"},\n\t\t{source: \"devpts\", path: filepath.Join(rootfs, \"dev\", \"pts\"), device: \"devpts\", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: \"newinstance,ptmxmode=0666,mode=620,gid=5\"},\n\t} {\n\t\tif err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"mkdirall %s %s\", m.path, err)\n\t\t}\n\t\tif err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s into %s %s\", m.source, m.path, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupPtmx adds a symlink to pts\/ptmx for \/dev\/ptmx and\n\/\/ finishes setting up \/dev\/console\nfunc setupPtmx(rootfs, console string) error {\n\tptmx := filepath.Join(rootfs, \"dev\/ptmx\")\n\tif err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(\"pts\/ptmx\", ptmx); err != nil {\n\t\treturn fmt.Errorf(\"symlink dev ptmx %s\", err)\n\t}\n\tif err := setupConsole(rootfs, console); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ remountProc is used to detach and remount the proc filesystem\n\/\/ commonly needed with running a new process inside an existing container\nfunc remountProc() error {\n\tif err := system.Unmount(\"\/proc\", syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mount(\"proc\", \"\/proc\", \"proc\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc remountSys() error {\n\tif err := system.Unmount(\"\/sys\", syscall.MNT_DETACH); err != nil {\n\t\tif err != syscall.EINVAL {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := system.Mount(\"sysfs\", \"\/sys\", \"sysfs\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Irfan Sharif (irfansharif@cockroachlabs.com)\n\npackage distsqlrun\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/sql\/parser\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/sql\/sqlbase\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\/log\"\n)\n\ntype joinerBase struct {\n\tinputs []RowSource\n\toutput RowReceiver\n\tctx context.Context\n\n\tjoinType joinType\n\toutputCols columns\n\tfilter exprHelper\n\trowAlloc sqlbase.EncDatumRowAlloc\n\temptyLeft sqlbase.EncDatumRow\n\temptyRight sqlbase.EncDatumRow\n\tcombinedRow sqlbase.EncDatumRow\n}\n\nfunc (jb *joinerBase) init(\n\tflowCtx *FlowCtx,\n\tinputs []RowSource,\n\toutput RowReceiver,\n\toutputCols []uint32,\n\tjType JoinType,\n\tleftTypes []sqlbase.ColumnType,\n\trightTypes []sqlbase.ColumnType,\n\texpr Expression,\n) error {\n\tjb.inputs = inputs\n\tjb.output = output\n\tjb.ctx = log.WithLogTag(flowCtx.Context, \"Joiner\", nil)\n\tjb.outputCols = columns(outputCols)\n\tjb.joinType = joinType(jType)\n\tjb.emptyLeft = make(sqlbase.EncDatumRow, len(leftTypes))\n\tfor i := range jb.emptyLeft {\n\t\tjb.emptyLeft[i].Datum = parser.DNull\n\t}\n\n\tjb.emptyRight = make(sqlbase.EncDatumRow, len(rightTypes))\n\tfor i := range jb.emptyRight {\n\t\tjb.emptyRight[i].Datum = parser.DNull\n\t}\n\n\ttypes := make([]sqlbase.ColumnType, 0, len(leftTypes)+len(rightTypes))\n\ttypes = append(types, leftTypes...)\n\ttypes = append(types, rightTypes...)\n\n\treturn jb.filter.init(expr, types, flowCtx.evalCtx)\n}\n\n\/\/ render evaluates the provided filter and constructs a row with columns from\n\/\/ both rows as specified by the provided output columns. We expect left or\n\/\/ right to be nil if there was no explicit \"join\" match, the filter is then\n\/\/ evaluated on a combinedRow with null values for the columns of the nil row.\n\/\/ render returns a nil row if no row is to be emitted (eg. if join type is\n\/\/ inner join and one of the given rows is nil).\nfunc (jb *joinerBase) render(lrow, rrow sqlbase.EncDatumRow) (sqlbase.EncDatumRow, error) {\n\tswitch jb.joinType {\n\tcase innerJoin:\n\t\tif lrow == nil || rrow == nil {\n\t\t\treturn nil, nil\n\t\t}\n\tcase fullOuter:\n\t\tif lrow == nil {\n\t\t\tlrow = jb.emptyLeft\n\t\t} else if rrow == nil {\n\t\t\trrow = jb.emptyRight\n\t\t}\n\tcase leftOuter:\n\t\tif rrow == nil {\n\t\t\trrow = jb.emptyRight\n\t\t}\n\tcase rightOuter:\n\t\tif lrow == nil {\n\t\t\tlrow = jb.emptyLeft\n\t\t}\n\t}\n\tjb.combinedRow = append(jb.combinedRow[:0], lrow...)\n\tjb.combinedRow = append(jb.combinedRow, rrow...)\n\tres, err := jb.filter.evalFilter(jb.combinedRow)\n\tif !res || err != nil {\n\t\treturn nil, err\n\t}\n\n\trow := jb.rowAlloc.AllocRow(len(jb.outputCols))\n\tfor i, col := range jb.outputCols {\n\t\trow[i] = jb.combinedRow[col]\n\t}\n\treturn row, nil\n}\n<commit_msg>sql: bugfix: early return in joinerbase.render<commit_after>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Irfan Sharif (irfansharif@cockroachlabs.com)\n\npackage distsqlrun\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/sql\/parser\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/sql\/sqlbase\"\n\t\"github.com\/cockroachdb\/cockroach\/pkg\/util\/log\"\n)\n\ntype joinerBase struct {\n\tinputs []RowSource\n\toutput RowReceiver\n\tctx context.Context\n\n\tjoinType joinType\n\toutputCols columns\n\tfilter exprHelper\n\trowAlloc sqlbase.EncDatumRowAlloc\n\temptyLeft sqlbase.EncDatumRow\n\temptyRight sqlbase.EncDatumRow\n\tcombinedRow sqlbase.EncDatumRow\n}\n\nfunc (jb *joinerBase) init(\n\tflowCtx *FlowCtx,\n\tinputs []RowSource,\n\toutput RowReceiver,\n\toutputCols []uint32,\n\tjType JoinType,\n\tleftTypes []sqlbase.ColumnType,\n\trightTypes []sqlbase.ColumnType,\n\texpr Expression,\n) error {\n\tjb.inputs = inputs\n\tjb.output = output\n\tjb.ctx = log.WithLogTag(flowCtx.Context, \"Joiner\", nil)\n\tjb.outputCols = columns(outputCols)\n\tjb.joinType = joinType(jType)\n\tjb.emptyLeft = make(sqlbase.EncDatumRow, len(leftTypes))\n\tfor i := range jb.emptyLeft {\n\t\tjb.emptyLeft[i].Datum = parser.DNull\n\t}\n\n\tjb.emptyRight = make(sqlbase.EncDatumRow, len(rightTypes))\n\tfor i := range jb.emptyRight {\n\t\tjb.emptyRight[i].Datum = parser.DNull\n\t}\n\n\ttypes := make([]sqlbase.ColumnType, 0, len(leftTypes)+len(rightTypes))\n\ttypes = append(types, leftTypes...)\n\ttypes = append(types, rightTypes...)\n\n\treturn jb.filter.init(expr, types, flowCtx.evalCtx)\n}\n\n\/\/ render evaluates the provided filter and constructs a row with columns from\n\/\/ both rows as specified by the provided output columns. We expect left or\n\/\/ right to be nil if there was no explicit \"join\" match, the filter is then\n\/\/ evaluated on a combinedRow with null values for the columns of the nil row.\n\/\/ render returns a nil row if no row is to be emitted (eg. if join type is\n\/\/ inner join and one of the given rows is nil).\nfunc (jb *joinerBase) render(lrow, rrow sqlbase.EncDatumRow) (sqlbase.EncDatumRow, error) {\n\tlnil := lrow == nil\n\trnil := rrow == nil\n\tswitch jb.joinType {\n\tcase innerJoin:\n\t\tif lnil || rnil {\n\t\t\treturn nil, nil\n\t\t}\n\tcase fullOuter:\n\t\tif lnil {\n\t\t\tlrow = jb.emptyLeft\n\t\t} else if rnil {\n\t\t\trrow = jb.emptyRight\n\t\t}\n\tcase leftOuter:\n\t\tif lnil {\n\t\t\treturn nil, nil\n\t\t}\n\t\tif rnil {\n\t\t\trrow = jb.emptyRight\n\t\t}\n\tcase rightOuter:\n\t\tif rnil {\n\t\t\treturn nil, nil\n\t\t}\n\t\tif lnil {\n\t\t\tlrow = jb.emptyLeft\n\t\t}\n\t}\n\tjb.combinedRow = append(jb.combinedRow[:0], lrow...)\n\tjb.combinedRow = append(jb.combinedRow, rrow...)\n\tres, err := jb.filter.evalFilter(jb.combinedRow)\n\tif !res || err != nil {\n\t\treturn nil, err\n\t}\n\n\trow := jb.rowAlloc.AllocRow(len(jb.outputCols))\n\tfor i, col := range jb.outputCols {\n\t\trow[i] = jb.combinedRow[col]\n\t}\n\treturn row, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ An example implementation of a client.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"istio.io\/istio\/pkg\/cmd\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/common\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/proto\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/server\/forwarder\"\n\t\"istio.io\/pkg\/log\"\n)\n\nvar (\n\tcount int\n\ttimeout time.Duration\n\tqps int\n\turl string\n\tuds string\n\theaderKey string\n\theaderVal string\n\theaders string\n\tmsg string\n\tmethod string\n\thttp2 bool\n\tserverFirst bool\n\tclientCert string\n\tclientKey string\n\n\tcaFile string\n\n\tloggingOptions = log.DefaultOptions()\n\n\trootCmd = &cobra.Command{\n\t\tUse: \"client\",\n\t\tShort: \"Istio test Echo client.\",\n\t\tSilenceUsage: true,\n\t\tLong: `Istio test Echo client used for generating traffic between instances of the Echo service.\nFor Kubernetes, this can be run from a source pod via \"kubectl exec\" with the desired flags.\nIn general, Echo's gRPC interface (ForwardEcho) should be preferred. This client is only needed in cases\nwhere the network configuration doesn't support gRPC to the source pod.'\n`,\n\t\tPersistentPreRunE: configureLogging,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\t\/\/ Create a request from the flags.\n\t\t\trequest, err := getRequest()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatala(err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Create a forwarder.\n\t\t\tf, err := forwarder.New(forwarder.Config{\n\t\t\t\tRequest: request,\n\t\t\t\tUDS: uds,\n\t\t\t\tTLSCert: caFile,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to create forwarder: %v\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Run the forwarder.\n\t\t\tdefer func() {\n\t\t\t\t_ = f.Close()\n\t\t\t}()\n\t\t\tresponse, err := f.Run(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error %s\\n\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Log the output to stdout.\n\t\t\tfor _, line := range response.Output {\n\t\t\t\tfmt.Println(line)\n\t\t\t}\n\n\t\t\tlog.Infof(\"All requests succeeded\")\n\t\t},\n\t}\n)\n\nfunc configureLogging(_ *cobra.Command, _ []string) error {\n\tif err := log.Configure(loggingOptions); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc init() {\n\trootCmd.PersistentFlags().IntVar(&count, \"count\", common.DefaultCount, \"Number of times to make the request\")\n\trootCmd.PersistentFlags().IntVar(&qps, \"qps\", 0, \"Queries per second\")\n\trootCmd.PersistentFlags().DurationVar(&timeout, \"timeout\", common.DefaultRequestTimeout, \"Request timeout\")\n\trootCmd.PersistentFlags().StringVar(&url, \"url\", \"\", \"Specify URL\")\n\trootCmd.PersistentFlags().StringVar(&uds, \"uds\", \"\",\n\t\t\"Specify the Unix Domain Socket to connect to\")\n\trootCmd.PersistentFlags().StringVar(&headerKey, \"key\", \"\",\n\t\t\"Header key (use Host for authority) - deprecated user headers instead\")\n\trootCmd.PersistentFlags().StringVar(&headerVal, \"val\", \"\", \"Header value - deprecated\")\n\trootCmd.PersistentFlags().StringVar(&headers, \"headers\", \"\",\n\t\t\"A list of http headers (use Host for authority) - name:value[,name:value]*\")\n\trootCmd.PersistentFlags().StringVar(&caFile, \"ca\", \"\/cert.crt\", \"CA root cert file\")\n\trootCmd.PersistentFlags().StringVar(&msg, \"msg\", \"HelloWorld\",\n\t\t\"message to send (for websockets)\")\n\trootCmd.PersistentFlags().StringVar(&method, \"method\", \"\", \"method to use (for HTTP)\")\n\trootCmd.PersistentFlags().BoolVar(&http2, \"http2\", false,\n\t\t\"send http requests as HTTP with prior knowledge\")\n\trootCmd.PersistentFlags().BoolVar(&serverFirst, \"server-first\", false,\n\t\t\"Treat as a server first protocol; do not send request until magic string is received\")\n\trootCmd.PersistentFlags().StringVar(&clientCert, \"client-cert\", \"\", \"client certificate file to use for request\")\n\trootCmd.PersistentFlags().StringVar(&clientKey, \"client-key\", \"\", \"client certificate key file to use for request\")\n\n\tloggingOptions.AttachCobraFlags(rootCmd)\n\n\tcmd.AddFlags(rootCmd)\n}\n\nfunc getRequest() (*proto.ForwardEchoRequest, error) {\n\trequest := &proto.ForwardEchoRequest{\n\t\tUrl: url,\n\t\tTimeoutMicros: common.DurationToMicros(timeout),\n\t\tCount: int32(count),\n\t\tQps: int32(qps),\n\t\tMessage: msg,\n\t\tHttp2: http2,\n\t\tServerFirst: serverFirst,\n\t\tMethod: method,\n\t}\n\n\t\/\/ Old http add header - deprecated\n\tif headerKey != \"\" {\n\t\trequest.Headers = append(request.Headers, &proto.Header{\n\t\t\tKey: headerKey,\n\t\t\tValue: headerVal,\n\t\t})\n\t}\n\n\tif headers != \"\" {\n\t\theadersList := strings.Split(headers, \",\")\n\t\tfor _, header := range headersList {\n\t\t\tparts := strings.Split(header, \":\")\n\n\t\t\t\/\/ require name:value format\n\t\t\tif len(parts) != 2 {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid header format: %q (want name:value)\", header)\n\t\t\t}\n\n\t\t\trequest.Headers = append(request.Headers, &proto.Header{\n\t\t\t\tKey: parts[0],\n\t\t\t\tValue: parts[1],\n\t\t\t})\n\t\t}\n\t}\n\n\tif clientCert != \"\" && clientKey != \"\" {\n\t\tcertData, err := ioutil.ReadFile(clientCert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load client certificate: %v\", err)\n\t\t}\n\t\trequest.Cert = string(certData)\n\t\tkeyData, err := ioutil.ReadFile(clientKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load client certificate key: %v\", err)\n\t\t}\n\t\trequest.Key = string(keyData)\n\t}\n\treturn request, nil\n}\n\nfunc main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tos.Exit(-1)\n\t}\n}\n<commit_msg>Make test client more like curl (#27401)<commit_after>\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ An example implementation of a client.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"istio.io\/istio\/pkg\/cmd\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/common\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/proto\"\n\t\"istio.io\/istio\/pkg\/test\/echo\/server\/forwarder\"\n\t\"istio.io\/pkg\/log\"\n)\n\nvar (\n\tcount int\n\ttimeout time.Duration\n\tqps int\n\tuds string\n\theaders []string\n\tmsg string\n\tmethod string\n\thttp2 bool\n\tserverFirst bool\n\tclientCert string\n\tclientKey string\n\n\tcaFile string\n\n\tloggingOptions = log.DefaultOptions()\n\n\trootCmd = &cobra.Command{\n\t\tUse: \"client\",\n\t\tShort: \"Istio test Echo client.\",\n\t\tSilenceUsage: true,\n\t\tLong: `Istio test Echo client used for generating traffic between instances of the Echo service.\nFor Kubernetes, this can be run from a source pod via \"kubectl exec\" with the desired flags.\nIn general, Echo's gRPC interface (ForwardEcho) should be preferred. This client is only needed in cases\nwhere the network configuration doesn't support gRPC to the source pod.'\n`,\n\t\tArgs: cobra.ExactArgs(1),\n\t\tPersistentPreRunE: configureLogging,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\t\/\/ Create a request from the flags.\n\t\t\trequest, err := getRequest(args[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatala(err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Create a forwarder.\n\t\t\tf, err := forwarder.New(forwarder.Config{\n\t\t\t\tRequest: request,\n\t\t\t\tUDS: uds,\n\t\t\t\tTLSCert: caFile,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to create forwarder: %v\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Run the forwarder.\n\t\t\tdefer func() {\n\t\t\t\t_ = f.Close()\n\t\t\t}()\n\t\t\tresponse, err := f.Run(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error %s\\n\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t\/\/ Log the output to stdout.\n\t\t\tfor _, line := range response.Output {\n\t\t\t\tfmt.Println(line)\n\t\t\t}\n\n\t\t\tlog.Infof(\"All requests succeeded\")\n\t\t},\n\t}\n)\n\nfunc configureLogging(_ *cobra.Command, _ []string) error {\n\tif err := log.Configure(loggingOptions); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc init() {\n\trootCmd.PersistentFlags().IntVar(&count, \"count\", common.DefaultCount, \"Number of times to make the request\")\n\trootCmd.PersistentFlags().IntVar(&qps, \"qps\", 0, \"Queries per second\")\n\trootCmd.PersistentFlags().DurationVar(&timeout, \"timeout\", common.DefaultRequestTimeout, \"Request timeout\")\n\trootCmd.PersistentFlags().StringVar(&uds, \"uds\", \"\",\n\t\t\"Specify the Unix Domain Socket to connect to\")\n\trootCmd.PersistentFlags().StringSliceVarP(&headers, \"header\", \"H\", headers,\n\t\t\"A list of http headers (use Host for authority) - 'name: value', following curl syntax\")\n\trootCmd.PersistentFlags().StringVar(&caFile, \"ca\", \"\/cert.crt\", \"CA root cert file\")\n\trootCmd.PersistentFlags().StringVar(&msg, \"msg\", \"HelloWorld\",\n\t\t\"message to send (for websockets)\")\n\trootCmd.PersistentFlags().StringVar(&method, \"method\", \"\", \"method to use (for HTTP)\")\n\trootCmd.PersistentFlags().BoolVar(&http2, \"http2\", false,\n\t\t\"send http requests as HTTP with prior knowledge\")\n\trootCmd.PersistentFlags().BoolVar(&serverFirst, \"server-first\", false,\n\t\t\"Treat as a server first protocol; do not send request until magic string is received\")\n\trootCmd.PersistentFlags().StringVar(&clientCert, \"client-cert\", \"\", \"client certificate file to use for request\")\n\trootCmd.PersistentFlags().StringVar(&clientKey, \"client-key\", \"\", \"client certificate key file to use for request\")\n\n\tloggingOptions.AttachCobraFlags(rootCmd)\n\n\tcmd.AddFlags(rootCmd)\n}\n\n\/\/ Adds http scheme to and url if not set. This matches curl logic\nfunc defaultScheme(u string) string {\n\tp, err := url.Parse(u)\n\tif err != nil {\n\t\treturn u\n\t}\n\tif p.Scheme == \"\" {\n\t\treturn \"http:\/\/\" + u\n\t}\n\treturn u\n}\n\nfunc getRequest(url string) (*proto.ForwardEchoRequest, error) {\n\trequest := &proto.ForwardEchoRequest{\n\t\tUrl: defaultScheme(url),\n\t\tTimeoutMicros: common.DurationToMicros(timeout),\n\t\tCount: int32(count),\n\t\tQps: int32(qps),\n\t\tMessage: msg,\n\t\tHttp2: http2,\n\t\tServerFirst: serverFirst,\n\t\tMethod: method,\n\t}\n\n\tfor _, header := range headers {\n\t\tparts := strings.Split(header, \":\")\n\n\t\t\/\/ require name:value format\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"invalid header format: %q (want name:value)\", header)\n\t\t}\n\n\t\trequest.Headers = append(request.Headers, &proto.Header{\n\t\t\tKey: parts[0],\n\t\t\tValue: strings.Trim(parts[1], \" \"),\n\t\t})\n\t}\n\n\tif clientCert != \"\" && clientKey != \"\" {\n\t\tcertData, err := ioutil.ReadFile(clientCert)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load client certificate: %v\", err)\n\t\t}\n\t\trequest.Cert = string(certData)\n\t\tkeyData, err := ioutil.ReadFile(clientKey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load client certificate key: %v\", err)\n\t\t}\n\t\trequest.Key = string(keyData)\n\t}\n\treturn request, nil\n}\n\nfunc main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tos.Exit(-1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017, 2018 Red Hat, Inc.\n *\n *\/\n\npackage virtconfig\n\n\/*\n This module is intended for determining whether an optional feature is enabled or not at the cluster-level.\n*\/\n\nconst (\n\tCPUManager = \"CPUManager\"\n\tIgnitionGate = \"ExperimentalIgnitionSupport\"\n\tLiveMigrationGate = \"LiveMigration\"\n\tCPUNodeDiscoveryGate = \"CPUNodeDiscovery\"\n\tHypervStrictCheckGate = \"HypervStrictCheck\"\n\tSidecarGate = \"Sidecar\"\n\tGPUGate = \"GPU\"\n\tSnapshotGate = \"Snapshot\"\n\tDataVolumesGate = \"DataVolumes\"\n\tSRIOVGate = \"SRIOVGate\"\n\tHostDiskGate = \"HostDisk\"\n)\n\nfunc (c *ClusterConfig) isFeatureGateEnabled(featureGate string) bool {\n\tfor _, fg := range c.GetConfig().DeveloperConfiguration.FeatureGates {\n\t\tif fg == featureGate {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (config *ClusterConfig) CPUManagerEnabled() bool {\n\treturn config.isFeatureGateEnabled(CPUManager)\n}\n\nfunc (config *ClusterConfig) IgnitionEnabled() bool {\n\treturn config.isFeatureGateEnabled(IgnitionGate)\n}\n\nfunc (config *ClusterConfig) LiveMigrationEnabled() bool {\n\treturn config.isFeatureGateEnabled(LiveMigrationGate)\n}\n\nfunc (config *ClusterConfig) HypervStrictCheckEnabled() bool {\n\treturn config.isFeatureGateEnabled(HypervStrictCheckGate)\n}\n\nfunc (config *ClusterConfig) CPUNodeDiscoveryEnabled() bool {\n\treturn config.isFeatureGateEnabled(CPUNodeDiscoveryGate)\n}\n\nfunc (config *ClusterConfig) SidecarEnabled() bool {\n\treturn config.isFeatureGateEnabled(SidecarGate)\n}\n\nfunc (config *ClusterConfig) GPUPassthroughEnabled() bool {\n\treturn config.isFeatureGateEnabled(GPUGate)\n}\n\nfunc (config *ClusterConfig) SnapshotEnabled() bool {\n\treturn config.isFeatureGateEnabled(SnapshotGate)\n}\n\nfunc (config *ClusterConfig) DataVolumesEnabled() bool {\n\treturn config.isFeatureGateEnabled(DataVolumesGate)\n}\n\nfunc (config *ClusterConfig) SriovEnabled() bool {\n\treturn config.isFeatureGateEnabled(SRIOVGate)\n}\n\nfunc (config *ClusterConfig) HostDiskEnabled() bool {\n\treturn config.isFeatureGateEnabled(HostDiskGate)\n}\n<commit_msg>remove DataVolume feature gate<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017, 2018 Red Hat, Inc.\n *\n *\/\n\npackage virtconfig\n\n\/*\n This module is intended for determining whether an optional feature is enabled or not at the cluster-level.\n*\/\n\nconst (\n\tCPUManager = \"CPUManager\"\n\tIgnitionGate = \"ExperimentalIgnitionSupport\"\n\tLiveMigrationGate = \"LiveMigration\"\n\tCPUNodeDiscoveryGate = \"CPUNodeDiscovery\"\n\tHypervStrictCheckGate = \"HypervStrictCheck\"\n\tSidecarGate = \"Sidecar\"\n\tGPUGate = \"GPU\"\n\tSnapshotGate = \"Snapshot\"\n\tSRIOVGate = \"SRIOVGate\"\n\tHostDiskGate = \"HostDisk\"\n)\n\nfunc (c *ClusterConfig) isFeatureGateEnabled(featureGate string) bool {\n\tfor _, fg := range c.GetConfig().DeveloperConfiguration.FeatureGates {\n\t\tif fg == featureGate {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (config *ClusterConfig) CPUManagerEnabled() bool {\n\treturn config.isFeatureGateEnabled(CPUManager)\n}\n\nfunc (config *ClusterConfig) IgnitionEnabled() bool {\n\treturn config.isFeatureGateEnabled(IgnitionGate)\n}\n\nfunc (config *ClusterConfig) LiveMigrationEnabled() bool {\n\treturn config.isFeatureGateEnabled(LiveMigrationGate)\n}\n\nfunc (config *ClusterConfig) HypervStrictCheckEnabled() bool {\n\treturn config.isFeatureGateEnabled(HypervStrictCheckGate)\n}\n\nfunc (config *ClusterConfig) CPUNodeDiscoveryEnabled() bool {\n\treturn config.isFeatureGateEnabled(CPUNodeDiscoveryGate)\n}\n\nfunc (config *ClusterConfig) SidecarEnabled() bool {\n\treturn config.isFeatureGateEnabled(SidecarGate)\n}\n\nfunc (config *ClusterConfig) GPUPassthroughEnabled() bool {\n\treturn config.isFeatureGateEnabled(GPUGate)\n}\n\nfunc (config *ClusterConfig) SnapshotEnabled() bool {\n\treturn config.isFeatureGateEnabled(SnapshotGate)\n}\n\nfunc (config *ClusterConfig) SriovEnabled() bool {\n\treturn config.isFeatureGateEnabled(SRIOVGate)\n}\n\nfunc (config *ClusterConfig) HostDiskEnabled() bool {\n\treturn config.isFeatureGateEnabled(HostDiskGate)\n}\n<|endoftext|>"} {"text":"<commit_before>package benchmarks\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/smartystreets\/go-disruptor\"\n)\n\nconst (\n\tsingleWriterRingBufferSize = 1024 * 64\n\tsingleWriterRingBufferMask = singleWriterRingBufferSize - 1\n\treserveOne = 1\n\treserveMany = 16\n\treserveManyDelta = reserveMany - 1\n)\n\nfunc BenchmarkDisruptorWriterReserveSingle(b *testing.B) {\n\tringBuffer := [singleWriterRingBufferSize]int64{}\n\twritten, read := disruptor.NewCursor(), disruptor.NewCursor()\n\treader := disruptor.NewReader(read, written, written, singleWriterConsumer{&ringBuffer})\n\twriter := disruptor.NewWriter(written, read, singleWriterRingBufferSize)\n\treader.Start()\n\n\titerations := int64(b.N)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tsequence := disruptor.InitialSequenceValue\n\tfor sequence < iterations {\n\t\tsequence = writer.Reserve(reserveOne)\n\t\tringBuffer[sequence&singleWriterRingBufferMask] = sequence\n\t\twriter.Commit(sequence, sequence)\n\t}\n\n\treader.Stop()\n}\nfunc BenchmarkDisruptorWriterReserveMultiple(b *testing.B) {\n\tringBuffer := [singleWriterRingBufferSize]int64{}\n\twritten, read := disruptor.NewCursor(), disruptor.NewCursor()\n\treader := disruptor.NewReader(read, written, written, singleWriterConsumer{&ringBuffer})\n\twriter := disruptor.NewWriter(written, read, singleWriterRingBufferSize)\n\treader.Start()\n\n\titerations := int64(b.N)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tsequence := disruptor.InitialSequenceValue\n\tfor sequence < iterations {\n\t\tsequence = writer.Reserve(reserveMany)\n\n\t\tfor i := sequence - reserveManyDelta; i <= sequence; i++ {\n\t\t\tringBuffer[i&singleWriterRingBufferMask] = i\n\t\t}\n\n\t\twriter.Commit(sequence, sequence)\n\t}\n\n\treader.Stop()\n}\n\nfunc benchmarkSingleWriter(b *testing.B, maxClaim int64) {\n\n}\n\ntype singleWriterConsumer struct {\n\tringBuffer *[singleWriterRingBufferSize]int64\n}\n\nfunc (this singleWriterConsumer) Consume(lower, upper int64) {\n\tfor lower <= upper {\n\t\tmessage := this.ringBuffer[lower&singleWriterRingBufferMask]\n\t\tif message != lower {\n\t\t\tpanic(fmt.Sprintf(\"\\nRace condition %d %d\\n\", lower, message))\n\t\t}\n\t\tlower++\n\t}\n}\n\nfunc init() {\n\truntime.GOMAXPROCS(2)\n}\n<commit_msg>Removed empty test.<commit_after>package benchmarks\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/smartystreets\/go-disruptor\"\n)\n\nconst (\n\tsingleWriterRingBufferSize = 1024 * 64\n\tsingleWriterRingBufferMask = singleWriterRingBufferSize - 1\n\treserveOne = 1\n\treserveMany = 16\n\treserveManyDelta = reserveMany - 1\n)\n\nfunc BenchmarkDisruptorWriterReserveSingle(b *testing.B) {\n\tringBuffer := [singleWriterRingBufferSize]int64{}\n\twritten, read := disruptor.NewCursor(), disruptor.NewCursor()\n\treader := disruptor.NewReader(read, written, written, singleWriterConsumer{&ringBuffer})\n\twriter := disruptor.NewWriter(written, read, singleWriterRingBufferSize)\n\treader.Start()\n\n\titerations := int64(b.N)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tsequence := disruptor.InitialSequenceValue\n\tfor sequence < iterations {\n\t\tsequence = writer.Reserve(reserveOne)\n\t\tringBuffer[sequence&singleWriterRingBufferMask] = sequence\n\t\twriter.Commit(sequence, sequence)\n\t}\n\n\treader.Stop()\n}\nfunc BenchmarkDisruptorWriterReserveMultiple(b *testing.B) {\n\tringBuffer := [singleWriterRingBufferSize]int64{}\n\twritten, read := disruptor.NewCursor(), disruptor.NewCursor()\n\treader := disruptor.NewReader(read, written, written, singleWriterConsumer{&ringBuffer})\n\twriter := disruptor.NewWriter(written, read, singleWriterRingBufferSize)\n\treader.Start()\n\n\titerations := int64(b.N)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tsequence := disruptor.InitialSequenceValue\n\tfor sequence < iterations {\n\t\tsequence = writer.Reserve(reserveMany)\n\n\t\tfor i := sequence - reserveManyDelta; i <= sequence; i++ {\n\t\t\tringBuffer[i&singleWriterRingBufferMask] = i\n\t\t}\n\n\t\twriter.Commit(sequence, sequence)\n\t}\n\n\treader.Stop()\n}\n\ntype singleWriterConsumer struct {\n\tringBuffer *[singleWriterRingBufferSize]int64\n}\n\nfunc (this singleWriterConsumer) Consume(lower, upper int64) {\n\tfor lower <= upper {\n\t\tmessage := this.ringBuffer[lower&singleWriterRingBufferMask]\n\t\tif message != lower {\n\t\t\tpanic(fmt.Sprintf(\"\\nRace condition %d %d\\n\", lower, message))\n\t\t}\n\t\tlower++\n\t}\n}\n\nfunc init() {\n\truntime.GOMAXPROCS(2)\n}\n<|endoftext|>"} {"text":"<commit_before>package packager\n\nimport(\n\ttp \"tritium\/proto\"\n\tproto \"goprotobuf.googlecode.com\/hg\/proto\"\n\tyaml \"launchpad.net\/goyaml\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"fmt\"\n\t\"exec\"\n\tlinker \"tritium\/linker\"\n\t\"path\/filepath\"\n)\n\ntype Package struct { \n\tloaded []*PackageInfo\n\tlocation string\n\tLoadPath string\n\t*tp.Package\n}\n\ntype PackageInfo struct {\n\tName string\n\tDependencies []string\n\tTypes []string\n}\n\nfunc BuildDefaultPackage(dir string) (*Package) {\n\t\/\/ Terrible directory handling here... has to be executed from Tritium root\n\tpkg := NewPackage(dir)\n\n\t\/\/pkg.Load(\"base\")\n\t\/\/pkg.Load(\"node\")\n\tpkg.Load(\"libxml\")\n\tprintln(\"Packages all loaded\")\n\n\treturn pkg\n}\n\nfunc NewPackage(loadPath string) (*Package){\n\treturn &Package{\n\t\tPackage: &tp.Package{\n\t\t\tName: proto.String(\"combined\"),\n\t\t\tFunctions: make([]*tp.Function, 0),\n\t\t\tTypes: make([]*tp.Type, 0),\n\t\t},\n\t\tloaded: make([]*PackageInfo, 0),\n\t\tLoadPath: loadPath,\n\t}\n}\n\nfunc (pkg *Package)Load(packageName string) {\n\t\n\tlocation := filepath.Join(pkg.LoadPath, packageName)\n\tpkg.location = location\n\n\tinfo := readPackageInfoFile(location)\n\t\n\tif len(info.Dependencies) > 0 {\n\n\t\tprintln(\"==========\\nLoading dependencies:\")\n\n\t\tfor _, dependency := range(info.Dependencies) {\n\t\t\tpkg.loadPackageDependency(dependency)\n\t\t}\n\n\t\tprintln(\"done.\\n==========\")\n\n\t}\n\n\tfmt.Printf(\"%v\\n\", location)\n\n\tfor _, typeName := range(info.Types) {\n\t\tsplit := strings.Split(typeName, \" < \")\n\t\ttypeObj := &tp.Type{}\n\t\tif len(split) == 2 {\n\t\t\ttypeName = split[0]\n\t\t\tindex := pkg.findTypeIndex(split[1])\n\t\t\t\n\t\t\ttypeObj.Implements = proto.Int32(int32(index))\n\t\t}\n\t\ttypeObj.Name = proto.String(typeName)\n\t\tpkg.Types = append(pkg.Types, typeObj)\n\t}\n\n\tpkg.readHeaderFile(location)\n\n\tpkg.readPackageDefinitions(location)\n\n\tpkg.inheritFunctions()\n\n\tprintln(\" -- done\\n\\n\\n\\n\\n\")\n}\n\nfunc (pkg *Package)resolveFunction(fun *tp.Function) {\n\tlinkingContext := linker.NewLinkingContext(pkg.Package)\n\n\/\/\tpkg.resolveFunctionDescendants(fun)\n\n\tfmt.Printf(\"\\t -- Resolving --\\n\")\n\tfmt.Printf(\"\\t\\t -- function: %v\\n\", fun)\n\n\t\/\/ Re-uses linker's logic to resolve function definitions\n\tif ( proto.GetBool( fun.BuiltIn ) == false) {\n\t\ttypeName := proto.GetString(fun.ScopeType)\n\n\t\tif len(typeName) != 0 {\n\t\t\t\/\/ When I pass in functions from the inheritance resolver, they're typeId is already set\n\t\t\tfun.ScopeTypeId = pkg.GetProtoTypeId(fun.ScopeType)\n\t\t\tfun.ScopeType = nil\n\t\t}\n\n\t\tlocalScope := make(linker.LocalDef, len(fun.Args))\n\n\t\t\/\/\t\tfun.ReturnTypeId = pkg.GetProtoTypeId(fun.ReturnType)\n\t\tfor _, arg := range(fun.Args) {\n\t\t\targTypeName := arg.TypeString\n\t\t\tvar argTypeId int\n\n\t\t\tif argTypeName != nil {\n\t\t\t\t\/\/ Similar deal. Input functions from inheritance resolution already have ids set\n\n\t\t\t\targ.TypeId = pkg.GetProtoTypeId(arg.TypeString)\n\t\t\t\t\/\/println(\"Processing %\", proto.GetString(arg.Name))\n\t\t\t\targTypeId = pkg.GetTypeId(proto.GetString(arg.TypeString))\n\t\t\t\targ.TypeString = nil\n\t\t\t} else {\n\t\t\t\targTypeId = int( proto.GetInt32(arg.TypeId) )\n\t\t\t}\n\n\t\t\tlocalScope[proto.GetString(arg.Name)] = argTypeId\n\t\t}\n\n\t\t\/\/fmt.Printf(\"Some insitruction: %v, %s\", fun.Instruction, proto.GetString(fun.Name) )\n\t\tscopeTypeId := int(proto.GetInt32(fun.ScopeTypeId))\n\t\tfmt.Printf(\"\\t\\t -- opening scope type : %v\\n\", scopeTypeId)\n\t\treturnType := linkingContext.ProcessInstructionWithLocalScope(fun.Instruction, scopeTypeId, localScope)\n\t\tfun.ReturnTypeId = proto.Int32(int32(returnType))\n\t}\n\tpkg.Package.Functions = append(pkg.Package.Functions, fun)\n\tfmt.Printf(\"\\t\\t -- done --\\n\")\n}\n\n\nfunc (pkg *Package)inheritFunctions() {\n\tfmt.Printf(\"pkg types: %v\", pkg.Types)\n\tfor _, function := range(pkg.Functions) {\n\t\tpkg.resolveFunctionDescendants(function)\n\t}\n}\n\n\/\/ TODO : Make a hash for this\nfunc (pkg *Package)findDescendentType(thisType int32) int {\t\n\tfor index, someType := range(pkg.Types) {\n\t\timplements := proto.GetInt32(someType.Implements)\n\t\tif implements == thisType && implements != 0 {\n\t\t\t\/\/ The implements field defaults to 0. Base doesn't implement Base. Text doesn't implement Base\n\t\t\t\/\/ TODO(SJ): make the default value -1 so I know if its not set versus something is inheriting from base\n\t\t\tfmt.Printf(\"=== %v is ancestor of %v === (%v is of type %v and implements : %v)\\n\", thisType, someType, proto.GetString(someType.Name), index, implements)\n\t\t\treturn index\n\t\t}\n\t}\n\treturn -1\n}\n\n\n\/\/ TODO(SJ) : Make this not suck. I think I could make this 50% shorter if I use reflection\n\/\/ - Also, I'm assuming a single depth level of inheritance. I'd have to run this function n times for n levels\n\/\/ - Well that should be fine as long as I run it at the end of every package load\n\nfunc (pkg *Package)resolveFunctionDescendants(fun *tp.Function) {\n\n\t\/\/ Check if this function contains any types that have descendants\n\tname := fun.Stub(pkg.Package)\n\tprintln(\"Checking for inheritance on function:\", name )\n\n\tnewFun := &tp.Function{}\n\tinherit := false\n\n\t\/\/ Iterate over ScopeType, Arg types, return Type, opens Type\n\n\n\t\/\/ ScopeType\n\n\tthisTypeId := proto.GetInt32(fun.ScopeTypeId)\n\tnewType := pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\t\tif !inherit {\n\t\t\tfmt.Printf(\"\\t -- ScopeType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ fmt.Printf(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tprintln(\"\\t -- Resetting scopeId\")\t\t\n\t\tnewFun.ScopeTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ ReturnType\n\n\tthisTypeId = proto.GetInt32(fun.ReturnTypeId)\n\tnewType = pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\t\tif !inherit {\n\t\t\tfmt.Printf(\"\\t -- ReturnType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ fmt.Printf(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tprintln(\"\\t -- Resetting returnId\")\n\t\tnewFun.ReturnTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ OpensType\n\n\tthisTypeId = proto.GetInt32(fun.OpensTypeId)\n\tnewType = pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\n\t\tif !inherit {\n\t\t\tfmt.Printf(\"\\t -- OpensType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ fmt.Printf(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tprintln(\"\\t -- Resetting openTypeId\")\n\t\tnewFun.OpensTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ Arguments\n\n\tfor index, arg := range( fun.Args) {\n\t\tthisTypeId = proto.GetInt32(arg.TypeId)\n\t\tnewType = pkg.findDescendentType(thisTypeId)\n\n\t\tif newType != -1 {\n\n\t\t\tif !inherit {\n\t\t\t\tfmt.Printf(\"\\t -- ArgType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\t\tnewFun = fun.Clone()\n\t\t\t\t\/\/ fmt.Printf(\"\\t -- New fun: %v\", newFun)\n\t\t\t\tinherit = true\n\t\t\t}\n\t\t\tprintln(\"\\t -- Resetting argument\")\n\t\t\tnewFun.Args[index].TypeId = proto.Int32( int32( newType ) )\n\t\t}\n\t\t\n\t\t\n\t}\n\n\tfmt.Printf(\"\\t -- Old function: %v\\n\\t -- New function: %v\\n\", fun, newFun)\n\n\tif inherit {\n\t\tpkg.resolveFunction(newFun)\n\t}\n\n}\n\nfunc (pkg *Package)readPackageDefinitions(location string) {\n\t\n\tprintln(\" -- reading definitions\")\n\n\t\/\/ Execute the ts2func-ruby script\n\n\tpackage_name := strings.Split(location,\"\/\")[1]\n\tinput_file := filepath.Join(location, \"functions.ts\")\n\toutput_file := filepath.Join(location, package_name + \".tf\")\n\t\n\tprintln(input_file)\n\n\t\/\/ Assume that tritium\/bin is in $PATH (it will be when you install the gem)\n\t\/\/ -- if you're developing, add $REPOS\/tritium\/bin to $PATH\n\n\tcommand := exec.Command(\"ts2func-ruby\", \"-s\", input_file, output_file)\n\n\t\/\/fmt.Printf(\"\\n\\nExecuting command: \\n %v\\n\", command)\n\n\toutput, err := command.CombinedOutput()\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\tFunction conversion output:\\n\\t %s\", output)\n\t\tlog.Panic(err)\n\t}\n\t\n\tfunctions := &tp.FunctionArray{}\t\n\tdata, err := ioutil.ReadFile(output_file)\n\n\tif err != nil {\n\t\tprintln(\"Failed to read output file.\")\n\t\tlog.Panic(err)\n\t}\n\n\n\terr = proto.Unmarshal(data, functions)\n\n\tif err != nil {\n\t\tprintln(\"Failed while loading output from ts2func.\")\n\t\tprintln(string(output))\n\t\tlog.Panic(err)\n\t}\n\n\n\t\/\/fmt.Printf(\"functions : %v\", functions)\n\t\/\/fmt.Printf(\"\\n\\n prelim pkg functions : %v\\n\", pkg.Package.Functions) *\/\n\n\t\/\/println(\"Function count before \", len(pkg.Package.Functions))\n\tfor _, function := range(functions.Functions) {\n\t\tfmt.Printf(\"\\t -- function: %v\", function)\n\t\tpkg.resolveFunction(function)\n\t}\n\t\/\/fmt.Printf(\"\\n\\npkg functions : %v\\n\", pkg.Package.Functions)\n\t\/\/println(\"Function count after \", len(pkg.Package.Functions))\n\t\/\/pkg.Package.Functions = functions.Functions\n\n\t\/\/fmt.Printf(\"\\n\\npkg functions : %v\\n\", pkg.Package.Functions)\n\n\n}\n\n\nfunc (pkg *Package)Marshal() []byte {\n\tbytes, err := proto.Marshal(pkg.Package)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\treturn bytes\n}\n\nfunc (pkg *Package)findTypeIndex(name string) int {\n\tfor index, typeObj := range(pkg.Types) {\n\t\tif name == proto.GetString(typeObj.Name) {\n\t\t\treturn index\n\t\t}\n\t}\n\t\n\tlog.Panic(\"Bad type load order, type\", name, \"unknown\")\n\treturn -1\n}\n\nfunc (pkg *Package)loadPackageDependency(name string) {\n\n\t\/\/ Try and load the dependency\n\t\/\/ TODO : remove passing location around since I added it to the Package struct\t\n\n\t\/\/ TODO : Check for a pre-built package (pre-req is outputting a .tpkg file upon completion of a package load)\n\n\tnewPath := filepath.Join(pkg.LoadPath, name)\n\t_, err := ioutil.ReadDir(newPath)\n\n\tif err == nil {\n\t\t\/\/ Directory exists\n\t\tpkg.Load(name)\n\t} else {\n\t\tprintln(\"Cannot find package at:\", newPath)\n\t\tlog.Panic(err)\n\t}\n\n}\n\n\/\/ Not fully functional. Dang it.\nfunc readPackageInfoFile(location string) (*PackageInfo){\n\tpackageInfo := &PackageInfo{}\n\tinfoFile, err := ioutil.ReadFile(location + \"\/package.yml\");\n\tif err != nil {\n\t\tlog.Panic(\"No package info file found at \" + location + \"\/package.yml\")\n\t}\n\tyaml.Unmarshal([]byte(infoFile), &packageInfo)\n\t\/\/fmt.Printf(\"--- m:\\n%v\\n\\n\", packageInfo)\n\treturn packageInfo\n}\n\n\n\/\/ Not fully functional. Dang it.\nfunc (pkg *Package)readHeaderFile(location string) {\n\theaderFile, err := ioutil.ReadFile(location + \"\/headers.tf\");\n\tif err != nil {\n\t\tlog.Panic(\"No header file found at \" + location + \"\/headers.tf\")\n\t}\n\theaderLines := strings.Split(string(headerFile), \"\\n\")\n\tfor _, line := range(headerLines) {\n\t\tif (len(line) > 2) && (line[:2] != \"\/\/\") {\n\t\t\tfunction := &tp.Function{Args: make([]*tp.Function_Argument, 0)}\n\t\t\tline = line[6:]\n\t\t\tmethodName := strings.Split(line, \"(\")[0]\n\t\t\ttypeId := 0\n\t\t\tback := line[len(methodName)+1:]\n\t\t\tmethodizer := strings.Split(methodName, \".\")\n\t\t\tif len(methodizer) > 1 {\n\t\t\t\ttypeStr := methodizer[0]\n\t\t\t\ttypeId = pkg.GetTypeId(typeStr)\n\t\t\t\tmethodName = methodizer[1]\n\t\t\t}\n\t\t\tfunction.Name = proto.String(methodName)\n\t\t\tfunction.ScopeTypeId = proto.Int32(int32(typeId))\n\n\t\t\tsplat := strings.Split(back, \")\")\n\t\t\targString := splat[0]\n\t\t\t\/\/ Process the arguments\n\t\t\tif len(argString) > 0 {\n\t\t\t\targList := strings.Split(argString, \",\")\n\t\t\t\tfor _, argLine := range(argList) {\n\t\t\t\t\targ := &tp.Function_Argument{}\n\t\t\t\t\ttypeStr := strings.Split(strings.Trim(argLine, \" \\t\"), \" \")[0]\n\t\t\t\t\ttypeId = pkg.findTypeIndex(typeStr)\n\t\t\t\t\targ.TypeId = proto.Int32(int32(typeId))\n\t\t\t\t\tfunction.Args = append(function.Args, arg)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tback = splat[1]\n\t\t\t\n\t\t\thintStrs := strings.Split(back, \"\/\/\")\n\t\t\tif len(hintStrs) == 2 {\n\t\t\t\thints := strings.Split(hintStrs[1], \",\")\n\t\t\t\tif len(hints) >= 1 {\n\t\t\t\t\t function.ReturnTypeId = proto.Int32(int32(pkg.findTypeIndex(hints[0])))\n\t\t\t\t}\n\t\t\t\tif len(hints) >= 2 {\n\t\t\t\t\t function.OpensTypeId = proto.Int32(int32(pkg.findTypeIndex(hints[1])))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfunction.BuiltIn = proto.Bool(true)\n\n\t\t\tpkg.Functions = append(pkg.Functions, function)\n\t\t}\n\t}\n}\n\nfunc (pkg *Package)SerializedOutput() {\n\n\tbytes, err := proto.Marshal(pkg.Package)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tprintln(string(bytes))\n}\n<commit_msg>Converted outputs to file log.<commit_after>package packager\n\nimport(\n\ttp \"tritium\/proto\"\n\tproto \"goprotobuf.googlecode.com\/hg\/proto\"\n\tyaml \"launchpad.net\/goyaml\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"fmt\"\n\t\"exec\"\n\tlinker \"tritium\/linker\"\n\t\"path\/filepath\"\n\t\"log4go\"\n)\n\ntype Package struct { \n\tloaded []*PackageInfo\n\tlocation string\n\tLoadPath string\n\tLog log4go.Logger\n\t*tp.Package\n}\n\ntype PackageInfo struct {\n\tName string\n\tDependencies []string\n\tTypes []string\n}\n\nfunc BuildDefaultPackage(dir string) (*Package) {\n\t\/\/ Terrible directory handling here... has to be executed from Tritium root\n\tpkg := NewPackage(dir)\n\n\t\/\/pkg.Load(\"base\")\n\t\/\/pkg.Load(\"node\")\n\tpkg.Load(\"libxml\")\n\tprintln(\"Packages all loaded\")\n\n\treturn pkg\n}\n\nfunc NewPackage(loadPath string) (*Package){\n\treturn &Package{\n\t\tPackage: &tp.Package{\n\t\t\tName: proto.String(\"combined\"),\n\t\t\tFunctions: make([]*tp.Function, 0),\n\t\t\tTypes: make([]*tp.Type, 0),\n\t\t},\n\t\tloaded: make([]*PackageInfo, 0),\n \t Log: newLog(),\n\t\tLoadPath: loadPath,\n\t}\n}\n\nfunc newLog() (log4go.Logger) {\n\tlog := log4go.NewLogger()\n\tlog.AddFilter(\"file\", log4go.FINE, log4go.NewFileLogWriter(\"debug.log\", false))\t\n\treturn log\n}\n\nfunc (pkg *Package)Load(packageName string) {\n\t\n\tlocation := filepath.Join(pkg.LoadPath, packageName)\n\tpkg.location = location\n\n\tprintln(location)\n\n\tinfo := readPackageInfoFile(location)\n\t\n\tif len(info.Dependencies) > 0 {\n\n\t\t\/\/println(\"==========\\nLoading dependencies:\")\n\n\t\tfor _, dependency := range(info.Dependencies) {\n\t\t\tpkg.loadPackageDependency(dependency)\n\t\t}\n\n\t\t\/\/println(\"done.\\n==========\")\n\n\t}\n\n\tfor _, typeName := range(info.Types) {\n\t\tsplit := strings.Split(typeName, \" < \")\n\t\ttypeObj := &tp.Type{}\n\t\tif len(split) == 2 {\n\t\t\ttypeName = split[0]\n\t\t\tindex := pkg.findTypeIndex(split[1])\n\t\t\t\n\t\t\ttypeObj.Implements = proto.Int32(int32(index))\n\t\t}\n\t\ttypeObj.Name = proto.String(typeName)\n\t\tpkg.Types = append(pkg.Types, typeObj)\n\t}\n\n\tpkg.readHeaderFile(location)\n\n\tpkg.readPackageDefinitions(location)\n\n\tpkg.inheritFunctions()\n\n\tprintln(\" -- done\")\n\tpkg.Log.Close()\n}\n\nfunc (pkg *Package)resolveFunction(fun *tp.Function) {\n\tlinkingContext := linker.NewLinkingContext(pkg.Package)\n\n\/\/\tpkg.resolveFunctionDescendants(fun)\n\n\tpkg.Log.Info(\"\\t -- Resolving --\\n\")\n\tpkg.Log.Info(\"\\t\\t -- function: %v\\n\", fun)\n\n\t\/\/ Re-uses linker's logic to resolve function definitions\n\tif ( proto.GetBool( fun.BuiltIn ) == false) {\n\t\ttypeName := proto.GetString(fun.ScopeType)\n\n\t\tif len(typeName) != 0 {\n\t\t\t\/\/ When I pass in functions from the inheritance resolver, they're typeId is already set\n\t\t\tfun.ScopeTypeId = pkg.GetProtoTypeId(fun.ScopeType)\n\t\t\tfun.ScopeType = nil\n\t\t}\n\n\t\tlocalScope := make(linker.LocalDef, len(fun.Args))\n\n\t\t\/\/\t\tfun.ReturnTypeId = pkg.GetProtoTypeId(fun.ReturnType)\n\t\tfor _, arg := range(fun.Args) {\n\t\t\targTypeName := arg.TypeString\n\t\t\tvar argTypeId int\n\n\t\t\tif argTypeName != nil {\n\t\t\t\t\/\/ Similar deal. Input functions from inheritance resolution already have ids set\n\n\t\t\t\targ.TypeId = pkg.GetProtoTypeId(arg.TypeString)\n\t\t\t\t\/\/println(\"Processing %\", proto.GetString(arg.Name))\n\t\t\t\targTypeId = pkg.GetTypeId(proto.GetString(arg.TypeString))\n\t\t\t\targ.TypeString = nil\n\t\t\t} else {\n\t\t\t\targTypeId = int( proto.GetInt32(arg.TypeId) )\n\t\t\t}\n\n\t\t\tlocalScope[proto.GetString(arg.Name)] = argTypeId\n\t\t}\n\n\t\t\/\/pkg.Log.Info(\"Some insitruction: %v, %s\", fun.Instruction, proto.GetString(fun.Name) )\n\t\tscopeTypeId := int(proto.GetInt32(fun.ScopeTypeId))\n\t\tpkg.Log.Info(\"\\t\\t -- opening scope type : %v\\n\", scopeTypeId)\n\t\treturnType := linkingContext.ProcessInstructionWithLocalScope(fun.Instruction, scopeTypeId, localScope)\n\t\tfun.ReturnTypeId = proto.Int32(int32(returnType))\n\t}\n\tpkg.Package.Functions = append(pkg.Package.Functions, fun)\n\tpkg.Log.Info(\"\\t\\t -- done --\\n\")\n}\n\n\nfunc (pkg *Package)inheritFunctions() {\n\tpkg.Log.Info(\"pkg types: %v\", pkg.Types)\n\tfor _, function := range(pkg.Functions) {\n\t\tpkg.resolveFunctionDescendants(function)\n\t}\n}\n\n\/\/ TODO : Make a hash for this\nfunc (pkg *Package)findDescendentType(thisType int32) int {\t\n\tfor index, someType := range(pkg.Types) {\n\t\timplements := proto.GetInt32(someType.Implements)\n\t\tif implements == thisType && implements != 0 {\n\t\t\t\/\/ The implements field defaults to 0. Base doesn't implement Base. Text doesn't implement Base\n\t\t\t\/\/ TODO(SJ): make the default value -1 so I know if its not set versus something is inheriting from base\n\t\t\tpkg.Log.Info(\"=== %v is ancestor of %v === (%v is of type %v and implements : %v)\\n\", thisType, someType, proto.GetString(someType.Name), index, implements)\n\t\t\treturn index\n\t\t}\n\t}\n\treturn -1\n}\n\n\n\/\/ TODO(SJ) : Make this not suck. I think I could make this 50% shorter if I use reflection\n\/\/ - Also, I'm assuming a single depth level of inheritance. I'd have to run this function n times for n levels\n\/\/ - Well that should be fine as long as I run it at the end of every package load\n\nfunc (pkg *Package)resolveFunctionDescendants(fun *tp.Function) {\n\n\t\/\/ Check if this function contains any types that have descendants\n\tname := fun.Stub(pkg.Package)\n\tpkg.Log.Info(\"Checking for inheritance on function: %v\", name )\n\n\tnewFun := &tp.Function{}\n\tinherit := false\n\n\t\/\/ Iterate over ScopeType, Arg types, return Type, opens Type\n\n\n\t\/\/ ScopeType\n\n\tthisTypeId := proto.GetInt32(fun.ScopeTypeId)\n\tnewType := pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\t\tif !inherit {\n\t\t\tpkg.Log.Info(\"\\t -- ScopeType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ pkg.Log.Info(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tpkg.Log.Info(\"\\t -- Resetting scopeId\")\t\t\n\t\tnewFun.ScopeTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ ReturnType\n\n\tthisTypeId = proto.GetInt32(fun.ReturnTypeId)\n\tnewType = pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\t\tif !inherit {\n\t\t\tpkg.Log.Info(\"\\t -- ReturnType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ pkg.Log.Info(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tpkg.Log.Info(\"\\t -- Resetting returnId\")\n\t\tnewFun.ReturnTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ OpensType\n\n\tthisTypeId = proto.GetInt32(fun.OpensTypeId)\n\tnewType = pkg.findDescendentType(thisTypeId)\n\n\tif newType != -1 {\n\n\t\tif !inherit {\n\t\t\tpkg.Log.Info(\"\\t -- OpensType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\tnewFun = fun.Clone()\n\t\t\t\/\/ pkg.Log.Info(\"\\t -- New fun: %v\", newFun)\n\t\t\tinherit = true\n\t\t}\n\t\tpkg.Log.Info(\"\\t -- Resetting openTypeId\")\n\t\tnewFun.OpensTypeId = proto.Int32( int32( newType ) )\n\t}\n\n\t\/\/ Arguments\n\n\tfor index, arg := range( fun.Args) {\n\t\tthisTypeId = proto.GetInt32(arg.TypeId)\n\t\tnewType = pkg.findDescendentType(thisTypeId)\n\n\t\tif newType != -1 {\n\n\t\t\tif !inherit {\n\t\t\t\tpkg.Log.Info(\"\\t -- ArgType : Found ancestral type. Cloning function %v\\n\", proto.GetString( fun.Name ) )\n\t\t\t\tnewFun = fun.Clone()\n\t\t\t\t\/\/ pkg.Log.Info(\"\\t -- New fun: %v\", newFun)\n\t\t\t\tinherit = true\n\t\t\t}\n\t\t\tpkg.Log.Info(\"\\t -- Resetting argument\")\n\t\t\tnewFun.Args[index].TypeId = proto.Int32( int32( newType ) )\n\t\t}\n\t\t\n\t\t\n\t}\n\n\tpkg.Log.Info(\"\\t -- Old function: %v\\n\\t -- New function: %v\\n\", fun, newFun)\n\n\tif inherit {\n\t\tpkg.resolveFunction(newFun)\n\t}\n\n}\n\nfunc (pkg *Package)readPackageDefinitions(location string) {\n\t\n\tprintln(\" -- reading definitions\")\n\n\t\/\/ Execute the ts2func-ruby script\n\n\tpackage_name := strings.Split(location,\"\/\")[1]\n\tinput_file := filepath.Join(location, \"functions.ts\")\n\toutput_file := filepath.Join(location, package_name + \".tf\")\n\n\t\/\/ Assume that tritium\/bin is in $PATH (it will be when you install the gem)\n\t\/\/ -- if you're developing, add $REPOS\/tritium\/bin to $PATH\n\n\tcommand := exec.Command(\"ts2func-ruby\", \"-s\", input_file, output_file)\n\n\t\/\/pkg.Log.Info(\"\\n\\nExecuting command: \\n %v\\n\", command)\n\n\toutput, err := command.CombinedOutput()\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\tFunction conversion output:\\n\\t %s\", output)\n\t\tlog.Panic(err)\n\t}\n\t\n\tfunctions := &tp.FunctionArray{}\t\n\tdata, err := ioutil.ReadFile(output_file)\n\n\tif err != nil {\n\t\tprintln(\"Failed to read output file.\")\n\t\tlog.Panic(err)\n\t}\n\n\n\terr = proto.Unmarshal(data, functions)\n\n\tif err != nil {\n\t\tprintln(\"Failed while loading output from ts2func.\")\n\t\tprintln(string(output))\n\t\tlog.Panic(err)\n\t}\n\n\n\t\/\/fmt.Printf(\"functions : %v\", functions)\n\t\/\/fmt.Printf(\"\\n\\n prelim pkg functions : %v\\n\", pkg.Package.Functions) *\/\n\n\t\/\/println(\"Function count before \", len(pkg.Package.Functions))\n\tfor _, function := range(functions.Functions) {\n\t\tpkg.Log.Info(\"\\t -- function: %v\", function)\n\t\tpkg.resolveFunction(function)\n\t}\n\t\/\/fmt.Printf(\"\\n\\npkg functions : %v\\n\", pkg.Package.Functions)\n\t\/\/println(\"Function count after \", len(pkg.Package.Functions))\n\t\/\/pkg.Package.Functions = functions.Functions\n\n\t\/\/fmt.Printf(\"\\n\\npkg functions : %v\\n\", pkg.Package.Functions)\n\n\n}\n\n\nfunc (pkg *Package)Marshal() []byte {\n\tbytes, err := proto.Marshal(pkg.Package)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\treturn bytes\n}\n\nfunc (pkg *Package)findTypeIndex(name string) int {\n\tfor index, typeObj := range(pkg.Types) {\n\t\tif name == proto.GetString(typeObj.Name) {\n\t\t\treturn index\n\t\t}\n\t}\n\t\n\tlog.Panic(\"Bad type load order, type\", name, \"unknown\")\n\treturn -1\n}\n\nfunc (pkg *Package)loadPackageDependency(name string) {\n\n\t\/\/ Try and load the dependency\n\t\/\/ TODO : remove passing location around since I added it to the Package struct\t\n\n\t\/\/ TODO : Check for a pre-built package (pre-req is outputting a .tpkg file upon completion of a package load)\n\n\tnewPath := filepath.Join(pkg.LoadPath, name)\n\t_, err := ioutil.ReadDir(newPath)\n\n\tif err == nil {\n\t\t\/\/ Directory exists\n\t\tpkg.Load(name)\n\t} else {\n\t\tprintln(\"Cannot find package at:\", newPath)\n\t\tlog.Panic(err)\n\t}\n\n}\n\n\/\/ Not fully functional. Dang it.\nfunc readPackageInfoFile(location string) (*PackageInfo){\n\tpackageInfo := &PackageInfo{}\n\tinfoFile, err := ioutil.ReadFile(location + \"\/package.yml\");\n\tif err != nil {\n\t\tlog.Panic(\"No package info file found at \" + location + \"\/package.yml\")\n\t}\n\tyaml.Unmarshal([]byte(infoFile), &packageInfo)\n\t\/\/fmt.Printf(\"--- m:\\n%v\\n\\n\", packageInfo)\n\treturn packageInfo\n}\n\n\n\/\/ Not fully functional. Dang it.\nfunc (pkg *Package)readHeaderFile(location string) {\n\theaderFile, err := ioutil.ReadFile(location + \"\/headers.tf\");\n\tif err != nil {\n\t\tlog.Panic(\"No header file found at \" + location + \"\/headers.tf\")\n\t}\n\theaderLines := strings.Split(string(headerFile), \"\\n\")\n\tfor _, line := range(headerLines) {\n\t\tif (len(line) > 2) && (line[:2] != \"\/\/\") {\n\t\t\tfunction := &tp.Function{Args: make([]*tp.Function_Argument, 0)}\n\t\t\tline = line[6:]\n\t\t\tmethodName := strings.Split(line, \"(\")[0]\n\t\t\ttypeId := 0\n\t\t\tback := line[len(methodName)+1:]\n\t\t\tmethodizer := strings.Split(methodName, \".\")\n\t\t\tif len(methodizer) > 1 {\n\t\t\t\ttypeStr := methodizer[0]\n\t\t\t\ttypeId = pkg.GetTypeId(typeStr)\n\t\t\t\tmethodName = methodizer[1]\n\t\t\t}\n\t\t\tfunction.Name = proto.String(methodName)\n\t\t\tfunction.ScopeTypeId = proto.Int32(int32(typeId))\n\n\t\t\tsplat := strings.Split(back, \")\")\n\t\t\targString := splat[0]\n\t\t\t\/\/ Process the arguments\n\t\t\tif len(argString) > 0 {\n\t\t\t\targList := strings.Split(argString, \",\")\n\t\t\t\tfor _, argLine := range(argList) {\n\t\t\t\t\targ := &tp.Function_Argument{}\n\t\t\t\t\ttypeStr := strings.Split(strings.Trim(argLine, \" \\t\"), \" \")[0]\n\t\t\t\t\ttypeId = pkg.findTypeIndex(typeStr)\n\t\t\t\t\targ.TypeId = proto.Int32(int32(typeId))\n\t\t\t\t\tfunction.Args = append(function.Args, arg)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tback = splat[1]\n\t\t\t\n\t\t\thintStrs := strings.Split(back, \"\/\/\")\n\t\t\tif len(hintStrs) == 2 {\n\t\t\t\thints := strings.Split(hintStrs[1], \",\")\n\t\t\t\tif len(hints) >= 1 {\n\t\t\t\t\t function.ReturnTypeId = proto.Int32(int32(pkg.findTypeIndex(hints[0])))\n\t\t\t\t}\n\t\t\t\tif len(hints) >= 2 {\n\t\t\t\t\t function.OpensTypeId = proto.Int32(int32(pkg.findTypeIndex(hints[1])))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfunction.BuiltIn = proto.Bool(true)\n\n\t\t\tpkg.Functions = append(pkg.Functions, function)\n\t\t}\n\t}\n}\n\nfunc (pkg *Package)SerializedOutput() {\n\n\tbytes, err := proto.Marshal(pkg.Package)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tprintln(string(bytes))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (C) 2014 Michael Wendland\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors:\n * Michael Wendland <michiwend@michiwend.com>\n *\/\n\npackage goefa\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t_ \"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"code.google.com\/p\/go-charset\/charset\"\n\t_ \"code.google.com\/p\/go-charset\/data\"\n)\n\ntype EFAProvider struct {\n\tName string\n\n\tBaseURL string\n\tDepartureMonitorEndpoint string\n\tStopFinderEndpoint string\n\tTripEndpoint string\n\n\t\/\/FIXME: include general params for all requests (e.g. useRealtime, ...)\n}\n\n\/\/FIXME: separate goefa structs (like Station) and XML structs\nfunc (efa *EFAProvider) FindStop(name string) (*StopInfo, error) {\n\t\/\/ FindStop queries the stopfinder API and returns Stops matching 'name'\n\n\tparams := url.Values{\n\t\t\"type_sf\": {\"stop\"},\n\t\t\"name_sf\": {name},\n\t\t\"locationServerActive\": {\"1\"},\n\t\t\"outputFormat\": {\"XML\"},\n\t\t\"stateless\": {\"1\"},\n\t\t\/\/ \"limit\": {\"5\"},\n\t\t\/\/ \"mode\": {\"direct\"},\n\t}\n\n\tresp, err := http.PostForm(efa.BaseURL+efa.StopFinderEndpoint, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar result StopResult\n\tdecoder := xml.NewDecoder(resp.Body)\n\tdecoder.CharsetReader = charset.NewReader\n\tif err = decoder.Decode(&result); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif result.Stop.State != \"identified\" {\n\t\treturn nil, errors.New(\"Stop does not exist or name is not unique!\")\n\t}\n\n\treturn &result.Stop, nil\n}\n\n\/\/FIXME: turn station_id into an int\nfunc (efa *EFAProvider) Departures(station *StopInfo, results int) ([]Departure, error) {\n\tparams := url.Values{\n\t\t\"type_dm\": {\"stop\"},\n\t\t\"name_dm\": {strconv.Itoa(station.IdfdStop.StopID)},\n\t\t\"useRealtime\": {\"1\"},\n\t\t\"locationServerActive\": {\"1\"},\n\t\t\"dmLineSelection\": {\"all\"},\n\t\t\"limit\": {strconv.Itoa(results)},\n\t\t\"mode\": {\"direct\"},\n\t}\n\n\tresp, err := http.PostForm(efa.BaseURL+efa.DepartureMonitorEndpoint, params)\n\tif err != nil {\n\t\treturn []Departure{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar result StopResult\n\tdecoder := xml.NewDecoder(resp.Body)\n\tdecoder.CharsetReader = charset.NewReader\n\tif err = decoder.Decode(&result); err != nil {\n\t\treturn []Departure{}, err\n\t}\n\t\/\/fmt.Printf(\"%+v\", result)\n\n\tif result.Stop.State != \"identified\" {\n\t\treturn []Departure{}, errors.New(\"Stop does not exist or name is not unique!\")\n\t}\n\n\treturn result.Departures, nil\n}\n<commit_msg>fix StopFinder request, etc<commit_after>\/*\n * Copyright (C) 2014 Michael Wendland\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors:\n * Michael Wendland <michiwend@michiwend.com>\n *\/\n\npackage goefa\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go-charset\/charset\"\n\t_ \"code.google.com\/p\/go-charset\/data\"\n)\n\ntype EFAProvider struct {\n\tName string\n\n\tBaseURL string\n\tDepartureMonitorEndpoint string\n\tStopFinderEndpoint string\n\tTripEndpoint string\n\n\t\/\/FIXME: include general params for all requests (e.g. useRealtime, ...)\n}\n\n\/\/ FindStop queries the EFA StopFinder API and returns a list of stops\n\/\/ or a single identified stop that where matched by the given name\nfunc (efa *EFAProvider) FindStop(name string) (bool, []*Stop, error) {\n\n\t\/\/ Struct for unmarshaling StopFinderRequest into\n\ttype stopFinderRequest struct {\n\t\tOdv struct {\n\t\t\tOdvPlace struct {\n\t\t\t}\n\t\t\tOdvName struct {\n\t\t\t\tState string `xml:\"state,attr\"`\n\t\t\t\tStops []*Stop `xml:\"odvNameElem\"`\n\t\t\t} `xml:\"itdOdvName\"`\n\t\t} `xml:\"itdStopFinderRequest>itdOdv\"`\n\t}\n\n\t\/\/ To get a more detailed response from the StopFinder request we can use\n\t\/\/ EFAs LocationServer (locationServerActive=1, type_sf=any). To limit the\n\t\/\/ results to a specific type we can use anyObjFilter_sf=<bitmask> as\n\t\/\/ following:\n\t\/\/\t0 any type\n\t\/\/\t1 locations\n\t\/\/\t2 stations\n\t\/\/\t4 streets\n\t\/\/\t8 addresses\n\t\/\/\t16 crossroads\n\t\/\/\t32 POIs\n\t\/\/\t64 postal codes\n\t\/\/ \"stations and streets\" results in 2 + 4 = 6\n\n\tparams := url.Values{\n\t\t\"type_sf\": {\"any\"},\n\t\t\"name_sf\": {name},\n\t\t\"outputFormat\": {\"XML\"},\n\t\t\"stateless\": {\"1\"},\n\t\t\"locationServerActive\": {\"1\"},\n\t\t\"anyObjFilter_sf\": {\"2\"},\n\t}\n\n\tresp, err := http.PostForm(efa.BaseURL+efa.StopFinderEndpoint, params)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar result stopFinderRequest\n\n\tdecoder := xml.NewDecoder(resp.Body)\n\tdecoder.CharsetReader = charset.NewReader\n\tif err = decoder.Decode(&result); err != nil {\n\t\treturn false, nil, err\n\t}\n\n\tfor _, stop := range result.Odv.OdvName.Stops {\n\t\tstop.Provider = efa\n\t}\n\n\tswitch result.Odv.OdvName.State {\n\tcase \"identified\":\n\t\treturn true, result.Odv.OdvName.Stops, nil\n\tcase \"list\":\n\t\treturn false, result.Odv.OdvName.Stops, nil\n\tdefault:\n\t\treturn false, nil, errors.New(\"no matched stops\")\n\t}\n\n}\n\nfunc (efa *EFAProvider) Trip(origin Stop, via Stop, destination Stop, time time.Time) error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package golis\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/系统变量定义\nvar (\n\tGolisHandler IoHandler \/\/事件处理\n\tGolisPackage Packager \/\/拆包封包处理接口\n\tw WaitGroupWrapper \/\/等待退出\n\trunnable bool \/\/服务运行状态\n\trwTimeout time.Duration = 0 \/\/超时单位秒\n)\n\n\/\/定义waitGroup\ntype WaitGroupWrapper struct {\n\tsync.WaitGroup\n}\n\n\/\/定义session\ntype Iosession struct {\n\tSesionId uint32 \/\/session唯一表示\n\tConnection net.Conn \/\/连接\n\tIsAuth bool \/\/是否认证成功\n\tclosed bool \/\/是否已经关闭\n}\n\n\/\/session写入数据\nfunc (this *Iosession) Write(message interface{}) {\n\t\/\/触发消息发送事件\n\tGolisHandler.MessageSent(this, message)\n\tthis.Connection.Write(GolisPackage.Packet(message))\n}\n\n\/\/关闭连接\nfunc (this *Iosession) Close() {\n\tif !this.closed {\n\t\tGolisHandler.SessionClosed(this)\n\t\tthis.closed = true\n\t}\n\tthis.Connection.Close()\n}\n\n\/\/设置读写超时\nfunc SetTimeout(timeoutSec time.Duration) {\n\trwTimeout = timeoutSec\n}\n\n\/\/拆包封包接口定义\ntype Packager interface {\n\t\/\/读取连接数据\n\t\/\/packageChan 准备好包后交给该chan\n\tReadConnData(buffer *Buffer, packageChan chan<- *[]byte)\n\t\/\/拆包函数\n\tUnpacket(data []byte) interface{}\n\t\/\/封包函数\n\tPacket(msg interface{}) []byte\n}\n\n\/\/事件触发接口定义\ntype IoHandler interface {\n\t\/\/session打开\n\tSessionOpened(session *Iosession)\n\t\/\/session关闭\n\tSessionClosed(session *Iosession)\n\t\/\/收到消息时触发\n\tMessageReceived(session *Iosession, message interface{})\n\t\/\/消息发送时触发\n\tMessageSent(session *Iosession, message interface{})\n}\n\n\/\/服务器端运行golis\n\/\/netPro:运行协议参数,tcp\/udp\n\/\/laddr :程序监听ip和端口,如127.0.0.1:8080\nfunc Run(netPro, laddr string) {\n\trunnable = true\n\tlog.Println(\"golis is listen port:\", laddr)\n\n\tnetLis, err := net.Listen(netPro, laddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer netLis.Close()\n\tlog.Println(\"waiting clients...\")\n\tfor runnable {\n\t\tconn, err := netLis.Accept()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tgo connectHandle(conn)\n\t}\n\n\tw.WaitGroup.Wait()\n\tlog.Println(\"golis is safe exit\")\n}\n\n\/\/停止服务\nfunc Stop() {\n\trunnable = false\n}\n\n\/\/客户端程序连接服务器\nfunc Dial(netPro, laddr string) {\n\trunnable = true\n\tconn, err := net.Dial(netPro, laddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tgo connectHandle(conn)\n}\n\n\/\/处理新连接\nfunc connectHandle(conn net.Conn) {\n\tw.Add(1)\n\t\/\/声明一个临时缓冲区,用来存储被截断的数据\n\tioBuffer := NewBuffer()\n\n\tbuffer := make([]byte, 512)\n\n\t\/\/声明一个管道用于接收解包的数据\n\treaderChannel := make(chan *[]byte, 16)\n\t\/\/创建session\n\tsession := Iosession{Connection: conn}\n\t\/\/触发sessionCreated事件\n\tGolisHandler.SessionOpened(&session)\n\n\texitChan := make(chan bool)\n\tgo waitData(&session, readerChannel, exitChan)\n\tdefer func() {\n\t\tconn.Close()\n\t\tioBuffer = nil\n\t\tbuffer = nil\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tw.Done()\n\t}()\n\n\tfor runnable && !session.closed {\n\n\t\tn, err := conn.Read(buffer)\n\t\t\/\/设置超时\n\t\tresetTimeout(conn)\n\t\tioBuffer.PutBytes(buffer[:n])\n\t\tif err == nil {\n\t\t\tGolisPackage.ReadConnData(ioBuffer, readerChannel)\n\t\t} else {\n\t\t\t\/\/session已经关闭\n\t\t\tsession.Close()\n\t\t\texitChan <- true\n\t\t\treturn\n\t\t}\n\t\tif !runnable || session.closed {\n\t\t\t\/\/session关闭\n\t\t\tsession.Close()\n\t\t\texitChan <- true\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc resetTimeout(conn net.Conn) {\n\tif rwTimeout == 0 {\n\t\tconn.SetDeadline(time.Time{})\n\t} else {\n\t\tconn.SetDeadline(time.Now().Add(time.Duration(rwTimeout) * time.Second))\n\t}\n}\n\n\/\/等待数据包\nfunc waitData(session *Iosession, readerChannel chan *[]byte, exitChan chan bool) {\n\tdefer func() {\n\t\tclose(exitChan)\n\t\tclose(readerChannel)\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\tfor runnable && !session.closed {\n\t\tselect {\n\t\tcase data := <-readerChannel:\n\t\t\treadFromData(session, data)\n\t\tcase <-exitChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/从准备好的数据读取并拆包\nfunc readFromData(session *Iosession, data *[]byte) {\n\tmessage := GolisPackage.Unpacket(*data) \/\/拆包\n\t\/\/收到消息到达时触发事件\n\tGolisHandler.MessageReceived(session, message)\n}\n<commit_msg>去掉超时操作<commit_after>package golis\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/系统变量定义\nvar (\n\tGolisHandler IoHandler \/\/事件处理\n\tGolisPackage Packager \/\/拆包封包处理接口\n\tw WaitGroupWrapper \/\/等待退出\n\trunnable bool \/\/服务运行状态\n\trwTimeout time.Duration = 0 \/\/超时单位秒\n)\n\n\/\/定义waitGroup\ntype WaitGroupWrapper struct {\n\tsync.WaitGroup\n}\n\n\/\/定义session\ntype Iosession struct {\n\tSesionId uint32 \/\/session唯一表示\n\tConnection net.Conn \/\/连接\n\tIsAuth bool \/\/是否认证成功\n\tclosed bool \/\/是否已经关闭\n}\n\n\/\/session写入数据\nfunc (this *Iosession) Write(message interface{}) {\n\t\/\/触发消息发送事件\n\tGolisHandler.MessageSent(this, message)\n\tthis.Connection.Write(GolisPackage.Packet(message))\n}\n\n\/\/关闭连接\nfunc (this *Iosession) Close() {\n\tif !this.closed {\n\t\tGolisHandler.SessionClosed(this)\n\t\tthis.closed = true\n\t}\n\tthis.Connection.Close()\n}\n\n\/\/设置读写超时\nfunc SetTimeout(timeoutSec time.Duration) {\n\trwTimeout = timeoutSec\n}\n\n\/\/拆包封包接口定义\ntype Packager interface {\n\t\/\/读取连接数据\n\t\/\/packageChan 准备好包后交给该chan\n\tReadConnData(buffer *Buffer, packageChan chan<- *[]byte)\n\t\/\/拆包函数\n\tUnpacket(data []byte) interface{}\n\t\/\/封包函数\n\tPacket(msg interface{}) []byte\n}\n\n\/\/事件触发接口定义\ntype IoHandler interface {\n\t\/\/session打开\n\tSessionOpened(session *Iosession)\n\t\/\/session关闭\n\tSessionClosed(session *Iosession)\n\t\/\/收到消息时触发\n\tMessageReceived(session *Iosession, message interface{})\n\t\/\/消息发送时触发\n\tMessageSent(session *Iosession, message interface{})\n}\n\n\/\/服务器端运行golis\n\/\/netPro:运行协议参数,tcp\/udp\n\/\/laddr :程序监听ip和端口,如127.0.0.1:8080\nfunc Run(netPro, laddr string) {\n\trunnable = true\n\tlog.Println(\"golis is listen port:\", laddr)\n\n\tnetLis, err := net.Listen(netPro, laddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer netLis.Close()\n\tlog.Println(\"waiting clients...\")\n\tfor runnable {\n\t\tconn, err := netLis.Accept()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tgo connectHandle(conn)\n\t}\n\n\tw.WaitGroup.Wait()\n\tlog.Println(\"golis is safe exit\")\n}\n\n\/\/停止服务\nfunc Stop() {\n\trunnable = false\n}\n\n\/\/客户端程序连接服务器\nfunc Dial(netPro, laddr string) {\n\trunnable = true\n\tconn, err := net.Dial(netPro, laddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tgo connectHandle(conn)\n}\n\n\/\/处理新连接\nfunc connectHandle(conn net.Conn) {\n\tw.Add(1)\n\t\/\/声明一个临时缓冲区,用来存储被截断的数据\n\tioBuffer := NewBuffer()\n\n\tbuffer := make([]byte, 512)\n\n\t\/\/声明一个管道用于接收解包的数据\n\treaderChannel := make(chan *[]byte, 16)\n\t\/\/创建session\n\tsession := Iosession{Connection: conn}\n\t\/\/触发sessionCreated事件\n\tGolisHandler.SessionOpened(&session)\n\n\texitChan := make(chan bool)\n\tgo waitData(&session, readerChannel, exitChan)\n\tdefer func() {\n\t\tconn.Close()\n\t\tioBuffer = nil\n\t\tbuffer = nil\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tw.Done()\n\t}()\n\n\tfor runnable && !session.closed {\n\n\t\tn, err := conn.Read(buffer)\n\t\t\/\/设置超时\n\t\t\/\/resetTimeout(conn)\n\t\tioBuffer.PutBytes(buffer[:n])\n\t\tif err == nil {\n\t\t\tGolisPackage.ReadConnData(ioBuffer, readerChannel)\n\t\t} else {\n\t\t\t\/\/session已经关闭\n\t\t\tsession.Close()\n\t\t\texitChan <- true\n\t\t\treturn\n\t\t}\n\t\tif !runnable || session.closed {\n\t\t\t\/\/session关闭\n\t\t\tsession.Close()\n\t\t\texitChan <- true\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc resetTimeout(conn net.Conn) {\n\tif rwTimeout == 0 {\n\t\tconn.SetDeadline(time.Time{})\n\t} else {\n\t\tconn.SetDeadline(time.Now().Add(time.Duration(rwTimeout) * time.Second))\n\t}\n}\n\n\/\/等待数据包\nfunc waitData(session *Iosession, readerChannel chan *[]byte, exitChan chan bool) {\n\tdefer func() {\n\t\tclose(exitChan)\n\t\tclose(readerChannel)\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\tfor runnable && !session.closed {\n\t\tselect {\n\t\tcase data := <-readerChannel:\n\t\t\treadFromData(session, data)\n\t\tcase <-exitChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/从准备好的数据读取并拆包\nfunc readFromData(session *Iosession, data *[]byte) {\n\tmessage := GolisPackage.Unpacket(*data) \/\/拆包\n\t\/\/收到消息到达时触发事件\n\tGolisHandler.MessageReceived(session, message)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package golog implements logging functions that log errors to stderr and\n\/\/ debug messages to stdout. Trace logging is also supported.\n\/\/ Trace logs go to stdout as well, but they are only written if the program\n\/\/ is run with environment variable \"TRACE=true\".\n\/\/ A stack dump will be printed after the message if \"PRINT_STACK=true\".\npackage golog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/getlantern\/errors\"\n\t\"github.com\/getlantern\/ops\"\n\t\"github.com\/oxtoacart\/bpool\"\n)\n\nconst (\n\t\/\/ ERROR is an error Severity\n\tERROR = 500\n\n\t\/\/ FATAL is an error Severity\n\tFATAL = 600\n)\n\ntype outputFn func(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n\n\/\/ Output is a log output that can optionally support structured logging\ntype Output interface {\n\t\/\/ Write debug messages\n\tDebug(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n\n\t\/\/ Write error messages\n\tError(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n}\n\nvar (\n\toutput Output\n\toutputMx sync.RWMutex\n\tprepender atomic.Value\n\treporters []ErrorReporter\n\treportersMutex sync.RWMutex\n\n\tbufferPool = bpool.NewBufferPool(200)\n\n\tonFatal atomic.Value\n)\n\n\/\/ Severity is a level of error (higher values are more severe)\ntype Severity int\n\nfunc (s Severity) String() string {\n\tswitch s {\n\tcase ERROR:\n\t\treturn \"ERROR\"\n\tcase FATAL:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\nfunc init() {\n\tDefaultOnFatal()\n\tResetOutputs()\n\tResetPrepender()\n}\n\n\/\/ SetPrepender sets a function to write something, e.g., the timestamp, before\n\/\/ each line of the log.\nfunc SetPrepender(p func(io.Writer)) {\n\tprepender.Store(p)\n}\n\nfunc ResetPrepender() {\n\tSetPrepender(func(io.Writer) {})\n}\n\nfunc GetPrepender() func(io.Writer) {\n\treturn prepender.Load().(func(io.Writer))\n}\n\n\/\/ SetOutputs sets the outputs for error and debug logs to use the given Outputs.\n\/\/ Returns a function that resets outputs to their original values prior to calling SetOutputs.\nfunc SetOutputs(errorOut io.Writer, debugOut io.Writer) (reset func()) {\n\treturn SetOutput(TextOutput(errorOut, debugOut))\n}\n\n\/\/ SetOutput sets the Output to use for errors and debug messages\nfunc SetOutput(out Output) (reset func()) {\n\toutputMx.Lock()\n\tdefer outputMx.Unlock()\n\toldOut := output\n\toutput = out\n\treturn func() {\n\t\toutputMx.Lock()\n\t\tdefer outputMx.Unlock()\n\t\toutput = oldOut\n\t}\n}\n\n\/\/ Deprecated: instead of calling ResetOutputs, use the reset function returned by SetOutputs.\nfunc ResetOutputs() {\n\tSetOutputs(os.Stderr, os.Stdout)\n}\n\nfunc getErrorOut() outputFn {\n\toutputMx.RLock()\n\tdefer outputMx.RUnlock()\n\treturn output.Error\n}\n\nfunc getDebugOut() outputFn {\n\toutputMx.RLock()\n\tdefer outputMx.RUnlock()\n\treturn output.Debug\n}\n\n\/\/ RegisterReporter registers the given ErrorReporter. All logged Errors are\n\/\/ sent to this reporter.\nfunc RegisterReporter(reporter ErrorReporter) {\n\treportersMutex.Lock()\n\treporters = append(reporters, reporter)\n\treportersMutex.Unlock()\n}\n\n\/\/ OnFatal configures golog to call the given function on any FATAL error. By\n\/\/ default, golog calls os.Exit(1) on any FATAL error.\nfunc OnFatal(fn func(err error)) {\n\tonFatal.Store(fn)\n}\n\n\/\/ DefaultOnFatal enables the default behavior for OnFatal\nfunc DefaultOnFatal() {\n\tonFatal.Store(func(err error) {\n\t\tos.Exit(1)\n\t})\n}\n\n\/\/ MultiLine is an interface for arguments that support multi-line output.\ntype MultiLine interface {\n\t\/\/ MultiLinePrinter returns a function that can be used to print the\n\t\/\/ multi-line output. The returned function writes one line to the buffer and\n\t\/\/ returns true if there are more lines to write. This function does not need\n\t\/\/ to take care of trailing carriage returns, golog handles that\n\t\/\/ automatically.\n\tMultiLinePrinter() func(buf *bytes.Buffer) bool\n}\n\n\/\/ ErrorReporter is a function to which the logger will report errors.\n\/\/ It the given error and corresponding message along with associated ops\n\/\/ context. This should return quickly as it executes on the critical code\n\/\/ path. The recommended approach is to buffer as much as possible and discard\n\/\/ new reports if the buffer becomes saturated.\ntype ErrorReporter func(err error, severity Severity, ctx map[string]interface{})\n\ntype Logger interface {\n\t\/\/ Debug logs to stdout\n\tDebug(arg interface{})\n\t\/\/ Debugf logs to stdout\n\tDebugf(message string, args ...interface{})\n\n\t\/\/ Error logs to stderr\n\tError(arg interface{}) error\n\t\/\/ Errorf logs to stderr. It returns the first argument that's an error, or\n\t\/\/ a new error built using fmt.Errorf if none of the arguments are errors.\n\tErrorf(message string, args ...interface{}) error\n\n\t\/\/ Fatal logs to stderr and then exits with status 1\n\tFatal(arg interface{})\n\t\/\/ Fatalf logs to stderr and then exits with status 1\n\tFatalf(message string, args ...interface{})\n\n\t\/\/ Trace logs to stderr only if TRACE=true\n\tTrace(arg interface{})\n\t\/\/ Tracef logs to stderr only if TRACE=true\n\tTracef(message string, args ...interface{})\n\n\t\/\/ TraceOut provides access to an io.Writer to which trace information can\n\t\/\/ be streamed. If running with environment variable \"TRACE=true\", TraceOut\n\t\/\/ will point to os.Stderr, otherwise it will point to a ioutil.Discared.\n\t\/\/ Each line of trace information will be prefixed with this Logger's\n\t\/\/ prefix.\n\tTraceOut() io.Writer\n\n\t\/\/ IsTraceEnabled() indicates whether or not tracing is enabled for this\n\t\/\/ logger.\n\tIsTraceEnabled() bool\n\n\t\/\/ AsStdLogger returns an standard logger\n\tAsStdLogger() *log.Logger\n}\n\nfunc LoggerFor(prefix string) Logger {\n\tl := &logger{\n\t\tprefix: prefix + \": \",\n\t}\n\n\ttrace := os.Getenv(\"TRACE\")\n\tl.traceOn, _ = strconv.ParseBool(trace)\n\tif !l.traceOn {\n\t\tprefixes := strings.Split(trace, \",\")\n\t\tfor _, p := range prefixes {\n\t\t\tif prefix == strings.Trim(p, \" \") {\n\t\t\t\tl.traceOn = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif l.traceOn {\n\t\tl.traceOut = l.newTraceWriter()\n\t} else {\n\t\tl.traceOut = ioutil.Discard\n\t}\n\n\tprintStack := os.Getenv(\"PRINT_STACK\")\n\tl.printStack, _ = strconv.ParseBool(printStack)\n\n\treturn l\n}\n\ntype logger struct {\n\tprefix string\n\ttraceOn bool\n\ttraceOut io.Writer\n\tprintStack bool\n\touts atomic.Value\n}\n\nfunc (l *logger) print(write outputFn, skipFrames int, severity string, arg interface{}) {\n\twrite(l.prefix, skipFrames+2, l.printStack, severity, arg, ops.AsMap(arg, false))\n}\n\nfunc (l *logger) printf(write outputFn, skipFrames int, severity string, message string, args ...interface{}) {\n\tl.print(write, skipFrames+1, severity, fmt.Sprintf(message, args...))\n}\n\nfunc (l *logger) Debug(arg interface{}) {\n\tl.print(getDebugOut(), 4, \"DEBUG\", arg)\n}\n\nfunc (l *logger) Debugf(message string, args ...interface{}) {\n\tl.printf(getDebugOut(), 4, \"DEBUG\", message, args...)\n}\n\nfunc (l *logger) Error(arg interface{}) error {\n\treturn l.errorSkipFrames(arg, 1, ERROR)\n}\n\nfunc (l *logger) Errorf(message string, args ...interface{}) error {\n\treturn l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, ERROR)\n}\n\nfunc (l *logger) Fatal(arg interface{}) {\n\tfatal(l.errorSkipFrames(arg, 1, FATAL))\n}\n\nfunc (l *logger) Fatalf(message string, args ...interface{}) {\n\tfatal(l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, FATAL))\n}\n\nfunc fatal(err error) {\n\tfn := onFatal.Load().(func(err error))\n\tfn(err)\n}\n\nfunc (l *logger) errorSkipFrames(arg interface{}, skipFrames int, severity Severity) error {\n\tvar err error\n\tswitch e := arg.(type) {\n\tcase error:\n\t\terr = e\n\tdefault:\n\t\terr = fmt.Errorf(\"%v\", e)\n\t}\n\tl.print(getErrorOut(), skipFrames+4, severity.String(), err)\n\treturn report(err, severity)\n}\n\nfunc (l *logger) Trace(arg interface{}) {\n\tif l.traceOn {\n\t\tl.print(getDebugOut(), 4, \"TRACE\", arg)\n\t}\n}\n\nfunc (l *logger) Tracef(message string, args ...interface{}) {\n\tif l.traceOn {\n\t\tl.printf(getDebugOut(), 4, \"TRACE\", message, args...)\n\t}\n}\n\nfunc (l *logger) TraceOut() io.Writer {\n\treturn l.traceOut\n}\n\nfunc (l *logger) IsTraceEnabled() bool {\n\treturn l.traceOn\n}\n\nfunc (l *logger) newTraceWriter() io.Writer {\n\tpr, pw := io.Pipe()\n\tbr := bufio.NewReader(pr)\n\n\tif !l.traceOn {\n\t\treturn pw\n\t}\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := pr.Close(); err != nil {\n\t\t\t\terrorOnLogging(err)\n\t\t\t}\n\t\t}()\n\t\tdefer func() {\n\t\t\tif err := pw.Close(); err != nil {\n\t\t\t\terrorOnLogging(err)\n\t\t\t}\n\t\t}()\n\n\t\tfor {\n\t\t\tline, err := br.ReadString('\\n')\n\t\t\tif err == nil {\n\t\t\t\t\/\/ Log the line (minus the trailing newline)\n\t\t\t\tl.print(getDebugOut(), 6, \"TRACE\", line[:len(line)-1])\n\t\t\t} else {\n\t\t\t\tl.printf(getDebugOut(), 6, \"TRACE\", \"TraceWriter closed due to unexpected error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn pw\n}\n\ntype errorWriter struct {\n\tl *logger\n}\n\n\/\/ Write implements method of io.Writer, due to different call depth,\n\/\/ it will not log correct file and line prefix\nfunc (w *errorWriter) Write(p []byte) (n int, err error) {\n\ts := string(p)\n\tif s[len(s)-1] == '\\n' {\n\t\ts = s[:len(s)-1]\n\t}\n\tw.l.print(getErrorOut(), 6, \"ERROR\", s)\n\treturn len(p), nil\n}\n\nfunc (l *logger) AsStdLogger() *log.Logger {\n\treturn log.New(&errorWriter{l}, \"\", 0)\n}\n\nfunc errorOnLogging(err error) {\n\t_, _ = fmt.Fprintf(os.Stderr, \"Unable to log: %v\\n\", err)\n}\n\nfunc report(err error, severity Severity) error {\n\tvar reportersCopy []ErrorReporter\n\treportersMutex.RLock()\n\tif len(reporters) > 0 {\n\t\treportersCopy = make([]ErrorReporter, len(reporters))\n\t\tcopy(reportersCopy, reporters)\n\t}\n\treportersMutex.RUnlock()\n\n\tif len(reportersCopy) > 0 {\n\t\tctx := ops.AsMap(err, true)\n\t\tctx[\"severity\"] = severity.String()\n\t\tfor _, reporter := range reportersCopy {\n\t\t\t\/\/ We include globals when reporting\n\t\t\treporter(err, severity, ctx)\n\t\t}\n\t}\n\treturn err\n}\n\nfunc writeStack(w io.Writer, pcs []uintptr) error {\n\tfor _, pc := range pcs {\n\t\tfuncForPc := runtime.FuncForPC(pc)\n\t\tif funcForPc == nil {\n\t\t\tbreak\n\t\t}\n\t\tname := funcForPc.Name()\n\t\tif strings.HasPrefix(name, \"runtime.\") {\n\t\t\tbreak\n\t\t}\n\t\tfile, line := funcForPc.FileLine(pc)\n\t\t_, err := fmt.Fprintf(w, \"\\t%s\\t%s: %d\\n\", name, file, line)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>added PRINT_JSON env variable<commit_after>\/\/ Package golog implements logging functions that log errors to stderr and\n\/\/ debug messages to stdout. Trace logging is also supported.\n\/\/ Trace logs go to stdout as well, but they are only written if the program\n\/\/ is run with environment variable \"TRACE=true\".\n\/\/ A stack dump will be printed after the message if \"PRINT_STACK=true\".\npackage golog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/getlantern\/errors\"\n\t\"github.com\/getlantern\/ops\"\n\t\"github.com\/oxtoacart\/bpool\"\n)\n\nconst (\n\t\/\/ ERROR is an error Severity\n\tERROR = 500\n\n\t\/\/ FATAL is an error Severity\n\tFATAL = 600\n)\n\ntype outputFn func(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n\n\/\/ Output is a log output that can optionally support structured logging\ntype Output interface {\n\t\/\/ Write debug messages\n\tDebug(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n\n\t\/\/ Write error messages\n\tError(prefix string, skipFrames int, printStack bool, severity string, arg interface{}, values map[string]interface{})\n}\n\nvar (\n\toutput Output\n\toutputMx sync.RWMutex\n\tprepender atomic.Value\n\treporters []ErrorReporter\n\treportersMutex sync.RWMutex\n\n\tbufferPool = bpool.NewBufferPool(200)\n\n\tonFatal atomic.Value\n)\n\n\/\/ Severity is a level of error (higher values are more severe)\ntype Severity int\n\nfunc (s Severity) String() string {\n\tswitch s {\n\tcase ERROR:\n\t\treturn \"ERROR\"\n\tcase FATAL:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\nfunc init() {\n\tDefaultOnFatal()\n\tResetOutputs()\n\tResetPrepender()\n}\n\n\/\/ SetPrepender sets a function to write something, e.g., the timestamp, before\n\/\/ each line of the log.\nfunc SetPrepender(p func(io.Writer)) {\n\tprepender.Store(p)\n}\n\nfunc ResetPrepender() {\n\tSetPrepender(func(io.Writer) {})\n}\n\nfunc GetPrepender() func(io.Writer) {\n\treturn prepender.Load().(func(io.Writer))\n}\n\n\/\/ SetOutputs sets the outputs for error and debug logs to use the given Outputs.\n\/\/ Returns a function that resets outputs to their original values prior to calling SetOutputs.\n\/\/ If env variable PRINT_JSON is set, use JSON output instead of plain text\nfunc SetOutputs(errorOut io.Writer, debugOut io.Writer) (reset func()) {\n\tif printJson, _ := strconv.ParseBool(os.Getenv(\"PRINT_JSON\")); printJson {\n\t\treturn SetOutput(JsonOutput(errorOut, debugOut))\n\t}\n\n\treturn SetOutput(TextOutput(errorOut, debugOut))\n}\n\n\/\/ SetOutput sets the Output to use for errors and debug messages\nfunc SetOutput(out Output) (reset func()) {\n\toutputMx.Lock()\n\tdefer outputMx.Unlock()\n\toldOut := output\n\toutput = out\n\treturn func() {\n\t\toutputMx.Lock()\n\t\tdefer outputMx.Unlock()\n\t\toutput = oldOut\n\t}\n}\n\n\/\/ Deprecated: instead of calling ResetOutputs, use the reset function returned by SetOutputs.\nfunc ResetOutputs() {\n\tSetOutputs(os.Stderr, os.Stdout)\n}\n\nfunc getErrorOut() outputFn {\n\toutputMx.RLock()\n\tdefer outputMx.RUnlock()\n\treturn output.Error\n}\n\nfunc getDebugOut() outputFn {\n\toutputMx.RLock()\n\tdefer outputMx.RUnlock()\n\treturn output.Debug\n}\n\n\/\/ RegisterReporter registers the given ErrorReporter. All logged Errors are\n\/\/ sent to this reporter.\nfunc RegisterReporter(reporter ErrorReporter) {\n\treportersMutex.Lock()\n\treporters = append(reporters, reporter)\n\treportersMutex.Unlock()\n}\n\n\/\/ OnFatal configures golog to call the given function on any FATAL error. By\n\/\/ default, golog calls os.Exit(1) on any FATAL error.\nfunc OnFatal(fn func(err error)) {\n\tonFatal.Store(fn)\n}\n\n\/\/ DefaultOnFatal enables the default behavior for OnFatal\nfunc DefaultOnFatal() {\n\tonFatal.Store(func(err error) {\n\t\tos.Exit(1)\n\t})\n}\n\n\/\/ MultiLine is an interface for arguments that support multi-line output.\ntype MultiLine interface {\n\t\/\/ MultiLinePrinter returns a function that can be used to print the\n\t\/\/ multi-line output. The returned function writes one line to the buffer and\n\t\/\/ returns true if there are more lines to write. This function does not need\n\t\/\/ to take care of trailing carriage returns, golog handles that\n\t\/\/ automatically.\n\tMultiLinePrinter() func(buf *bytes.Buffer) bool\n}\n\n\/\/ ErrorReporter is a function to which the logger will report errors.\n\/\/ It the given error and corresponding message along with associated ops\n\/\/ context. This should return quickly as it executes on the critical code\n\/\/ path. The recommended approach is to buffer as much as possible and discard\n\/\/ new reports if the buffer becomes saturated.\ntype ErrorReporter func(err error, severity Severity, ctx map[string]interface{})\n\ntype Logger interface {\n\t\/\/ Debug logs to stdout\n\tDebug(arg interface{})\n\t\/\/ Debugf logs to stdout\n\tDebugf(message string, args ...interface{})\n\n\t\/\/ Error logs to stderr\n\tError(arg interface{}) error\n\t\/\/ Errorf logs to stderr. It returns the first argument that's an error, or\n\t\/\/ a new error built using fmt.Errorf if none of the arguments are errors.\n\tErrorf(message string, args ...interface{}) error\n\n\t\/\/ Fatal logs to stderr and then exits with status 1\n\tFatal(arg interface{})\n\t\/\/ Fatalf logs to stderr and then exits with status 1\n\tFatalf(message string, args ...interface{})\n\n\t\/\/ Trace logs to stderr only if TRACE=true\n\tTrace(arg interface{})\n\t\/\/ Tracef logs to stderr only if TRACE=true\n\tTracef(message string, args ...interface{})\n\n\t\/\/ TraceOut provides access to an io.Writer to which trace information can\n\t\/\/ be streamed. If running with environment variable \"TRACE=true\", TraceOut\n\t\/\/ will point to os.Stderr, otherwise it will point to a ioutil.Discared.\n\t\/\/ Each line of trace information will be prefixed with this Logger's\n\t\/\/ prefix.\n\tTraceOut() io.Writer\n\n\t\/\/ IsTraceEnabled() indicates whether or not tracing is enabled for this\n\t\/\/ logger.\n\tIsTraceEnabled() bool\n\n\t\/\/ AsStdLogger returns an standard logger\n\tAsStdLogger() *log.Logger\n}\n\nfunc LoggerFor(prefix string) Logger {\n\tl := &logger{\n\t\tprefix: prefix + \": \",\n\t}\n\n\ttrace := os.Getenv(\"TRACE\")\n\tl.traceOn, _ = strconv.ParseBool(trace)\n\tif !l.traceOn {\n\t\tprefixes := strings.Split(trace, \",\")\n\t\tfor _, p := range prefixes {\n\t\t\tif prefix == strings.Trim(p, \" \") {\n\t\t\t\tl.traceOn = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif l.traceOn {\n\t\tl.traceOut = l.newTraceWriter()\n\t} else {\n\t\tl.traceOut = ioutil.Discard\n\t}\n\n\tprintStack := os.Getenv(\"PRINT_STACK\")\n\tl.printStack, _ = strconv.ParseBool(printStack)\n\n\treturn l\n}\n\ntype logger struct {\n\tprefix string\n\ttraceOn bool\n\ttraceOut io.Writer\n\tprintStack bool\n\touts atomic.Value\n}\n\nfunc (l *logger) print(write outputFn, skipFrames int, severity string, arg interface{}) {\n\twrite(l.prefix, skipFrames+2, l.printStack, severity, arg, ops.AsMap(arg, false))\n}\n\nfunc (l *logger) printf(write outputFn, skipFrames int, severity string, message string, args ...interface{}) {\n\tl.print(write, skipFrames+1, severity, fmt.Sprintf(message, args...))\n}\n\nfunc (l *logger) Debug(arg interface{}) {\n\tl.print(getDebugOut(), 4, \"DEBUG\", arg)\n}\n\nfunc (l *logger) Debugf(message string, args ...interface{}) {\n\tl.printf(getDebugOut(), 4, \"DEBUG\", message, args...)\n}\n\nfunc (l *logger) Error(arg interface{}) error {\n\treturn l.errorSkipFrames(arg, 1, ERROR)\n}\n\nfunc (l *logger) Errorf(message string, args ...interface{}) error {\n\treturn l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, ERROR)\n}\n\nfunc (l *logger) Fatal(arg interface{}) {\n\tfatal(l.errorSkipFrames(arg, 1, FATAL))\n}\n\nfunc (l *logger) Fatalf(message string, args ...interface{}) {\n\tfatal(l.errorSkipFrames(errors.NewOffset(1, message, args...), 1, FATAL))\n}\n\nfunc fatal(err error) {\n\tfn := onFatal.Load().(func(err error))\n\tfn(err)\n}\n\nfunc (l *logger) errorSkipFrames(arg interface{}, skipFrames int, severity Severity) error {\n\tvar err error\n\tswitch e := arg.(type) {\n\tcase error:\n\t\terr = e\n\tdefault:\n\t\terr = fmt.Errorf(\"%v\", e)\n\t}\n\tl.print(getErrorOut(), skipFrames+4, severity.String(), err)\n\treturn report(err, severity)\n}\n\nfunc (l *logger) Trace(arg interface{}) {\n\tif l.traceOn {\n\t\tl.print(getDebugOut(), 4, \"TRACE\", arg)\n\t}\n}\n\nfunc (l *logger) Tracef(message string, args ...interface{}) {\n\tif l.traceOn {\n\t\tl.printf(getDebugOut(), 4, \"TRACE\", message, args...)\n\t}\n}\n\nfunc (l *logger) TraceOut() io.Writer {\n\treturn l.traceOut\n}\n\nfunc (l *logger) IsTraceEnabled() bool {\n\treturn l.traceOn\n}\n\nfunc (l *logger) newTraceWriter() io.Writer {\n\tpr, pw := io.Pipe()\n\tbr := bufio.NewReader(pr)\n\n\tif !l.traceOn {\n\t\treturn pw\n\t}\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := pr.Close(); err != nil {\n\t\t\t\terrorOnLogging(err)\n\t\t\t}\n\t\t}()\n\t\tdefer func() {\n\t\t\tif err := pw.Close(); err != nil {\n\t\t\t\terrorOnLogging(err)\n\t\t\t}\n\t\t}()\n\n\t\tfor {\n\t\t\tline, err := br.ReadString('\\n')\n\t\t\tif err == nil {\n\t\t\t\t\/\/ Log the line (minus the trailing newline)\n\t\t\t\tl.print(getDebugOut(), 6, \"TRACE\", line[:len(line)-1])\n\t\t\t} else {\n\t\t\t\tl.printf(getDebugOut(), 6, \"TRACE\", \"TraceWriter closed due to unexpected error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn pw\n}\n\ntype errorWriter struct {\n\tl *logger\n}\n\n\/\/ Write implements method of io.Writer, due to different call depth,\n\/\/ it will not log correct file and line prefix\nfunc (w *errorWriter) Write(p []byte) (n int, err error) {\n\ts := string(p)\n\tif s[len(s)-1] == '\\n' {\n\t\ts = s[:len(s)-1]\n\t}\n\tw.l.print(getErrorOut(), 6, \"ERROR\", s)\n\treturn len(p), nil\n}\n\nfunc (l *logger) AsStdLogger() *log.Logger {\n\treturn log.New(&errorWriter{l}, \"\", 0)\n}\n\nfunc errorOnLogging(err error) {\n\t_, _ = fmt.Fprintf(os.Stderr, \"Unable to log: %v\\n\", err)\n}\n\nfunc report(err error, severity Severity) error {\n\tvar reportersCopy []ErrorReporter\n\treportersMutex.RLock()\n\tif len(reporters) > 0 {\n\t\treportersCopy = make([]ErrorReporter, len(reporters))\n\t\tcopy(reportersCopy, reporters)\n\t}\n\treportersMutex.RUnlock()\n\n\tif len(reportersCopy) > 0 {\n\t\tctx := ops.AsMap(err, true)\n\t\tctx[\"severity\"] = severity.String()\n\t\tfor _, reporter := range reportersCopy {\n\t\t\t\/\/ We include globals when reporting\n\t\t\treporter(err, severity, ctx)\n\t\t}\n\t}\n\treturn err\n}\n\nfunc writeStack(w io.Writer, pcs []uintptr) error {\n\tfor _, pc := range pcs {\n\t\tfuncForPc := runtime.FuncForPC(pc)\n\t\tif funcForPc == nil {\n\t\t\tbreak\n\t\t}\n\t\tname := funcForPc.Name()\n\t\tif strings.HasPrefix(name, \"runtime.\") {\n\t\t\tbreak\n\t\t}\n\t\tfile, line := funcForPc.FileLine(pc)\n\t\t_, err := fmt.Fprintf(w, \"\\t%s\\t%s: %d\\n\", name, file, line)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/thoj\/go-ircevent\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strings\"\n)\n\ntype IRCConfig struct {\n\tNick string\n\tUsername string\n\tServer string\n\tChannels []string\n}\n\nfunc addCallbacks(c *irc.Connection) {\n\t\/\/ fmt.Println(c)\n}\n\nfunc getConfig() (config IRCConfig, err error) {\n\tdata, err := ioutil.ReadFile(\"goyal-config.json\")\n\tif err != nil {\n\t\treturn IRCConfig{}, fmt.Errorf(\"Could not read config file.\\n\")\n\t}\n\n\tdecoder := json.NewDecoder(strings.NewReader(string(data)))\n\tif err := decoder.Decode(&config); err == io.EOF {\n\t\t\/\/ Return config ...\n\t} else if err != nil {\n\t\treturn IRCConfig{}, fmt.Errorf(\"Could not read config file.\\n\")\n\t}\n\n\treturn config, nil\n}\n\nfunc main() {\n\tconfig, err := getConfig()\n\tif err == nil {\n\t\tconnection := irc.IRC(config.Nick, config.Username)\n\t\tconnection.UseTLS = true\n\n\t\taddCallbacks(connection)\n\n\t\t\/\/Connect to the server\n\t\terr := connection.Connect(config.Server)\n\t\tif err == nil {\n\n\t\t\t\/\/ Join the channels\n\t\t\tfor _, channel := range config.Channels {\n\t\t\t\tfmt.Printf(\"Joining %s\\n\", channel)\n\t\t\t\tconnection.Join(channel)\n\t\t\t}\n\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t} else {\n\t\tfmt.Println(err)\n\t}\n\n}\n<commit_msg>Add handlers for the most common types of messages.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/thoj\/go-ircevent\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tTIME_FORMAT = \"Jan 2 2006 15:04:05\"\n)\n\ntype IRCConfig struct {\n\tNick string\n\tUsername string\n\tServer string\n\tChannels []string\n\tLogFileDir string\n\tLogFiles map[string]*os.File\n}\n\nfunc main() {\n\tconfig, err := getConfig()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tconnection := irc.IRC(config.Nick, config.Username)\n\tconnection.UseTLS = true\n\n\tconfig = setupLogFiles(config)\n\tdefer closeLogFiles(config)\n\taddCallbacks(connection, config)\n\n\t\/\/Connect to the server\n\terr = connection.Connect(config.Server)\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect.\")\n\t\treturn\n\t}\n\n\t\/\/ Connect and wait\n\tconnection.Loop()\n}\n\nfunc addCallbacks(connection *irc.Connection, config IRCConfig) {\n\n\t\/\/ Join the channels\n\tconnection.AddCallback(\"001\", func(e *irc.Event) {\n\t\tfor _, channel := range config.Channels {\n\t\t\tlogMessage(config.LogFiles[channel], \"Joining %s\", channel)\n\t\t\tconnection.Join(channel)\n\t\t}\n\t})\n\n\tconnection.AddCallback(\"JOIN\", func(e *irc.Event) {\n\t\tchannel := e.Message()\n\t\tnick := e.Nick\n\t\tvar message string\n\t\tif nick == config.Nick {\n\t\t\tmessage = fmt.Sprintf(\"Hello, I am %s - yet another logbot written in Go.\", nick)\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"Hello %s, Welcome to %s!\", nick, channel)\n\t\t}\n\t\tconnection.Privmsg(channel, message)\n\t\tlogMessage(config.LogFiles[channel], \"%s entered %s\", nick, channel)\n\t})\n\n\tconnection.AddCallback(\"PRIVMSG\", func(e *irc.Event) {\n\t\tchannel := e.Arguments[0]\n\t\tnick := e.Nick\n\t\tswitch channel {\n\t\tcase config.Nick:\n\t\t\tconnection.Privmsg(nick, \"Sorry, I don't accept direct messages!\")\n\t\tdefault:\n\t\t\tlogMessage(config.LogFiles[channel], \"%s: %s\", nick, e.Message())\n\t\t}\n\t})\n\n\tconnection.AddCallback(\"CTCP_ACTION\", func(e *irc.Event) {\n\t\tchannel := e.Arguments[0]\n\t\tnick := e.Nick\n\t\tlogMessage(config.LogFiles[channel], \"***%s %s\", nick, e.Message())\n\t})\n\n\tconnection.AddCallback(\"PART\", func(e *irc.Event) {\n\t\tchannel := e.Arguments[0]\n\t\tnick := e.Nick\n\t\tlogMessage(config.LogFiles[channel], \"%s left %s\", nick, channel)\n\t})\n\n\tconnection.AddCallback(\"QUIT\", func(e *irc.Event) {\n\t\tchannel := e.Arguments[0]\n\t\tnick := e.Nick\n\t\tlogMessage(config.LogFiles[channel], \"%s closed IRC\", nick)\n\t})\n\n}\n\nfunc getConfig() (config IRCConfig, err error) {\n\tdata, err := ioutil.ReadFile(\"goyal-config.json\")\n\tif err != nil {\n\t\treturn IRCConfig{}, fmt.Errorf(\"Could not read config file.\\n\")\n\t}\n\n\tdecoder := json.NewDecoder(strings.NewReader(string(data)))\n\tif err := decoder.Decode(&config); err == io.EOF {\n\t\t\/\/ Return config ...\n\t} else if err != nil {\n\t\treturn IRCConfig{}, fmt.Errorf(\"Could not read config file.\\n\")\n\t}\n\n\treturn config, nil\n}\n\nfunc setupLogFiles(config IRCConfig) (IRCConfig) {\n\tconfig.LogFiles = make(map[string]*os.File)\n\tfor _, channel := range config.Channels {\n\t\t\/\/\/ FIXME: fix path manipulation\n\t\tlogFileName := config.LogFileDir + \"\/\" + channel\n\t\tlogFile, err := os.OpenFile(logFileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfmt.Println(\"Log file opening failed.\")\n\t\t}\n\t\tconfig.LogFiles[channel] = logFile\n\t}\n\treturn config\n}\n\nfunc closeLogFiles(config IRCConfig) {\n\tfor _, logFile := range config.LogFiles {\n\t\tlogFile.Close()\n\t}\n}\n\nfunc logMessage(logFile *os.File, format string, args ...interface{}) {\n\tif logFile == nil {\n\t\tpanic(fmt.Sprintf(\"Writing is failing %+v\\n\", err))\n\t}\n\tnow := time.Now().UTC().Format(TIME_FORMAT)\n\tmessage := fmt.Sprintf(fmt.Sprintf(\"<%s> %s\\n\", now, format), args...)\n\t_, err := logFile.WriteString(message)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Writing is failing %+v\\n\", err))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package goka\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar (\n\ttableSuffix = \"-v1\"\n\tloopSuffix = \"-loop\"\n)\n\n\/\/ SetTableSuffix changes `tableSuffix` which is a suffix for table topic.\n\/\/ Use it to modify table's suffix to otherwise in case you cannot use the default suffix.\nfunc SetTableSuffix(suffix string) {\n\ttableSuffix = suffix\n}\n\n\/\/ SetLoopSuffix changes `loopSuffix` which is a suffix for loop topic of group.\n\/\/ Use it to modify loop topic's suffix to otherwise in case you cannot use the default suffix.\nfunc SetLoopSuffix(suffix string) {\n\tloopSuffix = suffix\n}\n\n\/\/ Stream is the name of an event stream topic in Kafka, ie, a topic with\n\/\/ cleanup.policy=delete\ntype Stream string\n\n\/\/ Streams is a slice of Stream names.\ntype Streams []Stream\n\n\/\/ Table is the name of a table topic in Kafka, ie, a topic with\n\/\/ cleanup.policy=compact\ntype Table string\n\n\/\/ Group is the name of a consumer group in Kafka and represents a processor\n\/\/ group in Goka. A processor group may have a group table and a group loopback\n\/\/ stream. By default, the group table is named <group>-table and the loopback\n\/\/ stream <group>-loop.\ntype Group string\n\n\/\/ GroupGraph is the specification of a processor group. It contains all input,\n\/\/ output, and any other topic from which and into which the processor group\n\/\/ may consume or produce events. Each of these links to Kafka is called Edge.\ntype GroupGraph struct {\n\t\/\/ the group marks multiple processor instances to be long together\n\tgroup string\n\n\t\/\/ the edges define the group graph\n\tinputTables []Edge\n\tcrossTables []Edge\n\tinputStreams []Edge\n\toutputStreams []Edge\n\tloopStream []Edge\n\tgroupTable []Edge\n\tvisitors []Edge\n\n\t\/\/ those fields cache the info from above edges or are used to avoid naming\/codec collisions\n\tcodecs map[string]Codec\n\tcallbacks map[string]ProcessCallback\n\n\toutputStreamTopics map[Stream]struct{}\n\n\tjoinCheck map[string]bool\n}\n\n\/\/ Group returns the group name.\nfunc (gg *GroupGraph) Group() Group {\n\treturn Group(gg.group)\n}\n\n\/\/ InputStreams returns all input stream edges of the group.\nfunc (gg *GroupGraph) InputStreams() Edges {\n\treturn gg.inputStreams\n}\n\n\/\/ JointTables retuns all joint table edges of the group.\nfunc (gg *GroupGraph) JointTables() Edges {\n\treturn gg.inputTables\n}\n\n\/\/ LookupTables retuns all lookup table edges of the group.\nfunc (gg *GroupGraph) LookupTables() Edges {\n\treturn gg.crossTables\n}\n\n\/\/ LoopStream returns the loopback edge of the group.\nfunc (gg *GroupGraph) LoopStream() Edge {\n\t\/\/ only 1 loop stream is valid\n\tif len(gg.loopStream) > 0 {\n\t\treturn gg.loopStream[0]\n\t}\n\treturn nil\n}\n\n\/\/ GroupTable returns the group table edge of the group.\nfunc (gg *GroupGraph) GroupTable() Edge {\n\t\/\/ only 1 group table is valid\n\tif len(gg.groupTable) > 0 {\n\t\treturn gg.groupTable[0]\n\t}\n\treturn nil\n}\n\n\/\/ OutputStreams returns the output stream edges of the group.\nfunc (gg *GroupGraph) OutputStreams() Edges {\n\treturn gg.outputStreams\n}\n\n\/\/ AllEdges returns a list of all edges for the group graph.\n\/\/ This allows to modify a graph by cloning it's edges into a new one.\n\/\/\n\/\/ var existing Graph\n\/\/ edges := existiting.AllEdges()\n\/\/ \/\/ modify edges as required\n\/\/ \/\/ recreate the modifiedg raph\n\/\/ newGraph := DefineGroup(existing.Groug(), edges...)\nfunc (gg *GroupGraph) AllEdges() Edges {\n\treturn chainEdges(\n\t\tgg.inputTables,\n\t\tgg.crossTables,\n\t\tgg.inputStreams,\n\t\tgg.outputStreams,\n\t\tgg.loopStream,\n\t\tgg.groupTable,\n\t\tgg.visitors)\n}\n\n\/\/ returns whether the passed topic is a valid group output topic\nfunc (gg *GroupGraph) isOutputTopic(topic Stream) bool {\n\t_, ok := gg.outputStreamTopics[topic]\n\treturn ok\n}\n\n\/\/ inputs returns all input topics (tables and streams)\nfunc (gg *GroupGraph) inputs() Edges {\n\treturn chainEdges(gg.inputStreams, gg.inputTables, gg.crossTables)\n}\n\n\/\/ copartitioned returns all copartitioned topics (joint tables and input streams)\nfunc (gg *GroupGraph) copartitioned() Edges {\n\treturn chainEdges(gg.inputStreams, gg.inputTables)\n}\n\nfunc (gg *GroupGraph) codec(topic string) Codec {\n\treturn gg.codecs[topic]\n}\n\nfunc (gg *GroupGraph) callback(topic string) ProcessCallback {\n\treturn gg.callbacks[topic]\n}\n\nfunc (gg *GroupGraph) joint(topic string) bool {\n\treturn gg.joinCheck[topic]\n}\n\n\/\/ DefineGroup creates a group graph with a given group name and a list of\n\/\/ edges.\nfunc DefineGroup(group Group, edges ...Edge) *GroupGraph {\n\tgg := GroupGraph{group: string(group),\n\t\tcodecs: make(map[string]Codec),\n\t\tcallbacks: make(map[string]ProcessCallback),\n\t\tjoinCheck: make(map[string]bool),\n\t\toutputStreamTopics: make(map[Stream]struct{}),\n\t}\n\n\tfor _, e := range edges {\n\t\tswitch e := e.(type) {\n\t\tcase inputStreams:\n\t\t\tfor _, input := range e {\n\t\t\t\tgg.validateInputTopic(input.Topic())\n\t\t\t\tinputStr := input.(*inputStream)\n\t\t\t\tgg.codecs[input.Topic()] = input.Codec()\n\t\t\t\tgg.callbacks[input.Topic()] = inputStr.cb\n\t\t\t\tgg.inputStreams = append(gg.inputStreams, inputStr)\n\t\t\t}\n\t\tcase *inputStream:\n\t\t\tgg.validateInputTopic(e.Topic())\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.callbacks[e.Topic()] = e.cb\n\t\t\tgg.inputStreams = append(gg.inputStreams, e)\n\t\tcase *loopStream:\n\t\t\te.setGroup(group)\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.callbacks[e.Topic()] = e.cb\n\t\t\tgg.loopStream = append(gg.loopStream, e)\n\t\tcase *outputStream:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.outputStreams = append(gg.outputStreams, e)\n\t\t\tgg.outputStreamTopics[Stream(e.Topic())] = struct{}{}\n\t\tcase *inputTable:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.inputTables = append(gg.inputTables, e)\n\t\t\tgg.joinCheck[e.Topic()] = true\n\t\tcase *crossTable:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.crossTables = append(gg.crossTables, e)\n\t\tcase *groupTable:\n\t\t\te.setGroup(group)\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.groupTable = append(gg.groupTable, e)\n\t\tcase *visitor:\n\t\t\tgg.visitors = append(gg.visitors, e)\n\t\t}\n\t}\n\n\treturn &gg\n}\n\nfunc (gg *GroupGraph) validateInputTopic(topic string) {\n\tif topic == \"\" {\n\t\tpanic(\"Input topic cannot be empty. This will not work.\")\n\t}\n\n\tif _, exists := gg.callbacks[topic]; exists {\n\t\tpanic(fmt.Errorf(\"Callback for topic %s already exists. It is illegal to consume a topic twice\", topic))\n\t}\n}\n\n\/\/ Validate validates the group graph and returns an error if invalid.\n\/\/ Main validation checks are:\n\/\/ - at most one loopback stream edge is allowed\n\/\/ - at most one group table edge is allowed\n\/\/ - at least one input stream is required\n\/\/ - table and loopback topics cannot be used in any other edge.\nfunc (gg *GroupGraph) Validate() error {\n\tif len(gg.loopStream) > 1 {\n\t\treturn errors.New(\"more than one loop stream in group graph\")\n\t}\n\tif len(gg.groupTable) > 1 {\n\t\treturn errors.New(\"more than one group table in group graph\")\n\t}\n\tif len(gg.inputStreams) == 0 {\n\t\treturn errors.New(\"no input stream in group graph\")\n\t}\n\tfor _, t := range chainEdges(gg.outputStreams, gg.inputStreams, gg.inputTables, gg.crossTables) {\n\t\tif t.Topic() == loopName(gg.Group()) {\n\t\t\treturn errors.New(\"should not directly use loop stream\")\n\t\t}\n\t\tif t.Topic() == tableName(gg.Group()) {\n\t\t\treturn errors.New(\"should not directly use group table\")\n\t\t}\n\t}\n\tif len(gg.visitors) > 0 && len(gg.groupTable) == 0 {\n\t\treturn fmt.Errorf(\"visitors cannot be used in a stateless processor\")\n\t}\n\treturn nil\n}\n\n\/\/ Edge represents a topic in Kafka and the corresponding codec to encode and\n\/\/ decode the messages of that topic.\ntype Edge interface {\n\tString() string\n\tTopic() string\n\tCodec() Codec\n}\n\n\/\/ Edges is a slice of edge objects.\ntype Edges []Edge\n\n\/\/ chainEdges chains edges together to avoid error-prone\n\/\/ append(edges, moreEdges...) constructs in the graph\nfunc chainEdges(edgeList ...Edges) Edges {\n\tvar sum int\n\tfor _, edges := range edgeList {\n\t\tsum += len(edges)\n\t}\n\tchained := make(Edges, 0, sum)\n\n\tfor _, edges := range edgeList {\n\t\tchained = append(chained, edges...)\n\t}\n\treturn chained\n}\n\n\/\/ Topics returns the names of the topics of the edges.\nfunc (e Edges) Topics() []string {\n\tvar t []string\n\tfor _, i := range e {\n\t\tt = append(t, i.Topic())\n\t}\n\treturn t\n}\n\ntype topicDef struct {\n\tname string\n\tcodec Codec\n}\n\nfunc (t *topicDef) Topic() string {\n\treturn t.name\n}\n\nfunc (t *topicDef) String() string {\n\treturn fmt.Sprintf(\"%s\/%T\", t.name, t.codec)\n}\n\nfunc (t *topicDef) Codec() Codec {\n\treturn t.codec\n}\n\ntype inputStream struct {\n\t*topicDef\n\tcb ProcessCallback\n}\n\n\/\/ Input represents an edge of an input stream topic. The edge\n\/\/ specifies the topic name, its codec and the ProcessorCallback used to\n\/\/ process it. The topic has to be copartitioned with any other input stream of\n\/\/ the group and with the group table.\n\/\/ The group starts reading the topic from the newest offset.\nfunc Input(topic Stream, c Codec, cb ProcessCallback) Edge {\n\treturn &inputStream{&topicDef{string(topic), c}, cb}\n}\n\ntype inputStreams Edges\n\nfunc (is inputStreams) String() string {\n\tif is == nil {\n\t\treturn \"empty input streams\"\n\t}\n\n\treturn fmt.Sprintf(\"input streams: %s\/%T\", is.Topic(), is.Codec())\n}\n\nfunc (is inputStreams) Topic() string {\n\tif is == nil {\n\t\treturn \"\"\n\t}\n\tvar topics []string\n\n\tfor _, stream := range is {\n\t\ttopics = append(topics, stream.Topic())\n\t}\n\treturn strings.Join(topics, \",\")\n}\n\nfunc (is inputStreams) Codec() Codec {\n\tif is == nil {\n\t\treturn nil\n\t}\n\treturn is[0].Codec()\n}\n\n\/\/ Inputs creates edges of multiple input streams sharing the same\n\/\/ codec and callback.\nfunc Inputs(topics Streams, c Codec, cb ProcessCallback) Edge {\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\tvar edges Edges\n\tfor _, topic := range topics {\n\t\tedges = append(edges, Input(topic, c, cb))\n\t}\n\treturn inputStreams(edges)\n}\n\ntype visitor struct {\n\tname string\n\tcb ProcessCallback\n}\n\nfunc (m *visitor) Topic() string {\n\treturn m.name\n}\nfunc (m *visitor) Codec() Codec {\n\treturn nil\n}\nfunc (m *visitor) String() string {\n\treturn fmt.Sprintf(\"visitor %s\", m.name)\n}\n\n\/\/ Visitor adds a visitor edge to the processor. This allows to iterate over the whole processor state\n\/\/ while running. Note that this can block rebalance or processor shutdown.\n\/\/ EXPERIMENTAL! This feature is not fully tested and might trigger unknown bugs. Be careful!\nfunc Visitor(name string, cb ProcessCallback) Edge {\n\treturn &visitor{\n\t\tname: name,\n\t\tcb: cb,\n\t}\n}\n\ntype loopStream inputStream\n\n\/\/ Loop represents the edge of the loopback topic of the group. The edge\n\/\/ specifies the codec of the messages in the topic and ProcesCallback to\n\/\/ process the messages of the topic. Context.Loopback() is used to write\n\/\/ messages into this topic from any callback of the group.\nfunc Loop(c Codec, cb ProcessCallback) Edge {\n\treturn &loopStream{&topicDef{codec: c}, cb}\n}\n\nfunc (s *loopStream) setGroup(group Group) {\n\ts.topicDef.name = loopName(group)\n}\n\ntype inputTable struct {\n\t*topicDef\n}\n\n\/\/ Join represents an edge of a copartitioned, log-compacted table topic. The\n\/\/ edge specifies the topic name and the codec of the messages of the topic.\n\/\/ The group starts reading the topic from the oldest offset.\n\/\/ The processing of input streams is blocked until all partitions of the table\n\/\/ are recovered.\nfunc Join(topic Table, c Codec) Edge {\n\treturn &inputTable{&topicDef{string(topic), c}}\n}\n\ntype crossTable struct {\n\t*topicDef\n}\n\n\/\/ Lookup represents an edge of a non-copartitioned, log-compacted table\n\/\/ topic. The edge specifies the topic name and the codec of the messages of\n\/\/ the topic. The group starts reading the topic from the oldest offset.\n\/\/ The processing of input streams is blocked until the table is fully\n\/\/ recovered.\nfunc Lookup(topic Table, c Codec) Edge {\n\treturn &crossTable{&topicDef{string(topic), c}}\n}\n\ntype groupTable struct {\n\t*topicDef\n}\n\n\/\/ Persist represents the edge of the group table, which is log-compacted and\n\/\/ copartitioned with the input streams.\n\/\/ Without Persist, calls to ctx.Value or ctx.SetValue in the consume callback will\n\/\/ fail and lead to shutdown of the processor.\n\/\/\n\/\/ This edge specifies the codec of the\n\/\/ messages in the topic, ie, the codec of the values of the table.\n\/\/ The processing of input streams is blocked until all partitions of the group\n\/\/ table are recovered.\n\/\/\n\/\/ The topic name is derived from the group name by appending \"-table\".\nfunc Persist(c Codec) Edge {\n\treturn &groupTable{&topicDef{codec: c}}\n}\n\nfunc (t *groupTable) setGroup(group Group) {\n\tt.topicDef.name = string(GroupTable(group))\n}\n\ntype outputStream struct {\n\t*topicDef\n}\n\n\/\/ Output represents an edge of an output stream topic. The edge\n\/\/ specifies the topic name and the codec of the messages of the topic.\n\/\/ Context.Emit() only emits messages into Output edges defined in the group\n\/\/ graph.\n\/\/ The topic does not have to be copartitioned with the input streams.\nfunc Output(topic Stream, c Codec) Edge {\n\treturn &outputStream{&topicDef{string(topic), c}}\n}\n\n\/\/ GroupTable returns the name of the group table of group.\nfunc GroupTable(group Group) Table {\n\treturn Table(tableName(group))\n}\n\nfunc tableName(group Group) string {\n\treturn string(group) + tableSuffix\n}\n\n\/\/ loopName returns the name of the loop topic of group.\nfunc loopName(group Group) string {\n\treturn string(group) + loopSuffix\n}\n\n\/\/ StringsToStreams is a simple cast\/conversion functions that allows to pass a slice\n\/\/ of strings as a slice of Stream (Streams)\n\/\/ Avoids the boilerplate loop over the string array that would be necessary otherwise.\nfunc StringsToStreams(strings ...string) Streams {\n\tstreams := make(Streams, 0, len(strings))\n\n\tfor _, str := range strings {\n\t\tstreams = append(streams, Stream(str))\n\t}\n\treturn streams\n}\n<commit_msg>Revert \"Test\"<commit_after>package goka\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar (\n\ttableSuffix = \"-table\"\n\tloopSuffix = \"-loop\"\n)\n\n\/\/ SetTableSuffix changes `tableSuffix` which is a suffix for table topic.\n\/\/ Use it to modify table's suffix to otherwise in case you cannot use the default suffix.\nfunc SetTableSuffix(suffix string) {\n\ttableSuffix = suffix\n}\n\n\/\/ SetLoopSuffix changes `loopSuffix` which is a suffix for loop topic of group.\n\/\/ Use it to modify loop topic's suffix to otherwise in case you cannot use the default suffix.\nfunc SetLoopSuffix(suffix string) {\n\tloopSuffix = suffix\n}\n\n\/\/ Stream is the name of an event stream topic in Kafka, ie, a topic with\n\/\/ cleanup.policy=delete\ntype Stream string\n\n\/\/ Streams is a slice of Stream names.\ntype Streams []Stream\n\n\/\/ Table is the name of a table topic in Kafka, ie, a topic with\n\/\/ cleanup.policy=compact\ntype Table string\n\n\/\/ Group is the name of a consumer group in Kafka and represents a processor\n\/\/ group in Goka. A processor group may have a group table and a group loopback\n\/\/ stream. By default, the group table is named <group>-table and the loopback\n\/\/ stream <group>-loop.\ntype Group string\n\n\/\/ GroupGraph is the specification of a processor group. It contains all input,\n\/\/ output, and any other topic from which and into which the processor group\n\/\/ may consume or produce events. Each of these links to Kafka is called Edge.\ntype GroupGraph struct {\n\t\/\/ the group marks multiple processor instances to be long together\n\tgroup string\n\n\t\/\/ the edges define the group graph\n\tinputTables []Edge\n\tcrossTables []Edge\n\tinputStreams []Edge\n\toutputStreams []Edge\n\tloopStream []Edge\n\tgroupTable []Edge\n\tvisitors []Edge\n\n\t\/\/ those fields cache the info from above edges or are used to avoid naming\/codec collisions\n\tcodecs map[string]Codec\n\tcallbacks map[string]ProcessCallback\n\n\toutputStreamTopics map[Stream]struct{}\n\n\tjoinCheck map[string]bool\n}\n\n\/\/ Group returns the group name.\nfunc (gg *GroupGraph) Group() Group {\n\treturn Group(gg.group)\n}\n\n\/\/ InputStreams returns all input stream edges of the group.\nfunc (gg *GroupGraph) InputStreams() Edges {\n\treturn gg.inputStreams\n}\n\n\/\/ JointTables retuns all joint table edges of the group.\nfunc (gg *GroupGraph) JointTables() Edges {\n\treturn gg.inputTables\n}\n\n\/\/ LookupTables retuns all lookup table edges of the group.\nfunc (gg *GroupGraph) LookupTables() Edges {\n\treturn gg.crossTables\n}\n\n\/\/ LoopStream returns the loopback edge of the group.\nfunc (gg *GroupGraph) LoopStream() Edge {\n\t\/\/ only 1 loop stream is valid\n\tif len(gg.loopStream) > 0 {\n\t\treturn gg.loopStream[0]\n\t}\n\treturn nil\n}\n\n\/\/ GroupTable returns the group table edge of the group.\nfunc (gg *GroupGraph) GroupTable() Edge {\n\t\/\/ only 1 group table is valid\n\tif len(gg.groupTable) > 0 {\n\t\treturn gg.groupTable[0]\n\t}\n\treturn nil\n}\n\n\/\/ OutputStreams returns the output stream edges of the group.\nfunc (gg *GroupGraph) OutputStreams() Edges {\n\treturn gg.outputStreams\n}\n\n\/\/ AllEdges returns a list of all edges for the group graph.\n\/\/ This allows to modify a graph by cloning it's edges into a new one.\n\/\/\n\/\/ var existing Graph\n\/\/ edges := existiting.AllEdges()\n\/\/ \/\/ modify edges as required\n\/\/ \/\/ recreate the modifiedg raph\n\/\/ newGraph := DefineGroup(existing.Groug(), edges...)\nfunc (gg *GroupGraph) AllEdges() Edges {\n\treturn chainEdges(\n\t\tgg.inputTables,\n\t\tgg.crossTables,\n\t\tgg.inputStreams,\n\t\tgg.outputStreams,\n\t\tgg.loopStream,\n\t\tgg.groupTable,\n\t\tgg.visitors)\n}\n\n\/\/ returns whether the passed topic is a valid group output topic\nfunc (gg *GroupGraph) isOutputTopic(topic Stream) bool {\n\t_, ok := gg.outputStreamTopics[topic]\n\treturn ok\n}\n\n\/\/ inputs returns all input topics (tables and streams)\nfunc (gg *GroupGraph) inputs() Edges {\n\treturn chainEdges(gg.inputStreams, gg.inputTables, gg.crossTables)\n}\n\n\/\/ copartitioned returns all copartitioned topics (joint tables and input streams)\nfunc (gg *GroupGraph) copartitioned() Edges {\n\treturn chainEdges(gg.inputStreams, gg.inputTables)\n}\n\nfunc (gg *GroupGraph) codec(topic string) Codec {\n\treturn gg.codecs[topic]\n}\n\nfunc (gg *GroupGraph) callback(topic string) ProcessCallback {\n\treturn gg.callbacks[topic]\n}\n\nfunc (gg *GroupGraph) joint(topic string) bool {\n\treturn gg.joinCheck[topic]\n}\n\n\/\/ DefineGroup creates a group graph with a given group name and a list of\n\/\/ edges.\nfunc DefineGroup(group Group, edges ...Edge) *GroupGraph {\n\tgg := GroupGraph{group: string(group),\n\t\tcodecs: make(map[string]Codec),\n\t\tcallbacks: make(map[string]ProcessCallback),\n\t\tjoinCheck: make(map[string]bool),\n\t\toutputStreamTopics: make(map[Stream]struct{}),\n\t}\n\n\tfor _, e := range edges {\n\t\tswitch e := e.(type) {\n\t\tcase inputStreams:\n\t\t\tfor _, input := range e {\n\t\t\t\tgg.validateInputTopic(input.Topic())\n\t\t\t\tinputStr := input.(*inputStream)\n\t\t\t\tgg.codecs[input.Topic()] = input.Codec()\n\t\t\t\tgg.callbacks[input.Topic()] = inputStr.cb\n\t\t\t\tgg.inputStreams = append(gg.inputStreams, inputStr)\n\t\t\t}\n\t\tcase *inputStream:\n\t\t\tgg.validateInputTopic(e.Topic())\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.callbacks[e.Topic()] = e.cb\n\t\t\tgg.inputStreams = append(gg.inputStreams, e)\n\t\tcase *loopStream:\n\t\t\te.setGroup(group)\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.callbacks[e.Topic()] = e.cb\n\t\t\tgg.loopStream = append(gg.loopStream, e)\n\t\tcase *outputStream:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.outputStreams = append(gg.outputStreams, e)\n\t\t\tgg.outputStreamTopics[Stream(e.Topic())] = struct{}{}\n\t\tcase *inputTable:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.inputTables = append(gg.inputTables, e)\n\t\t\tgg.joinCheck[e.Topic()] = true\n\t\tcase *crossTable:\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.crossTables = append(gg.crossTables, e)\n\t\tcase *groupTable:\n\t\t\te.setGroup(group)\n\t\t\tgg.codecs[e.Topic()] = e.Codec()\n\t\t\tgg.groupTable = append(gg.groupTable, e)\n\t\tcase *visitor:\n\t\t\tgg.visitors = append(gg.visitors, e)\n\t\t}\n\t}\n\n\treturn &gg\n}\n\nfunc (gg *GroupGraph) validateInputTopic(topic string) {\n\tif topic == \"\" {\n\t\tpanic(\"Input topic cannot be empty. This will not work.\")\n\t}\n\n\tif _, exists := gg.callbacks[topic]; exists {\n\t\tpanic(fmt.Errorf(\"Callback for topic %s already exists. It is illegal to consume a topic twice\", topic))\n\t}\n}\n\n\/\/ Validate validates the group graph and returns an error if invalid.\n\/\/ Main validation checks are:\n\/\/ - at most one loopback stream edge is allowed\n\/\/ - at most one group table edge is allowed\n\/\/ - at least one input stream is required\n\/\/ - table and loopback topics cannot be used in any other edge.\nfunc (gg *GroupGraph) Validate() error {\n\tif len(gg.loopStream) > 1 {\n\t\treturn errors.New(\"more than one loop stream in group graph\")\n\t}\n\tif len(gg.groupTable) > 1 {\n\t\treturn errors.New(\"more than one group table in group graph\")\n\t}\n\tif len(gg.inputStreams) == 0 {\n\t\treturn errors.New(\"no input stream in group graph\")\n\t}\n\tfor _, t := range chainEdges(gg.outputStreams, gg.inputStreams, gg.inputTables, gg.crossTables) {\n\t\tif t.Topic() == loopName(gg.Group()) {\n\t\t\treturn errors.New(\"should not directly use loop stream\")\n\t\t}\n\t\tif t.Topic() == tableName(gg.Group()) {\n\t\t\treturn errors.New(\"should not directly use group table\")\n\t\t}\n\t}\n\tif len(gg.visitors) > 0 && len(gg.groupTable) == 0 {\n\t\treturn fmt.Errorf(\"visitors cannot be used in a stateless processor\")\n\t}\n\treturn nil\n}\n\n\/\/ Edge represents a topic in Kafka and the corresponding codec to encode and\n\/\/ decode the messages of that topic.\ntype Edge interface {\n\tString() string\n\tTopic() string\n\tCodec() Codec\n}\n\n\/\/ Edges is a slice of edge objects.\ntype Edges []Edge\n\n\/\/ chainEdges chains edges together to avoid error-prone\n\/\/ append(edges, moreEdges...) constructs in the graph\nfunc chainEdges(edgeList ...Edges) Edges {\n\tvar sum int\n\tfor _, edges := range edgeList {\n\t\tsum += len(edges)\n\t}\n\tchained := make(Edges, 0, sum)\n\n\tfor _, edges := range edgeList {\n\t\tchained = append(chained, edges...)\n\t}\n\treturn chained\n}\n\n\/\/ Topics returns the names of the topics of the edges.\nfunc (e Edges) Topics() []string {\n\tvar t []string\n\tfor _, i := range e {\n\t\tt = append(t, i.Topic())\n\t}\n\treturn t\n}\n\ntype topicDef struct {\n\tname string\n\tcodec Codec\n}\n\nfunc (t *topicDef) Topic() string {\n\treturn t.name\n}\n\nfunc (t *topicDef) String() string {\n\treturn fmt.Sprintf(\"%s\/%T\", t.name, t.codec)\n}\n\nfunc (t *topicDef) Codec() Codec {\n\treturn t.codec\n}\n\ntype inputStream struct {\n\t*topicDef\n\tcb ProcessCallback\n}\n\n\/\/ Input represents an edge of an input stream topic. The edge\n\/\/ specifies the topic name, its codec and the ProcessorCallback used to\n\/\/ process it. The topic has to be copartitioned with any other input stream of\n\/\/ the group and with the group table.\n\/\/ The group starts reading the topic from the newest offset.\nfunc Input(topic Stream, c Codec, cb ProcessCallback) Edge {\n\treturn &inputStream{&topicDef{string(topic), c}, cb}\n}\n\ntype inputStreams Edges\n\nfunc (is inputStreams) String() string {\n\tif is == nil {\n\t\treturn \"empty input streams\"\n\t}\n\n\treturn fmt.Sprintf(\"input streams: %s\/%T\", is.Topic(), is.Codec())\n}\n\nfunc (is inputStreams) Topic() string {\n\tif is == nil {\n\t\treturn \"\"\n\t}\n\tvar topics []string\n\n\tfor _, stream := range is {\n\t\ttopics = append(topics, stream.Topic())\n\t}\n\treturn strings.Join(topics, \",\")\n}\n\nfunc (is inputStreams) Codec() Codec {\n\tif is == nil {\n\t\treturn nil\n\t}\n\treturn is[0].Codec()\n}\n\n\/\/ Inputs creates edges of multiple input streams sharing the same\n\/\/ codec and callback.\nfunc Inputs(topics Streams, c Codec, cb ProcessCallback) Edge {\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\tvar edges Edges\n\tfor _, topic := range topics {\n\t\tedges = append(edges, Input(topic, c, cb))\n\t}\n\treturn inputStreams(edges)\n}\n\ntype visitor struct {\n\tname string\n\tcb ProcessCallback\n}\n\nfunc (m *visitor) Topic() string {\n\treturn m.name\n}\nfunc (m *visitor) Codec() Codec {\n\treturn nil\n}\nfunc (m *visitor) String() string {\n\treturn fmt.Sprintf(\"visitor %s\", m.name)\n}\n\n\/\/ Visitor adds a visitor edge to the processor. This allows to iterate over the whole processor state\n\/\/ while running. Note that this can block rebalance or processor shutdown.\n\/\/ EXPERIMENTAL! This feature is not fully tested and might trigger unknown bugs. Be careful!\nfunc Visitor(name string, cb ProcessCallback) Edge {\n\treturn &visitor{\n\t\tname: name,\n\t\tcb: cb,\n\t}\n}\n\ntype loopStream inputStream\n\n\/\/ Loop represents the edge of the loopback topic of the group. The edge\n\/\/ specifies the codec of the messages in the topic and ProcesCallback to\n\/\/ process the messages of the topic. Context.Loopback() is used to write\n\/\/ messages into this topic from any callback of the group.\nfunc Loop(c Codec, cb ProcessCallback) Edge {\n\treturn &loopStream{&topicDef{codec: c}, cb}\n}\n\nfunc (s *loopStream) setGroup(group Group) {\n\ts.topicDef.name = loopName(group)\n}\n\ntype inputTable struct {\n\t*topicDef\n}\n\n\/\/ Join represents an edge of a copartitioned, log-compacted table topic. The\n\/\/ edge specifies the topic name and the codec of the messages of the topic.\n\/\/ The group starts reading the topic from the oldest offset.\n\/\/ The processing of input streams is blocked until all partitions of the table\n\/\/ are recovered.\nfunc Join(topic Table, c Codec) Edge {\n\treturn &inputTable{&topicDef{string(topic), c}}\n}\n\ntype crossTable struct {\n\t*topicDef\n}\n\n\/\/ Lookup represents an edge of a non-copartitioned, log-compacted table\n\/\/ topic. The edge specifies the topic name and the codec of the messages of\n\/\/ the topic. The group starts reading the topic from the oldest offset.\n\/\/ The processing of input streams is blocked until the table is fully\n\/\/ recovered.\nfunc Lookup(topic Table, c Codec) Edge {\n\treturn &crossTable{&topicDef{string(topic), c}}\n}\n\ntype groupTable struct {\n\t*topicDef\n}\n\n\/\/ Persist represents the edge of the group table, which is log-compacted and\n\/\/ copartitioned with the input streams.\n\/\/ Without Persist, calls to ctx.Value or ctx.SetValue in the consume callback will\n\/\/ fail and lead to shutdown of the processor.\n\/\/\n\/\/ This edge specifies the codec of the\n\/\/ messages in the topic, ie, the codec of the values of the table.\n\/\/ The processing of input streams is blocked until all partitions of the group\n\/\/ table are recovered.\n\/\/\n\/\/ The topic name is derived from the group name by appending \"-table\".\nfunc Persist(c Codec) Edge {\n\treturn &groupTable{&topicDef{codec: c}}\n}\n\nfunc (t *groupTable) setGroup(group Group) {\n\tt.topicDef.name = string(GroupTable(group))\n}\n\ntype outputStream struct {\n\t*topicDef\n}\n\n\/\/ Output represents an edge of an output stream topic. The edge\n\/\/ specifies the topic name and the codec of the messages of the topic.\n\/\/ Context.Emit() only emits messages into Output edges defined in the group\n\/\/ graph.\n\/\/ The topic does not have to be copartitioned with the input streams.\nfunc Output(topic Stream, c Codec) Edge {\n\treturn &outputStream{&topicDef{string(topic), c}}\n}\n\n\/\/ GroupTable returns the name of the group table of group.\nfunc GroupTable(group Group) Table {\n\treturn Table(tableName(group))\n}\n\nfunc tableName(group Group) string {\n\treturn string(group) + tableSuffix\n}\n\n\/\/ loopName returns the name of the loop topic of group.\nfunc loopName(group Group) string {\n\treturn string(group) + loopSuffix\n}\n\n\/\/ StringsToStreams is a simple cast\/conversion functions that allows to pass a slice\n\/\/ of strings as a slice of Stream (Streams)\n\/\/ Avoids the boilerplate loop over the string array that would be necessary otherwise.\nfunc StringsToStreams(strings ...string) Streams {\n\tstreams := make(Streams, 0, len(strings))\n\n\tfor _, str := range strings {\n\t\tstreams = append(streams, Stream(str))\n\t}\n\treturn streams\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"istio.io\/istio\/tests\/e2e\/util\"\n)\n\nconst (\n\tyamlSuffix = \".yaml\"\n\tistioInstallDir = \"install\/kubernetes\"\n\tistioAddonsDir = \"install\/kubernetes\/addons\"\n\tnonAuthInstallFile = \"istio.yaml\"\n\tauthInstallFile = \"istio-auth.yaml\"\n\tistioSystem = \"istio-system\"\n)\n\nvar (\n\tnamespace = flag.String(\"namespace\", \"\", \"Namespace to use for testing (empty to create\/delete temporary one)\")\n\tmixerHub = flag.String(\"mixer_hub\", \"\", \"Mixer hub, if different from istio.Version\")\n\tmixerTag = flag.String(\"mixer_tag\", \"\", \"Mixer tag, if different from istio.Version\")\n\tpilotHub = flag.String(\"pilot_hub\", \"\", \"pilot hub, if different from istio.Version\")\n\tpilotTag = flag.String(\"pilot_tag\", \"\", \"pilot tag, if different from istio.Version\")\n\tcaHub = flag.String(\"ca_hub\", \"\", \"Ca hub\")\n\tcaTag = flag.String(\"ca_tag\", \"\", \"Ca tag\")\n\tauthEnable = flag.Bool(\"auth_enable\", false, \"Enable auth\")\n\trbacfile = flag.String(\"rbac_path\", \"\", \"Rbac yaml file\")\n\tlocalCluster = flag.Bool(\"use_local_cluster\", false, \"Whether the cluster is local or not\")\n\n\taddons = []string{\n\t\t\"prometheus\",\n\t\t\"zipkin\",\n\t}\n)\n\n\/\/ KubeInfo gathers information for kubectl\ntype KubeInfo struct {\n\tNamespace string\n\n\tTmpDir string\n\tyamlDir string\n\n\tIngress string\n\n\tlocalCluster bool\n\tnamespaceCreated bool\n\n\t\/\/ Istioctl installation\n\tIstioctl *Istioctl\n\t\/\/ App Manager\n\tAppManager *AppManager\n}\n\n\/\/ newKubeInfo create a new KubeInfo by given temp dir and runID\nfunc newKubeInfo(tmpDir, runID string) (*KubeInfo, error) {\n\tif *namespace == \"\" {\n\t\t*namespace = runID\n\t}\n\tyamlDir := filepath.Join(tmpDir, \"yaml\")\n\ti, err := NewIstioctl(yamlDir, *namespace, *namespace, *pilotHub, *pilotTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta := NewAppManager(tmpDir, *namespace, i)\n\n\treturn &KubeInfo{\n\t\tNamespace: *namespace,\n\t\tnamespaceCreated: false,\n\t\tTmpDir: tmpDir,\n\t\tyamlDir: yamlDir,\n\t\tlocalCluster: *localCluster,\n\t\tIstioctl: i,\n\t\tAppManager: a,\n\t}, nil\n}\n\n\/\/ Setup set up Kubernetes prerequest for tests\nfunc (k *KubeInfo) Setup() error {\n\tglog.Info(\"Setting up kubeInfo\")\n\tvar err error\n\tif err = os.Mkdir(k.yamlDir, os.ModeDir|os.ModePerm); err != nil {\n\t\treturn err\n\t}\n\n\tif err = util.CreateNamespace(k.Namespace); err != nil {\n\t\tglog.Error(\"Failed to create namespace.\")\n\t\treturn err\n\t}\n\tk.namespaceCreated = true\n\n\tif err = k.deployIstio(); err != nil {\n\t\tglog.Error(\"Failed to deploy Istio.\")\n\t\treturn err\n\t}\n\n\tif err = k.deployAddons(); err != nil {\n\t\tglog.Error(\"Failed to deploy istio addons\")\n\t\treturn err\n\t}\n\n\tvar in string\n\tif k.localCluster {\n\t\tin, err = util.GetIngressPod(k.Namespace)\n\t} else {\n\t\tin, err = util.GetIngress(k.Namespace)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.Ingress = in\n\treturn nil\n}\n\n\/\/ Teardown clean up everything created by setup\nfunc (k *KubeInfo) Teardown() error {\n\tglog.Info(\"Cleaning up kubeInfo\")\n\tvar err error\n\n\tif *rbacfile != \"\" {\n\n\t\ttestRbacYaml := filepath.Join(k.TmpDir, \"yaml\", filepath.Base(*rbacfile))\n\t\tif _, err = os.Stat(testRbacYaml); os.IsNotExist(err) {\n\t\t\tglog.Errorf(\"%s File does not exist\", testRbacYaml)\n\t\t} else if err = util.KubeDelete(k.Namespace, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Rbac deletion failed, please remove stale ClusterRoleBindings\")\n\t\t}\n\t}\n\n\tif k.namespaceCreated {\n\t\tif err = util.DeleteNamespace(k.Namespace); err != nil {\n\t\t\tglog.Errorf(\"Failed to delete namespace %s\", k.Namespace)\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ confirm the namespace is deleted as it will cause future creation to fail\n\t\tmaxAttempts := 15\n\t\tnamespaceDeleted := false\n\t\ttotalWait := 0\n\t\tfor attempts := 1; attempts <= maxAttempts; attempts++ {\n\t\t\tnamespaceDeleted, err = util.NamespaceDeleted(k.Namespace)\n\t\t\tif namespaceDeleted {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttotalWait += attempts\n\t\t\ttime.Sleep(time.Duration(attempts) * time.Second)\n\t\t}\n\n\t\tif !namespaceDeleted {\n\t\t\tglog.Errorf(\"Failed to delete namespace %s after %v seconds\", k.Namespace, totalWait)\n\t\t\treturn err\n\t\t}\n\t\tk.namespaceCreated = false\n\t\tglog.Infof(\"Namespace %s deletion status: %v\", k.Namespace, namespaceDeleted)\n\t}\n\treturn err\n}\n\nfunc (k *KubeInfo) deployAddons() error {\n\tfor _, addon := range addons {\n\n\t\tbaseYamlFile := util.GetResourcePath(filepath.Join(istioAddonsDir, fmt.Sprintf(\"%s.yaml\", addon)))\n\n\t\tcontent, err := ioutil.ReadFile(baseYamlFile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot read file %s\", baseYamlFile)\n\t\t\treturn err\n\t\t}\n\n\t\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\t\tyamlFile := filepath.Join(k.TmpDir, \"yaml\", addon+\".yaml\")\n\t\terr = ioutil.WriteFile(yamlFile, content, 0600)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot write into file %s\", yamlFile)\n\t\t}\n\n\t\tif err := util.KubeApply(k.Namespace, yamlFile); err != nil {\n\t\t\tglog.Errorf(\"Kubectl apply %s failed\", yamlFile)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (k *KubeInfo) deployIstio() error {\n\tistioYaml := nonAuthInstallFile\n\tif *authEnable {\n\t\tistioYaml = authInstallFile\n\t}\n\tbaseIstioYaml := util.GetResourcePath(filepath.Join(istioInstallDir, istioYaml))\n\ttestIstioYaml := filepath.Join(k.TmpDir, \"yaml\", istioYaml)\n\n\tif *rbacfile != \"\" {\n\t\tbaseRbacYaml := util.GetResourcePath(*rbacfile)\n\t\ttestRbacYaml := filepath.Join(k.TmpDir, \"yaml\", filepath.Base(*rbacfile))\n\t\tif err := k.generateRbac(baseRbacYaml, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Generating rbac yaml failed\")\n\t\t}\n\t\tif err := util.KubeApply(k.Namespace, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Rbac deployment failed\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := k.generateIstio(baseIstioYaml, testIstioYaml); err != nil {\n\t\tglog.Errorf(\"Generating yaml %s failed\", testIstioYaml)\n\t\treturn err\n\t}\n\tif err := util.KubeApply(k.Namespace, testIstioYaml); err != nil {\n\t\tglog.Errorf(\"Istio core %s deployment failed\", testIstioYaml)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (k *KubeInfo) generateRbac(src, dst string) error {\n\tcontent, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot read original yaml file %s\", src)\n\t\treturn err\n\t}\n\n\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\tcontent = replacePattern(k, content, \"namespace: default\",\n\t\t\"namespace: \"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-pilot-admin-role-binding\",\n\t\t\"istio-pilot-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-mixer-admin-role-binding\",\n\t\t\"istio-mixer-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-ca-role-binding\",\n\t\t\"istio-ca-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-ingress-admin-role-binding\",\n\t\t\"istio-ingress-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-egress-admin-role-binding\",\n\t\t\"istio-egress-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-sidecar-role-binding\",\n\t\t\"istio-sidecar-role-binding-\"+k.Namespace)\n\n\terr = ioutil.WriteFile(dst, content, 0600)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot write into generate rbac file %s\", dst)\n\t}\n\treturn err\n}\n\nfunc replacePattern(k *KubeInfo, content []byte, src, dest string) []byte {\n\tr := []byte(dest)\n\tp := regexp.MustCompile(src)\n\tcontent = p.ReplaceAllLiteral(content, r)\n\treturn content\n}\n\nfunc (k *KubeInfo) generateIstio(src, dst string) error {\n\tcontent, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot read original yaml file %s\", src)\n\t\treturn err\n\t}\n\n\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\t\/\/ Replace long refresh delays with short ones for the sake of tests.\n\tcontent = replacePattern(k, content, \"rdsRefreshDelay: 30s\", \"rdsRefreshDelay: 1s\")\n\tcontent = replacePattern(k, content, \"discoveryRefreshDelay: 30s\", \"discoveryRefreshDelay: 1s\")\n\tcontent = replacePattern(k, content, \"connectTimeout: 10s\", \"connectTimeout: 1s\")\n\tcontent = replacePattern(k, content, \"drainDuration: 45s\", \"drainDuration: 2s\")\n\tcontent = replacePattern(k, content, \"parentShutdownDuration: 1m0s\", \"parentShutdownDuration: 3s\")\n\n\tif *mixerHub != \"\" && *mixerTag != \"\" {\n\t\tcontent = updateIstioYaml(\"mixer\", *mixerHub, *mixerTag, content)\n\t}\n\tif *pilotHub != \"\" && *pilotTag != \"\" {\n\t\tcontent = updateIstioYaml(\"pilot\", *pilotHub, *pilotTag, content)\n\t\t\/\/Need to be updated when the string \"proxy_debug\" is changed\n\t\tcontent = updateIstioYaml(\"proxy_debug\", *pilotHub, *pilotTag, content)\n\t}\n\tif *caHub != \"\" && *caTag != \"\" {\n\t\t\/\/Need to be updated when the string \"istio-ca\" is changed\n\t\tcontent = updateIstioYaml(\"istio-ca\", *caHub, *caTag, content)\n\t}\n\tif *localCluster {\n\t\tcontent = []byte(strings.Replace(string(content), \"LoadBalancer\", \"NodePort\", 1))\n\t}\n\n\tcontent = []byte(strings.Replace(string(content), \"args: [\\\"discovery\\\", \\\"-v\\\", \\\"2\\\"]\",\n\t\t\"args: [\\\"discovery\\\", \\\"-v\\\", \\\"2\\\", \\\"-a\\\", \\\"\"+k.Namespace+\"\\\"]\", -1))\n\n\terr = ioutil.WriteFile(dst, content, 0600)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot write into generated yaml file %s\", dst)\n\t}\n\treturn err\n}\n\nfunc updateIstioYaml(module, hub, tag string, content []byte) []byte {\n\timage := []byte(fmt.Sprintf(\"image: %s\/%s:%s\", hub, module, tag))\n\tr := regexp.MustCompile(fmt.Sprintf(\"image: .*(\\\\\/%s):.*\", module))\n\treturn r.ReplaceAllLiteral(content, image)\n}\n<commit_msg>Add flag to skip namespace and istio setup, so only the test is run. (#790)<commit_after>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"istio.io\/istio\/tests\/e2e\/util\"\n)\n\nconst (\n\tyamlSuffix = \".yaml\"\n\tistioInstallDir = \"install\/kubernetes\"\n\tistioAddonsDir = \"install\/kubernetes\/addons\"\n\tnonAuthInstallFile = \"istio.yaml\"\n\tauthInstallFile = \"istio-auth.yaml\"\n\tistioSystem = \"istio-system\"\n)\n\nvar (\n\tnamespace = flag.String(\"namespace\", \"\", \"Namespace to use for testing (empty to create\/delete temporary one)\")\n\tmixerHub = flag.String(\"mixer_hub\", \"\", \"Mixer hub, if different from istio.Version\")\n\tmixerTag = flag.String(\"mixer_tag\", \"\", \"Mixer tag, if different from istio.Version\")\n\tpilotHub = flag.String(\"pilot_hub\", \"\", \"pilot hub, if different from istio.Version\")\n\tpilotTag = flag.String(\"pilot_tag\", \"\", \"pilot tag, if different from istio.Version\")\n\tcaHub = flag.String(\"ca_hub\", \"\", \"Ca hub\")\n\tcaTag = flag.String(\"ca_tag\", \"\", \"Ca tag\")\n\tauthEnable = flag.Bool(\"auth_enable\", false, \"Enable auth\")\n\trbacfile = flag.String(\"rbac_path\", \"\", \"Rbac yaml file\")\n\tlocalCluster = flag.Bool(\"use_local_cluster\", false, \"Whether the cluster is local or not\")\n\tskipSetup = flag.Bool(\"skip_setup\", false, \"Skip namespace creation and istio cluster setup\")\n\n\taddons = []string{\n\t\t\"prometheus\",\n\t\t\"zipkin\",\n\t}\n)\n\n\/\/ KubeInfo gathers information for kubectl\ntype KubeInfo struct {\n\tNamespace string\n\n\tTmpDir string\n\tyamlDir string\n\n\tIngress string\n\n\tlocalCluster bool\n\tnamespaceCreated bool\n\n\t\/\/ Istioctl installation\n\tIstioctl *Istioctl\n\t\/\/ App Manager\n\tAppManager *AppManager\n}\n\n\/\/ newKubeInfo create a new KubeInfo by given temp dir and runID\nfunc newKubeInfo(tmpDir, runID string) (*KubeInfo, error) {\n\tif *namespace == \"\" {\n\t\t*namespace = runID\n\t}\n\tyamlDir := filepath.Join(tmpDir, \"yaml\")\n\ti, err := NewIstioctl(yamlDir, *namespace, *namespace, *pilotHub, *pilotTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta := NewAppManager(tmpDir, *namespace, i)\n\n\treturn &KubeInfo{\n\t\tNamespace: *namespace,\n\t\tnamespaceCreated: false,\n\t\tTmpDir: tmpDir,\n\t\tyamlDir: yamlDir,\n\t\tlocalCluster: *localCluster,\n\t\tIstioctl: i,\n\t\tAppManager: a,\n\t}, nil\n}\n\n\/\/ Setup set up Kubernetes prerequest for tests\nfunc (k *KubeInfo) Setup() error {\n\tglog.Info(\"Setting up kubeInfo\")\n\tvar err error\n\tif err = os.Mkdir(k.yamlDir, os.ModeDir|os.ModePerm); err != nil {\n\t\treturn err\n\t}\n\n\tif !*skipSetup {\n\t\tif err = util.CreateNamespace(k.Namespace); err != nil {\n\t\t\tglog.Error(\"Failed to create namespace.\")\n\t\t\treturn err\n\t\t}\n\t\tk.namespaceCreated = true\n\n\t\tif err = k.deployIstio(); err != nil {\n\t\t\tglog.Error(\"Failed to deploy Istio.\")\n\t\t\treturn err\n\t\t}\n\n\t\tif err = k.deployAddons(); err != nil {\n\t\t\tglog.Error(\"Failed to deploy istio addons\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar in string\n\tif k.localCluster {\n\t\tin, err = util.GetIngressPod(k.Namespace)\n\t} else {\n\t\tin, err = util.GetIngress(k.Namespace)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.Ingress = in\n\treturn nil\n}\n\n\/\/ Teardown clean up everything created by setup\nfunc (k *KubeInfo) Teardown() error {\n\tglog.Info(\"Cleaning up kubeInfo\")\n\tvar err error\n\n\tif *rbacfile != \"\" {\n\n\t\ttestRbacYaml := filepath.Join(k.TmpDir, \"yaml\", filepath.Base(*rbacfile))\n\t\tif _, err = os.Stat(testRbacYaml); os.IsNotExist(err) {\n\t\t\tglog.Errorf(\"%s File does not exist\", testRbacYaml)\n\t\t} else if err = util.KubeDelete(k.Namespace, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Rbac deletion failed, please remove stale ClusterRoleBindings\")\n\t\t}\n\t}\n\n\tif k.namespaceCreated {\n\t\tif err = util.DeleteNamespace(k.Namespace); err != nil {\n\t\t\tglog.Errorf(\"Failed to delete namespace %s\", k.Namespace)\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ confirm the namespace is deleted as it will cause future creation to fail\n\t\tmaxAttempts := 15\n\t\tnamespaceDeleted := false\n\t\ttotalWait := 0\n\t\tfor attempts := 1; attempts <= maxAttempts; attempts++ {\n\t\t\tnamespaceDeleted, err = util.NamespaceDeleted(k.Namespace)\n\t\t\tif namespaceDeleted {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttotalWait += attempts\n\t\t\ttime.Sleep(time.Duration(attempts) * time.Second)\n\t\t}\n\n\t\tif !namespaceDeleted {\n\t\t\tglog.Errorf(\"Failed to delete namespace %s after %v seconds\", k.Namespace, totalWait)\n\t\t\treturn err\n\t\t}\n\t\tk.namespaceCreated = false\n\t\tglog.Infof(\"Namespace %s deletion status: %v\", k.Namespace, namespaceDeleted)\n\t}\n\treturn err\n}\n\nfunc (k *KubeInfo) deployAddons() error {\n\tfor _, addon := range addons {\n\n\t\tbaseYamlFile := util.GetResourcePath(filepath.Join(istioAddonsDir, fmt.Sprintf(\"%s.yaml\", addon)))\n\n\t\tcontent, err := ioutil.ReadFile(baseYamlFile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot read file %s\", baseYamlFile)\n\t\t\treturn err\n\t\t}\n\n\t\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\t\tyamlFile := filepath.Join(k.TmpDir, \"yaml\", addon+\".yaml\")\n\t\terr = ioutil.WriteFile(yamlFile, content, 0600)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Cannot write into file %s\", yamlFile)\n\t\t}\n\n\t\tif err := util.KubeApply(k.Namespace, yamlFile); err != nil {\n\t\t\tglog.Errorf(\"Kubectl apply %s failed\", yamlFile)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (k *KubeInfo) deployIstio() error {\n\tistioYaml := nonAuthInstallFile\n\tif *authEnable {\n\t\tistioYaml = authInstallFile\n\t}\n\tbaseIstioYaml := util.GetResourcePath(filepath.Join(istioInstallDir, istioYaml))\n\ttestIstioYaml := filepath.Join(k.TmpDir, \"yaml\", istioYaml)\n\n\tif *rbacfile != \"\" {\n\t\tbaseRbacYaml := util.GetResourcePath(*rbacfile)\n\t\ttestRbacYaml := filepath.Join(k.TmpDir, \"yaml\", filepath.Base(*rbacfile))\n\t\tif err := k.generateRbac(baseRbacYaml, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Generating rbac yaml failed\")\n\t\t}\n\t\tif err := util.KubeApply(k.Namespace, testRbacYaml); err != nil {\n\t\t\tglog.Errorf(\"Rbac deployment failed\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := k.generateIstio(baseIstioYaml, testIstioYaml); err != nil {\n\t\tglog.Errorf(\"Generating yaml %s failed\", testIstioYaml)\n\t\treturn err\n\t}\n\tif err := util.KubeApply(k.Namespace, testIstioYaml); err != nil {\n\t\tglog.Errorf(\"Istio core %s deployment failed\", testIstioYaml)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (k *KubeInfo) generateRbac(src, dst string) error {\n\tcontent, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot read original yaml file %s\", src)\n\t\treturn err\n\t}\n\n\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\tcontent = replacePattern(k, content, \"namespace: default\",\n\t\t\"namespace: \"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-pilot-admin-role-binding\",\n\t\t\"istio-pilot-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-mixer-admin-role-binding\",\n\t\t\"istio-mixer-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-ca-role-binding\",\n\t\t\"istio-ca-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-ingress-admin-role-binding\",\n\t\t\"istio-ingress-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-egress-admin-role-binding\",\n\t\t\"istio-egress-admin-role-binding-\"+k.Namespace)\n\n\tcontent = replacePattern(k, content, \"istio-sidecar-role-binding\",\n\t\t\"istio-sidecar-role-binding-\"+k.Namespace)\n\n\terr = ioutil.WriteFile(dst, content, 0600)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot write into generate rbac file %s\", dst)\n\t}\n\treturn err\n}\n\nfunc replacePattern(k *KubeInfo, content []byte, src, dest string) []byte {\n\tr := []byte(dest)\n\tp := regexp.MustCompile(src)\n\tcontent = p.ReplaceAllLiteral(content, r)\n\treturn content\n}\n\nfunc (k *KubeInfo) generateIstio(src, dst string) error {\n\tcontent, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot read original yaml file %s\", src)\n\t\treturn err\n\t}\n\n\tcontent = replacePattern(k, content, istioSystem, k.Namespace)\n\n\t\/\/ Replace long refresh delays with short ones for the sake of tests.\n\tcontent = replacePattern(k, content, \"rdsRefreshDelay: 30s\", \"rdsRefreshDelay: 1s\")\n\tcontent = replacePattern(k, content, \"discoveryRefreshDelay: 30s\", \"discoveryRefreshDelay: 1s\")\n\tcontent = replacePattern(k, content, \"connectTimeout: 10s\", \"connectTimeout: 1s\")\n\tcontent = replacePattern(k, content, \"drainDuration: 45s\", \"drainDuration: 2s\")\n\tcontent = replacePattern(k, content, \"parentShutdownDuration: 1m0s\", \"parentShutdownDuration: 3s\")\n\n\tif *mixerHub != \"\" && *mixerTag != \"\" {\n\t\tcontent = updateIstioYaml(\"mixer\", *mixerHub, *mixerTag, content)\n\t}\n\tif *pilotHub != \"\" && *pilotTag != \"\" {\n\t\tcontent = updateIstioYaml(\"pilot\", *pilotHub, *pilotTag, content)\n\t\t\/\/Need to be updated when the string \"proxy_debug\" is changed\n\t\tcontent = updateIstioYaml(\"proxy_debug\", *pilotHub, *pilotTag, content)\n\t}\n\tif *caHub != \"\" && *caTag != \"\" {\n\t\t\/\/Need to be updated when the string \"istio-ca\" is changed\n\t\tcontent = updateIstioYaml(\"istio-ca\", *caHub, *caTag, content)\n\t}\n\tif *localCluster {\n\t\tcontent = []byte(strings.Replace(string(content), \"LoadBalancer\", \"NodePort\", 1))\n\t}\n\n\tcontent = []byte(strings.Replace(string(content), \"args: [\\\"discovery\\\", \\\"-v\\\", \\\"2\\\"]\",\n\t\t\"args: [\\\"discovery\\\", \\\"-v\\\", \\\"2\\\", \\\"-a\\\", \\\"\"+k.Namespace+\"\\\"]\", -1))\n\n\terr = ioutil.WriteFile(dst, content, 0600)\n\tif err != nil {\n\t\tglog.Errorf(\"Cannot write into generated yaml file %s\", dst)\n\t}\n\treturn err\n}\n\nfunc updateIstioYaml(module, hub, tag string, content []byte) []byte {\n\timage := []byte(fmt.Sprintf(\"image: %s\/%s:%s\", hub, module, tag))\n\tr := regexp.MustCompile(fmt.Sprintf(\"image: .*(\\\\\/%s):.*\", module))\n\treturn r.ReplaceAllLiteral(content, image)\n}\n<|endoftext|>"} {"text":"<commit_before>package bluemix\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n)\n\ntype vcapServices struct {\n\tServices map[string][]service\n}\n\ntype service struct {\n\tName string\n\tLabel string\n\tPlan string\n\tCredentials map[string]interface{}\n}\n\nvar services = vcapServices{}\n\nfunc loadCredentials() {\n\tfile, _ := os.Open(\"VCAP_SERVICES.json\")\n\tdecoder := json.NewDecoder(file)\n\terr := decoder.Decode(&services)\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t}\n}\n\nfunc GetCredentials(serviceName string) map[string]interface{} {\n\treturn GetCredentialsOnIndex(serviceName, 0)\n}\n\nfunc GetCredentialsOnIndex(serviceName string, index int) map[string]interface{} {\n\tif services.Services == nil {\n\t\tloadCredentials()\n\t\tfmt.Println(\"load cred\")\n\t}\n\treturn services.Services[serviceName][index].Credentials\n}\n<commit_msg>restructure<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage etcd\n\nimport (\n\t\"k8s.io\/kubernetes\/federation\/apis\/federation\"\n\t\"k8s.io\/kubernetes\/federation\/registry\/cluster\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/rest\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\/registry\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n)\n\ntype REST struct {\n\t*registry.Store\n}\n\ntype StatusREST struct {\n\tstore *registry.Store\n}\n\nfunc (r *StatusREST) New() runtime.Object {\n\treturn &federation.Cluster{}\n}\n\n\/\/ Update alters the status subset of an object.\nfunc (r *StatusREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {\n\treturn r.store.Update(ctx, name, objInfo)\n}\n\n\/\/ NewREST returns a RESTStorage object that will work against clusters.\nfunc NewREST(opts generic.RESTOptions) (*REST, *StatusREST) {\n\tprefix := \"\/clusters\"\n\n\tnewListFunc := func() runtime.Object { return &federation.ClusterList{} }\n\tstorageInterface := opts.Decorator(\n\t\topts.Storage, 100, &federation.Cluster{}, prefix, cluster.Strategy, newListFunc)\n\n\tstore := ®istry.Store{\n\t\tNewFunc: func() runtime.Object { return &federation.Cluster{} },\n\t\tNewListFunc: newListFunc,\n\t\tKeyRootFunc: func(ctx api.Context) string {\n\t\t\treturn prefix\n\t\t},\n\t\tKeyFunc: func(ctx api.Context, name string) (string, error) {\n\t\t\treturn registry.NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t},\n\t\tObjectNameFunc: func(obj runtime.Object) (string, error) {\n\t\t\treturn obj.(*federation.Cluster).Name, nil\n\t\t},\n\t\tPredicateFunc: cluster.MatchCluster,\n\t\tQualifiedResource: federation.Resource(\"clusters\"),\n\t\tDeleteCollectionWorkers: opts.DeleteCollectionWorkers,\n\n\t\tCreateStrategy: cluster.Strategy,\n\t\tUpdateStrategy: cluster.Strategy,\n\t\tDeleteStrategy: cluster.Strategy,\n\n\t\tReturnDeletedObject: true,\n\n\t\tStorage: storageInterface,\n\t}\n\n\tstatusStore := *store\n\tstatusStore.UpdateStrategy = cluster.StatusStrategy\n\n\treturn &REST{store}, &StatusREST{store: &statusStore}\n}\n<commit_msg>Extend Filter interface with Trigger() and use it for pods and nodes<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage etcd\n\nimport (\n\t\"k8s.io\/kubernetes\/federation\/apis\/federation\"\n\t\"k8s.io\/kubernetes\/federation\/registry\/cluster\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/rest\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\/registry\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n\t\"k8s.io\/kubernetes\/pkg\/storage\"\n)\n\ntype REST struct {\n\t*registry.Store\n}\n\ntype StatusREST struct {\n\tstore *registry.Store\n}\n\nfunc (r *StatusREST) New() runtime.Object {\n\treturn &federation.Cluster{}\n}\n\n\/\/ Update alters the status subset of an object.\nfunc (r *StatusREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {\n\treturn r.store.Update(ctx, name, objInfo)\n}\n\n\/\/ NewREST returns a RESTStorage object that will work against clusters.\nfunc NewREST(opts generic.RESTOptions) (*REST, *StatusREST) {\n\tprefix := \"\/clusters\"\n\n\tnewListFunc := func() runtime.Object { return &federation.ClusterList{} }\n\tstorageInterface := opts.Decorator(\n\t\topts.Storage,\n\t\t100,\n\t\t&federation.Cluster{},\n\t\tprefix,\n\t\tcluster.Strategy,\n\t\tnewListFunc,\n\t\tstorage.NoTriggerPublisher,\n\t)\n\n\tstore := ®istry.Store{\n\t\tNewFunc: func() runtime.Object { return &federation.Cluster{} },\n\t\tNewListFunc: newListFunc,\n\t\tKeyRootFunc: func(ctx api.Context) string {\n\t\t\treturn prefix\n\t\t},\n\t\tKeyFunc: func(ctx api.Context, name string) (string, error) {\n\t\t\treturn registry.NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t},\n\t\tObjectNameFunc: func(obj runtime.Object) (string, error) {\n\t\t\treturn obj.(*federation.Cluster).Name, nil\n\t\t},\n\t\tPredicateFunc: cluster.MatchCluster,\n\t\tQualifiedResource: federation.Resource(\"clusters\"),\n\t\tDeleteCollectionWorkers: opts.DeleteCollectionWorkers,\n\n\t\tCreateStrategy: cluster.Strategy,\n\t\tUpdateStrategy: cluster.Strategy,\n\t\tDeleteStrategy: cluster.Strategy,\n\n\t\tReturnDeletedObject: true,\n\n\t\tStorage: storageInterface,\n\t}\n\n\tstatusStore := *store\n\tstatusStore.UpdateStrategy = cluster.StatusStrategy\n\n\treturn &REST{store}, &StatusREST{store: &statusStore}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build e2e\n\n\/\/ Copyright 2017 The nats-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\tnatsv1alpha2 \"github.com\/nats-io\/nats-operator\/pkg\/apis\/nats\/v1alpha2\"\n)\n\n\/\/ TestCreateCluster creates a NatsCluster resource and waits for the full mesh to be formed.\nfunc TestCreateCluster(t *testing.T) {\n\tvar (\n\t\tsize = 3\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tif natsCluster, err = f.CreateCluster(f.Namespace, \"test-nats-\", size, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ Wait until the full mesh is formed.\n\tctx, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx, natsCluster, size, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n\/\/ TestPauseControl creates a NatsCluster resource and waits for the full mesh to be formed.\n\/\/ Then, it pauses control of the NatsCluster resource and scales it up to five nodes, expecting the operation to NOT be performed.\n\/\/ Finally, it resumes control of the NatsCluster resource and waits for the full five-node mesh to be formed.\nfunc TestPauseControl(t *testing.T) {\n\tvar (\n\t\tinitialSize = 3\n\t\tfinalSize = 5\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tif natsCluster, err = f.CreateCluster(f.Namespace, \"test-nats-\", initialSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ Wait until the full mesh is formed.\n\tctx1, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx1, natsCluster, initialSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Pause control of the cluster.\n\tnatsCluster.Spec.Paused = true\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Scale the cluster up to five members\n\tnatsCluster.Spec.Size = finalSize\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure that the full mesh is NOT formed with the current size (5) within the timeout period.\n\tctx2, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx2, natsCluster, finalSize, version); err == nil {\n\t\tt.Fatalf(\"the full mesh has formed while control is paused\")\n\t}\n\n\t\/\/ Resume control of the cluster.\n\tnatsCluster.Spec.Paused = false\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure that the full mesh is formed with the current size, since control has been resumed.\n\tctx3, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx3, natsCluster, finalSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n\/\/ TestCreateClusterWithHostPort creates a NatsCluster resource using\n\/\/ a host port with no advertise for clients.\nfunc TestCreateClusterWithHostPort(t *testing.T) {\n\tvar (\n\t\tsize = 1\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tnatsCluster, err = f.CreateCluster(\"test-nats-\", size, version, func(natsCluster *natsv1alpha2.NatsCluster) {\n\t\tnatsCluster.Spec.Pod = &natsv1alpha2.PodPolicy{\n\t\t\tEnableClientsHostPort: true,\n\t\t}\n\t\tnatsCluster.Spec.NoAdvertise = true\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ List pods belonging to the NATS cluster, and make\n\t\/\/ sure that every route is listed in the\n\t\/\/ configuration.\n\tvar attempts int\n\tfor range time.NewTicker(1 * time.Second).C {\n\t\tif attempts >= 30 {\n\t\t\tt.Fatalf(\"Timed out waiting for pods with host port\")\n\t\t}\n\t\tattempts++\n\n\t\tpods, err := f.PodsForNatsCluster(natsCluster)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(pods) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar foundNoAdvertise bool\n\t\tpod := pods[0]\n\t\tcontainer := pod.Spec.Containers[0]\n\t\tfor _, v := range container.Command {\n\t\t\tif v == \"--no_advertise\" {\n\t\t\t\tfoundNoAdvertise = true\n\t\t\t}\n\t\t}\n\t\tif !foundNoAdvertise {\n\t\t\tt.Error(\"Container not configured with no advertise\")\n\t\t}\n\n\t\tvar foundHostPort bool\n\t\tfor _, port := range container.Ports {\n\t\t\tif port.ContainerPort == int32(4222) && port.HostPort == int32(4222) {\n\t\t\t\tfoundHostPort = true\n\t\t\t}\n\t\t}\n\t\tif !foundHostPort {\n\t\t\tt.Error(\"Container not configured with host port\")\n\t\t}\n\n\t\tbreak\n\t}\n}\n<commit_msg>Fix host port test<commit_after>\/\/ +build e2e\n\n\/\/ Copyright 2017 The nats-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage e2e\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\tnatsv1alpha2 \"github.com\/nats-io\/nats-operator\/pkg\/apis\/nats\/v1alpha2\"\n)\n\n\/\/ TestCreateCluster creates a NatsCluster resource and waits for the full mesh to be formed.\nfunc TestCreateCluster(t *testing.T) {\n\tvar (\n\t\tsize = 3\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tif natsCluster, err = f.CreateCluster(f.Namespace, \"test-nats-\", size, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ Wait until the full mesh is formed.\n\tctx, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx, natsCluster, size, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n\/\/ TestPauseControl creates a NatsCluster resource and waits for the full mesh to be formed.\n\/\/ Then, it pauses control of the NatsCluster resource and scales it up to five nodes, expecting the operation to NOT be performed.\n\/\/ Finally, it resumes control of the NatsCluster resource and waits for the full five-node mesh to be formed.\nfunc TestPauseControl(t *testing.T) {\n\tvar (\n\t\tinitialSize = 3\n\t\tfinalSize = 5\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tif natsCluster, err = f.CreateCluster(f.Namespace, \"test-nats-\", initialSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ Wait until the full mesh is formed.\n\tctx1, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx1, natsCluster, initialSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Pause control of the cluster.\n\tnatsCluster.Spec.Paused = true\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Scale the cluster up to five members\n\tnatsCluster.Spec.Size = finalSize\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure that the full mesh is NOT formed with the current size (5) within the timeout period.\n\tctx2, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx2, natsCluster, finalSize, version); err == nil {\n\t\tt.Fatalf(\"the full mesh has formed while control is paused\")\n\t}\n\n\t\/\/ Resume control of the cluster.\n\tnatsCluster.Spec.Paused = false\n\tif natsCluster, err = f.PatchCluster(natsCluster); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Make sure that the full mesh is formed with the current size, since control has been resumed.\n\tctx3, fn := context.WithTimeout(context.Background(), waitTimeout)\n\tdefer fn()\n\tif err = f.WaitUntilFullMeshWithVersion(ctx3, natsCluster, finalSize, version); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n\/\/ TestCreateClusterWithHostPort creates a NatsCluster resource using\n\/\/ a host port with no advertise for clients.\nfunc TestCreateClusterWithHostPort(t *testing.T) {\n\tvar (\n\t\tsize = 1\n\t\tversion = \"1.3.0\"\n\t)\n\n\tvar (\n\t\tnatsCluster *natsv1alpha2.NatsCluster\n\t\terr error\n\t)\n\n\t\/\/ Create a NatsCluster resource with three members.\n\tnatsCluster, err = f.CreateCluster(f.Namespace, \"test-nats-\", size, version, func(natsCluster *natsv1alpha2.NatsCluster) {\n\t\tnatsCluster.Spec.Pod = &natsv1alpha2.PodPolicy{\n\t\t\tEnableClientsHostPort: true,\n\t\t}\n\t\tnatsCluster.Spec.NoAdvertise = true\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Make sure we cleanup the NatsCluster resource after we're done testing.\n\tdefer func() {\n\t\tif err = f.DeleteCluster(natsCluster); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t\/\/ List pods belonging to the NATS cluster, and make\n\t\/\/ sure that every route is listed in the\n\t\/\/ configuration.\n\tvar attempts int\n\tfor range time.NewTicker(1 * time.Second).C {\n\t\tif attempts >= 30 {\n\t\t\tt.Fatalf(\"Timed out waiting for pods with host port\")\n\t\t}\n\t\tattempts++\n\n\t\tpods, err := f.PodsForNatsCluster(natsCluster)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(pods) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar foundNoAdvertise bool\n\t\tpod := pods[0]\n\t\tcontainer := pod.Spec.Containers[0]\n\t\tfor _, v := range container.Command {\n\t\t\tif v == \"--no_advertise\" {\n\t\t\t\tfoundNoAdvertise = true\n\t\t\t}\n\t\t}\n\t\tif !foundNoAdvertise {\n\t\t\tt.Error(\"Container not configured with no advertise\")\n\t\t}\n\n\t\tvar foundHostPort bool\n\t\tfor _, port := range container.Ports {\n\t\t\tif port.ContainerPort == int32(4222) && port.HostPort == int32(4222) {\n\t\t\t\tfoundHostPort = true\n\t\t\t}\n\t\t}\n\t\tif !foundHostPort {\n\t\t\tt.Error(\"Container not configured with host port\")\n\t\t}\n\n\t\tbreak\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/sapcc\/kubernikus\/pkg\/api\/client\/operations\"\n)\n\nfunc (s *E2ETestSuite) SetupSmokeTest() {\n\t\/\/ double check if cluster is ready for smoke test or exit\n\ts.isClusterUpOrWait()\n\ts.TestSetupKubernikusCtl()\n\ts.createClientset()\n\ts.getReadyPods()\n\ts.getReadyNodes()\n\ts.isClusterBigEnoughForSmokeTest()\n\ts.cleanUp()\n\ts.createPods()\n\ts.createServices()\n}\n\nfunc (s *E2ETestSuite) RunSmokeTest() {\n\tif s.IsTestNetwork || s.IsTestSmoke || s.IsTestAll {\n\t\ts.TestPod2PodCommunication()\n\t}\n\n\tif s.IsTestVolume || s.IsTestSmoke || s.IsTestAll {\n\t\ts.TestAttachVolume()\n\t}\n\n\tlog.Print(\"[passed] smoke tests\")\n}\n\nfunc (s *E2ETestSuite) TestSetupKubernikusCtl() {\n\tlog.Printf(\"Setting up kubernikusctl\")\n\t\/\/ get kluster info which contains the URL to the kubernikusctl binary and download it\n\tif runtime.GOOS == \"darwin\" {\n\t\tlog.Println(\"Detected dev machine. skipping kubernikusctl download. make sure 'kubernikusctl' is installed.\")\n\t} else {\n\t\tif err := s.getKubernikusctlBinary(); err != nil {\n\t\t\ts.handleError(fmt.Errorf(\"[failure] could not get kubernikusctl. reason: %v\", err))\n\t\t}\n\t}\n\t\/\/ auth init to get kubeconfig for kluster\n\tif err := s.initKubernikusctl(); err != nil {\n\t\ts.handleError(fmt.Errorf(\"[failure] could not 'kubernikusctl auth init'. reason: %v\", err))\n\t}\n}\n\nfunc (s *E2ETestSuite) getKubernikusctlBinary() error {\n\tlog.Printf(\"getting info for kluster %s to obtain link to kubernikusctl\", s.ClusterName)\n\tinfo, err := s.kubernikusClient.Operations.GetClusterInfo(operations.NewGetClusterInfoParams().WithName(s.ClusterName), s.authFunc())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, b := range info.Payload.Binaries {\n\t\tif b.Name == \"kubernikusctl\" {\n\t\t\tfor _, l := range b.Links {\n\t\t\t\tif l.Platform == runtime.GOOS {\n\t\t\t\t\tfilePath := fmt.Sprintf(\"%s\/%s\", PathBin, KubernikusctlBinaryName)\n\n\t\t\t\t\tp := PathBin + \":\" + os.Getenv(\"PATH\")\n\t\t\t\t\tif err := os.Setenv(\"PATH\", p); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tlog.Printf(\"updated PATH=%v\", os.Getenv(\"PATH\"))\n\n\t\t\t\t\tlog.Printf(\"Downloading %s\", l.Link)\n\t\t\t\t\tresp, err := http.Get(l.Link)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\t\tfOut, err := os.Create(filePath)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tdefer fOut.Close()\n\n\t\t\t\t\t_, err = io.Copy(fOut, resp.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := fOut.Chmod(0777); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ make sure the file is closed before using it\n\t\t\t\t\tfOut.Close()\n\n\t\t\t\t\tpath, err := exec.LookPath(KubernikusctlBinaryName)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tlog.Printf(\"found %s\", path)\n\n\t\t\t\t\t_, err = RunKubernikusctlHostCmd(\"--help\")\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"no link for kubernikusctl binary found for os: %v\", runtime.GOOS)\n}\n\nfunc (s *E2ETestSuite) initKubernikusctl() error {\n\t\/\/ gets auth params from env\n\tout, err := RunKubernikusctlHostCmd(\"auth\", \"init\")\n\tlog.Println(out)\n\treturn err\n}\n\nfunc (s *E2ETestSuite) TestAttachVolume() {\n\tlog.Printf(\"testing persistent volume attachment\")\n\ts.createPVCForPod()\n\ts.createPodWithMount()\n\ts.writeFileToMountedVolume()\n\ts.readFileFromMountedVolume()\n}\n\nfunc (s *E2ETestSuite) TestPod2PodCommunication() {\n\tlog.Print(\"testing network\")\n\n\tlog.Print(\"step 1: testing pod to pod\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyPods {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialPodIP(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"step 2: testing pod to service IP\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyServices {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialServiceIP(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"step 3: testing pod to service name\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyServices {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialServiceName(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Print(\"[network test done]\")\n}\n\nfunc (s *E2ETestSuite) dialPodIP(source *v1.Pod, target *v1.Pod) {\n\t_, err := s.dial(source, target.Status.PodIP, NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> pod\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Spec.NodeName,\n\t\tsource.Status.PodIP,\n\t\ttarget.Status.PodIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dialServiceIP(source *v1.Pod, target *v1.Service) {\n\t_, err := s.dial(source, target.Spec.ClusterIP, NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> svc\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Labels[\"nodeName\"],\n\t\tsource.Status.PodIP,\n\t\ttarget.Spec.ClusterIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dialServiceName(source *v1.Pod, target *v1.Service) {\n\t_, err := s.dial(source, fmt.Sprintf(\"%s.%s.svc\", target.GetName(), target.GetNamespace()), NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> svc\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Labels[\"nodeName\"],\n\t\tsource.Status.PodIP,\n\t\ttarget.Spec.ClusterIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dial(sourcePod *v1.Pod, targetIP string, targetPort int32) (string, error) {\n\tcmd := fmt.Sprintf(\"wget --timeout=%v -O - http:\/\/%v:%v\", TimeoutWGET, targetIP, targetPort)\n\treturn RunKubectlHostCmd(sourcePod.GetNamespace(), sourcePod.GetName(), cmd)\n}\n\nfunc (s *E2ETestSuite) writeFileToMountedVolume() {\n\tcmd := fmt.Sprintf(\"echo hase > %v\/myfile\", PVCMountPath)\n\t_, err := RunKubectlHostCmd(Namespace, PVCName, cmd)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] writing file %v\/myfile\", result, PVCMountPath)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else if result != \"\" || result != `stderr: \"\"` {\n\t\tlog.Println(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) readFileFromMountedVolume() {\n\tcmd := fmt.Sprintf(\"cat %v\/myfile\", PVCMountPath)\n\t_, err := RunKubectlHostCmd(Namespace, PVCName, cmd)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] reading file %v\/myfile\", result, PVCMountPath)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else if result != \"\" || result != `stderr: \"\"` {\n\t\tlog.Println(resultMsg)\n\t}\n}\n<commit_msg>wget shouldn’t retry during smoke tests<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/sapcc\/kubernikus\/pkg\/api\/client\/operations\"\n)\n\nfunc (s *E2ETestSuite) SetupSmokeTest() {\n\t\/\/ double check if cluster is ready for smoke test or exit\n\ts.isClusterUpOrWait()\n\ts.TestSetupKubernikusCtl()\n\ts.createClientset()\n\ts.getReadyPods()\n\ts.getReadyNodes()\n\ts.isClusterBigEnoughForSmokeTest()\n\ts.cleanUp()\n\ts.createPods()\n\ts.createServices()\n}\n\nfunc (s *E2ETestSuite) RunSmokeTest() {\n\tif s.IsTestNetwork || s.IsTestSmoke || s.IsTestAll {\n\t\ts.TestPod2PodCommunication()\n\t}\n\n\tif s.IsTestVolume || s.IsTestSmoke || s.IsTestAll {\n\t\ts.TestAttachVolume()\n\t}\n\n\tlog.Print(\"[passed] smoke tests\")\n}\n\nfunc (s *E2ETestSuite) TestSetupKubernikusCtl() {\n\tlog.Printf(\"Setting up kubernikusctl\")\n\t\/\/ get kluster info which contains the URL to the kubernikusctl binary and download it\n\tif runtime.GOOS == \"darwin\" {\n\t\tlog.Println(\"Detected dev machine. skipping kubernikusctl download. make sure 'kubernikusctl' is installed.\")\n\t} else {\n\t\tif err := s.getKubernikusctlBinary(); err != nil {\n\t\t\ts.handleError(fmt.Errorf(\"[failure] could not get kubernikusctl. reason: %v\", err))\n\t\t}\n\t}\n\t\/\/ auth init to get kubeconfig for kluster\n\tif err := s.initKubernikusctl(); err != nil {\n\t\ts.handleError(fmt.Errorf(\"[failure] could not 'kubernikusctl auth init'. reason: %v\", err))\n\t}\n}\n\nfunc (s *E2ETestSuite) getKubernikusctlBinary() error {\n\tlog.Printf(\"getting info for kluster %s to obtain link to kubernikusctl\", s.ClusterName)\n\tinfo, err := s.kubernikusClient.Operations.GetClusterInfo(operations.NewGetClusterInfoParams().WithName(s.ClusterName), s.authFunc())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, b := range info.Payload.Binaries {\n\t\tif b.Name == \"kubernikusctl\" {\n\t\t\tfor _, l := range b.Links {\n\t\t\t\tif l.Platform == runtime.GOOS {\n\t\t\t\t\tfilePath := fmt.Sprintf(\"%s\/%s\", PathBin, KubernikusctlBinaryName)\n\n\t\t\t\t\tp := PathBin + \":\" + os.Getenv(\"PATH\")\n\t\t\t\t\tif err := os.Setenv(\"PATH\", p); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tlog.Printf(\"updated PATH=%v\", os.Getenv(\"PATH\"))\n\n\t\t\t\t\tlog.Printf(\"Downloading %s\", l.Link)\n\t\t\t\t\tresp, err := http.Get(l.Link)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\t\tfOut, err := os.Create(filePath)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tdefer fOut.Close()\n\n\t\t\t\t\t_, err = io.Copy(fOut, resp.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := fOut.Chmod(0777); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ make sure the file is closed before using it\n\t\t\t\t\tfOut.Close()\n\n\t\t\t\t\tpath, err := exec.LookPath(KubernikusctlBinaryName)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tlog.Printf(\"found %s\", path)\n\n\t\t\t\t\t_, err = RunKubernikusctlHostCmd(\"--help\")\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"no link for kubernikusctl binary found for os: %v\", runtime.GOOS)\n}\n\nfunc (s *E2ETestSuite) initKubernikusctl() error {\n\t\/\/ gets auth params from env\n\tout, err := RunKubernikusctlHostCmd(\"auth\", \"init\")\n\tlog.Println(out)\n\treturn err\n}\n\nfunc (s *E2ETestSuite) TestAttachVolume() {\n\tlog.Printf(\"testing persistent volume attachment\")\n\ts.createPVCForPod()\n\ts.createPodWithMount()\n\ts.writeFileToMountedVolume()\n\ts.readFileFromMountedVolume()\n}\n\nfunc (s *E2ETestSuite) TestPod2PodCommunication() {\n\tlog.Print(\"testing network\")\n\n\tlog.Print(\"step 1: testing pod to pod\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyPods {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialPodIP(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"step 2: testing pod to service IP\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyServices {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialServiceIP(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"step 3: testing pod to service name\")\n\tfor _, source := range s.readyPods {\n\t\tfor _, target := range s.readyServices {\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ts.dialServiceName(&source, &target)\n\t\t\tcase <-s.stopCh:\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Print(\"[network test done]\")\n}\n\nfunc (s *E2ETestSuite) dialPodIP(source *v1.Pod, target *v1.Pod) {\n\t_, err := s.dial(source, target.Status.PodIP, NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> pod\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Spec.NodeName,\n\t\tsource.Status.PodIP,\n\t\ttarget.Status.PodIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dialServiceIP(source *v1.Pod, target *v1.Service) {\n\t_, err := s.dial(source, target.Spec.ClusterIP, NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> svc\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Labels[\"nodeName\"],\n\t\tsource.Status.PodIP,\n\t\ttarget.Spec.ClusterIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dialServiceName(source *v1.Pod, target *v1.Service) {\n\t_, err := s.dial(source, fmt.Sprintf(\"%s.%s.svc\", target.GetName(), target.GetNamespace()), NginxPort)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] node\/%-15v --> node\/%-15v pod\/%-15v --> svc\/%-15v\\n\",\n\t\tresult,\n\t\tsource.Spec.NodeName,\n\t\ttarget.Labels[\"nodeName\"],\n\t\tsource.Status.PodIP,\n\t\ttarget.Spec.ClusterIP,\n\t)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else {\n\t\tfmt.Printf(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) dial(sourcePod *v1.Pod, targetIP string, targetPort int32) (string, error) {\n\tcmd := fmt.Sprintf(\"wget --tries=1 --timeout=%v -O - http:\/\/%v:%v\", TimeoutWGET, targetIP, targetPort)\n\treturn RunKubectlHostCmd(sourcePod.GetNamespace(), sourcePod.GetName(), cmd)\n}\n\nfunc (s *E2ETestSuite) writeFileToMountedVolume() {\n\tcmd := fmt.Sprintf(\"echo hase > %v\/myfile\", PVCMountPath)\n\t_, err := RunKubectlHostCmd(Namespace, PVCName, cmd)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] writing file %v\/myfile\", result, PVCMountPath)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else if result != \"\" || result != `stderr: \"\"` {\n\t\tlog.Println(resultMsg)\n\t}\n}\n\nfunc (s *E2ETestSuite) readFileFromMountedVolume() {\n\tcmd := fmt.Sprintf(\"cat %v\/myfile\", PVCMountPath)\n\t_, err := RunKubectlHostCmd(Namespace, PVCName, cmd)\n\tresult := \"success\"\n\tif err != nil {\n\t\tresult = \"failure\"\n\t}\n\tresultMsg := fmt.Sprintf(\"[%v] reading file %v\/myfile\", result, PVCMountPath)\n\n\tif result == \"failure\" {\n\t\ts.handleError(fmt.Errorf(\"%v \\n error: %#v\", resultMsg, err))\n\t} else if result != \"\" || result != `stderr: \"\"` {\n\t\tlog.Println(resultMsg)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package putio is the Put.io API v2 client for Go.\n\/\/\n\/\/ The go-putio package does not directly handle authentication. Instead, when\n\/\/ creating a new client, pass an http.Client that can handle authentication for\n\/\/ you. The easiest and recommended way to do this is using the golang.org\/x\/oauth2\n\/\/ library, but you can always use any other library that provides an http.Client.\n\/\/ If you have an OAuth2 access token (for example, a personal API token), you can\n\/\/ use it with the oauth2 library using:\n\/\/ \timport \"golang.org\/x\/oauth2\"\n\/\/ \tfunc main() {\n\/\/ \t\ttokenSource := oauth2.StaticTokenSource(\n\/\/ \t\t\t&oauth2.Token{AccessToken: \"<YOUR-TOKEN-HERE>\"},\n\/\/ \t\t)\n\/\/ \t\toauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)\n\/\/ \t\tclient := putio.NewClient(oauthClient)\n\/\/ \t\t\/\/ get root directory\n\/\/ \t\troot, err := client.Files.Get(0)\n\/\/ \t}\n\/\/ Note that when using an authenticated Client, all calls made by the client will\n\/\/ include the specified OAuth token. Therefore, authenticated clients should\n\/\/ almost never be shared between different users.\npackage putio\n<commit_msg>doc: add a new line between import line and main function<commit_after>\/\/ Package putio is the Put.io API v2 client for Go.\n\/\/\n\/\/ The go-putio package does not directly handle authentication. Instead, when\n\/\/ creating a new client, pass an http.Client that can handle authentication for\n\/\/ you. The easiest and recommended way to do this is using the golang.org\/x\/oauth2\n\/\/ library, but you can always use any other library that provides an http.Client.\n\/\/ If you have an OAuth2 access token (for example, a personal API token), you can\n\/\/ use it with the oauth2 library using:\n\/\/ \timport \"golang.org\/x\/oauth2\"\n\/\/\n\/\/ \tfunc main() {\n\/\/ \t\ttokenSource := oauth2.StaticTokenSource(\n\/\/ \t\t\t&oauth2.Token{AccessToken: \"<YOUR-TOKEN-HERE>\"},\n\/\/ \t\t)\n\/\/ \t\toauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)\n\/\/ \t\tclient := putio.NewClient(oauthClient)\n\/\/ \t\t\/\/ get root directory\n\/\/ \t\troot, err := client.Files.Get(0)\n\/\/ \t}\n\/\/ Note that when using an authenticated Client, all calls made by the client will\n\/\/ include the specified OAuth token. Therefore, authenticated clients should\n\/\/ almost never be shared between different users.\npackage putio\n<|endoftext|>"} {"text":"<commit_before>package py\n\n\/*\n#include \"Python.h\"\n\nchar const* getErrString()\n{\n PyObject *dummy, *o, *dummy2;\n PyErr_Fetch(&dummy, &o, &dummy2);\n return PyString_AsString(o);\n}\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"pfi\/sensorbee\/sensorbee\/data\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nfunc getErrString() string {\n\ts := C.getErrString()\n\treturn C.GoString(s)\n}\n\n\/\/ ObjectFunc is a bind of `PyObject` used as `PyFunc`\ntype ObjectFunc struct {\n\tObject\n}\n\n\/\/ invoke name's function. TODO should be placed at internal package.\nfunc invoke(pyObj *C.PyObject, name string, args []data.Value) (data.Value, error) {\n\ttype Result struct {\n\t\tval data.Value\n\t\terr error\n\t}\n\tch := make(chan *Result, 1)\n\tgo func() {\n\t\truntime.LockOSThread()\n\t\tstate := GILState_Ensure()\n\t\tdefer GILState_Release(state)\n\n\t\tvar res data.Value\n\t\tpyFunc, err := getPyFunc(pyObj, name)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v at '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer pyFunc.decRef()\n\n\t\tpyArg, err := convertArgsGo2Py(args)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v at '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer pyArg.decRef()\n\n\t\tret, err := pyFunc.callObject(pyArg)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v in '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer ret.decRef()\n\n\t\tpo, err := fromPyTypeObject(ret.p)\n\t\tch <- &Result{po, err}\n\t}()\n\tres := <-ch\n\n\treturn res.val, res.err\n}\n\n\/\/ TODO should be placed at internal package\nfunc getPyFunc(pyObj *C.PyObject, name string) (ObjectFunc, error) {\n\tcFunc := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cFunc))\n\n\tpyFunc := C.PyObject_GetAttrString(pyObj, cFunc)\n\tif pyFunc == nil {\n\t\treturn ObjectFunc{}, errors.New(\"cannot load function\")\n\t}\n\n\tif ok := C.PyCallable_Check(pyFunc); ok == 0 {\n\t\treturn ObjectFunc{}, errors.New(\"cannot call function\")\n\t}\n\n\treturn ObjectFunc{Object{p: pyFunc}}, nil\n}\n\n\/\/ callObject executes python function, using `PyObject_CallObject`. Returns a\n\/\/ `PyObject` even if result values are more thane one. When a value will be set\n\/\/ directory, and values will be set as a `PyTuple` object.\nfunc (f *ObjectFunc) callObject(arg Object) (po Object, err error) {\n\tpo = Object{\n\t\tp: C.PyObject_CallObject(f.p, arg.p),\n\t}\n\tif po.p == nil {\n\t\tpyerr := getErrString()\n\t\terr = fmt.Errorf(\"calling python function failed: %v\", pyerr)\n\t}\n\treturn\n}\n\nfunc convertArgsGo2Py(args []data.Value) (Object, error) {\n\tpyArg := C.PyTuple_New(C.Py_ssize_t(len(args)))\n\tshouldDecRef := true\n\tdefer func() {\n\t\tif shouldDecRef {\n\t\t\tC.Py_DecRef(pyArg)\n\t\t}\n\t}()\n\tfor i, v := range args {\n\t\to, err := newPyObj(v)\n\t\tif err != nil {\n\t\t\treturn Object{}, err\n\t\t}\n\t\t\/\/ PyTuple object takes over the value's reference, and not need to\n\t\t\/\/ decrease reference counter.\n\t\tC.PyTuple_SetItem(pyArg, C.Py_ssize_t(i), o.p)\n\t}\n\tshouldDecRef = false\n\treturn Object{pyArg}, nil\n}\n<commit_msg>Add TODO comment.<commit_after>package py\n\n\/*\n#include \"Python.h\"\n\nchar const* getErrString()\n{\n \/\/ TODO: consider to use dummy\/dummy2. They may contain useful information for users.\n PyObject *dummy, *o, *dummy2;\n PyErr_Fetch(&dummy, &o, &dummy2);\n return PyString_AsString(o);\n}\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"pfi\/sensorbee\/sensorbee\/data\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nfunc getErrString() string {\n\ts := C.getErrString()\n\treturn C.GoString(s)\n}\n\n\/\/ ObjectFunc is a bind of `PyObject` used as `PyFunc`\ntype ObjectFunc struct {\n\tObject\n}\n\n\/\/ invoke name's function. TODO should be placed at internal package.\nfunc invoke(pyObj *C.PyObject, name string, args []data.Value) (data.Value, error) {\n\ttype Result struct {\n\t\tval data.Value\n\t\terr error\n\t}\n\tch := make(chan *Result, 1)\n\tgo func() {\n\t\truntime.LockOSThread()\n\t\tstate := GILState_Ensure()\n\t\tdefer GILState_Release(state)\n\n\t\tvar res data.Value\n\t\tpyFunc, err := getPyFunc(pyObj, name)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v at '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer pyFunc.decRef()\n\n\t\tpyArg, err := convertArgsGo2Py(args)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v at '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer pyArg.decRef()\n\n\t\tret, err := pyFunc.callObject(pyArg)\n\t\tif err != nil {\n\t\t\tch <- &Result{res, fmt.Errorf(\"%v in '%v'\", err.Error(), name)}\n\t\t\treturn\n\t\t}\n\t\tdefer ret.decRef()\n\n\t\tpo, err := fromPyTypeObject(ret.p)\n\t\tch <- &Result{po, err}\n\t}()\n\tres := <-ch\n\n\treturn res.val, res.err\n}\n\n\/\/ TODO should be placed at internal package\nfunc getPyFunc(pyObj *C.PyObject, name string) (ObjectFunc, error) {\n\tcFunc := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cFunc))\n\n\tpyFunc := C.PyObject_GetAttrString(pyObj, cFunc)\n\tif pyFunc == nil {\n\t\treturn ObjectFunc{}, errors.New(\"cannot load function\")\n\t}\n\n\tif ok := C.PyCallable_Check(pyFunc); ok == 0 {\n\t\treturn ObjectFunc{}, errors.New(\"cannot call function\")\n\t}\n\n\treturn ObjectFunc{Object{p: pyFunc}}, nil\n}\n\n\/\/ callObject executes python function, using `PyObject_CallObject`. Returns a\n\/\/ `PyObject` even if result values are more thane one. When a value will be set\n\/\/ directory, and values will be set as a `PyTuple` object.\nfunc (f *ObjectFunc) callObject(arg Object) (po Object, err error) {\n\tpo = Object{\n\t\tp: C.PyObject_CallObject(f.p, arg.p),\n\t}\n\tif po.p == nil {\n\t\tpyerr := getErrString()\n\t\terr = fmt.Errorf(\"calling python function failed: %v\", pyerr)\n\t}\n\treturn\n}\n\nfunc convertArgsGo2Py(args []data.Value) (Object, error) {\n\tpyArg := C.PyTuple_New(C.Py_ssize_t(len(args)))\n\tshouldDecRef := true\n\tdefer func() {\n\t\tif shouldDecRef {\n\t\t\tC.Py_DecRef(pyArg)\n\t\t}\n\t}()\n\tfor i, v := range args {\n\t\to, err := newPyObj(v)\n\t\tif err != nil {\n\t\t\treturn Object{}, err\n\t\t}\n\t\t\/\/ PyTuple object takes over the value's reference, and not need to\n\t\t\/\/ decrease reference counter.\n\t\tC.PyTuple_SetItem(pyArg, C.Py_ssize_t(i), o.p)\n\t}\n\tshouldDecRef = false\n\treturn Object{pyArg}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\t\"unsafe\"\n\n\t\"github.com\/STNS\/STNS\/attribute\"\n)\n\n\/*\n#include <grp.h>\n#include <sys\/types.h>\n*\/\nimport \"C\"\n\nvar groupList = attribute.UserGroups{}\nvar groupReadPos int\n\ntype Group struct {\n\tgrp *C.struct_group\n\tresult **C.struct_group\n}\n\nfunc (self Group) setCStruct(groups attribute.UserGroups) int {\n\tfor n, g := range groups {\n\t\tif g.Id != 0 {\n\t\t\tself.grp.gr_gid = C.__gid_t(g.Id)\n\t\t\tself.grp.gr_name = C.CString(n)\n\t\t\tself.grp.gr_passwd = C.CString(\"x\")\n\n\t\t\tif g.Group != nil && !reflect.ValueOf(g.Group).IsNil() {\n\t\t\t\twork := make([]*C.char, len(g.Users)+1)\n\t\t\t\tif len(g.Users) > 0 {\n\t\t\t\t\tfor i, u := range g.Users {\n\t\t\t\t\t\twork[i] = C.CString(u)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tself.grp.gr_mem = (**C.char)(unsafe.Pointer(&work[0]))\n\t\t\t} else {\n\t\t\t\tself.grp.gr_mem = nil\n\t\t\t}\n\n\t\t\tself.result = &self.grp\n\t\t\treturn NSS_STATUS_SUCCESS\n\t\t}\n\t}\n\treturn NSS_STATUS_NOTFOUND\n}\n\n\/*-------------------------------------------------------\ngroup\n-------------------------------------------------------*\/\n\/\/export _nss_stns_getgrnam_r\nfunc _nss_stns_getgrnam_r(name *C.char, grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setResource(&Group{grp, result}, \"group\", \"name\", C.GoString(name))\n}\n\n\/\/export _nss_stns_getgrgid_r\nfunc _nss_stns_getgrgid_r(gid C.__gid_t, grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setResource(&Group{grp, result}, \"group\", \"id\", strconv.Itoa(int(gid)))\n}\n\n\/\/export _nss_stns_setgrent\nfunc _nss_stns_setgrent() {\n\tsetList(\"group\", groupList, &groupReadPos)\n}\n\n\/\/export _nss_stns_endgrent\nfunc _nss_stns_endgrent() {\n\tresetList(groupList, &groupReadPos)\n}\n\n\/\/export _nss_stns_getgrent_r\nfunc _nss_stns_getgrent_r(grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setNextResource(&Group{grp, result}, groupList, &groupReadPos)\n}\n<commit_msg>fix gr_mem null value<commit_after>package main\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\t\"unsafe\"\n\n\t\"github.com\/STNS\/STNS\/attribute\"\n)\n\n\/*\n#include <grp.h>\n#include <sys\/types.h>\n*\/\nimport \"C\"\n\nvar groupList = attribute.UserGroups{}\nvar groupReadPos int\n\ntype Group struct {\n\tgrp *C.struct_group\n\tresult **C.struct_group\n}\n\nfunc (self Group) setCStruct(groups attribute.UserGroups) int {\n\tfor n, g := range groups {\n\t\tif g.Id != 0 {\n\t\t\tself.grp.gr_gid = C.__gid_t(g.Id)\n\t\t\tself.grp.gr_name = C.CString(n)\n\t\t\tself.grp.gr_passwd = C.CString(\"x\")\n\n\t\t\tif g.Group != nil && !reflect.ValueOf(g.Group).IsNil() {\n\t\t\t\twork := make([]*C.char, len(g.Users)+1)\n\t\t\t\tif len(g.Users) > 0 {\n\t\t\t\t\tfor i, u := range g.Users {\n\t\t\t\t\t\twork[i] = C.CString(u)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tself.grp.gr_mem = (**C.char)(unsafe.Pointer(&work[0]))\n\t\t\t} else {\n\t\t\t\twork := make([]*C.char, 1)\n\t\t\t\tself.grp.gr_mem = (**C.char)(unsafe.Pointer(&work[0]))\n\t\t\t}\n\n\t\t\tself.result = &self.grp\n\t\t\treturn NSS_STATUS_SUCCESS\n\t\t}\n\t}\n\treturn NSS_STATUS_NOTFOUND\n}\n\n\/*-------------------------------------------------------\ngroup\n-------------------------------------------------------*\/\n\/\/export _nss_stns_getgrnam_r\nfunc _nss_stns_getgrnam_r(name *C.char, grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setResource(&Group{grp, result}, \"group\", \"name\", C.GoString(name))\n}\n\n\/\/export _nss_stns_getgrgid_r\nfunc _nss_stns_getgrgid_r(gid C.__gid_t, grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setResource(&Group{grp, result}, \"group\", \"id\", strconv.Itoa(int(gid)))\n}\n\n\/\/export _nss_stns_setgrent\nfunc _nss_stns_setgrent() {\n\tsetList(\"group\", groupList, &groupReadPos)\n}\n\n\/\/export _nss_stns_endgrent\nfunc _nss_stns_endgrent() {\n\tresetList(groupList, &groupReadPos)\n}\n\n\/\/export _nss_stns_getgrent_r\nfunc _nss_stns_getgrent_r(grp *C.struct_group, buffer *C.char, bufsize C.size_t, result **C.struct_group) int {\n\treturn setNextResource(&Group{grp, result}, groupList, &groupReadPos)\n}\n<|endoftext|>"} {"text":"<commit_before>package jira\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ GroupService handles Groups for the JIRA instance \/ API.\n\/\/\n\/\/ JIRA API docs: https:\/\/docs.atlassian.com\/jira\/REST\/server\/#api\/2\/group\ntype GroupService struct {\n\tclient *Client\n}\n\n\/\/ groupMembersResult is only a small wrapper around the Group* methods\n\/\/ to be able to parse the results\ntype groupMembersResult struct {\n\tStartAt int `json:\"startAt\"`\n\tMaxResults int `json:\"maxResults\"`\n\tTotal int `json:\"total\"`\n\tMembers []GroupMember `json:\"values\"`\n}\n\n\/\/ GroupMember reflects a single member of a group\ntype GroupMember struct {\n\tSelf string `json:\"self,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tKey string `json:\"key,omitempty\"`\n\tEmailAddress string `json:\"emailAddress,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tActive bool `json:\"active,omitempty\"`\n\tTimeZone string `json:\"timeZone,omitempty\"`\n}\n\n\/\/ Get returns a paginated list of users who are members of the specified group and its subgroups.\n\/\/ Users in the page are ordered by user names.\n\/\/ User of this resource is required to have sysadmin or admin permissions.\n\/\/\n\/\/ JIRA API docs: https:\/\/docs.atlassian.com\/jira\/REST\/server\/#api\/2\/group-getUsersFromGroup\nfunc (s *GroupService) Get(name string) ([]GroupMember, *Response, error) {\n\tapiEndpoint := fmt.Sprintf(\"rest\/api\/2\/group\/member?groupname=%s\", name)\n\treq, err := s.client.NewRequest(\"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgroup := new(groupMembersResult)\n\tresp, err := s.client.Do(req, &group)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn group.Members, resp, nil\n}\n<commit_msg>revert<commit_after>package jira\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ GroupService handles Groups for the JIRA instance \/ API.\n\/\/\n\/\/ JIRA API docs: https:\/\/docs.atlassian.com\/jira\/REST\/server\/#api\/2\/group\ntype GroupService struct {\n\tclient *Client\n}\n\n\/\/ groupMembersResult is only a small wrapper around the Group* methods\n\/\/ to be able to parse the results\ntype groupMembersResult struct {\n\tStartAt int `json:\"startAt\"`\n\tMaxResults int `json:\"maxResults\"`\n\tTotal int `json:\"total\"`\n\tMembers []GroupMember `json:\"values\"`\n}\n\n\/\/ GroupMember reflects a single member of a group\ntype GroupMember struct {\n\tSelf string `json:\"self,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tKey string `json:\"key,omitempty\"`\n\tEmailAddress string `json:\"emailAddress,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tActive bool `json:\"active,omitempty\"`\n\tTimeZone string `json:\"timeZone,omitempty\"`\n}\n\n\/\/ Get returns a paginated list of users who are members of the specified group and its subgroups.\n\/\/ Users in the page are ordered by user names.\n\/\/ User of this resource is required to have sysadmin or admin permissions.\n\/\/\n\/\/ JIRA API docs: https:\/\/docs.atlassian.com\/jira\/REST\/server\/#api\/2\/group-getUsersFromGroup\nfunc (s *GroupService) Get(name string) ([]GroupMember, *Response, error) {\n\tapiEndpoint := fmt.Sprintf(\"rest\/api\/2\/group\/member?groupname=%s\", name)\n\treq, err := s.client.NewRequest(\"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgroup := new(groupMembersResult)\n\tresp, err := s.client.Do(req, group)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn group.Members, resp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/coreos-inc\/updatectl\/client\/update\/v1\"\n)\n\nvar (\n\tgroupFlags struct {\n\t\tlabel string\n\t\tchannel string\n\t\tstart int64\n\t\tend int64\n\t\tresolution int64\n\t\tupdateCount int64\n\t\tupdateInterval int64\n\t}\n\n\tcmdListGroups = &Command{\n\t\tName: \"list-groups\",\n\t\tUsage: \"<appId>\",\n\t\tSummary: `List all of the groups that exist including their label, token and update state.`,\n\t\tRun: listGroups,\n\t}\n\tcmdNewGroup = &Command{\n\t\tName: \"new-group\",\n\t\tUsage: \"<appId> <channelId> <groupId> <appLabel>\",\n\t\tSummary: `Create a new group given a label.`,\n\t\tRun: newGroup,\n\t}\n\tcmdDeleteGroup = &Command{\n\t\tName: \"delete-group\",\n\t\tUsage: \"<appId> <groupId>\",\n\t\tSummary: `Delete a group given a token.`,\n\t\tRun: deleteGroup,\n\t}\n\tcmdUpdateGroup = &Command{\n\t\tName: \"update-group\",\n\t\tUsage: \"[OPTION]... <appId> <groupId>\",\n\t\tDescription: `Update an existing group.`,\n\t\tRun: updateGroup,\n\t}\n\tcmdPauseGroup = &Command{\n\t\tName: \"pause-group\",\n\t\tUsage: \"<appId> <groupId>\",\n\t\tSummary: `Pause a group given an id.`,\n\t\tRun: pauseGroup,\n\t}\n\tcmdUnpauseGroup = &Command{\n\t\tName: \"unpause-group\",\n\t\tUsage: \"<appId> <groupId>\",\n\t\tSummary: `Unpause a group given an id.`,\n\t\tRun: unpauseGroup,\n\t}\n\tcmdRollupGroupVersions = &Command{\n\t\tName: \"rollup-group-versions\",\n\t\tUsage: \"[OPTION]... <appId> <groupId>\",\n\t\tSummary: \"Rollup versions from events by time.\",\n\t\tRun: rollupGroupVersions,\n\t}\n\tcmdRollupGroupEvents = &Command{\n\t\tName: \"rollup-group-events\",\n\t\tUsage: \"[OPTION]... <appId> <groupId>\",\n\t\tSummary: \"Rollup events from events by time.\",\n\t\tRun: rollupGroupEvents,\n\t}\n)\n\nfunc init() {\n\tcmdUpdateGroup.Flags.StringVar(&groupFlags.label, \"label\", \"\", \"\")\n\tcmdUpdateGroup.Flags.StringVar(&groupFlags.channel, \"channel\", \"\", \"\")\n\tcmdUpdateGroup.Flags.Int64Var(&groupFlags.updateCount, \"updateCount\", -1, \"Number of instances per interval\")\n\tcmdUpdateGroup.Flags.Int64Var(&groupFlags.updateInterval, \"updateInterval\", -1, \"Interval between updates\")\n\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.resolution, \"resolution\", 60, \"60, 3600 or 86400 seconds\")\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.start, \"start\", 0, \"Start date filter\")\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.end, \"end\", 0, \"End date filter\")\n\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.resolution, \"resolution\", 60, \"60, 3600 or 86400 seconds\")\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.start, \"start\", 0, \"Start date filter\")\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.end, \"end\", 0, \"End date filter\")\n}\n\nfunc formatGroup(group *update.Group) string {\n\treturn fmt.Sprintf(\"%s\\t%s\\t%s\\t%s\\t%s\\t%v\\t%v\\n\", group.Label, group.AppId, group.ChannelId,\n\t\tgroup.Id, strconv.FormatBool(group.UpdatesPaused), group.UpdateCount, group.UpdateInterval)\n}\n\nfunc listGroups(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 1 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tlistCall := service.Group.List(args[0])\n\tlist, err := listCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Label\\tApp\\tChannel\\tId\\tUpdatesPaused\")\n\tfor _, group := range list.Items {\n\t\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\t}\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc rollupGroupEvents(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Requests.Events.Rollup(args[0], args[1], groupFlags.start, groupFlags.end)\n\tcall.Resolution(groupFlags.resolution)\n\tlist, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Version\\tType\\tResult\\tTimestamp\\tCount\")\n\tfor _, i := range list.Items {\n\t\tfor _, j := range i.Values {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t%s\\t%d\\t%d\\n\",\n\t\t\t\ti.Version, i.Type, i.Result, j.Timestamp, j.Count)\n\t\t}\n\t}\n\tout.Flush()\n\treturn OK\n}\n\nfunc rollupGroupVersions(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Requests.Versions.Rollup(args[0], args[1], groupFlags.start, groupFlags.end)\n\tcall.Resolution(groupFlags.resolution)\n\tlist, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Version\\tTimestamp\\tCount\")\n\tfor _, i := range list.Items {\n\t\tfor _, j := range i.Values {\n\t\t\tfmt.Fprintf(out, \"%s\\t%d\\t%d\\n\",\n\t\t\t\ti.Version, j.Timestamp, j.Count)\n\t\t}\n\t}\n\tout.Flush()\n\treturn OK\n}\n\nfunc newGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 4 {\n\t\treturn ERROR_USAGE\n\t}\n\tgroup := &update.Group{ChannelId: args[1], Id: args[2], Label: args[3]}\n\tcall := service.Group.Insert(args[0], group)\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc deleteGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Delete(args[0], args[1])\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc pauseGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\treturn setUpdatesPaused(args, service, out, true)\n}\n\nfunc unpauseGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\treturn setUpdatesPaused(args, service, out, false)\n}\n\n\/\/ Helper function for pause\/unpause-group commands\nfunc setUpdatesPaused(args []string, service *update.Service, out *tabwriter.Writer, paused bool) int {\n\tcall := service.Group.Get(args[0], args[1])\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgroup.UpdatesPaused = paused\n\n\tupdateCall := service.Group.Patch(args[0], args[1], group)\n\tgroup, err = updateCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc updateGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif len(args) != 2 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Get(args[0], args[1])\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcheckUpdatePooling := false\n\tif groupFlags.updateCount != -1 {\n\t\tgroup.UpdateCount = groupFlags.updateCount\n\t\tcheckUpdatePooling = true\n\t}\n\tif groupFlags.updateInterval != -1 {\n\t\tgroup.UpdateInterval = groupFlags.updateInterval\n\t\tcheckUpdatePooling = true\n\t}\n\tif groupFlags.label != \"\" {\n\t\tgroup.Label = groupFlags.label\n\t}\n\tif groupFlags.channel != \"\" {\n\t\tgroup.ChannelId = groupFlags.channel\n\t}\n\n\t\/\/ set update pooling based on other flags\n\t\/\/ this only changes if the user changed a value\n\tif checkUpdatePooling {\n\t\tif group.UpdateCount == 0 && group.UpdateInterval == 0 {\n\t\t\tgroup.UpdatePooling = false\n\t\t} else {\n\t\t\tgroup.UpdatePooling = true\n\t\t}\n\t}\n\n\tupdateCall := service.Group.Patch(args[0], args[1], group)\n\tgroup, err = updateCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n<commit_msg>refactor(group): make all args flags instead of positional<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/coreos-inc\/updatectl\/client\/update\/v1\"\n)\n\nvar (\n\tgroupFlags struct {\n\t\tlabel StringFlag\n\t\tchannel StringFlag\n\t\tappId StringFlag\n\t\tgroupId StringFlag\n\t\tstart int64\n\t\tend int64\n\t\tresolution int64\n\t\tupdateCount int64\n\t\tupdateInterval int64\n\t}\n\n\tcmdListGroups = &Command{\n\t\tName: \"list-groups\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: `List all of the groups that exist including their label, token and update state.`,\n\t\tRun: listGroups,\n\t}\n\tcmdNewGroup = &Command{\n\t\tName: \"new-group\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: `Create a new group.`,\n\t\tRun: newGroup,\n\t}\n\tcmdDeleteGroup = &Command{\n\t\tName: \"delete-group\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: `Delete a group.`,\n\t\tRun: deleteGroup,\n\t}\n\tcmdUpdateGroup = &Command{\n\t\tName: \"update-group\",\n\t\tUsage: \"[OPTION]...\",\n\t\tDescription: `Update an existing group.`,\n\t\tRun: updateGroup,\n\t}\n\tcmdPauseGroup = &Command{\n\t\tName: \"pause-group\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: `Pause a group's updates.`,\n\t\tRun: pauseGroup,\n\t}\n\tcmdUnpauseGroup = &Command{\n\t\tName: \"unpause-group\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: `Unpause a group's updates.`,\n\t\tRun: unpauseGroup,\n\t}\n\tcmdRollupGroupVersions = &Command{\n\t\tName: \"rollup-group-versions\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: \"Rollup versions from events by time.\",\n\t\tRun: rollupGroupVersions,\n\t}\n\tcmdRollupGroupEvents = &Command{\n\t\tName: \"rollup-group-events\",\n\t\tUsage: \"[OPTION]...\",\n\t\tSummary: \"Rollup events from events by time.\",\n\t\tRun: rollupGroupEvents,\n\t}\n)\n\nfunc init() {\n\tcmdListGroups.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t\"Application containing the groups to list.\")\n\n\tcmdDeleteGroup.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t\"Application with group to delete.\")\n\tcmdDeleteGroup.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t\"ID of group to delete.\")\n\n\n\tcmdNewGroup.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t\"Application to add group to.\")\n\tcmdNewGroup.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the new group.\")\n\tcmdNewGroup.Flags.Var(&groupFlags.channel, \"channel\",\n\t\t \"Channel to associate with the group.\")\n\tcmdNewGroup.Flags.Var(&groupFlags.label, \"label\",\n\t\t\"Label describing the new group.\")\n\n\tcmdUpdateGroup.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t \"Application containing the group to update.\")\n\tcmdUpdateGroup.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the group.\")\n\tcmdUpdateGroup.Flags.Var(&groupFlags.label, \"label\",\n\t\t\"Label describing the group\")\n\tcmdUpdateGroup.Flags.Var(&groupFlags.channel, \"channel\",\n\t\t\"Channel to associate with the group.\")\n\tcmdUpdateGroup.Flags.Int64Var(&groupFlags.updateCount, \"update-count\",\n\t\t-1, \"Number of instances per interval\")\n\tcmdUpdateGroup.Flags.Int64Var(&groupFlags.updateInterval,\n\t\t\"update-interval\", -1, \"Interval between updates\")\n\n\tcmdPauseGroup.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t \"Application containing the group to pause.\")\n\tcmdPauseGroup.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the group.\")\n\n\tcmdUnpauseGroup.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t \"Application containing the group to unpause.\")\n\tcmdUnpauseGroup.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the group.\")\n\n\tcmdRollupGroupVersions.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t \"Application containing the group.\")\n\tcmdRollupGroupVersions.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the group.\")\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.resolution,\n\t\t\"resolution\", 60, \"60, 3600 or 86400 seconds\")\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.start, \"start\", 0,\n\t\t\"Start date filter\")\n\tcmdRollupGroupVersions.Flags.Int64Var(&groupFlags.end, \"end\", 0,\n\t\t\"End date filter\")\n\n\tcmdRollupGroupEvents.Flags.Var(&groupFlags.appId, \"app-id\",\n\t\t \"Application containing the group.\")\n\tcmdRollupGroupEvents.Flags.Var(&groupFlags.groupId, \"group-id\",\n\t\t \"ID for the group.\")\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.resolution,\n\t\t\"resolution\", 60, \"60, 3600 or 86400 seconds\")\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.start, \"start\", 0,\n\t\t\"Start date filter\")\n\tcmdRollupGroupEvents.Flags.Int64Var(&groupFlags.end, \"end\", 0,\n\t\t\"End date filter\")\n}\n\nfunc formatGroup(group *update.Group) string {\n\treturn fmt.Sprintf(\"%s\\t%s\\t%s\\t%s\\t%s\\t%v\\t%v\\n\", group.Label, group.AppId, group.ChannelId,\n\t\tgroup.Id, strconv.FormatBool(group.UpdatesPaused), group.UpdateCount, group.UpdateInterval)\n}\n\nfunc listGroups(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tlistCall := service.Group.List(groupFlags.appId.String())\n\tlist, err := listCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Label\\tApp\\tChannel\\tId\\tUpdatesPaused\")\n\tfor _, group := range list.Items {\n\t\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\t}\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc rollupGroupEvents(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil || groupFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Requests.Events.Rollup(\n\t\tgroupFlags.appId.String(),\n\t\tgroupFlags.groupId.String(),\n\t\tgroupFlags.start,\n\t\tgroupFlags.end,\n\t)\n\tcall.Resolution(groupFlags.resolution)\n\tlist, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Version\\tType\\tResult\\tTimestamp\\tCount\")\n\tfor _, i := range list.Items {\n\t\tfor _, j := range i.Values {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t%s\\t%d\\t%d\\n\",\n\t\t\t\ti.Version, i.Type, i.Result, j.Timestamp, j.Count)\n\t\t}\n\t}\n\tout.Flush()\n\treturn OK\n}\n\nfunc rollupGroupVersions(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil || groupFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Requests.Versions.Rollup(\n\t\tgroupFlags.appId.String(),\n\t\tgroupFlags.groupId.String(),\n\t\tgroupFlags.start,\n\t\tgroupFlags.end,\n\t)\n\tcall.Resolution(groupFlags.resolution)\n\tlist, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintln(out, \"Version\\tTimestamp\\tCount\")\n\tfor _, i := range list.Items {\n\t\tfor _, j := range i.Values {\n\t\t\tfmt.Fprintf(out, \"%s\\t%d\\t%d\\n\",\n\t\t\t\ti.Version, j.Timestamp, j.Count)\n\t\t}\n\t}\n\tout.Flush()\n\treturn OK\n}\n\nfunc newGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil ||\n\t\tgroupFlags.groupId.Get() == nil ||\n\t\tgroupFlags.channel.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tgroup := &update.Group{\n\t\tChannelId: groupFlags.channel.String(),\n\t\tId: groupFlags.groupId.String(),\n\t\tLabel: groupFlags.label.String(),\n\t}\n\tcall := service.Group.Insert(groupFlags.appId.String(), group)\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc deleteGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil ||\n\t\tgroupFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Delete(groupFlags.appId.String(), groupFlags.groupId.String())\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc pauseGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\treturn setUpdatesPaused(service, out, true)\n}\n\nfunc unpauseGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\treturn setUpdatesPaused(service, out, false)\n}\n\n\/\/ Helper function for pause\/unpause-group commands\nfunc setUpdatesPaused(service *update.Service, out *tabwriter.Writer, paused bool) int {\n\tif groupFlags.appId.Get() == nil ||\n\t\tgroupFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Get(groupFlags.appId.String(), groupFlags.groupId.String())\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgroup.UpdatesPaused = paused\n\n\tupdateCall := service.Group.Patch(groupFlags.appId.String(), groupFlags.groupId.String(), group)\n\tgroup, err = updateCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n\nfunc updateGroup(args []string, service *update.Service, out *tabwriter.Writer) int {\n\tif groupFlags.appId.Get() == nil ||\n\t\tgroupFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tcall := service.Group.Get(groupFlags.appId.String(), groupFlags.groupId.String())\n\tgroup, err := call.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcheckUpdatePooling := false\n\tif groupFlags.updateCount != -1 {\n\t\tgroup.UpdateCount = groupFlags.updateCount\n\t\tcheckUpdatePooling = true\n\t}\n\tif groupFlags.updateInterval != -1 {\n\t\tgroup.UpdateInterval = groupFlags.updateInterval\n\t\tcheckUpdatePooling = true\n\t}\n\tif groupFlags.label.Get() != nil {\n\t\tgroup.Label = groupFlags.label.String()\n\t}\n\tif groupFlags.channel.Get() != nil {\n\t\tgroup.ChannelId = groupFlags.channel.String()\n\t}\n\n\t\/\/ set update pooling based on other flags\n\t\/\/ this only changes if the user changed a value\n\tif checkUpdatePooling {\n\t\tif group.UpdateCount == 0 && group.UpdateInterval == 0 {\n\t\t\tgroup.UpdatePooling = false\n\t\t} else {\n\t\t\tgroup.UpdatePooling = true\n\t\t}\n\t}\n\n\tupdateCall := service.Group.Patch(groupFlags.appId.String(), groupFlags.groupId.String(), group)\n\tgroup, err = updateCall.Do()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Fprintf(out, \"%s\", formatGroup(group))\n\n\tout.Flush()\n\treturn OK\n}\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"runtime\"\n\n\t\"github.com\/adams-sarah\/test2doc\/doc\/parse\"\n)\n\ntype ResponseWriter struct {\n\tHandlerInfo HandlerInfo\n\tURLVars map[string]string\n\tW *httptest.ResponseRecorder\n}\n\ntype HandlerInfo struct {\n\tFileName string\n\tFuncName string\n}\n\nfunc NewResponseWriter(w *httptest.ResponseRecorder) *ResponseWriter {\n\treturn &ResponseWriter{\n\t\tW: w,\n\t}\n}\n\nfunc (rw *ResponseWriter) Header() http.Header {\n\treturn rw.W.Header()\n}\n\nfunc (rw *ResponseWriter) Write(b []byte) (int, error) {\n\trw.setHandlerInfo()\n\treturn rw.W.Write(b)\n}\n\nfunc (rw *ResponseWriter) WriteHeader(c int) {\n\trw.W.WriteHeader(c)\n}\n\nfunc (rw *ResponseWriter) setHandlerInfo() {\n\ti := 1\n\tmax := 15\n\n\tvar pc uintptr\n\tvar file, fnName string\n\tvar ok bool\n\n\t\/\/ iterate until we find a func in this pkg (the handler)\n\tfor i < max {\n\t\tpc, file, _, ok = runtime.Caller(i)\n\t\tif !ok {\n\t\t\tlog.Println(\"test2doc: setHandlerInfo: !ok\")\n\t\t\treturn\n\t\t}\n\n\t\tfn := runtime.FuncForPC(pc)\n\t\tfnName = fn.Name()\n\n\t\tif parse.IsFuncInPkg(fnName) {\n\t\t\tbreak\n\t\t}\n\n\t\ti++\n\t}\n\n\trw.HandlerInfo = HandlerInfo{\n\t\tFileName: file,\n\t\tFuncName: fnName,\n\t}\n}\n<commit_msg>Take handler info from the top most function in the package<commit_after>package test\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"runtime\"\n\n\t\"github.com\/adams-sarah\/test2doc\/doc\/parse\"\n)\n\ntype ResponseWriter struct {\n\tHandlerInfo HandlerInfo\n\tURLVars map[string]string\n\tW *httptest.ResponseRecorder\n}\n\ntype HandlerInfo struct {\n\tFileName string\n\tFuncName string\n}\n\nfunc NewResponseWriter(w *httptest.ResponseRecorder) *ResponseWriter {\n\treturn &ResponseWriter{\n\t\tW: w,\n\t}\n}\n\nfunc (rw *ResponseWriter) Header() http.Header {\n\treturn rw.W.Header()\n}\n\nfunc (rw *ResponseWriter) Write(b []byte) (int, error) {\n\trw.setHandlerInfo()\n\treturn rw.W.Write(b)\n}\n\nfunc (rw *ResponseWriter) WriteHeader(c int) {\n\trw.W.WriteHeader(c)\n}\n\nfunc (rw *ResponseWriter) setHandlerInfo() {\n\ti := 1\n\tmax := 15\n\n\tvar pc uintptr\n\tvar file, fnName string\n\tvar ok, fnInPkg, sawPkg bool\n\n\t\/\/ iterate until we find the top level func in this pkg (the handler)\n\tfor i < max {\n\t\tpc, file, _, ok = runtime.Caller(i)\n\t\tif !ok {\n\t\t\tlog.Println(\"test2doc: setHandlerInfo: !ok\")\n\t\t\treturn\n\t\t}\n\n\t\tfn := runtime.FuncForPC(pc)\n\t\tfnName = fn.Name()\n\n\t\tfnInPkg = parse.IsFuncInPkg(fnName)\n\t\tif sawPkg && !fnInPkg {\n\t\t\tbreak\n\t\t}\n\n\t\tsawPkg = fnInPkg\n\t\ti++\n\t}\n\n\trw.HandlerInfo = HandlerInfo{\n\t\tFileName: file,\n\t\tFuncName: fnName,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n)\n\nfunc main() { \n\n\tfmt.Println(\"Hello, 世界\")\n\n}\n<commit_msg>Delete hello.go<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2015 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage decode\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n)\n\n\/\/ PacketConfig specifies packet-specific radio configuration.\ntype PacketConfig struct {\n\tDataRate int\n\n\tBlockSize, BlockSize2 int\n\tSymbolLength, SymbolLength2 int\n\tSampleRate int\n\n\tPreambleSymbols, PacketSymbols int\n\tPreambleLength, PacketLength int\n\tPreamble string\n\n\tBufferLength int\n\n\tCenterFreq uint32\n}\n\nfunc (cfg PacketConfig) Decimate(decimation int) PacketConfig {\n\tcfg.BlockSize \/= decimation\n\tcfg.BlockSize2 \/= decimation\n\tcfg.SymbolLength \/= decimation\n\tcfg.SymbolLength2 \/= decimation\n\tcfg.SampleRate \/= decimation\n\tcfg.DataRate \/= decimation\n\n\tcfg.PreambleLength \/= decimation\n\tcfg.PacketLength \/= decimation\n\n\tcfg.BufferLength \/= decimation\n\n\treturn cfg\n}\n\nfunc (d Decoder) Log() {\n\tif d.Decimation != 1 {\n\t\tlog.Printf(\"BlockSize: %d|%d\\n\", d.Cfg.BlockSize, d.DecCfg.BlockSize)\n\t\tlog.Println(\"CenterFreq:\", d.Cfg.CenterFreq)\n\t\tlog.Printf(\"SampleRate: %d|%d\\n\", d.Cfg.SampleRate, d.DecCfg.SampleRate)\n\t\tlog.Printf(\"DataRate: %d|%d\\n\", d.Cfg.DataRate, d.DecCfg.DataRate)\n\t\tlog.Printf(\"SymbolLength: %d|%d\\n\", d.Cfg.SymbolLength, d.DecCfg.SymbolLength)\n\t\tlog.Println(\"PreambleSymbols:\", d.Cfg.PreambleSymbols)\n\t\tlog.Printf(\"PreambleLength: %d|%d\\n\", d.Cfg.PreambleLength, d.DecCfg.PreambleLength)\n\t\tlog.Println(\"PacketSymbols:\", d.Cfg.PacketSymbols)\n\t\tlog.Printf(\"PacketLength: %d|%d\\n\", d.Cfg.PacketLength, d.DecCfg.PacketLength)\n\t\tlog.Println(\"Preamble:\", d.Cfg.Preamble)\n\n\t\tif d.Cfg.SymbolLength%d.Decimation != 0 {\n\t\t\tlog.Println(\"Warning: decimated symbol length is non-integral, sensitivity may be poor\")\n\t\t}\n\n\t\tif d.DecCfg.SymbolLength < 3 {\n\t\t\tlog.Fatal(\"Error: illegal decimation factor, choose a smaller factor\")\n\t\t}\n\n\t\treturn\n\t}\n\n\tlog.Println(\"CenterFreq:\", d.Cfg.CenterFreq)\n\tlog.Println(\"SampleRate:\", d.Cfg.SampleRate)\n\tlog.Println(\"DataRate:\", d.Cfg.DataRate)\n\tlog.Println(\"SymbolLength:\", d.Cfg.SymbolLength)\n\tlog.Println(\"PreambleSymbols:\", d.Cfg.PreambleSymbols)\n\tlog.Println(\"PreambleLength:\", d.Cfg.PreambleLength)\n\tlog.Println(\"PacketSymbols:\", d.Cfg.PacketSymbols)\n\tlog.Println(\"PacketLength:\", d.Cfg.PacketLength)\n\tlog.Println(\"Preamble:\", d.Cfg.Preamble)\n}\n\n\/\/ Decoder contains buffers and radio configuration.\ntype Decoder struct {\n\tCfg PacketConfig\n\n\tDecimation int\n\tDecCfg PacketConfig\n\n\tIQ []byte\n\tSignal []float64\n\tFiltered []float64\n\tQuantized []byte\n\n\tcsum []float64\n\tdemod Demodulator\n\n\tpreamble []byte\n\tslices [][]byte\n\n\tpreambleFinder *byteFinder\n\n\tpkt []byte\n}\n\n\/\/ Create a new decoder with the given packet configuration.\nfunc NewDecoder(cfg PacketConfig, decimation int) (d Decoder) {\n\td.Cfg = cfg\n\n\td.Cfg.SymbolLength2 = d.Cfg.SymbolLength << 1\n\td.Cfg.SampleRate = d.Cfg.DataRate * d.Cfg.SymbolLength\n\n\td.Cfg.PreambleLength = d.Cfg.PreambleSymbols * d.Cfg.SymbolLength2\n\td.Cfg.PacketLength = d.Cfg.PacketSymbols * d.Cfg.SymbolLength2\n\n\td.Cfg.BlockSize = NextPowerOf2(d.Cfg.PreambleLength)\n\td.Cfg.BlockSize2 = d.Cfg.BlockSize << 1\n\n\td.Cfg.BufferLength = d.Cfg.PacketLength + d.Cfg.BlockSize\n\n\td.Decimation = decimation\n\td.DecCfg = d.Cfg.Decimate(d.Decimation)\n\n\t\/\/ Allocate necessary buffers.\n\td.IQ = make([]byte, d.Cfg.BufferLength<<1)\n\td.Signal = make([]float64, d.DecCfg.BufferLength)\n\td.Filtered = make([]float64, d.DecCfg.BufferLength)\n\td.Quantized = make([]byte, d.DecCfg.BufferLength)\n\n\td.csum = make([]float64, (d.DecCfg.PacketLength - d.DecCfg.SymbolLength2 + 1))\n\n\t\/\/ Calculate magnitude lookup table specified by -fastmag flag.\n\td.demod = NewSqrtMagLUT()\n\n\t\/\/ Pre-calculate a byte-slice version of the preamble for searching.\n\td.preamble = make([]byte, len(d.Cfg.Preamble))\n\tfor idx := range d.Cfg.Preamble {\n\t\tif d.Cfg.Preamble[idx] == '1' {\n\t\t\td.preamble[idx] = 1\n\t\t}\n\t}\n\n\t\/\/ Slice quantized sample buffer to make searching for the preamble more\n\t\/\/ memory local. Pre-allocate a flat buffer so memory is contiguous and\n\t\/\/ assign slices to the buffer.\n\td.slices = make([][]byte, d.DecCfg.SymbolLength2)\n\tflat := make([]byte, d.DecCfg.BlockSize2-(d.DecCfg.BlockSize2%d.DecCfg.SymbolLength2))\n\n\tsymbolsPerBlock := d.DecCfg.BlockSize2 \/ d.DecCfg.SymbolLength2\n\tfor symbolOffset := range d.slices {\n\t\tlower := symbolOffset * symbolsPerBlock\n\t\tupper := (symbolOffset + 1) * symbolsPerBlock\n\t\td.slices[symbolOffset] = flat[lower:upper]\n\t}\n\n\td.preambleFinder = makeByteFinder(d.preamble)\n\n\t\/\/ Signal up to the final stage is 1-bit per byte. Allocate a buffer to\n\t\/\/ store packed version 8-bits per byte.\n\td.pkt = make([]byte, (d.DecCfg.PacketSymbols+7)>>3)\n\n\treturn\n}\n\n\/\/ Decode accepts a sample block and performs various DSP techniques to extract a packet.\nfunc (d Decoder) Decode(input []byte) []int {\n\t\/\/ Shift buffers to append new block.\n\tcopy(d.IQ, d.IQ[d.Cfg.BlockSize<<1:])\n\tcopy(d.Signal, d.Signal[d.DecCfg.BlockSize:])\n\tcopy(d.Filtered, d.Filtered[d.DecCfg.BlockSize:])\n\tcopy(d.Quantized, d.Quantized[d.DecCfg.BlockSize:])\n\tcopy(d.IQ[d.Cfg.PacketLength<<1:], input[:])\n\n\tiqBlock := d.IQ[d.Cfg.PacketLength<<1:]\n\tsignalBlock := d.Signal[d.DecCfg.PacketLength:]\n\n\t\/\/ Compute the magnitude of the new block.\n\td.demod.Execute(iqBlock, signalBlock)\n\n\tsignalBlock = d.Signal[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:]\n\tfilterBlock := d.Filtered[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:]\n\n\t\/\/ Perform matched filter on new block.\n\td.Filter(signalBlock, filterBlock)\n\n\t\/\/ Perform bit-decision on new block.\n\tQuantize(filterBlock, d.Quantized[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:])\n\n\t\/\/ Pack the quantized signal into slices for searching.\n\td.Pack(d.Quantized[:d.DecCfg.BlockSize2])\n\n\t\/\/ Return a list of indexes the preamble exists at.\n\treturn d.Search()\n}\n\n\/\/ A Demodulator knows how to demodulate an array of uint8 IQ samples into an\n\/\/ array of float64 samples.\ntype Demodulator interface {\n\tExecute([]byte, []float64)\n}\n\n\/\/ Default Magnitude Lookup Table\ntype MagLUT []float64\n\n\/\/ Pre-computes normalized squares with most common DC offset for rtl-sdr dongles.\nfunc NewSqrtMagLUT() (lut MagLUT) {\n\tlut = make([]float64, 0x100)\n\tfor idx := range lut {\n\t\tlut[idx] = 127.4 - float64(idx)\n\t\tlut[idx] *= lut[idx]\n\t}\n\treturn\n}\n\n\/\/ Calculates complex magnitude on given IQ stream writing result to output.\nfunc (lut MagLUT) Execute(input []byte, output []float64) {\n\tdecIdx := 0\n\tdec := (len(input) \/ len(output))\n\n\tfor idx := 0; decIdx < len(output); idx += dec {\n\t\toutput[decIdx] = math.Sqrt(lut[input[idx]] + lut[input[idx+1]])\n\t\tdecIdx++\n\t}\n}\n\n\/\/ Matched filter for Manchester coded signals. Output signal's sign at each\n\/\/ sample determines the bit-value since Manchester symbols have odd symmetry.\nfunc (d Decoder) Filter(input, output []float64) {\n\t\/\/ Computing the cumulative summation over the signal simplifies\n\t\/\/ filtering to the difference of a pair of subtractions.\n\tvar sum float64\n\tfor idx, v := range input {\n\t\tsum += v\n\t\td.csum[idx+1] = sum\n\t}\n\n\t\/\/ Filter result is difference of summation of lower and upper symbols.\n\tlower := d.csum[d.DecCfg.SymbolLength:]\n\tupper := d.csum[d.DecCfg.SymbolLength2:]\n\tn := len(input) - d.DecCfg.SymbolLength2\n\tfor idx := 0; idx < n; idx++ {\n\t\toutput[idx] = (lower[idx] - d.csum[idx]) - (upper[idx] - lower[idx])\n\t}\n\n\treturn\n}\n\n\/\/ Bit-value is determined by the sign of each sample after filtering.\nfunc Quantize(input []float64, output []byte) {\n\tfor idx, val := range input {\n\t\toutput[idx] = byte(math.Float64bits(val)>>63) ^ 0x01\n\t}\n\n\treturn\n}\n\n\/\/ Packs quantized signal into slices such that the first rank represents\n\/\/ sample offsets and the second represents the value of each symbol from the\n\/\/ given offset.\n\/\/\n\/\/ Transforms:\n\/\/ <--Sym1--><--Sym2--><--Sym3--><--Sym4--><--Sym5--><--Sym6--><--Sym7--><--Sym8-->\n\/\/ <12345678><12345678><12345678><12345678><12345678><12345678><12345678><12345678>\n\/\/ to:\n\/\/ <11111111><22222222><33333333><44444444><55555555><66666666><77777777><88888888>\nfunc (d *Decoder) Pack(input []byte) {\n\tfor symbolOffset, slice := range d.slices {\n\t\tfor symbolIdx := range slice {\n\t\t\tslice[symbolIdx] = input[symbolIdx*d.DecCfg.SymbolLength2+symbolOffset]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ For each sample offset look for the preamble. Return a list of indexes the\n\/\/ preamble is found at. Indexes are absolute in the unsliced quantized\n\/\/ buffer.\nfunc (d *Decoder) Search() (indexes []int) {\n\tfor symbolOffset, slice := range d.slices {\n\t\toffset := 0\n\t\tidx := 0\n\t\tfor {\n\t\t\tidx = d.preambleFinder.next(slice[offset:])\n\t\t\tif idx != -1 {\n\t\t\t\tindexes = append(indexes, (offset+idx)*d.Cfg.SymbolLength2+symbolOffset)\n\t\t\t\toffset += idx + 1\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Given a list of indeces the preamble exists at, sample the appropriate bits\n\/\/ of the signal's bit-decision. Pack bits of each index into an array of byte\n\/\/ arrays and return.\nfunc (d Decoder) Slice(indices []int) (pkts [][]byte) {\n\t\/\/ We will likely find multiple instances of the message so only keep\n\t\/\/ track of unique instances.\n\tseen := make(map[string]bool)\n\n\t\/\/ For each of the indices the preamble exists at.\n\tfor _, qIdx := range indices {\n\t\t\/\/ Check that we're still within the first sample block. We'll catch\n\t\t\/\/ the message on the next sample block otherwise.\n\t\tif qIdx > d.DecCfg.BlockSize {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Packet is 1 bit per byte, pack to 8-bits per byte.\n\t\tfor pIdx := 0; pIdx < d.DecCfg.PacketSymbols; pIdx++ {\n\t\t\td.pkt[pIdx>>3] <<= 1\n\t\t\td.pkt[pIdx>>3] |= d.Quantized[qIdx+(pIdx*d.DecCfg.SymbolLength2)]\n\t\t}\n\n\t\t\/\/ Store the packet in the seen map and append to the packet list.\n\t\tpktStr := fmt.Sprintf(\"%02X\", d.pkt)\n\t\tif !seen[pktStr] {\n\t\t\tseen[pktStr] = true\n\t\t\tpkts = append(pkts, make([]byte, len(d.pkt)))\n\t\t\tcopy(pkts[len(pkts)-1], d.pkt)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc NextPowerOf2(v int) int {\n\treturn 1 << uint(math.Ceil(math.Log2(float64(v))))\n}\n<commit_msg>Use decimated packet configuration in preamble search. Fixes #47.<commit_after>\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2015 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage decode\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n)\n\n\/\/ PacketConfig specifies packet-specific radio configuration.\ntype PacketConfig struct {\n\tDataRate int\n\n\tBlockSize, BlockSize2 int\n\tSymbolLength, SymbolLength2 int\n\tSampleRate int\n\n\tPreambleSymbols, PacketSymbols int\n\tPreambleLength, PacketLength int\n\tPreamble string\n\n\tBufferLength int\n\n\tCenterFreq uint32\n}\n\nfunc (cfg PacketConfig) Decimate(decimation int) PacketConfig {\n\tcfg.BlockSize \/= decimation\n\tcfg.BlockSize2 \/= decimation\n\tcfg.SymbolLength \/= decimation\n\tcfg.SymbolLength2 \/= decimation\n\tcfg.SampleRate \/= decimation\n\tcfg.DataRate \/= decimation\n\n\tcfg.PreambleLength \/= decimation\n\tcfg.PacketLength \/= decimation\n\n\tcfg.BufferLength \/= decimation\n\n\treturn cfg\n}\n\nfunc (d Decoder) Log() {\n\tif d.Decimation != 1 {\n\t\tlog.Printf(\"BlockSize: %d|%d\\n\", d.Cfg.BlockSize, d.DecCfg.BlockSize)\n\t\tlog.Println(\"CenterFreq:\", d.Cfg.CenterFreq)\n\t\tlog.Printf(\"SampleRate: %d|%d\\n\", d.Cfg.SampleRate, d.DecCfg.SampleRate)\n\t\tlog.Printf(\"DataRate: %d|%d\\n\", d.Cfg.DataRate, d.DecCfg.DataRate)\n\t\tlog.Printf(\"SymbolLength: %d|%d\\n\", d.Cfg.SymbolLength, d.DecCfg.SymbolLength)\n\t\tlog.Println(\"PreambleSymbols:\", d.Cfg.PreambleSymbols)\n\t\tlog.Printf(\"PreambleLength: %d|%d\\n\", d.Cfg.PreambleLength, d.DecCfg.PreambleLength)\n\t\tlog.Println(\"PacketSymbols:\", d.Cfg.PacketSymbols)\n\t\tlog.Printf(\"PacketLength: %d|%d\\n\", d.Cfg.PacketLength, d.DecCfg.PacketLength)\n\t\tlog.Println(\"Preamble:\", d.Cfg.Preamble)\n\n\t\tif d.Cfg.SymbolLength%d.Decimation != 0 {\n\t\t\tlog.Println(\"Warning: decimated symbol length is non-integral, sensitivity may be poor\")\n\t\t}\n\n\t\tif d.DecCfg.SymbolLength < 3 {\n\t\t\tlog.Fatal(\"Error: illegal decimation factor, choose a smaller factor\")\n\t\t}\n\n\t\treturn\n\t}\n\n\tlog.Println(\"CenterFreq:\", d.Cfg.CenterFreq)\n\tlog.Println(\"SampleRate:\", d.Cfg.SampleRate)\n\tlog.Println(\"DataRate:\", d.Cfg.DataRate)\n\tlog.Println(\"SymbolLength:\", d.Cfg.SymbolLength)\n\tlog.Println(\"PreambleSymbols:\", d.Cfg.PreambleSymbols)\n\tlog.Println(\"PreambleLength:\", d.Cfg.PreambleLength)\n\tlog.Println(\"PacketSymbols:\", d.Cfg.PacketSymbols)\n\tlog.Println(\"PacketLength:\", d.Cfg.PacketLength)\n\tlog.Println(\"Preamble:\", d.Cfg.Preamble)\n}\n\n\/\/ Decoder contains buffers and radio configuration.\ntype Decoder struct {\n\tCfg PacketConfig\n\n\tDecimation int\n\tDecCfg PacketConfig\n\n\tIQ []byte\n\tSignal []float64\n\tFiltered []float64\n\tQuantized []byte\n\n\tcsum []float64\n\tdemod Demodulator\n\n\tpreamble []byte\n\tslices [][]byte\n\n\tpreambleFinder *byteFinder\n\n\tpkt []byte\n}\n\n\/\/ Create a new decoder with the given packet configuration.\nfunc NewDecoder(cfg PacketConfig, decimation int) (d Decoder) {\n\td.Cfg = cfg\n\n\td.Cfg.SymbolLength2 = d.Cfg.SymbolLength << 1\n\td.Cfg.SampleRate = d.Cfg.DataRate * d.Cfg.SymbolLength\n\n\td.Cfg.PreambleLength = d.Cfg.PreambleSymbols * d.Cfg.SymbolLength2\n\td.Cfg.PacketLength = d.Cfg.PacketSymbols * d.Cfg.SymbolLength2\n\n\td.Cfg.BlockSize = NextPowerOf2(d.Cfg.PreambleLength)\n\td.Cfg.BlockSize2 = d.Cfg.BlockSize << 1\n\n\td.Cfg.BufferLength = d.Cfg.PacketLength + d.Cfg.BlockSize\n\n\td.Decimation = decimation\n\td.DecCfg = d.Cfg.Decimate(d.Decimation)\n\n\t\/\/ Allocate necessary buffers.\n\td.IQ = make([]byte, d.Cfg.BufferLength<<1)\n\td.Signal = make([]float64, d.DecCfg.BufferLength)\n\td.Filtered = make([]float64, d.DecCfg.BufferLength)\n\td.Quantized = make([]byte, d.DecCfg.BufferLength)\n\n\td.csum = make([]float64, (d.DecCfg.PacketLength - d.DecCfg.SymbolLength2 + 1))\n\n\t\/\/ Calculate magnitude lookup table specified by -fastmag flag.\n\td.demod = NewSqrtMagLUT()\n\n\t\/\/ Pre-calculate a byte-slice version of the preamble for searching.\n\td.preamble = make([]byte, len(d.Cfg.Preamble))\n\tfor idx := range d.Cfg.Preamble {\n\t\tif d.Cfg.Preamble[idx] == '1' {\n\t\t\td.preamble[idx] = 1\n\t\t}\n\t}\n\n\t\/\/ Slice quantized sample buffer to make searching for the preamble more\n\t\/\/ memory local. Pre-allocate a flat buffer so memory is contiguous and\n\t\/\/ assign slices to the buffer.\n\td.slices = make([][]byte, d.DecCfg.SymbolLength2)\n\tflat := make([]byte, d.DecCfg.BlockSize2-(d.DecCfg.BlockSize2%d.DecCfg.SymbolLength2))\n\n\tsymbolsPerBlock := d.DecCfg.BlockSize2 \/ d.DecCfg.SymbolLength2\n\tfor symbolOffset := range d.slices {\n\t\tlower := symbolOffset * symbolsPerBlock\n\t\tupper := (symbolOffset + 1) * symbolsPerBlock\n\t\td.slices[symbolOffset] = flat[lower:upper]\n\t}\n\n\td.preambleFinder = makeByteFinder(d.preamble)\n\n\t\/\/ Signal up to the final stage is 1-bit per byte. Allocate a buffer to\n\t\/\/ store packed version 8-bits per byte.\n\td.pkt = make([]byte, (d.DecCfg.PacketSymbols+7)>>3)\n\n\treturn\n}\n\n\/\/ Decode accepts a sample block and performs various DSP techniques to extract a packet.\nfunc (d Decoder) Decode(input []byte) []int {\n\t\/\/ Shift buffers to append new block.\n\tcopy(d.IQ, d.IQ[d.Cfg.BlockSize<<1:])\n\tcopy(d.Signal, d.Signal[d.DecCfg.BlockSize:])\n\tcopy(d.Filtered, d.Filtered[d.DecCfg.BlockSize:])\n\tcopy(d.Quantized, d.Quantized[d.DecCfg.BlockSize:])\n\tcopy(d.IQ[d.Cfg.PacketLength<<1:], input[:])\n\n\tiqBlock := d.IQ[d.Cfg.PacketLength<<1:]\n\tsignalBlock := d.Signal[d.DecCfg.PacketLength:]\n\n\t\/\/ Compute the magnitude of the new block.\n\td.demod.Execute(iqBlock, signalBlock)\n\n\tsignalBlock = d.Signal[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:]\n\tfilterBlock := d.Filtered[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:]\n\n\t\/\/ Perform matched filter on new block.\n\td.Filter(signalBlock, filterBlock)\n\n\t\/\/ Perform bit-decision on new block.\n\tQuantize(filterBlock, d.Quantized[d.DecCfg.PacketLength-d.DecCfg.SymbolLength2:])\n\n\t\/\/ Pack the quantized signal into slices for searching.\n\td.Pack(d.Quantized[:d.DecCfg.BlockSize2])\n\n\t\/\/ Return a list of indexes the preamble exists at.\n\treturn d.Search()\n}\n\n\/\/ A Demodulator knows how to demodulate an array of uint8 IQ samples into an\n\/\/ array of float64 samples.\ntype Demodulator interface {\n\tExecute([]byte, []float64)\n}\n\n\/\/ Default Magnitude Lookup Table\ntype MagLUT []float64\n\n\/\/ Pre-computes normalized squares with most common DC offset for rtl-sdr dongles.\nfunc NewSqrtMagLUT() (lut MagLUT) {\n\tlut = make([]float64, 0x100)\n\tfor idx := range lut {\n\t\tlut[idx] = 127.4 - float64(idx)\n\t\tlut[idx] *= lut[idx]\n\t}\n\treturn\n}\n\n\/\/ Calculates complex magnitude on given IQ stream writing result to output.\nfunc (lut MagLUT) Execute(input []byte, output []float64) {\n\tdecIdx := 0\n\tdec := (len(input) \/ len(output))\n\n\tfor idx := 0; decIdx < len(output); idx += dec {\n\t\toutput[decIdx] = math.Sqrt(lut[input[idx]] + lut[input[idx+1]])\n\t\tdecIdx++\n\t}\n}\n\n\/\/ Matched filter for Manchester coded signals. Output signal's sign at each\n\/\/ sample determines the bit-value since Manchester symbols have odd symmetry.\nfunc (d Decoder) Filter(input, output []float64) {\n\t\/\/ Computing the cumulative summation over the signal simplifies\n\t\/\/ filtering to the difference of a pair of subtractions.\n\tvar sum float64\n\tfor idx, v := range input {\n\t\tsum += v\n\t\td.csum[idx+1] = sum\n\t}\n\n\t\/\/ Filter result is difference of summation of lower and upper symbols.\n\tlower := d.csum[d.DecCfg.SymbolLength:]\n\tupper := d.csum[d.DecCfg.SymbolLength2:]\n\tn := len(input) - d.DecCfg.SymbolLength2\n\tfor idx := 0; idx < n; idx++ {\n\t\toutput[idx] = (lower[idx] - d.csum[idx]) - (upper[idx] - lower[idx])\n\t}\n\n\treturn\n}\n\n\/\/ Bit-value is determined by the sign of each sample after filtering.\nfunc Quantize(input []float64, output []byte) {\n\tfor idx, val := range input {\n\t\toutput[idx] = byte(math.Float64bits(val)>>63) ^ 0x01\n\t}\n\n\treturn\n}\n\n\/\/ Packs quantized signal into slices such that the first rank represents\n\/\/ sample offsets and the second represents the value of each symbol from the\n\/\/ given offset.\n\/\/\n\/\/ Transforms:\n\/\/ <--Sym1--><--Sym2--><--Sym3--><--Sym4--><--Sym5--><--Sym6--><--Sym7--><--Sym8-->\n\/\/ <12345678><12345678><12345678><12345678><12345678><12345678><12345678><12345678>\n\/\/ to:\n\/\/ <11111111><22222222><33333333><44444444><55555555><66666666><77777777><88888888>\nfunc (d *Decoder) Pack(input []byte) {\n\tfor symbolOffset, slice := range d.slices {\n\t\tfor symbolIdx := range slice {\n\t\t\tslice[symbolIdx] = input[symbolIdx*d.DecCfg.SymbolLength2+symbolOffset]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ For each sample offset look for the preamble. Return a list of indexes the\n\/\/ preamble is found at. Indexes are absolute in the unsliced quantized\n\/\/ buffer.\nfunc (d *Decoder) Search() (indexes []int) {\n\tfor symbolOffset, slice := range d.slices {\n\t\toffset := 0\n\t\tidx := 0\n\t\tfor {\n\t\t\tidx = d.preambleFinder.next(slice[offset:])\n\t\t\tif idx != -1 {\n\t\t\t\tindexes = append(indexes, (offset+idx)*d.DecCfg.SymbolLength2+symbolOffset)\n\t\t\t\toffset += idx + 1\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Given a list of indeces the preamble exists at, sample the appropriate bits\n\/\/ of the signal's bit-decision. Pack bits of each index into an array of byte\n\/\/ arrays and return.\nfunc (d Decoder) Slice(indices []int) (pkts [][]byte) {\n\t\/\/ We will likely find multiple instances of the message so only keep\n\t\/\/ track of unique instances.\n\tseen := make(map[string]bool)\n\n\t\/\/ For each of the indices the preamble exists at.\n\tfor _, qIdx := range indices {\n\t\t\/\/ Check that we're still within the first sample block. We'll catch\n\t\t\/\/ the message on the next sample block otherwise.\n\t\tif qIdx > d.DecCfg.BlockSize {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Packet is 1 bit per byte, pack to 8-bits per byte.\n\t\tfor pIdx := 0; pIdx < d.DecCfg.PacketSymbols; pIdx++ {\n\t\t\td.pkt[pIdx>>3] <<= 1\n\t\t\td.pkt[pIdx>>3] |= d.Quantized[qIdx+(pIdx*d.DecCfg.SymbolLength2)]\n\t\t}\n\n\t\t\/\/ Store the packet in the seen map and append to the packet list.\n\t\tpktStr := fmt.Sprintf(\"%02X\", d.pkt)\n\t\tif !seen[pktStr] {\n\t\t\tseen[pktStr] = true\n\t\t\tpkts = append(pkts, make([]byte, len(d.pkt)))\n\t\t\tcopy(pkts[len(pkts)-1], d.pkt)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc NextPowerOf2(v int) int {\n\treturn 1 << uint(math.Ceil(math.Log2(float64(v))))\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"github.com\/BurntSushi\/toml\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ CookieConfig struct used for the cookies configuration options\ntype CookieConfig struct {\n\tName string\n\tMaxAge int\n\tHashKey string\n\tBlockKey string\n}\n\n\/\/ ShopConfig struct used for the shop configuration options\ntype ShopConfig struct {\n\tEnabled bool\n}\n\n\/\/ PluginConfig struct used for the plugin listener\ntype PluginConfig struct {\n\tEnabled bool\n\tUsername string\n\tPassword string\n\tOrigin string\n}\n\n\/\/ RateLimiterConfig struct used for the rate limiting configuration options\ntype RateLimiterConfig struct {\n\tNumber int64\n\tTime time.Duration\n}\n\n\/\/ CacheConfig struct used for the cache configuration options\ntype CacheConfig struct {\n\tDefault time.Duration\n\tPurge time.Duration\n}\n\n\/\/ SSLConfig struct used for the ssl configuration options\ntype SSLConfig struct {\n\tEnabled bool\n\tAuto bool\n\tProxy bool\n\tCert string\n\tKey string\n}\n\n\/\/ MailConfig struct used for the mail configuration options\ntype MailConfig struct {\n\tEnabled bool\n\tServer string\n\tPort int\n\tUsername string\n\tPassword string\n}\n\n\/\/ PaygolConfig struct used for the paygol configuration options\ntype PaygolConfig struct {\n\tEnabled bool\n\tService int\n\tCurrency string\n\tLanguage string\n}\n\n\/\/ PayPalConfig struct used for the paypal configuration options\ntype PayPalConfig struct {\n\tEnabled bool\n\tPublicKey string\n\tSecretKey string\n\tCurrency string\n\tSandBox bool\n}\n\n\/\/ FortumoConfig struct used for the fortumo configuration options\ntype FortumoConfig struct {\n\tEnabled bool\n\tService string\n\tSecret string\n}\n\n\/\/ ContentSecurityPolicyType struct used for CSP fields\ntype ContentSecurityPolicyType struct {\n\tDefault []string\n\tSRC []string\n}\n\n\/\/ ContentSecurityPolicyConfig struct used for CSP headers\ntype ContentSecurityPolicyConfig struct {\n\tDefault []string\n\tEnabled bool\n\tFrame ContentSecurityPolicyType\n\tScript ContentSecurityPolicyType\n\tFont ContentSecurityPolicyType\n\tImage ContentSecurityPolicyType\n\tConnect ContentSecurityPolicyType\n\tStyle ContentSecurityPolicyType\n}\n\n\/\/ SecurityConfig struct used for the security of the application\ntype SecurityConfig struct {\n\tNonceEnabled bool\n\tXSS string\n\tSTS string\n\tFrame string\n\tContentType string\n\tReferrerPolicy string\n\tCrossDomainPolicy string\n\tCSP ContentSecurityPolicyConfig\n}\n\n\/\/ Configuration struct used for the main Castro config file TOML file\ntype Configuration struct {\n\tCheckUpdates bool\n\tTemplate string\n\tMode string\n\tPort int\n\tURL string\n\tDatapack string\n\tSecurity SecurityConfig\n\tPlugin PluginConfig\n\tMail MailConfig\n\tCaptcha CaptchaConfig\n\tSSL SSLConfig\n\tPayPal PayPalConfig\n\tPayGol PaygolConfig\n\tFortumo FortumoConfig\n\tShop ShopConfig\n\tCookies CookieConfig\n\tCache CacheConfig\n\tRateLimit RateLimiterConfig\n\tCustom map[string]interface{}\n}\n\n\/\/ ConfigurationFile struct used to store a configuration pointer\ntype ConfigurationFile struct {\n\trw sync.RWMutex\n\tConfiguration *Configuration\n}\n\nvar (\n\t\/\/ Config holds the main configuration file\n\tConfig *ConfigurationFile\n\n\t\/\/ VERSION current version of the build\n\tVERSION string\n\n\t\/\/ BUILD_DATE date of the build\n\tBUILD_DATE string\n)\n\nfunc init() {\n\tConfig = &ConfigurationFile{}\n\tConfig.Configuration = &Configuration{}\n}\n\n\/\/ LoadConfig loads the configuration file to the given interface pointer\nfunc LoadConfig(path string) error {\n\t\/\/ Lock mutex\n\tConfig.rw.Lock()\n\tdefer Config.rw.Unlock()\n\n\t\/\/ Decode the given file to the given interface\n\tif _, err := toml.DecodeFile(path, Config.Configuration); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IsDev checks if castro is running on development mode\nfunc (c Configuration) IsDev() bool {\n\treturn c.Mode == \"dev\"\n}\n\n\/\/ IsLog checks if castro is running on log mode\nfunc (c Configuration) IsLog() bool {\n\treturn c.Mode == \"log\"\n}\n\n\/\/ CSP returns a valid Content-Security-Policy header value\nfunc (c Configuration) CSP() string {\n\t\/\/ Set default-src field\n\tbuff := getCSPField(\"default-src\", c.Security.CSP.Default, nil)\n\n\t\/\/ Set frame-src field\n\tbuff += getCSPField(\"frame-src\", c.Security.CSP.Frame.Default, c.Security.CSP.Frame.SRC)\n\n\t\/\/ Set script-src field\n\tbuff += getCSPField(\"script-src\", c.Security.CSP.Script.Default, c.Security.CSP.Script.SRC)\n\n\t\/\/ Set font-src field\n\tbuff += getCSPField(\"font-src\", c.Security.CSP.Font.Default, c.Security.CSP.Font.SRC)\n\n\t\/\/ Set connect-src field\n\tbuff += getCSPField(\"connect-src\", c.Security.CSP.Connect.Default, c.Security.CSP.Connect.SRC)\n\n\t\/\/ Set style-src field\n\tbuff += getCSPField(\"style-src\", c.Security.CSP.Style.Default, c.Security.CSP.Style.SRC)\n\n\t\/\/ Set img-src field\n\tbuff += getCSPField(\"img-src\", c.Security.CSP.Image.Default, c.Security.CSP.Image.SRC)\n\n\treturn buff\n}\n\nfunc getCSPField(name string, def []string, src []string) string {\n\t\/\/ Data holder\n\tbuff := name\n\n\t\/\/ Loop default values\n\tfor _, d := range def {\n\t\tbuff += \" '\" + d + \"' \"\n\t}\n\n\t\/\/ Loop src values\n\tfor _, s := range src {\n\t\tbuff += \" \" + s + \" \"\n\t}\n\n\treturn buff + \";\"\n}\n\n\/\/ IsSSL returns if the server is behind SSL\nfunc (c Configuration) IsSSL() bool {\n\tif c.SSL.Enabled {\n\t\treturn true\n\t}\n\tif c.SSL.Proxy {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ EncodeConfig encodes the given io writer\nfunc EncodeConfig(configFile io.Writer, c *Configuration) error {\n\t\/\/ Lock mutex\n\tConfig.rw.Lock()\n\tdefer Config.rw.Unlock()\n\n\t\/\/ Encode the given writer with the given interface\n\treturn toml.NewEncoder(configFile).Encode(c)\n}\n\nfunc (c *ConfigurationFile) SetCustomValue(key string, v interface{}) {\n\t\/\/ Lock mutex\n\tc.rw.Lock()\n\tdefer c.rw.Unlock()\n\n\t\/\/ Set custom value\n\tc.Configuration.Custom[key] = v\n}\n<commit_msg>Fix lint<commit_after>package util\n\nimport (\n\t\"github.com\/BurntSushi\/toml\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ CookieConfig struct used for the cookies configuration options\ntype CookieConfig struct {\n\tName string\n\tMaxAge int\n\tHashKey string\n\tBlockKey string\n}\n\n\/\/ ShopConfig struct used for the shop configuration options\ntype ShopConfig struct {\n\tEnabled bool\n}\n\n\/\/ PluginConfig struct used for the plugin listener\ntype PluginConfig struct {\n\tEnabled bool\n\tUsername string\n\tPassword string\n\tOrigin string\n}\n\n\/\/ RateLimiterConfig struct used for the rate limiting configuration options\ntype RateLimiterConfig struct {\n\tNumber int64\n\tTime time.Duration\n}\n\n\/\/ CacheConfig struct used for the cache configuration options\ntype CacheConfig struct {\n\tDefault time.Duration\n\tPurge time.Duration\n}\n\n\/\/ SSLConfig struct used for the ssl configuration options\ntype SSLConfig struct {\n\tEnabled bool\n\tAuto bool\n\tProxy bool\n\tCert string\n\tKey string\n}\n\n\/\/ MailConfig struct used for the mail configuration options\ntype MailConfig struct {\n\tEnabled bool\n\tServer string\n\tPort int\n\tUsername string\n\tPassword string\n}\n\n\/\/ PaygolConfig struct used for the paygol configuration options\ntype PaygolConfig struct {\n\tEnabled bool\n\tService int\n\tCurrency string\n\tLanguage string\n}\n\n\/\/ PayPalConfig struct used for the paypal configuration options\ntype PayPalConfig struct {\n\tEnabled bool\n\tPublicKey string\n\tSecretKey string\n\tCurrency string\n\tSandBox bool\n}\n\n\/\/ FortumoConfig struct used for the fortumo configuration options\ntype FortumoConfig struct {\n\tEnabled bool\n\tService string\n\tSecret string\n}\n\n\/\/ ContentSecurityPolicyType struct used for CSP fields\ntype ContentSecurityPolicyType struct {\n\tDefault []string\n\tSRC []string\n}\n\n\/\/ ContentSecurityPolicyConfig struct used for CSP headers\ntype ContentSecurityPolicyConfig struct {\n\tDefault []string\n\tEnabled bool\n\tFrame ContentSecurityPolicyType\n\tScript ContentSecurityPolicyType\n\tFont ContentSecurityPolicyType\n\tImage ContentSecurityPolicyType\n\tConnect ContentSecurityPolicyType\n\tStyle ContentSecurityPolicyType\n}\n\n\/\/ SecurityConfig struct used for the security of the application\ntype SecurityConfig struct {\n\tNonceEnabled bool\n\tXSS string\n\tSTS string\n\tFrame string\n\tContentType string\n\tReferrerPolicy string\n\tCrossDomainPolicy string\n\tCSP ContentSecurityPolicyConfig\n}\n\n\/\/ Configuration struct used for the main Castro config file TOML file\ntype Configuration struct {\n\tCheckUpdates bool\n\tTemplate string\n\tMode string\n\tPort int\n\tURL string\n\tDatapack string\n\tSecurity SecurityConfig\n\tPlugin PluginConfig\n\tMail MailConfig\n\tCaptcha CaptchaConfig\n\tSSL SSLConfig\n\tPayPal PayPalConfig\n\tPayGol PaygolConfig\n\tFortumo FortumoConfig\n\tShop ShopConfig\n\tCookies CookieConfig\n\tCache CacheConfig\n\tRateLimit RateLimiterConfig\n\tCustom map[string]interface{}\n}\n\n\/\/ ConfigurationFile struct used to store a configuration pointer\ntype ConfigurationFile struct {\n\trw sync.RWMutex\n\tConfiguration *Configuration\n}\n\nvar (\n\t\/\/ Config holds the main configuration file\n\tConfig *ConfigurationFile\n\n\t\/\/ VERSION current version of the build\n\tVERSION string\n\n\t\/\/ BUILD_DATE date of the build\n\tBUILD_DATE string\n)\n\nfunc init() {\n\tConfig = &ConfigurationFile{}\n\tConfig.Configuration = &Configuration{}\n}\n\n\/\/ LoadConfig loads the configuration file to the given interface pointer\nfunc LoadConfig(path string) error {\n\t\/\/ Lock mutex\n\tConfig.rw.Lock()\n\tdefer Config.rw.Unlock()\n\n\t\/\/ Decode the given file to the given interface\n\tif _, err := toml.DecodeFile(path, Config.Configuration); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IsDev checks if castro is running on development mode\nfunc (c Configuration) IsDev() bool {\n\treturn c.Mode == \"dev\"\n}\n\n\/\/ IsLog checks if castro is running on log mode\nfunc (c Configuration) IsLog() bool {\n\treturn c.Mode == \"log\"\n}\n\n\/\/ CSP returns a valid Content-Security-Policy header value\nfunc (c Configuration) CSP() string {\n\t\/\/ Set default-src field\n\tbuff := getCSPField(\"default-src\", c.Security.CSP.Default, nil)\n\n\t\/\/ Set frame-src field\n\tbuff += getCSPField(\"frame-src\", c.Security.CSP.Frame.Default, c.Security.CSP.Frame.SRC)\n\n\t\/\/ Set script-src field\n\tbuff += getCSPField(\"script-src\", c.Security.CSP.Script.Default, c.Security.CSP.Script.SRC)\n\n\t\/\/ Set font-src field\n\tbuff += getCSPField(\"font-src\", c.Security.CSP.Font.Default, c.Security.CSP.Font.SRC)\n\n\t\/\/ Set connect-src field\n\tbuff += getCSPField(\"connect-src\", c.Security.CSP.Connect.Default, c.Security.CSP.Connect.SRC)\n\n\t\/\/ Set style-src field\n\tbuff += getCSPField(\"style-src\", c.Security.CSP.Style.Default, c.Security.CSP.Style.SRC)\n\n\t\/\/ Set img-src field\n\tbuff += getCSPField(\"img-src\", c.Security.CSP.Image.Default, c.Security.CSP.Image.SRC)\n\n\treturn buff\n}\n\nfunc getCSPField(name string, def []string, src []string) string {\n\t\/\/ Data holder\n\tbuff := name\n\n\t\/\/ Loop default values\n\tfor _, d := range def {\n\t\tbuff += \" '\" + d + \"' \"\n\t}\n\n\t\/\/ Loop src values\n\tfor _, s := range src {\n\t\tbuff += \" \" + s + \" \"\n\t}\n\n\treturn buff + \";\"\n}\n\n\/\/ IsSSL returns if the server is behind SSL\nfunc (c Configuration) IsSSL() bool {\n\tif c.SSL.Enabled {\n\t\treturn true\n\t}\n\tif c.SSL.Proxy {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ EncodeConfig encodes the given io writer\nfunc EncodeConfig(configFile io.Writer, c *Configuration) error {\n\t\/\/ Lock mutex\n\tConfig.rw.Lock()\n\tdefer Config.rw.Unlock()\n\n\t\/\/ Encode the given writer with the given interface\n\treturn toml.NewEncoder(configFile).Encode(c)\n}\n\n\/\/ SetCustomValue sets a config custom value\nfunc (c *ConfigurationFile) SetCustomValue(key string, v interface{}) {\n\t\/\/ Lock mutex\n\tc.rw.Lock()\n\tdefer c.rw.Unlock()\n\n\t\/\/ Set custom value\n\tc.Configuration.Custom[key] = v\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Liam Stanley <me@liamstanley.io>. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license that can be\n\/\/ found in the LICENSE file.\n\npackage girc\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ RunCallbacks manually runs callbacks for a given event.\nfunc (c *Client) RunCallbacks(event *Event) {\n\t\/\/ Log the event.\n\tc.log.Print(\"<-- \" + event.Raw())\n\n\t\/\/ Regular wildcard callbacks.\n\tc.Callbacks.exec(ALLEVENTS, c, event)\n\n\t\/\/ Then regular non-threaded callbacks.\n\tc.Callbacks.exec(event.Command, c, event)\n}\n\n\/\/ Callback is lower level implementation of a callback. See\n\/\/ Caller.AddHandler()\ntype Callback interface {\n\tExecute(*Client, Event)\n}\n\n\/\/ CallbackFunc is a type that represents the function necessary to\n\/\/ implement Callback.\ntype CallbackFunc func(c *Client, e Event)\n\n\/\/ Execute calls the CallbackFunc with the sender and irc message.\nfunc (f CallbackFunc) Execute(c *Client, e Event) {\n\tf(c, e)\n}\n\n\/\/ Caller manages internal and external (user facing) callbacks.\n\/\/\n\/\/ external\/internal keys are of structure:\n\/\/ map[CALLBACK_TYPE][COMMAND\/EVENT][CUID]Callback\ntype Caller struct {\n\t\/\/ mu is the mutex that should be used when accessing callbacks.\n\tmu sync.RWMutex\n\t\/\/ external is a map of user facing callbacks.\n\texternal map[string]map[string]map[string]Callback\n\t\/\/ internal is a map of internally used callbacks for the client.\n\tinternal map[string]map[string]map[string]Callback\n}\n\n\/\/ newCaller creates and initializes a new callback handler.\nfunc newCaller() *Caller {\n\tc := &Caller{}\n\n\tc.external = map[string]map[string]map[string]Callback{}\n\tc.external[\"routine\"] = map[string]map[string]Callback{}\n\tc.external[\"std\"] = map[string]map[string]Callback{}\n\tc.internal = map[string]map[string]map[string]Callback{}\n\tc.internal[\"routine\"] = map[string]map[string]Callback{}\n\tc.internal[\"std\"] = map[string]map[string]Callback{}\n\n\treturn c\n}\n\n\/\/ Len returns the total amount of user-entered callbacks.\nfunc (c *Caller) Len() int {\n\tvar total int\n\n\tc.mu.RLock()\n\tfor ctype := range c.external {\n\t\tfor cmd := range c.external[ctype] {\n\t\t\ttotal += len(c.external[ctype][cmd])\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\treturn total\n}\n\nfunc (c *Caller) String() string {\n\tvar total int\n\tvar ctypes []string\n\n\tc.mu.RLock()\n\tfor ctype := range c.internal {\n\t\tctypes = append(ctypes, ctype)\n\t\tfor cmd := range c.internal[ctype] {\n\t\t\ttotal += len(c.internal[ctype][cmd])\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\treturn fmt.Sprintf(\n\t\t\"<Caller() types[%d]:[%s] client:%d internal:%d>\",\n\t\tlen(c.external), strings.Join(ctypes, \",\"), c.Len(), len(c.internal),\n\t)\n}\n\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n\/\/ cuid generates a unique UID string for each callback for ease of removal.\nfunc (c *Caller) cuid(ctype, cmd string, n int) (cuid, uid string) {\n\tb := make([]byte, n)\n\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\n\treturn ctype + \":\" + cmd + \":\" + string(b), string(b)\n}\n\n\/\/ cuidToID allows easy mapping between a generated cuid and the caller``\n\/\/ external\/internal callback maps.\nfunc (c *Caller) cuidToID(input string) (ctype, cmd, uid string) {\n\t\/\/ Ignore the errors because the strings will default to empty anyway.\n\t_, _ = fmt.Sscanf(input, \"%s:%s:%s\", &ctype, &cmd, &uid)\n\treturn ctype, cmd, uid\n}\n\n\/\/ exec executes all callbacks pertaining to specified event. Internal first,\n\/\/ then external.\n\/\/\n\/\/ Please note that there is no specific order\/priority for which the\n\/\/ callback types themselves or the callbacks are executed.\nfunc (c *Caller) exec(command string, client *Client, event *Event) {\n\tc.mu.RLock()\n\t\/\/ Execute internal callbacks first.\n\tfor callbackType := range c.internal {\n\t\tif _, ok := c.internal[callbackType][command]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor cuid := range c.internal[callbackType][command] {\n\t\t\tswitch callbackType {\n\t\t\tcase \"routine\":\n\t\t\t\tgo c.internal[callbackType][command][cuid].Execute(client, *event)\n\t\t\tdefault:\n\t\t\t\tc.internal[callbackType][command][cuid].Execute(client, *event)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Aaand then external callbacks.\n\tfor callbackType := range c.external {\n\t\tif _, ok := c.external[callbackType][command]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor cuid := range c.external[callbackType][command] {\n\t\t\tswitch callbackType {\n\t\t\tcase \"routine\":\n\t\t\t\tgo c.external[callbackType][command][cuid].Execute(client, *event)\n\t\t\tdefault:\n\t\t\t\tc.external[callbackType][command][cuid].Execute(client, *event)\n\t\t\t}\n\t\t}\n\t}\n\tc.mu.RUnlock()\n}\n\n\/\/ ClearAll clears all external callbacks currently setup within the client.\n\/\/ This ignores internal callbacks.\nfunc (c *Caller) ClearAll() {\n\tc.mu.Lock()\n\tc.external = map[string]map[string]map[string]Callback{}\n\tc.external[\"routine\"] = map[string]map[string]Callback{}\n\tc.external[\"std\"] = map[string]map[string]Callback{}\n\tc.mu.Unlock()\n}\n\n\/\/ Clear clears all of the callbacks for the given event.\n\/\/ This ignores internal callbacks.\nfunc (c *Caller) Clear(cmd string) {\n\tc.mu.Lock()\n\tfor ctype := range c.external {\n\t\tif _, ok := c.external[ctype][cmd]; ok {\n\t\t\tdelete(c.external[ctype], cmd)\n\t\t}\n\t}\n\tc.mu.Unlock()\n}\n\n\/\/ Remove removes the callback with cuid from the callback stack. success\n\/\/ indicates that it existed, and has been removed. If not success, it\n\/\/ wasn't a registered callback.\nfunc (c *Caller) Remove(cuid string) (success bool) {\n\tctype, cmd, uid := c.cuidToID(cuid)\n\tif len(ctype) == 0 || len(cmd) == 0 || len(uid) == 0 {\n\t\treturn false\n\t}\n\n\tc.mu.Lock()\n\t\/\/ Check if the callback type exists.\n\tif _, ok := c.external[ctype]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\t\/\/ Check if the irc command\/event has any callbacks on it.\n\tif _, ok := c.external[ctype][cmd]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\t\/\/ Check to see if it's actually a registered callback.\n\tif _, ok := c.external[ctype][cmd][cuid]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\tdelete(c.external[ctype][cmd], uid)\n\tc.mu.Unlock()\n\n\t\/\/ Assume success.\n\treturn true\n}\n\nfunc (c *Caller) register(internal bool, ctype, cmd string, callback Callback) (cuid string) {\n\tvar uid string\n\n\tc.mu.Lock()\n\tif internal {\n\t\tif _, ok := c.internal[ctype]; !ok {\n\t\t\tpanic(errors.New(\"callback type does not exist: \" + ctype))\n\t\t}\n\n\t\tif _, ok := c.internal[ctype][cmd]; !ok {\n\t\t\tc.internal[ctype][cmd] = map[string]Callback{}\n\t\t}\n\n\t\tcuid, uid = c.cuid(ctype, cmd, 20)\n\n\t\tc.internal[ctype][cmd][uid] = callback\n\t} else {\n\t\tif _, ok := c.external[ctype]; !ok {\n\t\t\tpanic(errors.New(\"callback type does not exist: \" + ctype))\n\t\t}\n\n\t\tif _, ok := c.external[ctype][cmd]; !ok {\n\t\t\tc.external[ctype][cmd] = map[string]Callback{}\n\t\t}\n\n\t\tcuid, uid = c.cuid(ctype, cmd, 20)\n\n\t\tc.external[ctype][cmd][uid] = callback\n\t}\n\tc.mu.Unlock()\n\n\treturn cuid\n}\n\n\/\/ AddHandler registers a callback (matching the Callback interface) for the\n\/\/ given event. cuid is the callback uid which can be used to remove the\n\/\/ callback with Caller.Remove().\nfunc (c *Caller) AddHandler(cmd string, callback Callback) (cuid string) {\n\treturn c.register(false, \"std\", cmd, callback)\n}\n\n\/\/ AddBgHandler registers a callback (matching the Callback interface) for\n\/\/ the given event and executes it in a go-routine. cuid is the callback uid\n\/\/ which can be used to remove the callback with Caller.Remove().\nfunc (c *Caller) AddBgHandler(cmd string, callback Callback) (cuid string) {\n\treturn c.register(false, \"routine\", cmd, callback)\n}\n\n\/\/ Add registers the callback function for the given event. cuid is the\n\/\/ callback uid which can be used to remove the callback with Caller.Remove().\nfunc (c *Caller) Add(cmd string, callback func(c *Client, e Event)) (cuid string) {\n\treturn c.register(false, \"std\", cmd, CallbackFunc(callback))\n}\n\n\/\/ AddBg registers the callback function for the given event and executes it\n\/\/ in a go-routine. cuid is the callback uid which can be used to remove the\n\/\/ callback with Caller.Remove().\nfunc (c *Caller) AddBg(cmd string, callback func(c *Client, e Event)) (cuid string) {\n\treturn c.register(false, \"routine\", cmd, CallbackFunc(callback))\n}\n<commit_msg>implement Caller.Count()<commit_after>\/\/ Copyright 2016 Liam Stanley <me@liamstanley.io>. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license that can be\n\/\/ found in the LICENSE file.\n\npackage girc\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ RunCallbacks manually runs callbacks for a given event.\nfunc (c *Client) RunCallbacks(event *Event) {\n\t\/\/ Log the event.\n\tc.log.Print(\"<-- \" + event.Raw())\n\n\t\/\/ Regular wildcard callbacks.\n\tc.Callbacks.exec(ALLEVENTS, c, event)\n\n\t\/\/ Then regular non-threaded callbacks.\n\tc.Callbacks.exec(event.Command, c, event)\n}\n\n\/\/ Callback is lower level implementation of a callback. See\n\/\/ Caller.AddHandler()\ntype Callback interface {\n\tExecute(*Client, Event)\n}\n\n\/\/ CallbackFunc is a type that represents the function necessary to\n\/\/ implement Callback.\ntype CallbackFunc func(c *Client, e Event)\n\n\/\/ Execute calls the CallbackFunc with the sender and irc message.\nfunc (f CallbackFunc) Execute(c *Client, e Event) {\n\tf(c, e)\n}\n\n\/\/ Caller manages internal and external (user facing) callbacks.\n\/\/\n\/\/ external\/internal keys are of structure:\n\/\/ map[CALLBACK_TYPE][COMMAND\/EVENT][CUID]Callback\ntype Caller struct {\n\t\/\/ mu is the mutex that should be used when accessing callbacks.\n\tmu sync.RWMutex\n\t\/\/ external is a map of user facing callbacks.\n\texternal map[string]map[string]map[string]Callback\n\t\/\/ internal is a map of internally used callbacks for the client.\n\tinternal map[string]map[string]map[string]Callback\n}\n\n\/\/ newCaller creates and initializes a new callback handler.\nfunc newCaller() *Caller {\n\tc := &Caller{}\n\n\tc.external = map[string]map[string]map[string]Callback{}\n\tc.external[\"routine\"] = map[string]map[string]Callback{}\n\tc.external[\"std\"] = map[string]map[string]Callback{}\n\tc.internal = map[string]map[string]map[string]Callback{}\n\tc.internal[\"routine\"] = map[string]map[string]Callback{}\n\tc.internal[\"std\"] = map[string]map[string]Callback{}\n\n\treturn c\n}\n\n\/\/ Len returns the total amount of user-entered registered callbacks.\nfunc (c *Caller) Len() int {\n\tvar total int\n\n\tc.mu.RLock()\n\tfor ctype := range c.external {\n\t\tfor command := range c.external[ctype] {\n\t\t\ttotal += len(c.external[ctype][command])\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\treturn total\n}\n\n\/\/ Count is much like Caller.Len(), however it counts the number of\n\/\/ registered callbacks for a given command.\nfunc (c *Caller) Count(cmd string) int {\n\tvar total int\n\n\tc.mu.RLock()\n\tfor ctype := range c.external {\n\t\tfor command := range c.external[ctype] {\n\t\t\tif command == cmd {\n\t\t\t\ttotal += len(c.external[ctype][command])\n\t\t\t}\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\treturn total\n}\n\nfunc (c *Caller) String() string {\n\tvar total int\n\tvar ctypes []string\n\n\tc.mu.RLock()\n\tfor ctype := range c.internal {\n\t\tctypes = append(ctypes, ctype)\n\t\tfor cmd := range c.internal[ctype] {\n\t\t\ttotal += len(c.internal[ctype][cmd])\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\treturn fmt.Sprintf(\n\t\t\"<Caller() types[%d]:[%s] client:%d internal:%d>\",\n\t\tlen(c.external), strings.Join(ctypes, \",\"), c.Len(), len(c.internal),\n\t)\n}\n\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n\/\/ cuid generates a unique UID string for each callback for ease of removal.\nfunc (c *Caller) cuid(ctype, cmd string, n int) (cuid, uid string) {\n\tb := make([]byte, n)\n\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\n\treturn ctype + \":\" + cmd + \":\" + string(b), string(b)\n}\n\n\/\/ cuidToID allows easy mapping between a generated cuid and the caller``\n\/\/ external\/internal callback maps.\nfunc (c *Caller) cuidToID(input string) (ctype, cmd, uid string) {\n\t\/\/ Ignore the errors because the strings will default to empty anyway.\n\t_, _ = fmt.Sscanf(input, \"%s:%s:%s\", &ctype, &cmd, &uid)\n\treturn ctype, cmd, uid\n}\n\n\/\/ exec executes all callbacks pertaining to specified event. Internal first,\n\/\/ then external.\n\/\/\n\/\/ Please note that there is no specific order\/priority for which the\n\/\/ callback types themselves or the callbacks are executed.\nfunc (c *Caller) exec(command string, client *Client, event *Event) {\n\tc.mu.RLock()\n\t\/\/ Execute internal callbacks first.\n\tfor callbackType := range c.internal {\n\t\tif _, ok := c.internal[callbackType][command]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor cuid := range c.internal[callbackType][command] {\n\t\t\tswitch callbackType {\n\t\t\tcase \"routine\":\n\t\t\t\tgo c.internal[callbackType][command][cuid].Execute(client, *event)\n\t\t\tdefault:\n\t\t\t\tc.internal[callbackType][command][cuid].Execute(client, *event)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Aaand then external callbacks.\n\tfor callbackType := range c.external {\n\t\tif _, ok := c.external[callbackType][command]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor cuid := range c.external[callbackType][command] {\n\t\t\tswitch callbackType {\n\t\t\tcase \"routine\":\n\t\t\t\tgo c.external[callbackType][command][cuid].Execute(client, *event)\n\t\t\tdefault:\n\t\t\t\tc.external[callbackType][command][cuid].Execute(client, *event)\n\t\t\t}\n\t\t}\n\t}\n\tc.mu.RUnlock()\n}\n\n\/\/ ClearAll clears all external callbacks currently setup within the client.\n\/\/ This ignores internal callbacks.\nfunc (c *Caller) ClearAll() {\n\tc.mu.Lock()\n\tc.external = map[string]map[string]map[string]Callback{}\n\tc.external[\"routine\"] = map[string]map[string]Callback{}\n\tc.external[\"std\"] = map[string]map[string]Callback{}\n\tc.mu.Unlock()\n}\n\n\/\/ Clear clears all of the callbacks for the given event.\n\/\/ This ignores internal callbacks.\nfunc (c *Caller) Clear(cmd string) {\n\tc.mu.Lock()\n\tfor ctype := range c.external {\n\t\tif _, ok := c.external[ctype][cmd]; ok {\n\t\t\tdelete(c.external[ctype], cmd)\n\t\t}\n\t}\n\tc.mu.Unlock()\n}\n\n\/\/ Remove removes the callback with cuid from the callback stack. success\n\/\/ indicates that it existed, and has been removed. If not success, it\n\/\/ wasn't a registered callback.\nfunc (c *Caller) Remove(cuid string) (success bool) {\n\tctype, cmd, uid := c.cuidToID(cuid)\n\tif len(ctype) == 0 || len(cmd) == 0 || len(uid) == 0 {\n\t\treturn false\n\t}\n\n\tc.mu.Lock()\n\t\/\/ Check if the callback type exists.\n\tif _, ok := c.external[ctype]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\t\/\/ Check if the irc command\/event has any callbacks on it.\n\tif _, ok := c.external[ctype][cmd]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\t\/\/ Check to see if it's actually a registered callback.\n\tif _, ok := c.external[ctype][cmd][cuid]; !ok {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\tdelete(c.external[ctype][cmd], uid)\n\tc.mu.Unlock()\n\n\t\/\/ Assume success.\n\treturn true\n}\n\nfunc (c *Caller) register(internal bool, ctype, cmd string, callback Callback) (cuid string) {\n\tvar uid string\n\n\tc.mu.Lock()\n\tif internal {\n\t\tif _, ok := c.internal[ctype]; !ok {\n\t\t\tpanic(errors.New(\"callback type does not exist: \" + ctype))\n\t\t}\n\n\t\tif _, ok := c.internal[ctype][cmd]; !ok {\n\t\t\tc.internal[ctype][cmd] = map[string]Callback{}\n\t\t}\n\n\t\tcuid, uid = c.cuid(ctype, cmd, 20)\n\n\t\tc.internal[ctype][cmd][uid] = callback\n\t} else {\n\t\tif _, ok := c.external[ctype]; !ok {\n\t\t\tpanic(errors.New(\"callback type does not exist: \" + ctype))\n\t\t}\n\n\t\tif _, ok := c.external[ctype][cmd]; !ok {\n\t\t\tc.external[ctype][cmd] = map[string]Callback{}\n\t\t}\n\n\t\tcuid, uid = c.cuid(ctype, cmd, 20)\n\n\t\tc.external[ctype][cmd][uid] = callback\n\t}\n\tc.mu.Unlock()\n\n\treturn cuid\n}\n\n\/\/ AddHandler registers a callback (matching the Callback interface) for the\n\/\/ given event. cuid is the callback uid which can be used to remove the\n\/\/ callback with Caller.Remove().\nfunc (c *Caller) AddHandler(cmd string, callback Callback) (cuid string) {\n\treturn c.register(false, \"std\", cmd, callback)\n}\n\n\/\/ AddBgHandler registers a callback (matching the Callback interface) for\n\/\/ the given event and executes it in a go-routine. cuid is the callback uid\n\/\/ which can be used to remove the callback with Caller.Remove().\nfunc (c *Caller) AddBgHandler(cmd string, callback Callback) (cuid string) {\n\treturn c.register(false, \"routine\", cmd, callback)\n}\n\n\/\/ Add registers the callback function for the given event. cuid is the\n\/\/ callback uid which can be used to remove the callback with Caller.Remove().\nfunc (c *Caller) Add(cmd string, callback func(c *Client, e Event)) (cuid string) {\n\treturn c.register(false, \"std\", cmd, CallbackFunc(callback))\n}\n\n\/\/ AddBg registers the callback function for the given event and executes it\n\/\/ in a go-routine. cuid is the callback uid which can be used to remove the\n\/\/ callback with Caller.Remove().\nfunc (c *Caller) AddBg(cmd string, callback func(c *Client, e Event)) (cuid string) {\n\treturn c.register(false, \"routine\", cmd, CallbackFunc(callback))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copied with small adaptations from the reflect package in the\n\/\/ Go source tree.\n\n\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The deepdiff package implements a version of reflect.DeepEquals that\n\/\/ also returns an error message describing the first difference found.\npackage deepdiff\n\nimport (\n\t\"reflect\"\n\t\"fmt\"\n\t\"unsafe\"\n)\n\n\/\/ During deepValueEqual, must keep track of checks that are\n\/\/ in progress. The comparison algorithm assumes that all\n\/\/ checks in progress are true when it reencounters them.\n\/\/ Visited comparisons are stored in a map indexed by visit.\ntype visit struct {\n\ta1 uintptr\n\ta2 uintptr\n\ttyp reflect.Type\n}\n\ntype MismatchError struct {\n\tv1, v2 reflect.Value\n\tPath string\n\tHow string\n}\n\nfunc (err *MismatchError) Error() string {\n\treturn fmt.Sprintf(\"mismatch at %s: %s; obtained %#v; expected %#v\", err.Path, err.How, interfaceOf(err.v1), interfaceOf(err.v2))\n}\n\n\/\/ Tests for deep equality using reflected types. The map argument tracks\n\/\/ comparisons that have already been seen, which allows short circuiting on\n\/\/ recursive types.\nfunc deepValueEqual(path string, v1, v2 reflect.Value, visited map[visit]bool, depth int) (ok bool, err error) {\n\terrorf := func(f string, a ...interface{}) error {\n\t\treturn &MismatchError{\n\t\t\tv1: v1,\n\t\t\tv2: v2,\n\t\t\tPath: path,\n\t\t\tHow: fmt.Sprintf(f, a...),\n\t\t}\n\t}\n\tif !v1.IsValid() || !v2.IsValid() {\n\t\tif v1.IsValid() == v2.IsValid() {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, errorf(\"validity mismatch\")\n\t}\n\tif v1.Type() != v2.Type() {\n\t\treturn false, errorf(\"type mismatch %s vs %s\", v1.Type(), v2.Type())\n\t}\n\n\t\/\/ if depth > 10 { panic(\"deepValueEqual\") }\t\/\/ for debugging\n\thard := func(k reflect.Kind) bool {\n\t\tswitch k {\n\t\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {\n\t\taddr1 := v1.UnsafeAddr()\n\t\taddr2 := v2.UnsafeAddr()\n\t\tif addr1 > addr2 {\n\t\t\t\/\/ Canonicalize order to reduce number of entries in visited.\n\t\t\taddr1, addr2 = addr2, addr1\n\t\t}\n\n\t\t\/\/ Short circuit if references are identical ...\n\t\tif addr1 == addr2 {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t\/\/ ... or already seen\n\t\ttyp := v1.Type()\n\t\tv := visit{addr1, addr2, typ}\n\t\tif visited[v] {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t\/\/ Remember for later.\n\t\tvisited[v] = true\n\t}\n\n\tswitch v1.Kind() {\n\tcase reflect.Array:\n\t\tif v1.Len() != v2.Len() {\n\t\t\t\/\/ can't happen!\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif ok, err := deepValueEqual(\n\t\t\t\tfmt.Sprintf(\"%s[%d]\", path, i),\n\t\t\t\tv1.Index(i), v2.Index(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Slice:\n\t\t\/\/ We treat a nil slice the same as an empty slice.\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true, nil\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif ok, err := deepValueEqual(\n\t\t\t\tfmt.Sprintf(\"%s[%d]\", path, i),\n\t\t\t\tv1.Index(i), v2.Index(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Interface:\n\t\tif v1.IsNil() || v2.IsNil() {\n\t\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\t\treturn false, fmt.Errorf(\"nil vs non-nil interface mismatch\")\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t\treturn deepValueEqual(path, v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase reflect.Ptr:\n\t\treturn deepValueEqual(\"(*\" + path + \")\", v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase reflect.Struct:\n\t\tfor i, n := 0, v1.NumField(); i < n; i++ {\n\t\t\tpath := path + \".\" + v1.Type().Field(i).Name\n\t\t\tif ok, err := deepValueEqual(path, v1.Field(i), v2.Field(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Map:\n\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\treturn false, errorf(\"nil vs non-nil mismatch\")\n\t\t}\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true, nil\n\t\t}\n\t\tfor _, k := range v1.MapKeys() {\n\t\t\tvar p string\n\t\t\tif k.CanInterface() {\n\t\t\t\tp = path + \"[\" + fmt.Sprintf(\"%#v\", k.Interface())+ \"]\"\n\t\t\t} else {\n\t\t\t\tp = path + \"[someKey]\"\n\t\t\t}\n\t\t\tif ok, err := deepValueEqual(p, v1.MapIndex(k), v2.MapIndex(k), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Func:\n\t\tif v1.IsNil() && v2.IsNil() {\n\t\t\treturn true, nil\n\t\t}\n\t\t\/\/ Can't do better than this:\n\t\treturn false, errorf(\"non-nil functions\")\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tif v1.Int() != v2.Int() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\tif v1.Uint() != v2.Uint() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tif v1.Float() != v2.Float() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Complex64, reflect.Complex128:\n\t\tif v1.Complex() != v2.Complex() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Bool:\n\t\tif v1.Bool() != v2.Bool() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.String:\n\t\tif v1.String() != v2.String() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Chan, reflect.UnsafePointer:\n\t\tif v1.Pointer() != v2.Pointer() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tdefault:\n\t\tpanic(\"unexpected type \" + v1.Type().String())\n\t}\n}\n\nfunc DeepEqual(a1, a2 interface{}) bool {\n\tok, _ := DeepDiff(a1, a2)\n\treturn ok\n}\n\n\/\/ DeepDiff tests for deep equality. It uses normal == equality where\n\/\/ possible but will scan elements of arrays, slices, maps, and fields of\n\/\/ structs. In maps, keys are compared with == but elements use deep\n\/\/ equality. DeepEqual correctly handles recursive types. Functions are equal\n\/\/ only if they are both nil.\n\/\/ DeepEqual differs from reflect.DeepEqual in that an empty\n\/\/ slice is equal to a nil slice, and an empty map is equal to a nil map.\n\/\/ If the two values compare unequal, the resulting error\n\/\/ holds the first difference encountered.\nfunc DeepDiff(a1, a2 interface{}) (bool, error) {\n\terrorf := func(f string, a ...interface{}) error {\n\t\treturn &MismatchError{\n\t\t\tv1: reflect.ValueOf(a1),\n\t\t\tv2: reflect.ValueOf(a2),\n\t\t\tPath: \"$\",\n\t\t\tHow: fmt.Sprintf(f, a...),\n\t\t}\n\t}\n\tif a1 == nil || a2 == nil {\n\t\treturn a1 == a2, errorf(\"nil vs non-nil mismatch\")\n\t}\n\tv1 := reflect.ValueOf(a1)\n\tv2 := reflect.ValueOf(a2)\n\tif v1.Type() != v2.Type() {\n\t\treturn false, errorf(\"type mismatch %s vs %s\", v1.Type(), v2.Type())\n\t}\n\treturn deepValueEqual(\"$\", v1, v2, make(map[visit]bool), 0)\n}\n\ntype flag uintptr\n\n\/\/ copied from reflect\/value.go\nconst (\n\tflagRO flag = 1 << iota\n)\n\n\/\/ interfaceOf returns v.Interface() even if v.CanInterface() == false.\n\/\/ This enables us to call fmt.Printf on a value even if it's\n\/\/ derived from inside an unexported field.\nfunc interfaceOf(v reflect.Value) interface{} {\n\tif !v.IsValid() {\n\t\treturn nil\n\t}\n\treturn bypass(v).Interface()\n}\n\nvar flagValOffset = func() uintptr {\n\tfield, ok := reflect.TypeOf(reflect.Value{}).FieldByName(\"flag\")\n\tif !ok {\n\t\tpanic(\"reflect.Value has no flag field\")\n\t}\n\treturn field.Offset\n}()\n\n\/\/ Sanity checks against future reflect package changes.\nfunc init() {\n\tfield, ok := reflect.TypeOf(reflect.Value{}).FieldByName(\"flag\")\n\tif !ok {\n\t\tpanic(\"reflect.Value has no flag field\")\n\t}\n\tif field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {\n\t\tpanic(\"reflect.Value flag field has changed kind\")\n\t}\n\tvar t struct {\n\t\ta int\n\t\tA int\n\t}\n\tvA := reflect.ValueOf(t).FieldByName(\"A\")\n\tva := reflect.ValueOf(t).FieldByName(\"a\")\n\tflagA := *flagField(&vA)\n\tflaga := *flagField(&va)\n\tif flagA & flagRO != 0 || flaga & flagRO == 0 {\n\t\tpanic(\"reflect.Value read-only flag has changed value\")\n\t}\n}\n\nfunc flagField(v *reflect.Value) *flag {\n\treturn (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))\n}\n\nfunc bypass(v reflect.Value) reflect.Value {\n\tif !v.IsValid() || v.CanInterface() {\n\t\treturn v\n\t}\n\t*flagField(&v) &^= flagRO\n\treturn v\n}\n<commit_msg>deepdiff: fixes<commit_after>\/\/ Copied with small adaptations from the reflect package in the\n\/\/ Go source tree.\n\n\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The deepdiff package implements a version of reflect.DeepEquals that\n\/\/ also returns an error message describing the first difference found.\npackage deepdiff\n\nimport (\n\t\"reflect\"\n\t\"fmt\"\n\t\"unsafe\"\n)\n\n\/\/ During deepValueEqual, must keep track of checks that are\n\/\/ in progress. The comparison algorithm assumes that all\n\/\/ checks in progress are true when it reencounters them.\n\/\/ Visited comparisons are stored in a map indexed by visit.\ntype visit struct {\n\ta1 uintptr\n\ta2 uintptr\n\ttyp reflect.Type\n}\n\ntype mismatchError struct {\n\tv1, v2 reflect.Value\n\tpath string\n\thow string\n}\n\nfunc (err *mismatchError) Error() string {\n\tpath := err.path\n\tif path == \"\" {\n\t\tpath = \"top level\"\n\t}\n\treturn fmt.Sprintf(\"mismatch at %s: %s; obtained %#v; expected %#v\", err.path, err.how, interfaceOf(err.v1), interfaceOf(err.v2))\n}\n\n\/\/ Tests for deep equality using reflected types. The map argument tracks\n\/\/ comparisons that have already been seen, which allows short circuiting on\n\/\/ recursive types.\nfunc deepValueEqual(path string, v1, v2 reflect.Value, visited map[visit]bool, depth int) (ok bool, err error) {\n\terrorf := func(f string, a ...interface{}) error {\n\t\treturn &mismatchError{\n\t\t\tv1: v1,\n\t\t\tv2: v2,\n\t\t\tpath: path,\n\t\t\thow: fmt.Sprintf(f, a...),\n\t\t}\n\t}\n\tif !v1.IsValid() || !v2.IsValid() {\n\t\tif v1.IsValid() == v2.IsValid() {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, errorf(\"validity mismatch\")\n\t}\n\tif v1.Type() != v2.Type() {\n\t\treturn false, errorf(\"type mismatch %s vs %s\", v1.Type(), v2.Type())\n\t}\n\n\t\/\/ if depth > 10 { panic(\"deepValueEqual\") }\t\/\/ for debugging\n\thard := func(k reflect.Kind) bool {\n\t\tswitch k {\n\t\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {\n\t\taddr1 := v1.UnsafeAddr()\n\t\taddr2 := v2.UnsafeAddr()\n\t\tif addr1 > addr2 {\n\t\t\t\/\/ Canonicalize order to reduce number of entries in visited.\n\t\t\taddr1, addr2 = addr2, addr1\n\t\t}\n\n\t\t\/\/ Short circuit if references are identical ...\n\t\tif addr1 == addr2 {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t\/\/ ... or already seen\n\t\ttyp := v1.Type()\n\t\tv := visit{addr1, addr2, typ}\n\t\tif visited[v] {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t\/\/ Remember for later.\n\t\tvisited[v] = true\n\t}\n\n\tswitch v1.Kind() {\n\tcase reflect.Array:\n\t\tif v1.Len() != v2.Len() {\n\t\t\t\/\/ can't happen!\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif ok, err := deepValueEqual(\n\t\t\t\tfmt.Sprintf(\"%s[%d]\", path, i),\n\t\t\t\tv1.Index(i), v2.Index(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Slice:\n\t\t\/\/ We treat a nil slice the same as an empty slice.\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true, nil\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif ok, err := deepValueEqual(\n\t\t\t\tfmt.Sprintf(\"%s[%d]\", path, i),\n\t\t\t\tv1.Index(i), v2.Index(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Interface:\n\t\tif v1.IsNil() || v2.IsNil() {\n\t\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\t\treturn false, fmt.Errorf(\"nil vs non-nil interface mismatch\")\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t\treturn deepValueEqual(path, v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase reflect.Ptr:\n\t\treturn deepValueEqual(\"(*\" + path + \")\", v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase reflect.Struct:\n\t\tfor i, n := 0, v1.NumField(); i < n; i++ {\n\t\t\tpath := path + \".\" + v1.Type().Field(i).Name\n\t\t\tif ok, err := deepValueEqual(path, v1.Field(i), v2.Field(i), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Map:\n\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\treturn false, errorf(\"nil vs non-nil mismatch\")\n\t\t}\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false, errorf(\"length mismatch, %d vs %d\", v1.Len(), v2.Len())\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true, nil\n\t\t}\n\t\tfor _, k := range v1.MapKeys() {\n\t\t\tvar p string\n\t\t\tif k.CanInterface() {\n\t\t\t\tp = path + \"[\" + fmt.Sprintf(\"%#v\", k.Interface())+ \"]\"\n\t\t\t} else {\n\t\t\t\tp = path + \"[someKey]\"\n\t\t\t}\n\t\t\tif ok, err := deepValueEqual(p, v1.MapIndex(k), v2.MapIndex(k), visited, depth+1); !ok {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Func:\n\t\tif v1.IsNil() && v2.IsNil() {\n\t\t\treturn true, nil\n\t\t}\n\t\t\/\/ Can't do better than this:\n\t\treturn false, errorf(\"non-nil functions\")\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tif v1.Int() != v2.Int() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\tif v1.Uint() != v2.Uint() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tif v1.Float() != v2.Float() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Complex64, reflect.Complex128:\n\t\tif v1.Complex() != v2.Complex() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Bool:\n\t\tif v1.Bool() != v2.Bool() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.String:\n\t\tif v1.String() != v2.String() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tcase reflect.Chan, reflect.UnsafePointer:\n\t\tif v1.Pointer() != v2.Pointer() {\n\t\t\treturn false, errorf(\"unequal\")\n\t\t}\n\t\treturn true, nil\n\tdefault:\n\t\tpanic(\"unexpected type \" + v1.Type().String())\n\t}\n}\n\nfunc DeepEqual(a1, a2 interface{}) bool {\n\tok, _ := DeepDiff(a1, a2)\n\treturn ok\n}\n\n\/\/ DeepDiff tests for deep equality. It uses normal == equality where\n\/\/ possible but will scan elements of arrays, slices, maps, and fields of\n\/\/ structs. In maps, keys are compared with == but elements use deep\n\/\/ equality. DeepEqual correctly handles recursive types. Functions are equal\n\/\/ only if they are both nil.\n\/\/ DeepEqual differs from reflect.DeepEqual in that an empty\n\/\/ slice is equal to a nil slice, and an empty map is equal to a nil map.\n\/\/ If the two values compare unequal, the resulting error\n\/\/ holds the first difference encountered.\nfunc DeepDiff(a1, a2 interface{}) (bool, error) {\n\terrorf := func(f string, a ...interface{}) error {\n\t\treturn &mismatchError{\n\t\t\tv1: reflect.ValueOf(a1),\n\t\t\tv2: reflect.ValueOf(a2),\n\t\t\tpath: \"\",\n\t\t\thow: fmt.Sprintf(f, a...),\n\t\t}\n\t}\n\tif a1 == nil || a2 == nil {\n\t\treturn a1 == a2, errorf(\"nil vs non-nil mismatch\")\n\t}\n\tv1 := reflect.ValueOf(a1)\n\tv2 := reflect.ValueOf(a2)\n\tif v1.Type() != v2.Type() {\n\t\treturn false, errorf(\"type mismatch %s vs %s\", v1.Type(), v2.Type())\n\t}\n\treturn deepValueEqual(\"\", v1, v2, make(map[visit]bool), 0)\n}\n\n\/\/ interfaceOf returns v.Interface() even if v.CanInterface() == false.\n\/\/ This enables us to call fmt.Printf on a value even if it's derived\n\/\/ from inside an unexported field.\nfunc interfaceOf(v reflect.Value) interface{} {\n\tif !v.IsValid() {\n\t\treturn nil\n\t}\n\treturn bypassCanInterface(v).Interface()\n}\n\ntype flag uintptr\n\n\/\/ copied from reflect\/value.go\nconst (\n\tflagRO flag = 1 << iota\n)\n\nvar flagValOffset = func() uintptr {\n\tfield, ok := reflect.TypeOf(reflect.Value{}).FieldByName(\"flag\")\n\tif !ok {\n\t\tpanic(\"reflect.Value has no flag field\")\n\t}\n\treturn field.Offset\n}()\n\nfunc flagField(v *reflect.Value) *flag {\n\treturn (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))\n}\n\n\/\/ bypassCanInterface returns a version of v that\n\/\/ bypasses the CanInterface check.\nfunc bypassCanInterface(v reflect.Value) reflect.Value {\n\tif !v.IsValid() || v.CanInterface() {\n\t\treturn v\n\t}\n\t*flagField(&v) &^= flagRO\n\treturn v\n}\n\n\/\/ Sanity checks against future reflect package changes\n\/\/ to the type or semantics of the Value.flag field.\nfunc init() {\n\tfield, ok := reflect.TypeOf(reflect.Value{}).FieldByName(\"flag\")\n\tif !ok {\n\t\tpanic(\"reflect.Value has no flag field\")\n\t}\n\tif field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {\n\t\tpanic(\"reflect.Value flag field has changed kind\")\n\t}\n\tvar t struct {\n\t\ta int\n\t\tA int\n\t}\n\tvA := reflect.ValueOf(t).FieldByName(\"A\")\n\tva := reflect.ValueOf(t).FieldByName(\"a\")\n\tflagA := *flagField(&vA)\n\tflaga := *flagField(&va)\n\tif flagA & flagRO != 0 || flaga & flagRO == 0 {\n\t\tpanic(\"reflect.Value read-only flag has changed value\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package annotations\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"github.com\/Financial-Times\/neo-model-utils-go\/mapper\"\n\t\"github.com\/Financial-Times\/neo-utils-go\/neoutils\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/jmcvetta\/neoism\"\n)\n\nvar uuidExtractRegex = regexp.MustCompile(\".*\/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$\")\n\n\/\/ Service interface. Compatible with the baserwftapp service EXCEPT for\n\/\/ 1) the Write function, which has signature Write(thing interface{}) error...\n\/\/ 2) the DecodeJson function, which has signature DecodeJSON(*json.Decoder) (thing interface{}, identity string, err error)\n\/\/ The problem is that we have a list of things, and the uuid is for a related OTHER thing\n\/\/ TODO - move to implement a shared defined Service interface?\ntype Service interface {\n\tWrite(contentUUID string, thing interface{}) (err error)\n\tRead(contentUUID string) (thing interface{}, found bool, err error)\n\tDelete(contentUUID string) (found bool, err error)\n\tCheck() (err error)\n\tDecodeJSON(*json.Decoder) (thing interface{}, err error)\n\tCount() (int, error)\n\tInitialise() error\n}\n\n\/\/holds the Neo4j-specific information\ntype service struct {\n\tcypherRunner neoutils.CypherRunner\n\tindexManager neoutils.IndexManager\n\tplatformVersion string\n}\n\n\/\/NewAnnotationsService instantiate driver\nfunc NewAnnotationsService(cypherRunner neoutils.CypherRunner, indexManager neoutils.IndexManager, platformVersion string) service {\n\tif platformVersion == \"\" {\n\t\tlog.Fatalf(\"PlatformVersion was not specified!\")\n\t}\n\treturn service{cypherRunner, indexManager, platformVersion}\n}\n\n\/\/ DecodeJSON decodes to a list of annotations, for ease of use this is a struct itself\nfunc (s service) DecodeJSON(dec *json.Decoder) (interface{}, error) {\n\ta := annotations{}\n\terr := dec.Decode(&a)\n\treturn a, err\n}\n\nfunc (s service) Read(contentUUID string) (thing interface{}, found bool, err error) {\n\tresults := []annotation{}\n\n\t\/\/TODO shouldn't return Provenances if none of the scores, agentRole or atTime are set\n\tstatementTemplate := `\n\t\t\t\t\tMATCH (c:Thing{uuid:{contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing)\n\t\t\t\t\tMATCH (cc:Thing)\n\t\t\t\t\tWITH c, cc, rel, {id:cc.uuid,prefLabel:cc.prefLabel,types:labels(cc),predicate:type(rel)} as thing,\n\t\t\t\t\tcollect(\n\t\t\t\t\t\t{scores:[\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.relevanceScore},\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.confidenceScore}],\n\t\t\t\t\t\tagentRole:rel.annotatedBy,\n\t\t\t\t\t\tatTime:rel.annotatedDate}) as provenances\n\t\t\t\t\tRETURN thing, provenances ORDER BY thing.id\n\t\t\t\t\t\t\t\t\t`\n\tstatement := fmt.Sprintf(statementTemplate, relevanceScoringSystem, confidenceScoringSystem)\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: statement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\terr = s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\tif err != nil {\n\t\tlog.Errorf(\"Error looking up uuid %s with query %s from neoism: %+v\", contentUUID, query.Statement, err)\n\t\treturn annotations{}, false, fmt.Errorf(\"Error accessing Annotations datastore for uuid: %s\", contentUUID)\n\t}\n\tlog.Debugf(\"CypherResult Read Annotations for uuid: %s was: %+v\", contentUUID, results)\n\tif (len(results)) == 0 {\n\t\treturn annotations{}, false, nil\n\t}\n\n\tfor idx := range results {\n\t\tmapToResponseFormat(&results[idx])\n\t}\n\n\treturn results, true, nil\n}\n\n\/\/Delete removes all the annotations for this content. Ignore the nodes on either end -\n\/\/may leave nodes that are only 'things' inserted by this writer: clean up\n\/\/as a result of this will need to happen externally if required\nfunc (s service) Delete(contentUUID string) (bool, error) {\n\n\tvar deleteStatement string\n\n\tif s.platformVersion == \"v2\" {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel:MENTIONS{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t} else {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: deleteStatement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tIncludeStats: true,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tstats, err := query.Stats()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar found bool\n\tif stats.ContainsUpdates {\n\t\tfound = true\n\t}\n\n\treturn found, err\n}\n\n\/\/Write a set of annotations associated with a piece of content. Any annotations\n\/\/already there will be removed\nfunc (s service) Write(contentUUID string, thing interface{}) (err error) {\n\tannotationsToWrite := thing.(annotations)\n\n\tif contentUUID == \"\" {\n\t\treturn errors.New(\"Content uuid is required\")\n\t}\n\tif err := validateAnnotations(&annotationsToWrite); err != nil {\n\t\tlog.Warnf(\"Validation of supplied annotations failed\")\n\t\treturn err\n\t}\n\n\tif len(annotationsToWrite) == 0 {\n\t\tlog.Warnf(\"No new annotations supplied for content uuid: %s\", contentUUID)\n\t}\n\n\tqueries := append([]*neoism.CypherQuery{}, dropAllAnnotationsQuery(contentUUID, s.platformVersion))\n\n\tvar statements = []string{}\n\tfor _, annotationToWrite := range annotationsToWrite {\n\t\tquery, err := createAnnotationQuery(contentUUID, annotationToWrite, s.platformVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatements = append(statements, query.Statement)\n\t\tqueries = append(queries, query)\n\t}\n\tlog.Infof(\"Updated Annotations for content uuid: %s\", contentUUID)\n\tlog.Debugf(\"For update, ran statements: %+v\", statements)\n\n\treturn s.cypherRunner.CypherBatch(queries)\n}\n\n\/\/ Check tests neo4j by running a simple cypher query\nfunc (s service) Check() error {\n\treturn neoutils.Check(s.cypherRunner)\n}\n\nfunc (s service) Count() (int, error) {\n\tresults := []struct {\n\t\tCount int `json:\"c\"`\n\t}{}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: `MATCH ()-[r{platformVersion:{platformVersion}}]->() RETURN count(r) as c`,\n\t\tParameters: neoism.Props{\"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn results[0].Count, nil\n}\n\nfunc (s service) Initialise() error {\n\treturn nil \/\/ No constraints need to be set up\n}\n\nfunc createAnnotationRelationship(relation string) (statement string) {\n\tstmt := `\n MERGE (content:Thing{uuid:{contentID}})\n MERGE (upp:UPPIdentifier{value:{conceptID}})\n MERGE (upp)-[:IDENTIFIES]->(concept:Thing) ON CREATE SET concept.uuid = {conceptID}\n MERGE (content)-[pred:%s{platformVersion:{platformVersion}}]->(concept)\n SET pred={annProps}\n `\n\tstatement = fmt.Sprintf(stmt, relation)\n\treturn statement\n}\n\nfunc getRelationshipFromPredicate(predicate string) (relation string) {\n\tif predicate != \"\" {\n\t\trelation = relations[predicate]\n\t} else {\n\t\trelation = relations[\"mentions\"]\n\t}\n\treturn relation\n}\n\nfunc createAnnotationQuery(contentUUID string, ann annotation, platformVersion string) (*neoism.CypherQuery, error) {\n\tquery := neoism.CypherQuery{}\n\tthingID, err := extractUUIDFromURI(ann.Thing.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/todo temporary change to deal with multiple provenances\n\t\/*if len(ann.Provenances) > 1 {\n\t\treturn nil, errors.New(\"Cannot insert a MENTIONS annotation with multiple provenances\")\n\t}*\/\n\n\tvar prov provenance\n\tparams := map[string]interface{}{}\n\tparams[\"platformVersion\"] = platformVersion\n\n\tif len(ann.Provenances) >= 1 {\n\t\tprov = ann.Provenances[0]\n\t\tannotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, supplied, err := extractDataFromProvenance(&prov)\n\n\t\tif err != nil {\n\t\t\tlog.Infof(\"ERROR=%s\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif supplied == true {\n\t\t\tif annotatedBy != \"\" {\n\t\t\t\tparams[\"annotatedBy\"] = annotatedBy\n\t\t\t}\n\t\t\tif prov.AtTime != \"\" {\n\t\t\t\tparams[\"annotatedDateEpoch\"] = annotatedDateEpoch\n\t\t\t\tparams[\"annotatedDate\"] = prov.AtTime\n\t\t\t}\n\t\t\tparams[\"relevanceScore\"] = relevanceScore\n\t\t\tparams[\"confidenceScore\"] = confidenceScore\n\t\t}\n\t}\n\n\trelation := getRelationshipFromPredicate(ann.Thing.Predicate)\n\tquery.Statement = createAnnotationRelationship(relation)\n\tquery.Parameters = map[string]interface{}{\n\t\t\"contentID\": contentUUID,\n\t\t\"conceptID\": thingID,\n\t\t\"platformVersion\": platformVersion,\n\t\t\"annProps\": params,\n\t}\n\treturn &query, nil\n}\n\nfunc extractDataFromProvenance(prov *provenance) (string, int64, float64, float64, bool, error) {\n\tif len(prov.Scores) == 0 {\n\t\treturn \"\", -1, -1, -1, false, nil\n\t}\n\tvar annotatedBy string\n\tvar annotatedDateEpoch int64\n\tvar confidenceScore, relevanceScore float64\n\tvar err error\n\tif prov.AgentRole != \"\" {\n\t\tannotatedBy, err = extractUUIDFromURI(prov.AgentRole)\n\t}\n\tif prov.AtTime != \"\" {\n\t\tannotatedDateEpoch, err = convertAnnotatedDateToEpoch(prov.AtTime)\n\t}\n\trelevanceScore, confidenceScore, err = extractScores(prov.Scores)\n\n\tif err != nil {\n\t\treturn \"\", -1, -1, -1, true, err\n\t}\n\treturn annotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, true, nil\n}\n\nfunc extractUUIDFromURI(uri string) (string, error) {\n\tresult := uuidExtractRegex.FindStringSubmatch(uri)\n\tif len(result) == 2 {\n\t\treturn result[1], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Couldn't extract uuid from uri %s\", uri)\n}\n\nfunc convertAnnotatedDateToEpoch(annotatedDateString string) (int64, error) {\n\tdatetimeEpoch, err := time.Parse(time.RFC3339, annotatedDateString)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn datetimeEpoch.Unix(), nil\n}\n\nfunc extractScores(scores []score) (float64, float64, error) {\n\tvar relevanceScore, confidenceScore float64\n\tfor _, score := range scores {\n\t\tscoringSystem := score.ScoringSystem\n\t\tvalue := score.Value\n\t\tswitch scoringSystem {\n\t\tcase relevanceScoringSystem:\n\t\t\trelevanceScore = value\n\t\tcase confidenceScoringSystem:\n\t\t\tconfidenceScore = value\n\t\t}\n\t}\n\treturn relevanceScore, confidenceScore, nil\n}\n\nfunc dropAllAnnotationsQuery(contentUUID string, platformVersion string) *neoism.CypherQuery {\n\n\tvar matchStmtTemplate string\n\n\t\/\/TODO hard-coded verification:\n\t\/\/ -> necessary for brands - which got written by content-api with isClassifiedBy relationship, and should not be deleted by annotations-rw\n\t\/\/ -> so far brands are the only v2 concepts which have isClassifiedBy relationship; as soon as this changes: implementation needs to be updated\n\tif platformVersion == \"v2\" {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r:MENTIONS{platformVersion:{platformVersion}}]->(t:Thing)\n DELETE r`\n\t} else {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r]->(t:Thing)\n\t\t\tWHERE r.platformVersion={platformVersion}\n DELETE r`\n\t}\n\n\tquery := neoism.CypherQuery{}\n\tquery.Statement = matchStmtTemplate\n\tquery.Parameters = neoism.Props{\"contentID\": contentUUID, \"platformVersion\": platformVersion}\n\treturn &query\n}\n\nfunc validateAnnotations(annotations *annotations) error {\n\t\/\/TODO - for consistency, we should probably just not create the annotation?\n\tfor _, annotation := range *annotations {\n\t\tif annotation.Thing.ID == \"\" {\n\t\t\treturn ValidationError{fmt.Sprintf(\"Concept uuid missing for annotation %+v\", annotation)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ValidationError is thrown when the annotations are not valid because mandatory information is missing\ntype ValidationError struct {\n\tMsg string\n}\n\nfunc (v ValidationError) Error() string {\n\treturn v.Msg\n}\n\nfunc mapToResponseFormat(ann *annotation) {\n\tann.Thing.ID = mapper.IDURL(ann.Thing.ID)\n\t\/\/ We expect only ONE provenance - provenance value is considered valid even if the AgentRole is not specified. See: v1 - isClassifiedBy\n\tfor idx := range ann.Provenances {\n\t\tif ann.Provenances[idx].AgentRole != \"\" {\n\t\t\tann.Provenances[idx].AgentRole = mapper.IDURL(ann.Provenances[idx].AgentRole)\n\t\t}\n\t}\n}\n<commit_msg>Insert default Identifier label beside UPPIdentifier.<commit_after>package annotations\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"regexp\"\n\n\t\"github.com\/Financial-Times\/neo-model-utils-go\/mapper\"\n\t\"github.com\/Financial-Times\/neo-utils-go\/neoutils\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/jmcvetta\/neoism\"\n)\n\nvar uuidExtractRegex = regexp.MustCompile(\".*\/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$\")\n\n\/\/ Service interface. Compatible with the baserwftapp service EXCEPT for\n\/\/ 1) the Write function, which has signature Write(thing interface{}) error...\n\/\/ 2) the DecodeJson function, which has signature DecodeJSON(*json.Decoder) (thing interface{}, identity string, err error)\n\/\/ The problem is that we have a list of things, and the uuid is for a related OTHER thing\n\/\/ TODO - move to implement a shared defined Service interface?\ntype Service interface {\n\tWrite(contentUUID string, thing interface{}) (err error)\n\tRead(contentUUID string) (thing interface{}, found bool, err error)\n\tDelete(contentUUID string) (found bool, err error)\n\tCheck() (err error)\n\tDecodeJSON(*json.Decoder) (thing interface{}, err error)\n\tCount() (int, error)\n\tInitialise() error\n}\n\n\/\/holds the Neo4j-specific information\ntype service struct {\n\tcypherRunner neoutils.CypherRunner\n\tindexManager neoutils.IndexManager\n\tplatformVersion string\n}\n\n\/\/NewAnnotationsService instantiate driver\nfunc NewAnnotationsService(cypherRunner neoutils.CypherRunner, indexManager neoutils.IndexManager, platformVersion string) service {\n\tif platformVersion == \"\" {\n\t\tlog.Fatalf(\"PlatformVersion was not specified!\")\n\t}\n\treturn service{cypherRunner, indexManager, platformVersion}\n}\n\n\/\/ DecodeJSON decodes to a list of annotations, for ease of use this is a struct itself\nfunc (s service) DecodeJSON(dec *json.Decoder) (interface{}, error) {\n\ta := annotations{}\n\terr := dec.Decode(&a)\n\treturn a, err\n}\n\nfunc (s service) Read(contentUUID string) (thing interface{}, found bool, err error) {\n\tresults := []annotation{}\n\n\t\/\/TODO shouldn't return Provenances if none of the scores, agentRole or atTime are set\n\tstatementTemplate := `\n\t\t\t\t\tMATCH (c:Thing{uuid:{contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing)\n\t\t\t\t\tMATCH (cc:Thing)\n\t\t\t\t\tWITH c, cc, rel, {id:cc.uuid,prefLabel:cc.prefLabel,types:labels(cc),predicate:type(rel)} as thing,\n\t\t\t\t\tcollect(\n\t\t\t\t\t\t{scores:[\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.relevanceScore},\n\t\t\t\t\t\t\t{scoringSystem:'%s', value:rel.confidenceScore}],\n\t\t\t\t\t\tagentRole:rel.annotatedBy,\n\t\t\t\t\t\tatTime:rel.annotatedDate}) as provenances\n\t\t\t\t\tRETURN thing, provenances ORDER BY thing.id\n\t\t\t\t\t\t\t\t\t`\n\tstatement := fmt.Sprintf(statementTemplate, relevanceScoringSystem, confidenceScoringSystem)\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: statement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\terr = s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\tif err != nil {\n\t\tlog.Errorf(\"Error looking up uuid %s with query %s from neoism: %+v\", contentUUID, query.Statement, err)\n\t\treturn annotations{}, false, fmt.Errorf(\"Error accessing Annotations datastore for uuid: %s\", contentUUID)\n\t}\n\tlog.Debugf(\"CypherResult Read Annotations for uuid: %s was: %+v\", contentUUID, results)\n\tif (len(results)) == 0 {\n\t\treturn annotations{}, false, nil\n\t}\n\n\tfor idx := range results {\n\t\tmapToResponseFormat(&results[idx])\n\t}\n\n\treturn results, true, nil\n}\n\n\/\/Delete removes all the annotations for this content. Ignore the nodes on either end -\n\/\/may leave nodes that are only 'things' inserted by this writer: clean up\n\/\/as a result of this will need to happen externally if required\nfunc (s service) Delete(contentUUID string) (bool, error) {\n\n\tvar deleteStatement string\n\n\tif s.platformVersion == \"v2\" {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel:MENTIONS{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t} else {\n\t\tdeleteStatement = `MATCH (c:Thing{uuid: {contentUUID}})-[rel{platformVersion:{platformVersion}}]->(cc:Thing) DELETE rel`\n\t}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: deleteStatement,\n\t\tParameters: neoism.Props{\"contentUUID\": contentUUID, \"platformVersion\": s.platformVersion},\n\t\tIncludeStats: true,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tstats, err := query.Stats()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar found bool\n\tif stats.ContainsUpdates {\n\t\tfound = true\n\t}\n\n\treturn found, err\n}\n\n\/\/Write a set of annotations associated with a piece of content. Any annotations\n\/\/already there will be removed\nfunc (s service) Write(contentUUID string, thing interface{}) (err error) {\n\tannotationsToWrite := thing.(annotations)\n\n\tif contentUUID == \"\" {\n\t\treturn errors.New(\"Content uuid is required\")\n\t}\n\tif err := validateAnnotations(&annotationsToWrite); err != nil {\n\t\tlog.Warnf(\"Validation of supplied annotations failed\")\n\t\treturn err\n\t}\n\n\tif len(annotationsToWrite) == 0 {\n\t\tlog.Warnf(\"No new annotations supplied for content uuid: %s\", contentUUID)\n\t}\n\n\tqueries := append([]*neoism.CypherQuery{}, dropAllAnnotationsQuery(contentUUID, s.platformVersion))\n\n\tvar statements = []string{}\n\tfor _, annotationToWrite := range annotationsToWrite {\n\t\tquery, err := createAnnotationQuery(contentUUID, annotationToWrite, s.platformVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatements = append(statements, query.Statement)\n\t\tqueries = append(queries, query)\n\t}\n\tlog.Infof(\"Updated Annotations for content uuid: %s\", contentUUID)\n\tlog.Debugf(\"For update, ran statements: %+v\", statements)\n\n\treturn s.cypherRunner.CypherBatch(queries)\n}\n\n\/\/ Check tests neo4j by running a simple cypher query\nfunc (s service) Check() error {\n\treturn neoutils.Check(s.cypherRunner)\n}\n\nfunc (s service) Count() (int, error) {\n\tresults := []struct {\n\t\tCount int `json:\"c\"`\n\t}{}\n\n\tquery := &neoism.CypherQuery{\n\t\tStatement: `MATCH ()-[r{platformVersion:{platformVersion}}]->() RETURN count(r) as c`,\n\t\tParameters: neoism.Props{\"platformVersion\": s.platformVersion},\n\t\tResult: &results,\n\t}\n\n\terr := s.cypherRunner.CypherBatch([]*neoism.CypherQuery{query})\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn results[0].Count, nil\n}\n\nfunc (s service) Initialise() error {\n\treturn nil \/\/ No constraints need to be set up\n}\n\nfunc createAnnotationRelationship(relation string) (statement string) {\n\tstmt := `\n MERGE (content:Thing{uuid:{contentID}})\n MERGE (upp:Identifier:UPPIdentifier{value:{conceptID}})\n MERGE (upp)-[:IDENTIFIES]->(concept:Thing) ON CREATE SET concept.uuid = {conceptID}\n MERGE (content)-[pred:%s{platformVersion:{platformVersion}}]->(concept)\n SET pred={annProps}\n `\n\tstatement = fmt.Sprintf(stmt, relation)\n\treturn statement\n}\n\nfunc getRelationshipFromPredicate(predicate string) (relation string) {\n\tif predicate != \"\" {\n\t\trelation = relations[predicate]\n\t} else {\n\t\trelation = relations[\"mentions\"]\n\t}\n\treturn relation\n}\n\nfunc createAnnotationQuery(contentUUID string, ann annotation, platformVersion string) (*neoism.CypherQuery, error) {\n\tquery := neoism.CypherQuery{}\n\tthingID, err := extractUUIDFromURI(ann.Thing.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/todo temporary change to deal with multiple provenances\n\t\/*if len(ann.Provenances) > 1 {\n\t\treturn nil, errors.New(\"Cannot insert a MENTIONS annotation with multiple provenances\")\n\t}*\/\n\n\tvar prov provenance\n\tparams := map[string]interface{}{}\n\tparams[\"platformVersion\"] = platformVersion\n\n\tif len(ann.Provenances) >= 1 {\n\t\tprov = ann.Provenances[0]\n\t\tannotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, supplied, err := extractDataFromProvenance(&prov)\n\n\t\tif err != nil {\n\t\t\tlog.Infof(\"ERROR=%s\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif supplied == true {\n\t\t\tif annotatedBy != \"\" {\n\t\t\t\tparams[\"annotatedBy\"] = annotatedBy\n\t\t\t}\n\t\t\tif prov.AtTime != \"\" {\n\t\t\t\tparams[\"annotatedDateEpoch\"] = annotatedDateEpoch\n\t\t\t\tparams[\"annotatedDate\"] = prov.AtTime\n\t\t\t}\n\t\t\tparams[\"relevanceScore\"] = relevanceScore\n\t\t\tparams[\"confidenceScore\"] = confidenceScore\n\t\t}\n\t}\n\n\trelation := getRelationshipFromPredicate(ann.Thing.Predicate)\n\tquery.Statement = createAnnotationRelationship(relation)\n\tquery.Parameters = map[string]interface{}{\n\t\t\"contentID\": contentUUID,\n\t\t\"conceptID\": thingID,\n\t\t\"platformVersion\": platformVersion,\n\t\t\"annProps\": params,\n\t}\n\treturn &query, nil\n}\n\nfunc extractDataFromProvenance(prov *provenance) (string, int64, float64, float64, bool, error) {\n\tif len(prov.Scores) == 0 {\n\t\treturn \"\", -1, -1, -1, false, nil\n\t}\n\tvar annotatedBy string\n\tvar annotatedDateEpoch int64\n\tvar confidenceScore, relevanceScore float64\n\tvar err error\n\tif prov.AgentRole != \"\" {\n\t\tannotatedBy, err = extractUUIDFromURI(prov.AgentRole)\n\t}\n\tif prov.AtTime != \"\" {\n\t\tannotatedDateEpoch, err = convertAnnotatedDateToEpoch(prov.AtTime)\n\t}\n\trelevanceScore, confidenceScore, err = extractScores(prov.Scores)\n\n\tif err != nil {\n\t\treturn \"\", -1, -1, -1, true, err\n\t}\n\treturn annotatedBy, annotatedDateEpoch, relevanceScore, confidenceScore, true, nil\n}\n\nfunc extractUUIDFromURI(uri string) (string, error) {\n\tresult := uuidExtractRegex.FindStringSubmatch(uri)\n\tif len(result) == 2 {\n\t\treturn result[1], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Couldn't extract uuid from uri %s\", uri)\n}\n\nfunc convertAnnotatedDateToEpoch(annotatedDateString string) (int64, error) {\n\tdatetimeEpoch, err := time.Parse(time.RFC3339, annotatedDateString)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn datetimeEpoch.Unix(), nil\n}\n\nfunc extractScores(scores []score) (float64, float64, error) {\n\tvar relevanceScore, confidenceScore float64\n\tfor _, score := range scores {\n\t\tscoringSystem := score.ScoringSystem\n\t\tvalue := score.Value\n\t\tswitch scoringSystem {\n\t\tcase relevanceScoringSystem:\n\t\t\trelevanceScore = value\n\t\tcase confidenceScoringSystem:\n\t\t\tconfidenceScore = value\n\t\t}\n\t}\n\treturn relevanceScore, confidenceScore, nil\n}\n\nfunc dropAllAnnotationsQuery(contentUUID string, platformVersion string) *neoism.CypherQuery {\n\n\tvar matchStmtTemplate string\n\n\t\/\/TODO hard-coded verification:\n\t\/\/ -> necessary for brands - which got written by content-api with isClassifiedBy relationship, and should not be deleted by annotations-rw\n\t\/\/ -> so far brands are the only v2 concepts which have isClassifiedBy relationship; as soon as this changes: implementation needs to be updated\n\tif platformVersion == \"v2\" {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r:MENTIONS{platformVersion:{platformVersion}}]->(t:Thing)\n DELETE r`\n\t} else {\n\t\tmatchStmtTemplate = `OPTIONAL MATCH (:Thing{uuid:{contentID}})-[r]->(t:Thing)\n\t\t\tWHERE r.platformVersion={platformVersion}\n DELETE r`\n\t}\n\n\tquery := neoism.CypherQuery{}\n\tquery.Statement = matchStmtTemplate\n\tquery.Parameters = neoism.Props{\"contentID\": contentUUID, \"platformVersion\": platformVersion}\n\treturn &query\n}\n\nfunc validateAnnotations(annotations *annotations) error {\n\t\/\/TODO - for consistency, we should probably just not create the annotation?\n\tfor _, annotation := range *annotations {\n\t\tif annotation.Thing.ID == \"\" {\n\t\t\treturn ValidationError{fmt.Sprintf(\"Concept uuid missing for annotation %+v\", annotation)}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ValidationError is thrown when the annotations are not valid because mandatory information is missing\ntype ValidationError struct {\n\tMsg string\n}\n\nfunc (v ValidationError) Error() string {\n\treturn v.Msg\n}\n\nfunc mapToResponseFormat(ann *annotation) {\n\tann.Thing.ID = mapper.IDURL(ann.Thing.ID)\n\t\/\/ We expect only ONE provenance - provenance value is considered valid even if the AgentRole is not specified. See: v1 - isClassifiedBy\n\tfor idx := range ann.Provenances {\n\t\tif ann.Provenances[idx].AgentRole != \"\" {\n\t\t\tann.Provenances[idx].AgentRole = mapper.IDURL(ann.Provenances[idx].AgentRole)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package todoist\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nvar testIDs = []struct {\n\ts string\n\tv ID\n\te error\n}{\n\t{\n\t\t\"1000000\",\n\t\tID(\"1000000\"),\n\t\tnil,\n\t},\n\t{\n\t\t\"df43406d-db7e-4ea5-b3b4-c822ccdab3bf\",\n\t\tID(\"df43406d-db7e-4ea5-b3b4-c822ccdab3bf\"),\n\t\tnil,\n\t},\n\t{\n\t\t\"invalid\",\n\t\t\"\",\n\t\terrors.New(\"Invalid ID: invalid\"),\n\t},\n}\n\nfunc TestNewID(t *testing.T) {\n\tfor _, test := range testIDs {\n\t\tv, err := NewID(test.s)\n\t\tif !reflect.DeepEqual(err, test.e) {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.e, err)\n\t\t} else if test.e == nil && v != test.v {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.v, v)\n\t\t}\n\t}\n}\n\nfunc TestID_MarshalJSON(t *testing.T) {\n\ttest := testIDs[0]\n\tb, err := test.v.MarshalJSON()\n\tif err != nil || string(b) != test.s {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n\n\ttest = testIDs[1]\n\tb, err = test.v.MarshalJSON()\n\tif err != nil || string(b) != strconv.Quote(test.s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n\n\tb, err = ID(\"0\").MarshalJSON()\n\tif err != nil || string(b) != \"null\" {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n}\n\nfunc TestID_UnmarshalJSON(t *testing.T) {\n\tfor _, test := range testIDs {\n\t\tvar v ID\n\t\terr := v.UnmarshalJSON([]byte(test.s))\n\t\tif !reflect.DeepEqual(err, test.e) {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.e, err)\n\t\t} else if test.e == nil && v != test.v {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.v, v)\n\t\t}\n\t}\n\tvar v ID\n\terr := v.UnmarshalJSON([]byte(\"null\"))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpect error: %s\", err)\n\t}\n\tif v != ID(\"0\") {\n\t\tt.Errorf(\"Expect %s, but got %s\", ID(\"0\"), v)\n\t}\n}\n\nfunc TestIsTempID(t *testing.T) {\n\ttest := testIDs[0]\n\tif IsTempID(test.v) == true {\n\t\tt.Errorf(\"%s is not temp id, but returns true\", test.v)\n\t}\n\ttest = testIDs[1]\n\tif IsTempID(test.v) == false {\n\t\tt.Errorf(\"%s is temp id, but returns false\", test.v)\n\t}\n}\n\nfunc TestUUID_MarshalJSON(t *testing.T) {\n\ts := \"c0afd2be-7576-4fc4-8b3c-696c4c6cf794\"\n\tv := UUID(s)\n\tb, err := v.MarshalJSON()\n\tif err != nil || string(b) != strconv.Quote(s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(s), string(b))\n\t}\n}\n\nfunc TestUUID_UnmarshalJSON(t *testing.T) {\n\ts := \"c0afd2be-7576-4fc4-8b3c-696c4c6cf794\"\n\tvar v UUID\n\terr := v.UnmarshalJSON([]byte(strconv.Quote(s)))\n\tif err != nil || v != UUID(s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", UUID(s), v)\n\t}\n\n\ts = \"invalid\"\n\terr = v.UnmarshalJSON([]byte(s))\n\tif err == nil {\n\t\tt.Error(\"Expect error, but no error\")\n\t}\n}\n<commit_msg>fix test<commit_after>package todoist\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nvar testIDs = []struct {\n\ts string\n\tv ID\n\te error\n}{\n\t{\n\t\t\"1000000\",\n\t\tID(\"1000000\"),\n\t\tnil,\n\t},\n\t{\n\t\t\"df43406d-db7e-4ea5-b3b4-c822ccdab3bf\",\n\t\tID(\"df43406d-db7e-4ea5-b3b4-c822ccdab3bf\"),\n\t\tnil,\n\t},\n\t{\n\t\t\"invalid\",\n\t\t\"\",\n\t\terrors.New(\"invalid id: invalid\"),\n\t},\n}\n\nfunc TestNewID(t *testing.T) {\n\tfor _, test := range testIDs {\n\t\tv, err := NewID(test.s)\n\t\tif !reflect.DeepEqual(err, test.e) {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.e, err)\n\t\t} else if test.e == nil && v != test.v {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.v, v)\n\t\t}\n\t}\n}\n\nfunc TestID_MarshalJSON(t *testing.T) {\n\ttest := testIDs[0]\n\tb, err := test.v.MarshalJSON()\n\tif err != nil || string(b) != test.s {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n\n\ttest = testIDs[1]\n\tb, err = test.v.MarshalJSON()\n\tif err != nil || string(b) != strconv.Quote(test.s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n\n\tb, err = ID(\"0\").MarshalJSON()\n\tif err != nil || string(b) != \"null\" {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(test.s), string(b))\n\t}\n}\n\nfunc TestID_UnmarshalJSON(t *testing.T) {\n\tfor _, test := range testIDs {\n\t\tvar v ID\n\t\terr := v.UnmarshalJSON([]byte(test.s))\n\t\tif !reflect.DeepEqual(err, test.e) {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.e, err)\n\t\t} else if test.e == nil && v != test.v {\n\t\t\tt.Errorf(\"Expect %s, but got %s\", test.v, v)\n\t\t}\n\t}\n\tvar v ID\n\terr := v.UnmarshalJSON([]byte(\"null\"))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpect error: %s\", err)\n\t}\n\tif v != ID(\"0\") {\n\t\tt.Errorf(\"Expect %s, but got %s\", ID(\"0\"), v)\n\t}\n}\n\nfunc TestIsTempID(t *testing.T) {\n\ttest := testIDs[0]\n\tif IsTempID(test.v) == true {\n\t\tt.Errorf(\"%s is not temp id, but returns true\", test.v)\n\t}\n\ttest = testIDs[1]\n\tif IsTempID(test.v) == false {\n\t\tt.Errorf(\"%s is temp id, but returns false\", test.v)\n\t}\n}\n\nfunc TestUUID_MarshalJSON(t *testing.T) {\n\ts := \"c0afd2be-7576-4fc4-8b3c-696c4c6cf794\"\n\tv := UUID(s)\n\tb, err := v.MarshalJSON()\n\tif err != nil || string(b) != strconv.Quote(s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", strconv.Quote(s), string(b))\n\t}\n}\n\nfunc TestUUID_UnmarshalJSON(t *testing.T) {\n\ts := \"c0afd2be-7576-4fc4-8b3c-696c4c6cf794\"\n\tvar v UUID\n\terr := v.UnmarshalJSON([]byte(strconv.Quote(s)))\n\tif err != nil || v != UUID(s) {\n\t\tt.Errorf(\"Expect %s, but got %s\", UUID(s), v)\n\t}\n\n\ts = \"invalid\"\n\terr = v.UnmarshalJSON([]byte(s))\n\tif err == nil {\n\t\tt.Error(\"Expect error, but no error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package oauth\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\n\/\/ ServiceMock is a mocked object implementing ServiceInterface\ntype ServiceMock struct {\n\tmock.Mock\n}\n\n\/\/ ClientExists ...\nfunc (_m *ServiceMock) ClientExists(clientID string) bool {\n\tret := _m.Called(clientID)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(clientID)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}\n\n\/\/ FindClientByClientID ...\nfunc (_m *ServiceMock) FindClientByClientID(clientID string) (*Client, error) {\n\tret := _m.Called(clientID)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string) *Client); ok {\n\t\tr0 = rf(clientID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(clientID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateClient ...\nfunc (_m *ServiceMock) CreateClient(clientID string, secret string, redirectURI string) (*Client, error) {\n\tret := _m.Called(clientID, secret, redirectURI)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string, string, string) *Client); ok {\n\t\tr0 = rf(clientID, secret, redirectURI)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string, string) error); ok {\n\t\tr1 = rf(clientID, secret, redirectURI)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateClientTx ...\nfunc (_m *ServiceMock) CreateClientTx(tx *gorm.DB, clientID string, secret string, redirectURI string) (*Client, error) {\n\tret := _m.Called(tx, clientID, secret, redirectURI)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(*gorm.DB, string, string, string) *Client); ok {\n\t\tr0 = rf(tx, clientID, secret, redirectURI)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*gorm.DB, string, string, string) error); ok {\n\t\tr1 = rf(tx, clientID, secret, redirectURI)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ AuthClient ...\nfunc (_m *ServiceMock) AuthClient(clientID string, secret string) (*Client, error) {\n\tret := _m.Called(clientID, secret)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string, string) *Client); ok {\n\t\tr0 = rf(clientID, secret)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(clientID, secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ UserExists ...\nfunc (_m *ServiceMock) UserExists(username string) bool {\n\tret := _m.Called(username)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(username)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}\n\n\/\/ FindUserByUsername ...\nfunc (_m *ServiceMock) FindUserByUsername(username string) (*User, error) {\n\tret := _m.Called(username)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string) *User); ok {\n\t\tr0 = rf(username)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(username)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateUser ...\nfunc (_m *ServiceMock) CreateUser(username string, password string) (*User, error) {\n\tret := _m.Called(username, password)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string, string) *User); ok {\n\t\tr0 = rf(username, password)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(username, password)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateUserTx ...\nfunc (_m *ServiceMock) CreateUserTx(tx *gorm.DB, username string, password string) (*User, error) {\n\tret := _m.Called(tx, username, password)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(*gorm.DB, string, string) *User); ok {\n\t\tr0 = rf(tx, username, password)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*gorm.DB, string, string) error); ok {\n\t\tr1 = rf(tx, username, password)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ SetPassword ...\nfunc (_m *ServiceMock) SetPassword(user *User, password string) error {\n\tret := _m.Called(user, password)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*User, string) error); ok {\n\t\tr0 = rf(user, password)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}\n\n\/\/ AuthUser ...\nfunc (_m *ServiceMock) AuthUser(username string, thePassword string) (*User, error) {\n\tret := _m.Called(username, thePassword)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string, string) *User); ok {\n\t\tr0 = rf(username, thePassword)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(username, thePassword)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetScope ...\nfunc (_m *ServiceMock) GetScope(requestedScope string) (string, error) {\n\tret := _m.Called(requestedScope)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = rf(requestedScope)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(requestedScope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GrantAuthorizationCode ...\nfunc (_m *ServiceMock) GrantAuthorizationCode(client *Client, user *User, redirectURI string, scope string) (*AuthorizationCode, error) {\n\tret := _m.Called(client, user, redirectURI, scope)\n\n\tvar r0 *AuthorizationCode\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string, string) *AuthorizationCode); ok {\n\t\tr0 = rf(client, user, redirectURI, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AuthorizationCode)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string, string) error); ok {\n\t\tr1 = rf(client, user, redirectURI, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GrantAccessToken ...\nfunc (_m *ServiceMock) GrantAccessToken(client *Client, user *User, scope string) (*AccessToken, error) {\n\tret := _m.Called(client, user, scope)\n\n\tvar r0 *AccessToken\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string) *AccessToken); ok {\n\t\tr0 = rf(client, user, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AccessToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string) error); ok {\n\t\tr1 = rf(client, user, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetOrCreateRefreshToken ...\nfunc (_m *ServiceMock) GetOrCreateRefreshToken(client *Client, user *User, scope string) (*RefreshToken, error) {\n\tret := _m.Called(client, user, scope)\n\n\tvar r0 *RefreshToken\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string) *RefreshToken); ok {\n\t\tr0 = rf(client, user, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*RefreshToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string) error); ok {\n\t\tr1 = rf(client, user, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetValidRefreshToken ...\nfunc (_m *ServiceMock) GetValidRefreshToken(token string, client *Client) (*RefreshToken, error) {\n\tret := _m.Called(token, client)\n\n\tvar r0 *RefreshToken\n\tif rf, ok := ret.Get(0).(func(string, *Client) *RefreshToken); ok {\n\t\tr0 = rf(token, client)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*RefreshToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, *Client) error); ok {\n\t\tr1 = rf(token, client)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ Authenticate ...\nfunc (_m *ServiceMock) Authenticate(token string) (*AccessToken, error) {\n\tret := _m.Called(token)\n\n\tvar r0 *AccessToken\n\tif rf, ok := ret.Get(0).(func(string) *AccessToken); ok {\n\t\tr0 = rf(token)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AccessToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(token)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\nfunc (_m *ServiceMock) tokensHandler(w http.ResponseWriter, r *http.Request) {\n\t_m.Called(w, r)\n}\n<commit_msg>Added introspectHandler to service mock.<commit_after>package oauth\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\n\/\/ ServiceMock is a mocked object implementing ServiceInterface\ntype ServiceMock struct {\n\tmock.Mock\n}\n\n\/\/ ClientExists ...\nfunc (_m *ServiceMock) ClientExists(clientID string) bool {\n\tret := _m.Called(clientID)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(clientID)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}\n\n\/\/ FindClientByClientID ...\nfunc (_m *ServiceMock) FindClientByClientID(clientID string) (*Client, error) {\n\tret := _m.Called(clientID)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string) *Client); ok {\n\t\tr0 = rf(clientID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(clientID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateClient ...\nfunc (_m *ServiceMock) CreateClient(clientID string, secret string, redirectURI string) (*Client, error) {\n\tret := _m.Called(clientID, secret, redirectURI)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string, string, string) *Client); ok {\n\t\tr0 = rf(clientID, secret, redirectURI)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string, string) error); ok {\n\t\tr1 = rf(clientID, secret, redirectURI)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateClientTx ...\nfunc (_m *ServiceMock) CreateClientTx(tx *gorm.DB, clientID string, secret string, redirectURI string) (*Client, error) {\n\tret := _m.Called(tx, clientID, secret, redirectURI)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(*gorm.DB, string, string, string) *Client); ok {\n\t\tr0 = rf(tx, clientID, secret, redirectURI)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*gorm.DB, string, string, string) error); ok {\n\t\tr1 = rf(tx, clientID, secret, redirectURI)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ AuthClient ...\nfunc (_m *ServiceMock) AuthClient(clientID string, secret string) (*Client, error) {\n\tret := _m.Called(clientID, secret)\n\n\tvar r0 *Client\n\tif rf, ok := ret.Get(0).(func(string, string) *Client); ok {\n\t\tr0 = rf(clientID, secret)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Client)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(clientID, secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ UserExists ...\nfunc (_m *ServiceMock) UserExists(username string) bool {\n\tret := _m.Called(username)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = rf(username)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}\n\n\/\/ FindUserByUsername ...\nfunc (_m *ServiceMock) FindUserByUsername(username string) (*User, error) {\n\tret := _m.Called(username)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string) *User); ok {\n\t\tr0 = rf(username)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(username)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateUser ...\nfunc (_m *ServiceMock) CreateUser(username string, password string) (*User, error) {\n\tret := _m.Called(username, password)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string, string) *User); ok {\n\t\tr0 = rf(username, password)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(username, password)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ CreateUserTx ...\nfunc (_m *ServiceMock) CreateUserTx(tx *gorm.DB, username string, password string) (*User, error) {\n\tret := _m.Called(tx, username, password)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(*gorm.DB, string, string) *User); ok {\n\t\tr0 = rf(tx, username, password)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*gorm.DB, string, string) error); ok {\n\t\tr1 = rf(tx, username, password)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ SetPassword ...\nfunc (_m *ServiceMock) SetPassword(user *User, password string) error {\n\tret := _m.Called(user, password)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*User, string) error); ok {\n\t\tr0 = rf(user, password)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}\n\n\/\/ AuthUser ...\nfunc (_m *ServiceMock) AuthUser(username string, thePassword string) (*User, error) {\n\tret := _m.Called(username, thePassword)\n\n\tvar r0 *User\n\tif rf, ok := ret.Get(0).(func(string, string) *User); ok {\n\t\tr0 = rf(username, thePassword)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = rf(username, thePassword)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetScope ...\nfunc (_m *ServiceMock) GetScope(requestedScope string) (string, error) {\n\tret := _m.Called(requestedScope)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = rf(requestedScope)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(requestedScope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GrantAuthorizationCode ...\nfunc (_m *ServiceMock) GrantAuthorizationCode(client *Client, user *User, redirectURI string, scope string) (*AuthorizationCode, error) {\n\tret := _m.Called(client, user, redirectURI, scope)\n\n\tvar r0 *AuthorizationCode\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string, string) *AuthorizationCode); ok {\n\t\tr0 = rf(client, user, redirectURI, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AuthorizationCode)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string, string) error); ok {\n\t\tr1 = rf(client, user, redirectURI, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GrantAccessToken ...\nfunc (_m *ServiceMock) GrantAccessToken(client *Client, user *User, scope string) (*AccessToken, error) {\n\tret := _m.Called(client, user, scope)\n\n\tvar r0 *AccessToken\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string) *AccessToken); ok {\n\t\tr0 = rf(client, user, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AccessToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string) error); ok {\n\t\tr1 = rf(client, user, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetOrCreateRefreshToken ...\nfunc (_m *ServiceMock) GetOrCreateRefreshToken(client *Client, user *User, scope string) (*RefreshToken, error) {\n\tret := _m.Called(client, user, scope)\n\n\tvar r0 *RefreshToken\n\tif rf, ok := ret.Get(0).(func(*Client, *User, string) *RefreshToken); ok {\n\t\tr0 = rf(client, user, scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*RefreshToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*Client, *User, string) error); ok {\n\t\tr1 = rf(client, user, scope)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ GetValidRefreshToken ...\nfunc (_m *ServiceMock) GetValidRefreshToken(token string, client *Client) (*RefreshToken, error) {\n\tret := _m.Called(token, client)\n\n\tvar r0 *RefreshToken\n\tif rf, ok := ret.Get(0).(func(string, *Client) *RefreshToken); ok {\n\t\tr0 = rf(token, client)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*RefreshToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, *Client) error); ok {\n\t\tr1 = rf(token, client)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\n\/\/ Authenticate ...\nfunc (_m *ServiceMock) Authenticate(token string) (*AccessToken, error) {\n\tret := _m.Called(token)\n\n\tvar r0 *AccessToken\n\tif rf, ok := ret.Get(0).(func(string) *AccessToken); ok {\n\t\tr0 = rf(token)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*AccessToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(token)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}\n\nfunc (_m *ServiceMock) tokensHandler(w http.ResponseWriter, r *http.Request) {\n\t_m.Called(w, r)\n}\n\nfunc (_m *ServiceMock) introspectHandler(w http.ResponseWriter, r *http.Request) {\n\t_m.Called(w, r)\n}\n<|endoftext|>"} {"text":"<commit_before>package svc_test\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nerdalize\/nerd\/svc\"\n)\n\nfunc TestUpdateDataset(t *testing.T) {\n\tdi, clean := testDI(t)\n\tdefer clean()\n\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, time.Second*5)\n\tdefer cancel()\n\n\tkube := svc.NewKube(di)\n\tout, err := kube.CreateDataset(ctx, &svc.CreateDatasetInput{Name: \"my-dataset\", Bucket: \"bogus\", Key: \"my-key\"})\n\tok(t, err)\n\n\t_, err = kube.UpdateDataset(ctx, &svc.UpdateDatasetInput{\n\t\tName: out.Name,\n\t\tInputFor: \"j-123abc\",\n\t\tOutputFrom: \"j-456def\",\n\t\tSize: 1337,\n\t})\n\tok(t, err)\n\n\to, err := kube.GetDataset(ctx, &svc.GetDatasetInput{Name: out.Name})\n\tok(t, err)\n\tassert(t, strings.Contains(strings.Join(o.InputFor, \"\"), \"j-123abc\"), \"expected dataset to be up to date\")\n\tassert(t, strings.Contains(strings.Join(o.OutputFrom, \"\"), \"j-456def\"), \"expected dataset to be up to date and to contain job info for output section\")\n\tassert(t, o.Size == 1337, \"expected dataset to be up to date and contain new size\")\n}\n<commit_msg>Add test for updating dataset without new data<commit_after>package svc_test\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nerdalize\/nerd\/svc\"\n)\n\nfunc TestUpdateDataset(t *testing.T) {\n\tdi, clean := testDI(t)\n\tdefer clean()\n\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, time.Second*5)\n\tdefer cancel()\n\n\tkube := svc.NewKube(di)\n\tout, err := kube.CreateDataset(ctx, &svc.CreateDatasetInput{Name: \"my-dataset\", Bucket: \"bogus\", Key: \"my-key\"})\n\tok(t, err)\n\n\tnewSize := uint64(1337)\n\t_, err = kube.UpdateDataset(ctx, &svc.UpdateDatasetInput{\n\t\tName: out.Name,\n\t\tInputFor: \"j-123abc\",\n\t\tOutputFrom: \"j-456def\",\n\t\tSize: &newSize,\n\t})\n\tok(t, err)\n\n\to, err := kube.GetDataset(ctx, &svc.GetDatasetInput{Name: out.Name})\n\tok(t, err)\n\tassert(t, strings.Contains(strings.Join(o.InputFor, \"\"), \"j-123abc\"), \"expected dataset to be up to date\")\n\tassert(t, strings.Contains(strings.Join(o.OutputFrom, \"\"), \"j-456def\"), \"expected dataset to be up to date and to contain job info for output section\")\n\tassert(t, o.Size == 1337, \"expected dataset to be up to date and contain new size\")\n\n\t\/\/Check if the output remains the same when not specifying any changes\n\t_, err = kube.UpdateDataset(ctx, &svc.UpdateDatasetInput{\n\t\tName: out.Name,\n\t})\n\tok(t, err)\n\n\to2, err := kube.GetDataset(ctx, &svc.GetDatasetInput{Name: out.Name})\n\tok(t, err)\n\tequals(t, o.Size, o2.Size)\n\tequals(t, o.InputFor, o2.InputFor)\n\tequals(t, o.OutputFrom, o2.OutputFrom)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Gosl Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\n\t\"github.com\/cpmech\/gosl\/io\"\n)\n\nfunc main() {\n\n\t\/\/ catch errors\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tio.PfRed(\"ERROR: %v\\n\", err)\n\t\t}\n\t}()\n\n\t\/\/ input data\n\tfilename, fnkey := io.ArgToFilename(0, \"data\/equations.txt\", \"\", true)\n\tio.Pf(\"\\n%s\\n\", io.ArgsTable(\"INPUT ARGUMENTS\",\n\t\t\"file with equations\", \"filename\", filename,\n\t))\n\n\tio.Pforan(\"fnkey = %v\\n\", fnkey)\n\n\t\/\/ open file\n\tf, err := io.OpenFileR(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ constants\n\tMAXNIDX := 100\n\tINDICES := []string{\"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"r\", \"s\", \"p\", \"q\"}\n\n\t\/\/ read file\n\tvar buf bytes.Buffer\n\tio.ReadLinesFile(f, func(idx int, line string) (stop bool) {\n\n\t\t\/\/ indices\n\t\tl := line\n\t\tfor i := 0; i < MAXNIDX; i++ {\n\t\t\tl = strings.Replace(l, io.Sf(\"[%d]\", i), io.Sf(\"_{%d}\", i), -1)\n\t\t}\n\t\tfor _, idx := range INDICES {\n\t\t\tl = strings.Replace(l, io.Sf(\"[%s]\", idx), io.Sf(\"_{%s}\", idx), -1)\n\t\t}\n\n\t\t\/\/ constants\n\t\tl = strings.Replace(l, \"math.Sqrt2\", \"\\\\sqrt{2}\", -1)\n\t\tl = strings.Replace(l, \"SQ2\", \"\\\\sqrt{2}\", -1)\n\n\t\t\/\/ functions\n\t\tl = strings.Replace(l, \"math.Sqrt\", \"\\\\sqrt\", -1)\n\t\tl = strings.Replace(l, \"math.Pow\", \"\\\\pow\", -1)\n\t\tl = strings.Replace(l, \"math.Exp\", \"\\\\exp\", -1)\n\t\tl = strings.Replace(l, \"math.Sin\", \"\\\\sin\", -1)\n\t\tl = strings.Replace(l, \"math.Cos\", \"\\\\cos\", -1)\n\n\t\t\/\/ star\n\t\tl = strings.Replace(l, \"*\", \" \\\\, \", -1)\n\n\t\t\/\/ colon-equal\n\t\tl = strings.Replace(l, \":=\", \"=\", -1)\n\n\t\t\/\/ add to results\n\t\tio.Ff(&buf, \"%s\\n\", l)\n\t\treturn\n\t})\n\n\t\/\/ write file\n\tio.WriteFileVD(\"\/tmp\/gosl\", fnkey+\".tex\", &buf)\n}\n<commit_msg>Handle return error<commit_after>\/\/ Copyright 2016 The Gosl Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/cpmech\/gosl\/chk\"\n\t\"github.com\/cpmech\/gosl\/io\"\n)\n\nfunc status(err error) {\n\tif err != nil {\n\t\tio.Pf(\"ERROR: %v\\n\", err)\n\t\tchk.Verbose = true\n\t\tchk.CallerInfo(2)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc main() {\n\n\t\/\/ catch errors\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tio.PfRed(\"ERROR: %v\\n\", err)\n\t\t}\n\t}()\n\n\t\/\/ input data\n\tfilename, fnkey := io.ArgToFilename(0, \"data\/equations.txt\", \"\", true)\n\tio.Pf(\"\\n%s\\n\", io.ArgsTable(\"INPUT ARGUMENTS\",\n\t\t\"file with equations\", \"filename\", filename,\n\t))\n\n\tio.Pforan(\"fnkey = %v\\n\", fnkey)\n\n\t\/\/ open file\n\tf, err := io.OpenFileR(filename)\n\tstatus(err)\n\n\t\/\/ constants\n\tMAXNIDX := 100\n\tINDICES := []string{\"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"r\", \"s\", \"p\", \"q\"}\n\n\t\/\/ read file\n\tvar buf bytes.Buffer\n\terr = io.ReadLinesFile(f, func(idx int, line string) (stop bool) {\n\n\t\t\/\/ indices\n\t\tl := line\n\t\tfor i := 0; i < MAXNIDX; i++ {\n\t\t\tl = strings.Replace(l, io.Sf(\"[%d]\", i), io.Sf(\"_{%d}\", i), -1)\n\t\t}\n\t\tfor _, idx := range INDICES {\n\t\t\tl = strings.Replace(l, io.Sf(\"[%s]\", idx), io.Sf(\"_{%s}\", idx), -1)\n\t\t}\n\n\t\t\/\/ constants\n\t\tl = strings.Replace(l, \"math.Sqrt2\", \"\\\\sqrt{2}\", -1)\n\t\tl = strings.Replace(l, \"SQ2\", \"\\\\sqrt{2}\", -1)\n\n\t\t\/\/ functions\n\t\tl = strings.Replace(l, \"math.Sqrt\", \"\\\\sqrt\", -1)\n\t\tl = strings.Replace(l, \"math.Pow\", \"\\\\pow\", -1)\n\t\tl = strings.Replace(l, \"math.Exp\", \"\\\\exp\", -1)\n\t\tl = strings.Replace(l, \"math.Sin\", \"\\\\sin\", -1)\n\t\tl = strings.Replace(l, \"math.Cos\", \"\\\\cos\", -1)\n\n\t\t\/\/ star\n\t\tl = strings.Replace(l, \"*\", \" \\\\, \", -1)\n\n\t\t\/\/ colon-equal\n\t\tl = strings.Replace(l, \":=\", \"=\", -1)\n\n\t\t\/\/ add to results\n\t\tio.Ff(&buf, \"%s\\n\", l)\n\t\treturn\n\t})\n\tstatus(err)\n\n\t\/\/ write file\n\tio.WriteFileVD(\"\/tmp\/gosl\", fnkey+\".tex\", &buf)\n}\n<|endoftext|>"} {"text":"<commit_before>package wall\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst ruinsFactor = 0.20\n\nvar dotsMaskOne = engine.NewDotsMask([][]uint8{{1}})\n\nvar ruins = [...]*engine.DotsMask{\n\tdotsMaskOne,\n\tengine.DotsMaskSquare2x2,\n\tengine.DotsMaskTank,\n\tengine.DotsMaskHome,\n\tengine.DotsMaskCross,\n\tengine.DotsMaskDiagonal,\n\tengine.DotsMaskCrossSmall,\n\tengine.DotsMaskDiagonalSmall,\n\tengine.DotsMaskLabyrinth,\n}\n\nfunc calcRuinsCount(size uint16) int {\n\treturn int(float32(size) * ruinsFactor)\n}\n\ntype ErrCreateWallRuins string\n\nfunc (e ErrCreateWallRuins) Error() string {\n\treturn \"cannot create wall ruins: \" + string(e)\n}\n\nfunc NewWallRuins(world *world.World) (*Wall, error) {\n\tarea, err := engine.NewArea(world.Width(), world.Height())\n\tif err != nil {\n\t\treturn nil, ErrCreateWallRuins(err.Error())\n\t}\n\n\tsize := area.Size()\n\truinsCount := calcRuinsCount(size)\n\twallLocation := make(engine.Location, 0, ruinsCount)\n\n\tfor len(wallLocation) < ruinsCount {\n\t\tfor i := 0; i < len(ruins); i++ {\n\t\t\tmask := ruins[i].TurnRandom()\n\n\t\t\tif area.Width() >= mask.Width() && area.Height() >= mask.Height() {\n\t\t\t\trect, err := area.NewRandomRect(mask.Width(), mask.Height(), 0, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlocation := mask.Location(rect.X(), rect.Y())\n\t\t\t\tif len(location) > ruinsCount-len(wallLocation) {\n\t\t\t\t\tlocation = location[:ruinsCount-len(wallLocation)]\n\t\t\t\t}\n\n\t\t\t\twallLocation = append(wallLocation, location...)\n\t\t\t}\n\t\t}\n\t}\n\n\twall := &Wall{\n\t\tuuid: uuid.Must(uuid.NewV4()).String(),\n\t\tworld: world,\n\t\tmux: &sync.RWMutex{},\n\t}\n\n\tif resultLocation, err := world.CreateObjectAvailableDots(wall, wallLocation); err != nil {\n\t\treturn nil, ErrCreateWallRuins(err.Error())\n\t} else {\n\t\twall.mux.Lock()\n\t\twall.location = resultLocation\n\t\twall.mux.Unlock()\n\t}\n\n\treturn wall, nil\n}\n<commit_msg>Create new masks in ruins<commit_after>package wall\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst ruinsFactor = 0.20\n\nvar dotsMaskOne = engine.NewDotsMask([][]uint8{{1}})\n\nvar ruins = []*engine.DotsMask{\n\tdotsMaskOne,\n\tengine.DotsMaskSquare2x2,\n\tengine.DotsMaskTank,\n\tengine.DotsMaskHome1,\n\tengine.DotsMaskHome2,\n\tengine.DotsMaskCross,\n\tengine.DotsMaskDiagonal,\n\tengine.DotsMaskCrossSmall,\n\tengine.DotsMaskDiagonalSmall,\n\tengine.DotsMaskLabyrinth,\n\tengine.DotsMaskTunnel1,\n\tengine.DotsMaskTunnel2,\n}\n\nfunc calcRuinsCount(size uint16) int {\n\treturn int(float32(size) * ruinsFactor)\n}\n\ntype ErrCreateWallRuins string\n\nfunc (e ErrCreateWallRuins) Error() string {\n\treturn \"cannot create wall ruins: \" + string(e)\n}\n\nfunc NewWallRuins(world *world.World) (*Wall, error) {\n\tarea, err := engine.NewArea(world.Width(), world.Height())\n\tif err != nil {\n\t\treturn nil, ErrCreateWallRuins(err.Error())\n\t}\n\n\tsize := area.Size()\n\truinsCount := calcRuinsCount(size)\n\twallLocation := make(engine.Location, 0, ruinsCount)\n\n\tfor len(wallLocation) < ruinsCount {\n\t\tfor i := 0; i < len(ruins); i++ {\n\t\t\tmask := ruins[i].TurnRandom()\n\n\t\t\tif area.Width() >= mask.Width() && area.Height() >= mask.Height() {\n\t\t\t\trect, err := area.NewRandomRect(mask.Width(), mask.Height(), 0, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlocation := mask.Location(rect.X(), rect.Y())\n\t\t\t\tif len(location) > ruinsCount-len(wallLocation) {\n\t\t\t\t\tlocation = location[:ruinsCount-len(wallLocation)]\n\t\t\t\t}\n\n\t\t\t\twallLocation = append(wallLocation, location...)\n\t\t\t}\n\t\t}\n\t}\n\n\twall := &Wall{\n\t\tuuid: uuid.Must(uuid.NewV4()).String(),\n\t\tworld: world,\n\t\tmux: &sync.RWMutex{},\n\t}\n\n\tif resultLocation, err := world.CreateObjectAvailableDots(wall, wallLocation); err != nil {\n\t\treturn nil, ErrCreateWallRuins(err.Error())\n\t} else {\n\t\twall.mux.Lock()\n\t\twall.location = resultLocation\n\t\twall.mux.Unlock()\n\t}\n\n\treturn wall, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gorpc\n\n\/\/ Dispatcher .\ntype Dispatcher interface {\n\tDispatch(call *Request) (callReturn *Response, err error)\n\tID() uint16\n}\n<commit_msg>add String() string method to Dispatcher interface<commit_after>package gorpc\n\n\/\/ Dispatcher .\ntype Dispatcher interface {\n\tDispatch(call *Request) (callReturn *Response, err error)\n\tID() uint16\n\tString() string\n}\n<|endoftext|>"} {"text":"<commit_before>package openapi3\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\toas3 \"github.com\/getkin\/kin-openapi\/openapi3\"\n\t\"github.com\/grokify\/gocharts\/data\/table\"\n\t\"github.com\/grokify\/gotilla\/encoding\/jsonutil\"\n\t\"github.com\/grokify\/gotilla\/text\"\n)\n\ntype SpecMore struct {\n\tSpec *oas3.Swagger\n}\n\nfunc ReadSpecMore(path string, validate bool) (*SpecMore, error) {\n\tspec, err := ReadFile(path, validate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SpecMore{Spec: spec}, nil\n}\n\nfunc (s *SpecMore) SchemaCount() int {\n\tif s.Spec == nil {\n\t\treturn -1\n\t} else if s.Spec.Components.Schemas == nil {\n\t\treturn 0\n\t}\n\treturn len(s.Spec.Components.Schemas)\n}\n\nfunc (s *SpecMore) OperationsTable(columns *text.TextSet) (*table.TableData, error) {\n\treturn operationsTable(s.Spec, columns)\n}\n\nfunc operationsTable(spec *oas3.Swagger, columns *text.TextSet) (*table.TableData, error) {\n\tif columns == nil {\n\t\tcolumns = &text.TextSet{Texts: DefaultColumns()}\n\t}\n\ttbl := table.NewTableData()\n\ttbl.Name = spec.Info.Title\n\ttbl.Columns = columns.DisplayTexts()\n\n\ttgs, err := SpecTagGroups(spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tVisitOperations(spec, func(path, method string, op *oas3.Operation) {\n\t\trow := []string{}\n\n\t\tfor _, text := range columns.Texts {\n\t\t\tswitch text.Slug {\n\t\t\tcase \"method\":\n\t\t\t\trow = append(row, method)\n\t\t\tcase \"path\":\n\t\t\t\trow = append(row, path)\n\t\t\tcase \"operationId\":\n\t\t\t\trow = append(row, op.OperationID)\n\t\t\tcase \"summary\":\n\t\t\t\trow = append(row, op.Summary)\n\t\t\tcase \"tags\":\n\t\t\t\trow = append(row, strings.Join(op.Tags, \", \"))\n\t\t\tcase \"x-tag-groups\":\n\t\t\t\trow = append(row, strings.Join(\n\t\t\t\t\ttgs.GetTagGroupNamesForTagNames(op.Tags...), \", \"))\n\t\t\tdefault:\n\t\t\t\trow = append(row, GetExtensionPropStringOrEmpty(op.ExtensionProps, text.Slug))\n\t\t\t}\n\t\t}\n\n\t\ttbl.Records = append(tbl.Records, row)\n\t})\n\treturn &tbl, nil\n}\n\nfunc DefaultColumns() []text.Text {\n\ttexts := []text.Text{\n\t\t{\n\t\t\tDisplay: \"Method\",\n\t\t\tSlug: \"method\"},\n\t\t{\n\t\t\tDisplay: \"Path\",\n\t\t\tSlug: \"path\"},\n\t\t{\n\t\t\tDisplay: \"OperationID\",\n\t\t\tSlug: \"operationId\"},\n\t\t{\n\t\t\tDisplay: \"Summary\",\n\t\t\tSlug: \"summary\"},\n\t\t{\n\t\t\tDisplay: \"Tags\",\n\t\t\tSlug: \"tags\"},\n\t}\n\treturn texts\n}\n\n\/*\nfunc (s *SpecMore) OperationsTableOld() (*table.TableData, error) {\n\ttbl := table.NewTableData()\n\ttbl.Name = \"Operations\"\n\ttgs, err := SpecTagGroups(s.Spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddTagGroups := false\n\tif len(tgs.TagGroups) > 0 {\n\t\taddTagGroups = true\n\t\ttbl.Columns = []string{\"OperationId\", \"Summary\", \"Path\", \"Method\", \"Tag Groups\", \"Tags\"}\n\t} else {\n\t\ttbl.Columns = []string{\"OperationId\", \"Summary\", \"Path\", \"Method\", \"Tags\"}\n\t}\n\tops := s.OperationMetas()\n\tfor _, op := range ops {\n\t\tif addTagGroups {\n\t\t\ttagGroupNames := tgs.GetTagGroupNamesForTagNames(op.Tags...)\n\t\t\ttbl.Records = append(tbl.Records, []string{\n\t\t\t\top.OperationID,\n\t\t\t\top.Summary,\n\t\t\t\top.Path,\n\t\t\t\top.Method,\n\t\t\t\tstrings.Join(tagGroupNames, \",\"),\n\t\t\t\tstrings.Join(stringsutil.SliceCondenseSpace(op.Tags, true, true), \",\")})\n\t\t} else {\n\t\t\ttbl.Records = append(tbl.Records, []string{\n\t\t\t\top.OperationID,\n\t\t\t\top.Summary,\n\t\t\t\top.Path,\n\t\t\t\top.Method,\n\t\t\t\tstrings.Join(op.Tags, \",\")})\n\t\t}\n\t}\n\treturn &tbl, nil\n}\n*\/\n\nfunc (s *SpecMore) OperationMetas() []OperationMeta {\n\tometas := []OperationMeta{}\n\tif s.Spec == nil {\n\t\treturn ometas\n\t}\n\tfor url, path := range s.Spec.Paths {\n\t\tif path.Connect != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodConnect, path.Connect))\n\t\t}\n\t\tif path.Delete != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodDelete, path.Delete))\n\t\t}\n\t\tif path.Get != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodGet, path.Get))\n\t\t}\n\t\tif path.Head != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodHead, path.Head))\n\t\t}\n\t\tif path.Options != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodOptions, path.Options))\n\t\t}\n\t\tif path.Patch != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPatch, path.Patch))\n\t\t}\n\t\tif path.Post != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPost, path.Post))\n\t\t}\n\t\tif path.Put != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPut, path.Put))\n\t\t}\n\t\tif path.Trace != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodTrace, path.Trace))\n\t\t}\n\t}\n\n\treturn ometas\n}\n\nfunc (s *SpecMore) OperationsCount() uint {\n\treturn uint(len(s.OperationMetas()))\n}\n\nfunc (s *SpecMore) WriteFileJSON(filename string, perm os.FileMode, prefix, indent string) error {\n\tjsonData, err := s.Spec.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpretty := false\n\tif len(prefix) > 0 || len(indent) > 0 {\n\t\tpretty = true\n\t}\n\tif pretty {\n\t\tjsonData = jsonutil.PrettyPrint(jsonData, \"\", \" \")\n\t}\n\treturn ioutil.WriteFile(filename, jsonData, perm)\n}\n\nfunc (sm *SpecMore) WriteFileXLSX(filename string) error {\n\ttbl, err := sm.OperationsTable(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn table.WriteXLSX(filename, tbl)\n}\n\ntype TagsMore struct {\n\tTags oas3.Tags\n}\n\nfunc (tg *TagsMore) Get(tagName string) *oas3.Tag {\n\tfor _, tag := range tg.Tags {\n\t\tif tagName == tag.Name {\n\t\t\treturn tag\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>feat: openapi3: add SpecMore.SchemaNames(); add SpecMore.SchemaNameExists()<commit_after>package openapi3\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/grokify\/gotilla\/type\/stringsutil\"\n\n\toas3 \"github.com\/getkin\/kin-openapi\/openapi3\"\n\t\"github.com\/grokify\/gocharts\/data\/table\"\n\t\"github.com\/grokify\/gotilla\/encoding\/jsonutil\"\n\t\"github.com\/grokify\/gotilla\/text\"\n)\n\ntype SpecMore struct {\n\tSpec *oas3.Swagger\n}\n\nfunc ReadSpecMore(path string, validate bool) (*SpecMore, error) {\n\tspec, err := ReadFile(path, validate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SpecMore{Spec: spec}, nil\n}\n\nfunc (s *SpecMore) SchemaCount() int {\n\tif s.Spec == nil {\n\t\treturn -1\n\t} else if s.Spec.Components.Schemas == nil {\n\t\treturn 0\n\t}\n\treturn len(s.Spec.Components.Schemas)\n}\n\nfunc (s *SpecMore) OperationsTable(columns *text.TextSet) (*table.TableData, error) {\n\treturn operationsTable(s.Spec, columns)\n}\n\nfunc operationsTable(spec *oas3.Swagger, columns *text.TextSet) (*table.TableData, error) {\n\tif columns == nil {\n\t\tcolumns = &text.TextSet{Texts: DefaultColumns()}\n\t}\n\ttbl := table.NewTableData()\n\ttbl.Name = spec.Info.Title\n\ttbl.Columns = columns.DisplayTexts()\n\n\ttgs, err := SpecTagGroups(spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tVisitOperations(spec, func(path, method string, op *oas3.Operation) {\n\t\trow := []string{}\n\n\t\tfor _, text := range columns.Texts {\n\t\t\tswitch text.Slug {\n\t\t\tcase \"method\":\n\t\t\t\trow = append(row, method)\n\t\t\tcase \"path\":\n\t\t\t\trow = append(row, path)\n\t\t\tcase \"operationId\":\n\t\t\t\trow = append(row, op.OperationID)\n\t\t\tcase \"summary\":\n\t\t\t\trow = append(row, op.Summary)\n\t\t\tcase \"tags\":\n\t\t\t\trow = append(row, strings.Join(op.Tags, \", \"))\n\t\t\tcase \"x-tag-groups\":\n\t\t\t\trow = append(row, strings.Join(\n\t\t\t\t\ttgs.GetTagGroupNamesForTagNames(op.Tags...), \", \"))\n\t\t\tdefault:\n\t\t\t\trow = append(row, GetExtensionPropStringOrEmpty(op.ExtensionProps, text.Slug))\n\t\t\t}\n\t\t}\n\n\t\ttbl.Records = append(tbl.Records, row)\n\t})\n\treturn &tbl, nil\n}\n\nfunc DefaultColumns() []text.Text {\n\ttexts := []text.Text{\n\t\t{\n\t\t\tDisplay: \"Method\",\n\t\t\tSlug: \"method\"},\n\t\t{\n\t\t\tDisplay: \"Path\",\n\t\t\tSlug: \"path\"},\n\t\t{\n\t\t\tDisplay: \"OperationID\",\n\t\t\tSlug: \"operationId\"},\n\t\t{\n\t\t\tDisplay: \"Summary\",\n\t\t\tSlug: \"summary\"},\n\t\t{\n\t\t\tDisplay: \"Tags\",\n\t\t\tSlug: \"tags\"},\n\t}\n\treturn texts\n}\n\n\/*\nfunc (s *SpecMore) OperationsTableOld() (*table.TableData, error) {\n\ttbl := table.NewTableData()\n\ttbl.Name = \"Operations\"\n\ttgs, err := SpecTagGroups(s.Spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddTagGroups := false\n\tif len(tgs.TagGroups) > 0 {\n\t\taddTagGroups = true\n\t\ttbl.Columns = []string{\"OperationId\", \"Summary\", \"Path\", \"Method\", \"Tag Groups\", \"Tags\"}\n\t} else {\n\t\ttbl.Columns = []string{\"OperationId\", \"Summary\", \"Path\", \"Method\", \"Tags\"}\n\t}\n\tops := s.OperationMetas()\n\tfor _, op := range ops {\n\t\tif addTagGroups {\n\t\t\ttagGroupNames := tgs.GetTagGroupNamesForTagNames(op.Tags...)\n\t\t\ttbl.Records = append(tbl.Records, []string{\n\t\t\t\top.OperationID,\n\t\t\t\top.Summary,\n\t\t\t\top.Path,\n\t\t\t\top.Method,\n\t\t\t\tstrings.Join(tagGroupNames, \",\"),\n\t\t\t\tstrings.Join(stringsutil.SliceCondenseSpace(op.Tags, true, true), \",\")})\n\t\t} else {\n\t\t\ttbl.Records = append(tbl.Records, []string{\n\t\t\t\top.OperationID,\n\t\t\t\top.Summary,\n\t\t\t\top.Path,\n\t\t\t\top.Method,\n\t\t\t\tstrings.Join(op.Tags, \",\")})\n\t\t}\n\t}\n\treturn &tbl, nil\n}\n*\/\n\nfunc (s *SpecMore) OperationMetas() []OperationMeta {\n\tometas := []OperationMeta{}\n\tif s.Spec == nil {\n\t\treturn ometas\n\t}\n\tfor url, path := range s.Spec.Paths {\n\t\tif path.Connect != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodConnect, path.Connect))\n\t\t}\n\t\tif path.Delete != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodDelete, path.Delete))\n\t\t}\n\t\tif path.Get != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodGet, path.Get))\n\t\t}\n\t\tif path.Head != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodHead, path.Head))\n\t\t}\n\t\tif path.Options != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodOptions, path.Options))\n\t\t}\n\t\tif path.Patch != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPatch, path.Patch))\n\t\t}\n\t\tif path.Post != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPost, path.Post))\n\t\t}\n\t\tif path.Put != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodPut, path.Put))\n\t\t}\n\t\tif path.Trace != nil {\n\t\t\tometas = append(ometas, OperationToMeta(url, http.MethodTrace, path.Trace))\n\t\t}\n\t}\n\n\treturn ometas\n}\n\nfunc (s *SpecMore) OperationsCount() uint {\n\treturn uint(len(s.OperationMetas()))\n}\n\nfunc (sm *SpecMore) SchemaNames() []string {\n\tschemaNames := []string{}\n\tfor schemaName := range sm.Spec.Components.Schemas {\n\t\tschemaNames = append(schemaNames, schemaName)\n\t}\n\treturn stringsutil.SliceCondenseSpace(schemaNames, true, true)\n}\n\nfunc (sm *SpecMore) SchemaNameExists(schemaName string, includeNil bool) bool {\n\tfor schemaNameTry, schemaRef := range sm.Spec.Components.Schemas {\n\t\tif schemaNameTry == schemaName {\n\t\t\tif includeNil {\n\t\t\t\treturn true\n\t\t\t} else if schemaRef == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tschemaRef.Ref = strings.TrimSpace(schemaRef.Ref)\n\t\t\tif len(schemaRef.Ref) > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif schemaRef.Value == nil {\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (s *SpecMore) WriteFileJSON(filename string, perm os.FileMode, prefix, indent string) error {\n\tjsonData, err := s.Spec.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpretty := false\n\tif len(prefix) > 0 || len(indent) > 0 {\n\t\tpretty = true\n\t}\n\tif pretty {\n\t\tjsonData = jsonutil.PrettyPrint(jsonData, \"\", \" \")\n\t}\n\treturn ioutil.WriteFile(filename, jsonData, perm)\n}\n\nfunc (sm *SpecMore) WriteFileXLSX(filename string) error {\n\ttbl, err := sm.OperationsTable(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn table.WriteXLSX(filename, tbl)\n}\n\ntype TagsMore struct {\n\tTags oas3.Tags\n}\n\nfunc (tg *TagsMore) Get(tagName string) *oas3.Tag {\n\tfor _, tag := range tg.Tags {\n\t\tif tagName == tag.Name {\n\t\t\treturn tag\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package hg\n\nimport (\n\t\"os\/exec\"\n\t\"fmt\"\n\t\"strings\"\n\t\"os\"\n\t\"encoding\/json\"\n\t\"time\"\n)\n\ntype Repository struct {\n\tPath string\n\tBranch string\n\tIncludePaths []string\n\tExcludePaths []string\n\tTagFilter string\n\tSkipSslVerification bool\n}\n\ntype CommitProperty struct {\n\tName string `json:\"name\"`\n\tValue string `json:\"value\"`\n\tType string `json:\"type,omitempty\"`\n}\n\ntype HgChangeset struct {\n\tRev int `json:\"rev\"`\n\tNode string `json:\"node\"`\n\tBranch string `json:\"branch\"`\n\tPhase string `json:\"phase\"`\n\tUser string `json:\"user\"`\n\tDate []int64 `json:\"date\"`\n\tDesc string `json:\"desc\"`\n\tBookmarks []string `json:\"bookmarks\"`\n\tTags []string `json:\"tags\"`\n\tParents []string `json:\"parents\"`\n}\n\nfunc (self *Repository) CloneOrPull(sourceUri string, insecure bool) ([]byte, error) {\n\tif len(self.Path) == 0 {\n\t\treturn []byte{}, fmt.Errorf(\"CloneOrPull: repository path must be set\")\n\t}\n\n\tif len(self.Branch) == 0 {\n\t\treturn []byte{}, fmt.Errorf(\"CloneOrPull: branch must be set\")\n\t}\n\n\tdirInfo, errIfNotExists := os.Stat(self.Path)\n\tif errIfNotExists != nil || !dirInfo.IsDir() {\n\t\treturn self.clone(sourceUri, insecure)\n\t} else {\n\t\treturn self.pull(insecure)\n\t}\n}\n\nfunc (self *Repository) clone(sourceUri string, insecure bool) (output []byte, err error) {\n\terr = os.RemoveAll(self.Path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"CloneOrUpdate: %s\", err)\n\t\treturn\n\t}\n\n\t_, output, err = self.run(\"clone\", []string{\n\t\t\"-q\",\n\t\t\"--branch\", self.Branch,\n\t\tsourceUri,\n\t\tself.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error cloning repository from %s: %s\", sourceUri, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) pull(insecure bool) (output []byte, err error) {\n\t_, output, err = self.run(\"pull\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pulling changes from repository: %s\\nStderr\", err)\n\t\treturn\n\t}\n\n\t_, checkoutOutput, err := self.run(\"checkout\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--clean\",\n\t\t\"--rev\", \"tip\",\n\t})\n\toutput = append(output, checkoutOutput...)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error updating working directory to tip: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) PullWithRebase(sourceUri string, branch string) (output []byte, err error) {\n\t_, output, err = self.run(\"pull\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--config\", \"extensions.rebase=\",\n\t\t\"--config\", \"paths.push-target=\" + sourceUri,\n\t\t\"--rebase\",\n\t\t\"--branch\", branch,\n\t\t\"push-target\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pulling\/rebasing from: %s: %s\", sourceUri, err)\n\t}\n\treturn\n}\n\nfunc (self *Repository) CloneAtCommit(sourceUri string, commitId string) (output []byte, err error) {\n\t_, output, err = self.run(\"clone\", []string{\n\t\t\"-q\",\n\t\t\"--rev\", commitId,\n\t\tsourceUri,\n\t\tself.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error cloning repository %s@%s: %s\", sourceUri, commitId, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) SetDraftPhase() (output []byte, err error) {\n\t_, output, err = self.run(\"phase\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--force\",\n\t\t\"--draft\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error setting repo phase to draft: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Push(destUri string, branch string) (output []byte, err error) {\n\t_, output, err = self.run(\"push\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--config\", \"paths.push-target=\" + destUri,\n\t\t\"--branch\", branch,\n\t\t\"push-target\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pushing to %s: %s\", destUri, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Tag(tagValue string) (output []byte, err error) {\n\t_, output, err = self.run(\"tag\", []string{\n\t\t\"--cwd\", self.Path,\n\t\ttagValue,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error tagging current commit: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Delete() error {\n\terr := os.RemoveAll(self.Path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting repository: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (self *Repository) Checkout(commitId string) (output []byte, err error) {\n\t_, output, err = self.run(\"checkout\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--clean\",\n\t\t\"--rev\", commitId,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error checking out %s: %s\", commitId, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Purge() (output []byte, err error) {\n\t_, output, err = self.run(\"purge\", []string{\n\t\t\"--config\", \"extensions.purge=\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--all\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error purging repository: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetLatestCommitId() (output string, err error) {\n\tinclude := self.makeIncludeQueryFragment()\n\texclude := self.makeExcludeQueryFragment()\n\ttagFilter := self.maybeTagFilter()\n\trevSet := fmt.Sprintf(\"last((((%s) - (%s)) & %s) - desc('[ci skip]'))\", include, exclude, tagFilter)\n\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", revSet,\n\t\t\"--template\", \"{node}\",\n\t})\n\toutput = string(outBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting latest commit id: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetCurrentCommitId() (output string, err error) {\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", \".\",\n\t\t\"--template\", \"{node}\",\n\t})\n\toutput = string(outBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting current commit id: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetDescendantsOf(commitId string) ([]string, error) {\n\tinclude := self.makeIncludeQueryFragment()\n\texclude := self.makeExcludeQueryFragment()\n\ttagFilter := self.maybeTagFilter()\n\trevSet := fmt.Sprintf(\"(descendants(%s) - %s) & %s & ((%s) - (%s)) - desc('[ci skip]')\",\n\t\tcommitId, commitId, tagFilter, include, exclude)\n\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", revSet,\n\t\t\"--template\", \"{node}\\n\",\n\t})\n\n\toutput := string(outBytes)\n\tif err != nil {\n\t\treturn []string{}, fmt.Errorf(\"Error getting descendant commits of %s: %s\\n%s\", commitId, err, output)\n\t}\n\n\ttrimmed := strings.Trim(output, \"\\n\\r \")\n\tif len(trimmed) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tcommits := strings.Split(trimmed, \"\\n\")\n\treturn commits, nil\n}\n\nfunc (self *Repository) maybeTagFilter() string {\n\tif len(self.TagFilter) > 0 {\n\t\treturn \"tag('re:\" + escapePath(self.TagFilter) + \"')\"\n\t} else {\n\t\treturn \"all()\"\n\t}\n}\n\nfunc (self *Repository) Metadata(commitId string) (metadata []CommitProperty, err error) {\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", commitId,\n\t\t\"--template\", \"json\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting metadata for commit %s: %s\\n%s\", commitId, err, string(outBytes))\n\t}\n\n\tmetadata, err = parseMetadata(outBytes)\n\treturn\n}\n\nfunc parseHgTime(hgTime []int64) (parsedTime time.Time, err error) {\n\tif len(hgTime) != 2 {\n\t\terr = fmt.Errorf(\"parseHgTime: expected slice hgTime to have 2 elements\")\n\t}\n\tutcEpoch := hgTime[0]\n\toffset := hgTime[1]\n\n\tutcTime := time.Unix(utcEpoch, 0)\n\n\t\/\/ for some reason, mercurial uses the inverse sign on the offset\n\tzone := time.FixedZone(\"internet time\", -1 * int(offset))\n\n\tparsedTime = utcTime.In(zone)\n\treturn\n}\n\nfunc timeToIso8601(timestamp time.Time) string {\n\treturn timestamp.Format(\"2006-01-02 15:04:05 -0700\")\n}\n\nfunc (commit *HgChangeset) toCommitProperties() (metadata []CommitProperty, err error) {\n\ttimestamp, err := parseHgTime(commit.Date)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmetadata = append(metadata,\n\t\tCommitProperty{\n\t\t\tName: \"commit\",\n\t\t\tValue: commit.Node,\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"author\",\n\t\t\tValue: commit.User,\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"author_date\",\n\t\t\tValue: timeToIso8601(timestamp),\n\t\t\tType: \"time\",\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"message\",\n\t\t\tValue: commit.Desc,\n\t\t\tType: \"message\",\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"tags\",\n\t\t\tValue: strings.Join(commit.Tags, \", \"),\n\t\t},\n\t)\n\n\treturn\n}\n\nfunc parseMetadata(hgJsonOutput []byte) (metadata []CommitProperty, err error) {\n\tcommits := []HgChangeset{}\n\terr = json.Unmarshal(hgJsonOutput, &commits)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(commits) != 1 {\n\t\terr = fmt.Errorf(\"parseMetadata: expected 1 commit, found %d\", len(commits))\n\t\treturn\n\t}\n\n\tmetadata, err = commits[0].toCommitProperties()\n\treturn\n}\n\nfunc (self *Repository) run(command string, args []string) (cmd *exec.Cmd, output []byte, err error) {\n\thgArgs := make([]string, 1, len(args) + 1)\n\thgArgs[0] = command\n\n\tif self.SkipSslVerification && commandTakesInsecureOption(command) {\n\t\thgArgs = append(hgArgs, \"--insecure\")\n\t}\n\thgArgs = append(hgArgs, args...)\n\n\tcmd = exec.Command(\"hg\", hgArgs...)\n\n\toutput, err = cmd.CombinedOutput()\n\treturn\n}\n\nfunc commandTakesInsecureOption(command string) bool {\n\teligibleCommands := []string{\n\t\t\"clone\",\n\t\t\"pull\",\n\t\t\"push\",\n\t}\n\tfor _, eligibleCommand := range (eligibleCommands) {\n\t\tif command == eligibleCommand {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (self *Repository) makeIncludeQueryFragment() string {\n\tif len(self.IncludePaths) == 0 {\n\t\treturn \"all()\"\n\t} else {\n\t\treturn unionOfPaths(self.IncludePaths)\n\t}\n}\n\nfunc (self *Repository) makeExcludeQueryFragment() string {\n\tif len(self.ExcludePaths) == 0 {\n\t\treturn \"not all()\"\n\t} else {\n\t\treturn unionOfPaths(self.ExcludePaths)\n\t}\n}\n\nfunc unionOfPaths(paths []string) string {\n\tescapedPaths := make([]string, len(paths))\n\tfor i, path := range (paths) {\n\t\tescapedPaths[i] = \"file('re:\" + escapePath(path) + \"')\"\n\t}\n\treturn strings.Join(escapedPaths, \"|\")\n}\n\nfunc escapePath(path string) string {\n\tbackslashesEscaped := strings.Replace(path, \"\\\\\", \"\\\\\\\\\\\\\\\\\", -1)\n\tquotesEscaped := strings.Replace(backslashesEscaped, \"'\", \"\\\\'\", -1)\n\treturn quotesEscaped\n}<commit_msg>hg: fix error message, add comments<commit_after>package hg\n\nimport (\n\t\"os\/exec\"\n\t\"fmt\"\n\t\"strings\"\n\t\"os\"\n\t\"encoding\/json\"\n\t\"time\"\n)\n\ntype Repository struct {\n\tPath string\n\tBranch string\n\tIncludePaths []string\n\tExcludePaths []string\n\tTagFilter string\n\tSkipSslVerification bool\n}\n\ntype CommitProperty struct {\n\tName string `json:\"name\"`\n\tValue string `json:\"value\"`\n\tType string `json:\"type,omitempty\"`\n}\n\ntype HgChangeset struct {\n\tRev int `json:\"rev\"`\n\tNode string `json:\"node\"`\n\tBranch string `json:\"branch\"`\n\tPhase string `json:\"phase\"`\n\tUser string `json:\"user\"`\n\tDate []int64 `json:\"date\"`\n\tDesc string `json:\"desc\"`\n\tBookmarks []string `json:\"bookmarks\"`\n\tTags []string `json:\"tags\"`\n\tParents []string `json:\"parents\"`\n}\n\nfunc (self *Repository) CloneOrPull(sourceUri string, insecure bool) ([]byte, error) {\n\tif len(self.Path) == 0 {\n\t\treturn []byte{}, fmt.Errorf(\"CloneOrPull: repository path must be set\")\n\t}\n\n\tif len(self.Branch) == 0 {\n\t\treturn []byte{}, fmt.Errorf(\"CloneOrPull: branch must be set\")\n\t}\n\n\tdirInfo, errIfNotExists := os.Stat(self.Path)\n\tif errIfNotExists != nil || !dirInfo.IsDir() {\n\t\treturn self.clone(sourceUri, insecure)\n\t} else {\n\t\treturn self.pull(insecure)\n\t}\n}\n\nfunc (self *Repository) clone(sourceUri string, insecure bool) (output []byte, err error) {\n\terr = os.RemoveAll(self.Path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"clone: %s\", err)\n\t\treturn\n\t}\n\n\t_, output, err = self.run(\"clone\", []string{\n\t\t\"-q\",\n\t\t\"--branch\", self.Branch,\n\t\tsourceUri,\n\t\tself.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error cloning repository from %s: %s\", sourceUri, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) pull(insecure bool) (output []byte, err error) {\n\t_, output, err = self.run(\"pull\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pulling changes from repository: %s\\nStderr\", err)\n\t\treturn\n\t}\n\n\t_, checkoutOutput, err := self.run(\"checkout\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--clean\",\n\t\t\"--rev\", \"tip\",\n\t})\n\toutput = append(output, checkoutOutput...)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error updating working directory to tip: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) PullWithRebase(sourceUri string, branch string) (output []byte, err error) {\n\t_, output, err = self.run(\"pull\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--config\", \"extensions.rebase=\",\n\t\t\"--config\", \"paths.push-target=\" + sourceUri,\n\t\t\"--rebase\",\n\t\t\"--branch\", branch,\n\t\t\"push-target\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pulling\/rebasing from: %s: %s\", sourceUri, err)\n\t}\n\treturn\n}\n\n\/\/ Clones sourceUri into the repository and truncates all history after the given commit,\n\/\/ making it the new tip. After truncating, we can add a tag commit at tip, and then push\n\/\/ the whole known branch (... -> given commit -> tag commit == tip) to another repository.\nfunc (self *Repository) CloneAtCommit(sourceUri string, commitId string) (output []byte, err error) {\n\t_, output, err = self.run(\"clone\", []string{\n\t\t\"-q\",\n\t\t\"--rev\", commitId,\n\t\tsourceUri,\n\t\tself.Path,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error cloning repository %s@%s: %s\", sourceUri, commitId, err)\n\t}\n\n\treturn\n}\n\n\/\/ Makes the repository rebaseable. See `hg help phases`.\nfunc (self *Repository) SetDraftPhase() (output []byte, err error) {\n\t_, output, err = self.run(\"phase\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--force\",\n\t\t\"--draft\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error setting repo phase to draft: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Push(destUri string, branch string) (output []byte, err error) {\n\t_, output, err = self.run(\"push\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--config\", \"paths.push-target=\" + destUri,\n\t\t\"--branch\", branch,\n\t\t\"push-target\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error pushing to %s: %s\", destUri, err)\n\t}\n\n\treturn\n}\n\n\/\/ Tags a commit. Expects to be run only at tip!\nfunc (self *Repository) Tag(tagValue string) (output []byte, err error) {\n\t_, output, err = self.run(\"tag\", []string{\n\t\t\"--cwd\", self.Path,\n\t\ttagValue,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error tagging current commit: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Delete() error {\n\terr := os.RemoveAll(self.Path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting repository: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (self *Repository) Checkout(commitId string) (output []byte, err error) {\n\t_, output, err = self.run(\"checkout\", []string{\n\t\t\"-q\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--clean\",\n\t\t\"--rev\", commitId,\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error checking out %s: %s\", commitId, err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) Purge() (output []byte, err error) {\n\t_, output, err = self.run(\"purge\", []string{\n\t\t\"--config\", \"extensions.purge=\",\n\t\t\"--cwd\", self.Path,\n\t\t\"--all\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error purging repository: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetLatestCommitId() (output string, err error) {\n\tinclude := self.makeIncludeQueryFragment()\n\texclude := self.makeExcludeQueryFragment()\n\ttagFilter := self.maybeTagFilter()\n\trevSet := fmt.Sprintf(\"last((((%s) - (%s)) & %s) - desc('[ci skip]'))\", include, exclude, tagFilter)\n\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", revSet,\n\t\t\"--template\", \"{node}\",\n\t})\n\toutput = string(outBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting latest commit id: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetCurrentCommitId() (output string, err error) {\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", \".\",\n\t\t\"--template\", \"{node}\",\n\t})\n\toutput = string(outBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting current commit id: %s\", err)\n\t}\n\n\treturn\n}\n\nfunc (self *Repository) GetDescendantsOf(commitId string) ([]string, error) {\n\tinclude := self.makeIncludeQueryFragment()\n\texclude := self.makeExcludeQueryFragment()\n\ttagFilter := self.maybeTagFilter()\n\trevSet := fmt.Sprintf(\"(descendants(%s) - %s) & %s & ((%s) - (%s)) - desc('[ci skip]')\",\n\t\tcommitId, commitId, tagFilter, include, exclude)\n\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", revSet,\n\t\t\"--template\", \"{node}\\n\",\n\t})\n\n\toutput := string(outBytes)\n\tif err != nil {\n\t\treturn []string{}, fmt.Errorf(\"Error getting descendant commits of %s: %s\\n%s\", commitId, err, output)\n\t}\n\n\ttrimmed := strings.Trim(output, \"\\n\\r \")\n\tif len(trimmed) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tcommits := strings.Split(trimmed, \"\\n\")\n\treturn commits, nil\n}\n\nfunc (self *Repository) maybeTagFilter() string {\n\tif len(self.TagFilter) > 0 {\n\t\treturn \"tag('re:\" + escapePath(self.TagFilter) + \"')\"\n\t} else {\n\t\treturn \"all()\"\n\t}\n}\n\nfunc (self *Repository) Metadata(commitId string) (metadata []CommitProperty, err error) {\n\t_, outBytes, err := self.run(\"log\", []string{\n\t\t\"--cwd\", self.Path,\n\t\t\"--rev\", commitId,\n\t\t\"--template\", \"json\",\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error getting metadata for commit %s: %s\\n%s\", commitId, err, string(outBytes))\n\t}\n\n\tmetadata, err = parseMetadata(outBytes)\n\treturn\n}\n\nfunc parseHgTime(hgTime []int64) (parsedTime time.Time, err error) {\n\tif len(hgTime) != 2 {\n\t\terr = fmt.Errorf(\"parseHgTime: expected slice hgTime to have 2 elements\")\n\t}\n\tutcEpoch := hgTime[0]\n\toffset := hgTime[1]\n\n\tutcTime := time.Unix(utcEpoch, 0)\n\n\t\/\/ for some reason, mercurial uses the inverse sign on the offset\n\tzone := time.FixedZone(\"internet time\", -1 * int(offset))\n\n\tparsedTime = utcTime.In(zone)\n\treturn\n}\n\nfunc timeToIso8601(timestamp time.Time) string {\n\treturn timestamp.Format(\"2006-01-02 15:04:05 -0700\")\n}\n\nfunc (commit *HgChangeset) toCommitProperties() (metadata []CommitProperty, err error) {\n\ttimestamp, err := parseHgTime(commit.Date)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmetadata = append(metadata,\n\t\tCommitProperty{\n\t\t\tName: \"commit\",\n\t\t\tValue: commit.Node,\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"author\",\n\t\t\tValue: commit.User,\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"author_date\",\n\t\t\tValue: timeToIso8601(timestamp),\n\t\t\tType: \"time\",\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"message\",\n\t\t\tValue: commit.Desc,\n\t\t\tType: \"message\",\n\t\t},\n\t\tCommitProperty{\n\t\t\tName: \"tags\",\n\t\t\tValue: strings.Join(commit.Tags, \", \"),\n\t\t},\n\t)\n\n\treturn\n}\n\nfunc parseMetadata(hgJsonOutput []byte) (metadata []CommitProperty, err error) {\n\tcommits := []HgChangeset{}\n\terr = json.Unmarshal(hgJsonOutput, &commits)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(commits) != 1 {\n\t\terr = fmt.Errorf(\"parseMetadata: expected 1 commit, found %d\", len(commits))\n\t\treturn\n\t}\n\n\tmetadata, err = commits[0].toCommitProperties()\n\treturn\n}\n\nfunc (self *Repository) run(command string, args []string) (cmd *exec.Cmd, output []byte, err error) {\n\thgArgs := make([]string, 1, len(args) + 1)\n\thgArgs[0] = command\n\n\tif self.SkipSslVerification && commandTakesInsecureOption(command) {\n\t\thgArgs = append(hgArgs, \"--insecure\")\n\t}\n\thgArgs = append(hgArgs, args...)\n\n\tcmd = exec.Command(\"hg\", hgArgs...)\n\n\toutput, err = cmd.CombinedOutput()\n\treturn\n}\n\nfunc commandTakesInsecureOption(command string) bool {\n\teligibleCommands := []string{\n\t\t\"clone\",\n\t\t\"pull\",\n\t\t\"push\",\n\t}\n\tfor _, eligibleCommand := range (eligibleCommands) {\n\t\tif command == eligibleCommand {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (self *Repository) makeIncludeQueryFragment() string {\n\tif len(self.IncludePaths) == 0 {\n\t\treturn \"all()\"\n\t} else {\n\t\treturn unionOfPaths(self.IncludePaths)\n\t}\n}\n\nfunc (self *Repository) makeExcludeQueryFragment() string {\n\tif len(self.ExcludePaths) == 0 {\n\t\treturn \"not all()\"\n\t} else {\n\t\treturn unionOfPaths(self.ExcludePaths)\n\t}\n}\n\nfunc unionOfPaths(paths []string) string {\n\tescapedPaths := make([]string, len(paths))\n\tfor i, path := range (paths) {\n\t\tescapedPaths[i] = \"file('re:\" + escapePath(path) + \"')\"\n\t}\n\treturn strings.Join(escapedPaths, \"|\")\n}\n\nfunc escapePath(path string) string {\n\tbackslashesEscaped := strings.Replace(path, \"\\\\\", \"\\\\\\\\\\\\\\\\\", -1)\n\tquotesEscaped := strings.Replace(backslashesEscaped, \"'\", \"\\\\'\", -1)\n\treturn quotesEscaped\n}<|endoftext|>"} {"text":"<commit_before>package dice\n\nimport \"testing\"\n\nfunc TestRollable(t *testing.T) {\n var attackDie AttackDie\n var defenseDie DefenseDie\n attackDie.Roll()\n defenseDie.Roll()\n}\n<commit_msg>Add test for die rolling.<commit_after>package dice\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n)\n\nfunc TestRollable_AttackDie(t *testing.T) {\n\tassert := assert.New(t)\n\n\tvar attackDie AttackDie\n\tvar blanks, focuses, hits, crits int\n\tattackDie.Roll()\n\n\tfor i := 0; i < 1000; i++ {\n\t\tswitch attackDie.Result() {\n\t\tcase BLANK:\n\t\t\tblanks++\n\t\tcase FOCUS:\n\t\t\tfocuses++\n\t\tcase HIT:\n\t\t\thits++\n\t\tcase CRIT:\n\t\t\tcrits++\n\t\t}\n\t}\n\n\tassert.InEpsilon(int(1000*2.0\/8), blanks, 50)\n\tassert.InEpsilon(int(1000*2.0\/8), focuses, 50)\n\tassert.InEpsilon(int(1000*3.0\/8), hits, 50)\n\tassert.InEpsilon(int(1000*1.0\/8), crits, 50)\n}\n\nfunc TestRollable_DefenseDie(t *testing.T) {\n\tassert := assert.New(t)\n\n\tvar defenseDie DefenseDie\n\tvar blanks, focuses, evades int\n\tdefenseDie.Roll()\n\n\tfor i := 0; i < 1000; i++ {\n\t\tswitch defenseDie.Result() {\n\t\tcase BLANK:\n\t\t\tblanks++\n\t\tcase FOCUS:\n\t\t\tfocuses++\n\t\tcase EVADE:\n\t\t\tevades++\n\t\t}\n\t}\n\n\tassert.InEpsilon(int(1000*3.0\/8), blanks, 50)\n\tassert.InEpsilon(int(1000*2.0\/8), focuses, 50)\n\tassert.InEpsilon(int(1000*3.0\/8), evades, 50)\n}\n<|endoftext|>"} {"text":"<commit_before>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Host host information\ntype Host struct {\n\tID string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tMemo string `json:\"memo,omitempty\"`\n\tRoles Roles `json:\"roles,omitempty\"`\n\tRoleFullnames []string `json:\"roleFullnames,omitempty\"`\n\tIsRetired bool `json:\"isRetired,omitempty\"`\n\tCreatedAt int32 `json:\"createdAt,omitempty\"`\n\tMeta HostMeta `json:\"meta,omitempty\"`\n\tInterfaces []Interface `json:\"interfaces,omitempty\"`\n}\n\n\/\/ Roles host role maps\ntype Roles map[string][]string\n\n\/\/ HostMeta host meta informations\ntype HostMeta struct {\n\tAgentRevision string `json:\"agent-revision,omitempty\"`\n\tAgentVersion string `json:\"agent-version,omitempty\"`\n\tBlockDevice BlockDevice `json:\"block_device,omitempty\"`\n\tCPU CPU `json:\"cpu,omitempty\"`\n\tFilesystem FileSystem `json:\"filesystem,omitempty\"`\n\tKernel Kernel `json:\"kernel,omitempty\"`\n\tMemory Memory `json:\"memory,omitempty\"`\n}\n\n\/\/ BlockDevice blockdevice\ntype BlockDevice map[string]map[string]interface{}\n\n\/\/ CPU cpu\ntype CPU []map[string]interface{}\n\n\/\/ FileSystem filesystem\ntype FileSystem map[string]interface{}\n\n\/\/ Kernel kernel\ntype Kernel map[string]string\n\n\/\/ Memory memory\ntype Memory map[string]string\n\n\/\/ Interface network interface\ntype Interface struct {\n\tName string `json:\"name,omitempty\"`\n\tIPAddress string `json:\"ipAddress,omitempty\"`\n\tMacAddress string `json:\"macAddress,omitempty\"`\n}\n\n\/\/ FindHostsParam parameters for FindHosts\ntype FindHostsParam struct {\n\tService string\n\tRoles []string\n\tName string\n\tStatuses []string\n}\n\n\/\/ CreateHostParam parameters for CreateHost\ntype CreateHostParam struct {\n\tName string `json:\"name,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tMeta HostMeta `json:\"meta,omitempty\"`\n\tInterfaces []Interface `json:\"interfaces,omitempty\"`\n\tRoleFullnames []string `json:\"roleFullnames,omitempty\"`\n}\n\n\/\/ UpdateHostParam parameters for UpdateHost\ntype UpdateHostParam CreateHostParam\n\n\/\/ GetRoleFullnames getrolefullnames\nfunc (h *Host) GetRoleFullnames() []string {\n\tif len(h.Roles) < 1 {\n\t\treturn nil\n\t}\n\n\tvar fullnames []string\n\tfor service, roles := range h.Roles {\n\t\tfor _, role := range roles {\n\t\t\tfullname := strings.Join([]string{service, role}, \":\")\n\t\t\tfullnames = append(fullnames, fullname)\n\t\t}\n\t}\n\n\treturn fullnames\n}\n\n\/\/ DateFromCreatedAt returns time.Time\nfunc (h *Host) DateFromCreatedAt() time.Time {\n\treturn time.Unix(int64(h.CreatedAt), 0)\n}\n\n\/\/ DateStringFromCreatedAt returns date string\nfunc (h *Host) DateStringFromCreatedAt() string {\n\tconst layout = \"Jan 2, 2006 at 3:04pm (MST)\"\n\treturn h.DateFromCreatedAt().Format(layout)\n}\n\n\/\/ IPAddresses returns ipaddresses\nfunc (h *Host) IPAddresses() map[string]string {\n\tif len(h.Interfaces) < 1 {\n\t\treturn nil\n\t}\n\n\tipAddresses := make(map[string]string, 0)\n\tfor _, iface := range h.Interfaces {\n\t\tipAddresses[iface.Name] = iface.IPAddress\n\t}\n\treturn ipAddresses\n}\n\n\/\/ FindHost find the host\nfunc (c *Client) FindHost(id string) (*Host, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\", id)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tHost *Host `json:\"host\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.Host, err\n}\n\n\/\/ FindHosts find hosts\nfunc (c *Client) FindHosts(param *FindHostsParam) ([]*Host, error) {\n\tv := url.Values{}\n\tif param.Service != \"\" {\n\t\tv.Set(\"service\", param.Service)\n\t}\n\tif len(param.Roles) >= 1 {\n\t\tfor _, role := range param.Roles {\n\t\t\tv.Add(\"role\", role)\n\t\t}\n\t}\n\tif param.Name != \"\" {\n\t\tv.Set(\"name\", param.Name)\n\t}\n\tif len(param.Statuses) >= 1 {\n\t\tfor _, status := range param.Statuses {\n\t\t\tv.Add(\"status\", status)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%s?%s\", c.urlFor(\"\/api\/v0\/hosts.json\").String(), v.Encode()), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tHosts []*(Host) `json:\"hosts\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data.Hosts, err\n}\n\n\/\/ CreateHost creating host\nfunc (c *Client) CreateHost(param *CreateHostParam) (string, error) {\n\tresp, err := c.PostJSON(\"\/api\/v0\/hosts\", param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar data struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn data.ID, nil\n}\n\n\/\/ UpdateHost update host\nfunc (c *Client) UpdateHost(hostID string, param *UpdateHostParam) (string, error) {\n\tresp, err := c.PutJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\", hostID), param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar data struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.ID, nil\n}\n\n\/\/ UpdateHostStatus update host status\nfunc (c *Client) UpdateHostStatus(hostID string, status string) error {\n\tresp, err := c.PostJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\/status\", hostID), map[string]string{\n\t\t\"status\": status,\n\t})\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ RetireHost retuire the host\nfunc (c *Client) RetireHost(id string) error {\n\tresp, err := c.PostJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\/retire\", id), \"{}\")\n\tdefer closeResponse(resp)\n\treturn err\n}\n<commit_msg>support customIdentifier<commit_after>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Host host information\ntype Host struct {\n\tID string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tMemo string `json:\"memo,omitempty\"`\n\tRoles Roles `json:\"roles,omitempty\"`\n\tRoleFullnames []string `json:\"roleFullnames,omitempty\"`\n\tIsRetired bool `json:\"isRetired,omitempty\"`\n\tCreatedAt int32 `json:\"createdAt,omitempty\"`\n\tMeta HostMeta `json:\"meta,omitempty\"`\n\tInterfaces []Interface `json:\"interfaces,omitempty\"`\n}\n\n\/\/ Roles host role maps\ntype Roles map[string][]string\n\n\/\/ HostMeta host meta informations\ntype HostMeta struct {\n\tAgentRevision string `json:\"agent-revision,omitempty\"`\n\tAgentVersion string `json:\"agent-version,omitempty\"`\n\tBlockDevice BlockDevice `json:\"block_device,omitempty\"`\n\tCPU CPU `json:\"cpu,omitempty\"`\n\tFilesystem FileSystem `json:\"filesystem,omitempty\"`\n\tKernel Kernel `json:\"kernel,omitempty\"`\n\tMemory Memory `json:\"memory,omitempty\"`\n}\n\n\/\/ BlockDevice blockdevice\ntype BlockDevice map[string]map[string]interface{}\n\n\/\/ CPU cpu\ntype CPU []map[string]interface{}\n\n\/\/ FileSystem filesystem\ntype FileSystem map[string]interface{}\n\n\/\/ Kernel kernel\ntype Kernel map[string]string\n\n\/\/ Memory memory\ntype Memory map[string]string\n\n\/\/ Interface network interface\ntype Interface struct {\n\tName string `json:\"name,omitempty\"`\n\tIPAddress string `json:\"ipAddress,omitempty\"`\n\tMacAddress string `json:\"macAddress,omitempty\"`\n}\n\n\/\/ FindHostsParam parameters for FindHosts\ntype FindHostsParam struct {\n\tService string\n\tRoles []string\n\tName string\n\tStatuses []string\n\tCustomIdentifier string\n}\n\n\/\/ CreateHostParam parameters for CreateHost\ntype CreateHostParam struct {\n\tName string `json:\"name,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tMeta HostMeta `json:\"meta,omitempty\"`\n\tInterfaces []Interface `json:\"interfaces,omitempty\"`\n\tRoleFullnames []string `json:\"roleFullnames,omitempty\"`\n\tCustomIdentifier string `json:\"customIdentifier,omitempty\"`\n}\n\n\/\/ UpdateHostParam parameters for UpdateHost\ntype UpdateHostParam CreateHostParam\n\n\/\/ GetRoleFullnames getrolefullnames\nfunc (h *Host) GetRoleFullnames() []string {\n\tif len(h.Roles) < 1 {\n\t\treturn nil\n\t}\n\n\tvar fullnames []string\n\tfor service, roles := range h.Roles {\n\t\tfor _, role := range roles {\n\t\t\tfullname := strings.Join([]string{service, role}, \":\")\n\t\t\tfullnames = append(fullnames, fullname)\n\t\t}\n\t}\n\n\treturn fullnames\n}\n\n\/\/ DateFromCreatedAt returns time.Time\nfunc (h *Host) DateFromCreatedAt() time.Time {\n\treturn time.Unix(int64(h.CreatedAt), 0)\n}\n\n\/\/ DateStringFromCreatedAt returns date string\nfunc (h *Host) DateStringFromCreatedAt() string {\n\tconst layout = \"Jan 2, 2006 at 3:04pm (MST)\"\n\treturn h.DateFromCreatedAt().Format(layout)\n}\n\n\/\/ IPAddresses returns ipaddresses\nfunc (h *Host) IPAddresses() map[string]string {\n\tif len(h.Interfaces) < 1 {\n\t\treturn nil\n\t}\n\n\tipAddresses := make(map[string]string, 0)\n\tfor _, iface := range h.Interfaces {\n\t\tipAddresses[iface.Name] = iface.IPAddress\n\t}\n\treturn ipAddresses\n}\n\n\/\/ FindHost find the host\nfunc (c *Client) FindHost(id string) (*Host, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\", id)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tHost *Host `json:\"host\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.Host, err\n}\n\n\/\/ FindHosts find hosts\nfunc (c *Client) FindHosts(param *FindHostsParam) ([]*Host, error) {\n\tv := url.Values{}\n\tif param.Service != \"\" {\n\t\tv.Set(\"service\", param.Service)\n\t}\n\tif len(param.Roles) >= 1 {\n\t\tfor _, role := range param.Roles {\n\t\t\tv.Add(\"role\", role)\n\t\t}\n\t}\n\tif param.Name != \"\" {\n\t\tv.Set(\"name\", param.Name)\n\t}\n\tif len(param.Statuses) >= 1 {\n\t\tfor _, status := range param.Statuses {\n\t\t\tv.Add(\"status\", status)\n\t\t}\n\t}\n\tif param.CustomIdentifier != \"\" {\n\t\tv.Set(\"customIdentifier\", param.CustomIdentifier)\n\t}\n\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%s?%s\", c.urlFor(\"\/api\/v0\/hosts.json\").String(), v.Encode()), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tHosts []*(Host) `json:\"hosts\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data.Hosts, err\n}\n\n\/\/ CreateHost creating host\nfunc (c *Client) CreateHost(param *CreateHostParam) (string, error) {\n\tresp, err := c.PostJSON(\"\/api\/v0\/hosts\", param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar data struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn data.ID, nil\n}\n\n\/\/ UpdateHost update host\nfunc (c *Client) UpdateHost(hostID string, param *UpdateHostParam) (string, error) {\n\tresp, err := c.PutJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\", hostID), param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar data struct {\n\t\tID string `json:\"id\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.ID, nil\n}\n\n\/\/ UpdateHostStatus update host status\nfunc (c *Client) UpdateHostStatus(hostID string, status string) error {\n\tresp, err := c.PostJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\/status\", hostID), map[string]string{\n\t\t\"status\": status,\n\t})\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ RetireHost retuire the host\nfunc (c *Client) RetireHost(id string) error {\n\tresp, err := c.PostJSON(fmt.Sprintf(\"\/api\/v0\/hosts\/%s\/retire\", id), \"{}\")\n\tdefer closeResponse(resp)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc maybeFatal(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc setupDb(u string) {\n\treq, err := http.NewRequest(\"PUT\", u, nil)\n\tmaybeFatal(err)\n\tres, err := http.DefaultClient.Do(req)\n\tmaybeFatal(err)\n\tres.Body.Close()\n}\n\nfunc sendOne(u, k string, body []byte) {\n\tresp, err := http.DefaultClient.Post(u+\"?ts=\"+k,\n\t\t\"application\/json\", bytes.NewReader(body))\n\tmaybeFatal(err)\n\tdefer resp.Body.Close()\n\tif resp.StatusCode >= 300 || resp.StatusCode < 200 {\n\t\tlog.Fatalf(\"HTTP Error on %v: %v\", k, err)\n\t}\n}\n\nfunc main() {\n\tu := os.Args[1]\n\tsetupDb(u)\n\n\tt := time.Tick(5 * time.Second)\n\ti := 0\n\n\td := json.NewDecoder(os.Stdin)\n\tfor {\n\t\tkv := map[string]*json.RawMessage{}\n\n\t\terr := d.Decode(&kv)\n\t\tif err == io.EOF {\n\t\t\tlog.Printf(\"Done!\")\n\t\t\tbreak\n\t\t}\n\t\tmaybeFatal(err)\n\n\t\tvar latestKey string\n\t\tfor k, v := range kv {\n\t\t\tbody := []byte(*v)\n\t\t\tsendOne(u, k, body)\n\t\t\tlatestKey = k\n\t\t}\n\n\t\ti++\n\t\tselect {\n\t\tcase <-t:\n\t\t\tlog.Printf(\"Processed %v items, latest was %v\", i, latestKey)\n\t\tdefault:\n\t\t}\n\t}\n}\n<commit_msg>Make the load store a little more helpful with usage<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc maybeFatal(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc setupDb(u string) {\n\treq, err := http.NewRequest(\"PUT\", u, nil)\n\tmaybeFatal(err)\n\tres, err := http.DefaultClient.Do(req)\n\tmaybeFatal(err)\n\tres.Body.Close()\n}\n\nfunc sendOne(u, k string, body []byte) {\n\tresp, err := http.DefaultClient.Post(u+\"?ts=\"+k,\n\t\t\"application\/json\", bytes.NewReader(body))\n\tmaybeFatal(err)\n\tdefer resp.Body.Close()\n\tif resp.StatusCode >= 300 || resp.StatusCode < 200 {\n\t\tlog.Fatalf(\"HTTP Error on %v: %v\", k, err)\n\t}\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalf(\"Usage: gzip -dc backup.gz | %v http:\/\/seriesly:3133\/dbname\",\n\t\t\tos.Args[0])\n\t}\n\tu := os.Args[1]\n\tsetupDb(u)\n\n\tt := time.Tick(5 * time.Second)\n\ti := 0\n\n\td := json.NewDecoder(os.Stdin)\n\tfor {\n\t\tkv := map[string]*json.RawMessage{}\n\n\t\terr := d.Decode(&kv)\n\t\tif err == io.EOF {\n\t\t\tlog.Printf(\"Done!\")\n\t\t\tbreak\n\t\t}\n\t\tmaybeFatal(err)\n\n\t\tvar latestKey string\n\t\tfor k, v := range kv {\n\t\t\tbody := []byte(*v)\n\t\t\tsendOne(u, k, body)\n\t\t\tlatestKey = k\n\t\t}\n\n\t\ti++\n\t\tselect {\n\t\tcase <-t:\n\t\t\tlog.Printf(\"Processed %v items, latest was %v\", i, latestKey)\n\t\tdefault:\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Tom Thorogood. All rights reserved.\n\/\/ Use of this source code is governed by a Modified\n\/\/ BSD License that can be found in the LICENSE file.\n\npackage id3v2\n\n\/\/go:generate go run generate_ids.go\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"unicode\/utf16\"\n)\n\n\/\/ This is an implementation of v2.4.0 of the ID3v2 tagging format,\n\/\/ defined in: http:\/\/id3.org\/id3v2.4.0-structure, and v2.3.0 of\n\/\/ the ID3v2 tagging format, defined in: http:\/\/id3.org\/id3v2.3.0.\n\nconst (\n\tflagUnsynchronisation = 1 << (7 - iota)\n\tflagExtendedHeader\n\tflagExperimental\n\tflagFooter\n\n\tknownFlags = flagUnsynchronisation | flagExtendedHeader |\n\t\tflagExperimental | flagFooter\n)\n\ntype FrameID uint32\n\nconst syncsafeInvalid = ^uint32(0)\n\nfunc syncsafe(data []byte) uint32 {\n\t_ = data[3]\n\n\tif data[0]&0x80 != 0 || data[1]&0x80 != 0 ||\n\t\tdata[2]&0x80 != 0 || data[3]&0x80 != 0 {\n\t\treturn syncsafeInvalid\n\t}\n\n\treturn uint32(data[0])<<21 | uint32(data[1])<<14 |\n\t\tuint32(data[2])<<7 | uint32(data[3])\n}\n\nfunc id3Split(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\ti := bytes.Index(data, []byte(\"ID3\"))\n\tif i == -1 {\n\t\tif len(data) < 2 {\n\t\t\treturn 0, nil, nil\n\t\t}\n\n\t\treturn len(data) - 2, nil, nil\n\t}\n\n\tdata = data[i:]\n\tif len(data) < 10 {\n\t\tif atEOF {\n\t\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t\t}\n\n\t\treturn i, nil, nil\n\t}\n\n\tsize := syncsafe(data[6:])\n\n\tif data[3] == 0xff || data[4] == 0xff || size == syncsafeInvalid {\n\t\t\/\/ Skipping when we find the string \"ID3\" in the file but\n\t\t\/\/ the remaining header is invalid is consistent with the\n\t\t\/\/ detection logic in §3.1. This also reduces the\n\t\t\/\/ likelihood of errors being caused by the byte sequence\n\t\t\/\/ \"ID3\" (49 44 33) occuring in the audio, but does not\n\t\t\/\/ eliminate the possibility of errors in this case.\n\t\t\/\/\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ An ID3v2 tag can be detected with the following pattern:\n\t\t\/\/ $49 44 33 yy yy xx zz zz zz zz\n\t\t\/\/ Where yy is less than $FF, xx is the 'flags' byte and zz\n\t\t\/\/ is less than $80.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[3] > 0x05 {\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ If software with ID3v2.4.0 and below support should\n\t\t\/\/ encounter version five or higher it should simply\n\t\t\/\/ ignore the whole tag.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[3] < 0x03 {\n\t\t\/\/ This package only supports v2.3.0 and v2.4.0, skip\n\t\t\/\/ versions bellow v2.3.0.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[5]&^knownFlags != 0 {\n\t\t\/\/ Skip tag blocks that contain unknown flags.\n\t\t\/\/\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ If one of these undefined flags are set, the tag might\n\t\t\/\/ not be readable for a parser that does not know the\n\t\t\/\/ flags function.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[5]&flagFooter == flagFooter {\n\t\tsize += 10\n\t}\n\n\tif len(data) < 10+int(size) {\n\t\tif atEOF {\n\t\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t\t}\n\n\t\treturn i, nil, nil\n\t}\n\n\treturn i + 10 + int(size), data[:10+size], nil\n}\n\nconst invalidFrameID = ^FrameID(0)\n\nfunc validIDByte(b byte) bool {\n\treturn (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')\n}\n\nfunc frameID(data []byte) FrameID {\n\t_ = data[3]\n\n\tif validIDByte(data[0]) && validIDByte(data[1]) && validIDByte(data[2]) &&\n\t\t\/\/ Although it violates the specification, some software\n\t\t\/\/ incorrectly encodes v2.2.0 three character tags as\n\t\t\/\/ four character v2.3.0 tags with a trailing zero byte\n\t\t\/\/ when upgrading the tagging format version.\n\t\t(validIDByte(data[3]) || data[3] == 0) {\n\t\treturn FrameID(binary.BigEndian.Uint32(data))\n\t}\n\n\tfor _, v := range data {\n\t\tif v != 0 {\n\t\t\treturn invalidFrameID\n\t\t}\n\t}\n\n\t\/\/ This is probably the begging of padding.\n\treturn 0\n}\n\nvar bufPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, 4<<10)\n\t\treturn &buf\n\t},\n}\n\nfunc Scan(r io.Reader) (ID3Frames, error) {\n\tbuf := bufPool.Get()\n\tdefer bufPool.Put(buf)\n\n\ts := bufio.NewScanner(r)\n\ts.Buffer(*buf.(*[]byte), 1<<28)\n\ts.Split(id3Split)\n\n\tvar frames ID3Frames\n\nscan:\n\tfor s.Scan() {\n\t\tdata := s.Bytes()\n\n\t\theader := data[:10]\n\t\tdata = data[10:]\n\n\t\tif string(header[:3]) != \"ID3\" {\n\t\t\tpanic(\"id3: bufio.Scanner failed\")\n\t\t}\n\n\t\tversion := header[3]\n\t\tswitch version {\n\t\tcase 0x04, 0x03:\n\t\tdefault:\n\t\t\tcontinue scan\n\t\t}\n\n\t\tflags := header[5]\n\n\t\tif flags&flagFooter == flagFooter {\n\t\t\tfooter := data[len(data)-10:]\n\t\t\tdata = data[:len(data)-10]\n\n\t\t\tif string(footer[:3]) != \"3DI\" ||\n\t\t\t\t!bytes.Equal(header[3:], footer[3:]) {\n\t\t\t\treturn nil, errors.New(\"id3: invalid footer\")\n\t\t\t}\n\t\t}\n\n\t\tif flags&flagExtendedHeader == flagExtendedHeader {\n\t\t\tsize := syncsafe(data)\n\t\t\tif size == syncsafeInvalid || len(data) < int(size) {\n\t\t\t\treturn nil, errors.New(\"id3: invalid extended header\")\n\t\t\t}\n\n\t\t\textendedHeader := data[:size]\n\t\t\tdata = data[size:]\n\n\t\t\t_ = extendedHeader\n\t\t}\n\n\t\t\/\/ TODO: expose unsynchronisation flag\n\n\tframes:\n\t\tfor len(data) > 10 {\n\t\t\t_ = data[9]\n\n\t\t\tid := frameID(data)\n\t\t\tswitch id {\n\t\t\tcase 0:\n\t\t\t\t\/\/ We've probably hit padding, the padding\n\t\t\t\t\/\/ validity check below will handle this.\n\t\t\t\tbreak frames\n\t\t\tcase invalidFrameID:\n\t\t\t\treturn nil, errors.New(\"id3: invalid frame id\")\n\t\t\t}\n\n\t\t\tvar size uint32\n\t\t\tswitch version {\n\t\t\tcase 0x04:\n\t\t\t\tsize = syncsafe(data[4:])\n\t\t\t\tif size == syncsafeInvalid {\n\t\t\t\t\treturn nil, errors.New(\"id3: invalid frame size\")\n\t\t\t\t}\n\t\t\tcase 0x03:\n\t\t\t\tsize = binary.BigEndian.Uint32(data[4:])\n\t\t\tdefault:\n\t\t\t\tpanic(\"unhandled version\")\n\t\t\t}\n\n\t\t\tif len(data) < 10+int(size) {\n\t\t\t\treturn nil, errors.New(\"id3: frame size exceeds length of tag data\")\n\t\t\t}\n\n\t\t\tframes = append(frames, &ID3Frame{\n\t\t\t\tID: id,\n\t\t\t\tFlags: binary.BigEndian.Uint16(data[8:]),\n\t\t\t\tData: append([]byte(nil), data[10:10+size]...),\n\t\t\t})\n\n\t\t\tdata = data[10+size:]\n\t\t}\n\n\t\tif flags&flagFooter == flagFooter && len(data) != 0 {\n\t\t\treturn nil, errors.New(\"id3: padding with footer\")\n\t\t}\n\n\t\tfor _, v := range data {\n\t\t\tif v != 0 {\n\t\t\t\treturn nil, errors.New(\"id3: invalid padding\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif s.Err() != nil {\n\t\treturn nil, s.Err()\n\t}\n\n\treturn frames, nil\n}\n\ntype ID3Frames []*ID3Frame\n\nfunc (f ID3Frames) Lookup(id FrameID) *ID3Frame {\n\tfor i := len(f) - 1; i >= 0; i-- {\n\t\tif f[i].ID == id {\n\t\t\treturn f[i]\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype ID3Frame struct {\n\tID FrameID\n\tFlags uint16\n\tData []byte\n}\n\nfunc (f *ID3Frame) String() string {\n\tdata, terminus := f.Data, \"\"\n\tif len(data) > 128 {\n\t\tdata, terminus = data[:128], \"...\"\n\t}\n\n\treturn fmt.Sprintf(\"&ID3Frame{ID: %s, Flags: 0x%04x, Data: %d:%q%s}\",\n\t\tf.ID.String(), f.Flags, len(f.Data), data, terminus)\n}\n\nfunc (f *ID3Frame) Text() (string, error) {\n\tif len(f.Data) < 2 {\n\t\treturn \"\", errors.New(\"id3: frame data is invalid\")\n\t}\n\n\tdata := f.Data[1:]\n\tvar ord binary.ByteOrder = binary.BigEndian\n\n\tswitch f.Data[0] {\n\tcase 0x00:\n\t\tfor _, v := range data {\n\t\t\tif v&0x80 == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trunes := make([]rune, len(data))\n\t\t\tfor i, v := range data {\n\t\t\t\trunes[i] = rune(v)\n\t\t\t}\n\n\t\t\treturn string(runes), nil\n\t\t}\n\n\t\tfallthrough\n\tcase 0x03:\n\t\tif data[len(data)-1] == 0x00 {\n\t\t\t\/\/ The specification requires that the string be\n\t\t\t\/\/ terminated with 0x00, but not all implementations\n\t\t\t\/\/ do this.\n\t\t\tdata = data[:len(data)-1]\n\t\t}\n\n\t\treturn string(data), nil\n\tcase 0x01:\n\t\tif len(data) < 2 {\n\t\t\treturn \"\", errors.New(\"id3: missing UTF-16 BOM\")\n\t\t}\n\n\t\tif data[0] == 0xff && data[1] == 0xfe {\n\t\t\tord = binary.LittleEndian\n\t\t} else if data[0] == 0xfe && data[1] == 0xff {\n\t\t\tord = binary.BigEndian\n\t\t} else {\n\t\t\treturn \"\", errors.New(\"id3: invalid UTF-16 BOM\")\n\t\t}\n\n\t\tdata = data[2:]\n\t\tfallthrough\n\tcase 0x02:\n\t\tif len(data)%2 != 0 {\n\t\t\treturn \"\", errors.New(\"id3: UTF-16 data is not even number of bytes\")\n\t\t}\n\n\t\tu16s := make([]uint16, len(data)\/2)\n\t\tfor i := range u16s {\n\t\t\tu16s[i] = ord.Uint16(data[i*2:])\n\t\t}\n\n\t\tif u16s[len(u16s)-1] == 0x0000 {\n\t\t\t\/\/ The specification requires that the string be\n\t\t\t\/\/ terminated with 0x00 0x00, but not all\n\t\t\t\/\/ implementations do this.\n\t\t\tu16s = u16s[:len(u16s)-1]\n\t\t}\n\n\t\treturn string(utf16.Decode(u16s)), nil\n\tdefault:\n\t\treturn \"\", errors.New(\"id3: frame uses unsupported encoding\")\n\t}\n}\n<commit_msg>Return error from (*ID3Frame).Text when flags are set<commit_after>\/\/ Copyright 2017 Tom Thorogood. All rights reserved.\n\/\/ Use of this source code is governed by a Modified\n\/\/ BSD License that can be found in the LICENSE file.\n\npackage id3v2\n\n\/\/go:generate go run generate_ids.go\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"unicode\/utf16\"\n)\n\n\/\/ This is an implementation of v2.4.0 of the ID3v2 tagging format,\n\/\/ defined in: http:\/\/id3.org\/id3v2.4.0-structure, and v2.3.0 of\n\/\/ the ID3v2 tagging format, defined in: http:\/\/id3.org\/id3v2.3.0.\n\nconst (\n\tflagUnsynchronisation = 1 << (7 - iota)\n\tflagExtendedHeader\n\tflagExperimental\n\tflagFooter\n\n\tknownFlags = flagUnsynchronisation | flagExtendedHeader |\n\t\tflagExperimental | flagFooter\n)\n\ntype FrameID uint32\n\nconst syncsafeInvalid = ^uint32(0)\n\nfunc syncsafe(data []byte) uint32 {\n\t_ = data[3]\n\n\tif data[0]&0x80 != 0 || data[1]&0x80 != 0 ||\n\t\tdata[2]&0x80 != 0 || data[3]&0x80 != 0 {\n\t\treturn syncsafeInvalid\n\t}\n\n\treturn uint32(data[0])<<21 | uint32(data[1])<<14 |\n\t\tuint32(data[2])<<7 | uint32(data[3])\n}\n\nfunc id3Split(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\ti := bytes.Index(data, []byte(\"ID3\"))\n\tif i == -1 {\n\t\tif len(data) < 2 {\n\t\t\treturn 0, nil, nil\n\t\t}\n\n\t\treturn len(data) - 2, nil, nil\n\t}\n\n\tdata = data[i:]\n\tif len(data) < 10 {\n\t\tif atEOF {\n\t\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t\t}\n\n\t\treturn i, nil, nil\n\t}\n\n\tsize := syncsafe(data[6:])\n\n\tif data[3] == 0xff || data[4] == 0xff || size == syncsafeInvalid {\n\t\t\/\/ Skipping when we find the string \"ID3\" in the file but\n\t\t\/\/ the remaining header is invalid is consistent with the\n\t\t\/\/ detection logic in §3.1. This also reduces the\n\t\t\/\/ likelihood of errors being caused by the byte sequence\n\t\t\/\/ \"ID3\" (49 44 33) occuring in the audio, but does not\n\t\t\/\/ eliminate the possibility of errors in this case.\n\t\t\/\/\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ An ID3v2 tag can be detected with the following pattern:\n\t\t\/\/ $49 44 33 yy yy xx zz zz zz zz\n\t\t\/\/ Where yy is less than $FF, xx is the 'flags' byte and zz\n\t\t\/\/ is less than $80.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[3] > 0x05 {\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ If software with ID3v2.4.0 and below support should\n\t\t\/\/ encounter version five or higher it should simply\n\t\t\/\/ ignore the whole tag.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[3] < 0x03 {\n\t\t\/\/ This package only supports v2.3.0 and v2.4.0, skip\n\t\t\/\/ versions bellow v2.3.0.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[5]&^knownFlags != 0 {\n\t\t\/\/ Skip tag blocks that contain unknown flags.\n\t\t\/\/\n\t\t\/\/ Quoting from §3.1 of id3v2.4.0-structure.txt:\n\t\t\/\/ If one of these undefined flags are set, the tag might\n\t\t\/\/ not be readable for a parser that does not know the\n\t\t\/\/ flags function.\n\t\treturn i + 3, nil, nil\n\t}\n\n\tif data[5]&flagFooter == flagFooter {\n\t\tsize += 10\n\t}\n\n\tif len(data) < 10+int(size) {\n\t\tif atEOF {\n\t\t\treturn 0, nil, io.ErrUnexpectedEOF\n\t\t}\n\n\t\treturn i, nil, nil\n\t}\n\n\treturn i + 10 + int(size), data[:10+size], nil\n}\n\nconst invalidFrameID = ^FrameID(0)\n\nfunc validIDByte(b byte) bool {\n\treturn (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')\n}\n\nfunc frameID(data []byte) FrameID {\n\t_ = data[3]\n\n\tif validIDByte(data[0]) && validIDByte(data[1]) && validIDByte(data[2]) &&\n\t\t\/\/ Although it violates the specification, some software\n\t\t\/\/ incorrectly encodes v2.2.0 three character tags as\n\t\t\/\/ four character v2.3.0 tags with a trailing zero byte\n\t\t\/\/ when upgrading the tagging format version.\n\t\t(validIDByte(data[3]) || data[3] == 0) {\n\t\treturn FrameID(binary.BigEndian.Uint32(data))\n\t}\n\n\tfor _, v := range data {\n\t\tif v != 0 {\n\t\t\treturn invalidFrameID\n\t\t}\n\t}\n\n\t\/\/ This is probably the begging of padding.\n\treturn 0\n}\n\nvar bufPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, 4<<10)\n\t\treturn &buf\n\t},\n}\n\nfunc Scan(r io.Reader) (ID3Frames, error) {\n\tbuf := bufPool.Get()\n\tdefer bufPool.Put(buf)\n\n\ts := bufio.NewScanner(r)\n\ts.Buffer(*buf.(*[]byte), 1<<28)\n\ts.Split(id3Split)\n\n\tvar frames ID3Frames\n\nscan:\n\tfor s.Scan() {\n\t\tdata := s.Bytes()\n\n\t\theader := data[:10]\n\t\tdata = data[10:]\n\n\t\tif string(header[:3]) != \"ID3\" {\n\t\t\tpanic(\"id3: bufio.Scanner failed\")\n\t\t}\n\n\t\tversion := header[3]\n\t\tswitch version {\n\t\tcase 0x04, 0x03:\n\t\tdefault:\n\t\t\tcontinue scan\n\t\t}\n\n\t\tflags := header[5]\n\n\t\tif flags&flagFooter == flagFooter {\n\t\t\tfooter := data[len(data)-10:]\n\t\t\tdata = data[:len(data)-10]\n\n\t\t\tif string(footer[:3]) != \"3DI\" ||\n\t\t\t\t!bytes.Equal(header[3:], footer[3:]) {\n\t\t\t\treturn nil, errors.New(\"id3: invalid footer\")\n\t\t\t}\n\t\t}\n\n\t\tif flags&flagExtendedHeader == flagExtendedHeader {\n\t\t\tsize := syncsafe(data)\n\t\t\tif size == syncsafeInvalid || len(data) < int(size) {\n\t\t\t\treturn nil, errors.New(\"id3: invalid extended header\")\n\t\t\t}\n\n\t\t\textendedHeader := data[:size]\n\t\t\tdata = data[size:]\n\n\t\t\t_ = extendedHeader\n\t\t}\n\n\t\t\/\/ TODO: expose unsynchronisation flag\n\n\tframes:\n\t\tfor len(data) > 10 {\n\t\t\t_ = data[9]\n\n\t\t\tid := frameID(data)\n\t\t\tswitch id {\n\t\t\tcase 0:\n\t\t\t\t\/\/ We've probably hit padding, the padding\n\t\t\t\t\/\/ validity check below will handle this.\n\t\t\t\tbreak frames\n\t\t\tcase invalidFrameID:\n\t\t\t\treturn nil, errors.New(\"id3: invalid frame id\")\n\t\t\t}\n\n\t\t\tvar size uint32\n\t\t\tswitch version {\n\t\t\tcase 0x04:\n\t\t\t\tsize = syncsafe(data[4:])\n\t\t\t\tif size == syncsafeInvalid {\n\t\t\t\t\treturn nil, errors.New(\"id3: invalid frame size\")\n\t\t\t\t}\n\t\t\tcase 0x03:\n\t\t\t\tsize = binary.BigEndian.Uint32(data[4:])\n\t\t\tdefault:\n\t\t\t\tpanic(\"unhandled version\")\n\t\t\t}\n\n\t\t\tif len(data) < 10+int(size) {\n\t\t\t\treturn nil, errors.New(\"id3: frame size exceeds length of tag data\")\n\t\t\t}\n\n\t\t\tframes = append(frames, &ID3Frame{\n\t\t\t\tID: id,\n\t\t\t\tFlags: binary.BigEndian.Uint16(data[8:]),\n\t\t\t\tData: append([]byte(nil), data[10:10+size]...),\n\t\t\t})\n\n\t\t\tdata = data[10+size:]\n\t\t}\n\n\t\tif flags&flagFooter == flagFooter && len(data) != 0 {\n\t\t\treturn nil, errors.New(\"id3: padding with footer\")\n\t\t}\n\n\t\tfor _, v := range data {\n\t\t\tif v != 0 {\n\t\t\t\treturn nil, errors.New(\"id3: invalid padding\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif s.Err() != nil {\n\t\treturn nil, s.Err()\n\t}\n\n\treturn frames, nil\n}\n\ntype ID3Frames []*ID3Frame\n\nfunc (f ID3Frames) Lookup(id FrameID) *ID3Frame {\n\tfor i := len(f) - 1; i >= 0; i-- {\n\t\tif f[i].ID == id {\n\t\t\treturn f[i]\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype ID3Frame struct {\n\tID FrameID\n\tFlags uint16\n\tData []byte\n}\n\nfunc (f *ID3Frame) String() string {\n\tdata, terminus := f.Data, \"\"\n\tif len(data) > 128 {\n\t\tdata, terminus = data[:128], \"...\"\n\t}\n\n\treturn fmt.Sprintf(\"&ID3Frame{ID: %s, Flags: 0x%04x, Data: %d:%q%s}\",\n\t\tf.ID.String(), f.Flags, len(f.Data), data, terminus)\n}\n\nfunc (f *ID3Frame) Text() (string, error) {\n\tif len(f.Data) < 2 {\n\t\treturn \"\", errors.New(\"id3: frame data is invalid\")\n\t}\n\n\tif f.Flags&0xff != 0 {\n\t\treturn \"\", errors.New(\"id3: frame flags are not supported\")\n\t}\n\n\tdata := f.Data[1:]\n\tvar ord binary.ByteOrder = binary.BigEndian\n\n\tswitch f.Data[0] {\n\tcase 0x00:\n\t\tfor _, v := range data {\n\t\t\tif v&0x80 == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trunes := make([]rune, len(data))\n\t\t\tfor i, v := range data {\n\t\t\t\trunes[i] = rune(v)\n\t\t\t}\n\n\t\t\treturn string(runes), nil\n\t\t}\n\n\t\tfallthrough\n\tcase 0x03:\n\t\tif data[len(data)-1] == 0x00 {\n\t\t\t\/\/ The specification requires that the string be\n\t\t\t\/\/ terminated with 0x00, but not all implementations\n\t\t\t\/\/ do this.\n\t\t\tdata = data[:len(data)-1]\n\t\t}\n\n\t\treturn string(data), nil\n\tcase 0x01:\n\t\tif len(data) < 2 {\n\t\t\treturn \"\", errors.New(\"id3: missing UTF-16 BOM\")\n\t\t}\n\n\t\tif data[0] == 0xff && data[1] == 0xfe {\n\t\t\tord = binary.LittleEndian\n\t\t} else if data[0] == 0xfe && data[1] == 0xff {\n\t\t\tord = binary.BigEndian\n\t\t} else {\n\t\t\treturn \"\", errors.New(\"id3: invalid UTF-16 BOM\")\n\t\t}\n\n\t\tdata = data[2:]\n\t\tfallthrough\n\tcase 0x02:\n\t\tif len(data)%2 != 0 {\n\t\t\treturn \"\", errors.New(\"id3: UTF-16 data is not even number of bytes\")\n\t\t}\n\n\t\tu16s := make([]uint16, len(data)\/2)\n\t\tfor i := range u16s {\n\t\t\tu16s[i] = ord.Uint16(data[i*2:])\n\t\t}\n\n\t\tif u16s[len(u16s)-1] == 0x0000 {\n\t\t\t\/\/ The specification requires that the string be\n\t\t\t\/\/ terminated with 0x00 0x00, but not all\n\t\t\t\/\/ implementations do this.\n\t\t\tu16s = u16s[:len(u16s)-1]\n\t\t}\n\n\t\treturn string(utf16.Decode(u16s)), nil\n\tdefault:\n\t\treturn \"\", errors.New(\"id3: frame uses unsupported encoding\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package casper provides methods for interacting with the Casper API.\npackage casper\n\nimport (\n\t\"compress\/gzip\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Casper constants.\nconst (\n\tSnapchatVersion = \"9.18.2.0\"\n\n\tCasperSignRequestURL = \"https:\/\/api.casper.io\/snapchat\/clientauth\/signrequest\"\n\tCasperAttestationCreateBinaryURL = \"https:\/\/api.casper.io\/snapchat\/attestation\/create\"\n\tCasperAttestationAttestBinaryURL = \"https:\/\/api.casper.io\/snapchat\/attestation\/attest\"\n\n\tGoogleSafteyNetURL = \"https:\/\/www.googleapis.com\/androidantiabuse\/v1\/x\/create?alt=PROTO&key=AIzaSyBofcZsgLSS7BOnBjZPEkk4rYwzOIz-lTI\"\n\tAttestationCheckerURL = \"https:\/\/www.googleapis.com\/androidcheck\/v1\/attestations\/attest?alt=JSON&key=AIzaSyDqVnJBjE5ymo--oBJt3On7HQx9xNm1RHA\"\n)\n\n\/\/ Casper error variables.\nvar (\n\tcasperParseError = Error{Err: \"casper: CasperParseError\"}\n\tcasperHTTPError = Error{Err: \"casper: CasperHTTPError\"}\n)\n\n\/\/ Casper holds credentials to be used when connecting to the Casper API.\ntype Casper struct {\n\tAPIKey string\n\tAPISecret string\n\tUsername string\n\tPassword string\n\tDebug bool\n\tProxyURL *url.URL\n}\n\n\/\/ Error handles errors returned by casper methods.\ntype Error struct {\n\tErr string\n\tReason error\n}\n\n\/\/ Error is a function which CasperError satisfies.\n\/\/ It returns a properly formatted error message when an error occurs.\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s\\nReason: %s\", e.Err, e.Reason.Error())\n}\n\n\/\/ sortURLMap sorts a given url.Values map m alphabetically by it's keys, whilst retaining the values.\nfunc sortURLMap(m url.Values) string {\n\tvar keys []string\n\tvar sortedParamString string\n\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tv := m[k]\n\t\tsortedParamString += k + v[0]\n\t}\n\n\treturn sortedParamString\n}\n\n\/\/ GenerateRequestSignature creates a Casper API request signature.\nfunc (c *Casper) GenerateRequestSignature(params url.Values, signature string) string {\n\trequestString := sortURLMap(params)\n\tbyteString := []byte(requestString)\n\tmac := hmac.New(sha256.New, []byte(signature))\n\tmac.Write(byteString)\n\treturn \"v1:\" + hex.EncodeToString(mac.Sum(nil))\n}\n\n\/\/ GetAttestation fetches a valid Google attestation using the Casper API.\nfunc (c *Casper) GetAttestation(username, password, timestamp string) (string, error) {\n\tvar tr *http.Transport\n\n\ttr = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tif c.ProxyURL != nil {\n\t\ttr.Proxy = http.ProxyURL(c.ProxyURL)\n\t}\n\n\t\/\/ 1 - Fetch the device binary.\n\tclient := &http.Client{Transport: tr}\n\n\tclientAuthForm := url.Values{}\n\tclientAuthForm.Add(\"username\", username)\n\tclientAuthForm.Add(\"password\", password)\n\tclientAuthForm.Add(\"timestamp\", timestamp)\n\tclientAuthForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature := c.GenerateRequestSignature(clientAuthForm, c.APISecret)\n\n\treq, err := http.NewRequest(\"GET\", CasperAttestationCreateBinaryURL, nil)\n\treq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\treq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\treq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\treq.Header.Set(\"Accept\", \"*\/*\")\n\treq.Header.Set(\"Expect\", \"100-continue\")\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if res.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + res.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tparsed, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(parsed))\n\t}\n\n\tvar binaryData map[string]interface{}\n\tjson.Unmarshal(parsed, &binaryData)\n\tb64binary := binaryData[\"binary\"].(string)\n\n\tprotobuf, err := base64.StdEncoding.DecodeString(b64binary)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\n\t\/\/ 2 - Send decoded binary as protobuf to Google for validation.\n\tcreateBinaryReq, err := http.NewRequest(\"POST\", GoogleSafteyNetURL, strings.NewReader(string(protobuf)))\n\tcreateBinaryReq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\tcreateBinaryReq.Header.Set(\"User-Agent\", \"DroidGuard\/7329000 (A116 _Quad KOT49H); gzip\")\n\tcreateBinaryReq.Header.Set(\"Content-Type\", \"application\/x-protobuf\")\n\n\tcreateBinaryRes, err := client.Do(createBinaryReq)\n\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if createBinaryRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + createBinaryRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tcreateBinaryGzipRes, err := gzip.NewReader(createBinaryRes.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\n\tvar parsedData map[string]interface{}\n\tprotobufData, err := ioutil.ReadAll(createBinaryGzipRes)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(parsed))\n\t}\n\n\tjson.Unmarshal(protobufData, &parsedData)\n\n\t\/\/ 3 - Send snapchat version, nonce and protobuf data to Casper API in exchange for an attestation request.\n\tb64protobuf := base64.StdEncoding.EncodeToString(protobufData)\n\thash := sha256.New()\n\tio.WriteString(hash, username+\"|\"+password+\"|\"+timestamp+\"|\"+\"\/loq\/login\")\n\tnonce := base64.StdEncoding.EncodeToString(hash.Sum(nil))\n\n\tattestForm := url.Values{}\n\tattestForm.Add(\"nonce\", nonce)\n\tattestForm.Add(\"protobuf\", b64protobuf)\n\tattestForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature = c.GenerateRequestSignature(attestForm, c.APISecret)\n\tattestReq, err := http.NewRequest(\"POST\", CasperAttestationAttestBinaryURL, strings.NewReader(attestForm.Encode()))\n\n\tattestReq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\tattestReq.Header.Set(\"Accept\", \"*\/*\")\n\tattestReq.Header.Set(\"Expect\", \"100-continue\")\n\tattestReq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\tattestReq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\tattestReq.Header.Set(\"Accept-Encoding\", \"gzip;q=0,deflate,sdch\")\n\tattestReq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\n\tattestRes, err := client.Do(attestReq)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if attestRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + attestRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tvar attestData map[string]interface{}\n\tattestation, err := ioutil.ReadAll(attestRes.Body)\n\tif c.Debug == true {\n\t\tfmt.Println(string(attestation))\n\t}\n\tjson.Unmarshal(attestation, &attestData)\n\n\t\/\/ 4 - Get the binary value in the response map and send it to Google as protobuf in exchange for a signed attestation.\n\t_, attestExists := attestData[\"binary\"].(string)\n\tif attestExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'binary' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tattestDecodedBody, _ := base64.StdEncoding.DecodeString(attestData[\"binary\"].(string))\n\n\tattestCheckReq, err := http.NewRequest(\"POST\", AttestationCheckerURL, strings.NewReader(string(attestDecodedBody)))\n\tattestCheckReq.Header.Set(\"User-Agent\", \"SafetyNet\/7899000 (WIKO JZO54K); gzip\")\n\tattestCheckReq.Header.Set(\"Content-Type\", \"application\/x-protobuf\")\n\tattestCheckReq.Header.Set(\"Content-Length\", string(len(attestDecodedBody)))\n\tattestCheckReq.Header.Set(\"Connection\", \"Keep-Alive\")\n\tattestCheckReq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\n\tattestCheckRes, err := client.Do(attestCheckReq)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if attestCheckRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + attestCheckRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tattestGzipRes, err := gzip.NewReader(attestCheckRes.Body)\n\tattestDecompressedRes, err := ioutil.ReadAll(attestGzipRes)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(attestDecompressedRes))\n\t}\n\n\tvar attestSignedData map[string]interface{}\n\tjson.Unmarshal(attestDecompressedRes, &attestSignedData)\n\t_, attestSigExists := attestSignedData[\"signedAttestation\"].(string)\n\tif attestSigExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'signedAttestation' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tsingedAttestation := attestSignedData[\"signedAttestation\"].(string)\n\treturn singedAttestation, nil\n}\n\n\/\/ GetClientAuthToken fetches a generated client auth token using the Casper API.\nfunc (c *Casper) GetClientAuthToken(username, password, timestamp string) (string, error) {\n\tvar tr *http.Transport\n\n\ttr = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tif c.ProxyURL != nil {\n\t\ttr.Proxy = http.ProxyURL(c.ProxyURL)\n\t}\n\n\tclient := &http.Client{Transport: tr}\n\tclientAuthForm := url.Values{}\n\tclientAuthForm.Add(\"username\", username)\n\tclientAuthForm.Add(\"password\", password)\n\tclientAuthForm.Add(\"timestamp\", timestamp)\n\tclientAuthForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature := c.GenerateRequestSignature(clientAuthForm, c.APISecret)\n\treq, err := http.NewRequest(\"POST\", CasperSignRequestURL, strings.NewReader(string(clientAuthForm.Encode())))\n\treq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\treq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\treq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\treq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if resp.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + resp.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(body))\n\t}\n\n\tvar data map[string]interface{}\n\tjson.Unmarshal(body, &data)\n\t_, signatureExists := data[\"signature\"].(string)\n\tif signatureExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'signature' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tsignature := data[\"signature\"].(string)\n\treturn signature, nil\n}\n\n\/\/ SetProxyURL sets given string addr, as a proxy addr. Primarily for debugging purposes.\nfunc (c *Casper) SetProxyURL(addr string) error {\n\tproxyURL, err := url.Parse(addr)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn casperParseError\n\t}\n\tif proxyURL.Scheme == \"\" {\n\t\treturn errors.New(\"Invalid proxy url.\")\n\t}\n\tc.ProxyURL = proxyURL\n\treturn nil\n}\n<commit_msg>Bump version.<commit_after>\/\/ Package casper provides methods for interacting with the Casper API.\npackage casper\n\nimport (\n\t\"compress\/gzip\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Casper constants.\nconst (\n\tSnapchatVersion = \"9.19.0.0\"\n\n\tCasperSignRequestURL = \"https:\/\/api.casper.io\/snapchat\/clientauth\/signrequest\"\n\tCasperAttestationCreateBinaryURL = \"https:\/\/api.casper.io\/snapchat\/attestation\/create\"\n\tCasperAttestationAttestBinaryURL = \"https:\/\/api.casper.io\/snapchat\/attestation\/attest\"\n\n\tGoogleSafteyNetURL = \"https:\/\/www.googleapis.com\/androidantiabuse\/v1\/x\/create?alt=PROTO&key=AIzaSyBofcZsgLSS7BOnBjZPEkk4rYwzOIz-lTI\"\n\tAttestationCheckerURL = \"https:\/\/www.googleapis.com\/androidcheck\/v1\/attestations\/attest?alt=JSON&key=AIzaSyDqVnJBjE5ymo--oBJt3On7HQx9xNm1RHA\"\n)\n\n\/\/ Casper error variables.\nvar (\n\tcasperParseError = Error{Err: \"casper: CasperParseError\"}\n\tcasperHTTPError = Error{Err: \"casper: CasperHTTPError\"}\n)\n\n\/\/ Casper holds credentials to be used when connecting to the Casper API.\ntype Casper struct {\n\tAPIKey string\n\tAPISecret string\n\tUsername string\n\tPassword string\n\tDebug bool\n\tProxyURL *url.URL\n}\n\n\/\/ Error handles errors returned by casper methods.\ntype Error struct {\n\tErr string\n\tReason error\n}\n\n\/\/ Error is a function which CasperError satisfies.\n\/\/ It returns a properly formatted error message when an error occurs.\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s\\nReason: %s\", e.Err, e.Reason.Error())\n}\n\n\/\/ sortURLMap sorts a given url.Values map m alphabetically by it's keys, whilst retaining the values.\nfunc sortURLMap(m url.Values) string {\n\tvar keys []string\n\tvar sortedParamString string\n\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tv := m[k]\n\t\tsortedParamString += k + v[0]\n\t}\n\n\treturn sortedParamString\n}\n\n\/\/ GenerateRequestSignature creates a Casper API request signature.\nfunc (c *Casper) GenerateRequestSignature(params url.Values, signature string) string {\n\trequestString := sortURLMap(params)\n\tbyteString := []byte(requestString)\n\tmac := hmac.New(sha256.New, []byte(signature))\n\tmac.Write(byteString)\n\treturn \"v1:\" + hex.EncodeToString(mac.Sum(nil))\n}\n\n\/\/ GetAttestation fetches a valid Google attestation using the Casper API.\nfunc (c *Casper) GetAttestation(username, password, timestamp string) (string, error) {\n\tvar tr *http.Transport\n\n\ttr = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tif c.ProxyURL != nil {\n\t\ttr.Proxy = http.ProxyURL(c.ProxyURL)\n\t}\n\n\t\/\/ 1 - Fetch the device binary.\n\tclient := &http.Client{Transport: tr}\n\n\tclientAuthForm := url.Values{}\n\tclientAuthForm.Add(\"username\", username)\n\tclientAuthForm.Add(\"password\", password)\n\tclientAuthForm.Add(\"timestamp\", timestamp)\n\tclientAuthForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature := c.GenerateRequestSignature(clientAuthForm, c.APISecret)\n\n\treq, err := http.NewRequest(\"GET\", CasperAttestationCreateBinaryURL, nil)\n\treq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\treq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\treq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\treq.Header.Set(\"Accept\", \"*\/*\")\n\treq.Header.Set(\"Expect\", \"100-continue\")\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if res.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + res.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tparsed, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(parsed))\n\t}\n\n\tvar binaryData map[string]interface{}\n\tjson.Unmarshal(parsed, &binaryData)\n\tb64binary := binaryData[\"binary\"].(string)\n\n\tprotobuf, err := base64.StdEncoding.DecodeString(b64binary)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\n\t\/\/ 2 - Send decoded binary as protobuf to Google for validation.\n\tcreateBinaryReq, err := http.NewRequest(\"POST\", GoogleSafteyNetURL, strings.NewReader(string(protobuf)))\n\tcreateBinaryReq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\tcreateBinaryReq.Header.Set(\"User-Agent\", \"DroidGuard\/7329000 (A116 _Quad KOT49H); gzip\")\n\tcreateBinaryReq.Header.Set(\"Content-Type\", \"application\/x-protobuf\")\n\n\tcreateBinaryRes, err := client.Do(createBinaryReq)\n\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if createBinaryRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + createBinaryRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tcreateBinaryGzipRes, err := gzip.NewReader(createBinaryRes.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\n\tvar parsedData map[string]interface{}\n\tprotobufData, err := ioutil.ReadAll(createBinaryGzipRes)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(parsed))\n\t}\n\n\tjson.Unmarshal(protobufData, &parsedData)\n\n\t\/\/ 3 - Send snapchat version, nonce and protobuf data to Casper API in exchange for an attestation request.\n\tb64protobuf := base64.StdEncoding.EncodeToString(protobufData)\n\thash := sha256.New()\n\tio.WriteString(hash, username+\"|\"+password+\"|\"+timestamp+\"|\"+\"\/loq\/login\")\n\tnonce := base64.StdEncoding.EncodeToString(hash.Sum(nil))\n\n\tattestForm := url.Values{}\n\tattestForm.Add(\"nonce\", nonce)\n\tattestForm.Add(\"protobuf\", b64protobuf)\n\tattestForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature = c.GenerateRequestSignature(attestForm, c.APISecret)\n\tattestReq, err := http.NewRequest(\"POST\", CasperAttestationAttestBinaryURL, strings.NewReader(attestForm.Encode()))\n\n\tattestReq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\tattestReq.Header.Set(\"Accept\", \"*\/*\")\n\tattestReq.Header.Set(\"Expect\", \"100-continue\")\n\tattestReq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\tattestReq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\tattestReq.Header.Set(\"Accept-Encoding\", \"gzip;q=0,deflate,sdch\")\n\tattestReq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\n\tattestRes, err := client.Do(attestReq)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if attestRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + attestRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tvar attestData map[string]interface{}\n\tattestation, err := ioutil.ReadAll(attestRes.Body)\n\tif c.Debug == true {\n\t\tfmt.Println(string(attestation))\n\t}\n\tjson.Unmarshal(attestation, &attestData)\n\n\t\/\/ 4 - Get the binary value in the response map and send it to Google as protobuf in exchange for a signed attestation.\n\t_, attestExists := attestData[\"binary\"].(string)\n\tif attestExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'binary' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tattestDecodedBody, _ := base64.StdEncoding.DecodeString(attestData[\"binary\"].(string))\n\n\tattestCheckReq, err := http.NewRequest(\"POST\", AttestationCheckerURL, strings.NewReader(string(attestDecodedBody)))\n\tattestCheckReq.Header.Set(\"User-Agent\", \"SafetyNet\/7899000 (WIKO JZO54K); gzip\")\n\tattestCheckReq.Header.Set(\"Content-Type\", \"application\/x-protobuf\")\n\tattestCheckReq.Header.Set(\"Content-Length\", string(len(attestDecodedBody)))\n\tattestCheckReq.Header.Set(\"Connection\", \"Keep-Alive\")\n\tattestCheckReq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\n\tattestCheckRes, err := client.Do(attestCheckReq)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if attestCheckRes.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + attestCheckRes.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tattestGzipRes, err := gzip.NewReader(attestCheckRes.Body)\n\tattestDecompressedRes, err := ioutil.ReadAll(attestGzipRes)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(attestDecompressedRes))\n\t}\n\n\tvar attestSignedData map[string]interface{}\n\tjson.Unmarshal(attestDecompressedRes, &attestSignedData)\n\t_, attestSigExists := attestSignedData[\"signedAttestation\"].(string)\n\tif attestSigExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'signedAttestation' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tsingedAttestation := attestSignedData[\"signedAttestation\"].(string)\n\treturn singedAttestation, nil\n}\n\n\/\/ GetClientAuthToken fetches a generated client auth token using the Casper API.\nfunc (c *Casper) GetClientAuthToken(username, password, timestamp string) (string, error) {\n\tvar tr *http.Transport\n\n\ttr = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tif c.ProxyURL != nil {\n\t\ttr.Proxy = http.ProxyURL(c.ProxyURL)\n\t}\n\n\tclient := &http.Client{Transport: tr}\n\tclientAuthForm := url.Values{}\n\tclientAuthForm.Add(\"username\", username)\n\tclientAuthForm.Add(\"password\", password)\n\tclientAuthForm.Add(\"timestamp\", timestamp)\n\tclientAuthForm.Add(\"snapchat_version\", SnapchatVersion)\n\n\tcasperSignature := c.GenerateRequestSignature(clientAuthForm, c.APISecret)\n\treq, err := http.NewRequest(\"POST\", CasperSignRequestURL, strings.NewReader(string(clientAuthForm.Encode())))\n\treq.Header.Set(\"User-Agent\", \"CasperGoAPIClient\/1.1\")\n\treq.Header.Set(\"X-Casper-API-Key\", c.APIKey)\n\treq.Header.Set(\"X-Casper-Signature\", casperSignature)\n\treq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tcasperHTTPError.Reason = err\n\t\treturn \"\", casperHTTPError\n\t} else if resp.StatusCode != 200 {\n\t\tcasperHTTPError.Reason = errors.New(\"Request returned non 200 code. (\" + resp.Status + \")\")\n\t\treturn \"\", casperHTTPError\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn \"\", casperParseError\n\t}\n\tif c.Debug == true {\n\t\tfmt.Println(string(body))\n\t}\n\n\tvar data map[string]interface{}\n\tjson.Unmarshal(body, &data)\n\t_, signatureExists := data[\"signature\"].(string)\n\tif signatureExists != true {\n\t\tcasperParseError.Reason = errors.New(\"Key 'signature' does not exist.\")\n\t\treturn \"\", casperParseError\n\t}\n\tsignature := data[\"signature\"].(string)\n\treturn signature, nil\n}\n\n\/\/ SetProxyURL sets given string addr, as a proxy addr. Primarily for debugging purposes.\nfunc (c *Casper) SetProxyURL(addr string) error {\n\tproxyURL, err := url.Parse(addr)\n\tif err != nil {\n\t\tcasperParseError.Reason = err\n\t\treturn casperParseError\n\t}\n\tif proxyURL.Scheme == \"\" {\n\t\treturn errors.New(\"Invalid proxy url.\")\n\t}\n\tc.ProxyURL = proxyURL\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package fuseklient\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n)\n\n\/\/ IDGen is responsible for generating ids for newly created Entry. It is\n\/\/ threadsafe and guaranteed to return unique ids.\ntype IDGen struct {\n\t\/\/ Mutex protects the fields below.\n\tsync.Mutex\n\n\t\/\/ LastID is the last id that was allocated.\n\tLastID fuseops.InodeID\n}\n\n\/\/ NewIDGen is the required initializer for IDGen. It sets LastID to\n\/\/ fuseops.RootInodeID, ie 1 since that's the default ID for root.\nfunc NewIDGen() *IDGen {\n\treturn &IDGen{Mutex: sync.Mutex{}, LastID: fuseops.RootInodeID}\n}\n\n\/\/ Next returns next available fuseops.InodeID.\nfunc (i *IDGen) Next() fuseops.InodeID {\n\ti.Lock()\n\tdefer i.Unlock()\n\n\ti.LastID++\n\treturn i.LastID\n}\n\ntype HandleIDGen struct {\n\t\/\/ Mutex protects the fields below.\n\tsync.Mutex\n\n\t\/\/ LastID is the last id that was allocated.\n\tLastID fuseops.HandleID\n}\n\n\/\/ NewIDGen is the required initializer for IDGen. It sets LastID to\n\/\/ fuseops.RootInodeID, ie 1 since that's the default ID for root.\nfunc NewHandleIDGen() *HandleIDGen {\n\treturn &HandleIDGen{Mutex: sync.Mutex{}}\n}\n\nfunc (h *HandleIDGen) Next() fuseops.HandleID {\n\th.Lock()\n\tdefer h.Unlock()\n\n\th.LastID++\n\treturn h.LastID\n}\n<commit_msg>fix HandleIDGen comments<commit_after>package fuseklient\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n)\n\n\/\/ IDGen is responsible for generating ids for newly created Entry. It is\n\/\/ threadsafe and guaranteed to return unique ids.\ntype IDGen struct {\n\t\/\/ Mutex protects the fields below.\n\tsync.Mutex\n\n\t\/\/ LastID is the last id that was allocated.\n\tLastID fuseops.InodeID\n}\n\n\/\/ NewIDGen is the required initializer for IDGen. It sets LastID to\n\/\/ fuseops.RootInodeID, ie 1 since that's the default ID for root.\nfunc NewIDGen() *IDGen {\n\treturn &IDGen{Mutex: sync.Mutex{}, LastID: fuseops.RootInodeID}\n}\n\n\/\/ Next returns next available fuseops.InodeID.\nfunc (i *IDGen) Next() fuseops.InodeID {\n\ti.Lock()\n\tdefer i.Unlock()\n\n\ti.LastID++\n\treturn i.LastID\n}\n\ntype HandleIDGen struct {\n\t\/\/ Mutex protects the fields below.\n\tsync.Mutex\n\n\t\/\/ LastID is the last id that was allocated.\n\tLastID fuseops.HandleID\n}\n\n\/\/ NewHandleIDGen is the required initializer for HandleID.\nfunc NewHandleIDGen() *HandleIDGen {\n\treturn &HandleIDGen{Mutex: sync.Mutex{}}\n}\n\n\/\/ Next returns next available fuseops.HandleID\nfunc (h *HandleIDGen) Next() fuseops.HandleID {\n\th.Lock()\n\tdefer h.Unlock()\n\n\th.LastID++\n\treturn h.LastID\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Tim Shannon. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage bolthold\n\nimport (\n\t\"bytes\"\n\t\"sort\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\n\/\/ BoltHoldIndexTag is the struct tag used to define an a field as indexable for a bolthold\nconst BoltHoldIndexTag = \"boltholdIndex\"\n\nconst indexBucketPrefix = \"_index\"\n\n\/\/ size of iterator keys stored in memory before more are fetched\nconst iteratorKeyMinCacheSize = 100\n\n\/\/ Index is a function that returns the indexable bytes of the passed in value\ntype Index func(name string, value interface{}) ([]byte, error)\n\n\/\/ adds an item to the index\nfunc indexAdd(storer Storer, tx *bolt.Tx, key []byte, data interface{}) error {\n\tindexes := storer.Indexes()\n\tfor name, index := range indexes {\n\t\terr := indexUpdate(storer.Type(), name, index, tx, key, data, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ removes an item from the index\nfunc indexDelete(storer Storer, tx *bolt.Tx, key []byte, data interface{}) error {\n\tindexes := storer.Indexes()\n\n\tfor name, index := range indexes {\n\t\terr := indexUpdate(storer.Type(), name, index, tx, key, data, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ adds or removes a specific index on an item\nfunc indexUpdate(typeName, indexName string, index Index, tx *bolt.Tx, key []byte, value interface{}, delete bool) error {\n\tindexKey, err := index(indexName, value)\n\tif indexKey == nil {\n\t\treturn nil\n\t}\n\n\tindexValue := make(keyList, 0)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := tx.CreateBucketIfNotExists(indexBucketName(typeName, indexName))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tiVal := b.Get(indexKey)\n\tif iVal != nil {\n\t\terr = decode(iVal, &indexValue)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif delete {\n\t\tindexValue.remove(key)\n\t} else {\n\t\tindexValue.add(key)\n\t}\n\n\tiVal, err = encode(indexValue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = b.Put(indexKey, iVal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IndexExists tests if an index exists for the passed in field name\nfunc (s *Store) IndexExists(tx *bolt.Tx, typeName, indexName string) bool {\n\treturn (tx.Bucket(indexBucketName(typeName, indexName)) != nil)\n}\n\n\/\/ indexBucketName returns the name of the bolt bucket where this index is stored\nfunc indexBucketName(typeName, indexName string) []byte {\n\treturn []byte(indexBucketPrefix + \":\" + typeName + \":\" + indexName)\n}\n\n\/\/ keyList is a slice of unique, sorted keys([]byte) such as what an index points to\ntype keyList [][]byte\n\nfunc (v *keyList) add(key []byte) {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\tif i < len(*v) && bytes.Compare((*v)[i], key) == 0 {\n\t\t\/\/ already added\n\t\treturn\n\t}\n\n\t*v = append(*v, nil)\n\tcopy((*v)[i+1:], (*v)[i:])\n\t(*v)[i] = key\n}\n\nfunc (v *keyList) remove(key []byte) {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\tif i < len(*v) {\n\t\tcopy((*v)[i:], (*v)[i+1:])\n\t\t(*v)[len(*v)-1] = nil\n\t\t*v = (*v)[:len(*v)-1]\n\t}\n}\n\nfunc (v *keyList) in(key []byte) bool {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\treturn (i < len(*v) && bytes.Compare((*v)[i], key) == 0)\n}\n\ntype iterator struct {\n\tkeyCache [][]byte\n\tdataBucket *bolt.Bucket\n\tindexCursor *bolt.Cursor\n\tnextKeys func(bool, *bolt.Cursor) ([][]byte, error)\n\tprepCursor bool\n\terr error\n}\n\n\/\/ TODO: Use cursor.Seek() by looking at query criteria to skip uncessary reads and seek to the earliest potential record\n\nfunc newIterator(tx *bolt.Tx, typeName string, query *Query) *iterator {\n\n\titer := &iterator{\n\t\tdataBucket: tx.Bucket([]byte(typeName)),\n\t\tprepCursor: true,\n\t}\n\n\tif iter.dataBucket == nil {\n\t\treturn iter\n\t}\n\n\tcriteria := query.fieldCriteria[query.index]\n\n\t\/\/ 3 scenarios\n\t\/\/ Key field\n\tif query.index == Key() {\n\t\titer.indexCursor = tx.Bucket([]byte(typeName)).Cursor()\n\n\t\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\t\tvar nKeys [][]byte\n\n\t\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\t\tvar k []byte\n\t\t\t\tif prepCursor {\n\t\t\t\t\tk, _ = cursor.First()\n\t\t\t\t\tprepCursor = false\n\t\t\t\t} else {\n\t\t\t\t\tk, _ = cursor.Next()\n\t\t\t\t}\n\t\t\t\tif k == nil {\n\t\t\t\t\treturn nKeys, nil\n\t\t\t\t}\n\n\t\t\t\tok, err := matchesAllCriteria(criteria, k, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tif ok {\n\t\t\t\t\tnKeys = append(nKeys, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nKeys, nil\n\t\t}\n\n\t\treturn iter\n\t}\n\n\tiBucket := findIndexBucket(tx, typeName, query)\n\n\tif iBucket == nil {\n\t\t\/\/ bad index, filter through entire store\n\t\tquery.badIndex = true\n\n\t\titer.indexCursor = tx.Bucket([]byte(typeName)).Cursor()\n\n\t\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\t\tvar nKeys [][]byte\n\n\t\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\t\tvar k []byte\n\t\t\t\tif prepCursor {\n\t\t\t\t\tk, _ = cursor.First()\n\t\t\t\t\tprepCursor = false\n\t\t\t\t} else {\n\t\t\t\t\tk, _ = cursor.Next()\n\t\t\t\t}\n\t\t\t\tif k == nil {\n\t\t\t\t\treturn nKeys, nil\n\t\t\t\t}\n\n\t\t\t\tnKeys = append(nKeys, k)\n\t\t\t}\n\t\t\treturn nKeys, nil\n\t\t}\n\n\t\treturn iter\n\t}\n\n\t\/\/ indexed field\n\titer.indexCursor = iBucket.Cursor()\n\n\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\tvar nKeys [][]byte\n\n\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\tvar k, v []byte\n\t\t\tif prepCursor {\n\t\t\t\tk, v = cursor.First()\n\t\t\t\tprepCursor = false\n\t\t\t} else {\n\t\t\t\tk, v = cursor.Next()\n\t\t\t}\n\t\t\tif k == nil {\n\t\t\t\treturn nKeys, nil\n\t\t\t}\n\t\t\tok, err := matchesAllCriteria(criteria, k, true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif ok {\n\t\t\t\t\/\/ append the slice of keys stored in the index\n\t\t\t\tvar keys = make(keyList, 0)\n\t\t\t\terr := decode(v, &keys)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tnKeys = append(nKeys, [][]byte(keys)...)\n\t\t\t}\n\n\t\t}\n\t\treturn nKeys, nil\n\n\t}\n\n\treturn iter\n\n}\n\n\/\/ findIndexBucket returns the index bucket from the query, and if not found, tries to find the next available index\nfunc findIndexBucket(tx *bolt.Tx, typeName string, query *Query) *bolt.Bucket {\n\tiBucket := tx.Bucket(indexBucketName(typeName, query.index))\n\tif iBucket != nil {\n\t\treturn iBucket\n\t}\n\n\tfor field := range query.fieldCriteria {\n\t\tif field == query.index {\n\t\t\tcontinue\n\t\t}\n\n\t\tiBucket = tx.Bucket(indexBucketName(typeName, field))\n\t\tif iBucket != nil {\n\t\t\tquery.index = field\n\t\t\treturn iBucket\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Next returns the next key value that matches the iterators criteria\n\/\/ If no more kv's are available the return nil, if there is an error, they return nil\n\/\/ and iterator.Error() will return the error\nfunc (i *iterator) Next() (key []byte, value []byte) {\n\tif i.err != nil {\n\t\treturn nil, nil\n\t}\n\n\tif i.dataBucket == nil {\n\t\treturn nil, nil\n\t}\n\n\tif i.nextKeys == nil {\n\t\treturn nil, nil\n\t}\n\n\tif len(i.keyCache) == 0 {\n\t\tnewKeys, err := i.nextKeys(i.prepCursor, i.indexCursor)\n\t\ti.prepCursor = false\n\t\tif err != nil {\n\t\t\ti.err = err\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tif len(newKeys) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\ti.keyCache = append(i.keyCache, newKeys...)\n\t}\n\n\tnextKey := i.keyCache[0]\n\ti.keyCache = i.keyCache[1:]\n\n\tval := i.dataBucket.Get(nextKey)\n\n\treturn nextKey, val\n}\n\n\/\/ Error returns the last error, iterator.Next() will not continue if there is an error present\nfunc (i *iterator) Error() error {\n\treturn i.err\n}\n<commit_msg>Fixed issue with iterator critera not being correct...<commit_after>\/\/ Copyright 2016 Tim Shannon. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage bolthold\n\nimport (\n\t\"bytes\"\n\t\"sort\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\n\/\/ BoltHoldIndexTag is the struct tag used to define an a field as indexable for a bolthold\nconst BoltHoldIndexTag = \"boltholdIndex\"\n\nconst indexBucketPrefix = \"_index\"\n\n\/\/ size of iterator keys stored in memory before more are fetched\nconst iteratorKeyMinCacheSize = 100\n\n\/\/ Index is a function that returns the indexable bytes of the passed in value\ntype Index func(name string, value interface{}) ([]byte, error)\n\n\/\/ adds an item to the index\nfunc indexAdd(storer Storer, tx *bolt.Tx, key []byte, data interface{}) error {\n\tindexes := storer.Indexes()\n\tfor name, index := range indexes {\n\t\terr := indexUpdate(storer.Type(), name, index, tx, key, data, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ removes an item from the index\nfunc indexDelete(storer Storer, tx *bolt.Tx, key []byte, data interface{}) error {\n\tindexes := storer.Indexes()\n\n\tfor name, index := range indexes {\n\t\terr := indexUpdate(storer.Type(), name, index, tx, key, data, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ adds or removes a specific index on an item\nfunc indexUpdate(typeName, indexName string, index Index, tx *bolt.Tx, key []byte, value interface{}, delete bool) error {\n\tindexKey, err := index(indexName, value)\n\tif indexKey == nil {\n\t\treturn nil\n\t}\n\n\tindexValue := make(keyList, 0)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := tx.CreateBucketIfNotExists(indexBucketName(typeName, indexName))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tiVal := b.Get(indexKey)\n\tif iVal != nil {\n\t\terr = decode(iVal, &indexValue)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif delete {\n\t\tindexValue.remove(key)\n\t} else {\n\t\tindexValue.add(key)\n\t}\n\n\tiVal, err = encode(indexValue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = b.Put(indexKey, iVal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ IndexExists tests if an index exists for the passed in field name\nfunc (s *Store) IndexExists(tx *bolt.Tx, typeName, indexName string) bool {\n\treturn (tx.Bucket(indexBucketName(typeName, indexName)) != nil)\n}\n\n\/\/ indexBucketName returns the name of the bolt bucket where this index is stored\nfunc indexBucketName(typeName, indexName string) []byte {\n\treturn []byte(indexBucketPrefix + \":\" + typeName + \":\" + indexName)\n}\n\n\/\/ keyList is a slice of unique, sorted keys([]byte) such as what an index points to\ntype keyList [][]byte\n\nfunc (v *keyList) add(key []byte) {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\tif i < len(*v) && bytes.Compare((*v)[i], key) == 0 {\n\t\t\/\/ already added\n\t\treturn\n\t}\n\n\t*v = append(*v, nil)\n\tcopy((*v)[i+1:], (*v)[i:])\n\t(*v)[i] = key\n}\n\nfunc (v *keyList) remove(key []byte) {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\tif i < len(*v) {\n\t\tcopy((*v)[i:], (*v)[i+1:])\n\t\t(*v)[len(*v)-1] = nil\n\t\t*v = (*v)[:len(*v)-1]\n\t}\n}\n\nfunc (v *keyList) in(key []byte) bool {\n\ti := sort.Search(len(*v), func(i int) bool {\n\t\treturn bytes.Compare((*v)[i], key) >= 0\n\t})\n\n\treturn (i < len(*v) && bytes.Compare((*v)[i], key) == 0)\n}\n\ntype iterator struct {\n\tkeyCache [][]byte\n\tdataBucket *bolt.Bucket\n\tindexCursor *bolt.Cursor\n\tnextKeys func(bool, *bolt.Cursor) ([][]byte, error)\n\tprepCursor bool\n\terr error\n}\n\n\/\/ TODO: Use cursor.Seek() by looking at query criteria to skip uncessary reads and seek to the earliest potential record\n\nfunc newIterator(tx *bolt.Tx, typeName string, query *Query) *iterator {\n\n\titer := &iterator{\n\t\tdataBucket: tx.Bucket([]byte(typeName)),\n\t\tprepCursor: true,\n\t}\n\n\tif iter.dataBucket == nil {\n\t\treturn iter\n\t}\n\n\t\/\/ 3 scenarios\n\t\/\/ Key field\n\tif query.index == Key() {\n\t\titer.indexCursor = tx.Bucket([]byte(typeName)).Cursor()\n\t\tcriteria := query.fieldCriteria[Key()]\n\n\t\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\t\tvar nKeys [][]byte\n\n\t\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\t\tvar k []byte\n\t\t\t\tif prepCursor {\n\t\t\t\t\tk, _ = cursor.First()\n\t\t\t\t\tprepCursor = false\n\t\t\t\t} else {\n\t\t\t\t\tk, _ = cursor.Next()\n\t\t\t\t}\n\t\t\t\tif k == nil {\n\t\t\t\t\treturn nKeys, nil\n\t\t\t\t}\n\n\t\t\t\tok, err := matchesAllCriteria(criteria, k, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tif ok {\n\t\t\t\t\tnKeys = append(nKeys, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nKeys, nil\n\t\t}\n\n\t\treturn iter\n\t}\n\n\tiBucket := findIndexBucket(tx, typeName, query)\n\n\tif iBucket == nil {\n\t\t\/\/ bad index, filter through entire store\n\t\tquery.badIndex = true\n\n\t\titer.indexCursor = tx.Bucket([]byte(typeName)).Cursor()\n\n\t\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\t\tvar nKeys [][]byte\n\n\t\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\t\tvar k []byte\n\t\t\t\tif prepCursor {\n\t\t\t\t\tk, _ = cursor.First()\n\t\t\t\t\tprepCursor = false\n\t\t\t\t} else {\n\t\t\t\t\tk, _ = cursor.Next()\n\t\t\t\t}\n\t\t\t\tif k == nil {\n\t\t\t\t\treturn nKeys, nil\n\t\t\t\t}\n\n\t\t\t\tnKeys = append(nKeys, k)\n\t\t\t}\n\t\t\treturn nKeys, nil\n\t\t}\n\n\t\treturn iter\n\t}\n\n\t\/\/ indexed field\n\titer.indexCursor = iBucket.Cursor()\n\n\titer.nextKeys = func(prepCursor bool, cursor *bolt.Cursor) ([][]byte, error) {\n\t\tvar nKeys [][]byte\n\t\tcriteria := query.fieldCriteria[query.index]\n\n\t\tfor len(nKeys) < iteratorKeyMinCacheSize {\n\t\t\tvar k, v []byte\n\t\t\tif prepCursor {\n\t\t\t\tk, v = cursor.First()\n\t\t\t\tprepCursor = false\n\t\t\t} else {\n\t\t\t\tk, v = cursor.Next()\n\t\t\t}\n\t\t\tif k == nil {\n\t\t\t\treturn nKeys, nil\n\t\t\t}\n\t\t\tok, err := matchesAllCriteria(criteria, k, true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif ok {\n\t\t\t\t\/\/ append the slice of keys stored in the index\n\t\t\t\tvar keys = make(keyList, 0)\n\t\t\t\terr := decode(v, &keys)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tnKeys = append(nKeys, [][]byte(keys)...)\n\t\t\t}\n\n\t\t}\n\t\treturn nKeys, nil\n\n\t}\n\n\treturn iter\n\n}\n\n\/\/ findIndexBucket returns the index bucket from the query, and if not found, tries to find the next available index\nfunc findIndexBucket(tx *bolt.Tx, typeName string, query *Query) *bolt.Bucket {\n\tiBucket := tx.Bucket(indexBucketName(typeName, query.index))\n\tif iBucket != nil {\n\t\treturn iBucket\n\t}\n\n\tfor field := range query.fieldCriteria {\n\t\tif field == query.index {\n\t\t\tcontinue\n\t\t}\n\n\t\tiBucket = tx.Bucket(indexBucketName(typeName, field))\n\t\tif iBucket != nil {\n\t\t\tquery.index = field\n\t\t\treturn iBucket\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Next returns the next key value that matches the iterators criteria\n\/\/ If no more kv's are available the return nil, if there is an error, they return nil\n\/\/ and iterator.Error() will return the error\nfunc (i *iterator) Next() (key []byte, value []byte) {\n\tif i.err != nil {\n\t\treturn nil, nil\n\t}\n\n\tif i.dataBucket == nil {\n\t\treturn nil, nil\n\t}\n\n\tif i.nextKeys == nil {\n\t\treturn nil, nil\n\t}\n\n\tif len(i.keyCache) == 0 {\n\t\tnewKeys, err := i.nextKeys(i.prepCursor, i.indexCursor)\n\t\ti.prepCursor = false\n\t\tif err != nil {\n\t\t\ti.err = err\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tif len(newKeys) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\ti.keyCache = append(i.keyCache, newKeys...)\n\t}\n\n\tnextKey := i.keyCache[0]\n\ti.keyCache = i.keyCache[1:]\n\n\tval := i.dataBucket.Get(nextKey)\n\n\treturn nextKey, val\n}\n\n\/\/ Error returns the last error, iterator.Next() will not continue if there is an error present\nfunc (i *iterator) Error() error {\n\treturn i.err\n}\n<|endoftext|>"} {"text":"<commit_before>package gnosis\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/JackKnifed\/blackfriday\"\n\t\"github.com\/JackKnifed\/blackfriday-text\"\n\t\"github.com\/blevesearch\/bleve\"\n\t\"gopkg.in\/fsnotify.v1\"\n)\n\nvar openWatchers []fsnotify.Watcher\n\ntype indexedPage struct {\n\tTitle string `json:\"title\"`\n\tURIPath string `json:\"path\"`\n\tBody string `json:\"body\"`\n\tTopics string `json:\"topic\"`\n\tKeywords string `json:\"keyword\"`\n\tAuthors string `json: \"author\"`\n\tModified time.Time `json:\"modified\"`\n}\n\nfunc createIndex(config IndexSection) bool {\n\tnewIndex, err := bleve.Open(path.Clean(config.IndexPath))\n\tif err == nil {\n\t\tlog.Printf(\"Index already exists %s\", config.IndexPath)\n\t} else if err == bleve.ErrorIndexPathDoesNotExist {\n\t\tlog.Printf(\"Creating new index %s\", config.IndexName)\n\t\t\/\/ create a mapping\n\t\tindexMapping := buildIndexMapping(config)\n\t\tnewIndex, err = bleve.New(path.Clean(config.IndexPath), indexMapping)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to create the new index %s - %v\", config.IndexPath, err)\n\t\t\treturn false\n\t\t} else {\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Got an error opening the index %s but it already exists %v\", config.IndexPath, err)\n\t\treturn false\n\t}\n\tnewIndex.Close()\n\treturn true\n}\n\nfunc EnableIndex(config IndexSection) bool {\n\tif !createIndex(config) {\n\t\treturn false\n\t}\n\tfor dir, path := range config.WatchDirs {\n\t\t\/\/ dir = strings.TrimSuffix(dir, \"\/\")\n\t\tlog.Printf(\"Watching and walking dir %s index %s\", dir, config.IndexPath)\n\t\twalkForIndexing(dir, dir, path, config)\n\t}\n\treturn true\n}\n\nfunc DisableAllIndexes() {\n\tlog.Println(\"Stopping all watchers\")\n\tfor _, watcher := range openWatchers {\n\t\twatcher.Close()\n\t}\n}\n\nfunc buildIndexMapping(config IndexSection) *bleve.IndexMapping {\n\n\t\/\/ create a text field type\n\tenTextFieldMapping := bleve.NewTextFieldMapping()\n\tenTextFieldMapping.Analyzer = config.IndexType\n\n\t\/\/ create a date field type\n\tdateTimeMapping := bleve.NewDateTimeFieldMapping()\n\n\t\/\/ map out the wiki page\n\twikiMapping := bleve.NewDocumentMapping()\n\twikiMapping.AddFieldMappingsAt(\"title\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"path\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"body\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"topic\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"keyword\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"author\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"modified\", dateTimeMapping)\n\n\t\/\/ add the wiki page mapping to a new index\n\tindexMapping := bleve.NewIndexMapping()\n\tindexMapping.AddDocumentMapping(config.IndexName, wikiMapping)\n\tindexMapping.DefaultAnalyzer = config.IndexType\n\n\treturn indexMapping\n}\n\n\/\/ walks a given path, and runs processUpdate on each File\nfunc walkForIndexing(path, filePath, requestPath string, config IndexSection) {\n\t\/\/watcherLoop(path, filePath, requestPath, config)\n\tdirEntries, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, dirEntry := range dirEntries {\n\t\tdirEntryPath := path + string(os.PathSeparator) + dirEntry.Name()\n\t\tif dirEntry.IsDir() {\n\t\t\twalkForIndexing(dirEntryPath, filePath, requestPath, config)\n\t\t} else if strings.HasSuffix(dirEntry.Name(), config.WatchExtension) {\n\t\t\tprocessUpdate(dirEntryPath, getURIPath(dirEntryPath, filePath, requestPath), config)\n\t\t}\n\t}\n}\n\n\/\/ given all of the inputs, watches afor new\/deleted files in that directory\n\/\/ adds\/removes\/udpates index as necessary\nfunc watcherLoop(watchPath, filePrefix, uriPrefix string, config IndexSection) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = watcher.Add(watchPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tidleTimer := time.NewTimer(10 * time.Second)\n\tqueuedEvents := make([]fsnotify.Event, 0)\n\n\topenWatchers = append(openWatchers, *watcher)\n\n\tlog.Printf(\"watching '%s' for changes...\", watchPath)\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-watcher.Events:\n\t\t\tqueuedEvents = append(queuedEvents, event)\n\t\t\tidleTimer.Reset(10 * time.Second)\n\t\tcase err := <-watcher.Errors:\n\t\t\tlog.Fatal(err)\n\t\tcase <-idleTimer.C:\n\t\t\tfor _, event := range queuedEvents {\n\t\t\t\tif strings.HasSuffix(event.Name, config.WatchExtension) {\n\t\t\t\t\tswitch event.Op {\n\t\t\t\t\tcase fsnotify.Remove, fsnotify.Rename:\n\t\t\t\t\t\t\/\/ delete the filePath\n\t\t\t\t\t\tprocessDelete(getURIPath(watchPath+event.Name, filePrefix, uriPrefix),\n\t\t\t\t\t\t\tconfig.IndexName)\n\t\t\t\t\tcase fsnotify.Create, fsnotify.Write:\n\t\t\t\t\t\t\/\/ update the filePath\n\t\t\t\t\t\tprocessUpdate(watchPath+event.Name,\n\t\t\t\t\t\t\tgetURIPath(watchPath+event.Name, filePrefix, uriPrefix), config)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t\/\/ ignore\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tqueuedEvents = make([]fsnotify.Event, 0)\n\t\t\tidleTimer.Reset(10 * time.Second)\n\t\t}\n\t}\n}\n\n\/\/ Update the entry in the index to the output from a given file\nfunc processUpdate(filePath, uriPath string, config IndexSection) {\n\tpage, err := generateWikiFromFile(filePath, uriPath, config.Restricted)\n\tif err != nil {\n\t\tlog.Print(err)\n\t} else {\n\t\tindex, _ := bleve.Open(config.IndexPath)\n\t\tdefer index.Close()\n\t\tindex.Index(uriPath, page)\n\t\tlog.Printf(\"updated: %s as %s\", filePath, uriPath)\n\t}\n}\n\n\/\/ Deletes a given path from the wiki entry\nfunc processDelete(uriPath, indexPath string) {\n\tlog.Printf(\"delete: %s\", uriPath)\n\tindex, _ := bleve.Open(indexPath)\n\tdefer index.Close()\n\terr := index.Delete(uriPath)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc cleanupMarkdown(input []byte) string {\n\textensions := 0 | blackfriday.EXTENSION_ALERT_BOXES\n\trenderer := blackfridaytext.TextRenderer()\n\toutput := blackfriday.Markdown(input, renderer, extensions)\n\treturn string(output)\n}\n\nfunc generateWikiFromFile(filePath, uriPath string, restrictedTopics []string) (*indexedPage, error) {\n\tpdata := new(PageMetadata)\n\terr := pdata.LoadPage(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif pdata.MatchedTopic(restrictedTopics) == true {\n\t\treturn nil, errors.New(\"Hit a restricted page - \" + pdata.Title)\n\t}\n\n\ttopics, keywords, authors := pdata.ListMeta()\n\trv := indexedPage{\n\t\tTitle: pdata.Title,\n\t\tBody: cleanupMarkdown(pdata.Page),\n\t\tURIPath: uriPath,\n\t\tTopics: strings.Join(topics, \" \"),\n\t\tKeywords: strings.Join(keywords, \" \"),\n\t\tAuthors: strings.Join(authors, \" \"),\n\t\tModified: pdata.FileStats.ModTime(),\n\t}\n\n\treturn &rv, nil\n}\n\nfunc getURIPath(filePath, filePrefix, uriPrefix string) (uriPath string) {\n\turiPath = strings.TrimPrefix(filePath, filePrefix)\n\turiPath = uriPrefix + uriPath\n\treturn\n}\n\n\/\/ given a path to an index, and a name of field to check\n\/\/ lists all unique values for that field in index\nfunc ListField(indexPath, field string) ([]string, error) {\n\tquery := bleve.NewMatchAllQuery()\n\tsearchRequest := bleve.NewSearchRequest(query)\n\n\tfacet := bleve.NewFacetRequest(field, 1)\n\tsearchRequest.AddFacet(\"allValues\", facet)\n\tif err := query.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Open the index\n\tindex, err := bleve.Open(indexPath)\n\tdefer index.Close()\n\tif index == nil {\n\t\treturn nil, fmt.Errorf(\"no such index [%s]\", indexPath)\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"problem opening index [%s] - %s\", indexPath, err)\n\t}\n\n\tsearchResults, err := index.Search(searchRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := *new([]string)\n\tfor _, oneTerm := range searchResults.Facets[\"allValues\"].Terms {\n\t\tresults = append(results, oneTerm.Term)\n\t}\n\n\treturn results, nil\n}\n\ntype SearchResponse struct {\n\tTotalHits int\n\tMaxScore float64\n\tPageOffset int\n\tSearchTime time.Duration\n\tResults []SearchResponseResult\n}\n\ntype SearchResponseResult struct {\n\tTitle string\n\tURIPath string\n\tScore float64\n\tTopics []string\n\tKeywords []string\n\tAuthors []string\n\tBody string\n}\n\nfunc CreateResponseData(rawResults bleve.SearchResult, pageOffset int) (SearchResponse, error) {\n\tvar response SearchResponse\n\n\tresponse.TotalHits = int(rawResults.Total)\n\tresponse.MaxScore = float64(rawResults.MaxScore)\n\tresponse.PageOffset = pageOffset\n\tresponse.SearchTime = rawResults.Took\n\tfor _, hit := range rawResults.Hits {\n\t\tvar newHit SearchResponseResult\n\n\t\tnewHit.Score = hit.Score\n\n\t\tif str, ok := hit.Fields[\"title\"].(string); ok {\n\t\t\tnewHit.Title = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned title was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"path\"].(string); ok {\n\t\t\tnewHit.URIPath = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned path was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"body\"].(string); ok {\n\t\t\tnewHit.Body = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned body was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"topic\"].(string); ok {\n\t\t\tnewHit.Topics = strings.Split(str, \" \")\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned topics were not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"keyword\"].(string); ok {\n\t\t\tnewHit.Keywords = strings.Split(str, \" \")\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned keywords were not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"author\"].(string); ok {\n\t\t\tnewHit.Authors = strings.Split(str, \" \")\n\t\t}\n\n\t\tresponse.Results = append(response.Results, newHit)\n\t}\n\n\treturn response, nil\n}\n<commit_msg>removed something to try to get the extra \/ in the uri to go away<commit_after>package gnosis\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/JackKnifed\/blackfriday\"\n\t\"github.com\/JackKnifed\/blackfriday-text\"\n\t\"github.com\/blevesearch\/bleve\"\n\t\"gopkg.in\/fsnotify.v1\"\n)\n\nvar openWatchers []fsnotify.Watcher\n\ntype indexedPage struct {\n\tTitle string `json:\"title\"`\n\tURIPath string `json:\"path\"`\n\tBody string `json:\"body\"`\n\tTopics string `json:\"topic\"`\n\tKeywords string `json:\"keyword\"`\n\tAuthors string `json: \"author\"`\n\tModified time.Time `json:\"modified\"`\n}\n\nfunc createIndex(config IndexSection) bool {\n\tnewIndex, err := bleve.Open(path.Clean(config.IndexPath))\n\tif err == nil {\n\t\tlog.Printf(\"Index already exists %s\", config.IndexPath)\n\t} else if err == bleve.ErrorIndexPathDoesNotExist {\n\t\tlog.Printf(\"Creating new index %s\", config.IndexName)\n\t\t\/\/ create a mapping\n\t\tindexMapping := buildIndexMapping(config)\n\t\tnewIndex, err = bleve.New(path.Clean(config.IndexPath), indexMapping)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to create the new index %s - %v\", config.IndexPath, err)\n\t\t\treturn false\n\t\t} else {\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Got an error opening the index %s but it already exists %v\", config.IndexPath, err)\n\t\treturn false\n\t}\n\tnewIndex.Close()\n\treturn true\n}\n\nfunc EnableIndex(config IndexSection) bool {\n\tif !createIndex(config) {\n\t\treturn false\n\t}\n\tfor dir, path := range config.WatchDirs {\n\t\t\/\/ dir = strings.TrimSuffix(dir, \"\/\")\n\t\tlog.Printf(\"Watching and walking dir %s index %s\", dir, config.IndexPath)\n\t\twalkForIndexing(dir, dir, path, config)\n\t}\n\treturn true\n}\n\nfunc DisableAllIndexes() {\n\tlog.Println(\"Stopping all watchers\")\n\tfor _, watcher := range openWatchers {\n\t\twatcher.Close()\n\t}\n}\n\nfunc buildIndexMapping(config IndexSection) *bleve.IndexMapping {\n\n\t\/\/ create a text field type\n\tenTextFieldMapping := bleve.NewTextFieldMapping()\n\tenTextFieldMapping.Analyzer = config.IndexType\n\n\t\/\/ create a date field type\n\tdateTimeMapping := bleve.NewDateTimeFieldMapping()\n\n\t\/\/ map out the wiki page\n\twikiMapping := bleve.NewDocumentMapping()\n\twikiMapping.AddFieldMappingsAt(\"title\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"path\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"body\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"topic\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"keyword\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"author\", enTextFieldMapping)\n\twikiMapping.AddFieldMappingsAt(\"modified\", dateTimeMapping)\n\n\t\/\/ add the wiki page mapping to a new index\n\tindexMapping := bleve.NewIndexMapping()\n\tindexMapping.AddDocumentMapping(config.IndexName, wikiMapping)\n\tindexMapping.DefaultAnalyzer = config.IndexType\n\n\treturn indexMapping\n}\n\n\/\/ walks a given path, and runs processUpdate on each File\nfunc walkForIndexing(path, filePath, requestPath string, config IndexSection) {\n\t\/\/watcherLoop(path, filePath, requestPath, config)\n\tdirEntries, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, dirEntry := range dirEntries {\n\t\tdirEntryPath := path + dirEntry.Name()\n\t\tif dirEntry.IsDir() {\n\t\t\twalkForIndexing(dirEntryPath, filePath, requestPath, config)\n\t\t} else if strings.HasSuffix(dirEntry.Name(), config.WatchExtension) {\n\t\t\tprocessUpdate(dirEntryPath, getURIPath(dirEntryPath, filePath, requestPath), config)\n\t\t}\n\t}\n}\n\n\/\/ given all of the inputs, watches afor new\/deleted files in that directory\n\/\/ adds\/removes\/udpates index as necessary\nfunc watcherLoop(watchPath, filePrefix, uriPrefix string, config IndexSection) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = watcher.Add(watchPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tidleTimer := time.NewTimer(10 * time.Second)\n\tqueuedEvents := make([]fsnotify.Event, 0)\n\n\topenWatchers = append(openWatchers, *watcher)\n\n\tlog.Printf(\"watching '%s' for changes...\", watchPath)\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-watcher.Events:\n\t\t\tqueuedEvents = append(queuedEvents, event)\n\t\t\tidleTimer.Reset(10 * time.Second)\n\t\tcase err := <-watcher.Errors:\n\t\t\tlog.Fatal(err)\n\t\tcase <-idleTimer.C:\n\t\t\tfor _, event := range queuedEvents {\n\t\t\t\tif strings.HasSuffix(event.Name, config.WatchExtension) {\n\t\t\t\t\tswitch event.Op {\n\t\t\t\t\tcase fsnotify.Remove, fsnotify.Rename:\n\t\t\t\t\t\t\/\/ delete the filePath\n\t\t\t\t\t\tprocessDelete(getURIPath(watchPath+event.Name, filePrefix, uriPrefix),\n\t\t\t\t\t\t\tconfig.IndexName)\n\t\t\t\t\tcase fsnotify.Create, fsnotify.Write:\n\t\t\t\t\t\t\/\/ update the filePath\n\t\t\t\t\t\tprocessUpdate(watchPath+event.Name,\n\t\t\t\t\t\t\tgetURIPath(watchPath+event.Name, filePrefix, uriPrefix), config)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t\/\/ ignore\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tqueuedEvents = make([]fsnotify.Event, 0)\n\t\t\tidleTimer.Reset(10 * time.Second)\n\t\t}\n\t}\n}\n\n\/\/ Update the entry in the index to the output from a given file\nfunc processUpdate(filePath, uriPath string, config IndexSection) {\n\tpage, err := generateWikiFromFile(filePath, uriPath, config.Restricted)\n\tif err != nil {\n\t\tlog.Print(err)\n\t} else {\n\t\tindex, _ := bleve.Open(config.IndexPath)\n\t\tdefer index.Close()\n\t\tindex.Index(uriPath, page)\n\t\tlog.Printf(\"updated: %s as %s\", filePath, uriPath)\n\t}\n}\n\n\/\/ Deletes a given path from the wiki entry\nfunc processDelete(uriPath, indexPath string) {\n\tlog.Printf(\"delete: %s\", uriPath)\n\tindex, _ := bleve.Open(indexPath)\n\tdefer index.Close()\n\terr := index.Delete(uriPath)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n\nfunc cleanupMarkdown(input []byte) string {\n\textensions := 0 | blackfriday.EXTENSION_ALERT_BOXES\n\trenderer := blackfridaytext.TextRenderer()\n\toutput := blackfriday.Markdown(input, renderer, extensions)\n\treturn string(output)\n}\n\nfunc generateWikiFromFile(filePath, uriPath string, restrictedTopics []string) (*indexedPage, error) {\n\tpdata := new(PageMetadata)\n\terr := pdata.LoadPage(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif pdata.MatchedTopic(restrictedTopics) == true {\n\t\treturn nil, errors.New(\"Hit a restricted page - \" + pdata.Title)\n\t}\n\n\ttopics, keywords, authors := pdata.ListMeta()\n\trv := indexedPage{\n\t\tTitle: pdata.Title,\n\t\tBody: cleanupMarkdown(pdata.Page),\n\t\tURIPath: uriPath,\n\t\tTopics: strings.Join(topics, \" \"),\n\t\tKeywords: strings.Join(keywords, \" \"),\n\t\tAuthors: strings.Join(authors, \" \"),\n\t\tModified: pdata.FileStats.ModTime(),\n\t}\n\n\treturn &rv, nil\n}\n\nfunc getURIPath(filePath, filePrefix, uriPrefix string) (uriPath string) {\n\turiPath = strings.TrimPrefix(filePath, filePrefix)\n\turiPath = uriPrefix + uriPath\n\treturn\n}\n\n\/\/ given a path to an index, and a name of field to check\n\/\/ lists all unique values for that field in index\nfunc ListField(indexPath, field string) ([]string, error) {\n\tquery := bleve.NewMatchAllQuery()\n\tsearchRequest := bleve.NewSearchRequest(query)\n\n\tfacet := bleve.NewFacetRequest(field, 1)\n\tsearchRequest.AddFacet(\"allValues\", facet)\n\tif err := query.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Open the index\n\tindex, err := bleve.Open(indexPath)\n\tdefer index.Close()\n\tif index == nil {\n\t\treturn nil, fmt.Errorf(\"no such index [%s]\", indexPath)\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"problem opening index [%s] - %s\", indexPath, err)\n\t}\n\n\tsearchResults, err := index.Search(searchRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := *new([]string)\n\tfor _, oneTerm := range searchResults.Facets[\"allValues\"].Terms {\n\t\tresults = append(results, oneTerm.Term)\n\t}\n\n\treturn results, nil\n}\n\ntype SearchResponse struct {\n\tTotalHits int\n\tMaxScore float64\n\tPageOffset int\n\tSearchTime time.Duration\n\tResults []SearchResponseResult\n}\n\ntype SearchResponseResult struct {\n\tTitle string\n\tURIPath string\n\tScore float64\n\tTopics []string\n\tKeywords []string\n\tAuthors []string\n\tBody string\n}\n\nfunc CreateResponseData(rawResults bleve.SearchResult, pageOffset int) (SearchResponse, error) {\n\tvar response SearchResponse\n\n\tresponse.TotalHits = int(rawResults.Total)\n\tresponse.MaxScore = float64(rawResults.MaxScore)\n\tresponse.PageOffset = pageOffset\n\tresponse.SearchTime = rawResults.Took\n\tfor _, hit := range rawResults.Hits {\n\t\tvar newHit SearchResponseResult\n\n\t\tnewHit.Score = hit.Score\n\n\t\tif str, ok := hit.Fields[\"title\"].(string); ok {\n\t\t\tnewHit.Title = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned title was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"path\"].(string); ok {\n\t\t\tnewHit.URIPath = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned path was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"body\"].(string); ok {\n\t\t\tnewHit.Body = str\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned body was not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"topic\"].(string); ok {\n\t\t\tnewHit.Topics = strings.Split(str, \" \")\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned topics were not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"keyword\"].(string); ok {\n\t\t\tnewHit.Keywords = strings.Split(str, \" \")\n\t\t} else {\n\t\t\treturn response, errors.New(\"returned keywords were not a string\")\n\t\t}\n\n\t\tif str, ok := hit.Fields[\"author\"].(string); ok {\n\t\t\tnewHit.Authors = strings.Split(str, \" \")\n\t\t}\n\n\t\tresponse.Results = append(response.Results, newHit)\n\t}\n\n\treturn response, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package blockstore\n\nimport (\n\t\"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/hashicorp\/golang-lru\"\n\tcontext \"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/ipfs\/go-ipfs\/blocks\"\n\tkey \"github.com\/ipfs\/go-ipfs\/blocks\/key\"\n)\n\n\/\/ WriteCached returns a blockstore that caches up to |size| unique writes (bs.Put).\nfunc WriteCached(bs Blockstore, size int) (*writecache, error) {\n\tc, err := lru.New(size)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &writecache{blockstore: bs, cache: c}, nil\n}\n\ntype writecache struct {\n\tcache *lru.Cache \/\/ pointer b\/c Cache contains a Mutex as value (complicates copying)\n\tblockstore Blockstore\n}\n\nfunc (w *writecache) DeleteBlock(k key.Key) error {\n\tw.cache.Remove(k)\n\treturn w.blockstore.DeleteBlock(k)\n}\n\nfunc (w *writecache) Has(k key.Key) (bool, error) {\n\tif _, ok := w.cache.Get(k); ok {\n\t\treturn true, nil\n\t}\n\treturn w.blockstore.Has(k)\n}\n\nfunc (w *writecache) Get(k key.Key) (*blocks.Block, error) {\n\treturn w.blockstore.Get(k)\n}\n\nfunc (w *writecache) Put(b *blocks.Block) error {\n\tif _, ok := w.cache.Get(b.Key()); ok {\n\t\treturn nil\n\t}\n\tw.cache.Add(b.Key(), struct{}{})\n\treturn w.blockstore.Put(b)\n}\n\nfunc (w *writecache) PutMany(bs []*blocks.Block) error {\n\tvar good []*blocks.Block\n\tfor _, b := range bs {\n\t\tif _, ok := w.cache.Get(b.Key()); !ok {\n\t\t\tgood = append(good, b)\n\t\t}\n\t}\n\treturn w.blockstore.PutMany(good)\n}\n\nfunc (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {\n\treturn w.blockstore.AllKeysChan(ctx)\n}\n\nfunc (w *writecache) GCLock() func() {\n\treturn w.blockstore.(GCBlockstore).GCLock()\n}\n\nfunc (w *writecache) PinLock() func() {\n\treturn w.blockstore.(GCBlockstore).PinLock()\n}\n\nfunc (w *writecache) GCRequested() bool {\n\treturn w.blockstore.(GCBlockstore).GCRequested()\n}\n<commit_msg>Add log events when blocks are added\/removed from the blockstore<commit_after>package blockstore\n\nimport (\n\t\"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/hashicorp\/golang-lru\"\n\tcontext \"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/ipfs\/go-ipfs\/blocks\"\n\tkey \"github.com\/ipfs\/go-ipfs\/blocks\/key\"\n)\n\n\/\/ WriteCached returns a blockstore that caches up to |size| unique writes (bs.Put).\nfunc WriteCached(bs Blockstore, size int) (*writecache, error) {\n\tc, err := lru.New(size)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &writecache{blockstore: bs, cache: c}, nil\n}\n\ntype writecache struct {\n\tcache *lru.Cache \/\/ pointer b\/c Cache contains a Mutex as value (complicates copying)\n\tblockstore Blockstore\n}\n\nfunc (w *writecache) DeleteBlock(k key.Key) error {\n\tdefer log.EventBegin(context.TODO(), \"writecache.BlockRemoved\", &k).Done()\n\tw.cache.Remove(k)\n\treturn w.blockstore.DeleteBlock(k)\n}\n\nfunc (w *writecache) Has(k key.Key) (bool, error) {\n\tif _, ok := w.cache.Get(k); ok {\n\t\treturn true, nil\n\t}\n\treturn w.blockstore.Has(k)\n}\n\nfunc (w *writecache) Get(k key.Key) (*blocks.Block, error) {\n\treturn w.blockstore.Get(k)\n}\n\nfunc (w *writecache) Put(b *blocks.Block) error {\n\tk := b.Key()\n\tif _, ok := w.cache.Get(k); ok {\n\t\treturn nil\n\t}\n\tdefer log.EventBegin(context.TODO(), \"writecache.BlockAdded\", &k).Done()\n\n\tw.cache.Add(b.Key(), struct{}{})\n\treturn w.blockstore.Put(b)\n}\n\nfunc (w *writecache) PutMany(bs []*blocks.Block) error {\n\tvar good []*blocks.Block\n\tfor _, b := range bs {\n\t\tif _, ok := w.cache.Get(b.Key()); !ok {\n\t\t\tgood = append(good, b)\n\t\t\tk := b.Key()\n\t\t\tdefer log.EventBegin(context.TODO(), \"writecache.BlockAdded\", &k).Done()\n\t\t}\n\t}\n\treturn w.blockstore.PutMany(good)\n}\n\nfunc (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {\n\treturn w.blockstore.AllKeysChan(ctx)\n}\n\nfunc (w *writecache) GCLock() func() {\n\treturn w.blockstore.(GCBlockstore).GCLock()\n}\n\nfunc (w *writecache) PinLock() func() {\n\treturn w.blockstore.(GCBlockstore).PinLock()\n}\n\nfunc (w *writecache) GCRequested() bool {\n\treturn w.blockstore.(GCBlockstore).GCRequested()\n}\n<|endoftext|>"} {"text":"<commit_before>package indexer\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/deepfabric\/bkdtree\"\n\t\"github.com\/deepfabric\/go-datastructures\"\n\t\"github.com\/deepfabric\/indexer\/cql\"\n\t\"github.com\/deepfabric\/pilosa\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tLiveDocs string = \"__liveDocs\" \/\/ the directory where stores Index.liveDocs\n)\n\n\/\/Index is created by CqlCreate\ntype Index struct {\n\tMainDir string\n\tDocProt *cql.DocumentWithIdx \/\/document prototype. persisted to an index-specific file\n\n\trwlock sync.RWMutex \/\/concurrent access of bkds, frames, liveDocs\n\tbkds map[string]*bkdtree.BkdTree\n\tframes map[string]*Frame\n\tliveDocs *Frame \/\/row 0 of this frame stores a bitmap of live docIDs. other rows are not used.\n}\n\n\/\/ QueryResult is query result\ntype QueryResult struct {\n\trb *pilosa.Bitmap \/\/ used when no OrderBy given\n\toa *datastructures.OrderedArray \/\/ used when OrderBy given\n}\n\n\/\/ Merge merges other (keep unchagned) into qr\nfunc (qr *QueryResult) Merge(other *QueryResult) {\n\tqr.rb.Merge(other.rb)\n\tqr.oa.Merge(other.oa)\n}\n\n\/\/NewIndex creates index according to given conf, overwrites existing files.\nfunc NewIndex(docProt *cql.DocumentWithIdx, mainDir string, t0mCap, leafCap, intraCap int) (ind *Index, err error) {\n\tif err = indexWriteConf(mainDir, docProt); err != nil {\n\t\treturn\n\t}\n\t\/\/ ensure per-index sub-directory exists\n\tindDir := filepath.Join(mainDir, docProt.Index)\n\tif err = os.MkdirAll(indDir, 0700); err != nil {\n\t\treturn\n\t}\n\tind = &Index{\n\t\tMainDir: mainDir,\n\t\tDocProt: docProt,\n\t\tbkds: make(map[string]*bkdtree.BkdTree),\n\t\tframes: make(map[string]*Frame),\n\t}\n\tvar bkd *bkdtree.BkdTree\n\tfor _, uintProp := range docProt.UintProps {\n\t\tt0mCap := t0mCap\n\t\tLeafCap := leafCap\n\t\tIntraCap := intraCap\n\t\tnumDims := 1\n\t\tbytesPerDim := uintProp.ValLen\n\t\tdir := filepath.Join(indDir, uintProp.Name)\n\t\tprefix := uintProp.Name\n\t\tbkd, err = bkdtree.NewBkdTree(t0mCap, LeafCap, IntraCap, numDims, bytesPerDim, dir, prefix)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds[uintProp.Name] = bkd\n\t}\n\tvar fm *Frame\n\tfor _, strProp := range docProt.StrProps {\n\t\tdir := filepath.Join(indDir, strProp.Name)\n\t\tif fm, err = NewFrame(dir, docProt.Index, strProp.Name, true); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.frames[strProp.Name] = fm\n\t}\n\tdir := filepath.Join(indDir, LiveDocs)\n\tif fm, err = NewFrame(dir, docProt.Index, LiveDocs, true); err != nil {\n\t\treturn\n\t}\n\tind.liveDocs = fm\n\treturn\n}\n\n\/\/indexWriteConf persists conf to given path.\nfunc indexWriteConf(mainDir string, docProt *cql.DocumentWithIdx) (err error) {\n\tif err = os.MkdirAll(mainDir, 0700); err != nil {\n\t\terr = errors.Wrap(err, \"\")\n\t\treturn\n\t}\n\tfp := filepath.Join(mainDir, fmt.Sprintf(\"index_%s.json\", docProt.Index))\n\terr = bkdtree.FileMarshal(fp, docProt)\n\treturn\n}\n\n\/\/indexReadConf parses conf\nfunc indexReadConf(mainDir string, name string, docProt *cql.DocumentWithIdx) (err error) {\n\tfp := filepath.Join(mainDir, fmt.Sprintf(\"index_%s.json\", name))\n\terr = bkdtree.FileUnmarshal(fp, docProt)\n\treturn\n}\n\n\/\/Destroy removes data and conf files on disk.\nfunc (ind *Index) Destroy() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs != nil {\n\t\tfor _, bkd := range ind.bkds {\n\t\t\tif err = bkd.Destroy(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfor _, fm := range ind.frames {\n\t\t\tif err = fm.Destroy(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = ind.liveDocs.Destroy(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds = nil\n\t\tind.frames = nil\n\t\tind.liveDocs = nil\n\t}\n\n\tpaths := make([]string, 0)\n\tfor _, uintProp := range ind.DocProt.UintProps {\n\t\tpaths = append(paths, filepath.Join(ind.MainDir, uintProp.Name))\n\t}\n\tfor _, strProp := range ind.DocProt.StrProps {\n\t\tpaths = append(paths, filepath.Join(ind.MainDir, strProp.Name))\n\t}\n\tpaths = append(paths, filepath.Join(ind.MainDir, LiveDocs))\n\tpaths = append(paths, filepath.Join(ind.MainDir, fmt.Sprintf(\"index_%s.json\", ind.DocProt.Index)))\n\tfor _, fp := range paths {\n\t\tif err = os.RemoveAll(fp); err != nil {\n\t\t\terr = errors.Wrap(err, \"\")\n\t\t}\n\t}\n\treturn\n}\n\n\/\/NewIndexExt create index according to existing files.\nfunc NewIndexExt(mainDir, name string) (ind *Index, err error) {\n\tdocProt := &cql.DocumentWithIdx{}\n\tif err = indexReadConf(mainDir, name, docProt); err != nil {\n\t\treturn\n\t}\n\tind = &Index{\n\t\tMainDir: mainDir,\n\t\tDocProt: docProt,\n\t}\n\terr = ind.Open()\n\treturn\n}\n\n\/\/Open opens existing index. Assumes MainDir and DocProt is already populated.\nfunc (ind *Index) Open() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs != nil {\n\t\t\/\/index is already open\n\t\treturn\n\t}\n\tindDir := filepath.Join(ind.MainDir, ind.DocProt.Index)\n\tind.bkds = make(map[string]*bkdtree.BkdTree)\n\tvar bkd *bkdtree.BkdTree\n\tfor _, uintProp := range ind.DocProt.UintProps {\n\t\tdir := filepath.Join(indDir, uintProp.Name)\n\t\tif bkd, err = bkdtree.NewBkdTreeExt(dir, uintProp.Name); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds[uintProp.Name] = bkd\n\t}\n\tind.frames = make(map[string]*Frame)\n\tvar fm *Frame\n\tfor _, strProp := range ind.DocProt.StrProps {\n\t\tdir := filepath.Join(indDir, strProp.Name)\n\t\tif fm, err = NewFrame(dir, ind.DocProt.Index, strProp.Name, false); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.frames[strProp.Name] = fm\n\t}\n\tdir := filepath.Join(indDir, LiveDocs)\n\tif fm, err = NewFrame(dir, ind.DocProt.Index, LiveDocs, false); err != nil {\n\t\treturn\n\t}\n\tind.liveDocs = fm\n\treturn\n}\n\n\/\/Close closes index\nfunc (ind *Index) Close() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs == nil {\n\t\t\/\/index is already closed\n\t\treturn\n\t}\n\tfor _, bkd := range ind.bkds {\n\t\tif err = bkd.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, fm := range ind.frames {\n\t\tif err = fm.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif err = ind.liveDocs.Close(); err != nil {\n\t\treturn\n\t}\n\tind.bkds = nil\n\tind.frames = nil\n\tind.liveDocs = nil\n\treturn\n}\n\n\/\/Insert executes CqlInsert\nfunc (ind *Index) Insert(doc *cql.DocumentWithIdx) (err error) {\n\tvar bkd *bkdtree.BkdTree\n\tvar fm *Frame\n\tvar changed, ok bool\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\t\/\/check if doc.DocID is already there before insertion.\n\tif changed, err = ind.liveDocs.setBit(0, doc.DocID); err != nil {\n\t\treturn\n\t} else if !changed {\n\t\terr = errors.Errorf(\"document %v is alaredy there before insertion\", doc.DocID)\n\t\treturn\n\t}\n\tfor _, uintProp := range doc.UintProps {\n\t\tif bkd, ok = ind.bkds[uintProp.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %v is missing at index spec, document %v, index spec %v\", uintProp.Name, doc, ind.DocProt)\n\t\t\treturn\n\t\t}\n\t\tp := bkdtree.Point{\n\t\t\tVals: []uint64{uintProp.Val},\n\t\t\tUserData: doc.DocID,\n\t\t}\n\t\tif err = bkd.Insert(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, strProp := range doc.StrProps {\n\t\tif fm, ok = ind.frames[strProp.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %v is missing at index spec, document %v, index spec %v\", strProp.Name, doc, ind.DocProt)\n\t\t\treturn\n\t\t}\n\t\tif err = fm.ParseAndIndex(doc.DocID, strProp.Val); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/Del executes CqlDel. Do mark-deletion only. The caller shall rebuild index in order to recycle disk space.\nfunc (ind *Index) Del(docID uint64) (found bool, err error) {\n\tvar changed bool\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\tif changed, err = ind.liveDocs.clearBit(0, docID); err != nil {\n\t\treturn\n\t} else if !changed {\n\t\terr = errors.Errorf(\"document %v does not exist before deletion\", docID)\n\t\treturn\n\t}\n\tfound = true\n\treturn\n}\n\n\/\/Select executes CqlSelect.\nfunc (ind *Index) Select(q *cql.CqlSelect) (qr *QueryResult, err error) {\n\tvar oa *datastructures.OrderedArray\n\tif q.OrderBy != \"\" {\n\t\tif oa, err = datastructures.NewOrderedArray(q.Limit); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tqr = &QueryResult{\n\t\trb: pilosa.NewBitmap(),\n\t\toa: oa,\n\t}\n\tvar fm *Frame\n\tvar bkd *bkdtree.BkdTree\n\tvar ok bool\n\tvar prevDocs, docs *pilosa.Bitmap\n\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\tprevDocs = ind.liveDocs.row(0)\n\tif prevDocs.Count() == 0 {\n\t\treturn\n\t}\n\tif len(q.StrPreds) != 0 {\n\t\tfor _, strPred := range q.StrPreds {\n\t\t\tif fm, ok = ind.frames[strPred.Name]; !ok {\n\t\t\t\terr = errors.Errorf(\"property %s not found in index spec\", strPred.Name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdocs = fm.Query(strPred.ContWord)\n\t\t\tprevDocs = prevDocs.Intersect(docs)\n\t\t\tif prevDocs.Count() == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(q.UintPreds) == 0 {\n\t\tqr.rb = prevDocs\n\t\treturn\n\t}\n\n\tvisitor := &bkdVisitor{\n\t\tprevDocs: prevDocs,\n\t\tdocs: pilosa.NewBitmap(),\n\t\toa: qr.oa,\n\t}\n\n\tfor _, uintPred := range q.UintPreds {\n\t\tif q.OrderBy == uintPred.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif bkd, ok = ind.bkds[uintPred.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %s not found in index spec\", uintPred.Name)\n\t\t\treturn\n\t\t}\n\t\tvisitor.lowPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.Low},\n\t\t}\n\t\tvisitor.highPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.High},\n\t\t}\n\t\tif err = bkd.Intersect(visitor); err != nil {\n\t\t\treturn\n\t\t}\n\t\tvisitor.prevDocs = visitor.docs\n\t\tif visitor.prevDocs.Count() == 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif q.OrderBy == \"\" {\n\t\tqr.rb = visitor.prevDocs\n\t} else {\n\t\tvar uintPred cql.UintPred\n\t\tif uintPred, ok = q.UintPreds[q.OrderBy]; !ok {\n\t\t\terr = errors.Errorf(\"invalid ORDERBY %s\", q.OrderBy)\n\t\t\treturn\n\t\t}\n\t\tbkd, ok = ind.bkds[uintPred.Name]\n\t\tif !ok {\n\t\t\terr = errors.Errorf(\"property %s not found in index spec\", uintPred.Name)\n\t\t\treturn\n\t\t}\n\t\tvisitor.lowPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.Low},\n\t\t}\n\t\tvisitor.highPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.High},\n\t\t}\n\t\tvisitor.orderBy = true\n\t\tif err = bkd.Intersect(visitor); err != nil {\n\t\t\treturn\n\t\t}\n\t\tqr.oa = visitor.oa\n\t}\n\n\treturn\n}\n\ntype bkdVisitor struct {\n\tlowPoint bkdtree.Point\n\thighPoint bkdtree.Point\n\tprevDocs *pilosa.Bitmap\n\tdocs *pilosa.Bitmap\n\torderBy bool\n\toa *datastructures.OrderedArray\n}\n\nfunc (v *bkdVisitor) GetLowPoint() bkdtree.Point { return v.lowPoint }\n\nfunc (v *bkdVisitor) GetHighPoint() bkdtree.Point { return v.highPoint }\n\nfunc (v *bkdVisitor) VisitPoint(point bkdtree.Point) {\n\tdocID := point.UserData\n\tif v.prevDocs == nil || v.prevDocs.Contains(docID) {\n\t\tif !v.orderBy {\n\t\t\tv.docs.SetBit(docID)\n\t\t} else {\n\t\t\tv.oa.Put(point)\n\t\t}\n\t}\n}\n<commit_msg>added NewQueryResult(limit int)<commit_after>package indexer\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/deepfabric\/bkdtree\"\n\t\"github.com\/deepfabric\/go-datastructures\"\n\t\"github.com\/deepfabric\/indexer\/cql\"\n\t\"github.com\/deepfabric\/pilosa\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tLiveDocs string = \"__liveDocs\" \/\/ the directory where stores Index.liveDocs\n)\n\n\/\/Index is created by CqlCreate\ntype Index struct {\n\tMainDir string\n\tDocProt *cql.DocumentWithIdx \/\/document prototype. persisted to an index-specific file\n\n\trwlock sync.RWMutex \/\/concurrent access of bkds, frames, liveDocs\n\tbkds map[string]*bkdtree.BkdTree\n\tframes map[string]*Frame\n\tliveDocs *Frame \/\/row 0 of this frame stores a bitmap of live docIDs. other rows are not used.\n}\n\n\/\/ QueryResult is query result\ntype QueryResult struct {\n\trb *pilosa.Bitmap \/\/ used when no OrderBy given\n\toa *datastructures.OrderedArray \/\/ used when OrderBy given\n}\n\n\/\/ Merge merges other (keep unchagned) into qr\nfunc (qr *QueryResult) Merge(other *QueryResult) {\n\tqr.rb.Merge(other.rb)\n\tqr.oa.Merge(other.oa)\n}\n\n\/\/ NewQueryResult creates an empty QueryResult\nfunc NewQueryResult(limit int) (qr *QueryResult) {\n\tvar oa *datastructures.OrderedArray\n\tvar err error\n\tif limit > 0 {\n\t\tif oa, err = datastructures.NewOrderedArray(limit); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"NewOrderedArray failed with error %+v\", err))\n\t\t}\n\t}\n\tqr = &QueryResult{\n\t\trb: pilosa.NewBitmap(),\n\t\toa: oa,\n\t}\n\treturn\n}\n\n\/\/NewIndex creates index according to given conf, overwrites existing files.\nfunc NewIndex(docProt *cql.DocumentWithIdx, mainDir string, t0mCap, leafCap, intraCap int) (ind *Index, err error) {\n\tif err = indexWriteConf(mainDir, docProt); err != nil {\n\t\treturn\n\t}\n\t\/\/ ensure per-index sub-directory exists\n\tindDir := filepath.Join(mainDir, docProt.Index)\n\tif err = os.MkdirAll(indDir, 0700); err != nil {\n\t\treturn\n\t}\n\tind = &Index{\n\t\tMainDir: mainDir,\n\t\tDocProt: docProt,\n\t\tbkds: make(map[string]*bkdtree.BkdTree),\n\t\tframes: make(map[string]*Frame),\n\t}\n\tvar bkd *bkdtree.BkdTree\n\tfor _, uintProp := range docProt.UintProps {\n\t\tt0mCap := t0mCap\n\t\tLeafCap := leafCap\n\t\tIntraCap := intraCap\n\t\tnumDims := 1\n\t\tbytesPerDim := uintProp.ValLen\n\t\tdir := filepath.Join(indDir, uintProp.Name)\n\t\tprefix := uintProp.Name\n\t\tbkd, err = bkdtree.NewBkdTree(t0mCap, LeafCap, IntraCap, numDims, bytesPerDim, dir, prefix)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds[uintProp.Name] = bkd\n\t}\n\tvar fm *Frame\n\tfor _, strProp := range docProt.StrProps {\n\t\tdir := filepath.Join(indDir, strProp.Name)\n\t\tif fm, err = NewFrame(dir, docProt.Index, strProp.Name, true); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.frames[strProp.Name] = fm\n\t}\n\tdir := filepath.Join(indDir, LiveDocs)\n\tif fm, err = NewFrame(dir, docProt.Index, LiveDocs, true); err != nil {\n\t\treturn\n\t}\n\tind.liveDocs = fm\n\treturn\n}\n\n\/\/indexWriteConf persists conf to given path.\nfunc indexWriteConf(mainDir string, docProt *cql.DocumentWithIdx) (err error) {\n\tif err = os.MkdirAll(mainDir, 0700); err != nil {\n\t\terr = errors.Wrap(err, \"\")\n\t\treturn\n\t}\n\tfp := filepath.Join(mainDir, fmt.Sprintf(\"index_%s.json\", docProt.Index))\n\terr = bkdtree.FileMarshal(fp, docProt)\n\treturn\n}\n\n\/\/indexReadConf parses conf\nfunc indexReadConf(mainDir string, name string, docProt *cql.DocumentWithIdx) (err error) {\n\tfp := filepath.Join(mainDir, fmt.Sprintf(\"index_%s.json\", name))\n\terr = bkdtree.FileUnmarshal(fp, docProt)\n\treturn\n}\n\n\/\/Destroy removes data and conf files on disk.\nfunc (ind *Index) Destroy() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs != nil {\n\t\tfor _, bkd := range ind.bkds {\n\t\t\tif err = bkd.Destroy(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfor _, fm := range ind.frames {\n\t\t\tif err = fm.Destroy(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = ind.liveDocs.Destroy(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds = nil\n\t\tind.frames = nil\n\t\tind.liveDocs = nil\n\t}\n\n\tpaths := make([]string, 0)\n\tfor _, uintProp := range ind.DocProt.UintProps {\n\t\tpaths = append(paths, filepath.Join(ind.MainDir, uintProp.Name))\n\t}\n\tfor _, strProp := range ind.DocProt.StrProps {\n\t\tpaths = append(paths, filepath.Join(ind.MainDir, strProp.Name))\n\t}\n\tpaths = append(paths, filepath.Join(ind.MainDir, LiveDocs))\n\tpaths = append(paths, filepath.Join(ind.MainDir, fmt.Sprintf(\"index_%s.json\", ind.DocProt.Index)))\n\tfor _, fp := range paths {\n\t\tif err = os.RemoveAll(fp); err != nil {\n\t\t\terr = errors.Wrap(err, \"\")\n\t\t}\n\t}\n\treturn\n}\n\n\/\/NewIndexExt create index according to existing files.\nfunc NewIndexExt(mainDir, name string) (ind *Index, err error) {\n\tdocProt := &cql.DocumentWithIdx{}\n\tif err = indexReadConf(mainDir, name, docProt); err != nil {\n\t\treturn\n\t}\n\tind = &Index{\n\t\tMainDir: mainDir,\n\t\tDocProt: docProt,\n\t}\n\terr = ind.Open()\n\treturn\n}\n\n\/\/Open opens existing index. Assumes MainDir and DocProt is already populated.\nfunc (ind *Index) Open() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs != nil {\n\t\t\/\/index is already open\n\t\treturn\n\t}\n\tindDir := filepath.Join(ind.MainDir, ind.DocProt.Index)\n\tind.bkds = make(map[string]*bkdtree.BkdTree)\n\tvar bkd *bkdtree.BkdTree\n\tfor _, uintProp := range ind.DocProt.UintProps {\n\t\tdir := filepath.Join(indDir, uintProp.Name)\n\t\tif bkd, err = bkdtree.NewBkdTreeExt(dir, uintProp.Name); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.bkds[uintProp.Name] = bkd\n\t}\n\tind.frames = make(map[string]*Frame)\n\tvar fm *Frame\n\tfor _, strProp := range ind.DocProt.StrProps {\n\t\tdir := filepath.Join(indDir, strProp.Name)\n\t\tif fm, err = NewFrame(dir, ind.DocProt.Index, strProp.Name, false); err != nil {\n\t\t\treturn\n\t\t}\n\t\tind.frames[strProp.Name] = fm\n\t}\n\tdir := filepath.Join(indDir, LiveDocs)\n\tif fm, err = NewFrame(dir, ind.DocProt.Index, LiveDocs, false); err != nil {\n\t\treturn\n\t}\n\tind.liveDocs = fm\n\treturn\n}\n\n\/\/Close closes index\nfunc (ind *Index) Close() (err error) {\n\tind.rwlock.Lock()\n\tdefer ind.rwlock.Unlock()\n\tif ind.liveDocs == nil {\n\t\t\/\/index is already closed\n\t\treturn\n\t}\n\tfor _, bkd := range ind.bkds {\n\t\tif err = bkd.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, fm := range ind.frames {\n\t\tif err = fm.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif err = ind.liveDocs.Close(); err != nil {\n\t\treturn\n\t}\n\tind.bkds = nil\n\tind.frames = nil\n\tind.liveDocs = nil\n\treturn\n}\n\n\/\/Insert executes CqlInsert\nfunc (ind *Index) Insert(doc *cql.DocumentWithIdx) (err error) {\n\tvar bkd *bkdtree.BkdTree\n\tvar fm *Frame\n\tvar changed, ok bool\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\t\/\/check if doc.DocID is already there before insertion.\n\tif changed, err = ind.liveDocs.setBit(0, doc.DocID); err != nil {\n\t\treturn\n\t} else if !changed {\n\t\terr = errors.Errorf(\"document %v is alaredy there before insertion\", doc.DocID)\n\t\treturn\n\t}\n\tfor _, uintProp := range doc.UintProps {\n\t\tif bkd, ok = ind.bkds[uintProp.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %v is missing at index spec, document %v, index spec %v\", uintProp.Name, doc, ind.DocProt)\n\t\t\treturn\n\t\t}\n\t\tp := bkdtree.Point{\n\t\t\tVals: []uint64{uintProp.Val},\n\t\t\tUserData: doc.DocID,\n\t\t}\n\t\tif err = bkd.Insert(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, strProp := range doc.StrProps {\n\t\tif fm, ok = ind.frames[strProp.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %v is missing at index spec, document %v, index spec %v\", strProp.Name, doc, ind.DocProt)\n\t\t\treturn\n\t\t}\n\t\tif err = fm.ParseAndIndex(doc.DocID, strProp.Val); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/Del executes CqlDel. Do mark-deletion only. The caller shall rebuild index in order to recycle disk space.\nfunc (ind *Index) Del(docID uint64) (found bool, err error) {\n\tvar changed bool\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\tif changed, err = ind.liveDocs.clearBit(0, docID); err != nil {\n\t\treturn\n\t} else if !changed {\n\t\terr = errors.Errorf(\"document %v does not exist before deletion\", docID)\n\t\treturn\n\t}\n\tfound = true\n\treturn\n}\n\n\/\/Select executes CqlSelect.\nfunc (ind *Index) Select(q *cql.CqlSelect) (qr *QueryResult, err error) {\n\tvar oa *datastructures.OrderedArray\n\tif q.OrderBy != \"\" {\n\t\tif oa, err = datastructures.NewOrderedArray(q.Limit); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tqr = &QueryResult{\n\t\trb: pilosa.NewBitmap(),\n\t\toa: oa,\n\t}\n\tvar fm *Frame\n\tvar bkd *bkdtree.BkdTree\n\tvar ok bool\n\tvar prevDocs, docs *pilosa.Bitmap\n\n\tind.rwlock.RLock()\n\tdefer ind.rwlock.RUnlock()\n\tprevDocs = ind.liveDocs.row(0)\n\tif prevDocs.Count() == 0 {\n\t\treturn\n\t}\n\tif len(q.StrPreds) != 0 {\n\t\tfor _, strPred := range q.StrPreds {\n\t\t\tif fm, ok = ind.frames[strPred.Name]; !ok {\n\t\t\t\terr = errors.Errorf(\"property %s not found in index spec\", strPred.Name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdocs = fm.Query(strPred.ContWord)\n\t\t\tprevDocs = prevDocs.Intersect(docs)\n\t\t\tif prevDocs.Count() == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(q.UintPreds) == 0 {\n\t\tqr.rb = prevDocs\n\t\treturn\n\t}\n\n\tvisitor := &bkdVisitor{\n\t\tprevDocs: prevDocs,\n\t\tdocs: pilosa.NewBitmap(),\n\t\toa: qr.oa,\n\t}\n\n\tfor _, uintPred := range q.UintPreds {\n\t\tif q.OrderBy == uintPred.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif bkd, ok = ind.bkds[uintPred.Name]; !ok {\n\t\t\terr = errors.Errorf(\"property %s not found in index spec\", uintPred.Name)\n\t\t\treturn\n\t\t}\n\t\tvisitor.lowPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.Low},\n\t\t}\n\t\tvisitor.highPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.High},\n\t\t}\n\t\tif err = bkd.Intersect(visitor); err != nil {\n\t\t\treturn\n\t\t}\n\t\tvisitor.prevDocs = visitor.docs\n\t\tif visitor.prevDocs.Count() == 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif q.OrderBy == \"\" {\n\t\tqr.rb = visitor.prevDocs\n\t} else {\n\t\tvar uintPred cql.UintPred\n\t\tif uintPred, ok = q.UintPreds[q.OrderBy]; !ok {\n\t\t\terr = errors.Errorf(\"invalid ORDERBY %s\", q.OrderBy)\n\t\t\treturn\n\t\t}\n\t\tbkd, ok = ind.bkds[uintPred.Name]\n\t\tif !ok {\n\t\t\terr = errors.Errorf(\"property %s not found in index spec\", uintPred.Name)\n\t\t\treturn\n\t\t}\n\t\tvisitor.lowPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.Low},\n\t\t}\n\t\tvisitor.highPoint = bkdtree.Point{\n\t\t\tVals: []uint64{uintPred.High},\n\t\t}\n\t\tvisitor.orderBy = true\n\t\tif err = bkd.Intersect(visitor); err != nil {\n\t\t\treturn\n\t\t}\n\t\tqr.oa = visitor.oa\n\t}\n\n\treturn\n}\n\ntype bkdVisitor struct {\n\tlowPoint bkdtree.Point\n\thighPoint bkdtree.Point\n\tprevDocs *pilosa.Bitmap\n\tdocs *pilosa.Bitmap\n\torderBy bool\n\toa *datastructures.OrderedArray\n}\n\nfunc (v *bkdVisitor) GetLowPoint() bkdtree.Point { return v.lowPoint }\n\nfunc (v *bkdVisitor) GetHighPoint() bkdtree.Point { return v.highPoint }\n\nfunc (v *bkdVisitor) VisitPoint(point bkdtree.Point) {\n\tdocID := point.UserData\n\tif v.prevDocs == nil || v.prevDocs.Contains(docID) {\n\t\tif !v.orderBy {\n\t\t\tv.docs.SetBit(docID)\n\t\t} else {\n\t\t\tv.oa.Put(point)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gosort\n\nimport (\n \"math\/rand\"\n \"time\"\n)\n\n\/\/ The private initialisation function for this source file\nfunc init() {\n \/\/ Seed the RNG ready for use for randomly selecting pivots\n rand.Seed(time.Now().UTC().UnixNano())\n}\n\n\/*\n ChoosePivot defines the signature for a function that given a slice returns\n an index that can be used a pivot in the partitioning stage of QuickSort\n algorithm.\n *\/\ntype ChoosePivot func(a []int) int\n\n\/*\n QuickSort sorts an array of integers, in place, using the QuickSort\n algorithm.\n This implementation uses random pivot selection.\n *\/\nfunc QuickSort(a []int) {\n QuickSortWithPivotChoice(a, ChooseRandomElementPivot)\n}\n\n\/*\n QuickSortWithPivotChoice sorts an array of integers, in place, using the\n QuickSort algorithm, but also allows calling code to select how pivots\n are chosen. Pass one of the Choose<strategy>Pivot functions as the\n second argument to specify a pivot selection strategy.\n *\/\nfunc QuickSortWithPivotChoice(a []int, choosePivot ChoosePivot) {\n if len(a) <= 1 {\n return\n }\n\n pivot := choosePivot(a)\n pivot = partition(a, pivot)\n QuickSortWithPivotChoice(a[:pivot], choosePivot)\n QuickSortWithPivotChoice(a[pivot+1:], choosePivot)\n}\n\n\/\/ A pivot selection strategy: choose the first element\nfunc ChooseFirstElementPivot(a []int) int {\n return 0\n}\n\n\/\/ A pivot selection strategy: choose the last element\nfunc ChooseLastElementPivot(a []int) int {\n return len(a) - 1\n}\n\n\/\/ A pivot selection strategy: choose the middle element\nfunc ChooseMiddleElementPivot(a []int) int {\n return (len(a) - 1) \/ 2\n}\n\n\/\/ A pivot selection strategy: choose a random element\nfunc ChooseRandomElementPivot(a []int) int {\n return rand.Intn(len(a))\n}\n\n\/*\n A pivot selection strategy: try to choose a median-ish element.\n \n First selects the first, middle and last elements, then chooses\n the median of these three values and returns its index.\n *\/\nfunc ChooseMedianElementPivot(a []int) int {\n first := 0\n middle := (len(a) - 1) \/ 2\n last := (len(a) - 1)\n\n median := last\n\n if ((a[first] - a[middle]) * (a[last] - a[first])) >= 0 {\n median = first\n } else if ((a[middle] - a[first]) * (a[last] - a[middle])) >= 0 {\n median = middle\n }\n\n return median\n}\n\nfunc partition(a []int, pivot int) int {\n \/\/ If the pivot isn't the first element, swap it so it is\n if pivot > 0 {\n a[0], a[pivot] = a[pivot], a[0]\n }\n\n p := a[0]\n i := 1\n for j := i; j < len(a); j++ {\n if a[j] < p {\n a[i], a[j] = a[j], a[i]\n i++\n }\n }\n\n \/\/ Put the pivot element into position\n a[0], a[i-1] = a[i-1], a[0]\n\n return i - 1\n}\n<commit_msg>Clarify the intent of the pivot selection strategies<commit_after>package gosort\n\nimport (\n \"math\/rand\"\n \"time\"\n)\n\n\/\/ The private initialisation function for this source file\nfunc init() {\n \/\/ Seed the RNG ready for use for randomly selecting pivots\n rand.Seed(time.Now().UTC().UnixNano())\n}\n\n\/*\n ChoosePivot defines the signature for a function that given a slice returns\n an index that can be used a pivot in the partitioning stage of QuickSort\n algorithm.\n *\/\ntype ChoosePivot func(a []int) int\n\n\/*\n QuickSort sorts an array of integers, in place, using the QuickSort\n algorithm.\n This implementation uses random pivot selection.\n *\/\nfunc QuickSort(a []int) {\n QuickSortWithPivotChoice(a, ChooseRandomElementPivot)\n}\n\n\/*\n QuickSortWithPivotChoice sorts an array of integers, in place, using the\n QuickSort algorithm, but also allows calling code to select how pivots\n are chosen. Pass one of the Choose<strategy>Pivot functions as the\n second argument to specify A QuickSort pivot selection strategy.\n *\/\nfunc QuickSortWithPivotChoice(a []int, choosePivot ChoosePivot) {\n if len(a) <= 1 {\n return\n }\n\n pivot := choosePivot(a)\n pivot = partition(a, pivot)\n QuickSortWithPivotChoice(a[:pivot], choosePivot)\n QuickSortWithPivotChoice(a[pivot+1:], choosePivot)\n}\n\n\/\/ A QuickSort pivot selection strategy: choose the first element. Use with\n\/\/ QuickSortWithPivotChoice.\nfunc ChooseFirstElementPivot(a []int) int {\n return 0\n}\n\n\/\/ A QuickSort pivot selection strategy: choose the last element. Use with\n\/\/ QuickSortWithPivotChoice.\nfunc ChooseLastElementPivot(a []int) int {\n return len(a) - 1\n}\n\n\/\/ A QuickSort pivot selection strategy: choose the middle element. Use with\n\/\/ QuickSortWithPivotChoice.\nfunc ChooseMiddleElementPivot(a []int) int {\n return (len(a) - 1) \/ 2\n}\n\n\/\/ A QuickSort pivot selection strategy: choose a random element. Use with\n\/\/ QuickSortWithPivotChoice.\nfunc ChooseRandomElementPivot(a []int) int {\n return rand.Intn(len(a))\n}\n\n\/*\n A QuickSort pivot selection strategy: try to choose a median-ish element.\n Use with QuickSortWithPivotChoice.\n \n First selects the first, middle and last elements, then chooses\n the median of these three values and returns its index.\n *\/\nfunc ChooseMedianElementPivot(a []int) int {\n first := 0\n middle := (len(a) - 1) \/ 2\n last := (len(a) - 1)\n\n median := last\n\n if ((a[first] - a[middle]) * (a[last] - a[first])) >= 0 {\n median = first\n } else if ((a[middle] - a[first]) * (a[last] - a[middle])) >= 0 {\n median = middle\n }\n\n return median\n}\n\nfunc partition(a []int, pivot int) int {\n \/\/ If the pivot isn't the first element, swap it so it is\n if pivot > 0 {\n a[0], a[pivot] = a[pivot], a[0]\n }\n\n p := a[0]\n i := 1\n for j := i; j < len(a); j++ {\n if a[j] < p {\n a[i], a[j] = a[j], a[i]\n i++\n }\n }\n\n \/\/ Put the pivot element into position\n a[0], a[i-1] = a[i-1], a[0]\n\n return i - 1\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage themefiles\n\nconst AutoupdateJs = `\n\nvar Autoupdate = (function () {\n function Autoupdate() { }\n\n var self = this;\n\n \/**\n * Get the currently opened web route\n * @return string The currently opened web route (e.g. \"\/documents\/Sample Document\/index.html\")\n *\/\n var getCurrentRoute = function() {\n var url = document.location.pathname;\n return url.replace(\/^\\\/+\/, \"\");\n };\n\n \/**\n * Get the Url for the web socket connection\n * @return string The url for the web socket connection (e.g. \"ws:\/\/example.com:8080\/ws\")\n *\/\n var getWebSocketUrl = function() {\n routeParameter = \"route=\" + getCurrentRoute();\n host = document.location.host;\n webSocketHandler = \"\/ws\";\n websocketUrl = \"ws:\/\/\" + host + webSocketHandler + \"?\" + routeParameter;\n return websocketUrl; \n };\n\n \/**\n * Execute all on change callbacks\n *\/\n var executeOnChangeCallbacks = function() {\n if (typeof(self.changeCallbacks) !== 'object') {\n return;\n }\n\n for (var callbackName in self.changeCallbacks) {\n console.log(\"Executing on change callback: \" + callbackName);\n self.changeCallbacks[callbackName]();\n }\n };\n\n \/**\n * Connect to the server\n * @param string webSocketUrl The url of the web-socket to connect to\n *\/\n var connect = function(webSocketUrl) {\n var reconnectionTimeInSeconds = 3;\n var connection = new WebSocket(webSocketUrl);\n\n connection.onclose = function(evt) {\n console.log(\"Connection closed. Trying to reconnect in \" + reconnectionTimeInSeconds + \" seconds.\");\n\n setTimeout(function() {\n\n console.log(\"Reconnecting\");\n connect(webSocketUrl);\n\n }, (reconnectionTimeInSeconds * 1000));\n };\n\n connection.onopen = function() {\n console.log(\"Connection established.\")\n };\n\n connection.onmessage = function(evt) {\n\n \/\/ validate event data\n if (typeof(evt) !== 'object' || typeof(evt.data) !== 'string') {\n console.log(\"Invalid data from server.\");\n return;\n }\n\n \/\/ unwrap the message\n message = JSON.parse(evt.data);\n\n \/\/ check if all required fields are present\n if (message === null || typeof(message) !== 'object' || typeof(message.route) !== 'string' || message.model === null || typeof(message.model) !== 'object') {\n console.log(\"Invalid response format.\", message);\n return;\n }\n\n \/\/ check if update is applicable for the current route\n if (message.route !== getCurrentRoute()) {\n return;\n }\n\n \/\/ check the model structure\n var model = message.model;\n if (typeof(model.content) !== 'string' || typeof(model.description) !== 'string' || typeof(model.title) !== 'string') {\n console.log(\"Cannot update the view with the given model object. Missing some required fields.\", model);\n return;\n }\n\n \/\/ update the title\n $('title').html(model.title);\n $('.title').html(model.title);\n\n \/\/ update the description\n $('.description').html(model.description);\n\n \/\/ update the content\n $('.content').html(model.content);\n\n \/\/ update childs (if available)\n if (model.childs === null || typeof(model.childs) !== 'object') {\n\n \/\/ execute the on change callbacks\n executeOnChangeCallbacks();\n\n return;\n }\n\n \/**\n * Update an existing item list entry\n * @param Element entryToUpdate The node which shall be updated\n * @param Object model The model containing values to use for the update\n *\/\n var updateEntry = function(entryToUpdate, model) {\n \/\/ update the title text\n $(entryToUpdate).find(\".child-link:first\").html(model.title);\n\n \/\/ update the title link\n $(entryToUpdate).find(\".child-link:first\").attr(\"href\", model.relativeRoute);\n\n \/\/ update the description\n $(entryToUpdate).find(\".child-description:first\").html(model.description); \n };\n\n var entries = model.childs;\n var existingEntries = $(\".childs>.list>.child\");\n var numberOfExistingEntries = existingEntries.length;\n var numberOfNewEntries = entries.length;\n\n var entryTemplate = \"<li class=\\\"child\\\"><a href=\\\"#\\\" class=\\\"child-title child-link\\\"><\/a><p class=\\\"child-description\\\"><\/p><\/li>\";\n\n if (numberOfExistingEntries > numberOfNewEntries) {\n\n for (var x = (numberOfNewEntries-1); x < numberOfNewEntries; x++) {\n $(existingEntries[x]).remove();\n }\n\n }\n\n \/\/ update or add\n for (var i = 0; i < numberOfNewEntries; i++) {\n var index = i + 1;\n var newEntry = entries[i];\n\n \/\/ get the item to update\n if (index <= numberOfExistingEntries) {\n\n \/\/ update the existing entry\n updateEntry(existingEntries[i], newEntry);\n\n } else {\n\n \/\/ append and update a new entry\n updateEntry($(\".childs>.list\").append(entryTemplate).find(\".child:last\"), newEntry);\n }\n }\n\n \/\/ execute the on change callbacks\n executeOnChangeCallbacks();\n\n };\n }; \n\n Autoupdate.prototype.start = function () {\n\n \/\/ check if websockets are supported\n if(!window[\"WebSocket\"]) {\n console.log(\"Your browser does not support WebSockets.\");\n return;\n }\n\n \/\/ establish the connection\n connect(getWebSocketUrl());\n };\n\n Autoupdate.prototype.onchange = function(name, callback) {\n if (typeof(self.changeCallbacks) !== 'object') {\n self.changeCallbacks = {};\n }\n\n self.changeCallbacks[name] = callback;\n };\n\n return Autoupdate;\n})();\n\nvar autoupdate = new Autoupdate();\nautoupdate.start();\n`\n<commit_msg>Bug fix for the auto-update JavaScript library. The route comparision failed and therefore did not update the document when the source changed.<commit_after>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage themefiles\n\nconst AutoupdateJs = `\n\nvar Autoupdate = (function () {\n function Autoupdate() { }\n\n var self = this;\n\n \/**\n * Get the currently opened web route\n * @return string The currently opened web route (e.g. \"\/documents\/Sample Document\/index.html\")\n *\/\n var getCurrentRoute = function() {\n var url = document.location.pathname;\n return url;\n };\n\n \/**\n * Check whether the supplied routes are alike\n * @param string route1 The first route\n * @param string route2 The second route\n * @return bool true if the supplied routes are alike; otherwise false\n *\/\n var routesAreAlike = function(route1, route2) {\n return decodeURIComponent(route1) === decodeURIComponent(route2);\n };\n\n \/**\n * Get the Url for the web socket connection\n * @return string The url for the web socket connection (e.g. \"ws:\/\/example.com:8080\/ws\")\n *\/\n var getWebSocketUrl = function() {\n routeParameter = \"route=\" + getCurrentRoute();\n host = document.location.host;\n webSocketHandler = \"\/ws\";\n websocketUrl = \"ws:\/\/\" + host + webSocketHandler + \"?\" + routeParameter;\n return websocketUrl; \n };\n\n \/**\n * Execute all on change callbacks\n *\/\n var executeOnChangeCallbacks = function() {\n if (typeof(self.changeCallbacks) !== 'object') {\n return;\n }\n\n for (var callbackName in self.changeCallbacks) {\n console.log(\"Executing on change callback: \" + callbackName);\n self.changeCallbacks[callbackName]();\n }\n };\n\n \/**\n * Connect to the server\n * @param string webSocketUrl The url of the web-socket to connect to\n *\/\n var connect = function(webSocketUrl) {\n var reconnectionTimeInSeconds = 3;\n var connection = new WebSocket(webSocketUrl);\n\n connection.onclose = function(evt) {\n console.log(\"Connection closed. Trying to reconnect in \" + reconnectionTimeInSeconds + \" seconds.\");\n\n setTimeout(function() {\n\n console.log(\"Reconnecting\");\n connect(webSocketUrl);\n\n }, (reconnectionTimeInSeconds * 1000));\n };\n\n connection.onopen = function() {\n console.log(\"Connection established.\")\n };\n\n connection.onmessage = function(evt) {\n\n \/\/ validate event data\n if (typeof(evt) !== 'object' || typeof(evt.data) !== 'string') {\n console.log(\"Invalid data from server.\");\n return;\n }\n\n \/\/ unwrap the message\n message = JSON.parse(evt.data);\n\n \/\/ check if all required fields are present\n if (message === null || typeof(message) !== 'object' || typeof(message.route) !== 'string' || message.model === null || typeof(message.model) !== 'object') {\n console.log(\"Invalid response format.\", message);\n return;\n }\n\n \/\/ check if update is applicable for the current route\n if (!routesAreAlike(message.route, getCurrentRoute())) {\n return;\n }\n\n \/\/ check the model structure\n var model = message.model;\n if (typeof(model.content) !== 'string' || typeof(model.description) !== 'string' || typeof(model.title) !== 'string') {\n console.log(\"Cannot update the view with the given model object. Missing some required fields.\", model);\n return;\n }\n\n \/\/ update the title\n $('title').html(model.title);\n $('.title').html(model.title);\n\n \/\/ update the description\n $('.description').html(model.description);\n\n \/\/ update the content\n $('.content').html(model.content);\n\n \/\/ update childs (if available)\n if (model.childs === null || typeof(model.childs) !== 'object') {\n\n \/\/ execute the on change callbacks\n executeOnChangeCallbacks();\n\n return;\n }\n\n \/**\n * Update an existing item list entry\n * @param Element entryToUpdate The node which shall be updated\n * @param Object model The model containing values to use for the update\n *\/\n var updateEntry = function(entryToUpdate, model) {\n \/\/ update the title text\n $(entryToUpdate).find(\".child-link:first\").html(model.title);\n\n \/\/ update the title link\n $(entryToUpdate).find(\".child-link:first\").attr(\"href\", model.relativeRoute);\n\n \/\/ update the description\n $(entryToUpdate).find(\".child-description:first\").html(model.description); \n };\n\n var entries = model.childs;\n var existingEntries = $(\".childs>.list>.child\");\n var numberOfExistingEntries = existingEntries.length;\n var numberOfNewEntries = entries.length;\n\n var entryTemplate = \"<li class=\\\"child\\\"><a href=\\\"#\\\" class=\\\"child-title child-link\\\"><\/a><p class=\\\"child-description\\\"><\/p><\/li>\";\n\n if (numberOfExistingEntries > numberOfNewEntries) {\n\n for (var x = (numberOfNewEntries-1); x < numberOfNewEntries; x++) {\n $(existingEntries[x]).remove();\n }\n\n }\n\n \/\/ update or add\n for (var i = 0; i < numberOfNewEntries; i++) {\n var index = i + 1;\n var newEntry = entries[i];\n\n \/\/ get the item to update\n if (index <= numberOfExistingEntries) {\n\n \/\/ update the existing entry\n updateEntry(existingEntries[i], newEntry);\n\n } else {\n\n \/\/ append and update a new entry\n updateEntry($(\".childs>.list\").append(entryTemplate).find(\".child:last\"), newEntry);\n }\n }\n\n \/\/ execute the on change callbacks\n executeOnChangeCallbacks();\n\n };\n }; \n\n Autoupdate.prototype.start = function () {\n\n \/\/ check if websockets are supported\n if(!window[\"WebSocket\"]) {\n console.log(\"Your browser does not support WebSockets.\");\n return;\n }\n\n \/\/ establish the connection\n connect(getWebSocketUrl());\n };\n\n Autoupdate.prototype.onchange = function(name, callback) {\n if (typeof(self.changeCallbacks) !== 'object') {\n self.changeCallbacks = {};\n }\n\n self.changeCallbacks[name] = callback;\n };\n\n return Autoupdate;\n})();\n\nvar autoupdate = new Autoupdate();\nautoupdate.start();\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package mongo provides support for using MongoDB.\npackage mongo\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\n\/\/ Config provides configuration values.\ntype Config struct {\n\tHost string\n\tAuthDB string\n\tDB string\n\tUser string\n\tPassword string\n}\n\n\/\/==============================================================================\n\n\/\/ New creates a new master session.\nfunc New(cfg Config) (*mgo.Session, error) {\n\n\t\/\/ Can be provided a comma delimited set of hosts.\n\thosts := strings.Split(cfg.Host, \",\")\n\n\t\/\/ We need this object to establish a session to our MongoDB.\n\tmongoDBDialInfo := mgo.DialInfo{\n\t\tAddrs: hosts,\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: cfg.AuthDB,\n\t\tUsername: cfg.User,\n\t\tPassword: cfg.Password,\n\t}\n\n\t\/\/ Create a session which maintains a pool of socket connections\n\t\/\/ to our MongoDB.\n\tses, err := mgo.DialWithInfo(&mongoDBDialInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Reads may not be entirely up-to-date, but they will always see the\n\t\/\/ history of changes moving forward, the data read will be consistent\n\t\/\/ across sequential queries in the same session, and modifications made\n\t\/\/ within the session will be observed in following queries (read-your-writes).\n\t\/\/ http:\/\/godoc.org\/labix.org\/v2\/mgo#Session.SetMode\n\tses.SetMode(mgo.Monotonic, true)\n\n\treturn ses, nil\n}\n\n\/\/ Query provides a string version of the value\nfunc Query(value interface{}) string {\n\tjson, err := json.Marshal(value)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn string(json)\n}\n<commit_msg>Added support for setting a default timeout on the mongo session.<commit_after>\/\/ Package mongo provides support for using MongoDB.\npackage mongo\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n)\n\n\/\/ Config provides configuration values.\ntype Config struct {\n\tHost string\n\tAuthDB string\n\tDB string\n\tUser string\n\tPassword string\n\tTimeout time.Duration\n}\n\n\/\/==============================================================================\n\n\/\/ New creates a new master session.\nfunc New(cfg Config) (*mgo.Session, error) {\n\n\t\/\/ Can be provided a comma delimited set of hosts.\n\thosts := strings.Split(cfg.Host, \",\")\n\n\t\/\/ Set the default timeout for the session.\n\ttimeout := cfg.Timeout\n\tif timeout == 0 {\n\t\ttimeout = 60 * time.Second\n\t}\n\n\t\/\/ We need this object to establish a session to our MongoDB.\n\tmongoDBDialInfo := mgo.DialInfo{\n\t\tAddrs: hosts,\n\t\tTimeout: timeout,\n\t\tDatabase: cfg.AuthDB,\n\t\tUsername: cfg.User,\n\t\tPassword: cfg.Password,\n\t}\n\n\t\/\/ Create a session which maintains a pool of socket connections\n\t\/\/ to our MongoDB.\n\tses, err := mgo.DialWithInfo(&mongoDBDialInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Reads may not be entirely up-to-date, but they will always see the\n\t\/\/ history of changes moving forward, the data read will be consistent\n\t\/\/ across sequential queries in the same session, and modifications made\n\t\/\/ within the session will be observed in following queries (read-your-writes).\n\t\/\/ http:\/\/godoc.org\/labix.org\/v2\/mgo#Session.SetMode\n\tses.SetMode(mgo.Monotonic, true)\n\n\treturn ses, nil\n}\n\n\/\/ Query provides a string version of the value\nfunc Query(value interface{}) string {\n\tjson, err := json.Marshal(value)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn string(json)\n}\n<|endoftext|>"} {"text":"<commit_before>package dal\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/GwentAPI\/gwentapi\/configuration\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n)\n\nvar mainDataStore *DataStore\n\ntype DataStore struct {\n\tsession *mgo.Session\n}\n\nfunc (ds *DataStore) Collection(colName string) *mgo.Collection {\n\t\/\/ Use database name specified in DialWithInfo when parameter is empty\n\t\/\/ Fallback to \"test\" if also empty.\n\treturn ds.session.DB(\"\").C(colName)\n}\n\nfunc InitDBWithInfo(info *mgo.DialInfo) {\n\n\t_, err := mgo.DialWithInfo(info)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/Gwentapi = session.DB(\"gwentapi\")\n}\n\nfunc (ds *DataStore) GetSession() *mgo.Session {\n\tif ds.session == nil {\n\t\tds.session = mainDataStore.session.Copy()\n\t} \/* else {\n\t\tds.session.Copy()\n\t}*\/\n\t\/\/ Useless\n\treturn ds.session.Copy()\n}\n\nfunc (ds *DataStore) Close() {\n\tds.session.Close()\n}\n\nfunc ShutDown() {\n\tif mainDataStore != nil && mainDataStore.session != nil {\n\t\tmainDataStore.session.Close()\n\t}\n}\n\nfunc init() {\n\tconfig := configuration.GetConfig()\n\ttlsConfig := &tls.Config{}\n\n\taddrs := config.Database.Addrs\n\n\tdialInfo := &mgo.DialInfo{\n\t\tAddrs: addrs,\n\t\tDatabase: config.Database.Database,\n\t\tSource: config.Database.Authentication.AuthenticationDatabase,\n\t\tUsername: config.Database.Authentication.Username,\n\t\tPassword: config.Database.Authentication.Password,\n\t\tTimeout: 10 * time.Second,\n\t}\n\n\tmainDataStore = &DataStore{}\n\tvar err error\n\n\tif config.Database.UseSSL {\n\t\tdialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {\n\t\t\tconn, err := tls.Dial(\"tcp\", addr.String(), tlsConfig)\n\t\t\treturn conn, err\n\t\t}\n\t}\n\n\tlog.Println(\"Attempting to establish a database connection...\")\n\n\tmainDataStore.session, err = mgo.DialWithInfo(dialInfo)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to establish mongoDB connection: \", err)\n\t}\n\tmainDataStore.session.SetMode(mgo.Monotonic, true)\n\tmainDataStore.session.SetSocketTimeout(10 * time.Second)\n\tlog.Println(\"Database connection established.\")\n}\n<commit_msg>Fix session leak<commit_after>package dal\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/GwentAPI\/gwentapi\/configuration\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n)\n\nvar mainDataStore *DataStore\n\ntype DataStore struct {\n\tsession *mgo.Session\n}\n\nfunc (ds *DataStore) Collection(colName string) *mgo.Collection {\n\t\/\/ Use database name specified in DialWithInfo when parameter is empty\n\t\/\/ Fallback to \"test\" if also empty.\n\treturn ds.session.DB(\"\").C(colName)\n}\n\nfunc InitDBWithInfo(info *mgo.DialInfo) {\n\n\t_, err := mgo.DialWithInfo(info)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/Gwentapi = session.DB(\"gwentapi\")\n}\n\nfunc (ds *DataStore) GetSession() {\n\tif ds.session == nil {\n\t\tds.session = mainDataStore.session.Copy()\n\t} \/* else {\n\t\tds.session.Copy()\n\t}*\/\n\t\/\/ Useless\n\t\/\/return ds.session.Copy()\n}\n\nfunc (ds *DataStore) Close() {\n\tds.session.Close()\n}\n\nfunc ShutDown() {\n\tif mainDataStore != nil && mainDataStore.session != nil {\n\t\tmainDataStore.session.Close()\n\t}\n}\n\nfunc init() {\n\tconfig := configuration.GetConfig()\n\ttlsConfig := &tls.Config{}\n\n\taddrs := config.Database.Addrs\n\n\tdialInfo := &mgo.DialInfo{\n\t\tAddrs: addrs,\n\t\tDatabase: config.Database.Database,\n\t\tSource: config.Database.Authentication.AuthenticationDatabase,\n\t\tUsername: config.Database.Authentication.Username,\n\t\tPassword: config.Database.Authentication.Password,\n\t\tTimeout: 10 * time.Second,\n\t}\n\n\tmainDataStore = &DataStore{}\n\tvar err error\n\n\tif config.Database.UseSSL {\n\t\tdialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {\n\t\t\tconn, err := tls.Dial(\"tcp\", addr.String(), tlsConfig)\n\t\t\treturn conn, err\n\t\t}\n\t}\n\n\tlog.Println(\"Attempting to establish a database connection...\")\n\n\tmainDataStore.session, err = mgo.DialWithInfo(dialInfo)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to establish mongoDB connection: \", err)\n\t}\n\tmainDataStore.session.SetMode(mgo.Monotonic, true)\n\tmainDataStore.session.SetSocketTimeout(10 * time.Second)\n\tlog.Println(\"Database connection established.\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package sql provides a generic interface around SQL\n\/\/ (or SQL-like) databases.\n\/\/ It is like standard library database\/sql,\n\/\/ except it accepts Context parameters throughout,\n\/\/ and performs context-aware functions,\n\/\/ such as recording metrics and relaying distributed\n\/\/ tracing identifiers to drivers and remote hosts.\n\/\/\n\/\/ The sql package must be used in conjunction with a database driver.\n\/\/ See https:\/\/golang.org\/s\/sqldrivers for a list of drivers.\n\/\/\n\/\/ For more usage examples, see the wiki page at\n\/\/ https:\/\/golang.org\/s\/sqlwiki.\npackage sql\n\n\/\/ TODO(kr): many databases—Postgres in particular—report the\n\/\/ execution time of each query or statement as measured on the\n\/\/ database backend. Find a way to record that timing info in\n\/\/ the trace.\n\nimport (\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/errors\"\n\t\"chain\/log\"\n\t\"chain\/net\/trace\/span\"\n)\n\n\/\/ Register makes a database driver available by the provided name.\n\/\/ If Register is called twice with the same name or if driver is nil,\n\/\/ it panics.\nfunc Register(name string, driver driver.Driver) {\n\tsql.Register(name, driver)\n}\n\nvar logQueries bool\n\n\/\/ EnableQueryLogging enables or disables log output for queries.\n\/\/ It must be called before Open.\nfunc EnableQueryLogging(e bool) {\n\tlogQueries = e\n}\n\nfunc logQuery(ctx context.Context, query string, args interface{}) {\n\tif logQueries {\n\t\tlog.Write(ctx, \"query\", query, \"args\", args)\n\t}\n}\n\n\/\/ ErrNoRows is returned by Scan when QueryRow doesn't return a\n\/\/ row. In such a case, QueryRow returns a placeholder *Row value that\n\/\/ defers this error until a Scan.\nvar ErrNoRows = sql.ErrNoRows\n\n\/\/ DB is a database handle representing a pool of zero or more\n\/\/ underlying connections. It's safe for concurrent use by multiple\n\/\/ goroutines.\n\/\/\n\/\/ The sql package creates and frees connections automatically; it\n\/\/ also maintains a free pool of idle connections. If the database has\n\/\/ a concept of per-connection state, such state can only be reliably\n\/\/ observed within a transaction. Once DB.Begin is called, the\n\/\/ returned Tx is bound to a single connection. Once Commit or\n\/\/ Rollback is called on the transaction, that transaction's\n\/\/ connection is returned to DB's idle connection pool. The pool size\n\/\/ can be controlled with SetMaxIdleConns.\ntype DB struct {\n\tdb *sql.DB\n}\n\n\/\/ Tx is an in-progress database transaction.\n\/\/\n\/\/ A transaction must end with a call to Commit or Rollback.\n\/\/\n\/\/ After a call to Commit or Rollback, all operations on the\n\/\/ transaction fail with ErrTxDone.\n\/\/\n\/\/ The statements prepared for a transaction by calling\n\/\/ the transaction's Prepare or Stmt methods are closed\n\/\/ by the call to Commit or Rollback.\ntype Tx struct {\n\ttx *sql.Tx\n}\n\n\/\/ Rows is the result of a query. Its cursor starts before the first row\n\/\/ of the result set. Use Next to advance through the rows:\n\/\/\n\/\/ rows, err := db.Query(\"SELECT ...\")\n\/\/ ...\n\/\/ defer rows.Close()\n\/\/ for rows.Next() {\n\/\/ var id int\n\/\/ var name string\n\/\/ err = rows.Scan(&id, &name)\n\/\/ ...\n\/\/ }\n\/\/ err = rows.Err() \/\/ get any error encountered during iteration\n\/\/ ...\ntype Rows struct {\n\tctx context.Context\n\trows *sql.Rows\n}\n\n\/\/ Row is the result of calling QueryRow to select a single row.\ntype Row struct {\n\tctx context.Context\n\trow *sql.Row\n}\n\n\/\/ A Result summarizes an executed SQL command.\ntype Result interface {\n\t\/\/ LastInsertId returns the integer generated by the database\n\t\/\/ in response to a command. Typically this will be from an\n\t\/\/ \"auto increment\" column when inserting a new row. Not all\n\t\/\/ databases support this feature, and the syntax of such\n\t\/\/ statements varies.\n\tLastInsertId() (int64, error)\n\n\t\/\/ RowsAffected returns the number of rows affected by an\n\t\/\/ update, insert, or delete. Not every database or database\n\t\/\/ driver may support this.\n\tRowsAffected() (int64, error)\n}\n\n\/\/ Open opens a database specified by its database driver name and a\n\/\/ driver-specific data source name, usually consisting of at least a\n\/\/ database name and connection information.\n\/\/\n\/\/ Most users will open a database via a driver-specific connection\n\/\/ helper function that returns a *DB. No database drivers are included\n\/\/ in the Go standard library. See https:\/\/golang.org\/s\/sqldrivers for\n\/\/ a list of third-party drivers.\n\/\/\n\/\/ Open may just validate its arguments without creating a connection\n\/\/ to the database. To verify that the data source name is valid, call\n\/\/ Ping.\n\/\/\n\/\/ The returned DB is safe for concurrent use by multiple goroutines\n\/\/ and maintains its own pool of idle connections. Thus, the Open\n\/\/ function should be called just once. It is rarely necessary to\n\/\/ close a DB.\nfunc Open(driverName, dataSourceName string) (*DB, error) {\n\tdb, err := sql.Open(driverName, dataSourceName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &DB{db: db}, nil\n}\n\n\/\/ Close closes the database, releasing any open resources.\n\/\/\n\/\/ It is rare to Close a DB, as the DB handle is meant to be\n\/\/ long-lived and shared between many goroutines.\nfunc (db *DB) Close() error {\n\treturn db.db.Close()\n}\n\n\/\/ SetMaxIdleConns sets the maximum number of connections in the idle\n\/\/ connection pool.\n\/\/\n\/\/ If MaxOpenConns is greater than 0 but less than the new MaxIdleConns\n\/\/ then the new MaxIdleConns will be reduced to match the MaxOpenConns limit\n\/\/\n\/\/ If n <= 0, no idle connections are retained.\nfunc (db *DB) SetMaxIdleConns(n int) {\n\tdb.db.SetMaxIdleConns(n)\n}\n\n\/\/ SetMaxOpenConns sets the maximum number of open connections to the database.\n\/\/\n\/\/ If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than\n\/\/ MaxIdleConns, then MaxIdleConns will be reduced to match the new\n\/\/ MaxOpenConns limit\n\/\/\n\/\/ If n <= 0, then there is no limit on the number of open connections.\n\/\/ The default is 0 (unlimited).\nfunc (db *DB) SetMaxOpenConns(n int) {\n\tdb.db.SetMaxOpenConns(n)\n}\n\n\/\/ Begin starts a transaction. The isolation level is dependent on\n\/\/ the driver.\nfunc (db *DB) Begin(ctx context.Context) (*Tx, error) {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\ttx, err := db.db.Begin()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Tx{tx: tx}, nil\n}\n\n\/\/ Exec executes a query without returning any rows.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (db *DB) Exec(ctx context.Context, query string, args ...interface{}) (Result, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\tdefer span.Finish(ctx)\n\treturn db.db.Exec(query, args...)\n}\n\n\/\/ Query executes a query that returns rows, typically a SELECT.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (db *DB) Query(ctx context.Context, query string, args ...interface{}) (*Rows, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trows, err := db.db.Query(query, args...)\n\tif err != nil {\n\t\tspan.Finish(ctx)\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Rows{rows: rows, ctx: ctx}, nil\n}\n\n\/\/ QueryRow executes a query that is expected to return at most one row.\n\/\/ QueryRow always return a non-nil value. Errors are deferred until\n\/\/ Row's Scan method is called.\nfunc (db *DB) QueryRow(ctx context.Context, query string, args ...interface{}) *Row {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trow := db.db.QueryRow(query, args...)\n\treturn &Row{row: row, ctx: ctx}\n}\n\n\/\/ Commit commits the transaction.\nfunc (tx *Tx) Commit(ctx context.Context) error {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Commit()\n}\n\n\/\/ Rollback aborts the transaction.\nfunc (tx *Tx) Rollback(ctx context.Context) error {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Rollback()\n}\n\n\/\/ Exec executes a query that doesn't return rows.\n\/\/ For example: an INSERT and UPDATE.\nfunc (tx *Tx) Exec(ctx context.Context, query string, args ...interface{}) (Result, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Exec(query, args...)\n}\n\n\/\/ Query executes a query that returns rows, typically a SELECT.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (tx *Tx) Query(ctx context.Context, query string, args ...interface{}) (*Rows, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trows, err := tx.tx.Query(query, args...)\n\tif err != nil {\n\t\tspan.Finish(ctx)\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Rows{rows: rows, ctx: ctx}, nil\n}\n\n\/\/ QueryRow executes a query that is expected to return at most one row.\n\/\/ QueryRow always return a non-nil value. Errors are deferred until\n\/\/ Row's Scan method is called.\nfunc (tx *Tx) QueryRow(ctx context.Context, query string, args ...interface{}) *Row {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trow := tx.tx.QueryRow(query, args...)\n\treturn &Row{row: row, ctx: ctx}\n}\n\n\/\/ Close closes the Rows, preventing further enumeration. If Next returns\n\/\/ false, the Rows are closed automatically and it will suffice to check the\n\/\/ result of Err. Close is idempotent and does not affect the result of Err.\nfunc (rs *Rows) Close() error {\n\tdefer span.Finish(rs.ctx)\n\treturn rs.rows.Close()\n}\n\n\/\/ Next prepares the next result row for reading with the Scan method. It\n\/\/ returns true on success, or false if there is no next result row or an error\n\/\/ happened while preparing it. Err should be consulted to distinguish between\n\/\/ the two cases.\n\/\/\n\/\/ Every call to Scan, even the first one, must be preceded by a call to Next.\nfunc (rs *Rows) Next() bool {\n\treturn rs.rows.Next()\n}\n\n\/\/ Err returns the error, if any, that was encountered during iteration.\n\/\/ Err may be called after an explicit or implicit Close.\nfunc (rs *Rows) Err() error {\n\treturn rs.rows.Err()\n}\n\n\/\/ Scan copies the columns in the current row into the values pointed\n\/\/ at by dest.\n\/\/\n\/\/ If an argument has type *[]byte, Scan saves in that argument a copy\n\/\/ of the corresponding data. The copy is owned by the caller and can\n\/\/ be modified and held indefinitely. The copy can be avoided by using\n\/\/ an argument of type *RawBytes instead; see the documentation for\n\/\/ RawBytes for restrictions on its use.\n\/\/\n\/\/ If an argument has type *interface{}, Scan copies the value\n\/\/ provided by the underlying driver without conversion. If the value\n\/\/ is of type []byte, a copy is made and the caller owns the result.\nfunc (rs *Rows) Scan(dest ...interface{}) error {\n\treturn rs.rows.Scan(dest...)\n}\n\n\/\/ Scan copies the columns from the matched row into the values\n\/\/ pointed at by dest. If more than one row matches the query,\n\/\/ Scan uses the first row and discards the rest. If no row matches\n\/\/ the query, Scan returns ErrNoRows.\nfunc (r *Row) Scan(dest ...interface{}) error {\n\tdefer span.Finish(r.ctx)\n\treturn r.row.Scan(dest...)\n}\n<commit_msg>database\/sql: truncate query args in the log<commit_after>\/\/ Package sql provides a generic interface around SQL\n\/\/ (or SQL-like) databases.\n\/\/ It is like standard library database\/sql,\n\/\/ except it accepts Context parameters throughout,\n\/\/ and performs context-aware functions,\n\/\/ such as recording metrics and relaying distributed\n\/\/ tracing identifiers to drivers and remote hosts.\n\/\/\n\/\/ The sql package must be used in conjunction with a database driver.\n\/\/ See https:\/\/golang.org\/s\/sqldrivers for a list of drivers.\n\/\/\n\/\/ For more usage examples, see the wiki page at\n\/\/ https:\/\/golang.org\/s\/sqlwiki.\npackage sql\n\n\/\/ TODO(kr): many databases—Postgres in particular—report the\n\/\/ execution time of each query or statement as measured on the\n\/\/ database backend. Find a way to record that timing info in\n\/\/ the trace.\n\nimport (\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/errors\"\n\t\"chain\/log\"\n\t\"chain\/net\/trace\/span\"\n)\n\n\/\/ Register makes a database driver available by the provided name.\n\/\/ If Register is called twice with the same name or if driver is nil,\n\/\/ it panics.\nfunc Register(name string, driver driver.Driver) {\n\tsql.Register(name, driver)\n}\n\nconst maxArgsLogLen = 20 \/\/ bytes\n\nvar logQueries bool\n\n\/\/ EnableQueryLogging enables or disables log output for queries.\n\/\/ It must be called before Open.\nfunc EnableQueryLogging(e bool) {\n\tlogQueries = e\n}\n\nfunc logQuery(ctx context.Context, query string, args interface{}) {\n\tif logQueries {\n\t\ts := fmt.Sprint(args)\n\t\tif len(s) > maxArgsLogLen {\n\t\t\ts = s[:maxArgsLogLen-3] + \"...\"\n\t\t}\n\t\tlog.Write(ctx, \"query\", query, \"args\", s)\n\t}\n}\n\n\/\/ ErrNoRows is returned by Scan when QueryRow doesn't return a\n\/\/ row. In such a case, QueryRow returns a placeholder *Row value that\n\/\/ defers this error until a Scan.\nvar ErrNoRows = sql.ErrNoRows\n\n\/\/ DB is a database handle representing a pool of zero or more\n\/\/ underlying connections. It's safe for concurrent use by multiple\n\/\/ goroutines.\n\/\/\n\/\/ The sql package creates and frees connections automatically; it\n\/\/ also maintains a free pool of idle connections. If the database has\n\/\/ a concept of per-connection state, such state can only be reliably\n\/\/ observed within a transaction. Once DB.Begin is called, the\n\/\/ returned Tx is bound to a single connection. Once Commit or\n\/\/ Rollback is called on the transaction, that transaction's\n\/\/ connection is returned to DB's idle connection pool. The pool size\n\/\/ can be controlled with SetMaxIdleConns.\ntype DB struct {\n\tdb *sql.DB\n}\n\n\/\/ Tx is an in-progress database transaction.\n\/\/\n\/\/ A transaction must end with a call to Commit or Rollback.\n\/\/\n\/\/ After a call to Commit or Rollback, all operations on the\n\/\/ transaction fail with ErrTxDone.\n\/\/\n\/\/ The statements prepared for a transaction by calling\n\/\/ the transaction's Prepare or Stmt methods are closed\n\/\/ by the call to Commit or Rollback.\ntype Tx struct {\n\ttx *sql.Tx\n}\n\n\/\/ Rows is the result of a query. Its cursor starts before the first row\n\/\/ of the result set. Use Next to advance through the rows:\n\/\/\n\/\/ rows, err := db.Query(\"SELECT ...\")\n\/\/ ...\n\/\/ defer rows.Close()\n\/\/ for rows.Next() {\n\/\/ var id int\n\/\/ var name string\n\/\/ err = rows.Scan(&id, &name)\n\/\/ ...\n\/\/ }\n\/\/ err = rows.Err() \/\/ get any error encountered during iteration\n\/\/ ...\ntype Rows struct {\n\tctx context.Context\n\trows *sql.Rows\n}\n\n\/\/ Row is the result of calling QueryRow to select a single row.\ntype Row struct {\n\tctx context.Context\n\trow *sql.Row\n}\n\n\/\/ A Result summarizes an executed SQL command.\ntype Result interface {\n\t\/\/ LastInsertId returns the integer generated by the database\n\t\/\/ in response to a command. Typically this will be from an\n\t\/\/ \"auto increment\" column when inserting a new row. Not all\n\t\/\/ databases support this feature, and the syntax of such\n\t\/\/ statements varies.\n\tLastInsertId() (int64, error)\n\n\t\/\/ RowsAffected returns the number of rows affected by an\n\t\/\/ update, insert, or delete. Not every database or database\n\t\/\/ driver may support this.\n\tRowsAffected() (int64, error)\n}\n\n\/\/ Open opens a database specified by its database driver name and a\n\/\/ driver-specific data source name, usually consisting of at least a\n\/\/ database name and connection information.\n\/\/\n\/\/ Most users will open a database via a driver-specific connection\n\/\/ helper function that returns a *DB. No database drivers are included\n\/\/ in the Go standard library. See https:\/\/golang.org\/s\/sqldrivers for\n\/\/ a list of third-party drivers.\n\/\/\n\/\/ Open may just validate its arguments without creating a connection\n\/\/ to the database. To verify that the data source name is valid, call\n\/\/ Ping.\n\/\/\n\/\/ The returned DB is safe for concurrent use by multiple goroutines\n\/\/ and maintains its own pool of idle connections. Thus, the Open\n\/\/ function should be called just once. It is rarely necessary to\n\/\/ close a DB.\nfunc Open(driverName, dataSourceName string) (*DB, error) {\n\tdb, err := sql.Open(driverName, dataSourceName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &DB{db: db}, nil\n}\n\n\/\/ Close closes the database, releasing any open resources.\n\/\/\n\/\/ It is rare to Close a DB, as the DB handle is meant to be\n\/\/ long-lived and shared between many goroutines.\nfunc (db *DB) Close() error {\n\treturn db.db.Close()\n}\n\n\/\/ SetMaxIdleConns sets the maximum number of connections in the idle\n\/\/ connection pool.\n\/\/\n\/\/ If MaxOpenConns is greater than 0 but less than the new MaxIdleConns\n\/\/ then the new MaxIdleConns will be reduced to match the MaxOpenConns limit\n\/\/\n\/\/ If n <= 0, no idle connections are retained.\nfunc (db *DB) SetMaxIdleConns(n int) {\n\tdb.db.SetMaxIdleConns(n)\n}\n\n\/\/ SetMaxOpenConns sets the maximum number of open connections to the database.\n\/\/\n\/\/ If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than\n\/\/ MaxIdleConns, then MaxIdleConns will be reduced to match the new\n\/\/ MaxOpenConns limit\n\/\/\n\/\/ If n <= 0, then there is no limit on the number of open connections.\n\/\/ The default is 0 (unlimited).\nfunc (db *DB) SetMaxOpenConns(n int) {\n\tdb.db.SetMaxOpenConns(n)\n}\n\n\/\/ Begin starts a transaction. The isolation level is dependent on\n\/\/ the driver.\nfunc (db *DB) Begin(ctx context.Context) (*Tx, error) {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\ttx, err := db.db.Begin()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Tx{tx: tx}, nil\n}\n\n\/\/ Exec executes a query without returning any rows.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (db *DB) Exec(ctx context.Context, query string, args ...interface{}) (Result, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\tdefer span.Finish(ctx)\n\treturn db.db.Exec(query, args...)\n}\n\n\/\/ Query executes a query that returns rows, typically a SELECT.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (db *DB) Query(ctx context.Context, query string, args ...interface{}) (*Rows, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trows, err := db.db.Query(query, args...)\n\tif err != nil {\n\t\tspan.Finish(ctx)\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Rows{rows: rows, ctx: ctx}, nil\n}\n\n\/\/ QueryRow executes a query that is expected to return at most one row.\n\/\/ QueryRow always return a non-nil value. Errors are deferred until\n\/\/ Row's Scan method is called.\nfunc (db *DB) QueryRow(ctx context.Context, query string, args ...interface{}) *Row {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trow := db.db.QueryRow(query, args...)\n\treturn &Row{row: row, ctx: ctx}\n}\n\n\/\/ Commit commits the transaction.\nfunc (tx *Tx) Commit(ctx context.Context) error {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Commit()\n}\n\n\/\/ Rollback aborts the transaction.\nfunc (tx *Tx) Rollback(ctx context.Context) error {\n\tctx = span.NewContext(ctx)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Rollback()\n}\n\n\/\/ Exec executes a query that doesn't return rows.\n\/\/ For example: an INSERT and UPDATE.\nfunc (tx *Tx) Exec(ctx context.Context, query string, args ...interface{}) (Result, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\tdefer span.Finish(ctx)\n\treturn tx.tx.Exec(query, args...)\n}\n\n\/\/ Query executes a query that returns rows, typically a SELECT.\n\/\/ The args are for any placeholder parameters in the query.\nfunc (tx *Tx) Query(ctx context.Context, query string, args ...interface{}) (*Rows, error) {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trows, err := tx.tx.Query(query, args...)\n\tif err != nil {\n\t\tspan.Finish(ctx)\n\t\treturn nil, errors.Wrap(err)\n\t}\n\treturn &Rows{rows: rows, ctx: ctx}, nil\n}\n\n\/\/ QueryRow executes a query that is expected to return at most one row.\n\/\/ QueryRow always return a non-nil value. Errors are deferred until\n\/\/ Row's Scan method is called.\nfunc (tx *Tx) QueryRow(ctx context.Context, query string, args ...interface{}) *Row {\n\tctx = span.NewContext(ctx)\n\tlogQuery(ctx, query, args)\n\trow := tx.tx.QueryRow(query, args...)\n\treturn &Row{row: row, ctx: ctx}\n}\n\n\/\/ Close closes the Rows, preventing further enumeration. If Next returns\n\/\/ false, the Rows are closed automatically and it will suffice to check the\n\/\/ result of Err. Close is idempotent and does not affect the result of Err.\nfunc (rs *Rows) Close() error {\n\tdefer span.Finish(rs.ctx)\n\treturn rs.rows.Close()\n}\n\n\/\/ Next prepares the next result row for reading with the Scan method. It\n\/\/ returns true on success, or false if there is no next result row or an error\n\/\/ happened while preparing it. Err should be consulted to distinguish between\n\/\/ the two cases.\n\/\/\n\/\/ Every call to Scan, even the first one, must be preceded by a call to Next.\nfunc (rs *Rows) Next() bool {\n\treturn rs.rows.Next()\n}\n\n\/\/ Err returns the error, if any, that was encountered during iteration.\n\/\/ Err may be called after an explicit or implicit Close.\nfunc (rs *Rows) Err() error {\n\treturn rs.rows.Err()\n}\n\n\/\/ Scan copies the columns in the current row into the values pointed\n\/\/ at by dest.\n\/\/\n\/\/ If an argument has type *[]byte, Scan saves in that argument a copy\n\/\/ of the corresponding data. The copy is owned by the caller and can\n\/\/ be modified and held indefinitely. The copy can be avoided by using\n\/\/ an argument of type *RawBytes instead; see the documentation for\n\/\/ RawBytes for restrictions on its use.\n\/\/\n\/\/ If an argument has type *interface{}, Scan copies the value\n\/\/ provided by the underlying driver without conversion. If the value\n\/\/ is of type []byte, a copy is made and the caller owns the result.\nfunc (rs *Rows) Scan(dest ...interface{}) error {\n\treturn rs.rows.Scan(dest...)\n}\n\n\/\/ Scan copies the columns from the matched row into the values\n\/\/ pointed at by dest. If more than one row matches the query,\n\/\/ Scan uses the first row and discards the rest. If no row matches\n\/\/ the query, Scan returns ErrNoRows.\nfunc (r *Row) Scan(dest ...interface{}) error {\n\tdefer span.Finish(r.ctx)\n\treturn r.row.Scan(dest...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\npackage konfig\n\n\/\/ RecognizedKustomizationFileNames is a list of file names\n\/\/ that kustomize recognizes.\n\/\/ To avoid ambiguity, a kustomization directory may not\n\/\/ contain more than one match to this list.\nfunc RecognizedKustomizationFileNames() []string {\n\treturn []string{\n\t\t\"kustomization.yaml\",\n\t\t\"kustomization.yml\",\n\t\t\"Kustomization\",\n\t}\n}\n\nfunc DefaultKustomizationFileName() string {\n\treturn RecognizedKustomizationFileNames()[0]\n}\n\n\/\/ IfApiMachineryElseKyaml returns true if executing the apimachinery code\n\/\/ path, else we're executing the kyaml code paths.\nfunc IfApiMachineryElseKyaml(s1, s2 string) string {\n\tif !FlagEnableKyamlDefaultValue {\n\t\treturn s1\n\t}\n\treturn s2\n}\n\nconst (\n\t\/\/ FlagEnableKyamlDefaultValue is the default value for the --enable_kyaml\n\t\/\/ flag. This value is also used in unit tests. See provider.DepProvider.\n\t\/\/\n\t\/\/ TODO(#3304): eliminate branching on this constant.\n\t\/\/ Details: https:\/\/github.com\/kubernetes-sigs\/kustomize\/issues\/3304\n\t\/\/\n\t\/\/ All tests should pass for either true or false values\n\t\/\/ of this constant, without having to check its value.\n\t\/\/ In the cases where there's a different outcome, either decide\n\t\/\/ that the difference is acceptable, or make the difference go away.\n\t\/\/\n\t\/\/ Historically, tests passed for enable_kyaml == false, i.e. using\n\t\/\/ apimachinery libs. This doesn't mean the code was better, it just\n\t\/\/ means regression tests preserved those outcomes.\n\tFlagEnableKyamlDefaultValue = false\n\n\t\/\/ An environment variable to consult for kustomization\n\t\/\/ configuration data. See:\n\t\/\/ https:\/\/specifications.freedesktop.org\/basedir-spec\/basedir-spec-latest.html\n\tXdgConfigHomeEnv = \"XDG_CONFIG_HOME\"\n\n\t\/\/ Use this when XdgConfigHomeEnv not defined.\n\tXdgConfigHomeEnvDefault = \".config\"\n\n\t\/\/ A program name, for use in help, finding the XDG_CONFIG_DIR, etc.\n\tProgramName = \"kustomize\"\n\n\t\/\/ ConfigAnnoDomain is configuration-related annotation namespace.\n\tConfigAnnoDomain = \"config.kubernetes.io\"\n\n\t\/\/ If a resource has this annotation, kustomize will drop it.\n\tIgnoredByKustomizeAnnotation = ConfigAnnoDomain + \"\/local-config\"\n\n\t\/\/ Label key that indicates the resources are built from Kustomize\n\tManagedbyLabelKey = \"app.kubernetes.io\/managed-by\"\n\n\t\/\/ An environment variable to turn on\/off adding the ManagedByLabelKey\n\tEnableManagedbyLabelEnv = \"KUSTOMIZE_ENABLE_MANAGEDBY_LABEL\"\n\n\t\/\/ Label key that indicates the resources are validated by a validator\n\tValidatedByLabelKey = \"validated-by\"\n)\n<commit_msg>Set FlagEnableKyamlDefaultValue = true<commit_after>\/\/ Copyright 2019 The Kubernetes Authors.\n\/\/ SPDX-License-Identifier: Apache-2.0\n\npackage konfig\n\n\/\/ RecognizedKustomizationFileNames is a list of file names\n\/\/ that kustomize recognizes.\n\/\/ To avoid ambiguity, a kustomization directory may not\n\/\/ contain more than one match to this list.\nfunc RecognizedKustomizationFileNames() []string {\n\treturn []string{\n\t\t\"kustomization.yaml\",\n\t\t\"kustomization.yml\",\n\t\t\"Kustomization\",\n\t}\n}\n\nfunc DefaultKustomizationFileName() string {\n\treturn RecognizedKustomizationFileNames()[0]\n}\n\n\/\/ IfApiMachineryElseKyaml returns true if executing the apimachinery code\n\/\/ path, else we're executing the kyaml code paths.\nfunc IfApiMachineryElseKyaml(s1, s2 string) string {\n\tif !FlagEnableKyamlDefaultValue {\n\t\treturn s1\n\t}\n\treturn s2\n}\n\nconst (\n\t\/\/ FlagEnableKyamlDefaultValue is the default value for the --enable_kyaml\n\t\/\/ flag. This value is also used in unit tests. See provider.DepProvider.\n\t\/\/\n\t\/\/ TODO(#3304): eliminate branching on this constant.\n\t\/\/ Details: https:\/\/github.com\/kubernetes-sigs\/kustomize\/issues\/3304\n\t\/\/\n\t\/\/ All tests should pass for either true or false values\n\t\/\/ of this constant, without having to check its value.\n\t\/\/ In the cases where there's a different outcome, either decide\n\t\/\/ that the difference is acceptable, or make the difference go away.\n\t\/\/\n\t\/\/ Historically, tests passed for enable_kyaml == false, i.e. using\n\t\/\/ apimachinery libs. This doesn't mean the code was better, it just\n\t\/\/ means regression tests preserved those outcomes.\n\tFlagEnableKyamlDefaultValue = true\n\n\t\/\/ An environment variable to consult for kustomization\n\t\/\/ configuration data. See:\n\t\/\/ https:\/\/specifications.freedesktop.org\/basedir-spec\/basedir-spec-latest.html\n\tXdgConfigHomeEnv = \"XDG_CONFIG_HOME\"\n\n\t\/\/ Use this when XdgConfigHomeEnv not defined.\n\tXdgConfigHomeEnvDefault = \".config\"\n\n\t\/\/ A program name, for use in help, finding the XDG_CONFIG_DIR, etc.\n\tProgramName = \"kustomize\"\n\n\t\/\/ ConfigAnnoDomain is configuration-related annotation namespace.\n\tConfigAnnoDomain = \"config.kubernetes.io\"\n\n\t\/\/ If a resource has this annotation, kustomize will drop it.\n\tIgnoredByKustomizeAnnotation = ConfigAnnoDomain + \"\/local-config\"\n\n\t\/\/ Label key that indicates the resources are built from Kustomize\n\tManagedbyLabelKey = \"app.kubernetes.io\/managed-by\"\n\n\t\/\/ An environment variable to turn on\/off adding the ManagedByLabelKey\n\tEnableManagedbyLabelEnv = \"KUSTOMIZE_ENABLE_MANAGEDBY_LABEL\"\n\n\t\/\/ Label key that indicates the resources are validated by a validator\n\tValidatedByLabelKey = \"validated-by\"\n)\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/sacloud\/libsacloud\/sacloud\"\n)\n\n\/\/ ProductServerAPI サーバープランAPI\ntype ProductServerAPI struct {\n\t*baseAPI\n}\n\n\/\/ NewProductServerAPI サーバープランAPI作成\nfunc NewProductServerAPI(client *Client) *ProductServerAPI {\n\treturn &ProductServerAPI{\n\t\t&baseAPI{\n\t\t\tclient: client,\n\t\t\t\/\/ FuncGetResourceURL\n\t\t\tFuncGetResourceURL: func() string {\n\t\t\t\treturn \"product\/server\"\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ GetBySpec 指定のコア数\/メモリサイズ\/世代のプランを取得\nfunc (api *ProductServerAPI) GetBySpec(core, memGB int, gen sacloud.PlanGenerations) (*sacloud.ProductServer, error) {\n\treturn api.GetBySpecCommitment(core, memGB, gen, sacloud.ECommitmentStandard)\n}\n\n\/\/ GetBySpecCommitment 指定のコア数\/メモリサイズ\/世代のプランを取得\nfunc (api *ProductServerAPI) GetBySpecCommitment(core, memGB int, gen sacloud.PlanGenerations, commitment sacloud.ECommitment) (*sacloud.ProductServer, error) {\n\tplans, err := api.Reset().Find()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar res sacloud.ProductServer\n\tvar found bool\n\tfor _, plan := range plans.ServerPlans {\n\t\tif plan.CPU == core && plan.GetMemoryGB() == memGB && plan.Commitment == commitment {\n\t\t\tif gen == sacloud.PlanDefault || gen == plan.Generation {\n\t\t\t\t\/\/ PlanDefaultの場合は複数ヒットしうる。\n\t\t\t\t\/\/ この場合より新しい世代を優先する。\n\t\t\t\tif found && plan.Generation <= res.Generation {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tres = plan\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not found\", core, memGB, gen)\n\t}\n\treturn &res, nil\n}\n\n\/\/ IsValidPlan 指定のコア数\/メモリサイズ\/世代のプランが存在し、有効であるか判定\nfunc (api *ProductServerAPI) IsValidPlan(core int, memGB int, gen sacloud.PlanGenerations) (bool, error) {\n\n\tproductServer, err := api.GetBySpec(core, memGB, gen)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif productServer == nil {\n\t\treturn false, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not found\", core, memGB, gen)\n\t}\n\n\tif productServer.Availability != sacloud.EAAvailable {\n\t\treturn false, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not available\", core, memGB, gen)\n\t}\n\n\treturn true, nil\n}\n<commit_msg>Fix GetBySpecCommitment - set search limit to 1000<commit_after>package api\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/sacloud\/libsacloud\/sacloud\"\n)\n\n\/\/ ProductServerAPI サーバープランAPI\ntype ProductServerAPI struct {\n\t*baseAPI\n}\n\n\/\/ NewProductServerAPI サーバープランAPI作成\nfunc NewProductServerAPI(client *Client) *ProductServerAPI {\n\treturn &ProductServerAPI{\n\t\t&baseAPI{\n\t\t\tclient: client,\n\t\t\t\/\/ FuncGetResourceURL\n\t\t\tFuncGetResourceURL: func() string {\n\t\t\t\treturn \"product\/server\"\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ GetBySpec 指定のコア数\/メモリサイズ\/世代のプランを取得\nfunc (api *ProductServerAPI) GetBySpec(core, memGB int, gen sacloud.PlanGenerations) (*sacloud.ProductServer, error) {\n\treturn api.GetBySpecCommitment(core, memGB, gen, sacloud.ECommitmentStandard)\n}\n\n\/\/ GetBySpecCommitment 指定のコア数\/メモリサイズ\/世代のプランを取得\nfunc (api *ProductServerAPI) GetBySpecCommitment(core, memGB int, gen sacloud.PlanGenerations, commitment sacloud.ECommitment) (*sacloud.ProductServer, error) {\n\tplans, err := api.Reset().Limit(1000).Find()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar res sacloud.ProductServer\n\tvar found bool\n\tfor _, plan := range plans.ServerPlans {\n\t\tif plan.CPU == core && plan.GetMemoryGB() == memGB && plan.Commitment == commitment {\n\t\t\tif gen == sacloud.PlanDefault || gen == plan.Generation {\n\t\t\t\t\/\/ PlanDefaultの場合は複数ヒットしうる。\n\t\t\t\t\/\/ この場合より新しい世代を優先する。\n\t\t\t\tif found && plan.Generation <= res.Generation {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tres = plan\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not found\", core, memGB, gen)\n\t}\n\treturn &res, nil\n}\n\n\/\/ IsValidPlan 指定のコア数\/メモリサイズ\/世代のプランが存在し、有効であるか判定\nfunc (api *ProductServerAPI) IsValidPlan(core int, memGB int, gen sacloud.PlanGenerations) (bool, error) {\n\n\tproductServer, err := api.GetBySpec(core, memGB, gen)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif productServer == nil {\n\t\treturn false, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not found\", core, memGB, gen)\n\t}\n\n\tif productServer.Availability != sacloud.EAAvailable {\n\t\treturn false, fmt.Errorf(\"Server Plan[core:%d, memory:%d, gen:%d] is not available\", core, memGB, gen)\n\t}\n\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package raft implements raft.\npackage raft\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\tpb \"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/third_party\/code.google.com\/p\/go.net\/context\"\n)\n\nvar (\n\temptyState = pb.State{}\n\tErrStopped = errors.New(\"raft: stopped\")\n)\n\n\/\/ Ready encapsulates the entries and messages that are ready to be saved to\n\/\/ stable storage, committed or sent to other peers.\ntype Ready struct {\n\t\/\/ The current state of a Node\n\tpb.State\n\n\t\/\/ Entries specifies entries to be saved to stable storage BEFORE\n\t\/\/ Messages are sent.\n\tEntries []pb.Entry\n\n\t\/\/ CommittedEntries specifies entries to be committed to a\n\t\/\/ store\/state-machine. These have previously been committed to stable\n\t\/\/ store.\n\tCommittedEntries []pb.Entry\n\n\t\/\/ Messages specifies outbound messages to be sent AFTER Entries are\n\t\/\/ committed to stable storage.\n\tMessages []pb.Message\n}\n\nfunc isStateEqual(a, b pb.State) bool {\n\treturn a.Term == b.Term && a.Vote == b.Vote && a.LastIndex == b.LastIndex\n}\n\nfunc IsEmptyState(st pb.State) bool {\n\treturn isStateEqual(st, emptyState)\n}\n\nfunc (rd Ready) containsUpdates() bool {\n\treturn !isStateEqual(emptyState, rd.State) || len(rd.Entries) > 0 || len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0\n}\n\ntype Node struct {\n\tctx context.Context\n\tpropc chan pb.Message\n\trecvc chan pb.Message\n\treadyc chan Ready\n\ttickc chan struct{}\n\tdone chan struct{}\n}\n\nfunc Start(id int64, peers []int64, election, heartbeat int) Node {\n\tn := newNode()\n\tr := newRaft(id, peers, election, heartbeat)\n\tgo n.run(r)\n\treturn n\n}\n\nfunc Restart(id int64, peers []int64, election, heartbeat int, st pb.State, ents []pb.Entry) Node {\n\tn := newNode()\n\tr := newRaft(id, peers, election, heartbeat)\n\tr.loadState(st)\n\tr.loadEnts(ents)\n\tgo n.run(r)\n\treturn n\n}\n\nfunc newNode() Node {\n\treturn Node{\n\t\tpropc: make(chan pb.Message),\n\t\trecvc: make(chan pb.Message),\n\t\treadyc: make(chan Ready),\n\t\ttickc: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (n *Node) Stop() {\n\tclose(n.done)\n}\n\nfunc (n *Node) run(r *raft) {\n\tpropc := n.propc\n\treadyc := n.readyc\n\n\tvar lead int64\n\tprevSt := r.State\n\n\tfor {\n\t\tif lead != r.lead {\n\t\t\tlog.Printf(\"raft: leader changed from %#x to %#x\", lead, r.lead)\n\t\t\tlead = r.lead\n\t\t\tif r.hasLeader() {\n\t\t\t\tpropc = n.propc\n\t\t\t} else {\n\t\t\t\tpropc = nil\n\t\t\t}\n\t\t}\n\n\t\trd := Ready{\n\t\t\tEntries: r.raftLog.unstableEnts(),\n\t\t\tCommittedEntries: r.raftLog.nextEnts(),\n\t\t\tMessages: r.msgs,\n\t\t}\n\n\t\tif isStateEqual(r.State, prevSt) {\n\t\t\trd.State = emptyState\n\t\t} else {\n\t\t\trd.State = r.State\n\t\t}\n\n\t\tif rd.containsUpdates() {\n\t\t\treadyc = n.readyc\n\t\t} else {\n\t\t\treadyc = nil\n\t\t}\n\n\t\tselect {\n\t\tcase m := <-propc:\n\t\t\tm.From = r.id\n\t\t\tr.Step(m)\n\t\tcase m := <-n.recvc:\n\t\t\tr.Step(m) \/\/ raft never returns an error\n\t\tcase <-n.tickc:\n\t\t\tr.tick()\n\t\tcase readyc <- rd:\n\t\t\tr.raftLog.resetNextEnts()\n\t\t\tr.raftLog.resetUnstable()\n\t\t\tif !IsEmptyState(rd.State) {\n\t\t\t\tprevSt = rd.State\n\t\t\t}\n\t\t\tr.msgs = nil\n\t\tcase <-n.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *Node) Tick() error {\n\tselect {\n\tcase n.tickc <- struct{}{}:\n\t\treturn nil\n\tcase <-n.done:\n\t\treturn n.ctx.Err()\n\t}\n}\n\nfunc (n *Node) Campaign(ctx context.Context) error {\n\treturn n.Step(ctx, pb.Message{Type: msgHup})\n}\n\n\/\/ Propose proposes data be appended to the log.\nfunc (n *Node) Propose(ctx context.Context, data []byte) error {\n\treturn n.Step(ctx, pb.Message{Type: msgProp, Entries: []pb.Entry{{Data: data}}})\n}\n\n\/\/ Step advances the state machine using msgs. The ctx.Err() will be returned,\n\/\/ if any.\nfunc (n *Node) Step(ctx context.Context, m pb.Message) error {\n\tch := n.recvc\n\tif m.Type == msgProp {\n\t\tch = n.propc\n\t}\n\n\tselect {\n\tcase ch <- m:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-n.done:\n\t\treturn ErrStopped\n\t}\n}\n\n\/\/ ReadState returns the current point-in-time state.\nfunc (n *Node) Ready() <-chan Ready {\n\treturn n.readyc\n}\n<commit_msg>raft: add a newReady helper function<commit_after>\/\/ Package raft implements raft.\npackage raft\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\tpb \"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/third_party\/code.google.com\/p\/go.net\/context\"\n)\n\nvar (\n\temptyState = pb.State{}\n\tErrStopped = errors.New(\"raft: stopped\")\n)\n\n\/\/ Ready encapsulates the entries and messages that are ready to be saved to\n\/\/ stable storage, committed or sent to other peers.\ntype Ready struct {\n\t\/\/ The current state of a Node\n\tpb.State\n\n\t\/\/ Entries specifies entries to be saved to stable storage BEFORE\n\t\/\/ Messages are sent.\n\tEntries []pb.Entry\n\n\t\/\/ CommittedEntries specifies entries to be committed to a\n\t\/\/ store\/state-machine. These have previously been committed to stable\n\t\/\/ store.\n\tCommittedEntries []pb.Entry\n\n\t\/\/ Messages specifies outbound messages to be sent AFTER Entries are\n\t\/\/ committed to stable storage.\n\tMessages []pb.Message\n}\n\nfunc isStateEqual(a, b pb.State) bool {\n\treturn a.Term == b.Term && a.Vote == b.Vote && a.LastIndex == b.LastIndex\n}\n\nfunc IsEmptyState(st pb.State) bool {\n\treturn isStateEqual(st, emptyState)\n}\n\nfunc (rd Ready) containsUpdates() bool {\n\treturn !isStateEqual(emptyState, rd.State) || len(rd.Entries) > 0 || len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0\n}\n\ntype Node struct {\n\tctx context.Context\n\tpropc chan pb.Message\n\trecvc chan pb.Message\n\treadyc chan Ready\n\ttickc chan struct{}\n\tdone chan struct{}\n}\n\nfunc Start(id int64, peers []int64, election, heartbeat int) Node {\n\tn := newNode()\n\tr := newRaft(id, peers, election, heartbeat)\n\tgo n.run(r)\n\treturn n\n}\n\nfunc Restart(id int64, peers []int64, election, heartbeat int, st pb.State, ents []pb.Entry) Node {\n\tn := newNode()\n\tr := newRaft(id, peers, election, heartbeat)\n\tr.loadState(st)\n\tr.loadEnts(ents)\n\tgo n.run(r)\n\treturn n\n}\n\nfunc newNode() Node {\n\treturn Node{\n\t\tpropc: make(chan pb.Message),\n\t\trecvc: make(chan pb.Message),\n\t\treadyc: make(chan Ready),\n\t\ttickc: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (n *Node) Stop() {\n\tclose(n.done)\n}\n\nfunc (n *Node) run(r *raft) {\n\tpropc := n.propc\n\treadyc := n.readyc\n\n\tvar lead int64\n\tprevSt := r.State\n\n\tfor {\n\t\tif lead != r.lead {\n\t\t\tlog.Printf(\"raft: leader changed from %#x to %#x\", lead, r.lead)\n\t\t\tlead = r.lead\n\t\t\tif r.hasLeader() {\n\t\t\t\tpropc = n.propc\n\t\t\t} else {\n\t\t\t\tpropc = nil\n\t\t\t}\n\t\t}\n\n\t\trd := newReady(r, prevSt)\n\t\tif rd.containsUpdates() {\n\t\t\treadyc = n.readyc\n\t\t} else {\n\t\t\treadyc = nil\n\t\t}\n\n\t\tselect {\n\t\tcase m := <-propc:\n\t\t\tm.From = r.id\n\t\t\tr.Step(m)\n\t\tcase m := <-n.recvc:\n\t\t\tr.Step(m) \/\/ raft never returns an error\n\t\tcase <-n.tickc:\n\t\t\tr.tick()\n\t\tcase readyc <- rd:\n\t\t\tr.raftLog.resetNextEnts()\n\t\t\tr.raftLog.resetUnstable()\n\t\t\tif !IsEmptyState(rd.State) {\n\t\t\t\tprevSt = rd.State\n\t\t\t}\n\t\t\tr.msgs = nil\n\t\tcase <-n.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *Node) Tick() error {\n\tselect {\n\tcase n.tickc <- struct{}{}:\n\t\treturn nil\n\tcase <-n.done:\n\t\treturn n.ctx.Err()\n\t}\n}\n\nfunc (n *Node) Campaign(ctx context.Context) error {\n\treturn n.Step(ctx, pb.Message{Type: msgHup})\n}\n\n\/\/ Propose proposes data be appended to the log.\nfunc (n *Node) Propose(ctx context.Context, data []byte) error {\n\treturn n.Step(ctx, pb.Message{Type: msgProp, Entries: []pb.Entry{{Data: data}}})\n}\n\n\/\/ Step advances the state machine using msgs. The ctx.Err() will be returned,\n\/\/ if any.\nfunc (n *Node) Step(ctx context.Context, m pb.Message) error {\n\tch := n.recvc\n\tif m.Type == msgProp {\n\t\tch = n.propc\n\t}\n\n\tselect {\n\tcase ch <- m:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-n.done:\n\t\treturn ErrStopped\n\t}\n}\n\n\/\/ ReadState returns the current point-in-time state.\nfunc (n *Node) Ready() <-chan Ready {\n\treturn n.readyc\n}\n\nfunc newReady(r *raft, prev pb.State) Ready {\n\trd := Ready{\n\t\tEntries: r.raftLog.unstableEnts(),\n\t\tCommittedEntries: r.raftLog.nextEnts(),\n\t\tMessages: r.msgs,\n\t}\n\n\tif isStateEqual(r.State, prev) {\n\t\trd.State = emptyState\n\t} else {\n\t\trd.State = r.State\n\t}\n\treturn rd\n}\n<|endoftext|>"} {"text":"<commit_before>package raft\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar defaultLog = []Entry{{}}\n\nfunc TestLeaderElection(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tstate stateType\n\t}{\n\t\t{newNetwork(nil, nil, nil), stateLeader},\n\t\t{newNetwork(nil, nil, nopStepper), stateLeader},\n\t\t{newNetwork(nil, nopStepper, nopStepper), stateCandidate},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), stateCandidate},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), stateLeader},\n\t\t\/\/\/ {newNetwork(nil, newPartNode(), falseVote()), stateFollower},\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.step(Message{To: 0, Type: msgHup})\n\t\tsm := tt.network.ss[0].(*stateMachine)\n\t\tif sm.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, sm.state, tt.state)\n\t\t}\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestDualingCandidates(t *testing.T) {\n\ta := &stateMachine{log: defaultLog}\n\tc := &stateMachine{log: defaultLog}\n\n\ttt := newNetwork(a, nil, c)\n\n\theal := false\n\tnext := stepperFunc(func(m Message) {\n\t\tif heal {\n\t\t\ttt.step(m)\n\t\t}\n\t})\n\ta.next = next\n\tc.next = next\n\n\ttt.tee = stepperFunc(func(m Message) {\n\t\tt.Logf(\"m = %+v\", m)\n\t})\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 2, Type: msgHup})\n\n\tt.Log(\"healing\")\n\theal = true\n\ttt.step(Message{To: 2, Type: msgHup})\n\n\ttests := []struct {\n\t\tsm *stateMachine\n\t\tstate stateType\n\t\tterm int\n\t}{\n\t\t{a, stateFollower, 2},\n\t\t{c, stateLeader, 2},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif g := tt.sm.state; g != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, g, tt.state)\n\t\t}\n\t\tif g := tt.sm.term; g != tt.term {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, tt.term)\n\t\t}\n\t}\n\tif g := diffLogs(defaultLog, tt.logs()); g != nil {\n\t\tfor _, diff := range g {\n\t\t\tt.Errorf(\"bag log:\\n%s\", diff)\n\t\t}\n\t}\n}\n\nfunc TestOldMessages(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil)\n\t\/\/ make 0 leader @ term 3\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 0, Type: msgHup})\n\t\/\/ pretend we're an old leader trying to make progress\n\ttt.step(Message{To: 0, Type: msgApp, Term: 1, Entries: []Entry{{Term: 1}}})\n\tif g := diffLogs(defaultLog, tt.logs()); g != nil {\n\t\tfor _, diff := range g {\n\t\t\tt.Errorf(\"bag log:\\n%s\", diff)\n\t\t}\n\t}\n}\n\n\/\/ TestOldMessagesReply - optimization - reply with new term.\n\nfunc TestProposal(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tsuccess bool\n\t}{\n\t\t{newNetwork(nil, nil, nil), true},\n\t\t{newNetwork(nil, nil, nopStepper), true},\n\t\t{newNetwork(nil, nopStepper, nopStepper), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.tee = stepperFunc(func(m Message) {\n\t\t\tt.Logf(\"#%d: m = %+v\", i, m)\n\t\t})\n\n\t\tstep := stepperFunc(func(m Message) {\n\t\t\tdefer func() {\n\t\t\t\t\/\/ only recover is we expect it to panic so\n\t\t\t\t\/\/ panics we don't expect go up.\n\t\t\t\tif !tt.success {\n\t\t\t\t\te := recover()\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\tt.Logf(\"#%d: err: %s\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttt.step(m)\n\t\t})\n\n\t\tdata := []byte(\"somedata\")\n\n\t\t\/\/ promote 0 the leader\n\t\tstep(Message{To: 0, Type: msgHup})\n\t\tstep(Message{To: 0, Type: msgProp, Data: data})\n\n\t\tvar wantLog []Entry\n\t\tif tt.success {\n\t\t\twantLog = []Entry{{}, {Term: 1, Data: data}}\n\t\t} else {\n\t\t\twantLog = defaultLog\n\t\t}\n\t\tif g := diffLogs(wantLog, tt.logs()); g != nil {\n\t\t\tfor _, diff := range g {\n\t\t\t\tt.Errorf(\"#%d: diff:%s\", i, diff)\n\t\t\t}\n\t\t}\n\t\tsm := tt.network.ss[0].(*stateMachine)\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestProposalByProxy(t *testing.T) {\n\tdata := []byte(\"somedata\")\n\ttests := []*network{\n\t\tnewNetwork(nil, nil, nil),\n\t\tnewNetwork(nil, nil, nopStepper),\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.tee = stepperFunc(func(m Message) {\n\t\t\tt.Logf(\"#%d: m = %+v\", i, m)\n\t\t})\n\n\t\t\/\/ promote 0 the leader\n\t\ttt.step(Message{To: 0, Type: msgHup})\n\n\t\t\/\/ propose via follower\n\t\ttt.step(Message{To: 1, Type: msgProp, Data: []byte(\"somedata\")})\n\n\t\twantLog := []Entry{{}, {Term: 1, Data: data}}\n\t\tif g := diffLogs(wantLog, tt.logs()); g != nil {\n\t\t\tfor _, diff := range g {\n\t\t\t\tt.Errorf(\"#%d: bad entry: %s\", i, diff)\n\t\t\t}\n\t\t}\n\t\tsm := tt.ss[0].(*stateMachine)\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestVote(t *testing.T) {\n\ttests := []struct {\n\t\ti, term int\n\t\tw int\n\t}{\n\t\t{0, 0, -1},\n\t\t{0, 1, -1},\n\t\t{0, 2, -1},\n\t\t{0, 3, 2},\n\n\t\t{1, 0, -1},\n\t\t{1, 1, -1},\n\t\t{1, 2, -1},\n\t\t{1, 3, 2},\n\n\t\t{2, 0, -1},\n\t\t{2, 1, -1},\n\t\t{2, 2, 2},\n\t\t{2, 3, 2},\n\n\t\t{3, 0, -1},\n\t\t{3, 1, -1},\n\t\t{3, 2, 2},\n\t\t{3, 3, 2},\n\t}\n\n\tfor i, tt := range tests {\n\t\tcalled := false\n\t\tsm := &stateMachine{log: []Entry{{}, {Term: 2}, {Term: 2}}}\n\t\tsm.next = stepperFunc(func(m Message) {\n\t\t\tcalled = true\n\t\t\tif m.Index != tt.w {\n\t\t\t\tt.Errorf(\"#%d, m.Index = %d, want %d\", i, m.Index, tt.w)\n\t\t\t}\n\t\t})\n\t\tsm.step(Message{Type: msgVote, Index: tt.i, LogTerm: tt.term})\n\t\tif !called {\n\t\t\tt.Fatal(\"#%d: not called\", i)\n\t\t}\n\t}\n}\n\nfunc TestLogDiff(t *testing.T) {\n\ta := []Entry{{}, {Term: 1}, {Term: 2}}\n\tb := []Entry{{}, {Term: 1}, {Term: 2}}\n\tc := []Entry{{}, {Term: 2}}\n\td := []Entry(nil)\n\n\tw := []diff{\n\t\tdiff{1, []*Entry{{Term: 1}, {Term: 1}, {Term: 2}, nilLogEntry}},\n\t\tdiff{2, []*Entry{{Term: 2}, {Term: 2}, noEntry, nilLogEntry}},\n\t}\n\n\tif g := diffLogs(a, [][]Entry{b, c, d}); !reflect.DeepEqual(w, g) {\n\t\tt.Errorf(\"g = %s\", g)\n\t\tt.Errorf(\"want %s\", w)\n\t}\n}\n\ntype network struct {\n\ttee stepper\n\tss []stepper\n}\n\n\/\/ newNetwork initializes a network from nodes. A nil node will be replaced\n\/\/ with a new *stateMachine. A *stateMachine will get its k, addr, and next\n\/\/ fields set.\nfunc newNetwork(nodes ...stepper) *network {\n\tnt := &network{ss: nodes}\n\tfor i, n := range nodes {\n\t\tswitch v := n.(type) {\n\t\tcase nil:\n\t\t\tnt.ss[i] = newStateMachine(len(nodes), i, nt)\n\t\tcase *stateMachine:\n\t\t\tv.k = len(nodes)\n\t\t\tv.addr = i\n\t\tdefault:\n\t\t\tnt.ss[i] = v\n\t\t}\n\t}\n\treturn nt\n}\n\nfunc (nt network) step(m Message) {\n\tif nt.tee != nil {\n\t\tnt.tee.step(m)\n\t}\n\tnt.ss[m.To].step(m)\n}\n\n\/\/ logs returns all logs in nt prepended with want. If a node is not a\n\/\/ *stateMachine, its log will be nil.\nfunc (nt network) logs() [][]Entry {\n\tls := make([][]Entry, len(nt.ss))\n\tfor i, node := range nt.ss {\n\t\tif sm, ok := node.(*stateMachine); ok {\n\t\t\tls[i] = sm.log\n\t\t}\n\t}\n\treturn ls\n}\n\ntype diff struct {\n\ti int\n\tents []*Entry \/\/ pointers so they can be nil for N\/A\n}\n\nvar noEntry = &Entry{}\nvar nilLogEntry = &Entry{}\n\nfunc (d diff) String() string {\n\ts := fmt.Sprintf(\"[%d] \", d.i)\n\tfor i, e := range d.ents {\n\t\tswitch e {\n\t\tcase nilLogEntry:\n\t\t\ts += fmt.Sprintf(\"o\")\n\t\tcase noEntry:\n\t\t\ts += fmt.Sprintf(\"-\")\n\t\tcase nil:\n\t\t\ts += fmt.Sprintf(\"<nil>\")\n\t\tdefault:\n\t\t\ts += fmt.Sprintf(\"<%d:%q>\", e.Term, string(e.Data))\n\t\t}\n\t\tif i != len(d.ents)-1 {\n\t\t\ts += \"\\t\\t\"\n\t\t}\n\t}\n\treturn s\n}\n\nfunc diffLogs(base []Entry, logs [][]Entry) []diff {\n\tvar (\n\t\td []diff\n\t\tmax int\n\t)\n\tlogs = append([][]Entry{base}, logs...)\n\tfor _, log := range logs {\n\t\tif l := len(log); l > max {\n\t\t\tmax = l\n\t\t}\n\t}\n\tediff := func(i int) (result []*Entry) {\n\t\te := make([]*Entry, len(logs))\n\t\tfound := false\n\t\tfor j, log := range logs {\n\t\t\tif log == nil {\n\t\t\t\te[j] = nilLogEntry\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(log) <= i {\n\t\t\t\te[j] = noEntry\n\t\t\t\tfound = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\te[j] = &log[i]\n\t\t\tif j > 0 {\n\t\t\t\tswitch prev := e[j-1]; {\n\t\t\t\tcase prev == nilLogEntry:\n\t\t\t\tcase prev == noEntry:\n\t\t\t\tcase !reflect.DeepEqual(prev, e[j]):\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\treturn e\n\t\t}\n\t\treturn nil\n\t}\n\tfor i := 0; i < max; i++ {\n\t\tif e := ediff(i); e != nil {\n\t\t\td = append(d, diff{i, e})\n\t\t}\n\t}\n\treturn d\n}\n\ntype stepperFunc func(Message)\n\nfunc (f stepperFunc) step(m Message) { f(m) }\n\nvar nopStepper = stepperFunc(func(Message) {})\n\ntype nextStepperFunc func(Message, stepper)\n<commit_msg>raft: test failed election<commit_after>package raft\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar defaultLog = []Entry{{}}\n\nfunc TestLeaderElection(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tstate stateType\n\t}{\n\t\t{newNetwork(nil, nil, nil), stateLeader},\n\t\t{newNetwork(nil, nil, nopStepper), stateLeader},\n\t\t{newNetwork(nil, nopStepper, nopStepper), stateCandidate},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), stateCandidate},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), stateLeader},\n\n\t\t\/\/ three nodes are have logs further along than 0\n\t\t{\n\t\t\tnewNetwork(\n\t\t\t\tnil,\n\t\t\t\t&stateMachine{log: []Entry{{}, {Term: 1}}},\n\t\t\t\t&stateMachine{log: []Entry{{}, {Term: 2}}},\n\t\t\t\t&stateMachine{log: []Entry{{}, {Term: 1}}},\n\t\t\t\tnil,\n\t\t\t),\n\t\t\tstateFollower,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.step(Message{To: 0, Type: msgHup})\n\t\tsm := tt.network.ss[0].(*stateMachine)\n\t\tif sm.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, sm.state, tt.state)\n\t\t}\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestDualingCandidates(t *testing.T) {\n\ta := &stateMachine{log: defaultLog}\n\tc := &stateMachine{log: defaultLog}\n\n\ttt := newNetwork(a, nil, c)\n\n\theal := false\n\tnext := stepperFunc(func(m Message) {\n\t\tif heal {\n\t\t\ttt.step(m)\n\t\t}\n\t})\n\ta.next = next\n\tc.next = next\n\n\ttt.tee = stepperFunc(func(m Message) {\n\t\tt.Logf(\"m = %+v\", m)\n\t})\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 2, Type: msgHup})\n\n\tt.Log(\"healing\")\n\theal = true\n\ttt.step(Message{To: 2, Type: msgHup})\n\n\ttests := []struct {\n\t\tsm *stateMachine\n\t\tstate stateType\n\t\tterm int\n\t}{\n\t\t{a, stateFollower, 2},\n\t\t{c, stateLeader, 2},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif g := tt.sm.state; g != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, g, tt.state)\n\t\t}\n\t\tif g := tt.sm.term; g != tt.term {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, tt.term)\n\t\t}\n\t}\n\tif g := diffLogs(defaultLog, tt.logs()); g != nil {\n\t\tfor _, diff := range g {\n\t\t\tt.Errorf(\"bag log:\\n%s\", diff)\n\t\t}\n\t}\n}\n\nfunc TestOldMessages(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil)\n\t\/\/ make 0 leader @ term 3\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 0, Type: msgHup})\n\ttt.step(Message{To: 0, Type: msgHup})\n\t\/\/ pretend we're an old leader trying to make progress\n\ttt.step(Message{To: 0, Type: msgApp, Term: 1, Entries: []Entry{{Term: 1}}})\n\tif g := diffLogs(defaultLog, tt.logs()); g != nil {\n\t\tfor _, diff := range g {\n\t\t\tt.Errorf(\"bag log:\\n%s\", diff)\n\t\t}\n\t}\n}\n\n\/\/ TestOldMessagesReply - optimization - reply with new term.\n\nfunc TestProposal(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tsuccess bool\n\t}{\n\t\t{newNetwork(nil, nil, nil), true},\n\t\t{newNetwork(nil, nil, nopStepper), true},\n\t\t{newNetwork(nil, nopStepper, nopStepper), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.tee = stepperFunc(func(m Message) {\n\t\t\tt.Logf(\"#%d: m = %+v\", i, m)\n\t\t})\n\n\t\tstep := stepperFunc(func(m Message) {\n\t\t\tdefer func() {\n\t\t\t\t\/\/ only recover is we expect it to panic so\n\t\t\t\t\/\/ panics we don't expect go up.\n\t\t\t\tif !tt.success {\n\t\t\t\t\te := recover()\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\tt.Logf(\"#%d: err: %s\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttt.step(m)\n\t\t})\n\n\t\tdata := []byte(\"somedata\")\n\n\t\t\/\/ promote 0 the leader\n\t\tstep(Message{To: 0, Type: msgHup})\n\t\tstep(Message{To: 0, Type: msgProp, Data: data})\n\n\t\tvar wantLog []Entry\n\t\tif tt.success {\n\t\t\twantLog = []Entry{{}, {Term: 1, Data: data}}\n\t\t} else {\n\t\t\twantLog = defaultLog\n\t\t}\n\t\tif g := diffLogs(wantLog, tt.logs()); g != nil {\n\t\t\tfor _, diff := range g {\n\t\t\t\tt.Errorf(\"#%d: diff:%s\", i, diff)\n\t\t\t}\n\t\t}\n\t\tsm := tt.network.ss[0].(*stateMachine)\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestProposalByProxy(t *testing.T) {\n\tdata := []byte(\"somedata\")\n\ttests := []*network{\n\t\tnewNetwork(nil, nil, nil),\n\t\tnewNetwork(nil, nil, nopStepper),\n\t}\n\n\tfor i, tt := range tests {\n\t\ttt.tee = stepperFunc(func(m Message) {\n\t\t\tt.Logf(\"#%d: m = %+v\", i, m)\n\t\t})\n\n\t\t\/\/ promote 0 the leader\n\t\ttt.step(Message{To: 0, Type: msgHup})\n\n\t\t\/\/ propose via follower\n\t\ttt.step(Message{To: 1, Type: msgProp, Data: []byte(\"somedata\")})\n\n\t\twantLog := []Entry{{}, {Term: 1, Data: data}}\n\t\tif g := diffLogs(wantLog, tt.logs()); g != nil {\n\t\t\tfor _, diff := range g {\n\t\t\t\tt.Errorf(\"#%d: bad entry: %s\", i, diff)\n\t\t\t}\n\t\t}\n\t\tsm := tt.ss[0].(*stateMachine)\n\t\tif g := sm.term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}\n\nfunc TestVote(t *testing.T) {\n\ttests := []struct {\n\t\ti, term int\n\t\tw int\n\t}{\n\t\t{0, 0, -1},\n\t\t{0, 1, -1},\n\t\t{0, 2, -1},\n\t\t{0, 3, 2},\n\n\t\t{1, 0, -1},\n\t\t{1, 1, -1},\n\t\t{1, 2, -1},\n\t\t{1, 3, 2},\n\n\t\t{2, 0, -1},\n\t\t{2, 1, -1},\n\t\t{2, 2, 2},\n\t\t{2, 3, 2},\n\n\t\t{3, 0, -1},\n\t\t{3, 1, -1},\n\t\t{3, 2, 2},\n\t\t{3, 3, 2},\n\t}\n\n\tfor i, tt := range tests {\n\t\tcalled := false\n\t\tsm := &stateMachine{log: []Entry{{}, {Term: 2}, {Term: 2}}}\n\t\tsm.next = stepperFunc(func(m Message) {\n\t\t\tcalled = true\n\t\t\tif m.Index != tt.w {\n\t\t\t\tt.Errorf(\"#%d, m.Index = %d, want %d\", i, m.Index, tt.w)\n\t\t\t}\n\t\t})\n\t\tsm.step(Message{Type: msgVote, Index: tt.i, LogTerm: tt.term})\n\t\tif !called {\n\t\t\tt.Fatal(\"#%d: not called\", i)\n\t\t}\n\t}\n}\n\nfunc TestLogDiff(t *testing.T) {\n\ta := []Entry{{}, {Term: 1}, {Term: 2}}\n\tb := []Entry{{}, {Term: 1}, {Term: 2}}\n\tc := []Entry{{}, {Term: 2}}\n\td := []Entry(nil)\n\n\tw := []diff{\n\t\tdiff{1, []*Entry{{Term: 1}, {Term: 1}, {Term: 2}, nilLogEntry}},\n\t\tdiff{2, []*Entry{{Term: 2}, {Term: 2}, noEntry, nilLogEntry}},\n\t}\n\n\tif g := diffLogs(a, [][]Entry{b, c, d}); !reflect.DeepEqual(w, g) {\n\t\tt.Errorf(\"g = %s\", g)\n\t\tt.Errorf(\"want %s\", w)\n\t}\n}\n\ntype network struct {\n\ttee stepper\n\tss []stepper\n}\n\n\/\/ newNetwork initializes a network from nodes. A nil node will be replaced\n\/\/ with a new *stateMachine. A *stateMachine will get its k, addr, and next\n\/\/ fields set.\nfunc newNetwork(nodes ...stepper) *network {\n\tnt := &network{ss: nodes}\n\tfor i, n := range nodes {\n\t\tswitch v := n.(type) {\n\t\tcase nil:\n\t\t\tnt.ss[i] = newStateMachine(len(nodes), i, nt)\n\t\tcase *stateMachine:\n\t\t\tv.k = len(nodes)\n\t\t\tv.addr = i\n\t\t\tif v.next == nil {\n\t\t\t\tv.next = nt\n\t\t\t}\n\t\tdefault:\n\t\t\tnt.ss[i] = v\n\t\t}\n\t}\n\treturn nt\n}\n\nfunc (nt network) step(m Message) {\n\tif nt.tee != nil {\n\t\tnt.tee.step(m)\n\t}\n\tnt.ss[m.To].step(m)\n}\n\n\/\/ logs returns all logs in nt prepended with want. If a node is not a\n\/\/ *stateMachine, its log will be nil.\nfunc (nt network) logs() [][]Entry {\n\tls := make([][]Entry, len(nt.ss))\n\tfor i, node := range nt.ss {\n\t\tif sm, ok := node.(*stateMachine); ok {\n\t\t\tls[i] = sm.log\n\t\t}\n\t}\n\treturn ls\n}\n\ntype diff struct {\n\ti int\n\tents []*Entry \/\/ pointers so they can be nil for N\/A\n}\n\nvar noEntry = &Entry{}\nvar nilLogEntry = &Entry{}\n\nfunc (d diff) String() string {\n\ts := fmt.Sprintf(\"[%d] \", d.i)\n\tfor i, e := range d.ents {\n\t\tswitch e {\n\t\tcase nilLogEntry:\n\t\t\ts += fmt.Sprintf(\"o\")\n\t\tcase noEntry:\n\t\t\ts += fmt.Sprintf(\"-\")\n\t\tcase nil:\n\t\t\ts += fmt.Sprintf(\"<nil>\")\n\t\tdefault:\n\t\t\ts += fmt.Sprintf(\"<%d:%q>\", e.Term, string(e.Data))\n\t\t}\n\t\tif i != len(d.ents)-1 {\n\t\t\ts += \"\\t\\t\"\n\t\t}\n\t}\n\treturn s\n}\n\nfunc diffLogs(base []Entry, logs [][]Entry) []diff {\n\tvar (\n\t\td []diff\n\t\tmax int\n\t)\n\tlogs = append([][]Entry{base}, logs...)\n\tfor _, log := range logs {\n\t\tif l := len(log); l > max {\n\t\t\tmax = l\n\t\t}\n\t}\n\tediff := func(i int) (result []*Entry) {\n\t\te := make([]*Entry, len(logs))\n\t\tfound := false\n\t\tfor j, log := range logs {\n\t\t\tif log == nil {\n\t\t\t\te[j] = nilLogEntry\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(log) <= i {\n\t\t\t\te[j] = noEntry\n\t\t\t\tfound = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\te[j] = &log[i]\n\t\t\tif j > 0 {\n\t\t\t\tswitch prev := e[j-1]; {\n\t\t\t\tcase prev == nilLogEntry:\n\t\t\t\tcase prev == noEntry:\n\t\t\t\tcase !reflect.DeepEqual(prev, e[j]):\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\treturn e\n\t\t}\n\t\treturn nil\n\t}\n\tfor i := 0; i < max; i++ {\n\t\tif e := ediff(i); e != nil {\n\t\t\td = append(d, diff{i, e})\n\t\t}\n\t}\n\treturn d\n}\n\ntype stepperFunc func(Message)\n\nfunc (f stepperFunc) step(m Message) { f(m) }\n\nvar nopStepper = stepperFunc(func(Message) {})\n\ntype nextStepperFunc func(Message, stepper)\n<|endoftext|>"} {"text":"<commit_before>package rain\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Location0 struct\ntype Location0 struct {\n\tLat float32 `xml:\"lat\"`\n\tLng float32 `xml:\"lng\"`\n\tName string `xml:\"locationName\"`\n\tStationID string `xml:\"stationId\"`\n\tTime time.Time `xml:\"time>obsTime\"`\n\tWeatherElement []WeatherElement `xml:\"weatherElement\"`\n\tParameter []Parameter `xml:\"parameter\"`\n}\n\n\/\/ Location1 struct\ntype Location1 struct {\n\tGeocode int `xml:\"geocode\"`\n\tName string `xml:\"locationName\"`\n\tHazards Hazards `xml:\"hazardConditions>hazards\"`\n}\n\n\/\/ WeatherElement struct\ntype WeatherElement struct {\n\tName string `xml:\"elementName\"`\n\tValue float32 `xml:\"elementValue>value\"`\n}\n\n\/\/ Parameter struct\ntype Parameter struct {\n\tName string `xml:\"parameterName\"`\n\tValue string `xml:\"parameterValue\"`\n}\n\n\/\/ ValidTime struct\ntype ValidTime struct {\n\tStartTime time.Time `xml:\"startTime\"`\n\tEndTime time.Time `xml:\"endTime\"`\n}\n\n\/\/ AffectedAreas struct\ntype AffectedAreas struct {\n\tName string `xml:\"locationName\"`\n}\n\n\/\/ HazardInfo0 struct\ntype HazardInfo0 struct {\n\tLanguage string `xml:\"language\"`\n\tPhenomena string `xml:\"phenomena\"`\n\tSignificance string `xml:\"significance\"`\n}\n\n\/\/ HazardInfo1 struct\ntype HazardInfo1 struct {\n\tLanguage string `xml:\"language\"`\n\tPhenomena string `xml:\"phenomena\"`\n\tAffectedAreas []AffectedAreas `xml:\"affectedAreas>location\"`\n}\n\n\/\/ Hazards struct\ntype Hazards struct {\n\tInfo HazardInfo0 `xml:\"info\"`\n\tValidTime ValidTime `xml:\"validTime\"`\n\tHazardInfo HazardInfo1 `xml:\"hazard>info\"`\n}\n\n\/\/ Result0 struct\ntype Result0 struct {\n\tLocation []Location0 `xml:\"location\"`\n}\n\n\/\/ Result1 struct\ntype Result1 struct {\n\tLocation []Location1 `xml:\"dataset>location\"`\n}\n\nconst timeZone = \"Asia\/Taipei\"\n\nfunc fetchXML(url string) []byte {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\txmldata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn xmldata\n}\n\n\/\/ GetInfo from \"中央氣象局\"\nfunc GetInfo(place string, targets []string) ([]string, string) {\n\tvar msgs = []string{}\n\n\trainLevel := map[string]float32{\n\t\t\"10minutes\": 6.0,\n\t\t\"1hour\": 30.0,\n\t}\n\n\tauthKey := \"CWB-FB35C2AC-9286-4B7E-AD11-6BBB7F2855F7\"\n\tbaseURL := \"http:\/\/opendata.cwb.gov.tw\/opendataapi?dataid=\"\n\n\turl0 := baseURL + \"O-A0002-001\" + \"&authorizationkey=\" + authKey\n\txmldata0 := fetchXML(url0)\n\n\tv0 := Result0{}\n\terr := xml.Unmarshal([]byte(xmldata0), &v0)\n\tif err != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t\treturn []string{}, \"\"\n\t}\n\n\tfor _, location := range v0.Location {\n\t\tfor _, parameter := range location.Parameter {\n\t\t\tif parameter.Name == \"CITY\" && parameter.Value == place {\n\t\t\t\tfor _, element := range location.WeatherElement {\n\t\t\t\t\tswitch element.Name {\n\t\t\t\t\tcase \"MIN_10\":\n\t\t\t\t\t\tif element.Value < 0 {\n\t\t\t\t\t\t\tlog.Printf(\"%s:%s\", \"十分鐘雨量\", \"-\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"%s:%.2f\", \"十分鐘雨量\", element.Value)\n\t\t\t\t\t\t\tif element.Value > rainLevel[\"10minutes\"] {\n\t\t\t\t\t\t\t\tmsgs = append(msgs, fmt.Sprintf(\"[豪大雨警報] %s 地區 %s 為 %f\", element.Name, \"十分鐘雨量\", element.Value))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"RAIN\":\n\t\t\t\t\t\tif element.Value < 0 {\n\t\t\t\t\t\t\tlog.Printf(\"[%s]\", location.Name)\n\t\t\t\t\t\t\tlog.Printf(\"%s:%s\", \"一小時雨量\", \"-\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"[%s]\", location.Name)\n\t\t\t\t\t\t\tlog.Printf(\"%s:%.2f\", \"一小時雨量\", element.Value)\n\t\t\t\t\t\t\tif element.Value > rainLevel[\"1hour\"] {\n\t\t\t\t\t\t\t\tmsgs = append(msgs, fmt.Sprintf(\"[豪大雨警報] %s 地區 %s 為 %f\", element.Name, \"一小時雨量\", element.Value))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\turl1 := baseURL + \"W-C0033-001\" + \"&authorizationkey=\" + authKey\n\txmldata1 := fetchXML(url1)\n\n\tv1 := Result1{}\n\tif xml.Unmarshal([]byte(xmldata1), &v1) != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t\treturn []string{}, \"\"\n\t}\n\n\tlocal := time.Now()\n\tlocation, err := time.LoadLocation(timeZone)\n\tif err == nil {\n\t\tlocal = local.In(location)\n\t}\n\n\tvar hazardmsgs = \"\"\n\tvar token = \"\"\n\tfor i, location := range v1.Location {\n\t\tif i == 0 {\n\t\t\ttoken = token + strconv.Itoa(location.Geocode)\n\t\t}\n\t\tif location.Hazards.Info.Phenomena != \"\" && location.Hazards.ValidTime.EndTime.After(local) {\n\t\t\tif targets != nil {\n\t\t\t\tfor _, name := range targets {\n\t\t\t\t\tif name == location.Name {\n\t\t\t\t\t\thazardmsgs = hazardmsgs + saveHazards(location)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thazardmsgs = hazardmsgs + saveHazards(location)\n\t\t\t}\n\t\t}\n\t}\n\n\tmsgs = append(msgs, hazardmsgs)\n\n\treturn msgs, token\n}\n\nfunc saveHazards(location Location1) string {\n\tlog.Println(\"***************************************\")\n\tlog.Printf(\"【%s%s%s %s ~ %s】影響地區:\", location.Name, location.Hazards.Info.Phenomena, location.Hazards.Info.Significance, location.Hazards.ValidTime.StartTime.Format(\"01\/02 15:04\"), location.Hazards.ValidTime.EndTime.Format(\"01\/02 15:04\"))\n\tm := fmt.Sprintf(\"【%s%s%s %s ~ %s】影響地區:\", location.Name, location.Hazards.Info.Phenomena, location.Hazards.Info.Significance, location.Hazards.ValidTime.StartTime.Format(\"01\/02 15:04\"), location.Hazards.ValidTime.EndTime.Format(\"01\/02 15:04\"))\n\tfor _, str := range location.Hazards.HazardInfo.AffectedAreas {\n\t\tlog.Printf(\"%s \", str.Name)\n\t\tm = m + fmt.Sprintf(\"%s \", str.Name)\n\t}\n\tlog.Println(\"\\n***************************************\")\n\n\treturn m\n}\n<commit_msg>changed format<commit_after>package rain\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Location0 struct\ntype Location0 struct {\n\tLat float32 `xml:\"lat\"`\n\tLng float32 `xml:\"lng\"`\n\tName string `xml:\"locationName\"`\n\tStationID string `xml:\"stationId\"`\n\tTime time.Time `xml:\"time>obsTime\"`\n\tWeatherElement []WeatherElement `xml:\"weatherElement\"`\n\tParameter []Parameter `xml:\"parameter\"`\n}\n\n\/\/ Location1 struct\ntype Location1 struct {\n\tGeocode int `xml:\"geocode\"`\n\tName string `xml:\"locationName\"`\n\tHazards Hazards `xml:\"hazardConditions>hazards\"`\n}\n\n\/\/ WeatherElement struct\ntype WeatherElement struct {\n\tName string `xml:\"elementName\"`\n\tValue float32 `xml:\"elementValue>value\"`\n}\n\n\/\/ Parameter struct\ntype Parameter struct {\n\tName string `xml:\"parameterName\"`\n\tValue string `xml:\"parameterValue\"`\n}\n\n\/\/ ValidTime struct\ntype ValidTime struct {\n\tStartTime time.Time `xml:\"startTime\"`\n\tEndTime time.Time `xml:\"endTime\"`\n}\n\n\/\/ AffectedAreas struct\ntype AffectedAreas struct {\n\tName string `xml:\"locationName\"`\n}\n\n\/\/ HazardInfo0 struct\ntype HazardInfo0 struct {\n\tLanguage string `xml:\"language\"`\n\tPhenomena string `xml:\"phenomena\"`\n\tSignificance string `xml:\"significance\"`\n}\n\n\/\/ HazardInfo1 struct\ntype HazardInfo1 struct {\n\tLanguage string `xml:\"language\"`\n\tPhenomena string `xml:\"phenomena\"`\n\tAffectedAreas []AffectedAreas `xml:\"affectedAreas>location\"`\n}\n\n\/\/ Hazards struct\ntype Hazards struct {\n\tInfo HazardInfo0 `xml:\"info\"`\n\tValidTime ValidTime `xml:\"validTime\"`\n\tHazardInfo HazardInfo1 `xml:\"hazard>info\"`\n}\n\n\/\/ Result0 struct\ntype Result0 struct {\n\tLocation []Location0 `xml:\"location\"`\n}\n\n\/\/ Result1 struct\ntype Result1 struct {\n\tLocation []Location1 `xml:\"dataset>location\"`\n}\n\nconst timeZone = \"Asia\/Taipei\"\n\nfunc fetchXML(url string) []byte {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\txmldata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn xmldata\n}\n\n\/\/ GetInfo from \"中央氣象局\"\nfunc GetInfo(place string, targets []string) ([]string, string) {\n\tvar msgs = []string{}\n\n\trainLevel := map[string]float32{\n\t\t\"10minutes\": 6.0,\n\t\t\"1hour\": 30.0,\n\t}\n\n\tauthKey := \"CWB-FB35C2AC-9286-4B7E-AD11-6BBB7F2855F7\"\n\tbaseURL := \"http:\/\/opendata.cwb.gov.tw\/opendataapi?dataid=\"\n\n\turl0 := baseURL + \"O-A0002-001\" + \"&authorizationkey=\" + authKey\n\txmldata0 := fetchXML(url0)\n\n\tv0 := Result0{}\n\terr := xml.Unmarshal([]byte(xmldata0), &v0)\n\tif err != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t\treturn []string{}, \"\"\n\t}\n\n\tfor _, location := range v0.Location {\n\t\tfor _, parameter := range location.Parameter {\n\t\t\tif parameter.Name == \"CITY\" && parameter.Value == place {\n\t\t\t\tfor _, element := range location.WeatherElement {\n\t\t\t\t\tswitch element.Name {\n\t\t\t\t\tcase \"MIN_10\":\n\t\t\t\t\t\tif element.Value < 0 {\n\t\t\t\t\t\t\tlog.Printf(\"%s:%s\", \"十分鐘雨量\", \"-\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"%s:%.2f\", \"十分鐘雨量\", element.Value)\n\t\t\t\t\t\t\tif element.Value > rainLevel[\"10minutes\"] {\n\t\t\t\t\t\t\t\tmsgs = append(msgs, fmt.Sprintf(\"[豪大雨警報] %s 地區 %s 為 %f\", element.Name, \"十分鐘雨量\", element.Value))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"RAIN\":\n\t\t\t\t\t\tif element.Value < 0 {\n\t\t\t\t\t\t\tlog.Printf(\"[%s]\", location.Name)\n\t\t\t\t\t\t\tlog.Printf(\"%s:%s\", \"一小時雨量\", \"-\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"[%s]\", location.Name)\n\t\t\t\t\t\t\tlog.Printf(\"%s:%.2f\", \"一小時雨量\", element.Value)\n\t\t\t\t\t\t\tif element.Value > rainLevel[\"1hour\"] {\n\t\t\t\t\t\t\t\tmsgs = append(msgs, fmt.Sprintf(\"[豪大雨警報] %s 地區 %s 為 %f\", element.Name, \"一小時雨量\", element.Value))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\turl1 := baseURL + \"W-C0033-001\" + \"&authorizationkey=\" + authKey\n\txmldata1 := fetchXML(url1)\n\n\tv1 := Result1{}\n\tif xml.Unmarshal([]byte(xmldata1), &v1) != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t\treturn []string{}, \"\"\n\t}\n\n\tlocal := time.Now()\n\tlocation, err := time.LoadLocation(timeZone)\n\tif err == nil {\n\t\tlocal = local.In(location)\n\t}\n\n\tvar hazardmsgs = \"\"\n\tvar token = \"\"\n\tfor i, location := range v1.Location {\n\t\tif i == 0 {\n\t\t\ttoken = token + strconv.Itoa(location.Geocode)\n\t\t}\n\t\tif location.Hazards.Info.Phenomena != \"\" && location.Hazards.ValidTime.EndTime.After(local) {\n\t\t\tif targets != nil {\n\t\t\t\tfor _, name := range targets {\n\t\t\t\t\tif name == location.Name {\n\t\t\t\t\t\thazardmsgs = hazardmsgs + saveHazards(location)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thazardmsgs = hazardmsgs + saveHazards(location)\n\t\t\t}\n\t\t}\n\t}\n\n\tmsgs = append(msgs, hazardmsgs)\n\n\treturn msgs, token\n}\n\nfunc saveHazards(location Location1) string {\n\tlog.Println(\"***************************************\")\n\tlog.Printf(\"【%s%s%s %s ~ %s】影響地區:\", location.Name, location.Hazards.Info.Phenomena, location.Hazards.Info.Significance, location.Hazards.ValidTime.StartTime.Format(\"01\/02 15:04\"), location.Hazards.ValidTime.EndTime.Format(\"01\/02 15:04\"))\n\tm := fmt.Sprintf(\"【%s%s%s %s ~ %s】影響地區:\", location.Name, location.Hazards.Info.Phenomena, location.Hazards.Info.Significance, location.Hazards.ValidTime.StartTime.Format(\"01\/02 15:04\"), location.Hazards.ValidTime.EndTime.Format(\"01\/02 15:04\"))\n\tfor _, str := range location.Hazards.HazardInfo.AffectedAreas {\n\t\tlog.Printf(\"%s \", str.Name)\n\t\tm = m + fmt.Sprintf(\"%s \", str.Name)\n\t}\n\tm = m + \"\\n\"\n\tlog.Println(\"\\n***************************************\")\n\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"io\"\n)\n\nvar size = flag.Bool(\"s\", false, \"Prints the number of index objects\")\nvar hash = flag.String(\"h\", \"\", \"Hash of object to lookup\")\n\nfunc showSize(in io.ReadSeeker) {\n\tcount, err := GetTotalCount(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(os.Stdout, \"%d\\n\", count)\n}\n\nfunc showHashObject(in io.ReadSeeker) {\n\tindex, err := GetObjectForHash(*hash, in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(os.Stdout, \"%s\\n\", index)\n}\n\nfunc showAllIndex(in io.ReadSeeker) {\n\tcount, err := GetTotalCount(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor i := 0; i < int(count); i++ {\n\t\tindex, err := ReadPackIndexAt(i, in)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Fprintf(os.Stdout, \"%s\\n\", index)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tf, err := GetArgInputFile()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif *size {\n\t\tshowSize(f)\n\t\treturn\n\t}\n\tif len(*hash) > 0 {\n\t\tshowHashObject(f)\n\t\treturn\n\t}\n\tshowAllIndex(f)\n}\n\n<commit_msg>Change flag to -c<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"io\"\n)\n\nvar size = flag.Bool(\"c\", false, \"Prints the number of index objects\")\nvar hash = flag.String(\"h\", \"\", \"Hash of object to lookup\")\n\nfunc showSize(in io.ReadSeeker) {\n\tcount, err := GetTotalCount(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(os.Stdout, \"%d\\n\", count)\n}\n\nfunc showHashObject(in io.ReadSeeker) {\n\tindex, err := GetObjectForHash(*hash, in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(os.Stdout, \"%s\\n\", index)\n}\n\nfunc showAllIndex(in io.ReadSeeker) {\n\tcount, err := GetTotalCount(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor i := 0; i < int(count); i++ {\n\t\tindex, err := ReadPackIndexAt(i, in)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Fprintf(os.Stdout, \"%s\\n\", index)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tf, err := GetArgInputFile()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif *size {\n\t\tshowSize(f)\n\t\treturn\n\t}\n\tif len(*hash) > 0 {\n\t\tshowHashObject(f)\n\t\treturn\n\t}\n\tshowAllIndex(f)\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage redis\n\nimport (\n\t\"errors\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"launchpad.net\/gocheck\"\n\t\"sync\/atomic\"\n)\n\n\/\/ CmdType represents a type of command, it may be a send or a do.\ntype CmdType byte\n\nconst (\n\t\/\/ CmdSend represents a send call in redigo.\n\tCmdSend CmdType = iota\n\t\/\/ CmdDo represents a do call in redigo.\n\tCmdDo\n)\n\nfunc ClearRedisKeys(keysPattern string, c *gocheck.C) {\n\tredisConn, err := redis.Dial(\"tcp\", \"127.0.0.1:6379\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer redisConn.Close()\n\tresult, err := redisConn.Do(\"KEYS\", keysPattern)\n\tc.Assert(err, gocheck.IsNil)\n\tkeys := result.([]interface{})\n\tfor _, key := range keys {\n\t\tkeyName := string(key.([]byte))\n\t\tredisConn.Do(\"DEL\", keyName)\n\t}\n}\n\n\/\/ RedisCommand is a command sent to the redis server.\ntype RedisCommand struct {\n\tCmd string\n\tArgs []interface{}\n\tType CmdType\n}\n\n\/\/ FakeRedisConn is a fake implementation of redis.Conn.\n\/\/\n\/\/ It's useful for mocks only. You may use it with a pool:\n\/\/\n\/\/ fakePool := redis.NewPool(func() (redis.Conn, error) {\n\/\/ return &FakeRedisConn{}, nil\n\/\/ }, 10)\n\/\/ conn := fakePool.Get()\ntype FakeRedisConn struct {\n\tCmds []RedisCommand\n\tclosed int32\n}\n\n\/\/ Close closes the connection.\nfunc (c *FakeRedisConn) Close() error {\n\tif !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {\n\t\treturn errors.New(\"connection already closed\")\n\t}\n\treturn nil\n}\n\n\/\/ Err returns the last error.\nfunc (c *FakeRedisConn) Err() error {\n\treturn nil\n}\n\n\/\/ Do executes a do command, storing the arguments in the internal slice of\n\/\/ commands. It doesn't return anything.\nfunc (c *FakeRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\treturn nil, c.cmd(CmdDo, cmd, args)\n}\n\n\/\/ Send executes a send command, storing the arguments in the internal slice of\n\/\/ commands. It doesn't return anything.\nfunc (c *FakeRedisConn) Send(cmd string, args ...interface{}) error {\n\treturn c.cmd(CmdSend, cmd, args)\n}\n\nfunc (c *FakeRedisConn) cmd(tp CmdType, cmd string, args []interface{}) error {\n\tif cmd != \"\" {\n\t\tc.Cmds = append(c.Cmds, RedisCommand{Cmd: cmd, Args: args, Type: tp})\n\t}\n\treturn nil\n}\n\n\/\/ Flush does not do anything in the fake connection.\nfunc (*FakeRedisConn) Flush() error {\n\treturn nil\n}\n\n\/\/ Receive does not do anything in the fake connection.\nfunc (*FakeRedisConn) Receive() (interface{}, error) {\n\treturn nil, nil\n}\n\n\/\/ GetCommands return the list of commands of the given type.\nfunc (c *FakeRedisConn) GetCommands(tp CmdType) []RedisCommand {\n\tvar cmds []RedisCommand\n\tfor _, cmd := range c.Cmds {\n\t\tif cmd.Type == tp {\n\t\t\tcmds = append(cmds, cmd)\n\t\t}\n\t}\n\treturn cmds\n}\n\n\/\/ FailingFakeRedisConn is a fake connection that fails to execute commands\n\/\/ (via Do and\/or Send).\ntype FailingFakeRedisConn struct {\n\tFakeRedisConn\n}\n\nfunc (c *FailingFakeRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"I can't do that.\")\n}\n\nfunc (c *FailingFakeRedisConn) Send(cmd string, args ...interface{}) error {\n\treturn errors.New(\"I can't do that.\")\n}\n\n\/\/ ResultCommandRedisConn is a fake connection that returns a result in the Do\n\/\/ command.\ntype ResultCommandRedisConn struct {\n\t*FakeRedisConn\n\tReply map[string]interface{}\n\tDefaultReply interface{}\n}\n\n\/\/ Do returns the result for a command as specified by the Reply map. If no\n\/\/ command is provided, it will return DefaultReply.\nfunc (c *ResultCommandRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\tc.FakeRedisConn.Do(cmd, args...)\n\tif c.Reply == nil {\n\t\treturn c.DefaultReply, nil\n\t}\n\tif reply := c.Reply[cmd]; reply != nil {\n\t\treturn reply, nil\n\t}\n\treturn c.DefaultReply, nil\n}\n<commit_msg>testing\/redis: goimports<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage redis\n\nimport (\n\t\"errors\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"launchpad.net\/gocheck\"\n)\n\n\/\/ CmdType represents a type of command, it may be a send or a do.\ntype CmdType byte\n\nconst (\n\t\/\/ CmdSend represents a send call in redigo.\n\tCmdSend CmdType = iota\n\t\/\/ CmdDo represents a do call in redigo.\n\tCmdDo\n)\n\nfunc ClearRedisKeys(keysPattern string, c *gocheck.C) {\n\tredisConn, err := redis.Dial(\"tcp\", \"127.0.0.1:6379\")\n\tc.Assert(err, gocheck.IsNil)\n\tdefer redisConn.Close()\n\tresult, err := redisConn.Do(\"KEYS\", keysPattern)\n\tc.Assert(err, gocheck.IsNil)\n\tkeys := result.([]interface{})\n\tfor _, key := range keys {\n\t\tkeyName := string(key.([]byte))\n\t\tredisConn.Do(\"DEL\", keyName)\n\t}\n}\n\n\/\/ RedisCommand is a command sent to the redis server.\ntype RedisCommand struct {\n\tCmd string\n\tArgs []interface{}\n\tType CmdType\n}\n\n\/\/ FakeRedisConn is a fake implementation of redis.Conn.\n\/\/\n\/\/ It's useful for mocks only. You may use it with a pool:\n\/\/\n\/\/ fakePool := redis.NewPool(func() (redis.Conn, error) {\n\/\/ return &FakeRedisConn{}, nil\n\/\/ }, 10)\n\/\/ conn := fakePool.Get()\ntype FakeRedisConn struct {\n\tCmds []RedisCommand\n\tclosed int32\n}\n\n\/\/ Close closes the connection.\nfunc (c *FakeRedisConn) Close() error {\n\tif !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {\n\t\treturn errors.New(\"connection already closed\")\n\t}\n\treturn nil\n}\n\n\/\/ Err returns the last error.\nfunc (c *FakeRedisConn) Err() error {\n\treturn nil\n}\n\n\/\/ Do executes a do command, storing the arguments in the internal slice of\n\/\/ commands. It doesn't return anything.\nfunc (c *FakeRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\treturn nil, c.cmd(CmdDo, cmd, args)\n}\n\n\/\/ Send executes a send command, storing the arguments in the internal slice of\n\/\/ commands. It doesn't return anything.\nfunc (c *FakeRedisConn) Send(cmd string, args ...interface{}) error {\n\treturn c.cmd(CmdSend, cmd, args)\n}\n\nfunc (c *FakeRedisConn) cmd(tp CmdType, cmd string, args []interface{}) error {\n\tif cmd != \"\" {\n\t\tc.Cmds = append(c.Cmds, RedisCommand{Cmd: cmd, Args: args, Type: tp})\n\t}\n\treturn nil\n}\n\n\/\/ Flush does not do anything in the fake connection.\nfunc (*FakeRedisConn) Flush() error {\n\treturn nil\n}\n\n\/\/ Receive does not do anything in the fake connection.\nfunc (*FakeRedisConn) Receive() (interface{}, error) {\n\treturn nil, nil\n}\n\n\/\/ GetCommands return the list of commands of the given type.\nfunc (c *FakeRedisConn) GetCommands(tp CmdType) []RedisCommand {\n\tvar cmds []RedisCommand\n\tfor _, cmd := range c.Cmds {\n\t\tif cmd.Type == tp {\n\t\t\tcmds = append(cmds, cmd)\n\t\t}\n\t}\n\treturn cmds\n}\n\n\/\/ FailingFakeRedisConn is a fake connection that fails to execute commands\n\/\/ (via Do and\/or Send).\ntype FailingFakeRedisConn struct {\n\tFakeRedisConn\n}\n\nfunc (c *FailingFakeRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"I can't do that.\")\n}\n\nfunc (c *FailingFakeRedisConn) Send(cmd string, args ...interface{}) error {\n\treturn errors.New(\"I can't do that.\")\n}\n\n\/\/ ResultCommandRedisConn is a fake connection that returns a result in the Do\n\/\/ command.\ntype ResultCommandRedisConn struct {\n\t*FakeRedisConn\n\tReply map[string]interface{}\n\tDefaultReply interface{}\n}\n\n\/\/ Do returns the result for a command as specified by the Reply map. If no\n\/\/ command is provided, it will return DefaultReply.\nfunc (c *ResultCommandRedisConn) Do(cmd string, args ...interface{}) (interface{}, error) {\n\tc.FakeRedisConn.Do(cmd, args...)\n\tif c.Reply == nil {\n\t\treturn c.DefaultReply, nil\n\t}\n\tif reply := c.Reply[cmd]; reply != nil {\n\t\treturn reply, nil\n\t}\n\treturn c.DefaultReply, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package kubernetes\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/micro\/go-micro\/v2\/runtime\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/kubernetes\/api\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/kubernetes\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/log\"\n)\n\ntype service struct {\n\t\/\/ service to manage\n\t*runtime.Service\n\t\/\/ Kubernetes service\n\tkservice *client.Service\n\t\/\/ Kubernetes deployment\n\tkdeploy *client.Deployment\n}\n\nfunc parseError(err error) *api.Status {\n\tstatus := new(api.Status)\n\tjson.Unmarshal([]byte(err.Error()), &status)\n\treturn status\n}\n\nfunc newService(s *runtime.Service, c runtime.CreateOptions) *service {\n\t\/\/ use pre-formatted name\/version\n\tname := client.Format(s.Name)\n\tversion := client.Format(s.Version)\n\n\tkservice := client.NewService(name, version, c.Type)\n\tkdeploy := client.NewDeployment(name, version, c.Type)\n\n\tlog.Debugf(\"newService Source: %v\", s.Source)\n\tif len(s.Source) > 0 {\n\t\tfor _, c := range kdeploy.Spec.Template.PodSpec.Containers {\n\t\t\tc.Image = s.Source\n\t\t\tc.Command = []string{name}\n\t\t}\n\t}\n\tlog.Debugf(\"kdeploy: %v\", kdeploy)\n\tlog.Debugf(\"options: %v\", c)\n\n\t\/\/ attach our values to the deployment; name, version, source\n\tkdeploy.Metadata.Annotations[\"name\"] = s.Name\n\tkdeploy.Metadata.Annotations[\"version\"] = s.Version\n\tkdeploy.Metadata.Annotations[\"source\"] = s.Source\n\n\t\/\/ associate owner:group to be later augmented\n\tkdeploy.Metadata.Annotations[\"owner\"] = \"micro\"\n\tkdeploy.Metadata.Annotations[\"group\"] = \"micro\"\n\n\t\/\/ set a build timestamp to the current time\n\tif kdeploy.Spec.Template.Metadata.Annotations == nil {\n\t\tkdeploy.Spec.Template.Metadata.Annotations = make(map[string]string)\n\t}\n\tkdeploy.Spec.Template.Metadata.Annotations[\"build\"] = time.Now().Format(time.RFC3339)\n\n\t\/\/ define the environment values used by the container\n\tenv := make([]client.EnvVar, 0, len(c.Env))\n\tfor _, evar := range c.Env {\n\t\tevarPair := strings.Split(evar, \"=\")\n\t\tenv = append(env, client.EnvVar{Name: evarPair[0], Value: evarPair[1]})\n\t}\n\n\t\/\/ if environment has been supplied update deployment default environment\n\tif len(env) > 0 {\n\t\tkdeploy.Spec.Template.PodSpec.Containers[0].Env = append(kdeploy.Spec.Template.PodSpec.Containers[0].Env, env...)\n\t}\n\n\t\/\/ specify the command to exec\n\tif len(c.Command) > 0 {\n\t\tkdeploy.Spec.Template.PodSpec.Containers[0].Command = c.Command\n\t}\n\n\treturn &service{\n\t\tService: s,\n\t\tkservice: kservice,\n\t\tkdeploy: kdeploy,\n\t}\n}\n\nfunc deploymentResource(d *client.Deployment) *client.Resource {\n\treturn &client.Resource{\n\t\tName: d.Metadata.Name,\n\t\tKind: \"deployment\",\n\t\tValue: d,\n\t}\n}\n\nfunc serviceResource(s *client.Service) *client.Resource {\n\treturn &client.Resource{\n\t\tName: s.Metadata.Name,\n\t\tKind: \"service\",\n\t\tValue: s,\n\t}\n}\n\n\/\/ Start starts the Kubernetes service. It creates new kubernetes deployment and service API objects\nfunc (s *service) Start(k client.Client) error {\n\t\/\/ create deployment first; if we fail, we dont create service\n\tif err := k.Create(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to create deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\tv := parseError(err)\n\t\tif v.Reason == \"AlreadyExists\" {\n\t\t\treturn runtime.ErrAlreadyExists\n\t\t}\n\t\treturn err\n\t}\n\t\/\/ create service now that the deployment has been created\n\tif err := k.Create(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to create service: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\tv := parseError(err)\n\t\tif v.Reason == \"AlreadyExists\" {\n\t\t\treturn runtime.ErrAlreadyExists\n\t\t}\n\t\treturn err\n\t}\n\n\ts.Status(\"started\", nil)\n\n\treturn nil\n}\n\nfunc (s *service) Stop(k client.Client) error {\n\t\/\/ first attempt to delete service\n\tif err := k.Delete(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to delete service: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\t\/\/ delete deployment once the service has been deleted\n\tif err := k.Delete(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to delete deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\n\ts.Status(\"stopped\", nil)\n\n\treturn nil\n}\n\nfunc (s *service) Update(k client.Client) error {\n\tif err := k.Update(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to update deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\tif err := k.Update(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to update service: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Status(status string, err error) {\n\tif err == nil {\n\t\ts.Metadata[\"status\"] = status\n\t\treturn\n\t}\n\ts.Metadata[\"status\"] = \"error\"\n\ts.Metadata[\"error\"] = err.Error()\n}\n<commit_msg>Debugging<commit_after>package kubernetes\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/micro\/go-micro\/v2\/runtime\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/kubernetes\/api\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/kubernetes\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/log\"\n)\n\ntype service struct {\n\t\/\/ service to manage\n\t*runtime.Service\n\t\/\/ Kubernetes service\n\tkservice *client.Service\n\t\/\/ Kubernetes deployment\n\tkdeploy *client.Deployment\n}\n\nfunc parseError(err error) *api.Status {\n\tstatus := new(api.Status)\n\tjson.Unmarshal([]byte(err.Error()), &status)\n\treturn status\n}\n\nfunc newService(s *runtime.Service, c runtime.CreateOptions) *service {\n\t\/\/ use pre-formatted name\/version\n\tname := client.Format(s.Name)\n\tversion := client.Format(s.Version)\n\n\tkservice := client.NewService(name, version, c.Type)\n\tkdeploy := client.NewDeployment(name, version, c.Type)\n\n\tif len(s.Source) > 0 {\n\t\tfor _, c := range kdeploy.Spec.Template.PodSpec.Containers {\n\t\t\tc.Image = s.Source\n\t\t\tc.Command = []string{name}\n\t\t\tlog.Debugf(\"Setting name to %v\", c.Command)\n\t\t}\n\t}\n\n\t\/\/ attach our values to the deployment; name, version, source\n\tkdeploy.Metadata.Annotations[\"name\"] = s.Name\n\tkdeploy.Metadata.Annotations[\"version\"] = s.Version\n\tkdeploy.Metadata.Annotations[\"source\"] = s.Source\n\n\t\/\/ associate owner:group to be later augmented\n\tkdeploy.Metadata.Annotations[\"owner\"] = \"micro\"\n\tkdeploy.Metadata.Annotations[\"group\"] = \"micro\"\n\n\t\/\/ set a build timestamp to the current time\n\tif kdeploy.Spec.Template.Metadata.Annotations == nil {\n\t\tkdeploy.Spec.Template.Metadata.Annotations = make(map[string]string)\n\t}\n\tkdeploy.Spec.Template.Metadata.Annotations[\"build\"] = time.Now().Format(time.RFC3339)\n\n\t\/\/ define the environment values used by the container\n\tenv := make([]client.EnvVar, 0, len(c.Env))\n\tfor _, evar := range c.Env {\n\t\tevarPair := strings.Split(evar, \"=\")\n\t\tenv = append(env, client.EnvVar{Name: evarPair[0], Value: evarPair[1]})\n\t}\n\n\t\/\/ if environment has been supplied update deployment default environment\n\tif len(env) > 0 {\n\t\tkdeploy.Spec.Template.PodSpec.Containers[0].Env = append(kdeploy.Spec.Template.PodSpec.Containers[0].Env, env...)\n\t}\n\n\t\/\/ specify the command to exec\n\tlog.Debug(\"c.Command: \", c.Command)\n\tif len(c.Command) > 0 {\n\t\tkdeploy.Spec.Template.PodSpec.Containers[0].Command = c.Command\n\t}\n\n\treturn &service{\n\t\tService: s,\n\t\tkservice: kservice,\n\t\tkdeploy: kdeploy,\n\t}\n}\n\nfunc deploymentResource(d *client.Deployment) *client.Resource {\n\treturn &client.Resource{\n\t\tName: d.Metadata.Name,\n\t\tKind: \"deployment\",\n\t\tValue: d,\n\t}\n}\n\nfunc serviceResource(s *client.Service) *client.Resource {\n\treturn &client.Resource{\n\t\tName: s.Metadata.Name,\n\t\tKind: \"service\",\n\t\tValue: s,\n\t}\n}\n\n\/\/ Start starts the Kubernetes service. It creates new kubernetes deployment and service API objects\nfunc (s *service) Start(k client.Client) error {\n\t\/\/ create deployment first; if we fail, we dont create service\n\tif err := k.Create(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to create deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\tv := parseError(err)\n\t\tif v.Reason == \"AlreadyExists\" {\n\t\t\treturn runtime.ErrAlreadyExists\n\t\t}\n\t\treturn err\n\t}\n\t\/\/ create service now that the deployment has been created\n\tif err := k.Create(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to create service: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\tv := parseError(err)\n\t\tif v.Reason == \"AlreadyExists\" {\n\t\t\treturn runtime.ErrAlreadyExists\n\t\t}\n\t\treturn err\n\t}\n\n\ts.Status(\"started\", nil)\n\n\treturn nil\n}\n\nfunc (s *service) Stop(k client.Client) error {\n\t\/\/ first attempt to delete service\n\tif err := k.Delete(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to delete service: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\t\/\/ delete deployment once the service has been deleted\n\tif err := k.Delete(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to delete deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\n\ts.Status(\"stopped\", nil)\n\n\treturn nil\n}\n\nfunc (s *service) Update(k client.Client) error {\n\tif err := k.Update(deploymentResource(s.kdeploy)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to update deployment: %v\", err)\n\t\ts.Status(\"error\", err)\n\t\treturn err\n\t}\n\tif err := k.Update(serviceResource(s.kservice)); err != nil {\n\t\tlog.Debugf(\"Runtime failed to update service: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Status(status string, err error) {\n\tif err == nil {\n\t\ts.Metadata[\"status\"] = status\n\t\treturn\n\t}\n\ts.Metadata[\"status\"] = \"error\"\n\ts.Metadata[\"error\"] = err.Error()\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/gin-gonic\/gin\"\n)\n\n\/\/ Version of IronFunctions\nvar Version = \"0.0.40\"\n\nfunc handleVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"version\": Version})\n}\n<commit_msg>functions: 0.0.41 release [skip ci]<commit_after>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/gin-gonic\/gin\"\n)\n\n\/\/ Version of IronFunctions\nvar Version = \"0.0.41\"\n\nfunc handleVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"version\": Version})\n}\n<|endoftext|>"} {"text":"<commit_before>package elasticthought\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/couchbaselabs\/cbfs\/client\"\n\t\"github.com\/couchbaselabs\/logg\"\n)\n\n\/\/ Worker job that splits a dataset into training\/test set\ntype DatasetSplitter struct {\n\tConfiguration Configuration\n\tDataset Dataset\n}\n\n\/\/ Run this job\nfunc (d DatasetSplitter) Run() {\n\n\tswitch d.Dataset.isSplittable() {\n\tcase true:\n\t\td.SplitDatafile()\n\tdefault:\n\t\td.DownloadDatafiles()\n\t}\n\n}\n\nfunc (d DatasetSplitter) SplitDatafile() {\n\n\t\/\/ Find the datafile object associated with dataset\n\tdb := d.Configuration.DbConnection()\n\tdatafile, err := d.Dataset.GetSplittableDatafile(db)\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error looking up datafile: %+v. Error: %v\", d.Dataset, err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Open the url -- content type should be application\/x-gzip\n\ttr, err := openTarGzStream(datafile.Url)\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error opening tar.gz streams: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Create pipes\n\tprTrain, pwTrain := io.Pipe()\n\tprTest, pwTest := io.Pipe()\n\n\t\/\/ Wrap in gzip writers\n\tpwGzTest := gzip.NewWriter(pwTest)\n\tpwGzTrain := gzip.NewWriter(pwTrain)\n\n\t\/\/ Create tar writers on the write end of the pipes\n\ttarWriterTesting := tar.NewWriter(pwGzTest)\n\ttarWriterTraining := tar.NewWriter(pwGzTrain)\n\n\t\/\/ Create a cbfs client\n\tcbfs, err := cbfsclient.New(d.Configuration.CbfsUrl)\n\toptions := cbfsclient.PutOptions{\n\t\tContentType: \"application\/x-gzip\",\n\t}\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error creating cbfs client: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Figure out where to store these on cbfs\n\tdestTraining := d.Dataset.TrainingArtifactPath()\n\tdestTesting := d.Dataset.TestingArtifactPath()\n\n\t\/\/ Spawn a goroutine that will read from tar.gz reader coming from url data\n\t\/\/ and write to the training and test tar writers (which are on write ends of pipe)\n\ttransformDoneChan := make(chan error, 1)\n\tgo func() {\n\n\t\t\/\/ Must close _underlying_ piped writers, or the piped readers will\n\t\t\/\/ never get an EOF. Closing just the tar writers that wrap the underlying\n\t\t\/\/ piped writers is not enough.\n\t\tdefer pwTest.Close()\n\t\tdefer pwTrain.Close()\n\t\tdefer pwGzTest.Close()\n\t\tdefer pwGzTrain.Close()\n\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Calling transform\")\n\t\terr = d.transform(tr, tarWriterTraining, tarWriterTesting)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error transforming tar stream: %v\", err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\ttransformDoneChan <- errMsg\n\t\t\treturn\n\t\t}\n\n\t\ttransformDoneChan <- nil\n\n\t}()\n\n\t\/\/ Spawn goroutines to read off the read ends of the pipe and store in cbfs\n\tcbfsTrainDoneChan := make(chan error, 1)\n\tcbfsTestDoneChan := make(chan error, 1)\n\tgo func() {\n\t\tif err := cbfs.Put(\"\", destTesting, prTest, options); err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", destTesting, err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\tcbfsTestDoneChan <- errMsg\n\t\t\treturn\n\n\t\t}\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", destTesting)\n\t\tcbfsTestDoneChan <- nil\n\t}()\n\tgo func() {\n\t\tif err := cbfs.Put(\"\", destTraining, prTrain, options); err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", destTraining, err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\tcbfsTrainDoneChan <- errMsg\n\t\t\treturn\n\t\t}\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", destTraining)\n\t\tcbfsTrainDoneChan <- nil\n\t}()\n\n\t\/\/ Wait for the results from all the goroutines\n\tcbfsTrainResult := <-cbfsTrainDoneChan\n\tcbfsTestResult := <-cbfsTestDoneChan\n\ttransformResult := <-transformDoneChan\n\n\t\/\/ If any results had an error, log it and return\n\tresults := []error{transformResult, cbfsTestResult, cbfsTrainResult}\n\tfor _, result := range results {\n\t\tif result != nil {\n\t\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Setting dataset to failed: %v\", result)\n\t\t\td.Dataset.Failed(db, fmt.Errorf(\"%v\", result))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Update the state of the dataset to be finished\n\td.Dataset.FinishedSuccessfully(db)\n\n}\n\nfunc (d DatasetSplitter) DownloadDatafiles() {\n\n\t\/\/ Create a cbfs client\n\tcbfs, err := cbfsclient.New(d.Configuration.CbfsUrl)\n\toptions := cbfsclient.PutOptions{\n\t\tContentType: \"application\/x-gzip\",\n\t}\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error creating cbfs client: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\tdb := d.Configuration.DbConnection()\n\n\tsource2destEntries := []struct {\n\t\tUrl string\n\t\tDestPath string\n\t}{\n\t\t{\n\t\t\tUrl: d.Dataset.GetTrainingDatafileUrl(db),\n\t\t\tDestPath: d.Dataset.TrainingArtifactPath(),\n\t\t},\n\t\t{\n\t\t\tUrl: d.Dataset.GetTestingDatafileUrl(db),\n\t\t\tDestPath: d.Dataset.TestingArtifactPath(),\n\t\t},\n\t}\n\n\tfor _, source2destEntry := range source2destEntries {\n\n\t\t\/\/ open tar.gz stream to source\n\t\t\/\/ Open the url -- content type should be application\/x-gzip\n\n\t\tfunc() {\n\n\t\t\tresp, err := http.Get(source2destEntry.Url)\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif err != nil {\n\t\t\t\terrMsg := fmt.Errorf(\"Error opening stream to: %v. Err %v\", source2destEntry.Url, err)\n\t\t\t\td.recordProcessingError(errMsg)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgzipReader, err := gzip.NewReader(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\terrMsg := fmt.Errorf(\"Error opening gz stream to: %v. Err %v\", source2destEntry.Url, err)\n\t\t\t\td.recordProcessingError(errMsg)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttr := tar.NewReader(gzipReader)\n\n\t\t\tif err := cbfs.Put(\"\", source2destEntry.DestPath, tr, options); err != nil {\n\t\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", source2destEntry.DestPath, err)\n\t\t\t\td.recordProcessingError(errMsg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", source2destEntry.DestPath)\n\n\t\t}()\n\n\t}\n\n\t\/\/ Update the state of the dataset to be finished\n\td.Dataset.FinishedSuccessfully(db)\n\n}\n\n\/\/ Codereview: de-dupe\nfunc (d DatasetSplitter) recordProcessingError(err error) {\n\tlogg.LogError(err)\n\tdb := d.Configuration.DbConnection()\n\tif err := d.Dataset.Failed(db, err); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error setting dataset as failed: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t}\n}\n\n\/\/ Read from source tar stream and write training and test to given tar writers\nfunc (d DatasetSplitter) transform(source *tar.Reader, train, test *tar.Writer) error {\n\n\tsplitter := d.splitter(train, test)\n\n\tfor {\n\t\thdr, err := source.Next()\n\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttw := splitter(hdr.Name)\n\n\t\tif err := tw.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.Copy(tw, source)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t\/\/ close writers\n\tif err := train.Close(); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error closing tar writer: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t\treturn err\n\t}\n\tif err := test.Close(); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error closing tar reader: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d DatasetSplitter) splitter(train, test *tar.Writer) func(string) *tar.Writer {\n\n\ttrainingRatio := int(d.Dataset.TrainingDataset.SplitPercentage * 100)\n\ttestRatio := int(d.Dataset.TestDataset.SplitPercentage * 100)\n\n\tratio := [2]int{trainingRatio, testRatio}\n\n\tdircounts := make(map[string][2]int)\n\treturn func(file string) *tar.Writer {\n\t\tdir := path.Dir(file)\n\t\tcounts := dircounts[dir]\n\t\tcount0ratio1 := counts[0] * ratio[1]\n\t\tcount1ratio0 := counts[1] * ratio[0]\n\t\tif count0ratio1 <= count1ratio0 {\n\t\t\tcounts[0]++\n\t\t\tdircounts[dir] = counts\n\t\t\treturn train\n\t\t} else {\n\t\t\tcounts[1]++\n\t\t\tdircounts[dir] = counts\n\t\t\treturn test\n\t\t}\n\t}\n}\n\n\/\/ Validate that the source tar stream conforms to expected specs\nfunc (d DatasetSplitter) validate(source *tar.Reader) (bool, error) {\n\n\t\/\/ validation rules:\n\t\/\/ 1. has at least 2 files\n\t\/\/ 2. the depth of each file is 2 (folder\/filename.xxx)\n\n\tnumFiles := 0\n\tfor {\n\t\thdr, err := source.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tnumFiles += 1\n\n\t\tpathComponents := strings.Split(hdr.Name, \"\/\")\n\t\tif len(pathComponents) != 2 {\n\t\t\treturn false, fmt.Errorf(\"Path does not have 2 components: %v\", hdr.Name)\n\t\t}\n\n\t}\n\n\tif numFiles < 2 {\n\t\treturn false, fmt.Errorf(\"Archive must contain at least 2 files\")\n\t}\n\n\treturn true, nil\n}\n<commit_msg>fix dataset splitter copy datafiles<commit_after>package elasticthought\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/couchbaselabs\/cbfs\/client\"\n\t\"github.com\/couchbaselabs\/logg\"\n)\n\n\/\/ Worker job that splits a dataset into training\/test set\ntype DatasetSplitter struct {\n\tConfiguration Configuration\n\tDataset Dataset\n}\n\n\/\/ Run this job\nfunc (d DatasetSplitter) Run() {\n\n\tswitch d.Dataset.isSplittable() {\n\tcase true:\n\t\td.SplitDatafile()\n\tdefault:\n\t\td.DownloadDatafiles()\n\t}\n\n}\n\nfunc (d DatasetSplitter) SplitDatafile() {\n\n\t\/\/ Find the datafile object associated with dataset\n\tdb := d.Configuration.DbConnection()\n\tdatafile, err := d.Dataset.GetSplittableDatafile(db)\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error looking up datafile: %+v. Error: %v\", d.Dataset, err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Open the url -- content type should be application\/x-gzip\n\ttr, err := openTarGzStream(datafile.Url)\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error opening tar.gz streams: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Create pipes\n\tprTrain, pwTrain := io.Pipe()\n\tprTest, pwTest := io.Pipe()\n\n\t\/\/ Wrap in gzip writers\n\tpwGzTest := gzip.NewWriter(pwTest)\n\tpwGzTrain := gzip.NewWriter(pwTrain)\n\n\t\/\/ Create tar writers on the write end of the pipes\n\ttarWriterTesting := tar.NewWriter(pwGzTest)\n\ttarWriterTraining := tar.NewWriter(pwGzTrain)\n\n\t\/\/ Create a cbfs client\n\tcbfs, err := cbfsclient.New(d.Configuration.CbfsUrl)\n\toptions := cbfsclient.PutOptions{\n\t\tContentType: \"application\/x-gzip\",\n\t}\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error creating cbfs client: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\t\/\/ Figure out where to store these on cbfs\n\tdestTraining := d.Dataset.TrainingArtifactPath()\n\tdestTesting := d.Dataset.TestingArtifactPath()\n\n\t\/\/ Spawn a goroutine that will read from tar.gz reader coming from url data\n\t\/\/ and write to the training and test tar writers (which are on write ends of pipe)\n\ttransformDoneChan := make(chan error, 1)\n\tgo func() {\n\n\t\t\/\/ Must close _underlying_ piped writers, or the piped readers will\n\t\t\/\/ never get an EOF. Closing just the tar writers that wrap the underlying\n\t\t\/\/ piped writers is not enough.\n\t\tdefer pwTest.Close()\n\t\tdefer pwTrain.Close()\n\t\tdefer pwGzTest.Close()\n\t\tdefer pwGzTrain.Close()\n\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Calling transform\")\n\t\terr = d.transform(tr, tarWriterTraining, tarWriterTesting)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error transforming tar stream: %v\", err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\ttransformDoneChan <- errMsg\n\t\t\treturn\n\t\t}\n\n\t\ttransformDoneChan <- nil\n\n\t}()\n\n\t\/\/ Spawn goroutines to read off the read ends of the pipe and store in cbfs\n\tcbfsTrainDoneChan := make(chan error, 1)\n\tcbfsTestDoneChan := make(chan error, 1)\n\tgo func() {\n\t\tif err := cbfs.Put(\"\", destTesting, prTest, options); err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", destTesting, err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\tcbfsTestDoneChan <- errMsg\n\t\t\treturn\n\n\t\t}\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", destTesting)\n\t\tcbfsTestDoneChan <- nil\n\t}()\n\tgo func() {\n\t\tif err := cbfs.Put(\"\", destTraining, prTrain, options); err != nil {\n\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", destTraining, err)\n\t\t\tlogg.LogError(errMsg)\n\t\t\tcbfsTrainDoneChan <- errMsg\n\t\t\treturn\n\t\t}\n\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", destTraining)\n\t\tcbfsTrainDoneChan <- nil\n\t}()\n\n\t\/\/ Wait for the results from all the goroutines\n\tcbfsTrainResult := <-cbfsTrainDoneChan\n\tcbfsTestResult := <-cbfsTestDoneChan\n\ttransformResult := <-transformDoneChan\n\n\t\/\/ If any results had an error, log it and return\n\tresults := []error{transformResult, cbfsTestResult, cbfsTrainResult}\n\tfor _, result := range results {\n\t\tif result != nil {\n\t\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Setting dataset to failed: %v\", result)\n\t\t\td.Dataset.Failed(db, fmt.Errorf(\"%v\", result))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Update the state of the dataset to be finished\n\td.Dataset.FinishedSuccessfully(db)\n\n}\n\nfunc (d DatasetSplitter) DownloadDatafiles() {\n\n\t\/\/ Create a cbfs client\n\tcbfs, err := cbfsclient.New(d.Configuration.CbfsUrl)\n\toptions := cbfsclient.PutOptions{\n\t\tContentType: \"application\/x-gzip\",\n\t}\n\tif err != nil {\n\t\terrMsg := fmt.Errorf(\"Error creating cbfs client: %v\", err)\n\t\td.recordProcessingError(errMsg)\n\t\treturn\n\t}\n\n\tdb := d.Configuration.DbConnection()\n\n\tsource2destEntries := []struct {\n\t\tUrl string\n\t\tDestPath string\n\t}{\n\t\t{\n\t\t\tUrl: d.Dataset.GetTrainingDatafileUrl(db),\n\t\t\tDestPath: d.Dataset.TrainingArtifactPath(),\n\t\t},\n\t\t{\n\t\t\tUrl: d.Dataset.GetTestingDatafileUrl(db),\n\t\t\tDestPath: d.Dataset.TestingArtifactPath(),\n\t\t},\n\t}\n\n\tfor _, source2destEntry := range source2destEntries {\n\n\t\t\/\/ open tar.gz stream to source\n\t\t\/\/ Open the url -- content type should be application\/x-gzip\n\n\t\tfunc() {\n\n\t\t\tresp, err := http.Get(source2destEntry.Url)\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif err != nil {\n\t\t\t\terrMsg := fmt.Errorf(\"Error opening stream to: %v. Err %v\", source2destEntry.Url, err)\n\t\t\t\td.recordProcessingError(errMsg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := cbfs.Put(\"\", source2destEntry.DestPath, resp.Body, options); err != nil {\n\t\t\t\terrMsg := fmt.Errorf(\"Error writing %v to cbfs: %v\", source2destEntry.DestPath, err)\n\t\t\t\td.recordProcessingError(errMsg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogg.LogTo(\"DATASET_SPLITTER\", \"Wrote %v to cbfs\", source2destEntry.DestPath)\n\n\t\t}()\n\n\t}\n\n\t\/\/ Update the state of the dataset to be finished\n\td.Dataset.FinishedSuccessfully(db)\n\n}\n\n\/\/ Codereview: de-dupe\nfunc (d DatasetSplitter) recordProcessingError(err error) {\n\tlogg.LogError(err)\n\tdb := d.Configuration.DbConnection()\n\tif err := d.Dataset.Failed(db, err); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error setting dataset as failed: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t}\n}\n\n\/\/ Read from source tar stream and write training and test to given tar writers\nfunc (d DatasetSplitter) transform(source *tar.Reader, train, test *tar.Writer) error {\n\n\tsplitter := d.splitter(train, test)\n\n\tfor {\n\t\thdr, err := source.Next()\n\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttw := splitter(hdr.Name)\n\n\t\tif err := tw.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.Copy(tw, source)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t\/\/ close writers\n\tif err := train.Close(); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error closing tar writer: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t\treturn err\n\t}\n\tif err := test.Close(); err != nil {\n\t\terrMsg := fmt.Errorf(\"Error closing tar reader: %v\", err)\n\t\tlogg.LogError(errMsg)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d DatasetSplitter) splitter(train, test *tar.Writer) func(string) *tar.Writer {\n\n\ttrainingRatio := int(d.Dataset.TrainingDataset.SplitPercentage * 100)\n\ttestRatio := int(d.Dataset.TestDataset.SplitPercentage * 100)\n\n\tratio := [2]int{trainingRatio, testRatio}\n\n\tdircounts := make(map[string][2]int)\n\treturn func(file string) *tar.Writer {\n\t\tdir := path.Dir(file)\n\t\tcounts := dircounts[dir]\n\t\tcount0ratio1 := counts[0] * ratio[1]\n\t\tcount1ratio0 := counts[1] * ratio[0]\n\t\tif count0ratio1 <= count1ratio0 {\n\t\t\tcounts[0]++\n\t\t\tdircounts[dir] = counts\n\t\t\treturn train\n\t\t} else {\n\t\t\tcounts[1]++\n\t\t\tdircounts[dir] = counts\n\t\t\treturn test\n\t\t}\n\t}\n}\n\n\/\/ Validate that the source tar stream conforms to expected specs\nfunc (d DatasetSplitter) validate(source *tar.Reader) (bool, error) {\n\n\t\/\/ validation rules:\n\t\/\/ 1. has at least 2 files\n\t\/\/ 2. the depth of each file is 2 (folder\/filename.xxx)\n\n\tnumFiles := 0\n\tfor {\n\t\thdr, err := source.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tnumFiles += 1\n\n\t\tpathComponents := strings.Split(hdr.Name, \"\/\")\n\t\tif len(pathComponents) != 2 {\n\t\t\treturn false, fmt.Errorf(\"Path does not have 2 components: %v\", hdr.Name)\n\t\t}\n\n\t}\n\n\tif numFiles < 2 {\n\t\treturn false, fmt.Errorf(\"Archive must contain at least 2 files\")\n\t}\n\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * @file receiver.go\n * @author Mikhail Klementyev jollheef<AT>riseup.net\n * @license GNU AGPLv3\n * @date September, 2015\n * @brief routine for receive flags from commands\n *\n * Provide tcp server for receive flags. After receive flag daemon perform\n * validate flag, check flag round and write result to db.\n *\/\n\npackage receiver\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rsa\"\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n)\n\nimport (\n\t\"github.com\/jollheef\/tin_foil_hat\/steward\"\n\t\"github.com\/jollheef\/tin_foil_hat\/vexillary\"\n)\n\nconst (\n\tgreetingMsg string = \"IBST.PSU CTF Flag Receiver\\nInput flag: \"\n\tinvalidFlagMsg string = \"Invalid flag\\n\"\n\talreadyCapturedMsg string = \"Flag already captured\\n\"\n\tcapturedMsg string = \"Captured!\\n\"\n\tinternalErrorMsg string = \"Internal error\\n\"\n\tflagDoesNotExistMsg string = \"Flag does not exist\\n\"\n\tflagExpiredMsg string = \"Flag expired\\n\"\n\tinvalidTeamMsg string = \"Team does not exist\\n\"\n\tattemptsLimitMsg string = \"Attack attempts limit exceeded\\n\"\n\tflagYoursMsg string = \"Flag belongs to the attacking team\\n\"\n\tserviceNotUpMsg string = \"The attacking team service is not up\\n\"\n)\n\nfunc parseAddr(addr string) (subnetNo int, err error) {\n\n\t_, err = fmt.Sscanf(strings.Split(addr, \".\")[2], \"%d\", &subnetNo)\n\n\treturn\n}\n\nfunc teamByAddr(db *sql.DB, addr string) (team steward.Team, err error) {\n\n\tsubnetNo, err := parseAddr(addr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tteams, err := steward.GetTeams(db)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 0; i < len(teams); i++ {\n\n\t\tteam = teams[i]\n\n\t\tteamSubnetNo, err := parseAddr(team.Subnet)\n\t\tif err != nil {\n\t\t\treturn team, err\n\t\t}\n\n\t\tif teamSubnetNo == subnetNo {\n\t\t\treturn team, err\n\t\t}\n\t}\n\n\terr = errors.New(\"team not found\")\n\n\treturn\n}\n\nfunc handler(conn net.Conn, db *sql.DB, priv *rsa.PrivateKey) {\n\n\taddr := conn.RemoteAddr().String()\n\n\tdefer conn.Close()\n\n\tfmt.Fprint(conn, greetingMsg)\n\n\tflag, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\tlog.Println(\"Read error:\", err)\n\t}\n\n\tflag = strings.Trim(flag, \"\\n\")\n\n\tlog.Printf(\"\\tGet flag %s from %s\", flag, addr)\n\n\tvalid, err := vexillary.ValidFlag(flag, priv.PublicKey)\n\tif err != nil {\n\t\tlog.Println(\"\\tValidate flag failed:\", err)\n\t}\n\tif !valid {\n\t\tfmt.Fprint(conn, invalidFlagMsg)\n\t\treturn\n\t}\n\n\texist, err := steward.FlagExist(db, flag)\n\tif err != nil {\n\t\tlog.Println(\"\\tExist flag check failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\tif !exist {\n\t\tfmt.Fprint(conn, flagDoesNotExistMsg)\n\t\treturn\n\t}\n\n\tflg, err := steward.GetFlagInfo(db, flag)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet flag info failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\n\tcaptured, err := steward.AlreadyCaptured(db, flg.ID)\n\tif err != nil {\n\t\tlog.Println(\"\\tAlready captured check failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\tif captured {\n\t\tfmt.Fprint(conn, alreadyCapturedMsg)\n\t\treturn\n\t}\n\n\tteam, err := teamByAddr(db, addr)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet team by ip failed:\", err)\n\t\tfmt.Fprint(conn, invalidTeamMsg)\n\t\treturn\n\t}\n\n\tif flg.TeamID == team.ID {\n\t\tlog.Printf(\"\\tTeam %s try to send their flag\", team.Name)\n\t\tfmt.Fprint(conn, flagYoursMsg)\n\t\treturn\n\t}\n\n\thalfStatus := steward.Status{flg.Round, team.ID, flg.ServiceID,\n\t\tsteward.StatusUnknown}\n\tstate, err := steward.GetState(db, halfStatus)\n\n\tif state != steward.StatusUP {\n\t\tlog.Printf(\"\\t%s service not ok, cannot capture\", team.Name)\n\t\tfmt.Fprint(conn, serviceNotUpMsg)\n\t\treturn\n\t}\n\n\tround, err := steward.CurrentRound(db)\n\n\tif round.ID != flg.Round {\n\t\tlog.Printf(\"\\t%s try to send flag from past round\", team.Name)\n\t\tfmt.Fprint(conn, flagExpiredMsg)\n\t\treturn\n\t}\n\n\troundEndTime := round.StartTime.Add(round.Len)\n\n\tif time.Now().After(roundEndTime) {\n\t\tlog.Printf(\"\\t%s try to send flag from finished round\", team.Name)\n\t\tfmt.Fprint(conn, flagExpiredMsg)\n\t\treturn\n\t}\n\n\terr = steward.CaptureFlag(db, flg.ID, team.ID)\n\tif err != nil {\n\t\tlog.Println(\"\\tCapture flag failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, capturedMsg)\n}\n\n\/\/ FlagReceiver starts flag receiver\nfunc FlagReceiver(db *sql.DB, priv *rsa.PrivateKey, addr string,\n\ttimeout, socketTimeout time.Duration) {\n\n\tlog.Println(\"Launching receiver at\", addr, \"...\")\n\n\tconnects := make(map[string]time.Time) \/\/ { ip : last_connect_time }\n\n\tlistener, _ := net.Listen(\"tcp\", addr)\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\n\t\taddr := conn.RemoteAddr().String()\n\n\t\tlog.Printf(\"Connection accepted from %s\", addr)\n\n\t\tip, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"\\tCannot split remote addr:\", err)\n\t\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tif time.Now().Before(connects[ip].Add(timeout)) {\n\t\t\tlog.Println(\"\\tToo fast connects by\", ip)\n\t\t\tfmt.Fprint(conn, attemptsLimitMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\terr = conn.SetDeadline(time.Now().Add(socketTimeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Set deadline fail:\", err)\n\t\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tgo handler(conn, db, priv)\n\n\t\tconnects[ip] = time.Now()\n\t}\n}\n<commit_msg>Do not panic on parse error<commit_after>\/**\n * @file receiver.go\n * @author Mikhail Klementyev jollheef<AT>riseup.net\n * @license GNU AGPLv3\n * @date September, 2015\n * @brief routine for receive flags from commands\n *\n * Provide tcp server for receive flags. After receive flag daemon perform\n * validate flag, check flag round and write result to db.\n *\/\n\npackage receiver\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rsa\"\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n)\n\nimport (\n\t\"github.com\/jollheef\/tin_foil_hat\/steward\"\n\t\"github.com\/jollheef\/tin_foil_hat\/vexillary\"\n)\n\nconst (\n\tgreetingMsg string = \"IBST.PSU CTF Flag Receiver\\nInput flag: \"\n\tinvalidFlagMsg string = \"Invalid flag\\n\"\n\talreadyCapturedMsg string = \"Flag already captured\\n\"\n\tcapturedMsg string = \"Captured!\\n\"\n\tinternalErrorMsg string = \"Internal error\\n\"\n\tflagDoesNotExistMsg string = \"Flag does not exist\\n\"\n\tflagExpiredMsg string = \"Flag expired\\n\"\n\tinvalidTeamMsg string = \"Team does not exist\\n\"\n\tattemptsLimitMsg string = \"Attack attempts limit exceeded\\n\"\n\tflagYoursMsg string = \"Flag belongs to the attacking team\\n\"\n\tserviceNotUpMsg string = \"The attacking team service is not up\\n\"\n)\n\nfunc parseAddr(addr string) (subnetNo int, err error) {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.New(\"Cannot parse '\" + addr + \"'\")\n\t\t}\n\t}()\n\t_, err = fmt.Sscanf(strings.Split(addr, \".\")[2], \"%d\", &subnetNo)\n\n\treturn\n}\n\nfunc teamByAddr(db *sql.DB, addr string) (team steward.Team, err error) {\n\n\tsubnetNo, err := parseAddr(addr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tteams, err := steward.GetTeams(db)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 0; i < len(teams); i++ {\n\n\t\tteam = teams[i]\n\n\t\tteamSubnetNo, err := parseAddr(team.Subnet)\n\t\tif err != nil {\n\t\t\treturn team, err\n\t\t}\n\n\t\tif teamSubnetNo == subnetNo {\n\t\t\treturn team, err\n\t\t}\n\t}\n\n\terr = errors.New(\"team not found\")\n\n\treturn\n}\n\nfunc handler(conn net.Conn, db *sql.DB, priv *rsa.PrivateKey) {\n\n\taddr := conn.RemoteAddr().String()\n\n\tdefer conn.Close()\n\n\tfmt.Fprint(conn, greetingMsg)\n\n\tflag, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\tlog.Println(\"Read error:\", err)\n\t}\n\n\tflag = strings.Trim(flag, \"\\n\")\n\n\tlog.Printf(\"\\tGet flag %s from %s\", flag, addr)\n\n\tvalid, err := vexillary.ValidFlag(flag, priv.PublicKey)\n\tif err != nil {\n\t\tlog.Println(\"\\tValidate flag failed:\", err)\n\t}\n\tif !valid {\n\t\tfmt.Fprint(conn, invalidFlagMsg)\n\t\treturn\n\t}\n\n\texist, err := steward.FlagExist(db, flag)\n\tif err != nil {\n\t\tlog.Println(\"\\tExist flag check failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\tif !exist {\n\t\tfmt.Fprint(conn, flagDoesNotExistMsg)\n\t\treturn\n\t}\n\n\tflg, err := steward.GetFlagInfo(db, flag)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet flag info failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\n\tcaptured, err := steward.AlreadyCaptured(db, flg.ID)\n\tif err != nil {\n\t\tlog.Println(\"\\tAlready captured check failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\tif captured {\n\t\tfmt.Fprint(conn, alreadyCapturedMsg)\n\t\treturn\n\t}\n\n\tteam, err := teamByAddr(db, addr)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet team by ip failed:\", err)\n\t\tfmt.Fprint(conn, invalidTeamMsg)\n\t\treturn\n\t}\n\n\tif flg.TeamID == team.ID {\n\t\tlog.Printf(\"\\tTeam %s try to send their flag\", team.Name)\n\t\tfmt.Fprint(conn, flagYoursMsg)\n\t\treturn\n\t}\n\n\thalfStatus := steward.Status{flg.Round, team.ID, flg.ServiceID,\n\t\tsteward.StatusUnknown}\n\tstate, err := steward.GetState(db, halfStatus)\n\n\tif state != steward.StatusUP {\n\t\tlog.Printf(\"\\t%s service not ok, cannot capture\", team.Name)\n\t\tfmt.Fprint(conn, serviceNotUpMsg)\n\t\treturn\n\t}\n\n\tround, err := steward.CurrentRound(db)\n\n\tif round.ID != flg.Round {\n\t\tlog.Printf(\"\\t%s try to send flag from past round\", team.Name)\n\t\tfmt.Fprint(conn, flagExpiredMsg)\n\t\treturn\n\t}\n\n\troundEndTime := round.StartTime.Add(round.Len)\n\n\tif time.Now().After(roundEndTime) {\n\t\tlog.Printf(\"\\t%s try to send flag from finished round\", team.Name)\n\t\tfmt.Fprint(conn, flagExpiredMsg)\n\t\treturn\n\t}\n\n\terr = steward.CaptureFlag(db, flg.ID, team.ID)\n\tif err != nil {\n\t\tlog.Println(\"\\tCapture flag failed:\", err)\n\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, capturedMsg)\n}\n\n\/\/ FlagReceiver starts flag receiver\nfunc FlagReceiver(db *sql.DB, priv *rsa.PrivateKey, addr string,\n\ttimeout, socketTimeout time.Duration) {\n\n\tlog.Println(\"Launching receiver at\", addr, \"...\")\n\n\tconnects := make(map[string]time.Time) \/\/ { ip : last_connect_time }\n\n\tlistener, _ := net.Listen(\"tcp\", addr)\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\n\t\taddr := conn.RemoteAddr().String()\n\n\t\tlog.Printf(\"Connection accepted from %s\", addr)\n\n\t\tip, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"\\tCannot split remote addr:\", err)\n\t\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tif time.Now().Before(connects[ip].Add(timeout)) {\n\t\t\tlog.Println(\"\\tToo fast connects by\", ip)\n\t\t\tfmt.Fprint(conn, attemptsLimitMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\terr = conn.SetDeadline(time.Now().Add(socketTimeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Set deadline fail:\", err)\n\t\t\tfmt.Fprint(conn, internalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tgo handler(conn, db, priv)\n\n\t\tconnects[ip] = time.Now()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package bridge\n\nimport (\n\t\"fmt\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/go-utils\/cmdex\"\n)\n\n\/\/ \/\/ BitriseRunOrTrigger ...\n\/\/ func BitriseRunOrTrigger(inventoryPth, configPth, workflowNameOrTriggerPattern string, isUseTrigger bool) error {\n\/\/ \tlogLevel := log.GetLevel().String()\n\/\/ bitriseCommandToUse := \"run\"\n\/\/ if isUseTrigger {\n\/\/ \tbitriseCommandToUse = \"trigger\"\n\/\/ }\n\/\/ \targs := []string{\"--loglevel\", logLevel, bitriseCommandToUse, workflowNameOrTriggerPattern, \"--path\", configPth}\n\/\/ \tif inventoryPth != \"\" {\n\/\/ \t\targs = append(args, \"--inventory\", inventoryPth)\n\/\/ \t}\n\/\/ \treturn cmdex.RunCommand(\"bitrise\", args...)\n\/\/ }\n\n\/\/ CMDBridgeDoBitriseRunOrTrigger ...\nfunc CMDBridgeDoBitriseRunOrTrigger(inventoryPth, configPth, workflowNameOrTriggerPattern string, isUseTrigger bool, workdirPath string) error {\n\tlogLevel := log.GetLevel().String()\n\n\tbitriseCommandToUse := \"run\"\n\tif isUseTrigger {\n\t\tbitriseCommandToUse = \"trigger\"\n\t}\n\n\tparams := fmt.Sprintf(\"bitrise --loglevel %s %s %s --path %s\", logLevel, bitriseCommandToUse, workflowNameOrTriggerPattern, configPth)\n\tif inventoryPth != \"\" {\n\t\tparams = params + fmt.Sprintf(\" --inventory %s\", inventoryPth)\n\t}\n\n\targs := []string{\"-workdir\", workdirPath, \"-do\", params}\n\tlog.Infof(\"args: %#v\", args)\n\n\treturn cmdex.RunCommand(\"cmd-bridge\", args...)\n}\n<commit_msg>removed debug log<commit_after>package bridge\n\nimport (\n\t\"fmt\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/go-utils\/cmdex\"\n)\n\n\/\/ \/\/ BitriseRunOrTrigger ...\n\/\/ func BitriseRunOrTrigger(inventoryPth, configPth, workflowNameOrTriggerPattern string, isUseTrigger bool) error {\n\/\/ \tlogLevel := log.GetLevel().String()\n\/\/ bitriseCommandToUse := \"run\"\n\/\/ if isUseTrigger {\n\/\/ \tbitriseCommandToUse = \"trigger\"\n\/\/ }\n\/\/ \targs := []string{\"--loglevel\", logLevel, bitriseCommandToUse, workflowNameOrTriggerPattern, \"--path\", configPth}\n\/\/ \tif inventoryPth != \"\" {\n\/\/ \t\targs = append(args, \"--inventory\", inventoryPth)\n\/\/ \t}\n\/\/ \treturn cmdex.RunCommand(\"bitrise\", args...)\n\/\/ }\n\n\/\/ CMDBridgeDoBitriseRunOrTrigger ...\nfunc CMDBridgeDoBitriseRunOrTrigger(inventoryPth, configPth, workflowNameOrTriggerPattern string, isUseTrigger bool, workdirPath string) error {\n\tlogLevel := log.GetLevel().String()\n\n\tbitriseCommandToUse := \"run\"\n\tif isUseTrigger {\n\t\tbitriseCommandToUse = \"trigger\"\n\t}\n\n\tparams := fmt.Sprintf(\"bitrise --loglevel %s %s %s --path %s\", logLevel, bitriseCommandToUse, workflowNameOrTriggerPattern, configPth)\n\tif inventoryPth != \"\" {\n\t\tparams = params + fmt.Sprintf(\" --inventory %s\", inventoryPth)\n\t}\n\n\targs := []string{\"-workdir\", workdirPath, \"-do\", params}\n\n\treturn cmdex.RunCommand(\"cmd-bridge\", args...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/engine\/standard\"\n\t\"github.com\/labstack\/echo\/middleware\"\n)\n\nfunc login(c echo.Context) error {\n\tusername := c.FormValue(\"username\")\n\tpassword := c.FormValue(\"password\")\n\n\tif username == \"jon\" && password == \"shhh!\" {\n\t\t\/\/ Create token\n\t\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t\t\/\/ Set claims\n\t\ttoken.Claims[\"name\"] = \"Jon Snow\"\n\t\ttoken.Claims[\"admin\"] = true\n\t\ttoken.Claims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\n\t\t\/\/ Generate encoded token and send it as response.\n\t\tt, err := token.SignedString([]byte(\"secret\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn c.JSON(http.StatusOK, map[string]string{\n\t\t\t\"token\": t,\n\t\t})\n\t}\n\n\treturn echo.ErrUnauthorized\n}\n\nfunc accessible(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"Accessible\")\n}\n\nfunc restricted(c echo.Context) error {\n\tuser := c.Get(\"user\").(*jwt.Token)\n\tname := user.Claims[\"name\"].(string)\n\treturn c.String(http.StatusOK, \"Welcome \"+name+\"!\")\n}\n\nfunc main() {\n\te := echo.New()\n\n\t\/\/ Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t\/\/ Login route\n\te.POST(\"\/login\", login)\n\n\t\/\/ Unauthenticated route\n\te.GET(\"\/\", accessible)\n\n\t\/\/ Restricted group\n\tr := e.Group(\"\/restricted\")\n\tr.Use(middleware.JWT([]byte(\"secret\")))\n\tr.GET(\"\", restricted)\n\n\te.Run(standard.New(\":1323\"))\n}\n<commit_msg>Add JWT lib v3 compatibility<commit_after>package main\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/engine\/standard\"\n\t\"github.com\/labstack\/echo\/middleware\"\n)\n\nfunc login(c echo.Context) error {\n\tusername := c.FormValue(\"username\")\n\tpassword := c.FormValue(\"password\")\n\n\tif username == \"jon\" && password == \"shhh!\" {\n\t\t\/\/ Create token\n\t\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t\t\/\/ Set claims\n\t\tclaims := token.Claims.(jwt.MapClaims)\n\t\tclaims[\"name\"] = \"Jon Snow\"\n\t\tclaims[\"admin\"] = true\n\t\tclaims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\n\t\t\/\/ Generate encoded token and send it as response.\n\t\tt, err := token.SignedString([]byte(\"secret\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn c.JSON(http.StatusOK, map[string]string{\n\t\t\t\"token\": t,\n\t\t})\n\t}\n\n\treturn echo.ErrUnauthorized\n}\n\nfunc accessible(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"Accessible\")\n}\n\nfunc restricted(c echo.Context) error {\n\tuser := c.Get(\"user\").(*jwt.Token)\n\tclaims := user.Claims.(jwt.MapClaims)\n\tname := claims[\"name\"].(string)\n\treturn c.String(http.StatusOK, \"Welcome \"+name+\"!\")\n}\n\nfunc main() {\n\te := echo.New()\n\n\t\/\/ Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t\/\/ Login route\n\te.POST(\"\/login\", login)\n\n\t\/\/ Unauthenticated route\n\te.GET(\"\/\", accessible)\n\n\t\/\/ Restricted group\n\tr := e.Group(\"\/restricted\")\n\tr.Use(middleware.JWT([]byte(\"secret\")))\n\tr.GET(\"\", restricted)\n\n\te.Run(standard.New(\":1323\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package cache\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/astaxie\/beego\/cache\"\n\t\"github.com\/beego\/redigo\/redis\"\n\t\"time\"\n)\n\nvar (\n\t\/\/ the collection name of redis for cache adapter.\n\tDefaultKey string = \"cache\"\n)\n\n\/\/ Redis cache adapter.\ntype RedisCache struct {\n\tp *redis.Pool \/\/ redis connection pool\n\tconninfo string\n\tkey string\n}\n\n\/\/ create new redis cache with default collection name.\nfunc NewRedisCache() *RedisCache {\n\treturn &RedisCache{key: DefaultKey}\n}\n\n\/\/ actually do the redis cmds\nfunc (rc *RedisCache) do(commandName string, args ...interface{}) (reply interface{}, err error) {\n\tc := rc.p.Get()\n\tdefer c.Close()\n\n\treturn c.Do(commandName, args...)\n}\n\n\/\/ Get cache from redis.\nfunc (rc *RedisCache) Get(key string) interface{} {\n\tv, err := rc.do(\"GET\", rc.key+key)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn v\n}\n\n\/\/ put cache to redis.\nfunc (rc *RedisCache) Put(key string, val interface{}, timeout int64) error {\n\t_, err := rc.do(\"SETEX\", rc.key+key, timeout, val)\n\treturn err\n}\n\n\/\/ delete cache in redis.\nfunc (rc *RedisCache) Delete(key string) error {\n\t_, err := rc.do(\"DEL\", rc.key+key)\n\treturn err\n}\n\n\/\/ check cache exist in redis.\nfunc (rc *RedisCache) IsExist(key string) bool {\n\tv, err := redis.Bool(rc.do(\"EXISTS\", rc.key+key))\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn v\n}\n\n\/\/ increase counter in redis.\nfunc (rc *RedisCache) Incr(key string) error {\n\t_, err := redis.Bool(rc.do(\"INCRBY\", rc.key+key, 1))\n\treturn err\n}\n\n\/\/ decrease counter in redis.\nfunc (rc *RedisCache) Decr(key string) error {\n\t_, err := redis.Bool(rc.do(\"INCRBY\", rc.key+key, -1))\n\treturn err\n}\n\n\/\/ clean all cache in redis. delete this redis collection.\nfunc (rc *RedisCache) ClearAll() error {\n\t_, err := rc.do(\"FLUSHDB\")\n\treturn err\n}\n\n\/\/ start redis cache adapter.\n\/\/ config is like {\"key\":\"collection key\",\"conn\":\"connection info\"}\n\/\/ the cache item in redis are stored forever,\n\/\/ so no gc operation.\nfunc (rc *RedisCache) StartAndGC(config string) error {\n\tvar cf map[string]string\n\tjson.Unmarshal([]byte(config), &cf)\n\n\tif _, ok := cf[\"key\"]; !ok {\n\t\tcf[\"key\"] = DefaultKey\n\t}\n\n\tif _, ok := cf[\"conn\"]; !ok {\n\t\treturn errors.New(\"config has no conn key\")\n\t}\n\n\trc.key = cf[\"key\"]\n\trc.conninfo = cf[\"conn\"]\n\trc.connectInit()\n\n\tc := rc.p.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ connect to redis.\nfunc (rc *RedisCache) connectInit() {\n\t\/\/ initialize a new pool\n\trc.p = &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 180 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", rc.conninfo)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t},\n\t}\n}\n\nfunc init() {\n\tcache.Register(\"redis\", NewRedisCache())\n}\n<commit_msg>Redis Cache 默认前缀改成cache:, 增加可读性<commit_after>package cache\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/astaxie\/beego\/cache\"\n\t\"github.com\/beego\/redigo\/redis\"\n\t\"time\"\n)\n\nvar (\n\t\/\/ the collection name of redis for cache adapter.\n\tDefaultKey string = \"cache:\"\n)\n\n\/\/ Redis cache adapter.\ntype RedisCache struct {\n\tp *redis.Pool \/\/ redis connection pool\n\tconninfo string\n\tkey string\n}\n\n\/\/ create new redis cache with default collection name.\nfunc NewRedisCache() *RedisCache {\n\treturn &RedisCache{key: DefaultKey}\n}\n\n\/\/ actually do the redis cmds\nfunc (rc *RedisCache) do(commandName string, args ...interface{}) (reply interface{}, err error) {\n\tc := rc.p.Get()\n\tdefer c.Close()\n\n\treturn c.Do(commandName, args...)\n}\n\n\/\/ Get cache from redis.\nfunc (rc *RedisCache) Get(key string) interface{} {\n\tv, err := rc.do(\"GET\", rc.key+key)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn v\n}\n\n\/\/ put cache to redis.\nfunc (rc *RedisCache) Put(key string, val interface{}, timeout int64) error {\n\t_, err := rc.do(\"SETEX\", rc.key+key, timeout, val)\n\treturn err\n}\n\n\/\/ delete cache in redis.\nfunc (rc *RedisCache) Delete(key string) error {\n\t_, err := rc.do(\"DEL\", rc.key+key)\n\treturn err\n}\n\n\/\/ check cache exist in redis.\nfunc (rc *RedisCache) IsExist(key string) bool {\n\tv, err := redis.Bool(rc.do(\"EXISTS\", rc.key+key))\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn v\n}\n\n\/\/ increase counter in redis.\nfunc (rc *RedisCache) Incr(key string) error {\n\t_, err := redis.Bool(rc.do(\"INCRBY\", rc.key+key, 1))\n\treturn err\n}\n\n\/\/ decrease counter in redis.\nfunc (rc *RedisCache) Decr(key string) error {\n\t_, err := redis.Bool(rc.do(\"INCRBY\", rc.key+key, -1))\n\treturn err\n}\n\n\/\/ clean all cache in redis. delete this redis collection.\nfunc (rc *RedisCache) ClearAll() error {\n\t_, err := rc.do(\"FLUSHDB\")\n\treturn err\n}\n\n\/\/ start redis cache adapter.\n\/\/ config is like {\"key\":\"collection key\",\"conn\":\"connection info\"}\n\/\/ the cache item in redis are stored forever,\n\/\/ so no gc operation.\nfunc (rc *RedisCache) StartAndGC(config string) error {\n\tvar cf map[string]string\n\tjson.Unmarshal([]byte(config), &cf)\n\n\tif _, ok := cf[\"key\"]; !ok {\n\t\tcf[\"key\"] = DefaultKey\n\t}\n\n\tif _, ok := cf[\"conn\"]; !ok {\n\t\treturn errors.New(\"config has no conn key\")\n\t}\n\n\trc.key = cf[\"key\"]\n\trc.conninfo = cf[\"conn\"]\n\trc.connectInit()\n\n\tc := rc.p.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ connect to redis.\nfunc (rc *RedisCache) connectInit() {\n\t\/\/ initialize a new pool\n\trc.p = &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 180 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", rc.conninfo)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t},\n\t}\n}\n\nfunc init() {\n\tcache.Register(\"redis\", NewRedisCache())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2014, Sander van Harmelen\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc (cg *ChefGuard) executeChecks() (int, error) {\n\tif cfg.Tests.Foodcritic != \"\" {\n\t\tif errCode, err := runFoodcritic(cg.ChefOrg, cg.CookbookPath); err != nil {\n\t\t\tif errCode == http.StatusInternalServerError || !cg.continueAfterFailedCheck(\"foodcritic\") {\n\t\t\t\treturn errCode, err\n\t\t\t}\n\t\t}\n\t}\n\tif cfg.Tests.Rubocop != \"\" {\n\t\tif errCode, err := runRubocop(cg.CookbookPath); err != nil {\n\t\t\tif errCode == http.StatusInternalServerError || !cg.continueAfterFailedCheck(\"rubocop\") {\n\t\t\t\treturn errCode, err\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, nil\n}\n\nfunc (cg *ChefGuard) continueAfterFailedCheck(check string) bool {\n\tWARNING.Printf(\"%s errors when uploading cookbook '%s' for '%s'\\n\", strings.Title(check), cg.Cookbook.Name, cg.User)\n\tif getEffectiveConfig(\"Mode\", cg.ChefOrg).(string) == \"permissive\" && cg.ForcedUpload {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runFoodcritic(org, cookbookPath string) (int, error) {\n\targs := getFoodcriticArgs(org, cookbookPath)\n\tcmd := exec.Command(cfg.Tests.Foodcritic, args...)\n\n\tcmd.Env = os.Environ()\n\tcmd.Env = append(cmd.Env, \"RUBY_THREAD_VM_STACK_SIZE=2097152\")\n\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\tif exitError.Sys().(syscall.WaitStatus).ExitStatus() == 3 {\n\t\t\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\t\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Foodcritic errors found ===\\n%s\\n===============================\\n\", errText)\n\t\t\t}\n\t\t}\n\t\treturn http.StatusInternalServerError, fmt.Errorf(\"Failed to execute foodcritic tests: %s - %s\", output, err)\n\t}\n\n\t\/\/ This is still needed for Foodcritic > v9.x.x\n\tif strings.TrimSpace(string(output)) != \"\" {\n\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Foodcritic errors found ===\\n%s\\n===============================\\n\", errText)\n\t}\n\n\treturn 0, nil\n}\n\nfunc getFoodcriticArgs(org, cookbookPath string) []string {\n\texcludes := cfg.Default.ExcludeFCs\n\tcustExcludes := getEffectiveConfig(\"ExcludeFCs\", org)\n\tif excludes != custExcludes {\n\t\texcludes = fmt.Sprintf(\"%s,%s\", excludes, custExcludes)\n\t}\n\texcls := strings.Split(excludes, \",\")\n\targs := []string{}\n\tfor _, excl := range excls {\n\t\targs = append(args, fmt.Sprintf(\"-t ~%s\", strings.TrimSpace(excl)))\n\t}\n\tif cfg.Default.IncludeFCs != \"\" {\n\t\targs = append(args, \"-I\", cfg.Default.IncludeFCs)\n\t}\n\treturn append(args, \"-B\", cookbookPath)\n}\n\nfunc runRubocop(cookbookPath string) (int, error) {\n\tcmd := exec.Command(cfg.Tests.Rubocop, cookbookPath)\n\tcmd.Env = []string{\"HOME=\" + cfg.Default.Tempdir}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif strings.Contains(string(output), \"offense\") {\n\t\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Rubocop errors found ===\\n%s\\n============================\\n\", errText)\n\t\t}\n\t\treturn http.StatusInternalServerError, fmt.Errorf(\"Failed to execute rubocop tests: %s - %s\", output, err)\n\t}\n\treturn 0, nil\n}\n<commit_msg>Maintain backwards compatibility (#120)<commit_after>\/\/\n\/\/ Copyright 2014, Sander van Harmelen\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc (cg *ChefGuard) executeChecks() (int, error) {\n\tif cfg.Tests.Foodcritic != \"\" {\n\t\tif errCode, err := runFoodcritic(cg.ChefOrg, cg.CookbookPath); err != nil {\n\t\t\tif errCode == http.StatusInternalServerError || !cg.continueAfterFailedCheck(\"foodcritic\") {\n\t\t\t\treturn errCode, err\n\t\t\t}\n\t\t}\n\t}\n\tif cfg.Tests.Rubocop != \"\" {\n\t\tif errCode, err := runRubocop(cg.CookbookPath); err != nil {\n\t\t\tif errCode == http.StatusInternalServerError || !cg.continueAfterFailedCheck(\"rubocop\") {\n\t\t\t\treturn errCode, err\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, nil\n}\n\nfunc (cg *ChefGuard) continueAfterFailedCheck(check string) bool {\n\tWARNING.Printf(\"%s errors when uploading cookbook '%s' for '%s'\\n\", strings.Title(check), cg.Cookbook.Name, cg.User)\n\tif getEffectiveConfig(\"Mode\", cg.ChefOrg).(string) == \"permissive\" && cg.ForcedUpload {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runFoodcritic(org, cookbookPath string) (int, error) {\n\targs := getFoodcriticArgs(org, cookbookPath)\n\tcmd := exec.Command(cfg.Tests.Foodcritic, args...)\n\n\tcmd.Env = os.Environ()\n\tcmd.Env = append(cmd.Env, \"RUBY_THREAD_VM_STACK_SIZE=2097152\")\n\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\tif exitError.Sys().(syscall.WaitStatus).ExitStatus() == 3 {\n\t\t\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\t\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Foodcritic errors found ===\\n%s\\n===============================\\n\", errText)\n\t\t\t}\n\t\t}\n\n\t\treturn http.StatusInternalServerError, fmt.Errorf(\"Failed to execute \\\"foodcritic %s\\\": %s - %s\", strings.Join(cmd.Args, \" \"), output, err)\n\t}\n\n\t\/\/ This is still needed for Foodcritic > v9.x.x\n\tif strings.TrimSpace(string(output)) != \"\" {\n\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Foodcritic errors found ===\\n%s\\n===============================\\n\", errText)\n\t}\n\n\treturn 0, nil\n}\n\nfunc getFoodcriticArgs(org, cookbookPath string) []string {\n\texcludes := cfg.Default.ExcludeFCs\n\tcustExcludes := getEffectiveConfig(\"ExcludeFCs\", org)\n\tif excludes != custExcludes {\n\t\texcludes = fmt.Sprintf(\"%s,%s\", excludes, custExcludes)\n\t}\n\targs := []string{}\n\tif excludes != \"\" {\n\t\targs = append(args, \"--tags\", \"~\"+strings.Replace(excludes, \",\", \",~\", -1))\n\t}\n\tif cfg.Default.IncludeFCs != \"\" {\n\t\targs = append(args, \"--include\", cfg.Default.IncludeFCs)\n\t}\n\treturn append(args, \"--no-progress\", \"--cookbook-path\", cookbookPath)\n}\n\nfunc runRubocop(cookbookPath string) (int, error) {\n\tcmd := exec.Command(cfg.Tests.Rubocop, cookbookPath)\n\tcmd.Env = []string{\"HOME=\" + cfg.Default.Tempdir}\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif strings.Contains(string(output), \"offense\") {\n\t\t\terrText := strings.TrimSpace(strings.Replace(string(output), fmt.Sprintf(\"%s\/\", cookbookPath), \"\", -1))\n\t\t\treturn http.StatusPreconditionFailed, fmt.Errorf(\"\\n=== Rubocop errors found ===\\n%s\\n============================\\n\", errText)\n\t\t}\n\t\treturn http.StatusInternalServerError, fmt.Errorf(\"Failed to execute \\\"rubocop %s\\\": %s - %s\", cookbookPath, output, err)\n\t}\n\treturn 0, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package article\n\nimport \"time\"\n\nconst (\n\tEnglish Language = \"ENG\"\n\tJapanese Language = \"JAP\"\n)\n\ntype Language string\n\ntype Article struct {\n\tId string `json:\"id\"`\n\tTitle map[Language]string `json:\"title\"`\n\tBody map[Language]string `json:\"body,omitempty\"`\n\tDatePosted time.Time `json:\"posted\"`\n\tDateEdited time.Time `json:\"edited,omitempty\"`\n}\n\ntype ArticleList []*Article\n\n\/\/ Make slices of article pointers sortable by posting time.\nfunc (l ArticleList) Len() int {\n\treturn len(l)\n}\n\nfunc (l ArticleList) Less(i, j int) bool {\n\treturn l[i].DatePosted.Unix() < l[j].DatePosted.Unix()\n}\n\nfunc (l ArticleList) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n<commit_msg>Serve japanese language name in japanese.<commit_after>package article\n\nimport \"time\"\n\nconst (\n\tEnglish Language = \"ENG\"\n\tJapanese Language = \"日本語\"\n)\n\ntype Language string\n\ntype Article struct {\n\tId string `json:\"id\"`\n\tTitle map[Language]string `json:\"title\"`\n\tBody map[Language]string `json:\"body,omitempty\"`\n\tDatePosted time.Time `json:\"posted\"`\n\tDateEdited time.Time `json:\"edited,omitempty\"`\n}\n\ntype ArticleList []*Article\n\n\/\/ Make slices of article pointers sortable by posting time.\nfunc (l ArticleList) Len() int {\n\treturn len(l)\n}\n\nfunc (l ArticleList) Less(i, j int) bool {\n\treturn l[i].DatePosted.Unix() < l[j].DatePosted.Unix()\n}\n\nfunc (l ArticleList) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Gary Burd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage redis_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nfunc publish(channel, value interface{}) {\n\tc, err := dial()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\tc.Do(\"PUBLISH\", channel, value)\n}\n\n\/\/ Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine.\nfunc ExamplePubSubConn() {\n\tc, err := dial()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tpsc := redis.PubSubConn{Conn: c}\n\n\t\/\/ This goroutine receives and prints pushed notifications from the server.\n\t\/\/ The goroutine exits when the connection is unsubscribed from all\n\t\/\/ channels or there is an error.\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tswitch n := psc.Receive().(type) {\n\t\t\tcase redis.Message:\n\t\t\t\tfmt.Printf(\"Message: %s %s\\n\", n.Channel, n.Data)\n\t\t\tcase redis.PMessage:\n\t\t\t\tfmt.Printf(\"PMessage: %s %s %s\\n\", n.Pattern, n.Channel, n.Data)\n\t\t\tcase redis.Subscription:\n\t\t\t\tfmt.Printf(\"Subscription: %s %s %d\\n\", n.Kind, n.Channel, n.Count)\n\t\t\t\tif n.Count == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase error:\n\t\t\t\tfmt.Printf(\"error: %v\\n\", n)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ This goroutine manages subscriptions for the connection.\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tpsc.Subscribe(\"example\")\n\t\tpsc.PSubscribe(\"p*\")\n\n\t\t\/\/ The following function calls publish a message using another\n\t\t\/\/ connection to the Redis server.\n\t\tpublish(\"example\", \"hello\")\n\t\tpublish(\"example\", \"world\")\n\t\tpublish(\"pexample\", \"foo\")\n\t\tpublish(\"pexample\", \"bar\")\n\n\t\t\/\/ Unsubscribe from all connections. This will cause the receiving\n\t\t\/\/ goroutine to exit.\n\t\tpsc.Unsubscribe()\n\t\tpsc.PUnsubscribe()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ Output:\n\t\/\/ Subscription: subscribe example 1\n\t\/\/ Subscription: psubscribe p* 2\n\t\/\/ Message: example hello\n\t\/\/ Message: example world\n\t\/\/ PMessage: p* pexample foo\n\t\/\/ PMessage: p* pexample bar\n\t\/\/ Subscription: unsubscribe example 1\n\t\/\/ Subscription: punsubscribe p* 0\n}\n\nfunc expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {\n\tactual := c.Receive()\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(\"%s = %v, want %v\", message, actual, expected)\n\t}\n}\n\nfunc TestPushed(t *testing.T) {\n\tpc, err := redis.DialDefaultServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error connection to database, %v\", err)\n\t}\n\tdefer pc.Close()\n\n\tsc, err := redis.DialDefaultServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error connection to database, %v\", err)\n\t}\n\tdefer sc.Close()\n\n\tc := redis.PubSubConn{Conn: sc}\n\n\tc.Subscribe(\"c1\")\n\texpectPushed(t, c, \"Subscribe(c1)\", redis.Subscription{Kind: \"subscribe\", Channel: \"c1\", Count: 1})\n\tc.Subscribe(\"c2\")\n\texpectPushed(t, c, \"Subscribe(c2)\", redis.Subscription{Kind: \"subscribe\", Channel: \"c2\", Count: 2})\n\tc.PSubscribe(\"p1\")\n\texpectPushed(t, c, \"PSubscribe(p1)\", redis.Subscription{Kind: \"psubscribe\", Channel: \"p1\", Count: 3})\n\tc.PSubscribe(\"p2\")\n\texpectPushed(t, c, \"PSubscribe(p2)\", redis.Subscription{Kind: \"psubscribe\", Channel: \"p2\", Count: 4})\n\tc.PUnsubscribe()\n\texpectPushed(t, c, \"Punsubscribe(p1)\", redis.Subscription{Kind: \"punsubscribe\", Channel: \"p1\", Count: 3})\n\texpectPushed(t, c, \"Punsubscribe()\", redis.Subscription{Kind: \"punsubscribe\", Channel: \"p2\", Count: 2})\n\n\tpc.Do(\"PUBLISH\", \"c1\", \"hello\")\n\texpectPushed(t, c, \"PUBLISH c1 hello\", redis.Message{Channel: \"c1\", Data: []byte(\"hello\")})\n\n\tc.Ping(\"hello\")\n\texpectPushed(t, c, `Ping(\"hello\")`, redis.Pong{\"hello\"})\n\n\tc.Conn.Send(\"PING\")\n\tc.Conn.Flush()\n\texpectPushed(t, c, `Send(\"PING\")`, redis.Pong{})\n}\n<commit_msg>Fix go vet warnings<commit_after>\/\/ Copyright 2012 Gary Burd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage redis_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nfunc publish(channel, value interface{}) {\n\tc, err := dial()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\tc.Do(\"PUBLISH\", channel, value)\n}\n\n\/\/ Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine.\nfunc ExamplePubSubConn() {\n\tc, err := dial()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tpsc := redis.PubSubConn{Conn: c}\n\n\t\/\/ This goroutine receives and prints pushed notifications from the server.\n\t\/\/ The goroutine exits when the connection is unsubscribed from all\n\t\/\/ channels or there is an error.\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tswitch n := psc.Receive().(type) {\n\t\t\tcase redis.Message:\n\t\t\t\tfmt.Printf(\"Message: %s %s\\n\", n.Channel, n.Data)\n\t\t\tcase redis.PMessage:\n\t\t\t\tfmt.Printf(\"PMessage: %s %s %s\\n\", n.Pattern, n.Channel, n.Data)\n\t\t\tcase redis.Subscription:\n\t\t\t\tfmt.Printf(\"Subscription: %s %s %d\\n\", n.Kind, n.Channel, n.Count)\n\t\t\t\tif n.Count == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase error:\n\t\t\t\tfmt.Printf(\"error: %v\\n\", n)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ This goroutine manages subscriptions for the connection.\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tpsc.Subscribe(\"example\")\n\t\tpsc.PSubscribe(\"p*\")\n\n\t\t\/\/ The following function calls publish a message using another\n\t\t\/\/ connection to the Redis server.\n\t\tpublish(\"example\", \"hello\")\n\t\tpublish(\"example\", \"world\")\n\t\tpublish(\"pexample\", \"foo\")\n\t\tpublish(\"pexample\", \"bar\")\n\n\t\t\/\/ Unsubscribe from all connections. This will cause the receiving\n\t\t\/\/ goroutine to exit.\n\t\tpsc.Unsubscribe()\n\t\tpsc.PUnsubscribe()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ Output:\n\t\/\/ Subscription: subscribe example 1\n\t\/\/ Subscription: psubscribe p* 2\n\t\/\/ Message: example hello\n\t\/\/ Message: example world\n\t\/\/ PMessage: p* pexample foo\n\t\/\/ PMessage: p* pexample bar\n\t\/\/ Subscription: unsubscribe example 1\n\t\/\/ Subscription: punsubscribe p* 0\n}\n\nfunc expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {\n\tactual := c.Receive()\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(\"%s = %v, want %v\", message, actual, expected)\n\t}\n}\n\nfunc TestPushed(t *testing.T) {\n\tpc, err := redis.DialDefaultServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error connection to database, %v\", err)\n\t}\n\tdefer pc.Close()\n\n\tsc, err := redis.DialDefaultServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error connection to database, %v\", err)\n\t}\n\tdefer sc.Close()\n\n\tc := redis.PubSubConn{Conn: sc}\n\n\tc.Subscribe(\"c1\")\n\texpectPushed(t, c, \"Subscribe(c1)\", redis.Subscription{Kind: \"subscribe\", Channel: \"c1\", Count: 1})\n\tc.Subscribe(\"c2\")\n\texpectPushed(t, c, \"Subscribe(c2)\", redis.Subscription{Kind: \"subscribe\", Channel: \"c2\", Count: 2})\n\tc.PSubscribe(\"p1\")\n\texpectPushed(t, c, \"PSubscribe(p1)\", redis.Subscription{Kind: \"psubscribe\", Channel: \"p1\", Count: 3})\n\tc.PSubscribe(\"p2\")\n\texpectPushed(t, c, \"PSubscribe(p2)\", redis.Subscription{Kind: \"psubscribe\", Channel: \"p2\", Count: 4})\n\tc.PUnsubscribe()\n\texpectPushed(t, c, \"Punsubscribe(p1)\", redis.Subscription{Kind: \"punsubscribe\", Channel: \"p1\", Count: 3})\n\texpectPushed(t, c, \"Punsubscribe()\", redis.Subscription{Kind: \"punsubscribe\", Channel: \"p2\", Count: 2})\n\n\tpc.Do(\"PUBLISH\", \"c1\", \"hello\")\n\texpectPushed(t, c, \"PUBLISH c1 hello\", redis.Message{Channel: \"c1\", Data: []byte(\"hello\")})\n\n\tc.Ping(\"hello\")\n\texpectPushed(t, c, `Ping(\"hello\")`, redis.Pong{Data: \"hello\"})\n\n\tc.Conn.Send(\"PING\")\n\tc.Conn.Flush()\n\texpectPushed(t, c, `Send(\"PING\")`, redis.Pong{})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"golang.org\/x\/net\/websocket\"\n)\n\/\/ IMPORTS, BITCH!\n\nfunc onInit() string {\n\treturn \"[INIT] Golang 10 min lib for Shitty mongo\/Chatango (@Coil) initiated. \" + time.Now().String()\n}\n\nfunc main() {\n\tinit := 1\n\tfmt.Println(onInit()) \/\/ Friendly little init message.\n\torigin := \"http:\/\/st.chatango.com\" \/\/ Origin required, thank Chatango for that.\n\turl := \"ws:\/\/s23.chatango.com:1800\" \/\/ Websocket server for the chat.\n\tws, err := websocket.Dial(url, \"\", origin) \/\/ Create a websocket connection\n\tif err != nil { \/\/ IS THIS THING ON?\n\t\tfmt.Println(\"[FATAL] ERR NIGNOG\")\n\t}\n\tws.Write([]byte(\"bauth:hazerdklan::Coil:password\\x00\")) \/\/ tested at the klan\n\ttime.Sleep(10000000000) \/\/ Needs a delay. HAVE YOUR FUCKING DELAY, CHATANGO\n\tws.Write([]byte(\"bm:t12j:256:<nED1C24\/><f x11555555=\\\"0\\\">get rekt mofucka\\r\\n\\x00\")) \/\/ Send a message\n\tfor init == 1 { \/\/ Golang's while loop equivalent\n\t\ttime.Sleep(100) \/\/ MORE SLEEPING FUUUUUUUUUUUUUUUUUUUUU\n\t\tbuf := make([]byte,1024) \/\/ Receive from the websocket\n\t\tvar n int \/\/ cursor\n\t\tif n, err = ws.Read(buf); err != nil { \/\/ Make sure this shit isn't null, nignog\n\t\t\tfmt.Println(\"[FATAL] ERR NIGNOG\") \/\/ but in the event that it is null...\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", buf[:n]) \/\/ Print the data!\n\t}\n}\n<commit_msg>Update cholib.go<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"golang.org\/x\/net\/websocket\"\n)\n\/\/ IMPORTS, YAY!\n\nfunc onInit() string {\n\treturn \"[INIT] Golang 10 min lib for Chatango (@Coil) initiated. \" + time.Now().String()\n}\n\nfunc main() {\n\tinit := 1\n\tfmt.Println(onInit()) \/\/ Friendly little init message.\n\torigin := \"http:\/\/st.chatango.com\" \/\/ Origin required, thank Chatango for that.\n\turl := \"ws:\/\/s23.chatango.com:1800\" \/\/ Websocket server for the chat.\n\tws, err := websocket.Dial(url, \"\", origin) \/\/ Create a websocket connection\n\tif err != nil { \/\/ IS THIS THING ON?\n\t\tfmt.Println(\"[FATAL] ERR NIGNOG\")\n\t}\n\tws.Write([]byte(\"bauth:hazerdklan::Coil:password\\x00\")) \/\/ tested at the klan\n\ttime.Sleep(10000000000) \/\/ Needs a delay. HAVE YOUR DELAY, CHATANGO\n\tws.Write([]byte(\"bm:t12j:256:<nED1C24\/><f x11555555=\\\"0\\\">get rekt mofucka\\r\\n\\x00\")) \/\/ Send a message\n\tfor init == 1 { \/\/ Golang's while loop equivalent\n\t\ttime.Sleep(100) \/\/ MORE SLEEPING FUUUUUUUUUUUUUUUUUUUUU\n\t\tbuf := make([]byte,1024) \/\/ Receive from the websocket\n\t\tvar n int \/\/ cursor\n\t\tif n, err = ws.Read(buf); err != nil { \/\/ Make sure the data isn't null, nignog\n\t\t\tfmt.Println(\"[FATAL] ERR NIGNOG\") \/\/ but in the event that it is null...\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", buf[:n]) \/\/ Print the data!\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The chroot package is able to create an Amazon AMI without requiring\n\/\/ the launch of a new instance for every build. It does this by attaching\n\/\/ and mounting the root volume of another AMI and chrooting into that\n\/\/ directory. It then creates an AMI from that attached drive.\npackage chroot\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/mitchellh\/multistep\"\n\tawscommon \"github.com\/mitchellh\/packer\/builder\/amazon\/common\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"runtime\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"mitchellh.amazon.chroot\"\n\n\/\/ Config is the configuration that is chained through the steps and\n\/\/ settable from the template.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tawscommon.AccessConfig `mapstructure:\",squash\"`\n\tawscommon.AMIConfig `mapstructure:\",squash\"`\n\n\tChrootMounts [][]string `mapstructure:\"chroot_mounts\"`\n\tCommandWrapper string `mapstructure:\"command_wrapper\"`\n\tCopyFiles []string `mapstructure:\"copy_files\"`\n\tDevicePath string `mapstructure:\"device_path\"`\n\tMountPath string `mapstructure:\"mount_path\"`\n\tSourceAmi string `mapstructure:\"source_ami\"`\n\n\ttpl *packer.ConfigTemplate\n}\n\ntype wrappedCommandTemplate struct {\n\tCommand string\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\tb.config.tpl.Funcs(awscommon.TemplateFuncs)\n\n\t\/\/ Defaults\n\tif b.config.ChrootMounts == nil {\n\t\tb.config.ChrootMounts = make([][]string, 0)\n\t}\n\n\tif b.config.CopyFiles == nil {\n\t\tb.config.CopyFiles = make([]string, 0)\n\t}\n\n\tif len(b.config.ChrootMounts) == 0 {\n\t\tb.config.ChrootMounts = [][]string{\n\t\t\t[]string{\"proc\", \"proc\", \"\/proc\"},\n\t\t\t[]string{\"sysfs\", \"sysfs\", \"\/sys\"},\n\t\t\t[]string{\"bind\", \"\/dev\", \"\/dev\"},\n\t\t\t[]string{\"devpts\", \"devpts\", \"\/dev\/pts\"},\n\t\t\t[]string{\"binfmt_misc\", \"binfmt_misc\", \"\/proc\/sys\/fs\/binfmt_misc\"},\n\t\t}\n\t}\n\n\tif len(b.config.CopyFiles) == 0 {\n\t\tb.config.CopyFiles = []string{\"\/etc\/resolv.conf\"}\n\t}\n\n\tif b.config.CommandWrapper == \"\" {\n\t\tb.config.CommandWrapper = \"{{.Command}}\"\n\t}\n\n\tif b.config.MountPath == \"\" {\n\t\tb.config.MountPath = \"packer-amazon-chroot-volumes\/{{.Device}}\"\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)\n\n\tfor i, mounts := range b.config.ChrootMounts {\n\t\tif len(mounts) != 3 {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, errors.New(\"Each chroot_mounts entry should be three elements.\"))\n\t\t\tbreak\n\t\t}\n\n\t\tfor j, entry := range mounts {\n\t\t\tb.config.ChrootMounts[i][j], err = b.config.tpl.Process(entry, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\t\tfmt.Errorf(\"Error processing chroot_mounts[%d][%d]: %s\",\n\t\t\t\t\t\ti, j, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i, file := range b.config.CopyFiles {\n\t\tvar err error\n\t\tb.config.CopyFiles[i], err = b.config.tpl.Process(file, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Error processing copy_files[%d]: %s\",\n\t\t\t\t\ti, err))\n\t\t}\n\t}\n\n\tif b.config.SourceAmi == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"source_ami is required.\"))\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"device_path\": &b.config.DevicePath,\n\t\t\"source_ami\": &b.config.SourceAmi,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\n\tlog.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))\n\treturn nil, nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tif runtime.GOOS != \"linux\" {\n\t\treturn nil, errors.New(\"The amazon-chroot builder only works on Linux environments.\")\n\t}\n\n\tregion, err := b.config.Region()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth, err := b.config.AccessConfig.Auth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tec2conn := ec2.New(auth, region)\n\n\twrappedCommand := func(command string) (string, error) {\n\t\treturn b.config.tpl.Process(\n\t\t\tb.config.CommandWrapper, &wrappedCommandTemplate{\n\t\t\t\tCommand: command,\n\t\t\t})\n\t}\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"ec2\", ec2conn)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\tstate.Put(\"wrappedCommand\", CommandWrapper(wrappedCommand))\n\n\t\/\/ Build the steps\n\tsteps := []multistep.Step{\n\t\t&StepInstanceInfo{},\n\t\t&StepSourceAMIInfo{},\n\t\t&StepFlock{},\n\t\t&StepPrepareDevice{},\n\t\t&StepCreateVolume{},\n\t\t&StepAttachVolume{},\n\t\t&StepEarlyUnflock{},\n\t\t&StepMountDevice{},\n\t\t&StepMountExtra{},\n\t\t&StepCopyFiles{},\n\t\t&StepChrootProvision{},\n\t\t&StepEarlyCleanup{},\n\t\t&StepSnapshot{},\n\t\t&StepRegisterAMI{},\n\t\t&awscommon.StepAMIRegionCopy{\n\t\t\tRegions: b.config.AMIRegions,\n\t\t},\n\t\t&awscommon.StepModifyAMIAttributes{\n\t\t\tDescription: b.config.AMIDescription,\n\t\t\tUsers: b.config.AMIUsers,\n\t\t\tGroups: b.config.AMIGroups,\n\t\t},\n\t\t&awscommon.StepCreateTags{\n\t\t\tTags: b.config.AMITags,\n\t\t},\n\t}\n\n\t\/\/ Run!\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\t\/\/ If there are no AMIs, then just return\n\tif _, ok := state.GetOk(\"amis\"); !ok {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Build the artifact and return it\n\tartifact := &awscommon.Artifact{\n\t\tAmis: state.Get(\"amis\").(map[string]string),\n\t\tBuilderIdValue: BuilderId,\n\t\tConn: ec2conn,\n\t}\n\n\treturn artifact, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<commit_msg>builder\/amazon\/chroot: mount in device-specific places<commit_after>\/\/ The chroot package is able to create an Amazon AMI without requiring\n\/\/ the launch of a new instance for every build. It does this by attaching\n\/\/ and mounting the root volume of another AMI and chrooting into that\n\/\/ directory. It then creates an AMI from that attached drive.\npackage chroot\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/mitchellh\/multistep\"\n\tawscommon \"github.com\/mitchellh\/packer\/builder\/amazon\/common\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"runtime\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"mitchellh.amazon.chroot\"\n\n\/\/ Config is the configuration that is chained through the steps and\n\/\/ settable from the template.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tawscommon.AccessConfig `mapstructure:\",squash\"`\n\tawscommon.AMIConfig `mapstructure:\",squash\"`\n\n\tChrootMounts [][]string `mapstructure:\"chroot_mounts\"`\n\tCommandWrapper string `mapstructure:\"command_wrapper\"`\n\tCopyFiles []string `mapstructure:\"copy_files\"`\n\tDevicePath string `mapstructure:\"device_path\"`\n\tMountPath string `mapstructure:\"mount_path\"`\n\tSourceAmi string `mapstructure:\"source_ami\"`\n\n\ttpl *packer.ConfigTemplate\n}\n\ntype wrappedCommandTemplate struct {\n\tCommand string\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\tb.config.tpl.Funcs(awscommon.TemplateFuncs)\n\n\t\/\/ Defaults\n\tif b.config.ChrootMounts == nil {\n\t\tb.config.ChrootMounts = make([][]string, 0)\n\t}\n\n\tif b.config.CopyFiles == nil {\n\t\tb.config.CopyFiles = make([]string, 0)\n\t}\n\n\tif len(b.config.ChrootMounts) == 0 {\n\t\tb.config.ChrootMounts = [][]string{\n\t\t\t[]string{\"proc\", \"proc\", \"\/proc\"},\n\t\t\t[]string{\"sysfs\", \"sysfs\", \"\/sys\"},\n\t\t\t[]string{\"bind\", \"\/dev\", \"\/dev\"},\n\t\t\t[]string{\"devpts\", \"devpts\", \"\/dev\/pts\"},\n\t\t\t[]string{\"binfmt_misc\", \"binfmt_misc\", \"\/proc\/sys\/fs\/binfmt_misc\"},\n\t\t}\n\t}\n\n\tif len(b.config.CopyFiles) == 0 {\n\t\tb.config.CopyFiles = []string{\"\/etc\/resolv.conf\"}\n\t}\n\n\tif b.config.CommandWrapper == \"\" {\n\t\tb.config.CommandWrapper = \"{{.Command}}\"\n\t}\n\n\tif b.config.MountPath == \"\" {\n\t\tb.config.MountPath = \"\/mnt\/packer-amazon-chroot-volumes\/{{.Device}}\"\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)\n\n\tfor i, mounts := range b.config.ChrootMounts {\n\t\tif len(mounts) != 3 {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, errors.New(\"Each chroot_mounts entry should be three elements.\"))\n\t\t\tbreak\n\t\t}\n\n\t\tfor j, entry := range mounts {\n\t\t\tb.config.ChrootMounts[i][j], err = b.config.tpl.Process(entry, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\t\tfmt.Errorf(\"Error processing chroot_mounts[%d][%d]: %s\",\n\t\t\t\t\t\ti, j, err))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i, file := range b.config.CopyFiles {\n\t\tvar err error\n\t\tb.config.CopyFiles[i], err = b.config.tpl.Process(file, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Error processing copy_files[%d]: %s\",\n\t\t\t\t\ti, err))\n\t\t}\n\t}\n\n\tif b.config.SourceAmi == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"source_ami is required.\"))\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"device_path\": &b.config.DevicePath,\n\t\t\"source_ami\": &b.config.SourceAmi,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\n\tlog.Println(common.ScrubConfig(b.config, b.config.AccessKey, b.config.SecretKey))\n\treturn nil, nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tif runtime.GOOS != \"linux\" {\n\t\treturn nil, errors.New(\"The amazon-chroot builder only works on Linux environments.\")\n\t}\n\n\tregion, err := b.config.Region()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth, err := b.config.AccessConfig.Auth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tec2conn := ec2.New(auth, region)\n\n\twrappedCommand := func(command string) (string, error) {\n\t\treturn b.config.tpl.Process(\n\t\t\tb.config.CommandWrapper, &wrappedCommandTemplate{\n\t\t\t\tCommand: command,\n\t\t\t})\n\t}\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"ec2\", ec2conn)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\tstate.Put(\"wrappedCommand\", CommandWrapper(wrappedCommand))\n\n\t\/\/ Build the steps\n\tsteps := []multistep.Step{\n\t\t&StepInstanceInfo{},\n\t\t&StepSourceAMIInfo{},\n\t\t&StepFlock{},\n\t\t&StepPrepareDevice{},\n\t\t&StepCreateVolume{},\n\t\t&StepAttachVolume{},\n\t\t&StepEarlyUnflock{},\n\t\t&StepMountDevice{},\n\t\t&StepMountExtra{},\n\t\t&StepCopyFiles{},\n\t\t&StepChrootProvision{},\n\t\t&StepEarlyCleanup{},\n\t\t&StepSnapshot{},\n\t\t&StepRegisterAMI{},\n\t\t&awscommon.StepAMIRegionCopy{\n\t\t\tRegions: b.config.AMIRegions,\n\t\t},\n\t\t&awscommon.StepModifyAMIAttributes{\n\t\t\tDescription: b.config.AMIDescription,\n\t\t\tUsers: b.config.AMIUsers,\n\t\t\tGroups: b.config.AMIGroups,\n\t\t},\n\t\t&awscommon.StepCreateTags{\n\t\t\tTags: b.config.AMITags,\n\t\t},\n\t}\n\n\t\/\/ Run!\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\t\/\/ If there are no AMIs, then just return\n\tif _, ok := state.GetOk(\"amis\"); !ok {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Build the artifact and return it\n\tartifact := &awscommon.Artifact{\n\t\tAmis: state.Get(\"amis\").(map[string]string),\n\t\tBuilderIdValue: BuilderId,\n\t\tConn: ec2conn,\n\t}\n\n\treturn artifact, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The googlecompute package contains a packer.Builder implementation that\n\/\/ builds images for Google Compute Engine.\npackage googlecompute\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\n\/\/ The unique ID for this builder.\nconst BuilderId = \"packer.googlecompute\"\n\n\/\/ Builder represents a Packer Builder.\ntype Builder struct {\n\tconfig *Config\n\trunner multistep.Runner\n}\n\n\/\/ Prepare processes the build configuration parameters.\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tc, warnings, errs := NewConfig(raws...)\n\tif errs != nil {\n\t\treturn warnings, errs\n\t}\n\tb.config = c\n\n\treturn warnings, nil\n}\n\n\/\/ Run executes a googlecompute Packer build and returns a packer.Artifact\n\/\/ representing a GCE machine image.\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Initialize the Google Compute Engine API.\n\tclient, err := New(b.config.ProjectId, b.config.Zone, b.config.clientSecrets, b.config.privateKeyBytes)\n\tif err != nil {\n\t\tlog.Println(\"Failed to create the Google Compute Engine client.\")\n\t\treturn nil, err\n\t}\n\t\/\/ Set up the state.\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", b.config)\n\tstate.Put(\"client\", client)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\t\/\/ Build the steps.\n\tsteps := []multistep.Step{\n\t\tnew(stepCreateSSHKey),\n\t\tnew(stepCreateInstance),\n\t\tnew(stepInstanceInfo),\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: sshAddress,\n\t\t\tSSHConfig: sshConfig,\n\t\t\tSSHWaitTimeout: 5 * time.Minute,\n\t\t},\n\t\tnew(common.StepProvision),\n\t\tnew(stepUpdateGsutil),\n\t\tnew(stepCreateImage),\n\t\tnew(stepUploadImage),\n\t\tnew(stepRegisterImage),\n\t}\n\t\/\/ Run the steps.\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\tb.runner.Run(state)\n\t\/\/ Report any errors.\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\tif _, ok := state.GetOk(\"image_name\"); !ok {\n\t\tlog.Println(\"Failed to find image_name in state. Bug?\")\n\t\treturn nil, nil\n\t}\n\tartifact := &Artifact{\n\t\timageName: state.Get(\"image_name\").(string),\n\t\tclient: client,\n\t}\n\treturn artifact, nil\n}\n\n\/\/ Cancel.\nfunc (b *Builder) Cancel() {}\n<commit_msg>builder\/googlecompute: cancel works<commit_after>\/\/ The googlecompute package contains a packer.Builder implementation that\n\/\/ builds images for Google Compute Engine.\npackage googlecompute\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\n\/\/ The unique ID for this builder.\nconst BuilderId = \"packer.googlecompute\"\n\n\/\/ Builder represents a Packer Builder.\ntype Builder struct {\n\tconfig *Config\n\trunner multistep.Runner\n}\n\n\/\/ Prepare processes the build configuration parameters.\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tc, warnings, errs := NewConfig(raws...)\n\tif errs != nil {\n\t\treturn warnings, errs\n\t}\n\tb.config = c\n\n\treturn warnings, nil\n}\n\n\/\/ Run executes a googlecompute Packer build and returns a packer.Artifact\n\/\/ representing a GCE machine image.\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Initialize the Google Compute Engine API.\n\tclient, err := New(b.config.ProjectId, b.config.Zone, b.config.clientSecrets, b.config.privateKeyBytes)\n\tif err != nil {\n\t\tlog.Println(\"Failed to create the Google Compute Engine client.\")\n\t\treturn nil, err\n\t}\n\n\t\/\/ Set up the state.\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", b.config)\n\tstate.Put(\"client\", client)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\n\t\/\/ Build the steps.\n\tsteps := []multistep.Step{\n\t\tnew(stepCreateSSHKey),\n\t\tnew(stepCreateInstance),\n\t\tnew(stepInstanceInfo),\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: sshAddress,\n\t\t\tSSHConfig: sshConfig,\n\t\t\tSSHWaitTimeout: 5 * time.Minute,\n\t\t},\n\t\tnew(common.StepProvision),\n\t\tnew(stepUpdateGsutil),\n\t\tnew(stepCreateImage),\n\t\tnew(stepUploadImage),\n\t\tnew(stepRegisterImage),\n\t}\n\n\t\/\/ Run the steps.\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\tb.runner.Run(state)\n\n\t\/\/ Report any errors.\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\tif _, ok := state.GetOk(\"image_name\"); !ok {\n\t\tlog.Println(\"Failed to find image_name in state. Bug?\")\n\t\treturn nil, nil\n\t}\n\n\tartifact := &Artifact{\n\t\timageName: state.Get(\"image_name\").(string),\n\t\tclient: client,\n\t}\n\treturn artifact, nil\n}\n\n\/\/ Cancel.\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package receiver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n)\n\n\/\/ Sync message from sbc\n\/\/ echo -ne '\\x00\\x0A\\x00\\x30\\x59\\x41\\x37\\x38\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x20\\x00\\x01\\x00\\x02\\x00\\xFC\\x77\\x31\\x00\\x00\\x00\\x1E\\x00\\x00\\x00\\x00\\x43\\x5A\\x07\\x03\\x00\\x06\\x65\\x63\\x7A\\x37\\x33\\x30' | nc localhost 4739\n\n\/\/ Iheader is the Oracle IPFIX Header\n\/\/\n\/\/ Wire format:\n\/\/\n\/\/ Bytes: 0 1 2 3\n\/\/ Bits: 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | Version (uint16) | Length (uint16) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | ExportTime (unit32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | SequenceNumber (uint32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | ObservationID (uint32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/\n\/\/\ntype IpfixHeader struct {\n\tVersion uint16\n\tLength uint16\n\tExportTime uint32\n\tSeqNum uint32\n\tObservationID uint32\n}\n\n\/\/ SetHeader represents set header fields\ntype SetHeader struct {\n\tID uint16\n\tLength uint16\n}\n\ntype DataSet struct {\n\tHandShake Hs\n\tSIP Su\n}\n\ntype Hs struct {\n\tMaVer uint16\n\tMiVer uint16\n\tCFlags1 uint16\n\tCFlags2 uint16\n\tSFlags uint16\n\tTimeout uint16\n\tSystemID uint32\n\tProduct uint16\n\tSMaVer uint8\n\tSMiVer uint8\n\tRevision uint8\n\tHostnameLen uint8\n\tHostname []byte\n}\n\n\/\/ IPFIX is the Oracle IPFIX Handshake Message\ntype IPFIX struct {\n\tHeader IpfixHeader\n\tSet SetHeader\n\tData DataSet\n}\n\ntype Su struct {\n\tTimeSec uint32\n\tTimeMic uint32\n\tIntSlot uint8\n\tIntPort uint8\n\tIntVlan uint16\n\tCallIDLen uint8\n\tCallID []byte\n\tCallIDEnd uint8\n\tIPlen uint16\n\tVL uint8\n\tTOS uint8\n\tTLen uint16\n\tTID uint16\n\tTFlags uint16\n\tTTL uint8\n\tTProto uint8\n\tTPos uint16\n\tSrcIP uint32\n\tDstIP uint32\n\tDstPort uint16\n\tSrcPort uint16\n\tUDPlen uint16\n\tMsgLen uint16\n\tSipMsg []byte\n}\n\n\/\/ BufToInt8 casts []byte to []int8\nfunc BufToInt8(b []byte) []int8 {\n\tbi := make([]int8, len(b))\n\tfor i, v := range b {\n\t\tbi[i] = int8(v)\n\t}\n\treturn bi\n}\n\n\/\/ NewIPFIX fills the IPFIX struct with structured binary data from r\nfunc NewIPFIX(header []byte) *IPFIX {\n\tvar ni IPFIX\n\trni := bytes.NewReader(header)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.Version)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.Length)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.ExportTime)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.SeqNum)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.ObservationID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Set.ID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Set.Length)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.MaVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.MiVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.CFlags1)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.CFlags2)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SFlags)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Timeout)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SystemID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Product)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SMaVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SMiVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Revision)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.HostnameLen)\n\tni.Data.HandShake.Hostname = make([]byte, ni.Data.HandShake.HostnameLen)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Hostname)\n\treturn &ni\n}\n\nfunc NewSipRecUdp(header []byte) *IPFIX {\n\tvar niu IPFIX\n\trniu := bytes.NewReader(header)\n\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.Version)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.Length)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.ExportTime)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.SeqNum)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.ObservationID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Set.ID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Set.Length)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TimeSec)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TimeMic)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntSlot)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntVlan)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallIDLen)\n\tniu.Data.SIP.CallID = make([]byte, niu.Data.SIP.CallIDLen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallIDEnd)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IPlen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.VL)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TOS)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TLen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TFlags)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TTL)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TProto)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TPos)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SrcIP)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.DstIP)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.DstPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SrcPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.UDPlen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.MsgLen)\n\t\/*\n\t\tfmt.Println(rniu.Len())\n\t\tfmt.Println(\"headerlen:\", len(header))\n\t*\/\n\tniu.Data.SIP.SipMsg = make([]byte, rniu.Len())\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SipMsg)\n\n\treturn &niu\n}\n\n\/\/ HandShake writes the binary IPFIX representation into the buffer\nfunc (fi *IPFIX) HandShake() []byte {\n\tbuf := new(bytes.Buffer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.Version)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.Length)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.ExportTime)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.SeqNum)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.ObservationID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Set.ID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Set.Length)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.MaVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.MiVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.CFlags1)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.CFlags2)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SFlags)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Timeout)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SystemID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Product)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SMaVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SMiVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Revision)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.HostnameLen)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Hostname)\n\n\treturn buf.Bytes()\n}\n\nfunc checkError(err error) {\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Fatal error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc SyncClient(conn net.Conn) {\n\tfmt.Println(\"Handling new connection...\")\n\n\t\/\/ Close connection when this function ends\n\tdefer func() {\n\t\tfmt.Println(\"Closing connection...\")\n\t\tconn.Close()\n\t}()\n\n\ths := make([]byte, 512)\n\tn, err := conn.Read(hs)\n\tcheckError(err)\n\n\tpacket := NewIPFIX(hs[:n])\n\tif packet.Set.ID == 256 {\n\t\tpacket.Set.ID++\n\t}\n\n\tfmt.Printf(\"Write: %#v\\n\", BufToInt8(packet.HandShake()))\n\n\tif packet.Set.ID == 257 && packet.Header.Length > 20 {\n\t\tbinary.Write(conn, binary.BigEndian, BufToInt8(packet.HandShake()))\n\t}\n\n\t\/\/bufReader := bufio.NewReader(conn)\n\n\tfor {\n\t\tb := make([]byte, 4096)\n\t\tn, err := conn.Read(b)\n\t\tcheckError(err)\n\n\t\tdata := NewSipRecUdp(b[:n])\n\n\t\t\/*\t\t\/\/ Read IPFIX Messages delimited by newline\n\t\t\t\tbytes, err := bufReader.ReadBytes('\\n')\n\t\t\t\tcheckError(err)*\/\n\n\t\t\/\/fmt.Printf(\"%s\", (binary.BigEndian.Uint16(bytes)))\n\t\tfmt.Printf(\"%s\", data.Data.SIP)\n\n\t}\n}\n<commit_msg>Rename dataset 259<commit_after>package receiver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n)\n\n\/\/ Sync message from sbc\n\/\/ echo -ne '\\x00\\x0A\\x00\\x30\\x59\\x41\\x37\\x38\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x20\\x00\\x01\\x00\\x02\\x00\\xFC\\x77\\x31\\x00\\x00\\x00\\x1E\\x00\\x00\\x00\\x00\\x43\\x5A\\x07\\x03\\x00\\x06\\x65\\x63\\x7A\\x37\\x33\\x30' | nc localhost 4739\n\n\/\/ Iheader is the Oracle IPFIX Header\n\/\/\n\/\/ Wire format:\n\/\/\n\/\/ Bytes: 0 1 2 3\n\/\/ Bits: 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | Version (uint16) | Length (uint16) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | ExportTime (unit32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | SequenceNumber (uint32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/ | ObservationID (uint32) |\n\/\/ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\/\/\n\/\/\ntype IpfixHeader struct {\n\tVersion uint16\n\tLength uint16\n\tExportTime uint32\n\tSeqNum uint32\n\tObservationID uint32\n}\n\n\/\/ SetHeader represents set header fields\ntype SetHeader struct {\n\tID uint16\n\tLength uint16\n}\n\ntype DataSet struct {\n\tHandShake Hs\n\tSIP Su\n}\n\ntype Hs struct {\n\tMaVer uint16\n\tMiVer uint16\n\tCFlags1 uint16\n\tCFlags2 uint16\n\tSFlags uint16\n\tTimeout uint16\n\tSystemID uint32\n\tProduct uint16\n\tSMaVer uint8\n\tSMiVer uint8\n\tRevision uint8\n\tHostnameLen uint8\n\tHostname []byte\n}\n\n\/\/ IPFIX is the Oracle IPFIX Handshake Message\ntype IPFIX struct {\n\tHeader IpfixHeader\n\tSet SetHeader\n\tData DataSet\n}\n\ntype Su struct {\n\tTimeSec uint32\n\tTimeMic uint32\n\tIntSlot uint8\n\tIntPort uint8\n\tIntVlan uint16\n\tCallIDLen uint8\n\tCallID []byte\n\tCallIDEnd uint8\n\tIPlen uint16\n\tVL uint8\n\tTOS uint8\n\tTLen uint16\n\tTID uint16\n\tTFlags uint16\n\tTTL uint8\n\tTProto uint8\n\tTPos uint16\n\tSrcIP uint32\n\tDstIP uint32\n\tDstPort uint16\n\tSrcPort uint16\n\tUDPlen uint16\n\tMsgLen uint16\n\tSipMsg []byte\n}\n\n\/\/ BufToInt8 casts []byte to []int8\nfunc BufToInt8(b []byte) []int8 {\n\tbi := make([]int8, len(b))\n\tfor i, v := range b {\n\t\tbi[i] = int8(v)\n\t}\n\treturn bi\n}\n\n\/\/ NewIPFIX fills the IPFIX struct with structured binary data from r\nfunc NewIPFIX(header []byte) *IPFIX {\n\tvar ni IPFIX\n\trni := bytes.NewReader(header)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.Version)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.Length)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.ExportTime)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.SeqNum)\n\tbinary.Read(rni, binary.BigEndian, &ni.Header.ObservationID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Set.ID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Set.Length)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.MaVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.MiVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.CFlags1)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.CFlags2)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SFlags)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Timeout)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SystemID)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Product)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SMaVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.SMiVer)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Revision)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.HostnameLen)\n\tni.Data.HandShake.Hostname = make([]byte, ni.Data.HandShake.HostnameLen)\n\tbinary.Read(rni, binary.BigEndian, &ni.Data.HandShake.Hostname)\n\treturn &ni\n}\n\nfunc NewSendSipUDP(header []byte) *IPFIX {\n\tvar niu IPFIX\n\trniu := bytes.NewReader(header)\n\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.Version)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.Length)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.ExportTime)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.SeqNum)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Header.ObservationID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Set.ID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Set.Length)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TimeSec)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TimeMic)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntSlot)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IntVlan)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallIDLen)\n\tniu.Data.SIP.CallID = make([]byte, niu.Data.SIP.CallIDLen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.CallIDEnd)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.IPlen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.VL)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TOS)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TLen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TID)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TFlags)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TTL)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TProto)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.TPos)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SrcIP)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.DstIP)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.DstPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SrcPort)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.UDPlen)\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.MsgLen)\n\t\/*\n\t\tfmt.Println(rniu.Len())\n\t\tfmt.Println(\"headerlen:\", len(header))\n\t*\/\n\tniu.Data.SIP.SipMsg = make([]byte, rniu.Len())\n\tbinary.Read(rniu, binary.BigEndian, &niu.Data.SIP.SipMsg)\n\n\treturn &niu\n}\n\n\/\/ HandShake writes the binary IPFIX representation into the buffer\nfunc (fi *IPFIX) HandShake() []byte {\n\tbuf := new(bytes.Buffer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.Version)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.Length)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.ExportTime)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.SeqNum)\n\tbinary.Write(buf, binary.BigEndian, &fi.Header.ObservationID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Set.ID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Set.Length)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.MaVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.MiVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.CFlags1)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.CFlags2)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SFlags)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Timeout)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SystemID)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Product)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SMaVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.SMiVer)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Revision)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.HostnameLen)\n\tbinary.Write(buf, binary.BigEndian, &fi.Data.HandShake.Hostname)\n\n\treturn buf.Bytes()\n}\n\nfunc checkError(err error) {\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Fatal error: %s\", err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc SyncClient(conn net.Conn) {\n\tfmt.Println(\"Handling new connection...\")\n\n\t\/\/ Close connection when this function ends\n\tdefer func() {\n\t\tfmt.Println(\"Closing connection...\")\n\t\tconn.Close()\n\t}()\n\n\ths := make([]byte, 512)\n\tn, err := conn.Read(hs)\n\tcheckError(err)\n\n\tpacket := NewIPFIX(hs[:n])\n\tif packet.Set.ID == 256 {\n\t\tpacket.Set.ID++\n\t}\n\n\tfmt.Printf(\"Write: %#v\\n\", BufToInt8(packet.HandShake()))\n\n\tif packet.Set.ID == 257 && packet.Header.Length > 20 {\n\t\tbinary.Write(conn, binary.BigEndian, BufToInt8(packet.HandShake()))\n\t}\n\n\t\/\/bufReader := bufio.NewReader(conn)\n\n\tfor {\n\t\tb := make([]byte, 4096)\n\t\tn, err := conn.Read(b)\n\t\tcheckError(err)\n\n\t\tdata := NewSendSipUDP(b[:n])\n\n\t\t\/*\t\t\/\/ Read IPFIX Messages delimited by newline\n\t\t\t\tbytes, err := bufReader.ReadBytes('\\n')\n\t\t\t\tcheckError(err)*\/\n\n\t\t\/\/fmt.Printf(\"%s\", (binary.BigEndian.Uint16(bytes)))\n\t\tfmt.Printf(\"%s\", data.Data.SIP)\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package carbonserver\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/go-graphite\/carbonzipper\/zipper\/httpHeaders\"\n\tprotov2 \"github.com\/go-graphite\/protocol\/carbonapi_v2_pb\"\n\tprotov3 \"github.com\/go-graphite\/protocol\/carbonapi_v3_pb\"\n\tpickle \"github.com\/lomik\/og-rek\"\n)\n\ntype findResponse struct {\n\tdata []byte\n\tcontentType string\n\tfiles int\n}\n\nfunc (listener *CarbonserverListener) findHandler(wr http.ResponseWriter, req *http.Request) {\n\t\/\/ URL: \/metrics\/find\/?local=1&format=pickle&query=the.metric.path.with.glob\n\n\tt0 := time.Now()\n\tctx := req.Context()\n\n\tatomic.AddUint64(&listener.metrics.FindRequests, 1)\n\n\tformat := req.FormValue(\"format\")\n\tquery := req.Form[\"query\"]\n\n\tvar response *findResponse\n\n\tlogger := TraceContextToZap(ctx, listener.logger.With(\n\t\tzap.String(\"handler\", \"find\"),\n\t\tzap.String(\"url\", req.URL.RequestURI()),\n\t\tzap.String(\"peer\", req.RemoteAddr),\n\t))\n\n\taccessLogger := TraceContextToZap(ctx, listener.accessLogger.With(\n\t\tzap.String(\"handler\", \"find\"),\n\t\tzap.String(\"url\", req.URL.RequestURI()),\n\t\tzap.String(\"peer\", req.RemoteAddr),\n\t))\n\n\taccepts := req.Header[\"Accept\"]\n\tfor _, accept := range accepts {\n\t\tif accept == httpHeaders.ContentTypeCarbonAPIv3PB {\n\t\t\tformat = \"carbonapi_v3_pb\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif format == \"\" {\n\t\tformat = \"json\"\n\t}\n\n\tformatCode, ok := knownFormats[format]\n\tif !ok {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", \"unsupported format\"),\n\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t)\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif formatCode == protoV3Format {\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\taccessLogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", err.Error()),\n\t\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t\t)\n\t\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t}\n\n\t\tvar pv3Request protov3.MultiGlobRequest\n\t\tpv3Request.Unmarshal(body)\n\n\t\tfmt.Printf(\"\\n\\n%+v\\n\\n\", pv3Request)\n\n\t\tquery = pv3Request.Metrics\n\t}\n\n\tlogger = logger.With(\n\t\tzap.Strings(\"query\", query),\n\t\tzap.String(\"format\", format),\n\t)\n\n\taccessLogger = accessLogger.With(\n\t\tzap.Strings(\"query\", query),\n\t\tzap.String(\"format\", format),\n\t)\n\n\tif len(query) == 0 {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", \"empty query\"),\n\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t)\n\t\thttp.Error(wr, \"Bad request (no query)\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar err error\n\tfromCache := false\n\t\/\/rch := make(chan *findResponse)\n\t\/\/response := make(chan findResponse)\n\t\/\/var resp findResponse\n\tif listener.findCacheEnabled {\n\t\tkey := strings.Join(query, \",\") + \"&\" + format\n\t\tsize := uint64(100 * 1024 * 1024)\n\t\titem := listener.findCache.getQueryItem(key, size, 300)\n\t\tres, ok := item.FetchOrLock()\n\t\tlistener.prometheus.cacheRequest(\"find\", ok)\n\t\tif !ok {\n\t\t\tlogger.Debug(\"find cache miss\")\n\t\t\tatomic.AddUint64(&listener.metrics.FindCacheMiss, 1)\n\t\t\tresponse, err = listener.findMetrics(ctx, logger, t0, formatCode, query)\n\t\t\tif err != nil {\n\t\t\t\titem.StoreAbort()\n\t\t\t} else {\n\t\t\t\titem.StoreAndUnlock(response)\n\t\t\t}\n\t\t} else if res != nil {\n\t\t\tlogger.Debug(\"query cache hit\")\n\t\t\tatomic.AddUint64(&listener.metrics.FindCacheHit, 1)\n\t\t\tresponse = res.(*findResponse)\n\t\t\tfromCache = true\n\t\t}\n\t} else {\n\t\tresponse, err = listener.findMetrics(ctx, logger, t0, formatCode, query)\n\t}\n\n\tif err != nil || response == nil {\n\t\tvar code int\n\t\tvar reason string\n\t\tif _, ok := err.(errorNotFound); ok {\n\t\t\treason = \"Not Found\"\n\t\t\tcode = http.StatusNotFound\n\t\t} else {\n\t\t\treason = \"Internal error while processing request\"\n\t\t\tcode = http.StatusInternalServerError\n\t\t}\n\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", reason),\n\t\t\tzap.Error(err),\n\t\t\tzap.Int(\"http_code\", code),\n\t\t)\n\t\thttp.Error(wr, fmt.Sprintf(\"%s (%v)\", reason, err), code)\n\n\t\treturn\n\t}\n\n\twr.Header().Set(\"Content-Type\", response.contentType)\n\twr.Write(response.data)\n\n\tif response.files == 0 {\n\t\t\/\/ to get an idea how often we search for nothing\n\t\tatomic.AddUint64(&listener.metrics.FindZero, 1)\n\t}\n\n\taccessLogger.Info(\"find success\",\n\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\tzap.Int(\"Files\", response.files),\n\t\tzap.Bool(\"find_cache_enabled\", listener.findCacheEnabled),\n\t\tzap.Bool(\"from_cache\", fromCache),\n\t\tzap.Int(\"http_code\", http.StatusOK),\n\t)\n\treturn\n}\n\ntype errorNotFound struct{}\n\nfunc (err errorNotFound) Error() string {\n\treturn \"Not Found\"\n}\n\ntype findError struct {\n\tname string\n\terr error\n}\n\ntype globs struct {\n\tName string\n\tFiles []string\n\tLeafs []bool\n}\n\nfunc (listener *CarbonserverListener) findMetrics(ctx context.Context, logger *zap.Logger, t0 time.Time, format responseFormat, names []string) (*findResponse, error) {\n\tvar result findResponse\n\tmetricsCount := uint64(0)\n\texpandedGlobs, err := listener.getExpandedGlobs(ctx, logger, t0, names)\n\tif expandedGlobs == nil {\n\t\treturn nil, err\n\t}\n\tatomic.AddUint64(&listener.metrics.MetricsFound, metricsCount)\n\tswitch format {\n\tcase protoV3Format, jsonFormat:\n\t\tvar err error\n\t\tmultiResponse := protov3.MultiGlobResponse{}\n\t\tfor _, glob := range expandedGlobs {\n\t\t\tresult.files += len(glob.Files)\n\t\t\tresponse := protov3.GlobResponse{\n\t\t\t\tName: glob.Name,\n\t\t\t\tMatches: make([]protov3.GlobMatch, 0),\n\t\t\t}\n\n\t\t\tfor i, p := range glob.Files {\n\t\t\t\tif glob.Leafs[i] {\n\t\t\t\t\tmetricsCount++\n\t\t\t\t}\n\t\t\t\tresponse.Matches = append(response.Matches, protov3.GlobMatch{Path: p, IsLeaf: glob.Leafs[i]})\n\t\t\t}\n\t\t\tmultiResponse.Metrics = append(multiResponse.Metrics, response)\n\t\t}\n\n\t\tlogger.Debug(\"will send out response\",\n\t\t\tzap.Any(\"response\", multiResponse),\n\t\t)\n\n\t\tswitch format {\n\t\tcase jsonFormat:\n\t\t\tresult.contentType = httpHeaders.ContentTypeJSON\n\t\t\tresult.data, err = json.Marshal(multiResponse)\n\t\tcase protoV3Format:\n\t\t\tresult.contentType = httpHeaders.ContentTypeCarbonAPIv3PB\n\t\t\tresult.data, err = multiResponse.Marshal()\n\t\t}\n\n\t\tif err != nil {\n\t\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"response encode failed\"),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(multiResponse.Metrics) == 0 {\n\t\t\treturn nil, errorNotFound{}\n\t\t}\n\n\t\treturn &result, err\n\n\tcase protoV2Format:\n\t\tresult.contentType = httpHeaders.ContentTypeProtobuf\n\t\tvar err error\n\t\tresponse := protov2.GlobResponse{\n\t\t\tName: names[0],\n\t\t\tMatches: make([]protov2.GlobMatch, 0),\n\t\t}\n\n\t\tresult.files += len(expandedGlobs[0].Files)\n\t\tfor i, p := range expandedGlobs[0].Files {\n\t\t\tif expandedGlobs[0].Leafs[i] {\n\t\t\t\tmetricsCount++\n\t\t\t}\n\t\t\tresponse.Matches = append(response.Matches, protov2.GlobMatch{Path: p, IsLeaf: expandedGlobs[0].Leafs[i]})\n\t\t}\n\t\tresult.data, err = response.Marshal()\n\t\tresult.contentType = httpHeaders.ContentTypeProtobuf\n\n\t\tif err != nil {\n\t\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"response encode failed\"),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(response.Matches) == 0 {\n\t\t\treturn nil, errorNotFound{}\n\t\t}\n\n\t\treturn &result, err\n\n\tcase pickleFormat:\n\t\t\/\/ [{'metric_path': 'metric', 'intervals': [(x,y)], 'isLeaf': True},]\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\t\tfiles := 0\n\n\t\tglob := expandedGlobs[0]\n\t\tfiles += len(glob.Files)\n\t\tfor i, p := range glob.Files {\n\t\t\tif glob.Leafs[i] {\n\t\t\t\tmetricsCount++\n\t\t\t}\n\t\t\tm = make(map[string]interface{})\n\t\t\tm[\"metric_path\"] = p\n\t\t\tm[\"isLeaf\"] = glob.Leafs[i]\n\n\t\t\tmetrics = append(metrics, m)\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tpEnc := pickle.NewEncoder(&buf)\n\t\tpEnc.Encode(metrics)\n\t\treturn &findResponse{buf.Bytes(), httpHeaders.ContentTypePickle, files}, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (listener *CarbonserverListener) getExpandedGlobs(ctx context.Context, logger *zap.Logger, t0 time.Time, names []string) ([]globs, error) {\n\tvar expandedGlobs []globs\n\tvar errors []findError\n\tvar err error\n\texpGlobResultChan := make(chan *ExpandedGlobResponse, len(names))\n\n\tfor _, name := range names {\n\t\tgo listener.expandGlobs(name, expGlobResultChan)\n\t}\n\tresponseCount := 0\nGATHER:\n\tfor {\n\t\tselect {\n\t\tcase expandedResult := <-expGlobResultChan:\n\t\t\tresponseCount++\n\t\t\tglob := globs{\n\t\t\t\tName: expandedResult.Name,\n\t\t\t}\n\t\t\tglob.Files, glob.Leafs, err = expandedResult.Files, expandedResult.Leafs, expandedResult.Err\n\t\t\tif err != nil {\n\t\t\t\terrors = append(errors, findError{name: expandedResult.Name, err: err})\n\t\t\t}\n\t\t\texpandedGlobs = append(expandedGlobs, glob)\n\t\t\tif responseCount == len(names) {\n\t\t\t\tbreak GATHER\n\t\t\t}\n\n\t\tcase <-ctx.Done():\n\t\t\tdone <- struct{}{}\n\t\t\treturn nil, fmt.Errorf(\"find failed, timeout\")\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, uint64(len(errors)))\n\n\t\tif len(errors) == len(names) {\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"can't expand globs\"),\n\t\t\t\tzap.Any(\"errors\", errors),\n\t\t\t)\n\t\t\treturn nil, fmt.Errorf(\"find failed, can't expand globs\")\n\t\t} else {\n\t\t\tlogger.Warn(\"find partly failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"can't expand globs for some metrics\"),\n\t\t\t\tzap.Any(\"errors\", errors),\n\t\t\t)\n\t\t}\n\t}\n\n\tlogger.Debug(\"expandGlobs result\",\n\t\tzap.String(\"action\", \"expandGlobs\"),\n\t\tzap.Strings(\"metrics\", names),\n\t\t\/\/zap.String(\"format\", format.String()),\n\t\tzap.Any(\"result\", expandedGlobs),\n\t)\n\treturn expandedGlobs, nil\n}\n<commit_msg>Remove done channel.<commit_after>package carbonserver\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/go-graphite\/carbonzipper\/zipper\/httpHeaders\"\n\tprotov2 \"github.com\/go-graphite\/protocol\/carbonapi_v2_pb\"\n\tprotov3 \"github.com\/go-graphite\/protocol\/carbonapi_v3_pb\"\n\tpickle \"github.com\/lomik\/og-rek\"\n)\n\ntype findResponse struct {\n\tdata []byte\n\tcontentType string\n\tfiles int\n}\n\nfunc (listener *CarbonserverListener) findHandler(wr http.ResponseWriter, req *http.Request) {\n\t\/\/ URL: \/metrics\/find\/?local=1&format=pickle&query=the.metric.path.with.glob\n\n\tt0 := time.Now()\n\tctx := req.Context()\n\n\tatomic.AddUint64(&listener.metrics.FindRequests, 1)\n\n\tformat := req.FormValue(\"format\")\n\tquery := req.Form[\"query\"]\n\n\tvar response *findResponse\n\n\tlogger := TraceContextToZap(ctx, listener.logger.With(\n\t\tzap.String(\"handler\", \"find\"),\n\t\tzap.String(\"url\", req.URL.RequestURI()),\n\t\tzap.String(\"peer\", req.RemoteAddr),\n\t))\n\n\taccessLogger := TraceContextToZap(ctx, listener.accessLogger.With(\n\t\tzap.String(\"handler\", \"find\"),\n\t\tzap.String(\"url\", req.URL.RequestURI()),\n\t\tzap.String(\"peer\", req.RemoteAddr),\n\t))\n\n\taccepts := req.Header[\"Accept\"]\n\tfor _, accept := range accepts {\n\t\tif accept == httpHeaders.ContentTypeCarbonAPIv3PB {\n\t\t\tformat = \"carbonapi_v3_pb\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif format == \"\" {\n\t\tformat = \"json\"\n\t}\n\n\tformatCode, ok := knownFormats[format]\n\tif !ok {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", \"unsupported format\"),\n\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t)\n\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif formatCode == protoV3Format {\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\taccessLogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", err.Error()),\n\t\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t\t)\n\t\t\thttp.Error(wr, \"Bad request (unsupported format)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t}\n\n\t\tvar pv3Request protov3.MultiGlobRequest\n\t\tpv3Request.Unmarshal(body)\n\n\t\tfmt.Printf(\"\\n\\n%+v\\n\\n\", pv3Request)\n\n\t\tquery = pv3Request.Metrics\n\t}\n\n\tlogger = logger.With(\n\t\tzap.Strings(\"query\", query),\n\t\tzap.String(\"format\", format),\n\t)\n\n\taccessLogger = accessLogger.With(\n\t\tzap.Strings(\"query\", query),\n\t\tzap.String(\"format\", format),\n\t)\n\n\tif len(query) == 0 {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", \"empty query\"),\n\t\t\tzap.Int(\"http_code\", http.StatusBadRequest),\n\t\t)\n\t\thttp.Error(wr, \"Bad request (no query)\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar err error\n\tfromCache := false\n\t\/\/rch := make(chan *findResponse)\n\t\/\/response := make(chan findResponse)\n\t\/\/var resp findResponse\n\tif listener.findCacheEnabled {\n\t\tkey := strings.Join(query, \",\") + \"&\" + format\n\t\tsize := uint64(100 * 1024 * 1024)\n\t\titem := listener.findCache.getQueryItem(key, size, 300)\n\t\tres, ok := item.FetchOrLock()\n\t\tlistener.prometheus.cacheRequest(\"find\", ok)\n\t\tif !ok {\n\t\t\tlogger.Debug(\"find cache miss\")\n\t\t\tatomic.AddUint64(&listener.metrics.FindCacheMiss, 1)\n\t\t\tresponse, err = listener.findMetrics(ctx, logger, t0, formatCode, query)\n\t\t\tif err != nil {\n\t\t\t\titem.StoreAbort()\n\t\t\t} else {\n\t\t\t\titem.StoreAndUnlock(response)\n\t\t\t}\n\t\t} else if res != nil {\n\t\t\tlogger.Debug(\"query cache hit\")\n\t\t\tatomic.AddUint64(&listener.metrics.FindCacheHit, 1)\n\t\t\tresponse = res.(*findResponse)\n\t\t\tfromCache = true\n\t\t}\n\t} else {\n\t\tresponse, err = listener.findMetrics(ctx, logger, t0, formatCode, query)\n\t}\n\n\tif err != nil || response == nil {\n\t\tvar code int\n\t\tvar reason string\n\t\tif _, ok := err.(errorNotFound); ok {\n\t\t\treason = \"Not Found\"\n\t\t\tcode = http.StatusNotFound\n\t\t} else {\n\t\t\treason = \"Internal error while processing request\"\n\t\t\tcode = http.StatusInternalServerError\n\t\t}\n\n\t\taccessLogger.Error(\"find failed\",\n\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\tzap.String(\"reason\", reason),\n\t\t\tzap.Error(err),\n\t\t\tzap.Int(\"http_code\", code),\n\t\t)\n\t\thttp.Error(wr, fmt.Sprintf(\"%s (%v)\", reason, err), code)\n\n\t\treturn\n\t}\n\n\twr.Header().Set(\"Content-Type\", response.contentType)\n\twr.Write(response.data)\n\n\tif response.files == 0 {\n\t\t\/\/ to get an idea how often we search for nothing\n\t\tatomic.AddUint64(&listener.metrics.FindZero, 1)\n\t}\n\n\taccessLogger.Info(\"find success\",\n\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\tzap.Int(\"Files\", response.files),\n\t\tzap.Bool(\"find_cache_enabled\", listener.findCacheEnabled),\n\t\tzap.Bool(\"from_cache\", fromCache),\n\t\tzap.Int(\"http_code\", http.StatusOK),\n\t)\n\treturn\n}\n\ntype errorNotFound struct{}\n\nfunc (err errorNotFound) Error() string {\n\treturn \"Not Found\"\n}\n\ntype findError struct {\n\tname string\n\terr error\n}\n\ntype globs struct {\n\tName string\n\tFiles []string\n\tLeafs []bool\n}\n\nfunc (listener *CarbonserverListener) findMetrics(ctx context.Context, logger *zap.Logger, t0 time.Time, format responseFormat, names []string) (*findResponse, error) {\n\tvar result findResponse\n\tmetricsCount := uint64(0)\n\texpandedGlobs, err := listener.getExpandedGlobs(ctx, logger, t0, names)\n\tif expandedGlobs == nil {\n\t\treturn nil, err\n\t}\n\tatomic.AddUint64(&listener.metrics.MetricsFound, metricsCount)\n\tswitch format {\n\tcase protoV3Format, jsonFormat:\n\t\tvar err error\n\t\tmultiResponse := protov3.MultiGlobResponse{}\n\t\tfor _, glob := range expandedGlobs {\n\t\t\tresult.files += len(glob.Files)\n\t\t\tresponse := protov3.GlobResponse{\n\t\t\t\tName: glob.Name,\n\t\t\t\tMatches: make([]protov3.GlobMatch, 0),\n\t\t\t}\n\n\t\t\tfor i, p := range glob.Files {\n\t\t\t\tif glob.Leafs[i] {\n\t\t\t\t\tmetricsCount++\n\t\t\t\t}\n\t\t\t\tresponse.Matches = append(response.Matches, protov3.GlobMatch{Path: p, IsLeaf: glob.Leafs[i]})\n\t\t\t}\n\t\t\tmultiResponse.Metrics = append(multiResponse.Metrics, response)\n\t\t}\n\n\t\tlogger.Debug(\"will send out response\",\n\t\t\tzap.Any(\"response\", multiResponse),\n\t\t)\n\n\t\tswitch format {\n\t\tcase jsonFormat:\n\t\t\tresult.contentType = httpHeaders.ContentTypeJSON\n\t\t\tresult.data, err = json.Marshal(multiResponse)\n\t\tcase protoV3Format:\n\t\t\tresult.contentType = httpHeaders.ContentTypeCarbonAPIv3PB\n\t\t\tresult.data, err = multiResponse.Marshal()\n\t\t}\n\n\t\tif err != nil {\n\t\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"response encode failed\"),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(multiResponse.Metrics) == 0 {\n\t\t\treturn nil, errorNotFound{}\n\t\t}\n\n\t\treturn &result, err\n\n\tcase protoV2Format:\n\t\tresult.contentType = httpHeaders.ContentTypeProtobuf\n\t\tvar err error\n\t\tresponse := protov2.GlobResponse{\n\t\t\tName: names[0],\n\t\t\tMatches: make([]protov2.GlobMatch, 0),\n\t\t}\n\n\t\tresult.files += len(expandedGlobs[0].Files)\n\t\tfor i, p := range expandedGlobs[0].Files {\n\t\t\tif expandedGlobs[0].Leafs[i] {\n\t\t\t\tmetricsCount++\n\t\t\t}\n\t\t\tresponse.Matches = append(response.Matches, protov2.GlobMatch{Path: p, IsLeaf: expandedGlobs[0].Leafs[i]})\n\t\t}\n\t\tresult.data, err = response.Marshal()\n\t\tresult.contentType = httpHeaders.ContentTypeProtobuf\n\n\t\tif err != nil {\n\t\t\tatomic.AddUint64(&listener.metrics.FindErrors, 1)\n\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"response encode failed\"),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(response.Matches) == 0 {\n\t\t\treturn nil, errorNotFound{}\n\t\t}\n\n\t\treturn &result, err\n\n\tcase pickleFormat:\n\t\t\/\/ [{'metric_path': 'metric', 'intervals': [(x,y)], 'isLeaf': True},]\n\t\tvar metrics []map[string]interface{}\n\t\tvar m map[string]interface{}\n\t\tfiles := 0\n\n\t\tglob := expandedGlobs[0]\n\t\tfiles += len(glob.Files)\n\t\tfor i, p := range glob.Files {\n\t\t\tif glob.Leafs[i] {\n\t\t\t\tmetricsCount++\n\t\t\t}\n\t\t\tm = make(map[string]interface{})\n\t\t\tm[\"metric_path\"] = p\n\t\t\tm[\"isLeaf\"] = glob.Leafs[i]\n\n\t\t\tmetrics = append(metrics, m)\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\tpEnc := pickle.NewEncoder(&buf)\n\t\tpEnc.Encode(metrics)\n\t\treturn &findResponse{buf.Bytes(), httpHeaders.ContentTypePickle, files}, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (listener *CarbonserverListener) getExpandedGlobs(ctx context.Context, logger *zap.Logger, t0 time.Time, names []string) ([]globs, error) {\n\tvar expandedGlobs []globs\n\tvar errors []findError\n\tvar err error\n\texpGlobResultChan := make(chan *ExpandedGlobResponse, len(names))\n\n\tfor _, name := range names {\n\t\tgo listener.expandGlobs(name, expGlobResultChan)\n\t}\n\tresponseCount := 0\nGATHER:\n\tfor {\n\t\tselect {\n\t\tcase expandedResult := <-expGlobResultChan:\n\t\t\tresponseCount++\n\t\t\tglob := globs{\n\t\t\t\tName: expandedResult.Name,\n\t\t\t}\n\t\t\tglob.Files, glob.Leafs, err = expandedResult.Files, expandedResult.Leafs, expandedResult.Err\n\t\t\tif err != nil {\n\t\t\t\terrors = append(errors, findError{name: expandedResult.Name, err: err})\n\t\t\t}\n\t\t\texpandedGlobs = append(expandedGlobs, glob)\n\t\t\tif responseCount == len(names) {\n\t\t\t\tbreak GATHER\n\t\t\t}\n\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, fmt.Errorf(\"find failed, timeout\")\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\tatomic.AddUint64(&listener.metrics.FindErrors, uint64(len(errors)))\n\n\t\tif len(errors) == len(names) {\n\t\t\tlogger.Error(\"find failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"can't expand globs\"),\n\t\t\t\tzap.Any(\"errors\", errors),\n\t\t\t)\n\t\t\treturn nil, fmt.Errorf(\"find failed, can't expand globs\")\n\t\t} else {\n\t\t\tlogger.Warn(\"find partly failed\",\n\t\t\t\tzap.Duration(\"runtime_seconds\", time.Since(t0)),\n\t\t\t\tzap.String(\"reason\", \"can't expand globs for some metrics\"),\n\t\t\t\tzap.Any(\"errors\", errors),\n\t\t\t)\n\t\t}\n\t}\n\n\tlogger.Debug(\"expandGlobs result\",\n\t\tzap.String(\"action\", \"expandGlobs\"),\n\t\tzap.Strings(\"metrics\", names),\n\t\t\/\/zap.String(\"format\", format.String()),\n\t\tzap.Any(\"result\", expandedGlobs),\n\t)\n\treturn expandedGlobs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Miek Gieben. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dns\n\n\/\/ A client implementation.\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n)\n\nconst dnsTimeout time.Duration = 2 * 1e9\nconst tcpIdleTimeout time.Duration = 8 * time.Second\n\n\/\/ A Conn represents a connection to a DNS server.\ntype Conn struct {\n\tnet.Conn \/\/ a net.Conn holding the connection\n\tUDPSize uint16 \/\/ Minimum receive buffer for UDP messages\n\tTsigSecret map[string]string \/\/ Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified\n\trtt time.Duration\n\tt time.Time\n\ttsigRequestMAC string\n}\n\n\/\/ A Client defines parameters for a DNS client.\ntype Client struct {\n\tNet string \/\/ if \"tcp\" a TCP query will be initiated, otherwise an UDP one (default is \"\" for UDP)\n\tDialTimeout time.Duration \/\/ net.DialTimeout (ns), defaults to 2 * 1e9\n\tReadTimeout time.Duration \/\/ net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9\n\tWriteTimeout time.Duration \/\/ net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9\n\tTsigSecret map[string]string \/\/ secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified\n\tSingleInflight bool \/\/ if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass\n\tgroup singleflight\n}\n\n\/\/ Exchange performs a synchronous UDP query. It sends the message m to the address\n\/\/ contained in a and waits for an reply. Exchange does not retry a failed query, nor\n\/\/ will it fall back to TCP in case of truncation.\nfunc Exchange(m *Msg, a string) (r *Msg, err error) {\n\tvar co *Conn\n\tco, err = DialTimeout(\"udp\", a, dnsTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer co.Close()\n\tco.SetReadDeadline(time.Now().Add(dnsTimeout))\n\tco.SetWriteDeadline(time.Now().Add(dnsTimeout))\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, err\n}\n\n\/\/ ExchangeConn performs a synchronous query. It sends the message m via the connection\n\/\/ c and waits for a reply. The connection c is not closed by ExchangeConn.\n\/\/ This function is going away, but can easily be mimicked:\n\/\/\n\/\/\tco := &dns.Conn{Conn: c} \/\/ c is your net.Conn\n\/\/\tco.WriteMsg(m)\n\/\/\tin, _ := co.ReadMsg()\n\/\/\tco.Close()\n\/\/\nfunc ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {\n\tprintln(\"dns: this function is deprecated\")\n\tco := new(Conn)\n\tco.Conn = c\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, err\n}\n\n\/\/ Exchange performs an synchronous query. It sends the message m to the address\n\/\/ contained in a and waits for an reply. Basic use pattern with a *dns.Client:\n\/\/\n\/\/\tc := new(dns.Client)\n\/\/\tin, rtt, err := c.Exchange(message, \"127.0.0.1:53\")\n\/\/\n\/\/ Exchange does not retry a failed query, nor will it fall back to TCP in\n\/\/ case of truncation.\nfunc (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {\n\tif !c.SingleInflight {\n\t\treturn c.exchange(m, a)\n\t}\n\t\/\/ This adds a bunch of garbage, TODO(miek).\n\tt := \"nop\"\n\tif t1, ok := TypeToString[m.Question[0].Qtype]; ok {\n\t\tt = t1\n\t}\n\tcl := \"nop\"\n\tif cl1, ok := ClassToString[m.Question[0].Qclass]; ok {\n\t\tcl = cl1\n\t}\n\tr, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {\n\t\treturn c.exchange(m, a)\n\t})\n\tif err != nil {\n\t\treturn r, rtt, err\n\t}\n\tif shared {\n\t\tr1 := r.copy()\n\t\tr = r1\n\t}\n\treturn r, rtt, nil\n}\n\nfunc (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {\n\ttimeout := dnsTimeout\n\tvar co *Conn\n\tif c.DialTimeout != 0 {\n\t\ttimeout = c.DialTimeout\n\t}\n\tif c.Net == \"\" {\n\t\tco, err = DialTimeout(\"udp\", a, timeout)\n\t} else {\n\t\tco, err = DialTimeout(c.Net, a, timeout)\n\t}\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\ttimeout = dnsTimeout\n\tif c.ReadTimeout != 0 {\n\t\ttimeout = c.ReadTimeout\n\t}\n\tco.SetReadDeadline(time.Now().Add(timeout))\n\ttimeout = dnsTimeout\n\tif c.WriteTimeout != 0 {\n\t\ttimeout = c.WriteTimeout\n\t}\n\tco.SetWriteDeadline(time.Now().Add(timeout))\n\tdefer co.Close()\n\topt := m.IsEdns0()\n\tif opt != nil && opt.UDPSize() >= MinMsgSize {\n\t\tco.UDPSize = opt.UDPSize()\n\t}\n\tco.TsigSecret = c.TsigSecret\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, 0, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, co.rtt, err\n}\n\n\/\/ ReadMsg reads a message from the connection co.\n\/\/ If the received message contains a TSIG record the transaction\n\/\/ signature is verified.\nfunc (co *Conn) ReadMsg() (*Msg, error) {\n\tvar p []byte\n\tm := new(Msg)\n\tif _, ok := co.Conn.(*net.TCPConn); ok {\n\t\tp = make([]byte, MaxMsgSize)\n\t} else {\n\t\tif co.UDPSize >= 512 {\n\t\t\tp = make([]byte, co.UDPSize)\n\t\t} else {\n\t\t\tp = make([]byte, MinMsgSize)\n\t\t}\n\t}\n\tn, err := co.Read(p)\n\tif err != nil && n == 0 {\n\t\treturn nil, err\n\t}\n\tp = p[:n]\n\tif err := m.Unpack(p); err != nil {\n\t\treturn nil, err\n\t}\n\tco.rtt = time.Since(co.t)\n\tif t := m.IsTsig(); t != nil {\n\t\tif _, ok := co.TsigSecret[t.Hdr.Name]; !ok {\n\t\t\treturn m, ErrSecret\n\t\t}\n\t\t\/\/ Need to work on the original message p, as that was used to calculate the tsig.\n\t\terr = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)\n\t}\n\treturn m, err\n}\n\n\/\/ Read implements the net.Conn read method.\nfunc (co *Conn) Read(p []byte) (n int, err error) {\n\tif co.Conn == nil {\n\t\treturn 0, ErrConnEmpty\n\t}\n\tif len(p) < 2 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\tif t, ok := co.Conn.(*net.TCPConn); ok {\n\t\tn, err = t.Read(p[0:2])\n\t\tif err != nil || n != 2 {\n\t\t\treturn n, err\n\t\t}\n\t\tl, _ := unpackUint16(p[0:2], 0)\n\t\tif l == 0 {\n\t\t\treturn 0, ErrShortRead\n\t\t}\n\t\tif int(l) > len(p) {\n\t\t\treturn int(l), io.ErrShortBuffer\n\t\t}\n\t\tn, err = t.Read(p[:l])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\ti := n\n\t\tfor i < int(l) {\n\t\t\tj, err := t.Read(p[i:int(l)])\n\t\t\tif err != nil {\n\t\t\t\treturn i, err\n\t\t\t}\n\t\t\ti += j\n\t\t}\n\t\tn = i\n\t\treturn n, err\n\t}\n\t\/\/ assume udp connection\n\tn, err = co.Conn.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, err\n}\n\n\/\/ WriteMsg sends a message throught the connection co.\n\/\/ If the message m contains a TSIG record the transaction\n\/\/ signature is calculated.\nfunc (co *Conn) WriteMsg(m *Msg) (err error) {\n\tvar out []byte\n\tif t := m.IsTsig(); t != nil {\n\t\tmac := \"\"\n\t\tif _, ok := co.TsigSecret[t.Hdr.Name]; !ok {\n\t\t\treturn ErrSecret\n\t\t}\n\t\tout, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)\n\t\t\/\/ Set for the next read, allthough only used in zone transfers\n\t\tco.tsigRequestMAC = mac\n\t} else {\n\t\tout, err = m.Pack()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tco.t = time.Now()\n\tif _, err = co.Write(out); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Write implements the net.Conn Write method.\nfunc (co *Conn) Write(p []byte) (n int, err error) {\n\tif t, ok := co.Conn.(*net.TCPConn); ok {\n\t\tlp := len(p)\n\t\tif lp < 2 {\n\t\t\treturn 0, io.ErrShortBuffer\n\t\t}\n\t\tif lp > MaxMsgSize {\n\t\t\treturn 0, &Error{err: \"message too large\"}\n\t\t}\n\t\tl := make([]byte, 2, lp+2)\n\t\tl[0], l[1] = packUint16(uint16(lp))\n\t\tp = append(l, p...)\n\t\tn, err := io.Copy(t, bytes.NewReader(p))\n\t\treturn int(n), err\n\t}\n\tn, err = co.Conn.(*net.UDPConn).Write(p)\n\treturn n, err\n}\n\n\/\/ Dial connects to the address on the named network.\nfunc Dial(network, address string) (conn *Conn, err error) {\n\tconn = new(Conn)\n\tconn.Conn, err = net.Dial(network, address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n\/\/ Dialtimeout acts like Dial but takes a timeout.\nfunc DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {\n\tconn = new(Conn)\n\tconn.Conn, err = net.DialTimeout(network, address, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n\/\/ Close implements the net.Conn Close method.\nfunc (co *Conn) Close() error { return co.Conn.Close() }\n\n\/\/ LocalAddr implements the net.Conn LocalAddr method.\nfunc (co *Conn) LocalAddr() net.Addr { return co.Conn.LocalAddr() }\n\n\/\/ RemoteAddr implements the net.Conn RemoteAddr method.\nfunc (co *Conn) RemoteAddr() net.Addr { return co.Conn.RemoteAddr() }\n\n\/\/ SetDeadline implements the net.Conn SetDeadline method.\nfunc (co *Conn) SetDeadline(t time.Time) error { return co.Conn.SetDeadline(t) }\n\n\/\/ SetReadDeadline implements the net.Conn SetReadDeadline method.\nfunc (co *Conn) SetReadDeadline(t time.Time) error { return co.Conn.SetReadDeadline(t) }\n\n\/\/ SetWriteDeadline implements the net.Conn SetWriteDeadline method.\nfunc (co *Conn) SetWriteDeadline(t time.Time) error { return co.Conn.SetWriteDeadline(t) }\n<commit_msg>Cleanup singleFlight a bit<commit_after>\/\/ Copyright 2011 Miek Gieben. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dns\n\n\/\/ A client implementation.\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n)\n\nconst dnsTimeout time.Duration = 2 * 1e9\nconst tcpIdleTimeout time.Duration = 8 * time.Second\n\n\/\/ A Conn represents a connection to a DNS server.\ntype Conn struct {\n\tnet.Conn \/\/ a net.Conn holding the connection\n\tUDPSize uint16 \/\/ Minimum receive buffer for UDP messages\n\tTsigSecret map[string]string \/\/ Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified\n\trtt time.Duration\n\tt time.Time\n\ttsigRequestMAC string\n}\n\n\/\/ A Client defines parameters for a DNS client.\ntype Client struct {\n\tNet string \/\/ if \"tcp\" a TCP query will be initiated, otherwise an UDP one (default is \"\" for UDP)\n\tDialTimeout time.Duration \/\/ net.DialTimeout (ns), defaults to 2 * 1e9\n\tReadTimeout time.Duration \/\/ net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9\n\tWriteTimeout time.Duration \/\/ net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9\n\tTsigSecret map[string]string \/\/ secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified\n\tSingleInflight bool \/\/ if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass\n\tgroup singleflight\n}\n\n\/\/ Exchange performs a synchronous UDP query. It sends the message m to the address\n\/\/ contained in a and waits for an reply. Exchange does not retry a failed query, nor\n\/\/ will it fall back to TCP in case of truncation.\nfunc Exchange(m *Msg, a string) (r *Msg, err error) {\n\tvar co *Conn\n\tco, err = DialTimeout(\"udp\", a, dnsTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer co.Close()\n\tco.SetReadDeadline(time.Now().Add(dnsTimeout))\n\tco.SetWriteDeadline(time.Now().Add(dnsTimeout))\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, err\n}\n\n\/\/ ExchangeConn performs a synchronous query. It sends the message m via the connection\n\/\/ c and waits for a reply. The connection c is not closed by ExchangeConn.\n\/\/ This function is going away, but can easily be mimicked:\n\/\/\n\/\/\tco := &dns.Conn{Conn: c} \/\/ c is your net.Conn\n\/\/\tco.WriteMsg(m)\n\/\/\tin, _ := co.ReadMsg()\n\/\/\tco.Close()\n\/\/\nfunc ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {\n\tprintln(\"dns: this function is deprecated\")\n\tco := new(Conn)\n\tco.Conn = c\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, err\n}\n\n\/\/ Exchange performs an synchronous query. It sends the message m to the address\n\/\/ contained in a and waits for an reply. Basic use pattern with a *dns.Client:\n\/\/\n\/\/\tc := new(dns.Client)\n\/\/\tin, rtt, err := c.Exchange(message, \"127.0.0.1:53\")\n\/\/\n\/\/ Exchange does not retry a failed query, nor will it fall back to TCP in\n\/\/ case of truncation.\nfunc (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {\n\tif !c.SingleInflight {\n\t\treturn c.exchange(m, a)\n\t}\n\t\/\/ This adds a bunch of garbage, TODO(miek).\n\tt := \"nop\"\n\tif t1, ok := TypeToString[m.Question[0].Qtype]; ok {\n\t\tt = t1\n\t}\n\tcl := \"nop\"\n\tif cl1, ok := ClassToString[m.Question[0].Qclass]; ok {\n\t\tcl = cl1\n\t}\n\tr, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {\n\t\treturn c.exchange(m, a)\n\t})\n\tif err != nil {\n\t\treturn r, rtt, err\n\t}\n\tif shared {\n\t\treturn r.copy(), rtt, nil\n\t}\n\treturn r, rtt, nil\n}\n\nfunc (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {\n\ttimeout := dnsTimeout\n\tvar co *Conn\n\tif c.DialTimeout != 0 {\n\t\ttimeout = c.DialTimeout\n\t}\n\tif c.Net == \"\" {\n\t\tco, err = DialTimeout(\"udp\", a, timeout)\n\t} else {\n\t\tco, err = DialTimeout(c.Net, a, timeout)\n\t}\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\ttimeout = dnsTimeout\n\tif c.ReadTimeout != 0 {\n\t\ttimeout = c.ReadTimeout\n\t}\n\tco.SetReadDeadline(time.Now().Add(timeout))\n\ttimeout = dnsTimeout\n\tif c.WriteTimeout != 0 {\n\t\ttimeout = c.WriteTimeout\n\t}\n\tco.SetWriteDeadline(time.Now().Add(timeout))\n\tdefer co.Close()\n\topt := m.IsEdns0()\n\tif opt != nil && opt.UDPSize() >= MinMsgSize {\n\t\tco.UDPSize = opt.UDPSize()\n\t}\n\tco.TsigSecret = c.TsigSecret\n\tif err = co.WriteMsg(m); err != nil {\n\t\treturn nil, 0, err\n\t}\n\tr, err = co.ReadMsg()\n\treturn r, co.rtt, err\n}\n\n\/\/ ReadMsg reads a message from the connection co.\n\/\/ If the received message contains a TSIG record the transaction\n\/\/ signature is verified.\nfunc (co *Conn) ReadMsg() (*Msg, error) {\n\tvar p []byte\n\tm := new(Msg)\n\tif _, ok := co.Conn.(*net.TCPConn); ok {\n\t\tp = make([]byte, MaxMsgSize)\n\t} else {\n\t\tif co.UDPSize >= 512 {\n\t\t\tp = make([]byte, co.UDPSize)\n\t\t} else {\n\t\t\tp = make([]byte, MinMsgSize)\n\t\t}\n\t}\n\tn, err := co.Read(p)\n\tif err != nil && n == 0 {\n\t\treturn nil, err\n\t}\n\tp = p[:n]\n\tif err := m.Unpack(p); err != nil {\n\t\treturn nil, err\n\t}\n\tco.rtt = time.Since(co.t)\n\tif t := m.IsTsig(); t != nil {\n\t\tif _, ok := co.TsigSecret[t.Hdr.Name]; !ok {\n\t\t\treturn m, ErrSecret\n\t\t}\n\t\t\/\/ Need to work on the original message p, as that was used to calculate the tsig.\n\t\terr = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)\n\t}\n\treturn m, err\n}\n\n\/\/ Read implements the net.Conn read method.\nfunc (co *Conn) Read(p []byte) (n int, err error) {\n\tif co.Conn == nil {\n\t\treturn 0, ErrConnEmpty\n\t}\n\tif len(p) < 2 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\tif t, ok := co.Conn.(*net.TCPConn); ok {\n\t\tn, err = t.Read(p[0:2])\n\t\tif err != nil || n != 2 {\n\t\t\treturn n, err\n\t\t}\n\t\tl, _ := unpackUint16(p[0:2], 0)\n\t\tif l == 0 {\n\t\t\treturn 0, ErrShortRead\n\t\t}\n\t\tif int(l) > len(p) {\n\t\t\treturn int(l), io.ErrShortBuffer\n\t\t}\n\t\tn, err = t.Read(p[:l])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\ti := n\n\t\tfor i < int(l) {\n\t\t\tj, err := t.Read(p[i:int(l)])\n\t\t\tif err != nil {\n\t\t\t\treturn i, err\n\t\t\t}\n\t\t\ti += j\n\t\t}\n\t\tn = i\n\t\treturn n, err\n\t}\n\t\/\/ assume udp connection\n\tn, err = co.Conn.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn n, err\n}\n\n\/\/ WriteMsg sends a message throught the connection co.\n\/\/ If the message m contains a TSIG record the transaction\n\/\/ signature is calculated.\nfunc (co *Conn) WriteMsg(m *Msg) (err error) {\n\tvar out []byte\n\tif t := m.IsTsig(); t != nil {\n\t\tmac := \"\"\n\t\tif _, ok := co.TsigSecret[t.Hdr.Name]; !ok {\n\t\t\treturn ErrSecret\n\t\t}\n\t\tout, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)\n\t\t\/\/ Set for the next read, allthough only used in zone transfers\n\t\tco.tsigRequestMAC = mac\n\t} else {\n\t\tout, err = m.Pack()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tco.t = time.Now()\n\tif _, err = co.Write(out); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Write implements the net.Conn Write method.\nfunc (co *Conn) Write(p []byte) (n int, err error) {\n\tif t, ok := co.Conn.(*net.TCPConn); ok {\n\t\tlp := len(p)\n\t\tif lp < 2 {\n\t\t\treturn 0, io.ErrShortBuffer\n\t\t}\n\t\tif lp > MaxMsgSize {\n\t\t\treturn 0, &Error{err: \"message too large\"}\n\t\t}\n\t\tl := make([]byte, 2, lp+2)\n\t\tl[0], l[1] = packUint16(uint16(lp))\n\t\tp = append(l, p...)\n\t\tn, err := io.Copy(t, bytes.NewReader(p))\n\t\treturn int(n), err\n\t}\n\tn, err = co.Conn.(*net.UDPConn).Write(p)\n\treturn n, err\n}\n\n\/\/ Dial connects to the address on the named network.\nfunc Dial(network, address string) (conn *Conn, err error) {\n\tconn = new(Conn)\n\tconn.Conn, err = net.Dial(network, address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n\/\/ Dialtimeout acts like Dial but takes a timeout.\nfunc DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {\n\tconn = new(Conn)\n\tconn.Conn, err = net.DialTimeout(network, address, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n\/\/ Close implements the net.Conn Close method.\nfunc (co *Conn) Close() error { return co.Conn.Close() }\n\n\/\/ LocalAddr implements the net.Conn LocalAddr method.\nfunc (co *Conn) LocalAddr() net.Addr { return co.Conn.LocalAddr() }\n\n\/\/ RemoteAddr implements the net.Conn RemoteAddr method.\nfunc (co *Conn) RemoteAddr() net.Addr { return co.Conn.RemoteAddr() }\n\n\/\/ SetDeadline implements the net.Conn SetDeadline method.\nfunc (co *Conn) SetDeadline(t time.Time) error { return co.Conn.SetDeadline(t) }\n\n\/\/ SetReadDeadline implements the net.Conn SetReadDeadline method.\nfunc (co *Conn) SetReadDeadline(t time.Time) error { return co.Conn.SetReadDeadline(t) }\n\n\/\/ SetWriteDeadline implements the net.Conn SetWriteDeadline method.\nfunc (co *Conn) SetWriteDeadline(t time.Time) error { return co.Conn.SetWriteDeadline(t) }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ ErrBadHandshake is returned when the server response to opening handshake is\n\/\/ invalid.\nvar ErrBadHandshake = errors.New(\"websocket: bad handshake\")\n\nvar errInvalidCompression = errors.New(\"websocket: invalid compression negotiation\")\n\n\/\/ NewClient creates a new client connection using the given net connection.\n\/\/ The URL u specifies the host and request URI. Use requestHeader to specify\n\/\/ the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies\n\/\/ (Cookie). Use the response.Header to get the selected subprotocol\n\/\/ (Sec-WebSocket-Protocol) and cookies (Set-Cookie).\n\/\/\n\/\/ If the WebSocket handshake fails, ErrBadHandshake is returned along with a\n\/\/ non-nil *http.Response so that callers can handle redirects, authentication,\n\/\/ etc.\n\/\/\n\/\/ Deprecated: Use Dialer instead.\nfunc NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {\n\td := Dialer{\n\t\tReadBufferSize: readBufSize,\n\t\tWriteBufferSize: writeBufSize,\n\t\tNetDial: func(net, addr string) (net.Conn, error) {\n\t\t\treturn netConn, nil\n\t\t},\n\t}\n\treturn d.Dial(u.String(), requestHeader)\n}\n\n\/\/ A Dialer contains options for connecting to WebSocket server.\ntype Dialer struct {\n\t\/\/ NetDial specifies the dial function for creating TCP connections. If\n\t\/\/ NetDial is nil, net.Dial is used.\n\tNetDial func(network, addr string) (net.Conn, error)\n\n\t\/\/ Proxy specifies a function to return a proxy for a given\n\t\/\/ Request. If the function returns a non-nil error, the\n\t\/\/ request is aborted with the provided error.\n\t\/\/ If Proxy is nil or returns a nil *URL, no proxy is used.\n\tProxy func(*http.Request) (*url.URL, error)\n\n\t\/\/ TLSClientConfig specifies the TLS configuration to use with tls.Client.\n\t\/\/ If nil, the default configuration is used.\n\tTLSClientConfig *tls.Config\n\n\t\/\/ HandshakeTimeout specifies the duration for the handshake to complete.\n\tHandshakeTimeout time.Duration\n\n\t\/\/ ReadBufferSize and WriteBufferSize specify I\/O buffer sizes. If a buffer\n\t\/\/ size is zero, then a useful default size is used. The I\/O buffer sizes\n\t\/\/ do not limit the size of the messages that can be sent or received.\n\tReadBufferSize, WriteBufferSize int\n\n\t\/\/ Subprotocols specifies the client's requested subprotocols.\n\tSubprotocols []string\n\n\t\/\/ EnableCompression specifies if the client should attempt to negotiate\n\t\/\/ per message compression (RFC 7692). Setting this value to true does not\n\t\/\/ guarantee that compression will be supported. Currently only \"no context\n\t\/\/ takeover\" modes are supported.\n\tEnableCompression bool\n\n\t\/\/ Jar specifies the cookie jar.\n\t\/\/ If Jar is nil, cookies are not sent in requests and ignored\n\t\/\/ in responses.\n\tJar http.CookieJar\n}\n\nvar errMalformedURL = errors.New(\"malformed ws or wss URL\")\n\nfunc hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {\n\thostPort = u.Host\n\thostNoPort = u.Host\n\tif i := strings.LastIndex(u.Host, \":\"); i > strings.LastIndex(u.Host, \"]\") {\n\t\thostNoPort = hostNoPort[:i]\n\t} else {\n\t\tswitch u.Scheme {\n\t\tcase \"wss\":\n\t\t\thostPort += \":443\"\n\t\tcase \"https\":\n\t\t\thostPort += \":443\"\n\t\tdefault:\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, hostNoPort\n}\n\n\/\/ DefaultDialer is a dialer with all fields set to the default values.\nvar DefaultDialer = &Dialer{\n\tProxy: http.ProxyFromEnvironment,\n}\n\n\/\/ Dial creates a new client connection. Use requestHeader to specify the\n\/\/ origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).\n\/\/ Use the response.Header to get the selected subprotocol\n\/\/ (Sec-WebSocket-Protocol) and cookies (Set-Cookie).\n\/\/\n\/\/ If the WebSocket handshake fails, ErrBadHandshake is returned along with a\n\/\/ non-nil *http.Response so that callers can handle redirects, authentication,\n\/\/ etcetera. The response body may not contain the entire response and does not\n\/\/ need to be closed by the application.\nfunc (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {\n\n\tif d == nil {\n\t\td = &Dialer{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t}\n\t}\n\n\tchallengeKey, err := generateChallengeKey()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tswitch u.Scheme {\n\tcase \"ws\":\n\t\tu.Scheme = \"http\"\n\tcase \"wss\":\n\t\tu.Scheme = \"https\"\n\tdefault:\n\t\treturn nil, nil, errMalformedURL\n\t}\n\n\tif u.User != nil {\n\t\t\/\/ User name and password are not allowed in websocket URIs.\n\t\treturn nil, nil, errMalformedURL\n\t}\n\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: u,\n\t\tProto: \"HTTP\/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: make(http.Header),\n\t\tHost: u.Host,\n\t}\n\n\t\/\/ Set the cookies present in the cookie jar of the dialer\n\tif d.Jar != nil {\n\t\tfor _, cookie := range d.Jar.Cookies(u) {\n\t\t\treq.AddCookie(cookie)\n\t\t}\n\t}\n\n\t\/\/ Set the request headers using the capitalization for names and values in\n\t\/\/ RFC examples. Although the capitalization shouldn't matter, there are\n\t\/\/ servers that depend on it. The Header.Set method is not used because the\n\t\/\/ method canonicalizes the header names.\n\treq.Header[\"Upgrade\"] = []string{\"websocket\"}\n\treq.Header[\"Connection\"] = []string{\"Upgrade\"}\n\treq.Header[\"Sec-WebSocket-Key\"] = []string{challengeKey}\n\treq.Header[\"Sec-WebSocket-Version\"] = []string{\"13\"}\n\tif len(d.Subprotocols) > 0 {\n\t\treq.Header[\"Sec-WebSocket-Protocol\"] = []string{strings.Join(d.Subprotocols, \", \")}\n\t}\n\tfor k, vs := range requestHeader {\n\t\tswitch {\n\t\tcase k == \"Host\":\n\t\t\tif len(vs) > 0 {\n\t\t\t\treq.Host = vs[0]\n\t\t\t}\n\t\tcase k == \"Upgrade\" ||\n\t\t\tk == \"Connection\" ||\n\t\t\tk == \"Sec-Websocket-Key\" ||\n\t\t\tk == \"Sec-Websocket-Version\" ||\n\t\t\tk == \"Sec-Websocket-Extensions\" ||\n\t\t\t(k == \"Sec-Websocket-Protocol\" && len(d.Subprotocols) > 0):\n\t\t\treturn nil, nil, errors.New(\"websocket: duplicate header not allowed: \" + k)\n\t\tcase k == \"Sec-Websocket-Protocol\":\n\t\t\treq.Header[\"Sec-WebSocket-Protocol\"] = vs\n\t\tdefault:\n\t\t\treq.Header[k] = vs\n\t\t}\n\t}\n\n\tif d.EnableCompression {\n\t\treq.Header.Set(\"Sec-Websocket-Extensions\", \"permessage-deflate; server_no_context_takeover; client_no_context_takeover\")\n\t}\n\n\tvar deadline time.Time\n\tif d.HandshakeTimeout != 0 {\n\t\tdeadline = time.Now().Add(d.HandshakeTimeout)\n\t}\n\n\t\/\/ Get network dial function.\n\tnetDial := d.NetDial\n\tif netDial == nil {\n\t\tnetDialer := &net.Dialer{Deadline: deadline}\n\t\tnetDial = netDialer.Dial\n\t}\n\n\t\/\/ If needed, wrap the dial function to set the connection deadline.\n\tif !deadline.Equal(time.Time{}) {\n\t\tforwardDial := netDial\n\t\tnetDial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := forwardDial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = c.SetDeadline(deadline)\n\t\t\tif err != nil {\n\t\t\t\tc.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t\/\/ If needed, wrap the dial function to connect through a proxy.\n\tif d.Proxy != nil {\n\t\tproxyURL, err := d.Proxy(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif proxyURL != nil {\n\t\t\tdialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tnetDial = dialer.Dial\n\t\t}\n\t}\n\n\thostPort, hostNoPort := hostPortNoPort(u)\n\tnetConn, err := netDial(\"tcp\", hostPort)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdefer func() {\n\t\tif netConn != nil {\n\t\t\tnetConn.Close()\n\t\t}\n\t}()\n\n\tif u.Scheme == \"https\" {\n\t\tcfg := cloneTLSConfig(d.TLSClientConfig)\n\t\tif cfg.ServerName == \"\" {\n\t\t\tcfg.ServerName = hostNoPort\n\t\t}\n\t\ttlsConn := tls.Client(netConn, cfg)\n\t\tnetConn = tlsConn\n\t\tif err := tlsConn.Handshake(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif !cfg.InsecureSkipVerify {\n\t\t\tif err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tconn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)\n\n\tif err := req.Write(netConn); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := http.ReadResponse(conn.br, req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif d.Jar != nil {\n\t\tif rc := resp.Cookies(); len(rc) > 0 {\n\t\t\td.Jar.SetCookies(u, rc)\n\t\t}\n\t}\n\n\tif resp.StatusCode != 101 ||\n\t\t!strings.EqualFold(resp.Header.Get(\"Upgrade\"), \"websocket\") ||\n\t\t!strings.EqualFold(resp.Header.Get(\"Connection\"), \"upgrade\") ||\n\t\tresp.Header.Get(\"Sec-Websocket-Accept\") != computeAcceptKey(challengeKey) {\n\t\t\/\/ Before closing the network connection on return from this\n\t\t\/\/ function, slurp up some of the response to aid application\n\t\t\/\/ debugging.\n\t\tbuf := make([]byte, 1024)\n\t\tn, _ := io.ReadFull(resp.Body, buf)\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))\n\t\treturn nil, resp, ErrBadHandshake\n\t}\n\n\tfor _, ext := range parseExtensions(resp.Header) {\n\t\tif ext[\"\"] != \"permessage-deflate\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, snct := ext[\"server_no_context_takeover\"]\n\t\t_, cnct := ext[\"client_no_context_takeover\"]\n\t\tif !snct || !cnct {\n\t\t\treturn nil, resp, errInvalidCompression\n\t\t}\n\t\tconn.newCompressionWriter = compressNoContextTakeover\n\t\tconn.newDecompressionReader = decompressNoContextTakeover\n\t\tbreak\n\t}\n\n\tresp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))\n\tconn.subprotocol = resp.Header.Get(\"Sec-Websocket-Protocol\")\n\n\tnetConn.SetDeadline(time.Time{})\n\tnetConn = nil \/\/ to avoid close in defer.\n\treturn conn, resp, nil\n}\n<commit_msg>Add a default handshake timeout of 5 seconds<commit_after>\/\/ Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ ErrBadHandshake is returned when the server response to opening handshake is\n\/\/ invalid.\nvar ErrBadHandshake = errors.New(\"websocket: bad handshake\")\n\nvar errInvalidCompression = errors.New(\"websocket: invalid compression negotiation\")\n\n\/\/ NewClient creates a new client connection using the given net connection.\n\/\/ The URL u specifies the host and request URI. Use requestHeader to specify\n\/\/ the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies\n\/\/ (Cookie). Use the response.Header to get the selected subprotocol\n\/\/ (Sec-WebSocket-Protocol) and cookies (Set-Cookie).\n\/\/\n\/\/ If the WebSocket handshake fails, ErrBadHandshake is returned along with a\n\/\/ non-nil *http.Response so that callers can handle redirects, authentication,\n\/\/ etc.\n\/\/\n\/\/ Deprecated: Use Dialer instead.\nfunc NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {\n\td := Dialer{\n\t\tReadBufferSize: readBufSize,\n\t\tWriteBufferSize: writeBufSize,\n\t\tNetDial: func(net, addr string) (net.Conn, error) {\n\t\t\treturn netConn, nil\n\t\t},\n\t\tHandshakeTimeout: 5 * time.Second,\n\t}\n\treturn d.Dial(u.String(), requestHeader)\n}\n\n\/\/ A Dialer contains options for connecting to WebSocket server.\ntype Dialer struct {\n\t\/\/ NetDial specifies the dial function for creating TCP connections. If\n\t\/\/ NetDial is nil, net.Dial is used.\n\tNetDial func(network, addr string) (net.Conn, error)\n\n\t\/\/ Proxy specifies a function to return a proxy for a given\n\t\/\/ Request. If the function returns a non-nil error, the\n\t\/\/ request is aborted with the provided error.\n\t\/\/ If Proxy is nil or returns a nil *URL, no proxy is used.\n\tProxy func(*http.Request) (*url.URL, error)\n\n\t\/\/ TLSClientConfig specifies the TLS configuration to use with tls.Client.\n\t\/\/ If nil, the default configuration is used.\n\tTLSClientConfig *tls.Config\n\n\t\/\/ HandshakeTimeout specifies the duration for the handshake to complete.\n\tHandshakeTimeout time.Duration\n\n\t\/\/ ReadBufferSize and WriteBufferSize specify I\/O buffer sizes. If a buffer\n\t\/\/ size is zero, then a useful default size is used. The I\/O buffer sizes\n\t\/\/ do not limit the size of the messages that can be sent or received.\n\tReadBufferSize, WriteBufferSize int\n\n\t\/\/ Subprotocols specifies the client's requested subprotocols.\n\tSubprotocols []string\n\n\t\/\/ EnableCompression specifies if the client should attempt to negotiate\n\t\/\/ per message compression (RFC 7692). Setting this value to true does not\n\t\/\/ guarantee that compression will be supported. Currently only \"no context\n\t\/\/ takeover\" modes are supported.\n\tEnableCompression bool\n\n\t\/\/ Jar specifies the cookie jar.\n\t\/\/ If Jar is nil, cookies are not sent in requests and ignored\n\t\/\/ in responses.\n\tJar http.CookieJar\n}\n\nvar errMalformedURL = errors.New(\"malformed ws or wss URL\")\n\nfunc hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {\n\thostPort = u.Host\n\thostNoPort = u.Host\n\tif i := strings.LastIndex(u.Host, \":\"); i > strings.LastIndex(u.Host, \"]\") {\n\t\thostNoPort = hostNoPort[:i]\n\t} else {\n\t\tswitch u.Scheme {\n\t\tcase \"wss\":\n\t\t\thostPort += \":443\"\n\t\tcase \"https\":\n\t\t\thostPort += \":443\"\n\t\tdefault:\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, hostNoPort\n}\n\n\/\/ DefaultDialer is a dialer with all fields set to the default values.\nvar DefaultDialer = &Dialer{\n\tProxy: http.ProxyFromEnvironment,\n\tHandshakeTimeout: 5 * time.Second,\n}\n\n\/\/ Dial creates a new client connection. Use requestHeader to specify the\n\/\/ origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).\n\/\/ Use the response.Header to get the selected subprotocol\n\/\/ (Sec-WebSocket-Protocol) and cookies (Set-Cookie).\n\/\/\n\/\/ If the WebSocket handshake fails, ErrBadHandshake is returned along with a\n\/\/ non-nil *http.Response so that callers can handle redirects, authentication,\n\/\/ etcetera. The response body may not contain the entire response and does not\n\/\/ need to be closed by the application.\nfunc (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {\n\n\tif d == nil {\n\t\td = &Dialer{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tHandshakeTimeout: 5 * time.Second,\n\t\t}\n\t}\n\n\tchallengeKey, err := generateChallengeKey()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tswitch u.Scheme {\n\tcase \"ws\":\n\t\tu.Scheme = \"http\"\n\tcase \"wss\":\n\t\tu.Scheme = \"https\"\n\tdefault:\n\t\treturn nil, nil, errMalformedURL\n\t}\n\n\tif u.User != nil {\n\t\t\/\/ User name and password are not allowed in websocket URIs.\n\t\treturn nil, nil, errMalformedURL\n\t}\n\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t\tURL: u,\n\t\tProto: \"HTTP\/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: make(http.Header),\n\t\tHost: u.Host,\n\t}\n\n\t\/\/ Set the cookies present in the cookie jar of the dialer\n\tif d.Jar != nil {\n\t\tfor _, cookie := range d.Jar.Cookies(u) {\n\t\t\treq.AddCookie(cookie)\n\t\t}\n\t}\n\n\t\/\/ Set the request headers using the capitalization for names and values in\n\t\/\/ RFC examples. Although the capitalization shouldn't matter, there are\n\t\/\/ servers that depend on it. The Header.Set method is not used because the\n\t\/\/ method canonicalizes the header names.\n\treq.Header[\"Upgrade\"] = []string{\"websocket\"}\n\treq.Header[\"Connection\"] = []string{\"Upgrade\"}\n\treq.Header[\"Sec-WebSocket-Key\"] = []string{challengeKey}\n\treq.Header[\"Sec-WebSocket-Version\"] = []string{\"13\"}\n\tif len(d.Subprotocols) > 0 {\n\t\treq.Header[\"Sec-WebSocket-Protocol\"] = []string{strings.Join(d.Subprotocols, \", \")}\n\t}\n\tfor k, vs := range requestHeader {\n\t\tswitch {\n\t\tcase k == \"Host\":\n\t\t\tif len(vs) > 0 {\n\t\t\t\treq.Host = vs[0]\n\t\t\t}\n\t\tcase k == \"Upgrade\" ||\n\t\t\tk == \"Connection\" ||\n\t\t\tk == \"Sec-Websocket-Key\" ||\n\t\t\tk == \"Sec-Websocket-Version\" ||\n\t\t\tk == \"Sec-Websocket-Extensions\" ||\n\t\t\t(k == \"Sec-Websocket-Protocol\" && len(d.Subprotocols) > 0):\n\t\t\treturn nil, nil, errors.New(\"websocket: duplicate header not allowed: \" + k)\n\t\tcase k == \"Sec-Websocket-Protocol\":\n\t\t\treq.Header[\"Sec-WebSocket-Protocol\"] = vs\n\t\tdefault:\n\t\t\treq.Header[k] = vs\n\t\t}\n\t}\n\n\tif d.EnableCompression {\n\t\treq.Header.Set(\"Sec-Websocket-Extensions\", \"permessage-deflate; server_no_context_takeover; client_no_context_takeover\")\n\t}\n\n\tvar deadline time.Time\n\tif d.HandshakeTimeout != 0 {\n\t\tdeadline = time.Now().Add(d.HandshakeTimeout)\n\t}\n\n\t\/\/ Get network dial function.\n\tnetDial := d.NetDial\n\tif netDial == nil {\n\t\tnetDialer := &net.Dialer{Deadline: deadline}\n\t\tnetDial = netDialer.Dial\n\t}\n\n\t\/\/ If needed, wrap the dial function to set the connection deadline.\n\tif !deadline.Equal(time.Time{}) {\n\t\tforwardDial := netDial\n\t\tnetDial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := forwardDial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = c.SetDeadline(deadline)\n\t\t\tif err != nil {\n\t\t\t\tc.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t\/\/ If needed, wrap the dial function to connect through a proxy.\n\tif d.Proxy != nil {\n\t\tproxyURL, err := d.Proxy(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif proxyURL != nil {\n\t\t\tdialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tnetDial = dialer.Dial\n\t\t}\n\t}\n\n\thostPort, hostNoPort := hostPortNoPort(u)\n\tnetConn, err := netDial(\"tcp\", hostPort)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdefer func() {\n\t\tif netConn != nil {\n\t\t\tnetConn.Close()\n\t\t}\n\t}()\n\n\tif u.Scheme == \"https\" {\n\t\tcfg := cloneTLSConfig(d.TLSClientConfig)\n\t\tif cfg.ServerName == \"\" {\n\t\t\tcfg.ServerName = hostNoPort\n\t\t}\n\t\ttlsConn := tls.Client(netConn, cfg)\n\t\tnetConn = tlsConn\n\t\tif err := tlsConn.Handshake(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif !cfg.InsecureSkipVerify {\n\t\t\tif err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tconn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)\n\n\tif err := req.Write(netConn); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := http.ReadResponse(conn.br, req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif d.Jar != nil {\n\t\tif rc := resp.Cookies(); len(rc) > 0 {\n\t\t\td.Jar.SetCookies(u, rc)\n\t\t}\n\t}\n\n\tif resp.StatusCode != 101 ||\n\t\t!strings.EqualFold(resp.Header.Get(\"Upgrade\"), \"websocket\") ||\n\t\t!strings.EqualFold(resp.Header.Get(\"Connection\"), \"upgrade\") ||\n\t\tresp.Header.Get(\"Sec-Websocket-Accept\") != computeAcceptKey(challengeKey) {\n\t\t\/\/ Before closing the network connection on return from this\n\t\t\/\/ function, slurp up some of the response to aid application\n\t\t\/\/ debugging.\n\t\tbuf := make([]byte, 1024)\n\t\tn, _ := io.ReadFull(resp.Body, buf)\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))\n\t\treturn nil, resp, ErrBadHandshake\n\t}\n\n\tfor _, ext := range parseExtensions(resp.Header) {\n\t\tif ext[\"\"] != \"permessage-deflate\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, snct := ext[\"server_no_context_takeover\"]\n\t\t_, cnct := ext[\"client_no_context_takeover\"]\n\t\tif !snct || !cnct {\n\t\t\treturn nil, resp, errInvalidCompression\n\t\t}\n\t\tconn.newCompressionWriter = compressNoContextTakeover\n\t\tconn.newDecompressionReader = decompressNoContextTakeover\n\t\tbreak\n\t}\n\n\tresp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))\n\tconn.subprotocol = resp.Header.Get(\"Sec-Websocket-Protocol\")\n\n\tnetConn.SetDeadline(time.Time{})\n\tnetConn = nil \/\/ to avoid close in defer.\n\treturn conn, resp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package TeleGogo\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/wallnutkraken\/TeleGogo\/Requests\"\n)\n\ntype client struct {\n\ttoken string\n\thttpClient *http.Client\n}\n\nfunc (c *client) getToken() string {\n\treturn c.token\n}\n\n\/\/ GetUpdates receives incoming updates using long polling.\nfunc (c *client) GetUpdates(options GetUpdatesOptions) ([]Update, error) {\n\tget, err := Requests.CreateBotGetWithArgs(c.token, \"getUpdates\", options.toArgs()...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpResponse, err := c.httpClient.Do(get)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseObj := updateResponse{}\n\tdecoder := json.NewDecoder(httpResponse.Body)\n\n\terr = decoder.Decode(&responseObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpResponse.Body.Close()\n\n\treturn responseObj.Result, err\n}\n\n\/\/ SetWebhook NOT TESTED. Use this method to specify a url and receive incoming updates via an outgoing\n\/\/ webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url,\n\/\/ containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a\n\/\/ reasonable amount of attempts.\nfunc (c *client) SetWebhook(args SetWebhookArgs) error {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn responseToError(response)\n\t}\n\tresponse.Body.Close()\n\n\treturn nil\n}\n\n\/\/ DownloadFile downloads the specified file\nfunc (c *client) DownloadFile(file File, path string) error {\n\tget, err := Requests.CreateFileGet(c.getToken(), file.FilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.httpClient.Do(get)\n\tif err != nil {\n\t\treturn err\n\t}\n\tphysicalFile, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer physicalFile.Close()\n\tdefer resp.Body.Close()\n\n\t_, err = io.Copy(physicalFile, resp.Body)\n\treturn err\n}\n\n\/\/ WhoAmI A simple method for testing your bot's auth token. Requires no parameters.\n\/\/ Returns basic information about the bot in form of a User object.\nfunc (c *client) WhoAmI() (User, error) {\n\trequest, err := Requests.CreateBotGet(c.token, \"getMe\")\n\n\tif err != nil {\n\t\treturn User{}, err\n\t}\n\n\tresponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn User{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn User{}, responseToError(response)\n\t}\n\ttgResp := userResponse{}\n\tdecoder := json.NewDecoder(response.Body)\n\n\terr = decoder.Decode(&tgResp)\n\tresponse.Body.Close()\n\n\treturn tgResp.Result, err\n}\n\n\/\/ SendMessage sends a message with the specified arguments. On success returns the sent Message.\nfunc (c *client) SendMessage(args SendMessageArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\tsentMsgResponse := messageReply{}\n\n\terr = decoder.Decode(&sentMsgResponse)\n\tresponse.Body.Close()\n\n\treturn sentMsgResponse.Result, err\n}\n\nfunc (c *client) ForwardMessage(args ForwardMessageArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\tsentMsgResponse := messageReply{}\n\n\terr = decoder.Decode(&sentMsgResponse)\n\n\treturn sentMsgResponse.Result, err\n}\n\nfunc (c *client) sendNewPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\t\/* Read reply *\/\n\tmsgReply := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msgReply); err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tresponse.Body.Close()\n\treturn msgReply.Result, err\n}\n\nfunc (c *client) sendExistingPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\tmsg := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msg); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\treturn msg.Result, err\n}\n\nfunc (c *client) SendPhoto(args SendPhotoArgs) (Message, error) {\n\t\/* Decide whether this is a newly uploaded file or an old one. *\/\n\tif args.FileID == \"\" {\n\t\treturn c.sendNewPhoto(args)\n\t}\n\treturn c.sendExistingPhoto(args)\n}\n\nfunc (c *client) SendAudio(args SendAudioArgs) (Message, error) {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\t\/* Read reply *\/\n\tmsgReply := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msgReply); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\n\treturn msgReply.Result, err\n}\n\nfunc (c *client) resendPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\tdecoder := json.NewDecoder(response.Body)\n\tmsgResponse := messageReply{}\n\tif err = decoder.Decode(&msgResponse); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\n\treturn msgResponse.Result, nil\n}\n\n\/\/ NewClient Creates a new Client\nfunc NewClient(token string) (Client, error) {\n\tc := new(client)\n\tc.token = token\n\tc.httpClient = &http.Client{}\n\treturn c, nil\n}\n\n\/\/ Client represents a bot in Telegram.\ntype Client interface {\n\tgetToken() string\n\tDownloadFile(File, string) error\n\tWhoAmI() (User, error)\n\tGetUpdates(GetUpdatesOptions) ([]Update, error)\n\tSendMessage(SendMessageArgs) (Message, error)\n\tForwardMessage(ForwardMessageArgs) (Message, error)\n\tSetWebhook(SetWebhookArgs) error\n\tSendPhoto(SendPhotoArgs) (Message, error)\n\tSendAudio(SendAudioArgs) (Message, error)\n}\n<commit_msg>added audio resending support<commit_after>package TeleGogo\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/wallnutkraken\/TeleGogo\/Requests\"\n)\n\ntype client struct {\n\ttoken string\n\thttpClient *http.Client\n}\n\nfunc (c *client) getToken() string {\n\treturn c.token\n}\n\n\/\/ GetUpdates receives incoming updates using long polling.\nfunc (c *client) GetUpdates(options GetUpdatesOptions) ([]Update, error) {\n\tget, err := Requests.CreateBotGetWithArgs(c.token, \"getUpdates\", options.toArgs()...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpResponse, err := c.httpClient.Do(get)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseObj := updateResponse{}\n\tdecoder := json.NewDecoder(httpResponse.Body)\n\n\terr = decoder.Decode(&responseObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpResponse.Body.Close()\n\n\treturn responseObj.Result, err\n}\n\n\/\/ SetWebhook NOT TESTED. Use this method to specify a url and receive incoming updates via an outgoing\n\/\/ webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url,\n\/\/ containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a\n\/\/ reasonable amount of attempts.\nfunc (c *client) SetWebhook(args SetWebhookArgs) error {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn responseToError(response)\n\t}\n\tresponse.Body.Close()\n\n\treturn nil\n}\n\n\/\/ DownloadFile downloads the specified file\nfunc (c *client) DownloadFile(file File, path string) error {\n\tget, err := Requests.CreateFileGet(c.getToken(), file.FilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.httpClient.Do(get)\n\tif err != nil {\n\t\treturn err\n\t}\n\tphysicalFile, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer physicalFile.Close()\n\tdefer resp.Body.Close()\n\n\t_, err = io.Copy(physicalFile, resp.Body)\n\treturn err\n}\n\n\/\/ WhoAmI A simple method for testing your bot's auth token. Requires no parameters.\n\/\/ Returns basic information about the bot in form of a User object.\nfunc (c *client) WhoAmI() (User, error) {\n\trequest, err := Requests.CreateBotGet(c.token, \"getMe\")\n\n\tif err != nil {\n\t\treturn User{}, err\n\t}\n\n\tresponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn User{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn User{}, responseToError(response)\n\t}\n\ttgResp := userResponse{}\n\tdecoder := json.NewDecoder(response.Body)\n\n\terr = decoder.Decode(&tgResp)\n\tresponse.Body.Close()\n\n\treturn tgResp.Result, err\n}\n\n\/\/ SendMessage sends a message with the specified arguments. On success returns the sent Message.\nfunc (c *client) SendMessage(args SendMessageArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\tsentMsgResponse := messageReply{}\n\n\terr = decoder.Decode(&sentMsgResponse)\n\tresponse.Body.Close()\n\n\treturn sentMsgResponse.Result, err\n}\n\nfunc (c *client) ForwardMessage(args ForwardMessageArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\tsentMsgResponse := messageReply{}\n\n\terr = decoder.Decode(&sentMsgResponse)\n\n\treturn sentMsgResponse.Result, err\n}\n\nfunc (c *client) sendNewPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\t\/* Read reply *\/\n\tmsgReply := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msgReply); err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tresponse.Body.Close()\n\treturn msgReply.Result, err\n}\n\nfunc (c *client) sendExistingPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\tmsg := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msg); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\treturn msg.Result, err\n}\n\n\/\/ SendPhoto Use this method to send photos. On success, the sent Message is returned.\nfunc (c *client) SendPhoto(args SendPhotoArgs) (Message, error) {\n\t\/* Decide whether this is a newly uploaded file or an old one. *\/\n\tif args.FileID == \"\" {\n\t\treturn c.sendNewPhoto(args)\n\t}\n\treturn c.sendExistingPhoto(args)\n}\n\nfunc (c *client) sendNewAudio(args SendAudioArgs) (Message, error) {\n\tresponse, err := c.sendFile(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\t\/* Read reply *\/\n\tmsgReply := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\tif err = decoder.Decode(&msgReply); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\n\treturn msgReply.Result, err\n}\n\nfunc (c *client) sendExistingAudio(args SendAudioArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\t\/* Parse reply *\/\n\treturn responseToMessage(response)\n}\n\nfunc (c *client) SendAudio(args SendAudioArgs) (Message, error) {\n\t\/* Decide if it's a new or existing file, based on user intent *\/\n\tif args.AudioFileID != \"\" {\n\t\treturn c.sendNewAudio(args)\n\t}\n\treturn c.sendExistingAudio(args)\n}\n\nfunc (c *client) resendPhoto(args SendPhotoArgs) (Message, error) {\n\tresponse, err := c.sendJSON(args)\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\tif response.StatusCode != http.StatusOK {\n\t\treturn Message{}, responseToError(response)\n\t}\n\tdecoder := json.NewDecoder(response.Body)\n\tmsgResponse := messageReply{}\n\tif err = decoder.Decode(&msgResponse); err != nil {\n\t\treturn Message{}, err\n\t}\n\tresponse.Body.Close()\n\n\treturn msgResponse.Result, nil\n}\n\nfunc responseToMessage(response *http.Response) (Message, error) {\n\tmsg := messageReply{}\n\tdecoder := json.NewDecoder(response.Body)\n\terr := decoder.Decode(msg)\n\tdefer response.Body.Close()\n\treturn msg.Result, err\n}\n\n\/\/ NewClient Creates a new Client\nfunc NewClient(token string) (Client, error) {\n\tc := new(client)\n\tc.token = token\n\tc.httpClient = &http.Client{}\n\treturn c, nil\n}\n\n\/\/ Client represents a bot in Telegram.\ntype Client interface {\n\tgetToken() string\n\tDownloadFile(File, string) error\n\tWhoAmI() (User, error)\n\tGetUpdates(GetUpdatesOptions) ([]Update, error)\n\tSendMessage(SendMessageArgs) (Message, error)\n\tForwardMessage(ForwardMessageArgs) (Message, error)\n\tSetWebhook(SetWebhookArgs) error\n\tSendPhoto(SendPhotoArgs) (Message, error)\n\tSendAudio(SendAudioArgs) (Message, error)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2009 Fazlul Shahriar <fshahriar@gmail.com>.\n\/\/ See LICENSE file for license details.\n\n\/\/ This package provides the client side interface to MPD (Music Player Daemon).\n\/\/ The protocol reference can be found at http:\/\/www.musicpd.org\/doc\/protocol\/index.html\npackage mpd\n\nimport (\n\t\"bufio\";\n\t\"fmt\";\n\t\"net\";\n\t\"os\";\n\t\"strconv\";\n\t\"strings\";\n)\n\ntype Client struct {\n\tconn\tnet.Conn;\n\trw\t*bufio.ReadWriter;\n}\n\ntype Attrs map[string]string\n\nfunc Connect(network, addr string) (c *Client, err os.Error) {\n\tconn, err := net.Dial(network, \"\", addr);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc = new(Client);\n\tc.rw = bufio.NewReadWriter(bufio.NewReader(conn),\n\t\tbufio.NewWriter(conn));\n\tline, err := c.readLine();\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif line[0:6] != \"OK MPD\" {\n\t\treturn nil, os.NewError(\"no greeting\")\n\t}\n\treturn;\n}\n\nfunc (c *Client) Close() (err os.Error) {\n\tif c.conn != nil {\n\t\tc.writeLine(\"close\");\n\t\terr = c.conn.Close();\n\t\tc.conn = nil;\n\t}\n\treturn;\n}\n\nfunc (c *Client) readLine() (line string, err os.Error) {\n\tline, err = c.rw.ReadString('\\n');\n\tif err != nil {\n\t\treturn\n\t}\n\tif line[len(line)-1] == '\\n' {\n\t\tline = line[0 : len(line)-1]\n\t}\n\tfmt.Println(\"-->\", line);\n\treturn;\n}\n\nfunc (c *Client) writeLine(line string) (err os.Error) {\n\tfmt.Println(\"<--\", line);\n\t_, err = c.rw.Write(strings.Bytes(line + \"\\n\"));\n\t\/\/ TODO: try again if # written != len(buf)\n\tc.rw.Flush();\n\treturn;\n}\n\nfunc (c *Client) readPlaylist() (pls []Attrs, err os.Error) {\n\tpls = make([]Attrs, 100);\n\n\tn := 0;\n\tfor {\n\t\tline, err := c.readLine();\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif line == \"OK\" {\n\t\t\tbreak\n\t\t}\n\t\tif strings.HasPrefix(line, \"file:\") {\t\/\/ new song entry begins\n\t\t\tn++;\n\t\t\tif n > len(pls) || n > cap(pls) {\n\t\t\t\tpls1 := make([]Attrs, 2*cap(pls));\n\t\t\t\tfor k, a := range pls {\n\t\t\t\t\tpls1[k] = a\n\t\t\t\t}\n\t\t\t\tpls = pls1;\n\t\t\t}\n\t\t\tpls[n-1] = make(Attrs);\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn nil, os.NewError(\"unexpected: \" + line)\n\t\t}\n\t\tz := strings.Index(line, \": \");\n\t\tif z < 0 {\n\t\t\treturn nil, os.NewError(\"can't parse line: \" + line)\n\t\t}\n\t\tkey := line[0:z];\n\t\tpls[n-1][key] = line[z+2:];\n\t}\n\treturn pls[0:n], nil;\n}\n\nfunc (c *Client) getAttrs() (attrs Attrs, err os.Error) {\n\tattrs = make(Attrs);\n\tfor {\n\t\tline, err := c.readLine();\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif line == \"OK\" {\n\t\t\tbreak\n\t\t}\n\t\tz := strings.Index(line, \": \");\n\t\tif z < 0 {\n\t\t\treturn nil, os.NewError(\"can't parse line: \" + line)\n\t\t}\n\t\tkey := line[0:z];\n\t\tattrs[key] = line[z+2:];\n\t}\n\treturn;\n}\n\nfunc (c *Client) CurrentSong() (Attrs, os.Error) {\n\tc.writeLine(\"currentsong\");\n\treturn c.getAttrs();\n}\n\nfunc (c *Client) Status() (Attrs, os.Error) {\n\tc.writeLine(\"status\");\n\treturn c.getAttrs();\n}\n\nfunc (c *Client) readErr() (err os.Error) {\n\tline, err := c.readLine();\n\tswitch {\n\tcase err != nil:\n\t\treturn err\n\tcase line == \"OK\":\n\t\treturn nil\n\tcase strings.HasPrefix(line, \"ACK \"):\n\t\treturn os.NewError(line[4:])\n\t}\n\treturn os.NewError(\"unexpected response: \" + line);\n}\n\n\/\/\n\/\/ Playback control\n\/\/\n\n\/\/ Next plays next song in the playlist.\nfunc (c *Client) Next() os.Error {\n\tc.writeLine(\"next\");\n\treturn c.readErr();\n}\n\n\/\/ Pause pauses playback if pause is true; resumes playback otherwise.\nfunc (c *Client) Pause(pause bool) os.Error {\n\tif pause {\n\t\tc.writeLine(\"pause 1\")\n\t} else {\n\t\tc.writeLine(\"pause 0\")\n\t}\n\treturn c.readErr();\n}\n\n\/\/ Play starts playing the song at playlist position pos. If pos is negative,\n\/\/ start playing at the current position in the playlist.\nfunc (c *Client) Play(pos int) os.Error {\n\tif pos < 0 {\n\t\tc.writeLine(\"play\")\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"play %d\", pos))\n\t}\n\treturn c.readErr();\n}\n\nfunc (c *Client) PlayId(id int) os.Error\t{ return c.Play(id) }\n\n\/\/ Previous plays previous song in the playlist.\nfunc (c *Client) Previous() os.Error {\n\tc.writeLine(\"next\");\n\treturn c.readErr();\n}\n\n\/\/ Seek seeks to the position time (in seconds) of the song at playlist position pos.\nfunc (c *Client) Seek(pos, time int) os.Error {\n\tc.writeLine(fmt.Sprintf(\"seek %d %d\", pos, time));\n\treturn c.readErr();\n}\n\nfunc (c *Client) SeekId(id, time int) os.Error {\n\treturn c.Seek(id, time)\n}\n\n\/\/ Stop stops playback.\nfunc (c *Client) Stop() os.Error {\n\tc.writeLine(\"stop\");\n\treturn c.readErr();\n}\n\n\/\/\n\/\/ Playlist related functions\n\/\/\n\nfunc (c *Client) PlaylistInfo(start, end int) (pls []Attrs, err os.Error) {\n\tif start < 0 && end >= 0 {\n\t\treturn nil, os.NewError(\"negative start index\")\n\t}\n\tif start >= 0 && end < 0 {\n\t\tc.writeLine(fmt.Sprintf(\"playlistinfo %d\", start));\n\t\treturn c.readPlaylist();\n\t}\n\tc.writeLine(\"playlistinfo\");\n\tpls, err = c.readPlaylist();\n\tif err != nil || start < 0 || end < 0 {\n\t\treturn\n\t}\n\treturn pls[start:end], nil;\n}\n\nfunc (c *Client) Delete(start, end int) os.Error {\n\tif start < 0 {\n\t\treturn os.NewError(\"negative start index\")\n\t}\n\tif end < 0 {\n\t\tc.writeLine(fmt.Sprintf(\"delete %d\", start))\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"delete %d %d\", start, end))\n\t}\n\treturn c.readErr();\n}\n\nfunc (c *Client) DeleteId(songid int) os.Error {\n\tc.writeLine(fmt.Sprintf(\"delete %d\", songid));\n\treturn c.readErr();\n}\n\nfunc (c *Client) Add(uri string) os.Error {\n\tc.writeLine(fmt.Sprintf(\"%q\", uri));\n\treturn c.readErr();\n}\n\nfunc (c *Client) AddId(uri string, pos int) (id int, err os.Error) {\n\tif pos >= 0 {\n\t\tc.writeLine(fmt.Sprintf(\"%q %d\", uri, pos))\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"%q\", uri))\n\t}\n\tattrs, err := c.getAttrs();\n\tif err != nil {\n\t\treturn\n\t}\n\ttok, ok := attrs[\"Id\"];\n\tif !ok {\n\t\treturn -1, os.NewError(\"addid did not return Id\")\n\t}\n\treturn strconv.Atoi(tok);\n}\n\nfunc (c *Client) Clear() os.Error {\n\tc.writeLine(\"clear\");\n\treturn c.readErr();\n}\n<commit_msg>add Chatty flag<commit_after>\/\/ Copyright © 2009 Fazlul Shahriar <fshahriar@gmail.com>.\n\/\/ See LICENSE file for license details.\n\n\/\/ This package provides the client side interface to MPD (Music Player Daemon).\n\/\/ The protocol reference can be found at http:\/\/www.musicpd.org\/doc\/protocol\/index.html\npackage mpd\n\nimport (\n\t\"bufio\";\n\t\"fmt\";\n\t\"net\";\n\t\"os\";\n\t\"strconv\";\n\t\"strings\";\n)\n\nvar Chatty bool\t\/\/ print all conversation with MPD (for debugging)\n\ntype Client struct {\n\tconn\tnet.Conn;\n\trw\t*bufio.ReadWriter;\n}\n\ntype Attrs map[string]string\n\nfunc Connect(network, addr string) (c *Client, err os.Error) {\n\tconn, err := net.Dial(network, \"\", addr);\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc = new(Client);\n\tc.rw = bufio.NewReadWriter(bufio.NewReader(conn),\n\t\tbufio.NewWriter(conn));\n\tline, err := c.readLine();\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif line[0:6] != \"OK MPD\" {\n\t\treturn nil, os.NewError(\"no greeting\")\n\t}\n\treturn;\n}\n\nfunc (c *Client) Close() (err os.Error) {\n\tif c.conn != nil {\n\t\tc.writeLine(\"close\");\n\t\terr = c.conn.Close();\n\t\tc.conn = nil;\n\t}\n\treturn;\n}\n\nfunc (c *Client) readLine() (line string, err os.Error) {\n\tline, err = c.rw.ReadString('\\n');\n\tif err != nil {\n\t\treturn\n\t}\n\tif line[len(line)-1] == '\\n' {\n\t\tline = line[0 : len(line)-1]\n\t}\n\tif Chatty {\n\t\tfmt.Println(\"-->\", line)\n\t}\n\treturn;\n}\n\nfunc (c *Client) writeLine(line string) (err os.Error) {\n\tif Chatty {\n\t\tfmt.Println(\"<--\", line)\n\t}\n\t_, err = c.rw.Write(strings.Bytes(line + \"\\n\"));\n\t\/\/ TODO: try again if # written != len(buf)\n\tc.rw.Flush();\n\treturn;\n}\n\nfunc (c *Client) readPlaylist() (pls []Attrs, err os.Error) {\n\tpls = make([]Attrs, 100);\n\n\tn := 0;\n\tfor {\n\t\tline, err := c.readLine();\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif line == \"OK\" {\n\t\t\tbreak\n\t\t}\n\t\tif strings.HasPrefix(line, \"file:\") {\t\/\/ new song entry begins\n\t\t\tn++;\n\t\t\tif n > len(pls) || n > cap(pls) {\n\t\t\t\tpls1 := make([]Attrs, 2*cap(pls));\n\t\t\t\tfor k, a := range pls {\n\t\t\t\t\tpls1[k] = a\n\t\t\t\t}\n\t\t\t\tpls = pls1;\n\t\t\t}\n\t\t\tpls[n-1] = make(Attrs);\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn nil, os.NewError(\"unexpected: \" + line)\n\t\t}\n\t\tz := strings.Index(line, \": \");\n\t\tif z < 0 {\n\t\t\treturn nil, os.NewError(\"can't parse line: \" + line)\n\t\t}\n\t\tkey := line[0:z];\n\t\tpls[n-1][key] = line[z+2:];\n\t}\n\treturn pls[0:n], nil;\n}\n\nfunc (c *Client) getAttrs() (attrs Attrs, err os.Error) {\n\tattrs = make(Attrs);\n\tfor {\n\t\tline, err := c.readLine();\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif line == \"OK\" {\n\t\t\tbreak\n\t\t}\n\t\tz := strings.Index(line, \": \");\n\t\tif z < 0 {\n\t\t\treturn nil, os.NewError(\"can't parse line: \" + line)\n\t\t}\n\t\tkey := line[0:z];\n\t\tattrs[key] = line[z+2:];\n\t}\n\treturn;\n}\n\nfunc (c *Client) CurrentSong() (Attrs, os.Error) {\n\tc.writeLine(\"currentsong\");\n\treturn c.getAttrs();\n}\n\nfunc (c *Client) Status() (Attrs, os.Error) {\n\tc.writeLine(\"status\");\n\treturn c.getAttrs();\n}\n\nfunc (c *Client) readErr() (err os.Error) {\n\tline, err := c.readLine();\n\tswitch {\n\tcase err != nil:\n\t\treturn err\n\tcase line == \"OK\":\n\t\treturn nil\n\tcase strings.HasPrefix(line, \"ACK \"):\n\t\treturn os.NewError(line[4:])\n\t}\n\treturn os.NewError(\"unexpected response: \" + line);\n}\n\n\/\/\n\/\/ Playback control\n\/\/\n\n\/\/ Next plays next song in the playlist.\nfunc (c *Client) Next() os.Error {\n\tc.writeLine(\"next\");\n\treturn c.readErr();\n}\n\n\/\/ Pause pauses playback if pause is true; resumes playback otherwise.\nfunc (c *Client) Pause(pause bool) os.Error {\n\tif pause {\n\t\tc.writeLine(\"pause 1\")\n\t} else {\n\t\tc.writeLine(\"pause 0\")\n\t}\n\treturn c.readErr();\n}\n\n\/\/ Play starts playing the song at playlist position pos. If pos is negative,\n\/\/ start playing at the current position in the playlist.\nfunc (c *Client) Play(pos int) os.Error {\n\tif pos < 0 {\n\t\tc.writeLine(\"play\")\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"play %d\", pos))\n\t}\n\treturn c.readErr();\n}\n\nfunc (c *Client) PlayId(id int) os.Error\t{ return c.Play(id) }\n\n\/\/ Previous plays previous song in the playlist.\nfunc (c *Client) Previous() os.Error {\n\tc.writeLine(\"next\");\n\treturn c.readErr();\n}\n\n\/\/ Seek seeks to the position time (in seconds) of the song at playlist position pos.\nfunc (c *Client) Seek(pos, time int) os.Error {\n\tc.writeLine(fmt.Sprintf(\"seek %d %d\", pos, time));\n\treturn c.readErr();\n}\n\nfunc (c *Client) SeekId(id, time int) os.Error {\n\treturn c.Seek(id, time)\n}\n\n\/\/ Stop stops playback.\nfunc (c *Client) Stop() os.Error {\n\tc.writeLine(\"stop\");\n\treturn c.readErr();\n}\n\n\/\/\n\/\/ Playlist related functions\n\/\/\n\nfunc (c *Client) PlaylistInfo(start, end int) (pls []Attrs, err os.Error) {\n\tif start < 0 && end >= 0 {\n\t\treturn nil, os.NewError(\"negative start index\")\n\t}\n\tif start >= 0 && end < 0 {\n\t\tc.writeLine(fmt.Sprintf(\"playlistinfo %d\", start));\n\t\treturn c.readPlaylist();\n\t}\n\tc.writeLine(\"playlistinfo\");\n\tpls, err = c.readPlaylist();\n\tif err != nil || start < 0 || end < 0 {\n\t\treturn\n\t}\n\treturn pls[start:end], nil;\n}\n\nfunc (c *Client) Delete(start, end int) os.Error {\n\tif start < 0 {\n\t\treturn os.NewError(\"negative start index\")\n\t}\n\tif end < 0 {\n\t\tc.writeLine(fmt.Sprintf(\"delete %d\", start))\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"delete %d %d\", start, end))\n\t}\n\treturn c.readErr();\n}\n\nfunc (c *Client) DeleteId(songid int) os.Error {\n\tc.writeLine(fmt.Sprintf(\"delete %d\", songid));\n\treturn c.readErr();\n}\n\nfunc (c *Client) Add(uri string) os.Error {\n\tc.writeLine(fmt.Sprintf(\"%q\", uri));\n\treturn c.readErr();\n}\n\nfunc (c *Client) AddId(uri string, pos int) (id int, err os.Error) {\n\tif pos >= 0 {\n\t\tc.writeLine(fmt.Sprintf(\"%q %d\", uri, pos))\n\t} else {\n\t\tc.writeLine(fmt.Sprintf(\"%q\", uri))\n\t}\n\tattrs, err := c.getAttrs();\n\tif err != nil {\n\t\treturn\n\t}\n\ttok, ok := attrs[\"Id\"];\n\tif !ok {\n\t\treturn -1, os.NewError(\"addid did not return Id\")\n\t}\n\treturn strconv.Atoi(tok);\n}\n\nfunc (c *Client) Clear() os.Error {\n\tc.writeLine(\"clear\");\n\treturn c.readErr();\n}\n<|endoftext|>"} {"text":"<commit_before>package gnip\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"io\/ioutil\"\n\n\t\"github.com\/JvrBaena\/gognip\/types\"\n\t\"github.com\/eapache\/channels\"\n)\n\nconst (\n\tstreamURL = \"https:\/\/gnip-stream.twitter.com\/stream\/powertrack\/accounts\/%s\/publishers\/twitter\/%s.json\"\n\trulesURL = \"https:\/\/gnip-api.twitter.com\/rules\/powertrack\/accounts\/%s\/publishers\/twitter\/%s.json\"\n\treplayURL = \"https:\/\/stream.gnip.com:443\/accounts\/%s\/publishers\/twitter\/replay\/track\/%s.json\"\n\treplayRulesURL = \"https:\/\/api.gnip.com:443\/accounts\/%s\/publishers\/twitter\/replay\/track\/%s\/rules.json\"\n\tfullArchiveURL = \"https:\/\/data-api.twitter.com\/search\/fullarchive\/accounts\/%s\/%s\"\n)\n\n\/*\nClient ...\n*\/\ntype Client struct {\n\tclient *http.Client\n\tuser string\n\tpassword string\n\taccount string\n\tch *channels.InfiniteChannel\n\tstop chan bool\n\tactive bool\n}\n\n\/*\nNewClient ...\n*\/\nfunc NewClient(user string, password string, account string) *Client {\n\tc := &Client{\n\t\t&http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: (&net.Dialer{}).Dial,\n\t\t\t\tIdleConnTimeout: 30 * time.Second,\n\t\t\t},\n\t\t},\n\t\tuser,\n\t\tpassword,\n\t\taccount,\n\t\tchannels.NewInfiniteChannel(),\n\t\tmake(chan bool),\n\t\tfalse,\n\t}\n\treturn c\n}\n\n\/*\nConnectPowertrack ...\n*\/\nfunc (client *Client) ConnectPowertrack(streamLabel string) (<-chan interface{}, error) {\n\tpowertrackStream := fmt.Sprintf(streamURL, client.account, streamLabel)\n\treq, err := http.NewRequest(\"GET\", powertrackStream, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Connection\", \"keep-alive\")\n\treq.SetBasicAuth(client.user, client.password)\n\n\tgo client.processResponse(req)\n\n\treturn client.ch.Out(), nil\n}\n\nfunc (client *Client) processResponse(req *http.Request) {\n\tresp, err := client.client.Do(req)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlog.Println(\"Error in Request\")\n\t\tclient.active = false\n\t\tclient.ch.Close()\n\t\treturn\n\t}\n\n\tclient.active = true\n\treader := bufio.NewReader(resp.Body)\n\tfor {\n\t\tselect {\n\t\tcase <-client.stop:\n\t\t\tlog.Println(\"STOP RECEIVED\")\n\t\t\tclient.active = false\n\t\t\tclient.ch.Close()\n\t\t\tbreak\n\t\tdefault:\n\t\t\tif client.active {\n\t\t\t\tline, _ := reader.ReadBytes('\\r')\n\t\t\t\tline = bytes.TrimSpace(line)\n\n\t\t\t\tif line != nil && string(line[:]) != \"\" {\n\t\t\t\t\tclient.ch.In() <- line\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/*\nStopPowertrack ...\n*\/\nfunc (client *Client) StopPowertrack() {\n\tclient.stop <- true\n}\n\n\/*\nIsActive ...\n*\/\nfunc (client *Client) IsActive() bool {\n\treturn client.active\n}\n\n\/*\nAddRule ...\n*\/\nfunc (client *Client) AddRule(streamLabel string, rule *types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\treturn client.postRule(rulesEndpoint, rule)\n}\n\n\/*\nRemoveRule ...\n*\/\nfunc (client *Client) RemoveRule(streamLabel string, rule *types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\trulesEndpoint += \"?_method=delete\"\n\treturn client.postRule(rulesEndpoint, rule)\n}\n\nfunc (client *Client) postRule(endpoint string, rule *types.Rule) (*types.RuleRequestResponse, error) {\n\tbody := &types.RuleRequest{\n\t\tRules: []*types.Rule{rule},\n\t}\n\tjsonBody, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(jsonBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"json\")\n\treq.SetBasicAuth(client.user, client.password)\n\n\tresp, err := client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqResponse := types.RuleRequestResponse{}\n\tjsonResponse, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(jsonResponse, &reqResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &reqResponse, nil\n}\n<commit_msg>Adding methods to work with arrays of rules<commit_after>package gnip\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"io\/ioutil\"\n\n\t\"github.com\/JvrBaena\/gognip\/types\"\n\t\"github.com\/eapache\/channels\"\n)\n\nconst (\n\tstreamURL = \"https:\/\/gnip-stream.twitter.com\/stream\/powertrack\/accounts\/%s\/publishers\/twitter\/%s.json\"\n\trulesURL = \"https:\/\/gnip-api.twitter.com\/rules\/powertrack\/accounts\/%s\/publishers\/twitter\/%s.json\"\n\treplayURL = \"https:\/\/stream.gnip.com:443\/accounts\/%s\/publishers\/twitter\/replay\/track\/%s.json\"\n\treplayRulesURL = \"https:\/\/api.gnip.com:443\/accounts\/%s\/publishers\/twitter\/replay\/track\/%s\/rules.json\"\n\tfullArchiveURL = \"https:\/\/data-api.twitter.com\/search\/fullarchive\/accounts\/%s\/%s\"\n)\n\n\/*\nClient ...\n*\/\ntype Client struct {\n\tclient *http.Client\n\tuser string\n\tpassword string\n\taccount string\n\tch *channels.InfiniteChannel\n\tstop chan bool\n\tactive bool\n}\n\n\/*\nNewClient ...\n*\/\nfunc NewClient(user string, password string, account string) *Client {\n\tc := &Client{\n\t\t&http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: (&net.Dialer{}).Dial,\n\t\t\t\tIdleConnTimeout: 30 * time.Second,\n\t\t\t},\n\t\t},\n\t\tuser,\n\t\tpassword,\n\t\taccount,\n\t\tchannels.NewInfiniteChannel(),\n\t\tmake(chan bool),\n\t\tfalse,\n\t}\n\treturn c\n}\n\n\/*\nConnectPowertrack ...\n*\/\nfunc (client *Client) ConnectPowertrack(streamLabel string) (<-chan interface{}, error) {\n\tpowertrackStream := fmt.Sprintf(streamURL, client.account, streamLabel)\n\treq, err := http.NewRequest(\"GET\", powertrackStream, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Connection\", \"keep-alive\")\n\treq.SetBasicAuth(client.user, client.password)\n\n\tgo client.processResponse(req)\n\n\treturn client.ch.Out(), nil\n}\n\nfunc (client *Client) processResponse(req *http.Request) {\n\tresp, err := client.client.Do(req)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlog.Println(\"Error in Request\")\n\t\tclient.active = false\n\t\tclient.ch.Close()\n\t\treturn\n\t}\n\n\tclient.active = true\n\treader := bufio.NewReader(resp.Body)\n\tfor {\n\t\tselect {\n\t\tcase <-client.stop:\n\t\t\tlog.Println(\"STOP RECEIVED\")\n\t\t\tclient.active = false\n\t\t\tclient.ch.Close()\n\t\t\tbreak\n\t\tdefault:\n\t\t\tif client.active {\n\t\t\t\tline, _ := reader.ReadBytes('\\r')\n\t\t\t\tline = bytes.TrimSpace(line)\n\n\t\t\t\tif line != nil && string(line[:]) != \"\" {\n\t\t\t\t\tclient.ch.In() <- line\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/*\nStopPowertrack ...\n*\/\nfunc (client *Client) StopPowertrack() {\n\tclient.stop <- true\n}\n\n\/*\nIsActive ...\n*\/\nfunc (client *Client) IsActive() bool {\n\treturn client.active\n}\n\n\/*\nAddRule ...\n*\/\nfunc (client *Client) AddRule(streamLabel string, rule *types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\treturn client.postRules(rulesEndpoint, []*types.Rule{rule})\n}\n\n\/*\nAddRules ...\n*\/\nfunc (client *Client) AddRules(streamLabel string, rules []*types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\treturn client.postRules(rulesEndpoint, rules)\n}\n\n\/*\nRemoveRule ...\n*\/\nfunc (client *Client) RemoveRule(streamLabel string, rule *types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\trulesEndpoint += \"?_method=delete\"\n\treturn client.postRules(rulesEndpoint, []*types.Rule{rule})\n}\n\n\/*\nRemoveRules ...\n*\/\nfunc (client *Client) RemoveRules(streamLabel string, rules []*types.Rule) (*types.RuleRequestResponse, error) {\n\trulesEndpoint := fmt.Sprintf(rulesURL, client.account, streamLabel)\n\trulesEndpoint += \"?_method=delete\"\n\treturn client.postRules(rulesEndpoint, rules)\n}\n\nfunc (client *Client) postRules(endpoint string, rules []*types.Rule) (*types.RuleRequestResponse, error) {\n\tbody := &types.RuleRequest{\n\t\tRules: rules,\n\t}\n\tjsonBody, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(jsonBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"json\")\n\treq.SetBasicAuth(client.user, client.password)\n\n\tresp, err := client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqResponse := types.RuleRequestResponse{}\n\tjsonResponse, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(jsonResponse, &reqResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &reqResponse, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ More information about Google Directions API is available on\n\/\/ https:\/\/developers.google.com\/maps\/documentation\/directions\/\n\npackage maps \/\/ import \"google.golang.org\/maps\"\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"google.golang.org\/maps\/internal\"\n)\n\n\/\/ Client may be used to make requests to the Google Maps WebService APIs\ntype Client struct {\n\thttpClient *http.Client\n\tapiKey string\n\tbaseURL string\n\tclientID string\n\tsignature string\n\trequestsPerSecond int\n\trateLimiter chan time.Time\n}\n\ntype clientOption func(*Client) error\n\n\/\/ NewClient constructs a new Client which can make requests to the Google Maps WebService APIs.\n\/\/ The supplied http.Client is used for making requests to the Maps WebService APIs\nfunc NewClient(options ...clientOption) (*Client, error) {\n\tc := &Client{requestsPerSecond: 10}\n\tWithHTTPClient(&http.Client{})(c)\n\tfor _, option := range options {\n\t\terr := option(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.apiKey == \"\" && (c.clientID == \"\" || c.signature == \"\") {\n\t\treturn nil, fmt.Errorf(\"maps.Client with no API Key or credentials\")\n\t}\n\n\t\/\/ Implement a bursty rate limiter.\n\t\/\/ Allow up to 1 second worth of requests to be made at once.\n\tc.rateLimiter = make(chan time.Time, c.requestsPerSecond)\n\tgo func() {\n\t\t\/\/ Prefill rateLimiter with 1 seconds worth of requests.\n\t\tfor i := 0; i < c.requestsPerSecond; i++ {\n\t\t\tc.rateLimiter <- time.Now()\n\t\t}\n\t\t\/\/ Refill rateLimiter continuously\n\t\tfor t := range time.Tick(time.Second \/ time.Duration(c.requestsPerSecond)) {\n\t\t\tc.rateLimiter <- t\n\t\t}\n\t}()\n\n\treturn c, nil\n}\n\n\/\/ WithHTTPClient configures a Maps API client with a http.Client to make requests over.\nfunc WithHTTPClient(c *http.Client) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tif _, ok := c.Transport.(*transport); !ok {\n\t\t\tt := c.Transport\n\t\t\tif t != nil {\n\t\t\t\tc.Transport = &transport{Base: t}\n\t\t\t} else {\n\t\t\t\tc.Transport = &transport{Base: http.DefaultTransport}\n\t\t\t}\n\t\t}\n\t\tclient.httpClient = c\n\t\treturn nil\n\t}\n}\n\n\/\/ withBaseURL is for testing only.\nfunc withBaseURL(url string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.baseURL = url\n\t\treturn nil\n\t}\n}\n\n\/\/ WithAPIKey configures a Maps API client with an API Key\nfunc WithAPIKey(apiKey string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.apiKey = apiKey\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClientIDAndSignature configures a Maps API client for a Maps for Work application\n\/\/ The signature is assumed to be URL modified Base64 encoded\nfunc WithClientIDAndSignature(clientID, signature string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.clientID = clientID\n\t\tclient.signature = signature\n\n\t\t\/\/ Enforce that signature is URL modified Base64 encoded\n\t\t_, err := base64.URLEncoding.DecodeString(signature)\n\t\treturn err\n\t}\n}\n\n\/\/ WithRateLimit configures the rate limit for back end requests.\n\/\/ Default is to limit to 10 requests per second.\nfunc WithRateLimit(requestsPerSecond int) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.requestsPerSecond = requestsPerSecond\n\t\treturn nil\n\t}\n}\n\nfunc (client *Client) httpDo(req *http.Request) (*http.Response, error) {\n\t<-client.rateLimiter\n\treturn client.httpClient.Do(req)\n}\n\nconst userAgent = \"GoogleGeoApiClientGo\/0.1\"\n\n\/\/ Transport is an http.RoundTripper that appends\n\/\/ Google Cloud client's user-agent to the original\n\/\/ request's user-agent header.\ntype transport struct {\n\t\/\/ Base represents the actual http.RoundTripper\n\t\/\/ the requests will be delegated to.\n\tBase http.RoundTripper\n}\n\n\/\/ RoundTrip appends a user-agent to the existing user-agent\n\/\/ header and delegates the request to the base http.RoundTripper.\nfunc (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq = cloneRequest(req)\n\tua := req.Header.Get(\"User-Agent\")\n\tif ua == \"\" {\n\t\tua = userAgent\n\t} else {\n\t\tua = fmt.Sprintf(\"%s;%s\", ua, userAgent)\n\t}\n\treq.Header.Set(\"User-Agent\", ua)\n\treturn t.Base.RoundTrip(req)\n}\n\n\/\/ cloneRequest returns a clone of the provided *http.Request.\n\/\/ The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *http.Request) *http.Request {\n\t\/\/ shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t\/\/ deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}\n\nfunc (client *Client) generateAuthQuery(path string, q url.Values, acceptClientID bool) (string, error) {\n\tif client.apiKey != \"\" {\n\t\tq.Set(\"key\", client.apiKey)\n\t\treturn q.Encode(), nil\n\t}\n\tif acceptClientID {\n\t\tquery, err := internal.SignURL(path, client.clientID, client.signature, q)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn query, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Must provide API key for this API. It does not accept enterprise credentials.\")\n}\n<commit_msg>Moving rate limit pre-fill outside of goroutine<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ More information about Google Directions API is available on\n\/\/ https:\/\/developers.google.com\/maps\/documentation\/directions\/\n\npackage maps \/\/ import \"google.golang.org\/maps\"\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"google.golang.org\/maps\/internal\"\n)\n\n\/\/ Client may be used to make requests to the Google Maps WebService APIs\ntype Client struct {\n\thttpClient *http.Client\n\tapiKey string\n\tbaseURL string\n\tclientID string\n\tsignature string\n\trequestsPerSecond int\n\trateLimiter chan time.Time\n}\n\ntype clientOption func(*Client) error\n\n\/\/ NewClient constructs a new Client which can make requests to the Google Maps WebService APIs.\n\/\/ The supplied http.Client is used for making requests to the Maps WebService APIs\nfunc NewClient(options ...clientOption) (*Client, error) {\n\tc := &Client{requestsPerSecond: 10}\n\tWithHTTPClient(&http.Client{})(c)\n\tfor _, option := range options {\n\t\terr := option(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.apiKey == \"\" && (c.clientID == \"\" || c.signature == \"\") {\n\t\treturn nil, fmt.Errorf(\"maps.Client with no API Key or credentials\")\n\t}\n\n\t\/\/ Implement a bursty rate limiter.\n\t\/\/ Allow up to 1 second worth of requests to be made at once.\n\tc.rateLimiter = make(chan time.Time, c.requestsPerSecond)\n\t\/\/ Prefill rateLimiter with 1 seconds worth of requests.\n\tfor i := 0; i < c.requestsPerSecond; i++ {\n\t\tc.rateLimiter <- time.Now()\n\t}\n\tgo func() {\n\t\t\/\/ Refill rateLimiter continuously\n\t\tfor t := range time.Tick(time.Second \/ time.Duration(c.requestsPerSecond)) {\n\t\t\tc.rateLimiter <- t\n\t\t}\n\t}()\n\n\treturn c, nil\n}\n\n\/\/ WithHTTPClient configures a Maps API client with a http.Client to make requests over.\nfunc WithHTTPClient(c *http.Client) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tif _, ok := c.Transport.(*transport); !ok {\n\t\t\tt := c.Transport\n\t\t\tif t != nil {\n\t\t\t\tc.Transport = &transport{Base: t}\n\t\t\t} else {\n\t\t\t\tc.Transport = &transport{Base: http.DefaultTransport}\n\t\t\t}\n\t\t}\n\t\tclient.httpClient = c\n\t\treturn nil\n\t}\n}\n\n\/\/ withBaseURL is for testing only.\nfunc withBaseURL(url string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.baseURL = url\n\t\treturn nil\n\t}\n}\n\n\/\/ WithAPIKey configures a Maps API client with an API Key\nfunc WithAPIKey(apiKey string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.apiKey = apiKey\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClientIDAndSignature configures a Maps API client for a Maps for Work application\n\/\/ The signature is assumed to be URL modified Base64 encoded\nfunc WithClientIDAndSignature(clientID, signature string) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.clientID = clientID\n\t\tclient.signature = signature\n\n\t\t\/\/ Enforce that signature is URL modified Base64 encoded\n\t\t_, err := base64.URLEncoding.DecodeString(signature)\n\t\treturn err\n\t}\n}\n\n\/\/ WithRateLimit configures the rate limit for back end requests.\n\/\/ Default is to limit to 10 requests per second.\nfunc WithRateLimit(requestsPerSecond int) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.requestsPerSecond = requestsPerSecond\n\t\treturn nil\n\t}\n}\n\nfunc (client *Client) httpDo(req *http.Request) (*http.Response, error) {\n\t<-client.rateLimiter\n\treturn client.httpClient.Do(req)\n}\n\nconst userAgent = \"GoogleGeoApiClientGo\/0.1\"\n\n\/\/ Transport is an http.RoundTripper that appends\n\/\/ Google Cloud client's user-agent to the original\n\/\/ request's user-agent header.\ntype transport struct {\n\t\/\/ Base represents the actual http.RoundTripper\n\t\/\/ the requests will be delegated to.\n\tBase http.RoundTripper\n}\n\n\/\/ RoundTrip appends a user-agent to the existing user-agent\n\/\/ header and delegates the request to the base http.RoundTripper.\nfunc (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq = cloneRequest(req)\n\tua := req.Header.Get(\"User-Agent\")\n\tif ua == \"\" {\n\t\tua = userAgent\n\t} else {\n\t\tua = fmt.Sprintf(\"%s;%s\", ua, userAgent)\n\t}\n\treq.Header.Set(\"User-Agent\", ua)\n\treturn t.Base.RoundTrip(req)\n}\n\n\/\/ cloneRequest returns a clone of the provided *http.Request.\n\/\/ The clone is a shallow copy of the struct and its Header map.\nfunc cloneRequest(r *http.Request) *http.Request {\n\t\/\/ shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t\/\/ deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}\n\nfunc (client *Client) generateAuthQuery(path string, q url.Values, acceptClientID bool) (string, error) {\n\tif client.apiKey != \"\" {\n\t\tq.Set(\"key\", client.apiKey)\n\t\treturn q.Encode(), nil\n\t}\n\tif acceptClientID {\n\t\tquery, err := internal.SignURL(path, client.clientID, client.signature, q)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn query, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Must provide API key for this API. It does not accept enterprise credentials.\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ A Riemann client for Go, featuring concurrency, sending events and state updates, queries\n\/\/\n\/\/ Copyright (C) 2014 by Christopher Gilbert <christopher.john.gilbert@gmail.com>\npackage riemanngo\n\nimport (\n\t\"github.com\/riemann\/riemann-go-client\/proto\"\n)\n\n\/\/ Client is an interface to a generic client\ntype Client interface {\n\tSend(message *proto.Msg) (*proto.Msg, error)\n\tConnect() error\n\tClose() error\n}\n\n\/\/ IndexClient is an interface to a generic Client for index queries\ntype IndexClient interface {\n\tQueryIndex(q string) ([]Event, error)\n}\n\n\/\/ request encapsulates a request to send to the Riemann server\ntype request struct {\n\tmessage *proto.Msg\n\tresponse_ch chan response\n}\n\n\/\/ response encapsulates a response from the Riemann server\ntype response struct {\n\tmessage *proto.Msg\n\terr error\n}\n\n\/\/ Send an event using a client\nfunc SendEvent(c Client, e *Event) (*proto.Msg, error) {\n\tepb, err := EventToProtocolBuffer(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmessage := &proto.Msg{}\n\tmessage.Events = append(message.Events, epb)\n\n\tmsg, err := c.Send(message)\n\treturn msg, err\n}\n\n\/\/ Send multiple events using a client\nfunc SendEvents(c Client, e *[]Event) (*proto.Msg, error) {\n\tvar events []*proto.Event\n\tfor _, elem := range *e {\n\t\tepb, err := EventToProtocolBuffer(&elem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tevents = append(events, epb)\n\t}\n\tmessage := &proto.Msg{}\n\tmessage.Events = events\n\n\tmsg, err := c.Send(message)\n\treturn msg, err\n}\n<commit_msg>Refactor SendEvent()<commit_after>\/\/ A Riemann client for Go, featuring concurrency, sending events and state updates, queries\n\/\/\n\/\/ Copyright (C) 2014 by Christopher Gilbert <christopher.john.gilbert@gmail.com>\npackage riemanngo\n\nimport (\n\t\"github.com\/riemann\/riemann-go-client\/proto\"\n)\n\n\/\/ Client is an interface to a generic client\ntype Client interface {\n\tSend(message *proto.Msg) (*proto.Msg, error)\n\tConnect() error\n\tClose() error\n}\n\n\/\/ IndexClient is an interface to a generic Client for index queries\ntype IndexClient interface {\n\tQueryIndex(q string) ([]Event, error)\n}\n\n\/\/ request encapsulates a request to send to the Riemann server\ntype request struct {\n\tmessage *proto.Msg\n\tresponse_ch chan response\n}\n\n\/\/ response encapsulates a response from the Riemann server\ntype response struct {\n\tmessage *proto.Msg\n\terr error\n}\n\n\/\/ Send an event using a client\nfunc SendEvent(c Client, e *Event) (*proto.Msg, error) {\n\treturn SendEvents(\n\t\tc, &([]Event{*e}),\n\t)\n}\n\n\/\/ Send multiple events using a client\nfunc SendEvents(c Client, e *[]Event) (*proto.Msg, error) {\n\tbuff := make(\n\t\t[]*proto.Event, len(*e),\n\t)\n\n\tfor i, elem := range *e {\n\t\tepb, err := EventToProtocolBuffer(\n\t\t\t&elem,\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbuff[i] = epb\n\t}\n\n\tmessage := new(proto.Msg)\n\tmessage.Events = buff\n\n\treturn c.Send(message)\n}\n<|endoftext|>"} {"text":"<commit_before>package kcl\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\/kinesisiface\"\n)\n\ntype Client struct {\n\tkinesis kinesisiface.KinesisAPI\n}\n\nfunc New(awsKey, awsSecret, awsRegion string) *Client {\n\tawsConfig := &aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\t\"AKIAI2MUBA4UET6O3IHA\",\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t),\n\t}\n\treturn &Client{\n\t\tkinesis: kinesis.New(session.New(awsConfig)),\n\t}\n}\n\n<commit_msg>client new<commit_after>package kcl\n\nimport (\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\/kinesisiface\"\n)\n\ntype Client struct {\n\tkinesis kinesisiface.KinesisAPI\n}\n\nfunc New(awsKey, awsSecret, awsRegion string) *Client {\n\tawsConfig := &aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\t\"AKIAI2MUBA4UET6O3IHA\",\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t),\n\t}\n\treturn &Client{\n\t\tkinesis: kinesis.New(session.New(awsConfig)),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package atlas \/\/ import \"github.com\/keltia\/ripe-atlas\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/keltia\/proxy\"\n)\n\n\/\/ NewClient is the first function to call.\n\/\/ Yes, it does take multiple config\n\/\/ and the last one wins.\nfunc NewClient(cfgs ...Config) (*Client, error) {\n\tc := &Client{}\n\tfor _, cfg := range cfgs {\n\t\tc.config = cfg\n\t}\n\n\t\/\/ This holds the global options\n\tc.opts = make(map[string]string)\n\n\t\/\/ If no log output is specified, use the default one\n\tif c.config.Log == nil {\n\t\tc.log = log.New(os.Stderr, \"\", log.LstdFlags|log.LUTC)\n\t} else {\n\t\tc.log = c.config.Log\n\t}\n\n\tc.verbose(\"c.config=%#v\", c.config)\n\n\t\/\/ Create and save the http.Client\n\treturn c.addHTTPClient()\n}\n\n\/\/ HasAPIKey returns whether an API key is stored\nfunc (c *Client) HasAPIKey() (string, bool) {\n\tif c.config.APIKey == \"\" {\n\t\treturn \"\", false\n\t}\n\treturn c.config.APIKey, true\n}\n\n\/\/ call is s shortcut\nfunc (c *Client) call(req *http.Request) (*http.Response, error) {\n\tc.verbose(\"Full URL:\\n%v\", req.URL)\n\n\tmyurl, _ := url.Parse(apiEndpoint)\n\treq.Header.Set(\"Host\", myurl.Host)\n\treq.Header.Set(\"User-Agent\", fmt.Sprintf(\"ripe-atlas\/%s\", ourVersion))\n\n\treturn c.client.Do(req)\n}\n\nfunc (c *Client) addHTTPClient() (*Client, error) {\n\t_, transport := proxy.SetupTransport(apiEndpoint)\n\tif transport == nil {\n\t\tc.log.Fatal(\"unable to create httpclient\")\n\t}\n\tc.client = &http.Client{Transport: transport, Timeout: 20 * time.Second}\n\treturn c, nil\n}\n\n\/\/ SetOption sets a global option\nfunc (c *Client) SetOption(name, value string) *Client {\n\tif value != \"\" {\n\t\tc.opts[name] = value\n\t}\n\treturn c\n}\n\nfunc (c *Client) mergeGlobalOptions(opts map[string]string) {\n\tfor k, v := range c.opts {\n\t\topts[k] = v\n\t}\n}\n<commit_msg>Ensure endpoint is not empty.<commit_after>package atlas \/\/ import \"github.com\/keltia\/ripe-atlas\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/keltia\/proxy\"\n)\n\n\/\/ NewClient is the first function to call.\n\/\/ Yes, it does take multiple config\n\/\/ and the last one wins.\nfunc NewClient(cfgs ...Config) (*Client, error) {\n\tc := &Client{}\n\tfor _, cfg := range cfgs {\n\t\tc.config = cfg\n\t}\n\n\t\/\/ This holds the global options\n\tc.opts = make(map[string]string)\n\n\t\/\/ If no log output is specified, use the default one\n\tif c.config.Log == nil {\n\t\tc.log = log.New(os.Stderr, \"\", log.LstdFlags|log.LUTC)\n\t} else {\n\t\tc.log = c.config.Log\n\t}\n\n\t\/\/ Ensure this is not empty\n\tif c.config.endpoint == \"\" {\n\t\tc.config.endpoint = apiEndpoint\n\t}\n\tc.verbose(\"c.config=%#v\", c.config)\n\n\t\/\/ Create and save the http.Client\n\treturn c.addHTTPClient()\n}\n\n\/\/ HasAPIKey returns whether an API key is stored\nfunc (c *Client) HasAPIKey() (string, bool) {\n\tif c.config.APIKey == \"\" {\n\t\treturn \"\", false\n\t}\n\treturn c.config.APIKey, true\n}\n\n\/\/ call is s shortcut\nfunc (c *Client) call(req *http.Request) (*http.Response, error) {\n\tc.verbose(\"Full URL:\\n%v\", req.URL)\n\n\tmyurl, _ := url.Parse(apiEndpoint)\n\treq.Header.Set(\"Host\", myurl.Host)\n\treq.Header.Set(\"User-Agent\", fmt.Sprintf(\"ripe-atlas\/%s\", ourVersion))\n\n\treturn c.client.Do(req)\n}\n\nfunc (c *Client) addHTTPClient() (*Client, error) {\n\t_, transport := proxy.SetupTransport(apiEndpoint)\n\tif transport == nil {\n\t\tc.log.Fatal(\"unable to create httpclient\")\n\t}\n\tc.client = &http.Client{Transport: transport, Timeout: 20 * time.Second}\n\treturn c, nil\n}\n\n\/\/ SetOption sets a global option\nfunc (c *Client) SetOption(name, value string) *Client {\n\tif value != \"\" {\n\t\tc.opts[name] = value\n\t}\n\treturn c\n}\n\nfunc (c *Client) mergeGlobalOptions(opts map[string]string) {\n\tfor k, v := range c.opts {\n\t\topts[k] = v\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package smitego\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"golang.org\/x\/net\/context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ DefaultBaseURL is where smite expects API calls. Why the frick is this HTTP and not HTTPS. (?????)\nconst DefaultBaseURL = \"http:\/\/api.smitegame.com\/smiteapi.svc\"\n\n\/\/ Client can create smite session objects and interact with the smite API\ntype Client struct {\n\tDevID int64\n\tAuthKey string\n\tHTTPClient http.Client\n\tBaseURL string\n\tCurTime func() time.Time\n\tVerboseLog Log\n}\n\n\/\/ Log is a function that Client can take to optionally verbose log what it does internally\ntype Log func(...interface{})\n\n\/\/ ErrNotExpectedJSON is returned by API calls when the response isn't expected JSON\ntype ErrNotExpectedJSON struct {\n\tOriginalBody string\n\tErr error\n}\n\nfunc (c *Client) verboseLog(v ...interface{}) {\n\tif c.VerboseLog != nil {\n\t\tc.VerboseLog(v...)\n\t}\n}\n\nfunc (e *ErrNotExpectedJSON) Error() string {\n\treturn fmt.Sprintf(\"Unexpected JSON: %s from %s\", e.Err.Error(), e.OriginalBody)\n}\n\nfunc (c *Client) clientTime() time.Time {\n\tif c.CurTime == nil {\n\t\treturn time.Now()\n\t}\n\treturn c.CurTime()\n}\n\n\/\/ Ping is a quick way of validating access to the Hi-Rez API\nfunc (c *Client) Ping(ctx context.Context) error {\n\tvar m string\n\tif err := c.doReqURL(ctx, c.urlBase(\"ping\"), &m); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Client) doReqURL(ctx context.Context, u string, jsonInto interface{}) error {\n\tc.verboseLog(\"fetching\", u)\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := withCancel(ctx, &c.HTTPClient, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar b bytes.Buffer\n\tif _, err := io.Copy(&b, resp.Body); err != nil {\n\t\treturn err\n\t}\n\tdebug := b.String()\n\tc.verboseLog(\"Fetch result\", debug)\n\tif err := json.NewDecoder(&b).Decode(jsonInto); err != nil {\n\t\treturn &ErrNotExpectedJSON{\n\t\t\tOriginalBody: debug,\n\t\t\tErr: err,\n\t\t}\n\t}\n\tif err := resp.Body.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ CreateSession is a required step to Authenticate the developerId\/signature for further API use.\nfunc (c *Client) CreateSession(ctx context.Context) (*Session, error) {\n\tvar v createSessionResp\n\tif err := c.doReqURL(ctx, c.url(\"createsession\", \"\"), &v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Session{\n\t\tparent: c,\n\t\tSessionID: v.SessionID,\n\t}, nil\n}\n\nfunc (c *Client) urlBase(endpoint string) string {\n\tbase := c.BaseURL\n\tif c.BaseURL == \"\" {\n\t\tbase = DefaultBaseURL\n\t}\n\treturn fmt.Sprintf(\"%s\/%sjson\", base, endpoint)\n}\n\nfunc (c *Client) url(endpoint string, session string) string {\n\ttimeFmt := c.clientTime().UTC().Format(\"20060102150405\")\n\thasher := md5.New()\n\tsig := fmt.Sprintf(\"%d%s%s%s\", c.DevID, endpoint, c.AuthKey, timeFmt)\n\t_, err := hasher.Write([]byte(sig))\n\tmustNotErr(err)\n\tsignatureBytes := hasher.Sum(nil)\n\tsignature := hex.EncodeToString(signatureBytes)\n\tif session != \"\" {\n\t\tsession = session + \"\/\"\n\t}\n\tret := fmt.Sprintf(\"%s\/%d\/%s\/%s%s\", c.urlBase(endpoint), c.DevID, signature, session, timeFmt)\n\treturn ret\n}\n\nfunc mustNotErr(err error) {\n\tif err != nil {\n\t\tpanic(\"Unexpected error: \" + err.Error())\n\t}\n}\n<commit_msg>Add an example<commit_after>package smitego\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"golang.org\/x\/net\/context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ DefaultBaseURL is where smite expects API calls. Why the frick is this HTTP and not HTTPS. (?????)\nconst DefaultBaseURL = \"http:\/\/api.smitegame.com\/smiteapi.svc\"\n\n\/\/ Client can create smite session objects and interact with the smite API\ntype Client struct {\n\tDevID int64\n\tAuthKey string\n\tHTTPClient http.Client\n\tBaseURL string\n\tCurTime func() time.Time\n\tVerboseLog Log\n}\n\n\/\/ Log is a function that Client can take to optionally verbose log what it does internally\ntype Log func(...interface{})\n\n\/\/ ErrNotExpectedJSON is returned by API calls when the response isn't expected JSON\ntype ErrNotExpectedJSON struct {\n\tOriginalBody string\n\tErr error\n}\n\nfunc (c *Client) verboseLog(v ...interface{}) {\n\tif c.VerboseLog != nil {\n\t\tc.VerboseLog(v...)\n\t}\n}\n\nfunc (e *ErrNotExpectedJSON) Error() string {\n\treturn fmt.Sprintf(\"Unexpected JSON: %s from %s\", e.Err.Error(), e.OriginalBody)\n}\n\nfunc (c *Client) clientTime() time.Time {\n\tif c.CurTime == nil {\n\t\treturn time.Now()\n\t}\n\treturn c.CurTime()\n}\n\n\/\/ Ping is a quick way of validating access to the Hi-Rez API\nfunc (c *Client) Ping(ctx context.Context) error {\n\tvar m string\n\tif err := c.doReqURL(ctx, c.urlBase(\"ping\"), &m); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Client) doReqURL(ctx context.Context, u string, jsonInto interface{}) error {\n\tc.verboseLog(\"fetching\", u)\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := withCancel(ctx, &c.HTTPClient, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar b bytes.Buffer\n\tif _, err := io.Copy(&b, resp.Body); err != nil {\n\t\treturn err\n\t}\n\tdebug := b.String()\n\tc.verboseLog(\"Fetch result\", debug)\n\tif err := json.NewDecoder(&b).Decode(jsonInto); err != nil {\n\t\treturn &ErrNotExpectedJSON{\n\t\t\tOriginalBody: debug,\n\t\t\tErr: err,\n\t\t}\n\t}\n\tif err := resp.Body.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ CreateSession is a required step to Authenticate the developerId\/signature for further API use.\nfunc (c *Client) CreateSession(ctx context.Context) (*Session, error) {\n\tvar v createSessionResp\n\tif err := c.doReqURL(ctx, c.url(\"createsession\", \"\"), &v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Session{\n\t\tparent: c,\n\t\tSessionID: v.SessionID,\n\t}, nil\n}\n\nfunc (c *Client) urlBase(endpoint string) string {\n\tbase := c.BaseURL\n\tif c.BaseURL == \"\" {\n\t\tbase = DefaultBaseURL\n\t}\n\treturn fmt.Sprintf(\"%s\/%sjson\", base, endpoint)\n}\n\nfunc (c *Client) url(endpoint string, session string) string {\n\ttimeFmt := c.clientTime().UTC().Format(\"20060102150405\")\n\thasher := md5.New()\n\tsig := fmt.Sprintf(\"%d%s%s%s\", c.DevID, endpoint, c.AuthKey, timeFmt)\n\t_, err := hasher.Write([]byte(sig))\n\tmustNotErr(err)\n\tsignatureBytes := hasher.Sum(nil)\n\tsignature := hex.EncodeToString(signatureBytes)\n\tif session != \"\" {\n\t\tsession = session + \"\/\"\n\t}\n\tret := fmt.Sprintf(\"%s\/%d\/%s\/%s%s\", c.urlBase(endpoint), c.DevID, signature, session, timeFmt)\n\treturn ret\n}\n\nfunc mustNotErr(err error) {\n\tif err != nil {\n\t\tpanic(\"Unexpected error: \" + err.Error())\n\t}\n}\n\n\/\/ Example of creating a session and making a function call.\nfunc Example() {\n\t\/\/ First make a client to describe how you want to connect. Each client returns a session\n\t\/\/ and primary function calls are done on the session. Concurrent sessions are limited\n\t\/\/ by HiRez\n\tclient := Client{\n\t\tDevID: 123,\n\t\tAuthKey: \"AuthKey123\",\n\t}\n\n\t\/\/ A context is how you can time out function calls\n\tctx := context.Background()\n\n\t\/\/ Some functions don't require a session first and can be called on the client directly\n\t_ = client.Ping(ctx)\n\n\t\/\/ Most functions require a session\n\n\tsession, _ := client.CreateSession(ctx)\n\tgods, _ := session.GetGods(ctx, English)\n\tfmt.Printf(\"Got %d gods\\n\", len(gods))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc do_client() int {\n\taddr := *g_addr\n\tif *g_sock == \"unix\" {\n\t\taddr = get_socket_filename()\n\t}\n\n\t\/\/ client\n\tclient, err := rpc.Dial(*g_sock, addr)\n\tif err != nil {\n\t\tif *g_sock == \"unix\" && file_exists(addr) {\n\t\t\tos.Remove(addr)\n\t\t}\n\n\t\terr = try_run_server()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tclient, err = try_to_connect(*g_sock, addr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\tdefer client.Close()\n\n\tif flag.NArg() > 0 {\n\t\tswitch flag.Arg(0) {\n\t\tcase \"autocomplete\":\n\t\t\tcmd_auto_complete(client)\n\t\tcase \"cursortype\":\n\t\t\tcmd_cursor_type_pkg(client)\n\t\tcase \"close\":\n\t\t\tcmd_close(client)\n\t\tcase \"status\":\n\t\t\tcmd_status(client)\n\t\tcase \"drop-cache\":\n\t\t\tcmd_drop_cache(client)\n\t\tcase \"set\":\n\t\t\tcmd_set(client)\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc try_run_server() error {\n\tpath := get_executable_filename()\n\targs := []string{os.Args[0], \"-s\", \"-sock\", *g_sock, \"-addr\", *g_addr}\n\tcwd, _ := os.Getwd()\n\tprocattr := os.ProcAttr{Dir: cwd, Env: os.Environ(), Files: []*os.File{nil, nil, nil}}\n\tp, err := os.StartProcess(path, args, &procattr)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.Release()\n}\n\nfunc try_to_connect(network, address string) (client *rpc.Client, err error) {\n\tt := 0\n\tfor {\n\t\tclient, err = rpc.Dial(network, address)\n\t\tif err != nil && t < 1000 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tt += 10\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\treturn\n}\n\nfunc prepare_file_filename_cursor() ([]byte, string, int) {\n\tvar file []byte\n\tvar err error\n\n\tif *g_input != \"\" {\n\t\tfile, err = ioutil.ReadFile(*g_input)\n\t} else {\n\t\tfile, err = ioutil.ReadAll(os.Stdin)\n\t}\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tvar skipped int\n\tfile, skipped = filter_out_shebang(file)\n\n\tfilename := *g_input\n\tcursor := -1\n\n\toffset := \"\"\n\tswitch flag.NArg() {\n\tcase 2:\n\t\toffset = flag.Arg(1)\n\tcase 3:\n\t\tfilename = flag.Arg(1) \/\/ Override default filename\n\t\toffset = flag.Arg(2)\n\t}\n\n\tif offset != \"\" {\n\t\tif offset[0] == 'c' || offset[0] == 'C' {\n\t\t\tcursor, _ = strconv.Atoi(offset[1:])\n\t\t\tcursor = char_to_byte_offset(file, cursor)\n\t\t} else {\n\t\t\tcursor, _ = strconv.Atoi(offset)\n\t\t}\n\t}\n\n\tcursor -= skipped\n\tif filename != \"\" && !filepath.IsAbs(filename) {\n\t\tcwd, _ := os.Getwd()\n\t\tfilename = filepath.Join(cwd, filename)\n\t}\n\treturn file, filename, cursor\n}\n\n\/\/-------------------------------------------------------------------------\n\/\/ commands\n\/\/-------------------------------------------------------------------------\n\nfunc cmd_status(c *rpc.Client) {\n\tfmt.Printf(\"%s\\n\", client_status(c, 0))\n}\n\nfunc cmd_auto_complete(c *rpc.Client) {\n\tcontext := pack_build_context(&build.Default)\n\tfile, filename, cursor := prepare_file_filename_cursor()\n\tf := get_formatter(*g_format)\n\tf.write_candidates(client_auto_complete(c, file, filename, cursor, context))\n}\n\nfunc cmd_cursor_type_pkg(c *rpc.Client) {\n\tfile, filename, cursor := prepare_file_filename_cursor()\n\ttyp, pkg := client_cursor_type_pkg(c, file, filename, cursor)\n\tfmt.Printf(\"%s,,%s\\n\", typ, pkg)\n}\n\nfunc cmd_close(c *rpc.Client) {\n\tclient_close(c, 0)\n}\n\nfunc cmd_drop_cache(c *rpc.Client) {\n\tclient_drop_cache(c, 0)\n}\n\nfunc cmd_set(c *rpc.Client) {\n\tswitch flag.NArg() {\n\tcase 1:\n\t\tfmt.Print(client_set(c, \"\\x00\", \"\\x00\"))\n\tcase 2:\n\t\tfmt.Print(client_set(c, flag.Arg(1), \"\\x00\"))\n\tcase 3:\n\t\tfmt.Print(client_set(c, flag.Arg(1), flag.Arg(2)))\n\t}\n}\n<commit_msg>When running the server, use os.DevNull for file descriptors.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc do_client() int {\n\taddr := *g_addr\n\tif *g_sock == \"unix\" {\n\t\taddr = get_socket_filename()\n\t}\n\n\t\/\/ client\n\tclient, err := rpc.Dial(*g_sock, addr)\n\tif err != nil {\n\t\tif *g_sock == \"unix\" && file_exists(addr) {\n\t\t\tos.Remove(addr)\n\t\t}\n\n\t\terr = try_run_server()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tclient, err = try_to_connect(*g_sock, addr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\tdefer client.Close()\n\n\tif flag.NArg() > 0 {\n\t\tswitch flag.Arg(0) {\n\t\tcase \"autocomplete\":\n\t\t\tcmd_auto_complete(client)\n\t\tcase \"cursortype\":\n\t\t\tcmd_cursor_type_pkg(client)\n\t\tcase \"close\":\n\t\t\tcmd_close(client)\n\t\tcase \"status\":\n\t\t\tcmd_status(client)\n\t\tcase \"drop-cache\":\n\t\t\tcmd_drop_cache(client)\n\t\tcase \"set\":\n\t\t\tcmd_set(client)\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc try_run_server() error {\n\tpath := get_executable_filename()\n\targs := []string{os.Args[0], \"-s\", \"-sock\", *g_sock, \"-addr\", *g_addr}\n\tcwd, _ := os.Getwd()\n\n\tvar err error\n\tstdin, err := os.Open(os.DevNull)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstdout, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprocattr := os.ProcAttr{Dir: cwd, Env: os.Environ(), Files: []*os.File{stdin, stdout, stderr}}\n\tp, err := os.StartProcess(path, args, &procattr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn p.Release()\n}\n\nfunc try_to_connect(network, address string) (client *rpc.Client, err error) {\n\tt := 0\n\tfor {\n\t\tclient, err = rpc.Dial(network, address)\n\t\tif err != nil && t < 1000 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tt += 10\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\treturn\n}\n\nfunc prepare_file_filename_cursor() ([]byte, string, int) {\n\tvar file []byte\n\tvar err error\n\n\tif *g_input != \"\" {\n\t\tfile, err = ioutil.ReadFile(*g_input)\n\t} else {\n\t\tfile, err = ioutil.ReadAll(os.Stdin)\n\t}\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tvar skipped int\n\tfile, skipped = filter_out_shebang(file)\n\n\tfilename := *g_input\n\tcursor := -1\n\n\toffset := \"\"\n\tswitch flag.NArg() {\n\tcase 2:\n\t\toffset = flag.Arg(1)\n\tcase 3:\n\t\tfilename = flag.Arg(1) \/\/ Override default filename\n\t\toffset = flag.Arg(2)\n\t}\n\n\tif offset != \"\" {\n\t\tif offset[0] == 'c' || offset[0] == 'C' {\n\t\t\tcursor, _ = strconv.Atoi(offset[1:])\n\t\t\tcursor = char_to_byte_offset(file, cursor)\n\t\t} else {\n\t\t\tcursor, _ = strconv.Atoi(offset)\n\t\t}\n\t}\n\n\tcursor -= skipped\n\tif filename != \"\" && !filepath.IsAbs(filename) {\n\t\tcwd, _ := os.Getwd()\n\t\tfilename = filepath.Join(cwd, filename)\n\t}\n\treturn file, filename, cursor\n}\n\n\/\/-------------------------------------------------------------------------\n\/\/ commands\n\/\/-------------------------------------------------------------------------\n\nfunc cmd_status(c *rpc.Client) {\n\tfmt.Printf(\"%s\\n\", client_status(c, 0))\n}\n\nfunc cmd_auto_complete(c *rpc.Client) {\n\tcontext := pack_build_context(&build.Default)\n\tfile, filename, cursor := prepare_file_filename_cursor()\n\tf := get_formatter(*g_format)\n\tf.write_candidates(client_auto_complete(c, file, filename, cursor, context))\n}\n\nfunc cmd_cursor_type_pkg(c *rpc.Client) {\n\tfile, filename, cursor := prepare_file_filename_cursor()\n\ttyp, pkg := client_cursor_type_pkg(c, file, filename, cursor)\n\tfmt.Printf(\"%s,,%s\\n\", typ, pkg)\n}\n\nfunc cmd_close(c *rpc.Client) {\n\tclient_close(c, 0)\n}\n\nfunc cmd_drop_cache(c *rpc.Client) {\n\tclient_drop_cache(c, 0)\n}\n\nfunc cmd_set(c *rpc.Client) {\n\tswitch flag.NArg() {\n\tcase 1:\n\t\tfmt.Print(client_set(c, \"\\x00\", \"\\x00\"))\n\tcase 2:\n\t\tfmt.Print(client_set(c, flag.Arg(1), \"\\x00\"))\n\tcase 3:\n\t\tfmt.Print(client_set(c, flag.Arg(1), flag.Arg(2)))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package matrixfederation\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ A Client makes request to the federation listeners of matrix\n\/\/ homeservers\ntype Client struct {\n\tclient http.Client\n}\n\n\/\/ UserInfo represents information about a user.\ntype UserInfo struct {\n\tSub string `json:\"sub\"`\n}\n\n\/\/ NewClient makes a new Client\nfunc NewClient() *Client {\n\t\/\/ TODO: Verify ceritificates\n\ttripper := federationTripper{\n\t\ttransport: &http.Transport{\n\t\t\t\/\/ Set our own DialTLS function to avoid the default net\/http SNI.\n\t\t\t\/\/ By default net\/http and crypto\/tls set the SNI to the target host.\n\t\t\t\/\/ By avoiding the default implementation we can keep the ServerName\n\t\t\t\/\/ as the empty string so that crypto\/tls doesn't add SNI.\n\t\t\tDialTLS: func(network, addr string) (net.Conn, error) {\n\t\t\t\trawconn, err := net.Dial(network, addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/ Wrap a raw connection ourselves since tls.Dial defaults the SNI\n\t\t\t\tconn := tls.Client(rawconn, &tls.Config{\n\t\t\t\t\tServerName: \"\",\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t})\n\t\t\t\tif err := conn.Handshake(); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn conn, nil\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &Client{\n\t\tclient: http.Client{Transport: &tripper},\n\t}\n}\n\ntype federationTripper struct {\n\ttransport http.RoundTripper\n}\n\nfunc makeHTTPSURL(u *url.URL, addr string) (httpsURL url.URL) {\n\thttpsURL = *u\n\thttpsURL.Scheme = \"https\"\n\thttpsURL.Host = addr\n\treturn\n}\n\nfunc (f *federationTripper) RoundTrip(r *http.Request) (*http.Response, error) {\n\thost := r.URL.Host\n\tdnsResult, err := LookupServer(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *http.Response\n\terr = fmt.Errorf(\"no address found for matrix host %v\", host)\n\tfor _, addr := range dnsResult.Addrs {\n\t\tu := makeHTTPSURL(r.URL, addr)\n\t\tr.URL = &u\n\t\tresp, err = f.transport.RoundTrip(r)\n\t\tif err == nil {\n\t\t\treturn resp, nil\n\t\t}\n\t}\n\treturn nil, err\n}\n\n\/\/ LookupUserInfo gets information about a user from a given matrix homeserver\n\/\/ using a bearer access token.\nfunc (fc *Client) LookupUserInfo(matrixServer, token string) (u UserInfo, err error) {\n\turl := url.URL{\n\t\tScheme: \"matrix\",\n\t\tHost: matrixServer,\n\t\tPath: \"\/_matrix\/federation\/v1\/openid\/userinfo\",\n\t\tRawQuery: url.Values{\"access_token\": []string{token}}.Encode(),\n\t}\n\n\tvar response *http.Response\n\tresponse, err = fc.client.Get(url.String())\n\tif response != nil {\n\t\tdefer response.Body.Close()\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tif response.StatusCode < 200 || response.StatusCode >= 300 {\n\t\tvar errorOutput []byte\n\t\terrorOutput, err = ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = fmt.Errorf(\"HTTP %d : %s\", response.StatusCode, errorOutput)\n\t\treturn\n\t}\n\n\terr = json.NewDecoder(response.Body).Decode(&u)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tuserParts := strings.SplitN(u.Sub, \":\", 2)\n\tif len(userParts) != 2 || userParts[1] != matrixServer {\n\t\terr = fmt.Errorf(\"userID doesn't match server name '%v' != '%v'\", u.Sub, matrixServer)\n\t\treturn\n\t}\n\n\treturn\n}\n<commit_msg>Review comments<commit_after>package matrixfederation\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ A Client makes request to the federation listeners of matrix\n\/\/ homeservers\ntype Client struct {\n\tclient http.Client\n}\n\n\/\/ UserInfo represents information about a user.\ntype UserInfo struct {\n\tSub string `json:\"sub\"`\n}\n\n\/\/ NewClient makes a new Client\nfunc NewClient() *Client {\n\t\/\/ TODO: Verify ceritificates\n\ttripper := federationTripper{\n\t\ttransport: &http.Transport{\n\t\t\t\/\/ Set our own DialTLS function to avoid the default net\/http SNI.\n\t\t\t\/\/ By default net\/http and crypto\/tls set the SNI to the target host.\n\t\t\t\/\/ By avoiding the default implementation we can keep the ServerName\n\t\t\t\/\/ as the empty string so that crypto\/tls doesn't add SNI.\n\t\t\tDialTLS: func(network, addr string) (net.Conn, error) {\n\t\t\t\trawconn, err := net.Dial(network, addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/ Wrap a raw connection ourselves since tls.Dial defaults the SNI\n\t\t\t\tconn := tls.Client(rawconn, &tls.Config{\n\t\t\t\t\tServerName: \"\",\n\t\t\t\t\t\/\/ TODO: We should be checking that the TLS certificate we see here matches\n\t\t\t\t\t\/\/ one of the allowed SHA-256 fingerprints for the server.\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t})\n\t\t\t\tif err := conn.Handshake(); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn conn, nil\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &Client{\n\t\tclient: http.Client{Transport: &tripper},\n\t}\n}\n\ntype federationTripper struct {\n\ttransport http.RoundTripper\n}\n\nfunc makeHTTPSURL(u *url.URL, addr string) (httpsURL url.URL) {\n\thttpsURL = *u\n\thttpsURL.Scheme = \"https\"\n\thttpsURL.Host = addr\n\treturn\n}\n\nfunc (f *federationTripper) RoundTrip(r *http.Request) (*http.Response, error) {\n\thost := r.URL.Host\n\tdnsResult, err := LookupServer(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *http.Response\n\tfor _, addr := range dnsResult.Addrs {\n\t\tu := makeHTTPSURL(r.URL, addr)\n\t\tr.URL = &u\n\t\tresp, err = f.transport.RoundTrip(r)\n\t\tif err == nil {\n\t\t\treturn resp, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no address found for matrix host %v\", host)\n}\n\n\/\/ LookupUserInfo gets information about a user from a given matrix homeserver\n\/\/ using a bearer access token.\nfunc (fc *Client) LookupUserInfo(matrixServer, token string) (u UserInfo, err error) {\n\turl := url.URL{\n\t\tScheme: \"matrix\",\n\t\tHost: matrixServer,\n\t\tPath: \"\/_matrix\/federation\/v1\/openid\/userinfo\",\n\t\tRawQuery: url.Values{\"access_token\": []string{token}}.Encode(),\n\t}\n\n\tvar response *http.Response\n\tresponse, err = fc.client.Get(url.String())\n\tif response != nil {\n\t\tdefer response.Body.Close()\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tif response.StatusCode < 200 || response.StatusCode >= 300 {\n\t\tvar errorOutput []byte\n\t\terrorOutput, err = ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = fmt.Errorf(\"HTTP %d : %s\", response.StatusCode, errorOutput)\n\t\treturn\n\t}\n\n\terr = json.NewDecoder(response.Body).Decode(&u)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tuserParts := strings.SplitN(u.Sub, \":\", 2)\n\tif len(userParts) != 2 || userParts[1] != matrixServer {\n\t\terr = fmt.Errorf(\"userID doesn't match server name '%v' != '%v'\", u.Sub, matrixServer)\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package fluffle\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype Client struct {\n\tUUID uuid.UUID\n\tConnection *amqp.Connection\n\tChannel *amqp.Channel\n\tResponseQueue *amqp.Queue\n\n\tpendingResponses map[string]chan *Response\n}\n\ntype pendingResponse struct {\n\tcond *sync.Cond\n\tpayload *Response\n}\n\n\/\/ Creates a client with the given UUID and initializes its internal data\n\/\/ structures. Does not setup connections or perform any network operations.\n\/\/ You will almost always want to use NewClient.\nfunc NewBareClient(uuid uuid.UUID) *Client {\n\treturn &Client{\n\t\tUUID: uuid,\n\t\tpendingResponses: make(map[string]chan *Response),\n\t}\n}\n\n\/\/ Create a client, connect it to the given AMQP server, and setup a queue\n\/\/ to receive responses on.\nfunc NewClient(url string) (*Client, error) {\n\tuuid := uuid.NewV1()\n\tclient := NewBareClient(uuid)\n\n\tconnection, err := amqp.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Connection = connection\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Channel = channel\n\n\terr = client.SetupResponseQueue()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}\n\n\/\/ Declares the response queue and starts consuming (listening) deliveries\n\/\/ from it. This is normally the final step in setting up a usable client.\nfunc (c *Client) SetupResponseQueue() error {\n\tdurable := false\n\tautoDelete := false\n\texclusive := true\n\tnoWait := false\n\tqueue, err := c.Channel.QueueDeclare(ResponseQueueName(c.UUID.String()), durable, autoDelete, exclusive, noWait, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ResponseQueue = &queue\n\n\tautoAck := false\n\tnoLocal := false\n\tdeliveries, err := c.Channel.Consume(queue.Name, \"\", autoAck, exclusive, noLocal, noWait, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor delivery := range deliveries {\n\t\t\tc.handleReply(&delivery)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (c *Client) handleReply(delivery *amqp.Delivery) {\n\tdelivery.Ack(false)\n\n\tpayload := &Response{}\n\terr := json.Unmarshal(delivery.Body, &payload)\n\tif err != nil {\n\t\tlog.Printf(\"Error unmarshalling response payload: %v\", err)\n\t\treturn\n\t}\n\n\tid := payload.Id\n\tresponseChan, present := c.pendingResponses[id]\n\tif present {\n\t\tresponseChan <- payload\n\t} else {\n\t\tlog.Printf(\"No response chan found: id=%s\", id)\n\t}\n}\n\n\/\/ Call a remote method over JSON-RPC and return its response. This will block\n\/\/ the goroutine on which it is called.\nfunc (c *Client) Call(method string, params []interface{}, queue string) (interface{}, error) {\n\tid := uuid.NewV4().String()\n\n\trequest := &Request{\n\t\tJsonRpc: \"2.0\",\n\t\tId: id,\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n\n\treturn c.CallWithRequest(request, queue)\n}\n\nfunc (c *Client) CallWithRequest(request *Request, queue string) (interface{}, error) {\n\tresponse, err := c.PublishAndWait(request, queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.DecodeResponse(response)\n}\n\n\/\/ Publishes a request onto the given queue, then waits for a response to that\n\/\/ message on the client's response queue.\nfunc (c *Client) PublishAndWait(payload *Request, queue string) (*Response, error) {\n\ttimeoutChan := make(chan bool, 1)\n\tresponseChan := make(chan *Response, 1)\n\tc.pendingResponses[payload.Id] = responseChan\n\tdefer delete(c.pendingResponses, payload.Id)\n\n\terr := c.Publish(payload, queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(5 * time.Second)\n\t\ttimeoutChan <- true\n\t}()\n\n\tselect {\n\tcase response := <-responseChan:\n\t\treturn response, nil\n\tcase <-timeoutChan:\n\t\treturn nil, fmt.Errorf(\"Timed out\")\n\t}\n}\n\n\/\/ payload: JSON-RPC request payload to be sent\n\/\/\n\/\/ queue: Queue on which to send the request\nfunc (c *Client) Publish(payload *Request, queue string) error {\n\troutingKey := RequestQueueName(queue)\n\tcorrelationId := payload.Id\n\treplyTo := c.ResponseQueue.Name\n\n\tbody, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmandatory := false\n\timmediate := false\n\tpublishing := amqp.Publishing{\n\t\tCorrelationId: correlationId,\n\t\tReplyTo: replyTo,\n\t\tBody: body,\n\t}\n\treturn c.Channel.Publish(DEFAULT_EXCHANGE, routingKey, mandatory, immediate, publishing)\n}\n\n\/\/ Figure out what was in the response payload: was it a result, an error,\n\/\/ or unknown?\nfunc (c *Client) DecodeResponse(response *Response) (interface{}, error) {\n\tif response.Result != nil {\n\t\treturn response.Result, nil\n\t}\n\tif response.Error != nil {\n\t\treturn nil, response.Error\n\t}\n\treturn nil, &ErrorResponse{\n\t\tCode: 0,\n\t\tMessage: \"Missing both `result' and `error' on Response object\",\n\t\tData: nil,\n\t}\n}\n<commit_msg>Remove unused pending response struct<commit_after>package fluffle\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype Client struct {\n\tUUID uuid.UUID\n\tConnection *amqp.Connection\n\tChannel *amqp.Channel\n\tResponseQueue *amqp.Queue\n\n\tpendingResponses map[string]chan *Response\n}\n\n\/\/ Creates a client with the given UUID and initializes its internal data\n\/\/ structures. Does not setup connections or perform any network operations.\n\/\/ You will almost always want to use NewClient.\nfunc NewBareClient(uuid uuid.UUID) *Client {\n\treturn &Client{\n\t\tUUID: uuid,\n\t\tpendingResponses: make(map[string]chan *Response),\n\t}\n}\n\n\/\/ Create a client, connect it to the given AMQP server, and setup a queue\n\/\/ to receive responses on.\nfunc NewClient(url string) (*Client, error) {\n\tuuid := uuid.NewV1()\n\tclient := NewBareClient(uuid)\n\n\tconnection, err := amqp.Dial(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Connection = connection\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Channel = channel\n\n\terr = client.SetupResponseQueue()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}\n\n\/\/ Declares the response queue and starts consuming (listening) deliveries\n\/\/ from it. This is normally the final step in setting up a usable client.\nfunc (c *Client) SetupResponseQueue() error {\n\tdurable := false\n\tautoDelete := false\n\texclusive := true\n\tnoWait := false\n\tqueue, err := c.Channel.QueueDeclare(ResponseQueueName(c.UUID.String()), durable, autoDelete, exclusive, noWait, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ResponseQueue = &queue\n\n\tautoAck := false\n\tnoLocal := false\n\tdeliveries, err := c.Channel.Consume(queue.Name, \"\", autoAck, exclusive, noLocal, noWait, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor delivery := range deliveries {\n\t\t\tc.handleReply(&delivery)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (c *Client) handleReply(delivery *amqp.Delivery) {\n\tdelivery.Ack(false)\n\n\tpayload := &Response{}\n\terr := json.Unmarshal(delivery.Body, &payload)\n\tif err != nil {\n\t\tlog.Printf(\"Error unmarshalling response payload: %v\", err)\n\t\treturn\n\t}\n\n\tid := payload.Id\n\tresponseChan, present := c.pendingResponses[id]\n\tif present {\n\t\tresponseChan <- payload\n\t} else {\n\t\tlog.Printf(\"No response chan found: id=%s\", id)\n\t}\n}\n\n\/\/ Call a remote method over JSON-RPC and return its response. This will block\n\/\/ the goroutine on which it is called.\nfunc (c *Client) Call(method string, params []interface{}, queue string) (interface{}, error) {\n\tid := uuid.NewV4().String()\n\n\trequest := &Request{\n\t\tJsonRpc: \"2.0\",\n\t\tId: id,\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n\n\treturn c.CallWithRequest(request, queue)\n}\n\nfunc (c *Client) CallWithRequest(request *Request, queue string) (interface{}, error) {\n\tresponse, err := c.PublishAndWait(request, queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.DecodeResponse(response)\n}\n\n\/\/ Publishes a request onto the given queue, then waits for a response to that\n\/\/ message on the client's response queue.\nfunc (c *Client) PublishAndWait(payload *Request, queue string) (*Response, error) {\n\ttimeoutChan := make(chan bool, 1)\n\tresponseChan := make(chan *Response, 1)\n\tc.pendingResponses[payload.Id] = responseChan\n\tdefer delete(c.pendingResponses, payload.Id)\n\n\terr := c.Publish(payload, queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(5 * time.Second)\n\t\ttimeoutChan <- true\n\t}()\n\n\tselect {\n\tcase response := <-responseChan:\n\t\treturn response, nil\n\tcase <-timeoutChan:\n\t\treturn nil, fmt.Errorf(\"Timed out\")\n\t}\n}\n\n\/\/ payload: JSON-RPC request payload to be sent\n\/\/\n\/\/ queue: Queue on which to send the request\nfunc (c *Client) Publish(payload *Request, queue string) error {\n\troutingKey := RequestQueueName(queue)\n\tcorrelationId := payload.Id\n\treplyTo := c.ResponseQueue.Name\n\n\tbody, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmandatory := false\n\timmediate := false\n\tpublishing := amqp.Publishing{\n\t\tCorrelationId: correlationId,\n\t\tReplyTo: replyTo,\n\t\tBody: body,\n\t}\n\treturn c.Channel.Publish(DEFAULT_EXCHANGE, routingKey, mandatory, immediate, publishing)\n}\n\n\/\/ Figure out what was in the response payload: was it a result, an error,\n\/\/ or unknown?\nfunc (c *Client) DecodeResponse(response *Response) (interface{}, error) {\n\tif response.Result != nil {\n\t\treturn response.Result, nil\n\t}\n\tif response.Error != nil {\n\t\treturn nil, response.Error\n\t}\n\treturn nil, &ErrorResponse{\n\t\tCode: 0,\n\t\tMessage: \"Missing both `result' and `error' on Response object\",\n\t\tData: nil,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"strings\"\n \"os\"\n \"io\"\n \"errors\"\n \"net\/url\"\n \"path\/filepath\"\n \"github.com\/aws\/aws-sdk-go\/aws\"\n \"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n \"github.com\/aws\/aws-sdk-go\/service\/kms\"\n \"github.com\/aws\/aws-sdk-go\/aws\/session\"\n \"github.com\/aws\/aws-sdk-go\/service\/s3\/s3crypto\"\n \"github.com\/aws\/aws-sdk-go\/service\/s3\"\n \"github.com\/yookoala\/realpath\"\n \"fmt\"\n \"strconv\"\n)\n\ntype S3Location struct {\n Bucket string\n Key string\n}\n\nvar (\n InvalidS3LocationError = errors.New(\"Invalid S3 Location\")\n InvalidS3FolderError = errors.New(\"S3 Key must point to an S3 folder\")\n FolderNotWritableError = errors.New(\"Folder is not writable\")\n NotExistError = errors.New(\"Folder does not exist\")\n workers int = 0\n)\n\nfunc init() {\n val, ok := os.LookupEnv(\"NUM_WORKERS\")\n if ok {\n parsedVal, err := strconv.Atoi(val)\n\n if err == nil {\n workers = parsedVal\n }\n }\n\n if workers == 0 {\n workers = 8\n }\n}\n\nfunc NewS3Location(path string) (location *S3Location, err error) {\n if !strings.Contains(path, \"s3:\/\/\") {\n return nil, InvalidS3LocationError\n }\n\n u, err := url.Parse(path)\n\n if err != nil {\n return nil, err\n }\n\n location = &S3Location{\n Bucket: u.Host,\n Key: strings.Trim(u.Path, \"\/\"),\n }\n\n return\n}\n\ntype Client struct {\n Session *session.Session\n KmsId string\n s3 *s3.S3\n encryptionClient *s3crypto.EncryptionClient\n decryptionClient *s3crypto.DecryptionClient\n}\n\n\/\/ Make load strategy configurable\nfunc NewClient(sess *session.Session, kmsId string) (client *Client) {\n client = &Client{\n Session: sess,\n KmsId: kmsId,\n }\n\n client.Init()\n\n return\n}\n\nfunc (cli *Client) Init() {\n handler := s3crypto.NewKMSKeyGenerator(kms.New(cli.Session), cli.KmsId)\n crypto := s3crypto.AESGCMContentCipherBuilder(handler)\n cli.s3 = s3.New(cli.Session)\n cli.encryptionClient = s3crypto.NewEncryptionClient(cli.Session, crypto)\n cli.decryptionClient = s3crypto.NewDecryptionClient(cli.Session)\n}\n\nfunc (cli *Client) UploadFile(source string, dest *S3Location) (error) {\n file, openErr := os.Open(source)\n\n if openErr != nil {\n return openErr\n }\n defer file.Close()\n\n input := &s3.PutObjectInput{\n Body: aws.ReadSeekCloser(file),\n Bucket: &dest.Bucket,\n Key: &dest.Key,\n ACL: aws.String(\"bucket-owner-full-control\"),\n }\n\n _, err2 := cli.encryptionClient.PutObject(input)\n return err2\n}\n\nfunc (cli *Client) DownloadFile(source *S3Location, dest string) (error) {\n \/\/ we want to support both encrypted and non-encrypted, so we call a HEAD to check\n head, err := cli.s3.HeadObject(&s3.HeadObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n })\n\n if err != nil {\n return err\n }\n\n var output *s3.GetObjectOutput\n input := &s3.GetObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n }\n\n \/\/ from http:\/\/docs.aws.amazon.com\/AWSJavaSDK\/latest\/javadoc\/com\/amazonaws\/services\/s3\/package-summary.html\n \/\/ key wrapping algorithm, x-amz-wrap-alg is always set to \"kms\" for CSE\n if val, ok := head.Metadata[\"X-Amz-Wrap-Alg\"]; ok && *val == \"kms\" {\n output, err = cli.decryptionClient.GetObject(input)\n } else {\n output, err = cli.s3.GetObject(input)\n }\n\n if err != nil {\n return err\n }\n\n dir := filepath.Dir(dest)\n os.MkdirAll(dir, 0766)\n\n file, openErr := os.Create(dest)\n if openErr != nil {\n return openErr\n }\n defer file.Close()\n\n _, writeErr := io.Copy(file, output.Body)\n return writeErr\n}\n\nfunc (cli *Client) DownloadFolder(source *S3Location, dest string) (error) {\n \/\/ validate the destination\n if ok, _ := IsDirWritable(dest); !ok {\n return FolderNotWritableError\n }\n\n \/\/ validate the source\n if err := cli.validateFolderKey(source); err != nil {\n return err\n }\n\n startIdx := len(source.Key)\n p := NewWorkerPool(workers)\n tf := func(item *S3Location) (func () error) {\n return func() error {\n destFile := strings.Trim(item.Key[startIdx:], \"\/\")\n fmt.Printf(\"Downloading %v ...\\n\", destFile)\n return cli.DownloadFile(item, filepath.Join(dest, destFile))\n }\n }\n\n errCh := cli.listObjects(source, p, tf)\n\n finalErr := monitorPoolError(p, errCh)\n\n if finalErr == io.EOF {\n return nil\n }\n\n return finalErr\n}\n\nfunc (cli *Client) UploadFolder(source string, dest *S3Location) (error) {\n realPath, err := realpath.Realpath(source)\n\n if err != nil {\n return err\n }\n\n stat, err := os.Stat(realPath)\n if err != nil {\n return err\n }\n\n if !stat.IsDir() {\n return NotExistError\n }\n\n p := NewWorkerPool(workers)\n errCh := make(chan error)\n\n \/\/ task factory function\n tf := func(key string) (func() error) {\n return func() error {\n sourceFile := filepath.Join(source, key)\n destFile := &S3Location{dest.Bucket, fmt.Sprintf(\"%v\/%v\", dest.Key, key)}\n fmt.Printf(\"Uploading %v\\n\", key)\n return cli.UploadFile(sourceFile, destFile)\n }\n }\n\n go func() {\n startIdx := len(realPath)\n upload := func(path string, f os.FileInfo, err error) error {\n\n \/\/ if we have no error and this is a file, then we submit the task\n if err == nil && !f.IsDir() {\n key := path[startIdx+1:]\n err = p.SubmitFunc(tf(key))\n }\n\n return err\n }\n\n err := filepath.Walk(realPath, upload)\n\n if err == nil {\n p.Stop() \/\/ stop the pool to drain all current task\n errCh <- io.EOF\n } else {\n errCh <- err\n }\n }()\n\n finalErr := monitorPoolError(p, errCh)\n\n return finalErr\n}\n\n\/\/ Validate S3 location for download. It will make sure that the location is not pointed\n\/\/ to an S3 file\nfunc (cli *Client) validateFolderKey(source *S3Location) (error) {\n svc := s3.New(cli.Session)\n\n \/\/ just make sure this is not a file\n if len(source.Key) > 0 {\n _, err := svc.GetObject(&s3.GetObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n })\n\n if err != nil {\n \/\/ it's okie that the key does not exist\n if awsErr, ok := err.(awserr.Error); ok {\n if awsErr.Code() != s3.ErrCodeNoSuchKey {\n return err\n }\n } else {\n return err\n }\n } else {\n return InvalidS3FolderError\n }\n }\n\n return nil\n}\n\n\/\/ List object from an S3Location and yielding the result\nfunc (cli *Client) listObjects(location *S3Location, p *WorkerPool,\n tf func(*S3Location) (func() error)) (errCh chan error) {\n errCh = make(chan error)\n\n svc := s3.New(cli.Session)\n go func() {\n var marker string\n\n for {\n output, err := svc.ListObjects(&s3.ListObjectsInput{\n Bucket: &location.Bucket,\n Prefix: &location.Key,\n Marker: &marker,\n })\n\n if err == nil {\n for _, object := range output.Contents {\n \/\/ skip directory and instruction file\n if !strings.HasSuffix(*object.Key, \"\/\") &&\n !strings.HasSuffix(*object.Key, s3crypto.DefaultInstructionKeySuffix) {\n\n err = p.SubmitFunc(tf(&S3Location{\n Bucket: location.Bucket,\n Key: *object.Key,\n }))\n\n if err != nil {\n break\n }\n }\n\n marker = *object.Key\n }\n\n \/\/ continue if there is no error\n if err == nil {\n if *output.IsTruncated {\n \/\/ if NextMarker is available, then use it\n if output.NextMarker != nil {\n marker = *output.NextMarker\n }\n } else {\n err = io.EOF\n }\n }\n }\n\n if err != nil {\n p.Stop()\n errCh <- err\n break\n }\n }\n }()\n\n return\n}\n\nfunc monitorPoolError(p *WorkerPool, errCh chan error) error {\n var finalErr error\n\n for {\n done := false\n\n select {\n case t := <- p.ResultCh:\n if t.Err != nil {\n fmt.Fprint(os.Stderr, \"%v\\n\", t.Err)\n p.Stop()\n }\n case finalErr = <- errCh:\n done = true\n }\n\n if done {\n break\n }\n }\n\n if finalErr == io.EOF {\n return nil\n }\n\n return finalErr\n}\n<commit_msg>Format typo<commit_after>package main\n\nimport (\n \"strings\"\n \"os\"\n \"io\"\n \"errors\"\n \"net\/url\"\n \"path\/filepath\"\n \"github.com\/aws\/aws-sdk-go\/aws\"\n \"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n \"github.com\/aws\/aws-sdk-go\/service\/kms\"\n \"github.com\/aws\/aws-sdk-go\/aws\/session\"\n \"github.com\/aws\/aws-sdk-go\/service\/s3\/s3crypto\"\n \"github.com\/aws\/aws-sdk-go\/service\/s3\"\n \"github.com\/yookoala\/realpath\"\n \"fmt\"\n \"strconv\"\n)\n\ntype S3Location struct {\n Bucket string\n Key string\n}\n\nvar (\n InvalidS3LocationError = errors.New(\"Invalid S3 Location\")\n InvalidS3FolderError = errors.New(\"S3 Key must point to an S3 folder\")\n FolderNotWritableError = errors.New(\"Folder is not writable\")\n NotExistError = errors.New(\"Folder does not exist\")\n workers int = 0\n)\n\nfunc init() {\n val, ok := os.LookupEnv(\"NUM_WORKERS\")\n if ok {\n parsedVal, err := strconv.Atoi(val)\n\n if err == nil {\n workers = parsedVal\n }\n }\n\n if workers == 0 {\n workers = 8\n }\n}\n\nfunc NewS3Location(path string) (location *S3Location, err error) {\n if !strings.Contains(path, \"s3:\/\/\") {\n return nil, InvalidS3LocationError\n }\n\n u, err := url.Parse(path)\n\n if err != nil {\n return nil, err\n }\n\n location = &S3Location{\n Bucket: u.Host,\n Key: strings.Trim(u.Path, \"\/\"),\n }\n\n return\n}\n\ntype Client struct {\n Session *session.Session\n KmsId string\n s3 *s3.S3\n encryptionClient *s3crypto.EncryptionClient\n decryptionClient *s3crypto.DecryptionClient\n}\n\n\/\/ Make load strategy configurable\nfunc NewClient(sess *session.Session, kmsId string) (client *Client) {\n client = &Client{\n Session: sess,\n KmsId: kmsId,\n }\n\n client.Init()\n\n return\n}\n\nfunc (cli *Client) Init() {\n handler := s3crypto.NewKMSKeyGenerator(kms.New(cli.Session), cli.KmsId)\n crypto := s3crypto.AESGCMContentCipherBuilder(handler)\n cli.s3 = s3.New(cli.Session)\n cli.encryptionClient = s3crypto.NewEncryptionClient(cli.Session, crypto)\n cli.decryptionClient = s3crypto.NewDecryptionClient(cli.Session)\n}\n\nfunc (cli *Client) UploadFile(source string, dest *S3Location) (error) {\n file, openErr := os.Open(source)\n\n if openErr != nil {\n return openErr\n }\n defer file.Close()\n\n input := &s3.PutObjectInput{\n Body: aws.ReadSeekCloser(file),\n Bucket: &dest.Bucket,\n Key: &dest.Key,\n ACL: aws.String(\"bucket-owner-full-control\"),\n }\n\n _, err2 := cli.encryptionClient.PutObject(input)\n return err2\n}\n\nfunc (cli *Client) DownloadFile(source *S3Location, dest string) (error) {\n \/\/ we want to support both encrypted and non-encrypted, so we call a HEAD to check\n head, err := cli.s3.HeadObject(&s3.HeadObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n })\n\n if err != nil {\n return err\n }\n\n var output *s3.GetObjectOutput\n input := &s3.GetObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n }\n\n \/\/ from http:\/\/docs.aws.amazon.com\/AWSJavaSDK\/latest\/javadoc\/com\/amazonaws\/services\/s3\/package-summary.html\n \/\/ key wrapping algorithm, x-amz-wrap-alg is always set to \"kms\" for CSE\n if val, ok := head.Metadata[\"X-Amz-Wrap-Alg\"]; ok && *val == \"kms\" {\n output, err = cli.decryptionClient.GetObject(input)\n } else {\n output, err = cli.s3.GetObject(input)\n }\n\n if err != nil {\n return err\n }\n\n dir := filepath.Dir(dest)\n os.MkdirAll(dir, 0766)\n\n file, openErr := os.Create(dest)\n if openErr != nil {\n return openErr\n }\n defer file.Close()\n\n _, writeErr := io.Copy(file, output.Body)\n return writeErr\n}\n\nfunc (cli *Client) DownloadFolder(source *S3Location, dest string) (error) {\n \/\/ validate the destination\n if ok, _ := IsDirWritable(dest); !ok {\n return FolderNotWritableError\n }\n\n \/\/ validate the source\n if err := cli.validateFolderKey(source); err != nil {\n return err\n }\n\n startIdx := len(source.Key)\n p := NewWorkerPool(workers)\n tf := func(item *S3Location) (func () error) {\n return func() error {\n destFile := strings.Trim(item.Key[startIdx:], \"\/\")\n fmt.Printf(\"Downloading %v ...\\n\", destFile)\n return cli.DownloadFile(item, filepath.Join(dest, destFile))\n }\n }\n\n errCh := cli.listObjects(source, p, tf)\n\n finalErr := monitorPoolError(p, errCh)\n\n if finalErr == io.EOF {\n return nil\n }\n\n return finalErr\n}\n\nfunc (cli *Client) UploadFolder(source string, dest *S3Location) (error) {\n realPath, err := realpath.Realpath(source)\n\n if err != nil {\n return err\n }\n\n stat, err := os.Stat(realPath)\n if err != nil {\n return err\n }\n\n if !stat.IsDir() {\n return NotExistError\n }\n\n p := NewWorkerPool(workers)\n errCh := make(chan error)\n\n \/\/ task factory function\n tf := func(key string) (func() error) {\n return func() error {\n sourceFile := filepath.Join(source, key)\n destFile := &S3Location{dest.Bucket, fmt.Sprintf(\"%v\/%v\", dest.Key, key)}\n fmt.Printf(\"Uploading %v\\n\", key)\n return cli.UploadFile(sourceFile, destFile)\n }\n }\n\n go func() {\n startIdx := len(realPath)\n upload := func(path string, f os.FileInfo, err error) error {\n\n \/\/ if we have no error and this is a file, then we submit the task\n if err == nil && !f.IsDir() {\n key := path[startIdx+1:]\n err = p.SubmitFunc(tf(key))\n }\n\n return err\n }\n\n err := filepath.Walk(realPath, upload)\n\n if err == nil {\n p.Stop() \/\/ stop the pool to drain all current task\n errCh <- io.EOF\n } else {\n errCh <- err\n }\n }()\n\n finalErr := monitorPoolError(p, errCh)\n\n return finalErr\n}\n\n\/\/ Validate S3 location for download. It will make sure that the location is not pointed\n\/\/ to an S3 file\nfunc (cli *Client) validateFolderKey(source *S3Location) (error) {\n svc := s3.New(cli.Session)\n\n \/\/ just make sure this is not a file\n if len(source.Key) > 0 {\n _, err := svc.GetObject(&s3.GetObjectInput{\n Bucket: &source.Bucket,\n Key: &source.Key,\n })\n\n if err != nil {\n \/\/ it's okie that the key does not exist\n if awsErr, ok := err.(awserr.Error); ok {\n if awsErr.Code() != s3.ErrCodeNoSuchKey {\n return err\n }\n } else {\n return err\n }\n } else {\n return InvalidS3FolderError\n }\n }\n\n return nil\n}\n\n\/\/ List object from an S3Location and yielding the result\nfunc (cli *Client) listObjects(location *S3Location, p *WorkerPool,\n tf func(*S3Location) (func() error)) (errCh chan error) {\n errCh = make(chan error)\n\n svc := s3.New(cli.Session)\n go func() {\n var marker string\n\n for {\n output, err := svc.ListObjects(&s3.ListObjectsInput{\n Bucket: &location.Bucket,\n Prefix: &location.Key,\n Marker: &marker,\n })\n\n if err == nil {\n for _, object := range output.Contents {\n \/\/ skip directory and instruction file\n if !strings.HasSuffix(*object.Key, \"\/\") &&\n !strings.HasSuffix(*object.Key, s3crypto.DefaultInstructionKeySuffix) {\n\n err = p.SubmitFunc(tf(&S3Location{\n Bucket: location.Bucket,\n Key: *object.Key,\n }))\n\n if err != nil {\n break\n }\n }\n\n marker = *object.Key\n }\n\n \/\/ continue if there is no error\n if err == nil {\n if *output.IsTruncated {\n \/\/ if NextMarker is available, then use it\n if output.NextMarker != nil {\n marker = *output.NextMarker\n }\n } else {\n err = io.EOF\n }\n }\n }\n\n if err != nil {\n p.Stop()\n errCh <- err\n break\n }\n }\n }()\n\n return\n}\n\nfunc monitorPoolError(p *WorkerPool, errCh chan error) error {\n var finalErr error\n\n for {\n done := false\n\n select {\n case t := <- p.ResultCh:\n if t.Err != nil {\n fmt.Fprint(os.Stderr, fmt.Sprintf(\"%v\\n\", t.Err))\n p.Stop()\n }\n case finalErr = <- errCh:\n done = true\n }\n\n if done {\n break\n }\n }\n\n if finalErr == io.EOF {\n return nil\n }\n\n return finalErr\n}\n<|endoftext|>"} {"text":"<commit_before>package ldapClient\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"github.com\/go-ldap\/ldap\"\n)\n\n\/\/ LDAPClient - the ldap client interface\ntype LDAPClient interface {\n\tBind() error\n\tAuthenticate(string, string) (bool, map[string]string, error)\n\tClose()\n}\n\n\/\/ Client - the ldap client\ntype Client struct {\n\tConn ldap.Client\n\tConfig *Config\n\tdisconnects int\n}\n\n\/\/ Config - ldap client config\ntype Config struct {\n\tAttributes []string\n\tBase string\n\tBindDN string\n\tBindPassword string\n\tGroupFilter string \/\/ e.g. \"(memberUid=%s)\"\n\tHost string\n\tUserFilter string \/\/ e.g. \"(uid=%s)\"\n\tPort int\n\tInsecureSkipVerify bool\n\tUseSSL bool\n\tClientCertificates []tls.Certificate \/\/ Adding client certificates\n\tCACertificates []byte\n}\n\n\/\/ New - Creates a new ldap client\nfunc New(config *Config) (*Client, error) {\n\tclient := &Client{Config: config}\n\tif err := client.connect(); err != nil {\n\t\treturn &Client{}, err\n\t}\n\tif err := client.Bind(); err != nil {\n\t\treturn &Client{}, err\n\t}\n\treturn client, nil\n}\n\nfunc (c *Client) connect() error {\n\tvar (\n\t\tldapConn *ldap.Conn\n\t\terr error\n\t)\n\tc.Close()\n\taddress := fmt.Sprintf(\"%s:%d\", c.Config.Host, c.Config.Port)\n\tif c.Config.UseSSL {\n\t\ttlsConfig := &tls.Config{\n\t\t\tInsecureSkipVerify: c.Config.InsecureSkipVerify,\n\t\t\tServerName: c.Config.Host,\n\t\t}\n\t\tif len(c.Config.CACertificates) > 0 {\n\t\t\ttlsConfig.RootCAs = x509.NewCertPool()\n\t\t\tif !tlsConfig.RootCAs.AppendCertsFromPEM(c.Config.CACertificates) {\n\t\t\t\treturn fmt.Errorf(\"Could not append CA certs from PEM\")\n\t\t\t}\n\t\t}\n\t\tif c.Config.ClientCertificates != nil && len(c.Config.ClientCertificates) > 0 {\n\t\t\ttlsConfig.Certificates = c.Config.ClientCertificates\n\t\t}\n\t\tldapConn, err = ldap.DialTLS(\"tcp\", address, tlsConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tldapConn, err = ldap.Dial(\"tcp\", address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.Conn = ldapConn\n\treturn nil\n}\n\n\/\/ Bind - bind to LDAP as the Config user\nfunc (c *Client) Bind() error {\n\tif c.Config.BindDN != \"\" && c.Config.BindPassword != \"\" {\n\t\tif err := c.Conn.Bind(c.Config.BindDN, c.Config.BindPassword); err != nil {\n\t\t\tif err.Error() == \"ldap: connection closed\" {\n\t\t\t\tc.disconnects++\n\t\t\t\tif c.disconnects < 2 {\n\t\t\t\t\tif err := c.connect(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn c.Bind()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tc.disconnects = 0\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"BindDN or BindPassword was not set on Client config\")\n}\n\n\/\/ Close - close the backend ldap connection\nfunc (c *Client) Close() {\n\tif c.Conn != nil {\n\t\tc.Conn.Close()\n\t\tc.Conn = nil\n\t}\n}\n\n\/\/ Authenticate - authenticates a user against ldap\nfunc (c *Client) Authenticate(username, password string) (bool, map[string]string, error) {\n\tdefer c.Bind()\n\tif err := c.Bind(); err != nil {\n\t\treturn false, nil, err\n\t}\n\tattributes := append(c.Config.Attributes, \"dn\")\n\t\/\/ Search for the given username\n\tsearchRequest := ldap.NewSearchRequest(\n\t\tc.Config.Base,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tfmt.Sprintf(c.Config.UserFilter, username),\n\t\tattributes,\n\t\tnil,\n\t)\n\n\tsr, err := c.Conn.Search(searchRequest)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\n\tif len(sr.Entries) < 1 {\n\t\treturn false, nil, fmt.Errorf(\"User does not exist\")\n\t}\n\n\tif len(sr.Entries) > 1 {\n\t\treturn false, nil, fmt.Errorf(\"Too many entries returned\")\n\t}\n\n\tuserDN := sr.Entries[0].DN\n\tuser := map[string]string{}\n\tfor _, attr := range c.Config.Attributes {\n\t\tuser[attr] = sr.Entries[0].GetAttributeValue(attr)\n\t}\n\n\t\/\/ Bind as the user to verify their password\n\tif err := c.Conn.Bind(userDN, password); err != nil {\n\t\treturn false, user, err\n\t}\n\treturn true, user, nil\n}\n<commit_msg>correct ldap disconnected error matching<commit_after>package ldapClient\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"github.com\/go-ldap\/ldap\"\n)\n\n\/\/ LDAPClient - the ldap client interface\ntype LDAPClient interface {\n\tBind() error\n\tAuthenticate(string, string) (bool, map[string]string, error)\n\tClose()\n}\n\n\/\/ Client - the ldap client\ntype Client struct {\n\tConn ldap.Client\n\tConfig *Config\n\tdisconnects int\n}\n\n\/\/ Config - ldap client config\ntype Config struct {\n\tAttributes []string\n\tBase string\n\tBindDN string\n\tBindPassword string\n\tGroupFilter string \/\/ e.g. \"(memberUid=%s)\"\n\tHost string\n\tUserFilter string \/\/ e.g. \"(uid=%s)\"\n\tPort int\n\tInsecureSkipVerify bool\n\tUseSSL bool\n\tClientCertificates []tls.Certificate \/\/ Adding client certificates\n\tCACertificates []byte\n}\n\n\/\/ New - Creates a new ldap client\nfunc New(config *Config) (*Client, error) {\n\tclient := &Client{Config: config}\n\tif err := client.connect(); err != nil {\n\t\treturn &Client{}, err\n\t}\n\tif err := client.Bind(); err != nil {\n\t\treturn &Client{}, err\n\t}\n\treturn client, nil\n}\n\nfunc (c *Client) connect() error {\n\tvar (\n\t\tldapConn *ldap.Conn\n\t\terr error\n\t)\n\tc.Close()\n\taddress := fmt.Sprintf(\"%s:%d\", c.Config.Host, c.Config.Port)\n\tif c.Config.UseSSL {\n\t\ttlsConfig := &tls.Config{\n\t\t\tInsecureSkipVerify: c.Config.InsecureSkipVerify,\n\t\t\tServerName: c.Config.Host,\n\t\t}\n\t\tif len(c.Config.CACertificates) > 0 {\n\t\t\ttlsConfig.RootCAs = x509.NewCertPool()\n\t\t\tif !tlsConfig.RootCAs.AppendCertsFromPEM(c.Config.CACertificates) {\n\t\t\t\treturn fmt.Errorf(\"Could not append CA certs from PEM\")\n\t\t\t}\n\t\t}\n\t\tif c.Config.ClientCertificates != nil && len(c.Config.ClientCertificates) > 0 {\n\t\t\ttlsConfig.Certificates = c.Config.ClientCertificates\n\t\t}\n\t\tldapConn, err = ldap.DialTLS(\"tcp\", address, tlsConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tldapConn, err = ldap.Dial(\"tcp\", address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.Conn = ldapConn\n\treturn nil\n}\n\n\/\/ Bind - bind to LDAP as the Config user\nfunc (c *Client) Bind() error {\n\tif c.Config.BindDN != \"\" && c.Config.BindPassword != \"\" {\n\t\tif err := c.Conn.Bind(c.Config.BindDN, c.Config.BindPassword); err != nil {\n\t\t\tif err.Error() == `LDAP Result Code 200 \"Network Error\": ldap: connection closed` {\n\t\t\t\tc.disconnects++\n\t\t\t\tif c.disconnects < 2 {\n\t\t\t\t\tif err := c.connect(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn c.Bind()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tc.disconnects = 0\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"BindDN or BindPassword was not set on Client config\")\n}\n\n\/\/ Close - close the backend ldap connection\nfunc (c *Client) Close() {\n\tif c.Conn != nil {\n\t\tc.Conn.Close()\n\t\tc.Conn = nil\n\t}\n}\n\n\/\/ Authenticate - authenticates a user against ldap\nfunc (c *Client) Authenticate(username, password string) (bool, map[string]string, error) {\n\tdefer c.Bind()\n\tif err := c.Bind(); err != nil {\n\t\treturn false, nil, err\n\t}\n\tattributes := append(c.Config.Attributes, \"dn\")\n\t\/\/ Search for the given username\n\tsearchRequest := ldap.NewSearchRequest(\n\t\tc.Config.Base,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tfmt.Sprintf(c.Config.UserFilter, username),\n\t\tattributes,\n\t\tnil,\n\t)\n\n\tsr, err := c.Conn.Search(searchRequest)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\n\tif len(sr.Entries) < 1 {\n\t\treturn false, nil, fmt.Errorf(\"User does not exist\")\n\t}\n\n\tif len(sr.Entries) > 1 {\n\t\treturn false, nil, fmt.Errorf(\"Too many entries returned\")\n\t}\n\n\tuserDN := sr.Entries[0].DN\n\tuser := map[string]string{}\n\tfor _, attr := range c.Config.Attributes {\n\t\tuser[attr] = sr.Entries[0].GetAttributeValue(attr)\n\t}\n\n\t\/\/ Bind as the user to verify their password\n\tif err := c.Conn.Bind(userDN, password); err != nil {\n\t\treturn false, user, err\n\t}\n\treturn true, user, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage maps\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/net\/context\/ctxhttp\"\n\t\"googlemaps.github.io\/maps\/internal\"\n\n\t\/\/ Importing image\/jpeg for it's decoder\n\t_ \"image\/jpeg\"\n)\n\n\/\/ Client may be used to make requests to the Google Maps WebService APIs\ntype Client struct {\n\thttpClient *http.Client\n\tapiKey string\n\tbaseURL string\n\tclientID string\n\tsignature []byte\n\trequestsPerSecond int\n\trateLimiter chan int\n}\n\n\/\/ ClientOption is the type of constructor options for NewClient(...).\ntype ClientOption func(*Client) error\n\nvar defaultRequestsPerSecond = 10\n\n\/\/ NewClient constructs a new Client which can make requests to the Google Maps WebService APIs.\nfunc NewClient(options ...ClientOption) (*Client, error) {\n\tc := &Client{requestsPerSecond: defaultRequestsPerSecond}\n\tWithHTTPClient(&http.Client{})(c)\n\tfor _, option := range options {\n\t\terr := option(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.apiKey == \"\" && (c.clientID == \"\" || len(c.signature) == 0) {\n\t\treturn nil, errors.New(\"maps: API Key or Maps for Work credentials missing\")\n\t}\n\n\t\/\/ Implement a bursty rate limiter.\n\t\/\/ Allow up to 1 second worth of requests to be made at once.\n\tc.rateLimiter = make(chan int, c.requestsPerSecond)\n\t\/\/ Prefill rateLimiter with 1 seconds worth of requests.\n\tfor i := 0; i < c.requestsPerSecond; i++ {\n\t\tc.rateLimiter <- 1\n\t}\n\tgo func() {\n\t\t\/\/ Refill rateLimiter continuously\n\t\tfor range time.Tick(time.Second \/ time.Duration(c.requestsPerSecond)) {\n\t\t\tc.rateLimiter <- 1\n\t\t}\n\t}()\n\n\treturn c, nil\n}\n\n\/\/ WithHTTPClient configures a Maps API client with a http.Client to make requests over.\nfunc WithHTTPClient(c *http.Client) ClientOption {\n\treturn func(client *Client) error {\n\t\tif _, ok := c.Transport.(*transport); !ok {\n\t\t\tt := c.Transport\n\t\t\tif t != nil {\n\t\t\t\tc.Transport = &transport{Base: t}\n\t\t\t} else {\n\t\t\t\tc.Transport = &transport{Base: http.DefaultTransport}\n\t\t\t}\n\t\t}\n\t\tclient.httpClient = c\n\t\treturn nil\n\t}\n}\n\n\/\/ WithAPIKey configures a Maps API client with an API Key\nfunc WithAPIKey(apiKey string) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.apiKey = apiKey\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClientIDAndSignature configures a Maps API client for a Maps for Work application\n\/\/ The signature is assumed to be URL modified Base64 encoded\nfunc WithClientIDAndSignature(clientID, signature string) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.clientID = clientID\n\t\tdecoded, err := base64.URLEncoding.DecodeString(signature)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.signature = decoded\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRateLimit configures the rate limit for back end requests.\n\/\/ Default is to limit to 10 requests per second.\nfunc WithRateLimit(requestsPerSecond int) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.requestsPerSecond = requestsPerSecond\n\t\treturn nil\n\t}\n}\n\ntype apiConfig struct {\n\thost string\n\tpath string\n\tacceptsClientID bool\n}\n\ntype apiRequest interface {\n\tparams() url.Values\n}\n\nfunc (c *Client) get(ctx context.Context, config *apiConfig, apiReq apiRequest) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-c.rateLimiter:\n\t\t\/\/ Execute request.\n\t}\n\n\thost := config.host\n\tif c.baseURL != \"\" {\n\t\thost = c.baseURL\n\t}\n\treq, err := http.NewRequest(\"GET\", host+config.path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq, err := c.generateAuthQuery(config.path, apiReq.params(), config.acceptsClientID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.URL.RawQuery = q\n\treturn ctxhttp.Do(ctx, c.httpClient, req)\n}\n\nfunc (c *Client) getJSON(ctx context.Context, config *apiConfig, apiReq apiRequest, resp interface{}) error {\n\thttpResp, err := c.get(ctx, config, apiReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer httpResp.Body.Close()\n\n\treturn json.NewDecoder(httpResp.Body).Decode(resp)\n}\n\ntype binaryResponse struct {\n\tstatusCode int\n\tcontentType string\n\tdata io.ReadCloser\n}\n\nfunc (c *Client) getBinary(ctx context.Context, config *apiConfig, apiReq apiRequest) (binaryResponse, error) {\n\thttpResp, err := c.get(ctx, config, apiReq)\n\tif err != nil {\n\t\treturn binaryResponse{}, err\n\t}\n\n\treturn binaryResponse{httpResp.StatusCode, httpResp.Header.Get(\"Content-Type\"), httpResp.Body}, nil\n}\n\nfunc (c *Client) generateAuthQuery(path string, q url.Values, acceptClientID bool) (string, error) {\n\tif c.apiKey != \"\" {\n\t\tq.Set(\"key\", c.apiKey)\n\t\treturn q.Encode(), nil\n\t}\n\tif acceptClientID {\n\t\treturn internal.SignURL(path, c.clientID, c.signature, q)\n\t}\n\treturn \"\", errors.New(\"maps: API Key missing\")\n}\n\n\/\/ commonResponse contains the common response fields to most API calls inside\n\/\/ the Google Maps APIs. This is used internally.\ntype commonResponse struct {\n\t\/\/ Status contains the status of the request, and may contain debugging\n\t\/\/ information to help you track down why the call failed.\n\tStatus string `json:\"status\"`\n\n\t\/\/ ErrorMessage is the explanatory field added when Status is an error.\n\tErrorMessage string `json:\"error_message\"`\n}\n\n\/\/ StatusError returns an error iff this object has a non-OK Status.\nfunc (c *commonResponse) StatusError() error {\n\tif c.Status != \"OK\" {\n\t\treturn fmt.Errorf(\"maps: %s - %s\", c.Status, c.ErrorMessage)\n\t}\n\treturn nil\n}\n<commit_msg>Deleting extraneous import.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage maps\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/net\/context\/ctxhttp\"\n\t\"googlemaps.github.io\/maps\/internal\"\n)\n\n\/\/ Client may be used to make requests to the Google Maps WebService APIs\ntype Client struct {\n\thttpClient *http.Client\n\tapiKey string\n\tbaseURL string\n\tclientID string\n\tsignature []byte\n\trequestsPerSecond int\n\trateLimiter chan int\n}\n\n\/\/ ClientOption is the type of constructor options for NewClient(...).\ntype ClientOption func(*Client) error\n\nvar defaultRequestsPerSecond = 10\n\n\/\/ NewClient constructs a new Client which can make requests to the Google Maps WebService APIs.\nfunc NewClient(options ...ClientOption) (*Client, error) {\n\tc := &Client{requestsPerSecond: defaultRequestsPerSecond}\n\tWithHTTPClient(&http.Client{})(c)\n\tfor _, option := range options {\n\t\terr := option(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.apiKey == \"\" && (c.clientID == \"\" || len(c.signature) == 0) {\n\t\treturn nil, errors.New(\"maps: API Key or Maps for Work credentials missing\")\n\t}\n\n\t\/\/ Implement a bursty rate limiter.\n\t\/\/ Allow up to 1 second worth of requests to be made at once.\n\tc.rateLimiter = make(chan int, c.requestsPerSecond)\n\t\/\/ Prefill rateLimiter with 1 seconds worth of requests.\n\tfor i := 0; i < c.requestsPerSecond; i++ {\n\t\tc.rateLimiter <- 1\n\t}\n\tgo func() {\n\t\t\/\/ Refill rateLimiter continuously\n\t\tfor range time.Tick(time.Second \/ time.Duration(c.requestsPerSecond)) {\n\t\t\tc.rateLimiter <- 1\n\t\t}\n\t}()\n\n\treturn c, nil\n}\n\n\/\/ WithHTTPClient configures a Maps API client with a http.Client to make requests over.\nfunc WithHTTPClient(c *http.Client) ClientOption {\n\treturn func(client *Client) error {\n\t\tif _, ok := c.Transport.(*transport); !ok {\n\t\t\tt := c.Transport\n\t\t\tif t != nil {\n\t\t\t\tc.Transport = &transport{Base: t}\n\t\t\t} else {\n\t\t\t\tc.Transport = &transport{Base: http.DefaultTransport}\n\t\t\t}\n\t\t}\n\t\tclient.httpClient = c\n\t\treturn nil\n\t}\n}\n\n\/\/ WithAPIKey configures a Maps API client with an API Key\nfunc WithAPIKey(apiKey string) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.apiKey = apiKey\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClientIDAndSignature configures a Maps API client for a Maps for Work application\n\/\/ The signature is assumed to be URL modified Base64 encoded\nfunc WithClientIDAndSignature(clientID, signature string) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.clientID = clientID\n\t\tdecoded, err := base64.URLEncoding.DecodeString(signature)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.signature = decoded\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRateLimit configures the rate limit for back end requests.\n\/\/ Default is to limit to 10 requests per second.\nfunc WithRateLimit(requestsPerSecond int) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.requestsPerSecond = requestsPerSecond\n\t\treturn nil\n\t}\n}\n\ntype apiConfig struct {\n\thost string\n\tpath string\n\tacceptsClientID bool\n}\n\ntype apiRequest interface {\n\tparams() url.Values\n}\n\nfunc (c *Client) get(ctx context.Context, config *apiConfig, apiReq apiRequest) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-c.rateLimiter:\n\t\t\/\/ Execute request.\n\t}\n\n\thost := config.host\n\tif c.baseURL != \"\" {\n\t\thost = c.baseURL\n\t}\n\treq, err := http.NewRequest(\"GET\", host+config.path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq, err := c.generateAuthQuery(config.path, apiReq.params(), config.acceptsClientID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.URL.RawQuery = q\n\treturn ctxhttp.Do(ctx, c.httpClient, req)\n}\n\nfunc (c *Client) getJSON(ctx context.Context, config *apiConfig, apiReq apiRequest, resp interface{}) error {\n\thttpResp, err := c.get(ctx, config, apiReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer httpResp.Body.Close()\n\n\treturn json.NewDecoder(httpResp.Body).Decode(resp)\n}\n\ntype binaryResponse struct {\n\tstatusCode int\n\tcontentType string\n\tdata io.ReadCloser\n}\n\nfunc (c *Client) getBinary(ctx context.Context, config *apiConfig, apiReq apiRequest) (binaryResponse, error) {\n\thttpResp, err := c.get(ctx, config, apiReq)\n\tif err != nil {\n\t\treturn binaryResponse{}, err\n\t}\n\n\treturn binaryResponse{httpResp.StatusCode, httpResp.Header.Get(\"Content-Type\"), httpResp.Body}, nil\n}\n\nfunc (c *Client) generateAuthQuery(path string, q url.Values, acceptClientID bool) (string, error) {\n\tif c.apiKey != \"\" {\n\t\tq.Set(\"key\", c.apiKey)\n\t\treturn q.Encode(), nil\n\t}\n\tif acceptClientID {\n\t\treturn internal.SignURL(path, c.clientID, c.signature, q)\n\t}\n\treturn \"\", errors.New(\"maps: API Key missing\")\n}\n\n\/\/ commonResponse contains the common response fields to most API calls inside\n\/\/ the Google Maps APIs. This is used internally.\ntype commonResponse struct {\n\t\/\/ Status contains the status of the request, and may contain debugging\n\t\/\/ information to help you track down why the call failed.\n\tStatus string `json:\"status\"`\n\n\t\/\/ ErrorMessage is the explanatory field added when Status is an error.\n\tErrorMessage string `json:\"error_message\"`\n}\n\n\/\/ StatusError returns an error iff this object has a non-OK Status.\nfunc (c *commonResponse) StatusError() error {\n\tif c.Status != \"OK\" {\n\t\treturn fmt.Errorf(\"maps: %s - %s\", c.Status, c.ErrorMessage)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package torrent provides a BitTorrent client implementation.\npackage torrent\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/cenkalti\/rain\/internal\/bitfield\"\n\t\"github.com\/cenkalti\/rain\/internal\/blocklist\"\n\t\"github.com\/cenkalti\/rain\/internal\/logger\"\n\t\"github.com\/cenkalti\/rain\/internal\/piececache\"\n\t\"github.com\/cenkalti\/rain\/internal\/resolver\"\n\t\"github.com\/cenkalti\/rain\/internal\/resourcemanager\"\n\t\"github.com\/cenkalti\/rain\/internal\/resumer\/boltdbresumer\"\n\t\"github.com\/cenkalti\/rain\/internal\/semaphore\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/trackermanager\"\n\t\"github.com\/juju\/ratelimit\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/nictuku\/dht\"\n)\n\nvar (\n\tsessionBucket = []byte(\"session\")\n\ttorrentsBucket = []byte(\"torrents\")\n\tblocklistKey = []byte(\"blocklist\")\n\tblocklistTimestampKey = []byte(\"blocklist-timestamp\")\n)\n\n\/\/ Session contains torrents, DHT node, caches and other data structures shared by multiple torrents.\ntype Session struct {\n\tconfig Config\n\tdb *bolt.DB\n\tresumer *boltdbresumer.Resumer\n\tlog logger.Logger\n\textensions [8]byte\n\tdht *dht.DHT\n\trpc *rpcServer\n\ttrackerManager *trackermanager.TrackerManager\n\tram *resourcemanager.ResourceManager\n\tpieceCache *piececache.Cache\n\twebseedClient http.Client\n\tcreatedAt time.Time\n\tsemWrite *semaphore.Semaphore\n\tmetrics *sessionMetrics\n\tbucketDownload *ratelimit.Bucket\n\tbucketUpload *ratelimit.Bucket\n\tcloseC chan struct{}\n\n\tmPeerRequests sync.Mutex\n\tdhtPeerRequests map[*torrent]struct{}\n\n\tmTorrents sync.RWMutex\n\ttorrents map[string]*Torrent\n\ttorrentsByInfoHash map[dht.InfoHash][]*Torrent\n\tinvalidTorrentIDs []string\n\n\tmPorts sync.RWMutex\n\tavailablePorts map[int]struct{}\n\n\tmBlocklist sync.RWMutex\n\tblocklist *blocklist.Blocklist\n\tblocklistTimestamp time.Time\n}\n\n\/\/ NewSession creates a new Session for downloading and seeding torrents.\n\/\/ Returned session must be closed after use.\nfunc NewSession(cfg Config) (*Session, error) {\n\tif cfg.PortBegin >= cfg.PortEnd {\n\t\treturn nil, errors.New(\"invalid port range\")\n\t}\n\tif cfg.MaxOpenFiles > 0 {\n\t\terr := setNoFile(cfg.MaxOpenFiles)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"cannot change max open files limit: \" + err.Error())\n\t\t}\n\t}\n\tvar err error\n\tcfg.Database, err = homedir.Expand(cfg.Database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg.DataDir, err = homedir.Expand(cfg.DataDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = os.MkdirAll(filepath.Dir(cfg.Database), 0750)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl := logger.New(\"session\")\n\tdb, err := bolt.Open(cfg.Database, 0640, &bolt.Options{Timeout: time.Second})\n\tif err == bolt.ErrTimeout {\n\t\treturn nil, errors.New(\"resume database is locked by another process\")\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdb.Close()\n\t\t}\n\t}()\n\tvar ids []string\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t_, err2 := tx.CreateBucketIfNotExists(sessionBucket)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tb, err2 := tx.CreateBucketIfNotExists(torrentsBucket)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\treturn b.ForEach(func(k, _ []byte) error {\n\t\t\tids = append(ids, string(k))\n\t\t\treturn nil\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := boltdbresumer.New(db, torrentsBucket)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dhtNode *dht.DHT\n\tif cfg.DHTEnabled {\n\t\tdhtConfig := dht.NewConfig()\n\t\tdhtConfig.Address = cfg.DHTHost\n\t\tdhtConfig.Port = int(cfg.DHTPort)\n\t\tdhtConfig.DHTRouters = strings.Join(cfg.DHTBootstrapNodes, \",\")\n\t\tdhtConfig.SaveRoutingTable = false\n\t\tdhtNode, err = dht.New(dhtConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = dhtNode.Start()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tports := make(map[int]struct{})\n\tfor p := cfg.PortBegin; p < cfg.PortEnd; p++ {\n\t\tports[int(p)] = struct{}{}\n\t}\n\tbl := blocklist.New()\n\tvar blTracker *blocklist.Blocklist\n\tif cfg.BlocklistEnabledForTrackers {\n\t\tblTracker = bl\n\t}\n\tc := &Session{\n\t\tconfig: cfg,\n\t\tdb: db,\n\t\tresumer: res,\n\t\tblocklist: bl,\n\t\ttrackerManager: trackermanager.New(blTracker, cfg.DNSResolveTimeout, !cfg.TrackerHTTPVerifyTLS),\n\t\tlog: l,\n\t\ttorrents: make(map[string]*Torrent),\n\t\ttorrentsByInfoHash: make(map[dht.InfoHash][]*Torrent),\n\t\tavailablePorts: ports,\n\t\tdht: dhtNode,\n\t\tpieceCache: piececache.New(cfg.ReadCacheSize, cfg.ReadCacheTTL, cfg.ParallelReads),\n\t\tram: resourcemanager.New(cfg.WriteCacheSize),\n\t\tcreatedAt: time.Now(),\n\t\tsemWrite: semaphore.New(int(cfg.ParallelWrites)),\n\t\tcloseC: make(chan struct{}),\n\t\twebseedClient: http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\t\t\tip, port, err := resolver.Resolve(ctx, addr, cfg.DNSResolveTimeout, bl)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tvar d net.Dialer\n\t\t\t\t\ttaddr := &net.TCPAddr{IP: ip, Port: port}\n\t\t\t\t\tdctx, cancel := context.WithTimeout(ctx, cfg.WebseedDialTimeout)\n\t\t\t\t\tdefer cancel()\n\t\t\t\t\treturn d.DialContext(dctx, network, taddr.String())\n\t\t\t\t},\n\t\t\t\tTLSHandshakeTimeout: cfg.WebseedTLSHandshakeTimeout,\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: !cfg.WebseedVerifyTLS}, \/\/ nolint: gosec\n\t\t\t\tResponseHeaderTimeout: cfg.WebseedResponseHeaderTimeout,\n\t\t\t},\n\t\t},\n\t}\n\tif cfg.SpeedLimitDownload > 0 {\n\t\tc.bucketDownload = ratelimit.NewBucketWithRate(float64(cfg.SpeedLimitDownload), cfg.SpeedLimitDownload)\n\t}\n\tif cfg.SpeedLimitUpload > 0 {\n\t\tc.bucketUpload = ratelimit.NewBucketWithRate(float64(cfg.SpeedLimitUpload), cfg.SpeedLimitUpload)\n\t}\n\terr = c.startBlocklistReloader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\text, err := bitfield.NewBytes(c.extensions[:], 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\text.Set(61) \/\/ Fast Extension (BEP 6)\n\text.Set(43) \/\/ Extension Protocol (BEP 10)\n\tif cfg.DHTEnabled {\n\t\text.Set(63) \/\/ DHT Protocol (BEP 5)\n\t\tc.dhtPeerRequests = make(map[*torrent]struct{})\n\t}\n\terr = c.initMetrics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.loadExistingTorrents(ids)\n\tif c.config.RPCEnabled {\n\t\tc.rpc = newRPCServer(c)\n\t\terr = c.rpc.Start(c.config.RPCHost, c.config.RPCPort)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif cfg.DHTEnabled {\n\t\tgo c.processDHTResults()\n\t}\n\tgo c.updateStatsLoop()\n\treturn c, nil\n}\n\nfunc (s *Session) parseTrackers(tiers [][]string, private bool) []tracker.Tracker {\n\tret := make([]tracker.Tracker, 0, len(tiers))\n\tfor _, tier := range tiers {\n\t\ttrackers := make([]tracker.Tracker, 0, len(tier))\n\t\tfor _, tr := range tier {\n\t\t\tt, err := s.trackerManager.Get(tr, s.config.TrackerHTTPTimeout, s.getTrackerUserAgent(private), int64(s.config.TrackerHTTPMaxResponseSize))\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrackers = append(trackers, t)\n\t\t}\n\t\tif len(trackers) > 0 {\n\t\t\ttra := tracker.NewTier(trackers)\n\t\t\tret = append(ret, tra)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (s *Session) getTrackerUserAgent(private bool) string {\n\tif private {\n\t\treturn s.config.TrackerHTTPPrivateUserAgent\n\t}\n\treturn trackerHTTPPublicUserAgent\n}\n\n\/\/ Close stops all torrents and release the resources.\nfunc (s *Session) Close() error {\n\tclose(s.closeC)\n\n\tif s.config.DHTEnabled {\n\t\ts.dht.Stop()\n\t}\n\n\ts.updateStats()\n\n\tvar wg sync.WaitGroup\n\ts.mTorrents.Lock()\n\twg.Add(len(s.torrents))\n\tfor _, t := range s.torrents {\n\t\tgo func(t *Torrent) {\n\t\t\tt.torrent.Close()\n\t\t\twg.Done()\n\t\t}(t)\n\t}\n\twg.Wait()\n\ts.torrents = nil\n\ts.mTorrents.Unlock()\n\n\tif s.rpc != nil {\n\t\terr := s.rpc.Stop(s.config.RPCShutdownTimeout)\n\t\tif err != nil {\n\t\t\ts.log.Errorln(\"cannot stop RPC server:\", err.Error())\n\t\t}\n\t}\n\n\ts.ram.Close()\n\ts.pieceCache.Close()\n\ts.metrics.Close()\n\treturn s.db.Close()\n}\n\n\/\/ ListTorrents returns all torrents in session as a slice.\n\/\/ The order of the torrents returned is different on each call.\nfunc (s *Session) ListTorrents() []*Torrent {\n\ts.mTorrents.RLock()\n\tdefer s.mTorrents.RUnlock()\n\ttorrents := make([]*Torrent, 0, len(s.torrents))\n\tfor _, t := range s.torrents {\n\t\ttorrents = append(torrents, t)\n\t}\n\treturn torrents\n}\n\nfunc (s *Session) getPort() (int, error) {\n\ts.mPorts.Lock()\n\tdefer s.mPorts.Unlock()\n\tfor p := range s.availablePorts {\n\t\tdelete(s.availablePorts, p)\n\t\treturn p, nil\n\t}\n\treturn 0, errors.New(\"no free port\")\n}\n\nfunc (s *Session) releasePort(port int) {\n\ts.mPorts.Lock()\n\tdefer s.mPorts.Unlock()\n\ts.availablePorts[port] = struct{}{}\n}\n\n\/\/ GetTorrent by its id. Returns nil if torrent with id is not found.\nfunc (s *Session) GetTorrent(id string) *Torrent {\n\ts.mTorrents.RLock()\n\tdefer s.mTorrents.RUnlock()\n\treturn s.torrents[id]\n}\n\n\/\/ RemoveTorrent removes the torrent from the session and delete its files.\nfunc (s *Session) RemoveTorrent(id string) error {\n\tt, err := s.removeTorrentFromClient(id)\n\tif t != nil {\n\t\tgo func() { _ = s.stopAndRemoveData(t) }()\n\t}\n\treturn err\n}\n\nfunc (s *Session) removeTorrentFromClient(id string) (*Torrent, error) {\n\ts.mTorrents.Lock()\n\tdefer s.mTorrents.Unlock()\n\tt, ok := s.torrents[id]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tt.torrent.log.Info(\"removing torrent\")\n\tdelete(s.torrents, id)\n\tdelete(s.torrentsByInfoHash, dht.InfoHash(t.torrent.InfoHash()))\n\treturn t, s.db.Update(func(tx *bolt.Tx) error {\n\t\treturn tx.Bucket(torrentsBucket).DeleteBucket([]byte(id))\n\t})\n}\n\nfunc (s *Session) stopAndRemoveData(t *Torrent) error {\n\tt.torrent.Close()\n\ts.releasePort(t.torrent.port)\n\tvar err error\n\tvar dest string\n\tif s.config.DataDirIncludesTorrentID {\n\t\tdest = filepath.Join(s.config.DataDir, t.torrent.id)\n\t} else if t.torrent.info != nil {\n\t\tdest = t.torrent.info.Name\n\t}\n\tif dest != \"\" {\n\t\terr = os.RemoveAll(dest)\n\t\tif err != nil {\n\t\t\ts.log.Errorf(\"cannot remove torrent data. err: %s dest: %s\", err, dest)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ StartAll starts all torrents in session.\nfunc (s *Session) StartAll() error {\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\ttb := tx.Bucket(torrentsBucket)\n\t\ts.mTorrents.RLock()\n\t\tfor _, t := range s.torrents {\n\t\t\tb := tb.Bucket([]byte(t.torrent.id))\n\t\t\t_ = b.Put([]byte(\"started\"), []byte(\"true\"))\n\t\t}\n\t\tdefer s.mTorrents.RUnlock()\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, t := range s.torrents {\n\t\tt.torrent.Start()\n\t}\n\treturn nil\n}\n\n\/\/ StopAll stops all torrents in session.\nfunc (s *Session) StopAll() error {\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\ttb := tx.Bucket(torrentsBucket)\n\t\ts.mTorrents.RLock()\n\t\tfor _, t := range s.torrents {\n\t\t\tb := tb.Bucket([]byte(t.torrent.id))\n\t\t\t_ = b.Put([]byte(\"started\"), []byte(\"false\"))\n\t\t}\n\t\tdefer s.mTorrents.RUnlock()\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, t := range s.torrents {\n\t\tt.torrent.Stop()\n\t}\n\treturn nil\n}\n<commit_msg>do not search dht for peers indefinitely<commit_after>\/\/ Package torrent provides a BitTorrent client implementation.\npackage torrent\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/cenkalti\/rain\/internal\/bitfield\"\n\t\"github.com\/cenkalti\/rain\/internal\/blocklist\"\n\t\"github.com\/cenkalti\/rain\/internal\/logger\"\n\t\"github.com\/cenkalti\/rain\/internal\/piececache\"\n\t\"github.com\/cenkalti\/rain\/internal\/resolver\"\n\t\"github.com\/cenkalti\/rain\/internal\/resourcemanager\"\n\t\"github.com\/cenkalti\/rain\/internal\/resumer\/boltdbresumer\"\n\t\"github.com\/cenkalti\/rain\/internal\/semaphore\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/trackermanager\"\n\t\"github.com\/juju\/ratelimit\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/nictuku\/dht\"\n)\n\nvar (\n\tsessionBucket = []byte(\"session\")\n\ttorrentsBucket = []byte(\"torrents\")\n\tblocklistKey = []byte(\"blocklist\")\n\tblocklistTimestampKey = []byte(\"blocklist-timestamp\")\n)\n\n\/\/ Session contains torrents, DHT node, caches and other data structures shared by multiple torrents.\ntype Session struct {\n\tconfig Config\n\tdb *bolt.DB\n\tresumer *boltdbresumer.Resumer\n\tlog logger.Logger\n\textensions [8]byte\n\tdht *dht.DHT\n\trpc *rpcServer\n\ttrackerManager *trackermanager.TrackerManager\n\tram *resourcemanager.ResourceManager\n\tpieceCache *piececache.Cache\n\twebseedClient http.Client\n\tcreatedAt time.Time\n\tsemWrite *semaphore.Semaphore\n\tmetrics *sessionMetrics\n\tbucketDownload *ratelimit.Bucket\n\tbucketUpload *ratelimit.Bucket\n\tcloseC chan struct{}\n\n\tmPeerRequests sync.Mutex\n\tdhtPeerRequests map[*torrent]struct{}\n\n\tmTorrents sync.RWMutex\n\ttorrents map[string]*Torrent\n\ttorrentsByInfoHash map[dht.InfoHash][]*Torrent\n\tinvalidTorrentIDs []string\n\n\tmPorts sync.RWMutex\n\tavailablePorts map[int]struct{}\n\n\tmBlocklist sync.RWMutex\n\tblocklist *blocklist.Blocklist\n\tblocklistTimestamp time.Time\n}\n\n\/\/ NewSession creates a new Session for downloading and seeding torrents.\n\/\/ Returned session must be closed after use.\nfunc NewSession(cfg Config) (*Session, error) {\n\tif cfg.PortBegin >= cfg.PortEnd {\n\t\treturn nil, errors.New(\"invalid port range\")\n\t}\n\tif cfg.MaxOpenFiles > 0 {\n\t\terr := setNoFile(cfg.MaxOpenFiles)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"cannot change max open files limit: \" + err.Error())\n\t\t}\n\t}\n\tvar err error\n\tcfg.Database, err = homedir.Expand(cfg.Database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg.DataDir, err = homedir.Expand(cfg.DataDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = os.MkdirAll(filepath.Dir(cfg.Database), 0750)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl := logger.New(\"session\")\n\tdb, err := bolt.Open(cfg.Database, 0640, &bolt.Options{Timeout: time.Second})\n\tif err == bolt.ErrTimeout {\n\t\treturn nil, errors.New(\"resume database is locked by another process\")\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdb.Close()\n\t\t}\n\t}()\n\tvar ids []string\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t_, err2 := tx.CreateBucketIfNotExists(sessionBucket)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tb, err2 := tx.CreateBucketIfNotExists(torrentsBucket)\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\treturn b.ForEach(func(k, _ []byte) error {\n\t\t\tids = append(ids, string(k))\n\t\t\treturn nil\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := boltdbresumer.New(db, torrentsBucket)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dhtNode *dht.DHT\n\tif cfg.DHTEnabled {\n\t\tdhtConfig := dht.NewConfig()\n\t\tdhtConfig.Address = cfg.DHTHost\n\t\tdhtConfig.Port = int(cfg.DHTPort)\n\t\tdhtConfig.DHTRouters = strings.Join(cfg.DHTBootstrapNodes, \",\")\n\t\tdhtConfig.SaveRoutingTable = false\n\t\tdhtConfig.NumTargetPeers = 0\n\t\tdhtNode, err = dht.New(dhtConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = dhtNode.Start()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tports := make(map[int]struct{})\n\tfor p := cfg.PortBegin; p < cfg.PortEnd; p++ {\n\t\tports[int(p)] = struct{}{}\n\t}\n\tbl := blocklist.New()\n\tvar blTracker *blocklist.Blocklist\n\tif cfg.BlocklistEnabledForTrackers {\n\t\tblTracker = bl\n\t}\n\tc := &Session{\n\t\tconfig: cfg,\n\t\tdb: db,\n\t\tresumer: res,\n\t\tblocklist: bl,\n\t\ttrackerManager: trackermanager.New(blTracker, cfg.DNSResolveTimeout, !cfg.TrackerHTTPVerifyTLS),\n\t\tlog: l,\n\t\ttorrents: make(map[string]*Torrent),\n\t\ttorrentsByInfoHash: make(map[dht.InfoHash][]*Torrent),\n\t\tavailablePorts: ports,\n\t\tdht: dhtNode,\n\t\tpieceCache: piececache.New(cfg.ReadCacheSize, cfg.ReadCacheTTL, cfg.ParallelReads),\n\t\tram: resourcemanager.New(cfg.WriteCacheSize),\n\t\tcreatedAt: time.Now(),\n\t\tsemWrite: semaphore.New(int(cfg.ParallelWrites)),\n\t\tcloseC: make(chan struct{}),\n\t\twebseedClient: http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\t\t\tip, port, err := resolver.Resolve(ctx, addr, cfg.DNSResolveTimeout, bl)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tvar d net.Dialer\n\t\t\t\t\ttaddr := &net.TCPAddr{IP: ip, Port: port}\n\t\t\t\t\tdctx, cancel := context.WithTimeout(ctx, cfg.WebseedDialTimeout)\n\t\t\t\t\tdefer cancel()\n\t\t\t\t\treturn d.DialContext(dctx, network, taddr.String())\n\t\t\t\t},\n\t\t\t\tTLSHandshakeTimeout: cfg.WebseedTLSHandshakeTimeout,\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: !cfg.WebseedVerifyTLS}, \/\/ nolint: gosec\n\t\t\t\tResponseHeaderTimeout: cfg.WebseedResponseHeaderTimeout,\n\t\t\t},\n\t\t},\n\t}\n\tif cfg.SpeedLimitDownload > 0 {\n\t\tc.bucketDownload = ratelimit.NewBucketWithRate(float64(cfg.SpeedLimitDownload), cfg.SpeedLimitDownload)\n\t}\n\tif cfg.SpeedLimitUpload > 0 {\n\t\tc.bucketUpload = ratelimit.NewBucketWithRate(float64(cfg.SpeedLimitUpload), cfg.SpeedLimitUpload)\n\t}\n\terr = c.startBlocklistReloader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\text, err := bitfield.NewBytes(c.extensions[:], 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\text.Set(61) \/\/ Fast Extension (BEP 6)\n\text.Set(43) \/\/ Extension Protocol (BEP 10)\n\tif cfg.DHTEnabled {\n\t\text.Set(63) \/\/ DHT Protocol (BEP 5)\n\t\tc.dhtPeerRequests = make(map[*torrent]struct{})\n\t}\n\terr = c.initMetrics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.loadExistingTorrents(ids)\n\tif c.config.RPCEnabled {\n\t\tc.rpc = newRPCServer(c)\n\t\terr = c.rpc.Start(c.config.RPCHost, c.config.RPCPort)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif cfg.DHTEnabled {\n\t\tgo c.processDHTResults()\n\t}\n\tgo c.updateStatsLoop()\n\treturn c, nil\n}\n\nfunc (s *Session) parseTrackers(tiers [][]string, private bool) []tracker.Tracker {\n\tret := make([]tracker.Tracker, 0, len(tiers))\n\tfor _, tier := range tiers {\n\t\ttrackers := make([]tracker.Tracker, 0, len(tier))\n\t\tfor _, tr := range tier {\n\t\t\tt, err := s.trackerManager.Get(tr, s.config.TrackerHTTPTimeout, s.getTrackerUserAgent(private), int64(s.config.TrackerHTTPMaxResponseSize))\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrackers = append(trackers, t)\n\t\t}\n\t\tif len(trackers) > 0 {\n\t\t\ttra := tracker.NewTier(trackers)\n\t\t\tret = append(ret, tra)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (s *Session) getTrackerUserAgent(private bool) string {\n\tif private {\n\t\treturn s.config.TrackerHTTPPrivateUserAgent\n\t}\n\treturn trackerHTTPPublicUserAgent\n}\n\n\/\/ Close stops all torrents and release the resources.\nfunc (s *Session) Close() error {\n\tclose(s.closeC)\n\n\tif s.config.DHTEnabled {\n\t\ts.dht.Stop()\n\t}\n\n\ts.updateStats()\n\n\tvar wg sync.WaitGroup\n\ts.mTorrents.Lock()\n\twg.Add(len(s.torrents))\n\tfor _, t := range s.torrents {\n\t\tgo func(t *Torrent) {\n\t\t\tt.torrent.Close()\n\t\t\twg.Done()\n\t\t}(t)\n\t}\n\twg.Wait()\n\ts.torrents = nil\n\ts.mTorrents.Unlock()\n\n\tif s.rpc != nil {\n\t\terr := s.rpc.Stop(s.config.RPCShutdownTimeout)\n\t\tif err != nil {\n\t\t\ts.log.Errorln(\"cannot stop RPC server:\", err.Error())\n\t\t}\n\t}\n\n\ts.ram.Close()\n\ts.pieceCache.Close()\n\ts.metrics.Close()\n\treturn s.db.Close()\n}\n\n\/\/ ListTorrents returns all torrents in session as a slice.\n\/\/ The order of the torrents returned is different on each call.\nfunc (s *Session) ListTorrents() []*Torrent {\n\ts.mTorrents.RLock()\n\tdefer s.mTorrents.RUnlock()\n\ttorrents := make([]*Torrent, 0, len(s.torrents))\n\tfor _, t := range s.torrents {\n\t\ttorrents = append(torrents, t)\n\t}\n\treturn torrents\n}\n\nfunc (s *Session) getPort() (int, error) {\n\ts.mPorts.Lock()\n\tdefer s.mPorts.Unlock()\n\tfor p := range s.availablePorts {\n\t\tdelete(s.availablePorts, p)\n\t\treturn p, nil\n\t}\n\treturn 0, errors.New(\"no free port\")\n}\n\nfunc (s *Session) releasePort(port int) {\n\ts.mPorts.Lock()\n\tdefer s.mPorts.Unlock()\n\ts.availablePorts[port] = struct{}{}\n}\n\n\/\/ GetTorrent by its id. Returns nil if torrent with id is not found.\nfunc (s *Session) GetTorrent(id string) *Torrent {\n\ts.mTorrents.RLock()\n\tdefer s.mTorrents.RUnlock()\n\treturn s.torrents[id]\n}\n\n\/\/ RemoveTorrent removes the torrent from the session and delete its files.\nfunc (s *Session) RemoveTorrent(id string) error {\n\tt, err := s.removeTorrentFromClient(id)\n\tif t != nil {\n\t\tgo func() { _ = s.stopAndRemoveData(t) }()\n\t}\n\treturn err\n}\n\nfunc (s *Session) removeTorrentFromClient(id string) (*Torrent, error) {\n\ts.mTorrents.Lock()\n\tdefer s.mTorrents.Unlock()\n\tt, ok := s.torrents[id]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tt.torrent.log.Info(\"removing torrent\")\n\tdelete(s.torrents, id)\n\tdelete(s.torrentsByInfoHash, dht.InfoHash(t.torrent.InfoHash()))\n\treturn t, s.db.Update(func(tx *bolt.Tx) error {\n\t\treturn tx.Bucket(torrentsBucket).DeleteBucket([]byte(id))\n\t})\n}\n\nfunc (s *Session) stopAndRemoveData(t *Torrent) error {\n\tt.torrent.Close()\n\ts.releasePort(t.torrent.port)\n\tvar err error\n\tvar dest string\n\tif s.config.DataDirIncludesTorrentID {\n\t\tdest = filepath.Join(s.config.DataDir, t.torrent.id)\n\t} else if t.torrent.info != nil {\n\t\tdest = t.torrent.info.Name\n\t}\n\tif dest != \"\" {\n\t\terr = os.RemoveAll(dest)\n\t\tif err != nil {\n\t\t\ts.log.Errorf(\"cannot remove torrent data. err: %s dest: %s\", err, dest)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ StartAll starts all torrents in session.\nfunc (s *Session) StartAll() error {\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\ttb := tx.Bucket(torrentsBucket)\n\t\ts.mTorrents.RLock()\n\t\tfor _, t := range s.torrents {\n\t\t\tb := tb.Bucket([]byte(t.torrent.id))\n\t\t\t_ = b.Put([]byte(\"started\"), []byte(\"true\"))\n\t\t}\n\t\tdefer s.mTorrents.RUnlock()\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, t := range s.torrents {\n\t\tt.torrent.Start()\n\t}\n\treturn nil\n}\n\n\/\/ StopAll stops all torrents in session.\nfunc (s *Session) StopAll() error {\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\ttb := tx.Bucket(torrentsBucket)\n\t\ts.mTorrents.RLock()\n\t\tfor _, t := range s.torrents {\n\t\t\tb := tb.Bucket([]byte(t.torrent.id))\n\t\t\t_ = b.Put([]byte(\"started\"), []byte(\"false\"))\n\t\t}\n\t\tdefer s.mTorrents.RUnlock()\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, t := range s.torrents {\n\t\tt.torrent.Stop()\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ionic provides a direct representation of the endpoints and objects\n\/\/ within the Ion Channel API\npackage ionic\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/ion-channel\/ionic\/pagination\"\n\t\"github.com\/ion-channel\/ionic\/requests\"\n\t\"github.com\/ion-channel\/ionic\/responses\"\n)\n\nconst (\n\tmaxIdleConns = 25\n\tmaxIdleConnsPerHost = 25\n\tmaxPagingLimit = 100\n)\n\n\/\/ IonClient represents a communication layer with the Ion Channel API\ntype IonClient struct {\n\tbaseURL *url.URL\n\tclient *http.Client\n\tsession *Session\n\tsessionAutoRenewStop chan struct{}\n}\n\n\/\/ New takes the base URL of the API and returns a client for talking to the API\n\/\/ and an error if any issues instantiating the client are encountered\nfunc New(baseURL string) (*IonClient, error) {\n\tc := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: maxIdleConnsPerHost,\n\t\t\tMaxIdleConns: maxIdleConns,\n\t\t},\n\t}\n\n\treturn NewWithClient(baseURL, c)\n}\n\n\/\/ NewWithClient takes the base URL of the API and an existing HTTP client. It\n\/\/ returns a client for talking to the API and an error if any issues\n\/\/ instantiating the client are encountered\nfunc NewWithClient(baseURL string, client *http.Client) (*IonClient, error) {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ionic: client initialization: %v\", err.Error())\n\t}\n\n\tic := &IonClient{\n\t\tbaseURL: u,\n\t\tclient: client,\n\t}\n\n\treturn ic, nil\n}\n\n\/\/ Delete takes an endpoint, token, params, and headers to pass as a delete call to the\n\/\/ API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Delete(endpoint, token string, params *url.Values, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Delete(ic.client, ic.baseURL, endpoint, token, params, headers)\n}\n\n\/\/ Head takes an endpoint, token, params, headers, and pagination params to pass as a\n\/\/ head call to the API. It will return any errors it encounters with the API.\nfunc (ic *IonClient) Head(endpoint, token string, params *url.Values, headers http.Header, page *pagination.Pagination) error {\n\treturn requests.Head(ic.client, ic.baseURL, endpoint, token, params, headers, page)\n}\n\n\/\/ Get takes an endpoint, token, params, headers, and pagination params to pass as a\n\/\/ get call to the API. It will return a json RawMessage for the response and\n\/\/ any errors it encounters with the API.\nfunc (ic *IonClient) Get(endpoint, token string, params *url.Values, headers http.Header, page *pagination.Pagination) (json.RawMessage, *responses.Meta, error) {\n\treturn requests.Get(ic.client, ic.baseURL, endpoint, token, params, headers, page)\n}\n\n\/\/ Post takes an endpoint, token, params, payload, and headers to pass as a post call\n\/\/ to the API. It will return a json RawMessage for the response and any errors\n\/\/ it encounters with the API.\nfunc (ic *IonClient) Post(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Post(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ Put takes an endpoint, token, params, payload, and headers to pass as a put call to\n\/\/ the API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Put(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Put(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ Patch takes an endpoint, token, params, payload, and headers to pass as a patch call to\n\/\/ the API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Patch(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Patch(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ SetSession sets the client's internal Session that can be used to authenticate when making API requests.\n\/\/ The session can safely be set to null.\n\/\/ Example: myClient.GetSelf(myClient.Session().BearerToken)\nfunc (ic *IonClient) SetSession(session *Session) {\n\tic.session = session\n}\n\n\/\/ Session returns the client's internal Session.\n\/\/ This Session is set and renewed automatically if the EnableSessionAutoRenew method is used.\nfunc (ic *IonClient) Session() *Session {\n\treturn ic.session\n}\n\n\/\/ EnableSessionAutoRenew enables the periodic automatic renewal of the IonClient session using the given\n\/\/ login information, ensuring that the client will always have a valid session token.\n\/\/ To make the client stop automatically renewing its session, use the DisableSessionAutoRenew method.\nfunc (ic *IonClient) EnableSessionAutoRenew(username, password string) error {\n\t\/\/ try to log in with these credentials immediately, abort if it fails\n\tsession, err := ic.Login(username, password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tic.session = session\n\n\tgo ic.autoRenewSessionWorker(username, password)\n\n\treturn nil\n}\n\n\/\/ DisableSessionAutoRenew makes the IonClient stop automatically renewing its session.\nfunc (ic *IonClient) DisableSessionAutoRenew() {\n\tclose(ic.sessionAutoRenewStop)\n}\n\nfunc (ic *IonClient) autoRenewSessionWorker(username, password string) {\n\tticker := time.NewTicker(7 * time.Minute) \/\/ sessions expire after 15 minutes\n\tic.sessionAutoRenewStop = make(chan struct{})\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tsession, err := ic.Login(username, password)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"ERROR - failed to automatically renew IonClient session: %s\", err.Error())\n\t\t\t\tcontinue \/\/ don't blow everything up in case it was just a temporary issue\n\t\t\t}\n\n\t\t\tic.session = session\n\t\tcase <-ic.sessionAutoRenewStop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>Add mutex to IonClient to protect Session access<commit_after>\/\/ Package ionic provides a direct representation of the endpoints and objects\n\/\/ within the Ion Channel API\npackage ionic\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ion-channel\/ionic\/pagination\"\n\t\"github.com\/ion-channel\/ionic\/requests\"\n\t\"github.com\/ion-channel\/ionic\/responses\"\n)\n\nconst (\n\tmaxIdleConns = 25\n\tmaxIdleConnsPerHost = 25\n\tmaxPagingLimit = 100\n)\n\n\/\/ IonClient represents a communication layer with the Ion Channel API\ntype IonClient struct {\n\tbaseURL *url.URL\n\tclient *http.Client\n\tsession *Session\n\tsessionAutoRenewStop chan struct{}\n\t\/\/ mutex is used to lock access to the Session when it is being updated\n\tmutex sync.Mutex\n}\n\n\/\/ New takes the base URL of the API and returns a client for talking to the API\n\/\/ and an error if any issues instantiating the client are encountered\nfunc New(baseURL string) (*IonClient, error) {\n\tc := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: maxIdleConnsPerHost,\n\t\t\tMaxIdleConns: maxIdleConns,\n\t\t},\n\t}\n\n\treturn NewWithClient(baseURL, c)\n}\n\n\/\/ NewWithClient takes the base URL of the API and an existing HTTP client. It\n\/\/ returns a client for talking to the API and an error if any issues\n\/\/ instantiating the client are encountered\nfunc NewWithClient(baseURL string, client *http.Client) (*IonClient, error) {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ionic: client initialization: %v\", err.Error())\n\t}\n\n\tic := &IonClient{\n\t\tbaseURL: u,\n\t\tclient: client,\n\t}\n\n\treturn ic, nil\n}\n\n\/\/ Delete takes an endpoint, token, params, and headers to pass as a delete call to the\n\/\/ API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Delete(endpoint, token string, params *url.Values, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Delete(ic.client, ic.baseURL, endpoint, token, params, headers)\n}\n\n\/\/ Head takes an endpoint, token, params, headers, and pagination params to pass as a\n\/\/ head call to the API. It will return any errors it encounters with the API.\nfunc (ic *IonClient) Head(endpoint, token string, params *url.Values, headers http.Header, page *pagination.Pagination) error {\n\treturn requests.Head(ic.client, ic.baseURL, endpoint, token, params, headers, page)\n}\n\n\/\/ Get takes an endpoint, token, params, headers, and pagination params to pass as a\n\/\/ get call to the API. It will return a json RawMessage for the response and\n\/\/ any errors it encounters with the API.\nfunc (ic *IonClient) Get(endpoint, token string, params *url.Values, headers http.Header, page *pagination.Pagination) (json.RawMessage, *responses.Meta, error) {\n\treturn requests.Get(ic.client, ic.baseURL, endpoint, token, params, headers, page)\n}\n\n\/\/ Post takes an endpoint, token, params, payload, and headers to pass as a post call\n\/\/ to the API. It will return a json RawMessage for the response and any errors\n\/\/ it encounters with the API.\nfunc (ic *IonClient) Post(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Post(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ Put takes an endpoint, token, params, payload, and headers to pass as a put call to\n\/\/ the API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Put(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Put(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ Patch takes an endpoint, token, params, payload, and headers to pass as a patch call to\n\/\/ the API. It will return a json RawMessage for the response and any errors it\n\/\/ encounters with the API.\nfunc (ic *IonClient) Patch(endpoint, token string, params *url.Values, payload bytes.Buffer, headers http.Header) (json.RawMessage, error) {\n\treturn requests.Patch(ic.client, ic.baseURL, endpoint, token, params, payload, headers)\n}\n\n\/\/ SetSession sets the client's internal Session that can be used to authenticate when making API requests.\n\/\/ The session can safely be set to null.\n\/\/ Example: myClient.GetSelf(myClient.Session().BearerToken)\nfunc (ic *IonClient) SetSession(session *Session) {\n\tic.mutex.Lock()\n\tic.session = session\n\tic.mutex.Unlock()\n}\n\n\/\/ Session returns the client's internal Session.\n\/\/ This Session is set and renewed automatically if the EnableSessionAutoRenew method is used.\nfunc (ic *IonClient) Session() *Session {\n\tic.mutex.Lock()\n\tdefer ic.mutex.Unlock()\n\n\treturn ic.session\n}\n\n\/\/ EnableSessionAutoRenew enables the periodic automatic renewal of the IonClient session using the given\n\/\/ login information, ensuring that the client will always have a valid session token.\n\/\/ To make the client stop automatically renewing its session, use the DisableSessionAutoRenew method.\nfunc (ic *IonClient) EnableSessionAutoRenew(username, password string) error {\n\t\/\/ try to log in with these credentials immediately, abort if it fails\n\tsession, err := ic.Login(username, password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tic.SetSession(session)\n\n\tgo ic.autoRenewSessionWorker(username, password)\n\n\treturn nil\n}\n\n\/\/ DisableSessionAutoRenew makes the IonClient stop automatically renewing its session.\nfunc (ic *IonClient) DisableSessionAutoRenew() {\n\tclose(ic.sessionAutoRenewStop)\n}\n\nfunc (ic *IonClient) autoRenewSessionWorker(username, password string) {\n\tticker := time.NewTicker(7 * time.Minute) \/\/ sessions expire after 15 minutes\n\tic.sessionAutoRenewStop = make(chan struct{})\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tsession, err := ic.Login(username, password)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"ERROR - failed to automatically renew IonClient session: %s\", err.Error())\n\t\t\t\tcontinue \/\/ don't blow everything up in case it was just a temporary issue\n\t\t\t}\n\n\t\t\tic.SetSession(session)\n\t\tcase <-ic.sessionAutoRenewStop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package torrent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/zeebo\/bencode\"\n\n\t\"github.com\/cenkalti\/rain\/bt\"\n)\n\ntype Torrent struct {\n\tInfo *Info `bencode:\"-\"`\n\tRawInfo bencode.RawMessage `bencode:\"info\" json:\"-\"`\n\tAnnounce string `bencode:\"announce\"`\n\tAnnounceList [][]string `bencode:\"announce-list\"`\n\tCreationDate int64 `bencode:\"creation date\"`\n\tComment string `bencode:\"comment\"`\n\tCreatedBy string `bencode:\"created by\"`\n\tEncoding string `bencode:\"encoding\"`\n}\n\ntype Info struct {\n\tPieceLength uint32 `bencode:\"piece length\" json:\"piece_length\"`\n\tPieces []byte `bencode:\"pieces\" json:\"pieces\"`\n\tPrivate byte `bencode:\"private\" json:\"private\"`\n\tName string `bencode:\"name\" json:\"name\"`\n\t\/\/ Single File Mode\n\tLength int64 `bencode:\"length\" json:\"length\"`\n\tMd5sum string `bencode:\"md5sum\" json:\"md5sum,omitempty\"`\n\t\/\/ Multiple File mode\n\tFiles []fileDict `bencode:\"files\" json:\"files\"`\n\n\tRaw []byte `bencode:\"-\" json:\"-\"`\n\n\t\/\/ Calculated fileds\n\tHash bt.InfoHash `bencode:\"-\" json:\"-\"`\n\tTotalLength int64 `bencode:\"-\" json:\"-\"`\n\tNumPieces uint32 `bencode:\"-\" json:\"-\"`\n\tMultiFile bool `bencode:\"-\" json:\"-\"`\n}\n\ntype fileDict struct {\n\tLength int64 `bencode:\"length\" json:\"length\"`\n\tPath []string `bencode:\"path\" json:\"path\"`\n\tMd5sum string `bencode:\"md5sum\" json:\"md5sum,omitempty\"`\n}\n\nfunc New(path string) (*Torrent, error) {\n\tvar t Torrent\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := bencode.NewDecoder(f)\n\terr = d.Decode(&t)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(t.RawInfo) == 0 {\n\t\treturn nil, errors.New(\"no info dict in torrent file\")\n\t}\n\n\tt.Info, err = NewInfo(t.RawInfo)\n\treturn &t, err\n}\n\nfunc NewInfo(b []byte) (*Info, error) {\n\tvar i Info\n\n\tr := bytes.NewReader(b)\n\td := bencode.NewDecoder(r)\n\terr := d.Decode(&i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti.Raw = append([]byte(nil), b...)\n\n\thash := sha1.New()\n\thash.Write(b)\n\tcopy(i.Hash[:], hash.Sum(nil))\n\n\ti.MultiFile = len(i.Files) != 0\n\n\ti.NumPieces = uint32(len(i.Pieces)) \/ sha1.Size\n\n\tif !i.MultiFile {\n\t\ti.TotalLength = i.Length\n\t} else {\n\t\tfor _, f := range i.Files {\n\t\t\ti.TotalLength += f.Length\n\t\t}\n\t}\n\n\treturn &i, nil\n}\n\nfunc (i *Info) PieceHash(index uint32) []byte {\n\tif index >= i.NumPieces {\n\t\tpanic(\"piece index out of range\")\n\t}\n\tstart := index * sha1.Size\n\tend := start + sha1.Size\n\treturn i.Pieces[start:end]\n}\n\n\/\/ GetFiles returns the files in torrent as a slice, even if there is a single file.\nfunc (i *Info) GetFiles() []fileDict {\n\tif i.MultiFile {\n\t\treturn i.Files\n\t} else {\n\t\treturn []fileDict{fileDict{i.Length, []string{i.Name}, i.Md5sum}}\n\t}\n}\n<commit_msg>remove raw info<commit_after>package torrent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/zeebo\/bencode\"\n\n\t\"github.com\/cenkalti\/rain\/bt\"\n)\n\ntype Torrent struct {\n\tInfo *Info `bencode:\"-\"`\n\tRawInfo bencode.RawMessage `bencode:\"info\" json:\"-\"`\n\tAnnounce string `bencode:\"announce\"`\n\tAnnounceList [][]string `bencode:\"announce-list\"`\n\tCreationDate int64 `bencode:\"creation date\"`\n\tComment string `bencode:\"comment\"`\n\tCreatedBy string `bencode:\"created by\"`\n\tEncoding string `bencode:\"encoding\"`\n}\n\ntype Info struct {\n\tPieceLength uint32 `bencode:\"piece length\" json:\"piece_length\"`\n\tPieces []byte `bencode:\"pieces\" json:\"pieces\"`\n\tPrivate byte `bencode:\"private\" json:\"private\"`\n\tName string `bencode:\"name\" json:\"name\"`\n\t\/\/ Single File Mode\n\tLength int64 `bencode:\"length\" json:\"length\"`\n\tMd5sum string `bencode:\"md5sum\" json:\"md5sum,omitempty\"`\n\t\/\/ Multiple File mode\n\tFiles []fileDict `bencode:\"files\" json:\"files\"`\n\t\/\/ Calculated fileds\n\tHash bt.InfoHash `bencode:\"-\" json:\"-\"`\n\tTotalLength int64 `bencode:\"-\" json:\"-\"`\n\tNumPieces uint32 `bencode:\"-\" json:\"-\"`\n\tMultiFile bool `bencode:\"-\" json:\"-\"`\n}\n\ntype fileDict struct {\n\tLength int64 `bencode:\"length\" json:\"length\"`\n\tPath []string `bencode:\"path\" json:\"path\"`\n\tMd5sum string `bencode:\"md5sum\" json:\"md5sum,omitempty\"`\n}\n\nfunc New(path string) (*Torrent, error) {\n\tvar t Torrent\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := bencode.NewDecoder(f)\n\terr = d.Decode(&t)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(t.RawInfo) == 0 {\n\t\treturn nil, errors.New(\"no info dict in torrent file\")\n\t}\n\n\tt.Info, err = NewInfo(t.RawInfo)\n\treturn &t, err\n}\n\nfunc NewInfo(b []byte) (*Info, error) {\n\tvar i Info\n\n\tr := bytes.NewReader(b)\n\td := bencode.NewDecoder(r)\n\terr := d.Decode(&i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thash := sha1.New()\n\thash.Write(b)\n\tcopy(i.Hash[:], hash.Sum(nil))\n\n\ti.MultiFile = len(i.Files) != 0\n\n\ti.NumPieces = uint32(len(i.Pieces)) \/ sha1.Size\n\n\tif !i.MultiFile {\n\t\ti.TotalLength = i.Length\n\t} else {\n\t\tfor _, f := range i.Files {\n\t\t\ti.TotalLength += f.Length\n\t\t}\n\t}\n\n\treturn &i, nil\n}\n\nfunc (i *Info) PieceHash(index uint32) []byte {\n\tif index >= i.NumPieces {\n\t\tpanic(\"piece index out of range\")\n\t}\n\tstart := index * sha1.Size\n\tend := start + sha1.Size\n\treturn i.Pieces[start:end]\n}\n\n\/\/ GetFiles returns the files in torrent as a slice, even if there is a single file.\nfunc (i *Info) GetFiles() []fileDict {\n\tif i.MultiFile {\n\t\treturn i.Files\n\t} else {\n\t\treturn []fileDict{fileDict{i.Length, []string{i.Name}, i.Md5sum}}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package osdb\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kolo\/xmlrpc\"\n)\n\nconst (\n\tOSDBServer = \"http:\/\/api.opensubtitles.org\/xml-rpc\"\n\tDefaultUserAgent = \"OS Test User Agent\" \/\/ OSDB's test agent\n\tSearchLimit = 100\n\tStatusSuccess = \"200 OK\"\n)\n\ntype Client struct {\n\tUserAgent string\n\tToken string\n\tLogin string\n\tPassword string\n\tLanguage string\n\t*xmlrpc.Client\n}\n\ntype Movie struct {\n\tId string `xmlrpc:\"id\"`\n\tTitle string `xmlrpc:\"title\"`\n\tCover string `xmlrpc:\"cover\"`\n\tYear string `xmlrpc:\"year\"`\n\tDuration string `xmlrpc:\"duration\"`\n\tTagLine string `xmlrpc:\"tagline\"`\n\tPlot string `xmlrpc:\"plot\"`\n\tGoofs string `xmlrpc:\"goofs\"`\n\tTrivia string `xmlrpc:\"trivia\"`\n\tCast map[string]string `xmlrpc:\"cast\"`\n\tDirectors map[string]string `xmlrpc:\"directors\"`\n\tWriters map[string]string `xmlrpc:\"writers\"`\n\tAwards string `xmlrpc:\"awards\"`\n\tGenres []string `xmlrpc:\"genres\"`\n\tCountries []string `xmlrpc:\"country\"`\n\tLanguages []string `xmlrpc:\"language\"`\n\tCertifications []string `xmlrpc:\"certification\"`\n}\n\n\/\/ A collection of movies.\ntype Movies []Movie\n\nfunc (m Movies) Empty() bool {\n\treturn len(m) == 0\n}\n\n\/\/ Search subtitles matching a file hash.\nfunc (c *Client) FileSearch(path string, langs []string) (Subtitles, error) {\n\t\/\/ Hash file, and other params values.\n\tparams, err := c.fileToParams(path, langs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.SearchSubtitles(params)\n}\n\n\/\/ Search subtitles matching IMDB IDs.\nfunc (c *Client) ImdbIdSearch(ids []string, langs []string) (Subtitles, error) {\n\t\/\/ OSDB search params struct\n\tparams := []interface{}{\n\t\tc.Token,\n\t\t[]map[string]string{},\n\t}\n\n\t\/\/ Convert ids []string into a slice of map[string]string for search. Ouch!\n\tfor _, imdbId := range ids {\n\t\tparams[1] = append(\n\t\t\tparams[1].([]map[string]string),\n\t\t\tmap[string]string{\n\t\t\t\t\"imdbid\": imdbId,\n\t\t\t\t\"sublanguageid\": strings.Join(langs, \",\"),\n\t\t\t},\n\t\t)\n\t}\n\n\treturn c.SearchSubtitles(¶ms)\n}\n\n\/\/ Search Subtitles, DIY method.\nfunc (c *Client) SearchSubtitles(params *[]interface{}) (Subtitles, error) {\n\tres := struct {\n\t\tData []Subtitle `xmlrpc:\"data\"`\n\t}{}\n\n\tif err := c.Call(\"SearchSubtitles\", *params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Search movies on IMDB.\nfunc (c *Client) SearchOnImdb(q string) (Movies, error) {\n\tparams := []interface{}{c.Token, q}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData Movies `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"SearchMoviesOnIMDB\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"SearchMoviesOnIMDB error: %s\", res.Status)\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Get movie details from IMDB.\nfunc (c *Client) GetImdbMovieDetails(id string) (*Movie, error) {\n\tparams := []interface{}{c.Token, id}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData Movie `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"GetIMDBMovieDetails\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"GetIMDBMovieDetails error: %s\", res.Status)\n\t}\n\treturn &res.Data, nil\n}\n\n\/\/ Download subtitles by file ID.\nfunc (c *Client) DownloadSubtitles(ids []int) ([]SubtitleFile, error) {\n\tparams := []interface{}{c.Token, ids}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData []SubtitleFile `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"DownloadSubtitles\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"DownloadSubtitles error: %s\", res.Status)\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Save subtitle file to disk, using the OSDB specified name.\nfunc (c *Client) Download(s *Subtitle) error {\n\treturn c.DownloadTo(s, s.SubFileName)\n}\n\n\/\/ Save subtitle file to disk, using the specified path.\nfunc (c *Client) DownloadTo(s *Subtitle, path string) (err error) {\n\tid, err := strconv.Atoi(s.IDSubtitleFile)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Download\n\tfiles, err := c.DownloadSubtitles([]int{id})\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(files) == 0 {\n\t\treturn fmt.Errorf(\"No file match this subtitle ID\")\n\t}\n\n\t\/\/ Save to disk.\n\tr, err := files[0].Reader()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\tw, err := os.Create(path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer w.Close()\n\n\t_, err = io.Copy(w, r)\n\treturn\n}\n\n\/\/ Checks whether OSDB already has subtitles for a movie and subtitle\n\/\/ files.\nfunc (c *Client) HasSubtitlesForFiles(movie_file string, sub_file string) (bool, error) {\n\tsubtitle, err := NewSubtitleWithFile(movie_file, sub_file)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\treturn c.HasSubtitles(Subtitles{subtitle})\n}\n\n\/\/ Checks whether subtitles already exists in OSDB. The mandatory fields in the\n\/\/ received Subtitle slice are: SubHash, SubFileName, MovieHash, MovieByteSize,\n\/\/ and MovieFileName.\nfunc (c *Client) HasSubtitles(subs Subtitles) (bool, error) {\n\targs, err := subs.toUploadParams(c)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tExists int `xmlrpc:\"alreadyindb\"`\n\t\tData Subtitle `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"TryUploadSubtitles\", args, &res); err != nil {\n\t\treturn true, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn true, fmt.Errorf(\"HasSubtitles: %s\", res.Status)\n\t}\n\n\treturn res.Exists == 1, nil\n}\n\n\/\/ Keep session alive\nfunc (c *Client) Noop() (err error) {\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t}{}\n\terr = c.Call(\"NoOperation\", []interface{}{c.Token}, &res)\n\tif err == nil && res.Status != StatusSuccess {\n\t\terr = fmt.Errorf(\"NoOp: %s\", res.Status)\n\t}\n\treturn\n}\n\n\/\/ Login to the API, and return a session token.\nfunc (c *Client) LogIn(user string, pass string, lang string) (err error) {\n\tc.Login = user\n\tc.Password = pass\n\tc.Language = lang\n\targs := []interface{}{user, pass, lang, c.UserAgent}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tToken string `xmlrpc:\"token\"`\n\t}{}\n\tif err = c.Call(\"LogIn\", args, &res); err != nil {\n\t\treturn\n\t}\n\n\tif res.Status != StatusSuccess {\n\t\treturn fmt.Errorf(\"Login: %s\", res.Status)\n\t}\n\tc.Token = res.Token\n\treturn\n}\n\n\/\/ Logout...\nfunc (c *Client) LogOut() (err error) {\n\targs := []interface{}{c.Token}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t}{}\n\treturn c.Call(\"LogOut\", args, &res)\n}\n\n\/\/ Build query parameters for hash-based movie search.\nfunc (c *Client) fileToParams(path string, langs []string) (*[]interface{}, error) {\n\t\/\/ File size\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsize := fi.Size()\n\n\t\/\/ File hash\n\th, err := HashFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := []interface{}{\n\t\tc.Token,\n\t\t[]struct {\n\t\t\tHash string `xmlrpc:\"moviehash\"`\n\t\t\tSize int64 `xmlrpc:\"moviebytesize\"`\n\t\t\tLangs string `xmlrpc:\"sublanguageid\"`\n\t\t}{{\n\t\t\tfmt.Sprintf(\"%x\", h),\n\t\t\tsize,\n\t\t\tstrings.Join(langs, \",\"),\n\t\t}},\n\t}\n\treturn ¶ms, nil\n}\n<commit_msg>OSDB returns different types sometimes<commit_after>package osdb\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/kolo\/xmlrpc\"\n)\n\nconst (\n\tOSDBServer = \"http:\/\/api.opensubtitles.org\/xml-rpc\"\n\tDefaultUserAgent = \"OS Test User Agent\" \/\/ OSDB's test agent\n\tSearchLimit = 100\n\tStatusSuccess = \"200 OK\"\n)\n\ntype Client struct {\n\tUserAgent string\n\tToken string\n\tLogin string\n\tPassword string\n\tLanguage string\n\t*xmlrpc.Client\n}\n\ntype Movie struct {\n\tId string `xmlrpc:\"id\"`\n\tTitle string `xmlrpc:\"title\"`\n\tCover string `xmlrpc:\"cover\"`\n\tYear string `xmlrpc:\"year\"`\n\tDuration string `xmlrpc:\"duration\"`\n\tTagLine string `xmlrpc:\"tagline\"`\n\tPlot string `xmlrpc:\"plot\"`\n\tGoofs string `xmlrpc:\"goofs\"`\n\tTrivia string `xmlrpc:\"trivia\"`\n\tCast map[string]string `xmlrpc:\"cast\"`\n\tDirectors map[string]string `xmlrpc:\"directors\"`\n\tWriters map[string]string `xmlrpc:\"writers\"`\n\tAwards string `xmlrpc:\"awards\"`\n\tGenres []string `xmlrpc:\"genres\"`\n\tCountries []string `xmlrpc:\"country\"`\n\tLanguages []string `xmlrpc:\"language\"`\n\tCertifications []string `xmlrpc:\"certification\"`\n}\n\n\/\/ A collection of movies.\ntype Movies []Movie\n\nfunc (m Movies) Empty() bool {\n\treturn len(m) == 0\n}\n\n\/\/ Search subtitles matching a file hash.\nfunc (c *Client) FileSearch(path string, langs []string) (Subtitles, error) {\n\t\/\/ Hash file, and other params values.\n\tparams, err := c.fileToParams(path, langs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.SearchSubtitles(params)\n}\n\n\/\/ Search subtitles matching IMDB IDs.\nfunc (c *Client) ImdbIdSearch(ids []string, langs []string) (Subtitles, error) {\n\t\/\/ OSDB search params struct\n\tparams := []interface{}{\n\t\tc.Token,\n\t\t[]map[string]string{},\n\t}\n\n\t\/\/ Convert ids []string into a slice of map[string]string for search. Ouch!\n\tfor _, imdbId := range ids {\n\t\tparams[1] = append(\n\t\t\tparams[1].([]map[string]string),\n\t\t\tmap[string]string{\n\t\t\t\t\"imdbid\": imdbId,\n\t\t\t\t\"sublanguageid\": strings.Join(langs, \",\"),\n\t\t\t},\n\t\t)\n\t}\n\n\treturn c.SearchSubtitles(¶ms)\n}\n\n\/\/ Search Subtitles, DIY method.\nfunc (c *Client) SearchSubtitles(params *[]interface{}) (Subtitles, error) {\n\tres := struct {\n\t\tData []Subtitle `xmlrpc:\"data\"`\n\t}{}\n\n\tif err := c.Call(\"SearchSubtitles\", *params, &res); err != nil {\n\t\tif !strings.Contains(err.Error(), \"type mismatch\") {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Search movies on IMDB.\nfunc (c *Client) SearchOnImdb(q string) (Movies, error) {\n\tparams := []interface{}{c.Token, q}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData Movies `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"SearchMoviesOnIMDB\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"SearchMoviesOnIMDB error: %s\", res.Status)\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Get movie details from IMDB.\nfunc (c *Client) GetImdbMovieDetails(id string) (*Movie, error) {\n\tparams := []interface{}{c.Token, id}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData Movie `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"GetIMDBMovieDetails\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"GetIMDBMovieDetails error: %s\", res.Status)\n\t}\n\treturn &res.Data, nil\n}\n\n\/\/ Download subtitles by file ID.\nfunc (c *Client) DownloadSubtitles(ids []int) ([]SubtitleFile, error) {\n\tparams := []interface{}{c.Token, ids}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tData []SubtitleFile `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"DownloadSubtitles\", params, &res); err != nil {\n\t\treturn nil, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn nil, fmt.Errorf(\"DownloadSubtitles error: %s\", res.Status)\n\t}\n\treturn res.Data, nil\n}\n\n\/\/ Save subtitle file to disk, using the OSDB specified name.\nfunc (c *Client) Download(s *Subtitle) error {\n\treturn c.DownloadTo(s, s.SubFileName)\n}\n\n\/\/ Save subtitle file to disk, using the specified path.\nfunc (c *Client) DownloadTo(s *Subtitle, path string) (err error) {\n\tid, err := strconv.Atoi(s.IDSubtitleFile)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Download\n\tfiles, err := c.DownloadSubtitles([]int{id})\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(files) == 0 {\n\t\treturn fmt.Errorf(\"No file match this subtitle ID\")\n\t}\n\n\t\/\/ Save to disk.\n\tr, err := files[0].Reader()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\tw, err := os.Create(path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer w.Close()\n\n\t_, err = io.Copy(w, r)\n\treturn\n}\n\n\/\/ Checks whether OSDB already has subtitles for a movie and subtitle\n\/\/ files.\nfunc (c *Client) HasSubtitlesForFiles(movie_file string, sub_file string) (bool, error) {\n\tsubtitle, err := NewSubtitleWithFile(movie_file, sub_file)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\treturn c.HasSubtitles(Subtitles{subtitle})\n}\n\n\/\/ Checks whether subtitles already exists in OSDB. The mandatory fields in the\n\/\/ received Subtitle slice are: SubHash, SubFileName, MovieHash, MovieByteSize,\n\/\/ and MovieFileName.\nfunc (c *Client) HasSubtitles(subs Subtitles) (bool, error) {\n\targs, err := subs.toUploadParams(c)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tExists int `xmlrpc:\"alreadyindb\"`\n\t\tData Subtitle `xmlrpc:\"data\"`\n\t}{}\n\tif err := c.Call(\"TryUploadSubtitles\", args, &res); err != nil {\n\t\treturn true, err\n\t}\n\tif res.Status != StatusSuccess {\n\t\treturn true, fmt.Errorf(\"HasSubtitles: %s\", res.Status)\n\t}\n\n\treturn res.Exists == 1, nil\n}\n\n\/\/ Keep session alive\nfunc (c *Client) Noop() (err error) {\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t}{}\n\terr = c.Call(\"NoOperation\", []interface{}{c.Token}, &res)\n\tif err == nil && res.Status != StatusSuccess {\n\t\terr = fmt.Errorf(\"NoOp: %s\", res.Status)\n\t}\n\treturn\n}\n\n\/\/ Login to the API, and return a session token.\nfunc (c *Client) LogIn(user string, pass string, lang string) (err error) {\n\tc.Login = user\n\tc.Password = pass\n\tc.Language = lang\n\targs := []interface{}{user, pass, lang, c.UserAgent}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t\tToken string `xmlrpc:\"token\"`\n\t}{}\n\tif err = c.Call(\"LogIn\", args, &res); err != nil {\n\t\treturn\n\t}\n\n\tif res.Status != StatusSuccess {\n\t\treturn fmt.Errorf(\"Login: %s\", res.Status)\n\t}\n\tc.Token = res.Token\n\treturn\n}\n\n\/\/ Logout...\nfunc (c *Client) LogOut() (err error) {\n\targs := []interface{}{c.Token}\n\tres := struct {\n\t\tStatus string `xmlrpc:\"status\"`\n\t}{}\n\treturn c.Call(\"LogOut\", args, &res)\n}\n\n\/\/ Build query parameters for hash-based movie search.\nfunc (c *Client) fileToParams(path string, langs []string) (*[]interface{}, error) {\n\t\/\/ File size\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsize := fi.Size()\n\n\t\/\/ File hash\n\th, err := HashFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := []interface{}{\n\t\tc.Token,\n\t\t[]struct {\n\t\t\tHash string `xmlrpc:\"moviehash\"`\n\t\t\tSize int64 `xmlrpc:\"moviebytesize\"`\n\t\t\tLangs string `xmlrpc:\"sublanguageid\"`\n\t\t}{{\n\t\t\tfmt.Sprintf(\"%x\", h),\n\t\t\tsize,\n\t\t\tstrings.Join(langs, \",\"),\n\t\t}},\n\t}\n\treturn ¶ms, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\nconst MSG_BUFFER int = 10\n\nconst HELP_TEXT string = `-> Available commands:\n \/about\n \/exit\n \/help\n \/list\n \/nick $NAME\n \/whois $NAME\n`\n\nconst ABOUT_TEXT string = `-> ssh-chat is made by @shazow.\n\n It is a custom ssh server built in Go to serve a chat experience\n instead of a shell.\n\n Source: https:\/\/github.com\/shazow\/ssh-chat\n\n For more, visit shazow.net or follow at twitter.com\/shazow\n`\n\ntype Client struct {\n\tServer *Server\n\tConn *ssh.ServerConn\n\tMsg chan string\n\tName string\n\tColor string\n\tOp bool\n\tready chan struct{}\n\tterm *terminal.Terminal\n\ttermWidth int\n\ttermHeight int\n\tsilencedUntil time.Time\n}\n\nfunc NewClient(server *Server, conn *ssh.ServerConn) *Client {\n\treturn &Client{\n\t\tServer: server,\n\t\tConn: conn,\n\t\tName: conn.User(),\n\t\tColor: RandomColor(),\n\t\tMsg: make(chan string, MSG_BUFFER),\n\t\tready: make(chan struct{}, 1),\n\t}\n}\n\nfunc (c *Client) ColoredName() string {\n\treturn ColorString(c.Color, c.Name)\n}\n\nfunc (c *Client) Write(msg string) {\n\tc.term.Write([]byte(msg + \"\\r\\n\"))\n}\n\nfunc (c *Client) WriteLines(msg []string) {\n\tfor _, line := range msg {\n\t\tc.Write(line)\n\t}\n}\n\nfunc (c *Client) IsSilenced() bool {\n\treturn c.silencedUntil.After(time.Now())\n}\n\nfunc (c *Client) Silence(d time.Duration) {\n\tc.silencedUntil = time.Now().Add(d)\n}\n\nfunc (c *Client) Resize(width int, height int) error {\n\terr := c.term.SetSize(width, height)\n\tif err != nil {\n\t\tlogger.Errorf(\"Resize failed: %dx%d\", width, height)\n\t\treturn err\n\t}\n\tc.termWidth, c.termHeight = width, height\n\treturn nil\n}\n\nfunc (c *Client) Rename(name string) {\n\tc.Name = name\n\tc.term.SetPrompt(fmt.Sprintf(\"[%s] \", c.ColoredName()))\n}\n\nfunc (c *Client) Fingerprint() string {\n\treturn c.Conn.Permissions.Extensions[\"fingerprint\"]\n}\n\nfunc (c *Client) handleShell(channel ssh.Channel) {\n\tdefer channel.Close()\n\n\t\/\/ FIXME: This shouldn't live here, need to restructure the call chaining.\n\tc.Server.Add(c)\n\tgo func() {\n\t\t\/\/ Block until done, then remove.\n\t\tc.Conn.Wait()\n\t\tc.Server.Remove(c)\n\t}()\n\n\tgo func() {\n\t\tfor msg := range c.Msg {\n\t\t\tc.Write(msg)\n\t\t}\n\t}()\n\n\tfor {\n\t\tline, err := c.term.ReadLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tparts := strings.SplitN(line, \" \", 3)\n\t\tisCmd := strings.HasPrefix(parts[0], \"\/\")\n\n\t\tif isCmd {\n\t\t\t\/\/ TODO: Factor this out.\n\t\t\tswitch parts[0] {\n\t\t\tcase \"\/test-colors\": \/\/ Shh, this command is a secret!\n\t\t\t\tc.Write(ColorString(\"32\", \"Lorem ipsum dolor sit amet,\"))\n\t\t\t\tc.Write(\"consectetur \" + ColorString(\"31;1\", \"adipiscing\") + \" elit.\")\n\t\t\tcase \"\/exit\":\n\t\t\t\tchannel.Close()\n\t\t\tcase \"\/help\":\n\t\t\t\tc.WriteLines(strings.Split(HELP_TEXT, \"\\n\"))\n\t\t\tcase \"\/about\":\n\t\t\t\tc.WriteLines(strings.Split(ABOUT_TEXT, \"\\n\"))\n\t\t\tcase \"\/uptime\":\n\t\t\t\tc.Write(c.Server.Uptime())\n\t\t\tcase \"\/me\":\n\t\t\t\tme := strings.TrimLeft(line, \"\/me\")\n\t\t\t\tif me == \"\" {\n\t\t\t\t\tme = \" is at a loss for words.\"\n\t\t\t\t}\n\t\t\t\tmsg := fmt.Sprintf(\"** %s%s\", c.ColoredName(), me)\n\t\t\t\tif c.IsSilenced() || len(msg) > 1000 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Message rejected.\")\n\t\t\t\t} else {\n\t\t\t\t\tc.Server.Broadcast(msg, nil)\n\t\t\t\t}\n\t\t\tcase \"\/nick\":\n\t\t\t\tif len(parts) == 2 {\n\t\t\t\t\tc.Server.Rename(c, parts[1])\n\t\t\t\t} else {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/nick $NAME\")\n\t\t\t\t}\n\t\t\tcase \"\/whois\":\n\t\t\t\tif len(parts) == 2 {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client != nil {\n\t\t\t\t\t\tversion := RE_STRIP_TEXT.ReplaceAllString(string(client.Conn.ClientVersion()), \"\")\n\t\t\t\t\t\tif len(version) > 100 {\n\t\t\t\t\t\t\tversion = \"Evil Jerk with a superlong string\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> %s is %s via %s\", client.ColoredName(), client.Fingerprint(), version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/whois $NAME\")\n\t\t\t\t}\n\t\t\tcase \"\/list\":\n\t\t\t\tnames := c.Server.List(nil)\n\t\t\t\tc.Msg <- fmt.Sprintf(\"-> %d connected: %s\", len(names), strings.Join(names, \", \"))\n\t\t\tcase \"\/ban\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) != 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/ban $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfingerprint := client.Fingerprint()\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Banned by %s.\", c.ColoredName()))\n\t\t\t\t\t\tc.Server.Ban(fingerprint, nil)\n\t\t\t\t\t\tclient.Conn.Close()\n\t\t\t\t\t\tc.Server.Broadcast(fmt.Sprintf(\"* %s was banned by %s\", parts[1], c.ColoredName()), nil)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"\/op\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) != 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/op $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfingerprint := client.Fingerprint()\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Made op by %s.\", c.ColoredName()))\n\t\t\t\t\t\tc.Server.Op(fingerprint)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"\/silence\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) < 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/silence $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tduration := time.Duration(5) * time.Minute\n\t\t\t\t\tif len(parts) >= 3 {\n\t\t\t\t\t\tparsedDuration, err := time.ParseDuration(parts[2])\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\tduration = parsedDuration\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tclient.Silence(duration)\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Silenced for %s by %s.\", duration, c.ColoredName()))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Invalid command: %s\", line)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"%s: %s\", c.ColoredName(), line)\n\t\tif c.IsSilenced() || len(msg) > 1000 {\n\t\t\tc.Msg <- fmt.Sprintf(\"-> Message rejected.\")\n\t\t\tcontinue\n\t\t}\n\t\tc.Server.Broadcast(msg, c)\n\t}\n\n}\n\nfunc (c *Client) handleChannels(channels <-chan ssh.NewChannel) {\n\tprompt := fmt.Sprintf(\"[%s] \", c.ColoredName())\n\n\thasShell := false\n\n\tfor ch := range channels {\n\t\tif t := ch.ChannelType(); t != \"session\" {\n\t\t\tch.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"unknown channel type: %s\", t))\n\t\t\tcontinue\n\t\t}\n\n\t\tchannel, requests, err := ch.Accept()\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Could not accept channel: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer channel.Close()\n\n\t\tc.term = terminal.NewTerminal(channel, prompt)\n\t\tfor req := range requests {\n\t\t\tvar width, height int\n\t\t\tvar ok bool\n\n\t\t\tswitch req.Type {\n\t\t\tcase \"shell\":\n\t\t\t\tif c.term != nil && !hasShell {\n\t\t\t\t\tgo c.handleShell(channel)\n\t\t\t\t\tok = true\n\t\t\t\t\thasShell = true\n\t\t\t\t}\n\t\t\tcase \"pty-req\":\n\t\t\t\twidth, height, ok = parsePtyRequest(req.Payload)\n\t\t\t\tif ok {\n\t\t\t\t\terr := c.Resize(width, height)\n\t\t\t\t\tok = err == nil\n\t\t\t\t}\n\t\t\tcase \"window-change\":\n\t\t\t\twidth, height, ok = parseWinchRequest(req.Payload)\n\t\t\t\tif ok {\n\t\t\t\t\terr := c.Resize(width, height)\n\t\t\t\t\tok = err == nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(ok, nil)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Added command to help<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\nconst MSG_BUFFER int = 10\n\nconst HELP_TEXT string = `-> Available commands:\n \/about\n \/exit\n \/help\n \/list\n \/uptime\n \/nick $NAME\n \/whois $NAME\n`\n\nconst ABOUT_TEXT string = `-> ssh-chat is made by @shazow.\n\n It is a custom ssh server built in Go to serve a chat experience\n instead of a shell.\n\n Source: https:\/\/github.com\/shazow\/ssh-chat\n\n For more, visit shazow.net or follow at twitter.com\/shazow\n`\n\ntype Client struct {\n\tServer *Server\n\tConn *ssh.ServerConn\n\tMsg chan string\n\tName string\n\tColor string\n\tOp bool\n\tready chan struct{}\n\tterm *terminal.Terminal\n\ttermWidth int\n\ttermHeight int\n\tsilencedUntil time.Time\n}\n\nfunc NewClient(server *Server, conn *ssh.ServerConn) *Client {\n\treturn &Client{\n\t\tServer: server,\n\t\tConn: conn,\n\t\tName: conn.User(),\n\t\tColor: RandomColor(),\n\t\tMsg: make(chan string, MSG_BUFFER),\n\t\tready: make(chan struct{}, 1),\n\t}\n}\n\nfunc (c *Client) ColoredName() string {\n\treturn ColorString(c.Color, c.Name)\n}\n\nfunc (c *Client) Write(msg string) {\n\tc.term.Write([]byte(msg + \"\\r\\n\"))\n}\n\nfunc (c *Client) WriteLines(msg []string) {\n\tfor _, line := range msg {\n\t\tc.Write(line)\n\t}\n}\n\nfunc (c *Client) IsSilenced() bool {\n\treturn c.silencedUntil.After(time.Now())\n}\n\nfunc (c *Client) Silence(d time.Duration) {\n\tc.silencedUntil = time.Now().Add(d)\n}\n\nfunc (c *Client) Resize(width int, height int) error {\n\terr := c.term.SetSize(width, height)\n\tif err != nil {\n\t\tlogger.Errorf(\"Resize failed: %dx%d\", width, height)\n\t\treturn err\n\t}\n\tc.termWidth, c.termHeight = width, height\n\treturn nil\n}\n\nfunc (c *Client) Rename(name string) {\n\tc.Name = name\n\tc.term.SetPrompt(fmt.Sprintf(\"[%s] \", c.ColoredName()))\n}\n\nfunc (c *Client) Fingerprint() string {\n\treturn c.Conn.Permissions.Extensions[\"fingerprint\"]\n}\n\nfunc (c *Client) handleShell(channel ssh.Channel) {\n\tdefer channel.Close()\n\n\t\/\/ FIXME: This shouldn't live here, need to restructure the call chaining.\n\tc.Server.Add(c)\n\tgo func() {\n\t\t\/\/ Block until done, then remove.\n\t\tc.Conn.Wait()\n\t\tc.Server.Remove(c)\n\t}()\n\n\tgo func() {\n\t\tfor msg := range c.Msg {\n\t\t\tc.Write(msg)\n\t\t}\n\t}()\n\n\tfor {\n\t\tline, err := c.term.ReadLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tparts := strings.SplitN(line, \" \", 3)\n\t\tisCmd := strings.HasPrefix(parts[0], \"\/\")\n\n\t\tif isCmd {\n\t\t\t\/\/ TODO: Factor this out.\n\t\t\tswitch parts[0] {\n\t\t\tcase \"\/test-colors\": \/\/ Shh, this command is a secret!\n\t\t\t\tc.Write(ColorString(\"32\", \"Lorem ipsum dolor sit amet,\"))\n\t\t\t\tc.Write(\"consectetur \" + ColorString(\"31;1\", \"adipiscing\") + \" elit.\")\n\t\t\tcase \"\/exit\":\n\t\t\t\tchannel.Close()\n\t\t\tcase \"\/help\":\n\t\t\t\tc.WriteLines(strings.Split(HELP_TEXT, \"\\n\"))\n\t\t\tcase \"\/about\":\n\t\t\t\tc.WriteLines(strings.Split(ABOUT_TEXT, \"\\n\"))\n\t\t\tcase \"\/uptime\":\n\t\t\t\tc.Write(c.Server.Uptime())\n\t\t\tcase \"\/me\":\n\t\t\t\tme := strings.TrimLeft(line, \"\/me\")\n\t\t\t\tif me == \"\" {\n\t\t\t\t\tme = \" is at a loss for words.\"\n\t\t\t\t}\n\t\t\t\tmsg := fmt.Sprintf(\"** %s%s\", c.ColoredName(), me)\n\t\t\t\tif c.IsSilenced() || len(msg) > 1000 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Message rejected.\")\n\t\t\t\t} else {\n\t\t\t\t\tc.Server.Broadcast(msg, nil)\n\t\t\t\t}\n\t\t\tcase \"\/nick\":\n\t\t\t\tif len(parts) == 2 {\n\t\t\t\t\tc.Server.Rename(c, parts[1])\n\t\t\t\t} else {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/nick $NAME\")\n\t\t\t\t}\n\t\t\tcase \"\/whois\":\n\t\t\t\tif len(parts) == 2 {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client != nil {\n\t\t\t\t\t\tversion := RE_STRIP_TEXT.ReplaceAllString(string(client.Conn.ClientVersion()), \"\")\n\t\t\t\t\t\tif len(version) > 100 {\n\t\t\t\t\t\t\tversion = \"Evil Jerk with a superlong string\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> %s is %s via %s\", client.ColoredName(), client.Fingerprint(), version)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/whois $NAME\")\n\t\t\t\t}\n\t\t\tcase \"\/list\":\n\t\t\t\tnames := c.Server.List(nil)\n\t\t\t\tc.Msg <- fmt.Sprintf(\"-> %d connected: %s\", len(names), strings.Join(names, \", \"))\n\t\t\tcase \"\/ban\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) != 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/ban $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfingerprint := client.Fingerprint()\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Banned by %s.\", c.ColoredName()))\n\t\t\t\t\t\tc.Server.Ban(fingerprint, nil)\n\t\t\t\t\t\tclient.Conn.Close()\n\t\t\t\t\t\tc.Server.Broadcast(fmt.Sprintf(\"* %s was banned by %s\", parts[1], c.ColoredName()), nil)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"\/op\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) != 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/op $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfingerprint := client.Fingerprint()\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Made op by %s.\", c.ColoredName()))\n\t\t\t\t\t\tc.Server.Op(fingerprint)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"\/silence\":\n\t\t\t\tif !c.Server.IsOp(c) {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> You're not an admin.\")\n\t\t\t\t} else if len(parts) < 2 {\n\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Missing $NAME from: \/silence $NAME\")\n\t\t\t\t} else {\n\t\t\t\t\tduration := time.Duration(5) * time.Minute\n\t\t\t\t\tif len(parts) >= 3 {\n\t\t\t\t\t\tparsedDuration, err := time.ParseDuration(parts[2])\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\tduration = parsedDuration\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tclient := c.Server.Who(parts[1])\n\t\t\t\t\tif client == nil {\n\t\t\t\t\t\tc.Msg <- fmt.Sprintf(\"-> No such name: %s\", parts[1])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tclient.Silence(duration)\n\t\t\t\t\t\tclient.Write(fmt.Sprintf(\"-> Silenced for %s by %s.\", duration, c.ColoredName()))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tc.Msg <- fmt.Sprintf(\"-> Invalid command: %s\", line)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"%s: %s\", c.ColoredName(), line)\n\t\tif c.IsSilenced() || len(msg) > 1000 {\n\t\t\tc.Msg <- fmt.Sprintf(\"-> Message rejected.\")\n\t\t\tcontinue\n\t\t}\n\t\tc.Server.Broadcast(msg, c)\n\t}\n\n}\n\nfunc (c *Client) handleChannels(channels <-chan ssh.NewChannel) {\n\tprompt := fmt.Sprintf(\"[%s] \", c.ColoredName())\n\n\thasShell := false\n\n\tfor ch := range channels {\n\t\tif t := ch.ChannelType(); t != \"session\" {\n\t\t\tch.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"unknown channel type: %s\", t))\n\t\t\tcontinue\n\t\t}\n\n\t\tchannel, requests, err := ch.Accept()\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Could not accept channel: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer channel.Close()\n\n\t\tc.term = terminal.NewTerminal(channel, prompt)\n\t\tfor req := range requests {\n\t\t\tvar width, height int\n\t\t\tvar ok bool\n\n\t\t\tswitch req.Type {\n\t\t\tcase \"shell\":\n\t\t\t\tif c.term != nil && !hasShell {\n\t\t\t\t\tgo c.handleShell(channel)\n\t\t\t\t\tok = true\n\t\t\t\t\thasShell = true\n\t\t\t\t}\n\t\t\tcase \"pty-req\":\n\t\t\t\twidth, height, ok = parsePtyRequest(req.Payload)\n\t\t\t\tif ok {\n\t\t\t\t\terr := c.Resize(width, height)\n\t\t\t\t\tok = err == nil\n\t\t\t\t}\n\t\t\tcase \"window-change\":\n\t\t\t\twidth, height, ok = parseWinchRequest(req.Payload)\n\t\t\t\tif ok {\n\t\t\t\t\terr := c.Resize(width, height)\n\t\t\t\t\tok = err == nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(ok, nil)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gehirndns\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"github.com\/kr\/pretty\"\n)\n\nconst (\n\tAPIENDPOINT = \"https:\/\/cp.gehirn.jp\/api\/dns\/\"\n)\n\ntype ZoneId uint\n\ntype Client struct {\n\tendpoint *url.URL\n\tapiToken string\n\tapiSecret string\n}\n\nfunc NewClient(zoneId ZoneId, apiToken, apiSecret string) *Client {\n\tendpoint, err := url.Parse(APIENDPOINT)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tendpoint.Path = path.Join(\n\t\tendpoint.Path,\n\t\t\"resource\",\n\t\tstrconv.Itoa(int(zoneId)))\n\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t\tapiToken: apiToken,\n\t\tapiSecret: apiSecret,\n\t}\n}\n\nfunc (c *Client) buildURL(relativePath string) (endpoint *url.URL) {\n\tendpoint = new(url.URL)\n\t*endpoint = *c.endpoint\n\tendpoint.Path = path.Join(endpoint.Path, relativePath)\n\treturn\n}\n\nfunc (c *Client) makeRequest(method, path string, body io.Reader) (req *http.Request, err error) {\n\tendpoint := c.buildURL(path)\n\treq, err = http.NewRequest(method, endpoint.String(), body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.SetBasicAuth(c.apiToken, c.apiSecret)\n\treq.Header.Add(\"Content-Type\", \"text\/json;charset=utf8\")\n\treturn\n}\n\nfunc (c *Client) request(req *http.Request, body interface{}) (err error) {\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(resp.Status)\n\t}\n\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err = decoder.Decode(&body); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (c *Client) GetResources() (err error) {\n\treq, err := c.makeRequest(\"GET\", \"\", nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbody := struct {\n\t\tResource struct {\n\t\t\tSOA SOARecord\n\t\t\tNS []NSRecord\n\t\t\tA []ARecord\n\t\t\tAAAA []AAAARecord\n\t\t\tCNAME []CNAMERecord\n\t\t\tMX []MXRecord\n\t\t\tTXT []TXTRecord\n\t\t\tSRV []SRVRecord\n\t\t}\n\t\tSuccessful bool `json:\"is_success\"`\n\t}{}\n\n\tif err = c.request(req, &body); err != nil {\n\t\treturn\n\t} else if !body.Successful {\n\t\treturn fmt.Errorf(\"unsuccessful\")\n\t}\n\n\tpretty.Println(body.Resource)\n\treturn\n}\n\nfunc (c *Client) AddNS(name, ns HostName, ttl Seconds) (record *NSRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"NS\"\n\t)\n\n\trecord = &NSRecord{\n\t\tNameServer: ns,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddA(name HostName, addr IPv4, ttl Seconds) (record *ARecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"A\"\n\t)\n\n\trecord = &ARecord{\n\t\tIPAddress: addr,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddAAAA(name HostName, addr IPv6, ttl Seconds) (record *AAAARecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"AAAA\"\n\t)\n\n\trecord = &AAAARecord{\n\t\tIPAddress: addr,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddCNAME(name, to HostName, ttl Seconds) (record *CNAMERecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"CNAME\"\n\t)\n\n\trecord = &CNAMERecord{\n\t\tAliasTo: to,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddMX(name, mailServer HostName, priority Priority, ttl Seconds) (record *MXRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"MX\"\n\t)\n\n\trecord = &MXRecord{\n\t\tMailServer: mailServer,\n\t\tPriority: priority,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddTXT(name HostName, value string, ttl Seconds) (record *TXTRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"TXT\"\n\t)\n\n\trecord = &TXTRecord{\n\t\tValue: value,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddSRV(name, target HostName, port, weight uint, priority Priority, ttl Seconds) (record *SRVRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"SRV\"\n\t)\n\n\trecord = &SRVRecord{\n\t\tTarget: target,\n\t\tPort: port,\n\t\tWeight: weight,\n\t\tPriority: priority,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) encodeJSON(object interface{}) (reader io.Reader, err error) {\n\tbuffer := bytes.NewBuffer(nil)\n\tencoder := json.NewEncoder(buffer)\n\terr = encoder.Encode(object)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treader = buffer\n\treturn\n}\n\nfunc (c *Client) AddResource(record IRecord) (err error) {\n\tbodyObject := struct {\n\t\tResource IRecord\n\t}{\n\t\tResource: record,\n\t}\n\tbody, err := c.encodeJSON(bodyObject)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trequest, err := c.makeRequest(\"POST\", \"\", body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.request(request, record)\n\treturn\n}\n<commit_msg>return response message as error if error occured<commit_after>package gehirndns\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"github.com\/kr\/pretty\"\n)\n\nconst (\n\tAPIENDPOINT = \"https:\/\/cp.gehirn.jp\/api\/dns\/\"\n)\n\ntype ZoneId uint\n\ntype Client struct {\n\tendpoint *url.URL\n\tapiToken string\n\tapiSecret string\n}\n\nfunc NewClient(zoneId ZoneId, apiToken, apiSecret string) *Client {\n\tendpoint, err := url.Parse(APIENDPOINT)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tendpoint.Path = path.Join(\n\t\tendpoint.Path,\n\t\t\"resource\",\n\t\tstrconv.Itoa(int(zoneId)))\n\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t\tapiToken: apiToken,\n\t\tapiSecret: apiSecret,\n\t}\n}\n\nfunc (c *Client) buildURL(relativePath string) (endpoint *url.URL) {\n\tendpoint = new(url.URL)\n\t*endpoint = *c.endpoint\n\tendpoint.Path = path.Join(endpoint.Path, relativePath)\n\treturn\n}\n\nfunc (c *Client) makeRequest(method, path string, body io.Reader) (req *http.Request, err error) {\n\tendpoint := c.buildURL(path)\n\treq, err = http.NewRequest(method, endpoint.String(), body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.SetBasicAuth(c.apiToken, c.apiSecret)\n\treq.Header.Add(\"Content-Type\", \"text\/json;charset=utf8\")\n\treturn\n}\n\nfunc (c *Client) request(req *http.Request, body interface{}) (err error) {\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tbody := struct {\n\t\t\tError struct {\n\t\t\t\tCode uint `json:\"code\"`\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t} `json:\"error\"`\n\t\t\tSuccessful bool `json:\"is_success\"`\n\t\t}{}\n\n\t\tdecoder := json.NewDecoder(resp.Body)\n\t\terr = decoder.Decode(&body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(resp.Status)\n\t\t}\n\n\t\treturn fmt.Errorf(body.Error.Message)\n\t}\n\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err = decoder.Decode(&body); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (c *Client) GetResources() (err error) {\n\treq, err := c.makeRequest(\"GET\", \"\", nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbody := struct {\n\t\tResource struct {\n\t\t\tSOA SOARecord\n\t\t\tNS []NSRecord\n\t\t\tA []ARecord\n\t\t\tAAAA []AAAARecord\n\t\t\tCNAME []CNAMERecord\n\t\t\tMX []MXRecord\n\t\t\tTXT []TXTRecord\n\t\t\tSRV []SRVRecord\n\t\t}\n\t\tSuccessful bool `json:\"is_success\"`\n\t}{}\n\n\tif err = c.request(req, &body); err != nil {\n\t\treturn\n\t} else if !body.Successful {\n\t\treturn fmt.Errorf(\"unsuccessful\")\n\t}\n\n\tpretty.Println(body.Resource)\n\treturn\n}\n\nfunc (c *Client) AddNS(name, ns HostName, ttl Seconds) (record *NSRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"NS\"\n\t)\n\n\trecord = &NSRecord{\n\t\tNameServer: ns,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddA(name HostName, addr IPv4, ttl Seconds) (record *ARecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"A\"\n\t)\n\n\trecord = &ARecord{\n\t\tIPAddress: addr,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddAAAA(name HostName, addr IPv6, ttl Seconds) (record *AAAARecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"AAAA\"\n\t)\n\n\trecord = &AAAARecord{\n\t\tIPAddress: addr,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddCNAME(name, to HostName, ttl Seconds) (record *CNAMERecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"CNAME\"\n\t)\n\n\trecord = &CNAMERecord{\n\t\tAliasTo: to,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddMX(name, mailServer HostName, priority Priority, ttl Seconds) (record *MXRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"MX\"\n\t)\n\n\trecord = &MXRecord{\n\t\tMailServer: mailServer,\n\t\tPriority: priority,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddTXT(name HostName, value string, ttl Seconds) (record *TXTRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"TXT\"\n\t)\n\n\trecord = &TXTRecord{\n\t\tValue: value,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) AddSRV(name, target HostName, port, weight uint, priority Priority, ttl Seconds) (record *SRVRecord, err error) {\n\tconst (\n\t\trecordType RecordType = \"SRV\"\n\t)\n\n\trecord = &SRVRecord{\n\t\tTarget: target,\n\t\tPort: port,\n\t\tWeight: weight,\n\t\tPriority: priority,\n\t\tRecord: Record{\n\t\t\tHostName: name,\n\t\t\tType: recordType,\n\t\t\tTTL: ttl,\n\t\t},\n\t}\n\n\terr = c.AddResource(record)\n\treturn\n}\n\nfunc (c *Client) encodeJSON(object interface{}) (reader io.Reader, err error) {\n\tbuffer := bytes.NewBuffer(nil)\n\tencoder := json.NewEncoder(buffer)\n\terr = encoder.Encode(object)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treader = buffer\n\treturn\n}\n\nfunc (c *Client) AddResource(record IRecord) (err error) {\n\tbodyObject := struct {\n\t\tResource IRecord\n\t}{\n\t\tResource: record,\n\t}\n\tbody, err := c.encodeJSON(bodyObject)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trequest, err := c.makeRequest(\"POST\", \"\", body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.request(request, record)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package cas\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ Client configuration options\ntype Options struct {\n\tURL *url.URL \/\/ URL to the CAS service\n\tStore TicketStore \/\/ Custom TicketStore, if nil a MemoryStore will be used\n}\n\n\/\/ Client implements the main protocol\ntype Client struct {\n\turl *url.URL\n\ttickets TicketStore\n\tclient *http.Client\n\n\tmu sync.Mutex\n\tsessions map[string]string\n}\n\n\/\/ NewClient creates a Client with the provided Options.\nfunc NewClient(options *Options) *Client {\n\tif glog.V(2) {\n\t\tglog.Infof(\"cas: new client with options %v\", options)\n\t}\n\n\tvar tickets TicketStore\n\tif options.Store != nil {\n\t\ttickets = options.Store\n\t} else {\n\t\ttickets = &MemoryStore{}\n\t}\n\n\treturn &Client{\n\t\turl: options.URL,\n\t\ttickets: tickets,\n\t\tclient: &http.Client{},\n\t\tsessions: make(map[string]string),\n\t}\n}\n\n\/\/ Handle wraps a http.Handler to provide CAS authentication for the handler.\nfunc (c *Client) Handle(h http.Handler) http.Handler {\n\treturn &clientHandler{\n\t\tc: c,\n\t\th: h,\n\t}\n}\n\n\/\/ HandleFunc wraps a function to provide CAS authentication for the handler function.\nfunc (c *Client) HandleFunc(h func(http.ResponseWriter, *http.Request)) http.Handler {\n\treturn c.Handle(http.HandlerFunc(h))\n}\n\n\/\/ requestURL determines an absolute URL from the http.Request.\nfunc requestURL(r *http.Request) (*url.URL, error) {\n\tu, err := url.Parse(r.URL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu.Host = r.Host\n\tu.Scheme = \"http\"\n\n\tif scheme := r.Header.Get(\"X-Forwarded-Proto\"); scheme != \"\" {\n\t\tu.Scheme = scheme\n\t} else if r.TLS != nil {\n\t\tu.Scheme = \"https\"\n\t}\n\n\treturn u, nil\n}\n\n\/\/ LoginUrlForRequest determines the CAS login URL for the http.Request.\nfunc (c *Client) LoginUrlForRequest(r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"login\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ LogoutUrlForRequest determines the CAS logout URL for the http.Request.\nfunc (c *Client) LogoutUrlForRequest(r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"logout\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn u.String(), nil\n}\n\n\/\/ ServiceValidateUrlForRequest determines the CAS serviceValidate URL for the ticket and http.Request.\nfunc (c *Client) ServiceValidateUrlForRequest(ticket string, r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"serviceValidate\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tq.Add(\"ticket\", ticket)\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ ValidateUrlForRequest determines the CAS validate URL for the ticket and http.Request.\nfunc (c *Client) ValidateUrlForRequest(ticket string, r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"validate\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tq.Add(\"ticket\", ticket)\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ RedirectToLogout replies to the request with a redirect URL to log out of CAS.\nfunc (c *Client) RedirectToLogout(w http.ResponseWriter, r *http.Request) {\n\tu, err := c.LogoutUrlForRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Logging out, redirecting client to %v with status %v\",\n\t\t\tu, http.StatusFound)\n\t}\n\n\tc.clearSession(w, r)\n\thttp.Redirect(w, r, u, http.StatusFound)\n}\n\n\/\/ RedirectToLogout replies to the request with a redirect URL to authenticate with CAS.\nfunc (c *Client) RedirectToLogin(w http.ResponseWriter, r *http.Request) {\n\tu, err := c.LoginUrlForRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Redirecting client to %v with status %v\", u, http.StatusFound)\n\t}\n\n\thttp.Redirect(w, r, u, http.StatusFound)\n}\n\n\/\/ validateTicket performs CAS ticket validation with the given ticket and service.\n\/\/\n\/\/ If the request returns a 404 then validateTicketCas1 will be returned.\nfunc (c *Client) validateTicket(ticket string, service *http.Request) error {\n\tif glog.V(2) {\n\t\tserviceUrl, _ := requestURL(service)\n\t\tglog.Infof(\"Validating ticket %v for service %v\", ticket, serviceUrl)\n\t}\n\n\tu, err := c.ServiceValidateUrlForRequest(ticket, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Header.Add(\"User-Agent\", \"Golang CAS client gopkg.in\/cas\")\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Attempting ticket validation with %v\", r.URL)\n\t}\n\n\tresp, err := c.client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Request %v %v returned %v\",\n\t\t\tr.Method, r.URL,\n\t\t\tresp.Status)\n\t}\n\n\tif resp.StatusCode == http.StatusNotFound {\n\t\treturn c.validateTicketCas1(ticket, service)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"cas: validate ticket: %v\", string(body))\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Received authentication response\\n%v\", string(body))\n\t}\n\n\tsuccess, err := ParseServiceResponse(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Parsed ServiceResponse: %#v\", success)\n\t}\n\n\tif err := c.tickets.Write(ticket, success); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateTicketCas1 performs CAS protocol 1 ticket validation.\nfunc (c *Client) validateTicketCas1(ticket string, service *http.Request) error {\n\tu, err := c.ValidateUrlForRequest(ticket, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Header.Add(\"User-Agent\", \"Golang CAS client gopkg.in\/cas\")\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Attempting ticket validation with %v\", r.URL)\n\t}\n\n\tresp, err := c.client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Request %v %v returned %v\",\n\t\t\tr.Method, r.URL,\n\t\t\tresp.Status)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody := string(data)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"cas: validate ticket: %v\", body)\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Received authentication response\\n%v\", body)\n\t}\n\n\tif body == \"no\\n\\n\" {\n\t\treturn nil \/\/ not logged in\n\t}\n\n\tsuccess := &AuthenticationResponse{\n\t\tUser: body[4 : len(body)-1],\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Parsed ServiceResponse: %#v\", success)\n\t}\n\n\tif err := c.tickets.Write(ticket, success); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ getSession finds or creates a session for the request.\n\/\/\n\/\/ A cookie is set on the response if one is not provided with the request.\n\/\/ Validates the ticket if the URL parameter is provided.\nfunc (c *Client) getSession(w http.ResponseWriter, r *http.Request) {\n\tcookie := getCookie(w, r)\n\n\tif s, ok := c.sessions[cookie.Value]; ok {\n\t\tif t, err := c.tickets.Read(s); err == nil {\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Re-used ticket %s for %s\", s, t.User)\n\t\t\t}\n\n\t\t\tsetAuthenticationResponse(r, t)\n\t\t\treturn\n\t\t} else {\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Infof(\"Ticket %v not in %T: %v\", s, c.tickets, err)\n\t\t\t}\n\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Clearing ticket %s, no longer exists in ticket store\", s)\n\t\t\t}\n\n\t\t\tclearCookie(w, cookie)\n\t\t}\n\t}\n\n\tif ticket := r.URL.Query().Get(\"ticket\"); ticket != \"\" {\n\t\tif err := c.validateTicket(ticket, r); err != nil {\n\t\t\treturn \/\/ allow ServeHTTP()\n\t\t}\n\n\t\tc.setSession(cookie.Value, ticket)\n\n\t\tif t, err := c.tickets.Read(ticket); err == nil {\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Validated ticket %s for %s\", ticket, t.User)\n\t\t\t}\n\n\t\t\tsetAuthenticationResponse(r, t)\n\t\t\treturn\n\t\t} else {\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Infof(\"Ticket %v not in %T: %v\", ticket, c.tickets, err)\n\t\t\t}\n\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Clearing ticket %s, no longer exists in ticket store\", ticket)\n\t\t\t}\n\n\t\t\tclearCookie(w, cookie)\n\t\t}\n\t}\n}\n\n\/\/ getCookie finds or creates the session cookie on the response.\nfunc getCookie(w http.ResponseWriter, r *http.Request) *http.Cookie {\n\tc, err := r.Cookie(sessionCookieName)\n\tif err != nil {\n\t\t\/\/ NOTE: Intentionally not enabling HttpOnly so the cookie can\n\t\t\/\/ still be used by Ajax requests.\n\t\tc = &http.Cookie{\n\t\t\tName: sessionCookieName,\n\t\t\tValue: newSessionId(),\n\t\t\tMaxAge: 86400,\n\t\t\tHttpOnly: false,\n\t\t}\n\n\t\tif glog.V(2) {\n\t\t\tglog.Infof(\"Setting %v cookie with value: %v\", c.Name, c.Value)\n\t\t}\n\n\t\tr.AddCookie(c) \/\/ so we can find it later if required\n\t\thttp.SetCookie(w, c)\n\t}\n\n\treturn c\n}\n\n\/\/ newSessionId generates a new opaque session identifier for use in the cookie.\nfunc newSessionId() string {\n\tconst alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\t\/\/ generate 64 character string\n\tbytes := make([]byte, 64)\n\trand.Read(bytes)\n\n\tfor k, v := range bytes {\n\t\tbytes[k] = alphabet[v%byte(len(alphabet))]\n\t}\n\n\treturn string(bytes)\n}\n\n\/\/ clearCookie invalidates and removes the cookie from the client.\nfunc clearCookie(w http.ResponseWriter, c *http.Cookie) {\n\tc.MaxAge = -1\n\thttp.SetCookie(w, c)\n}\n\n\/\/ setSession stores the session id to ticket mapping in the Client.\nfunc (c *Client) setSession(id string, ticket string) {\n\tif glog.V(2) {\n\t\tglog.Infof(\"Recording session, %v -> %v\", id, ticket)\n\t}\n\n\tc.mu.Lock()\n\tc.sessions[id] = ticket\n\tc.mu.Unlock()\n}\n\n\/\/ clearSession removes the session from the client and clears the cookie.\nfunc (c *Client) clearSession(w http.ResponseWriter, r *http.Request) {\n\tcookie := getCookie(w, r)\n\n\tif s, ok := c.sessions[cookie.Value]; ok {\n\t\tif err := c.tickets.Delete(s); err != nil {\n\t\t\tfmt.Printf(\"Failed to remove %v from %T: %v\\n\", cookie.Value, c.tickets, err)\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Errorf(\"Failed to remove %v from %T: %v\", cookie.Value, c.tickets, err)\n\t\t\t}\n\t\t}\n\n\t\tc.deleteSession(s)\n\t}\n\n\tclearCookie(w, cookie)\n}\n\n\/\/ deleteSession removes the session from the client\nfunc (c *Client) deleteSession(id string) {\n\tc.mu.Lock()\n\tdelete(c.sessions, id)\n\tc.mu.Unlock()\n}\n\n\/\/ findAndDeleteSessionWithTicket removes the session from the client via Single Log Out\n\/\/\n\/\/ When a Single Log Out request is received we receive the service ticket identidier. This\n\/\/ function loops through the sessions to find the matching session id. Once retrieved the\n\/\/ session is removed from the client. When the session is next requested the getSession\n\/\/ function will notice the session is invalid and revalidate the user.\nfunc (c *Client) findAndDeleteSessionWithTicket(ticket string) {\n\tvar id string\n\tfor s, t := range c.sessions {\n\t\tif t == ticket {\n\t\t\tid = s\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id == \"\" {\n\t\treturn\n\t}\n\n\tc.deleteSession(id)\n}\n<commit_msg>Add glog Error message for validation error<commit_after>package cas\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ Client configuration options\ntype Options struct {\n\tURL *url.URL \/\/ URL to the CAS service\n\tStore TicketStore \/\/ Custom TicketStore, if nil a MemoryStore will be used\n}\n\n\/\/ Client implements the main protocol\ntype Client struct {\n\turl *url.URL\n\ttickets TicketStore\n\tclient *http.Client\n\n\tmu sync.Mutex\n\tsessions map[string]string\n}\n\n\/\/ NewClient creates a Client with the provided Options.\nfunc NewClient(options *Options) *Client {\n\tif glog.V(2) {\n\t\tglog.Infof(\"cas: new client with options %v\", options)\n\t}\n\n\tvar tickets TicketStore\n\tif options.Store != nil {\n\t\ttickets = options.Store\n\t} else {\n\t\ttickets = &MemoryStore{}\n\t}\n\n\treturn &Client{\n\t\turl: options.URL,\n\t\ttickets: tickets,\n\t\tclient: &http.Client{},\n\t\tsessions: make(map[string]string),\n\t}\n}\n\n\/\/ Handle wraps a http.Handler to provide CAS authentication for the handler.\nfunc (c *Client) Handle(h http.Handler) http.Handler {\n\treturn &clientHandler{\n\t\tc: c,\n\t\th: h,\n\t}\n}\n\n\/\/ HandleFunc wraps a function to provide CAS authentication for the handler function.\nfunc (c *Client) HandleFunc(h func(http.ResponseWriter, *http.Request)) http.Handler {\n\treturn c.Handle(http.HandlerFunc(h))\n}\n\n\/\/ requestURL determines an absolute URL from the http.Request.\nfunc requestURL(r *http.Request) (*url.URL, error) {\n\tu, err := url.Parse(r.URL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu.Host = r.Host\n\tu.Scheme = \"http\"\n\n\tif scheme := r.Header.Get(\"X-Forwarded-Proto\"); scheme != \"\" {\n\t\tu.Scheme = scheme\n\t} else if r.TLS != nil {\n\t\tu.Scheme = \"https\"\n\t}\n\n\treturn u, nil\n}\n\n\/\/ LoginUrlForRequest determines the CAS login URL for the http.Request.\nfunc (c *Client) LoginUrlForRequest(r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"login\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ LogoutUrlForRequest determines the CAS logout URL for the http.Request.\nfunc (c *Client) LogoutUrlForRequest(r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"logout\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn u.String(), nil\n}\n\n\/\/ ServiceValidateUrlForRequest determines the CAS serviceValidate URL for the ticket and http.Request.\nfunc (c *Client) ServiceValidateUrlForRequest(ticket string, r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"serviceValidate\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tq.Add(\"ticket\", ticket)\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ ValidateUrlForRequest determines the CAS validate URL for the ticket and http.Request.\nfunc (c *Client) ValidateUrlForRequest(ticket string, r *http.Request) (string, error) {\n\tu, err := c.url.Parse(\"validate\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tservice, err := requestURL(r)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Add(\"service\", sanitisedURLString(service))\n\tq.Add(\"ticket\", ticket)\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}\n\n\/\/ RedirectToLogout replies to the request with a redirect URL to log out of CAS.\nfunc (c *Client) RedirectToLogout(w http.ResponseWriter, r *http.Request) {\n\tu, err := c.LogoutUrlForRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Logging out, redirecting client to %v with status %v\",\n\t\t\tu, http.StatusFound)\n\t}\n\n\tc.clearSession(w, r)\n\thttp.Redirect(w, r, u, http.StatusFound)\n}\n\n\/\/ RedirectToLogout replies to the request with a redirect URL to authenticate with CAS.\nfunc (c *Client) RedirectToLogin(w http.ResponseWriter, r *http.Request) {\n\tu, err := c.LoginUrlForRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Redirecting client to %v with status %v\", u, http.StatusFound)\n\t}\n\n\thttp.Redirect(w, r, u, http.StatusFound)\n}\n\n\/\/ validateTicket performs CAS ticket validation with the given ticket and service.\n\/\/\n\/\/ If the request returns a 404 then validateTicketCas1 will be returned.\nfunc (c *Client) validateTicket(ticket string, service *http.Request) error {\n\tif glog.V(2) {\n\t\tserviceUrl, _ := requestURL(service)\n\t\tglog.Infof(\"Validating ticket %v for service %v\", ticket, serviceUrl)\n\t}\n\n\tu, err := c.ServiceValidateUrlForRequest(ticket, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Header.Add(\"User-Agent\", \"Golang CAS client gopkg.in\/cas\")\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Attempting ticket validation with %v\", r.URL)\n\t}\n\n\tresp, err := c.client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Request %v %v returned %v\",\n\t\t\tr.Method, r.URL,\n\t\t\tresp.Status)\n\t}\n\n\tif resp.StatusCode == http.StatusNotFound {\n\t\treturn c.validateTicketCas1(ticket, service)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"cas: validate ticket: %v\", string(body))\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Received authentication response\\n%v\", string(body))\n\t}\n\n\tsuccess, err := ParseServiceResponse(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Parsed ServiceResponse: %#v\", success)\n\t}\n\n\tif err := c.tickets.Write(ticket, success); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateTicketCas1 performs CAS protocol 1 ticket validation.\nfunc (c *Client) validateTicketCas1(ticket string, service *http.Request) error {\n\tu, err := c.ValidateUrlForRequest(ticket, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Header.Add(\"User-Agent\", \"Golang CAS client gopkg.in\/cas\")\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Attempting ticket validation with %v\", r.URL)\n\t}\n\n\tresp, err := c.client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Info(\"Request %v %v returned %v\",\n\t\t\tr.Method, r.URL,\n\t\t\tresp.Status)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody := string(data)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"cas: validate ticket: %v\", body)\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Received authentication response\\n%v\", body)\n\t}\n\n\tif body == \"no\\n\\n\" {\n\t\treturn nil \/\/ not logged in\n\t}\n\n\tsuccess := &AuthenticationResponse{\n\t\tUser: body[4 : len(body)-1],\n\t}\n\n\tif glog.V(2) {\n\t\tglog.Infof(\"Parsed ServiceResponse: %#v\", success)\n\t}\n\n\tif err := c.tickets.Write(ticket, success); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ getSession finds or creates a session for the request.\n\/\/\n\/\/ A cookie is set on the response if one is not provided with the request.\n\/\/ Validates the ticket if the URL parameter is provided.\nfunc (c *Client) getSession(w http.ResponseWriter, r *http.Request) {\n\tcookie := getCookie(w, r)\n\n\tif s, ok := c.sessions[cookie.Value]; ok {\n\t\tif t, err := c.tickets.Read(s); err == nil {\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Re-used ticket %s for %s\", s, t.User)\n\t\t\t}\n\n\t\t\tsetAuthenticationResponse(r, t)\n\t\t\treturn\n\t\t} else {\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Infof(\"Ticket %v not in %T: %v\", s, c.tickets, err)\n\t\t\t}\n\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Clearing ticket %s, no longer exists in ticket store\", s)\n\t\t\t}\n\n\t\t\tclearCookie(w, cookie)\n\t\t}\n\t}\n\n\tif ticket := r.URL.Query().Get(\"ticket\"); ticket != \"\" {\n\t\tif err := c.validateTicket(ticket, r); err != nil {\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Infof(\"Error validating ticket: %v\", err)\n\t\t\t}\n\t\t\treturn \/\/ allow ServeHTTP()\n\t\t}\n\n\t\tc.setSession(cookie.Value, ticket)\n\n\t\tif t, err := c.tickets.Read(ticket); err == nil {\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Validated ticket %s for %s\", ticket, t.User)\n\t\t\t}\n\n\t\t\tsetAuthenticationResponse(r, t)\n\t\t\treturn\n\t\t} else {\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Infof(\"Ticket %v not in %T: %v\", ticket, c.tickets, err)\n\t\t\t}\n\n\t\t\tif glog.V(1) {\n\t\t\t\tglog.Infof(\"Clearing ticket %s, no longer exists in ticket store\", ticket)\n\t\t\t}\n\n\t\t\tclearCookie(w, cookie)\n\t\t}\n\t}\n}\n\n\/\/ getCookie finds or creates the session cookie on the response.\nfunc getCookie(w http.ResponseWriter, r *http.Request) *http.Cookie {\n\tc, err := r.Cookie(sessionCookieName)\n\tif err != nil {\n\t\t\/\/ NOTE: Intentionally not enabling HttpOnly so the cookie can\n\t\t\/\/ still be used by Ajax requests.\n\t\tc = &http.Cookie{\n\t\t\tName: sessionCookieName,\n\t\t\tValue: newSessionId(),\n\t\t\tMaxAge: 86400,\n\t\t\tHttpOnly: false,\n\t\t}\n\n\t\tif glog.V(2) {\n\t\t\tglog.Infof(\"Setting %v cookie with value: %v\", c.Name, c.Value)\n\t\t}\n\n\t\tr.AddCookie(c) \/\/ so we can find it later if required\n\t\thttp.SetCookie(w, c)\n\t}\n\n\treturn c\n}\n\n\/\/ newSessionId generates a new opaque session identifier for use in the cookie.\nfunc newSessionId() string {\n\tconst alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\t\/\/ generate 64 character string\n\tbytes := make([]byte, 64)\n\trand.Read(bytes)\n\n\tfor k, v := range bytes {\n\t\tbytes[k] = alphabet[v%byte(len(alphabet))]\n\t}\n\n\treturn string(bytes)\n}\n\n\/\/ clearCookie invalidates and removes the cookie from the client.\nfunc clearCookie(w http.ResponseWriter, c *http.Cookie) {\n\tc.MaxAge = -1\n\thttp.SetCookie(w, c)\n}\n\n\/\/ setSession stores the session id to ticket mapping in the Client.\nfunc (c *Client) setSession(id string, ticket string) {\n\tif glog.V(2) {\n\t\tglog.Infof(\"Recording session, %v -> %v\", id, ticket)\n\t}\n\n\tc.mu.Lock()\n\tc.sessions[id] = ticket\n\tc.mu.Unlock()\n}\n\n\/\/ clearSession removes the session from the client and clears the cookie.\nfunc (c *Client) clearSession(w http.ResponseWriter, r *http.Request) {\n\tcookie := getCookie(w, r)\n\n\tif s, ok := c.sessions[cookie.Value]; ok {\n\t\tif err := c.tickets.Delete(s); err != nil {\n\t\t\tfmt.Printf(\"Failed to remove %v from %T: %v\\n\", cookie.Value, c.tickets, err)\n\t\t\tif glog.V(2) {\n\t\t\t\tglog.Errorf(\"Failed to remove %v from %T: %v\", cookie.Value, c.tickets, err)\n\t\t\t}\n\t\t}\n\n\t\tc.deleteSession(s)\n\t}\n\n\tclearCookie(w, cookie)\n}\n\n\/\/ deleteSession removes the session from the client\nfunc (c *Client) deleteSession(id string) {\n\tc.mu.Lock()\n\tdelete(c.sessions, id)\n\tc.mu.Unlock()\n}\n\n\/\/ findAndDeleteSessionWithTicket removes the session from the client via Single Log Out\n\/\/\n\/\/ When a Single Log Out request is received we receive the service ticket identidier. This\n\/\/ function loops through the sessions to find the matching session id. Once retrieved the\n\/\/ session is removed from the client. When the session is next requested the getSession\n\/\/ function will notice the session is invalid and revalidate the user.\nfunc (c *Client) findAndDeleteSessionWithTicket(ticket string) {\n\tvar id string\n\tfor s, t := range c.sessions {\n\t\tif t == ticket {\n\t\t\tid = s\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif id == \"\" {\n\t\treturn\n\t}\n\n\tc.deleteSession(id)\n}\n<|endoftext|>"} {"text":"<commit_before>package sensorsanalytics\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Client sensoranalytics client\ntype Client struct {\n\tconsumer Consumer\n\tprojectName *string\n\tenableTimeFree bool\n\tappVersion *string\n\tsuperProperties map[string]interface{}\n\tnamePattern *regexp.Regexp\n}\n\n\/\/ NewClient create new client\nfunc NewClient(consumer Consumer, projectName string, timeFree bool) (*Client, error) {\n\tvar c Client\n\tc.consumer = consumer\n\tif projectName == \"\" {\n\t\treturn &c, errors.New(\"project_name must not be empty\")\n\t}\n\tc.projectName = &projectName\n\tc.enableTimeFree = timeFree\n\tnamePattern, err := regexp.Compile(\"^([a-zA-Z_$][a-zA-Z0-9_$]{0,99})\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.namePattern = namePattern\n\tc.ClearSuperProperties()\n\treturn &c, nil\n}\n\nfunc (c *Client) match(input string) bool {\n\tif c.namePattern.Match([]byte(input)) {\n\t\tfor _, keyword := range FieldKeywords {\n\t\t\tif keyword == input {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (c *Client) now() int64 {\n\treturn time.Now().Unix() * 1000\n}\n\n\/\/ RegisterSuperProperties 设置每个事件都带有的一些公共属性,当 track 的 properties 和 super properties 有相同的 key 时,将采用 track 的\n\/\/ :param superProperties 公共属性\nfunc (c *Client) RegisterSuperProperties(superProperties map[string]interface{}) {\n\tfor k, v := range superProperties {\n\t\tc.superProperties[k] = v\n\t}\n}\n\n\/\/ ClearSuperProperties 删除所有已设置的事件公共属性\nfunc (c *Client) ClearSuperProperties() {\n\tc.superProperties = map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t}\n}\n\n\/\/ Track 跟踪一个用户的行为。\n\/\/ :param distinctID: 用户的唯一标识\n\/\/ :param eventName: 事件名称\n\/\/ :param properties: 事件的属性\nfunc (c *Client) Track(distinctID string, eventName string, properties map[string]interface{}, isLoginID bool) error {\n\tallProperties := c.superProperties\n\tif properties != nil {\n\t\tfor k, v := range properties {\n\t\t\tallProperties[k] = v\n\t\t}\n\t}\n\treturn c.trackEvent(\"track\", eventName, distinctID, \"\", allProperties, isLoginID)\n}\n\n\/\/ TrackSignup 这个接口是一个较为复杂的功能,请在使用前先阅读相关说明:http:\/\/www.sensorsdata.cn\/manual\/track_signup.html,\n\/\/ 并在必要时联系我们的技术支持人员。\n\/\/ :param distinct_id: 用户注册之后的唯一标识\n\/\/ :param original_id: 用户注册前的唯一标识\n\/\/ :param properties: 事件的属性\nfunc (c *Client) TrackSignup(distinctID string, originalID string, properties map[string]interface{}) error {\n\tif len(originalID) == 0 {\n\t\treturn fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [original_id] must not be empty\")\n\t}\n\tif len(originalID) > 255 {\n\t\treturn fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"the max length of property [original_id] is 255\")\n\t}\n\tallProperties := c.superProperties\n\tif properties != nil {\n\t\tfor key, value := range properties {\n\t\t\tallProperties[key] = value\n\t\t}\n\t}\n\treturn c.trackEvent(\"track_signup\", \"$SignUp\", distinctID, originalID, allProperties, false)\n}\n\nfunc (c *Client) normalizeData(data map[string]interface{}) (map[string]interface{}, error) {\n\t\/\/ 检查 distinct_id\n\tdistinctIDI, ok := data[\"distinct_id\"]\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [distinct_id] must not be empty\")\n\t}\n\tdistinctID, ok := distinctIDI.(string)\n\tif !ok || len(distinctID) == 0 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [distinct_id] must not be empty\")\n\t}\n\tif len(distinctID) > 255 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"the max length of [distinct_id] is 255\")\n\t}\n\t\/\/ 检查 time\n\ttsI, ok := data[\"time\"]\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must not be empty\")\n\t}\n\tts, ok := tsI.(int64)\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must be int64\")\n\t}\n\ttsNum := len(strconv.FormatInt(ts, 10))\n\tif tsNum < 10 || tsNum > 13 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must be a timestamp in microseconds\")\n\t}\n\tif tsNum == 10 {\n\t\tts *= 1000\n\t}\n\tdata[\"time\"] = ts\n\n\t\/\/ 检查 event name\n\teventI, ok := data[\"event\"]\n\tif ok {\n\t\tevent, ok := eventI.(string)\n\t\tif !ok {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [event] must no be empty\")\n\t\t}\n\t\tif !c.match(event) {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"event name must be a valid variable name. [event=%s]\", event))\n\t\t}\n\t}\n\t\/\/ 检查 project name\n\tprojectI, ok := data[\"project\"]\n\tif ok {\n\t\tproject, ok := projectI.(string)\n\t\tif !ok {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [project] must no be empty\")\n\t\t}\n\t\tif !c.match(project) {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"project name must be a valid variable name. [project=%s]\", project))\n\t\t}\n\t}\n\t\/\/ 检查 properties\n\tvar eventType string\n\teventTypeI, ok := data[\"type\"]\n\tif ok {\n\t\teventType = eventTypeI.(string)\n\n\t}\n\tpropertiesi, ok := data[\"properties\"]\n\tif ok {\n\t\tproperties, ok := propertiesi.(map[string]interface{})\n\t\tif ok {\n\t\t\tfor key, value := range properties {\n\t\t\t\tif len(key) > 255 {\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the max length of property key is 256. [key=%s]\", key))\n\t\t\t\t}\n\t\t\t\tif !c.match(key) {\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the property key must be a valid variable name. [key=%s]\", key))\n\t\t\t\t}\n\t\t\t\tswitch value.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tv, ok := value.(string)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tif len(v) > 8192 {\n\t\t\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the max length of property value is 8192. [value=%s]\", value))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase bool:\n\t\t\t\t\tif eventType != \"profile_unset\" {\n\t\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"property value must be a str\/int\/float\/list. [value=%s]\", reflect.TypeOf(value)))\n\t\t\t\t\t}\n\t\t\t\tcase int, int32, int64, float32, float64, []string:\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"property value must be a str\/int\/float\/list. [value=%s]\", reflect.TypeOf(value)))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"properties must be a map[string]interface{}\")\n\t\t}\n\t}\n\treturn data, nil\n}\n\nfunc (c *Client) getLibProperties() map[string]interface{} {\n\tlibProperties := map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t\t\"$lib_method\": \"code\",\n\t}\n\tif appVersion, ok := c.superProperties[\"$app_version\"]; ok {\n\t\tlibProperties[\"$app_version\"] = appVersion\n\t}\n\treturn libProperties\n}\n\n\/\/ getCommonProperties 构造所有 Event 通用的属性\nfunc (c *Client) getCommonProperties() map[string]interface{} {\n\tcommonProperties := map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t}\n\tif c.appVersion != nil {\n\t\tcommonProperties[\"$app_version\"] = c.appVersion\n\t}\n\treturn commonProperties\n}\n\n\/\/ extractUserTime 如果用户传入了 $time 字段,则不使用当前时间。\nfunc (c *Client) extractUserTime(properties map[string]interface{}) *int64 {\n\tif properties != nil {\n\t\tti, ok := properties[\"$time\"]\n\t\tif ok {\n\t\t\tt, ok := ti.(int64)\n\t\t\tif ok {\n\t\t\t\tdelete(properties, \"$time\")\n\t\t\t\treturn &t\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ProfileSet 直接设置一个用户的 Profile,如果已存在则覆盖\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileSet(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_set\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileSetOnce 直接设置一个用户的 Profile,如果某个 Profile 已存在则不设置。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileSetOnce(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_set_once\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileIncrement 增减\/减少一个用户的某一个或者多个数值类型的 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileIncrement(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_increment\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileAppend 追加一个用户的某一个或者多个集合类型的 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileAppend(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_append\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileUnset 删除一个用户的一个或者多个 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profile_keys: 用户属性键值列表\nfunc (c *Client) ProfileUnset(distinctID string, profileKeys []string, isLoginID bool) error {\n\tprofileMap := make(map[string]interface{}, len(profileKeys))\n\tfor _, v := range profileKeys {\n\t\tprofileMap[v] = true\n\t}\n\treturn c.trackEvent(\"profile_unset\", \"\", distinctID, \"\", profileMap, isLoginID)\n}\n\n\/\/ ProfileDelete 删除整个用户的信息。\n\/\/ :param distinct_id: 用户的唯一标识\nfunc (c *Client) ProfileDelete(distinctID string, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_delete\", \"\", distinctID, \"\", map[string]interface{}{}, isLoginID)\n}\n\nfunc (c *Client) trackEvent(eventType string, eventName string, distinctID string, originalID string, properties map[string]interface{}, isLoginID bool) error {\n\tvar eventTime int64\n\tt := c.extractUserTime(properties)\n\tif t != nil {\n\t\teventTime = *t\n\t} else {\n\t\teventTime = c.now()\n\t}\n\tif isLoginID {\n\t\tproperties[\"$is_login_id\"] = true\n\t}\n\tdata := map[string]interface{}{\n\t\t\"type\": eventType,\n\t\t\"time\": eventTime,\n\t\t\"distinct_id\": distinctID,\n\t\t\"properties\": properties,\n\t\t\"lib\": c.getLibProperties(),\n\t}\n\tif c.projectName != nil {\n\t\tdata[\"project\"] = *c.projectName\n\t}\n\tif eventType == \"track\" || eventType == \"track_signup\" {\n\t\tdata[\"event\"] = eventName\n\t}\n\tif eventType == \"track_signup\" {\n\t\tdata[\"original_id\"] = originalID\n\t}\n\tif c.enableTimeFree {\n\t\tdata[\"time_free\"] = true\n\t}\n\tdata, err := c.normalizeData(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.consumer.Send(data)\n}\n\n\/\/ Flush 对于不立即发送数据的 Consumer,调用此接口应当立即进行已有数据的发送。\nfunc (c *Client) Flush() error {\n\treturn c.consumer.Flush()\n}\n\n\/\/ Close 在进程结束或者数据发送完成时,应当调用此接口,以保证所有数据被发送完毕。如果发生意外,此方法将抛出异常。\nfunc (c *Client) Close() error {\n\treturn c.consumer.Close()\n}\n<commit_msg>Fix time_free and is_login_id<commit_after>package sensorsanalytics\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Client sensoranalytics client\ntype Client struct {\n\tconsumer Consumer\n\tprojectName *string\n\tenableTimeFree bool\n\tappVersion *string\n\tsuperProperties map[string]interface{}\n\tnamePattern *regexp.Regexp\n}\n\n\/\/ NewClient create new client\nfunc NewClient(consumer Consumer, projectName string, timeFree bool) (*Client, error) {\n\tvar c Client\n\tc.consumer = consumer\n\tif projectName == \"\" {\n\t\treturn &c, errors.New(\"project_name must not be empty\")\n\t}\n\tc.projectName = &projectName\n\tc.enableTimeFree = timeFree\n\tnamePattern, err := regexp.Compile(\"^([a-zA-Z_$][a-zA-Z0-9_$]{0,99})\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.namePattern = namePattern\n\tc.ClearSuperProperties()\n\treturn &c, nil\n}\n\nfunc (c *Client) match(input string) bool {\n\tif c.namePattern.Match([]byte(input)) {\n\t\tfor _, keyword := range FieldKeywords {\n\t\t\tif keyword == input {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (c *Client) now() int64 {\n\treturn time.Now().Unix() * 1000\n}\n\n\/\/ RegisterSuperProperties 设置每个事件都带有的一些公共属性,当 track 的 properties 和 super properties 有相同的 key 时,将采用 track 的\n\/\/ :param superProperties 公共属性\nfunc (c *Client) RegisterSuperProperties(superProperties map[string]interface{}) {\n\tfor k, v := range superProperties {\n\t\tc.superProperties[k] = v\n\t}\n}\n\n\/\/ ClearSuperProperties 删除所有已设置的事件公共属性\nfunc (c *Client) ClearSuperProperties() {\n\tc.superProperties = map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t}\n}\n\n\/\/ Track 跟踪一个用户的行为。\n\/\/ :param distinctID: 用户的唯一标识\n\/\/ :param eventName: 事件名称\n\/\/ :param properties: 事件的属性\nfunc (c *Client) Track(distinctID string, eventName string, properties map[string]interface{}, isLoginID bool) error {\n\tallProperties := c.superProperties\n\tif properties != nil {\n\t\tfor k, v := range properties {\n\t\t\tallProperties[k] = v\n\t\t}\n\t}\n\treturn c.trackEvent(\"track\", eventName, distinctID, \"\", allProperties, isLoginID)\n}\n\n\/\/ TrackSignup 这个接口是一个较为复杂的功能,请在使用前先阅读相关说明:http:\/\/www.sensorsdata.cn\/manual\/track_signup.html,\n\/\/ 并在必要时联系我们的技术支持人员。\n\/\/ :param distinct_id: 用户注册之后的唯一标识\n\/\/ :param original_id: 用户注册前的唯一标识\n\/\/ :param properties: 事件的属性\nfunc (c *Client) TrackSignup(distinctID string, originalID string, properties map[string]interface{}) error {\n\tif len(originalID) == 0 {\n\t\treturn fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [original_id] must not be empty\")\n\t}\n\tif len(originalID) > 255 {\n\t\treturn fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"the max length of property [original_id] is 255\")\n\t}\n\tallProperties := c.superProperties\n\tif properties != nil {\n\t\tfor key, value := range properties {\n\t\t\tallProperties[key] = value\n\t\t}\n\t}\n\treturn c.trackEvent(\"track_signup\", \"$SignUp\", distinctID, originalID, allProperties, false)\n}\n\nfunc (c *Client) normalizeData(data map[string]interface{}) (map[string]interface{}, error) {\n\t\/\/ 检查 distinct_id\n\tdistinctIDI, ok := data[\"distinct_id\"]\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [distinct_id] must not be empty\")\n\t}\n\tdistinctID, ok := distinctIDI.(string)\n\tif !ok || len(distinctID) == 0 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [distinct_id] must not be empty\")\n\t}\n\tif len(distinctID) > 255 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"the max length of [distinct_id] is 255\")\n\t}\n\t\/\/ 检查 time\n\ttsI, ok := data[\"time\"]\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must not be empty\")\n\t}\n\tts, ok := tsI.(int64)\n\tif !ok {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must be int64\")\n\t}\n\ttsNum := len(strconv.FormatInt(ts, 10))\n\tif tsNum < 10 || tsNum > 13 {\n\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [time] must be a timestamp in microseconds\")\n\t}\n\tif tsNum == 10 {\n\t\tts *= 1000\n\t}\n\tdata[\"time\"] = ts\n\n\t\/\/ 检查 event name\n\teventI, ok := data[\"event\"]\n\tif ok {\n\t\tevent, ok := eventI.(string)\n\t\tif !ok {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [event] must no be empty\")\n\t\t}\n\t\tif !c.match(event) {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"event name must be a valid variable name. [event=%s]\", event))\n\t\t}\n\t}\n\t\/\/ 检查 project name\n\tprojectI, ok := data[\"project\"]\n\tif ok {\n\t\tproject, ok := projectI.(string)\n\t\tif !ok {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"property [project] must no be empty\")\n\t\t}\n\t\tif !c.match(project) {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"project name must be a valid variable name. [project=%s]\", project))\n\t\t}\n\t}\n\t\/\/ 检查 properties\n\tvar eventType string\n\teventTypeI, ok := data[\"type\"]\n\tif ok {\n\t\teventType = eventTypeI.(string)\n\n\t}\n\tpropertiesi, ok := data[\"properties\"]\n\tif ok {\n\t\tproperties, ok := propertiesi.(map[string]interface{})\n\t\tif ok {\n\t\t\tfor key, value := range properties {\n\t\t\t\tif len(key) > 255 {\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the max length of property key is 256. [key=%s]\", key))\n\t\t\t\t}\n\t\t\t\tif !c.match(key) {\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the property key must be a valid variable name. [key=%s]\", key))\n\t\t\t\t}\n\t\t\t\tswitch value.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tv, ok := value.(string)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tif len(v) > 8192 {\n\t\t\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"the max length of property value is 8192. [value=%s]\", value))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase bool:\n\t\t\t\t\tif eventType != \"profile_unset\" && key != \"time_free\" && key != \"$is_login_id\" {\n\t\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"property value must be a str\/int\/float\/list. [key=%s, value=%s]\", key, reflect.TypeOf(value)))\n\t\t\t\t\t}\n\t\t\t\tcase int, int32, int64, float32, float64, []string:\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, fmt.Sprintf(\"default: property value must be a str\/int\/float\/list. [key=%s, value=%s]\", key, reflect.TypeOf(value)))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn data, fmt.Errorf(\"%s: %s\", ErrIllegalDataException, \"properties must be a map[string]interface{}\")\n\t\t}\n\t}\n\treturn data, nil\n}\n\nfunc (c *Client) getLibProperties() map[string]interface{} {\n\tlibProperties := map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t\t\"$lib_method\": \"code\",\n\t}\n\tif appVersion, ok := c.superProperties[\"$app_version\"]; ok {\n\t\tlibProperties[\"$app_version\"] = appVersion\n\t}\n\treturn libProperties\n}\n\n\/\/ getCommonProperties 构造所有 Event 通用的属性\nfunc (c *Client) getCommonProperties() map[string]interface{} {\n\tcommonProperties := map[string]interface{}{\n\t\t\"$lib\": \"golang\",\n\t\t\"$lib_version\": SDKVersion,\n\t}\n\tif c.appVersion != nil {\n\t\tcommonProperties[\"$app_version\"] = c.appVersion\n\t}\n\treturn commonProperties\n}\n\n\/\/ extractUserTime 如果用户传入了 $time 字段,则不使用当前时间。\nfunc (c *Client) extractUserTime(properties map[string]interface{}) *int64 {\n\tif properties != nil {\n\t\tti, ok := properties[\"$time\"]\n\t\tif ok {\n\t\t\tt, ok := ti.(int64)\n\t\t\tif ok {\n\t\t\t\tdelete(properties, \"$time\")\n\t\t\t\treturn &t\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ProfileSet 直接设置一个用户的 Profile,如果已存在则覆盖\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileSet(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_set\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileSetOnce 直接设置一个用户的 Profile,如果某个 Profile 已存在则不设置。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileSetOnce(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_set_once\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileIncrement 增减\/减少一个用户的某一个或者多个数值类型的 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileIncrement(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_increment\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileAppend 追加一个用户的某一个或者多个集合类型的 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profiles: 用户属性\nfunc (c *Client) ProfileAppend(distinctID string, profiles map[string]interface{}, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_append\", \"\", distinctID, \"\", profiles, isLoginID)\n}\n\n\/\/ ProfileUnset 删除一个用户的一个或者多个 Profile。\n\/\/ :param distinct_id: 用户的唯一标识\n\/\/ :param profile_keys: 用户属性键值列表\nfunc (c *Client) ProfileUnset(distinctID string, profileKeys []string, isLoginID bool) error {\n\tprofileMap := make(map[string]interface{}, len(profileKeys))\n\tfor _, v := range profileKeys {\n\t\tprofileMap[v] = true\n\t}\n\treturn c.trackEvent(\"profile_unset\", \"\", distinctID, \"\", profileMap, isLoginID)\n}\n\n\/\/ ProfileDelete 删除整个用户的信息。\n\/\/ :param distinct_id: 用户的唯一标识\nfunc (c *Client) ProfileDelete(distinctID string, isLoginID bool) error {\n\treturn c.trackEvent(\"profile_delete\", \"\", distinctID, \"\", map[string]interface{}{}, isLoginID)\n}\n\nfunc (c *Client) trackEvent(eventType string, eventName string, distinctID string, originalID string, properties map[string]interface{}, isLoginID bool) error {\n\tvar eventTime int64\n\tt := c.extractUserTime(properties)\n\tif t != nil {\n\t\teventTime = *t\n\t} else {\n\t\teventTime = c.now()\n\t}\n\tif isLoginID {\n\t\tproperties[\"$is_login_id\"] = true\n\t}\n\tdata := map[string]interface{}{\n\t\t\"type\": eventType,\n\t\t\"time\": eventTime,\n\t\t\"distinct_id\": distinctID,\n\t\t\"properties\": properties,\n\t\t\"lib\": c.getLibProperties(),\n\t}\n\tif c.projectName != nil {\n\t\tdata[\"project\"] = *c.projectName\n\t}\n\tif eventType == \"track\" || eventType == \"track_signup\" {\n\t\tdata[\"event\"] = eventName\n\t}\n\tif eventType == \"track_signup\" {\n\t\tdata[\"original_id\"] = originalID\n\t}\n\tif c.enableTimeFree {\n\t\tdata[\"time_free\"] = true\n\t}\n\tdata, err := c.normalizeData(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.consumer.Send(data)\n}\n\n\/\/ Flush 对于不立即发送数据的 Consumer,调用此接口应当立即进行已有数据的发送。\nfunc (c *Client) Flush() error {\n\treturn c.consumer.Flush()\n}\n\n\/\/ Close 在进程结束或者数据发送完成时,应当调用此接口,以保证所有数据被发送完毕。如果发生意外,此方法将抛出异常。\nfunc (c *Client) Close() error {\n\treturn c.consumer.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package market\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"golang.org\/x\/oauth2\/clientcredentials\"\n)\n\nconst (\n\tSandbox = \"https:\/\/api.sandbox.paypal.com\"\n\tLive = \"https:\/\/api.paypal.com\"\n)\n\nconst (\n\ttokenRoute = \"\/v1\/oauth2\/token\"\n)\n\ntype Client struct {\n\tclient *http.Client\n\tapiBase string\n\tBNCode string\n}\n\ntype BadResponse struct {\n\tStatus int\n\tBody string\n}\n\nfunc (b *BadResponse) Error() string {\n\treturn \"Bad response from Paypal, status code: \" + strconv.Itoa(b.Status) + \", body is:\\n\" + b.Body\n}\n\n\/\/ NewClient creates a new Paypal marketplace client.\n\/\/ clientID and clientSecret are provided by Paypal.\n\/\/ apiBase should be either market.Sandbox or market.Live\nfunc NewClient(ctx context.Context, clientID, clientSecret, apiBase string) *Client {\n\tconf := &clientcredentials.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tTokenURL: apiBase + tokenRoute,\n\t}\n\treturn &Client{\n\t\tclient: conf.Client(ctx),\n\t\tapiBase: apiBase,\n\t}\n}\n\ntype request struct {\n\tclient *Client\n\tmethod string\n\tendpoint string\n\tbody io.Reader\n\theaders http.Header\n}\n\ntype response struct {\n\tstatus int\n\theaders http.Header\n\tbody io.ReadSeeker\n}\n\nfunc (r *request) do(ctx context.Context) (*response, error) {\n\treq, err := http.NewRequest(r.method, r.client.apiBase+r.endpoint, r.body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tfor k, vs := range r.headers {\n\t\tfor _, v := range vs {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\tif r.client.BNCode != \"\" {\n\t\treq.Header.Set(\"PayPal-Partner-Attribution-Id\", r.client.BNCode)\n\t}\n\tres, err := r.client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tresData, err := ioutil.ReadAll(io.LimitReader(res.Body, 5*1024*1024)) \/\/ Read at most 5 MB\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response{\n\t\tstatus: res.StatusCode,\n\t\theaders: res.Header,\n\t\tbody: bytes.NewReader(resData),\n\t}, nil\n}\n<commit_msg>Increase default timeout on client<commit_after>package market\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"golang.org\/x\/oauth2\/clientcredentials\"\n)\n\nconst (\n\tSandbox = \"https:\/\/api.sandbox.paypal.com\"\n\tLive = \"https:\/\/api.paypal.com\"\n)\n\nconst (\n\ttokenRoute = \"\/v1\/oauth2\/token\"\n)\n\ntype Client struct {\n\tclient *http.Client\n\tapiBase string\n\tBNCode string\n}\n\ntype BadResponse struct {\n\tStatus int\n\tBody string\n}\n\nfunc (b *BadResponse) Error() string {\n\treturn \"Bad response from Paypal, status code: \" + strconv.Itoa(b.Status) + \", body is:\\n\" + b.Body\n}\n\n\/\/ NewClient creates a new Paypal marketplace client.\n\/\/ clientID and clientSecret are provided by Paypal.\n\/\/ apiBase should be either market.Sandbox or market.Live\nfunc NewClient(ctx context.Context, clientID, clientSecret, apiBase string) *Client {\n\tconf := &clientcredentials.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tTokenURL: apiBase + tokenRoute,\n\t}\n\tc := &Client{\n\t\tclient: conf.Client(ctx),\n\t\tapiBase: apiBase,\n\t}\n\tconst timeout = 10 * time.Second\n\tif c.client.Timeout == 0 {\n\t\tc.client.Timeout = timeout\n\t}\n\treturn c\n}\n\ntype request struct {\n\tclient *Client\n\tmethod string\n\tendpoint string\n\tbody io.Reader\n\theaders http.Header\n}\n\ntype response struct {\n\tstatus int\n\theaders http.Header\n\tbody io.ReadSeeker\n}\n\nfunc (r *request) do(ctx context.Context) (*response, error) {\n\treq, err := http.NewRequest(r.method, r.client.apiBase+r.endpoint, r.body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tfor k, vs := range r.headers {\n\t\tfor _, v := range vs {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\tif r.client.BNCode != \"\" {\n\t\treq.Header.Set(\"PayPal-Partner-Attribution-Id\", r.client.BNCode)\n\t}\n\tres, err := r.client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tresData, err := ioutil.ReadAll(io.LimitReader(res.Body, 5*1024*1024)) \/\/ Read at most 5 MB\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response{\n\t\tstatus: res.StatusCode,\n\t\theaders: res.Header,\n\t\tbody: bytes.NewReader(resData),\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/go-github\/v29\/github\"\n\n\t\"github.com\/reviewdog\/reviewdog\"\n\t\"github.com\/reviewdog\/reviewdog\/diff\"\n\t\"github.com\/reviewdog\/reviewdog\/doghouse\"\n\t\"github.com\/reviewdog\/reviewdog\/service\/github\/githubutils\"\n)\n\n\/\/ GitHub check runs API cannot handle too large requests.\n\/\/ Set max number of filtered findings to be shown in check-run summary.\n\/\/ ERROR:\n\/\/ https:\/\/api.github.com\/repos\/easymotion\/vim-easymotion\/check-runs: 422\n\/\/ Invalid request.\n\/\/ Only 65535 characters are allowed; 250684 were supplied. []\nconst maxFilteredFinding = 150\n\n\/\/ > The Checks API limits the number of annotations to a maximum of 50 per API\n\/\/ > request.\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#output-object\nconst maxAnnotationsPerRequest = 50\n\ntype Checker struct {\n\treq *doghouse.CheckRequest\n\tgh checkerGitHubClientInterface\n}\n\nfunc NewChecker(req *doghouse.CheckRequest, gh *github.Client) *Checker {\n\treturn &Checker{req: req, gh: &checkerGitHubClient{Client: gh}}\n}\n\nfunc (ch *Checker) Check(ctx context.Context) (*doghouse.CheckResponse, error) {\n\tvar filediffs []*diff.FileDiff\n\tif ch.req.PullRequest != 0 {\n\t\tvar err error\n\t\tfilediffs, err = ch.pullRequestDiff(ctx, ch.req.PullRequest)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fail to parse diff: %v\", err)\n\t\t}\n\t}\n\n\tresults := annotationsToCheckResults(ch.req.Annotations)\n\tfiltered := reviewdog.FilterCheck(results, filediffs, 1, \"\", ch.req.FilterMode)\n\tcheck, err := ch.createCheck(ctx)\n\tif err != nil {\n\t\t\/\/ If this error is StatusForbidden (403) here, it means reviewdog is\n\t\t\/\/ running on GitHub Actions and has only read permission (because it's\n\t\t\/\/ running for Pull Requests from forked repository). If the token itself\n\t\t\/\/ is invalid, reviewdog should return an error earlier (e.g. when reading\n\t\t\/\/ Pull Requests diff), so it should be ok not to return error here and\n\t\t\/\/ return results instead.\n\t\tif err, ok := err.(*github.ErrorResponse); ok && err.Response.StatusCode == http.StatusForbidden {\n\t\t\treturn &doghouse.CheckResponse{CheckedResults: filtered}, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to create check: %v\", err)\n\t}\n\n\tcheckRun, err := ch.postCheck(ctx, check.GetID(), filtered)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to post result: %v\", err)\n\t}\n\tres := &doghouse.CheckResponse{\n\t\tReportURL: checkRun.GetHTMLURL(),\n\t}\n\treturn res, nil\n}\n\nfunc (ch *Checker) postCheck(ctx context.Context, checkID int64, checks []*reviewdog.FilteredCheck) (*github.CheckRun, error) {\n\tfilterByDiff := ch.req.PullRequest != 0 && !ch.req.OutsideDiff\n\tvar annotations []*github.CheckRunAnnotation\n\tfor _, c := range checks {\n\t\tif !c.InDiff && filterByDiff {\n\t\t\tcontinue\n\t\t}\n\t\tannotations = append(annotations, ch.toCheckRunAnnotation(c))\n\t}\n\tif err := ch.postAnnotations(ctx, checkID, annotations); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to post annotations: %v\", err)\n\t}\n\n\tconclusion := \"success\"\n\tif len(annotations) > 0 {\n\t\tconclusion = ch.conclusion()\n\t}\n\topt := github.UpdateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tStatus: github.String(\"completed\"),\n\t\tConclusion: github.String(conclusion),\n\t\tCompletedAt: &github.Timestamp{Time: time.Now()},\n\t\tOutput: &github.CheckRunOutput{\n\t\t\tTitle: github.String(ch.checkTitle()),\n\t\t\tSummary: github.String(ch.summary(checks)),\n\t\t},\n\t}\n\treturn ch.gh.UpdateCheckRun(ctx, ch.req.Owner, ch.req.Repo, checkID, opt)\n}\n\nfunc (ch *Checker) createCheck(ctx context.Context) (*github.CheckRun, error) {\n\topt := github.CreateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tHeadSHA: ch.req.SHA,\n\t\tStatus: github.String(\"in_progress\"),\n\t}\n\treturn ch.gh.CreateCheckRun(ctx, ch.req.Owner, ch.req.Repo, opt)\n}\n\nfunc (ch *Checker) postAnnotations(ctx context.Context, checkID int64, annotations []*github.CheckRunAnnotation) error {\n\topt := github.UpdateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tOutput: &github.CheckRunOutput{\n\t\t\tTitle: github.String(ch.checkTitle()),\n\t\t\tSummary: github.String(\"\"), \/\/ Post summary with the last reqeust.\n\t\t\tAnnotations: annotations[:min(maxAnnotationsPerRequest, len(annotations))],\n\t\t},\n\t}\n\tif _, err := ch.gh.UpdateCheckRun(ctx, ch.req.Owner, ch.req.Repo, checkID, opt); err != nil {\n\t\treturn err\n\t}\n\tif len(annotations) > maxAnnotationsPerRequest {\n\t\treturn ch.postAnnotations(ctx, checkID, annotations[maxAnnotationsPerRequest:])\n\t}\n\treturn nil\n}\n\nfunc (ch *Checker) checkName() string {\n\tif ch.req.Name != \"\" {\n\t\treturn ch.req.Name\n\t}\n\treturn \"reviewdog\"\n}\n\nfunc (ch *Checker) checkTitle() string {\n\tif name := ch.checkName(); name != \"reviewdog\" {\n\t\treturn fmt.Sprintf(\"reviewdog [%s] report\", name)\n\t}\n\treturn \"reviewdog report\"\n}\n\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#parameters-1\nfunc (ch *Checker) conclusion() string {\n\tswitch strings.ToLower(ch.req.Level) {\n\tcase \"info\", \"warning\":\n\t\treturn \"neutral\"\n\t}\n\treturn \"failure\"\n}\n\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#annotations-object\nfunc (ch *Checker) annotationLevel() string {\n\tswitch strings.ToLower(ch.req.Level) {\n\tcase \"info\":\n\t\treturn \"notice\"\n\tcase \"warning\":\n\t\treturn \"warning\"\n\tcase \"failure\":\n\t\treturn \"failure\"\n\t}\n\treturn \"failure\"\n}\n\nfunc (ch *Checker) summary(checks []*reviewdog.FilteredCheck) string {\n\tvar lines []string\n\tlines = append(lines, \"reported by [reviewdog](https:\/\/github.com\/reviewdog\/reviewdog) :dog:\")\n\n\tvar findings []*reviewdog.FilteredCheck\n\tvar filteredFindings []*reviewdog.FilteredCheck\n\tfor _, c := range checks {\n\t\tif c.InDiff {\n\t\t\tfindings = append(findings, c)\n\t\t} else {\n\t\t\tfilteredFindings = append(filteredFindings, c)\n\t\t}\n\t}\n\tlines = append(lines, ch.summaryFindings(\"Findings\", findings)...)\n\tlines = append(lines, ch.summaryFindings(\"Filtered Findings\", filteredFindings)...)\n\n\treturn strings.Join(lines, \"\\n\")\n}\n\nfunc (ch *Checker) summaryFindings(name string, checks []*reviewdog.FilteredCheck) []string {\n\tvar lines []string\n\tlines = append(lines, \"<details>\")\n\tlines = append(lines, fmt.Sprintf(\"<summary>%s (%d)<\/summary>\", name, len(checks)))\n\tlines = append(lines, \"\")\n\tfor i, c := range checks {\n\t\tif i >= maxFilteredFinding {\n\t\t\tlines = append(lines, \"... (Too many findings. Dropped some findings)\")\n\t\t\tbreak\n\t\t}\n\t\tlines = append(lines, githubutils.LinkedMarkdownCheckResult(\n\t\t\tch.req.Owner, ch.req.Repo, ch.req.SHA, c.CheckResult))\n\t}\n\tlines = append(lines, \"<\/details>\")\n\treturn lines\n}\n\nfunc (ch *Checker) toCheckRunAnnotation(c *reviewdog.FilteredCheck) *github.CheckRunAnnotation {\n\ta := &github.CheckRunAnnotation{\n\t\tPath: github.String(c.Path),\n\t\tStartLine: github.Int(c.Lnum),\n\t\tEndLine: github.Int(c.Lnum),\n\t\tAnnotationLevel: github.String(ch.annotationLevel()),\n\t\tMessage: github.String(c.Message),\n\t}\n\tif ch.req.Name != \"\" {\n\t\ta.Title = github.String(fmt.Sprintf(\"[%s] %s#L%d\", ch.req.Name, c.Path, c.Lnum))\n\t}\n\tif s := strings.Join(c.Lines, \"\\n\"); s != \"\" {\n\t\ta.RawDetails = github.String(s)\n\t}\n\treturn a\n}\n\nfunc (ch *Checker) pullRequestDiff(ctx context.Context, pr int) ([]*diff.FileDiff, error) {\n\td, err := ch.rawPullRequestDiff(ctx, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilediffs, err := diff.ParseMultiFile(bytes.NewReader(d))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fail to parse diff: %v\", err)\n\t}\n\treturn filediffs, nil\n}\n\nfunc (ch *Checker) rawPullRequestDiff(ctx context.Context, pr int) ([]byte, error) {\n\td, err := ch.gh.GetPullRequestDiff(ctx, ch.req.Owner, ch.req.Repo, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn d, nil\n}\n\nfunc annotationsToCheckResults(as []*doghouse.Annotation) []*reviewdog.CheckResult {\n\tcs := make([]*reviewdog.CheckResult, 0, len(as))\n\tfor _, a := range as {\n\t\tcs = append(cs, &reviewdog.CheckResult{\n\t\t\tPath: a.Path,\n\t\t\tLnum: a.Line,\n\t\t\tMessage: a.Message,\n\t\t\tLines: strings.Split(a.RawMessage, \"\\n\"),\n\t\t})\n\t}\n\treturn cs\n}\n\nfunc min(x, y int) int {\n\tif x > y {\n\t\treturn y\n\t}\n\treturn x\n}\n<commit_msg>spelling: request<commit_after>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/go-github\/v29\/github\"\n\n\t\"github.com\/reviewdog\/reviewdog\"\n\t\"github.com\/reviewdog\/reviewdog\/diff\"\n\t\"github.com\/reviewdog\/reviewdog\/doghouse\"\n\t\"github.com\/reviewdog\/reviewdog\/service\/github\/githubutils\"\n)\n\n\/\/ GitHub check runs API cannot handle too large requests.\n\/\/ Set max number of filtered findings to be shown in check-run summary.\n\/\/ ERROR:\n\/\/ https:\/\/api.github.com\/repos\/easymotion\/vim-easymotion\/check-runs: 422\n\/\/ Invalid request.\n\/\/ Only 65535 characters are allowed; 250684 were supplied. []\nconst maxFilteredFinding = 150\n\n\/\/ > The Checks API limits the number of annotations to a maximum of 50 per API\n\/\/ > request.\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#output-object\nconst maxAnnotationsPerRequest = 50\n\ntype Checker struct {\n\treq *doghouse.CheckRequest\n\tgh checkerGitHubClientInterface\n}\n\nfunc NewChecker(req *doghouse.CheckRequest, gh *github.Client) *Checker {\n\treturn &Checker{req: req, gh: &checkerGitHubClient{Client: gh}}\n}\n\nfunc (ch *Checker) Check(ctx context.Context) (*doghouse.CheckResponse, error) {\n\tvar filediffs []*diff.FileDiff\n\tif ch.req.PullRequest != 0 {\n\t\tvar err error\n\t\tfilediffs, err = ch.pullRequestDiff(ctx, ch.req.PullRequest)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fail to parse diff: %v\", err)\n\t\t}\n\t}\n\n\tresults := annotationsToCheckResults(ch.req.Annotations)\n\tfiltered := reviewdog.FilterCheck(results, filediffs, 1, \"\", ch.req.FilterMode)\n\tcheck, err := ch.createCheck(ctx)\n\tif err != nil {\n\t\t\/\/ If this error is StatusForbidden (403) here, it means reviewdog is\n\t\t\/\/ running on GitHub Actions and has only read permission (because it's\n\t\t\/\/ running for Pull Requests from forked repository). If the token itself\n\t\t\/\/ is invalid, reviewdog should return an error earlier (e.g. when reading\n\t\t\/\/ Pull Requests diff), so it should be ok not to return error here and\n\t\t\/\/ return results instead.\n\t\tif err, ok := err.(*github.ErrorResponse); ok && err.Response.StatusCode == http.StatusForbidden {\n\t\t\treturn &doghouse.CheckResponse{CheckedResults: filtered}, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to create check: %v\", err)\n\t}\n\n\tcheckRun, err := ch.postCheck(ctx, check.GetID(), filtered)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to post result: %v\", err)\n\t}\n\tres := &doghouse.CheckResponse{\n\t\tReportURL: checkRun.GetHTMLURL(),\n\t}\n\treturn res, nil\n}\n\nfunc (ch *Checker) postCheck(ctx context.Context, checkID int64, checks []*reviewdog.FilteredCheck) (*github.CheckRun, error) {\n\tfilterByDiff := ch.req.PullRequest != 0 && !ch.req.OutsideDiff\n\tvar annotations []*github.CheckRunAnnotation\n\tfor _, c := range checks {\n\t\tif !c.InDiff && filterByDiff {\n\t\t\tcontinue\n\t\t}\n\t\tannotations = append(annotations, ch.toCheckRunAnnotation(c))\n\t}\n\tif err := ch.postAnnotations(ctx, checkID, annotations); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to post annotations: %v\", err)\n\t}\n\n\tconclusion := \"success\"\n\tif len(annotations) > 0 {\n\t\tconclusion = ch.conclusion()\n\t}\n\topt := github.UpdateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tStatus: github.String(\"completed\"),\n\t\tConclusion: github.String(conclusion),\n\t\tCompletedAt: &github.Timestamp{Time: time.Now()},\n\t\tOutput: &github.CheckRunOutput{\n\t\t\tTitle: github.String(ch.checkTitle()),\n\t\t\tSummary: github.String(ch.summary(checks)),\n\t\t},\n\t}\n\treturn ch.gh.UpdateCheckRun(ctx, ch.req.Owner, ch.req.Repo, checkID, opt)\n}\n\nfunc (ch *Checker) createCheck(ctx context.Context) (*github.CheckRun, error) {\n\topt := github.CreateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tHeadSHA: ch.req.SHA,\n\t\tStatus: github.String(\"in_progress\"),\n\t}\n\treturn ch.gh.CreateCheckRun(ctx, ch.req.Owner, ch.req.Repo, opt)\n}\n\nfunc (ch *Checker) postAnnotations(ctx context.Context, checkID int64, annotations []*github.CheckRunAnnotation) error {\n\topt := github.UpdateCheckRunOptions{\n\t\tName: ch.checkName(),\n\t\tOutput: &github.CheckRunOutput{\n\t\t\tTitle: github.String(ch.checkTitle()),\n\t\t\tSummary: github.String(\"\"), \/\/ Post summary with the last request.\n\t\t\tAnnotations: annotations[:min(maxAnnotationsPerRequest, len(annotations))],\n\t\t},\n\t}\n\tif _, err := ch.gh.UpdateCheckRun(ctx, ch.req.Owner, ch.req.Repo, checkID, opt); err != nil {\n\t\treturn err\n\t}\n\tif len(annotations) > maxAnnotationsPerRequest {\n\t\treturn ch.postAnnotations(ctx, checkID, annotations[maxAnnotationsPerRequest:])\n\t}\n\treturn nil\n}\n\nfunc (ch *Checker) checkName() string {\n\tif ch.req.Name != \"\" {\n\t\treturn ch.req.Name\n\t}\n\treturn \"reviewdog\"\n}\n\nfunc (ch *Checker) checkTitle() string {\n\tif name := ch.checkName(); name != \"reviewdog\" {\n\t\treturn fmt.Sprintf(\"reviewdog [%s] report\", name)\n\t}\n\treturn \"reviewdog report\"\n}\n\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#parameters-1\nfunc (ch *Checker) conclusion() string {\n\tswitch strings.ToLower(ch.req.Level) {\n\tcase \"info\", \"warning\":\n\t\treturn \"neutral\"\n\t}\n\treturn \"failure\"\n}\n\n\/\/ https:\/\/developer.github.com\/v3\/checks\/runs\/#annotations-object\nfunc (ch *Checker) annotationLevel() string {\n\tswitch strings.ToLower(ch.req.Level) {\n\tcase \"info\":\n\t\treturn \"notice\"\n\tcase \"warning\":\n\t\treturn \"warning\"\n\tcase \"failure\":\n\t\treturn \"failure\"\n\t}\n\treturn \"failure\"\n}\n\nfunc (ch *Checker) summary(checks []*reviewdog.FilteredCheck) string {\n\tvar lines []string\n\tlines = append(lines, \"reported by [reviewdog](https:\/\/github.com\/reviewdog\/reviewdog) :dog:\")\n\n\tvar findings []*reviewdog.FilteredCheck\n\tvar filteredFindings []*reviewdog.FilteredCheck\n\tfor _, c := range checks {\n\t\tif c.InDiff {\n\t\t\tfindings = append(findings, c)\n\t\t} else {\n\t\t\tfilteredFindings = append(filteredFindings, c)\n\t\t}\n\t}\n\tlines = append(lines, ch.summaryFindings(\"Findings\", findings)...)\n\tlines = append(lines, ch.summaryFindings(\"Filtered Findings\", filteredFindings)...)\n\n\treturn strings.Join(lines, \"\\n\")\n}\n\nfunc (ch *Checker) summaryFindings(name string, checks []*reviewdog.FilteredCheck) []string {\n\tvar lines []string\n\tlines = append(lines, \"<details>\")\n\tlines = append(lines, fmt.Sprintf(\"<summary>%s (%d)<\/summary>\", name, len(checks)))\n\tlines = append(lines, \"\")\n\tfor i, c := range checks {\n\t\tif i >= maxFilteredFinding {\n\t\t\tlines = append(lines, \"... (Too many findings. Dropped some findings)\")\n\t\t\tbreak\n\t\t}\n\t\tlines = append(lines, githubutils.LinkedMarkdownCheckResult(\n\t\t\tch.req.Owner, ch.req.Repo, ch.req.SHA, c.CheckResult))\n\t}\n\tlines = append(lines, \"<\/details>\")\n\treturn lines\n}\n\nfunc (ch *Checker) toCheckRunAnnotation(c *reviewdog.FilteredCheck) *github.CheckRunAnnotation {\n\ta := &github.CheckRunAnnotation{\n\t\tPath: github.String(c.Path),\n\t\tStartLine: github.Int(c.Lnum),\n\t\tEndLine: github.Int(c.Lnum),\n\t\tAnnotationLevel: github.String(ch.annotationLevel()),\n\t\tMessage: github.String(c.Message),\n\t}\n\tif ch.req.Name != \"\" {\n\t\ta.Title = github.String(fmt.Sprintf(\"[%s] %s#L%d\", ch.req.Name, c.Path, c.Lnum))\n\t}\n\tif s := strings.Join(c.Lines, \"\\n\"); s != \"\" {\n\t\ta.RawDetails = github.String(s)\n\t}\n\treturn a\n}\n\nfunc (ch *Checker) pullRequestDiff(ctx context.Context, pr int) ([]*diff.FileDiff, error) {\n\td, err := ch.rawPullRequestDiff(ctx, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilediffs, err := diff.ParseMultiFile(bytes.NewReader(d))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fail to parse diff: %v\", err)\n\t}\n\treturn filediffs, nil\n}\n\nfunc (ch *Checker) rawPullRequestDiff(ctx context.Context, pr int) ([]byte, error) {\n\td, err := ch.gh.GetPullRequestDiff(ctx, ch.req.Owner, ch.req.Repo, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn d, nil\n}\n\nfunc annotationsToCheckResults(as []*doghouse.Annotation) []*reviewdog.CheckResult {\n\tcs := make([]*reviewdog.CheckResult, 0, len(as))\n\tfor _, a := range as {\n\t\tcs = append(cs, &reviewdog.CheckResult{\n\t\t\tPath: a.Path,\n\t\t\tLnum: a.Line,\n\t\t\tMessage: a.Message,\n\t\t\tLines: strings.Split(a.RawMessage, \"\\n\"),\n\t\t})\n\t}\n\treturn cs\n}\n\nfunc min(x, y int) int {\n\tif x > y {\n\t\treturn y\n\t}\n\treturn x\n}\n<|endoftext|>"} {"text":"<commit_before>package bytebuffer\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\ntype MemoryMappedBuffer struct {\n\t*ByteBuffer\n\tloc string \/\/ location of the memory mapped file\n\tsize int \/\/ size in bytes\n}\n\nfunc NewMemoryMappedBuffer(loc string, size int) (*MemoryMappedBuffer, error) {\n\tf, err := os.Create(loc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.Write(make([]byte, size))\n\n\tfd := int(f.Fd())\n\n\tb, err := syscall.Mmap(fd, 0, size, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MemoryMappedBuffer{\n\t\tNewByteBufferSlice(b),\n\t\tloc,\n\t\tsize,\n\t}, nil\n}\n\nfunc (b *MemoryMappedBuffer) Unmap(removefile bool) error {\n\tif removefile {\n\t\tos.Remove(b.loc)\n\t}\n\n\treturn syscall.Munmap(b.buffer)\n}\n<commit_msg>bytebuffer: MemoryMappedBuffer: always create a fresh mapping<commit_after>package bytebuffer\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ MemoryMappedBuffer is a ByteBuffer that is also mapped into memory\ntype MemoryMappedBuffer struct {\n\t*ByteBuffer\n\tloc string \/\/ location of the memory mapped file\n\tsize int \/\/ size in bytes\n}\n\n\/\/ NewMemoryMappedBuffer will create and return a new instance of a MemoryMappedBuffer\nfunc NewMemoryMappedBuffer(loc string, size int) (*MemoryMappedBuffer, error) {\n\tif _, err := os.Stat(loc); err == nil {\n\t\terr = os.Remove(loc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tf, err := os.Create(loc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.Write(make([]byte, size))\n\n\tfd := int(f.Fd())\n\n\tb, err := syscall.Mmap(fd, 0, size, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MemoryMappedBuffer{\n\t\tNewByteBufferSlice(b),\n\t\tloc,\n\t\tsize,\n\t}, nil\n}\n\n\/\/ Unmap will manually delete the memory mapping of a mapped buffer\nfunc (b *MemoryMappedBuffer) Unmap(removefile bool) error {\n\tif removefile {\n\t\tos.Remove(b.loc)\n\t}\n\n\treturn syscall.Munmap(b.buffer)\n}\n<|endoftext|>"} {"text":"<commit_before>package registry\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype LogfCallback func(format string, args ...interface{})\n\n\/*\n * Discard log messages silently.\n *\/\nfunc Quiet(format string, args ...interface{}) {\n\t\/* discard logs *\/\n}\n\n\/*\n * Pass log messages along to Go's \"log\" module.\n *\/\nfunc Log(format string, args ...interface{}) {\n\tlog.Printf(format, args...)\n}\n\ntype Registry struct {\n\tURL string\n\tClient *http.Client\n\tLogf LogfCallback\n}\n\n\/*\n * Create a new Registry with the given URL and credentials, then Ping()s it\n * before returning it to verify that the registry is available.\n *\n * You can, alternately, construct a Registry manually by populating the fields.\n * This passes http.DefaultTransport to WrapTransport when creating the\n * http.Client.\n *\/\nfunc New(registryUrl, username, password string) (*Registry, error) {\n\ttransport := http.DefaultTransport\n\n\treturn newFromTransport(registryUrl, username, password, transport, Log)\n}\n\n\/*\n * Create a new Registry, as with New, using an http.Transport that disables\n * SSL certificate verification.\n *\/\nfunc NewInsecure(registryUrl, username, password string) (*Registry, error) {\n\ttransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\n\treturn newFromTransport(registryUrl, username, password, transport, Log)\n}\n\n\/*\n * Given an existing http.RoundTripper such as http.DefaultTransport, build the\n * transport stack necessary to authenticate to the Docker registry API. This\n * adds in support for OAuth bearer tokens and HTTP Basic auth, and sets up\n * error handling this library relies on.\n *\/\nfunc WrapTransport(transport http.RoundTripper, url, username, password string) http.RoundTripper {\n\ttokenTransport := &TokenTransport{\n\t\tTransport: transport,\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\tbasicAuthTransport := &BasicTransport{\n\t\tTransport: tokenTransport,\n\t\tURL: url,\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\terrorTransport := &ErrorTransport{\n\t\tTransport: basicAuthTransport,\n\t}\n\treturn errorTransport\n}\n\nfunc newFromTransport(registryUrl, username, password string, transport http.RoundTripper, logf LogfCallback) (*Registry, error) {\n\turl := strings.TrimSuffix(registryUrl, \"\/\")\n\ttransport = WrapTransport(transport, url, username, password)\n\tregistry := &Registry{\n\t\tURL: url,\n\t\tClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t},\n\t\tLogf: logf,\n\t}\n\n\tif err := registry.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn registry, nil\n}\n\nfunc (r *Registry) url(pathTemplate string, args ...interface{}) string {\n\tpathSuffix := fmt.Sprintf(pathTemplate, args...)\n\turl := fmt.Sprintf(\"%s%s\", r.URL, pathSuffix)\n\treturn url\n}\n\nfunc (r *Registry) Ping() error {\n\turl := r.url(\"\/v2\/\")\n\tr.Logf(\"registry.ping url=%s\", url)\n\tresp, err := r.Client.Get(url)\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\tif err != nil {\n\n\t}\n\treturn err\n}\n<commit_msg>G402: TLS InsecureSkipVerify set true. (gosec)<commit_after>package registry\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype LogfCallback func(format string, args ...interface{})\n\n\/*\n * Discard log messages silently.\n *\/\nfunc Quiet(format string, args ...interface{}) {\n\t\/* discard logs *\/\n}\n\n\/*\n * Pass log messages along to Go's \"log\" module.\n *\/\nfunc Log(format string, args ...interface{}) {\n\tlog.Printf(format, args...)\n}\n\ntype Registry struct {\n\tURL string\n\tClient *http.Client\n\tLogf LogfCallback\n}\n\n\/*\n * Create a new Registry with the given URL and credentials, then Ping()s it\n * before returning it to verify that the registry is available.\n *\n * You can, alternately, construct a Registry manually by populating the fields.\n * This passes http.DefaultTransport to WrapTransport when creating the\n * http.Client.\n *\/\nfunc New(registryUrl, username, password string) (*Registry, error) {\n\ttransport := http.DefaultTransport\n\n\treturn newFromTransport(registryUrl, username, password, transport, Log)\n}\n\n\/*\n * Create a new Registry, as with New, using an http.Transport that disables\n * SSL certificate verification.\n *\/\nfunc NewInsecure(registryUrl, username, password string) (*Registry, error) {\n\ttransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true, \/\/nolint:gosec TODO: Why?\n\t\t},\n\t}\n\n\treturn newFromTransport(registryUrl, username, password, transport, Log)\n}\n\n\/*\n * Given an existing http.RoundTripper such as http.DefaultTransport, build the\n * transport stack necessary to authenticate to the Docker registry API. This\n * adds in support for OAuth bearer tokens and HTTP Basic auth, and sets up\n * error handling this library relies on.\n *\/\nfunc WrapTransport(transport http.RoundTripper, url, username, password string) http.RoundTripper {\n\ttokenTransport := &TokenTransport{\n\t\tTransport: transport,\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\tbasicAuthTransport := &BasicTransport{\n\t\tTransport: tokenTransport,\n\t\tURL: url,\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\terrorTransport := &ErrorTransport{\n\t\tTransport: basicAuthTransport,\n\t}\n\treturn errorTransport\n}\n\nfunc newFromTransport(registryUrl, username, password string, transport http.RoundTripper, logf LogfCallback) (*Registry, error) {\n\turl := strings.TrimSuffix(registryUrl, \"\/\")\n\ttransport = WrapTransport(transport, url, username, password)\n\tregistry := &Registry{\n\t\tURL: url,\n\t\tClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t},\n\t\tLogf: logf,\n\t}\n\n\tif err := registry.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn registry, nil\n}\n\nfunc (r *Registry) url(pathTemplate string, args ...interface{}) string {\n\tpathSuffix := fmt.Sprintf(pathTemplate, args...)\n\turl := fmt.Sprintf(\"%s%s\", r.URL, pathSuffix)\n\treturn url\n}\n\nfunc (r *Registry) Ping() error {\n\turl := r.url(\"\/v2\/\")\n\tr.Logf(\"registry.ping url=%s\", url)\n\tresp, err := r.Client.Get(url)\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\tif err != nil {\n\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Builder where the builder is actually executed\n\/\/ over an RPC connection.\ntype builder struct {\n\tclient *rpc.Client\n}\n\n\/\/ BuilderServer wraps a packer.Builder implementation and makes it exportable\n\/\/ as part of a Golang RPC server.\ntype BuilderServer struct {\n\tbuilder packer.Builder\n}\n\ntype BuilderPrepareArgs struct {\n\tConfigs []interface{}\n}\n\ntype BuilderRunArgs struct {\n\tRPCAddress string\n\tResponseAddress string\n}\n\ntype BuilderRunResponse struct {\n\tErr error\n\tRPCAddress string\n}\n\nfunc Builder(client *rpc.Client) *builder {\n\treturn &builder{client}\n}\n\nfunc (b *builder) Prepare(config ...interface{}) (err error) {\n\tcerr := b.client.Call(\"Builder.Prepare\", &BuilderPrepareArgs{config}, &err)\n\tif cerr != nil {\n\t\terr = cerr\n\t}\n\n\treturn\n}\n\nfunc (b *builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Create and start the server for the Build and UI\n\tserver := rpc.NewServer()\n\tRegisterCache(server, cache)\n\tRegisterHook(server, hook)\n\tRegisterUi(server, ui)\n\n\t\/\/ Create a server for the response\n\tresponseL := netListenerInRange(portRangeMin, portRangeMax)\n\trunResponseCh := make(chan *BuilderRunResponse)\n\tgo func() {\n\t\tdefer responseL.Close()\n\n\t\tvar response BuilderRunResponse\n\t\tdefer func() { runResponseCh <- &response }()\n\n\t\tconn, err := responseL.Accept()\n\t\tif err != nil {\n\t\t\tresponse.Err = err\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tdecoder := gob.NewDecoder(conn)\n\t\tif err := decoder.Decode(&response); err != nil {\n\t\t\tresponse.Err = fmt.Errorf(\"Error waiting for Run: %s\", err)\n\t\t}\n\t}()\n\n\targs := &BuilderRunArgs{\n\t\tserveSingleConn(server),\n\t\tresponseL.Addr().String(),\n\t}\n\n\tif err := b.client.Call(\"Builder.Run\", args, new(interface{})); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := <-runResponseCh\n\tif response.Err != nil {\n\t\treturn nil, response.Err\n\t}\n\n\tif response.RPCAddress == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tclient, err := rpc.Dial(\"tcp\", response.RPCAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Artifact(client), nil\n}\n\nfunc (b *builder) Cancel() {\n\tif err := b.client.Call(\"Builder.Cancel\", new(interface{}), new(interface{})); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (b *BuilderServer) Prepare(args *BuilderPrepareArgs, reply *error) error {\n\terr := b.builder.Prepare(args.Configs...)\n\tif err != nil {\n\t\t*reply = NewBasicError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Run(args *BuilderRunArgs, reply *interface{}) error {\n\tclient, err := rpc.Dial(\"tcp\", args.RPCAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseC, err := net.Dial(\"tcp\", args.ResponseAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Run the build in a goroutine so we don't block the RPC connection\n\tgo func() {\n\t\tdefer responseC.Close()\n\n\t\tcache := Cache(client)\n\t\thook := Hook(client)\n\t\tui := &Ui{client}\n\t\tartifact, responseErr := b.builder.Run(ui, hook, cache)\n\t\tresponseAddress := \"\"\n\n\t\tif responseErr == nil && artifact != nil {\n\t\t\t\/\/ Wrap the artifact\n\t\t\tserver := rpc.NewServer()\n\t\t\tRegisterArtifact(server, artifact)\n\t\t\tresponseAddress = serveSingleConn(server)\n\t\t}\n\n\t\tif responseErr != nil {\n\t\t\tresponseErr = NewBasicError(responseErr)\n\t\t}\n\n\t\terr := responseWriter.Encode(&BuilderRunResponse{responseErr, responseAddress})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"BuildServer.Run error: %s\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Cancel(args *interface{}, reply *interface{}) error {\n\tb.builder.Cancel()\n\treturn nil\n}\n<commit_msg>packer\/rpc: error instead of panic cancelling builder<commit_after>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Builder where the builder is actually executed\n\/\/ over an RPC connection.\ntype builder struct {\n\tclient *rpc.Client\n}\n\n\/\/ BuilderServer wraps a packer.Builder implementation and makes it exportable\n\/\/ as part of a Golang RPC server.\ntype BuilderServer struct {\n\tbuilder packer.Builder\n}\n\ntype BuilderPrepareArgs struct {\n\tConfigs []interface{}\n}\n\ntype BuilderRunArgs struct {\n\tRPCAddress string\n\tResponseAddress string\n}\n\ntype BuilderRunResponse struct {\n\tErr error\n\tRPCAddress string\n}\n\nfunc Builder(client *rpc.Client) *builder {\n\treturn &builder{client}\n}\n\nfunc (b *builder) Prepare(config ...interface{}) (err error) {\n\tcerr := b.client.Call(\"Builder.Prepare\", &BuilderPrepareArgs{config}, &err)\n\tif cerr != nil {\n\t\terr = cerr\n\t}\n\n\treturn\n}\n\nfunc (b *builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Create and start the server for the Build and UI\n\tserver := rpc.NewServer()\n\tRegisterCache(server, cache)\n\tRegisterHook(server, hook)\n\tRegisterUi(server, ui)\n\n\t\/\/ Create a server for the response\n\tresponseL := netListenerInRange(portRangeMin, portRangeMax)\n\trunResponseCh := make(chan *BuilderRunResponse)\n\tgo func() {\n\t\tdefer responseL.Close()\n\n\t\tvar response BuilderRunResponse\n\t\tdefer func() { runResponseCh <- &response }()\n\n\t\tconn, err := responseL.Accept()\n\t\tif err != nil {\n\t\t\tresponse.Err = err\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tdecoder := gob.NewDecoder(conn)\n\t\tif err := decoder.Decode(&response); err != nil {\n\t\t\tresponse.Err = fmt.Errorf(\"Error waiting for Run: %s\", err)\n\t\t}\n\t}()\n\n\targs := &BuilderRunArgs{\n\t\tserveSingleConn(server),\n\t\tresponseL.Addr().String(),\n\t}\n\n\tif err := b.client.Call(\"Builder.Run\", args, new(interface{})); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := <-runResponseCh\n\tif response.Err != nil {\n\t\treturn nil, response.Err\n\t}\n\n\tif response.RPCAddress == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tclient, err := rpc.Dial(\"tcp\", response.RPCAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Artifact(client), nil\n}\n\nfunc (b *builder) Cancel() {\n\tif err := b.client.Call(\"Builder.Cancel\", new(interface{}), new(interface{})); err != nil {\n\t\tlog.Printf(\"Error cancelling builder: %s\", err)\n\t}\n}\n\nfunc (b *BuilderServer) Prepare(args *BuilderPrepareArgs, reply *error) error {\n\terr := b.builder.Prepare(args.Configs...)\n\tif err != nil {\n\t\t*reply = NewBasicError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Run(args *BuilderRunArgs, reply *interface{}) error {\n\tclient, err := rpc.Dial(\"tcp\", args.RPCAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseC, err := net.Dial(\"tcp\", args.ResponseAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Run the build in a goroutine so we don't block the RPC connection\n\tgo func() {\n\t\tdefer responseC.Close()\n\n\t\tcache := Cache(client)\n\t\thook := Hook(client)\n\t\tui := &Ui{client}\n\t\tartifact, responseErr := b.builder.Run(ui, hook, cache)\n\t\tresponseAddress := \"\"\n\n\t\tif responseErr == nil && artifact != nil {\n\t\t\t\/\/ Wrap the artifact\n\t\t\tserver := rpc.NewServer()\n\t\t\tRegisterArtifact(server, artifact)\n\t\t\tresponseAddress = serveSingleConn(server)\n\t\t}\n\n\t\tif responseErr != nil {\n\t\t\tresponseErr = NewBasicError(responseErr)\n\t\t}\n\n\t\terr := responseWriter.Encode(&BuilderRunResponse{responseErr, responseAddress})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"BuildServer.Run error: %s\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Cancel(args *interface{}, reply *interface{}) error {\n\tb.builder.Cancel()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage node\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docopt\/docopt-go\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tshutil \"github.com\/termie\/go-shutil\"\n)\n\n\/\/ diagCmd is a struct to hold a command, cmd info and filename to run diagnostic on\ntype diagCmd struct {\n\tinfo string\n\tcmd string\n\tfilename string\n}\n\n\/\/ Diags function collects diagnostic information and logs\nfunc Diags(args []string) {\n\tvar err error\n\tdoc := `Usage:\n calicoctl node diags [--log-dir=<LOG_DIR>]\n\nOptions:\n -h --help Show this screen.\n --log-dir=<LOG_DIR> The directory containing Calico logs.\n [default: \/var\/log\/calico]\n\nDescription:\n This command is used to gather diagnostic information from a Calico node.\n This is usually used when trying to diagnose an issue that may be related to\n your Calico network.\n\n The output of the command explains how to automatically upload the\n diagnostics to http:\/\/transfer.sh for easy sharing of the data. Note that the\n uploaded files will be deleted after 14 days.\n\n This command must be run on the specific Calico node that you are gathering\n diagnostics for.\n`\n\n\targuments, err := docopt.Parse(doc, args, true, \"\", false, false)\n\tif err != nil {\n\t\tfmt.Printf(\"Invalid option: 'calicoctl %s'. Use flag '--help' to read about a specific subcommand.\\n\", strings.Join(args, \" \"))\n\t\tos.Exit(1)\n\t}\n\tif len(arguments) == 0 {\n\t\treturn\n\t}\n\n\trunDiags(arguments[\"--log-dir\"].(string))\n}\n\n\/\/ runDiags takes logDir and runs a sequence of commands to collect diagnostics\nfunc runDiags(logDir string) {\n\n\t\/\/ Note: in for the cmd field in this struct, it can't handle args quoted with space in it\n\t\/\/ For example, you can't add cmd \"do this\", since after the `strings.Fields` it will become `\"do` and `this\"`\n\tcmds := []diagCmd{\n\t\t{\"\", \"date\", \"date\"},\n\t\t{\"\", \"hostname\", \"hostname\"},\n\t\t{\"Dumping netstat\", \"netstat --all --numeric\", \"netstat\"},\n\t\t{\"Dumping routes (IPv4)\", \"ip -4 route\", \"ipv4_route\"},\n\t\t{\"Dumping routes (IPv6)\", \"ip -6 route\", \"ipv6_route\"},\n\t\t{\"Dumping interface info (IPv4)\", \"ip -4 addr\", \"ipv4_addr\"},\n\t\t{\"Dumping interface info (IPv6)\", \"ip -6 addr\", \"ipv6_addr\"},\n\t\t{\"Dumping iptables (IPv4)\", \"iptables-save -c\", \"ipv4_tables\"},\n\t\t{\"Dumping iptables (IPv6)\", \"ip6tables-save -c\", \"ipv6_tables\"},\n\t\t{\"Dumping ipsets\", \"ipset list\", \"ipsets\"},\n\t\t{\"Dumping ipsets (container)\", \"docker run --privileged --net=host calico\/node ipset list\", \"ipset_container\"},\n\t\t{\"Copying journal for calico-node.service\", \"journalctl -u calico-node.service --no-pager\", \"journalctl_calico_node\"},\n\t\t{\"Dumping felix stats\", \"pkill -SIGUSR1 felix\", \"\"},\n\t}\n\n\t\/\/ Make sure the command is run with super user privileges\n\tenforceRoot()\n\n\tfmt.Println(\"Collecting diagnostics\")\n\n\t\/\/ Create a temp directory in \/tmp\n\ttmpDir, err := ioutil.TempDir(\"\", \"calico\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating temp directory to dump logs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"Using temp dir:\", tmpDir)\n\terr = os.Chdir(tmpDir)\n\tif err != nil {\n\t\tfmt.Printf(\"Error changing directory to temp directory to dump logs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Mkdir(\"diagnostics\", os.ModeDir)\n\tdiagsTmpDir := filepath.Join(tmpDir, \"diagnostics\")\n\n\tfor _, v := range cmds {\n\t\twriteDiags(v, diagsTmpDir)\n\t}\n\n\ttmpLogDir := filepath.Join(diagsTmpDir, \"logs\")\n\n\t\/\/ Check if the logDir provided\/default exists and is a directory\n\tfileInfo, err := os.Stat(logDir)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying log files: %v\\n\", err)\n\t} else if fileInfo.IsDir() {\n\t\tfmt.Println(\"Copying Calico logs\")\n\t\terr = shutil.CopyTree(logDir, tmpLogDir, nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error copying log files: %v\\n\", err)\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"No logs found in %s; skipping log copying\", logDir)\n\t}\n\n\t\/\/ Try to copy logs from containers for hosted installs.\n\tgetNodeContainerLogs(tmpLogDir)\n\n\t\/\/ Get the current time and create a tar.gz file with the timestamp in the name\n\ttarFile := fmt.Sprintf(\"diags-%s.tar.gz\", time.Now().Format(\"20060102_150405\"))\n\n\t\/\/ Have to use shell to compress the file because Go archive\/tar can't handle\n\t\/\/ some header metadata longer than a certain length (Ref: https:\/\/github.com\/golang\/go\/issues\/12436)\n\terr = exec.Command(\"tar\", \"-zcvf\", tarFile, \"diagnostics\").Run()\n\tif err != nil {\n\t\tfmt.Printf(\"Error compressing the diagnostics: %v\\n\", err)\n\t}\n\n\ttarFilePath := filepath.Join(tmpDir, tarFile)\n\n\tfmt.Printf(\"\\nDiags saved to %s\\n\", tarFilePath)\n\tfmt.Printf(`If required, you can upload the diagnostics bundle to a file sharing service\nsuch as transfer.sh using curl or similar. For example:\n\n curl --upload-file %s https:\/\/transfer.sh\/%s`, tarFilePath, tarFilePath)\n\tfmt.Println()\n\n}\n\n\/\/ getNodeContainerLogs will attempt to grab logs for any \"calico\" named containers for hosted installs.\nfunc getNodeContainerLogs(logDir string) {\n\tos.Mkdir(logDir, os.ModeDir)\n\n\t\/\/ Get a list of Calico containers running on this Node.\n\tresult, err := exec.Command(\"docker\", \"ps\", \"-a\", \"--filter\", \"\\\"name=calico\\\"\", \"--format\", \"{{.Names}}: {{.CreatedAt}}\").CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"Could not run docker command: %s\\n\", string(result))\n\t\treturn\n\t}\n\n\t\/\/ No Calico containers found.\n\tif string(result) == \"\" {\n\t\tlog.Debug(\"Did not find any Calico containers\")\n\t\treturn\n\t}\n\n\t\/\/ Remove any containers that have \"k8s_POD\" in them.\n\tre := regexp.MustCompile(\"(?m)[\\r\\n]+^.*k8s_POD.*$\")\n\tcontainers := re.ReplaceAllString(string(result), \"\")\n\n\tfmt.Println(\"Copying logs from Calico containers\")\n\terr = ioutil.WriteFile(logDir+\"\/\"+\"container_creation_time\", []byte(containers), 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not save output of `docker ps` command to container_creation_time: %s\\n\", err)\n\t}\n\n\t\/\/ Grab the log for each container and write it as <containerName>.log.\n\tscanner := bufio.NewScanner(strings.NewReader(containers))\n\tfor scanner.Scan() {\n\t\tname := strings.Split(scanner.Text(), \":\")[0]\n\t\tlog.Debugf(\"Getting logs for container %s\", name)\n\t\tcLog, err := exec.Command(\"docker\", \"logs\", name).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Could not pull log for container %s: %s\\n\", name, err)\n\t\t\tcontinue\n\t\t}\n\t\terr = ioutil.WriteFile(logDir+\"\/\"+name+\".log\", cLog, 0666)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to write log for container %s to file: %s\\n\", name, err)\n\t\t}\n\t}\n}\n\n\/\/ writeDiags executes the diagnostic commands and outputs the result in the file\n\/\/ with the filename and directory passed as arguments\nfunc writeDiags(cmds diagCmd, dir string) {\n\n\tif cmds.info != \"\" {\n\t\tfmt.Println(cmds.info)\n\t}\n\n\tparts := strings.Fields(cmds.cmd)\n\n\tcontent, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to run command: %s\\nError: %s\\n\", cmds.cmd, string(content))\n\t}\n\n\t\/\/ This is for the commands we want to run but don't want to save the output\n\t\/\/ or for commands that don't produce any output to stdout\n\tif cmds.filename == \"\" {\n\t\treturn\n\t}\n\n\tfp := filepath.Join(dir, cmds.filename)\n\tif err := ioutil.WriteFile(fp, content, 0666); err != nil {\n\t\tlog.Errorf(\"Error writing diags to file: %s\\n\", err)\n\t}\n}\n<commit_msg>fix up docker filter for name=calico<commit_after>\/\/ Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage node\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docopt\/docopt-go\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tshutil \"github.com\/termie\/go-shutil\"\n)\n\n\/\/ diagCmd is a struct to hold a command, cmd info and filename to run diagnostic on\ntype diagCmd struct {\n\tinfo string\n\tcmd string\n\tfilename string\n}\n\n\/\/ Diags function collects diagnostic information and logs\nfunc Diags(args []string) {\n\tvar err error\n\tdoc := `Usage:\n calicoctl node diags [--log-dir=<LOG_DIR>]\n\nOptions:\n -h --help Show this screen.\n --log-dir=<LOG_DIR> The directory containing Calico logs.\n [default: \/var\/log\/calico]\n\nDescription:\n This command is used to gather diagnostic information from a Calico node.\n This is usually used when trying to diagnose an issue that may be related to\n your Calico network.\n\n The output of the command explains how to automatically upload the\n diagnostics to http:\/\/transfer.sh for easy sharing of the data. Note that the\n uploaded files will be deleted after 14 days.\n\n This command must be run on the specific Calico node that you are gathering\n diagnostics for.\n`\n\n\targuments, err := docopt.Parse(doc, args, true, \"\", false, false)\n\tif err != nil {\n\t\tfmt.Printf(\"Invalid option: 'calicoctl %s'. Use flag '--help' to read about a specific subcommand.\\n\", strings.Join(args, \" \"))\n\t\tos.Exit(1)\n\t}\n\tif len(arguments) == 0 {\n\t\treturn\n\t}\n\n\trunDiags(arguments[\"--log-dir\"].(string))\n}\n\n\/\/ runDiags takes logDir and runs a sequence of commands to collect diagnostics\nfunc runDiags(logDir string) {\n\n\t\/\/ Note: in for the cmd field in this struct, it can't handle args quoted with space in it\n\t\/\/ For example, you can't add cmd \"do this\", since after the `strings.Fields` it will become `\"do` and `this\"`\n\tcmds := []diagCmd{\n\t\t{\"\", \"date\", \"date\"},\n\t\t{\"\", \"hostname\", \"hostname\"},\n\t\t{\"Dumping netstat\", \"netstat --all --numeric\", \"netstat\"},\n\t\t{\"Dumping routes (IPv4)\", \"ip -4 route\", \"ipv4_route\"},\n\t\t{\"Dumping routes (IPv6)\", \"ip -6 route\", \"ipv6_route\"},\n\t\t{\"Dumping interface info (IPv4)\", \"ip -4 addr\", \"ipv4_addr\"},\n\t\t{\"Dumping interface info (IPv6)\", \"ip -6 addr\", \"ipv6_addr\"},\n\t\t{\"Dumping iptables (IPv4)\", \"iptables-save -c\", \"ipv4_tables\"},\n\t\t{\"Dumping iptables (IPv6)\", \"ip6tables-save -c\", \"ipv6_tables\"},\n\t\t{\"Dumping ipsets\", \"ipset list\", \"ipsets\"},\n\t\t{\"Dumping ipsets (container)\", \"docker run --privileged --net=host calico\/node ipset list\", \"ipset_container\"},\n\t\t{\"Copying journal for calico-node.service\", \"journalctl -u calico-node.service --no-pager\", \"journalctl_calico_node\"},\n\t\t{\"Dumping felix stats\", \"pkill -SIGUSR1 felix\", \"\"},\n\t}\n\n\t\/\/ Make sure the command is run with super user privileges\n\tenforceRoot()\n\n\tfmt.Println(\"Collecting diagnostics\")\n\n\t\/\/ Create a temp directory in \/tmp\n\ttmpDir, err := ioutil.TempDir(\"\", \"calico\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating temp directory to dump logs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"Using temp dir:\", tmpDir)\n\terr = os.Chdir(tmpDir)\n\tif err != nil {\n\t\tfmt.Printf(\"Error changing directory to temp directory to dump logs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Mkdir(\"diagnostics\", os.ModeDir)\n\tdiagsTmpDir := filepath.Join(tmpDir, \"diagnostics\")\n\n\tfor _, v := range cmds {\n\t\twriteDiags(v, diagsTmpDir)\n\t}\n\n\ttmpLogDir := filepath.Join(diagsTmpDir, \"logs\")\n\n\t\/\/ Check if the logDir provided\/default exists and is a directory\n\tfileInfo, err := os.Stat(logDir)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying log files: %v\\n\", err)\n\t} else if fileInfo.IsDir() {\n\t\tfmt.Println(\"Copying Calico logs\")\n\t\terr = shutil.CopyTree(logDir, tmpLogDir, nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error copying log files: %v\\n\", err)\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"No logs found in %s; skipping log copying\", logDir)\n\t}\n\n\t\/\/ Try to copy logs from containers for hosted installs.\n\tgetNodeContainerLogs(tmpLogDir)\n\n\t\/\/ Get the current time and create a tar.gz file with the timestamp in the name\n\ttarFile := fmt.Sprintf(\"diags-%s.tar.gz\", time.Now().Format(\"20060102_150405\"))\n\n\t\/\/ Have to use shell to compress the file because Go archive\/tar can't handle\n\t\/\/ some header metadata longer than a certain length (Ref: https:\/\/github.com\/golang\/go\/issues\/12436)\n\terr = exec.Command(\"tar\", \"-zcvf\", tarFile, \"diagnostics\").Run()\n\tif err != nil {\n\t\tfmt.Printf(\"Error compressing the diagnostics: %v\\n\", err)\n\t}\n\n\ttarFilePath := filepath.Join(tmpDir, tarFile)\n\n\tfmt.Printf(\"\\nDiags saved to %s\\n\", tarFilePath)\n\tfmt.Printf(`If required, you can upload the diagnostics bundle to a file sharing service\nsuch as transfer.sh using curl or similar. For example:\n\n curl --upload-file %s https:\/\/transfer.sh\/%s`, tarFilePath, tarFilePath)\n\tfmt.Println()\n\n}\n\n\/\/ getNodeContainerLogs will attempt to grab logs for any \"calico\" named containers for hosted installs.\nfunc getNodeContainerLogs(logDir string) {\n\tos.Mkdir(logDir, os.ModeDir)\n\n\t\/\/ Get a list of Calico containers running on this Node.\n\tresult, err := exec.Command(\"docker\", \"ps\", \"-a\", \"--filter\", \"name=calico\", \"--format\", \"{{.Names}}: {{.CreatedAt}}\").CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"Could not run docker command: %s\\n\", string(result))\n\t\treturn\n\t}\n\n\t\/\/ No Calico containers found.\n\tif string(result) == \"\" {\n\t\tlog.Debug(\"Did not find any Calico containers\")\n\t\treturn\n\t}\n\n\t\/\/ Remove any containers that have \"k8s_POD\" in them.\n\tre := regexp.MustCompile(\"(?m)[\\r\\n]+^.*k8s_POD.*$\")\n\tcontainers := re.ReplaceAllString(string(result), \"\")\n\n\tfmt.Println(\"Copying logs from Calico containers\")\n\terr = ioutil.WriteFile(logDir+\"\/\"+\"container_creation_time\", []byte(containers), 0666)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not save output of `docker ps` command to container_creation_time: %s\\n\", err)\n\t}\n\n\t\/\/ Grab the log for each container and write it as <containerName>.log.\n\tscanner := bufio.NewScanner(strings.NewReader(containers))\n\tfor scanner.Scan() {\n\t\tname := strings.Split(scanner.Text(), \":\")[0]\n\t\tlog.Debugf(\"Getting logs for container %s\", name)\n\t\tcLog, err := exec.Command(\"docker\", \"logs\", name).CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Could not pull log for container %s: %s\\n\", name, err)\n\t\t\tcontinue\n\t\t}\n\t\terr = ioutil.WriteFile(logDir+\"\/\"+name+\".log\", cLog, 0666)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to write log for container %s to file: %s\\n\", name, err)\n\t\t}\n\t}\n}\n\n\/\/ writeDiags executes the diagnostic commands and outputs the result in the file\n\/\/ with the filename and directory passed as arguments\nfunc writeDiags(cmds diagCmd, dir string) {\n\n\tif cmds.info != \"\" {\n\t\tfmt.Println(cmds.info)\n\t}\n\n\tparts := strings.Fields(cmds.cmd)\n\n\tcontent, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to run command: %s\\nError: %s\\n\", cmds.cmd, string(content))\n\t}\n\n\t\/\/ This is for the commands we want to run but don't want to save the output\n\t\/\/ or for commands that don't produce any output to stdout\n\tif cmds.filename == \"\" {\n\t\treturn\n\t}\n\n\tfp := filepath.Join(dir, cmds.filename)\n\tif err := ioutil.WriteFile(fp, content, 0666); err != nil {\n\t\tlog.Errorf(\"Error writing diags to file: %s\\n\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Builder where the builder is actually executed\n\/\/ over an RPC connection.\ntype builder struct {\n\tclient *rpc.Client\n}\n\n\/\/ BuilderServer wraps a packer.Builder implementation and makes it exportable\n\/\/ as part of a Golang RPC server.\ntype BuilderServer struct {\n\tbuilder packer.Builder\n}\n\ntype BuilderPrepareArgs struct {\n\tConfigs []interface{}\n}\n\ntype BuilderRunArgs struct {\n\tRPCAddress string\n\tResponseAddress string\n}\n\ntype BuilderRunResponse struct {\n\tErr error\n\tRPCAddress string\n}\n\nfunc Builder(client *rpc.Client) *builder {\n\treturn &builder{client}\n}\n\nfunc (b *builder) Prepare(config ...interface{}) (err error) {\n\tcerr := b.client.Call(\"Builder.Prepare\", &BuilderPrepareArgs{config}, &err)\n\tif cerr != nil {\n\t\terr = cerr\n\t}\n\n\treturn\n}\n\nfunc (b *builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Create and start the server for the Build and UI\n\tserver := rpc.NewServer()\n\tRegisterCache(server, cache)\n\tRegisterHook(server, hook)\n\tRegisterUi(server, ui)\n\n\t\/\/ Create a server for the response\n\tresponseL := netListenerInRange(portRangeMin, portRangeMax)\n\trunResponseCh := make(chan *BuilderRunResponse)\n\tgo func() {\n\t\tdefer responseL.Close()\n\n\t\tconn, err := responseL.Accept()\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tdecoder := gob.NewDecoder(conn)\n\n\t\tvar response BuilderRunResponse\n\t\tif err := decoder.Decode(&response); err != nil {\n\t\t\tresponse.Err = fmt.Errorf(\"Error waiting for Run: %s\", err)\n\t\t}\n\n\t\trunResponseCh <- &response\n\t}()\n\n\targs := &BuilderRunArgs{\n\t\tserveSingleConn(server),\n\t\tresponseL.Addr().String(),\n\t}\n\n\tif err := b.client.Call(\"Builder.Run\", args, new(interface{})); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := <-runResponseCh\n\tif response.Err != nil {\n\t\treturn nil, response.Err\n\t}\n\n\tif response.RPCAddress == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tclient, err := rpc.Dial(\"tcp\", response.RPCAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Artifact(client), nil\n}\n\nfunc (b *builder) Cancel() {\n\tif err := b.client.Call(\"Builder.Cancel\", new(interface{}), new(interface{})); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (b *BuilderServer) Prepare(args *BuilderPrepareArgs, reply *error) error {\n\terr := b.builder.Prepare(args.Configs...)\n\tif err != nil {\n\t\t*reply = NewBasicError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Run(args *BuilderRunArgs, reply *interface{}) error {\n\tclient, err := rpc.Dial(\"tcp\", args.RPCAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseC, err := net.Dial(\"tcp\", args.ResponseAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Run the build in a goroutine so we don't block the RPC connection\n\tgo func() {\n\t\tdefer responseC.Close()\n\n\t\tcache := Cache(client)\n\t\thook := Hook(client)\n\t\tui := &Ui{client}\n\t\tartifact, responseErr := b.builder.Run(ui, hook, cache)\n\t\tresponseAddress := \"\"\n\n\t\tif responseErr == nil && artifact != nil {\n\t\t\t\/\/ Wrap the artifact\n\t\t\tserver := rpc.NewServer()\n\t\t\tRegisterArtifact(server, artifact)\n\t\t\tresponseAddress = serveSingleConn(server)\n\t\t}\n\n\t\tif responseErr != nil {\n\t\t\tresponseErr = NewBasicError(responseErr)\n\t\t}\n\n\t\terr := responseWriter.Encode(&BuilderRunResponse{responseErr, responseAddress})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Cancel(args *interface{}, reply *interface{}) error {\n\tb.builder.Cancel()\n\treturn nil\n}\n<commit_msg>packer\/rpc: less panics<commit_after>package rpc\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n)\n\n\/\/ An implementation of packer.Builder where the builder is actually executed\n\/\/ over an RPC connection.\ntype builder struct {\n\tclient *rpc.Client\n}\n\n\/\/ BuilderServer wraps a packer.Builder implementation and makes it exportable\n\/\/ as part of a Golang RPC server.\ntype BuilderServer struct {\n\tbuilder packer.Builder\n}\n\ntype BuilderPrepareArgs struct {\n\tConfigs []interface{}\n}\n\ntype BuilderRunArgs struct {\n\tRPCAddress string\n\tResponseAddress string\n}\n\ntype BuilderRunResponse struct {\n\tErr error\n\tRPCAddress string\n}\n\nfunc Builder(client *rpc.Client) *builder {\n\treturn &builder{client}\n}\n\nfunc (b *builder) Prepare(config ...interface{}) (err error) {\n\tcerr := b.client.Call(\"Builder.Prepare\", &BuilderPrepareArgs{config}, &err)\n\tif cerr != nil {\n\t\terr = cerr\n\t}\n\n\treturn\n}\n\nfunc (b *builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\t\/\/ Create and start the server for the Build and UI\n\tserver := rpc.NewServer()\n\tRegisterCache(server, cache)\n\tRegisterHook(server, hook)\n\tRegisterUi(server, ui)\n\n\t\/\/ Create a server for the response\n\tresponseL := netListenerInRange(portRangeMin, portRangeMax)\n\trunResponseCh := make(chan *BuilderRunResponse)\n\tgo func() {\n\t\tdefer responseL.Close()\n\n\t\tvar response BuilderRunResponse\n\t\tdefer func() { runResponseCh <- &response }()\n\n\t\tconn, err := responseL.Accept()\n\t\tif err != nil {\n\t\t\tresponse.Err = err\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tdecoder := gob.NewDecoder(conn)\n\t\tif err := decoder.Decode(&response); err != nil {\n\t\t\tresponse.Err = fmt.Errorf(\"Error waiting for Run: %s\", err)\n\t\t}\n\t}()\n\n\targs := &BuilderRunArgs{\n\t\tserveSingleConn(server),\n\t\tresponseL.Addr().String(),\n\t}\n\n\tif err := b.client.Call(\"Builder.Run\", args, new(interface{})); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := <-runResponseCh\n\tif response.Err != nil {\n\t\treturn nil, response.Err\n\t}\n\n\tif response.RPCAddress == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tclient, err := rpc.Dial(\"tcp\", response.RPCAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Artifact(client), nil\n}\n\nfunc (b *builder) Cancel() {\n\tif err := b.client.Call(\"Builder.Cancel\", new(interface{}), new(interface{})); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (b *BuilderServer) Prepare(args *BuilderPrepareArgs, reply *error) error {\n\terr := b.builder.Prepare(args.Configs...)\n\tif err != nil {\n\t\t*reply = NewBasicError(err)\n\t}\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Run(args *BuilderRunArgs, reply *interface{}) error {\n\tclient, err := rpc.Dial(\"tcp\", args.RPCAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseC, err := net.Dial(\"tcp\", args.ResponseAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponseWriter := gob.NewEncoder(responseC)\n\n\t\/\/ Run the build in a goroutine so we don't block the RPC connection\n\tgo func() {\n\t\tdefer responseC.Close()\n\n\t\tcache := Cache(client)\n\t\thook := Hook(client)\n\t\tui := &Ui{client}\n\t\tartifact, responseErr := b.builder.Run(ui, hook, cache)\n\t\tresponseAddress := \"\"\n\n\t\tif responseErr == nil && artifact != nil {\n\t\t\t\/\/ Wrap the artifact\n\t\t\tserver := rpc.NewServer()\n\t\t\tRegisterArtifact(server, artifact)\n\t\t\tresponseAddress = serveSingleConn(server)\n\t\t}\n\n\t\tif responseErr != nil {\n\t\t\tresponseErr = NewBasicError(responseErr)\n\t\t}\n\n\t\terr := responseWriter.Encode(&BuilderRunResponse{responseErr, responseAddress})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"BuildServer.Run error: %s\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (b *BuilderServer) Cancel(args *interface{}, reply *interface{}) error {\n\tb.builder.Cancel()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage registry\n\nimport (\n\tcrypto_rand \"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"github.com\/jacobsa\/aws\/sdb\"\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/crypto\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\t\/\/ The item name we use for \"domain is already in use\" markers.\n\tmarkerItemName = \"comeback_marker\"\n\n\t\/\/ The attribute names we use for storing crypto-compatibility data.\n\tencryptedDataMarker = \"encrypted_data\"\n\tpasswordSaltMarker = \"password_salt\"\n\n\t\/\/ A time format that works properly with range queries.\n\tiso8601TimeFormat = \"2006-01-02T15:04:05Z\"\n)\n\n\/\/ A regexp for selected item names.\nvar itemNameRegexp = regexp.MustCompile(`^backup_([0-9a-f]{16})$`)\n\ntype Registry interface {\n\t\/\/ Record that the named backup job has completed.\n\tRecordBackup(j CompletedJob) (err error)\n\n\t\/\/ Return a list of the most recent completed backups.\n\tListRecentBackups() (jobs []CompletedJob, err error)\n\n\t\/\/ Find a particular completed job by ID.\n\tFindBackup(jobId uint64) (job CompletedJob, err error)\n\n\t\/\/ Take note of the version of the score set we're writing out as the latest,\n\t\/\/ ensuring that it still is the latest. A version of zero means there was no\n\t\/\/ previous version.\n\tUpdateScoreSetVersion(newVersion, lastVersion uint64) (err error)\n\n\t\/\/ Return the latest score set version number, or zero if there is none.\n\tGetCurrentScoreSetVersion() (version uint64, err error)\n}\n\n\/\/ Create a registry that stores data in the supplied SimpleDB domain, deriving\n\/\/ a crypto key from the supplied password and ensuring that the domain may not\n\/\/ in the future be used with any other key and has not in the past, either.\n\/\/ Return a crypter configured to use the key.\nfunc NewRegistry(\n\tdomain sdb.Domain,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n) (r Registry, crypter crypto.Crypter, err error) {\n\treturn newRegistry(\n\t\tdomain,\n\t\tcryptoPassword,\n\t\tderiver,\n\t\tcrypto.NewCrypter,\n\t\tcrypto_rand.Reader)\n}\n\nfunc verifyCompatibleAndSetUpCrypter(\n\tmarkerAttrs []sdb.Attribute,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n\tcreateCrypter func(key []byte) (crypto.Crypter, error),\n) (crypter crypto.Crypter, err error) {\n\t\/\/ Look through the attributes for what we need.\n\tvar ciphertext []byte\n\tvar salt []byte\n\n\tfor _, attr := range markerAttrs {\n\t\tvar dest *[]byte\n\t\tswitch attr.Name {\n\t\tcase encryptedDataMarker:\n\t\t\tdest = &ciphertext\n\t\tcase passwordSaltMarker:\n\t\t\tdest = &salt\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ The data is base64-encoded.\n\t\tif *dest, err = base64.StdEncoding.DecodeString(attr.Value); err != nil {\n\t\t\terr = fmt.Errorf(\"Decoding %s (%s): %v\", attr.Name, attr.Value, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Did we get both ciphertext and salt?\n\tif ciphertext == nil {\n\t\terr = fmt.Errorf(\"Missing encrypted data marker.\")\n\t\treturn\n\t}\n\n\tif salt == nil {\n\t\terr = fmt.Errorf(\"Missing password salt marker.\")\n\t\treturn\n\t}\n\n\t\/\/ Derive a key and create a crypter.\n\tcryptoKey := deriver.DeriveKey(cryptoPassword, salt)\n\tif crypter, err = createCrypter(cryptoKey); err != nil {\n\t\terr = fmt.Errorf(\"createCrypter: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to decrypt the ciphertext.\n\tif _, err = crypter.Decrypt(ciphertext); err != nil {\n\t\t\/\/ Special case: Did the crypter signal that the key was wrong?\n\t\tif _, ok := err.(*crypto.NotAuthenticError); ok {\n\t\t\terr = fmt.Errorf(\"The supplied password is incorrect.\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generic error.\n\t\terr = fmt.Errorf(\"Decrypt: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ A version split out for testability.\nfunc newRegistry(\n\tdomain sdb.Domain,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n\tcreateCrypter func(key []byte) (crypto.Crypter, error),\n\tcryptoRandSrc io.Reader,\n) (r Registry, crypter crypto.Crypter, err error) {\n\t\/\/ Ask for the previously-written encrypted marker and password salt, if any.\n\tattrs, err := domain.GetAttributes(\n\t\tmarkerItemName,\n\t\tfalse, \/\/ No need to ask for a consistent read\n\t\t[]string{encryptedDataMarker, passwordSaltMarker},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GetAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ If we got back any attributes, we must verify that they are compatible.\n\tif len(attrs) > 0 {\n\t\tcrypter, err = verifyCompatibleAndSetUpCrypter(\n\t\t\tattrs,\n\t\t\tcryptoPassword,\n\t\t\tderiver,\n\t\t\tcreateCrypter,\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ All is good.\n\t\tr = ®istry{crypter, domain.Db(), domain}\n\t\treturn\n\t}\n\n\t\/\/ Otherwise, we want to claim this domain. Encrypt some random data, base64\n\t\/\/ encode it, then write it out. Make sure to use a precondition to defeat\n\t\/\/ the race condition where another machine is doing the same simultaneously.\n\n\t\/\/ Generate a random salt.\n\tsalt := make([]byte, 8)\n\tif _, err = io.ReadAtLeast(cryptoRandSrc, salt, len(salt)); err != nil {\n\t\terr = fmt.Errorf(\"Reading random bytes for salt: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Derive a crypto key and create the crypter.\n\tcryptoKey := deriver.DeriveKey(cryptoPassword, salt)\n\tcrypter, err = createCrypter(cryptoKey)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"createCrypter: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create some plaintext.\n\tplaintext := make([]byte, 8)\n\tif _, err = io.ReadAtLeast(cryptoRandSrc, plaintext, len(plaintext)); err != nil {\n\t\terr = fmt.Errorf(\"Reading random bytes for plaintext: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Encrypt the plaintext.\n\tciphertext, err := crypter.Encrypt(plaintext)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Encrypt: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ SimpleDB requires only UTF-8 text.\n\tencodedEncryptedData := base64.StdEncoding.EncodeToString(ciphertext)\n\tencodedSalt := base64.StdEncoding.EncodeToString(salt)\n\n\t\/\/ Write out the two markers.\n\terr = domain.PutAttributes(\n\t\tmarkerItemName,\n\t\t[]sdb.PutUpdate{\n\t\t\tsdb.PutUpdate{Name: encryptedDataMarker, Value: encodedEncryptedData},\n\t\t\tsdb.PutUpdate{Name: passwordSaltMarker, Value: encodedSalt},\n\t\t},\n\t\t&sdb.Precondition{Name: encryptedDataMarker, Value: nil},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ All is good.\n\tr = ®istry{crypter, domain.Db(), domain}\n\n\treturn\n}\n\nfunc get8RandBytes(src *rand.Rand) []byte {\n\ta := src.Uint32()\n\tb := src.Uint32()\n\n\treturn []byte{\n\t\tbyte(a),\n\t\tbyte(a >> 8),\n\t\tbyte(a >> 16),\n\t\tbyte(a >> 24),\n\t\tbyte(b),\n\t\tbyte(b >> 8),\n\t\tbyte(b >> 16),\n\t\tbyte(b >> 24),\n\t}\n}\n\ntype registry struct {\n\tcrypter crypto.Crypter\n\tdb sdb.SimpleDB\n\tdomain sdb.Domain\n}\n\nfunc (r *registry) RecordBackup(job CompletedJob) (err error) {\n\t\/\/ Job names must be valid SimpleDB attribute values.\n\tif len(job.Name) == 0 || len(job.Name) > 1024 || !utf8.ValidString(job.Name) {\n\t\terr = fmt.Errorf(\n\t\t\t\"Job names must be non-empty UTF-8 no more than 1024 bytes long.\")\n\t\treturn\n\t}\n\n\t\/\/ Format the item name.\n\titemName := sdb.ItemName(fmt.Sprintf(\"backup_%016x\", job.Id))\n\n\t\/\/ Use a time format that works correctly with range queries. Make sure to\n\t\/\/ standardize on UTC.\n\tformattedTime := job.StartTime.UTC().Format(iso8601TimeFormat)\n\n\t\/\/ Call the domain.\n\tupdates := []sdb.PutUpdate{\n\t\tsdb.PutUpdate{Name: \"job_name\", Value: job.Name},\n\t\tsdb.PutUpdate{Name: \"start_time\", Value: formattedTime},\n\t\tsdb.PutUpdate{Name: \"score\", Value: job.Score.Hex()},\n\t}\n\n\tprecond := sdb.Precondition{Name: \"score\", Value: nil}\n\n\tif err = r.domain.PutAttributes(itemName, updates, &precond); err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc convertAttributes(attrs []sdb.Attribute) (j CompletedJob, err error) {\n\tfor _, attr := range attrs {\n\t\tswitch attr.Name {\n\t\tcase \"job_name\":\n\t\t\tj.Name = attr.Value\n\n\t\tcase \"start_time\":\n\t\t\tif j.StartTime, err = time.Parse(iso8601TimeFormat, attr.Value); err != nil {\n\t\t\t\terr = fmt.Errorf(\"Invalid start_time value: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase \"score\":\n\t\t\tvar decoded []byte\n\t\t\tdecoded, err = hex.DecodeString(attr.Value)\n\t\t\tif err != nil || len(decoded) != 20 {\n\t\t\t\terr = fmt.Errorf(\"Invalid score: %s\", attr.Value)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tj.Score = blob.Score(decoded)\n\t\t}\n\t}\n\n\t\/\/ Everything must have been returned.\n\tif j.Name == \"\" {\n\t\terr = fmt.Errorf(\"Missing job_name attribute.\")\n\t\treturn\n\t}\n\n\tif j.StartTime.IsZero() {\n\t\terr = fmt.Errorf(\"Missing start_time attribute.\")\n\t\treturn\n\t}\n\n\tif j.Score == nil {\n\t\terr = fmt.Errorf(\"Missing score attribute.\")\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc convertSelectedItem(item sdb.SelectedItem) (j CompletedJob, err error) {\n\t\/\/ Convert the item's attributes.\n\tif j, err = convertAttributes(item.Attributes); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Convert the item name.\n\tsubMatches := itemNameRegexp.FindStringSubmatch(string(item.Name))\n\tif subMatches == nil {\n\t\terr = fmt.Errorf(\"Invalid item name: %s\", item.Name)\n\t\treturn\n\t}\n\n\tif j.Id, err = strconv.ParseUint(subMatches[1], 16, 64); err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected result for name: %s\", item.Name))\n\t}\n\n\treturn\n}\n\nfunc (r *registry) ListRecentBackups() (jobs []CompletedJob, err error) {\n\t\/\/ Call the database.\n\tquery := fmt.Sprintf(\n\t\t\"select job_name, start_time, score from `%s` where \"+\n\t\t\t\"start_time is not null order by start_time desc\",\n\t\tr.domain.Name(),\n\t)\n\n\tresults, _, err := r.db.Select(\n\t\tquery,\n\t\tfalse, \/\/ No need for consistent reads.\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Select: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Convert each result.\n\tfor _, item := range results {\n\t\tvar job CompletedJob\n\t\tjob, err = convertSelectedItem(item)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Item %s is invalid: %v\", item.Name, err)\n\t\t\treturn\n\t\t}\n\n\t\tjobs = append(jobs, job)\n\t}\n\n\treturn\n}\n\nfunc (r *registry) FindBackup(jobId uint64) (job CompletedJob, err error) {\n\t\/\/ Call the domain.\n\tattrs, err := r.domain.GetAttributes(\n\t\tsdb.ItemName(fmt.Sprintf(\"backup_%016x\", jobId)),\n\t\tfalse, \/\/ Consistent read unnecessary\n\t\t[]string{\"job_name\", \"start_time\", \"score\"},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GetAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Convert the results.\n\tif job, err = convertAttributes(attrs); err != nil {\n\t\terr = fmt.Errorf(\"Returned attributes invalid: %v\", err)\n\t\treturn\n\t}\n\n\tjob.Id = jobId\n\treturn\n}\n\nfunc formatVersion(v uint64) string {\n\treturn fmt.Sprintf(\"%016x\", v)\n}\n\nfunc (r *registry) UpdateScoreSetVersion(\n\tnewVersion uint64,\n\tlastVersion uint64,\n) (err error) {\n\tupdates := []sdb.PutUpdate{\n\t\tsdb.PutUpdate{Name: \"score_set_version\", Value: formatVersion(newVersion)},\n\t}\n\n\tvar precond *sdb.Precondition\n\tif lastVersion != 0 {\n\t\tformatted := formatVersion(lastVersion)\n\t\tprecond = &sdb.Precondition{Name: \"score_set_version\", Value: &formatted}\n\t}\n\n\tif err = r.domain.PutAttributes(markerItemName, updates, precond); err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (r *registry) GetCurrentScoreSetVersion() (version uint64, err error) {\n\terr = fmt.Errorf(\"TODO\")\n\treturn\n}\n<commit_msg>Fixed a bug.<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage registry\n\nimport (\n\tcrypto_rand \"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"github.com\/jacobsa\/aws\/sdb\"\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/crypto\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\t\/\/ The item name we use for \"domain is already in use\" markers.\n\tmarkerItemName = \"comeback_marker\"\n\n\t\/\/ The attribute names we use for storing crypto-compatibility data.\n\tencryptedDataMarker = \"encrypted_data\"\n\tpasswordSaltMarker = \"password_salt\"\n\n\t\/\/ A time format that works properly with range queries.\n\tiso8601TimeFormat = \"2006-01-02T15:04:05Z\"\n)\n\n\/\/ A regexp for selected item names.\nvar itemNameRegexp = regexp.MustCompile(`^backup_([0-9a-f]{16})$`)\n\ntype Registry interface {\n\t\/\/ Record that the named backup job has completed.\n\tRecordBackup(j CompletedJob) (err error)\n\n\t\/\/ Return a list of the most recent completed backups.\n\tListRecentBackups() (jobs []CompletedJob, err error)\n\n\t\/\/ Find a particular completed job by ID.\n\tFindBackup(jobId uint64) (job CompletedJob, err error)\n\n\t\/\/ Take note of the version of the score set we're writing out as the latest,\n\t\/\/ ensuring that it still is the latest. A version of zero means there was no\n\t\/\/ previous version.\n\tUpdateScoreSetVersion(newVersion, lastVersion uint64) (err error)\n\n\t\/\/ Return the latest score set version number, or zero if there is none.\n\tGetCurrentScoreSetVersion() (version uint64, err error)\n}\n\n\/\/ Create a registry that stores data in the supplied SimpleDB domain, deriving\n\/\/ a crypto key from the supplied password and ensuring that the domain may not\n\/\/ in the future be used with any other key and has not in the past, either.\n\/\/ Return a crypter configured to use the key.\nfunc NewRegistry(\n\tdomain sdb.Domain,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n) (r Registry, crypter crypto.Crypter, err error) {\n\treturn newRegistry(\n\t\tdomain,\n\t\tcryptoPassword,\n\t\tderiver,\n\t\tcrypto.NewCrypter,\n\t\tcrypto_rand.Reader)\n}\n\nfunc verifyCompatibleAndSetUpCrypter(\n\tmarkerAttrs []sdb.Attribute,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n\tcreateCrypter func(key []byte) (crypto.Crypter, error),\n) (crypter crypto.Crypter, err error) {\n\t\/\/ Look through the attributes for what we need.\n\tvar ciphertext []byte\n\tvar salt []byte\n\n\tfor _, attr := range markerAttrs {\n\t\tvar dest *[]byte\n\t\tswitch attr.Name {\n\t\tcase encryptedDataMarker:\n\t\t\tdest = &ciphertext\n\t\tcase passwordSaltMarker:\n\t\t\tdest = &salt\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ The data is base64-encoded.\n\t\tif *dest, err = base64.StdEncoding.DecodeString(attr.Value); err != nil {\n\t\t\terr = fmt.Errorf(\"Decoding %s (%s): %v\", attr.Name, attr.Value, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Did we get both ciphertext and salt?\n\tif ciphertext == nil {\n\t\terr = fmt.Errorf(\"Missing encrypted data marker.\")\n\t\treturn\n\t}\n\n\tif salt == nil {\n\t\terr = fmt.Errorf(\"Missing password salt marker.\")\n\t\treturn\n\t}\n\n\t\/\/ Derive a key and create a crypter.\n\tcryptoKey := deriver.DeriveKey(cryptoPassword, salt)\n\tif crypter, err = createCrypter(cryptoKey); err != nil {\n\t\terr = fmt.Errorf(\"createCrypter: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to decrypt the ciphertext.\n\tif _, err = crypter.Decrypt(ciphertext); err != nil {\n\t\t\/\/ Special case: Did the crypter signal that the key was wrong?\n\t\tif _, ok := err.(*crypto.NotAuthenticError); ok {\n\t\t\terr = fmt.Errorf(\"The supplied password is incorrect.\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generic error.\n\t\terr = fmt.Errorf(\"Decrypt: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ A version split out for testability.\nfunc newRegistry(\n\tdomain sdb.Domain,\n\tcryptoPassword string,\n\tderiver crypto.KeyDeriver,\n\tcreateCrypter func(key []byte) (crypto.Crypter, error),\n\tcryptoRandSrc io.Reader,\n) (r Registry, crypter crypto.Crypter, err error) {\n\t\/\/ Ask for the previously-written encrypted marker and password salt, if any.\n\tattrs, err := domain.GetAttributes(\n\t\tmarkerItemName,\n\t\tfalse, \/\/ No need to ask for a consistent read\n\t\t[]string{encryptedDataMarker, passwordSaltMarker},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GetAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ If we got back any attributes, we must verify that they are compatible.\n\tif len(attrs) > 0 {\n\t\tcrypter, err = verifyCompatibleAndSetUpCrypter(\n\t\t\tattrs,\n\t\t\tcryptoPassword,\n\t\t\tderiver,\n\t\t\tcreateCrypter,\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ All is good.\n\t\tr = ®istry{crypter, domain.Db(), domain}\n\t\treturn\n\t}\n\n\t\/\/ Otherwise, we want to claim this domain. Encrypt some random data, base64\n\t\/\/ encode it, then write it out. Make sure to use a precondition to defeat\n\t\/\/ the race condition where another machine is doing the same simultaneously.\n\n\t\/\/ Generate a random salt.\n\tsalt := make([]byte, 8)\n\tif _, err = io.ReadAtLeast(cryptoRandSrc, salt, len(salt)); err != nil {\n\t\terr = fmt.Errorf(\"Reading random bytes for salt: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Derive a crypto key and create the crypter.\n\tcryptoKey := deriver.DeriveKey(cryptoPassword, salt)\n\tcrypter, err = createCrypter(cryptoKey)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"createCrypter: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create some plaintext.\n\tplaintext := make([]byte, 8)\n\tif _, err = io.ReadAtLeast(cryptoRandSrc, plaintext, len(plaintext)); err != nil {\n\t\terr = fmt.Errorf(\"Reading random bytes for plaintext: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Encrypt the plaintext.\n\tciphertext, err := crypter.Encrypt(plaintext)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Encrypt: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ SimpleDB requires only UTF-8 text.\n\tencodedEncryptedData := base64.StdEncoding.EncodeToString(ciphertext)\n\tencodedSalt := base64.StdEncoding.EncodeToString(salt)\n\n\t\/\/ Write out the two markers.\n\terr = domain.PutAttributes(\n\t\tmarkerItemName,\n\t\t[]sdb.PutUpdate{\n\t\t\tsdb.PutUpdate{Name: encryptedDataMarker, Value: encodedEncryptedData},\n\t\t\tsdb.PutUpdate{Name: passwordSaltMarker, Value: encodedSalt},\n\t\t},\n\t\t&sdb.Precondition{Name: encryptedDataMarker, Value: nil},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ All is good.\n\tr = ®istry{crypter, domain.Db(), domain}\n\n\treturn\n}\n\nfunc get8RandBytes(src *rand.Rand) []byte {\n\ta := src.Uint32()\n\tb := src.Uint32()\n\n\treturn []byte{\n\t\tbyte(a),\n\t\tbyte(a >> 8),\n\t\tbyte(a >> 16),\n\t\tbyte(a >> 24),\n\t\tbyte(b),\n\t\tbyte(b >> 8),\n\t\tbyte(b >> 16),\n\t\tbyte(b >> 24),\n\t}\n}\n\ntype registry struct {\n\tcrypter crypto.Crypter\n\tdb sdb.SimpleDB\n\tdomain sdb.Domain\n}\n\nfunc (r *registry) RecordBackup(job CompletedJob) (err error) {\n\t\/\/ Job names must be valid SimpleDB attribute values.\n\tif len(job.Name) == 0 || len(job.Name) > 1024 || !utf8.ValidString(job.Name) {\n\t\terr = fmt.Errorf(\n\t\t\t\"Job names must be non-empty UTF-8 no more than 1024 bytes long.\")\n\t\treturn\n\t}\n\n\t\/\/ Format the item name.\n\titemName := sdb.ItemName(fmt.Sprintf(\"backup_%016x\", job.Id))\n\n\t\/\/ Use a time format that works correctly with range queries. Make sure to\n\t\/\/ standardize on UTC.\n\tformattedTime := job.StartTime.UTC().Format(iso8601TimeFormat)\n\n\t\/\/ Call the domain.\n\tupdates := []sdb.PutUpdate{\n\t\tsdb.PutUpdate{Name: \"job_name\", Value: job.Name},\n\t\tsdb.PutUpdate{Name: \"start_time\", Value: formattedTime},\n\t\tsdb.PutUpdate{Name: \"score\", Value: job.Score.Hex()},\n\t}\n\n\tprecond := sdb.Precondition{Name: \"score\", Value: nil}\n\n\tif err = r.domain.PutAttributes(itemName, updates, &precond); err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc convertAttributes(attrs []sdb.Attribute) (j CompletedJob, err error) {\n\tfor _, attr := range attrs {\n\t\tswitch attr.Name {\n\t\tcase \"job_name\":\n\t\t\tj.Name = attr.Value\n\n\t\tcase \"start_time\":\n\t\t\tif j.StartTime, err = time.Parse(iso8601TimeFormat, attr.Value); err != nil {\n\t\t\t\terr = fmt.Errorf(\"Invalid start_time value: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase \"score\":\n\t\t\tvar decoded []byte\n\t\t\tdecoded, err = hex.DecodeString(attr.Value)\n\t\t\tif err != nil || len(decoded) != 20 {\n\t\t\t\terr = fmt.Errorf(\"Invalid score: %s\", attr.Value)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tj.Score = blob.Score(decoded)\n\t\t}\n\t}\n\n\t\/\/ Everything must have been returned.\n\tif j.Name == \"\" {\n\t\terr = fmt.Errorf(\"Missing job_name attribute.\")\n\t\treturn\n\t}\n\n\tif j.StartTime.IsZero() {\n\t\terr = fmt.Errorf(\"Missing start_time attribute.\")\n\t\treturn\n\t}\n\n\tif j.Score == nil {\n\t\terr = fmt.Errorf(\"Missing score attribute.\")\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc convertSelectedItem(item sdb.SelectedItem) (j CompletedJob, err error) {\n\t\/\/ Convert the item's attributes.\n\tif j, err = convertAttributes(item.Attributes); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Convert the item name.\n\tsubMatches := itemNameRegexp.FindStringSubmatch(string(item.Name))\n\tif subMatches == nil {\n\t\terr = fmt.Errorf(\"Invalid item name: %s\", item.Name)\n\t\treturn\n\t}\n\n\tif j.Id, err = strconv.ParseUint(subMatches[1], 16, 64); err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected result for name: %s\", item.Name))\n\t}\n\n\treturn\n}\n\nfunc (r *registry) ListRecentBackups() (jobs []CompletedJob, err error) {\n\t\/\/ Call the database.\n\tquery := fmt.Sprintf(\n\t\t\"select job_name, start_time, score from `%s` where \"+\n\t\t\t\"start_time is not null order by start_time desc\",\n\t\tr.domain.Name(),\n\t)\n\n\tresults, _, err := r.db.Select(\n\t\tquery,\n\t\tfalse, \/\/ No need for consistent reads.\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Select: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Convert each result.\n\tfor _, item := range results {\n\t\tvar job CompletedJob\n\t\tjob, err = convertSelectedItem(item)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Item %s is invalid: %v\", item.Name, err)\n\t\t\treturn\n\t\t}\n\n\t\tjobs = append(jobs, job)\n\t}\n\n\treturn\n}\n\nfunc (r *registry) FindBackup(jobId uint64) (job CompletedJob, err error) {\n\t\/\/ Call the domain.\n\tattrs, err := r.domain.GetAttributes(\n\t\tsdb.ItemName(fmt.Sprintf(\"backup_%016x\", jobId)),\n\t\tfalse, \/\/ Consistent read unnecessary\n\t\t[]string{\"job_name\", \"start_time\", \"score\"},\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GetAttributes: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Convert the results.\n\tif job, err = convertAttributes(attrs); err != nil {\n\t\terr = fmt.Errorf(\"Returned attributes invalid: %v\", err)\n\t\treturn\n\t}\n\n\tjob.Id = jobId\n\treturn\n}\n\nfunc formatVersion(v uint64) string {\n\treturn fmt.Sprintf(\"%016x\", v)\n}\n\nfunc (r *registry) UpdateScoreSetVersion(\n\tnewVersion uint64,\n\tlastVersion uint64,\n) (err error) {\n\tupdates := []sdb.PutUpdate{\n\t\tsdb.PutUpdate{Name: \"score_set_version\", Value: formatVersion(newVersion)},\n\t}\n\n\tprecond := &sdb.Precondition{Name: \"score_set_version\"}\n\tif lastVersion != 0 {\n\t\tformatted := formatVersion(lastVersion)\n\t\tprecond.Value = &formatted\n\t}\n\n\tif err = r.domain.PutAttributes(markerItemName, updates, precond); err != nil {\n\t\terr = fmt.Errorf(\"PutAttributes: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (r *registry) GetCurrentScoreSetVersion() (version uint64, err error) {\n\terr = fmt.Errorf(\"TODO\")\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage annotations\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/parnurzeal\/gorequest\"\n\n\t\"k8s.io\/ingress-nginx\/test\/e2e\/framework\"\n)\n\nvar _ = framework.IngressNginxDescribe(\"Annotations - Rewrite\", func() {\n\tf := framework.NewDefaultFramework(\"rewrite\")\n\n\tBeforeEach(func() {\n\t\terr := f.NewEchoDeploymentWithReplicas(1)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tAfterEach(func() {\n\t})\n\n\tIt(\"should rewrite request URL\", func() {\n\t\tBy(\"setting rewrite-target annotation\")\n\n\t\thost := \"rewrite.foo.com\"\n\t\tannotations := map[string]string{\"nginx.ingress.kubernetes.io\/rewrite-target\": \"\/\"}\n\t\texpectBodyRequestURI := fmt.Sprintf(\"request_uri=http:\/\/%v:8080\/\", host)\n\n\t\ting := framework.NewSingleIngress(host, \"\/something\", host, f.IngressController.Namespace, \"http-svc\", 80, &annotations)\n\t\t_, err := f.EnsureIngress(ing)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(ing).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"rewrite (?i)\/something\/(.*) \/$1 break;\") &&\n\t\t\t\t\tstrings.Contains(server, \"rewrite (?i)\/something$ \/ break;\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"sending request to Ingress rule path (lowercase)\")\n\n\t\tresp, body, errs := gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/something\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\n\t\tBy(\"sending request to Ingress rule path (mixed case)\")\n\n\t\tresp, body, errs = gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/SomeThing\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\t})\n\n\tIt(\"should write rewrite logs\", func() {\n\t\tBy(\"setting enable-rewrite-log annotation\")\n\n\t\thost := \"rewrite.bar.com\"\n\t\tannotations := map[string]string{\n\t\t\t\"nginx.ingress.kubernetes.io\/rewrite-target\": \"\/\",\n\t\t\t\"nginx.ingress.kubernetes.io\/enable-rewrite-log\": \"true\",\n\t\t}\n\n\t\ting := framework.NewSingleIngress(host, \"\/something\", host, f.IngressController.Namespace, \"http-svc\", 80, &annotations)\n\t\t_, err := f.EnsureIngress(ing)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(ing).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"rewrite_log on;\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tresp, _, errs := gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/something\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\n\t\tlogs, err := f.NginxLogs()\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(logs).To(ContainSubstring(`\"(?i)\/something$\" matches \"\/something\", client:`))\n\t\tExpect(logs).To(ContainSubstring(`rewritten data: \"\/\", args: \"\",`))\n\t})\n})\n<commit_msg>Add e2e test for rewrite-target annotation kube-lego failure<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage annotations\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/parnurzeal\/gorequest\"\n\n\t\"k8s.io\/ingress-nginx\/test\/e2e\/framework\"\n)\n\nvar _ = framework.IngressNginxDescribe(\"Annotations - Rewrite\", func() {\n\tf := framework.NewDefaultFramework(\"rewrite\")\n\n\tBeforeEach(func() {\n\t\terr := f.NewEchoDeploymentWithReplicas(1)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tAfterEach(func() {\n\t})\n\n\tIt(\"should rewrite request URL\", func() {\n\t\tBy(\"setting rewrite-target annotation\")\n\n\t\thost := \"rewrite.foo.com\"\n\t\tannotations := map[string]string{\"nginx.ingress.kubernetes.io\/rewrite-target\": \"\/\"}\n\t\texpectBodyRequestURI := fmt.Sprintf(\"request_uri=http:\/\/%v:8080\/\", host)\n\n\t\ting := framework.NewSingleIngress(host, \"\/something\", host, f.IngressController.Namespace, \"http-svc\", 80, &annotations)\n\t\t_, err := f.EnsureIngress(ing)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(ing).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"rewrite (?i)\/something\/(.*) \/$1 break;\") &&\n\t\t\t\t\tstrings.Contains(server, \"rewrite (?i)\/something$ \/ break;\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"sending request to Ingress rule path (lowercase)\")\n\n\t\tresp, body, errs := gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/something\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\n\t\tBy(\"sending request to Ingress rule path (mixed case)\")\n\n\t\tresp, body, errs = gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/SomeThing\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\t})\n\n\tIt(\"should write rewrite logs\", func() {\n\t\tBy(\"setting enable-rewrite-log annotation\")\n\n\t\thost := \"rewrite.bar.com\"\n\t\tannotations := map[string]string{\n\t\t\t\"nginx.ingress.kubernetes.io\/rewrite-target\": \"\/\",\n\t\t\t\"nginx.ingress.kubernetes.io\/enable-rewrite-log\": \"true\",\n\t\t}\n\n\t\ting := framework.NewSingleIngress(host, \"\/something\", host, f.IngressController.Namespace, \"http-svc\", 80, &annotations)\n\t\t_, err := f.EnsureIngress(ing)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(ing).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"rewrite_log on;\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tresp, _, errs := gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/something\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\n\t\tlogs, err := f.NginxLogs()\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(logs).To(ContainSubstring(`\"(?i)\/something$\" matches \"\/something\", client:`))\n\t\tExpect(logs).To(ContainSubstring(`rewritten data: \"\/\", args: \"\",`))\n\t})\n\n\tIt(\"should use correct longest path match\", func() {\n\t\thost := \"rewrite.bar.com\"\n\t\texpectBodyRequestURI := fmt.Sprintf(\"request_uri=http:\/\/%v:8080\/.well-known\/acme\/challenge\", host)\n\t\tannotations := map[string]string{}\n\t\trewriteAnnotations := map[string]string{\n\t\t\t\"nginx.ingress.kubernetes.io\/rewrite-target\": \"\/new\/backend\",\n\t\t}\n\n\t\tBy(\"creating a regular ingress definition\")\n\t\ting := framework.NewSingleIngress(\"kube-lego\", \"\/.well-known\/acme\/challenge\", host, f.IngressController.Namespace, \"http-svc\", 80, &annotations)\n\t\t_, err := f.EnsureIngress(ing)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(ing).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"\/.well-known\/acme\/challenge\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"making a request to the non-rewritten location\")\n\t\tresp, body, errs := gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/.well-known\/acme\/challenge\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\n\t\tBy(`creating an ingress definition with the rewrite-target annotation set on the \"\/\" location`)\n\t\trewriteIng := framework.NewSingleIngress(\"rewrite-index\", \"\/\", host, f.IngressController.Namespace, \"http-svc\", 80, &rewriteAnnotations)\n\t\t_, err = f.EnsureIngress(rewriteIng)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(rewriteIng).NotTo(BeNil())\n\n\t\terr = f.WaitForNginxServer(host,\n\t\t\tfunc(server string) bool {\n\t\t\t\treturn strings.Contains(server, \"location ~* \/ {\")\n\t\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"making a second request to the non-rewritten location\")\n\t\tresp, body, errs = gorequest.New().\n\t\t\tGet(f.IngressController.HTTPURL+\"\/.well-known\/acme\/challenge\").\n\t\t\tSet(\"Host\", host).\n\t\t\tEnd()\n\n\t\tExpect(len(errs)).Should(Equal(0))\n\t\tExpect(resp.StatusCode).Should(Equal(http.StatusOK))\n\t\tExpect(body).Should(ContainSubstring(expectBodyRequestURI))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package fire\n\nimport (\n\t\"testing\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/appleboy\/gofight\"\n\t\"github.com\/Jeffail\/gabs\"\n)\n\ntype Post struct {\n\tBase `bson:\",inline\" fire:\"post:posts\"`\n\tTitle string `json:\"title\" valid:\"required\"`\n\tTextBody string `json:\"text-body\" valid:\"-\" bson:\"text_body\"`\n\tComments HasMany `json:\"-\" valid:\"-\" bson:\"-\" fire:\"comments:comments\"`\n}\n\ntype Comment struct {\n\tBase `bson:\",inline\" fire:\"comment:comments\"`\n\tMessage string `json:\"message\" valid:\"required\"`\n\tPostID bson.ObjectId `json:\"-\" valid:\"required\" bson:\"post_id\" fire:\"post:posts\"`\n}\n\nvar PostResource = &Resource{\n\tModel: &Post{},\n\tCollection: \"posts\",\n}\n\nvar CommentResource = &Resource{\n\tModel: &Comment{},\n\tCollection: \"comments\",\n}\n\nfunc TestPosts(t *testing.T) {\n\tserver := buildServer(PostResource, CommentResource)\n\n\tr := gofight.New()\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, `{\"data\":[]}`, r.Body.String())\n\t\t})\n\n\tr.POST(\"\/posts\").\n\t\tSetBody(`{\n\t\t\t\"data\": {\n\t\t\t\t\"type\": \"posts\",\n\t\t\t\t\"attributes\": {\n\t\t\t \t\t\"title\": \"Hello World!\"\n\t\t\t\t}\n\t\t\t}\n\t\t}`).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusCreated, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tvar id string\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\").Index(0)\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"\", obj.Path(\"attributes.text-body\").Data().(string))\n\n\t\t\tid = obj.Path(\"id\").Data().(string)\n\t\t})\n\n\tr.PATCH(\"\/posts\/\" + id).\n\t\tSetBody(`{\n\t\t\t\"data\": {\n\t\t\t\t\"type\": \"posts\",\n\t\t\t\t\"id\": \"` + id + `\",\n\t\t\t\t\"attributes\": {\n\t\t\t \t\t\"text-body\": \"Some Text...\"\n\t\t\t\t}\n\t\t\t}\n\t\t}`).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"Some Text...\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tr.GET(\"\/posts\/\" + id).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"Some Text...\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tr.DELETE(\"\/posts\/\" + id).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tassert.Equal(t, http.StatusNoContent, r.Code)\n\t\t\tassert.Equal(t, \"\", r.Body.String())\n\t\t})\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\tassert.Equal(t, `{\"data\":[]}`, r.Body.String())\n\t})\n}\n<commit_msg>inline resources<commit_after>package fire\n\nimport (\n\t\"testing\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/appleboy\/gofight\"\n\t\"github.com\/Jeffail\/gabs\"\n)\n\ntype Post struct {\n\tBase `bson:\",inline\" fire:\"post:posts\"`\n\tTitle string `json:\"title\" valid:\"required\"`\n\tTextBody string `json:\"text-body\" valid:\"-\" bson:\"text_body\"`\n\tComments HasMany `json:\"-\" valid:\"-\" bson:\"-\" fire:\"comments:comments\"`\n}\n\ntype Comment struct {\n\tBase `bson:\",inline\" fire:\"comment:comments\"`\n\tMessage string `json:\"message\" valid:\"required\"`\n\tPostID bson.ObjectId `json:\"-\" valid:\"required\" bson:\"post_id\" fire:\"post:posts\"`\n}\n\nfunc TestPosts(t *testing.T) {\n\tserver := buildServer(&Resource{\n\t\tModel: &Post{},\n\t\tCollection: \"posts\",\n\t}, &Resource{\n\t\tModel: &Comment{},\n\t\tCollection: \"comments\",\n\t})\n\n\tr := gofight.New()\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, `{\"data\":[]}`, r.Body.String())\n\t\t})\n\n\tr.POST(\"\/posts\").\n\t\tSetBody(`{\n\t\t\t\"data\": {\n\t\t\t\t\"type\": \"posts\",\n\t\t\t\t\"attributes\": {\n\t\t\t \t\t\"title\": \"Hello World!\"\n\t\t\t\t}\n\t\t\t}\n\t\t}`).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusCreated, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tvar id string\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\").Index(0)\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"\", obj.Path(\"attributes.text-body\").Data().(string))\n\n\t\t\tid = obj.Path(\"id\").Data().(string)\n\t\t})\n\n\tr.PATCH(\"\/posts\/\" + id).\n\t\tSetBody(`{\n\t\t\t\"data\": {\n\t\t\t\t\"type\": \"posts\",\n\t\t\t\t\"id\": \"` + id + `\",\n\t\t\t\t\"attributes\": {\n\t\t\t \t\t\"text-body\": \"Some Text...\"\n\t\t\t\t}\n\t\t\t}\n\t\t}`).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"Some Text...\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tr.GET(\"\/posts\/\" + id).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tjson, _ := gabs.ParseJSONBuffer(r.Body)\n\t\t\tobj := json.Path(\"data\")\n\n\t\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\t\tassert.Equal(t, \"posts\", obj.Path(\"type\").Data().(string))\n\t\t\tassert.True(t, bson.IsObjectIdHex(obj.Path(\"id\").Data().(string)))\n\t\t\tassert.Equal(t, \"Hello World!\", obj.Path(\"attributes.title\").Data().(string))\n\t\t\tassert.Equal(t, \"Some Text...\", obj.Path(\"attributes.text-body\").Data().(string))\n\t\t})\n\n\tr.DELETE(\"\/posts\/\" + id).\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\t\tassert.Equal(t, http.StatusNoContent, r.Code)\n\t\t\tassert.Equal(t, \"\", r.Body.String())\n\t\t})\n\n\tr.GET(\"\/posts\").\n\t\tRun(server, func(r gofight.HTTPResponse, rq gofight.HTTPRequest) {\n\t\tassert.Equal(t, http.StatusOK, r.Code)\n\t\tassert.Equal(t, `{\"data\":[]}`, r.Body.String())\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\ntype OtherRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *OtherRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *OtherRepository) IsValid() bool {\n\treturn true\n}\n\nfunc (repo *OtherRepository) VCS() *VCSBackend {\n\tif GitHasFeatureConfigURLMatch() {\n\t\t\/\/ Respect 'ghq.url.https:\/\/ghe.example.com\/.vcs' config variable\n\t\t\/\/ (in gitconfig:)\n\t\t\/\/ [ghq \"https:\/\/ghe.example.com\/\"]\n\t\t\/\/ vcs = github\n\t\tvcs, err := GitConfig(\"--get-urlmatch\", \"ghq.vcs\", repo.URL().String())\n\t\tif err != nil {\n\t\t\tutils.Log(\"error\", err.Error())\n\t\t}\n\n\t\tif vcs == \"git\" || vcs == \"github\" {\n\t\t\treturn GitBackend\n\t\t}\n\n\t\tif vcs == \"svn\" || vcs == \"subversion\" {\n\t\t\treturn SubversionBackend\n\t\t}\n\n\t\tif vcs == \"git svn\" || vcs == \"git-svn\" {\n\t\t\treturn GitsvnBackend\n\t\t}\n\n\t\tif vcs == \"hg\" || vcs == \"mercurial\" {\n\t\t\treturn MercurialBackend\n\t\t}\n\t} else {\n\t\tutils.Log(\"warning\", \"This version of Git does not support `config --get-urlmatch`; per-URL settings are not available\")\n\t}\n\n\t\/\/ Detect VCS backend automatically\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else if utils.RunSilently(\"svn\", \"info\", repo.url.String()) == nil {\n\t\treturn SubversionBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\tgheHosts, err := GitConfigAll(\"ghq.ghe.host\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve GH:E hostname from .gitconfig: %s\", err)\n\t}\n\n\tfor _, host := range gheHosts {\n\t\tif url.Host == host {\n\t\t\treturn &GitHubRepository{url}, nil\n\t\t}\n\t}\n\n\treturn &OtherRepository{url}, nil\n}\n<commit_msg>Do not allow \"git svn\" configuration variable for `ghq.<url>.vcs`<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\ntype OtherRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *OtherRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *OtherRepository) IsValid() bool {\n\treturn true\n}\n\nfunc (repo *OtherRepository) VCS() *VCSBackend {\n\tif GitHasFeatureConfigURLMatch() {\n\t\t\/\/ Respect 'ghq.url.https:\/\/ghe.example.com\/.vcs' config variable\n\t\t\/\/ (in gitconfig:)\n\t\t\/\/ [ghq \"https:\/\/ghe.example.com\/\"]\n\t\t\/\/ vcs = github\n\t\tvcs, err := GitConfig(\"--get-urlmatch\", \"ghq.vcs\", repo.URL().String())\n\t\tif err != nil {\n\t\t\tutils.Log(\"error\", err.Error())\n\t\t}\n\n\t\tif vcs == \"git\" || vcs == \"github\" {\n\t\t\treturn GitBackend\n\t\t}\n\n\t\tif vcs == \"svn\" || vcs == \"subversion\" {\n\t\t\treturn SubversionBackend\n\t\t}\n\n\t\tif vcs == \"git-svn\" {\n\t\t\treturn GitsvnBackend\n\t\t}\n\n\t\tif vcs == \"hg\" || vcs == \"mercurial\" {\n\t\t\treturn MercurialBackend\n\t\t}\n\t} else {\n\t\tutils.Log(\"warning\", \"This version of Git does not support `config --get-urlmatch`; per-URL settings are not available\")\n\t}\n\n\t\/\/ Detect VCS backend automatically\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else if utils.RunSilently(\"svn\", \"info\", repo.url.String()) == nil {\n\t\treturn SubversionBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\tgheHosts, err := GitConfigAll(\"ghq.ghe.host\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve GH:E hostname from .gitconfig: %s\", err)\n\t}\n\n\tfor _, host := range gheHosts {\n\t\tif url.Host == host {\n\t\t\treturn &GitHubRepository{url}, nil\n\t\t}\n\t}\n\n\treturn &OtherRepository{url}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\ntype OtherRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *OtherRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *OtherRepository) IsValid() bool {\n\treturn true\n}\n\nfunc (repo *OtherRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\tgheHosts, err := GitConfigAll(\"ghq.ghe.host\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve GH:E hostname from .gitconfig: %s\", err)\n\t}\n\n\tfor _, host := range gheHosts {\n\t\tif url.Host == host {\n\t\t\treturn &GitHubRepository{url}, nil\n\t\t}\n\t}\n\n\treturn &OtherRepository{url}, nil\n}\n<commit_msg>Use ghq.url.https:\/\/example.com\/.vcs varaible to determine VCS backend<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\ntype OtherRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *OtherRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *OtherRepository) IsValid() bool {\n\treturn true\n}\n\nfunc (repo *OtherRepository) VCS() *VCSBackend {\n\t\/\/ Respect 'ghq.url.https:\/\/ghe.example.com\/.vcs' config variable\n\t\/\/ (in gitconfig:)\n\t\/\/ [ghq.url \"https:\/\/ghe.example.com\/\"]\n\t\/\/ vcs = github\n\tvcs, err := GitConfigURLMatch(\"ghq.url\", \"vcs\", repo.URL().String())\n\tif err != nil {\n\t\tutils.Log(\"error\", err.Error())\n\t}\n\n\tif vcs == \"git\" || vcs == \"github\" {\n\t\treturn GitBackend\n\t}\n\n\tif vcs == \"hg\" || vcs == \"mercurial\" {\n\t\treturn MercurialBackend\n\t}\n\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\tgheHosts, err := GitConfigAll(\"ghq.ghe.host\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve GH:E hostname from .gitconfig: %s\", err)\n\t}\n\n\tfor _, host := range gheHosts {\n\t\tif url.Host == host {\n\t\t\treturn &GitHubRepository{url}, nil\n\t\t}\n\t}\n\n\treturn &OtherRepository{url}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestCreateBlogFolder(t *testing.T) {\n\tt.Parallel()\n\n\tblogFileDir := testDataPath(\"build\", \"test_generate_files\")\n\toutputDir := filepath.Join(createTmpFolder(t), BuildDirName)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), blogFileDir).Build(outputDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(outputDir); os.IsNotExist(err) {\n\t\tt.Fatalf(\"A folder call 'blog' should be created at %s after build, but not.\", outputDir)\n\t}\n}\n\nfunc TestCleanUpBeforeBuild(t *testing.T) {\n\tt.Parallel()\n\n\tblogFileDir := testDataPath(\"build\", \"test_generate_files\")\n\toutputDir := filepath.Join(createTmpFolder(t), BuildDirName)\n\n\tdeleteme := filepath.Join(outputDir, \"delete_me\")\n\tos.Mkdir(outputDir, os.ModePerm)\n\tos.Create(deleteme)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), blogFileDir).Build(outputDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(deleteme); !os.IsNotExist(err) {\n\t\tt.Fatalf(\"Exist blog folder should be clean up before build, but not\")\n\t}\n}\n\nfunc TestBuildGeneratePostFiles(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_posts\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(filepath.Join(output, \"test-post\", \"index.html\"))\n\tcontent := string(bytes)\n\n\tif !strings.Contains(content, `<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">`) {\n\t\tt.Fatalf(\"No base template in post file\")\n\t}\n\n\tif !strings.Contains(content, \"This is test post content\") {\n\t\tt.Fatalf(\"No post in post file\")\n\t}\n}\n\nfunc TestBuildShouldCopyPostFiles(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_posts\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfile := filepath.Join(output, \"test-post\", \"test.txt\")\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\tt.Errorf(\"File in post folder should be copied to build result\")\n\t}\n\n\tfileInSubDir := filepath.Join(output, \"test-post\", \"test\", \"test.txt\")\n\tif _, err := os.Stat(fileInSubDir); os.IsNotExist(err) {\n\t\tt.Errorf(\"File in sub folder of post folder should be copied to the build result\")\n\t}\n\n\tmarkdown := filepath.Join(output, \"test-post\", \"post.md\")\n\tif _, err := os.Stat(markdown); !os.IsNotExist(err) {\n\t\tt.Errorf(\"Post markdown file should not be copied to the built result\")\n\t}\n\n}\n\nfunc TestBuildBlogIndexPage(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_index\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tindex := filepath.Join(output, \"index.html\")\n\tif _, err := os.Stat(index); os.IsNotExist(err) {\n\t\tt.Fatalf(\"Blog index file should be created at %s, but not\", index)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(index)\n\tcontent := string(bytes)\n\tif !strings.Contains(content, `<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">`) {\n\t\tt.Errorf(\"Blog index file should be generated with base template, but not\")\n\t}\n\tif !strings.Contains(content, \"<a href=\\\"#\\\">Test Post<\/a>\") {\n\t\tt.Errorf(\"Blog index file should have posts data available, but not\")\n\t}\n\tif !strings.Contains(content, \"GithubAccessToken\") {\n\t\tt.Errorf(\"Blog index file should have configuration data available, but not\")\n\t}\n}\n\nfunc TestAssetsWillBeCopied(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_copy_assets\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpaths := []string{\n\t\tfilepath.Join(output, \"test.txt\"),\n\t\tfilepath.Join(output, \"subdir\", \"test.txt\"),\n\t}\n\tfor _, path := range paths {\n\t\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t\tt.Fatalf(\"%s should exist in build result but not\", path)\n\t\t}\n\t}\n}\n\nfunc TestGeneratedPostsWillBeSortedByDateInBlogIndex(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_posts_sorted_by_date\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(filepath.Join(output, \"index.html\"))\n\tcontent := string(bytes)\n\n\texpectExcerpt := `\n <li><a href=\"2-second-post\">2-second-post<\/a><\/li>\n\n <li><a href=\"1-first-post\">1-first-post<\/a><\/li>\n`\n\tif !strings.Contains(content, expectExcerpt) {\n\t\tt.Fatalf(\"Posts in blog index page should be sorted by date in descending\")\n\t}\n}\n<commit_msg>remove redundent words<commit_after>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestCreateBlogFolder(t *testing.T) {\n\tt.Parallel()\n\n\tblogFileDir := testDataPath(\"build\", \"test_generate_files\")\n\toutputDir := filepath.Join(createTmpFolder(t), BuildDirName)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), blogFileDir).Build(outputDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(outputDir); os.IsNotExist(err) {\n\t\tt.Fatalf(\"A folder call 'blog' should be created at %s after build.\", outputDir)\n\t}\n}\n\nfunc TestCleanUpBeforeBuild(t *testing.T) {\n\tt.Parallel()\n\n\tblogFileDir := testDataPath(\"build\", \"test_generate_files\")\n\toutputDir := filepath.Join(createTmpFolder(t), BuildDirName)\n\n\tdeleteme := filepath.Join(outputDir, \"delete_me\")\n\tos.Mkdir(outputDir, os.ModePerm)\n\tos.Create(deleteme)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), blogFileDir).Build(outputDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(deleteme); !os.IsNotExist(err) {\n\t\tt.Fatalf(\"Exist blog folder should be clean up before build\")\n\t}\n}\n\nfunc TestBuildGeneratePostFiles(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_posts\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(filepath.Join(output, \"test-post\", \"index.html\"))\n\tcontent := string(bytes)\n\n\tif !strings.Contains(content, `<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">`) {\n\t\tt.Fatalf(\"No base template in post file\")\n\t}\n\n\tif !strings.Contains(content, \"This is test post content\") {\n\t\tt.Fatalf(\"No post in post file\")\n\t}\n}\n\nfunc TestBuildShouldCopyPostFiles(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_posts\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfile := filepath.Join(output, \"test-post\", \"test.txt\")\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\tt.Errorf(\"File in post folder should be copied to build result\")\n\t}\n\n\tfileInSubDir := filepath.Join(output, \"test-post\", \"test\", \"test.txt\")\n\tif _, err := os.Stat(fileInSubDir); os.IsNotExist(err) {\n\t\tt.Errorf(\"File in sub folder of post folder should be copied to the build result\")\n\t}\n\n\tmarkdown := filepath.Join(output, \"test-post\", \"post.md\")\n\tif _, err := os.Stat(markdown); !os.IsNotExist(err) {\n\t\tt.Errorf(\"Post markdown file should not be copied to the built result\")\n\t}\n\n}\n\nfunc TestBuildBlogIndexPage(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_generate_index\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tindex := filepath.Join(output, \"index.html\")\n\tif _, err := os.Stat(index); os.IsNotExist(err) {\n\t\tt.Fatalf(\"Blog index file should be created at %s\", index)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(index)\n\tcontent := string(bytes)\n\tif !strings.Contains(content, `<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">`) {\n\t\tt.Errorf(\"Blog index file should be generated with base template\")\n\t}\n\tif !strings.Contains(content, \"<a href=\\\"#\\\">Test Post<\/a>\") {\n\t\tt.Errorf(\"Blog index file should have posts data available\")\n\t}\n\tif !strings.Contains(content, \"GithubAccessToken\") {\n\t\tt.Errorf(\"Blog index file should have configuration data available\")\n\t}\n}\n\nfunc TestAssetsWillBeCopied(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_copy_assets\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpaths := []string{\n\t\tfilepath.Join(output, \"test.txt\"),\n\t\tfilepath.Join(output, \"subdir\", \"test.txt\"),\n\t}\n\tfor _, path := range paths {\n\t\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t\tt.Fatalf(\"%s should exist in build result but not\", path)\n\t\t}\n\t}\n}\n\nfunc TestGeneratedPostsWillBeSortedByDateInBlogIndex(t *testing.T) {\n\tt.Parallel()\n\n\ttestDataDir := testDataPath(\"build\", \"test_posts_sorted_by_date\")\n\toutput := createTmpFolder(t)\n\n\terr := NewBlogBuilder(newTestPostParser(), fakeConfiguration(), testDataDir).Build(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbytes, _ := ioutil.ReadFile(filepath.Join(output, \"index.html\"))\n\tcontent := string(bytes)\n\n\texpectExcerpt := `\n <li><a href=\"2-second-post\">2-second-post<\/a><\/li>\n\n <li><a href=\"1-first-post\">1-first-post<\/a><\/li>\n`\n\tif !strings.Contains(content, expectExcerpt) {\n\t\tt.Fatalf(\"Posts in blog index page should be sorted by date in descending\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"unsupported host: %s\", url.Host)\n}\n<commit_msg>Regard repositories on GH:E as ones on GitHub.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/motemen\/ghq\/utils\"\n)\n\n\/\/ A RemoteRepository represents a remote repository.\ntype RemoteRepository interface {\n\t\/\/ The repository URL.\n\tURL() *url.URL\n\t\/\/ Checks if the URL is valid.\n\tIsValid() bool\n\t\/\/ The VCS backend that hosts the repository.\n\tVCS() *VCSBackend\n}\n\n\/\/ A GitHubRepository represents a GitHub repository. Impliments RemoteRepository.\ntype GitHubRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GitHubRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nfunc (repo *GitHubRepository) IsValid() bool {\n\tif strings.HasPrefix(repo.url.Path, \"\/blog\/\") {\n\t\treturn false\n\t}\n\n\t\/\/ must be \/{user}\/{project}\/?\n\tpathComponents := strings.Split(strings.TrimRight(repo.url.Path, \"\/\"), \"\/\")\n\tif len(pathComponents) != 3 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (repo *GitHubRepository) VCS() *VCSBackend {\n\treturn GitBackend\n}\n\ntype GoogleCodeRepository struct {\n\turl *url.URL\n}\n\nfunc (repo *GoogleCodeRepository) URL() *url.URL {\n\treturn repo.url\n}\n\nvar validGoogleCodePathPattern = regexp.MustCompile(`^\/p\/[^\/]+\/?$`)\n\nfunc (repo *GoogleCodeRepository) IsValid() bool {\n\treturn validGoogleCodePathPattern.MatchString(repo.url.Path)\n}\n\nfunc (repo *GoogleCodeRepository) VCS() *VCSBackend {\n\tif utils.RunSilently(\"hg\", \"identify\", repo.url.String()) == nil {\n\t\treturn MercurialBackend\n\t} else if utils.RunSilently(\"git\", \"ls-remote\", repo.url.String()) == nil {\n\t\treturn GitBackend\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc NewRemoteRepository(url *url.URL) (RemoteRepository, error) {\n\tif url.Host == \"github.com\" {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\tif url.Host == \"code.google.com\" {\n\t\treturn &GoogleCodeRepository{url}, nil\n\t}\n\n\tgheHost, err := GitConfig(\"ghq.ghe.host\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"An error occurred while attempting to retrieve GH:E hostname from .gitconfig: %s\", err)\n\t}\n\n\tif url.Host == gheHost {\n\t\treturn &GitHubRepository{url}, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"unsupported host: %s\", url.Host)\n}\n<|endoftext|>"} {"text":"<commit_before>package replacer\n\nimport (\n\t\"bytes\"\n\n\t\"github.com\/json-iterator\/go\"\n)\n\nvar json = jsoniter.ConfigFastest\n\ntype domainConfig struct {\n\tapiReplaces []replace\n\tapiOfficialLongpollReplace replace\n\tapiLongpollReplace replace\n\tsiteHlsReplace replace\n}\n\ntype Replacer struct {\n\tdomains map[string]*domainConfig\n}\n\ntype ReplaceContext struct {\n\t\/\/ vk-proxy domain name\n\tBaseDomain string\n\n\tDomain string\n\tPath string\n\n\tFilterFeed bool\n}\n\nfunc (r *Replacer) getDomainConfig(domain string) *domainConfig {\n\tcfg, ok := r.domains[domain]\n\tif !ok {\n\t\tvar replacementStart = []byte(`\\\/\\\/` + domain + `\\\/_\\\/`)\n\t\tvar jsonUrlPrefix = []byte(`\"https:`)\n\t\tvar mVkCom = []byte(`m.vk.com`)\n\t\tcfg = &domainConfig{}\n\t\tcfg.apiReplaces = []replace{\n\t\t\tnewStringReplace(`\"https:\\\/\\\/vk.com\\\/video_hls.php`, `\"https:\\\/\\\/`+domain+`\\\/@vk.com\\\/video_hls.php`),\n\t\t\tnewRegexFastReplace(`\\\\\/\\\\\/[-_a-zA-Z0-9]{1,15}\\.(?:userapi\\.com|vk-cdn\\.net|vk\\.(?:me|com)|vkuser(?:live|video|audio)\\.(?:net|com))\\\\\/`,\n\t\t\t\tfunc(src, dst []byte, start, end int) []byte {\n\t\t\t\t\t\/\/ check if url has valid prefix (like in regexp backreference)\n\t\t\t\t\tif start < 7 || !bytes.Equal(src[start-7:start], jsonUrlPrefix) {\n\t\t\t\t\t\tgoto cancel\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ do not proxy m.vk.com domain (bugged articles)\n\t\t\t\t\tif bytes.Equal(src[start+4:end-2], mVkCom) {\n\t\t\t\t\t\tgoto cancel\n\t\t\t\t\t}\n\t\t\t\t\tdst = append(dst, replacementStart...)\n\t\t\t\t\tdst = append(dst, src[start+4:end]...)\n\t\t\t\t\treturn dst\n\t\t\t\tcancel:\n\t\t\t\t\treturn append(dst, src[start:end]...)\n\t\t\t\t}),\n\t\t\tnewRegexReplace(`\"https:\\\\\/\\\\\/vk\\.com\\\\\/((?:\\\\\/)?images\\\\\/|doc-?[0-9]+_)`, `\"https:\\\/\\\/`+domain+`\\\/_\\\/vk.com\\\/$1`),\n\t\t}\n\t\tcfg.apiOfficialLongpollReplace = newStringReplace(`\"server\":\"api.vk.com\\\/newuim`, `\"server\":\"`+domain+`\\\/_\\\/api.vk.com\\\/newuim`)\n\t\tcfg.apiLongpollReplace = newStringReplace(`\"server\":\"`, `\"server\":\"`+domain+`\\\/_\\\/`)\n\t\tcfg.siteHlsReplace = newRegexReplace(`https:\\\/\\\/([-_a-zA-Z0-9]+\\.(?:userapi\\.com|vk-cdn\\.net|vk\\.me|vkuser(?:live|video)\\.(?:net|com)))\\\/`, `https:\/\/`+domain+`\/_\/$1\/`)\n\t\tif r.domains == nil {\n\t\t\tr.domains = make(map[string]*domainConfig)\n\t\t}\n\t\tr.domains[domain] = cfg\n\t}\n\treturn cfg\n}\n\nfunc (r *Replacer) DoReplace(body []byte, ctx ReplaceContext) []byte {\n\tconfig := r.getDomainConfig(ctx.BaseDomain)\n\n\tif ctx.Domain == \"vk.com\" {\n\t\tif ctx.Path == \"\/video_hls.php\" {\n\t\t\tbody = config.siteHlsReplace.apply(body)\n\t\t}\n\t} else {\n\t\tfor _, replace := range config.apiReplaces {\n\t\t\tbody = replace.apply(body)\n\t\t}\n\n\t\t\/\/ Replace longpoll server\n\t\tif ctx.Path == \"\/method\/messages.getLongPollServer\" {\n\t\t\tbody = config.apiLongpollReplace.apply(body)\n\t\t} else\n\n\t\t\/\/ Replace longpoll server for official app\n\t\tif ctx.Path == \"\/method\/execute\" ||\n\t\t\tctx.Path == \"\/method\/execute.imGetLongPollHistoryExtended\" ||\n\t\t\tctx.Path == \"\/method\/execute.imLpInit\" {\n\t\t\tbody = config.apiOfficialLongpollReplace.apply(body)\n\t\t}\n\n\t\t\/\/ Clear feed from SPAM\n\t\tif ctx.FilterFeed {\n\t\t\tif ctx.Path == \"\/method\/execute.getNewsfeedSmart\" ||\n\t\t\t\tctx.Path == \"\/method\/newsfeed.get\" {\n\t\t\t\tvar parsed map[string]interface{}\n\t\t\t\tif err := json.Unmarshal(body, &parsed); err == nil {\n\t\t\t\t\tif parsed[\"response\"] != nil {\n\t\t\t\t\t\tresponse := parsed[\"response\"].(map[string]interface{})\n\t\t\t\t\t\tif response[\"items\"] != nil {\n\t\t\t\t\t\t\tnewItems, modified := filterFeed(response[\"items\"].([]interface{}))\n\t\t\t\t\t\t\tif modified {\n\t\t\t\t\t\t\t\tresponse[\"items\"] = newItems\n\t\t\t\t\t\t\t\tbody, err = json.Marshal(parsed)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn body\n}\n\nfunc filterFeed(items []interface{}) ([]interface{}, bool) {\n\tremoved := 0\n\tfor i := len(items) - 1; i >= 0; i-- {\n\t\tpost := items[i].(map[string]interface{})\n\t\tif post[\"type\"] == \"ads\" || (post[\"type\"] == \"post\" && post[\"marked_as_ads\"] != nil && post[\"marked_as_ads\"].(float64) == 1) {\n\t\t\titems[i] = items[len(items)-1]\n\t\t\titems[len(items)-1] = nil\n\t\t\titems = items[:len(items)-1]\n\t\t\tremoved++\n\t\t}\n\t}\n\tif removed > 0 {\n\t\tnewItems := make([]interface{}, len(items))\n\t\tcopy(newItems, items)\n\t\treturn newItems, true\n\t}\n\treturn nil, false\n}\n<commit_msg>Новый адрес для стикеров<commit_after>package replacer\n\nimport (\n\t\"bytes\"\n\n\t\"github.com\/json-iterator\/go\"\n)\n\nvar json = jsoniter.ConfigFastest\n\ntype domainConfig struct {\n\tapiReplaces []replace\n\tapiOfficialLongpollReplace replace\n\tapiLongpollReplace replace\n\tsiteHlsReplace replace\n}\n\ntype Replacer struct {\n\tdomains map[string]*domainConfig\n}\n\ntype ReplaceContext struct {\n\t\/\/ vk-proxy domain name\n\tBaseDomain string\n\n\tDomain string\n\tPath string\n\n\tFilterFeed bool\n}\n\nfunc (r *Replacer) getDomainConfig(domain string) *domainConfig {\n\tcfg, ok := r.domains[domain]\n\tif !ok {\n\t\tvar replacementStart = []byte(`\\\/\\\/` + domain + `\\\/_\\\/`)\n\t\tvar jsonUrlPrefix = []byte(`\"https:`)\n\t\tvar mVkCom = []byte(`m.vk.com`)\n\t\tcfg = &domainConfig{}\n\t\tcfg.apiReplaces = []replace{\n\t\t\tnewStringReplace(`\"https:\\\/\\\/vk.com\\\/video_hls.php`, `\"https:\\\/\\\/`+domain+`\\\/@vk.com\\\/video_hls.php`),\n\t\t\tnewRegexFastReplace(`\\\\\/\\\\\/[-_a-zA-Z0-9]{1,15}\\.(?:userapi\\.com|vk-cdn\\.net|vk\\.(?:me|com)|vkuser(?:live|video|audio)\\.(?:net|com))\\\\\/`,\n\t\t\t\tfunc(src, dst []byte, start, end int) []byte {\n\t\t\t\t\t\/\/ check if url has valid prefix (like in regexp backreference)\n\t\t\t\t\tif start < 7 || !bytes.Equal(src[start-7:start], jsonUrlPrefix) {\n\t\t\t\t\t\tgoto cancel\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ do not proxy m.vk.com domain (bugged articles)\n\t\t\t\t\tif bytes.Equal(src[start+4:end-2], mVkCom) {\n\t\t\t\t\t\tgoto cancel\n\t\t\t\t\t}\n\t\t\t\t\tdst = append(dst, replacementStart...)\n\t\t\t\t\tdst = append(dst, src[start+4:end]...)\n\t\t\t\t\treturn dst\n\t\t\t\tcancel:\n\t\t\t\t\treturn append(dst, src[start:end]...)\n\t\t\t\t}),\n\t\t\tnewRegexReplace(`\"https:\\\\\/\\\\\/vk\\.com\\\\\/((?:\\\\\/)?images\\\\\/|stickers_|doc-?[0-9]+_)`, `\"https:\\\/\\\/`+domain+`\\\/_\\\/vk.com\\\/$1`),\n\t\t}\n\t\tcfg.apiOfficialLongpollReplace = newStringReplace(`\"server\":\"api.vk.com\\\/newuim`, `\"server\":\"`+domain+`\\\/_\\\/api.vk.com\\\/newuim`)\n\t\tcfg.apiLongpollReplace = newStringReplace(`\"server\":\"`, `\"server\":\"`+domain+`\\\/_\\\/`)\n\t\tcfg.siteHlsReplace = newRegexReplace(`https:\\\/\\\/([-_a-zA-Z0-9]+\\.(?:userapi\\.com|vk-cdn\\.net|vk\\.me|vkuser(?:live|video)\\.(?:net|com)))\\\/`, `https:\/\/`+domain+`\/_\/$1\/`)\n\t\tif r.domains == nil {\n\t\t\tr.domains = make(map[string]*domainConfig)\n\t\t}\n\t\tr.domains[domain] = cfg\n\t}\n\treturn cfg\n}\n\nfunc (r *Replacer) DoReplace(body []byte, ctx ReplaceContext) []byte {\n\tconfig := r.getDomainConfig(ctx.BaseDomain)\n\n\tif ctx.Domain == \"vk.com\" {\n\t\tif ctx.Path == \"\/video_hls.php\" {\n\t\t\tbody = config.siteHlsReplace.apply(body)\n\t\t}\n\t} else {\n\t\tfor _, replace := range config.apiReplaces {\n\t\t\tbody = replace.apply(body)\n\t\t}\n\n\t\t\/\/ Replace longpoll server\n\t\tif ctx.Path == \"\/method\/messages.getLongPollServer\" {\n\t\t\tbody = config.apiLongpollReplace.apply(body)\n\t\t} else\n\n\t\t\/\/ Replace longpoll server for official app\n\t\tif ctx.Path == \"\/method\/execute\" ||\n\t\t\tctx.Path == \"\/method\/execute.imGetLongPollHistoryExtended\" ||\n\t\t\tctx.Path == \"\/method\/execute.imLpInit\" {\n\t\t\tbody = config.apiOfficialLongpollReplace.apply(body)\n\t\t}\n\n\t\t\/\/ Clear feed from SPAM\n\t\tif ctx.FilterFeed {\n\t\t\tif ctx.Path == \"\/method\/execute.getNewsfeedSmart\" ||\n\t\t\t\tctx.Path == \"\/method\/newsfeed.get\" {\n\t\t\t\tvar parsed map[string]interface{}\n\t\t\t\tif err := json.Unmarshal(body, &parsed); err == nil {\n\t\t\t\t\tif parsed[\"response\"] != nil {\n\t\t\t\t\t\tresponse := parsed[\"response\"].(map[string]interface{})\n\t\t\t\t\t\tif response[\"items\"] != nil {\n\t\t\t\t\t\t\tnewItems, modified := filterFeed(response[\"items\"].([]interface{}))\n\t\t\t\t\t\t\tif modified {\n\t\t\t\t\t\t\t\tresponse[\"items\"] = newItems\n\t\t\t\t\t\t\t\tbody, err = json.Marshal(parsed)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn body\n}\n\nfunc filterFeed(items []interface{}) ([]interface{}, bool) {\n\tremoved := 0\n\tfor i := len(items) - 1; i >= 0; i-- {\n\t\tpost := items[i].(map[string]interface{})\n\t\tif post[\"type\"] == \"ads\" || (post[\"type\"] == \"post\" && post[\"marked_as_ads\"] != nil && post[\"marked_as_ads\"].(float64) == 1) {\n\t\t\titems[i] = items[len(items)-1]\n\t\t\titems[len(items)-1] = nil\n\t\t\titems = items[:len(items)-1]\n\t\t\tremoved++\n\t\t}\n\t}\n\tif removed > 0 {\n\t\tnewItems := make([]interface{}, len(items))\n\t\tcopy(newItems, items)\n\t\treturn newItems, true\n\t}\n\treturn nil, false\n}\n<|endoftext|>"} {"text":"<commit_before>package repofile\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/arteev\/logger\"\n)\n\n\/\/RepositoryFile - current file of repository\nvar (\n\trepositoryFile = \"repository.sqlite\"\n\tisDefaultRepo = true\n)\n\nfunc init() {\n\tsearchLocation()\n}\n\nfunc searchLocation() {\n\t\/\/workdir\n\tif _, err := os.Stat(repositoryFile); err == nil {\n\t\treturn\n\t}\n\tvar cfgLocation string\n\t\/\/appdata | ~\/.config\n\tif u, err := user.Current(); err == nil {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcfgLocation = filepath.Join(u.HomeDir, os.Getenv(\"APPDATA\"), \"dsql\", repositoryFile)\n\t\t} else {\n\t\t\tcfgLocation = filepath.Join(u.HomeDir, \".config\", \"dsql\", repositoryFile)\n\t\t}\n\t\tif _, err := os.Stat(cfgLocation); err == nil {\n\t\t\trepositoryFile = cfgLocation\n return\n\t\t}\n \n\t}\n\t\/\/folder dsql\n\tabsPath, _ := filepath.Abs(path.Dir(os.Args[0]))\n\tinAppLocation := path.Join(absPath, repositoryFile)\n\tif _, err := os.Stat(inAppLocation); err == nil {\n\t\trepositoryFile = inAppLocation\n\t\treturn\n\t}\n\tif cfgLocation != \"\" {\n\t\trepositoryFile = cfgLocation\n\t}\n}\n\n\/\/SetRepositoryFile - set new location repository file\nfunc SetRepositoryFile(filename string) {\n\tif !isDefaultRepo {\n\t\tpanic(fmt.Errorf(\"can't twice change repository file \"))\n\t}\n\tif filename != \"\" {\n\t\tisDefaultRepo = false\n\t\trepositoryFile = filename\n\t}\n}\n\n\/\/GetRepositoryFile - get current location repository file\nfunc GetRepositoryFile() string {\n\treturn repositoryFile\n}\n\n\/\/IsDefault returns location repository file is default\nfunc IsDefault() bool {\n\treturn isDefaultRepo\n}\n\n\/\/PrepareLocation - make directories for repository files\nfunc PrepareLocation() {\n\tdir := path.Dir(repositoryFile)\n\tif dir == \"\" {\n\t\treturn\n\t}\n\tperm := 0700\n\tif err := os.MkdirAll(dir, os.FileMode(perm)); err != nil {\n\t\tlogger.Error.Println(err)\n\t}\n}\n<commit_msg>repo file: fix appdata<commit_after>package repofile\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\n\t\"github.com\/arteev\/logger\"\n \n)\n\n\/\/RepositoryFile - current file of repository\nvar (\n\trepositoryFile = \"repository.sqlite\"\n\tisDefaultRepo = true\n)\n\nfunc init() {\n\tsearchLocation()\n}\n\nfunc searchLocation() {\n\t\/\/workdir\n\tif _, err := os.Stat(repositoryFile); err == nil {\n\t\treturn\n\t}\n\tvar cfgLocation string\n\t\/\/appdata | ~\/.config\n\tif u, err := user.Current(); err == nil {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcfgLocation = filepath.Join(u.HomeDir, os.Getenv(\"APPDATA\"), \"dsql\", repositoryFile)\n\t\t} else {\n\t\t\tcfgLocation = filepath.Join(u.HomeDir, \".config\", \"dsql\", repositoryFile)\n\t\t}\n\t\tif _, err := os.Stat(cfgLocation); err == nil {\n\t\t\trepositoryFile = cfgLocation\n return\n\t\t}\n \n\t}\n\t\/\/folder dsql\n\tabsPath, _ := filepath.Abs(path.Dir(os.Args[0]))\n\tinAppLocation := path.Join(absPath, repositoryFile)\n\tif _, err := os.Stat(inAppLocation); err == nil {\n\t\trepositoryFile = inAppLocation\n\t\treturn\n\t}\n\tif cfgLocation != \"\" {\n\t\trepositoryFile = cfgLocation\n\t}\n}\n\n\/\/SetRepositoryFile - set new location repository file\nfunc SetRepositoryFile(filename string) {\n\tif !isDefaultRepo {\n\t\tpanic(fmt.Errorf(\"can't twice change repository file \"))\n\t}\n\tif filename != \"\" {\n\t\tisDefaultRepo = false\n\t\trepositoryFile = filename\n\t}\n}\n\n\/\/GetRepositoryFile - get current location repository file\nfunc GetRepositoryFile() string {\n\treturn repositoryFile\n}\n\n\/\/IsDefault returns location repository file is default\nfunc IsDefault() bool {\n\treturn isDefaultRepo\n}\n\n\/\/PrepareLocation - make directories for repository files\nfunc PrepareLocation() {\n\tdir := filepath.Dir(repositoryFile) \n if dir == \"\" || dir == \".\" {\n\t\treturn\n\t} \n\tperm := 0700\n \n\tif err := os.MkdirAll(dir, os.FileMode(perm)); err != nil {\n\t\tlogger.Error.Println(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build istio\n\n\/*\n * Copyright (C) 2018 IBM, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/topology\/probes\/istio\"\n\t\"testing\"\n)\n\nconst (\n\tbookinfo = \"https:\/\/raw.githubusercontent.com\/istio\/istio\/release-1.0\/samples\/bookinfo\"\n)\n\n\/* -- test creation of single resource -- *\/\nfunc TestIstioDestinationRuleNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"destinationrule\", objName+\"-destinationrule\")\n}\n\nfunc TestIstioGatewayNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"gateway\", objName+\"-gateway\")\n}\n\nfunc TestIstioServiceEntryNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"serviceentry\", objName+\"-serviceentry\")\n}\n\nfunc TestIstioQuotaSpecNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"quotaspec\", objName+\"-quotaspec\")\n}\n\nfunc TestIstioQuotaSpecBindingNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"quotaspecbinding\", objName+\"-quotaspecbinding\")\n}\n\nfunc TestIstioVirtualServiceNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"virtualservice\", objName+\"-virtualservice\")\n}\n\nfunc TestBookInfoScenario(t *testing.T) {\n\ttestRunner(\n\t\tt,\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/destination-rule-all.yaml\", true},\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/bookinfo-gateway.yaml\", true},\n\t\t},\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/platform\/kube\/cleanup.sh\", false},\n\t\t},\n\t\t[]CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\t\/\/ check nodes exist\n\t\t\t\t_, err := checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"details\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"productpage\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"ratings\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"reviews\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"gateway\", \"Name\", \"bookinfo-gateway\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n<commit_msg>istio: extend bookinfo scenario<commit_after>\/\/ +build istio\n\n\/*\n * Copyright (C) 2018 IBM, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/topology\/probes\/istio\"\n\t\"testing\"\n)\n\nconst (\n\tbookinfo = \"https:\/\/raw.githubusercontent.com\/istio\/istio\/release-1.0\/samples\/bookinfo\"\n)\n\n\/* -- test creation of single resource -- *\/\nfunc TestIstioDestinationRuleNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"destinationrule\", objName+\"-destinationrule\")\n}\n\nfunc TestIstioGatewayNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"gateway\", objName+\"-gateway\")\n}\n\nfunc TestIstioServiceEntryNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"serviceentry\", objName+\"-serviceentry\")\n}\n\nfunc TestIstioQuotaSpecNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"quotaspec\", objName+\"-quotaspec\")\n}\n\nfunc TestIstioQuotaSpecBindingNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"quotaspecbinding\", objName+\"-quotaspecbinding\")\n}\n\nfunc TestIstioVirtualServiceNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"virtualservice\", objName+\"-virtualservice\")\n}\n\nfunc TestBookInfoScenario(t *testing.T) {\n\ttestRunner(\n\t\tt,\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/destination-rule-all.yaml\", true},\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/bookinfo-gateway.yaml\", true},\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/platform\/kube\/bookinfo.yaml\", true},\n\t\t},\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/platform\/kube\/cleanup.sh\", false},\n\t\t},\n\t\t[]CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\t\/\/ check nodes exist\n\t\t\t\t_, err := checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"details\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"productpage\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"ratings\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"reviews\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"gateway\", \"Name\", \"bookinfo-gateway\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"virtualservice\", \"Name\", \"bookinfo\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build istio\n\n\/*\n * Copyright (C) 2018 IBM, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/topology\/probes\/istio\"\n\t\"testing\"\n)\n\nconst (\n\tbookinfo = \"https:\/\/raw.githubusercontent.com\/istio\/istio\/release-1.0\/samples\/bookinfo\"\n)\n\n\/* -- test creation of single resource -- *\/\nfunc TestIstioClusterNode(t *testing.T) {\n\ttestNodeCreation(t, nil, nil, istio.Manager, \"cluster\", istio.ClusterName)\n}\n\nfunc TestIstioDestinationRuleNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"destinationrule\", objName+\"-destinationrule\")\n}\n\nfunc TestBookInfoScenario(t *testing.T) {\n\ttestRunner(\n\t\tt,\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/destination-rule-all.yaml\", true},\n\t\t},\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/platform\/kube\/cleanup.sh\", false},\n\t\t},\n\t\t[]CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\t\/\/ check nodes exist\n\t\t\t\t_, err := checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"details\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"productpage\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"ratings\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"reviews\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n<commit_msg>istio: add gateway test<commit_after>\/\/ +build istio\n\n\/*\n * Copyright (C) 2018 IBM, Inc.\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n *\n *\/\n\npackage tests\n\nimport (\n\t\"github.com\/skydive-project\/skydive\/topology\/probes\/istio\"\n\t\"testing\"\n)\n\nconst (\n\tbookinfo = \"https:\/\/raw.githubusercontent.com\/istio\/istio\/release-1.0\/samples\/bookinfo\"\n)\n\n\/* -- test creation of single resource -- *\/\nfunc TestIstioClusterNode(t *testing.T) {\n\ttestNodeCreation(t, nil, nil, istio.Manager, \"cluster\", istio.ClusterName)\n}\n\nfunc TestIstioDestinationRuleNode(t *testing.T) {\n\ttestNodeCreationFromConfig(t, istio.Manager, \"destinationrule\", objName+\"-destinationrule\")\n}\n\nfunc TestBookInfoScenario(t *testing.T) {\n\ttestRunner(\n\t\tt,\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/destination-rule-all.yaml\", true},\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/networking\/bookinfo-gateway.yaml\", true},\n\t\t},\n\t\t[]Cmd{\n\t\t\t{\"kubectl apply -f \" + bookinfo + \"\/platform\/kube\/cleanup.sh\", false},\n\t\t},\n\t\t[]CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\t\/\/ check nodes exist\n\t\t\t\t_, err := checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"details\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"productpage\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"ratings\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"destinationrule\", \"Name\", \"reviews\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t_, err = checkNodeCreation(t, c, istio.Manager, \"gateway\", \"Name\", \"bookinfo-gateway\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"go\/format\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype Unit struct {\n\tName string\n\tReceiver string\n\tOffset int \/\/ From normal (for example, mass base unit is kg, not kg)\n\tPrintString string \/\/ print string for the unit (kg for mass)\n\tExtraConstant []Constant\n\tSuffix string\n\tSingular string\n\tTypeComment string \/\/ Text to comment the type\n\tDimensions []Dimension\n\tErForm string \/\/For Xxxer interface\n}\n\ntype Dimension struct {\n\tName string\n\tPower int\n}\n\nconst (\n\tTimeName string = \"TimeDim\"\n\tLengthName string = \"LengthDim\"\n\tMassName string = \"MassDim\"\n)\n\ntype Constant struct {\n\tName string\n\tValue string\n}\n\ntype Prefix struct {\n\tName string\n\tPower int\n}\n\nvar Prefixes = []Prefix{\n\t{\n\t\tName: \"Yotta\",\n\t\tPower: 24,\n\t},\n\t{\n\t\tName: \"Zetta\",\n\t\tPower: 21,\n\t},\n\t{\n\t\tName: \"Exa\",\n\t\tPower: 18,\n\t},\n\t{\n\t\tName: \"Peta\",\n\t\tPower: 15,\n\t},\n\t{\n\t\tName: \"Tera\",\n\t\tPower: 12,\n\t},\n\t{\n\t\tName: \"Giga\",\n\t\tPower: 9,\n\t},\n\t{\n\t\tName: \"Mega\",\n\t\tPower: 6,\n\t},\n\t{\n\t\tName: \"Kilo\",\n\t\tPower: 3,\n\t},\n\t{\n\t\tName: \"Hecto\",\n\t\tPower: 2,\n\t},\n\t{\n\t\tName: \"Deca\",\n\t\tPower: 1,\n\t},\n\t{\n\t\tName: \"\",\n\t\tPower: 0,\n\t},\n\t{\n\t\tName: \"Deci\",\n\t\tPower: -1,\n\t},\n\t{\n\t\tName: \"Centi\",\n\t\tPower: -2,\n\t},\n\t{\n\t\tName: \"Milli\",\n\t\tPower: -3,\n\t},\n\t{\n\t\tName: \"Micro\",\n\t\tPower: -6,\n\t},\n\t{\n\t\tName: \"Nano\",\n\t\tPower: -9,\n\t},\n\t{\n\t\tName: \"Pico\",\n\t\tPower: -12,\n\t},\n\t{\n\t\tName: \"Femto\",\n\t\tPower: -15,\n\t},\n\t{\n\t\tName: \"Atto\",\n\t\tPower: -18,\n\t},\n\t{\n\t\tName: \"Zepto\",\n\t\tPower: -21,\n\t},\n\t{\n\t\tName: \"Yocto\",\n\t\tPower: -24,\n\t},\n}\n\nvar Units = []Unit{\n\t{\n\t\tName: \"Mass\",\n\t\tReceiver: \"m\",\n\t\tOffset: -3,\n\t\tPrintString: \"kg\",\n\t\tSuffix: \"gram\",\n\t\tSingular: \"Gram\",\n\t\tTypeComment: \"Mass represents a mass in kilograms\",\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: MassName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"Length\",\n\t\tReceiver: \"l\",\n\t\tPrintString: \"m\",\n\t\tSuffix: \"meter\",\n\t\tSingular: \"Meter\",\n\t\tTypeComment: \"Length represents a length in meters\",\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: LengthName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"Time\",\n\t\tReceiver: \"t\",\n\t\tPrintString: \"s\",\n\t\tSuffix: \"second\",\n\t\tSingular: \"Second\",\n\t\tTypeComment: \"Time represents a time in seconds\",\n\t\tExtraConstant: []Constant{\n\t\t\t{\n\t\t\t\tName: \"Hour\",\n\t\t\t\tValue: \"3600\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"Minute\",\n\t\t\t\tValue: \"60\",\n\t\t\t},\n\t\t},\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: TimeName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t\tErForm: \"Timer\",\n\t},\n}\n\nvar gopath string\nvar unitPkgPath string\n\nfunc init() {\n\tgopath = os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\tlog.Fatal(\"no gopath\")\n\t}\n\n\tunitPkgPath = filepath.Join(gopath, \"src\", \"github.com\", \"gonum\", \"unit\")\n}\n\n\/\/ Generate generates a file for each of the units\nfunc main() {\n\tfor _, unit := range Units {\n\t\tgenerate(unit)\n\t}\n}\n\nvar headerTemplate = `\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\npackage unit\n\n\/\/ This file is autogenerated by github.com\/gonum\/unit\/autogen\n\/\/ Changes should be made to the autogenerated template rather than this one\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n)\n\ntype {{.Name}} float64\n`\n\nvar constTemplate = `\nconst(\n\t{{$unit := .Unit}}\n\t{{range $unit.ExtraConstant}} {{.Name}} {{$unit.Name}} = {{.Value}}\n\t{{end}}\n\t{{$prefixes := .Prefixes}}\n\t{{range $prefixes}} {{if .Name}} {{.Name}}{{$unit.Suffix}} {{else}} {{$unit.Singular}} {{end}} {{$unit.Name}} = 1e{{.Power}}\n\t{{end}}\n)\n`\n\nvar methodTemplate = `\n\/\/ Unit converts the {{.Name}} to a *Unit\nfunc ({{.Receiver}} {{.Name}}) Unit() *Unit{\n\treturn New(float64({{.Receiver}}), Dimensions{\n\t\t{{range .Dimensions}} {{.Name}}: {{.Power}},\n\t\t{{end}}\n\t\t})\n}\n\n\/\/ {{.Name}} allows {{.Name}} to implement a {{.ErForm}} interface\nfunc ({{.Receiver}} {{.Name}}) {{.Name}}() {{.Name}} {\n\treturn {{.Receiver}}\n}\n\n\/\/ From converts the unit into the {{.Receiver}}. From returns an\n\/\/ error if there is a mismatch in dimension\nfunc ({{.Receiver}} *{{.Name}}) From(u Uniter) error{\n\tif !DimensionsMatch(u, {{.Singular}}){\n\t\t(*{{.Receiver}}) = {{.Name}}(math.NaN())\n\t\treturn errors.New(\"Dimension mismatch\")\n\t}\n\t(*{{.Receiver}}) = {{.Name}}(u.Unit().Value())\n\treturn nil\n}\n`\n\nvar formatTemplate = `\nfunc ({{.Receiver}} {{.Name}}) Format(fs fmt.State, c rune){\n\tswitch c {\n\tcase 'v':\n\t\tif fs.Flag('#') {\n\t\t\tfmt.Fprintf(fs, \"%T(%v)\", {{.Receiver}}, float64({{.Receiver}}))\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 'e', 'E', 'f', 'F', 'g', 'G':\n\t\tp, pOk := fs.Precision()\n\t\tif !pOk {\n\t\t\tp = -1\n\t\t}\n\t\tw, wOk := fs.Width()\n\t\tif !wOk {\n\t\t\tw = -1\n\t\t}\n\t\tfmt.Fprintf(fs, \"%*.*\"+string(c), w, p, float64({{.Receiver}}))\n\t\tfmt.Fprint(fs, \" {{.PrintString}}\")\n\tdefault:\n\t\tfmt.Fprintf(fs, \"%%!%c(%T=%g {{.PrintString}})\", c, {{.Receiver}}, float64({{.Receiver}}))\n\treturn\n}\n}\n`\n\nfunc generate(unit Unit) {\n\tlowerName := strings.ToLower(unit.Name)\n\tfilename := filepath.Join(unitPkgPath, lowerName+\".go\")\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t\/\/ Need to define new prefixes because text\/template can't do math.\n\t\/\/ Need to do math because kilogram = 1 not 10^3\n\n\tprefixes := make([]Prefix, len(Prefixes))\n\tfor i, p := range Prefixes {\n\t\tprefixes[i].Name = p.Name\n\t\tprefixes[i].Power = p.Power + unit.Offset\n\t}\n\n\tdata := struct {\n\t\tPrefixes []Prefix\n\t\tUnit Unit\n\t}{\n\t\tprefixes,\n\t\tunit,\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0))\n\n\theader := template.Must(template.New(\"header\").Parse(headerTemplate))\n\terr = header.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprefix := template.Must(template.New(\"prefix\").Parse(constTemplate))\n\terr = prefix.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmethods := template.Must(template.New(\"methods\").Parse(methodTemplate))\n\terr = methods.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tform := template.Must(template.New(\"format\").Parse(formatTemplate))\n\terr = form.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tf.Write(buf.Bytes()) \/\/ This is here to debug bad format\n\t\tlog.Fatalf(\"error formatting: %s\", err)\n\t}\n\n\tf.Write(b)\n}\n<commit_msg>Added extra newline between copyright and package-comment<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"go\/format\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype Unit struct {\n\tName string\n\tReceiver string\n\tOffset int \/\/ From normal (for example, mass base unit is kg, not kg)\n\tPrintString string \/\/ print string for the unit (kg for mass)\n\tExtraConstant []Constant\n\tSuffix string\n\tSingular string\n\tTypeComment string \/\/ Text to comment the type\n\tDimensions []Dimension\n\tErForm string \/\/For Xxxer interface\n}\n\ntype Dimension struct {\n\tName string\n\tPower int\n}\n\nconst (\n\tTimeName string = \"TimeDim\"\n\tLengthName string = \"LengthDim\"\n\tMassName string = \"MassDim\"\n)\n\ntype Constant struct {\n\tName string\n\tValue string\n}\n\ntype Prefix struct {\n\tName string\n\tPower int\n}\n\nvar Prefixes = []Prefix{\n\t{\n\t\tName: \"Yotta\",\n\t\tPower: 24,\n\t},\n\t{\n\t\tName: \"Zetta\",\n\t\tPower: 21,\n\t},\n\t{\n\t\tName: \"Exa\",\n\t\tPower: 18,\n\t},\n\t{\n\t\tName: \"Peta\",\n\t\tPower: 15,\n\t},\n\t{\n\t\tName: \"Tera\",\n\t\tPower: 12,\n\t},\n\t{\n\t\tName: \"Giga\",\n\t\tPower: 9,\n\t},\n\t{\n\t\tName: \"Mega\",\n\t\tPower: 6,\n\t},\n\t{\n\t\tName: \"Kilo\",\n\t\tPower: 3,\n\t},\n\t{\n\t\tName: \"Hecto\",\n\t\tPower: 2,\n\t},\n\t{\n\t\tName: \"Deca\",\n\t\tPower: 1,\n\t},\n\t{\n\t\tName: \"\",\n\t\tPower: 0,\n\t},\n\t{\n\t\tName: \"Deci\",\n\t\tPower: -1,\n\t},\n\t{\n\t\tName: \"Centi\",\n\t\tPower: -2,\n\t},\n\t{\n\t\tName: \"Milli\",\n\t\tPower: -3,\n\t},\n\t{\n\t\tName: \"Micro\",\n\t\tPower: -6,\n\t},\n\t{\n\t\tName: \"Nano\",\n\t\tPower: -9,\n\t},\n\t{\n\t\tName: \"Pico\",\n\t\tPower: -12,\n\t},\n\t{\n\t\tName: \"Femto\",\n\t\tPower: -15,\n\t},\n\t{\n\t\tName: \"Atto\",\n\t\tPower: -18,\n\t},\n\t{\n\t\tName: \"Zepto\",\n\t\tPower: -21,\n\t},\n\t{\n\t\tName: \"Yocto\",\n\t\tPower: -24,\n\t},\n}\n\nvar Units = []Unit{\n\t{\n\t\tName: \"Mass\",\n\t\tReceiver: \"m\",\n\t\tOffset: -3,\n\t\tPrintString: \"kg\",\n\t\tSuffix: \"gram\",\n\t\tSingular: \"Gram\",\n\t\tTypeComment: \"Mass represents a mass in kilograms\",\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: MassName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"Length\",\n\t\tReceiver: \"l\",\n\t\tPrintString: \"m\",\n\t\tSuffix: \"meter\",\n\t\tSingular: \"Meter\",\n\t\tTypeComment: \"Length represents a length in meters\",\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: LengthName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"Time\",\n\t\tReceiver: \"t\",\n\t\tPrintString: \"s\",\n\t\tSuffix: \"second\",\n\t\tSingular: \"Second\",\n\t\tTypeComment: \"Time represents a time in seconds\",\n\t\tExtraConstant: []Constant{\n\t\t\t{\n\t\t\t\tName: \"Hour\",\n\t\t\t\tValue: \"3600\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"Minute\",\n\t\t\t\tValue: \"60\",\n\t\t\t},\n\t\t},\n\t\tDimensions: []Dimension{\n\t\t\t{\n\t\t\t\tName: TimeName,\n\t\t\t\tPower: 1,\n\t\t\t},\n\t\t},\n\t\tErForm: \"Timer\",\n\t},\n}\n\nvar gopath string\nvar unitPkgPath string\n\nfunc init() {\n\tgopath = os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\tlog.Fatal(\"no gopath\")\n\t}\n\n\tunitPkgPath = filepath.Join(gopath, \"src\", \"github.com\", \"gonum\", \"unit\")\n}\n\n\/\/ Generate generates a file for each of the units\nfunc main() {\n\tfor _, unit := range Units {\n\t\tgenerate(unit)\n\t}\n}\n\nvar headerTemplate = `\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage unit\n\n\/\/ This file is autogenerated by github.com\/gonum\/unit\/autogen\n\/\/ Changes should be made to the autogenerated template rather than this one\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n)\n\ntype {{.Name}} float64\n`\n\nvar constTemplate = `\nconst(\n\t{{$unit := .Unit}}\n\t{{range $unit.ExtraConstant}} {{.Name}} {{$unit.Name}} = {{.Value}}\n\t{{end}}\n\t{{$prefixes := .Prefixes}}\n\t{{range $prefixes}} {{if .Name}} {{.Name}}{{$unit.Suffix}} {{else}} {{$unit.Singular}} {{end}} {{$unit.Name}} = 1e{{.Power}}\n\t{{end}}\n)\n`\n\nvar methodTemplate = `\n\/\/ Unit converts the {{.Name}} to a *Unit\nfunc ({{.Receiver}} {{.Name}}) Unit() *Unit{\n\treturn New(float64({{.Receiver}}), Dimensions{\n\t\t{{range .Dimensions}} {{.Name}}: {{.Power}},\n\t\t{{end}}\n\t\t})\n}\n\n\/\/ {{.Name}} allows {{.Name}} to implement a {{.ErForm}} interface\nfunc ({{.Receiver}} {{.Name}}) {{.Name}}() {{.Name}} {\n\treturn {{.Receiver}}\n}\n\n\/\/ From converts the unit into the {{.Receiver}}. From returns an\n\/\/ error if there is a mismatch in dimension\nfunc ({{.Receiver}} *{{.Name}}) From(u Uniter) error{\n\tif !DimensionsMatch(u, {{.Singular}}){\n\t\t(*{{.Receiver}}) = {{.Name}}(math.NaN())\n\t\treturn errors.New(\"Dimension mismatch\")\n\t}\n\t(*{{.Receiver}}) = {{.Name}}(u.Unit().Value())\n\treturn nil\n}\n`\n\nvar formatTemplate = `\nfunc ({{.Receiver}} {{.Name}}) Format(fs fmt.State, c rune){\n\tswitch c {\n\tcase 'v':\n\t\tif fs.Flag('#') {\n\t\t\tfmt.Fprintf(fs, \"%T(%v)\", {{.Receiver}}, float64({{.Receiver}}))\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 'e', 'E', 'f', 'F', 'g', 'G':\n\t\tp, pOk := fs.Precision()\n\t\tif !pOk {\n\t\t\tp = -1\n\t\t}\n\t\tw, wOk := fs.Width()\n\t\tif !wOk {\n\t\t\tw = -1\n\t\t}\n\t\tfmt.Fprintf(fs, \"%*.*\"+string(c), w, p, float64({{.Receiver}}))\n\t\tfmt.Fprint(fs, \" {{.PrintString}}\")\n\tdefault:\n\t\tfmt.Fprintf(fs, \"%%!%c(%T=%g {{.PrintString}})\", c, {{.Receiver}}, float64({{.Receiver}}))\n\treturn\n}\n}\n`\n\nfunc generate(unit Unit) {\n\tlowerName := strings.ToLower(unit.Name)\n\tfilename := filepath.Join(unitPkgPath, lowerName+\".go\")\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t\/\/ Need to define new prefixes because text\/template can't do math.\n\t\/\/ Need to do math because kilogram = 1 not 10^3\n\n\tprefixes := make([]Prefix, len(Prefixes))\n\tfor i, p := range Prefixes {\n\t\tprefixes[i].Name = p.Name\n\t\tprefixes[i].Power = p.Power + unit.Offset\n\t}\n\n\tdata := struct {\n\t\tPrefixes []Prefix\n\t\tUnit Unit\n\t}{\n\t\tprefixes,\n\t\tunit,\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0))\n\n\theader := template.Must(template.New(\"header\").Parse(headerTemplate))\n\terr = header.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprefix := template.Must(template.New(\"prefix\").Parse(constTemplate))\n\terr = prefix.Execute(buf, data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmethods := template.Must(template.New(\"methods\").Parse(methodTemplate))\n\terr = methods.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tform := template.Must(template.New(\"format\").Parse(formatTemplate))\n\terr = form.Execute(buf, unit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tf.Write(buf.Bytes()) \/\/ This is here to debug bad format\n\t\tlog.Fatalf(\"error formatting: %s\", err)\n\t}\n\n\tf.Write(b)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccversion\"\n\t\"code.cloudfoundry.org\/cli\/cf\/commandregistry\"\n\t\"code.cloudfoundry.org\/cli\/cf\/flags\"\n\t. \"code.cloudfoundry.org\/cli\/cf\/i18n\"\n\t\"code.cloudfoundry.org\/cli\/command\"\n\t\"code.cloudfoundry.org\/cli\/command\/translatableerror\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/authentication\"\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/organizations\"\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/spaces\"\n\t\"code.cloudfoundry.org\/cli\/cf\/configuration\/coreconfig\"\n\t\"code.cloudfoundry.org\/cli\/cf\/models\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\"\n\t\"code.cloudfoundry.org\/cli\/cf\/terminal\"\n)\n\nconst maxLoginTries = 3\nconst maxChoices = 50\n\ntype Login struct {\n\tui terminal.UI\n\tconfig coreconfig.ReadWriter\n\tauthenticator authentication.Repository\n\tendpointRepo coreconfig.EndpointRepository\n\torgRepo organizations.OrganizationRepository\n\tspaceRepo spaces.SpaceRepository\n}\n\nfunc init() {\n\tcommandregistry.Register(&Login{})\n}\n\nfunc (cmd *Login) MetaData() commandregistry.CommandMetadata {\n\tfs := make(map[string]flags.FlagSet)\n\tfs[\"a\"] = &flags.StringFlag{ShortName: \"a\", Usage: T(\"API endpoint (e.g. https:\/\/api.example.com)\")}\n\tfs[\"u\"] = &flags.StringFlag{ShortName: \"u\", Usage: T(\"Username\")}\n\tfs[\"p\"] = &flags.StringFlag{ShortName: \"p\", Usage: T(\"Password\")}\n\tfs[\"o\"] = &flags.StringFlag{ShortName: \"o\", Usage: T(\"Org\")}\n\tfs[\"s\"] = &flags.StringFlag{ShortName: \"s\", Usage: T(\"Space\")}\n\tfs[\"sso\"] = &flags.BoolFlag{Name: \"sso\", Usage: T(\"Prompt for a one-time passcode to login\")}\n\tfs[\"sso-passcode\"] = &flags.StringFlag{Name: \"sso-passcode\", Usage: T(\"One-time passcode\")}\n\tfs[\"skip-ssl-validation\"] = &flags.BoolFlag{Name: \"skip-ssl-validation\", Usage: T(\"Skip verification of the API endpoint. Not recommended!\")}\n\n\treturn commandregistry.CommandMetadata{\n\t\tName: \"login\",\n\t\tShortName: \"l\",\n\t\tDescription: T(\"Log user in\"),\n\t\tUsage: []string{\n\t\t\tT(\"CF_NAME login [-a API_URL] [-u USERNAME] [-p PASSWORD] [-o ORG] [-s SPACE] [--sso | --sso-passcode PASSCODE]\\n\\n\"),\n\t\t\tterminal.WarningColor(T(\"WARNING:\\n Providing your password as a command line option is highly discouraged\\n Your password may be visible to others and may be recorded in your shell history\")),\n\t\t},\n\t\tExamples: []string{\n\t\t\tT(\"CF_NAME login (omit username and password to login interactively -- CF_NAME will prompt for both)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p pa55woRD (specify username and password as arguments)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p \\\"my password\\\" (use quotes for passwords with a space)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p \\\"\\\\\\\"password\\\\\\\"\\\" (escape quotes if used in password)\"),\n\t\t\tT(\"CF_NAME login --sso (CF_NAME will provide a url to obtain a one-time passcode to login)\"),\n\t\t},\n\t\tFlags: fs,\n\t}\n}\n\nfunc (cmd *Login) Requirements(requirementsFactory requirements.Factory, fc flags.FlagContext) ([]requirements.Requirement, error) {\n\treqs := []requirements.Requirement{}\n\treturn reqs, nil\n}\n\nfunc (cmd *Login) SetDependency(deps commandregistry.Dependency, pluginCall bool) commandregistry.Command {\n\tcmd.ui = deps.UI\n\tcmd.config = deps.Config\n\tcmd.authenticator = deps.RepoLocator.GetAuthenticationRepository()\n\tcmd.endpointRepo = deps.RepoLocator.GetEndpointRepository()\n\tcmd.orgRepo = deps.RepoLocator.GetOrganizationRepository()\n\tcmd.spaceRepo = deps.RepoLocator.GetSpaceRepository()\n\treturn cmd\n}\n\nfunc (cmd *Login) Execute(c flags.FlagContext) error {\n\tcmd.config.ClearSession()\n\n\tendpoint, skipSSL := cmd.decideEndpoint(c)\n\n\tapi := API{\n\t\tui: cmd.ui,\n\t\tconfig: cmd.config,\n\t\tendpointRepo: cmd.endpointRepo,\n\t}\n\terr := api.setAPIEndpoint(endpoint, skipSSL, cmd.MetaData().Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = command.MinimumCCAPIVersionCheck(cmd.config.APIVersion(), ccversion.MinSupportedV2ClientVersion)\n\tif err != nil {\n\t\tif _, ok := err.(translatableerror.MinimumCFAPIVersionNotMetError); ok {\n\t\t\tcmd.ui.Warn(\"Your API version is no longer supported. Upgrade to a newer version of the API.\")\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tcmd.ui.Say(\"\")\n\t\tcmd.ui.ShowConfiguration(cmd.config)\n\t}()\n\n\t\/\/ We thought we would never need to explicitly branch in this code\n\t\/\/ for anything as simple as authentication, but it turns out that our\n\t\/\/ assumptions did not match reality.\n\n\t\/\/ When SAML is enabled (but not configured) then the UAA\/Login server\n\t\/\/ will always returns password prompts that includes the Passcode field.\n\t\/\/ Users can authenticate with:\n\t\/\/ EITHER username and password\n\t\/\/ OR a one-time passcode\n\n\tswitch {\n\tcase c.Bool(\"sso\") && c.IsSet(\"sso-passcode\"):\n\t\treturn errors.New(T(\"Incorrect usage: --sso-passcode flag cannot be used with --sso\"))\n\tcase c.Bool(\"sso\") || c.IsSet(\"sso-passcode\"):\n\t\terr = cmd.authenticateSSO(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\terr = cmd.authenticate(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\torgIsSet, err := cmd.setOrganization(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif orgIsSet {\n\t\terr = cmd.setSpace(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tcmd.ui.NotifyUpdateIfNeeded(cmd.config)\n\treturn nil\n}\n\nfunc (cmd Login) decideEndpoint(c flags.FlagContext) (string, bool) {\n\tendpoint := c.String(\"a\")\n\tskipSSL := c.Bool(\"skip-ssl-validation\")\n\tif endpoint == \"\" {\n\t\tendpoint = cmd.config.APIEndpoint()\n\t\tskipSSL = cmd.config.IsSSLDisabled() || skipSSL\n\t}\n\n\tif endpoint == \"\" {\n\t\tendpoint = cmd.ui.Ask(T(\"API endpoint\"))\n\t} else {\n\t\tcmd.ui.Say(T(\"API endpoint: {{.Endpoint}}\", map[string]interface{}{\"Endpoint\": terminal.EntityNameColor(endpoint)}))\n\t}\n\n\treturn endpoint, skipSSL\n}\n\nfunc (cmd Login) authenticateSSO(c flags.FlagContext) error {\n\tprompts, err := cmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcredentials := make(map[string]string)\n\tpasscode := prompts[\"passcode\"]\n\n\tfor i := 0; i < maxLoginTries; i++ {\n\t\tif c.IsSet(\"sso-passcode\") && i == 0 {\n\t\t\tcredentials[\"passcode\"] = c.String(\"sso-passcode\")\n\t\t} else {\n\t\t\tcredentials[\"passcode\"] = cmd.ui.AskForPassword(passcode.DisplayName)\n\t\t}\n\n\t\tcmd.ui.Say(T(\"Authenticating...\"))\n\t\terr = cmd.authenticator.Authenticate(credentials)\n\n\t\tif err == nil {\n\t\t\tcmd.ui.Ok()\n\t\t\tcmd.ui.Say(\"\")\n\t\t\tbreak\n\t\t}\n\n\t\tcmd.ui.Say(err.Error())\n\t}\n\n\tif err != nil {\n\t\treturn errors.New(T(\"Unable to authenticate.\"))\n\t}\n\treturn nil\n}\n\nfunc (cmd Login) authenticate(c flags.FlagContext) error {\n\tif cmd.config.UAAGrantType() == \"client_credentials\" {\n\t\treturn errors.New(T(\"Service account currently logged in. Use 'cf logout' to log out service account and try again.\"))\n\t}\n\n\tusernameFlagValue := c.String(\"u\")\n\tpasswordFlagValue := c.String(\"p\")\n\n\tprompts, err := cmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpasswordKeys := []string{}\n\tcredentials := make(map[string]string)\n\n\tif value, ok := prompts[\"username\"]; ok {\n\t\tif prompts[\"username\"].Type == coreconfig.AuthPromptTypeText && usernameFlagValue != \"\" {\n\t\t\tcredentials[\"username\"] = usernameFlagValue\n\t\t} else {\n\t\t\tcredentials[\"username\"] = cmd.ui.Ask(value.DisplayName)\n\t\t}\n\t}\n\n\tfor key, prompt := range prompts {\n\t\tif prompt.Type == coreconfig.AuthPromptTypePassword {\n\t\t\tif key == \"passcode\" || key == \"password\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpasswordKeys = append(passwordKeys, key)\n\t\t} else if key == \"username\" {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tcredentials[key] = cmd.ui.Ask(prompt.DisplayName)\n\t\t}\n\t}\n\n\tfor i := 0; i < maxLoginTries; i++ {\n\n\t\t\/\/ ensure that password gets prompted before other codes (eg. mfa code)\n\t\tif passPrompt, ok := prompts[\"password\"]; ok {\n\t\t\tif passwordFlagValue != \"\" {\n\t\t\t\tcredentials[\"password\"] = passwordFlagValue\n\t\t\t\tpasswordFlagValue = \"\"\n\t\t\t} else {\n\t\t\t\tcredentials[\"password\"] = cmd.ui.AskForPassword(passPrompt.DisplayName)\n\t\t\t}\n\t\t}\n\n\t\tfor _, key := range passwordKeys {\n\t\t\tcredentials[key] = cmd.ui.AskForPassword(prompts[key].DisplayName)\n\t\t}\n\n\t\tcredentialsCopy := make(map[string]string, len(credentials))\n\t\tfor k, v := range credentials {\n\t\t\tcredentialsCopy[k] = v\n\t\t}\n\n\t\tcmd.ui.Say(T(\"Authenticating...\"))\n\t\terr = cmd.authenticator.Authenticate(credentialsCopy)\n\n\t\tif err == nil {\n\t\t\tcmd.ui.Ok()\n\t\t\tcmd.ui.Say(\"\")\n\t\t\tbreak\n\t\t}\n\n\t\tcmd.ui.Say(err.Error())\n\t}\n\n\tif err != nil {\n\t\treturn errors.New(T(\"Unable to authenticate.\"))\n\t}\n\treturn nil\n}\n\nfunc (cmd Login) setOrganization(c flags.FlagContext) (bool, error) {\n\torgName := c.String(\"o\")\n\n\tif orgName == \"\" {\n\t\torgs, err := cmd.orgRepo.ListOrgs(maxChoices)\n\t\tif err != nil {\n\t\t\treturn false, errors.New(T(\"Error finding available orgs\\n{{.APIErr}}\",\n\t\t\t\tmap[string]interface{}{\"APIErr\": err.Error()}))\n\t\t}\n\n\t\tswitch len(orgs) {\n\t\tcase 0:\n\t\t\treturn false, nil\n\t\tcase 1:\n\t\t\tcmd.targetOrganization(orgs[0])\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\torgName = cmd.promptForOrgName(orgs)\n\t\t\tif orgName == \"\" {\n\t\t\t\tcmd.ui.Say(\"\")\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\torg, err := cmd.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn false, errors.New(T(\"Error finding org {{.OrgName}}\\n{{.Err}}\",\n\t\t\tmap[string]interface{}{\"OrgName\": terminal.EntityNameColor(orgName), \"Err\": err.Error()}))\n\t}\n\n\tcmd.targetOrganization(org)\n\treturn true, nil\n}\n\nfunc (cmd Login) promptForOrgName(orgs []models.Organization) string {\n\torgNames := []string{}\n\tfor _, org := range orgs {\n\t\torgNames = append(orgNames, org.Name)\n\t}\n\n\treturn cmd.promptForName(orgNames, T(\"Select an org (or press enter to skip):\"), \"Org\")\n}\n\nfunc (cmd Login) targetOrganization(org models.Organization) {\n\tcmd.config.SetOrganizationFields(org.OrganizationFields)\n\tcmd.ui.Say(T(\"Targeted org {{.OrgName}}\\n\",\n\t\tmap[string]interface{}{\"OrgName\": terminal.EntityNameColor(org.Name)}))\n}\n\nfunc (cmd Login) setSpace(c flags.FlagContext) error {\n\tspaceName := c.String(\"s\")\n\n\tif spaceName == \"\" {\n\t\tvar availableSpaces []models.Space\n\t\terr := cmd.spaceRepo.ListSpaces(func(space models.Space) bool {\n\t\t\tavailableSpaces = append(availableSpaces, space)\n\t\t\treturn (len(availableSpaces) < maxChoices)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.New(T(\"Error finding available spaces\\n{{.Err}}\",\n\t\t\t\tmap[string]interface{}{\"Err\": err.Error()}))\n\t\t}\n\n\t\tif len(availableSpaces) == 0 {\n\t\t\treturn nil\n\t\t} else if len(availableSpaces) == 1 {\n\t\t\tcmd.targetSpace(availableSpaces[0])\n\t\t\treturn nil\n\t\t} else {\n\t\t\tspaceName = cmd.promptForSpaceName(availableSpaces)\n\t\t\tif spaceName == \"\" {\n\t\t\t\tcmd.ui.Say(\"\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tspace, err := cmd.spaceRepo.FindByName(spaceName)\n\tif err != nil {\n\t\treturn errors.New(T(\"Error finding space {{.SpaceName}}\\n{{.Err}}\",\n\t\t\tmap[string]interface{}{\"SpaceName\": terminal.EntityNameColor(spaceName), \"Err\": err.Error()}))\n\t}\n\n\tcmd.targetSpace(space)\n\treturn nil\n}\n\nfunc (cmd Login) promptForSpaceName(spaces []models.Space) string {\n\tspaceNames := []string{}\n\tfor _, space := range spaces {\n\t\tspaceNames = append(spaceNames, space.Name)\n\t}\n\n\treturn cmd.promptForName(spaceNames, T(\"Select a space (or press enter to skip):\"), \"Space\")\n}\n\nfunc (cmd Login) targetSpace(space models.Space) {\n\tcmd.config.SetSpaceFields(space.SpaceFields)\n\tcmd.ui.Say(T(\"Targeted space {{.SpaceName}}\\n\",\n\t\tmap[string]interface{}{\"SpaceName\": terminal.EntityNameColor(space.Name)}))\n}\n\nfunc (cmd Login) promptForName(names []string, listPrompt, itemPrompt string) string {\n\tnameIndex := 0\n\tvar nameString string\n\tfor nameIndex < 1 || nameIndex > len(names) {\n\t\tvar err error\n\n\t\t\/\/ list header\n\t\tcmd.ui.Say(listPrompt)\n\n\t\t\/\/ only display list if it is shorter than maxChoices\n\t\tif len(names) < maxChoices {\n\t\t\tfor i, name := range names {\n\t\t\t\tcmd.ui.Say(\"%d. %s\", i+1, name)\n\t\t\t}\n\t\t} else {\n\t\t\tcmd.ui.Say(T(\"There are too many options to display, please type in the name.\"))\n\t\t}\n\n\t\tnameString = cmd.ui.Ask(itemPrompt)\n\t\tif nameString == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\n\t\tnameIndex, err = strconv.Atoi(nameString)\n\n\t\tif err != nil {\n\t\t\tnameIndex = 1\n\t\t\treturn nameString\n\t\t}\n\t}\n\n\treturn names[nameIndex-1]\n}\n<commit_msg>if cf misconfigured, still allow user to 'cf login -sso'<commit_after>package commands\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccversion\"\n\t\"code.cloudfoundry.org\/cli\/cf\/commandregistry\"\n\t\"code.cloudfoundry.org\/cli\/cf\/flags\"\n\t. \"code.cloudfoundry.org\/cli\/cf\/i18n\"\n\t\"code.cloudfoundry.org\/cli\/command\"\n\t\"code.cloudfoundry.org\/cli\/command\/translatableerror\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/authentication\"\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/organizations\"\n\t\"code.cloudfoundry.org\/cli\/cf\/api\/spaces\"\n\t\"code.cloudfoundry.org\/cli\/cf\/configuration\/coreconfig\"\n\t\"code.cloudfoundry.org\/cli\/cf\/models\"\n\t\"code.cloudfoundry.org\/cli\/cf\/requirements\"\n\t\"code.cloudfoundry.org\/cli\/cf\/terminal\"\n)\n\nconst maxLoginTries = 3\nconst maxChoices = 50\n\ntype Login struct {\n\tui terminal.UI\n\tconfig coreconfig.ReadWriter\n\tauthenticator authentication.Repository\n\tendpointRepo coreconfig.EndpointRepository\n\torgRepo organizations.OrganizationRepository\n\tspaceRepo spaces.SpaceRepository\n}\n\nfunc init() {\n\tcommandregistry.Register(&Login{})\n}\n\nfunc (cmd *Login) MetaData() commandregistry.CommandMetadata {\n\tfs := make(map[string]flags.FlagSet)\n\tfs[\"a\"] = &flags.StringFlag{ShortName: \"a\", Usage: T(\"API endpoint (e.g. https:\/\/api.example.com)\")}\n\tfs[\"u\"] = &flags.StringFlag{ShortName: \"u\", Usage: T(\"Username\")}\n\tfs[\"p\"] = &flags.StringFlag{ShortName: \"p\", Usage: T(\"Password\")}\n\tfs[\"o\"] = &flags.StringFlag{ShortName: \"o\", Usage: T(\"Org\")}\n\tfs[\"s\"] = &flags.StringFlag{ShortName: \"s\", Usage: T(\"Space\")}\n\tfs[\"sso\"] = &flags.BoolFlag{Name: \"sso\", Usage: T(\"Prompt for a one-time passcode to login\")}\n\tfs[\"sso-passcode\"] = &flags.StringFlag{Name: \"sso-passcode\", Usage: T(\"One-time passcode\")}\n\tfs[\"skip-ssl-validation\"] = &flags.BoolFlag{Name: \"skip-ssl-validation\", Usage: T(\"Skip verification of the API endpoint. Not recommended!\")}\n\n\treturn commandregistry.CommandMetadata{\n\t\tName: \"login\",\n\t\tShortName: \"l\",\n\t\tDescription: T(\"Log user in\"),\n\t\tUsage: []string{\n\t\t\tT(\"CF_NAME login [-a API_URL] [-u USERNAME] [-p PASSWORD] [-o ORG] [-s SPACE] [--sso | --sso-passcode PASSCODE]\\n\\n\"),\n\t\t\tterminal.WarningColor(T(\"WARNING:\\n Providing your password as a command line option is highly discouraged\\n Your password may be visible to others and may be recorded in your shell history\")),\n\t\t},\n\t\tExamples: []string{\n\t\t\tT(\"CF_NAME login (omit username and password to login interactively -- CF_NAME will prompt for both)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p pa55woRD (specify username and password as arguments)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p \\\"my password\\\" (use quotes for passwords with a space)\"),\n\t\t\tT(\"CF_NAME login -u name@example.com -p \\\"\\\\\\\"password\\\\\\\"\\\" (escape quotes if used in password)\"),\n\t\t\tT(\"CF_NAME login --sso (CF_NAME will provide a url to obtain a one-time passcode to login)\"),\n\t\t},\n\t\tFlags: fs,\n\t}\n}\n\nfunc (cmd *Login) Requirements(requirementsFactory requirements.Factory, fc flags.FlagContext) ([]requirements.Requirement, error) {\n\treqs := []requirements.Requirement{}\n\treturn reqs, nil\n}\n\nfunc (cmd *Login) SetDependency(deps commandregistry.Dependency, pluginCall bool) commandregistry.Command {\n\tcmd.ui = deps.UI\n\tcmd.config = deps.Config\n\tcmd.authenticator = deps.RepoLocator.GetAuthenticationRepository()\n\tcmd.endpointRepo = deps.RepoLocator.GetEndpointRepository()\n\tcmd.orgRepo = deps.RepoLocator.GetOrganizationRepository()\n\tcmd.spaceRepo = deps.RepoLocator.GetSpaceRepository()\n\treturn cmd\n}\n\nfunc (cmd *Login) Execute(c flags.FlagContext) error {\n\tcmd.config.ClearSession()\n\n\tendpoint, skipSSL := cmd.decideEndpoint(c)\n\n\tapi := API{\n\t\tui: cmd.ui,\n\t\tconfig: cmd.config,\n\t\tendpointRepo: cmd.endpointRepo,\n\t}\n\terr := api.setAPIEndpoint(endpoint, skipSSL, cmd.MetaData().Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = command.MinimumCCAPIVersionCheck(cmd.config.APIVersion(), ccversion.MinSupportedV2ClientVersion)\n\tif err != nil {\n\t\tif _, ok := err.(translatableerror.MinimumCFAPIVersionNotMetError); ok {\n\t\t\tcmd.ui.Warn(\"Your API version is no longer supported. Upgrade to a newer version of the API.\")\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tcmd.ui.Say(\"\")\n\t\tcmd.ui.ShowConfiguration(cmd.config)\n\t}()\n\n\t\/\/ We thought we would never need to explicitly branch in this code\n\t\/\/ for anything as simple as authentication, but it turns out that our\n\t\/\/ assumptions did not match reality.\n\n\t\/\/ When SAML is enabled (but not configured) then the UAA\/Login server\n\t\/\/ will always returns password prompts that includes the Passcode field.\n\t\/\/ Users can authenticate with:\n\t\/\/ EITHER username and password\n\t\/\/ OR a one-time passcode\n\n\tswitch {\n\tcase c.Bool(\"sso\") && c.IsSet(\"sso-passcode\"):\n\t\treturn errors.New(T(\"Incorrect usage: --sso-passcode flag cannot be used with --sso\"))\n\tcase c.Bool(\"sso\") || c.IsSet(\"sso-passcode\"):\n\t\terr = cmd.authenticateSSO(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\terr = cmd.authenticate(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\torgIsSet, err := cmd.setOrganization(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif orgIsSet {\n\t\terr = cmd.setSpace(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tcmd.ui.NotifyUpdateIfNeeded(cmd.config)\n\treturn nil\n}\n\nfunc (cmd Login) decideEndpoint(c flags.FlagContext) (string, bool) {\n\tendpoint := c.String(\"a\")\n\tskipSSL := c.Bool(\"skip-ssl-validation\")\n\tif endpoint == \"\" {\n\t\tendpoint = cmd.config.APIEndpoint()\n\t\tskipSSL = cmd.config.IsSSLDisabled() || skipSSL\n\t}\n\n\tif endpoint == \"\" {\n\t\tendpoint = cmd.ui.Ask(T(\"API endpoint\"))\n\t} else {\n\t\tcmd.ui.Say(T(\"API endpoint: {{.Endpoint}}\", map[string]interface{}{\"Endpoint\": terminal.EntityNameColor(endpoint)}))\n\t}\n\n\treturn endpoint, skipSSL\n}\n\nfunc (cmd Login) authenticateSSO(c flags.FlagContext) error {\n\tprompts, err := cmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcredentials := make(map[string]string)\n\tpasscode := prompts[\"passcode\"]\n\n\tif passcode.DisplayName == \"\" {\n\t\tpasscode = coreconfig.AuthPrompt{\n\t\t\tType: coreconfig.AuthPromptTypePassword,\n\t\t\tDisplayName: fmt.Sprintf(\"Temporary Authentication Code ( Get one at %s\/passcode )\", cmd.config.AuthenticationEndpoint()),\n\t\t}\n\t}\n\n\tfor i := 0; i < maxLoginTries; i++ {\n\t\tif c.IsSet(\"sso-passcode\") && i == 0 {\n\t\t\tcredentials[\"passcode\"] = c.String(\"sso-passcode\")\n\t\t} else {\n\t\t\tcredentials[\"passcode\"] = cmd.ui.AskForPassword(passcode.DisplayName)\n\t\t}\n\n\t\tcmd.ui.Say(T(\"Authenticating...\"))\n\t\terr = cmd.authenticator.Authenticate(credentials)\n\n\t\tif err == nil {\n\t\t\tcmd.ui.Ok()\n\t\t\tcmd.ui.Say(\"\")\n\t\t\tbreak\n\t\t}\n\n\t\tcmd.ui.Say(err.Error())\n\t}\n\n\tif err != nil {\n\t\treturn errors.New(T(\"Unable to authenticate.\"))\n\t}\n\treturn nil\n}\n\nfunc (cmd Login) authenticate(c flags.FlagContext) error {\n\tif cmd.config.UAAGrantType() == \"client_credentials\" {\n\t\treturn errors.New(T(\"Service account currently logged in. Use 'cf logout' to log out service account and try again.\"))\n\t}\n\n\tusernameFlagValue := c.String(\"u\")\n\tpasswordFlagValue := c.String(\"p\")\n\n\tprompts, err := cmd.authenticator.GetLoginPromptsAndSaveUAAServerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpasswordKeys := []string{}\n\tcredentials := make(map[string]string)\n\n\tif value, ok := prompts[\"username\"]; ok {\n\t\tif prompts[\"username\"].Type == coreconfig.AuthPromptTypeText && usernameFlagValue != \"\" {\n\t\t\tcredentials[\"username\"] = usernameFlagValue\n\t\t} else {\n\t\t\tcredentials[\"username\"] = cmd.ui.Ask(value.DisplayName)\n\t\t}\n\t}\n\n\tfor key, prompt := range prompts {\n\t\tif prompt.Type == coreconfig.AuthPromptTypePassword {\n\t\t\tif key == \"passcode\" || key == \"password\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpasswordKeys = append(passwordKeys, key)\n\t\t} else if key == \"username\" {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tcredentials[key] = cmd.ui.Ask(prompt.DisplayName)\n\t\t}\n\t}\n\n\tfor i := 0; i < maxLoginTries; i++ {\n\n\t\t\/\/ ensure that password gets prompted before other codes (eg. mfa code)\n\t\tif passPrompt, ok := prompts[\"password\"]; ok {\n\t\t\tif passwordFlagValue != \"\" {\n\t\t\t\tcredentials[\"password\"] = passwordFlagValue\n\t\t\t\tpasswordFlagValue = \"\"\n\t\t\t} else {\n\t\t\t\tcredentials[\"password\"] = cmd.ui.AskForPassword(passPrompt.DisplayName)\n\t\t\t}\n\t\t}\n\n\t\tfor _, key := range passwordKeys {\n\t\t\tcredentials[key] = cmd.ui.AskForPassword(prompts[key].DisplayName)\n\t\t}\n\n\t\tcredentialsCopy := make(map[string]string, len(credentials))\n\t\tfor k, v := range credentials {\n\t\t\tcredentialsCopy[k] = v\n\t\t}\n\n\t\tcmd.ui.Say(T(\"Authenticating...\"))\n\t\terr = cmd.authenticator.Authenticate(credentialsCopy)\n\n\t\tif err == nil {\n\t\t\tcmd.ui.Ok()\n\t\t\tcmd.ui.Say(\"\")\n\t\t\tbreak\n\t\t}\n\n\t\tcmd.ui.Say(err.Error())\n\t}\n\n\tif err != nil {\n\t\treturn errors.New(T(\"Unable to authenticate.\"))\n\t}\n\treturn nil\n}\n\nfunc (cmd Login) setOrganization(c flags.FlagContext) (bool, error) {\n\torgName := c.String(\"o\")\n\n\tif orgName == \"\" {\n\t\torgs, err := cmd.orgRepo.ListOrgs(maxChoices)\n\t\tif err != nil {\n\t\t\treturn false, errors.New(T(\"Error finding available orgs\\n{{.APIErr}}\",\n\t\t\t\tmap[string]interface{}{\"APIErr\": err.Error()}))\n\t\t}\n\n\t\tswitch len(orgs) {\n\t\tcase 0:\n\t\t\treturn false, nil\n\t\tcase 1:\n\t\t\tcmd.targetOrganization(orgs[0])\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\torgName = cmd.promptForOrgName(orgs)\n\t\t\tif orgName == \"\" {\n\t\t\t\tcmd.ui.Say(\"\")\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\torg, err := cmd.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn false, errors.New(T(\"Error finding org {{.OrgName}}\\n{{.Err}}\",\n\t\t\tmap[string]interface{}{\"OrgName\": terminal.EntityNameColor(orgName), \"Err\": err.Error()}))\n\t}\n\n\tcmd.targetOrganization(org)\n\treturn true, nil\n}\n\nfunc (cmd Login) promptForOrgName(orgs []models.Organization) string {\n\torgNames := []string{}\n\tfor _, org := range orgs {\n\t\torgNames = append(orgNames, org.Name)\n\t}\n\n\treturn cmd.promptForName(orgNames, T(\"Select an org (or press enter to skip):\"), \"Org\")\n}\n\nfunc (cmd Login) targetOrganization(org models.Organization) {\n\tcmd.config.SetOrganizationFields(org.OrganizationFields)\n\tcmd.ui.Say(T(\"Targeted org {{.OrgName}}\\n\",\n\t\tmap[string]interface{}{\"OrgName\": terminal.EntityNameColor(org.Name)}))\n}\n\nfunc (cmd Login) setSpace(c flags.FlagContext) error {\n\tspaceName := c.String(\"s\")\n\n\tif spaceName == \"\" {\n\t\tvar availableSpaces []models.Space\n\t\terr := cmd.spaceRepo.ListSpaces(func(space models.Space) bool {\n\t\t\tavailableSpaces = append(availableSpaces, space)\n\t\t\treturn (len(availableSpaces) < maxChoices)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.New(T(\"Error finding available spaces\\n{{.Err}}\",\n\t\t\t\tmap[string]interface{}{\"Err\": err.Error()}))\n\t\t}\n\n\t\tif len(availableSpaces) == 0 {\n\t\t\treturn nil\n\t\t} else if len(availableSpaces) == 1 {\n\t\t\tcmd.targetSpace(availableSpaces[0])\n\t\t\treturn nil\n\t\t} else {\n\t\t\tspaceName = cmd.promptForSpaceName(availableSpaces)\n\t\t\tif spaceName == \"\" {\n\t\t\t\tcmd.ui.Say(\"\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tspace, err := cmd.spaceRepo.FindByName(spaceName)\n\tif err != nil {\n\t\treturn errors.New(T(\"Error finding space {{.SpaceName}}\\n{{.Err}}\",\n\t\t\tmap[string]interface{}{\"SpaceName\": terminal.EntityNameColor(spaceName), \"Err\": err.Error()}))\n\t}\n\n\tcmd.targetSpace(space)\n\treturn nil\n}\n\nfunc (cmd Login) promptForSpaceName(spaces []models.Space) string {\n\tspaceNames := []string{}\n\tfor _, space := range spaces {\n\t\tspaceNames = append(spaceNames, space.Name)\n\t}\n\n\treturn cmd.promptForName(spaceNames, T(\"Select a space (or press enter to skip):\"), \"Space\")\n}\n\nfunc (cmd Login) targetSpace(space models.Space) {\n\tcmd.config.SetSpaceFields(space.SpaceFields)\n\tcmd.ui.Say(T(\"Targeted space {{.SpaceName}}\\n\",\n\t\tmap[string]interface{}{\"SpaceName\": terminal.EntityNameColor(space.Name)}))\n}\n\nfunc (cmd Login) promptForName(names []string, listPrompt, itemPrompt string) string {\n\tnameIndex := 0\n\tvar nameString string\n\tfor nameIndex < 1 || nameIndex > len(names) {\n\t\tvar err error\n\n\t\t\/\/ list header\n\t\tcmd.ui.Say(listPrompt)\n\n\t\t\/\/ only display list if it is shorter than maxChoices\n\t\tif len(names) < maxChoices {\n\t\t\tfor i, name := range names {\n\t\t\t\tcmd.ui.Say(\"%d. %s\", i+1, name)\n\t\t\t}\n\t\t} else {\n\t\t\tcmd.ui.Say(T(\"There are too many options to display, please type in the name.\"))\n\t\t}\n\n\t\tnameString = cmd.ui.Ask(itemPrompt)\n\t\tif nameString == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\n\t\tnameIndex, err = strconv.Atoi(nameString)\n\n\t\tif err != nil {\n\t\t\tnameIndex = 1\n\t\t\treturn nameString\n\t\t}\n\t}\n\n\treturn names[nameIndex-1]\n}\n<|endoftext|>"} {"text":"<commit_before>package terminal\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/\/ PrintableTable is an implementation of the Table interface. It\n\/\/ remembers the headers, the added rows, the column widths, and a\n\/\/ number of other things.\ntype Table struct {\n\tui UI\n\theaders []string\n\theaderPrinted bool\n\tcolumnWidth []int\n\trowHeight []int\n\trows [][]string\n\tcolSpacing string\n\ttransformer []Transformer\n}\n\n\/\/ Transformer is the type of functions used to modify the content of\n\/\/ a table cell for actual display. For multi-line content of a cell\n\/\/ the transformation is applied to each individual line.\ntype Transformer func(s string) string\n\n\/\/ NewTable is the constructor function creating a new printable table\n\/\/ from a list of headers. The table is also connected to a UI, which\n\/\/ is where it will print itself to on demand.\nfunc NewTable(headers []string) *Table {\n\tpt := &Table{\n\t\theaders: headers,\n\t\tcolumnWidth: make([]int, len(headers)),\n\t\tcolSpacing: \" \",\n\t\ttransformer: make([]Transformer, len(headers)),\n\t}\n\t\/\/ Standard colorization, column 0 is auto-highlighted as some\n\t\/\/ name. Everything else has no transformation (== identity\n\t\/\/ transform)\n\tfor i := range pt.transformer {\n\t\tpt.transformer[i] = nop\n\t}\n\tif 0 < len(headers) {\n\t\tpt.transformer[0] = TableContentHeaderColor\n\t}\n\treturn pt\n}\n\n\/\/ NoHeaders disables the printing of the header row for the specified\n\/\/ table.\nfunc (t *Table) NoHeaders() {\n\t\/\/ Fake the Print() code into the belief that the headers have\n\t\/\/ been printed already.\n\tt.headerPrinted = true\n}\n\n\/\/ SetTransformer specifies a string transformer to apply to the\n\/\/ content of the given column in the specified table.\nfunc (t *Table) SetTransformer(columnIndex int, tr Transformer) {\n\tt.transformer[columnIndex] = tr\n}\n\n\/\/ Add extends the table by another row.\nfunc (t *Table) Add(row ...string) {\n\tt.rows = append(t.rows, row)\n}\n\n\/\/ PrintTo is the core functionality for printing the table, placing\n\/\/ the formatted table into the writer given to it as argument. The\n\/\/ exported Print() is just a wrapper around this which redirects the\n\/\/ result into CF datastructures.\nfunc (t *Table) PrintTo(result io.Writer) error {\n\tt.rowHeight = make([]int, len(t.rows)+1)\n\n\trowIndex := 0\n\tif !t.headerPrinted {\n\t\t\/\/ row transformer header row\n\t\terr := t.calculateMaxSize(transHeader, rowIndex, t.headers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\tfor _, row := range t.rows {\n\t\t\/\/ table is row transformer itself, for content rows\n\t\terr := t.calculateMaxSize(t, rowIndex, row)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\trowIndex = 0\n\tif !t.headerPrinted {\n\t\terr := t.printRow(result, transHeader, rowIndex, t.headers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.headerPrinted = true\n\t\trowIndex++\n\t}\n\n\tfor row := range t.rows {\n\t\terr := t.printRow(result, t, rowIndex, t.rows[row])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\t\/\/ Note, printing a table clears it.\n\tt.rows = [][]string{}\n\treturn nil\n}\n\n\/\/ calculateMaxSize iterates over the collected rows of the specified\n\/\/ table, and their strings, determining the height of each row (in\n\/\/ lines), and the width of each column (in characters). The results\n\/\/ are stored in the table for use by Print.\nfunc (t *Table) calculateMaxSize(transformer rowTransformer, rowIndex int, row []string) error {\n\n\t\/\/ Iterate columns\n\tfor columnIndex := range row {\n\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\tif columnIndex >= len(t.headers) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Note that the length of the cell in characters is\n\t\t\/\/ __not__ equivalent to its width. Because it may be\n\t\t\/\/ a multi-line value. We have to split the cell into\n\t\t\/\/ lines and check the width of each such fragment.\n\t\t\/\/ The number of lines founds also goes into the row\n\t\t\/\/ height.\n\n\t\tlines := strings.Split(row[columnIndex], \"\\n\")\n\t\theight := len(lines)\n\n\t\tif t.rowHeight[rowIndex] < height {\n\t\t\tt.rowHeight[rowIndex] = height\n\t\t}\n\n\t\tfor i := range lines {\n\t\t\t\/\/ (**) See also 'printCellValue' (pCV). Here\n\t\t\t\/\/ and there we have to apply identical\n\t\t\t\/\/ transformations to the cell value to get\n\t\t\t\/\/ matching cell width information. If they do\n\t\t\t\/\/ not match then pCV may compute a cell width\n\t\t\t\/\/ larger than the max width found here, a\n\t\t\t\/\/ negative padding length from that, and\n\t\t\t\/\/ subsequently return an error. What\n\t\t\t\/\/ was further missing is trimming before\n\t\t\t\/\/ entering the user-transform. Especially\n\t\t\t\/\/ with color transforms any trailing space\n\t\t\t\/\/ going in will not be removable for print.\n\t\t\t\/\/\n\t\t\t\/\/ This happened for\n\t\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/892938\/stories\/117404629\n\n\t\t\tvalue := trim(Decolorize(transformer.Transform(columnIndex, trim(lines[i]))))\n\t\t\twidth, err := visibleSize(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif t.columnWidth[columnIndex] < width {\n\t\t\t\tt.columnWidth[columnIndex] = width\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ printRow is responsible for the layouting, transforming and\n\/\/ printing of the string in a single row\nfunc (t *Table) printRow(result io.Writer, transformer rowTransformer, rowIndex int, row []string) error {\n\n\theight := t.rowHeight[rowIndex]\n\n\t\/\/ Compute the index of the last column as the min number of\n\t\/\/ cells in the header and cells in the current row.\n\t\/\/ Note: math.Min seems to be for float only :(\n\tlast := len(t.headers) - 1\n\tlastr := len(row) - 1\n\tif lastr < last {\n\t\tlast = lastr\n\t}\n\n\t\/\/ Note how we always print into a line buffer before placing\n\t\/\/ the assembled line into the result. This allows us to trim\n\t\/\/ superfluous trailing whitespace from the line before making\n\t\/\/ it final.\n\n\tif height <= 1 {\n\t\t\/\/ Easy case, all cells in the row are single-line\n\t\tline := &bytes.Buffer{}\n\n\t\tfor columnIndex := range row {\n\t\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\t\tif columnIndex >= len(t.headers) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\terr := t.printCellValue(line, transformer, columnIndex, last, row[columnIndex])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(result, \"%s\\n\", trim(string(line.Bytes())))\n\t\treturn nil\n\t}\n\n\t\/\/ We have at least one multi-line cell in this row.\n\t\/\/ Treat it a bit like a mini-table.\n\n\t\/\/ Step I. Fill the mini-table. Note how it is stored\n\t\/\/ column-major, not row-major.\n\n\t\/\/ [column][row]string\n\tsub := make([][]string, len(t.headers))\n\tfor columnIndex := range row {\n\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\tif columnIndex >= len(t.headers) {\n\t\t\tbreak\n\t\t}\n\t\tsub[columnIndex] = strings.Split(row[columnIndex], \"\\n\")\n\t\t\/\/ (*) Extend the column to the full height.\n\t\tfor len(sub[columnIndex]) < height {\n\t\t\tsub[columnIndex] = append(sub[columnIndex], \"\")\n\t\t}\n\t}\n\n\t\/\/ Step II. Iterate over the rows, then columns to\n\t\/\/ collect the output. This assumes that all\n\t\/\/ the rows in sub are the same height. See\n\t\/\/ (*) above where that is made true.\n\n\tfor rowIndex := range sub[0] {\n\t\tline := &bytes.Buffer{}\n\n\t\tfor columnIndex := range sub {\n\t\t\terr := t.printCellValue(line, transformer, columnIndex, last, sub[columnIndex][rowIndex])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(result, \"%s\\n\", trim(string(line.Bytes())))\n\t}\n\treturn nil\n}\n\n\/\/ printCellValue pads the specified string to the width of the given\n\/\/ column, adds the spacing bewtween columns, and returns the result.\nfunc (t *Table) printCellValue(result io.Writer, transformer rowTransformer, col, last int, value string) error {\n\tvalue = trim(transformer.Transform(col, trim(value)))\n\tfmt.Fprint(result, value)\n\n\t\/\/ Pad all columns, but the last in this row (with the size of\n\t\/\/ the header row limiting this). This ensures that most of\n\t\/\/ the irrelevant spacing is not printed. At the moment\n\t\/\/ irrelevant spacing can only occur when printing a row with\n\t\/\/ multi-line cells, introducing a physical short line for a\n\t\/\/ long logical row. Getting rid of that requires fixing in\n\t\/\/ printRow.\n\t\/\/\n\t\/\/ Note how the inter-column spacing is also irrelevant for\n\t\/\/ that last column.\n\n\tif col < last {\n\t\t\/\/ (**) See also 'calculateMaxSize' (cMS). Here and\n\t\t\/\/ there we have to apply identical transformations to\n\t\t\/\/ the cell value to get matching cell width\n\t\t\/\/ information. If they do not match then we may here\n\t\t\/\/ compute a cell width larger than the max width\n\t\t\/\/ found by cMS, derive a negative padding length from\n\t\t\/\/ that, and subsequently return an error. What was\n\t\t\/\/ further missing is trimming before entering the\n\t\t\/\/ user-transform. Especially with color transforms\n\t\t\/\/ any trailing space going in will not be removable\n\t\t\/\/ for print.\n\t\t\/\/\n\t\t\/\/ This happened for\n\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/892938\/stories\/117404629\n\n\t\tdecolorizedLength, err := visibleSize(trim(Decolorize(value)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpadlen := t.columnWidth[col] - decolorizedLength\n\t\tpadding := strings.Repeat(\" \", padlen)\n\t\tfmt.Fprint(result, padding)\n\t\tfmt.Fprint(result, t.colSpacing)\n\t}\n\treturn nil\n}\n\n\/\/ rowTransformer is an interface behind which we can specify how to\n\/\/ transform the strings of an entire row on a column-by-column basis.\ntype rowTransformer interface {\n\tTransform(column int, s string) string\n}\n\n\/\/ transformHeader is an implementation of rowTransformer which\n\/\/ highlights all columns as a Header.\ntype transformHeader struct{}\n\n\/\/ transHeader holds a package-global transformHeader to prevent us\n\/\/ from continuously allocating a literal of the type whenever we\n\/\/ print a header row. Instead all tables use this value.\nvar transHeader = &transformHeader{}\n\n\/\/ Transform performs the Header highlighting for transformHeader\nfunc (th *transformHeader) Transform(column int, s string) string {\n\treturn HeaderColor(s)\n}\n\n\/\/ Transform makes a PrintableTable an implementation of\n\/\/ rowTransformer. It performs the per-column transformation for table\n\/\/ content, as specified during construction and\/or overridden by the\n\/\/ user of the table, see SetTransformer.\nfunc (t *Table) Transform(column int, s string) string {\n\treturn t.transformer[column](s)\n}\n\n\/\/ nop is the identity transformation which does not transform the\n\/\/ string at all.\nfunc nop(s string) string {\n\treturn s\n}\n\n\/\/ trim is a helper to remove trailing whitespace from a string.\nfunc trim(s string) string {\n\treturn strings.TrimRight(s, \" \\t\")\n}\n\n\/\/ visibleSize returns the number of columns the string will cover\n\/\/ when displayed in the terminal. This is the number of runes,\n\/\/ i.e. characters, not the number of bytes it consists of.\nfunc visibleSize(s string) (int, error) {\n\t\/\/ This code re-implements the basic functionality of\n\t\/\/ RuneCountInString to account for special cases. Namely\n\t\/\/ UTF-8 characters taking up 3 bytes (**) appear as double-width.\n\t\/\/\n\t\/\/ (**) I wonder if that is the set of characters outside of\n\t\/\/ the BMP <=> the set of characters requiring surrogates (2\n\t\/\/ slots) when encoded in UCS-2.\n\n\tr := strings.NewReader(s)\n\n\tvar size int\n\tfor range s {\n\t\t_, runeSize, err := r.ReadRune()\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Errorf(\"error when calculating visible size of: %s\", s)\n\t\t}\n\n\t\tif runeSize == 3 {\n\t\t\tsize += 2 \/\/ Kanji and Katakana characters appear as double-width\n\t\t} else {\n\t\t\tsize++\n\t\t}\n\t}\n\n\treturn size, nil\n}\n<commit_msg>Improves godoc for Table.PrintTo<commit_after>package terminal\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/\/ PrintableTable is an implementation of the Table interface. It\n\/\/ remembers the headers, the added rows, the column widths, and a\n\/\/ number of other things.\ntype Table struct {\n\tui UI\n\theaders []string\n\theaderPrinted bool\n\tcolumnWidth []int\n\trowHeight []int\n\trows [][]string\n\tcolSpacing string\n\ttransformer []Transformer\n}\n\n\/\/ Transformer is the type of functions used to modify the content of\n\/\/ a table cell for actual display. For multi-line content of a cell\n\/\/ the transformation is applied to each individual line.\ntype Transformer func(s string) string\n\n\/\/ NewTable is the constructor function creating a new printable table\n\/\/ from a list of headers. The table is also connected to a UI, which\n\/\/ is where it will print itself to on demand.\nfunc NewTable(headers []string) *Table {\n\tpt := &Table{\n\t\theaders: headers,\n\t\tcolumnWidth: make([]int, len(headers)),\n\t\tcolSpacing: \" \",\n\t\ttransformer: make([]Transformer, len(headers)),\n\t}\n\t\/\/ Standard colorization, column 0 is auto-highlighted as some\n\t\/\/ name. Everything else has no transformation (== identity\n\t\/\/ transform)\n\tfor i := range pt.transformer {\n\t\tpt.transformer[i] = nop\n\t}\n\tif 0 < len(headers) {\n\t\tpt.transformer[0] = TableContentHeaderColor\n\t}\n\treturn pt\n}\n\n\/\/ NoHeaders disables the printing of the header row for the specified\n\/\/ table.\nfunc (t *Table) NoHeaders() {\n\t\/\/ Fake the Print() code into the belief that the headers have\n\t\/\/ been printed already.\n\tt.headerPrinted = true\n}\n\n\/\/ SetTransformer specifies a string transformer to apply to the\n\/\/ content of the given column in the specified table.\nfunc (t *Table) SetTransformer(columnIndex int, tr Transformer) {\n\tt.transformer[columnIndex] = tr\n}\n\n\/\/ Add extends the table by another row.\nfunc (t *Table) Add(row ...string) {\n\tt.rows = append(t.rows, row)\n}\n\n\/\/ PrintTo is the core functionality for printing the table, placing\n\/\/ the formatted table into the writer given to it as argument. The\n\/\/ exported Print() is just a wrapper around this which redirects the\n\/\/ result into CF data structures.\n\/\/ Once a table has been printed onto a Writer, it cannot be printed\n\/\/ again.\nfunc (t *Table) PrintTo(result io.Writer) error {\n\tt.rowHeight = make([]int, len(t.rows)+1)\n\n\trowIndex := 0\n\tif !t.headerPrinted {\n\t\t\/\/ row transformer header row\n\t\terr := t.calculateMaxSize(transHeader, rowIndex, t.headers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\tfor _, row := range t.rows {\n\t\t\/\/ table is row transformer itself, for content rows\n\t\terr := t.calculateMaxSize(t, rowIndex, row)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\trowIndex = 0\n\tif !t.headerPrinted {\n\t\terr := t.printRow(result, transHeader, rowIndex, t.headers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.headerPrinted = true\n\t\trowIndex++\n\t}\n\n\tfor row := range t.rows {\n\t\terr := t.printRow(result, t, rowIndex, t.rows[row])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trowIndex++\n\t}\n\n\tt.rows = [][]string{}\n\treturn nil\n}\n\n\/\/ calculateMaxSize iterates over the collected rows of the specified\n\/\/ table, and their strings, determining the height of each row (in\n\/\/ lines), and the width of each column (in characters). The results\n\/\/ are stored in the table for use by Print.\nfunc (t *Table) calculateMaxSize(transformer rowTransformer, rowIndex int, row []string) error {\n\n\t\/\/ Iterate columns\n\tfor columnIndex := range row {\n\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\tif columnIndex >= len(t.headers) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Note that the length of the cell in characters is\n\t\t\/\/ __not__ equivalent to its width. Because it may be\n\t\t\/\/ a multi-line value. We have to split the cell into\n\t\t\/\/ lines and check the width of each such fragment.\n\t\t\/\/ The number of lines founds also goes into the row\n\t\t\/\/ height.\n\n\t\tlines := strings.Split(row[columnIndex], \"\\n\")\n\t\theight := len(lines)\n\n\t\tif t.rowHeight[rowIndex] < height {\n\t\t\tt.rowHeight[rowIndex] = height\n\t\t}\n\n\t\tfor i := range lines {\n\t\t\t\/\/ (**) See also 'printCellValue' (pCV). Here\n\t\t\t\/\/ and there we have to apply identical\n\t\t\t\/\/ transformations to the cell value to get\n\t\t\t\/\/ matching cell width information. If they do\n\t\t\t\/\/ not match then pCV may compute a cell width\n\t\t\t\/\/ larger than the max width found here, a\n\t\t\t\/\/ negative padding length from that, and\n\t\t\t\/\/ subsequently return an error. What\n\t\t\t\/\/ was further missing is trimming before\n\t\t\t\/\/ entering the user-transform. Especially\n\t\t\t\/\/ with color transforms any trailing space\n\t\t\t\/\/ going in will not be removable for print.\n\t\t\t\/\/\n\t\t\t\/\/ This happened for\n\t\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/892938\/stories\/117404629\n\n\t\t\tvalue := trim(Decolorize(transformer.Transform(columnIndex, trim(lines[i]))))\n\t\t\twidth, err := visibleSize(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif t.columnWidth[columnIndex] < width {\n\t\t\t\tt.columnWidth[columnIndex] = width\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ printRow is responsible for the layouting, transforming and\n\/\/ printing of the string in a single row\nfunc (t *Table) printRow(result io.Writer, transformer rowTransformer, rowIndex int, row []string) error {\n\n\theight := t.rowHeight[rowIndex]\n\n\t\/\/ Compute the index of the last column as the min number of\n\t\/\/ cells in the header and cells in the current row.\n\t\/\/ Note: math.Min seems to be for float only :(\n\tlast := len(t.headers) - 1\n\tlastr := len(row) - 1\n\tif lastr < last {\n\t\tlast = lastr\n\t}\n\n\t\/\/ Note how we always print into a line buffer before placing\n\t\/\/ the assembled line into the result. This allows us to trim\n\t\/\/ superfluous trailing whitespace from the line before making\n\t\/\/ it final.\n\n\tif height <= 1 {\n\t\t\/\/ Easy case, all cells in the row are single-line\n\t\tline := &bytes.Buffer{}\n\n\t\tfor columnIndex := range row {\n\t\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\t\tif columnIndex >= len(t.headers) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\terr := t.printCellValue(line, transformer, columnIndex, last, row[columnIndex])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(result, \"%s\\n\", trim(string(line.Bytes())))\n\t\treturn nil\n\t}\n\n\t\/\/ We have at least one multi-line cell in this row.\n\t\/\/ Treat it a bit like a mini-table.\n\n\t\/\/ Step I. Fill the mini-table. Note how it is stored\n\t\/\/ column-major, not row-major.\n\n\t\/\/ [column][row]string\n\tsub := make([][]string, len(t.headers))\n\tfor columnIndex := range row {\n\t\t\/\/ Truncate long row, ignore the additional fields.\n\t\tif columnIndex >= len(t.headers) {\n\t\t\tbreak\n\t\t}\n\t\tsub[columnIndex] = strings.Split(row[columnIndex], \"\\n\")\n\t\t\/\/ (*) Extend the column to the full height.\n\t\tfor len(sub[columnIndex]) < height {\n\t\t\tsub[columnIndex] = append(sub[columnIndex], \"\")\n\t\t}\n\t}\n\n\t\/\/ Step II. Iterate over the rows, then columns to\n\t\/\/ collect the output. This assumes that all\n\t\/\/ the rows in sub are the same height. See\n\t\/\/ (*) above where that is made true.\n\n\tfor rowIndex := range sub[0] {\n\t\tline := &bytes.Buffer{}\n\n\t\tfor columnIndex := range sub {\n\t\t\terr := t.printCellValue(line, transformer, columnIndex, last, sub[columnIndex][rowIndex])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(result, \"%s\\n\", trim(string(line.Bytes())))\n\t}\n\treturn nil\n}\n\n\/\/ printCellValue pads the specified string to the width of the given\n\/\/ column, adds the spacing bewtween columns, and returns the result.\nfunc (t *Table) printCellValue(result io.Writer, transformer rowTransformer, col, last int, value string) error {\n\tvalue = trim(transformer.Transform(col, trim(value)))\n\tfmt.Fprint(result, value)\n\n\t\/\/ Pad all columns, but the last in this row (with the size of\n\t\/\/ the header row limiting this). This ensures that most of\n\t\/\/ the irrelevant spacing is not printed. At the moment\n\t\/\/ irrelevant spacing can only occur when printing a row with\n\t\/\/ multi-line cells, introducing a physical short line for a\n\t\/\/ long logical row. Getting rid of that requires fixing in\n\t\/\/ printRow.\n\t\/\/\n\t\/\/ Note how the inter-column spacing is also irrelevant for\n\t\/\/ that last column.\n\n\tif col < last {\n\t\t\/\/ (**) See also 'calculateMaxSize' (cMS). Here and\n\t\t\/\/ there we have to apply identical transformations to\n\t\t\/\/ the cell value to get matching cell width\n\t\t\/\/ information. If they do not match then we may here\n\t\t\/\/ compute a cell width larger than the max width\n\t\t\/\/ found by cMS, derive a negative padding length from\n\t\t\/\/ that, and subsequently return an error. What was\n\t\t\/\/ further missing is trimming before entering the\n\t\t\/\/ user-transform. Especially with color transforms\n\t\t\/\/ any trailing space going in will not be removable\n\t\t\/\/ for print.\n\t\t\/\/\n\t\t\/\/ This happened for\n\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/892938\/stories\/117404629\n\n\t\tdecolorizedLength, err := visibleSize(trim(Decolorize(value)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpadlen := t.columnWidth[col] - decolorizedLength\n\t\tpadding := strings.Repeat(\" \", padlen)\n\t\tfmt.Fprint(result, padding)\n\t\tfmt.Fprint(result, t.colSpacing)\n\t}\n\treturn nil\n}\n\n\/\/ rowTransformer is an interface behind which we can specify how to\n\/\/ transform the strings of an entire row on a column-by-column basis.\ntype rowTransformer interface {\n\tTransform(column int, s string) string\n}\n\n\/\/ transformHeader is an implementation of rowTransformer which\n\/\/ highlights all columns as a Header.\ntype transformHeader struct{}\n\n\/\/ transHeader holds a package-global transformHeader to prevent us\n\/\/ from continuously allocating a literal of the type whenever we\n\/\/ print a header row. Instead all tables use this value.\nvar transHeader = &transformHeader{}\n\n\/\/ Transform performs the Header highlighting for transformHeader\nfunc (th *transformHeader) Transform(column int, s string) string {\n\treturn HeaderColor(s)\n}\n\n\/\/ Transform makes a PrintableTable an implementation of\n\/\/ rowTransformer. It performs the per-column transformation for table\n\/\/ content, as specified during construction and\/or overridden by the\n\/\/ user of the table, see SetTransformer.\nfunc (t *Table) Transform(column int, s string) string {\n\treturn t.transformer[column](s)\n}\n\n\/\/ nop is the identity transformation which does not transform the\n\/\/ string at all.\nfunc nop(s string) string {\n\treturn s\n}\n\n\/\/ trim is a helper to remove trailing whitespace from a string.\nfunc trim(s string) string {\n\treturn strings.TrimRight(s, \" \\t\")\n}\n\n\/\/ visibleSize returns the number of columns the string will cover\n\/\/ when displayed in the terminal. This is the number of runes,\n\/\/ i.e. characters, not the number of bytes it consists of.\nfunc visibleSize(s string) (int, error) {\n\t\/\/ This code re-implements the basic functionality of\n\t\/\/ RuneCountInString to account for special cases. Namely\n\t\/\/ UTF-8 characters taking up 3 bytes (**) appear as double-width.\n\t\/\/\n\t\/\/ (**) I wonder if that is the set of characters outside of\n\t\/\/ the BMP <=> the set of characters requiring surrogates (2\n\t\/\/ slots) when encoded in UCS-2.\n\n\tr := strings.NewReader(s)\n\n\tvar size int\n\tfor range s {\n\t\t_, runeSize, err := r.ReadRune()\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Errorf(\"error when calculating visible size of: %s\", s)\n\t\t}\n\n\t\tif runeSize == 3 {\n\t\t\tsize += 2 \/\/ Kanji and Katakana characters appear as double-width\n\t\t} else {\n\t\t\tsize++\n\t\t}\n\t}\n\n\treturn size, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package adapter\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/CenturyLinkLabs\/pmxadapter\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/errors\"\n)\n\nconst (\n\tmetadataType = \"Kubernetes\"\n\tmetadataVersion = \"0.1\"\n)\n\nvar (\n\tDefaultExecutor Executor\n\tillegalNameCharacters = regexp.MustCompile(`[\\W_]+`)\n)\n\nfunc init() {\n\te, err := NewKubernetesExecutor(\n\t\tos.Getenv(\"KUBERNETES_MASTER\"),\n\t\tos.Getenv(\"KUBERNETES_USERNAME\"),\n\t\tos.Getenv(\"KUBERNETES_PASSWORD\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"There was a problem with your Kubernetes connection: %v\", err)\n\t}\n\n\tDefaultExecutor = e\n}\n\ntype KubernetesAdapter struct{}\n\nfunc (a KubernetesAdapter) GetServices() ([]pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\trcs, err := DefaultExecutor.GetReplicationControllers()\n\tif err != nil {\n\t\tpmxErr := pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\treturn []pmxadapter.ServiceDeployment{}, pmxErr\n\t}\n\n\tsds := make([]pmxadapter.ServiceDeployment, len(rcs))\n\tfor i, rc := range rcs {\n\t\tsds[i].ID = rc.ObjectMeta.Name\n\t\tsds[i].ActualState = statusFromReplicationController(rc)\n\t}\n\treturn sds, nil\n}\n\nfunc (a KubernetesAdapter) GetService(id string) (pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\trc, err := DefaultExecutor.GetReplicationController(id)\n\tif err != nil {\n\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonNotFound {\n\t\t\treturn pmxadapter.ServiceDeployment{}, pmxadapter.NewError(http.StatusNotFound, err.Error())\n\t\t}\n\n\t\tpmxErr := pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\treturn pmxadapter.ServiceDeployment{}, pmxErr\n\t}\n\n\tsd := pmxadapter.ServiceDeployment{\n\t\tID: rc.ObjectMeta.Name,\n\t\tActualState: statusFromReplicationController(rc),\n\t}\n\treturn sd, nil\n}\n\nfunc (a KubernetesAdapter) CreateServices(services []*pmxadapter.Service) ([]pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\tdeployments := make([]pmxadapter.ServiceDeployment, len(services))\n\n\tfor i, s := range services {\n\t\tports := make([]api.Port, len(s.Ports))\n\t\tfor i, p := range s.Ports {\n\t\t\tports[i].HostPort = int(p.HostPort)\n\t\t\tports[i].ContainerPort = int(p.ContainerPort)\n\t\t\tports[i].Protocol = api.Protocol(p.Protocol)\n\t\t}\n\n\t\tenv := make([]api.EnvVar, len(s.Environment))\n\t\tfor i, e := range s.Environment {\n\t\t\tenv[i].Name = e.Variable\n\t\t\tenv[i].Value = e.Value\n\t\t}\n\n\t\tsafeName := sanitizeServiceName(s.Name)\n\n\t\trcSpec := api.ReplicationController{\n\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\tName: safeName,\n\t\t\t},\n\t\t\tSpec: api.ReplicationControllerSpec{\n\t\t\t\tReplicas: s.Deployment.Count,\n\t\t\t\tSelector: map[string]string{\"name\": safeName},\n\t\t\t\tTemplate: &api.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\"name\": safeName},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: safeName,\n\t\t\t\t\t\t\t\tImage: s.Source,\n\t\t\t\t\t\t\t\tCommand: []string{s.Command},\n\t\t\t\t\t\t\t\tPorts: ports,\n\t\t\t\t\t\t\t\tEnv: env,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\trc, err := DefaultExecutor.CreateReplicationController(rcSpec)\n\t\tif err != nil {\n\t\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonAlreadyExists {\n\t\t\t\treturn nil, pmxadapter.NewError(http.StatusConflict, err.Error())\n\t\t\t}\n\t\t\treturn nil, pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\t}\n\n\t\tdeployments[i].ID = rc.ObjectMeta.Name\n\t\tdeployments[i].ActualState = statusFromReplicationController(rc)\n\t}\n\n\treturn deployments, nil\n}\n\nfunc (a KubernetesAdapter) DestroyService(id string) *pmxadapter.Error {\n\terr := DefaultExecutor.DeleteReplicationController(id)\n\tif err != nil {\n\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonNotFound {\n\t\t\treturn pmxadapter.NewError(http.StatusNotFound, err.Error())\n\t\t}\n\n\t\treturn pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc (a KubernetesAdapter) GetMetadata() pmxadapter.Metadata {\n\treturn pmxadapter.Metadata{\n\t\tVersion: metadataVersion,\n\t\tType: metadataType,\n\t\tIsHealthy: DefaultExecutor.IsHealthy(),\n\t}\n}\n\nfunc sanitizeServiceName(n string) string {\n\ts := illegalNameCharacters.ReplaceAllString(n, \"-\")\n\treturn strings.ToLower(s)\n}\n\nfunc statusFromReplicationController(rc api.ReplicationController) string {\n\tdesired := rc.Spec.Replicas\n\tactual := rc.Status.Replicas\n\n\tif actual < desired {\n\t\treturn \"pending\"\n\t} else if desired == actual {\n\t\treturn \"running\"\n\t}\n\treturn \"unknown\"\n}\n<commit_msg>Break up enormous method.<commit_after>package adapter\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/CenturyLinkLabs\/pmxadapter\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/errors\"\n)\n\nconst (\n\tmetadataType = \"Kubernetes\"\n\tmetadataVersion = \"0.1\"\n)\n\nvar (\n\tDefaultExecutor Executor\n\tillegalNameCharacters = regexp.MustCompile(`[\\W_]+`)\n)\n\nfunc init() {\n\te, err := NewKubernetesExecutor(\n\t\tos.Getenv(\"KUBERNETES_MASTER\"),\n\t\tos.Getenv(\"KUBERNETES_USERNAME\"),\n\t\tos.Getenv(\"KUBERNETES_PASSWORD\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"There was a problem with your Kubernetes connection: %v\", err)\n\t}\n\n\tDefaultExecutor = e\n}\n\ntype KubernetesAdapter struct{}\n\nfunc (a KubernetesAdapter) GetServices() ([]pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\trcs, err := DefaultExecutor.GetReplicationControllers()\n\tif err != nil {\n\t\tpmxErr := pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\treturn []pmxadapter.ServiceDeployment{}, pmxErr\n\t}\n\n\tsds := make([]pmxadapter.ServiceDeployment, len(rcs))\n\tfor i, rc := range rcs {\n\t\tsds[i].ID = rc.ObjectMeta.Name\n\t\tsds[i].ActualState = statusFromReplicationController(rc)\n\t}\n\treturn sds, nil\n}\n\nfunc (a KubernetesAdapter) GetService(id string) (pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\trc, err := DefaultExecutor.GetReplicationController(id)\n\tif err != nil {\n\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonNotFound {\n\t\t\treturn pmxadapter.ServiceDeployment{}, pmxadapter.NewError(http.StatusNotFound, err.Error())\n\t\t}\n\n\t\tpmxErr := pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\treturn pmxadapter.ServiceDeployment{}, pmxErr\n\t}\n\n\tsd := pmxadapter.ServiceDeployment{\n\t\tID: rc.ObjectMeta.Name,\n\t\tActualState: statusFromReplicationController(rc),\n\t}\n\treturn sd, nil\n}\n\nfunc (a KubernetesAdapter) CreateServices(services []*pmxadapter.Service) ([]pmxadapter.ServiceDeployment, *pmxadapter.Error) {\n\tdeployments := make([]pmxadapter.ServiceDeployment, len(services))\n\n\tfor i, s := range services {\n\t\trcSpec := replicationControllerSpecFromService(*s)\n\t\trc, err := DefaultExecutor.CreateReplicationController(rcSpec)\n\t\tif err != nil {\n\t\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonAlreadyExists {\n\t\t\t\treturn nil, pmxadapter.NewError(http.StatusConflict, err.Error())\n\t\t\t}\n\t\t\treturn nil, pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t\t}\n\n\t\tdeployments[i].ID = rc.ObjectMeta.Name\n\t\tdeployments[i].ActualState = statusFromReplicationController(rc)\n\t}\n\n\treturn deployments, nil\n}\n\nfunc (a KubernetesAdapter) DestroyService(id string) *pmxadapter.Error {\n\terr := DefaultExecutor.DeleteReplicationController(id)\n\tif err != nil {\n\t\tif sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonNotFound {\n\t\t\treturn pmxadapter.NewError(http.StatusNotFound, err.Error())\n\t\t}\n\n\t\treturn pmxadapter.NewError(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc (a KubernetesAdapter) GetMetadata() pmxadapter.Metadata {\n\treturn pmxadapter.Metadata{\n\t\tVersion: metadataVersion,\n\t\tType: metadataType,\n\t\tIsHealthy: DefaultExecutor.IsHealthy(),\n\t}\n}\n\nfunc sanitizeServiceName(n string) string {\n\ts := illegalNameCharacters.ReplaceAllString(n, \"-\")\n\treturn strings.ToLower(s)\n}\n\nfunc statusFromReplicationController(rc api.ReplicationController) string {\n\tdesired := rc.Spec.Replicas\n\tactual := rc.Status.Replicas\n\n\tif actual < desired {\n\t\treturn \"pending\"\n\t} else if desired == actual {\n\t\treturn \"running\"\n\t}\n\treturn \"unknown\"\n}\n\nfunc replicationControllerSpecFromService(s pmxadapter.Service) api.ReplicationController {\n\tports := make([]api.Port, len(s.Ports))\n\tfor i, p := range s.Ports {\n\t\tports[i].HostPort = int(p.HostPort)\n\t\tports[i].ContainerPort = int(p.ContainerPort)\n\t\tports[i].Protocol = api.Protocol(p.Protocol)\n\t}\n\n\tenv := make([]api.EnvVar, len(s.Environment))\n\tfor i, e := range s.Environment {\n\t\tenv[i].Name = e.Variable\n\t\tenv[i].Value = e.Value\n\t}\n\n\tsafeName := sanitizeServiceName(s.Name)\n\n\treturn api.ReplicationController{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: safeName,\n\t\t},\n\t\tSpec: api.ReplicationControllerSpec{\n\t\t\tReplicas: s.Deployment.Count,\n\t\t\tSelector: map[string]string{\"name\": safeName},\n\t\t\tTemplate: &api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\"name\": safeName},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: safeName,\n\t\t\t\t\t\t\tImage: s.Source,\n\t\t\t\t\t\t\tCommand: []string{s.Command},\n\t\t\t\t\t\t\tPorts: ports,\n\t\t\t\t\t\t\tEnv: env,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\t\"github.com\/dotcloud\/docker\/rcli\"\n\t\"github.com\/dotcloud\/docker\/term\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\nvar (\n\tGIT_COMMIT string\n\tNO_MEMORY_LIMIT string\n)\n\nfunc main() {\n\tif docker.SelfPath() == \"\/sbin\/init\" {\n\t\t\/\/ Running in init mode\n\t\tdocker.SysInit()\n\t\treturn\n\t}\n\t\/\/ FIXME: Switch d and D ? (to be more sshd like)\n\tflDaemon := flag.Bool(\"d\", false, \"Daemon mode\")\n\tflDebug := flag.Bool(\"D\", false, \"Debug mode\")\n\tbridgeName := flag.String(\"b\", \"\", \"Attach containers to a pre-existing network bridge\")\n\tpidfile := flag.String(\"p\", \"\/var\/run\/docker.pid\", \"File containing process PID\")\n\tflag.Parse()\n\tif *bridgeName != \"\" {\n\t\tdocker.NetworkBridgeIface = *bridgeName\n\t} else {\n\t\tdocker.NetworkBridgeIface = docker.DefaultNetworkBridge\n\t}\n\tif *flDebug {\n\t\tos.Setenv(\"DEBUG\", \"1\")\n\t}\n\tdocker.GIT_COMMIT = GIT_COMMIT\n\tdocker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == \"1\"\n\tif *flDaemon {\n\t\tif flag.NArg() != 0 {\n\t\t\tflag.Usage()\n\t\t\treturn\n\t\t}\n\t\tif NO_MEMORY_LIMIT == \"1\" {\n\t\t\tlog.Printf(\"WARNING: This version of docker has been compiled without memory limit support.\")\n\t\t}\n\t\tif err := daemon(*pidfile); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tif err := runCommand(flag.Args()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc createPidFile(pidfile string) error {\n\tif _, err := os.Stat(pidfile); err == nil {\n\t\treturn fmt.Errorf(\"pid file found, ensure docker is not running or delete %s\", pidfile)\n\t}\n\n\tfile, err := os.Create(pidfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer file.Close()\n\n\t_, err = fmt.Fprintf(file, \"%d\", os.Getpid())\n\treturn err\n}\n\nfunc removePidFile(pidfile string) {\n\tif err := os.Remove(pidfile); err != nil {\n\t\tlog.Printf(\"Error removing %s: %s\", pidfile, err)\n\t}\n}\n\nfunc daemon(pidfile string) error {\n\tif err := createPidFile(pidfile); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer removePidFile(pidfile)\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))\n\tgo func() {\n\t\tsig := <-c\n\t\tlog.Printf(\"Received signal '%v', exiting\\n\", sig)\n\t\tremovePidFile(pidfile)\n\t\tos.Exit(0)\n\t}()\n\n\tservice, err := docker.NewServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn rcli.ListenAndServe(\"tcp\", \"127.0.0.1:4242\", service)\n}\n\nfunc runCommand(args []string) error {\n\t\/\/ FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose\n\t\/\/ CloseWrite(), which we need to cleanly signal that stdin is closed without\n\t\/\/ closing the connection.\n\t\/\/ See http:\/\/code.google.com\/p\/go\/issues\/detail?id=3345\n\tif conn, err := rcli.Call(\"tcp\", \"127.0.0.1:4242\", args...); err == nil {\n\t\toptions := conn.GetOptions()\n\t\tif options.RawTerminal &&\n\t\t\tterm.IsTerminal(int(os.Stdin.Fd())) &&\n\t\t\tos.Getenv(\"NORAW\") == \"\" {\n\t\t\tif oldState, err := rcli.SetRawTerminal(); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tdefer rcli.RestoreTerminal(oldState)\n\t\t\t}\n\t\t}\n\t\treceiveStdout := docker.Go(func() error {\n\t\t\t_, err := io.Copy(os.Stdout, conn)\n\t\t\treturn err\n\t\t})\n\t\tsendStdin := docker.Go(func() error {\n\t\t\t_, err := io.Copy(conn, os.Stdin)\n\t\t\tif err := conn.CloseWrite(); err != nil {\n\t\t\t\tlog.Printf(\"Couldn't send EOF: \" + err.Error())\n\t\t\t}\n\t\t\treturn err\n\t\t})\n\t\tif err := <-receiveStdout; err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !term.IsTerminal(int(os.Stdin.Fd())) {\n\t\t\tif err := <-sendStdin; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tservice, err := docker.NewServer()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdockerConn := rcli.NewDockerLocalConn(os.Stdout)\n\t\tdefer dockerConn.Close()\n\t\tif err := rcli.LocalCall(service, os.Stdin, dockerConn, args...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Disabled standalone mode (fixes #364)<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\t\"github.com\/dotcloud\/docker\/rcli\"\n\t\"github.com\/dotcloud\/docker\/term\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\nvar (\n\tGIT_COMMIT string\n\tNO_MEMORY_LIMIT string\n)\n\nfunc main() {\n\tif docker.SelfPath() == \"\/sbin\/init\" {\n\t\t\/\/ Running in init mode\n\t\tdocker.SysInit()\n\t\treturn\n\t}\n\t\/\/ FIXME: Switch d and D ? (to be more sshd like)\n\tflDaemon := flag.Bool(\"d\", false, \"Daemon mode\")\n\tflDebug := flag.Bool(\"D\", false, \"Debug mode\")\n\tbridgeName := flag.String(\"b\", \"\", \"Attach containers to a pre-existing network bridge\")\n\tpidfile := flag.String(\"p\", \"\/var\/run\/docker.pid\", \"File containing process PID\")\n\tflag.Parse()\n\tif *bridgeName != \"\" {\n\t\tdocker.NetworkBridgeIface = *bridgeName\n\t} else {\n\t\tdocker.NetworkBridgeIface = docker.DefaultNetworkBridge\n\t}\n\tif *flDebug {\n\t\tos.Setenv(\"DEBUG\", \"1\")\n\t}\n\tdocker.GIT_COMMIT = GIT_COMMIT\n\tdocker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == \"1\"\n\tif *flDaemon {\n\t\tif flag.NArg() != 0 {\n\t\t\tflag.Usage()\n\t\t\treturn\n\t\t}\n\t\tif NO_MEMORY_LIMIT == \"1\" {\n\t\t\tlog.Printf(\"WARNING: This version of docker has been compiled without memory limit support.\")\n\t\t}\n\t\tif err := daemon(*pidfile); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tif err := runCommand(flag.Args()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc createPidFile(pidfile string) error {\n\tif _, err := os.Stat(pidfile); err == nil {\n\t\treturn fmt.Errorf(\"pid file found, ensure docker is not running or delete %s\", pidfile)\n\t}\n\n\tfile, err := os.Create(pidfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer file.Close()\n\n\t_, err = fmt.Fprintf(file, \"%d\", os.Getpid())\n\treturn err\n}\n\nfunc removePidFile(pidfile string) {\n\tif err := os.Remove(pidfile); err != nil {\n\t\tlog.Printf(\"Error removing %s: %s\", pidfile, err)\n\t}\n}\n\nfunc daemon(pidfile string) error {\n\tif err := createPidFile(pidfile); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer removePidFile(pidfile)\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))\n\tgo func() {\n\t\tsig := <-c\n\t\tlog.Printf(\"Received signal '%v', exiting\\n\", sig)\n\t\tremovePidFile(pidfile)\n\t\tos.Exit(0)\n\t}()\n\n\tservice, err := docker.NewServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn rcli.ListenAndServe(\"tcp\", \"127.0.0.1:4242\", service)\n}\n\nfunc runCommand(args []string) error {\n\t\/\/ FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose\n\t\/\/ CloseWrite(), which we need to cleanly signal that stdin is closed without\n\t\/\/ closing the connection.\n\t\/\/ See http:\/\/code.google.com\/p\/go\/issues\/detail?id=3345\n\tif conn, err := rcli.Call(\"tcp\", \"127.0.0.1:4242\", args...); err == nil {\n\t\toptions := conn.GetOptions()\n\t\tif options.RawTerminal &&\n\t\t\tterm.IsTerminal(int(os.Stdin.Fd())) &&\n\t\t\tos.Getenv(\"NORAW\") == \"\" {\n\t\t\tif oldState, err := rcli.SetRawTerminal(); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tdefer rcli.RestoreTerminal(oldState)\n\t\t\t}\n\t\t}\n\t\treceiveStdout := docker.Go(func() error {\n\t\t\t_, err := io.Copy(os.Stdout, conn)\n\t\t\treturn err\n\t\t})\n\t\tsendStdin := docker.Go(func() error {\n\t\t\t_, err := io.Copy(conn, os.Stdin)\n\t\t\tif err := conn.CloseWrite(); err != nil {\n\t\t\t\tlog.Printf(\"Couldn't send EOF: \" + err.Error())\n\t\t\t}\n\t\t\treturn err\n\t\t})\n\t\tif err := <-receiveStdout; err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !term.IsTerminal(int(os.Stdin.Fd())) {\n\t\t\tif err := <-sendStdin; err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Can't connect to docker daemon. Is 'docker -d' running on this host?\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package docker provides a layer on top of Docker's API via Kite handlers.\npackage docker\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/koding\/kite\"\n)\n\n\/\/ Docker defines the main configuration. One instance is running as one Docker\n\/\/ client, so multiple instances of a Docker struct can connect to multiple\n\/\/ Docker Servers. After creating a new Docker struct, it'll be able to manage\n\/\/ Docker containers. A usual working lifecyle is:\n\/\/ 1. Build a Docker image\n\/\/ 2. Create a new Docker container from that image\n\/\/ 3. Start this container\n\/\/ 4. Connect and open a terminal instance to it (optional)\n\/\/ 5. Stop the container\n\/\/ 6. Remove the container\n\/\/ 7. Destroy the image\ntype Docker struct{}\n\n\/\/ TODO:\n\/\/ 1. Docker Deamon needs to be run in TCP Mode. If UNIX sockets are used we\n\/\/ need to setup the client so it has access to the `docker` group which\n\/\/ maintains the the socket. So initiallty TCP is a good start.\n\/\/ 2. TCP mode is enabled by adding \"-H tcp:\/\/bind-ip:port\" to\n\/\/ \/etc\/default\/docker or DOCKER_OPTS\n\/\/ 3. But only authenticated Client should access it over TCP, so we need to\n\/\/ generate TLS cert keys and let both the Deamon and Client use it.\n\/\/ 4. The boot2docker guys are using this package:\n\/\/ https:\/\/github.com\/SvenDowideit\/generate_cert for it which we can also use.\n\/\/ 5. Here is also some information on how to start the Docker Deamon securely\n\/\/ https:\/\/docs.docker.com\/articles\/https\/\nfunc New() *Docker {\n\treturn &Docker{}\n}\n\n\/\/ Build builds a new container image from a public Docker path or froma a\n\/\/ given Dockerfile\nfunc (d *Docker) Build(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Create creates a new container\nfunc (d *Docker) Create(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Connects connects to an existing Container by spawning a new process and\n\/\/ attaching to it.\nfunc (d *Docker) Connect(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Stop stops a running container\nfunc (d *Docker) Stop(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Start starts a stopped container\nfunc (d *Docker) Start(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Kill kills and delete a container\nfunc (d *Docker) Kill(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Destroy destroys and removes an image.\nfunc (d *Docker) Destroy(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n<commit_msg>docker: add consructor and include handlers into Klient<commit_after>\/\/ Package docker provides a layer on top of Docker's API via Kite handlers.\npackage docker\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/samalba\/dockerclient\"\n)\n\n\/\/ Docker defines the main configuration. One instance is running as one Docker\n\/\/ client, so multiple instances of a Docker struct can connect to multiple\n\/\/ Docker Servers. After creating a new Docker struct, it'll be able to manage\n\/\/ Docker containers. A usual working lifecyle is:\n\/\/ 1. Build a Docker image\n\/\/ 2. Create a new Docker container from that image\n\/\/ 3. Start this container\n\/\/ 4. Connect and open a terminal instance to it (optional)\n\/\/ 5. Stop the container\n\/\/ 6. Remove the container\n\/\/ 7. Destroy the image\ntype Docker struct {\n\tclient *dockerclient.DockerClient\n}\n\n\/\/ New connects to a Docker Deamon specified with the given URL. It can be a\n\/\/ TCP address or a UNIX socket.\nfunc New(url string) *Docker {\n\t\/\/ the error is returned only when the passed URL is not parsable via\n\t\/\/ url.Parse, so we can safely neglect it\n\tclient, _ := dockerclient.NewDockerClient(url, nil)\n\treturn &Docker{\n\t\tclient: client,\n\t}\n}\n\n\/\/ Build builds a new container image from a public Docker path or froma a\n\/\/ given Dockerfile\nfunc (d *Docker) Build(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Create creates a new container\nfunc (d *Docker) Create(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Connects connects to an existing Container by spawning a new process and\n\/\/ attaching to it.\nfunc (d *Docker) Connect(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Stop stops a running container\nfunc (d *Docker) Stop(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Start starts a stopped container\nfunc (d *Docker) Start(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Kill kills and delete a container\nfunc (d *Docker) Kill(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ Destroy destroys and removes an image.\nfunc (d *Docker) Destroy(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n\n\/\/ List lists all available containers\nfunc (d *Docker) List(r *kite.Request) (interface{}, error) {\n\treturn nil, errors.New(\"not implemented yet.\")\n}\n<|endoftext|>"} {"text":"<commit_before>package docker\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n \"log\"\n \"os\"\n \"time\"\n \"github.com\/docker\/engine-api\/client\"\n \"github.com\/docker\/engine-api\/types\"\n \"github.com\/docker\/engine-api\/types\/filters\"\n eventtypes \"github.com\/docker\/engine-api\/types\/events\"\n \"golang.org\/x\/net\/context\"\n api \"github.com\/docking-tools\/register\/api\"\n \"github.com\/docking-tools\/register\/config\"\n \n)\n\ntype DockerRegistry struct {\n docker *client.Client\n events <-chan *io.ReadCloser\n config\t\t*config.ConfigFile\n}\n\ntype DockerServicePort struct {\n api.ServicePort\n ContainerHostname string\n\tContainerID string\n\tContainerName string\n\tcontainer *types.ContainerJSON\n}\n\nfunc assert(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc New(config *config.ConfigFile) (*DockerRegistry, error) {\n \n \/\/ Init docker\n \n dockerHost:= config.DockerUrl\n if dockerHost == \"\" {\n \tdockerHost= os.Getenv(\"DOCKER_HOST\")\n }\n\tlog.Printf(\"Start docker client %s\", dockerHost)\n\n\t\/\/ Init client docker\n\tdefaultHeaders := map[string]string{\"User-Agent\": \"engine-api-cli-1.0\"}\n\tdocker, err := client.NewClient(dockerHost, \"v1.22\", nil, defaultHeaders)\n\n if err != nil {\n panic(err)\n }\n\n\n \n return &DockerRegistry{\n docker: docker,\n events: nil,\n config: config,\n }, nil\n}\n\nfunc (doc * DockerRegistry) Start(ep api.EventProcessor) {\n\n \/\/ check if an error occur during watch event\n closeChan := make(chan error)\n \n\tmonitorContainerEvents := func(started chan<- struct{}, c chan<-eventtypes.Message) {\n\t\tf := filters.NewArgs()\n\t\tf.Add(\"type\", \"container\")\n\t\toptions := types.EventsOptions{\n\t\t\tFilters: f,\n\t\t}\n\t\tresBody, err := doc.docker.Events(context.Background(), options)\n\t\t\/\/ Whether we successfully subscribed to events or not, we can now\n\t\t\/\/ unblock the main goroutine.\n\t\tclose(started)\n\t\tlog.Printf(\"Start listenig docker event\")\t\t\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer resBody.Close()\n\n\t\t\/\/ Decode event\n\t\tdec := json.NewDecoder(resBody)\n\t\tfor {\n\t\t\tvar event eventtypes.Message\n\t\t\terr := dec.Decode(&event)\n\t\t\tif err != nil && err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc <- event\n\t\t\n\t\t}\n\t}\n\n\tparseService := func(id string, status string) {\n\t\tcontainer, err := doc.docker.ContainerInspect(context.Background(), id)\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t}\n\t\tif (status==\"\") {\n\t\t\tstatus =container.State.Status\n\t\t}\n\t\t\n\t\tservices := make([]*api.Service,0)\n\t\tservices, err = createService(doc.config, &container)\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t}\n\n\t\tfor _,service := range services {\n\t\t\tgo ep(status, service , closeChan)\n\t\t}\n\t\n\t}\t\n\n\t\n\t\/\/ getContainerList simulates creation event for all previously existing\n\t\/\/ containers.\n\tgetContainerList := func() {\n\t\toptions := types.ContainerListOptions{\n\t\t\tQuiet: false,\n\t\t}\n\t\tcs, err := doc.docker.ContainerList(context.Background(), options)\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t}\n\t\tfor _, container := range cs {\n\t\t\tparseService(container.ID, container.State)\n\t\t}\n\t}\n\t\n\n\teh := eventHandler{handlers: make(map[string]func(eventtypes.Message))}\n\t\teh.Handle(\"*\", func(e eventtypes.Message) {\n\t\t\tlog.Printf(\"new event %v id: %v\",e.Status,e.ID)\n\t\t\tparseService(e.ID, e.Status)\n\t\t})\n\t\n\t\n\t\n\t\/\/ start listening event\n\tstarted := make(chan struct{})\n\teventChan := make(chan eventtypes.Message)\n\tgo eh.Watch(eventChan)\n\tgo monitorContainerEvents(started, eventChan)\n\tdefer close(eventChan)\n\t<-started\n\t\n\tgetContainerList ()\n \n \n \n \n for range time.Tick(500 * time.Millisecond) {\n\t\tselect {\n\t\tcase err, ok := <-closeChan:\n\t\t\tif ok {\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ this is suppressing \"unexpected EOF\" in the cli when the\n\t\t\t\t\t\/\/ daemon restarts so it shutdowns cleanly\n\t\t\t\t\tif err != io.ErrUnexpectedEOF {\n\t\t\t\t\t\tcloseChan <- err\n\t\t\t\t\t\tlog.Printf(\"Error on run\",err)\t\n\t\t\t\t\t\tclose(closeChan)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ just skip\n\t\t}\n }\n log.Fatal(\"Docker event loop closed\")\n}\n\n\n\n\nvar Hostname string\n\nfunc init() {\n\t\/\/ It's ok for Hostname to ultimately be an empty string\n\t\/\/ An empty string will fall back to trying to make a best guess\n\tHostname, _ = os.Hostname()\n}<commit_msg>bug fix: correct dead container on start before inspect<commit_after>package docker\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n \"log\"\n \"os\"\n \"time\"\n \"github.com\/docker\/engine-api\/client\"\n \"github.com\/docker\/engine-api\/types\"\n \"github.com\/docker\/engine-api\/types\/filters\"\n eventtypes \"github.com\/docker\/engine-api\/types\/events\"\n \"golang.org\/x\/net\/context\"\n api \"github.com\/docking-tools\/register\/api\"\n \"github.com\/docking-tools\/register\/config\"\n \n)\n\ntype DockerRegistry struct {\n docker *client.Client\n events <-chan *io.ReadCloser\n config\t\t*config.ConfigFile\n}\n\ntype DockerServicePort struct {\n api.ServicePort\n ContainerHostname string\n\tContainerID string\n\tContainerName string\n\tcontainer *types.ContainerJSON\n}\n\nfunc assert(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc New(config *config.ConfigFile) (*DockerRegistry, error) {\n \n \/\/ Init docker\n \n dockerHost:= config.DockerUrl\n if dockerHost == \"\" {\n \tdockerHost= os.Getenv(\"DOCKER_HOST\")\n }\n\tlog.Printf(\"Start docker client %s\", dockerHost)\n\n\t\/\/ Init client docker\n\tdefaultHeaders := map[string]string{\"User-Agent\": \"engine-api-cli-1.0\"}\n\tdocker, err := client.NewClient(dockerHost, \"v1.22\", nil, defaultHeaders)\n\n if err != nil {\n panic(err)\n }\n\n\n \n return &DockerRegistry{\n docker: docker,\n events: nil,\n config: config,\n }, nil\n}\n\nfunc (doc * DockerRegistry) Start(ep api.EventProcessor) {\n\n \/\/ check if an error occur during watch event\n closeChan := make(chan error)\n \n\tmonitorContainerEvents := func(started chan<- struct{}, c chan<-eventtypes.Message) {\n\t\tf := filters.NewArgs()\n\t\tf.Add(\"type\", \"container\")\n\t\toptions := types.EventsOptions{\n\t\t\tFilters: f,\n\t\t}\n\t\tresBody, err := doc.docker.Events(context.Background(), options)\n\t\t\/\/ Whether we successfully subscribed to events or not, we can now\n\t\t\/\/ unblock the main goroutine.\n\t\tclose(started)\n\t\tlog.Printf(\"Start listenig docker event\")\t\t\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer resBody.Close()\n\n\t\t\/\/ Decode event\n\t\tdec := json.NewDecoder(resBody)\n\t\tfor {\n\t\t\tvar event eventtypes.Message\n\t\t\terr := dec.Decode(&event)\n\t\t\tif err != nil && err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc <- event\n\t\t\n\t\t}\n\t}\n\n\tparseService := func(id string, status string) {\n\t\tcontainer, err := doc.docker.ContainerInspect(context.Background(), id)\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t} else {\n\t\t\tif (status==\"\") {\n\t\t\t\tstatus =container.State.Status\n\t\t\t}\n\t\t\n\t\t\tservices := make([]*api.Service,0)\n\t\t\tservices, err = createService(doc.config, &container)\n\t\t\tif err != nil {\n\t\t\t\tcloseChan <- err\n\t\t\t}\n\t\n\t\t\tfor _,service := range services {\n\t\t\t\tgo ep(status, service , closeChan)\n\t\t\t}\n\t\t}\n\t}\t\n\n\t\n\t\/\/ getContainerList simulates creation event for all previously existing\n\t\/\/ containers.\n\tgetContainerList := func() {\n\t\toptions := types.ContainerListOptions{\n\t\t\tQuiet: false,\n\t\t}\n\t\tcs, err := doc.docker.ContainerList(context.Background(), options)\n\t\tif err != nil {\n\t\t\tcloseChan <- err\n\t\t}\n\t\tfor _, container := range cs {\n\t\t\tparseService(container.ID, container.State)\n\t\t}\n\t}\n\t\n\n\teh := eventHandler{handlers: make(map[string]func(eventtypes.Message))}\n\t\teh.Handle(\"*\", func(e eventtypes.Message) {\n\t\t\tlog.Printf(\"new event %v id: %v\",e.Status,e.ID)\n\t\t\tparseService(e.ID, e.Status)\n\t\t})\n\t\n\t\n\t\n\t\/\/ start listening event\n\tstarted := make(chan struct{})\n\teventChan := make(chan eventtypes.Message)\n\tgo eh.Watch(eventChan)\n\tgo monitorContainerEvents(started, eventChan)\n\tdefer close(eventChan)\n\t<-started\n\t\n\tgetContainerList ()\n \n \n \n \n for range time.Tick(500 * time.Millisecond) {\n\t\tselect {\n\t\tcase err, ok := <-closeChan:\n\t\t\tif ok {\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ this is suppressing \"unexpected EOF\" in the cli when the\n\t\t\t\t\t\/\/ daemon restarts so it shutdowns cleanly\n\t\t\t\t\tif err != io.ErrUnexpectedEOF {\n\t\t\t\t\t\tcloseChan <- err\n\t\t\t\t\t\tlog.Printf(\"Error on run\",err)\t\n\t\t\t\t\t\tclose(closeChan)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ just skip\n\t\t}\n }\n log.Fatal(\"Docker event loop closed\")\n}\n\n\n\n\nvar Hostname string\n\nfunc init() {\n\t\/\/ It's ok for Hostname to ultimately be an empty string\n\t\/\/ An empty string will fall back to trying to make a best guess\n\tHostname, _ = os.Hostname()\n}<|endoftext|>"} {"text":"<commit_before>package logging\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n)\n\nconst (\n\t\/\/ logBufferSize is the size of the buffer.\n\tlogBufferSize = 64 * 1024\n\n\t\/\/ bufferFlushDuration is the duration at which we flush the buffer.\n\tbufferFlushDuration = 100 * time.Millisecond\n\n\t\/\/ lineScanLimit is the number of bytes we will attempt to scan for new\n\t\/\/ lines when approaching the end of the file to avoid a log line being\n\t\/\/ split between two files. Any single line that is greater than this limit\n\t\/\/ may be split.\n\tlineScanLimit = 32 * 1024\n\n\t\/\/ newLineDelimiter is the delimiter used for new lines.\n\tnewLineDelimiter = '\\n'\n)\n\n\/\/ FileRotator writes bytes to a rotated set of files\ntype FileRotator struct {\n\tMaxFiles int \/\/ MaxFiles is the maximum number of rotated files allowed in a path\n\tFileSize int64 \/\/ FileSize is the size a rotated file is allowed to grow\n\n\tpath string \/\/ path is the path on the file system where the rotated set of files are opened\n\tbaseFileName string \/\/ baseFileName is the base file name of the rotated files\n\tlogFileIdx int \/\/ logFileIdx is the current index of the rotated files\n\toldestLogFileIdx int \/\/ oldestLogFileIdx is the index of the oldest log file in a path\n\n\tcurrentFile *os.File \/\/ currentFile is the file that is currently getting written\n\tcurrentWr int64 \/\/ currentWr is the number of bytes written to the current file\n\tbufw *bufio.Writer\n\tbufLock sync.Mutex\n\n\tflushTicker *time.Ticker\n\tlogger hclog.Logger\n\tpurgeCh chan struct{}\n\tdoneCh chan struct{}\n\n\tclosed bool\n\tclosedLock sync.Mutex\n}\n\n\/\/ NewFileRotator returns a new file rotator\nfunc NewFileRotator(path string, baseFile string, maxFiles int,\n\tfileSize int64, logger hclog.Logger) (*FileRotator, error) {\n\tlogger = logger.Named(\"rotator\")\n\trotator := &FileRotator{\n\t\tMaxFiles: maxFiles,\n\t\tFileSize: fileSize,\n\n\t\tpath: path,\n\t\tbaseFileName: baseFile,\n\n\t\tflushTicker: time.NewTicker(bufferFlushDuration),\n\t\tlogger: logger,\n\t\tpurgeCh: make(chan struct{}, 1),\n\t\tdoneCh: make(chan struct{}, 1),\n\t}\n\n\tif err := rotator.lastFile(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo rotator.purgeOldFiles()\n\tgo rotator.flushPeriodically()\n\treturn rotator, nil\n}\n\n\/\/ Write writes a byte array to a file and rotates the file if it's size becomes\n\/\/ equal to the maximum size the user has defined.\nfunc (f *FileRotator) Write(p []byte) (n int, err error) {\n\tn = 0\n\tvar forceRotate bool\n\n\tfor n < len(p) {\n\t\t\/\/ Check if we still have space in the current file, otherwise close and\n\t\t\/\/ open the next file\n\t\tif forceRotate || f.currentWr >= f.FileSize {\n\t\t\tforceRotate = false\n\t\t\tf.flushBuffer()\n\t\t\tf.currentFile.Close()\n\t\t\tif err := f.nextFile(); err != nil {\n\t\t\t\tf.logger.Error(\"error creating next file\", \"err\", err)\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\t\/\/ Calculate the remaining size on this file and how much we have left\n\t\t\/\/ to write\n\t\tremainingSpace := f.FileSize - f.currentWr\n\t\tremainingToWrite := int64(len(p[n:]))\n\n\t\t\/\/ Check if we are near the end of the file. If we are we attempt to\n\t\t\/\/ avoid a log line being split between two files.\n\t\tvar nw int\n\t\tif (remainingSpace - lineScanLimit) < remainingToWrite {\n\t\t\t\/\/ Scan for new line and if the data up to new line fits in current\n\t\t\t\/\/ file, write to buffer\n\t\t\tidx := bytes.IndexByte(p[n:], newLineDelimiter)\n\t\t\tif idx >= 0 && (remainingSpace-int64(idx)-1) >= 0 {\n\t\t\t\t\/\/ We have space so write it to buffer\n\t\t\t\tnw, err = f.writeToBuffer(p[n : n+idx+1])\n\t\t\t} else if idx >= 0 {\n\t\t\t\t\/\/ We found a new line but don't have space so just force rotate\n\t\t\t\tforceRotate = true\n\t\t\t} else if remainingToWrite > f.FileSize || f.FileSize-lineScanLimit < 0 {\n\t\t\t\t\/\/ There is no new line remaining but there is no point in\n\t\t\t\t\/\/ rotating since the remaining data will not even fit in the\n\t\t\t\t\/\/ next file either so just fill this one up.\n\t\t\t\tli := int64(n) + remainingSpace\n\t\t\t\tif remainingSpace > remainingToWrite {\n\t\t\t\t\tli = int64(n) + remainingToWrite\n\t\t\t\t}\n\t\t\t\tnw, err = f.writeToBuffer(p[n:li])\n\t\t\t} else {\n\t\t\t\t\/\/ There is no new line in the data remaining for us to write\n\t\t\t\t\/\/ and it will fit in the next file so rotate.\n\t\t\t\tforceRotate = true\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Write all the bytes in the current file\n\t\t\tnw, err = f.writeToBuffer(p[n:])\n\t\t}\n\n\t\t\/\/ Increment the number of bytes written so far in this method\n\t\t\/\/ invocation\n\t\tn += nw\n\n\t\t\/\/ Increment the total number of bytes in the file\n\t\tf.currentWr += int64(n)\n\t\tif err != nil {\n\t\t\tf.logger.Error(\"error writing to file\", \"err\", err)\n\n\t\t\t\/\/ As bufio writer does not automatically recover in case of any\n\t\t\t\/\/ io error, we need to recover from it manually resetting the\n\t\t\t\/\/ writter.\n\t\t\tf.createOrResetBuffer()\n\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ nextFile opens the next file and purges older files if the number of rotated\n\/\/ files is larger than the maximum files configured by the user\nfunc (f *FileRotator) nextFile() error {\n\tnextFileIdx := f.logFileIdx\n\tfor {\n\t\tnextFileIdx += 1\n\t\tlogFileName := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, nextFileIdx))\n\t\tif fi, err := os.Stat(logFileName); err == nil {\n\t\t\tif fi.IsDir() || fi.Size() >= f.FileSize {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tf.logFileIdx = nextFileIdx\n\t\tif err := f.createFile(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\t}\n\t\/\/ Purge old files if we have more files than MaxFiles\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tif f.logFileIdx-f.oldestLogFileIdx >= f.MaxFiles && !f.closed {\n\t\tselect {\n\t\tcase f.purgeCh <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ lastFile finds out the rotated file with the largest index in a path.\nfunc (f *FileRotator) lastFile() error {\n\tfinfos, err := ioutil.ReadDir(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprefix := fmt.Sprintf(\"%s.\", f.baseFileName)\n\tfor _, fi := range finfos {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(fi.Name(), prefix) {\n\t\t\tfileIdx := strings.TrimPrefix(fi.Name(), prefix)\n\t\t\tn, err := strconv.Atoi(fileIdx)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n > f.logFileIdx {\n\t\t\t\tf.logFileIdx = n\n\t\t\t}\n\t\t}\n\t}\n\tif err := f.createFile(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ createFile opens a new or existing file for writing\nfunc (f *FileRotator) createFile() error {\n\tlogFileName := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, f.logFileIdx))\n\tcFile, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.currentFile = cFile\n\tfi, err := f.currentFile.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.currentWr = fi.Size()\n\tf.createOrResetBuffer()\n\treturn nil\n}\n\n\/\/ flushPeriodically flushes the buffered writer every 100ms to the underlying\n\/\/ file\nfunc (f *FileRotator) flushPeriodically() {\n\tfor range f.flushTicker.C {\n\t\tf.flushBuffer()\n\t}\n}\n\n\/\/ Close flushes and closes the rotator. It never returns an error.\nfunc (f *FileRotator) Close() error {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\n\t\/\/ Stop the ticker and flush for one last time\n\tf.flushTicker.Stop()\n\tf.flushBuffer()\n\n\t\/\/ Stop the purge go routine\n\tif !f.closed {\n\t\tf.doneCh <- struct{}{}\n\t\tclose(f.purgeCh)\n\t\tf.closed = true\n\t\tf.currentFile.Close()\n\t}\n\n\treturn nil\n}\n\n\/\/ purgeOldFiles removes older files and keeps only the last N files rotated for\n\/\/ a file\nfunc (f *FileRotator) purgeOldFiles() {\n\tfor {\n\t\tselect {\n\t\tcase <-f.purgeCh:\n\t\t\tvar fIndexes []int\n\t\t\tfiles, err := ioutil.ReadDir(f.path)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Error(\"error getting directory listing\", \"err\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Inserting all the rotated files in a slice\n\t\t\tfor _, fi := range files {\n\t\t\t\tif strings.HasPrefix(fi.Name(), f.baseFileName) {\n\t\t\t\t\tfileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf(\"%s.\", f.baseFileName))\n\t\t\t\t\tn, err := strconv.Atoi(fileIdx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tf.logger.Error(\"error extracting file index\", \"err\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfIndexes = append(fIndexes, n)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Not continuing to delete files if the number of files is not more\n\t\t\t\/\/ than MaxFiles\n\t\t\tif len(fIndexes) <= f.MaxFiles {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Sorting the file indexes so that we can purge the older files and keep\n\t\t\t\/\/ only the number of files as configured by the user\n\t\t\tsort.Ints(fIndexes)\n\t\t\ttoDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles]\n\t\t\tfor _, fIndex := range toDelete {\n\t\t\t\tfname := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, fIndex))\n\t\t\t\terr := os.RemoveAll(fname)\n\t\t\t\tif err != nil {\n\t\t\t\t\tf.logger.Error(\"error removing file\", \"filename\", fname, \"err\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tf.oldestLogFileIdx = fIndexes[0]\n\t\tcase <-f.doneCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ flushBuffer flushes the buffer\nfunc (f *FileRotator) flushBuffer() error {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\tif f.bufw != nil {\n\t\treturn f.bufw.Flush()\n\t}\n\treturn nil\n}\n\n\/\/ writeToBuffer writes the byte array to buffer\nfunc (f *FileRotator) writeToBuffer(p []byte) (int, error) {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\treturn f.bufw.Write(p)\n}\n\n\/\/ createOrResetBuffer creates a new buffer if we don't have one otherwise\n\/\/ resets the buffer\nfunc (f *FileRotator) createOrResetBuffer() {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\tif f.bufw == nil {\n\t\tf.bufw = bufio.NewWriterSize(f.currentFile, logBufferSize)\n\t} else {\n\t\tf.bufw.Reset(f.currentFile)\n\t}\n}\n<commit_msg>logmon: Fix a memory leak on task restart<commit_after>package logging\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n)\n\nconst (\n\t\/\/ logBufferSize is the size of the buffer.\n\tlogBufferSize = 64 * 1024\n\n\t\/\/ bufferFlushDuration is the duration at which we flush the buffer.\n\tbufferFlushDuration = 100 * time.Millisecond\n\n\t\/\/ lineScanLimit is the number of bytes we will attempt to scan for new\n\t\/\/ lines when approaching the end of the file to avoid a log line being\n\t\/\/ split between two files. Any single line that is greater than this limit\n\t\/\/ may be split.\n\tlineScanLimit = 32 * 1024\n\n\t\/\/ newLineDelimiter is the delimiter used for new lines.\n\tnewLineDelimiter = '\\n'\n)\n\n\/\/ FileRotator writes bytes to a rotated set of files\ntype FileRotator struct {\n\tMaxFiles int \/\/ MaxFiles is the maximum number of rotated files allowed in a path\n\tFileSize int64 \/\/ FileSize is the size a rotated file is allowed to grow\n\n\tpath string \/\/ path is the path on the file system where the rotated set of files are opened\n\tbaseFileName string \/\/ baseFileName is the base file name of the rotated files\n\tlogFileIdx int \/\/ logFileIdx is the current index of the rotated files\n\toldestLogFileIdx int \/\/ oldestLogFileIdx is the index of the oldest log file in a path\n\n\tcurrentFile *os.File \/\/ currentFile is the file that is currently getting written\n\tcurrentWr int64 \/\/ currentWr is the number of bytes written to the current file\n\tbufw *bufio.Writer\n\tbufLock sync.Mutex\n\n\tflushTicker *time.Ticker\n\tlogger hclog.Logger\n\tpurgeCh chan struct{}\n\tdoneCh chan struct{}\n\n\tclosed bool\n\tclosedLock sync.Mutex\n}\n\n\/\/ NewFileRotator returns a new file rotator\nfunc NewFileRotator(path string, baseFile string, maxFiles int,\n\tfileSize int64, logger hclog.Logger) (*FileRotator, error) {\n\tlogger = logger.Named(\"rotator\")\n\trotator := &FileRotator{\n\t\tMaxFiles: maxFiles,\n\t\tFileSize: fileSize,\n\n\t\tpath: path,\n\t\tbaseFileName: baseFile,\n\n\t\tflushTicker: time.NewTicker(bufferFlushDuration),\n\t\tlogger: logger,\n\t\tpurgeCh: make(chan struct{}, 1),\n\t\tdoneCh: make(chan struct{}),\n\t}\n\n\tif err := rotator.lastFile(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo rotator.purgeOldFiles()\n\tgo rotator.flushPeriodically()\n\treturn rotator, nil\n}\n\n\/\/ Write writes a byte array to a file and rotates the file if it's size becomes\n\/\/ equal to the maximum size the user has defined.\nfunc (f *FileRotator) Write(p []byte) (n int, err error) {\n\tn = 0\n\tvar forceRotate bool\n\n\tfor n < len(p) {\n\t\t\/\/ Check if we still have space in the current file, otherwise close and\n\t\t\/\/ open the next file\n\t\tif forceRotate || f.currentWr >= f.FileSize {\n\t\t\tforceRotate = false\n\t\t\tf.flushBuffer()\n\t\t\tf.currentFile.Close()\n\t\t\tif err := f.nextFile(); err != nil {\n\t\t\t\tf.logger.Error(\"error creating next file\", \"err\", err)\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\t\/\/ Calculate the remaining size on this file and how much we have left\n\t\t\/\/ to write\n\t\tremainingSpace := f.FileSize - f.currentWr\n\t\tremainingToWrite := int64(len(p[n:]))\n\n\t\t\/\/ Check if we are near the end of the file. If we are we attempt to\n\t\t\/\/ avoid a log line being split between two files.\n\t\tvar nw int\n\t\tif (remainingSpace - lineScanLimit) < remainingToWrite {\n\t\t\t\/\/ Scan for new line and if the data up to new line fits in current\n\t\t\t\/\/ file, write to buffer\n\t\t\tidx := bytes.IndexByte(p[n:], newLineDelimiter)\n\t\t\tif idx >= 0 && (remainingSpace-int64(idx)-1) >= 0 {\n\t\t\t\t\/\/ We have space so write it to buffer\n\t\t\t\tnw, err = f.writeToBuffer(p[n : n+idx+1])\n\t\t\t} else if idx >= 0 {\n\t\t\t\t\/\/ We found a new line but don't have space so just force rotate\n\t\t\t\tforceRotate = true\n\t\t\t} else if remainingToWrite > f.FileSize || f.FileSize-lineScanLimit < 0 {\n\t\t\t\t\/\/ There is no new line remaining but there is no point in\n\t\t\t\t\/\/ rotating since the remaining data will not even fit in the\n\t\t\t\t\/\/ next file either so just fill this one up.\n\t\t\t\tli := int64(n) + remainingSpace\n\t\t\t\tif remainingSpace > remainingToWrite {\n\t\t\t\t\tli = int64(n) + remainingToWrite\n\t\t\t\t}\n\t\t\t\tnw, err = f.writeToBuffer(p[n:li])\n\t\t\t} else {\n\t\t\t\t\/\/ There is no new line in the data remaining for us to write\n\t\t\t\t\/\/ and it will fit in the next file so rotate.\n\t\t\t\tforceRotate = true\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Write all the bytes in the current file\n\t\t\tnw, err = f.writeToBuffer(p[n:])\n\t\t}\n\n\t\t\/\/ Increment the number of bytes written so far in this method\n\t\t\/\/ invocation\n\t\tn += nw\n\n\t\t\/\/ Increment the total number of bytes in the file\n\t\tf.currentWr += int64(n)\n\t\tif err != nil {\n\t\t\tf.logger.Error(\"error writing to file\", \"err\", err)\n\n\t\t\t\/\/ As bufio writer does not automatically recover in case of any\n\t\t\t\/\/ io error, we need to recover from it manually resetting the\n\t\t\t\/\/ writter.\n\t\t\tf.createOrResetBuffer()\n\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ nextFile opens the next file and purges older files if the number of rotated\n\/\/ files is larger than the maximum files configured by the user\nfunc (f *FileRotator) nextFile() error {\n\tnextFileIdx := f.logFileIdx\n\tfor {\n\t\tnextFileIdx += 1\n\t\tlogFileName := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, nextFileIdx))\n\t\tif fi, err := os.Stat(logFileName); err == nil {\n\t\t\tif fi.IsDir() || fi.Size() >= f.FileSize {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tf.logFileIdx = nextFileIdx\n\t\tif err := f.createFile(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\t}\n\t\/\/ Purge old files if we have more files than MaxFiles\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tif f.logFileIdx-f.oldestLogFileIdx >= f.MaxFiles && !f.closed {\n\t\tselect {\n\t\tcase f.purgeCh <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ lastFile finds out the rotated file with the largest index in a path.\nfunc (f *FileRotator) lastFile() error {\n\tfinfos, err := ioutil.ReadDir(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprefix := fmt.Sprintf(\"%s.\", f.baseFileName)\n\tfor _, fi := range finfos {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(fi.Name(), prefix) {\n\t\t\tfileIdx := strings.TrimPrefix(fi.Name(), prefix)\n\t\t\tn, err := strconv.Atoi(fileIdx)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n > f.logFileIdx {\n\t\t\t\tf.logFileIdx = n\n\t\t\t}\n\t\t}\n\t}\n\tif err := f.createFile(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ createFile opens a new or existing file for writing\nfunc (f *FileRotator) createFile() error {\n\tlogFileName := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, f.logFileIdx))\n\tcFile, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.currentFile = cFile\n\tfi, err := f.currentFile.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.currentWr = fi.Size()\n\tf.createOrResetBuffer()\n\treturn nil\n}\n\n\/\/ flushPeriodically flushes the buffered writer every 100ms to the underlying\n\/\/ file\nfunc (f *FileRotator) flushPeriodically() {\n\tfor {\n\t\tselect {\n\t\tcase <-f.flushTicker.C:\n\t\t\tf.flushBuffer()\n\t\tcase <-f.doneCh:\n\t\t\treturn\n\t\t}\n\n\t}\n}\n\n\/\/ Close flushes and closes the rotator. It never returns an error.\nfunc (f *FileRotator) Close() error {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\n\t\/\/ Stop the ticker and flush for one last time\n\tf.flushTicker.Stop()\n\tf.flushBuffer()\n\n\t\/\/ Stop the go routines\n\tif !f.closed {\n\t\tclose(f.doneCh)\n\t\tclose(f.purgeCh)\n\t\tf.closed = true\n\t\tf.currentFile.Close()\n\t}\n\n\treturn nil\n}\n\n\/\/ purgeOldFiles removes older files and keeps only the last N files rotated for\n\/\/ a file\nfunc (f *FileRotator) purgeOldFiles() {\n\tfor {\n\t\tselect {\n\t\tcase <-f.purgeCh:\n\t\t\tvar fIndexes []int\n\t\t\tfiles, err := ioutil.ReadDir(f.path)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Error(\"error getting directory listing\", \"err\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Inserting all the rotated files in a slice\n\t\t\tfor _, fi := range files {\n\t\t\t\tif strings.HasPrefix(fi.Name(), f.baseFileName) {\n\t\t\t\t\tfileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf(\"%s.\", f.baseFileName))\n\t\t\t\t\tn, err := strconv.Atoi(fileIdx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tf.logger.Error(\"error extracting file index\", \"err\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfIndexes = append(fIndexes, n)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Not continuing to delete files if the number of files is not more\n\t\t\t\/\/ than MaxFiles\n\t\t\tif len(fIndexes) <= f.MaxFiles {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Sorting the file indexes so that we can purge the older files and keep\n\t\t\t\/\/ only the number of files as configured by the user\n\t\t\tsort.Ints(fIndexes)\n\t\t\ttoDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles]\n\t\t\tfor _, fIndex := range toDelete {\n\t\t\t\tfname := filepath.Join(f.path, fmt.Sprintf(\"%s.%d\", f.baseFileName, fIndex))\n\t\t\t\terr := os.RemoveAll(fname)\n\t\t\t\tif err != nil {\n\t\t\t\t\tf.logger.Error(\"error removing file\", \"filename\", fname, \"err\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tf.oldestLogFileIdx = fIndexes[0]\n\t\tcase <-f.doneCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ flushBuffer flushes the buffer\nfunc (f *FileRotator) flushBuffer() error {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\tif f.bufw != nil {\n\t\treturn f.bufw.Flush()\n\t}\n\treturn nil\n}\n\n\/\/ writeToBuffer writes the byte array to buffer\nfunc (f *FileRotator) writeToBuffer(p []byte) (int, error) {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\treturn f.bufw.Write(p)\n}\n\n\/\/ createOrResetBuffer creates a new buffer if we don't have one otherwise\n\/\/ resets the buffer\nfunc (f *FileRotator) createOrResetBuffer() {\n\tf.bufLock.Lock()\n\tdefer f.bufLock.Unlock()\n\tif f.bufw == nil {\n\t\tf.bufw = bufio.NewWriterSize(f.currentFile, logBufferSize)\n\t} else {\n\t\tf.bufw.Reset(f.currentFile)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package repl\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/skatsuta\/monkey-interpreter\/lexer\"\n\t\"github.com\/skatsuta\/monkey-interpreter\/token\"\n)\n\nconst prompt = \">> \"\n\n\/\/ Start starts Monkey REPL.\nfunc Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\n\tfor {\n\t\tfmt.Printf(prompt)\n\t\tif !scanner.Scan() {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\t\tl := lexer.New(line)\n\n\t\tfor tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {\n\t\t\tfmt.Printf(\"%+v\\n\", tok)\n\t\t}\n\t}\n}\n<commit_msg>repl: make REPL print an AST of input<commit_after>package repl\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/skatsuta\/monkey-interpreter\/lexer\"\n\t\"github.com\/skatsuta\/monkey-interpreter\/parser\"\n)\n\nconst prompt = \">> \"\n\n\/\/ Start starts Monkey REPL.\nfunc Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\n\tfor {\n\t\tfmt.Printf(prompt)\n\t\tif !scanner.Scan() {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\t\tl := lexer.New(line)\n\t\tp := parser.New(l)\n\n\t\tprogram := p.ParseProgram()\n\t\tif len(p.Errors()) != 0 {\n\t\t\tprintParserErrors(out, p.Errors())\n\t\t\tcontinue\n\t\t}\n\n\t\tio.WriteString(out, program.String())\n\t\tio.WriteString(out, \"\\n\")\n\t}\n}\n\nfunc printParserErrors(out io.Writer, errors []string) {\n\tfor _, msg := range errors {\n\t\tio.WriteString(out, \"\\t\"+msg+\"\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package repo\n\nimport (\n\t\"os\"\n)\n\nfunc checkAndMake(path string) {\n\tif _, err := os.Stat(\"\/path\/to\/whatever\"); os.IsNotExist(err) {\n\t\tos.MkdirAll(path, os.ModePerm) \/\/ TODO: better mode\n\t}\n}\n<commit_msg>[repo] fix utils<commit_after>package repo\n\nimport (\n\t\"os\"\n)\n\nfunc checkAndMake(path string) {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tos.MkdirAll(path, os.ModePerm) \/\/ TODO: better mode\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package executor\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\/models\"\n)\n\nconst ContainerOwnerProperty = \"executor:owner\"\n\ntype State string\n\nconst (\n\tStateInvalid State = \"\"\n\tStateReserved State = \"reserved\"\n\tStateInitializing State = \"initializing\"\n\tStateCreated State = \"created\"\n\tStateRunning State = \"running\"\n\tStateCompleted State = \"completed\"\n)\n\nconst (\n\tHealthcheckTag = \"executor-healthcheck\"\n\tHealthcheckTagValue = \"executor-healthcheck\"\n)\n\ntype ProxyPortMapping struct {\n\tAppPort uint16 `json:\"app_port\"`\n\tProxyPort uint16 `json:\"proxy_port\"`\n}\n\ntype Container struct {\n\tGuid string `json:\"guid\"`\n\tResource\n\tRunInfo\n\tTags Tags\n\tState State `json:\"state\"`\n\tAllocatedAt int64 `json:\"allocated_at\"`\n\tExternalIP string `json:\"external_ip\"`\n\tInternalIP string `json:\"internal_ip\"`\n\tRunResult ContainerRunResult `json:\"run_result\"`\n\tMemoryLimit uint64 `json:\"memory_limit\"`\n\tDiskLimit uint64 `json:\"disk_limit\"`\n\tAdvertisePreferenceForInstanceAddress bool `json:\"advertise_preference_for_instance_address\"`\n}\n\nfunc NewContainerFromResource(guid string, resource *Resource, tags Tags) Container {\n\treturn Container{\n\t\tGuid: guid,\n\t\tResource: *resource,\n\t\tTags: tags,\n\t}\n}\n\nfunc (c *Container) ValidateTransitionTo(newState State) bool {\n\tif newState == StateCompleted {\n\t\treturn true\n\t}\n\tswitch c.State {\n\tcase StateReserved:\n\t\treturn newState == StateInitializing\n\tcase StateInitializing:\n\t\treturn newState == StateCreated\n\tcase StateCreated:\n\t\treturn newState == StateRunning\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (c *Container) TransistionToInitialize(req *RunRequest) error {\n\tif !c.ValidateTransitionTo(StateInitializing) {\n\t\treturn ErrInvalidTransition\n\t}\n\tc.State = StateInitializing\n\tc.RunInfo = req.RunInfo\n\tc.Tags.Add(req.Tags)\n\treturn nil\n}\n\nfunc (c *Container) TransitionToCreate() error {\n\tif !c.ValidateTransitionTo(StateCreated) {\n\t\treturn ErrInvalidTransition\n\t}\n\n\tc.State = StateCreated\n\treturn nil\n}\n\nfunc (c *Container) TransitionToComplete(failed bool, failureReason string, retryable bool) {\n\tc.RunResult.Failed = failed\n\tc.RunResult.FailureReason = failureReason\n\tc.RunResult.Retryable = retryable\n\tc.State = StateCompleted\n}\n\nfunc (newContainer Container) Copy() Container {\n\tnewContainer.Tags = newContainer.Tags.Copy()\n\treturn newContainer\n}\n\nfunc (c *Container) IsCreated() bool {\n\treturn c.State != StateReserved && c.State != StateInitializing && c.State != StateCompleted\n}\n\nfunc (c *Container) HasTags(tags Tags) bool {\n\tif c.Tags == nil {\n\t\treturn tags == nil\n\t}\n\n\tif tags == nil {\n\t\treturn false\n\t}\n\n\tfor key, val := range tags {\n\t\tv, ok := c.Tags[key]\n\t\tif !ok || val != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc NewReservedContainerFromAllocationRequest(req *AllocationRequest, allocatedAt int64) Container {\n\tc := NewContainerFromResource(req.Guid, &req.Resource, req.Tags)\n\tc.State = StateReserved\n\tc.AllocatedAt = allocatedAt\n\treturn c\n}\n\ntype Resource struct {\n\tMemoryMB int `json:\"memory_mb\"`\n\tDiskMB int `json:\"disk_mb\"`\n\tMaxPids int `json:\"max_pids\"`\n}\n\nfunc NewResource(memoryMB, diskMB, maxPids int) Resource {\n\treturn Resource{\n\t\tMemoryMB: memoryMB,\n\t\tDiskMB: diskMB,\n\t\tMaxPids: maxPids,\n\t}\n}\n\ntype CachedDependency struct {\n\tName string `json:\"name\"`\n\tFrom string `json:\"from\"`\n\tTo string `json:\"to\"`\n\tCacheKey string `json:\"cache_key\"`\n\tLogSource string `json:\"log_source\"`\n\tChecksumValue string `json:\"checksum_value\"`\n\tChecksumAlgorithm string `json:\"checksum_algorithm\"`\n}\n\ntype CertificateProperties struct {\n\tOrganizationalUnit []string `json:\"organizational_unit\"`\n}\n\ntype RunInfo struct {\n\tRootFSPath string `json:\"rootfs\"`\n\tCPUWeight uint `json:\"cpu_weight\"`\n\tPorts []PortMapping `json:\"ports\"`\n\tLogConfig LogConfig `json:\"log_config\"`\n\tMetricsConfig MetricsConfig `json:\"metrics_config\"`\n\tStartTimeoutMs uint `json:\"start_timeout_ms\"`\n\tPrivileged bool `json:\"privileged\"`\n\tCachedDependencies []CachedDependency `json:\"cached_dependencies\"`\n\tSetup *models.Action `json:\"setup\"`\n\tAction *models.Action `json:\"run\"`\n\tMonitor *models.Action `json:\"monitor\"`\n\tCheckDefinition *models.CheckDefinition `json:\"check_definition\"`\n\tEgressRules []*models.SecurityGroupRule `json:\"egress_rules,omitempty\"`\n\tEnv []EnvironmentVariable `json:\"env,omitempty\"`\n\tTrustedSystemCertificatesPath string `json:\"trusted_system_certificates_path,omitempty\"`\n\tVolumeMounts []VolumeMount `json:\"volume_mounts\"`\n\tNetwork *Network `json:\"network,omitempty\"`\n\tCertificateProperties CertificateProperties `json:\"certificate_properties\"`\n\tImageUsername string `json:\"image_username\"`\n\tImagePassword string `json:\"image_password\"`\n\tEnableContainerProxy bool `json:\"enable_container_proxy\"`\n}\n\ntype BindMountMode uint8\n\nconst (\n\tBindMountModeRO BindMountMode = 0\n\tBindMountModeRW BindMountMode = 1\n)\n\ntype VolumeMount struct {\n\tDriver string `json:\"driver\"`\n\tVolumeId string `json:\"volume_id\"`\n\tConfig map[string]interface{} `json:\"config\"`\n\tContainerPath string `json:\"container_path\"`\n\tMode BindMountMode `json:\"mode\"`\n}\n\ntype Network struct {\n\tProperties map[string]string `json:\"properties,omitempty\"`\n}\n\ntype InnerContainer Container\n\ntype EnvironmentVariable struct {\n\tName string `json:\"name\"`\n\tValue string `json:\"value\"`\n}\n\ntype ContainerMetrics struct {\n\tMemoryUsageInBytes uint64 `json:\"memory_usage_in_bytes\"`\n\tDiskUsageInBytes uint64 `json:\"disk_usage_in_bytes\"`\n\tMemoryLimitInBytes uint64 `json:\"memory_limit_in_bytes\"`\n\tDiskLimitInBytes uint64 `json:\"disk_limit_in_bytes\"`\n\tTimeSpentInCPU time.Duration `json:\"time_spent_in_cpu\"`\n\tAbsoluteCPUEntitlementInNanoseconds uint64 `json:\"absolute_cpu_entitlement_in_ns\"`\n\tContainerAgeInNanoseconds uint64 `json:\"container_age_in_ns\"`\n}\n\ntype MetricsConfig struct {\n\tGuid string `json:\"guid\"`\n\tIndex int `json:\"index\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype Metrics struct {\n\tMetricsConfig\n\tContainerMetrics\n}\n\ntype LogConfig struct {\n\tGuid string `json:\"guid\"`\n\tIndex int `json:\"index\"`\n\tSourceName string `json:\"source_name\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype PortMapping struct {\n\tContainerPort uint16 `json:\"container_port\"`\n\tHostPort uint16 `json:\"host_port,omitempty\"`\n\tContainerTLSProxyPort uint16 `json:\"container_tls_proxy_port,omitempty\"`\n\tHostTLSProxyPort uint16 `json:\"host_tls_proxy_port,omitempty\"`\n}\n\ntype ContainerRunResult struct {\n\tFailed bool `json:\"failed\"`\n\tFailureReason string `json:\"failure_reason\"`\n\tRetryable bool\n\n\tStopped bool `json:\"stopped\"`\n}\n\ntype ExecutorResources struct {\n\tMemoryMB int `json:\"memory_mb\"`\n\tDiskMB int `json:\"disk_mb\"`\n\tContainers int `json:\"containers\"`\n}\n\nfunc NewExecutorResources(memoryMB, diskMB, containers int) ExecutorResources {\n\treturn ExecutorResources{\n\t\tMemoryMB: memoryMB,\n\t\tDiskMB: diskMB,\n\t\tContainers: containers,\n\t}\n}\n\nfunc (e ExecutorResources) Copy() ExecutorResources {\n\treturn e\n}\n\nfunc (r *ExecutorResources) canSubtract(res *Resource) bool {\n\treturn r.MemoryMB >= res.MemoryMB && r.DiskMB >= res.DiskMB && r.Containers > 0\n}\n\nfunc (r *ExecutorResources) Subtract(res *Resource) bool {\n\tif !r.canSubtract(res) {\n\t\treturn false\n\t}\n\tr.MemoryMB -= res.MemoryMB\n\tr.DiskMB -= res.DiskMB\n\tr.Containers -= 1\n\treturn true\n}\n\nfunc (r *ExecutorResources) Add(res *Resource) {\n\tr.MemoryMB += res.MemoryMB\n\tr.DiskMB += res.DiskMB\n\tr.Containers += 1\n}\n\ntype Tags map[string]string\n\nfunc (t Tags) Copy() Tags {\n\tif t == nil {\n\t\treturn nil\n\t}\n\tnewTags := make(Tags, len(t))\n\tnewTags.Add(t)\n\treturn newTags\n}\n\nfunc (t Tags) Add(other Tags) {\n\tfor key := range other {\n\t\tt[key] = other[key]\n\t}\n}\n\ntype Event interface {\n\tEventType() EventType\n}\n\ntype EventType string\n\nvar ErrUnknownEventType = errors.New(\"unknown event type\")\n\nconst (\n\tEventTypeInvalid EventType = \"\"\n\n\tEventTypeContainerComplete EventType = \"container_complete\"\n\tEventTypeContainerRunning EventType = \"container_running\"\n\tEventTypeContainerReserved EventType = \"container_reserved\"\n)\n\ntype LifecycleEvent interface {\n\tContainer() Container\n\tlifecycleEvent()\n}\n\ntype ContainerCompleteEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerCompleteEvent(container Container) ContainerCompleteEvent {\n\treturn ContainerCompleteEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerCompleteEvent) EventType() EventType { return EventTypeContainerComplete }\nfunc (e ContainerCompleteEvent) Container() Container { return e.RawContainer }\nfunc (ContainerCompleteEvent) lifecycleEvent() {}\n\ntype ContainerRunningEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerRunningEvent(container Container) ContainerRunningEvent {\n\treturn ContainerRunningEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerRunningEvent) EventType() EventType { return EventTypeContainerRunning }\nfunc (e ContainerRunningEvent) Container() Container { return e.RawContainer }\nfunc (ContainerRunningEvent) lifecycleEvent() {}\n\ntype ContainerReservedEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerReservedEvent(container Container) ContainerReservedEvent {\n\treturn ContainerReservedEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerReservedEvent) EventType() EventType { return EventTypeContainerReserved }\nfunc (e ContainerReservedEvent) Container() Container { return e.RawContainer }\nfunc (ContainerReservedEvent) lifecycleEvent() {}\n<commit_msg>add Sidecars to RunInfo<commit_after>package executor\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\/models\"\n)\n\nconst ContainerOwnerProperty = \"executor:owner\"\n\ntype State string\n\nconst (\n\tStateInvalid State = \"\"\n\tStateReserved State = \"reserved\"\n\tStateInitializing State = \"initializing\"\n\tStateCreated State = \"created\"\n\tStateRunning State = \"running\"\n\tStateCompleted State = \"completed\"\n)\n\nconst (\n\tHealthcheckTag = \"executor-healthcheck\"\n\tHealthcheckTagValue = \"executor-healthcheck\"\n)\n\ntype ProxyPortMapping struct {\n\tAppPort uint16 `json:\"app_port\"`\n\tProxyPort uint16 `json:\"proxy_port\"`\n}\n\ntype Container struct {\n\tGuid string `json:\"guid\"`\n\tResource\n\tRunInfo\n\tTags Tags\n\tState State `json:\"state\"`\n\tAllocatedAt int64 `json:\"allocated_at\"`\n\tExternalIP string `json:\"external_ip\"`\n\tInternalIP string `json:\"internal_ip\"`\n\tRunResult ContainerRunResult `json:\"run_result\"`\n\tMemoryLimit uint64 `json:\"memory_limit\"`\n\tDiskLimit uint64 `json:\"disk_limit\"`\n\tAdvertisePreferenceForInstanceAddress bool `json:\"advertise_preference_for_instance_address\"`\n}\n\nfunc NewContainerFromResource(guid string, resource *Resource, tags Tags) Container {\n\treturn Container{\n\t\tGuid: guid,\n\t\tResource: *resource,\n\t\tTags: tags,\n\t}\n}\n\nfunc (c *Container) ValidateTransitionTo(newState State) bool {\n\tif newState == StateCompleted {\n\t\treturn true\n\t}\n\tswitch c.State {\n\tcase StateReserved:\n\t\treturn newState == StateInitializing\n\tcase StateInitializing:\n\t\treturn newState == StateCreated\n\tcase StateCreated:\n\t\treturn newState == StateRunning\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (c *Container) TransistionToInitialize(req *RunRequest) error {\n\tif !c.ValidateTransitionTo(StateInitializing) {\n\t\treturn ErrInvalidTransition\n\t}\n\tc.State = StateInitializing\n\tc.RunInfo = req.RunInfo\n\tc.Tags.Add(req.Tags)\n\treturn nil\n}\n\nfunc (c *Container) TransitionToCreate() error {\n\tif !c.ValidateTransitionTo(StateCreated) {\n\t\treturn ErrInvalidTransition\n\t}\n\n\tc.State = StateCreated\n\treturn nil\n}\n\nfunc (c *Container) TransitionToComplete(failed bool, failureReason string, retryable bool) {\n\tc.RunResult.Failed = failed\n\tc.RunResult.FailureReason = failureReason\n\tc.RunResult.Retryable = retryable\n\tc.State = StateCompleted\n}\n\nfunc (newContainer Container) Copy() Container {\n\tnewContainer.Tags = newContainer.Tags.Copy()\n\treturn newContainer\n}\n\nfunc (c *Container) IsCreated() bool {\n\treturn c.State != StateReserved && c.State != StateInitializing && c.State != StateCompleted\n}\n\nfunc (c *Container) HasTags(tags Tags) bool {\n\tif c.Tags == nil {\n\t\treturn tags == nil\n\t}\n\n\tif tags == nil {\n\t\treturn false\n\t}\n\n\tfor key, val := range tags {\n\t\tv, ok := c.Tags[key]\n\t\tif !ok || val != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc NewReservedContainerFromAllocationRequest(req *AllocationRequest, allocatedAt int64) Container {\n\tc := NewContainerFromResource(req.Guid, &req.Resource, req.Tags)\n\tc.State = StateReserved\n\tc.AllocatedAt = allocatedAt\n\treturn c\n}\n\ntype Resource struct {\n\tMemoryMB int `json:\"memory_mb\"`\n\tDiskMB int `json:\"disk_mb\"`\n\tMaxPids int `json:\"max_pids\"`\n}\n\nfunc NewResource(memoryMB, diskMB, maxPids int) Resource {\n\treturn Resource{\n\t\tMemoryMB: memoryMB,\n\t\tDiskMB: diskMB,\n\t\tMaxPids: maxPids,\n\t}\n}\n\ntype CachedDependency struct {\n\tName string `json:\"name\"`\n\tFrom string `json:\"from\"`\n\tTo string `json:\"to\"`\n\tCacheKey string `json:\"cache_key\"`\n\tLogSource string `json:\"log_source\"`\n\tChecksumValue string `json:\"checksum_value\"`\n\tChecksumAlgorithm string `json:\"checksum_algorithm\"`\n}\n\ntype CertificateProperties struct {\n\tOrganizationalUnit []string `json:\"organizational_unit\"`\n}\n\ntype Sidecar struct {\n\tAction *models.Action `json:\"run\"`\n\tDiskMB int32 `json:\"disk_mb\"`\n\tMemoryMB int32 `json:\"memory_mb\"`\n}\n\ntype RunInfo struct {\n\tRootFSPath string `json:\"rootfs\"`\n\tCPUWeight uint `json:\"cpu_weight\"`\n\tPorts []PortMapping `json:\"ports\"`\n\tLogConfig LogConfig `json:\"log_config\"`\n\tMetricsConfig MetricsConfig `json:\"metrics_config\"`\n\tStartTimeoutMs uint `json:\"start_timeout_ms\"`\n\tPrivileged bool `json:\"privileged\"`\n\tCachedDependencies []CachedDependency `json:\"cached_dependencies\"`\n\tSetup *models.Action `json:\"setup\"`\n\tAction *models.Action `json:\"run\"`\n\tMonitor *models.Action `json:\"monitor\"`\n\tCheckDefinition *models.CheckDefinition `json:\"check_definition\"`\n\tEgressRules []*models.SecurityGroupRule `json:\"egress_rules,omitempty\"`\n\tEnv []EnvironmentVariable `json:\"env,omitempty\"`\n\tTrustedSystemCertificatesPath string `json:\"trusted_system_certificates_path,omitempty\"`\n\tVolumeMounts []VolumeMount `json:\"volume_mounts\"`\n\tNetwork *Network `json:\"network,omitempty\"`\n\tCertificateProperties CertificateProperties `json:\"certificate_properties\"`\n\tImageUsername string `json:\"image_username\"`\n\tImagePassword string `json:\"image_password\"`\n\tEnableContainerProxy bool `json:\"enable_container_proxy\"`\n\tSidecars []Sidecar `json:\"sidecars\"`\n}\n\ntype BindMountMode uint8\n\nconst (\n\tBindMountModeRO BindMountMode = 0\n\tBindMountModeRW BindMountMode = 1\n)\n\ntype VolumeMount struct {\n\tDriver string `json:\"driver\"`\n\tVolumeId string `json:\"volume_id\"`\n\tConfig map[string]interface{} `json:\"config\"`\n\tContainerPath string `json:\"container_path\"`\n\tMode BindMountMode `json:\"mode\"`\n}\n\ntype Network struct {\n\tProperties map[string]string `json:\"properties,omitempty\"`\n}\n\ntype InnerContainer Container\n\ntype EnvironmentVariable struct {\n\tName string `json:\"name\"`\n\tValue string `json:\"value\"`\n}\n\ntype ContainerMetrics struct {\n\tMemoryUsageInBytes uint64 `json:\"memory_usage_in_bytes\"`\n\tDiskUsageInBytes uint64 `json:\"disk_usage_in_bytes\"`\n\tMemoryLimitInBytes uint64 `json:\"memory_limit_in_bytes\"`\n\tDiskLimitInBytes uint64 `json:\"disk_limit_in_bytes\"`\n\tTimeSpentInCPU time.Duration `json:\"time_spent_in_cpu\"`\n\tAbsoluteCPUEntitlementInNanoseconds uint64 `json:\"absolute_cpu_entitlement_in_ns\"`\n\tContainerAgeInNanoseconds uint64 `json:\"container_age_in_ns\"`\n}\n\ntype MetricsConfig struct {\n\tGuid string `json:\"guid\"`\n\tIndex int `json:\"index\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype Metrics struct {\n\tMetricsConfig\n\tContainerMetrics\n}\n\ntype LogConfig struct {\n\tGuid string `json:\"guid\"`\n\tIndex int `json:\"index\"`\n\tSourceName string `json:\"source_name\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype PortMapping struct {\n\tContainerPort uint16 `json:\"container_port\"`\n\tHostPort uint16 `json:\"host_port,omitempty\"`\n\tContainerTLSProxyPort uint16 `json:\"container_tls_proxy_port,omitempty\"`\n\tHostTLSProxyPort uint16 `json:\"host_tls_proxy_port,omitempty\"`\n}\n\ntype ContainerRunResult struct {\n\tFailed bool `json:\"failed\"`\n\tFailureReason string `json:\"failure_reason\"`\n\tRetryable bool\n\n\tStopped bool `json:\"stopped\"`\n}\n\ntype ExecutorResources struct {\n\tMemoryMB int `json:\"memory_mb\"`\n\tDiskMB int `json:\"disk_mb\"`\n\tContainers int `json:\"containers\"`\n}\n\nfunc NewExecutorResources(memoryMB, diskMB, containers int) ExecutorResources {\n\treturn ExecutorResources{\n\t\tMemoryMB: memoryMB,\n\t\tDiskMB: diskMB,\n\t\tContainers: containers,\n\t}\n}\n\nfunc (e ExecutorResources) Copy() ExecutorResources {\n\treturn e\n}\n\nfunc (r *ExecutorResources) canSubtract(res *Resource) bool {\n\treturn r.MemoryMB >= res.MemoryMB && r.DiskMB >= res.DiskMB && r.Containers > 0\n}\n\nfunc (r *ExecutorResources) Subtract(res *Resource) bool {\n\tif !r.canSubtract(res) {\n\t\treturn false\n\t}\n\tr.MemoryMB -= res.MemoryMB\n\tr.DiskMB -= res.DiskMB\n\tr.Containers -= 1\n\treturn true\n}\n\nfunc (r *ExecutorResources) Add(res *Resource) {\n\tr.MemoryMB += res.MemoryMB\n\tr.DiskMB += res.DiskMB\n\tr.Containers += 1\n}\n\ntype Tags map[string]string\n\nfunc (t Tags) Copy() Tags {\n\tif t == nil {\n\t\treturn nil\n\t}\n\tnewTags := make(Tags, len(t))\n\tnewTags.Add(t)\n\treturn newTags\n}\n\nfunc (t Tags) Add(other Tags) {\n\tfor key := range other {\n\t\tt[key] = other[key]\n\t}\n}\n\ntype Event interface {\n\tEventType() EventType\n}\n\ntype EventType string\n\nvar ErrUnknownEventType = errors.New(\"unknown event type\")\n\nconst (\n\tEventTypeInvalid EventType = \"\"\n\n\tEventTypeContainerComplete EventType = \"container_complete\"\n\tEventTypeContainerRunning EventType = \"container_running\"\n\tEventTypeContainerReserved EventType = \"container_reserved\"\n)\n\ntype LifecycleEvent interface {\n\tContainer() Container\n\tlifecycleEvent()\n}\n\ntype ContainerCompleteEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerCompleteEvent(container Container) ContainerCompleteEvent {\n\treturn ContainerCompleteEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerCompleteEvent) EventType() EventType { return EventTypeContainerComplete }\nfunc (e ContainerCompleteEvent) Container() Container { return e.RawContainer }\nfunc (ContainerCompleteEvent) lifecycleEvent() {}\n\ntype ContainerRunningEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerRunningEvent(container Container) ContainerRunningEvent {\n\treturn ContainerRunningEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerRunningEvent) EventType() EventType { return EventTypeContainerRunning }\nfunc (e ContainerRunningEvent) Container() Container { return e.RawContainer }\nfunc (ContainerRunningEvent) lifecycleEvent() {}\n\ntype ContainerReservedEvent struct {\n\tRawContainer Container `json:\"container\"`\n}\n\nfunc NewContainerReservedEvent(container Container) ContainerReservedEvent {\n\treturn ContainerReservedEvent{\n\t\tRawContainer: container,\n\t}\n}\n\nfunc (ContainerReservedEvent) EventType() EventType { return EventTypeContainerReserved }\nfunc (e ContainerReservedEvent) Container() Container { return e.RawContainer }\nfunc (ContainerReservedEvent) lifecycleEvent() {}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n)\n\n\/\/ TODO:\n\/\/ - Consider using\/writing a more robust data validation library.\n\/\/ E.g. https:\/\/github.com\/Assembli\/beautiful-validity\n\/\/ This would allow for semantic validation and custom validation logic.\n\/\/ For now, we are only providing type validation.\n\/\/\n\/\/ - Make type coercion pluggable (i.e. conversion\/validation of custom types).\n\n\/\/ Rules is a slice of Rule pointers.\ntype Rules []*Rule\n\n\/\/ Rule provides schema validation and type coercion for request input and fine-grained\n\/\/ control over response output. If a ResourceHandler provides input Rules which\n\/\/ specify types, input fields will attempt to be coerced to those types. If coercion\n\/\/ fails, an error will be returned in the response. If a ResourceHandler provides\n\/\/ output Rules, only the fields corresponding to those Rules will be sent back. This\n\/\/ prevents new fields from leaking into old API versions.\ntype Rule struct {\n\t\/\/ Name of the resource field. This is the name as it appears in the struct\n\t\/\/ definition.\n\tField string\n\n\t\/\/ Name of the input\/output field. Use Name() to retrieve the field alias while\n\t\/\/ falling back to the field name if it's not specified.\n\tFieldAlias string\n\n\t\/\/ Type to coerce field value to. If the value cannot be coerced, an error will be\n\t\/\/ returned in the response. Defaults to Unspecified, which is the equivalent of\n\t\/\/ an interface{} value.\n\tType Type\n\n\t\/\/ Indicates if the field must have a value. Defaults to false.\n\tRequired bool\n\n\t\/\/ Versions is a list of the API versions this Rule applies to. If empty, it will\n\t\/\/ be applied to all versions.\n\tVersions []string\n\n\t\/\/ Indicates if the Rule should only be applied to requests.\n\tInputOnly bool\n\n\t\/\/ Indicates if the Rule should only be applied to responses.\n\tOutputOnly bool\n\n\t\/\/ Function which produces the field value to receive.\n\tInputHandler func(interface{}) interface{}\n\n\t\/\/ Function which produces the field value to send.\n\tOutputHandler func(interface{}) interface{}\n\n\t\/\/ resourceType is the reflect.Type of the resource this Rule applies to. Set\n\t\/\/ by the framework.\n\tresourceType reflect.Type\n}\n\n\/\/ Name returns the name of the input\/output field alias. It defaults to the field\n\/\/ name if the alias was not specified.\nfunc (r Rule) Name() string {\n\talias := r.FieldAlias\n\tif alias == \"\" {\n\t\talias = r.Field\n\t}\n\treturn alias\n}\n\n\/\/ Applies returns whether or not the Rule applies to the given version.\nfunc (r Rule) Applies(version string) bool {\n\tif r.Versions == nil {\n\t\treturn true\n\t}\n\n\tfor _, v := range r.Versions {\n\t\tif v == version {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ validType returns whether or not the Rule is valid for the given reflect.Type.\nfunc (r Rule) validType(fieldType reflect.Type) bool {\n\tif r.Type == Unspecified {\n\t\treturn true\n\t}\n\n\tkind := typeToKind[r.Type]\n\treturn fieldType.Kind() == kind\n}\n\n\/\/ validate verifies that the Rules are valid for the given reflect.Type, meaning\n\/\/ they specify fields that exist and correct types. If a Rule is invalid, an\n\/\/ error is returned. If the Rules are valid, nil is returned.\nfunc (r Rules) validate() error {\n\tfor _, rule := range r {\n\t\tresourceType := rule.resourceType\n\t\tif resourceType.Kind() != reflect.Struct && resourceType.Kind() != reflect.Map {\n\t\t\treturn fmt.Errorf(fmt.Sprintf(\n\t\t\t\t\"Invalid resource type: must be struct or map, got %s\", resourceType))\n\t\t}\n\n\t\tfield, found := resourceType.FieldByName(rule.Field)\n\t\tif !found {\n\t\t\treturn fmt.Errorf(fmt.Sprintf(\n\t\t\t\t\"Invalid Rule for %s: field '%s' does not exist\",\n\t\t\t\tresourceType, rule.Field))\n\t\t}\n\n\t\tif !rule.validType(field.Type) {\n\t\t\treturn fmt.Errorf(fmt.Sprintf(\n\t\t\t\t\"Invalid Rule for %s: field '%s' is type %s, not %s\",\n\t\t\t\tresourceType, rule.Field, field.Type, typeToName[rule.Type]))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ applyInboundRules applies Rules which are not specified as output only to the\n\/\/ provided Payload. If the Payload is nil, an empty Payload will be returned. If no\n\/\/ Rules are provided, this acts as an identity function. If Rules are provided, any\n\/\/ incoming fields which are not specified will be discarded. If Rules specify types,\n\/\/ incoming values will attempted to be coerced. If coercion fails, an error will be\n\/\/ returned.\nfunc applyInboundRules(payload Payload, rules Rules) (Payload, error) {\n\tif payload == nil {\n\t\treturn Payload{}, nil\n\t}\n\n\t\/\/ Apply only inbound Rules.\n\trules = filterRules(rules, true)\n\n\tif len(rules) == 0 {\n\t\treturn payload, nil\n\t}\n\n\tnewPayload := Payload{}\n\nfieldLoop:\n\tfor field, value := range payload {\n\t\tfor _, rule := range rules {\n\t\t\tif rule.FieldAlias == field {\n\t\t\t\tif rule.Type != Unspecified {\n\t\t\t\t\t\/\/ Coerce to specified type.\n\t\t\t\t\tcoerced, err := coerceType(value, rule.Type)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tvalue = coerced\n\t\t\t\t}\n\t\t\t\tif rule.InputHandler != nil {\n\t\t\t\t\tvalue = rule.InputHandler(value)\n\t\t\t\t}\n\t\t\t\tnewPayload[field] = value\n\t\t\t\tcontinue fieldLoop\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Discarding field '%s'\", field)\n\t}\n\n\t\/\/ Ensure no required fields are missing.\n\tif err := enforceRequiredFields(rules, newPayload); err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn newPayload, nil\n}\n\n\/\/ applyOutboundRules applies Rules which are not specified as input only to the\n\/\/ provided Resource. If the Resource is nil, not a struct or\n\/\/ map[string]interface{}, or no Rules are provided, this acts as an identity\n\/\/ function. If Rules are provided, only the fields specified by them will be\n\/\/ included in the returned Resource. This is to prevent new fields from leaking\n\/\/ into old API versions.\nfunc applyOutboundRules(resource Resource, rules Rules) Resource {\n\t\/\/ Apply only outbound Rules.\n\trules = filterRules(rules, false)\n\n\tif resource == nil || len(rules) == 0 {\n\t\t\/\/ Return resource as-is if no Rules are provided.\n\t\treturn resource\n\t}\n\n\t\/\/ Get the underlying value by dereferencing the pointer if there is one.\n\tresourceValue := reflect.Indirect(reflect.ValueOf(resource))\n\tresource = resourceValue.Interface()\n\tresourceType := reflect.TypeOf(resource)\n\tvar payload Resource\n\n\tif resourceType.Kind() == reflect.Map {\n\t\tif resourceMap, ok := resource.(map[string]interface{}); ok {\n\t\t\tpayload = applyOutboundRulesForMap(resourceMap, rules)\n\t\t} else {\n\t\t\t\/\/ Nothing we can do if the keys aren't strings.\n\t\t\tpayload = resource\n\t\t}\n\t} else if resourceType == rules[0].resourceType {\n\t\tpayload = applyOutboundRulesForStruct(resourceValue, rules)\n\t} else {\n\t\t\/\/ Only apply Rules to resource structs and maps.\n\t\tpayload = resource\n\t}\n\n\treturn payload\n}\n\nfunc applyOutboundRulesForMap(resource map[string]interface{}, rules Rules) Payload {\n\tpayload := Payload{}\n\tfor _, rule := range rules {\n\t\tfieldValue, ok := resource[rule.Field]\n\t\tif !ok {\n\t\t\tlog.Printf(\"Map resource missing field '%s'\", rule.Field)\n\t\t\tcontinue\n\t\t}\n\t\tif rule.OutputHandler != nil {\n\t\t\tfieldValue = rule.OutputHandler(fieldValue)\n\t\t}\n\t\tpayload[rule.Name()] = fieldValue\n\t}\n\n\treturn payload\n}\n\nfunc applyOutboundRulesForStruct(resourceValue reflect.Value, rules Rules) Payload {\n\tpayload := Payload{}\n\tfor _, rule := range rules {\n\t\t\/\/ Rule validation occurs at server start. No need to check for field existence.\n\t\tfield := resourceValue.FieldByName(rule.Field)\n\t\tfieldValue := field.Interface()\n\t\tif rule.OutputHandler != nil {\n\t\t\tfieldValue = rule.OutputHandler(fieldValue)\n\t\t}\n\t\tpayload[rule.Name()] = fieldValue\n\t}\n\n\treturn payload\n}\n\n\/\/ filterRules filters the slice of Rules based on the specified bool. True means to\n\/\/ filter out outbound Rules such that the returned slice contains only inbound Rules.\n\/\/ False means to filter out inbound Rules such that the returned slice contains only\n\/\/ outbound Rules.\nfunc filterRules(rules Rules, inbound bool) Rules {\n\tfiltered := make(Rules, 0, len(rules))\n\tfor _, rule := range rules {\n\t\tif inbound && rule.OutputOnly {\n\t\t\t\/\/ Filter out outbound Rules.\n\t\t\tcontinue\n\t\t} else if !inbound && rule.InputOnly {\n\t\t\t\/\/ Filter out inbound Rules.\n\t\t\tcontinue\n\t\t}\n\t\tfiltered = append(filtered, rule)\n\t}\n\n\treturn filtered\n}\n\n\/\/ enforceRequiredFields verifies that the provided Payload has values for any Rules\n\/\/ with the Required flag set to true. If any required fields are missing, an error\n\/\/ will be returned. Otherwise nil is returned.\nfunc enforceRequiredFields(rules Rules, payload Payload) error {\nruleLoop:\n\tfor _, rule := range rules {\n\t\tif !rule.Required {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor field := range payload {\n\t\t\tif rule.Name() == field {\n\t\t\t\tcontinue ruleLoop\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Missing required field '%s'\", rule.Name())\n\t}\n\n\treturn nil\n}\n<commit_msg>Remove unnecessary Sprintf<commit_after>package rest\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n)\n\n\/\/ TODO:\n\/\/ - Consider using\/writing a more robust data validation library.\n\/\/ E.g. https:\/\/github.com\/Assembli\/beautiful-validity\n\/\/ This would allow for semantic validation and custom validation logic.\n\/\/ For now, we are only providing type validation.\n\/\/\n\/\/ - Make type coercion pluggable (i.e. conversion\/validation of custom types).\n\n\/\/ Rules is a slice of Rule pointers.\ntype Rules []*Rule\n\n\/\/ Rule provides schema validation and type coercion for request input and fine-grained\n\/\/ control over response output. If a ResourceHandler provides input Rules which\n\/\/ specify types, input fields will attempt to be coerced to those types. If coercion\n\/\/ fails, an error will be returned in the response. If a ResourceHandler provides\n\/\/ output Rules, only the fields corresponding to those Rules will be sent back. This\n\/\/ prevents new fields from leaking into old API versions.\ntype Rule struct {\n\t\/\/ Name of the resource field. This is the name as it appears in the struct\n\t\/\/ definition.\n\tField string\n\n\t\/\/ Name of the input\/output field. Use Name() to retrieve the field alias while\n\t\/\/ falling back to the field name if it's not specified.\n\tFieldAlias string\n\n\t\/\/ Type to coerce field value to. If the value cannot be coerced, an error will be\n\t\/\/ returned in the response. Defaults to Unspecified, which is the equivalent of\n\t\/\/ an interface{} value.\n\tType Type\n\n\t\/\/ Indicates if the field must have a value. Defaults to false.\n\tRequired bool\n\n\t\/\/ Versions is a list of the API versions this Rule applies to. If empty, it will\n\t\/\/ be applied to all versions.\n\tVersions []string\n\n\t\/\/ Indicates if the Rule should only be applied to requests.\n\tInputOnly bool\n\n\t\/\/ Indicates if the Rule should only be applied to responses.\n\tOutputOnly bool\n\n\t\/\/ Function which produces the field value to receive.\n\tInputHandler func(interface{}) interface{}\n\n\t\/\/ Function which produces the field value to send.\n\tOutputHandler func(interface{}) interface{}\n\n\t\/\/ resourceType is the reflect.Type of the resource this Rule applies to. Set\n\t\/\/ by the framework.\n\tresourceType reflect.Type\n}\n\n\/\/ Name returns the name of the input\/output field alias. It defaults to the field\n\/\/ name if the alias was not specified.\nfunc (r Rule) Name() string {\n\talias := r.FieldAlias\n\tif alias == \"\" {\n\t\talias = r.Field\n\t}\n\treturn alias\n}\n\n\/\/ Applies returns whether or not the Rule applies to the given version.\nfunc (r Rule) Applies(version string) bool {\n\tif r.Versions == nil {\n\t\treturn true\n\t}\n\n\tfor _, v := range r.Versions {\n\t\tif v == version {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ validType returns whether or not the Rule is valid for the given reflect.Type.\nfunc (r Rule) validType(fieldType reflect.Type) bool {\n\tif r.Type == Unspecified {\n\t\treturn true\n\t}\n\n\tkind := typeToKind[r.Type]\n\treturn fieldType.Kind() == kind\n}\n\n\/\/ validate verifies that the Rules are valid for the given reflect.Type, meaning\n\/\/ they specify fields that exist and correct types. If a Rule is invalid, an\n\/\/ error is returned. If the Rules are valid, nil is returned.\nfunc (r Rules) validate() error {\n\tfor _, rule := range r {\n\t\tresourceType := rule.resourceType\n\t\tif resourceType.Kind() != reflect.Struct && resourceType.Kind() != reflect.Map {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Invalid resource type: must be struct or map, got %s\",\n\t\t\t\tresourceType)\n\t\t}\n\n\t\tfield, found := resourceType.FieldByName(rule.Field)\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Invalid Rule for %s: field '%s' does not exist\",\n\t\t\t\tresourceType, rule.Field)\n\t\t}\n\n\t\tif !rule.validType(field.Type) {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Invalid Rule for %s: field '%s' is type %s, not %s\",\n\t\t\t\tresourceType, rule.Field, field.Type, typeToName[rule.Type])\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ applyInboundRules applies Rules which are not specified as output only to the\n\/\/ provided Payload. If the Payload is nil, an empty Payload will be returned. If no\n\/\/ Rules are provided, this acts as an identity function. If Rules are provided, any\n\/\/ incoming fields which are not specified will be discarded. If Rules specify types,\n\/\/ incoming values will attempted to be coerced. If coercion fails, an error will be\n\/\/ returned.\nfunc applyInboundRules(payload Payload, rules Rules) (Payload, error) {\n\tif payload == nil {\n\t\treturn Payload{}, nil\n\t}\n\n\t\/\/ Apply only inbound Rules.\n\trules = filterRules(rules, true)\n\n\tif len(rules) == 0 {\n\t\treturn payload, nil\n\t}\n\n\tnewPayload := Payload{}\n\nfieldLoop:\n\tfor field, value := range payload {\n\t\tfor _, rule := range rules {\n\t\t\tif rule.FieldAlias == field {\n\t\t\t\tif rule.Type != Unspecified {\n\t\t\t\t\t\/\/ Coerce to specified type.\n\t\t\t\t\tcoerced, err := coerceType(value, rule.Type)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tvalue = coerced\n\t\t\t\t}\n\t\t\t\tif rule.InputHandler != nil {\n\t\t\t\t\tvalue = rule.InputHandler(value)\n\t\t\t\t}\n\t\t\t\tnewPayload[field] = value\n\t\t\t\tcontinue fieldLoop\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Discarding field '%s'\", field)\n\t}\n\n\t\/\/ Ensure no required fields are missing.\n\tif err := enforceRequiredFields(rules, newPayload); err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn newPayload, nil\n}\n\n\/\/ applyOutboundRules applies Rules which are not specified as input only to the\n\/\/ provided Resource. If the Resource is nil, not a struct or\n\/\/ map[string]interface{}, or no Rules are provided, this acts as an identity\n\/\/ function. If Rules are provided, only the fields specified by them will be\n\/\/ included in the returned Resource. This is to prevent new fields from leaking\n\/\/ into old API versions.\nfunc applyOutboundRules(resource Resource, rules Rules) Resource {\n\t\/\/ Apply only outbound Rules.\n\trules = filterRules(rules, false)\n\n\tif resource == nil || len(rules) == 0 {\n\t\t\/\/ Return resource as-is if no Rules are provided.\n\t\treturn resource\n\t}\n\n\t\/\/ Get the underlying value by dereferencing the pointer if there is one.\n\tresourceValue := reflect.Indirect(reflect.ValueOf(resource))\n\tresource = resourceValue.Interface()\n\tresourceType := reflect.TypeOf(resource)\n\tvar payload Resource\n\n\tif resourceType.Kind() == reflect.Map {\n\t\tif resourceMap, ok := resource.(map[string]interface{}); ok {\n\t\t\tpayload = applyOutboundRulesForMap(resourceMap, rules)\n\t\t} else {\n\t\t\t\/\/ Nothing we can do if the keys aren't strings.\n\t\t\tpayload = resource\n\t\t}\n\t} else if resourceType == rules[0].resourceType {\n\t\tpayload = applyOutboundRulesForStruct(resourceValue, rules)\n\t} else {\n\t\t\/\/ Only apply Rules to resource structs and maps.\n\t\tpayload = resource\n\t}\n\n\treturn payload\n}\n\nfunc applyOutboundRulesForMap(resource map[string]interface{}, rules Rules) Payload {\n\tpayload := Payload{}\n\tfor _, rule := range rules {\n\t\tfieldValue, ok := resource[rule.Field]\n\t\tif !ok {\n\t\t\tlog.Printf(\"Map resource missing field '%s'\", rule.Field)\n\t\t\tcontinue\n\t\t}\n\t\tif rule.OutputHandler != nil {\n\t\t\tfieldValue = rule.OutputHandler(fieldValue)\n\t\t}\n\t\tpayload[rule.Name()] = fieldValue\n\t}\n\n\treturn payload\n}\n\nfunc applyOutboundRulesForStruct(resourceValue reflect.Value, rules Rules) Payload {\n\tpayload := Payload{}\n\tfor _, rule := range rules {\n\t\t\/\/ Rule validation occurs at server start. No need to check for field existence.\n\t\tfield := resourceValue.FieldByName(rule.Field)\n\t\tfieldValue := field.Interface()\n\t\tif rule.OutputHandler != nil {\n\t\t\tfieldValue = rule.OutputHandler(fieldValue)\n\t\t}\n\t\tpayload[rule.Name()] = fieldValue\n\t}\n\n\treturn payload\n}\n\n\/\/ filterRules filters the slice of Rules based on the specified bool. True means to\n\/\/ filter out outbound Rules such that the returned slice contains only inbound Rules.\n\/\/ False means to filter out inbound Rules such that the returned slice contains only\n\/\/ outbound Rules.\nfunc filterRules(rules Rules, inbound bool) Rules {\n\tfiltered := make(Rules, 0, len(rules))\n\tfor _, rule := range rules {\n\t\tif inbound && rule.OutputOnly {\n\t\t\t\/\/ Filter out outbound Rules.\n\t\t\tcontinue\n\t\t} else if !inbound && rule.InputOnly {\n\t\t\t\/\/ Filter out inbound Rules.\n\t\t\tcontinue\n\t\t}\n\t\tfiltered = append(filtered, rule)\n\t}\n\n\treturn filtered\n}\n\n\/\/ enforceRequiredFields verifies that the provided Payload has values for any Rules\n\/\/ with the Required flag set to true. If any required fields are missing, an error\n\/\/ will be returned. Otherwise nil is returned.\nfunc enforceRequiredFields(rules Rules, payload Payload) error {\nruleLoop:\n\tfor _, rule := range rules {\n\t\tif !rule.Required {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor field := range payload {\n\t\t\tif rule.Name() == field {\n\t\t\t\tcontinue ruleLoop\n\t\t\t}\n\t\t}\n\n\t\treturn fmt.Errorf(\"Missing required field '%s'\", rule.Name())\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package spider\n\nimport (\n\t\"github.com\/henrylee2cn\/pholcus\/common\/pinyin\"\n)\n\n\/\/ 蜘蛛种类列表\ntype SpiderSpecies struct {\n\tlist []*Spider\n\tsorted bool\n}\n\n\/\/ 全局蜘蛛种类实例\nvar Species = &SpiderSpecies{\n\tlist: []*Spider{},\n}\n\n\/\/ 向蜘蛛种类清单添加新种类\nfunc (self *SpiderSpecies) Add(sp *Spider) *Spider {\n\tself.list = append(self.list, sp)\n\treturn sp\n}\n\n\/\/ 获取全部蜘蛛种类\nfunc (self *SpiderSpecies) Get() []*Spider {\n\tif !self.sorted {\n\t\tl := len(self.list)\n\t\tinitials := make([]string, l)\n\t\tnewlist := map[string]*Spider{}\n\t\tfor i := 0; i < l; i++ {\n\t\t\tinitials[i] = self.list[i].GetName()\n\t\t\tnewlist[initials[i]] = self.list[i]\n\t\t}\n\t\tpinyin.SortInitials(initials)\n\t\tfor i := 0; i < l; i++ {\n\t\t\tself.list[i] = newlist[initials[i]]\n\t\t}\n\t\tself.sorted = true\n\t}\n\treturn self.list\n}\n\nfunc (self *SpiderSpecies) GetByName(n string) *Spider {\n\tfor _, sp := range self.list {\n\t\tif sp.GetName() == n {\n\t\t\treturn sp\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>同名规则自动加\"(2)\"形式的后缀<commit_after>package spider\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/henrylee2cn\/pholcus\/common\/pinyin\"\n)\n\n\/\/ 蜘蛛种类列表\ntype SpiderSpecies struct {\n\tlist []*Spider\n\thash map[string]*Spider\n\tsorted bool\n}\n\n\/\/ 全局蜘蛛种类实例\nvar Species = &SpiderSpecies{\n\tlist: []*Spider{},\n\thash: map[string]*Spider{},\n}\n\n\/\/ 向蜘蛛种类清单添加新种类\nfunc (self *SpiderSpecies) Add(sp *Spider) *Spider {\n\tfor i, name := 2, sp.Name; true; i++ {\n\t\tif _, ok := self.hash[name]; !ok {\n\t\t\tsp.Name = name\n\t\t\tself.hash[sp.Name] = sp\n\t\t\tbreak\n\t\t}\n\t\tname = fmt.Sprintf(\"%s(%d)\", sp.Name, i)\n\t}\n\tself.list = append(self.list, sp)\n\treturn sp\n}\n\n\/\/ 获取全部蜘蛛种类\nfunc (self *SpiderSpecies) Get() []*Spider {\n\tif !self.sorted {\n\t\tl := len(self.list)\n\t\tinitials := make([]string, l)\n\t\tnewlist := map[string]*Spider{}\n\t\tfor i := 0; i < l; i++ {\n\t\t\tinitials[i] = self.list[i].GetName()\n\t\t\tnewlist[initials[i]] = self.list[i]\n\t\t}\n\t\tpinyin.SortInitials(initials)\n\t\tfor i := 0; i < l; i++ {\n\t\t\tself.list[i] = newlist[initials[i]]\n\t\t}\n\t\tself.sorted = true\n\t}\n\treturn self.list\n}\n\nfunc (self *SpiderSpecies) GetByName(name string) *Spider {\n\treturn self.hash[name]\n}\n<|endoftext|>"} {"text":"<commit_before>package parser\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/wellington\/sass\/ast\"\n\t\"github.com\/wellington\/sass\/token\"\n)\n\nfunc TestParse_files(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skip robust testing to better indicate errors\")\n\t}\n\n\tinputs, err := filepath.Glob(\"..\/sass-spec\/spec\/basic\/*\/input.scss\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmode := DeclarationErrors\n\tmode = 0 \/\/Trace | ParseComments\n\tvar name string\n\tdefer func() { fmt.Println(\"exit parsing\", name) }()\n\tfor _, name = range inputs {\n\n\t\tif !strings.Contains(name, \"23_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(name, \"06_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ These are fucked things in Sass like lists\n\t\tif strings.Contains(name, \"15_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(name, \"16_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ namespaces are wtf\n\t\tif strings.Contains(name, \"24_\") {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Parsing\", name)\n\t\t_, err := ParseFile(token.NewFileSet(), name, nil, mode)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ParseFile(%s): %v\", name, err)\n\t\t}\n\t\tfmt.Println(\"Parsed\", name)\n\t}\n}\n\nfunc testString(t *testing.T, in string, mode Mode) (*ast.File, *token.FileSet) {\n\tfset := token.NewFileSet()\n\tf, err := ParseFile(fset, \"testfile\", in, mode)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn f, fset\n\n}\n\nfunc TestSelMath(t *testing.T) {\n\t\/\/ Selectors act like boolean math\n\tin := `\ndiv ~ span { }`\n\tf, fset := testString(t, in, 0)\n\t_ = fset\n\tsel, ok := f.Decls[0].(*ast.SelDecl)\n\tif !ok {\n\t\tt.Fatal(\"SelDecl expected\")\n\t}\n\n\tbexpr, ok := sel.Sel.(*ast.BinaryExpr)\n\tif !ok {\n\t\tt.Fatal(\"BinaryExpr expected\")\n\t}\n\n\tlit, ok := bexpr.X.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\n\tif e := \"div\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n\n\tif e := token.TIL; bexpr.Op != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", bexpr.Op, e)\n\t}\n\n\tlit, ok = bexpr.Y.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\n\tif e := \"span\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n}\n\nfunc TestBackRef(t *testing.T) {\n\t\/\/ Selectors act like boolean math\n\tin := `div { & { color: red; } }`\n\tf, fset := testString(t, in, 0)\n\t_, _ = f, fset\n\n\tdecl, ok := f.Decls[0].(*ast.SelDecl)\n\tif !ok {\n\t\tt.Fatal(\"SelDecl expected\")\n\t}\n\tsel := decl.SelStmt\n\tlit, ok := sel.Sel.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\tif e := \"div\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n\n\tnested, ok := sel.Body.List[0].(*ast.SelStmt)\n\tif !ok {\n\t\tt.Fatal(\"expected SelStmt\")\n\t}\n\n\tif e := \"&\"; nested.Name.String() != e {\n\t\tt.Fatalf(\"got: %s wanted: %s\", nested.Name.String(), e)\n\t}\n\n\tif e := \"div\"; e != nested.Resolved.Value {\n\t\tt.Errorf(\"got: %s wanted: %s\", nested.Resolved.Value, e)\n\t}\n}\n\nfunc TestExprMath(t *testing.T) {\n\t\/\/ Selectors act like boolean math\n\tin := `\ndiv {\n value: 1*(2+3);\n}`\n\tf, fset := testString(t, in, 0)\n\tast.Print(fset, f.Decls[0].(*ast.SelDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl).Specs[0].(*ast.RuleSpec).Values[0])\n}\n<commit_msg>remove bad tests<commit_after>package parser\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/wellington\/sass\/ast\"\n\t\"github.com\/wellington\/sass\/token\"\n)\n\nfunc TestParse_files(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skip robust testing to better indicate errors\")\n\t}\n\n\tinputs, err := filepath.Glob(\"..\/sass-spec\/spec\/basic\/*\/input.scss\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmode := DeclarationErrors\n\tmode = 0 \/\/Trace | ParseComments\n\tvar name string\n\tdefer func() { fmt.Println(\"exit parsing\", name) }()\n\tfor _, name = range inputs {\n\n\t\tif !strings.Contains(name, \"23_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(name, \"06_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ These are fucked things in Sass like lists\n\t\tif strings.Contains(name, \"15_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(name, \"16_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ namespaces are wtf\n\t\tif strings.Contains(name, \"24_\") {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Parsing\", name)\n\t\t_, err := ParseFile(token.NewFileSet(), name, nil, mode)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ParseFile(%s): %v\", name, err)\n\t\t}\n\t\tfmt.Println(\"Parsed\", name)\n\t}\n}\n\nfunc testString(t *testing.T, in string, mode Mode) (*ast.File, *token.FileSet) {\n\tfset := token.NewFileSet()\n\tf, err := ParseFile(fset, \"testfile\", in, mode)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn f, fset\n\n}\n\nfunc TestSelMath(t *testing.T) {\n\t\/\/ Selectors act like boolean math\n\tin := `\ndiv ~ span { }`\n\tf, fset := testString(t, in, 0)\n\t_ = fset\n\tsel, ok := f.Decls[0].(*ast.SelDecl)\n\tif !ok {\n\t\tt.Fatal(\"SelDecl expected\")\n\t}\n\n\tbexpr, ok := sel.Sel.(*ast.BinaryExpr)\n\tif !ok {\n\t\tt.Fatal(\"BinaryExpr expected\")\n\t}\n\n\tlit, ok := bexpr.X.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\n\tif e := \"div\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n\n\tif e := token.TIL; bexpr.Op != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", bexpr.Op, e)\n\t}\n\n\tlit, ok = bexpr.Y.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\n\tif e := \"span\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n}\n\nfunc TestBackRef(t *testing.T) {\n\t\/\/ Selectors act like boolean math\n\tin := `div { & { color: red; } }`\n\tf, fset := testString(t, in, 0)\n\t_, _ = f, fset\n\n\tdecl, ok := f.Decls[0].(*ast.SelDecl)\n\tif !ok {\n\t\tt.Fatal(\"SelDecl expected\")\n\t}\n\tsel := decl.SelStmt\n\tlit, ok := sel.Sel.(*ast.BasicLit)\n\tif !ok {\n\t\tt.Fatal(\"BasicLit expected\")\n\t}\n\tif e := \"div\"; lit.Value != e {\n\t\tt.Errorf(\"got: %s wanted: %s\", lit.Value, e)\n\t}\n\n\tnested, ok := sel.Body.List[0].(*ast.SelStmt)\n\tif !ok {\n\t\tt.Fatal(\"expected SelStmt\")\n\t}\n\n\tif e := \"&\"; nested.Name.String() != e {\n\t\tt.Fatalf(\"got: %s wanted: %s\", nested.Name.String(), e)\n\t}\n\n\tif e := \"div\"; e != nested.Resolved.Value {\n\t\tt.Errorf(\"got: %s wanted: %s\", nested.Resolved.Value, e)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar spaceReplacer = strings.NewReplacer(\" \", \"_\")\n\n\/\/ InmemSink provides a MetricSink that does in-memory aggregation\n\/\/ without sending metrics over a network. It can be embedded within\n\/\/ an application to provide profiling information.\ntype InmemSink struct {\n\t\/\/ How long is each aggregation interval\n\tinterval time.Duration\n\n\t\/\/ Retain controls how many metrics interval we keep\n\tretain time.Duration\n\n\t\/\/ maxIntervals is the maximum length of intervals.\n\t\/\/ It is retain \/ interval.\n\tmaxIntervals int\n\n\t\/\/ intervals is a slice of the retained intervals\n\tintervals []*IntervalMetrics\n\tintervalLock sync.RWMutex\n\n\trateDenom float64\n}\n\n\/\/ IntervalMetrics stores the aggregated metrics\n\/\/ for a specific interval\ntype IntervalMetrics struct {\n\tsync.RWMutex\n\n\t\/\/ The start time of the interval\n\tInterval time.Time\n\n\t\/\/ Gauges maps the key to the last set value\n\tGauges map[string]GaugeValue\n\n\t\/\/ Points maps the string to the list of emitted values\n\t\/\/ from EmitKey\n\tPoints map[string][]float32\n\n\t\/\/ Counters maps the string key to a sum of the counter\n\t\/\/ values\n\tCounters map[string]SampledValue\n\n\t\/\/ Samples maps the key to an AggregateSample,\n\t\/\/ which has the rolled up view of a sample\n\tSamples map[string]SampledValue\n}\n\n\/\/ NewIntervalMetrics creates a new IntervalMetrics for a given interval\nfunc NewIntervalMetrics(intv time.Time) *IntervalMetrics {\n\treturn &IntervalMetrics{\n\t\tInterval: intv,\n\t\tGauges: make(map[string]GaugeValue),\n\t\tPoints: make(map[string][]float32),\n\t\tCounters: make(map[string]SampledValue),\n\t\tSamples: make(map[string]SampledValue),\n\t}\n}\n\n\/\/ AggregateSample is used to hold aggregate metrics\n\/\/ about a sample\ntype AggregateSample struct {\n\tCount int \/\/ The count of emitted pairs\n\tRate float64 \/\/ The values rate per time unit (usually 1 second)\n\tSum float64 \/\/ The sum of values\n\tSumSq float64 `json:\"-\"` \/\/ The sum of squared values\n\tMin float64 \/\/ Minimum value\n\tMax float64 \/\/ Maximum value\n\tLastUpdated time.Time `json:\"-\"` \/\/ When value was last updated\n}\n\n\/\/ Computes a Stddev of the values\nfunc (a *AggregateSample) Stddev() float64 {\n\tnum := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)\n\tdiv := float64(a.Count * (a.Count - 1))\n\tif div == 0 {\n\t\treturn 0\n\t}\n\treturn math.Sqrt(num \/ div)\n}\n\n\/\/ Computes a mean of the values\nfunc (a *AggregateSample) Mean() float64 {\n\tif a.Count == 0 {\n\t\treturn 0\n\t}\n\treturn a.Sum \/ float64(a.Count)\n}\n\n\/\/ Ingest is used to update a sample\nfunc (a *AggregateSample) Ingest(v float64, rateDenom float64) {\n\ta.Count++\n\ta.Sum += v\n\ta.SumSq += (v * v)\n\tif v < a.Min || a.Count == 1 {\n\t\ta.Min = v\n\t}\n\tif v > a.Max || a.Count == 1 {\n\t\ta.Max = v\n\t}\n\ta.Rate = float64(a.Sum) \/ rateDenom\n\ta.LastUpdated = time.Now()\n}\n\nfunc (a *AggregateSample) String() string {\n\tif a.Count == 0 {\n\t\treturn \"Count: 0\"\n\t} else if a.Stddev() == 0 {\n\t\treturn fmt.Sprintf(\"Count: %d Sum: %0.3f LastUpdated: %s\", a.Count, a.Sum, a.LastUpdated)\n\t} else {\n\t\treturn fmt.Sprintf(\"Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s\",\n\t\t\ta.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)\n\t}\n}\n\n\/\/ NewInmemSinkFromURL creates an InmemSink from a URL. It is used\n\/\/ (and tested) from NewMetricSinkFromURL.\nfunc NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {\n\tparams := u.Query()\n\n\tinterval, err := time.ParseDuration(params.Get(\"interval\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Bad 'interval' param: %s\", err)\n\t}\n\n\tretain, err := time.ParseDuration(params.Get(\"retain\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Bad 'retain' param: %s\", err)\n\t}\n\n\treturn NewInmemSink(interval, retain), nil\n}\n\n\/\/ NewInmemSink is used to construct a new in-memory sink.\n\/\/ Uses an aggregation interval and maximum retention period.\nfunc NewInmemSink(interval, retain time.Duration) *InmemSink {\n\trateTimeUnit := time.Second\n\ti := &InmemSink{\n\t\tinterval: interval,\n\t\tretain: retain,\n\t\tmaxIntervals: int(retain \/ interval),\n\t\trateDenom: float64(interval.Nanoseconds()) \/ float64(rateTimeUnit.Nanoseconds()),\n\t}\n\ti.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)\n\treturn i\n}\n\nfunc (i *InmemSink) SetGauge(key []string, val float32) {\n\ti.SetGaugeWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\tintv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}\n}\n\nfunc (i *InmemSink) EmitKey(key []string, val float32) {\n\tk := i.flattenKey(key)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\tvals := intv.Points[k]\n\tintv.Points[k] = append(vals, val)\n}\n\nfunc (i *InmemSink) IncrCounter(key []string, val float32) {\n\ti.IncrCounterWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\n\tagg, ok := intv.Counters[k]\n\tif !ok {\n\t\tagg = SampledValue{\n\t\t\tName: name,\n\t\t\tAggregateSample: &AggregateSample{},\n\t\t\tLabels: labels,\n\t\t}\n\t\tintv.Counters[k] = agg\n\t}\n\tagg.Ingest(float64(val), i.rateDenom)\n}\n\nfunc (i *InmemSink) AddSample(key []string, val float32) {\n\ti.AddSampleWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\n\tagg, ok := intv.Samples[k]\n\tif !ok {\n\t\tagg = SampledValue{\n\t\t\tName: name,\n\t\t\tAggregateSample: &AggregateSample{},\n\t\t\tLabels: labels,\n\t\t}\n\t\tintv.Samples[k] = agg\n\t}\n\tagg.Ingest(float64(val), i.rateDenom)\n}\n\n\/\/ Data is used to retrieve all the aggregated metrics\n\/\/ Intervals may be in use, and a read lock should be acquired\nfunc (i *InmemSink) Data() []*IntervalMetrics {\n\t\/\/ Get the current interval, forces creation\n\ti.getInterval()\n\n\ti.intervalLock.RLock()\n\tdefer i.intervalLock.RUnlock()\n\n\tn := len(i.intervals)\n\tintervals := make([]*IntervalMetrics, n)\n\n\tcopy(intervals[:n-1], i.intervals[:n-1])\n\tcurrent := i.intervals[n-1]\n\n\t\/\/ make its own copy for current interval\n\tintervals[n-1] = &IntervalMetrics{}\n\tcopyCurrent := intervals[n-1]\n\tcurrent.RLock()\n\t*copyCurrent = *current\n\n\tcopyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))\n\tfor k, v := range current.Gauges {\n\t\tcopyCurrent.Gauges[k] = v\n\t}\n\t\/\/ saved values will be not change, just copy its link\n\tcopyCurrent.Points = make(map[string][]float32, len(current.Points))\n\tfor k, v := range current.Points {\n\t\tcopyCurrent.Points[k] = v\n\t}\n\tcopyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))\n\tfor k, v := range current.Counters {\n\t\tcopyCurrent.Counters[k] = v.deepCopy()\n\t}\n\tcopyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))\n\tfor k, v := range current.Samples {\n\t\tcopyCurrent.Samples[k] = v.deepCopy()\n\t}\n\tcurrent.RUnlock()\n\n\treturn intervals\n}\n\nfunc (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {\n\ti.intervalLock.RLock()\n\tdefer i.intervalLock.RUnlock()\n\n\tn := len(i.intervals)\n\tif n > 0 && i.intervals[n-1].Interval == intv {\n\t\treturn i.intervals[n-1]\n\t}\n\treturn nil\n}\n\nfunc (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {\n\ti.intervalLock.Lock()\n\tdefer i.intervalLock.Unlock()\n\n\t\/\/ Check for an existing interval\n\tn := len(i.intervals)\n\tif n > 0 && i.intervals[n-1].Interval == intv {\n\t\treturn i.intervals[n-1]\n\t}\n\n\t\/\/ Add the current interval\n\tcurrent := NewIntervalMetrics(intv)\n\ti.intervals = append(i.intervals, current)\n\tn++\n\n\t\/\/ Truncate the intervals if they are too long\n\tif n >= i.maxIntervals {\n\t\tcopy(i.intervals[0:], i.intervals[n-i.maxIntervals:])\n\t\ti.intervals = i.intervals[:i.maxIntervals]\n\t}\n\treturn current\n}\n\n\/\/ getInterval returns the current interval to write to\nfunc (i *InmemSink) getInterval() *IntervalMetrics {\n\tintv := time.Now().Truncate(i.interval)\n\tif m := i.getExistingInterval(intv); m != nil {\n\t\treturn m\n\t}\n\treturn i.createInterval(intv)\n}\n\n\/\/ Flattens the key for formatting, removes spaces\nfunc (i *InmemSink) flattenKey(parts []string) string {\n\tbuf := &bytes.Buffer{}\n\n\tjoined := strings.Join(parts, \".\")\n\n\tspaceReplacer.WriteString(buf, joined)\n\n\treturn buf.String()\n}\n\n\/\/ Flattens the key for formatting along with its labels, removes spaces\nfunc (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {\n\tkey := i.flattenKey(parts)\n\tbuf := bytes.NewBufferString(key)\n\n\tfor _, label := range labels {\n\t\tspaceReplacer.WriteString(buf, fmt.Sprintf(\";%s=%s\", label.Name, label.Value))\n\t}\n\n\treturn buf.String(), key\n}\n<commit_msg>inmem: create a new RWMutex, because they are not safe to copy<commit_after>package metrics\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar spaceReplacer = strings.NewReplacer(\" \", \"_\")\n\n\/\/ InmemSink provides a MetricSink that does in-memory aggregation\n\/\/ without sending metrics over a network. It can be embedded within\n\/\/ an application to provide profiling information.\ntype InmemSink struct {\n\t\/\/ How long is each aggregation interval\n\tinterval time.Duration\n\n\t\/\/ Retain controls how many metrics interval we keep\n\tretain time.Duration\n\n\t\/\/ maxIntervals is the maximum length of intervals.\n\t\/\/ It is retain \/ interval.\n\tmaxIntervals int\n\n\t\/\/ intervals is a slice of the retained intervals\n\tintervals []*IntervalMetrics\n\tintervalLock sync.RWMutex\n\n\trateDenom float64\n}\n\n\/\/ IntervalMetrics stores the aggregated metrics\n\/\/ for a specific interval\ntype IntervalMetrics struct {\n\tsync.RWMutex\n\n\t\/\/ The start time of the interval\n\tInterval time.Time\n\n\t\/\/ Gauges maps the key to the last set value\n\tGauges map[string]GaugeValue\n\n\t\/\/ Points maps the string to the list of emitted values\n\t\/\/ from EmitKey\n\tPoints map[string][]float32\n\n\t\/\/ Counters maps the string key to a sum of the counter\n\t\/\/ values\n\tCounters map[string]SampledValue\n\n\t\/\/ Samples maps the key to an AggregateSample,\n\t\/\/ which has the rolled up view of a sample\n\tSamples map[string]SampledValue\n}\n\n\/\/ NewIntervalMetrics creates a new IntervalMetrics for a given interval\nfunc NewIntervalMetrics(intv time.Time) *IntervalMetrics {\n\treturn &IntervalMetrics{\n\t\tInterval: intv,\n\t\tGauges: make(map[string]GaugeValue),\n\t\tPoints: make(map[string][]float32),\n\t\tCounters: make(map[string]SampledValue),\n\t\tSamples: make(map[string]SampledValue),\n\t}\n}\n\n\/\/ AggregateSample is used to hold aggregate metrics\n\/\/ about a sample\ntype AggregateSample struct {\n\tCount int \/\/ The count of emitted pairs\n\tRate float64 \/\/ The values rate per time unit (usually 1 second)\n\tSum float64 \/\/ The sum of values\n\tSumSq float64 `json:\"-\"` \/\/ The sum of squared values\n\tMin float64 \/\/ Minimum value\n\tMax float64 \/\/ Maximum value\n\tLastUpdated time.Time `json:\"-\"` \/\/ When value was last updated\n}\n\n\/\/ Computes a Stddev of the values\nfunc (a *AggregateSample) Stddev() float64 {\n\tnum := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)\n\tdiv := float64(a.Count * (a.Count - 1))\n\tif div == 0 {\n\t\treturn 0\n\t}\n\treturn math.Sqrt(num \/ div)\n}\n\n\/\/ Computes a mean of the values\nfunc (a *AggregateSample) Mean() float64 {\n\tif a.Count == 0 {\n\t\treturn 0\n\t}\n\treturn a.Sum \/ float64(a.Count)\n}\n\n\/\/ Ingest is used to update a sample\nfunc (a *AggregateSample) Ingest(v float64, rateDenom float64) {\n\ta.Count++\n\ta.Sum += v\n\ta.SumSq += (v * v)\n\tif v < a.Min || a.Count == 1 {\n\t\ta.Min = v\n\t}\n\tif v > a.Max || a.Count == 1 {\n\t\ta.Max = v\n\t}\n\ta.Rate = float64(a.Sum) \/ rateDenom\n\ta.LastUpdated = time.Now()\n}\n\nfunc (a *AggregateSample) String() string {\n\tif a.Count == 0 {\n\t\treturn \"Count: 0\"\n\t} else if a.Stddev() == 0 {\n\t\treturn fmt.Sprintf(\"Count: %d Sum: %0.3f LastUpdated: %s\", a.Count, a.Sum, a.LastUpdated)\n\t} else {\n\t\treturn fmt.Sprintf(\"Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s\",\n\t\t\ta.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)\n\t}\n}\n\n\/\/ NewInmemSinkFromURL creates an InmemSink from a URL. It is used\n\/\/ (and tested) from NewMetricSinkFromURL.\nfunc NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {\n\tparams := u.Query()\n\n\tinterval, err := time.ParseDuration(params.Get(\"interval\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Bad 'interval' param: %s\", err)\n\t}\n\n\tretain, err := time.ParseDuration(params.Get(\"retain\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Bad 'retain' param: %s\", err)\n\t}\n\n\treturn NewInmemSink(interval, retain), nil\n}\n\n\/\/ NewInmemSink is used to construct a new in-memory sink.\n\/\/ Uses an aggregation interval and maximum retention period.\nfunc NewInmemSink(interval, retain time.Duration) *InmemSink {\n\trateTimeUnit := time.Second\n\ti := &InmemSink{\n\t\tinterval: interval,\n\t\tretain: retain,\n\t\tmaxIntervals: int(retain \/ interval),\n\t\trateDenom: float64(interval.Nanoseconds()) \/ float64(rateTimeUnit.Nanoseconds()),\n\t}\n\ti.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)\n\treturn i\n}\n\nfunc (i *InmemSink) SetGauge(key []string, val float32) {\n\ti.SetGaugeWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\tintv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}\n}\n\nfunc (i *InmemSink) EmitKey(key []string, val float32) {\n\tk := i.flattenKey(key)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\tvals := intv.Points[k]\n\tintv.Points[k] = append(vals, val)\n}\n\nfunc (i *InmemSink) IncrCounter(key []string, val float32) {\n\ti.IncrCounterWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\n\tagg, ok := intv.Counters[k]\n\tif !ok {\n\t\tagg = SampledValue{\n\t\t\tName: name,\n\t\t\tAggregateSample: &AggregateSample{},\n\t\t\tLabels: labels,\n\t\t}\n\t\tintv.Counters[k] = agg\n\t}\n\tagg.Ingest(float64(val), i.rateDenom)\n}\n\nfunc (i *InmemSink) AddSample(key []string, val float32) {\n\ti.AddSampleWithLabels(key, val, nil)\n}\n\nfunc (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {\n\tk, name := i.flattenKeyLabels(key, labels)\n\tintv := i.getInterval()\n\n\tintv.Lock()\n\tdefer intv.Unlock()\n\n\tagg, ok := intv.Samples[k]\n\tif !ok {\n\t\tagg = SampledValue{\n\t\t\tName: name,\n\t\t\tAggregateSample: &AggregateSample{},\n\t\t\tLabels: labels,\n\t\t}\n\t\tintv.Samples[k] = agg\n\t}\n\tagg.Ingest(float64(val), i.rateDenom)\n}\n\n\/\/ Data is used to retrieve all the aggregated metrics\n\/\/ Intervals may be in use, and a read lock should be acquired\nfunc (i *InmemSink) Data() []*IntervalMetrics {\n\t\/\/ Get the current interval, forces creation\n\ti.getInterval()\n\n\ti.intervalLock.RLock()\n\tdefer i.intervalLock.RUnlock()\n\n\tn := len(i.intervals)\n\tintervals := make([]*IntervalMetrics, n)\n\n\tcopy(intervals[:n-1], i.intervals[:n-1])\n\tcurrent := i.intervals[n-1]\n\n\t\/\/ make its own copy for current interval\n\tintervals[n-1] = &IntervalMetrics{}\n\tcopyCurrent := intervals[n-1]\n\tcurrent.RLock()\n\t*copyCurrent = *current\n\t\/\/ RWMutex is not safe to copy, so create a new instance on the copy\n\tcopyCurrent.RWMutex = sync.RWMutex{}\n\n\tcopyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))\n\tfor k, v := range current.Gauges {\n\t\tcopyCurrent.Gauges[k] = v\n\t}\n\t\/\/ saved values will be not change, just copy its link\n\tcopyCurrent.Points = make(map[string][]float32, len(current.Points))\n\tfor k, v := range current.Points {\n\t\tcopyCurrent.Points[k] = v\n\t}\n\tcopyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))\n\tfor k, v := range current.Counters {\n\t\tcopyCurrent.Counters[k] = v.deepCopy()\n\t}\n\tcopyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))\n\tfor k, v := range current.Samples {\n\t\tcopyCurrent.Samples[k] = v.deepCopy()\n\t}\n\tcurrent.RUnlock()\n\n\treturn intervals\n}\n\nfunc (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {\n\ti.intervalLock.RLock()\n\tdefer i.intervalLock.RUnlock()\n\n\tn := len(i.intervals)\n\tif n > 0 && i.intervals[n-1].Interval == intv {\n\t\treturn i.intervals[n-1]\n\t}\n\treturn nil\n}\n\nfunc (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {\n\ti.intervalLock.Lock()\n\tdefer i.intervalLock.Unlock()\n\n\t\/\/ Check for an existing interval\n\tn := len(i.intervals)\n\tif n > 0 && i.intervals[n-1].Interval == intv {\n\t\treturn i.intervals[n-1]\n\t}\n\n\t\/\/ Add the current interval\n\tcurrent := NewIntervalMetrics(intv)\n\ti.intervals = append(i.intervals, current)\n\tn++\n\n\t\/\/ Truncate the intervals if they are too long\n\tif n >= i.maxIntervals {\n\t\tcopy(i.intervals[0:], i.intervals[n-i.maxIntervals:])\n\t\ti.intervals = i.intervals[:i.maxIntervals]\n\t}\n\treturn current\n}\n\n\/\/ getInterval returns the current interval to write to\nfunc (i *InmemSink) getInterval() *IntervalMetrics {\n\tintv := time.Now().Truncate(i.interval)\n\tif m := i.getExistingInterval(intv); m != nil {\n\t\treturn m\n\t}\n\treturn i.createInterval(intv)\n}\n\n\/\/ Flattens the key for formatting, removes spaces\nfunc (i *InmemSink) flattenKey(parts []string) string {\n\tbuf := &bytes.Buffer{}\n\n\tjoined := strings.Join(parts, \".\")\n\n\tspaceReplacer.WriteString(buf, joined)\n\n\treturn buf.String()\n}\n\n\/\/ Flattens the key for formatting along with its labels, removes spaces\nfunc (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {\n\tkey := i.flattenKey(parts)\n\tbuf := bytes.NewBufferString(key)\n\n\tfor _, label := range labels {\n\t\tspaceReplacer.WriteString(buf, fmt.Sprintf(\";%s=%s\", label.Name, label.Value))\n\t}\n\n\treturn buf.String(), key\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n)\n\ntype (\n\tasyncFlag bool\n\tconfigPath string\n\tkeyPath string\n\tserverGroup string\n\tcommandName string\n)\n\ntype input struct {\n\tasyncFlag asyncFlag\n\tconfigPath configPath\n\tkeyPath keyPath\n\tserverGroup serverGroup\n\tcommandName commandName\n}\n\nfunc (input *input) parse() {\n\n\tasyncFlagPtr := flag.Bool(\"async\", false,\"async - when true, parallel executing over servers\")\n\tconfigPathPtr := flag.String(\"c\", \"\", \"config file - yaml config file\")\n\tkeyPathPtr := flag.String(\"i\", \"\", \"identity file - path to private key\")\n\tserverGroupPtr := flag.String(\"s\", \"\", \"server group - name of the server group\")\n\tcommandPtr := flag.String(\"cmd\", \"\", \"command name - name of the command to run\")\n\n\tflag.Parse()\n\n\tinput.asyncFlag = asyncFlag(*asyncFlagPtr)\n\tinput.commandName = commandName(*commandPtr)\n\tinput.serverGroup = serverGroup(*serverGroupPtr)\n\tinput.keyPath = keyPath(*keyPathPtr)\n\tinput.configPath = configPath(*configPathPtr)\n\n}\n\nfunc (input *input) validate() {\n\tinput.configPath.validate()\n\tinput.keyPath.validate()\n\tinput.serverGroup.validate()\n\tinput.commandName.validate()\n}\n\nfunc (val *configPath) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"configPath is empty please set grapes -c config.yml\")\n\t}\n}\n\nfunc (val *keyPath) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"idendity file path is empty please set grapes -i ~\/.ssh\/id_rsaa\")\n\t}\n}\n\nfunc (val *serverGroup) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"server group is empty please set grapes -s server_group\")\n\t}\n}\n\nfunc (val *commandName) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"command name is empty please set grapes -cmd whats_up\")\n\t}\n}\n<commit_msg>gofmt 100%<commit_after>package main\n\nimport (\n\t\"flag\"\n)\n\ntype (\n\tasyncFlag bool\n\tconfigPath string\n\tkeyPath string\n\tserverGroup string\n\tcommandName string\n)\n\ntype input struct {\n\tasyncFlag asyncFlag\n\tconfigPath configPath\n\tkeyPath keyPath\n\tserverGroup serverGroup\n\tcommandName commandName\n}\n\nfunc (input *input) parse() {\n\n\tasyncFlagPtr := flag.Bool(\"async\", false, \"async - when true, parallel executing over servers\")\n\tconfigPathPtr := flag.String(\"c\", \"\", \"config file - yaml config file\")\n\tkeyPathPtr := flag.String(\"i\", \"\", \"identity file - path to private key\")\n\tserverGroupPtr := flag.String(\"s\", \"\", \"server group - name of the server group\")\n\tcommandPtr := flag.String(\"cmd\", \"\", \"command name - name of the command to run\")\n\n\tflag.Parse()\n\n\tinput.asyncFlag = asyncFlag(*asyncFlagPtr)\n\tinput.commandName = commandName(*commandPtr)\n\tinput.serverGroup = serverGroup(*serverGroupPtr)\n\tinput.keyPath = keyPath(*keyPathPtr)\n\tinput.configPath = configPath(*configPathPtr)\n\n}\n\nfunc (input *input) validate() {\n\tinput.configPath.validate()\n\tinput.keyPath.validate()\n\tinput.serverGroup.validate()\n\tinput.commandName.validate()\n}\n\nfunc (val *configPath) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"configPath is empty please set grapes -c config.yml\")\n\t}\n}\n\nfunc (val *keyPath) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"idendity file path is empty please set grapes -i ~\/.ssh\/id_rsaa\")\n\t}\n}\n\nfunc (val *serverGroup) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"server group is empty please set grapes -s server_group\")\n\t}\n}\n\nfunc (val *commandName) validate() {\n\tif *val == \"\" {\n\t\tfatal(\"command name is empty please set grapes -cmd whats_up\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The Moov Authors\n\/\/ Use of this source code is governed by an Apache License\n\/\/ license that can be found in the LICENSE file.\n\npackage describe\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestFormatAmount(t *testing.T) {\n\trequire.Equal(t, \"12345\", formatAmount(false, 12345))\n\trequire.Equal(t, \"123.45\", formatAmount(true, 12345))\n\trequire.Equal(t, \"1,234,567.89\", formatAmount(true, 123456789))\n}\n\nfunc TestMaskNumber(t *testing.T) {\n\trequire.Equal(t, \"****\", maskNumber(\"\"))\n\trequire.Equal(t, \"****\", maskNumber(\"1\"))\n\trequire.Equal(t, \"****\", maskNumber(\"12\"))\n\trequire.Equal(t, \"****\", maskNumber(\"123\"))\n\trequire.Equal(t, \"****\", maskNumber(\"1234\"))\n\trequire.Equal(t, \"*2345\", maskNumber(\"12345\"))\n\trequire.Equal(t, \"**3456\", maskNumber(\"123456\"))\n\trequire.Equal(t, \"***4567\", maskNumber(\"1234567\"))\n\trequire.Equal(t, \"****5678\", maskNumber(\"12345678\"))\n\trequire.Equal(t, \"*****6789\", maskNumber(\"123456789\"))\n\trequire.Equal(t, \"******7890\", maskNumber(\"1234567890\"))\n}\n\nfunc TestMaskName(t *testing.T) {\n\trequire.Equal(t, \"\", maskName(\"\"))\n\trequire.Equal(t, \"* * *\", maskName(\"a a a\"))\n\trequire.Equal(t, \"* * *\", maskName(\" a a a \"))\n\trequire.Equal(t, \"Jo** ***\", maskName(\"John Doe\"))\n\trequire.Equal(t, \"Jo** Sm*** **\", maskName(\"John Smith Jr\"))\n\trequire.Equal(t, \"Al******* Lo********** ** ***\", maskName(\"Alexander Longnameiton Jr III\"))\n}\n<commit_msg>test: verify describe file<commit_after>\/\/ Copyright 2020 The Moov Authors\n\/\/ Use of this source code is governed by an Apache License\n\/\/ license that can be found in the LICENSE file.\n\npackage describe\n\nimport (\n\t\"bytes\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/moov-io\/ach\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestDescribeFile(t *testing.T) {\n\tfile, err := ach.ReadFile(filepath.Join(\"..\", \"..\", \"..\", \"test\", \"testdata\", \"ppd-debit.ach\"))\n\trequire.NoError(t, err)\n\n\tvar buf bytes.Buffer\n\tFile(&buf, file, nil) \/\/ No Options\n\trequire.Equal(t, 1092, buf.Len())\n}\n\nfunc TestDescribeIAT(t *testing.T) {\n\tfile, err := ach.ReadFile(filepath.Join(\"..\", \"..\", \"..\", \"test\", \"testdata\", \"iat-debit.ach\"))\n\trequire.NoError(t, err)\n\n\tvar buf bytes.Buffer\n\tFile(&buf, file, nil) \/\/ No Options\n\trequire.Equal(t, 3967, buf.Len())\n}\n\nfunc TestDescribeReturn(t *testing.T) {\n\tfile, err := ach.ReadFile(filepath.Join(\"..\", \"..\", \"..\", \"test\", \"testdata\", \"return-WEB.ach\"))\n\trequire.NoError(t, err)\n\n\tvar buf bytes.Buffer\n\tFile(&buf, file, nil) \/\/ No Options\n\trequire.Equal(t, 2410, buf.Len())\n}\n\nfunc TestDescribeCorrection(t *testing.T) {\n\tfile, err := ach.ReadFile(filepath.Join(\"..\", \"..\", \"..\", \"test\", \"testdata\", \"cor-example.ach\"))\n\trequire.NoError(t, err)\n\n\tvar buf bytes.Buffer\n\tFile(&buf, file, nil) \/\/ No Options\n\trequire.Equal(t, 1283, buf.Len())\n}\n\nfunc TestFormatAmount(t *testing.T) {\n\trequire.Equal(t, \"12345\", formatAmount(false, 12345))\n\trequire.Equal(t, \"123.45\", formatAmount(true, 12345))\n\trequire.Equal(t, \"1,234,567.89\", formatAmount(true, 123456789))\n}\n\nfunc TestMaskNumber(t *testing.T) {\n\trequire.Equal(t, \"****\", maskNumber(\"\"))\n\trequire.Equal(t, \"****\", maskNumber(\"1\"))\n\trequire.Equal(t, \"****\", maskNumber(\"12\"))\n\trequire.Equal(t, \"****\", maskNumber(\"123\"))\n\trequire.Equal(t, \"****\", maskNumber(\"1234\"))\n\trequire.Equal(t, \"*2345\", maskNumber(\"12345\"))\n\trequire.Equal(t, \"**3456\", maskNumber(\"123456\"))\n\trequire.Equal(t, \"***4567\", maskNumber(\"1234567\"))\n\trequire.Equal(t, \"****5678\", maskNumber(\"12345678\"))\n\trequire.Equal(t, \"*****6789\", maskNumber(\"123456789\"))\n\trequire.Equal(t, \"******7890\", maskNumber(\"1234567890\"))\n}\n\nfunc TestMaskName(t *testing.T) {\n\trequire.Equal(t, \"\", maskName(\"\"))\n\trequire.Equal(t, \"* * *\", maskName(\"a a a\"))\n\trequire.Equal(t, \"* * *\", maskName(\" a a a \"))\n\trequire.Equal(t, \"Jo** ***\", maskName(\"John Doe\"))\n\trequire.Equal(t, \"Jo** Sm*** **\", maskName(\"John Smith Jr\"))\n\trequire.Equal(t, \"Al******* Lo********** ** ***\", maskName(\"Alexander Longnameiton Jr III\"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\npackage apisrv provides an implementation of the gRPC server defined in ..\/..\/..\/api\/protobuf-spec\/frontend.proto.\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n*\/\n\npackage apisrv\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/metrics\"\n\tfrontend \"github.com\/GoogleCloudPlatform\/open-match\/internal\/pb\"\n\tredisHelpers \"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\/playerindices\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\/redispb\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/tag\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"go.opencensus.io\/plugin\/ocgrpc\"\n\t\"google.golang.org\/grpc\"\n)\n\n\/\/ Logrus structured logging setup\nvar (\n\tfeLogFields = log.Fields{\n\t\t\"app\": \"openmatch\",\n\t\t\"component\": \"frontend\",\n\t}\n\tfeLog = log.WithFields(feLogFields)\n)\n\n\/\/ FrontendAPI implements frontend.ApiServer, the server generated by compiling\n\/\/ the protobuf, by fulfilling the frontend.APIClient interface.\ntype FrontendAPI struct {\n\tgrpc *grpc.Server\n\tcfg *viper.Viper\n\tpool *redis.Pool\n}\ntype frontendAPI FrontendAPI\n\n\/\/ New returns an instantiated srvice\nfunc New(cfg *viper.Viper, pool *redis.Pool) *FrontendAPI {\n\ts := FrontendAPI{\n\t\tpool: pool,\n\t\tgrpc: grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})),\n\t\tcfg: cfg,\n\t}\n\n\t\/\/ Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus\n\tlog.AddHook(metrics.NewHook(FeLogLines, KeySeverity))\n\n\t\/\/ Register gRPC server\n\tfrontend.RegisterFrontendServer(s.grpc, (*frontendAPI)(&s))\n\tfeLog.Info(\"Successfully registered gRPC server\")\n\treturn &s\n}\n\n\/\/ Open starts the api grpc service listening on the configured port.\nfunc (s *FrontendAPI) Open() error {\n\tln, err := net.Listen(\"tcp\", \":\"+s.cfg.GetString(\"api.frontend.port\"))\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"port\": s.cfg.GetInt(\"api.frontend.port\"),\n\t\t}).Error(\"net.Listen() error\")\n\t\treturn err\n\t}\n\tfeLog.WithFields(log.Fields{\"port\": s.cfg.GetInt(\"api.frontend.port\")}).Info(\"TCP net listener initialized\")\n\n\tgo func() {\n\t\terr := s.grpc.Serve(ln)\n\t\tif err != nil {\n\t\t\tfeLog.WithFields(log.Fields{\"error\": err.Error()}).Error(\"gRPC serve() error\")\n\t\t}\n\t\tfeLog.Info(\"serving gRPC endpoints\")\n\t}()\n\n\treturn nil\n}\n\n\/\/ CreatePlayer is this service's implementation of the CreatePlayer gRPC method defined in frontend.proto\nfunc (s *frontendAPI) CreatePlayer(ctx context.Context, group *frontend.Player) (*frontend.Result, error) {\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"CreatePlayer\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ Write group\n\terr := redispb.MarshalToRedis(ctx, s.pool, group, s.cfg.GetInt(\"redis.expirations.player\"))\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\n\t\/\/ Index group\n\terr = playerindices.Create(ctx, s.pool, s.cfg, *group)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\n\t\/\/ Return success.\n\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\treturn &frontend.Result{Success: true, Error: \"\"}, err\n\n}\n\n\/\/ DeletePlayer is this service's implementation of the DeletePlayer gRPC method defined in frontend.proto\nfunc (s *frontendAPI) DeletePlayer(ctx context.Context, group *frontend.Player) (*frontend.Result, error) {\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"DeletePlayer\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ Deindex this player; at that point they don't show up in MMFs anymore. We can then delete\n\t\/\/ their actual player object from Redis later.\n\terr := playerindices.Delete(ctx, s.pool, s.cfg, group.Id)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\t\/\/ Kick off delete but don't wait for it to complete.\n\tgo s.deletePlayer(group.Id)\n\n\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\treturn &frontend.Result{Success: true, Error: \"\"}, err\n\n}\n\n\/\/ deletePlayer is a 'lazy' player delete\n\/\/ It should always be called as a goroutine and should only be called after\n\/\/ confirmation that a player has been deindexed (and therefore MMF's can't\n\/\/ find the player to read them anyway)\n\/\/ As a final action, it also kicks off a lazy delete of the player's metadata\nfunc (s *frontendAPI) deletePlayer(id string) {\n\n\terr := redisHelpers.Delete(context.Background(), s.pool, id)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Warn(\"Error deleting player from state storage, this could leak state storage memory but is usually not a fatal error\")\n\t}\n\tgo playerindices.DeleteMeta(context.Background(), s.pool, id)\n}\n\n\/\/ GetUpdates is this service's implementation of the GetUpdates gRPC method defined in frontend.proto\nfunc (s *frontendAPI) GetUpdates(p *frontend.Player, assignmentStream frontend.Frontend_GetUpdatesServer) error {\n\t\/\/ Get cancellable context\n\tctx, cancel := context.WithCancel(assignmentStream.Context())\n\tdefer cancel()\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"GetAssignment\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ get and return connection string\n\twatchChan := redispb.PlayerWatcher(ctx, s.pool, *p) \/\/ watcher() runs the appropriate Redis commands.\n\ttimeoutChan := time.After(time.Duration(s.cfg.GetInt(\"api.frontend.timeout\")) * time.Second)\n\n\tfor {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ Context cancelled\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"playerid\": p.Id,\n\t\t\t}).Info(\"client closed connection successfully\")\n\t\t\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\t\t\treturn nil\n\t\tcase <-timeoutChan: \/\/ Timeout reached without client closing connection\n\t\t\t\/\/ TODO:deal with the fallout\n\t\t\terr := errors.New(\"server timeout reached without client closing connection\")\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t\t\"component\": \"statestorage\",\n\t\t\t\t\"playerid\": p.Id,\n\t\t\t}).Error(\"State storage error\")\n\n\t\t\t\/\/ Count errors for metrics\n\t\t\terrTag, _ := tag.NewKey(\"errtype\")\n\t\t\tfnCtx, _ := tag.New(ctx, tag.Insert(errTag, \"watch_timeout\"))\n\t\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\t\t\/\/TODO: we could generate a frontend.player message with an error\n\t\t\t\/\/field and stream it to the client before throwing the error here\n\t\t\t\/\/if we wanted to send more useful client retry information\n\t\t\treturn err\n\n\t\tcase a := <-watchChan:\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"assignment\": a.Assignment,\n\t\t\t\t\"playerid\": a.Id,\n\t\t\t\t\"status\": a.Status,\n\t\t\t\t\"error\": a.Error,\n\t\t\t}).Info(\"updating client\")\n\t\t\tassignmentStream.Send(&a)\n\t\t\tstats.Record(fnCtx, FeGrpcStreamedResponses.M(1))\n\t\t\t\/\/ Reset timeout.\n\t\t\ttimeoutChan = time.After(time.Duration(s.cfg.GetInt(\"api.frontend.timeout\")) * time.Second)\n\t\t}\n\t}\n\n}\n<commit_msg>remove player from ignorelists on frontend.DeletePlayer call<commit_after>\/*\npackage apisrv provides an implementation of the gRPC server defined in ..\/..\/..\/api\/protobuf-spec\/frontend.proto.\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n*\/\n\npackage apisrv\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/metrics\"\n\tfrontend \"github.com\/GoogleCloudPlatform\/open-match\/internal\/pb\"\n\tredisHelpers \"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\/ignorelist\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\/playerindices\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/statestorage\/redis\/redispb\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"go.opencensus.io\/stats\"\n\t\"go.opencensus.io\/tag\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"go.opencensus.io\/plugin\/ocgrpc\"\n\t\"google.golang.org\/grpc\"\n)\n\n\/\/ Logrus structured logging setup\nvar (\n\tfeLogFields = log.Fields{\n\t\t\"app\": \"openmatch\",\n\t\t\"component\": \"frontend\",\n\t}\n\tfeLog = log.WithFields(feLogFields)\n)\n\n\/\/ FrontendAPI implements frontend.ApiServer, the server generated by compiling\n\/\/ the protobuf, by fulfilling the frontend.APIClient interface.\ntype FrontendAPI struct {\n\tgrpc *grpc.Server\n\tcfg *viper.Viper\n\tpool *redis.Pool\n}\ntype frontendAPI FrontendAPI\n\n\/\/ New returns an instantiated srvice\nfunc New(cfg *viper.Viper, pool *redis.Pool) *FrontendAPI {\n\ts := FrontendAPI{\n\t\tpool: pool,\n\t\tgrpc: grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})),\n\t\tcfg: cfg,\n\t}\n\n\t\/\/ Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus\n\tlog.AddHook(metrics.NewHook(FeLogLines, KeySeverity))\n\n\t\/\/ Register gRPC server\n\tfrontend.RegisterFrontendServer(s.grpc, (*frontendAPI)(&s))\n\tfeLog.Info(\"Successfully registered gRPC server\")\n\treturn &s\n}\n\n\/\/ Open starts the api grpc service listening on the configured port.\nfunc (s *FrontendAPI) Open() error {\n\tln, err := net.Listen(\"tcp\", \":\"+s.cfg.GetString(\"api.frontend.port\"))\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"port\": s.cfg.GetInt(\"api.frontend.port\"),\n\t\t}).Error(\"net.Listen() error\")\n\t\treturn err\n\t}\n\tfeLog.WithFields(log.Fields{\"port\": s.cfg.GetInt(\"api.frontend.port\")}).Info(\"TCP net listener initialized\")\n\n\tgo func() {\n\t\terr := s.grpc.Serve(ln)\n\t\tif err != nil {\n\t\t\tfeLog.WithFields(log.Fields{\"error\": err.Error()}).Error(\"gRPC serve() error\")\n\t\t}\n\t\tfeLog.Info(\"serving gRPC endpoints\")\n\t}()\n\n\treturn nil\n}\n\n\/\/ CreatePlayer is this service's implementation of the CreatePlayer gRPC method defined in frontend.proto\nfunc (s *frontendAPI) CreatePlayer(ctx context.Context, group *frontend.Player) (*frontend.Result, error) {\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"CreatePlayer\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ Write group\n\terr := redispb.MarshalToRedis(ctx, s.pool, group, s.cfg.GetInt(\"redis.expirations.player\"))\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\n\t\/\/ Index group\n\terr = playerindices.Create(ctx, s.pool, s.cfg, *group)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\n\t\/\/ Return success.\n\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\treturn &frontend.Result{Success: true, Error: \"\"}, err\n\n}\n\n\/\/ DeletePlayer is this service's implementation of the DeletePlayer gRPC method defined in frontend.proto\nfunc (s *frontendAPI) DeletePlayer(ctx context.Context, group *frontend.Player) (*frontend.Result, error) {\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"DeletePlayer\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ Deindex this player; at that point they don't show up in MMFs anymore. We can then delete\n\t\/\/ their actual player object from Redis later.\n\terr := playerindices.Delete(ctx, s.pool, s.cfg, group.Id)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Error(\"State storage error\")\n\n\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\treturn &frontend.Result{Success: false, Error: err.Error()}, err\n\t}\n\t\/\/ Kick off delete but don't wait for it to complete.\n\tgo s.deletePlayer(group.Id)\n\n\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\treturn &frontend.Result{Success: true, Error: \"\"}, err\n\n}\n\n\/\/ deletePlayer is a 'lazy' player delete\n\/\/ It should always be called as a goroutine and should only be called after\n\/\/ confirmation that a player has been deindexed (and therefore MMF's can't\n\/\/ find the player to read them anyway)\n\/\/ As a final action, it also kicks off a lazy delete of the player's metadata\nfunc (s *frontendAPI) deletePlayer(id string) {\n\n\terr := redisHelpers.Delete(context.Background(), s.pool, id)\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"component\": \"statestorage\",\n\t\t}).Warn(\"Error deleting player from state storage, this could leak state storage memory but is usually not a fatal error\")\n\t}\n\n\t\/\/ Delete player from all ignorelists\n\tgo func() {\n\t\tredisConn := s.pool.Get()\n\t\tdefer redisConn.Close()\n\n\t\tredisConn.Send(\"MULTI\")\n\t\tfor il := range s.cfg.GetStringMap(\"ignoreLists\") {\n\t\t\tignorelist.SendRemove(redisConn, il, []string{id})\n\t\t}\n\t\t_, err := redisConn.Do(\"EXEC\")\n\t\tif err != nil {\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t\t\"component\": \"statestorage\",\n\t\t\t}).Error(\"Error de-indexing player from ignorelists\")\n\t\t}\n\t}()\n\n\tgo playerindices.DeleteMeta(context.Background(), s.pool, id)\n}\n\n\/\/ GetUpdates is this service's implementation of the GetUpdates gRPC method defined in frontend.proto\nfunc (s *frontendAPI) GetUpdates(p *frontend.Player, assignmentStream frontend.Frontend_GetUpdatesServer) error {\n\t\/\/ Get cancellable context\n\tctx, cancel := context.WithCancel(assignmentStream.Context())\n\tdefer cancel()\n\n\t\/\/ Create context for tagging OpenCensus metrics.\n\tfuncName := \"GetAssignment\"\n\tfnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))\n\n\t\/\/ get and return connection string\n\twatchChan := redispb.PlayerWatcher(ctx, s.pool, *p) \/\/ watcher() runs the appropriate Redis commands.\n\ttimeoutChan := time.After(time.Duration(s.cfg.GetInt(\"api.frontend.timeout\")) * time.Second)\n\n\tfor {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ Context cancelled\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"playerid\": p.Id,\n\t\t\t}).Info(\"client closed connection successfully\")\n\t\t\tstats.Record(fnCtx, FeGrpcRequests.M(1))\n\t\t\treturn nil\n\t\tcase <-timeoutChan: \/\/ Timeout reached without client closing connection\n\t\t\t\/\/ TODO:deal with the fallout\n\t\t\terr := errors.New(\"server timeout reached without client closing connection\")\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t\t\"component\": \"statestorage\",\n\t\t\t\t\"playerid\": p.Id,\n\t\t\t}).Error(\"State storage error\")\n\n\t\t\t\/\/ Count errors for metrics\n\t\t\terrTag, _ := tag.NewKey(\"errtype\")\n\t\t\tfnCtx, _ := tag.New(ctx, tag.Insert(errTag, \"watch_timeout\"))\n\t\t\tstats.Record(fnCtx, FeGrpcErrors.M(1))\n\t\t\t\/\/TODO: we could generate a frontend.player message with an error\n\t\t\t\/\/field and stream it to the client before throwing the error here\n\t\t\t\/\/if we wanted to send more useful client retry information\n\t\t\treturn err\n\n\t\tcase a := <-watchChan:\n\t\t\tfeLog.WithFields(log.Fields{\n\t\t\t\t\"assignment\": a.Assignment,\n\t\t\t\t\"playerid\": a.Id,\n\t\t\t\t\"status\": a.Status,\n\t\t\t\t\"error\": a.Error,\n\t\t\t}).Info(\"updating client\")\n\t\t\tassignmentStream.Send(&a)\n\t\t\tstats.Record(fnCtx, FeGrpcStreamedResponses.M(1))\n\t\t\t\/\/ Reset timeout.\n\t\t\ttimeoutChan = time.After(time.Duration(s.cfg.GetInt(\"api.frontend.timeout\")) * time.Second)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package app does all of the work necessary to create a Kubernetes\n\/\/ APIServer by binding together the API, master and APIServer infrastructure.\n\/\/ It can be configured and called directly or via the hyperkube framework.\npackage app\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pborman\/uuid\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/kubernetes\/cmd\/kube-apiserver\/app\/options\"\n\t\"k8s.io\/kubernetes\/pkg\/admission\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/autoscaling\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/batch\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/extensions\"\n\t\"k8s.io\/kubernetes\/pkg\/apiserver\"\n\t\"k8s.io\/kubernetes\/pkg\/apiserver\/authenticator\"\n\t\"k8s.io\/kubernetes\/pkg\/capabilities\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\/informers\"\n\tserviceaccountcontroller \"k8s.io\/kubernetes\/pkg\/controller\/serviceaccount\"\n\tgeneratedopenapi \"k8s.io\/kubernetes\/pkg\/generated\/openapi\"\n\t\"k8s.io\/kubernetes\/pkg\/genericapiserver\"\n\t\"k8s.io\/kubernetes\/pkg\/genericapiserver\/authorizer\"\n\tgenericoptions \"k8s.io\/kubernetes\/pkg\/genericapiserver\/options\"\n\t\"k8s.io\/kubernetes\/pkg\/master\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/cachesize\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\/schema\"\n\tutilerrors \"k8s.io\/kubernetes\/pkg\/util\/errors\"\n\tutilnet \"k8s.io\/kubernetes\/pkg\/util\/net\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/version\"\n)\n\n\/\/ NewAPIServerCommand creates a *cobra.Command object with default parameters\nfunc NewAPIServerCommand() *cobra.Command {\n\ts := options.NewServerRunOptions()\n\ts.AddFlags(pflag.CommandLine)\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-apiserver\",\n\t\tLong: `The Kubernetes API server validates and configures data\nfor the api objects which include pods, services, replicationcontrollers, and\nothers. The API Server services REST operations and provides the frontend to the\ncluster's shared state through which all other components interact.`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t},\n\t}\n\n\treturn cmd\n}\n\n\/\/ Run runs the specified APIServer. This should never exit.\nfunc Run(s *options.ServerRunOptions) error {\n\tif errs := s.Etcd.Validate(); len(errs) > 0 {\n\t\treturn utilerrors.NewAggregate(errs)\n\t}\n\tif err := s.GenericServerRunOptions.DefaultExternalAddress(s.SecureServing, s.InsecureServing); err != nil {\n\t\treturn err\n\t}\n\n\tgenericapiserver.DefaultAndValidateRunOptions(s.GenericServerRunOptions)\n\tgenericConfig := genericapiserver.NewConfig(). \/\/ create the new config\n\t\t\t\t\t\t\tApplyOptions(s.GenericServerRunOptions). \/\/ apply the options selected\n\t\t\t\t\t\t\tApplySecureServingOptions(s.SecureServing).\n\t\t\t\t\t\t\tApplyInsecureServingOptions(s.InsecureServing).\n\t\t\t\t\t\t\tApplyAuthenticationOptions(s.Authentication).\n\t\t\t\t\t\t\tApplyRBACSuperUser(s.Authorization.RBACSuperUser)\n\n\tserviceIPRange, apiServerServiceIP, err := genericapiserver.DefaultServiceIPRange(s.GenericServerRunOptions.ServiceClusterIPRange)\n\tif err != nil {\n\t\tglog.Fatalf(\"Error determining service IP ranges: %v\", err)\n\t}\n\tif err := genericConfig.MaybeGenerateServingCerts(apiServerServiceIP); err != nil {\n\t\tglog.Fatalf(\"Failed to generate service certificate: %v\", err)\n\t}\n\n\tcapabilities.Initialize(capabilities.Capabilities{\n\t\tAllowPrivileged: s.AllowPrivileged,\n\t\t\/\/ TODO(vmarmol): Implement support for HostNetworkSources.\n\t\tPrivilegedSources: capabilities.PrivilegedSources{\n\t\t\tHostNetworkSources: []string{},\n\t\t\tHostPIDSources: []string{},\n\t\t\tHostIPCSources: []string{},\n\t\t},\n\t\tPerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,\n\t})\n\n\t\/\/ Setup tunneler if needed\n\tvar tunneler genericapiserver.Tunneler\n\tvar proxyDialerFn apiserver.ProxyDialerFunc\n\tif len(s.SSHUser) > 0 {\n\t\t\/\/ Get ssh key distribution func, if supported\n\t\tvar installSSH genericapiserver.InstallSSHKey\n\t\tcloud, err := cloudprovider.InitCloudProvider(s.GenericServerRunOptions.CloudProvider, s.GenericServerRunOptions.CloudConfigFile)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Cloud provider could not be initialized: %v\", err)\n\t\t}\n\t\tif cloud != nil {\n\t\t\tif instances, supported := cloud.Instances(); supported {\n\t\t\t\tinstallSSH = instances.AddSSHKeyToAllInstances\n\t\t\t}\n\t\t}\n\t\tif s.KubeletConfig.Port == 0 {\n\t\t\tglog.Fatalf(\"Must enable kubelet port if proxy ssh-tunneling is specified.\")\n\t\t}\n\t\t\/\/ Set up the tunneler\n\t\t\/\/ TODO(cjcullen): If we want this to handle per-kubelet ports or other\n\t\t\/\/ kubelet listen-addresses, we need to plumb through options.\n\t\thealthCheckPath := &url.URL{\n\t\t\tScheme: \"https\",\n\t\t\tHost: net.JoinHostPort(\"127.0.0.1\", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),\n\t\t\tPath: \"healthz\",\n\t\t}\n\t\ttunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)\n\n\t\t\/\/ Use the tunneler's dialer to connect to the kubelet\n\t\ts.KubeletConfig.Dial = tunneler.Dial\n\t\t\/\/ Use the tunneler's dialer when proxying to pods, services, and nodes\n\t\tproxyDialerFn = tunneler.Dial\n\t}\n\n\t\/\/ Proxying to pods and services is IP-based... don't expect to be able to verify the hostname\n\tproxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}\n\n\tif s.Etcd.StorageConfig.DeserializationCacheSize == 0 {\n\t\t\/\/ When size of cache is not explicitly set, estimate its size based on\n\t\t\/\/ target memory usage.\n\t\tglog.V(2).Infof(\"Initalizing deserialization cache size based on %dMB limit\", s.GenericServerRunOptions.TargetRAMMB)\n\n\t\t\/\/ This is the heuristics that from memory capacity is trying to infer\n\t\t\/\/ the maximum number of nodes in the cluster and set cache sizes based\n\t\t\/\/ on that value.\n\t\t\/\/ From our documentation, we officially recomment 120GB machines for\n\t\t\/\/ 2000 nodes, and we scale from that point. Thus we assume ~60MB of\n\t\t\/\/ capacity per node.\n\t\t\/\/ TODO: We may consider deciding that some percentage of memory will\n\t\t\/\/ be used for the deserialization cache and divide it by the max object\n\t\t\/\/ size to compute its size. We may even go further and measure\n\t\t\/\/ collective sizes of the objects in the cache.\n\t\tclusterSize := s.GenericServerRunOptions.TargetRAMMB \/ 60\n\t\ts.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize\n\t\tif s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {\n\t\t\ts.Etcd.StorageConfig.DeserializationCacheSize = 1000\n\t\t}\n\t}\n\n\tstorageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion()\n\tif err != nil {\n\t\tglog.Fatalf(\"error generating storage version map: %s\", err)\n\t}\n\tstorageFactory, err := genericapiserver.BuildDefaultStorageFactory(\n\t\ts.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,\n\t\tgenericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,\n\t\t\/\/ FIXME: this GroupVersionResource override should be configurable\n\t\t[]schema.GroupVersionResource{batch.Resource(\"cronjobs\").WithVersion(\"v2alpha1\")},\n\t\tmaster.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"error in initializing storage factory: %s\", err)\n\t}\n\tstorageFactory.AddCohabitatingResources(batch.Resource(\"jobs\"), extensions.Resource(\"jobs\"))\n\tstorageFactory.AddCohabitatingResources(autoscaling.Resource(\"horizontalpodautoscalers\"), extensions.Resource(\"horizontalpodautoscalers\"))\n\tfor _, override := range s.Etcd.EtcdServersOverrides {\n\t\ttokens := strings.Split(override, \"#\")\n\t\tif len(tokens) != 2 {\n\t\t\tglog.Errorf(\"invalid value of etcd server overrides: %s\", override)\n\t\t\tcontinue\n\t\t}\n\n\t\tapiresource := strings.Split(tokens[0], \"\/\")\n\t\tif len(apiresource) != 2 {\n\t\t\tglog.Errorf(\"invalid resource definition: %s\", tokens[0])\n\t\t\tcontinue\n\t\t}\n\t\tgroup := apiresource[0]\n\t\tresource := apiresource[1]\n\t\tgroupResource := schema.GroupResource{Group: group, Resource: resource}\n\n\t\tservers := strings.Split(tokens[1], \";\")\n\t\tstorageFactory.SetEtcdLocation(groupResource, servers)\n\t}\n\n\t\/\/ Default to the private server key for service account token signing\n\tif len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != \"\" {\n\t\tif authenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {\n\t\t\ts.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}\n\t\t} else {\n\t\t\tglog.Warning(\"No TLS key provided, service account token authentication disabled\")\n\t\t}\n\t}\n\n\tauthenticatorConfig := s.Authentication.ToAuthenticationConfig(s.SecureServing.ClientCA)\n\tif s.Authentication.ServiceAccounts.Lookup {\n\t\t\/\/ If we need to look up service accounts and tokens,\n\t\t\/\/ go directly to etcd to avoid recursive auth insanity\n\t\tstorageConfig, err := storageFactory.NewConfig(api.Resource(\"serviceaccounts\"))\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Unable to get serviceaccounts storage: %v\", err)\n\t\t}\n\t\tauthenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource(\"serviceaccounts\")), storageFactory.ResourcePrefix(api.Resource(\"secrets\")))\n\t}\n\n\tapiAuthenticator, securityDefinitions, err := authenticator.New(authenticatorConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Invalid Authentication Config: %v\", err)\n\t}\n\n\tprivilegedLoopbackToken := uuid.NewRandom().String()\n\tselfClientConfig, err := genericoptions.NewSelfClientConfig(s.SecureServing, s.InsecureServing, privilegedLoopbackToken)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to create clientset: %v\", err)\n\t}\n\tclient, err := internalclientset.NewForConfig(selfClientConfig)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to create clientset: %v\", err)\n\t}\n\tsharedInformers := informers.NewSharedInformerFactory(nil, client, 10*time.Minute)\n\n\tauthorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)\n\tapiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Invalid Authorization Config: %v\", err)\n\t}\n\n\tadmissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, \",\")\n\tpluginInitializer := admission.NewPluginInitializer(sharedInformers, apiAuthorizer)\n\tadmissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile, pluginInitializer)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to initialize plugins: %v\", err)\n\t}\n\n\tproxyTransport := utilnet.SetTransportDefaults(&http.Transport{\n\t\tDial: proxyDialerFn,\n\t\tTLSClientConfig: proxyTLSClientConfig,\n\t})\n\tkubeVersion := version.Get()\n\n\tgenericConfig.Version = &kubeVersion\n\tgenericConfig.LoopbackClientConfig = selfClientConfig\n\tgenericConfig.Authenticator = apiAuthenticator\n\tgenericConfig.Authorizer = apiAuthorizer\n\tgenericConfig.AdmissionControl = admissionController\n\tgenericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource\n\tgenericConfig.OpenAPIConfig.Info.Title = \"Kubernetes\"\n\tgenericConfig.OpenAPIConfig.Definitions = generatedopenapi.OpenAPIDefinitions\n\tgenericConfig.EnableOpenAPISupport = true\n\tgenericConfig.EnableMetrics = true\n\tgenericConfig.OpenAPIConfig.SecurityDefinitions = securityDefinitions\n\n\tconfig := &master.Config{\n\t\tGenericConfig: genericConfig,\n\n\t\tStorageFactory: storageFactory,\n\t\tEnableWatchCache: s.GenericServerRunOptions.EnableWatchCache,\n\t\tEnableCoreControllers: true,\n\t\tDeleteCollectionWorkers: s.GenericServerRunOptions.DeleteCollectionWorkers,\n\t\tEventTTL: s.EventTTL,\n\t\tKubeletClientConfig: s.KubeletConfig,\n\t\tEnableUISupport: true,\n\t\tEnableLogsSupport: true,\n\t\tProxyTransport: proxyTransport,\n\n\t\tTunneler: tunneler,\n\n\t\tServiceIPRange: serviceIPRange,\n\t\tAPIServerServiceIP: apiServerServiceIP,\n\t\tAPIServerServicePort: 443,\n\n\t\tServiceNodePortRange: s.GenericServerRunOptions.ServiceNodePortRange,\n\t\tKubernetesServiceNodePort: s.GenericServerRunOptions.KubernetesServiceNodePort,\n\n\t\tMasterCount: s.GenericServerRunOptions.MasterCount,\n\t}\n\n\tif s.GenericServerRunOptions.EnableWatchCache {\n\t\tglog.V(2).Infof(\"Initalizing cache sizes based on %dMB limit\", s.GenericServerRunOptions.TargetRAMMB)\n\t\tcachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB)\n\t\tcachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes)\n\t}\n\n\tm, err := config.Complete().New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsharedInformers.Start(wait.NeverStop)\n\tm.GenericAPIServer.PrepareRun().Run(wait.NeverStop)\n\treturn nil\n}\n<commit_msg>fix glog message typo<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package app does all of the work necessary to create a Kubernetes\n\/\/ APIServer by binding together the API, master and APIServer infrastructure.\n\/\/ It can be configured and called directly or via the hyperkube framework.\npackage app\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pborman\/uuid\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/kubernetes\/cmd\/kube-apiserver\/app\/options\"\n\t\"k8s.io\/kubernetes\/pkg\/admission\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/autoscaling\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/batch\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/extensions\"\n\t\"k8s.io\/kubernetes\/pkg\/apiserver\"\n\t\"k8s.io\/kubernetes\/pkg\/apiserver\/authenticator\"\n\t\"k8s.io\/kubernetes\/pkg\/capabilities\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\/informers\"\n\tserviceaccountcontroller \"k8s.io\/kubernetes\/pkg\/controller\/serviceaccount\"\n\tgeneratedopenapi \"k8s.io\/kubernetes\/pkg\/generated\/openapi\"\n\t\"k8s.io\/kubernetes\/pkg\/genericapiserver\"\n\t\"k8s.io\/kubernetes\/pkg\/genericapiserver\/authorizer\"\n\tgenericoptions \"k8s.io\/kubernetes\/pkg\/genericapiserver\/options\"\n\t\"k8s.io\/kubernetes\/pkg\/master\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/cachesize\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\/schema\"\n\tutilerrors \"k8s.io\/kubernetes\/pkg\/util\/errors\"\n\tutilnet \"k8s.io\/kubernetes\/pkg\/util\/net\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/wait\"\n\t\"k8s.io\/kubernetes\/pkg\/version\"\n)\n\n\/\/ NewAPIServerCommand creates a *cobra.Command object with default parameters\nfunc NewAPIServerCommand() *cobra.Command {\n\ts := options.NewServerRunOptions()\n\ts.AddFlags(pflag.CommandLine)\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-apiserver\",\n\t\tLong: `The Kubernetes API server validates and configures data\nfor the api objects which include pods, services, replicationcontrollers, and\nothers. The API Server services REST operations and provides the frontend to the\ncluster's shared state through which all other components interact.`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t},\n\t}\n\n\treturn cmd\n}\n\n\/\/ Run runs the specified APIServer. This should never exit.\nfunc Run(s *options.ServerRunOptions) error {\n\tif errs := s.Etcd.Validate(); len(errs) > 0 {\n\t\treturn utilerrors.NewAggregate(errs)\n\t}\n\tif err := s.GenericServerRunOptions.DefaultExternalAddress(s.SecureServing, s.InsecureServing); err != nil {\n\t\treturn err\n\t}\n\n\tgenericapiserver.DefaultAndValidateRunOptions(s.GenericServerRunOptions)\n\tgenericConfig := genericapiserver.NewConfig(). \/\/ create the new config\n\t\t\t\t\t\t\tApplyOptions(s.GenericServerRunOptions). \/\/ apply the options selected\n\t\t\t\t\t\t\tApplySecureServingOptions(s.SecureServing).\n\t\t\t\t\t\t\tApplyInsecureServingOptions(s.InsecureServing).\n\t\t\t\t\t\t\tApplyAuthenticationOptions(s.Authentication).\n\t\t\t\t\t\t\tApplyRBACSuperUser(s.Authorization.RBACSuperUser)\n\n\tserviceIPRange, apiServerServiceIP, err := genericapiserver.DefaultServiceIPRange(s.GenericServerRunOptions.ServiceClusterIPRange)\n\tif err != nil {\n\t\tglog.Fatalf(\"Error determining service IP ranges: %v\", err)\n\t}\n\tif err := genericConfig.MaybeGenerateServingCerts(apiServerServiceIP); err != nil {\n\t\tglog.Fatalf(\"Failed to generate service certificate: %v\", err)\n\t}\n\n\tcapabilities.Initialize(capabilities.Capabilities{\n\t\tAllowPrivileged: s.AllowPrivileged,\n\t\t\/\/ TODO(vmarmol): Implement support for HostNetworkSources.\n\t\tPrivilegedSources: capabilities.PrivilegedSources{\n\t\t\tHostNetworkSources: []string{},\n\t\t\tHostPIDSources: []string{},\n\t\t\tHostIPCSources: []string{},\n\t\t},\n\t\tPerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,\n\t})\n\n\t\/\/ Setup tunneler if needed\n\tvar tunneler genericapiserver.Tunneler\n\tvar proxyDialerFn apiserver.ProxyDialerFunc\n\tif len(s.SSHUser) > 0 {\n\t\t\/\/ Get ssh key distribution func, if supported\n\t\tvar installSSH genericapiserver.InstallSSHKey\n\t\tcloud, err := cloudprovider.InitCloudProvider(s.GenericServerRunOptions.CloudProvider, s.GenericServerRunOptions.CloudConfigFile)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Cloud provider could not be initialized: %v\", err)\n\t\t}\n\t\tif cloud != nil {\n\t\t\tif instances, supported := cloud.Instances(); supported {\n\t\t\t\tinstallSSH = instances.AddSSHKeyToAllInstances\n\t\t\t}\n\t\t}\n\t\tif s.KubeletConfig.Port == 0 {\n\t\t\tglog.Fatalf(\"Must enable kubelet port if proxy ssh-tunneling is specified.\")\n\t\t}\n\t\t\/\/ Set up the tunneler\n\t\t\/\/ TODO(cjcullen): If we want this to handle per-kubelet ports or other\n\t\t\/\/ kubelet listen-addresses, we need to plumb through options.\n\t\thealthCheckPath := &url.URL{\n\t\t\tScheme: \"https\",\n\t\t\tHost: net.JoinHostPort(\"127.0.0.1\", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),\n\t\t\tPath: \"healthz\",\n\t\t}\n\t\ttunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)\n\n\t\t\/\/ Use the tunneler's dialer to connect to the kubelet\n\t\ts.KubeletConfig.Dial = tunneler.Dial\n\t\t\/\/ Use the tunneler's dialer when proxying to pods, services, and nodes\n\t\tproxyDialerFn = tunneler.Dial\n\t}\n\n\t\/\/ Proxying to pods and services is IP-based... don't expect to be able to verify the hostname\n\tproxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}\n\n\tif s.Etcd.StorageConfig.DeserializationCacheSize == 0 {\n\t\t\/\/ When size of cache is not explicitly set, estimate its size based on\n\t\t\/\/ target memory usage.\n\t\tglog.V(2).Infof(\"Initializing deserialization cache size based on %dMB limit\", s.GenericServerRunOptions.TargetRAMMB)\n\n\t\t\/\/ This is the heuristics that from memory capacity is trying to infer\n\t\t\/\/ the maximum number of nodes in the cluster and set cache sizes based\n\t\t\/\/ on that value.\n\t\t\/\/ From our documentation, we officially recomment 120GB machines for\n\t\t\/\/ 2000 nodes, and we scale from that point. Thus we assume ~60MB of\n\t\t\/\/ capacity per node.\n\t\t\/\/ TODO: We may consider deciding that some percentage of memory will\n\t\t\/\/ be used for the deserialization cache and divide it by the max object\n\t\t\/\/ size to compute its size. We may even go further and measure\n\t\t\/\/ collective sizes of the objects in the cache.\n\t\tclusterSize := s.GenericServerRunOptions.TargetRAMMB \/ 60\n\t\ts.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize\n\t\tif s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {\n\t\t\ts.Etcd.StorageConfig.DeserializationCacheSize = 1000\n\t\t}\n\t}\n\n\tstorageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion()\n\tif err != nil {\n\t\tglog.Fatalf(\"error generating storage version map: %s\", err)\n\t}\n\tstorageFactory, err := genericapiserver.BuildDefaultStorageFactory(\n\t\ts.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs,\n\t\tgenericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,\n\t\t\/\/ FIXME: this GroupVersionResource override should be configurable\n\t\t[]schema.GroupVersionResource{batch.Resource(\"cronjobs\").WithVersion(\"v2alpha1\")},\n\t\tmaster.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"error in initializing storage factory: %s\", err)\n\t}\n\tstorageFactory.AddCohabitatingResources(batch.Resource(\"jobs\"), extensions.Resource(\"jobs\"))\n\tstorageFactory.AddCohabitatingResources(autoscaling.Resource(\"horizontalpodautoscalers\"), extensions.Resource(\"horizontalpodautoscalers\"))\n\tfor _, override := range s.Etcd.EtcdServersOverrides {\n\t\ttokens := strings.Split(override, \"#\")\n\t\tif len(tokens) != 2 {\n\t\t\tglog.Errorf(\"invalid value of etcd server overrides: %s\", override)\n\t\t\tcontinue\n\t\t}\n\n\t\tapiresource := strings.Split(tokens[0], \"\/\")\n\t\tif len(apiresource) != 2 {\n\t\t\tglog.Errorf(\"invalid resource definition: %s\", tokens[0])\n\t\t\tcontinue\n\t\t}\n\t\tgroup := apiresource[0]\n\t\tresource := apiresource[1]\n\t\tgroupResource := schema.GroupResource{Group: group, Resource: resource}\n\n\t\tservers := strings.Split(tokens[1], \";\")\n\t\tstorageFactory.SetEtcdLocation(groupResource, servers)\n\t}\n\n\t\/\/ Default to the private server key for service account token signing\n\tif len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != \"\" {\n\t\tif authenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {\n\t\t\ts.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}\n\t\t} else {\n\t\t\tglog.Warning(\"No TLS key provided, service account token authentication disabled\")\n\t\t}\n\t}\n\n\tauthenticatorConfig := s.Authentication.ToAuthenticationConfig(s.SecureServing.ClientCA)\n\tif s.Authentication.ServiceAccounts.Lookup {\n\t\t\/\/ If we need to look up service accounts and tokens,\n\t\t\/\/ go directly to etcd to avoid recursive auth insanity\n\t\tstorageConfig, err := storageFactory.NewConfig(api.Resource(\"serviceaccounts\"))\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Unable to get serviceaccounts storage: %v\", err)\n\t\t}\n\t\tauthenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource(\"serviceaccounts\")), storageFactory.ResourcePrefix(api.Resource(\"secrets\")))\n\t}\n\n\tapiAuthenticator, securityDefinitions, err := authenticator.New(authenticatorConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Invalid Authentication Config: %v\", err)\n\t}\n\n\tprivilegedLoopbackToken := uuid.NewRandom().String()\n\tselfClientConfig, err := genericoptions.NewSelfClientConfig(s.SecureServing, s.InsecureServing, privilegedLoopbackToken)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to create clientset: %v\", err)\n\t}\n\tclient, err := internalclientset.NewForConfig(selfClientConfig)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to create clientset: %v\", err)\n\t}\n\tsharedInformers := informers.NewSharedInformerFactory(nil, client, 10*time.Minute)\n\n\tauthorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)\n\tapiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Invalid Authorization Config: %v\", err)\n\t}\n\n\tadmissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, \",\")\n\tpluginInitializer := admission.NewPluginInitializer(sharedInformers, apiAuthorizer)\n\tadmissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile, pluginInitializer)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to initialize plugins: %v\", err)\n\t}\n\n\tproxyTransport := utilnet.SetTransportDefaults(&http.Transport{\n\t\tDial: proxyDialerFn,\n\t\tTLSClientConfig: proxyTLSClientConfig,\n\t})\n\tkubeVersion := version.Get()\n\n\tgenericConfig.Version = &kubeVersion\n\tgenericConfig.LoopbackClientConfig = selfClientConfig\n\tgenericConfig.Authenticator = apiAuthenticator\n\tgenericConfig.Authorizer = apiAuthorizer\n\tgenericConfig.AdmissionControl = admissionController\n\tgenericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource\n\tgenericConfig.OpenAPIConfig.Info.Title = \"Kubernetes\"\n\tgenericConfig.OpenAPIConfig.Definitions = generatedopenapi.OpenAPIDefinitions\n\tgenericConfig.EnableOpenAPISupport = true\n\tgenericConfig.EnableMetrics = true\n\tgenericConfig.OpenAPIConfig.SecurityDefinitions = securityDefinitions\n\n\tconfig := &master.Config{\n\t\tGenericConfig: genericConfig,\n\n\t\tStorageFactory: storageFactory,\n\t\tEnableWatchCache: s.GenericServerRunOptions.EnableWatchCache,\n\t\tEnableCoreControllers: true,\n\t\tDeleteCollectionWorkers: s.GenericServerRunOptions.DeleteCollectionWorkers,\n\t\tEventTTL: s.EventTTL,\n\t\tKubeletClientConfig: s.KubeletConfig,\n\t\tEnableUISupport: true,\n\t\tEnableLogsSupport: true,\n\t\tProxyTransport: proxyTransport,\n\n\t\tTunneler: tunneler,\n\n\t\tServiceIPRange: serviceIPRange,\n\t\tAPIServerServiceIP: apiServerServiceIP,\n\t\tAPIServerServicePort: 443,\n\n\t\tServiceNodePortRange: s.GenericServerRunOptions.ServiceNodePortRange,\n\t\tKubernetesServiceNodePort: s.GenericServerRunOptions.KubernetesServiceNodePort,\n\n\t\tMasterCount: s.GenericServerRunOptions.MasterCount,\n\t}\n\n\tif s.GenericServerRunOptions.EnableWatchCache {\n\t\tglog.V(2).Infof(\"Initializing cache sizes based on %dMB limit\", s.GenericServerRunOptions.TargetRAMMB)\n\t\tcachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB)\n\t\tcachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes)\n\t}\n\n\tm, err := config.Complete().New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsharedInformers.Start(wait.NeverStop)\n\tm.GenericAPIServer.PrepareRun().Run(wait.NeverStop)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package coduno\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/coduno\/app\/gitlab\"\n\t\"github.com\/coduno\/app\/util\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar gitlabToken = \"YHQiqMx3qUfj8_FxpFe4\"\n\ntype ContainerCreation struct {\n\tId string `json:\"Id\"`\n}\n\ntype Handler func(http.ResponseWriter, *http.Request)\n\n\/\/ A basic wrapper that is extremely general and takes care of baseline features, such\n\/\/ as tightly timed HSTS for all requests and automatic upgrades from HTTP to HTTPS.\n\/\/ All outbound flows SHOULD be wrapped.\nfunc setupHandler(handler func(http.ResponseWriter, *http.Request, context.Context)) Handler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Redirect all HTTP requests to their HTTPS version.\n\t\t\/\/ This uses a permanent redirect to make clients adjust their bookmarks.\n\t\tif r.URL.Scheme != \"https\" {\n\t\t\tlocation := r.URL\n\t\t\tlocation.Scheme = \"https\"\n\t\t\thttp.Redirect(w, r, location.String(), http.StatusMovedPermanently)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Protect against HTTP downgrade attacks by explicitly telling\n\t\t\/\/ clients to use HTTPS.\n\t\t\/\/ max-age is computed to match the expiration date of our TLS\n\t\t\/\/ certificate (minus approx. one day buffer).\n\t\t\/\/ https:\/\/developer.mozilla.org\/docs\/Web\/Security\/HTTP_strict_transport_security\n\t\tinvalidity := time.Date(2016, time.January, 3, 0, 59, 59, 0, time.UTC)\n\t\tmaxAge := invalidity.Sub(time.Now()).Seconds()\n\t\tw.Header().Set(\"Strict-Transport-Security\", fmt.Sprintf(\"max-age=%d\", int(maxAge)))\n\n\t\t\/\/ appengine.NewContext is cheap, and context is needed in many\n\t\t\/\/ handlers, so create one here.\n\t\tc := appengine.NewContext(r)\n\n\t\t\/\/ All wrapping is done, call the original handler.\n\t\thandler(w, r, c)\n\t}\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/api\/token\", setupHandler(token))\n\thttp.HandleFunc(\"\/api\/push\", setupHandler(push))\n}\n\nfunc push(w http.ResponseWriter, req *http.Request, c context.Context) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\n\tif err != nil {\n\t\tlog.Warningf(c, err.Error())\n\t} else if len(body) < 1 {\n\t\tlog.Warningf(c, \"Received empty body.\")\n\t} else {\n\t\tpush, err := gitlab.NewPush(body)\n\n\t\tif err != nil {\n\t\t\tlog.Warningf(c, err.Error())\n\t\t} else {\n\t\t\tcommit := push.Commits[0]\n\n\t\t\tdocker, _ := http.NewRequest(\"POST\", \"http:\/\/docker.cod.uno:2375\/v1.15\/containers\/create\", strings.NewReader(`\n\t\t\t\t{\n\t\t\t\t\t\"Image\": \"coduno\/git:experimental\",\n\t\t\t\t\t\"Cmd\": [\"\/start.sh\"],\n\t\t\t\t\t\"Env\": [\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_NAME=`+push.Repository.Name+`\",\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_URL=`+push.Repository.URL+`\",\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_HOMEPAGE=`+push.Repository.Homepage+`\",\n\t\t\t\t\t\t\"CODUNO_REF=`+push.Ref+`\"\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t`))\n\n\t\t\tdocker.Header = map[string][]string{\n\t\t\t\t\"Content-Type\": {\"application\/json\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tclient := urlfetch.Client(c)\n\t\t\tres, err := client.Do(docker)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Docker API response:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar result ContainerCreation\n\t\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\t\terr = json.Unmarshal(body, &result)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Received body %d: %s\", res.StatusCode, string(body))\n\t\t\t\tlog.Debugf(c, \"Unmarshalling API response: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdocker, _ = http.NewRequest(\"POST\", \"http:\/\/docker.cod.uno:2375\/v1.15\/containers\/\"+result.Id+\"\/start\", nil)\n\n\t\t\tdocker.Header = map[string][]string{\n\t\t\t\t\"Content-Type\": {\"application\/json\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tres, err = client.Do(docker)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Docker API response 2: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tresult, err := ioutil.ReadAll(res.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(c, string(result))\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\n\t\t\tlog.Infof(c, \"Received push from %s\", commit.Author.Email)\n\t\t}\n\t}\n}\n\nfunc generateToken() (string, error) {\n\ttoken := make([]byte, 64)\n\n\tif _, err := rand.Read(token); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(token), nil\n}\n\nfunc token(w http.ResponseWriter, req *http.Request, c context.Context) {\n\tif req.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Invalid method.\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(req.Body)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tif len(body) < 1 {\n\t\thttp.Error(w, \"Invalid body.\", http.StatusBadRequest)\n\t}\n\n\tif err, username := authenticate(req); err == nil {\n\t\ttoken, err := generateToken()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Failed to generate token.\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\tquery := url.Values{}\n\t\t\tquery.Add(\"id\", \"2\")\n\t\t\tquery.Add(\"title\", username)\n\t\t\tquery.Add(\"key\", string(body))\n\n\t\t\tgitlabReq, _ := http.NewRequest(\"POST\", \"http:\/\/git.cod.uno\/api\/v3\/users\/2\/keys\", strings.NewReader(query.Encode()))\n\n\t\t\tgitlabReq.Header = map[string][]string{\n\t\t\t\t\"PRIVATE-TOKEN\": {gitlabToken},\n\t\t\t\t\"Content-Type\": {\"application\/x-www-form-urlencoded\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tclient := urlfetch.Client(c)\n\t\t\tres, err := client.Do(gitlabReq)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t} else {\n\t\t\t\tresult, err := ioutil.ReadAll(res.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(c, string(result))\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\n\t\t\tlog.Infof(c, \"Generated token for '%s'\", username)\n\t\t\tfmt.Fprintf(w, token)\n\t\t}\n\t} else {\n\t\t\/\/ This could be either invalid\/missing credentials or\n\t\t\/\/ the database failing, so let's issue a warning.\n\t\tlog.Warningf(c, err.Error())\n\t\thttp.Error(w, \"Invalid credentials.\", http.StatusForbidden)\n\t}\n}\n\nfunc authenticate(req *http.Request) (error, string) {\n\tusername, _, ok := util.BasicAuth(req)\n\tif !ok {\n\t\treturn errors.New(\"No Authorization header present.\"), \"\"\n\t}\n\treturn errors.New(\"No authorization backend present.\"), username\n}\n<commit_msg>Organize imports<commit_after>package coduno\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\n\t\"github.com\/coduno\/app\/gitlab\"\n\t\"github.com\/coduno\/app\/util\"\n)\n\nvar gitlabToken = \"YHQiqMx3qUfj8_FxpFe4\"\n\ntype ContainerCreation struct {\n\tId string `json:\"Id\"`\n}\n\ntype Handler func(http.ResponseWriter, *http.Request)\n\n\/\/ A basic wrapper that is extremely general and takes care of baseline features, such\n\/\/ as tightly timed HSTS for all requests and automatic upgrades from HTTP to HTTPS.\n\/\/ All outbound flows SHOULD be wrapped.\nfunc setupHandler(handler func(http.ResponseWriter, *http.Request, context.Context)) Handler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Redirect all HTTP requests to their HTTPS version.\n\t\t\/\/ This uses a permanent redirect to make clients adjust their bookmarks.\n\t\tif r.URL.Scheme != \"https\" {\n\t\t\tlocation := r.URL\n\t\t\tlocation.Scheme = \"https\"\n\t\t\thttp.Redirect(w, r, location.String(), http.StatusMovedPermanently)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Protect against HTTP downgrade attacks by explicitly telling\n\t\t\/\/ clients to use HTTPS.\n\t\t\/\/ max-age is computed to match the expiration date of our TLS\n\t\t\/\/ certificate (minus approx. one day buffer).\n\t\t\/\/ https:\/\/developer.mozilla.org\/docs\/Web\/Security\/HTTP_strict_transport_security\n\t\tinvalidity := time.Date(2016, time.January, 3, 0, 59, 59, 0, time.UTC)\n\t\tmaxAge := invalidity.Sub(time.Now()).Seconds()\n\t\tw.Header().Set(\"Strict-Transport-Security\", fmt.Sprintf(\"max-age=%d\", int(maxAge)))\n\n\t\t\/\/ appengine.NewContext is cheap, and context is needed in many\n\t\t\/\/ handlers, so create one here.\n\t\tc := appengine.NewContext(r)\n\n\t\t\/\/ All wrapping is done, call the original handler.\n\t\thandler(w, r, c)\n\t}\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/api\/token\", setupHandler(token))\n\thttp.HandleFunc(\"\/api\/push\", setupHandler(push))\n}\n\nfunc push(w http.ResponseWriter, req *http.Request, c context.Context) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\n\tif err != nil {\n\t\tlog.Warningf(c, err.Error())\n\t} else if len(body) < 1 {\n\t\tlog.Warningf(c, \"Received empty body.\")\n\t} else {\n\t\tpush, err := gitlab.NewPush(body)\n\n\t\tif err != nil {\n\t\t\tlog.Warningf(c, err.Error())\n\t\t} else {\n\t\t\tcommit := push.Commits[0]\n\n\t\t\tdocker, _ := http.NewRequest(\"POST\", \"http:\/\/docker.cod.uno:2375\/v1.15\/containers\/create\", strings.NewReader(`\n\t\t\t\t{\n\t\t\t\t\t\"Image\": \"coduno\/git:experimental\",\n\t\t\t\t\t\"Cmd\": [\"\/start.sh\"],\n\t\t\t\t\t\"Env\": [\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_NAME=`+push.Repository.Name+`\",\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_URL=`+push.Repository.URL+`\",\n\t\t\t\t\t\t\"CODUNO_REPOSITORY_HOMEPAGE=`+push.Repository.Homepage+`\",\n\t\t\t\t\t\t\"CODUNO_REF=`+push.Ref+`\"\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t`))\n\n\t\t\tdocker.Header = map[string][]string{\n\t\t\t\t\"Content-Type\": {\"application\/json\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tclient := urlfetch.Client(c)\n\t\t\tres, err := client.Do(docker)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Docker API response:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar result ContainerCreation\n\t\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\t\terr = json.Unmarshal(body, &result)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Received body %d: %s\", res.StatusCode, string(body))\n\t\t\t\tlog.Debugf(c, \"Unmarshalling API response: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdocker, _ = http.NewRequest(\"POST\", \"http:\/\/docker.cod.uno:2375\/v1.15\/containers\/\"+result.Id+\"\/start\", nil)\n\n\t\t\tdocker.Header = map[string][]string{\n\t\t\t\t\"Content-Type\": {\"application\/json\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tres, err = client.Do(docker)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, \"Docker API response 2: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tresult, err := ioutil.ReadAll(res.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(c, string(result))\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\n\t\t\tlog.Infof(c, \"Received push from %s\", commit.Author.Email)\n\t\t}\n\t}\n}\n\nfunc generateToken() (string, error) {\n\ttoken := make([]byte, 64)\n\n\tif _, err := rand.Read(token); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(token), nil\n}\n\nfunc token(w http.ResponseWriter, req *http.Request, c context.Context) {\n\tif req.Method != \"POST\" {\n\t\tw.Header().Set(\"Allow\", \"POST\")\n\t\thttp.Error(w, \"Invalid method.\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(req.Body)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tif len(body) < 1 {\n\t\thttp.Error(w, \"Invalid body.\", http.StatusBadRequest)\n\t}\n\n\tif err, username := authenticate(req); err == nil {\n\t\ttoken, err := generateToken()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Failed to generate token.\", http.StatusInternalServerError)\n\t\t} else {\n\t\t\tquery := url.Values{}\n\t\t\tquery.Add(\"id\", \"2\")\n\t\t\tquery.Add(\"title\", username)\n\t\t\tquery.Add(\"key\", string(body))\n\n\t\t\tgitlabReq, _ := http.NewRequest(\"POST\", \"http:\/\/git.cod.uno\/api\/v3\/users\/2\/keys\", strings.NewReader(query.Encode()))\n\n\t\t\tgitlabReq.Header = map[string][]string{\n\t\t\t\t\"PRIVATE-TOKEN\": {gitlabToken},\n\t\t\t\t\"Content-Type\": {\"application\/x-www-form-urlencoded\"},\n\t\t\t\t\"Accept\": {\"application\/json\"},\n\t\t\t}\n\n\t\t\tclient := urlfetch.Client(c)\n\t\t\tres, err := client.Do(gitlabReq)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t} else {\n\t\t\t\tresult, err := ioutil.ReadAll(res.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debugf(c, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(c, string(result))\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\n\t\t\tlog.Infof(c, \"Generated token for '%s'\", username)\n\t\t\tfmt.Fprintf(w, token)\n\t\t}\n\t} else {\n\t\t\/\/ This could be either invalid\/missing credentials or\n\t\t\/\/ the database failing, so let's issue a warning.\n\t\tlog.Warningf(c, err.Error())\n\t\thttp.Error(w, \"Invalid credentials.\", http.StatusForbidden)\n\t}\n}\n\nfunc authenticate(req *http.Request) (error, string) {\n\tusername, _, ok := util.BasicAuth(req)\n\tif !ok {\n\t\treturn errors.New(\"No Authorization header present.\"), \"\"\n\t}\n\treturn errors.New(\"No authorization backend present.\"), username\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fubarhouse\/golang-drush\/command\"\n\t\"strings\"\n)\n\nfunc main() {\n\tvar strAliases = flag.String(\"aliases\", \"\", \"alias1,alias2,alias3\")\n\tflag.Parse()\n\n\tif *strAliases != \"\" {\n\t\tfor _, Alias := range strings.Split(*strAliases, \"\\n\") {\n\n\t\t\tcmdQ := command.NewDrushCommand()\n\t\t\tcmdQ.SetAlias(Alias)\n\t\t\tcmdQ.SetCommand(\"rr\")\n\t\t\t_, errQ := cmdQ.Output()\n\t\t\tif errQ != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t}\n\n\t\t\tcmdR := command.NewDrushCommand()\n\t\t\tcmdR.SetAlias(Alias)\n\t\t\tcmdR.SetCommand(\"updb --yes\")\n\t\t\t_, errR := cmdQ.Output()\n\t\t\tif errR != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t}\n\n\t\t\tcmdS := command.NewDrushCommand()\n\t\t\tcmdS.SetAlias(Alias)\n\t\t\tcmdS.SetCommand(\"cc all\")\n\t\t\t_, errS := cmdQ.Output()\n\t\t\tif errS != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tflag.Usage()\n\t}\n}\n<commit_msg>add general purpose site checking utility.<commit_after>package main\n\nimport (\n\t\"flag\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fubarhouse\/golang-drush\/command\"\n\t\"strings\"\n)\n\nfunc main() {\n\tvar strAliases = flag.String(\"aliases\", \"\", \"alias1,alias2,alias3\")\n\tflag.Parse()\n\n\tif *strAliases != \"\" {\n\t\tfor _, Alias := range strings.Split(*strAliases, \"\\n\") {\n\n\t\t\tcmdQ := command.NewDrushCommand()\n\t\t\tcmdQ.SetAlias(Alias)\n\t\t\tcmdQ.SetCommand(\"rr\")\n\t\t\t_, errQ := cmdQ.Output()\n\t\t\tif errQ != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdQ.GetAlias(), cmdQ.GetCommand())\n\t\t\t}\n\n\t\t\tcmdR := command.NewDrushCommand()\n\t\t\tcmdR.SetAlias(Alias)\n\t\t\tcmdR.SetCommand(\"updb --yes\")\n\t\t\t_, errR := cmdR.Output()\n\t\t\tif errR != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdR.GetAlias(), cmdR.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdR.GetAlias(), cmdR.GetCommand())\n\t\t\t}\n\n\t\t\tcmdS := command.NewDrushCommand()\n\t\t\tcmdS.SetAlias(Alias)\n\t\t\tcmdS.SetCommand(\"cc all\")\n\t\t\t_, errS := cmdS.Output()\n\t\t\tif errS != nil {\n\t\t\t\tlog.Warnf(\"%v, %v, unsuccessful.\\n\", cmdS.GetAlias(), cmdS.GetCommand())\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"%v, %v, unsuccessful.\\n\", cmdS.GetAlias(), cmdS.GetCommand())\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tflag.Usage()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/testutil\"\n)\n\ntype mockDevRunner struct {\n\trunner.Runner\n\thasBuilt bool\n\thasDeployed bool\n\terrDev error\n\tcalls []string\n}\n\nfunc (r *mockDevRunner) Dev(context.Context, io.Writer, []*latest.Artifact) error {\n\tr.calls = append(r.calls, \"Dev\")\n\treturn r.errDev\n}\n\nfunc (r *mockDevRunner) HasBuilt() bool {\n\tr.calls = append(r.calls, \"HasBuilt\")\n\treturn r.hasBuilt\n}\n\nfunc (r *mockDevRunner) HasDeployed() bool {\n\tr.calls = append(r.calls, \"HasDeployed\")\n\treturn r.hasDeployed\n}\n\nfunc (r *mockDevRunner) Prune(context.Context, io.Writer) error {\n\tr.calls = append(r.calls, \"Prune\")\n\treturn nil\n}\n\nfunc (r *mockDevRunner) Cleanup(context.Context, io.Writer) error {\n\tr.calls = append(r.calls, \"Cleanup\")\n\treturn nil\n}\n\nfunc TestDoDev(t *testing.T) {\n\ttests := []struct {\n\t\tdescription string\n\t\thasBuilt bool\n\t\thasDeployed bool\n\t\texpectedCalls []string\n\t}{\n\t\t{\n\t\t\tdescription: \"cleanup and then prune\",\n\t\t\thasBuilt: true,\n\t\t\thasDeployed: true,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\", \"Cleanup\", \"Prune\"},\n\t\t},\n\t\t{\n\t\t\tdescription: \"hasn't deployed\",\n\t\t\thasBuilt: true,\n\t\t\thasDeployed: false,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\", \"Prune\"},\n\t\t},\n\t\t{\n\t\t\tdescription: \"hasn't built\",\n\t\t\thasBuilt: false,\n\t\t\thasDeployed: false,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttestutil.Run(t, test.description, func(t *testutil.T) {\n\t\t\tmockRunner := &mockDevRunner{\n\t\t\t\thasBuilt: test.hasBuilt,\n\t\t\t\thasDeployed: test.hasDeployed,\n\t\t\t\terrDev: context.Canceled,\n\t\t\t}\n\t\t\tt.Override(&createRunner, func(*config.SkaffoldOptions) (runner.Runner, *latest.SkaffoldConfig, error) {\n\t\t\t\treturn mockRunner, &latest.SkaffoldConfig{}, nil\n\t\t\t})\n\t\t\tt.Override(&opts, &config.SkaffoldOptions{\n\t\t\t\tCleanup: true,\n\t\t\t\tNoPrune: false,\n\t\t\t})\n\n\t\t\terr := doDev(context.Background(), ioutil.Discard)\n\n\t\t\tt.CheckDeepEqual(test.expectedCalls, mockRunner.calls)\n\t\t\tt.CheckDeepEqual(true, err == context.Canceled)\n\t\t})\n\t}\n}\n\ntype mockConfigChangeRunner struct {\n\trunner.Runner\n\tcycles int\n}\n\nfunc (m *mockConfigChangeRunner) Dev(context.Context, io.Writer, []*latest.Artifact) error {\n\tm.cycles++\n\tif m.cycles == 1 {\n\t\t\/\/ pass through the first cycle with a config reload\n\t\treturn runner.ErrorConfigurationChanged\n\t}\n\treturn context.Canceled\n}\n\nfunc (m *mockConfigChangeRunner) HasBuilt() bool {\n\treturn true\n}\n\nfunc (m *mockConfigChangeRunner) HasDeployed() bool {\n\treturn true\n}\n\nfunc (r *mockConfigChangeRunner) Prune(context.Context, io.Writer) error {\n\treturn nil\n}\n\nfunc (r *mockConfigChangeRunner) Cleanup(context.Context, io.Writer) error {\n\treturn nil\n}\n\nfunc TestDevConfigChange(t *testing.T) {\n\ttestutil.Run(t, \"test config change\", func(t *testutil.T) {\n\t\tmockRunner := &mockConfigChangeRunner{}\n\n\t\tt.Override(&createRunner, func(*config.SkaffoldOptions) (runner.Runner, *latest.SkaffoldConfig, error) {\n\t\t\treturn mockRunner, &latest.SkaffoldConfig{}, nil\n\t\t})\n\t\tt.Override(&opts, &config.SkaffoldOptions{\n\t\t\tCleanup: true,\n\t\t\tNoPrune: false,\n\t\t})\n\n\t\terr := doDev(context.Background(), ioutil.Discard)\n\n\t\t\/\/ ensure that we received the context.Cancled error (and not ErrorConfigurationChanged)\n\t\t\/\/ also ensure that the we run through dev cycles (since we reloaded on the first),\n\t\t\/\/ and exit after a real error is received\n\t\tt.CheckDeepEqual(true, err == context.Canceled)\n\t\tt.CheckDeepEqual(mockRunner.cycles, 2)\n\t})\n}\n<commit_msg>fix linter error<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/testutil\"\n)\n\ntype mockDevRunner struct {\n\trunner.Runner\n\thasBuilt bool\n\thasDeployed bool\n\terrDev error\n\tcalls []string\n}\n\nfunc (r *mockDevRunner) Dev(context.Context, io.Writer, []*latest.Artifact) error {\n\tr.calls = append(r.calls, \"Dev\")\n\treturn r.errDev\n}\n\nfunc (r *mockDevRunner) HasBuilt() bool {\n\tr.calls = append(r.calls, \"HasBuilt\")\n\treturn r.hasBuilt\n}\n\nfunc (r *mockDevRunner) HasDeployed() bool {\n\tr.calls = append(r.calls, \"HasDeployed\")\n\treturn r.hasDeployed\n}\n\nfunc (r *mockDevRunner) Prune(context.Context, io.Writer) error {\n\tr.calls = append(r.calls, \"Prune\")\n\treturn nil\n}\n\nfunc (r *mockDevRunner) Cleanup(context.Context, io.Writer) error {\n\tr.calls = append(r.calls, \"Cleanup\")\n\treturn nil\n}\n\nfunc TestDoDev(t *testing.T) {\n\ttests := []struct {\n\t\tdescription string\n\t\thasBuilt bool\n\t\thasDeployed bool\n\t\texpectedCalls []string\n\t}{\n\t\t{\n\t\t\tdescription: \"cleanup and then prune\",\n\t\t\thasBuilt: true,\n\t\t\thasDeployed: true,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\", \"Cleanup\", \"Prune\"},\n\t\t},\n\t\t{\n\t\t\tdescription: \"hasn't deployed\",\n\t\t\thasBuilt: true,\n\t\t\thasDeployed: false,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\", \"Prune\"},\n\t\t},\n\t\t{\n\t\t\tdescription: \"hasn't built\",\n\t\t\thasBuilt: false,\n\t\t\thasDeployed: false,\n\t\t\texpectedCalls: []string{\"Dev\", \"HasDeployed\", \"HasBuilt\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttestutil.Run(t, test.description, func(t *testutil.T) {\n\t\t\tmockRunner := &mockDevRunner{\n\t\t\t\thasBuilt: test.hasBuilt,\n\t\t\t\thasDeployed: test.hasDeployed,\n\t\t\t\terrDev: context.Canceled,\n\t\t\t}\n\t\t\tt.Override(&createRunner, func(*config.SkaffoldOptions) (runner.Runner, *latest.SkaffoldConfig, error) {\n\t\t\t\treturn mockRunner, &latest.SkaffoldConfig{}, nil\n\t\t\t})\n\t\t\tt.Override(&opts, &config.SkaffoldOptions{\n\t\t\t\tCleanup: true,\n\t\t\t\tNoPrune: false,\n\t\t\t})\n\n\t\t\terr := doDev(context.Background(), ioutil.Discard)\n\n\t\t\tt.CheckDeepEqual(test.expectedCalls, mockRunner.calls)\n\t\t\tt.CheckDeepEqual(true, err == context.Canceled)\n\t\t})\n\t}\n}\n\ntype mockConfigChangeRunner struct {\n\trunner.Runner\n\tcycles int\n}\n\nfunc (m *mockConfigChangeRunner) Dev(context.Context, io.Writer, []*latest.Artifact) error {\n\tm.cycles++\n\tif m.cycles == 1 {\n\t\t\/\/ pass through the first cycle with a config reload\n\t\treturn runner.ErrorConfigurationChanged\n\t}\n\treturn context.Canceled\n}\n\nfunc (m *mockConfigChangeRunner) HasBuilt() bool {\n\treturn true\n}\n\nfunc (m *mockConfigChangeRunner) HasDeployed() bool {\n\treturn true\n}\n\nfunc (m *mockConfigChangeRunner) Prune(context.Context, io.Writer) error {\n\treturn nil\n}\n\nfunc (m *mockConfigChangeRunner) Cleanup(context.Context, io.Writer) error {\n\treturn nil\n}\n\nfunc TestDevConfigChange(t *testing.T) {\n\ttestutil.Run(t, \"test config change\", func(t *testutil.T) {\n\t\tmockRunner := &mockConfigChangeRunner{}\n\n\t\tt.Override(&createRunner, func(*config.SkaffoldOptions) (runner.Runner, *latest.SkaffoldConfig, error) {\n\t\t\treturn mockRunner, &latest.SkaffoldConfig{}, nil\n\t\t})\n\t\tt.Override(&opts, &config.SkaffoldOptions{\n\t\t\tCleanup: true,\n\t\t\tNoPrune: false,\n\t\t})\n\n\t\terr := doDev(context.Background(), ioutil.Discard)\n\n\t\t\/\/ ensure that we received the context.Cancled error (and not ErrorConfigurationChanged)\n\t\t\/\/ also ensure that the we run through dev cycles (since we reloaded on the first),\n\t\t\/\/ and exit after a real error is received\n\t\tt.CheckDeepEqual(true, err == context.Canceled)\n\t\tt.CheckDeepEqual(mockRunner.cycles, 2)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package clinamespace\n\nimport (\n\t\"fmt\"\n\n\t\"os\"\n\n\t\"github.com\/containerum\/chkit\/pkg\/context\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/coblog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar accessAliases = []string{\"namespace-access\", \"ns-access\"}\n\nfunc GetAccess(ctx *context.Context) *cobra.Command {\n\tcommand := &cobra.Command{\n\t\tUse: \"access\",\n\t\tAliases: accessAliases,\n\t\tShort: \"get namespace access\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tlogger := coblog.Logger(cmd)\n\t\t\tvar flags = cmd.Flags()\n\t\t\tif all, _ := flags.GetBool(\"all\"); all {\n\t\t\t\tlogger.Debugf(\"getting access list\")\n\t\t\t\tlist, err := ctx.Client.GetAccessList()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(list.RenderTable())\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tvar nsName = ctx.Namespace\n\t\t\t\tif len(args) == 1 {\n\t\t\t\t\tnsName = args[0]\n\t\t\t\t} else if len(args) > 1 {\n\t\t\t\t\tcmd.Help()\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tlogger.Debugf(\"getting namespace %q access\", ctx.Namespace)\n\t\t\t\tacc, err := ctx.Client.GetAccess(nsName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(acc.RenderTable())\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\tcommand.PersistentFlags().\n\t\tBool(\"all\", false, \"get access rights to all namespaces\")\n\treturn command\n}\n<commit_msg>add logs #67<commit_after>package clinamespace\n\nimport (\n\t\"fmt\"\n\n\t\"os\"\n\n\t\"github.com\/containerum\/chkit\/pkg\/context\"\n\t\"github.com\/containerum\/chkit\/pkg\/util\/coblog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar accessAliases = []string{\"namespace-access\", \"ns-access\"}\n\nfunc GetAccess(ctx *context.Context) *cobra.Command {\n\tcommand := &cobra.Command{\n\t\tUse: \"access\",\n\t\tAliases: accessAliases,\n\t\tShort: \"get namespace access\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tlogger := coblog.Logger(cmd)\n\t\t\tvar flags = cmd.Flags()\n\t\t\tif all, _ := flags.GetBool(\"all\"); all {\n\t\t\t\tlogger.Debugf(\"getting access list\")\n\t\t\t\tlist, err := ctx.Client.GetAccessList()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.WithError(err).Errorf(\"unable to get namespace access list\")\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(list.RenderTable())\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tvar nsName = ctx.Namespace\n\t\t\t\tif len(args) == 1 {\n\t\t\t\t\tnsName = args[0]\n\t\t\t\t} else if len(args) > 1 {\n\t\t\t\t\tcmd.Help()\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tlogger.Debugf(\"getting namespace %q access\", ctx.Namespace)\n\t\t\t\tacc, err := ctx.Client.GetAccess(nsName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.WithError(err).Errorf(\"uanble to get namespace %q access\", nsName)\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tfmt.Println(acc.RenderTable())\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t}\n\tcommand.PersistentFlags().\n\t\tBool(\"all\", false, \"get access rights to all namespaces\")\n\treturn command\n}\n<|endoftext|>"} {"text":"<commit_before>package describe\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/units\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\n\tbuildapi \"github.com\/openshift\/origin\/pkg\/build\/api\"\n\t\"github.com\/openshift\/origin\/pkg\/client\"\n\timageapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n)\n\nconst emptyString = \"<none>\"\n\nfunc tabbedString(f func(*tabwriter.Writer) error) (string, error) {\n\tout := new(tabwriter.Writer)\n\tbuf := &bytes.Buffer{}\n\tout.Init(buf, 0, 8, 1, '\\t', 0)\n\n\terr := f(out)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout.Flush()\n\tstr := string(buf.String())\n\treturn str, nil\n}\n\nfunc toString(v interface{}) string {\n\tvalue := fmt.Sprintf(\"%v\", v)\n\tif len(value) == 0 {\n\t\tvalue = emptyString\n\t}\n\treturn value\n}\n\nfunc bold(v interface{}) string {\n\treturn \"\\033[1m\" + toString(v) + \"\\033[0m\"\n}\n\nfunc convertEnv(env []api.EnvVar) map[string]string {\n\tresult := make(map[string]string, len(env))\n\tfor _, e := range env {\n\t\tresult[e.Name] = toString(e.Value)\n\t}\n\treturn result\n}\n\nfunc formatEnv(env api.EnvVar) string {\n\tif env.ValueFrom != nil && env.ValueFrom.FieldRef != nil {\n\t\treturn fmt.Sprintf(\"%s=<%s>\", env.Name, env.ValueFrom.FieldRef.FieldPath)\n\t}\n\treturn fmt.Sprintf(\"%s=%s\", env.Name, env.Value)\n}\n\nfunc formatString(out *tabwriter.Writer, label string, v interface{}) {\n\tfmt.Fprintf(out, fmt.Sprintf(\"%s:\\t%s\\n\", label, toString(v)))\n}\n\nfunc formatTime(out *tabwriter.Writer, label string, t time.Time) {\n\tfmt.Fprintf(out, fmt.Sprintf(\"%s:\\t%s ago\\n\", label, formatRelativeTime(t)))\n}\n\nfunc formatLabels(labelMap map[string]string) string {\n\treturn labels.Set(labelMap).String()\n}\n\nfunc extractAnnotations(annotations map[string]string, keys ...string) ([]string, map[string]string) {\n\textracted := make([]string, len(keys))\n\tremaining := make(map[string]string)\n\tfor k, v := range annotations {\n\t\tremaining[k] = v\n\t}\n\tfor i, key := range keys {\n\t\textracted[i] = remaining[key]\n\t\tdelete(remaining, key)\n\t}\n\treturn extracted, remaining\n}\n\nfunc formatAnnotations(out *tabwriter.Writer, m api.ObjectMeta, prefix string) {\n\tvalues, annotations := extractAnnotations(m.Annotations, \"description\")\n\tif len(values[0]) > 0 {\n\t\tformatString(out, prefix+\"Description\", values[0])\n\t}\n\tkeys := sets.NewString()\n\tfor k := range annotations {\n\t\tkeys.Insert(k)\n\t}\n\tfor i, key := range keys.List() {\n\t\tif i == 0 {\n\t\t\tformatString(out, prefix+\"Annotations\", fmt.Sprintf(\"%s=%s\", key, annotations[key]))\n\t\t} else {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s=%s\\n\", prefix, key, annotations[key])\n\t\t}\n\t}\n}\n\nvar timeNowFn = func() time.Time {\n\treturn time.Now()\n}\n\nfunc formatRelativeTime(t time.Time) string {\n\treturn units.HumanDuration(timeNowFn().Sub(t))\n}\n\n\/\/ FormatRelativeTime converts a time field into a human readable age string (hours, minutes, days).\nfunc FormatRelativeTime(t time.Time) string {\n\treturn formatRelativeTime(t)\n}\n\nfunc formatMeta(out *tabwriter.Writer, m api.ObjectMeta) {\n\tformatString(out, \"Name\", m.Name)\n\tif !m.CreationTimestamp.IsZero() {\n\t\tformatTime(out, \"Created\", m.CreationTimestamp.Time)\n\t}\n\tformatString(out, \"Labels\", formatLabels(m.Labels))\n\tformatAnnotations(out, m, \"\")\n}\n\n\/\/ webhookURL assembles map with of webhook type as key and webhook url and value\nfunc webhookURL(c *buildapi.BuildConfig, cli client.BuildConfigsNamespacer) map[string]string {\n\tresult := map[string]string{}\n\tfor _, trigger := range c.Spec.Triggers {\n\t\twhTrigger := \"\"\n\t\tswitch trigger.Type {\n\t\tcase buildapi.GitHubWebHookBuildTriggerType:\n\t\t\twhTrigger = trigger.GitHubWebHook.Secret\n\t\tcase buildapi.GenericWebHookBuildTriggerType:\n\t\t\twhTrigger = trigger.GenericWebHook.Secret\n\t\t}\n\t\tif len(whTrigger) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tout := \"\"\n\t\turl, err := cli.BuildConfigs(c.Namespace).WebHookURL(c.Name, &trigger)\n\t\tif err != nil {\n\t\t\tout = fmt.Sprintf(\"<error: %s>\", err.Error())\n\t\t} else {\n\t\t\tout = url.String()\n\t\t}\n\t\tresult[string(trigger.Type)] = out\n\t}\n\treturn result\n}\n\nfunc formatImageStreamTags(out *tabwriter.Writer, stream *imageapi.ImageStream) {\n\tif len(stream.Status.Tags) == 0 {\n\t\tfmt.Fprintf(out, \"Tags:\\t<none>\\n\")\n\t\treturn\n\t}\n\tfmt.Fprint(out, \"\\nTag\\tSpec\\tCreated\\tPullSpec\\tImage\\n\")\n\tsortedTags := []string{}\n\tfor k := range stream.Status.Tags {\n\t\tsortedTags = append(sortedTags, k)\n\t}\n\tfor k := range stream.Spec.Tags {\n\t\tif _, ok := stream.Status.Tags[k]; !ok {\n\t\t\tsortedTags = append(sortedTags, k)\n\t\t}\n\t}\n\tsort.Strings(sortedTags)\n\tfor _, tag := range sortedTags {\n\t\ttagRef, ok := stream.Spec.Tags[tag]\n\t\tspecTag := \"\"\n\t\tif ok {\n\t\t\tif tagRef.From != nil {\n\t\t\t\tnamePair := \"\"\n\t\t\t\tif len(tagRef.From.Namespace) > 0 && tagRef.From.Namespace != stream.Namespace {\n\t\t\t\t\tnamePair = fmt.Sprintf(\"%s\/%s\", tagRef.From.Namespace, tagRef.From.Name)\n\t\t\t\t} else {\n\t\t\t\t\tnamePair = tagRef.From.Name\n\t\t\t\t}\n\n\t\t\t\tswitch tagRef.From.Kind {\n\t\t\t\tcase \"ImageStreamTag\", \"ImageStreamImage\":\n\t\t\t\t\tspecTag = namePair\n\t\t\t\tcase \"DockerImage\":\n\t\t\t\t\tspecTag = tagRef.From.Name\n\t\t\t\tdefault:\n\t\t\t\t\tspecTag = fmt.Sprintf(\"<unknown %s> %s\", tagRef.From.Kind, namePair)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tspecTag = \"<pushed>\"\n\t\t}\n\t\tif taglist, ok := stream.Status.Tags[tag]; ok {\n\t\t\tfor _, event := range taglist.Items {\n\t\t\t\td := timeNowFn().Sub(event.Created.Time)\n\t\t\t\timage := event.Image\n\t\t\t\tref, err := imageapi.ParseDockerImageReference(event.DockerImageReference)\n\t\t\t\tif err == nil {\n\t\t\t\t\tif ref.ID == image {\n\t\t\t\t\t\timage = \"\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t%s ago\\t%s\\t%v\\n\",\n\t\t\t\t\ttag,\n\t\t\t\t\tspecTag,\n\t\t\t\t\tunits.HumanDuration(d),\n\t\t\t\t\tevent.DockerImageReference,\n\t\t\t\t\timage)\n\t\t\t\tif tag != \"\" {\n\t\t\t\t\ttag = \"\"\n\t\t\t\t}\n\t\t\t\tif specTag != \"\" {\n\t\t\t\t\tspecTag = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t\\t<not available>\\t<not available>\\n\", tag, specTag)\n\t\t}\n\t}\n}\n<commit_msg>Fixed how tags are being printed when describing ImageStream. Previously the image.Spec.Tags was ignored, which resulted in not showing the tags for which there were errors during imports.<commit_after>package describe\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/units\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\n\tbuildapi \"github.com\/openshift\/origin\/pkg\/build\/api\"\n\t\"github.com\/openshift\/origin\/pkg\/client\"\n\timageapi \"github.com\/openshift\/origin\/pkg\/image\/api\"\n)\n\nconst emptyString = \"<none>\"\n\nfunc tabbedString(f func(*tabwriter.Writer) error) (string, error) {\n\tout := new(tabwriter.Writer)\n\tbuf := &bytes.Buffer{}\n\tout.Init(buf, 0, 8, 1, '\\t', 0)\n\n\terr := f(out)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout.Flush()\n\tstr := string(buf.String())\n\treturn str, nil\n}\n\nfunc toString(v interface{}) string {\n\tvalue := fmt.Sprintf(\"%v\", v)\n\tif len(value) == 0 {\n\t\tvalue = emptyString\n\t}\n\treturn value\n}\n\nfunc bold(v interface{}) string {\n\treturn \"\\033[1m\" + toString(v) + \"\\033[0m\"\n}\n\nfunc convertEnv(env []api.EnvVar) map[string]string {\n\tresult := make(map[string]string, len(env))\n\tfor _, e := range env {\n\t\tresult[e.Name] = toString(e.Value)\n\t}\n\treturn result\n}\n\nfunc formatEnv(env api.EnvVar) string {\n\tif env.ValueFrom != nil && env.ValueFrom.FieldRef != nil {\n\t\treturn fmt.Sprintf(\"%s=<%s>\", env.Name, env.ValueFrom.FieldRef.FieldPath)\n\t}\n\treturn fmt.Sprintf(\"%s=%s\", env.Name, env.Value)\n}\n\nfunc formatString(out *tabwriter.Writer, label string, v interface{}) {\n\tfmt.Fprintf(out, fmt.Sprintf(\"%s:\\t%s\\n\", label, toString(v)))\n}\n\nfunc formatTime(out *tabwriter.Writer, label string, t time.Time) {\n\tfmt.Fprintf(out, fmt.Sprintf(\"%s:\\t%s ago\\n\", label, formatRelativeTime(t)))\n}\n\nfunc formatLabels(labelMap map[string]string) string {\n\treturn labels.Set(labelMap).String()\n}\n\nfunc extractAnnotations(annotations map[string]string, keys ...string) ([]string, map[string]string) {\n\textracted := make([]string, len(keys))\n\tremaining := make(map[string]string)\n\tfor k, v := range annotations {\n\t\tremaining[k] = v\n\t}\n\tfor i, key := range keys {\n\t\textracted[i] = remaining[key]\n\t\tdelete(remaining, key)\n\t}\n\treturn extracted, remaining\n}\n\nfunc formatAnnotations(out *tabwriter.Writer, m api.ObjectMeta, prefix string) {\n\tvalues, annotations := extractAnnotations(m.Annotations, \"description\")\n\tif len(values[0]) > 0 {\n\t\tformatString(out, prefix+\"Description\", values[0])\n\t}\n\tkeys := sets.NewString()\n\tfor k := range annotations {\n\t\tkeys.Insert(k)\n\t}\n\tfor i, key := range keys.List() {\n\t\tif i == 0 {\n\t\t\tformatString(out, prefix+\"Annotations\", fmt.Sprintf(\"%s=%s\", key, annotations[key]))\n\t\t} else {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s=%s\\n\", prefix, key, annotations[key])\n\t\t}\n\t}\n}\n\nvar timeNowFn = func() time.Time {\n\treturn time.Now()\n}\n\nfunc formatRelativeTime(t time.Time) string {\n\treturn units.HumanDuration(timeNowFn().Sub(t))\n}\n\n\/\/ FormatRelativeTime converts a time field into a human readable age string (hours, minutes, days).\nfunc FormatRelativeTime(t time.Time) string {\n\treturn formatRelativeTime(t)\n}\n\nfunc formatMeta(out *tabwriter.Writer, m api.ObjectMeta) {\n\tformatString(out, \"Name\", m.Name)\n\tif !m.CreationTimestamp.IsZero() {\n\t\tformatTime(out, \"Created\", m.CreationTimestamp.Time)\n\t}\n\tformatString(out, \"Labels\", formatLabels(m.Labels))\n\tformatAnnotations(out, m, \"\")\n}\n\n\/\/ webhookURL assembles map with of webhook type as key and webhook url and value\nfunc webhookURL(c *buildapi.BuildConfig, cli client.BuildConfigsNamespacer) map[string]string {\n\tresult := map[string]string{}\n\tfor _, trigger := range c.Spec.Triggers {\n\t\twhTrigger := \"\"\n\t\tswitch trigger.Type {\n\t\tcase buildapi.GitHubWebHookBuildTriggerType:\n\t\t\twhTrigger = trigger.GitHubWebHook.Secret\n\t\tcase buildapi.GenericWebHookBuildTriggerType:\n\t\t\twhTrigger = trigger.GenericWebHook.Secret\n\t\t}\n\t\tif len(whTrigger) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tout := \"\"\n\t\turl, err := cli.BuildConfigs(c.Namespace).WebHookURL(c.Name, &trigger)\n\t\tif err != nil {\n\t\t\tout = fmt.Sprintf(\"<error: %s>\", err.Error())\n\t\t} else {\n\t\t\tout = url.String()\n\t\t}\n\t\tresult[string(trigger.Type)] = out\n\t}\n\treturn result\n}\n\nfunc formatImageStreamTags(out *tabwriter.Writer, stream *imageapi.ImageStream) {\n\tif len(stream.Status.Tags) == 0 && len(stream.Spec.Tags) == 0 {\n\t\tfmt.Fprintf(out, \"Tags:\\t<none>\\n\")\n\t\treturn\n\t}\n\tfmt.Fprint(out, \"\\nTag\\tSpec\\tCreated\\tPullSpec\\tImage\\n\")\n\tsortedTags := []string{}\n\tfor k := range stream.Status.Tags {\n\t\tsortedTags = append(sortedTags, k)\n\t}\n\tfor k := range stream.Spec.Tags {\n\t\tif _, ok := stream.Status.Tags[k]; !ok {\n\t\t\tsortedTags = append(sortedTags, k)\n\t\t}\n\t}\n\tsort.Strings(sortedTags)\n\tfor _, tag := range sortedTags {\n\t\ttagRef, ok := stream.Spec.Tags[tag]\n\t\tspecTag := \"\"\n\t\tif ok {\n\t\t\tif tagRef.From != nil {\n\t\t\t\tnamePair := \"\"\n\t\t\t\tif len(tagRef.From.Namespace) > 0 && tagRef.From.Namespace != stream.Namespace {\n\t\t\t\t\tnamePair = fmt.Sprintf(\"%s\/%s\", tagRef.From.Namespace, tagRef.From.Name)\n\t\t\t\t} else {\n\t\t\t\t\tnamePair = tagRef.From.Name\n\t\t\t\t}\n\n\t\t\t\tswitch tagRef.From.Kind {\n\t\t\t\tcase \"ImageStreamTag\", \"ImageStreamImage\":\n\t\t\t\t\tspecTag = namePair\n\t\t\t\tcase \"DockerImage\":\n\t\t\t\t\tspecTag = tagRef.From.Name\n\t\t\t\tdefault:\n\t\t\t\t\tspecTag = fmt.Sprintf(\"<unknown %s> %s\", tagRef.From.Kind, namePair)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tspecTag = \"<pushed>\"\n\t\t}\n\t\tif taglist, ok := stream.Status.Tags[tag]; ok {\n\t\t\tfor _, event := range taglist.Items {\n\t\t\t\td := timeNowFn().Sub(event.Created.Time)\n\t\t\t\timage := event.Image\n\t\t\t\tref, err := imageapi.ParseDockerImageReference(event.DockerImageReference)\n\t\t\t\tif err == nil {\n\t\t\t\t\tif ref.ID == image {\n\t\t\t\t\t\timage = \"\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t%s ago\\t%s\\t%v\\n\",\n\t\t\t\t\ttag,\n\t\t\t\t\tspecTag,\n\t\t\t\t\tunits.HumanDuration(d),\n\t\t\t\t\tevent.DockerImageReference,\n\t\t\t\t\timage)\n\t\t\t\tif tag != \"\" {\n\t\t\t\t\ttag = \"\"\n\t\t\t\t}\n\t\t\t\tif specTag != \"\" {\n\t\t\t\t\tspecTag = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintf(out, \"%s\\t%s\\t\\t<not available>\\t<not available>\\n\", tag, specTag)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package path\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/eval\"\n\t\"src.elv.sh\/pkg\/eval\/errs\"\n\t. \"src.elv.sh\/pkg\/eval\/evaltest\"\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nvar testDir = testutil.Dir{\n\t\"d\": testutil.Dir{\n\t\t\"f\": \"\",\n\t},\n}\n\n\/\/ A regular expression fragment to match the directory part of an absolute\n\/\/ path. QuoteMeta is needed since on Windows filepath.Separator is '\\\\'.\nvar anyDir = \"^.*\" + regexp.QuoteMeta(string(filepath.Separator))\n\nfunc TestPath(t *testing.T) {\n\ttmpdir := testutil.InTempDir(t)\n\ttestutil.ApplyDir(testDir)\n\n\tabsPath, err := filepath.Abs(\"a\/b\/c.png\")\n\tif err != nil {\n\t\tpanic(\"unable to convert a\/b\/c.png to an absolute path\")\n\t}\n\n\tTestWithSetup(t, importPathModule,\n\t\t\/\/ This block of tests is not meant to be comprehensive. Their primary purpose is to simply\n\t\t\/\/ ensure the Elvish command is correctly mapped to the relevant Go function. We assume the\n\t\t\/\/ Go function behaves correctly.\n\t\tThat(\"path:abs a\/b\/c.png\").Puts(absPath),\n\t\tThat(\"path:base a\/b\/d.png\").Puts(\"d.png\"),\n\t\tThat(\"path:clean .\/.\/x\").Puts(\"x\"),\n\t\tThat(\"path:clean a\/b\/..\/.\/c\").Puts(filepath.Join(\"a\", \"c\")),\n\t\tThat(\"path:dir a\/b\/d.png\").Puts(filepath.Join(\"a\", \"b\")),\n\t\tThat(\"path:ext a\/b\/e.png\").Puts(\".png\"),\n\t\tThat(\"path:ext a\/b\/s\").Puts(\"\"),\n\t\tThat(\"path:is-abs a\/b\/s\").Puts(false),\n\t\tThat(\"path:is-abs \"+absPath).Puts(true),\n\n\t\t\/\/ Elvish \"path:\" module functions that are not trivial wrappers around a Go stdlib function\n\t\t\/\/ should have comprehensive tests below this comment.\n\t\tThat(\"path:is-dir \"+tmpdir).Puts(true),\n\t\tThat(\"path:is-dir d\").Puts(true),\n\t\tThat(\"path:is-dir d\/f\").Puts(false),\n\t\tThat(\"path:is-dir bad\").Puts(false),\n\n\t\tThat(\"path:is-regular \"+tmpdir).Puts(false),\n\t\tThat(\"path:is-regular d\").Puts(false),\n\t\tThat(\"path:is-regular d\/f\").Puts(true),\n\t\tThat(\"path:is-regular bad\").Puts(false),\n\n\t\t\/\/ Verify the commands for creating temporary filesystem objects work correctly.\n\t\tThat(\"x = (path:temp-dir)\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: anyDir + `elvish-.*$`}),\n\t\tThat(\"x = (path:temp-dir 'x-*.y')\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: anyDir + `x-.*\\.y$`}),\n\t\tThat(\"x = (path:temp-dir &dir=. 'x-*.y')\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: `^(\\.[\/\\\/])?x-.*\\.y$`}),\n\t\tThat(\"x = (path:temp-dir &dir=.)\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: `^(\\.[\/\\\/])?elvish-.*$`}),\n\t\tThat(\"path:temp-dir a b\").Throws(\n\t\t\terrs.ArityMismatch{What: \"arguments\", ValidLow: 0, ValidHigh: 1, Actual: 2},\n\t\t\t\"path:temp-dir a b\"),\n\n\t\tThat(\"f = (path:temp-file)\", \"fclose $f\", \"put $f[fd]\", \"rm $f[name]\").\n\t\t\tPuts(-1),\n\t\tThat(\"f = (path:temp-file)\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: anyDir + `elvish-.*$`}),\n\t\tThat(\"f = (path:temp-file 'x-*.y')\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: anyDir + `x-.*\\.y$`}),\n\t\tThat(\"f = (path:temp-file &dir=. 'x-*.y')\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: `^(\\.[\/\\\/])?x-.*\\.y$`}),\n\t\tThat(\"f = (path:temp-file &dir=.)\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: `^(\\.[\/\\\/])?elvish-.*$`}),\n\t\tThat(\"path:temp-file a b\").Throws(\n\t\t\terrs.ArityMismatch{What: \"arguments\", ValidLow: 0, ValidHigh: 1, Actual: 2},\n\t\t\t\"path:temp-file a b\"),\n\t)\n}\n\nvar symlinks = []struct {\n\tpath string\n\ttarget string\n}{\n\t{\"d\/s-f\", \"f\"},\n\t{\"s-d\", \"d\"},\n\t{\"s-d-f\", \"d\/f\"},\n\t{\"s-bad\", \"bad\"},\n}\n\nfunc TestPath_Symlink(t *testing.T) {\n\ttestutil.InTempDir(t)\n\ttestutil.ApplyDir(testDir)\n\t\/\/ testutil.ApplyDir(testDirSymlinks)\n\tfor _, link := range symlinks {\n\t\terr := os.Symlink(link.target, link.path)\n\t\tif err != nil {\n\t\t\t\/\/ Creating symlinks requires a special permission on Windows. If\n\t\t\t\/\/ the user doesn't have that permission, just skip the whole test.\n\t\t\tt.Skip(err)\n\t\t}\n\t}\n\n\tTestWithSetup(t, importPathModule,\n\t\tThat(\"path:eval-symlinks d\/f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks d\/s-f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks s-d\/f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks s-bad\").Throws(AnyError),\n\n\t\tThat(\"path:is-dir s-d\").Puts(false),\n\t\tThat(\"path:is-dir s-d &follow-symlink\").Puts(true),\n\t\tThat(\"path:is-dir s-d-f\").Puts(false),\n\t\tThat(\"path:is-dir s-d-f &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-dir s-bad\").Puts(false),\n\t\tThat(\"path:is-dir s-bad &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-dir bad\").Puts(false),\n\t\tThat(\"path:is-dir bad &follow-symlink\").Puts(false),\n\n\t\tThat(\"path:is-regular s-d\").Puts(false),\n\t\tThat(\"path:is-regular s-d &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-regular s-d-f\").Puts(false),\n\t\tThat(\"path:is-regular s-d-f &follow-symlink\").Puts(true),\n\t\tThat(\"path:is-regular s-bad\").Puts(false),\n\t\tThat(\"path:is-regular s-bad &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-regular bad\").Puts(false),\n\t\tThat(\"path:is-regular bad &follow-symlink\").Puts(false),\n\t)\n}\n\nfunc importPathModule(ev *eval.Evaler) {\n\tev.AddGlobal(eval.NsBuilder{}.AddNs(\"path\", Ns).Ns())\n}\n<commit_msg>Fix mods\/path test on Windows.<commit_after>package path\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/eval\"\n\t\"src.elv.sh\/pkg\/eval\/errs\"\n\t. \"src.elv.sh\/pkg\/eval\/evaltest\"\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nvar testDir = testutil.Dir{\n\t\"d\": testutil.Dir{\n\t\t\"f\": \"\",\n\t},\n}\n\n\/\/ A regular expression fragment to match the directory part of an absolute\n\/\/ path. QuoteMeta is needed since on Windows filepath.Separator is '\\\\'.\nvar anyDir = \"^.*\" + regexp.QuoteMeta(string(filepath.Separator))\n\nfunc TestPath(t *testing.T) {\n\ttmpdir := testutil.InTempDir(t)\n\ttestutil.ApplyDir(testDir)\n\n\tabsPath, err := filepath.Abs(\"a\/b\/c.png\")\n\tif err != nil {\n\t\tpanic(\"unable to convert a\/b\/c.png to an absolute path\")\n\t}\n\n\tTestWithSetup(t, importPathModule,\n\t\t\/\/ This block of tests is not meant to be comprehensive. Their primary purpose is to simply\n\t\t\/\/ ensure the Elvish command is correctly mapped to the relevant Go function. We assume the\n\t\t\/\/ Go function behaves correctly.\n\t\tThat(\"path:abs a\/b\/c.png\").Puts(absPath),\n\t\tThat(\"path:base a\/b\/d.png\").Puts(\"d.png\"),\n\t\tThat(\"path:clean .\/.\/x\").Puts(\"x\"),\n\t\tThat(\"path:clean a\/b\/..\/.\/c\").Puts(filepath.Join(\"a\", \"c\")),\n\t\tThat(\"path:dir a\/b\/d.png\").Puts(filepath.Join(\"a\", \"b\")),\n\t\tThat(\"path:ext a\/b\/e.png\").Puts(\".png\"),\n\t\tThat(\"path:ext a\/b\/s\").Puts(\"\"),\n\t\tThat(\"path:is-abs a\/b\/s\").Puts(false),\n\t\tThat(\"path:is-abs \"+absPath).Puts(true),\n\n\t\t\/\/ Elvish \"path:\" module functions that are not trivial wrappers around a Go stdlib function\n\t\t\/\/ should have comprehensive tests below this comment.\n\t\tThat(\"path:is-dir \"+tmpdir).Puts(true),\n\t\tThat(\"path:is-dir d\").Puts(true),\n\t\tThat(\"path:is-dir d\/f\").Puts(false),\n\t\tThat(\"path:is-dir bad\").Puts(false),\n\n\t\tThat(\"path:is-regular \"+tmpdir).Puts(false),\n\t\tThat(\"path:is-regular d\").Puts(false),\n\t\tThat(\"path:is-regular d\/f\").Puts(true),\n\t\tThat(\"path:is-regular bad\").Puts(false),\n\n\t\t\/\/ Verify the commands for creating temporary filesystem objects work correctly.\n\t\tThat(\"x = (path:temp-dir)\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: anyDir + `elvish-.*$`}),\n\t\tThat(\"x = (path:temp-dir 'x-*.y')\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: anyDir + `x-.*\\.y$`}),\n\t\tThat(\"x = (path:temp-dir &dir=. 'x-*.y')\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: `^(\\.[\/\\\\])?x-.*\\.y$`}),\n\t\tThat(\"x = (path:temp-dir &dir=.)\", \"rmdir $x\", \"put $x\").Puts(\n\t\t\tMatchingRegexp{Pattern: `^(\\.[\/\\\\])?elvish-.*$`}),\n\t\tThat(\"path:temp-dir a b\").Throws(\n\t\t\terrs.ArityMismatch{What: \"arguments\", ValidLow: 0, ValidHigh: 1, Actual: 2},\n\t\t\t\"path:temp-dir a b\"),\n\n\t\tThat(\"f = (path:temp-file)\", \"fclose $f\", \"put $f[fd]\", \"rm $f[name]\").\n\t\t\tPuts(-1),\n\t\tThat(\"f = (path:temp-file)\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: anyDir + `elvish-.*$`}),\n\t\tThat(\"f = (path:temp-file 'x-*.y')\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: anyDir + `x-.*\\.y$`}),\n\t\tThat(\"f = (path:temp-file &dir=. 'x-*.y')\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: `^(\\.[\/\\\\])?x-.*\\.y$`}),\n\t\tThat(\"f = (path:temp-file &dir=.)\", \"put $f[name]\", \"fclose $f\", \"rm $f[name]\").\n\t\t\tPuts(MatchingRegexp{Pattern: `^(\\.[\/\\\\])?elvish-.*$`}),\n\t\tThat(\"path:temp-file a b\").Throws(\n\t\t\terrs.ArityMismatch{What: \"arguments\", ValidLow: 0, ValidHigh: 1, Actual: 2},\n\t\t\t\"path:temp-file a b\"),\n\t)\n}\n\nvar symlinks = []struct {\n\tpath string\n\ttarget string\n}{\n\t{\"d\/s-f\", \"f\"},\n\t{\"s-d\", \"d\"},\n\t{\"s-d-f\", \"d\/f\"},\n\t{\"s-bad\", \"bad\"},\n}\n\nfunc TestPath_Symlink(t *testing.T) {\n\ttestutil.InTempDir(t)\n\ttestutil.ApplyDir(testDir)\n\t\/\/ testutil.ApplyDir(testDirSymlinks)\n\tfor _, link := range symlinks {\n\t\terr := os.Symlink(link.target, link.path)\n\t\tif err != nil {\n\t\t\t\/\/ Creating symlinks requires a special permission on Windows. If\n\t\t\t\/\/ the user doesn't have that permission, just skip the whole test.\n\t\t\tt.Skip(err)\n\t\t}\n\t}\n\n\tTestWithSetup(t, importPathModule,\n\t\tThat(\"path:eval-symlinks d\/f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks d\/s-f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks s-d\/f\").Puts(filepath.Join(\"d\", \"f\")),\n\t\tThat(\"path:eval-symlinks s-bad\").Throws(AnyError),\n\n\t\tThat(\"path:is-dir s-d\").Puts(false),\n\t\tThat(\"path:is-dir s-d &follow-symlink\").Puts(true),\n\t\tThat(\"path:is-dir s-d-f\").Puts(false),\n\t\tThat(\"path:is-dir s-d-f &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-dir s-bad\").Puts(false),\n\t\tThat(\"path:is-dir s-bad &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-dir bad\").Puts(false),\n\t\tThat(\"path:is-dir bad &follow-symlink\").Puts(false),\n\n\t\tThat(\"path:is-regular s-d\").Puts(false),\n\t\tThat(\"path:is-regular s-d &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-regular s-d-f\").Puts(false),\n\t\tThat(\"path:is-regular s-d-f &follow-symlink\").Puts(true),\n\t\tThat(\"path:is-regular s-bad\").Puts(false),\n\t\tThat(\"path:is-regular s-bad &follow-symlink\").Puts(false),\n\t\tThat(\"path:is-regular bad\").Puts(false),\n\t\tThat(\"path:is-regular bad &follow-symlink\").Puts(false),\n\t)\n}\n\nfunc importPathModule(ev *eval.Evaler) {\n\tev.AddGlobal(eval.NsBuilder{}.AddNs(\"path\", Ns).Ns())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst (\n\tServiceGroupResourcePlural = \"servicegroups\"\n\tServiceGroupLabel = \"service-group\"\n\n\tTopologyLabel = \"topology\"\n\n\tHabitatLabel = \"habitat\"\n)\n\ntype ServiceGroup struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec ServiceGroupSpec `json:\"spec\"`\n\tStatus ServiceGroupStatus `json:\"status,omitempty\"`\n}\n\ntype ServiceGroupSpec struct {\n\t\/\/ Count is the amount of Services to start in this Service Group.\n\tCount int `json:\"count\"`\n\t\/\/ Image is the Docker image of the Habitat Service.\n\tImage string `json:\"image\"`\n\tHabitat Habitat `json:\"habitat\"`\n}\n\ntype ServiceGroupStatus struct {\n\tState ServiceGroupState `json:\"state,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\ntype ServiceGroupState string\n\ntype Habitat struct {\n\t\/\/ Group is the value of the --group flag for the hab client.\n\t\/\/ Optional. Defaults to `default`.\n\tGroup string `json:\"group\"`\n\t\/\/ Topology is the value of the --topology flag for the hab client.\n\tTopology `json:\"topology\"`\n\t\/\/ ConfigSecretName is the name of the Secret containing the config the user has previously created.\n\t\/\/ The file with this name is mounted inside of the pod. Habitat will\n\t\/\/ use it for initial configuration of the service.\n\tConfigSecretName string `json:\"config\"`\n\t\/\/ The name of the secret that contains the ring key.\n\t\/\/ Optional.\n\tRingSecretName string `json:\"ringSecretName,omitempty\"`\n\t\/\/ Bind is when one service connects to another forming a producer\/consumer relationship.\n\t\/\/ Optional.\n\tBind []Bind `json:\"bind,omitempty\"`\n}\n\ntype Bind struct {\n\t\/\/ Name is the name of the bind specified in the Habitat configuration files.\n\tName string `json:\"name\"`\n\t\/\/ Service is the name of the service this bind refers to.\n\tService string `json:\"service\"`\n\t\/\/ Group is the group of the service this bind refers to.\n\tGroup string `json:\"group\"`\n}\n\ntype Topology string\n\nfunc (t Topology) String() string {\n\treturn string(t)\n}\n\nconst (\n\tServiceGroupStateCreated ServiceGroupState = \"Created\"\n\tServiceGroupStateProcessed ServiceGroupState = \"Processed\"\n\n\tTopologyStandalone Topology = \"standalone\"\n\tTopologyLeader Topology = \"leader\"\n)\n\ntype ServiceGroupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []ServiceGroup `json:\"items\"`\n}\n<commit_msg>Fix serialization of user.toml secret<commit_after>\/\/ Copyright (c) 2017 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst (\n\tServiceGroupResourcePlural = \"servicegroups\"\n\tServiceGroupLabel = \"service-group\"\n\n\tTopologyLabel = \"topology\"\n\n\tHabitatLabel = \"habitat\"\n)\n\ntype ServiceGroup struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec ServiceGroupSpec `json:\"spec\"`\n\tStatus ServiceGroupStatus `json:\"status,omitempty\"`\n}\n\ntype ServiceGroupSpec struct {\n\t\/\/ Count is the amount of Services to start in this Service Group.\n\tCount int `json:\"count\"`\n\t\/\/ Image is the Docker image of the Habitat Service.\n\tImage string `json:\"image\"`\n\tHabitat Habitat `json:\"habitat\"`\n}\n\ntype ServiceGroupStatus struct {\n\tState ServiceGroupState `json:\"state,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\ntype ServiceGroupState string\n\ntype Habitat struct {\n\t\/\/ Group is the value of the --group flag for the hab client.\n\t\/\/ Optional. Defaults to `default`.\n\tGroup string `json:\"group\"`\n\t\/\/ Topology is the value of the --topology flag for the hab client.\n\tTopology `json:\"topology\"`\n\t\/\/ ConfigSecretName is the name of the Secret containing the config the user has previously created.\n\t\/\/ The file with this name is mounted inside of the pod. Habitat will\n\t\/\/ use it for initial configuration of the service.\n\tConfigSecretName string `json:\"configSecretName,omitempty\"`\n\t\/\/ The name of the secret that contains the ring key.\n\t\/\/ Optional.\n\tRingSecretName string `json:\"ringSecretName,omitempty\"`\n\t\/\/ Bind is when one service connects to another forming a producer\/consumer relationship.\n\t\/\/ Optional.\n\tBind []Bind `json:\"bind,omitempty\"`\n}\n\ntype Bind struct {\n\t\/\/ Name is the name of the bind specified in the Habitat configuration files.\n\tName string `json:\"name\"`\n\t\/\/ Service is the name of the service this bind refers to.\n\tService string `json:\"service\"`\n\t\/\/ Group is the group of the service this bind refers to.\n\tGroup string `json:\"group\"`\n}\n\ntype Topology string\n\nfunc (t Topology) String() string {\n\treturn string(t)\n}\n\nconst (\n\tServiceGroupStateCreated ServiceGroupState = \"Created\"\n\tServiceGroupStateProcessed ServiceGroupState = \"Processed\"\n\n\tTopologyStandalone Topology = \"standalone\"\n\tTopologyLeader Topology = \"leader\"\n)\n\ntype ServiceGroupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []ServiceGroup `json:\"items\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst ServiceGroupResourcePlural = \"servicegroups\"\n\ntype ServiceGroup struct {\n\tmetav1.TypeMeta `json:,inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec ServiceGroupSpec `json:\"spec\"`\n\tStatus ServiceGroupStatus `json:\"status,omitempty\"`\n}\n\ntype ServiceGroupSpec struct {\n\t\/\/ Count is the amount of Services to start in this Service Group.\n\tCount int\n\t\/\/ Image is the Docker image of the Habitat Service.\n\tImage string\n}\n\ntype ServiceGroupStatus struct {\n\tState ServiceGroupState `json:\"state,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\ntype ServiceGroupState string\n\nconst (\n\tServiceGroupStateCreated ServiceGroupState = \"Created\"\n\tServiceGroupStateProcessed ServiceGroupState = \"Processed\"\n)\n\ntype ServiceGroupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []ServiceGroup `json:\"items\"`\n}\n<commit_msg>Add json tags for spec fields<commit_after>\/\/ Copyright (c) 2017 Chef Software Inc. and\/or applicable contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1\n\nimport (\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nconst ServiceGroupResourcePlural = \"servicegroups\"\n\ntype ServiceGroup struct {\n\tmetav1.TypeMeta `json:,inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec ServiceGroupSpec `json:\"spec\"`\n\tStatus ServiceGroupStatus `json:\"status,omitempty\"`\n}\n\ntype ServiceGroupSpec struct {\n\t\/\/ Count is the amount of Services to start in this Service Group.\n\tCount int `json:\"count\"`\n\t\/\/ Image is the Docker image of the Habitat Service.\n\tImage string `json:\"image\"`\n}\n\ntype ServiceGroupStatus struct {\n\tState ServiceGroupState `json:\"state,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\ntype ServiceGroupState string\n\nconst (\n\tServiceGroupStateCreated ServiceGroupState = \"Created\"\n\tServiceGroupStateProcessed ServiceGroupState = \"Processed\"\n)\n\ntype ServiceGroupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []ServiceGroup `json:\"items\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helmreconciler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"istio.io\/pkg\/version\"\n\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\tutilerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/helm\/pkg\/manifest\"\n\tkubectl \"k8s.io\/kubectl\/pkg\/util\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\n\t\"istio.io\/operator\/pkg\/apis\/istio\/v1alpha2\"\n\t\"istio.io\/operator\/pkg\/component\/controlplane\"\n\t\"istio.io\/operator\/pkg\/helm\"\n\tistiomanifest \"istio.io\/operator\/pkg\/manifest\"\n\t\"istio.io\/operator\/pkg\/name\"\n\t\"istio.io\/operator\/pkg\/object\"\n\t\"istio.io\/operator\/pkg\/translate\"\n\t\"istio.io\/operator\/pkg\/util\"\n\t\"istio.io\/operator\/pkg\/validate\"\n\tbinversion \"istio.io\/operator\/version\"\n\t\"istio.io\/pkg\/log\"\n)\n\nfunc (h *HelmReconciler) renderCharts(in RenderingInput) (ChartManifestsMap, error) {\n\ticp, ok := in.GetInputConfig().(*v1alpha2.IstioControlPlane)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type %T in renderCharts\", in.GetInputConfig())\n\t}\n\n\ticpSpec := icp.GetSpec()\n\tif err := validate.CheckIstioControlPlaneSpec(icpSpec, false); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmergedICPS, err := mergeICPSWithProfile(icpSpec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt, err := translate.NewTranslator(binversion.OperatorBinaryVersion.MinorVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcp := controlplane.NewIstioControlPlane(mergedICPS, t)\n\tif err := cp.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Istio control plane with spec: \\n%v\\nerror: %s\", mergedICPS, err)\n\t}\n\n\tmanifests, errs := cp.RenderManifest()\n\tif errs != nil {\n\t\terr = errs.ToError()\n\t}\n\n\treturn toChartManifestsMap(manifests), err\n}\n\n\/\/ mergeICPSWithProfile overlays the values in icp on top of the defaults for the profile given by icp.profile and\n\/\/ returns the merged result.\nfunc mergeICPSWithProfile(icp *v1alpha2.IstioControlPlaneSpec) (*v1alpha2.IstioControlPlaneSpec, error) {\n\tprofile := icp.Profile\n\n\t\/\/ This contains the IstioControlPlane CR.\n\tbaseCRYAML, err := helm.ReadProfileYAML(profile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read the profile values for %s: %s\", profile, err)\n\t}\n\n\tif !helm.IsDefaultProfile(profile) {\n\t\t\/\/ Profile definitions are relative to the default profile, so read that first.\n\t\tdfn, err := helm.DefaultFilenameForProfile(profile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefaultYAML, err := helm.ReadProfileYAML(dfn)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not read the default profile values for %s: %s\", dfn, err)\n\t\t}\n\t\tbaseCRYAML, err = helm.OverlayYAML(defaultYAML, baseCRYAML)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not overlay the profile over the default %s: %s\", profile, err)\n\t\t}\n\t}\n\n\t_, baseYAML, err := unmarshalAndValidateICP(baseCRYAML)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Due to the fact that base profile is compiled in before a tag can be created, we must allow an additional\n\t\/\/ override from variables that are set during release build time.\n\thub := version.DockerInfo.Hub\n\ttag := version.DockerInfo.Tag\n\tif hub != \"unknown\" && tag != \"unknown\" {\n\t\tbuildHubTagOverlayYAML, err := helm.GenerateHubTagOverlay(hub, tag)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbaseYAML, err = helm.OverlayYAML(baseYAML, buildHubTagOverlayYAML)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\toverlayYAML, err := util.MarshalWithJSONPB(icp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Merge base and overlay.\n\tmergedYAML, err := helm.OverlayYAML(baseYAML, overlayYAML)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not overlay user config over base: %s\", err)\n\t}\n\treturn unmarshalAndValidateICPSpec(mergedYAML)\n}\n\n\/\/ unmarshalAndValidateICP unmarshals the IstioControlPlane in the crYAML string and validates it.\n\/\/ If successful, it returns both a struct and string YAML representations of the IstioControlPlaneSpec embedded in icp.\nfunc unmarshalAndValidateICP(crYAML string) (*v1alpha2.IstioControlPlaneSpec, string, error) {\n\t\/\/ TODO: add GroupVersionKind handling as appropriate.\n\tif crYAML == \"\" {\n\t\treturn &v1alpha2.IstioControlPlaneSpec{}, \"\", nil\n\t}\n\ticps, _, err := istiomanifest.ParseK8SYAMLToIstioControlPlaneSpec(crYAML)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"could not parse the overlay file: %s\\n\\nOriginal YAML:\\n%s\", err, crYAML)\n\t}\n\tif errs := validate.CheckIstioControlPlaneSpec(icps, false); len(errs) != 0 {\n\t\treturn nil, \"\", fmt.Errorf(\"input file failed validation with the following errors: %s\\n\\nOriginal YAML:\\n%s\", errs, crYAML)\n\t}\n\ticpsYAML, err := util.MarshalWithJSONPB(icps)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"could not marshal: %s\", err)\n\t}\n\treturn icps, icpsYAML, nil\n}\n\n\/\/ unmarshalAndValidateICPSpec unmarshals the IstioControlPlaneSpec in the icpsYAML string and validates it.\n\/\/ If successful, it returns a struct representation of icpsYAML.\nfunc unmarshalAndValidateICPSpec(icpsYAML string) (*v1alpha2.IstioControlPlaneSpec, error) {\n\ticps := &v1alpha2.IstioControlPlaneSpec{}\n\tif err := util.UnmarshalWithJSONPB(icpsYAML, icps); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not unmarshal the merged YAML: %s\\n\\nYAML:\\n%s\", err, icpsYAML)\n\t}\n\tif errs := validate.CheckIstioControlPlaneSpec(icps, true); len(errs) != 0 {\n\t\treturn nil, fmt.Errorf(errs.Error())\n\t}\n\treturn icps, nil\n}\n\n\/\/ ProcessManifest apply the manifest to create or update resources, returns the number of objects processed\nfunc (h *HelmReconciler) ProcessManifest(manifest manifest.Manifest) (int, error) {\n\tvar errs []error\n\tlog.Infof(\"Processing resources from manifest: %s\", manifest.Name)\n\tobjects, err := object.ParseK8sObjectsFromYAMLManifest(manifest.Content)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfor _, obj := range objects {\n\t\terr = h.ProcessObject(manifest.Name, obj.UnstructuredObject())\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn len(objects), utilerrors.NewAggregate(errs)\n}\n\nfunc (h *HelmReconciler) ProcessObject(chartName string, obj *unstructured.Unstructured) error {\n\tif obj.GetKind() == \"List\" {\n\t\tallErrors := []error{}\n\t\tlist, err := obj.ToList()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error converting List object: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tfor _, item := range list.Items {\n\t\t\terr = h.ProcessObject(chartName, &item)\n\t\t\tif err != nil {\n\t\t\t\tallErrors = append(allErrors, err)\n\t\t\t}\n\t\t}\n\t\treturn utilerrors.NewAggregate(allErrors)\n\t}\n\n\tmutatedObj, err := h.customizer.Listener().BeginResource(chartName, obj)\n\tif err != nil {\n\t\tlog.Errorf(\"error preprocessing object: %s\", err)\n\t\treturn err\n\t}\n\n\terr = kubectl.CreateApplyAnnotation(obj, unstructured.UnstructuredJSONScheme)\n\tif err != nil {\n\t\tlog.Errorf(\"unexpected error adding apply annotation to object: %s\", err)\n\t}\n\n\treceiver := &unstructured.Unstructured{}\n\treceiver.SetGroupVersionKind(mutatedObj.GetObjectKind().GroupVersionKind())\n\tobjectKey, _ := client.ObjectKeyFromObject(mutatedObj)\n\n\tvar patch Patch\n\n\terr = h.client.Get(context.TODO(), objectKey, receiver)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.Infof(\"creating resource: %s\", objectKey)\n\t\t\terr = h.client.Create(context.TODO(), mutatedObj)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ special handling\n\t\t\t\tif err = h.customizer.Listener().ResourceCreated(mutatedObj); err != nil {\n\t\t\t\t\tlog.Errorf(\"unexpected error occurred during postprocessing of new resource: %s\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlistenerErr := h.customizer.Listener().ResourceError(mutatedObj, err)\n\t\t\t\tif listenerErr != nil {\n\t\t\t\t\tlog.Errorf(\"unexpected error occurred invoking ResourceError on listener: %s\", listenerErr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if patch, err = h.CreatePatch(receiver, mutatedObj); err == nil && patch != nil {\n\t\tlog.Info(\"updating existing resource\")\n\t\tmutatedObj, err = patch.Apply()\n\t\tif err == nil {\n\t\t\tif err = h.customizer.Listener().ResourceUpdated(mutatedObj, receiver); err != nil {\n\t\t\t\tlog.Errorf(\"unexpected error occurred during postprocessing of updated resource: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlistenerErr := h.customizer.Listener().ResourceError(obj, err)\n\t\t\tif listenerErr != nil {\n\t\t\t\tlog.Errorf(\"unexpected error occurred invoking ResourceError on listener: %s\", listenerErr)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"error occurred reconciling resource: %s\", err)\n\t}\n\treturn err\n}\n\nfunc toChartManifestsMap(m name.ManifestMap) ChartManifestsMap {\n\tout := make(ChartManifestsMap)\n\tfor k, v := range m {\n\t\tout[string(k)] = []manifest.Manifest{{\n\t\t\tName: string(k),\n\t\t\tContent: v,\n\t\t}}\n\t}\n\treturn out\n}\n<commit_msg>fix invalid image name. (#626)<commit_after>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helmreconciler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"istio.io\/pkg\/version\"\n\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\tutilerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\t\"k8s.io\/helm\/pkg\/manifest\"\n\tkubectl \"k8s.io\/kubectl\/pkg\/util\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\n\t\"istio.io\/operator\/pkg\/apis\/istio\/v1alpha2\"\n\t\"istio.io\/operator\/pkg\/component\/controlplane\"\n\t\"istio.io\/operator\/pkg\/helm\"\n\tistiomanifest \"istio.io\/operator\/pkg\/manifest\"\n\t\"istio.io\/operator\/pkg\/name\"\n\t\"istio.io\/operator\/pkg\/object\"\n\t\"istio.io\/operator\/pkg\/translate\"\n\t\"istio.io\/operator\/pkg\/util\"\n\t\"istio.io\/operator\/pkg\/validate\"\n\tbinversion \"istio.io\/operator\/version\"\n\t\"istio.io\/pkg\/log\"\n)\n\nfunc (h *HelmReconciler) renderCharts(in RenderingInput) (ChartManifestsMap, error) {\n\ticp, ok := in.GetInputConfig().(*v1alpha2.IstioControlPlane)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type %T in renderCharts\", in.GetInputConfig())\n\t}\n\n\ticpSpec := icp.GetSpec()\n\tif err := validate.CheckIstioControlPlaneSpec(icpSpec, false); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmergedICPS, err := mergeICPSWithProfile(icpSpec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt, err := translate.NewTranslator(binversion.OperatorBinaryVersion.MinorVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcp := controlplane.NewIstioControlPlane(mergedICPS, t)\n\tif err := cp.Run(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Istio control plane with spec: \\n%v\\nerror: %s\", mergedICPS, err)\n\t}\n\n\tmanifests, errs := cp.RenderManifest()\n\tif errs != nil {\n\t\terr = errs.ToError()\n\t}\n\n\treturn toChartManifestsMap(manifests), err\n}\n\n\/\/ mergeICPSWithProfile overlays the values in icp on top of the defaults for the profile given by icp.profile and\n\/\/ returns the merged result.\nfunc mergeICPSWithProfile(icp *v1alpha2.IstioControlPlaneSpec) (*v1alpha2.IstioControlPlaneSpec, error) {\n\tprofile := icp.Profile\n\n\t\/\/ This contains the IstioControlPlane CR.\n\tbaseCRYAML, err := helm.ReadProfileYAML(profile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read the profile values for %s: %s\", profile, err)\n\t}\n\n\tif !helm.IsDefaultProfile(profile) {\n\t\t\/\/ Profile definitions are relative to the default profile, so read that first.\n\t\tdfn, err := helm.DefaultFilenameForProfile(profile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefaultYAML, err := helm.ReadProfileYAML(dfn)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not read the default profile values for %s: %s\", dfn, err)\n\t\t}\n\t\tbaseCRYAML, err = helm.OverlayYAML(defaultYAML, baseCRYAML)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not overlay the profile over the default %s: %s\", profile, err)\n\t\t}\n\t}\n\n\t_, baseYAML, err := unmarshalAndValidateICP(baseCRYAML)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Due to the fact that base profile is compiled in before a tag can be created, we must allow an additional\n\t\/\/ override from variables that are set during release build time.\n\thub := version.DockerInfo.Hub\n\ttag := version.DockerInfo.Tag\n\tif hub != \"\" && hub != \"unknown\" && tag != \"\" && tag != \"unknown\" {\n\t\tbuildHubTagOverlayYAML, err := helm.GenerateHubTagOverlay(hub, tag)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbaseYAML, err = helm.OverlayYAML(baseYAML, buildHubTagOverlayYAML)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\toverlayYAML, err := util.MarshalWithJSONPB(icp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Merge base and overlay.\n\tmergedYAML, err := helm.OverlayYAML(baseYAML, overlayYAML)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not overlay user config over base: %s\", err)\n\t}\n\treturn unmarshalAndValidateICPSpec(mergedYAML)\n}\n\n\/\/ unmarshalAndValidateICP unmarshals the IstioControlPlane in the crYAML string and validates it.\n\/\/ If successful, it returns both a struct and string YAML representations of the IstioControlPlaneSpec embedded in icp.\nfunc unmarshalAndValidateICP(crYAML string) (*v1alpha2.IstioControlPlaneSpec, string, error) {\n\t\/\/ TODO: add GroupVersionKind handling as appropriate.\n\tif crYAML == \"\" {\n\t\treturn &v1alpha2.IstioControlPlaneSpec{}, \"\", nil\n\t}\n\ticps, _, err := istiomanifest.ParseK8SYAMLToIstioControlPlaneSpec(crYAML)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"could not parse the overlay file: %s\\n\\nOriginal YAML:\\n%s\", err, crYAML)\n\t}\n\tif errs := validate.CheckIstioControlPlaneSpec(icps, false); len(errs) != 0 {\n\t\treturn nil, \"\", fmt.Errorf(\"input file failed validation with the following errors: %s\\n\\nOriginal YAML:\\n%s\", errs, crYAML)\n\t}\n\ticpsYAML, err := util.MarshalWithJSONPB(icps)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"could not marshal: %s\", err)\n\t}\n\treturn icps, icpsYAML, nil\n}\n\n\/\/ unmarshalAndValidateICPSpec unmarshals the IstioControlPlaneSpec in the icpsYAML string and validates it.\n\/\/ If successful, it returns a struct representation of icpsYAML.\nfunc unmarshalAndValidateICPSpec(icpsYAML string) (*v1alpha2.IstioControlPlaneSpec, error) {\n\ticps := &v1alpha2.IstioControlPlaneSpec{}\n\tif err := util.UnmarshalWithJSONPB(icpsYAML, icps); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not unmarshal the merged YAML: %s\\n\\nYAML:\\n%s\", err, icpsYAML)\n\t}\n\tif errs := validate.CheckIstioControlPlaneSpec(icps, true); len(errs) != 0 {\n\t\treturn nil, fmt.Errorf(errs.Error())\n\t}\n\treturn icps, nil\n}\n\n\/\/ ProcessManifest apply the manifest to create or update resources, returns the number of objects processed\nfunc (h *HelmReconciler) ProcessManifest(manifest manifest.Manifest) (int, error) {\n\tvar errs []error\n\tlog.Infof(\"Processing resources from manifest: %s\", manifest.Name)\n\tobjects, err := object.ParseK8sObjectsFromYAMLManifest(manifest.Content)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfor _, obj := range objects {\n\t\terr = h.ProcessObject(manifest.Name, obj.UnstructuredObject())\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn len(objects), utilerrors.NewAggregate(errs)\n}\n\nfunc (h *HelmReconciler) ProcessObject(chartName string, obj *unstructured.Unstructured) error {\n\tif obj.GetKind() == \"List\" {\n\t\tallErrors := []error{}\n\t\tlist, err := obj.ToList()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error converting List object: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\tfor _, item := range list.Items {\n\t\t\terr = h.ProcessObject(chartName, &item)\n\t\t\tif err != nil {\n\t\t\t\tallErrors = append(allErrors, err)\n\t\t\t}\n\t\t}\n\t\treturn utilerrors.NewAggregate(allErrors)\n\t}\n\n\tmutatedObj, err := h.customizer.Listener().BeginResource(chartName, obj)\n\tif err != nil {\n\t\tlog.Errorf(\"error preprocessing object: %s\", err)\n\t\treturn err\n\t}\n\n\terr = kubectl.CreateApplyAnnotation(obj, unstructured.UnstructuredJSONScheme)\n\tif err != nil {\n\t\tlog.Errorf(\"unexpected error adding apply annotation to object: %s\", err)\n\t}\n\n\treceiver := &unstructured.Unstructured{}\n\treceiver.SetGroupVersionKind(mutatedObj.GetObjectKind().GroupVersionKind())\n\tobjectKey, _ := client.ObjectKeyFromObject(mutatedObj)\n\n\tvar patch Patch\n\n\terr = h.client.Get(context.TODO(), objectKey, receiver)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.Infof(\"creating resource: %s\", objectKey)\n\t\t\terr = h.client.Create(context.TODO(), mutatedObj)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ special handling\n\t\t\t\tif err = h.customizer.Listener().ResourceCreated(mutatedObj); err != nil {\n\t\t\t\t\tlog.Errorf(\"unexpected error occurred during postprocessing of new resource: %s\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlistenerErr := h.customizer.Listener().ResourceError(mutatedObj, err)\n\t\t\t\tif listenerErr != nil {\n\t\t\t\t\tlog.Errorf(\"unexpected error occurred invoking ResourceError on listener: %s\", listenerErr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if patch, err = h.CreatePatch(receiver, mutatedObj); err == nil && patch != nil {\n\t\tlog.Info(\"updating existing resource\")\n\t\tmutatedObj, err = patch.Apply()\n\t\tif err == nil {\n\t\t\tif err = h.customizer.Listener().ResourceUpdated(mutatedObj, receiver); err != nil {\n\t\t\t\tlog.Errorf(\"unexpected error occurred during postprocessing of updated resource: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlistenerErr := h.customizer.Listener().ResourceError(obj, err)\n\t\t\tif listenerErr != nil {\n\t\t\t\tlog.Errorf(\"unexpected error occurred invoking ResourceError on listener: %s\", listenerErr)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"error occurred reconciling resource: %s\", err)\n\t}\n\treturn err\n}\n\nfunc toChartManifestsMap(m name.ManifestMap) ChartManifestsMap {\n\tout := make(ChartManifestsMap)\n\tfor k, v := range m {\n\t\tout[string(k)] = []manifest.Manifest{{\n\t\t\tName: string(k),\n\t\t\tContent: v,\n\t\t}}\n\t}\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>package dp\n\nimport \"testing\"\n\nvar tests = []struct {\n\tin *Parameters\n\tout int\n}{\n\t{\n\t\tin: &Parameters{\n\t\t\tScores: []int{3, 4, 6},\n\t\t\tWeights: []int{3, 4, 6},\n\t\t\tMaxWeight: 7,\n\t\t},\n\t\tout: 7,\n\t},\n}\n\nfunc TestMax(t *testing.T) {\n\tfor _, v := range tests {\n\t\tout := Max(v.in)\n\t\tif out != v.out {\n\t\t\tt.Errorf(\"Got %v, expected %v\", out, v.out)\n\t\t}\n\t}\n}\n<commit_msg>more DP tests<commit_after>package dp\n\nimport \"testing\"\n\nvar tests = []struct {\n\tin *Parameters\n\tout int\n}{\n\t{\n\t\tin: &Parameters{\n\t\t\tScores: []int{3, 4, 6},\n\t\t\tWeights: []int{3, 4, 6},\n\t\t\tMaxWeight: 7,\n\t\t},\n\t\tout: 7,\n\t},\n\t{\n\t\tin: &Parameters{\n\t\t\tScores: []int{3, 4, 5, 6},\n\t\t\tWeights: []int{2, 3, 4, 5},\n\t\t\tMaxWeight: 5,\n\t\t},\n\t\tout: 7,\n\t},\n\t{\n\t\tin: &Parameters{\n\t\t\tScores: []int{1, 6, 18, 22, 28},\n\t\t\tWeights: []int{1, 2, 5, 6, 7},\n\t\t\tMaxWeight: 11,\n\t\t},\n\t\tout: 40,\n\t},\n}\n\nfunc TestMax(t *testing.T) {\n\tfor _, v := range tests {\n\t\tout := Max(v.in)\n\t\tif out != v.out {\n\t\t\tt.Errorf(\"Got %v, expected %v\", out, v.out)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package thesaurus\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n)\n\ntype BigHugh struct {\n\tAPIKey string\n}\n\ntype synonyms struct {\n\tNoun *words `json:\"noun\"`\n\tVerb *words `json:\"verb\"`\n}\n\ntype words struct {\n\tSyn []string `json:\"syn\"`\n}\n\nfunc (b *BigHugh) Synonyms(term string) ([]string, error) {\n\tvar syns []string\n\tresponse, err := http.Get(\"http:\/\/words.bighugelabs.com\/api\/2\/\" + b.APIKey + \"\/\" + term + \"\/json\")\n\tif err != nil {\n\t\treturn syns, errors.New(\"bighugh: Failed when looking for synonyms for \\\"\" + term + \"\\\"\" + err.Error())\n\t}\n\tvar data synonyms\n\tdefer response.Body.Close()\n\tif err := json.NewDecoder(response.Body).Decode(&data); err != nil {\n\t\treturn syns, err\n\t}\n\tsyns = append(syns, data.Noun.Syn...)\n\tsyns = append(syns, data.Verb.Syn...)\n\treturn syns, nil\n}\n<commit_msg>made field access more robust by checking for nils<commit_after>package thesaurus\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n)\n\ntype BigHugh struct {\n\tAPIKey string\n}\n\ntype synonyms struct {\n\tNoun *words `json:\"noun\"`\n\tVerb *words `json:\"verb\"`\n}\n\ntype words struct {\n\tSyn []string `json:\"syn\"`\n}\n\nfunc (b *BigHugh) Synonyms(term string) ([]string, error) {\n\tvar syns []string\n\tresponse, err := http.Get(\"http:\/\/words.bighugelabs.com\/api\/2\/\" + b.APIKey + \"\/\" + term + \"\/json\")\n\tif err != nil {\n\t\treturn syns, errors.New(\"bighugh: Failed when looking for synonyms for \\\"\" + term + \"\\\"\" + err.Error())\n\t}\n\tvar data synonyms\n\tdefer response.Body.Close()\n\tif err := json.NewDecoder(response.Body).Decode(&data); err != nil {\n\t\treturn syns, err\n\t}\n\tif data.Noun != nil {\n\t\tsyns = append(syns, data.Noun.Syn...)\n\t}\n\tif data.Verb != nil {\n\t\tsyns = append(syns, data.Verb.Syn...)\n\t}\n\treturn syns, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package currency\n\nimport \"strings\"\n\ntype CurrencyConf struct {\n\tCurrency string\n\tLabel string\n\tSymbol string\n\tPairs []string\n\tPools []string\n\tExplorers ExplorerConf\n\tUnit string\n}\n\ntype ExplorerConf struct {\n\tBaseUri string\n\tAddressUri string\n\tTransactionUri string\n}\n\ntype CurrencyConfigMapping string\n\nconst (\n\tCurrencyLabelMap CurrencyConfigMapping = \"pub:map:currency:label\"\n\tCurrencySymbolMap CurrencyConfigMapping = \"pub:map:currency:sym\"\n\tCurrencyUnitMap CurrencyConfigMapping = \"pub:map:currency:unit\"\n\tCurrencyExplorerMap CurrencyConfigMapping = \"pub:map:currency:explorer\"\n\tCurrencyExchangeMap CurrencyConfigMapping = \"pub:list:pair:exchange\"\n)\n\ntype RawCurrencyConf struct {\n\tMapping string\n\tData interface{}\n}\n\nfunc parseCurrencyLabelMap(config map[string]CurrencyConf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Label = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := CurrencyConf{}\n\t\t\tcfg.Label = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseCurrencySymbMap(config map[string]CurrencyConf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Symbol = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := CurrencyConf{}\n\t\t\tcfg.Symbol = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseCurrencyUnitMap(config map[string]CurrencyConf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Unit = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := CurrencyConf{}\n\t\t\tcfg.Unit = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseCurrencyExplorerMap(config map[string]CurrencyConf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\texplorers := data[1].([]interface{})\n\t\tvar cfg CurrencyConf\n\t\tif val, ok := config[cur]; ok {\n\t\t\tcfg = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcc := CurrencyConf{}\n\t\t\tcc.Currency = cur\n\t\t\tcfg = cc\n\t\t}\n\t\tec := ExplorerConf{\n\t\t\texplorers[0].(string),\n\t\t\texplorers[1].(string),\n\t\t\texplorers[2].(string),\n\t\t}\n\t\tcfg.Explorers = ec\n\t\tconfig[cur] = cfg\n\t}\n}\n\nfunc parseCurrencyExchangeMap(config map[string]CurrencyConf, raw []interface{}) {\n\tfor _, rs := range raw {\n\t\tsymbol := rs.(string)\n\t\tvar base, quote string\n\n\t\tif len(symbol) > 6 {\n\t\t\tbase = strings.Split(symbol, \":\")[0]\n\t\t\tquote = strings.Split(symbol, \":\")[1]\n\t\t} else {\n\t\t\tbase = symbol[3:]\n\t\t\tquote = symbol[:3]\n\t\t}\n\n\t\t\/\/ append if base exists in configs\n\t\tif val, ok := config[base]; ok {\n\t\t\tval.Pairs = append(val.Pairs, symbol)\n\t\t\tconfig[base] = val\n\t\t}\n\n\t\t\/\/ append if quote exists in configs\n\t\tif val, ok := config[quote]; ok {\n\t\t\tval.Pairs = append(val.Pairs, symbol)\n\t\t\tconfig[quote] = val\n\t\t}\n\t}\n}\n\nfunc NewCurrencyConfFromRaw(raw []RawCurrencyConf) ([]CurrencyConf, error) {\n\tconfigMap := make(map[string]CurrencyConf)\n\tfor _, r := range raw {\n\t\tswitch CurrencyConfigMapping(r.Mapping) {\n\t\tcase CurrencyLabelMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseCurrencyLabelMap(configMap, data)\n\t\tcase CurrencySymbolMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseCurrencySymbMap(configMap, data)\n\t\tcase CurrencyUnitMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseCurrencyUnitMap(configMap, data)\n\t\tcase CurrencyExplorerMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseCurrencyExplorerMap(configMap, data)\n\t\tcase CurrencyExchangeMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseCurrencyExchangeMap(configMap, data)\n\t\t}\n\t}\n\n\t\/\/ convert map to array\n\tconfigs := make([]CurrencyConf, 0)\n\tfor _, v := range configMap {\n\t\tconfigs = append(configs, v)\n\t}\n\n\treturn configs, nil\n}\n<commit_msg>amending currency type related names to follow go convention<commit_after>package currency\n\nimport \"strings\"\n\ntype Conf struct {\n\tCurrency string\n\tLabel string\n\tSymbol string\n\tPairs []string\n\tPools []string\n\tExplorers ExplorerConf\n\tUnit string\n}\n\ntype ExplorerConf struct {\n\tBaseUri string\n\tAddressUri string\n\tTransactionUri string\n}\n\ntype ConfigMapping string\n\nconst (\n\tLabelMap ConfigMapping = \"pub:map:currency:label\"\n\tSymbolMap ConfigMapping = \"pub:map:currency:sym\"\n\tUnitMap ConfigMapping = \"pub:map:currency:unit\"\n\tExplorerMap ConfigMapping = \"pub:map:currency:explorer\"\n\tExchangeMap ConfigMapping = \"pub:list:pair:exchange\"\n)\n\ntype RawConf struct {\n\tMapping string\n\tData interface{}\n}\n\nfunc parseLabelMap(config map[string]Conf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Label = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := Conf{}\n\t\t\tcfg.Label = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseSymbMap(config map[string]Conf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Symbol = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := Conf{}\n\t\t\tcfg.Symbol = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseUnitMap(config map[string]Conf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\tif val, ok := config[cur]; ok {\n\t\t\t\/\/ add value\n\t\t\tval.Unit = data[1].(string)\n\t\t\tconfig[cur] = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcfg := Conf{}\n\t\t\tcfg.Unit = data[1].(string)\n\t\t\tcfg.Currency = cur\n\t\t\tconfig[cur] = cfg\n\t\t}\n\t}\n}\n\nfunc parseExplorerMap(config map[string]Conf, raw []interface{}) {\n\tfor _, rawLabel := range raw {\n\t\tdata := rawLabel.([]interface{})\n\t\tcur := data[0].(string)\n\t\texplorers := data[1].([]interface{})\n\t\tvar cfg Conf\n\t\tif val, ok := config[cur]; ok {\n\t\t\tcfg = val\n\t\t} else {\n\t\t\t\/\/ create new empty config instance\n\t\t\tcc := Conf{}\n\t\t\tcc.Currency = cur\n\t\t\tcfg = cc\n\t\t}\n\t\tec := ExplorerConf{\n\t\t\texplorers[0].(string),\n\t\t\texplorers[1].(string),\n\t\t\texplorers[2].(string),\n\t\t}\n\t\tcfg.Explorers = ec\n\t\tconfig[cur] = cfg\n\t}\n}\n\nfunc parseExchangeMap(config map[string]Conf, raw []interface{}) {\n\tfor _, rs := range raw {\n\t\tsymbol := rs.(string)\n\t\tvar base, quote string\n\n\t\tif len(symbol) > 6 {\n\t\t\tbase = strings.Split(symbol, \":\")[0]\n\t\t\tquote = strings.Split(symbol, \":\")[1]\n\t\t} else {\n\t\t\tbase = symbol[3:]\n\t\t\tquote = symbol[:3]\n\t\t}\n\n\t\t\/\/ append if base exists in configs\n\t\tif val, ok := config[base]; ok {\n\t\t\tval.Pairs = append(val.Pairs, symbol)\n\t\t\tconfig[base] = val\n\t\t}\n\n\t\t\/\/ append if quote exists in configs\n\t\tif val, ok := config[quote]; ok {\n\t\t\tval.Pairs = append(val.Pairs, symbol)\n\t\t\tconfig[quote] = val\n\t\t}\n\t}\n}\n\nfunc ConfFromRaw(raw []RawConf) ([]Conf, error) {\n\tconfigMap := make(map[string]Conf)\n\tfor _, r := range raw {\n\t\tswitch ConfigMapping(r.Mapping) {\n\t\tcase LabelMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseLabelMap(configMap, data)\n\t\tcase SymbolMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseSymbMap(configMap, data)\n\t\tcase UnitMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseUnitMap(configMap, data)\n\t\tcase ExplorerMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseExplorerMap(configMap, data)\n\t\tcase ExchangeMap:\n\t\t\tdata := r.Data.([]interface{})\n\t\t\tparseExchangeMap(configMap, data)\n\t\t}\n\t}\n\n\t\/\/ convert map to array\n\tconfigs := make([]Conf, 0)\n\tfor _, v := range configMap {\n\t\tconfigs = append(configs, v)\n\t}\n\n\treturn configs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pingfed\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/tidwall\/gjson\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/cfg\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/creds\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/page\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/prompter\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/provider\"\n)\n\nvar logger = logrus.WithField(\"provider\", \"pingfed\")\n\n\/\/ Client wrapper around PingFed + PingId enabling authentication and retrieval of assertions\ntype Client struct {\n\tprovider.ValidateBase\n\n\tclient *provider.HTTPClient\n\tidpAccount *cfg.IDPAccount\n}\n\n\/\/ New create a new PingFed client\nfunc New(idpAccount *cfg.IDPAccount) (*Client, error) {\n\n\ttr := provider.NewDefaultTransport(idpAccount.SkipVerify)\n\n\tclient, err := provider.NewHTTPClient(tr, provider.BuildHttpClientOpts(idpAccount))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error building http client\")\n\t}\n\n\t\/\/ assign a response validator to ensure all responses are either success or a redirect\n\t\/\/ this is to avoid have explicit checks for every single response\n\tclient.CheckResponseStatus = provider.SuccessOrRedirectResponseValidator\n\n\treturn &Client{\n\t\tclient: client,\n\t\tidpAccount: idpAccount,\n\t}, nil\n}\n\ntype ctxKey string\n\n\/\/ Authenticate Authenticate to PingFed and return the data from the body of the SAML assertion.\nfunc (ac *Client) Authenticate(loginDetails *creds.LoginDetails) (string, error) {\n\tu := fmt.Sprintf(\"%s\/idp\/startSSO.ping?PartnerSpId=%s\", loginDetails.URL, ac.idpAccount.AmazonWebservicesURN)\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error building request\")\n\t}\n\tctx := context.WithValue(context.Background(), ctxKey(\"login\"), loginDetails)\n\treturn ac.follow(ctx, req)\n}\n\nfunc (ac *Client) follow(ctx context.Context, req *http.Request) (string, error) {\n\tres, err := ac.client.Do(req)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error following\")\n\t}\n\tdoc, err := goquery.NewDocumentFromResponse(res)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to build document from response\")\n\t}\n\n\tvar handler func(context.Context, *goquery.Document) (context.Context, *http.Request, error)\n\n\tif docIsFormRedirectToAWS(doc) {\n\t\tlogger.WithField(\"type\", \"saml-response-to-aws\").Debug(\"doc detect\")\n\t\tif samlResponse, ok := extractSAMLResponse(doc); ok {\n\t\t\tdecodedSamlResponse, err := base64.StdEncoding.DecodeString(samlResponse)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to decode saml-response\")\n\t\t\t}\n\t\t\tlogger.WithField(\"type\", \"saml-response\").WithField(\"saml-response\", string(decodedSamlResponse)).Debug(\"doc detect\")\n\t\t\treturn samlResponse, nil\n\t\t}\n\t} else if docIsFormSamlRequest(doc) {\n\t\tlogger.WithField(\"type\", \"saml-request\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsFormResume(doc) {\n\t\tlogger.WithField(\"type\", \"resume\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsFormSamlResponse(doc) {\n\t\tlogger.WithField(\"type\", \"saml-response\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsLogin(doc) {\n\t\tlogger.WithField(\"type\", \"login\").Debug(\"doc detect\")\n\t\thandler = ac.handleLogin\n\t} else if docIsOTP(doc) {\n\t\tlogger.WithField(\"type\", \"otp\").Debug(\"doc detect\")\n\t\thandler = ac.handleOTP\n\t} else if docIsSwipe(doc) {\n\t\tlogger.WithField(\"type\", \"swipe\").Debug(\"doc detect\")\n\t\thandler = ac.handleSwipe\n\t} else if docIsFormRedirect(doc) {\n\t\tlogger.WithField(\"type\", \"form-redirect\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsWebAuthn(doc) {\n\t\tlogger.WithField(\"type\", \"webauthn\").Debug(\"doc detect\")\n\t\thandler = ac.handleWebAuthn\n\t}\n\tif handler == nil {\n\t\thtml, _ := doc.Selection.Html()\n\t\tlogger.WithField(\"doc\", html).Debug(\"Unknown document type\")\n\t\treturn \"\", fmt.Errorf(\"Unknown document type\")\n\t}\n\n\tctx, req, err = handler(ctx, doc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ac.follow(ctx, req)\n}\n\nfunc (ac *Client) handleLogin(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tloginDetails, ok := ctx.Value(ctxKey(\"login\")).(*creds.LoginDetails)\n\tif !ok {\n\t\treturn ctx, nil, fmt.Errorf(\"no context value for 'login'\")\n\t}\n\n\tform, err := page.NewFormFromDocument(doc, \"form\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting login form\")\n\t}\n\n\tform.Values.Set(\"pf.username\", loginDetails.Username)\n\tform.Values.Set(\"pf.pass\", loginDetails.Password)\n\tform.URL = makeAbsoluteURL(form.URL, loginDetails.URL)\n\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleOTP(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"#otp-form\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting OTP form\")\n\t}\n\n\ttoken := prompter.StringRequired(\"Enter passcode\")\n\tform.Values.Set(\"otp\", token)\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleSwipe(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"#form1\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting swipe status form\")\n\t}\n\n\t\/\/ poll status. request must specifically be a GET\n\tform.Method = \"GET\"\n\treq, err := form.BuildRequest()\n\tif err != nil {\n\t\treturn ctx, nil, err\n\t}\n\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\n\t\tres, err := ac.client.Do(req)\n\t\tif err != nil {\n\t\t\treturn ctx, nil, errors.Wrap(err, \"error polling swipe status\")\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn ctx, nil, errors.Wrap(err, \"error parsing body from swipe status response\")\n\t\t}\n\n\t\tresp := string(body)\n\n\t\tpingfedMFAStatusResponse := gjson.Get(resp, \"status\").String()\n\n\t\t\/\/ASYNC_AUTH_WAIT indicates we keep going\n\t\t\/\/OK indicates someone swiped\n\t\t\/\/DEVICE_CLAIM_TIMEOUT indicates nobody swiped\n\t\t\/\/otherwise loop forever?\n\n\t\tif pingfedMFAStatusResponse == \"OK\" || pingfedMFAStatusResponse == \"DEVICE_CLAIM_TIMEOUT\" || pingfedMFAStatusResponse == \"TIMEOUT\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ now build a request for getting response of MFA\n\tform, err = page.NewFormFromDocument(doc, \"#reponseView\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting swipe response form\")\n\t}\n\treq, err = form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleFormRedirect(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting redirect form\")\n\t}\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleWebAuthn(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting webauthn form\")\n\t}\n\tform.Values.Set(\"isWebAuthnSupportedByBrowser\", \"false\")\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc docIsLogin(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"pf.pass\\\"]\").Size() == 1\n}\n\nfunc docIsOTP(doc *goquery.Document) bool {\n\treturn doc.Has(\"form#otp-form\").Size() == 1\n}\n\nfunc docIsSwipe(doc *goquery.Document) bool {\n\treturn doc.Has(\"form#form1\").Size() == 1 && doc.Has(\"form#reponseView\").Size() == 1\n}\n\nfunc docIsFormRedirect(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"ppm_request\\\"]\").Size() == 1\n}\n\nfunc docIsWebAuthn(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"isWebAuthnSupportedByBrowser\\\"]\").Size() == 1\n}\n\nfunc docIsFormSamlRequest(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"SAMLRequest\\\"]\").Size() == 1\n}\n\nfunc docIsFormSamlResponse(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"SAMLResponse\\\"]\").Size() == 1\n}\n\nfunc docIsFormResume(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"RelayState\\\"]\").Size() == 1\n}\n\nfunc docIsFormRedirectToAWS(doc *goquery.Document) bool {\n\treturn doc.Find(\"form[action=\\\"https:\/\/signin.aws.amazon.com\/saml\\\"]\").Size() == 1\n}\n\nfunc extractSAMLResponse(doc *goquery.Document) (v string, ok bool) {\n\treturn doc.Find(\"input[name=\\\"SAMLResponse\\\"]\").Attr(\"value\")\n}\n\n\/\/ ensures given url is an absolute URL. if not, it will be combined with the base URL\nfunc makeAbsoluteURL(v string, base string) string {\n\tif u, err := url.ParseRequestURI(v); err == nil && !u.IsAbs() {\n\t\treturn fmt.Sprintf(\"%s%s\", base, v)\n\t}\n\treturn v\n}\n<commit_msg>add support for `USER` and `PASSWORD` fields in pingfed provider<commit_after>package pingfed\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/tidwall\/gjson\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/cfg\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/creds\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/page\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/prompter\"\n\t\"github.com\/versent\/saml2aws\/v2\/pkg\/provider\"\n)\n\nvar logger = logrus.WithField(\"provider\", \"pingfed\")\n\n\/\/ Client wrapper around PingFed + PingId enabling authentication and retrieval of assertions\ntype Client struct {\n\tprovider.ValidateBase\n\n\tclient *provider.HTTPClient\n\tidpAccount *cfg.IDPAccount\n}\n\n\/\/ New create a new PingFed client\nfunc New(idpAccount *cfg.IDPAccount) (*Client, error) {\n\n\ttr := provider.NewDefaultTransport(idpAccount.SkipVerify)\n\n\tclient, err := provider.NewHTTPClient(tr, provider.BuildHttpClientOpts(idpAccount))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error building http client\")\n\t}\n\n\t\/\/ assign a response validator to ensure all responses are either success or a redirect\n\t\/\/ this is to avoid have explicit checks for every single response\n\tclient.CheckResponseStatus = provider.SuccessOrRedirectResponseValidator\n\n\treturn &Client{\n\t\tclient: client,\n\t\tidpAccount: idpAccount,\n\t}, nil\n}\n\ntype ctxKey string\n\n\/\/ Authenticate Authenticate to PingFed and return the data from the body of the SAML assertion.\nfunc (ac *Client) Authenticate(loginDetails *creds.LoginDetails) (string, error) {\n\tu := fmt.Sprintf(\"%s\/idp\/startSSO.ping?PartnerSpId=%s\", loginDetails.URL, ac.idpAccount.AmazonWebservicesURN)\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error building request\")\n\t}\n\tctx := context.WithValue(context.Background(), ctxKey(\"login\"), loginDetails)\n\treturn ac.follow(ctx, req)\n}\n\nfunc (ac *Client) follow(ctx context.Context, req *http.Request) (string, error) {\n\tres, err := ac.client.Do(req)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error following\")\n\t}\n\tdoc, err := goquery.NewDocumentFromResponse(res)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to build document from response\")\n\t}\n\n\tvar handler func(context.Context, *goquery.Document) (context.Context, *http.Request, error)\n\n\tif docIsFormRedirectToAWS(doc) {\n\t\tlogger.WithField(\"type\", \"saml-response-to-aws\").Debug(\"doc detect\")\n\t\tif samlResponse, ok := extractSAMLResponse(doc); ok {\n\t\t\tdecodedSamlResponse, err := base64.StdEncoding.DecodeString(samlResponse)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to decode saml-response\")\n\t\t\t}\n\t\t\tlogger.WithField(\"type\", \"saml-response\").WithField(\"saml-response\", string(decodedSamlResponse)).Debug(\"doc detect\")\n\t\t\treturn samlResponse, nil\n\t\t}\n\t} else if docIsFormSamlRequest(doc) {\n\t\tlogger.WithField(\"type\", \"saml-request\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsFormResume(doc) {\n\t\tlogger.WithField(\"type\", \"resume\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsFormSamlResponse(doc) {\n\t\tlogger.WithField(\"type\", \"saml-response\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsLogin(doc) {\n\t\tlogger.WithField(\"type\", \"login\").Debug(\"doc detect\")\n\t\thandler = ac.handleLogin\n\t} else if docIsOTP(doc) {\n\t\tlogger.WithField(\"type\", \"otp\").Debug(\"doc detect\")\n\t\thandler = ac.handleOTP\n\t} else if docIsSwipe(doc) {\n\t\tlogger.WithField(\"type\", \"swipe\").Debug(\"doc detect\")\n\t\thandler = ac.handleSwipe\n\t} else if docIsFormRedirect(doc) {\n\t\tlogger.WithField(\"type\", \"form-redirect\").Debug(\"doc detect\")\n\t\thandler = ac.handleFormRedirect\n\t} else if docIsWebAuthn(doc) {\n\t\tlogger.WithField(\"type\", \"webauthn\").Debug(\"doc detect\")\n\t\thandler = ac.handleWebAuthn\n\t}\n\tif handler == nil {\n\t\thtml, _ := doc.Selection.Html()\n\t\tlogger.WithField(\"doc\", html).Debug(\"Unknown document type\")\n\t\treturn \"\", fmt.Errorf(\"Unknown document type\")\n\t}\n\n\tctx, req, err = handler(ctx, doc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ac.follow(ctx, req)\n}\n\nfunc (ac *Client) handleLogin(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tloginDetails, ok := ctx.Value(ctxKey(\"login\")).(*creds.LoginDetails)\n\tif !ok {\n\t\treturn ctx, nil, fmt.Errorf(\"no context value for 'login'\")\n\t}\n\n\tform, err := page.NewFormFromDocument(doc, \"form\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting login form\")\n\t}\n\n\tform.Values.Set(\"pf.username\", loginDetails.Username)\n\tform.Values.Set(\"pf.pass\", loginDetails.Password)\n\tform.Values.Set(\"USER\", loginDetails.Username)\n\tform.Values.Set(\"PASSWORD\", loginDetails.Password)\n\tform.URL = makeAbsoluteURL(form.URL, loginDetails.URL)\n\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleOTP(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"#otp-form\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting OTP form\")\n\t}\n\n\ttoken := prompter.StringRequired(\"Enter passcode\")\n\tform.Values.Set(\"otp\", token)\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleSwipe(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"#form1\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting swipe status form\")\n\t}\n\n\t\/\/ poll status. request must specifically be a GET\n\tform.Method = \"GET\"\n\treq, err := form.BuildRequest()\n\tif err != nil {\n\t\treturn ctx, nil, err\n\t}\n\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\n\t\tres, err := ac.client.Do(req)\n\t\tif err != nil {\n\t\t\treturn ctx, nil, errors.Wrap(err, \"error polling swipe status\")\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn ctx, nil, errors.Wrap(err, \"error parsing body from swipe status response\")\n\t\t}\n\n\t\tresp := string(body)\n\n\t\tpingfedMFAStatusResponse := gjson.Get(resp, \"status\").String()\n\n\t\t\/\/ASYNC_AUTH_WAIT indicates we keep going\n\t\t\/\/OK indicates someone swiped\n\t\t\/\/DEVICE_CLAIM_TIMEOUT indicates nobody swiped\n\t\t\/\/otherwise loop forever?\n\n\t\tif pingfedMFAStatusResponse == \"OK\" || pingfedMFAStatusResponse == \"DEVICE_CLAIM_TIMEOUT\" || pingfedMFAStatusResponse == \"TIMEOUT\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ now build a request for getting response of MFA\n\tform, err = page.NewFormFromDocument(doc, \"#reponseView\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting swipe response form\")\n\t}\n\treq, err = form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleFormRedirect(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting redirect form\")\n\t}\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc (ac *Client) handleWebAuthn(ctx context.Context, doc *goquery.Document) (context.Context, *http.Request, error) {\n\tform, err := page.NewFormFromDocument(doc, \"\")\n\tif err != nil {\n\t\treturn ctx, nil, errors.Wrap(err, \"error extracting webauthn form\")\n\t}\n\tform.Values.Set(\"isWebAuthnSupportedByBrowser\", \"false\")\n\treq, err := form.BuildRequest()\n\treturn ctx, req, err\n}\n\nfunc docIsLogin(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"pf.pass\\\"]\").Size() == 1 || doc.Has(\"input[name=\\\"PASSWORD\\\"]\").Size() == 1\n}\n\nfunc docIsOTP(doc *goquery.Document) bool {\n\treturn doc.Has(\"form#otp-form\").Size() == 1\n}\n\nfunc docIsSwipe(doc *goquery.Document) bool {\n\treturn doc.Has(\"form#form1\").Size() == 1 && doc.Has(\"form#reponseView\").Size() == 1\n}\n\nfunc docIsFormRedirect(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"ppm_request\\\"]\").Size() == 1\n}\n\nfunc docIsWebAuthn(doc *goquery.Document) bool {\n\treturn doc.Has(\"input[name=\\\"isWebAuthnSupportedByBrowser\\\"]\").Size() == 1\n}\n\nfunc docIsFormSamlRequest(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"SAMLRequest\\\"]\").Size() == 1\n}\n\nfunc docIsFormSamlResponse(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"SAMLResponse\\\"]\").Size() == 1\n}\n\nfunc docIsFormResume(doc *goquery.Document) bool {\n\treturn doc.Find(\"input[name=\\\"RelayState\\\"]\").Size() == 1\n}\n\nfunc docIsFormRedirectToAWS(doc *goquery.Document) bool {\n\treturn doc.Find(\"form[action=\\\"https:\/\/signin.aws.amazon.com\/saml\\\"]\").Size() == 1\n}\n\nfunc extractSAMLResponse(doc *goquery.Document) (v string, ok bool) {\n\treturn doc.Find(\"input[name=\\\"SAMLResponse\\\"]\").Attr(\"value\")\n}\n\n\/\/ ensures given url is an absolute URL. if not, it will be combined with the base URL\nfunc makeAbsoluteURL(v string, base string) string {\n\tif u, err := url.ParseRequestURI(v); err == nil && !u.IsAbs() {\n\t\treturn fmt.Sprintf(\"%s%s\", base, v)\n\t}\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * MinIO Cloud Storage, (C) 2019 MinIO, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage sql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ Aggregation Function name constants\nconst (\n\taggFnAvg FuncName = \"AVG\"\n\taggFnCount FuncName = \"COUNT\"\n\taggFnMax FuncName = \"MAX\"\n\taggFnMin FuncName = \"MIN\"\n\taggFnSum FuncName = \"SUM\"\n)\n\nvar (\n\terrNonNumericArg = func(fnStr FuncName) error {\n\t\treturn fmt.Errorf(\"%s() requires a numeric argument\", fnStr)\n\t}\n\terrInvalidAggregation = errors.New(\"Invalid aggregation seen\")\n)\n\ntype aggVal struct {\n\trunningSum *Value\n\trunningCount int64\n\trunningMax, runningMin *Value\n\n\t\/\/ Stores if at least one record has been seen\n\tseen bool\n}\n\nfunc newAggVal(fn FuncName) *aggVal {\n\tswitch fn {\n\tcase aggFnAvg, aggFnSum:\n\t\treturn &aggVal{runningSum: FromInt(0)}\n\tcase aggFnMin:\n\t\treturn &aggVal{runningMin: FromInt(0)}\n\tcase aggFnMax:\n\t\treturn &aggVal{runningMax: FromInt(0)}\n\tdefault:\n\t\treturn &aggVal{}\n\t}\n}\n\n\/\/ evalAggregationNode - performs partial computation using the\n\/\/ current row and stores the result.\n\/\/\n\/\/ On success, it returns (nil, nil).\nfunc (e *FuncExpr) evalAggregationNode(r Record) error {\n\t\/\/ It is assumed that this function is called only when\n\t\/\/ `e` is an aggregation function.\n\n\tvar val *Value\n\tvar err error\n\tfuncName := e.getFunctionName()\n\tif aggFnCount == funcName {\n\t\tif e.Count.StarArg {\n\t\t\t\/\/ Handle COUNT(*)\n\t\t\te.aggregate.runningCount++\n\t\t\treturn nil\n\t\t}\n\n\t\tval, err = e.Count.ExprArg.evalNode(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Evaluate the (only) argument\n\t\tval, err = e.SFunc.ArgsList[0].evalNode(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif val.IsNull() {\n\t\t\/\/ E.g. the column or field does not exist in the\n\t\t\/\/ record - in all such cases the aggregation is not\n\t\t\/\/ updated.\n\t\treturn nil\n\t}\n\n\targVal := val\n\tif funcName != aggFnCount {\n\t\t\/\/ All aggregation functions, except COUNT require a\n\t\t\/\/ numeric argument.\n\n\t\t\/\/ Here, we diverge from Amazon S3 behavior by\n\t\t\/\/ inferring untyped values are numbers.\n\t\tif !argVal.isNumeric() {\n\t\t\tif i, ok := argVal.bytesToInt(); ok {\n\t\t\t\targVal.setInt(i)\n\t\t\t} else if f, ok := argVal.bytesToFloat(); ok {\n\t\t\t\targVal.setFloat(f)\n\t\t\t} else {\n\t\t\t\treturn errNonNumericArg(funcName)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Mark that we have seen one non-null value.\n\tisFirstRow := false\n\tif !e.aggregate.seen {\n\t\te.aggregate.seen = true\n\t\tisFirstRow = true\n\t}\n\n\tswitch funcName {\n\tcase aggFnCount:\n\t\t\/\/ For all non-null values, the count is incremented.\n\t\te.aggregate.runningCount++\n\n\tcase aggFnAvg:\n\t\te.aggregate.runningCount++\n\t\terr = e.aggregate.runningSum.arithOp(opPlus, argVal)\n\n\tcase aggFnMin:\n\t\terr = e.aggregate.runningMin.minmax(argVal, false, isFirstRow)\n\n\tcase aggFnMax:\n\t\terr = e.aggregate.runningMax.minmax(argVal, true, isFirstRow)\n\n\tcase aggFnSum:\n\t\terr = e.aggregate.runningSum.arithOp(opPlus, argVal)\n\n\tdefault:\n\t\terr = errInvalidAggregation\n\t}\n\n\treturn err\n}\n\nfunc (e *AliasedExpression) aggregateRow(r Record) error {\n\treturn e.Expression.aggregateRow(r)\n}\n\nfunc (e *Expression) aggregateRow(r Record) error {\n\tfor _, ex := range e.And {\n\t\terr := ex.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *AndCondition) aggregateRow(r Record) error {\n\tfor _, ex := range e.Condition {\n\t\terr := ex.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *Condition) aggregateRow(r Record) error {\n\tif e.Operand != nil {\n\t\treturn e.Operand.aggregateRow(r)\n\t}\n\treturn e.Not.aggregateRow(r)\n}\n\nfunc (e *ConditionOperand) aggregateRow(r Record) error {\n\terr := e.Operand.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif e.ConditionRHS == nil {\n\t\treturn nil\n\t}\n\n\tswitch {\n\tcase e.ConditionRHS.Compare != nil:\n\t\treturn e.ConditionRHS.Compare.Operand.aggregateRow(r)\n\tcase e.ConditionRHS.Between != nil:\n\t\terr = e.ConditionRHS.Between.Start.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.ConditionRHS.Between.End.aggregateRow(r)\n\tcase e.ConditionRHS.In != nil:\n\t\tfor _, elt := range e.ConditionRHS.In.Expressions {\n\t\t\terr = elt.aggregateRow(r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase e.ConditionRHS.Like != nil:\n\t\terr = e.ConditionRHS.Like.Pattern.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.ConditionRHS.Like.EscapeChar.aggregateRow(r)\n\tdefault:\n\t\treturn errInvalidASTNode\n\t}\n}\n\nfunc (e *Operand) aggregateRow(r Record) error {\n\terr := e.Left.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rt := range e.Right {\n\t\terr = rt.Right.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *MultOp) aggregateRow(r Record) error {\n\terr := e.Left.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rt := range e.Right {\n\t\terr = rt.Right.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *UnaryTerm) aggregateRow(r Record) error {\n\tif e.Negated != nil {\n\t\treturn e.Negated.Term.aggregateRow(r)\n\t}\n\treturn e.Primary.aggregateRow(r)\n}\n\nfunc (e *PrimaryTerm) aggregateRow(r Record) error {\n\tswitch {\n\tcase e.SubExpression != nil:\n\t\treturn e.SubExpression.aggregateRow(r)\n\tcase e.FuncCall != nil:\n\t\treturn e.FuncCall.aggregateRow(r)\n\t}\n\treturn nil\n}\n\nfunc (e *FuncExpr) aggregateRow(r Record) error {\n\tswitch e.getFunctionName() {\n\tcase aggFnAvg, aggFnSum, aggFnMax, aggFnMin, aggFnCount:\n\t\treturn e.evalAggregationNode(r)\n\tdefault:\n\t\t\/\/ TODO: traverse arguments and call aggregateRow on\n\t\t\/\/ them if they could be an ancestor of an\n\t\t\/\/ aggregation.\n\t}\n\treturn nil\n}\n\n\/\/ getAggregate() implementation for each AST node follows. This is\n\/\/ called after calling aggregateRow() on each input row, to calculate\n\/\/ the final aggregate result.\n\nfunc (e *FuncExpr) getAggregate() (*Value, error) {\n\tswitch e.getFunctionName() {\n\tcase aggFnCount:\n\t\treturn FromInt(e.aggregate.runningCount), nil\n\n\tcase aggFnAvg:\n\t\tif e.aggregate.runningCount == 0 {\n\t\t\t\/\/ No rows were seen by AVG.\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\terr := e.aggregate.runningSum.arithOp(opDivide, FromInt(e.aggregate.runningCount))\n\t\treturn e.aggregate.runningSum, err\n\n\tcase aggFnMin:\n\t\tif !e.aggregate.seen {\n\t\t\t\/\/ No rows were seen by MIN\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\treturn e.aggregate.runningMin, nil\n\n\tcase aggFnMax:\n\t\tif !e.aggregate.seen {\n\t\t\t\/\/ No rows were seen by MAX\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\treturn e.aggregate.runningMax, nil\n\n\tcase aggFnSum:\n\t\t\/\/ TODO: check if returning 0 when no rows were seen\n\t\t\/\/ by SUM is expected behavior.\n\t\treturn e.aggregate.runningSum, nil\n\n\tdefault:\n\t\t\/\/ TODO:\n\t}\n\n\treturn nil, errInvalidAggregation\n}\n<commit_msg>S3 Select: Aggregate AVG\/SUM as float (#8326)<commit_after>\/*\n * MinIO Cloud Storage, (C) 2019 MinIO, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage sql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ Aggregation Function name constants\nconst (\n\taggFnAvg FuncName = \"AVG\"\n\taggFnCount FuncName = \"COUNT\"\n\taggFnMax FuncName = \"MAX\"\n\taggFnMin FuncName = \"MIN\"\n\taggFnSum FuncName = \"SUM\"\n)\n\nvar (\n\terrNonNumericArg = func(fnStr FuncName) error {\n\t\treturn fmt.Errorf(\"%s() requires a numeric argument\", fnStr)\n\t}\n\terrInvalidAggregation = errors.New(\"Invalid aggregation seen\")\n)\n\ntype aggVal struct {\n\trunningSum *Value\n\trunningCount int64\n\trunningMax, runningMin *Value\n\n\t\/\/ Stores if at least one record has been seen\n\tseen bool\n}\n\nfunc newAggVal(fn FuncName) *aggVal {\n\tswitch fn {\n\tcase aggFnAvg, aggFnSum:\n\t\treturn &aggVal{runningSum: FromFloat(0)}\n\tcase aggFnMin:\n\t\treturn &aggVal{runningMin: FromInt(0)}\n\tcase aggFnMax:\n\t\treturn &aggVal{runningMax: FromInt(0)}\n\tdefault:\n\t\treturn &aggVal{}\n\t}\n}\n\n\/\/ evalAggregationNode - performs partial computation using the\n\/\/ current row and stores the result.\n\/\/\n\/\/ On success, it returns (nil, nil).\nfunc (e *FuncExpr) evalAggregationNode(r Record) error {\n\t\/\/ It is assumed that this function is called only when\n\t\/\/ `e` is an aggregation function.\n\n\tvar val *Value\n\tvar err error\n\tfuncName := e.getFunctionName()\n\tif aggFnCount == funcName {\n\t\tif e.Count.StarArg {\n\t\t\t\/\/ Handle COUNT(*)\n\t\t\te.aggregate.runningCount++\n\t\t\treturn nil\n\t\t}\n\n\t\tval, err = e.Count.ExprArg.evalNode(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Evaluate the (only) argument\n\t\tval, err = e.SFunc.ArgsList[0].evalNode(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif val.IsNull() {\n\t\t\/\/ E.g. the column or field does not exist in the\n\t\t\/\/ record - in all such cases the aggregation is not\n\t\t\/\/ updated.\n\t\treturn nil\n\t}\n\n\targVal := val\n\tif funcName != aggFnCount {\n\t\t\/\/ All aggregation functions, except COUNT require a\n\t\t\/\/ numeric argument.\n\n\t\t\/\/ Here, we diverge from Amazon S3 behavior by\n\t\t\/\/ inferring untyped values are numbers.\n\t\tif !argVal.isNumeric() {\n\t\t\tif i, ok := argVal.bytesToInt(); ok {\n\t\t\t\targVal.setInt(i)\n\t\t\t} else if f, ok := argVal.bytesToFloat(); ok {\n\t\t\t\targVal.setFloat(f)\n\t\t\t} else {\n\t\t\t\treturn errNonNumericArg(funcName)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Mark that we have seen one non-null value.\n\tisFirstRow := false\n\tif !e.aggregate.seen {\n\t\te.aggregate.seen = true\n\t\tisFirstRow = true\n\t}\n\n\tswitch funcName {\n\tcase aggFnCount:\n\t\t\/\/ For all non-null values, the count is incremented.\n\t\te.aggregate.runningCount++\n\n\tcase aggFnAvg, aggFnSum:\n\t\te.aggregate.runningCount++\n\t\t\/\/ Convert to float.\n\t\tf, ok := argVal.ToFloat()\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Could not convert value %v (%s) to a number\", argVal.value, argVal.GetTypeString())\n\t\t}\n\t\targVal.setFloat(f)\n\t\terr = e.aggregate.runningSum.arithOp(opPlus, argVal)\n\n\tcase aggFnMin:\n\t\terr = e.aggregate.runningMin.minmax(argVal, false, isFirstRow)\n\n\tcase aggFnMax:\n\t\terr = e.aggregate.runningMax.minmax(argVal, true, isFirstRow)\n\n\tdefault:\n\t\terr = errInvalidAggregation\n\t}\n\n\treturn err\n}\n\nfunc (e *AliasedExpression) aggregateRow(r Record) error {\n\treturn e.Expression.aggregateRow(r)\n}\n\nfunc (e *Expression) aggregateRow(r Record) error {\n\tfor _, ex := range e.And {\n\t\terr := ex.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *AndCondition) aggregateRow(r Record) error {\n\tfor _, ex := range e.Condition {\n\t\terr := ex.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *Condition) aggregateRow(r Record) error {\n\tif e.Operand != nil {\n\t\treturn e.Operand.aggregateRow(r)\n\t}\n\treturn e.Not.aggregateRow(r)\n}\n\nfunc (e *ConditionOperand) aggregateRow(r Record) error {\n\terr := e.Operand.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif e.ConditionRHS == nil {\n\t\treturn nil\n\t}\n\n\tswitch {\n\tcase e.ConditionRHS.Compare != nil:\n\t\treturn e.ConditionRHS.Compare.Operand.aggregateRow(r)\n\tcase e.ConditionRHS.Between != nil:\n\t\terr = e.ConditionRHS.Between.Start.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.ConditionRHS.Between.End.aggregateRow(r)\n\tcase e.ConditionRHS.In != nil:\n\t\tfor _, elt := range e.ConditionRHS.In.Expressions {\n\t\t\terr = elt.aggregateRow(r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase e.ConditionRHS.Like != nil:\n\t\terr = e.ConditionRHS.Like.Pattern.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.ConditionRHS.Like.EscapeChar.aggregateRow(r)\n\tdefault:\n\t\treturn errInvalidASTNode\n\t}\n}\n\nfunc (e *Operand) aggregateRow(r Record) error {\n\terr := e.Left.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rt := range e.Right {\n\t\terr = rt.Right.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *MultOp) aggregateRow(r Record) error {\n\terr := e.Left.aggregateRow(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, rt := range e.Right {\n\t\terr = rt.Right.aggregateRow(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *UnaryTerm) aggregateRow(r Record) error {\n\tif e.Negated != nil {\n\t\treturn e.Negated.Term.aggregateRow(r)\n\t}\n\treturn e.Primary.aggregateRow(r)\n}\n\nfunc (e *PrimaryTerm) aggregateRow(r Record) error {\n\tswitch {\n\tcase e.SubExpression != nil:\n\t\treturn e.SubExpression.aggregateRow(r)\n\tcase e.FuncCall != nil:\n\t\treturn e.FuncCall.aggregateRow(r)\n\t}\n\treturn nil\n}\n\nfunc (e *FuncExpr) aggregateRow(r Record) error {\n\tswitch e.getFunctionName() {\n\tcase aggFnAvg, aggFnSum, aggFnMax, aggFnMin, aggFnCount:\n\t\treturn e.evalAggregationNode(r)\n\tdefault:\n\t\t\/\/ TODO: traverse arguments and call aggregateRow on\n\t\t\/\/ them if they could be an ancestor of an\n\t\t\/\/ aggregation.\n\t}\n\treturn nil\n}\n\n\/\/ getAggregate() implementation for each AST node follows. This is\n\/\/ called after calling aggregateRow() on each input row, to calculate\n\/\/ the final aggregate result.\n\nfunc (e *FuncExpr) getAggregate() (*Value, error) {\n\tswitch e.getFunctionName() {\n\tcase aggFnCount:\n\t\treturn FromInt(e.aggregate.runningCount), nil\n\n\tcase aggFnAvg:\n\t\tif e.aggregate.runningCount == 0 {\n\t\t\t\/\/ No rows were seen by AVG.\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\terr := e.aggregate.runningSum.arithOp(opDivide, FromInt(e.aggregate.runningCount))\n\t\treturn e.aggregate.runningSum, err\n\n\tcase aggFnMin:\n\t\tif !e.aggregate.seen {\n\t\t\t\/\/ No rows were seen by MIN\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\treturn e.aggregate.runningMin, nil\n\n\tcase aggFnMax:\n\t\tif !e.aggregate.seen {\n\t\t\t\/\/ No rows were seen by MAX\n\t\t\treturn FromNull(), nil\n\t\t}\n\t\treturn e.aggregate.runningMax, nil\n\n\tcase aggFnSum:\n\t\t\/\/ TODO: check if returning 0 when no rows were seen\n\t\t\/\/ by SUM is expected behavior.\n\t\treturn e.aggregate.runningSum, nil\n\n\tdefault:\n\t\t\/\/ TODO:\n\t}\n\n\treturn nil, errInvalidAggregation\n}\n<|endoftext|>"} {"text":"<commit_before>package v1\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/metadata\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/paymentd\/principal\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/service\"\n\t\"github.com\/gorilla\/mux\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype PrincipalAdminAPIResponse struct {\n\tAdminAPIResponse\n}\n\n\/\/ handler to create or change a principal\n\/\/\n\/\/ PUT creates new principal\n\/\/ POST can be used to change the principals metadata\nfunc (a *AdminAPI) PrincipalRequest() http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tlog := a.log.New(log15.Ctx{\"method\": \"PrincipalRequest\"})\n\n\t\tswitch r.Method {\n\t\tcase \"PUT\":\n\t\t\ta.putNewPrincipal(w, r)\n\t\tcase \"POST\":\n\t\t\ta.postChangePrincipal(w, r)\n\t\tdefault:\n\t\t\tErrMethod.Write(w)\n\t\t\tlog.Info(\"http method not supported\", log15.Ctx{\"requestMethod\": r.Method})\n\t\t}\n\t})\n}\n\n\/\/ handler to display a specific existing principal\nfunc (a *AdminAPI) PrincipalGetRequest() http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tlog := a.log.New(log15.Ctx{\"method\": \"PrincipalGetRequest\"})\n\n\t\t\/\/ get principal by name\n\t\tvars := mux.Vars(r)\n\t\tprincipalName := vars[\"name\"]\n\n\t\tlog = log.New(log15.Ctx{\"principalName\": principalName})\n\n\t\tdb := a.ctx.PrincipalDB(service.ReadOnly)\n\t\tpr, err := principal.PrincipalByNameDB(db, principalName)\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tErrNotFound.Write(w)\n\t\t\tlog.Info(\"principal not found\")\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tErrDatabase.Write(w)\n\t\t\tlog.Error(\"DB get by name failed\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t\tmd, err := metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\t\tif err != nil {\n\t\t\tErrDatabase.Write(w)\n\t\t\tlog.Error(\"get metadata failed\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t\tif len(md) > 0 {\n\t\t\tpr.Metadata = md.Values()\n\t\t}\n\n\t\t\/\/ create service response object\n\t\tresp := PrincipalAdminAPIResponse{}\n\t\tresp.Status = StatusSuccess\n\t\tresp.Info = \"principal \" + pr.Name + \" found\"\n\t\tresp.Response = pr\n\t\terr = resp.Write(w)\n\t\tif err != nil {\n\t\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t})\n}\n\nfunc (a *AdminAPI) putNewPrincipal(w http.ResponseWriter, r *http.Request) {\n\tlog := a.log.New(log15.Ctx{\"method\": \"putNewPrincipal\"})\n\n\t\/\/ create new principal\n\tjd := json.NewDecoder(r.Body)\n\tpr := principal.Principal{}\n\terr := jd.Decode(&pr)\n\tr.Body.Close()\n\tif err != nil {\n\t\tErrReadJson.Write(w)\n\t\tlog.Error(\"json decode failed\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\tlog = log.New(log15.Ctx{\"principalName\": pr.Name})\n\n\tauth, err := getAuthContainer(r)\n\tif err != nil {\n\t\tlog.Crit(\"context auth error\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n\tpr.CreatedBy = auth[AuthUserIDKey].(string)\n\t\/\/ set created time\n\tpr.Created = time.Now()\n\n\t\/\/ check if principal exists\n\tdb := a.ctx.PrincipalDB()\n\t_, err = principal.PrincipalByNameDB(db, pr.Name)\n\tif err != nil && err != principal.ErrPrincipalNotFound {\n\t\t\/\/ other db error\n\t\tlog.Error(\"DB get by name failed\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t} else if err == nil {\n\t\t\/\/ already exists\n\t\tlog.Warn(\"principal already exists\")\n\t\tErrConflict.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ insert pr if not exists\n\t\/\/ start dbtx to save metadata and pr together\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Crit(\"Tx begin failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ insert pr\n\terr = principal.InsertPrincipalTx(tx, &pr)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tErrDatabase.Write(w)\n\t\tlog.Error(\"TX insert failed.\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\t\/\/ insert metadata\n\tmd := metadata.MetadataFromValues(pr.Metadata, pr.CreatedBy)\n\terr = metadata.InsertMetadataTx(tx, principal.MetadataModel, pr.ID, md)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"TX metadata insert failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/commit tx\n\terr = tx.Commit()\n\tif err != nil {\n\t\tErrDatabase.Write(w)\n\t\tlog.Crit(\"TX commit failed.\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\t\/\/ get data explicit from DB\n\tpr, err = principal.PrincipalByNameDB(db, pr.Name)\n\tif err != nil {\n\t\tlog.Error(\"DB get by name failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tmd, err = metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\tif len(md) > 0 {\n\t\tpr.Metadata = md.Values()\n\t}\n\tif err != nil {\n\t\tlog.Error(\"get metadata failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\tresp := PrincipalAdminAPIResponse{}\n\tresp.HttpStatus = http.StatusOK\n\tresp.Status = StatusSuccess\n\tresp.Response = pr\n\tresp.Info = \"principal \" + pr.Name + \" created\"\n\terr = resp.Write(w)\n\tif err != nil {\n\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n}\n\n\/\/ post method to add and change the metadata\nfunc (a *AdminAPI) postChangePrincipal(w http.ResponseWriter, r *http.Request) {\n\tlog := a.log.New(log15.Ctx{\"method\": \"postChangePrincipal\"})\n\n\t\/\/ get Metadata from post variables\n\tjd := json.NewDecoder(r.Body)\n\tpr := principal.Principal{}\n\terr := jd.Decode(&pr)\n\tr.Body.Close()\n\tif err != nil {\n\t\tlog.Error(\"json decode failed\", log15.Ctx{\"err\": err})\n\t\tErrReadJson.Write(w)\n\t\treturn\n\t}\n\n\tauth, err := getAuthContainer(r)\n\tif err != nil {\n\t\tlog.Crit(\"error getting auth container\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n\tpr.CreatedBy = auth[AuthUserIDKey].(string)\n\n\tlog = log.New(log15.Ctx{\"principalName\": pr.Name})\n\n\t\/\/ open transaction to add the posted metadata\n\ttx, err := a.ctx.PrincipalDB().Begin()\n\tif err != nil {\n\t\tlog.Crit(\"Tx begin failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ does principal exist\n\tpr.ID, err = principal.PrincipalIDByNameTx(tx, pr.Name)\n\tif err != nil {\n\t\ttxErr := tx.Rollback()\n\t\tif txErr != nil {\n\t\t\tlog.Crit(\"error on rollback\", log15.Ctx{\"err\": err})\n\t\t}\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tlog.Warn(\"principal not found\")\n\t\t\tErrNotFound.Write(w)\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"get principal from DB failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\tmd := metadata.MetadataFromValues(pr.Metadata, pr.CreatedBy)\n\n\terr = metadata.InsertMetadataTx(tx, principal.MetadataModel, pr.ID, md)\n\tif err != nil {\n\t\ttxErr := tx.Rollback()\n\t\tif txErr != nil {\n\t\t\tlog.Crit(\"error on rollback\", log15.Ctx{\"err\": err})\n\t\t}\n\t\tlog.Error(\"insert metadata failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlog.Crit(\"error on commit\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ get stored data from db\n\tdb := a.ctx.PrincipalDB(service.ReadOnly)\n\tpr, err = principal.PrincipalByNameDB(db, pr.Name)\n\tif err != nil {\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tlog.Error(\"principal not found\")\n\t\t\tErrNotFound.Write(w)\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"DB get by name failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tmd, err = metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\tif err != nil {\n\t\tlog.Error(\"get metadata failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tif len(md) > 0 {\n\t\tpr.Metadata = md.Values()\n\t}\n\n\t\/\/ create response\n\tresp := PrincipalAdminAPIResponse{}\n\tresp.Info = \"principal \" + pr.Name + \" changed\"\n\tresp.Status = StatusSuccess\n\tresp.Response = pr\n\terr = resp.Write(w)\n\tif err != nil {\n\t\tErrSystem.Write(w)\n\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n}\n<commit_msg>fix transaction handling on put principal<commit_after>package v1\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/metadata\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/paymentd\/principal\"\n\t\"github.com\/fritzpay\/paymentd\/pkg\/service\"\n\t\"github.com\/gorilla\/mux\"\n\t\"gopkg.in\/inconshreveable\/log15.v2\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype PrincipalAdminAPIResponse struct {\n\tAdminAPIResponse\n}\n\n\/\/ handler to create or change a principal\n\/\/\n\/\/ PUT creates new principal\n\/\/ POST can be used to change the principals metadata\nfunc (a *AdminAPI) PrincipalRequest() http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tlog := a.log.New(log15.Ctx{\"method\": \"PrincipalRequest\"})\n\n\t\tswitch r.Method {\n\t\tcase \"PUT\":\n\t\t\ta.putNewPrincipal(w, r)\n\t\tcase \"POST\":\n\t\t\ta.postChangePrincipal(w, r)\n\t\tdefault:\n\t\t\tErrMethod.Write(w)\n\t\t\tlog.Info(\"http method not supported\", log15.Ctx{\"requestMethod\": r.Method})\n\t\t}\n\t})\n}\n\n\/\/ handler to display a specific existing principal\nfunc (a *AdminAPI) PrincipalGetRequest() http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tlog := a.log.New(log15.Ctx{\"method\": \"PrincipalGetRequest\"})\n\n\t\t\/\/ get principal by name\n\t\tvars := mux.Vars(r)\n\t\tprincipalName := vars[\"name\"]\n\n\t\tlog = log.New(log15.Ctx{\"principalName\": principalName})\n\n\t\tdb := a.ctx.PrincipalDB(service.ReadOnly)\n\t\tpr, err := principal.PrincipalByNameDB(db, principalName)\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tErrNotFound.Write(w)\n\t\t\tlog.Info(\"principal not found\")\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tErrDatabase.Write(w)\n\t\t\tlog.Error(\"DB get by name failed\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t\tmd, err := metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\t\tif err != nil {\n\t\t\tErrDatabase.Write(w)\n\t\t\tlog.Error(\"get metadata failed\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t\tif len(md) > 0 {\n\t\t\tpr.Metadata = md.Values()\n\t\t}\n\n\t\t\/\/ create service response object\n\t\tresp := PrincipalAdminAPIResponse{}\n\t\tresp.Status = StatusSuccess\n\t\tresp.Info = \"principal \" + pr.Name + \" found\"\n\t\tresp.Response = pr\n\t\terr = resp.Write(w)\n\t\tif err != nil {\n\t\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\t})\n}\n\nfunc (a *AdminAPI) putNewPrincipal(w http.ResponseWriter, r *http.Request) {\n\tlog := a.log.New(log15.Ctx{\"method\": \"putNewPrincipal\"})\n\n\t\/\/ create new principal\n\tjd := json.NewDecoder(r.Body)\n\tpr := principal.Principal{}\n\terr := jd.Decode(&pr)\n\tr.Body.Close()\n\tif err != nil {\n\t\tErrReadJson.Write(w)\n\t\tlog.Error(\"json decode failed\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\tlog = log.New(log15.Ctx{\"principalName\": pr.Name})\n\n\tauth, err := getAuthContainer(r)\n\tif err != nil {\n\t\tlog.Crit(\"context auth error\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n\tpr.CreatedBy = auth[AuthUserIDKey].(string)\n\t\/\/ set created time\n\tpr.Created = time.Now()\n\n\t\/\/ insert pr if not exists\n\t\/\/ start dbtx to save metadata and pr together\n\ttx, err := a.ctx.PrincipalDB().Begin()\n\tif err != nil {\n\t\tlog.Crit(\"Tx begin failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ check if principal exists\n\t_, err = principal.PrincipalByNameTx(tx, pr.Name)\n\tif err != nil && err != principal.ErrPrincipalNotFound {\n\t\t\/\/ other db error\n\t\tlog.Error(\"DB get by name failed\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t} else if err == nil {\n\t\t\/\/ already exists\n\t\tlog.Warn(\"principal already exists\")\n\t\tErrConflict.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ insert pr\n\terr = principal.InsertPrincipalTx(tx, &pr)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tErrDatabase.Write(w)\n\t\tlog.Error(\"TX insert failed.\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\t\/\/ insert metadata\n\tmd := metadata.MetadataFromValues(pr.Metadata, pr.CreatedBy)\n\terr = metadata.InsertMetadataTx(tx, principal.MetadataModel, pr.ID, md)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"TX metadata insert failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/commit tx\n\terr = tx.Commit()\n\tif err != nil {\n\t\tErrDatabase.Write(w)\n\t\tlog.Crit(\"TX commit failed.\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n\n\t\/\/ get data explicit from DB\n\tdb := a.ctx.PrincipalDB(service.ReadOnly)\n\tpr, err = principal.PrincipalByNameDB(db, pr.Name)\n\tif err != nil {\n\t\tlog.Error(\"DB get by name failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tmd, err = metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\tif len(md) > 0 {\n\t\tpr.Metadata = md.Values()\n\t}\n\tif err != nil {\n\t\tlog.Error(\"get metadata failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\tresp := PrincipalAdminAPIResponse{}\n\tresp.HttpStatus = http.StatusOK\n\tresp.Status = StatusSuccess\n\tresp.Response = pr\n\tresp.Info = \"principal \" + pr.Name + \" created\"\n\terr = resp.Write(w)\n\tif err != nil {\n\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n}\n\n\/\/ post method to add and change the metadata\nfunc (a *AdminAPI) postChangePrincipal(w http.ResponseWriter, r *http.Request) {\n\tlog := a.log.New(log15.Ctx{\"method\": \"postChangePrincipal\"})\n\n\t\/\/ get Metadata from post variables\n\tjd := json.NewDecoder(r.Body)\n\tpr := principal.Principal{}\n\terr := jd.Decode(&pr)\n\tr.Body.Close()\n\tif err != nil {\n\t\tlog.Error(\"json decode failed\", log15.Ctx{\"err\": err})\n\t\tErrReadJson.Write(w)\n\t\treturn\n\t}\n\n\tauth, err := getAuthContainer(r)\n\tif err != nil {\n\t\tlog.Crit(\"error getting auth container\", log15.Ctx{\"err\": err})\n\t\tErrSystem.Write(w)\n\t\treturn\n\t}\n\tpr.CreatedBy = auth[AuthUserIDKey].(string)\n\n\tlog = log.New(log15.Ctx{\"principalName\": pr.Name})\n\n\t\/\/ open transaction to add the posted metadata\n\ttx, err := a.ctx.PrincipalDB().Begin()\n\tif err != nil {\n\t\tlog.Crit(\"Tx begin failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ does principal exist\n\tpr.ID, err = principal.PrincipalIDByNameTx(tx, pr.Name)\n\tif err != nil {\n\t\ttxErr := tx.Rollback()\n\t\tif txErr != nil {\n\t\t\tlog.Crit(\"error on rollback\", log15.Ctx{\"err\": err})\n\t\t}\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tlog.Warn(\"principal not found\")\n\t\t\tErrNotFound.Write(w)\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"get principal from DB failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\tmd := metadata.MetadataFromValues(pr.Metadata, pr.CreatedBy)\n\n\terr = metadata.InsertMetadataTx(tx, principal.MetadataModel, pr.ID, md)\n\tif err != nil {\n\t\ttxErr := tx.Rollback()\n\t\tif txErr != nil {\n\t\t\tlog.Crit(\"error on rollback\", log15.Ctx{\"err\": err})\n\t\t}\n\t\tlog.Error(\"insert metadata failed\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlog.Crit(\"error on commit\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\n\t\/\/ get stored data from db\n\tdb := a.ctx.PrincipalDB(service.ReadOnly)\n\tpr, err = principal.PrincipalByNameDB(db, pr.Name)\n\tif err != nil {\n\t\tif err == principal.ErrPrincipalNotFound {\n\t\t\tlog.Error(\"principal not found\")\n\t\t\tErrNotFound.Write(w)\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"DB get by name failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tmd, err = metadata.MetadataByPrimaryDB(db, principal.MetadataModel, pr.ID)\n\tif err != nil {\n\t\tlog.Error(\"get metadata failed.\", log15.Ctx{\"err\": err})\n\t\tErrDatabase.Write(w)\n\t\treturn\n\t}\n\tif len(md) > 0 {\n\t\tpr.Metadata = md.Values()\n\t}\n\n\t\/\/ create response\n\tresp := PrincipalAdminAPIResponse{}\n\tresp.Info = \"principal \" + pr.Name + \" changed\"\n\tresp.Status = StatusSuccess\n\tresp.Response = pr\n\terr = resp.Write(w)\n\tif err != nil {\n\t\tErrSystem.Write(w)\n\t\tlog.Error(\"write error\", log15.Ctx{\"err\": err})\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/yaml.v2\"\n\tk8syaml \"k8s.io\/apimachinery\/pkg\/util\/yaml\"\n)\n\ntype yamlObject map[interface{}]interface{}\n\n\/\/ These are the required fields for a yaml document to be a valid Kubernetes yaml\nvar requiredFields = []string{\"apiVersion\", \"kind\", \"metadata\"}\n\n\/\/ These are the supported file formats for Kubernetes manifests\nvar validSuffixes = []string{\".yml\", \".yaml\", \".json\"}\n\n\/\/ HasKubernetesFileExtension is for determining if a file under a glob pattern\n\/\/ is deployable file format. It makes no attempt to check whether or not the file\n\/\/ is actually deployable or has the correct contents.\nfunc HasKubernetesFileExtension(n string) bool {\n\tfor _, s := range validSuffixes {\n\t\tif strings.HasSuffix(n, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsKubernetesManifest is for determining if a file is a valid Kubernetes manifest\nfunc IsKubernetesManifest(file string) bool {\n\tif !HasKubernetesFileExtension(file) {\n\t\treturn false\n\t}\n\n\t_, err := parseKubernetesObjects(file)\n\treturn err == nil\n}\n\n\/\/ ParseImagesFromKubernetesYaml parses the kubernetes yamls, and if it finds at least one\n\/\/ valid Kubernetes object, it will return the images referenced in them.\nfunc ParseImagesFromKubernetesYaml(filepath string) ([]string, error) {\n\tk8sObjects, err := parseKubernetesObjects(filepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timages := []string{}\n\tfor _, k8sObject := range k8sObjects {\n\t\timages = append(images, parseImagesFromYaml(k8sObject)...)\n\t}\n\treturn images, nil\n}\n\n\/\/ parseKubernetesObjects uses required fields from the k8s spec\n\/\/ to determine if a provided yaml file is a valid k8s manifest, as detailed in\n\/\/ https:\/\/kubernetes.io\/docs\/concepts\/overview\/working-with-objects\/kubernetes-objects\/#required-fields.\n\/\/ If so, it will return the parsed objects.\nfunc parseKubernetesObjects(filepath string) ([]yamlObject, error) {\n\tf, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"opening config file\")\n\t}\n\tr := k8syaml.NewYAMLReader(bufio.NewReader(f))\n\n\tvar k8sObjects []yamlObject\n\n\tfor {\n\t\tdoc, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"reading config file\")\n\t\t}\n\n\t\tobj := make(yamlObject)\n\t\tif err := yaml.Unmarshal(doc, &obj); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"reading Kubernetes YAML\")\n\t\t}\n\n\t\tif !hasRequiredK8sManifestFields(obj, filepath) {\n\t\t\tcontinue\n\t\t}\n\n\t\tk8sObjects = append(k8sObjects, obj)\n\t}\n\tif len(k8sObjects) == 0 {\n\t\treturn nil, errors.New(\"no valid Kubernetes objects decoded\")\n\t}\n\treturn k8sObjects, nil\n}\n\nfunc hasRequiredK8sManifestFields(doc map[interface{}]interface{}, fileName string) bool {\n\tfor _, field := range requiredFields {\n\t\tif _, ok := doc[field]; !ok {\n\t\t\tlogrus.Tracef(\"%s not present in %s, continuing\", field, fileName)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ adapted from pkg\/skaffold\/deploy\/kubectl\/recursiveReplaceImage()\nfunc parseImagesFromYaml(obj interface{}) []string {\n\timages := []string{}\n\tswitch t := obj.(type) {\n\tcase []interface{}:\n\t\tfor _, v := range t {\n\t\t\timages = append(images, parseImagesFromYaml(v)...)\n\t\t}\n\tcase yamlObject:\n\t\tfor k, v := range t {\n\t\t\tif k.(string) != \"image\" {\n\t\t\t\timages = append(images, parseImagesFromYaml(v)...)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\timages = append(images, v.(string))\n\t\t}\n\t}\n\treturn images\n}\n<commit_msg>revert extra debugging<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kubernetes\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/yaml.v2\"\n\tk8syaml \"k8s.io\/apimachinery\/pkg\/util\/yaml\"\n)\n\ntype yamlObject map[interface{}]interface{}\n\n\/\/ These are the required fields for a yaml document to be a valid Kubernetes yaml\nvar requiredFields = []string{\"apiVersion\", \"kind\", \"metadata\"}\n\n\/\/ These are the supported file formats for Kubernetes manifests\nvar validSuffixes = []string{\".yml\", \".yaml\", \".json\"}\n\n\/\/ HasKubernetesFileExtension is for determining if a file under a glob pattern\n\/\/ is deployable file format. It makes no attempt to check whether or not the file\n\/\/ is actually deployable or has the correct contents.\nfunc HasKubernetesFileExtension(n string) bool {\n\tfor _, s := range validSuffixes {\n\t\tif strings.HasSuffix(n, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsKubernetesManifest is for determining if a file is a valid Kubernetes manifest\nfunc IsKubernetesManifest(file string) bool {\n\tif !HasKubernetesFileExtension(file) {\n\t\treturn false\n\t}\n\n\t_, err := parseKubernetesObjects(file)\n\treturn err == nil\n}\n\n\/\/ ParseImagesFromKubernetesYaml parses the kubernetes yamls, and if it finds at least one\n\/\/ valid Kubernetes object, it will return the images referenced in them.\nfunc ParseImagesFromKubernetesYaml(filepath string) ([]string, error) {\n\tk8sObjects, err := parseKubernetesObjects(filepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timages := []string{}\n\tfor _, k8sObject := range k8sObjects {\n\t\timages = append(images, parseImagesFromYaml(k8sObject)...)\n\t}\n\treturn images, nil\n}\n\n\/\/ parseKubernetesObjects uses required fields from the k8s spec\n\/\/ to determine if a provided yaml file is a valid k8s manifest, as detailed in\n\/\/ https:\/\/kubernetes.io\/docs\/concepts\/overview\/working-with-objects\/kubernetes-objects\/#required-fields.\n\/\/ If so, it will return the parsed objects.\nfunc parseKubernetesObjects(filepath string) ([]yamlObject, error) {\n\tf, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"opening config file\")\n\t}\n\tr := k8syaml.NewYAMLReader(bufio.NewReader(f))\n\n\tvar k8sObjects []yamlObject\n\n\tfor {\n\t\tdoc, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"reading config file\")\n\t\t}\n\n\t\tobj := make(yamlObject)\n\t\tif err := yaml.Unmarshal(doc, &obj); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"reading Kubernetes YAML\")\n\t\t}\n\n\t\tif !hasRequiredK8sManifestFields(obj) {\n\t\t\tcontinue\n\t\t}\n\n\t\tk8sObjects = append(k8sObjects, obj)\n\t}\n\tif len(k8sObjects) == 0 {\n\t\treturn nil, errors.New(\"no valid Kubernetes objects decoded\")\n\t}\n\treturn k8sObjects, nil\n}\n\nfunc hasRequiredK8sManifestFields(doc map[interface{}]interface{}) bool {\n\tfor _, field := range requiredFields {\n\t\tif _, ok := doc[field]; !ok {\n\t\t\tlogrus.Debugf(\"%s not present in yaml, continuing\", field)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ adapted from pkg\/skaffold\/deploy\/kubectl\/recursiveReplaceImage()\nfunc parseImagesFromYaml(obj interface{}) []string {\n\timages := []string{}\n\tswitch t := obj.(type) {\n\tcase []interface{}:\n\t\tfor _, v := range t {\n\t\t\timages = append(images, parseImagesFromYaml(v)...)\n\t\t}\n\tcase yamlObject:\n\t\tfor k, v := range t {\n\t\t\tif k.(string) != \"image\" {\n\t\t\t\timages = append(images, parseImagesFromYaml(v)...)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\timages = append(images, v.(string))\n\t\t}\n\t}\n\treturn images\n}\n<|endoftext|>"} {"text":"<commit_before>package onedrive\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestGetDefaultDriveValid(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\ttestFile := \"fixtures\/drive.valid.json\"\n\n\tfb, err := ioutil.ReadFile(testFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar expectedDrive Drive\n\tif err := json.Unmarshal(fb, &expectedDrive); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmux.HandleFunc(\"\/drive\", fileWrapperHandler(testFile))\n\n\tdrive, resp, err := oneDrive.Drives.GetDefaultDrive()\n\tif err != nil {\n\t\tt.Fatalf(\"Problem fetching the default drive: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"Expected response to be 200 got %d\", resp.StatusCode)\n\t}\n\n\tif drive.ID != expectedDrive.ID {\n\t\tt.Fatalf(\"Expected ID to be %q, but got %q\", expectedDrive.ID, drive.ID)\n\t}\n\n}\n<commit_msg>Add test for GetRootDrive.<commit_after>package onedrive\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestGetDefaultDrive(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\ttestFile := \"fixtures\/drive.valid.json\"\n\n\tfb, err := ioutil.ReadFile(testFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar expectedDrive Drive\n\tif err := json.Unmarshal(fb, &expectedDrive); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmux.HandleFunc(\"\/drive\", fileWrapperHandler(testFile))\n\n\tdefaultDrive, resp, err := oneDrive.Drives.GetDefaultDrive()\n\tif err != nil {\n\t\tt.Fatalf(\"Problem fetching the default drive: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"Expected response to be 200 got %d\", resp.StatusCode)\n\t}\n\n\tif got, want := defaultDrive.ID, expectedDrive.ID; got != want {\n\t\tt.Fatalf(\"Got %q Expected %q\", got, want)\n\t}\n\n\tif got, want := defaultDrive.Owner.User.DisplayName, expectedDrive.Owner.User.DisplayName; got != want {\n\t\tt.Fatalf(\"Got %q Expected %q\", got, want)\n\t}\n\n}\n\nfunc TestGetRootDrive(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\ttestFile := \"fixtures\/drive.valid.json\"\n\n\tfb, err := ioutil.ReadFile(testFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar expectedDrive Drive\n\tif err := json.Unmarshal(fb, &expectedDrive); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmux.HandleFunc(\"\/drive\/root\", fileWrapperHandler(testFile))\n\n\trootDrive, resp, err := oneDrive.Drives.GetRootDrive()\n\tif err != nil {\n\t\tt.Fatalf(\"Problem fetching the root drive: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"Expected response to be 200 got %d\", resp.StatusCode)\n\t}\n\n\tif got, want := rootDrive.ID, expectedDrive.ID; got != want {\n\t\tt.Fatalf(\"Got %q Expected %q\", got, want)\n\t}\n\n\tif got, want := rootDrive.Owner.User.DisplayName, expectedDrive.Owner.User.DisplayName; got != want {\n\t\tt.Fatalf(\"Got %q Expected %q\", got, want)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/concatjs\"\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/devserver\"\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/runfiles\"\n)\n\nvar (\n\tport = flag.Int(\"port\", 5432, \"server port to listen on\")\n\t\/\/ The \"base\" CLI flag is only kept because within Google3 because removing would be a breaking change due to\n\t\/\/ ConcatJS and \"devserver\/devserver.go\" still respecting the specified base flag.\n\tbase = flag.String(\"base\", \"\", \"server base (required, runfiles of the binary)\")\n\tpkgs = flag.String(\"packages\", \"\", \"root package(s) to serve, comma-separated\")\n\tmanifest = flag.String(\"manifest\", \"\", \"sources manifest (.MF)\")\n\tscriptsManifest = flag.String(\"scripts_manifest\", \"\", \"preScripts manifest (.MF)\")\n\tservingPath = flag.String(\"serving_path\", \"\/_\/ts_scripts.js\", \"path to serve the combined sources at\")\n\tentryModule = flag.String(\"entry_module\", \"\", \"entry module name\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif len(*pkgs) == 0 || (*manifest == \"\") || (*scriptsManifest == \"\") {\n\t\tfmt.Fprintf(os.Stderr, \"Required argument not set\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tmanifestPath, err := runfiles.Runfile(*base, *scriptsManifest)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to find scripts_manifest in runfiles: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tscriptFiles, err := manifestFiles(manifestPath)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to read scripts_manifest: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif !strings.HasPrefix(*servingPath, \"\/\") {\n\t\tfmt.Fprintf(os.Stderr, \"The specified serving_path does not start with a slash. \"+\n\t\t\t\"This causes the serving path to not have any effect.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tpreScripts := make([]string, 0, 100)\n\tpostScripts := make([]string, 0, 1)\n\n\t\/\/ Include the livereload script if IBAZEL_LIVERELOAD_URL is set.\n\tlivereloadUrl := os.Getenv(\"IBAZEL_LIVERELOAD_URL\")\n\tif livereloadUrl != \"\" {\n\t\tfmt.Printf(\"Serving livereload script from %s\\n\", livereloadUrl)\n\t\tlivereloadLoaderSnippet := fmt.Sprintf(`(function(){\n\tconst script = document.createElement('script');\n\tscript.src = \"%s\";\n\tdocument.head.appendChild(script);\n})();`, livereloadUrl)\n\t\tpreScripts = append(preScripts, livereloadLoaderSnippet)\n\t}\n\n\t\/\/ Include the profiler script if IBAZEL_PROFILER_URL is set.\n\tprofilerScriptURL := os.Getenv(\"IBAZEL_PROFILER_URL\")\n\tif profilerScriptURL != \"\" {\n\t\tfmt.Printf(\"Serving profiler script from %s\\n\", profilerScriptURL)\n\t\tprofilerLoaderSnippet := fmt.Sprintf(`(function(){\n\tconst script = document.createElement('script');\n\tscript.src = \"%s\";\n\tdocument.head.appendChild(script);\n})();`, profilerScriptURL)\n\t\tpreScripts = append(preScripts, profilerLoaderSnippet)\n\t}\n\n\t\/\/ Include all user scripts in preScripts. This should always include\n\t\/\/ the requirejs script which is added to scriptFiles by the devserver\n\t\/\/ skylark rule.\n\tfor _, v := range scriptFiles {\n\t\trunfile, err := runfiles.Runfile(*base, v)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not find runfile %s, got error %s\", v, err)\n\t\t}\n\n\t\tjs, err := loadScript(runfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Failed to read script %s: %v\\n\", v, err)\n\t\t} else {\n\t\t\tpreScripts = append(preScripts, js)\n\t\t}\n\t}\n\n\t\/\/ If the entryModule is set then add a snippet to load\n\t\/\/ the application to postScripts to be outputted after the sources\n\tif *entryModule != \"\" {\n\t\tpostScripts = append(postScripts, fmt.Sprintf(\"require([\\\"%s\\\"]);\", *entryModule))\n\t}\n\n\thttp.Handle(*servingPath, concatjs.ServeConcatenatedJS(*manifest, *base, preScripts, postScripts,\n\t\t&RunfileFileSystem{}))\n\tpkgList := strings.Split(*pkgs, \",\")\n\thttp.HandleFunc(\"\/\", devserver.CreateFileHandler(*servingPath, *manifest, pkgList, *base))\n\n\th, err := os.Hostname()\n\tif err != nil {\n\t\th = \"localhost\"\n\t}\n\n\tfmt.Printf(\"Server listening on http:\/\/%s:%d\/\\n\", h, *port)\n\tfmt.Fprintln(os.Stderr, http.ListenAndServe(fmt.Sprintf(\":%d\", *port), nil).Error())\n\tos.Exit(1)\n}\n\nfunc loadScript(path string) (string, error) {\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), nil\n}\n\n\/\/ manifestFiles parses a manifest, returning a list of the files in the manifest.\nfunc manifestFiles(manifest string) ([]string, error) {\n\tf, err := os.Open(manifest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read manifest %s: %s\", manifest, err)\n\t}\n\tdefer f.Close()\n\treturn manifestFilesFromReader(f)\n}\n\n\/\/ manifestFilesFromReader is a helper for manifestFiles, split out for testing.\nfunc manifestFilesFromReader(r io.Reader) ([]string, error) {\n\tvar lines []string\n\ts := bufio.NewScanner(r)\n\tfor s.Scan() {\n\t\tpath := s.Text()\n\t\tif path == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tlines = append(lines, path)\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lines, nil\n}\n<commit_msg>Print a working hostname when running ts_devserver on crostini<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/concatjs\"\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/devserver\"\n\t\"github.com\/bazelbuild\/rules_typescript\/devserver\/runfiles\"\n)\n\nvar (\n\tport = flag.Int(\"port\", 5432, \"server port to listen on\")\n\t\/\/ The \"base\" CLI flag is only kept because within Google3 because removing would be a breaking change due to\n\t\/\/ ConcatJS and \"devserver\/devserver.go\" still respecting the specified base flag.\n\tbase = flag.String(\"base\", \"\", \"server base (required, runfiles of the binary)\")\n\tpkgs = flag.String(\"packages\", \"\", \"root package(s) to serve, comma-separated\")\n\tmanifest = flag.String(\"manifest\", \"\", \"sources manifest (.MF)\")\n\tscriptsManifest = flag.String(\"scripts_manifest\", \"\", \"preScripts manifest (.MF)\")\n\tservingPath = flag.String(\"serving_path\", \"\/_\/ts_scripts.js\", \"path to serve the combined sources at\")\n\tentryModule = flag.String(\"entry_module\", \"\", \"entry module name\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif len(*pkgs) == 0 || (*manifest == \"\") || (*scriptsManifest == \"\") {\n\t\tfmt.Fprintf(os.Stderr, \"Required argument not set\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tmanifestPath, err := runfiles.Runfile(*base, *scriptsManifest)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to find scripts_manifest in runfiles: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tscriptFiles, err := manifestFiles(manifestPath)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Failed to read scripts_manifest: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif !strings.HasPrefix(*servingPath, \"\/\") {\n\t\tfmt.Fprintf(os.Stderr, \"The specified serving_path does not start with a slash. \"+\n\t\t\t\"This causes the serving path to not have any effect.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tpreScripts := make([]string, 0, 100)\n\tpostScripts := make([]string, 0, 1)\n\n\t\/\/ Include the livereload script if IBAZEL_LIVERELOAD_URL is set.\n\tlivereloadUrl := os.Getenv(\"IBAZEL_LIVERELOAD_URL\")\n\tif livereloadUrl != \"\" {\n\t\tfmt.Printf(\"Serving livereload script from %s\\n\", livereloadUrl)\n\t\tlivereloadLoaderSnippet := fmt.Sprintf(`(function(){\n\tconst script = document.createElement('script');\n\tscript.src = \"%s\";\n\tdocument.head.appendChild(script);\n})();`, livereloadUrl)\n\t\tpreScripts = append(preScripts, livereloadLoaderSnippet)\n\t}\n\n\t\/\/ Include the profiler script if IBAZEL_PROFILER_URL is set.\n\tprofilerScriptURL := os.Getenv(\"IBAZEL_PROFILER_URL\")\n\tif profilerScriptURL != \"\" {\n\t\tfmt.Printf(\"Serving profiler script from %s\\n\", profilerScriptURL)\n\t\tprofilerLoaderSnippet := fmt.Sprintf(`(function(){\n\tconst script = document.createElement('script');\n\tscript.src = \"%s\";\n\tdocument.head.appendChild(script);\n})();`, profilerScriptURL)\n\t\tpreScripts = append(preScripts, profilerLoaderSnippet)\n\t}\n\n\t\/\/ Include all user scripts in preScripts. This should always include\n\t\/\/ the requirejs script which is added to scriptFiles by the devserver\n\t\/\/ skylark rule.\n\tfor _, v := range scriptFiles {\n\t\trunfile, err := runfiles.Runfile(*base, v)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not find runfile %s, got error %s\", v, err)\n\t\t}\n\n\t\tjs, err := loadScript(runfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Failed to read script %s: %v\\n\", v, err)\n\t\t} else {\n\t\t\tpreScripts = append(preScripts, js)\n\t\t}\n\t}\n\n\t\/\/ If the entryModule is set then add a snippet to load\n\t\/\/ the application to postScripts to be outputted after the sources\n\tif *entryModule != \"\" {\n\t\tpostScripts = append(postScripts, fmt.Sprintf(\"require([\\\"%s\\\"]);\", *entryModule))\n\t}\n\n\thttp.Handle(*servingPath, concatjs.ServeConcatenatedJS(*manifest, *base, preScripts, postScripts,\n\t\t&RunfileFileSystem{}))\n\tpkgList := strings.Split(*pkgs, \",\")\n\thttp.HandleFunc(\"\/\", devserver.CreateFileHandler(*servingPath, *manifest, pkgList, *base))\n\n\th, err := os.Hostname()\n\tif err != nil {\n\t\th = \"localhost\"\n\t}\n\t\/\/ Detect if we are running in a linux container inside ChromeOS\n\t\/\/ If so, we assume you want to use the native browser (outside the container)\n\t\/\/ so you'll need to modify the hostname to access the server\n\tif _, err := os.Stat(\"\/etc\/apt\/sources.list.d\/cros.list\"); err == nil {\n\t\th = h + \".linux.test\"\n\t}\n\n\tfmt.Printf(\"Server listening on http:\/\/%s:%d\/\\n\", h, *port)\n\tfmt.Fprintln(os.Stderr, http.ListenAndServe(fmt.Sprintf(\":%d\", *port), nil).Error())\n\tos.Exit(1)\n}\n\nfunc loadScript(path string) (string, error) {\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), nil\n}\n\n\/\/ manifestFiles parses a manifest, returning a list of the files in the manifest.\nfunc manifestFiles(manifest string) ([]string, error) {\n\tf, err := os.Open(manifest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read manifest %s: %s\", manifest, err)\n\t}\n\tdefer f.Close()\n\treturn manifestFilesFromReader(f)\n}\n\n\/\/ manifestFilesFromReader is a helper for manifestFiles, split out for testing.\nfunc manifestFilesFromReader(r io.Reader) ([]string, error) {\n\tvar lines []string\n\ts := bufio.NewScanner(r)\n\tfor s.Scan() {\n\t\tpath := s.Text()\n\t\tif path == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tlines = append(lines, path)\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lines, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package camo provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow list support.\npackage camo\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-camo\/camo\/encoding\"\n\t\"github.com\/cactus\/gologit\"\n\t\"github.com\/gorilla\/mux\"\n\thttpclient \"github.com\/mreiferson\/go-httpclient\"\n)\n\n\/\/ Config holds configuration data used when creating a Proxy with New.\ntype Config struct {\n\t\/\/ HMACKey is a byte slice to be used as the hmac key\n\tHMACKey []byte\n\t\/\/ AllowList is a list of string represenstations of regex (not compiled\n\t\/\/ regex) that are used as a whitelist filter. If an AllowList is present,\n\t\/\/ then anything not matching is dropped. If no AllowList is present,\n\t\/\/ no Allow filtering is done.\n\tAllowList []string\n\t\/\/ MaxSize is the maximum valid image size response (in bytes).\n\tMaxSize int64\n\t\/\/ MaxRedirects is the maximum number of redirects to follow.\n\tMaxRedirects int\n\t\/\/ Request timeout is a timeout for fetching upstream data.\n\tRequestTimeout time.Duration\n\t\/\/ Server name used in Headers and Via checks\n\tServerName string\n\t\/\/ Default headers to add to each response\n\tAddHeaders map[string]string\n}\n\n\/\/ ProxyMetrics interface for Proxy to use for stats\/metrics.\n\/\/ This must be goroutine safe, as AddBytes and AddServed will be called from\n\/\/ many goroutines.\ntype ProxyMetrics interface {\n\tAddBytes(bc int64)\n\tAddServed()\n}\n\n\/\/ A Proxy is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow list support.\ntype Proxy struct {\n\tclient *http.Client\n\tconfig *Config\n\t\/\/ compiled allow list regex\n\tallowList []*regexp.Regexp\n\tmetrics ProxyMetrics\n}\n\n\/\/ NotFound handles client requests that did not match a valid\n\/\/ mux route.\nfunc (p *Proxy) NotFoundHandler() http.Handler {\n\thandler := func(w http.ResponseWriter, req *http.Request) {\n\t\th := w.Header()\n\t\tfor k, v := range p.config.AddHeaders {\n\t\t\th.Set(k, v)\n\t\t}\n\t\th.Set(\"Server\", p.config.ServerName)\n\t\th.Set(\"Date\", formattedDate.String())\n\t\thttp.Error(w, \"404 Not Found\", 404)\n\t}\n\treturn http.HandlerFunc(handler)\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for\n\/\/ proper image content types.\nfunc (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tgologit.Debugln(\"Request:\", req.URL)\n\tif p.metrics != nil {\n\t\tgo p.metrics.AddServed()\n\t}\n\n\t\/\/ add default headers\n\twh := w.Header()\n\tfor k, v := range p.config.AddHeaders {\n\t\twh.Set(k, v)\n\t}\n\twh.Set(\"Server\", p.config.ServerName)\n\twh.Set(\"Date\", formattedDate.String())\n\n\tif req.Header.Get(\"Via\") == p.config.ServerName {\n\t\thttp.Error(w, \"Request loop failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(req)\n\tsURL, ok := encoding.DecodeURL(p.config.HMACKey, vars[\"sigHash\"], vars[\"encodedURL\"])\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\tgologit.Debugln(\"URL:\", sURL)\n\tgologit.Debugln(\"Client request:\", req)\n\n\tu, err := url.Parse(sURL)\n\tif err != nil {\n\t\tgologit.Debugln(err)\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tu.Host = strings.ToLower(u.Host)\n\tif u.Host == \"\" || localhostRegex.MatchString(u.Host) {\n\t\thttp.Error(w, \"Bad url\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ if allowList is set, require match\n\tmatchFound := true\n\tif len(p.allowList) > 0 {\n\t\tmatchFound = false\n\t\tfor _, rgx := range p.allowList {\n\t\t\tif rgx.MatchString(u.Host) {\n\t\t\t\tmatchFound = true\n\t\t\t}\n\t\t}\n\t}\n\tif !matchFound {\n\t\thttp.Error(w, \"Allowlist host failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ filter out rfc1918 hosts\n\tip := net.ParseIP(u.Host)\n\tif ip != nil {\n\t\tif addr1918PrefixRegex.MatchString(ip.String()) {\n\t\t\thttp.Error(w, \"Denylist host failure\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnreq, err := http.NewRequest(req.Method, sURL, nil)\n\tif err != nil {\n\t\tgologit.Debugln(\"Could not create NewRequest\", err)\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeader(&nreq.Header, &req.Header, &ValidReqHeaders)\n\tif req.Header.Get(\"X-Forwarded-For\") == \"\" {\n\t\thost, _, err := net.SplitHostPort(req.RemoteAddr)\n\t\tif err == nil && !addr1918PrefixRegex.MatchString(host) {\n\t\t\tnreq.Header.Add(\"X-Forwarded-For\", host)\n\t\t}\n\t}\n\n\t\/\/ add an accept header if the client didn't send one\n\tif nreq.Header.Get(\"Accept\") == \"\" {\n\t\tnreq.Header.Add(\"Accept\", \"image\/*\")\n\t}\n\n\tnreq.Header.Add(\"user-agent\", p.config.ServerName)\n\tnreq.Header.Add(\"via\", p.config.ServerName)\n\n\tgologit.Debugln(\"Built outgoing request:\", nreq)\n\n\tresp, err := p.client.Do(nreq)\n\tif err != nil {\n\t\tgologit.Debugln(\"Could not connect to endpoint\", err)\n\t\tif strings.Contains(err.Error(), \"timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tgologit.Debugln(\"Response from upstream:\", resp)\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.config.MaxSize {\n\t\tgologit.Debugln(\"Content length exceeded\", sURL)\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\t\/\/ check content type\n\t\tif !strings.HasPrefix(resp.Header.Get(\"Content-Type\"), \"image\/\") {\n\t\t\tgologit.Debugln(\"Non-Image content-type returned\", u)\n\t\t\thttp.Error(w, \"Non-Image content-type returned\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase 300:\n\t\tgologit.Debugln(\"Multiple choices not supported\")\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303, 307:\n\t\t\/\/ if we get a redirect here, we either disabled following,\n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 304:\n\t\th := w.Header()\n\t\tp.copyHeader(&h, &resp.Header, &ValidRespHeaders)\n\t\tw.WriteHeader(304)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeader(&h, &resp.Header, &ValidRespHeaders)\n\tw.WriteHeader(resp.StatusCode)\n\n\t\/\/ since this uses io.Copy from the respBody, it is streaming\n\t\/\/ from the request to the response. This means it will nearly\n\t\/\/ always end up with a chunked response.\n\tbW, err := io.Copy(w, resp.Body)\n\tif err != nil {\n\t\topErr, ok := err.(*net.OpError)\n\t\tif !ok {\n\t\t\tswitch opErr.Err {\n\t\t\tcase syscall.EPIPE:\n\t\t\t\t\/\/ broken pipe - endpoint terminated the conn\n\t\t\t\t\/\/ log as debug only.\n\t\t\t\tgologit.Debugln(\"Error writing response:\", err)\n\t\t\tcase syscall.ECONNRESET:\n\t\t\t\t\/\/ connection reset by peer - endpoint terminated the conn\n\t\t\t\t\/\/ log as debug only.\n\t\t\t\tgologit.Debugln(\"Error writing response:\", err)\n\t\t\tdefault:\n\t\t\t\t\/\/ log anything else normally\n\t\t\t\tgologit.Println(\"Error writing response:\", err)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif p.metrics != nil {\n\t\tgo p.metrics.AddBytes(bW)\n\t}\n\tgologit.Debugln(\"Response to client:\", w)\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *Proxy) copyHeader(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && (!ok || !x) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ SetMetricsCollector sets a proxy metrics (ProxyMetrics interface) for\n\/\/ the proxy\nfunc (p *Proxy) SetMetricsCollector(pm ProxyMetrics) {\n\tp.metrics = pm\n}\n\n\/\/ New returns a new Proxy. An error is returned if there was a failure\n\/\/ to parse the regex from the passed Config.\nfunc New(pc Config) (*Proxy, error) {\n\ttr := &httpclient.Transport{\n\t\tMaxIdleConnsPerHost: 8,\n\t\tConnectTimeout: 2 * time.Second,\n\t\tRequestTimeout: pc.RequestTimeout}\n\n\t\/\/ spawn an idle conn trimmer\n\tgo func() {\n\t\t\/\/ prunes every 5 minutes. this is just a guess at an\n\t\t\/\/ initial value. very busy severs may want to lower this...\n\t\tfor {\n\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\ttr.CloseIdleConnections()\n\t\t}\n\t}()\n\n\tclient := &http.Client{Transport: tr}\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\tif len(via) >= pc.MaxRedirects {\n\t\t\treturn errors.New(\"Too many redirects\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar allow []*regexp.Regexp\n\tvar c *regexp.Regexp\n\tvar err error\n\t\/\/ compile allow list\n\tfor _, v := range pc.AllowList {\n\t\tc, err = regexp.Compile(strings.TrimSpace(v))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallow = append(allow, c)\n\t}\n\n\treturn &Proxy{\n\t\tclient: client,\n\t\tconfig: &pc,\n\t\tallowList: allow}, nil\n}\n<commit_msg>make debug log more distinct<commit_after>\/\/ Package camo provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow list support.\npackage camo\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-camo\/camo\/encoding\"\n\t\"github.com\/cactus\/gologit\"\n\t\"github.com\/gorilla\/mux\"\n\thttpclient \"github.com\/mreiferson\/go-httpclient\"\n)\n\n\/\/ Config holds configuration data used when creating a Proxy with New.\ntype Config struct {\n\t\/\/ HMACKey is a byte slice to be used as the hmac key\n\tHMACKey []byte\n\t\/\/ AllowList is a list of string represenstations of regex (not compiled\n\t\/\/ regex) that are used as a whitelist filter. If an AllowList is present,\n\t\/\/ then anything not matching is dropped. If no AllowList is present,\n\t\/\/ no Allow filtering is done.\n\tAllowList []string\n\t\/\/ MaxSize is the maximum valid image size response (in bytes).\n\tMaxSize int64\n\t\/\/ MaxRedirects is the maximum number of redirects to follow.\n\tMaxRedirects int\n\t\/\/ Request timeout is a timeout for fetching upstream data.\n\tRequestTimeout time.Duration\n\t\/\/ Server name used in Headers and Via checks\n\tServerName string\n\t\/\/ Default headers to add to each response\n\tAddHeaders map[string]string\n}\n\n\/\/ ProxyMetrics interface for Proxy to use for stats\/metrics.\n\/\/ This must be goroutine safe, as AddBytes and AddServed will be called from\n\/\/ many goroutines.\ntype ProxyMetrics interface {\n\tAddBytes(bc int64)\n\tAddServed()\n}\n\n\/\/ A Proxy is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow list support.\ntype Proxy struct {\n\tclient *http.Client\n\tconfig *Config\n\t\/\/ compiled allow list regex\n\tallowList []*regexp.Regexp\n\tmetrics ProxyMetrics\n}\n\n\/\/ NotFound handles client requests that did not match a valid\n\/\/ mux route.\nfunc (p *Proxy) NotFoundHandler() http.Handler {\n\thandler := func(w http.ResponseWriter, req *http.Request) {\n\t\th := w.Header()\n\t\tfor k, v := range p.config.AddHeaders {\n\t\t\th.Set(k, v)\n\t\t}\n\t\th.Set(\"Server\", p.config.ServerName)\n\t\th.Set(\"Date\", formattedDate.String())\n\t\thttp.Error(w, \"404 Not Found\", 404)\n\t}\n\treturn http.HandlerFunc(handler)\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for\n\/\/ proper image content types.\nfunc (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tgologit.Debugln(\"Request:\", req.URL)\n\tif p.metrics != nil {\n\t\tgo p.metrics.AddServed()\n\t}\n\n\t\/\/ add default headers\n\twh := w.Header()\n\tfor k, v := range p.config.AddHeaders {\n\t\twh.Set(k, v)\n\t}\n\twh.Set(\"Server\", p.config.ServerName)\n\twh.Set(\"Date\", formattedDate.String())\n\n\tif req.Header.Get(\"Via\") == p.config.ServerName {\n\t\thttp.Error(w, \"Request loop failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(req)\n\tsURL, ok := encoding.DecodeURL(p.config.HMACKey, vars[\"sigHash\"], vars[\"encodedURL\"])\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\tgologit.Debugln(\"URL:\", sURL)\n\tgologit.Debugln(\"Client request:\", req)\n\n\tu, err := url.Parse(sURL)\n\tif err != nil {\n\t\tgologit.Debugln(\"url parse error:\", err)\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tu.Host = strings.ToLower(u.Host)\n\tif u.Host == \"\" || localhostRegex.MatchString(u.Host) {\n\t\thttp.Error(w, \"Bad url\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ if allowList is set, require match\n\tmatchFound := true\n\tif len(p.allowList) > 0 {\n\t\tmatchFound = false\n\t\tfor _, rgx := range p.allowList {\n\t\t\tif rgx.MatchString(u.Host) {\n\t\t\t\tmatchFound = true\n\t\t\t}\n\t\t}\n\t}\n\tif !matchFound {\n\t\thttp.Error(w, \"Allowlist host failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ filter out rfc1918 hosts\n\tip := net.ParseIP(u.Host)\n\tif ip != nil {\n\t\tif addr1918PrefixRegex.MatchString(ip.String()) {\n\t\t\thttp.Error(w, \"Denylist host failure\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnreq, err := http.NewRequest(req.Method, sURL, nil)\n\tif err != nil {\n\t\tgologit.Debugln(\"Could not create NewRequest\", err)\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeader(&nreq.Header, &req.Header, &ValidReqHeaders)\n\tif req.Header.Get(\"X-Forwarded-For\") == \"\" {\n\t\thost, _, err := net.SplitHostPort(req.RemoteAddr)\n\t\tif err == nil && !addr1918PrefixRegex.MatchString(host) {\n\t\t\tnreq.Header.Add(\"X-Forwarded-For\", host)\n\t\t}\n\t}\n\n\t\/\/ add an accept header if the client didn't send one\n\tif nreq.Header.Get(\"Accept\") == \"\" {\n\t\tnreq.Header.Add(\"Accept\", \"image\/*\")\n\t}\n\n\tnreq.Header.Add(\"user-agent\", p.config.ServerName)\n\tnreq.Header.Add(\"via\", p.config.ServerName)\n\n\tgologit.Debugln(\"Built outgoing request:\", nreq)\n\n\tresp, err := p.client.Do(nreq)\n\tif err != nil {\n\t\tgologit.Debugln(\"Could not connect to endpoint\", err)\n\t\tif strings.Contains(err.Error(), \"timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tgologit.Debugln(\"Response from upstream:\", resp)\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.config.MaxSize {\n\t\tgologit.Debugln(\"Content length exceeded\", sURL)\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\t\/\/ check content type\n\t\tif !strings.HasPrefix(resp.Header.Get(\"Content-Type\"), \"image\/\") {\n\t\t\tgologit.Debugln(\"Non-Image content-type returned\", u)\n\t\t\thttp.Error(w, \"Non-Image content-type returned\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase 300:\n\t\tgologit.Debugln(\"Multiple choices not supported\")\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303, 307:\n\t\t\/\/ if we get a redirect here, we either disabled following,\n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 304:\n\t\th := w.Header()\n\t\tp.copyHeader(&h, &resp.Header, &ValidRespHeaders)\n\t\tw.WriteHeader(304)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeader(&h, &resp.Header, &ValidRespHeaders)\n\tw.WriteHeader(resp.StatusCode)\n\n\t\/\/ since this uses io.Copy from the respBody, it is streaming\n\t\/\/ from the request to the response. This means it will nearly\n\t\/\/ always end up with a chunked response.\n\tbW, err := io.Copy(w, resp.Body)\n\tif err != nil {\n\t\topErr, ok := err.(*net.OpError)\n\t\tif !ok {\n\t\t\tswitch opErr.Err {\n\t\t\tcase syscall.EPIPE:\n\t\t\t\t\/\/ broken pipe - endpoint terminated the conn\n\t\t\t\t\/\/ log as debug only.\n\t\t\t\tgologit.Debugln(\"Error writing response:\", err)\n\t\t\tcase syscall.ECONNRESET:\n\t\t\t\t\/\/ connection reset by peer - endpoint terminated the conn\n\t\t\t\t\/\/ log as debug only.\n\t\t\t\tgologit.Debugln(\"Error writing response:\", err)\n\t\t\tdefault:\n\t\t\t\t\/\/ log anything else normally\n\t\t\t\tgologit.Println(\"Error writing response:\", err)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif p.metrics != nil {\n\t\tgo p.metrics.AddBytes(bW)\n\t}\n\tgologit.Debugln(\"Response to client:\", w)\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *Proxy) copyHeader(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && (!ok || !x) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ SetMetricsCollector sets a proxy metrics (ProxyMetrics interface) for\n\/\/ the proxy\nfunc (p *Proxy) SetMetricsCollector(pm ProxyMetrics) {\n\tp.metrics = pm\n}\n\n\/\/ New returns a new Proxy. An error is returned if there was a failure\n\/\/ to parse the regex from the passed Config.\nfunc New(pc Config) (*Proxy, error) {\n\ttr := &httpclient.Transport{\n\t\tMaxIdleConnsPerHost: 8,\n\t\tConnectTimeout: 2 * time.Second,\n\t\tRequestTimeout: pc.RequestTimeout}\n\n\t\/\/ spawn an idle conn trimmer\n\tgo func() {\n\t\t\/\/ prunes every 5 minutes. this is just a guess at an\n\t\t\/\/ initial value. very busy severs may want to lower this...\n\t\tfor {\n\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\ttr.CloseIdleConnections()\n\t\t}\n\t}()\n\n\tclient := &http.Client{Transport: tr}\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\tif len(via) >= pc.MaxRedirects {\n\t\t\treturn errors.New(\"Too many redirects\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar allow []*regexp.Regexp\n\tvar c *regexp.Regexp\n\tvar err error\n\t\/\/ compile allow list\n\tfor _, v := range pc.AllowList {\n\t\tc, err = regexp.Compile(strings.TrimSpace(v))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallow = append(allow, c)\n\t}\n\n\treturn &Proxy{\n\t\tclient: client,\n\t\tconfig: &pc,\n\t\tallowList: allow}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package v1\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"gopkg.in\/thehowl\/go-osuapi.v1\"\n\t\"zxq.co\/ripple\/rippleapi\/common\"\n\t\"zxq.co\/x\/getrank\"\n)\n\ntype userScore struct {\n\tScore\n\tBeatmap beatmap `json:\"beatmap\"`\n}\n\ntype userScoresResponse struct {\n\tcommon.ResponseBase\n\tScores []userScore `json:\"scores\"`\n}\n\nconst userScoreSelectBase = `\nSELECT\n\tscores.id, scores.beatmap_md5, scores.score,\n\tscores.max_combo, scores.full_combo, scores.mods,\n\tscores.300_count, scores.100_count, scores.50_count,\n\tscores.gekis_count, scores.katus_count, scores.misses_count,\n\tscores.time, scores.play_mode, scores.accuracy, scores.pp,\n\tscores.completed,\n\n\tbeatmaps.beatmap_id, beatmaps.beatmapset_id, beatmaps.beatmap_md5,\n\tbeatmaps.song_name, beatmaps.ar, beatmaps.od, beatmaps.difficulty_std,\n\tbeatmaps.difficulty_taiko, beatmaps.difficulty_ctb, beatmaps.difficulty_mania,\n\tbeatmaps.max_combo, beatmaps.hit_length, beatmaps.ranked,\n\tbeatmaps.ranked_status_freezed, beatmaps.latest_update\nFROM scores\nINNER JOIN beatmaps ON beatmaps.beatmap_md5 = scores.beatmap_md5\nINNER JOIN users ON users.id = scores.userid\n`\n\n\/\/ UserScoresBestGET retrieves the best scores of an user, sorted by PP if\n\/\/ mode is standard and sorted by ranked score otherwise.\nfunc UserScoresBestGET(md common.MethodData) common.CodeMessager {\n\tcm, wc, param := whereClauseUser(md, \"users\")\n\tif cm != nil {\n\t\treturn *cm\n\t}\n\tmc := genModeClause(md)\n\t\/\/ For all modes that have PP, we leave out 0 PP scores.\n\tif getMode(md.Query(\"mode\")) != \"ctb\" {\n\t\tmc += \" AND scores.pp > 0\"\n\t}\n\treturn scoresPuts(md, fmt.Sprintf(\n\t\t`WHERE\n\t\t\tscores.completed = '3'\n\t\t\tAND %s\n\t\t\t%s\n\t\t\tAND `+md.User.OnlyUserPublic(true)+`\n\t\tORDER BY scores.pp DESC, scores.score DESC %s`,\n\t\twc, mc, common.Paginate(md.Query(\"p\"), md.Query(\"l\"), 100),\n\t), param)\n}\n\n\/\/ UserScoresRecentGET retrieves an user's latest scores.\nfunc UserScoresRecentGET(md common.MethodData) common.CodeMessager {\n\tcm, wc, param := whereClauseUser(md, \"users\")\n\tif cm != nil {\n\t\treturn *cm\n\t}\n\treturn scoresPuts(md, fmt.Sprintf(\n\t\t`WHERE\n\t\t\t%s\n\t\t\t%s\n\t\t\tAND `+md.User.OnlyUserPublic(true)+`\n\t\tORDER BY scores.id DESC %s`,\n\t\twc, genModeClause(md), common.Paginate(md.Query(\"p\"), md.Query(\"l\"), 100),\n\t), param)\n}\n\nfunc scoresPuts(md common.MethodData, whereClause string, params ...interface{}) common.CodeMessager {\n\trows, err := md.DB.Query(userScoreSelectBase+whereClause, params...)\n\tif err != nil {\n\t\tmd.Err(err)\n\t\treturn Err500\n\t}\n\tvar scores []userScore\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tus userScore\n\t\t\tb beatmap\n\t\t)\n\t\terr = rows.Scan(\n\t\t\t&us.ID, &us.BeatmapMD5, &us.Score.Score,\n\t\t\t&us.MaxCombo, &us.FullCombo, &us.Mods,\n\t\t\t&us.Count300, &us.Count100, &us.Count50,\n\t\t\t&us.CountGeki, &us.CountKatu, &us.CountMiss,\n\t\t\t&us.Time, &us.PlayMode, &us.Accuracy, &us.PP,\n\t\t\t&us.Completed,\n\n\t\t\t&b.BeatmapID, &b.BeatmapsetID, &b.BeatmapMD5,\n\t\t\t&b.SongName, &b.AR, &b.OD, &b.Diff2.STD,\n\t\t\t&b.Diff2.Taiko, &b.Diff2.CTB, &b.Diff2.Mania,\n\t\t\t&b.MaxCombo, &b.HitLength, &b.Ranked,\n\t\t\t&b.RankedStatusFrozen, &b.LatestUpdate,\n\t\t)\n\t\tif err != nil {\n\t\t\tmd.Err(err)\n\t\t\treturn Err500\n\t\t}\n\t\tb.Difficulty = b.Diff2.STD\n\t\tus.Beatmap = b\n\t\tus.Rank = strings.ToUpper(getrank.GetRank(\n\t\t\tosuapi.Mode(us.PlayMode),\n\t\t\tosuapi.Mods(us.Mods),\n\t\t\tus.Accuracy,\n\t\t\tus.Count300,\n\t\t\tus.Count100,\n\t\t\tus.Count50,\n\t\t\tus.CountMiss,\n\t\t))\n\t\tscores = append(scores, us)\n\t}\n\tr := userScoresResponse{}\n\tr.Code = 200\n\tr.Scores = scores\n\treturn r\n}\n<commit_msg>Fix wrong condition on ctb user scores<commit_after>package v1\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"gopkg.in\/thehowl\/go-osuapi.v1\"\n\t\"zxq.co\/ripple\/rippleapi\/common\"\n\t\"zxq.co\/x\/getrank\"\n)\n\ntype userScore struct {\n\tScore\n\tBeatmap beatmap `json:\"beatmap\"`\n}\n\ntype userScoresResponse struct {\n\tcommon.ResponseBase\n\tScores []userScore `json:\"scores\"`\n}\n\nconst userScoreSelectBase = `\nSELECT\n\tscores.id, scores.beatmap_md5, scores.score,\n\tscores.max_combo, scores.full_combo, scores.mods,\n\tscores.300_count, scores.100_count, scores.50_count,\n\tscores.gekis_count, scores.katus_count, scores.misses_count,\n\tscores.time, scores.play_mode, scores.accuracy, scores.pp,\n\tscores.completed,\n\n\tbeatmaps.beatmap_id, beatmaps.beatmapset_id, beatmaps.beatmap_md5,\n\tbeatmaps.song_name, beatmaps.ar, beatmaps.od, beatmaps.difficulty_std,\n\tbeatmaps.difficulty_taiko, beatmaps.difficulty_ctb, beatmaps.difficulty_mania,\n\tbeatmaps.max_combo, beatmaps.hit_length, beatmaps.ranked,\n\tbeatmaps.ranked_status_freezed, beatmaps.latest_update\nFROM scores\nINNER JOIN beatmaps ON beatmaps.beatmap_md5 = scores.beatmap_md5\nINNER JOIN users ON users.id = scores.userid\n`\n\n\/\/ UserScoresBestGET retrieves the best scores of an user, sorted by PP if\n\/\/ mode is standard and sorted by ranked score otherwise.\nfunc UserScoresBestGET(md common.MethodData) common.CodeMessager {\n\tcm, wc, param := whereClauseUser(md, \"users\")\n\tif cm != nil {\n\t\treturn *cm\n\t}\n\tmc := genModeClause(md)\n\t\/\/ We leave out 0 PP scores.\n\tmc += \" AND scores.pp > 0\"\n\treturn scoresPuts(md, fmt.Sprintf(\n\t\t`WHERE\n\t\t\tscores.completed = '3'\n\t\t\tAND %s\n\t\t\t%s\n\t\t\tAND `+md.User.OnlyUserPublic(true)+`\n\t\tORDER BY scores.pp DESC, scores.score DESC %s`,\n\t\twc, mc, common.Paginate(md.Query(\"p\"), md.Query(\"l\"), 100),\n\t), param)\n}\n\n\/\/ UserScoresRecentGET retrieves an user's latest scores.\nfunc UserScoresRecentGET(md common.MethodData) common.CodeMessager {\n\tcm, wc, param := whereClauseUser(md, \"users\")\n\tif cm != nil {\n\t\treturn *cm\n\t}\n\treturn scoresPuts(md, fmt.Sprintf(\n\t\t`WHERE\n\t\t\t%s\n\t\t\t%s\n\t\t\tAND `+md.User.OnlyUserPublic(true)+`\n\t\tORDER BY scores.id DESC %s`,\n\t\twc, genModeClause(md), common.Paginate(md.Query(\"p\"), md.Query(\"l\"), 100),\n\t), param)\n}\n\nfunc scoresPuts(md common.MethodData, whereClause string, params ...interface{}) common.CodeMessager {\n\trows, err := md.DB.Query(userScoreSelectBase+whereClause, params...)\n\tif err != nil {\n\t\tmd.Err(err)\n\t\treturn Err500\n\t}\n\tvar scores []userScore\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tus userScore\n\t\t\tb beatmap\n\t\t)\n\t\terr = rows.Scan(\n\t\t\t&us.ID, &us.BeatmapMD5, &us.Score.Score,\n\t\t\t&us.MaxCombo, &us.FullCombo, &us.Mods,\n\t\t\t&us.Count300, &us.Count100, &us.Count50,\n\t\t\t&us.CountGeki, &us.CountKatu, &us.CountMiss,\n\t\t\t&us.Time, &us.PlayMode, &us.Accuracy, &us.PP,\n\t\t\t&us.Completed,\n\n\t\t\t&b.BeatmapID, &b.BeatmapsetID, &b.BeatmapMD5,\n\t\t\t&b.SongName, &b.AR, &b.OD, &b.Diff2.STD,\n\t\t\t&b.Diff2.Taiko, &b.Diff2.CTB, &b.Diff2.Mania,\n\t\t\t&b.MaxCombo, &b.HitLength, &b.Ranked,\n\t\t\t&b.RankedStatusFrozen, &b.LatestUpdate,\n\t\t)\n\t\tif err != nil {\n\t\t\tmd.Err(err)\n\t\t\treturn Err500\n\t\t}\n\t\tb.Difficulty = b.Diff2.STD\n\t\tus.Beatmap = b\n\t\tus.Rank = strings.ToUpper(getrank.GetRank(\n\t\t\tosuapi.Mode(us.PlayMode),\n\t\t\tosuapi.Mods(us.Mods),\n\t\t\tus.Accuracy,\n\t\t\tus.Count300,\n\t\t\tus.Count100,\n\t\t\tus.Count50,\n\t\t\tus.CountMiss,\n\t\t))\n\t\tscores = append(scores, us)\n\t}\n\tr := userScoresResponse{}\n\tr.Code = 200\n\tr.Scores = scores\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>package ecs\n\nimport (\n\t\"github.com\/denverdino\/aliyungo\/common\"\n\t\"os\"\n)\n\n\/\/ Interval for checking status in WaitForXXX method\nconst DefaultWaitForInterval = 5\n\n\/\/ Default timeout value for WaitForXXX method\nconst DefaultTimeout = 60\n\ntype Client struct {\n\tcommon.Client\n}\n\nconst (\n\t\/\/ ECSDefaultEndpoint is the default API endpoint of ECS services\n\tECSDefaultEndpoint = \"https:\/\/ecs.aliyuncs.com\"\n\tECSAPIVersion = \"2014-05-26\"\n)\n\n\/\/ NewClient creates a new instance of ECS client\nfunc NewClient(accessKeyId, accessKeySecret string) *Client {\n\tendpoint := os.Getenv(\"ECS_ENDPOINT\")\n\tif endpoint == \"\" {\n\t\tendpoint = ECSDefaultEndpoint\n\t}\n\treturn NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)\n}\n\nfunc NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client {\n\tclient := &Client{}\n\tclient.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret)\n\treturn client\n}\n<commit_msg>update ecsendpoint<commit_after>package ecs\n\nimport (\n\t\"github.com\/denverdino\/aliyungo\/common\"\n\t\"os\"\n)\n\n\/\/ Interval for checking status in WaitForXXX method\nconst DefaultWaitForInterval = 5\n\n\/\/ Default timeout value for WaitForXXX method\nconst DefaultTimeout = 60\n\ntype Client struct {\n\tcommon.Client\n}\n\nconst (\n\t\/\/ ECSDefaultEndpoint is the default API endpoint of ECS services\n\tECSDefaultEndpoint = \"https:\/\/ecs-cn-hangzhou.aliyuncs.com\"\n\tECSAPIVersion = \"2014-05-26\"\n)\n\n\/\/ NewClient creates a new instance of ECS client\nfunc NewClient(accessKeyId, accessKeySecret string) *Client {\n\tendpoint := os.Getenv(\"ECS_ENDPOINT\")\n\tif endpoint == \"\" {\n\t\tendpoint = ECSDefaultEndpoint\n\t}\n\treturn NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)\n}\n\nfunc NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client {\n\tclient := &Client{}\n\tclient.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret)\n\treturn client\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\/\/ \"encoding\/hex\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"bytes\"\n\t\/\/ \"time\"\n\t\/\/ \"math\/big\"\n\t\/\/ \"github.com\/PointCoin\/btcutil\"\n\t\/\/ \"github.com\/PointCoin\/btcwire\"\n\t\/\/ \"github.com\/PointCoin\/btcrpcclient\"\n\t\/\/ \"github.com\/PointCoin\/btcjson\"\n\t\"strconv\"\n\t\"strings\"\n\t\"encoding\/json\"\n\t\/\/ \"regexp\"\n\t\/\/ \"math\/rand\"\n\t\"log\"\n)\n\n\nfunc main() {\n\tprint := fmt.Println\n\ts := \"\\\"Hello\\\"\"\n\tfmt.Println(s)\n\n\to,_ := strconv.Unquote(s)\n\tfmt.Println(o)\n\t\/\/ address: Prxy397nCyskwHwmiv3TaFG6ZgZ88Cbnju\n\t\/\/ command = pointctl getrawtransaction c1de1be883834d733d096b3e14674978459f111f90d9dfbc5a82c9fa20db60a7\n\t\n\ttxid := \"c1de1be883834d733d096b3e14674978459f111f90d9dfbc5a82c9fa20db60a7\"\n\ttxdetails := getTransactionDetails(txid)\n\tm := getTransactionJson(txdetails)\n\n\ttxidreturned := m[\"txid\"]\n\tprint(\"\\n\\ngot txid\", txidreturned)\n\n\tvinList := getVinList(m)\n\tvoutList := getVoutList(m)\n\n\t_ , _ = vinList,voutList\n\n\n\n\t\/\/ Start with transaction\n\n\t\/\/ See input addresses of transaction as well as amounts\n\t\/\/ For each vin, going to have to \n\tfmt.Println(\"Outputs\")\n\tfor i, x := range vinList {\n\t\ttx := getTransactionDetails(x.txid)\n\t\ttxjs := getTransactionJson(tx)\n\t\ttxvouts := getVoutList(txjs)\n\t\tfor _, y := range txvouts {\n\n\t\t\tif y.n == x.vout {\n\t\t\t\tfmt.Println(\"\\t[\",i,\"]\",y.addresses[0],y.value)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t\t\/\/ 1) Get tx, \n\t\t\/\/ 2) Get 'n'th output\n\t\t\/\/ 3) Get address and amount of that output\n\n\n\t\/\/ See output addresses as well as amounts\n\t\/\/ For each vout\n\t\t\/\/ 1)Print address and amount\n\n\n\n}\n\n\ntype vin struct {\n txid string\n vout int\n}\n\ntype vout struct {\n\tvalue float64\n\tn int\n\taddresses []string\n}\n\nfunc getVinList(m map[string]interface{}) ([]vin) {\n\tvinList := make([]vin,0)\n\tvinJsonList := m[\"vin\"]\n\n\tswitch vv := vinJsonList.(type) {\n\tcase []interface{}:\n\t\tfor _, u := range vv {\n\t\t\tj := u.(map[string]interface{})\n\t\t\tvinTxid := j[\"txid\"].(string)\n\t\t\tvinVout := int(j[\"vout\"].(float64))\n\t\t\tnewVin := vin{txid: vinTxid, vout: vinVout}\n\t\t\tvinList = append(vinList, newVin)\n \/\/ fmt.Println(i, u)\n }\n\t\t\/\/ print(\"yes matches\")\n\tdefault:\n\t\tprint(\"nope getVinList didn't work\")\n\t}\n\n\tfmt.Println(\"vins:\")\n\tfor _,x := range vinList {\n\t\tfmt.Println(x)\n\t}\n\treturn vinList\n\n}\n\nfunc getVoutList(m map[string] interface{}) ([]vout) {\n\tvoutList := make([]vout,0)\n\tvoutJsonList := m[\"vout\"]\n\n\tswitch oo := voutJsonList.(type) {\n\tcase []interface{}:\n\t\tfor _,u := range oo {\n\t\t\tj := u.(map[string]interface{})\n\t\t\tvoutVal := j[\"value\"].(float64)\n\t\t\tvoutN := int(j[\"n\"].(float64))\n\n\t\t\tvScriptPubKey := j[\"scriptPubKey\"].(map[string]interface{})\n\t\t\tvAddresses := vScriptPubKey[\"addresses\"].([]interface{})\n\t\t\tvAddressesStrings := make([]string, 0)\n\t\t\tfor _,u := range vAddresses {\n\t\t\t\taddr := u.(string)\n\t\t\t\tvAddressesStrings = append(vAddressesStrings, addr)\n\t\t\t}\n\n\t\t\tnewVout := vout{value: voutVal, n: voutN, addresses: vAddressesStrings}\n\t\t\tvoutList = append(voutList, newVout)\n\t\t}\n\t}\n\n\tfmt.Println(\"vouts:\")\n\tfor _,x := range voutList {\n\t\tfmt.Println(x)\n\t}\n\treturn voutList\n}\n\nfunc getTransactionJson(txdetails string) (map[string]interface{}){\n\ttxdetailsbytes := []byte(txdetails)\n\n\tvar f interface{}\n\t_ = json.Unmarshal(txdetailsbytes, &f)\n\tm := f.(map[string]interface{})\n\treturn m\n}\n\nfunc getTransactionDetails(txhash string) (string){\n\t\/\/ command = pointctl getrawtransaction d2011b19dea6e98ec8bf78bd224856e76b6a9c460bbb347e49adb3dcf457e548\n\tcmd := exec.Command(\"pointctl\", \"getrawtransaction\", txhash)\n\n\tvar out bytes.Buffer\n cmd.Stdout = &out\n err := cmd.Run()\n if err != nil {\n \tlog.Fatal(err)\n }\n \/\/ fmt.Printf(\"result: %s\\n\", out)\n \/\/ fmt.Println(out.String())\n\n\n\tcmd2 := exec.Command(\"xargs\", \"pointctl\", \"decoderawtransaction\")\n\tcmd2.Stdin = strings.NewReader(out.String())\n\tvar out2 bytes.Buffer\n cmd2.Stdout = &out2\n err2 := cmd2.Run()\n if err2 != nil {\n \tlog.Fatal(err2)\n }\n\t\n return out2.String()\n \/\/ fmt.Println(out2.String())\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n<commit_msg>trying again<commit_after>package main\n\nimport (\n\t\/\/ \"encoding\/hex\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"bytes\"\n\t\/\/ \"time\"\n\t\/\/ \"math\/big\"\n\t\/\/ \"github.com\/PointCoin\/btcutil\"\n\t\/\/ \"github.com\/PointCoin\/btcwire\"\n\t\/\/ \"github.com\/PointCoin\/btcrpcclient\"\n\t\/\/ \"github.com\/PointCoin\/btcjson\"\n\t\"strconv\"\n\t\"strings\"\n\t\"encoding\/json\"\n\t\/\/ \"regexp\"\n\t\/\/ \"math\/rand\"\n\t\"log\"\n)\n\n\nfunc main() {\n\tprint := fmt.Println\n\ts := \"\\\"Hello\\\"\"\n\tfmt.Println(s)\n\n\to,_ := strconv.Unquote(s)\n\tfmt.Println(o)\n\t\/\/ address: Prxy397nCyskwHwmiv3TaFG6ZgZ88Cbnju\n\t\/\/ command = pointctl getrawtransaction c1de1be883834d733d096b3e14674978459f111f90d9dfbc5a82c9fa20db60a7\n\t\n\ttxid := \"c1de1be883834d733d096b3e14674978459f111f90d9dfbc5a82c9fa20db60a7\"\n\ttxdetails := getTransactionDetails(txid)\n\t\/\/ m := getTransactionJson(txdetails)\n\ttxdetailsbytes := []byte(txdetails)\n\n\tvar f interface{}\n\t_ = json.Unmarshal(txdetailsbytes, &f)\n\tm := f.(map[string]interface{})\n\ttxidreturned := m[\"txid\"]\n\tprint(\"\\n\\ngot txid\", txidreturned)\n\n\tvinList := getVinList(m)\n\tvoutList := getVoutList(m)\n\n\t_ , _ = vinList,voutList\n\n\n\n\t\/\/ Start with transaction\n\n\t\/\/ See input addresses of transaction as well as amounts\n\t\/\/ For each vin, going to have to \n\tfmt.Println(\"Outputs\")\n\tfor i, x := range vinList {\n\t\ttx := getTransactionDetails(x.txid)\n\t\ttxjs := getTransactionJson(tx)\n\t\ttxvouts := getVoutList(txjs)\n\t\tfor _, y := range txvouts {\n\n\t\t\tif y.n == x.vout {\n\t\t\t\tfmt.Println(\"\\t[\",i,\"]\",y.addresses[0],y.value)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t\t\/\/ 1) Get tx, \n\t\t\/\/ 2) Get 'n'th output\n\t\t\/\/ 3) Get address and amount of that output\n\n\n\t\/\/ See output addresses as well as amounts\n\t\/\/ For each vout\n\t\t\/\/ 1)Print address and amount\n\n\n\n}\n\n\ntype vin struct {\n txid string\n vout int\n}\n\ntype vout struct {\n\tvalue float64\n\tn int\n\taddresses []string\n}\n\nfunc getVinList(m map[string]interface{}) ([]vin) {\n\tvinList := make([]vin,0)\n\tvinJsonList := m[\"vin\"]\n\n\tswitch vv := vinJsonList.(type) {\n\tcase []interface{}:\n\t\tfor _, u := range vv {\n\t\t\tj := u.(map[string]interface{})\n\t\t\tvinTxid := j[\"txid\"].(string)\n\t\t\tvinVout := int(j[\"vout\"].(float64))\n\t\t\tnewVin := vin{txid: vinTxid, vout: vinVout}\n\t\t\tvinList = append(vinList, newVin)\n \/\/ fmt.Println(i, u)\n }\n\t\t\/\/ print(\"yes matches\")\n\tdefault:\n\t\tprint(\"nope getVinList didn't work\")\n\t}\n\n\tfmt.Println(\"vins:\")\n\tfor _,x := range vinList {\n\t\tfmt.Println(x)\n\t}\n\treturn vinList\n\n}\n\nfunc getVoutList(m map[string] interface{}) ([]vout) {\n\tvoutList := make([]vout,0)\n\tvoutJsonList := m[\"vout\"]\n\n\tswitch oo := voutJsonList.(type) {\n\tcase []interface{}:\n\t\tfor _,u := range oo {\n\t\t\tj := u.(map[string]interface{})\n\t\t\tvoutVal := j[\"value\"].(float64)\n\t\t\tvoutN := int(j[\"n\"].(float64))\n\n\t\t\tvScriptPubKey := j[\"scriptPubKey\"].(map[string]interface{})\n\t\t\tvAddresses := vScriptPubKey[\"addresses\"].([]interface{})\n\t\t\tvAddressesStrings := make([]string, 0)\n\t\t\tfor _,u := range vAddresses {\n\t\t\t\taddr := u.(string)\n\t\t\t\tvAddressesStrings = append(vAddressesStrings, addr)\n\t\t\t}\n\n\t\t\tnewVout := vout{value: voutVal, n: voutN, addresses: vAddressesStrings}\n\t\t\tvoutList = append(voutList, newVout)\n\t\t}\n\t}\n\n\tfmt.Println(\"vouts:\")\n\tfor _,x := range voutList {\n\t\tfmt.Println(x)\n\t}\n\treturn voutList\n}\n\nfunc getTransactionJson(txdetails string) (map[string]interface{}){\n\ttxdetailsbytes := []byte(txdetails)\n\n\tvar f interface{}\n\t_ = json.Unmarshal(txdetailsbytes, &f)\n\tm := f.(map[string]interface{})\n\treturn m\n}\n\nfunc getTransactionDetails(txhash string) (string){\n\t\/\/ command = pointctl getrawtransaction d2011b19dea6e98ec8bf78bd224856e76b6a9c460bbb347e49adb3dcf457e548\n\tcmd := exec.Command(\"pointctl\", \"getrawtransaction\", txhash)\n\n\tvar out bytes.Buffer\n cmd.Stdout = &out\n err := cmd.Run()\n if err != nil {\n \tlog.Fatal(err)\n }\n \/\/ fmt.Printf(\"result: %s\\n\", out)\n \/\/ fmt.Println(out.String())\n\n\n\tcmd2 := exec.Command(\"xargs\", \"pointctl\", \"decoderawtransaction\")\n\tcmd2.Stdin = strings.NewReader(out.String())\n\tvar out2 bytes.Buffer\n cmd2.Stdout = &out2\n err2 := cmd2.Run()\n if err2 != nil {\n \tlog.Fatal(err2)\n }\n\t\n return out2.String()\n \/\/ fmt.Println(out2.String())\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/bigroom\/vision\/models\"\n\t\"github.com\/bigroom\/zombies\"\n\t\"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/gorilla\/websocket\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc messageLoop() {\n\tfor {\n\t\tlog.Info(\"Waiting for message...\")\n\n\t\tm := <-messages\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"message\": m.Content,\n\t\t\t\"channel_key\": m.Key(),\n\t\t}).Info(\"Dispatching message to user\")\n\n\t\tfor _, client := range clients[m.Key()] {\n\t\t\terr := client.c.WriteJSON(response{\n\t\t\t\tName: \"MESSAGE\",\n\t\t\t\tContents: m,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tsentry.CaptureError(err, nil)\n\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t}).Info(\"Couldnt send error\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar upgrader = websocket.Upgrader{\n\tCheckOrigin: func(r *http.Request) bool { return true },\n}\n\nfunc dispatchHandler(w http.ResponseWriter, r *http.Request, t *jwt.Token) {\n\tu, err := models.FetchUser(\"id\", t.Claims[\"id\"])\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": t.Claims[\"id\"],\n\t\t}).Info(\"Could not get user with ID\")\n\t\treturn\n\t}\n\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tsentry.CaptureError(err, nil)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\tserver := r.FormValue(\"server\")\n\tif server == \"\" {\n\t\tserver = \"chat.freenode.net:6667\"\n\t}\n\n\tresp, err := pool.Tell(\"exists\", u.ID)\n\tif err != nil {\n\t\tsentry.CaptureError(err, nil)\n\t\treturn\n\t}\n\n\tif !resp.MustBool() {\n\t\tlog.Info(\"Creating zombie\")\n\t\tadd := zombies.Add{\n\t\t\tID: u.ID,\n\t\t\tNick: u.Username,\n\t\t\tServer: server,\n\t\t}\n\n\t\t_, err := pool.Tell(\"add\", add)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error creating:\", err)\n\t\t\tsentry.CaptureError(err, nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor {\n\t\tvar a action\n\t\terr := c.ReadJSON(&a)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Closing connection. Error reading:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif a.Name == \"SET\" {\n\t\t\terr = handleJoin(a, u, c)\n\t\t} else if a.Name == \"SEND\" {\n\t\t\terr = handleSend(a, u, c)\n\t\t} else if a.Name == \"CHANNELS\" {\n\t\t\terr = handleChannels(a, u, c)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tsentry.CaptureError(err, nil)\n\t\t}\n\t}\n}\n\nfunc handleJoin(a action, u models.User, c *websocket.Conn) error {\n\tlog.WithFields(log.Fields{\n\t\t\"channel_key\": a.Message,\n\t}).Info(\"Adding user to chanel\")\n\n\t_, err := pool.Tell(\"join\", zombies.Join{\n\t\tID: u.ID,\n\t\tChannel: a.Message,\n\t})\n\n\tif err != nil {\n\t\tlog.Warn(\"Closing connection. Error joining chanel:\", err)\n\t\treturn err\n\t}\n\n\tclients[a.Message] = append(clients[a.Message], &conn{\n\t\tc: c,\n\t\tid: u.ID,\n\t})\n\n\treturn nil\n}\n\nfunc handleSend(a action, u models.User, c *websocket.Conn) error {\n\tlog.WithFields(log.Fields{\n\t\t\"message\": a.Message,\n\t\t\"channel_key\": a.Channel,\n\t}).Info(\"Going to send message to IRC\")\n\n\t_, err := pool.Tell(\"send\", zombies.Send{\n\t\tID: u.ID,\n\t\tChannel: a.Channel,\n\t\tMessage: a.Message,\n\t})\n\n\tif err != nil {\n\t\tlog.Info(\"Closing connection. Error sending message:\", err)\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"channel_key\": a.Channel,\n\t\t\"message\": a.Message,\n\t}).Info(\"Sent message\")\n\n\treturn nil\n}\n\nfunc handleChannels(a action, u models.User, c *websocket.Conn) error {\n\tlog.Info(\"Sending channels to user\")\n\tresp, err := pool.Tell(\"channels\", u.ID)\n\tif err != nil {\n\t\tlog.Warn(\"Closing connection. Could not connect to kite: \", err)\n\t\treturn err\n\t}\n\n\tvar channels zombies.Channels\n\tresp.MustUnmarshal(&channels)\n\n\tfor _, channel := range channels.Channels {\n\t\tkey := a.Message + \"\/\" + channel\n\t\tlog.Info(\"Joining channel\", key)\n\t\tclients[key] = append(clients[key], &conn{\n\t\t\tc: c,\n\t\t\tid: u.ID,\n\t\t})\n\t}\n\n\terr = c.WriteJSON(response{\n\t\tContents: channels.Channels,\n\t\tName: \"CHANNELS\",\n\t})\n\n\tif err != nil {\n\t\tlog.Warn(\"Could not write JSON\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype action struct {\n\tName string `json:\"name\"`\n\tMessage string `json:\"message\"`\n\tChannel string `json:\"channel\"`\n}\n\ntype conn struct {\n\tid int64\n\tc *websocket.Conn\n}\n\ntype response struct {\n\tName string `json:\"name\"`\n\tContents interface{} `json:\"contents\"`\n}\n<commit_msg>Fix duplicate message sending<commit_after>package main\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/bigroom\/vision\/models\"\n\t\"github.com\/bigroom\/zombies\"\n\t\"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/gorilla\/websocket\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc messageLoop() {\n\tfor {\n\t\tlog.Info(\"Waiting for message...\")\n\n\t\tm := <-messages\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"message\": m.Content,\n\t\t\t\"channel_key\": m.Key(),\n\t\t}).Info(\"Dispatching message to user\")\n\n\t\tfor _, client := range clients[m.Key()] {\n\t\t\terr := client.c.WriteJSON(response{\n\t\t\t\tName: \"MESSAGE\",\n\t\t\t\tContents: m,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tsentry.CaptureError(err, nil)\n\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t}).Info(\"Couldnt send error\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar upgrader = websocket.Upgrader{\n\tCheckOrigin: func(r *http.Request) bool { return true },\n}\n\nfunc dispatchHandler(w http.ResponseWriter, r *http.Request, t *jwt.Token) {\n\tu, err := models.FetchUser(\"id\", t.Claims[\"id\"])\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": t.Claims[\"id\"],\n\t\t}).Info(\"Could not get user with ID\")\n\t\treturn\n\t}\n\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tsentry.CaptureError(err, nil)\n\t\treturn\n\t}\n\n\tdefer c.Close()\n\n\tserver := r.FormValue(\"server\")\n\tif server == \"\" {\n\t\tserver = \"chat.freenode.net:6667\"\n\t}\n\n\tresp, err := pool.Tell(\"exists\", u.ID)\n\tif err != nil {\n\t\tsentry.CaptureError(err, nil)\n\t\treturn\n\t}\n\n\tif !resp.MustBool() {\n\t\tlog.Info(\"Creating zombie\")\n\t\tadd := zombies.Add{\n\t\t\tID: u.ID,\n\t\t\tNick: u.Username,\n\t\t\tServer: server,\n\t\t}\n\n\t\t_, err := pool.Tell(\"add\", add)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error creating:\", err)\n\t\t\tsentry.CaptureError(err, nil)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor {\n\t\tvar a action\n\t\terr := c.ReadJSON(&a)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Closing connection. Error reading:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif a.Name == \"SET\" {\n\t\t\terr = handleJoin(a, u, c)\n\t\t} else if a.Name == \"SEND\" {\n\t\t\terr = handleSend(a, u, c)\n\t\t} else if a.Name == \"CHANNELS\" {\n\t\t\terr = handleChannels(a, u, c)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tsentry.CaptureError(err, nil)\n\t\t}\n\t}\n}\n\nfunc handleJoin(a action, u models.User, c *websocket.Conn) error {\n\tlog.WithFields(log.Fields{\n\t\t\"channel_key\": a.Message,\n\t}).Info(\"Adding user to chanel\")\n\n\t_, err := pool.Tell(\"join\", zombies.Join{\n\t\tID: u.ID,\n\t\tChannel: a.Message,\n\t})\n\n\tif err != nil {\n\t\tlog.Warn(\"Closing connection. Error joining chanel:\", err)\n\t\treturn err\n\t}\n\n\tclients[a.Message] = append(clients[a.Message], &conn{\n\t\tc: c,\n\t\tid: u.ID,\n\t})\n\n\treturn nil\n}\n\nfunc handleSend(a action, u models.User, c *websocket.Conn) error {\n\tlog.WithFields(log.Fields{\n\t\t\"message\": a.Message,\n\t\t\"channel_key\": a.Channel,\n\t}).Info(\"Going to send message to IRC\")\n\n\t_, err := pool.Tell(\"send\", zombies.Send{\n\t\tID: u.ID,\n\t\tChannel: a.Channel,\n\t\tMessage: a.Message,\n\t})\n\n\tif err != nil {\n\t\tlog.Info(\"Closing connection. Error sending message:\", err)\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"channel_key\": a.Channel,\n\t\t\"message\": a.Message,\n\t}).Info(\"Sent message\")\n\n\treturn nil\n}\n\nfunc handleChannels(a action, u models.User, c *websocket.Conn) error {\n\tlog.Info(\"Sending channels to user\")\n\tresp, err := pool.Tell(\"channels\", u.ID)\n\tif err != nil {\n\t\tlog.Warn(\"Closing connection. Could not connect to kite: \", err)\n\t\treturn err\n\t}\n\n\tvar channels zombies.Channels\n\tresp.MustUnmarshal(&channels)\n\n\tfor _, channel := range channels.Channels {\n\t\tkey := a.Message + \"\/\" + channel\n\t\tlog.Info(\"Joining channel\", key)\n\n\t\tadd := true\n\t\tfor _, client := range clients[key] {\n\t\t\tif client.c == c {\n\t\t\t\tadd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif add {\n\t\t\tclients[key] = append(clients[key], &conn{\n\t\t\t\tc: c,\n\t\t\t\tid: u.ID,\n\t\t\t})\n\t\t}\n\t}\n\n\terr = c.WriteJSON(response{\n\t\tContents: channels.Channels,\n\t\tName: \"CHANNELS\",\n\t})\n\n\tif err != nil {\n\t\tlog.Warn(\"Could not write JSON\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype action struct {\n\tName string `json:\"name\"`\n\tMessage string `json:\"message\"`\n\tChannel string `json:\"channel\"`\n}\n\ntype conn struct {\n\tid int64\n\tc *websocket.Conn\n}\n\ntype response struct {\n\tName string `json:\"name\"`\n\tContents interface{} `json:\"contents\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Timo Savola. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage run\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com\/tsavola\/wag\/sections\"\n\t\"github.com\/tsavola\/wag\/types\"\n)\n\ntype callSite struct {\n\tindex uint64\n\tstackOffset int\n}\n\nfunc findCaller(funcMap []byte, retAddr uint32) (num int, initial, ok bool) {\n\tcount := len(funcMap) \/ 4\n\tif count == 0 {\n\t\treturn\n\t}\n\n\tfirstFuncAddr := endian.Uint32(funcMap[:4])\n\tif retAddr > 0 && retAddr < firstFuncAddr {\n\t\tinitial = true\n\t\tok = true\n\t\treturn\n\t}\n\n\tnum = sort.Search(count, func(i int) bool {\n\t\ti++\n\t\tif i == count {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn retAddr <= endian.Uint32(funcMap[i*4:(i+1)*4])\n\t\t}\n\t})\n\n\tif num < count {\n\t\tok = true\n\t}\n\treturn\n}\n\nfunc getCallSites(callMap []byte) (callSites map[int]callSite) {\n\tcallSites = make(map[int]callSite)\n\n\tfor i := 0; len(callMap) > 0; i++ {\n\t\tentry := endian.Uint64(callMap[:8])\n\t\tcallMap = callMap[8:]\n\n\t\taddr := int(uint32(entry))\n\t\tstackOffset := int(entry >> 32)\n\n\t\tcallSites[addr] = callSite{uint64(i), stackOffset}\n\t}\n\n\treturn\n}\n\nfunc writeStacktraceTo(w io.Writer, textAddr uint64, stack, funcMap, callMap []byte, funcSigs []types.Function, ns *sections.NameSection,\n) (err error) {\n\tstack = stack[signalStackReserve:]\n\n\tunused := endian.Uint64(stack)\n\tif unused == 0 {\n\t\terr = errors.New(\"no stack\")\n\t\treturn\n\t}\n\tif unused > uint64(len(stack)) || (unused&7) != 0 {\n\t\terr = errors.New(\"corrupted stack\")\n\t\treturn\n\t}\n\tstack = stack[unused:]\n\n\tcallSites := getCallSites(callMap)\n\n\tdepth := 1\n\n\tfor ; len(stack) > 0; depth++ {\n\t\tabsoluteRetAddr := endian.Uint64(stack[:8])\n\n\t\tretAddr := absoluteRetAddr - textAddr\n\t\tif retAddr > 0x7ffffffe {\n\t\t\tfmt.Fprintf(w, \"#%d <absolute return address 0x%x is not in text section>\\n\", depth, absoluteRetAddr)\n\t\t\treturn\n\t\t}\n\n\t\tfuncNum, start, ok := findCaller(funcMap, uint32(retAddr))\n\t\tif !ok {\n\t\t\tfmt.Fprintf(w, \"#%d <function not found for return address 0x%x>\\n\", depth, retAddr)\n\t\t\treturn\n\t\t}\n\n\t\tsite, found := callSites[int(retAddr)]\n\t\tif !found {\n\t\t\tfmt.Fprintf(w, \"#%d <unknown return address 0x%x>\\n\", depth, retAddr)\n\t\t\treturn\n\t\t}\n\n\t\tif start {\n\t\t\tif site.stackOffset != 0 {\n\t\t\t\tfmt.Fprintf(w, \"#%d <start function call site stack offset is not zero>\\n\", depth)\n\t\t\t}\n\t\t\tif len(stack) != 8 {\n\t\t\t\tfmt.Fprintf(w, \"#%d <start function return address is not stored at start of stack>\\n\", depth)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif site.stackOffset < 8 || (site.stackOffset&7) != 0 {\n\t\t\tfmt.Fprintf(w, \"#%d <invalid stack offset %d>\\n\", depth, site.stackOffset)\n\t\t\treturn\n\t\t}\n\n\t\tstack = stack[site.stackOffset:]\n\n\t\tvar name string\n\t\tvar localNames []string\n\n\t\tif ns != nil && funcNum < len(ns.FunctionNames) {\n\t\t\tname = ns.FunctionNames[funcNum].FunName\n\t\t\tlocalNames = ns.FunctionNames[funcNum].LocalNames\n\t\t} else {\n\t\t\tname = fmt.Sprintf(\"func-%d\", funcNum)\n\t\t}\n\n\t\tvar sigStr string\n\n\t\tif funcNum < len(funcSigs) {\n\t\t\tsigStr = funcSigs[funcNum].StringWithNames(localNames)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"#%d %s%s\\n\", depth, name, sigStr)\n\t}\n\n\tif len(stack) != 0 {\n\t\tfmt.Fprintf(w, \"#%d <%d bytes of untraced stack>\\n\", depth, len(stack))\n\t}\n\treturn\n}\n<commit_msg>run: demangle C++ symbols in stacktrace<commit_after>\/\/ Copyright (c) 2016 Timo Savola. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage run\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com\/ianlancetaylor\/demangle\"\n\t\"github.com\/tsavola\/wag\/sections\"\n\t\"github.com\/tsavola\/wag\/types\"\n)\n\ntype callSite struct {\n\tindex uint64\n\tstackOffset int\n}\n\nfunc findCaller(funcMap []byte, retAddr uint32) (num int, initial, ok bool) {\n\tcount := len(funcMap) \/ 4\n\tif count == 0 {\n\t\treturn\n\t}\n\n\tfirstFuncAddr := endian.Uint32(funcMap[:4])\n\tif retAddr > 0 && retAddr < firstFuncAddr {\n\t\tinitial = true\n\t\tok = true\n\t\treturn\n\t}\n\n\tnum = sort.Search(count, func(i int) bool {\n\t\ti++\n\t\tif i == count {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn retAddr <= endian.Uint32(funcMap[i*4:(i+1)*4])\n\t\t}\n\t})\n\n\tif num < count {\n\t\tok = true\n\t}\n\treturn\n}\n\nfunc getCallSites(callMap []byte) (callSites map[int]callSite) {\n\tcallSites = make(map[int]callSite)\n\n\tfor i := 0; len(callMap) > 0; i++ {\n\t\tentry := endian.Uint64(callMap[:8])\n\t\tcallMap = callMap[8:]\n\n\t\taddr := int(uint32(entry))\n\t\tstackOffset := int(entry >> 32)\n\n\t\tcallSites[addr] = callSite{uint64(i), stackOffset}\n\t}\n\n\treturn\n}\n\nfunc writeStacktraceTo(w io.Writer, textAddr uint64, stack, funcMap, callMap []byte, funcSigs []types.Function, ns *sections.NameSection,\n) (err error) {\n\tstack = stack[signalStackReserve:]\n\n\tunused := endian.Uint64(stack)\n\tif unused == 0 {\n\t\terr = errors.New(\"no stack\")\n\t\treturn\n\t}\n\tif unused > uint64(len(stack)) || (unused&7) != 0 {\n\t\terr = errors.New(\"corrupted stack\")\n\t\treturn\n\t}\n\tstack = stack[unused:]\n\n\tcallSites := getCallSites(callMap)\n\n\tdepth := 1\n\n\tfor ; len(stack) > 0; depth++ {\n\t\tabsoluteRetAddr := endian.Uint64(stack[:8])\n\n\t\tretAddr := absoluteRetAddr - textAddr\n\t\tif retAddr > 0x7ffffffe {\n\t\t\tfmt.Fprintf(w, \"#%d <absolute return address 0x%x is not in text section>\\n\", depth, absoluteRetAddr)\n\t\t\treturn\n\t\t}\n\n\t\tfuncNum, start, ok := findCaller(funcMap, uint32(retAddr))\n\t\tif !ok {\n\t\t\tfmt.Fprintf(w, \"#%d <function not found for return address 0x%x>\\n\", depth, retAddr)\n\t\t\treturn\n\t\t}\n\n\t\tsite, found := callSites[int(retAddr)]\n\t\tif !found {\n\t\t\tfmt.Fprintf(w, \"#%d <unknown return address 0x%x>\\n\", depth, retAddr)\n\t\t\treturn\n\t\t}\n\n\t\tif start {\n\t\t\tif site.stackOffset != 0 {\n\t\t\t\tfmt.Fprintf(w, \"#%d <start function call site stack offset is not zero>\\n\", depth)\n\t\t\t}\n\t\t\tif len(stack) != 8 {\n\t\t\t\tfmt.Fprintf(w, \"#%d <start function return address is not stored at start of stack>\\n\", depth)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif site.stackOffset < 8 || (site.stackOffset&7) != 0 {\n\t\t\tfmt.Fprintf(w, \"#%d <invalid stack offset %d>\\n\", depth, site.stackOffset)\n\t\t\treturn\n\t\t}\n\n\t\tstack = stack[site.stackOffset:]\n\n\t\tvar name string\n\t\tvar localNames []string\n\n\t\tif ns != nil && funcNum < len(ns.FunctionNames) {\n\t\t\tname = ns.FunctionNames[funcNum].FunName\n\t\t\tlocalNames = ns.FunctionNames[funcNum].LocalNames\n\t\t} else {\n\t\t\tname = fmt.Sprintf(\"func-%d\", funcNum)\n\t\t}\n\n\t\tvar sigStr string\n\n\t\tif funcNum < len(funcSigs) {\n\t\t\tsigStr = funcSigs[funcNum].StringWithNames(localNames)\n\t\t}\n\n\t\tprettyName, err := demangle.ToString(name)\n\t\tif err != nil {\n\t\t\tprettyName = name\n\t\t}\n\n\t\tfmt.Fprintf(w, \"#%d %s%s\\n\", depth, prettyName, sigStr)\n\t}\n\n\tif len(stack) != 0 {\n\t\tfmt.Fprintf(w, \"#%d <%d bytes of untraced stack>\\n\", depth, len(stack))\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 IBM Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\"github.com\/amalgam8\/amalgam8\/cli\/common\"\n\t\"github.com\/amalgam8\/amalgam8\/cli\/terminal\"\n\t\"github.com\/amalgam8\/amalgam8\/cli\/utils\"\n\tctrl \"github.com\/amalgam8\/amalgam8\/controller\/client\"\n\t\"github.com\/amalgam8\/amalgam8\/pkg\/api\"\n\treg \"github.com\/amalgam8\/amalgam8\/registry\/client\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ TrafficStartCommand is used for the route-list command.\ntype TrafficStartCommand struct {\n\tctx *cli.Context\n\tregistry *reg.Client\n\tcontroller *ctrl.Client\n\tterm terminal.UI\n}\n\n\/\/ NewTrafficStartCommand constructs a new TrafficStart.\nfunc NewTrafficStartCommand(term terminal.UI) (cmd *TrafficStartCommand) {\n\treturn &TrafficStartCommand{\n\t\tterm: term,\n\t}\n}\n\n\/\/ GetMetadata returns the metadata.\nfunc (cmd *TrafficStartCommand) GetMetadata() cli.Command {\n\tT := utils.Language(common.DefaultLanguage)\n\treturn cli.Command{\n\t\tName: \"traffic-start\",\n\t\tDescription: T(\"traffic_start_description\"),\n\t\tUsage: T(\"traffic_start_usage\"),\n\t\t\/\/ TODO: Complete UsageText\n\t\tUsageText: T(\"traffic_start_usage\"),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"service, s\",\n\t\t\t\tUsage: T(\"traffic_start_service_usage\"),\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"version, v\",\n\t\t\t\tUsage: T(\"traffic_start_version_usage\"),\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"amount, a\",\n\t\t\t\tUsage: T(\"traffic_start_amount_usage\"),\n\t\t\t},\n\t\t},\n\t\tBefore: cmd.Before,\n\t\tOnUsageError: cmd.OnUsageError,\n\t\tAction: cmd.Action,\n\t}\n}\n\n\/\/ Before runs before the Action\n\/\/ https:\/\/godoc.org\/github.com\/urfave\/cli#BeforeFunc\nfunc (cmd *TrafficStartCommand) Before(ctx *cli.Context) error {\n\t\/\/ Update the context\n\tcmd.ctx = ctx\n\treturn nil\n}\n\n\/\/ OnUsageError is executed if an usage error occurs.\nfunc (cmd *TrafficStartCommand) OnUsageError(ctx *cli.Context, err error, isSubcommand bool) error {\n\tcli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n\treturn nil\n}\n\n\/\/ Action runs when no subcommands are specified\n\/\/ https:\/\/godoc.org\/github.com\/urfave\/cli#ActionFunc\nfunc (cmd *TrafficStartCommand) Action(ctx *cli.Context) error {\n\tregistry, err := NewRegistry(ctx)\n\tif err != nil {\n\t\t\/\/ Exit if the registry returned an error\n\t\treturn nil\n\t}\n\t\/\/ Update the registry\n\tcmd.registry = registry\n\n\tcontroller, err := NewController(ctx)\n\tif err != nil {\n\t\t\/\/ Exit if the controller returned an error\n\t\treturn nil\n\t}\n\t\/\/ Update the controller\n\tcmd.controller = controller\n\n\tif ctx.IsSet(\"service\") && ctx.IsSet(\"version\") {\n\t\tif ctx.Int(\"amount\") < 0 || ctx.Int(\"amount\") > 100 {\n\t\t\tfmt.Fprintf(ctx.App.Writer, \"%s\\n\\n\", common.ErrIncorrectAmountRange.Error())\n\t\t\treturn nil\n\t\t}\n\n\t\treturn cmd.StartTraffic(ctx.String(\"service\"), ctx.String(\"version\"), ctx.Int(\"amount\"))\n\n\t}\n\n\tif ctx.NArg() > 0 {\n\t\tcli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n\t\treturn nil\n\t}\n\n\treturn cmd.DefaultAction(ctx)\n}\n\n\/\/ StartTraffic .\nfunc (cmd *TrafficStartCommand) StartTraffic(serviceName string, version string, amount int) error {\n\n\tfilter := &api.RuleFilter{\n\t\tDestinations: []string{serviceName},\n\t}\n\troutes, err := cmd.controller.ListRoutes(filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\troutingRules := routes.Services[serviceName]\n\tif len(routingRules) == 0 {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: %q\\n\\n\", common.ErrNotRulesFoundForService.Error(), serviceName)\n\t\treturn nil\n\t}\n\n\tfor _, rule := range routingRules {\n\t\tif len(rule.Route.Backends) > 1 {\n\t\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q traffic is already being split\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\trule := routingRules[0]\n\tfor _, backend := range rule.Route.Backends {\n\t\tif backend.Weight != 0 {\n\t\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q traffic is already being split\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tdefaultVersion := strings.Join(rule.Route.Backends[0].Tags, \", \")\n\n\tinstances, err := cmd.registry.ListServiceInstances(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !cmd.IsServiceActive(serviceName, defaultVersion, instances) {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q is not currently receiving traffic\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\treturn nil\n\t}\n\n\tif !cmd.IsServiceActive(serviceName, version, instances) {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q does not have active instances of version %q\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName, version)\n\t\treturn nil\n\t}\n\n\tif amount == 100 {\n\t\trule.Route.Backends[0].Tags = []string{version}\n\t} else {\n\t\tif amount == 0 {\n\t\t\tamount += 10\n\t\t}\n\n\t\trule.Route.Backends = append([]api.Backend{api.Backend{\n\t\t\tTags: []string{version},\n\t\t\tWeight: float64(amount) \/ 100,\n\t\t}}, rule.Route.Backends...)\n\t}\n\n\trulesSet := &api.RulesSet{\n\t\tRules: []api.Rule{\n\t\t\trule,\n\t\t},\n\t}\n\n\t_, err = cmd.controller.UpdateRules(rulesSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif amount == 100 {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"Transfer complete for %q: sending %d%% of traffic to %q\\n\\n\", serviceName, amount, version)\n\t\treturn nil\n\t}\n\n\tfmt.Fprintf(cmd.ctx.App.Writer, \"Transfer starting for %q: diverting %d%% of traffic from %q to %q\\n\\n\", serviceName, amount, defaultVersion, version)\n\treturn nil\n}\n\n\/\/ IsServiceActive .\nfunc (cmd *TrafficStartCommand) IsServiceActive(serviceName, version string, instances []*api.ServiceInstance) bool {\n\tfor _, instance := range instances {\n\t\tif version == strings.Join(instance.Tags, \", \") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ DefaultAction runs the default action.\nfunc (cmd *TrafficStartCommand) DefaultAction(ctx *cli.Context) error {\n\treturn cli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n}\n<commit_msg>bug fix: subset matching for tags if instances has more than one<commit_after>\/\/ Copyright 2016 IBM Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage commands\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\"github.com\/amalgam8\/amalgam8\/cli\/common\"\n\t\"github.com\/amalgam8\/amalgam8\/cli\/terminal\"\n\t\"github.com\/amalgam8\/amalgam8\/cli\/utils\"\n\tctrl \"github.com\/amalgam8\/amalgam8\/controller\/client\"\n\t\"github.com\/amalgam8\/amalgam8\/pkg\/api\"\n\treg \"github.com\/amalgam8\/amalgam8\/registry\/client\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ TrafficStartCommand is used for the route-list command.\ntype TrafficStartCommand struct {\n\tctx *cli.Context\n\tregistry *reg.Client\n\tcontroller *ctrl.Client\n\tterm terminal.UI\n}\n\n\/\/ NewTrafficStartCommand constructs a new TrafficStart.\nfunc NewTrafficStartCommand(term terminal.UI) (cmd *TrafficStartCommand) {\n\treturn &TrafficStartCommand{\n\t\tterm: term,\n\t}\n}\n\n\/\/ GetMetadata returns the metadata.\nfunc (cmd *TrafficStartCommand) GetMetadata() cli.Command {\n\tT := utils.Language(common.DefaultLanguage)\n\treturn cli.Command{\n\t\tName: \"traffic-start\",\n\t\tDescription: T(\"traffic_start_description\"),\n\t\tUsage: T(\"traffic_start_usage\"),\n\t\t\/\/ TODO: Complete UsageText\n\t\tUsageText: T(\"traffic_start_usage\"),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"service, s\",\n\t\t\t\tUsage: T(\"traffic_start_service_usage\"),\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"version, v\",\n\t\t\t\tUsage: T(\"traffic_start_version_usage\"),\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"amount, a\",\n\t\t\t\tUsage: T(\"traffic_start_amount_usage\"),\n\t\t\t},\n\t\t},\n\t\tBefore: cmd.Before,\n\t\tOnUsageError: cmd.OnUsageError,\n\t\tAction: cmd.Action,\n\t}\n}\n\n\/\/ Before runs before the Action\n\/\/ https:\/\/godoc.org\/github.com\/urfave\/cli#BeforeFunc\nfunc (cmd *TrafficStartCommand) Before(ctx *cli.Context) error {\n\t\/\/ Update the context\n\tcmd.ctx = ctx\n\treturn nil\n}\n\n\/\/ OnUsageError is executed if an usage error occurs.\nfunc (cmd *TrafficStartCommand) OnUsageError(ctx *cli.Context, err error, isSubcommand bool) error {\n\tcli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n\treturn nil\n}\n\n\/\/ Action runs when no subcommands are specified\n\/\/ https:\/\/godoc.org\/github.com\/urfave\/cli#ActionFunc\nfunc (cmd *TrafficStartCommand) Action(ctx *cli.Context) error {\n\tregistry, err := NewRegistry(ctx)\n\tif err != nil {\n\t\t\/\/ Exit if the registry returned an error\n\t\treturn nil\n\t}\n\t\/\/ Update the registry\n\tcmd.registry = registry\n\n\tcontroller, err := NewController(ctx)\n\tif err != nil {\n\t\t\/\/ Exit if the controller returned an error\n\t\treturn nil\n\t}\n\t\/\/ Update the controller\n\tcmd.controller = controller\n\n\tif ctx.IsSet(\"service\") && ctx.IsSet(\"version\") {\n\t\tif ctx.Int(\"amount\") < 0 || ctx.Int(\"amount\") > 100 {\n\t\t\tfmt.Fprintf(ctx.App.Writer, \"%s\\n\\n\", common.ErrIncorrectAmountRange.Error())\n\t\t\treturn nil\n\t\t}\n\n\t\treturn cmd.StartTraffic(ctx.String(\"service\"), ctx.String(\"version\"), ctx.Int(\"amount\"))\n\n\t}\n\n\tif ctx.NArg() > 0 {\n\t\tcli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n\t\treturn nil\n\t}\n\n\treturn cmd.DefaultAction(ctx)\n}\n\n\/\/ StartTraffic .\nfunc (cmd *TrafficStartCommand) StartTraffic(serviceName string, version string, amount int) error {\n\n\tfilter := &api.RuleFilter{\n\t\tDestinations: []string{serviceName},\n\t}\n\troutes, err := cmd.controller.ListRoutes(filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\troutingRules := routes.Services[serviceName]\n\tif len(routingRules) == 0 {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: %q\\n\\n\", common.ErrNotRulesFoundForService.Error(), serviceName)\n\t\treturn nil\n\t}\n\n\tfor _, rule := range routingRules {\n\t\tif len(rule.Route.Backends) > 1 {\n\t\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q traffic is already being split\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\trule := routingRules[0]\n\tfor _, backend := range rule.Route.Backends {\n\t\tif backend.Weight != 0 {\n\t\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q traffic is already being split\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tdefaultVersion := strings.Join(rule.Route.Backends[0].Tags, \", \")\n\n\tinstances, err := cmd.registry.ListServiceInstances(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !cmd.IsServiceActive(serviceName, defaultVersion, instances) {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q is not currently receiving traffic\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName)\n\t\treturn nil\n\t}\n\n\tif !cmd.IsServiceActive(serviceName, version, instances) {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"%s: service %q does not have active instances of version %q\\n\\n\", common.ErrInvalidStateForTrafficStart.Error(), serviceName, version)\n\t\treturn nil\n\t}\n\n\tif amount == 100 {\n\t\trule.Route.Backends[0].Tags = []string{version}\n\t} else {\n\t\tif amount == 0 {\n\t\t\tamount += 10\n\t\t}\n\n\t\trule.Route.Backends = append([]api.Backend{api.Backend{\n\t\t\tTags: []string{version},\n\t\t\tWeight: float64(amount) \/ 100,\n\t\t}}, rule.Route.Backends...)\n\t}\n\n\trulesSet := &api.RulesSet{\n\t\tRules: []api.Rule{\n\t\t\trule,\n\t\t},\n\t}\n\n\t_, err = cmd.controller.UpdateRules(rulesSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif amount == 100 {\n\t\tfmt.Fprintf(cmd.ctx.App.Writer, \"Transfer complete for %q: sending %d%% of traffic to %q\\n\\n\", serviceName, amount, version)\n\t\treturn nil\n\t}\n\n\tfmt.Fprintf(cmd.ctx.App.Writer, \"Transfer starting for %q: diverting %d%% of traffic from %q to %q\\n\\n\", serviceName, amount, defaultVersion, version)\n\treturn nil\n}\n\n\/\/ IsServiceActive .\nfunc (cmd *TrafficStartCommand) IsServiceActive(serviceName, version string, instances []*api.ServiceInstance) bool {\n\tfor _, instance := range instances {\n\t\tfor _, tag := range instance.Tags {\n\t\t\tif version == tag {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ DefaultAction runs the default action.\nfunc (cmd *TrafficStartCommand) DefaultAction(ctx *cli.Context) error {\n\treturn cli.ShowCommandHelp(ctx, cmd.GetMetadata().FullName())\n}\n<|endoftext|>"} {"text":"<commit_before>package beat\n\nimport (\n\t\"bytes\"\n\t\"github.com\/christiangalsterer\/execbeat\/config\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n\t\"github.com\/robfig\/cron\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Executor struct {\n\texecbeat *Execbeat\n\tconfig config.ExecConfig\n\tschedule string\n\tdocumentType string\n}\n\nfunc NewExecutor(execbeat *Execbeat, config config.ExecConfig) *Executor {\n\texecutor := &Executor{\n\t\texecbeat: execbeat,\n\t\tconfig: config,\n\t}\n\n\treturn executor\n}\n\nfunc (e *Executor) Run() {\n\n\t\/\/ setup default config\n\te.documentType = config.DefaultDocumentType\n\te.schedule = config.DefaultSchedule\n\n\t\/\/ setup DocumentType\n\tif e.config.DocumentType != \"\" {\n\t\te.documentType = e.config.DocumentType\n\t}\n\n\t\/\/ setup cron schedule\n\tif e.config.Schedule != \"\" {\n\t\tlogp.Debug(\"Execbeat\", \"Use schedule: [%w]\", e.config.Schedule)\n\t\te.schedule = e.config.Schedule\n\t}\n\n\tcron := cron.New()\n\tcron.AddFunc(e.schedule, func() { e.runOneTime() })\n\tcron.Start()\n}\n\nfunc (e *Executor) runOneTime() error {\n\tvar cmd *exec.Cmd\n\tvar err error\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\n\tcmdName := strings.TrimSpace(e.config.Command)\n\tcmdArgs := strings.TrimSpace(e.config.Args)\n\n\t\/\/ execute command\n\tlogp.Debug(\"Execbeat\", \"Executing command: [%v] with args [%w]\", cmdName, cmdArgs)\n\tnow := time.Now()\n\n\tif len(cmdArgs) > 0 {\n\t\tcmd = exec.Command(cmdName, cmdArgs)\n\t} else {\n\t\tcmd = exec.Command(cmdName)\n\t}\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlogp.Err(\"An error occured while executing command: %v\", err)\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tlogp.Err(\"An error occured while executing command: %v\", err)\n\t}\n\n\tcommandEvent := Exec{\n\t\tCommand: cmdName,\n\t\tStdOut: stdout.String(),\n\t\tStdErr: stderr.String(),\n\t}\n\n\tevent := ExecEvent{\n\t\tReadTime: now,\n\t\tDocumentType: e.documentType,\n\t\tFields: e.config.Fields,\n\t\tExec: commandEvent,\n\t}\n\n\te.execbeat.client.PublishEvent(event.ToMapStr())\n\n\treturn nil\n}\n\nfunc (e *Executor) Stop() {\n}\n<commit_msg>- Refactoring for default configuration<commit_after>package beat\n\nimport (\n\t\"bytes\"\n\t\"github.com\/christiangalsterer\/execbeat\/config\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n\t\"github.com\/robfig\/cron\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Executor struct {\n\texecbeat *Execbeat\n\tconfig config.ExecConfig\n\tschedule string\n\tdocumentType string\n}\n\nfunc NewExecutor(execbeat *Execbeat, config config.ExecConfig) *Executor {\n\texecutor := &Executor{\n\t\texecbeat: execbeat,\n\t\tconfig: config,\n\t}\n\n\treturn executor\n}\n\nfunc (e *Executor) Run() {\n\n\t\/\/ setup default config\n\te.documentType = config.DefaultDocumentType\n\te.schedule = config.DefaultSchedule\n\n\t\/\/ setup document type\n\tif e.config.DocumentType != \"\" {\n\t\te.documentType = e.config.DocumentType\n\t}\n\n\t\/\/ setup cron schedule\n\tif e.config.Schedule != \"\" {\n\t\tlogp.Debug(\"Execbeat\", \"Use schedule: [%w]\", e.config.Schedule)\n\t\te.schedule = e.config.Schedule\n\t}\n\n\tcron := cron.New()\n\tcron.AddFunc(e.schedule, func() { e.runOneTime() })\n\tcron.Start()\n}\n\nfunc (e *Executor) runOneTime() error {\n\tvar cmd *exec.Cmd\n\tvar err error\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\n\tcmdName := strings.TrimSpace(e.config.Command)\n\tcmdArgs := strings.TrimSpace(e.config.Args)\n\n\t\/\/ execute command\n\tlogp.Debug(\"Execbeat\", \"Executing command: [%v] with args [%w]\", cmdName, cmdArgs)\n\tnow := time.Now()\n\n\tif len(cmdArgs) > 0 {\n\t\tcmd = exec.Command(cmdName, cmdArgs)\n\t} else {\n\t\tcmd = exec.Command(cmdName)\n\t}\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlogp.Err(\"An error occured while executing command: %v\", err)\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tlogp.Err(\"An error occured while executing command: %v\", err)\n\t}\n\n\tcommandEvent := Exec{\n\t\tCommand: cmdName,\n\t\tStdOut: stdout.String(),\n\t\tStdErr: stderr.String(),\n\t}\n\n\tevent := ExecEvent{\n\t\tReadTime: now,\n\t\tDocumentType: e.documentType,\n\t\tFields: e.config.Fields,\n\t\tExec: commandEvent,\n\t}\n\n\te.execbeat.client.PublishEvent(event.ToMapStr())\n\n\treturn nil\n}\n\nfunc (e *Executor) Stop() {\n}\n<|endoftext|>"} {"text":"<commit_before>package assert\n\ntype AnyTypeAssert interface {\n\tIsEqualTo(expected interface{}) AnyTypeAssert\n}\n<commit_msg>some ideas for any type assertions<commit_after>package assert\n\ntype AnyTypeAssert interface {\n\tIsEqualTo(expected interface{}) AnyTypeAssert\n\t\/\/ TODO\n\t\/\/ IsNotEqualTo(unexpected interface{}) AnyTypeAssert\n\t\/\/ IsNil() AnyTypeAssert\n\t\/\/ IsNotNil() AnyTypeAssert\n\t\/\/ AsInt() IntAssert \/\/ new assert that treats actual value as int, reports failure if cannot convert\n\t\/\/ AsString() StringAssert\n}\n<|endoftext|>"} {"text":"<commit_before>package s3\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/corehandlers\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/smira\/aptly\/aptly\"\n\t\"github.com\/smira\/aptly\/utils\"\n\t\"github.com\/smira\/go-aws-auth\"\n)\n\n\/\/ PublishedStorage abstract file system with published files (actually hosted on S3)\ntype PublishedStorage struct {\n\ts3 *s3.S3\n\tconfig *aws.Config\n\tbucket string\n\tacl string\n\tprefix string\n\tstorageClass string\n\tencryptionMethod string\n\tplusWorkaround bool\n\tdisableMultiDel bool\n\tpathCache map[string]string\n}\n\n\/\/ Check interface\nvar (\n\t_ aptly.PublishedStorage = (*PublishedStorage)(nil)\n)\n\n\/\/ NewPublishedStorageRaw creates published storage from raw aws credentials\nfunc NewPublishedStorageRaw(\n\tbucket, defaultACL, prefix, storageClass, encryptionMethod string,\n\tplusWorkaround, disabledMultiDel bool,\n\tconfig *aws.Config,\n) (*PublishedStorage, error) {\n\tif defaultACL == \"\" {\n\t\tdefaultACL = \"private\"\n\t}\n\n\tif storageClass == \"STANDARD\" {\n\t\tstorageClass = \"\"\n\t}\n\n\tsess, err := session.NewSession(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &PublishedStorage{\n\t\ts3: s3.New(sess),\n\t\tbucket: bucket,\n\t\tconfig: config,\n\t\tacl: defaultACL,\n\t\tprefix: prefix,\n\t\tstorageClass: storageClass,\n\t\tencryptionMethod: encryptionMethod,\n\t\tplusWorkaround: plusWorkaround,\n\t\tdisableMultiDel: disabledMultiDel,\n\t}\n\n\treturn result, nil\n}\n\n\/\/ NewPublishedStorage creates new instance of PublishedStorage with specified S3 access\n\/\/ keys, region and bucket name\nfunc NewPublishedStorage(accessKey, secretKey, sessionToken, region, endpoint, bucket, defaultACL, prefix,\n\tstorageClass, encryptionMethod string, plusWorkaround, disableMultiDel, forceSigV2, debug bool) (*PublishedStorage, error) {\n\n\tconfig := &aws.Config{\n\t\tRegion: aws.String(region),\n\t}\n\n\tif endpoint != \"\" {\n\t\tconfig = config.WithEndpoint(endpoint).WithS3ForcePathStyle(true)\n\t}\n\n\tif accessKey != \"\" {\n\t\tconfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)\n\t}\n\n\tif debug {\n\t\tconfig = config.WithLogLevel(aws.LogDebug)\n\t}\n\n\tresult, err := NewPublishedStorageRaw(bucket, defaultACL, prefix, storageClass,\n\t\tencryptionMethod, plusWorkaround, disableMultiDel, config)\n\n\tif err == nil && forceSigV2 {\n\t\tcreds := []awsauth.Credentials{}\n\n\t\tif accessKey != \"\" {\n\t\t\tcreds = append(creds, awsauth.Credentials{\n\t\t\t\tAccessKeyID: accessKey,\n\t\t\t\tSecretAccessKey: secretKey,\n\t\t\t})\n\t\t}\n\n\t\tresult.s3.Handlers.Sign.Clear()\n\t\tresult.s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)\n\t\tresult.s3.Handlers.Sign.PushBack(func(req *request.Request) {\n\t\t\tawsauth.SignS3(req.HTTPRequest, creds...)\n\t\t})\n\t}\n\n\treturn result, err\n}\n\n\/\/ String\nfunc (storage *PublishedStorage) String() string {\n\treturn fmt.Sprintf(\"S3: %s:%s\/%s\", *storage.config.Region, storage.bucket, storage.prefix)\n}\n\n\/\/ MkDir creates directory recursively under public path\nfunc (storage *PublishedStorage) MkDir(path string) error {\n\t\/\/ no op for S3\n\treturn nil\n}\n\n\/\/ PutFile puts file into published storage at specified path\nfunc (storage *PublishedStorage) PutFile(path string, sourceFilename string) error {\n\tvar (\n\t\tsource *os.File\n\t\terr error\n\t)\n\tsource, err = os.Open(sourceFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer source.Close()\n\n\terr = storage.putFile(path, source)\n\tif err != nil {\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"error uploading %s to %s\", sourceFilename, storage))\n\t}\n\n\treturn err\n}\n\n\/\/ putFile uploads file-like object to\nfunc (storage *PublishedStorage) putFile(path string, source io.ReadSeeker) error {\n\n\tparams := &s3.PutObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t\tBody: source,\n\t\tACL: aws.String(storage.acl),\n\t}\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.PutObject(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif storage.plusWorkaround && strings.Contains(path, \"+\") {\n\t\t_, err = source.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn storage.putFile(strings.Replace(path, \"+\", \" \", -1), source)\n\t}\n\treturn nil\n}\n\n\/\/ Remove removes single file under public path\nfunc (storage *PublishedStorage) Remove(path string) error {\n\tparams := &s3.DeleteObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\t_, err := storage.s3.DeleteObject(params)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error deleting %s from %s\", path, storage))\n\t}\n\n\tif storage.plusWorkaround && strings.Contains(path, \"+\") {\n\t\t\/\/ try to remove workaround version, but don't care about result\n\t\t_ = storage.Remove(strings.Replace(path, \"+\", \" \", -1))\n\t}\n\treturn nil\n}\n\n\/\/ RemoveDirs removes directory structure under public path\nfunc (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress) error {\n\tconst page = 1000\n\n\tfilelist, _, err := storage.internalFilelist(path, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif storage.disableMultiDel {\n\t\tfor i := range filelist {\n\t\t\tparams := &s3.DeleteObjectInput{\n\t\t\t\tBucket: aws.String(storage.bucket),\n\t\t\t\tKey: aws.String(filepath.Join(storage.prefix, path, filelist[i])),\n\t\t\t}\n\t\t\t_, err := storage.s3.DeleteObject(params)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error deleting path %s from %s: %s\", filelist[i], storage, err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnumParts := (len(filelist) + page - 1) \/ page\n\n\t\tfor i := 0; i < numParts; i++ {\n\t\t\tvar part []string\n\t\t\tif i == numParts-1 {\n\t\t\t\tpart = filelist[i*page:]\n\t\t\t} else {\n\t\t\t\tpart = filelist[i*page : (i+1)*page]\n\t\t\t}\n\t\t\tpaths := make([]*s3.ObjectIdentifier, len(part))\n\n\t\t\tfor i := range part {\n\t\t\t\tpaths[i] = &s3.ObjectIdentifier{\n\t\t\t\t\tKey: aws.String(filepath.Join(storage.prefix, path, part[i])),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams := &s3.DeleteObjectsInput{\n\t\t\t\tBucket: aws.String(storage.bucket),\n\t\t\t\tDelete: &s3.Delete{\n\t\t\t\t\tObjects: paths,\n\t\t\t\t\tQuiet: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t_, err := storage.s3.DeleteObjects(params)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error deleting multiple paths from %s: %s\", storage, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ LinkFromPool links package file from pool to dist's pool location\n\/\/\n\/\/ publishedDirectory is desired location in pool (like prefix\/pool\/component\/liba\/libav\/)\n\/\/ sourcePool is instance of aptly.PackagePool\n\/\/ sourcePath is filepath to package file in package pool\n\/\/\n\/\/ LinkFromPool returns relative path for the published file to be included in package index\nfunc (storage *PublishedStorage) LinkFromPool(publishedDirectory, baseName string, sourcePool aptly.PackagePool,\n\tsourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {\n\n\trelPath := filepath.Join(publishedDirectory, baseName)\n\tpoolPath := filepath.Join(storage.prefix, relPath)\n\n\tif storage.pathCache == nil {\n\t\tpaths, md5s, err := storage.internalFilelist(\"\", true)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error caching paths under prefix\")\n\t\t}\n\n\t\tstorage.pathCache = make(map[string]string, len(paths))\n\n\t\tfor i := range paths {\n\t\t\tstorage.pathCache[paths[i]] = md5s[i]\n\t\t}\n\t}\n\n\tdestinationMD5, exists := storage.pathCache[relPath]\n\tsourceMD5 := sourceChecksums.MD5\n\n\tif exists {\n\t\tif sourceMD5 == \"\" {\n\t\t\treturn fmt.Errorf(\"unable to compare object, MD5 checksum missing\")\n\t\t}\n\n\t\tif destinationMD5 == sourceMD5 {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !force && destinationMD5 != sourceMD5 {\n\t\t\treturn fmt.Errorf(\"error putting file to %s: file already exists and is different: %s\", poolPath, storage)\n\n\t\t}\n\t}\n\n\tsource, err := sourcePool.Open(sourcePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer source.Close()\n\n\terr = storage.putFile(relPath, source)\n\tif err == nil {\n\t\tstorage.pathCache[relPath] = sourceMD5\n\t} else {\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"error uploading %s to %s: %s\", sourcePath, storage, poolPath))\n\t}\n\n\treturn err\n}\n\n\/\/ Filelist returns list of files under prefix\nfunc (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {\n\tpaths, _, err := storage.internalFilelist(prefix, true)\n\treturn paths, err\n}\n\nfunc (storage *PublishedStorage) internalFilelist(prefix string, hidePlusWorkaround bool) (paths []string, md5s []string, err error) {\n\tpaths = make([]string, 0, 1024)\n\tmd5s = make([]string, 0, 1024)\n\tprefix = filepath.Join(storage.prefix, prefix)\n\tif prefix != \"\" {\n\t\tprefix += \"\/\"\n\t}\n\n\tparams := &s3.ListObjectsInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tPrefix: aws.String(prefix),\n\t\tMaxKeys: aws.Int64(1000),\n\t}\n\n\terr = storage.s3.ListObjectsPages(params, func(contents *s3.ListObjectsOutput, lastPage bool) bool {\n\t\tfor _, key := range contents.Contents {\n\t\t\tif storage.plusWorkaround && hidePlusWorkaround && strings.Contains(*key.Key, \" \") {\n\t\t\t\t\/\/ if we use plusWorkaround, we want to hide those duplicates\n\t\t\t\t\/\/\/ from listing\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif prefix == \"\" {\n\t\t\t\tpaths = append(paths, *key.Key)\n\t\t\t} else {\n\t\t\t\tpaths = append(paths, (*key.Key)[len(prefix):])\n\t\t\t}\n\t\t\tmd5s = append(md5s, strings.Replace(*key.ETag, \"\\\"\", \"\", -1))\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error listing under prefix %s in %s: %s\", prefix, storage, err)\n\t}\n\n\treturn paths, md5s, nil\n}\n\n\/\/ RenameFile renames (moves) file\nfunc (storage *PublishedStorage) RenameFile(oldName, newName string) error {\n\tsource := fmt.Sprintf(\"\/%s\/%s\", storage.bucket, filepath.Join(storage.prefix, oldName))\n\n\tparams := &s3.CopyObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tCopySource: aws.String(source),\n\t\tKey: aws.String(filepath.Join(storage.prefix, newName)),\n\t\tACL: aws.String(storage.acl),\n\t}\n\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.CopyObject(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying %s -> %s in %s: %s\", oldName, newName, storage, err)\n\t}\n\n\treturn storage.Remove(oldName)\n}\n\n\/\/ SymLink creates a copy of src file and adds link information as meta data\nfunc (storage *PublishedStorage) SymLink(src string, dst string) error {\n\n\tparams := &s3.CopyObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tCopySource: aws.String(filepath.Join(storage.prefix, src)),\n\t\tKey: aws.String(filepath.Join(storage.prefix, dst)),\n\t\tACL: aws.String(storage.acl),\n\t\tMetadata: map[string]*string{\n\t\t\t\"SymLink\": aws.String(src),\n\t\t},\n\t\tMetadataDirective: aws.String(\"REPLACE\"),\n\t}\n\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.CopyObject(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error symlinking %s -> %s in %s: %s\", src, dst, storage, err)\n\t}\n\n\treturn err\n}\n\n\/\/ HardLink using symlink functionality as hard links do not exist\nfunc (storage *PublishedStorage) HardLink(src string, dst string) error {\n\treturn storage.SymLink(src, dst)\n}\n\n\/\/ FileExists returns true if path exists\nfunc (storage *PublishedStorage) FileExists(path string) (bool, error) {\n\tparams := &s3.HeadObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\t_, err := storage.s3.HeadObject(params)\n\tif err != nil {\n\t\taerr, ok := err.(awserr.Error)\n\t\tif ok && aerr.Code() == s3.ErrCodeNoSuchKey {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ ReadLink returns the symbolic link pointed to by path.\n\/\/ This simply reads text file created with SymLink\nfunc (storage *PublishedStorage) ReadLink(path string) (string, error) {\n\tparams := &s3.HeadObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\toutput, err := storage.s3.HeadObject(params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn aws.StringValue(output.Metadata[\"SymLink\"]), nil\n}\n<commit_msg>S3 FileExists fix<commit_after>package s3\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/corehandlers\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/smira\/aptly\/aptly\"\n\t\"github.com\/smira\/aptly\/utils\"\n\t\"github.com\/smira\/go-aws-auth\"\n)\n\nconst errCodeNotFound = \"NotFound\"\n\n\/\/ PublishedStorage abstract file system with published files (actually hosted on S3)\ntype PublishedStorage struct {\n\ts3 *s3.S3\n\tconfig *aws.Config\n\tbucket string\n\tacl string\n\tprefix string\n\tstorageClass string\n\tencryptionMethod string\n\tplusWorkaround bool\n\tdisableMultiDel bool\n\tpathCache map[string]string\n}\n\n\/\/ Check interface\nvar (\n\t_ aptly.PublishedStorage = (*PublishedStorage)(nil)\n)\n\n\/\/ NewPublishedStorageRaw creates published storage from raw aws credentials\nfunc NewPublishedStorageRaw(\n\tbucket, defaultACL, prefix, storageClass, encryptionMethod string,\n\tplusWorkaround, disabledMultiDel bool,\n\tconfig *aws.Config,\n) (*PublishedStorage, error) {\n\tif defaultACL == \"\" {\n\t\tdefaultACL = \"private\"\n\t}\n\n\tif storageClass == \"STANDARD\" {\n\t\tstorageClass = \"\"\n\t}\n\n\tsess, err := session.NewSession(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &PublishedStorage{\n\t\ts3: s3.New(sess),\n\t\tbucket: bucket,\n\t\tconfig: config,\n\t\tacl: defaultACL,\n\t\tprefix: prefix,\n\t\tstorageClass: storageClass,\n\t\tencryptionMethod: encryptionMethod,\n\t\tplusWorkaround: plusWorkaround,\n\t\tdisableMultiDel: disabledMultiDel,\n\t}\n\n\treturn result, nil\n}\n\n\/\/ NewPublishedStorage creates new instance of PublishedStorage with specified S3 access\n\/\/ keys, region and bucket name\nfunc NewPublishedStorage(accessKey, secretKey, sessionToken, region, endpoint, bucket, defaultACL, prefix,\n\tstorageClass, encryptionMethod string, plusWorkaround, disableMultiDel, forceSigV2, debug bool) (*PublishedStorage, error) {\n\n\tconfig := &aws.Config{\n\t\tRegion: aws.String(region),\n\t}\n\n\tif endpoint != \"\" {\n\t\tconfig = config.WithEndpoint(endpoint).WithS3ForcePathStyle(true)\n\t}\n\n\tif accessKey != \"\" {\n\t\tconfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)\n\t}\n\n\tif debug {\n\t\tconfig = config.WithLogLevel(aws.LogDebug)\n\t}\n\n\tresult, err := NewPublishedStorageRaw(bucket, defaultACL, prefix, storageClass,\n\t\tencryptionMethod, plusWorkaround, disableMultiDel, config)\n\n\tif err == nil && forceSigV2 {\n\t\tcreds := []awsauth.Credentials{}\n\n\t\tif accessKey != \"\" {\n\t\t\tcreds = append(creds, awsauth.Credentials{\n\t\t\t\tAccessKeyID: accessKey,\n\t\t\t\tSecretAccessKey: secretKey,\n\t\t\t})\n\t\t}\n\n\t\tresult.s3.Handlers.Sign.Clear()\n\t\tresult.s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)\n\t\tresult.s3.Handlers.Sign.PushBack(func(req *request.Request) {\n\t\t\tawsauth.SignS3(req.HTTPRequest, creds...)\n\t\t})\n\t}\n\n\treturn result, err\n}\n\n\/\/ String\nfunc (storage *PublishedStorage) String() string {\n\treturn fmt.Sprintf(\"S3: %s:%s\/%s\", *storage.config.Region, storage.bucket, storage.prefix)\n}\n\n\/\/ MkDir creates directory recursively under public path\nfunc (storage *PublishedStorage) MkDir(path string) error {\n\t\/\/ no op for S3\n\treturn nil\n}\n\n\/\/ PutFile puts file into published storage at specified path\nfunc (storage *PublishedStorage) PutFile(path string, sourceFilename string) error {\n\tvar (\n\t\tsource *os.File\n\t\terr error\n\t)\n\tsource, err = os.Open(sourceFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer source.Close()\n\n\terr = storage.putFile(path, source)\n\tif err != nil {\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"error uploading %s to %s\", sourceFilename, storage))\n\t}\n\n\treturn err\n}\n\n\/\/ putFile uploads file-like object to\nfunc (storage *PublishedStorage) putFile(path string, source io.ReadSeeker) error {\n\n\tparams := &s3.PutObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t\tBody: source,\n\t\tACL: aws.String(storage.acl),\n\t}\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.PutObject(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif storage.plusWorkaround && strings.Contains(path, \"+\") {\n\t\t_, err = source.Seek(0, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn storage.putFile(strings.Replace(path, \"+\", \" \", -1), source)\n\t}\n\treturn nil\n}\n\n\/\/ Remove removes single file under public path\nfunc (storage *PublishedStorage) Remove(path string) error {\n\tparams := &s3.DeleteObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\t_, err := storage.s3.DeleteObject(params)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error deleting %s from %s\", path, storage))\n\t}\n\n\tif storage.plusWorkaround && strings.Contains(path, \"+\") {\n\t\t\/\/ try to remove workaround version, but don't care about result\n\t\t_ = storage.Remove(strings.Replace(path, \"+\", \" \", -1))\n\t}\n\treturn nil\n}\n\n\/\/ RemoveDirs removes directory structure under public path\nfunc (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress) error {\n\tconst page = 1000\n\n\tfilelist, _, err := storage.internalFilelist(path, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif storage.disableMultiDel {\n\t\tfor i := range filelist {\n\t\t\tparams := &s3.DeleteObjectInput{\n\t\t\t\tBucket: aws.String(storage.bucket),\n\t\t\t\tKey: aws.String(filepath.Join(storage.prefix, path, filelist[i])),\n\t\t\t}\n\t\t\t_, err := storage.s3.DeleteObject(params)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error deleting path %s from %s: %s\", filelist[i], storage, err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnumParts := (len(filelist) + page - 1) \/ page\n\n\t\tfor i := 0; i < numParts; i++ {\n\t\t\tvar part []string\n\t\t\tif i == numParts-1 {\n\t\t\t\tpart = filelist[i*page:]\n\t\t\t} else {\n\t\t\t\tpart = filelist[i*page : (i+1)*page]\n\t\t\t}\n\t\t\tpaths := make([]*s3.ObjectIdentifier, len(part))\n\n\t\t\tfor i := range part {\n\t\t\t\tpaths[i] = &s3.ObjectIdentifier{\n\t\t\t\t\tKey: aws.String(filepath.Join(storage.prefix, path, part[i])),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams := &s3.DeleteObjectsInput{\n\t\t\t\tBucket: aws.String(storage.bucket),\n\t\t\t\tDelete: &s3.Delete{\n\t\t\t\t\tObjects: paths,\n\t\t\t\t\tQuiet: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t_, err := storage.s3.DeleteObjects(params)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error deleting multiple paths from %s: %s\", storage, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ LinkFromPool links package file from pool to dist's pool location\n\/\/\n\/\/ publishedDirectory is desired location in pool (like prefix\/pool\/component\/liba\/libav\/)\n\/\/ sourcePool is instance of aptly.PackagePool\n\/\/ sourcePath is filepath to package file in package pool\n\/\/\n\/\/ LinkFromPool returns relative path for the published file to be included in package index\nfunc (storage *PublishedStorage) LinkFromPool(publishedDirectory, baseName string, sourcePool aptly.PackagePool,\n\tsourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {\n\n\trelPath := filepath.Join(publishedDirectory, baseName)\n\tpoolPath := filepath.Join(storage.prefix, relPath)\n\n\tif storage.pathCache == nil {\n\t\tpaths, md5s, err := storage.internalFilelist(\"\", true)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error caching paths under prefix\")\n\t\t}\n\n\t\tstorage.pathCache = make(map[string]string, len(paths))\n\n\t\tfor i := range paths {\n\t\t\tstorage.pathCache[paths[i]] = md5s[i]\n\t\t}\n\t}\n\n\tdestinationMD5, exists := storage.pathCache[relPath]\n\tsourceMD5 := sourceChecksums.MD5\n\n\tif exists {\n\t\tif sourceMD5 == \"\" {\n\t\t\treturn fmt.Errorf(\"unable to compare object, MD5 checksum missing\")\n\t\t}\n\n\t\tif destinationMD5 == sourceMD5 {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !force && destinationMD5 != sourceMD5 {\n\t\t\treturn fmt.Errorf(\"error putting file to %s: file already exists and is different: %s\", poolPath, storage)\n\n\t\t}\n\t}\n\n\tsource, err := sourcePool.Open(sourcePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer source.Close()\n\n\terr = storage.putFile(relPath, source)\n\tif err == nil {\n\t\tstorage.pathCache[relPath] = sourceMD5\n\t} else {\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"error uploading %s to %s: %s\", sourcePath, storage, poolPath))\n\t}\n\n\treturn err\n}\n\n\/\/ Filelist returns list of files under prefix\nfunc (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {\n\tpaths, _, err := storage.internalFilelist(prefix, true)\n\treturn paths, err\n}\n\nfunc (storage *PublishedStorage) internalFilelist(prefix string, hidePlusWorkaround bool) (paths []string, md5s []string, err error) {\n\tpaths = make([]string, 0, 1024)\n\tmd5s = make([]string, 0, 1024)\n\tprefix = filepath.Join(storage.prefix, prefix)\n\tif prefix != \"\" {\n\t\tprefix += \"\/\"\n\t}\n\n\tparams := &s3.ListObjectsInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tPrefix: aws.String(prefix),\n\t\tMaxKeys: aws.Int64(1000),\n\t}\n\n\terr = storage.s3.ListObjectsPages(params, func(contents *s3.ListObjectsOutput, lastPage bool) bool {\n\t\tfor _, key := range contents.Contents {\n\t\t\tif storage.plusWorkaround && hidePlusWorkaround && strings.Contains(*key.Key, \" \") {\n\t\t\t\t\/\/ if we use plusWorkaround, we want to hide those duplicates\n\t\t\t\t\/\/\/ from listing\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif prefix == \"\" {\n\t\t\t\tpaths = append(paths, *key.Key)\n\t\t\t} else {\n\t\t\t\tpaths = append(paths, (*key.Key)[len(prefix):])\n\t\t\t}\n\t\t\tmd5s = append(md5s, strings.Replace(*key.ETag, \"\\\"\", \"\", -1))\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error listing under prefix %s in %s: %s\", prefix, storage, err)\n\t}\n\n\treturn paths, md5s, nil\n}\n\n\/\/ RenameFile renames (moves) file\nfunc (storage *PublishedStorage) RenameFile(oldName, newName string) error {\n\tsource := fmt.Sprintf(\"\/%s\/%s\", storage.bucket, filepath.Join(storage.prefix, oldName))\n\n\tparams := &s3.CopyObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tCopySource: aws.String(source),\n\t\tKey: aws.String(filepath.Join(storage.prefix, newName)),\n\t\tACL: aws.String(storage.acl),\n\t}\n\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.CopyObject(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying %s -> %s in %s: %s\", oldName, newName, storage, err)\n\t}\n\n\treturn storage.Remove(oldName)\n}\n\n\/\/ SymLink creates a copy of src file and adds link information as meta data\nfunc (storage *PublishedStorage) SymLink(src string, dst string) error {\n\n\tparams := &s3.CopyObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tCopySource: aws.String(filepath.Join(storage.prefix, src)),\n\t\tKey: aws.String(filepath.Join(storage.prefix, dst)),\n\t\tACL: aws.String(storage.acl),\n\t\tMetadata: map[string]*string{\n\t\t\t\"SymLink\": aws.String(src),\n\t\t},\n\t\tMetadataDirective: aws.String(\"REPLACE\"),\n\t}\n\n\tif storage.storageClass != \"\" {\n\t\tparams.StorageClass = aws.String(storage.storageClass)\n\t}\n\tif storage.encryptionMethod != \"\" {\n\t\tparams.ServerSideEncryption = aws.String(storage.encryptionMethod)\n\t}\n\n\t_, err := storage.s3.CopyObject(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error symlinking %s -> %s in %s: %s\", src, dst, storage, err)\n\t}\n\n\treturn err\n}\n\n\/\/ HardLink using symlink functionality as hard links do not exist\nfunc (storage *PublishedStorage) HardLink(src string, dst string) error {\n\treturn storage.SymLink(src, dst)\n}\n\n\/\/ FileExists returns true if path exists\nfunc (storage *PublishedStorage) FileExists(path string) (bool, error) {\n\tparams := &s3.HeadObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\t_, err := storage.s3.HeadObject(params)\n\tif err != nil {\n\t\taerr, ok := err.(awserr.Error)\n\t\tif ok && aerr.Code() == errCodeNotFound {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ ReadLink returns the symbolic link pointed to by path.\n\/\/ This simply reads text file created with SymLink\nfunc (storage *PublishedStorage) ReadLink(path string) (string, error) {\n\tparams := &s3.HeadObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\toutput, err := storage.s3.HeadObject(params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn aws.StringValue(output.Metadata[\"SymLink\"]), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage console\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nconst (\n\tLogLevelDebug = \"Debug\"\n\tLogLevelInfo = \"Info\"\n\tLogLevelWarn = \"Warn\"\n\tLogLevelError = \"Error\"\n\tLogLevelFatal = \"Fatal\"\n)\n\nfunc New() *ConsoleLogger {\n\treturn &ConsoleLogger{\n\t\toutput: os.Stdout,\n\t}\n}\n\ntype ConsoleLogger struct {\n\toutput io.Writer\n}\n\nfunc (logger *ConsoleLogger) SetOutput(w io.Writer) {\n\tlogger.output = w\n}\n\nfunc (logger *ConsoleLogger) Debug(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"[%s] %s\\n\", LogLevelDebug, format), v)\n}\n\nfunc (logger *ConsoleLogger) Info(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"[%s] %s\\n\", LogLevelInfo, format), v)\n}\n\nfunc (logger *ConsoleLogger) Warn(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"[%s] %s\\n\", LogLevelWarn, format), v)\n}\n\nfunc (logger *ConsoleLogger) Error(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"[%s] %s\\n\", LogLevelError, format), v)\n}\n\nfunc (logger *ConsoleLogger) Fatal(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"[%s] %s\\n\", LogLevelFatal, format), v)\n}\n<commit_msg>Changed the format of the console logger so that the log level is separated from the message to make the message more readable.<commit_after>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage console\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nconst (\n\tLogLevelDebug = \"Debug\"\n\tLogLevelInfo = \"Info\"\n\tLogLevelWarn = \"Warn\"\n\tLogLevelError = \"Error\"\n\tLogLevelFatal = \"Fatal\"\n)\n\nfunc New() *ConsoleLogger {\n\treturn &ConsoleLogger{\n\t\toutput: os.Stdout,\n\t}\n}\n\ntype ConsoleLogger struct {\n\toutput io.Writer\n}\n\nfunc (logger *ConsoleLogger) SetOutput(w io.Writer) {\n\tlogger.output = w\n}\n\nfunc (logger *ConsoleLogger) Debug(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"%s: \\t%s\\n\", LogLevelDebug, format), v)\n}\n\nfunc (logger *ConsoleLogger) Info(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"%s: \\t%s\\n\", LogLevelInfo, format), v)\n}\n\nfunc (logger *ConsoleLogger) Warn(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"%s: \\t%s\\n\", LogLevelWarn, format), v)\n}\n\nfunc (logger *ConsoleLogger) Error(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"%s: \\t%s\\n\", LogLevelError, format), v)\n}\n\nfunc (logger *ConsoleLogger) Fatal(format string, v ...interface{}) {\n\tfmt.Fprintf(logger.output, fmt.Sprintf(\"%s: \\t%s\\n\", LogLevelFatal, format), v)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The go-gl Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gl\n\n\/\/ #include \"gl.h\"\nimport \"C\"\nimport \"unsafe\"\n\n\/\/ UniformLocation\n\/\/TODO\n\ntype UniformLocation int\n\nfunc (location UniformLocation) Uniform1f(x float32) {\n\tC.glUniform1f(C.GLint(location), C.GLfloat(x))\n}\n\nfunc (location UniformLocation) Uniform2f(x float32, y float32) {\n\tC.glUniform2f(C.GLint(location), C.GLfloat(x), C.GLfloat(y))\n}\n\nfunc (location UniformLocation) Uniform3f(x float32, y float32, z float32) {\n\tC.glUniform3f(C.GLint(location), C.GLfloat(x), C.GLfloat(y), C.GLfloat(z))\n}\n\nfunc (location UniformLocation) Uniform1ui(x uint) {\n\tC.glUniform1ui(C.GLint(location), C.GLuint(x))\n}\nfunc (location UniformLocation) Uniform2ui(x uint, y uint) {\n\tC.glUniform2ui(C.GLint(location), C.GLuint(x), C.GLuint(y))\n}\n\nfunc (location UniformLocation) Uniform3ui(x uint, y uint, z uint) {\n\tC.glUniform3ui(C.GLint(location), C.GLuint(x), C.GLuint(y), C.GLuint(z))\n}\n\nfunc (location UniformLocation) Uniform4ui(x uint, y uint, z uint, w uint) {\n\tC.glUniform4ui(C.GLint(location), C.GLuint(x), C.GLuint(y), C.GLuint(z), C.GLuint(w))\n}\n\nfunc (location UniformLocation) Uniform1uiv(v ...uint32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform1uiv(C.GLint(location), C.GLsizei(len(v)), (*C.GLuint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform2uiv(v ...[2]uint32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform2uiv(C.GLint(location), C.GLsizei(len(v)), (*C.GLuint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform3uiv(v ...[3]uint32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform3uiv(C.GLint(location), C.GLsizei(len(v)), (*C.GLuint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform4uiv(v ...[4]uint32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform4uiv(C.GLint(location), C.GLsizei(len(v)), (*C.GLuint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform1fv(v ...float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform1fv(C.GLint(location), C.GLsizei(len(v)), (*C.GLfloat)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform1i(x int) {\n\tC.glUniform1i(C.GLint(location), C.GLint(x))\n}\n\nfunc (location UniformLocation) Uniform1iv(v ...int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform1iv(C.GLint(location), C.GLsizei(len(v)), (*C.GLint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform2fv(v ...[2]float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform2fv(C.GLint(location), C.GLsizei(len(v)), (*C.GLfloat)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform2i(x int, y int) {\n\tC.glUniform2i(C.GLint(location), C.GLint(x), C.GLint(y))\n}\n\nfunc (location UniformLocation) Uniform2iv(v ...[2]int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform2iv(C.GLint(location), C.GLsizei(len(v)), (*C.GLint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform3fv(v ...[3]float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform3fv(C.GLint(location), C.GLsizei(len(v)), (*C.GLfloat)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform3i(x int, y int, z int) {\n\tC.glUniform3i(C.GLint(location), C.GLint(x), C.GLint(y), C.GLint(z))\n}\n\nfunc (location UniformLocation) Uniform3iv(v ...[3]int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform3iv(C.GLint(location), C.GLsizei(len(v)), (*C.GLint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform4f(x float32, y float32, z float32, w float32) {\n\tC.glUniform4f(C.GLint(location), C.GLfloat(x), C.GLfloat(y), C.GLfloat(z), C.GLfloat(w))\n}\n\nfunc (location UniformLocation) Uniform4fv(v ...[4]float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform4fv(C.GLint(location), C.GLsizei(len(v)), (*C.GLfloat)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) Uniform4i(x int, y int, z int, w int) {\n\tC.glUniform4i(C.GLint(location), C.GLint(x), C.GLint(y), C.GLint(z), C.GLint(w))\n}\n\nfunc (location UniformLocation) Uniform4iv(v ...[4]int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform4iv(C.GLint(location), C.GLsizei(len(v)), (*C.GLint)((unsafe.Pointer)(&v[0])))\n}\n\nfunc (location UniformLocation) UniformMatrix2fv(transpose bool, list ...[4]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2f(transpose bool, matrix *[4]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3fv(transpose bool, list ...[9]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3f(transpose bool, matrix *[9]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4fv(transpose bool, list ...[16]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4f(transpose bool, matrix *[16]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x3fv(transpose bool, list ...[6]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2x3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x3f(transpose bool, matrix *[6]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2x3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x2fv(transpose bool, list ...[6]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3x2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x2f(transpose bool, matrix *[6]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3x2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x4fv(transpose bool, list ...[8]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2x4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x4f(transpose bool, matrix *[8]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2x4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x2fv(transpose bool, list ...[8]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4x2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x2f(transpose bool, matrix *[8]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4x2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x4fv(transpose bool, list ...[12]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3x4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x4f(transpose bool, matrix *[12]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3x4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x3fv(transpose bool, list ...[12]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4x3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x3f(transpose bool, matrix *[12]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4x3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n<commit_msg>Fixed the mess.<commit_after>\/\/ Copyright 2012 The go-gl Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gl\n\n\/\/ #include \"gl.h\"\nimport \"C\"\nimport \"unsafe\"\n\n\/\/ UniformLocation\n\/\/TODO\n\ntype UniformLocation int\n\nfunc (location UniformLocation) Uniform1f(x float32) {\n\tC.glUniform1f(C.GLint(location), C.GLfloat(x))\n}\n\nfunc (location UniformLocation) Uniform2f(x float32, y float32) {\n\tC.glUniform2f(C.GLint(location), C.GLfloat(x), C.GLfloat(y))\n}\n\nfunc (location UniformLocation) Uniform3f(x float32, y float32, z float32) {\n\tC.glUniform3f(C.GLint(location), C.GLfloat(x), C.GLfloat(y), C.GLfloat(z))\n}\n\nfunc (location UniformLocation) Uniform1fv(count int, v []float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform1fv(C.GLint(location), C.GLsizei(count), (*C.GLfloat)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform1i(x int) {\n\tC.glUniform1i(C.GLint(location), C.GLint(x))\n}\n\nfunc (location UniformLocation) Uniform1iv(count int, v []int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform1iv(C.GLint(location), C.GLsizei(count), (*C.GLint)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform2fv(count int, v []float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform2fv(C.GLint(location), C.GLsizei(count), (*C.GLfloat)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform2i(x int, y int) {\n\tC.glUniform2i(C.GLint(location), C.GLint(x), C.GLint(y))\n}\n\nfunc (location UniformLocation) Uniform2iv(count int, v []int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform2iv(C.GLint(location), C.GLsizei(count), (*C.GLint)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform3fv(count int, v []float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform3fv(C.GLint(location), C.GLsizei(count), (*C.GLfloat)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform3i(x int, y int, z int) {\n\tC.glUniform3i(C.GLint(location), C.GLint(x), C.GLint(y), C.GLint(z))\n}\n\nfunc (location UniformLocation) Uniform3iv(count int, v []int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform3iv(C.GLint(location), C.GLsizei(count), (*C.GLint)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform4f(x float32, y float32, z float32, w float32) {\n\tC.glUniform4f(C.GLint(location), C.GLfloat(x), C.GLfloat(y), C.GLfloat(z), C.GLfloat(w))\n}\n\nfunc (location UniformLocation) Uniform4fv(count int, v []float32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform4fv(C.GLint(location), C.GLsizei(count), (*C.GLfloat)(&v[0]))\n}\n\nfunc (location UniformLocation) Uniform4i(x int, y int, z int, w int) {\n\tC.glUniform4i(C.GLint(location), C.GLint(x), C.GLint(y), C.GLint(z), C.GLint(w))\n}\n\nfunc (location UniformLocation) Uniform4iv(count int, v []int32) {\n\tif len(v) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniform4iv(C.GLint(location), C.GLsizei(count), (*C.GLint)(&v[0]))\n}\n\nfunc (location UniformLocation) UniformMatrix2fv(transpose bool, list ...[4]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2f(transpose bool, matrix *[4]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3fv(transpose bool, list ...[9]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3f(transpose bool, matrix *[9]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4fv(transpose bool, list ...[16]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4f(transpose bool, matrix *[16]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x3fv(transpose bool, list ...[6]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2x3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x3f(transpose bool, matrix *[6]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2x3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x2fv(transpose bool, list ...[6]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3x2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x2f(transpose bool, matrix *[6]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3x2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x4fv(transpose bool, list ...[8]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix2x4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix2x4f(transpose bool, matrix *[8]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix2x4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x2fv(transpose bool, list ...[8]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4x2fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x2f(transpose bool, matrix *[8]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4x2fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x4fv(transpose bool, list ...[12]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix3x4fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix3x4f(transpose bool, matrix *[12]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix3x4fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x3fv(transpose bool, list ...[12]float32) {\n\tif len(list) < 1 {\n\t\tpanic(\"Invalid array length - must be at least 1\")\n\t}\n\tC.glUniformMatrix4x3fv(C.GLint(location), C.GLsizei(len(list)), glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&list[0]))))\n}\n\nfunc (location UniformLocation) UniformMatrix4x3f(transpose bool, matrix *[12]float32) {\n\tif matrix == nil {\n\t\tpanic(\"Matrix is nil\")\n\t}\n\tC.glUniformMatrix4x3fv(C.GLint(location), 1, glBool(transpose), ((*C.GLfloat)((unsafe.Pointer)(&matrix[0]))))\n}\n<|endoftext|>"} {"text":"<commit_before>package email\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tserver = \"localhost:25\"\n\tnlgidsPrefix = \"[NLgids] \"\n)\n\nvar (\n\t\/\/to = []string{\"ans@nlgids.london\", \"miek@miek.nl\"}\n\tto = []string{\"miek@miek.nl\"}\n\tfrom = \"nlgids@nlgids.london\"\n)\n\nfunc NewContact(subject string, body *bytes.Buffer) *Message {\n\tm := NewMessage(nlgidsPrefix+subject, body.String())\n\tm.From = from\n\tm.To = to\n\treturn m\n}\n\nfunc NewBooking(subject string, body *bytes.Buffer) *Message {\n\treturn nil\n}\n\nfunc NewInvoice(subject string, body *bytes.Buffer, pdfName string, pdf []byte) *Message {\n\tm := NewMessage(nlgidsPrefix+subject, body.String())\n\tm.From = from\n\tm.To = to\n\tm.AttachBytes(pdfName, pdf)\n\treturn m\n}\n\n\/\/ Do sends an email message.\nfunc (m *Message) Do() error { return Send(server, nil, m) }\n\n\/\/ Everything below:\n\n\/\/ Copyright 2012 Santiago Corredoira\n\/\/ Distributed under a BSD-like license.\n\ntype Attachment struct {\n\tFilename string\n\tData []byte\n\tInline bool\n}\n\ntype Message struct {\n\tFrom string\n\tTo []string\n\tCc []string\n\tBcc []string\n\tReplyTo string\n\tSubject string\n\tBody string\n\tBodyContentType string\n\tAttachments map[string]*Attachment\n}\n\nfunc (m *Message) attach(file string, inline bool) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.attachBytes(file, data, inline)\n\treturn nil\n}\n\nfunc (m *Message) attachBytes(file string, data []byte, inline bool) {\n\t_, filename := filepath.Split(file)\n\n\tm.Attachments[filename] = &Attachment{\n\t\tFilename: filename,\n\t\tData: data,\n\t\tInline: inline,\n\t}\n}\n\nfunc (m *Message) AttachBytes(file string, data []byte) {\n\tm.attachBytes(file, data, false)\n}\n\nfunc (m *Message) InlineBytes(file string, data []byte) {\n\tm.attachBytes(file, data, true)\n}\n\nfunc (m *Message) Attach(file string) error {\n\treturn m.attach(file, false)\n}\n\nfunc (m *Message) Inline(file string) error {\n\treturn m.attach(file, true)\n}\n\nfunc newMessage(subject string, body string, bodyContentType string) *Message {\n\tm := &Message{Subject: subject, Body: body, BodyContentType: bodyContentType}\n\n\tm.Attachments = make(map[string]*Attachment)\n\n\treturn m\n}\n\n\/\/ NewMessage returns a new Message that can compose an email with attachments\nfunc NewMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/plain\")\n}\n\n\/\/ NewMessage returns a new Message that can compose an HTML email with attachments\nfunc NewHTMLMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/html\")\n}\n\n\/\/ ToList returns all the recipients of the email\nfunc (m *Message) Tolist() []string {\n\ttolist := m.To\n\n\tfor _, cc := range m.Cc {\n\t\ttolist = append(tolist, cc)\n\t}\n\n\tfor _, bcc := range m.Bcc {\n\t\ttolist = append(tolist, bcc)\n\t}\n\n\treturn tolist\n}\n\n\/\/ Bytes returns the mail data\nfunc (m *Message) Bytes() []byte {\n\tbuf := bytes.NewBuffer(nil)\n\n\tbuf.WriteString(\"From: \" + m.From + \"\\r\\n\")\n\n\tt := time.Now()\n\tbuf.WriteString(\"Date: \" + t.Format(time.RFC822) + \"\\r\\n\")\n\n\tbuf.WriteString(\"To: \" + strings.Join(m.To, \",\") + \"\\r\\n\")\n\tif len(m.Cc) > 0 {\n\t\tbuf.WriteString(\"Cc: \" + strings.Join(m.Cc, \",\") + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"Subject: \" + m.Subject + \"\\r\\n\")\n\n\tif len(m.ReplyTo) > 0 {\n\t\tbuf.WriteString(\"Reply-To: \" + m.ReplyTo + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"MIME-Version: 1.0\\r\\n\")\n\n\tboundary := \"f46d043c813270fc6b04c2d223da\"\n\n\tif len(m.Attachments) > 0 {\n\t\tbuf.WriteString(\"Content-Type: multipart\/mixed; boundary=\" + boundary + \"\\r\\n\")\n\t\tbuf.WriteString(\"--\" + boundary + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(fmt.Sprintf(\"Content-Type: %s; charset=utf-8\\r\\n\\r\\n\", m.BodyContentType))\n\tbuf.WriteString(m.Body)\n\tbuf.WriteString(\"\\r\\n\")\n\n\tif len(m.Attachments) > 0 {\n\t\tfor _, attachment := range m.Attachments {\n\t\t\tbuf.WriteString(\"\\r\\n\\r\\n--\" + boundary + \"\\r\\n\")\n\n\t\t\tif attachment.Inline {\n\t\t\t\tbuf.WriteString(\"Content-Type: message\/rfc822\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: inline; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tbuf.Write(attachment.Data)\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\"Content-Type: application\/octet-stream\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Transfer-Encoding: base64\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: attachment; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tb := make([]byte, base64.StdEncoding.EncodedLen(len(attachment.Data)))\n\t\t\t\tbase64.StdEncoding.Encode(b, attachment.Data)\n\n\t\t\t\t\/\/ write base64 content in lines of up to 76 chars\n\t\t\t\tfor i, l := 0, len(b); i < l; i++ {\n\t\t\t\t\tbuf.WriteByte(b[i])\n\t\t\t\t\tif (i+1)%76 == 0 {\n\t\t\t\t\t\tbuf.WriteString(\"\\r\\n\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuf.WriteString(\"\\r\\n--\" + boundary)\n\t\t}\n\n\t\tbuf.WriteString(\"--\")\n\t}\n\n\treturn buf.Bytes()\n}\n\nfunc Send(addr string, auth smtp.Auth, m *Message) error {\n\treturn smtp.SendMail(addr, auth, m.From, m.Tolist(), m.Bytes())\n}\n<commit_msg>another from address<commit_after>package email\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tserver = \"localhost:25\"\n\tnlgidsPrefix = \"[NLgids] \"\n)\n\nvar (\n\t\/\/to = []string{\"ans@nlgids.london\", \"miek@miek.nl\"}\n\tto = []string{\"miek@miek.nl\"}\n\tfrom = \"nlgids@x64.atoom.net\"\n)\n\nfunc NewContact(subject string, body *bytes.Buffer) *Message {\n\tm := NewMessage(nlgidsPrefix+subject, body.String())\n\tm.From = from\n\tm.To = to\n\treturn m\n}\n\nfunc NewBooking(subject string, body *bytes.Buffer) *Message {\n\treturn nil\n}\n\nfunc NewInvoice(subject string, body *bytes.Buffer, pdfName string, pdf []byte) *Message {\n\tm := NewMessage(nlgidsPrefix+subject, body.String())\n\tm.From = from\n\tm.To = to\n\tm.AttachBytes(pdfName, pdf)\n\treturn m\n}\n\n\/\/ Do sends an email message.\nfunc (m *Message) Do() error { return Send(server, nil, m) }\n\n\/\/ Everything below:\n\n\/\/ Copyright 2012 Santiago Corredoira\n\/\/ Distributed under a BSD-like license.\n\ntype Attachment struct {\n\tFilename string\n\tData []byte\n\tInline bool\n}\n\ntype Message struct {\n\tFrom string\n\tTo []string\n\tCc []string\n\tBcc []string\n\tReplyTo string\n\tSubject string\n\tBody string\n\tBodyContentType string\n\tAttachments map[string]*Attachment\n}\n\nfunc (m *Message) attach(file string, inline bool) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.attachBytes(file, data, inline)\n\treturn nil\n}\n\nfunc (m *Message) attachBytes(file string, data []byte, inline bool) {\n\t_, filename := filepath.Split(file)\n\n\tm.Attachments[filename] = &Attachment{\n\t\tFilename: filename,\n\t\tData: data,\n\t\tInline: inline,\n\t}\n}\n\nfunc (m *Message) AttachBytes(file string, data []byte) {\n\tm.attachBytes(file, data, false)\n}\n\nfunc (m *Message) InlineBytes(file string, data []byte) {\n\tm.attachBytes(file, data, true)\n}\n\nfunc (m *Message) Attach(file string) error {\n\treturn m.attach(file, false)\n}\n\nfunc (m *Message) Inline(file string) error {\n\treturn m.attach(file, true)\n}\n\nfunc newMessage(subject string, body string, bodyContentType string) *Message {\n\tm := &Message{Subject: subject, Body: body, BodyContentType: bodyContentType}\n\n\tm.Attachments = make(map[string]*Attachment)\n\n\treturn m\n}\n\n\/\/ NewMessage returns a new Message that can compose an email with attachments\nfunc NewMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/plain\")\n}\n\n\/\/ NewMessage returns a new Message that can compose an HTML email with attachments\nfunc NewHTMLMessage(subject string, body string) *Message {\n\treturn newMessage(subject, body, \"text\/html\")\n}\n\n\/\/ ToList returns all the recipients of the email\nfunc (m *Message) Tolist() []string {\n\ttolist := m.To\n\n\tfor _, cc := range m.Cc {\n\t\ttolist = append(tolist, cc)\n\t}\n\n\tfor _, bcc := range m.Bcc {\n\t\ttolist = append(tolist, bcc)\n\t}\n\n\treturn tolist\n}\n\n\/\/ Bytes returns the mail data\nfunc (m *Message) Bytes() []byte {\n\tbuf := bytes.NewBuffer(nil)\n\n\tbuf.WriteString(\"From: \" + m.From + \"\\r\\n\")\n\n\tt := time.Now()\n\tbuf.WriteString(\"Date: \" + t.Format(time.RFC822) + \"\\r\\n\")\n\n\tbuf.WriteString(\"To: \" + strings.Join(m.To, \",\") + \"\\r\\n\")\n\tif len(m.Cc) > 0 {\n\t\tbuf.WriteString(\"Cc: \" + strings.Join(m.Cc, \",\") + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"Subject: \" + m.Subject + \"\\r\\n\")\n\n\tif len(m.ReplyTo) > 0 {\n\t\tbuf.WriteString(\"Reply-To: \" + m.ReplyTo + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(\"MIME-Version: 1.0\\r\\n\")\n\n\tboundary := \"f46d043c813270fc6b04c2d223da\"\n\n\tif len(m.Attachments) > 0 {\n\t\tbuf.WriteString(\"Content-Type: multipart\/mixed; boundary=\" + boundary + \"\\r\\n\")\n\t\tbuf.WriteString(\"--\" + boundary + \"\\r\\n\")\n\t}\n\n\tbuf.WriteString(fmt.Sprintf(\"Content-Type: %s; charset=utf-8\\r\\n\\r\\n\", m.BodyContentType))\n\tbuf.WriteString(m.Body)\n\tbuf.WriteString(\"\\r\\n\")\n\n\tif len(m.Attachments) > 0 {\n\t\tfor _, attachment := range m.Attachments {\n\t\t\tbuf.WriteString(\"\\r\\n\\r\\n--\" + boundary + \"\\r\\n\")\n\n\t\t\tif attachment.Inline {\n\t\t\t\tbuf.WriteString(\"Content-Type: message\/rfc822\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: inline; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tbuf.Write(attachment.Data)\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\"Content-Type: application\/octet-stream\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Transfer-Encoding: base64\\r\\n\")\n\t\t\t\tbuf.WriteString(\"Content-Disposition: attachment; filename=\\\"\" + attachment.Filename + \"\\\"\\r\\n\\r\\n\")\n\n\t\t\t\tb := make([]byte, base64.StdEncoding.EncodedLen(len(attachment.Data)))\n\t\t\t\tbase64.StdEncoding.Encode(b, attachment.Data)\n\n\t\t\t\t\/\/ write base64 content in lines of up to 76 chars\n\t\t\t\tfor i, l := 0, len(b); i < l; i++ {\n\t\t\t\t\tbuf.WriteByte(b[i])\n\t\t\t\t\tif (i+1)%76 == 0 {\n\t\t\t\t\t\tbuf.WriteString(\"\\r\\n\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuf.WriteString(\"\\r\\n--\" + boundary)\n\t\t}\n\n\t\tbuf.WriteString(\"--\")\n\t}\n\n\treturn buf.Bytes()\n}\n\nfunc Send(addr string, auth smtp.Auth, m *Message) error {\n\treturn smtp.SendMail(addr, auth, m.From, m.Tolist(), m.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>package registry\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/autogen\/dockerversion\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\/kernel\"\n\t\"github.com\/docker\/docker\/pkg\/timeoutconn\"\n\t\"github.com\/docker\/docker\/pkg\/transport\"\n\t\"github.com\/docker\/docker\/pkg\/useragent\"\n)\n\nvar (\n\tErrAlreadyExists = errors.New(\"Image already exists\")\n\tErrDoesNotExist = errors.New(\"Image does not exist\")\n\terrLoginRequired = errors.New(\"Authentication is required.\")\n)\n\ntype TimeoutType uint32\n\nconst (\n\tNoTimeout TimeoutType = iota\n\tReceiveTimeout\n\tConnectTimeout\n)\n\n\/\/ dockerUserAgent is the User-Agent the Docker client uses to identify itself.\n\/\/ It is populated on init(), comprising version information of different components.\nvar dockerUserAgent string\n\nfunc init() {\n\thttpVersion := make([]useragent.VersionInfo, 0, 6)\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"docker\", dockerversion.VERSION})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"go\", runtime.Version()})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"git-commit\", dockerversion.GITCOMMIT})\n\tif kernelVersion, err := kernel.GetKernelVersion(); err == nil {\n\t\thttpVersion = append(httpVersion, useragent.VersionInfo{\"kernel\", kernelVersion.String()})\n\t}\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"os\", runtime.GOOS})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"arch\", runtime.GOARCH})\n\n\tdockerUserAgent = useragent.AppendVersions(\"\", httpVersion...)\n}\n\ntype httpsRequestModifier struct {\n\tmu sync.Mutex\n\ttlsConfig *tls.Config\n}\n\n\/\/ DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,\n\/\/ it's because it's so as to match the current behavior in master: we generate the\n\/\/ certpool on every-goddam-request. It's not great, but it allows people to just put\n\/\/ the certs in \/etc\/docker\/certs.d\/...\/ and let docker \"pick it up\" immediately. Would\n\/\/ prefer an fsnotify implementation, but that was out of scope of my refactoring.\nfunc (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {\n\tvar (\n\t\troots *x509.CertPool\n\t\tcerts []tls.Certificate\n\t\thostDir string\n\t)\n\n\tif req.URL.Scheme == \"https\" {\n\t\thasFile := func(files []os.FileInfo, name string) bool {\n\t\t\tfor _, f := range files {\n\t\t\t\tif f.Name() == name {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\thostDir = path.Join(os.TempDir(), \"\/docker\/certs.d\", req.URL.Host)\n\t\t} else {\n\t\t\thostDir = path.Join(\"\/etc\/docker\/certs.d\", req.URL.Host)\n\t\t}\n\t\tlogrus.Debugf(\"hostDir: %s\", hostDir)\n\t\tfs, err := ioutil.ReadDir(hostDir)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, f := range fs {\n\t\t\tif strings.HasSuffix(f.Name(), \".crt\") {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tlogrus.Debugf(\"crt: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tdata, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\troots.AppendCertsFromPEM(data)\n\t\t\t}\n\t\t\tif strings.HasSuffix(f.Name(), \".cert\") {\n\t\t\t\tcertName := f.Name()\n\t\t\t\tkeyName := certName[:len(certName)-5] + \".key\"\n\t\t\t\tlogrus.Debugf(\"cert: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tif !hasFile(fs, keyName) {\n\t\t\t\t\treturn fmt.Errorf(\"Missing key %s for certificate %s\", keyName, certName)\n\t\t\t\t}\n\t\t\t\tcert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), path.Join(hostDir, keyName))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcerts = append(certs, cert)\n\t\t\t}\n\t\t\tif strings.HasSuffix(f.Name(), \".key\") {\n\t\t\t\tkeyName := f.Name()\n\t\t\t\tcertName := keyName[:len(keyName)-4] + \".cert\"\n\t\t\t\tlogrus.Debugf(\"key: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tif !hasFile(fs, certName) {\n\t\t\t\t\treturn fmt.Errorf(\"Missing certificate %s for key %s\", certName, keyName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.mu.Lock()\n\t\tm.tlsConfig.RootCAs = roots\n\t\tm.tlsConfig.Certificates = certs\n\t\tm.mu.Unlock()\n\t}\n\treturn nil\n}\n\nfunc NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {\n\ttlsConfig := &tls.Config{\n\t\t\/\/ Avoid fallback to SSL protocols < TLS1.0\n\t\tMinVersion: tls.VersionTLS10,\n\t\tInsecureSkipVerify: !secure,\n\t}\n\n\ttr := &http.Transport{\n\t\tDisableKeepAlives: true,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tTLSClientConfig: tlsConfig,\n\t}\n\n\tswitch timeout {\n\tcase ConnectTimeout:\n\t\ttr.Dial = func(proto string, addr string) (net.Conn, error) {\n\t\t\t\/\/ Set the connect timeout to 30 seconds to allow for slower connection\n\t\t\t\/\/ times...\n\t\t\td := net.Dialer{Timeout: 30 * time.Second, DualStack: true}\n\n\t\t\tconn, err := d.Dial(proto, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ Set the recv timeout to 10 seconds\n\t\t\tconn.SetDeadline(time.Now().Add(10 * time.Second))\n\t\t\treturn conn, nil\n\t\t}\n\tcase ReceiveTimeout:\n\t\ttr.Dial = func(proto string, addr string) (net.Conn, error) {\n\t\t\td := net.Dialer{DualStack: true}\n\n\t\t\tconn, err := d.Dial(proto, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tconn = timeoutconn.New(conn, 1*time.Minute)\n\t\t\treturn conn, nil\n\t\t}\n\t}\n\n\tif secure {\n\t\t\/\/ note: httpsTransport also handles http transport\n\t\t\/\/ but for HTTPS, it sets up the certs\n\t\treturn transport.NewTransport(tr, &httpsRequestModifier{tlsConfig: tlsConfig})\n\t}\n\n\treturn tr\n}\n\n\/\/ DockerHeaders returns request modifiers that ensure requests have\n\/\/ the User-Agent header set to dockerUserAgent and that metaHeaders\n\/\/ are added.\nfunc DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {\n\tmodifiers := []transport.RequestModifier{\n\t\ttransport.NewHeaderRequestModifier(http.Header{\"User-Agent\": []string{dockerUserAgent}}),\n\t}\n\tif metaHeaders != nil {\n\t\tmodifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))\n\t}\n\treturn modifiers\n}\n\nfunc HTTPClient(transport http.RoundTripper) *http.Client {\n\tif transport == nil {\n\t\ttransport = NewTransport(ConnectTimeout, true)\n\t}\n\n\treturn &http.Client{\n\t\tTransport: transport,\n\t\tCheckRedirect: AddRequiredHeadersToRedirectedRequests,\n\t}\n}\n\nfunc trustedLocation(req *http.Request) bool {\n\tvar (\n\t\ttrusteds = []string{\"docker.com\", \"docker.io\"}\n\t\thostname = strings.SplitN(req.Host, \":\", 2)[0]\n\t)\n\tif req.URL.Scheme != \"https\" {\n\t\treturn false\n\t}\n\n\tfor _, trusted := range trusteds {\n\t\tif hostname == trusted || strings.HasSuffix(hostname, \".\"+trusted) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error {\n\tif via != nil && via[0] != nil {\n\t\tif trustedLocation(req) && trustedLocation(via[0]) {\n\t\t\treq.Header = via[0].Header\n\t\t\treturn nil\n\t\t}\n\t\tfor k, v := range via[0].Header {\n\t\t\tif k != \"Authorization\" {\n\t\t\t\tfor _, vv := range v {\n\t\t\t\t\treq.Header.Add(k, vv)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Remove RC4 from the list of registry cipher suites<commit_after>package registry\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/autogen\/dockerversion\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\/kernel\"\n\t\"github.com\/docker\/docker\/pkg\/timeoutconn\"\n\t\"github.com\/docker\/docker\/pkg\/tlsconfig\"\n\t\"github.com\/docker\/docker\/pkg\/transport\"\n\t\"github.com\/docker\/docker\/pkg\/useragent\"\n)\n\nvar (\n\tErrAlreadyExists = errors.New(\"Image already exists\")\n\tErrDoesNotExist = errors.New(\"Image does not exist\")\n\terrLoginRequired = errors.New(\"Authentication is required.\")\n)\n\ntype TimeoutType uint32\n\nconst (\n\tNoTimeout TimeoutType = iota\n\tReceiveTimeout\n\tConnectTimeout\n)\n\n\/\/ dockerUserAgent is the User-Agent the Docker client uses to identify itself.\n\/\/ It is populated on init(), comprising version information of different components.\nvar dockerUserAgent string\n\nfunc init() {\n\thttpVersion := make([]useragent.VersionInfo, 0, 6)\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"docker\", dockerversion.VERSION})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"go\", runtime.Version()})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"git-commit\", dockerversion.GITCOMMIT})\n\tif kernelVersion, err := kernel.GetKernelVersion(); err == nil {\n\t\thttpVersion = append(httpVersion, useragent.VersionInfo{\"kernel\", kernelVersion.String()})\n\t}\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"os\", runtime.GOOS})\n\thttpVersion = append(httpVersion, useragent.VersionInfo{\"arch\", runtime.GOARCH})\n\n\tdockerUserAgent = useragent.AppendVersions(\"\", httpVersion...)\n}\n\ntype httpsRequestModifier struct {\n\tmu sync.Mutex\n\ttlsConfig *tls.Config\n}\n\n\/\/ DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,\n\/\/ it's because it's so as to match the current behavior in master: we generate the\n\/\/ certpool on every-goddam-request. It's not great, but it allows people to just put\n\/\/ the certs in \/etc\/docker\/certs.d\/...\/ and let docker \"pick it up\" immediately. Would\n\/\/ prefer an fsnotify implementation, but that was out of scope of my refactoring.\nfunc (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {\n\tvar (\n\t\troots *x509.CertPool\n\t\tcerts []tls.Certificate\n\t\thostDir string\n\t)\n\n\tif req.URL.Scheme == \"https\" {\n\t\thasFile := func(files []os.FileInfo, name string) bool {\n\t\t\tfor _, f := range files {\n\t\t\t\tif f.Name() == name {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\thostDir = path.Join(os.TempDir(), \"\/docker\/certs.d\", req.URL.Host)\n\t\t} else {\n\t\t\thostDir = path.Join(\"\/etc\/docker\/certs.d\", req.URL.Host)\n\t\t}\n\t\tlogrus.Debugf(\"hostDir: %s\", hostDir)\n\t\tfs, err := ioutil.ReadDir(hostDir)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, f := range fs {\n\t\t\tif strings.HasSuffix(f.Name(), \".crt\") {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tlogrus.Debugf(\"crt: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tdata, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\troots.AppendCertsFromPEM(data)\n\t\t\t}\n\t\t\tif strings.HasSuffix(f.Name(), \".cert\") {\n\t\t\t\tcertName := f.Name()\n\t\t\t\tkeyName := certName[:len(certName)-5] + \".key\"\n\t\t\t\tlogrus.Debugf(\"cert: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tif !hasFile(fs, keyName) {\n\t\t\t\t\treturn fmt.Errorf(\"Missing key %s for certificate %s\", keyName, certName)\n\t\t\t\t}\n\t\t\t\tcert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), path.Join(hostDir, keyName))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcerts = append(certs, cert)\n\t\t\t}\n\t\t\tif strings.HasSuffix(f.Name(), \".key\") {\n\t\t\t\tkeyName := f.Name()\n\t\t\t\tcertName := keyName[:len(keyName)-4] + \".cert\"\n\t\t\t\tlogrus.Debugf(\"key: %s\", hostDir+\"\/\"+f.Name())\n\t\t\t\tif !hasFile(fs, certName) {\n\t\t\t\t\treturn fmt.Errorf(\"Missing certificate %s for key %s\", certName, keyName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.mu.Lock()\n\t\tm.tlsConfig.RootCAs = roots\n\t\tm.tlsConfig.Certificates = certs\n\t\tm.mu.Unlock()\n\t}\n\treturn nil\n}\n\nfunc NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {\n\ttlsConfig := &tls.Config{\n\t\t\/\/ Avoid fallback to SSL protocols < TLS1.0\n\t\tMinVersion: tls.VersionTLS10,\n\t\tInsecureSkipVerify: !secure,\n\t\tCipherSuites: tlsconfig.DefaultServerAcceptedCiphers,\n\t}\n\n\ttr := &http.Transport{\n\t\tDisableKeepAlives: true,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tTLSClientConfig: tlsConfig,\n\t}\n\n\tswitch timeout {\n\tcase ConnectTimeout:\n\t\ttr.Dial = func(proto string, addr string) (net.Conn, error) {\n\t\t\t\/\/ Set the connect timeout to 30 seconds to allow for slower connection\n\t\t\t\/\/ times...\n\t\t\td := net.Dialer{Timeout: 30 * time.Second, DualStack: true}\n\n\t\t\tconn, err := d.Dial(proto, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ Set the recv timeout to 10 seconds\n\t\t\tconn.SetDeadline(time.Now().Add(10 * time.Second))\n\t\t\treturn conn, nil\n\t\t}\n\tcase ReceiveTimeout:\n\t\ttr.Dial = func(proto string, addr string) (net.Conn, error) {\n\t\t\td := net.Dialer{DualStack: true}\n\n\t\t\tconn, err := d.Dial(proto, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tconn = timeoutconn.New(conn, 1*time.Minute)\n\t\t\treturn conn, nil\n\t\t}\n\t}\n\n\tif secure {\n\t\t\/\/ note: httpsTransport also handles http transport\n\t\t\/\/ but for HTTPS, it sets up the certs\n\t\treturn transport.NewTransport(tr, &httpsRequestModifier{tlsConfig: tlsConfig})\n\t}\n\n\treturn tr\n}\n\n\/\/ DockerHeaders returns request modifiers that ensure requests have\n\/\/ the User-Agent header set to dockerUserAgent and that metaHeaders\n\/\/ are added.\nfunc DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {\n\tmodifiers := []transport.RequestModifier{\n\t\ttransport.NewHeaderRequestModifier(http.Header{\"User-Agent\": []string{dockerUserAgent}}),\n\t}\n\tif metaHeaders != nil {\n\t\tmodifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))\n\t}\n\treturn modifiers\n}\n\nfunc HTTPClient(transport http.RoundTripper) *http.Client {\n\tif transport == nil {\n\t\ttransport = NewTransport(ConnectTimeout, true)\n\t}\n\n\treturn &http.Client{\n\t\tTransport: transport,\n\t\tCheckRedirect: AddRequiredHeadersToRedirectedRequests,\n\t}\n}\n\nfunc trustedLocation(req *http.Request) bool {\n\tvar (\n\t\ttrusteds = []string{\"docker.com\", \"docker.io\"}\n\t\thostname = strings.SplitN(req.Host, \":\", 2)[0]\n\t)\n\tif req.URL.Scheme != \"https\" {\n\t\treturn false\n\t}\n\n\tfor _, trusted := range trusteds {\n\t\tif hostname == trusted || strings.HasSuffix(hostname, \".\"+trusted) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error {\n\tif via != nil && via[0] != nil {\n\t\tif trustedLocation(req) && trustedLocation(via[0]) {\n\t\t\treq.Header = via[0].Header\n\t\t\treturn nil\n\t\t}\n\t\tfor k, v := range via[0].Header {\n\t\t\tif k != \"Authorization\" {\n\t\t\t\tfor _, vv := range v {\n\t\t\t\t\treq.Header.Add(k, vv)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package tests_test\n\nimport (\n\t\"io\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"kubevirt.io\/client-go\/kubecli\"\n\n\t\"kubevirt.io\/kubevirt\/tests\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ Define relevant k8s versions\n\trelevantk8sVer = \"1.16.2\"\n\n\t\/\/ Define a timeout for read opeartions in order to prevent test hanging\n\treadTimeout = 1 * time.Minute\n\tprocessWaitTime = 10 * time.Second\n\n\tbufferSize = 1024\n)\n\nvar _ = Describe(\"[rfe_id:3423][crit:high][vendor:cnv-qe@redhat.com][level:component]VmWatch\", func() {\n\ttests.FlagParse()\n\n\tvirtCli, err := kubecli.GetKubevirtClient()\n\ttests.PanicOnError(err)\n\n\ttype vmStatus struct {\n\t\tname,\n\t\tage,\n\t\trunning,\n\t\tvolume string\n\t}\n\n\ttype vmiStatus struct {\n\t\tname,\n\t\tage,\n\t\tphase,\n\t\tip,\n\t\tnode string\n\t}\n\n\tnewVMStatus := func(fields []string) *vmStatus {\n\t\tflen := len(fields)\n\t\tstat := &vmStatus{}\n\n\t\tswitch {\n\t\tcase flen > 3:\n\t\t\tstat.volume = fields[3]\n\t\t\tfallthrough\n\t\tcase flen > 2:\n\t\t\tstat.running = fields[2]\n\t\t\tfallthrough\n\t\tcase flen > 1:\n\t\t\tstat.age = fields[1]\n\t\t\tfallthrough\n\t\tcase flen > 0:\n\t\t\tstat.name = fields[0]\n\t\t}\n\n\t\treturn stat\n\t}\n\n\tnewVMIStatus := func(fields []string) *vmiStatus {\n\t\tflen := len(fields)\n\t\tstat := &vmiStatus{}\n\n\t\tswitch {\n\t\tcase flen > 4:\n\t\t\tstat.node = fields[4]\n\t\t\tfallthrough\n\t\tcase flen > 3:\n\t\t\tstat.ip = fields[3]\n\t\t\tfallthrough\n\t\tcase flen > 2:\n\t\t\tstat.phase = fields[2]\n\t\t\tfallthrough\n\t\tcase flen > 1:\n\t\t\tstat.age = fields[1]\n\t\t\tfallthrough\n\t\tcase flen > 0:\n\t\t\tstat.name = fields[0]\n\t\t}\n\n\t\treturn stat\n\t}\n\n\t\/\/ Fail the test if stderr has something to read\n\tfailOnError := func(rc io.ReadCloser) {\n\t\tdefer GinkgoRecover()\n\n\t\tbuf := make([]byte, bufferSize)\n\n\t\tn, err := rc.Read(buf)\n\n\t\tif err != nil && n > 0 {\n\t\t\trc.Close()\n\t\t\tFail(string(buf[:n]))\n\t\t}\n\t}\n\n\t\/\/ Reads from stdin until a newline character is found\n\treadLine := func(rc io.ReadCloser, timeout time.Duration) string {\n\t\tlineChan := make(chan string)\n\n\t\tgo func() {\n\t\t\tdefer GinkgoRecover()\n\n\t\t\tvar line strings.Builder\n\t\t\tbuf := make([]byte, 1)\n\t\t\tdefer close(lineChan)\n\n\t\t\tfor {\n\t\t\t\tn, err := rc.Read(buf)\n\n\t\t\t\tif err != nil && err != io.EOF && !strings.Contains(err.Error(), \"file already closed\") {\n\t\t\t\t\tFail(err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif n > 0 {\n\t\t\t\t\tif buf[0] != '\\n' {\n\t\t\t\t\t\tline.WriteByte(buf[0])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlineChan <- line.String()\n\t\t}()\n\n\t\tselect {\n\t\tcase line := <-lineChan:\n\t\t\treturn line\n\t\tcase <-time.After(timeout):\n\t\t\terr := rc.Close()\n\t\t\tExpect(err).ToNot(HaveOccurred(), \"stdout should have been closed properly\")\n\n\t\t\tFail(\"Timeout reached on read operation\")\n\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\t\/\/ Reads VM status from the given pipe (stdin in this case) and\n\t\/\/ returns a new status.\n\t\/\/ if old_status is non-nil, the function will read status lines until\n\t\/\/ new_status.running != old_status.running in order to skip duplicated status lines\n\treadVMStatus := func(rc io.ReadCloser, old_status *vmStatus, timeout time.Duration) *vmStatus {\n\t\tnew_stat := newVMStatus(strings.Fields(readLine(rc, timeout)))\n\n\t\tfor old_status != nil && new_stat.running == old_status.running {\n\t\t\tnew_stat = newVMStatus(strings.Fields(readLine(rc, timeout)))\n\t\t}\n\n\t\treturn new_stat\n\t}\n\n\t\/\/ Reads VMI status from the given pipe (stdin in this case) and\n\t\/\/ returns a new status.\n\t\/\/ if old_status is non-nil, the function will read status lines until\n\t\/\/ new_status.phase != old_status.phase in order to skip duplicated lines\n\treadVMIStatus := func(rc io.ReadCloser, old_status *vmiStatus, timeout time.Duration) *vmiStatus {\n\t\tnewStat := newVMIStatus(strings.Fields(readLine(rc, timeout)))\n\n\t\tfor old_status != nil && newStat.phase == old_status.phase {\n\t\t\tnewStat = newVMIStatus(strings.Fields(readLine(rc, timeout)))\n\t\t}\n\n\t\treturn newStat\n\t}\n\n\t\/\/ Create a command with output\/error redirection.\n\t\/\/ Returns (cmd, stdout, stderr)\n\tcreateCommandWithNSAndRedirect := func(namespace, cmdName string, args ...string) (*exec.Cmd, io.ReadCloser, io.ReadCloser) {\n\t\tcmdName, cmd, err := tests.CreateCommandWithNS(namespace, cmdName, args...)\n\n\t\tExpect(cmdName).ToNot(Equal(\"\"))\n\t\tExpect(cmd).ToNot(BeNil())\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have been created with proper kubectl\/oc arguments\")\n\n\t\t\/\/ Output redirection\n\t\tstdOut, err := cmd.StdoutPipe()\n\t\tExpect(err).ToNot(HaveOccurred(), \"stdout should have been redirected\")\n\t\tExpect(stdOut).ToNot(BeNil())\n\n\t\tstdErr, err := cmd.StderrPipe()\n\t\tExpect(err).ToNot(HaveOccurred(), \"stderr should have been redirected\")\n\t\tExpect(stdErr).ToNot(BeNil())\n\n\t\treturn cmd, stdOut, stdErr\n\t}\n\n\tBeforeEach(func() {\n\t\ttests.SkipIfVersionBelow(\"Printing format for `kubectl get -w` on custom resources is only relevant for 1.16.2+\", relevantk8sVer)\n\t})\n\n\tIt(\"[test_id:3468]Should update vm status with the proper columns using 'kubectl get vm -w'\", func() {\n\t\tBy(\"Creating a new VM spec\")\n\t\tvm := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))\n\t\tExpect(vm).ToNot(BeNil())\n\n\t\tBy(\"Setting up the kubectl command\")\n\t\tcmd, stdout, stderr :=\n\t\t\tcreateCommandWithNSAndRedirect(vm.ObjectMeta.Namespace, tests.GetK8sCmdClient(), \"get\", \"vm\", \"-w\")\n\t\tExpect(cmd).ToNot(BeNil())\n\n\t\terr = cmd.Start()\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have started successfully\")\n\n\t\tdefer cmd.Process.Kill()\n\n\t\ttime.Sleep(processWaitTime)\n\n\t\tgo failOnError(stderr)\n\n\t\tBy(\"Applying the VM to the cluster\")\n\t\tvm, err := virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Create(vm)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been added to the cluster\")\n\n\t\t\/\/ Read column titles\n\t\tvmStatus := readVMStatus(stdout, nil, readTimeout)\n\t\tExpect(vmStatus.name).To(Equal(\"NAME\"))\n\t\tExpect(vmStatus.age).To(Equal(\"AGE\"))\n\t\tExpect(vmStatus.running).To(Equal(\"RUNNING\"))\n\t\tExpect(vmStatus.volume).To(Equal(\"VOLUME\"))\n\n\t\t\/\/ Read first status of the vm\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.name).To(Equal(vm.Name))\n\t\tExpect(vmStatus.running).To(Equal(\"false\"))\n\n\t\tBy(\"Starting the VM\")\n\t\tvm = tests.StartVirtualMachine(vm)\n\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"true\"))\n\n\t\tBy(\"Restarting the VM\")\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Restart(vm.ObjectMeta.Name)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been restarted\")\n\n\t\tvmStatus = readVMStatus(stdout, nil, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"true\"))\n\n\t\tBy(\"Stopping the VM\")\n\t\tvm = tests.StopVirtualMachine(vm)\n\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"false\"))\n\n\t\tBy(\"Deleting the VM\")\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Delete(vm.ObjectMeta.Name, &v1.DeleteOptions{})\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been deleted from the cluster\")\n\t})\n\n\tIt(\"[test_id:3466]Should update vmi status with the proper columns using 'kubectl get vmi -w'\", func() {\n\t\tBy(\"Creating a random VMI spec\")\n\t\tvm := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))\n\t\tExpect(vm).ToNot(BeNil())\n\n\t\tBy(\"Setting up the kubectl command\")\n\t\tcmd, stdout, stderr :=\n\t\t\tcreateCommandWithNSAndRedirect(vm.ObjectMeta.Namespace, tests.GetK8sCmdClient(), \"get\", \"vmi\", \"-w\")\n\t\tExpect(cmd).ToNot(BeNil())\n\n\t\terr = cmd.Start()\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have stared successfully\")\n\n\t\tdefer cmd.Process.Kill()\n\n\t\tgo failOnError(stderr)\n\n\t\ttime.Sleep(processWaitTime)\n\n\t\tBy(\"Applying vmi to the cluster\")\n\t\tvm, err = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Create(vm)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VMI should have been added to the cluster\")\n\n\t\t\/\/ Start a VMI\n\t\tvm = tests.StartVirtualMachine(vm)\n\n\t\t\/\/ Read the column titles\n\t\tvmiStatus := readVMIStatus(stdout, nil, readTimeout)\n\t\tExpect(vmiStatus.name).To(Equal(\"NAME\"))\n\t\tExpect(vmiStatus.age).To(Equal(\"AGE\"))\n\t\tExpect(vmiStatus.phase).To(Equal(\"PHASE\"))\n\t\tExpect(vmiStatus.ip).To(Equal(\"IP\"))\n\t\tExpect(vmiStatus.node).To(Equal(\"NODENAME\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Pending\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduling\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduled\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Running\"))\n\n\t\t\/\/ Restart the VMI\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Restart(vm.ObjectMeta.Name)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VMI should have been restarted\")\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Failed\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Pending\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduling\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduled\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Running\"))\n\t})\n})\n<commit_msg>Increased process wait time<commit_after>package tests_test\n\nimport (\n\t\"io\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"kubevirt.io\/client-go\/kubecli\"\n\n\t\"kubevirt.io\/kubevirt\/tests\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ Define relevant k8s versions\n\trelevantk8sVer = \"1.16.2\"\n\n\t\/\/ Define a timeout for read opeartions in order to prevent test hanging\n\treadTimeout = 1 * time.Minute\n\tprocessWaitTime = 1 * time.Minute\n\n\tbufferSize = 1024\n)\n\nvar _ = Describe(\"[rfe_id:3423][crit:high][vendor:cnv-qe@redhat.com][level:component]VmWatch\", func() {\n\ttests.FlagParse()\n\n\tvirtCli, err := kubecli.GetKubevirtClient()\n\ttests.PanicOnError(err)\n\n\ttype vmStatus struct {\n\t\tname,\n\t\tage,\n\t\trunning,\n\t\tvolume string\n\t}\n\n\ttype vmiStatus struct {\n\t\tname,\n\t\tage,\n\t\tphase,\n\t\tip,\n\t\tnode string\n\t}\n\n\tnewVMStatus := func(fields []string) *vmStatus {\n\t\tflen := len(fields)\n\t\tstat := &vmStatus{}\n\n\t\tswitch {\n\t\tcase flen > 3:\n\t\t\tstat.volume = fields[3]\n\t\t\tfallthrough\n\t\tcase flen > 2:\n\t\t\tstat.running = fields[2]\n\t\t\tfallthrough\n\t\tcase flen > 1:\n\t\t\tstat.age = fields[1]\n\t\t\tfallthrough\n\t\tcase flen > 0:\n\t\t\tstat.name = fields[0]\n\t\t}\n\n\t\treturn stat\n\t}\n\n\tnewVMIStatus := func(fields []string) *vmiStatus {\n\t\tflen := len(fields)\n\t\tstat := &vmiStatus{}\n\n\t\tswitch {\n\t\tcase flen > 4:\n\t\t\tstat.node = fields[4]\n\t\t\tfallthrough\n\t\tcase flen > 3:\n\t\t\tstat.ip = fields[3]\n\t\t\tfallthrough\n\t\tcase flen > 2:\n\t\t\tstat.phase = fields[2]\n\t\t\tfallthrough\n\t\tcase flen > 1:\n\t\t\tstat.age = fields[1]\n\t\t\tfallthrough\n\t\tcase flen > 0:\n\t\t\tstat.name = fields[0]\n\t\t}\n\n\t\treturn stat\n\t}\n\n\t\/\/ Fail the test if stderr has something to read\n\tfailOnError := func(rc io.ReadCloser) {\n\t\tdefer GinkgoRecover()\n\n\t\tbuf := make([]byte, bufferSize)\n\n\t\tn, err := rc.Read(buf)\n\n\t\tif err != nil && n > 0 {\n\t\t\trc.Close()\n\t\t\tFail(string(buf[:n]))\n\t\t}\n\t}\n\n\t\/\/ Reads from stdin until a newline character is found\n\treadLine := func(rc io.ReadCloser, timeout time.Duration) string {\n\t\tlineChan := make(chan string)\n\n\t\tgo func() {\n\t\t\tdefer GinkgoRecover()\n\n\t\t\tvar line strings.Builder\n\t\t\tbuf := make([]byte, 1)\n\t\t\tdefer close(lineChan)\n\n\t\t\tfor {\n\t\t\t\tn, err := rc.Read(buf)\n\n\t\t\t\tif err != nil && err != io.EOF && !strings.Contains(err.Error(), \"file already closed\") {\n\t\t\t\t\tFail(err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif n > 0 {\n\t\t\t\t\tif buf[0] != '\\n' {\n\t\t\t\t\t\tline.WriteByte(buf[0])\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlineChan <- line.String()\n\t\t}()\n\n\t\tselect {\n\t\tcase line := <-lineChan:\n\t\t\treturn line\n\t\tcase <-time.After(timeout):\n\t\t\terr := rc.Close()\n\t\t\tExpect(err).ToNot(HaveOccurred(), \"stdout should have been closed properly\")\n\n\t\t\tFail(\"Timeout reached on read operation\")\n\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\t\/\/ Reads VM status from the given pipe (stdin in this case) and\n\t\/\/ returns a new status.\n\t\/\/ if old_status is non-nil, the function will read status lines until\n\t\/\/ new_status.running != old_status.running in order to skip duplicated status lines\n\treadVMStatus := func(rc io.ReadCloser, old_status *vmStatus, timeout time.Duration) *vmStatus {\n\t\tnew_stat := newVMStatus(strings.Fields(readLine(rc, timeout)))\n\n\t\tfor old_status != nil && new_stat.running == old_status.running {\n\t\t\tnew_stat = newVMStatus(strings.Fields(readLine(rc, timeout)))\n\t\t}\n\n\t\treturn new_stat\n\t}\n\n\t\/\/ Reads VMI status from the given pipe (stdin in this case) and\n\t\/\/ returns a new status.\n\t\/\/ if old_status is non-nil, the function will read status lines until\n\t\/\/ new_status.phase != old_status.phase in order to skip duplicated lines\n\treadVMIStatus := func(rc io.ReadCloser, old_status *vmiStatus, timeout time.Duration) *vmiStatus {\n\t\tnewStat := newVMIStatus(strings.Fields(readLine(rc, timeout)))\n\n\t\tfor old_status != nil && newStat.phase == old_status.phase {\n\t\t\tnewStat = newVMIStatus(strings.Fields(readLine(rc, timeout)))\n\t\t}\n\n\t\treturn newStat\n\t}\n\n\t\/\/ Create a command with output\/error redirection.\n\t\/\/ Returns (cmd, stdout, stderr)\n\tcreateCommandWithNSAndRedirect := func(namespace, cmdName string, args ...string) (*exec.Cmd, io.ReadCloser, io.ReadCloser) {\n\t\tcmdName, cmd, err := tests.CreateCommandWithNS(namespace, cmdName, args...)\n\n\t\tExpect(cmdName).ToNot(Equal(\"\"))\n\t\tExpect(cmd).ToNot(BeNil())\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have been created with proper kubectl\/oc arguments\")\n\n\t\t\/\/ Output redirection\n\t\tstdOut, err := cmd.StdoutPipe()\n\t\tExpect(err).ToNot(HaveOccurred(), \"stdout should have been redirected\")\n\t\tExpect(stdOut).ToNot(BeNil())\n\n\t\tstdErr, err := cmd.StderrPipe()\n\t\tExpect(err).ToNot(HaveOccurred(), \"stderr should have been redirected\")\n\t\tExpect(stdErr).ToNot(BeNil())\n\n\t\treturn cmd, stdOut, stdErr\n\t}\n\n\tBeforeEach(func() {\n\t\ttests.SkipIfVersionBelow(\"Printing format for `kubectl get -w` on custom resources is only relevant for 1.16.2+\", relevantk8sVer)\n\t})\n\n\tIt(\"[test_id:3468]Should update vm status with the proper columns using 'kubectl get vm -w'\", func() {\n\t\tBy(\"Creating a new VM spec\")\n\t\tvm := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))\n\t\tExpect(vm).ToNot(BeNil())\n\n\t\tBy(\"Setting up the kubectl command\")\n\t\tcmd, stdout, stderr :=\n\t\t\tcreateCommandWithNSAndRedirect(vm.ObjectMeta.Namespace, tests.GetK8sCmdClient(), \"get\", \"vm\", \"-w\")\n\t\tExpect(cmd).ToNot(BeNil())\n\n\t\terr = cmd.Start()\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have started successfully\")\n\n\t\tdefer cmd.Process.Kill()\n\n\t\ttime.Sleep(processWaitTime)\n\n\t\tgo failOnError(stderr)\n\n\t\tBy(\"Applying the VM to the cluster\")\n\t\tvm, err := virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Create(vm)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been added to the cluster\")\n\n\t\t\/\/ Read column titles\n\t\tvmStatus := readVMStatus(stdout, nil, readTimeout)\n\t\tExpect(vmStatus.name).To(Equal(\"NAME\"))\n\t\tExpect(vmStatus.age).To(Equal(\"AGE\"))\n\t\tExpect(vmStatus.running).To(Equal(\"RUNNING\"))\n\t\tExpect(vmStatus.volume).To(Equal(\"VOLUME\"))\n\n\t\t\/\/ Read first status of the vm\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.name).To(Equal(vm.Name))\n\t\tExpect(vmStatus.running).To(Equal(\"false\"))\n\n\t\tBy(\"Starting the VM\")\n\t\tvm = tests.StartVirtualMachine(vm)\n\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"true\"))\n\n\t\tBy(\"Restarting the VM\")\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Restart(vm.ObjectMeta.Name)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been restarted\")\n\n\t\tvmStatus = readVMStatus(stdout, nil, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"true\"))\n\n\t\tBy(\"Stopping the VM\")\n\t\tvm = tests.StopVirtualMachine(vm)\n\n\t\tvmStatus = readVMStatus(stdout, vmStatus, readTimeout)\n\t\tExpect(vmStatus.running).To(Equal(\"false\"))\n\n\t\tBy(\"Deleting the VM\")\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Delete(vm.ObjectMeta.Name, &v1.DeleteOptions{})\n\t\tExpect(err).ToNot(HaveOccurred(), \"VM should have been deleted from the cluster\")\n\t})\n\n\tIt(\"[test_id:3466]Should update vmi status with the proper columns using 'kubectl get vmi -w'\", func() {\n\t\tBy(\"Creating a random VMI spec\")\n\t\tvm := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))\n\t\tExpect(vm).ToNot(BeNil())\n\n\t\tBy(\"Setting up the kubectl command\")\n\t\tcmd, stdout, stderr :=\n\t\t\tcreateCommandWithNSAndRedirect(vm.ObjectMeta.Namespace, tests.GetK8sCmdClient(), \"get\", \"vmi\", \"-w\")\n\t\tExpect(cmd).ToNot(BeNil())\n\n\t\terr = cmd.Start()\n\t\tExpect(err).ToNot(HaveOccurred(), \"Command should have stared successfully\")\n\n\t\tdefer cmd.Process.Kill()\n\n\t\tgo failOnError(stderr)\n\n\t\ttime.Sleep(processWaitTime)\n\n\t\tBy(\"Applying vmi to the cluster\")\n\t\tvm, err = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Create(vm)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VMI should have been added to the cluster\")\n\n\t\t\/\/ Start a VMI\n\t\tvm = tests.StartVirtualMachine(vm)\n\n\t\t\/\/ Read the column titles\n\t\tvmiStatus := readVMIStatus(stdout, nil, readTimeout)\n\t\tExpect(vmiStatus.name).To(Equal(\"NAME\"))\n\t\tExpect(vmiStatus.age).To(Equal(\"AGE\"))\n\t\tExpect(vmiStatus.phase).To(Equal(\"PHASE\"))\n\t\tExpect(vmiStatus.ip).To(Equal(\"IP\"))\n\t\tExpect(vmiStatus.node).To(Equal(\"NODENAME\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Pending\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduling\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduled\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Running\"))\n\n\t\t\/\/ Restart the VMI\n\t\terr = virtCli.VirtualMachine(vm.ObjectMeta.Namespace).Restart(vm.ObjectMeta.Name)\n\t\tExpect(err).ToNot(HaveOccurred(), \"VMI should have been restarted\")\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Failed\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Pending\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduling\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Scheduled\"))\n\n\t\tvmiStatus = readVMIStatus(stdout, vmiStatus, readTimeout)\n\t\tExpect(vmiStatus.phase).To(Equal(\"Running\"))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ package uroot contains various functions that might be needed more than\n\/\/ one place.\npackage uroot\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nconst PATH = \"\/bin:\/ubin:\/buildbin:\/usr\/local\/bin\"\n\n\/\/ TODO: make this a map so it's easier to find dups.\ntype dir struct {\n\tname string\n\tmode os.FileMode\n}\n\ntype file struct {\n\tcontents string\n\tmode os.FileMode\n}\n\n\/\/ TODO: make this a map so it's easier to find dups.\ntype dev struct {\n\tname string\n\tmode os.FileMode\n\tmagic int\n\thowmany int\n}\ntype mount struct {\n\tsource string\n\ttarget string\n\tfstype string\n\tflags uintptr\n\topts string\n}\n\nvar (\n\tEnvs []string\n\tenv = map[string]string{\n\t\t\"LD_LIBRARY_PATH\": \"\/usr\/local\/lib\",\n\t\t\"GOROOT\": \"\/go\",\n\t\t\"GOPATH\": \"\/\",\n\t\t\"CGO_ENABLED\": \"0\",\n\t}\n\n\tdirs = []dir{\n\t\t{name: \"\/proc\", mode: os.FileMode(0555)},\n\t\t{name: \"\/sys\", mode: os.FileMode(0555)},\n\t\t{name: \"\/buildbin\", mode: os.FileMode(0777)},\n\t\t{name: \"\/ubin\", mode: os.FileMode(0777)},\n\t\t{name: \"\/tmp\", mode: os.FileMode(0777)},\n\t\t{name: \"\/env\", mode: os.FileMode(0777)},\n\t\t{name: \"\/etc\", mode: os.FileMode(0777)},\n\t\t{name: \"\/tcz\", mode: os.FileMode(0777)},\n\t\t{name: \"\/dev\", mode: os.FileMode(0777)},\n\t\t{name: \"\/lib\", mode: os.FileMode(0777)},\n\t\t{name: \"\/usr\/lib\", mode: os.FileMode(0777)},\n\t\t{name: \"\/go\/pkg\/linux_amd64\", mode: os.FileMode(0777)},\n\t}\n\tdevs = []dev{\n\t\t\/\/ chicken and egg: these need to be there before you start. So, sadly,\n\t\t\/\/ we will always need dev.cpio. \n\t\t\/\/{name: \"\/dev\/null\", mode: os.FileMode(0660) | 020000, magic: 0x0103},\n\t\t\/\/{name: \"\/dev\/console\", mode: os.FileMode(0660) | 020000, magic: 0x0501},\n\t}\n\tnamespace = []mount{\n\t\t{source: \"proc\", target: \"\/proc\", fstype: \"proc\", flags: syscall.MS_MGC_VAL | syscall.MS_RDONLY, opts: \"\"},\n\t\t{source: \"sys\", target: \"\/sys\", fstype: \"sysfs\", flags: syscall.MS_MGC_VAL | syscall.MS_RDONLY, opts: \"\"},\n\t}\n\n\tfiles = map[string] file {\n\t\t\"\/etc\/resolv.conf\": {contents: `nameserver 8.8.8.8`, mode: os.FileMode(0644)},\n\t}\n)\n\n\/\/ build the root file system. \nfunc Rootfs() {\n\t\/\/ Pick some reasonable values in the (unlikely!) even that Uname fails.\n\tuname := \"linux\"\n\tmach := \"x86_64\"\n\t\/\/ There are three possible places for go:\n\t\/\/ The first is in \/go\/bin\/$OS_$ARCH\n\t\/\/ The second is in \/go\/bin [why they still use this path is anyone's guess]\n\t\/\/ The third is in \/go\/pkg\/tool\/$OS_$ARCH\n\tif u, err := Uname(); err != nil {\n\t\tlog.Printf(\"uroot.Utsname fails: %v, so assume %v_%v\\n\", uname, mach)\n\t} else {\n\t\t\/\/ Sadly, go and the OS disagree on case.\n\t\tuname = strings.ToLower(u.Sysname)\n\t\tmach = strings.ToLower(u.Machine)\n\t\t\/\/ Yes, we really have to do this stupid thing.\n\t\tif mach[0:3] == \"arm\" {\n\t\t\tmach = \"arm\"\n\t\t}\n\t}\n\tenv[\"PATH\"] = fmt.Sprintf(\"\/go\/bin\/%s_%s:\/go\/bin:\/go\/pkg\/tool\/%s_%s:%v\", uname, mach, uname, mach, PATH)\n\n\tfor k, v := range env {\n\t\tos.Setenv(k, v)\n\t\tEnvs = append(Envs, k+\"=\"+v)\n\t}\n\n\tfor _, m := range dirs {\n\t\tif err := os.MkdirAll(m.name, m.mode); err != nil {\n\t\t\tlog.Printf(\"mkdir :%s: mode %o: %v\\n\", m.name, m.mode, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tfor _, d := range devs {\n\t\tsyscall.Unlink(d.name)\n\t\tif err := syscall.Mknod(d.name, uint32(d.mode), d.magic); err != nil {\n\t\t\tlog.Printf(\"mknod :%s: mode %o: magic: %v: %v\\n\", d.name, d.mode, d.magic, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tfor _, m := range namespace {\n\t\tif err := syscall.Mount(m.source, m.target, m.fstype, m.flags, m.opts); err != nil {\n\t\t\tlog.Printf(\"Mount :%s: on :%s: type :%s: flags %x: %v\\n\", m.source, m.target, m.fstype, m.flags, m.opts, err)\n\t\t}\n\n\t}\n\n\tfor name, m := range files {\n\t\tif err := ioutil.WriteFile(name, []byte(m.contents), m.mode); err != nil {\n\t\t\tlog.Printf(\"Error writeing %v: %v\", name, err)\n\t\t}\n\t}\n\n\t\/\/ only in case of emergency.\n\tif false {\n\t\tif err := filepath.Walk(\"\/\", func(name string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\" WALK FAIL%v: %v\\n\", name, err)\n\t\t\t\t\/\/ That's ok, sometimes things are not there.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfmt.Printf(\"%v\\n\", name)\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tlog.Printf(\"WALK fails %v\\n\", err)\n\t\t}\n\t}\n}\n<commit_msg>Simple fixes at uroot\/root.go, log.Printf missing args and gofmt<commit_after>\/\/ package uroot contains various functions that might be needed more than\n\/\/ one place.\npackage uroot\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nconst PATH = \"\/bin:\/ubin:\/buildbin:\/usr\/local\/bin\"\n\n\/\/ TODO: make this a map so it's easier to find dups.\ntype dir struct {\n\tname string\n\tmode os.FileMode\n}\n\ntype file struct {\n\tcontents string\n\tmode os.FileMode\n}\n\n\/\/ TODO: make this a map so it's easier to find dups.\ntype dev struct {\n\tname string\n\tmode os.FileMode\n\tmagic int\n\thowmany int\n}\ntype mount struct {\n\tsource string\n\ttarget string\n\tfstype string\n\tflags uintptr\n\topts string\n}\n\nvar (\n\tEnvs []string\n\tenv = map[string]string{\n\t\t\"LD_LIBRARY_PATH\": \"\/usr\/local\/lib\",\n\t\t\"GOROOT\": \"\/go\",\n\t\t\"GOPATH\": \"\/\",\n\t\t\"CGO_ENABLED\": \"0\",\n\t}\n\n\tdirs = []dir{\n\t\t{name: \"\/proc\", mode: os.FileMode(0555)},\n\t\t{name: \"\/sys\", mode: os.FileMode(0555)},\n\t\t{name: \"\/buildbin\", mode: os.FileMode(0777)},\n\t\t{name: \"\/ubin\", mode: os.FileMode(0777)},\n\t\t{name: \"\/tmp\", mode: os.FileMode(0777)},\n\t\t{name: \"\/env\", mode: os.FileMode(0777)},\n\t\t{name: \"\/etc\", mode: os.FileMode(0777)},\n\t\t{name: \"\/tcz\", mode: os.FileMode(0777)},\n\t\t{name: \"\/dev\", mode: os.FileMode(0777)},\n\t\t{name: \"\/lib\", mode: os.FileMode(0777)},\n\t\t{name: \"\/usr\/lib\", mode: os.FileMode(0777)},\n\t\t{name: \"\/go\/pkg\/linux_amd64\", mode: os.FileMode(0777)},\n\t}\n\tdevs = []dev{\n\t\/\/ chicken and egg: these need to be there before you start. So, sadly,\n\t\/\/ we will always need dev.cpio.\n\t\/\/{name: \"\/dev\/null\", mode: os.FileMode(0660) | 020000, magic: 0x0103},\n\t\/\/{name: \"\/dev\/console\", mode: os.FileMode(0660) | 020000, magic: 0x0501},\n\t}\n\tnamespace = []mount{\n\t\t{source: \"proc\", target: \"\/proc\", fstype: \"proc\", flags: syscall.MS_MGC_VAL | syscall.MS_RDONLY, opts: \"\"},\n\t\t{source: \"sys\", target: \"\/sys\", fstype: \"sysfs\", flags: syscall.MS_MGC_VAL | syscall.MS_RDONLY, opts: \"\"},\n\t}\n\n\tfiles = map[string]file{\n\t\t\"\/etc\/resolv.conf\": {contents: `nameserver 8.8.8.8`, mode: os.FileMode(0644)},\n\t}\n)\n\n\/\/ build the root file system.\nfunc Rootfs() {\n\t\/\/ Pick some reasonable values in the (unlikely!) even that Uname fails.\n\tuname := \"linux\"\n\tmach := \"x86_64\"\n\t\/\/ There are three possible places for go:\n\t\/\/ The first is in \/go\/bin\/$OS_$ARCH\n\t\/\/ The second is in \/go\/bin [why they still use this path is anyone's guess]\n\t\/\/ The third is in \/go\/pkg\/tool\/$OS_$ARCH\n\tif u, err := Uname(); err != nil {\n\t\tlog.Printf(\"uroot.Utsname fails: %v, so assume %v_%v\\n\", err, uname, mach)\n\t} else {\n\t\t\/\/ Sadly, go and the OS disagree on case.\n\t\tuname = strings.ToLower(u.Sysname)\n\t\tmach = strings.ToLower(u.Machine)\n\t\t\/\/ Yes, we really have to do this stupid thing.\n\t\tif mach[0:3] == \"arm\" {\n\t\t\tmach = \"arm\"\n\t\t}\n\t}\n\tenv[\"PATH\"] = fmt.Sprintf(\"\/go\/bin\/%s_%s:\/go\/bin:\/go\/pkg\/tool\/%s_%s:%v\", uname, mach, uname, mach, PATH)\n\n\tfor k, v := range env {\n\t\tos.Setenv(k, v)\n\t\tEnvs = append(Envs, k+\"=\"+v)\n\t}\n\n\tfor _, m := range dirs {\n\t\tif err := os.MkdirAll(m.name, m.mode); err != nil {\n\t\t\tlog.Printf(\"mkdir :%s: mode %o: %v\\n\", m.name, m.mode, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tfor _, d := range devs {\n\t\tsyscall.Unlink(d.name)\n\t\tif err := syscall.Mknod(d.name, uint32(d.mode), d.magic); err != nil {\n\t\t\tlog.Printf(\"mknod :%s: mode %o: magic: %v: %v\\n\", d.name, d.mode, d.magic, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tfor _, m := range namespace {\n\t\tif err := syscall.Mount(m.source, m.target, m.fstype, m.flags, m.opts); err != nil {\n\t\t\tlog.Printf(\"Mount :%s: on :%s: type :%s: flags %x opts: %s: %v\\n\", m.source, m.target, m.fstype, m.flags, m.opts, err)\n\t\t}\n\n\t}\n\n\tfor name, m := range files {\n\t\tif err := ioutil.WriteFile(name, []byte(m.contents), m.mode); err != nil {\n\t\t\tlog.Printf(\"Error writeing %v: %v\", name, err)\n\t\t}\n\t}\n\n\t\/\/ only in case of emergency.\n\tif false {\n\t\tif err := filepath.Walk(\"\/\", func(name string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\" WALK FAIL%v: %v\\n\", name, err)\n\t\t\t\t\/\/ That's ok, sometimes things are not there.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfmt.Printf(\"%v\\n\", name)\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tlog.Printf(\"WALK fails %v\\n\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package thumbnailer\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\n\/\/ func Test_generateThumbnail(t *testing.T){\n\/\/\n\/\/ }\n\nfunc Test_GenerateThumbnails(t *testing.T) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(\"Failed to get the current directory: %s\", err)\n\t}\n\tfmt.Println(\"Current dir:\", pwd)\n\topt := ThumbnailOpt{Width: 100, Height: 100}\n\topts := make([]ThumbnailOpt, 0)\n\topts = append(opts, opt)\n\ttm := ThumbnailerMessage{\n\t\tSrcImage: filepath.Join(\"file:\/\/\", pwd, \"testdata\", \"pic.jpg\"),\n\t\tDstFolder: \"file:\/\/\/tmp\",\n\t\tOpts: opts}\n\tresults := tm.GenerateThumbnails()\n\tfor result := range results {\n\t\tif result.Err != nil {\n\t\t\tt.Fatal(\"An error occured while generating a thumb :\", result.Err)\n\t\t}\n\t\t\/\/ Clean up the generated thumb\n\t\tif err := os.Remove(result.Thumbnail.Path); err != nil {\n\t\t\tt.Fatal(\"Failed to delete the generated thumb:\", err)\n\t\t}\n\t}\n}\n<commit_msg>Reorganize the thumbnailer internals<commit_after>package thumbnailer\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc testThumbnailerMessage() ThumbnailerMessage {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get the current directory: %s\", err)\n\t}\n\tfmt.Println(\"Current dir:\", pwd)\n\topt := ThumbnailOpt{Width: 100, Height: 100}\n\topts := make([]ThumbnailOpt, 0)\n\topts = append(opts, opt)\n\treturn ThumbnailerMessage{\n\t\tSrcImage: filepath.Join(\"file:\/\/\", pwd, \"testdata\", \"pic.jpg\"),\n\t\tDstFolder: \"file:\/\/\/tmp\",\n\t\tOpts: opts}\n\n}\n\nfunc Test_thumbURL(t *testing.T) {\n\texpected := \"\/tmp\/pic_s100x100.jpg\"\n\ttm := testThumbnailerMessage()\n\turl, err := tm.thumbURL(tm.Opts[0])\n\tif err != nil {\n\t\tt.Fatal(\"Failed to generate the thumbURL :\", err)\n\t}\n\tif url.Path != expected {\n\t\tt.Fatalf(\"got: %s, expected: %s\", url.Path, expected)\n\t}\n}\n\nfunc Test_generateThumbnail(t *testing.T) {\n\ttm := testThumbnailerMessage()\n\tsrc, err := tm.Open()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to open tm.SrcImage: %v\", err)\n\t}\n\ttm.generateThumbnail(src, tm.Opts[0])\n}\n\nfunc Test_GenerateThumbnails(t *testing.T) {\n\ttm := testThumbnailerMessage()\n\tresults := tm.GenerateThumbnails()\n\tfor result := range results {\n\t\tif result.Err != nil {\n\t\t\tt.Fatal(\"An error occured while generating a thumb :\", result.Err)\n\t\t}\n\t\t\/\/ Clean up the generated thumb\n\t\tif err := os.Remove(result.Thumbnail.Path); err != nil {\n\t\t\tt.Fatal(\"Failed to delete the generated thumb:\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/d2r2\/go-max7219\/max7219\"\n)\n\nfunc main() {\n\tmtx := max7219.NewMatrix(1)\n\terr := mtx.Open(0, 0, 7)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer mtx.Close()\n\tvar font max7219.Font\n\t\/\/ font = max7219.FontCP437\n\t\/\/ font = max7219.FontLCD\n\t\/\/ font = max7219.FontMSXRus\n\t\/\/ font = max7219.FontZXSpectrumRus\n\t\/\/ font = max7219.FontSinclair\n\t\/\/ font = max7219.FontTiny\n\t\/\/ font = max7219.FontVestaPK8000Rus\n\tfont = max7219.FontCP437\n\tmtx.SlideMessage(\"Hello world!!! Hey Beavis, let's rock!\",\n\t\tfont, true, 50*time.Millisecond)\n\ttime.Sleep(1 * time.Second)\n\tfont = max7219.FontVestaPK8000Rus\n\tmtx.SlideMessage(\"Привет мир!!! Шарик - ты балбес!!!\",\n\t\tfont, true, 50*time.Millisecond)\n\ttime.Sleep(1 * time.Second)\n}\n<commit_msg>Initial commit<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/d2r2\/go-max7219\/max7219\"\n)\n\nfunc main() {\n\tmtx := max7219.NewMatrix(1)\n\terr := mtx.Open(0, 0, 7)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer mtx.Close()\n\tvar font max7219.Font\n\t\/\/ font = max7219.FontCP437\n\t\/\/ font = max7219.FontLCD\n\t\/\/ font = max7219.FontMSXRus\n\t\/\/ font = max7219.FontZXSpectrumRus\n\t\/\/ font = max7219.FontSinclair\n\t\/\/ font = max7219.FontTiny\n\t\/\/ font = max7219.FontVestaPK8000Rus\n\tfont = max7219.FontCP437\n\tmtx.SlideMessage(\"Hello world!!! Hey Beavis, let's rock!\",\n\t\tfont, true, 50*time.Millisecond)\n\ttime.Sleep(1 * time.Second)\n\tfont = max7219.FontZXSpectrumRus\n\tmtx.SlideMessage(\"Привет мир!!! Шарик - ты балбес!!!\",\n\t\tfont, true, 50*time.Millisecond)\n\ttime.Sleep(1 * time.Second)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/seletskiy\/hierr\"\n\t\"github.com\/theairkit\/runcmd\"\n)\n\ntype distributedLock struct {\n\tnodes []*distributedLockNode\n\n\tfailOnError bool\n}\n\nfunc (lock *distributedLock) addNodeRunner(\n\trunner runcmd.Runner,\n\taddress address,\n) {\n\tlock.nodes = append(lock.nodes, &distributedLockNode{\n\t\taddress: address,\n\t\trunner: runner,\n\t})\n}\n\nfunc (lock *distributedLock) acquire(filename string) error {\n\tfor nodeIndex, node := range lock.nodes {\n\t\ttracef(\n\t\t\t\"%4d\/%d locking node: '%s'\",\n\t\t\tnodeIndex+1,\n\t\t\tlen(lock.nodes),\n\t\t\tnode.String(),\n\t\t)\n\n\t\terr := node.lock(filename)\n\t\tif err != nil {\n\t\t\tif lock.failOnError {\n\t\t\t\twarningf(\n\t\t\t\t\thierr.Errorf(\n\t\t\t\t\t\terr,\n\t\t\t\t\t\t`failed to acquire lock, but continuing execution`,\n\t\t\t\t\t).Error(),\n\t\t\t\t)\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnodes := []string{}\n\t\t\tfor _, node := range lock.nodes {\n\t\t\t\tnodes = append(nodes, node.String())\n\t\t\t}\n\n\t\t\treturn hierr.Errorf(\n\t\t\t\terr,\n\t\t\t\t\"failed to lock %d nodes: %s\",\n\t\t\t\tlen(lock.nodes),\n\t\t\t\tstrings.Join(nodes, \", \"),\n\t\t\t)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (lock *distributedLock) runHeartbeats(\n\tperiod time.Duration,\n\tcanceler *sync.Cond,\n) {\n\tfor _, node := range lock.nodes {\n\t\tif node.connection != nil {\n\t\t\tgo heartbeat(period, node, canceler)\n\t\t}\n\t}\n}\n<commit_msg>reword error message about lock failure<commit_after>package main\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/seletskiy\/hierr\"\n\t\"github.com\/theairkit\/runcmd\"\n)\n\ntype distributedLock struct {\n\tnodes []*distributedLockNode\n\n\tfailOnError bool\n}\n\nfunc (lock *distributedLock) addNodeRunner(\n\trunner runcmd.Runner,\n\taddress address,\n) {\n\tlock.nodes = append(lock.nodes, &distributedLockNode{\n\t\taddress: address,\n\t\trunner: runner,\n\t})\n}\n\nfunc (lock *distributedLock) acquire(filename string) error {\n\tfor nodeIndex, node := range lock.nodes {\n\t\ttracef(\n\t\t\t\"%4d\/%d locking node: '%s'\",\n\t\t\tnodeIndex+1,\n\t\t\tlen(lock.nodes),\n\t\t\tnode.String(),\n\t\t)\n\n\t\terr := node.lock(filename)\n\t\tif err != nil {\n\t\t\tif lock.failOnError {\n\t\t\t\twarningf(\n\t\t\t\t\thierr.Errorf(\n\t\t\t\t\t\terr,\n\t\t\t\t\t\t`failed to acquire lock, `+\n\t\t\t\t\t\t\t`but proceeding with execution`,\n\t\t\t\t\t).Error(),\n\t\t\t\t)\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnodes := []string{}\n\t\t\tfor _, node := range lock.nodes {\n\t\t\t\tnodes = append(nodes, node.String())\n\t\t\t}\n\n\t\t\treturn hierr.Errorf(\n\t\t\t\terr,\n\t\t\t\t\"failed to lock %d nodes: %s\",\n\t\t\t\tlen(lock.nodes),\n\t\t\t\tstrings.Join(nodes, \", \"),\n\t\t\t)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (lock *distributedLock) runHeartbeats(\n\tperiod time.Duration,\n\tcanceler *sync.Cond,\n) {\n\tfor _, node := range lock.nodes {\n\t\tif node.connection != nil {\n\t\t\tgo heartbeat(period, node, canceler)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package response defines the how the default microservice response must look and behave like.\npackage response\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/LUSHDigital\/microservice-core-golang\/pagination\"\n)\n\n\/\/ Standard response statuses.\nconst (\n\tStatusOk = \"ok\"\n\tStatusFail = \"fail\"\n)\n\n\/\/ Responder - Responder for microservice responses.\ntype Responder interface {\n\t\/\/ ExtractData returns a particular item of data from the response.\n\tExtractData(srcKey string, dst interface{}) error\n\n\t\/\/ GetCode returns the response code.\n\tGetCode() int\n\n\t\/\/ WriteTo writes back to the network connection.\n\tWriteTo(w http.ResponseWriter) error\n}\n\n\/\/ Response - A standardised response format for a microservice.\ntype Response struct {\n\tStatus string `json:\"status\"` \/\/ Can be 'ok' or 'fail'\n\tCode int `json:\"code\"` \/\/ Any valid HTTP response code\n\tMessage string `json:\"message\"` \/\/ Any relevant message (optional)\n\tData *Data `json:\"data,omitempty\"` \/\/ Data to pass along to the response (optional)\n}\n\n\/\/ New returns a new Response for a microservice endpoint\n\/\/ This ensures that all API endpoints return data in a standardised format:\n\/\/\n\/\/ {\n\/\/ \"status\": \"ok or fail\",\n\/\/ \"code\": any HTTP response code,\n\/\/ \"message\": \"any relevant message (optional)\",\n\/\/ \"data\": {[\n\/\/ ...\n\/\/ ]}\n\/\/ }\nfunc New(code int, message string, data *Data) *Response {\n\tvar status string\n\tswitch {\n\tcase code >= http.StatusOK && code < http.StatusBadRequest:\n\t\tstatus = StatusOk\n\tdefault:\n\t\tstatus = StatusFail\n\t}\n\treturn &Response{\n\t\tCode: code,\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tData: data,\n\t}\n}\n\n\/\/ DBError returns a prepared 503 Service Unavailable response.\nfunc DBError(err error) *Response {\n\treturn DBErrorf(\"\", err)\n}\n\n\/\/ DBErrorf returns a prepared 503 Service Unavailable response,\n\/\/ using the user provided formatted message.\nfunc DBErrorf(format string, err error) *Response {\n\tvar msg string\n\tswitch format {\n\tcase \"\":\n\t\tmsg = fmt.Sprintf(\"db error: %v\", err)\n\tdefault:\n\t\tmsg = fmt.Sprintf(format, err)\n\t}\n\treturn New(http.StatusInternalServerError, msg, nil)\n}\n\n\/\/ SQLError - currently only wraps DBError\n\/\/\n\/\/ Deprecated: This function has been made redundant by the more generic DBError\nfunc SQLError(err error) *Response {\n\treturn DBError(err)\n}\n\n\/\/ SQLErrorf - currently only wraps DBErrorf\n\/\/\n\/\/ Deprecated: This function has been made redundant by the more generic DBErrorf\nfunc SQLErrorf(format string, err error) *Response {\n\treturn DBErrorf(format, err)\n}\n\n\/\/ JSONError returns a prepared 422 Unprocessable Entity response if the JSON is found to\n\/\/ contain syntax errors, or invalid values for types.\nfunc JSONError(err error) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"json error: %v\", err), nil)\n}\n\n\/\/ ParamError returns a prepared 422 Unprocessable Entity response, including the name of\n\/\/ the failing parameter in the message field of the response object.\nfunc ParamError(name string) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"invalid or missing parameter: %v\", name), nil)\n}\n\n\/\/ ValidationError returns a prepared 422 Unprocessable Entity response, including the name of\n\/\/ the failing validation\/validator in the message field of the response object.\nfunc ValidationError(err error, name string) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"validation error on %s: %v\", name, err), nil)\n}\n\n\/\/ NotFoundErr returns a prepared 404 Not Found response, including the message passed by the user\n\/\/ in the message field of the response object.\nfunc NotFoundErr(msg string) *Response {\n\treturn New(http.StatusNotFound, msg, nil)\n}\n\n\/\/ ConflictErr returns a prepared 409 Conflict response, including the message passed by the user\n\/\/ in the message field of the response object.\nfunc ConflictErr(msg string) *Response {\n\treturn New(http.StatusConflict, msg, nil)\n}\n\n\/\/ InternalError returns a prepared 500 Internal Server Error, including the error\n\/\/ message in the message field of the response object\nfunc InternalError(err error) *Response {\n\treturn New(http.StatusInternalServerError, fmt.Sprintf(\"internal server error: %v\", err), nil)\n}\n\n\/\/ WriteTo - pick a response writer to write the default json response to.\nfunc (r *Response) WriteTo(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(r.Code)\n\n\t\/\/ Don't attempt to write a body for 204s.\n\tif r.Code == http.StatusNoContent {\n\t\treturn nil\n\t}\n\n\tj, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(j)\n\treturn err\n}\n\n\/\/ ExtractData returns a particular item of data from the response.\nfunc (r *Response) ExtractData(srcKey string, dst interface{}) error {\n\tif !r.Data.Valid() {\n\t\treturn fmt.Errorf(\"invalid data provided: %v\", r.Data)\n\t}\n\tfor key, value := range r.Data.Map() {\n\t\tif key != srcKey {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the raw JSON just for the endpoints.\n\t\trawJSON, err := json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Decode the raw JSON.\n\t\tjson.Unmarshal(rawJSON, &dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCode returns the response code.\nfunc (r *Response) GetCode() int {\n\treturn r.Code\n}\n\n\/\/ PaginatedResponse - A paginated response format for a microservice.\ntype PaginatedResponse struct {\n\tStatus string `json:\"status\"` \/\/ Can be 'ok' or 'fail'\n\tCode int `json:\"code\"` \/\/ Any valid HTTP response code\n\tMessage string `json:\"message\"` \/\/ Any relevant message (optional)\n\tData *Data `json:\"data,omitempty\"` \/\/ Data to pass along to the response (optional)\n\tPagination *pagination.Response `json:\"pagination\"` \/\/ Pagination data\n}\n\n\/\/ NewPaginated returns a new PaginatedResponse for a microservice endpoint\nfunc NewPaginated(paginator *pagination.Paginator, code int, message string, data *Data) *PaginatedResponse {\n\tvar status string\n\tswitch {\n\tcase code >= http.StatusOK && code < http.StatusBadRequest:\n\t\tstatus = StatusOk\n\tdefault:\n\t\tstatus = StatusFail\n\t}\n\treturn &PaginatedResponse{\n\t\tCode: code,\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tData: data,\n\t\tPagination: paginator.PrepareResponse(),\n\t}\n}\n\n\/\/ WriteTo - pick a response writer to write the default json response to.\nfunc (p *PaginatedResponse) WriteTo(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(p.Code)\n\n\t\/\/ Don't attempt to write a body for 204s.\n\tif p.Code == http.StatusNoContent {\n\t\treturn nil\n\t}\n\n\tj, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(j)\n\treturn err\n}\n\n\/\/ ExtractData returns a particular item of data from the response.\nfunc (p *PaginatedResponse) ExtractData(srcKey string, dst interface{}) error {\n\tif !p.Data.Valid() {\n\t\treturn fmt.Errorf(\"invalid data provided: %v\", p.Data)\n\t}\n\tfor key, value := range p.Data.Map() {\n\t\tif key != srcKey {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the raw JSON just for the endpoints.\n\t\trawJSON, err := json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Decode the raw JSON.\n\t\tjson.Unmarshal(rawJSON, &dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCode returns the response code.\nfunc (p *PaginatedResponse) GetCode() int {\n\treturn p.Code\n}\n\n\/\/ Data represents the collection data the the response will return to the consumer.\n\/\/ Type ends up being the name of the key containing the collection of Content\ntype Data struct {\n\tType string\n\tContent interface{}\n}\n\n\/\/ UnmarshalJSON implements the Unmarshaler interface\n\/\/ this implementation will fill the type in the case we're been provided a valid single collection\n\/\/ and set the content to the contents of said collection.\n\/\/ for every other options, it behaves like normal.\n\/\/ Despite the fact that we are not supposed to marshal without a type set,\n\/\/ this is purposefully left open to unmarshal without a collection name set, in case you may want to set it later,\n\/\/ and for interop with other systems which may not send the collection properly.\nfunc (d *Data) UnmarshalJSON(b []byte) error {\n\tif err := json.Unmarshal(b, &d.Content); err != nil {\n\t\tlog.Printf(\"cannot unmarshal data: %v\", err)\n\t}\n\n\tdata, ok := d.Content.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\t\/\/ count how many collections were provided\n\tvar count int\n\tfor _, value := range data {\n\t\tswitch value.(type) {\n\t\tcase map[string]interface{}, []interface{}:\n\t\t\tcount++\n\t\t}\n\t}\n\tif count > 1 {\n\t\t\/\/ we can stop there since this is not a single collection\n\t\treturn nil\n\t}\n\tfor key, value := range data {\n\t\tswitch value.(type) {\n\t\tcase map[string]interface{}, []interface{}:\n\t\t\td.Type = key\n\t\t\td.Content = data[key]\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Valid ensures the Data passed to the response is correct (it must contain a Type along with the data).\nfunc (d *Data) Valid() bool {\n\treturn d.Type != \"\"\n}\n\n\/\/ MarshalJSON implements the Marshaler interface and is there to ensure the output\n\/\/ is correct when we return data to the consumer\nfunc (d *Data) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(d.Map())\n}\n\n\/\/ Map returns a version of the data as a map\nfunc (d *Data) Map() map[string]interface{} {\n\tif !d.Valid() {\n\t\treturn nil\n\t}\n\td.Type = strings.Replace(strings.ToLower(d.Type), \" \", \"-\", -1)\n\n\treturn map[string]interface{}{\n\t\td.Type: d.Content,\n\t}\n}\n<commit_msg>Response returns 503 as intended instead of 500<commit_after>\/\/ Package response defines the how the default microservice response must look and behave like.\npackage response\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/LUSHDigital\/microservice-core-golang\/pagination\"\n)\n\n\/\/ Standard response statuses.\nconst (\n\tStatusOk = \"ok\"\n\tStatusFail = \"fail\"\n)\n\n\/\/ Responder - Responder for microservice responses.\ntype Responder interface {\n\t\/\/ ExtractData returns a particular item of data from the response.\n\tExtractData(srcKey string, dst interface{}) error\n\n\t\/\/ GetCode returns the response code.\n\tGetCode() int\n\n\t\/\/ WriteTo writes back to the network connection.\n\tWriteTo(w http.ResponseWriter) error\n}\n\n\/\/ Response - A standardised response format for a microservice.\ntype Response struct {\n\tStatus string `json:\"status\"` \/\/ Can be 'ok' or 'fail'\n\tCode int `json:\"code\"` \/\/ Any valid HTTP response code\n\tMessage string `json:\"message\"` \/\/ Any relevant message (optional)\n\tData *Data `json:\"data,omitempty\"` \/\/ Data to pass along to the response (optional)\n}\n\n\/\/ New returns a new Response for a microservice endpoint\n\/\/ This ensures that all API endpoints return data in a standardised format:\n\/\/\n\/\/ {\n\/\/ \"status\": \"ok or fail\",\n\/\/ \"code\": any HTTP response code,\n\/\/ \"message\": \"any relevant message (optional)\",\n\/\/ \"data\": {[\n\/\/ ...\n\/\/ ]}\n\/\/ }\nfunc New(code int, message string, data *Data) *Response {\n\tvar status string\n\tswitch {\n\tcase code >= http.StatusOK && code < http.StatusBadRequest:\n\t\tstatus = StatusOk\n\tdefault:\n\t\tstatus = StatusFail\n\t}\n\treturn &Response{\n\t\tCode: code,\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tData: data,\n\t}\n}\n\n\/\/ DBError returns a prepared 503 Service Unavailable response.\nfunc DBError(err error) *Response {\n\treturn DBErrorf(\"\", err)\n}\n\n\/\/ DBErrorf returns a prepared 503 Service Unavailable response,\n\/\/ using the user provided formatted message.\nfunc DBErrorf(format string, err error) *Response {\n\tvar msg string\n\tswitch format {\n\tcase \"\":\n\t\tmsg = fmt.Sprintf(\"db error: %v\", err)\n\tdefault:\n\t\tmsg = fmt.Sprintf(format, err)\n\t}\n\treturn New(http.StatusServiceUnavailable, msg, nil)\n}\n\n\/\/ SQLError - currently only wraps DBError\n\/\/\n\/\/ Deprecated: This function has been made redundant by the more generic DBError\nfunc SQLError(err error) *Response {\n\treturn DBError(err)\n}\n\n\/\/ SQLErrorf - currently only wraps DBErrorf\n\/\/\n\/\/ Deprecated: This function has been made redundant by the more generic DBErrorf\nfunc SQLErrorf(format string, err error) *Response {\n\treturn DBErrorf(format, err)\n}\n\n\/\/ JSONError returns a prepared 422 Unprocessable Entity response if the JSON is found to\n\/\/ contain syntax errors, or invalid values for types.\nfunc JSONError(err error) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"json error: %v\", err), nil)\n}\n\n\/\/ ParamError returns a prepared 422 Unprocessable Entity response, including the name of\n\/\/ the failing parameter in the message field of the response object.\nfunc ParamError(name string) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"invalid or missing parameter: %v\", name), nil)\n}\n\n\/\/ ValidationError returns a prepared 422 Unprocessable Entity response, including the name of\n\/\/ the failing validation\/validator in the message field of the response object.\nfunc ValidationError(err error, name string) *Response {\n\treturn New(http.StatusUnprocessableEntity, fmt.Sprintf(\"validation error on %s: %v\", name, err), nil)\n}\n\n\/\/ NotFoundErr returns a prepared 404 Not Found response, including the message passed by the user\n\/\/ in the message field of the response object.\nfunc NotFoundErr(msg string) *Response {\n\treturn New(http.StatusNotFound, msg, nil)\n}\n\n\/\/ ConflictErr returns a prepared 409 Conflict response, including the message passed by the user\n\/\/ in the message field of the response object.\nfunc ConflictErr(msg string) *Response {\n\treturn New(http.StatusConflict, msg, nil)\n}\n\n\/\/ InternalError returns a prepared 500 Internal Server Error, including the error\n\/\/ message in the message field of the response object\nfunc InternalError(err error) *Response {\n\treturn New(http.StatusInternalServerError, fmt.Sprintf(\"internal server error: %v\", err), nil)\n}\n\n\/\/ WriteTo - pick a response writer to write the default json response to.\nfunc (r *Response) WriteTo(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(r.Code)\n\n\t\/\/ Don't attempt to write a body for 204s.\n\tif r.Code == http.StatusNoContent {\n\t\treturn nil\n\t}\n\n\tj, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(j)\n\treturn err\n}\n\n\/\/ ExtractData returns a particular item of data from the response.\nfunc (r *Response) ExtractData(srcKey string, dst interface{}) error {\n\tif !r.Data.Valid() {\n\t\treturn fmt.Errorf(\"invalid data provided: %v\", r.Data)\n\t}\n\tfor key, value := range r.Data.Map() {\n\t\tif key != srcKey {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the raw JSON just for the endpoints.\n\t\trawJSON, err := json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Decode the raw JSON.\n\t\tjson.Unmarshal(rawJSON, &dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCode returns the response code.\nfunc (r *Response) GetCode() int {\n\treturn r.Code\n}\n\n\/\/ PaginatedResponse - A paginated response format for a microservice.\ntype PaginatedResponse struct {\n\tStatus string `json:\"status\"` \/\/ Can be 'ok' or 'fail'\n\tCode int `json:\"code\"` \/\/ Any valid HTTP response code\n\tMessage string `json:\"message\"` \/\/ Any relevant message (optional)\n\tData *Data `json:\"data,omitempty\"` \/\/ Data to pass along to the response (optional)\n\tPagination *pagination.Response `json:\"pagination\"` \/\/ Pagination data\n}\n\n\/\/ NewPaginated returns a new PaginatedResponse for a microservice endpoint\nfunc NewPaginated(paginator *pagination.Paginator, code int, message string, data *Data) *PaginatedResponse {\n\tvar status string\n\tswitch {\n\tcase code >= http.StatusOK && code < http.StatusBadRequest:\n\t\tstatus = StatusOk\n\tdefault:\n\t\tstatus = StatusFail\n\t}\n\treturn &PaginatedResponse{\n\t\tCode: code,\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tData: data,\n\t\tPagination: paginator.PrepareResponse(),\n\t}\n}\n\n\/\/ WriteTo - pick a response writer to write the default json response to.\nfunc (p *PaginatedResponse) WriteTo(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(p.Code)\n\n\t\/\/ Don't attempt to write a body for 204s.\n\tif p.Code == http.StatusNoContent {\n\t\treturn nil\n\t}\n\n\tj, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(j)\n\treturn err\n}\n\n\/\/ ExtractData returns a particular item of data from the response.\nfunc (p *PaginatedResponse) ExtractData(srcKey string, dst interface{}) error {\n\tif !p.Data.Valid() {\n\t\treturn fmt.Errorf(\"invalid data provided: %v\", p.Data)\n\t}\n\tfor key, value := range p.Data.Map() {\n\t\tif key != srcKey {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the raw JSON just for the endpoints.\n\t\trawJSON, err := json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Decode the raw JSON.\n\t\tjson.Unmarshal(rawJSON, &dst)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCode returns the response code.\nfunc (p *PaginatedResponse) GetCode() int {\n\treturn p.Code\n}\n\n\/\/ Data represents the collection data the the response will return to the consumer.\n\/\/ Type ends up being the name of the key containing the collection of Content\ntype Data struct {\n\tType string\n\tContent interface{}\n}\n\n\/\/ UnmarshalJSON implements the Unmarshaler interface\n\/\/ this implementation will fill the type in the case we're been provided a valid single collection\n\/\/ and set the content to the contents of said collection.\n\/\/ for every other options, it behaves like normal.\n\/\/ Despite the fact that we are not supposed to marshal without a type set,\n\/\/ this is purposefully left open to unmarshal without a collection name set, in case you may want to set it later,\n\/\/ and for interop with other systems which may not send the collection properly.\nfunc (d *Data) UnmarshalJSON(b []byte) error {\n\tif err := json.Unmarshal(b, &d.Content); err != nil {\n\t\tlog.Printf(\"cannot unmarshal data: %v\", err)\n\t}\n\n\tdata, ok := d.Content.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\t\/\/ count how many collections were provided\n\tvar count int\n\tfor _, value := range data {\n\t\tswitch value.(type) {\n\t\tcase map[string]interface{}, []interface{}:\n\t\t\tcount++\n\t\t}\n\t}\n\tif count > 1 {\n\t\t\/\/ we can stop there since this is not a single collection\n\t\treturn nil\n\t}\n\tfor key, value := range data {\n\t\tswitch value.(type) {\n\t\tcase map[string]interface{}, []interface{}:\n\t\t\td.Type = key\n\t\t\td.Content = data[key]\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Valid ensures the Data passed to the response is correct (it must contain a Type along with the data).\nfunc (d *Data) Valid() bool {\n\treturn d.Type != \"\"\n}\n\n\/\/ MarshalJSON implements the Marshaler interface and is there to ensure the output\n\/\/ is correct when we return data to the consumer\nfunc (d *Data) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(d.Map())\n}\n\n\/\/ Map returns a version of the data as a map\nfunc (d *Data) Map() map[string]interface{} {\n\tif !d.Valid() {\n\t\treturn nil\n\t}\n\td.Type = strings.Replace(strings.ToLower(d.Type), \" \", \"-\", -1)\n\n\treturn map[string]interface{}{\n\t\td.Type: d.Content,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package junos\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Juniper\/go-netconf\/netconf\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\n\/\/ Session holds the connection information to our Junos device.\ntype Session struct {\n\tConn *netconf.Session\n}\n\n\/\/ rollbackXML parses our rollback diff configuration.\ntype rollbackXML struct {\n\tXMLName xml.Name `xml:\"rollback-information\"`\n\tConfig string `xml:\"configuration-information>configuration-output\"`\n}\n\n\/\/ CommandXML parses our operational command responses.\ntype commandXML struct {\n\tConfig string `xml:\",innerxml\"`\n}\n\n\/\/ NewSession establishes a new connection to a Junos device that we will use\n\/\/ to run our commands against.\nfunc NewSession(host, user, password string) *Session {\n\ts, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &Session{\n\t\tConn: s,\n\t}\n}\n\n\/\/ Commit commits the configuration.\nfunc (s *Session) Commit() error {\n\treply, err := s.Conn.Exec(rpcCommand[\"commit\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Lock locks the candidate configuration.\nfunc (s *Session) Lock() error {\n\treply, err := s.Conn.Exec(rpcCommand[\"lock\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Unlock unlocks the candidate configuration.\nfunc (s *Session) Unlock() error {\n\tresp, err := s.Conn.Exec(rpcCommand[\"unlock\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif resp.Ok == false {\n\t\tfor _, m := range resp.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Configure loads a configuration file in \"set\" format.\nfunc (s *Session) Configure(file string) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcommand := fmt.Sprintf(rpcCommand[\"configure-set\"], string(data))\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackConfig loads and commits the configuration of a given rollback or rescue state.\nfunc (s *Session) RollbackConfig(option interface{}) error {\n\tvar command string\n\tswitch option.(type) {\n\tcase int:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rollback-config\"], option)\n\tcase string:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rescue-config\"])\n\t}\n\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = s.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackDiff compares the current active configuration to a given rollback configuration.\nfunc (s *Session) RollbackDiff(compare int) (string, error) {\n\trb := &rollbackXML{}\n\tcommand := fmt.Sprintf(rpcCommand[\"get-rollback-information-compare\"], compare)\n\treply, err := s.Conn.Exec(command)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), rb)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn rb.Config, nil\n}\n\n\/\/ Command runs any operational mode command, such as \"show\" or \"request.\"\n\/\/ Format is either \"text\" or \"xml\".\nfunc (s *Session) Command(cmd, format string) (string, error) {\n\tc := &commandXML{}\n\tvar command string\n\n\tswitch format {\n\tcase \"xml\":\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command-xml\"], cmd)\n\tdefault:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command\"], cmd)\n\t}\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif c.Config == \"\" {\n\t\treturn \"No output available.\", nil\n\t}\n\n\treturn c.Config, nil\n}\n\n\/\/ Close disconnects our session to the device.\nfunc (s *Session) Close() {\n\ts.Conn.Close()\n}\n<commit_msg>Updated documentation<commit_after>package junos\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Juniper\/go-netconf\/netconf\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\n\/\/ Session holds the connection information to our Junos device.\ntype Session struct {\n\tConn *netconf.Session\n}\n\n\/\/ rollbackXML parses our rollback diff configuration.\ntype rollbackXML struct {\n\tXMLName xml.Name `xml:\"rollback-information\"`\n\tConfig string `xml:\"configuration-information>configuration-output\"`\n}\n\n\/\/ CommandXML parses our operational command responses.\ntype commandXML struct {\n\tConfig string `xml:\",innerxml\"`\n}\n\n\/\/ NewSession establishes a new connection to a Junos device that we will use\n\/\/ to run our commands against.\nfunc NewSession(host, user, password string) *Session {\n\ts, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &Session{\n\t\tConn: s,\n\t}\n}\n\n\/\/ Commit commits the configuration.\nfunc (s *Session) Commit() error {\n\treply, err := s.Conn.Exec(rpcCommand[\"commit\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Lock locks the candidate configuration.\nfunc (s *Session) Lock() error {\n\treply, err := s.Conn.Exec(rpcCommand[\"lock\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Unlock unlocks the candidate configuration.\nfunc (s *Session) Unlock() error {\n\tresp, err := s.Conn.Exec(rpcCommand[\"unlock\"])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif resp.Ok == false {\n\t\tfor _, m := range resp.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Configure loads a given configuration file where the commands are\n\/\/ in \"set\" format.\nfunc (s *Session) Configure(file string) error {\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcommand := fmt.Sprintf(rpcCommand[\"configure-set\"], string(data))\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackConfig loads and commits the configuration of a given rollback or rescue state.\nfunc (s *Session) RollbackConfig(option interface{}) error {\n\tvar command string\n\tswitch option.(type) {\n\tcase int:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rollback-config\"], option)\n\tcase string:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"rescue-config\"])\n\t}\n\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = s.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn errors.New(m.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ RollbackDiff compares the current active configuration to a given rollback configuration.\nfunc (s *Session) RollbackDiff(compare int) (string, error) {\n\trb := &rollbackXML{}\n\tcommand := fmt.Sprintf(rpcCommand[\"get-rollback-information-compare\"], compare)\n\treply, err := s.Conn.Exec(command)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), rb)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn rb.Config, nil\n}\n\n\/\/ Command runs any operational mode command, such as \"show\" or \"request.\"\n\/\/ Format is either \"text\" or \"xml\".\nfunc (s *Session) Command(cmd, format string) (string, error) {\n\tc := &commandXML{}\n\tvar command string\n\n\tswitch format {\n\tcase \"xml\":\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command-xml\"], cmd)\n\tdefault:\n\t\tcommand = fmt.Sprintf(rpcCommand[\"command\"], cmd)\n\t}\n\treply, err := s.Conn.Exec(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif reply.Ok == false {\n\t\tfor _, m := range reply.Errors {\n\t\t\treturn \"\", errors.New(m.Message)\n\t\t}\n\t}\n\n\terr = xml.Unmarshal([]byte(reply.Data), &c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif c.Config == \"\" {\n\t\treturn \"No output available.\", nil\n\t}\n\n\treturn c.Config, nil\n}\n\n\/\/ Close disconnects our session to the device.\nfunc (s *Session) Close() {\n\ts.Conn.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package charm\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/juju-core\/environs\/config\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ InfoResponse is sent by the charm store in response to charm-info requests.\ntype InfoResponse struct {\n\tRevision int `json:\"revision\"` \/\/ Zero is valid. Can't omitempty.\n\tSha256 string `json:\"sha256,omitempty\"`\n\tDigest string `json:\"digest,omitempty\"`\n\tErrors []string `json:\"errors,omitempty\"`\n\tWarnings []string `json:\"warnings,omitempty\"`\n}\n\n\/\/ EventResponse is sent by the charm store in response to charm-event requests.\ntype EventResponse struct {\n\tKind string `json:\"kind\"`\n\tRevision int `json:\"revision\"` \/\/ Zero is valid. Can't omitempty.\n\tDigest string `json:\"digest,omitempty\"`\n\tErrors []string `json:\"errors,omitempty\"`\n\tWarnings []string `json:\"warnings,omitempty\"`\n\tTime string `json:\"time,omitempty\"`\n}\n\n\/\/ Repository respresents a collection of charms.\ntype Repository interface {\n\tGet(curl *URL) (Charm, error)\n\tLatest(curl *URL) (int, error)\n}\n\n\/\/ NotFoundError represents an error indicating that the requested data wasn't found.\ntype NotFoundError struct {\n\tmsg string\n}\n\nfunc (e *NotFoundError) Error() string {\n\treturn e.msg\n}\n\n\/\/ CharmStore is a Repository that provides access to the public juju charm store.\ntype CharmStore struct {\n\tbaseURL string\n}\n\n\/\/var Store = &CharmStore{\"https:\/\/store.juju.ubuntu.com\"}\nvar Store = &CharmStore{\"http:\/\/localhost:8080\"}\n\n\/\/ Info returns details for a charm in the charm store.\nfunc (s *CharmStore) Info(curl *URL) (*InfoResponse, error) {\n\tkey := curl.String()\n\tresp, err := http.Get(s.baseURL + \"\/charm-info?charms=\" + url.QueryEscape(key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfos := make(map[string]*InfoResponse)\n\tif err = json.Unmarshal(body, &infos); err != nil {\n\t\treturn nil, err\n\t}\n\tinfo, found := infos[key]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"charm: charm store returned response without charm %q\", key)\n\t}\n\tif len(info.Errors) == 1 && info.Errors[0] == \"entry not found\" {\n\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm not found: %s\", curl)}\n\t}\n\treturn info, nil\n}\n\n\/\/ Event returns details for a charm event in the charm store.\n\/\/\n\/\/ If digest is empty, the latest event is returned.\nfunc (s *CharmStore) Event(curl *URL, digest string) (*EventResponse, error) {\n\tkey := curl.String()\n\tquery := key\n\tif digest != \"\" {\n\t\tquery += \"@\" + digest\n\t}\n\tresp, err := http.Get(s.baseURL + \"\/charm-event?charms=\" + url.QueryEscape(query))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevents := make(map[string]*EventResponse)\n\tif err = json.Unmarshal(body, &events); err != nil {\n\t\treturn nil, err\n\t}\n\tevent, found := events[key]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"charm: charm store returned response without charm %q\", key)\n\t}\n\tif len(event.Errors) == 1 && event.Errors[0] == \"entry not found\" {\n\t\tif digest == \"\" {\n\t\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm event not found for %q\", curl)}\n\t\t} else {\n\t\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm event not found for %q with digest %q\", curl, digest)}\n\t\t}\n\t}\n\treturn event, nil\n}\n\n\/\/ revision returns the revision and SHA256 digest of the charm referenced by curl.\nfunc (s *CharmStore) revision(curl *URL) (revision int, digest string, err error) {\n\tinfo, err := s.Info(curl)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\tfor _, w := range info.Warnings {\n\t\tlog.Warningf(\"charm: charm store reports for %q: %s\", curl, w)\n\t}\n\tif info.Errors != nil {\n\t\treturn 0, \"\", fmt.Errorf(\"charm info errors for %q: %s\", curl, strings.Join(info.Errors, \"; \"))\n\t}\n\treturn info.Revision, info.Sha256, nil\n}\n\n\/\/ Latest returns the latest revision of the charm referenced by curl, regardless\n\/\/ of the revision set on curl itself.\nfunc (s *CharmStore) Latest(curl *URL) (int, error) {\n\trev, _, err := s.revision(curl.WithRevision(-1))\n\treturn rev, err\n}\n\n\/\/ verify returns an error unless a file exists at path with a hex-encoded\n\/\/ SHA256 matching digest.\nfunc verify(path, digest string) error {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\th := sha256.New()\n\tif _, err := io.Copy(h, f); err != nil {\n\t\treturn err\n\t}\n\tif hex.EncodeToString(h.Sum(nil)) != digest {\n\t\treturn fmt.Errorf(\"bad SHA256 of %q\", path)\n\t}\n\treturn nil\n}\n\n\/\/ Get returns the charm referenced by curl.\nfunc (s *CharmStore) Get(curl *URL) (Charm, error) {\n\tcachePath := config.JujuHomePath(\"cache\")\n\tif err := os.MkdirAll(cachePath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\trev, digest, err := s.revision(curl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif curl.Revision == -1 {\n\t\tcurl = curl.WithRevision(rev)\n\t} else if curl.Revision != rev {\n\t\treturn nil, fmt.Errorf(\"charm: store returned charm with wrong revision for %q\", curl.String())\n\t}\n\tpath := filepath.Join(cachePath, Quote(curl.String())+\".charm\")\n\tif verify(path, digest) != nil {\n\t\tresp, err := http.Get(s.baseURL + \"\/charm\/\" + url.QueryEscape(curl.Path()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tf, err := ioutil.TempFile(cachePath, \"charm-download\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdlPath := f.Name()\n\t\t_, err = io.Copy(f, resp.Body)\n\t\tif cerr := f.Close(); err == nil {\n\t\t\terr = cerr\n\t\t}\n\t\tif err != nil {\n\t\t\tos.Remove(dlPath)\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := os.Rename(dlPath, path); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err := verify(path, digest); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ReadBundle(path)\n}\n\n\/\/ LocalRepository represents a local directory containing subdirectories\n\/\/ named after an Ubuntu series, each of which contains charms targeted for\n\/\/ that series. For example:\n\/\/\n\/\/ \/path\/to\/repository\/oneiric\/mongodb\/\n\/\/ \/path\/to\/repository\/precise\/mongodb.charm\n\/\/ \/path\/to\/repository\/precise\/wordpress\/\ntype LocalRepository struct {\n\tPath string\n}\n\n\/\/ Latest returns the latest revision of the charm referenced by curl, regardless\n\/\/ of the revision set on curl itself.\nfunc (r *LocalRepository) Latest(curl *URL) (int, error) {\n\tch, err := r.Get(curl.WithRevision(-1))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn ch.Revision(), nil\n}\n\nfunc repoNotFound(path string) error {\n\treturn &NotFoundError{fmt.Sprintf(\"no repository found at %q\", path)}\n}\n\nfunc charmNotFound(curl *URL, repoPath string) error {\n\treturn &NotFoundError{fmt.Sprintf(\"no charms found matching %q in %s\", curl, repoPath)}\n}\n\nfunc mightBeCharm(info os.FileInfo) bool {\n\tif info.IsDir() {\n\t\treturn !strings.HasPrefix(info.Name(), \".\")\n\t}\n\treturn strings.HasSuffix(info.Name(), \".charm\")\n}\n\n\/\/ Get returns a charm matching curl, if one exists. If curl has a revision of\n\/\/ -1, it returns the latest charm that matches curl. If multiple candidates\n\/\/ satisfy the foregoing, the first one encountered will be returned.\nfunc (r *LocalRepository) Get(curl *URL) (Charm, error) {\n\tif curl.Schema != \"local\" {\n\t\treturn nil, fmt.Errorf(\"local repository got URL with non-local schema: %q\", curl)\n\t}\n\tinfo, err := os.Stat(r.Path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = repoNotFound(r.Path)\n\t\t}\n\t\treturn nil, err\n\t}\n\tif !info.IsDir() {\n\t\treturn nil, repoNotFound(r.Path)\n\t}\n\tpath := filepath.Join(r.Path, curl.Series)\n\tinfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn nil, charmNotFound(curl, r.Path)\n\t}\n\tvar latest Charm\n\tfor _, info := range infos {\n\t\tif !mightBeCharm(info) {\n\t\t\tcontinue\n\t\t}\n\t\tchPath := filepath.Join(path, info.Name())\n\t\tif ch, err := Read(chPath); err != nil {\n\t\t\tlog.Warningf(\"charm: failed to load charm at %q: %s\", chPath, err)\n\t\t} else if ch.Meta().Name == curl.Name {\n\t\t\tif ch.Revision() == curl.Revision {\n\t\t\t\treturn ch, nil\n\t\t\t}\n\t\t\tif latest == nil || ch.Revision() > latest.Revision() {\n\t\t\t\tlatest = ch\n\t\t\t}\n\t\t}\n\t}\n\tif curl.Revision == -1 && latest != nil {\n\t\treturn latest, nil\n\t}\n\treturn nil, charmNotFound(curl, r.Path)\n}\n<commit_msg>Fix debugging left-over.<commit_after>package charm\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/juju-core\/environs\/config\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ InfoResponse is sent by the charm store in response to charm-info requests.\ntype InfoResponse struct {\n\tRevision int `json:\"revision\"` \/\/ Zero is valid. Can't omitempty.\n\tSha256 string `json:\"sha256,omitempty\"`\n\tDigest string `json:\"digest,omitempty\"`\n\tErrors []string `json:\"errors,omitempty\"`\n\tWarnings []string `json:\"warnings,omitempty\"`\n}\n\n\/\/ EventResponse is sent by the charm store in response to charm-event requests.\ntype EventResponse struct {\n\tKind string `json:\"kind\"`\n\tRevision int `json:\"revision\"` \/\/ Zero is valid. Can't omitempty.\n\tDigest string `json:\"digest,omitempty\"`\n\tErrors []string `json:\"errors,omitempty\"`\n\tWarnings []string `json:\"warnings,omitempty\"`\n\tTime string `json:\"time,omitempty\"`\n}\n\n\/\/ Repository respresents a collection of charms.\ntype Repository interface {\n\tGet(curl *URL) (Charm, error)\n\tLatest(curl *URL) (int, error)\n}\n\n\/\/ NotFoundError represents an error indicating that the requested data wasn't found.\ntype NotFoundError struct {\n\tmsg string\n}\n\nfunc (e *NotFoundError) Error() string {\n\treturn e.msg\n}\n\n\/\/ CharmStore is a Repository that provides access to the public juju charm store.\ntype CharmStore struct {\n\tbaseURL string\n}\n\nvar Store = &CharmStore{\"https:\/\/store.juju.ubuntu.com\"}\n\n\/\/ Info returns details for a charm in the charm store.\nfunc (s *CharmStore) Info(curl *URL) (*InfoResponse, error) {\n\tkey := curl.String()\n\tresp, err := http.Get(s.baseURL + \"\/charm-info?charms=\" + url.QueryEscape(key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfos := make(map[string]*InfoResponse)\n\tif err = json.Unmarshal(body, &infos); err != nil {\n\t\treturn nil, err\n\t}\n\tinfo, found := infos[key]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"charm: charm store returned response without charm %q\", key)\n\t}\n\tif len(info.Errors) == 1 && info.Errors[0] == \"entry not found\" {\n\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm not found: %s\", curl)}\n\t}\n\treturn info, nil\n}\n\n\/\/ Event returns details for a charm event in the charm store.\n\/\/\n\/\/ If digest is empty, the latest event is returned.\nfunc (s *CharmStore) Event(curl *URL, digest string) (*EventResponse, error) {\n\tkey := curl.String()\n\tquery := key\n\tif digest != \"\" {\n\t\tquery += \"@\" + digest\n\t}\n\tresp, err := http.Get(s.baseURL + \"\/charm-event?charms=\" + url.QueryEscape(query))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevents := make(map[string]*EventResponse)\n\tif err = json.Unmarshal(body, &events); err != nil {\n\t\treturn nil, err\n\t}\n\tevent, found := events[key]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"charm: charm store returned response without charm %q\", key)\n\t}\n\tif len(event.Errors) == 1 && event.Errors[0] == \"entry not found\" {\n\t\tif digest == \"\" {\n\t\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm event not found for %q\", curl)}\n\t\t} else {\n\t\t\treturn nil, &NotFoundError{fmt.Sprintf(\"charm event not found for %q with digest %q\", curl, digest)}\n\t\t}\n\t}\n\treturn event, nil\n}\n\n\/\/ revision returns the revision and SHA256 digest of the charm referenced by curl.\nfunc (s *CharmStore) revision(curl *URL) (revision int, digest string, err error) {\n\tinfo, err := s.Info(curl)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\tfor _, w := range info.Warnings {\n\t\tlog.Warningf(\"charm: charm store reports for %q: %s\", curl, w)\n\t}\n\tif info.Errors != nil {\n\t\treturn 0, \"\", fmt.Errorf(\"charm info errors for %q: %s\", curl, strings.Join(info.Errors, \"; \"))\n\t}\n\treturn info.Revision, info.Sha256, nil\n}\n\n\/\/ Latest returns the latest revision of the charm referenced by curl, regardless\n\/\/ of the revision set on curl itself.\nfunc (s *CharmStore) Latest(curl *URL) (int, error) {\n\trev, _, err := s.revision(curl.WithRevision(-1))\n\treturn rev, err\n}\n\n\/\/ verify returns an error unless a file exists at path with a hex-encoded\n\/\/ SHA256 matching digest.\nfunc verify(path, digest string) error {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\th := sha256.New()\n\tif _, err := io.Copy(h, f); err != nil {\n\t\treturn err\n\t}\n\tif hex.EncodeToString(h.Sum(nil)) != digest {\n\t\treturn fmt.Errorf(\"bad SHA256 of %q\", path)\n\t}\n\treturn nil\n}\n\n\/\/ Get returns the charm referenced by curl.\nfunc (s *CharmStore) Get(curl *URL) (Charm, error) {\n\tcachePath := config.JujuHomePath(\"cache\")\n\tif err := os.MkdirAll(cachePath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\trev, digest, err := s.revision(curl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif curl.Revision == -1 {\n\t\tcurl = curl.WithRevision(rev)\n\t} else if curl.Revision != rev {\n\t\treturn nil, fmt.Errorf(\"charm: store returned charm with wrong revision for %q\", curl.String())\n\t}\n\tpath := filepath.Join(cachePath, Quote(curl.String())+\".charm\")\n\tif verify(path, digest) != nil {\n\t\tresp, err := http.Get(s.baseURL + \"\/charm\/\" + url.QueryEscape(curl.Path()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tf, err := ioutil.TempFile(cachePath, \"charm-download\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdlPath := f.Name()\n\t\t_, err = io.Copy(f, resp.Body)\n\t\tif cerr := f.Close(); err == nil {\n\t\t\terr = cerr\n\t\t}\n\t\tif err != nil {\n\t\t\tos.Remove(dlPath)\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := os.Rename(dlPath, path); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err := verify(path, digest); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ReadBundle(path)\n}\n\n\/\/ LocalRepository represents a local directory containing subdirectories\n\/\/ named after an Ubuntu series, each of which contains charms targeted for\n\/\/ that series. For example:\n\/\/\n\/\/ \/path\/to\/repository\/oneiric\/mongodb\/\n\/\/ \/path\/to\/repository\/precise\/mongodb.charm\n\/\/ \/path\/to\/repository\/precise\/wordpress\/\ntype LocalRepository struct {\n\tPath string\n}\n\n\/\/ Latest returns the latest revision of the charm referenced by curl, regardless\n\/\/ of the revision set on curl itself.\nfunc (r *LocalRepository) Latest(curl *URL) (int, error) {\n\tch, err := r.Get(curl.WithRevision(-1))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn ch.Revision(), nil\n}\n\nfunc repoNotFound(path string) error {\n\treturn &NotFoundError{fmt.Sprintf(\"no repository found at %q\", path)}\n}\n\nfunc charmNotFound(curl *URL, repoPath string) error {\n\treturn &NotFoundError{fmt.Sprintf(\"no charms found matching %q in %s\", curl, repoPath)}\n}\n\nfunc mightBeCharm(info os.FileInfo) bool {\n\tif info.IsDir() {\n\t\treturn !strings.HasPrefix(info.Name(), \".\")\n\t}\n\treturn strings.HasSuffix(info.Name(), \".charm\")\n}\n\n\/\/ Get returns a charm matching curl, if one exists. If curl has a revision of\n\/\/ -1, it returns the latest charm that matches curl. If multiple candidates\n\/\/ satisfy the foregoing, the first one encountered will be returned.\nfunc (r *LocalRepository) Get(curl *URL) (Charm, error) {\n\tif curl.Schema != \"local\" {\n\t\treturn nil, fmt.Errorf(\"local repository got URL with non-local schema: %q\", curl)\n\t}\n\tinfo, err := os.Stat(r.Path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = repoNotFound(r.Path)\n\t\t}\n\t\treturn nil, err\n\t}\n\tif !info.IsDir() {\n\t\treturn nil, repoNotFound(r.Path)\n\t}\n\tpath := filepath.Join(r.Path, curl.Series)\n\tinfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn nil, charmNotFound(curl, r.Path)\n\t}\n\tvar latest Charm\n\tfor _, info := range infos {\n\t\tif !mightBeCharm(info) {\n\t\t\tcontinue\n\t\t}\n\t\tchPath := filepath.Join(path, info.Name())\n\t\tif ch, err := Read(chPath); err != nil {\n\t\t\tlog.Warningf(\"charm: failed to load charm at %q: %s\", chPath, err)\n\t\t} else if ch.Meta().Name == curl.Name {\n\t\t\tif ch.Revision() == curl.Revision {\n\t\t\t\treturn ch, nil\n\t\t\t}\n\t\t\tif latest == nil || ch.Revision() > latest.Revision() {\n\t\t\t\tlatest = ch\n\t\t\t}\n\t\t}\n\t}\n\tif curl.Revision == -1 && latest != nil {\n\t\treturn latest, nil\n\t}\n\treturn nil, charmNotFound(curl, r.Path)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Response package provides the core functionality of handling\n\/\/ the client connection, chunked response and other features\npackage response\nimport (\n\t\"net\"\n\t\"log\"\n\t\"net\/http\"\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"crypto\/md5\"\n\t\"io\"\n\t\"strconv\"\n\t\"encoding\/json\"\n\t\"encoding\/hex\"\n\t\"time\"\n\theader \"github.com\/DronRathore\/goexpress\/header\"\n\tcookie \"github.com\/DronRathore\/goexpress\/cookie\"\n\tutils \"github.com\/DronRathore\/go-mimes\"\n)\n\ntype NextFunc func(NextFunc)\n\n\/\/ Response Structure extends basic http.ResponseWriter interface\n\/\/ It encapsulates Header and Cookie class for direct access\ntype Response struct{\n\tresponse http.ResponseWriter\n\tHeader *header.Header\n\tCookie *cookie.Cookie\n\tLocals map[string]interface{}\n\twriter *bufio.ReadWriter\n\tconnection net.Conn\n\tended bool\n\tprops *map[string]interface{}\n\turl string\n\tmethod string\n}\n\/\/ Intialise the Response Struct, requires the Hijacked buffer,\n\/\/ connection and Response interface\nfunc (res *Response) Init(rs http.ResponseWriter, r *http.Request, w *bufio.ReadWriter, con net.Conn, props *map[string]interface{}) *Response{\n\tres.response = rs\n\tres.writer = w\n\tres.connection = con\n\tres.Header = &header.Header{}\n\tres.Header.Init(rs, r, w)\n\tres.Cookie = &cookie.Cookie{}\n\tres.Cookie.Init(res, r)\n\tres.Locals = make(map[string]interface{})\n\tres.url = r.URL.Path\n\tres.ended = false\n\tres.props = props\n\tres.method = r.Method\n\treturn res\n}\n\n\/\/ This function is for internal Use by Cookie Struct\nfunc (res *Response) AddCookie(key string, value string){\n\tres.Header.AppendCookie(key, value)\n}\n\n\/\/ Writes a string content to the buffer and immediately flushes the same\nfunc (res *Response) Write(content string) *Response{\n\tif res.Header.BasicSent() == false && res.Header.CanSendHeader() == true {\n\t\tres.Cookie.Finish()\n\t\tif sent := res.Header.FlushHeaders(); sent == false {\n\t\t\tlog.Print(\"Failed to push headers\")\n\t\t}\n\t}\n\tvar bytes = []byte(content)\n\tres.WriteBytes(bytes)\n\treturn res\n}\n\n\/\/ Writes an array of bytes to the socket\nfunc (res *Response) WriteBytes(bytes []byte) *Response {\n\tvar chunkSize = fmt.Sprintf(\"%x\", len(bytes))\n\tres.writer.WriteString(chunkSize + \"\\r\\n\")\n\tres.writer.Write(bytes)\n\tres.writer.WriteString(\"\\r\\n\")\n\tres.writer.Flush()\n\treturn res\n}\n\nfunc (res *Response) sendContent(status int, content_type string, content []byte) {\n\tif res.Header.BasicSent() == false {\n\t\tres.Header.SetStatus(status)\n\t}\n\tif res.Header.CanSendHeader() == true {\n\t\tres.Header.Set(\"Content-Type\", content_type)\n\t\tres.Cookie.Finish()\n\t\tif sent := res.Header.FlushHeaders(); sent == false {\n\t\t\tlog.Print(\"Failed to write headers\")\n\t\t}\n\t}\n\tvar chunkSize = fmt.Sprintf(\"%x\", len(content))\n\tres.writer.WriteString(chunkSize + \"\\r\\n\")\n\tres.writer.Write(content)\n\tres.writer.WriteString(\"\\r\\n\")\n\tres.writer.Writer.Flush()\n\tres.End()\n}\n\/\/ Reads a file in buffer and writes it to the socket\n\/\/ It also checks with the existing E-Tags list\n\/\/ so as to provide caching.\nfunc (res *Response) SendFile(url string, noCache bool) bool {\n\tif len(url) == 0 {\n\t\t\/\/ no need to panic ?\n\t\treturn false\n\t}\n\tfile, err := os.OpenFile(url, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\t\/\/ panic and return false\n\t\tlog.Print(\"File not found \", url, err)\n\t\tres.Header.SetStatus(404)\n\t\tres.Header.FlushHeaders()\n\t\tres.End()\n\t\treturn false\n\t}\n\tstat, err := file.Stat()\n\tif err != nil {\n\t\tlog.Print(\"Couldn't get fstat of \", url)\n\t\treturn false\n\t}\n\tif stat.IsDir() == true {\n\t\t\/\/ cannot send dir, abort\n\t\treturn false\n\t}\n\tvar modTime = stat.ModTime().Unix()\n\tvar currTime = (time.Now()).Format(time.RFC1123)\n\tvar etag = res.Header.GetRequestHeader(\"If-None-Match\")\n\tvar ext = utils.GetMimeType(url)\n\tif res.Header.CanSendHeader() {\n\t\tif ext == \"\" {\n\t\t\tres.Header.Set(\"Content-Type\", \"none\")\n\t\t} else {\n\t\t\tres.Header.Set(\"Content-Type\", ext)\n\t\t}\n\t}\n\tif noCache == false {\n\t\thasher := md5.New()\n\t\tio.WriteString(hasher, strconv.FormatInt(modTime, 10))\n\t\thash := hex.EncodeToString(hasher.Sum(nil))\n\t\tvar miss bool = true\n\t\t\/\/ do we have an etag\n\t\tif len(etag) == 1 {\n\t\t\tif etag[0] == hash {\n\t\t\t\t\/\/ its a hit!\n\t\t\t\tif res.Header.CanSendHeader() == true {\n\t\t\t\t\tres.Header.SetStatus(304)\n\t\t\t\t\tres.Header.Set(\"Cache-Control\", \"max-age=300000\")\n\t\t\t\t\tmiss = false\n\t\t\t\t} else {\n\t\t\t\t\tres.End()\n\t\t\t\t\tlog.Print(\"Cannot write header after being sent\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} \/\/ a miss\n\t\t}\n\t\t\n\t\tif miss == true {\n\t\t\tif res.Header.CanSendHeader() == true {\n\t\t\t\tres.Header.Set(\"Etag\", hash)\n\t\t\t} else {\n\t\t\t\tres.End()\n\t\t\t\tlog.Print(\"Cannot write header after being flushed\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tif res.Header.CanSendHeader() == true {\n\t\t\tres.Header.Set(\"Date\", currTime)\n\t\t\tres.Cookie.Finish()\n\t\t\tres.Header.FlushHeaders()\n\t\t} else {\n\t\t\tres.End()\n\t\t\tlog.Print(\"Cannot write header after being flushed\")\n\t\t\treturn false\n\t\t}\n\t\tif miss == false {\n\t\t\t\/\/ empty response for cache hit\n\t\t\tres.End()\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tres.Cookie.Finish()\n\t\tres.Header.FlushHeaders()\n\t}\n\n\tvar offset int64 = 0\n\t\/\/ async read and write\n\tvar reader NextFunc\n\tvar channel = make(chan bool)\n\treader = func(reader NextFunc){\n\t\tgo func(channel chan bool){\n\t\t\tvar data = make([]byte, 1500)\n\t\t\tn, err := file.ReadAt(data, offset)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tres.WriteBytes(data[:n])\n\t\t\t\t\tres.End()\n\t\t\t\t\tchannel<-true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Print(\"Error while reading \", url, err)\n\t\t\t\tres.End()\n\t\t\t\tchannel<-true\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tres.End()\n\t\t\t\t\tchannel<-true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tres.WriteBytes(data)\n\t\t\t\toffset = offset + int64(n)\n\t\t\t\treader(reader)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(channel)\n\t}\n\treader(reader)\n\t<-channel\n\treturn true\n}\n\/\/ Send a download file to the client\nfunc (res *Response) Download(path string, file_name string) bool {\n\tif res.Header.CanSendHeader() == true {\n\t\tres.Header.Set(\"Content-Disposition\", \"attachment; filename=\\\"\" + file_name+ \"\\\"\")\n\t\treturn res.SendFile(path, false)\n\t} else {\n\t\tlog.Print(\"Cannot Send header after being flushed\")\n\t\tres.End()\n\t\treturn false\n\t}\n}\n\n\/\/ Ends a response and drops the connection with client\nfunc (res *Response) End(){\n\tres.writer.WriteString(\"0\\r\\n\\r\\n\")\n\tres.writer.Flush()\n\terr := res.connection.Close()\n\tres.ended = true\n\tif err != nil {\n\t\tlog.Print(\"Couldn't close the connection, already lost?\")\n\t} else if (*res.props)[\"log\"] == true {\n\t\tlog.Print(res.method, \" \", res.url, \" \", res.Header.StatusCode)\n\t}\n}\n\n\/\/ Redirects a request, takes the url as the Location\nfunc (res *Response) Redirect(url string) *Response{\n\tres.Header.SetStatus(301)\n\tres.Header.Set(\"Location\", url)\n\tres.Header.FlushHeaders()\n\tres.ended = true\n\tres.End()\n\treturn res\n}\n\n\/\/ An internal package use function to check the state of connection\nfunc (res *Response) HasEnded() bool{\n\treturn res.ended\n}\n\n\/\/ A helper for middlewares to get the original http.ResponseWriter\nfunc (res *Response) GetRaw () http.ResponseWriter{\n\treturn res.response\n}\n\n\/\/ A helper for middlewares to get the original net.Conn\nfunc (res *Response) GetConnection () net.Conn {\n\treturn res.connection\n}\n\n\/\/ A helper for middlewares to get the original Request buffer\nfunc (res *Response) GetBuffer () *bufio.ReadWriter {\n\treturn res.writer\n}\n\n\/\/ Send Error, takes HTTP status and a string content\nfunc (res *Response) Error (status int, str string) {\n\tres.sendContent(status, \"text\/html\", []byte(str))\n}\n\n\/\/ Send JSON response, takes interface as input\nfunc (res *Response) JSON(content interface{}){\n\toutput, err := json.Marshal(content)\n\tif err != nil {\n\t\tres.sendContent(500, \"application\/json\", []byte(\"\"))\n\t} else {\n\t\tres.sendContent(200, \"application\/json\", output)\n\t}\n}\n<commit_msg>[bug] Fix cookie flush bug on redirect<commit_after>\/\/ Response package provides the core functionality of handling\n\/\/ the client connection, chunked response and other features\npackage response\nimport (\n\t\"net\"\n\t\"log\"\n\t\"net\/http\"\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"crypto\/md5\"\n\t\"io\"\n\t\"strconv\"\n\t\"encoding\/json\"\n\t\"encoding\/hex\"\n\t\"time\"\n\theader \"github.com\/DronRathore\/goexpress\/header\"\n\tcookie \"github.com\/DronRathore\/goexpress\/cookie\"\n\tutils \"github.com\/DronRathore\/go-mimes\"\n)\n\ntype NextFunc func(NextFunc)\n\n\/\/ Response Structure extends basic http.ResponseWriter interface\n\/\/ It encapsulates Header and Cookie class for direct access\ntype Response struct{\n\tresponse http.ResponseWriter\n\tHeader *header.Header\n\tCookie *cookie.Cookie\n\tLocals map[string]interface{}\n\twriter *bufio.ReadWriter\n\tconnection net.Conn\n\tended bool\n\tprops *map[string]interface{}\n\turl string\n\tmethod string\n}\n\/\/ Intialise the Response Struct, requires the Hijacked buffer,\n\/\/ connection and Response interface\nfunc (res *Response) Init(rs http.ResponseWriter, r *http.Request, w *bufio.ReadWriter, con net.Conn, props *map[string]interface{}) *Response{\n\tres.response = rs\n\tres.writer = w\n\tres.connection = con\n\tres.Header = &header.Header{}\n\tres.Header.Init(rs, r, w)\n\tres.Cookie = &cookie.Cookie{}\n\tres.Cookie.Init(res, r)\n\tres.Locals = make(map[string]interface{})\n\tres.url = r.URL.Path\n\tres.ended = false\n\tres.props = props\n\tres.method = r.Method\n\treturn res\n}\n\n\/\/ This function is for internal Use by Cookie Struct\nfunc (res *Response) AddCookie(key string, value string){\n\tres.Header.AppendCookie(key, value)\n}\n\n\/\/ Writes a string content to the buffer and immediately flushes the same\nfunc (res *Response) Write(content string) *Response{\n\tif res.Header.BasicSent() == false && res.Header.CanSendHeader() == true {\n\t\tres.Cookie.Finish()\n\t\tif sent := res.Header.FlushHeaders(); sent == false {\n\t\t\tlog.Print(\"Failed to push headers\")\n\t\t}\n\t}\n\tvar bytes = []byte(content)\n\tres.WriteBytes(bytes)\n\treturn res\n}\n\n\/\/ Writes an array of bytes to the socket\nfunc (res *Response) WriteBytes(bytes []byte) *Response {\n\tvar chunkSize = fmt.Sprintf(\"%x\", len(bytes))\n\tres.writer.WriteString(chunkSize + \"\\r\\n\")\n\tres.writer.Write(bytes)\n\tres.writer.WriteString(\"\\r\\n\")\n\tres.writer.Flush()\n\treturn res\n}\n\nfunc (res *Response) sendContent(status int, content_type string, content []byte) {\n\tif res.Header.BasicSent() == false {\n\t\tres.Header.SetStatus(status)\n\t}\n\tif res.Header.CanSendHeader() == true {\n\t\tres.Header.Set(\"Content-Type\", content_type)\n\t\tres.Cookie.Finish()\n\t\tif sent := res.Header.FlushHeaders(); sent == false {\n\t\t\tlog.Print(\"Failed to write headers\")\n\t\t}\n\t}\n\tvar chunkSize = fmt.Sprintf(\"%x\", len(content))\n\tres.writer.WriteString(chunkSize + \"\\r\\n\")\n\tres.writer.Write(content)\n\tres.writer.WriteString(\"\\r\\n\")\n\tres.writer.Writer.Flush()\n\tres.End()\n}\n\/\/ Reads a file in buffer and writes it to the socket\n\/\/ It also checks with the existing E-Tags list\n\/\/ so as to provide caching.\nfunc (res *Response) SendFile(url string, noCache bool) bool {\n\tif len(url) == 0 {\n\t\t\/\/ no need to panic ?\n\t\treturn false\n\t}\n\tfile, err := os.OpenFile(url, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\t\/\/ panic and return false\n\t\tlog.Print(\"File not found \", url, err)\n\t\tres.Header.SetStatus(404)\n\t\tres.Header.FlushHeaders()\n\t\tres.End()\n\t\treturn false\n\t}\n\tstat, err := file.Stat()\n\tif err != nil {\n\t\tlog.Print(\"Couldn't get fstat of \", url)\n\t\treturn false\n\t}\n\tif stat.IsDir() == true {\n\t\t\/\/ cannot send dir, abort\n\t\treturn false\n\t}\n\tvar modTime = stat.ModTime().Unix()\n\tvar currTime = (time.Now()).Format(time.RFC1123)\n\tvar etag = res.Header.GetRequestHeader(\"If-None-Match\")\n\tvar ext = utils.GetMimeType(url)\n\tif res.Header.CanSendHeader() {\n\t\tif ext == \"\" {\n\t\t\tres.Header.Set(\"Content-Type\", \"none\")\n\t\t} else {\n\t\t\tres.Header.Set(\"Content-Type\", ext)\n\t\t}\n\t}\n\tif noCache == false {\n\t\thasher := md5.New()\n\t\tio.WriteString(hasher, strconv.FormatInt(modTime, 10))\n\t\thash := hex.EncodeToString(hasher.Sum(nil))\n\t\tvar miss bool = true\n\t\t\/\/ do we have an etag\n\t\tif len(etag) == 1 {\n\t\t\tif etag[0] == hash {\n\t\t\t\t\/\/ its a hit!\n\t\t\t\tif res.Header.CanSendHeader() == true {\n\t\t\t\t\tres.Header.SetStatus(304)\n\t\t\t\t\tres.Header.Set(\"Cache-Control\", \"max-age=300000\")\n\t\t\t\t\tmiss = false\n\t\t\t\t} else {\n\t\t\t\t\tres.End()\n\t\t\t\t\tlog.Print(\"Cannot write header after being sent\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} \/\/ a miss\n\t\t}\n\t\t\n\t\tif miss == true {\n\t\t\tif res.Header.CanSendHeader() == true {\n\t\t\t\tres.Header.Set(\"Etag\", hash)\n\t\t\t} else {\n\t\t\t\tres.End()\n\t\t\t\tlog.Print(\"Cannot write header after being flushed\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tif res.Header.CanSendHeader() == true {\n\t\t\tres.Header.Set(\"Date\", currTime)\n\t\t\tres.Cookie.Finish()\n\t\t\tres.Header.FlushHeaders()\n\t\t} else {\n\t\t\tres.End()\n\t\t\tlog.Print(\"Cannot write header after being flushed\")\n\t\t\treturn false\n\t\t}\n\t\tif miss == false {\n\t\t\t\/\/ empty response for cache hit\n\t\t\tres.End()\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tres.Cookie.Finish()\n\t\tres.Header.FlushHeaders()\n\t}\n\n\tvar offset int64 = 0\n\t\/\/ async read and write\n\tvar reader NextFunc\n\tvar channel = make(chan bool)\n\treader = func(reader NextFunc){\n\t\tgo func(channel chan bool){\n\t\t\tvar data = make([]byte, 1500)\n\t\t\tn, err := file.ReadAt(data, offset)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tres.WriteBytes(data[:n])\n\t\t\t\t\tres.End()\n\t\t\t\t\tchannel<-true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Print(\"Error while reading \", url, err)\n\t\t\t\tres.End()\n\t\t\t\tchannel<-true\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tres.End()\n\t\t\t\t\tchannel<-true\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tres.WriteBytes(data)\n\t\t\t\toffset = offset + int64(n)\n\t\t\t\treader(reader)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(channel)\n\t}\n\treader(reader)\n\t<-channel\n\treturn true\n}\n\/\/ Send a download file to the client\nfunc (res *Response) Download(path string, file_name string) bool {\n\tif res.Header.CanSendHeader() == true {\n\t\tres.Header.Set(\"Content-Disposition\", \"attachment; filename=\\\"\" + file_name+ \"\\\"\")\n\t\treturn res.SendFile(path, false)\n\t} else {\n\t\tlog.Print(\"Cannot Send header after being flushed\")\n\t\tres.End()\n\t\treturn false\n\t}\n}\n\n\/\/ Ends a response and drops the connection with client\nfunc (res *Response) End(){\n\tres.writer.WriteString(\"0\\r\\n\\r\\n\")\n\tres.writer.Flush()\n\terr := res.connection.Close()\n\tres.ended = true\n\tif err != nil {\n\t\tlog.Print(\"Couldn't close the connection, already lost?\")\n\t} else if (*res.props)[\"log\"] == true {\n\t\tlog.Print(res.method, \" \", res.url, \" \", res.Header.StatusCode)\n\t}\n}\n\n\/\/ Redirects a request, takes the url as the Location\nfunc (res *Response) Redirect(url string) *Response{\n\tres.Header.SetStatus(301)\n\tres.Header.Set(\"Location\", url)\n\tres.Cookie.Finish()\n\tres.Header.FlushHeaders()\n\tres.ended = true\n\tres.End()\n\treturn res\n}\n\n\/\/ An internal package use function to check the state of connection\nfunc (res *Response) HasEnded() bool{\n\treturn res.ended\n}\n\n\/\/ A helper for middlewares to get the original http.ResponseWriter\nfunc (res *Response) GetRaw () http.ResponseWriter{\n\treturn res.response\n}\n\n\/\/ A helper for middlewares to get the original net.Conn\nfunc (res *Response) GetConnection () net.Conn {\n\treturn res.connection\n}\n\n\/\/ A helper for middlewares to get the original Request buffer\nfunc (res *Response) GetBuffer () *bufio.ReadWriter {\n\treturn res.writer\n}\n\n\/\/ Send Error, takes HTTP status and a string content\nfunc (res *Response) Error (status int, str string) {\n\tres.sendContent(status, \"text\/html\", []byte(str))\n}\n\n\/\/ Send JSON response, takes interface as input\nfunc (res *Response) JSON(content interface{}){\n\toutput, err := json.Marshal(content)\n\tif err != nil {\n\t\tres.sendContent(500, \"application\/json\", []byte(\"\"))\n\t} else {\n\t\tres.sendContent(200, \"application\/json\", output)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The template uses the function \"code\" to inject program\n\/\/ source into the output by extracting code from files and\n\/\/ injecting them as HTML-escaped <pre> blocks.\n\/\/\n\/\/ The syntax is simple: 1, 2, or 3 space-separated arguments:\n\/\/\n\/\/ Whole file:\n\/\/\t{{code \"foo.go\"}}\n\/\/ One line (here the signature of main):\n\/\/\t{{code \"foo.go\" `\/^func.main\/`}}\n\/\/ Block of text, determined by start and end (here the body of main):\n\/\/\t{{code \"foo.go\" `\/^func.main\/` `\/^}\/`\n\/\/\n\/\/ Patterns can be `\/regular expression\/`, a decimal number, or \"$\"\n\/\/ to signify the end of the file.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nfunc Usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: tmpltohtml file\\n\")\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Usage = Usage\n\tflag.Parse()\n\tif len(flag.Args()) != 1 {\n\t\tUsage()\n\t}\n\n\t\/\/ Read and parse the input.\n\tname := flag.Args()[0]\n\ttmpl := template.New(name).Funcs(template.FuncMap{\"code\": code})\n\tif _, err := tmpl.ParseFiles(name); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Execute the template.\n\tif err := tmpl.Execute(os.Stdout, 0); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ contents reads a file by name and returns its contents as a string.\nfunc contents(name string) string {\n\tfile, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn string(file)\n}\n\n\/\/ format returns a textual representation of the arg, formatted according to its nature.\nfunc format(arg interface{}) string {\n\tswitch arg := arg.(type) {\n\tcase int:\n\t\treturn fmt.Sprintf(\"%d\", arg)\n\tcase string:\n\t\tif len(arg) > 2 && arg[0] == '\/' && arg[len(arg)-1] == '\/' {\n\t\t\treturn fmt.Sprintf(\"%#q\", arg)\n\t\t}\n\t\treturn fmt.Sprintf(\"%q\", arg)\n\tdefault:\n\t\tlog.Fatalf(\"unrecognized argument: %v type %T\", arg, arg)\n\t}\n\treturn \"\"\n}\n\nfunc code(file string, arg ...interface{}) (string, error) {\n\ttext := contents(file)\n\tvar command string\n\tswitch len(arg) {\n\tcase 0:\n\t\t\/\/ text is already whole file.\n\t\tcommand = fmt.Sprintf(\"code %q\", file)\n\tcase 1:\n\t\tcommand = fmt.Sprintf(\"code %q %s\", file, format(arg[0]))\n\t\ttext = oneLine(file, text, arg[0])\n\tcase 2:\n\t\tcommand = fmt.Sprintf(\"code %q %s %s\", file, format(arg[0]), format(arg[1]))\n\t\ttext = multipleLines(file, text, arg[0], arg[1])\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"incorrect code invocation: code %q %q\", file, arg)\n\t}\n\t\/\/ Replace tabs by spaces, which work better in HTML.\n\ttext = strings.Replace(text, \"\\t\", \" \", -1)\n\t\/\/ Escape the program text for HTML.\n\ttext = template.HTMLEscapeString(text)\n\t\/\/ Include the command as a comment.\n\ttext = fmt.Sprintf(\"<pre><!--{{%s}}\\n-->%s<\/pre>\", command, text)\n\treturn text, nil\n}\n\n\/\/ parseArg returns the integer or string value of the argument and tells which it is.\nfunc parseArg(arg interface{}, file string, max int) (ival int, sval string, isInt bool) {\n\tswitch n := arg.(type) {\n\tcase int:\n\t\tif n <= 0 || n > max {\n\t\t\tlog.Fatalf(\"%q:%d is out of range\", file, n)\n\t\t}\n\t\treturn n, \"\", true\n\tcase string:\n\t\treturn 0, n, false\n\t}\n\tlog.Fatalf(\"unrecognized argument %v type %T\", arg, arg)\n\treturn\n}\n\n\/\/ oneLine returns the single line generated by a two-argument code invocation.\nfunc oneLine(file, text string, arg interface{}) string {\n\tlines := strings.SplitAfter(contents(file), \"\\n\")\n\tline, pattern, isInt := parseArg(arg, file, len(lines))\n\tif isInt {\n\t\treturn lines[line-1]\n\t}\n\treturn lines[match(file, 0, lines, pattern)-1]\n}\n\n\/\/ multipleLines returns the text generated by a three-argument code invocation.\nfunc multipleLines(file, text string, arg1, arg2 interface{}) string {\n\tlines := strings.SplitAfter(contents(file), \"\\n\")\n\tline1, pattern1, isInt1 := parseArg(arg1, file, len(lines))\n\tline2, pattern2, isInt2 := parseArg(arg2, file, len(lines))\n\tif !isInt1 {\n\t\tline1 = match(file, 0, lines, pattern1)\n\t}\n\tif !isInt2 {\n\t\tline2 = match(file, line1, lines, pattern2)\n\t} else if line2 < line1 {\n\t\tlog.Fatal(\"lines out of order for %q: %d %d\", line1, line2)\n\t}\n\treturn strings.Join(lines[line1-1:line2], \"\")\n}\n\n\/\/ match identifies the input line that matches the pattern in a code invocation.\n\/\/ If start>0, match lines starting there rather than at the beginning.\n\/\/ The return value is 1-indexed.\nfunc match(file string, start int, lines []string, pattern string) int {\n\t\/\/ $ matches the end of the file.\n\tif pattern == \"$\" {\n\t\tif len(lines) == 0 {\n\t\t\tlog.Fatal(\"%q: empty file\", file)\n\t\t}\n\t\treturn len(lines)\n\t}\n\t\/\/ \/regexp\/ matches the line that matches the regexp.\n\tif len(pattern) > 2 && pattern[0] == '\/' && pattern[len(pattern)-1] == '\/' {\n\t\tre, err := regexp.Compile(pattern[1 : len(pattern)-1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor i := start; i < len(lines); i++ {\n\t\t\tif re.MatchString(lines[i]) {\n\t\t\t\treturn i + 1\n\t\t\t}\n\t\t}\n\t\tlog.Fatalf(\"%s: no match for %#q\", file, pattern)\n\t}\n\tlog.Fatalf(\"unrecognized pattern: %q\", pattern)\n\treturn 0\n}\n<commit_msg>doc\/tmptohtml: output fix<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The template uses the function \"code\" to inject program\n\/\/ source into the output by extracting code from files and\n\/\/ injecting them as HTML-escaped <pre> blocks.\n\/\/\n\/\/ The syntax is simple: 1, 2, or 3 space-separated arguments:\n\/\/\n\/\/ Whole file:\n\/\/\t{{code \"foo.go\"}}\n\/\/ One line (here the signature of main):\n\/\/\t{{code \"foo.go\" `\/^func.main\/`}}\n\/\/ Block of text, determined by start and end (here the body of main):\n\/\/\t{{code \"foo.go\" `\/^func.main\/` `\/^}\/`\n\/\/\n\/\/ Patterns can be `\/regular expression\/`, a decimal number, or \"$\"\n\/\/ to signify the end of the file.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nfunc Usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: tmpltohtml file\\n\")\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Usage = Usage\n\tflag.Parse()\n\tif len(flag.Args()) != 1 {\n\t\tUsage()\n\t}\n\n\t\/\/ Read and parse the input.\n\tname := flag.Args()[0]\n\ttmpl := template.New(name).Funcs(template.FuncMap{\"code\": code})\n\tif _, err := tmpl.ParseFiles(name); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Execute the template.\n\tif err := tmpl.Execute(os.Stdout, 0); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ contents reads a file by name and returns its contents as a string.\nfunc contents(name string) string {\n\tfile, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn string(file)\n}\n\n\/\/ format returns a textual representation of the arg, formatted according to its nature.\nfunc format(arg interface{}) string {\n\tswitch arg := arg.(type) {\n\tcase int:\n\t\treturn fmt.Sprintf(\"%d\", arg)\n\tcase string:\n\t\tif len(arg) > 2 && arg[0] == '\/' && arg[len(arg)-1] == '\/' {\n\t\t\treturn fmt.Sprintf(\"%#q\", arg)\n\t\t}\n\t\treturn fmt.Sprintf(\"%q\", arg)\n\tdefault:\n\t\tlog.Fatalf(\"unrecognized argument: %v type %T\", arg, arg)\n\t}\n\treturn \"\"\n}\n\nfunc code(file string, arg ...interface{}) (string, error) {\n\ttext := contents(file)\n\tvar command string\n\tswitch len(arg) {\n\tcase 0:\n\t\t\/\/ text is already whole file.\n\t\tcommand = fmt.Sprintf(\"code %q\", file)\n\tcase 1:\n\t\tcommand = fmt.Sprintf(\"code %q %s\", file, format(arg[0]))\n\t\ttext = oneLine(file, text, arg[0])\n\tcase 2:\n\t\tcommand = fmt.Sprintf(\"code %q %s %s\", file, format(arg[0]), format(arg[1]))\n\t\ttext = multipleLines(file, text, arg[0], arg[1])\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"incorrect code invocation: code %q %q\", file, arg)\n\t}\n\t\/\/ Replace tabs by spaces, which work better in HTML.\n\ttext = strings.Replace(text, \"\\t\", \" \", -1)\n\t\/\/ Escape the program text for HTML.\n\ttext = template.HTMLEscapeString(text)\n\t\/\/ Include the command as a comment.\n\ttext = fmt.Sprintf(\"<pre><!--{{%s}}\\n-->%s<\/pre>\", command, text)\n\treturn text, nil\n}\n\n\/\/ parseArg returns the integer or string value of the argument and tells which it is.\nfunc parseArg(arg interface{}, file string, max int) (ival int, sval string, isInt bool) {\n\tswitch n := arg.(type) {\n\tcase int:\n\t\tif n <= 0 || n > max {\n\t\t\tlog.Fatalf(\"%q:%d is out of range\", file, n)\n\t\t}\n\t\treturn n, \"\", true\n\tcase string:\n\t\treturn 0, n, false\n\t}\n\tlog.Fatalf(\"unrecognized argument %v type %T\", arg, arg)\n\treturn\n}\n\n\/\/ oneLine returns the single line generated by a two-argument code invocation.\nfunc oneLine(file, text string, arg interface{}) string {\n\tlines := strings.SplitAfter(contents(file), \"\\n\")\n\tline, pattern, isInt := parseArg(arg, file, len(lines))\n\tif isInt {\n\t\treturn lines[line-1]\n\t}\n\treturn lines[match(file, 0, lines, pattern)-1]\n}\n\n\/\/ multipleLines returns the text generated by a three-argument code invocation.\nfunc multipleLines(file, text string, arg1, arg2 interface{}) string {\n\tlines := strings.SplitAfter(contents(file), \"\\n\")\n\tline1, pattern1, isInt1 := parseArg(arg1, file, len(lines))\n\tline2, pattern2, isInt2 := parseArg(arg2, file, len(lines))\n\tif !isInt1 {\n\t\tline1 = match(file, 0, lines, pattern1)\n\t}\n\tif !isInt2 {\n\t\tline2 = match(file, line1, lines, pattern2)\n\t} else if line2 < line1 {\n\t\tlog.Fatalf(\"lines out of order for %q: %d %d\", text, line1, line2)\n\t}\n\treturn strings.Join(lines[line1-1:line2], \"\")\n}\n\n\/\/ match identifies the input line that matches the pattern in a code invocation.\n\/\/ If start>0, match lines starting there rather than at the beginning.\n\/\/ The return value is 1-indexed.\nfunc match(file string, start int, lines []string, pattern string) int {\n\t\/\/ $ matches the end of the file.\n\tif pattern == \"$\" {\n\t\tif len(lines) == 0 {\n\t\t\tlog.Fatalf(\"%q: empty file\", file)\n\t\t}\n\t\treturn len(lines)\n\t}\n\t\/\/ \/regexp\/ matches the line that matches the regexp.\n\tif len(pattern) > 2 && pattern[0] == '\/' && pattern[len(pattern)-1] == '\/' {\n\t\tre, err := regexp.Compile(pattern[1 : len(pattern)-1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor i := start; i < len(lines); i++ {\n\t\t\tif re.MatchString(lines[i]) {\n\t\t\t\treturn i + 1\n\t\t\t}\n\t\t}\n\t\tlog.Fatalf(\"%s: no match for %#q\", file, pattern)\n\t}\n\tlog.Fatalf(\"unrecognized pattern: %q\", pattern)\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package dcnet\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"github.com\/lbarman\/prifi\/prifi-lib\/config\"\n\t\"gopkg.in\/dedis\/crypto.v0\/abstract\"\n\t\"gopkg.in\/dedis\/onet.v1\/log\"\n)\n\n\/\/ Clients compute:\n\/\/ kappa_i = k_i + h * SUM_j(q_ij), where q_ij = H(p_ij) in group\n\/\/ c' = k_i + c\n\/\/\n\/\/ Trustees compute:\n\/\/ sigma_i = SUM_i(q_ij), where q_ij = H(s_ij) in group\n\/\/\n\/\/ Relay compute:\n\/\/ k_i = SUM_i(kappa_i) - h * (SUM_j(sigma_i))\n\/\/ = SUM_i(h * SUM_j(q_ij)) + k_i - h * SUM_j(SUM_i(q_ij))\n\/\/ c = k_i + c'\n\/\/\n\n\/\/ Equivocation holds the functions needed for equivocation protection\ntype EquivocationProtection struct {\n\thistory abstract.Scalar\n\trandomness abstract.Cipher\n\tsuite abstract.Suite\n}\n\n\/\/ NewEquivocation creates the structure that handle equivocation protection\nfunc NewEquivocation() *EquivocationProtection {\n\te := new(EquivocationProtection)\n\te.suite = config.CryptoSuite\n\te.history = e.suite.Scalar().One()\n\n\trandomKey := make([]byte, e.suite.Cipher(nil).KeySize())\n\trand.Read(randomKey)\n\te.randomness = e.suite.Cipher(randomKey)\n\n\treturn e\n}\n\nfunc (e *EquivocationProtection) randomScalar() abstract.Scalar {\n\treturn e.suite.Scalar().Pick(e.randomness)\n}\n\nfunc (e *EquivocationProtection) hashInGroup(data []byte) abstract.Scalar {\n\treturn e.suite.Scalar().SetBytes(data)\n}\n\n\/\/ Update History adds those bits to the history hash chain\nfunc (e *EquivocationProtection) UpdateHistory(data []byte) {\n\thistoryB := e.history.Bytes()\n\ttoBeHashed := make([]byte, len(historyB)+len(data))\n\tnewPayload := sha256.Sum256(toBeHashed)\n\te.history.SetBytes(newPayload[:])\n}\n\n\/\/ a function that takes a payload x, encrypt it as x' = x + k, and returns x' and kappa = k + history * (sum of the (hashes of pads))\nfunc (e *EquivocationProtection) ClientEncryptPayload(slotOwner bool, x []byte, p_j [][]byte) ([]byte, []byte) {\n\n\t\/\/ hash the pads p_i into q_i\n\tq_j := make([]abstract.Scalar, len(p_j))\n\tfor trustee_j := range q_j {\n\t\tq_j[trustee_j] = e.hashInGroup(p_j[trustee_j])\n\t}\n\n\t\/\/ sum of q_i\n\tsum := e.suite.Scalar().Zero()\n\tfor _, p := range q_j {\n\t\tsum = sum.Add(sum, p)\n\t}\n\n\tproduct := sum.Mul(sum, e.history)\n\n\t\/\/we're not the slot owner\n\tif !slotOwner {\n\t\tkappa_i := product\n\t\treturn x, kappa_i.Bytes()\n\t}\n\n\tk_i := e.randomScalar()\n\tk_i_bytes := k_i.Bytes()\n\n\t\/\/ encrypt payload\n\tfor i := range x {\n\t\tx[i] ^= k_i_bytes[i%len(k_i_bytes)]\n\t}\n\n\t\/\/ compute kappa\n\tkappa_i := k_i.Add(k_i, product)\n\treturn x, kappa_i.Bytes()\n}\n\n\/\/ a function that takes returns the byte[] version of sigma_j\nfunc (e *EquivocationProtection) TrusteeGetContribution(s_i [][]byte) []byte {\n\n\t\/\/ hash the pads p_i into q_i\n\tq_i := make([]abstract.Scalar, len(s_i))\n\tfor client_i := range q_i {\n\t\tq_i[client_i] = e.hashInGroup(s_i[client_i])\n\t}\n\n\t\/\/ sum of q_i\n\tsum := e.suite.Scalar().Zero()\n\tfor _, p := range q_i {\n\t\tsum = sum.Add(sum, p)\n\t}\n\n\tkappa_j := sum\n\n\treturn kappa_j.Bytes()\n}\n\n\/\/ given all contributions, decodes the payload\nfunc (e *EquivocationProtection) RelayDecode(encryptedPayload []byte, trusteesContributions [][]byte, clientsContributions [][]byte) []byte {\n\n\t\/\/reconstitute the abstract.Point values\n\ttrustee_kappa_j := make([]abstract.Scalar, len(trusteesContributions))\n\tfor k, v := range trusteesContributions {\n\t\ttrustee_kappa_j[k] = e.suite.Scalar().SetBytes(v)\n\t}\n\tclient_kappa_i := make([]abstract.Scalar, len(clientsContributions))\n\tfor k, v := range clientsContributions {\n\t\tclient_kappa_i[k] = e.suite.Scalar().SetBytes(v)\n\t}\n\n\t\/\/ compute sum of trustees contribs\n\tsumTrustees := e.suite.Scalar().Zero()\n\tfor _, v := range trustee_kappa_j {\n\t\tsumTrustees = sumTrustees.Add(sumTrustees, v)\n\t}\n\n\t\/\/ compute sum of clients contribs\n\tsumClients := e.suite.Scalar().Zero()\n\tfor _, v := range client_kappa_i {\n\t\tsumClients = sumClients.Add(sumClients, v)\n\t}\n\n\tprod := sumTrustees.Mul(sumTrustees, e.history)\n\tk_i := sumClients.Sub(sumClients, prod)\n\n\t\/\/now use k to decrypt the payload\n\tk_bytes := k_i.Bytes()\n\n\tif len(k_bytes) == 0 {\n\t\tlog.Lvl1(\"Error: Equivocation couldn't recover the k_bytes value\")\n\t\tlog.Lvl1(\"encryptedPayload:\", encryptedPayload)\n\t\tfor k, v := range trusteesContributions {\n\t\t\tlog.Lvl1(\"trusteesContributions:\", v, \"mapped to\", trustee_kappa_j[k])\n\t\t}\n\t\tfor k, v := range clientsContributions {\n\t\t\tlog.Lvl1(\"clientsContributions:\", v, \"mapped to\", client_kappa_i[k])\n\t\t}\n\t\tlog.Lvl1(\"sumTrustees:\", sumTrustees)\n\t\tlog.Lvl1(\"sumClients:\", sumClients)\n\t\tlog.Lvl1(\"prod:\", prod)\n\t\tlog.Lvl1(\"k_i:\", k_i)\n\t\treturn make([]byte, 0)\n\t}\n\n\t\/\/ decrypt the payload\n\tfor i := range encryptedPayload {\n\t\tencryptedPayload[i] ^= k_bytes[i%len(k_bytes)]\n\t}\n\n\treturn encryptedPayload\n}\n<commit_msg>Add logging to Equivocaiton<commit_after>package dcnet\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"github.com\/lbarman\/prifi\/prifi-lib\/config\"\n\t\"gopkg.in\/dedis\/crypto.v0\/abstract\"\n\t\"gopkg.in\/dedis\/onet.v1\/log\"\n)\n\n\/\/ Clients compute:\n\/\/ kappa_i = k_i + h * SUM_j(q_ij), where q_ij = H(p_ij) in group\n\/\/ c' = k_i + c\n\/\/\n\/\/ Trustees compute:\n\/\/ sigma_i = SUM_i(q_ij), where q_ij = H(s_ij) in group\n\/\/\n\/\/ Relay compute:\n\/\/ k_i = SUM_i(kappa_i) - h * (SUM_j(sigma_i))\n\/\/ = SUM_i(h * SUM_j(q_ij)) + k_i - h * SUM_j(SUM_i(q_ij))\n\/\/ c = k_i + c'\n\/\/\n\n\/\/ Equivocation holds the functions needed for equivocation protection\ntype EquivocationProtection struct {\n\thistory abstract.Scalar\n\trandomness abstract.Cipher\n\tsuite abstract.Suite\n}\n\n\/\/ NewEquivocation creates the structure that handle equivocation protection\nfunc NewEquivocation() *EquivocationProtection {\n\te := new(EquivocationProtection)\n\te.suite = config.CryptoSuite\n\te.history = e.suite.Scalar().One()\n\n\trandomKey := make([]byte, e.suite.Cipher(nil).KeySize())\n\trand.Read(randomKey)\n\te.randomness = e.suite.Cipher(randomKey)\n\n\treturn e\n}\n\nfunc (e *EquivocationProtection) randomScalar() abstract.Scalar {\n\treturn e.suite.Scalar().Pick(e.randomness)\n}\n\nfunc (e *EquivocationProtection) hashInGroup(data []byte) abstract.Scalar {\n\treturn e.suite.Scalar().SetBytes(data)\n}\n\n\/\/ Update History adds those bits to the history hash chain\nfunc (e *EquivocationProtection) UpdateHistory(data []byte) {\n\thistoryB := e.history.Bytes()\n\ttoBeHashed := make([]byte, len(historyB)+len(data))\n\tnewPayload := sha256.Sum256(toBeHashed)\n\te.history.SetBytes(newPayload[:])\n}\n\n\/\/ a function that takes a payload x, encrypt it as x' = x + k, and returns x' and kappa = k + history * (sum of the (hashes of pads))\nfunc (e *EquivocationProtection) ClientEncryptPayload(slotOwner bool, x []byte, p_j [][]byte) ([]byte, []byte) {\n\n\t\/\/ hash the pads p_i into q_i\n\tq_j := make([]abstract.Scalar, len(p_j))\n\tfor trustee_j := range q_j {\n\t\tq_j[trustee_j] = e.hashInGroup(p_j[trustee_j])\n\t}\n\n\t\/\/ sum of q_i\n\tsum := e.suite.Scalar().Zero()\n\tfor _, p := range q_j {\n\t\tsum = sum.Add(sum, p)\n\t}\n\n\tproduct := sum.Mul(sum, e.history)\n\n\t\/\/we're not the slot owner\n\tif !slotOwner {\n\t\tkappa_i := product\n\t\treturn x, kappa_i.Bytes()\n\t}\n\n\tk_i := e.randomScalar()\n\tk_i_bytes := k_i.Bytes()\n\n\t\/\/ encrypt payload\n\tfor i := range x {\n\t\tx[i] ^= k_i_bytes[i%len(k_i_bytes)]\n\t}\n\n\t\/\/ compute kappa\n\tkappa_i := k_i.Add(k_i, product)\n\treturn x, kappa_i.Bytes()\n}\n\n\/\/ a function that takes returns the byte[] version of sigma_j\nfunc (e *EquivocationProtection) TrusteeGetContribution(s_i [][]byte) []byte {\n\n\t\/\/ hash the pads p_i into q_i\n\tq_i := make([]abstract.Scalar, len(s_i))\n\tfor client_i := range q_i {\n\t\tq_i[client_i] = e.hashInGroup(s_i[client_i])\n\t}\n\n\t\/\/ sum of q_i\n\tsum := e.suite.Scalar().Zero()\n\tfor _, p := range q_i {\n\t\tsum = sum.Add(sum, p)\n\t}\n\n\tkappa_j := sum\n\n\treturn kappa_j.Bytes()\n}\n\n\/\/ given all contributions, decodes the payload\nfunc (e *EquivocationProtection) RelayDecode(encryptedPayload []byte, trusteesContributions [][]byte, clientsContributions [][]byte) []byte {\n\n\t\/\/reconstitute the abstract.Point values\n\ttrustee_kappa_j := make([]abstract.Scalar, len(trusteesContributions))\n\tfor k, v := range trusteesContributions {\n\t\ttrustee_kappa_j[k] = e.suite.Scalar().SetBytes(v)\n\t}\n\tclient_kappa_i := make([]abstract.Scalar, len(clientsContributions))\n\tfor k, v := range clientsContributions {\n\t\tclient_kappa_i[k] = e.suite.Scalar().SetBytes(v)\n\t}\n\n\t\/\/ compute sum of trustees contribs\n\tsumTrustees := e.suite.Scalar().Zero()\n\tfor _, v := range trustee_kappa_j {\n\t\tsumTrustees = sumTrustees.Add(sumTrustees, v)\n\t}\n\n\t\/\/ compute sum of clients contribs\n\tsumClients := e.suite.Scalar().Zero()\n\tfor _, v := range client_kappa_i {\n\t\tlog.Lvl1(\"Adding in\", v, \"value is now\", sumClients)\n\t\tsumClients = sumClients.Add(sumClients, v)\n\t}\n\n\tprod := sumTrustees.Mul(sumTrustees, e.history)\n\tk_i := sumClients.Sub(sumClients, prod)\n\n\t\/\/now use k to decrypt the payload\n\tk_bytes := k_i.Bytes()\n\n\tif len(k_bytes) == 0 {\n\t\tlog.Lvl1(\"Error: Equivocation couldn't recover the k_bytes value\")\n\t\tlog.Lvl1(\"encryptedPayload:\", encryptedPayload)\n\t\tfor k, v := range trusteesContributions {\n\t\t\tlog.Lvl1(\"trusteesContributions:\", v, \"mapped to\", trustee_kappa_j[k])\n\t\t}\n\t\tfor k, v := range clientsContributions {\n\t\t\tlog.Lvl1(\"clientsContributions:\", v, \"mapped to\", client_kappa_i[k])\n\t\t}\n\t\tlog.Lvl1(\"sumTrustees:\", sumTrustees)\n\t\tlog.Lvl1(\"sumClients:\", sumClients)\n\t\tlog.Lvl1(\"history:\", e.history)\n\t\tlog.Lvl1(\"prod:\", prod)\n\t\tlog.Lvl1(\"k_i:\", k_i)\n\t\treturn make([]byte, 0)\n\t}\n\n\t\/\/ decrypt the payload\n\tfor i := range encryptedPayload {\n\t\tencryptedPayload[i] ^= k_bytes[i%len(k_bytes)]\n\t}\n\n\treturn encryptedPayload\n}\n<|endoftext|>"} {"text":"<commit_before>package outgoing\n\nimport (\n\t\"strings\"\n)\n\ntype Request struct {\n\tToken string `json:\"token\"`\n\tTimestamp int `json:\"ts\"`\n\tText string `json:\"text\"`\n\tTriggerWord string `json:\"trigger_word\"`\n\tSubdomain string `json:\"subdomain\"`\n\tChannelName string `json:\"channel_name\"`\n\tUserName string `json:\"user_name\"`\n}\n\nfunc (p *Request) Args() []string {\n\n\tstrArgs := strings.TrimSpace(strings.TrimLeft(p.Text, p.TriggerWord))\n\n\tif len(strArgs) == 0 {\n\t\treturn nil\n\t}\n\n\tvar retArgs []string\n\targs := strings.Split(strArgs, \" \")\n\n\tfor i := 0; i < len(args); i++ {\n\t\ts := strings.TrimSpace(args[i])\n\t\tif len(s) != 0 {\n\t\t\tretArgs = append(retArgs, s)\n\t\t}\n\t}\n\treturn retArgs\n}\n\ntype Image struct {\n\tURL string `json:\"url\"`\n}\n\ntype Attachment struct {\n\tTitle string `json:\"title\"`\n\tText string `json:\"text\"`\n\tColor string `json:\"color\"`\n\tImages []Image `json:\"images\"`\n}\n\ntype Response struct {\n\tText string `json:\"text\"`\n\tAttachments []Attachment `json:\"attachments\"`\n}\n<commit_msg>fix args issue<commit_after>package outgoing\n\nimport (\n\t\"strings\"\n)\n\ntype Request struct {\n\tToken string `json:\"token\"`\n\tTimestamp int `json:\"ts\"`\n\tText string `json:\"text\"`\n\tTriggerWord string `json:\"trigger_word\"`\n\tSubdomain string `json:\"subdomain\"`\n\tChannelName string `json:\"channel_name\"`\n\tUserName string `json:\"user_name\"`\n}\n\nfunc (p *Request) Args() []string {\n\n\tstrArgs := strings.TrimSpace(strings.TrimPrefix(p.Text, p.TriggerWord))\n\n\tif len(strArgs) == 0 {\n\t\treturn nil\n\t}\n\n\tvar retArgs []string\n\targs := strings.Split(strArgs, \" \")\n\n\tfor i := 0; i < len(args); i++ {\n\t\ts := strings.TrimSpace(args[i])\n\t\tif len(s) != 0 {\n\t\t\tretArgs = append(retArgs, s)\n\t\t}\n\t}\n\treturn retArgs\n}\n\ntype Image struct {\n\tURL string `json:\"url\"`\n}\n\ntype Attachment struct {\n\tTitle string `json:\"title\"`\n\tText string `json:\"text\"`\n\tColor string `json:\"color\"`\n\tImages []Image `json:\"images\"`\n}\n\ntype Response struct {\n\tText string `json:\"text\"`\n\tAttachments []Attachment `json:\"attachments\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/fuse\/fsutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n)\n\n\/\/ A type that manages read and read\/write leases for anonymous temporary files.\n\/\/\n\/\/ Safe for concurrent access. Must be created with NewFileLeaser.\ntype FileLeaser struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tdir string\n\tlimit int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ A lock that guards the mutable state in this struct, which must not be\n\t\/\/ held for any blocking operation.\n\t\/\/\n\t\/\/ Lock ordering\n\t\/\/ -------------\n\t\/\/\n\t\/\/ Define our strict partial order < as follows:\n\t\/\/\n\t\/\/ 1. For any read\/write lease W, W < leaser.\n\t\/\/ 2. For any read lease R, R < leaser.\n\t\/\/ 3. For any read\/write lease W and read lease R, W < R.\n\t\/\/\n\t\/\/ In other words: read\/write before read before leaser, and never hold two\n\t\/\/ locks from the same category together.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current estimated total size of outstanding read\/write leases. This is\n\t\/\/ only an estimate because we can't synchronize its update with a call to\n\t\/\/ the wrapped file to e.g. write or truncate.\n\treadWriteOutstanding int64\n\n\t\/\/ All outstanding read leases, ordered by recency of use.\n\t\/\/\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\treadLeases list.List\n\n\t\/\/ The sum of all outstanding read lease sizes.\n\t\/\/\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\treadOutstanding int64\n\n\t\/\/ Index of read leases by pointer.\n\t\/\/\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\treadLeasesIndex map[*readLease]*list.Element\n}\n\n\/\/ Create a new file leaser that uses the supplied directory for temporary\n\/\/ files (before unlinking them) and attempts to keep usage in bytes below the\n\/\/ given limit. If dir is empty, the system default will be used.\n\/\/\n\/\/ Usage may exceed the given limit if there are read\/write leases whose total\n\/\/ size exceeds the limit, since such leases cannot be revoked.\nfunc NewFileLeaser(\n\tdir string,\n\tlimitBytes int64) (fl *FileLeaser) {\n\tfl = &FileLeaser{\n\t\tdir: dir,\n\t\tlimit: limitBytes,\n\t\treadLeasesIndex: make(map[*readLease]*list.Element),\n\t}\n\n\tfl.mu = syncutil.NewInvariantMutex(fl.checkInvariants)\n\n\treturn\n}\n\n\/\/ Create a new anonymous file, and return a read\/write lease for it. The\n\/\/ read\/write lease will pin resources until rwl.Downgrade is called. It need\n\/\/ not be called if the process is exiting.\nfunc (fl *FileLeaser) NewFile() (rwl ReadWriteLease, err error) {\n\t\/\/ Create an anonymous file.\n\tf, err := fsutil.AnonymousFile(fl.dir)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"AnonymousFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Wrap a lease around it.\n\trwl = newReadWriteLease(fl, 0, f)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc maxInt64(a int64, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) checkInvariants() {\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tif rl.revoked() {\n\t\t\tpanic(\"Found revoked read lease\")\n\t\t}\n\t}\n\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\tvar sum int64\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tsum += rl.Size()\n\t}\n\n\tif fl.readOutstanding != sum {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readOutstanding mismatch: %v vs. %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tsum))\n\t}\n\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\tif !(0 <= fl.readOutstanding) {\n\t\tpanic(fmt.Sprintf(\"Unexpected readOutstanding: %v\", fl.readOutstanding))\n\t}\n\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\tif !(fl.readOutstanding <= maxInt64(0, fl.limit-fl.readWriteOutstanding)) {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Unexpected readOutstanding: %v. limit: %v, readWriteOutstanding: %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tfl.limit,\n\t\t\tfl.readWriteOutstanding))\n\t}\n\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\tif len(fl.readLeasesIndex) != fl.readLeases.Len() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readLeasesIndex length mismatch: %v vs. %v\",\n\t\t\tlen(fl.readLeasesIndex),\n\t\t\tfl.readLeases.Len()))\n\t}\n\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\tif fl.readLeasesIndex[e.Value.(*readLease)] != e {\n\t\t\tpanic(\"Mismatch in readLeasesIndex\")\n\t\t}\n\t}\n}\n\n\/\/ Add the supplied delta to the leaser's view of outstanding read\/write lease\n\/\/ bytes, then revoke read leases until we're under limit or we run out of\n\/\/ leases to revoke.\n\/\/\n\/\/ Called by readWriteLease while holding its lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) addReadWriteByteDelta(delta int64) {\n\t\/\/ TODO(jacobsa): When evicting, repeatedly:\n\t\/\/ 1. Find least recently used read lease.\n\t\/\/ 2. Drop leaser lock.\n\t\/\/ 3. Acquire read lease lock.\n\t\/\/ 4. Reacquire leaser lock.\n\t\/\/ 5. If under limit now, drop both locks and return.\n\t\/\/ 6. If lease already evicted, drop its lock and go to #1.\n\t\/\/ 7. Evict lease, drop both locks. If still above limit, start over.\n\tpanic(\"TODO\")\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) overLimit() bool {\n\treturn fl.readOutstanding+fl.readWriteOutstanding > fl.limit\n}\n\n\/\/ An implementation detail of FileLeaser.evict.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) evictOne(rl *readLease) {\n\t\/\/ We must acquire the read lease's lock, which requires us to first drop\n\t\/\/ the leaser's lock, then reacquire it.\n\tfl.mu.Unlock()\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\tfl.mu.Lock()\n\n\t\/\/ Now we have both locks, but the lease may have already been revoked. If\n\t\/\/ so, there's nothing to do and we should start the process over.\n\tif rl.revoked() {\n\t\treturn\n\t}\n\n\t\/\/ Also no need to over-evict if someone has already done our job for us.\n\tif !fl.overLimit() {\n\t\treturn\n\t}\n\n\t\/\/ Revoke the lease while holding its lock.\n\tfl.revoke(rl)\n}\n\n\/\/ Revoke read leases until we're under limit or we run out of things to revoke.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) evict() {\n\tfor fl.overLimit() {\n\t\t\/\/ Do we have anything to revoke?\n\t\tlru := fl.readLeases.Back()\n\t\tif lru == nil {\n\t\t\treturn\n\t\t}\n\n\t\trl := lru.Value.(*readLease)\n\t\tfl.evictOne(rl)\n\t}\n}\n\n\/\/ Downgrade the supplied read\/write lease, given its current size and the\n\/\/ underlying file.\n\/\/\n\/\/ Called by readWriteLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) downgrade(\n\trwl *readWriteLease,\n\tsize int64,\n\tfile *os.File) (rl ReadLease) {\n\t\/\/ Create the read lease.\n\trlTyped := newReadLease(size, fl, file)\n\trl = rlTyped\n\n\t\/\/ Update the leaser's state, noting the new read lease and that the\n\t\/\/ read\/write lease has gone away.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\tfl.readWriteOutstanding -= size\n\tfl.readOutstanding += size\n\n\te := fl.readLeases.PushFront(rl)\n\tfl.readLeasesIndex[rlTyped] = e\n\n\t\/\/ Ensure that we're not now over capacity.\n\tfl.evict()\n\n\treturn\n}\n\n\/\/ Upgrade the supplied read lease, given its size and the underlying file.\n\/\/\n\/\/ Called by readLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) upgrade(\n\trl *readLease,\n\tsize int64,\n\tfile *os.File) (rwl ReadWriteLease) {\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\t\/\/ Update leaser state.\n\tfl.readWriteOutstanding += size\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Crearte the read\/write lease, telling it that we already know its initial\n\t\/\/ size.\n\trwl = newReadWriteLease(fl, size, file)\n\n\treturn\n}\n\n\/\/ Forcibly revoke the supplied read lease.\n\/\/\n\/\/ LOCKS_REQUIRED(rl, fl.mu)\nfunc (fl *FileLeaser) revoke(rl *readLease) {\n\tpanic(\"TODO\")\n}\n\n\/\/ Called by the read lease when the user wants to manually revoke it.\n\/\/\n\/\/ LOCKS_REQUIRED(rl)\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) revokeVoluntarily(rl *readLease) {\n\t\/\/ TODO(jacobsa): Acquire file leaser lock, update file leaser state, call\n\t\/\/ rl.destroy. Later we can factor all but acquiring the lock out into a\n\t\/\/ separate helper that is shared by the revoke-for-capacity logic, which\n\t\/\/ will already have the lock.\n\tpanic(\"TODO\")\n}\n<commit_msg>FileLeaser.addReadWriteByteDelta<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/fuse\/fsutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n)\n\n\/\/ A type that manages read and read\/write leases for anonymous temporary files.\n\/\/\n\/\/ Safe for concurrent access. Must be created with NewFileLeaser.\ntype FileLeaser struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tdir string\n\tlimit int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ A lock that guards the mutable state in this struct, which must not be\n\t\/\/ held for any blocking operation.\n\t\/\/\n\t\/\/ Lock ordering\n\t\/\/ -------------\n\t\/\/\n\t\/\/ Define our strict partial order < as follows:\n\t\/\/\n\t\/\/ 1. For any read\/write lease W, W < leaser.\n\t\/\/ 2. For any read lease R, R < leaser.\n\t\/\/ 3. For any read\/write lease W and read lease R, W < R.\n\t\/\/\n\t\/\/ In other words: read\/write before read before leaser, and never hold two\n\t\/\/ locks from the same category together.\n\tmu syncutil.InvariantMutex\n\n\t\/\/ The current estimated total size of outstanding read\/write leases. This is\n\t\/\/ only an estimate because we can't synchronize its update with a call to\n\t\/\/ the wrapped file to e.g. write or truncate.\n\treadWriteOutstanding int64\n\n\t\/\/ All outstanding read leases, ordered by recency of use.\n\t\/\/\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\treadLeases list.List\n\n\t\/\/ The sum of all outstanding read lease sizes.\n\t\/\/\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\treadOutstanding int64\n\n\t\/\/ Index of read leases by pointer.\n\t\/\/\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\treadLeasesIndex map[*readLease]*list.Element\n}\n\n\/\/ Create a new file leaser that uses the supplied directory for temporary\n\/\/ files (before unlinking them) and attempts to keep usage in bytes below the\n\/\/ given limit. If dir is empty, the system default will be used.\n\/\/\n\/\/ Usage may exceed the given limit if there are read\/write leases whose total\n\/\/ size exceeds the limit, since such leases cannot be revoked.\nfunc NewFileLeaser(\n\tdir string,\n\tlimitBytes int64) (fl *FileLeaser) {\n\tfl = &FileLeaser{\n\t\tdir: dir,\n\t\tlimit: limitBytes,\n\t\treadLeasesIndex: make(map[*readLease]*list.Element),\n\t}\n\n\tfl.mu = syncutil.NewInvariantMutex(fl.checkInvariants)\n\n\treturn\n}\n\n\/\/ Create a new anonymous file, and return a read\/write lease for it. The\n\/\/ read\/write lease will pin resources until rwl.Downgrade is called. It need\n\/\/ not be called if the process is exiting.\nfunc (fl *FileLeaser) NewFile() (rwl ReadWriteLease, err error) {\n\t\/\/ Create an anonymous file.\n\tf, err := fsutil.AnonymousFile(fl.dir)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"AnonymousFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Wrap a lease around it.\n\trwl = newReadWriteLease(fl, 0, f)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc maxInt64(a int64, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\n\treturn b\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) checkInvariants() {\n\t\/\/ INVARIANT: Each element is of type *readLease\n\t\/\/ INVARIANT: No element has been revoked.\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tif rl.revoked() {\n\t\t\tpanic(\"Found revoked read lease\")\n\t\t}\n\t}\n\n\t\/\/ INVARIANT: Equal to the sum over readLeases sizes.\n\tvar sum int64\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\trl := e.Value.(*readLease)\n\t\tsum += rl.Size()\n\t}\n\n\tif fl.readOutstanding != sum {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readOutstanding mismatch: %v vs. %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tsum))\n\t}\n\n\t\/\/ INVARIANT: 0 <= readOutstanding\n\tif !(0 <= fl.readOutstanding) {\n\t\tpanic(fmt.Sprintf(\"Unexpected readOutstanding: %v\", fl.readOutstanding))\n\t}\n\n\t\/\/ INVARIANT: readOutstanding <= max(0, limit - readWriteOutstanding)\n\tif !(fl.readOutstanding <= maxInt64(0, fl.limit-fl.readWriteOutstanding)) {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Unexpected readOutstanding: %v. limit: %v, readWriteOutstanding: %v\",\n\t\t\tfl.readOutstanding,\n\t\t\tfl.limit,\n\t\t\tfl.readWriteOutstanding))\n\t}\n\n\t\/\/ INVARIANT: Is an index of exactly the elements of readLeases\n\tif len(fl.readLeasesIndex) != fl.readLeases.Len() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"readLeasesIndex length mismatch: %v vs. %v\",\n\t\t\tlen(fl.readLeasesIndex),\n\t\t\tfl.readLeases.Len()))\n\t}\n\n\tfor e := fl.readLeases.Front(); e != nil; e = e.Next() {\n\t\tif fl.readLeasesIndex[e.Value.(*readLease)] != e {\n\t\t\tpanic(\"Mismatch in readLeasesIndex\")\n\t\t}\n\t}\n}\n\n\/\/ Add the supplied delta to the leaser's view of outstanding read\/write lease\n\/\/ bytes, then revoke read leases until we're under limit or we run out of\n\/\/ leases to revoke.\n\/\/\n\/\/ Called by readWriteLease while holding its lock.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) addReadWriteByteDelta(delta int64) {\n\tfl.readWriteOutstanding += delta\n\tfl.evict()\n}\n\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) overLimit() bool {\n\treturn fl.readOutstanding+fl.readWriteOutstanding > fl.limit\n}\n\n\/\/ An implementation detail of FileLeaser.evict.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) evictOne(rl *readLease) {\n\t\/\/ We must acquire the read lease's lock, which requires us to first drop\n\t\/\/ the leaser's lock, then reacquire it.\n\tfl.mu.Unlock()\n\trl.Mu.Lock()\n\tdefer rl.Mu.Unlock()\n\tfl.mu.Lock()\n\n\t\/\/ Now we have both locks, but the lease may have already been revoked. If\n\t\/\/ so, there's nothing to do and we should start the process over.\n\tif rl.revoked() {\n\t\treturn\n\t}\n\n\t\/\/ Also no need to over-evict if someone has already done our job for us.\n\tif !fl.overLimit() {\n\t\treturn\n\t}\n\n\t\/\/ Revoke the lease while holding its lock.\n\tfl.revoke(rl)\n}\n\n\/\/ Revoke read leases until we're under limit or we run out of things to revoke.\n\/\/\n\/\/ LOCKS_REQUIRED(fl.mu)\nfunc (fl *FileLeaser) evict() {\n\tfor fl.overLimit() {\n\t\t\/\/ Do we have anything to revoke?\n\t\tlru := fl.readLeases.Back()\n\t\tif lru == nil {\n\t\t\treturn\n\t\t}\n\n\t\trl := lru.Value.(*readLease)\n\t\tfl.evictOne(rl)\n\t}\n}\n\n\/\/ Downgrade the supplied read\/write lease, given its current size and the\n\/\/ underlying file.\n\/\/\n\/\/ Called by readWriteLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) downgrade(\n\trwl *readWriteLease,\n\tsize int64,\n\tfile *os.File) (rl ReadLease) {\n\t\/\/ Create the read lease.\n\trlTyped := newReadLease(size, fl, file)\n\trl = rlTyped\n\n\t\/\/ Update the leaser's state, noting the new read lease and that the\n\t\/\/ read\/write lease has gone away.\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\tfl.readWriteOutstanding -= size\n\tfl.readOutstanding += size\n\n\te := fl.readLeases.PushFront(rl)\n\tfl.readLeasesIndex[rlTyped] = e\n\n\t\/\/ Ensure that we're not now over capacity.\n\tfl.evict()\n\n\treturn\n}\n\n\/\/ Upgrade the supplied read lease, given its size and the underlying file.\n\/\/\n\/\/ Called by readLease with its lock held.\n\/\/\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) upgrade(\n\trl *readLease,\n\tsize int64,\n\tfile *os.File) (rwl ReadWriteLease) {\n\tfl.mu.Lock()\n\tdefer fl.mu.Unlock()\n\n\t\/\/ Update leaser state.\n\tfl.readWriteOutstanding += size\n\tfl.readOutstanding -= size\n\n\te := fl.readLeasesIndex[rl]\n\tdelete(fl.readLeasesIndex, rl)\n\tfl.readLeases.Remove(e)\n\n\t\/\/ Crearte the read\/write lease, telling it that we already know its initial\n\t\/\/ size.\n\trwl = newReadWriteLease(fl, size, file)\n\n\treturn\n}\n\n\/\/ Forcibly revoke the supplied read lease.\n\/\/\n\/\/ LOCKS_REQUIRED(rl, fl.mu)\nfunc (fl *FileLeaser) revoke(rl *readLease) {\n\tpanic(\"TODO\")\n}\n\n\/\/ Called by the read lease when the user wants to manually revoke it.\n\/\/\n\/\/ LOCKS_REQUIRED(rl)\n\/\/ LOCKS_EXCLUDED(fl.mu)\nfunc (fl *FileLeaser) revokeVoluntarily(rl *readLease) {\n\t\/\/ TODO(jacobsa): Acquire file leaser lock, update file leaser state, call\n\t\/\/ rl.destroy. Later we can factor all but acquiring the lock out into a\n\t\/\/ separate helper that is shared by the revoke-for-capacity logic, which\n\t\/\/ will already have the lock.\n\tpanic(\"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Mute Communications Ltd.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage client\n\nimport (\n\t\"errors\"\n\t\"sync\"\n)\n\ntype errorTranslation struct {\n\terr error\n\tfatal bool\n}\n\nvar errorTranslateMap map[string]errorTranslation\nvar errorTranslateMutex *sync.Mutex\n\nfunc registerTranslateError(errorString string, fatal bool) error {\n\tif errorTranslateMap == nil {\n\t\terrorTranslateMap = make(map[string]errorTranslation)\n\t\terrorTranslateMutex = new(sync.Mutex)\n\t}\n\te := errorTranslation{\n\t\terr: errors.New(errorString),\n\t\tfatal: fatal,\n\t}\n\n\terrorTranslateMap[errorString] = e\n\treturn e.err\n}\n\nfunc lookupError(inputError error) (translated, fatal bool, err error) {\n\terrorTranslateMutex.Lock()\n\tdefer errorTranslateMutex.Unlock()\n\tstr := string(inputError.Error())\n\tif e, ok := errorTranslateMap[str]; ok {\n\t\treturn true, e.fatal, e.err\n\t}\n\treturn false, false, inputError\n}\n\nfunc translateError(inputError error) error {\n\t_, _, err := lookupError(inputError)\n\treturn err\n}\n\nvar (\n\t\/\/ Errors produced by the client\n\n\t\/\/ ErrRetry is returned on recoverable errors\n\tErrRetry = errors.New(\"client: Retry\")\n\t\/\/ ErrOffline is returned if the client is offline\n\tErrOffline = errors.New(\"client: Offline\")\n\t\/\/ ErrFinal is returned on final errors that cannot continue\n\tErrFinal = errors.New(\"client: Final error\")\n\t\/\/ ErrFatal is returned on fatal errors produced by the client implementation itself\n\tErrFatal = errors.New(\"client: Fatal client error\")\n\t\/\/ ErrNeedReissue is returned if a non-renewable token was issued by the walletserver but not owner was specified\n\tErrNeedReissue = errors.New(\"client: Token needs reissue to owner\")\n\t\/\/ ErrNotMine is returned if a token is not under our control\n\tErrNotMine = errors.New(\"client: Not my token\")\n\t\/\/ ErrLocked is returned if a token has been locked\n\tErrLocked = errors.New(\"client: Token in use\")\n\t\/\/ ErrExpireToken is returned if a token has expired\n\tErrExpireToken = errors.New(\"client: Token expired\")\n\t\/\/ ErrSignatureToken is returned if the token has a bad signature\n\tErrSignatureToken = errors.New(\"client: Bad signature\")\n\t\/\/ ErrOwnerToken is returned if a token has an unexpected owner\n\tErrOwnerToken = errors.New(\"client: Unexpected owner\")\n\t\/\/ ErrUsageToken is returned if a token has an unexpected usage\n\tErrUsageToken = errors.New(\"client: Unexpected usage\")\n\t\/\/ ErrTokenKnown is returned if a received token is already known\n\tErrTokenKnown = errors.New(\"client: Token is already known\")\n\t\/\/ ErrNoToken is returned if no token could be fetched from wallet storage\n\tErrNoToken = errors.New(\"client: No token in wallet\")\n)\n\nvar (\n\t\/\/ Errors from wallet server:\n\n\t\/\/ ErrWalletServer is returned if the server experienced a problem that cannot be exposed to the client\n\tErrWalletServer = registerTranslateError(\"walletserver: Server error\", false)\n\t\/\/ ErrNoUser is returned if the user could not be found\n\tErrNoUser = registerTranslateError(\"walletserver: No such user\", true)\n\t\/\/ ErrBadToken is returned if the user's authtoken could not be verified\n\tErrBadToken = registerTranslateError(\"walletserver: Bad token\", true)\n\t\/\/ ErrInsufficientFunds is returned if the user's funds are exhausted\n\tErrInsufficientFunds = registerTranslateError(\"walletserver: Insufficient funds\", true)\n\n\t\/\/\n\t\/\/ Errors from keylookup server:\n\n\t\/\/ ErrKeyLookupServer is returned if there was an error in the lookup server\n\tErrKeyLookupServer = registerTranslateError(\"keylookup: Server error\", false)\n\t\/\/ ErrNotFound is returned if the key could not be found\n\tErrNotFound = registerTranslateError(\"keylookup: Key not found\", true)\n\n\t\/\/\n\t\/\/ Errors from issuer server:\n\n\t\/\/ ErrUsageMismatch is returned if the usage of the parameter publickey and the token publickey do not match\n\tErrUsageMismatch = registerTranslateError(\"issuer: Parameter and Token usage mismatch\", true)\n\t\/\/ ErrParamsExpired is returned if the parameters given are not in line with any available private key\n\tErrParamsExpired = registerTranslateError(\"issuer: Parameters have expired\", true)\n\t\/\/ ErrBadCallType is returned if a wrong calltype is present in a packet\n\tErrBadCallType = registerTranslateError(\"issuer: Wrong calltype\", true)\n\t\/\/ ErrIssuerServer is returned on server error\n\tErrIssuerServer = registerTranslateError(\"issuer: Server errror\", false)\n\t\/\/ ErrTokenDoubleSpend is returned if the token has already been spent\n\tErrTokenDoubleSpend = registerTranslateError(\"issuer: Token double spend\", true)\n\t\/\/ ErrParamDoubleSpend is returned if a parameter has already been spent\n\tErrParamDoubleSpend = registerTranslateError(\"issuer: Parameter double spend\", true)\n\t\/\/ ErrBadPacket is returned if packet contents cannot be unmarshalled\n\tErrBadPacket = registerTranslateError(\"issuer: Bad package\", true)\n\t\/\/ ErrOwnerSignature is returned if an owner signature could not be verified\n\tErrOwnerSignature = registerTranslateError(\"issuer: Owner signature verification failed\", true)\n\t\/\/ ErrWrongIssuer is returned if a token is presented to the wrong issuer\n\tErrWrongIssuer = registerTranslateError(\"issuer: Wrong issuer\", true)\n\t\/\/ ErrExpire is returned if a token has already expired\n\tErrExpire = registerTranslateError(\"issuer: Token expired\", true)\n\t\/\/ ErrBadUsage is returned if a key is presented with the wrong usage setting\n\tErrBadUsage = registerTranslateError(\"issuer: Bad usage\", true)\n\t\/\/ ErrBadIssuer is returned if a key is presented with the wrong issuer\n\tErrBadIssuer = registerTranslateError(\"issuer: Bad issuer\", true)\n\t\/\/ ErrBadSignature is returned if a signature could not be verified\n\tErrBadSignature = registerTranslateError(\"issuer: Bad signature in token\", true)\n\t\/\/ ErrParameterMixed is returned if the parameter set contains mixed keys\n\tErrParameterMixed = registerTranslateError(\"issuer: Parameters were mixed\", true)\n)\n<commit_msg>make error message more specific<commit_after>\/\/ Copyright (c) 2015 Mute Communications Ltd.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage client\n\nimport (\n\t\"errors\"\n\t\"sync\"\n)\n\ntype errorTranslation struct {\n\terr error\n\tfatal bool\n}\n\nvar errorTranslateMap map[string]errorTranslation\nvar errorTranslateMutex *sync.Mutex\n\nfunc registerTranslateError(errorString string, fatal bool) error {\n\tif errorTranslateMap == nil {\n\t\terrorTranslateMap = make(map[string]errorTranslation)\n\t\terrorTranslateMutex = new(sync.Mutex)\n\t}\n\te := errorTranslation{\n\t\terr: errors.New(errorString),\n\t\tfatal: fatal,\n\t}\n\n\terrorTranslateMap[errorString] = e\n\treturn e.err\n}\n\nfunc lookupError(inputError error) (translated, fatal bool, err error) {\n\terrorTranslateMutex.Lock()\n\tdefer errorTranslateMutex.Unlock()\n\tstr := string(inputError.Error())\n\tif e, ok := errorTranslateMap[str]; ok {\n\t\treturn true, e.fatal, e.err\n\t}\n\treturn false, false, inputError\n}\n\nfunc translateError(inputError error) error {\n\t_, _, err := lookupError(inputError)\n\treturn err\n}\n\nvar (\n\t\/\/ Errors produced by the client\n\n\t\/\/ ErrRetry is returned on recoverable errors\n\tErrRetry = errors.New(\"client: Retry\")\n\t\/\/ ErrOffline is returned if the client is offline\n\tErrOffline = errors.New(\"client: Offline\")\n\t\/\/ ErrFinal is returned on final errors that cannot continue\n\tErrFinal = errors.New(\"client: Final serviceguard error\")\n\t\/\/ ErrFatal is returned on fatal errors produced by the client implementation itself\n\tErrFatal = errors.New(\"client: Fatal client error\")\n\t\/\/ ErrNeedReissue is returned if a non-renewable token was issued by the walletserver but not owner was specified\n\tErrNeedReissue = errors.New(\"client: Token needs reissue to owner\")\n\t\/\/ ErrNotMine is returned if a token is not under our control\n\tErrNotMine = errors.New(\"client: Not my token\")\n\t\/\/ ErrLocked is returned if a token has been locked\n\tErrLocked = errors.New(\"client: Token in use\")\n\t\/\/ ErrExpireToken is returned if a token has expired\n\tErrExpireToken = errors.New(\"client: Token expired\")\n\t\/\/ ErrSignatureToken is returned if the token has a bad signature\n\tErrSignatureToken = errors.New(\"client: Bad signature\")\n\t\/\/ ErrOwnerToken is returned if a token has an unexpected owner\n\tErrOwnerToken = errors.New(\"client: Unexpected owner\")\n\t\/\/ ErrUsageToken is returned if a token has an unexpected usage\n\tErrUsageToken = errors.New(\"client: Unexpected usage\")\n\t\/\/ ErrTokenKnown is returned if a received token is already known\n\tErrTokenKnown = errors.New(\"client: Token is already known\")\n\t\/\/ ErrNoToken is returned if no token could be fetched from wallet storage\n\tErrNoToken = errors.New(\"client: No token in wallet\")\n)\n\nvar (\n\t\/\/ Errors from wallet server:\n\n\t\/\/ ErrWalletServer is returned if the server experienced a problem that cannot be exposed to the client\n\tErrWalletServer = registerTranslateError(\"walletserver: Server error\", false)\n\t\/\/ ErrNoUser is returned if the user could not be found\n\tErrNoUser = registerTranslateError(\"walletserver: No such user\", true)\n\t\/\/ ErrBadToken is returned if the user's authtoken could not be verified\n\tErrBadToken = registerTranslateError(\"walletserver: Bad token\", true)\n\t\/\/ ErrInsufficientFunds is returned if the user's funds are exhausted\n\tErrInsufficientFunds = registerTranslateError(\"walletserver: Insufficient funds\", true)\n\n\t\/\/\n\t\/\/ Errors from keylookup server:\n\n\t\/\/ ErrKeyLookupServer is returned if there was an error in the lookup server\n\tErrKeyLookupServer = registerTranslateError(\"keylookup: Server error\", false)\n\t\/\/ ErrNotFound is returned if the key could not be found\n\tErrNotFound = registerTranslateError(\"keylookup: Key not found\", true)\n\n\t\/\/\n\t\/\/ Errors from issuer server:\n\n\t\/\/ ErrUsageMismatch is returned if the usage of the parameter publickey and the token publickey do not match\n\tErrUsageMismatch = registerTranslateError(\"issuer: Parameter and Token usage mismatch\", true)\n\t\/\/ ErrParamsExpired is returned if the parameters given are not in line with any available private key\n\tErrParamsExpired = registerTranslateError(\"issuer: Parameters have expired\", true)\n\t\/\/ ErrBadCallType is returned if a wrong calltype is present in a packet\n\tErrBadCallType = registerTranslateError(\"issuer: Wrong calltype\", true)\n\t\/\/ ErrIssuerServer is returned on server error\n\tErrIssuerServer = registerTranslateError(\"issuer: Server errror\", false)\n\t\/\/ ErrTokenDoubleSpend is returned if the token has already been spent\n\tErrTokenDoubleSpend = registerTranslateError(\"issuer: Token double spend\", true)\n\t\/\/ ErrParamDoubleSpend is returned if a parameter has already been spent\n\tErrParamDoubleSpend = registerTranslateError(\"issuer: Parameter double spend\", true)\n\t\/\/ ErrBadPacket is returned if packet contents cannot be unmarshalled\n\tErrBadPacket = registerTranslateError(\"issuer: Bad package\", true)\n\t\/\/ ErrOwnerSignature is returned if an owner signature could not be verified\n\tErrOwnerSignature = registerTranslateError(\"issuer: Owner signature verification failed\", true)\n\t\/\/ ErrWrongIssuer is returned if a token is presented to the wrong issuer\n\tErrWrongIssuer = registerTranslateError(\"issuer: Wrong issuer\", true)\n\t\/\/ ErrExpire is returned if a token has already expired\n\tErrExpire = registerTranslateError(\"issuer: Token expired\", true)\n\t\/\/ ErrBadUsage is returned if a key is presented with the wrong usage setting\n\tErrBadUsage = registerTranslateError(\"issuer: Bad usage\", true)\n\t\/\/ ErrBadIssuer is returned if a key is presented with the wrong issuer\n\tErrBadIssuer = registerTranslateError(\"issuer: Bad issuer\", true)\n\t\/\/ ErrBadSignature is returned if a signature could not be verified\n\tErrBadSignature = registerTranslateError(\"issuer: Bad signature in token\", true)\n\t\/\/ ErrParameterMixed is returned if the parameter set contains mixed keys\n\tErrParameterMixed = registerTranslateError(\"issuer: Parameters were mixed\", true)\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nimport(\n\t`bufio`\n\t`io`\n\t`fmt`\n\t`os`\n\t`strconv`\n\t`strings`\n)\n\n\/\/ Brain-damaged universal chess interface (UCI) protocol as described at\n\/\/ http:\/\/wbec-ridderkerk.nl\/html\/UCIProtocol.html\nfunc (e *Engine) Uci() *Engine {\n\tvar game *Game\n\tvar position *Position\n\n\t\/\/ \"uci\" command handler.\n\tdoUci := func(args []string) {\n\t\tfmt.Println(`Donna v1.0.0 Copyright (c) 2014 by Michael Dvorkin. All Rights Reserved.`)\n\t\tfmt.Println(`id author Michael Dvorkin`)\n\t\tfmt.Println(`uciok`)\n\t}\n\n\t\/\/ \"ucinewgame\" command handler.\n\tdoUciNewGame := func(args []string) {\n\t\tgame, position = nil, nil\n\t}\n\n\t\/\/ \"isready\" command handler.\n\tdoIsReady := func(args []string) {\n\t\tfmt.Println(`readyok`)\n\t}\n\n\t\/\/ \"position [startpos | fen ] [ moves ... ]\" command handler.\n\tdoPosition := func(args []string) {\n\t\tfmt.Printf(\"%q\\n\", args)\n\n\t\t\/\/ Make sure we've started the game since \"ucinewgame\" is optional.\n\t\tif game == nil || position == nil {\n\t\t\tgame = NewGame()\n\t\t\tposition = game.Start()\n\t\t}\n\n\t\tswitch args[0] {\n\t\tcase `startpos`:\n\t\t\targs = args[1:]\n\t\t\tposition = NewInitialPosition(game)\n\t\tcase `fen`:\n\t\t\tfen := []string{}\n\t\t\tfor _, token := range args[1:] {\n\t\t\t\targs = args[1:] \/\/ Shift the token.\n\t\t\t\tif token == `moves` {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfen = append(fen, token)\n\t\t\t}\n\t\t\tfmt.Printf(\"fen: %s\\n\", strings.Join(fen, ` `))\n\t\t\tposition = NewPositionFromFEN(game, strings.Join(fen, ` `))\n\t\tdefault: return\n\t\t}\n\n\t\tfmt.Printf(\"args: %q\\n%s\\n\", args, position)\n\t\tif position != nil && len(args) > 0 && args[0] == `moves` {\n\t\t\tfor _, move := range args[1:] {\n\t\t\t\targs = args[1:] \/\/ Shift the move.\n\t\t\t\tposition = position.MakeMove(NewMoveFromNotation(position, move))\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", position)\n\t}\n\n\t\/\/ \"go [[wtime winc | btime binc ] movestogo] | depth | movetime\"\n\tdoGo := func(args []string) {\n\t\tfmt.Printf(\"%q\\n\", args)\n\t\tassign := func(key, value string) {\n\t\t\tfmt.Printf(\"assign(key `%s` value `%s`)\\n\", key, value)\n\t\t\tif n, err := strconv.Atoi(value); err == nil {\n\t\t\t\te.Set(key, n)\n\t\t\t}\n\t\t}\n\t\tfor i, token := range args {\n\t\t\tfmt.Printf(\"\\t%d len %d Token [%v]\\n\", i, len(args), token)\n\t\t\t\/\/ Boolen \"infinite\" and \"ponder\" commands have no arguments, while \"depth\",\n\t\t\t\/\/ \"nodes\" etc. come with numeric argument.\n\t\t\tif token == `infinite` || token == `ponder` {\n\t\t\t\te.Set(token, true)\n\t\t\t} else if len(args) > i+1 {\n\t\t\t\tswitch token {\n\t\t\t\tcase `depth`, `nodes`, `movetime`, `movestogo`:\n\t\t\t\t\tassign(token, args[i+1])\n\t\t\t\tcase `wtime`:\n\t\t\t\t\tif position.color == White {\n\t\t\t\t\t\tassign(`time`, args[i+1])\n\t\t\t\t\t}\n\t\t\t\tcase `btime`:\n\t\t\t\t\tif position.color == Black {\n\t\t\t\t\t\tassign(`time`, args[i+1])\n\t\t\t\t\t}\n\t\t\t\tcase `winc`:\n\t\t\t\t\tif position.color == White {\n\t\t\t\t\t\tassign(`timeinc`, args[i+1])\n\t\t\t\t\t}\n\t\t\t\tcase `binc`:\n\t\t\t\t\tif position.color == Black {\n\t\t\t\t\t\tassign(`timeinc`, args[i+1])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Stop calculating as soon as possible.\n\tdoStop := func(args []string) {\n\t\te.clock.halt = true\n\t}\n\n\tvar commands = map[string]func([]string){\n\t\t`isready`: doIsReady, \n\t\t`uci`: doUci,\n\t\t`ucinewgame`: doUciNewGame,\n\t\t`position`: doPosition,\n\t\t`go`: doGo,\n\t\t`stop`: doStop,\n\t}\n\n \tbio := bufio.NewReader(os.Stdin)\n\tfor {\n\t\tcommand, err := bio.ReadString('\\n')\n\t\tif err != io.EOF && len(command) > 0 {\n\t\t\targs := strings.Split(command[:len(command)-1], ` `)\n\t\t\tif args[0] == `quit` {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif handler, ok := commands[args[0]]; ok {\n\t\t\t\thandler(args[1:])\n\t\t\t}\n\t\t}\n\t}\n\treturn e\n}\n<commit_msg>More straightforward UCI go command parsing<commit_after>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nimport(\n\t`bufio`\n\t`io`\n\t`fmt`\n\t`os`\n\t`strconv`\n\t`strings`\n)\n\n\/\/ Brain-damaged universal chess interface (UCI) protocol as described at\n\/\/ http:\/\/wbec-ridderkerk.nl\/html\/UCIProtocol.html\nfunc (e *Engine) Uci() *Engine {\n\tvar game *Game\n\tvar position *Position\n\n\t\/\/ \"uci\" command handler.\n\tdoUci := func(args []string) {\n\t\tfmt.Println(`Donna v1.0.0 Copyright (c) 2014 by Michael Dvorkin. All Rights Reserved.`)\n\t\tfmt.Println(`id author Michael Dvorkin`)\n\t\tfmt.Println(`uciok`)\n\t}\n\n\t\/\/ \"ucinewgame\" command handler.\n\tdoUciNewGame := func(args []string) {\n\t\tgame, position = nil, nil\n\t}\n\n\t\/\/ \"isready\" command handler.\n\tdoIsReady := func(args []string) {\n\t\tfmt.Println(`readyok`)\n\t}\n\n\t\/\/ \"position [startpos | fen ] [ moves ... ]\" command handler.\n\tdoPosition := func(args []string) {\n\t\tfmt.Printf(\"%q\\n\", args)\n\n\t\t\/\/ Make sure we've started the game since \"ucinewgame\" is optional.\n\t\tif game == nil || position == nil {\n\t\t\tgame = NewGame()\n\t\t\tposition = game.Start()\n\t\t}\n\n\t\tswitch args[0] {\n\t\tcase `startpos`:\n\t\t\targs = args[1:]\n\t\t\tposition = NewInitialPosition(game)\n\t\tcase `fen`:\n\t\t\tfen := []string{}\n\t\t\tfor _, token := range args[1:] {\n\t\t\t\targs = args[1:] \/\/ Shift the token.\n\t\t\t\tif token == `moves` {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfen = append(fen, token)\n\t\t\t}\n\t\t\tfmt.Printf(\"fen: %s\\n\", strings.Join(fen, ` `))\n\t\t\tposition = NewPositionFromFEN(game, strings.Join(fen, ` `))\n\t\tdefault: return\n\t\t}\n\n\t\tfmt.Printf(\"args: %q\\n%s\\n\", args, position)\n\t\tif position != nil && len(args) > 0 && args[0] == `moves` {\n\t\t\tfor _, move := range args[1:] {\n\t\t\t\targs = args[1:] \/\/ Shift the move.\n\t\t\t\tposition = position.MakeMove(NewMoveFromNotation(position, move))\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", position)\n\t}\n\n\t\/\/ \"go [[wtime winc | btime binc ] movestogo] | depth | nodes | movetime\"\n\tdoGo := func(args []string) {\n\t\toptions := e.options\n\t\tfmt.Printf(\"%q\\n\", args)\n\t\tfmt.Printf(\"-> e.options: %+v\\n\", e.options)\n\n\t\tfor i, token := range args {\n\t\t\tfmt.Printf(\"\\t%d len %d Token [%v]\\n\", i, len(args), token)\n\t\t\t\/\/ Boolen \"infinite\" and \"ponder\" commands have no arguments.\n\t\t\tif token == `infinite` {\n\t\t\t\toptions = Options{ infinite: true }\n\t\t\t} else if token == `ponder` {\n\t\t\t\toptions = Options{ ponder: true }\n\t\t\t} else if len(args) > i+1 {\n\t\t\t\tswitch token {\n\t\t\t\tcase `depth`:\n\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\toptions = Options{ maxDepth: n }\n\t\t\t\t\t}\n\t\t\t\tcase `movetime`:\n\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\toptions = Options{ maxDepth: n }\n\t\t\t\t\t}\n\t\t\t\tcase `nodes`:\n\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\toptions = Options{ maxNodes: n }\n\t\t\t\t\t}\n\t\t\t\tcase `wtime`:\n\t\t\t\t\tif position.color == White {\n\t\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\t\toptions.timeLeft = int64(n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase `btime`:\n\t\t\t\t\tif position.color == Black {\n\t\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\t\toptions.timeLeft = int64(n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase `winc`:\n\t\t\t\t\tif position.color == White {\n\t\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\t\toptions.timeInc = int64(n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase `binc`:\n\t\t\t\t\tif position.color == Black {\n\t\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\t\toptions.timeInc = int64(n)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase `movestogo`:\n\t\t\t\t\tif n, err := strconv.Atoi(args[i+1]); err == nil {\n\t\t\t\t\t\toptions.movesToGo = n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif options.timeLeft != 0 || options.timeInc != 0 || options.movesToGo != 0 {\n\t\t\toptions.ponder, options.infinite = false, false\n\t\t\toptions.maxDepth, options.maxNodes, options.moveTime = 0, 0, 0\n\t\t}\n\t\te.options = options\n\t\tfmt.Printf(\"=> e.options: %+v\\n\", e.options)\n\t}\n\n\t\/\/ Stop calculating as soon as possible.\n\tdoStop := func(args []string) {\n\t\te.clock.halt = true\n\t}\n\n\tvar commands = map[string]func([]string){\n\t\t`isready`: doIsReady, \n\t\t`uci`: doUci,\n\t\t`ucinewgame`: doUciNewGame,\n\t\t`position`: doPosition,\n\t\t`go`: doGo,\n\t\t`stop`: doStop,\n\t}\n\n \tbio := bufio.NewReader(os.Stdin)\n\tfor {\n\t\tcommand, err := bio.ReadString('\\n')\n\t\tif err != io.EOF && len(command) > 0 {\n\t\t\targs := strings.Split(command[:len(command)-1], ` `)\n\t\t\tif args[0] == `quit` {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif handler, ok := commands[args[0]]; ok {\n\t\t\t\thandler(args[1:])\n\t\t\t}\n\t\t}\n\t}\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the code dealing with package directory trees.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"go\/doc\"\n\t\"go\/parser\"\n\t\"io\/ioutil\"\n\t\"os\"\n\tpathutil \"path\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n\ntype Directory struct {\n\tDepth int\n\tPath string \/\/ includes Name\n\tName string\n\tText string \/\/ package documentation, if any\n\tDirs []*Directory \/\/ subdirectories\n}\n\n\nfunc isGoFile(f *os.FileInfo) bool {\n\treturn f.IsRegular() &&\n\t\t!strings.HasPrefix(f.Name, \".\") && \/\/ ignore .files\n\t\tpathutil.Ext(f.Name) == \".go\"\n}\n\n\nfunc isPkgFile(f *os.FileInfo) bool {\n\treturn isGoFile(f) &&\n\t\t!strings.HasSuffix(f.Name, \"_test.go\") \/\/ ignore test files\n}\n\n\nfunc isPkgDir(f *os.FileInfo) bool {\n\treturn f.IsDirectory() && len(f.Name) > 0 && f.Name[0] != '_'\n}\n\n\nfunc firstSentence(s string) string {\n\ti := -1 \/\/ index+1 of first terminator (punctuation ending a sentence)\n\tj := -1 \/\/ index+1 of first terminator followed by white space\n\tprev := 'A'\n\tfor k, ch := range s {\n\t\tk1 := k + 1\n\t\tif ch == '.' || ch == '!' || ch == '?' {\n\t\t\tif i < 0 {\n\t\t\t\ti = k1 \/\/ first terminator\n\t\t\t}\n\t\t\tif k1 < len(s) && s[k1] <= ' ' {\n\t\t\t\tif j < 0 {\n\t\t\t\t\tj = k1 \/\/ first terminator followed by white space\n\t\t\t\t}\n\t\t\t\tif !unicode.IsUpper(prev) {\n\t\t\t\t\tj = k1\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tprev = ch\n\t}\n\n\tif j < 0 {\n\t\t\/\/ use the next best terminator\n\t\tj = i\n\t\tif j < 0 {\n\t\t\t\/\/ no terminator at all, use the entire string\n\t\t\tj = len(s)\n\t\t}\n\t}\n\n\treturn s[0:j]\n}\n\n\ntype treeBuilder struct {\n\tpathFilter func(string) bool\n\tmaxDepth int\n}\n\n\nfunc (b *treeBuilder) newDirTree(path, name string, depth int) *Directory {\n\tif b.pathFilter != nil && !b.pathFilter(path) {\n\t\treturn nil\n\t}\n\n\tif depth >= b.maxDepth {\n\t\t\/\/ return a dummy directory so that the parent directory\n\t\t\/\/ doesn't get discarded just because we reached the max\n\t\t\/\/ directory depth\n\t\treturn &Directory{depth, path, name, \"\", nil}\n\t}\n\n\tlist, _ := ioutil.ReadDir(path) \/\/ ignore errors\n\n\t\/\/ determine number of subdirectories and package files\n\tndirs := 0\n\tnfiles := 0\n\tvar synopses [4]string \/\/ prioritized package documentation (0 == highest priority)\n\tfor _, d := range list {\n\t\tswitch {\n\t\tcase isPkgDir(d):\n\t\t\tndirs++\n\t\tcase isPkgFile(d):\n\t\t\tnfiles++\n\t\t\tif synopses[0] == \"\" {\n\t\t\t\t\/\/ no \"optimal\" package synopsis yet; continue to collect synopses\n\t\t\t\tfile, err := parser.ParseFile(pathutil.Join(path, d.Name), nil,\n\t\t\t\t\tparser.ParseComments|parser.PackageClauseOnly)\n\t\t\t\tif err == nil && file.Doc != nil {\n\t\t\t\t\t\/\/ prioritize documentation\n\t\t\t\t\ti := -1\n\t\t\t\t\tswitch file.Name.Name {\n\t\t\t\t\tcase name:\n\t\t\t\t\t\ti = 0 \/\/ normal case: directory name matches package name\n\t\t\t\t\tcase fakePkgName:\n\t\t\t\t\t\ti = 1 \/\/ synopses for commands\n\t\t\t\t\tcase \"main\":\n\t\t\t\t\t\ti = 2 \/\/ directory contains a main package\n\t\t\t\t\tdefault:\n\t\t\t\t\t\ti = 3 \/\/ none of the above\n\t\t\t\t\t}\n\t\t\t\t\tif 0 <= i && i < len(synopses) && synopses[i] == \"\" {\n\t\t\t\t\t\tsynopses[i] = firstSentence(doc.CommentText(file.Doc))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ create subdirectory tree\n\tvar dirs []*Directory\n\tif ndirs > 0 {\n\t\tdirs = make([]*Directory, ndirs)\n\t\ti := 0\n\t\tfor _, d := range list {\n\t\t\tif isPkgDir(d) {\n\t\t\t\tdd := b.newDirTree(pathutil.Join(path, d.Name), d.Name, depth+1)\n\t\t\t\tif dd != nil {\n\t\t\t\t\tdirs[i] = dd\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirs = dirs[0:i]\n\t}\n\n\t\/\/ if there are no package files and no subdirectories\n\t\/\/ (with package files), ignore the directory\n\tif nfiles == 0 && len(dirs) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ select the highest-priority synopsis for the directory entry, if any\n\tsynopsis := \"\"\n\tfor _, synopsis = range synopses {\n\t\tif synopsis != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &Directory{depth, path, name, synopsis, dirs}\n}\n\n\n\/\/ newDirectory creates a new package directory tree with at most maxDepth\n\/\/ levels, anchored at root. The result tree is pruned such that it only\n\/\/ contains directories that contain package files or that contain\n\/\/ subdirectories containing package files (transitively). If a non-nil\n\/\/ pathFilter is provided, directory paths additionally must be accepted\n\/\/ by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is\n\/\/ provided for maxDepth, nodes at larger depths are pruned as well; they\n\/\/ are assumed to contain package files even if their contents are not known\n\/\/ (i.e., in this case the tree may contain directories w\/o any package files).\n\/\/\nfunc newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {\n\td, err := os.Lstat(root)\n\tif err != nil || !isPkgDir(d) {\n\t\treturn nil\n\t}\n\tif maxDepth < 0 {\n\t\tmaxDepth = 1e6 \/\/ \"infinity\"\n\t}\n\tb := treeBuilder{pathFilter, maxDepth}\n\treturn b.newDirTree(root, d.Name, 0)\n}\n\n\nfunc (dir *Directory) writeLeafs(buf *bytes.Buffer) {\n\tif dir != nil {\n\t\tif len(dir.Dirs) == 0 {\n\t\t\tbuf.WriteString(dir.Path)\n\t\t\tbuf.WriteByte('\\n')\n\t\t\treturn\n\t\t}\n\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.writeLeafs(buf)\n\t\t}\n\t}\n}\n\n\nfunc (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {\n\tif dir != nil {\n\t\tif !skipRoot {\n\t\t\tc <- dir\n\t\t}\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.walk(c, false)\n\t\t}\n\t}\n}\n\n\nfunc (dir *Directory) iter(skipRoot bool) <-chan *Directory {\n\tc := make(chan *Directory)\n\tgo func() {\n\t\tdir.walk(c, skipRoot)\n\t\tclose(c)\n\t}()\n\treturn c\n}\n\n\nfunc (dir *Directory) lookupLocal(name string) *Directory {\n\tfor _, d := range dir.Dirs {\n\t\tif d.Name == name {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}\n\n\n\/\/ lookup looks for the *Directory for a given path, relative to dir.\nfunc (dir *Directory) lookup(path string) *Directory {\n\td := strings.Split(dir.Path, \"\/\", -1)\n\tp := strings.Split(path, \"\/\", -1)\n\ti := 0\n\tfor i < len(d) {\n\t\tif i >= len(p) || d[i] != p[i] {\n\t\t\treturn nil\n\t\t}\n\t\ti++\n\t}\n\tfor dir != nil && i < len(p) {\n\t\tdir = dir.lookupLocal(p[i])\n\t\ti++\n\t}\n\treturn dir\n}\n\n\n\/\/ DirEntry describes a directory entry. The Depth and Height values\n\/\/ are useful for presenting an entry in an indented fashion.\n\/\/\ntype DirEntry struct {\n\tDepth int \/\/ >= 0\n\tHeight int \/\/ = DirList.MaxHeight - Depth, > 0\n\tPath string \/\/ includes Name, relative to DirList root\n\tName string\n\tSynopsis string\n}\n\n\ntype DirList struct {\n\tMaxHeight int \/\/ directory tree height, > 0\n\tList []DirEntry\n}\n\n\n\/\/ listing creates a (linear) directory listing from a directory tree.\n\/\/ If skipRoot is set, the root directory itself is excluded from the list.\n\/\/\nfunc (root *Directory) listing(skipRoot bool) *DirList {\n\tif root == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ determine number of entries n and maximum height\n\tn := 0\n\tminDepth := 1 << 30 \/\/ infinity\n\tmaxDepth := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tn++\n\t\tif minDepth > d.Depth {\n\t\t\tminDepth = d.Depth\n\t\t}\n\t\tif maxDepth < d.Depth {\n\t\t\tmaxDepth = d.Depth\n\t\t}\n\t}\n\tmaxHeight := maxDepth - minDepth + 1\n\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ create list\n\tlist := make([]DirEntry, n)\n\ti := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tp := &list[i]\n\t\tp.Depth = d.Depth - minDepth\n\t\tp.Height = maxHeight - p.Depth\n\t\t\/\/ the path is relative to root.Path - remove the root.Path\n\t\t\/\/ prefix (the prefix should always be present but avoid\n\t\t\/\/ crashes and check)\n\t\tpath := d.Path\n\t\tif strings.HasPrefix(d.Path, root.Path) {\n\t\t\tpath = d.Path[len(root.Path):]\n\t\t}\n\t\t\/\/ remove trailing '\/' if any - path must be relative\n\t\tif len(path) > 0 && path[0] == '\/' {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tp.Path = path\n\t\tp.Name = d.Name\n\t\tp.Synopsis = d.Text\n\t\ti++\n\t}\n\n\treturn &DirList{maxHeight, list}\n}\n<commit_msg>godoc: only show directories containing true package files<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the code dealing with package directory trees.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"go\/doc\"\n\t\"go\/parser\"\n\t\"io\/ioutil\"\n\t\"os\"\n\tpathutil \"path\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n\ntype Directory struct {\n\tDepth int\n\tPath string \/\/ includes Name\n\tName string\n\tText string \/\/ package documentation, if any\n\tDirs []*Directory \/\/ subdirectories\n}\n\n\nfunc isGoFile(f *os.FileInfo) bool {\n\treturn f.IsRegular() &&\n\t\t!strings.HasPrefix(f.Name, \".\") && \/\/ ignore .files\n\t\tpathutil.Ext(f.Name) == \".go\"\n}\n\n\nfunc isPkgFile(f *os.FileInfo) bool {\n\treturn isGoFile(f) &&\n\t\t!strings.HasSuffix(f.Name, \"_test.go\") \/\/ ignore test files\n}\n\n\nfunc isPkgDir(f *os.FileInfo) bool {\n\treturn f.IsDirectory() && len(f.Name) > 0 && f.Name[0] != '_'\n}\n\n\nfunc firstSentence(s string) string {\n\ti := -1 \/\/ index+1 of first terminator (punctuation ending a sentence)\n\tj := -1 \/\/ index+1 of first terminator followed by white space\n\tprev := 'A'\n\tfor k, ch := range s {\n\t\tk1 := k + 1\n\t\tif ch == '.' || ch == '!' || ch == '?' {\n\t\t\tif i < 0 {\n\t\t\t\ti = k1 \/\/ first terminator\n\t\t\t}\n\t\t\tif k1 < len(s) && s[k1] <= ' ' {\n\t\t\t\tif j < 0 {\n\t\t\t\t\tj = k1 \/\/ first terminator followed by white space\n\t\t\t\t}\n\t\t\t\tif !unicode.IsUpper(prev) {\n\t\t\t\t\tj = k1\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tprev = ch\n\t}\n\n\tif j < 0 {\n\t\t\/\/ use the next best terminator\n\t\tj = i\n\t\tif j < 0 {\n\t\t\t\/\/ no terminator at all, use the entire string\n\t\t\tj = len(s)\n\t\t}\n\t}\n\n\treturn s[0:j]\n}\n\n\ntype treeBuilder struct {\n\tpathFilter func(string) bool\n\tmaxDepth int\n}\n\n\nfunc (b *treeBuilder) newDirTree(path, name string, depth int) *Directory {\n\tif b.pathFilter != nil && !b.pathFilter(path) {\n\t\treturn nil\n\t}\n\n\tif depth >= b.maxDepth {\n\t\t\/\/ return a dummy directory so that the parent directory\n\t\t\/\/ doesn't get discarded just because we reached the max\n\t\t\/\/ directory depth\n\t\treturn &Directory{depth, path, name, \"\", nil}\n\t}\n\n\tlist, _ := ioutil.ReadDir(path) \/\/ ignore errors\n\n\t\/\/ determine number of subdirectories and if there are package files\n\tndirs := 0\n\thasPkgFiles := false\n\tvar synopses [4]string \/\/ prioritized package documentation (0 == highest priority)\n\tfor _, d := range list {\n\t\tswitch {\n\t\tcase isPkgDir(d):\n\t\t\tndirs++\n\t\tcase isPkgFile(d):\n\t\t\t\/\/ looks like a package file, but may just be a file ending in \".go\";\n\t\t\t\/\/ don't just count it yet (otherwise we may end up with hasPkgFiles even\n\t\t\t\/\/ though the directory doesn't contain any real package files - was bug)\n\t\t\tif synopses[0] == \"\" {\n\t\t\t\t\/\/ no \"optimal\" package synopsis yet; continue to collect synopses\n\t\t\t\tfile, err := parser.ParseFile(pathutil.Join(path, d.Name), nil,\n\t\t\t\t\tparser.ParseComments|parser.PackageClauseOnly)\n\t\t\t\tif err == nil {\n\t\t\t\t\thasPkgFiles = true\n\t\t\t\t\tif file.Doc != nil {\n\t\t\t\t\t\t\/\/ prioritize documentation\n\t\t\t\t\t\ti := -1\n\t\t\t\t\t\tswitch file.Name.Name {\n\t\t\t\t\t\tcase name:\n\t\t\t\t\t\t\ti = 0 \/\/ normal case: directory name matches package name\n\t\t\t\t\t\tcase fakePkgName:\n\t\t\t\t\t\t\ti = 1 \/\/ synopses for commands\n\t\t\t\t\t\tcase \"main\":\n\t\t\t\t\t\t\ti = 2 \/\/ directory contains a main package\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\ti = 3 \/\/ none of the above\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif 0 <= i && i < len(synopses) && synopses[i] == \"\" {\n\t\t\t\t\t\t\tsynopses[i] = firstSentence(doc.CommentText(file.Doc))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ create subdirectory tree\n\tvar dirs []*Directory\n\tif ndirs > 0 {\n\t\tdirs = make([]*Directory, ndirs)\n\t\ti := 0\n\t\tfor _, d := range list {\n\t\t\tif isPkgDir(d) {\n\t\t\t\tdd := b.newDirTree(pathutil.Join(path, d.Name), d.Name, depth+1)\n\t\t\t\tif dd != nil {\n\t\t\t\t\tdirs[i] = dd\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirs = dirs[0:i]\n\t}\n\n\t\/\/ if there are no package files and no subdirectories\n\t\/\/ containing package files, ignore the directory\n\tif !hasPkgFiles && len(dirs) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ select the highest-priority synopsis for the directory entry, if any\n\tsynopsis := \"\"\n\tfor _, synopsis = range synopses {\n\t\tif synopsis != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &Directory{depth, path, name, synopsis, dirs}\n}\n\n\n\/\/ newDirectory creates a new package directory tree with at most maxDepth\n\/\/ levels, anchored at root. The result tree is pruned such that it only\n\/\/ contains directories that contain package files or that contain\n\/\/ subdirectories containing package files (transitively). If a non-nil\n\/\/ pathFilter is provided, directory paths additionally must be accepted\n\/\/ by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is\n\/\/ provided for maxDepth, nodes at larger depths are pruned as well; they\n\/\/ are assumed to contain package files even if their contents are not known\n\/\/ (i.e., in this case the tree may contain directories w\/o any package files).\n\/\/\nfunc newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {\n\td, err := os.Lstat(root)\n\tif err != nil || !isPkgDir(d) {\n\t\treturn nil\n\t}\n\tif maxDepth < 0 {\n\t\tmaxDepth = 1e6 \/\/ \"infinity\"\n\t}\n\tb := treeBuilder{pathFilter, maxDepth}\n\treturn b.newDirTree(root, d.Name, 0)\n}\n\n\nfunc (dir *Directory) writeLeafs(buf *bytes.Buffer) {\n\tif dir != nil {\n\t\tif len(dir.Dirs) == 0 {\n\t\t\tbuf.WriteString(dir.Path)\n\t\t\tbuf.WriteByte('\\n')\n\t\t\treturn\n\t\t}\n\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.writeLeafs(buf)\n\t\t}\n\t}\n}\n\n\nfunc (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {\n\tif dir != nil {\n\t\tif !skipRoot {\n\t\t\tc <- dir\n\t\t}\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.walk(c, false)\n\t\t}\n\t}\n}\n\n\nfunc (dir *Directory) iter(skipRoot bool) <-chan *Directory {\n\tc := make(chan *Directory)\n\tgo func() {\n\t\tdir.walk(c, skipRoot)\n\t\tclose(c)\n\t}()\n\treturn c\n}\n\n\nfunc (dir *Directory) lookupLocal(name string) *Directory {\n\tfor _, d := range dir.Dirs {\n\t\tif d.Name == name {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}\n\n\n\/\/ lookup looks for the *Directory for a given path, relative to dir.\nfunc (dir *Directory) lookup(path string) *Directory {\n\td := strings.Split(dir.Path, \"\/\", -1)\n\tp := strings.Split(path, \"\/\", -1)\n\ti := 0\n\tfor i < len(d) {\n\t\tif i >= len(p) || d[i] != p[i] {\n\t\t\treturn nil\n\t\t}\n\t\ti++\n\t}\n\tfor dir != nil && i < len(p) {\n\t\tdir = dir.lookupLocal(p[i])\n\t\ti++\n\t}\n\treturn dir\n}\n\n\n\/\/ DirEntry describes a directory entry. The Depth and Height values\n\/\/ are useful for presenting an entry in an indented fashion.\n\/\/\ntype DirEntry struct {\n\tDepth int \/\/ >= 0\n\tHeight int \/\/ = DirList.MaxHeight - Depth, > 0\n\tPath string \/\/ includes Name, relative to DirList root\n\tName string\n\tSynopsis string\n}\n\n\ntype DirList struct {\n\tMaxHeight int \/\/ directory tree height, > 0\n\tList []DirEntry\n}\n\n\n\/\/ listing creates a (linear) directory listing from a directory tree.\n\/\/ If skipRoot is set, the root directory itself is excluded from the list.\n\/\/\nfunc (root *Directory) listing(skipRoot bool) *DirList {\n\tif root == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ determine number of entries n and maximum height\n\tn := 0\n\tminDepth := 1 << 30 \/\/ infinity\n\tmaxDepth := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tn++\n\t\tif minDepth > d.Depth {\n\t\t\tminDepth = d.Depth\n\t\t}\n\t\tif maxDepth < d.Depth {\n\t\t\tmaxDepth = d.Depth\n\t\t}\n\t}\n\tmaxHeight := maxDepth - minDepth + 1\n\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ create list\n\tlist := make([]DirEntry, n)\n\ti := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tp := &list[i]\n\t\tp.Depth = d.Depth - minDepth\n\t\tp.Height = maxHeight - p.Depth\n\t\t\/\/ the path is relative to root.Path - remove the root.Path\n\t\t\/\/ prefix (the prefix should always be present but avoid\n\t\t\/\/ crashes and check)\n\t\tpath := d.Path\n\t\tif strings.HasPrefix(d.Path, root.Path) {\n\t\t\tpath = d.Path[len(root.Path):]\n\t\t}\n\t\t\/\/ remove trailing '\/' if any - path must be relative\n\t\tif len(path) > 0 && path[0] == '\/' {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tp.Path = path\n\t\tp.Name = d.Name\n\t\tp.Synopsis = d.Text\n\t\ti++\n\t}\n\n\treturn &DirList{maxHeight, list}\n}\n<|endoftext|>"} {"text":"<commit_before>package sej\n\nimport \"testing\"\n\nfunc TestReadThroughSegmentBoundary(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\"}\n\n\tpath := newTestPath(t)\n\tw := newTestWriter(t, path, metaSize+1)\n\twriteTestMessages(t, w, messages...)\n\tcloseTestWriter(t, w)\n\n\tr, err := NewScanner(path, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer r.Close()\n\tverifyReadMessages(t, path, messages...)\n}\n\nfunc TestReadFromOffset(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\", \"d\", \"e\"}\n\tfor _, segmentSize := range []int{metaSize + 1, (metaSize + 1) * 2, 1000} {\n\t\tfunc() {\n\t\t\tpath := newTestPath(t)\n\t\t\tw := newTestWriter(t, path, segmentSize)\n\t\t\twriteTestMessages(t, w, messages...)\n\t\t\tcloseTestWriter(t, w)\n\n\t\t\tfor i, expectedMsg := range messages {\n\t\t\t\tfunc() {\n\t\t\t\t\t\/\/ create a new reader starting from i\n\t\t\t\t\tr, err := NewScanner(path, uint64(i))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tdefer r.Close()\n\t\t\t\t\tr.Scan()\n\t\t\t\t\tif r.Err() != nil {\n\t\t\t\t\t\tt.Fatal(r.Err())\n\t\t\t\t\t}\n\t\t\t\t\tactualMsg := string(r.Message().Value)\n\t\t\t\t\tif actualMsg != expectedMsg {\n\t\t\t\t\t\tt.Fatalf(\"expect msg %s, got %s\", expectedMsg, actualMsg)\n\t\t\t\t\t}\n\t\t\t\t\tnextOffset := uint64(i + 1)\n\t\t\t\t\tif r.Offset() != nextOffset {\n\t\t\t\t\t\tt.Fatalf(\"expect offset %d, got %d\", nextOffset, r.Offset())\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestReadBeforeWrite(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\", \"d\", \"e\"}\n\tfor _, segmentSize := range []int{metaSize + 1, (metaSize + 1) * 2, 1000} {\n\t\tfunc() {\n\t\t\tpath := newTestPath(t)\n\t\t\tr, err := NewScanner(path, 0)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer r.Close()\n\n\t\t\tdone := make(chan bool)\n\t\t\tdefer func() { <-done }()\n\t\t\tgo func() {\n\t\t\t\tw := newTestWriter(t, path, segmentSize)\n\t\t\t\twriteTestMessages(t, w, messages...)\n\t\t\t\tcloseTestWriter(t, w)\n\t\t\t\tdone <- true\n\t\t\t}()\n\t\t\tfor i := range messages {\n\t\t\t\tr.Scan()\n\t\t\t\tif r.Err() != nil {\n\t\t\t\t\tt.Fatal(r.Err())\n\t\t\t\t}\n\t\t\t\tactualMsg, expectedMsg := string(r.Message().Value), messages[i]\n\t\t\t\tif actualMsg != expectedMsg {\n\t\t\t\t\tt.Fatalf(\"expect msg %s, got %s\", expectedMsg, actualMsg)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n<commit_msg>test reading monitored file<commit_after>package sej\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestReadThroughSegmentBoundary(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\"}\n\n\tpath := newTestPath(t)\n\tw := newTestWriter(t, path, metaSize+1)\n\twriteTestMessages(t, w, messages...)\n\tcloseTestWriter(t, w)\n\n\tr, err := NewScanner(path, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer r.Close()\n\tverifyReadMessages(t, path, messages...)\n}\n\nfunc TestReadFromOffset(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\", \"d\", \"e\"}\n\tfor _, segmentSize := range []int{metaSize + 1, (metaSize + 1) * 2, 1000} {\n\t\tfunc() {\n\t\t\tpath := newTestPath(t)\n\t\t\tw := newTestWriter(t, path, segmentSize)\n\t\t\twriteTestMessages(t, w, messages...)\n\t\t\tcloseTestWriter(t, w)\n\n\t\t\tfor i, expectedMsg := range messages {\n\t\t\t\tfunc() {\n\t\t\t\t\t\/\/ create a new reader starting from i\n\t\t\t\t\tr, err := NewScanner(path, uint64(i))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tdefer r.Close()\n\t\t\t\t\tr.Scan()\n\t\t\t\t\tif r.Err() != nil {\n\t\t\t\t\t\tt.Fatal(r.Err())\n\t\t\t\t\t}\n\t\t\t\t\tactualMsg := string(r.Message().Value)\n\t\t\t\t\tif actualMsg != expectedMsg {\n\t\t\t\t\t\tt.Fatalf(\"expect msg %s, got %s\", expectedMsg, actualMsg)\n\t\t\t\t\t}\n\t\t\t\t\tnextOffset := uint64(i + 1)\n\t\t\t\t\tif r.Offset() != nextOffset {\n\t\t\t\t\t\tt.Fatalf(\"expect offset %d, got %d\", nextOffset, r.Offset())\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestReadBeforeWrite(t *testing.T) {\n\tmessages := []string{\"a\", \"b\", \"c\", \"d\", \"e\"}\n\tfor _, segmentSize := range []int{metaSize + 1, (metaSize + 1) * 2, 1000} {\n\t\tfunc() {\n\t\t\tpath := newTestPath(t)\n\t\t\tr, err := NewScanner(path, 0)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer r.Close()\n\n\t\t\tdone := make(chan bool)\n\t\t\tdefer func() { <-done }()\n\t\t\tgo func() {\n\t\t\t\tw := newTestWriter(t, path, segmentSize)\n\t\t\t\twriteTestMessages(t, w, messages...)\n\t\t\t\tcloseTestWriter(t, w)\n\t\t\t\tdone <- true\n\t\t\t}()\n\t\t\tfor i := range messages {\n\t\t\t\tif !r.Scan() {\n\t\t\t\t\tt.Fatal(\"Scan should return true\")\n\t\t\t\t}\n\t\t\t\tif r.Err() != nil {\n\t\t\t\t\tt.Fatal(r.Err())\n\t\t\t\t}\n\t\t\t\tactualMsg, expectedMsg := string(r.Message().Value), messages[i]\n\t\t\t\tif actualMsg != expectedMsg {\n\t\t\t\t\tt.Fatalf(\"expect msg %s, got %s\", expectedMsg, actualMsg)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestReadMonitoredFile(t *testing.T) {\n\tpath := newTestPath(t)\n\tw := newTestWriter(t, path, 1000)\n\n\tr, err := NewScanner(path, 0)\n\tr.Timeout = time.Second\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer r.Close()\n\n\twriteTestMessages(t, w, \"a\")\n\tflushTestWriter(t, w)\n\n\tif !r.Scan() {\n\t\tt.Fatal(\"Scan should return true\")\n\t}\n\tif r.Err() != nil {\n\t\tt.Fatal(r.Err())\n\t}\n\tactualMsg, expectedMsg := string(r.Message().Value), \"a\"\n\tif actualMsg != expectedMsg {\n\t\tt.Fatalf(\"expect msg %s, got %s\", expectedMsg, actualMsg)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage installer\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/fsouza\/go-dockerclient\/testing\"\n\t\"gopkg.in\/check.v1\"\n)\n\ntype testEndpoint struct {\n\tendpoint string\n\tcertPath string\n}\n\nfunc (t testEndpoint) dockerClient() (*docker.Client, error) {\n\treturn docker.NewTLSClient(\n\t\tt.endpoint,\n\t\tfilepath.Join(t.certPath, \"cert.pem\"),\n\t\tfilepath.Join(t.certPath, \"key.pem\"),\n\t\tfilepath.Join(t.certPath, \"ca.pem\"),\n\t)\n}\n\nfunc (s *S) TestCreateContainer(c *check.C) {\n\tvar requests []*http.Request\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", nil, func(r *http.Request) {\n\t\tif r.URL.Path != \"\/version\" {\n\t\t\trequests = append(requests, r)\n\t\t}\n\t}, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\terr = createContainer(endpoint, \"contName\", config, nil)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(requests, check.HasLen, 4)\n\tc.Assert(requests[0].URL.Path, check.Equals, \"\/images\/create\")\n\tc.Assert(requests[1].URL.Path, check.Equals, \"\/images\/tsuru\/api:v1\/json\")\n\tc.Assert(requests[2].URL.Path, check.Equals, \"\/containers\/create\")\n\tc.Assert(requests[3].URL.Path, check.Equals, \"\/containers\/contName\/start\")\n}\n\nfunc (s *S) TestCreateContainerWithExposedPorts(c *check.C) {\n\tcontainerChan := make(chan *docker.Container, 2)\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", containerChan, nil, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tserver.CustomHandler(\"\/images\/.*\/json\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\timage := docker.Image{\n\t\t\tID: \"tsuru\/api\",\n\t\t\tConfig: &docker.Config{\n\t\t\t\tExposedPorts: map[docker.Port]struct{}{\n\t\t\t\t\tdocker.Port(\"90\/tcp\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tbuf, errMarshal := json.Marshal(image)\n\t\tc.Assert(errMarshal, check.IsNil)\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(buf)\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer server.CustomHandler(\"\/images\/.*\/json\", server.DefaultHandler())\n\texpected := map[docker.Port][]docker.PortBinding{\n\t\t\"90\/tcp\": []docker.PortBinding{\n\t\t\tdocker.PortBinding{HostIP: \"0.0.0.0\", HostPort: \"90\"},\n\t\t},\n\t}\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\terr = createContainer(endpoint, \"contName\", config, nil)\n\tc.Assert(err, check.IsNil)\n\tcont := <-containerChan\n\tc.Assert(cont, check.NotNil)\n\tc.Assert(expected, check.DeepEquals, cont.HostConfig.PortBindings)\n}\n\nfunc (s *S) TestCreateContainerWithHostConfigAndExposedPorts(c *check.C) {\n\tcontainerChan := make(chan *docker.Container, 2)\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", containerChan, nil, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tserver.CustomHandler(\"\/images\/.*\/json\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\timage := docker.Image{\n\t\t\tID: \"tsuru\/api\",\n\t\t\tConfig: &docker.Config{\n\t\t\t\tExposedPorts: map[docker.Port]struct{}{\n\t\t\t\t\tdocker.Port(\"90\/tcp\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tbuf, errMarshal := json.Marshal(image)\n\t\tc.Assert(errMarshal, check.IsNil)\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(buf)\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer server.CustomHandler(\"\/images\/.*\/json\", server.DefaultHandler())\n\texpected := map[docker.Port][]docker.PortBinding{\n\t\t\"90\/tcp\": []docker.PortBinding{\n\t\t\tdocker.PortBinding{HostIP: \"0.0.0.0\", HostPort: \"90\"},\n\t\t},\n\t\t\"100\/tcp\": []docker.PortBinding{\n\t\t\tdocker.PortBinding{HostIP: \"0.0.0.0\", HostPort: \"100\"},\n\t\t},\n\t}\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\thostConfig := &docker.HostConfig{\n\t\tPortBindings: map[docker.Port][]docker.PortBinding{\n\t\t\t\"100\/tcp\": []docker.PortBinding{\n\t\t\t\tdocker.PortBinding{HostIP: \"0.0.0.0\", HostPort: \"100\"},\n\t\t\t},\n\t\t},\n\t}\n\terr = createContainer(endpoint, \"contName\", config, hostConfig)\n\tc.Assert(err, check.IsNil)\n\tcont := <-containerChan\n\tc.Assert(cont, check.NotNil)\n\tc.Assert(expected, check.DeepEquals, cont.HostConfig.PortBindings)\n}\n<commit_msg>installer: simplify code<commit_after>\/\/ Copyright 2016 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage installer\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/fsouza\/go-dockerclient\/testing\"\n\t\"gopkg.in\/check.v1\"\n)\n\ntype testEndpoint struct {\n\tendpoint string\n\tcertPath string\n}\n\nfunc (t testEndpoint) dockerClient() (*docker.Client, error) {\n\treturn docker.NewTLSClient(\n\t\tt.endpoint,\n\t\tfilepath.Join(t.certPath, \"cert.pem\"),\n\t\tfilepath.Join(t.certPath, \"key.pem\"),\n\t\tfilepath.Join(t.certPath, \"ca.pem\"),\n\t)\n}\n\nfunc (s *S) TestCreateContainer(c *check.C) {\n\tvar requests []*http.Request\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", nil, func(r *http.Request) {\n\t\tif r.URL.Path != \"\/version\" {\n\t\t\trequests = append(requests, r)\n\t\t}\n\t}, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\terr = createContainer(endpoint, \"contName\", config, nil)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(requests, check.HasLen, 4)\n\tc.Assert(requests[0].URL.Path, check.Equals, \"\/images\/create\")\n\tc.Assert(requests[1].URL.Path, check.Equals, \"\/images\/tsuru\/api:v1\/json\")\n\tc.Assert(requests[2].URL.Path, check.Equals, \"\/containers\/create\")\n\tc.Assert(requests[3].URL.Path, check.Equals, \"\/containers\/contName\/start\")\n}\n\nfunc (s *S) TestCreateContainerWithExposedPorts(c *check.C) {\n\tcontainerChan := make(chan *docker.Container, 2)\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", containerChan, nil, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tserver.CustomHandler(\"\/images\/.*\/json\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\timage := docker.Image{\n\t\t\tID: \"tsuru\/api\",\n\t\t\tConfig: &docker.Config{\n\t\t\t\tExposedPorts: map[docker.Port]struct{}{\n\t\t\t\t\tdocker.Port(\"90\/tcp\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tbuf, errMarshal := json.Marshal(image)\n\t\tc.Assert(errMarshal, check.IsNil)\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(buf)\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer server.CustomHandler(\"\/images\/.*\/json\", server.DefaultHandler())\n\texpected := map[docker.Port][]docker.PortBinding{\n\t\t\"90\/tcp\": {\n\t\t\t{HostIP: \"0.0.0.0\", HostPort: \"90\"},\n\t\t},\n\t}\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\terr = createContainer(endpoint, \"contName\", config, nil)\n\tc.Assert(err, check.IsNil)\n\tcont := <-containerChan\n\tc.Assert(cont, check.NotNil)\n\tc.Assert(expected, check.DeepEquals, cont.HostConfig.PortBindings)\n}\n\nfunc (s *S) TestCreateContainerWithHostConfigAndExposedPorts(c *check.C) {\n\tcontainerChan := make(chan *docker.Container, 2)\n\ttlsConfig := testing.TLSConfig{\n\t\tCertPath: s.TLSCertsPath.ServerCert,\n\t\tCertKeyPath: s.TLSCertsPath.ServerKey,\n\t\tRootCAPath: s.TLSCertsPath.RootCert,\n\t}\n\tserver, err := testing.NewTLSServer(\"127.0.0.1:0\", containerChan, nil, tlsConfig)\n\tc.Assert(err, check.IsNil)\n\tdefer server.Stop()\n\tserver.CustomHandler(\"\/images\/.*\/json\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\timage := docker.Image{\n\t\t\tID: \"tsuru\/api\",\n\t\t\tConfig: &docker.Config{\n\t\t\t\tExposedPorts: map[docker.Port]struct{}{\n\t\t\t\t\tdocker.Port(\"90\/tcp\"): {},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tbuf, errMarshal := json.Marshal(image)\n\t\tc.Assert(errMarshal, check.IsNil)\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(buf)\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer server.CustomHandler(\"\/images\/.*\/json\", server.DefaultHandler())\n\texpected := map[docker.Port][]docker.PortBinding{\n\t\t\"90\/tcp\": {\n\t\t\t{HostIP: \"0.0.0.0\", HostPort: \"90\"},\n\t\t},\n\t\t\"100\/tcp\": {\n\t\t\t{HostIP: \"0.0.0.0\", HostPort: \"100\"},\n\t\t},\n\t}\n\tendpoint := testEndpoint{endpoint: server.URL(), certPath: s.TLSCertsPath.RootDir}\n\tconfig := &docker.Config{Image: \"tsuru\/api:v1\"}\n\thostConfig := &docker.HostConfig{\n\t\tPortBindings: map[docker.Port][]docker.PortBinding{\n\t\t\t\"100\/tcp\": {\n\t\t\t\t{HostIP: \"0.0.0.0\", HostPort: \"100\"},\n\t\t\t},\n\t\t},\n\t}\n\terr = createContainer(endpoint, \"contName\", config, hostConfig)\n\tc.Assert(err, check.IsNil)\n\tcont := <-containerChan\n\tc.Assert(cont, check.NotNil)\n\tc.Assert(expected, check.DeepEquals, cont.HostConfig.PortBindings)\n}\n<|endoftext|>"} {"text":"<commit_before>package ledis\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nconst (\n\tendPos int = -1\n)\n\nfunc bin(sz string) []byte {\n\treturn []byte(sz)\n}\n\nfunc pair(memb string, score int) ScorePair {\n\treturn ScorePair{int64(score), bin(memb)}\n}\n\nfunc TestZSetCodec(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := []byte(\"key\")\n\tmember := []byte(\"member\")\n\n\tek := db.zEncodeSizeKey(key)\n\tif k, err := db.zDecodeSizeKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t}\n\n\tek = db.zEncodeSetKey(key, member)\n\tif k, m, err := db.zDecodeSetKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t} else if string(m) != \"member\" {\n\t\tt.Fatal(string(m))\n\t}\n\n\tek = db.zEncodeScoreKey(key, member, 100)\n\tif k, m, s, err := db.zDecodeScoreKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t} else if string(m) != \"member\" {\n\t\tt.Fatal(string(m))\n\t} else if s != 100 {\n\t\tt.Fatal(s)\n\t}\n\n}\n\nfunc TestDBZSet(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := bin(\"testdb_zset_a\")\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':3}\n\tif n, err := db.ZAdd(key, pair(\"a\", 0), pair(\"b\", 1),\n\t\tpair(\"c\", 2), pair(\"d\", 3)); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 4 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCount(key, 0, 0XFF); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 4 {\n\t\tt.Fatal(n)\n\t}\n\n\t\/\/ {c':2, 'd':3}\n\tif n, err := db.ZRem(key, bin(\"a\"), bin(\"b\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZRem(key, bin(\"a\"), bin(\"b\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCard(key); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\t\/\/ {}\n\tif n, err := db.ZClear(key); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCount(key, 0, 0XFF); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n}\n\nfunc TestZSetOrder(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := bin(\"testdb_zset_order\")\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5}\n\tmembs := [...]string{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n\tmembCnt := len(membs)\n\n\tfor i := 0; i < membCnt; i++ {\n\t\tdb.ZAdd(key, pair(membs[i], i))\n\t}\n\n\tif n, _ := db.ZCount(key, 0, 0XFFFF); int(n) != membCnt {\n\t\tt.Fatal(n)\n\t}\n\n\tfor i := 0; i < membCnt; i++ {\n\t\tif pos, err := db.ZRank(key, bin(membs[i])); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if int(pos) != i {\n\t\t\tt.Fatal(pos)\n\t\t}\n\n\t\tif pos, err := db.ZRevRank(key, bin(membs[i])); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if int(pos) != membCnt-i-1 {\n\t\t\tt.Fatal(pos)\n\t\t}\n\t}\n\n\tif qMembs, err := db.ZRange(key, 0, endPos, false); err != nil {\n\t\tt.Fatal(err)\n\t} else if len(qMembs) != membCnt {\n\t\tt.Fatal(fmt.Sprintf(\"%d vs %d\", len(qMembs), membCnt))\n\t} else {\n\t\tfor i := 0; i < membCnt; i++ {\n\t\t\tif memb, ok := qMembs[i].([]byte); !ok {\n\t\t\t\tt.Fatal(ok)\n\t\t\t} else if string(memb) != membs[i] {\n\t\t\t\tt.Fatal(fmt.Sprintf(\"[%s] vs [%s]\", qMembs[i], membs[i]))\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':999, 'e':4, 'f':5}\n\tif n, err := db.ZAdd(key, pair(\"d\", 999)); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"d\")); int(pos) != membCnt-1 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRevRank(key, bin(\"d\")); int(pos) != 0 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"e\")); int(pos) != 3 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"f\")); int(pos) != 4 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif qMembs, err := db.ZRangeByScore(key, 999, 0XFFFF, false, 0, membCnt); err != nil {\n\t\tt.Fatal(err)\n\t} else if len(qMembs) != 1 {\n\t\tt.Fatal(len(qMembs))\n\t}\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':999, 'e':6, 'f':5}\n\tif _, err := db.ZIncrBy(key, 2, bin(\"e\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"e\")); int(pos) != 4 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRevRank(key, bin(\"e\")); int(pos) != 1 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif dropCnt, _ := testLedis.FlushDB(); dropCnt <= 0 {\n\t\tt.Fatal(dropCnt)\n\t}\n\n\tif n, _ := db.ZCount(key, 0, 0XFFFF); n > 0 {\n\t\tt.Fatal(n)\n\t}\n}\n<commit_msg>remove 'FlushDB' from test<commit_after>package ledis\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nconst (\n\tendPos int = -1\n)\n\nfunc bin(sz string) []byte {\n\treturn []byte(sz)\n}\n\nfunc pair(memb string, score int) ScorePair {\n\treturn ScorePair{int64(score), bin(memb)}\n}\n\nfunc TestZSetCodec(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := []byte(\"key\")\n\tmember := []byte(\"member\")\n\n\tek := db.zEncodeSizeKey(key)\n\tif k, err := db.zDecodeSizeKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t}\n\n\tek = db.zEncodeSetKey(key, member)\n\tif k, m, err := db.zDecodeSetKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t} else if string(m) != \"member\" {\n\t\tt.Fatal(string(m))\n\t}\n\n\tek = db.zEncodeScoreKey(key, member, 100)\n\tif k, m, s, err := db.zDecodeScoreKey(ek); err != nil {\n\t\tt.Fatal(err)\n\t} else if string(k) != \"key\" {\n\t\tt.Fatal(string(k))\n\t} else if string(m) != \"member\" {\n\t\tt.Fatal(string(m))\n\t} else if s != 100 {\n\t\tt.Fatal(s)\n\t}\n\n}\n\nfunc TestDBZSet(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := bin(\"testdb_zset_a\")\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':3}\n\tif n, err := db.ZAdd(key, pair(\"a\", 0), pair(\"b\", 1),\n\t\tpair(\"c\", 2), pair(\"d\", 3)); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 4 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCount(key, 0, 0XFF); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 4 {\n\t\tt.Fatal(n)\n\t}\n\n\t\/\/ {c':2, 'd':3}\n\tif n, err := db.ZRem(key, bin(\"a\"), bin(\"b\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZRem(key, bin(\"a\"), bin(\"b\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCard(key); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\t\/\/ {}\n\tif n, err := db.ZClear(key); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 2 {\n\t\tt.Fatal(n)\n\t}\n\n\tif n, err := db.ZCount(key, 0, 0XFF); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n}\n\nfunc TestZSetOrder(t *testing.T) {\n\tdb := getTestDB()\n\n\tkey := bin(\"testdb_zset_order\")\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5}\n\tmembs := [...]string{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n\tmembCnt := len(membs)\n\n\tfor i := 0; i < membCnt; i++ {\n\t\tdb.ZAdd(key, pair(membs[i], i))\n\t}\n\n\tif n, _ := db.ZCount(key, 0, 0XFFFF); int(n) != membCnt {\n\t\tt.Fatal(n)\n\t}\n\n\tfor i := 0; i < membCnt; i++ {\n\t\tif pos, err := db.ZRank(key, bin(membs[i])); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if int(pos) != i {\n\t\t\tt.Fatal(pos)\n\t\t}\n\n\t\tif pos, err := db.ZRevRank(key, bin(membs[i])); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if int(pos) != membCnt-i-1 {\n\t\t\tt.Fatal(pos)\n\t\t}\n\t}\n\n\tif qMembs, err := db.ZRange(key, 0, endPos, false); err != nil {\n\t\tt.Fatal(err)\n\t} else if len(qMembs) != membCnt {\n\t\tt.Fatal(fmt.Sprintf(\"%d vs %d\", len(qMembs), membCnt))\n\t} else {\n\t\tfor i := 0; i < membCnt; i++ {\n\t\t\tif memb, ok := qMembs[i].([]byte); !ok {\n\t\t\t\tt.Fatal(ok)\n\t\t\t} else if string(memb) != membs[i] {\n\t\t\t\tt.Fatal(fmt.Sprintf(\"[%s] vs [%s]\", qMembs[i], membs[i]))\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':999, 'e':4, 'f':5}\n\tif n, err := db.ZAdd(key, pair(\"d\", 999)); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != 0 {\n\t\tt.Fatal(n)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"d\")); int(pos) != membCnt-1 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRevRank(key, bin(\"d\")); int(pos) != 0 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"e\")); int(pos) != 3 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"f\")); int(pos) != 4 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif qMembs, err := db.ZRangeByScore(key, 999, 0XFFFF, false, 0, membCnt); err != nil {\n\t\tt.Fatal(err)\n\t} else if len(qMembs) != 1 {\n\t\tt.Fatal(len(qMembs))\n\t}\n\n\t\/\/ {'a':0, 'b':1, 'c':2, 'd':999, 'e':6, 'f':5}\n\tif _, err := db.ZIncrBy(key, 2, bin(\"e\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif pos, _ := db.ZRank(key, bin(\"e\")); int(pos) != 4 {\n\t\tt.Fatal(pos)\n\t}\n\n\tif pos, _ := db.ZRevRank(key, bin(\"e\")); int(pos) != 1 {\n\t\tt.Fatal(pos)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n)\n\ntype Event struct {\n\tContainerId string `json:\"id\"`\n\tStatus string `json:\"status\"`\n\tImage string `json:\"from\"`\n}\n\ntype ContainerConfig struct {\n\tHostname string\n}\n\ntype NetworkSettings struct {\n\tIpAddress string\n\tPortMapping map[string]map[string]string\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tConfig *ContainerConfig\n\tNetworkSettings *NetworkSettings\n}\n\ntype Config struct {\n\tRedisProto string `toml:\"redisproto\"`\n\tRedisAddr string `toml:\"redisaddr\"`\n\tDockerApi string `toml:\"docker\"`\n\tRoutes map[string]RouteHost `toml:\"routes\"`\n}\n\ntype RouteHost struct {\n\tHostnames []string `toml:\"hostname\"`\n\tPort string `toml:\"port\"`\n}\n\ntype Handler func(c *Container) error\n\nfunc inspectContainer(id string, image string, c http.Client, config Config) (*Container, error) {\n\t\/\/ Use the container id to fetch the container json from the Remote API\n\t\/\/ http:\/\/docs.docker.io\/en\/latest\/api\/docker_remote_api_v1.4\/#inspect-a-container\n\tres, err := c.Get(fmt.Sprintf(\"%s\/containers\/%s\/json\", config.DockerApi))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusOK {\n\t\td := json.NewDecoder(res.Body)\n\n\t\tvar container Container\n\t\tif err = d.Decode(&container); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontainer.Image = image\n\t\treturn &container, nil\n\t}\n\treturn nil, fmt.Errorf(\"Invalid status code from api: %d\", res.StatusCode)\n}\n\nfunc initRoutes(config Config, h *Hipache) error {\n\tfor _, route := range config.Routes {\n\t\tfor _, hostname := range route.Hostnames {\n\t\t\tif err := h.CreateRoute(hostname); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupHandlers(h *Hipache) map[string]Handler {\n\tout := make(map[string]Handler, 2)\n\n\tout[\"start\"] = h.AddRoute\n\n\tout[\"stop\"] = func(c *Container) error {\n\t\thostPort, err := h.FetchRoutes(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor host, port := range hostPort {\n\t\t\tif err := h.RemoveRoute(host, port); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn out\n}\n\nfunc main() {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar config Config\n\tconfigPath := flag.String(\"conf\", path.Join(cwd, \"docker-hipache.toml\"), \"Path to the toml config file\")\n\tif _, err := toml.DecodeFile(*configPath, &config); err != nil {\n\t\tlog.Fatalf(\"Could not load config: %s\", err)\n\t\treturn\n\t}\n\n\th := NewHipache(config.RedisProto, config.RedisAddr, config.Routes)\n\n\tif err := initRoutes(config, h); err != nil {\n\t\tlog.Fatalf(\"Could not init routes: %s\", err)\n\t\treturn\n\t}\n\n\thandlers := setupHandlers(h)\n\n\tc := http.Client{}\n\tres, err := c.Get(fmt.Sprintf(\"%s\/events\", config.DockerApi))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\t\/\/ Read the streaming json from the events endpoint\n\t\/\/ http:\/\/docs.docker.io\/en\/latest\/api\/docker_remote_api_v1.3\/#monitor-docker-s-events\n\td := json.NewDecoder(res.Body)\n\tfor {\n\t\tlog.Println(\"New event received\")\n\n\t\tvar event Event\n\t\tif err := d.Decode(&event); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif handler, exists := handlers[event.Status]; exists {\n\t\t\tcontainer, err := inspectContainer(event.ContainerId, event.Image, c, config)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Could not inspect container: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := handler(container); err != nil {\n\t\t\t\tlog.Fatalf(\"Could not handle event. Id: %s, Image: %s, Error: %s\", event.ContainerId, event.Image, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"Event stream complete, shutting down\")\n}\n<commit_msg>Add missing flag.Parse<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n)\n\ntype Event struct {\n\tContainerId string `json:\"id\"`\n\tStatus string `json:\"status\"`\n\tImage string `json:\"from\"`\n}\n\ntype ContainerConfig struct {\n\tHostname string\n}\n\ntype NetworkSettings struct {\n\tIpAddress string\n\tPortMapping map[string]map[string]string\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tConfig *ContainerConfig\n\tNetworkSettings *NetworkSettings\n}\n\ntype Config struct {\n\tRedisProto string `toml:\"redisproto\"`\n\tRedisAddr string `toml:\"redisaddr\"`\n\tDockerApi string `toml:\"docker\"`\n\tRoutes map[string]RouteHost `toml:\"routes\"`\n}\n\ntype RouteHost struct {\n\tHostnames []string `toml:\"hostname\"`\n\tPort string `toml:\"port\"`\n}\n\ntype Handler func(c *Container) error\n\nfunc inspectContainer(id string, image string, c http.Client, config Config) (*Container, error) {\n\t\/\/ Use the container id to fetch the container json from the Remote API\n\t\/\/ http:\/\/docs.docker.io\/en\/latest\/api\/docker_remote_api_v1.4\/#inspect-a-container\n\tres, err := c.Get(fmt.Sprintf(\"%s\/containers\/%s\/json\", config.DockerApi))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == http.StatusOK {\n\t\td := json.NewDecoder(res.Body)\n\n\t\tvar container Container\n\t\tif err = d.Decode(&container); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontainer.Image = image\n\t\treturn &container, nil\n\t}\n\treturn nil, fmt.Errorf(\"Invalid status code from api: %d\", res.StatusCode)\n}\n\nfunc initRoutes(config Config, h *Hipache) error {\n\tfor _, route := range config.Routes {\n\t\tfor _, hostname := range route.Hostnames {\n\t\t\tif err := h.CreateRoute(hostname); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupHandlers(h *Hipache) map[string]Handler {\n\tout := make(map[string]Handler, 2)\n\n\tout[\"start\"] = h.AddRoute\n\n\tout[\"stop\"] = func(c *Container) error {\n\t\thostPort, err := h.FetchRoutes(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor host, port := range hostPort {\n\t\t\tif err := h.RemoveRoute(host, port); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn out\n}\n\nfunc main() {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar config Config\n\tconfigPath := flag.String(\"conf\", path.Join(cwd, \"docker-hipache.toml\"), \"Path to the toml config file\")\n\n\tflag.Parse()\n\n\tif _, err := toml.DecodeFile(*configPath, &config); err != nil {\n\t\tlog.Fatalf(\"Could not load config: %s\", err)\n\t\treturn\n\t}\n\n\th := NewHipache(config.RedisProto, config.RedisAddr, config.Routes)\n\n\tif err := initRoutes(config, h); err != nil {\n\t\tlog.Fatalf(\"Could not init routes: %s\", err)\n\t\treturn\n\t}\n\n\thandlers := setupHandlers(h)\n\n\tc := http.Client{}\n\tres, err := c.Get(fmt.Sprintf(\"%s\/events\", config.DockerApi))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\t\/\/ Read the streaming json from the events endpoint\n\t\/\/ http:\/\/docs.docker.io\/en\/latest\/api\/docker_remote_api_v1.3\/#monitor-docker-s-events\n\td := json.NewDecoder(res.Body)\n\tfor {\n\t\tlog.Println(\"New event received\")\n\n\t\tvar event Event\n\t\tif err := d.Decode(&event); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif handler, exists := handlers[event.Status]; exists {\n\t\t\tcontainer, err := inspectContainer(event.ContainerId, event.Image, c, config)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Could not inspect container: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := handler(container); err != nil {\n\t\t\t\tlog.Fatalf(\"Could not handle event. Id: %s, Image: %s, Error: %s\", event.ContainerId, event.Image, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"Event stream complete, shutting down\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ checkpoint is a package for checking version information and alerts\n\/\/ for a HashiCorp product.\npackage checkpoint\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\tmrand \"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-cleanhttp\"\n\tuuid \"github.com\/hashicorp\/go-uuid\"\n)\n\nvar magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB}\n\n\/\/ ReportParams are the parameters for configuring a telemetry report.\ntype ReportParams struct {\n\t\/\/ Signature is some random signature that should be stored and used\n\t\/\/ as a cookie-like value. This ensures that alerts aren't repeated.\n\t\/\/ If the signature is changed, repeat alerts may be sent down. The\n\t\/\/ signature should NOT be anything identifiable to a user (such as\n\t\/\/ a MAC address). It should be random.\n\t\/\/\n\t\/\/ If SignatureFile is given, then the signature will be read from this\n\t\/\/ file. If the file doesn't exist, then a random signature will\n\t\/\/ automatically be generated and stored here. SignatureFile will be\n\t\/\/ ignored if Signature is given.\n\tSignature string `json:\"signature\"`\n\tSignatureFile string `json:\"-\"`\n\n\tStartTime time.Time `json:\"start_time\"`\n\tEndTime time.Time `json:\"end_time\"`\n\tArch string `json:\"arch\"`\n\tOS string `json:\"os\"`\n\tPayload interface{} `json:\"payload,omitempty\"`\n\tProduct string `json:\"product\"`\n\tRunID string `json:\"run_id\"`\n\tSchemaVersion string `json:\"schema_version\"`\n\tVersion string `json:\"version\"`\n}\n\nfunc (i *ReportParams) signature() string {\n\tsignature := i.Signature\n\tif i.Signature == \"\" && i.SignatureFile != \"\" {\n\t\tvar err error\n\t\tsignature, err = checkSignature(i.SignatureFile)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn signature\n}\n\n\/\/ Report sends telemetry information to checkpoint\nfunc Report(ctx context.Context, r *ReportParams) error {\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" {\n\t\treturn nil\n\t}\n\n\treq, err := ReportRequest(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := cleanhttp.DefaultClient()\n\tresp, err := client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 201 {\n\t\treturn fmt.Errorf(\"Unknown status: %d\", resp.StatusCode)\n\t}\n\n\treturn nil\n}\n\n\/\/ ReportRequest creates a request object for making a report\nfunc ReportRequest(r *ReportParams) (*http.Request, error) {\n\t\/\/ Populate some fields automatically if we can\n\tif r.RunID == \"\" {\n\t\tuuid, err := uuid.GenerateUUID()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.RunID = uuid\n\t}\n\tif r.Arch == \"\" {\n\t\tr.Arch = runtime.GOARCH\n\t}\n\tif r.OS == \"\" {\n\t\tr.OS = runtime.GOOS\n\t}\n\tif r.Signature == \"\" {\n\t\tr.Signature = r.signature()\n\t}\n\n\tb, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"checkpoint-api.hashicorp.com\",\n\t\tPath: fmt.Sprintf(\"\/v1\/telemetry\/%s\", r.Product),\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"HashiCorp\/go-checkpoint\")\n\n\treturn req, nil\n}\n\n\/\/ CheckParams are the parameters for configuring a check request.\ntype CheckParams struct {\n\t\/\/ Product and version are used to lookup the correct product and\n\t\/\/ alerts for the proper version. The version is also used to perform\n\t\/\/ a version check.\n\tProduct string\n\tVersion string\n\n\t\/\/ Arch and OS are used to filter alerts potentially only to things\n\t\/\/ affecting a specific os\/arch combination. If these aren't specified,\n\t\/\/ they'll be automatically filled in.\n\tArch string\n\tOS string\n\n\t\/\/ Signature is some random signature that should be stored and used\n\t\/\/ as a cookie-like value. This ensures that alerts aren't repeated.\n\t\/\/ If the signature is changed, repeat alerts may be sent down. The\n\t\/\/ signature should NOT be anything identifiable to a user (such as\n\t\/\/ a MAC address). It should be random.\n\t\/\/\n\t\/\/ If SignatureFile is given, then the signature will be read from this\n\t\/\/ file. If the file doesn't exist, then a random signature will\n\t\/\/ automatically be generated and stored here. SignatureFile will be\n\t\/\/ ignored if Signature is given.\n\tSignature string\n\tSignatureFile string\n\n\t\/\/ CacheFile, if specified, will cache the result of a check. The\n\t\/\/ duration of the cache is specified by CacheDuration, and defaults\n\t\/\/ to 48 hours if not specified. If the CacheFile is newer than the\n\t\/\/ CacheDuration, than the Check will short-circuit and use those\n\t\/\/ results.\n\t\/\/\n\t\/\/ If the CacheFile directory doesn't exist, it will be created with\n\t\/\/ permissions 0755.\n\tCacheFile string\n\tCacheDuration time.Duration\n\n\t\/\/ Force, if true, will force the check even if CHECKPOINT_DISABLE\n\t\/\/ is set. Within HashiCorp products, this is ONLY USED when the user\n\t\/\/ specifically requests it. This is never automatically done without\n\t\/\/ the user's consent.\n\tForce bool\n}\n\n\/\/ CheckResponse is the response for a check request.\ntype CheckResponse struct {\n\tProduct string\n\tCurrentVersion string `json:\"current_version\"`\n\tCurrentReleaseDate int `json:\"current_release_date\"`\n\tCurrentDownloadURL string `json:\"current_download_url\"`\n\tCurrentChangelogURL string `json:\"current_changelog_url\"`\n\tProjectWebsite string `json:\"project_website\"`\n\tOutdated bool `json:\"outdated\"`\n\tAlerts []*CheckAlert\n}\n\n\/\/ CheckAlert is a single alert message from a check request.\n\/\/\n\/\/ These never have to be manually constructed, and are typically populated\n\/\/ into a CheckResponse as a result of the Check request.\ntype CheckAlert struct {\n\tID int\n\tDate int\n\tMessage string\n\tURL string\n\tLevel string\n}\n\n\/\/ Check checks for alerts and new version information.\nfunc Check(p *CheckParams) (*CheckResponse, error) {\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" && !p.Force {\n\t\treturn &CheckResponse{}, nil\n\t}\n\n\t\/\/ If we have a cached result, then use that\n\tif r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil {\n\t\treturn nil, err\n\t} else if r != nil {\n\t\tdefer r.Close()\n\t\treturn checkResult(r)\n\t}\n\n\tvar u url.URL\n\n\tif p.Arch == \"\" {\n\t\tp.Arch = runtime.GOARCH\n\t}\n\tif p.OS == \"\" {\n\t\tp.OS = runtime.GOOS\n\t}\n\n\t\/\/ If we're given a SignatureFile, then attempt to read that.\n\tsignature := p.Signature\n\tif p.Signature == \"\" && p.SignatureFile != \"\" {\n\t\tvar err error\n\t\tsignature, err = checkSignature(p.SignatureFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tv := u.Query()\n\tv.Set(\"version\", p.Version)\n\tv.Set(\"arch\", p.Arch)\n\tv.Set(\"os\", p.OS)\n\tv.Set(\"signature\", signature)\n\n\tu.Scheme = \"https\"\n\tu.Host = \"checkpoint-api.hashicorp.com\"\n\tu.Path = fmt.Sprintf(\"\/v1\/check\/%s\", p.Product)\n\tu.RawQuery = v.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"HashiCorp\/go-checkpoint\")\n\n\tclient := cleanhttp.DefaultClient()\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Unknown status: %d\", resp.StatusCode)\n\t}\n\n\tvar r io.Reader = resp.Body\n\tif p.CacheFile != \"\" {\n\t\t\/\/ Make sure the directory holding our cache exists.\n\t\tif err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ We have to cache the result, so write the response to the\n\t\t\/\/ file as we read it.\n\t\tf, err := os.Create(p.CacheFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Write the cache header\n\t\tif err := writeCacheHeader(f, p.Version); err != nil {\n\t\t\tf.Close()\n\t\t\tos.Remove(p.CacheFile)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer f.Close()\n\t\tr = io.TeeReader(r, f)\n\t}\n\n\treturn checkResult(r)\n}\n\n\/\/ CheckInterval is used to check for a response on a given interval duration.\n\/\/ The interval is not exact, and checks are randomized to prevent a thundering\n\/\/ herd. However, it is expected that on average one check is performed per\n\/\/ interval. The returned channel may be closed to stop background checks.\nfunc CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} {\n\tdoneCh := make(chan struct{})\n\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" {\n\t\treturn doneCh\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(randomStagger(interval)):\n\t\t\t\tresp, err := Check(p)\n\t\t\t\tcb(resp, err)\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn doneCh\n}\n\n\/\/ randomStagger returns an interval that is between 3\/4 and 5\/4 of\n\/\/ the given interval. The expected value is the interval.\nfunc randomStagger(interval time.Duration) time.Duration {\n\tstagger := time.Duration(mrand.Int63()) % (interval \/ 2)\n\treturn 3*(interval\/4) + stagger\n}\n\nfunc checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t\/\/ File doesn't exist, not a problem\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tif d == 0 {\n\t\td = 48 * time.Hour\n\t}\n\n\tif fi.ModTime().Add(d).Before(time.Now()) {\n\t\t\/\/ Cache is busted, delete the old file and re-request. We ignore\n\t\t\/\/ errors here because re-creating the file is fine too.\n\t\tos.Remove(path)\n\t\treturn nil, nil\n\t}\n\n\t\/\/ File looks good so far, open it up so we can inspect the contents.\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check the signature of the file\n\tvar sig [4]byte\n\tif err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tif !reflect.DeepEqual(sig, magicBytes) {\n\t\t\/\/ Signatures don't match. Reset.\n\t\tf.Close()\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Check the version. If it changed, then rewrite\n\tvar length uint32\n\tif err := binary.Read(f, binary.LittleEndian, &length); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tdata := make([]byte, length)\n\tif _, err := io.ReadFull(f, data); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tif string(data) != current {\n\t\t\/\/ Version changed, reset\n\t\tf.Close()\n\t\treturn nil, nil\n\t}\n\n\treturn f, nil\n}\n\nfunc checkResult(r io.Reader) (*CheckResponse, error) {\n\tvar result CheckResponse\n\tdec := json.NewDecoder(r)\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &result, nil\n}\n\nfunc checkSignature(path string) (string, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\t\/\/ The file exists, read it out\n\t\tsigBytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t\/\/ Split the file into lines\n\t\tlines := strings.SplitN(string(sigBytes), \"\\n\", 2)\n\t\tif len(lines) > 0 {\n\t\t\treturn strings.TrimSpace(lines[0]), nil\n\t\t}\n\t}\n\n\t\/\/ If this isn't a non-exist error, then return that.\n\tif !os.IsNotExist(err) {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ The file doesn't exist, so create a signature.\n\tvar b [16]byte\n\tn := 0\n\tfor n < 16 {\n\t\tn2, err := rand.Read(b[n:])\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tn += n2\n\t}\n\tsignature := fmt.Sprintf(\n\t\t\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n\n\t\/\/ Make sure the directory holding our signature exists.\n\tif err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Write the signature\n\tif err := ioutil.WriteFile(path, []byte(signature+\"\\n\\n\"+userMessage+\"\\n\"), 0644); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn signature, nil\n}\n\nfunc writeCacheHeader(f io.Writer, v string) error {\n\t\/\/ Write our signature first\n\tif err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write out our current version length\n\tvar length uint32 = uint32(len(v))\n\tif err := binary.Write(f, binary.LittleEndian, length); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := f.Write([]byte(v))\n\treturn err\n}\n\n\/\/ userMessage is suffixed to the signature file to provide feedback.\nvar userMessage = `\nThis signature is a randomly generated UUID used to de-duplicate\nalerts and version information. This signature is random, it is\nnot based on any personally identifiable information. To create\na new signature, you can simply delete this file at any time.\nSee the documentation for the software using Checkpoint for more\ninformation on how to disable it.\n`\n<commit_msg>Use a one second timeout when checking for version<commit_after>\/\/ checkpoint is a package for checking version information and alerts\n\/\/ for a HashiCorp product.\npackage checkpoint\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\tmrand \"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-cleanhttp\"\n\tuuid \"github.com\/hashicorp\/go-uuid\"\n)\n\nvar magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB}\n\n\/\/ ReportParams are the parameters for configuring a telemetry report.\ntype ReportParams struct {\n\t\/\/ Signature is some random signature that should be stored and used\n\t\/\/ as a cookie-like value. This ensures that alerts aren't repeated.\n\t\/\/ If the signature is changed, repeat alerts may be sent down. The\n\t\/\/ signature should NOT be anything identifiable to a user (such as\n\t\/\/ a MAC address). It should be random.\n\t\/\/\n\t\/\/ If SignatureFile is given, then the signature will be read from this\n\t\/\/ file. If the file doesn't exist, then a random signature will\n\t\/\/ automatically be generated and stored here. SignatureFile will be\n\t\/\/ ignored if Signature is given.\n\tSignature string `json:\"signature\"`\n\tSignatureFile string `json:\"-\"`\n\n\tStartTime time.Time `json:\"start_time\"`\n\tEndTime time.Time `json:\"end_time\"`\n\tArch string `json:\"arch\"`\n\tOS string `json:\"os\"`\n\tPayload interface{} `json:\"payload,omitempty\"`\n\tProduct string `json:\"product\"`\n\tRunID string `json:\"run_id\"`\n\tSchemaVersion string `json:\"schema_version\"`\n\tVersion string `json:\"version\"`\n}\n\nfunc (i *ReportParams) signature() string {\n\tsignature := i.Signature\n\tif i.Signature == \"\" && i.SignatureFile != \"\" {\n\t\tvar err error\n\t\tsignature, err = checkSignature(i.SignatureFile)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn signature\n}\n\n\/\/ Report sends telemetry information to checkpoint\nfunc Report(ctx context.Context, r *ReportParams) error {\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" {\n\t\treturn nil\n\t}\n\n\treq, err := ReportRequest(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := cleanhttp.DefaultClient()\n\tresp, err := client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 201 {\n\t\treturn fmt.Errorf(\"Unknown status: %d\", resp.StatusCode)\n\t}\n\n\treturn nil\n}\n\n\/\/ ReportRequest creates a request object for making a report\nfunc ReportRequest(r *ReportParams) (*http.Request, error) {\n\t\/\/ Populate some fields automatically if we can\n\tif r.RunID == \"\" {\n\t\tuuid, err := uuid.GenerateUUID()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.RunID = uuid\n\t}\n\tif r.Arch == \"\" {\n\t\tr.Arch = runtime.GOARCH\n\t}\n\tif r.OS == \"\" {\n\t\tr.OS = runtime.GOOS\n\t}\n\tif r.Signature == \"\" {\n\t\tr.Signature = r.signature()\n\t}\n\n\tb, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"checkpoint-api.hashicorp.com\",\n\t\tPath: fmt.Sprintf(\"\/v1\/telemetry\/%s\", r.Product),\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"HashiCorp\/go-checkpoint\")\n\n\treturn req, nil\n}\n\n\/\/ CheckParams are the parameters for configuring a check request.\ntype CheckParams struct {\n\t\/\/ Product and version are used to lookup the correct product and\n\t\/\/ alerts for the proper version. The version is also used to perform\n\t\/\/ a version check.\n\tProduct string\n\tVersion string\n\n\t\/\/ Arch and OS are used to filter alerts potentially only to things\n\t\/\/ affecting a specific os\/arch combination. If these aren't specified,\n\t\/\/ they'll be automatically filled in.\n\tArch string\n\tOS string\n\n\t\/\/ Signature is some random signature that should be stored and used\n\t\/\/ as a cookie-like value. This ensures that alerts aren't repeated.\n\t\/\/ If the signature is changed, repeat alerts may be sent down. The\n\t\/\/ signature should NOT be anything identifiable to a user (such as\n\t\/\/ a MAC address). It should be random.\n\t\/\/\n\t\/\/ If SignatureFile is given, then the signature will be read from this\n\t\/\/ file. If the file doesn't exist, then a random signature will\n\t\/\/ automatically be generated and stored here. SignatureFile will be\n\t\/\/ ignored if Signature is given.\n\tSignature string\n\tSignatureFile string\n\n\t\/\/ CacheFile, if specified, will cache the result of a check. The\n\t\/\/ duration of the cache is specified by CacheDuration, and defaults\n\t\/\/ to 48 hours if not specified. If the CacheFile is newer than the\n\t\/\/ CacheDuration, than the Check will short-circuit and use those\n\t\/\/ results.\n\t\/\/\n\t\/\/ If the CacheFile directory doesn't exist, it will be created with\n\t\/\/ permissions 0755.\n\tCacheFile string\n\tCacheDuration time.Duration\n\n\t\/\/ Force, if true, will force the check even if CHECKPOINT_DISABLE\n\t\/\/ is set. Within HashiCorp products, this is ONLY USED when the user\n\t\/\/ specifically requests it. This is never automatically done without\n\t\/\/ the user's consent.\n\tForce bool\n}\n\n\/\/ CheckResponse is the response for a check request.\ntype CheckResponse struct {\n\tProduct string\n\tCurrentVersion string `json:\"current_version\"`\n\tCurrentReleaseDate int `json:\"current_release_date\"`\n\tCurrentDownloadURL string `json:\"current_download_url\"`\n\tCurrentChangelogURL string `json:\"current_changelog_url\"`\n\tProjectWebsite string `json:\"project_website\"`\n\tOutdated bool `json:\"outdated\"`\n\tAlerts []*CheckAlert\n}\n\n\/\/ CheckAlert is a single alert message from a check request.\n\/\/\n\/\/ These never have to be manually constructed, and are typically populated\n\/\/ into a CheckResponse as a result of the Check request.\ntype CheckAlert struct {\n\tID int\n\tDate int\n\tMessage string\n\tURL string\n\tLevel string\n}\n\n\/\/ Check checks for alerts and new version information.\nfunc Check(p *CheckParams) (*CheckResponse, error) {\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" && !p.Force {\n\t\treturn &CheckResponse{}, nil\n\t}\n\n\t\/\/ If we have a cached result, then use that\n\tif r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil {\n\t\treturn nil, err\n\t} else if r != nil {\n\t\tdefer r.Close()\n\t\treturn checkResult(r)\n\t}\n\n\tvar u url.URL\n\n\tif p.Arch == \"\" {\n\t\tp.Arch = runtime.GOARCH\n\t}\n\tif p.OS == \"\" {\n\t\tp.OS = runtime.GOOS\n\t}\n\n\t\/\/ If we're given a SignatureFile, then attempt to read that.\n\tsignature := p.Signature\n\tif p.Signature == \"\" && p.SignatureFile != \"\" {\n\t\tvar err error\n\t\tsignature, err = checkSignature(p.SignatureFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tv := u.Query()\n\tv.Set(\"version\", p.Version)\n\tv.Set(\"arch\", p.Arch)\n\tv.Set(\"os\", p.OS)\n\tv.Set(\"signature\", signature)\n\n\tu.Scheme = \"https\"\n\tu.Host = \"checkpoint-api.hashicorp.com\"\n\tu.Path = fmt.Sprintf(\"\/v1\/check\/%s\", p.Product)\n\tu.RawQuery = v.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"HashiCorp\/go-checkpoint\")\n\n\tclient := cleanhttp.DefaultClient()\n\n\t\/\/ We use a short timeout since checking for new versions is not critical\n\t\/\/ enough to block on if checkpoint is broken\/slow.\n\tclient.Timeout = time.Duration(1 * time.Second)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Unknown status: %d\", resp.StatusCode)\n\t}\n\n\tvar r io.Reader = resp.Body\n\tif p.CacheFile != \"\" {\n\t\t\/\/ Make sure the directory holding our cache exists.\n\t\tif err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ We have to cache the result, so write the response to the\n\t\t\/\/ file as we read it.\n\t\tf, err := os.Create(p.CacheFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Write the cache header\n\t\tif err := writeCacheHeader(f, p.Version); err != nil {\n\t\t\tf.Close()\n\t\t\tos.Remove(p.CacheFile)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer f.Close()\n\t\tr = io.TeeReader(r, f)\n\t}\n\n\treturn checkResult(r)\n}\n\n\/\/ CheckInterval is used to check for a response on a given interval duration.\n\/\/ The interval is not exact, and checks are randomized to prevent a thundering\n\/\/ herd. However, it is expected that on average one check is performed per\n\/\/ interval. The returned channel may be closed to stop background checks.\nfunc CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} {\n\tdoneCh := make(chan struct{})\n\n\tif disabled := os.Getenv(\"CHECKPOINT_DISABLE\"); disabled != \"\" {\n\t\treturn doneCh\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(randomStagger(interval)):\n\t\t\t\tresp, err := Check(p)\n\t\t\t\tcb(resp, err)\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn doneCh\n}\n\n\/\/ randomStagger returns an interval that is between 3\/4 and 5\/4 of\n\/\/ the given interval. The expected value is the interval.\nfunc randomStagger(interval time.Duration) time.Duration {\n\tstagger := time.Duration(mrand.Int63()) % (interval \/ 2)\n\treturn 3*(interval\/4) + stagger\n}\n\nfunc checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t\/\/ File doesn't exist, not a problem\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tif d == 0 {\n\t\td = 48 * time.Hour\n\t}\n\n\tif fi.ModTime().Add(d).Before(time.Now()) {\n\t\t\/\/ Cache is busted, delete the old file and re-request. We ignore\n\t\t\/\/ errors here because re-creating the file is fine too.\n\t\tos.Remove(path)\n\t\treturn nil, nil\n\t}\n\n\t\/\/ File looks good so far, open it up so we can inspect the contents.\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check the signature of the file\n\tvar sig [4]byte\n\tif err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tif !reflect.DeepEqual(sig, magicBytes) {\n\t\t\/\/ Signatures don't match. Reset.\n\t\tf.Close()\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Check the version. If it changed, then rewrite\n\tvar length uint32\n\tif err := binary.Read(f, binary.LittleEndian, &length); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tdata := make([]byte, length)\n\tif _, err := io.ReadFull(f, data); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tif string(data) != current {\n\t\t\/\/ Version changed, reset\n\t\tf.Close()\n\t\treturn nil, nil\n\t}\n\n\treturn f, nil\n}\n\nfunc checkResult(r io.Reader) (*CheckResponse, error) {\n\tvar result CheckResponse\n\tdec := json.NewDecoder(r)\n\tif err := dec.Decode(&result); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &result, nil\n}\n\nfunc checkSignature(path string) (string, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\t\/\/ The file exists, read it out\n\t\tsigBytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t\/\/ Split the file into lines\n\t\tlines := strings.SplitN(string(sigBytes), \"\\n\", 2)\n\t\tif len(lines) > 0 {\n\t\t\treturn strings.TrimSpace(lines[0]), nil\n\t\t}\n\t}\n\n\t\/\/ If this isn't a non-exist error, then return that.\n\tif !os.IsNotExist(err) {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ The file doesn't exist, so create a signature.\n\tvar b [16]byte\n\tn := 0\n\tfor n < 16 {\n\t\tn2, err := rand.Read(b[n:])\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tn += n2\n\t}\n\tsignature := fmt.Sprintf(\n\t\t\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n\n\t\/\/ Make sure the directory holding our signature exists.\n\tif err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Write the signature\n\tif err := ioutil.WriteFile(path, []byte(signature+\"\\n\\n\"+userMessage+\"\\n\"), 0644); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn signature, nil\n}\n\nfunc writeCacheHeader(f io.Writer, v string) error {\n\t\/\/ Write our signature first\n\tif err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write out our current version length\n\tvar length uint32 = uint32(len(v))\n\tif err := binary.Write(f, binary.LittleEndian, length); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := f.Write([]byte(v))\n\treturn err\n}\n\n\/\/ userMessage is suffixed to the signature file to provide feedback.\nvar userMessage = `\nThis signature is a randomly generated UUID used to de-duplicate\nalerts and version information. This signature is random, it is\nnot based on any personally identifiable information. To create\na new signature, you can simply delete this file at any time.\nSee the documentation for the software using Checkpoint for more\ninformation on how to disable it.\n`\n<|endoftext|>"} {"text":"<commit_before>package clnt\n\nimport (\n\t\"code.google.com\/p\/go9p\/p\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n)\n\nfunc (clnt *Clnt) ServeHTTP(c http.ResponseWriter, r *http.Request) {\n\tio.WriteString(c, fmt.Sprintf(\"<html><body><h1>Client %s<\/h1>\", clnt.Id))\n\tdefer io.WriteString(c, \"<\/body><\/html>\")\n\n\t\/\/ fcalls\n\tif clnt.Debuglevel&DbgLogFcalls != 0 {\n\t\tfs := clnt.Log.Filter(clnt, DbgLogFcalls)\n\t\tio.WriteString(c, fmt.Sprintf(\"<h2>Last %d 9P messages<\/h2>\", len(fs)))\n\t\tfor _, l := range fs {\n\t\t\tfc := l.Data.(*p.Fcall)\n\t\t\tif fc.Type != 0 {\n\t\t\t\tio.WriteString(c, fmt.Sprintf(\"<br>%s\", fc))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc clntServeHTTP(c http.ResponseWriter, r *http.Request) {\n\tio.WriteString(c, fmt.Sprintf(\"<html><body>\"))\n\tdefer io.WriteString(c, \"<\/body><\/html>\")\n\n\tclnts.Lock()\n\tif clnts.clntList == nil {\n\t\tio.WriteString(c, \"no clients\")\n\t}\n\n\tfor clnt := clnts.clntList; clnt != nil; clnt = clnt.next {\n\t\tio.WriteString(c, fmt.Sprintf(\"<a href='\/go9p\/clnt\/%s'>%s<\/a><br>\", clnt.Id, clnt.Id))\n\t}\n\tclnts.Unlock()\n}\n\nfunc (clnt *Clnt) statsRegister() {\n\thttp.Handle(\"\/go9p\/clnt\/\"+clnt.Id, clnt)\n}\n\nfunc (clnt *Clnt) statsUnregister() {\n\thttp.Handle(\"\/go9p\/clnt\/\"+clnt.Id, nil)\n}\n\nfunc (c *ClntList) statsRegister() {\n\thttp.HandleFunc(\"\/go9p\/clnt\", clntServeHTTP)\n}\n\nfunc (c *ClntList) statsUnregister() {\n\thttp.HandleFunc(\"\/go9p\/clnt\", nil)\n}\n<commit_msg>Fixed call to statsUnregister in p.clnt.stats_http<commit_after>package clnt\n\nimport (\n\t\"code.google.com\/p\/go9p\/p\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n)\n\nfunc (clnt *Clnt) ServeHTTP(c http.ResponseWriter, r *http.Request) {\n\tio.WriteString(c, fmt.Sprintf(\"<html><body><h1>Client %s<\/h1>\", clnt.Id))\n\tdefer io.WriteString(c, \"<\/body><\/html>\")\n\n\t\/\/ fcalls\n\tif clnt.Debuglevel&DbgLogFcalls != 0 {\n\t\tfs := clnt.Log.Filter(clnt, DbgLogFcalls)\n\t\tio.WriteString(c, fmt.Sprintf(\"<h2>Last %d 9P messages<\/h2>\", len(fs)))\n\t\tfor _, l := range fs {\n\t\t\tfc := l.Data.(*p.Fcall)\n\t\t\tif fc.Type != 0 {\n\t\t\t\tio.WriteString(c, fmt.Sprintf(\"<br>%s\", fc))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc clntServeHTTP(c http.ResponseWriter, r *http.Request) {\n\tio.WriteString(c, fmt.Sprintf(\"<html><body>\"))\n\tdefer io.WriteString(c, \"<\/body><\/html>\")\n\n\tclnts.Lock()\n\tif clnts.clntList == nil {\n\t\tio.WriteString(c, \"no clients\")\n\t}\n\n\tfor clnt := clnts.clntList; clnt != nil; clnt = clnt.next {\n\t\tio.WriteString(c, fmt.Sprintf(\"<a href='\/go9p\/clnt\/%s'>%s<\/a><br>\", clnt.Id, clnt.Id))\n\t}\n\tclnts.Unlock()\n}\n\nfunc (clnt *Clnt) statsRegister() {\n\thttp.Handle(\"\/go9p\/clnt\/\"+clnt.Id, clnt)\n}\n\nfunc (clnt *Clnt) statsUnregister() {\n\thttp.HandleFunc(\"\/go9p\/clnt\/\"+clnt.Id, http.NotFound)\n}\n\nfunc (c *ClntList) statsRegister() {\n\thttp.HandleFunc(\"\/go9p\/clnt\", clntServeHTTP)\n}\n\nfunc (c *ClntList) statsUnregister() {\n\thttp.HandleFunc(\"\/go9p\/clnt\/\", http.NotFound)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/couchbase\/cbauth\"\n\t\"github.com\/couchbase\/go-couchbase\"\n\t\"log\"\n\t\"net\/url\"\n)\n\nvar serverURL = flag.String(\"serverURL\", \"http:\/\/localhost:9000\",\n\t\"couchbase server URL\")\nvar poolName = flag.String(\"poolName\", \"default\",\n\t\"pool name\")\nvar bucketName = flag.String(\"bucketName\", \"default\",\n\t\"bucket name\")\nvar authUser = flag.String(\"authUser\", \"\",\n\t\"auth user name (probably same as bucketName)\")\nvar authPswd = flag.String(\"authPswd\", \"\",\n\t\"auth password\")\n\nfunc main() {\n\n\tflag.Parse()\n\t\/*\n\t NOTE. This example requires the following environment variables to be set.\n\n\t NS_SERVER_CBAUTH_URL\n\t NS_SERVER_CBAUTH_USER\n\t NS_SERVER_CBAUTH_PWD\n\n\t e.g\n\n\t NS_SERVER_CBAUTH_URL=\"http:\/\/localhost:9000\/_cbauth\"\n\t NS_SERVER_CBAUTH_USER=\"Administrator\"\n\t NS_SERVER_CBAUTH_PWD=\"asdasd\"\n\n\t*\/\n\n\turl, err := url.Parse(*serverURL)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to parse url %v\", err)\n\t\treturn\n\t}\n\n\thostPort := url.Host\n\n\tuser, bucket_password, err := cbauth.GetHTTPServiceAuth(hostPort)\n\tif err != nil {\n\t\tlog.Printf(\"Failed %v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\" HTTP Servce username %s password %s\", user, bucket_password)\n\n\tclient, err := couchbase.ConnectWithAuthCreds(*serverURL, user, bucket_password)\n\tif err != nil {\n\t\tlog.Printf(\"Connect failed %v\", err)\n\t\treturn\n\t}\n\n\tcbpool, err := client.GetPool(\"default\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to default pool %v\", err)\n\t\treturn\n\t}\n\n\tmUser, mPassword, err := cbauth.GetMemcachedServiceAuth(hostPort)\n\tif err != nil {\n\t\tlog.Printf(\" failed %v\", err)\n\t\treturn\n\t}\n\n\tvar cbbucket *couchbase.Bucket\n\tcbbucket, err = cbpool.GetBucketWithAuth(*bucketName, mUser, mPassword)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to bucket %v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\" Bucket name %s Bucket %v\", *bucketName, cbbucket)\n\n\terr = cbbucket.Set(\"k1\", 5, \"value\")\n\tif err != nil {\n\t\tlog.Printf(\"set failed error %v\", err)\n\t\treturn\n\t}\n\n\tif *authUser != \"\" {\n\t\tcreds, err := cbauth.Auth(*authUser, *authPswd)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" failed %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcanAccess, err := creds.CanAccessBucket(*bucketName)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" can't access bucket %v\", err)\n\t\t}\n\n\t\tlog.Printf(\" results canaccess %v bucket %v\", canAccess, *bucketName)\n\n\t\tcanRead, err := creds.CanReadBucket(*bucketName)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" can't read bucket %v\", err)\n\t\t}\n\n\t\tlog.Printf(\" results canread %v bucket %v\", canRead, *bucketName)\n\n\t\tcanDDL, err := creds.CanDDLBucket(*bucketName)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" can't DDL bucket %v\", err)\n\t\t}\n\n\t\tlog.Printf(\" results canDDL %v bucket %v\", canDDL, *bucketName)\n\t}\n\n}\n<commit_msg>bring cb_auth example in sync with latest cbauth development<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/couchbase\/cbauth\"\n\t\"github.com\/couchbase\/go-couchbase\"\n\t\"log\"\n\t\"net\/url\"\n)\n\nvar serverURL = flag.String(\"serverURL\", \"http:\/\/localhost:9000\",\n\t\"couchbase server URL\")\nvar poolName = flag.String(\"poolName\", \"default\",\n\t\"pool name\")\nvar bucketName = flag.String(\"bucketName\", \"default\",\n\t\"bucket name\")\nvar authUser = flag.String(\"authUser\", \"\",\n\t\"auth user name (probably same as bucketName)\")\nvar authPswd = flag.String(\"authPswd\", \"\",\n\t\"auth password\")\n\nfunc main() {\n\n\tflag.Parse()\n\t\/*\n\t NOTE. This example requires the following environment variables to be set.\n\n\t CBAUTH_REVRPC_URL\n\n\t e.g\n\n\t CBAUTH_REVRPC_URL=\"http:\/\/Administrator:asdasd@127.0.0.1:9000\/_cbauth\"\n\n\t*\/\n\n\turl, err := url.Parse(*serverURL)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to parse url %v\", err)\n\t\treturn\n\t}\n\n\thostPort := url.Host\n\n\tuser, bucket_password, err := cbauth.GetHTTPServiceAuth(hostPort)\n\tif err != nil {\n\t\tlog.Printf(\"Failed %v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\" HTTP Servce username %s password %s\", user, bucket_password)\n\n\tclient, err := couchbase.ConnectWithAuthCreds(*serverURL, user, bucket_password)\n\tif err != nil {\n\t\tlog.Printf(\"Connect failed %v\", err)\n\t\treturn\n\t}\n\n\tcbpool, err := client.GetPool(\"default\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to default pool %v\", err)\n\t\treturn\n\t}\n\n\tmUser, mPassword, err := cbauth.GetMemcachedServiceAuth(hostPort)\n\tif err != nil {\n\t\tlog.Printf(\" failed %v\", err)\n\t\treturn\n\t}\n\n\tvar cbbucket *couchbase.Bucket\n\tcbbucket, err = cbpool.GetBucketWithAuth(*bucketName, mUser, mPassword)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to bucket %v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\" Bucket name %s Bucket %v\", *bucketName, cbbucket)\n\n\terr = cbbucket.Set(\"k1\", 5, \"value\")\n\tif err != nil {\n\t\tlog.Printf(\"set failed error %v\", err)\n\t\treturn\n\t}\n\n\tif *authUser != \"\" {\n\t\tcreds, err := cbauth.Auth(*authUser, *authPswd)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" failed %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tpermission := fmt.Sprintf(\"cluster.bucket[%s].data!read\", *bucketName)\n\t\tcanAccess, err := creds.IsAllowed(permission)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" error %v checking permission %v\", err, permission)\n\t\t} else {\n\t\t\tlog.Printf(\" result of checking permission %v : %v\", permission, canAccess)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package restconf\n\nimport (\n\t\"fmt\"\n\t\"github.com\/c2g\/c2\"\n\t\"github.com\/c2g\/meta\"\n\t\"github.com\/c2g\/node\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype restconfError struct {\n\tCode int\n\tMsg string\n}\n\nfunc (err *restconfError) Error() string {\n\treturn err.Msg\n}\n\nfunc (err *restconfError) HttpCode() int {\n\treturn err.Code\n}\n\nfunc NewService(root node.Browser) *Service {\n\tservice := &Service{\n\t\tPath: \"\/restconf\/\",\n\t\tRoot: root,\n\t\tmux: http.NewServeMux(),\n\t}\n\tservice.mux.HandleFunc(\"\/.well-known\/host-meta\", service.resources)\n\tservice.mux.Handle(\"\/restconf\/\", http.StripPrefix(\"\/restconf\/\", service))\n\tservice.mux.HandleFunc(\"\/meta\/\", service.meta)\n\n\tservice.socketHandler = &WebSocketService{Factory:service}\n\tservice.mux.Handle(\"\/restsock\/\", websocket.Handler(service.socketHandler.Handle))\n\treturn service\n}\n\ntype Service struct {\n\tPath string\n\tRoot node.Browser\n\tmux *http.ServeMux\n\tdocrootSource *docRootImpl\n\tDocRoot string\n\tPort string\n\tIface string\n\tCallbackAddress string\n\tCallHome *CallHome\n\tsocketHandler *WebSocketService\n}\n\nfunc (service *Service) EffectiveCallbackAddress() string {\n\tif len(service.CallbackAddress) > 0 {\n\t\treturn service.CallbackAddress\n\t}\n\tif len(service.Iface) == 0 {\n\t\tpanic(\"No iface given for management port\")\n\t}\n\tip := c2.GetIpForIface(service.Iface)\n\treturn fmt.Sprintf(\"http:\/\/%s%s\/\", ip, service.Port)\n}\n\nfunc (service *Service) handleError(err error, w http.ResponseWriter) {\n\tif httpErr, ok := err.(c2.HttpError); ok {\n\t\tc2.Err.Print(httpErr.Error() + \"\\n\" + httpErr.Stack())\n\t\thttp.Error(w, httpErr.Error(), httpErr.HttpCode())\n\t} else {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc (self *Service) Subscribe(sub *node.Subscription) error {\n\tc := node.NewContext()\n\tif sel := c.Selector(self.Root()).Find(sub.Path); sel.LastErr == nil {\n\t\tcloser, notifSel := sel.Notifications(sub)\n\t\tif notifSel.LastErr != nil {\n\t\t\treturn notifSel.LastErr\n\t\t}\n\t\tsub.Notification = notifSel.Selection.Meta().(*meta.Notification)\n\t\tsub.Closer = closer\n\t} else {\n\t\treturn sel.LastErr\n\t}\n\treturn nil\n}\n\nfunc (service *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th := w.Header()\n\th.Set(\"Access-Control-Allow-Headers\", \"origin, content-type, accept\")\n\th.Set(\"Access-Control-Allow-Methods\", \"GET, POST, PUT, OPTIONS, DELETE, PATCH\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\tvar err error\n\tvar payload node.Node\n\tvar sel node.Selector\n\tc := node.NewContext()\n\tif sel = c.Selector(service.Root()).FindUrl(r.URL); sel.LastErr == nil {\n\t\tif sel.Selection == nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tservice.handleError(err, w)\n\t\t\treturn\n\t\t}\n\t\tswitch r.Method {\n\t\tcase \"DELETE\":\n\t\t\terr = sel.Selection.Delete()\n\t\tcase \"GET\":\n\t\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\t\toutput := node.NewJsonWriter(w).Node()\n\t\t\terr = sel.InsertInto(output).LastErr\n\t\tcase \"PUT\":\n\t\t\terr = sel.UpsertFrom(node.NewJsonReader(r.Body).Node()).LastErr\n\t\tcase \"POST\":\n\t\t\tif meta.IsAction(sel.Selection.Meta()) {\n\t\t\t\tinput := node.NewJsonReader(r.Body).Node()\n\t\t\t\tif outputSel := sel.Action(input); outputSel.Selection != nil {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\t\t\t\terr = outputSel.InsertInto(node.NewJsonWriter(w).Node()).LastErr\n\t\t\t\t} else {\n\t\t\t\t\terr = outputSel.LastErr\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpayload = node.NewJsonReader(r.Body).Node()\n\t\t\t\terr = sel.InsertFrom(payload).LastErr\n\t\t\t}\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\t}\n\t} else {\n\t\terr = sel.LastErr\n\t}\n\n\tif err != nil {\n\t\tservice.handleError(err, w)\n\t}\n}\n\ntype docRootImpl struct {\n\tdocroot meta.StreamSource\n}\n\nfunc (service *Service) SetDocRoot(docroot meta.StreamSource) {\n\tservice.docrootSource = &docRootImpl{docroot: docroot}\n\tservice.mux.Handle(\"\/ui\/\", http.StripPrefix(\"\/ui\/\", service.docrootSource))\n}\n\nfunc (service *Service) AddHandler(pattern string, handler http.Handler) {\n\tservice.mux.Handle(pattern, http.StripPrefix(pattern, handler))\n}\n\nfunc (service *Service) Listen() {\n\ts := &http.Server{\n\t\tAddr: service.Port,\n\t\tHandler: service.mux,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tc2.Info.Println(\"Starting RESTCONF interface\")\n\tc2.Err.Fatal(s.ListenAndServe())\n}\n\nfunc (service *Service) Stop() {\n\tif service.docrootSource != nil && service.docrootSource.docroot != nil {\n\t\tmeta.CloseResource(service.docrootSource.docroot)\n\t}\n\t\/\/ TODO - actually stop service\n}\n\nfunc (service *docRootImpl) ServeHTTP(wtr http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Path\n\tif path == \"\" {\n\t\tpath = \"index.html\"\n\t}\n\tif rdr, err := service.docroot.OpenStream(path); err != nil {\n\t\thttp.Error(wtr, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tdefer meta.CloseResource(rdr)\n\t\text := filepath.Ext(path)\n\t\tctype := mime.TypeByExtension(ext)\n\t\twtr.Header().Set(\"Content-Type\", ctype)\n\t\tif _, err = io.Copy(wtr, rdr); err != nil {\n\t\t\thttp.Error(wtr, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\t\/\/ Eventually support this but need file seeker to do that.\n\t\t\/\/ http.ServeContent(wtr, req, path, time.Now(), &ReaderPeeker{rdr})\n\t}\n}\n\nfunc (service *Service) meta(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t}\n\tif p := strings.TrimPrefix(r.URL.Path, \"\/meta\/\"); len(p) < len(r.URL.Path) {\n\t\tr.URL.Path = p\n\t} else {\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\tm := service.Root().Meta().(*meta.Module)\n\t_, noexpand := r.URL.Query()[\"noexpand\"]\n\n\tc := node.NewContext()\n\tsel := c.Selector(node.SelectModule(m, !noexpand))\n\tif sel = sel.FindUrl(r.URL); sel.LastErr != nil {\n\t\tservice.handleError(sel.LastErr, w)\n\t\treturn\n\t} else if sel.Selection == nil {\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\toutput := node.NewJsonWriter(w).Node()\n\t\tif err := sel.InsertInto(output).LastErr; err != nil {\n\t\t\tservice.handleError(err, w)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (service *Service) resources(w http.ResponseWriter, r *http.Request) {\n\t\/\/ RESTCONF Sec. 3.1\n\tfmt.Fprintf(w, `\"xrd\" : { \"link\" : { \"@rel\" : \"restconf\", \"@href\" : \"\/restconf\" } } }`)\n}\n<commit_msg>allow setting version string in rest api<commit_after>package restconf\n\nimport (\n\t\"fmt\"\n\t\"github.com\/c2g\/c2\"\n\t\"github.com\/c2g\/meta\"\n\t\"github.com\/c2g\/node\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype restconfError struct {\n\tCode int\n\tMsg string\n}\n\nfunc (err *restconfError) Error() string {\n\treturn err.Msg\n}\n\nfunc (err *restconfError) HttpCode() int {\n\treturn err.Code\n}\n\nfunc NewService(root node.Browser) *Service {\n\tservice := &Service{\n\t\tPath: \"\/restconf\/\",\n\t\tRoot: root,\n\t\tmux: http.NewServeMux(),\n\t}\n\tservice.mux.HandleFunc(\"\/.well-known\/host-meta\", service.resources)\n\tservice.mux.Handle(\"\/restconf\/\", http.StripPrefix(\"\/restconf\/\", service))\n\tservice.mux.HandleFunc(\"\/meta\/\", service.meta)\n\n\tservice.socketHandler = &WebSocketService{Factory:service}\n\tservice.mux.Handle(\"\/restsock\/\", websocket.Handler(service.socketHandler.Handle))\n\treturn service\n}\n\ntype Service struct {\n\tPath string\n\tRoot node.Browser\n\tmux *http.ServeMux\n\tdocrootSource *docRootImpl\n\tDocRoot string\n\tPort string\n\tIface string\n\tCallbackAddress string\n\tCallHome *CallHome\n\tsocketHandler *WebSocketService\n}\n\nfunc (service *Service) SetAppVersion(ver string) {\n\tservice.mux.HandleFunc(\"\/.ver\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(ver))\n\t})\n}\n\nfunc (service *Service) EffectiveCallbackAddress() string {\n\tif len(service.CallbackAddress) > 0 {\n\t\treturn service.CallbackAddress\n\t}\n\tif len(service.Iface) == 0 {\n\t\tpanic(\"No iface given for management port\")\n\t}\n\tip := c2.GetIpForIface(service.Iface)\n\treturn fmt.Sprintf(\"http:\/\/%s%s\/\", ip, service.Port)\n}\n\nfunc (service *Service) handleError(err error, w http.ResponseWriter) {\n\tif httpErr, ok := err.(c2.HttpError); ok {\n\t\tc2.Err.Print(httpErr.Error() + \"\\n\" + httpErr.Stack())\n\t\thttp.Error(w, httpErr.Error(), httpErr.HttpCode())\n\t} else {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\nfunc (self *Service) Subscribe(sub *node.Subscription) error {\n\tc := node.NewContext()\n\tif sel := c.Selector(self.Root()).Find(sub.Path); sel.LastErr == nil {\n\t\tcloser, notifSel := sel.Notifications(sub)\n\t\tif notifSel.LastErr != nil {\n\t\t\treturn notifSel.LastErr\n\t\t}\n\t\tsub.Notification = notifSel.Selection.Meta().(*meta.Notification)\n\t\tsub.Closer = closer\n\t} else {\n\t\treturn sel.LastErr\n\t}\n\treturn nil\n}\n\nfunc (service *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th := w.Header()\n\th.Set(\"Access-Control-Allow-Headers\", \"origin, content-type, accept\")\n\th.Set(\"Access-Control-Allow-Methods\", \"GET, POST, PUT, OPTIONS, DELETE, PATCH\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\tvar err error\n\tvar payload node.Node\n\tvar sel node.Selector\n\tc := node.NewContext()\n\tif sel = c.Selector(service.Root()).FindUrl(r.URL); sel.LastErr == nil {\n\t\tif sel.Selection == nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tservice.handleError(err, w)\n\t\t\treturn\n\t\t}\n\t\tswitch r.Method {\n\t\tcase \"DELETE\":\n\t\t\terr = sel.Selection.Delete()\n\t\tcase \"GET\":\n\t\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\t\toutput := node.NewJsonWriter(w).Node()\n\t\t\terr = sel.InsertInto(output).LastErr\n\t\tcase \"PUT\":\n\t\t\terr = sel.UpsertFrom(node.NewJsonReader(r.Body).Node()).LastErr\n\t\tcase \"POST\":\n\t\t\tif meta.IsAction(sel.Selection.Meta()) {\n\t\t\t\tinput := node.NewJsonReader(r.Body).Node()\n\t\t\t\tif outputSel := sel.Action(input); outputSel.Selection != nil {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\t\t\t\terr = outputSel.InsertInto(node.NewJsonWriter(w).Node()).LastErr\n\t\t\t\t} else {\n\t\t\t\t\terr = outputSel.LastErr\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpayload = node.NewJsonReader(r.Body).Node()\n\t\t\t\terr = sel.InsertFrom(payload).LastErr\n\t\t\t}\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\t}\n\t} else {\n\t\terr = sel.LastErr\n\t}\n\n\tif err != nil {\n\t\tservice.handleError(err, w)\n\t}\n}\n\ntype docRootImpl struct {\n\tdocroot meta.StreamSource\n}\n\nfunc (service *Service) SetDocRoot(docroot meta.StreamSource) {\n\tservice.docrootSource = &docRootImpl{docroot: docroot}\n\tservice.mux.Handle(\"\/ui\/\", http.StripPrefix(\"\/ui\/\", service.docrootSource))\n}\n\nfunc (service *Service) AddHandler(pattern string, handler http.Handler) {\n\tservice.mux.Handle(pattern, http.StripPrefix(pattern, handler))\n}\n\nfunc (service *Service) Listen() {\n\ts := &http.Server{\n\t\tAddr: service.Port,\n\t\tHandler: service.mux,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tc2.Info.Println(\"Starting RESTCONF interface\")\n\tc2.Err.Fatal(s.ListenAndServe())\n}\n\nfunc (service *Service) Stop() {\n\tif service.docrootSource != nil && service.docrootSource.docroot != nil {\n\t\tmeta.CloseResource(service.docrootSource.docroot)\n\t}\n\t\/\/ TODO - actually stop service\n}\n\nfunc (service *docRootImpl) ServeHTTP(wtr http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Path\n\tif path == \"\" {\n\t\tpath = \"index.html\"\n\t}\n\tif rdr, err := service.docroot.OpenStream(path); err != nil {\n\t\thttp.Error(wtr, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tdefer meta.CloseResource(rdr)\n\t\text := filepath.Ext(path)\n\t\tctype := mime.TypeByExtension(ext)\n\t\twtr.Header().Set(\"Content-Type\", ctype)\n\t\tif _, err = io.Copy(wtr, rdr); err != nil {\n\t\t\thttp.Error(wtr, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\t\/\/ Eventually support this but need file seeker to do that.\n\t\t\/\/ http.ServeContent(wtr, req, path, time.Now(), &ReaderPeeker{rdr})\n\t}\n}\n\nfunc (service *Service) meta(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t}\n\tif p := strings.TrimPrefix(r.URL.Path, \"\/meta\/\"); len(p) < len(r.URL.Path) {\n\t\tr.URL.Path = p\n\t} else {\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\tm := service.Root().Meta().(*meta.Module)\n\t_, noexpand := r.URL.Query()[\"noexpand\"]\n\n\tc := node.NewContext()\n\tsel := c.Selector(node.SelectModule(m, !noexpand))\n\tif sel = sel.FindUrl(r.URL); sel.LastErr != nil {\n\t\tservice.handleError(sel.LastErr, w)\n\t\treturn\n\t} else if sel.Selection == nil {\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(\".json\"))\n\t\toutput := node.NewJsonWriter(w).Node()\n\t\tif err := sel.InsertInto(output).LastErr; err != nil {\n\t\t\tservice.handleError(err, w)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (service *Service) resources(w http.ResponseWriter, r *http.Request) {\n\t\/\/ RESTCONF Sec. 3.1\n\tfmt.Fprintf(w, `\"xrd\" : { \"link\" : { \"@rel\" : \"restconf\", \"@href\" : \"\/restconf\" } } }`)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/*\nTODO\ncreate new keys\nadd user for key \nadd admin for key \nfront end with appache\nappache user auth \ndockerize \napi doc\nfigure out way to load secrets \nencrypt keys in DB \nDB backup\n\n*\/\n\nimport (\n \"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n \"log\"\n\t\"os\"\n\t \"html\/template\"\n\t\"net\/http\"\n\t\"io\"\n\t\"strings\"\n\t\"strconv\"\n)\n\nvar db *sql.DB\n\nvar templates = template.Must(template.ParseFiles(\"index.html\"))\n\n\nfunc setupDatabase( hostName string, pgPassword string ) { \/\/ todo pass in hostname, port, username\n var err error\n\n \/\/ set up DB\n db, err = sql.Open(\"postgres\", \"password='\"+pgPassword+\"' user=postgres dbname=postgres host=\"+hostName+\" port=5432 sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ a postgess bigint is signed 64 bit int \n\tsqlSetup := `\n CREATE TABLE IF NOT EXISTS keys ( kID BIGINT NOT NULL, kVal bytea NOT NULL , oID BIGINT NOT NULL, PRIMARY KEY( kID ) );\n CREATE TABLE IF NOT EXISTS keyUsers ( kID BIGINT NOT NULL, uID BIGINT NOT NULL , PRIMARY KEY( kID,uID ) );\n CREATE TABLE IF NOT EXISTS keyAdmins ( kID BIGINT NOT NULL, uID BIGINT NOT NULL , PRIMARY KEY( kID,uID ) );\n `\n _, err = db.Exec(sqlSetup)\n if err != nil {\n log.Println(\"sql fatal error in setupDatabase\")\n log.Printf(\"%q\\n\", err)\n }\n\n\tlog.Printf(\"Setup Database\\n\")\n}\n\n\nfunc getKey( keyID, userID int64 ) (string) {\n\t\/\/ note if using mySQL use ? but Postgres is $1 in prepare statements \n\tstmt, err := db.Prepare(\n\t\t\"SELECT keys.kVal FROM keyUsers JOIN keys ON keys.kID = keyUsers.kID WHERE keyUsers.uID = $2 AND keyUsers.kID = $1\")\n\tif err != nil {\n\t\tlog.Println(\"sql fatal error in getKey prep\")\n\t\tlog.Fatal(err)\n\t}\n\tvar keyVal string\n\terr = stmt.QueryRow(keyID,userID).Scan( &keyVal ) \n\tswitch {\n\tcase err == sql.ErrNoRows:\n log.Printf(\"no key found\")\n\tcase err != nil:\n\t\tlog.Println(\"sql fatal error in getKey querry\")\n\t\tlog.Fatal(err)\n\tdefault:\n\t\tlog.Println(\"got key \" + keyVal)\n\t\treturn keyVal;\n\t}\n\treturn \"\"; \n}\n\n\nfunc mainHandler(w http.ResponseWriter, r *http.Request) {\n if r.URL.Path != \"\/\" {\n http.NotFound(w, r)\n return\n }\n\n type PageData struct {\n Junk string\n }\n data := PageData{Junk: \"nothing\"}\n err := templates.ExecuteTemplate(w, \"index.html\", data)\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}\n\n\nfunc searchKeyHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\t\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tlog.Println(\"got URL path \" + r.URL.Path )\n\n\telements := strings.Split( r.URL.Path , \"\/\" );\n\t\/\/ note since the URL starts with a \/, we get an empty element as first thing in array\n\t\n\tif len(elements) != 4 {\n\t\thttp.NotFound(w, r)\n return\n\t}\n\t\n\tvar keyID int64 = 0;\n\tvar userID int64 = 1;\n\n\tkeyID,err = strconv.ParseInt( elements[3] , 0, 64 );\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tlog.Println(\"GET keyID=\", keyID, \" userID=\",userID )\n\t\n\tio.WriteString(w, getKey(keyID,userID) )\n}\n\n\nfunc createKeyHandler(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n \n if r.Method != \"POST\" {\n http.Error(w, \"method must be POST\", http.StatusMethodNotAllowed)\n return\n }\n\n\terr := r.ParseForm()\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n\n\tvar keyVal string = r.FormValue(\"keyVal\");\n\tvar userID int64 = 1;\n\tvar keyID int64 = 104;\n\t\n\tlog.Println(\"POST keyID=\", keyID, \"userID=\",userID, \"keyVal=\",keyVal )\n\t\/\/ TODO - save to DB \n}\n\n\nfunc main() {\n\tvar err error\n\t\n\tvar pgPassword string = os.Getenv(\"SECM_DB_SECRET\")\n\tvar hostName string = os.Args[1]\n\t\n\tsetupDatabase(hostName,pgPassword)\n defer db.Close()\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttp.HandleFunc(\"\/\", mainHandler)\n\thttp.HandleFunc(\"\/v1\/key\/\", searchKeyHandler)\n\thttp.HandleFunc(\"\/v1\/key\", createKeyHandler)\n\n http.ListenAndServe(\":8080\", nil)\n}\n<commit_msg>return new keyID on post<commit_after>package main\n\n\/*\nTODO\ncreate new keys\nadd user for key \nadd admin for key \nfront end with appache\nappache user auth \ndockerize \napi doc\nfigure out way to load secrets \nencrypt keys in DB \nDB backup\n\n*\/\n\nimport (\n \"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n \"log\"\n\t\"os\"\n\t \"html\/template\"\n\t\"net\/http\"\n\t\"io\"\n\t\"strings\"\n\t\"strconv\"\n)\n\nvar db *sql.DB\n\nvar templates = template.Must(template.ParseFiles(\"index.html\"))\n\n\nfunc setupDatabase( hostName string, pgPassword string ) { \/\/ todo pass in hostname, port, username\n var err error\n\n \/\/ set up DB\n db, err = sql.Open(\"postgres\", \"password='\"+pgPassword+\"' user=postgres dbname=postgres host=\"+hostName+\" port=5432 sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ a postgess bigint is signed 64 bit int \n\tsqlSetup := `\n CREATE TABLE IF NOT EXISTS keys ( kID BIGINT NOT NULL, kVal bytea NOT NULL , oID BIGINT NOT NULL, PRIMARY KEY( kID ) );\n CREATE TABLE IF NOT EXISTS keyUsers ( kID BIGINT NOT NULL, uID BIGINT NOT NULL , PRIMARY KEY( kID,uID ) );\n CREATE TABLE IF NOT EXISTS keyAdmins ( kID BIGINT NOT NULL, uID BIGINT NOT NULL , PRIMARY KEY( kID,uID ) );\n `\n _, err = db.Exec(sqlSetup)\n if err != nil {\n log.Println(\"sql fatal error in setupDatabase\")\n log.Printf(\"%q\\n\", err)\n }\n\n\tlog.Printf(\"Setup Database\\n\")\n}\n\n\nfunc getKey( keyID int64, userID int64 ) (string) {\n\t\/\/ note if using mySQL use ? but Postgres is $1 in prepare statements \n\tstmt, err := db.Prepare(\n\t\t\"SELECT keys.kVal FROM keyUsers JOIN keys ON keys.kID = keyUsers.kID WHERE keyUsers.uID = $2 AND keyUsers.kID = $1\")\n\tif err != nil {\n\t\tlog.Println(\"sql fatal error in getKey prep\")\n\t\tlog.Fatal(err)\n\t}\n\tvar keyVal string\n\terr = stmt.QueryRow(keyID,userID).Scan( &keyVal ) \n\tswitch {\n\tcase err == sql.ErrNoRows:\n log.Printf(\"no key found\")\n\tcase err != nil:\n\t\tlog.Println(\"sql fatal error in getKey querry\")\n\t\tlog.Fatal(err)\n\tdefault:\n\t\tlog.Println(\"got key \" + keyVal)\n\t\treturn keyVal;\n\t}\n\treturn \"\"; \n}\n\n\nfunc createKey( userID int64, keyVal string ) (int64) {\n\tvar keyID int64 = 104;\n\n\treturn keyID;\n}\n\n\nfunc mainHandler(w http.ResponseWriter, r *http.Request) {\n if r.URL.Path != \"\/\" {\n http.NotFound(w, r)\n return\n }\n\n type PageData struct {\n Junk string\n }\n data := PageData{Junk: \"nothing\"}\n err := templates.ExecuteTemplate(w, \"index.html\", data)\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}\n\n\nfunc searchKeyHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\t\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tlog.Println(\"got URL path \" + r.URL.Path )\n\n\telements := strings.Split( r.URL.Path , \"\/\" );\n\t\/\/ note since the URL starts with a \/, we get an empty element as first thing in array\n\t\n\tif len(elements) != 4 {\n\t\thttp.NotFound(w, r)\n return\n\t}\n\t\n\tvar keyID int64 = 0;\n\tvar userID int64 = 1;\n\n\tkeyID,err = strconv.ParseInt( elements[3] , 0, 64 );\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tlog.Println(\"GET keyID=\", keyID, \" userID=\",userID )\n\n\tio.WriteString(w, getKey(keyID,userID) )\n}\n\n\nfunc createKeyHandler(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n \n if r.Method != \"POST\" {\n http.Error(w, \"method must be POST\", http.StatusMethodNotAllowed)\n return\n }\n\n\terr := r.ParseForm()\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n\n\tvar keyVal string = r.FormValue(\"keyVal\");\n\tvar userID int64 = 1;\n\n\tvar keyID int64 = createKey( userID, keyVal );\n\n\tlog.Println(\"POST keyID=\", keyID, \"userID=\",userID, \"keyVal=\",keyVal )\n\n\tio.WriteString(w, \"{ keyID: \" )\n\tio.WriteString(w, strconv.FormatInt(keyID,10) )\n\tio.WriteString(w, \" }\" )\n}\n\n\nfunc main() {\n\tvar err error\n\t\n\tvar pgPassword string = os.Getenv(\"SECM_DB_SECRET\")\n\tvar hostName string = os.Args[1]\n\t\n\tsetupDatabase(hostName,pgPassword)\n defer db.Close()\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttp.HandleFunc(\"\/\", mainHandler)\n\thttp.HandleFunc(\"\/v1\/key\/\", searchKeyHandler)\n\thttp.HandleFunc(\"\/v1\/key\", createKeyHandler)\n\n http.ListenAndServe(\":8080\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package raft\n\nimport (\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ Config provides any necessary configuraiton to\n\/\/ the Raft server\ntype Config struct {\n\t\/\/ Time in follower state without a leader before we attempt an election\n\tHeartbeatTimeout time.Duration\n\n\t\/\/ Time in candidate state without a leader before we attempt an election\n\tElectionTimeout time.Duration\n\n\t\/\/ Time without an Apply() operation before we heartbeat to ensure\n\t\/\/ a timely commit. Due to random staggering, may be delayed as much as\n\t\/\/ 2x this value.\n\tCommitTimeout time.Duration\n\n\t\/\/ MaxAppendEntries controls the maximum number of append entries\n\t\/\/ to send at once. We want to strike a balance between efficiency\n\t\/\/ and avoiding waste if the follower is going to reject because of\n\t\/\/ an inconsistent log\n\tMaxAppendEntries int\n\n\t\/\/ If we are a member of a cluster, and RemovePeer is invoked for the\n\t\/\/ local node, then we forget all peers and transition into the follower state.\n\t\/\/ If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise,\n\t\/\/ we can become a leader of a cluster containing only this node.\n\tShutdownOnRemove bool\n\n\t\/\/ TrailingLogs controls how many logs we leave after a snapshot. This is\n\t\/\/ used so that we can quickly replay logs on a follower instead of being\n\t\/\/ forced to send an entire snapshot.\n\tTrailingLogs uint64\n\n\t\/\/ SnapshotInterval controls how often we check if we should perform a snapshot.\n\t\/\/ We randomly stagger between this value and 2x this value to avoid the entire\n\t\/\/ cluster from performing a snapshot at once\n\tSnapshotInterval time.Duration\n\n\t\/\/ SnapshotThreshold controls how many outstanding logs there must be before\n\t\/\/ we perform a snapshot. This is to prevent excessive snapshots when we can\n\t\/\/ just replay a small set of logs.\n\tSnapshotThreshold uint64\n\n\t\/\/ EnableSingleMode allows for a single node mode of operation. This\n\t\/\/ is false by default, which prevents a lone node from electing itself\n\t\/\/ leader.\n\tEnableSingleNode bool\n\n\t\/\/ LeaderLeaveTimeout is used to control how long the \"lease\" lasts\n\t\/\/ for being the leader without being able to contact a quorum\n\t\/\/ of nodes. If we reach this interval without contact, we will\n\t\/\/ step down as leader.\n\tLeaderLeaseTimeout time.Duration\n\n\t\/\/ LogOutput is used as a sink for logs. Defaults to os.Stderr.\n\tLogOutput io.Writer\n}\n\nfunc DefaultConfig() *Config {\n\treturn &Config{\n\t\tHeartbeatTimeout: 1000 * time.Millisecond,\n\t\tElectionTimeout: 400 * time.Millisecond,\n\t\tCommitTimeout: 50 * time.Millisecond,\n\t\tMaxAppendEntries: 64,\n\t\tShutdownOnRemove: true,\n\t\tTrailingLogs: 10240,\n\t\tSnapshotInterval: 120 * time.Second,\n\t\tSnapshotThreshold: 8192,\n\t\tEnableSingleNode: false,\n\t\tLeaderLeaseTimeout: 500 * time.Millisecond,\n\t}\n}\n<commit_msg>Increase ElectionTimeout to 1s<commit_after>package raft\n\nimport (\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ Config provides any necessary configuraiton to\n\/\/ the Raft server\ntype Config struct {\n\t\/\/ Time in follower state without a leader before we attempt an election\n\tHeartbeatTimeout time.Duration\n\n\t\/\/ Time in candidate state without a leader before we attempt an election\n\tElectionTimeout time.Duration\n\n\t\/\/ Time without an Apply() operation before we heartbeat to ensure\n\t\/\/ a timely commit. Due to random staggering, may be delayed as much as\n\t\/\/ 2x this value.\n\tCommitTimeout time.Duration\n\n\t\/\/ MaxAppendEntries controls the maximum number of append entries\n\t\/\/ to send at once. We want to strike a balance between efficiency\n\t\/\/ and avoiding waste if the follower is going to reject because of\n\t\/\/ an inconsistent log\n\tMaxAppendEntries int\n\n\t\/\/ If we are a member of a cluster, and RemovePeer is invoked for the\n\t\/\/ local node, then we forget all peers and transition into the follower state.\n\t\/\/ If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise,\n\t\/\/ we can become a leader of a cluster containing only this node.\n\tShutdownOnRemove bool\n\n\t\/\/ TrailingLogs controls how many logs we leave after a snapshot. This is\n\t\/\/ used so that we can quickly replay logs on a follower instead of being\n\t\/\/ forced to send an entire snapshot.\n\tTrailingLogs uint64\n\n\t\/\/ SnapshotInterval controls how often we check if we should perform a snapshot.\n\t\/\/ We randomly stagger between this value and 2x this value to avoid the entire\n\t\/\/ cluster from performing a snapshot at once\n\tSnapshotInterval time.Duration\n\n\t\/\/ SnapshotThreshold controls how many outstanding logs there must be before\n\t\/\/ we perform a snapshot. This is to prevent excessive snapshots when we can\n\t\/\/ just replay a small set of logs.\n\tSnapshotThreshold uint64\n\n\t\/\/ EnableSingleMode allows for a single node mode of operation. This\n\t\/\/ is false by default, which prevents a lone node from electing itself\n\t\/\/ leader.\n\tEnableSingleNode bool\n\n\t\/\/ LeaderLeaveTimeout is used to control how long the \"lease\" lasts\n\t\/\/ for being the leader without being able to contact a quorum\n\t\/\/ of nodes. If we reach this interval without contact, we will\n\t\/\/ step down as leader.\n\tLeaderLeaseTimeout time.Duration\n\n\t\/\/ LogOutput is used as a sink for logs. Defaults to os.Stderr.\n\tLogOutput io.Writer\n}\n\nfunc DefaultConfig() *Config {\n\treturn &Config{\n\t\tHeartbeatTimeout: 1000 * time.Millisecond,\n\t\tElectionTimeout: 1000 * time.Millisecond,\n\t\tCommitTimeout: 50 * time.Millisecond,\n\t\tMaxAppendEntries: 64,\n\t\tShutdownOnRemove: true,\n\t\tTrailingLogs: 10240,\n\t\tSnapshotInterval: 120 * time.Second,\n\t\tSnapshotThreshold: 8192,\n\t\tEnableSingleNode: false,\n\t\tLeaderLeaseTimeout: 500 * time.Millisecond,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package txtdirect\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttests := []struct {\n\t\ttxtRecord string\n\t\texpected record\n\t\terr error\n\t}{\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 301,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"could not parse status code\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;https:\/\/google.com;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"multiple values without keys\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv1;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"unhandled version 'txtv1'\"),\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tr := record{}\n\t\terr := r.Parse(test.txtRecord)\n\n\t\tif err != nil {\n\t\t\tif test.err == nil || !strings.HasPrefix(err.Error(), test.err.Error()) {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %s\", i, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && test.err != nil {\n\t\t\tt.Errorf(\"Test %d: Expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\n\t\tif got, want := r.Version, test.expected.Version; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Version to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.To, test.expected.To; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected To to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.Code, test.expected.Code; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Code to be '%d', got '%d'\", i, want, got)\n\t\t}\n\t}\n}\n\nfunc TestHandleDefault(t *testing.T) {\n\ttestURL := \"https:\/\/0._td.txtdirect.org\"\n\n\ttests := []struct {\n\t\tstatus int\n\t\terr error\n\t}{\n\t\t{\n\t\t\t301,\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\treq, _ := http.NewRequest(\"GET\", testURL, nil)\n\t\trec := httptest.NewRecorder()\n\t\terr := Handle(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif rec.Code != test.status {\n\t\t\tt.Errorf(\"Unexpected status code: got %d wanted %d\", rec.Code, test.status)\n\t\t}\n\t}\n}\n\nfunc TestHandleSuccess(t *testing.T) {\n\ttestURL := \"https:\/\/%d._ths.txtdirect.org\"\n\n\ttests := []struct {\n\t\tstatus int\n\t\terr error\n\t}{\n\t\t{\n\t\t\t301,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t302,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t302,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t301,\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr := Handle(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif rec.Code != test.status {\n\t\t\tt.Errorf(\"Unexpected status code: got %d wanted %d\", rec.Code, test.status)\n\t\t}\n\t}\n}\n\nfunc TestHandleFailure(t *testing.T) {\n\ttestURL := \"https:\/\/%d._thf.txtdirect.org\"\n\n\ttests := []struct {\n\t\terr error\n\t}{\n\t\t{\n\t\t\tfmt.Errorf(\"could not parse record: unhandled version\"),\n\t\t},\n\t\t{\n\t\t\tfmt.Errorf(\"could not parse record: unhandled version\"),\n\t\t},\n\t\t{\n\t\t\tfmt.Errorf(\"could not parse record: multiple values without keys\"),\n\t\t},\n\t\t{\n\t\t\tfmt.Errorf(\"could not parse record: unhandled version\"),\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr := Handle(rec, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil)\")\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(err.Error(), test.err.Error()) {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n<commit_msg>Make tests more future proof<commit_after>package txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\ttests := []struct {\n\t\ttxtRecord string\n\t\texpected record\n\t\terr error\n\t}{\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 301,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;code=302\",\n\t\t\trecord{\n\t\t\t\tVersion: \"txtv0\",\n\t\t\t\tTo: \"https:\/\/example.com\/\",\n\t\t\t\tCode: 302,\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"could not parse status code\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv0;https:\/\/example.com\/;https:\/\/google.com;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"multiple values without keys\"),\n\t\t},\n\t\t{\n\t\t\t\"v=txtv1;to=https:\/\/example.com\/;code=test\",\n\t\t\trecord{},\n\t\t\tfmt.Errorf(\"unhandled version 'txtv1'\"),\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tr := record{}\n\t\terr := r.Parse(test.txtRecord)\n\n\t\tif err != nil {\n\t\t\tif test.err == nil || !strings.HasPrefix(err.Error(), test.err.Error()) {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %s\", i, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && test.err != nil {\n\t\t\tt.Errorf(\"Test %d: Expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\n\t\tif got, want := r.Version, test.expected.Version; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Version to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.To, test.expected.To; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected To to be '%s', got '%s'\", i, want, got)\n\t\t}\n\t\tif got, want := r.Code, test.expected.Code; got != want {\n\t\t\tt.Errorf(\"Test %d: Expected Code to be '%d', got '%d'\", i, want, got)\n\t\t}\n\t}\n}\n\nfunc TestHandleDefault(t *testing.T) {\n\ttestURL := \"https:\/\/%d._td.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._td.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", testURL, nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Handle(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestHandleSuccess(t *testing.T) {\n\ttestURL := \"https:\/\/%d._ths.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._ths.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Handle(rec, req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t}\n\t}\n}\n\nfunc TestHandleFailure(t *testing.T) {\n\ttestURL := \"https:\/\/%d._thf.txtdirect.org\"\n\tdnsURL := \"_redirect.%d._thf.txtdirect.org\"\n\n\tfor i := 0; ; i++ {\n\t\t_, err := net.LookupTXT(fmt.Sprintf(dnsURL, i))\n\t\tif err != nil {\n\t\t\tlog.Print(err, i)\n\t\t\tbreak\n\t\t}\n\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(testURL, i), nil)\n\t\trec := httptest.NewRecorder()\n\t\terr = Handle(rec, req)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error, got nil)\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Pani Networks\n\/\/ All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/romana\/core\/common\"\n\t\"github.com\/romana\/core\/romana\/adaptor\"\n\t\"github.com\/romana\/core\/romana\/util\"\n\t\"github.com\/romana\/core\/tenant\"\n\n\tms \"github.com\/mitchellh\/mapstructure\"\n\tcli \"github.com\/spf13\/cobra\"\n\tconfig \"github.com\/spf13\/viper\"\n)\n\nvar externalID string\n\n\/\/ tenantData holds tenant information received from tenant\n\/\/ service and its corresponding name received from adaptors.\ntype tenantData struct {\n\tTenant tenant.Tenant\n\tSegments []tenant.Segment\n}\n\n\/\/ tenantCmd represents the tenant commands\nvar tenantCmd = &cli.Command{\n\tUse: \"tenant [create|delete|show|list]\",\n\tShort: \"Create, Delete, Show or List Tenant Details.\",\n\tLong: `Create, Delete, Show or List Tenant Details.\n\ntenant requires a subcommand, e.g. ` + \"`romana tenant create`.\" + `\n\nFor more information, please check http:\/\/romana.io\n`,\n}\n\nfunc init() {\n\ttenantCmd.AddCommand(tenantCreateCmd)\n\ttenantCmd.AddCommand(tenantShowCmd)\n\ttenantCmd.AddCommand(tenantListCmd)\n\ttenantCmd.AddCommand(tenantDeleteCmd)\n\ttenantCreateCmd.Flags().StringVarP(&externalID, \"externalid\", \"i\", \"\", \"External ID\")\n}\n\nvar tenantCreateCmd = &cli.Command{\n\tUse: \"create [tenantname]\",\n\tShort: \"Create a new tenant.\",\n\tLong: `Create a new tenant.\n\n --externalid <External ID> # Create tenant with a specific external ID mentioned here.`,\n\tRunE: tenantCreate,\n\tSilenceUsage: true,\n}\n\nvar tenantShowCmd = &cli.Command{\n\tUse: \"show [tenantname1][tenantname2]...\",\n\tShort: \"Show tenant details.\",\n\tLong: `Show tenant details.`,\n\tRunE: tenantShow,\n\tSilenceUsage: true,\n}\n\nvar tenantListCmd = &cli.Command{\n\tUse: \"list\",\n\tShort: \"List all tenants.\",\n\tLong: `List all tenants.`,\n\tRunE: tenantList,\n\tSilenceUsage: true,\n}\n\nvar tenantDeleteCmd = &cli.Command{\n\tUse: \"delete\",\n\tShort: \"Delete a specific tenant.\",\n\tLong: `Delete a specific tenant.`,\n\tRunE: tenantDelete,\n\tSilenceUsage: true,\n}\n\n\/\/ tenantCreate accepts tenant names as arguments for\n\/\/ creating new tenants for platform being set in\n\/\/ config file (~\/.romana.yaml) or via command line\n\/\/ flags.\nfunc tenantCreate(cmd *cli.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\tfmt.Sprintf(\"expected at-least 1 argument, saw none\"))\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenants := []tenant.Tenant{}\n\tfor _, tnt := range args {\n\t\tvar tntUUID string\n\n\t\t\/\/ uncomment this once creating tenant for all\n\t\t\/\/ platforms are ready. Till then tenants needs\n\t\t\/\/ to be manually created for every platform and\n\t\t\/\/ then created using romana command line tools.\n\t\t\/\/ if adaptor.CreateTenant(tnt) != nil {\n\t\t\/\/ \treturn err\n\t\t\/\/ }\n\t\t\/\/ adaptor.GetTenantUUID below wouldn't be needed once\n\t\t\/\/ adaptor.CreateTenant is supported because CreateTenant\n\t\t\/\/ will create a new tenant for a specific platform\n\t\t\/\/ instead of polling for an already created one using\n\t\t\/\/ GetTenantUUID.\n\t\tif externalID == \"\" {\n\t\t\ttntUUID, err = adaptor.GetTenantUUID(tnt)\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase util.ErrUnimplementedFeature:\n\t\t\t\t\treturn util.UsageError(cmd,\n\t\t\t\t\t\t\"[tenantname] --externalid <externalid> should be provided.\")\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif len(args) > 1 {\n\t\t\t\treturn util.UsageError(cmd,\n\t\t\t\t\t\"[tenantname] --externalid <externalid> should be provided.\\n\"+\n\t\t\t\t\t\t\"Multiple tenants can't be created with same external ID.\",\n\t\t\t\t)\n\t\t\t}\n\t\t\ttntUUID = externalID\n\t\t}\n\n\t\tdata := tenant.Tenant{Name: tnt, ExternalID: tntUUID}\n\t\tvar result map[string]interface{}\n\t\terr = client.Post(tenantURL+\"\/tenants\", data, &result)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, tFound := result[\"name\"]\n\t\tif tFound {\n\t\t\tvar t tenant.Tenant\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &t}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = decoder.Decode(result)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttenants = append(tenants, t)\n\t\t} else {\n\t\t\tvar h common.HttpError\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &h}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = decoder.Decode(result)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif config.GetString(\"Format\") == \"json\" {\n\t\t\t\tstatus, _ := json.MarshalIndent(h, \"\", \"\\t\")\n\t\t\t\tfmt.Println(string(status))\n\t\t\t\treturn fmt.Errorf(\"HTTP Error\")\n\t\t\t}\n\t\t\treturn fmt.Errorf(h.Error())\n\t\t}\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Println(\"New Tenant(s) Added:\")\n\t\tfmt.Fprintln(w, \"Id\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintf(w, \"%d \\t %s \\t %s \\t\", t.ID,\n\t\t\t\tt.Name, t.ExternalID)\n\t\t\tfmt.Fprintf(w, \"\\n\")\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantShow displays tenant details using tenant name\n\/\/ or tenant external id as input.\nfunc tenantShow(cmd *cli.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\tfmt.Sprintf(\"expected at-least 1 argument, saw none\"))\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := []tenant.Tenant{}\n\ttenants := []tenantData{}\n\terr = client.Get(tenantURL+\"\/tenants\", &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, t := range data {\n\t\tfor _, n := range args {\n\t\t\tif t.Name == n || t.ExternalID == n {\n\t\t\t\tseg := []tenant.Segment{}\n\t\t\t\terr = client.Get(tenantURL+\"\/tenants\/\"+t.ExternalID+\"\/segments\", &seg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ttenants = append(tenants, tenantData{t, seg})\n\t\t\t}\n\t\t}\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Fprintln(w, \"ID\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t\t\"Segments\", \"\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintf(w, \"%d \\t %s \\t %s \\t\", t.Tenant.ID,\n\t\t\t\tt.Tenant.Name, t.Tenant.ExternalID)\n\t\t\tfor _, s := range t.Segments {\n\t\t\t\tfmt.Fprintf(w, \"%s, \", s.Name)\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \"\\n\")\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantList displays tenant list in either json or\n\/\/ tablular format depending on commanf line flags or\n\/\/ config file options.\nfunc tenantList(cmd *cli.Command, args []string) error {\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenants := []tenant.Tenant{}\n\terr = client.Get(tenantURL+\"\/tenants\", &tenants)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Println(\"Tenant List\")\n\t\tfmt.Fprintln(w, \"Id\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintln(w, t.ID, \"\\t\",\n\t\t\t\tt.Name, \"\\t\",\n\t\t\t\tt.ExternalID, \"\\t\",\n\t\t\t)\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantDelete takes tenant name as input for deleting a specific\n\/\/ romana tenant, the equivalent tenant for specific platform\n\/\/ still needs to be deleted manually until handled here via\n\/\/ adaptor.\nfunc tenantDelete(cmd *cli.Command, args []string) error {\n\tfmt.Println(\"Unimplemented: Delete a specific tenant.\")\n\treturn nil\n}\n<commit_msg>Better message when no segments were defined.<commit_after>\/\/ Copyright (c) 2016 Pani Networks\n\/\/ All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/romana\/core\/common\"\n\t\"github.com\/romana\/core\/romana\/adaptor\"\n\t\"github.com\/romana\/core\/romana\/util\"\n\t\"github.com\/romana\/core\/tenant\"\n\n\tms \"github.com\/mitchellh\/mapstructure\"\n\tcli \"github.com\/spf13\/cobra\"\n\tconfig \"github.com\/spf13\/viper\"\n)\n\nvar externalID string\n\n\/\/ tenantData holds tenant information received from tenant\n\/\/ service and its corresponding name received from adaptors.\ntype tenantData struct {\n\tTenant tenant.Tenant\n\tSegments []tenant.Segment\n}\n\n\/\/ tenantCmd represents the tenant commands\nvar tenantCmd = &cli.Command{\n\tUse: \"tenant [create|delete|show|list]\",\n\tShort: \"Create, Delete, Show or List Tenant Details.\",\n\tLong: `Create, Delete, Show or List Tenant Details.\n\ntenant requires a subcommand, e.g. ` + \"`romana tenant create`.\" + `\n\nFor more information, please check http:\/\/romana.io\n`,\n}\n\nfunc init() {\n\ttenantCmd.AddCommand(tenantCreateCmd)\n\ttenantCmd.AddCommand(tenantShowCmd)\n\ttenantCmd.AddCommand(tenantListCmd)\n\ttenantCmd.AddCommand(tenantDeleteCmd)\n\ttenantCreateCmd.Flags().StringVarP(&externalID, \"externalid\", \"i\", \"\", \"External ID\")\n}\n\nvar tenantCreateCmd = &cli.Command{\n\tUse: \"create [tenantname]\",\n\tShort: \"Create a new tenant.\",\n\tLong: `Create a new tenant.\n\n --externalid <External ID> # Create tenant with a specific external ID mentioned here.`,\n\tRunE: tenantCreate,\n\tSilenceUsage: true,\n}\n\nvar tenantShowCmd = &cli.Command{\n\tUse: \"show [tenantname1][tenantname2]...\",\n\tShort: \"Show tenant details.\",\n\tLong: `Show tenant details.`,\n\tRunE: tenantShow,\n\tSilenceUsage: true,\n}\n\nvar tenantListCmd = &cli.Command{\n\tUse: \"list\",\n\tShort: \"List all tenants.\",\n\tLong: `List all tenants.`,\n\tRunE: tenantList,\n\tSilenceUsage: true,\n}\n\nvar tenantDeleteCmd = &cli.Command{\n\tUse: \"delete\",\n\tShort: \"Delete a specific tenant.\",\n\tLong: `Delete a specific tenant.`,\n\tRunE: tenantDelete,\n\tSilenceUsage: true,\n}\n\n\/\/ tenantCreate accepts tenant names as arguments for\n\/\/ creating new tenants for platform being set in\n\/\/ config file (~\/.romana.yaml) or via command line\n\/\/ flags.\nfunc tenantCreate(cmd *cli.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\tfmt.Sprintf(\"expected at-least 1 argument, saw none\"))\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenants := []tenant.Tenant{}\n\tfor _, tnt := range args {\n\t\tvar tntUUID string\n\n\t\t\/\/ uncomment this once creating tenant for all\n\t\t\/\/ platforms are ready. Till then tenants needs\n\t\t\/\/ to be manually created for every platform and\n\t\t\/\/ then created using romana command line tools.\n\t\t\/\/ if adaptor.CreateTenant(tnt) != nil {\n\t\t\/\/ \treturn err\n\t\t\/\/ }\n\t\t\/\/ adaptor.GetTenantUUID below wouldn't be needed once\n\t\t\/\/ adaptor.CreateTenant is supported because CreateTenant\n\t\t\/\/ will create a new tenant for a specific platform\n\t\t\/\/ instead of polling for an already created one using\n\t\t\/\/ GetTenantUUID.\n\t\tif externalID == \"\" {\n\t\t\ttntUUID, err = adaptor.GetTenantUUID(tnt)\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase util.ErrUnimplementedFeature:\n\t\t\t\t\treturn util.UsageError(cmd,\n\t\t\t\t\t\t\"[tenantname] --externalid <externalid> should be provided.\")\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif len(args) > 1 {\n\t\t\t\treturn util.UsageError(cmd,\n\t\t\t\t\t\"[tenantname] --externalid <externalid> should be provided.\\n\"+\n\t\t\t\t\t\t\"Multiple tenants can't be created with same external ID.\",\n\t\t\t\t)\n\t\t\t}\n\t\t\ttntUUID = externalID\n\t\t}\n\n\t\tdata := tenant.Tenant{Name: tnt, ExternalID: tntUUID}\n\t\tvar result map[string]interface{}\n\t\terr = client.Post(tenantURL+\"\/tenants\", data, &result)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, tFound := result[\"name\"]\n\t\tif tFound {\n\t\t\tvar t tenant.Tenant\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &t}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = decoder.Decode(result)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttenants = append(tenants, t)\n\t\t} else {\n\t\t\tvar h common.HttpError\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &h}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = decoder.Decode(result)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif config.GetString(\"Format\") == \"json\" {\n\t\t\t\tstatus, _ := json.MarshalIndent(h, \"\", \"\\t\")\n\t\t\t\tfmt.Println(string(status))\n\t\t\t\treturn fmt.Errorf(\"HTTP Error\")\n\t\t\t}\n\t\t\treturn fmt.Errorf(h.Error())\n\t\t}\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Println(\"New Tenant(s) Added:\")\n\t\tfmt.Fprintln(w, \"Id\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintf(w, \"%d \\t %s \\t %s \\t\", t.ID,\n\t\t\t\tt.Name, t.ExternalID)\n\t\t\tfmt.Fprintf(w, \"\\n\")\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantShow displays tenant details using tenant name\n\/\/ or tenant external id as input.\nfunc tenantShow(cmd *cli.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\tfmt.Sprintf(\"expected at-least 1 argument, saw none\"))\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := []tenant.Tenant{}\n\ttenants := []tenantData{}\n\terr = client.Get(tenantURL+\"\/tenants\", &data)\n\tif err != nil {\n\t\thttpErr, ok := err.(common.HttpError)\n\t\t\/\/ A 404 here means that no tenants have been defined, yet. That's not\n\t\t\/\/ an error.\n\t\tif ok && httpErr.StatusCode == http.StatusNotFound {\n\t\t\tfmt.Println(\"No tenants are currently defined\")\n\t\t}\n\t\treturn err\n\t}\n\n\tfor _, t := range data {\n\t\tfor _, n := range args {\n\t\t\tif t.Name == n || t.ExternalID == n {\n\t\t\t\tseg := []tenant.Segment{}\n\t\t\t\terr = client.Get(tenantURL+\"\/tenants\/\"+t.ExternalID+\"\/segments\", &seg)\n\t\t\t\tif err != nil {\n\t\t\t\t\thttpErr, ok := err.(common.HttpError)\n\t\t\t\t\t\/\/ A 404 just means that there are no segments defined yet.\n\t\t\t\t\t\/\/ That's not an error. That's why we only return an error\n\t\t\t\t\t\/\/ here if it's a non-HTTP error or the HTTP status code is\n\t\t\t\t\t\/\/ something besides 404.\n\t\t\t\t\tif !ok || httpErr.StatusCode != http.StatusNotFound {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttenants = append(tenants, tenantData{t, seg})\n\t\t\t}\n\t\t}\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Fprintln(w, \"ID\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t\t\"Segments\", \"\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintf(w, \"%d \\t %s \\t %s \\t\", t.Tenant.ID,\n\t\t\t\tt.Tenant.Name, t.Tenant.ExternalID)\n\t\t\tif len(t.Segments) > 0 {\n\t\t\t\tfor _, s := range t.Segments {\n\t\t\t\t\tfmt.Fprintf(w, \"%s, \", s.Name)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"(no segments defined)\")\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \"\\n\")\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantList displays tenant list in either json or\n\/\/ tablular format depending on commanf line flags or\n\/\/ config file options.\nfunc tenantList(cmd *cli.Command, args []string) error {\n\trootURL := config.GetString(\"RootURL\")\n\n\tclient, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootURL))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenantURL, err := client.GetServiceUrl(\"tenant\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttenants := []tenant.Tenant{}\n\terr = client.Get(tenantURL+\"\/tenants\", &tenants)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tbody, err := json.MarshalIndent(tenants, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(body))\n\t} else {\n\t\tw := new(tabwriter.Writer)\n\t\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\t\tfmt.Println(\"Tenant List\")\n\t\tfmt.Fprintln(w, \"Id\\t\",\n\t\t\t\"Tenant Name\\t\",\n\t\t\t\"External ID\\t\",\n\t\t)\n\t\tfor _, t := range tenants {\n\t\t\tfmt.Fprintln(w, t.ID, \"\\t\",\n\t\t\t\tt.Name, \"\\t\",\n\t\t\t\tt.ExternalID, \"\\t\",\n\t\t\t)\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ tenantDelete takes tenant name as input for deleting a specific\n\/\/ romana tenant, the equivalent tenant for specific platform\n\/\/ still needs to be deleted manually until handled here via\n\/\/ adaptor.\nfunc tenantDelete(cmd *cli.Command, args []string) error {\n\tfmt.Println(\"Unimplemented: Delete a specific tenant.\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package components\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/index0h\/go-tracker\/dao\/uuid\"\n\t\"github.com\/index0h\/go-tracker\/entities\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\nfunc Test_VisitManager_Track_Empty(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, error := checkManager.Track([16]byte{}, \"\", map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotNil(t, visit.SessionID())\n\tassert.Equal(t, \"\", visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, error)\n}\n\nfunc Test_VisitManager_Track_SessionID(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"FindClientID\", sessionID).Return(clientID, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, error := checkManager.Track(sessionID, \"\", map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.Equal(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, error)\n\n\trepository.AssertExpectations(t)\n}\n\nfunc Test_VisitManager_Track_ClientID(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, error := checkManager.Track([16]byte{}, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotEqual(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, error)\n}\n\nfunc Test_VisitManager_Track_VerifyTrue(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"Verify\", sessionID, clientID).Return(true, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, error := checkManager.Track(sessionID, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.Equal(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, error)\n\n\trepository.AssertExpectations(t)\n}\n\nfunc Test_VisitManager_Track_VerifyFalse(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"Verify\", sessionID, clientID).Return(false, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, error := checkManager.Track(sessionID, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotEqual(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.NotEmpty(t, visit.Warnings())\n\tassert.Nil(t, error)\n\n\trepository.AssertExpectations(t)\n}\n\ntype mockVisitRepository struct {\n\tmock.Mock\n}\n\nfunc (repository *mockVisitRepository) FindClientID(sessionID [16]byte) (clientID string, err error) {\n\targs := repository.Called(sessionID)\n\n\treturn args.String(0), args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) FindSessionID(clientID string) (sessionID [16]byte, err error) {\n\targs := repository.Called(clientID)\n\n\traw := args.Get(0)\n\tsessionID, _ = raw.([16]byte)\n\n\treturn sessionID, args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) Verify(sessionID [16]byte, clientID string) (ok bool, err error) {\n\targs := repository.Called(sessionID, clientID)\n\n\treturn args.Bool(0), args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) Insert(visit *entities.Visit) (err error) {\n\treturn nil\n}\n<commit_msg>small changes<commit_after>package components\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/index0h\/go-tracker\/dao\/uuid\"\n\t\"github.com\/index0h\/go-tracker\/entities\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\nfunc Test_VisitManager_Track_Empty(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, err := checkManager.Track([16]byte{}, \"\", map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotNil(t, visit.SessionID())\n\tassert.Equal(t, \"\", visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, err)\n}\n\nfunc Test_VisitManager_Track_SessionID(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"FindClientID\", sessionID).Return(clientID, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, err := checkManager.Track(sessionID, \"\", map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.Equal(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, err)\n\n\trepository.AssertExpectations(t)\n}\n\nfunc Test_VisitManager_Track_ClientID(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, err := checkManager.Track([16]byte{}, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotEqual(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, err)\n}\n\nfunc Test_VisitManager_Track_VerifyTrue(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"Verify\", sessionID, clientID).Return(true, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, err := checkManager.Track(sessionID, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.Equal(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.Empty(t, visit.Warnings())\n\tassert.Nil(t, err)\n\n\trepository.AssertExpectations(t)\n}\n\nfunc Test_VisitManager_Track_VerifyFalse(t *testing.T) {\n\trepository := new(mockVisitRepository)\n\tuuidProvider := uuid.New()\n\tlogger := log.New(os.Stdout, \"logger: \", log.Lshortfile)\n\tsessionID := uuidProvider.Generate()\n\tclientID := \"client\"\n\n\trepository.On(\"Verify\", sessionID, clientID).Return(false, nil).Once()\n\n\tcheckManager := NewVisitManager(repository, uuidProvider, logger)\n\n\tvisit, err := checkManager.Track(sessionID, clientID, map[string]string{})\n\n\tassert.NotNil(t, visit)\n\tassert.NotNil(t, visit.VisitID())\n\tassert.NotEqual(t, sessionID, visit.SessionID())\n\tassert.Equal(t, clientID, visit.ClientID())\n\tassert.Empty(t, visit.Data())\n\tassert.NotEmpty(t, visit.Warnings())\n\tassert.Nil(t, err)\n\n\trepository.AssertExpectations(t)\n}\n\ntype mockVisitRepository struct {\n\tmock.Mock\n}\n\nfunc (repository *mockVisitRepository) FindClientID(sessionID [16]byte) (clientID string, err error) {\n\targs := repository.Called(sessionID)\n\n\treturn args.String(0), args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) FindSessionID(clientID string) (sessionID [16]byte, err error) {\n\targs := repository.Called(clientID)\n\n\traw := args.Get(0)\n\tsessionID, _ = raw.([16]byte)\n\n\treturn sessionID, args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) Verify(sessionID [16]byte, clientID string) (ok bool, err error) {\n\targs := repository.Called(sessionID, clientID)\n\n\treturn args.Bool(0), args.Error(1)\n}\n\nfunc (repository *mockVisitRepository) Insert(visit *entities.Visit) (err error) {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package todolist\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n)\n\ntype FileStore struct {\n\tFileLocation string\n\tLoaded bool\n}\n\nfunc NewFileStore() *FileStore {\n\treturn &FileStore{FileLocation: \"\", Loaded: false}\n}\n\nfunc (f *FileStore) Initialize() {\n\tif f.FileLocation == \"\" {\n\t\tf.FileLocation = \".todos.json\"\n\t}\n\n\t_, err := ioutil.ReadFile(f.FileLocation)\n\tif err == nil {\n\t\tfmt.Println(\"It looks like a .todos.json file already exists! Doing nothing.\")\n\t\tos.Exit(0)\n\t}\n\tif err := ioutil.WriteFile(f.FileLocation, []byte(\"[]\"), 0644); err != nil {\n\t\tfmt.Println(\"Error writing json file\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"Todo repo initialized.\")\n}\n\nfunc (f *FileStore) Load() ([]*Todo, error) {\n\tif f.FileLocation == \"\" {\n\t\tf.FileLocation = getLocation()\n\t}\n\n\tdata, err := ioutil.ReadFile(f.FileLocation)\n\tif err != nil {\n\t\tfmt.Println(\"No todo file found!\")\n\t\tfmt.Println(\"Initialize a new todo repo by running 'todo init'\")\n\t\tos.Exit(0)\n\t}\n\n\tvar todos []*Todo\n\tjerr := json.Unmarshal(data, &todos)\n\tif jerr != nil {\n\t\tfmt.Println(\"Error reading json data\", jerr)\n\t\tos.Exit(1)\n\t}\n\tf.Loaded = true\n\n\treturn todos, nil\n}\n\nfunc (f *FileStore) Save(todos []*Todo) {\n\tdata, _ := json.Marshal(todos)\n\tif err := ioutil.WriteFile(f.FileLocation, []byte(data), 0644); err != nil {\n\t\tfmt.Println(\"Error writing json file\", err)\n\t}\n}\n\nfunc getLocation() string {\n\tlocalrepo := \".todos.json\"\n\tusr, _ := user.Current()\n\thomerepo := fmt.Sprintf(\"%s\/.todos.json\", usr.HomeDir)\n\t_, ferr := os.Stat(localrepo)\n\n\tif ferr == nil {\n\t\treturn localrepo\n\t} else {\n\t\treturn homerepo\n\t}\n}\n<commit_msg>Put the return after the os.Exit<commit_after>package todolist\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n)\n\ntype FileStore struct {\n\tFileLocation string\n\tLoaded bool\n}\n\nfunc NewFileStore() *FileStore {\n\treturn &FileStore{FileLocation: \"\", Loaded: false}\n}\n\nfunc (f *FileStore) Initialize() {\n\tif f.FileLocation == \"\" {\n\t\tf.FileLocation = \".todos.json\"\n\t}\n\n\t_, err := ioutil.ReadFile(f.FileLocation)\n\tif err == nil {\n\t\tfmt.Println(\"It looks like a .todos.json file already exists! Doing nothing.\")\n\t\tos.Exit(0)\n\t}\n\tif err := ioutil.WriteFile(f.FileLocation, []byte(\"[]\"), 0644); err != nil {\n\t\tfmt.Println(\"Error writing json file\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"Todo repo initialized.\")\n}\n\nfunc (f *FileStore) Load() ([]*Todo, error) {\n\tif f.FileLocation == \"\" {\n\t\tf.FileLocation = getLocation()\n\t}\n\n\tdata, err := ioutil.ReadFile(f.FileLocation)\n\tif err != nil {\n\t\tfmt.Println(\"No todo file found!\")\n\t\tfmt.Println(\"Initialize a new todo repo by running 'todo init'\")\n\t\tos.Exit(0)\n\t\treturn nil, err\n\t}\n\n\tvar todos []*Todo\n\tjerr := json.Unmarshal(data, &todos)\n\tif jerr != nil {\n\t\tfmt.Println(\"Error reading json data\", jerr)\n\t\tos.Exit(1)\n\t\treturn nil, jerr\n\t}\n\tf.Loaded = true\n\n\treturn todos, nil\n}\n\nfunc (f *FileStore) Save(todos []*Todo) {\n\tdata, _ := json.Marshal(todos)\n\tif err := ioutil.WriteFile(f.FileLocation, []byte(data), 0644); err != nil {\n\t\tfmt.Println(\"Error writing json file\", err)\n\t}\n}\n\nfunc getLocation() string {\n\tlocalrepo := \".todos.json\"\n\tusr, _ := user.Current()\n\thomerepo := fmt.Sprintf(\"%s\/.todos.json\", usr.HomeDir)\n\t_, ferr := os.Stat(localrepo)\n\n\tif ferr == nil {\n\t\treturn localrepo\n\t} else {\n\t\treturn homerepo\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/action\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/storage\"\n\t\"github.com\/tsuru\/tsuru\/exec\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/queue\"\n\t\"github.com\/tsuru\/tsuru\/router\"\n\t_ \"github.com\/tsuru\/tsuru\/router\/hipache\"\n\t_ \"github.com\/tsuru\/tsuru\/router\/testing\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n)\n\nfunc init() {\n\tprovision.Register(\"docker\", &dockerProvisioner{})\n}\n\nvar (\n\texecut exec.Executor\n\temutex sync.Mutex\n)\n\nfunc executor() exec.Executor {\n\temutex.Lock()\n\tdefer emutex.Unlock()\n\tif execut == nil {\n\t\texecut = exec.OsExecutor{}\n\t}\n\treturn execut\n}\n\nfunc getRouter() (router.Router, error) {\n\tr, err := config.GetString(\"docker:router\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn router.Get(r)\n}\n\ntype dockerProvisioner struct{}\n\n\/\/ Provision creates a route for the container\nfunc (p *dockerProvisioner) Provision(app provision.App) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to get router: %s\", err)\n\t\treturn err\n\t}\n\terr = app.Ready()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.AddBackend(app.GetName())\n}\n\nfunc (p *dockerProvisioner) Restart(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Got error while getting app containers: %s\", err)\n\t\treturn err\n\t}\n\tvar buf bytes.Buffer\n\tfor _, c := range containers {\n\t\terr = c.ssh(&buf, &buf, \"\/var\/lib\/tsuru\/restart\")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to restart %q: %s.\", app.GetName(), err)\n\t\t\tlog.Debug(\"Command outputs:\")\n\t\t\tlog.Debugf(\"out: %s\", &buf)\n\t\t\tlog.Debugf(\"err: %s\", &buf)\n\t\t\treturn err\n\t\t}\n\t\tbuf.Reset()\n\t}\n\treturn nil\n}\n\nfunc (*dockerProvisioner) Start(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Got error while getting app containers: %s\", err))\n\t}\n\tfor _, c := range containers {\n\t\terr := c.start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) Stop(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Got error while getting app containers: %s\", err)\n\t\treturn nil\n\t}\n\tfor _, c := range containers {\n\t\terr := c.stop()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to stop %q: %s\", app.GetName(), err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc injectEnvsAndRestart(a provision.App) {\n\ttime.Sleep(5e9)\n\terr := a.SerializeEnvVars()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to serialize env vars: %s.\", err)\n\t}\n\tvar buf bytes.Buffer\n\tw := app.LogWriter{App: a, Writer: &buf}\n\terr = a.Restart(&w)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to restart app %q (%s): %s.\", a.GetName(), err, buf.String())\n\t}\n}\n\nfunc startInBackground(a provision.App, c container, imageId string, w io.Writer, started chan bool) {\n\t_, err := start(a, imageId, w)\n\tif err != nil {\n\t\tlog.Errorf(\"error on start the app %s - %s\", a.GetName(), err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\tif c.ID != \"\" {\n\t\tif a.RemoveUnit(c.ID) != nil {\n\t\t\tremoveContainer(&c)\n\t\t}\n\t}\n\tstarted <- true\n}\n\nfunc (dockerProvisioner) Swap(app1, app2 provision.App) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.Swap(app1.GetName(), app2.GetName())\n}\n\nfunc (p *dockerProvisioner) Deploy(a provision.App, version string, w io.Writer) error {\n\timageId, err := deploy(a, version, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontainers, err := listContainersByApp(a.GetName())\n\tstarted := make(chan bool, len(containers))\n\tif err == nil && len(containers) > 0 {\n\t\tfor _, c := range containers {\n\t\t\tgo startInBackground(a, c, imageId, w, started)\n\t\t}\n\t} else {\n\t\tgo startInBackground(a, container{}, imageId, w, started)\n\t}\n\tif <-started {\n\t\tfmt.Fprint(w, \"\\n ---> App will be restarted, please check its logs for more details...\\n\\n\")\n\t} else {\n\t\tfmt.Fprint(w, \"\\n ---> App failed to start, please check its logs for more details...\\n\\n\")\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) Destroy(app provision.App) error {\n\tcontainers, _ := listContainersByApp(app.GetName())\n\tgo func(c []container) {\n\t\tvar containersGroup sync.WaitGroup\n\t\tcontainersGroup.Add(len(containers))\n\t\tfor _, c := range containers {\n\t\t\tgo func(c container) {\n\t\t\t\tdefer containersGroup.Done()\n\t\t\t\terr := removeContainer(&c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t}\n\t\t\t}(c)\n\t\t}\n\t\tcontainersGroup.Wait()\n\t\terr := removeImage(assembleImageName(app.GetName()))\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t}(containers)\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get router: %s\", err)\n\t\treturn err\n\t}\n\treturn r.RemoveBackend(app.GetName())\n}\n\nfunc (*dockerProvisioner) Addr(app provision.App) (string, error) {\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get router: %s\", err)\n\t\treturn \"\", err\n\t}\n\taddr, err := r.Addr(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to obtain app %s address: %s\", app.GetName(), err)\n\t\treturn \"\", err\n\t}\n\treturn addr, nil\n}\n\nfunc addUnitsWithHost(a provision.App, units uint, destinationHost ...string) ([]provision.Unit, error) {\n\tif units == 0 {\n\t\treturn nil, errors.New(\"Cannot add 0 units\")\n\t}\n\tlength, err := getContainerCountForAppName(a.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif length < 1 {\n\t\treturn nil, errors.New(\"New units can only be added after the first deployment\")\n\t}\n\twriter := app.LogWriter{App: a, Writer: ioutil.Discard}\n\tresult := make([]provision.Unit, int(units))\n\tcontainer, err := getOneContainerByAppName(a.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timageId := container.Image\n\tfor i := uint(0); i < units; i++ {\n\t\tcontainer, err := start(a, imageId, &writer, destinationHost...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult[i] = provision.Unit{\n\t\t\tName: container.ID,\n\t\t\tAppName: a.GetName(),\n\t\t\tType: a.GetPlatform(),\n\t\t\tIp: container.HostAddr,\n\t\t\tStatus: provision.StatusBuilding,\n\t\t}\n\t}\n\treturn result, nil\n}\n\nfunc (*dockerProvisioner) AddUnits(a provision.App, units uint) ([]provision.Unit, error) {\n\treturn addUnitsWithHost(a, units)\n}\n\nfunc (*dockerProvisioner) RemoveUnit(a provision.App, unitName string) error {\n\tcontainer, err := getContainer(unitName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif container.AppName != a.GetName() {\n\t\treturn errors.New(\"Unit does not belong to this app\")\n\t}\n\tif err := removeContainer(container); err != nil {\n\t\treturn err\n\t}\n\treturn rebindWhenNeed(a.GetName(), container)\n}\n\n\/\/ rebindWhenNeed rebinds a unit to the app's services when it finds\n\/\/ that the unit being removed has the same host that any\n\/\/ of the units that still being used\nfunc rebindWhenNeed(appName string, container *container) error {\n\tcontainers, err := listContainersByApp(appName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, c := range containers {\n\t\tif c.HostAddr == container.HostAddr && c.ID != container.ID {\n\t\t\tmsg := queue.Message{Action: app.BindService, Args: []string{appName, c.ID}}\n\t\t\tgo app.Enqueue(msg)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc removeContainer(c *container) error {\n\terr := c.stop()\n\tif err != nil {\n\t\tlog.Errorf(\"error on stop unit %s - %s\", c.ID, err)\n\t}\n\terr = c.remove()\n\tif err != nil {\n\t\tlog.Errorf(\"error on remove container %s - %s\", c.ID, err)\n\t}\n\treturn err\n}\n\nfunc (*dockerProvisioner) InstallDeps(app provision.App, w io.Writer) error {\n\treturn nil\n}\n\nfunc (*dockerProvisioner) ExecuteCommandOnce(stdout, stderr io.Writer, app provision.App, cmd string, args ...string) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\treturn errors.New(\"No containers for this app\")\n\t}\n\tcontainer := containers[0]\n\treturn container.ssh(stdout, stderr, cmd, args...)\n}\n\nfunc (*dockerProvisioner) ExecuteCommand(stdout, stderr io.Writer, app provision.App, cmd string, args ...string) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\treturn errors.New(\"No containers for this app\")\n\t}\n\tfor _, c := range containers {\n\t\terr = c.ssh(stdout, stderr, cmd, args...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) SetCName(app provision.App, cname string) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.SetCName(cname, app.GetName())\n}\n\nfunc (p *dockerProvisioner) UnsetCName(app provision.App, cname string) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.UnsetCName(cname, app.GetName())\n}\n\nfunc (p *dockerProvisioner) Commands() []cmd.Command {\n\treturn []cmd.Command{\n\t\taddNodeToSchedulerCmd{},\n\t\tremoveNodeFromSchedulerCmd{},\n\t\tlistNodesInTheSchedulerCmd{},\n\t\t&sshAgentCmd{},\n\t}\n}\n\nfunc (p *dockerProvisioner) AdminCommands() []cmd.Command {\n\treturn []cmd.Command{\n\t\t&moveContainerCmd{},\n\t\t&moveContainersCmd{},\n\t\t&rebalanceContainersCmd{},\n\t}\n}\n\nfunc collection() *storage.Collection {\n\tname, err := config.GetString(\"docker:collection\")\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to connect to the database: %s\", err)\n\t}\n\treturn conn.Collection(name)\n}\n\nfunc (p *dockerProvisioner) DeployPipeline() *action.Pipeline {\n\tactions := []*action.Action{\n\t\t&app.ProvisionerDeploy,\n\t\t&app.IncrementDeploy,\n\t\t&saveUnits,\n\t\t&injectEnvirons,\n\t\t&bindService,\n\t}\n\tpipeline := action.NewPipeline(actions...)\n\treturn pipeline\n}\n\n\/\/ PlatformAdd build and push a new docker platform to register\nfunc (p *dockerProvisioner) PlatformAdd(name string, args map[string]string, w io.Writer) error {\n\tif args[\"dockerfile\"] == \"\" {\n\t\treturn errors.New(\"Dockerfile is required.\")\n\t}\n\tif _, err := url.ParseRequestURI(args[\"dockerfile\"]); err != nil {\n\t\treturn errors.New(\"dockerfile parameter should be an url.\")\n\t}\n\timageName := assembleImageName(name)\n\tdockerCluster := dockerCluster()\n\tbuildOptions := docker.BuildImageOptions{\n\t\tName: imageName,\n\t\tNoCache: true,\n\t\tRmTmpContainer: true,\n\t\tRemote: args[\"dockerfile\"],\n\t\tInputStream: nil,\n\t\tOutputStream: w,\n\t}\n\terr := dockerCluster.BuildImage(buildOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pushImage(imageName)\n}\n\nfunc (p *dockerProvisioner) PlatformUpdate(name string, args map[string]string, w io.Writer) error {\n return nil\n}\n<commit_msg>PlatformUpdate calling PlatformAdd in docker provisioner<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/action\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/storage\"\n\t\"github.com\/tsuru\/tsuru\/exec\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/queue\"\n\t\"github.com\/tsuru\/tsuru\/router\"\n\t_ \"github.com\/tsuru\/tsuru\/router\/hipache\"\n\t_ \"github.com\/tsuru\/tsuru\/router\/testing\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"time\"\n)\n\nfunc init() {\n\tprovision.Register(\"docker\", &dockerProvisioner{})\n}\n\nvar (\n\texecut exec.Executor\n\temutex sync.Mutex\n)\n\nfunc executor() exec.Executor {\n\temutex.Lock()\n\tdefer emutex.Unlock()\n\tif execut == nil {\n\t\texecut = exec.OsExecutor{}\n\t}\n\treturn execut\n}\n\nfunc getRouter() (router.Router, error) {\n\tr, err := config.GetString(\"docker:router\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn router.Get(r)\n}\n\ntype dockerProvisioner struct{}\n\n\/\/ Provision creates a route for the container\nfunc (p *dockerProvisioner) Provision(app provision.App) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to get router: %s\", err)\n\t\treturn err\n\t}\n\terr = app.Ready()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.AddBackend(app.GetName())\n}\n\nfunc (p *dockerProvisioner) Restart(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Got error while getting app containers: %s\", err)\n\t\treturn err\n\t}\n\tvar buf bytes.Buffer\n\tfor _, c := range containers {\n\t\terr = c.ssh(&buf, &buf, \"\/var\/lib\/tsuru\/restart\")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to restart %q: %s.\", app.GetName(), err)\n\t\t\tlog.Debug(\"Command outputs:\")\n\t\t\tlog.Debugf(\"out: %s\", &buf)\n\t\t\tlog.Debugf(\"err: %s\", &buf)\n\t\t\treturn err\n\t\t}\n\t\tbuf.Reset()\n\t}\n\treturn nil\n}\n\nfunc (*dockerProvisioner) Start(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Got error while getting app containers: %s\", err))\n\t}\n\tfor _, c := range containers {\n\t\terr := c.start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) Stop(app provision.App) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Got error while getting app containers: %s\", err)\n\t\treturn nil\n\t}\n\tfor _, c := range containers {\n\t\terr := c.stop()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to stop %q: %s\", app.GetName(), err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc injectEnvsAndRestart(a provision.App) {\n\ttime.Sleep(5e9)\n\terr := a.SerializeEnvVars()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to serialize env vars: %s.\", err)\n\t}\n\tvar buf bytes.Buffer\n\tw := app.LogWriter{App: a, Writer: &buf}\n\terr = a.Restart(&w)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to restart app %q (%s): %s.\", a.GetName(), err, buf.String())\n\t}\n}\n\nfunc startInBackground(a provision.App, c container, imageId string, w io.Writer, started chan bool) {\n\t_, err := start(a, imageId, w)\n\tif err != nil {\n\t\tlog.Errorf(\"error on start the app %s - %s\", a.GetName(), err)\n\t\tstarted <- false\n\t\treturn\n\t}\n\tif c.ID != \"\" {\n\t\tif a.RemoveUnit(c.ID) != nil {\n\t\t\tremoveContainer(&c)\n\t\t}\n\t}\n\tstarted <- true\n}\n\nfunc (dockerProvisioner) Swap(app1, app2 provision.App) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.Swap(app1.GetName(), app2.GetName())\n}\n\nfunc (p *dockerProvisioner) Deploy(a provision.App, version string, w io.Writer) error {\n\timageId, err := deploy(a, version, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontainers, err := listContainersByApp(a.GetName())\n\tstarted := make(chan bool, len(containers))\n\tif err == nil && len(containers) > 0 {\n\t\tfor _, c := range containers {\n\t\t\tgo startInBackground(a, c, imageId, w, started)\n\t\t}\n\t} else {\n\t\tgo startInBackground(a, container{}, imageId, w, started)\n\t}\n\tif <-started {\n\t\tfmt.Fprint(w, \"\\n ---> App will be restarted, please check its logs for more details...\\n\\n\")\n\t} else {\n\t\tfmt.Fprint(w, \"\\n ---> App failed to start, please check its logs for more details...\\n\\n\")\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) Destroy(app provision.App) error {\n\tcontainers, _ := listContainersByApp(app.GetName())\n\tgo func(c []container) {\n\t\tvar containersGroup sync.WaitGroup\n\t\tcontainersGroup.Add(len(containers))\n\t\tfor _, c := range containers {\n\t\t\tgo func(c container) {\n\t\t\t\tdefer containersGroup.Done()\n\t\t\t\terr := removeContainer(&c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t}\n\t\t\t}(c)\n\t\t}\n\t\tcontainersGroup.Wait()\n\t\terr := removeImage(assembleImageName(app.GetName()))\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t}(containers)\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get router: %s\", err)\n\t\treturn err\n\t}\n\treturn r.RemoveBackend(app.GetName())\n}\n\nfunc (*dockerProvisioner) Addr(app provision.App) (string, error) {\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get router: %s\", err)\n\t\treturn \"\", err\n\t}\n\taddr, err := r.Addr(app.GetName())\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to obtain app %s address: %s\", app.GetName(), err)\n\t\treturn \"\", err\n\t}\n\treturn addr, nil\n}\n\nfunc addUnitsWithHost(a provision.App, units uint, destinationHost ...string) ([]provision.Unit, error) {\n\tif units == 0 {\n\t\treturn nil, errors.New(\"Cannot add 0 units\")\n\t}\n\tlength, err := getContainerCountForAppName(a.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif length < 1 {\n\t\treturn nil, errors.New(\"New units can only be added after the first deployment\")\n\t}\n\twriter := app.LogWriter{App: a, Writer: ioutil.Discard}\n\tresult := make([]provision.Unit, int(units))\n\tcontainer, err := getOneContainerByAppName(a.GetName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timageId := container.Image\n\tfor i := uint(0); i < units; i++ {\n\t\tcontainer, err := start(a, imageId, &writer, destinationHost...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult[i] = provision.Unit{\n\t\t\tName: container.ID,\n\t\t\tAppName: a.GetName(),\n\t\t\tType: a.GetPlatform(),\n\t\t\tIp: container.HostAddr,\n\t\t\tStatus: provision.StatusBuilding,\n\t\t}\n\t}\n\treturn result, nil\n}\n\nfunc (*dockerProvisioner) AddUnits(a provision.App, units uint) ([]provision.Unit, error) {\n\treturn addUnitsWithHost(a, units)\n}\n\nfunc (*dockerProvisioner) RemoveUnit(a provision.App, unitName string) error {\n\tcontainer, err := getContainer(unitName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif container.AppName != a.GetName() {\n\t\treturn errors.New(\"Unit does not belong to this app\")\n\t}\n\tif err := removeContainer(container); err != nil {\n\t\treturn err\n\t}\n\treturn rebindWhenNeed(a.GetName(), container)\n}\n\n\/\/ rebindWhenNeed rebinds a unit to the app's services when it finds\n\/\/ that the unit being removed has the same host that any\n\/\/ of the units that still being used\nfunc rebindWhenNeed(appName string, container *container) error {\n\tcontainers, err := listContainersByApp(appName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, c := range containers {\n\t\tif c.HostAddr == container.HostAddr && c.ID != container.ID {\n\t\t\tmsg := queue.Message{Action: app.BindService, Args: []string{appName, c.ID}}\n\t\t\tgo app.Enqueue(msg)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc removeContainer(c *container) error {\n\terr := c.stop()\n\tif err != nil {\n\t\tlog.Errorf(\"error on stop unit %s - %s\", c.ID, err)\n\t}\n\terr = c.remove()\n\tif err != nil {\n\t\tlog.Errorf(\"error on remove container %s - %s\", c.ID, err)\n\t}\n\treturn err\n}\n\nfunc (*dockerProvisioner) InstallDeps(app provision.App, w io.Writer) error {\n\treturn nil\n}\n\nfunc (*dockerProvisioner) ExecuteCommandOnce(stdout, stderr io.Writer, app provision.App, cmd string, args ...string) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\treturn errors.New(\"No containers for this app\")\n\t}\n\tcontainer := containers[0]\n\treturn container.ssh(stdout, stderr, cmd, args...)\n}\n\nfunc (*dockerProvisioner) ExecuteCommand(stdout, stderr io.Writer, app provision.App, cmd string, args ...string) error {\n\tcontainers, err := listContainersByApp(app.GetName())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\treturn errors.New(\"No containers for this app\")\n\t}\n\tfor _, c := range containers {\n\t\terr = c.ssh(stdout, stderr, cmd, args...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *dockerProvisioner) SetCName(app provision.App, cname string) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.SetCName(cname, app.GetName())\n}\n\nfunc (p *dockerProvisioner) UnsetCName(app provision.App, cname string) error {\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.UnsetCName(cname, app.GetName())\n}\n\nfunc (p *dockerProvisioner) Commands() []cmd.Command {\n\treturn []cmd.Command{\n\t\taddNodeToSchedulerCmd{},\n\t\tremoveNodeFromSchedulerCmd{},\n\t\tlistNodesInTheSchedulerCmd{},\n\t\t&sshAgentCmd{},\n\t}\n}\n\nfunc (p *dockerProvisioner) AdminCommands() []cmd.Command {\n\treturn []cmd.Command{\n\t\t&moveContainerCmd{},\n\t\t&moveContainersCmd{},\n\t\t&rebalanceContainersCmd{},\n\t}\n}\n\nfunc collection() *storage.Collection {\n\tname, err := config.GetString(\"docker:collection\")\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to connect to the database: %s\", err)\n\t}\n\treturn conn.Collection(name)\n}\n\nfunc (p *dockerProvisioner) DeployPipeline() *action.Pipeline {\n\tactions := []*action.Action{\n\t\t&app.ProvisionerDeploy,\n\t\t&app.IncrementDeploy,\n\t\t&saveUnits,\n\t\t&injectEnvirons,\n\t\t&bindService,\n\t}\n\tpipeline := action.NewPipeline(actions...)\n\treturn pipeline\n}\n\n\/\/ PlatformAdd build and push a new docker platform to register\nfunc (p *dockerProvisioner) PlatformAdd(name string, args map[string]string, w io.Writer) error {\n\tif args[\"dockerfile\"] == \"\" {\n\t\treturn errors.New(\"Dockerfile is required.\")\n\t}\n\tif _, err := url.ParseRequestURI(args[\"dockerfile\"]); err != nil {\n\t\treturn errors.New(\"dockerfile parameter should be an url.\")\n\t}\n\timageName := assembleImageName(name)\n\tdockerCluster := dockerCluster()\n\tbuildOptions := docker.BuildImageOptions{\n\t\tName: imageName,\n\t\tNoCache: true,\n\t\tRmTmpContainer: true,\n\t\tRemote: args[\"dockerfile\"],\n\t\tInputStream: nil,\n\t\tOutputStream: w,\n\t}\n\terr := dockerCluster.BuildImage(buildOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pushImage(imageName)\n}\n\nfunc (p *dockerProvisioner) PlatformUpdate(name string, args map[string]string, w io.Writer) error {\n return p.PlatformAdd(name, args, w)\n}\n<|endoftext|>"} {"text":"<commit_before>package assert\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestEqual(t *testing.T) {\n\tmockT := new(testing.T)\n\tbuffer := &bytes.Buffer{}\n\tassert := &Assertion{&logFacade{mockT, &loggerImpl{writer: buffer}}}\n\n\tif !assert.Equal(\"String\", \"String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(\"String\", \"Not String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(nil, nil) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(nil, \"String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal([]byte(\"String\"), []byte(\"String\")) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal([]byte(\"String\"), []byte(\"Not String\")) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(10, 10) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(10, 20) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(10.5, 10.5) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(10.5, 20.5) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n}\n\nfunc TestGetAssertion(t *testing.T) {\n\tmockT := new(testing.T)\n\tassert := GetAssertion(mockT)\n\tif assert == nil {\n\t\tt.Error(\"GetAssertion must not nil\")\n\t}\n}\n<commit_msg>test panic case when call Fail func (panic from local skip is more than expected)<commit_after>package assert\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestEqual(t *testing.T) {\n\tmockT := new(testing.T)\n\tbuffer := &bytes.Buffer{}\n\tassert := &Assertion{&logFacade{mockT, &loggerImpl{writer: buffer}}}\n\n\tif !assert.Equal(\"String\", \"String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(\"String\", \"Not String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(nil, nil) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(nil, \"String\") {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal([]byte(\"String\"), []byte(\"String\")) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal([]byte(\"String\"), []byte(\"Not String\")) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(10, 10) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(10, 20) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif !assert.Equal(10.5, 10.5) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n\tif assert.Equal(10.5, 20.5) {\n\t\tt.Error(\"Equal must return true\")\n\t}\n}\n\nfunc TestGetAssertion(t *testing.T) {\n\tmockT := new(testing.T)\n\tassert := GetAssertion(mockT)\n\tif assert == nil {\n\t\tt.Error(\"GetAssertion must not nil\")\n\t}\n}\n\nfunc TestFailIsPanic(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Errorf(\"The code did not panic\")\n\t\t}\n\t}()\n\tmockT := new(testing.T)\n\tbuffer := &bytes.Buffer{}\n\tFail(&logFacade{mockT, &loggerImpl{writer: buffer}}, \"message\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage completion\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\tcmdutil \"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"k8s.io\/kubectl\/pkg\/util\/i18n\"\n\t\"k8s.io\/kubectl\/pkg\/util\/templates\"\n)\n\nconst defaultBoilerPlate = `\n# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n`\n\nvar (\n\tcompletionLong = templates.LongDesc(i18n.T(`\n\t\tOutput shell completion code for the specified shell (bash or zsh).\n\t\tThe shell code must be evaluated to provide interactive\n\t\tcompletion of kubectl commands. This can be done by sourcing it from\n\t\tthe .bash_profile.\n\n\t\tDetailed instructions on how to do this are available here:\n\n for macOS:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-macos\/#enable-shell-autocompletion\n\n for linux:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-linux\/#enable-shell-autocompletion\n\n for windows:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-windows\/#enable-shell-autocompletion\n\n\t\tNote for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2`))\n\n\tcompletionExample = templates.Examples(i18n.T(`\n\t\t# Installing bash completion on macOS using homebrew\n\t\t## If running Bash 3.2 included with macOS\n\t\t brew install bash-completion\n\t\t## or, if running Bash 4.1+\n\t\t brew install bash-completion@2\n\t\t## If kubectl is installed via homebrew, this should start working immediately.\n\t\t## If you've installed via other means, you may need add the completion to your completion directory\n\t\t kubectl completion bash > $(brew --prefix)\/etc\/bash_completion.d\/kubectl\n\n\n\t\t# Installing bash completion on Linux\n\t\t## If bash-completion is not installed on Linux, please install the 'bash-completion' package\n\t\t## via your distribution's package manager.\n\t\t## Load the kubectl completion code for bash into the current shell\n\t\t source <(kubectl completion bash)\n\t\t## Write bash completion code to a file and source it from .bash_profile\n\t\t kubectl completion bash > ~\/.kube\/completion.bash.inc\n\t\t printf \"\n\t\t # Kubectl shell completion\n\t\t source '$HOME\/.kube\/completion.bash.inc'\n\t\t \" >> $HOME\/.bash_profile\n\t\t source $HOME\/.bash_profile\n\n\t\t# Load the kubectl completion code for zsh[1] into the current shell\n\t\t source <(kubectl completion zsh)\n\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n\t\t kubectl completion zsh > \"${fpath[1]}\/_kubectl\"`))\n)\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, boilerPlate string, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\n\/\/ NewCmdCompletion creates the `completion` command\nfunc NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tDisableFlagsInUseLine: true,\n\t\tShort: i18n.T(\"Output shell completion code for the specified shell (bash or zsh)\"),\n\t\tLong: completionLong,\n\t\tExample: completionExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmdutil.CheckErr(RunCompletion(out, boilerPlate, cmd, args))\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\n\/\/ RunCompletion checks given arguments and executes command\nfunc RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Shell not specified.\")\n\t}\n\tif len(args) > 1 {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Too many arguments. Expected only the shell type.\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Unsupported shell type %q.\", args[0])\n\t}\n\n\treturn run(out, boilerPlate, cmd.Parent())\n}\n\nfunc runCompletionBash(out io.Writer, boilerPlate string, kubectl *cobra.Command) error {\n\tif len(boilerPlate) == 0 {\n\t\tboilerPlate = defaultBoilerPlate\n\t}\n\tif _, err := out.Write([]byte(boilerPlate)); err != nil {\n\t\treturn err\n\t}\n\n\treturn kubectl.GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, boilerPlate string, kubectl *cobra.Command) error {\n\tzshHead := \"#compdef kubectl\\n\"\n\n\tout.Write([]byte(zshHead))\n\n\tif len(boilerPlate) == 0 {\n\t\tboilerPlate = defaultBoilerPlate\n\t}\n\tif _, err := out.Write([]byte(boilerPlate)); err != nil {\n\t\treturn err\n\t}\n\n\tzshInitialization := `\n__kubectl_bash_source() {\n\talias shopt=':'\n\temulate -L sh\n\tsetopt kshglob noshglob braceexpand\n\n\tsource \"$@\"\n}\n\n__kubectl_type() {\n\t# -t is not supported by zsh\n\tif [ \"$1\" == \"-t\" ]; then\n\t\tshift\n\n\t\t# fake Bash 4 to disable \"complete -o nospace\". Instead\n\t\t# \"compopt +-o nospace\" is used in the code to toggle trailing\n\t\t# spaces. We don't support that, but leave trailing spaces on\n\t\t# all the time\n\t\tif [ \"$1\" = \"__kubectl_compopt\" ]; then\n\t\t\techo builtin\n\t\t\treturn 0\n\t\tfi\n\tfi\n\ttype \"$@\"\n}\n\n__kubectl_compgen() {\n\tlocal completions w\n\tcompletions=( $(compgen \"$@\") ) || return $?\n\n\t# filter by given word as prefix\n\twhile [[ \"$1\" = -* && \"$1\" != -- ]]; do\n\t\tshift\n\t\tshift\n\tdone\n\tif [[ \"$1\" == -- ]]; then\n\t\tshift\n\tfi\n\tfor w in \"${completions[@]}\"; do\n\t\tif [[ \"${w}\" = \"$1\"* ]]; then\n\t\t\techo \"${w}\"\n\t\tfi\n\tdone\n}\n\n__kubectl_compopt() {\n\ttrue # don't do anything. Not supported by bashcompinit in zsh\n}\n\n__kubectl_ltrim_colon_completions()\n{\n\tif [[ \"$1\" == *:* && \"$COMP_WORDBREAKS\" == *:* ]]; then\n\t\t# Remove colon-word prefix from COMPREPLY items\n\t\tlocal colon_word=${1%${1##*:}}\n\t\tlocal i=${#COMPREPLY[*]}\n\t\twhile [[ $((--i)) -ge 0 ]]; do\n\t\t\tCOMPREPLY[$i]=${COMPREPLY[$i]#\"$colon_word\"}\n\t\tdone\n\tfi\n}\n\n__kubectl_get_comp_words_by_ref() {\n\tcur=\"${COMP_WORDS[COMP_CWORD]}\"\n\tprev=\"${COMP_WORDS[${COMP_CWORD}-1]}\"\n\twords=(\"${COMP_WORDS[@]}\")\n\tcword=(\"${COMP_CWORD[@]}\")\n}\n\n__kubectl_filedir() {\n\t# Don't need to do anything here.\n\t# Otherwise we will get trailing space without \"compopt -o nospace\"\n\ttrue\n}\n\nautoload -U +X bashcompinit && bashcompinit\n\n# use word boundary patterns for BSD or GNU sed\nLWORD='[[:<:]]'\nRWORD='[[:>:]]'\nif sed --version 2>&1 | grep -q GNU; then\n\tLWORD='\\<'\n\tRWORD='\\>'\nfi\n\n__kubectl_convert_bash_to_zsh() {\n\tsed \\\n\t-e 's\/declare -F\/whence -w\/' \\\n\t-e 's\/_get_comp_words_by_ref \"\\$@\"\/_get_comp_words_by_ref \"\\$*\"\/' \\\n\t-e 's\/local \\([a-zA-Z0-9_]*\\)=\/local \\1; \\1=\/' \\\n\t-e 's\/flags+=(\"\\(--.*\\)=\")\/flags+=(\"\\1\"); two_word_flags+=(\"\\1\")\/' \\\n\t-e 's\/must_have_one_flag+=(\"\\(--.*\\)=\")\/must_have_one_flag+=(\"\\1\")\/' \\\n\t-e \"s\/${LWORD}_filedir${RWORD}\/__kubectl_filedir\/g\" \\\n\t-e \"s\/${LWORD}_get_comp_words_by_ref${RWORD}\/__kubectl_get_comp_words_by_ref\/g\" \\\n\t-e \"s\/${LWORD}__ltrim_colon_completions${RWORD}\/__kubectl_ltrim_colon_completions\/g\" \\\n\t-e \"s\/${LWORD}compgen${RWORD}\/__kubectl_compgen\/g\" \\\n\t-e \"s\/${LWORD}compopt${RWORD}\/__kubectl_compopt\/g\" \\\n\t-e \"s\/${LWORD}declare${RWORD}\/builtin declare\/g\" \\\n\t-e \"s\/\\\\\\$(type${RWORD}\/\\$(__kubectl_type\/g\" \\\n\t<<'BASH_COMPLETION_EOF'\n`\n\tout.Write([]byte(zshInitialization))\n\n\tbuf := new(bytes.Buffer)\n\tkubectl.GenBashCompletion(buf)\n\tout.Write(buf.Bytes())\n\n\tzshTail := `\nBASH_COMPLETION_EOF\n}\n\n__kubectl_bash_source <(__kubectl_convert_bash_to_zsh)\n`\n\tout.Write([]byte(zshTail))\n\treturn nil\n}\n<commit_msg>Use native zsh completion<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage completion\n\nimport (\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\tcmdutil \"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"k8s.io\/kubectl\/pkg\/util\/i18n\"\n\t\"k8s.io\/kubectl\/pkg\/util\/templates\"\n)\n\nconst defaultBoilerPlate = `\n# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n`\n\nvar (\n\tcompletionLong = templates.LongDesc(i18n.T(`\n\t\tOutput shell completion code for the specified shell (bash or zsh).\n\t\tThe shell code must be evaluated to provide interactive\n\t\tcompletion of kubectl commands. This can be done by sourcing it from\n\t\tthe .bash_profile.\n\n\t\tDetailed instructions on how to do this are available here:\n\n for macOS:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-macos\/#enable-shell-autocompletion\n\n for linux:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-linux\/#enable-shell-autocompletion\n\n for windows:\n https:\/\/kubernetes.io\/docs\/tasks\/tools\/install-kubectl-windows\/#enable-shell-autocompletion\n\n\t\tNote for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2`))\n\n\tcompletionExample = templates.Examples(i18n.T(`\n\t\t# Installing bash completion on macOS using homebrew\n\t\t## If running Bash 3.2 included with macOS\n\t\t brew install bash-completion\n\t\t## or, if running Bash 4.1+\n\t\t brew install bash-completion@2\n\t\t## If kubectl is installed via homebrew, this should start working immediately.\n\t\t## If you've installed via other means, you may need add the completion to your completion directory\n\t\t kubectl completion bash > $(brew --prefix)\/etc\/bash_completion.d\/kubectl\n\n\n\t\t# Installing bash completion on Linux\n\t\t## If bash-completion is not installed on Linux, please install the 'bash-completion' package\n\t\t## via your distribution's package manager.\n\t\t## Load the kubectl completion code for bash into the current shell\n\t\t source <(kubectl completion bash)\n\t\t## Write bash completion code to a file and source it from .bash_profile\n\t\t kubectl completion bash > ~\/.kube\/completion.bash.inc\n\t\t printf \"\n\t\t # Kubectl shell completion\n\t\t source '$HOME\/.kube\/completion.bash.inc'\n\t\t \" >> $HOME\/.bash_profile\n\t\t source $HOME\/.bash_profile\n\n\t\t# Load the kubectl completion code for zsh[1] into the current shell\n\t\t source <(kubectl completion zsh)\n\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n\t\t kubectl completion zsh > \"${fpath[1]}\/_kubectl\"`))\n)\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, boilerPlate string, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\n\/\/ NewCmdCompletion creates the `completion` command\nfunc NewCmdCompletion(out io.Writer, boilerPlate string) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tDisableFlagsInUseLine: true,\n\t\tShort: i18n.T(\"Output shell completion code for the specified shell (bash or zsh)\"),\n\t\tLong: completionLong,\n\t\tExample: completionExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmdutil.CheckErr(RunCompletion(out, boilerPlate, cmd, args))\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\n\/\/ RunCompletion checks given arguments and executes command\nfunc RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Shell not specified.\")\n\t}\n\tif len(args) > 1 {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Too many arguments. Expected only the shell type.\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn cmdutil.UsageErrorf(cmd, \"Unsupported shell type %q.\", args[0])\n\t}\n\n\treturn run(out, boilerPlate, cmd.Parent())\n}\n\nfunc runCompletionBash(out io.Writer, boilerPlate string, kubectl *cobra.Command) error {\n\tif len(boilerPlate) == 0 {\n\t\tboilerPlate = defaultBoilerPlate\n\t}\n\tif _, err := out.Write([]byte(boilerPlate)); err != nil {\n\t\treturn err\n\t}\n\n\treturn kubectl.GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, boilerPlate string, kubectl *cobra.Command) error {\n\tzshHead := \"#compdef kubectl\\ncompdef _kubectl kubectl\\n\"\n\tout.Write([]byte(zshHead))\n\n\tif len(boilerPlate) == 0 {\n\t\tboilerPlate = defaultBoilerPlate\n\t}\n\tif _, err := out.Write([]byte(boilerPlate)); err != nil {\n\t\treturn err\n\t}\n\n\treturn kubectl.GenZshCompletion(out)\n}\n<|endoftext|>"} {"text":"<commit_before>package eventlog\n\n\/\/ Loggable describes objects that can be marshalled into Metadata for logging\ntype Loggable interface {\n\tLoggable() map[string]interface{}\n}\n\ntype LoggableMap map[string]interface{}\n\nfunc (l LoggableMap) Loggable() map[string]interface{} {\n\treturn l\n}\n\n\/\/ Loggable converts a func into a Loggable\ntype LoggableF func() map[string]interface{}\n\nfunc (l LoggableF) Loggable() map[string]interface{} {\n\treturn l()\n}\n\nfunc Deferred(key string, f func() string) Loggable {\n\tfunction := func() map[string]interface{} {\n\t\treturn map[string]interface{}{\n\t\t\tkey: f(),\n\t\t}\n\t}\n\treturn LoggableF(function)\n}\n<commit_msg>feat(eventlog) add Pair method<commit_after>package eventlog\n\n\/\/ Loggable describes objects that can be marshalled into Metadata for logging\ntype Loggable interface {\n\tLoggable() map[string]interface{}\n}\n\ntype LoggableMap map[string]interface{}\n\nfunc (l LoggableMap) Loggable() map[string]interface{} {\n\treturn l\n}\n\n\/\/ Loggable converts a func into a Loggable\ntype LoggableF func() map[string]interface{}\n\nfunc (l LoggableF) Loggable() map[string]interface{} {\n\treturn l()\n}\n\nfunc Deferred(key string, f func() string) Loggable {\n\tfunction := func() map[string]interface{} {\n\t\treturn map[string]interface{}{\n\t\t\tkey: f(),\n\t\t}\n\t}\n\treturn LoggableF(function)\n}\n\nfunc Pair(key string, l Loggable) Loggable {\n\treturn LoggableMap{\n\t\tkey: l,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\ntype StepRunSourceInstance struct {\n\tAssociatePublicIpAddress bool\n\tAvailabilityZone string\n\tBlockDevices BlockDevices\n\tDebug bool\n\tEbsOptimized bool\n\tExpectedRootDevice string\n\tInstanceType string\n\tIamInstanceProfile string\n\tSourceAMI string\n\tSpotPrice string\n\tSpotPriceProduct string\n\tSubnetId string\n\tTags map[string]string\n\tUserData string\n\tUserDataFile string\n\tInstanceInitiatedShutdownBehavior string\n\n\tinstanceId string\n\tspotRequest *ec2.SpotInstanceRequest\n}\n\nfunc (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {\n\tec2conn := state.Get(\"ec2\").(*ec2.EC2)\n\tkeyName := state.Get(\"keyPair\").(string)\n\ttempSecurityGroupIds := state.Get(\"securityGroupIds\").([]string)\n\tui := state.Get(\"ui\").(packer.Ui)\n\n\tsecurityGroupIds := make([]*string, len(tempSecurityGroupIds))\n\tfor i, sg := range tempSecurityGroupIds {\n\t\tlog.Printf(\"[DEBUG] Waiting for tempSecurityGroup: %s\", sg)\n\t\terr := WaitUntilSecurityGroupExists(ec2conn,\n\t\t\t&ec2.DescribeSecurityGroupsInput{\n\t\t\t\tGroupIds: []*string{aws.String(sg)},\n\t\t\t},\n\t\t)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"[DEBUG] Found security group %s\", sg)\n\t\t\tsecurityGroupIds[i] = aws.String(sg)\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"Timed out waiting for security group %s\", sg)\n\t\t\tlog.Printf(\"[DEBUG] %s\", err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t}\n\n\tuserData := s.UserData\n\tif s.UserDataFile != \"\" {\n\t\tcontents, err := ioutil.ReadFile(s.UserDataFile)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", fmt.Errorf(\"Problem reading user data file: %s\", err))\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tuserData = string(contents)\n\t}\n\n\t\/\/ Test if it is encoded already, and if not, encode it\n\tif _, err := base64.StdEncoding.DecodeString(userData); err != nil {\n\t\tlog.Printf(\"[DEBUG] base64 encoding user data...\")\n\t\tuserData = base64.StdEncoding.EncodeToString([]byte(userData))\n\t}\n\n\tui.Say(\"Launching a source AWS instance...\")\n\timage, ok := state.Get(\"source_image\").(*ec2.Image)\n\tif !ok {\n\t\tstate.Put(\"error\", fmt.Errorf(\"source_image type assertion failed\"))\n\t\treturn multistep.ActionHalt\n\t}\n\ts.SourceAMI = *image.ImageId\n\n\tif s.ExpectedRootDevice != \"\" && *image.RootDeviceType != s.ExpectedRootDevice {\n\t\tstate.Put(\"error\", fmt.Errorf(\n\t\t\t\"The provided source AMI has an invalid root device type.\\n\"+\n\t\t\t\t\"Expected '%s', got '%s'.\",\n\t\t\ts.ExpectedRootDevice, *image.RootDeviceType))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tspotPrice := s.SpotPrice\n\tavailabilityZone := s.AvailabilityZone\n\tif spotPrice == \"auto\" {\n\t\tui.Message(fmt.Sprintf(\n\t\t\t\"Finding spot price for %s %s...\",\n\t\t\ts.SpotPriceProduct, s.InstanceType))\n\n\t\t\/\/ Detect the spot price\n\t\tstartTime := time.Now().Add(-1 * time.Hour)\n\t\tresp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{\n\t\t\tInstanceTypes: []*string{&s.InstanceType},\n\t\t\tProductDescriptions: []*string{&s.SpotPriceProduct},\n\t\t\tAvailabilityZone: &s.AvailabilityZone,\n\t\t\tStartTime: &startTime,\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error finding spot price: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tvar price float64\n\t\tfor _, history := range resp.SpotPriceHistory {\n\t\t\tlog.Printf(\"[INFO] Candidate spot price: %s\", *history.SpotPrice)\n\t\t\tcurrent, err := strconv.ParseFloat(*history.SpotPrice, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[ERR] Error parsing spot price: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif price == 0 || current < price {\n\t\t\t\tprice = current\n\t\t\t\tif s.AvailabilityZone == \"\" {\n\t\t\t\t\tavailabilityZone = *history.AvailabilityZone\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif price == 0 {\n\t\t\terr := fmt.Errorf(\"No candidate spot prices found!\")\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t} else {\n\t\t\t\/\/ Add 0.5 cents to minimum spot bid to ensure capacity will be available\n\t\t\t\/\/ Avoids price-too-low error in active markets which can fluctuate\n\t\t\tprice = price + 0.005\n\t\t}\n\n\t\tspotPrice = strconv.FormatFloat(price, 'f', -1, 64)\n\t}\n\n\tvar instanceId string\n\n\tif spotPrice == \"\" || spotPrice == \"0\" {\n\t\trunOpts := &ec2.RunInstancesInput{\n\t\t\tImageId: &s.SourceAMI,\n\t\t\tInstanceType: &s.InstanceType,\n\t\t\tUserData: &userData,\n\t\t\tMaxCount: aws.Int64(1),\n\t\t\tMinCount: aws.Int64(1),\n\t\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},\n\t\t\tBlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),\n\t\t\tPlacement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone},\n\t\t\tEbsOptimized: &s.EbsOptimized,\n\t\t}\n\n\t\tif keyName != \"\" {\n\t\t\trunOpts.KeyName = &keyName\n\t\t}\n\n\t\tif s.SubnetId != \"\" && s.AssociatePublicIpAddress {\n\t\t\trunOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{\n\t\t\t\t{\n\t\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\t\tAssociatePublicIpAddress: &s.AssociatePublicIpAddress,\n\t\t\t\t\tSubnetId: &s.SubnetId,\n\t\t\t\t\tGroups: securityGroupIds,\n\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\trunOpts.SubnetId = &s.SubnetId\n\t\t\trunOpts.SecurityGroupIds = securityGroupIds\n\t\t}\n\n\t\tif s.ExpectedRootDevice == \"ebs\" {\n\t\t\trunOpts.InstanceInitiatedShutdownBehavior = &s.InstanceInitiatedShutdownBehavior\n\t\t}\n\n\t\trunResp, err := ec2conn.RunInstances(runOpts)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error launching source instance: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tinstanceId = *runResp.Instances[0].InstanceId\n\t} else {\n\t\tui.Message(fmt.Sprintf(\n\t\t\t\"Requesting spot instance '%s' for: %s\",\n\t\t\ts.InstanceType, spotPrice))\n\n\t\trunOpts := &ec2.RequestSpotLaunchSpecification{\n\t\t\tImageId: &s.SourceAMI,\n\t\t\tInstanceType: &s.InstanceType,\n\t\t\tUserData: &userData,\n\t\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},\n\t\t\tPlacement: &ec2.SpotPlacement{\n\t\t\t\tAvailabilityZone: &availabilityZone,\n\t\t\t},\n\t\t\tBlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),\n\t\t\tEbsOptimized: &s.EbsOptimized,\n\t\t}\n\n\t\tif s.SubnetId != \"\" && s.AssociatePublicIpAddress {\n\t\t\trunOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{\n\t\t\t\t{\n\t\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\t\tAssociatePublicIpAddress: &s.AssociatePublicIpAddress,\n\t\t\t\t\tSubnetId: &s.SubnetId,\n\t\t\t\t\tGroups: securityGroupIds,\n\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\trunOpts.SubnetId = &s.SubnetId\n\t\t\trunOpts.SecurityGroupIds = securityGroupIds\n\t\t}\n\n\t\tif keyName != \"\" {\n\t\t\trunOpts.KeyName = &keyName\n\t\t}\n\n\t\trunSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{\n\t\t\tSpotPrice: &spotPrice,\n\t\t\tLaunchSpecification: runOpts,\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error launching source spot instance: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\ts.spotRequest = runSpotResp.SpotInstanceRequests[0]\n\n\t\tspotRequestId := s.spotRequest.SpotInstanceRequestId\n\t\tui.Message(fmt.Sprintf(\"Waiting for spot request (%s) to become active...\", *spotRequestId))\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"open\"},\n\t\t\tTarget: \"active\",\n\t\t\tRefresh: SpotRequestStateRefreshFunc(ec2conn, *spotRequestId),\n\t\t\tStepState: state,\n\t\t}\n\t\t_, err = WaitForState(&stateChange)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error waiting for spot request (%s) to become ready: %s\", *spotRequestId, err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tspotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{\n\t\t\tSpotInstanceRequestIds: []*string{spotRequestId},\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error finding spot request (%s): %s\", *spotRequestId, err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tinstanceId = *spotResp.SpotInstanceRequests[0].InstanceId\n\t}\n\n\t\/\/ Set the instance ID so that the cleanup works properly\n\ts.instanceId = instanceId\n\n\tui.Message(fmt.Sprintf(\"Instance ID: %s\", instanceId))\n\tui.Say(fmt.Sprintf(\"Waiting for instance (%v) to become ready...\", instanceId))\n\tstateChange := StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: \"running\",\n\t\tRefresh: InstanceStateRefreshFunc(ec2conn, instanceId),\n\t\tStepState: state,\n\t}\n\tlatestInstance, err := WaitForState(&stateChange)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Error waiting for instance (%s) to become ready: %s\", instanceId, err)\n\t\tstate.Put(\"error\", err)\n\t\tui.Error(err.Error())\n\t\treturn multistep.ActionHalt\n\t}\n\n\tinstance := latestInstance.(*ec2.Instance)\n\n\tec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1)\n\tec2Tags[0] = &ec2.Tag{Key: aws.String(\"Name\"), Value: aws.String(\"Packer Builder\")}\n\tfor k, v := range s.Tags {\n\t\tec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\n\t_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{\n\t\tTags: ec2Tags,\n\t\tResources: []*string{instance.InstanceId},\n\t})\n\tif err != nil {\n\t\tui.Message(\n\t\t\tfmt.Sprintf(\"Failed to tag a Name on the builder instance: %s\", err))\n\t}\n\n\tif s.Debug {\n\t\tif instance.PublicDnsName != nil && *instance.PublicDnsName != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Public DNS: %s\", *instance.PublicDnsName))\n\t\t}\n\n\t\tif instance.PublicIpAddress != nil && *instance.PublicIpAddress != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Public IP: %s\", *instance.PublicIpAddress))\n\t\t}\n\n\t\tif instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Private IP: %s\", *instance.PrivateIpAddress))\n\t\t}\n\t}\n\n\tstate.Put(\"instance\", instance)\n\n\treturn multistep.ActionContinue\n}\n\nfunc (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {\n\n\tec2conn := state.Get(\"ec2\").(*ec2.EC2)\n\tui := state.Get(\"ui\").(packer.Ui)\n\n\t\/\/ Cancel the spot request if it exists\n\tif s.spotRequest != nil {\n\t\tui.Say(\"Cancelling the spot request...\")\n\t\tinput := &ec2.CancelSpotInstanceRequestsInput{\n\t\t\tSpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId},\n\t\t}\n\t\tif _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil {\n\t\t\tui.Error(fmt.Sprintf(\"Error cancelling the spot request, may still be around: %s\", err))\n\t\t\treturn\n\t\t}\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"active\", \"open\"},\n\t\t\tRefresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId),\n\t\t\tTarget: \"cancelled\",\n\t\t}\n\n\t\tWaitForState(&stateChange)\n\n\t}\n\n\t\/\/ Terminate the source instance if it exists\n\tif s.instanceId != \"\" {\n\t\tui.Say(\"Terminating the source AWS instance...\")\n\t\tif _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {\n\t\t\tui.Error(fmt.Sprintf(\"Error terminating instance, may still be around: %s\", err))\n\t\t\treturn\n\t\t}\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"pending\", \"running\", \"shutting-down\", \"stopped\", \"stopping\"},\n\t\t\tRefresh: InstanceStateRefreshFunc(ec2conn, s.instanceId),\n\t\t\tTarget: \"terminated\",\n\t\t}\n\n\t\tWaitForState(&stateChange)\n\t}\n}\n\nfunc WaitUntilSecurityGroupExists(c *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) error {\n\tfor i := 0; i < 40; i++ {\n\t\t_, err := c.DescribeSecurityGroups(input)\n\t\tif err != nil {\n\t\t\t\/\/ Check if this is just because it doesn't exist yet\n\t\t\tif ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == \"InvalidSecurityGroupID.NotFound\" {\n\t\t\t\tlog.Printf(\"[DEBUG] Security group %v doesn't exist, sleeping for a moment\", input.GroupIds)\n\t\t\t\ttime.Sleep(15 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ The error is something else, abort and throw it\n\t\t\treturn fmt.Errorf(\"Error looking for security group %v: %s\", input.GroupIds, err)\n\t\t}\n\n\t\t\/\/ Success!\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Timeout waiting for security group %v to appear\", input.GroupIds)\n}\n<commit_msg>Properly wait for security group to exist.<commit_after>package common\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/private\/waiter\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\n\t\"github.com\/mitchellh\/multistep\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n)\n\ntype StepRunSourceInstance struct {\n\tAssociatePublicIpAddress bool\n\tAvailabilityZone string\n\tBlockDevices BlockDevices\n\tDebug bool\n\tEbsOptimized bool\n\tExpectedRootDevice string\n\tInstanceType string\n\tIamInstanceProfile string\n\tSourceAMI string\n\tSpotPrice string\n\tSpotPriceProduct string\n\tSubnetId string\n\tTags map[string]string\n\tUserData string\n\tUserDataFile string\n\tInstanceInitiatedShutdownBehavior string\n\n\tinstanceId string\n\tspotRequest *ec2.SpotInstanceRequest\n}\n\nfunc (s *StepRunSourceInstance) Run(state multistep.StateBag) multistep.StepAction {\n\tec2conn := state.Get(\"ec2\").(*ec2.EC2)\n\tkeyName := state.Get(\"keyPair\").(string)\n\ttempSecurityGroupIds := state.Get(\"securityGroupIds\").([]string)\n\tui := state.Get(\"ui\").(packer.Ui)\n\n\tsecurityGroupIds := make([]*string, len(tempSecurityGroupIds))\n\tfor i, sg := range tempSecurityGroupIds {\n\t\tlog.Printf(\"[DEBUG] Waiting for tempSecurityGroup: %s\", sg)\n\t\terr := WaitUntilSecurityGroupExists(ec2conn,\n\t\t\t&ec2.DescribeSecurityGroupsInput{\n\t\t\t\tGroupIds: []*string{aws.String(sg)},\n\t\t\t},\n\t\t)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"[DEBUG] Found security group %s\", sg)\n\t\t\tsecurityGroupIds[i] = aws.String(sg)\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"Timed out waiting for security group %s: %s\", sg, err)\n\t\t\tlog.Printf(\"[DEBUG] %s\", err.Error())\n\t\t\tstate.Put(\"error\", err)\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t}\n\n\tuserData := s.UserData\n\tif s.UserDataFile != \"\" {\n\t\tcontents, err := ioutil.ReadFile(s.UserDataFile)\n\t\tif err != nil {\n\t\t\tstate.Put(\"error\", fmt.Errorf(\"Problem reading user data file: %s\", err))\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tuserData = string(contents)\n\t}\n\n\t\/\/ Test if it is encoded already, and if not, encode it\n\tif _, err := base64.StdEncoding.DecodeString(userData); err != nil {\n\t\tlog.Printf(\"[DEBUG] base64 encoding user data...\")\n\t\tuserData = base64.StdEncoding.EncodeToString([]byte(userData))\n\t}\n\n\tui.Say(\"Launching a source AWS instance...\")\n\timage, ok := state.Get(\"source_image\").(*ec2.Image)\n\tif !ok {\n\t\tstate.Put(\"error\", fmt.Errorf(\"source_image type assertion failed\"))\n\t\treturn multistep.ActionHalt\n\t}\n\ts.SourceAMI = *image.ImageId\n\n\tif s.ExpectedRootDevice != \"\" && *image.RootDeviceType != s.ExpectedRootDevice {\n\t\tstate.Put(\"error\", fmt.Errorf(\n\t\t\t\"The provided source AMI has an invalid root device type.\\n\"+\n\t\t\t\t\"Expected '%s', got '%s'.\",\n\t\t\ts.ExpectedRootDevice, *image.RootDeviceType))\n\t\treturn multistep.ActionHalt\n\t}\n\n\tspotPrice := s.SpotPrice\n\tavailabilityZone := s.AvailabilityZone\n\tif spotPrice == \"auto\" {\n\t\tui.Message(fmt.Sprintf(\n\t\t\t\"Finding spot price for %s %s...\",\n\t\t\ts.SpotPriceProduct, s.InstanceType))\n\n\t\t\/\/ Detect the spot price\n\t\tstartTime := time.Now().Add(-1 * time.Hour)\n\t\tresp, err := ec2conn.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{\n\t\t\tInstanceTypes: []*string{&s.InstanceType},\n\t\t\tProductDescriptions: []*string{&s.SpotPriceProduct},\n\t\t\tAvailabilityZone: &s.AvailabilityZone,\n\t\t\tStartTime: &startTime,\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error finding spot price: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tvar price float64\n\t\tfor _, history := range resp.SpotPriceHistory {\n\t\t\tlog.Printf(\"[INFO] Candidate spot price: %s\", *history.SpotPrice)\n\t\t\tcurrent, err := strconv.ParseFloat(*history.SpotPrice, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[ERR] Error parsing spot price: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif price == 0 || current < price {\n\t\t\t\tprice = current\n\t\t\t\tif s.AvailabilityZone == \"\" {\n\t\t\t\t\tavailabilityZone = *history.AvailabilityZone\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif price == 0 {\n\t\t\terr := fmt.Errorf(\"No candidate spot prices found!\")\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t} else {\n\t\t\t\/\/ Add 0.5 cents to minimum spot bid to ensure capacity will be available\n\t\t\t\/\/ Avoids price-too-low error in active markets which can fluctuate\n\t\t\tprice = price + 0.005\n\t\t}\n\n\t\tspotPrice = strconv.FormatFloat(price, 'f', -1, 64)\n\t}\n\n\tvar instanceId string\n\n\tif spotPrice == \"\" || spotPrice == \"0\" {\n\t\trunOpts := &ec2.RunInstancesInput{\n\t\t\tImageId: &s.SourceAMI,\n\t\t\tInstanceType: &s.InstanceType,\n\t\t\tUserData: &userData,\n\t\t\tMaxCount: aws.Int64(1),\n\t\t\tMinCount: aws.Int64(1),\n\t\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},\n\t\t\tBlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),\n\t\t\tPlacement: &ec2.Placement{AvailabilityZone: &s.AvailabilityZone},\n\t\t\tEbsOptimized: &s.EbsOptimized,\n\t\t}\n\n\t\tif keyName != \"\" {\n\t\t\trunOpts.KeyName = &keyName\n\t\t}\n\n\t\tif s.SubnetId != \"\" && s.AssociatePublicIpAddress {\n\t\t\trunOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{\n\t\t\t\t{\n\t\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\t\tAssociatePublicIpAddress: &s.AssociatePublicIpAddress,\n\t\t\t\t\tSubnetId: &s.SubnetId,\n\t\t\t\t\tGroups: securityGroupIds,\n\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\trunOpts.SubnetId = &s.SubnetId\n\t\t\trunOpts.SecurityGroupIds = securityGroupIds\n\t\t}\n\n\t\tif s.ExpectedRootDevice == \"ebs\" {\n\t\t\trunOpts.InstanceInitiatedShutdownBehavior = &s.InstanceInitiatedShutdownBehavior\n\t\t}\n\n\t\trunResp, err := ec2conn.RunInstances(runOpts)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error launching source instance: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tinstanceId = *runResp.Instances[0].InstanceId\n\t} else {\n\t\tui.Message(fmt.Sprintf(\n\t\t\t\"Requesting spot instance '%s' for: %s\",\n\t\t\ts.InstanceType, spotPrice))\n\n\t\trunOpts := &ec2.RequestSpotLaunchSpecification{\n\t\t\tImageId: &s.SourceAMI,\n\t\t\tInstanceType: &s.InstanceType,\n\t\t\tUserData: &userData,\n\t\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{Name: &s.IamInstanceProfile},\n\t\t\tPlacement: &ec2.SpotPlacement{\n\t\t\t\tAvailabilityZone: &availabilityZone,\n\t\t\t},\n\t\t\tBlockDeviceMappings: s.BlockDevices.BuildLaunchDevices(),\n\t\t\tEbsOptimized: &s.EbsOptimized,\n\t\t}\n\n\t\tif s.SubnetId != \"\" && s.AssociatePublicIpAddress {\n\t\t\trunOpts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{\n\t\t\t\t{\n\t\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\t\tAssociatePublicIpAddress: &s.AssociatePublicIpAddress,\n\t\t\t\t\tSubnetId: &s.SubnetId,\n\t\t\t\t\tGroups: securityGroupIds,\n\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\trunOpts.SubnetId = &s.SubnetId\n\t\t\trunOpts.SecurityGroupIds = securityGroupIds\n\t\t}\n\n\t\tif keyName != \"\" {\n\t\t\trunOpts.KeyName = &keyName\n\t\t}\n\n\t\trunSpotResp, err := ec2conn.RequestSpotInstances(&ec2.RequestSpotInstancesInput{\n\t\t\tSpotPrice: &spotPrice,\n\t\t\tLaunchSpecification: runOpts,\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error launching source spot instance: %s\", err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\ts.spotRequest = runSpotResp.SpotInstanceRequests[0]\n\n\t\tspotRequestId := s.spotRequest.SpotInstanceRequestId\n\t\tui.Message(fmt.Sprintf(\"Waiting for spot request (%s) to become active...\", *spotRequestId))\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"open\"},\n\t\t\tTarget: \"active\",\n\t\t\tRefresh: SpotRequestStateRefreshFunc(ec2conn, *spotRequestId),\n\t\t\tStepState: state,\n\t\t}\n\t\t_, err = WaitForState(&stateChange)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error waiting for spot request (%s) to become ready: %s\", *spotRequestId, err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\n\t\tspotResp, err := ec2conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{\n\t\t\tSpotInstanceRequestIds: []*string{spotRequestId},\n\t\t})\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error finding spot request (%s): %s\", *spotRequestId, err)\n\t\t\tstate.Put(\"error\", err)\n\t\t\tui.Error(err.Error())\n\t\t\treturn multistep.ActionHalt\n\t\t}\n\t\tinstanceId = *spotResp.SpotInstanceRequests[0].InstanceId\n\t}\n\n\t\/\/ Set the instance ID so that the cleanup works properly\n\ts.instanceId = instanceId\n\n\tui.Message(fmt.Sprintf(\"Instance ID: %s\", instanceId))\n\tui.Say(fmt.Sprintf(\"Waiting for instance (%v) to become ready...\", instanceId))\n\tstateChange := StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: \"running\",\n\t\tRefresh: InstanceStateRefreshFunc(ec2conn, instanceId),\n\t\tStepState: state,\n\t}\n\tlatestInstance, err := WaitForState(&stateChange)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Error waiting for instance (%s) to become ready: %s\", instanceId, err)\n\t\tstate.Put(\"error\", err)\n\t\tui.Error(err.Error())\n\t\treturn multistep.ActionHalt\n\t}\n\n\tinstance := latestInstance.(*ec2.Instance)\n\n\tec2Tags := make([]*ec2.Tag, 1, len(s.Tags)+1)\n\tec2Tags[0] = &ec2.Tag{Key: aws.String(\"Name\"), Value: aws.String(\"Packer Builder\")}\n\tfor k, v := range s.Tags {\n\t\tec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\n\t_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{\n\t\tTags: ec2Tags,\n\t\tResources: []*string{instance.InstanceId},\n\t})\n\tif err != nil {\n\t\tui.Message(\n\t\t\tfmt.Sprintf(\"Failed to tag a Name on the builder instance: %s\", err))\n\t}\n\n\tif s.Debug {\n\t\tif instance.PublicDnsName != nil && *instance.PublicDnsName != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Public DNS: %s\", *instance.PublicDnsName))\n\t\t}\n\n\t\tif instance.PublicIpAddress != nil && *instance.PublicIpAddress != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Public IP: %s\", *instance.PublicIpAddress))\n\t\t}\n\n\t\tif instance.PrivateIpAddress != nil && *instance.PrivateIpAddress != \"\" {\n\t\t\tui.Message(fmt.Sprintf(\"Private IP: %s\", *instance.PrivateIpAddress))\n\t\t}\n\t}\n\n\tstate.Put(\"instance\", instance)\n\n\treturn multistep.ActionContinue\n}\n\nfunc (s *StepRunSourceInstance) Cleanup(state multistep.StateBag) {\n\n\tec2conn := state.Get(\"ec2\").(*ec2.EC2)\n\tui := state.Get(\"ui\").(packer.Ui)\n\n\t\/\/ Cancel the spot request if it exists\n\tif s.spotRequest != nil {\n\t\tui.Say(\"Cancelling the spot request...\")\n\t\tinput := &ec2.CancelSpotInstanceRequestsInput{\n\t\t\tSpotInstanceRequestIds: []*string{s.spotRequest.SpotInstanceRequestId},\n\t\t}\n\t\tif _, err := ec2conn.CancelSpotInstanceRequests(input); err != nil {\n\t\t\tui.Error(fmt.Sprintf(\"Error cancelling the spot request, may still be around: %s\", err))\n\t\t\treturn\n\t\t}\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"active\", \"open\"},\n\t\t\tRefresh: SpotRequestStateRefreshFunc(ec2conn, *s.spotRequest.SpotInstanceRequestId),\n\t\t\tTarget: \"cancelled\",\n\t\t}\n\n\t\tWaitForState(&stateChange)\n\n\t}\n\n\t\/\/ Terminate the source instance if it exists\n\tif s.instanceId != \"\" {\n\t\tui.Say(\"Terminating the source AWS instance...\")\n\t\tif _, err := ec2conn.TerminateInstances(&ec2.TerminateInstancesInput{InstanceIds: []*string{&s.instanceId}}); err != nil {\n\t\t\tui.Error(fmt.Sprintf(\"Error terminating instance, may still be around: %s\", err))\n\t\t\treturn\n\t\t}\n\t\tstateChange := StateChangeConf{\n\t\t\tPending: []string{\"pending\", \"running\", \"shutting-down\", \"stopped\", \"stopping\"},\n\t\t\tRefresh: InstanceStateRefreshFunc(ec2conn, s.instanceId),\n\t\t\tTarget: \"terminated\",\n\t\t}\n\n\t\tWaitForState(&stateChange)\n\t}\n}\n\nfunc WaitUntilSecurityGroupExists(c *ec2.EC2, input *ec2.DescribeSecurityGroupsInput) error {\n\twaiterCfg := waiter.Config{\n\t\tOperation: \"DescribeSecurityGroups\",\n\t\tDelay: 15,\n\t\tMaxAttempts: 40,\n\t\tAcceptors: []waiter.WaitAcceptor{\n\t\t\t{\n\t\t\t\tState: \"success\",\n\t\t\t\tMatcher: \"path\",\n\t\t\t\tArgument: \"length(SecurityGroups[]) > `0`\",\n\t\t\t\tExpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: \"retry\",\n\t\t\t\tMatcher: \"error\",\n\t\t\t\tArgument: \"\",\n\t\t\t\tExpected: \"InvalidGroupID.NotFound\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: \"retry\",\n\t\t\t\tMatcher: \"error\",\n\t\t\t\tArgument: \"\",\n\t\t\t\tExpected: \"InvalidSecurityGroupID.NotFound\",\n\t\t\t},\n\t\t},\n\t}\n\n\tw := waiter.Waiter{\n\t\tClient: c,\n\t\tInput: input,\n\t\tConfig: waiterCfg,\n\t}\n\treturn w.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The goyy Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sqls\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/goyy\/goyy.v0\/comm\/xtype\"\n\t\"gopkg.in\/goyy\/goyy.v0\/data\/dialect\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/errors\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/strings\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/templates\"\n)\n\n\/\/ select ... from ... -> select count(*) from ...\nfunc ParseCountSql(sql string) string {\n\tstack := &xtype.Stack{}\n\tss := strings.Split(sql, \" \")\n\tp := 0\n\tfor _, v := range ss {\n\t\tif strings.Contains(strings.ToLower(v), \"select \") {\n\t\t\tp++\n\t\t\tstack.Push(p)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(strings.ToLower(v), \" from \") {\n\t\t\tif stack.Len() == 1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tstack.Pop()\n\t\t\tcontinue\n\t\t}\n\t}\n\tpfrom := strings.IndexOrdinal(strings.ToLower(sql), \" from \", p)\n\treturn \"select count(*) \" + sql[pfrom:]\n}\n\n\/\/ ParseNamedSql takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by a database.\nfunc ParseNamedSql(dia dialect.Interface, sql string, args map[string]interface{}) (sqlout string, argsout []interface{}, err error) {\n\tif dia == nil || strings.IsBlank(sql) || args == nil {\n\t\terr = errors.NewNotBlank(\"dia\/sql\/args\")\n\t\treturn\n\t}\n\tif !strings.Contains(sql, \"#{\") {\n\t\tsqlout = sql\n\t\targsout = make([]interface{}, 0)\n\t\treturn\n\t}\n\tsqls := strings.Betweens(sql, \"#{\", \"}\")\n\tif sqls != nil && len(sqls) > 0 {\n\t\ti := 0\n\t\tfor _, v := range sqls {\n\t\t\tif strings.IsNotBlank(v) {\n\t\t\t\tif dia.Type() == dialect.ORACLE {\n\t\t\t\t\tsql = strings.Replace(sql, \"#{\"+v+\"}\", fmt.Sprintf(\":%d\", i), -1)\n\t\t\t\t\ti++\n\t\t\t\t} else {\n\t\t\t\t\tsql = strings.Replace(sql, \"#{\"+v+\"}\", \"?\", -1)\n\t\t\t\t}\n\t\t\t\tif _, ok := args[v]; ok {\n\t\t\t\t\targsout = append(argsout, args[v])\n\t\t\t\t} else {\n\t\t\t\t\terr = errors.NewNotBlank(\"map[\" + v + \"]\")\n\t\t\t\t\tsqlout = sql\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsqlout = sql\n\t}\n\treturn\n}\n\n\/\/ ParseTemplateSql takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by a database.\nfunc ParseTemplateSql(sql string, args map[string]interface{}) (out string, err error) {\n\tt, err := template.New(\"sqls-tmpl\").Funcs(templates.Text.FuncMap).Parse(sql)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn\n\t}\n\tvar v bytes.Buffer\n\terr = t.Execute(&v, args)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn\n\t}\n\tout = v.String()\n\treturn\n}\n<commit_msg>ParseCountSql : Solve the problem of analyzing from<commit_after>\/\/ Copyright 2014 The goyy Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sqls\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/goyy\/goyy.v0\/comm\/xtype\"\n\t\"gopkg.in\/goyy\/goyy.v0\/data\/dialect\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/errors\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/strings\"\n\t\"gopkg.in\/goyy\/goyy.v0\/util\/templates\"\n)\n\n\/\/ select ... from ... -> select count(*) from ...\nfunc ParseCountSql(sql string) string {\n\tstack := &xtype.Stack{}\n\tss := strings.Split(sql, \" \")\n\tp := 0\n\tfor _, v := range ss {\n\t\tif strings.Contains(strings.ToLower(v), \"select\") {\n\t\t\tp++\n\t\t\tstack.Push(p)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(strings.ToLower(v), \"from\") {\n\t\t\tif stack.Len() == 1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tstack.Pop()\n\t\t\tcontinue\n\t\t}\n\t}\n\tpfrom := strings.IndexOrdinal(strings.ToLower(sql), \" from \", p)\n\treturn \"select count(*)\" + sql[pfrom:]\n}\n\n\/\/ ParseNamedSql takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by a database.\nfunc ParseNamedSql(dia dialect.Interface, sql string, args map[string]interface{}) (sqlout string, argsout []interface{}, err error) {\n\tif dia == nil || strings.IsBlank(sql) || args == nil {\n\t\terr = errors.NewNotBlank(\"dia\/sql\/args\")\n\t\treturn\n\t}\n\tif !strings.Contains(sql, \"#{\") {\n\t\tsqlout = sql\n\t\targsout = make([]interface{}, 0)\n\t\treturn\n\t}\n\tsqls := strings.Betweens(sql, \"#{\", \"}\")\n\tif sqls != nil && len(sqls) > 0 {\n\t\ti := 0\n\t\tfor _, v := range sqls {\n\t\t\tif strings.IsNotBlank(v) {\n\t\t\t\tif dia.Type() == dialect.ORACLE {\n\t\t\t\t\tsql = strings.Replace(sql, \"#{\"+v+\"}\", fmt.Sprintf(\":%d\", i), -1)\n\t\t\t\t\ti++\n\t\t\t\t} else {\n\t\t\t\t\tsql = strings.Replace(sql, \"#{\"+v+\"}\", \"?\", -1)\n\t\t\t\t}\n\t\t\t\tif _, ok := args[v]; ok {\n\t\t\t\t\targsout = append(argsout, args[v])\n\t\t\t\t} else {\n\t\t\t\t\terr = errors.NewNotBlank(\"map[\" + v + \"]\")\n\t\t\t\t\tsqlout = sql\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsqlout = sql\n\t}\n\treturn\n}\n\n\/\/ ParseTemplateSql takes a query using named parameters and an argument and\n\/\/ returns a new query with a list of args that can be executed by a database.\nfunc ParseTemplateSql(sql string, args map[string]interface{}) (out string, err error) {\n\tt, err := template.New(\"sqls-tmpl\").Funcs(templates.Text.FuncMap).Parse(sql)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn\n\t}\n\tvar v bytes.Buffer\n\terr = t.Execute(&v, args)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn\n\t}\n\tout = v.String()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package drawing\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nvar (\n\t\/\/ ColorTransparent is a fully transparent color.\n\tColorTransparent = Color{}\n\n\t\/\/ ColorWhite is white.\n\tColorWhite = Color{R: 255, G: 255, B: 255, A: 255}\n\n\t\/\/ ColorBlack is black.\n\tColorBlack = Color{R: 0, G: 0, B: 0, A: 255}\n\n\t\/\/ ColorRed is red.\n\tColorRed = Color{R: 255, G: 0, B: 0, A: 255}\n\n\t\/\/ ColorGreen is green.\n\tColorGreen = Color{R: 0, G: 255, B: 0, A: 255}\n\n\t\/\/ ColorBlue is blue.\n\tColorBlue = Color{R: 0, G: 0, B: 255, A: 255}\n)\n\nfunc parseHex(hex string) uint8 {\n\tv, _ := strconv.ParseInt(hex, 16, 16)\n\treturn uint8(v)\n}\n\n\/\/ ColorFromHex returns a color from a css hex code.\nfunc ColorFromHex(hex string) Color {\n\tvar c Color\n\tif len(hex) == 3 {\n\t\tc.R = parseHex(string(hex[0])) * 0x11\n\t\tc.G = parseHex(string(hex[1])) * 0x11\n\t\tc.B = parseHex(string(hex[2])) * 0x11\n\t} else {\n\t\tc.R = parseHex(string(hex[0:2]))\n\t\tc.G = parseHex(string(hex[2:4]))\n\t\tc.B = parseHex(string(hex[4:6]))\n\t}\n\tc.A = 255\n\treturn c\n}\n\n\/\/ Color is our internal color type because color.Color is bullshit.\ntype Color struct {\n\tR uint8\n\tG uint8\n\tB uint8\n\tA uint8\n}\n\n\/\/ RGBA returns the color as a pre-alpha mixed color set.\nfunc (c Color) RGBA() (r, g, b, a uint32) {\n\tfa := float64(c.A) \/ 255.0\n\tr = uint32(float64(uint32(c.R)) * fa)\n\tr |= r << 8\n\tg = uint32(float64(uint32(c.G)) * fa)\n\tg |= g << 8\n\tb = uint32(float64(uint32(c.B)) * fa)\n\tb |= b << 8\n\ta = uint32(c.A)\n\ta |= a << 8\n\treturn\n}\n\n\/\/ IsZero returns if the color has been set or not.\nfunc (c Color) IsZero() bool {\n\treturn c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0\n}\n\n\/\/ IsTransparent returns if the colors alpha channel is zero.\nfunc (c Color) IsTransparent() bool {\n\treturn c.A == 0\n}\n\n\/\/ String returns a css string representation of the color.\nfunc (c Color) String() string {\n\tfa := float64(c.A) \/ float64(255)\n\treturn fmt.Sprintf(\"rgba(%v,%v,%v,%.1f)\", c.R, c.G, c.B, fa)\n}\n<commit_msg>with alpha.<commit_after>package drawing\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\nvar (\n\t\/\/ ColorTransparent is a fully transparent color.\n\tColorTransparent = Color{}\n\n\t\/\/ ColorWhite is white.\n\tColorWhite = Color{R: 255, G: 255, B: 255, A: 255}\n\n\t\/\/ ColorBlack is black.\n\tColorBlack = Color{R: 0, G: 0, B: 0, A: 255}\n\n\t\/\/ ColorRed is red.\n\tColorRed = Color{R: 255, G: 0, B: 0, A: 255}\n\n\t\/\/ ColorGreen is green.\n\tColorGreen = Color{R: 0, G: 255, B: 0, A: 255}\n\n\t\/\/ ColorBlue is blue.\n\tColorBlue = Color{R: 0, G: 0, B: 255, A: 255}\n)\n\nfunc parseHex(hex string) uint8 {\n\tv, _ := strconv.ParseInt(hex, 16, 16)\n\treturn uint8(v)\n}\n\n\/\/ ColorFromHex returns a color from a css hex code.\nfunc ColorFromHex(hex string) Color {\n\tvar c Color\n\tif len(hex) == 3 {\n\t\tc.R = parseHex(string(hex[0])) * 0x11\n\t\tc.G = parseHex(string(hex[1])) * 0x11\n\t\tc.B = parseHex(string(hex[2])) * 0x11\n\t} else {\n\t\tc.R = parseHex(string(hex[0:2]))\n\t\tc.G = parseHex(string(hex[2:4]))\n\t\tc.B = parseHex(string(hex[4:6]))\n\t}\n\tc.A = 255\n\treturn c\n}\n\n\/\/ Color is our internal color type because color.Color is bullshit.\ntype Color struct {\n\tR uint8\n\tG uint8\n\tB uint8\n\tA uint8\n}\n\n\/\/ RGBA returns the color as a pre-alpha mixed color set.\nfunc (c Color) RGBA() (r, g, b, a uint32) {\n\tfa := float64(c.A) \/ 255.0\n\tr = uint32(float64(uint32(c.R)) * fa)\n\tr |= r << 8\n\tg = uint32(float64(uint32(c.G)) * fa)\n\tg |= g << 8\n\tb = uint32(float64(uint32(c.B)) * fa)\n\tb |= b << 8\n\ta = uint32(c.A)\n\ta |= a << 8\n\treturn\n}\n\n\/\/ IsZero returns if the color has been set or not.\nfunc (c Color) IsZero() bool {\n\treturn c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0\n}\n\n\/\/ IsTransparent returns if the colors alpha channel is zero.\nfunc (c Color) IsTransparent() bool {\n\treturn c.A == 0\n}\n\n\/\/ WithAlpha returns a copy of the color with a given alpha.\nfunc (c Color) WithAlpha(a uint8) Color {\n\treturn Color{\n\t\tR: c.R,\n\t\tG: c.G,\n\t\tB: c.B,\n\t\tA: a,\n\t}\n}\n\n\/\/ String returns a css string representation of the color.\nfunc (c Color) String() string {\n\tfa := float64(c.A) \/ float64(255)\n\treturn fmt.Sprintf(\"rgba(%v,%v,%v,%.1f)\", c.R, c.G, c.B, fa)\n}\n<|endoftext|>"} {"text":"<commit_before>package docker\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nvar logWebsocketRequest = regexp.MustCompile(`\/containers\/([^\/]+)\/logs`)\nvar hostCheck = regexp.MustCompile(`vibioh\\.fr$`)\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n\tCheckOrigin: func(r *http.Request) bool {\n\t\treturn hostCheck.MatchString(r.Host)\n\t},\n}\n\nfunc logsContainerWebsocketHandler(w http.ResponseWriter, r *http.Request, containerID []byte) {\n\tlogs, err := docker.ContainerLogs(context.Background(), string(containerID), types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tdefer logs.Close()\n\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tdefer ws.Close()\n\n\tscanner := bufio.NewScanner(logs)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Bytes()\n\t\tlog.Print(string(line[8:]))\n\n\t\tif err = ws.WriteMessage(websocket.TextMessage, line[8:]); err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleWebsocket(w http.ResponseWriter, r *http.Request) {\n\turlPath := []byte(r.URL.Path)\n\n\tif logWebsocketRequest.Match(urlPath) {\n\t\tlogsContainerWebsocketHandler(w, r, logWebsocketRequest.FindSubmatch(urlPath)[1])\n\t}\n}\n<commit_msg>Adding follow<commit_after>package docker\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nvar logWebsocketRequest = regexp.MustCompile(`\/containers\/([^\/]+)\/logs`)\nvar hostCheck = regexp.MustCompile(`vibioh\\.fr$`)\n\nvar upgrader = websocket.Upgrader{\n\tReadBufferSize: 1024,\n\tWriteBufferSize: 1024,\n\tCheckOrigin: func(r *http.Request) bool {\n\t\treturn hostCheck.MatchString(r.Host)\n\t},\n}\n\nfunc logsContainerWebsocketHandler(w http.ResponseWriter, r *http.Request, containerID []byte) {\n\tlogs, err := docker.ContainerLogs(context.Background(), string(containerID), types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true})\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tdefer logs.Close()\n\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tdefer ws.Close()\n\n\tscanner := bufio.NewScanner(logs)\n\n\tfor scanner.Scan() {\n\t\tif err = ws.WriteMessage(websocket.TextMessage, scanner.Bytes()[8:]); err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleWebsocket(w http.ResponseWriter, r *http.Request) {\n\turlPath := []byte(r.URL.Path)\n\n\tif logWebsocketRequest.Match(urlPath) {\n\t\tlogsContainerWebsocketHandler(w, r, logWebsocketRequest.FindSubmatch(urlPath)[1])\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package drivefs\n\nimport (\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\tdrive \"code.google.com\/p\/google-api-go-client\/drive\/v2\"\n\t\"log\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ DriveDir represents a directory in google drive\ntype DriveDir struct {\n\tDir *drive.File\n\tModified time.Time\n\tCreated time.Time\n}\n\n\/\/ Attr returns the file attributes\nfunc (DriveDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: 0644,\n\t}\n}\n\n\/\/ TODO implement create function to actually create file\nfunc (DriveDir) Create(req *fuse.CreateRequest, res *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {\n\treturn nil, nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ TODO implement fsync function to actually perform an fsync\nfunc (DriveDir) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ TODO implement link function to actually perform a link\nfunc (DriveDir) Link(req *fuse.LinkRequest, node fs.Node, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d *DriveDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup dir by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif file, ok := nameToDir[name]; ok {\n\t\treturn file, nil\n\t}\n\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ ReadDir return a slice of directory entries\nfunc (d *DriveDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\tdirChan := make(chan *[]fuse.Dirent)\n\tgo func() {\n\t\t\/\/ List of directories to return\n\t\tvar dirs []fuse.Dirent\n\t\t\/\/ get all new list of files\n\t\tf, err := service.Files.List().Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tfileList := f.Items\n\t\t\/\/ Populate idToFile with new ids\n\t\tfor i := range fileList {\n\t\t\tidToFile[fileList[i].Id] = DriveFile{File: fileList[i]}\n\t\t}\n\t\t\/\/ get list of children\n\t\tc, err := service.Children.List(d.Dir.Id).Do()\n\t\t\/\/ Get children of this folder\n\t\tchildren := c.Items\n\n\t\tdirs = make([]fuse.Dirent, len(children))\n\n\t\t\/\/ populate dirs with children\n\t\tfor i := range children {\n\t\t\t\/\/ pull out a child temporarally\n\t\t\ttmp := idToFile[children[i].Id]\n\t\t\t\/\/ If child is a folder\/directory create a DirveDir else create a DriveFile\n\t\t\tif strings.Contains(tmp.File.MimeType, \"folder\") {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirChan <- &dirs\n\t}()\n\t\/\/ Wait for the lookups to be done, or die if interupt happens\n\tselect {\n\tcase tmp := <-dirChan:\n\t\treturn *tmp, nil\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n\n}\n\n\/\/ Mkdir does nothing, because drivefs is read-only\nfunc (d *DriveDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Mknod does nothing, because drivefs is read-only\nfunc (d *DriveDir) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Remove does nothing, because drivefs is read-only\nfunc (d *DriveDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Removexattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Rename does nothing, because drivefs is read-only\nfunc (d *DriveDir) Rename(req *fuse.RenameRequest, node fs.Node, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Setattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Setattr(req *fuse.SetattrRequest, res *fuse.SetattrResponse, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Setxattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Symlink does nothing, because drivefs is read-only\nfunc (d *DriveDir) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n<commit_msg>added root detection<commit_after>package drivefs\n\nimport (\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\tdrive \"code.google.com\/p\/google-api-go-client\/drive\/v2\"\n\t\"log\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ DriveDir represents a directory in google drive\ntype DriveDir struct {\n\tDir *drive.File\n\tModified time.Time\n\tCreated time.Time\n\tRoot bool\n}\n\n\/\/ Attr returns the file attributes\nfunc (DriveDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: 0644,\n\t}\n}\n\n\/\/ TODO implement create function to actually create file\nfunc (DriveDir) Create(req *fuse.CreateRequest, res *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {\n\treturn nil, nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ TODO implement fsync function to actually perform an fsync\nfunc (DriveDir) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ TODO implement link function to actually perform a link\nfunc (DriveDir) Link(req *fuse.LinkRequest, node fs.Node, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d *DriveDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup dir by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif file, ok := nameToDir[name]; ok {\n\t\treturn file, nil\n\t}\n\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ ReadDir return a slice of directory entries\nfunc (d *DriveDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\tdirChan := make(chan *[]fuse.Dirent)\n\tgo func() {\n\t\t\/\/ List of directories to return\n\t\tvar dirs []fuse.Dirent\n\t\t\/\/ get all new list of files\n\t\tf, err := service.Files.List().Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tfileList := f.Items\n\t\t\/\/ Populate idToFile with new ids\n\t\tfor i := range fileList {\n\t\t\tidToFile[fileList[i].Id] = DriveFile{File: fileList[i]}\n\t\t}\n\t\t\/\/ get list of children\n\t\t\/\/ If d is at root, fetch the root children, else fetch this file's children\n\t\tif d.Root {\n\t\t\tc, err := service.Children.List(\"root\").Do()\n\t\t} else {\n\t\t\tc, err := service.Children.List(d.Dir.Id).Do()\n\t\t}\n\n\t\t\/\/ Get children of this folder\n\t\tchildren := c.Items\\\n\n\t\tdirs = make([]fuse.Dirent, len(children))\n\n\t\t\/\/ populate dirs with children\n\t\tfor i := range children {\n\t\t\t\/\/ pull out a child temporarally\n\t\t\ttmp := idToFile[children[i].Id]\n\t\t\t\/\/ If child is a folder\/directory create a DirveDir else create a DriveFile\n\t\t\tif strings.Contains(tmp.File.MimeType, \"folder\") {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirChan <- &dirs\n\t}()\n\t\/\/ Wait for the lookups to be done, or die if interupt happens\n\tselect {\n\tcase tmp := <-dirChan:\n\t\treturn *tmp, nil\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n\n}\n\n\/\/ Mkdir does nothing, because drivefs is read-only\nfunc (d *DriveDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Mknod does nothing, because drivefs is read-only\nfunc (d *DriveDir) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Remove does nothing, because drivefs is read-only\nfunc (d *DriveDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Removexattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Rename does nothing, because drivefs is read-only\nfunc (d *DriveDir) Rename(req *fuse.RenameRequest, node fs.Node, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Setattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Setattr(req *fuse.SetattrRequest, res *fuse.SetattrResponse, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Setxattr does nothing, because drivefs is read-only\nfunc (d *DriveDir) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error {\n\treturn fuse.Errno(syscall.EROFS)\n}\n\n\/\/ Symlink does nothing, because drivefs is read-only\nfunc (d *DriveDir) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\treturn nil, fuse.Errno(syscall.EROFS)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nfunc TestExactly16Bytes(t *testing.T) {\n\tvar tests = []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"日本語\",\n\t\t\"1234567890123456\",\n\t\t\"12345678901234567890\",\n\t\t\"1234567890123本語4567890\",\n\t\t\"12345678901234日本語567890\",\n\t\t\"123456789012345日本語67890\",\n\t\t\"1234567890123456日本語7890\",\n\t\t\"1234567890123456日本語7日本語890\",\n\t}\n\tfor _, str := range tests {\n\t\tgot := exactly16Bytes(str)\n\t\tif len(got) != 16 {\n\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, length %d\", str, got, len(got))\n\t\t}\n\t\t\/\/ Make sure it is full runes.\n\t\tfor _, c := range got {\n\t\t\tif c == utf8.RuneError {\n\t\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, has partial rune\", str, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ tmpDir creates a temporary directory and returns its name.\nfunc tmpDir(t *testing.T) string {\n\tname, err := ioutil.TempDir(\"\", \"pack\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn name\n}\n\n\/\/ Test that we can create an archive, write to it, and get the same contents back.\n\/\/ Tests the rv and then the pv command on a new archive.\nfunc TestCreate(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add an entry by hand.\n\tar.addFile(helloFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now check it.\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.printContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose output plus file contents.\n\texpect := fmt.Sprintf(\"%s\\n%s\", helloFile.name, helloFile.contents)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a correct listing.\n\/\/ Tests the tv command.\nfunc TestTableOfContents(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\n\t\/\/ Now print it.\n\tar = archive(name, os.O_RDONLY, nil)\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose listing.\n\texpect := fmt.Sprintf(\"%s\\n%s\\n\", helloFile.Entry(), goodbyeFile.Entry())\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\n\t\/\/ Do it again without verbose.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, nil)\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect non-verbose listing.\n\texpect = fmt.Sprintf(\"%s\\n%s\\n\", helloFile.name, goodbyeFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\n\t\/\/ Do it again with file list arguments.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect only helloFile.\n\texpect = fmt.Sprintf(\"%s\\n\", helloFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a file.\n\/\/ Tests the x command.\nfunc TestExtract(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now extract one file. We chdir to the directory of the archive for simplicity.\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(\"os.Getwd: \", err)\n\t}\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tt.Fatal(\"os.Chdir: \", err)\n\t}\n\tdefer func() {\n\t\terr := os.Chdir(pwd)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"os.Chdir: \", err)\n\t\t}\n\t}()\n\tar = archive(name, os.O_RDONLY, []string{goodbyeFile.name})\n\tar.scan(ar.extractContents)\n\tar.fd.Close()\n\tdata, err := ioutil.ReadFile(goodbyeFile.name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Expect contents of file.\n\tresult := string(data)\n\texpect := goodbyeFile.contents\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that pack-created archives can be understood by the tools.\nfunc TestHello(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\thello := filepath.Join(dir, \"hello.go\")\n\tprog := `\n\t\tpackage main\n\t\tfunc main() {\n\t\t\tprintln(\"hello world\")\n\t\t}\n\t`\n\terr := ioutil.WriteFile(hello, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tchar := findChar(t, dir)\n\n\trun := func(args ...string) string {\n\t\treturn doRun(t, dir, args...)\n\t}\n\n\trun(\"go\", \"build\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"hello.go\")\n\trun(\".\/pack\", \"grc\", \"hello.a\", \"hello.\"+char)\n\trun(\"go\", \"tool\", char+\"l\", \"-o\", \"a.out\", \"hello.a\")\n\tout := run(\".\/a.out\")\n\tif out != \"hello world\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"hello world\\n\")\n\t}\n}\n\n\/\/ Test that pack works with very long lines in PKGDEF.\nfunc TestLargeDefs(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tlarge := filepath.Join(dir, \"large.go\")\n\tf, err := os.Create(large)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tprintf := func(format string, args ...interface{}) {\n\t\t_, err := fmt.Fprintf(f, format, args...)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Writing to %s: %v\", large, err)\n\t\t}\n\t}\n\n\tprintf(\"package large\\n\\ntype T struct {\\n\")\n\tfor i := 0; i < 10000; i++ {\n\t\tprintf(\"f%d int `tag:\\\"\", i)\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tprintf(\"t%d=%d,\", j, j)\n\t\t}\n\t\tprintf(\"\\\"`\\n\")\n\t}\n\tprintf(\"}\\n\")\n\tif err = f.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmain := filepath.Join(dir, \"main.go\")\n\tprog := `\n\t\tpackage main\n\t\timport \".\/large\"\n\t\tvar V large.T\n\t\tfunc main() {\n\t\t\tprintln(\"ok\")\n\t\t}\n\t`\n\terr = ioutil.WriteFile(main, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tchar := findChar(t, dir)\n\n\trun := func(args ...string) string {\n\t\treturn doRun(t, dir, args...)\n\t}\n\n\trun(\"go\", \"build\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"large.go\")\n\trun(\".\/pack\", \"grc\", \"large.a\", \"large.\"+char)\n\trun(\"go\", \"tool\", char+\"g\", \"main.go\")\n\trun(\"go\", \"tool\", char+\"l\", \"-o\", \"a.out\", \"main.\"+char)\n\tout := run(\".\/a.out\")\n\tif out != \"ok\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"ok\\n\")\n\t}\n}\n\n\/\/ doRun runs a program in a directory and returns the output.\nfunc doRun(t *testing.T, dir string, args ...string) string {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"%v: %v\\n%s\", args, err, string(out))\n\t}\n\treturn string(out)\n}\n\n\/\/ findChar returns the architecture character for the go command.\nfunc findChar(t *testing.T, dir string) string {\n\tout := doRun(t, dir, \"go\", \"env\")\n\tre, err := regexp.Compile(`\\s*GOCHAR=['\"]?(\\w)['\"]?`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := re.FindStringSubmatch(out)\n\tif fields == nil {\n\t\tt.Fatal(\"cannot find GOCHAR in 'go env' output:\\n\", out)\n\t}\n\treturn fields[1]\n}\n\n\/\/ Fake implementation of files.\n\nvar helloFile = &FakeFile{\n\tname: \"hello\",\n\tcontents: \"hello world\", \/\/ 11 bytes, an odd number.\n\tmode: 0644,\n}\n\nvar goodbyeFile = &FakeFile{\n\tname: \"goodbye\",\n\tcontents: \"Sayonara, Jim\", \/\/ 13 bytes, another odd number.\n\tmode: 0644,\n}\n\n\/\/ FakeFile implements FileLike and also os.FileInfo.\ntype FakeFile struct {\n\tname string\n\tcontents string\n\tmode os.FileMode\n\toffset int\n}\n\n\/\/ Reset prepares a FakeFile for reuse.\nfunc (f *FakeFile) Reset() *FakeFile {\n\tf.offset = 0\n\treturn f\n}\n\n\/\/ FileLike methods.\n\nfunc (f *FakeFile) Name() string {\n\t\/\/ A bit of a cheat: we only have a basename, so that's also ok for FileInfo.\n\treturn f.name\n}\n\nfunc (f *FakeFile) Stat() (os.FileInfo, error) {\n\treturn f, nil\n}\n\nfunc (f *FakeFile) Read(p []byte) (int, error) {\n\tif f.offset >= len(f.contents) {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(p, f.contents[f.offset:])\n\tf.offset += n\n\treturn n, nil\n}\n\nfunc (f *FakeFile) Close() error {\n\treturn nil\n}\n\n\/\/ os.FileInfo methods.\n\nfunc (f *FakeFile) Size() int64 {\n\treturn int64(len(f.contents))\n}\n\nfunc (f *FakeFile) Mode() os.FileMode {\n\treturn f.mode\n}\n\nfunc (f *FakeFile) ModTime() time.Time {\n\treturn time.Time{}\n}\n\nfunc (f *FakeFile) IsDir() bool {\n\treturn false\n}\n\nfunc (f *FakeFile) Sys() interface{} {\n\treturn nil\n}\n\n\/\/ Special helpers.\n\nfunc (f *FakeFile) Entry() *Entry {\n\treturn &Entry{\n\t\tname: f.name,\n\t\tmtime: 0, \/\/ Defined to be zero.\n\t\tuid: 0, \/\/ Ditto.\n\t\tgid: 0, \/\/ Ditto.\n\t\tmode: f.mode,\n\t\tsize: int64(len(f.contents)),\n\t}\n}\n<commit_msg>cmd\/pack: avoid .\/ import in test (fix Windows build)<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nfunc TestExactly16Bytes(t *testing.T) {\n\tvar tests = []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"日本語\",\n\t\t\"1234567890123456\",\n\t\t\"12345678901234567890\",\n\t\t\"1234567890123本語4567890\",\n\t\t\"12345678901234日本語567890\",\n\t\t\"123456789012345日本語67890\",\n\t\t\"1234567890123456日本語7890\",\n\t\t\"1234567890123456日本語7日本語890\",\n\t}\n\tfor _, str := range tests {\n\t\tgot := exactly16Bytes(str)\n\t\tif len(got) != 16 {\n\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, length %d\", str, got, len(got))\n\t\t}\n\t\t\/\/ Make sure it is full runes.\n\t\tfor _, c := range got {\n\t\t\tif c == utf8.RuneError {\n\t\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, has partial rune\", str, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ tmpDir creates a temporary directory and returns its name.\nfunc tmpDir(t *testing.T) string {\n\tname, err := ioutil.TempDir(\"\", \"pack\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn name\n}\n\n\/\/ Test that we can create an archive, write to it, and get the same contents back.\n\/\/ Tests the rv and then the pv command on a new archive.\nfunc TestCreate(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add an entry by hand.\n\tar.addFile(helloFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now check it.\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.printContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose output plus file contents.\n\texpect := fmt.Sprintf(\"%s\\n%s\", helloFile.name, helloFile.contents)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a correct listing.\n\/\/ Tests the tv command.\nfunc TestTableOfContents(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\n\t\/\/ Now print it.\n\tar = archive(name, os.O_RDONLY, nil)\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose listing.\n\texpect := fmt.Sprintf(\"%s\\n%s\\n\", helloFile.Entry(), goodbyeFile.Entry())\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\n\t\/\/ Do it again without verbose.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, nil)\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect non-verbose listing.\n\texpect = fmt.Sprintf(\"%s\\n%s\\n\", helloFile.name, goodbyeFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\n\t\/\/ Do it again with file list arguments.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect only helloFile.\n\texpect = fmt.Sprintf(\"%s\\n\", helloFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a file.\n\/\/ Tests the x command.\nfunc TestExtract(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now extract one file. We chdir to the directory of the archive for simplicity.\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(\"os.Getwd: \", err)\n\t}\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tt.Fatal(\"os.Chdir: \", err)\n\t}\n\tdefer func() {\n\t\terr := os.Chdir(pwd)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"os.Chdir: \", err)\n\t\t}\n\t}()\n\tar = archive(name, os.O_RDONLY, []string{goodbyeFile.name})\n\tar.scan(ar.extractContents)\n\tar.fd.Close()\n\tdata, err := ioutil.ReadFile(goodbyeFile.name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Expect contents of file.\n\tresult := string(data)\n\texpect := goodbyeFile.contents\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that pack-created archives can be understood by the tools.\nfunc TestHello(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\thello := filepath.Join(dir, \"hello.go\")\n\tprog := `\n\t\tpackage main\n\t\tfunc main() {\n\t\t\tprintln(\"hello world\")\n\t\t}\n\t`\n\terr := ioutil.WriteFile(hello, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tchar := findChar(t, dir)\n\n\trun := func(args ...string) string {\n\t\treturn doRun(t, dir, args...)\n\t}\n\n\trun(\"go\", \"build\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"hello.go\")\n\trun(\".\/pack\", \"grc\", \"hello.a\", \"hello.\"+char)\n\trun(\"go\", \"tool\", char+\"l\", \"-o\", \"a.out\", \"hello.a\")\n\tout := run(\".\/a.out\")\n\tif out != \"hello world\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"hello world\\n\")\n\t}\n}\n\n\/\/ Test that pack works with very long lines in PKGDEF.\nfunc TestLargeDefs(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tlarge := filepath.Join(dir, \"large.go\")\n\tf, err := os.Create(large)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tprintf := func(format string, args ...interface{}) {\n\t\t_, err := fmt.Fprintf(f, format, args...)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Writing to %s: %v\", large, err)\n\t\t}\n\t}\n\n\tprintf(\"package large\\n\\ntype T struct {\\n\")\n\tfor i := 0; i < 10000; i++ {\n\t\tprintf(\"f%d int `tag:\\\"\", i)\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tprintf(\"t%d=%d,\", j, j)\n\t\t}\n\t\tprintf(\"\\\"`\\n\")\n\t}\n\tprintf(\"}\\n\")\n\tif err = f.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmain := filepath.Join(dir, \"main.go\")\n\tprog := `\n\t\tpackage main\n\t\timport \"large\"\n\t\tvar V large.T\n\t\tfunc main() {\n\t\t\tprintln(\"ok\")\n\t\t}\n\t`\n\terr = ioutil.WriteFile(main, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tchar := findChar(t, dir)\n\n\trun := func(args ...string) string {\n\t\treturn doRun(t, dir, args...)\n\t}\n\n\trun(\"go\", \"build\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"large.go\")\n\trun(\".\/pack\", \"grc\", \"large.a\", \"large.\"+char)\n\trun(\"go\", \"tool\", char+\"g\", \"-I\", \".\", \"main.go\")\n\trun(\"go\", \"tool\", char+\"l\", \"-L\", \".\", \"-o\", \"a.out\", \"main.\"+char)\n\tout := run(\".\/a.out\")\n\tif out != \"ok\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"ok\\n\")\n\t}\n}\n\n\/\/ doRun runs a program in a directory and returns the output.\nfunc doRun(t *testing.T, dir string, args ...string) string {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"%v: %v\\n%s\", args, err, string(out))\n\t}\n\treturn string(out)\n}\n\n\/\/ findChar returns the architecture character for the go command.\nfunc findChar(t *testing.T, dir string) string {\n\tout := doRun(t, dir, \"go\", \"env\")\n\tre, err := regexp.Compile(`\\s*GOCHAR=['\"]?(\\w)['\"]?`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := re.FindStringSubmatch(out)\n\tif fields == nil {\n\t\tt.Fatal(\"cannot find GOCHAR in 'go env' output:\\n\", out)\n\t}\n\treturn fields[1]\n}\n\n\/\/ Fake implementation of files.\n\nvar helloFile = &FakeFile{\n\tname: \"hello\",\n\tcontents: \"hello world\", \/\/ 11 bytes, an odd number.\n\tmode: 0644,\n}\n\nvar goodbyeFile = &FakeFile{\n\tname: \"goodbye\",\n\tcontents: \"Sayonara, Jim\", \/\/ 13 bytes, another odd number.\n\tmode: 0644,\n}\n\n\/\/ FakeFile implements FileLike and also os.FileInfo.\ntype FakeFile struct {\n\tname string\n\tcontents string\n\tmode os.FileMode\n\toffset int\n}\n\n\/\/ Reset prepares a FakeFile for reuse.\nfunc (f *FakeFile) Reset() *FakeFile {\n\tf.offset = 0\n\treturn f\n}\n\n\/\/ FileLike methods.\n\nfunc (f *FakeFile) Name() string {\n\t\/\/ A bit of a cheat: we only have a basename, so that's also ok for FileInfo.\n\treturn f.name\n}\n\nfunc (f *FakeFile) Stat() (os.FileInfo, error) {\n\treturn f, nil\n}\n\nfunc (f *FakeFile) Read(p []byte) (int, error) {\n\tif f.offset >= len(f.contents) {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(p, f.contents[f.offset:])\n\tf.offset += n\n\treturn n, nil\n}\n\nfunc (f *FakeFile) Close() error {\n\treturn nil\n}\n\n\/\/ os.FileInfo methods.\n\nfunc (f *FakeFile) Size() int64 {\n\treturn int64(len(f.contents))\n}\n\nfunc (f *FakeFile) Mode() os.FileMode {\n\treturn f.mode\n}\n\nfunc (f *FakeFile) ModTime() time.Time {\n\treturn time.Time{}\n}\n\nfunc (f *FakeFile) IsDir() bool {\n\treturn false\n}\n\nfunc (f *FakeFile) Sys() interface{} {\n\treturn nil\n}\n\n\/\/ Special helpers.\n\nfunc (f *FakeFile) Entry() *Entry {\n\treturn &Entry{\n\t\tname: f.name,\n\t\tmtime: 0, \/\/ Defined to be zero.\n\t\tuid: 0, \/\/ Ditto.\n\t\tgid: 0, \/\/ Ditto.\n\t\tmode: f.mode,\n\t\tsize: int64(len(f.contents)),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/QubitProducts\/bamboo\/configuration\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Describes an app process running\ntype Task struct {\n\tHost string\n\tPort int\n}\n\n\/\/ An app may have multiple processes\ntype App struct {\n\tId string\n\tEscapedId string\n\tHealthCheckPath string\n\tTasks []Task\n\tServicePort int\n\tEnv map[string]string\n}\n\ntype AppList []App\n\nfunc (slice AppList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice AppList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice AppList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype MarathonTaskList []MarathonTask\n\ntype MarathonTasks struct {\n\tTasks MarathonTaskList `json:tasks`\n}\n\ntype MarathonTask struct {\n\tAppId string\n\tId string\n\tHost string\n\tPorts []int\n\tServicePorts []int\n\tStartedAt string\n\tStagedAt string\n\tVersion string\n}\n\nfunc (slice MarathonTaskList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice MarathonTaskList) Less(i, j int) bool {\n\treturn slice[i].StagedAt < slice[j].StagedAt\n}\n\nfunc (slice MarathonTaskList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype MarathonApps struct {\n\tApps []MarathonApp `json:apps`\n}\n\ntype MarathonApp struct {\n\tId string `json:id`\n\tHealthChecks []HealthChecks `json:healthChecks`\n\tPorts []int `json:ports`\n\tEnv map[string]string `json:env`\n}\n\ntype HealthChecks struct {\n\tPath string `json:path`\n}\n\nfunc fetchMarathonApps(endpoint string) (map[string]MarathonApp, error) {\n\tresponse, err := http.Get(endpoint + \"\/v2\/apps\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tdefer response.Body.Close()\n\t\tvar appResponse MarathonApps\n\n\t\tcontents, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = json.Unmarshal(contents, &appResponse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdataById := map[string]MarathonApp{}\n\n\t\tfor _, appConfig := range appResponse.Apps {\n\t\t\tdataById[appConfig.Id] = appConfig\n\t\t}\n\n\t\treturn dataById, nil\n\t}\n}\n\nfunc fetchTasks(endpoint string) (map[string][]MarathonTask, error) {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", endpoint+\"\/v2\/tasks\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresponse, err := client.Do(req)\n\n\tvar tasks MarathonTasks\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tcontents, err := ioutil.ReadAll(response.Body)\n\t\tdefer response.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = json.Unmarshal(contents, &tasks)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttaskList := tasks.Tasks\n\t\tsort.Sort(taskList)\n\n\t\ttasksById := map[string][]MarathonTask{}\n\t\tfor _, task := range taskList {\n\t\t\tif tasksById[task.AppId] == nil {\n\t\t\t\ttasksById[task.AppId] = []MarathonTask{}\n\t\t\t}\n\t\t\ttasksById[task.AppId] = append(tasksById[task.AppId], task)\n\t\t}\n\n\t\treturn tasksById, nil\n\t}\n}\n\nfunc createApps(tasksById map[string][]MarathonTask, marathonApps map[string]MarathonApp) AppList {\n\n\tapps := AppList{}\n\n\tfor appId, tasks := range tasksById {\n\t\tsimpleTasks := []Task{}\n\n\t\tfor _, task := range tasks {\n\t\t\tif len(task.Ports) > 0 {\n\t\t\t\tsimpleTasks = append(simpleTasks, Task{Host: task.Host, Port: task.Ports[0]})\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Try to handle old app id format without slashes\n\t\tappPath := appId\n\t\tif !strings.HasPrefix(appId, \"\/\") {\n\t\t\tappPath = \"\/\" + appId\n\t\t}\n\n\t\tapp := App{\n\t\t\t\/\/ Since Marathon 0.7, apps are namespaced with path\n\t\t\tId: appPath,\n\t\t\t\/\/ Used for template\n\t\t\tEscapedId: strings.Replace(appId, \"\/\", \"::\", -1),\n\t\t\tTasks: simpleTasks,\n\t\t\tHealthCheckPath: parseHealthCheckPath(marathonApps[appId].HealthChecks),\n\t\t\tEnv: marathonApps[appId].Env,\n\t\t}\n\n\t\tif len(marathonApps[appId].Ports) > 0 {\n\t\t\tapp.ServicePort = marathonApps[appId].Ports[0]\n\t\t}\n\n\t\tapps = append(apps, app)\n\t}\n\treturn apps\n}\n\nfunc parseHealthCheckPath(checks []HealthChecks) string {\n\tif len(checks) > 0 {\n\t\treturn checks[0].Path\n\t}\n\treturn \"\"\n}\n\n\/*\n\tApps returns a struct that describes Marathon current app and their\n\tsub tasks information.\n\n\tParameters:\n\t\tendpoint: Marathon HTTP endpoint, e.g. http:\/\/localhost:8080\n*\/\nfunc FetchApps(maraconf configuration.Marathon) (AppList, error) {\n\n\tvar applist AppList\n\tvar err error\n\n\t\/\/ try all configured endpoints until one succeeds\n\tfor _, url := range maraconf.Endpoints() {\n\t\tapplist, err = _fetchApps(url)\n\t\tif err == nil {\n\t\t\treturn applist, err\n\t\t}\n\t}\n\t\/\/ return last error\n\treturn nil, err\n}\n\nfunc _fetchApps(url string) (AppList, error) {\n\ttasks, err := fetchTasks(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarathonApps, err := fetchMarathonApps(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapps := createApps(tasks, marathonApps)\n\tsort.Sort(apps)\n\treturn apps, nil\n}\n<commit_msg>Add multiple ports to apps<commit_after>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/QubitProducts\/bamboo\/configuration\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Describes an app process running\ntype Task struct {\n\tHost string\n\tPort int\n}\n\n\/\/ An app may have multiple processes\ntype App struct {\n\tId string\n\tEscapedId string\n\tHealthCheckPath string\n\tTasks []Task\n\tServicePorts []int\n\tEnv map[string]string\n}\n\ntype AppList []App\n\nfunc (slice AppList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice AppList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice AppList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype MarathonTaskList []MarathonTask\n\ntype MarathonTasks struct {\n\tTasks MarathonTaskList `json:tasks`\n}\n\ntype MarathonTask struct {\n\tAppId string\n\tId string\n\tHost string\n\tPorts []int\n\tServicePorts []int\n\tStartedAt string\n\tStagedAt string\n\tVersion string\n}\n\nfunc (slice MarathonTaskList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice MarathonTaskList) Less(i, j int) bool {\n\treturn slice[i].StagedAt < slice[j].StagedAt\n}\n\nfunc (slice MarathonTaskList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype MarathonApps struct {\n\tApps []MarathonApp `json:apps`\n}\n\ntype MarathonApp struct {\n\tId string `json:id`\n\tHealthChecks []HealthChecks `json:healthChecks`\n\tPorts []int `json:ports`\n\tEnv map[string]string `json:env`\n}\n\ntype HealthChecks struct {\n\tPath string `json:path`\n}\n\nfunc fetchMarathonApps(endpoint string) (map[string]MarathonApp, error) {\n\tresponse, err := http.Get(endpoint + \"\/v2\/apps\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tdefer response.Body.Close()\n\t\tvar appResponse MarathonApps\n\n\t\tcontents, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = json.Unmarshal(contents, &appResponse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdataById := map[string]MarathonApp{}\n\n\t\tfor _, appConfig := range appResponse.Apps {\n\t\t\tdataById[appConfig.Id] = appConfig\n\t\t}\n\n\t\treturn dataById, nil\n\t}\n}\n\nfunc fetchTasks(endpoint string) (map[string][]MarathonTask, error) {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", endpoint+\"\/v2\/tasks\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresponse, err := client.Do(req)\n\n\tvar tasks MarathonTasks\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tcontents, err := ioutil.ReadAll(response.Body)\n\t\tdefer response.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = json.Unmarshal(contents, &tasks)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttaskList := tasks.Tasks\n\t\tsort.Sort(taskList)\n\n\t\ttasksById := map[string][]MarathonTask{}\n\t\tfor _, task := range taskList {\n\t\t\tif tasksById[task.AppId] == nil {\n\t\t\t\ttasksById[task.AppId] = []MarathonTask{}\n\t\t\t}\n\t\t\ttasksById[task.AppId] = append(tasksById[task.AppId], task)\n\t\t}\n\n\t\treturn tasksById, nil\n\t}\n}\n\nfunc createApps(tasksById map[string][]MarathonTask, marathonApps map[string]MarathonApp) AppList {\n\n\tapps := AppList{}\n\n\tfor appId, tasks := range tasksById {\n\t\tsimpleTasks := []Task{}\n\n\t\tfor _, task := range tasks {\n\t\t\tif len(task.Ports) > 0 {\n\t\t\t\tsimpleTasks = append(simpleTasks, Task{Host: task.Host, Port: task.Ports[0]})\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Try to handle old app id format without slashes\n\t\tappPath := appId\n\t\tif !strings.HasPrefix(appId, \"\/\") {\n\t\t\tappPath = \"\/\" + appId\n\t\t}\n\n\t\tapp := App{\n\t\t\t\/\/ Since Marathon 0.7, apps are namespaced with path\n\t\t\tId: appPath,\n\t\t\t\/\/ Used for template\n\t\t\tEscapedId: strings.Replace(appId, \"\/\", \"::\", -1),\n\t\t\tTasks: simpleTasks,\n\t\t\tHealthCheckPath: parseHealthCheckPath(marathonApps[appId].HealthChecks),\n\t\t\tEnv: marathonApps[appId].Env,\n\t\t\tServicePorts: marathonApps[appId].Ports, \n\t\t}\n\n\t\tapps = append(apps, app)\n\t}\n\treturn apps\n}\n\nfunc parseHealthCheckPath(checks []HealthChecks) string {\n\tif len(checks) > 0 {\n\t\treturn checks[0].Path\n\t}\n\treturn \"\"\n}\n\n\/*\n\tApps returns a struct that describes Marathon current app and their\n\tsub tasks information.\n\n\tParameters:\n\t\tendpoint: Marathon HTTP endpoint, e.g. http:\/\/localhost:8080\n*\/\nfunc FetchApps(maraconf configuration.Marathon) (AppList, error) {\n\n\tvar applist AppList\n\tvar err error\n\n\t\/\/ try all configured endpoints until one succeeds\n\tfor _, url := range maraconf.Endpoints() {\n\t\tapplist, err = _fetchApps(url)\n\t\tif err == nil {\n\t\t\treturn applist, err\n\t\t}\n\t}\n\t\/\/ return last error\n\treturn nil, err\n}\n\nfunc _fetchApps(url string) (AppList, error) {\n\ttasks, err := fetchTasks(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarathonApps, err := fetchMarathonApps(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapps := createApps(tasks, marathonApps)\n\tsort.Sort(apps)\n\treturn apps, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bot\n\nimport (\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/belak\/irc\"\n\t\"github.com\/khades\/servbot\/commandHandlers\"\n\t\"github.com\/khades\/servbot\/ircClient\"\n\t\"github.com\/khades\/servbot\/models\"\n\t\"github.com\/khades\/servbot\/repos\"\n)\n\nvar chatHandler irc.HandlerFunc = func(client *irc.Client, message *irc.Message) {\n\tmsgID, found := message.Tags.GetTag(\"msg-id\")\n\tif found {\n\t\tswitch msgID {\n\t\tcase \"room_mods\":\n\t\t\t{\n\t\t\t\tcommaIndex := strings.Index(message.Params[1], \":\")\n\t\t\t\tif commaIndex != -1 {\n\t\t\t\t\tmods := strings.Split(message.Params[1][commaIndex+2:], \", \")\n\t\t\t\t\tchannel := message.Params[0][1:]\n\t\t\t\t\tmodHandler(&channel, &mods)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"resub\":\n\t\t\t{\n\t\t\t\tsubHandler(message, &IrcClientInstance)\n\t\t\t}\n\n\t\tcase \"sub\":\n\t\t\t{\n\t\t\t\tsubHandler(message, &IrcClientInstance)\n\t\t\t}\n\t\t}\n\t}\n\n\tif message.Command == \"CLEARCHAT\" {\n\t\tbanDuration, banDurationFound := message.Tags.GetTag(\"ban-duration\")\n\t\tintBanDuration := 0\n\t\tif banDurationFound {\n\t\t\tparsedValue, parseError := strconv.Atoi(banDuration)\n\t\t\tif parseError == nil {\n\t\t\t\tintBanDuration = parsedValue\n\t\t\t}\n\t\t}\n\t\tbanReason, _ := message.Tags.GetTag(\"ban-reason\")\n\t\tuser := message.Params[1]\n\t\tchannel := message.Params[0]\n\t\tmessageType := \"timeout\"\n\t\tif intBanDuration == 0 {\n\t\t\tmessageType = \"ban\"\n\t\t}\n\t\tformedMessage := models.ChatMessage{\n\t\t\tMessageStruct: models.MessageStruct{\n\t\t\t\tDate: time.Now(),\n\t\t\t\tMessageType: messageType,\n\t\t\t\tBanLength: intBanDuration,\n\t\t\t\tBanReason: banReason},\n\t\t\tChannel: channel,\n\t\t\tChannelID: message.Tags[\"room-id\"].Encode(),\n\t\t\tUser: user,\n\t\t\tUserID: message.Tags[\"target-user-id\"].Encode()}\n\t\trepos.LogMessage(&formedMessage)\n\t}\n\tif message.Command == \"PRIVMSG\" {\n\t\tformedMessage := models.ChatMessage{\n\t\t\tMessageStruct: models.MessageStruct{\n\t\t\t\tUsername: message.User,\n\t\t\t\tMessageType: \"message\",\n\t\t\t\tMessageBody: message.Params[1],\n\t\t\t\tDate: time.Now()},\n\t\t\tChannel: message.Params[0][1:],\n\t\t\tChannelID: message.Tags[\"room-id\"].Encode(),\n\t\t\tUser: message.User,\n\t\t\tUserID: message.Tags[\"user-id\"].Encode(),\n\t\t\tIsMod: message.Tags[\"mod\"] == \"1\" || message.User == \"khadesru\" || message.Params[0][1:] == message.User,\n\t\t\tIsSub: message.Tags[\"subscriber\"] == \"1\",\n\t\t\tIsPrime: strings.Contains(message.Tags[\"badges\"].Encode(), \"premium\/1\")}\n\t\trepos.LogMessage(&formedMessage)\n\t\trepos.DecrementAutoMessages(&formedMessage.ChannelID)\n\t\tcommandBody, isCommand := formedMessage.GetCommand()\n\t\tbits, bitsFound := message.Tags.GetTag(\"bits\")\n\t\tif bitsFound {\n\t\t\tparsedBits, parsedBitsError := strconv.Atoi(bits)\n\t\t\tif parsedBitsError == nil {\n\t\t\t\trepos.AddBitsToUser(&formedMessage.ChannelID, &formedMessage.UserID, &formedMessage.User, parsedBits, \"bits\")\n\t\t\t}\n\t\t}\n\t\tif isCommand {\n\t\t\tif message.User == \"khadesru\" && commandBody.Command == \"debugSub\" {\n\t\t\t\tsubPlan := \"2000\"\n\t\t\t\tsendSubMessage(&formedMessage.Channel, &formedMessage.ChannelID, &formedMessage.User, &subPlan)\n\t\t\t}\n\t\t\tif message.User == \"khadesru\" && commandBody.Command == \"debugResub\" {\n\t\t\t\tresubCount := 3\n\t\t\t\tsubPlan := \"2000\"\n\t\t\t\tsendResubMessage(&formedMessage.Channel, &formedMessage.ChannelID, &formedMessage.User, &resubCount, &subPlan)\n\t\t\t}\n\t\t\thandlerFunction := commandHandlers.Router.Go(commandBody.Command)\n\t\t\thandlerFunction(true, &formedMessage, commandBody, &IrcClientInstance)\n\t\t}\n\t}\n\n\tif message.Command == \"001\" {\n\t\tclient.Write(\"CAP REQ twitch.tv\/tags\")\n\t\tclient.Write(\"CAP REQ twitch.tv\/membership\")\n\t\tclient.Write(\"CAP REQ twitch.tv\/commands\")\n\t\tfor _, value := range repos.Config.Channels {\n\t\t\tclient.Write(\"JOIN #\" + value)\n\t\t}\n\t\tIrcClientInstance = ircClient.IrcClient{Client: client, Bounces: make(map[string]time.Time), Ready: true}\n\t\tIrcClientInstance.SendModsCommand()\n\t\tlog.Println(\"Bot is started\")\n\t}\n}\n<commit_msg>Debug: Getting debug messages from jtv\\tmi<commit_after>package bot\n\nimport (\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/belak\/irc\"\n\t\"github.com\/khades\/servbot\/commandHandlers\"\n\t\"github.com\/khades\/servbot\/ircClient\"\n\t\"github.com\/khades\/servbot\/models\"\n\t\"github.com\/khades\/servbot\/repos\"\n)\n\nvar chatHandler irc.HandlerFunc = func(client *irc.Client, message *irc.Message) {\n\tif strings.Contains(message.String(), \":jtv\") {\n\t\tlog.Println(\"JTV: \" + message.String())\n\t}\n\tif strings.Contains(message.String(), \":tmi.twitch.tv\") {\n\t\tlog.Println(\"TMI.TWITCH.TV: \" + message.String())\n\t}\n\tmsgID, found := message.Tags.GetTag(\"msg-id\")\n\tif found {\n\t\tswitch msgID {\n\t\tcase \"room_mods\":\n\t\t\t{\n\t\t\t\tcommaIndex := strings.Index(message.Params[1], \":\")\n\t\t\t\tif commaIndex != -1 {\n\t\t\t\t\tmods := strings.Split(message.Params[1][commaIndex+2:], \", \")\n\t\t\t\t\tchannel := message.Params[0][1:]\n\t\t\t\t\tmodHandler(&channel, &mods)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"resub\":\n\t\t\t{\n\t\t\t\tsubHandler(message, &IrcClientInstance)\n\t\t\t}\n\n\t\tcase \"sub\":\n\t\t\t{\n\t\t\t\tsubHandler(message, &IrcClientInstance)\n\t\t\t}\n\t\t}\n\t}\n\n\tif message.Command == \"CLEARCHAT\" {\n\t\tbanDuration, banDurationFound := message.Tags.GetTag(\"ban-duration\")\n\t\tintBanDuration := 0\n\t\tif banDurationFound {\n\t\t\tparsedValue, parseError := strconv.Atoi(banDuration)\n\t\t\tif parseError == nil {\n\t\t\t\tintBanDuration = parsedValue\n\t\t\t}\n\t\t}\n\t\tbanReason, _ := message.Tags.GetTag(\"ban-reason\")\n\t\tuser := message.Params[1]\n\t\tchannel := message.Params[0]\n\t\tmessageType := \"timeout\"\n\t\tif intBanDuration == 0 {\n\t\t\tmessageType = \"ban\"\n\t\t}\n\t\tformedMessage := models.ChatMessage{\n\t\t\tMessageStruct: models.MessageStruct{\n\t\t\t\tDate: time.Now(),\n\t\t\t\tMessageType: messageType,\n\t\t\t\tBanLength: intBanDuration,\n\t\t\t\tBanReason: banReason},\n\t\t\tChannel: channel,\n\t\t\tChannelID: message.Tags[\"room-id\"].Encode(),\n\t\t\tUser: user,\n\t\t\tUserID: message.Tags[\"target-user-id\"].Encode()}\n\t\trepos.LogMessage(&formedMessage)\n\t}\n\tif message.Command == \"PRIVMSG\" {\n\t\tformedMessage := models.ChatMessage{\n\t\t\tMessageStruct: models.MessageStruct{\n\t\t\t\tUsername: message.User,\n\t\t\t\tMessageType: \"message\",\n\t\t\t\tMessageBody: message.Params[1],\n\t\t\t\tDate: time.Now()},\n\t\t\tChannel: message.Params[0][1:],\n\t\t\tChannelID: message.Tags[\"room-id\"].Encode(),\n\t\t\tUser: message.User,\n\t\t\tUserID: message.Tags[\"user-id\"].Encode(),\n\t\t\tIsMod: message.Tags[\"mod\"] == \"1\" || message.User == \"khadesru\" || message.Params[0][1:] == message.User,\n\t\t\tIsSub: message.Tags[\"subscriber\"] == \"1\",\n\t\t\tIsPrime: strings.Contains(message.Tags[\"badges\"].Encode(), \"premium\/1\")}\n\t\trepos.LogMessage(&formedMessage)\n\t\trepos.DecrementAutoMessages(&formedMessage.ChannelID)\n\t\tcommandBody, isCommand := formedMessage.GetCommand()\n\t\tbits, bitsFound := message.Tags.GetTag(\"bits\")\n\t\tif bitsFound {\n\t\t\tparsedBits, parsedBitsError := strconv.Atoi(bits)\n\t\t\tif parsedBitsError == nil {\n\t\t\t\trepos.AddBitsToUser(&formedMessage.ChannelID, &formedMessage.UserID, &formedMessage.User, parsedBits, \"bits\")\n\t\t\t}\n\t\t}\n\t\tif isCommand {\n\t\t\tif message.User == \"khadesru\" && commandBody.Command == \"debugSub\" {\n\t\t\t\tsubPlan := \"2000\"\n\t\t\t\tsendSubMessage(&formedMessage.Channel, &formedMessage.ChannelID, &formedMessage.User, &subPlan)\n\t\t\t}\n\t\t\tif message.User == \"khadesru\" && commandBody.Command == \"debugResub\" {\n\t\t\t\tresubCount := 3\n\t\t\t\tsubPlan := \"2000\"\n\t\t\t\tsendResubMessage(&formedMessage.Channel, &formedMessage.ChannelID, &formedMessage.User, &resubCount, &subPlan)\n\t\t\t}\n\t\t\thandlerFunction := commandHandlers.Router.Go(commandBody.Command)\n\t\t\thandlerFunction(true, &formedMessage, commandBody, &IrcClientInstance)\n\t\t}\n\t}\n\n\tif message.Command == \"001\" {\n\t\tclient.Write(\"CAP REQ twitch.tv\/tags\")\n\t\tclient.Write(\"CAP REQ twitch.tv\/membership\")\n\t\tclient.Write(\"CAP REQ twitch.tv\/commands\")\n\t\tfor _, value := range repos.Config.Channels {\n\t\t\tclient.Write(\"JOIN #\" + value)\n\t\t}\n\t\tIrcClientInstance = ircClient.IrcClient{Client: client, Bounces: make(map[string]time.Time), Ready: true}\n\t\tIrcClientInstance.SendModsCommand()\n\t\tlog.Println(\"Bot is started\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2014 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage btcchain\n\nimport (\n\t\"fmt\"\n\t\"github.com\/conformal\/btcscript\"\n\t\"github.com\/conformal\/btcutil\"\n\t\"github.com\/conformal\/btcwire\"\n\t\"math\"\n\t\"runtime\"\n)\n\n\/\/ txValidateItem holds a transaction along with which input to validate.\ntype txValidateItem struct {\n\ttxInIndex int\n\ttxIn *btcwire.TxIn\n\ttx *btcutil.Tx\n}\n\n\/\/ txValidator provides a type which asynchronously validates transaction\n\/\/ inputs. It provides several channels for communication and a processing\n\/\/ function that is intended to be in run multiple goroutines.\ntype txValidator struct {\n\tvalidateChan chan *txValidateItem\n\tquitChan chan bool\n\tresultChan chan error\n\ttxStore TxStore\n\tflags btcscript.ScriptFlags\n}\n\n\/\/ sendResult sends the result of a script pair validation on the internal\n\/\/ result channel while respecting the quit channel. The allows orderly\n\/\/ shutdown when the validation process is aborted early due to a validation\n\/\/ error in one of the other goroutines.\nfunc (v *txValidator) sendResult(result error) {\n\tselect {\n\tcase v.resultChan <- result:\n\tcase <-v.quitChan:\n\t}\n}\n\n\/\/ validateHandler consumes items to validate from the internal validate channel\n\/\/ and returns the result of the validation on the internal result channel. It\n\/\/ must be run as a goroutine.\nfunc (v *txValidator) validateHandler() {\nout:\n\tfor {\n\t\tselect {\n\t\tcase txVI := <-v.validateChan:\n\t\t\t\/\/ Ensure the referenced input transaction is available.\n\t\t\ttxIn := txVI.txIn\n\t\t\toriginTxHash := &txIn.PreviousOutpoint.Hash\n\t\t\toriginTx, exists := v.txStore[*originTxHash]\n\t\t\tif !exists || originTx.Err != nil || originTx.Tx == nil {\n\t\t\t\tstr := fmt.Sprintf(\"unable to find input \"+\n\t\t\t\t\t\"transaction %v referenced from \"+\n\t\t\t\t\t\"transaction %v\", originTxHash,\n\t\t\t\t\ttxVI.tx.Sha())\n\t\t\t\terr := ruleError(ErrMissingTx, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\toriginMsgTx := originTx.Tx.MsgTx()\n\n\t\t\t\/\/ Ensure the output index in the referenced transaction\n\t\t\t\/\/ is available.\n\t\t\toriginTxIndex := txIn.PreviousOutpoint.Index\n\t\t\tif originTxIndex >= uint32(len(originMsgTx.TxOut)) {\n\t\t\t\tstr := fmt.Sprintf(\"out of bounds \"+\n\t\t\t\t\t\"input index %d in transaction %v \"+\n\t\t\t\t\t\"referenced from transaction %v\",\n\t\t\t\t\toriginTxIndex, originTxHash,\n\t\t\t\t\ttxVI.tx.Sha())\n\t\t\t\terr := ruleError(ErrBadTxInput, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Create a new script engine for the script pair.\n\t\t\tsigScript := txIn.SignatureScript\n\t\t\tpkScript := originMsgTx.TxOut[originTxIndex].PkScript\n\t\t\tengine, err := btcscript.NewScript(sigScript, pkScript,\n\t\t\t\ttxVI.txInIndex, txVI.tx.MsgTx(), v.flags)\n\t\t\tif err != nil {\n\t\t\t\tstr := fmt.Sprintf(\"failed to parse input \"+\n\t\t\t\t\t\"%s:%d which references output %s:%d - \"+\n\t\t\t\t\t\"%v (input script bytes %x, prev output \"+\n\t\t\t\t\t\"script bytes %x)\", txVI.tx.Sha(),\n\t\t\t\t\ttxVI.txInIndex, originTxHash,\n\t\t\t\t\toriginTxIndex, err, sigScript, pkScript)\n\t\t\t\terr := ruleError(ErrScriptMalformed, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Execute the script pair.\n\t\t\tif err := engine.Execute(); err != nil {\n\t\t\t\tstr := fmt.Sprintf(\"failed to validate input \"+\n\t\t\t\t\t\"%s:%d which references output %s:%d - \"+\n\t\t\t\t\t\"%v (input script bytes %x, prev output \"+\n\t\t\t\t\t\"script bytes %x)\", txVI.tx.Sha(),\n\t\t\t\t\ttxVI.txInIndex, originTxHash,\n\t\t\t\t\toriginTxIndex, err, sigScript, pkScript)\n\t\t\t\terr := ruleError(ErrScriptValidation, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Validation succeeded.\n\t\t\tv.sendResult(nil)\n\n\t\tcase <-v.quitChan:\n\t\t\tbreak out\n\t\t}\n\t}\n}\n\n\/\/ Validate validates the scripts for all of the passed transaction inputs using\n\/\/ multiple goroutines.\nfunc (v *txValidator) Validate(items []*txValidateItem) error {\n\tif len(items) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Limit the number of goroutines to do script validation based on the\n\t\/\/ number of processor cores. This help ensure the system stays\n\t\/\/ reasonably responsive under heavy load.\n\tmaxGoRoutines := runtime.NumCPU() * 3\n\tif maxGoRoutines <= 0 {\n\t\tmaxGoRoutines = 1\n\t}\n\tif maxGoRoutines > len(items) {\n\t\tmaxGoRoutines = len(items)\n\t}\n\n\t\/\/ Start up validation handlers that are used to asynchronously\n\t\/\/ validate each transaction input.\n\tfor i := 0; i < maxGoRoutines; i++ {\n\t\tgo v.validateHandler()\n\t}\n\n\t\/\/ Validate each of the inputs. The quit channel is closed when any\n\t\/\/ errors occur so all processing goroutines exit regardless of which\n\t\/\/ input had the validation error.\n\tnumInputs := len(items)\n\tcurrentItem := 0\n\tprocessedItems := 0\n\tfor processedItems < numInputs {\n\t\t\/\/ Only send items while there are still items that need to\n\t\t\/\/ be processed. The select statement will never select a nil\n\t\t\/\/ channel.\n\t\tvar validateChan chan *txValidateItem\n\t\tvar item *txValidateItem\n\t\tif currentItem < numInputs {\n\t\t\tvalidateChan = v.validateChan\n\t\t\titem = items[currentItem]\n\t\t}\n\n\t\tselect {\n\t\tcase validateChan <- item:\n\t\t\tcurrentItem++\n\n\t\tcase err := <-v.resultChan:\n\t\t\tprocessedItems++\n\t\t\tif err != nil {\n\t\t\t\tclose(v.quitChan)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(v.quitChan)\n\treturn nil\n}\n\n\/\/ newTxValidator returns a new instance of txValidator to be used for\n\/\/ validating transaction scripts asynchronously.\nfunc newTxValidator(txStore TxStore, flags btcscript.ScriptFlags) *txValidator {\n\treturn &txValidator{\n\t\tvalidateChan: make(chan *txValidateItem),\n\t\tquitChan: make(chan bool),\n\t\tresultChan: make(chan error),\n\t\ttxStore: txStore,\n\t\tflags: flags,\n\t}\n}\n\n\/\/ ValidateTransactionScripts validates the scripts for the passed transaction\n\/\/ using multiple goroutines.\nfunc ValidateTransactionScripts(tx *btcutil.Tx, txStore TxStore, flags btcscript.ScriptFlags) error {\n\t\/\/ Collect all of the transaction inputs and required information for\n\t\/\/ validation.\n\ttxIns := tx.MsgTx().TxIn\n\ttxValItems := make([]*txValidateItem, 0, len(txIns))\n\tfor txInIdx, txIn := range txIns {\n\t\t\/\/ Skip coinbases.\n\t\tif txIn.PreviousOutpoint.Index == math.MaxUint32 {\n\t\t\tcontinue\n\t\t}\n\n\t\ttxVI := &txValidateItem{\n\t\t\ttxInIndex: txInIdx,\n\t\t\ttxIn: txIn,\n\t\t\ttx: tx,\n\t\t}\n\t\ttxValItems = append(txValItems, txVI)\n\t}\n\n\t\/\/ Validate all of the inputs.\n\tvalidator := newTxValidator(txStore, flags)\n\tif err := validator.Validate(txValItems); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\n\/\/ checkBlockScripts executes and validates the scripts for all transactions in\n\/\/ the passed block.\nfunc checkBlockScripts(block *btcutil.Block, txStore TxStore) error {\n\t\/\/ Setup the script validation flags. Blocks created after the BIP0016\n\t\/\/ activation time need to have the pay-to-script-hash checks enabled.\n\tvar flags btcscript.ScriptFlags\n\tif block.MsgBlock().Header.Timestamp.After(btcscript.Bip16Activation) {\n\t\tflags |= btcscript.ScriptBip16\n\t}\n\n\t\/\/ Collect all of the transaction inputs and required information for\n\t\/\/ validation for all transactions in the block into a single slice.\n\tnumInputs := 0\n\tfor _, tx := range block.Transactions() {\n\t\tnumInputs += len(tx.MsgTx().TxIn)\n\t}\n\ttxValItems := make([]*txValidateItem, 0, numInputs)\n\tfor _, tx := range block.Transactions() {\n\t\tfor txInIdx, txIn := range tx.MsgTx().TxIn {\n\t\t\t\/\/ Skip coinbases.\n\t\t\tif txIn.PreviousOutpoint.Index == math.MaxUint32 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttxVI := &txValidateItem{\n\t\t\t\ttxInIndex: txInIdx,\n\t\t\t\ttxIn: txIn,\n\t\t\t\ttx: tx,\n\t\t\t}\n\t\t\ttxValItems = append(txValItems, txVI)\n\t\t}\n\t}\n\n\t\/\/ Validate all of the inputs.\n\tvalidator := newTxValidator(txStore, flags)\n\tif err := validator.Validate(txValItems); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Use chan struct{} for semaphores<commit_after>\/\/ Copyright (c) 2013-2014 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage btcchain\n\nimport (\n\t\"fmt\"\n\t\"github.com\/conformal\/btcscript\"\n\t\"github.com\/conformal\/btcutil\"\n\t\"github.com\/conformal\/btcwire\"\n\t\"math\"\n\t\"runtime\"\n)\n\n\/\/ txValidateItem holds a transaction along with which input to validate.\ntype txValidateItem struct {\n\ttxInIndex int\n\ttxIn *btcwire.TxIn\n\ttx *btcutil.Tx\n}\n\n\/\/ txValidator provides a type which asynchronously validates transaction\n\/\/ inputs. It provides several channels for communication and a processing\n\/\/ function that is intended to be in run multiple goroutines.\ntype txValidator struct {\n\tvalidateChan chan *txValidateItem\n\tquitChan chan struct{}\n\tresultChan chan error\n\ttxStore TxStore\n\tflags btcscript.ScriptFlags\n}\n\n\/\/ sendResult sends the result of a script pair validation on the internal\n\/\/ result channel while respecting the quit channel. The allows orderly\n\/\/ shutdown when the validation process is aborted early due to a validation\n\/\/ error in one of the other goroutines.\nfunc (v *txValidator) sendResult(result error) {\n\tselect {\n\tcase v.resultChan <- result:\n\tcase <-v.quitChan:\n\t}\n}\n\n\/\/ validateHandler consumes items to validate from the internal validate channel\n\/\/ and returns the result of the validation on the internal result channel. It\n\/\/ must be run as a goroutine.\nfunc (v *txValidator) validateHandler() {\nout:\n\tfor {\n\t\tselect {\n\t\tcase txVI := <-v.validateChan:\n\t\t\t\/\/ Ensure the referenced input transaction is available.\n\t\t\ttxIn := txVI.txIn\n\t\t\toriginTxHash := &txIn.PreviousOutpoint.Hash\n\t\t\toriginTx, exists := v.txStore[*originTxHash]\n\t\t\tif !exists || originTx.Err != nil || originTx.Tx == nil {\n\t\t\t\tstr := fmt.Sprintf(\"unable to find input \"+\n\t\t\t\t\t\"transaction %v referenced from \"+\n\t\t\t\t\t\"transaction %v\", originTxHash,\n\t\t\t\t\ttxVI.tx.Sha())\n\t\t\t\terr := ruleError(ErrMissingTx, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\toriginMsgTx := originTx.Tx.MsgTx()\n\n\t\t\t\/\/ Ensure the output index in the referenced transaction\n\t\t\t\/\/ is available.\n\t\t\toriginTxIndex := txIn.PreviousOutpoint.Index\n\t\t\tif originTxIndex >= uint32(len(originMsgTx.TxOut)) {\n\t\t\t\tstr := fmt.Sprintf(\"out of bounds \"+\n\t\t\t\t\t\"input index %d in transaction %v \"+\n\t\t\t\t\t\"referenced from transaction %v\",\n\t\t\t\t\toriginTxIndex, originTxHash,\n\t\t\t\t\ttxVI.tx.Sha())\n\t\t\t\terr := ruleError(ErrBadTxInput, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Create a new script engine for the script pair.\n\t\t\tsigScript := txIn.SignatureScript\n\t\t\tpkScript := originMsgTx.TxOut[originTxIndex].PkScript\n\t\t\tengine, err := btcscript.NewScript(sigScript, pkScript,\n\t\t\t\ttxVI.txInIndex, txVI.tx.MsgTx(), v.flags)\n\t\t\tif err != nil {\n\t\t\t\tstr := fmt.Sprintf(\"failed to parse input \"+\n\t\t\t\t\t\"%s:%d which references output %s:%d - \"+\n\t\t\t\t\t\"%v (input script bytes %x, prev output \"+\n\t\t\t\t\t\"script bytes %x)\", txVI.tx.Sha(),\n\t\t\t\t\ttxVI.txInIndex, originTxHash,\n\t\t\t\t\toriginTxIndex, err, sigScript, pkScript)\n\t\t\t\terr := ruleError(ErrScriptMalformed, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Execute the script pair.\n\t\t\tif err := engine.Execute(); err != nil {\n\t\t\t\tstr := fmt.Sprintf(\"failed to validate input \"+\n\t\t\t\t\t\"%s:%d which references output %s:%d - \"+\n\t\t\t\t\t\"%v (input script bytes %x, prev output \"+\n\t\t\t\t\t\"script bytes %x)\", txVI.tx.Sha(),\n\t\t\t\t\ttxVI.txInIndex, originTxHash,\n\t\t\t\t\toriginTxIndex, err, sigScript, pkScript)\n\t\t\t\terr := ruleError(ErrScriptValidation, str)\n\t\t\t\tv.sendResult(err)\n\t\t\t\tbreak out\n\t\t\t}\n\n\t\t\t\/\/ Validation succeeded.\n\t\t\tv.sendResult(nil)\n\n\t\tcase <-v.quitChan:\n\t\t\tbreak out\n\t\t}\n\t}\n}\n\n\/\/ Validate validates the scripts for all of the passed transaction inputs using\n\/\/ multiple goroutines.\nfunc (v *txValidator) Validate(items []*txValidateItem) error {\n\tif len(items) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ Limit the number of goroutines to do script validation based on the\n\t\/\/ number of processor cores. This help ensure the system stays\n\t\/\/ reasonably responsive under heavy load.\n\tmaxGoRoutines := runtime.NumCPU() * 3\n\tif maxGoRoutines <= 0 {\n\t\tmaxGoRoutines = 1\n\t}\n\tif maxGoRoutines > len(items) {\n\t\tmaxGoRoutines = len(items)\n\t}\n\n\t\/\/ Start up validation handlers that are used to asynchronously\n\t\/\/ validate each transaction input.\n\tfor i := 0; i < maxGoRoutines; i++ {\n\t\tgo v.validateHandler()\n\t}\n\n\t\/\/ Validate each of the inputs. The quit channel is closed when any\n\t\/\/ errors occur so all processing goroutines exit regardless of which\n\t\/\/ input had the validation error.\n\tnumInputs := len(items)\n\tcurrentItem := 0\n\tprocessedItems := 0\n\tfor processedItems < numInputs {\n\t\t\/\/ Only send items while there are still items that need to\n\t\t\/\/ be processed. The select statement will never select a nil\n\t\t\/\/ channel.\n\t\tvar validateChan chan *txValidateItem\n\t\tvar item *txValidateItem\n\t\tif currentItem < numInputs {\n\t\t\tvalidateChan = v.validateChan\n\t\t\titem = items[currentItem]\n\t\t}\n\n\t\tselect {\n\t\tcase validateChan <- item:\n\t\t\tcurrentItem++\n\n\t\tcase err := <-v.resultChan:\n\t\t\tprocessedItems++\n\t\t\tif err != nil {\n\t\t\t\tclose(v.quitChan)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(v.quitChan)\n\treturn nil\n}\n\n\/\/ newTxValidator returns a new instance of txValidator to be used for\n\/\/ validating transaction scripts asynchronously.\nfunc newTxValidator(txStore TxStore, flags btcscript.ScriptFlags) *txValidator {\n\treturn &txValidator{\n\t\tvalidateChan: make(chan *txValidateItem),\n\t\tquitChan: make(chan struct{}),\n\t\tresultChan: make(chan error),\n\t\ttxStore: txStore,\n\t\tflags: flags,\n\t}\n}\n\n\/\/ ValidateTransactionScripts validates the scripts for the passed transaction\n\/\/ using multiple goroutines.\nfunc ValidateTransactionScripts(tx *btcutil.Tx, txStore TxStore, flags btcscript.ScriptFlags) error {\n\t\/\/ Collect all of the transaction inputs and required information for\n\t\/\/ validation.\n\ttxIns := tx.MsgTx().TxIn\n\ttxValItems := make([]*txValidateItem, 0, len(txIns))\n\tfor txInIdx, txIn := range txIns {\n\t\t\/\/ Skip coinbases.\n\t\tif txIn.PreviousOutpoint.Index == math.MaxUint32 {\n\t\t\tcontinue\n\t\t}\n\n\t\ttxVI := &txValidateItem{\n\t\t\ttxInIndex: txInIdx,\n\t\t\ttxIn: txIn,\n\t\t\ttx: tx,\n\t\t}\n\t\ttxValItems = append(txValItems, txVI)\n\t}\n\n\t\/\/ Validate all of the inputs.\n\tvalidator := newTxValidator(txStore, flags)\n\tif err := validator.Validate(txValItems); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\n\/\/ checkBlockScripts executes and validates the scripts for all transactions in\n\/\/ the passed block.\nfunc checkBlockScripts(block *btcutil.Block, txStore TxStore) error {\n\t\/\/ Setup the script validation flags. Blocks created after the BIP0016\n\t\/\/ activation time need to have the pay-to-script-hash checks enabled.\n\tvar flags btcscript.ScriptFlags\n\tif block.MsgBlock().Header.Timestamp.After(btcscript.Bip16Activation) {\n\t\tflags |= btcscript.ScriptBip16\n\t}\n\n\t\/\/ Collect all of the transaction inputs and required information for\n\t\/\/ validation for all transactions in the block into a single slice.\n\tnumInputs := 0\n\tfor _, tx := range block.Transactions() {\n\t\tnumInputs += len(tx.MsgTx().TxIn)\n\t}\n\ttxValItems := make([]*txValidateItem, 0, numInputs)\n\tfor _, tx := range block.Transactions() {\n\t\tfor txInIdx, txIn := range tx.MsgTx().TxIn {\n\t\t\t\/\/ Skip coinbases.\n\t\t\tif txIn.PreviousOutpoint.Index == math.MaxUint32 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttxVI := &txValidateItem{\n\t\t\t\ttxInIndex: txInIdx,\n\t\t\t\ttxIn: txIn,\n\t\t\t\ttx: tx,\n\t\t\t}\n\t\t\ttxValItems = append(txValItems, txVI)\n\t\t}\n\t}\n\n\t\/\/ Validate all of the inputs.\n\tvalidator := newTxValidator(txStore, flags)\n\tif err := validator.Validate(txValItems); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/microo8\/golymer\"\n)\n\n\/\/FancyClock custom element that shows the current time\ntype FancyClock struct {\n\tgolymer.Element\n\ttime string\n\tFormat string\n\tctx context.Context\n\tcancel context.CancelFunc\n}\n\n\/\/ConnectedCallback when the element is connected to the DOM, ticking may begin!\nfunc (ce *FancyClock) ConnectedCallback() {\n\tce.Element.ConnectedCallback() \/\/must call this first\n\t\/\/starts the ticking by spinning up a goroutine\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ce.ctx.Done():\n\t\t\t\t\/\/stops the thicking goroutine\n\t\t\t\treturn\n\t\t\tcase <-time.Tick(time.Second):\n\t\t\t\t\/\/updates current time with the format\n\t\t\t\tce.time = time.Now().Format(ce.Format)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/DisconnectedCallback when the element is removed from the DOM\n\/\/it stops the ticking by sending done to the context\nfunc (ce *FancyClock) DisconnectedCallback() {\n\tce.cancel()\n}\n\n\/\/NewClockElem creates new clock-elem element\nfunc NewClockElem() *FancyClock {\n\tce := &FancyClock{Format: time.UnixDate}\n\tce.ctx, ce.cancel = context.WithCancel(context.Background())\n\tce.Template = `\n\t<style>\n\t\t:host {\n\t\t\tdisplay: inline;\n\t\t box-shadow: 0 0.018em 0.05em 0;\n\t\t\tfont-size: 7rem;\n\t\t}\n\t<\/style>\n\t[[time]]`\n\treturn ce\n}\n\nfunc main() {\n\t\/\/define the new fancy-clock elem\n\terr := golymer.Define(NewClockElem)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Fix clock example to the new Template style<commit_after>package main\n\nimport (\n\t\"time\"\n\n\t\"github.com\/microo8\/golymer\"\n)\n\nvar clockTemplate = golymer.NewTemplate(`\n<style>\n\t:host {\n\t\tdisplay: inline;\n\t\tbox-shadow: 0 0.018em 0.05em 0;\n\t\tfont-size: 7rem;\n\t}\n<\/style>\n[[time]]`)\n\n\/\/FancyClock custom element that shows the current time\ntype FancyClock struct {\n\tgolymer.Element\n\ttime string\n\tFormat string\n\tdone chan struct{}\n}\n\n\/\/ConnectedCallback when the element is connected to the DOM, ticking may begin!\nfunc (ce *FancyClock) ConnectedCallback() {\n\tce.Element.ConnectedCallback() \/\/must call this first\n\t\/\/starts the ticking by spinning up a goroutine\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ce.done:\n\t\t\t\t\/\/stops the thicking goroutine\n\t\t\t\treturn\n\t\t\tcase <-time.Tick(time.Second):\n\t\t\t\t\/\/updates current time with the format\n\t\t\t\tce.time = time.Now().Format(ce.Format)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/DisconnectedCallback when the element is removed from the DOM\n\/\/it stops the ticking by sending done to the context\nfunc (ce *FancyClock) DisconnectedCallback() {\n\tce.done <- struct{}{}\n}\n\n\/\/NewClockElem creates new clock-elem element\nfunc NewClockElem() *FancyClock {\n\tce := &FancyClock{Format: time.UnixDate}\n\tce.done = make(chan struct{})\n\tce.SetTemplate(clockTemplate)\n\treturn ce\n}\n\nfunc main() {\n\t\/\/define the new fancy-clock elem\n\terr := golymer.Define(NewClockElem)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package chronograf\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ General errors.\nconst (\n\tErrUpstreamTimeout = Error(\"request to backend timed out\")\n\tErrExplorationNotFound = Error(\"exploration not found\")\n\tErrSourceNotFound = Error(\"source not found\")\n\tErrServerNotFound = Error(\"server not found\")\n\tErrLayoutNotFound = Error(\"layout not found\")\n\tErrUserNotFound = Error(\"user not found\")\n\tErrLayoutInvalid = Error(\"layout is invalid\")\n\tErrAlertNotFound = Error(\"alert not found\")\n\tErrAuthentication = Error(\"user not authenticated\")\n)\n\n\/\/ Error is a domain error encountered while processing chronograf requests\ntype Error string\n\nfunc (e Error) Error() string {\n\treturn string(e)\n}\n\n\/\/ Logger represents an abstracted structured logging implementation. It\n\/\/ provides methods to trigger log messages at various alert levels and a\n\/\/ WithField method to set keys for a structured log message.\ntype Logger interface {\n\tDebug(...interface{})\n\tInfo(...interface{})\n\tWarn(...interface{})\n\tError(...interface{})\n\tFatal(...interface{})\n\tPanic(...interface{})\n\n\tWithField(string, interface{}) Logger\n}\n\n\/\/ Assets returns a handler to serve the website.\ntype Assets interface {\n\tHandler() http.Handler\n}\n\n\/\/ TimeSeries represents a queryable time series database.\ntype TimeSeries interface {\n\t\/\/ Query retrieves time series data from the database.\n\tQuery(context.Context, Query) (Response, error)\n\t\/\/ Connect will connect to the time series using the information in `Source`.\n\tConnect(context.Context, *Source) error\n}\n\n\/\/ Range represents an upper and lower bound for data\ntype Range struct {\n\tUpper int64 `json:\"upper,omitempty\"` \/\/ Upper is the upper bound\n\tLower int64 `json:\"lower,omitempty\"` \/\/ Lower is the lower bound\n}\n\n\/\/ Query retrieves a Response from a TimeSeries.\ntype Query struct {\n\tCommand string `json:\"query\"` \/\/ Command is the query itself\n\tDB string `json:\"db,omitempty\"` \/\/ DB is optional and if empty will not be used.\n\tRP string `json:\"rp,omitempty\"` \/\/ RP is a retention policy and optional; if empty will not be used.\n\tWheres []string `json:\"wheres,omitempty\"` \/\/ Wheres restricts the query to certain attributes\n\tGroupBys []string `json:\"groupbys,omitempty\"` \/\/ GroupBys collate the query by these tags\n\tLabel string `json:\"label,omitempty\"` \/\/ Label is the Y-Axis label for the data\n\tRange *Range `json:\"range,omitempty\"` \/\/ Range is the default Y-Axis range for the data\n}\n\n\/\/ Response is the result of a query against a TimeSeries\ntype Response interface {\n\tMarshalJSON() ([]byte, error)\n}\n\n\/\/ Source is connection information to a time-series data store.\ntype Source struct {\n\tID int `json:\"id,omitempty,string\"` \/\/ ID is the unique ID of the source\n\tName string `json:\"name\"` \/\/ Name is the user-defined name for the source\n\tType string `json:\"type,omitempty\"` \/\/ Type specifies which kinds of source (enterprise vs oss)\n\tUsername string `json:\"username,omitempty\"` \/\/ Username is the username to connect to the source\n\tPassword string `json:\"password,omitempty\"` \/\/ Password is in CLEARTEXT\n\tURL string `json:\"url\"` \/\/ URL are the connections to the source\n\tDefault bool `json:\"default\"` \/\/ Default specifies the default source for the application\n\tTelegraf string `json:\"telegraf\"` \/\/ Telegraf is the db telegraf is written to. By default it is \"telegraf\"\n}\n\n\/\/ SourcesStore stores connection information for a `TimeSeries`\ntype SourcesStore interface {\n\t\/\/ All returns all sources in the store\n\tAll(context.Context) ([]Source, error)\n\t\/\/ Add creates a new source in the SourcesStore and returns Source with ID\n\tAdd(context.Context, Source) (Source, error)\n\t\/\/ Delete the Source from the store\n\tDelete(context.Context, Source) error\n\t\/\/ Get retrieves Source if `ID` exists\n\tGet(ctx context.Context, ID int) (Source, error)\n\t\/\/ Update the Source in the store.\n\tUpdate(context.Context, Source) error\n}\n\n\/\/ AlertRule represents rules for building a tickscript alerting task\ntype AlertRule struct {\n\tID string `json:\"id,omitempty\"` \/\/ ID is the unique ID of the alert\n\tQuery QueryConfig `json:\"query\"` \/\/ Query is the filter of data for the alert.\n\tEvery string `json:\"every\"` \/\/ Every how often to check for the alerting criteria\n\tAlerts []string `json:\"alerts\"` \/\/ AlertServices name all the services to notify (e.g. pagerduty)\n\tMessage string `json:\"message\"` \/\/ Message included with alert\n\tTrigger string `json:\"trigger\"` \/\/ Trigger is a type that defines when to trigger the alert\n\tTriggerValues TriggerValues `json:\"values\"` \/\/ Defines the values that cause the alert to trigger\n\tName string `json:\"name\"` \/\/ Name is the user-defined name for the alert\n}\n\n\/\/ AlertRulesStore stores rules for building tickscript alerting tasks\ntype AlertRulesStore interface {\n\t\/\/ All returns all rules in the store for the given source and kapacitor id\n\tAll(ctx context.Context, sourceID, kapaID int) ([]AlertRule, error)\n\t\/\/ Add creates a new rule in the AlertRulesStore and returns AlertRule with ID for a given source and kapacitor id\n\tAdd(ctx context.Context, sourceID, kapaID int, rule AlertRule) (AlertRule, error)\n\t\/\/ Delete the AlertRule from the store for a given source and kapacitor ID\n\tDelete(ctx context.Context, sourceID, kapaID int, rule AlertRule) error\n\t\/\/ Get retrieves AlertRule if `ID` exists within a given source and kapacitor id\n\tGet(ctx context.Context, sourceID, kapaID int, ID string) (AlertRule, error)\n\t\/\/ Update the AlertRule in the store within a given source and kapacitor id\n\tUpdate(ctx context.Context, sourceID, kapaID int, rule AlertRule) error\n}\n\n\/\/ TICKScript task to be used by kapacitor\ntype TICKScript string\n\n\/\/ Ticker generates tickscript tasks for kapacitor\ntype Ticker interface {\n\t\/\/ Generate will create the tickscript to be used as a kapacitor task\n\tGenerate(AlertRule) (TICKScript, error)\n}\n\n\/\/ TriggerValues specifies the alerting logic for a specific trigger type\ntype TriggerValues struct {\n\tChange string `json:\"change,omitempty\"` \/\/ Change specifies if the change is a percent or absolute\n\tPeriod string `json:\"period,omitempty\"` \/\/ Period length of time before deadman is alerted\n\tShift string `json:\"shift,omitempty\"` \/\/ Shift is the amount of time to look into the past for the alert to compare to the present\n\tOperator string `json:\"operator,omitempty\"` \/\/ Operator for alert comparison\n\tValue string `json:\"value,omitempty\"` \/\/ Value is the boundary value when alert goes critical\n}\n\n\/\/ Field represent influxql fields and functions from the UI\ntype Field struct {\n\tField string `json:\"field\"`\n\tFuncs []string `json:\"funcs\"`\n}\n\n\/\/ GroupBy represents influxql group by tags from the UI\ntype GroupBy struct {\n\tTime string `json:\"time\"`\n\tTags []string `json:\"tags\"`\n}\n\n\/\/ QueryConfig represents UI query from the data explorer\ntype QueryConfig struct {\n\tID string `json:\"id,omitempty\"`\n\tDatabase string `json:\"database\"`\n\tMeasurement string `json:\"measurement\"`\n\tRetentionPolicy string `json:\"retentionPolicy\"`\n\tFields []Field `json:\"fields\"`\n\tTags map[string][]string `json:\"tags\"`\n\tGroupBy GroupBy `json:\"groupBy\"`\n\tAreTagsAccepted bool `json:\"areTagsAccepted\"`\n\tRawText string `json:\"rawText,omitempty\"`\n}\n\n\/\/ Server represents a proxy connection to an HTTP server\ntype Server struct {\n\tID int \/\/ ID is the unique ID of the server\n\tSrcID int \/\/ SrcID of the data source\n\tName string \/\/ Name is the user-defined name for the server\n\tUsername string \/\/ Username is the username to connect to the server\n\tPassword string \/\/ Password is in CLEARTEXT\n\tURL string \/\/ URL are the connections to the server\n}\n\n\/\/ ServersStore stores connection information for a `Server`\ntype ServersStore interface {\n\t\/\/ All returns all servers in the store\n\tAll(context.Context) ([]Server, error)\n\t\/\/ Add creates a new source in the ServersStore and returns Server with ID\n\tAdd(context.Context, Server) (Server, error)\n\t\/\/ Delete the Server from the store\n\tDelete(context.Context, Server) error\n\t\/\/ Get retrieves Server if `ID` exists\n\tGet(ctx context.Context, ID int) (Server, error)\n\t\/\/ Update the Server in the store.\n\tUpdate(context.Context, Server) error\n}\n\n\/\/ ID creates uniq ID string\ntype ID interface {\n\t\/\/ Generate creates a unique ID string\n\tGenerate() (string, error)\n}\n\n\/\/ UserID is a unique ID for a source user.\ntype UserID int\n\n\/\/ User represents an authenticated user.\ntype User struct {\n\tID UserID `json:\"id\"`\n\tEmail string `json:\"email\"`\n}\n\n\/\/ UsersStore is the Storage and retrieval of authentication information\ntype UsersStore interface {\n\t\/\/ Create a new User in the UsersStore\n\tAdd(context.Context, *User) (*User, error)\n\t\/\/ Delete the User from the UsersStore\n\tDelete(context.Context, *User) error\n\t\/\/ Get retrieves a user if `ID` exists.\n\tGet(ctx context.Context, ID UserID) (*User, error)\n\t\/\/ Update the user's permissions or roles\n\tUpdate(context.Context, *User) error\n\t\/\/ FindByEmail will retrieve a user by email address.\n\tFindByEmail(ctx context.Context, Email string) (*User, error)\n}\n\n\/\/ ExplorationID is a unique ID for an Exploration.\ntype ExplorationID int\n\n\/\/ Exploration is a serialization of front-end Data Explorer.\ntype Exploration struct {\n\tID ExplorationID\n\tName string \/\/ User provided name of the Exploration.\n\tUserID UserID \/\/ UserID is the owner of this Exploration.\n\tData string \/\/ Opaque blob of JSON data.\n\tCreatedAt time.Time \/\/ Time the exploration was first created.\n\tUpdatedAt time.Time \/\/ Latest time the exploration was updated.\n\tDefault bool \/\/ Flags an exploration as the default.\n}\n\n\/\/ ExplorationStore stores front-end serializations of data explorer sessions.\ntype ExplorationStore interface {\n\t\/\/ Search the ExplorationStore for each Exploration owned by `UserID`.\n\tQuery(ctx context.Context, userID UserID) ([]*Exploration, error)\n\t\/\/ Create a new Exploration in the ExplorationStore.\n\tAdd(context.Context, *Exploration) (*Exploration, error)\n\t\/\/ Delete the Exploration from the ExplorationStore.\n\tDelete(context.Context, *Exploration) error\n\t\/\/ Retrieve an Exploration if `ID` exists.\n\tGet(ctx context.Context, ID ExplorationID) (*Exploration, error)\n\t\/\/ Update the Exploration; will also update the `UpdatedAt` time.\n\tUpdate(context.Context, *Exploration) error\n}\n\n\/\/ Cell is a rectangle and multiple time series queries to visualize.\ntype Cell struct {\n\tX int32 `json:\"x\"`\n\tY int32 `json:\"y\"`\n\tW int32 `json:\"w\"`\n\tH int32 `json:\"h\"`\n\tI string `json:\"i\"`\n\tName string `json:\"name\"`\n\tQueries []Query `json:\"queries\"`\n}\n\n\/\/ Layout is a collection of Cells for visualization\ntype Layout struct {\n\tID string `json:\"id\"`\n\tApplication string `json:\"app\"`\n\tMeasurement string `json:\"measurement\"`\n\tAutoflow bool `json:\"autoflow\"`\n\tCells []Cell `json:\"cells\"`\n}\n\n\/\/ LayoutStore stores dashboards and associated Cells\ntype LayoutStore interface {\n\t\/\/ All returns all dashboards in the store\n\tAll(context.Context) ([]Layout, error)\n\t\/\/ Add creates a new dashboard in the LayoutStore\n\tAdd(context.Context, Layout) (Layout, error)\n\t\/\/ Delete the dashboard from the store\n\tDelete(context.Context, Layout) error\n\t\/\/ Get retrieves Layout if `ID` exists\n\tGet(ctx context.Context, ID string) (Layout, error)\n\t\/\/ Update the dashboard in the store.\n\tUpdate(context.Context, Layout) error\n}\n\n\/\/ Principal is any entity that can be authenticated\ntype Principal string\n\n\/\/ PrincipalKey is used to pass principal\n\/\/ via context.Context to request-scoped\n\/\/ functions.\nconst PrincipalKey Principal = \"principal\"\n\n\/\/ Authenticator represents a service for authenticating users.\ntype Authenticator interface {\n\t\/\/ Authenticate returns User associated with token if successful.\n\tAuthenticate(ctx context.Context, token string) (Principal, error)\n\t\/\/ Token generates a valid token for Principal lasting a duration\n\tToken(context.Context, Principal, time.Duration) (string, error)\n}\n\n\/\/ TokenExtractor extracts tokens from http requests\ntype TokenExtractor interface {\n\t\/\/ Extract will return the token or an error.\n\tExtract(r *http.Request) (string, error)\n}\n<commit_msg>Allow upper and lower bound to be zero<commit_after>package chronograf\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ General errors.\nconst (\n\tErrUpstreamTimeout = Error(\"request to backend timed out\")\n\tErrExplorationNotFound = Error(\"exploration not found\")\n\tErrSourceNotFound = Error(\"source not found\")\n\tErrServerNotFound = Error(\"server not found\")\n\tErrLayoutNotFound = Error(\"layout not found\")\n\tErrUserNotFound = Error(\"user not found\")\n\tErrLayoutInvalid = Error(\"layout is invalid\")\n\tErrAlertNotFound = Error(\"alert not found\")\n\tErrAuthentication = Error(\"user not authenticated\")\n)\n\n\/\/ Error is a domain error encountered while processing chronograf requests\ntype Error string\n\nfunc (e Error) Error() string {\n\treturn string(e)\n}\n\n\/\/ Logger represents an abstracted structured logging implementation. It\n\/\/ provides methods to trigger log messages at various alert levels and a\n\/\/ WithField method to set keys for a structured log message.\ntype Logger interface {\n\tDebug(...interface{})\n\tInfo(...interface{})\n\tWarn(...interface{})\n\tError(...interface{})\n\tFatal(...interface{})\n\tPanic(...interface{})\n\n\tWithField(string, interface{}) Logger\n}\n\n\/\/ Assets returns a handler to serve the website.\ntype Assets interface {\n\tHandler() http.Handler\n}\n\n\/\/ TimeSeries represents a queryable time series database.\ntype TimeSeries interface {\n\t\/\/ Query retrieves time series data from the database.\n\tQuery(context.Context, Query) (Response, error)\n\t\/\/ Connect will connect to the time series using the information in `Source`.\n\tConnect(context.Context, *Source) error\n}\n\n\/\/ Range represents an upper and lower bound for data\ntype Range struct {\n\tUpper int64 `json:\"upper\"` \/\/ Upper is the upper bound\n\tLower int64 `json:\"lower\"` \/\/ Lower is the lower bound\n}\n\n\/\/ Query retrieves a Response from a TimeSeries.\ntype Query struct {\n\tCommand string `json:\"query\"` \/\/ Command is the query itself\n\tDB string `json:\"db,omitempty\"` \/\/ DB is optional and if empty will not be used.\n\tRP string `json:\"rp,omitempty\"` \/\/ RP is a retention policy and optional; if empty will not be used.\n\tWheres []string `json:\"wheres,omitempty\"` \/\/ Wheres restricts the query to certain attributes\n\tGroupBys []string `json:\"groupbys,omitempty\"` \/\/ GroupBys collate the query by these tags\n\tLabel string `json:\"label,omitempty\"` \/\/ Label is the Y-Axis label for the data\n\tRange *Range `json:\"range,omitempty\"` \/\/ Range is the default Y-Axis range for the data\n}\n\n\/\/ Response is the result of a query against a TimeSeries\ntype Response interface {\n\tMarshalJSON() ([]byte, error)\n}\n\n\/\/ Source is connection information to a time-series data store.\ntype Source struct {\n\tID int `json:\"id,omitempty,string\"` \/\/ ID is the unique ID of the source\n\tName string `json:\"name\"` \/\/ Name is the user-defined name for the source\n\tType string `json:\"type,omitempty\"` \/\/ Type specifies which kinds of source (enterprise vs oss)\n\tUsername string `json:\"username,omitempty\"` \/\/ Username is the username to connect to the source\n\tPassword string `json:\"password,omitempty\"` \/\/ Password is in CLEARTEXT\n\tURL string `json:\"url\"` \/\/ URL are the connections to the source\n\tDefault bool `json:\"default\"` \/\/ Default specifies the default source for the application\n\tTelegraf string `json:\"telegraf\"` \/\/ Telegraf is the db telegraf is written to. By default it is \"telegraf\"\n}\n\n\/\/ SourcesStore stores connection information for a `TimeSeries`\ntype SourcesStore interface {\n\t\/\/ All returns all sources in the store\n\tAll(context.Context) ([]Source, error)\n\t\/\/ Add creates a new source in the SourcesStore and returns Source with ID\n\tAdd(context.Context, Source) (Source, error)\n\t\/\/ Delete the Source from the store\n\tDelete(context.Context, Source) error\n\t\/\/ Get retrieves Source if `ID` exists\n\tGet(ctx context.Context, ID int) (Source, error)\n\t\/\/ Update the Source in the store.\n\tUpdate(context.Context, Source) error\n}\n\n\/\/ AlertRule represents rules for building a tickscript alerting task\ntype AlertRule struct {\n\tID string `json:\"id,omitempty\"` \/\/ ID is the unique ID of the alert\n\tQuery QueryConfig `json:\"query\"` \/\/ Query is the filter of data for the alert.\n\tEvery string `json:\"every\"` \/\/ Every how often to check for the alerting criteria\n\tAlerts []string `json:\"alerts\"` \/\/ AlertServices name all the services to notify (e.g. pagerduty)\n\tMessage string `json:\"message\"` \/\/ Message included with alert\n\tTrigger string `json:\"trigger\"` \/\/ Trigger is a type that defines when to trigger the alert\n\tTriggerValues TriggerValues `json:\"values\"` \/\/ Defines the values that cause the alert to trigger\n\tName string `json:\"name\"` \/\/ Name is the user-defined name for the alert\n}\n\n\/\/ AlertRulesStore stores rules for building tickscript alerting tasks\ntype AlertRulesStore interface {\n\t\/\/ All returns all rules in the store for the given source and kapacitor id\n\tAll(ctx context.Context, sourceID, kapaID int) ([]AlertRule, error)\n\t\/\/ Add creates a new rule in the AlertRulesStore and returns AlertRule with ID for a given source and kapacitor id\n\tAdd(ctx context.Context, sourceID, kapaID int, rule AlertRule) (AlertRule, error)\n\t\/\/ Delete the AlertRule from the store for a given source and kapacitor ID\n\tDelete(ctx context.Context, sourceID, kapaID int, rule AlertRule) error\n\t\/\/ Get retrieves AlertRule if `ID` exists within a given source and kapacitor id\n\tGet(ctx context.Context, sourceID, kapaID int, ID string) (AlertRule, error)\n\t\/\/ Update the AlertRule in the store within a given source and kapacitor id\n\tUpdate(ctx context.Context, sourceID, kapaID int, rule AlertRule) error\n}\n\n\/\/ TICKScript task to be used by kapacitor\ntype TICKScript string\n\n\/\/ Ticker generates tickscript tasks for kapacitor\ntype Ticker interface {\n\t\/\/ Generate will create the tickscript to be used as a kapacitor task\n\tGenerate(AlertRule) (TICKScript, error)\n}\n\n\/\/ TriggerValues specifies the alerting logic for a specific trigger type\ntype TriggerValues struct {\n\tChange string `json:\"change,omitempty\"` \/\/ Change specifies if the change is a percent or absolute\n\tPeriod string `json:\"period,omitempty\"` \/\/ Period length of time before deadman is alerted\n\tShift string `json:\"shift,omitempty\"` \/\/ Shift is the amount of time to look into the past for the alert to compare to the present\n\tOperator string `json:\"operator,omitempty\"` \/\/ Operator for alert comparison\n\tValue string `json:\"value,omitempty\"` \/\/ Value is the boundary value when alert goes critical\n}\n\n\/\/ Field represent influxql fields and functions from the UI\ntype Field struct {\n\tField string `json:\"field\"`\n\tFuncs []string `json:\"funcs\"`\n}\n\n\/\/ GroupBy represents influxql group by tags from the UI\ntype GroupBy struct {\n\tTime string `json:\"time\"`\n\tTags []string `json:\"tags\"`\n}\n\n\/\/ QueryConfig represents UI query from the data explorer\ntype QueryConfig struct {\n\tID string `json:\"id,omitempty\"`\n\tDatabase string `json:\"database\"`\n\tMeasurement string `json:\"measurement\"`\n\tRetentionPolicy string `json:\"retentionPolicy\"`\n\tFields []Field `json:\"fields\"`\n\tTags map[string][]string `json:\"tags\"`\n\tGroupBy GroupBy `json:\"groupBy\"`\n\tAreTagsAccepted bool `json:\"areTagsAccepted\"`\n\tRawText string `json:\"rawText,omitempty\"`\n}\n\n\/\/ Server represents a proxy connection to an HTTP server\ntype Server struct {\n\tID int \/\/ ID is the unique ID of the server\n\tSrcID int \/\/ SrcID of the data source\n\tName string \/\/ Name is the user-defined name for the server\n\tUsername string \/\/ Username is the username to connect to the server\n\tPassword string \/\/ Password is in CLEARTEXT\n\tURL string \/\/ URL are the connections to the server\n}\n\n\/\/ ServersStore stores connection information for a `Server`\ntype ServersStore interface {\n\t\/\/ All returns all servers in the store\n\tAll(context.Context) ([]Server, error)\n\t\/\/ Add creates a new source in the ServersStore and returns Server with ID\n\tAdd(context.Context, Server) (Server, error)\n\t\/\/ Delete the Server from the store\n\tDelete(context.Context, Server) error\n\t\/\/ Get retrieves Server if `ID` exists\n\tGet(ctx context.Context, ID int) (Server, error)\n\t\/\/ Update the Server in the store.\n\tUpdate(context.Context, Server) error\n}\n\n\/\/ ID creates uniq ID string\ntype ID interface {\n\t\/\/ Generate creates a unique ID string\n\tGenerate() (string, error)\n}\n\n\/\/ UserID is a unique ID for a source user.\ntype UserID int\n\n\/\/ User represents an authenticated user.\ntype User struct {\n\tID UserID `json:\"id\"`\n\tEmail string `json:\"email\"`\n}\n\n\/\/ UsersStore is the Storage and retrieval of authentication information\ntype UsersStore interface {\n\t\/\/ Create a new User in the UsersStore\n\tAdd(context.Context, *User) (*User, error)\n\t\/\/ Delete the User from the UsersStore\n\tDelete(context.Context, *User) error\n\t\/\/ Get retrieves a user if `ID` exists.\n\tGet(ctx context.Context, ID UserID) (*User, error)\n\t\/\/ Update the user's permissions or roles\n\tUpdate(context.Context, *User) error\n\t\/\/ FindByEmail will retrieve a user by email address.\n\tFindByEmail(ctx context.Context, Email string) (*User, error)\n}\n\n\/\/ ExplorationID is a unique ID for an Exploration.\ntype ExplorationID int\n\n\/\/ Exploration is a serialization of front-end Data Explorer.\ntype Exploration struct {\n\tID ExplorationID\n\tName string \/\/ User provided name of the Exploration.\n\tUserID UserID \/\/ UserID is the owner of this Exploration.\n\tData string \/\/ Opaque blob of JSON data.\n\tCreatedAt time.Time \/\/ Time the exploration was first created.\n\tUpdatedAt time.Time \/\/ Latest time the exploration was updated.\n\tDefault bool \/\/ Flags an exploration as the default.\n}\n\n\/\/ ExplorationStore stores front-end serializations of data explorer sessions.\ntype ExplorationStore interface {\n\t\/\/ Search the ExplorationStore for each Exploration owned by `UserID`.\n\tQuery(ctx context.Context, userID UserID) ([]*Exploration, error)\n\t\/\/ Create a new Exploration in the ExplorationStore.\n\tAdd(context.Context, *Exploration) (*Exploration, error)\n\t\/\/ Delete the Exploration from the ExplorationStore.\n\tDelete(context.Context, *Exploration) error\n\t\/\/ Retrieve an Exploration if `ID` exists.\n\tGet(ctx context.Context, ID ExplorationID) (*Exploration, error)\n\t\/\/ Update the Exploration; will also update the `UpdatedAt` time.\n\tUpdate(context.Context, *Exploration) error\n}\n\n\/\/ Cell is a rectangle and multiple time series queries to visualize.\ntype Cell struct {\n\tX int32 `json:\"x\"`\n\tY int32 `json:\"y\"`\n\tW int32 `json:\"w\"`\n\tH int32 `json:\"h\"`\n\tI string `json:\"i\"`\n\tName string `json:\"name\"`\n\tQueries []Query `json:\"queries\"`\n}\n\n\/\/ Layout is a collection of Cells for visualization\ntype Layout struct {\n\tID string `json:\"id\"`\n\tApplication string `json:\"app\"`\n\tMeasurement string `json:\"measurement\"`\n\tAutoflow bool `json:\"autoflow\"`\n\tCells []Cell `json:\"cells\"`\n}\n\n\/\/ LayoutStore stores dashboards and associated Cells\ntype LayoutStore interface {\n\t\/\/ All returns all dashboards in the store\n\tAll(context.Context) ([]Layout, error)\n\t\/\/ Add creates a new dashboard in the LayoutStore\n\tAdd(context.Context, Layout) (Layout, error)\n\t\/\/ Delete the dashboard from the store\n\tDelete(context.Context, Layout) error\n\t\/\/ Get retrieves Layout if `ID` exists\n\tGet(ctx context.Context, ID string) (Layout, error)\n\t\/\/ Update the dashboard in the store.\n\tUpdate(context.Context, Layout) error\n}\n\n\/\/ Principal is any entity that can be authenticated\ntype Principal string\n\n\/\/ PrincipalKey is used to pass principal\n\/\/ via context.Context to request-scoped\n\/\/ functions.\nconst PrincipalKey Principal = \"principal\"\n\n\/\/ Authenticator represents a service for authenticating users.\ntype Authenticator interface {\n\t\/\/ Authenticate returns User associated with token if successful.\n\tAuthenticate(ctx context.Context, token string) (Principal, error)\n\t\/\/ Token generates a valid token for Principal lasting a duration\n\tToken(context.Context, Principal, time.Duration) (string, error)\n}\n\n\/\/ TokenExtractor extracts tokens from http requests\ntype TokenExtractor interface {\n\t\/\/ Extract will return the token or an error.\n\tExtract(r *http.Request) (string, error)\n}\n<|endoftext|>"} {"text":"<commit_before>package air\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Config is a global set of configurations that for an instance of `Air`\n\/\/ for customization.\ntype Config struct {\n\t\/\/ AppName represens the name of current `Air` instance.\n\tAppName string\n\n\t\/\/ DebugMode represents the state of `Air`'s debug mode.\n\t\/\/\n\t\/\/ Default value is false.\n\t\/\/\n\t\/\/ It's called \"debug_mode\" in the config file.\n\tDebugMode bool\n\n\t\/\/ LogFormat represents the format of `Logger`'s output content.\n\t\/\/\n\t\/\/ Default value is:\n\t\/\/ `{\"app_name\":\"{{.app_name}}\",\"time\":\"{{.time_rfc3339}}\",` +\n\t\/\/ `\"level\":\"{{.level}}\",\"file\":\"{{.short_file}}\",\"line\":\"{{.line}}\"}`\n\t\/\/\n\t\/\/ It's called \"log_format\" in the config file.\n\tLogFormat string\n\n\t\/\/ Address represents the TCP address that `Server` to listen\n\t\/\/ on.\n\t\/\/\n\t\/\/ Default value is \"localhost:8080\".\n\t\/\/\n\t\/\/ It's called \"address\" in the config file.\n\tAddress string\n\n\t\/\/ Listener represens the custom `net.Listener`. If set, `Server`\n\t\/\/ accepts connections on it.\n\t\/\/\n\t\/\/ Default value is nil.\n\tListener net.Listener\n\n\t\/\/ TLSCertFile represents the TLS certificate file path.\n\t\/\/\n\t\/\/ Default value is \"\".\n\t\/\/\n\t\/\/ It's called \"tls_cert_file\" in the config file.\n\tTLSCertFile string\n\n\t\/\/ TLSKeyFile represents the TLS key file path.\n\t\/\/\n\t\/\/ Default value is \"\".\n\t\/\/\n\t\/\/ It's called \"tls_key_file\" in the config file.\n\tTLSKeyFile string\n\n\t\/\/ ReadTimeout represents the maximum duration before timing out\n\t\/\/ read of the request.\n\t\/\/\n\t\/\/ Default value is 0.\n\t\/\/\n\t\/\/ It's called \"read_timeout\" in the config file.\n\t\/\/\n\t\/\/ *It's unit in the config file is SECONDS.*\n\tReadTimeout time.Duration\n\n\t\/\/ WriteTimeout represents the maximum duration before timing\n\t\/\/ out write of the response.\n\t\/\/\n\t\/\/ Default value is 0.\n\t\/\/\n\t\/\/ It's called \"write_timeout\" in the config file.\n\t\/\/\n\t\/\/ *It's unit in the config file is SECONDS.*\n\tWriteTimeout time.Duration\n\n\t\/\/ TemplatesRoot represents the root directory of the html templates.\n\t\/\/ It will be parsed into `Renderer`.\n\t\/\/\n\t\/\/ Default value is \"templates\" that means a subdirectory of the\n\t\/\/ runtime directory.\n\t\/\/\n\t\/\/ It's called \"templates_root\" in the config file.\n\tTemplatesRoot string\n\n\t\/\/ Data represents the data that parsing from config file. You can\n\t\/\/ use it to access the values in the config file.\n\t\/\/\n\t\/\/ e.g. c.Data[\"mysql_user_name\"] will accesses the value in config\n\t\/\/ file called \"mysql_user_name\".\n\tData JSONMap\n}\n\n\/\/ defaultConfig is the default instance of `Config`.\nvar defaultConfig Config\n\n\/\/ configs is the JSON map that stored all the configs parsing from\n\/\/ config file.\nvar configs JSONMap\n\nfunc init() {\n\tdefaultConfig = Config{\n\t\tLogFormat: `{\"app_name\":\"{{.app_name}}\",\"time\":\"{{.time_rfc3339}}\",` +\n\t\t\t`\"level\":\"{{.level}}\",\"file\":\"{{.short_file}}\",\"line\":\"{{.line}}\"}`,\n\t\tAddress: \"localhost:8080\",\n\t\tTemplatesRoot: \"templates\",\n\t}\n\n\tvar cfn = \"config.json\"\n\t_, err := os.Stat(cfn)\n\tif err == nil || os.IsExist(err) {\n\t\tbytes, err := ioutil.ReadFile(cfn)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = json.Unmarshal(bytes, &configs)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif len(configs) == 0 {\n\t\t\tpanic(\"need at least one app in the config file or remove the config file\")\n\t\t}\n\t}\n}\n\n\/\/ NewConfig returns a new instance of `Config` with a appName by parsing\n\/\/ the config file that in the rumtime directory named \"config.json\".\n\/\/ NewConfig returns the defaultConfig(field \"AppName\" be setted to provided\n\/\/ appName) if the config file or appName doesn't exist.\nfunc NewConfig(appName string) *Config {\n\tc := defaultConfig\n\tswitch {\n\tcase configs == nil:\n\t\tc.AppName = appName\n\tcase len(configs) == 1:\n\t\tfor k, v := range configs {\n\t\t\tc.AppName = k\n\t\t\tc.Data = v.(map[string]interface{})\n\t\t\tc.fill()\n\t\t}\n\tcase configs[appName] == nil:\n\t\tpanic(fmt.Sprintf(\"app %s does not exist in the config file\", appName))\n\tdefault:\n\t\tc.AppName = appName\n\t\tc.Data = configs[appName].(map[string]interface{})\n\t\tc.fill()\n\t}\n\treturn &c\n}\n\n\/\/ fill fills field's value from field `Data` of c. It fills field's\n\/\/ value from defaultConfig if target value in `Data` of c does not\n\/\/ exist.\nfunc (c *Config) fill() {\n\tif dm, ok := c.Data[\"debug_mode\"]; ok {\n\t\tc.DebugMode = dm.(bool)\n\t}\n\tif lf, ok := c.Data[\"log_format\"]; ok {\n\t\tc.LogFormat = lf.(string)\n\t}\n\tif addr, ok := c.Data[\"address\"]; ok {\n\t\tc.Address = addr.(string)\n\t}\n\tif tlscf, ok := c.Data[\"tls_cert_file\"]; ok {\n\t\tc.TLSCertFile = tlscf.(string)\n\t}\n\tif tlskf, ok := c.Data[\"tls_key_file\"]; ok {\n\t\tc.TLSKeyFile = tlskf.(string)\n\t}\n\tif rt, ok := c.Data[\"read_timeout\"]; ok {\n\t\tc.ReadTimeout = time.Duration(rt.(float64)) * time.Second\n\t}\n\tif wt, ok := c.Data[\"write_timeout\"]; ok {\n\t\tc.WriteTimeout = time.Duration(wt.(float64)) * time.Second\n\t}\n\tif tr, ok := c.Data[\"templates_root\"]; ok {\n\t\tc.TemplatesRoot = tr.(string)\n\t}\n}\n<commit_msg>refactor: justify comments in config.go<commit_after>package air\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Config is a global set of configurations that for an instance of `Air`\n\/\/ for customization.\ntype Config struct {\n\t\/\/ AppName represens the name of current `Air` instance.\n\tAppName string\n\n\t\/\/ DebugMode represents the state of `Air`'s debug mode.\n\t\/\/\n\t\/\/ Default value is false.\n\t\/\/\n\t\/\/ It's called \"debug_mode\" in the config file.\n\tDebugMode bool\n\n\t\/\/ LogFormat represents the format of `Logger`'s output content.\n\t\/\/\n\t\/\/ Default value is:\n\t\/\/ `{\"app_name\":\"{{.app_name}}\",\"time\":\"{{.time_rfc3339}}\",` +\n\t\/\/ `\"level\":\"{{.level}}\",\"file\":\"{{.short_file}}\",\"line\":\"{{.line}}\"}`\n\t\/\/\n\t\/\/ It's called \"log_format\" in the config file.\n\tLogFormat string\n\n\t\/\/ Address represents the TCP address that `Server` to listen\n\t\/\/ on.\n\t\/\/\n\t\/\/ Default value is \"localhost:8080\".\n\t\/\/\n\t\/\/ It's called \"address\" in the config file.\n\tAddress string\n\n\t\/\/ Listener represens the custom `net.Listener`. If set, `Server`\n\t\/\/ accepts connections on it.\n\t\/\/\n\t\/\/ Default value is nil.\n\tListener net.Listener\n\n\t\/\/ TLSCertFile represents the TLS certificate file path.\n\t\/\/\n\t\/\/ Default value is \"\".\n\t\/\/\n\t\/\/ It's called \"tls_cert_file\" in the config file.\n\tTLSCertFile string\n\n\t\/\/ TLSKeyFile represents the TLS key file path.\n\t\/\/\n\t\/\/ Default value is \"\".\n\t\/\/\n\t\/\/ It's called \"tls_key_file\" in the config file.\n\tTLSKeyFile string\n\n\t\/\/ ReadTimeout represents the maximum duration before timing out\n\t\/\/ read of the request.\n\t\/\/\n\t\/\/ Default value is 0.\n\t\/\/\n\t\/\/ It's called \"read_timeout\" in the config file.\n\t\/\/\n\t\/\/ *It's unit in the config file is SECONDS.*\n\tReadTimeout time.Duration\n\n\t\/\/ WriteTimeout represents the maximum duration before timing\n\t\/\/ out write of the response.\n\t\/\/\n\t\/\/ Default value is 0.\n\t\/\/\n\t\/\/ It's called \"write_timeout\" in the config file.\n\t\/\/\n\t\/\/ *It's unit in the config file is SECONDS.*\n\tWriteTimeout time.Duration\n\n\t\/\/ TemplatesRoot represents the root directory of the html templates.\n\t\/\/ It will be parsed into `Renderer`.\n\t\/\/\n\t\/\/ Default value is \"templates\" that means a subdirectory of the\n\t\/\/ runtime directory.\n\t\/\/\n\t\/\/ It's called \"templates_root\" in the config file.\n\tTemplatesRoot string\n\n\t\/\/ Data represents the data that parsing from config file. You can\n\t\/\/ use it to access the values in the config file.\n\t\/\/\n\t\/\/ e.g. c.Data[\"mysql_user_name\"] will accesses the value in config\n\t\/\/ file called \"mysql_user_name\".\n\tData JSONMap\n}\n\n\/\/ defaultConfig is the default instance of `Config`.\nvar defaultConfig Config\n\n\/\/ configs is the JSON map that stored all the configs parsing from\n\/\/ config file.\nvar configs JSONMap\n\nfunc init() {\n\tdefaultConfig = Config{\n\t\tLogFormat: `{\"app_name\":\"{{.app_name}}\",\"time\":\"{{.time_rfc3339}}\",` +\n\t\t\t`\"level\":\"{{.level}}\",\"file\":\"{{.short_file}}\",\"line\":\"{{.line}}\"}`,\n\t\tAddress: \"localhost:8080\",\n\t\tTemplatesRoot: \"templates\",\n\t}\n\n\tvar cfn = \"config.json\"\n\t_, err := os.Stat(cfn)\n\tif err == nil || os.IsExist(err) {\n\t\tbytes, err := ioutil.ReadFile(cfn)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = json.Unmarshal(bytes, &configs)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif len(configs) == 0 {\n\t\t\tpanic(\"need at least one app in the config file or remove the config file\")\n\t\t}\n\t}\n}\n\n\/\/ NewConfig returns a new instance of `Config` with a appName by parsing\n\/\/ the config file that in the rumtime directory named \"config.json\".\n\/\/\n\/\/ NewConfig returns the defaultConfig(field \"AppName\" be setted to provided\n\/\/ appName) if the config file doesn't exist.\nfunc NewConfig(appName string) *Config {\n\tc := defaultConfig\n\tswitch {\n\tcase configs == nil:\n\t\tc.AppName = appName\n\tcase len(configs) == 1:\n\t\tfor k, v := range configs {\n\t\t\tc.AppName = k\n\t\t\tc.Data = v.(map[string]interface{})\n\t\t\tc.fillData()\n\t\t}\n\tcase configs[appName] == nil:\n\t\tpanic(fmt.Sprintf(\"app %s does not exist in the config file\", appName))\n\tdefault:\n\t\tc.AppName = appName\n\t\tc.Data = configs[appName].(map[string]interface{})\n\t\tc.fillData()\n\t}\n\treturn &c\n}\n\n\/\/ fillData fills field's value from field `Data` of c.\nfunc (c *Config) fillData() {\n\tif dm, ok := c.Data[\"debug_mode\"]; ok {\n\t\tc.DebugMode = dm.(bool)\n\t}\n\tif lf, ok := c.Data[\"log_format\"]; ok {\n\t\tc.LogFormat = lf.(string)\n\t}\n\tif addr, ok := c.Data[\"address\"]; ok {\n\t\tc.Address = addr.(string)\n\t}\n\tif tlscf, ok := c.Data[\"tls_cert_file\"]; ok {\n\t\tc.TLSCertFile = tlscf.(string)\n\t}\n\tif tlskf, ok := c.Data[\"tls_key_file\"]; ok {\n\t\tc.TLSKeyFile = tlskf.(string)\n\t}\n\tif rt, ok := c.Data[\"read_timeout\"]; ok {\n\t\tc.ReadTimeout = time.Duration(rt.(float64)) * time.Second\n\t}\n\tif wt, ok := c.Data[\"write_timeout\"]; ok {\n\t\tc.WriteTimeout = time.Duration(wt.(float64)) * time.Second\n\t}\n\tif tr, ok := c.Data[\"templates_root\"]; ok {\n\t\tc.TemplatesRoot = tr.(string)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2015 The btcsuite developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/btcsuite\/btcd\/btcjson\"\n\t\"github.com\/btcsuite\/btcutil\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n)\n\nconst (\n\t\/\/ unusableFlags are the command usage flags which this utility are not\n\t\/\/ able to use. In particular it doesn't support websockets and\n\t\/\/ consequently notifications.\n\tunusableFlags = btcjson.UFWebsocketOnly | btcjson.UFNotification\n)\n\nvar (\n\tbtcdHomeDir = btcutil.AppDataDir(\"btcd\", false)\n\tbtcctlHomeDir = btcutil.AppDataDir(\"btcctl\", false)\n\tbtcwalletHomeDir = btcutil.AppDataDir(\"btcwallet\", false)\n\tdefaultConfigFile = filepath.Join(btcctlHomeDir, \"btcctl.conf\")\n\tdefaultRPCServer = \"localhost\"\n\tdefaultRPCCertFile = filepath.Join(btcdHomeDir, \"rpc.cert\")\n\tdefaultWalletCertFile = filepath.Join(btcwalletHomeDir, \"rpc.cert\")\n)\n\n\/\/ listCommands categorizes and lists all of the usable commands along with\n\/\/ their one-line usage.\nfunc listCommands() {\n\tconst (\n\t\tcategoryChain uint8 = iota\n\t\tcategoryWallet\n\t\tnumCategories\n\t)\n\n\t\/\/ Get a list of registered commands and categorize and filter them.\n\tcmdMethods := btcjson.RegisteredCmdMethods()\n\tcategorized := make([][]string, numCategories)\n\tfor _, method := range cmdMethods {\n\t\tflags, err := btcjson.MethodUsageFlags(method)\n\t\tif err != nil {\n\t\t\t\/\/ This should never happen since the method was just\n\t\t\t\/\/ returned from the package, but be safe.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Skip the commands that aren't usable from this utility.\n\t\tif flags&unusableFlags != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tusage, err := btcjson.MethodUsageText(method)\n\t\tif err != nil {\n\t\t\t\/\/ This should never happen since the method was just\n\t\t\t\/\/ returned from the package, but be safe.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Categorize the command based on the usage flags.\n\t\tcategory := categoryChain\n\t\tif flags&btcjson.UFWalletOnly != 0 {\n\t\t\tcategory = categoryWallet\n\t\t}\n\t\tcategorized[category] = append(categorized[category], usage)\n\t}\n\n\t\/\/ Display the command according to their categories.\n\tcategoryTitles := make([]string, numCategories)\n\tcategoryTitles[categoryChain] = \"Chain Server Commands:\"\n\tcategoryTitles[categoryWallet] = \"Wallet Server Commands (--wallet):\"\n\tfor category := uint8(0); category < numCategories; category++ {\n\t\tfmt.Println(categoryTitles[category])\n\t\tfor _, usage := range categorized[category] {\n\t\t\tfmt.Println(usage)\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\n\/\/ config defines the configuration options for btcctl.\n\/\/\n\/\/ See loadConfig for details on the configuration load process.\ntype config struct {\n\tConfigFile string `short:\"C\" long:\"configfile\" description:\"Path to configuration file\"`\n\tListCommands bool `short:\"l\" long:\"listcommands\" description:\"List all of the supported commands and exit\"`\n\tNoTLS bool `long:\"notls\" description:\"Disable TLS\"`\n\tProxy string `long:\"proxy\" description:\"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)\"`\n\tProxyPass string `long:\"proxypass\" default-mask:\"-\" description:\"Password for proxy server\"`\n\tProxyUser string `long:\"proxyuser\" description:\"Username for proxy server\"`\n\tRPCCert string `short:\"c\" long:\"rpccert\" description:\"RPC server certificate chain for validation\"`\n\tRPCPassword string `short:\"P\" long:\"rpcpass\" default-mask:\"-\" description:\"RPC password\"`\n\tRPCServer string `short:\"s\" long:\"rpcserver\" description:\"RPC server to connect to\"`\n\tRPCUser string `short:\"u\" long:\"rpcuser\" description:\"RPC username\"`\n\tSimNet bool `long:\"simnet\" description:\"Connect to the simulation test network\"`\n\tTLSSkipVerify bool `long:\"skipverify\" description:\"Do not verify tls certificates (not recommended!)\"`\n\tTestNet3 bool `long:\"testnet\" description:\"Connect to testnet\"`\n\tShowVersion bool `short:\"V\" long:\"version\" description:\"Display version information and exit\"`\n\tWallet bool `long:\"wallet\" description:\"Connect to wallet\"`\n}\n\n\/\/ normalizeAddress returns addr with the passed default port appended if\n\/\/ there is not already a port specified.\nfunc normalizeAddress(addr string, useTestNet3, useSimNet, useWallet bool) string {\n\t_, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tvar defaultPort string\n\t\tswitch {\n\t\tcase useTestNet3:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"18332\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"18334\"\n\t\t\t}\n\t\tcase useSimNet:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"18554\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"18556\"\n\t\t\t}\n\t\tdefault:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"8332\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"8334\"\n\t\t\t}\n\t\t}\n\n\t\treturn net.JoinHostPort(addr, defaultPort)\n\t}\n\treturn addr\n}\n\n\/\/ cleanAndExpandPath expands environement variables and leading ~ in the\n\/\/ passed path, cleans the result, and returns it.\nfunc cleanAndExpandPath(path string) string {\n\t\/\/ Expand initial ~ to OS specific home directory.\n\tif strings.HasPrefix(path, \"~\") {\n\t\thomeDir := filepath.Dir(btcctlHomeDir)\n\t\tpath = strings.Replace(path, \"~\", homeDir, 1)\n\t}\n\n\t\/\/ NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,\n\t\/\/ but they variables can still be expanded via POSIX-style $VARIABLE.\n\treturn filepath.Clean(os.ExpandEnv(path))\n}\n\n\/\/ loadConfig initializes and parses the config using a config file and command\n\/\/ line options.\n\/\/\n\/\/ The configuration proceeds as follows:\n\/\/ \t1) Start with a default config with sane settings\n\/\/ \t2) Pre-parse the command line to check for an alternative config file\n\/\/ \t3) Load configuration file overwriting defaults with any specified options\n\/\/ \t4) Parse CLI options and overwrite\/add any specified options\n\/\/\n\/\/ The above results in functioning properly without any config settings\n\/\/ while still allowing the user to override settings with config files and\n\/\/ command line options. Command line options always take precedence.\nfunc loadConfig() (*config, []string, error) {\n\t\/\/ Default config.\n\tcfg := config{\n\t\tConfigFile: defaultConfigFile,\n\t\tRPCServer: defaultRPCServer,\n\t\tRPCCert: defaultRPCCertFile,\n\t}\n\n\t\/\/ Pre-parse the command line options to see if an alternative config\n\t\/\/ file, the version flag, or the list commands flag was specified. Any\n\t\/\/ errors aside from the help message error can be ignored here since\n\t\/\/ they will be caught by the final parse below.\n\tpreCfg := cfg\n\tpreParser := flags.NewParser(&preCfg, flags.HelpFlag)\n\t_, err := preParser.Parse()\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t\tfmt.Fprintln(os.Stderr, \"The special parameter `-` \"+\n\t\t\t\t\"indicates that a parameter should be read \"+\n\t\t\t\t\"from the\\nnext unread line from standard \"+\n\t\t\t\t\"input.\")\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tusageMessage := fmt.Sprintf(\"Use %s -h to show options\", appName)\n\tif preCfg.ShowVersion {\n\t\tfmt.Println(appName, \"version\", version())\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ Show the available commands and exit if the associated flag was\n\t\/\/ specified.\n\tif preCfg.ListCommands {\n\t\tlistCommands()\n\t\tos.Exit(0)\n\t}\n\n\tif _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {\n\t\t\/\/ Use config file for RPC server to create default btcctl config\n\t\tvar serverConfigPath string\n\t\tif preCfg.Wallet {\n\t\t\tserverConfigPath = filepath.Join(btcwalletHomeDir, \"btcwallet.conf\")\n\t\t} else {\n\t\t\tserverConfigPath = filepath.Join(btcdHomeDir, \"btcd.conf\")\n\t\t}\n\n\t\terr := createDefaultConfigFile(preCfg.ConfigFile, serverConfigPath)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a default config file: %v\\n\", err)\n\t\t}\n\t}\n\n\t\/\/ Load additional config from file.\n\tparser := flags.NewParser(&cfg, flags.Default)\n\terr = flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)\n\tif err != nil {\n\t\tif _, ok := err.(*os.PathError); !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error parsing config file: %v\\n\",\n\t\t\t\terr)\n\t\t\tfmt.Fprintln(os.Stderr, usageMessage)\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ Parse command line options again to ensure they take precedence.\n\tremainingArgs, err := parser.Parse()\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, usageMessage)\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Multiple networks can't be selected simultaneously.\n\tnumNets := 0\n\tif cfg.TestNet3 {\n\t\tnumNets++\n\t}\n\tif cfg.SimNet {\n\t\tnumNets++\n\t}\n\tif numNets > 1 {\n\t\tstr := \"%s: The testnet and simnet params can't be used \" +\n\t\t\t\"together -- choose one of the two\"\n\t\terr := fmt.Errorf(str, \"loadConfig\")\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Override the RPC certificate if the --wallet flag was specified and\n\t\/\/ the user did not specify one.\n\tif cfg.Wallet && cfg.RPCCert == defaultRPCCertFile {\n\t\tcfg.RPCCert = defaultWalletCertFile\n\t}\n\n\t\/\/ Handle environment variable expansion in the RPC certificate path.\n\tcfg.RPCCert = cleanAndExpandPath(cfg.RPCCert)\n\n\t\/\/ Add default port to RPC server based on --testnet and --wallet flags\n\t\/\/ if needed.\n\tcfg.RPCServer = normalizeAddress(cfg.RPCServer, cfg.TestNet3,\n\t\tcfg.SimNet, cfg.Wallet)\n\n\treturn &cfg, remainingArgs, nil\n}\n\n\/\/ createDefaultConfig creates a basic config file at the given destination path.\n\/\/ For this it tries to read the config file for the RPC server (either btcd or\n\/\/ btcwallet), and extract the RPC user and password from it.\nfunc createDefaultConfigFile(destinationPath, serverConfigPath string) error {\n\t\/\/ Read the RPC server config\n\tserverConfigFile, err := os.Open(serverConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer serverConfigFile.Close()\n\tcontent, err := ioutil.ReadAll(serverConfigFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Extract the rpcuser\n\trpcUserRegexp := regexp.MustCompile(`(?m)^\\s*rpcuser=([^\\s]+)`)\n\tuserSubmatches := rpcUserRegexp.FindSubmatch(content)\n\tif userSubmatches == nil {\n\t\t\/\/ No user found, nothing to do\n\t\treturn nil\n\t}\n\n\t\/\/ Extract the rpcpass\n\trpcPassRegexp := regexp.MustCompile(`(?m)^\\s*rpcpass=([^\\s]+)`)\n\tpassSubmatches := rpcPassRegexp.FindSubmatch(content)\n\tif passSubmatches == nil {\n\t\t\/\/ No password found, nothing to do\n\t\treturn nil\n\t}\n\n\t\/\/ Extract the notls\n\tnoTLSRegexp := regexp.MustCompile(`(?m)^\\s*notls=(0|1)(?:\\s|$)`)\n\tnoTLSSubmatches := noTLSRegexp.FindSubmatch(content)\n\n\t\/\/ Create the destination directory if it does not exists\n\terr = os.MkdirAll(filepath.Dir(destinationPath), 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the destination file and write the rpcuser and rpcpass to it\n\tdest, err := os.OpenFile(destinationPath,\n\t\tos.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dest.Close()\n\n\tdestString := fmt.Sprintf(\"rpcuser=%s\\nrpcpass=%s\\n\",\n\t\tstring(userSubmatches[1]), string(passSubmatches[1]))\n\tif noTLSSubmatches != nil {\n\t\tdestString += fmt.Sprintf(\"notls=%s\\n\", noTLSSubmatches[1])\n\t}\n\n\tdest.WriteString(destString)\n\n\treturn nil\n}\n<commit_msg>btcctl: add regtest mode to btcctl<commit_after>\/\/ Copyright (c) 2013-2015 The btcsuite developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/btcsuite\/btcd\/btcjson\"\n\t\"github.com\/btcsuite\/btcd\/chaincfg\"\n\t\"github.com\/btcsuite\/btcutil\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n)\n\nconst (\n\t\/\/ unusableFlags are the command usage flags which this utility are not\n\t\/\/ able to use. In particular it doesn't support websockets and\n\t\/\/ consequently notifications.\n\tunusableFlags = btcjson.UFWebsocketOnly | btcjson.UFNotification\n)\n\nvar (\n\tbtcdHomeDir = btcutil.AppDataDir(\"btcd\", false)\n\tbtcctlHomeDir = btcutil.AppDataDir(\"btcctl\", false)\n\tbtcwalletHomeDir = btcutil.AppDataDir(\"btcwallet\", false)\n\tdefaultConfigFile = filepath.Join(btcctlHomeDir, \"btcctl.conf\")\n\tdefaultRPCServer = \"localhost\"\n\tdefaultRPCCertFile = filepath.Join(btcdHomeDir, \"rpc.cert\")\n\tdefaultWalletCertFile = filepath.Join(btcwalletHomeDir, \"rpc.cert\")\n)\n\n\/\/ listCommands categorizes and lists all of the usable commands along with\n\/\/ their one-line usage.\nfunc listCommands() {\n\tconst (\n\t\tcategoryChain uint8 = iota\n\t\tcategoryWallet\n\t\tnumCategories\n\t)\n\n\t\/\/ Get a list of registered commands and categorize and filter them.\n\tcmdMethods := btcjson.RegisteredCmdMethods()\n\tcategorized := make([][]string, numCategories)\n\tfor _, method := range cmdMethods {\n\t\tflags, err := btcjson.MethodUsageFlags(method)\n\t\tif err != nil {\n\t\t\t\/\/ This should never happen since the method was just\n\t\t\t\/\/ returned from the package, but be safe.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Skip the commands that aren't usable from this utility.\n\t\tif flags&unusableFlags != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tusage, err := btcjson.MethodUsageText(method)\n\t\tif err != nil {\n\t\t\t\/\/ This should never happen since the method was just\n\t\t\t\/\/ returned from the package, but be safe.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Categorize the command based on the usage flags.\n\t\tcategory := categoryChain\n\t\tif flags&btcjson.UFWalletOnly != 0 {\n\t\t\tcategory = categoryWallet\n\t\t}\n\t\tcategorized[category] = append(categorized[category], usage)\n\t}\n\n\t\/\/ Display the command according to their categories.\n\tcategoryTitles := make([]string, numCategories)\n\tcategoryTitles[categoryChain] = \"Chain Server Commands:\"\n\tcategoryTitles[categoryWallet] = \"Wallet Server Commands (--wallet):\"\n\tfor category := uint8(0); category < numCategories; category++ {\n\t\tfmt.Println(categoryTitles[category])\n\t\tfor _, usage := range categorized[category] {\n\t\t\tfmt.Println(usage)\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\n\/\/ config defines the configuration options for btcctl.\n\/\/\n\/\/ See loadConfig for details on the configuration load process.\ntype config struct {\n\tConfigFile string `short:\"C\" long:\"configfile\" description:\"Path to configuration file\"`\n\tListCommands bool `short:\"l\" long:\"listcommands\" description:\"List all of the supported commands and exit\"`\n\tNoTLS bool `long:\"notls\" description:\"Disable TLS\"`\n\tProxy string `long:\"proxy\" description:\"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)\"`\n\tProxyPass string `long:\"proxypass\" default-mask:\"-\" description:\"Password for proxy server\"`\n\tProxyUser string `long:\"proxyuser\" description:\"Username for proxy server\"`\n\tRegressionTest bool `long:\"regtest\" description:\"Connect to the regression test network\"`\n\tRPCCert string `short:\"c\" long:\"rpccert\" description:\"RPC server certificate chain for validation\"`\n\tRPCPassword string `short:\"P\" long:\"rpcpass\" default-mask:\"-\" description:\"RPC password\"`\n\tRPCServer string `short:\"s\" long:\"rpcserver\" description:\"RPC server to connect to\"`\n\tRPCUser string `short:\"u\" long:\"rpcuser\" description:\"RPC username\"`\n\tSimNet bool `long:\"simnet\" description:\"Connect to the simulation test network\"`\n\tTLSSkipVerify bool `long:\"skipverify\" description:\"Do not verify tls certificates (not recommended!)\"`\n\tTestNet3 bool `long:\"testnet\" description:\"Connect to testnet\"`\n\tShowVersion bool `short:\"V\" long:\"version\" description:\"Display version information and exit\"`\n\tWallet bool `long:\"wallet\" description:\"Connect to wallet\"`\n}\n\n\/\/ normalizeAddress returns addr with the passed default port appended if\n\/\/ there is not already a port specified.\nfunc normalizeAddress(addr string, chain *chaincfg.Params, useWallet bool) (string, error) {\n\t_, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tvar defaultPort string\n\t\tswitch chain {\n\t\tcase &chaincfg.TestNet3Params:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"18332\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"18334\"\n\t\t\t}\n\t\tcase &chaincfg.SimNetParams:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"18554\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"18556\"\n\t\t\t}\n\t\tcase &chaincfg.RegressionNetParams:\n\t\t\tif useWallet {\n\t\t\t\t\/\/ TODO: add port once regtest is supported in btcwallet\n\t\t\t\tparamErr := fmt.Errorf(\"cannot use -wallet with -regtest, btcwallet not yet compatible with regtest\")\n\t\t\t\treturn \"\", paramErr\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"18334\"\n\t\t\t}\n\t\tdefault:\n\t\t\tif useWallet {\n\t\t\t\tdefaultPort = \"8332\"\n\t\t\t} else {\n\t\t\t\tdefaultPort = \"8334\"\n\t\t\t}\n\t\t}\n\n\t\treturn net.JoinHostPort(addr, defaultPort), nil\n\t}\n\treturn addr, nil\n}\n\n\/\/ cleanAndExpandPath expands environement variables and leading ~ in the\n\/\/ passed path, cleans the result, and returns it.\nfunc cleanAndExpandPath(path string) string {\n\t\/\/ Expand initial ~ to OS specific home directory.\n\tif strings.HasPrefix(path, \"~\") {\n\t\thomeDir := filepath.Dir(btcctlHomeDir)\n\t\tpath = strings.Replace(path, \"~\", homeDir, 1)\n\t}\n\n\t\/\/ NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,\n\t\/\/ but they variables can still be expanded via POSIX-style $VARIABLE.\n\treturn filepath.Clean(os.ExpandEnv(path))\n}\n\n\/\/ loadConfig initializes and parses the config using a config file and command\n\/\/ line options.\n\/\/\n\/\/ The configuration proceeds as follows:\n\/\/ \t1) Start with a default config with sane settings\n\/\/ \t2) Pre-parse the command line to check for an alternative config file\n\/\/ \t3) Load configuration file overwriting defaults with any specified options\n\/\/ \t4) Parse CLI options and overwrite\/add any specified options\n\/\/\n\/\/ The above results in functioning properly without any config settings\n\/\/ while still allowing the user to override settings with config files and\n\/\/ command line options. Command line options always take precedence.\nfunc loadConfig() (*config, []string, error) {\n\t\/\/ Default config.\n\tcfg := config{\n\t\tConfigFile: defaultConfigFile,\n\t\tRPCServer: defaultRPCServer,\n\t\tRPCCert: defaultRPCCertFile,\n\t}\n\n\t\/\/ Pre-parse the command line options to see if an alternative config\n\t\/\/ file, the version flag, or the list commands flag was specified. Any\n\t\/\/ errors aside from the help message error can be ignored here since\n\t\/\/ they will be caught by the final parse below.\n\tpreCfg := cfg\n\tpreParser := flags.NewParser(&preCfg, flags.HelpFlag)\n\t_, err := preParser.Parse()\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t\tfmt.Fprintln(os.Stderr, \"The special parameter `-` \"+\n\t\t\t\t\"indicates that a parameter should be read \"+\n\t\t\t\t\"from the\\nnext unread line from standard \"+\n\t\t\t\t\"input.\")\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tusageMessage := fmt.Sprintf(\"Use %s -h to show options\", appName)\n\tif preCfg.ShowVersion {\n\t\tfmt.Println(appName, \"version\", version())\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ Show the available commands and exit if the associated flag was\n\t\/\/ specified.\n\tif preCfg.ListCommands {\n\t\tlistCommands()\n\t\tos.Exit(0)\n\t}\n\n\tif _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) {\n\t\t\/\/ Use config file for RPC server to create default btcctl config\n\t\tvar serverConfigPath string\n\t\tif preCfg.Wallet {\n\t\t\tserverConfigPath = filepath.Join(btcwalletHomeDir, \"btcwallet.conf\")\n\t\t} else {\n\t\t\tserverConfigPath = filepath.Join(btcdHomeDir, \"btcd.conf\")\n\t\t}\n\n\t\terr := createDefaultConfigFile(preCfg.ConfigFile, serverConfigPath)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a default config file: %v\\n\", err)\n\t\t}\n\t}\n\n\t\/\/ Load additional config from file.\n\tparser := flags.NewParser(&cfg, flags.Default)\n\terr = flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)\n\tif err != nil {\n\t\tif _, ok := err.(*os.PathError); !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error parsing config file: %v\\n\",\n\t\t\t\terr)\n\t\t\tfmt.Fprintln(os.Stderr, usageMessage)\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ Parse command line options again to ensure they take precedence.\n\tremainingArgs, err := parser.Parse()\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, usageMessage)\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ default network is mainnet\n\tnetwork := &chaincfg.MainNetParams\n\n\t\/\/ Multiple networks can't be selected simultaneously.\n\tnumNets := 0\n\tif cfg.TestNet3 {\n\t\tnumNets++\n\t\tnetwork = &chaincfg.TestNet3Params\n\t}\n\tif cfg.SimNet {\n\t\tnumNets++\n\t\tnetwork = &chaincfg.SimNetParams\n\t}\n\tif cfg.RegressionTest {\n\t\tnumNets++\n\t\tnetwork = &chaincfg.RegressionNetParams\n\t}\n\n\tif numNets > 1 {\n\t\tstr := \"%s: Multiple network params can't be used \" +\n\t\t\t\"together -- choose one\"\n\t\terr := fmt.Errorf(str, \"loadConfig\")\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Override the RPC certificate if the --wallet flag was specified and\n\t\/\/ the user did not specify one.\n\tif cfg.Wallet && cfg.RPCCert == defaultRPCCertFile {\n\t\tcfg.RPCCert = defaultWalletCertFile\n\t}\n\n\t\/\/ Handle environment variable expansion in the RPC certificate path.\n\tcfg.RPCCert = cleanAndExpandPath(cfg.RPCCert)\n\n\t\/\/ Add default port to RPC server based on --testnet and --wallet flags\n\t\/\/ if needed.\n\tcfg.RPCServer, err = normalizeAddress(cfg.RPCServer, network, cfg.Wallet)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cfg, remainingArgs, nil\n}\n\n\/\/ createDefaultConfig creates a basic config file at the given destination path.\n\/\/ For this it tries to read the config file for the RPC server (either btcd or\n\/\/ btcwallet), and extract the RPC user and password from it.\nfunc createDefaultConfigFile(destinationPath, serverConfigPath string) error {\n\t\/\/ Read the RPC server config\n\tserverConfigFile, err := os.Open(serverConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer serverConfigFile.Close()\n\tcontent, err := ioutil.ReadAll(serverConfigFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Extract the rpcuser\n\trpcUserRegexp := regexp.MustCompile(`(?m)^\\s*rpcuser=([^\\s]+)`)\n\tuserSubmatches := rpcUserRegexp.FindSubmatch(content)\n\tif userSubmatches == nil {\n\t\t\/\/ No user found, nothing to do\n\t\treturn nil\n\t}\n\n\t\/\/ Extract the rpcpass\n\trpcPassRegexp := regexp.MustCompile(`(?m)^\\s*rpcpass=([^\\s]+)`)\n\tpassSubmatches := rpcPassRegexp.FindSubmatch(content)\n\tif passSubmatches == nil {\n\t\t\/\/ No password found, nothing to do\n\t\treturn nil\n\t}\n\n\t\/\/ Extract the notls\n\tnoTLSRegexp := regexp.MustCompile(`(?m)^\\s*notls=(0|1)(?:\\s|$)`)\n\tnoTLSSubmatches := noTLSRegexp.FindSubmatch(content)\n\n\t\/\/ Create the destination directory if it does not exists\n\terr = os.MkdirAll(filepath.Dir(destinationPath), 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the destination file and write the rpcuser and rpcpass to it\n\tdest, err := os.OpenFile(destinationPath,\n\t\tos.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dest.Close()\n\n\tdestString := fmt.Sprintf(\"rpcuser=%s\\nrpcpass=%s\\n\",\n\t\tstring(userSubmatches[1]), string(passSubmatches[1]))\n\tif noTLSSubmatches != nil {\n\t\tdestString += fmt.Sprintf(\"notls=%s\\n\", noTLSSubmatches[1])\n\t}\n\n\tdest.WriteString(destString)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package docker\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/ViBiOh\/docker-deploy\/jsonHttp\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/api\/types\/strslice\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst minMemory = 16777216\nconst maxMemory = 805306368\nconst defaultTag = `:latest`\nconst deploySuffix = `_deploy`\nconst linkSeparator = `:`\n\nvar networkConfig = network.NetworkingConfig{\n\tEndpointsConfig: map[string]*network.EndpointSettings{\n\t\t`traefik`: &network.EndpointSettings{},\n\t},\n}\n\nvar imageTag = regexp.MustCompile(`^\\S*?:\\S+$`)\nvar commandSplit = regexp.MustCompile(`[\"']([^\"']+)[\"']|(\\S+)`)\n\ntype dockerComposeService struct {\n\tImage string\n\tCommand string\n\tEnvironment map[string]string\n\tLabels map[string]string\n\tLinks []string\n\tReadOnly bool `yaml:\"read_only\"`\n\tCPUShares int64 `yaml:\"cpu_shares\"`\n\tMemoryLimit int64 `yaml:\"mem_limit\"`\n}\n\ntype dockerCompose struct {\n\tVersion string\n\tServices map[string]dockerComposeService\n}\n\ntype deployedService struct {\n\tID string\n\tName string\n}\n\nfunc getConfig(service *dockerComposeService, loggedUser *user, appName string) *container.Config {\n\tenvironments := make([]string, len(service.Environment))\n\tfor key, value := range service.Environment {\n\t\tenvironments = append(environments, key+`=`+value)\n\t}\n\n\tif service.Labels == nil {\n\t\tservice.Labels = make(map[string]string)\n\t}\n\n\tservice.Labels[ownerLabel] = loggedUser.username\n\tservice.Labels[appLabel] = appName\n\n\tconfig := container.Config{\n\t\tImage: service.Image,\n\t\tLabels: service.Labels,\n\t\tEnv: environments,\n\t}\n\n\tif service.Command != `` {\n\t\tconfig.Cmd = strslice.StrSlice{}\n\t\tfor _, args := range commandSplit.FindAllStringSubmatch(service.Command, -1) {\n\t\t\tconfig.Cmd = append(config.Cmd, args[1]+args[2])\n\t\t}\n\t}\n\n\treturn &config\n}\n\nfunc getHostConfig(service *dockerComposeService, deployedServices map[string]deployedService) *container.HostConfig {\n\thostConfig := container.HostConfig{\n\t\tLogConfig: container.LogConfig{Type: `json-file`, Config: map[string]string{\n\t\t\t`max-size`: `50m`,\n\t\t}},\n\t\tRestartPolicy: container.RestartPolicy{Name: `on-failure`, MaximumRetryCount: 5},\n\t\tResources: container.Resources{\n\t\t\tCPUShares: 128,\n\t\t\tMemory: minMemory,\n\t\t},\n\t\tSecurityOpt: []string{`no-new-privileges`},\n\t}\n\n\tif service.ReadOnly {\n\t\thostConfig.ReadonlyRootfs = service.ReadOnly\n\t}\n\n\tif service.CPUShares != 0 {\n\t\thostConfig.Resources.CPUShares = service.CPUShares\n\t}\n\n\tif service.MemoryLimit != 0 {\n\t\tif service.MemoryLimit <= maxMemory {\n\t\t\thostConfig.Resources.Memory = service.MemoryLimit\n\t\t} else {\n\t\t\thostConfig.Resources.Memory = maxMemory\n\t\t}\n\t}\n\n\tfor _, link := range service.Links {\n\t\tlinkParts := strings.Split(link, linkSeparator)\n\n\t\tid := linkParts[0]\n\t\tif linkedService, ok := deployedServices[linkParts[0]]; ok {\n\t\t\tid = linkedService.ID\n\t\t}\n\n\t\talias := linkParts[0]\n\t\tif len(linkParts) > 1 {\n\t\t\talias = linkParts[1]\n\t\t}\n\n\t\thostConfig.Links = append(hostConfig.Links, id+linkSeparator+alias)\n\t}\n\n\treturn &hostConfig\n}\n\nfunc pullImage(image string, loggedUser *user) error {\n\tif !imageTag.MatchString(image) {\n\t\timage = image + defaultTag\n\t}\n\n\tlog.Print(loggedUser.username + ` starts pulling for ` + image)\n\tpull, err := docker.ImagePull(context.Background(), image, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(`Error while pulling image: %v`, err)\n\t}\n\n\treadBody(pull)\n\tlog.Print(loggedUser.username + ` ends pulling for ` + image)\n\treturn nil\n}\n\nfunc cleanContainers(containers *[]types.Container, loggedUser *user) {\n\tfor _, container := range *containers {\n\t\tlog.Print(loggedUser.username + ` stops ` + strings.Join(container.Names, `, `))\n\t\tstopContainer(container.ID)\n\t\tlog.Print(loggedUser.username + ` rm ` + strings.Join(container.Names, `, `))\n\t\trmContainer(container.ID)\n\t}\n}\n\nfunc renameDeployedContainers(containers *map[string]deployedService) error {\n\tfor _, service := range *containers {\n\t\tif err := docker.ContainerRename(context.Background(), service.ID, strings.TrimSuffix(service.Name, deploySuffix)); err != nil {\n\t\t\treturn fmt.Errorf(`Error while renaming container %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getServiceFullName(appName string, serviceName string) string {\n\treturn appName + `_` + serviceName + deploySuffix\n}\n\nfunc createAppHandler(w http.ResponseWriter, loggedUser *user, appName []byte, composeFile []byte) {\n\tif len(appName) == 0 || len(composeFile) == 0 {\n\t\thttp.Error(w, `An application name and a compose file are required`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcompose := dockerCompose{}\n\tif err := yaml.Unmarshal(composeFile, &compose); err != nil {\n\t\terrorHandler(w, fmt.Errorf(`Error while unmarshalling compose file: %v`, err))\n\t\treturn\n\t}\n\n\tappNameStr := string(appName)\n\tlog.Print(loggedUser.username + ` deploys ` + appNameStr)\n\n\townerContainers, err := listContainers(loggedUser, &appNameStr)\n\tif err != nil {\n\t\terrorHandler(w, err)\n\t\treturn\n\t}\n\n\tdeployedServices := make(map[string]deployedService)\n\tfor serviceName, service := range compose.Services {\n\t\tif err := pullImage(service.Image, loggedUser); err != nil {\n\t\t\terrorHandler(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tserviceFullName := getServiceFullName(appNameStr, serviceName)\n\t\tlog.Print(loggedUser.username + ` starts ` + serviceFullName)\n\n\t\tid, err := docker.ContainerCreate(context.Background(), getConfig(&service, loggedUser, appNameStr), getHostConfig(&service, deployedServices), &networkConfig, serviceFullName)\n\t\tif err != nil {\n\t\t\terrorHandler(w, fmt.Errorf(`Error while creating container: %v`, err))\n\t\t\treturn\n\t\t}\n\n\t\tstartContainer(id.ID)\n\t\tdeployedServices[serviceName] = deployedService{ID: id.ID, Name: serviceFullName}\n\t}\n\n\tlog.Print(`Waiting 10 seconds for containers to start...`)\n\ttime.Sleep(10 * time.Second)\n\n\tcleanContainers(&ownerContainers, loggedUser)\n\tif err := renameDeployedContainers(&deployedServices); err != nil {\n\t\terrorHandler(w, err)\n\t\treturn\n\t}\n\n\tjsonHttp.ResponseJSON(w, results{deployedServices})\n}\n<commit_msg>Changing link method : by name<commit_after>package docker\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/ViBiOh\/docker-deploy\/jsonHttp\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/api\/types\/strslice\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst minMemory = 16777216\nconst maxMemory = 805306368\nconst defaultTag = `:latest`\nconst deploySuffix = `_deploy`\nconst linkSeparator = `:`\n\nvar networkConfig = network.NetworkingConfig{\n\tEndpointsConfig: map[string]*network.EndpointSettings{\n\t\t`traefik`: &network.EndpointSettings{},\n\t},\n}\n\nvar imageTag = regexp.MustCompile(`^\\S*?:\\S+$`)\nvar commandSplit = regexp.MustCompile(`[\"']([^\"']+)[\"']|(\\S+)`)\n\ntype dockerComposeService struct {\n\tImage string\n\tCommand string\n\tEnvironment map[string]string\n\tLabels map[string]string\n\tLinks []string\n\tReadOnly bool `yaml:\"read_only\"`\n\tCPUShares int64 `yaml:\"cpu_shares\"`\n\tMemoryLimit int64 `yaml:\"mem_limit\"`\n}\n\ntype dockerCompose struct {\n\tVersion string\n\tServices map[string]dockerComposeService\n}\n\ntype deployedService struct {\n\tID string\n\tName string\n}\n\nfunc getConfig(service *dockerComposeService, loggedUser *user, appName string) *container.Config {\n\tenvironments := make([]string, len(service.Environment))\n\tfor key, value := range service.Environment {\n\t\tenvironments = append(environments, key+`=`+value)\n\t}\n\n\tif service.Labels == nil {\n\t\tservice.Labels = make(map[string]string)\n\t}\n\n\tservice.Labels[ownerLabel] = loggedUser.username\n\tservice.Labels[appLabel] = appName\n\n\tconfig := container.Config{\n\t\tImage: service.Image,\n\t\tLabels: service.Labels,\n\t\tEnv: environments,\n\t}\n\n\tif service.Command != `` {\n\t\tconfig.Cmd = strslice.StrSlice{}\n\t\tfor _, args := range commandSplit.FindAllStringSubmatch(service.Command, -1) {\n\t\t\tconfig.Cmd = append(config.Cmd, args[1]+args[2])\n\t\t}\n\t}\n\n\treturn &config\n}\n\nfunc getHostConfig(service *dockerComposeService, deployedServices map[string]deployedService) *container.HostConfig {\n\thostConfig := container.HostConfig{\n\t\tLogConfig: container.LogConfig{Type: `json-file`, Config: map[string]string{\n\t\t\t`max-size`: `50m`,\n\t\t}},\n\t\tRestartPolicy: container.RestartPolicy{Name: `on-failure`, MaximumRetryCount: 5},\n\t\tResources: container.Resources{\n\t\t\tCPUShares: 128,\n\t\t\tMemory: minMemory,\n\t\t},\n\t\tSecurityOpt: []string{`no-new-privileges`},\n\t}\n\n\tif service.ReadOnly {\n\t\thostConfig.ReadonlyRootfs = service.ReadOnly\n\t}\n\n\tif service.CPUShares != 0 {\n\t\thostConfig.Resources.CPUShares = service.CPUShares\n\t}\n\n\tif service.MemoryLimit != 0 {\n\t\tif service.MemoryLimit <= maxMemory {\n\t\t\thostConfig.Resources.Memory = service.MemoryLimit\n\t\t} else {\n\t\t\thostConfig.Resources.Memory = maxMemory\n\t\t}\n\t}\n\n\tfor _, link := range service.Links {\n\t\tlinkParts := strings.Split(link, linkSeparator)\n\n\t\ttarget := linkParts[0]\n\t\tif linkedService, ok := deployedServices[linkParts[0]]; ok {\n\t\t\ttarget = linkedService.Name\n\t\t}\n\n\t\talias := linkParts[0]\n\t\tif len(linkParts) > 1 {\n\t\t\talias = linkParts[1]\n\t\t}\n\n\t\thostConfig.Links = append(hostConfig.Links, target+linkSeparator+alias)\n\t}\n\n\treturn &hostConfig\n}\n\nfunc pullImage(image string, loggedUser *user) error {\n\tif !imageTag.MatchString(image) {\n\t\timage = image + defaultTag\n\t}\n\n\tlog.Print(loggedUser.username + ` starts pulling for ` + image)\n\tpull, err := docker.ImagePull(context.Background(), image, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(`Error while pulling image: %v`, err)\n\t}\n\n\treadBody(pull)\n\tlog.Print(loggedUser.username + ` ends pulling for ` + image)\n\treturn nil\n}\n\nfunc cleanContainers(containers *[]types.Container, loggedUser *user) {\n\tfor _, container := range *containers {\n\t\tlog.Print(loggedUser.username + ` stops ` + strings.Join(container.Names, `, `))\n\t\tstopContainer(container.ID)\n\t\tlog.Print(loggedUser.username + ` rm ` + strings.Join(container.Names, `, `))\n\t\trmContainer(container.ID)\n\t}\n}\n\nfunc renameDeployedContainers(containers *map[string]deployedService) error {\n\tfor _, service := range *containers {\n\t\tif err := docker.ContainerRename(context.Background(), service.ID, strings.TrimSuffix(service.Name, deploySuffix)); err != nil {\n\t\t\treturn fmt.Errorf(`Error while renaming container %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getServiceFullName(appName string, serviceName string) string {\n\treturn appName + `_` + serviceName + deploySuffix\n}\n\nfunc createAppHandler(w http.ResponseWriter, loggedUser *user, appName []byte, composeFile []byte) {\n\tif len(appName) == 0 || len(composeFile) == 0 {\n\t\thttp.Error(w, `An application name and a compose file are required`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcompose := dockerCompose{}\n\tif err := yaml.Unmarshal(composeFile, &compose); err != nil {\n\t\terrorHandler(w, fmt.Errorf(`Error while unmarshalling compose file: %v`, err))\n\t\treturn\n\t}\n\n\tappNameStr := string(appName)\n\tlog.Print(loggedUser.username + ` deploys ` + appNameStr)\n\n\townerContainers, err := listContainers(loggedUser, &appNameStr)\n\tif err != nil {\n\t\terrorHandler(w, err)\n\t\treturn\n\t}\n\n\tdeployedServices := make(map[string]deployedService)\n\tfor serviceName, service := range compose.Services {\n\t\tif err := pullImage(service.Image, loggedUser); err != nil {\n\t\t\terrorHandler(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tserviceFullName := getServiceFullName(appNameStr, serviceName)\n\t\tlog.Print(loggedUser.username + ` starts ` + serviceFullName)\n\n\t\tid, err := docker.ContainerCreate(context.Background(), getConfig(&service, loggedUser, appNameStr), getHostConfig(&service, deployedServices), &networkConfig, serviceFullName)\n\t\tif err != nil {\n\t\t\terrorHandler(w, fmt.Errorf(`Error while creating container: %v`, err))\n\t\t\treturn\n\t\t}\n\n\t\tstartContainer(id.ID)\n\t\tdeployedServices[serviceName] = deployedService{ID: id.ID, Name: serviceFullName}\n\t}\n\n\tlog.Print(`Waiting 5 seconds for containers to start...`)\n\ttime.Sleep(5 * time.Second)\n\n\tcleanContainers(&ownerContainers, loggedUser)\n\tif err := renameDeployedContainers(&deployedServices); err != nil {\n\t\terrorHandler(w, err)\n\t\treturn\n\t}\n\n\tjsonHttp.ResponseJSON(w, results{deployedServices})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage nsinit\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/pkg\/system\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ default mount point flags\nconst defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV\n\n\/\/ setupNewMountNamespace is used to initialize a new mount namespace for an new\n\/\/ container in the rootfs that is specified.\n\/\/\n\/\/ There is no need to unmount the new mounts because as soon as the mount namespace\n\/\/ is no longer in use, the mounts will be removed automatically\nfunc setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error {\n\tflag := syscall.MS_PRIVATE\n\tif noPivotRoot {\n\t\tflag = syscall.MS_SLAVE\n\t}\n\tif err := system.Mount(\"\", \"\/\", \"\", uintptr(flag|syscall.MS_REC), \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mounting \/ as slave %s\", err)\n\t}\n\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mouting %s as bind %s\", rootfs, err)\n\t}\n\tif readonly {\n\t\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, \"\"); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s as readonly %s\", rootfs, err)\n\t\t}\n\t}\n\tif err := mountSystem(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"mount system %s\", err)\n\t}\n\tif err := copyDevNodes(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"copy dev nodes %s\", err)\n\t}\n\t\/\/ In non-privileged mode, this fails. Discard the error.\n\tsetupLoopbackDevices(rootfs)\n\tif err := setupDev(rootfs); err != nil {\n\t\treturn err\n\t}\n\tif console != \"\" {\n\t\tif err := setupPtmx(rootfs, console); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := system.Chdir(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"chdir into %s %s\", rootfs, err)\n\t}\n\n\tif noPivotRoot {\n\t\tif err := rootMsMove(rootfs); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := rootPivot(rootfs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsystem.Umask(0022)\n\n\treturn nil\n}\n\n\/\/ use a pivot root to setup the rootfs\nfunc rootPivot(rootfs string) error {\n\tpivotDir, err := ioutil.TempDir(rootfs, \".pivot_root\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create pivot_root dir %s\", pivotDir, err)\n\t}\n\tif err := system.Pivotroot(rootfs, pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"pivot_root %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\t\/\/ path to pivot dir now changed, update\n\tpivotDir = filepath.Join(\"\/\", filepath.Base(pivotDir))\n\tif err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"unmount pivot_root dir %s\", err)\n\t}\n\tif err := os.Remove(pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"remove pivot_root dir %s\", err)\n\t}\n\treturn nil\n}\n\n\/\/ use MS_MOVE and chroot to setup the rootfs\nfunc rootMsMove(rootfs string) error {\n\tif err := system.Mount(rootfs, \"\/\", \"\", syscall.MS_MOVE, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mount move %s into \/ %s\", rootfs, err)\n\t}\n\tif err := system.Chroot(\".\"); err != nil {\n\t\treturn fmt.Errorf(\"chroot . %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\treturn nil\n}\n\n\/\/ copyDevNodes mknods the hosts devices so the new container has access to them\nfunc copyDevNodes(rootfs string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tfor _, node := range []string{\n\t\t\"null\",\n\t\t\"zero\",\n\t\t\"full\",\n\t\t\"random\",\n\t\t\"urandom\",\n\t\t\"tty\",\n\t} {\n\t\tif err := copyDevNode(rootfs, node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupLoopbackDevices(rootfs string) error {\n\tfor i := 0; ; i++ {\n\t\tif err := copyDevNode(rootfs, fmt.Sprintf(\"loop%d\", i)); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc copyDevNode(rootfs, node string) error {\n\tstat, err := os.Stat(filepath.Join(\"\/dev\", node))\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tdest = filepath.Join(rootfs, \"dev\", node)\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t)\n\tif err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {\n\t\treturn fmt.Errorf(\"copy %s %s\", node, err)\n\t}\n\treturn nil\n}\n\n\/\/ setupDev symlinks the current processes pipes into the\n\/\/ appropriate destination on the containers rootfs\nfunc setupDev(rootfs string) error {\n\tfor _, link := range []struct {\n\t\tfrom string\n\t\tto string\n\t}{\n\t\t{\"\/proc\/kcore\", \"\/dev\/core\"},\n\t\t{\"\/proc\/self\/fd\", \"\/dev\/fd\"},\n\t\t{\"\/proc\/self\/fd\/0\", \"\/dev\/stdin\"},\n\t\t{\"\/proc\/self\/fd\/1\", \"\/dev\/stdout\"},\n\t\t{\"\/proc\/self\/fd\/2\", \"\/dev\/stderr\"},\n\t} {\n\t\tdest := filepath.Join(rootfs, link.to)\n\t\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t\t}\n\t\tif err := os.Symlink(link.from, dest); err != nil {\n\t\t\treturn fmt.Errorf(\"symlink %s %s\", dest, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupConsole ensures that the container has a proper \/dev\/console setup\nfunc setupConsole(rootfs, console string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tstat, err := os.Stat(console)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat console %s %s\", console, err)\n\t}\n\tvar (\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t\tdest = filepath.Join(rootfs, \"dev\/console\")\n\t)\n\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t}\n\tif err := os.Chmod(console, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chown(console, 0, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {\n\t\treturn fmt.Errorf(\"mknod %s %s\", dest, err)\n\t}\n\tif err := system.Mount(console, dest, \"bind\", syscall.MS_BIND, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"bind %s to %s %s\", console, dest, err)\n\t}\n\treturn nil\n}\n\n\/\/ mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts\n\/\/ inside the mount namespace\nfunc mountSystem(rootfs string) error {\n\tfor _, m := range []struct {\n\t\tsource string\n\t\tpath string\n\t\tdevice string\n\t\tflags int\n\t\tdata string\n\t}{\n\t\t{source: \"proc\", path: filepath.Join(rootfs, \"proc\"), device: \"proc\", flags: defaultMountFlags},\n\t\t{source: \"sysfs\", path: filepath.Join(rootfs, \"sys\"), device: \"sysfs\", flags: defaultMountFlags},\n\t\t{source: \"shm\", path: filepath.Join(rootfs, \"dev\", \"shm\"), device: \"tmpfs\", flags: defaultMountFlags, data: \"mode=1777,size=65536k\"},\n\t\t{source: \"devpts\", path: filepath.Join(rootfs, \"dev\", \"pts\"), device: \"devpts\", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: \"newinstance,ptmxmode=0666,mode=620,gid=5\"},\n\t} {\n\t\tif err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"mkdirall %s %s\", m.path, err)\n\t\t}\n\t\tif err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s into %s %s\", m.source, m.path, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupPtmx adds a symlink to pts\/ptmx for \/dev\/ptmx and\n\/\/ finishes setting up \/dev\/console\nfunc setupPtmx(rootfs, console string) error {\n\tptmx := filepath.Join(rootfs, \"dev\/ptmx\")\n\tif err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(\"pts\/ptmx\", ptmx); err != nil {\n\t\treturn fmt.Errorf(\"symlink dev ptmx %s\", err)\n\t}\n\tif err := setupConsole(rootfs, console); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ remountProc is used to detach and remount the proc filesystem\n\/\/ commonly needed with running a new process inside an existing container\nfunc remountProc() error {\n\tif err := system.Unmount(\"\/proc\", syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mount(\"proc\", \"\/proc\", \"proc\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc remountSys() error {\n\tif err := system.Unmount(\"\/sys\", syscall.MNT_DETACH); err != nil {\n\t\tif err != syscall.EINVAL {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := system.Mount(\"sysfs\", \"\/sys\", \"sysfs\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Always symlink \/dev\/ptmx for libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)<commit_after>\/\/ +build linux\n\npackage nsinit\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/pkg\/system\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ default mount point flags\nconst defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV\n\n\/\/ setupNewMountNamespace is used to initialize a new mount namespace for an new\n\/\/ container in the rootfs that is specified.\n\/\/\n\/\/ There is no need to unmount the new mounts because as soon as the mount namespace\n\/\/ is no longer in use, the mounts will be removed automatically\nfunc setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error {\n\tflag := syscall.MS_PRIVATE\n\tif noPivotRoot {\n\t\tflag = syscall.MS_SLAVE\n\t}\n\tif err := system.Mount(\"\", \"\/\", \"\", uintptr(flag|syscall.MS_REC), \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mounting \/ as slave %s\", err)\n\t}\n\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mouting %s as bind %s\", rootfs, err)\n\t}\n\tif readonly {\n\t\tif err := system.Mount(rootfs, rootfs, \"bind\", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, \"\"); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s as readonly %s\", rootfs, err)\n\t\t}\n\t}\n\tif err := mountSystem(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"mount system %s\", err)\n\t}\n\tif err := copyDevNodes(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"copy dev nodes %s\", err)\n\t}\n\t\/\/ In non-privileged mode, this fails. Discard the error.\n\tsetupLoopbackDevices(rootfs)\n\tif err := setupDev(rootfs); err != nil {\n\t\treturn err\n\t}\n\tif err := setupPtmx(rootfs, console); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Chdir(rootfs); err != nil {\n\t\treturn fmt.Errorf(\"chdir into %s %s\", rootfs, err)\n\t}\n\n\tif noPivotRoot {\n\t\tif err := rootMsMove(rootfs); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := rootPivot(rootfs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsystem.Umask(0022)\n\n\treturn nil\n}\n\n\/\/ use a pivot root to setup the rootfs\nfunc rootPivot(rootfs string) error {\n\tpivotDir, err := ioutil.TempDir(rootfs, \".pivot_root\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create pivot_root dir %s\", pivotDir, err)\n\t}\n\tif err := system.Pivotroot(rootfs, pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"pivot_root %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\t\/\/ path to pivot dir now changed, update\n\tpivotDir = filepath.Join(\"\/\", filepath.Base(pivotDir))\n\tif err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"unmount pivot_root dir %s\", err)\n\t}\n\tif err := os.Remove(pivotDir); err != nil {\n\t\treturn fmt.Errorf(\"remove pivot_root dir %s\", err)\n\t}\n\treturn nil\n}\n\n\/\/ use MS_MOVE and chroot to setup the rootfs\nfunc rootMsMove(rootfs string) error {\n\tif err := system.Mount(rootfs, \"\/\", \"\", syscall.MS_MOVE, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mount move %s into \/ %s\", rootfs, err)\n\t}\n\tif err := system.Chroot(\".\"); err != nil {\n\t\treturn fmt.Errorf(\"chroot . %s\", err)\n\t}\n\tif err := system.Chdir(\"\/\"); err != nil {\n\t\treturn fmt.Errorf(\"chdir \/ %s\", err)\n\t}\n\treturn nil\n}\n\n\/\/ copyDevNodes mknods the hosts devices so the new container has access to them\nfunc copyDevNodes(rootfs string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tfor _, node := range []string{\n\t\t\"null\",\n\t\t\"zero\",\n\t\t\"full\",\n\t\t\"random\",\n\t\t\"urandom\",\n\t\t\"tty\",\n\t} {\n\t\tif err := copyDevNode(rootfs, node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setupLoopbackDevices(rootfs string) error {\n\tfor i := 0; ; i++ {\n\t\tif err := copyDevNode(rootfs, fmt.Sprintf(\"loop%d\", i)); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc copyDevNode(rootfs, node string) error {\n\tstat, err := os.Stat(filepath.Join(\"\/dev\", node))\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tdest = filepath.Join(rootfs, \"dev\", node)\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t)\n\tif err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {\n\t\treturn fmt.Errorf(\"copy %s %s\", node, err)\n\t}\n\treturn nil\n}\n\n\/\/ setupDev symlinks the current processes pipes into the\n\/\/ appropriate destination on the containers rootfs\nfunc setupDev(rootfs string) error {\n\tfor _, link := range []struct {\n\t\tfrom string\n\t\tto string\n\t}{\n\t\t{\"\/proc\/kcore\", \"\/dev\/core\"},\n\t\t{\"\/proc\/self\/fd\", \"\/dev\/fd\"},\n\t\t{\"\/proc\/self\/fd\/0\", \"\/dev\/stdin\"},\n\t\t{\"\/proc\/self\/fd\/1\", \"\/dev\/stdout\"},\n\t\t{\"\/proc\/self\/fd\/2\", \"\/dev\/stderr\"},\n\t} {\n\t\tdest := filepath.Join(rootfs, link.to)\n\t\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t\t}\n\t\tif err := os.Symlink(link.from, dest); err != nil {\n\t\t\treturn fmt.Errorf(\"symlink %s %s\", dest, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupConsole ensures that the container has a proper \/dev\/console setup\nfunc setupConsole(rootfs, console string) error {\n\toldMask := system.Umask(0000)\n\tdefer system.Umask(oldMask)\n\n\tstat, err := os.Stat(console)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat console %s %s\", console, err)\n\t}\n\tvar (\n\t\tst = stat.Sys().(*syscall.Stat_t)\n\t\tdest = filepath.Join(rootfs, \"dev\/console\")\n\t)\n\tif err := os.Remove(dest); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"remove %s %s\", dest, err)\n\t}\n\tif err := os.Chmod(console, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chown(console, 0, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {\n\t\treturn fmt.Errorf(\"mknod %s %s\", dest, err)\n\t}\n\tif err := system.Mount(console, dest, \"bind\", syscall.MS_BIND, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"bind %s to %s %s\", console, dest, err)\n\t}\n\treturn nil\n}\n\n\/\/ mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts\n\/\/ inside the mount namespace\nfunc mountSystem(rootfs string) error {\n\tfor _, m := range []struct {\n\t\tsource string\n\t\tpath string\n\t\tdevice string\n\t\tflags int\n\t\tdata string\n\t}{\n\t\t{source: \"proc\", path: filepath.Join(rootfs, \"proc\"), device: \"proc\", flags: defaultMountFlags},\n\t\t{source: \"sysfs\", path: filepath.Join(rootfs, \"sys\"), device: \"sysfs\", flags: defaultMountFlags},\n\t\t{source: \"shm\", path: filepath.Join(rootfs, \"dev\", \"shm\"), device: \"tmpfs\", flags: defaultMountFlags, data: \"mode=1777,size=65536k\"},\n\t\t{source: \"devpts\", path: filepath.Join(rootfs, \"dev\", \"pts\"), device: \"devpts\", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: \"newinstance,ptmxmode=0666,mode=620,gid=5\"},\n\t} {\n\t\tif err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"mkdirall %s %s\", m.path, err)\n\t\t}\n\t\tif err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {\n\t\t\treturn fmt.Errorf(\"mounting %s into %s %s\", m.source, m.path, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ setupPtmx adds a symlink to pts\/ptmx for \/dev\/ptmx and\n\/\/ finishes setting up \/dev\/console\nfunc setupPtmx(rootfs, console string) error {\n\tptmx := filepath.Join(rootfs, \"dev\/ptmx\")\n\tif err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(\"pts\/ptmx\", ptmx); err != nil {\n\t\treturn fmt.Errorf(\"symlink dev ptmx %s\", err)\n\t}\n\tif console != \"\" {\n\t\tif err := setupConsole(rootfs, console); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ remountProc is used to detach and remount the proc filesystem\n\/\/ commonly needed with running a new process inside an existing container\nfunc remountProc() error {\n\tif err := system.Unmount(\"\/proc\", syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\tif err := system.Mount(\"proc\", \"\/proc\", \"proc\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc remountSys() error {\n\tif err := system.Unmount(\"\/sys\", syscall.MNT_DETACH); err != nil {\n\t\tif err != syscall.EINVAL {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := system.Mount(\"sysfs\", \"\/sys\", \"sysfs\", uintptr(defaultMountFlags), \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package driver\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\n\t\"github.com\/Spirals-Team\/docker-machine-driver-g5k\/api\"\n\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnflag\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\tgossh \"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ g5kReferenceEnvironment is the name of the reference environment automatically deployed on the node by Grid'5000\nconst g5kReferenceEnvironmentName string = \"debian9-x64-std\"\n\n\/\/ Driver parameters\ntype Driver struct {\n\t*drivers.BaseDriver\n\n\tG5kAPI *api.Client\n\tG5kJobID int\n\tG5kUsername string\n\tG5kPassword string\n\tG5kSite string\n\tG5kWalltime string\n\tG5kImage string\n\tG5kResourceProperties string\n\tG5kSkipVpnChecks bool\n\tG5kReuseRefEnvironment bool\n\tG5kJobQueue string\n\tG5kJobStartTime string\n\tDriverSSHPublicKey string\n\tExternalSSHPublicKeys []string\n\tG5kKeepAllocatedResourceAtDeletion bool\n}\n\n\/\/ NewDriver creates and returns a new instance of the driver\nfunc NewDriver() *Driver {\n\treturn &Driver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tSSHUser: drivers.DefaultSSHUser,\n\t\t\tSSHPort: drivers.DefaultSSHPort,\n\t\t},\n\t}\n}\n\n\/\/ DriverName returns the name of the driver\nfunc (d *Driver) DriverName() string {\n\treturn \"g5k\"\n}\n\n\/\/ GetCreateFlags add command line flags to configure the driver\nfunc (d *Driver) GetCreateFlags() []mcnflag.Flag {\n\treturn []mcnflag.Flag{\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_USERNAME\",\n\t\t\tName: \"g5k-username\",\n\t\t\tUsage: \"Your Grid5000 account username\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_PASSWORD\",\n\t\t\tName: \"g5k-password\",\n\t\t\tUsage: \"Your Grid5000 account password\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_SITE\",\n\t\t\tName: \"g5k-site\",\n\t\t\tUsage: \"Site to reserve the resources on\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_WALLTIME\",\n\t\t\tName: \"g5k-walltime\",\n\t\t\tUsage: \"Machine's lifetime (HH:MM:SS)\",\n\t\t\tValue: \"1:00:00\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_IMAGE\",\n\t\t\tName: \"g5k-image\",\n\t\t\tUsage: \"Name of the image (environment) to deploy on the node\",\n\t\t\tValue: g5kReferenceEnvironmentName,\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_RESOURCE_PROPERTIES\",\n\t\t\tName: \"g5k-resource-properties\",\n\t\t\tUsage: \"Resource selection with OAR properties (SQL format)\",\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_SKIP_VPN_CHECKS\",\n\t\t\tName: \"g5k-skip-vpn-checks\",\n\t\t\tUsage: \"Skip the VPN client connection and DNS configuration checks (for specific use case only, you should not enable this flag in normal use)\",\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_REUSE_REF_ENVIRONMENT\",\n\t\t\tName: \"g5k-reuse-ref-environment\",\n\t\t\tUsage: \"Reuse the Grid'5000 reference environment instead of re-deploying the node (it saves a lot of time)\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_JOB_QUEUE\",\n\t\t\tName: \"g5k-job-queue\",\n\t\t\tUsage: \"Specify the job queue (besteffort is NOT supported)\",\n\t\t\tValue: \"default\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_MAKE_RESOURCE_RESERVATION\",\n\t\t\tName: \"g5k-make-resource-reservation\",\n\t\t\tUsage: \"Make a resource reservation for the given start date. (in either 'YYYY-MM-DD HH:MM:SS' date format or an UNIX timestamp)\",\n\t\t},\n\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"G5K_USE_RESOURCE_RESERVATION\",\n\t\t\tName: \"g5k-use-resource-reservation\",\n\t\t\tUsage: \"Use a resource reservation (need to be a job of 'deploy' type and in the 'running' state)\",\n\t\t},\n\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"G5K_EXTERNAL_SSH_PUBLIC_KEYS\",\n\t\t\tName: \"g5k-external-ssh-public-keys\",\n\t\t\tUsage: \"Additional SSH public key(s) allowed to connect to the node (in authorized_keys format)\",\n\t\t\tValue: []string{},\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_KEEP_RESOURCE_AT_DELETION\",\n\t\t\tName: \"g5k-keep-resource-at-deletion\",\n\t\t\tUsage: \"Keep the allocated resource when removing the machine (the job will NOT be killed)\",\n\t\t},\n\t}\n}\n\n\/\/ SetConfigFromFlags configure the driver from the command line arguments\nfunc (d *Driver) SetConfigFromFlags(opts drivers.DriverOptions) error {\n\td.G5kUsername = opts.String(\"g5k-username\")\n\td.G5kPassword = opts.String(\"g5k-password\")\n\td.G5kSite = opts.String(\"g5k-site\")\n\td.G5kWalltime = opts.String(\"g5k-walltime\")\n\td.G5kImage = opts.String(\"g5k-image\")\n\td.G5kResourceProperties = opts.String(\"g5k-resource-properties\")\n\td.G5kSkipVpnChecks = opts.Bool(\"g5k-skip-vpn-checks\")\n\td.G5kReuseRefEnvironment = opts.Bool(\"g5k-reuse-ref-environment\")\n\td.G5kJobQueue = opts.String(\"g5k-job-queue\")\n\td.G5kJobStartTime = opts.String(\"g5k-make-resource-reservation\")\n\td.G5kJobID = opts.Int(\"g5k-use-resource-reservation\")\n\td.ExternalSSHPublicKeys = opts.StringSlice(\"g5k-external-ssh-public-keys\")\n\td.G5kKeepAllocatedResourceAtDeletion = opts.Bool(\"g5k-keep-resource-at-deletion\")\n\n\t\/\/ Docker Swarm\n\td.BaseDriver.SetSwarmConfigFromFlags(opts)\n\n\t\/\/ username is required\n\tif d.G5kUsername == \"\" {\n\t\treturn fmt.Errorf(\"You must give your Grid5000 account username\")\n\t}\n\n\t\/\/ password is required\n\tif d.G5kPassword == \"\" {\n\t\treturn fmt.Errorf(\"You must give your Grid5000 account password\")\n\t}\n\n\t\/\/ site is required\n\tif d.G5kSite == \"\" {\n\t\treturn fmt.Errorf(\"You must give the site you want to reserve the resources on\")\n\t}\n\n\t\/\/ contradictory use of parameters: providing an image to deploy while trying to reuse the reference environment\n\tif d.G5kReuseRefEnvironment && d.G5kImage != g5kReferenceEnvironmentName {\n\t\treturn fmt.Errorf(\"You have to choose between reusing the reference environment or redeploying the node with another image\")\n\t}\n\n\t\/\/ we cannot reuse the reference environment when the job is of type 'deploy'\n\tif d.G5kReuseRefEnvironment && (d.G5kJobStartTime != \"\" || d.G5kJobID != 0) {\n\t\treturn fmt.Errorf(\"Reusing the Grid'5000 reference environment on a resource reservation is not supported\")\n\t}\n\n\t\/\/ warn if user disable VPN check\n\tif d.G5kSkipVpnChecks {\n\t\tlog.Warn(\"VPN client connection and DNS configuration checks are disabled\")\n\t}\n\n\t\/\/ we cannot use the besteffort queue with docker-machine\n\tif d.G5kJobQueue == \"besteffort\" {\n\t\treturn fmt.Errorf(\"The besteffort queue is not supported\")\n\t}\n\n\treturn nil\n}\n\n\/\/ GetIP returns an IP or hostname that this host is available at\nfunc (d *Driver) GetIP() (string, error) {\n\tif d.IPAddress == \"\" {\n\t\tjob, err := d.G5kAPI.GetJob(d.G5kJobID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(job.Nodes) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"Failed to resolve IP address: The node have not been allocated\")\n\t\t}\n\n\t\td.IPAddress = job.Nodes[0]\n\t}\n\n\treturn d.IPAddress, nil\n}\n\n\/\/ GetMachineName returns the machine name\nfunc (d *Driver) GetMachineName() string {\n\treturn d.BaseDriver.GetMachineName()\n}\n\n\/\/ GetSSHHostname returns the machine hostname\nfunc (d *Driver) GetSSHHostname() (string, error) {\n\treturn d.GetIP()\n}\n\n\/\/ GetSSHKeyPath returns the ssh private key path\nfunc (d *Driver) GetSSHKeyPath() string {\n\treturn d.BaseDriver.GetSSHKeyPath()\n}\n\n\/\/ GetSSHPort returns the ssh port\nfunc (d *Driver) GetSSHPort() (int, error) {\n\treturn d.BaseDriver.GetSSHPort()\n}\n\n\/\/ GetSSHUsername returns the ssh user name\nfunc (d *Driver) GetSSHUsername() string {\n\treturn d.BaseDriver.GetSSHUsername()\n}\n\n\/\/ GetURL returns the URL of the docker daemon\nfunc (d *Driver) GetURL() (string, error) {\n\t\/\/ get IP address\n\tip, err := d.GetIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ format URL 'tcp:\/\/host:2376'\n\treturn fmt.Sprintf(\"tcp:\/\/%s\", net.JoinHostPort(ip, \"2376\")), nil\n}\n\n\/\/ GetState returns the state of the node\nfunc (d *Driver) GetState() (state.State, error) {\n\t\/\/ get job state from API\n\tstatus, err := d.G5kAPI.GetJobState(d.G5kJobID)\n\tif err != nil {\n\t\treturn state.Error, err\n\t}\n\n\tswitch status {\n\tcase \"waiting\":\n\t\treturn state.Starting, nil\n\tcase \"launching\":\n\t\treturn state.Starting, nil\n\tcase \"running\":\n\t\treturn state.Running, nil\n\tcase \"hold\":\n\t\treturn state.Stopped, nil\n\tcase \"error\":\n\t\treturn state.Error, nil\n\tcase \"terminated\":\n\t\treturn state.Stopped, nil\n\tdefault:\n\t\treturn state.None, nil\n\t}\n}\n\n\/\/ PreCreateCheck check parameters and submit the job to Grid5000\nfunc (d *Driver) PreCreateCheck() error {\n\t\/\/ prepare the driver store dir\n\tif err := d.prepareDriverStoreDirectory(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check VPN connection if enabled\n\tif !d.G5kSkipVpnChecks {\n\t\tif err := d.checkVpnConnection(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ create API client\n\td.G5kAPI = api.NewClient(d.G5kUsername, d.G5kPassword, d.G5kSite)\n\n\t\/\/ load driver SSH public key\n\tif err := d.loadDriverSSHPublicKey(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check format of external SSH public keys\n\tfor _, externalSSHPubKey := range d.ExternalSSHPublicKeys {\n\t\t_, _, _, _, err := gossh.ParseAuthorizedKey([]byte(externalSSHPubKey))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"The external SSH public key '%s' is invalid: %s\", externalSSHPubKey, err.Error())\n\t\t}\n\t}\n\n\t\/\/ skip the job submission\/reservation if a job ID is provided\n\tif d.G5kJobID == 0 {\n\t\tif d.G5kJobStartTime == \"\" {\n\t\t\t\/\/ make a job submission: the resources will be reserved for immediate use\n\t\t\tif err := d.makeJobSubmission(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ make a job reservation: the resources will be reserved for a defined date\/time\n\t\t\tif err := d.makeJobReservation(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ stop the machine creation\n\t\t\treturn fmt.Errorf(\"The job reservation have been successfully sent. Don't forget to save the Job ID to create the machine when the resources are available\")\n\t\t}\n\t}\n\n\t\/\/ wait for job to be in 'running' state\n\tif err := d.waitUntilJobIsReady(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Create wait for the job to be running, deploy the OS image and copy the ssh keys\nfunc (d *Driver) Create() error {\n\t\/\/ get node hostname from API\n\tjob, err := d.G5kAPI.GetJob(d.G5kJobID)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.BaseDriver.IPAddress = job.Nodes[0]\n\n\t\/\/ deploy OS image to the node\n\tif err := d.deployImageToNode(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy driver SSH key pair to machine directory\n\tif err := mcnutils.CopyFile(d.getDriverSSHKeyPath(), d.GetSSHKeyPath()); err != nil {\n\t\treturn err\n\t}\n\tif err := mcnutils.CopyFile(d.getDriverSSHKeyPath()+\".pub\", d.GetSSHKeyPath()+\".pub\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove delete the resources reservation\nfunc (d *Driver) Remove() error {\n\t\/\/ keep the resource allocated if the user asked for it\n\tif !d.G5kKeepAllocatedResourceAtDeletion {\n\t\tlog.Infof(\"Killing job... (id: '%d')\", d.G5kJobID)\n\t\td.G5kAPI.KillJob(d.G5kJobID)\n\t}\n\n\treturn nil\n}\n\n\/\/ Kill don't do anything\nfunc (d *Driver) Kill() error {\n\treturn fmt.Errorf(\"The 'kill' operation is not supported on Grid'5000\")\n}\n\n\/\/ Start don't do anything\nfunc (d *Driver) Start() error {\n\treturn fmt.Errorf(\"The 'start' operation is not supported on Grid'5000\")\n}\n\n\/\/ Stop don't do anything\nfunc (d *Driver) Stop() error {\n\treturn fmt.Errorf(\"The 'stop' operation is not supported on Grid'5000\")\n}\n\n\/\/ Restart don't do anything\nfunc (d *Driver) Restart() error {\n\treturn fmt.Errorf(\"The 'restart' operation is not supported on Grid'5000\")\n}\n<commit_msg>driver: The GetURL function will return an error when called on a stopped machine<commit_after>package driver\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\n\t\"github.com\/Spirals-Team\/docker-machine-driver-g5k\/api\"\n\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnflag\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\tgossh \"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ g5kReferenceEnvironment is the name of the reference environment automatically deployed on the node by Grid'5000\nconst g5kReferenceEnvironmentName string = \"debian9-x64-std\"\n\n\/\/ Driver parameters\ntype Driver struct {\n\t*drivers.BaseDriver\n\n\tG5kAPI *api.Client\n\tG5kJobID int\n\tG5kUsername string\n\tG5kPassword string\n\tG5kSite string\n\tG5kWalltime string\n\tG5kImage string\n\tG5kResourceProperties string\n\tG5kSkipVpnChecks bool\n\tG5kReuseRefEnvironment bool\n\tG5kJobQueue string\n\tG5kJobStartTime string\n\tDriverSSHPublicKey string\n\tExternalSSHPublicKeys []string\n\tG5kKeepAllocatedResourceAtDeletion bool\n}\n\n\/\/ NewDriver creates and returns a new instance of the driver\nfunc NewDriver() *Driver {\n\treturn &Driver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tSSHUser: drivers.DefaultSSHUser,\n\t\t\tSSHPort: drivers.DefaultSSHPort,\n\t\t},\n\t}\n}\n\n\/\/ DriverName returns the name of the driver\nfunc (d *Driver) DriverName() string {\n\treturn \"g5k\"\n}\n\n\/\/ GetCreateFlags add command line flags to configure the driver\nfunc (d *Driver) GetCreateFlags() []mcnflag.Flag {\n\treturn []mcnflag.Flag{\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_USERNAME\",\n\t\t\tName: \"g5k-username\",\n\t\t\tUsage: \"Your Grid5000 account username\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_PASSWORD\",\n\t\t\tName: \"g5k-password\",\n\t\t\tUsage: \"Your Grid5000 account password\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_SITE\",\n\t\t\tName: \"g5k-site\",\n\t\t\tUsage: \"Site to reserve the resources on\",\n\t\t\tValue: \"\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_WALLTIME\",\n\t\t\tName: \"g5k-walltime\",\n\t\t\tUsage: \"Machine's lifetime (HH:MM:SS)\",\n\t\t\tValue: \"1:00:00\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_IMAGE\",\n\t\t\tName: \"g5k-image\",\n\t\t\tUsage: \"Name of the image (environment) to deploy on the node\",\n\t\t\tValue: g5kReferenceEnvironmentName,\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_RESOURCE_PROPERTIES\",\n\t\t\tName: \"g5k-resource-properties\",\n\t\t\tUsage: \"Resource selection with OAR properties (SQL format)\",\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_SKIP_VPN_CHECKS\",\n\t\t\tName: \"g5k-skip-vpn-checks\",\n\t\t\tUsage: \"Skip the VPN client connection and DNS configuration checks (for specific use case only, you should not enable this flag in normal use)\",\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_REUSE_REF_ENVIRONMENT\",\n\t\t\tName: \"g5k-reuse-ref-environment\",\n\t\t\tUsage: \"Reuse the Grid'5000 reference environment instead of re-deploying the node (it saves a lot of time)\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_JOB_QUEUE\",\n\t\t\tName: \"g5k-job-queue\",\n\t\t\tUsage: \"Specify the job queue (besteffort is NOT supported)\",\n\t\t\tValue: \"default\",\n\t\t},\n\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"G5K_MAKE_RESOURCE_RESERVATION\",\n\t\t\tName: \"g5k-make-resource-reservation\",\n\t\t\tUsage: \"Make a resource reservation for the given start date. (in either 'YYYY-MM-DD HH:MM:SS' date format or an UNIX timestamp)\",\n\t\t},\n\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"G5K_USE_RESOURCE_RESERVATION\",\n\t\t\tName: \"g5k-use-resource-reservation\",\n\t\t\tUsage: \"Use a resource reservation (need to be a job of 'deploy' type and in the 'running' state)\",\n\t\t},\n\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"G5K_EXTERNAL_SSH_PUBLIC_KEYS\",\n\t\t\tName: \"g5k-external-ssh-public-keys\",\n\t\t\tUsage: \"Additional SSH public key(s) allowed to connect to the node (in authorized_keys format)\",\n\t\t\tValue: []string{},\n\t\t},\n\n\t\tmcnflag.BoolFlag{\n\t\t\tEnvVar: \"G5K_KEEP_RESOURCE_AT_DELETION\",\n\t\t\tName: \"g5k-keep-resource-at-deletion\",\n\t\t\tUsage: \"Keep the allocated resource when removing the machine (the job will NOT be killed)\",\n\t\t},\n\t}\n}\n\n\/\/ SetConfigFromFlags configure the driver from the command line arguments\nfunc (d *Driver) SetConfigFromFlags(opts drivers.DriverOptions) error {\n\td.G5kUsername = opts.String(\"g5k-username\")\n\td.G5kPassword = opts.String(\"g5k-password\")\n\td.G5kSite = opts.String(\"g5k-site\")\n\td.G5kWalltime = opts.String(\"g5k-walltime\")\n\td.G5kImage = opts.String(\"g5k-image\")\n\td.G5kResourceProperties = opts.String(\"g5k-resource-properties\")\n\td.G5kSkipVpnChecks = opts.Bool(\"g5k-skip-vpn-checks\")\n\td.G5kReuseRefEnvironment = opts.Bool(\"g5k-reuse-ref-environment\")\n\td.G5kJobQueue = opts.String(\"g5k-job-queue\")\n\td.G5kJobStartTime = opts.String(\"g5k-make-resource-reservation\")\n\td.G5kJobID = opts.Int(\"g5k-use-resource-reservation\")\n\td.ExternalSSHPublicKeys = opts.StringSlice(\"g5k-external-ssh-public-keys\")\n\td.G5kKeepAllocatedResourceAtDeletion = opts.Bool(\"g5k-keep-resource-at-deletion\")\n\n\t\/\/ Docker Swarm\n\td.BaseDriver.SetSwarmConfigFromFlags(opts)\n\n\t\/\/ username is required\n\tif d.G5kUsername == \"\" {\n\t\treturn fmt.Errorf(\"You must give your Grid5000 account username\")\n\t}\n\n\t\/\/ password is required\n\tif d.G5kPassword == \"\" {\n\t\treturn fmt.Errorf(\"You must give your Grid5000 account password\")\n\t}\n\n\t\/\/ site is required\n\tif d.G5kSite == \"\" {\n\t\treturn fmt.Errorf(\"You must give the site you want to reserve the resources on\")\n\t}\n\n\t\/\/ contradictory use of parameters: providing an image to deploy while trying to reuse the reference environment\n\tif d.G5kReuseRefEnvironment && d.G5kImage != g5kReferenceEnvironmentName {\n\t\treturn fmt.Errorf(\"You have to choose between reusing the reference environment or redeploying the node with another image\")\n\t}\n\n\t\/\/ we cannot reuse the reference environment when the job is of type 'deploy'\n\tif d.G5kReuseRefEnvironment && (d.G5kJobStartTime != \"\" || d.G5kJobID != 0) {\n\t\treturn fmt.Errorf(\"Reusing the Grid'5000 reference environment on a resource reservation is not supported\")\n\t}\n\n\t\/\/ warn if user disable VPN check\n\tif d.G5kSkipVpnChecks {\n\t\tlog.Warn(\"VPN client connection and DNS configuration checks are disabled\")\n\t}\n\n\t\/\/ we cannot use the besteffort queue with docker-machine\n\tif d.G5kJobQueue == \"besteffort\" {\n\t\treturn fmt.Errorf(\"The besteffort queue is not supported\")\n\t}\n\n\treturn nil\n}\n\n\/\/ GetIP returns an IP or hostname that this host is available at\nfunc (d *Driver) GetIP() (string, error) {\n\tif d.IPAddress == \"\" {\n\t\tjob, err := d.G5kAPI.GetJob(d.G5kJobID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(job.Nodes) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"Failed to resolve IP address: The node have not been allocated\")\n\t\t}\n\n\t\td.IPAddress = job.Nodes[0]\n\t}\n\n\treturn d.IPAddress, nil\n}\n\n\/\/ GetMachineName returns the machine name\nfunc (d *Driver) GetMachineName() string {\n\treturn d.BaseDriver.GetMachineName()\n}\n\n\/\/ GetSSHHostname returns the machine hostname\nfunc (d *Driver) GetSSHHostname() (string, error) {\n\treturn d.GetIP()\n}\n\n\/\/ GetSSHKeyPath returns the ssh private key path\nfunc (d *Driver) GetSSHKeyPath() string {\n\treturn d.BaseDriver.GetSSHKeyPath()\n}\n\n\/\/ GetSSHPort returns the ssh port\nfunc (d *Driver) GetSSHPort() (int, error) {\n\treturn d.BaseDriver.GetSSHPort()\n}\n\n\/\/ GetSSHUsername returns the ssh user name\nfunc (d *Driver) GetSSHUsername() string {\n\treturn d.BaseDriver.GetSSHUsername()\n}\n\n\/\/ GetURL returns a Docker compatible host URL for connecting to this host\nfunc (d *Driver) GetURL() (string, error) {\n\tif err := drivers.MustBeRunning(d); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tip, err := d.GetIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tu := url.URL{\n\t\tScheme: \"tcp\",\n\t\tHost: net.JoinHostPort(ip, \"2376\"),\n\t}\n\n\treturn u.String(), nil\n}\n\n\/\/ GetState returns the state of the node\nfunc (d *Driver) GetState() (state.State, error) {\n\t\/\/ get job state from API\n\tstatus, err := d.G5kAPI.GetJobState(d.G5kJobID)\n\tif err != nil {\n\t\treturn state.Error, err\n\t}\n\n\tswitch status {\n\tcase \"waiting\":\n\t\treturn state.Starting, nil\n\tcase \"launching\":\n\t\treturn state.Starting, nil\n\tcase \"running\":\n\t\treturn state.Running, nil\n\tcase \"hold\":\n\t\treturn state.Stopped, nil\n\tcase \"error\":\n\t\treturn state.Error, nil\n\tcase \"terminated\":\n\t\treturn state.Stopped, nil\n\tdefault:\n\t\treturn state.None, nil\n\t}\n}\n\n\/\/ PreCreateCheck check parameters and submit the job to Grid5000\nfunc (d *Driver) PreCreateCheck() error {\n\t\/\/ prepare the driver store dir\n\tif err := d.prepareDriverStoreDirectory(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check VPN connection if enabled\n\tif !d.G5kSkipVpnChecks {\n\t\tif err := d.checkVpnConnection(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ create API client\n\td.G5kAPI = api.NewClient(d.G5kUsername, d.G5kPassword, d.G5kSite)\n\n\t\/\/ load driver SSH public key\n\tif err := d.loadDriverSSHPublicKey(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check format of external SSH public keys\n\tfor _, externalSSHPubKey := range d.ExternalSSHPublicKeys {\n\t\t_, _, _, _, err := gossh.ParseAuthorizedKey([]byte(externalSSHPubKey))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"The external SSH public key '%s' is invalid: %s\", externalSSHPubKey, err.Error())\n\t\t}\n\t}\n\n\t\/\/ skip the job submission\/reservation if a job ID is provided\n\tif d.G5kJobID == 0 {\n\t\tif d.G5kJobStartTime == \"\" {\n\t\t\t\/\/ make a job submission: the resources will be reserved for immediate use\n\t\t\tif err := d.makeJobSubmission(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ make a job reservation: the resources will be reserved for a defined date\/time\n\t\t\tif err := d.makeJobReservation(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ stop the machine creation\n\t\t\treturn fmt.Errorf(\"The job reservation have been successfully sent. Don't forget to save the Job ID to create the machine when the resources are available\")\n\t\t}\n\t}\n\n\t\/\/ wait for job to be in 'running' state\n\tif err := d.waitUntilJobIsReady(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Create wait for the job to be running, deploy the OS image and copy the ssh keys\nfunc (d *Driver) Create() error {\n\t\/\/ get node hostname from API\n\tjob, err := d.G5kAPI.GetJob(d.G5kJobID)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.BaseDriver.IPAddress = job.Nodes[0]\n\n\t\/\/ deploy OS image to the node\n\tif err := d.deployImageToNode(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ copy driver SSH key pair to machine directory\n\tif err := mcnutils.CopyFile(d.getDriverSSHKeyPath(), d.GetSSHKeyPath()); err != nil {\n\t\treturn err\n\t}\n\tif err := mcnutils.CopyFile(d.getDriverSSHKeyPath()+\".pub\", d.GetSSHKeyPath()+\".pub\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove delete the resources reservation\nfunc (d *Driver) Remove() error {\n\t\/\/ keep the resource allocated if the user asked for it\n\tif !d.G5kKeepAllocatedResourceAtDeletion {\n\t\tlog.Infof(\"Killing job... (id: '%d')\", d.G5kJobID)\n\t\td.G5kAPI.KillJob(d.G5kJobID)\n\t}\n\n\treturn nil\n}\n\n\/\/ Kill don't do anything\nfunc (d *Driver) Kill() error {\n\treturn fmt.Errorf(\"The 'kill' operation is not supported on Grid'5000\")\n}\n\n\/\/ Start don't do anything\nfunc (d *Driver) Start() error {\n\treturn fmt.Errorf(\"The 'start' operation is not supported on Grid'5000\")\n}\n\n\/\/ Stop don't do anything\nfunc (d *Driver) Stop() error {\n\treturn fmt.Errorf(\"The 'stop' operation is not supported on Grid'5000\")\n}\n\n\/\/ Restart don't do anything\nfunc (d *Driver) Restart() error {\n\treturn fmt.Errorf(\"The 'restart' operation is not supported on Grid'5000\")\n}\n<|endoftext|>"} {"text":"<commit_before>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/QubitProducts\/bamboo\/configuration\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Describes an app process running\ntype Task struct {\n\tHost string\n\tPort int\n\tPorts []int\n}\n\n\/\/ A health check on the application\ntype HealthCheck struct {\n\t\/\/ One of TCP, HTTP or COMMAND\n\tProtocol string\n\t\/\/ The path (if Protocol is HTTP)\n\tPath string\n\t\/\/ The position of the port targeted in the ports array\n\tPortIndex int\n}\n\n\/\/ An app may have multiple processes\ntype App struct {\n\tId string\n\tMesosDnsId string\n\tEscapedId string\n\tHealthCheckPath string\n\tHealthCheckProtocol string\n\tHealthChecks []HealthCheck\n\tTasks []Task\n\tServicePort int\n\tServicePorts []int\n\tEnv map[string]string\n\tLabels map[string]string\n}\n\ntype AppList []App\n\nfunc (slice AppList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice AppList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice AppList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype marathonTaskList []marathonTask\n\ntype marathonTasks struct {\n\tTasks marathonTaskList `json:\"tasks\"`\n}\n\ntype marathonTask struct {\n\tAppId string\n\tId string\n\tHost string\n\tPorts []int\n\tServicePorts []int\n\tStartedAt string\n\tStagedAt string\n\tVersion string\n}\n\nfunc (slice marathonTaskList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice marathonTaskList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice marathonTaskList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype marathonApps struct {\n\tApps []marathonApp `json:\"apps\"`\n}\n\ntype marathonApp struct {\n\tId string `json:\"id\"`\n\tHealthChecks []marathonHealthCheck `json:\"healthChecks\"`\n\tPorts []int `json:\"ports\"`\n\tEnv map[string]string `json:\"env\"`\n\tLabels map[string]string `json:\"labels\"`\n}\n\ntype marathonHealthCheck struct {\n\tPath string `json:\"path\"`\n\tProtocol string `json:\"protocol\"`\n\tPortIndex int `json:\"portIndex\"`\n}\n\nfunc fetchMarathonApps(endpoint string, conf *configuration.Configuration) (map[string]marathonApp, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", endpoint+\"\/v2\/apps\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tif len(conf.Marathon.User) > 0 && len(conf.Marathon.Password) > 0 {\n\t\treq.SetBasicAuth(conf.Marathon.User, conf.Marathon.Password)\n\t}\n\tresponse, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer response.Body.Close()\n\tvar appResponse marathonApps\n\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(contents, &appResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdataById := map[string]marathonApp{}\n\n\tfor _, appConfig := range appResponse.Apps {\n\t\tdataById[appConfig.Id] = appConfig\n\t}\n\n\treturn dataById, nil\n}\n\nfunc fetchTasks(endpoint string, conf *configuration.Configuration) (map[string]marathonTaskList, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", endpoint+\"\/v2\/tasks\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tif len(conf.Marathon.User) > 0 && len(conf.Marathon.Password) > 0 {\n\t\treq.SetBasicAuth(conf.Marathon.User, conf.Marathon.Password)\n\t}\n\tresponse, err := client.Do(req)\n\n\tvar tasks marathonTasks\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tdefer response.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(contents, &tasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttaskList := tasks.Tasks\n\tsort.Sort(taskList)\n\n\ttasksById := map[string]marathonTaskList{}\n\tfor _, task := range taskList {\n\t\tif tasksById[task.AppId] == nil {\n\t\t\ttasksById[task.AppId] = marathonTaskList{}\n\t\t}\n\t\ttasksById[task.AppId] = append(tasksById[task.AppId], task)\n\t}\n\n\tfor _, task_list := range tasksById {\n\t\tsort.Sort(task_list)\n\t}\n\n\treturn tasksById, nil\n}\n\nfunc createApps(tasksById map[string]marathonTaskList, marathonApps map[string]marathonApp) AppList {\n\n\tapps := AppList{}\n\n\tfor appId, mApp := range marathonApps {\n\n\t\t\/\/ Try to handle old app id format without slashes\n\t\tappPath := appId\n\t\tif !strings.HasPrefix(appId, \"\/\") {\n\t\t\tappPath = \"\/\" + appId\n\t\t}\n\n\t\t\/\/ build App from marathonApp\n\t\tapp := App{\n\t\t\tId: appPath,\n\t\t\tMesosDnsId: getMesosDnsId(appPath),\n\t\t\tEscapedId: strings.Replace(appId, \"\/\", \"::\", -1),\n\t\t\tHealthCheckPath: parseHealthCheckPath(mApp.HealthChecks),\n\t\t\tHealthCheckProtocol: parseHealthCheckProtocol(mApp.HealthChecks),\n\t\t\tEnv: mApp.Env,\n\t\t\tLabels: mApp.Labels,\n\t\t}\n\n\t\tapp.HealthChecks = make([]HealthCheck, 0, len(mApp.HealthChecks))\n\t\tfor _, marathonCheck := range mApp.HealthChecks {\n\t\t\tcheck := HealthCheck{\n\t\t\t\tProtocol: marathonCheck.Protocol,\n\t\t\t\tPath: marathonCheck.Path,\n\t\t\t\tPortIndex: marathonCheck.PortIndex,\n\t\t\t}\n\t\t\tapp.HealthChecks = append(app.HealthChecks, check)\n\t\t}\n\n\t\tif len(mApp.Ports) > 0 {\n\t\t\tapp.ServicePort = mApp.Ports[0]\n\t\t\tapp.ServicePorts = mApp.Ports\n\t\t}\n\n\t\t\/\/ build Tasks for this App\n\t\ttasks := []Task{}\n\t\tfor _, mTask := range tasksById[appId] {\n\t\t\tif len(mTask.Ports) > 0 {\n\t\t\t\tt := Task{\n\t\t\t\t\tHost: mTask.Host,\n\t\t\t\t\tPort: mTask.Ports[0],\n\t\t\t\t\tPorts: mTask.Ports,\n\t\t\t\t}\n\t\t\t\ttasks = append(tasks, t)\n\t\t\t}\n\t\t}\n\t\tapp.Tasks = tasks\n\n\t\tapps = append(apps, app)\n\t}\n\treturn apps\n}\n\nfunc getMesosDnsId(appPath string) string {\n\t\/\/ split up groups and recombine for how mesos-dns\/consul\/etc use service name\n\t\/\/ \"\/nested\/group\/app\" -> \"app-group-nested\"\n\tgroups := strings.Split(appPath, \"\/\")\n\treverseGroups := []string{}\n\tfor i := len(groups) - 1; i >= 0; i-- {\n\t\tif groups[i] != \"\" {\n\t\t\treverseGroups = append(reverseGroups, groups[i])\n\t\t}\n\t}\n\treturn strings.Join(reverseGroups, \"-\")\n}\n\nfunc parseHealthCheckPath(checks []marathonHealthCheck) string {\n\tfor _, check := range checks {\n\t\tif check.Protocol != \"HTTP\" && check.Protocol != \"HTTPS\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn check.Path\n\t}\n\treturn \"\"\n}\n\n\/* maybe combine this with the above? *\/\nfunc parseHealthCheckProtocol(checks []marathonHealthCheck) string {\n\tfor _, check := range checks {\n\t\tif check.Protocol != \"HTTP\" && check.Protocol != \"HTTPS\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn check.Protocol\n\t}\n\treturn \"\"\n}\n\n\/*\n\tApps returns a struct that describes Marathon current app and their\n\tsub tasks information.\n\n\tParameters:\n\t\tendpoint: Marathon HTTP endpoint, e.g. http:\/\/localhost:8080\n*\/\nfunc FetchApps(maraconf configuration.Marathon, conf *configuration.Configuration) (AppList, error) {\n\n\tvar applist AppList\n\tvar err error\n\n\t\/\/ try all configured endpoints until one succeeds\n\tfor _, url := range maraconf.Endpoints() {\n\t\tapplist, err = _fetchApps(url, conf)\n\t\tif err == nil {\n\t\t\treturn applist, err\n\t\t}\n\t}\n\t\/\/ return last error\n\treturn nil, err\n}\n\nfunc _fetchApps(url string, conf *configuration.Configuration) (AppList, error) {\n\ttasks, err := fetchTasks(url, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarathonApps, err := fetchMarathonApps(url, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapps := createApps(tasks, marathonApps)\n\tsort.Sort(apps)\n\treturn apps, nil\n}\n<commit_msg>enable quote {{ $task.Id }}<commit_after>package marathon\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/QubitProducts\/bamboo\/configuration\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Describes an app process running\ntype Task struct {\n\tId string\n\tHost string\n\tPort int\n\tPorts []int\n}\n\n\/\/ A health check on the application\ntype HealthCheck struct {\n\t\/\/ One of TCP, HTTP or COMMAND\n\tProtocol string\n\t\/\/ The path (if Protocol is HTTP)\n\tPath string\n\t\/\/ The position of the port targeted in the ports array\n\tPortIndex int\n}\n\n\/\/ An app may have multiple processes\ntype App struct {\n\tId string\n\tMesosDnsId string\n\tEscapedId string\n\tHealthCheckPath string\n\tHealthCheckProtocol string\n\tHealthChecks []HealthCheck\n\tTasks []Task\n\tServicePort int\n\tServicePorts []int\n\tEnv map[string]string\n\tLabels map[string]string\n}\n\ntype AppList []App\n\nfunc (slice AppList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice AppList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice AppList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype marathonTaskList []marathonTask\n\ntype marathonTasks struct {\n\tTasks marathonTaskList `json:\"tasks\"`\n}\n\ntype marathonTask struct {\n\tAppId string\n\tId string\n\tHost string\n\tPorts []int\n\tServicePorts []int\n\tStartedAt string\n\tStagedAt string\n\tVersion string\n}\n\nfunc (slice marathonTaskList) Len() int {\n\treturn len(slice)\n}\n\nfunc (slice marathonTaskList) Less(i, j int) bool {\n\treturn slice[i].Id < slice[j].Id\n}\n\nfunc (slice marathonTaskList) Swap(i, j int) {\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\ntype marathonApps struct {\n\tApps []marathonApp `json:\"apps\"`\n}\n\ntype marathonApp struct {\n\tId string `json:\"id\"`\n\tHealthChecks []marathonHealthCheck `json:\"healthChecks\"`\n\tPorts []int `json:\"ports\"`\n\tEnv map[string]string `json:\"env\"`\n\tLabels map[string]string `json:\"labels\"`\n}\n\ntype marathonHealthCheck struct {\n\tPath string `json:\"path\"`\n\tProtocol string `json:\"protocol\"`\n\tPortIndex int `json:\"portIndex\"`\n}\n\nfunc fetchMarathonApps(endpoint string, conf *configuration.Configuration) (map[string]marathonApp, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", endpoint+\"\/v2\/apps\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tif len(conf.Marathon.User) > 0 && len(conf.Marathon.Password) > 0 {\n\t\treq.SetBasicAuth(conf.Marathon.User, conf.Marathon.Password)\n\t}\n\tresponse, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer response.Body.Close()\n\tvar appResponse marathonApps\n\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(contents, &appResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdataById := map[string]marathonApp{}\n\n\tfor _, appConfig := range appResponse.Apps {\n\t\tdataById[appConfig.Id] = appConfig\n\t}\n\n\treturn dataById, nil\n}\n\nfunc fetchTasks(endpoint string, conf *configuration.Configuration) (map[string]marathonTaskList, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", endpoint+\"\/v2\/tasks\", nil)\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tif len(conf.Marathon.User) > 0 && len(conf.Marathon.Password) > 0 {\n\t\treq.SetBasicAuth(conf.Marathon.User, conf.Marathon.Password)\n\t}\n\tresponse, err := client.Do(req)\n\n\tvar tasks marathonTasks\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tdefer response.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(contents, &tasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttaskList := tasks.Tasks\n\tsort.Sort(taskList)\n\n\ttasksById := map[string]marathonTaskList{}\n\tfor _, task := range taskList {\n\t\tif tasksById[task.AppId] == nil {\n\t\t\ttasksById[task.AppId] = marathonTaskList{}\n\t\t}\n\t\ttasksById[task.AppId] = append(tasksById[task.AppId], task)\n\t}\n\n\tfor _, task_list := range tasksById {\n\t\tsort.Sort(task_list)\n\t}\n\n\treturn tasksById, nil\n}\n\nfunc createApps(tasksById map[string]marathonTaskList, marathonApps map[string]marathonApp) AppList {\n\n\tapps := AppList{}\n\n\tfor appId, mApp := range marathonApps {\n\n\t\t\/\/ Try to handle old app id format without slashes\n\t\tappPath := appId\n\t\tif !strings.HasPrefix(appId, \"\/\") {\n\t\t\tappPath = \"\/\" + appId\n\t\t}\n\n\t\t\/\/ build App from marathonApp\n\t\tapp := App{\n\t\t\tId: appPath,\n\t\t\tMesosDnsId: getMesosDnsId(appPath),\n\t\t\tEscapedId: strings.Replace(appId, \"\/\", \"::\", -1),\n\t\t\tHealthCheckPath: parseHealthCheckPath(mApp.HealthChecks),\n\t\t\tHealthCheckProtocol: parseHealthCheckProtocol(mApp.HealthChecks),\n\t\t\tEnv: mApp.Env,\n\t\t\tLabels: mApp.Labels,\n\t\t}\n\n\t\tapp.HealthChecks = make([]HealthCheck, 0, len(mApp.HealthChecks))\n\t\tfor _, marathonCheck := range mApp.HealthChecks {\n\t\t\tcheck := HealthCheck{\n\t\t\t\tProtocol: marathonCheck.Protocol,\n\t\t\t\tPath: marathonCheck.Path,\n\t\t\t\tPortIndex: marathonCheck.PortIndex,\n\t\t\t}\n\t\t\tapp.HealthChecks = append(app.HealthChecks, check)\n\t\t}\n\n\t\tif len(mApp.Ports) > 0 {\n\t\t\tapp.ServicePort = mApp.Ports[0]\n\t\t\tapp.ServicePorts = mApp.Ports\n\t\t}\n\n\t\t\/\/ build Tasks for this App\n\t\ttasks := []Task{}\n\t\tfor _, mTask := range tasksById[appId] {\n\t\t\tif len(mTask.Ports) > 0 {\n\t\t\t\tt := Task{\n\t\t\t\t\tId: mTask.Id,\n\t\t\t\t\tHost: mTask.Host,\n\t\t\t\t\tPort: mTask.Ports[0],\n\t\t\t\t\tPorts: mTask.Ports,\n\t\t\t\t}\n\t\t\t\ttasks = append(tasks, t)\n\t\t\t}\n\t\t}\n\t\tapp.Tasks = tasks\n\n\t\tapps = append(apps, app)\n\t}\n\treturn apps\n}\n\nfunc getMesosDnsId(appPath string) string {\n\t\/\/ split up groups and recombine for how mesos-dns\/consul\/etc use service name\n\t\/\/ \"\/nested\/group\/app\" -> \"app-group-nested\"\n\tgroups := strings.Split(appPath, \"\/\")\n\treverseGroups := []string{}\n\tfor i := len(groups) - 1; i >= 0; i-- {\n\t\tif groups[i] != \"\" {\n\t\t\treverseGroups = append(reverseGroups, groups[i])\n\t\t}\n\t}\n\treturn strings.Join(reverseGroups, \"-\")\n}\n\nfunc parseHealthCheckPath(checks []marathonHealthCheck) string {\n\tfor _, check := range checks {\n\t\tif check.Protocol != \"HTTP\" && check.Protocol != \"HTTPS\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn check.Path\n\t}\n\treturn \"\"\n}\n\n\/* maybe combine this with the above? *\/\nfunc parseHealthCheckProtocol(checks []marathonHealthCheck) string {\n\tfor _, check := range checks {\n\t\tif check.Protocol != \"HTTP\" && check.Protocol != \"HTTPS\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn check.Protocol\n\t}\n\treturn \"\"\n}\n\n\/*\n\tApps returns a struct that describes Marathon current app and their\n\tsub tasks information.\n\n\tParameters:\n\t\tendpoint: Marathon HTTP endpoint, e.g. http:\/\/localhost:8080\n*\/\nfunc FetchApps(maraconf configuration.Marathon, conf *configuration.Configuration) (AppList, error) {\n\n\tvar applist AppList\n\tvar err error\n\n\t\/\/ try all configured endpoints until one succeeds\n\tfor _, url := range maraconf.Endpoints() {\n\t\tapplist, err = _fetchApps(url, conf)\n\t\tif err == nil {\n\t\t\treturn applist, err\n\t\t}\n\t}\n\t\/\/ return last error\n\treturn nil, err\n}\n\nfunc _fetchApps(url string, conf *configuration.Configuration) (AppList, error) {\n\ttasks, err := fetchTasks(url, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarathonApps, err := fetchMarathonApps(url, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapps := createApps(tasks, marathonApps)\n\tsort.Sort(apps)\n\treturn apps, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sdl\n\n\/*\n#include \"sdl_wrapper.h\"\n\nvoid GoSetError(const char *fmt) {\n SDL_SetError(\"%s\", fmt);\n}\n*\/\n\/\/ #include \"sdl_wrapper.h\"\nimport \"C\"\nimport \"errors\"\n\n\/\/ SDL error codes with their corresponding predefined strings.\nconst (\n\tENOMEM ErrorCode = C.SDL_ENOMEM \/\/ out of memory\n\tEFREAD = C.SDL_EFREAD \/\/ error reading from datastream\n\tEFWRITE = C.SDL_EFWRITE \/\/ error writing to datastream\n\tEFSEEK = C.SDL_EFSEEK \/\/ error seeking in datastream\n\tUNSUPPORTED = C.SDL_UNSUPPORTED \/\/ that operation is not supported\n\tLASTERROR = C.SDL_LASTERROR \/\/ the highest numbered predefined error\n)\n\n\/\/ ErrorCode is an error code used in SDL error messages.\ntype ErrorCode uint32\ntype cErrorCode C.SDL_errorcode\n\nfunc (ec ErrorCode) c() C.SDL_errorcode {\n\treturn C.SDL_errorcode(ec)\n}\n\n\/\/ GetError returns the last error that occurred, or an empty string if there hasn't been an error message set since the last call to ClearError().\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_GetError)\nfunc GetError() error {\n\tif err := C.SDL_GetError(); err != nil {\n\t\tgostr := C.GoString(err)\n\t\t\/\/ SDL_GetError returns \"an empty string if there hasn't been an error message\"\n\t\tif len(gostr) > 0 {\n\t\t\treturn errors.New(gostr)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetError set the SDL error message.\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_SetError)\nfunc SetError(err error) {\n\tif err != nil {\n\t\tC.GoSetError(C.CString(err.Error()))\n\t\treturn\n\t}\n\tC.GoSetError(nil)\n}\n\n\/\/ ClearError clears any previous error message.\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_ClearError)\nfunc ClearError() {\n\tC.SDL_ClearError()\n}\n\n\/\/ Error sets the SDL error message to the specified error code.\nfunc Error(code ErrorCode) {\n\tC.SDL_Error(code.c())\n}\n\n\/\/ OutOfMemory sets SDL error message to ENOMEM (out of memory).\nfunc OutOfMemory() {\n\tError(ENOMEM)\n}\n\n\/\/ Unsupported sets SDL error message to UNSUPPORTED (that operation is not supported).\nfunc Unsupported() {\n\tError(UNSUPPORTED)\n}\n<commit_msg>sdl: error: Fix crash on certain Windows environment<commit_after>package sdl\n\n\/*\n#include \"sdl_wrapper.h\"\n\nvoid GoSetError(const char *fmt) {\n SDL_SetError(\"%s\", fmt);\n}\n\n*\/\n\/\/ #include \"sdl_wrapper.h\"\nimport \"C\"\nimport \"errors\"\n\nvar emptyCString *C.char = C.CString(\"\")\n\n\/\/ SDL error codes with their corresponding predefined strings.\nconst (\n\tENOMEM ErrorCode = C.SDL_ENOMEM \/\/ out of memory\n\tEFREAD = C.SDL_EFREAD \/\/ error reading from datastream\n\tEFWRITE = C.SDL_EFWRITE \/\/ error writing to datastream\n\tEFSEEK = C.SDL_EFSEEK \/\/ error seeking in datastream\n\tUNSUPPORTED = C.SDL_UNSUPPORTED \/\/ that operation is not supported\n\tLASTERROR = C.SDL_LASTERROR \/\/ the highest numbered predefined error\n)\n\n\/\/ ErrorCode is an error code used in SDL error messages.\ntype ErrorCode uint32\ntype cErrorCode C.SDL_errorcode\n\nfunc (ec ErrorCode) c() C.SDL_errorcode {\n\treturn C.SDL_errorcode(ec)\n}\n\n\/\/ GetError returns the last error that occurred, or an empty string if there hasn't been an error message set since the last call to ClearError().\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_GetError)\nfunc GetError() error {\n\tif err := C.SDL_GetError(); err != nil {\n\t\tgostr := C.GoString(err)\n\t\t\/\/ SDL_GetError returns \"an empty string if there hasn't been an error message\"\n\t\tif len(gostr) > 0 {\n\t\t\treturn errors.New(gostr)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetError set the SDL error message.\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_SetError)\nfunc SetError(err error) {\n\tif err != nil {\n\t\tC.GoSetError(C.CString(err.Error()))\n\t\treturn\n\t}\n\tC.GoSetError(emptyCString)\n}\n\n\/\/ ClearError clears any previous error message.\n\/\/ (https:\/\/wiki.libsdl.org\/SDL_ClearError)\nfunc ClearError() {\n\tC.SDL_ClearError()\n}\n\n\/\/ Error sets the SDL error message to the specified error code.\nfunc Error(code ErrorCode) {\n\tC.SDL_Error(code.c())\n}\n\n\/\/ OutOfMemory sets SDL error message to ENOMEM (out of memory).\nfunc OutOfMemory() {\n\tError(ENOMEM)\n}\n\n\/\/ Unsupported sets SDL error message to UNSUPPORTED (that operation is not supported).\nfunc Unsupported() {\n\tError(UNSUPPORTED)\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar (\n\tmgmtUrl = \"localhost:15672\/\"\n\tusername = \"guest\"\n\tpassword = \"guest\"\n)\n\nfunc TestMain(m *testing.M) {\n\tos.Exit(RunTests(m))\n\n}\n\nfunc RunTests(m *testing.M) int {\n\n\tlog.Println(\"Creating test exchange and queue...\")\n\tconn, err := amqp.Dial(os.Getenv(\"AMQP_URL\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer conn.Close()\n\n\tchannel, err := conn.Channel()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tchannel.ExchangeDeclare(\"test_exchange\", \"topic\", true, false, false, false, amqp.Table{})\n\tchannel.QueueDeclare(\"test_queue\", true, false, false, false, amqp.Table{})\n\tchannel.QueueBind(\"test_queue\", \"#\", \"test_exchange\", false, amqp.Table{})\n\n\tlog.Println(\"Exchange and queue created. Running unit tests...\")\n\n\treturn m.Run()\n\n}\n\nfunc TestClient(t *testing.T) {\n\n\tc := Client{MgmtUrl: mgmtUrl, Username: username, Password: password}\n\toverview, err := c.GetOverview()\n\tif reflect.DeepEqual(overview, Overview{}) {\n\t\tt.Errorf(\"Result from api\/overview is nil\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\textensions, err := c.GetExtensions()\n\tif len(extensions) == 0 {\n\t\tt.Errorf(\"Result from api\/extensions has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tnodes, err := c.GetNodes()\n\tif len(nodes) == 0 {\n\t\tt.Errorf(\"Result from api\/nodes has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tnode, err := c.GetNode(\"rabbit@localhost\")\n\tif reflect.DeepEqual(node, Node{}) {\n\t\tt.Errorf(\"Result from api\/node\/rabbit@localhost is nil\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tconnections, err := c.GetConnections()\n\tif len(connections) == 0 {\n\t\tt.Errorf(\"Result from api\/connections has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tconnections, err = c.GetConnectionsOnVhost(\"\/\")\n\tif len(connections) == 0 {\n\t\tt.Errorf(\"Result from api\/vhosts\/vhost\/connections has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tchannels, err := c.GetChannels()\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tchannels, err = c.GetChannelsOnVhost(\"\/\")\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/vhosts\/vhost\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tchannels, err = c.GetChannelsOnConnection(connections[0].Name)\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/connections\/connection\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\texchanges, err := c.GetExchanges()\n\tif len(exchanges) == 0 {\n\t\tt.Errorf(\"Result from api\/exchanges has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\texchangesOnVhost, err := c.GetExchangesOnVhost(\"\/\")\n\tif len(exchangesOnVhost) == 0 {\n\t\tt.Errorf(\"Result from api\/exchanges\/vhost on default vhost has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tbindings, err := c.GetBindings()\n\tif len(bindings) == 0 {\n\t\tt.Errorf(\"Result from api\/bindings has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tbindings, err = c.GetBindingsOnVhost(\"\/\")\n\tif len(bindings) == 0 {\n\t\tt.Errorf(\"Result from api\/bindings\/vhost has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tvhosts, err := c.GetVhosts()\n\tif len(vhosts) == 0 {\n\t\tt.Error(\"Result from api\/vhosts has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tpermissions, err := c.GetVhostPermissions(\"\/\")\n\tif len(permissions) == 0 {\n\t\tt.Error(\"Result from api\/vhost\/name\/permissions has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tusers, err := c.GetUsers()\n\tif len(users) == 0 {\n\t\tt.Error(\"Result from api\/users has no values\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tstatus, err := c.GetAlivenessTestForVhost(\"\/\")\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/aliveness-test\/vhost is nil\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tstatus, err = c.GetHealthcheckForCurrentNode()\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/healthchecks\/node is nil\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tstatus, err = c.GetHealthchecksForNode(\"rabbit@localhost\")\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/healthchecks\/node\/node is nil\")\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}\n<commit_msg>Properly handle testing errors<commit_after>package api\n\nimport (\n\t\"github.com\/streadway\/amqp\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar (\n\tmgmtUrl = \"localhost:15672\/\"\n\tusername = \"guest\"\n\tpassword = \"guest\"\n)\n\nfunc TestMain(m *testing.M) {\n\tos.Exit(RunTests(m))\n\n}\n\nfunc RunTests(m *testing.M) int {\n\n\tlog.Println(\"Creating test exchange and queue...\")\n\tconn, err := amqp.Dial(os.Getenv(\"AMQP_URL\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer conn.Close()\n\n\tchannel, err := conn.Channel()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tchannel.ExchangeDeclare(\"test_exchange\", \"topic\", true, false, false, false, amqp.Table{})\n\tchannel.QueueDeclare(\"test_queue\", true, false, false, false, amqp.Table{})\n\tchannel.QueueBind(\"test_queue\", \"#\", \"test_exchange\", false, amqp.Table{})\n\n\tlog.Println(\"Exchange and queue created. Running unit tests...\")\n\n\treturn m.Run()\n\n}\n\nfunc TestClient(t *testing.T) {\n\n\tc := Client{MgmtUrl: mgmtUrl, Username: username, Password: password}\n\toverview, err := c.GetOverview()\n\tif reflect.DeepEqual(overview, Overview{}) {\n\t\tt.Errorf(\"Result from api\/overview is nil\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\textensions, err := c.GetExtensions()\n\tif len(extensions) == 0 {\n\t\tt.Errorf(\"Result from api\/extensions has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tnodes, err := c.GetNodes()\n\tif len(nodes) == 0 {\n\t\tt.Errorf(\"Result from api\/nodes has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tnode, err := c.GetNode(\"rabbit@localhost\")\n\tif reflect.DeepEqual(node, Node{}) {\n\t\tt.Errorf(\"Result from api\/node\/rabbit@localhost is nil\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tconnections, err := c.GetConnections()\n\tif len(connections) == 0 {\n\t\tt.Errorf(\"Result from api\/connections has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tconnections, err = c.GetConnectionsOnVhost(\"\/\")\n\tif len(connections) == 0 {\n\t\tt.Errorf(\"Result from api\/vhosts\/vhost\/connections has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tchannels, err := c.GetChannels()\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tchannels, err = c.GetChannelsOnVhost(\"\/\")\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/vhosts\/vhost\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tchannels, err = c.GetChannelsOnConnection(connections[0].Name)\n\tif len(channels) == 0 {\n\t\tt.Errorf(\"Result from api\/connections\/connection\/channels has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texchanges, err := c.GetExchanges()\n\tif len(exchanges) == 0 {\n\t\tt.Errorf(\"Result from api\/exchanges has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texchangesOnVhost, err := c.GetExchangesOnVhost(\"\/\")\n\tif len(exchangesOnVhost) == 0 {\n\t\tt.Errorf(\"Result from api\/exchanges\/vhost on default vhost has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tbindings, err := c.GetBindings()\n\tif len(bindings) == 0 {\n\t\tt.Errorf(\"Result from api\/bindings has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tbindings, err = c.GetBindingsOnVhost(\"\/\")\n\tif len(bindings) == 0 {\n\t\tt.Errorf(\"Result from api\/bindings\/vhost has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tvhosts, err := c.GetVhosts()\n\tif len(vhosts) == 0 {\n\t\tt.Error(\"Result from api\/vhosts has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tpermissions, err := c.GetVhostPermissions(\"\/\")\n\tif len(permissions) == 0 {\n\t\tt.Error(\"Result from api\/vhost\/name\/permissions has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tusers, err := c.GetUsers()\n\tif len(users) == 0 {\n\t\tt.Error(\"Result from api\/users has no values\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstatus, err := c.GetAlivenessTestForVhost(\"\/\")\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/aliveness-test\/vhost is nil\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstatus, err = c.GetHealthcheckForCurrentNode()\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/healthchecks\/node is nil\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstatus, err = c.GetHealthchecksForNode(\"rabbit@localhost\")\n\tif reflect.DeepEqual(status, Status{}) {\n\t\tt.Error(\"Result from api\/healthchecks\/node\/node is nil\")\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2014-2020 Jay R. Wren <jrwren@xmtp.net>.\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/dnsimple\/dnsimple-go\/dnsimple\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nvar verbose = flag.Bool(\"v\", false, \"Use verbose output\")\nvar list = flag.Bool(\"l\", false, \"List domains.\")\nvar update = flag.String(\"u\", \"\", \"Update or create record. The format is 'domain name type oldvalue newvlaue ttl'.\\n(use - for oldvalue to create a new record)\")\nvar del = flag.String(\"d\", \"\", \"Delete record. The format is 'domain name type value'\")\nvar format = flag.String(\"f\", \"plain\", \"Output zones in {plain, json, table} format\")\nvar typeR = flag.String(\"t\", \"\", \"record type to query for\")\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"domasimu <domainname> will list records in the domain\")\n\t\tfmt.Println(\"\")\n\t\tfmt.Fprintln(os.Stderr, \"domasimu config file example:\")\n\t\terr := toml.NewEncoder(os.Stderr).Encode(Config{\"you@example.com\", \"TOKENHERE1234\"})\n\t\tif err != nil {\n\t\t\tfmt.Println(err) \/\/ This is impossible, but errcheck lint. 😳\n\t\t}\n\t}\n\tflag.Parse()\n\n\tswitch *format {\n\tcase \"plain\":\n\tcase \"table\":\n\tcase \"json\":\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, \"could not use specified format\", *format)\n\t\treturn\n\t}\n\n\tif len(os.Args) == 1 {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\t_, token, err := getCreds()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not read config\", err)\n\t\treturn\n\t}\n\n\tts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})\n\ttc := oauth2.NewClient(context.Background(), ts)\n\n\tclient := dnsimple.NewClient(tc)\n\twhoamiResponse, err := client.Identity.Whoami()\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not connect to dnsimple\", err)\n\t\treturn\n\t}\n\n\tif whoamiResponse.Data.Account == nil {\n\t\tfmt.Fprintln(os.Stderr, \"you need to use account token instead of user token\")\n\t\treturn\n\t}\n\taccountID := strconv.FormatInt(whoamiResponse.Data.Account.ID, 10)\n\n\tif *list {\n\t\tdomainsResponse, err := client.Domains.ListDomains(accountID, nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not get domains %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, domain := range domainsResponse.Data {\n\t\t\tif *verbose {\n\t\t\t\tfmt.Println(domain.Name, domain.ExpiresOn)\n\t\t\t} else {\n\t\t\t\tfmt.Println(domain.Name)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif *update != \"\" {\n\t\tid, err := createOrUpdate(client, *update, accountID)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"could not create or update:\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"record written with id %s\\n\", id)\n\t\t}\n\t\treturn\n\t}\n\tif *del != \"\" {\n\t\tid, err := deleteRecord(client, *del, accountID)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"could not delete:\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"record deleted with id %s\\n\", id)\n\t\t}\n\t\treturn\n\t}\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\toptions := &dnsimple.ZoneRecordListOptions{}\n\tif *typeR != `` {\n\t\toptions.Type = *typeR\n\t}\n\trecords, err := listRecords(client, accountID, flag.Args()[0], options)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not get records:\", err)\n\t}\n\tif *verbose {\n\t\tlog.Println(\"found \", len(records), \" records\")\n\t}\n\tsort.Sort(records)\n\n\terr = FormatZoneRecords(records, *format)\n\tif err != nil {\n\t\tlog.Println(\"error: err\")\n\t}\n}\n\n\/\/ listRecords pages through records\nfunc listRecords(client *dnsimple.Client, accountID, domain string,\n\toptions *dnsimple.ZoneRecordListOptions) (records zoneRecords, err error) {\n\tif options == nil {\n\t\toptions = &dnsimple.ZoneRecordListOptions{}\n\t}\n\tfor p := 1; ; p++ {\n\t\tlistZoneRecordsResponse, err := client.Zones.ListRecords(accountID, domain, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := range listZoneRecordsResponse.Data {\n\t\t\trecords = append(records, listZoneRecordsResponse.Data[i])\n\t\t}\n\t\tif options.Page == 0 {\n\t\t\toptions.Page = 2\n\t\t} else {\n\t\t\toptions.Page++\n\t\t}\n\t\tif p >= listZoneRecordsResponse.Pagination.TotalPages {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FormatZoneRecords takes a slice of dnsimple.ZoneRecord and formats it in the specified format.\nfunc FormatZoneRecords(zones []dnsimple.ZoneRecord, format string) error {\n\tif format == \"json\" {\n\t\terr := json.NewEncoder(os.Stdout).Encode(zones)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif format == \"table\" {\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\t\tfmt.Printf(\"| %-30s | %-5s | %-7s | %-30s |\\n\", \"Name\", \"Type\", \"TTL\", \"Content\")\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\t}\n\tfor _, zone := range zones {\n\t\tif zone.Name == `` {\n\t\t\tzone.Name = `.`\n\t\t}\n\t\tswitch format {\n\t\tcase \"plain\":\n\t\t\tif *verbose {\n\t\t\t\tfmt.Printf(\"%s %s %s %d (%d) %s %v\\n\", zone.ZoneID, zone.Name, zone.Type, zone.Priority, zone.TTL, zone.Content, zone.UpdatedAt)\n\t\t\t} else if flag.NFlag() > 1 {\n\t\t\t\tfmt.Printf(\"%s %s %s (%d) %s\\n\", zone.ZoneID, zone.Name, zone.Type, zone.TTL, zone.Content)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%s %s (%d) %s\\n\", zone.Name, zone.Type, zone.TTL, zone.Content)\n\n\t\t\t}\n\t\tcase \"table\":\n\t\t\tfmt.Printf(\"| %-30s | %-5s | %7d | %-30s |\\n\", zone.Name, zone.Type, zone.TTL, zone.Content)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid format %v\", format)\n\t\t}\n\t}\n\tif format == \"table\" {\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\n\t}\n\treturn nil\n}\n\ntype zoneRecords []dnsimple.ZoneRecord\n\nfunc (z zoneRecords) Len() int { return len(z) }\nfunc (z zoneRecords) Less(i, j int) bool { return z[i].Name < z[j].Name }\nfunc (z zoneRecords) Swap(i, j int) { z[i], z[j] = z[j], z[i] }\n\nvar configFileName = func() string {\n\tif os.Getenv(\"DOMASIMU_CONF\") != \"\" {\n\t\treturn os.Getenv(\"DOMASIMU_CONF\")\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn filepath.Join(os.Getenv(\"LOCALAPPDATA\"), \"Domasimu\", \"config\")\n\tcase \"darwin\":\n\t\tf := filepath.Join(os.Getenv(\"HOME\"), \"Library\", \"Application Support\", \"Domasimu\", \"config\")\n\t\tfh, err := os.Open(f)\n\t\tif err == nil {\n\t\t\tfh.Close()\n\t\t\treturn f\n\t\t}\n\t}\n\tif os.Getenv(\"XDG_CONFIG_HOME\") != \"\" {\n\t\tf := filepath.Join(os.Getenv(\"XDG_CONFIG_HOME\"), \"domasimu\", \"config\")\n\t\tfh, err := os.Open(f)\n\t\tif err == nil {\n\t\t\tfh.Close()\n\t\t\treturn f\n\t\t}\n\t}\n\tf := filepath.Join(os.Getenv(\"HOME\"), \".config\", \"domasimu\", \"config\")\n\tfh, err := os.Open(f)\n\tif err == nil {\n\t\tfh.Close()\n\t\treturn f\n\t}\n\treturn filepath.Join(os.Getenv(\"HOME\"), \".domasimurc\")\n}()\n\nfunc getCreds() (string, string, error) {\n\tvar config Config\n\t_, err := toml.DecodeFile(configFileName, &config)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn config.User, config.Token, nil\n}\n\n\/\/ Config represents the user and token config for dnsimple.\ntype Config struct {\n\tUser string\n\tToken string\n}\n\nfunc createOrUpdate(client *dnsimple.Client, message string, accountID string) (string, error) {\n\tpieces := strings.Split(message, \" \")\n\tif len(pieces) != 6 {\n\t\treturn \"\", fmt.Errorf(\"expected space seperated domain, name, type, oldvalue, newvalue, ttl\")\n\t}\n\n\tdomain := pieces[0]\n\tchangeRecord := dnsimple.ZoneRecord{\n\t\tName: pieces[1],\n\t\tType: pieces[2],\n\t}\n\toldValue := pieces[3]\n\tnewRecord := changeRecord\n\tnewRecord.Content = pieces[4]\n\tttl, err := strconv.Atoi(pieces[5])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not convert %s to int: %w\", pieces[5], err)\n\t}\n\tnewRecord.TTL = ttl\n\tid, err := getRecordIDByValue(client, domain, oldValue, accountID, &changeRecord)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar respID string\n\tif id == 0 {\n\t\tzoneRecordResponse, err := client.Zones.CreateRecord(accountID, domain, newRecord)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\trespID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tzoneRecordResponse, err := client.Zones.UpdateRecord(accountID, domain, id, newRecord)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\trespID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)\n\t}\n\n\treturn respID, nil\n}\n\nfunc deleteRecord(client *dnsimple.Client, message, accountID string) (string, error) {\n\tpieces := strings.Split(message, \" \")\n\tif len(pieces) != 4 {\n\t\treturn \"\", fmt.Errorf(\"expected space seperated domain, name, type, value\")\n\t}\n\tdomain := pieces[0]\n\tchangeRecord := dnsimple.ZoneRecord{\n\t\tName: pieces[1],\n\t\tType: pieces[2],\n\t}\n\tvalue := pieces[3]\n\tid, err := getRecordIDByValue(client, domain, value, accountID, &changeRecord)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif id == 0 {\n\t\treturn \"\", fmt.Errorf(\"could not find record\")\n\t}\n\t_, err = client.Zones.DeleteRecord(accountID, domain, id)\n\trespID := strconv.FormatInt(id, 10)\n\n\treturn respID, err\n}\n\nfunc getRecordIDByValue(client *dnsimple.Client, domain, value, accountID string, changeRecord *dnsimple.ZoneRecord) (int64, error) {\n\toptions := &dnsimple.ZoneRecordListOptions{}\n\tif changeRecord.Type != \"\" {\n\t\toptions.Type = changeRecord.Type\n\t}\n\trecordResponse, err := listRecords(client, accountID, domain, options)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar id int64\n\tfor _, record := range recordResponse {\n\t\tif record.Name == changeRecord.Name && record.Type == changeRecord.Type && record.Content == value {\n\t\t\tid = record.ID\n\t\t\tbreak\n\t\t}\n\t}\n\treturn id, nil\n}\n<commit_msg>add value option (#19)<commit_after>\/\/ Copyright © 2014-2020 Jay R. Wren <jrwren@xmtp.net>.\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/dnsimple\/dnsimple-go\/dnsimple\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nvar verbose = flag.Bool(\"v\", false, \"Use verbose output\")\nvar list = flag.Bool(\"l\", false, \"List domains.\")\nvar update = flag.String(\"u\", \"\", \"Update or create record. The format is 'domain name type oldvalue newvlaue ttl'.\\n(use - for oldvalue to create a new record)\")\nvar value = flag.String(\"value\", \"\", \"Alt value to use for create or update\")\nvar del = flag.String(\"d\", \"\", \"Delete record. The format is 'domain name type value'\")\nvar format = flag.String(\"f\", \"plain\", \"Output zones in {plain, json, table} format\")\nvar typeR = flag.String(\"t\", \"\", \"record type to query for\")\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"domasimu <domainname> will list records in the domain\")\n\t\tfmt.Println(\"\")\n\t\tfmt.Println(\"domasimu -value='v=spf1 mx -all' -u 'example.com mail TXT - - 300' will add an SPF record\")\n\t\tfmt.Println(\"\")\n\t\tfmt.Fprintln(os.Stderr, \"domasimu config file example:\")\n\t\terr := toml.NewEncoder(os.Stderr).Encode(Config{\"you@example.com\", \"TOKENHERE1234\"})\n\t\tif err != nil {\n\t\t\tfmt.Println(err) \/\/ This is impossible, but errcheck lint. 😳\n\t\t}\n\t}\n\tflag.Parse()\n\n\tswitch *format {\n\tcase \"plain\":\n\tcase \"table\":\n\tcase \"json\":\n\tdefault:\n\t\tfmt.Fprintln(os.Stderr, \"could not use specified format\", *format)\n\t\treturn\n\t}\n\n\tif len(os.Args) == 1 {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\t_, token, err := getCreds()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not read config\", err)\n\t\treturn\n\t}\n\n\tts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})\n\ttc := oauth2.NewClient(context.Background(), ts)\n\n\tclient := dnsimple.NewClient(tc)\n\twhoamiResponse, err := client.Identity.Whoami()\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not connect to dnsimple\", err)\n\t\treturn\n\t}\n\n\tif whoamiResponse.Data.Account == nil {\n\t\tfmt.Fprintln(os.Stderr, \"you need to use account token instead of user token\")\n\t\treturn\n\t}\n\taccountID := strconv.FormatInt(whoamiResponse.Data.Account.ID, 10)\n\n\tif *list {\n\t\tdomainsResponse, err := client.Domains.ListDomains(accountID, nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not get domains %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, domain := range domainsResponse.Data {\n\t\t\tif *verbose {\n\t\t\t\tfmt.Println(domain.Name, domain.ExpiresOn)\n\t\t\t} else {\n\t\t\t\tfmt.Println(domain.Name)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif *update != \"\" {\n\t\tid, err := createOrUpdate(client, *update, accountID)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"could not create or update:\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"record written with id %s\\n\", id)\n\t\t}\n\t\treturn\n\t}\n\tif *del != \"\" {\n\t\tid, err := deleteRecord(client, *del, accountID)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"could not delete:\", err)\n\t\t} else {\n\t\t\tfmt.Printf(\"record deleted with id %s\\n\", id)\n\t\t}\n\t\treturn\n\t}\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\treturn\n\t}\n\toptions := &dnsimple.ZoneRecordListOptions{}\n\tif *typeR != `` {\n\t\toptions.Type = *typeR\n\t}\n\trecords, err := listRecords(client, accountID, flag.Args()[0], options)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"could not get records:\", err)\n\t}\n\tif *verbose {\n\t\tlog.Println(\"found \", len(records), \" records\")\n\t}\n\tsort.Sort(records)\n\n\terr = FormatZoneRecords(records, *format)\n\tif err != nil {\n\t\tlog.Println(\"error: err\")\n\t}\n}\n\n\/\/ listRecords pages through records\nfunc listRecords(client *dnsimple.Client, accountID, domain string,\n\toptions *dnsimple.ZoneRecordListOptions) (records zoneRecords, err error) {\n\tif options == nil {\n\t\toptions = &dnsimple.ZoneRecordListOptions{}\n\t}\n\tfor p := 1; ; p++ {\n\t\tlistZoneRecordsResponse, err := client.Zones.ListRecords(accountID, domain, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := range listZoneRecordsResponse.Data {\n\t\t\trecords = append(records, listZoneRecordsResponse.Data[i])\n\t\t}\n\t\tif options.Page == 0 {\n\t\t\toptions.Page = 2\n\t\t} else {\n\t\t\toptions.Page++\n\t\t}\n\t\tif p >= listZoneRecordsResponse.Pagination.TotalPages {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FormatZoneRecords takes a slice of dnsimple.ZoneRecord and formats it in the specified format.\nfunc FormatZoneRecords(zones []dnsimple.ZoneRecord, format string) error {\n\tif format == \"json\" {\n\t\terr := json.NewEncoder(os.Stdout).Encode(zones)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif format == \"table\" {\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\t\tfmt.Printf(\"| %-30s | %-5s | %-7s | %-30s |\\n\", \"Name\", \"Type\", \"TTL\", \"Content\")\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\t}\n\tfor _, zone := range zones {\n\t\tif zone.Name == `` {\n\t\t\tzone.Name = `.`\n\t\t}\n\t\tswitch format {\n\t\tcase \"plain\":\n\t\t\tif *verbose {\n\t\t\t\tfmt.Printf(\"%s %s %s %d (%d) %s %v\\n\", zone.ZoneID, zone.Name, zone.Type, zone.Priority, zone.TTL, zone.Content, zone.UpdatedAt)\n\t\t\t} else if flag.NFlag() > 1 {\n\t\t\t\tfmt.Printf(\"%s %s %s (%d) %s\\n\", zone.ZoneID, zone.Name, zone.Type, zone.TTL, zone.Content)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%s %s (%d) %s\\n\", zone.Name, zone.Type, zone.TTL, zone.Content)\n\n\t\t\t}\n\t\tcase \"table\":\n\t\t\tfmt.Printf(\"| %-30s | %-5s | %7d | %-30s |\\n\", zone.Name, zone.Type, zone.TTL, zone.Content)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid format %v\", format)\n\t\t}\n\t}\n\tif format == \"table\" {\n\t\tfmt.Printf(\"+-%-30s-+-%-5s-+-%-7s-+-%-30s-+\\n\", strings.Repeat(\"-\", 30), \"-----\", \"-------\", strings.Repeat(\"-\", 30))\n\n\t}\n\treturn nil\n}\n\ntype zoneRecords []dnsimple.ZoneRecord\n\nfunc (z zoneRecords) Len() int { return len(z) }\nfunc (z zoneRecords) Less(i, j int) bool { return z[i].Name < z[j].Name }\nfunc (z zoneRecords) Swap(i, j int) { z[i], z[j] = z[j], z[i] }\n\nvar configFileName = func() string {\n\tif os.Getenv(\"DOMASIMU_CONF\") != \"\" {\n\t\treturn os.Getenv(\"DOMASIMU_CONF\")\n\t}\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn filepath.Join(os.Getenv(\"LOCALAPPDATA\"), \"Domasimu\", \"config\")\n\tcase \"darwin\":\n\t\tf := filepath.Join(os.Getenv(\"HOME\"), \"Library\", \"Application Support\", \"Domasimu\", \"config\")\n\t\tfh, err := os.Open(f)\n\t\tif err == nil {\n\t\t\tfh.Close()\n\t\t\treturn f\n\t\t}\n\t}\n\tif os.Getenv(\"XDG_CONFIG_HOME\") != \"\" {\n\t\tf := filepath.Join(os.Getenv(\"XDG_CONFIG_HOME\"), \"domasimu\", \"config\")\n\t\tfh, err := os.Open(f)\n\t\tif err == nil {\n\t\t\tfh.Close()\n\t\t\treturn f\n\t\t}\n\t}\n\tf := filepath.Join(os.Getenv(\"HOME\"), \".config\", \"domasimu\", \"config\")\n\tfh, err := os.Open(f)\n\tif err == nil {\n\t\tfh.Close()\n\t\treturn f\n\t}\n\treturn filepath.Join(os.Getenv(\"HOME\"), \".domasimurc\")\n}()\n\nfunc getCreds() (string, string, error) {\n\tvar config Config\n\t_, err := toml.DecodeFile(configFileName, &config)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn config.User, config.Token, nil\n}\n\n\/\/ Config represents the user and token config for dnsimple.\ntype Config struct {\n\tUser string\n\tToken string\n}\n\nfunc createOrUpdate(client *dnsimple.Client, message string, accountID string) (string, error) {\n\tpieces := strings.Split(message, \" \")\n\tif len(pieces) != 6 {\n\t\treturn \"\", fmt.Errorf(\"expected space seperated domain, name, type, oldvalue, newvalue, ttl\")\n\t}\n\n\tdomain := pieces[0]\n\tchangeRecord := dnsimple.ZoneRecord{\n\t\tName: pieces[1],\n\t\tType: pieces[2],\n\t}\n\toldValue := pieces[3]\n\tnewRecord := changeRecord\n\tnewRecord.Content = pieces[4]\n\tif *value != \"\" {\n\t\tnewRecord.Content = *value\n\t}\n\tttl, err := strconv.Atoi(pieces[5])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not convert %s to int: %w\", pieces[5], err)\n\t}\n\tnewRecord.TTL = ttl\n\tid, err := getRecordIDByValue(client, domain, oldValue, accountID, &changeRecord)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar respID string\n\tif id == 0 {\n\t\tzoneRecordResponse, err := client.Zones.CreateRecord(accountID, domain, newRecord)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\trespID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tzoneRecordResponse, err := client.Zones.UpdateRecord(accountID, domain, id, newRecord)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\trespID = strconv.FormatInt(zoneRecordResponse.Data.ID, 10)\n\t}\n\n\treturn respID, nil\n}\n\nfunc deleteRecord(client *dnsimple.Client, message, accountID string) (string, error) {\n\tpieces := strings.Split(message, \" \")\n\tif len(pieces) != 4 {\n\t\treturn \"\", fmt.Errorf(\"expected space seperated domain, name, type, value\")\n\t}\n\tdomain := pieces[0]\n\tchangeRecord := dnsimple.ZoneRecord{\n\t\tName: pieces[1],\n\t\tType: pieces[2],\n\t}\n\tvalue := pieces[3]\n\tid, err := getRecordIDByValue(client, domain, value, accountID, &changeRecord)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif id == 0 {\n\t\treturn \"\", fmt.Errorf(\"could not find record\")\n\t}\n\t_, err = client.Zones.DeleteRecord(accountID, domain, id)\n\trespID := strconv.FormatInt(id, 10)\n\n\treturn respID, err\n}\n\nfunc getRecordIDByValue(client *dnsimple.Client, domain, value, accountID string, changeRecord *dnsimple.ZoneRecord) (int64, error) {\n\toptions := &dnsimple.ZoneRecordListOptions{}\n\tif changeRecord.Type != \"\" {\n\t\toptions.Type = changeRecord.Type\n\t}\n\trecordResponse, err := listRecords(client, accountID, domain, options)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar id int64\n\tfor _, record := range recordResponse {\n\t\tif record.Name == changeRecord.Name && record.Type == changeRecord.Type && record.Content == value {\n\t\t\tid = record.ID\n\t\t\tbreak\n\t\t}\n\t}\n\treturn id, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 SoundCloud, Rany Keddo. All rights reserved. Use of this\n\/\/ source code is governed by a license that can be found in the LICENSE file.\n\npackage bandit\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ NewExperiment loads experiment `name` from the experiments tsv `tsv`.\nfunc NewExperiment(tsv, name string) (*Experiment, error) {\n\tes, err := NewExperiments(tsv)\n\tif err != nil {\n\t\treturn &Experiment{}, err\n\t}\n\n\te, ok := (*es)[name]\n\tif !ok {\n\t\treturn &Experiment{}, fmt.Errorf(\"could not find %s\", name)\n\t}\n\n\treturn e, nil\n}\n\n\/\/ Experiment is a single experiment. Variants are in ascending ordinal\n\/\/ sorting, where ordinals are contiguous and start at 1.\ntype Experiment struct {\n\tName string\n\tBandit Bandit\n\tVariants Variants\n}\n\n\/\/ Select calls SelectArm on the bandit and returns the associated variant\nfunc (e *Experiment) Select() (Variant, error) {\n\tselected := e.Bandit.SelectArm()\n\treturn e.GetVariant(selected)\n}\n\n\/\/ SelectTimestamped selects the appropriate variant given it's\n\/\/ timestampedTag. A timestamped tag is a string in the form\n\/\/ <tag>:<timestamp>. If the duration between <timestamp> and the current time\n\/\/ is smaller than `d`, the given tagged is used to return a variant. If it is\n\/\/ larger, Select() is called instead. If the `timestampedTag` argument is\n\/\/ the blank string, Select() is called instead.\nfunc (e *Experiment) SelectTimestamped(timestampedTag string, ttl time.Duration) (Variant, int64, error) {\n\tif timestampedTag == \"\" {\n\t\tv, err := e.Select()\n\t\treturn v, time.Now().Unix(), err\n\t}\n\n\ttag, ts, err := TimestampedTagToTag(timestampedTag)\n\tif err != nil {\n\t\treturn Variant{}, 0, fmt.Errorf(\"could not decode timestamped tag: %s\", err.Error())\n\t}\n\n\t\/\/ return the given timestamped tag\n\tif ttl > time.Since(time.Unix(ts, 0)) {\n\t\tv, err := e.GetTaggedVariant(tag)\n\t\treturn v, ts, err\n\t}\n\n\t\/\/ return a new selection\n\tv, err := e.Select()\n\treturn v, time.Now().Unix(), err\n}\n\n\/\/ GetVariant selects the appropriate variant given it's 1 indexed ordinal\nfunc (e *Experiment) GetVariant(ordinal int) (Variant, error) {\n\tif l := len(e.Variants); ordinal < 0 || ordinal > l {\n\t\treturn Variant{}, fmt.Errorf(\"ordinal %d not in [1,%d]\", ordinal, l)\n\t}\n\n\treturn e.Variants[ordinal-1], nil\n}\n\n\/\/ GetTaggedVariant selects the appropriate variant given it's tag\nfunc (e *Experiment) GetTaggedVariant(tag string) (Variant, error) {\n\tfor _, variant := range e.Variants {\n\t\tif variant.Tag == tag {\n\t\t\treturn variant, nil\n\t\t}\n\t}\n\n\treturn Variant{}, fmt.Errorf(\"tag '%s' is not in experiment %s\", tag, e.Name)\n}\n\n\/\/ InitDelayedBandit adds a delayed bandit to this experiment.\nfunc (e *Experiment) InitDelayedBandit(snapshot string, poll time.Duration) error {\n\tc := make(chan Counters)\n\tgo func() {\n\t\tt := time.NewTicker(poll)\n\t\tfor _ = range t.C {\n\t\t\tcounters, err := GetSnapshot(snapshot)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not get snapshot: %s\", err.Error())\n\t\t\t}\n\n\t\t\tc <- counters\n\t\t}\n\t}()\n\n\tb, _ := NewSoftmax(len(e.Variants), 0.1) \/\/ 0.1 cannot return an error\n\td, err := NewDelayedBandit(b, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.Bandit = d\n\treturn nil\n}\n\n\/\/ Variant describes endpoints which are mapped onto bandit arms.\ntype Variant struct {\n\tOrdinal int \/\/ 1 indexed arm ordinal\n\tURL string \/\/ the url associated with this variant, for out of band\n\tTag string \/\/ this tag is used throughout the lifecycle of the experiment\n\tDescription string \/\/ freitext\n}\n\n\/\/ Variants is a set of variants sorted by ordinal.\ntype Variants []Variant\n\nfunc (v Variants) Len() int { return len(v) }\nfunc (v Variants) Less(i, j int) bool { return v[i].Ordinal < v[j].Ordinal }\nfunc (v Variants) Swap(i, j int) { v[i], v[j] = v[j], v[i] }\n\n\/\/ NewExperiments reads in a tsv file and converts it to a map of experiments.\nfunc NewExperiments(filename string) (*Experiments, error) {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn &Experiments{}, fmt.Errorf(\"need a valid input file: %v\", err)\n\t}\n\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\treader.Comma = '\\t'\n\trecords, err := reader.ReadAll()\n\tif err != nil {\n\t\treturn &Experiments{}, fmt.Errorf(\"could not read tsv: %s \", err)\n\t}\n\n\t\/\/ intermediary data structure groups variants\n\ttype experimentVariants map[string]Variants\n\n\tvariants := make(experimentVariants)\n\tfor i, record := range records {\n\t\tif l := len(record); l != 4 {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"record is not %v long: %v\", l, record)\n\t\t}\n\n\t\tordinal, err := strconv.Atoi(record[1])\n\t\tif err != nil {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"invalid ordinal on line %n: %s\", i, err)\n\t\t}\n\n\t\tname := record[0]\n\t\tif words := strings.Fields(name); len(words) != 1 {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"experiment has whitespace: %s\", name)\n\t\t}\n\n\t\tdescription := record[3]\n\t\tvariants[name] = append(variants[name], Variant{\n\t\t\tOrdinal: ordinal,\n\t\t\tURL: record[2],\n\t\t\tTag: fmt.Sprintf(\"%s:%s\", name, record[1]),\n\t\t\tDescription: description,\n\t\t})\n\t}\n\n\t\/\/ sorted experiment variants\n\texperiments := make(Experiments)\n\tfor name, variants := range variants {\n\t\tsort.Sort(variants)\n\t\tb, _ := NewSoftmax(len(variants), 0.1) \/\/ default to softmax.\n\t\texperiments[name] = &Experiment{\n\t\t\tBandit: b,\n\t\t\tName: name,\n\t\t\tVariants: variants,\n\t\t}\n\t}\n\n\t\/\/ fail if ordinals are non-contiguous or do not start with 1\n\tfor name, variants := range variants {\n\t\tfor i := 0; i < len(variants); i++ {\n\t\t\tif ord := variants[i].Ordinal; ord != i+1 {\n\t\t\t\treturn &Experiments{}, fmt.Errorf(\"%s: variant %d noncontiguous\", name, ord)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &experiments, nil\n}\n\n\/\/ Experiments is an index of names to experiment\ntype Experiments map[string]*Experiment\n\n\/\/ GetVariant returns the Experiment and variant pointed to by a string tag.\nfunc (e *Experiments) GetVariant(tag string) (Experiment, Variant, error) {\n\tfor _, experiment := range *e {\n\t\tfor _, variant := range experiment.Variants {\n\t\t\tif variant.Tag == tag {\n\t\t\t\treturn *experiment, variant, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Experiment{}, Variant{}, fmt.Errorf(\"could not find variant '%s'\", tag)\n}\n\n\/\/ InitDelayedBandit initializes all bandits with delayed Softmax(0.1).\nfunc (e *Experiments) InitDelayedBandit(snapshot string, poll time.Duration) error {\n\tfor _, e := range *e {\n\t\tif err := e.InitDelayedBandit(snapshot, poll); err != nil {\n\t\t\treturn fmt.Errorf(\"delayed bandit setup failed: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ TimestampedTagToTag docodes a timestamped tag in the form <tag>:<timestamp> into\n\/\/ a (tag, ts)\nfunc TimestampedTagToTag(timestampedTag string) (string, int64, error) {\n\tsep := strings.LastIndex(timestampedTag, \":\")\n\tif sep == -1 {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid timestampedTag, does not end in :<timestamp>\")\n\t}\n\n\ttag, at := timestampedTag[:sep], timestampedTag[sep+1:]\n\tts, err := strconv.ParseInt(at, 10, 64)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid ttl: %s\", err.Error())\n\t}\n\n\treturn tag, ts, nil\n}\n<commit_msg>Don't return error on experiment.Select()<commit_after>\/\/ Copyright 2013 SoundCloud, Rany Keddo. All rights reserved. Use of this\n\/\/ source code is governed by a license that can be found in the LICENSE file.\n\npackage bandit\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ NewExperiment loads experiment `name` from the experiments tsv `tsv`.\nfunc NewExperiment(tsv, name string) (*Experiment, error) {\n\tes, err := NewExperiments(tsv)\n\tif err != nil {\n\t\treturn &Experiment{}, err\n\t}\n\n\te, ok := (*es)[name]\n\tif !ok {\n\t\treturn &Experiment{}, fmt.Errorf(\"could not find %s\", name)\n\t}\n\n\treturn e, nil\n}\n\n\/\/ Experiment is a single experiment. Variants are in ascending ordinal\n\/\/ sorting, where ordinals are contiguous and start at 1.\ntype Experiment struct {\n\tName string\n\tBandit Bandit\n\tVariants Variants\n}\n\n\/\/ Select calls SelectArm on the bandit and returns the associated variant\nfunc (e *Experiment) Select() Variant {\n\tselected := e.Bandit.SelectArm()\n\tif selected > len(e.Variants) {\n\t\tpanic(\"selected impossible arm\")\n\t}\n\n\tv, _ := e.GetVariant(selected)\n\treturn v\n}\n\n\/\/ SelectTimestamped selects the appropriate variant given it's\n\/\/ timestampedTag. A timestamped tag is a string in the form\n\/\/ <tag>:<timestamp>. If the duration between <timestamp> and the current time\n\/\/ is smaller than `d`, the given tagged is used to return a variant. If it is\n\/\/ larger, Select() is called instead. If the `timestampedTag` argument is\n\/\/ the blank string, Select() is called instead.\nfunc (e *Experiment) SelectTimestamped(timestampedTag string, ttl time.Duration) (Variant, int64, error) {\n\tif timestampedTag == \"\" {\n\t\treturn e.Select(), time.Now().Unix(), nil\n\t}\n\n\ttag, ts, err := TimestampedTagToTag(timestampedTag)\n\tif err != nil {\n\t\treturn Variant{}, 0, fmt.Errorf(\"could not decode timestamped tag: %s\", err.Error())\n\t}\n\n\t\/\/ return the given timestamped tag\n\tif ttl > time.Since(time.Unix(ts, 0)) {\n\t\tv, err := e.GetTaggedVariant(tag)\n\t\treturn v, ts, err\n\t}\n\n\t\/\/ return a new selection\n\treturn e.Select(), time.Now().Unix(), nil\n}\n\n\/\/ GetVariant selects the appropriate variant given it's 1 indexed ordinal\nfunc (e *Experiment) GetVariant(ordinal int) (Variant, error) {\n\tif l := len(e.Variants); ordinal < 0 || ordinal > l {\n\t\treturn Variant{}, fmt.Errorf(\"ordinal %d not in [1,%d]\", ordinal, l)\n\t}\n\n\treturn e.Variants[ordinal-1], nil\n}\n\n\/\/ GetTaggedVariant selects the appropriate variant given it's tag\nfunc (e *Experiment) GetTaggedVariant(tag string) (Variant, error) {\n\tfor _, variant := range e.Variants {\n\t\tif variant.Tag == tag {\n\t\t\treturn variant, nil\n\t\t}\n\t}\n\n\treturn Variant{}, fmt.Errorf(\"tag '%s' is not in experiment %s\", tag, e.Name)\n}\n\n\/\/ InitDelayedBandit adds a delayed bandit to this experiment.\nfunc (e *Experiment) InitDelayedBandit(snapshot string, poll time.Duration) error {\n\tc := make(chan Counters)\n\tgo func() {\n\t\tt := time.NewTicker(poll)\n\t\tfor _ = range t.C {\n\t\t\tcounters, err := GetSnapshot(snapshot)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"could not get snapshot: %s\", err.Error())\n\t\t\t}\n\n\t\t\tc <- counters\n\t\t}\n\t}()\n\n\tb, _ := NewSoftmax(len(e.Variants), 0.1) \/\/ 0.1 cannot return an error\n\td, err := NewDelayedBandit(b, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.Bandit = d\n\treturn nil\n}\n\n\/\/ Variant describes endpoints which are mapped onto bandit arms.\ntype Variant struct {\n\tOrdinal int \/\/ 1 indexed arm ordinal\n\tURL string \/\/ the url associated with this variant, for out of band\n\tTag string \/\/ this tag is used throughout the lifecycle of the experiment\n\tDescription string \/\/ freitext\n}\n\n\/\/ Variants is a set of variants sorted by ordinal.\ntype Variants []Variant\n\nfunc (v Variants) Len() int { return len(v) }\nfunc (v Variants) Less(i, j int) bool { return v[i].Ordinal < v[j].Ordinal }\nfunc (v Variants) Swap(i, j int) { v[i], v[j] = v[j], v[i] }\n\n\/\/ NewExperiments reads in a tsv file and converts it to a map of experiments.\nfunc NewExperiments(filename string) (*Experiments, error) {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn &Experiments{}, fmt.Errorf(\"need a valid input file: %v\", err)\n\t}\n\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\treader.Comma = '\\t'\n\trecords, err := reader.ReadAll()\n\tif err != nil {\n\t\treturn &Experiments{}, fmt.Errorf(\"could not read tsv: %s \", err)\n\t}\n\n\t\/\/ intermediary data structure groups variants\n\ttype experimentVariants map[string]Variants\n\n\tvariants := make(experimentVariants)\n\tfor i, record := range records {\n\t\tif l := len(record); l != 4 {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"record is not %v long: %v\", l, record)\n\t\t}\n\n\t\tordinal, err := strconv.Atoi(record[1])\n\t\tif err != nil {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"invalid ordinal on line %n: %s\", i, err)\n\t\t}\n\n\t\tname := record[0]\n\t\tif words := strings.Fields(name); len(words) != 1 {\n\t\t\treturn &Experiments{}, fmt.Errorf(\"experiment has whitespace: %s\", name)\n\t\t}\n\n\t\tdescription := record[3]\n\t\tvariants[name] = append(variants[name], Variant{\n\t\t\tOrdinal: ordinal,\n\t\t\tURL: record[2],\n\t\t\tTag: fmt.Sprintf(\"%s:%s\", name, record[1]),\n\t\t\tDescription: description,\n\t\t})\n\t}\n\n\t\/\/ sorted experiment variants\n\texperiments := make(Experiments)\n\tfor name, variants := range variants {\n\t\tsort.Sort(variants)\n\t\tb, _ := NewSoftmax(len(variants), 0.1) \/\/ default to softmax.\n\t\texperiments[name] = &Experiment{\n\t\t\tBandit: b,\n\t\t\tName: name,\n\t\t\tVariants: variants,\n\t\t}\n\t}\n\n\t\/\/ fail if ordinals are non-contiguous or do not start with 1\n\tfor name, variants := range variants {\n\t\tfor i := 0; i < len(variants); i++ {\n\t\t\tif ord := variants[i].Ordinal; ord != i+1 {\n\t\t\t\treturn &Experiments{}, fmt.Errorf(\"%s: variant %d noncontiguous\", name, ord)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &experiments, nil\n}\n\n\/\/ Experiments is an index of names to experiment\ntype Experiments map[string]*Experiment\n\n\/\/ GetVariant returns the Experiment and variant pointed to by a string tag.\nfunc (e *Experiments) GetVariant(tag string) (Experiment, Variant, error) {\n\tfor _, experiment := range *e {\n\t\tfor _, variant := range experiment.Variants {\n\t\t\tif variant.Tag == tag {\n\t\t\t\treturn *experiment, variant, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Experiment{}, Variant{}, fmt.Errorf(\"could not find variant '%s'\", tag)\n}\n\n\/\/ InitDelayedBandit initializes all bandits with delayed Softmax(0.1).\nfunc (e *Experiments) InitDelayedBandit(snapshot string, poll time.Duration) error {\n\tfor _, e := range *e {\n\t\tif err := e.InitDelayedBandit(snapshot, poll); err != nil {\n\t\t\treturn fmt.Errorf(\"delayed bandit setup failed: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ TimestampedTagToTag docodes a timestamped tag in the form <tag>:<timestamp> into\n\/\/ a (tag, ts)\nfunc TimestampedTagToTag(timestampedTag string) (string, int64, error) {\n\tsep := strings.LastIndex(timestampedTag, \":\")\n\tif sep == -1 {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid timestampedTag, does not end in :<timestamp>\")\n\t}\n\n\ttag, at := timestampedTag[:sep], timestampedTag[sep+1:]\n\tts, err := strconv.ParseInt(at, 10, 64)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid ttl: %s\", err.Error())\n\t}\n\n\treturn tag, ts, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The instance package contains a packer.Builder implementation that builds\n\/\/ AMIs for Amazon EC2 backed by instance storage, as opposed to EBS storage.\npackage instance\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/mitchellh\/multistep\"\n\tawscommon \"github.com\/mitchellh\/packer\/builder\/amazon\/common\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"mitchellh.amazon.instance\"\n\n\/\/ Config is the configuration that is chained through the steps and\n\/\/ settable from the template.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tawscommon.AccessConfig `mapstructure:\",squash\"`\n\tawscommon.AMIConfig `mapstructure:\",squash\"`\n\tawscommon.BlockDevices `mapstructure:\",squash\"`\n\tawscommon.RunConfig `mapstructure:\",squash\"`\n\n\tAccountId string `mapstructure:\"account_id\"`\n\tBundleDestination string `mapstructure:\"bundle_destination\"`\n\tBundlePrefix string `mapstructure:\"bundle_prefix\"`\n\tBundleUploadCommand string `mapstructure:\"bundle_upload_command\"`\n\tBundleVolCommand string `mapstructure:\"bundle_vol_command\"`\n\tS3Bucket string `mapstructure:\"s3_bucket\"`\n\tX509CertPath string `mapstructure:\"x509_cert_path\"`\n\tX509KeyPath string `mapstructure:\"x509_key_path\"`\n\tX509UploadPath string `mapstructure:\"x509_upload_path\"`\n\n\ttpl *packer.ConfigTemplate\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) error {\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\tb.config.tpl.Funcs(awscommon.TemplateFuncs)\n\n\tif b.config.BundleDestination == \"\" {\n\t\tb.config.BundleDestination = \"\/tmp\"\n\t}\n\n\tif b.config.BundlePrefix == \"\" {\n\t\tb.config.BundlePrefix = \"image-{{timestamp}}\"\n\t}\n\n\tif b.config.BundleUploadCommand == \"\" {\n\t\tb.config.BundleUploadCommand = \"sudo -n ec2-upload-bundle \" +\n\t\t\t\"-b {{.BucketName}} \" +\n\t\t\t\"-m {{.ManifestPath}} \" +\n\t\t\t\"-a {{.AccessKey}} \" +\n\t\t\t\"-s {{.SecretKey}} \" +\n\t\t\t\"-d {{.BundleDirectory}} \" +\n\t\t\t\"--batch \" +\n\t\t\t\"--retry\"\n\t}\n\n\tif b.config.BundleVolCommand == \"\" {\n\t\tb.config.BundleVolCommand = \"sudo -n ec2-bundle-vol \" +\n\t\t\t\"-k {{.KeyPath}} \" +\n\t\t\t\"-u {{.AccountId}} \" +\n\t\t\t\"-c {{.CertPath}} \" +\n\t\t\t\"-r {{.Architecture}} \" +\n\t\t\t\"-e {{.PrivatePath}}\/* \" +\n\t\t\t\"-d {{.Destination}} \" +\n\t\t\t\"-p {{.Prefix}} \" +\n\t\t\t\"--batch\"\n\t}\n\n\tif b.config.X509UploadPath == \"\" {\n\t\tb.config.X509UploadPath = \"\/tmp\"\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)\n\n\tvalidates := map[string]*string{\n\t\t\"bundle_upload_command\": &b.config.BundleUploadCommand,\n\t\t\"bundle_vol_command\": &b.config.BundleVolCommand,\n\t}\n\n\tfor n, ptr := range validates {\n\t\tif err := b.config.tpl.Validate(*ptr); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error parsing %s: %s\", n, err))\n\t\t}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"account_id\": &b.config.AccountId,\n\t\t\"ami_name\": &b.config.AMIName,\n\t\t\"bundle_destination\": &b.config.BundleDestination,\n\t\t\"bundle_prefix\": &b.config.BundlePrefix,\n\t\t\"s3_bucket\": &b.config.S3Bucket,\n\t\t\"x509_cert_path\": &b.config.X509CertPath,\n\t\t\"x509_key_path\": &b.config.X509KeyPath,\n\t\t\"x509_upload_path\": &b.config.X509UploadPath,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tif b.config.AccountId == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"account_id is required\"))\n\t} else {\n\t\tb.config.AccountId = strings.Replace(b.config.AccountId, \"-\", \"\", -1)\n\t}\n\n\tif b.config.S3Bucket == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"s3_bucket is required\"))\n\t}\n\n\tif b.config.X509CertPath == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"x509_cert_path is required\"))\n\t} else if _, err := os.Stat(b.config.X509CertPath); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"x509_cert_path points to bad file: %s\", err))\n\t}\n\n\tif b.config.X509KeyPath == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"x509_key_path is required\"))\n\t} else if _, err := os.Stat(b.config.X509KeyPath); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"x509_key_path points to bad file: %s\", err))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\tlog.Println(common.ScrubConfig(b.config), b.config.AccessKey, b.config.SecretKey)\n\treturn nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tregion, err := b.config.Region()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth, err := b.config.AccessConfig.Auth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tec2conn := ec2.New(auth, region)\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"ec2\", ec2conn)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\n\t\/\/ Build the steps\n\tsteps := []multistep.Step{\n\t\t&awscommon.StepKeyPair{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tDebugKeyPath: fmt.Sprintf(\"ec2_%s.pem\", b.config.PackerBuildName),\n\t\t\tKeyPairName: b.config.TemporaryKeyPairName,\n\t\t},\n\t\t&awscommon.StepSecurityGroup{\n\t\t\tSecurityGroupId: b.config.SecurityGroupId,\n\t\t\tSSHPort: b.config.SSHPort,\n\t\t\tVpcId: b.config.VpcId,\n\t\t},\n\t\t&awscommon.StepRunSourceInstance{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tExpectedRootDevice: \"instance-store\",\n\t\t\tInstanceType: b.config.InstanceType,\n\t\t\tIamInstanceProfile: b.config.IamInstanceProfile,\n\t\t\tUserData: b.config.UserData,\n\t\t\tUserDataFile: b.config.UserDataFile,\n\t\t\tSourceAMI: b.config.SourceAmi,\n\t\t\tSubnetId: b.config.SubnetId,\n\t\t\tBlockDevices: b.config.BlockDevices,\n\t\t},\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: awscommon.SSHAddress(ec2conn, b.config.SSHPort),\n\t\t\tSSHConfig: awscommon.SSHConfig(b.config.SSHUsername),\n\t\t\tSSHWaitTimeout: b.config.SSHTimeout(),\n\t\t},\n\t\t&common.StepProvision{},\n\t\t&StepUploadX509Cert{},\n\t\t&StepBundleVolume{},\n\t\t&StepUploadBundle{},\n\t\t&StepRegisterAMI{},\n\t\t&awscommon.StepAMIRegionCopy{\n\t\t\tRegions: b.config.AMIRegions,\n\t\t},\n\t\t&awscommon.StepModifyAMIAttributes{\n\t\t\tDescription: b.config.AMIDescription,\n\t\t\tUsers: b.config.AMIUsers,\n\t\t\tGroups: b.config.AMIGroups,\n\t\t\tProductCodes: b.config.AMIProductCodes,\n\t\t},\n\t\t&awscommon.StepCreateTags{\n\t\t\tTags: b.config.AMITags,\n\t\t},\n\t}\n\n\t\/\/ Run!\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\t\/\/ If there are no AMIs, then just return\n\tif _, ok := state.GetOk(\"amis\"); !ok {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Build the artifact and return it\n\tartifact := &awscommon.Artifact{\n\t\tAmis: state.Get(\"amis\").(map[string]string),\n\t\tBuilderIdValue: BuilderId,\n\t\tConn: ec2conn,\n\t}\n\n\treturn artifact, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<commit_msg>builder\/amazon\/instance: support AZ here too<commit_after>\/\/ The instance package contains a packer.Builder implementation that builds\n\/\/ AMIs for Amazon EC2 backed by instance storage, as opposed to EBS storage.\npackage instance\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/mitchellh\/multistep\"\n\tawscommon \"github.com\/mitchellh\/packer\/builder\/amazon\/common\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"mitchellh.amazon.instance\"\n\n\/\/ Config is the configuration that is chained through the steps and\n\/\/ settable from the template.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tawscommon.AccessConfig `mapstructure:\",squash\"`\n\tawscommon.AMIConfig `mapstructure:\",squash\"`\n\tawscommon.BlockDevices `mapstructure:\",squash\"`\n\tawscommon.RunConfig `mapstructure:\",squash\"`\n\n\tAccountId string `mapstructure:\"account_id\"`\n\tBundleDestination string `mapstructure:\"bundle_destination\"`\n\tBundlePrefix string `mapstructure:\"bundle_prefix\"`\n\tBundleUploadCommand string `mapstructure:\"bundle_upload_command\"`\n\tBundleVolCommand string `mapstructure:\"bundle_vol_command\"`\n\tS3Bucket string `mapstructure:\"s3_bucket\"`\n\tX509CertPath string `mapstructure:\"x509_cert_path\"`\n\tX509KeyPath string `mapstructure:\"x509_key_path\"`\n\tX509UploadPath string `mapstructure:\"x509_upload_path\"`\n\n\ttpl *packer.ConfigTemplate\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) error {\n\tmd, err := common.DecodeConfig(&b.config, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.config.tpl, err = packer.NewConfigTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.config.tpl.UserVars = b.config.PackerUserVars\n\tb.config.tpl.Funcs(awscommon.TemplateFuncs)\n\n\tif b.config.BundleDestination == \"\" {\n\t\tb.config.BundleDestination = \"\/tmp\"\n\t}\n\n\tif b.config.BundlePrefix == \"\" {\n\t\tb.config.BundlePrefix = \"image-{{timestamp}}\"\n\t}\n\n\tif b.config.BundleUploadCommand == \"\" {\n\t\tb.config.BundleUploadCommand = \"sudo -n ec2-upload-bundle \" +\n\t\t\t\"-b {{.BucketName}} \" +\n\t\t\t\"-m {{.ManifestPath}} \" +\n\t\t\t\"-a {{.AccessKey}} \" +\n\t\t\t\"-s {{.SecretKey}} \" +\n\t\t\t\"-d {{.BundleDirectory}} \" +\n\t\t\t\"--batch \" +\n\t\t\t\"--retry\"\n\t}\n\n\tif b.config.BundleVolCommand == \"\" {\n\t\tb.config.BundleVolCommand = \"sudo -n ec2-bundle-vol \" +\n\t\t\t\"-k {{.KeyPath}} \" +\n\t\t\t\"-u {{.AccountId}} \" +\n\t\t\t\"-c {{.CertPath}} \" +\n\t\t\t\"-r {{.Architecture}} \" +\n\t\t\t\"-e {{.PrivatePath}}\/* \" +\n\t\t\t\"-d {{.Destination}} \" +\n\t\t\t\"-p {{.Prefix}} \" +\n\t\t\t\"--batch\"\n\t}\n\n\tif b.config.X509UploadPath == \"\" {\n\t\tb.config.X509UploadPath = \"\/tmp\"\n\t}\n\n\t\/\/ Accumulate any errors\n\terrs := common.CheckUnusedConfig(md)\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.AMIConfig.Prepare(b.config.tpl)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(b.config.tpl)...)\n\n\tvalidates := map[string]*string{\n\t\t\"bundle_upload_command\": &b.config.BundleUploadCommand,\n\t\t\"bundle_vol_command\": &b.config.BundleVolCommand,\n\t}\n\n\tfor n, ptr := range validates {\n\t\tif err := b.config.tpl.Validate(*ptr); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error parsing %s: %s\", n, err))\n\t\t}\n\t}\n\n\ttemplates := map[string]*string{\n\t\t\"account_id\": &b.config.AccountId,\n\t\t\"ami_name\": &b.config.AMIName,\n\t\t\"bundle_destination\": &b.config.BundleDestination,\n\t\t\"bundle_prefix\": &b.config.BundlePrefix,\n\t\t\"s3_bucket\": &b.config.S3Bucket,\n\t\t\"x509_cert_path\": &b.config.X509CertPath,\n\t\t\"x509_key_path\": &b.config.X509KeyPath,\n\t\t\"x509_upload_path\": &b.config.X509UploadPath,\n\t}\n\n\tfor n, ptr := range templates {\n\t\tvar err error\n\t\t*ptr, err = b.config.tpl.Process(*ptr, nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(\n\t\t\t\terrs, fmt.Errorf(\"Error processing %s: %s\", n, err))\n\t\t}\n\t}\n\n\tif b.config.AccountId == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"account_id is required\"))\n\t} else {\n\t\tb.config.AccountId = strings.Replace(b.config.AccountId, \"-\", \"\", -1)\n\t}\n\n\tif b.config.S3Bucket == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"s3_bucket is required\"))\n\t}\n\n\tif b.config.X509CertPath == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"x509_cert_path is required\"))\n\t} else if _, err := os.Stat(b.config.X509CertPath); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"x509_cert_path points to bad file: %s\", err))\n\t}\n\n\tif b.config.X509KeyPath == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs, errors.New(\"x509_key_path is required\"))\n\t} else if _, err := os.Stat(b.config.X509KeyPath); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"x509_key_path points to bad file: %s\", err))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\tlog.Println(common.ScrubConfig(b.config), b.config.AccessKey, b.config.SecretKey)\n\treturn nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tregion, err := b.config.Region()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth, err := b.config.AccessConfig.Auth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tec2conn := ec2.New(auth, region)\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"ec2\", ec2conn)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\n\t\/\/ Build the steps\n\tsteps := []multistep.Step{\n\t\t&awscommon.StepKeyPair{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tDebugKeyPath: fmt.Sprintf(\"ec2_%s.pem\", b.config.PackerBuildName),\n\t\t\tKeyPairName: b.config.TemporaryKeyPairName,\n\t\t},\n\t\t&awscommon.StepSecurityGroup{\n\t\t\tSecurityGroupId: b.config.SecurityGroupId,\n\t\t\tSSHPort: b.config.SSHPort,\n\t\t\tVpcId: b.config.VpcId,\n\t\t},\n\t\t&awscommon.StepRunSourceInstance{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tExpectedRootDevice: \"instance-store\",\n\t\t\tInstanceType: b.config.InstanceType,\n\t\t\tIamInstanceProfile: b.config.IamInstanceProfile,\n\t\t\tUserData: b.config.UserData,\n\t\t\tUserDataFile: b.config.UserDataFile,\n\t\t\tSourceAMI: b.config.SourceAmi,\n\t\t\tSubnetId: b.config.SubnetId,\n\t\t\tAvailabilityZone: b.config.AvailabilityZone,\n\t\t\tBlockDevices: b.config.BlockDevices,\n\t\t},\n\t\t&common.StepConnectSSH{\n\t\t\tSSHAddress: awscommon.SSHAddress(ec2conn, b.config.SSHPort),\n\t\t\tSSHConfig: awscommon.SSHConfig(b.config.SSHUsername),\n\t\t\tSSHWaitTimeout: b.config.SSHTimeout(),\n\t\t},\n\t\t&common.StepProvision{},\n\t\t&StepUploadX509Cert{},\n\t\t&StepBundleVolume{},\n\t\t&StepUploadBundle{},\n\t\t&StepRegisterAMI{},\n\t\t&awscommon.StepAMIRegionCopy{\n\t\t\tRegions: b.config.AMIRegions,\n\t\t},\n\t\t&awscommon.StepModifyAMIAttributes{\n\t\t\tDescription: b.config.AMIDescription,\n\t\t\tUsers: b.config.AMIUsers,\n\t\t\tGroups: b.config.AMIGroups,\n\t\t\tProductCodes: b.config.AMIProductCodes,\n\t\t},\n\t\t&awscommon.StepCreateTags{\n\t\t\tTags: b.config.AMITags,\n\t\t},\n\t}\n\n\t\/\/ Run!\n\tif b.config.PackerDebug {\n\t\tb.runner = &multistep.DebugRunner{\n\t\t\tSteps: steps,\n\t\t\tPauseFn: common.MultistepDebugFn(ui),\n\t\t}\n\t} else {\n\t\tb.runner = &multistep.BasicRunner{Steps: steps}\n\t}\n\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\t\/\/ If there are no AMIs, then just return\n\tif _, ok := state.GetOk(\"amis\"); !ok {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Build the artifact and return it\n\tartifact := &awscommon.Artifact{\n\t\tAmis: state.Get(\"amis\").(map[string]string),\n\t\tBuilderIdValue: BuilderId,\n\t\tConn: ec2conn,\n\t}\n\n\treturn artifact, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Sergio Correia\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n)\n\ntype wmIcon struct {\n\tName string\n\tIcon string\n}\n\ntype wmConfig struct {\n\tSoundDevice string\n\tNetworkInterface string\n\tFont string\n\tWmSocket string\n\tIcons []wmIcon\n\tWeather weatherInfo\n\tColors colorInfo\n\tBar barConfig\n\tDzen dzenConfig\n}\n\nvar (\n\tconfigFile string\n\tconfig wmConfig\n\ticons map[string]string\n)\n\nfunc usage(filename string) {\n\tfmt.Printf(\"Config file '%s' does not seem to exist. Please double check.\\n\", filename)\n\tfmt.Printf(\"Usage: %s [config file]\\n\\n\", app)\n\tfmt.Printf(\"If no config file is specified, %s will try to use '$XDG_CONFIG_HOME\/foobar\/foobar.cfg', if $XDG_CONFIG_HOME is set, or '~\/.config\/foobar\/foobar.cfg', otherwise.\\n\", app)\n\tos.Exit(1)\n}\n\nfunc configDirectory() string {\n\t\/\/ From XDG Base Directory Specification\n\t\/\/\n\t\/\/ $XDG_CONFIG_HOME defines the base directory relative to which user specific\n\t\/\/ configuration files should be stored. If $XDG_CONFIG_HOME is either not set\n\t\/\/ or empty, a default equal to $HOME\/.config should be used.\n\tbaseDir := os.Getenv(\"XDG_CONFIG_HOME\")\n\tif len(baseDir) == 0 {\n\t\tcurrentUser, _ := user.Current()\n\t\tbaseDir = fmt.Sprintf(\"%s\/.config\", currentUser.HomeDir)\n\t}\n\treturn baseDir\n}\n\nfunc defaultConfigFile() string {\n\treturn fmt.Sprintf(\"%s\/%s\/foobar.cfg\", configDirectory(), app)\n}\n\nfunc loadConfig() {\n\tjsonConfig, err := os.Open(configFile)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer jsonConfig.Close()\n\n\tconfig = wmConfig{}\n\tjsonParser := json.NewDecoder(jsonConfig)\n\n\tif err = jsonParser.Decode(&config); err != nil {\n\t\tpanic(err)\n\t}\n\n\tupdateDzenConfig()\n\n\ticons = make(map[string]string)\n\tloadDzenColorFormats()\n\n\tfor i := range config.Icons {\n\t\ticons[config.Icons[i].Name] = config.Icons[i].Icon\n\t}\n\n\tvalidSoundDevice = isValidSoundDevice()\n\n\tif validWeatherAPIKey = isValidWeatherAPIKey(config.Weather.APIKey); validWeatherAPIKey {\n\t\tos.Setenv(\"OWM_API_KEY\", config.Weather.APIKey)\n\t} else {\n\t\tos.Unsetenv(\"OWM_API_KEY\")\n\t}\n\n\tusername = os.Getenv(\"USER\")\n}\n<commit_msg>Improve error handling while reading config file<commit_after>\/\/ Copyright 2017 Sergio Correia\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n)\n\ntype wmIcon struct {\n\tName string\n\tIcon string\n}\n\ntype wmConfig struct {\n\tSoundDevice string\n\tNetworkInterface string\n\tFont string\n\tWmSocket string\n\tIcons []wmIcon\n\tWeather weatherInfo\n\tColors colorInfo\n\tBar barConfig\n\tDzen dzenConfig\n}\n\nvar (\n\tconfigFile string\n\tconfig wmConfig\n\ticons map[string]string\n)\n\nfunc usage(filename string) {\n\tfmt.Printf(\"Config file '%s' does not seem to exist. Please double check.\\n\", filename)\n\tfmt.Printf(\"Usage: %s [config file]\\n\\n\", app)\n\tfmt.Printf(\"If no config file is specified, %s will try to use '$XDG_CONFIG_HOME\/foobar\/foobar.cfg', if $XDG_CONFIG_HOME is set, or '~\/.config\/foobar\/foobar.cfg', otherwise.\\n\", app)\n\tos.Exit(1)\n}\n\nfunc configDirectory() string {\n\t\/\/ From XDG Base Directory Specification\n\t\/\/\n\t\/\/ $XDG_CONFIG_HOME defines the base directory relative to which user specific\n\t\/\/ configuration files should be stored. If $XDG_CONFIG_HOME is either not set\n\t\/\/ or empty, a default equal to $HOME\/.config should be used.\n\tbaseDir := os.Getenv(\"XDG_CONFIG_HOME\")\n\tif len(baseDir) == 0 {\n\t\tcurrentUser, _ := user.Current()\n\t\tbaseDir = fmt.Sprintf(\"%s\/.config\", currentUser.HomeDir)\n\t}\n\treturn baseDir\n}\n\nfunc defaultConfigFile() string {\n\treturn fmt.Sprintf(\"%s\/%s\/foobar.cfg\", configDirectory(), app)\n}\n\nfunc loadConfig() {\n\tjsonConfig, err := os.Open(configFile)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to read config file '%s': %s\\nexiting...\\n\", configFile, err)\n\t\tos.Exit(1)\n\t}\n\tdefer jsonConfig.Close()\n\n\tconfig = wmConfig{}\n\tjsonParser := json.NewDecoder(jsonConfig)\n\n\tif err = jsonParser.Decode(&config); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error reading config file '%s': %s\\nexiting...\\n\", configFile, err)\n\t\tos.Exit(2)\n\t}\n\n\tupdateDzenConfig()\n\n\ticons = make(map[string]string)\n\tloadDzenColorFormats()\n\n\tfor i := range config.Icons {\n\t\ticons[config.Icons[i].Name] = config.Icons[i].Icon\n\t}\n\n\tvalidSoundDevice = isValidSoundDevice()\n\n\tif validWeatherAPIKey = isValidWeatherAPIKey(config.Weather.APIKey); validWeatherAPIKey {\n\t\tos.Setenv(\"OWM_API_KEY\", config.Weather.APIKey)\n\t} else {\n\t\tos.Unsetenv(\"OWM_API_KEY\")\n\t}\n\n\tusername = os.Getenv(\"USER\")\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/exercism\/cli\/config\"\n\t\"github.com\/exercism\/cli\/workspace\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDownloadWithoutToken(t *testing.T) {\n\tcfg := config.Config{\n\t\tUserViperConfig: viper.New(),\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"Welcome to Exercism\", err.Error())\n\t\t\/\/ It uses the default base API url to infer the host\n\t\tassert.Regexp(t, \"exercism.io\/my\/settings\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutWorkspace(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutBaseURL(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/whatever\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutFlags(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/username\")\n\tv.Set(\"apibaseurl\", \"http:\/\/example.com\")\n\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\tsetupDownloadFlags(flags)\n\n\terr := runDownload(cfg, flags, []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"need an --exercise name or a solution --uuid\", err.Error())\n\t}\n}\n\nfunc TestDownload(t *testing.T) {\n\tco := newCapturedOutput()\n\tco.override()\n\tdefer co.reset()\n\n\ttestCases := []struct {\n\t\trequester bool\n\t\texpectedDir string\n\t\tflags map[string]string\n\t}{\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: false,\n\t\t\texpectedDir: filepath.Join(\"users\", \"alice\"),\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: filepath.Join(\"teams\", \"bogus-team\"),\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\", \"track\": \"bogus-track\", \"team\": \"bogus-team\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"download-cmd\")\n\t\tdefer os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\n\t\tts := fakeDownloadServer(strconv.FormatBool(tc.requester), tc.flags[\"team\"])\n\t\tdefer ts.Close()\n\n\t\tv := viper.New()\n\t\tv.Set(\"workspace\", tmpDir)\n\t\tv.Set(\"apibaseurl\", ts.URL)\n\t\tv.Set(\"token\", \"abc123\")\n\n\t\tcfg := config.Config{\n\t\t\tUserViperConfig: v,\n\t\t}\n\t\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\t\tsetupDownloadFlags(flags)\n\t\tfor name, value := range tc.flags {\n\t\t\tflags.Set(name, value)\n\t\t}\n\n\t\terr = runDownload(cfg, flags, []string{})\n\t\tassert.NoError(t, err)\n\n\t\ttargetDir := filepath.Join(tmpDir, tc.expectedDir)\n\t\tassertDownloadedCorrectFiles(t, targetDir)\n\n\t\tdir := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\")\n\t\tb, err := ioutil.ReadFile(workspace.NewExerciseFromDir(dir).MetadataFilepath())\n\t\tvar metadata workspace.ExerciseMetadata\n\t\terr = json.Unmarshal(b, &metadata)\n\t\tassert.NoError(t, err)\n\n\t\tassert.Equal(t, \"bogus-track\", metadata.Track)\n\t\tassert.Equal(t, \"bogus-exercise\", metadata.ExerciseSlug)\n\t\tassert.Equal(t, tc.requester, metadata.IsRequester)\n\t}\n}\n\nfunc fakeDownloadServer(requestor, teamSlug string) *httptest.Server {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\n\tmux.HandleFunc(\"\/file-1.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 1\")\n\t})\n\n\tmux.HandleFunc(\"\/subdir\/file-2.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 2\")\n\t})\n\n\tmux.HandleFunc(\"\/full\/path\/with\/numeric-suffix\/bogus-track\/bogus-exercise-12345\/subdir\/numeric.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with numeric suffix\")\n\t})\n\n\tmux.HandleFunc(\"\/special-char-filename#.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is a special file\")\n\t})\n\n\tmux.HandleFunc(\"\/\\\\with-leading-backslash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with backslash in name\")\n\t})\n\tmux.HandleFunc(\"\/\\\\with\\\\backslashes\\\\in\\\\path.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with backslash in path\")\n\t})\n\n\tmux.HandleFunc(\"\/with-leading-slash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this has a slash\")\n\t})\n\n\tmux.HandleFunc(\"\/file-3.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"\")\n\t})\n\n\tmux.HandleFunc(\"\/solutions\/latest\", func(w http.ResponseWriter, r *http.Request) {\n\t\tteam := \"null\"\n\t\tif teamSlug := r.FormValue(\"team_id\"); teamSlug != \"\" {\n\t\t\tteam = fmt.Sprintf(`{\"name\": \"Bogus Team\", \"slug\": \"%s\"}`, teamSlug)\n\t\t}\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, team, server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\tmux.HandleFunc(\"\/solutions\/bogus-id\", func(w http.ResponseWriter, r *http.Request) {\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, \"null\", server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\n\treturn server\n}\n\nfunc assertDownloadedCorrectFiles(t *testing.T, targetDir string) {\n\texpectedFiles := []struct {\n\t\tdesc string\n\t\tpath string\n\t\tcontents string\n\t}{\n\t\t{\n\t\t\tdesc: \"a file in the exercise root directory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-1.txt\"),\n\t\t\tcontents: \"this is file 1\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file in a subdirectory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"file-2.txt\"),\n\t\t\tcontents: \"this is file 2\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a path with a numeric suffix\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"numeric.txt\"),\n\t\t\tcontents: \"with numeric suffix\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that requires URL encoding\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"special-char-filename#.txt\"),\n\t\t\tcontents: \"this is a special file\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that has a leading slash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-slash.txt\"),\n\t\t\tcontents: \"this has a slash\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file with a leading backslash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-backslash.txt\"),\n\t\t\tcontents: \"with backslash in name\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file with backslashes in path\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with\", \"backslashes\", \"in\", \"path.txt\"),\n\t\t\tcontents: \"with backslash in path\",\n\t\t},\n\t}\n\n\tfor _, file := range expectedFiles {\n\t\tt.Run(file.desc, func(t *testing.T) {\n\t\t\tb, err := ioutil.ReadFile(file.path)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, file.contents, string(b))\n\t\t})\n\t}\n\n\tpath := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-3.txt\")\n\t_, err := os.Lstat(path)\n\tassert.True(t, os.IsNotExist(err), \"It should not write the file if empty.\")\n}\n\nconst payloadTemplate = `\n{\n\t\"solution\": {\n\t\t\"id\": \"bogus-id\",\n\t\t\"user\": {\n\t\t\t\"handle\": \"alice\",\n\t\t\t\"is_requester\": %s\n\t\t},\n\t\t\"team\": %s,\n\t\t\"exercise\": {\n\t\t\t\"id\": \"bogus-exercise\",\n\t\t\t\"instructions_url\": \"http:\/\/example.com\/bogus-exercise\",\n\t\t\t\"auto_approve\": false,\n\t\t\t\"track\": {\n\t\t\t\t\"id\": \"bogus-track\",\n\t\t\t\t\"language\": \"Bogus Language\"\n\t\t\t}\n\t\t},\n\t\t\"file_download_base_url\": \"%s\",\n\t\t\"files\": [\n\t\t\t\"file-1.txt\",\n\t\t\t\"subdir\/file-2.txt\",\n\t\t\t\"special-char-filename#.txt\",\n\t\t\t\"\/with-leading-slash.txt\",\n\t\t\t\"\\\\with-leading-backslash.txt\",\n\t\t\t\"\\\\with\\\\backslashes\\\\in\\\\path.txt\",\n\t\t\t\"file-3.txt\",\n\t\t\t\"\/full\/path\/with\/numeric-suffix\/bogus-track\/bogus-exercise-12345\/subdir\/numeric.txt\"\n\t\t],\n\t\t\"iteration\": {\n\t\t\t\"submitted_at\": \"2017-08-21t10:11:12.130z\"\n\t\t}\n\t}\n}\n`\n<commit_msg>Correct ineffassign<commit_after>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/exercism\/cli\/config\"\n\t\"github.com\/exercism\/cli\/workspace\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDownloadWithoutToken(t *testing.T) {\n\tcfg := config.Config{\n\t\tUserViperConfig: viper.New(),\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"Welcome to Exercism\", err.Error())\n\t\t\/\/ It uses the default base API url to infer the host\n\t\tassert.Regexp(t, \"exercism.io\/my\/settings\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutWorkspace(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutBaseURL(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/whatever\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutFlags(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/username\")\n\tv.Set(\"apibaseurl\", \"http:\/\/example.com\")\n\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\tsetupDownloadFlags(flags)\n\n\terr := runDownload(cfg, flags, []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"need an --exercise name or a solution --uuid\", err.Error())\n\t}\n}\n\nfunc TestDownload(t *testing.T) {\n\tco := newCapturedOutput()\n\tco.override()\n\tdefer co.reset()\n\n\ttestCases := []struct {\n\t\trequester bool\n\t\texpectedDir string\n\t\tflags map[string]string\n\t}{\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: false,\n\t\t\texpectedDir: filepath.Join(\"users\", \"alice\"),\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: filepath.Join(\"teams\", \"bogus-team\"),\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\", \"track\": \"bogus-track\", \"team\": \"bogus-team\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"download-cmd\")\n\t\tdefer os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\n\t\tts := fakeDownloadServer(strconv.FormatBool(tc.requester), tc.flags[\"team\"])\n\t\tdefer ts.Close()\n\n\t\tv := viper.New()\n\t\tv.Set(\"workspace\", tmpDir)\n\t\tv.Set(\"apibaseurl\", ts.URL)\n\t\tv.Set(\"token\", \"abc123\")\n\n\t\tcfg := config.Config{\n\t\t\tUserViperConfig: v,\n\t\t}\n\t\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\t\tsetupDownloadFlags(flags)\n\t\tfor name, value := range tc.flags {\n\t\t\tflags.Set(name, value)\n\t\t}\n\n\t\terr = runDownload(cfg, flags, []string{})\n\t\tassert.NoError(t, err)\n\n\t\ttargetDir := filepath.Join(tmpDir, tc.expectedDir)\n\t\tassertDownloadedCorrectFiles(t, targetDir)\n\n\t\tdir := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\")\n\t\tb, err := ioutil.ReadFile(workspace.NewExerciseFromDir(dir).MetadataFilepath())\n\t\tassert.NoError(t, err)\n\t\tvar metadata workspace.ExerciseMetadata\n\t\terr = json.Unmarshal(b, &metadata)\n\t\tassert.NoError(t, err)\n\n\t\tassert.Equal(t, \"bogus-track\", metadata.Track)\n\t\tassert.Equal(t, \"bogus-exercise\", metadata.ExerciseSlug)\n\t\tassert.Equal(t, tc.requester, metadata.IsRequester)\n\t}\n}\n\nfunc fakeDownloadServer(requestor, teamSlug string) *httptest.Server {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\n\tmux.HandleFunc(\"\/file-1.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 1\")\n\t})\n\n\tmux.HandleFunc(\"\/subdir\/file-2.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 2\")\n\t})\n\n\tmux.HandleFunc(\"\/full\/path\/with\/numeric-suffix\/bogus-track\/bogus-exercise-12345\/subdir\/numeric.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with numeric suffix\")\n\t})\n\n\tmux.HandleFunc(\"\/special-char-filename#.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is a special file\")\n\t})\n\n\tmux.HandleFunc(\"\/\\\\with-leading-backslash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with backslash in name\")\n\t})\n\tmux.HandleFunc(\"\/\\\\with\\\\backslashes\\\\in\\\\path.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"with backslash in path\")\n\t})\n\n\tmux.HandleFunc(\"\/with-leading-slash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this has a slash\")\n\t})\n\n\tmux.HandleFunc(\"\/file-3.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"\")\n\t})\n\n\tmux.HandleFunc(\"\/solutions\/latest\", func(w http.ResponseWriter, r *http.Request) {\n\t\tteam := \"null\"\n\t\tif teamSlug := r.FormValue(\"team_id\"); teamSlug != \"\" {\n\t\t\tteam = fmt.Sprintf(`{\"name\": \"Bogus Team\", \"slug\": \"%s\"}`, teamSlug)\n\t\t}\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, team, server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\tmux.HandleFunc(\"\/solutions\/bogus-id\", func(w http.ResponseWriter, r *http.Request) {\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, \"null\", server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\n\treturn server\n}\n\nfunc assertDownloadedCorrectFiles(t *testing.T, targetDir string) {\n\texpectedFiles := []struct {\n\t\tdesc string\n\t\tpath string\n\t\tcontents string\n\t}{\n\t\t{\n\t\t\tdesc: \"a file in the exercise root directory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-1.txt\"),\n\t\t\tcontents: \"this is file 1\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file in a subdirectory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"file-2.txt\"),\n\t\t\tcontents: \"this is file 2\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a path with a numeric suffix\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"numeric.txt\"),\n\t\t\tcontents: \"with numeric suffix\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that requires URL encoding\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"special-char-filename#.txt\"),\n\t\t\tcontents: \"this is a special file\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that has a leading slash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-slash.txt\"),\n\t\t\tcontents: \"this has a slash\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file with a leading backslash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-backslash.txt\"),\n\t\t\tcontents: \"with backslash in name\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file with backslashes in path\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with\", \"backslashes\", \"in\", \"path.txt\"),\n\t\t\tcontents: \"with backslash in path\",\n\t\t},\n\t}\n\n\tfor _, file := range expectedFiles {\n\t\tt.Run(file.desc, func(t *testing.T) {\n\t\t\tb, err := ioutil.ReadFile(file.path)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, file.contents, string(b))\n\t\t})\n\t}\n\n\tpath := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-3.txt\")\n\t_, err := os.Lstat(path)\n\tassert.True(t, os.IsNotExist(err), \"It should not write the file if empty.\")\n}\n\nconst payloadTemplate = `\n{\n\t\"solution\": {\n\t\t\"id\": \"bogus-id\",\n\t\t\"user\": {\n\t\t\t\"handle\": \"alice\",\n\t\t\t\"is_requester\": %s\n\t\t},\n\t\t\"team\": %s,\n\t\t\"exercise\": {\n\t\t\t\"id\": \"bogus-exercise\",\n\t\t\t\"instructions_url\": \"http:\/\/example.com\/bogus-exercise\",\n\t\t\t\"auto_approve\": false,\n\t\t\t\"track\": {\n\t\t\t\t\"id\": \"bogus-track\",\n\t\t\t\t\"language\": \"Bogus Language\"\n\t\t\t}\n\t\t},\n\t\t\"file_download_base_url\": \"%s\",\n\t\t\"files\": [\n\t\t\t\"file-1.txt\",\n\t\t\t\"subdir\/file-2.txt\",\n\t\t\t\"special-char-filename#.txt\",\n\t\t\t\"\/with-leading-slash.txt\",\n\t\t\t\"\\\\with-leading-backslash.txt\",\n\t\t\t\"\\\\with\\\\backslashes\\\\in\\\\path.txt\",\n\t\t\t\"file-3.txt\",\n\t\t\t\"\/full\/path\/with\/numeric-suffix\/bogus-track\/bogus-exercise-12345\/subdir\/numeric.txt\"\n\t\t],\n\t\t\"iteration\": {\n\t\t\t\"submitted_at\": \"2017-08-21t10:11:12.130z\"\n\t\t}\n\t}\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package service\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/source\"\n\tproto \"github.com\/micro\/go-micro\/v2\/config\/source\/service\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/errors\"\n\t\"github.com\/micro\/go-micro\/v2\/logger\"\n)\n\nvar (\n\tDefaultName = \"go.micro.config\"\n\tDefaultNamespace = \"global\"\n\tDefaultPath = \"\"\n)\n\ntype service struct {\n\tserviceName string\n\tnamespace string\n\tpath string\n\topts source.Options\n\tclient proto.ConfigService\n}\n\nfunc (m *service) Read() (set *source.ChangeSet, err error) {\n\tclient := proto.NewConfigService(m.serviceName, m.opts.Client)\n\treq, err := client.Read(context.Background(), &proto.ReadRequest{\n\t\tNamespace: m.namespace,\n\t\tPath: m.path,\n\t})\n\tif verr, ok := err.(*errors.Error); ok && verr.Code == http.StatusNotFound {\n\t\treturn &source.ChangeSet{Data: []byte{}}, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn toChangeSet(req.Change.ChangeSet), nil\n}\n\nfunc (m *service) Watch() (w source.Watcher, err error) {\n\tclient := proto.NewConfigService(m.serviceName, m.opts.Client)\n\tstream, err := client.Watch(context.Background(), &proto.WatchRequest{\n\t\tNamespace: m.namespace,\n\t\tPath: m.path,\n\t})\n\tif err != nil {\n\t\tif logger.V(logger.ErrorLevel, logger.DefaultLogger) {\n\t\t\tlogger.Error(\"watch err: \", err)\n\t\t}\n\t\treturn\n\t}\n\treturn newWatcher(stream)\n}\n\n\/\/ Write is unsupported\nfunc (m *service) Write(cs *source.ChangeSet) error {\n\treturn nil\n}\n\nfunc (m *service) String() string {\n\treturn \"service\"\n}\n\nfunc NewSource(opts ...source.Option) source.Source {\n\tvar options source.Options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\taddr := DefaultName\n\tnamespace := DefaultNamespace\n\tpath := DefaultPath\n\n\tif options.Context != nil {\n\t\ta, ok := options.Context.Value(serviceNameKey{}).(string)\n\t\tif ok {\n\t\t\taddr = a\n\t\t}\n\n\t\tk, ok := options.Context.Value(namespaceKey{}).(string)\n\t\tif ok {\n\t\t\tnamespace = k\n\t\t}\n\n\t\tp, ok := options.Context.Value(pathKey{}).(string)\n\t\tif ok {\n\t\t\tpath = p\n\t\t}\n\t}\n\n\tif options.Client == nil {\n\t\toptions.Client = client.DefaultClient\n\t}\n\n\ts := &service{\n\t\tserviceName: addr,\n\t\topts: options,\n\t\tnamespace: namespace,\n\t\tpath: path,\n\t}\n\n\treturn s\n}\n<commit_msg>Return nil changeset and not blank<commit_after>package service\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/source\"\n\tproto \"github.com\/micro\/go-micro\/v2\/config\/source\/service\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/errors\"\n\t\"github.com\/micro\/go-micro\/v2\/logger\"\n)\n\nvar (\n\tDefaultName = \"go.micro.config\"\n\tDefaultNamespace = \"global\"\n\tDefaultPath = \"\"\n)\n\ntype service struct {\n\tserviceName string\n\tnamespace string\n\tpath string\n\topts source.Options\n\tclient proto.ConfigService\n}\n\nfunc (m *service) Read() (set *source.ChangeSet, err error) {\n\tclient := proto.NewConfigService(m.serviceName, m.opts.Client)\n\treq, err := client.Read(context.Background(), &proto.ReadRequest{\n\t\tNamespace: m.namespace,\n\t\tPath: m.path,\n\t})\n\tif verr, ok := err.(*errors.Error); ok && verr.Code == http.StatusNotFound {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn toChangeSet(req.Change.ChangeSet), nil\n}\n\nfunc (m *service) Watch() (w source.Watcher, err error) {\n\tclient := proto.NewConfigService(m.serviceName, m.opts.Client)\n\tstream, err := client.Watch(context.Background(), &proto.WatchRequest{\n\t\tNamespace: m.namespace,\n\t\tPath: m.path,\n\t})\n\tif err != nil {\n\t\tif logger.V(logger.ErrorLevel, logger.DefaultLogger) {\n\t\t\tlogger.Error(\"watch err: \", err)\n\t\t}\n\t\treturn\n\t}\n\treturn newWatcher(stream)\n}\n\n\/\/ Write is unsupported\nfunc (m *service) Write(cs *source.ChangeSet) error {\n\treturn nil\n}\n\nfunc (m *service) String() string {\n\treturn \"service\"\n}\n\nfunc NewSource(opts ...source.Option) source.Source {\n\tvar options source.Options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\taddr := DefaultName\n\tnamespace := DefaultNamespace\n\tpath := DefaultPath\n\n\tif options.Context != nil {\n\t\ta, ok := options.Context.Value(serviceNameKey{}).(string)\n\t\tif ok {\n\t\t\taddr = a\n\t\t}\n\n\t\tk, ok := options.Context.Value(namespaceKey{}).(string)\n\t\tif ok {\n\t\t\tnamespace = k\n\t\t}\n\n\t\tp, ok := options.Context.Value(pathKey{}).(string)\n\t\tif ok {\n\t\t\tpath = p\n\t\t}\n\t}\n\n\tif options.Client == nil {\n\t\toptions.Client = client.DefaultClient\n\t}\n\n\ts := &service{\n\t\tserviceName: addr,\n\t\topts: options,\n\t\tnamespace: namespace,\n\t\tpath: path,\n\t}\n\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport \"github.com\/sqs\/mux\"\n\nconst (\n\tBuild = \"build\"\n\tBuildDequeueNext = \"build.dequeue-next\"\n\tBuildUpdate = \"build.update\"\n\tBuildLog = \"build.log\"\n\tBuilds = \"builds\"\n\tBuildTasks = \"build.tasks\"\n\tBuildTaskUpdate = \"build.task\"\n\tBuildTasksCreate = \"build.tasks.create\"\n\tBuildTaskLog = \"build.task.log\"\n\n\tOrg = \"org\"\n\tOrgMembers = \"org.members\"\n\tOrgSettings = \"org.settings\"\n\tOrgSettingsUpdate = \"org.settings.update\"\n\n\tUsers = \"users\"\n\tUser = \"user\"\n\tUserOrgs = \"user.orgs\"\n\tUserAuthors = \"user.authors\"\n\tUserClients = \"user.clients\"\n\tUserEmails = \"user.emails\"\n\tUserFromGitHub = \"user.from-github\"\n\tUserRepoContributions = \"user.repo-contributions\"\n\tUserRepoDependencies = \"user.repo-dependencies\"\n\tUserRepoDependents = \"user.repo-dependents\"\n\tUserRefreshProfile = \"user.refresh-profile\"\n\tUserSettings = \"user.settings\"\n\tUserSettingsUpdate = \"user.settings.update\"\n\tUserComputeStats = \"user.compute-stats\"\n\n\tPerson = \"person\"\n\n\tRepoPullRequests = \"repo.pull-requests\"\n\tRepoPullRequest = \"repo.pull-request\"\n\tRepoPullRequestMerge = \"repo.pull-request.merge\"\n\tRepoPullRequestComments = \"repo.pull-request.comments\"\n\tRepoPullRequestCommentsCreate = \"repo.pull-request.comments.create\"\n\tRepoPullRequestCommentsEdit = \"repo.pull-request.comments.edit\"\n\tRepoPullRequestCommentsDelete = \"repo.pull-request.comments.delete\"\n\n\tRepoIssues = \"repo.issues\"\n\tRepoIssue = \"repo.issue\"\n\tRepoIssueComments = \"repo.issue.comments\"\n\tRepoIssueCommentsCreate = \"repo.issue.comments.create\"\n\tRepoIssueCommentsEdit = \"repo.issue.comments.edit\"\n\tRepoIssueCommentsDelete = \"repo.issue.comments.delete\"\n\n\tRepos = \"repos\"\n\tReposCreate = \"repos.create\"\n\tReposGetOrCreate = \"repos.get-or-create\"\n\tRepo = \"repo\"\n\tRepoAuthors = \"repo.authors\"\n\tRepoClients = \"repo.clients\"\n\tRepoDependents = \"repo.dependents\"\n\tRepoDependencies = \"repo.dependencies\"\n\tRepoBadge = \"repo.badge\"\n\tRepoBadges = \"repo.badges\"\n\tRepoCounter = \"repo.counter\"\n\tRepoCounters = \"repo.counters\"\n\tRepoReadme = \"repo.readme\"\n\tRepoBuilds = \"repo.builds\"\n\tRepoBuildsCreate = \"repo.builds.create\"\n\tRepoBuildDataEntry = \"repo.build-data.entry\"\n\tRepoTreeEntry = \"repo.tree.entry\"\n\tRepoRefreshProfile = \"repo.refresh-profile\"\n\tRepoRefreshVCSData = \"repo.refresh-vcs-data\"\n\tRepoComputeStats = \"repo.compute-stats\"\n\n\tRepoSettings = \"repo.settings\"\n\tRepoSettingsUpdate = \"repo.settings.update\"\n\n\tRepoStats = \"repo.stats\"\n\tRepoCombinedStatus = \"repo.combined-status\"\n\n\tRepoBuild = \"repo.build\"\n\n\tRepoCommits = \"repo.commits\"\n\tRepoCommit = \"repo.commit\"\n\tRepoCompareCommits = \"repo.compare-commits\"\n\tRepoTags = \"repo.tags\"\n\tRepoBranches = \"repo.branches\"\n\n\tSearch = \"search\"\n\tSearchComplete = \"search.complete\"\n\n\tSearchSuggestions = \"search.suggestions\"\n\n\tSnippet = \"snippet\"\n\n\tDefs = \"defs\"\n\tDef = \"def\"\n\tDefRefs = \"def.refs\"\n\tDefExamples = \"def.examples\"\n\tDefAuthors = \"def.authors\"\n\tDefClients = \"def.clients\"\n\tDefDependents = \"def.dependents\"\n\tDefVersions = \"def.versions\"\n\n\tDelta = \"delta\"\n\tDeltaDefs = \"delta.defs\"\n\tDeltaDependencies = \"delta.dependencies\"\n\tDeltaFiles = \"delta.files\"\n\tDeltaAffectedAuthors = \"delta.affected-authors\"\n\tDeltaAffectedClients = \"delta.affected-clients\"\n\tDeltaAffectedDependents = \"delta.affected-dependents\"\n\tDeltaReviewers = \"delta.reviewers\"\n\tDeltasIncoming = \"deltas.incoming\"\n\n\tUnit = \"unit\"\n\tUnits = \"units\"\n\n\tMarkdown = \"markdown\"\n\n\tExtGitHubReceiveWebhook = \"ext.github.receive-webhook\"\n\n\t\/\/ Redirects for old routes.\n\tRedirectOldRepoBadgesAndCounters = \"repo.redirect-old-badges-and-counters\"\n)\n\n\/\/ NewAPIRouter creates a new API router with route URL pattern definitions but\n\/\/ no handlers attached to the routes.\n\/\/\n\/\/ It is in a separate package from app so that other packages may use it to\n\/\/ generate URLs without resulting in Go import cycles (and so we can release\n\/\/ the router as open-source to support our client library).\nfunc NewAPIRouter(base *mux.Router) *mux.Router {\n\tif base == nil {\n\t\tbase = mux.NewRouter()\n\t}\n\n\tbase.StrictSlash(true)\n\n\tbase.Path(\"\/builds\").Methods(\"GET\").Name(Builds)\n\tbuilds := base.PathPrefix(\"\/builds\").Subrouter()\n\tbuilds.Path(\"\/next\").Methods(\"POST\").Name(BuildDequeueNext)\n\tbuildPath := \"\/{BID}\"\n\tbuilds.Path(buildPath).Methods(\"GET\").Name(Build)\n\tbuilds.Path(buildPath).Methods(\"PUT\").Name(BuildUpdate)\n\tbuild := builds.PathPrefix(buildPath).Subrouter()\n\tbuild.Path(\"\/log\").Methods(\"GET\").Name(BuildLog)\n\tbuild.Path(\"\/tasks\").Methods(\"GET\").Name(BuildTasks)\n\tbuild.Path(\"\/tasks\").Methods(\"POST\").Name(BuildTasksCreate)\n\tbuild.Path(\"\/tasks\/{TaskID}\").Methods(\"PUT\").Name(BuildTaskUpdate)\n\tbuild.Path(\"\/tasks\/{TaskID}\/log\").Methods(\"GET\").Name(BuildTaskLog)\n\n\tbase.Path(\"\/repos\").Methods(\"GET\").Name(Repos)\n\tbase.Path(\"\/repos\").Methods(\"POST\").Name(ReposCreate)\n\n\tbase.Path(\"\/repos\/github.com\/{owner:[^\/]+}\/{repo:[^\/]+}\/{what:(?:badges|counters)}\/{which}.png\").Methods(\"GET\").Name(RedirectOldRepoBadgesAndCounters)\n\n\trepoRev := base.PathPrefix(`\/repos\/` + RepoRevSpecPattern).PostMatchFunc(FixRepoRevSpecVars).BuildVarsFunc(PrepareRepoRevSpecRouteVars).Subrouter()\n\trepoRev.Path(\"\/.stats\").Methods(\"PUT\").Name(RepoComputeStats)\n\trepoRev.Path(\"\/.stats\").Methods(\"GET\").Name(RepoStats)\n\trepoRev.Path(\"\/.status\").Methods(\"GET\").Name(RepoCombinedStatus)\n\trepoRev.Path(\"\/.authors\").Methods(\"GET\").Name(RepoAuthors)\n\trepoRev.Path(\"\/.readme\").Methods(\"GET\").Name(RepoReadme)\n\trepoRev.Path(\"\/.build\").Methods(\"GET\").Name(RepoBuild)\n\trepoRev.Path(\"\/.dependencies\").Methods(\"GET\").Name(RepoDependencies)\n\trepoRev.PathPrefix(\"\/.build-data\"+TreeEntryPathPattern).PostMatchFunc(FixTreeEntryVars).BuildVarsFunc(PrepareTreeEntryRouteVars).Methods(\"GET\", \"HEAD\", \"PUT\", \"DELETE\").Name(RepoBuildDataEntry)\n\trepoRev.Path(\"\/.badges\/{Badge}.png\").Methods(\"GET\").Name(RepoBadge)\n\n\t\/\/ repo contains routes that are NOT specific to a revision. In these routes, the URL may not contain a revspec after the repo (that is, no \"github.com\/foo\/bar@myrevspec\").\n\trepoPath := `\/repos\/` + RepoSpecPathPattern\n\tbase.Path(repoPath).Methods(\"GET\").Name(Repo)\n\tbase.Path(repoPath).Methods(\"PUT\").Name(ReposGetOrCreate)\n\trepo := base.PathPrefix(repoPath).Subrouter()\n\trepo.Path(\"\/.clients\").Methods(\"GET\").Name(RepoClients)\n\trepo.Path(\"\/.dependents\").Methods(\"GET\").Name(RepoDependents)\n\trepo.Path(\"\/.external-profile\").Methods(\"PUT\").Name(RepoRefreshProfile)\n\trepo.Path(\"\/.vcs-data\").Methods(\"PUT\").Name(RepoRefreshVCSData)\n\trepo.Path(\"\/.settings\").Methods(\"GET\").Name(RepoSettings)\n\trepo.Path(\"\/.settings\").Methods(\"PUT\").Name(RepoSettingsUpdate)\n\trepo.Path(\"\/.commits\").Methods(\"GET\").Name(RepoCommits)\n\trepo.Path(\"\/.commits\/{Rev:\" + PathComponentNoLeadingDot + \"}\/.compare\").Methods(\"GET\").Name(RepoCompareCommits)\n\trepo.Path(\"\/.commits\/{Rev:\" + PathComponentNoLeadingDot + \"}\").Methods(\"GET\").Name(RepoCommit)\n\trepo.Path(\"\/.branches\").Methods(\"GET\").Name(RepoBranches)\n\trepo.Path(\"\/.tags\").Methods(\"GET\").Name(RepoTags)\n\trepo.Path(\"\/.badges\").Methods(\"GET\").Name(RepoBadges)\n\trepo.Path(\"\/.counters\").Methods(\"GET\").Name(RepoCounters)\n\trepo.Path(\"\/.counters\/{Counter}.png\").Methods(\"GET\").Name(RepoCounter)\n\trepo.Path(\"\/.builds\").Methods(\"GET\").Name(RepoBuilds)\n\trepo.Path(\"\/.builds\").Methods(\"POST\").Name(RepoBuildsCreate)\n\n\trepo.Path(\"\/.pulls\").Methods(\"GET\").Name(RepoPullRequests)\n\tpullPath := \"\/.pulls\/{Pull}\"\n\trepo.Path(pullPath).Methods(\"GET\").Name(RepoPullRequest)\n\tpull := repo.PathPrefix(pullPath).Subrouter()\n\tpull.Path(\"\/merge\").Methods(\"PUT\").Name(RepoPullRequestMerge)\n\tpull.Path(\"\/comments\").Methods(\"GET\").Name(RepoPullRequestComments)\n\tpull.Path(\"\/comments\").Methods(\"POST\").Name(RepoPullRequestCommentsCreate)\n\tpull.Path(\"\/comments\/{CommentID}\").Methods(\"PATCH\", \"PUT\").Name(RepoPullRequestCommentsEdit)\n\tpull.Path(\"\/comments\/{CommentID}\").Methods(\"DELETE\").Name(RepoPullRequestCommentsDelete)\n\n\trepo.Path(\"\/.issues\").Methods(\"GET\").Name(RepoIssues)\n\tissuePath := \"\/.issues\/{Issue}\"\n\trepo.Path(issuePath).Methods(\"GET\").Name(RepoIssue)\n\tissue := repo.PathPrefix(issuePath).Subrouter()\n\tissue.Path(\"\/comments\").Methods(\"GET\").Name(RepoIssueComments)\n\tissue.Path(\"\/comments\").Methods(\"POST\").Name(RepoIssueCommentsCreate)\n\tissue.Path(\"\/comments\/{CommentID}\").Methods(\"PATCH\", \"PUT\").Name(RepoIssueCommentsEdit)\n\tissue.Path(\"\/comments\/{CommentID}\").Methods(\"DELETE\").Name(RepoIssueCommentsDelete)\n\n\tdeltaPath := \"\/.deltas\/{Rev:.+}..{DeltaHeadRev:\" + PathComponentNoLeadingDot + \"}\"\n\trepo.Path(deltaPath).Methods(\"GET\").Name(Delta)\n\tdeltas := repo.PathPrefix(deltaPath).Subrouter()\n\tdeltas.Path(\"\/.defs\").Methods(\"GET\").Name(DeltaDefs)\n\tdeltas.Path(\"\/.dependencies\").Methods(\"GET\").Name(DeltaDependencies)\n\tdeltas.Path(\"\/.files\").Methods(\"GET\").Name(DeltaFiles)\n\tdeltas.Path(\"\/.affected-authors\").Methods(\"GET\").Name(DeltaAffectedAuthors)\n\tdeltas.Path(\"\/.affected-clients\").Methods(\"GET\").Name(DeltaAffectedClients)\n\tdeltas.Path(\"\/.affected-dependents\").Methods(\"GET\").Name(DeltaAffectedDependents)\n\tdeltas.Path(\"\/.reviewers\").Methods(\"GET\").Name(DeltaReviewers)\n\n\trepo.Path(\"\/.deltas-incoming\").Methods(\"GET\").Name(DeltasIncoming)\n\n\t\/\/ See router_util\/tree_route.go for an explanation of how we match tree\n\t\/\/ entry routes.\n\trepoRev.Path(\"\/.tree\" + TreeEntryPathPattern).PostMatchFunc(FixTreeEntryVars).BuildVarsFunc(PrepareTreeEntryRouteVars).Methods(\"GET\").Name(RepoTreeEntry)\n\n\tbase.Path(`\/people\/` + PersonSpecPattern).Methods(\"GET\").Name(Person)\n\n\tbase.Path(\"\/users\").Methods(\"GET\").Name(Users)\n\tuserPath := `\/users\/` + UserSpecPattern\n\tbase.Path(userPath).Methods(\"GET\").Name(User)\n\tuser := base.PathPrefix(userPath).Subrouter()\n\tuser.Path(\"\/orgs\").Methods(\"GET\").Name(UserOrgs)\n\tuser.Path(\"\/clients\").Methods(\"GET\").Name(UserClients)\n\tuser.Path(\"\/authors\").Methods(\"GET\").Name(UserAuthors)\n\tuser.Path(\"\/emails\").Methods(\"GET\").Name(UserEmails)\n\tuser.Path(\"\/repo-contributions\").Methods(\"GET\").Name(UserRepoContributions)\n\tuser.Path(\"\/repo-dependencies\").Methods(\"GET\").Name(UserRepoDependencies)\n\tuser.Path(\"\/repo-dependents\").Methods(\"GET\").Name(UserRepoDependents)\n\tuser.Path(\"\/external-profile\").Methods(\"PUT\").Name(UserRefreshProfile)\n\tuser.Path(\"\/stats\").Methods(\"PUT\").Name(UserComputeStats)\n\tuser.Path(\"\/settings\").Methods(\"GET\").Name(UserSettings)\n\tuser.Path(\"\/settings\").Methods(\"PUT\").Name(UserSettingsUpdate)\n\tbase.Path(\"\/external-users\/github\/{GitHubUserSpec}\").Methods(\"GET\").Name(UserFromGitHub)\n\n\torgPath := \"\/orgs\/{OrgSpec}\"\n\tbase.Path(orgPath).Methods(\"GET\").Name(Org)\n\torg := base.PathPrefix(orgPath).Subrouter()\n\torg.Path(\"\/settings\").Methods(\"GET\").Name(OrgSettings)\n\torg.Path(\"\/settings\").Methods(\"PUT\").Name(OrgSettingsUpdate)\n\torg.Path(\"\/members\").Methods(\"GET\").Name(OrgMembers)\n\n\tbase.Path(\"\/search\").Methods(\"GET\").Name(Search)\n\tbase.Path(\"\/search\/complete\").Methods(\"GET\").Name(SearchComplete)\n\tbase.Path(\"\/search\/suggestions\").Methods(\"GET\").Name(SearchSuggestions)\n\n\tbase.Path(\"\/snippet\").Methods(\"GET\", \"POST\", \"ORIGIN\").Name(Snippet)\n\n\tbase.Path(\"\/.defs\").Methods(\"GET\").Name(Defs)\n\n\t\/\/ See router_util\/def_route.go for an explanation of how we match def\n\t\/\/ routes.\n\tdefPath := `\/.defs\/` + DefPathPattern\n\trepoRev.Path(defPath).Methods(\"GET\").PostMatchFunc(FixDefUnitVars).BuildVarsFunc(PrepareDefRouteVars).Name(Def)\n\tdef := repoRev.PathPrefix(defPath).PostMatchFunc(FixDefUnitVars).BuildVarsFunc(PrepareDefRouteVars).Subrouter()\n\tdef.Path(\"\/.refs\").Methods(\"GET\").Name(DefRefs)\n\tdef.Path(\"\/.examples\").Methods(\"GET\").Name(DefExamples)\n\tdef.Path(\"\/.authors\").Methods(\"GET\").Name(DefAuthors)\n\tdef.Path(\"\/.clients\").Methods(\"GET\").Name(DefClients)\n\tdef.Path(\"\/.dependents\").Methods(\"GET\").Name(DefDependents)\n\tdef.Path(\"\/.versions\").Methods(\"GET\").Name(DefVersions)\n\n\tbase.Path(\"\/.units\").Methods(\"GET\").Name(Units)\n\tunitPath := `\/.units\/{UnitType}\/{Unit:.*}`\n\trepoRev.Path(unitPath).Methods(\"GET\").Name(Unit)\n\n\tbase.Path(\"\/markdown\").Methods(\"POST\").Name(Markdown)\n\n\tbase.Path(\"\/ext\/github\/webhook\").Methods(\"POST\").Name(ExtGitHubReceiveWebhook)\n\n\tif ExtraConfig != nil {\n\t\tExtraConfig(base, user)\n\t}\n\n\treturn base\n}\n\n\/\/ ExtraConfig, if non-nil, is called by NewAPIRouter with the\n\/\/ *mux.Router after setting up routes in this package and before\n\/\/ returning it. It can be used by external packages that use this API\n\/\/ router and want to add additional routes to it.\nvar ExtraConfig func(base, user *mux.Router)\n<commit_msg>support multiple badge formats<commit_after>package router\n\nimport \"github.com\/sqs\/mux\"\n\nconst (\n\tBuild = \"build\"\n\tBuildDequeueNext = \"build.dequeue-next\"\n\tBuildUpdate = \"build.update\"\n\tBuildLog = \"build.log\"\n\tBuilds = \"builds\"\n\tBuildTasks = \"build.tasks\"\n\tBuildTaskUpdate = \"build.task\"\n\tBuildTasksCreate = \"build.tasks.create\"\n\tBuildTaskLog = \"build.task.log\"\n\n\tOrg = \"org\"\n\tOrgMembers = \"org.members\"\n\tOrgSettings = \"org.settings\"\n\tOrgSettingsUpdate = \"org.settings.update\"\n\n\tUsers = \"users\"\n\tUser = \"user\"\n\tUserOrgs = \"user.orgs\"\n\tUserAuthors = \"user.authors\"\n\tUserClients = \"user.clients\"\n\tUserEmails = \"user.emails\"\n\tUserFromGitHub = \"user.from-github\"\n\tUserRepoContributions = \"user.repo-contributions\"\n\tUserRepoDependencies = \"user.repo-dependencies\"\n\tUserRepoDependents = \"user.repo-dependents\"\n\tUserRefreshProfile = \"user.refresh-profile\"\n\tUserSettings = \"user.settings\"\n\tUserSettingsUpdate = \"user.settings.update\"\n\tUserComputeStats = \"user.compute-stats\"\n\n\tPerson = \"person\"\n\n\tRepoPullRequests = \"repo.pull-requests\"\n\tRepoPullRequest = \"repo.pull-request\"\n\tRepoPullRequestMerge = \"repo.pull-request.merge\"\n\tRepoPullRequestComments = \"repo.pull-request.comments\"\n\tRepoPullRequestCommentsCreate = \"repo.pull-request.comments.create\"\n\tRepoPullRequestCommentsEdit = \"repo.pull-request.comments.edit\"\n\tRepoPullRequestCommentsDelete = \"repo.pull-request.comments.delete\"\n\n\tRepoIssues = \"repo.issues\"\n\tRepoIssue = \"repo.issue\"\n\tRepoIssueComments = \"repo.issue.comments\"\n\tRepoIssueCommentsCreate = \"repo.issue.comments.create\"\n\tRepoIssueCommentsEdit = \"repo.issue.comments.edit\"\n\tRepoIssueCommentsDelete = \"repo.issue.comments.delete\"\n\n\tRepos = \"repos\"\n\tReposCreate = \"repos.create\"\n\tReposGetOrCreate = \"repos.get-or-create\"\n\tRepo = \"repo\"\n\tRepoAuthors = \"repo.authors\"\n\tRepoClients = \"repo.clients\"\n\tRepoDependents = \"repo.dependents\"\n\tRepoDependencies = \"repo.dependencies\"\n\tRepoBadge = \"repo.badge\"\n\tRepoBadges = \"repo.badges\"\n\tRepoCounter = \"repo.counter\"\n\tRepoCounters = \"repo.counters\"\n\tRepoReadme = \"repo.readme\"\n\tRepoBuilds = \"repo.builds\"\n\tRepoBuildsCreate = \"repo.builds.create\"\n\tRepoBuildDataEntry = \"repo.build-data.entry\"\n\tRepoTreeEntry = \"repo.tree.entry\"\n\tRepoRefreshProfile = \"repo.refresh-profile\"\n\tRepoRefreshVCSData = \"repo.refresh-vcs-data\"\n\tRepoComputeStats = \"repo.compute-stats\"\n\n\tRepoSettings = \"repo.settings\"\n\tRepoSettingsUpdate = \"repo.settings.update\"\n\n\tRepoStats = \"repo.stats\"\n\tRepoCombinedStatus = \"repo.combined-status\"\n\n\tRepoBuild = \"repo.build\"\n\n\tRepoCommits = \"repo.commits\"\n\tRepoCommit = \"repo.commit\"\n\tRepoCompareCommits = \"repo.compare-commits\"\n\tRepoTags = \"repo.tags\"\n\tRepoBranches = \"repo.branches\"\n\n\tSearch = \"search\"\n\tSearchComplete = \"search.complete\"\n\n\tSearchSuggestions = \"search.suggestions\"\n\n\tSnippet = \"snippet\"\n\n\tDefs = \"defs\"\n\tDef = \"def\"\n\tDefRefs = \"def.refs\"\n\tDefExamples = \"def.examples\"\n\tDefAuthors = \"def.authors\"\n\tDefClients = \"def.clients\"\n\tDefDependents = \"def.dependents\"\n\tDefVersions = \"def.versions\"\n\n\tDelta = \"delta\"\n\tDeltaDefs = \"delta.defs\"\n\tDeltaDependencies = \"delta.dependencies\"\n\tDeltaFiles = \"delta.files\"\n\tDeltaAffectedAuthors = \"delta.affected-authors\"\n\tDeltaAffectedClients = \"delta.affected-clients\"\n\tDeltaAffectedDependents = \"delta.affected-dependents\"\n\tDeltaReviewers = \"delta.reviewers\"\n\tDeltasIncoming = \"deltas.incoming\"\n\n\tUnit = \"unit\"\n\tUnits = \"units\"\n\n\tMarkdown = \"markdown\"\n\n\tExtGitHubReceiveWebhook = \"ext.github.receive-webhook\"\n\n\t\/\/ Redirects for old routes.\n\tRedirectOldRepoBadgesAndCounters = \"repo.redirect-old-badges-and-counters\"\n)\n\n\/\/ NewAPIRouter creates a new API router with route URL pattern definitions but\n\/\/ no handlers attached to the routes.\n\/\/\n\/\/ It is in a separate package from app so that other packages may use it to\n\/\/ generate URLs without resulting in Go import cycles (and so we can release\n\/\/ the router as open-source to support our client library).\nfunc NewAPIRouter(base *mux.Router) *mux.Router {\n\tif base == nil {\n\t\tbase = mux.NewRouter()\n\t}\n\n\tbase.StrictSlash(true)\n\n\tbase.Path(\"\/builds\").Methods(\"GET\").Name(Builds)\n\tbuilds := base.PathPrefix(\"\/builds\").Subrouter()\n\tbuilds.Path(\"\/next\").Methods(\"POST\").Name(BuildDequeueNext)\n\tbuildPath := \"\/{BID}\"\n\tbuilds.Path(buildPath).Methods(\"GET\").Name(Build)\n\tbuilds.Path(buildPath).Methods(\"PUT\").Name(BuildUpdate)\n\tbuild := builds.PathPrefix(buildPath).Subrouter()\n\tbuild.Path(\"\/log\").Methods(\"GET\").Name(BuildLog)\n\tbuild.Path(\"\/tasks\").Methods(\"GET\").Name(BuildTasks)\n\tbuild.Path(\"\/tasks\").Methods(\"POST\").Name(BuildTasksCreate)\n\tbuild.Path(\"\/tasks\/{TaskID}\").Methods(\"PUT\").Name(BuildTaskUpdate)\n\tbuild.Path(\"\/tasks\/{TaskID}\/log\").Methods(\"GET\").Name(BuildTaskLog)\n\n\tbase.Path(\"\/repos\").Methods(\"GET\").Name(Repos)\n\tbase.Path(\"\/repos\").Methods(\"POST\").Name(ReposCreate)\n\n\tbase.Path(\"\/repos\/github.com\/{owner:[^\/]+}\/{repo:[^\/]+}\/{what:(?:badges|counters)}\/{which}.{Format}\").Methods(\"GET\").Name(RedirectOldRepoBadgesAndCounters)\n\n\trepoRev := base.PathPrefix(`\/repos\/` + RepoRevSpecPattern).PostMatchFunc(FixRepoRevSpecVars).BuildVarsFunc(PrepareRepoRevSpecRouteVars).Subrouter()\n\trepoRev.Path(\"\/.stats\").Methods(\"PUT\").Name(RepoComputeStats)\n\trepoRev.Path(\"\/.stats\").Methods(\"GET\").Name(RepoStats)\n\trepoRev.Path(\"\/.status\").Methods(\"GET\").Name(RepoCombinedStatus)\n\trepoRev.Path(\"\/.authors\").Methods(\"GET\").Name(RepoAuthors)\n\trepoRev.Path(\"\/.readme\").Methods(\"GET\").Name(RepoReadme)\n\trepoRev.Path(\"\/.build\").Methods(\"GET\").Name(RepoBuild)\n\trepoRev.Path(\"\/.dependencies\").Methods(\"GET\").Name(RepoDependencies)\n\trepoRev.PathPrefix(\"\/.build-data\"+TreeEntryPathPattern).PostMatchFunc(FixTreeEntryVars).BuildVarsFunc(PrepareTreeEntryRouteVars).Methods(\"GET\", \"HEAD\", \"PUT\", \"DELETE\").Name(RepoBuildDataEntry)\n\trepoRev.Path(\"\/.badges\/{Badge}.{Format}\").Methods(\"GET\").Name(RepoBadge)\n\n\t\/\/ repo contains routes that are NOT specific to a revision. In these routes, the URL may not contain a revspec after the repo (that is, no \"github.com\/foo\/bar@myrevspec\").\n\trepoPath := `\/repos\/` + RepoSpecPathPattern\n\tbase.Path(repoPath).Methods(\"GET\").Name(Repo)\n\tbase.Path(repoPath).Methods(\"PUT\").Name(ReposGetOrCreate)\n\trepo := base.PathPrefix(repoPath).Subrouter()\n\trepo.Path(\"\/.clients\").Methods(\"GET\").Name(RepoClients)\n\trepo.Path(\"\/.dependents\").Methods(\"GET\").Name(RepoDependents)\n\trepo.Path(\"\/.external-profile\").Methods(\"PUT\").Name(RepoRefreshProfile)\n\trepo.Path(\"\/.vcs-data\").Methods(\"PUT\").Name(RepoRefreshVCSData)\n\trepo.Path(\"\/.settings\").Methods(\"GET\").Name(RepoSettings)\n\trepo.Path(\"\/.settings\").Methods(\"PUT\").Name(RepoSettingsUpdate)\n\trepo.Path(\"\/.commits\").Methods(\"GET\").Name(RepoCommits)\n\trepo.Path(\"\/.commits\/{Rev:\" + PathComponentNoLeadingDot + \"}\/.compare\").Methods(\"GET\").Name(RepoCompareCommits)\n\trepo.Path(\"\/.commits\/{Rev:\" + PathComponentNoLeadingDot + \"}\").Methods(\"GET\").Name(RepoCommit)\n\trepo.Path(\"\/.branches\").Methods(\"GET\").Name(RepoBranches)\n\trepo.Path(\"\/.tags\").Methods(\"GET\").Name(RepoTags)\n\trepo.Path(\"\/.badges\").Methods(\"GET\").Name(RepoBadges)\n\trepo.Path(\"\/.counters\").Methods(\"GET\").Name(RepoCounters)\n\trepo.Path(\"\/.counters\/{Counter}.{Format}\").Methods(\"GET\").Name(RepoCounter)\n\trepo.Path(\"\/.builds\").Methods(\"GET\").Name(RepoBuilds)\n\trepo.Path(\"\/.builds\").Methods(\"POST\").Name(RepoBuildsCreate)\n\n\trepo.Path(\"\/.pulls\").Methods(\"GET\").Name(RepoPullRequests)\n\tpullPath := \"\/.pulls\/{Pull}\"\n\trepo.Path(pullPath).Methods(\"GET\").Name(RepoPullRequest)\n\tpull := repo.PathPrefix(pullPath).Subrouter()\n\tpull.Path(\"\/merge\").Methods(\"PUT\").Name(RepoPullRequestMerge)\n\tpull.Path(\"\/comments\").Methods(\"GET\").Name(RepoPullRequestComments)\n\tpull.Path(\"\/comments\").Methods(\"POST\").Name(RepoPullRequestCommentsCreate)\n\tpull.Path(\"\/comments\/{CommentID}\").Methods(\"PATCH\", \"PUT\").Name(RepoPullRequestCommentsEdit)\n\tpull.Path(\"\/comments\/{CommentID}\").Methods(\"DELETE\").Name(RepoPullRequestCommentsDelete)\n\n\trepo.Path(\"\/.issues\").Methods(\"GET\").Name(RepoIssues)\n\tissuePath := \"\/.issues\/{Issue}\"\n\trepo.Path(issuePath).Methods(\"GET\").Name(RepoIssue)\n\tissue := repo.PathPrefix(issuePath).Subrouter()\n\tissue.Path(\"\/comments\").Methods(\"GET\").Name(RepoIssueComments)\n\tissue.Path(\"\/comments\").Methods(\"POST\").Name(RepoIssueCommentsCreate)\n\tissue.Path(\"\/comments\/{CommentID}\").Methods(\"PATCH\", \"PUT\").Name(RepoIssueCommentsEdit)\n\tissue.Path(\"\/comments\/{CommentID}\").Methods(\"DELETE\").Name(RepoIssueCommentsDelete)\n\n\tdeltaPath := \"\/.deltas\/{Rev:.+}..{DeltaHeadRev:\" + PathComponentNoLeadingDot + \"}\"\n\trepo.Path(deltaPath).Methods(\"GET\").Name(Delta)\n\tdeltas := repo.PathPrefix(deltaPath).Subrouter()\n\tdeltas.Path(\"\/.defs\").Methods(\"GET\").Name(DeltaDefs)\n\tdeltas.Path(\"\/.dependencies\").Methods(\"GET\").Name(DeltaDependencies)\n\tdeltas.Path(\"\/.files\").Methods(\"GET\").Name(DeltaFiles)\n\tdeltas.Path(\"\/.affected-authors\").Methods(\"GET\").Name(DeltaAffectedAuthors)\n\tdeltas.Path(\"\/.affected-clients\").Methods(\"GET\").Name(DeltaAffectedClients)\n\tdeltas.Path(\"\/.affected-dependents\").Methods(\"GET\").Name(DeltaAffectedDependents)\n\tdeltas.Path(\"\/.reviewers\").Methods(\"GET\").Name(DeltaReviewers)\n\n\trepo.Path(\"\/.deltas-incoming\").Methods(\"GET\").Name(DeltasIncoming)\n\n\t\/\/ See router_util\/tree_route.go for an explanation of how we match tree\n\t\/\/ entry routes.\n\trepoRev.Path(\"\/.tree\" + TreeEntryPathPattern).PostMatchFunc(FixTreeEntryVars).BuildVarsFunc(PrepareTreeEntryRouteVars).Methods(\"GET\").Name(RepoTreeEntry)\n\n\tbase.Path(`\/people\/` + PersonSpecPattern).Methods(\"GET\").Name(Person)\n\n\tbase.Path(\"\/users\").Methods(\"GET\").Name(Users)\n\tuserPath := `\/users\/` + UserSpecPattern\n\tbase.Path(userPath).Methods(\"GET\").Name(User)\n\tuser := base.PathPrefix(userPath).Subrouter()\n\tuser.Path(\"\/orgs\").Methods(\"GET\").Name(UserOrgs)\n\tuser.Path(\"\/clients\").Methods(\"GET\").Name(UserClients)\n\tuser.Path(\"\/authors\").Methods(\"GET\").Name(UserAuthors)\n\tuser.Path(\"\/emails\").Methods(\"GET\").Name(UserEmails)\n\tuser.Path(\"\/repo-contributions\").Methods(\"GET\").Name(UserRepoContributions)\n\tuser.Path(\"\/repo-dependencies\").Methods(\"GET\").Name(UserRepoDependencies)\n\tuser.Path(\"\/repo-dependents\").Methods(\"GET\").Name(UserRepoDependents)\n\tuser.Path(\"\/external-profile\").Methods(\"PUT\").Name(UserRefreshProfile)\n\tuser.Path(\"\/stats\").Methods(\"PUT\").Name(UserComputeStats)\n\tuser.Path(\"\/settings\").Methods(\"GET\").Name(UserSettings)\n\tuser.Path(\"\/settings\").Methods(\"PUT\").Name(UserSettingsUpdate)\n\tbase.Path(\"\/external-users\/github\/{GitHubUserSpec}\").Methods(\"GET\").Name(UserFromGitHub)\n\n\torgPath := \"\/orgs\/{OrgSpec}\"\n\tbase.Path(orgPath).Methods(\"GET\").Name(Org)\n\torg := base.PathPrefix(orgPath).Subrouter()\n\torg.Path(\"\/settings\").Methods(\"GET\").Name(OrgSettings)\n\torg.Path(\"\/settings\").Methods(\"PUT\").Name(OrgSettingsUpdate)\n\torg.Path(\"\/members\").Methods(\"GET\").Name(OrgMembers)\n\n\tbase.Path(\"\/search\").Methods(\"GET\").Name(Search)\n\tbase.Path(\"\/search\/complete\").Methods(\"GET\").Name(SearchComplete)\n\tbase.Path(\"\/search\/suggestions\").Methods(\"GET\").Name(SearchSuggestions)\n\n\tbase.Path(\"\/snippet\").Methods(\"GET\", \"POST\", \"ORIGIN\").Name(Snippet)\n\n\tbase.Path(\"\/.defs\").Methods(\"GET\").Name(Defs)\n\n\t\/\/ See router_util\/def_route.go for an explanation of how we match def\n\t\/\/ routes.\n\tdefPath := `\/.defs\/` + DefPathPattern\n\trepoRev.Path(defPath).Methods(\"GET\").PostMatchFunc(FixDefUnitVars).BuildVarsFunc(PrepareDefRouteVars).Name(Def)\n\tdef := repoRev.PathPrefix(defPath).PostMatchFunc(FixDefUnitVars).BuildVarsFunc(PrepareDefRouteVars).Subrouter()\n\tdef.Path(\"\/.refs\").Methods(\"GET\").Name(DefRefs)\n\tdef.Path(\"\/.examples\").Methods(\"GET\").Name(DefExamples)\n\tdef.Path(\"\/.authors\").Methods(\"GET\").Name(DefAuthors)\n\tdef.Path(\"\/.clients\").Methods(\"GET\").Name(DefClients)\n\tdef.Path(\"\/.dependents\").Methods(\"GET\").Name(DefDependents)\n\tdef.Path(\"\/.versions\").Methods(\"GET\").Name(DefVersions)\n\n\tbase.Path(\"\/.units\").Methods(\"GET\").Name(Units)\n\tunitPath := `\/.units\/{UnitType}\/{Unit:.*}`\n\trepoRev.Path(unitPath).Methods(\"GET\").Name(Unit)\n\n\tbase.Path(\"\/markdown\").Methods(\"POST\").Name(Markdown)\n\n\tbase.Path(\"\/ext\/github\/webhook\").Methods(\"POST\").Name(ExtGitHubReceiveWebhook)\n\n\tif ExtraConfig != nil {\n\t\tExtraConfig(base, user)\n\t}\n\n\treturn base\n}\n\n\/\/ ExtraConfig, if non-nil, is called by NewAPIRouter with the\n\/\/ *mux.Router after setting up routes in this package and before\n\/\/ returning it. It can be used by external packages that use this API\n\/\/ router and want to add additional routes to it.\nvar ExtraConfig func(base, user *mux.Router)\n<|endoftext|>"} {"text":"<commit_before>package example\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\ntype GreeterHandler struct {\n\tsrv GreeterServer\n}\n\nfunc NewGreeterHandler(srv GreeterServer) *GreeterHandler {\n\treturn &GreeterHandler{\n\t\tsrv: srv,\n\t}\n}\n\nfunc (h *GreeterHandler) SayHello(cb func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error)) http.HandlerFunc {\n\tif cb == nil {\n\t\tcb = func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error) {\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tfmt.Fprintf(w, \"%v: arg = %v: ret = %v\", err, arg, ret)\n\t\t\t}\n\t\t}\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\targ := &HelloRequest{}\n\n\t\tcontentType := r.Header.Get(\"Content-Type\")\n\t\tswitch contentType {\n\t\tcase \"application\/protobuf\", \"application\/x-protobuf\":\n\t\t\tif err := proto.Unmarshal(body, arg); err != nil {\n\t\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"application\/json\":\n\t\t\tif err := jsonpb.Unmarshal(bytes.NewBuffer(body), arg); err != nil {\n\t\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusUnsupportedMediaType)\n\t\t\t_, err := fmt.Fprintf(w, \"Unsupported Content-Type: %s\", contentType)\n\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tret, err := h.srv.SayHello(ctx, arg)\n\t\tif err != nil {\n\t\t\tcb(ctx, w, r, arg, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch contentType {\n\t\tcase \"application\/protobuf\", \"application\/x-protobuf\":\n\t\t\tbuf, err := proto.Marshal(ret)\n\t\t\tif err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"application\/json\":\n\t\t\tif err := json.NewEncoder(w).Encode(ret); err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusUnsupportedMediaType)\n\t\t\t_, err := fmt.Fprintf(w, \"Unsupported Content-Type: %s\", contentType)\n\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\treturn\n\t\t}\n\t\tcb(ctx, w, r, arg, ret, nil)\n\t})\n}\n<commit_msg>Update example helloworld.http.go<commit_after>package example\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\ntype GreeterHandler struct {\n\tsrv GreeterServer\n}\n\nfunc NewGreeterHandler(srv GreeterServer) *GreeterHandler {\n\treturn &GreeterHandler{\n\t\tsrv: srv,\n\t}\n}\n\nfunc (h *GreeterHandler) SayHello(cb func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error)) http.HandlerFunc {\n\tif cb == nil {\n\t\tcb = func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error) {\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tfmt.Fprintf(w, \"%v: arg = %v: ret = %v\", err, arg, ret)\n\t\t\t}\n\t\t}\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\targ := &HelloRequest{}\n\n\t\tcontentType := r.Header.Get(\"Content-Type\")\n\t\tswitch contentType {\n\t\tcase \"application\/protobuf\", \"application\/x-protobuf\":\n\t\t\tif err := proto.Unmarshal(body, arg); err != nil {\n\t\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"application\/json\":\n\t\t\tif err := jsonpb.Unmarshal(bytes.NewBuffer(body), arg); err != nil {\n\t\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusUnsupportedMediaType)\n\t\t\t_, err := fmt.Fprintf(w, \"Unsupported Content-Type: %s\", contentType)\n\t\t\tcb(ctx, w, r, nil, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tret, err := h.srv.SayHello(ctx, arg)\n\t\tif err != nil {\n\t\t\tcb(ctx, w, r, arg, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch contentType {\n\t\tcase \"application\/protobuf\", \"application\/x-protobuf\":\n\t\t\tbuf, err := proto.Marshal(ret)\n\t\t\tif err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"application\/json\":\n\t\t\tif err := json.NewEncoder(w).Encode(ret); err != nil {\n\t\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusUnsupportedMediaType)\n\t\t\t_, err := fmt.Fprintf(w, \"Unsupported Content-Type: %s\", contentType)\n\t\t\tcb(ctx, w, r, arg, ret, err)\n\t\t\treturn\n\t\t}\n\t\tcb(ctx, w, r, arg, ret, nil)\n\t})\n}\n\nfunc (h *GreeterHandler) SayHelloWithPath(cb func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error)) (string, http.HandlerFunc) {\n\treturn \"greeter\/sayhello\", h.SayHello(cb)\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/adrienkohlbecker\/ejson-kms\/model\"\n\t\"github.com\/adrienkohlbecker\/ejson-kms\/utils\"\n)\n\nconst docExport = `\nexport: Export a secrets file in it's decrypted form.\n\nEach secret in the file will be decrypted and output to standard out.\nA number of formats are available:\n\n * bash: SECRET='password'\n * dotenv: SECRET=\"password\"\n * json: { \"secret\": \"password\" }\n * yaml: ??????????\n\nPlease be careful when exporting your secrets, do not save them to disk!\n`\nconst exampleExport = `\nejson-kms export\nejson-kms export --format=json\nejson-kms export --path=secrets.json --format=dotenv\n`\n\nfunc exportCmd() *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"export the decrypted secrets\",\n\t\tLong: strings.TrimSpace(docExport),\n\t\tExample: strings.TrimSpace(exampleExport),\n\t}\n\n\tvar (\n\t\tstorePath = \".secrets.json\"\n\t\tformat = \"bash\"\n\t)\n\n\tcmd.Flags().StringVar(&storePath, \"path\", storePath, \"path of the secrets file\")\n\tcmd.Flags().StringVar(&format, \"format\", format, \"format of the generated output (bash|dotenv|json|yaml)\")\n\n\tcmd.RunE = func(_ *cobra.Command, args []string) error {\n\n\t\terr := utils.ValidSecretsPath(storePath)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Invalid path\", 0)\n\t\t}\n\n\t\tstore, err := model.Load(storePath)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to load JSON\", 0)\n\t\t}\n\n\t\tformatter, err := utils.ValidFormatter(format)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Invalid formatter\", 0)\n\t\t}\n\n\t\tclient, err := kmsDefaultClient()\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to initialize AWS client\", 0)\n\t\t}\n\n\t\titems, err := store.ExportPlaintext(client)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to export items\", 0)\n\t\t}\n\n\t\terr = formatter(cmd.OutOrStdout(), items)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to export items\", 0)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn cmd\n\n}\n<commit_msg>Fix docstring<commit_after>package cli\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/adrienkohlbecker\/ejson-kms\/model\"\n\t\"github.com\/adrienkohlbecker\/ejson-kms\/utils\"\n)\n\nconst docExport = `\nexport: Export a secrets file in it's decrypted form.\n\nEach secret in the file will be decrypted and output to standard out.\nA number of formats are available:\n\n * bash: SECRET='password'\n * dotenv: SECRET=\"password\"\n * json: { \"secret\": \"password\" }\n * yaml: secret: password\n\nPlease be careful when exporting your secrets, do not save them to disk!\n`\nconst exampleExport = `\nejson-kms export\nejson-kms export --format=json\nejson-kms export --path=secrets.json --format=dotenv\n`\n\nfunc exportCmd() *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"export the decrypted secrets\",\n\t\tLong: strings.TrimSpace(docExport),\n\t\tExample: strings.TrimSpace(exampleExport),\n\t}\n\n\tvar (\n\t\tstorePath = \".secrets.json\"\n\t\tformat = \"bash\"\n\t)\n\n\tcmd.Flags().StringVar(&storePath, \"path\", storePath, \"path of the secrets file\")\n\tcmd.Flags().StringVar(&format, \"format\", format, \"format of the generated output (bash|dotenv|json|yaml)\")\n\n\tcmd.RunE = func(_ *cobra.Command, args []string) error {\n\n\t\terr := utils.ValidSecretsPath(storePath)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Invalid path\", 0)\n\t\t}\n\n\t\tstore, err := model.Load(storePath)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to load JSON\", 0)\n\t\t}\n\n\t\tformatter, err := utils.ValidFormatter(format)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Invalid formatter\", 0)\n\t\t}\n\n\t\tclient, err := kmsDefaultClient()\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to initialize AWS client\", 0)\n\t\t}\n\n\t\titems, err := store.ExportPlaintext(client)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to export items\", 0)\n\t\t}\n\n\t\terr = formatter(cmd.OutOrStdout(), items)\n\t\tif err != nil {\n\t\t\treturn errors.WrapPrefix(err, \"Unable to export items\", 0)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn cmd\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"hash\/crc32\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tarm\/serial\"\n\t\"gopkg.in\/readline.v1\"\n)\n\nvar (\n\trlInstance *readline.Instance\n\tconn io.ReadWriter\n\tserIn = make(chan []byte)\n\toutBound = make(chan string)\n\tprogress = make(chan bool, 1)\n\tincLevel = make(chan int)\n\n\tport = flag.String(\"p\", \"\", \"serial port (required: \/dev\/tty* or COM*)\")\n\texe = flag.Bool(\"x\", false, \"executable (consumes all remaining args)\")\n\tbaud = flag.Int(\"b\", 115200, \"baud rate\")\n\tupload = flag.String(\"u\", \"\", \"upload the specified firmware, then quit\")\n\texpand = flag.String(\"e\", \"\", \"expand specified file to stdout, then quit\")\n\tverbose = flag.Bool(\"v\", false, \"verbose output, for debugging only\")\n\tcapture = flag.String(\"c\", \"\", \"a file where captured output is appended\")\n\ttimeout = flag.Duration(\"t\", 500*time.Millisecond, \"serial echo timeout\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tvar err error\n\n\t\/\/ expansion does not use the serial port, it just expands include lines\n\tif *expand != \"\" {\n\t\texpandFile()\n\t\treturn\n\t}\n\n\tif *exe && flag.NArg() > 0 {\n\t\tconn, err = launch(flag.Args())\n\t\t*port = \"(\" + strings.Join(flag.Args(), \" \") + \")\"\n\t} else if *port == \"\" || flag.NArg() > 0 {\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t\tconn, err = connect(*port)\n\t}\n\tcheck(err)\n\t\/\/defer conn.Close()\n\tfmt.Println(\"Connected to:\", *port)\n\n\tgo serialInput() \/\/ feed the serIn channel\n\n\t\/\/ firmware upload uses serial in a different way, needs to quit when done\n\tif *upload != \"\" {\n\t\tfirmwareUpload()\n\t\treturn\n\t}\n\n\trlInstance, err = readline.NewEx(&readline.Config{\n\t\tUniqueEditLine: true,\n\t})\n\tcheck(err)\n\tdefer rlInstance.Close()\n\n\tgo serialExchange()\n\n\t\/\/conn.Flush()\n\treadWithTimeout() \/\/ get rid of partial pending data\n\tfor {\n\t\tline, err := rlInstance.Readline()\n\t\tif err != nil { \/\/ io.EOF, readline.ErrInterrupt\n\t\t\tbreak\n\t\t}\n\t\tparseAndSend(line)\n\t\tif strings.HasPrefix(line, \"include \") {\n\t\t\tfmt.Println(\"\\\\ done.\")\n\t\t}\n\t}\n}\n\n\/\/ isNetPort returns true if the argument is of the form \"...:<N>\"\nfunc isNetPort(s string) bool {\n\tif n := strings.Index(s, \":\"); n > 0 {\n\t\tp, e := strconv.Atoi(s[n+1:])\n\t\treturn e == nil && p > 0\n\t}\n\treturn false\n}\n\n\/\/ connect to either a net port for telnet use, or to a serial device\nfunc connect(name string) (io.ReadWriter, error) {\n\tif isNetPort(name) {\n\t\treturn net.Dial(\"tcp\", name)\n\t}\n\n\tconfig := serial.Config{Name: *port, Baud: *baud}\n\tif *upload != \"\" {\n\t\tconfig.Parity = serial.ParityEven\n\t}\n\treturn serial.OpenPort(&config)\n}\n\n\/\/ launch an executable and talk to it via pipes\nfunc launch(args []string) (io.ReadWriter, error) {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tvar cmdio exePipe\n\tvar err error\n\tcmdio.to, err = cmd.StdinPipe()\n\tif err == nil {\n\t\tcmdio.from, err = cmd.StdoutPipe()\n\t\tif err == nil {\n\t\t\terr = cmd.Start()\n\t\t}\n\t}\n\treturn &cmdio, err\n}\n\ntype exePipe struct {\n\tfrom io.Reader\n\tto io.Writer\n}\n\nfunc (e *exePipe) Read(p []byte) (n int, err error) {\n\treturn e.from.Read(p)\n}\n\nfunc (e *exePipe) Write(p []byte) (n int, err error) {\n\treturn e.to.Write(p)\n}\n\nfunc parseAndSend(line string) {\n\tif strings.HasPrefix(line, \"include \") {\n\t\tfor _, fname := range strings.Split(line[8:], \" \") {\n\t\t\tdoInclude(fname)\n\t\t}\n\t} else {\n\t\toutBound <- line\n\t\t<-progress\n\t}\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tif rlInstance != nil {\n\t\t\trlInstance.Close()\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc serialInput() {\n\tvar f *os.File\n\tif *capture != \"\" {\n\t\tvar err error\n\t\topts := os.O_WRONLY | os.O_APPEND | os.O_CREATE\n\t\tf, err = os.OpenFile(*capture, opts, 0666)\n\t\tcheck(err)\n\t\tdefer f.Close()\n\t}\n\tfor {\n\t\tbuf := make([]byte, 600)\n\t\tn, err := conn.Read(buf)\n\t\tcheck(err)\n\t\tif n == 0 {\n\t\t\tclose(serIn)\n\t\t\treturn\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Write(buf[:n])\n\t\t}\n\t\tserIn <- buf[:n]\n\t}\n}\n\nfunc readWithTimeout() []byte {\n\tselect {\n\tcase data := <-serIn:\n\t\treturn data\n\tcase <-time.After(*timeout):\n\t\treturn nil\n\t}\n}\n\nfunc serialExchange() {\n\tdepth := 0\n\tfor {\n\t\tselect {\n\n\t\tcase data := <-serIn:\n\t\t\tif len(data) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Print(string(data))\n\n\t\tcase line := <-outBound:\n\t\t\timmediate := depth == 0\n\t\t\t\/\/ the task here is to omit \"normal\" output for included lines,\n\t\t\t\/\/ i.e. lines which only generate an echo, a space, and \" ok.\\n\"\n\t\t\t\/\/ everything else should be echoed in full, including the input\n\t\t\tif len(line) > 0 {\n\t\t\t\tserialSend(line)\n\t\t\t\tprefix, matched := expectEcho(line, false, func(s string) {\n\t\t\t\t\tfmt.Print(s) \/\/ called to flush pending serial input lines\n\t\t\t\t})\n\t\t\t\tfmt.Print(prefix)\n\t\t\t\tif matched && immediate {\n\t\t\t\t\tfmt.Print(line)\n\t\t\t\t\tline = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ now that the echo is done, send a CR and wait for the prompt\n\t\t\tserialSend(\"\\r\")\n\t\t\tprompt := \" ok.\\n\"\n\t\t\tprefix, matched := expectEcho(prompt, immediate, func(s string) {\n\t\t\t\tfmt.Print(line + s) \/\/ show original command first\n\t\t\t\tline = \"\"\n\t\t\t})\n\t\t\tif !matched {\n\t\t\t\tprompt = \"\"\n\t\t\t}\n\t\t\tif immediate || prefix != \" \" || !matched {\n\t\t\t\tfmt.Print(line + prefix + prompt)\n\t\t\t}\n\t\t\t\/\/ signal to sender that this request has been processed\n\t\t\tprogress <- matched\n\n\t\tcase n := <-incLevel:\n\t\t\tdepth += n\n\t\t}\n\t}\n}\n\nfunc expectEcho(match string, immed bool, flusher func(string)) (string, bool) {\n\tvar collected []byte\n\tfor {\n\t\tdata := readWithTimeout()\n\t\tcollected = append(collected, data...)\n\t\tif bytes.HasSuffix(collected, []byte(match)) {\n\t\t\tbytesBefore := len(collected) - len(match)\n\t\t\treturn string(collected[:bytesBefore]), true\n\t\t}\n\t\tif immed || len(data) == 0 {\n\t\t\treturn string(collected), false\n\t\t}\n\t\tif n := bytes.LastIndexByte(collected, '\\n'); n >= 0 {\n\t\t\tflusher(string(collected[:n+1]))\n\t\t\tcollected = collected[n+1:]\n\t\t}\n\t}\n}\n\nfunc serialSend(data string) {\n\t_, err := conn.Write([]byte(data))\n\tcheck(err)\n}\n\nfunc doInclude(fname string) {\n\tif fname == \"\" {\n\t\treturn \/\/ silently ignore empty files\n\t}\n\n\tincLevel <- +1\n\tdefer func() { incLevel <- -1 }()\n\n\tlineNum := 0\n\tfmt.Printf(\"\\\\ >>> include %s\\n\", fname)\n\tdefer func() {\n\t\tfmt.Printf(\"\\\\ <<<<<<<<<<< %s (%d lines)\\n\", fname, lineNum)\n\t}()\n\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlineNum++\n\n\t\ts := strings.TrimLeft(line, \" \")\n\t\tif s == \"\" || strings.HasPrefix(s, \"\\\\ \") {\n\t\t\tif len(*expand) == 0 {\n\t\t\t\tcontinue \/\/ don't send empty or comment-only lines\n\t\t\t}\n\t\t}\n\n\t\tparseAndSend(line)\n\t}\n}\n\nfunc expandFile() {\n\tgo func() {\n\t\tfor line := range outBound {\n\t\t\tfmt.Println(line)\n\t\t\tprogress <- true\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor range incLevel {\n\t\t}\n\t}()\n\n\tfor _, fname := range strings.Split(*expand, \",\") {\n\t\tdoInclude(fname)\n\t}\n}\n\nfunc hexToBin(data []byte) []byte {\n\tvar bin []byte\n\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\tif strings.HasSuffix(line, \"\\r\") {\n\t\t\tline = line[:len(line)-1]\n\t\t}\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] != ':' || len(line) < 11 {\n\t\t\tfmt.Println(\"Not ihex format:\", line)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tbytes, err := hex.DecodeString(line[1:])\n\t\tcheck(err)\n\t\tif bytes[3] != 0x00 {\n\t\t\tcontinue\n\t\t}\n\t\toffset := (int(bytes[1]) << 8) + int(bytes[2])\n\t\tlength := bytes[0]\n\t\tfor offset > len(bin) {\n\t\t\tbin = append(bin, 0xFF)\n\t\t}\n\t\tbin = append(bin, bytes[4:4+length]...)\n\t}\n\treturn bin\n}\n\nfunc firmwareUpload() {\n\tf, err := os.Open(*upload)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tcheck(err)\n\n\t\/\/ convert to binary if first bytes look like they are in hex format\n\ttag := \"\"\n\tif len(data) > 11 && data[0] == ':' {\n\t\t_, err = hex.DecodeString(string(data[1:11]))\n\t\tif err == nil {\n\t\t\tdata = hexToBin(data)\n\t\t\ttag = \" (converted from Intel HEX)\"\n\t\t}\n\t}\n\n\tfmt.Printf(\" File: %s\\n\", *upload)\n\tfmt.Printf(\" Count: %d bytes%s\\n\", len(data), tag)\n\tfmt.Printf(\" Checksum: %08x hex\\n\", crc32.ChecksumIEEE(data))\n\n\tuploadSTM32(data)\n}\n<commit_msg>oops, fix last change, which broke serial port use<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"hash\/crc32\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tarm\/serial\"\n\t\"gopkg.in\/readline.v1\"\n)\n\nvar (\n\trlInstance *readline.Instance\n\tconn io.ReadWriter\n\tserIn = make(chan []byte)\n\toutBound = make(chan string)\n\tprogress = make(chan bool, 1)\n\tincLevel = make(chan int)\n\n\tport = flag.String(\"p\", \"\", \"serial port (required: \/dev\/tty* or COM*)\")\n\texe = flag.Bool(\"x\", false, \"executable (consumes all remaining args)\")\n\tbaud = flag.Int(\"b\", 115200, \"baud rate\")\n\tupload = flag.String(\"u\", \"\", \"upload the specified firmware, then quit\")\n\texpand = flag.String(\"e\", \"\", \"expand specified file to stdout, then quit\")\n\tverbose = flag.Bool(\"v\", false, \"verbose output, for debugging only\")\n\tcapture = flag.String(\"c\", \"\", \"a file where captured output is appended\")\n\ttimeout = flag.Duration(\"t\", 500*time.Millisecond, \"serial echo timeout\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tvar err error\n\n\t\/\/ expansion does not use the serial port, it just expands include lines\n\tif *expand != \"\" {\n\t\texpandFile()\n\t\treturn\n\t}\n\n\tif *exe && flag.NArg() > 0 {\n\t\tconn, err = launch(flag.Args())\n\t\t*port = \"(\" + strings.Join(flag.Args(), \" \") + \")\"\n\t} else if *port != \"\" && flag.NArg() == 0 {\n\t\tconn, err = connect(*port)\n\t} else {\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\tcheck(err)\n\t\/\/defer conn.Close()\n\tfmt.Println(\"Connected to:\", *port)\n\n\tgo serialInput() \/\/ feed the serIn channel\n\n\t\/\/ firmware upload uses serial in a different way, needs to quit when done\n\tif *upload != \"\" {\n\t\tfirmwareUpload()\n\t\treturn\n\t}\n\n\trlInstance, err = readline.NewEx(&readline.Config{\n\t\tUniqueEditLine: true,\n\t})\n\tcheck(err)\n\tdefer rlInstance.Close()\n\n\tgo serialExchange()\n\n\t\/\/conn.Flush()\n\treadWithTimeout() \/\/ get rid of partial pending data\n\tfor {\n\t\tline, err := rlInstance.Readline()\n\t\tif err != nil { \/\/ io.EOF, readline.ErrInterrupt\n\t\t\tbreak\n\t\t}\n\t\tparseAndSend(line)\n\t\tif strings.HasPrefix(line, \"include \") {\n\t\t\tfmt.Println(\"\\\\ done.\")\n\t\t}\n\t}\n}\n\n\/\/ isNetPort returns true if the argument is of the form \"...:<N>\"\nfunc isNetPort(s string) bool {\n\tif n := strings.Index(s, \":\"); n > 0 {\n\t\tp, e := strconv.Atoi(s[n+1:])\n\t\treturn e == nil && p > 0\n\t}\n\treturn false\n}\n\n\/\/ connect to either a net port for telnet use, or to a serial device\nfunc connect(name string) (io.ReadWriter, error) {\n\tif isNetPort(name) {\n\t\treturn net.Dial(\"tcp\", name)\n\t}\n\n\tconfig := serial.Config{Name: *port, Baud: *baud}\n\tif *upload != \"\" {\n\t\tconfig.Parity = serial.ParityEven\n\t}\n\treturn serial.OpenPort(&config)\n}\n\n\/\/ launch an executable and talk to it via pipes\nfunc launch(args []string) (io.ReadWriter, error) {\n\tcmd := exec.Command(args[0], args[1:]...)\n\tvar cmdio exePipe\n\tvar err error\n\tcmdio.to, err = cmd.StdinPipe()\n\tif err == nil {\n\t\tcmdio.from, err = cmd.StdoutPipe()\n\t\tif err == nil {\n\t\t\terr = cmd.Start()\n\t\t}\n\t}\n\treturn &cmdio, err\n}\n\ntype exePipe struct {\n\tfrom io.Reader\n\tto io.Writer\n}\n\nfunc (e *exePipe) Read(p []byte) (n int, err error) {\n\treturn e.from.Read(p)\n}\n\nfunc (e *exePipe) Write(p []byte) (n int, err error) {\n\treturn e.to.Write(p)\n}\n\nfunc parseAndSend(line string) {\n\tif strings.HasPrefix(line, \"include \") {\n\t\tfor _, fname := range strings.Split(line[8:], \" \") {\n\t\t\tdoInclude(fname)\n\t\t}\n\t} else {\n\t\toutBound <- line\n\t\t<-progress\n\t}\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tif rlInstance != nil {\n\t\t\trlInstance.Close()\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc serialInput() {\n\tvar f *os.File\n\tif *capture != \"\" {\n\t\tvar err error\n\t\topts := os.O_WRONLY | os.O_APPEND | os.O_CREATE\n\t\tf, err = os.OpenFile(*capture, opts, 0666)\n\t\tcheck(err)\n\t\tdefer f.Close()\n\t}\n\tfor {\n\t\tbuf := make([]byte, 600)\n\t\tn, err := conn.Read(buf)\n\t\tcheck(err)\n\t\tif n == 0 {\n\t\t\tclose(serIn)\n\t\t\treturn\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Write(buf[:n])\n\t\t}\n\t\tserIn <- buf[:n]\n\t}\n}\n\nfunc readWithTimeout() []byte {\n\tselect {\n\tcase data := <-serIn:\n\t\treturn data\n\tcase <-time.After(*timeout):\n\t\treturn nil\n\t}\n}\n\nfunc serialExchange() {\n\tdepth := 0\n\tfor {\n\t\tselect {\n\n\t\tcase data := <-serIn:\n\t\t\tif len(data) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Print(string(data))\n\n\t\tcase line := <-outBound:\n\t\t\timmediate := depth == 0\n\t\t\t\/\/ the task here is to omit \"normal\" output for included lines,\n\t\t\t\/\/ i.e. lines which only generate an echo, a space, and \" ok.\\n\"\n\t\t\t\/\/ everything else should be echoed in full, including the input\n\t\t\tif len(line) > 0 {\n\t\t\t\tserialSend(line)\n\t\t\t\tprefix, matched := expectEcho(line, false, func(s string) {\n\t\t\t\t\tfmt.Print(s) \/\/ called to flush pending serial input lines\n\t\t\t\t})\n\t\t\t\tfmt.Print(prefix)\n\t\t\t\tif matched && immediate {\n\t\t\t\t\tfmt.Print(line)\n\t\t\t\t\tline = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ now that the echo is done, send a CR and wait for the prompt\n\t\t\tserialSend(\"\\r\")\n\t\t\tprompt := \" ok.\\n\"\n\t\t\tprefix, matched := expectEcho(prompt, immediate, func(s string) {\n\t\t\t\tfmt.Print(line + s) \/\/ show original command first\n\t\t\t\tline = \"\"\n\t\t\t})\n\t\t\tif !matched {\n\t\t\t\tprompt = \"\"\n\t\t\t}\n\t\t\tif immediate || prefix != \" \" || !matched {\n\t\t\t\tfmt.Print(line + prefix + prompt)\n\t\t\t}\n\t\t\t\/\/ signal to sender that this request has been processed\n\t\t\tprogress <- matched\n\n\t\tcase n := <-incLevel:\n\t\t\tdepth += n\n\t\t}\n\t}\n}\n\nfunc expectEcho(match string, immed bool, flusher func(string)) (string, bool) {\n\tvar collected []byte\n\tfor {\n\t\tdata := readWithTimeout()\n\t\tcollected = append(collected, data...)\n\t\tif bytes.HasSuffix(collected, []byte(match)) {\n\t\t\tbytesBefore := len(collected) - len(match)\n\t\t\treturn string(collected[:bytesBefore]), true\n\t\t}\n\t\tif immed || len(data) == 0 {\n\t\t\treturn string(collected), false\n\t\t}\n\t\tif n := bytes.LastIndexByte(collected, '\\n'); n >= 0 {\n\t\t\tflusher(string(collected[:n+1]))\n\t\t\tcollected = collected[n+1:]\n\t\t}\n\t}\n}\n\nfunc serialSend(data string) {\n\t_, err := conn.Write([]byte(data))\n\tcheck(err)\n}\n\nfunc doInclude(fname string) {\n\tif fname == \"\" {\n\t\treturn \/\/ silently ignore empty files\n\t}\n\n\tincLevel <- +1\n\tdefer func() { incLevel <- -1 }()\n\n\tlineNum := 0\n\tfmt.Printf(\"\\\\ >>> include %s\\n\", fname)\n\tdefer func() {\n\t\tfmt.Printf(\"\\\\ <<<<<<<<<<< %s (%d lines)\\n\", fname, lineNum)\n\t}()\n\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlineNum++\n\n\t\ts := strings.TrimLeft(line, \" \")\n\t\tif s == \"\" || strings.HasPrefix(s, \"\\\\ \") {\n\t\t\tif len(*expand) == 0 {\n\t\t\t\tcontinue \/\/ don't send empty or comment-only lines\n\t\t\t}\n\t\t}\n\n\t\tparseAndSend(line)\n\t}\n}\n\nfunc expandFile() {\n\tgo func() {\n\t\tfor line := range outBound {\n\t\t\tfmt.Println(line)\n\t\t\tprogress <- true\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor range incLevel {\n\t\t}\n\t}()\n\n\tfor _, fname := range strings.Split(*expand, \",\") {\n\t\tdoInclude(fname)\n\t}\n}\n\nfunc hexToBin(data []byte) []byte {\n\tvar bin []byte\n\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\tif strings.HasSuffix(line, \"\\r\") {\n\t\t\tline = line[:len(line)-1]\n\t\t}\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif line[0] != ':' || len(line) < 11 {\n\t\t\tfmt.Println(\"Not ihex format:\", line)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tbytes, err := hex.DecodeString(line[1:])\n\t\tcheck(err)\n\t\tif bytes[3] != 0x00 {\n\t\t\tcontinue\n\t\t}\n\t\toffset := (int(bytes[1]) << 8) + int(bytes[2])\n\t\tlength := bytes[0]\n\t\tfor offset > len(bin) {\n\t\t\tbin = append(bin, 0xFF)\n\t\t}\n\t\tbin = append(bin, bytes[4:4+length]...)\n\t}\n\treturn bin\n}\n\nfunc firmwareUpload() {\n\tf, err := os.Open(*upload)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tcheck(err)\n\n\t\/\/ convert to binary if first bytes look like they are in hex format\n\ttag := \"\"\n\tif len(data) > 11 && data[0] == ':' {\n\t\t_, err = hex.DecodeString(string(data[1:11]))\n\t\tif err == nil {\n\t\t\tdata = hexToBin(data)\n\t\t\ttag = \" (converted from Intel HEX)\"\n\t\t}\n\t}\n\n\tfmt.Printf(\" File: %s\\n\", *upload)\n\tfmt.Printf(\" Count: %d bytes%s\\n\", len(data), tag)\n\tfmt.Printf(\" Checksum: %08x hex\\n\", crc32.ChecksumIEEE(data))\n\n\tuploadSTM32(data)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage lang\n\nimport (\n\t\"context\"\n\t\"log\"\n\n\t\"os\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/getgauge\/common\"\n\t\"github.com\/getgauge\/gauge\/api\/infoGatherer\"\n\t\"github.com\/getgauge\/gauge\/gauge\"\n\tgm \"github.com\/getgauge\/gauge\/gauge_messages\"\n\t\"github.com\/getgauge\/gauge\/logger\"\n\t\"github.com\/getgauge\/gauge\/util\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/sourcegraph\/go-langserver\/pkg\/lsp\"\n\t\"github.com\/sourcegraph\/jsonrpc2\"\n)\n\ntype server struct{}\n\ntype infoProvider interface {\n\tInit()\n\tSteps() []*gauge.Step\n\tAllSteps() []*gauge.Step\n\tConcepts() []*gm.ConceptInfo\n\tParams(file string, argType gauge.ArgType) []gauge.StepArg\n\tTags() []string\n\tSearchConceptDictionary(string) *gauge.Concept\n\tGetAvailableSpecDetails(specs []string) []*infoGatherer.SpecDetail\n}\n\nvar provider infoProvider\n\nfunc Server(p infoProvider) *server {\n\tprovider = p\n\tprovider.Init()\n\tstartRunner()\n\treturn &server{}\n}\n\ntype lspHandler struct {\n\tjsonrpc2.Handler\n}\n\ntype LangHandler struct {\n}\n\ntype registrationParams struct {\n\tRegistrations []registration `json:\"registrations\"`\n}\n\ntype registration struct {\n\tId string `json:\"id\"`\n\tMethod string `json:\"method\"`\n\tRegisterOptions interface{} `json:\"registerOptions\"`\n}\n\ntype textDocumentRegistrationOptions struct {\n\tDocumentSelector documentSelector `json:\"documentSelector\"`\n}\n\ntype textDocumentChangeRegistrationOptions struct {\n\ttextDocumentRegistrationOptions\n\tSyncKind lsp.TextDocumentSyncKind `json:\"syncKind,omitempty\"`\n}\n\ntype codeLensRegistrationOptions struct {\n\ttextDocumentRegistrationOptions\n\tResolveProvider bool `json:\"resolveProvider,omitempty\"`\n}\n\ntype documentSelector struct {\n\tLanguage string `json:\"language\"`\n}\n\nfunc newHandler() jsonrpc2.Handler {\n\treturn lspHandler{jsonrpc2.HandlerWithError((&LangHandler{}).handle)}\n}\n\nfunc (h lspHandler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) {\n\tgo h.Handler.Handle(ctx, conn, req)\n}\n\nfunc (h *LangHandler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (interface{}, error) {\n\treturn h.Handle(ctx, conn, req)\n}\n\nfunc (h *LangHandler) Handle(ctx context.Context, conn jsonrpc2.JSONRPC2, req *jsonrpc2.Request) (interface{}, error) {\n\tswitch req.Method {\n\tcase \"initialize\":\n\t\treturn gaugeLSPCapabilities(), nil\n\tcase \"initialized\":\n\t\tregisterRunnerCapabilities(conn, ctx)\n\t\tgo publishDiagnostics(ctx, conn)\n\t\treturn nil, nil\n\tcase \"shutdown\":\n\t\tkillRunner()\n\t\treturn nil, nil\n\tcase \"exit\":\n\t\tif c, ok := conn.(*jsonrpc2.Conn); ok {\n\t\t\tc.Close()\n\t\t\tos.Exit(0)\n\t\t}\n\t\treturn nil, nil\n\tcase \"$\/cancelRequest\":\n\t\treturn nil, nil\n\tcase \"textDocument\/didOpen\":\n\t\treturn nil, documentOpened(req, ctx, conn)\n\tcase \"textDocument\/didClose\":\n\t\treturn nil, documentClosed(req, ctx, conn)\n\tcase \"textDocument\/didChange\":\n\t\treturn nil, documentChange(req, ctx, conn)\n\tcase \"textDocument\/completion\":\n\t\treturn completion(req)\n\tcase \"completionItem\/resolve\":\n\t\treturn resolveCompletion(req)\n\tcase \"textDocument\/definition\":\n\t\treturn definition(req)\n\tcase \"textDocument\/formatting\":\n\t\tdata, err := format(req)\n\t\tif err != nil {\n\t\t\tconn.Notify(ctx, \"window\/showMessage\", lsp.ShowMessageParams{Type: 1, Message: err.Error()})\n\t\t}\n\t\treturn data, err\n\tcase \"textDocument\/codeLens\":\n\t\treturn codeLenses(req)\n\tcase \"textDocument\/codeAction\":\n\t\treturn codeActions(req)\n\tcase \"textDocument\/documentSymbol\":\n\t\treturn documentSymbols(req)\n\tcase \"workspace\/symbol\":\n\t\treturn workspaceSymbols(req)\n\tcase \"gauge\/stepReferences\":\n\t\treturn stepReferences(req)\n\tcase \"gauge\/stepValueAt\":\n\t\treturn stepValueAt(req)\n\tcase \"gauge\/scenarios\":\n\t\treturn scenarios(req)\n\tcase \"gauge\/specs\":\n\t\treturn specs()\n\tdefault:\n\t\treturn nil, nil\n\t}\n}\n\nfunc gaugeLSPCapabilities() lsp.InitializeResult {\n\tkind := lsp.TDSKFull\n\treturn lsp.InitializeResult{\n\t\tCapabilities: lsp.ServerCapabilities{\n\t\t\tTextDocumentSync: lsp.TextDocumentSyncOptionsOrKind{Kind: &kind, Options: &lsp.TextDocumentSyncOptions{Save: &lsp.SaveOptions{IncludeText: true}}},\n\t\t\tCompletionProvider: &lsp.CompletionOptions{ResolveProvider: true, TriggerCharacters: []string{\"*\", \"* \", \"\\\"\", \"<\", \":\", \",\"}},\n\t\t\tDocumentFormattingProvider: true,\n\t\t\tCodeLensProvider: &lsp.CodeLensOptions{ResolveProvider: false},\n\t\t\tDefinitionProvider: true,\n\t\t\tCodeActionProvider: true,\n\t\t\tDocumentSymbolProvider: true,\n\t\t\tWorkspaceSymbolProvider: true,\n\t\t},\n\t}\n}\n\nfunc documentOpened(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidOpenTextDocumentParams\n\tvar err error\n\tif err = json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\topenFile(params)\n\t} else if lRunner.runner != nil {\n\t\terr = cacheFileOnRunner(params.TextDocument.URI, params.TextDocument.Text)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc documentChange(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidChangeTextDocumentParams\n\tvar err error\n\tif err = json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\tchangeFile(params)\n\t} else if lRunner.runner != nil {\n\t\terr = cacheFileOnRunner(params.TextDocument.URI, params.ContentChanges[0].Text)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc documentClosed(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidCloseTextDocumentParams\n\tvar err error\n\tif err := json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\tcloseFile(params)\n\t\tif !common.FileExists(util.ConvertPathToURI(params.TextDocument.URI)) {\n\t\t\tpublishDiagnostic(params.TextDocument.URI, []lsp.Diagnostic{}, conn, ctx)\n\t\t}\n\t} else if lRunner.runner != nil {\n\t\tcacheFileRequest := &gm.Message{MessageType: gm.Message_CacheFileRequest, CacheFileRequest: &gm.CacheFileRequest{FilePath: util.ConvertURItoFilePath(params.TextDocument.URI), IsClosed: true}}\n\t\terr = sendMessageToRunner(cacheFileRequest)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc registerRunnerCapabilities(conn jsonrpc2.JSONRPC2, ctx context.Context) {\n\tvar result string\n\t\/\/ TODO : fetch the language dynamically\n\tconn.Call(ctx, \"client\/registerCapability\", registrationParams{[]registration{\n\t\t{Id: \"gauge-runner-didOpen\", Method: \"textDocument\/didOpen\", RegisterOptions: textDocumentRegistrationOptions{DocumentSelector: documentSelector{Language: \"javascript\"}}},\n\t\t{Id: \"gauge-runner-didClose\", Method: \"textDocument\/didClose\", RegisterOptions: textDocumentRegistrationOptions{DocumentSelector: documentSelector{Language: \"javascript\"}}},\n\t\t{Id: \"gauge-runner-didChange\", Method: \"textDocument\/didChange\", RegisterOptions: textDocumentChangeRegistrationOptions{textDocumentRegistrationOptions: textDocumentRegistrationOptions{DocumentSelector: documentSelector{Language: \"javascript\"}}, SyncKind: lsp.TDSKFull}},\n\t\t{Id: \"gauge-runner-codelens\", Method: \"textDocument\/codeLens\", RegisterOptions: codeLensRegistrationOptions{textDocumentRegistrationOptions: textDocumentRegistrationOptions{DocumentSelector: documentSelector{Language: \"javascript\"}}, ResolveProvider: false}},\n\t}}, result)\n}\n\ntype lspWriter struct {\n}\n\nfunc (w lspWriter) Write(p []byte) (n int, err error) {\n\tlogger.LspLog.Debug(string(p))\n\treturn os.Stderr.Write(p)\n}\n\nfunc (s *server) Start(logLevel string) {\n\tlogger.APILog.Info(\"LangServer: reading on stdin, writing on stdout\")\n\tvar connOpt []jsonrpc2.ConnOpt\n\tif logLevel == \"debug\" {\n\t\tconnOpt = append(connOpt, jsonrpc2.LogMessages(log.New(lspWriter{}, \"\", 0)))\n\t}\n\tctx := context.Background()\n\tconn := jsonrpc2.NewConn(ctx, jsonrpc2.NewBufferedStream(stdRWC{}, jsonrpc2.VSCodeObjectCodec{}), newHandler(), connOpt...)\n\tlogger.SetCustomLogger(lspLogger{conn, ctx})\n\t<-conn.DisconnectNotify()\n\tlogger.APILog.Info(\"Connection closed\")\n}\n\ntype stdRWC struct{}\n\nfunc (stdRWC) Read(p []byte) (int, error) {\n\treturn os.Stdin.Read(p)\n}\n\nfunc (stdRWC) Write(p []byte) (int, error) {\n\treturn os.Stdout.Write(p)\n}\n\nfunc (stdRWC) Close() error {\n\tif err := os.Stdin.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn os.Stdout.Close()\n}\n\ntype lspLogger struct {\n\tconn *jsonrpc2.Conn\n\tctx context.Context\n}\n\nfunc (c lspLogger) Log(logLevel logging.Level, msg string) {\n\tlogger.APILog.Info(logLevel)\n\tvar level lsp.MessageType\n\tswitch logLevel {\n\tcase logging.DEBUG:\n\t\tlevel = lsp.Log\n\tcase logging.INFO:\n\t\tlevel = lsp.Info\n\tcase logging.WARNING:\n\t\tlevel = lsp.MTWarning\n\tcase logging.ERROR:\n\t\tlevel = lsp.MTError\n\tcase logging.CRITICAL:\n\t\tlevel = lsp.MTError\n\tdefault:\n\t\tlevel = lsp.Info\n\t}\n\tc.conn.Notify(c.ctx, \"window\/logMessage\", lsp.LogMessageParams{Type: level, Message: msg})\n}\n<commit_msg>setting fileType and pattern while initilizing runner capabilities<commit_after>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage lang\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"os\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/getgauge\/common\"\n\t\"github.com\/getgauge\/gauge\/api\/infoGatherer\"\n\t\"github.com\/getgauge\/gauge\/config\"\n\t\"github.com\/getgauge\/gauge\/gauge\"\n\tgm \"github.com\/getgauge\/gauge\/gauge_messages\"\n\t\"github.com\/getgauge\/gauge\/logger\"\n\t\"github.com\/getgauge\/gauge\/util\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/sourcegraph\/go-langserver\/pkg\/lsp\"\n\t\"github.com\/sourcegraph\/jsonrpc2\"\n)\n\ntype server struct{}\n\ntype infoProvider interface {\n\tInit()\n\tSteps() []*gauge.Step\n\tAllSteps() []*gauge.Step\n\tConcepts() []*gm.ConceptInfo\n\tParams(file string, argType gauge.ArgType) []gauge.StepArg\n\tTags() []string\n\tSearchConceptDictionary(string) *gauge.Concept\n\tGetAvailableSpecDetails(specs []string) []*infoGatherer.SpecDetail\n}\n\nvar provider infoProvider\n\nfunc Server(p infoProvider) *server {\n\tprovider = p\n\tprovider.Init()\n\tstartRunner()\n\treturn &server{}\n}\n\ntype lspHandler struct {\n\tjsonrpc2.Handler\n}\n\ntype LangHandler struct {\n}\n\ntype registrationParams struct {\n\tRegistrations []registration `json:\"registrations\"`\n}\n\ntype registration struct {\n\tId string `json:\"id\"`\n\tMethod string `json:\"method\"`\n\tRegisterOptions interface{} `json:\"registerOptions\"`\n}\n\ntype textDocumentRegistrationOptions struct {\n\tDocumentSelector documentSelector `json:\"documentSelector\"`\n}\n\ntype textDocumentChangeRegistrationOptions struct {\n\ttextDocumentRegistrationOptions\n\tSyncKind lsp.TextDocumentSyncKind `json:\"syncKind,omitempty\"`\n}\n\ntype codeLensRegistrationOptions struct {\n\ttextDocumentRegistrationOptions\n\tResolveProvider bool `json:\"resolveProvider,omitempty\"`\n}\n\ntype documentSelector struct {\n\tScheme string `json:\"scheme\"`\n\tLanguage string `json:\"language\"`\n\tPattern string `json:\"pattern\"`\n}\n\nfunc newHandler() jsonrpc2.Handler {\n\treturn lspHandler{jsonrpc2.HandlerWithError((&LangHandler{}).handle)}\n}\n\nfunc (h lspHandler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) {\n\tgo h.Handler.Handle(ctx, conn, req)\n}\n\nfunc (h *LangHandler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (interface{}, error) {\n\treturn h.Handle(ctx, conn, req)\n}\n\nfunc (h *LangHandler) Handle(ctx context.Context, conn jsonrpc2.JSONRPC2, req *jsonrpc2.Request) (interface{}, error) {\n\tswitch req.Method {\n\tcase \"initialize\":\n\t\treturn gaugeLSPCapabilities(), nil\n\tcase \"initialized\":\n\t\tregisterRunnerCapabilities(conn, ctx)\n\t\tgo publishDiagnostics(ctx, conn)\n\t\treturn nil, nil\n\tcase \"shutdown\":\n\t\tkillRunner()\n\t\treturn nil, nil\n\tcase \"exit\":\n\t\tif c, ok := conn.(*jsonrpc2.Conn); ok {\n\t\t\tc.Close()\n\t\t\tos.Exit(0)\n\t\t}\n\t\treturn nil, nil\n\tcase \"$\/cancelRequest\":\n\t\treturn nil, nil\n\tcase \"textDocument\/didOpen\":\n\t\treturn nil, documentOpened(req, ctx, conn)\n\tcase \"textDocument\/didClose\":\n\t\treturn nil, documentClosed(req, ctx, conn)\n\tcase \"textDocument\/didChange\":\n\t\treturn nil, documentChange(req, ctx, conn)\n\tcase \"textDocument\/completion\":\n\t\treturn completion(req)\n\tcase \"completionItem\/resolve\":\n\t\treturn resolveCompletion(req)\n\tcase \"textDocument\/definition\":\n\t\treturn definition(req)\n\tcase \"textDocument\/formatting\":\n\t\tdata, err := format(req)\n\t\tif err != nil {\n\t\t\tconn.Notify(ctx, \"window\/showMessage\", lsp.ShowMessageParams{Type: 1, Message: err.Error()})\n\t\t}\n\t\treturn data, err\n\tcase \"textDocument\/codeLens\":\n\t\treturn codeLenses(req)\n\tcase \"textDocument\/codeAction\":\n\t\treturn codeActions(req)\n\tcase \"textDocument\/documentSymbol\":\n\t\treturn documentSymbols(req)\n\tcase \"workspace\/symbol\":\n\t\treturn workspaceSymbols(req)\n\tcase \"gauge\/stepReferences\":\n\t\treturn stepReferences(req)\n\tcase \"gauge\/stepValueAt\":\n\t\treturn stepValueAt(req)\n\tcase \"gauge\/scenarios\":\n\t\treturn scenarios(req)\n\tcase \"gauge\/specs\":\n\t\treturn specs()\n\tdefault:\n\t\treturn nil, nil\n\t}\n}\n\nfunc gaugeLSPCapabilities() lsp.InitializeResult {\n\tkind := lsp.TDSKFull\n\treturn lsp.InitializeResult{\n\t\tCapabilities: lsp.ServerCapabilities{\n\t\t\tTextDocumentSync: lsp.TextDocumentSyncOptionsOrKind{Kind: &kind, Options: &lsp.TextDocumentSyncOptions{Save: &lsp.SaveOptions{IncludeText: true}}},\n\t\t\tCompletionProvider: &lsp.CompletionOptions{ResolveProvider: true, TriggerCharacters: []string{\"*\", \"* \", \"\\\"\", \"<\", \":\", \",\"}},\n\t\t\tDocumentFormattingProvider: true,\n\t\t\tCodeLensProvider: &lsp.CodeLensOptions{ResolveProvider: false},\n\t\t\tDefinitionProvider: true,\n\t\t\tCodeActionProvider: true,\n\t\t\tDocumentSymbolProvider: true,\n\t\t\tWorkspaceSymbolProvider: true,\n\t\t},\n\t}\n}\n\nfunc documentOpened(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidOpenTextDocumentParams\n\tvar err error\n\tif err = json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\topenFile(params)\n\t} else if lRunner.runner != nil {\n\t\terr = cacheFileOnRunner(params.TextDocument.URI, params.TextDocument.Text)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc documentChange(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidChangeTextDocumentParams\n\tvar err error\n\tif err = json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\tchangeFile(params)\n\t} else if lRunner.runner != nil {\n\t\terr = cacheFileOnRunner(params.TextDocument.URI, params.ContentChanges[0].Text)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc documentClosed(req *jsonrpc2.Request, ctx context.Context, conn jsonrpc2.JSONRPC2) error {\n\tvar params lsp.DidCloseTextDocumentParams\n\tvar err error\n\tif err := json.Unmarshal(*req.Params, ¶ms); err != nil {\n\t\tlogger.APILog.Debugf(\"failed to parse request %s\", err.Error())\n\t\treturn err\n\t}\n\tif util.IsGaugeFile(params.TextDocument.URI) {\n\t\tcloseFile(params)\n\t\tif !common.FileExists(util.ConvertPathToURI(params.TextDocument.URI)) {\n\t\t\tpublishDiagnostic(params.TextDocument.URI, []lsp.Diagnostic{}, conn, ctx)\n\t\t}\n\t} else if lRunner.runner != nil {\n\t\tcacheFileRequest := &gm.Message{MessageType: gm.Message_CacheFileRequest, CacheFileRequest: &gm.CacheFileRequest{FilePath: util.ConvertURItoFilePath(params.TextDocument.URI), IsClosed: true}}\n\t\terr = sendMessageToRunner(cacheFileRequest)\n\t}\n\tgo publishDiagnostics(ctx, conn)\n\treturn err\n}\n\nfunc registerRunnerCapabilities(conn jsonrpc2.JSONRPC2, ctx context.Context) {\n\tvar result string\n\t\/\/ TODO : fetch the language dynamically\n\tds := documentSelector{\n\t\tLanguage: \"javascript\",\n\t\tPattern: fmt.Sprintf(\"%s\/**\/*\", config.ProjectRoot),\n\t\tScheme: \"file\",\n\t}\n\tconn.Call(ctx, \"client\/registerCapability\", registrationParams{[]registration{\n\t\t{Id: \"gauge-runner-didOpen\", Method: \"textDocument\/didOpen\", RegisterOptions: textDocumentRegistrationOptions{DocumentSelector: ds}},\n\t\t{Id: \"gauge-runner-didClose\", Method: \"textDocument\/didClose\", RegisterOptions: textDocumentRegistrationOptions{DocumentSelector: ds}},\n\t\t{Id: \"gauge-runner-didChange\", Method: \"textDocument\/didChange\", RegisterOptions: textDocumentChangeRegistrationOptions{textDocumentRegistrationOptions: textDocumentRegistrationOptions{DocumentSelector: ds}, SyncKind: lsp.TDSKFull}},\n\t\t{Id: \"gauge-runner-codelens\", Method: \"textDocument\/codeLens\", RegisterOptions: codeLensRegistrationOptions{textDocumentRegistrationOptions: textDocumentRegistrationOptions{DocumentSelector: ds}, ResolveProvider: false}},\n\t}}, result)\n}\n\ntype lspWriter struct {\n}\n\nfunc (w lspWriter) Write(p []byte) (n int, err error) {\n\tlogger.LspLog.Debug(string(p))\n\treturn os.Stderr.Write(p)\n}\n\nfunc (s *server) Start(logLevel string) {\n\tlogger.APILog.Info(\"LangServer: reading on stdin, writing on stdout\")\n\tvar connOpt []jsonrpc2.ConnOpt\n\tif logLevel == \"debug\" {\n\t\tconnOpt = append(connOpt, jsonrpc2.LogMessages(log.New(lspWriter{}, \"\", 0)))\n\t}\n\tctx := context.Background()\n\tconn := jsonrpc2.NewConn(ctx, jsonrpc2.NewBufferedStream(stdRWC{}, jsonrpc2.VSCodeObjectCodec{}), newHandler(), connOpt...)\n\tlogger.SetCustomLogger(lspLogger{conn, ctx})\n\t<-conn.DisconnectNotify()\n\tlogger.APILog.Info(\"Connection closed\")\n}\n\ntype stdRWC struct{}\n\nfunc (stdRWC) Read(p []byte) (int, error) {\n\treturn os.Stdin.Read(p)\n}\n\nfunc (stdRWC) Write(p []byte) (int, error) {\n\treturn os.Stdout.Write(p)\n}\n\nfunc (stdRWC) Close() error {\n\tif err := os.Stdin.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn os.Stdout.Close()\n}\n\ntype lspLogger struct {\n\tconn *jsonrpc2.Conn\n\tctx context.Context\n}\n\nfunc (c lspLogger) Log(logLevel logging.Level, msg string) {\n\tlogger.APILog.Info(logLevel)\n\tvar level lsp.MessageType\n\tswitch logLevel {\n\tcase logging.DEBUG:\n\t\tlevel = lsp.Log\n\tcase logging.INFO:\n\t\tlevel = lsp.Info\n\tcase logging.WARNING:\n\t\tlevel = lsp.MTWarning\n\tcase logging.ERROR:\n\t\tlevel = lsp.MTError\n\tcase logging.CRITICAL:\n\t\tlevel = lsp.MTError\n\tdefault:\n\t\tlevel = lsp.Info\n\t}\n\tc.conn.Notify(c.ctx, \"window\/logMessage\", lsp.LogMessageParams{Type: level, Message: msg})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Package for reading sequence from fasta file.\n * by Wei Shen (shenwei356@gmail.com)\n *\/\n\npackage seq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/*\nFastaReader is a fasta file parser, which returns a function that\nreturns a pair of head and sequence when it was called.\n\nExample:\n\tNextSeq, err := seq.FastaReader(\"test.fa\")\n\tif err != nil {\n\t\trecover()\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor {\n\t\thead, seq, err := NextSeq()\n\t\tif err != nil {\n\t\t\t\/\/ fmt.Println(err)\n\t\t\tbreak\n\t\t}\n\n\t\tfmt.Printf(\">%s\\n%s\\n\", head, seq)\n\t}\n*\/\nfunc FastaReader(file string) (func() (string, string, error), error) {\n\tfh, err := os.Open(file)\n\tif err != nil {\n\t\trecover()\n\t\treturn nil, errors.New(\"Failed to open file: \" + file)\n\t}\n\n\treader := bufio.NewReader(fh)\n\tvar buffer bytes.Buffer\n\tvar lastHead string\n\tvar fileHandlerClosed bool = false\n\n\treturn func() (head, seq string, err error) {\n\t\tif fileHandlerClosed {\n\t\t\treturn \"\", \"\", io.EOF\n\t\t}\n\n\t\tvar str string\n\t\tfor {\n\t\t\tstr, err = reader.ReadString('\\n')\n\t\t\tif err == io.EOF {\n\t\t\t\tseq = buffer.String()\n\t\t\t\tbuffer.Reset()\n\t\t\t\thead = lastHead\n\t\t\t\tfh.Close()\n\t\t\t\tfileHandlerClosed = true\n\t\t\t\terr = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.HasPrefix(str, \">\") {\n\t\t\t\tthisHead := strings.TrimRight(str[1:], \"\\r?\\n\")\n\t\t\t\tif buffer.Len() > 0 { \/\/ no-first seq head\n\t\t\t\t\tseq = buffer.String()\n\t\t\t\t\tbuffer.Reset()\n\t\t\t\t\thead, lastHead = lastHead, thisHead\n\t\t\t\t\treturn\n\t\t\t\t} else { \/\/ first sequence head\n\t\t\t\t\tlastHead = thisHead\n\t\t\t\t}\n\t\t\t} else { \/\/ append sequence\n\t\t\t\tbuffer.WriteString(strings.TrimRight(str, \"\\r?\\n\"))\n\t\t\t}\n\t\t}\n\t}, err\n}\n<commit_msg>update a bug in parsing fastafile in fasta.go<commit_after>\/*\n * Package for reading sequence from fasta file.\n * by Wei Shen (shenwei356@gmail.com)\n *\/\n\npackage seq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/*\nFastaReader is a fasta file parser, which returns a function that\nreturns a pair of head and sequence when it was called.\n\nExample:\n\tNextSeq, err := seq.FastaReader(\"test.fa\")\n\tif err != nil {\n\t\trecover()\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor {\n\t\thead, seq, err := NextSeq()\n\t\tif err != nil {\n\t\t\t\/\/ fmt.Println(err)\n\t\t\tbreak\n\t\t}\n\n\t\tfmt.Printf(\">%s\\n%s\\n\", head, seq)\n\t}\n*\/\nfunc FastaReader(file string) (func() (string, string, error), error) {\n\tfh, err := os.Open(file)\n\tif err != nil {\n\t\trecover()\n\t\treturn nil, errors.New(\"Failed to open file: \" + file)\n\t}\n\n\treader := bufio.NewReader(fh)\n\tvar buffer bytes.Buffer\n\tvar lastHead string\n\tvar fileHandlerClosed bool = false\n\n\treturn func() (head, seq string, err error) {\n\t\tif fileHandlerClosed {\n\t\t\treturn \"\", \"\", io.EOF\n\t\t}\n\n\t\tvar str string\n\t\tfor {\n\t\t\tstr, err = reader.ReadString('\\n')\n\t\t\tif err == io.EOF {\n\t\t\t\t\/\/ do not for get the last line,\n\t\t\t\t\/\/ even which does not ends with \"\\n\"\n\t\t\t\tbuffer.WriteString(strings.TrimRight(str, \"\\r?\\n\"))\n\n\t\t\t\tseq = buffer.String()\n\t\t\t\tbuffer.Reset()\n\t\t\t\thead = lastHead\n\t\t\t\tfh.Close()\n\t\t\t\tfileHandlerClosed = true\n\t\t\t\terr = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.HasPrefix(str, \">\") {\n\t\t\t\tthisHead := strings.TrimRight(str[1:], \"\\r?\\n\")\n\t\t\t\tif buffer.Len() > 0 { \/\/ no-first seq head\n\t\t\t\t\tseq = buffer.String()\n\t\t\t\t\tbuffer.Reset()\n\t\t\t\t\thead, lastHead = lastHead, thisHead\n\t\t\t\t\treturn\n\t\t\t\t} else { \/\/ first sequence head\n\t\t\t\t\tlastHead = thisHead\n\t\t\t\t}\n\t\t\t} else { \/\/ append sequence\n\t\t\t\tbuffer.WriteString(strings.TrimRight(str, \"\\r?\\n\"))\n\t\t\t}\n\t\t}\n\t}, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package dyndns provides a tool for running a\n\/\/ dynamic dns updating service.\npackage dyndns\n\nimport (\n\t\"github.com\/mfycheng\/name-dyndns\/api\"\n\t\"github.com\/mfycheng\/name-dyndns\/log\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar wg sync.WaitGroup\n\nfunc updateDNSRecord(a api.API, domain, recordId string, newRecord api.DNSRecord) error {\n\tlog.Logger.Printf(\"Deleting DNS record for %s.%s.\\n\", newRecord.Name, domain)\n\terr := a.DeleteDNSRecord(domain, newRecord.RecordId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Logger.Printf(\"Creating DNS record for %s.%s: %s\\n\", newRecord.Name, domain, newRecord)\n\treturn a.CreateDNSRecord(domain, newRecord)\n}\n\nfunc runConfig(c api.Config, daemon bool) {\n\tdefer wg.Done()\n\n\ta := api.NewAPIFromConfig(c)\n\tfor {\n\t\tlog.Logger.Printf(\"Running update check for %s.\", c.Domain)\n\t\tip, err := GetExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Logger.Print(\"Failed to retreive IP: \")\n\t\t\tif daemon {\n\t\t\t\tlog.Logger.Printf(\"Will retry in %d seconds...\\n\", c.Interval)\n\t\t\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Logger.Println(\"Giving up.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ GetRecords retrieves a list of DNSRecords,\n\t\t\/\/ 1 per hostname with the associated domain.\n\t\t\/\/ If the content is not the current IP, then\n\t\t\/\/ update it.\n\t\trecords, err := a.GetDNSRecords(c.Domain)\n\t\tif err != nil {\n\t\t\tlog.Logger.Printf(\"Failed to retreive records for %s:\\n\\t%s\\n\", c.Domain, err)\n\t\t\tif daemon {\n\t\t\t\tlog.Logger.Printf(\"Will retry in %d seconds...\\n\", c.Interval)\n\t\t\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Logger.Print(\"Giving up.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, r := range records {\n\t\t\tif r.Content != ip {\n\t\t\t\tr.Content = ip\n\t\t\t\terr = updateDNSRecord(a, c.Domain, r.RecordId, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Logger.Printf(\"Failed to update record %s [%s.%s] with IP: %s\\n\\t%s\\n\", r.RecordId, r.Name, c.Domain, ip, err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Logger.Printf(\"Attempting to update record %s [%s.%s] with IP: %s\\n\", r.RecordId, r.Name, c.Domain, ip)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tlog.Logger.Println(\"Update complete.\")\n\t\tif !daemon {\n\t\t\tlog.Logger.Println(\"Non daemon mode, stopping.\")\n\t\t\treturn\n\t\t}\n\t\tlog.Logger.Printf(\"Will update again in %d seconds.\\n\", c.Interval)\n\n\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\t}\n}\n\n\/\/ For each domain, check if the host record matches\n\/\/ the current external IP. If it does not, it updates.\n\/\/ If daemon is true, then Run will run forever, polling at\n\/\/ an interval specified in each config.\nfunc Run(configs []api.Config, daemon bool) {\n\tfor _, config := range configs {\n\t\twg.Add(1)\n\t\tgo runConfig(config, daemon)\n\t}\n\n\twg.Wait()\n}\n<commit_msg>nitpick on formatting consistency<commit_after>\/\/ Package dyndns provides a tool for running a\n\/\/ dynamic dns updating service.\npackage dyndns\n\nimport (\n\t\"github.com\/mfycheng\/name-dyndns\/api\"\n\t\"github.com\/mfycheng\/name-dyndns\/log\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar wg sync.WaitGroup\n\nfunc updateDNSRecord(a api.API, domain, recordId string, newRecord api.DNSRecord) error {\n\tlog.Logger.Printf(\"Deleting DNS record for %s.%s.\\n\", newRecord.Name, domain)\n\terr := a.DeleteDNSRecord(domain, newRecord.RecordId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Logger.Printf(\"Creating DNS record for %s.%s: %s\\n\", newRecord.Name, domain, newRecord)\n\treturn a.CreateDNSRecord(domain, newRecord)\n}\n\nfunc runConfig(c api.Config, daemon bool) {\n\tdefer wg.Done()\n\n\ta := api.NewAPIFromConfig(c)\n\tfor {\n\t\tlog.Logger.Printf(\"Running update check for %s.\", c.Domain)\n\t\tip, err := GetExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Logger.Print(\"Failed to retreive IP: \")\n\t\t\tif daemon {\n\t\t\t\tlog.Logger.Printf(\"Will retry in %d seconds...\\n\", c.Interval)\n\t\t\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Logger.Println(\"Giving up.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ GetRecords retrieves a list of DNSRecords,\n\t\t\/\/ 1 per hostname with the associated domain.\n\t\t\/\/ If the content is not the current IP, then\n\t\t\/\/ update it.\n\t\trecords, err := a.GetDNSRecords(c.Domain)\n\t\tif err != nil {\n\t\t\tlog.Logger.Printf(\"Failed to retreive records for %s:\\n\\t%s\\n\", c.Domain, err)\n\t\t\tif daemon {\n\t\t\t\tlog.Logger.Printf(\"Will retry in %d seconds...\\n\", c.Interval)\n\t\t\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Logger.Print(\"Giving up.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, r := range records {\n\t\t\tif r.Content != ip {\n\t\t\t\tr.Content = ip\n\t\t\t\terr = updateDNSRecord(a, c.Domain, r.RecordId, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Logger.Printf(\"Failed to update record %s [%s.%s] with IP: %s\\n\\t%s\\n\", r.RecordId, r.Name, c.Domain, ip, err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Logger.Printf(\"Attempting to update record %s [%s.%s] with IP: %s\\n\", r.RecordId, r.Name, c.Domain, ip)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tlog.Logger.Println(\"Update complete.\")\n\t\tif !daemon {\n\t\t\tlog.Logger.Println(\"Non daemon mode, stopping.\")\n\t\t\treturn\n\t\t}\n\t\tlog.Logger.Printf(\"Will update again in %d seconds.\\n\", c.Interval)\n\n\t\ttime.Sleep(time.Duration(c.Interval) * time.Second)\n\t}\n}\n\n\/\/ For each domain, check if the host record matches\n\/\/ the current external IP. If it does not, it updates.\n\/\/ If daemon is true, then Run will run forever, polling at\n\/\/ an interval specified in each config.\nfunc Run(configs []api.Config, daemon bool) {\n\tfor _, config := range configs {\n\t\twg.Add(1)\n\t\tgo runConfig(config, daemon)\n\t}\n\n\twg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n\n\t\"github.com\/hashicorp\/terraform\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/states\"\n\t\"github.com\/hashicorp\/terraform\/states\/statefile\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestApply_destroy(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"bar\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tp := testProvider()\n\tp.GetSchemaReturn = &terraform.ProviderSchema{\n\t\tResourceTypes: map[string]*configschema.Block{\n\t\t\t\"test_instance\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"id\": {Type: cty.String, Computed: true},\n\t\t\t\t\t\"ami\": {Type: cty.String, Optional: true},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply\"),\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Log(ui.OutputWriter.String())\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n\n\t\/\/ Verify a new state exists\n\tif _, err := os.Stat(statePath); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tf, err := os.Open(statePath)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdefer f.Close()\n\n\tstateFile, err := statefile.Read(f)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tif stateFile.State == nil {\n\t\tt.Fatal(\"state should not be nil\")\n\t}\n\n\tactualStr := strings.TrimSpace(stateFile.State.String())\n\texpectedStr := strings.TrimSpace(testApplyDestroyStr)\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\n%s\\n\\n%s\", actualStr, expectedStr)\n\t}\n\n\t\/\/ Should have a backup file\n\tf, err = os.Open(statePath + DefaultBackupExtension)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tbackupStateFile, err := statefile.Read(f)\n\tf.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactualStr = strings.TrimSpace(backupStateFile.State.String())\n\texpectedStr = strings.TrimSpace(originalState.String())\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\n%s\\n\\n%s\", actualStr, expectedStr)\n\t}\n}\n\nfunc TestApply_destroyLockedState(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"bar\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tunlock, err := testLockState(\".\/testdata\", statePath)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer unlock()\n\n\tp := testProvider()\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply\"),\n\t}\n\n\tif code := c.Run(args); code == 0 {\n\t\tt.Fatal(\"expected error\")\n\t}\n\n\toutput := ui.ErrorWriter.String()\n\tif !strings.Contains(output, \"lock\") {\n\t\tt.Fatal(\"command output does not look like a lock error:\", output)\n\t}\n}\n\nfunc TestApply_destroyPlan(t *testing.T) {\n\tplanPath := testPlanFileNoop(t)\n\n\tp := testProvider()\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\tplanPath,\n\t}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n}\n\nfunc TestApply_destroyTargeted(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"i-ab123\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_load_balancer\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"i-abc123\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tp := testProvider()\n\tp.GetSchemaReturn = &terraform.ProviderSchema{\n\t\tResourceTypes: map[string]*configschema.Block{\n\t\t\t\"test_instance\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"id\": {Type: cty.String, Computed: true},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"test_load_balancer\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"instances\": {Type: cty.List(cty.String), Optional: true},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-target\", \"test_instance.foo\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply-destroy-targeted\"),\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n\n\t\/\/ Verify a new state exists\n\tif _, err := os.Stat(statePath); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tf, err := os.Open(statePath)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdefer f.Close()\n\n\tstate, err := terraform.ReadState(f)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tif state == nil {\n\t\tt.Fatal(\"state should not be nil\")\n\t}\n\n\tactualStr := strings.TrimSpace(state.String())\n\texpectedStr := strings.TrimSpace(testApplyDestroyStr)\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\n%s\\n\\nexpected:\\n\\n%s\", actualStr, expectedStr)\n\t}\n\n\t\/\/ Should have a backup file\n\tf, err = os.Open(statePath + DefaultBackupExtension)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tbackupState, err := terraform.ReadState(f)\n\tf.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactualStr = strings.TrimSpace(backupState.String())\n\texpectedStr = strings.TrimSpace(originalState.String())\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\nactual:\\n%s\\n\\nexpected:\\nb%s\", actualStr, expectedStr)\n\t}\n}\n\nconst testApplyDestroyStr = `\n<no state>\n`\n<commit_msg>command: Fix TestApply_destroyTargeted for new provider and state types<commit_after>package command\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n\n\t\"github.com\/hashicorp\/terraform\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/providers\"\n\t\"github.com\/hashicorp\/terraform\/states\"\n\t\"github.com\/hashicorp\/terraform\/states\/statefile\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestApply_destroy(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"bar\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tp := testProvider()\n\tp.GetSchemaReturn = &terraform.ProviderSchema{\n\t\tResourceTypes: map[string]*configschema.Block{\n\t\t\t\"test_instance\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"id\": {Type: cty.String, Computed: true},\n\t\t\t\t\t\"ami\": {Type: cty.String, Optional: true},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply\"),\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Log(ui.OutputWriter.String())\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n\n\t\/\/ Verify a new state exists\n\tif _, err := os.Stat(statePath); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tf, err := os.Open(statePath)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdefer f.Close()\n\n\tstateFile, err := statefile.Read(f)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tif stateFile.State == nil {\n\t\tt.Fatal(\"state should not be nil\")\n\t}\n\n\tactualStr := strings.TrimSpace(stateFile.State.String())\n\texpectedStr := strings.TrimSpace(testApplyDestroyStr)\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\n%s\\n\\n%s\", actualStr, expectedStr)\n\t}\n\n\t\/\/ Should have a backup file\n\tf, err = os.Open(statePath + DefaultBackupExtension)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tbackupStateFile, err := statefile.Read(f)\n\tf.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactualStr = strings.TrimSpace(backupStateFile.State.String())\n\texpectedStr = strings.TrimSpace(originalState.String())\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\n%s\\n\\n%s\", actualStr, expectedStr)\n\t}\n}\n\nfunc TestApply_destroyLockedState(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"bar\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tunlock, err := testLockState(\".\/testdata\", statePath)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer unlock()\n\n\tp := testProvider()\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply\"),\n\t}\n\n\tif code := c.Run(args); code == 0 {\n\t\tt.Fatal(\"expected error\")\n\t}\n\n\toutput := ui.ErrorWriter.String()\n\tif !strings.Contains(output, \"lock\") {\n\t\tt.Fatal(\"command output does not look like a lock error:\", output)\n\t}\n}\n\nfunc TestApply_destroyPlan(t *testing.T) {\n\tplanPath := testPlanFileNoop(t)\n\n\tp := testProvider()\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\tplanPath,\n\t}\n\tif code := c.Run(args); code != 1 {\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n}\n\nfunc TestApply_destroyTargeted(t *testing.T) {\n\toriginalState := states.BuildState(func(s *states.SyncState) {\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_instance\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"i-ab123\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t\ts.SetResourceInstanceCurrent(\n\t\t\taddrs.Resource{\n\t\t\t\tMode: addrs.ManagedResourceMode,\n\t\t\t\tType: \"test_load_balancer\",\n\t\t\t\tName: \"foo\",\n\t\t\t}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),\n\t\t\t&states.ResourceInstanceObjectSrc{\n\t\t\t\tAttrsJSON: []byte(`{\"id\":\"i-abc123\"}`),\n\t\t\t\tStatus: states.ObjectReady,\n\t\t\t},\n\t\t\taddrs.ProviderConfig{Type: \"test\"}.Absolute(addrs.RootModuleInstance),\n\t\t)\n\t})\n\tstatePath := testStateFile(t, originalState)\n\n\tp := testProvider()\n\tp.GetSchemaReturn = &terraform.ProviderSchema{\n\t\tResourceTypes: map[string]*configschema.Block{\n\t\t\t\"test_instance\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"id\": {Type: cty.String, Computed: true},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"test_load_balancer\": {\n\t\t\t\tAttributes: map[string]*configschema.Attribute{\n\t\t\t\t\t\"id\": {Type: cty.String, Computed: true},\n\t\t\t\t\t\"instances\": {Type: cty.List(cty.String), Optional: true},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tp.PlanResourceChangeFn = func (req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {\n\t\treturn providers.PlanResourceChangeResponse{\n\t\t\tPlannedState: req.ProposedNewState,\n\t\t}\n\t}\n\n\tui := new(cli.MockUi)\n\tc := &ApplyCommand{\n\t\tDestroy: true,\n\t\tMeta: Meta{\n\t\t\ttestingOverrides: metaOverridesForProvider(p),\n\t\t\tUi: ui,\n\t\t},\n\t}\n\n\t\/\/ Run the apply command pointing to our existing state\n\targs := []string{\n\t\t\"-auto-approve\",\n\t\t\"-target\", \"test_instance.foo\",\n\t\t\"-state\", statePath,\n\t\ttestFixturePath(\"apply-destroy-targeted\"),\n\t}\n\tif code := c.Run(args); code != 0 {\n\t\tt.Fatalf(\"bad: %d\\n\\n%s\", code, ui.ErrorWriter.String())\n\t}\n\n\t\/\/ Verify a new state exists\n\tif _, err := os.Stat(statePath); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tf, err := os.Open(statePath)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tdefer f.Close()\n\n\tstateFile, err := statefile.Read(f)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\tif stateFile == nil || stateFile.State == nil {\n\t\tt.Fatal(\"state should not be nil\")\n\t}\n\n\tspew.Config.DisableMethods = true\n\tif !stateFile.State.Empty() {\n\t\tt.Fatalf(\"unexpected final state\\ngot: %s\\nwant: empty state\", spew.Sdump(stateFile.State))\n\t}\n\n\t\/\/ Should have a backup file\n\tf, err = os.Open(statePath + DefaultBackupExtension)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tbackupStateFile, err := statefile.Read(f)\n\tf.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactualStr := strings.TrimSpace(backupStateFile.State.String())\n\texpectedStr := strings.TrimSpace(originalState.String())\n\tif actualStr != expectedStr {\n\t\tt.Fatalf(\"bad:\\n\\nactual:\\n%s\\n\\nexpected:\\nb%s\", actualStr, expectedStr)\n\t}\n}\n\nconst testApplyDestroyStr = `\n<no state>\n`\n<|endoftext|>"} {"text":"<commit_before>package users\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\n\/\/ Active ...\nfunc Active(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar()\n\t})\n\n\tsort.Slice(users, func(i, j int) bool {\n\t\treturn len(users[i].AnimeList().Watching().Items) > len(users[j].AnimeList().Watching().Items)\n\t})\n\n\t\/\/ arn.SortUsersLastSeen(users)\n\n\treturn ctx.HTML(components.Users(users))\n}\n\n\/\/ Osu ...\nfunc Osu(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar() && user.Accounts.Osu.PP > 0\n\t})\n\n\t\/\/ Sort by pp\n\tsort.Slice(users, func(i, j int) bool {\n\t\treturn users[i].Accounts.Osu.PP > users[j].Accounts.Osu.PP\n\t})\n\n\tif len(users) > 50 {\n\t\tusers = users[:50]\n\t}\n\n\treturn ctx.HTML(components.OsuRankingList(users))\n}\n\n\/\/ Staff ...\nfunc Staff(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar() && user.Role != \"\"\n\t})\n\n\tsort.Slice(users, func(i, j int) bool {\n\t\tif users[i].Role == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif users[j].Role == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\treturn users[i].Role == \"admin\"\n\t})\n\n\treturn ctx.HTML(components.Users(users))\n}\n<commit_msg>Sort by followers and make it deterministic<commit_after>package users\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n)\n\n\/\/ Active ...\nfunc Active(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar()\n\t})\n\n\tsort.Slice(users, func(i, j int) bool {\n\t\tfollowersA := users[i].FollowersCount()\n\t\tfollowersB := users[j].FollowersCount()\n\n\t\tif followersA == followersB {\n\t\t\treturn users[i].Nick > users[j].Nick\n\t\t}\n\n\t\treturn followersA > followersB\n\t})\n\n\t\/\/ arn.SortUsersLastSeen(users)\n\n\treturn ctx.HTML(components.Users(users))\n}\n\n\/\/ Osu ...\nfunc Osu(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar() && user.Accounts.Osu.PP > 0\n\t})\n\n\t\/\/ Sort by pp\n\tsort.Slice(users, func(i, j int) bool {\n\t\treturn users[i].Accounts.Osu.PP > users[j].Accounts.Osu.PP\n\t})\n\n\tif len(users) > 50 {\n\t\tusers = users[:50]\n\t}\n\n\treturn ctx.HTML(components.OsuRankingList(users))\n}\n\n\/\/ Staff ...\nfunc Staff(ctx *aero.Context) string {\n\tusers := arn.FilterUsers(func(user *arn.User) bool {\n\t\treturn user.IsActive() && user.HasAvatar() && user.Role != \"\"\n\t})\n\n\tsort.Slice(users, func(i, j int) bool {\n\t\tif users[i].Role == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif users[j].Role == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\treturn users[i].Role == \"admin\"\n\t})\n\n\treturn ctx.HTML(components.Users(users))\n}\n<|endoftext|>"} {"text":"<commit_before>package git\n\n\/\/ everything git and github related\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst GIT_BASE_DIR = \"repo\"\n\n\/\/ Invoke a `command` in `workdir` with `args`, connecting up its Stdout and Stderr\nfunc Command(workdir, command string, args ...string) *exec.Cmd {\n\t\/\/ log.Printf(\"wd = %s cmd = %s, args = %q\", workdir, command, append([]string{}, args...))\n\n\tcmd := exec.Command(command, args...)\n\tcmd.Dir = workdir\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\nvar (\n\tErrEmptyRepoName = errors.New(\"Empty repository name\")\n\tErrEmptyRepoOrganization = errors.New(\"Empty repository organization\")\n\tErrUserNotAllowed = errors.New(\"User not in the allowed set\")\n)\n\ntype Repository struct {\n\tName string `json:\"name\"`\n\tUrl string `json:\"url\"`\n\tOrganization string `json:\"organization\"`\n}\n\ntype Pusher struct {\n\tName string `json:\"name\"`\n}\n\ntype NonGithub struct {\n\tNoBuild bool `json:\"nobuild\"`\n\tWait bool `json:\"wait\"`\n}\n\ntype JustNongithub struct {\n\tNonGithub NonGithub `json:\"nongithub\"`\n}\n\nfunc ParseJustNongithub(in []byte) (j JustNongithub, err error) {\n\terr = json.Unmarshal(in, &j)\n\treturn\n}\n\ntype PushEvent struct {\n\tRef string `json:\"ref\"`\n\tDeleted bool `json:\"deleted\"`\n\tRepository Repository `json:\"repository\"`\n\tAfter string `json:\"after\"`\n\tPusher Pusher `json:\"pusher\"`\n\tNonGithub NonGithub `json:\"nongithub\"`\n\tHtmlUrl string `json:\"html_url\"`\n}\n\ntype GithubStatus struct {\n\tState string `json:\"state\"`\n\tTargetUrl string `json:\"target_url\"`\n\tDescription string `json:\"description\"`\n}\n\nvar ErrSkipGithubEndpoint = errors.New(\"Github endpoint skipped\")\n\n\/\/ Creates or updates a mirror of `url` at `gitDir` using `git clone --mirror`\nfunc gitLocalMirror(\n\turl, gitDir, ref string,\n\tmessages io.Writer,\n) (err error) {\n\n\t\/\/ When mirroring, allow up to two minutes before giving up.\n\tconst MirrorTimeout = 2 * time.Minute\n\tctx, done := context.WithTimeout(context.Background(), MirrorTimeout)\n\tdefer done()\n\n\tif _, err := os.Stat(gitDir); err == nil {\n\t\t\/\/ Repo already exists, don't need to clone it.\n\n\t\tif gitAlreadyHaveRef(gitDir, ref) {\n\t\t\t\/\/ Sha already exists, don't need to fetch.\n\t\t\t\/\/ log.Printf(\"Already have ref: %v %v\", gitDir, ref)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn gitFetch(ctx, gitDir, url, messages)\n\t}\n\n\terr = os.MkdirAll(gitDir, 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"gitLocalMirror: %v\", err)\n\t}\n\n\treturn gitClone(ctx, url, gitDir, messages)\n}\n\nfunc gitClone(\n\tctx context.Context,\n\turl, gitDir string,\n\tmessages io.Writer,\n) error {\n\tcmd := Command(\".\", \"git\", \"clone\", \"-q\", \"--mirror\", url, gitDir)\n\tcmd.Stdout = messages\n\tcmd.Stderr = messages\n\treturn ContextRun(ctx, cmd)\n}\n\n\/\/ Run cmd within a net Context.\n\/\/ If the context is cancelled or times out, the process is killed.\nfunc ContextRun(ctx context.Context, cmd *exec.Cmd) error {\n\terrc := make(chan error)\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() { errc <- cmd.Wait() }()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tcmd.Process.Kill()\n\t\treturn ctx.Err()\n\tcase err := <-errc:\n\t\treturn err \/\/ err may be nil\n\t}\n\treturn nil\n}\n\nfunc gitFetch(\n\tctx context.Context,\n\tgitDir, url string,\n\tmessages io.Writer,\n) (err error) {\n\n\tcmd := Command(gitDir, \"git\", \"fetch\", \"-f\", url, \"*:*\")\n\tcmd.Stdout = messages\n\tcmd.Stderr = messages\n\n\terr = ContextRun(ctx, cmd)\n\tif err != nil {\n\t\t\/\/ git fetch where there is no update is exit status 1.\n\t\tif err.Error() != \"exit status 1\" {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar ShaLike = regexp.MustCompile(\"[0-9a-zA-Z]{40}\")\n\n\/\/ Returns true if ref is sha-like and is in the object database.\n\/\/ The \"sha-like\" condition ensures that refs like `master` are always\n\/\/ freshened.\nfunc gitAlreadyHaveRef(gitDir, sha string) bool {\n\tif !ShaLike.MatchString(sha) {\n\t\treturn false\n\t}\n\tcmd := Command(gitDir, \"git\", \"cat-file\", \"-t\", sha)\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\n\terr := cmd.Run()\n\treturn err == nil\n}\n\nfunc gitHaveFile(gitDir, ref, path string) (ok bool, err error) {\n\tcmd := Command(gitDir, \"git\", \"show\", fmt.Sprintf(\"%s:%s\", ref, path))\n\tcmd.Stdout = nil \/\/ don't want to see the contents\n\terr = cmd.Run()\n\tok = true\n\tif err != nil {\n\t\tok = false\n\t\tif err.Error() == \"exit status 128\" {\n\t\t\t\/\/ This happens if the file doesn't exist.\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn ok, err\n}\n\nfunc gitRevParse(gitDir, ref string) (sha string, err error) {\n\tcmd := Command(gitDir, \"git\", \"rev-parse\", ref)\n\tcmd.Stdout = nil \/\/ for cmd.Output\n\n\tvar stdout []byte\n\tstdout, err = cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsha = strings.TrimSpace(string(stdout))\n\treturn\n}\n\nfunc gitDescribe(gitDir, ref string) (desc string, err error) {\n\tcmd := Command(gitDir, \"git\", \"describe\", \"--all\", \"--tags\", \"--long\", ref)\n\tcmd.Stdout = nil \/\/ for cmd.Output\n\n\tvar stdout []byte\n\tstdout, err = cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdesc = strings.TrimSpace(string(stdout))\n\tdesc = strings.TrimPrefix(desc, \"heads\/\")\n\treturn\n}\n\nfunc gitCheckout(gitDir, checkoutDir, ref string) error {\n\n\terr := os.MkdirAll(checkoutDir, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := []string{\"--work-tree\", checkoutDir, \"checkout\", ref, \"--\", \".\"}\n\terr = Command(gitDir, \"git\", args...).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set mtimes to time file is most recently affected by a commit.\n\t\/\/ This is annoying but unfortunately git sets the timestamps to now,\n\t\/\/ and docker depends on the mtime for cache invalidation.\n\terr = GitSetMTimes(gitDir, checkoutDir, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GitSetMTimes(gitDir, checkoutDir, ref string) error {\n\n\tcommitTimes, err := GitCommitTimes(gitDir, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlsFiles := Command(gitDir, \"git\", \"ls-tree\", \"-r\", \"--name-only\", \"-z\", ref)\n\tlsFiles.Stdout = nil\n\tout, err := lsFiles.Output()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"git ls-files: %v\", err)\n\t}\n\n\tdirMTimes := map[string]time.Time{}\n\n\tfiles := strings.Split(strings.TrimRight(string(out), \"\\x00\"), \"\\x00\")\n\tfor _, file := range files {\n\t\tmTime, ok := commitTimes[file]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to find file in history: %q\", file)\n\t\t}\n\n\t\t\/\/ Loop over each directory in the path to `file`, updating `dirMTimes`\n\t\t\/\/ to take the most recent time seen.\n\t\tdir := filepath.Dir(file)\n\t\tfor {\n\t\t\tif other, ok := dirMTimes[dir]; ok {\n\t\t\t\tif mTime.After(other) {\n\t\t\t\t\t\/\/ file mTime is more recent than previous seen for 'dir'\n\t\t\t\t\tdirMTimes[dir] = mTime\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ first occurrence of dir\n\t\t\t\tdirMTimes[dir] = mTime\n\t\t\t}\n\n\t\t\t\/\/ Remove one directory from the path until it isn't changed anymore\n\t\t\tif dir == filepath.Dir(dir) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdir = filepath.Dir(dir)\n\t\t}\n\n\t\terr = os.Chtimes(filepath.Join(checkoutDir, file), mTime, mTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"chtimes: %v\", err)\n\t\t}\n\n\t\t\/\/ fmt.Printf(\"%s: %s\\n\", file, mTime)\n\t}\n\n\tfor dir, mTime := range dirMTimes {\n\t\terr = os.Chtimes(filepath.Join(checkoutDir, dir), mTime, mTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"chtimes: %v\", err)\n\t\t}\n\t\t\/\/ fmt.Printf(\"%s: %s\\n\", dir, mTime)\n\t}\n\treturn nil\n}\n\ntype BuildDirectory struct {\n\tName, Dir string\n\tCleanup func()\n}\n\nfunc PrepBuildDirectory(\n\tgitDir, remote, ref string,\n) (*BuildDirectory, error) {\n\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Printf(\"Took %v to prep %v\", time.Since(start), remote)\n\t}()\n\n\tif strings.HasPrefix(remote, \"github.com\/\") {\n\t\tremote = \"https:\/\/\" + remote\n\t}\n\n\tgitDir, err := filepath.Abs(gitDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to determine abspath: %v\", err)\n\t}\n\n\terr = gitLocalMirror(remote, gitDir, ref, os.Stderr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to gitLocalMirror: %v\", err)\n\t}\n\n\trev, err := gitRevParse(gitDir, ref)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse rev: %v\", err)\n\t}\n\n\ttagName, err := gitDescribe(gitDir, rev)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to describe %v: %v\", rev, err)\n\t}\n\n\tshortRev := rev[:10]\n\tcheckoutPath := path.Join(gitDir, filepath.Join(\"c\/\", shortRev))\n\n\terr = recursiveCheckout(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcleanup := func() {\n\t\terr := SafeCleanup(checkoutPath)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error cleaning up path: \", checkoutPath)\n\t\t}\n\t}\n\n\treturn &BuildDirectory{tagName, checkoutPath, cleanup}, nil\n}\n\nfunc recursiveCheckout(gitDir, checkoutPath, rev string) error {\n\terr := gitCheckout(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to checkout: %v\", err)\n\t}\n\n\terr = PrepSubmodules(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prep submodules: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc SafeCleanup(path string) error {\n\tif path == \"\/\" || path == \"\" || path == \".\" || strings.Contains(path, \"..\") {\n\t\treturn fmt.Errorf(\"invalid path specified for deletion %q\", path)\n\t}\n\treturn os.RemoveAll(path)\n}\n<commit_msg>Create only the parent directory of gitDir<commit_after>package git\n\n\/\/ everything git and github related\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst GIT_BASE_DIR = \"repo\"\n\n\/\/ Invoke a `command` in `workdir` with `args`, connecting up its Stdout and Stderr\nfunc Command(workdir, command string, args ...string) *exec.Cmd {\n\t\/\/ log.Printf(\"wd = %s cmd = %s, args = %q\", workdir, command, append([]string{}, args...))\n\n\tcmd := exec.Command(command, args...)\n\tcmd.Dir = workdir\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\nvar (\n\tErrEmptyRepoName = errors.New(\"Empty repository name\")\n\tErrEmptyRepoOrganization = errors.New(\"Empty repository organization\")\n\tErrUserNotAllowed = errors.New(\"User not in the allowed set\")\n)\n\ntype Repository struct {\n\tName string `json:\"name\"`\n\tUrl string `json:\"url\"`\n\tOrganization string `json:\"organization\"`\n}\n\ntype Pusher struct {\n\tName string `json:\"name\"`\n}\n\ntype NonGithub struct {\n\tNoBuild bool `json:\"nobuild\"`\n\tWait bool `json:\"wait\"`\n}\n\ntype JustNongithub struct {\n\tNonGithub NonGithub `json:\"nongithub\"`\n}\n\nfunc ParseJustNongithub(in []byte) (j JustNongithub, err error) {\n\terr = json.Unmarshal(in, &j)\n\treturn\n}\n\ntype PushEvent struct {\n\tRef string `json:\"ref\"`\n\tDeleted bool `json:\"deleted\"`\n\tRepository Repository `json:\"repository\"`\n\tAfter string `json:\"after\"`\n\tPusher Pusher `json:\"pusher\"`\n\tNonGithub NonGithub `json:\"nongithub\"`\n\tHtmlUrl string `json:\"html_url\"`\n}\n\ntype GithubStatus struct {\n\tState string `json:\"state\"`\n\tTargetUrl string `json:\"target_url\"`\n\tDescription string `json:\"description\"`\n}\n\nvar ErrSkipGithubEndpoint = errors.New(\"Github endpoint skipped\")\n\n\/\/ Creates or updates a mirror of `url` at `gitDir` using `git clone --mirror`\nfunc gitLocalMirror(\n\turl, gitDir, ref string,\n\tmessages io.Writer,\n) (err error) {\n\n\t\/\/ When mirroring, allow up to two minutes before giving up.\n\tconst MirrorTimeout = 2 * time.Minute\n\tctx, done := context.WithTimeout(context.Background(), MirrorTimeout)\n\tdefer done()\n\n\tif _, err := os.Stat(gitDir); err == nil {\n\t\t\/\/ Repo already exists, don't need to clone it.\n\n\t\tif gitAlreadyHaveRef(gitDir, ref) {\n\t\t\t\/\/ Sha already exists, don't need to fetch.\n\t\t\t\/\/ log.Printf(\"Already have ref: %v %v\", gitDir, ref)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn gitFetch(ctx, gitDir, url, messages)\n\t}\n\n\terr = os.MkdirAll(filepath.Dir(gitDir), 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn gitClone(ctx, url, gitDir, messages)\n}\n\nfunc gitClone(\n\tctx context.Context,\n\turl, gitDir string,\n\tmessages io.Writer,\n) error {\n\tcmd := Command(\".\", \"git\", \"clone\", \"-q\", \"--mirror\", url, gitDir)\n\tcmd.Stdout = messages\n\tcmd.Stderr = messages\n\treturn ContextRun(ctx, cmd)\n}\n\n\/\/ Run cmd within a net Context.\n\/\/ If the context is cancelled or times out, the process is killed.\nfunc ContextRun(ctx context.Context, cmd *exec.Cmd) error {\n\terrc := make(chan error)\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() { errc <- cmd.Wait() }()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tcmd.Process.Kill()\n\t\treturn ctx.Err()\n\tcase err := <-errc:\n\t\treturn err \/\/ err may be nil\n\t}\n\treturn nil\n}\n\nfunc gitFetch(\n\tctx context.Context,\n\tgitDir, url string,\n\tmessages io.Writer,\n) (err error) {\n\n\tcmd := Command(gitDir, \"git\", \"fetch\", \"-f\", url, \"*:*\")\n\tcmd.Stdout = messages\n\tcmd.Stderr = messages\n\n\terr = ContextRun(ctx, cmd)\n\tif err != nil {\n\t\t\/\/ git fetch where there is no update is exit status 1.\n\t\tif err.Error() != \"exit status 1\" {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar ShaLike = regexp.MustCompile(\"[0-9a-zA-Z]{40}\")\n\n\/\/ Returns true if ref is sha-like and is in the object database.\n\/\/ The \"sha-like\" condition ensures that refs like `master` are always\n\/\/ freshened.\nfunc gitAlreadyHaveRef(gitDir, sha string) bool {\n\tif !ShaLike.MatchString(sha) {\n\t\treturn false\n\t}\n\tcmd := Command(gitDir, \"git\", \"cat-file\", \"-t\", sha)\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\n\terr := cmd.Run()\n\treturn err == nil\n}\n\nfunc gitHaveFile(gitDir, ref, path string) (ok bool, err error) {\n\tcmd := Command(gitDir, \"git\", \"show\", fmt.Sprintf(\"%s:%s\", ref, path))\n\tcmd.Stdout = nil \/\/ don't want to see the contents\n\terr = cmd.Run()\n\tok = true\n\tif err != nil {\n\t\tok = false\n\t\tif err.Error() == \"exit status 128\" {\n\t\t\t\/\/ This happens if the file doesn't exist.\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn ok, err\n}\n\nfunc gitRevParse(gitDir, ref string) (sha string, err error) {\n\tcmd := Command(gitDir, \"git\", \"rev-parse\", ref)\n\tcmd.Stdout = nil \/\/ for cmd.Output\n\n\tvar stdout []byte\n\tstdout, err = cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsha = strings.TrimSpace(string(stdout))\n\treturn\n}\n\nfunc gitDescribe(gitDir, ref string) (desc string, err error) {\n\tcmd := Command(gitDir, \"git\", \"describe\", \"--all\", \"--tags\", \"--long\", ref)\n\tcmd.Stdout = nil \/\/ for cmd.Output\n\n\tvar stdout []byte\n\tstdout, err = cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdesc = strings.TrimSpace(string(stdout))\n\tdesc = strings.TrimPrefix(desc, \"heads\/\")\n\treturn\n}\n\nfunc gitCheckout(gitDir, checkoutDir, ref string) error {\n\n\terr := os.MkdirAll(checkoutDir, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := []string{\"--work-tree\", checkoutDir, \"checkout\", ref, \"--\", \".\"}\n\terr = Command(gitDir, \"git\", args...).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set mtimes to time file is most recently affected by a commit.\n\t\/\/ This is annoying but unfortunately git sets the timestamps to now,\n\t\/\/ and docker depends on the mtime for cache invalidation.\n\terr = GitSetMTimes(gitDir, checkoutDir, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc GitSetMTimes(gitDir, checkoutDir, ref string) error {\n\n\tcommitTimes, err := GitCommitTimes(gitDir, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlsFiles := Command(gitDir, \"git\", \"ls-tree\", \"-r\", \"--name-only\", \"-z\", ref)\n\tlsFiles.Stdout = nil\n\tout, err := lsFiles.Output()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"git ls-files: %v\", err)\n\t}\n\n\tdirMTimes := map[string]time.Time{}\n\n\tfiles := strings.Split(strings.TrimRight(string(out), \"\\x00\"), \"\\x00\")\n\tfor _, file := range files {\n\t\tmTime, ok := commitTimes[file]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to find file in history: %q\", file)\n\t\t}\n\n\t\t\/\/ Loop over each directory in the path to `file`, updating `dirMTimes`\n\t\t\/\/ to take the most recent time seen.\n\t\tdir := filepath.Dir(file)\n\t\tfor {\n\t\t\tif other, ok := dirMTimes[dir]; ok {\n\t\t\t\tif mTime.After(other) {\n\t\t\t\t\t\/\/ file mTime is more recent than previous seen for 'dir'\n\t\t\t\t\tdirMTimes[dir] = mTime\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ first occurrence of dir\n\t\t\t\tdirMTimes[dir] = mTime\n\t\t\t}\n\n\t\t\t\/\/ Remove one directory from the path until it isn't changed anymore\n\t\t\tif dir == filepath.Dir(dir) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdir = filepath.Dir(dir)\n\t\t}\n\n\t\terr = os.Chtimes(filepath.Join(checkoutDir, file), mTime, mTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"chtimes: %v\", err)\n\t\t}\n\n\t\t\/\/ fmt.Printf(\"%s: %s\\n\", file, mTime)\n\t}\n\n\tfor dir, mTime := range dirMTimes {\n\t\terr = os.Chtimes(filepath.Join(checkoutDir, dir), mTime, mTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"chtimes: %v\", err)\n\t\t}\n\t\t\/\/ fmt.Printf(\"%s: %s\\n\", dir, mTime)\n\t}\n\treturn nil\n}\n\ntype BuildDirectory struct {\n\tName, Dir string\n\tCleanup func()\n}\n\nfunc PrepBuildDirectory(\n\tgitDir, remote, ref string,\n) (*BuildDirectory, error) {\n\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Printf(\"Took %v to prep %v\", time.Since(start), remote)\n\t}()\n\n\tif strings.HasPrefix(remote, \"github.com\/\") {\n\t\tremote = \"https:\/\/\" + remote\n\t}\n\n\tgitDir, err := filepath.Abs(gitDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to determine abspath: %v\", err)\n\t}\n\n\terr = gitLocalMirror(remote, gitDir, ref, os.Stderr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to gitLocalMirror: %v\", err)\n\t}\n\n\trev, err := gitRevParse(gitDir, ref)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse rev: %v\", err)\n\t}\n\n\ttagName, err := gitDescribe(gitDir, rev)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to describe %v: %v\", rev, err)\n\t}\n\n\tshortRev := rev[:10]\n\tcheckoutPath := path.Join(gitDir, filepath.Join(\"c\/\", shortRev))\n\n\terr = recursiveCheckout(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcleanup := func() {\n\t\terr := SafeCleanup(checkoutPath)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error cleaning up path: \", checkoutPath)\n\t\t}\n\t}\n\n\treturn &BuildDirectory{tagName, checkoutPath, cleanup}, nil\n}\n\nfunc recursiveCheckout(gitDir, checkoutPath, rev string) error {\n\terr := gitCheckout(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to checkout: %v\", err)\n\t}\n\n\terr = PrepSubmodules(gitDir, checkoutPath, rev)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prep submodules: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc SafeCleanup(path string) error {\n\tif path == \"\/\" || path == \"\" || path == \".\" || strings.Contains(path, \"..\") {\n\t\treturn fmt.Errorf(\"invalid path specified for deletion %q\", path)\n\t}\n\treturn os.RemoveAll(path)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014-2020 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\n\/\/ Package assets hold utilities for serving static assets.\n\/\/\n\/\/ The actual assets live in auto subpackages instead of here,\n\/\/ because the set of assets varies per program.\npackage assets\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ An Asset is an embedded file to be served over HTTP.\ntype Asset struct {\n\tContent string \/\/ Contents of asset, possibly gzipped.\n\tGzipped bool\n\tLength int \/\/ Length of (decompressed) Content.\n\tFilename string \/\/ Original filename, determines Content-Type.\n\tModified time.Time \/\/ Determines ETag and Last-Modified.\n}\n\n\/\/ Serve writes a gzipped asset to w.\nfunc Serve(w http.ResponseWriter, r *http.Request, asset Asset) {\n\theader := w.Header()\n\n\tmtype := MimeTypeForFile(asset.Filename)\n\tif mtype != \"\" {\n\t\theader.Set(\"Content-Type\", mtype)\n\t}\n\n\tetag := fmt.Sprintf(`\"%x\"`, asset.Modified.Unix())\n\theader.Set(\"ETag\", etag)\n\theader.Set(\"Last-Modified\", asset.Modified.Format(http.TimeFormat))\n\n\tt, err := time.Parse(http.TimeFormat, r.Header.Get(\"If-Modified-Since\"))\n\tif err == nil && !asset.Modified.After(t) {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"If-None-Match\") == etag {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tswitch {\n\tcase !asset.Gzipped:\n\t\theader.Set(\"Content-Length\", strconv.Itoa(len(asset.Content)))\n\t\tio.WriteString(w, asset.Content)\n\tcase strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\"):\n\t\theader.Set(\"Content-Encoding\", \"gzip\")\n\t\theader.Set(\"Content-Length\", strconv.Itoa(len(asset.Content)))\n\t\tio.WriteString(w, asset.Content)\n\tdefault:\n\t\theader.Set(\"Content-Length\", strconv.Itoa(asset.Length))\n\t\t\/\/ gunzip for browsers that don't want gzip.\n\t\tvar gr *gzip.Reader\n\t\tgr, _ = gzip.NewReader(strings.NewReader(asset.Content))\n\t\tio.Copy(w, gr)\n\t\tgr.Close()\n\t}\n}\n\n\/\/ MimeTypeForFile returns the appropriate MIME type for an asset,\n\/\/ based on the filename.\n\/\/\n\/\/ We use a built in table of the common types since the system\n\/\/ TypeByExtension might be unreliable. But if we don't know, we delegate\n\/\/ to the system. All our text files are in UTF-8.\nfunc MimeTypeForFile(file string) string {\n\text := filepath.Ext(file)\n\tswitch ext {\n\tcase \".htm\", \".html\":\n\t\treturn \"text\/html; charset=utf-8\"\n\tcase \".css\":\n\t\treturn \"text\/css; charset=utf-8\"\n\tcase \".js\":\n\t\treturn \"application\/javascript; charset=utf-8\"\n\tcase \".json\":\n\t\treturn \"application\/json; charset=utf-8\"\n\tcase \".png\":\n\t\treturn \"image\/png\"\n\tcase \".ttf\":\n\t\treturn \"application\/x-font-ttf\"\n\tcase \".woff\":\n\t\treturn \"application\/x-font-woff\"\n\tcase \".svg\":\n\t\treturn \"image\/svg+xml; charset=utf-8\"\n\tdefault:\n\t\treturn mime.TypeByExtension(ext)\n\t}\n}\n<commit_msg>lib\/assets: MIME types, time formats (#8351)<commit_after>\/\/ Copyright (C) 2014-2020 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\n\/\/ Package assets hold utilities for serving static assets.\n\/\/\n\/\/ The actual assets live in auto subpackages instead of here,\n\/\/ because the set of assets varies per program.\npackage assets\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ An Asset is an embedded file to be served over HTTP.\ntype Asset struct {\n\tContent string \/\/ Contents of asset, possibly gzipped.\n\tGzipped bool\n\tLength int \/\/ Length of (decompressed) Content.\n\tFilename string \/\/ Original filename, determines Content-Type.\n\tModified time.Time \/\/ Determines ETag and Last-Modified.\n}\n\n\/\/ Serve writes a gzipped asset to w.\nfunc Serve(w http.ResponseWriter, r *http.Request, asset Asset) {\n\theader := w.Header()\n\n\tmtype := MimeTypeForFile(asset.Filename)\n\tif mtype != \"\" {\n\t\theader.Set(\"Content-Type\", mtype)\n\t}\n\n\tetag := fmt.Sprintf(`\"%x\"`, asset.Modified.Unix())\n\theader.Set(\"ETag\", etag)\n\theader.Set(\"Last-Modified\", asset.Modified.Format(http.TimeFormat))\n\n\tt, err := http.ParseTime(r.Header.Get(\"If-Modified-Since\"))\n\tif err == nil && !asset.Modified.After(t) {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"If-None-Match\") == etag {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tswitch {\n\tcase !asset.Gzipped:\n\t\theader.Set(\"Content-Length\", strconv.Itoa(len(asset.Content)))\n\t\tio.WriteString(w, asset.Content)\n\tcase strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\"):\n\t\theader.Set(\"Content-Encoding\", \"gzip\")\n\t\theader.Set(\"Content-Length\", strconv.Itoa(len(asset.Content)))\n\t\tio.WriteString(w, asset.Content)\n\tdefault:\n\t\theader.Set(\"Content-Length\", strconv.Itoa(asset.Length))\n\t\t\/\/ gunzip for browsers that don't want gzip.\n\t\tvar gr *gzip.Reader\n\t\tgr, _ = gzip.NewReader(strings.NewReader(asset.Content))\n\t\tio.Copy(w, gr)\n\t\tgr.Close()\n\t}\n}\n\n\/\/ MimeTypeForFile returns the appropriate MIME type for an asset,\n\/\/ based on the filename.\n\/\/\n\/\/ We use a built in table of the common types since the system\n\/\/ TypeByExtension might be unreliable. But if we don't know, we delegate\n\/\/ to the system. All our text files are in UTF-8.\nfunc MimeTypeForFile(file string) string {\n\text := filepath.Ext(file)\n\tswitch ext {\n\tcase \".htm\", \".html\":\n\t\treturn \"text\/html; charset=utf-8\"\n\tcase \".css\":\n\t\treturn \"text\/css; charset=utf-8\"\n\tcase \".eot\":\n\t\treturn \"application\/vnd.ms-fontobject\"\n\tcase \".js\":\n\t\treturn \"application\/javascript; charset=utf-8\"\n\tcase \".json\":\n\t\treturn \"application\/json; charset=utf-8\"\n\tcase \".png\":\n\t\treturn \"image\/png\"\n\tcase \".svg\":\n\t\treturn \"image\/svg+xml; charset=utf-8\"\n\tcase \".ttf\":\n\t\treturn \"font\/ttf\"\n\tcase \".woff\":\n\t\treturn \"font\/woff\"\n\tcase \".woff2\":\n\t\treturn \"font\/woff2\"\n\tdefault:\n\t\treturn mime.TypeByExtension(ext)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package camoproxy provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow and deny list support.\npackage camoproxy\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"github.com\/cactus\/gologit\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Headers that are acceptible to pass from the client to the remote\n\/\/ server. Only those present and true, are forwarded.\nvar validReqHeaders = map[string]bool{\n\t\"Accept\": true,\n\t\"Accept-Charset\": true,\n\t\"Accept-Encoding\": true,\n\t\"Accept-Language\": true,\n\t\"Cache-Control\": true,\n\t\"If-None-Match\": true,\n\t\"If-Modified-Since\": true,\n}\n\n\/\/ Headers that are acceptible to pass from the remote server to the\n\/\/ client. Only those present and true, are forwarded. Empty implies\n\/\/ no filtering.\nvar validRespHeaders = map[string]bool{}\n\n\/\/ A ProxyHandler is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow and deny list support\ntype ProxyHandler struct {\n\tClient *http.Client\n\tHMacKey []byte\n\tAllowlist []*regexp.Regexp\n\tDenylist []*regexp.Regexp\n\tMaxSize int64\n\tlog *gologit.DebugLogger\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow\/Deny list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for \n\/\/ proper image content types.\nfunc (p *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tp.log.Debugln(\"Request:\", req.URL)\n\n\t\/\/ do fiddly things\n\tif req.Method != \"GET\" {\n\t\tlog.Println(\"Something other than GET received\", req.Method)\n\t\thttp.Error(w, \"Method Not Implemented\", http.StatusNotImplemented)\n\t\treturn\n\t}\n\n\tsurl, ok := p.validateURL(req.URL.Path, p.HMacKey)\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\tp.log.Debugln(\"URL:\", surl)\n\n\tu, err := url.Parse(surl)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif u.Host == \"\" {\n\t\thttp.Error(w, \"Bad url\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ if Allowlist is set, require match\n\tmatchFound := true\n\tif len(p.Allowlist) > 0 {\n\t\tmatchFound = false\n\t\tfor _, rgx := range p.Allowlist {\n\t\t\tif rgx.MatchString(u.Host) {\n\t\t\t\tmatchFound = true\n\t\t\t}\n\t\t}\n\t}\n\tif !matchFound {\n\t\thttp.Error(w, \"Allowlist host failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ filter out Denylist urls based on regexes. Do this second\n\t\/\/ as Denylist takes precedence\n\tfor _, rgx := range p.Denylist {\n\t\tif rgx.MatchString(u.Host) {\n\t\t\thttp.Error(w, \"Denylist host failure\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnreq, err := http.NewRequest(\"GET\", surl, nil)\n\tif err != nil {\n\t\tp.log.Debugln(\"Could not create NewRequest\", err)\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeader(&nreq.Header, &req.Header, &validReqHeaders)\n\tnreq.Header.Add(\"connection\", \"close\")\n\tnreq.Header.Add(\"user-agent\", \"pew pew pew\")\n\n\tresp, err := p.Client.Do(nreq)\n\tif err != nil {\n\t\tp.log.Debugln(\"Could not connect to endpoint\", err)\n\t\tif strings.Contains(err.Error(), \"timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.MaxSize {\n\t\tp.log.Debugln(\"Content length exceeded\", surl)\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 300:\n\t\tlog.Println(\"Multiple choices not supported\")\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303:\n\t\t\/\/ if we get a redirect here, we either disabled following, \n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 200:\n\t\t\/\/ check content type\n\t\tct, ok := resp.Header[http.CanonicalHeaderKey(\"content-type\")]\n\t\tif !ok || ct[0][:6] != \"image\/\" {\n\t\t\tlog.Println(\"Non-Image content-type returned\", u)\n\t\t\thttp.Error(w, \"Non-Image content-type returned\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeader(&h, &resp.Header, &validRespHeaders)\n\th.Add(\"X-Content-Type-Options\", \"nosniff\")\n\tif resp.StatusCode == 304 && h.Get(\"Content-Type\") != \"\" {\n\t\th.Del(\"Content-Type\")\n\t}\n\n\tw.WriteHeader(resp.StatusCode)\n\tio.Copy(w, resp.Body)\n\tp.log.Debugln(req, resp.StatusCode)\n}\n\n\/\/ validateURL ensures the url is properly verified via HMAC, and then\n\/\/ unencodes the url, returning the url (if valid) and whether the\n\/\/ HMAC was verified.\nfunc (p *ProxyHandler) validateURL(path string, key []byte) (surl string, valid bool) {\n\tpathParts := strings.SplitN(path[1:], \"\/\", 3)\n\tvalid = false\n\tif len(pathParts) != 2 {\n\t\tp.log.Println(\"Bad path format\", pathParts)\n\t\treturn\n\t}\n\thexdig, hexurl := pathParts[0], pathParts[1]\n\turlBytes, err := hex.DecodeString(hexurl)\n\tif err != nil {\n\t\tp.log.Println(\"Bad Hex Decode\", hexurl)\n\t\treturn\n\t}\n\tsurl = string(urlBytes)\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write([]byte(surl))\n\tmacSum := hex.EncodeToString(mac.Sum([]byte{}))\n\tif macSum != hexdig {\n\t\tp.log.Debugf(\"Bad signature: %s != %s\\n\", macSum, hexdig)\n\t\treturn\n\t}\n\tvalid = true\n\treturn\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *ProxyHandler) copyHeader(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && !ok && !x {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\ntype ProxyConfig struct {\n\tHmacKey string\n\tAllowList []string\n\tDenyList []string\n\tMaxSize int64\n\tFollowRedirects bool\n\tRequestTimeout uint\n}\n\nfunc New(pc ProxyConfig, logger *gologit.DebugLogger) *ProxyHandler {\n\ttr := &http.Transport{\n\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\t\/\/ 2 second timeout on requests\n\t\t\ttimeout := time.Second * time.Duration(pc.RequestTimeout)\n\t\t\tc, err := net.DialTimeout(netw, addr, timeout)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ also set time limit on reading\n\t\t\tc.SetDeadline(time.Now().Add(timeout))\n\t\t\treturn c, nil\n\t\t}}\n\n\t\/\/ spawn an idle conn trimmer\n\tgo func() {\n\t\ttime.Sleep(5 * time.Minute)\n\t\ttr.CloseIdleConnections()\n\t}()\n\n\t\/\/ build\/compile regex\n\tclient := &http.Client{Transport: tr}\n\tif !pc.FollowRedirects {\n\t\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\t\treturn errors.New(\"Not following redirect\")\n\t\t}\n\t}\n\n\tallow := make([]*regexp.Regexp, 0)\n\tdeny := make([]*regexp.Regexp, 0)\n\n\tvar c *regexp.Regexp\n\tvar err error\n\tfor _, v := range pc.DenyList {\n\t\tc, err = regexp.Compile(v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdeny = append(deny, c)\n\t}\n\tfor _, v := range pc.AllowList {\n\t\tc, err = regexp.Compile(v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tallow = append(allow, c)\n\t}\n\n\treturn &ProxyHandler{\n\t\tClient: client,\n\t\tHMacKey: []byte(pc.HmacKey),\n\t\tAllowlist: allow,\n\t\tDenylist: deny,\n\t\tMaxSize: pc.MaxSize,\n\t\tlog: logger}\n}\n<commit_msg>reorder cases<commit_after>\/\/ Package camoproxy provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow and deny list support.\npackage camoproxy\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"github.com\/cactus\/gologit\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Headers that are acceptible to pass from the client to the remote\n\/\/ server. Only those present and true, are forwarded.\nvar validReqHeaders = map[string]bool{\n\t\"Accept\": true,\n\t\"Accept-Charset\": true,\n\t\"Accept-Encoding\": true,\n\t\"Accept-Language\": true,\n\t\"Cache-Control\": true,\n\t\"If-None-Match\": true,\n\t\"If-Modified-Since\": true,\n}\n\n\/\/ Headers that are acceptible to pass from the remote server to the\n\/\/ client. Only those present and true, are forwarded. Empty implies\n\/\/ no filtering.\nvar validRespHeaders = map[string]bool{}\n\n\/\/ A ProxyHandler is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow and deny list support\ntype ProxyHandler struct {\n\tClient *http.Client\n\tHMacKey []byte\n\tAllowlist []*regexp.Regexp\n\tDenylist []*regexp.Regexp\n\tMaxSize int64\n\tlog *gologit.DebugLogger\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow\/Deny list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for \n\/\/ proper image content types.\nfunc (p *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tp.log.Debugln(\"Request:\", req.URL)\n\n\t\/\/ do fiddly things\n\tif req.Method != \"GET\" {\n\t\tlog.Println(\"Something other than GET received\", req.Method)\n\t\thttp.Error(w, \"Method Not Implemented\", http.StatusNotImplemented)\n\t\treturn\n\t}\n\n\tsurl, ok := p.validateURL(req.URL.Path, p.HMacKey)\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\tp.log.Debugln(\"URL:\", surl)\n\n\tu, err := url.Parse(surl)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif u.Host == \"\" {\n\t\thttp.Error(w, \"Bad url\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ if Allowlist is set, require match\n\tmatchFound := true\n\tif len(p.Allowlist) > 0 {\n\t\tmatchFound = false\n\t\tfor _, rgx := range p.Allowlist {\n\t\t\tif rgx.MatchString(u.Host) {\n\t\t\t\tmatchFound = true\n\t\t\t}\n\t\t}\n\t}\n\tif !matchFound {\n\t\thttp.Error(w, \"Allowlist host failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ filter out Denylist urls based on regexes. Do this second\n\t\/\/ as Denylist takes precedence\n\tfor _, rgx := range p.Denylist {\n\t\tif rgx.MatchString(u.Host) {\n\t\t\thttp.Error(w, \"Denylist host failure\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnreq, err := http.NewRequest(\"GET\", surl, nil)\n\tif err != nil {\n\t\tp.log.Debugln(\"Could not create NewRequest\", err)\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeader(&nreq.Header, &req.Header, &validReqHeaders)\n\tnreq.Header.Add(\"connection\", \"close\")\n\tnreq.Header.Add(\"user-agent\", \"pew pew pew\")\n\n\tresp, err := p.Client.Do(nreq)\n\tif err != nil {\n\t\tp.log.Debugln(\"Could not connect to endpoint\", err)\n\t\tif strings.Contains(err.Error(), \"timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.MaxSize {\n\t\tp.log.Debugln(\"Content length exceeded\", surl)\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\t\/\/ check content type\n\t\tct, ok := resp.Header[http.CanonicalHeaderKey(\"content-type\")]\n\t\tif !ok || ct[0][:6] != \"image\/\" {\n\t\t\tlog.Println(\"Non-Image content-type returned\", u)\n\t\t\thttp.Error(w, \"Non-Image content-type returned\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase 300:\n\t\tlog.Println(\"Multiple choices not supported\")\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303:\n\t\t\/\/ if we get a redirect here, we either disabled following, \n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeader(&h, &resp.Header, &validRespHeaders)\n\th.Add(\"X-Content-Type-Options\", \"nosniff\")\n\tif resp.StatusCode == 304 && h.Get(\"Content-Type\") != \"\" {\n\t\th.Del(\"Content-Type\")\n\t}\n\n\tw.WriteHeader(resp.StatusCode)\n\tio.Copy(w, resp.Body)\n\tp.log.Debugln(req, resp.StatusCode)\n}\n\n\/\/ validateURL ensures the url is properly verified via HMAC, and then\n\/\/ unencodes the url, returning the url (if valid) and whether the\n\/\/ HMAC was verified.\nfunc (p *ProxyHandler) validateURL(path string, key []byte) (surl string, valid bool) {\n\tpathParts := strings.SplitN(path[1:], \"\/\", 3)\n\tvalid = false\n\tif len(pathParts) != 2 {\n\t\tp.log.Println(\"Bad path format\", pathParts)\n\t\treturn\n\t}\n\thexdig, hexurl := pathParts[0], pathParts[1]\n\turlBytes, err := hex.DecodeString(hexurl)\n\tif err != nil {\n\t\tp.log.Println(\"Bad Hex Decode\", hexurl)\n\t\treturn\n\t}\n\tsurl = string(urlBytes)\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write([]byte(surl))\n\tmacSum := hex.EncodeToString(mac.Sum([]byte{}))\n\tif macSum != hexdig {\n\t\tp.log.Debugf(\"Bad signature: %s != %s\\n\", macSum, hexdig)\n\t\treturn\n\t}\n\tvalid = true\n\treturn\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *ProxyHandler) copyHeader(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && !ok && !x {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\ntype ProxyConfig struct {\n\tHmacKey string\n\tAllowList []string\n\tDenyList []string\n\tMaxSize int64\n\tFollowRedirects bool\n\tRequestTimeout uint\n}\n\nfunc New(pc ProxyConfig, logger *gologit.DebugLogger) *ProxyHandler {\n\ttr := &http.Transport{\n\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\t\/\/ 2 second timeout on requests\n\t\t\ttimeout := time.Second * time.Duration(pc.RequestTimeout)\n\t\t\tc, err := net.DialTimeout(netw, addr, timeout)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ also set time limit on reading\n\t\t\tc.SetDeadline(time.Now().Add(timeout))\n\t\t\treturn c, nil\n\t\t}}\n\n\t\/\/ spawn an idle conn trimmer\n\tgo func() {\n\t\ttime.Sleep(5 * time.Minute)\n\t\ttr.CloseIdleConnections()\n\t}()\n\n\t\/\/ build\/compile regex\n\tclient := &http.Client{Transport: tr}\n\tif !pc.FollowRedirects {\n\t\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\t\treturn errors.New(\"Not following redirect\")\n\t\t}\n\t}\n\n\tallow := make([]*regexp.Regexp, 0)\n\tdeny := make([]*regexp.Regexp, 0)\n\n\tvar c *regexp.Regexp\n\tvar err error\n\tfor _, v := range pc.DenyList {\n\t\tc, err = regexp.Compile(v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdeny = append(deny, c)\n\t}\n\tfor _, v := range pc.AllowList {\n\t\tc, err = regexp.Compile(v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tallow = append(allow, c)\n\t}\n\n\treturn &ProxyHandler{\n\t\tClient: client,\n\t\tHMacKey: []byte(pc.HmacKey),\n\t\tAllowlist: allow,\n\t\tDenylist: deny,\n\t\tMaxSize: pc.MaxSize,\n\t\tlog: logger}\n}\n<|endoftext|>"} {"text":"<commit_before>package testing\n\nvar FluxEndToEndSkipList = map[string]map[string]string{\n\t\"universe\": {\n\t\t\/\/ TODO(adam) determine the reason for these test failures.\n\t\t\"cov\": \"Reason TBD\",\n\t\t\"covariance\": \"Reason TBD\",\n\t\t\"cumulative_sum\": \"Reason TBD\",\n\t\t\"cumulative_sum_default\": \"Reason TBD\",\n\t\t\"cumulative_sum_noop\": \"Reason TBD\",\n\t\t\"drop_non_existent\": \"Reason TBD\",\n\t\t\"first\": \"Reason TBD\",\n\t\t\"highestAverage\": \"Reason TBD\",\n\t\t\"highestMax\": \"Reason TBD\",\n\t\t\"histogram\": \"Reason TBD\",\n\t\t\"histogram_normalize\": \"Reason TBD\",\n\t\t\"histogram_quantile\": \"Reason TBD\",\n\t\t\"join\": \"Reason TBD\",\n\t\t\"join_across_measurements\": \"Reason TBD\",\n\t\t\"join_agg\": \"Reason TBD\",\n\t\t\"keep_non_existent\": \"Reason TBD\",\n\t\t\"key_values\": \"Reason TBD\",\n\t\t\"key_values_host_name\": \"Reason TBD\",\n\t\t\"last\": \"Reason TBD\",\n\t\t\"lowestAverage\": \"Reason TBD\",\n\t\t\"max\": \"Reason TBD\",\n\t\t\"min\": \"Reason TBD\",\n\t\t\"sample\": \"Reason TBD\",\n\t\t\"selector_preserve_time\": \"Reason TBD\",\n\t\t\"shift\": \"Reason TBD\",\n\t\t\"shift_negative_duration\": \"Reason TBD\",\n\t\t\"task_per_line\": \"Reason TBD\",\n\t\t\"top\": \"Reason TBD\",\n\t\t\"union\": \"Reason TBD\",\n\t\t\"union_heterogeneous\": \"Reason TBD\",\n\t\t\"unique\": \"Reason TBD\",\n\t\t\"distinct\": \"Reason TBD\",\n\n\t\t\/\/ it appears these occur when writing the input data. `to` may not be null safe.\n\t\t\"fill_bool\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_float\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_int\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_string\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_time\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_uint\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"window_null\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not float64\",\n\n\t\t\/\/ these may just be missing calls to range() in the tests. easy to fix in a new PR.\n\t\t\"group_nulls\": \"unbounded test\",\n\t\t\"integral\": \"unbounded test\",\n\t\t\"integral_columns\": \"unbounded test\",\n\t\t\"map\": \"unbounded test\",\n\t\t\"join_missing_on_col\": \"unbounded test\",\n\t\t\"rowfn_with_import\": \"unbounded test\",\n\n\t\t\/\/ the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table\n\t\t\"columns\": \"group key mismatch\",\n\t\t\"set\": \"column order mismatch\",\n\t\t\"simple_max\": \"_stop missing from expected output\",\n\t\t\"derivative\": \"time bounds mismatch (engine uses now() instead of bounds on input table)\",\n\t\t\"difference_columns\": \"data write\/read path loses columns x and y\",\n\t\t\"keys\": \"group key mismatch\",\n\n\t\t\/\/ failed to read meta data errors: the CSV encoding is incomplete probably due to data schema errors. needs more detailed investigation to find root cause of error\n\t\t\"filter_by_regex\": \"failed to read metadata\",\n\t\t\"filter_by_tags\": \"failed to read metadata\",\n\t\t\"group\": \"failed to read metadata\",\n\t\t\"group_except\": \"failed to read metadata\",\n\t\t\"group_ungroup\": \"failed to read metadata\",\n\t\t\"pivot_mean\": \"failed to read metadata\",\n\t\t\"histogram_quantile_minvalue\": \"failed to read meta data: no column with label _measurement exists\",\n\t\t\"increase\": \"failed to read meta data: table has no _value column\",\n\n\t\t\"string_max\": \"error: invalid use of function: *functions.MaxSelector has no implementation for type string (https:\/\/github.com\/influxdata\/platform\/issues\/224)\",\n\t\t\"null_as_value\": \"null not supported as value in influxql (https:\/\/github.com\/influxdata\/platform\/issues\/353)\",\n\t\t\"string_interp\": \"string interpolation not working as expected in flux (https:\/\/github.com\/influxdata\/platform\/issues\/404)\",\n\t\t\"to\": \"to functions are not supported in the testing framework (https:\/\/github.com\/influxdata\/flux\/issues\/77)\",\n\t\t\"covariance_missing_column_1\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"covariance_missing_column_2\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"drop_before_rename\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"drop_referenced\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"yield\": \"yield requires special test case (https:\/\/github.com\/influxdata\/flux\/issues\/535)\",\n\n\t\t\"window_group_mean_ungroup\": \"window trigger optimization modifies sort order of its output tables (https:\/\/github.com\/influxdata\/flux\/issues\/1067)\",\n\n\t\t\"median_column\": \"failing in different ways (https:\/\/github.com\/influxdata\/influxdb\/issues\/13909)\",\n\t\t\"dynamic_query\": \"panic when executing\",\n\n\t\t\"to_int\": \"dateTime conversion issue: https:\/\/github.com\/influxdata\/influxdb\/issues\/14575\",\n\t\t\"to_uint\": \"dateTime conversion issue: https:\/\/github.com\/influxdata\/influxdb\/issues\/14575\",\n\n\t\t\"holt_winters_panic\": \"Expected output is an empty table which breaks the testing framework (https:\/\/github.com\/influxdata\/influxdb\/issues\/14749)\",\n\t},\n\t\"experimental\": {\n\t\t\"set\": \"Reason TBD\",\n\t},\n\t\"regexp\": {\n\t\t\"replaceAllString\": \"Reason TBD\",\n\t},\n\t\"http\": {\n\t\t\"http_endpoint\": \"need ability to test side effects in e2e tests: (https:\/\/github.com\/influxdata\/flux\/issues\/1723)\",\n\t},\n\t\"influxdata\/influxdb\/monitor\": {\n\t\t\"check\": \"Cannot see overridden options from inside stdlib functions (https:\/\/github.com\/influxdata\/flux\/issues\/1720)\",\n\t},\n\t\"influxdata\/influxdb\/secrets\": {\n\t\t\"secrets\": \"Cannot inject custom deps into the test framework so the secrets don't lookup correctly\",\n\t},\n\t\"internal\/promql\": {\n\t\t\"join\": \"unbounded test\",\n\t},\n\t\"testing\/chronograf\": {\n\t\t\"buckets\": \"unbounded test\",\n\t},\n\t\"testing\/kapacitor\": {\n\t\t\"fill_default\": \"unknown field type for f1\",\n\t},\n\t\"testing\/pandas\": {\n\t\t\"extract_regexp_findStringIndex\": \"pandas. map does not correctly handled returned arrays (https:\/\/github.com\/influxdata\/flux\/issues\/1387)\",\n\t\t\"partition_strings_splitN\": \"pandas. map does not correctly handled returned arrays (https:\/\/github.com\/influxdata\/flux\/issues\/1387)\",\n\t},\n\t\/\/ PromQL tests don't work. Missing `range` or `_measurement`.\n\t\"testing\/promql\": {\n\t\t\"changes\": \"unbounded or missing _measurement column\",\n\t\t\"dayOfMonth\": \"unbounded or missing _measurement column\",\n\t\t\"dayOfWeek\": \"unbounded or missing _measurement column\",\n\t\t\"daysInMonth\": \"unbounded or missing _measurement column\",\n\t\t\"emptyTable\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_counter_rate\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_nocounter\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_norate\": \"unbounded or missing _measurement column\",\n\t\t\"histogramQuantile\": \"unbounded or missing _measurement column\",\n\t\t\"holtWinters\": \"unbounded or missing _measurement column\",\n\t\t\"hour\": \"unbounded or missing _measurement column\",\n\t\t\"instantRate\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_empty_dst\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_full_string_match\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_multiple_groups\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_empty\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_nonexistent\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_not_matched\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_sub_string_match\": \"unbounded or missing _measurement column\",\n\t\t\"linearRegression_nopredict\": \"unbounded or missing _measurement column\",\n\t\t\"linearRegression_predict\": \"unbounded or missing _measurement column\",\n\t\t\"minute\": \"unbounded or missing _measurement column\",\n\t\t\"month\": \"unbounded or missing _measurement column\",\n\t\t\"resets\": \"unbounded or missing _measurement column\",\n\t\t\"timestamp\": \"unbounded or missing _measurement column\",\n\t\t\"year\": \"unbounded or missing _measurement column\",\n\t},\n}\n<commit_msg>chore(query\/stdlib): skip flaky tests regarding storage meta queries (#15451)<commit_after>package testing\n\nvar FluxEndToEndSkipList = map[string]map[string]string{\n\t\"universe\": {\n\t\t\/\/ TODO(adam) determine the reason for these test failures.\n\t\t\"cov\": \"Reason TBD\",\n\t\t\"covariance\": \"Reason TBD\",\n\t\t\"cumulative_sum\": \"Reason TBD\",\n\t\t\"cumulative_sum_default\": \"Reason TBD\",\n\t\t\"cumulative_sum_noop\": \"Reason TBD\",\n\t\t\"drop_non_existent\": \"Reason TBD\",\n\t\t\"first\": \"Reason TBD\",\n\t\t\"highestAverage\": \"Reason TBD\",\n\t\t\"highestMax\": \"Reason TBD\",\n\t\t\"histogram\": \"Reason TBD\",\n\t\t\"histogram_normalize\": \"Reason TBD\",\n\t\t\"histogram_quantile\": \"Reason TBD\",\n\t\t\"join\": \"Reason TBD\",\n\t\t\"join_across_measurements\": \"Reason TBD\",\n\t\t\"join_agg\": \"Reason TBD\",\n\t\t\"keep_non_existent\": \"Reason TBD\",\n\t\t\"key_values\": \"Reason TBD\",\n\t\t\"key_values_host_name\": \"Reason TBD\",\n\t\t\"last\": \"Reason TBD\",\n\t\t\"lowestAverage\": \"Reason TBD\",\n\t\t\"max\": \"Reason TBD\",\n\t\t\"min\": \"Reason TBD\",\n\t\t\"sample\": \"Reason TBD\",\n\t\t\"selector_preserve_time\": \"Reason TBD\",\n\t\t\"shift\": \"Reason TBD\",\n\t\t\"shift_negative_duration\": \"Reason TBD\",\n\t\t\"task_per_line\": \"Reason TBD\",\n\t\t\"top\": \"Reason TBD\",\n\t\t\"union\": \"Reason TBD\",\n\t\t\"union_heterogeneous\": \"Reason TBD\",\n\t\t\"unique\": \"Reason TBD\",\n\t\t\"distinct\": \"Reason TBD\",\n\n\t\t\/\/ it appears these occur when writing the input data. `to` may not be null safe.\n\t\t\"fill_bool\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_float\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_int\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_string\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_time\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"fill_uint\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not uint64\",\n\t\t\"window_null\": \"failed to read meta data: panic: interface conversion: interface {} is nil, not float64\",\n\n\t\t\/\/ these may just be missing calls to range() in the tests. easy to fix in a new PR.\n\t\t\"group_nulls\": \"unbounded test\",\n\t\t\"integral\": \"unbounded test\",\n\t\t\"integral_columns\": \"unbounded test\",\n\t\t\"map\": \"unbounded test\",\n\t\t\"join_missing_on_col\": \"unbounded test\",\n\t\t\"rowfn_with_import\": \"unbounded test\",\n\n\t\t\/\/ the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table\n\t\t\"columns\": \"group key mismatch\",\n\t\t\"set\": \"column order mismatch\",\n\t\t\"simple_max\": \"_stop missing from expected output\",\n\t\t\"derivative\": \"time bounds mismatch (engine uses now() instead of bounds on input table)\",\n\t\t\"difference_columns\": \"data write\/read path loses columns x and y\",\n\t\t\"keys\": \"group key mismatch\",\n\n\t\t\/\/ failed to read meta data errors: the CSV encoding is incomplete probably due to data schema errors. needs more detailed investigation to find root cause of error\n\t\t\"filter_by_regex\": \"failed to read metadata\",\n\t\t\"filter_by_tags\": \"failed to read metadata\",\n\t\t\"group\": \"failed to read metadata\",\n\t\t\"group_except\": \"failed to read metadata\",\n\t\t\"group_ungroup\": \"failed to read metadata\",\n\t\t\"pivot_mean\": \"failed to read metadata\",\n\t\t\"histogram_quantile_minvalue\": \"failed to read meta data: no column with label _measurement exists\",\n\t\t\"increase\": \"failed to read meta data: table has no _value column\",\n\n\t\t\"string_max\": \"error: invalid use of function: *functions.MaxSelector has no implementation for type string (https:\/\/github.com\/influxdata\/platform\/issues\/224)\",\n\t\t\"null_as_value\": \"null not supported as value in influxql (https:\/\/github.com\/influxdata\/platform\/issues\/353)\",\n\t\t\"string_interp\": \"string interpolation not working as expected in flux (https:\/\/github.com\/influxdata\/platform\/issues\/404)\",\n\t\t\"to\": \"to functions are not supported in the testing framework (https:\/\/github.com\/influxdata\/flux\/issues\/77)\",\n\t\t\"covariance_missing_column_1\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"covariance_missing_column_2\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"drop_before_rename\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"drop_referenced\": \"need to support known errors in new test framework (https:\/\/github.com\/influxdata\/flux\/issues\/536)\",\n\t\t\"yield\": \"yield requires special test case (https:\/\/github.com\/influxdata\/flux\/issues\/535)\",\n\n\t\t\"window_group_mean_ungroup\": \"window trigger optimization modifies sort order of its output tables (https:\/\/github.com\/influxdata\/flux\/issues\/1067)\",\n\n\t\t\"median_column\": \"failing in different ways (https:\/\/github.com\/influxdata\/influxdb\/issues\/13909)\",\n\t\t\"dynamic_query\": \"panic when executing\",\n\n\t\t\"to_int\": \"dateTime conversion issue: https:\/\/github.com\/influxdata\/influxdb\/issues\/14575\",\n\t\t\"to_uint\": \"dateTime conversion issue: https:\/\/github.com\/influxdata\/influxdb\/issues\/14575\",\n\n\t\t\"holt_winters_panic\": \"Expected output is an empty table which breaks the testing framework (https:\/\/github.com\/influxdata\/influxdb\/issues\/14749)\",\n\t},\n\t\"experimental\": {\n\t\t\"set\": \"Reason TBD\",\n\t},\n\t\"regexp\": {\n\t\t\"replaceAllString\": \"Reason TBD\",\n\t},\n\t\"http\": {\n\t\t\"http_endpoint\": \"need ability to test side effects in e2e tests: (https:\/\/github.com\/influxdata\/flux\/issues\/1723)\",\n\t},\n\t\"influxdata\/influxdb\/v1\": {\n\t\t\"show_measurements\": \"flaky test (https:\/\/github.com\/influxdata\/influxdb\/issues\/15450)\",\n\t\t\"show_tag_values\": \"flaky test (https:\/\/github.com\/influxdata\/influxdb\/issues\/15450)\",\n\t\t\"show_tag_keys\": \"flaky test (https:\/\/github.com\/influxdata\/influxdb\/issues\/15450)\",\n\t},\n\t\"influxdata\/influxdb\/monitor\": {\n\t\t\"check\": \"Cannot see overridden options from inside stdlib functions (https:\/\/github.com\/influxdata\/flux\/issues\/1720)\",\n\t},\n\t\"influxdata\/influxdb\/secrets\": {\n\t\t\"secrets\": \"Cannot inject custom deps into the test framework so the secrets don't lookup correctly\",\n\t},\n\t\"internal\/promql\": {\n\t\t\"join\": \"unbounded test\",\n\t},\n\t\"testing\/chronograf\": {\n\t\t\"buckets\": \"unbounded test\",\n\t},\n\t\"testing\/kapacitor\": {\n\t\t\"fill_default\": \"unknown field type for f1\",\n\t},\n\t\"testing\/pandas\": {\n\t\t\"extract_regexp_findStringIndex\": \"pandas. map does not correctly handled returned arrays (https:\/\/github.com\/influxdata\/flux\/issues\/1387)\",\n\t\t\"partition_strings_splitN\": \"pandas. map does not correctly handled returned arrays (https:\/\/github.com\/influxdata\/flux\/issues\/1387)\",\n\t},\n\t\/\/ PromQL tests don't work. Missing `range` or `_measurement`.\n\t\"testing\/promql\": {\n\t\t\"changes\": \"unbounded or missing _measurement column\",\n\t\t\"dayOfMonth\": \"unbounded or missing _measurement column\",\n\t\t\"dayOfWeek\": \"unbounded or missing _measurement column\",\n\t\t\"daysInMonth\": \"unbounded or missing _measurement column\",\n\t\t\"emptyTable\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_counter_rate\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_nocounter\": \"unbounded or missing _measurement column\",\n\t\t\"extrapolatedRate_norate\": \"unbounded or missing _measurement column\",\n\t\t\"histogramQuantile\": \"unbounded or missing _measurement column\",\n\t\t\"holtWinters\": \"unbounded or missing _measurement column\",\n\t\t\"hour\": \"unbounded or missing _measurement column\",\n\t\t\"instantRate\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_empty_dst\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_full_string_match\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_multiple_groups\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_empty\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_nonexistent\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_src_not_matched\": \"unbounded or missing _measurement column\",\n\t\t\"labelReplace_sub_string_match\": \"unbounded or missing _measurement column\",\n\t\t\"linearRegression_nopredict\": \"unbounded or missing _measurement column\",\n\t\t\"linearRegression_predict\": \"unbounded or missing _measurement column\",\n\t\t\"minute\": \"unbounded or missing _measurement column\",\n\t\t\"month\": \"unbounded or missing _measurement column\",\n\t\t\"resets\": \"unbounded or missing _measurement column\",\n\t\t\"timestamp\": \"unbounded or missing _measurement column\",\n\t\t\"year\": \"unbounded or missing _measurement column\",\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package stun\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkMessage_GetNotFound(b *testing.B) {\n\tm := New()\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Get(AttrRealm)\n\t}\n}\n\nfunc TestMessage_GetNoAllocs(t *testing.T) {\n\tm := New()\n\tNewSoftware(\"c\").AddTo(m)\n\tm.WriteHeader()\n\n\tt.Run(\"Default\", func(t *testing.T) {\n\t\tallocs := testing.AllocsPerRun(10, func() {\n\t\t\tm.Get(AttrSoftware)\n\t\t})\n\t\tif allocs > 0 {\n\t\t\tt.Error(\"allocated memory, but should not\")\n\t\t}\n\t})\n\tt.Run(\"Not found\", func(t *testing.T) {\n\t\tallocs := testing.AllocsPerRun(10, func() {\n\t\t\tm.Get(AttrOrigin)\n\t\t})\n\t\tif allocs > 0 {\n\t\t\tt.Error(\"allocated memory, but should not\")\n\t\t}\n\t})\n}\n\nfunc TestPadding(t *testing.T) {\n\ttt := []struct {\n\t\tin, out int\n\t}{\n\t\t{4, 4}, \/\/ 0\n\t\t{2, 4}, \/\/ 1\n\t\t{5, 8}, \/\/ 2\n\t\t{8, 8}, \/\/ 3\n\t\t{11, 12}, \/\/ 4\n\t\t{1, 4}, \/\/ 5\n\t\t{3, 4}, \/\/ 6\n\t\t{6, 8}, \/\/ 7\n\t\t{7, 8}, \/\/ 8\n\t\t{0, 0}, \/\/ 9\n\t\t{40, 40}, \/\/ 10\n\t}\n\tfor i, c := range tt {\n\t\tif got := nearestPaddedValueLength(c.in); got != c.out {\n\t\t\tt.Errorf(\"[%d]: padd(%d) %d (got) != %d (expected)\",\n\t\t\t\ti, c.in, got, c.out,\n\t\t\t)\n\t\t}\n\t}\n}\n\nfunc TestAttrLengthError_Error(t *testing.T) {\n\terr := AttrOverflowErr{\n\t\tGot: 100,\n\t\tMax: 50,\n\t\tType: AttrLifetime,\n\t}\n\tif err.Error() != \"incorrect length of LIFETIME attribute: 100 exceeds maximum 50\" {\n\t\tt.Error(\"bad error string\", err)\n\t}\n}\n\nfunc TestAttrLengthErr_Error(t *testing.T) {\n\terr := AttrLengthErr{\n\t\tAttr: AttrErrorCode,\n\t\tExpected: 15,\n\t\tGot: 99,\n\t}\n\tif err.Error() != \"incorrect length of ERROR-CODE attribute: got 99, expected 15\" {\n\t\tt.Errorf(\"bad error string: %s\", err)\n\t}\n}\n<commit_msg>attributes: add BenchmarkMessage_Get<commit_after>package stun\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkMessage_GetNotFound(b *testing.B) {\n\tm := New()\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Get(AttrRealm)\n\t}\n}\n\nfunc BenchmarkMessage_Get(b *testing.B) {\n\tm := New()\n\tm.Add(AttrUsername, []byte{1, 2, 3, 4, 5, 6, 7})\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Get(AttrUsername)\n\t}\n}\n\nfunc TestMessage_GetNoAllocs(t *testing.T) {\n\tm := New()\n\tNewSoftware(\"c\").AddTo(m)\n\tm.WriteHeader()\n\n\tt.Run(\"Default\", func(t *testing.T) {\n\t\tallocs := testing.AllocsPerRun(10, func() {\n\t\t\tm.Get(AttrSoftware)\n\t\t})\n\t\tif allocs > 0 {\n\t\t\tt.Error(\"allocated memory, but should not\")\n\t\t}\n\t})\n\tt.Run(\"Not found\", func(t *testing.T) {\n\t\tallocs := testing.AllocsPerRun(10, func() {\n\t\t\tm.Get(AttrOrigin)\n\t\t})\n\t\tif allocs > 0 {\n\t\t\tt.Error(\"allocated memory, but should not\")\n\t\t}\n\t})\n}\n\nfunc TestPadding(t *testing.T) {\n\ttt := []struct {\n\t\tin, out int\n\t}{\n\t\t{4, 4}, \/\/ 0\n\t\t{2, 4}, \/\/ 1\n\t\t{5, 8}, \/\/ 2\n\t\t{8, 8}, \/\/ 3\n\t\t{11, 12}, \/\/ 4\n\t\t{1, 4}, \/\/ 5\n\t\t{3, 4}, \/\/ 6\n\t\t{6, 8}, \/\/ 7\n\t\t{7, 8}, \/\/ 8\n\t\t{0, 0}, \/\/ 9\n\t\t{40, 40}, \/\/ 10\n\t}\n\tfor i, c := range tt {\n\t\tif got := nearestPaddedValueLength(c.in); got != c.out {\n\t\t\tt.Errorf(\"[%d]: padd(%d) %d (got) != %d (expected)\",\n\t\t\t\ti, c.in, got, c.out,\n\t\t\t)\n\t\t}\n\t}\n}\n\nfunc TestAttrLengthError_Error(t *testing.T) {\n\terr := AttrOverflowErr{\n\t\tGot: 100,\n\t\tMax: 50,\n\t\tType: AttrLifetime,\n\t}\n\tif err.Error() != \"incorrect length of LIFETIME attribute: 100 exceeds maximum 50\" {\n\t\tt.Error(\"bad error string\", err)\n\t}\n}\n\nfunc TestAttrLengthErr_Error(t *testing.T) {\n\terr := AttrLengthErr{\n\t\tAttr: AttrErrorCode,\n\t\tExpected: 15,\n\t\tGot: 99,\n\t}\n\tif err.Error() != \"incorrect length of ERROR-CODE attribute: got 99, expected 15\" {\n\t\tt.Errorf(\"bad error string: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package dpn_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/APTrust\/bagman\/dpn\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar skipSyncMessagePrinted = false\n\nconst (\n\tBAG_COUNT = 6\n\tREPL_COUNT = 24\n\tRESTORE_COUNT = 4\n)\n\nfunc runSyncTests(t *testing.T) bool {\n\tconfig := loadConfig(t, configFile)\n\t_, err := http.Get(config.RestClient.LocalServiceURL)\n\tif !canRunSyncTests(\"aptrust\", config.RestClient.LocalServiceURL, err) {\n\t\treturn false\n\t}\n\tfor nodeNamespace, url := range config.RemoteNodeURLs {\n\t\tif url == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, err := http.Get(url)\n\t\tif !canRunSyncTests(nodeNamespace, url, err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc canRunSyncTests(nodeNamespace string, url string, err error) (bool) {\n\tif err != nil {\n\t\tif skipSyncMessagePrinted == false {\n\t\t\tskipSyncMessagePrinted = true\n\t\t\tfmt.Printf(\"**** Skipping DPN sync integration tests: \"+\n\t\t\t\t\"%s server is not running at %s\\n\", nodeNamespace, url)\n\t\t\tfmt.Println(\" Run the run_cluster.sh script in \" +\n\t\t\t\t\"DPN-REST\/dpnode to get a local cluster running.\")\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc newDPNSync(t *testing.T) (*dpn.DPNSync) {\n\t\/\/ loadConfig and configFile are defined in dpnrestclient_test.go\n\tconfig := loadConfig(t, configFile)\n\n\tdpnSync, err := dpn.NewDPNSync(config)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn nil\n\t}\n\n\tfor namespace, _ := range config.RemoteNodeTokens {\n\t\tif dpnSync.RemoteClients[namespace] == nil {\n\t\t\tt.Errorf(\"Remote client for node '%s' is missing\", namespace)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn dpnSync\n}\n\nfunc TestNewDPNSync(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n}\n\nfunc TestGetAllNodes(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(nodes) != 5 {\n\t\tt.Errorf(\"Expected 5 nodes, got %d\", len(nodes))\n\t}\n}\n\nfunc TestUpdateLastPullDate(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif len(nodes) != 5 {\n\t\tt.Errorf(\"Expected 5 nodes, got %d\", len(nodes))\n\t\treturn\n\t}\n\tsomeNode := nodes[2]\n\torigLastPullDate := someNode.LastPullDate\n\tnewLastPullDate := origLastPullDate.Add(-12 * time.Hour)\n\n\tupdatedNode, err := dpnSync.UpdateLastPullDate(someNode, newLastPullDate)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif updatedNode.LastPullDate != newLastPullDate {\n\t\tt.Errorf(\"Expected LastPullDate %s, got %s\",\n\t\t\tnewLastPullDate, updatedNode.LastPullDate)\n\t}\n}\n\nfunc TestSyncBags(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tbagsSynched, err := dpnSync.SyncBags(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching bags for node %s: %v\", node.Namespace, err)\n\t\t}\n\t\tif len(bagsSynched) != BAG_COUNT {\n\t\t\tt.Errorf(\"Synched %d bags for node %s. Expected %d.\",\n\t\t\t\tlen(bagsSynched), node.Namespace, BAG_COUNT)\n\t\t}\n\t\tfor _, remoteBag := range(bagsSynched) {\n\t\t\tif remoteBag == nil {\n\t\t\t\tt.Errorf(\"Remote bag is nil\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalBag, _ := dpnSync.LocalClient.DPNBagGet(remoteBag.UUID)\n\t\t\tif localBag == nil {\n\t\t\t\tt.Errorf(\"Bag %s didn't make into local registry\", remoteBag.UUID)\n\t\t\t}\n\t\t\tif localBag.UpdatedAt != remoteBag.UpdatedAt {\n\t\t\t\tt.Errorf(\"Bag %s isn't up to date in local registry\", remoteBag.UUID)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncReplicationRequests(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\txfersSynched, err := dpnSync.SyncReplicationRequests(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching replication requests for node %s: %v\",\n\t\t\t\tnode.Namespace, err)\n\t\t}\n\t\tif len(xfersSynched) != REPL_COUNT {\n\t\t\tt.Errorf(\"Synched %d replication requests for %s. Expected %d.\",\n\t\t\t\tlen(xfersSynched), node.Namespace, REPL_COUNT)\n\t\t}\n\t\tfor _, xfer := range(xfersSynched) {\n\t\t\tif xfer == nil {\n\t\t\t\tt.Errorf(\"Xfer is nil\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalCopy, _ := dpnSync.LocalClient.ReplicationTransferGet(xfer.ReplicationId)\n\t\t\tif localCopy == nil {\n\t\t\t\tt.Errorf(\"Xfer %s didn't make into local registry\", xfer.ReplicationId)\n\t\t\t}\n\t\t\tif xfer.UpdatedAt != localCopy.UpdatedAt {\n\t\t\t\tt.Errorf(\"Xfer %s isn't up to date in local registry\", xfer.ReplicationId)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncRestoreRequests(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\txfersSynched, err := dpnSync.SyncRestoreRequests(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching restore requests for node %s: %v\",\n\t\t\t\tnode.Namespace, err)\n\t\t}\n\t\tif len(xfersSynched) != RESTORE_COUNT {\n\t\t\tt.Errorf(\"Synched %d restore requests for %s. Expected %d.\",\n\t\t\t\tlen(xfersSynched), node.Namespace, RESTORE_COUNT)\n\t\t}\n\t\tfor _, xfer := range(xfersSynched) {\n\t\t\tlocalCopy, _ := dpnSync.LocalClient.RestoreTransferGet(xfer.RestoreId)\n\t\t\tif localCopy == nil {\n\t\t\t\tt.Errorf(\"Xfer %s didn't make into local registry\", xfer.RestoreId)\n\t\t\t}\n\t\t\tif xfer.UpdatedAt != localCopy.UpdatedAt {\n\t\t\t\tt.Errorf(\"Xfer %s isn't up to date in local registry\", xfer.RestoreId)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncEverythingFromNode(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tsyncResult := dpnSync.SyncEverythingFromNode(node)\n\n\t\t\/\/ Bags\n\t\tif syncResult.BagSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected bag-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.BagSyncError)\n\t\t}\n\t\tif len(syncResult.Bags) != BAG_COUNT {\n\t\t\tt.Errorf(\"Expected %d bags from %s, got %d\",\n\t\t\t\tBAG_COUNT, node.Namespace, len(syncResult.Bags))\n\t\t}\n\n\t\t\/\/ Replication Transfers\n\t\tif syncResult.ReplicationSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected replication transfer-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.ReplicationSyncError)\n\t\t}\n\t\tif len(syncResult.ReplicationTransfers) != REPL_COUNT {\n\t\t\tt.Errorf(\"Expected %d replication transfers from %s, got %d\",\n\t\t\t\tREPL_COUNT, node.Namespace, len(syncResult.ReplicationTransfers))\n\t\t}\n\n\t\t\/\/ Bags\n\t\tif syncResult.RestoreSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected restore transfer-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.RestoreSyncError)\n\t\t}\n\t\tif len(syncResult.RestoreTransfers) != RESTORE_COUNT {\n\t\t\tt.Errorf(\"Expected %d restore transfers from %s, got %d\",\n\t\t\t\tRESTORE_COUNT, node.Namespace, len(syncResult.RestoreTransfers))\n\t\t}\n\n\t\t\/\/ Timestamp update\n\t\tupdatedNode, err := dpnSync.LocalClient.DPNNodeGet(node.Namespace)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't check timestamp. Error getting node: %v\", err)\n\t\t}\n\t\tif updatedNode.LastPullDate == aLongTimeAgo {\n\t\t\tt.Errorf(\"LastPullDate was not updated for %s\", node.Namespace)\n\t\t}\n\t}\n}\n\nfunc TestSyncWithError(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ Pick one node to sync with, and set the API key for that node\n\t\/\/ to a value we know is invalid. This will cause the sync to fail.\n\tnode := nodes[len(nodes) - 1]\n\tdpnSync.RemoteClients[node.Namespace].APIKey = \"0000000000000000\"\n\n\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\tnode.LastPullDate = aLongTimeAgo\n\t_, err = dpnSync.LocalClient.DPNNodeUpdate(node)\n\tif err != nil {\n\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\treturn\n\t}\n\n\tsyncResult := dpnSync.SyncEverythingFromNode(node)\n\tif syncResult.BagSyncError == nil {\n\t\tt.Errorf(\"BagSyncError should not be nil\")\n\t}\n\tif syncResult.ReplicationSyncError == nil {\n\t\tt.Errorf(\"ReplicationSyncError should not be nil\")\n\t}\n\tif syncResult.RestoreSyncError == nil {\n\t\tt.Errorf(\"RestoreSyncError should not be nil\")\n\t}\n\n\t\/\/ Because the sync failed (due to the bad API Key), the LastPullDate\n\t\/\/ on the node we tried to pull from should NOT be updated.\n\tupdatedNode, err := dpnSync.LocalClient.DPNNodeGet(node.Namespace)\n\tif err != nil {\n\t\tt.Errorf(\"Can't check timestamp. Error getting node: %v\", err)\n\t}\n\tif updatedNode.LastPullDate != aLongTimeAgo {\n\t\tt.Errorf(\"LastPullDate was updated when it should not have been\")\n\t}\n}\n\n\nfunc TestHasSyncErrors(t *testing.T) {\n\tsyncResult := &dpn.SyncResult{}\n\tif syncResult.HasSyncErrors() == true {\n\t\tt.Errorf(\"HasSyncErrors() returned true. Expected false.\")\n\t}\n\tsyncResult.BagSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n\tsyncResult.BagSyncError = nil\n\tsyncResult.ReplicationSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n\tsyncResult.ReplicationSyncError = nil\n\tsyncResult.RestoreSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n}\n<commit_msg>Removed test for UpdateLastPullDate<commit_after>package dpn_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/APTrust\/bagman\/dpn\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar skipSyncMessagePrinted = false\n\nconst (\n\tBAG_COUNT = 6\n\tREPL_COUNT = 24\n\tRESTORE_COUNT = 4\n)\n\nfunc runSyncTests(t *testing.T) bool {\n\tconfig := loadConfig(t, configFile)\n\t_, err := http.Get(config.RestClient.LocalServiceURL)\n\tif !canRunSyncTests(\"aptrust\", config.RestClient.LocalServiceURL, err) {\n\t\treturn false\n\t}\n\tfor nodeNamespace, url := range config.RemoteNodeURLs {\n\t\tif url == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, err := http.Get(url)\n\t\tif !canRunSyncTests(nodeNamespace, url, err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc canRunSyncTests(nodeNamespace string, url string, err error) (bool) {\n\tif err != nil {\n\t\tif skipSyncMessagePrinted == false {\n\t\t\tskipSyncMessagePrinted = true\n\t\t\tfmt.Printf(\"**** Skipping DPN sync integration tests: \"+\n\t\t\t\t\"%s server is not running at %s\\n\", nodeNamespace, url)\n\t\t\tfmt.Println(\" Run the run_cluster.sh script in \" +\n\t\t\t\t\"DPN-REST\/dpnode to get a local cluster running.\")\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc newDPNSync(t *testing.T) (*dpn.DPNSync) {\n\t\/\/ loadConfig and configFile are defined in dpnrestclient_test.go\n\tconfig := loadConfig(t, configFile)\n\n\tdpnSync, err := dpn.NewDPNSync(config)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn nil\n\t}\n\n\tfor namespace, _ := range config.RemoteNodeTokens {\n\t\tif dpnSync.RemoteClients[namespace] == nil {\n\t\t\tt.Errorf(\"Remote client for node '%s' is missing\", namespace)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn dpnSync\n}\n\nfunc TestNewDPNSync(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n}\n\nfunc TestGetAllNodes(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(nodes) != 5 {\n\t\tt.Errorf(\"Expected 5 nodes, got %d\", len(nodes))\n\t}\n}\n\nfunc TestSyncBags(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tbagsSynched, err := dpnSync.SyncBags(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching bags for node %s: %v\", node.Namespace, err)\n\t\t}\n\t\tif len(bagsSynched) != BAG_COUNT {\n\t\t\tt.Errorf(\"Synched %d bags for node %s. Expected %d.\",\n\t\t\t\tlen(bagsSynched), node.Namespace, BAG_COUNT)\n\t\t}\n\t\tfor _, remoteBag := range(bagsSynched) {\n\t\t\tif remoteBag == nil {\n\t\t\t\tt.Errorf(\"Remote bag is nil\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalBag, _ := dpnSync.LocalClient.DPNBagGet(remoteBag.UUID)\n\t\t\tif localBag == nil {\n\t\t\t\tt.Errorf(\"Bag %s didn't make into local registry\", remoteBag.UUID)\n\t\t\t}\n\t\t\tif localBag.UpdatedAt != remoteBag.UpdatedAt {\n\t\t\t\tt.Errorf(\"Bag %s isn't up to date in local registry\", remoteBag.UUID)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncReplicationRequests(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\txfersSynched, err := dpnSync.SyncReplicationRequests(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching replication requests for node %s: %v\",\n\t\t\t\tnode.Namespace, err)\n\t\t}\n\t\tif len(xfersSynched) != REPL_COUNT {\n\t\t\tt.Errorf(\"Synched %d replication requests for %s. Expected %d.\",\n\t\t\t\tlen(xfersSynched), node.Namespace, REPL_COUNT)\n\t\t}\n\t\tfor _, xfer := range(xfersSynched) {\n\t\t\tif xfer == nil {\n\t\t\t\tt.Errorf(\"Xfer is nil\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalCopy, _ := dpnSync.LocalClient.ReplicationTransferGet(xfer.ReplicationId)\n\t\t\tif localCopy == nil {\n\t\t\t\tt.Errorf(\"Xfer %s didn't make into local registry\", xfer.ReplicationId)\n\t\t\t}\n\t\t\tif xfer.UpdatedAt != localCopy.UpdatedAt {\n\t\t\t\tt.Errorf(\"Xfer %s isn't up to date in local registry\", xfer.ReplicationId)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncRestoreRequests(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\txfersSynched, err := dpnSync.SyncRestoreRequests(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error synching restore requests for node %s: %v\",\n\t\t\t\tnode.Namespace, err)\n\t\t}\n\t\tif len(xfersSynched) != RESTORE_COUNT {\n\t\t\tt.Errorf(\"Synched %d restore requests for %s. Expected %d.\",\n\t\t\t\tlen(xfersSynched), node.Namespace, RESTORE_COUNT)\n\t\t}\n\t\tfor _, xfer := range(xfersSynched) {\n\t\t\tlocalCopy, _ := dpnSync.LocalClient.RestoreTransferGet(xfer.RestoreId)\n\t\t\tif localCopy == nil {\n\t\t\t\tt.Errorf(\"Xfer %s didn't make into local registry\", xfer.RestoreId)\n\t\t\t}\n\t\t\tif xfer.UpdatedAt != localCopy.UpdatedAt {\n\t\t\t\tt.Errorf(\"Xfer %s isn't up to date in local registry\", xfer.RestoreId)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSyncEverythingFromNode(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes {\n\t\tif node.Namespace == \"aptrust\" {\n\t\t\tcontinue\n\t\t}\n\t\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\t\tnode.LastPullDate = aLongTimeAgo\n\t\t_, err := dpnSync.LocalClient.DPNNodeUpdate(node)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tsyncResult := dpnSync.SyncEverythingFromNode(node)\n\n\t\t\/\/ Bags\n\t\tif syncResult.BagSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected bag-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.BagSyncError)\n\t\t}\n\t\tif len(syncResult.Bags) != BAG_COUNT {\n\t\t\tt.Errorf(\"Expected %d bags from %s, got %d\",\n\t\t\t\tBAG_COUNT, node.Namespace, len(syncResult.Bags))\n\t\t}\n\n\t\t\/\/ Replication Transfers\n\t\tif syncResult.ReplicationSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected replication transfer-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.ReplicationSyncError)\n\t\t}\n\t\tif len(syncResult.ReplicationTransfers) != REPL_COUNT {\n\t\t\tt.Errorf(\"Expected %d replication transfers from %s, got %d\",\n\t\t\t\tREPL_COUNT, node.Namespace, len(syncResult.ReplicationTransfers))\n\t\t}\n\n\t\t\/\/ Bags\n\t\tif syncResult.RestoreSyncError != nil {\n\t\t\tt.Errorf(\"Got unexpected restore transfer-sync error from node %s: %v\",\n\t\t\t\tnode.Namespace, syncResult.RestoreSyncError)\n\t\t}\n\t\tif len(syncResult.RestoreTransfers) != RESTORE_COUNT {\n\t\t\tt.Errorf(\"Expected %d restore transfers from %s, got %d\",\n\t\t\t\tRESTORE_COUNT, node.Namespace, len(syncResult.RestoreTransfers))\n\t\t}\n\n\t\t\/\/ Timestamp update\n\t\tupdatedNode, err := dpnSync.LocalClient.DPNNodeGet(node.Namespace)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't check timestamp. Error getting node: %v\", err)\n\t\t}\n\t\tif updatedNode.LastPullDate == aLongTimeAgo {\n\t\t\tt.Errorf(\"LastPullDate was not updated for %s\", node.Namespace)\n\t\t}\n\t}\n}\n\nfunc TestSyncWithError(t *testing.T) {\n\tif runSyncTests(t) == false {\n\t\treturn \/\/ local test cluster isn't running\n\t}\n\tdpnSync := newDPNSync(t)\n\tif dpnSync == nil {\n\t\treturn\n\t}\n\tnodes, err := dpnSync.GetAllNodes()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ Pick one node to sync with, and set the API key for that node\n\t\/\/ to a value we know is invalid. This will cause the sync to fail.\n\tnode := nodes[len(nodes) - 1]\n\tdpnSync.RemoteClients[node.Namespace].APIKey = \"0000000000000000\"\n\n\taLongTimeAgo := time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)\n\tnode.LastPullDate = aLongTimeAgo\n\t_, err = dpnSync.LocalClient.DPNNodeUpdate(node)\n\tif err != nil {\n\t\tt.Errorf(\"Error setting last pull date to 1999: %v\", err)\n\t\treturn\n\t}\n\n\tsyncResult := dpnSync.SyncEverythingFromNode(node)\n\tif syncResult.BagSyncError == nil {\n\t\tt.Errorf(\"BagSyncError should not be nil\")\n\t}\n\tif syncResult.ReplicationSyncError == nil {\n\t\tt.Errorf(\"ReplicationSyncError should not be nil\")\n\t}\n\tif syncResult.RestoreSyncError == nil {\n\t\tt.Errorf(\"RestoreSyncError should not be nil\")\n\t}\n\n\t\/\/ Because the sync failed (due to the bad API Key), the LastPullDate\n\t\/\/ on the node we tried to pull from should NOT be updated.\n\tupdatedNode, err := dpnSync.LocalClient.DPNNodeGet(node.Namespace)\n\tif err != nil {\n\t\tt.Errorf(\"Can't check timestamp. Error getting node: %v\", err)\n\t}\n\tif updatedNode.LastPullDate != aLongTimeAgo {\n\t\tt.Errorf(\"LastPullDate was updated when it should not have been\")\n\t}\n}\n\n\nfunc TestHasSyncErrors(t *testing.T) {\n\tsyncResult := &dpn.SyncResult{}\n\tif syncResult.HasSyncErrors() == true {\n\t\tt.Errorf(\"HasSyncErrors() returned true. Expected false.\")\n\t}\n\tsyncResult.BagSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n\tsyncResult.BagSyncError = nil\n\tsyncResult.ReplicationSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n\tsyncResult.ReplicationSyncError = nil\n\tsyncResult.RestoreSyncError = fmt.Errorf(\"Oops.\")\n\tif syncResult.HasSyncErrors() == false {\n\t\tt.Errorf(\"HasSyncErrors() returned false. Expected true.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"flag\"\n \"bufio\"\n \"encoding\/binary\"\n \"os\"\n \"strings\"\n \"database\/sql\"\n _ \"github.com\/lib\/pq\"\n _ \"github.com\/go-sql-driver\/mysql\"\n \"fmt\"\n \"gopkg.in\/ini.v1\"\n \"log\"\n)\n\ntype Config struct {\n Driver string\n Host string\n Port string\n User string\n Pass string\n Dbname string\n Dbargs string\n Table string\n UserField string\n PassField string\n ServerField string\n}\n\nfunc auth(conf *Config, db *sql.DB, user string,\n server string, passwd string) (bool, error) {\n var query string\n var value string\n var err error\n\n log.Printf(\"user: %s, server: %s, passwd: %s\\n\", user, server, passwd)\n\n if conf.ServerField != \"\" {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2 and %s = $3\",\n conf.UserField, conf.Table, conf.UserField, conf.PassField,\n conf.ServerField)\n err = db.QueryRow(query, user, passwd, server).Scan(&value)\n } else {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2\",\n conf.UserField, conf.Table, conf.UserField, conf.PassField)\n err = db.QueryRow(query, user, passwd).Scan(&value)\n }\n if err != nil || value == \"\" {\n return false, err\n }\n\n return true, nil\n}\n\nfunc isuser(conf *Config, db *sql.DB, user string,\n server string) (bool, error) {\n var query string\n var value string\n var err error\n\n if conf.ServerField != \"\" {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2\",\n conf.UserField, conf.Table, conf.UserField, conf.ServerField)\n err = db.QueryRow(query, user, server).Scan(&value)\n } else {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1\",\n conf.UserField, conf.Table, conf.UserField)\n err = db.QueryRow(query, user).Scan(&value)\n }\n if err != nil || value == \"\" {\n return false, err\n }\n\n return true, nil\n}\n\nfunc GetSqlConnectionString(conf *Config) string {\n return fmt.Sprintf(\"%s:\/\/%s:%s@%s:%s\/%s?%s\",\n conf.Driver, conf.User, conf.Pass, conf.Host, conf.Port,\n conf.Dbname, conf.Dbargs)\n}\n\nfunc OpenSqlConnection(conf *Config) (*sql.DB, error) {\n var err error\n\n connectionString := GetSqlConnectionString(conf)\n db, err := sql.Open(conf.Driver, connectionString)\n if err != nil {\n return nil, err\n }\n\n if err = db.Ping(); err != nil {\n return nil, err\n }\n\n return db, nil\n}\n\nfunc AuthLoop(conf *Config) {\n db, err := OpenSqlConnection(conf)\n\n bioIn := bufio.NewReader(os.Stdin)\n bioOut := bufio.NewWriter(os.Stdout)\n\n var success bool\n var length uint16\n var result uint16\n\n for {\n _ = binary.Read(bioIn, binary.BigEndian, &length)\n\n buf := make([]byte, length)\n\n r, _ := bioIn.Read(buf)\n if r == 0 {\n continue\n }\n\n if err != nil {\n err = db.Ping()\n }\n\n if err == nil {\n data := strings.Split(string(buf), \":\")\n if data[0] == \"auth\" {\n success, err = auth(conf, db, data[1], data[2], data[3])\n } else if data[0] == \"isuser\" {\n success, err = isuser(conf, db, data[1], data[2])\n } else {\n success = false\n }\n } else {\n success = false\n }\n\n length = 2\n binary.Write(bioOut, binary.BigEndian, &length)\n\n if success != true {\n result = 0\n } else {\n result = 1\n }\n\n binary.Write(bioOut, binary.BigEndian, &result)\n bioOut.Flush()\n }\n}\n\nfunc main() {\n filename := flag.String(\"conf\", \"\/etc\/ejabberd-pg-auth.ini\",\n \"Config file with all the connection infos needed.\")\n flag.Parse()\n\n cfg, err := ini.Load(*filename)\n if err != nil {\n log.Fatal(err)\n }\n\n conf := new(Config)\n err = cfg.MapTo(conf)\n if err != nil {\n log.Fatal(err)\n }\n\n log.SetOutput(os.Stderr)\n\n AuthLoop(conf)\n}\n<commit_msg>Add 25ms pause to loop solves 100% CPU problem<commit_after>package main\n\nimport (\n \"flag\"\n \"bufio\"\n \"encoding\/binary\"\n \"os\"\n \"strings\"\n \"database\/sql\"\n _ \"github.com\/lib\/pq\"\n _ \"github.com\/go-sql-driver\/mysql\"\n \"fmt\"\n \"gopkg.in\/ini.v1\"\n \"log\"\n)\n\ntype Config struct {\n Driver string\n Host string\n Port string\n User string\n Pass string\n Dbname string\n Dbargs string\n Table string\n UserField string\n PassField string\n ServerField string\n}\n\nfunc auth(conf *Config, db *sql.DB, user string,\n server string, passwd string) (bool, error) {\n var query string\n var value string\n var err error\n\n log.Printf(\"user: %s, server: %s, passwd: %s\\n\", user, server, passwd)\n\n if conf.ServerField != \"\" {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2 and %s = $3\",\n conf.UserField, conf.Table, conf.UserField, conf.PassField,\n conf.ServerField)\n err = db.QueryRow(query, user, passwd, server).Scan(&value)\n } else {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2\",\n conf.UserField, conf.Table, conf.UserField, conf.PassField)\n err = db.QueryRow(query, user, passwd).Scan(&value)\n }\n if err != nil || value == \"\" {\n return false, err\n }\n\n return true, nil\n}\n\nfunc isuser(conf *Config, db *sql.DB, user string,\n server string) (bool, error) {\n var query string\n var value string\n var err error\n\n if conf.ServerField != \"\" {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1 and %s = $2\",\n conf.UserField, conf.Table, conf.UserField, conf.ServerField)\n err = db.QueryRow(query, user, server).Scan(&value)\n } else {\n query = fmt.Sprintf(\n \"select %s from %s where %s = $1\",\n conf.UserField, conf.Table, conf.UserField)\n err = db.QueryRow(query, user).Scan(&value)\n }\n if err != nil || value == \"\" {\n return false, err\n }\n\n return true, nil\n}\n\nfunc GetSqlConnectionString(conf *Config) string {\n return fmt.Sprintf(\"%s:\/\/%s:%s@%s:%s\/%s?%s\",\n conf.Driver, conf.User, conf.Pass, conf.Host, conf.Port,\n conf.Dbname, conf.Dbargs)\n}\n\nfunc OpenSqlConnection(conf *Config) (*sql.DB, error) {\n var err error\n\n connectionString := GetSqlConnectionString(conf)\n db, err := sql.Open(conf.Driver, connectionString)\n if err != nil {\n return nil, err\n }\n\n if err = db.Ping(); err != nil {\n return nil, err\n }\n\n return db, nil\n}\n\nfunc AuthLoop(conf *Config) {\n db, err := OpenSqlConnection(conf)\n\n bioIn := bufio.NewReader(os.Stdin)\n bioOut := bufio.NewWriter(os.Stdout)\n\n var success bool\n var length uint16\n var result uint16\n\n for {\n _ = binary.Read(bioIn, binary.BigEndian, &length)\n\n buf := make([]byte, length)\n\n r, err := bioIn.Read(buf)\n if r == 0 || err != nil {\n time.Sleep(25 * time.Millisecond)\n continue\n }\n\n if err != nil {\n err = db.Ping()\n }\n\n if err == nil {\n data := strings.Split(string(buf), \":\")\n if data[0] == \"auth\" {\n success, err = auth(conf, db, data[1], data[2], data[3])\n } else if data[0] == \"isuser\" {\n success, err = isuser(conf, db, data[1], data[2])\n } else {\n success = false\n }\n } else {\n success = false\n }\n\n length = 2\n binary.Write(bioOut, binary.BigEndian, &length)\n\n if success != true {\n result = 0\n } else {\n result = 1\n }\n\n binary.Write(bioOut, binary.BigEndian, &result)\n bioOut.Flush()\n }\n}\n\nfunc main() {\n filename := flag.String(\"conf\", \"\/etc\/ejabberd-pg-auth.ini\",\n \"Config file with all the connection infos needed.\")\n flag.Parse()\n\n cfg, err := ini.Load(*filename)\n if err != nil {\n log.Fatal(err)\n }\n\n conf := new(Config)\n err = cfg.MapTo(conf)\n if err != nil {\n log.Fatal(err)\n }\n\n log.SetOutput(os.Stderr)\n\n AuthLoop(conf)\n}\n<|endoftext|>"} {"text":"<commit_before>package herd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n)\n\nvar httpdHerd *Herd\n\nfunc (herd *Herd) startServer(portNum uint, daemon bool) error {\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", portNum))\n\tif err != nil {\n\t\treturn err\n\t}\n\thttpdHerd = herd\n\thttp.HandleFunc(\"\/\", statusHandler)\n\thttp.HandleFunc(\"\/listSubs\", listSubsHandler)\n\thttp.HandleFunc(\"\/showSubs\", showAllSubsHandler)\n\thttp.HandleFunc(\"\/showDeviantSubs\", showDeviantSubsHandler)\n\thttp.HandleFunc(\"\/showCompliantSubs\", showCompliantSubsHandler)\n\tif daemon {\n\t\tgo http.Serve(listener, nil)\n\t} else {\n\t\thttp.Serve(listener, nil)\n\t}\n\treturn nil\n}\n\nfunc (herd *Herd) addHtmlWriter(htmlWriter HtmlWriter) {\n\therd.htmlWriters = append(herd.htmlWriters, htmlWriter)\n}\n<commit_msg>Fix bug: list of all subs did not work.<commit_after>package herd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n)\n\nvar httpdHerd *Herd\n\nfunc (herd *Herd) startServer(portNum uint, daemon bool) error {\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", portNum))\n\tif err != nil {\n\t\treturn err\n\t}\n\thttpdHerd = herd\n\thttp.HandleFunc(\"\/\", statusHandler)\n\thttp.HandleFunc(\"\/listSubs\", listSubsHandler)\n\thttp.HandleFunc(\"\/showAllSubs\", showAllSubsHandler)\n\thttp.HandleFunc(\"\/showDeviantSubs\", showDeviantSubsHandler)\n\thttp.HandleFunc(\"\/showCompliantSubs\", showCompliantSubsHandler)\n\tif daemon {\n\t\tgo http.Serve(listener, nil)\n\t} else {\n\t\thttp.Serve(listener, nil)\n\t}\n\treturn nil\n}\n\nfunc (herd *Herd) addHtmlWriter(htmlWriter HtmlWriter) {\n\therd.htmlWriters = append(herd.htmlWriters, htmlWriter)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/hatstand\/shinywaffle\/metar\"\n)\n\nconst (\n\tbaseURL = \"https:\/\/www.ogimet.com\/display_metars2.php\"\n\tICAO = \"EGLC\"\n)\n\nfunc main() {\n\tv := url.Values{}\n\tv.Set(\"lang\", \"en\")\n\tv.Set(\"lugar\", ICAO)\n\tv.Set(\"tipo\", \"SA\")\n\tv.Set(\"ord\", \"REV\")\n\tv.Set(\"nil\", \"NO\")\n\tv.Set(\"fmt\", \"txt\")\n\tv.Set(\"ano\", \"2017\")\n\tv.Set(\"mes\", \"08\")\n\tv.Set(\"day\", \"10\")\n\tv.Set(\"hora\", \"00\")\n\tv.Set(\"anof\", \"2017\")\n\tv.Set(\"mesf\", \"08\")\n\tv.Set(\"dayf\", \"31\")\n\tv.Set(\"horaf\", \"23\")\n\tv.Set(\"minf\", \"59\")\n\tv.Set(\"send\", \"send\")\n\n\turl, _ := url.Parse(baseURL)\n\turl.RawQuery = v.Encode()\n\n\tfmt.Printf(\"URL: %v\\n\", url)\n\n\tresp, err := http.Get(url.String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to fetch METAR data: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdoc, _ := goquery.NewDocumentFromReader(resp.Body)\n\traw := doc.Find(\"pre\").Text()\n\tMETARs, err := metar.ParseMETARs(raw)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse METARs: %v\", err)\n\t}\n\tfor _, m := range METARs {\n\t\tfmt.Printf(\"Parsed: %v\\n\", m)\n\t}\n}\n<commit_msg>Generate chart of METAR data.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/hatstand\/shinywaffle\/metar\"\n\t\"github.com\/wcharczuk\/go-chart\"\n)\n\nconst (\n\tbaseURL = \"https:\/\/www.ogimet.com\/display_metars2.php\"\n\tICAO = \"EGLC\"\n)\n\nfunc main() {\n\tv := url.Values{}\n\tv.Set(\"lang\", \"en\")\n\tv.Set(\"lugar\", ICAO)\n\tv.Set(\"tipo\", \"SA\")\n\tv.Set(\"ord\", \"REV\")\n\tv.Set(\"nil\", \"NO\")\n\tv.Set(\"fmt\", \"txt\")\n\tv.Set(\"ano\", \"2017\")\n\tv.Set(\"mes\", \"08\")\n\tv.Set(\"day\", \"10\")\n\tv.Set(\"hora\", \"00\")\n\tv.Set(\"anof\", \"2017\")\n\tv.Set(\"mesf\", \"08\")\n\tv.Set(\"dayf\", \"31\")\n\tv.Set(\"horaf\", \"23\")\n\tv.Set(\"minf\", \"59\")\n\tv.Set(\"send\", \"send\")\n\n\turl, _ := url.Parse(baseURL)\n\turl.RawQuery = v.Encode()\n\n\tresp, err := http.Get(url.String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to fetch METAR data: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdoc, _ := goquery.NewDocumentFromReader(resp.Body)\n\traw := doc.Find(\"pre\").Text()\n\tMETARs, err := metar.ParseMETARs(raw)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse METARs: %v\", err)\n\t}\n\n\tvar x []time.Time\n\tvar y []float64\n\tfor _, m := range METARs {\n\t\tx = append(x, m.DateTime)\n\t\ty = append(y, float64(m.Temperature))\n\t}\n\n\tgraph := chart.Chart{\n\t\tXAxis: chart.XAxis{\n\t\t\tStyle: chart.Style{\n\t\t\t\tShow: true,\n\t\t\t},\n\t\t},\n\t\tYAxis: chart.YAxis{\n\t\t\tStyle: chart.Style{\n\t\t\t\tShow: true,\n\t\t\t},\n\t\t},\n\t\tSeries: []chart.Series{\n\t\t\tchart.TimeSeries{\n\t\t\t\tXValues: x,\n\t\t\t\tYValues: y,\n\t\t\t},\n\t\t},\n\t}\n\n\tw := bufio.NewWriter(os.Stdout)\n\terr = graph.Render(chart.PNG, w)\n}\n<|endoftext|>"} {"text":"<commit_before>package admin\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestAdminServerAccess(t *testing.T) {\n\ttype test struct {\n\t\turi string\n\t\tcode int\n\t}\n\n\ttestAccess := func(access string, tests []test) {\n\t\tsrv := &Server{Access: access}\n\t\tts := httptest.NewServer(srv.handler())\n\t\tdefer ts.Close()\n\n\t\tnoRedirectClient := &http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(access+tt.uri, func(t *testing.T) {\n\t\t\t\tresp, err := noRedirectClient.Get(ts.URL + tt.uri)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"got %v want nil\", err)\n\t\t\t\t}\n\t\t\t\tif got, want := resp.StatusCode, tt.code; got != want {\n\t\t\t\t\tt.Fatalf(\"got code %d want %d\", got, want)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\troTests := []test{\n\t\t{\"\/api\/manual\", 403},\n\t\t{\"\/api\/config\", 200},\n\t\t{\"\/api\/routes\", 200},\n\t\t{\"\/api\/version\", 200},\n\t\t{\"\/manual\", 403},\n\t\t{\"\/routes\", 200},\n\t\t{\"\/health\", 200},\n\t\t{\"\/logo.svg\", 200},\n\t\t{\"\/\", 303},\n\t}\n\n\trwTests := []test{\n\t\t{\"\/api\/manual\", 200},\n\t\t{\"\/api\/config\", 200},\n\t\t{\"\/api\/routes\", 200},\n\t\t{\"\/api\/version\", 200},\n\t\t{\"\/manual\", 200},\n\t\t{\"\/routes\", 200},\n\t\t{\"\/health\", 200},\n\t\t{\"\/logo.svg\", 200},\n\t\t{\"\/\", 303},\n\t}\n\n\ttestAccess(\"ro\", roTests)\n\ttestAccess(\"rw\", rwTests)\n}\n<commit_msg>fix tests<commit_after>package admin\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/fabiolb\/fabio\/config\"\n)\n\nfunc TestAdminServerAccess(t *testing.T) {\n\ttype test struct {\n\t\turi string\n\t\tcode int\n\t}\n\n\ttestAccess := func(access string, tests []test) {\n\t\tsrv := &Server{\n\t\t\tAccess: access,\n\t\t\tCfg: &config.Config{\n\t\t\t\tRegistry: config.Registry{\n\t\t\t\t\tConsul: config.Consul{\n\t\t\t\t\t\tKVPath: \"\/fabio\/config\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tts := httptest.NewServer(srv.handler())\n\t\tdefer ts.Close()\n\n\t\tnoRedirectClient := &http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(access+tt.uri, func(t *testing.T) {\n\t\t\t\tresp, err := noRedirectClient.Get(ts.URL + tt.uri)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"got %v want nil\", err)\n\t\t\t\t}\n\t\t\t\tif got, want := resp.StatusCode, tt.code; got != want {\n\t\t\t\t\tt.Fatalf(\"got code %d want %d\", got, want)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\troTests := []test{\n\t\t{\"\/api\/manual\", 403},\n\t\t{\"\/api\/paths\", 403},\n\t\t{\"\/api\/config\", 200},\n\t\t{\"\/api\/routes\", 200},\n\t\t{\"\/api\/version\", 200},\n\t\t{\"\/manual\", 403},\n\t\t{\"\/routes\", 200},\n\t\t{\"\/health\", 200},\n\t\t{\"\/logo.svg\", 200},\n\t\t{\"\/\", 303},\n\t}\n\n\trwTests := []test{\n\t\t{\"\/api\/manual\", 200},\n\t\t{\"\/api\/paths\", 200},\n\t\t{\"\/api\/config\", 200},\n\t\t{\"\/api\/routes\", 200},\n\t\t{\"\/api\/version\", 200},\n\t\t{\"\/manual\", 200},\n\t\t{\"\/routes\", 200},\n\t\t{\"\/health\", 200},\n\t\t{\"\/logo.svg\", 200},\n\t\t{\"\/\", 303},\n\t}\n\n\ttestAccess(\"ro\", roTests)\n\ttestAccess(\"rw\", rwTests)\n}\n<|endoftext|>"} {"text":"<commit_before>package v2action_test\n\nimport (\n\t\"errors\"\n\n\t. \"code.cloudfoundry.org\/cli\/actor\/v2action\"\n\t\"code.cloudfoundry.org\/cli\/actor\/v2action\/v2actionfakes\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv2\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv2\/constant\"\n\t\"code.cloudfoundry.org\/cli\/types\"\n\t\"code.cloudfoundry.org\/cli\/util\/manifest\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gstruct\"\n)\n\nvar _ = Describe(\"Manifest Actions\", func() {\n\tvar (\n\t\tactor *Actor\n\t\tfakeCloudControllerClient *v2actionfakes.FakeCloudControllerClient\n\t)\n\n\tBeforeEach(func() {\n\t\tfakeCloudControllerClient = new(v2actionfakes.FakeCloudControllerClient)\n\t\tactor = NewActor(fakeCloudControllerClient, nil, nil)\n\t})\n\n\tDescribe(\"CreateApplicationManifestByNameAndSpace\", func() {\n\t\tvar (\n\t\t\tmanifestApp manifest.Application\n\t\t\twarnings Warnings\n\t\t\tcreateErr error\n\t\t)\n\n\t\tJustBeforeEach(func() {\n\t\t\tmanifestApp, warnings, createErr = actor.CreateApplicationManifestByNameAndSpace(\"some-app\", \"some-space-guid\")\n\t\t})\n\n\t\tContext(\"when getting the application summary errors\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns([]ccv2.Application{}, ccv2.Warnings{\"some-app-warning\"}, errors.New(\"some-app-error\"))\n\t\t\t})\n\n\t\t\tIt(\"returns the error and all warnings\", func() {\n\t\t\t\tExpect(createErr).To(MatchError(\"some-app-error\"))\n\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when getting the application summary succeeds\", func() {\n\t\t\tvar app ccv2.Application\n\n\t\t\tDescribe(\"buildpacks\", func() {\n\t\t\t\tContext(\"when buildpack is not set\", func() {\n\t\t\t\t\tIt(\"does not populate buildpacks field\", func() {\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(BeNil())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when buildpack is set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates buildpacks field\", func() {\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(ConsistOf(\"some-buildpack\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when detected buildpack is set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tDetectedBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-detected-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates buildpacks field with the detected buildpack\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(ConsistOf(\"some-detected-buildpack\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when buildpack and detected buildpack are set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDetectedBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-detected-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates buildpacks field with the buildpack\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(ConsistOf(\"some-buildpack\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"docker images\", func() {\n\t\t\t\tContext(\"when docker image and username are provided\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tDockerImage: \"some-docker-image\",\n\t\t\t\t\t\t\tDockerCredentials: ccv2.DockerCredentials{\n\t\t\t\t\t\t\t\tUsername: \"some-docker-username\",\n\t\t\t\t\t\t\t\tPassword: \"some-docker-password\", \/\/ CC currently always returns an empty string\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates docker image and username\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"DockerImage\": Equal(\"some-docker-image\"),\n\t\t\t\t\t\t\t\"DockerUsername\": Equal(\"some-docker-username\"),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when docker image and username are not provided\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"does not include it in manifest\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"DockerImage\": BeEmpty(),\n\t\t\t\t\t\t\t\"DockerUsername\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"health check\", func() {\n\t\t\t\tContext(\"when the health check type is http\", func() {\n\t\t\t\t\tContext(\"when the health check endpoint path is '\/'\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckHTTP,\n\t\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\t\tnil)\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"does not include health check endpoint in manifest\", func() {\n\t\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"http\"),\n\t\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\n\t\t\t\t\tContext(\"when the health check endpoint path is not the default\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckHTTP,\n\t\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/whatever\",\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\t\tnil)\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"populates the health check endpoint in manifest\", func() {\n\t\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"http\"),\n\t\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": Equal(\"\/whatever\"),\n\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the health check type is process\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckProcess,\n\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"only populates health check type\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"process\"),\n\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the health check type is port\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckPort,\n\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"does not include health check type and endpoint\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"HealthCheckType\": BeEmpty(),\n\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"routes\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tContext(\"when routes are set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationRoutesReturns(\n\t\t\t\t\t\t\t[]ccv2.Route{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGUID: \"some-route-1-guid\",\n\t\t\t\t\t\t\t\t\tHost: \"host-1\",\n\t\t\t\t\t\t\t\t\tDomainGUID: \"some-domain-guid\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGUID: \"some-route-2-guid\",\n\t\t\t\t\t\t\t\t\tHost: \"host-2\",\n\t\t\t\t\t\t\t\t\tDomainGUID: \"some-domain-guid\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-routes-warning\"},\n\t\t\t\t\t\t\tnil)\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetSharedDomainReturns(\n\t\t\t\t\t\t\tccv2.Domain{GUID: \"some-domain-guid\", Name: \"some-domain\"},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-domain-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"populates the routes\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Routes).To(ConsistOf(\"host-1.some-domain\", \"host-2.some-domain\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when there are no routes\", func() {\n\t\t\t\t\tIt(\"returns the app with no-route set to true\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"Routes\": BeEmpty(),\n\t\t\t\t\t\t\t\"NoRoute\": Equal(true),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"services\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tContext(\"when getting services fails\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceBindingsReturns(\n\t\t\t\t\t\t\t[]ccv2.ServiceBinding{},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-service-warning\"},\n\t\t\t\t\t\t\terrors.New(\"some-service-error\"),\n\t\t\t\t\t\t)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the error and all warnings\", func() {\n\t\t\t\t\t\tExpect(createErr).To(MatchError(\"some-service-error\"))\n\t\t\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\", \"some-service-warning\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when getting services succeeds\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceBindingsReturns(\n\t\t\t\t\t\t\t[]ccv2.ServiceBinding{\n\t\t\t\t\t\t\t\t{ServiceInstanceGUID: \"service-1-guid\"},\n\t\t\t\t\t\t\t\t{ServiceInstanceGUID: \"service-2-guid\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-service-warning\"},\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceInstanceStub = func(serviceInstanceGUID string) (ccv2.ServiceInstance, ccv2.Warnings, error) {\n\t\t\t\t\t\t\tswitch serviceInstanceGUID {\n\t\t\t\t\t\t\tcase \"service-1-guid\":\n\t\t\t\t\t\t\t\treturn ccv2.ServiceInstance{Name: \"service-1\"}, ccv2.Warnings{\"some-service-1-warning\"}, nil\n\t\t\t\t\t\t\tcase \"service-2-guid\":\n\t\t\t\t\t\t\t\treturn ccv2.ServiceInstance{Name: \"service-2\"}, ccv2.Warnings{\"some-service-2-warning\"}, nil\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tpanic(\"unknown service instance\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"creates the corresponding manifest application\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Services).To(ConsistOf(\"service-1\", \"service-2\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"everything else\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\tDiskQuota: types.NullByteSizeInMb{IsSet: true, Value: 1024},\n\t\t\t\t\t\tCommand: types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-command\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDetectedStartCommand: types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-detected-command\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnvironmentVariables: map[string]string{\n\t\t\t\t\t\t\t\"env_1\": \"foo\",\n\t\t\t\t\t\t\t\"env_2\": \"182837403930483038\",\n\t\t\t\t\t\t\t\"env_3\": \"true\",\n\t\t\t\t\t\t\t\"env_4\": \"1.00001\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tHealthCheckTimeout: 120,\n\t\t\t\t\t\tInstances: types.NullInt{\n\t\t\t\t\t\t\tValue: 10,\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tMemory: types.NullByteSizeInMb{IsSet: true, Value: 200},\n\t\t\t\t\t\tStackGUID: \"some-stack-guid\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\n\t\t\t\t\tfakeCloudControllerClient.GetStackReturns(\n\t\t\t\t\t\tccv2.Stack{Name: \"some-stack\"},\n\t\t\t\t\t\tccv2.Warnings{\"some-stack-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tIt(\"creates the corresponding manifest application\", func() {\n\t\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\", \"some-stack-warning\"))\n\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\"Name\": Equal(\"some-app\"),\n\t\t\t\t\t\t\"DiskQuota\": Equal(types.NullByteSizeInMb{IsSet: true, Value: 1024}),\n\t\t\t\t\t\t\"Command\": Equal(types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-command\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"EnvironmentVariables\": Equal(map[string]string{\n\t\t\t\t\t\t\t\"env_1\": \"foo\",\n\t\t\t\t\t\t\t\"env_2\": \"182837403930483038\",\n\t\t\t\t\t\t\t\"env_3\": \"true\",\n\t\t\t\t\t\t\t\"env_4\": \"1.00001\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"HealthCheckTimeout\": Equal(120),\n\t\t\t\t\t\t\"Instances\": Equal(types.NullInt{\n\t\t\t\t\t\t\tValue: 10,\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"Memory\": Equal(types.NullByteSizeInMb{IsSet: true, Value: 200}),\n\t\t\t\t\t\t\"StackName\": Equal(\"some-stack\"),\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>remove detected buildpack test<commit_after>package v2action_test\n\nimport (\n\t\"errors\"\n\n\t. \"code.cloudfoundry.org\/cli\/actor\/v2action\"\n\t\"code.cloudfoundry.org\/cli\/actor\/v2action\/v2actionfakes\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv2\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv2\/constant\"\n\t\"code.cloudfoundry.org\/cli\/types\"\n\t\"code.cloudfoundry.org\/cli\/util\/manifest\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gstruct\"\n)\n\nvar _ = Describe(\"Manifest Actions\", func() {\n\tvar (\n\t\tactor *Actor\n\t\tfakeCloudControllerClient *v2actionfakes.FakeCloudControllerClient\n\t)\n\n\tBeforeEach(func() {\n\t\tfakeCloudControllerClient = new(v2actionfakes.FakeCloudControllerClient)\n\t\tactor = NewActor(fakeCloudControllerClient, nil, nil)\n\t})\n\n\tDescribe(\"CreateApplicationManifestByNameAndSpace\", func() {\n\t\tvar (\n\t\t\tmanifestApp manifest.Application\n\t\t\twarnings Warnings\n\t\t\tcreateErr error\n\t\t)\n\n\t\tJustBeforeEach(func() {\n\t\t\tmanifestApp, warnings, createErr = actor.CreateApplicationManifestByNameAndSpace(\"some-app\", \"some-space-guid\")\n\t\t})\n\n\t\tContext(\"when getting the application summary errors\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns([]ccv2.Application{}, ccv2.Warnings{\"some-app-warning\"}, errors.New(\"some-app-error\"))\n\t\t\t})\n\n\t\t\tIt(\"returns the error and all warnings\", func() {\n\t\t\t\tExpect(createErr).To(MatchError(\"some-app-error\"))\n\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when getting the application summary succeeds\", func() {\n\t\t\tvar app ccv2.Application\n\n\t\t\tDescribe(\"buildpacks\", func() {\n\t\t\t\tContext(\"when buildpack is not set\", func() {\n\t\t\t\t\tIt(\"does not populate buildpacks field\", func() {\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(BeNil())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when buildpack is set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates buildpacks field\", func() {\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(ConsistOf(\"some-buildpack\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when buildpack and detected buildpack are set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDetectedBuildpack: types.FilteredString{\n\t\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\t\tValue: \"some-detected-buildpack\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates buildpacks field with the buildpack\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Buildpacks).To(ConsistOf(\"some-buildpack\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"docker images\", func() {\n\t\t\t\tContext(\"when docker image and username are provided\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tDockerImage: \"some-docker-image\",\n\t\t\t\t\t\t\tDockerCredentials: ccv2.DockerCredentials{\n\t\t\t\t\t\t\t\tUsername: \"some-docker-username\",\n\t\t\t\t\t\t\t\tPassword: \"some-docker-password\", \/\/ CC currently always returns an empty string\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"populates docker image and username\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"DockerImage\": Equal(\"some-docker-image\"),\n\t\t\t\t\t\t\t\"DockerUsername\": Equal(\"some-docker-username\"),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when docker image and username are not provided\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"does not include it in manifest\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"DockerImage\": BeEmpty(),\n\t\t\t\t\t\t\t\"DockerUsername\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"health check\", func() {\n\t\t\t\tContext(\"when the health check type is http\", func() {\n\t\t\t\t\tContext(\"when the health check endpoint path is '\/'\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckHTTP,\n\t\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\t\tnil)\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"does not include health check endpoint in manifest\", func() {\n\t\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"http\"),\n\t\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\n\t\t\t\t\tContext(\"when the health check endpoint path is not the default\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckHTTP,\n\t\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/whatever\",\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\t\tnil)\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"populates the health check endpoint in manifest\", func() {\n\t\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"http\"),\n\t\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": Equal(\"\/whatever\"),\n\t\t\t\t\t\t\t}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the health check type is process\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckProcess,\n\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"only populates health check type\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"HealthCheckType\": Equal(\"process\"),\n\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the health check type is port\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\t\tHealthCheckType: constant.ApplicationHealthCheckPort,\n\t\t\t\t\t\t\tHealthCheckHTTPEndpoint: \"\/\",\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"does not include health check type and endpoint\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"HealthCheckType\": BeEmpty(),\n\t\t\t\t\t\t\t\"HealthCheckHTTPEndpoint\": BeEmpty(),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"routes\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tContext(\"when routes are set\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetApplicationRoutesReturns(\n\t\t\t\t\t\t\t[]ccv2.Route{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGUID: \"some-route-1-guid\",\n\t\t\t\t\t\t\t\t\tHost: \"host-1\",\n\t\t\t\t\t\t\t\t\tDomainGUID: \"some-domain-guid\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGUID: \"some-route-2-guid\",\n\t\t\t\t\t\t\t\t\tHost: \"host-2\",\n\t\t\t\t\t\t\t\t\tDomainGUID: \"some-domain-guid\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-routes-warning\"},\n\t\t\t\t\t\t\tnil)\n\n\t\t\t\t\t\tfakeCloudControllerClient.GetSharedDomainReturns(\n\t\t\t\t\t\t\tccv2.Domain{GUID: \"some-domain-guid\", Name: \"some-domain\"},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-domain-warning\"},\n\t\t\t\t\t\t\tnil)\n\t\t\t\t\t})\n\t\t\t\t\tIt(\"populates the routes\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Routes).To(ConsistOf(\"host-1.some-domain\", \"host-2.some-domain\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when there are no routes\", func() {\n\t\t\t\t\tIt(\"returns the app with no-route set to true\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\t\"Routes\": BeEmpty(),\n\t\t\t\t\t\t\t\"NoRoute\": Equal(true),\n\t\t\t\t\t\t}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"services\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tContext(\"when getting services fails\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceBindingsReturns(\n\t\t\t\t\t\t\t[]ccv2.ServiceBinding{},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-service-warning\"},\n\t\t\t\t\t\t\terrors.New(\"some-service-error\"),\n\t\t\t\t\t\t)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the error and all warnings\", func() {\n\t\t\t\t\t\tExpect(createErr).To(MatchError(\"some-service-error\"))\n\t\t\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\", \"some-service-warning\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when getting services succeeds\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceBindingsReturns(\n\t\t\t\t\t\t\t[]ccv2.ServiceBinding{\n\t\t\t\t\t\t\t\t{ServiceInstanceGUID: \"service-1-guid\"},\n\t\t\t\t\t\t\t\t{ServiceInstanceGUID: \"service-2-guid\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tccv2.Warnings{\"some-service-warning\"},\n\t\t\t\t\t\t\tnil,\n\t\t\t\t\t\t)\n\t\t\t\t\t\tfakeCloudControllerClient.GetServiceInstanceStub = func(serviceInstanceGUID string) (ccv2.ServiceInstance, ccv2.Warnings, error) {\n\t\t\t\t\t\t\tswitch serviceInstanceGUID {\n\t\t\t\t\t\t\tcase \"service-1-guid\":\n\t\t\t\t\t\t\t\treturn ccv2.ServiceInstance{Name: \"service-1\"}, ccv2.Warnings{\"some-service-1-warning\"}, nil\n\t\t\t\t\t\t\tcase \"service-2-guid\":\n\t\t\t\t\t\t\t\treturn ccv2.ServiceInstance{Name: \"service-2\"}, ccv2.Warnings{\"some-service-2-warning\"}, nil\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tpanic(\"unknown service instance\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"creates the corresponding manifest application\", func() {\n\t\t\t\t\t\tExpect(createErr).NotTo(HaveOccurred())\n\t\t\t\t\t\tExpect(manifestApp.Services).To(ConsistOf(\"service-1\", \"service-2\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"everything else\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tapp = ccv2.Application{\n\t\t\t\t\t\tGUID: \"some-app-guid\",\n\t\t\t\t\t\tName: \"some-app\",\n\t\t\t\t\t\tDiskQuota: types.NullByteSizeInMb{IsSet: true, Value: 1024},\n\t\t\t\t\t\tCommand: types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-command\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDetectedStartCommand: types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-detected-command\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnvironmentVariables: map[string]string{\n\t\t\t\t\t\t\t\"env_1\": \"foo\",\n\t\t\t\t\t\t\t\"env_2\": \"182837403930483038\",\n\t\t\t\t\t\t\t\"env_3\": \"true\",\n\t\t\t\t\t\t\t\"env_4\": \"1.00001\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tHealthCheckTimeout: 120,\n\t\t\t\t\t\tInstances: types.NullInt{\n\t\t\t\t\t\t\tValue: 10,\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tMemory: types.NullByteSizeInMb{IsSet: true, Value: 200},\n\t\t\t\t\t\tStackGUID: \"some-stack-guid\",\n\t\t\t\t\t}\n\n\t\t\t\t\tfakeCloudControllerClient.GetApplicationsReturns(\n\t\t\t\t\t\t[]ccv2.Application{app},\n\t\t\t\t\t\tccv2.Warnings{\"some-app-warning\"},\n\t\t\t\t\t\tnil)\n\n\t\t\t\t\tfakeCloudControllerClient.GetStackReturns(\n\t\t\t\t\t\tccv2.Stack{Name: \"some-stack\"},\n\t\t\t\t\t\tccv2.Warnings{\"some-stack-warning\"},\n\t\t\t\t\t\tnil)\n\t\t\t\t})\n\n\t\t\t\tIt(\"creates the corresponding manifest application\", func() {\n\t\t\t\t\tExpect(warnings).To(ConsistOf(\"some-app-warning\", \"some-stack-warning\"))\n\t\t\t\t\tExpect(manifestApp).To(MatchFields(IgnoreExtras, Fields{\n\t\t\t\t\t\t\"Name\": Equal(\"some-app\"),\n\t\t\t\t\t\t\"DiskQuota\": Equal(types.NullByteSizeInMb{IsSet: true, Value: 1024}),\n\t\t\t\t\t\t\"Command\": Equal(types.FilteredString{\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t\tValue: \"some-command\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"EnvironmentVariables\": Equal(map[string]string{\n\t\t\t\t\t\t\t\"env_1\": \"foo\",\n\t\t\t\t\t\t\t\"env_2\": \"182837403930483038\",\n\t\t\t\t\t\t\t\"env_3\": \"true\",\n\t\t\t\t\t\t\t\"env_4\": \"1.00001\",\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"HealthCheckTimeout\": Equal(120),\n\t\t\t\t\t\t\"Instances\": Equal(types.NullInt{\n\t\t\t\t\t\t\tValue: 10,\n\t\t\t\t\t\t\tIsSet: true,\n\t\t\t\t\t\t}),\n\t\t\t\t\t\t\"Memory\": Equal(types.NullByteSizeInMb{IsSet: true, Value: 200}),\n\t\t\t\t\t\t\"StackName\": Equal(\"some-stack\"),\n\t\t\t\t\t}))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2011 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysqlindexer\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"camlistore.org\/pkg\/blobserver\"\n\t\"camlistore.org\/pkg\/index\"\n\t\"camlistore.org\/pkg\/jsonconfig\"\n\n\t_ \"camlistore.org\/third_party\/github.com\/ziutek\/mymysql\/godrv\"\n)\n\ntype myIndexStorage struct {\n\thost, user, password, database string\n\n\tdb *sql.DB\n}\n\nvar _ index.IndexStorage = (*myIndexStorage)(nil)\n\nfunc (ms *myIndexStorage) BeginBatch() index.BatchMutation {\n\t\/\/ TODO\n\treturn nil \n}\n\nfunc (ms *myIndexStorage) CommitBatch(b index.BatchMutation) error {\n\treturn errors.New(\"TODO(bradfitz): implement\")\n}\n\nfunc (ms *myIndexStorage) Get(key string) (value string, err error) {\n\terr = ms.db.QueryRow(\"SELECT v FROM rows WHERE k=?\", key).Scan(&value)\n\treturn\n}\n\nfunc (ms *myIndexStorage) Set(key, value string) error {\n\t_, err := ms.db.Exec(\"INSERT INTO rows (k, v) VALUES (?, ?)\", key, value)\n\treturn err\n}\n\nfunc (ms *myIndexStorage) Delete(key string) error {\n\t_, err := ms.db.Exec(\"DELETE FROM rows WHERE k=?\", key)\n\treturn err\n}\n\nfunc (ms *myIndexStorage) Find(key string) index.Iterator {\n\tpanic(\"TODO(bradfitz): implement\")\n}\n\nfunc newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {\n\tis := &myIndexStorage{\n\t\thost: config.OptionalString(\"host\", \"localhost\"),\n\t\tuser: config.RequiredString(\"user\"),\n\t\tpassword: config.OptionalString(\"password\", \"\"),\n\t\tdatabase: config.RequiredString(\"database\"),\n\t}\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tdb, err := sql.Open(\"mymysql\", is.database + \"\/\" + is.user + \"\/\" + is.password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tis.db = db\n\tif err := is.ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tindexer := index.New(is)\n\tversion, err := is.SchemaVersion()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting schema version (need to init database?): %v\", err)\n\t}\n\tif version != requiredSchemaVersion {\n\t\tif os.Getenv(\"CAMLI_ADVERTISED_PASSWORD\") != \"\" {\n\t\t\t\/\/ Good signal that we're using the dev-server script, so help out\n\t\t\t\/\/ the user with a more useful tip:\n\t\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (run \\\".\/dev-server --wipe\\\" to wipe both your blobs and re-populate the database schema)\", version, requiredSchemaVersion)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (need to re-init\/upgrade database?)\",\n\t\t\tversion, requiredSchemaVersion)\n\t}\n\n\treturn indexer, nil\n}\n\nfunc init() {\n\tblobserver.RegisterStorageConstructor(\"mysqlindexer\", blobserver.StorageConstructor(newFromConfig))\n}\n\nfunc (mi *myIndexStorage) ping() error {\n\t\/\/ TODO(bradfitz): something more efficient here?\n\t_, err := mi.SchemaVersion()\n\treturn err\n}\n\nfunc (mi *myIndexStorage) SchemaVersion() (version int, err error) {\n\terr = mi.db.QueryRow(\"SELECT value FROM meta WHERE metakey='version'\").Scan(&version)\n\treturn\n}\n<commit_msg>mysqlindexer: implement batch mutation<commit_after>\/*\nCopyright 2011 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysqlindexer\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"camlistore.org\/pkg\/blobserver\"\n\t\"camlistore.org\/pkg\/index\"\n\t\"camlistore.org\/pkg\/jsonconfig\"\n\n\t_ \"camlistore.org\/third_party\/github.com\/ziutek\/mymysql\/godrv\"\n)\n\ntype myIndexStorage struct {\n\thost, user, password, database string\n\n\tdb *sql.DB\n}\n\nvar _ index.IndexStorage = (*myIndexStorage)(nil)\n\ntype batchTx struct {\n\ttx *sql.Tx\n\terr error \/\/ sticky\n}\n\nfunc (b *batchTx) Set(key, value string) {\n\tif b.err != nil {\n\t\treturn\n\t}\n\t_, b.err = b.tx.Exec(\"REPLACE INTO rows (k, v) VALUES (?, ?)\", key, value)\n}\n\nfunc (b *batchTx) Delete(key string) {\n\tif b.err != nil {\n\t\treturn\n\t}\n\t_, b.err = b.tx.Exec(\"DELETE FROM rows WHERE k=?\", key)\n}\n\nfunc (ms *myIndexStorage) BeginBatch() index.BatchMutation {\n\ttx, err := ms.db.Begin()\n\treturn &batchTx{\n\t\ttx: tx,\n\t\terr: err,\n\t}\n}\n\nfunc (ms *myIndexStorage) CommitBatch(b index.BatchMutation) error {\n\tbt, ok := b.(*batchTx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"wrong BatchMutation type %T\", b)\n\t}\n\tif bt.err != nil {\n\t\treturn bt.err\n\t}\n\treturn bt.tx.Commit()\n}\n\nfunc (ms *myIndexStorage) Get(key string) (value string, err error) {\n\terr = ms.db.QueryRow(\"SELECT v FROM rows WHERE k=?\", key).Scan(&value)\n\treturn\n}\n\nfunc (ms *myIndexStorage) Set(key, value string) error {\n\t_, err := ms.db.Exec(\"REPLACE INTO rows (k, v) VALUES (?, ?)\", key, value)\n\treturn err\n}\n\nfunc (ms *myIndexStorage) Delete(key string) error {\n\t_, err := ms.db.Exec(\"DELETE FROM rows WHERE k=?\", key)\n\treturn err\n}\n\nfunc (ms *myIndexStorage) Find(key string) index.Iterator {\n\tpanic(\"TODO(bradfitz): implement\")\n}\n\nfunc newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {\n\tis := &myIndexStorage{\n\t\thost: config.OptionalString(\"host\", \"localhost\"),\n\t\tuser: config.RequiredString(\"user\"),\n\t\tpassword: config.OptionalString(\"password\", \"\"),\n\t\tdatabase: config.RequiredString(\"database\"),\n\t}\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tdb, err := sql.Open(\"mymysql\", is.database+\"\/\"+is.user+\"\/\"+is.password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tis.db = db\n\tif err := is.ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tindexer := index.New(is)\n\tversion, err := is.SchemaVersion()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting schema version (need to init database?): %v\", err)\n\t}\n\tif version != requiredSchemaVersion {\n\t\tif os.Getenv(\"CAMLI_ADVERTISED_PASSWORD\") != \"\" {\n\t\t\t\/\/ Good signal that we're using the dev-server script, so help out\n\t\t\t\/\/ the user with a more useful tip:\n\t\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (run \\\".\/dev-server --wipe\\\" to wipe both your blobs and re-populate the database schema)\", version, requiredSchemaVersion)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (need to re-init\/upgrade database?)\",\n\t\t\tversion, requiredSchemaVersion)\n\t}\n\n\treturn indexer, nil\n}\n\nfunc init() {\n\tblobserver.RegisterStorageConstructor(\"mysqlindexer\", blobserver.StorageConstructor(newFromConfig))\n}\n\nfunc (mi *myIndexStorage) ping() error {\n\t\/\/ TODO(bradfitz): something more efficient here?\n\t_, err := mi.SchemaVersion()\n\treturn err\n}\n\nfunc (mi *myIndexStorage) SchemaVersion() (version int, err error) {\n\terr = mi.db.QueryRow(\"SELECT value FROM meta WHERE metakey='version'\").Scan(&version)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Callisto - Yet another Solar System simulator\n *\n * Copyright (c) 2016, Valerian Saliou <valerian@valeriansaliou.name>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and\/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\n\npackage main\n\nimport (\n \"math\"\n \"github.com\/go-gl\/mathgl\/mgl32\"\n)\n\n\/\/ Math\nconst (\n ConfigMathDegreeToRadian float64 = math.Pi \/ 180\n)\n\n\/\/ Time\nconst (\n ConfigTimeSecondToMilliseconds int = 1000\n ConfigTimeMinuteToMilliseconds int = 60 * ConfigTimeSecondToMilliseconds\n ConfigTimeHourToMilliseconds int = 60 * ConfigTimeMinuteToMilliseconds\n ConfigTimeDayToMilliseconds int = 24 * ConfigTimeHourToMilliseconds\n ConfigTimeYearToMilliseconds int = 365 * ConfigTimeDayToMilliseconds\n\n ConfigTimeStartFromMilliseconds int = ConfigTimeYearToMilliseconds\n\n ConfigTimeNormalizeFactor float32 = 64.0\n)\n\n\/\/ Window\nconst (\n ConfigWindowTitle string = \"Callisto - Solar System Simulator\"\n ConfigWindowFullScreen bool = true\n)\n\n\/\/ Speed\nconst (\n ConfigSpeedFramerateDefault float64 = 60\n ConfigSpeedFramerateFactor float64 = 1.5\n)\n\n\/\/ Projection\nvar (\n ConfigProjectionFieldNear float32 = 0.1\n ConfigProjectionFieldFar float32 = 9999999999999999999.0\n)\n\n\/\/ Camera\nvar (\n ConfigCameraDefaultEye = mgl32.Vec3{300, -350, -800}\n ConfigCameraDefaultTarget = mgl32.Vec3{0.255, 0.650, 0.000}\n\n ConfigCameraMoveCelerityCruise = 1.0\n ConfigCameraMoveCelerityTurbo = 10.0\n\n ConfigCameraInertiaProduceForward = 0.0075\n ConfigCameraInertiaProduceBackward = -0.0075\n ConfigCameraInertiaConsumeForward = -0.005\n ConfigCameraInertiaConsumeBackward = 0.005\n\n ConfigCameraTargetAmortizeFactor float32 = 0.002\n\n ConfigCameraOrbitMagnification float32 = 6\n)\n\n\/\/ Object\nconst (\n ConfigObjectTexturePhiMax int = 90\n ConfigObjectTextureThetaMax int = 360\n ConfigObjectTextureStepLatitude int = 3\n ConfigObjectTextureStepLongitude int = 6\n\n ConfigObjectFullAngle float64 = 4.0 * math.Pi\n\n ConfigObjectFactorSize float64 = 0.05\n ConfigObjectFactorSpeedScene float64 = 1.0\n ConfigObjectFactorSpeedMaximum float64 = 200000.0\n ConfigObjectFactorSpeedChangeFactor float64 = 100.0\n)\n<commit_msg>Windowed, rather<commit_after>\/* Callisto - Yet another Solar System simulator\n *\n * Copyright (c) 2016, Valerian Saliou <valerian@valeriansaliou.name>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and\/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\n\npackage main\n\nimport (\n \"math\"\n \"github.com\/go-gl\/mathgl\/mgl32\"\n)\n\n\/\/ Math\nconst (\n ConfigMathDegreeToRadian float64 = math.Pi \/ 180\n)\n\n\/\/ Time\nconst (\n ConfigTimeSecondToMilliseconds int = 1000\n ConfigTimeMinuteToMilliseconds int = 60 * ConfigTimeSecondToMilliseconds\n ConfigTimeHourToMilliseconds int = 60 * ConfigTimeMinuteToMilliseconds\n ConfigTimeDayToMilliseconds int = 24 * ConfigTimeHourToMilliseconds\n ConfigTimeYearToMilliseconds int = 365 * ConfigTimeDayToMilliseconds\n\n ConfigTimeStartFromMilliseconds int = ConfigTimeYearToMilliseconds\n\n ConfigTimeNormalizeFactor float32 = 64.0\n)\n\n\/\/ Window\nconst (\n ConfigWindowTitle string = \"Callisto - Solar System Simulator\"\n ConfigWindowFullScreen bool = false\n)\n\n\/\/ Speed\nconst (\n ConfigSpeedFramerateDefault float64 = 60\n ConfigSpeedFramerateFactor float64 = 1.5\n)\n\n\/\/ Projection\nvar (\n ConfigProjectionFieldNear float32 = 0.1\n ConfigProjectionFieldFar float32 = 9999999999999999999.0\n)\n\n\/\/ Camera\nvar (\n ConfigCameraDefaultEye = mgl32.Vec3{300, -350, -800}\n ConfigCameraDefaultTarget = mgl32.Vec3{0.255, 0.650, 0.000}\n\n ConfigCameraMoveCelerityCruise = 1.0\n ConfigCameraMoveCelerityTurbo = 10.0\n\n ConfigCameraInertiaProduceForward = 0.0075\n ConfigCameraInertiaProduceBackward = -0.0075\n ConfigCameraInertiaConsumeForward = -0.005\n ConfigCameraInertiaConsumeBackward = 0.005\n\n ConfigCameraTargetAmortizeFactor float32 = 0.002\n\n ConfigCameraOrbitMagnification float32 = 6\n)\n\n\/\/ Object\nconst (\n ConfigObjectTexturePhiMax int = 90\n ConfigObjectTextureThetaMax int = 360\n ConfigObjectTextureStepLatitude int = 3\n ConfigObjectTextureStepLongitude int = 6\n\n ConfigObjectFullAngle float64 = 4.0 * math.Pi\n\n ConfigObjectFactorSize float64 = 0.05\n ConfigObjectFactorSpeedScene float64 = 1.0\n ConfigObjectFactorSpeedMaximum float64 = 200000.0\n ConfigObjectFactorSpeedChangeFactor float64 = 100.0\n)\n<|endoftext|>"} {"text":"<commit_before>package container_daemon_test\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry-incubator\/garden\"\n\t. \"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/fake_connector\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/fake_term\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/unix_socket\"\n\t\"github.com\/docker\/docker\/pkg\/term\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Process\", func() {\n\tvar socketConnector *fake_connector.FakeConnector\n\tvar fakeTerm *fake_term.FakeTerm\n\tvar sigwinchCh chan os.Signal\n\n\tvar process *Process\n\tvar pidfile string\n\n\tBeforeEach(func() {\n\t\tfakeTerm = new(fake_term.FakeTerm)\n\t\tsocketConnector = new(fake_connector.FakeConnector)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, FakeFd(0)}, 0, nil)\n\n\t\tsigwinchCh = make(chan os.Signal)\n\n\t\ttmp, err := ioutil.TempDir(\"\", \"pidfile\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tpidfile = path.Join(tmp, \"the-pid-file\")\n\n\t\tprocess = &Process{\n\t\t\tConnector: socketConnector,\n\t\t\tTerm: fakeTerm,\n\t\t\tSigwinchCh: sigwinchCh,\n\t\t\tSpec: &garden.ProcessSpec{\n\t\t\t\tPath: \"\/bin\/echo\",\n\t\t\t\tArgs: []string{\"Hello world\"},\n\t\t\t},\n\t\t\tPidfile: Pidfile{pidfile},\n\t\t\tIO: nil,\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tos.Remove(pidfile)\n\t})\n\n\tIt(\"sends the correct process payload to the server\", func() {\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tExpect(socketConnector.ConnectCallCount()).To(Equal(1))\n\t\tExpect(socketConnector.ConnectArgsForCall(0)).To(Equal(process.Spec))\n\t})\n\n\tContext(\"when the process is interactive (i.e. connected to a TTY)\", func() {\n\t\tBeforeEach(func() {\n\t\t\tprocess.Spec.TTY = &garden.TTYSpec{}\n\t\t})\n\n\t\tIt(\"makes stdin a raw terminal (because the remote terminal will handle echoing etc.)\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{FakeFd(0), FakeFd(0)}, 0, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\t\t\tExpect(fakeTerm.SetRawTerminalCallCount()).To(Equal(1))\n\t\t})\n\n\t\tIt(\"restores the terminal state when the process is cleaned up\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{FakeFd(0), FakeFd(0)}, 0, nil)\n\n\t\t\tstate := &term.State{}\n\t\t\tfakeTerm.SetRawTerminalReturns(state, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\t\t\tExpect(fakeTerm.RestoreTerminalCallCount()).To(Equal(0))\n\n\t\t\tprocess.Cleanup()\n\t\t\tExpect(fakeTerm.RestoreTerminalCallCount()).To(Equal(1))\n\t\t\tfd, state := fakeTerm.RestoreTerminalArgsForCall(0)\n\t\t\tExpect(fd).To(Equal(os.Stdin.Fd()))\n\t\t\tExpect(state).To(Equal(state))\n\t\t})\n\n\t\tIt(\"sets the window size of the process based on the window size of standard input\", func() {\n\t\t\tremotePty := FakeFd(123)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(999)}, 0, nil)\n\t\t\tfakeTerm.GetWinsizeReturns(&term.Winsize{\n\t\t\t\tWidth: 1, Height: 2,\n\t\t\t}, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\n\t\t\tExpect(fakeTerm.GetWinsizeCallCount()).To(Equal(1))\n\t\t\tExpect(fakeTerm.GetWinsizeArgsForCall(0)).To(Equal(uintptr(os.Stdin.Fd())))\n\n\t\t\tExpect(fakeTerm.SetWinsizeCallCount()).To(Equal(1))\n\t\t\tfd, size := fakeTerm.SetWinsizeArgsForCall(0)\n\t\t\tExpect(fd).To(Equal(uintptr(123)))\n\t\t\tExpect(size).To(Equal(&term.Winsize{\n\t\t\t\tWidth: 1, Height: 2,\n\t\t\t}))\n\t\t})\n\n\t\tContext(\"when SIGWINCH is received\", func() {\n\t\t\tIt(\"resizes the pty to match the window size of stdin\", func() {\n\t\t\t\tremotePty := FakeFd(123)\n\t\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(999)}, 0, nil)\n\n\t\t\t\tfakeTerm.GetWinsizeReturns(&term.Winsize{\n\t\t\t\t\tWidth: 3, Height: 4,\n\t\t\t\t}, nil)\n\n\t\t\t\tExpect(process.Start()).To(Succeed())\n\n\t\t\t\tExpect(fakeTerm.SetWinsizeCallCount()).To(Equal(1))\n\n\t\t\t\tsigwinchCh <- syscall.SIGWINCH\n\n\t\t\t\tEventually(fakeTerm.SetWinsizeCallCount(), 10*time.Second, 500*time.Millisecond).Should(Equal(2))\n\t\t\t\tfd, size := fakeTerm.SetWinsizeArgsForCall(1)\n\t\t\t\tExpect(fd).To(Equal(uintptr(123)))\n\t\t\t\tExpect(size).To(Equal(&term.Winsize{\n\t\t\t\t\tWidth: 3, Height: 4,\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"copies the returned PTYs output to standard output\", func() {\n\t\t\tremotePty := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(0)}, 0, nil)\n\n\t\t\trecvStdout := FakeFd(0)\n\t\t\tprocess.IO = &garden.ProcessIO{\n\t\t\t\tStdout: recvStdout,\n\t\t\t}\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tremotePty.Write([]byte(\"Hello world\"))\n\t\t\tEventually(recvStdout).Should(gbytes.Say(\"Hello world\"))\n\t\t})\n\n\t\tIt(\"copies standard input to the PTY\", func() {\n\t\t\tremotePty := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(0)}, 0, nil)\n\n\t\t\tsentStdin := FakeFd(0)\n\t\t\tprocess.IO = &garden.ProcessIO{\n\t\t\t\tStdin: sentStdin,\n\t\t\t}\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tsentStdin.Write([]byte(\"Hello world\"))\n\t\t\tEventually(remotePty).Should(gbytes.Say(\"Hello world\"))\n\t\t})\n\t})\n\n\tContext(\"when a pidfile parameter is supplied\", func() {\n\t\tIt(\"writes the PID of the spawned process to the pidfile\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, FakeFd(0)}, 123, nil)\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tExpect(pidfile).To(BeAnExistingFile())\n\t\t\tExpect(ioutil.ReadFile(pidfile)).To(Equal([]byte(\"123\\n\")))\n\t\t})\n\n\t\tContext(\"when writing the pidfile fails\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\tExpect(os.MkdirAll(pidfile, 0700)) \/\/ make writing fail\n\t\t\t\tdefer os.Remove(pidfile)\n\t\t\t\tExpect(process.Start()).To(MatchError(ContainSubstring(\"container_daemon: write pidfile\")))\n\t\t\t})\n\t\t})\n\t})\n\n\tIt(\"streams stdout back\", func() {\n\t\tremoteStdout := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, remoteStdout, nil, FakeFd(0)}, 0, nil)\n\n\t\trecvStdout := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStdout: recvStdout,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteStdout.Write([]byte(\"Hello world\"))\n\t\tEventually(recvStdout).Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"streams stderr back\", func() {\n\t\tremoteStderr := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, remoteStderr, FakeFd(0)}, 0, nil)\n\n\t\trecvStderr := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStderr: recvStderr,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteStderr.Write([]byte(\"Hello world\"))\n\t\tEventually(recvStderr).Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"streams stdin over\", func() {\n\t\tremoteStdin := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remoteStdin, nil, nil, FakeFd(0)}, 0, nil)\n\n\t\tsentStdin := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStdin: sentStdin,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tsentStdin.Write([]byte(\"Hello world\"))\n\t\tEventually(remoteStdin).Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"waits for and reports the correct exit status\", func() {\n\t\tremoteExitFd := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, remoteExitFd}, 0, nil)\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteExitFd.Write([]byte{42})\n\t\tExpect(process.Wait()).To(Equal(42))\n\t})\n\n\tContext(\"when the exit status is returned\", func() {\n\t\tIt(\"removes the pidfile\", func() {\n\t\t\tremoteExitFd := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, remoteExitFd}, 0, nil)\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tExpect(pidfile).To(BeAnExistingFile())\n\t\t\tprocess.Wait()\n\t\t\tExpect(pidfile).NotTo(BeAnExistingFile())\n\t\t})\n\t})\n\n\tContext(\"when it fails to connect\", func() {\n\t\tIt(\"returns an error\", func() {\n\t\t\tsocketConnector.ConnectReturns(nil, 0, errors.New(\"Hoy hoy\"))\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).To(MatchError(\"container_daemon: connect to socket: Hoy hoy\"))\n\t\t})\n\t})\n})\n\ntype fakefd struct {\n\tfd uintptr\n\tbuffer *gbytes.Buffer\n}\n\nfunc FakeFd(fd uintptr) *fakefd {\n\treturn &fakefd{fd, gbytes.NewBuffer()}\n}\n\nfunc (f *fakefd) Fd() uintptr {\n\treturn f.fd\n}\n\nfunc (f *fakefd) Buffer() *gbytes.Buffer {\n\treturn f.buffer\n}\n\nfunc (f *fakefd) Read(b []byte) (int, error) {\n\treturn f.buffer.Read(b)\n}\n\nfunc (f *fakefd) Write(b []byte) (int, error) {\n\treturn f.buffer.Write(b)\n}\n\nfunc (f *fakefd) Close() error {\n\treturn f.buffer.Close()\n}\n<commit_msg>Bump timeouts to avoid flakes on slow CI boxes<commit_after>package container_daemon_test\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cloudfoundry-incubator\/garden\"\n\t. \"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/fake_connector\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/fake_term\"\n\t\"github.com\/cloudfoundry-incubator\/garden-linux\/container_daemon\/unix_socket\"\n\t\"github.com\/docker\/docker\/pkg\/term\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Process\", func() {\n\tvar socketConnector *fake_connector.FakeConnector\n\tvar fakeTerm *fake_term.FakeTerm\n\tvar sigwinchCh chan os.Signal\n\n\tvar process *Process\n\tvar pidfile string\n\n\tBeforeEach(func() {\n\t\tfakeTerm = new(fake_term.FakeTerm)\n\t\tsocketConnector = new(fake_connector.FakeConnector)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, FakeFd(0)}, 0, nil)\n\n\t\tsigwinchCh = make(chan os.Signal)\n\n\t\ttmp, err := ioutil.TempDir(\"\", \"pidfile\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tpidfile = path.Join(tmp, \"the-pid-file\")\n\n\t\tprocess = &Process{\n\t\t\tConnector: socketConnector,\n\t\t\tTerm: fakeTerm,\n\t\t\tSigwinchCh: sigwinchCh,\n\t\t\tSpec: &garden.ProcessSpec{\n\t\t\t\tPath: \"\/bin\/echo\",\n\t\t\t\tArgs: []string{\"Hello world\"},\n\t\t\t},\n\t\t\tPidfile: Pidfile{pidfile},\n\t\t\tIO: nil,\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tos.Remove(pidfile)\n\t})\n\n\tIt(\"sends the correct process payload to the server\", func() {\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tExpect(socketConnector.ConnectCallCount()).To(Equal(1))\n\t\tExpect(socketConnector.ConnectArgsForCall(0)).To(Equal(process.Spec))\n\t})\n\n\tContext(\"when the process is interactive (i.e. connected to a TTY)\", func() {\n\t\tBeforeEach(func() {\n\t\t\tprocess.Spec.TTY = &garden.TTYSpec{}\n\t\t})\n\n\t\tIt(\"makes stdin a raw terminal (because the remote terminal will handle echoing etc.)\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{FakeFd(0), FakeFd(0)}, 0, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\t\t\tExpect(fakeTerm.SetRawTerminalCallCount()).To(Equal(1))\n\t\t})\n\n\t\tIt(\"restores the terminal state when the process is cleaned up\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{FakeFd(0), FakeFd(0)}, 0, nil)\n\n\t\t\tstate := &term.State{}\n\t\t\tfakeTerm.SetRawTerminalReturns(state, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\t\t\tExpect(fakeTerm.RestoreTerminalCallCount()).To(Equal(0))\n\n\t\t\tprocess.Cleanup()\n\t\t\tExpect(fakeTerm.RestoreTerminalCallCount()).To(Equal(1))\n\t\t\tfd, state := fakeTerm.RestoreTerminalArgsForCall(0)\n\t\t\tExpect(fd).To(Equal(os.Stdin.Fd()))\n\t\t\tExpect(state).To(Equal(state))\n\t\t})\n\n\t\tIt(\"sets the window size of the process based on the window size of standard input\", func() {\n\t\t\tremotePty := FakeFd(123)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(999)}, 0, nil)\n\t\t\tfakeTerm.GetWinsizeReturns(&term.Winsize{\n\t\t\t\tWidth: 1, Height: 2,\n\t\t\t}, nil)\n\n\t\t\tExpect(process.Start()).To(Succeed())\n\n\t\t\tExpect(fakeTerm.GetWinsizeCallCount()).To(Equal(1))\n\t\t\tExpect(fakeTerm.GetWinsizeArgsForCall(0)).To(Equal(uintptr(os.Stdin.Fd())))\n\n\t\t\tExpect(fakeTerm.SetWinsizeCallCount()).To(Equal(1))\n\t\t\tfd, size := fakeTerm.SetWinsizeArgsForCall(0)\n\t\t\tExpect(fd).To(Equal(uintptr(123)))\n\t\t\tExpect(size).To(Equal(&term.Winsize{\n\t\t\t\tWidth: 1, Height: 2,\n\t\t\t}))\n\t\t})\n\n\t\tContext(\"when SIGWINCH is received\", func() {\n\t\t\tIt(\"resizes the pty to match the window size of stdin\", func() {\n\t\t\t\tremotePty := FakeFd(123)\n\t\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(999)}, 0, nil)\n\n\t\t\t\tfakeTerm.GetWinsizeReturns(&term.Winsize{\n\t\t\t\t\tWidth: 3, Height: 4,\n\t\t\t\t}, nil)\n\n\t\t\t\tExpect(process.Start()).To(Succeed())\n\n\t\t\t\tExpect(fakeTerm.SetWinsizeCallCount()).To(Equal(1))\n\n\t\t\t\tsigwinchCh <- syscall.SIGWINCH\n\n\t\t\t\tEventually(fakeTerm.SetWinsizeCallCount(), 10*time.Second, 500*time.Millisecond).Should(Equal(2))\n\t\t\t\tfd, size := fakeTerm.SetWinsizeArgsForCall(1)\n\t\t\t\tExpect(fd).To(Equal(uintptr(123)))\n\t\t\t\tExpect(size).To(Equal(&term.Winsize{\n\t\t\t\t\tWidth: 3, Height: 4,\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"copies the returned PTYs output to standard output\", func() {\n\t\t\tremotePty := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(0)}, 0, nil)\n\n\t\t\trecvStdout := FakeFd(0)\n\t\t\tprocess.IO = &garden.ProcessIO{\n\t\t\t\tStdout: recvStdout,\n\t\t\t}\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tremotePty.Write([]byte(\"Hello world\"))\n\t\t\tEventually(recvStdout, \"5s\").Should(gbytes.Say(\"Hello world\"))\n\t\t})\n\n\t\tIt(\"copies standard input to the PTY\", func() {\n\t\t\tremotePty := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remotePty, FakeFd(0)}, 0, nil)\n\n\t\t\tsentStdin := FakeFd(0)\n\t\t\tprocess.IO = &garden.ProcessIO{\n\t\t\t\tStdin: sentStdin,\n\t\t\t}\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tsentStdin.Write([]byte(\"Hello world\"))\n\t\t\tEventually(remotePty, \"5s\").Should(gbytes.Say(\"Hello world\"))\n\t\t})\n\t})\n\n\tContext(\"when a pidfile parameter is supplied\", func() {\n\t\tIt(\"writes the PID of the spawned process to the pidfile\", func() {\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, FakeFd(0)}, 123, nil)\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tExpect(pidfile).To(BeAnExistingFile())\n\t\t\tExpect(ioutil.ReadFile(pidfile)).To(Equal([]byte(\"123\\n\")))\n\t\t})\n\n\t\tContext(\"when writing the pidfile fails\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\tExpect(os.MkdirAll(pidfile, 0700)) \/\/ make writing fail\n\t\t\t\tdefer os.Remove(pidfile)\n\t\t\t\tExpect(process.Start()).To(MatchError(ContainSubstring(\"container_daemon: write pidfile\")))\n\t\t\t})\n\t\t})\n\t})\n\n\tIt(\"streams stdout back\", func() {\n\t\tremoteStdout := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, remoteStdout, nil, FakeFd(0)}, 0, nil)\n\n\t\trecvStdout := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStdout: recvStdout,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteStdout.Write([]byte(\"Hello world\"))\n\t\tEventually(recvStdout, \"5s\").Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"streams stderr back\", func() {\n\t\tremoteStderr := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, remoteStderr, FakeFd(0)}, 0, nil)\n\n\t\trecvStderr := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStderr: recvStderr,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteStderr.Write([]byte(\"Hello world\"))\n\t\tEventually(recvStderr, \"5s\").Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"streams stdin over\", func() {\n\t\tremoteStdin := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{remoteStdin, nil, nil, FakeFd(0)}, 0, nil)\n\n\t\tsentStdin := FakeFd(0)\n\t\tprocess.IO = &garden.ProcessIO{\n\t\t\tStdin: sentStdin,\n\t\t}\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tsentStdin.Write([]byte(\"Hello world\"))\n\t\tEventually(remoteStdin, \"5s\").Should(gbytes.Say(\"Hello world\"))\n\t})\n\n\tIt(\"waits for and reports the correct exit status\", func() {\n\t\tremoteExitFd := FakeFd(0)\n\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, remoteExitFd}, 0, nil)\n\n\t\terr := process.Start()\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tremoteExitFd.Write([]byte{42})\n\t\tExpect(process.Wait()).To(Equal(42))\n\t})\n\n\tContext(\"when the exit status is returned\", func() {\n\t\tIt(\"removes the pidfile\", func() {\n\t\t\tremoteExitFd := FakeFd(0)\n\t\t\tsocketConnector.ConnectReturns([]unix_socket.Fd{nil, nil, nil, remoteExitFd}, 0, nil)\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tExpect(pidfile).To(BeAnExistingFile())\n\t\t\tprocess.Wait()\n\t\t\tExpect(pidfile).NotTo(BeAnExistingFile())\n\t\t})\n\t})\n\n\tContext(\"when it fails to connect\", func() {\n\t\tIt(\"returns an error\", func() {\n\t\t\tsocketConnector.ConnectReturns(nil, 0, errors.New(\"Hoy hoy\"))\n\n\t\t\terr := process.Start()\n\t\t\tExpect(err).To(MatchError(\"container_daemon: connect to socket: Hoy hoy\"))\n\t\t})\n\t})\n})\n\ntype fakefd struct {\n\tfd uintptr\n\tbuffer *gbytes.Buffer\n}\n\nfunc FakeFd(fd uintptr) *fakefd {\n\treturn &fakefd{fd, gbytes.NewBuffer()}\n}\n\nfunc (f *fakefd) Fd() uintptr {\n\treturn f.fd\n}\n\nfunc (f *fakefd) Buffer() *gbytes.Buffer {\n\treturn f.buffer\n}\n\nfunc (f *fakefd) Read(b []byte) (int, error) {\n\treturn f.buffer.Read(b)\n}\n\nfunc (f *fakefd) Write(b []byte) (int, error) {\n\treturn f.buffer.Write(b)\n}\n\nfunc (f *fakefd) Close() error {\n\treturn f.buffer.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\n\/\/Filter reporting filter (use package name, or package name request\/event prefix\n\n\/\/DefaultFilter create default filter\nfunc DefaultFilter() map[string]bool {\n\tvar result = make(map[string]bool)\n\tresult[\"dsunit\"] = true\n\tresult[\"vc\"] = true\n\tresult[\"sdk\"] = true\n\tresult[\"workflow.print\"] = true\n\tresult[\"build\"] = true\n\tresult[\"deploy\"] = true\n\tresult[\"storage\"] = true\n\tresult[\"exec.stdin\"] = true\n\tresult[\"exec.stdout\"] = true\n\tresult[\"endly\"] = true\n\tresult[\"workflow\"] = true\n\tresult[\"msg\"] = true\n\tresult[\"lambda\"] = true\n\tresult[\"apigateway\"] = true\n\tresult[\"iam\"] = true\n\tresult[\"aws\"] = true\n\tresult[\"gcp\"] = true\n\tresult[\"shared\"] = true\n\tresult[\"docker\"] = true\n\treturn result\n}\n\n\/\/WildcardFilter\nfunc WildcardFilter() map[string]bool {\n\treturn map[string]bool{\n\t\t\"*\": true,\n\t}\n}\n<commit_msg>added git filter<commit_after>package cli\n\n\/\/Filter reporting filter (use package name, or package name request\/event prefix\n\n\/\/DefaultFilter create default filter\nfunc DefaultFilter() map[string]bool {\n\tvar result = make(map[string]bool)\n\tresult[\"dsunit\"] = true\n\tresult[\"vc\"] = true\n\tresult[\"sdk\"] = true\n\tresult[\"workflow.print\"] = true\n\tresult[\"build\"] = true\n\tresult[\"deploy\"] = true\n\tresult[\"storage\"] = true\n\tresult[\"exec.stdin\"] = true\n\tresult[\"exec.stdout\"] = true\n\tresult[\"endly\"] = true\n\tresult[\"workflow\"] = true\n\tresult[\"msg\"] = true\n\tresult[\"lambda\"] = true\n\tresult[\"apigateway\"] = true\n\tresult[\"iam\"] = true\n\tresult[\"aws\"] = true\n\tresult[\"gcp\"] = true\n\tresult[\"shared\"] = true\n\tresult[\"docker\"] = true\n\tresult[\"git\"] = true\n\treturn result\n}\n\n\/\/WildcardFilter\nfunc WildcardFilter() map[string]bool {\n\treturn map[string]bool{\n\t\t\"*\": true,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/mailru\/easyjson\/bootstrap\"\n\t\/\/ Reference the gen package to be friendly to vendoring tools,\n\t\/\/ as it is an indirect dependency.\n\t\/\/ (The temporary bootstrapping code uses it.)\n\t_ \"github.com\/mailru\/easyjson\/gen\"\n\t\"github.com\/mailru\/easyjson\/parser\"\n)\n\nvar buildTags = flag.String(\"build_tags\", \"\", \"build tags to add to generated file\")\nvar snakeCase = flag.Bool(\"snake_case\", false, \"use snake_case names instead of CamelCase by default\")\nvar noStdMarshalers = flag.Bool(\"no_std_marshalers\", false, \"don't generate MarshalJSON\/UnmarshalJSON methods\")\nvar omitEmpty = flag.Bool(\"omit_empty\", false, \"omit empty fields by default\")\nvar allStructs = flag.Bool(\"all\", false, \"generate un-\/marshallers for all structs in a file\")\nvar leaveTemps = flag.Bool(\"leave_temps\", false, \"do not delete temporary files\")\nvar stubs = flag.Bool(\"stubs\", false, \"only generate stubs for marshallers\/unmarshallers methods\")\nvar noformat = flag.Bool(\"noformat\", false, \"do not run 'gofmt -w' on output file\")\n\nfunc generate(fname string) (err error) {\n\tp := parser.Parser{AllStructs: *allStructs}\n\tif err := p.Parse(fname); err != nil {\n\t\treturn fmt.Errorf(\"Error parsing %v: %v\", fname, err)\n\t}\n\n\tvar outName string\n\tif s := strings.TrimSuffix(fname, \".go\"); s == fname {\n\t\treturn fmt.Errorf(\"Filename must end in '.go'\")\n\t} else {\n\t\toutName = s + \"_easyjson.go\"\n\t}\n\n\tg := bootstrap.Generator{\n\t\tBuildTags: *buildTags,\n\t\tPkgPath: p.PkgPath,\n\t\tPkgName: p.PkgName,\n\t\tTypes: p.StructNames,\n\t\tSnakeCase: *snakeCase,\n\t\tNoStdMarshalers: *noStdMarshalers,\n\t\tOmitEmpty: *omitEmpty,\n\t\tLeaveTemps: *leaveTemps,\n\t\tOutName: outName,\n\t\tStubsOnly: *stubs,\n\t\tNoFormat: *noformat,\n\t}\n\n\tif err := g.Run(); err != nil {\n\t\treturn fmt.Errorf(\"Bootstrap failed: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tfiles := flag.Args()\n\n\tgofile := os.Getenv(\"GOFILE\")\n\tif len(files) == 0 && gofile != \"\" {\n\t\tfiles = []string{gofile}\n\t} else if len(files) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tfor _, fname := range files {\n\t\tif err := generate(fname); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<commit_msg>let filename be specified<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/mailru\/easyjson\/bootstrap\"\n\t\/\/ Reference the gen package to be friendly to vendoring tools,\n\t\/\/ as it is an indirect dependency.\n\t\/\/ (The temporary bootstrapping code uses it.)\n\t_ \"github.com\/mailru\/easyjson\/gen\"\n\t\"github.com\/mailru\/easyjson\/parser\"\n)\n\nvar buildTags = flag.String(\"build_tags\", \"\", \"build tags to add to generated file\")\nvar snakeCase = flag.Bool(\"snake_case\", false, \"use snake_case names instead of CamelCase by default\")\nvar noStdMarshalers = flag.Bool(\"no_std_marshalers\", false, \"don't generate MarshalJSON\/UnmarshalJSON methods\")\nvar omitEmpty = flag.Bool(\"omit_empty\", false, \"omit empty fields by default\")\nvar allStructs = flag.Bool(\"all\", false, \"generate un-\/marshallers for all structs in a file\")\nvar leaveTemps = flag.Bool(\"leave_temps\", false, \"do not delete temporary files\")\nvar stubs = flag.Bool(\"stubs\", false, \"only generate stubs for marshallers\/unmarshallers methods\")\nvar noformat = flag.Bool(\"noformat\", false, \"do not run 'gofmt -w' on output file\")\nvar specifiedName = flag.String(\"output_filename\", \"\", \"specify the filename of the output\")\n\nfunc generate(fname string) (err error) {\n\tp := parser.Parser{AllStructs: *allStructs}\n\tif err := p.Parse(fname); err != nil {\n\t\treturn fmt.Errorf(\"Error parsing %v: %v\", fname, err)\n\t}\n\n\tvar outName string\n\tif s := strings.TrimSuffix(fname, \".go\"); s == fname {\n\t\treturn fmt.Errorf(\"Filename must end in '.go'\")\n\t} else {\n\t\toutName = s + \"_easyjson.go\"\n\t}\n\n\tif *specifiedName != \"\" {\n\t\toutName = *specifiedName\n\t}\n\t\n\tg := bootstrap.Generator{\n\t\tBuildTags: *buildTags,\n\t\tPkgPath: p.PkgPath,\n\t\tPkgName: p.PkgName,\n\t\tTypes: p.StructNames,\n\t\tSnakeCase: *snakeCase,\n\t\tNoStdMarshalers: *noStdMarshalers,\n\t\tOmitEmpty: *omitEmpty,\n\t\tLeaveTemps: *leaveTemps,\n\t\tOutName: outName,\n\t\tStubsOnly: *stubs,\n\t\tNoFormat: *noformat,\n\t}\n\n\tif err := g.Run(); err != nil {\n\t\treturn fmt.Errorf(\"Bootstrap failed: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tfiles := flag.Args()\n\n\tgofile := os.Getenv(\"GOFILE\")\n\tif len(files) == 0 && gofile != \"\" {\n\t\tfiles = []string{gofile}\n\t} else if len(files) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tfor _, fname := range files {\n\t\tif err := generate(fname); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage util\n\nimport (\n\tcorepb \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/core\/v3\"\n\tendpointpb \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/endpoint\/v3\"\n\thttppb \"github.com\/envoyproxy\/go-control-plane\/envoy\/extensions\/upstreams\/http\/v3\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/golang\/protobuf\/ptypes\/any\"\n)\n\n\/\/ CreateUpstreamProtocolOptions creates a http2 protocol option as a typed upstream extension.\nfunc CreateUpstreamProtocolOptions() map[string]*any.Any {\n\to := &httppb.HttpProtocolOptions{\n\t\tUpstreamProtocolOptions: &httppb.HttpProtocolOptions_ExplicitHttpConfig_{\n\t\t\tExplicitHttpConfig: &httppb.HttpProtocolOptions_ExplicitHttpConfig{\n\t\t\t\tProtocolConfig: &httppb.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{},\n\t\t\t},\n\t\t},\n\t}\n\ta, _ := ptypes.MarshalAny(o)\n\n\treturn map[string]*any.Any{\n\t\tUpstreamProtocolOptions: a,\n\t}\n}\n\n\/\/ CreateLoadAssignment creates a cluster for a TCP\/IP port.\nfunc CreateLoadAssignment(hostname string, port uint32) *endpointpb.ClusterLoadAssignment {\n\treturn &endpointpb.ClusterLoadAssignment{\n\t\tClusterName: hostname,\n\t\tEndpoints: []*endpointpb.LocalityLbEndpoints{\n\t\t\t{\n\t\t\t\tLbEndpoints: []*endpointpb.LbEndpoint{\n\t\t\t\t\t{\n\t\t\t\t\t\tHostIdentifier: &endpointpb.LbEndpoint_Endpoint{\n\t\t\t\t\t\t\tEndpoint: &endpointpb.Endpoint{\n\t\t\t\t\t\t\t\tAddress: &corepb.Address{\n\t\t\t\t\t\t\t\t\tAddress: &corepb.Address_SocketAddress{\n\t\t\t\t\t\t\t\t\t\tSocketAddress: &corepb.SocketAddress{\n\t\t\t\t\t\t\t\t\t\t\tAddress: hostname,\n\t\t\t\t\t\t\t\t\t\t\tPortSpecifier: &corepb.SocketAddress_PortValue{\n\t\t\t\t\t\t\t\t\t\t\t\tPortValue: port,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ CreateUdsLoadAssignment creates a cluster for a unix domain socket.\nfunc CreateUdsLoadAssignment(clusterName string) *endpointpb.ClusterLoadAssignment {\n\treturn &endpointpb.ClusterLoadAssignment{\n\t\tClusterName: clusterName,\n\t\tEndpoints: []*endpointpb.LocalityLbEndpoints{\n\t\t\t{\n\t\t\t\tLbEndpoints: []*endpointpb.LbEndpoint{\n\t\t\t\t\t{\n\t\t\t\t\t\tHostIdentifier: &endpointpb.LbEndpoint_Endpoint{\n\t\t\t\t\t\t\tEndpoint: &endpointpb.Endpoint{\n\t\t\t\t\t\t\t\tAddress: &corepb.Address{\n\t\t\t\t\t\t\t\t\tAddress: &corepb.Address_Pipe{\n\t\t\t\t\t\t\t\t\t\tPipe: &corepb.Pipe{\n\t\t\t\t\t\t\t\t\t\t\tPath: clusterName,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<commit_msg>add anypb to fix copybara (#606)<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage util\n\nimport (\n\tcorepb \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/core\/v3\"\n\tendpointpb \"github.com\/envoyproxy\/go-control-plane\/envoy\/config\/endpoint\/v3\"\n\thttppb \"github.com\/envoyproxy\/go-control-plane\/envoy\/extensions\/upstreams\/http\/v3\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\tanypb \"github.com\/golang\/protobuf\/ptypes\/any\"\n)\n\n\/\/ CreateUpstreamProtocolOptions creates a http2 protocol option as a typed upstream extension.\nfunc CreateUpstreamProtocolOptions() map[string]*anypb.Any {\n\to := &httppb.HttpProtocolOptions{\n\t\tUpstreamProtocolOptions: &httppb.HttpProtocolOptions_ExplicitHttpConfig_{\n\t\t\tExplicitHttpConfig: &httppb.HttpProtocolOptions_ExplicitHttpConfig{\n\t\t\t\tProtocolConfig: &httppb.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{},\n\t\t\t},\n\t\t},\n\t}\n\ta, _ := ptypes.MarshalAny(o)\n\n\treturn map[string]*anypb.Any{\n\t\tUpstreamProtocolOptions: a,\n\t}\n}\n\n\/\/ CreateLoadAssignment creates a cluster for a TCP\/IP port.\nfunc CreateLoadAssignment(hostname string, port uint32) *endpointpb.ClusterLoadAssignment {\n\treturn &endpointpb.ClusterLoadAssignment{\n\t\tClusterName: hostname,\n\t\tEndpoints: []*endpointpb.LocalityLbEndpoints{\n\t\t\t{\n\t\t\t\tLbEndpoints: []*endpointpb.LbEndpoint{\n\t\t\t\t\t{\n\t\t\t\t\t\tHostIdentifier: &endpointpb.LbEndpoint_Endpoint{\n\t\t\t\t\t\t\tEndpoint: &endpointpb.Endpoint{\n\t\t\t\t\t\t\t\tAddress: &corepb.Address{\n\t\t\t\t\t\t\t\t\tAddress: &corepb.Address_SocketAddress{\n\t\t\t\t\t\t\t\t\t\tSocketAddress: &corepb.SocketAddress{\n\t\t\t\t\t\t\t\t\t\t\tAddress: hostname,\n\t\t\t\t\t\t\t\t\t\t\tPortSpecifier: &corepb.SocketAddress_PortValue{\n\t\t\t\t\t\t\t\t\t\t\t\tPortValue: port,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n\/\/ CreateUdsLoadAssignment creates a cluster for a unix domain socket.\nfunc CreateUdsLoadAssignment(clusterName string) *endpointpb.ClusterLoadAssignment {\n\treturn &endpointpb.ClusterLoadAssignment{\n\t\tClusterName: clusterName,\n\t\tEndpoints: []*endpointpb.LocalityLbEndpoints{\n\t\t\t{\n\t\t\t\tLbEndpoints: []*endpointpb.LbEndpoint{\n\t\t\t\t\t{\n\t\t\t\t\t\tHostIdentifier: &endpointpb.LbEndpoint_Endpoint{\n\t\t\t\t\t\t\tEndpoint: &endpointpb.Endpoint{\n\t\t\t\t\t\t\t\tAddress: &corepb.Address{\n\t\t\t\t\t\t\t\t\tAddress: &corepb.Address_Pipe{\n\t\t\t\t\t\t\t\t\t\tPipe: &corepb.Pipe{\n\t\t\t\t\t\t\t\t\t\t\tPath: clusterName,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nconst (\n\t\/\/ Name is the user facing name for this binary. Internally we call it\n\t\/\/ klientctl to avoid confusion.\n\tName = \"kd\"\n\n\t\/\/ KlientName is the user facing name for klient.\n\tKlientName = \"KD Daemon\"\n\n\t\/\/ KlientAddress is url of locally running klient to connect to send\n\t\/\/ user commands.\n\tKlientAddress = \"http:\/\/127.0.0.1:56789\/kite\"\n\n\t\/\/ KiteHome is full path to the kite key that we will use to authenticate\n\t\/\/ to the given klient.\n\tKiteHome = \"\/etc\/kite\"\n\n\t\/\/ KlientDirectory is full path to directory that holds klient.\n\tKlientDirectory = \"\/opt\/kite\/klient\"\n\n\t\/\/ KlientctlDirectory is full path to directory that holds klientctl.\n\tKlientctlDirectory = \"\/usr\/local\/bin\"\n\n\t\/\/ KlientctlBinName is the bin named that will be stored in the KlientctlDirectory.\n\tKlientctlBinName = \"kd\"\n\n\t\/\/ KontrolURL is the url to connect to authenticate local klient and get\n\t\/\/ list of machines.\n\tKontrolURL = \"https:\/\/koding.com\/kontrol\/kite\"\n\n\t\/\/ Version is the current version of klientctl. This number is used\n\t\/\/ by CheckUpdate to determine if current version is behind or equal to latest\n\t\/\/ version on S3 bucket.\n\tVersion = 14\n\n\tosName = runtime.GOOS\n\n\t\/\/ S3UpdateLocation is publically accessible url to check for new updates.\n\tS3UpdateLocation = \"https:\/\/koding-kd.s3.amazonaws.com\/latest-version.txt\"\n\n\t\/\/ S3KlientPath is publically accessible url for latest version of klient.\n\t\/\/ Each OS has its own version of binary, identifiable by OS suffix.\n\tS3KlientPath = \"https:\/\/koding-kd.s3.amazonaws.com\/klient-\" + osName\n\n\t\/\/ S3KlientctlPath is publically accessible url for latest version of\n\t\/\/ klientctl. Each OS has its own version of binary, identifiable by suffix.\n\tS3KlientctlPath = \"https:\/\/koding-kd.s3.amazonaws.com\/klientctl-\" + osName\n\n\t\/\/ SSHDefaultKeyDir is the default directory that stores users ssh key pairs.\n\tSSHDefaultKeyDir = \".ssh\"\n\n\t\/\/ SSHDefaultKeyName is the default name of the ssh key pair.\n\tSSHDefaultKeyName = \"kd-ssh-key\"\n)\n\n\/\/ KiteVersion is the version identifier used to connect to Kontrol.\nvar KiteVersion = fmt.Sprintf(\"0.0.%d\", Version)\n<commit_msg>Bump version to 15<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nconst (\n\t\/\/ Name is the user facing name for this binary. Internally we call it\n\t\/\/ klientctl to avoid confusion.\n\tName = \"kd\"\n\n\t\/\/ KlientName is the user facing name for klient.\n\tKlientName = \"KD Daemon\"\n\n\t\/\/ KlientAddress is url of locally running klient to connect to send\n\t\/\/ user commands.\n\tKlientAddress = \"http:\/\/127.0.0.1:56789\/kite\"\n\n\t\/\/ KiteHome is full path to the kite key that we will use to authenticate\n\t\/\/ to the given klient.\n\tKiteHome = \"\/etc\/kite\"\n\n\t\/\/ KlientDirectory is full path to directory that holds klient.\n\tKlientDirectory = \"\/opt\/kite\/klient\"\n\n\t\/\/ KlientctlDirectory is full path to directory that holds klientctl.\n\tKlientctlDirectory = \"\/usr\/local\/bin\"\n\n\t\/\/ KlientctlBinName is the bin named that will be stored in the KlientctlDirectory.\n\tKlientctlBinName = \"kd\"\n\n\t\/\/ KontrolURL is the url to connect to authenticate local klient and get\n\t\/\/ list of machines.\n\tKontrolURL = \"https:\/\/koding.com\/kontrol\/kite\"\n\n\t\/\/ Version is the current version of klientctl. This number is used\n\t\/\/ by CheckUpdate to determine if current version is behind or equal to latest\n\t\/\/ version on S3 bucket.\n\tVersion = 15\n\n\tosName = runtime.GOOS\n\n\t\/\/ S3UpdateLocation is publically accessible url to check for new updates.\n\tS3UpdateLocation = \"https:\/\/koding-kd.s3.amazonaws.com\/latest-version.txt\"\n\n\t\/\/ S3KlientPath is publically accessible url for latest version of klient.\n\t\/\/ Each OS has its own version of binary, identifiable by OS suffix.\n\tS3KlientPath = \"https:\/\/koding-kd.s3.amazonaws.com\/klient-\" + osName\n\n\t\/\/ S3KlientctlPath is publically accessible url for latest version of\n\t\/\/ klientctl. Each OS has its own version of binary, identifiable by suffix.\n\tS3KlientctlPath = \"https:\/\/koding-kd.s3.amazonaws.com\/klientctl-\" + osName\n\n\t\/\/ SSHDefaultKeyDir is the default directory that stores users ssh key pairs.\n\tSSHDefaultKeyDir = \".ssh\"\n\n\t\/\/ SSHDefaultKeyName is the default name of the ssh key pair.\n\tSSHDefaultKeyName = \"kd-ssh-key\"\n)\n\n\/\/ KiteVersion is the version identifier used to connect to Kontrol.\nvar KiteVersion = fmt.Sprintf(\"0.0.%d\", Version)\n<|endoftext|>"} {"text":"<commit_before>\/\/ API Version 1\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\/\/\"encoding\/base64\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/tmaiaroto\/discfg\/commands\"\n\t\"github.com\/tmaiaroto\/discfg\/config\"\n\t\/\/\"github.com\/ugorji\/go\/codec\" \/\/ <-- may eventually be used as a different output format. have to see what echo supports.\n\t\/\/\"log\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/ Set the routes for V1 API\nfunc v1Routes(e *echo.Echo) {\n\te.Put(\"\/v1\/:name\/key\/:key\", v1SetKey)\n\te.Get(\"\/v1\/:name\/key\/:key\", v1GetKey)\n\te.Delete(\"\/v1\/:name\/key\/:key\", v1DeleteKey)\n\n\te.Put(\"\/v1\/cfg\/:name\", v1CreateCfg)\n\te.Delete(\"\/v1\/cfg\/:name\", v1DeleteCfg)\n}\n\n\/\/ Gets a key from discfg\nfunc v1GetKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\tresp := commands.GetKey(Options)\n\t\/\/ Since this option is not needed for anything else, it's not held on the Options struct.\n\tcontentType := c.Query(\"type\")\n\n\t\/\/ This is very awesome. Very interesting possibilties now.\n\tswitch contentType {\n\tcase \"text\", \"text\/plain\", \"string\":\n\t\treturn c.String(http.StatusOK, string(resp.Node.Value.([]byte)))\n\t\tbreak\n\t\/\/ This one is going to be interesting. Weird? Bad practice? I don't know, but I dig it and it starts giving me wild ideas.\n\tcase \"html\", \"text\/html\":\n\t\treturn c.HTML(http.StatusOK, string(resp.Node.Value.([]byte)))\n\t\tbreak\n\t\/\/ TODO:\n\t\/\/case \"jsonp\":\n\t\/\/break\n\tcase \"json\", \"application\/json\":\n\t\tresp = formatJsonValue(resp)\n\t\tbreak\n\tdefault:\n\t\tresp = formatJsonValue(resp)\n\t\tbreak\n\t}\n\t\/\/ default response\n\treturn c.JSON(http.StatusOK, resp)\n}\n\n\/\/ Sets the Node Value (an interface{}) as a map[string]interface{} (from []byte which is how it's stored - at least for now)\n\/\/ so that it can be converted to JSON in the Echo response. If it can't be represented in a map, then it'll be set as a string.\n\/\/ For example, a string was set as the value, we can still represent that in JSON. However, if an image was stored...Then it's\n\/\/ going to look ugly. It won't be a base64 string, it'll be the string representation of the binary data. Which apparently Chrome\n\/\/ will render if given...But still. Not so hot. The user should know what they are setting and getting though and this should\n\/\/ still technically return JSON with a usable value. Valid JSON at that. Just with some funny looking characters =)\nfunc formatJsonValue(resp config.ResponseObject) config.ResponseObject {\n\t\/\/ Don't attempt to Unmarshal or anything if the Value is empty. We wouldn't want to create a panic now.\n\tif resp.Node.Value == nil {\n\t\treturn resp\n\t}\n\tvar dat map[string]interface{}\n\tif err := json.Unmarshal(resp.Node.Value.([]byte), &dat); err != nil {\n\t\tresp.Node.Value = string(resp.Node.Value.([]byte))\n\t} else {\n\t\tresp.Node.Value = dat\n\t}\n\treturn resp\n}\n\n\/\/ Sets a key in discfg\nfunc v1SetKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\tresp := config.ResponseObject{\n\t\tAction: \"set\",\n\t}\n\n\t\/\/ Allow the value to be passed via querystring param.\n\tOptions.Value = []byte(c.Query(\"value\"))\n\n\t\/\/ Overwrite that if the request body passes a value that can be read, preferring that.\n\tb, err := ioutil.ReadAll(c.Request().Body)\n\tif err != nil {\n\t\t\/\/ If reading the body failed and we don't have a value from the querystring parameter\n\t\t\/\/ then we have a (potential) problem.\n\t\t\/\/\n\t\t\/\/ Some data stores may be ok with an empty key value. DynamoDB is not. It will only\n\t\t\/\/ return a ValidationException error. Plus, even if it was allowed, it would really\n\t\t\/\/ confuse the user. Some random error reading the body of a request and poof, the data\n\t\t\/\/ vanishes? That'd be terrible UX.\n\t\t\/\/ log.Println(err)\n\t\tresp.Error = err.Error()\n\t\tresp.Message = \"Something went wrong reading the body of the request.\"\n\t\t\/\/ resp.ErrorCode = 500 <-- TODO: I need to come up with discfg specific error codes. Keep as const somewhere.\n\t\t\/\/\treturn c.JSON(http.StatusOK, resp)\n\t\t\/\/\tOr maybe return an HTTP status message... This is outside discfg's concern. It's not an error message\/code\n\t\t\/\/\tthat would ever be seen from the CLI, right? Or maybe it would. Maybe a more generic, \"error parsing key value\" ...\n\t} else if len(b) > 0 {\n\t\tOptions.Value = b\n\t}\n\n\tresp = commands.SetKey(Options)\n\n\treturn c.JSON(http.StatusOK, resp)\n}\n\n\/\/ Deletes a key in discfg\nfunc v1DeleteKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\treturn c.JSON(http.StatusOK, commands.DeleteKey(Options))\n}\n\n\/\/ Creates a new configuration\nfunc v1CreateCfg(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\n\t\/\/ Optionally set write capacity units (for DynamoDB, defaults to 1)\n\twu, err := strconv.ParseInt(c.Query(\"writeUnits\"), 10, 64)\n\tif err == nil {\n\t\tOptions.Storage.WriteCapacityUnits = wu\n\t}\n\t\/\/ Optionally set read capacity units (for DynamoDB, defaults to 2)\n\tru, err := strconv.ParseInt(c.Query(\"readUnits\"), 10, 64)\n\tif err == nil {\n\t\tOptions.Storage.ReadCapacityUnits = ru\n\t}\n\n\treturn c.JSON(http.StatusOK, commands.CreateCfg(Options))\n}\n\n\/\/ Deletes a configuration\nfunc v1DeleteCfg(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\treturn c.JSON(http.StatusOK, commands.DeleteCfg(Options))\n}\n<commit_msg>thinking about an OPTIONS request<commit_after>\/\/ API Version 1\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\/\/\"encoding\/base64\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/tmaiaroto\/discfg\/commands\"\n\t\"github.com\/tmaiaroto\/discfg\/config\"\n\t\/\/\"github.com\/ugorji\/go\/codec\" \/\/ <-- may eventually be used as a different output format. have to see what echo supports.\n\t\/\/\"log\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\n\/\/ Set the routes for V1 API\nfunc v1Routes(e *echo.Echo) {\n\te.Put(\"\/v1\/:name\/key\/:key\", v1SetKey)\n\te.Get(\"\/v1\/:name\/key\/:key\", v1GetKey)\n\te.Delete(\"\/v1\/:name\/key\/:key\", v1DeleteKey)\n\n\te.Put(\"\/v1\/cfg\/:name\", v1CreateCfg)\n\te.Delete(\"\/v1\/cfg\/:name\", v1DeleteCfg)\n\te.Options(\"\/v1\/cfg\/:name\", v1OptionsCfg)\n}\n\n\/\/ Gets a key from discfg\nfunc v1GetKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\tresp := commands.GetKey(Options)\n\t\/\/ Since this option is not needed for anything else, it's not held on the Options struct.\n\tcontentType := c.Query(\"type\")\n\n\t\/\/ This is very awesome. Very interesting possibilties now.\n\tswitch contentType {\n\tcase \"text\", \"text\/plain\", \"string\":\n\t\treturn c.String(http.StatusOK, string(resp.Node.Value.([]byte)))\n\t\tbreak\n\t\/\/ This one is going to be interesting. Weird? Bad practice? I don't know, but I dig it and it starts giving me wild ideas.\n\tcase \"html\", \"text\/html\":\n\t\treturn c.HTML(http.StatusOK, string(resp.Node.Value.([]byte)))\n\t\tbreak\n\t\/\/ TODO:\n\t\/\/case \"jsonp\":\n\t\/\/break\n\tcase \"json\", \"application\/json\":\n\t\tresp = formatJsonValue(resp)\n\t\tbreak\n\tdefault:\n\t\tresp = formatJsonValue(resp)\n\t\tbreak\n\t}\n\t\/\/ default response\n\treturn c.JSON(http.StatusOK, resp)\n}\n\n\/\/ Sets the Node Value (an interface{}) as a map[string]interface{} (from []byte which is how it's stored - at least for now)\n\/\/ so that it can be converted to JSON in the Echo response. If it can't be represented in a map, then it'll be set as a string.\n\/\/ For example, a string was set as the value, we can still represent that in JSON. However, if an image was stored...Then it's\n\/\/ going to look ugly. It won't be a base64 string, it'll be the string representation of the binary data. Which apparently Chrome\n\/\/ will render if given...But still. Not so hot. The user should know what they are setting and getting though and this should\n\/\/ still technically return JSON with a usable value. Valid JSON at that. Just with some funny looking characters =)\nfunc formatJsonValue(resp config.ResponseObject) config.ResponseObject {\n\t\/\/ Don't attempt to Unmarshal or anything if the Value is empty. We wouldn't want to create a panic now.\n\tif resp.Node.Value == nil {\n\t\treturn resp\n\t}\n\tvar dat map[string]interface{}\n\tif err := json.Unmarshal(resp.Node.Value.([]byte), &dat); err != nil {\n\t\tresp.Node.Value = string(resp.Node.Value.([]byte))\n\t} else {\n\t\tresp.Node.Value = dat\n\t}\n\treturn resp\n}\n\n\/\/ Sets a key in discfg\nfunc v1SetKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\tresp := config.ResponseObject{\n\t\tAction: \"set\",\n\t}\n\n\t\/\/ Allow the value to be passed via querystring param.\n\tOptions.Value = []byte(c.Query(\"value\"))\n\n\t\/\/ Overwrite that if the request body passes a value that can be read, preferring that.\n\tb, err := ioutil.ReadAll(c.Request().Body)\n\tif err != nil {\n\t\t\/\/ If reading the body failed and we don't have a value from the querystring parameter\n\t\t\/\/ then we have a (potential) problem.\n\t\t\/\/\n\t\t\/\/ Some data stores may be ok with an empty key value. DynamoDB is not. It will only\n\t\t\/\/ return a ValidationException error. Plus, even if it was allowed, it would really\n\t\t\/\/ confuse the user. Some random error reading the body of a request and poof, the data\n\t\t\/\/ vanishes? That'd be terrible UX.\n\t\t\/\/ log.Println(err)\n\t\tresp.Error = err.Error()\n\t\tresp.Message = \"Something went wrong reading the body of the request.\"\n\t\t\/\/ resp.ErrorCode = 500 <-- TODO: I need to come up with discfg specific error codes. Keep as const somewhere.\n\t\t\/\/\treturn c.JSON(http.StatusOK, resp)\n\t\t\/\/\tOr maybe return an HTTP status message... This is outside discfg's concern. It's not an error message\/code\n\t\t\/\/\tthat would ever be seen from the CLI, right? Or maybe it would. Maybe a more generic, \"error parsing key value\" ...\n\t} else if len(b) > 0 {\n\t\tOptions.Value = b\n\t}\n\n\tresp = commands.SetKey(Options)\n\n\treturn c.JSON(http.StatusOK, resp)\n}\n\n\/\/ Deletes a key in discfg\nfunc v1DeleteKey(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\tOptions.Key = c.Param(\"key\")\n\treturn c.JSON(http.StatusOK, commands.DeleteKey(Options))\n}\n\n\/\/ Creates a new configuration\nfunc v1CreateCfg(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\n\t\/\/ Optionally set write capacity units (for DynamoDB, defaults to 1)\n\twu, err := strconv.ParseInt(c.Query(\"writeUnits\"), 10, 64)\n\tif err == nil {\n\t\tOptions.Storage.WriteCapacityUnits = wu\n\t}\n\t\/\/ Optionally set read capacity units (for DynamoDB, defaults to 2)\n\tru, err := strconv.ParseInt(c.Query(\"readUnits\"), 10, 64)\n\tif err == nil {\n\t\tOptions.Storage.ReadCapacityUnits = ru\n\t}\n\n\treturn c.JSON(http.StatusOK, commands.CreateCfg(Options))\n}\n\n\/\/ Deletes a configuration\nfunc v1DeleteCfg(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\treturn c.JSON(http.StatusOK, commands.DeleteCfg(Options))\n}\n\n\/\/ Gets\/sets options for a configuration\nfunc v1OptionsCfg(c *echo.Context) error {\n\tOptions.CfgName = c.Param(\"name\")\n\t\/\/ If nothing that would change a configuration's options\/settings are passed,\n\t\/\/ then return commands.Info() ... Otherwise, call a new commands.UpdateInfo() function...\n\t\/\/ That then adjusts settings and returns Info() anyway.\n\t\/\/ Or maybe commands.Info() can be a getter and setter...Eh... maybe.\n\t\/\/\n\t\/\/ OR. Make another route and handler for adjusting configuration settings using PUT.\n\t\/\/ https:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec9.html ...says OPTIONS can contain\n\t\/\/ some data in the body request, though defines no use for it. So it's up in the air.\n\t\/\/\n\t\/\/ It might then also be neat to hav an OPTIONS for other routes.\n\t\/\/ These responses would all be handled here and be just for self-documenting the API.\n\t\/\/ OPTIONS request on a key path for example would return information about how to\n\t\/\/ update or delete that specific key. On \/v1\/:name\/key it might then just be\n\t\/\/ instructions for creating a key.\n\t\/\/\n\t\/\/ Need to think this over. Sleep on it.\n\treturn c.JSON(http.StatusOK, commands.Info(Options, []string{}))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The server program issues Google search requests and demonstrates the use of\n\/\/ the go.net Context API. It serves on port 8080.\n\/\/\n\/\/ The \/search endpoint accepts these query params:\n\/\/ q=the Google search query\n\/\/ timeout=a timeout for the request, in time.Duration format\n\/\/\n\/\/ For example, http:\/\/localhost:8080\/search?q=golang&timeout=1s serves the\n\/\/ first few Google search results for \"golang\" or a \"deadline exceeded\" error\n\/\/ if the timeout expires.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/context\"\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/niilo\/golib\/context\/google\"\n\t\"github.com\/niilo\/golib\/context\/userip\"\n)\n\nfunc Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprint(w, \"Welcome!\\n\")\n}\n\nfunc Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tfmt.Fprintf(w, \"hello, %s!\\n\", ps.ByName(\"name\"))\n}\n\nfunc timeoutHandler(h http.Handler) http.Handler {\n\treturn http.TimeoutHandler(h, 1*time.Second, \"timed out\")\n}\n\n\/\/func myApp(w http.ResponseWriter, r *http.Request) {\n\/\/\tw.Write([]byte(\"Hello world!\"))\n\/\/}\n\nfunc main() {\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", Index)\n\trouter.GET(\"\/hello\/:name\", Hello)\n\trouter.GET(\"\/search\", handleSearch)\n\n\t\/\/log.Fatal(http.ListenAndServe(\":8080\", router))\n\n\tth := throttled.Interval(throttled.PerSec(10), 1, &throttled.VaryBy{Path: true}, 50)\n\t\/\/myHandler := http.HandlerFunc(myApp)\n\n\tchain := alice.New(th.Throttle, timeoutHandler).Then(router)\n\thttp.ListenAndServe(\":8080\", chain)\n\n}\n\n\/\/ handleSearch handles URLs like \/search?q=golang&timeout=1s by forwarding the\n\/\/ query to google.Search. If the query param includes timeout, the search is\n\/\/ canceled after that duration elapses.\nfunc handleSearch(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\t\/\/ ctx is the Context for this handler. Calling cancel closes the\n\t\/\/ ctx.Done channel, which is the cancellation signal for requests\n\t\/\/ started by this handler.\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\ttimeout, err := time.ParseDuration(req.FormValue(\"timeout\"))\n\tif err == nil {\n\t\t\/\/ The request has a timeout, so create a context that is\n\t\t\/\/ canceled automatically when the timeout expires.\n\t\tctx, cancel = context.WithTimeout(context.Background(), timeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel() \/\/ Cancel ctx as soon as handleSearch returns.\n\n\t\/\/ Check the search query.\n\tquery := req.FormValue(\"q\")\n\tif query == \"\" {\n\t\thttp.Error(w, \"no query\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Store the user IP in ctx for use by code in other packages.\n\tuserIP, err := userip.FromRequest(req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tctx = userip.NewContext(ctx, userIP)\n\n\t\/\/ Run the Google search and print the results.\n\tstart := time.Now()\n\tresults, err := google.Search(ctx, query)\n\telapsed := time.Since(start)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif err := resultsTemplate.Execute(w, struct {\n\t\tResults google.Results\n\t\tTimeout, Elapsed time.Duration\n\t}{\n\t\tResults: results,\n\t\tTimeout: timeout,\n\t\tElapsed: elapsed,\n\t}); err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n}\n\nvar resultsTemplate = template.Must(template.New(\"results\").Parse(`\n<html>\n<head\/>\n<body>\n <ol>\n {{range .Results}}\n <li>{{.Title}} - <a href=\"{{.URL}}\">{{.URL}}<\/a><\/li>\n {{end}}\n <\/ol>\n <p>{{len .Results}} results in {{.Elapsed}}; timeout {{.Timeout}}<\/p>\n<\/body>\n<\/html>\n`))\n<commit_msg>appContext with Mongo session.<commit_after>\/\/ The server program issues Google search requests and demonstrates the use of\n\/\/ the go.net Context API. It serves on port 8080.\n\/\/\n\/\/ The \/search endpoint accepts these query params:\n\/\/ q=the Google search query\n\/\/ timeout=a timeout for the request, in time.Duration format\n\/\/\n\/\/ For example, http:\/\/localhost:8080\/search?q=golang&timeout=1s serves the\n\/\/ first few Google search results for \"golang\" or a \"deadline exceeded\" error\n\/\/ if the timeout expires.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/context\"\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/niilo\/golib\/context\/google\"\n\t\"github.com\/niilo\/golib\/context\/userip\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\n\/\/ appContext contains our local context; our database pool, session store, template\n\/\/ registry and anything else our handlers need to access. We'll create an instance of it\n\/\/ in our main() function and then explicitly pass a reference to it for our handlers to access.\ntype appContext struct {\n\tmongoSession *mgo.Session\n}\n\nfunc Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprint(w, \"Welcome!\\n\")\n}\n\nfunc Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tfmt.Fprintf(w, \"hello, %s!\\n\", ps.ByName(\"name\"))\n}\n\nfunc timeoutHandler(h http.Handler) http.Handler {\n\treturn http.TimeoutHandler(h, 1*time.Second, \"timed out\")\n}\n\n\/\/func myApp(w http.ResponseWriter, r *http.Request) {\n\/\/\tw.Write([]byte(\"Hello world!\"))\n\/\/}\n\nfunc main() {\n\turi := os.Getenv(\"INKBLOT_MONGODBURL\")\n\tif uri == \"\" {\n\t\tfmt.Println(\"no connection string provided\")\n\t\tos.Exit(1)\n\t}\n\n\tmongoSession, err := mgo.Dial(uri)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer mongoSession.Close()\n\n\t\/\/ Optional. Switch the session to a monotonic behavior.\n\tmongoSession.SetMode(mgo.Monotonic, true)\n\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", Index)\n\trouter.GET(\"\/hello\/:name\", Hello)\n\trouter.GET(\"\/search\", handleSearch)\n\n\t\/\/log.Fatal(http.ListenAndServe(\":8080\", router))\n\n\tth := throttled.Interval(throttled.PerSec(10), 1, &throttled.VaryBy{Path: true}, 50)\n\t\/\/myHandler := http.HandlerFunc(myApp)\n\n\tchain := alice.New(th.Throttle, timeoutHandler).Then(router)\n\thttp.ListenAndServe(\":8080\", chain)\n\n}\n\n\/\/ handleSearch handles URLs like \/search?q=golang&timeout=1s by forwarding the\n\/\/ query to google.Search. If the query param includes timeout, the search is\n\/\/ canceled after that duration elapses.\nfunc handleSearch(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\t\/\/ ctx is the Context for this handler. Calling cancel closes the\n\t\/\/ ctx.Done channel, which is the cancellation signal for requests\n\t\/\/ started by this handler.\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\ttimeout, err := time.ParseDuration(req.FormValue(\"timeout\"))\n\tif err == nil {\n\t\t\/\/ The request has a timeout, so create a context that is\n\t\t\/\/ canceled automatically when the timeout expires.\n\t\tctx, cancel = context.WithTimeout(context.Background(), timeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel() \/\/ Cancel ctx as soon as handleSearch returns.\n\n\t\/\/ Check the search query.\n\tquery := req.FormValue(\"q\")\n\tif query == \"\" {\n\t\thttp.Error(w, \"no query\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Store the user IP in ctx for use by code in other packages.\n\tuserIP, err := userip.FromRequest(req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tctx = userip.NewContext(ctx, userIP)\n\n\t\/\/ Run the Google search and print the results.\n\tstart := time.Now()\n\tresults, err := google.Search(ctx, query)\n\telapsed := time.Since(start)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif err := resultsTemplate.Execute(w, struct {\n\t\tResults google.Results\n\t\tTimeout, Elapsed time.Duration\n\t}{\n\t\tResults: results,\n\t\tTimeout: timeout,\n\t\tElapsed: elapsed,\n\t}); err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n}\n\nvar resultsTemplate = template.Must(template.New(\"results\").Parse(`\n<html>\n<head\/>\n<body>\n <ol>\n {{range .Results}}\n <li>{{.Title}} - <a href=\"{{.URL}}\">{{.URL}}<\/a><\/li>\n {{end}}\n <\/ol>\n <p>{{len .Results}} results in {{.Elapsed}}; timeout {{.Timeout}}<\/p>\n<\/body>\n<\/html>\n`))\n<|endoftext|>"} {"text":"<commit_before>package ecs\n\nimport (\n\t\"time\"\n\n\t\"github.com\/denverdino\/aliyungo\/util\"\n)\n\n\/\/ InstanceStatus represents instance status\ntype InstanceStatus string\n\n\/\/ Constants of InstanceStatus\nconst (\n\tCreating = InstanceStatus(\"Creating\")\n\tRunning = InstanceStatus(\"Running\")\n\tStarting = InstanceStatus(\"Starting\")\n\n\tStopped = InstanceStatus(\"Stopped\")\n\tStopping = InstanceStatus(\"Stopping\")\n)\n\ntype InternetChargeType string\n\nconst (\n\tPayByBandwidth = InternetChargeType(\"PayByBandwidth\")\n\tPayByTraffic = InternetChargeType(\"PayByTraffic\")\n)\n\ntype LockReason string\n\nconst (\n\tLockReasonFinancial = LockReason(\"financial\")\n\tLockReasonSecurity = LockReason(\"security\")\n)\n\ntype DescribeInstanceStatusArgs struct {\n\tRegionId Region\n\tZoneId string\n\tPagination\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&instancestatusitemtype\ntype InstanceStatusItemType struct {\n\tInstanceId string\n\tStatus InstanceStatus\n}\n\ntype DescribeInstanceStatusResponse struct {\n\tCommonResponse\n\tPaginationResult\n\tInstanceStatuses struct {\n\t\tInstanceStatus []InstanceStatusItemType\n\t}\n}\n\n\/\/ DescribeInstanceStatus describes instance status\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstancestatus\nfunc (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *PaginationResult, err error) {\n\targs.validate()\n\tresponse := DescribeInstanceStatusResponse{}\n\n\terr = client.Invoke(\"DescribeInstanceStatus\", args, &response)\n\n\tif err == nil {\n\t\treturn response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil\n\t}\n\n\treturn nil, nil, err\n}\n\ntype StopInstanceArgs struct {\n\tInstanceId string\n\tForceStop bool\n}\n\ntype StopInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ StopInstance stops instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&stopinstance\nfunc (client *Client) StopInstance(instanceId string, forceStop bool) error {\n\targs := StopInstanceArgs{\n\t\tInstanceId: instanceId,\n\t\tForceStop: forceStop,\n\t}\n\tresponse := StopInstanceResponse{}\n\terr := client.Invoke(\"StopInstance\", &args, &response)\n\treturn err\n}\n\ntype StartInstanceArgs struct {\n\tInstanceId string\n}\n\ntype StartInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ StartInstance starts instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&startinstance\nfunc (client *Client) StartInstance(instanceId string) error {\n\targs := StartInstanceArgs{InstanceId: instanceId}\n\tresponse := StartInstanceResponse{}\n\terr := client.Invoke(\"StartInstance\", &args, &response)\n\treturn err\n}\n\ntype RebootInstanceArgs struct {\n\tInstanceId string\n\tForceStop bool\n}\n\ntype RebootInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ RebootInstance reboot instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&rebootinstance\nfunc (client *Client) RebootInstance(instanceId string, forceStop bool) error {\n\trequest := RebootInstanceArgs{\n\t\tInstanceId: instanceId,\n\t\tForceStop: forceStop,\n\t}\n\tresponse := RebootInstanceResponse{}\n\terr := client.Invoke(\"RebootInstance\", &request, &response)\n\treturn err\n}\n\ntype DescribeInstanceAttributeArgs struct {\n\tInstanceId string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&operationlockstype\ntype OperationLocksType struct {\n\tLockReason []LockReason \/\/enum for financial, security\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&securitygroupidsettype\ntype SecurityGroupIdSetType struct {\n\tSecurityGroupId string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&ipaddresssettype\ntype IpAddressSetType struct {\n\tIpAddress []string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&vpcattributestype\ntype VpcAttributesType struct {\n\tVpcId string\n\tVSwitchId string\n\tPrivateIpAddress IpAddressSetType\n\tNatIpAddress string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&eipaddressassociatetype\ntype EipAddressAssociateType struct {\n\tAllocationId string\n\tIpAddress string\n\tBandwidth int\n\tInternetChargeType InternetChargeType\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&instanceattributestype\ntype InstanceAttributesType struct {\n\tInstanceId string\n\tInstanceName string\n\tDescription string\n\tImageId string\n\tRegionId Region\n\tZoneId string\n\tClusterId string\n\tInstanceType string\n\tHostName string\n\tStatus InstanceStatus\n\tOperationLocks OperationLocksType\n\tSecurityGroupIds struct {\n\t\tSecurityGroupId []string\n\t}\n\tPublicIpAddress IpAddressSetType\n\tInnerIpAddress IpAddressSetType\n\tInstanceNetworkType string \/\/enum Classic | Vpc\n\tInternetMaxBandwidthIn int\n\tInternetMaxBandwidthOut int\n\tInternetChargeType InternetChargeType\n\tCreationTime util.ISO6801Time \/\/time.Time\n\tVpcAttributes VpcAttributesType\n\tEipAddress EipAddressAssociateType\n}\n\ntype DescribeInstanceAttributeResponse struct {\n\tCommonResponse\n\tInstanceAttributesType\n}\n\n\/\/ DescribeInstanceAttribute describes instance attribute\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstanceattribute\nfunc (client *Client) DescribeInstanceAttribute(instanceId string) (instance *InstanceAttributesType, err error) {\n\targs := DescribeInstanceAttributeArgs{InstanceId: instanceId}\n\n\tresponse := DescribeInstanceAttributeResponse{}\n\terr = client.Invoke(\"DescribeInstanceAttribute\", &args, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.InstanceAttributesType, err\n}\n\n\/\/ Default timeout value for WaitForInstance method\nconst InstanceDefaultTimeout = 120\n\n\/\/ WaitForInstance waits for instance to given status\nfunc (client *Client) WaitForInstance(instanceId string, status InstanceStatus, timeout int) error {\n\tif timeout <= 0 {\n\t\ttimeout = InstanceDefaultTimeout\n\t}\n\tfor {\n\t\tinstance, err := client.DescribeInstanceAttribute(instanceId)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif instance.Status == status {\n\t\t\t\/\/TODO\n\t\t\t\/\/Sleep one more time for timing issues\n\t\t\ttime.Sleep(DefaultWaitForInterval * time.Second)\n\t\t\tbreak\n\t\t}\n\t\ttimeout = timeout - DefaultWaitForInterval\n\t\tif timeout <= 0 {\n\t\t\treturn getECSErrorFromString(\"Timeout\")\n\t\t}\n\t\ttime.Sleep(DefaultWaitForInterval * time.Second)\n\n\t}\n\treturn nil\n}\n\ntype DescribeInstanceVncUrlArgs struct {\n\tRegionId Region\n\tInstanceId string\n}\n\ntype DescribeInstanceVncUrlResponse struct {\n\tCommonResponse\n\tVncUrl string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstancevncurl\nfunc (client *Client) DescribeInstanceVncUrl(args *DescribeInstanceVncUrlArgs) (string, error) {\n\tresponse := DescribeInstanceVncUrlResponse{}\n\n\terr := client.Invoke(\"DescribeInstanceVncUrl\", args, &response)\n\n\tif err == nil {\n\t\treturn response.VncUrl, nil\n\t}\n\n\treturn \"\", err\n}\n\ntype DescribeInstancesArgs struct {\n\tRegionId Region\n\tVpcId string\n\tVSwitchId string\n\tZoneId string\n\tInstanceIds string\n\tInstanceNetworkType string\n\tPrivateIpAddresses string\n\tInnerIpAddresses string\n\tPublicIpAddresses string\n\tSecurityGroupId string\n\tPagination\n}\n\ntype DescribeInstancesResponse struct {\n\tCommonResponse\n\tPaginationResult\n\tInstances struct {\n\t\tInstance []InstanceAttributesType\n\t}\n}\n\n\/\/ DescribeInstances describes instances\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstances\nfunc (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *PaginationResult, err error) {\n\targs.validate()\n\tresponse := DescribeInstancesResponse{}\n\n\terr = client.Invoke(\"DescribeInstances\", args, &response)\n\n\tif err == nil {\n\t\treturn response.Instances.Instance, &response.PaginationResult, nil\n\t}\n\n\treturn nil, nil, err\n}\n\ntype DeleteInstanceArgs struct {\n\tInstanceId string\n}\n\ntype DeleteInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ DeleteInstance deletes instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&deleteinstance\nfunc (client *Client) DeleteInstance(instanceId string) error {\n\targs := DeleteInstanceArgs{InstanceId: instanceId}\n\tresponse := DeleteInstanceResponse{}\n\terr := client.Invoke(\"DeleteInstance\", &args, &response)\n\treturn err\n}\n\ntype DataDiskType struct {\n\tSize int\n\tCategory DiskCategory \/\/Enum cloud, ephemeral, ephemeral_ssd\n\tSnapshotId string\n\tDiskName string\n\tDescription string\n\tDevice string\n\tDeleteWithInstance bool\n}\n\ntype SystemDiskType struct {\n\tCategory DiskCategory \/\/Enum cloud, ephemeral, ephemeral_ssd\n\tDiskName string\n\tDescription string\n}\n\ntype CreateInstanceArgs struct {\n\tRegionId Region\n\tZoneId string\n\tImageId string\n\tInstanceType string\n\tSecurityGroupId string\n\tInstanceName string\n\tDescription string\n\tInternetChargeType InternetChargeType\n\tInternetMaxBandwidthIn int\n\tInternetMaxBandwidthOut int\n\tHostName string\n\tPassword string\n\tSystemDisk SystemDiskType\n\tDataDisk []DataDiskType\n\tVSwitchId string\n\tPrivateIpAddress string\n\tClientToken string\n}\n\ntype CreateInstanceResponse struct {\n\tCommonResponse\n\tInstanceId string\n}\n\n\/\/ CreateInstance creates instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&createinstance\nfunc (client *Client) CreateInstance(args *CreateInstanceArgs) (instanceId string, err error) {\n\tresponse := CreateInstanceResponse{}\n\terr = client.Invoke(\"CreateInstance\", args, &response)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn response.InstanceId, err\n}\n<commit_msg>debug<commit_after>package ecs\n\nimport (\n\t\"time\"\n)\n\n\/\/ InstanceStatus represents instance status\ntype InstanceStatus string\n\n\/\/ Constants of InstanceStatus\nconst (\n\tCreating = InstanceStatus(\"Creating\")\n\tRunning = InstanceStatus(\"Running\")\n\tStarting = InstanceStatus(\"Starting\")\n\n\tStopped = InstanceStatus(\"Stopped\")\n\tStopping = InstanceStatus(\"Stopping\")\n)\n\ntype InternetChargeType string\n\nconst (\n\tPayByBandwidth = InternetChargeType(\"PayByBandwidth\")\n\tPayByTraffic = InternetChargeType(\"PayByTraffic\")\n)\n\ntype LockReason string\n\nconst (\n\tLockReasonFinancial = LockReason(\"financial\")\n\tLockReasonSecurity = LockReason(\"security\")\n)\n\ntype DescribeInstanceStatusArgs struct {\n\tRegionId Region\n\tZoneId string\n\tPagination\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&instancestatusitemtype\ntype InstanceStatusItemType struct {\n\tInstanceId string\n\tStatus InstanceStatus\n}\n\ntype DescribeInstanceStatusResponse struct {\n\tCommonResponse\n\tPaginationResult\n\tInstanceStatuses struct {\n\t\tInstanceStatus []InstanceStatusItemType\n\t}\n}\n\n\/\/ DescribeInstanceStatus describes instance status\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstancestatus\nfunc (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *PaginationResult, err error) {\n\targs.validate()\n\tresponse := DescribeInstanceStatusResponse{}\n\n\terr = client.Invoke(\"DescribeInstanceStatus\", args, &response)\n\n\tif err == nil {\n\t\treturn response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil\n\t}\n\n\treturn nil, nil, err\n}\n\ntype StopInstanceArgs struct {\n\tInstanceId string\n\tForceStop bool\n}\n\ntype StopInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ StopInstance stops instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&stopinstance\nfunc (client *Client) StopInstance(instanceId string, forceStop bool) error {\n\targs := StopInstanceArgs{\n\t\tInstanceId: instanceId,\n\t\tForceStop: forceStop,\n\t}\n\tresponse := StopInstanceResponse{}\n\terr := client.Invoke(\"StopInstance\", &args, &response)\n\treturn err\n}\n\ntype StartInstanceArgs struct {\n\tInstanceId string\n}\n\ntype StartInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ StartInstance starts instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&startinstance\nfunc (client *Client) StartInstance(instanceId string) error {\n\targs := StartInstanceArgs{InstanceId: instanceId}\n\tresponse := StartInstanceResponse{}\n\terr := client.Invoke(\"StartInstance\", &args, &response)\n\treturn err\n}\n\ntype RebootInstanceArgs struct {\n\tInstanceId string\n\tForceStop bool\n}\n\ntype RebootInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ RebootInstance reboot instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&rebootinstance\nfunc (client *Client) RebootInstance(instanceId string, forceStop bool) error {\n\trequest := RebootInstanceArgs{\n\t\tInstanceId: instanceId,\n\t\tForceStop: forceStop,\n\t}\n\tresponse := RebootInstanceResponse{}\n\terr := client.Invoke(\"RebootInstance\", &request, &response)\n\treturn err\n}\n\ntype DescribeInstanceAttributeArgs struct {\n\tInstanceId string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&operationlockstype\ntype OperationLocksType struct {\n\tLockReason []LockReason \/\/enum for financial, security\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&securitygroupidsettype\ntype SecurityGroupIdSetType struct {\n\tSecurityGroupId string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&ipaddresssettype\ntype IpAddressSetType struct {\n\tIpAddress []string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&vpcattributestype\ntype VpcAttributesType struct {\n\tVpcId string\n\tVSwitchId string\n\tPrivateIpAddress IpAddressSetType\n\tNatIpAddress string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&eipaddressassociatetype\ntype EipAddressAssociateType struct {\n\tAllocationId string\n\tIpAddress string\n\tBandwidth int\n\tInternetChargeType InternetChargeType\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/datatype&instanceattributestype\ntype InstanceAttributesType struct {\n\tInstanceId string\n\tInstanceName string\n\tDescription string\n\tImageId string\n\tRegionId Region\n\tZoneId string\n\tClusterId string\n\tInstanceType string\n\tHostName string\n\tStatus InstanceStatus\n\tOperationLocks OperationLocksType\n\tSecurityGroupIds struct {\n\t\tSecurityGroupId []string\n\t}\n\tPublicIpAddress IpAddressSetType\n\tInnerIpAddress IpAddressSetType\n\tInstanceNetworkType string \/\/enum Classic | Vpc\n\tInternetMaxBandwidthIn int\n\tInternetMaxBandwidthOut int\n\tInternetChargeType InternetChargeType\n\tCreationTime string\n\tVpcAttributes VpcAttributesType\n\tEipAddress EipAddressAssociateType\n}\n\ntype DescribeInstanceAttributeResponse struct {\n\tCommonResponse\n\tInstanceAttributesType\n}\n\n\/\/ DescribeInstanceAttribute describes instance attribute\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstanceattribute\nfunc (client *Client) DescribeInstanceAttribute(instanceId string) (instance *InstanceAttributesType, err error) {\n\targs := DescribeInstanceAttributeArgs{InstanceId: instanceId}\n\n\tresponse := DescribeInstanceAttributeResponse{}\n\terr = client.Invoke(\"DescribeInstanceAttribute\", &args, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.InstanceAttributesType, err\n}\n\n\/\/ Default timeout value for WaitForInstance method\nconst InstanceDefaultTimeout = 120\n\n\/\/ WaitForInstance waits for instance to given status\nfunc (client *Client) WaitForInstance(instanceId string, status InstanceStatus, timeout int) error {\n\tif timeout <= 0 {\n\t\ttimeout = InstanceDefaultTimeout\n\t}\n\tfor {\n\t\tinstance, err := client.DescribeInstanceAttribute(instanceId)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif instance.Status == status {\n\t\t\t\/\/TODO\n\t\t\t\/\/Sleep one more time for timing issues\n\t\t\ttime.Sleep(DefaultWaitForInterval * time.Second)\n\t\t\tbreak\n\t\t}\n\t\ttimeout = timeout - DefaultWaitForInterval\n\t\tif timeout <= 0 {\n\t\t\treturn getECSErrorFromString(\"Timeout\")\n\t\t}\n\t\ttime.Sleep(DefaultWaitForInterval * time.Second)\n\n\t}\n\treturn nil\n}\n\ntype DescribeInstanceVncUrlArgs struct {\n\tRegionId Region\n\tInstanceId string\n}\n\ntype DescribeInstanceVncUrlResponse struct {\n\tCommonResponse\n\tVncUrl string\n}\n\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstancevncurl\nfunc (client *Client) DescribeInstanceVncUrl(args *DescribeInstanceVncUrlArgs) (string, error) {\n\tresponse := DescribeInstanceVncUrlResponse{}\n\n\terr := client.Invoke(\"DescribeInstanceVncUrl\", args, &response)\n\n\tif err == nil {\n\t\treturn response.VncUrl, nil\n\t}\n\n\treturn \"\", err\n}\n\ntype DescribeInstancesArgs struct {\n\tRegionId Region\n\tVpcId string\n\tVSwitchId string\n\tZoneId string\n\tInstanceIds string\n\tInstanceNetworkType string\n\tPrivateIpAddresses string\n\tInnerIpAddresses string\n\tPublicIpAddresses string\n\tSecurityGroupId string\n\tPagination\n}\n\ntype DescribeInstancesResponse struct {\n\tCommonResponse\n\tPaginationResult\n\tInstances struct {\n\t\tInstance []InstanceAttributesType\n\t}\n}\n\n\/\/ DescribeInstances describes instances\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&describeinstances\nfunc (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *PaginationResult, err error) {\n\targs.validate()\n\tresponse := DescribeInstancesResponse{}\n\n\terr = client.Invoke(\"DescribeInstances\", args, &response)\n\n\tif err == nil {\n\t\treturn response.Instances.Instance, &response.PaginationResult, nil\n\t}\n\n\treturn nil, nil, err\n}\n\ntype DeleteInstanceArgs struct {\n\tInstanceId string\n}\n\ntype DeleteInstanceResponse struct {\n\tCommonResponse\n}\n\n\/\/ DeleteInstance deletes instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&deleteinstance\nfunc (client *Client) DeleteInstance(instanceId string) error {\n\targs := DeleteInstanceArgs{InstanceId: instanceId}\n\tresponse := DeleteInstanceResponse{}\n\terr := client.Invoke(\"DeleteInstance\", &args, &response)\n\treturn err\n}\n\ntype DataDiskType struct {\n\tSize int\n\tCategory DiskCategory \/\/Enum cloud, ephemeral, ephemeral_ssd\n\tSnapshotId string\n\tDiskName string\n\tDescription string\n\tDevice string\n\tDeleteWithInstance bool\n}\n\ntype SystemDiskType struct {\n\tCategory DiskCategory \/\/Enum cloud, ephemeral, ephemeral_ssd\n\tDiskName string\n\tDescription string\n}\n\ntype CreateInstanceArgs struct {\n\tRegionId Region\n\tZoneId string\n\tImageId string\n\tInstanceType string\n\tSecurityGroupId string\n\tInstanceName string\n\tDescription string\n\tInternetChargeType InternetChargeType\n\tInternetMaxBandwidthIn int\n\tInternetMaxBandwidthOut int\n\tHostName string\n\tPassword string\n\tSystemDisk SystemDiskType\n\tDataDisk []DataDiskType\n\tVSwitchId string\n\tPrivateIpAddress string\n\tClientToken string\n}\n\ntype CreateInstanceResponse struct {\n\tCommonResponse\n\tInstanceId string\n}\n\n\/\/ CreateInstance creates instance\n\/\/\n\/\/ You can read doc at http:\/\/docs.aliyun.com\/#\/pub\/ecs\/open-api\/instance&createinstance\nfunc (client *Client) CreateInstance(args *CreateInstanceArgs) (instanceId string, err error) {\n\tresponse := CreateInstanceResponse{}\n\terr = client.Invoke(\"CreateInstance\", args, &response)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn response.InstanceId, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage endpoints\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\/\/ Mainly for debug logging\n\t\"io\/ioutil\"\n\n\t\"google.golang.org\/appengine\/log\"\n)\n\n\/\/ Server serves registered RPC services using registered codecs.\ntype Server struct {\n\troot string\n\tservices *serviceMap\n\n\t\/\/ ContextDecorator will be called as the last step of the creation of a new context.\n\t\/\/ If nil the context will not be decorated.\n\tContextDecorator func(context.Context) (context.Context, error)\n}\n\n\/\/ NewServer returns a new RPC server.\nfunc NewServer(root string) *Server {\n\tif root == \"\" {\n\t\troot = \"\/_ah\/spi\/\"\n\t} else if root[len(root)-1] != '\/' {\n\t\troot += \"\/\"\n\t}\n\n\tserver := &Server{root: root, services: new(serviceMap)}\n\tbackend := newBackendService(server)\n\tserver.services.register(backend, \"BackendService\", \"\", \"\", true, true)\n\treturn server\n}\n\n\/\/ RegisterService adds a new service to the server.\n\/\/\n\/\/ The name parameter is optional: if empty it will be inferred from\n\/\/ the receiver type name.\n\/\/\n\/\/ Methods from the receiver will be extracted if these rules are satisfied:\n\/\/\n\/\/ - The receiver is exported (begins with an upper case letter) or local\n\/\/ (defined in the package registering the service).\n\/\/ - The method name is exported.\n\/\/ - The method has either 2 arguments and 2 return values:\n\/\/ *http.Request|Context, *arg => *reply, error\n\/\/ or 3 arguments and 1 return value:\n\/\/ *http.Request|Context, *arg, *reply => error\n\/\/ - The first argument is either *http.Request or Context.\n\/\/ - Second argument (*arg) and *reply are exported or local.\n\/\/ - First argument, *arg and *reply are all pointers.\n\/\/ - First (or second, if method has 2 arguments) return value is of type error.\n\/\/\n\/\/ All other methods are ignored.\nfunc (s *Server) RegisterService(srv interface{}, name, ver, desc string, isDefault bool) (*RPCService, error) {\n\treturn s.services.register(srv, name, ver, desc, isDefault, false)\n}\n\n\/\/ RegisterServiceWithDefaults will register provided service and will try to\n\/\/ infer Endpoints config params from its method names and types.\n\/\/ See RegisterService for details.\nfunc (s *Server) RegisterServiceWithDefaults(srv interface{}) (*RPCService, error) {\n\treturn s.RegisterService(srv, \"\", \"\", \"\", true)\n}\n\n\/\/ Must is a helper that wraps a call to a function returning (*Template, error) and\n\/\/ panics if the error is non-nil. It is intended for use in variable initializations\n\/\/ such as:\n\/\/ \tvar s = endpoints.Must(endpoints.RegisterService(s, \"Service\", \"v1\", \"some service\", true))\n\/\/\nfunc Must(s *RPCService, err error) *RPCService {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n\/\/ ServiceByName returns a registered service or nil if there's no service\n\/\/ registered by that name.\nfunc (s *Server) ServiceByName(serviceName string) *RPCService {\n\treturn s.services.serviceByName(serviceName)\n}\n\n\/\/ HandleHTTP adds Server s to specified http.ServeMux.\n\/\/ If no mux is provided http.DefaultServeMux will be used.\nfunc (s *Server) HandleHTTP(mux *http.ServeMux) {\n\tif mux == nil {\n\t\tmux = http.DefaultServeMux\n\t}\n\tmux.Handle(s.root, s)\n}\n\n\/\/ ServeHTTP is Server's implementation of http.Handler interface.\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tc := NewContext(r)\n\tif s.ContextDecorator != nil {\n\t\tctx, err := s.ContextDecorator(c)\n\t\tif err != nil {\n\t\t\twriteError(w, err)\n\t\t\treturn\n\t\t}\n\t\tc = ctx\n\t}\n\n\t\/\/ Always respond with JSON, even when an error occurs.\n\t\/\/ Note: API server doesn't expect an encoding in Content-Type header.\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tif r.Method != \"POST\" {\n\t\terr := fmt.Errorf(\"rpc: POST method required, got %q\", r.Method)\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ methodName has \"ServiceName.MethodName\" format.\n\tvar methodName string\n\tidx := strings.LastIndex(r.URL.Path, \"\/\")\n\tif idx < 0 {\n\t\twriteError(w, fmt.Errorf(\"rpc: no method in path %q\", r.URL.Path))\n\t\treturn\n\t}\n\tmethodName = r.URL.Path[idx+1:]\n\n\t\/\/ Get service method specs\n\tserviceSpec, methodSpec, err := s.services.get(methodName)\n\tif err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ Initialize RPC method request\n\treqValue := reflect.New(methodSpec.ReqType)\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tr.Body.Close()\n\tif err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\tlog.Debugf(c, \"SPI request body: %s\", body)\n\n\t\/\/ if err := json.NewDecoder(r.Body).Decode(req.Interface()); err != nil {\n\t\/\/ \twriteError(w, fmt.Errorf(\"Error while decoding JSON: %q\", err))\n\t\/\/ \treturn\n\t\/\/ }\n\tif err := json.Unmarshal(body, reqValue.Interface()); err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\tif err := validateRequest(reqValue.Interface()); err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ Restore the body in the original request.\n\tr.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\tnumIn, numOut := methodSpec.method.Type.NumIn(), methodSpec.method.Type.NumOut()\n\t\/\/ Construct arguments for the method call\n\tvar httpReqOrCtx interface{} = r\n\tif methodSpec.wantsContext {\n\t\thttpReqOrCtx = c\n\t}\n\targs := []reflect.Value{serviceSpec.rcvr, reflect.ValueOf(httpReqOrCtx)}\n\tif numIn > 2 {\n\t\targs = append(args, reqValue)\n\t}\n\n\tvar respValue reflect.Value\n\tif numIn > 3 {\n\t\trespValue = reflect.New(methodSpec.RespType)\n\t\targs = append(args, respValue)\n\t}\n\n\t\/\/ Invoke the service method\n\tvar errValue reflect.Value\n\tres := methodSpec.method.Func.Call(args)\n\tif numOut == 2 {\n\t\trespValue = res[0]\n\t\terrValue = res[1]\n\t} else {\n\t\terrValue = res[0]\n\t}\n\n\t\/\/ Check if method returned an error\n\tif err := errValue.Interface(); err != nil {\n\t\twriteError(w, err.(error))\n\t\treturn\n\t}\n\n\t\/\/ Encode non-error response\n\tif numIn == 4 || numOut == 2 {\n\t\tif err := json.NewEncoder(w).Encode(respValue.Interface()); err != nil {\n\t\t\twriteError(w, err)\n\t\t}\n\t}\n}\n\n\/\/ DefaultServer is the default RPC server, so you don't have to explicitly\n\/\/ create one.\nvar DefaultServer *Server\n\n\/\/ RegisterService registers a service using DefaultServer.\n\/\/ See Server.RegisterService for details.\nfunc RegisterService(srv interface{}, name, ver, desc string, isDefault bool) (\n\t*RPCService, error) {\n\n\treturn DefaultServer.RegisterService(srv, name, ver, desc, isDefault)\n}\n\n\/\/ RegisterServiceWithDefaults registers a service using DefaultServer.\n\/\/ See Server.RegisterServiceWithDefaults for details.\nfunc RegisterServiceWithDefaults(srv interface{}) (*RPCService, error) {\n\treturn DefaultServer.RegisterServiceWithDefaults(srv)\n}\n\n\/\/ HandleHTTP calls DefaultServer's HandleHTTP method using default serve mux.\nfunc HandleHTTP() {\n\tDefaultServer.HandleHTTP(nil)\n}\n\n\/\/ TODO: var DefaultServer = NewServer(\"\") won't work so it's in the init()\n\/\/ function for now.\nfunc init() {\n\tDefaultServer = NewServer(\"\")\n}\n<commit_msg>always set content-type header before writing errors<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage endpoints\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\/\/ Mainly for debug logging\n\t\"io\/ioutil\"\n\n\t\"google.golang.org\/appengine\/log\"\n)\n\n\/\/ Server serves registered RPC services using registered codecs.\ntype Server struct {\n\troot string\n\tservices *serviceMap\n\n\t\/\/ ContextDecorator will be called as the last step of the creation of a new context.\n\t\/\/ If nil the context will not be decorated.\n\tContextDecorator func(context.Context) (context.Context, error)\n}\n\n\/\/ NewServer returns a new RPC server.\nfunc NewServer(root string) *Server {\n\tif root == \"\" {\n\t\troot = \"\/_ah\/spi\/\"\n\t} else if root[len(root)-1] != '\/' {\n\t\troot += \"\/\"\n\t}\n\n\tserver := &Server{root: root, services: new(serviceMap)}\n\tbackend := newBackendService(server)\n\tserver.services.register(backend, \"BackendService\", \"\", \"\", true, true)\n\treturn server\n}\n\n\/\/ RegisterService adds a new service to the server.\n\/\/\n\/\/ The name parameter is optional: if empty it will be inferred from\n\/\/ the receiver type name.\n\/\/\n\/\/ Methods from the receiver will be extracted if these rules are satisfied:\n\/\/\n\/\/ - The receiver is exported (begins with an upper case letter) or local\n\/\/ (defined in the package registering the service).\n\/\/ - The method name is exported.\n\/\/ - The method has either 2 arguments and 2 return values:\n\/\/ *http.Request|Context, *arg => *reply, error\n\/\/ or 3 arguments and 1 return value:\n\/\/ *http.Request|Context, *arg, *reply => error\n\/\/ - The first argument is either *http.Request or Context.\n\/\/ - Second argument (*arg) and *reply are exported or local.\n\/\/ - First argument, *arg and *reply are all pointers.\n\/\/ - First (or second, if method has 2 arguments) return value is of type error.\n\/\/\n\/\/ All other methods are ignored.\nfunc (s *Server) RegisterService(srv interface{}, name, ver, desc string, isDefault bool) (*RPCService, error) {\n\treturn s.services.register(srv, name, ver, desc, isDefault, false)\n}\n\n\/\/ RegisterServiceWithDefaults will register provided service and will try to\n\/\/ infer Endpoints config params from its method names and types.\n\/\/ See RegisterService for details.\nfunc (s *Server) RegisterServiceWithDefaults(srv interface{}) (*RPCService, error) {\n\treturn s.RegisterService(srv, \"\", \"\", \"\", true)\n}\n\n\/\/ Must is a helper that wraps a call to a function returning (*Template, error) and\n\/\/ panics if the error is non-nil. It is intended for use in variable initializations\n\/\/ such as:\n\/\/ \tvar s = endpoints.Must(endpoints.RegisterService(s, \"Service\", \"v1\", \"some service\", true))\n\/\/\nfunc Must(s *RPCService, err error) *RPCService {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n\/\/ ServiceByName returns a registered service or nil if there's no service\n\/\/ registered by that name.\nfunc (s *Server) ServiceByName(serviceName string) *RPCService {\n\treturn s.services.serviceByName(serviceName)\n}\n\n\/\/ HandleHTTP adds Server s to specified http.ServeMux.\n\/\/ If no mux is provided http.DefaultServeMux will be used.\nfunc (s *Server) HandleHTTP(mux *http.ServeMux) {\n\tif mux == nil {\n\t\tmux = http.DefaultServeMux\n\t}\n\tmux.Handle(s.root, s)\n}\n\n\/\/ ServeHTTP is Server's implementation of http.Handler interface.\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Always respond with JSON, even when an error occurs.\n\t\/\/ Note: API server doesn't expect an encoding in Content-Type header.\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\n\tc := NewContext(r)\n\tif s.ContextDecorator != nil {\n\t\tctx, err := s.ContextDecorator(c)\n\t\tif err != nil {\n\t\t\twriteError(w, err)\n\t\t\treturn\n\t\t}\n\t\tc = ctx\n\t}\n\n\tif r.Method != \"POST\" {\n\t\terr := fmt.Errorf(\"rpc: POST method required, got %q\", r.Method)\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ methodName has \"ServiceName.MethodName\" format.\n\tvar methodName string\n\tidx := strings.LastIndex(r.URL.Path, \"\/\")\n\tif idx < 0 {\n\t\twriteError(w, fmt.Errorf(\"rpc: no method in path %q\", r.URL.Path))\n\t\treturn\n\t}\n\tmethodName = r.URL.Path[idx+1:]\n\n\t\/\/ Get service method specs\n\tserviceSpec, methodSpec, err := s.services.get(methodName)\n\tif err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ Initialize RPC method request\n\treqValue := reflect.New(methodSpec.ReqType)\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tr.Body.Close()\n\tif err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\tlog.Debugf(c, \"SPI request body: %s\", body)\n\n\t\/\/ if err := json.NewDecoder(r.Body).Decode(req.Interface()); err != nil {\n\t\/\/ \twriteError(w, fmt.Errorf(\"Error while decoding JSON: %q\", err))\n\t\/\/ \treturn\n\t\/\/ }\n\tif err := json.Unmarshal(body, reqValue.Interface()); err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\tif err := validateRequest(reqValue.Interface()); err != nil {\n\t\twriteError(w, err)\n\t\treturn\n\t}\n\n\t\/\/ Restore the body in the original request.\n\tr.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\tnumIn, numOut := methodSpec.method.Type.NumIn(), methodSpec.method.Type.NumOut()\n\t\/\/ Construct arguments for the method call\n\tvar httpReqOrCtx interface{} = r\n\tif methodSpec.wantsContext {\n\t\thttpReqOrCtx = c\n\t}\n\targs := []reflect.Value{serviceSpec.rcvr, reflect.ValueOf(httpReqOrCtx)}\n\tif numIn > 2 {\n\t\targs = append(args, reqValue)\n\t}\n\n\tvar respValue reflect.Value\n\tif numIn > 3 {\n\t\trespValue = reflect.New(methodSpec.RespType)\n\t\targs = append(args, respValue)\n\t}\n\n\t\/\/ Invoke the service method\n\tvar errValue reflect.Value\n\tres := methodSpec.method.Func.Call(args)\n\tif numOut == 2 {\n\t\trespValue = res[0]\n\t\terrValue = res[1]\n\t} else {\n\t\terrValue = res[0]\n\t}\n\n\t\/\/ Check if method returned an error\n\tif err := errValue.Interface(); err != nil {\n\t\twriteError(w, err.(error))\n\t\treturn\n\t}\n\n\t\/\/ Encode non-error response\n\tif numIn == 4 || numOut == 2 {\n\t\tif err := json.NewEncoder(w).Encode(respValue.Interface()); err != nil {\n\t\t\twriteError(w, err)\n\t\t}\n\t}\n}\n\n\/\/ DefaultServer is the default RPC server, so you don't have to explicitly\n\/\/ create one.\nvar DefaultServer *Server\n\n\/\/ RegisterService registers a service using DefaultServer.\n\/\/ See Server.RegisterService for details.\nfunc RegisterService(srv interface{}, name, ver, desc string, isDefault bool) (\n\t*RPCService, error) {\n\n\treturn DefaultServer.RegisterService(srv, name, ver, desc, isDefault)\n}\n\n\/\/ RegisterServiceWithDefaults registers a service using DefaultServer.\n\/\/ See Server.RegisterServiceWithDefaults for details.\nfunc RegisterServiceWithDefaults(srv interface{}) (*RPCService, error) {\n\treturn DefaultServer.RegisterServiceWithDefaults(srv)\n}\n\n\/\/ HandleHTTP calls DefaultServer's HandleHTTP method using default serve mux.\nfunc HandleHTTP() {\n\tDefaultServer.HandleHTTP(nil)\n}\n\n\/\/ TODO: var DefaultServer = NewServer(\"\") won't work so it's in the init()\n\/\/ function for now.\nfunc init() {\n\tDefaultServer = NewServer(\"\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"crypto\/elliptic\"\n\t\"encoding\/base32\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n\n\t\"github.com\/tstranex\/u2f\"\n\t\"xorm.io\/xorm\"\n\t\"xorm.io\/xorm\/schemas\"\n)\n\n\/\/ v208 migration was completely broken\nfunc remigrateU2FCredentials(x *xorm.Engine) error {\n\t\/\/ Create webauthnCredential table\n\ttype webauthnCredential struct {\n\t\tID int64 `xorm:\"pk autoincr\"`\n\t\tName string\n\t\tLowerName string `xorm:\"unique(s)\"`\n\t\tUserID int64 `xorm:\"INDEX unique(s)\"`\n\t\tCredentialID string `xorm:\"INDEX VARCHAR(410)\"` \/\/ CredentalID in U2F is at most 255bytes \/ 5 * 8 = 408 - add a few extra characters for safety\n\t\tPublicKey []byte\n\t\tAttestationType string\n\t\tAAGUID []byte\n\t\tSignCount uint32 `xorm:\"BIGINT\"`\n\t\tCloneWarning bool\n\t\tCreatedUnix timeutil.TimeStamp `xorm:\"INDEX created\"`\n\t\tUpdatedUnix timeutil.TimeStamp `xorm:\"INDEX updated\"`\n\t}\n\tif err := x.Sync2(&webauthnCredential{}); err != nil {\n\t\treturn err\n\t}\n\n\tswitch x.Dialect().URI().DBType {\n\tcase schemas.MYSQL:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential MODIFY COLUMN credential_id VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.ORACLE:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential MODIFY credential_id VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.MSSQL:\n\t\t\/\/ This column has an index on it. I could write all of the code to attempt to change the index OR\n\t\t\/\/ I could just use recreate table.\n\t\tsess := x.NewSession()\n\t\tif err := sess.Begin(); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\n\t\tif err := recreateTable(sess, new(webauthnCredential)); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\t\tif err := sess.Commit(); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\t\tif err := sess.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.POSTGRES:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential ALTER COLUMN credential_id TYPE VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\t\/\/ SQLite doesn't support ALTER COLUMN, and it already makes String _TEXT_ by default so no migration needed\n\t\t\/\/ nor is there any need to re-migrate\n\t}\n\n\texist, err := x.IsTableExist(\"u2f_registration\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exist {\n\t\treturn nil\n\t}\n\n\t\/\/ Now migrate the old u2f registrations to the new format\n\ttype u2fRegistration struct {\n\t\tID int64 `xorm:\"pk autoincr\"`\n\t\tName string\n\t\tUserID int64 `xorm:\"INDEX\"`\n\t\tRaw []byte\n\t\tCounter uint32 `xorm:\"BIGINT\"`\n\t\tCreatedUnix timeutil.TimeStamp `xorm:\"INDEX created\"`\n\t\tUpdatedUnix timeutil.TimeStamp `xorm:\"INDEX updated\"`\n\t}\n\n\tvar start int\n\tregs := make([]*u2fRegistration, 0, 50)\n\tfor {\n\t\terr := x.OrderBy(\"id\").Limit(50, start).Find(®s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = func() error {\n\t\t\tsess := x.NewSession()\n\t\t\tdefer sess.Close()\n\t\t\tif err := sess.Begin(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to allow start session. Error: %w\", err)\n\t\t\t}\n\t\t\tif x.Dialect().URI().DBType == schemas.MSSQL {\n\t\t\t\tif _, err := sess.Exec(\"SET IDENTITY_INSERT `webauthn_credential` ON\"); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to allow identity insert on webauthn_credential. Error: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, reg := range regs {\n\t\t\t\tparsed := new(u2f.Registration)\n\t\t\t\terr = parsed.UnmarshalBinary(reg.Raw)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tremigrated := &webauthnCredential{\n\t\t\t\t\tID: reg.ID,\n\t\t\t\t\tName: reg.Name,\n\t\t\t\t\tLowerName: strings.ToLower(reg.Name),\n\t\t\t\t\tUserID: reg.UserID,\n\t\t\t\t\tCredentialID: base32.HexEncoding.EncodeToString(parsed.KeyHandle),\n\t\t\t\t\tPublicKey: elliptic.Marshal(elliptic.P256(), parsed.PubKey.X, parsed.PubKey.Y),\n\t\t\t\t\tAttestationType: \"fido-u2f\",\n\t\t\t\t\tAAGUID: []byte{},\n\t\t\t\t\tSignCount: reg.Counter,\n\t\t\t\t\tUpdatedUnix: reg.UpdatedUnix,\n\t\t\t\t\tCreatedUnix: reg.CreatedUnix,\n\t\t\t\t}\n\n\t\t\t\thas, err := sess.ID(reg.ID).Where(\"id = ?\", reg.ID).Get(new(webauthnCredential))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to get webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t}\n\t\t\t\tif !has {\n\t\t\t\t\t_, err = sess.Insert(remigrated)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"unable to (re)insert webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t_, err = sess.ID(remigrated.ID).AllCols().Update(remigrated)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to update webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn sess.Commit()\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(regs) < 50 {\n\t\t\tbreak\n\t\t}\n\t\tstart += 50\n\t\tregs = regs[:0]\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix migration v210 (#18892)<commit_after>\/\/ Copyright 2022 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"crypto\/elliptic\"\n\t\"encoding\/base32\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n\n\t\"github.com\/tstranex\/u2f\"\n\t\"xorm.io\/xorm\"\n\t\"xorm.io\/xorm\/schemas\"\n)\n\n\/\/ v208 migration was completely broken\nfunc remigrateU2FCredentials(x *xorm.Engine) error {\n\t\/\/ Create webauthnCredential table\n\ttype webauthnCredential struct {\n\t\tID int64 `xorm:\"pk autoincr\"`\n\t\tName string\n\t\tLowerName string `xorm:\"unique(s)\"`\n\t\tUserID int64 `xorm:\"INDEX unique(s)\"`\n\t\tCredentialID string `xorm:\"INDEX VARCHAR(410)\"` \/\/ CredentalID in U2F is at most 255bytes \/ 5 * 8 = 408 - add a few extra characters for safety\n\t\tPublicKey []byte\n\t\tAttestationType string\n\t\tAAGUID []byte\n\t\tSignCount uint32 `xorm:\"BIGINT\"`\n\t\tCloneWarning bool\n\t\tCreatedUnix timeutil.TimeStamp `xorm:\"INDEX created\"`\n\t\tUpdatedUnix timeutil.TimeStamp `xorm:\"INDEX updated\"`\n\t}\n\tif err := x.Sync2(&webauthnCredential{}); err != nil {\n\t\treturn err\n\t}\n\n\tswitch x.Dialect().URI().DBType {\n\tcase schemas.MYSQL:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential MODIFY COLUMN credential_id VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.ORACLE:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential MODIFY credential_id VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.MSSQL:\n\t\t\/\/ This column has an index on it. I could write all of the code to attempt to change the index OR\n\t\t\/\/ I could just use recreate table.\n\t\tsess := x.NewSession()\n\t\tif err := sess.Begin(); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\n\t\tif err := recreateTable(sess, new(webauthnCredential)); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\t\tif err := sess.Commit(); err != nil {\n\t\t\t_ = sess.Close()\n\t\t\treturn err\n\t\t}\n\t\tif err := sess.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase schemas.POSTGRES:\n\t\t_, err := x.Exec(\"ALTER TABLE webauthn_credential ALTER COLUMN credential_id TYPE VARCHAR(410)\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\t\/\/ SQLite doesn't support ALTER COLUMN, and it already makes String _TEXT_ by default so no migration needed\n\t\t\/\/ nor is there any need to re-migrate\n\t}\n\n\texist, err := x.IsTableExist(\"u2f_registration\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exist {\n\t\treturn nil\n\t}\n\n\t\/\/ Now migrate the old u2f registrations to the new format\n\ttype u2fRegistration struct {\n\t\tID int64 `xorm:\"pk autoincr\"`\n\t\tName string\n\t\tUserID int64 `xorm:\"INDEX\"`\n\t\tRaw []byte\n\t\tCounter uint32 `xorm:\"BIGINT\"`\n\t\tCreatedUnix timeutil.TimeStamp `xorm:\"INDEX created\"`\n\t\tUpdatedUnix timeutil.TimeStamp `xorm:\"INDEX updated\"`\n\t}\n\n\tvar start int\n\tregs := make([]*u2fRegistration, 0, 50)\n\tfor {\n\t\terr := x.OrderBy(\"id\").Limit(50, start).Find(®s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = func() error {\n\t\t\tsess := x.NewSession()\n\t\t\tdefer sess.Close()\n\t\t\tif err := sess.Begin(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to allow start session. Error: %w\", err)\n\t\t\t}\n\t\t\tif x.Dialect().URI().DBType == schemas.MSSQL {\n\t\t\t\tif _, err := sess.Exec(\"SET IDENTITY_INSERT `webauthn_credential` ON\"); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to allow identity insert on webauthn_credential. Error: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, reg := range regs {\n\t\t\t\tparsed := new(u2f.Registration)\n\t\t\t\terr = parsed.UnmarshalBinary(reg.Raw)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tremigrated := &webauthnCredential{\n\t\t\t\t\tID: reg.ID,\n\t\t\t\t\tName: reg.Name,\n\t\t\t\t\tLowerName: strings.ToLower(reg.Name),\n\t\t\t\t\tUserID: reg.UserID,\n\t\t\t\t\tCredentialID: base32.HexEncoding.EncodeToString(parsed.KeyHandle),\n\t\t\t\t\tPublicKey: elliptic.Marshal(elliptic.P256(), parsed.PubKey.X, parsed.PubKey.Y),\n\t\t\t\t\tAttestationType: \"fido-u2f\",\n\t\t\t\t\tAAGUID: []byte{},\n\t\t\t\t\tSignCount: reg.Counter,\n\t\t\t\t\tUpdatedUnix: reg.UpdatedUnix,\n\t\t\t\t\tCreatedUnix: reg.CreatedUnix,\n\t\t\t\t}\n\n\t\t\t\thas, err := sess.ID(reg.ID).Get(new(webauthnCredential))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to get webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t}\n\t\t\t\tif !has {\n\t\t\t\t\thas, err := sess.Where(\"`lower_name`=?\", remigrated.LowerName).And(\"`user_id`=?\", remigrated.UserID).Exist(new(webauthnCredential))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"unable to check webauthn_credential[lower_name: %s, user_id:%v]. Error: %w\", remigrated.LowerName, remigrated.UserID, err)\n\t\t\t\t\t}\n\t\t\t\t\tif !has {\n\t\t\t\t\t\t_, err = sess.Insert(remigrated)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"unable to (re)insert webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t_, err = sess.ID(remigrated.ID).AllCols().Update(remigrated)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to update webauthn_credential[%d]. Error: %w\", reg.ID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn sess.Commit()\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(regs) < 50 {\n\t\t\tbreak\n\t\t}\n\t\tstart += 50\n\t\tregs = regs[:0]\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/\/ Functions shared between linux\/darwin.\npackage allocdir\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\nfunc (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {\n\t\/\/ Attempt to hardlink.\n\tif err := os.Link(src, dst); err == nil {\n\t\treturn nil\n\t}\n\n\treturn fileCopy(src, dst, perm)\n}\n\nfunc (d *AllocDir) dropDirPermissions(path string) error {\n\t\/\/ Can't do anything if not root.\n\tif syscall.Geteuid() != 0 {\n\t\treturn nil\n\t}\n\n\tu, err := user.Lookup(\"nobody\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuid, err := getUid(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgid, err := getGid(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Chown(path, uid, gid); err != nil {\n\t\treturn fmt.Errorf(\"Couldn't change owner\/group of %v to (uid: %v, gid: %v): %v\", path, uid, gid, err)\n\t}\n\n\tif err := os.Chmod(path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"Chmod(%v) failed: %v\", path, err)\n\t}\n\n\treturn nil\n}\n\nfunc getUid(u *user.User) (int, error) {\n\tuid, err := strconv.Atoi(u.Uid)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Unable to convert Uid to an int: %v\", err)\n\t}\n\n\treturn uid, nil\n}\n\nfunc getGid(u *user.User) (int, error) {\n\tgid, err := strconv.Atoi(u.Gid)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Unable to convert Gid to an int: %v\", err)\n\t}\n\n\treturn gid, nil\n}\n<commit_msg>Remove CGO dependency for user lookup in allocdir<commit_after>\/\/ +build !windows\n\n\/\/ Functions shared between linux\/darwin.\npackage allocdir\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {\n\t\/\/ Attempt to hardlink.\n\tif err := os.Link(src, dst); err == nil {\n\t\treturn nil\n\t}\n\n\treturn fileCopy(src, dst, perm)\n}\n\nfunc (d *AllocDir) dropDirPermissions(path string) error {\n\t\/\/ Can't do anything if not root.\n\tif syscall.Geteuid() != 0 {\n\t\treturn nil\n\t}\n\n\tu, err := userLookup(\"nobody\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuid, err := getUid(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgid, err := getGid(u)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Chown(path, uid, gid); err != nil {\n\t\treturn fmt.Errorf(\"Couldn't change owner\/group of %v to (uid: %v, gid: %v): %v\", path, uid, gid, err)\n\t}\n\n\tif err := os.Chmod(path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"Chmod(%v) failed: %v\", path, err)\n\t}\n\n\treturn nil\n}\n\nfunc getUid(u *user.User) (int, error) {\n\tuid, err := strconv.Atoi(u.Uid)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Unable to convert Uid to an int: %v\", err)\n\t}\n\n\treturn uid, nil\n}\n\nfunc getGid(u *user.User) (int, error) {\n\tgid, err := strconv.Atoi(u.Gid)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Unable to convert Gid to an int: %v\", err)\n\t}\n\n\treturn gid, nil\n}\n\n\/\/ userLookup checks if the given username or uid is present in \/etc\/passwd\n\/\/ and returns the user struct.\n\/\/ If the username is not found, an error is returned.\n\/\/ Credit to @creak, https:\/\/github.com\/docker\/docker\/pull\/1096\nfunc userLookup(uid string) (*user.User, error) {\n\tfile, err := ioutil.ReadFile(\"\/etc\/passwd\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, line := range strings.Split(string(file), \"\\n\") {\n\t\tdata := strings.Split(line, \":\")\n\t\tif len(data) > 5 && (data[0] == uid || data[2] == uid) {\n\t\t\treturn &user.User{\n\t\t\t\tUid: data[2],\n\t\t\t\tGid: data[3],\n\t\t\t\tUsername: data[0],\n\t\t\t\tName: data[4],\n\t\t\t\tHomeDir: data[5],\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"User not found in \/etc\/passwd\")\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\n\/\/ Arguments represents a structured set of arguments passed to a predicate.\ntype Arguments struct {\n\tpositionals []*Thunk\n\texpandedList *Thunk\n\tkeywords []KeywordArgument\n\texpandedDicts []*Thunk\n}\n\n\/\/ NewArguments creates a new Arguments.\nfunc NewArguments(\n\tps []PositionalArgument,\n\tks []KeywordArgument,\n\tds []*Thunk) Arguments {\n\tts := make([]*Thunk, 0, len(ps))\n\tl := (*Thunk)(nil)\n\n\tfor i, p := range ps {\n\t\tif p.expanded {\n\t\t\tl = mergeRestPositionalArgs(ps[i].value, ps[i+1:]...)\n\t\t\tbreak\n\t\t}\n\n\t\tts = append(ts, p.value)\n\t}\n\n\treturn Arguments{ts, l, ks, ds}\n}\n\nfunc mergeRestPositionalArgs(t *Thunk, ps ...PositionalArgument) *Thunk {\n\tfor _, p := range ps {\n\t\tif p.expanded {\n\t\t\tt = PApp(Merge, t, p.value)\n\t\t} else {\n\t\t\tt = PApp(\n\t\t\t\tNewLazyFunction(appendFuncSignature, appendFunc), \/\/ Avoid initialization loop\n\t\t\t\tt, p.value)\n\t\t}\n\t}\n\n\treturn t\n}\n\nfunc (args *Arguments) nextPositional() *Thunk {\n\tif len(args.positionals) != 0 {\n\t\tdefer func() { args.positionals = args.positionals[1:] }()\n\t\treturn args.positionals[0]\n\t}\n\n\tif args.expandedList == nil {\n\t\treturn nil\n\t}\n\n\tl := args.expandedList\n\targs.expandedList = PApp(Rest, l)\n\treturn PApp(First, l)\n}\n\nfunc (args *Arguments) restPositionals() *Thunk {\n\tps := args.positionals\n\tl := args.expandedList\n\targs.positionals = nil\n\targs.expandedList = nil\n\n\tif l == nil {\n\t\treturn NewList(ps...)\n\t}\n\n\treturn PApp(Merge, NewList(ps...), l)\n}\n\nfunc (args *Arguments) searchKeyword(s string) *Thunk {\n\tfor i, k := range args.keywords {\n\t\tif s == k.name {\n\t\t\targs.keywords = append(args.keywords[:i], args.keywords[i+1:]...)\n\t\t\treturn k.value\n\t\t}\n\t}\n\n\tfor i, t := range args.expandedDicts {\n\t\tv := t.Eval()\n\t\td, ok := v.(DictionaryType)\n\n\t\tif !ok {\n\t\t\treturn NotDictionaryError(v)\n\t\t}\n\n\t\tk := StringType(s)\n\n\t\tif v, ok := d.Search(k); ok {\n\t\t\tnew := make([]*Thunk, len(args.expandedDicts))\n\t\t\tcopy(new, args.expandedDicts)\n\t\t\tnew[i] = Normal(d.Remove(k))\n\t\t\targs.expandedDicts = new\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (args *Arguments) restKeywords() *Thunk {\n\tdefer func() {\n\t\targs.keywords = nil\n\t\targs.expandedDicts = nil\n\t}()\n\n\tt := EmptyDictionary\n\n\tfor _, k := range args.keywords {\n\t\tt = PApp(Insert, t, NewString(k.name), k.value)\n\t}\n\n\tfor _, tt := range args.expandedDicts {\n\t\tt = PApp(Merge, t, tt)\n\t}\n\n\treturn t\n}\n\n\/\/ Merge merges 2 sets of arguments into one.\nfunc (args Arguments) Merge(merged Arguments) Arguments {\n\tvar new Arguments\n\n\tif new.expandedList == nil {\n\t\tnew.positionals = append(args.positionals, merged.positionals...)\n\t\tnew.expandedList = merged.expandedList\n\t} else {\n\t\tnew.positionals = args.positionals\n\t\tnew.expandedList = PApp(\n\t\t\tAppend,\n\t\t\tappend([]*Thunk{args.expandedList}, merged.positionals...)...)\n\n\t\tif merged.expandedList != nil {\n\t\t\tnew.expandedList = PApp(Merge, new.expandedList, merged.expandedList)\n\t\t}\n\t}\n\n\tnew.keywords = append(args.keywords, merged.keywords...)\n\tnew.expandedDicts = append(args.expandedDicts, merged.expandedDicts...)\n\n\treturn new\n}\n\nfunc (args Arguments) empty() *Thunk {\n\tif args.positionals != nil && len(args.positionals) > 0 {\n\t\treturn argumentError(\"%d positional arguments are left\", len(args.positionals))\n\t}\n\n\t\/\/ Testing args.expandedList is impossible because we cannot know its length\n\t\/\/ without evaluating it.\n\n\tn := 0\n\n\tif args.expandedDicts != nil {\n\t\tfor _, t := range args.expandedDicts {\n\t\t\tv := t.Eval()\n\t\t\td, ok := v.(DictionaryType)\n\n\t\t\tif !ok {\n\t\t\t\treturn NotDictionaryError(v)\n\t\t\t}\n\n\t\t\tn += d.Size()\n\t\t}\n\t}\n\n\tif n != 0 || args.keywords != nil && len(args.keywords) > 0 {\n\t\treturn argumentError(\"%d keyword arguments are left\", len(args.keywords)+n)\n\t}\n\n\treturn nil\n}\n<commit_msg>Refactor arguments.go<commit_after>package core\n\n\/\/ Arguments represents a structured set of arguments passed to a predicate.\ntype Arguments struct {\n\tpositionals []*Thunk\n\texpandedList *Thunk\n\tkeywords []KeywordArgument\n\texpandedDicts []*Thunk\n}\n\n\/\/ NewArguments creates a new Arguments.\nfunc NewArguments(\n\tps []PositionalArgument,\n\tks []KeywordArgument,\n\tds []*Thunk) Arguments {\n\tts := make([]*Thunk, 0, len(ps))\n\tl := (*Thunk)(nil)\n\n\tfor i, p := range ps {\n\t\tif p.expanded {\n\t\t\tl = mergeRestPositionalArgs(ps[i].value, ps[i+1:]...)\n\t\t\tbreak\n\t\t}\n\n\t\tts = append(ts, p.value)\n\t}\n\n\treturn Arguments{ts, l, ks, ds}\n}\n\nfunc mergeRestPositionalArgs(t *Thunk, ps ...PositionalArgument) *Thunk {\n\tfor _, p := range ps {\n\t\tif p.expanded {\n\t\t\tt = PApp(Merge, t, p.value)\n\t\t} else {\n\t\t\tt = PApp(\n\t\t\t\tNewLazyFunction(appendFuncSignature, appendFunc), \/\/ Avoid initialization loop\n\t\t\t\tt, p.value)\n\t\t}\n\t}\n\n\treturn t\n}\n\nfunc (args *Arguments) nextPositional() *Thunk {\n\tif len(args.positionals) != 0 {\n\t\tt := args.positionals[0]\n\t\targs.positionals = args.positionals[1:]\n\t\treturn t\n\t}\n\n\tif args.expandedList == nil {\n\t\treturn nil\n\t}\n\n\tl := args.expandedList\n\targs.expandedList = PApp(Rest, l)\n\treturn PApp(First, l)\n}\n\nfunc (args *Arguments) restPositionals() *Thunk {\n\tts := args.positionals\n\tl := args.expandedList\n\targs.positionals = nil\n\targs.expandedList = nil\n\n\tif l == nil {\n\t\treturn NewList(ts...)\n\t}\n\n\treturn PApp(Merge, NewList(ts...), l)\n}\n\nfunc (args *Arguments) searchKeyword(s string) *Thunk {\n\tfor i, k := range args.keywords {\n\t\tif s == k.name {\n\t\t\targs.keywords = append(args.keywords[:i], args.keywords[i+1:]...)\n\t\t\treturn k.value\n\t\t}\n\t}\n\n\tfor i, t := range args.expandedDicts {\n\t\tv := t.Eval()\n\t\td, ok := v.(DictionaryType)\n\n\t\tif !ok {\n\t\t\treturn NotDictionaryError(v)\n\t\t}\n\n\t\tk := StringType(s)\n\n\t\tif v, ok := d.Search(k); ok {\n\t\t\tds := make([]*Thunk, len(args.expandedDicts))\n\t\t\tcopy(ds, args.expandedDicts)\n\t\t\tds[i] = Normal(d.Remove(k))\n\t\t\targs.expandedDicts = ds\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (args *Arguments) restKeywords() *Thunk {\n\tks := args.keywords\n\tds := args.expandedDicts\n\targs.keywords = nil\n\targs.expandedDicts = nil\n\n\tt := EmptyDictionary\n\n\tfor _, k := range ks {\n\t\tt = PApp(Insert, t, NewString(k.name), k.value)\n\t}\n\n\tfor _, d := range ds {\n\t\tt = PApp(Merge, t, d)\n\t}\n\n\treturn t\n}\n\n\/\/ Merge merges 2 sets of arguments into one.\nfunc (args Arguments) Merge(old Arguments) Arguments {\n\tvar new Arguments\n\n\tif new.expandedList == nil {\n\t\tnew.positionals = append(args.positionals, old.positionals...)\n\t\tnew.expandedList = old.expandedList\n\t} else {\n\t\tnew.positionals = args.positionals\n\t\tnew.expandedList = PApp(\n\t\t\tAppend,\n\t\t\tappend([]*Thunk{args.expandedList}, old.positionals...)...)\n\n\t\tif old.expandedList != nil {\n\t\t\tnew.expandedList = PApp(Merge, new.expandedList, old.expandedList)\n\t\t}\n\t}\n\n\tnew.keywords = append(args.keywords, old.keywords...)\n\tnew.expandedDicts = append(args.expandedDicts, old.expandedDicts...)\n\n\treturn new\n}\n\nfunc (args Arguments) empty() *Thunk {\n\tif len(args.positionals) > 0 {\n\t\treturn argumentError(\"%d positional arguments are left\", len(args.positionals))\n\t}\n\n\t\/\/ Testing args.expandedList is impossible because we cannot know its length\n\t\/\/ without evaluating it.\n\n\tn := 0\n\n\tfor _, t := range args.expandedDicts {\n\t\tv := t.Eval()\n\t\td, ok := v.(DictionaryType)\n\n\t\tif !ok {\n\t\t\treturn NotDictionaryError(v)\n\t\t}\n\n\t\tn += d.Size()\n\t}\n\n\tif n != 0 || args.keywords != nil && len(args.keywords) > 0 {\n\t\treturn argumentError(\"%d keyword arguments are left\", len(args.keywords)+n)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport \"testing\"\n\ntype fakeConfig struct {\n\tFieldOne string `servconf:\"field_one,required\"`\n\tFieldTwo string `servconf:\"field_two\"`\n}\n\nfunc TestServiceConfig(t *testing.T) {\n\tvar confValidSC1 ServiceConfig\n\tvar confValidSC2 ServiceConfig\n\tvar confInvalidSC1 ServiceConfig\n\tconfValidSC1 = map[string]interface{}{\n\t\t\"field_one\": \"test1\",\n\t\t\"field_two\": \"test2\",\n\t}\n\tconfValidSC2 = map[string]interface{}{\n\t\t\"field_one\": \"test1\",\n\t}\n\tconfInvalidSC1 = map[string]interface{}{\n\t\t\"field_two\": \"test\",\n\t}\n\n\t\/\/ TODO add other config structs with invalid tags and test it\n\tt.Run(\"AssignValidSC1\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confValidSC1.Assign(&cfg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t\t}\n\t\tif cfg.FieldOne != \"test1\" {\n\t\t\tt.Errorf(\"FieldOne value %q is not \\\"test1\\\"\", cfg.FieldOne)\n\t\t}\n\t\tif cfg.FieldTwo != \"test2\" {\n\t\t\tt.Errorf(\"FieldTwo value %q is not \\\"test2\\\"\", cfg.FieldTwo)\n\t\t}\n\t})\n\tt.Run(\"AssignValidSC2\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confValidSC2.Assign(&cfg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t\t}\n\t\tif cfg.FieldOne != \"test1\" {\n\t\t\tt.Errorf(\"FieldOne value %q is not \\\"test1\\\"\", cfg.FieldOne)\n\t\t}\n\t\tif cfg.FieldTwo != \"\" {\n\t\t\tt.Errorf(\"FieldTwo value %q is not \\\"\\\"\", cfg.FieldTwo)\n\t\t}\n\t})\n\tt.Run(\"AssignInvalidSC1\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confInvalidSC1.Assign(&cfg)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Assign did not return an error with invalid input\")\n\t\t\treturn\n\t\t}\n\t\tswitch err.(type) {\n\t\tcase *InvalidTagError:\n\t\t\tif err.(*InvalidTagError).NameTag == \"field_one\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t})\n}\n<commit_msg>Adjust ServiceConfig test to have a clearer error<commit_after>package config\n\nimport \"testing\"\n\ntype fakeConfig struct {\n\tFieldOne string `servconf:\"field_one,required\"`\n\tFieldTwo string `servconf:\"field_two\"`\n}\n\nfunc TestServiceConfig(t *testing.T) {\n\tvar confValidSC1 ServiceConfig\n\tvar confValidSC2 ServiceConfig\n\tvar confInvalidSC1 ServiceConfig\n\tconfValidSC1 = map[string]interface{}{\n\t\t\"field_one\": \"test1\",\n\t\t\"field_two\": \"test2\",\n\t}\n\tconfValidSC2 = map[string]interface{}{\n\t\t\"field_one\": \"test1\",\n\t}\n\tconfInvalidSC1 = map[string]interface{}{\n\t\t\"field_two\": \"test\",\n\t}\n\n\t\/\/ TODO add other config structs with invalid tags and test it\n\tt.Run(\"AssignValidSC1\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confValidSC1.Assign(&cfg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t\t}\n\t\tif cfg.FieldOne != \"test1\" {\n\t\t\tt.Errorf(\"FieldOne value %q is not \\\"test1\\\"\", cfg.FieldOne)\n\t\t}\n\t\tif cfg.FieldTwo != \"test2\" {\n\t\t\tt.Errorf(\"FieldTwo value %q is not \\\"test2\\\"\", cfg.FieldTwo)\n\t\t}\n\t})\n\tt.Run(\"AssignValidSC2\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confValidSC2.Assign(&cfg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t\t}\n\t\tif cfg.FieldOne != \"test1\" {\n\t\t\tt.Errorf(\"FieldOne value %q is not \\\"test1\\\"\", cfg.FieldOne)\n\t\t}\n\t\tif cfg.FieldTwo != \"\" {\n\t\t\tt.Errorf(\"FieldTwo value %q is not \\\"\\\"\", cfg.FieldTwo)\n\t\t}\n\t})\n\tt.Run(\"AssignInvalidSC1\", func(t *testing.T) {\n\t\tcfg := fakeConfig{}\n\t\terr := confInvalidSC1.Assign(&cfg)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Assign did not return an error when missing a required field\")\n\t\t\treturn\n\t\t}\n\t\tswitch err.(type) {\n\t\tcase *InvalidTagError:\n\t\t\tif err.(*InvalidTagError).NameTag == \"field_one\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tt.Errorf(\"Assign returned an unexpected error: %s\", err)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package schema\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/araddon\/qlbridge\/expr\"\n\t\"github.com\/araddon\/qlbridge\/rel\"\n\t\"github.com\/araddon\/qlbridge\/value\"\n)\n\nvar (\n\tErrNotFound = fmt.Errorf(\"Not Found\")\n\tErrNotImplemented = fmt.Errorf(\"Not Implemented\")\n)\n\ntype (\n\t\/\/ Source is factory registered to create connections to a backing dastasource.\n\t\/\/ Datasources are most likely a database, file, api, in-mem data etc.\n\t\/\/ It is thread-safe, singleton, responsible for creating connections and\n\t\/\/ exposing schema and performing ddl operations. It also exposes partition information\n\t\/\/ optionally if a distributed source, or able to be distributed queries.\n\t\/\/\n\t\/\/ DDL\/Schema Operations\n\t\/\/ - schema discovery, tables, columns etc\n\t\/\/ - create\n\t\/\/ - index\n\t\/\/\n\t\/\/ Lifecycle:\n\t\/\/ - Init()\n\t\/\/ - Setup()\n\t\/\/ - running ....\n\t\/\/ - Close()\n\tSource interface {\n\t\t\/\/ Init provides opportunity for those sources that require\n\t\t\/\/ no configuration and sniff schema from their environment time\n\t\t\/\/ to load pre-schema discovery\n\t\tInit()\n\t\t\/\/ Setup A Datasource optional interface for getting the SourceSchema injected\n\t\t\/\/ during creation\/starup. Since the Source is a singleton, stateful manager\n\t\t\/\/ it has a startup\/shutdown process.\n\t\tSetup(*SchemaSource) error\n\t\t\/\/ Close\n\t\tClose() error\n\t\t\/\/ Open create a connection (not thread safe) to this source\n\t\tOpen(source string) (Conn, error)\n\t\t\/\/ List of tables provided by this source\n\t\tTables() []string\n\t\t\/\/ provides table schema info\n\t\tTable(table string) (*Table, error)\n\t\t\/\/ Create\/Alter TODO\n\t}\n\t\/\/ SourcePartitionable DataSource that is partitionable into ranges for splitting\n\t\/\/ reads, writes onto different nodes.\n\tSourcePartitionable interface {\n\t\t\/\/ Many databases's already have internal Partitions, allow those to\n\t\t\/\/ be exposed for use in our partitioning\n\t\tPartitions() []*Partition\n\t\tPartitionSource(p *Partition) (Conn, error)\n\t}\n\t\/\/ SourceTableColumn is a partial source that just provides access to\n\t\/\/ Column schema info, used in Generators.\n\tSourceTableColumn interface {\n\t\t\/\/ Underlying data type of column\n\t\tColumn(col string) (value.ValueType, bool)\n\t}\n)\n\ntype (\n\t\/\/ Conn A Connection\/Session to a file, api, backend database. Provides DML operations.\n\t\/\/\n\t\/\/ Minimum Read Features to provide Sql Select:\n\t\/\/ - Scanning: iterate through messages\/rows\n\t\/\/ - Schema Tables: at a minium list of tables available, the column level data\n\t\/\/ can be introspected so is optional\n\t\/\/\n\t\/\/ Planning:\n\t\/\/ - CreateMutator(ctx *plan.Context) : execute a mutation task insert, delete, update\n\t\/\/\n\t\/\/\n\t\/\/ Non Select based Sql DML Operations for Mutator:\n\t\/\/ - Deletion: (sql delete)\n\t\/\/ Delete()\n\t\/\/ DeleteExpression()\n\t\/\/ - Upsert Interface (sql Update, Upsert, Insert)\n\t\/\/ Put()\n\t\/\/ PutMulti()\n\t\/\/\n\t\/\/ DataSource Connection Session that is Stateful. this is\n\t\/\/ really a generic interface, will actually implement features\n\t\/\/ below: SchemaColumns, Scanner, Seeker, Mutator\n\tConn interface {\n\t\tClose() error\n\t}\n\t\/\/ ConnAll interface\n\tConnAll interface {\n\t\tClose() error\n\t\tConnColumns\n\t\tIterator\n\t\tConnSeeker\n\t\tConnUpsert\n\t\tConnDeletion\n\t}\n\t\/\/ ConnColumns Interface for a data source connection exposing column positions for []driver.Value iteration\n\tConnColumns interface {\n\t\t\/\/Conn\n\t\tColumns() []string\n\t}\n\t\/\/ ConnScanner is the most basic of data sources, just iterate through\n\t\/\/ rows without any optimizations. Key-Value store like csv, redis, cassandra.\n\tConnScanner interface {\n\t\tConn\n\t\tIterator\n\t}\n\t\/\/ ConnScannerIterator Another advanced iterator, probably deprecate?\n\tConnScannerIterator interface {\n\t\t\/\/Conn\n\t\t\/\/ create a new iterator to scan through row by row\n\t\tCreateIterator() Iterator\n\t\tMesgChan() <-chan Message\n\t}\n\t\/\/ ConnSeeker is a datsource that is Key-Value store, allows relational\n\t\/\/ implementation to be faster for Seeking row values instead of scanning\n\tConnSeeker interface {\n\t\t\/\/Conn\n\t\t\/\/ Just because we have Get, Multi-Get, doesn't mean we can seek all\n\t\t\/\/ expressions, find out with CanSeek for given expression\n\t\tCanSeek(*rel.SqlSelect) bool\n\t\tGet(key driver.Value) (Message, error)\n\t\tMultiGet(keys []driver.Value) ([]Message, error)\n\t}\n\t\/\/ ConnMutation creates a Mutator connection similar to Open() connection for select\n\t\/\/ - accepts the plan context used in this upsert\/insert\/update\n\t\/\/ - returns a connection which must be closed\n\tConnMutation interface {\n\t\tCreateMutator(pc interface{} \/*plan.Context*\/) (ConnMutator, error)\n\t}\n\t\/\/ ConnMutator Mutator Connection\n\tConnMutator interface {\n\t\tConnUpsert\n\t\tConnDeletion\n\t}\n\t\/\/ ConnUpsert Mutation interface for Put\n\t\/\/ - assumes datasource understands key(s?)\n\tConnUpsert interface {\n\t\tPut(ctx context.Context, key Key, value interface{}) (Key, error)\n\t\tPutMulti(ctx context.Context, keys []Key, src interface{}) ([]Key, error)\n\t}\n\t\/\/ ConnPatchWhere pass through where expression to underlying datasource\n\t\/\/ Used for update statements WHERE x = y\n\tConnPatchWhere interface {\n\t\tPatchWhere(ctx context.Context, where expr.Node, patch interface{}) (int64, error)\n\t}\n\t\/\/ ConnDeletion deletion interface for data sources\n\tConnDeletion interface {\n\t\t\/\/ Delete using this key\n\t\tDelete(driver.Value) (int, error)\n\t\t\/\/ Delete with given expression\n\t\tDeleteExpression(p interface{} \/* plan.Delete *\/, n expr.Node) (int, error)\n\t}\n)\n<commit_msg>adding missing interface back in (#169)<commit_after>package schema\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/araddon\/qlbridge\/expr\"\n\t\"github.com\/araddon\/qlbridge\/rel\"\n\t\"github.com\/araddon\/qlbridge\/value\"\n)\n\nvar (\n\tErrNotFound = fmt.Errorf(\"Not Found\")\n\tErrNotImplemented = fmt.Errorf(\"Not Implemented\")\n)\n\ntype (\n\t\/\/ Source is factory registered to create connections to a backing dastasource.\n\t\/\/ Datasources are most likely a database, file, api, in-mem data etc.\n\t\/\/ It is thread-safe, singleton, responsible for creating connections and\n\t\/\/ exposing schema and performing ddl operations. It also exposes partition information\n\t\/\/ optionally if a distributed source, or able to be distributed queries.\n\t\/\/\n\t\/\/ DDL\/Schema Operations\n\t\/\/ - schema discovery, tables, columns etc\n\t\/\/ - create\n\t\/\/ - index\n\t\/\/\n\t\/\/ Lifecycle:\n\t\/\/ - Init()\n\t\/\/ - Setup()\n\t\/\/ - running ....\n\t\/\/ - Close()\n\tSource interface {\n\t\t\/\/ Init provides opportunity for those sources that require\n\t\t\/\/ no configuration and sniff schema from their environment time\n\t\t\/\/ to load pre-schema discovery\n\t\tInit()\n\t\t\/\/ Setup A Datasource optional interface for getting the SourceSchema injected\n\t\t\/\/ during creation\/starup. Since the Source is a singleton, stateful manager\n\t\t\/\/ it has a startup\/shutdown process.\n\t\tSetup(*SchemaSource) error\n\t\t\/\/ Close\n\t\tClose() error\n\t\t\/\/ Open create a connection (not thread safe) to this source\n\t\tOpen(source string) (Conn, error)\n\t\t\/\/ List of tables provided by this source\n\t\tTables() []string\n\t\t\/\/ provides table schema info\n\t\tTable(table string) (*Table, error)\n\t\t\/\/ Create\/Alter TODO\n\t}\n\t\/\/ SourceTableSchema Partial interface for just Table()\n\tSourceTableSchema interface {\n\t\tTable(table string) (*Table, error)\n\t}\n\t\/\/ SourcePartitionable DataSource that is partitionable into ranges for splitting\n\t\/\/ reads, writes onto different nodes.\n\tSourcePartitionable interface {\n\t\t\/\/ Many databases's already have internal Partitions, allow those to\n\t\t\/\/ be exposed for use in our partitioning\n\t\tPartitions() []*Partition\n\t\tPartitionSource(p *Partition) (Conn, error)\n\t}\n\t\/\/ SourceTableColumn is a partial source that just provides access to\n\t\/\/ Column schema info, used in Generators.\n\tSourceTableColumn interface {\n\t\t\/\/ Underlying data type of column\n\t\tColumn(col string) (value.ValueType, bool)\n\t}\n)\n\ntype (\n\t\/\/ Conn A Connection\/Session to a file, api, backend database. Provides DML operations.\n\t\/\/\n\t\/\/ Minimum Read Features to provide Sql Select:\n\t\/\/ - Scanning: iterate through messages\/rows\n\t\/\/ - Schema Tables: at a minium list of tables available, the column level data\n\t\/\/ can be introspected so is optional\n\t\/\/\n\t\/\/ Planning:\n\t\/\/ - CreateMutator(ctx *plan.Context) : execute a mutation task insert, delete, update\n\t\/\/\n\t\/\/\n\t\/\/ Non Select based Sql DML Operations for Mutator:\n\t\/\/ - Deletion: (sql delete)\n\t\/\/ Delete()\n\t\/\/ DeleteExpression()\n\t\/\/ - Upsert Interface (sql Update, Upsert, Insert)\n\t\/\/ Put()\n\t\/\/ PutMulti()\n\t\/\/\n\t\/\/ DataSource Connection Session that is Stateful. this is\n\t\/\/ really a generic interface, will actually implement features\n\t\/\/ below: SchemaColumns, Scanner, Seeker, Mutator\n\tConn interface {\n\t\tClose() error\n\t}\n\t\/\/ ConnAll interface\n\tConnAll interface {\n\t\tClose() error\n\t\tConnColumns\n\t\tIterator\n\t\tConnSeeker\n\t\tConnUpsert\n\t\tConnDeletion\n\t}\n\t\/\/ ConnColumns Interface for a data source connection exposing column positions for []driver.Value iteration\n\tConnColumns interface {\n\t\t\/\/Conn\n\t\tColumns() []string\n\t}\n\t\/\/ ConnScanner is the most basic of data sources, just iterate through\n\t\/\/ rows without any optimizations. Key-Value store like csv, redis, cassandra.\n\tConnScanner interface {\n\t\tConn\n\t\tIterator\n\t}\n\t\/\/ ConnScannerIterator Another advanced iterator, probably deprecate?\n\tConnScannerIterator interface {\n\t\t\/\/Conn\n\t\t\/\/ create a new iterator to scan through row by row\n\t\tCreateIterator() Iterator\n\t\tMesgChan() <-chan Message\n\t}\n\t\/\/ ConnSeeker is a datsource that is Key-Value store, allows relational\n\t\/\/ implementation to be faster for Seeking row values instead of scanning\n\tConnSeeker interface {\n\t\t\/\/Conn\n\t\t\/\/ Just because we have Get, Multi-Get, doesn't mean we can seek all\n\t\t\/\/ expressions, find out with CanSeek for given expression\n\t\tCanSeek(*rel.SqlSelect) bool\n\t\tGet(key driver.Value) (Message, error)\n\t\tMultiGet(keys []driver.Value) ([]Message, error)\n\t}\n\t\/\/ ConnMutation creates a Mutator connection similar to Open() connection for select\n\t\/\/ - accepts the plan context used in this upsert\/insert\/update\n\t\/\/ - returns a connection which must be closed\n\tConnMutation interface {\n\t\tCreateMutator(pc interface{} \/*plan.Context*\/) (ConnMutator, error)\n\t}\n\t\/\/ ConnMutator Mutator Connection\n\tConnMutator interface {\n\t\tConnUpsert\n\t\tConnDeletion\n\t}\n\t\/\/ ConnUpsert Mutation interface for Put\n\t\/\/ - assumes datasource understands key(s?)\n\tConnUpsert interface {\n\t\tPut(ctx context.Context, key Key, value interface{}) (Key, error)\n\t\tPutMulti(ctx context.Context, keys []Key, src interface{}) ([]Key, error)\n\t}\n\t\/\/ ConnPatchWhere pass through where expression to underlying datasource\n\t\/\/ Used for update statements WHERE x = y\n\tConnPatchWhere interface {\n\t\tPatchWhere(ctx context.Context, where expr.Node, patch interface{}) (int64, error)\n\t}\n\t\/\/ ConnDeletion deletion interface for data sources\n\tConnDeletion interface {\n\t\t\/\/ Delete using this key\n\t\tDelete(driver.Value) (int, error)\n\t\t\/\/ Delete with given expression\n\t\tDeleteExpression(p interface{} \/* plan.Delete *\/, n expr.Node) (int, error)\n\t}\n)\n<|endoftext|>"} {"text":"<commit_before>package syslog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/segmentio\/ecs-logs\/lib\"\n)\n\nconst (\n\tDefaultTemplate = \"<{{.PRIVAL}}>{{.TIMESTAMP}} {{.GROUP}}[{{.STREAM}}]: {{.MSG}}\"\n)\n\ntype WriterConfig struct {\n\tNetwork string\n\tAddress string\n\tTemplate string\n\tTimeFormat string\n\tTLS *tls.Config\n}\n\nfunc NewWriter(group string, stream string) (w ecslogs.Writer, err error) {\n\tvar c WriterConfig\n\tvar s string\n\tvar u *url.URL\n\n\tif s = os.Getenv(\"SYSLOG_URL\"); len(s) != 0 {\n\t\tif u, err = url.Parse(s); err != nil {\n\t\t\terr = fmt.Errorf(\"invalid syslog URL: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Network = u.Scheme\n\t\tc.Address = u.Host\n\t}\n\n\tc.Template = os.Getenv(\"SYSLOG_TEMPLATE\")\n\tc.TimeFormat = os.Getenv(\"SYSLOG_TIME_FORMAT\")\n\n\treturn DialWriter(c)\n}\n\nfunc DialWriter(config WriterConfig) (w ecslogs.Writer, err error) {\n\tvar netopts []string\n\tvar addropts []string\n\tvar backend io.Writer\n\n\tif len(config.Address) != 0 {\n\t\tif len(config.Network) != 0 {\n\t\t\tnetopts = []string{config.Network}\n\t\t} else {\n\t\t\t\/\/ When starting with a '\/' we assume it's gonna be a file path,\n\t\t\t\/\/ otherwise we fallback to trying a TLS connection so we don't\n\t\t\t\/\/ implicitly send logs over an unsecured link.\n\t\t\tif strings.HasPrefix(config.Address, \"\/\") {\n\t\t\t\tnetopts = []string{\"unixgram\", \"unix\"}\n\t\t\t} else {\n\t\t\t\tnetopts = []string{\"tls\"}\n\t\t\t}\n\t\t}\n\t\taddropts = []string{config.Address}\n\t} else {\n\t\t\/\/ This was copied from the standard log\/syslog package, they do the same\n\t\t\/\/ and try to guess at runtime where and what kind of socket syslogd is\n\t\t\/\/ using.\n\t\tnetopts = []string{\"unixgram\", \"unix\"}\n\t\taddropts = []string{\"\/dev\/log\", \"\/var\/run\/syslog\", \"\/var\/run\/log\"}\n\t}\n\nconnect:\n\tfor _, n := range netopts {\n\t\tfor _, a := range addropts {\n\t\t\tif backend, err = dialWriter(n, a, config.TLS); err == nil {\n\t\t\t\tbreak connect\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw = newWriter(writerConfig{\n\t\tbackend: backend,\n\t\ttemplate: config.Template,\n\t\ttimeFormat: config.TimeFormat,\n\t})\n\treturn\n}\n\ntype writerConfig struct {\n\tbackend io.Writer\n\ttemplate string\n\ttimeFormat string\n}\n\nfunc newWriter(config writerConfig) *writer {\n\tvar out func(*writer, message) error\n\tvar flush func() error\n\n\tif len(config.timeFormat) == 0 {\n\t\tconfig.timeFormat = time.Stamp\n\t}\n\n\tif len(config.template) == 0 {\n\t\tconfig.template = DefaultTemplate\n\t}\n\n\tswitch b := config.backend.(type) {\n\tcase bufferedWriter:\n\t\tout, flush = (*writer).directWrite, b.Flush\n\tdefault:\n\t\tout, flush = (*writer).bufferedWrite, func() error { return nil }\n\t}\n\n\treturn &writer{\n\t\twriterConfig: config,\n\t\tflush: flush,\n\t\tout: out,\n\t\ttpl: newWriterTemplate(config.template),\n\t}\n}\n\nfunc newWriterTemplate(format string) *template.Template {\n\tif !strings.HasSuffix(format, \"\\n\") {\n\t\tformat += \"\\n\"\n\t}\n\tt := template.New(\"syslog\")\n\ttemplate.Must(t.Parse(format))\n\treturn t\n}\n\ntype writer struct {\n\twriterConfig\n\tbuf bytes.Buffer\n\ttpl *template.Template\n\tout func(*writer, message) error\n\tflush func() error\n}\n\nfunc (w *writer) Close() (err error) {\n\tif c, ok := w.backend.(io.Closer); ok {\n\t\terr = c.Close()\n\t}\n\treturn\n}\n\nfunc (w *writer) WriteMessageBatch(batch []ecslogs.Message) (err error) {\n\tfor _, msg := range batch {\n\t\tif err = w.write(msg); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn w.flush()\n}\n\nfunc (w *writer) WriteMessage(msg ecslogs.Message) (err error) {\n\tif err = w.write(msg); err == nil {\n\t\terr = w.flush()\n\t}\n\treturn\n}\n\nfunc (w *writer) write(msg ecslogs.Message) (err error) {\n\tm := message{\n\t\tPRIVAL: int(msg.Level) + 8, \/\/ +8 is for user-level messages facility\n\t\tHOSTNAME: msg.Host,\n\t\tMSGID: msg.ID,\n\t\tGROUP: msg.Group,\n\t\tSTREAM: msg.Stream,\n\t\tMSG: msg.Content,\n\t\tTIMESTAMP: msg.Time.Format(w.timeFormat),\n\t}\n\n\tif len(m.HOSTNAME) == 0 {\n\t\tm.HOSTNAME = \"-\"\n\t}\n\n\tif len(m.MSGID) == 0 {\n\t\tm.MSGID = \"-\"\n\t}\n\n\tif msg.PID == 0 {\n\t\tm.PROCID = \"-\"\n\t} else {\n\t\tm.PROCID = strconv.Itoa(msg.PID)\n\t}\n\n\tif len(msg.File) != 0 || len(msg.Func) != 0 {\n\t\tm.SOURCE = fmt.Sprintf(\"%s:%s:%d\", msg.File, msg.Func, msg.Line)\n\t}\n\n\treturn w.out(w, m)\n}\n\nfunc (w *writer) directWrite(m message) (err error) {\n\treturn w.tpl.Execute(w.backend, m)\n}\n\nfunc (w *writer) bufferedWrite(m message) (err error) {\n\tw.buf.Reset()\n\tw.tpl.Execute(&w.buf, m)\n\t_, err = w.backend.Write(w.buf.Bytes())\n\treturn\n}\n\ntype message struct {\n\tPRIVAL int\n\tHOSTNAME string\n\tPROCID string\n\tMSGID string\n\tGROUP string\n\tSTREAM string\n\tMSG string\n\tSOURCE string\n\tTIMESTAMP string\n}\n\ntype bufferedWriter interface {\n\tFlush() error\n}\n\ntype bufferedConn struct {\n\tbuf *bufio.Writer\n\tconn net.Conn\n}\n\nfunc (c bufferedConn) Close() error { return c.conn.Close() }\nfunc (c bufferedConn) Flush() error { return c.buf.Flush() }\nfunc (c bufferedConn) Write(b []byte) (int, error) { return c.buf.Write(b) }\n\nfunc dialWriter(network string, address string, config *tls.Config) (w io.Writer, err error) {\n\tvar conn net.Conn\n\n\tif network == \"tls\" {\n\t\tconn, err = tls.Dial(\"tcp\", address, config)\n\t} else {\n\t\tconn, err = net.Dial(network, address)\n\t}\n\n\tif err == nil {\n\t\tswitch network {\n\t\tcase \"udp\", \"udp4\", \"udp6\", \"unixgram\", \"unixpacket\":\n\t\t\tw = conn\n\t\tdefault:\n\t\t\tw = bufferedConn{\n\t\t\t\tconn: conn,\n\t\t\t\tbuf: bufio.NewWriter(conn),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>better guess the syslog configuration<commit_after>package syslog\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/segmentio\/ecs-logs\/lib\"\n)\n\nconst (\n\tDefaultTemplate = \"<{{.PRIVAL}}>{{.TIMESTAMP}} {{.GROUP}}[{{.STREAM}}]: {{.MSG}}\"\n)\n\ntype WriterConfig struct {\n\tNetwork string\n\tAddress string\n\tTemplate string\n\tTimeFormat string\n\tTLS *tls.Config\n}\n\nfunc NewWriter(group string, stream string) (w ecslogs.Writer, err error) {\n\tvar c WriterConfig\n\tvar s string\n\tvar u *url.URL\n\n\tif s = os.Getenv(\"SYSLOG_URL\"); len(s) != 0 {\n\t\tif u, err = url.Parse(s); err != nil {\n\t\t\terr = fmt.Errorf(\"invalid syslog URL: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Network = u.Scheme\n\t\tc.Address = u.Host\n\t}\n\n\tc.Template = os.Getenv(\"SYSLOG_TEMPLATE\")\n\tc.TimeFormat = os.Getenv(\"SYSLOG_TIME_FORMAT\")\n\n\treturn DialWriter(c)\n}\n\nfunc DialWriter(config WriterConfig) (w ecslogs.Writer, err error) {\n\tvar netopts []string\n\tvar addropts []string\n\tvar backend io.Writer\n\n\tif len(config.Network) != 0 {\n\t\tnetopts = []string{config.Network}\n\t} else {\n\t\t\/\/ When starting with a '\/' we assume it's gonna be a file path,\n\t\t\/\/ otherwise we fallback to trying a TLS connection so we don't\n\t\t\/\/ implicitly send logs over an unsecured link.\n\t\tif len(config.Address) == 0 || strings.HasPrefix(config.Address, \"\/\") {\n\t\t\tconfig.Network = \"unix\"\n\t\t\tnetopts = []string{\"unixgram\", \"unix\"}\n\t\t} else {\n\t\t\tnetopts = []string{\"tls\"}\n\t\t}\n\t}\n\n\tif len(config.Address) != 0 {\n\t\taddropts = []string{config.Address}\n\t} else if strings.HasPrefix(config.Network, \"unix\") {\n\t\t\/\/ This was copied from the standard log\/syslog package, they do the same\n\t\t\/\/ and try to guess at runtime where and what kind of socket syslogd is\n\t\t\/\/ using.\n\t\taddropts = []string{\"\/dev\/log\", \"\/var\/run\/syslog\", \"\/var\/run\/log\"}\n\t} else {\n\t\t\/\/ The config doesn't point to a unix domain socket, falling back to trying\n\t\t\/\/ to connect to syslogd over a network interface.\n\t\taddropts = []string{\"localhost:514\"}\n\t}\n\nconnect:\n\tfor _, n := range netopts {\n\t\tfor _, a := range addropts {\n\t\t\tif backend, err = dialWriter(n, a, config.TLS); err == nil {\n\t\t\t\tbreak connect\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw = newWriter(writerConfig{\n\t\tbackend: backend,\n\t\ttemplate: config.Template,\n\t\ttimeFormat: config.TimeFormat,\n\t})\n\treturn\n}\n\ntype writerConfig struct {\n\tbackend io.Writer\n\ttemplate string\n\ttimeFormat string\n}\n\nfunc newWriter(config writerConfig) *writer {\n\tvar out func(*writer, message) error\n\tvar flush func() error\n\n\tif len(config.timeFormat) == 0 {\n\t\tconfig.timeFormat = time.Stamp\n\t}\n\n\tif len(config.template) == 0 {\n\t\tconfig.template = DefaultTemplate\n\t}\n\n\tswitch b := config.backend.(type) {\n\tcase bufferedWriter:\n\t\tout, flush = (*writer).directWrite, b.Flush\n\tdefault:\n\t\tout, flush = (*writer).bufferedWrite, func() error { return nil }\n\t}\n\n\treturn &writer{\n\t\twriterConfig: config,\n\t\tflush: flush,\n\t\tout: out,\n\t\ttpl: newWriterTemplate(config.template),\n\t}\n}\n\nfunc newWriterTemplate(format string) *template.Template {\n\tif !strings.HasSuffix(format, \"\\n\") {\n\t\tformat += \"\\n\"\n\t}\n\tt := template.New(\"syslog\")\n\ttemplate.Must(t.Parse(format))\n\treturn t\n}\n\ntype writer struct {\n\twriterConfig\n\tbuf bytes.Buffer\n\ttpl *template.Template\n\tout func(*writer, message) error\n\tflush func() error\n}\n\nfunc (w *writer) Close() (err error) {\n\tif c, ok := w.backend.(io.Closer); ok {\n\t\terr = c.Close()\n\t}\n\treturn\n}\n\nfunc (w *writer) WriteMessageBatch(batch []ecslogs.Message) (err error) {\n\tfor _, msg := range batch {\n\t\tif err = w.write(msg); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn w.flush()\n}\n\nfunc (w *writer) WriteMessage(msg ecslogs.Message) (err error) {\n\tif err = w.write(msg); err == nil {\n\t\terr = w.flush()\n\t}\n\treturn\n}\n\nfunc (w *writer) write(msg ecslogs.Message) (err error) {\n\tm := message{\n\t\tPRIVAL: int(msg.Level) + 8, \/\/ +8 is for user-level messages facility\n\t\tHOSTNAME: msg.Host,\n\t\tMSGID: msg.ID,\n\t\tGROUP: msg.Group,\n\t\tSTREAM: msg.Stream,\n\t\tMSG: msg.Content,\n\t\tTIMESTAMP: msg.Time.Format(w.timeFormat),\n\t}\n\n\tif len(m.HOSTNAME) == 0 {\n\t\tm.HOSTNAME = \"-\"\n\t}\n\n\tif len(m.MSGID) == 0 {\n\t\tm.MSGID = \"-\"\n\t}\n\n\tif msg.PID == 0 {\n\t\tm.PROCID = \"-\"\n\t} else {\n\t\tm.PROCID = strconv.Itoa(msg.PID)\n\t}\n\n\tif len(msg.File) != 0 || len(msg.Func) != 0 {\n\t\tm.SOURCE = fmt.Sprintf(\"%s:%s:%d\", msg.File, msg.Func, msg.Line)\n\t}\n\n\treturn w.out(w, m)\n}\n\nfunc (w *writer) directWrite(m message) (err error) {\n\treturn w.tpl.Execute(w.backend, m)\n}\n\nfunc (w *writer) bufferedWrite(m message) (err error) {\n\tw.buf.Reset()\n\tw.tpl.Execute(&w.buf, m)\n\t_, err = w.backend.Write(w.buf.Bytes())\n\treturn\n}\n\ntype message struct {\n\tPRIVAL int\n\tHOSTNAME string\n\tPROCID string\n\tMSGID string\n\tGROUP string\n\tSTREAM string\n\tMSG string\n\tSOURCE string\n\tTIMESTAMP string\n}\n\ntype bufferedWriter interface {\n\tFlush() error\n}\n\ntype bufferedConn struct {\n\tbuf *bufio.Writer\n\tconn net.Conn\n}\n\nfunc (c bufferedConn) Close() error { return c.conn.Close() }\nfunc (c bufferedConn) Flush() error { return c.buf.Flush() }\nfunc (c bufferedConn) Write(b []byte) (int, error) { return c.buf.Write(b) }\n\nfunc dialWriter(network string, address string, config *tls.Config) (w io.Writer, err error) {\n\tvar conn net.Conn\n\n\tif network == \"tls\" {\n\t\tconn, err = tls.Dial(\"tcp\", address, config)\n\t} else {\n\t\tconn, err = net.Dial(network, address)\n\t}\n\n\tif err == nil {\n\t\tswitch network {\n\t\tcase \"udp\", \"udp4\", \"udp6\", \"unixgram\", \"unixpacket\":\n\t\t\tw = conn\n\t\tdefault:\n\t\t\tw = bufferedConn{\n\t\t\t\tconn: conn,\n\t\t\t\tbuf: bufio.NewWriter(conn),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package entry\n\nimport (\n\t\"bufio\"\n\t\"github.com\/ghthor\/gospec\"\n\t. \"github.com\/ghthor\/gospec\"\n\t\"github.com\/ghthor\/journal\/idea\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\nfunc init() {\n\tvar err error\n\tif _, err = os.Stat(\"_test\/\"); os.IsNotExist(err) {\n\t\terr = os.Mkdir(\"_test\/\", 0755)\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc DescribeAnEntry(c gospec.Context) {\n\ttd, err := ioutil.TempDir(\"_test\", \"entry_\")\n\tc.Assume(err, IsNil)\n\n\tne := New(td)\n\n\tc.Specify(\"an entry\", func() {\n\t\tc.Specify(\"can be opened\", func() {\n\t\t\tc.Specify(\"at a specific time\", func() {\n\t\t\t\tt := time.Date(2006, time.January, 1, 1, 0, 0, 0, time.UTC)\n\t\t\t\toe, err := ne.Open(func() time.Time {\n\t\t\t\t\treturn t\n\t\t\t\t}, nil)\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\tc.Expect(oe.OpenedAt(), Equals, t)\n\t\t\t})\n\n\t\t\tc.Specify(\"with a list of ideas\", func() {\n\t\t\t\tideas := []idea.Idea{{\n\t\t\t\t\tName: \"Active Idea\",\n\t\t\t\t\tStatus: idea.IS_Active,\n\t\t\t\t\tBody: \"Some text\\n\",\n\t\t\t\t}, {\n\t\t\t\t\tName: \"Another Idea\",\n\t\t\t\t\tStatus: idea.IS_Active,\n\t\t\t\t\tBody: \"Some other text\\n\",\n\t\t\t\t}}\n\n\t\t\t\toe, err := ne.Open(time.Now, ideas)\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\tfor i, idea := range oe.Ideas() {\n\t\t\t\t\tc.Expect(idea, Equals, ideas[i])\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t\tt := time.Date(2006, time.January, 1, 1, 0, 0, 0, time.UTC)\n\n\t\tideas := []idea.Idea{{\n\t\t\tName: \"Active Idea\",\n\t\t\tStatus: idea.IS_Active,\n\t\t\tBody: \"Some text\\n\",\n\t\t}, {\n\t\t\tName: \"Another Idea\",\n\t\t\tStatus: idea.IS_Active,\n\t\t\tBody: \"Some other text\\n\",\n\t\t}}\n\n\t\tc.Specify(\"that is open\", func() {\n\t\t\toe, err := ne.Open(func() time.Time {\n\t\t\t\treturn t\n\t\t\t}, ideas)\n\t\t\tc.Assume(err, IsNil)\n\t\t\tc.Assume(oe.OpenedAt(), Equals, t)\n\n\t\t\tdefer func() {\n\t\t\t\t_, _, err := oe.Close()\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\t\/\/ Verify that the *os.File was closed\n\t\t\t\tc.Expect(oe.(*openEntry).file.Close(), Not(IsNil))\n\t\t\t}()\n\n\t\t\tfilename := filepath.Join(td, t.Format(filenameLayout))\n\n\t\t\tc.Specify(\"is a file\", func() {\n\t\t\t\t_, err := os.Stat(filename)\n\t\t\t\tc.Expect(os.IsNotExist(err), IsFalse)\n\n\t\t\t\tactualBytes, err := ioutil.ReadFile(filename)\n\t\t\t\tc.Expect(err, IsNil)\n\t\t\t\tc.Expect(string(actualBytes), Equals,\n\t\t\t\t\t`Sun Jan 1 01:00:00 UTC 2006\n\n#~ Title(will be used as commit message)\nTODO Make this some random quote or something stupid\n\n## [active] Active Idea\nSome text\n\n## [active] Another Idea\nSome other text\n`)\n\t\t\t})\n\n\t\t\tc.Specify(\"will have the time opened as the first line of the entry\", func() {\n\t\t\t\tf, err := os.OpenFile(filename, os.O_RDONLY, 0600)\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\tdefer f.Close()\n\n\t\t\t\tscanner := bufio.NewScanner(f)\n\t\t\t\tc.Assume(scanner.Scan(), IsTrue)\n\t\t\t\tc.Expect(scanner.Text(), Equals, t.Format(time.UnixDate))\n\t\t\t})\n\n\t\t\tc.Specify(\"will have a list of ideas appended to the entry\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"can be editted by a text editor\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"can be closed\", func() {\n\t\t\t})\n\t\t})\n\n\t\tc.Specify(\"that is closed\", func() {\n\t\t\tc.Specify(\"will have all ideas removed from the entry\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"will have the time closed as the last line of the entry\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"can be commited to the git repository\", func() {\n\t\t\t})\n\t\t})\n\t})\n}\n<commit_msg>Specified that the Ideas are appended to the entries file<commit_after>package entry\n\nimport (\n\t\"bufio\"\n\t\"github.com\/ghthor\/gospec\"\n\t. \"github.com\/ghthor\/gospec\"\n\t\"github.com\/ghthor\/journal\/idea\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\nfunc init() {\n\tvar err error\n\tif _, err = os.Stat(\"_test\/\"); os.IsNotExist(err) {\n\t\terr = os.Mkdir(\"_test\/\", 0755)\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc DescribeAnEntry(c gospec.Context) {\n\ttd, err := ioutil.TempDir(\"_test\", \"entry_\")\n\tc.Assume(err, IsNil)\n\n\tne := New(td)\n\n\tc.Specify(\"an entry\", func() {\n\t\tc.Specify(\"can be opened\", func() {\n\t\t\tc.Specify(\"at a specific time\", func() {\n\t\t\t\tt := time.Date(2006, time.January, 1, 1, 0, 0, 0, time.UTC)\n\t\t\t\toe, err := ne.Open(func() time.Time {\n\t\t\t\t\treturn t\n\t\t\t\t}, nil)\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\tc.Expect(oe.OpenedAt(), Equals, t)\n\t\t\t})\n\n\t\t\tc.Specify(\"with a list of ideas\", func() {\n\t\t\t\tideas := []idea.Idea{{\n\t\t\t\t\tName: \"Active Idea\",\n\t\t\t\t\tStatus: idea.IS_Active,\n\t\t\t\t\tBody: \"Some text\\n\",\n\t\t\t\t}, {\n\t\t\t\t\tName: \"Another Idea\",\n\t\t\t\t\tStatus: idea.IS_Active,\n\t\t\t\t\tBody: \"Some other text\\n\",\n\t\t\t\t}}\n\n\t\t\t\toe, err := ne.Open(time.Now, ideas)\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\tfor i, idea := range oe.Ideas() {\n\t\t\t\t\tc.Expect(idea, Equals, ideas[i])\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t\tt := time.Date(2006, time.January, 1, 1, 0, 0, 0, time.UTC)\n\n\t\tideas := []idea.Idea{{\n\t\t\tName: \"Active Idea\",\n\t\t\tStatus: idea.IS_Active,\n\t\t\tBody: \"Some text\\n\",\n\t\t}, {\n\t\t\tName: \"Another Idea\",\n\t\t\tStatus: idea.IS_Active,\n\t\t\tBody: \"Some other text\\n\",\n\t\t}}\n\n\t\tc.Specify(\"that is open\", func() {\n\t\t\toe, err := ne.Open(func() time.Time {\n\t\t\t\treturn t\n\t\t\t}, ideas)\n\t\t\tc.Assume(err, IsNil)\n\t\t\tc.Assume(oe.OpenedAt(), Equals, t)\n\n\t\t\tdefer func() {\n\t\t\t\t_, _, err := oe.Close()\n\t\t\t\tc.Assume(err, IsNil)\n\t\t\t\t\/\/ Verify that the *os.File was closed\n\t\t\t\tc.Expect(oe.(*openEntry).file.Close(), Not(IsNil))\n\t\t\t}()\n\n\t\t\tfilename := filepath.Join(td, t.Format(filenameLayout))\n\n\t\t\tc.Specify(\"is a file\", func() {\n\t\t\t\t_, err := os.Stat(filename)\n\t\t\t\tc.Expect(os.IsNotExist(err), IsFalse)\n\n\t\t\t\tactualBytes, err := ioutil.ReadFile(filename)\n\t\t\t\tc.Expect(err, IsNil)\n\t\t\t\tc.Expect(string(actualBytes), Equals,\n\t\t\t\t\t`Sun Jan 1 01:00:00 UTC 2006\n\n#~ Title(will be used as commit message)\nTODO Make this some random quote or something stupid\n\n## [active] Active Idea\nSome text\n\n## [active] Another Idea\nSome other text\n`)\n\t\t\t})\n\n\t\t\tf, err := os.OpenFile(filename, os.O_RDONLY, 0600)\n\t\t\tc.Assume(err, IsNil)\n\t\t\tdefer f.Close()\n\n\t\t\tc.Specify(\"will have the time opened as the first line of the entry\", func() {\n\t\t\t\tscanner := bufio.NewScanner(f)\n\t\t\t\tc.Assume(scanner.Scan(), IsTrue)\n\t\t\t\tc.Expect(scanner.Text(), Equals, t.Format(time.UnixDate))\n\t\t\t})\n\n\t\t\tc.Specify(\"will have a list of ideas appended to the entry\", func() {\n\t\t\t\tscanner := idea.NewIdeaScanner(f)\n\t\t\t\tfor i := 0; i < len(ideas); i++ {\n\t\t\t\t\tc.Assume(scanner.Scan(), IsTrue)\n\t\t\t\t\tc.Expect(*scanner.Idea(), Equals, ideas[i])\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tc.Specify(\"can be editted by a text editor\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"can be closed\", func() {\n\t\t\t})\n\t\t})\n\n\t\tc.Specify(\"that is closed\", func() {\n\t\t\tc.Specify(\"will have all ideas removed from the entry\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"will have the time closed as the last line of the entry\", func() {\n\t\t\t})\n\t\t\tc.Specify(\"can be commited to the git repository\", func() {\n\t\t\t})\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package systemtokens\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/tokens\"\n\t\"github.com\/rancher\/rancher\/pkg\/randomtoken\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/sirupsen\/logrus\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc NewSystemTokensFromScale(mgmt *config.ScaledContext) *SystemTokens {\n\treturn &SystemTokens{\n\t\ttokenLister: mgmt.Management.Tokens(\"\").Controller().Lister(),\n\t\ttokenClient: mgmt.Management.Tokens(\"\"),\n\t\ttokenCache: map[string]string{},\n\t\tisHA: mgmt.PeerManager != nil,\n\t\thaIdentifier: getIdentifier(),\n\t}\n}\n\ntype SystemTokens struct {\n\tisHA bool\n\ttokenClient v3.TokenInterface\n\ttokenLister v3.TokenLister\n\thaIdentifier string\n\ttokenCache map[string]string\n\ttokenCacheMutex sync.RWMutex\n}\n\n\/\/ Multiple rancher management servers might attempt to use the same key name\n\/\/ Because the token is hashed, any regeneration of an existing token name invalidates all previous tokens\n\/\/ So to avoid those HA bugs, we need to uniquely create and re-use each token for each\n\/\/ requesting management server, which we do here by generating a random string.\nfunc getIdentifier() string {\n\trandString, err := randomtoken.Generate()\n\tif err != nil {\n\t\treturn strconv.Itoa(rand.Intn(10000))\n\t}\n\treturn randString[0:5]\n}\n\n\/\/ EnsureSystemToken gets or creates tokens for management use, and keeps them in memory inside contexts.\n\/\/ Appends identifier to key name\n\/\/ TTL defaults to 1 hour, after that this method will auto-refresh. If your token will be in use for more\n\/\/ than one hour without calling this method again you must pass in an overrideTTL.\n\/\/ However, the overrideTTL must not be 0, otherwise the token will never be cleaned up.\nfunc (t *SystemTokens) EnsureSystemToken(key, description, kind, username string, overrideTTL *int64) (string, error) {\n\tif overrideTTL != nil && *overrideTTL == 0 {\n\t\treturn \"\", errors.New(\"TTL for system token must not be zero\") \/\/ no way to cleanup token\n\t}\n\tkey = fmt.Sprintf(\"%s-%s\", key, t.haIdentifier) \/\/ append hashed identifier, see getIdentifier\n\ttoken, err := t.tokenLister.Get(\"\", key)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn \"\", err\n\t}\n\n\tif err == nil && !tokens.IsExpired(*token) {\n\t\tt.tokenCacheMutex.RLock()\n\t\tval, ok := t.tokenCache[key]\n\t\tt.tokenCacheMutex.RUnlock()\n\t\tif ok {\n\t\t\treturn val, nil\n\t\t}\n\t}\n\t\/\/ needs fresh token because its missing or expired\n\tval, err := t.createOrUpdateSystemToken(key, description, kind, username, overrideTTL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tt.tokenCacheMutex.Lock()\n\tdefer t.tokenCacheMutex.Unlock()\n\tt.tokenCache[key] = val\n\treturn val, nil\n}\n\n\/\/ Creates token obj with hashed token, returns token. Overwrites if pre-existing.\nfunc (t *SystemTokens) createOrUpdateSystemToken(tokenName, description, kind, userName string, overrideTTL *int64) (string, error) {\n\tif strings.HasPrefix(tokenName, \"token-\") {\n\t\treturn \"\", errors.New(\"token names can't start with token-\")\n\t}\n\tkey, err := randomtoken.Generate()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"failed to generate token key\")\n\t}\n\n\ttoken, err := t.tokenLister.Get(\"\", tokenName)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn \"\", err\n\t}\n\tif token != nil {\n\t\ttoken.Token = key\n\t\terr = tokens.ConvertTokenKeyToHash(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlogrus.Infof(\"Updating system token for %v, token: %v\", userName, tokenName)\n\t\ttoken, err = t.tokenClient.Update(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\ttoken = &v3.Token{\n\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\tName: tokenName,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\ttokens.UserIDLabel: userName,\n\t\t\t\t\ttokens.TokenKindLabel: kind,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTTLMillis: 3600000, \/\/ 1 hour, token purge daemon will cleanup\n\t\t\tDescription: description,\n\t\t\tUserID: userName,\n\t\t\tAuthProvider: \"local\",\n\t\t\tIsDerived: true,\n\t\t\tToken: key,\n\t\t}\n\t\tif overrideTTL != nil {\n\t\t\ttoken.TTLMillis = *overrideTTL\n\t\t}\n\t\tif t.isHA {\n\t\t\tif token.Annotations == nil {\n\t\t\t\ttoken.Annotations = make(map[string]string)\n\t\t\t}\n\t\t\t\/\/ For debugging purposes we set hostname as annotation, which in HA is the pod name\n\t\t\ttoken.Annotations[tokens.HAIdentifier] = os.Getenv(\"HOSTNAME\")\n\t\t}\n\n\t\terr = tokens.ConvertTokenKeyToHash(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlogrus.Infof(\"Creating system token for %v, token: %v\", userName, tokenName)\n\t\ttoken, err = t.tokenClient.Create(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn token.Name + \":\" + key, nil\n}\n<commit_msg>Fixes system-token cache create bug<commit_after>package systemtokens\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/tokens\"\n\t\"github.com\/rancher\/rancher\/pkg\/randomtoken\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/sirupsen\/logrus\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tv1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc NewSystemTokensFromScale(mgmt *config.ScaledContext) *SystemTokens {\n\treturn &SystemTokens{\n\t\ttokenLister: mgmt.Management.Tokens(\"\").Controller().Lister(),\n\t\ttokenClient: mgmt.Management.Tokens(\"\"),\n\t\ttokenCache: map[string]string{},\n\t\tisHA: mgmt.PeerManager != nil,\n\t\thaIdentifier: getIdentifier(),\n\t}\n}\n\ntype SystemTokens struct {\n\tisHA bool\n\ttokenClient v3.TokenInterface\n\ttokenLister v3.TokenLister\n\thaIdentifier string\n\ttokenCache map[string]string\n\ttokenCacheMutex sync.RWMutex\n}\n\n\/\/ Multiple rancher management servers might attempt to use the same token name\n\/\/ Because the token is hashed, any regeneration of an existing token name invalidates all previous tokens\n\/\/ So to avoid those HA bugs, we need to uniquely create and re-use each token for each\n\/\/ requesting management server, which we do here by generating a random string.\nfunc getIdentifier() string {\n\trandString, err := randomtoken.Generate()\n\tif err != nil {\n\t\treturn strconv.Itoa(rand.Intn(10000))\n\t}\n\treturn randString[0:5]\n}\n\n\/\/ EnsureSystemToken gets or creates tokens for management use, and keeps them in memory inside contexts.\n\/\/ Appends identifier to token name\n\/\/ TTL defaults to 1 hour, after that this method will auto-refresh. If your token will be in use for more\n\/\/ than one hour without calling this method again you must pass in an overrideTTL.\n\/\/ However, the overrideTTL must not be 0, otherwise the token will never be cleaned up.\nfunc (t *SystemTokens) EnsureSystemToken(tokenName, description, kind, username string, overrideTTL *int64) (string, error) {\n\tif overrideTTL != nil && *overrideTTL == 0 {\n\t\treturn \"\", errors.New(\"TTL for system token must not be zero\") \/\/ no way to cleanup token\n\t}\n\ttokenName = fmt.Sprintf(\"%s-%s\", tokenName, t.haIdentifier) \/\/ append hashed identifier, see getIdentifier\n\ttoken, err := t.tokenLister.Get(\"\", tokenName)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn \"\", err\n\t}\n\n\tif err == nil && !tokens.IsExpired(*token) {\n\t\tif tokenVal := t.getTokenValue(tokenName); tokenVal != \"\" {\n\t\t\treturn tokenVal, nil\n\t\t}\n\t}\n\t\/\/ needs fresh token because its missing or expired\n\tval, err := t.createOrUpdateSystemToken(tokenName, description, kind, username, overrideTTL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn val, nil\n}\n\nfunc (t *SystemTokens) getTokenValue(tokenName string) string {\n\tt.tokenCacheMutex.RLock()\n\tval, ok := t.tokenCache[tokenName]\n\tt.tokenCacheMutex.RUnlock()\n\tif ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ Creates token obj with hashed token, returns token. Overwrites if pre-existing.\nfunc (t *SystemTokens) createOrUpdateSystemToken(tokenName, description, kind, userName string, overrideTTL *int64) (string, error) {\n\tif strings.HasPrefix(tokenName, \"token-\") {\n\t\treturn \"\", errors.New(\"token names can't start with token-\")\n\t}\n\tkey, err := randomtoken.Generate()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"failed to generate token key\")\n\t}\n\n\ttoken, err := t.tokenLister.Get(\"\", tokenName)\n\tif err != nil {\n\t\tif !apierrors.IsNotFound(err) {\n\t\t\treturn \"\", err\n\t\t}\n\t\t\/\/ not found error, make new token\n\t\ttoken = &v3.Token{\n\t\t\tObjectMeta: v1.ObjectMeta{\n\t\t\t\tName: tokenName,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\ttokens.UserIDLabel: userName,\n\t\t\t\t\ttokens.TokenKindLabel: kind,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTTLMillis: 3600000, \/\/ 1 hour, token purge daemon will cleanup\n\t\t\tDescription: description,\n\t\t\tUserID: userName,\n\t\t\tAuthProvider: \"local\",\n\t\t\tIsDerived: true,\n\t\t\tToken: key,\n\t\t}\n\t\tif overrideTTL != nil {\n\t\t\ttoken.TTLMillis = *overrideTTL\n\t\t}\n\t\tif t.isHA {\n\t\t\tif token.Annotations == nil {\n\t\t\t\ttoken.Annotations = make(map[string]string)\n\t\t\t}\n\t\t\t\/\/ For debugging purposes we set hostname as annotation, which in HA is the pod name\n\t\t\ttoken.Annotations[tokens.HAIdentifier] = os.Getenv(\"HOSTNAME\")\n\t\t}\n\n\t\terr = tokens.ConvertTokenKeyToHash(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlogrus.Infof(\"Creating system token for %v, token: %v\", userName, tokenName)\n\t\ttoken, err = t.tokenClient.Create(token)\n\t\tif err != nil {\n\t\t\t\/\/ race condition on create, someone else just created so return that val instead\n\t\t\tif apierrors.IsAlreadyExists(err) {\n\t\t\t\tif tokenVal := t.getTokenValue(tokenName); tokenVal != \"\" {\n\t\t\t\t\treturn tokenVal, nil\n\t\t\t\t}\n\t\t\t\treturn \"\", fmt.Errorf(\"system token not found in cache: %s\", tokenName)\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\ttoken.Token = key\n\t\terr = tokens.ConvertTokenKeyToHash(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlogrus.Infof(\"Updating system token for %v, token: %v\", userName, tokenName)\n\t\ttoken, err = t.tokenClient.Update(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\t\/\/ Fresh token needs saving in cache\n\tfullVal := fmt.Sprintf(\"%s:%s\", token.Name, key)\n\tt.tokenCacheMutex.Lock()\n\tdefer t.tokenCacheMutex.Unlock()\n\tt.tokenCache[tokenName] = fullVal\n\treturn fullVal, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate sh -c \"go run github.com\/gomatic\/renderizer --name=node --resource=node --type=Node --title=Node --article=a swagger_operations.tmpl > node_swagger.go\"\n\/\/go:generate sh -c \"go run github.com\/gomatic\/renderizer --name=node --resource=node --type=Node --title=Node swagger_definitions.tmpl > node_swagger.json\"\n\n\/*\n * Copyright (C) 2020 Sylvain Baubeau\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy ofthe License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specificlanguage governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage server\n\nimport (\n\t\"time\"\n\n\t\"github.com\/skydive-project\/skydive\/api\/types\"\n\t\"github.com\/skydive-project\/skydive\/graffiti\/api\/rest\"\n\tapi \"github.com\/skydive-project\/skydive\/graffiti\/api\/server\"\n\t\"github.com\/skydive-project\/skydive\/graffiti\/graph\"\n\tshttp \"github.com\/skydive-project\/skydive\/graffiti\/http\"\n)\n\n\/\/ NodeResourceHandler aims to creates and manage a new Alert.\ntype NodeResourceHandler struct {\n\trest.ResourceHandler\n}\n\n\/\/ NodeAPIHandler aims to exposes the Alert API.\ntype NodeAPIHandler struct {\n\tg *graph.Graph\n}\n\n\/\/ New creates a new node\nfunc (h *NodeAPIHandler) New() rest.Resource {\n\treturn &types.Node{}\n}\n\n\/\/ Name returns resource name \"node\"\nfunc (h *NodeAPIHandler) Name() string {\n\treturn \"node\"\n}\n\n\/\/ Index returns the list of existing nodes\nfunc (h *NodeAPIHandler) Index() map[string]rest.Resource {\n\th.g.RLock()\n\tnodes := h.g.GetNodes(nil)\n\tnodeMap := make(map[string]rest.Resource, len(nodes))\n\tfor _, node := range nodes {\n\t\tn := types.Node(*node)\n\t\tnodeMap[string(node.ID)] = &n\n\t}\n\th.g.RUnlock()\n\treturn nodeMap\n}\n\n\/\/ Get returns a node with the specified id\nfunc (h *NodeAPIHandler) Get(id string) (rest.Resource, bool) {\n\th.g.RLock()\n\tdefer h.g.RUnlock()\n\n\tn := h.g.GetNode(graph.Identifier(id))\n\tif n == nil {\n\t\treturn nil, false\n\t}\n\tnode := (*types.Node)(n)\n\treturn node, true\n}\n\n\/\/ Decorate the specified node\nfunc (h *NodeAPIHandler) Decorate(resource rest.Resource) {\n}\n\n\/\/ Create adds the specified node to the graph\nfunc (h *NodeAPIHandler) Create(resource rest.Resource, createOpts *rest.CreateOptions) error {\n\tnode := resource.(*types.Node)\n\tgraphNode := graph.Node(*node)\n\tif graphNode.CreatedAt.IsZero() {\n\t\tgraphNode.CreatedAt = graph.Time(time.Now())\n\t}\n\tif graphNode.UpdatedAt.IsZero() {\n\t\tgraphNode.UpdatedAt = graphNode.CreatedAt\n\t}\n\tif graphNode.Origin == \"\" {\n\t\tgraphNode.Origin = h.g.GetOrigin()\n\t}\n\tif graphNode.Metadata == nil {\n\t\tgraphNode.Metadata = graph.Metadata{}\n\t}\n\n\th.g.Lock()\n\terr := h.g.AddNode(&graphNode)\n\th.g.Unlock()\n\treturn err\n}\n\n\/\/ Delete the node with the specified id from the graph\nfunc (h *NodeAPIHandler) Delete(id string) error {\n\th.g.Lock()\n\tdefer h.g.Unlock()\n\n\tnode := h.g.GetNode(graph.Identifier(id))\n\tif node == nil {\n\t\treturn rest.ErrNotFound\n\t}\n\n\treturn h.g.DelNode(node)\n}\n\n\/\/ Update a node metadata\nfunc (h *NodeAPIHandler) Update(id string, resource rest.Resource) (rest.Resource, bool, error) {\n\th.g.Lock()\n\tdefer h.g.Unlock()\n\n\t\/\/ Current node, to be updated\n\tn := h.g.GetNode(graph.Identifier(id))\n\tif n == nil {\n\t\treturn nil, false, rest.ErrNotFound\n\t}\n\n\t\/\/ Node containing the metadata updated\n\tpatchedNode := resource.(*types.Node)\n\n\t\/\/ Do not modify\/replace Metadata.(TID|Name|Type), use actual node values\n\tif actualTID, _ := n.Metadata.GetFieldString(\"TID\"); actualTID != \"\" {\n\t\tpatchedNode.Metadata.SetField(\"TID\", actualTID)\n\t}\n\tactualName, _ := n.Metadata.GetFieldString(\"Name\")\n\tpatchedNode.Metadata.SetField(\"Name\", actualName)\n\tactualType, _ := n.Metadata.GetFieldString(\"Type\")\n\tpatchedNode.Metadata.SetField(\"Type\", actualType)\n\n\t\/\/ Update actual node Metadata with new patched node\n\tpreviousRevision := n.Revision\n\tif err := h.g.SetMetadata(n, patchedNode.Metadata); err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn (*types.Node)(n), n.Revision != previousRevision, nil\n}\n\n\/\/ RegisterNodeAPI registers the node API\nfunc RegisterNodeAPI(apiServer *api.Server, g *graph.Graph, authBackend shttp.AuthenticationBackend) (*NodeAPIHandler, error) {\n\tnodeAPIHandler := &NodeAPIHandler{\n\t\tg: g,\n\t}\n\tif err := apiServer.RegisterAPIHandler(nodeAPIHandler, authBackend); err != nil {\n\t\treturn nil, err\n\t}\n\treturn nodeAPIHandler, nil\n}\n<commit_msg>api: create node and edge with the 'api' service type<commit_after>\/\/go:generate sh -c \"go run github.com\/gomatic\/renderizer --name=node --resource=node --type=Node --title=Node --article=a swagger_operations.tmpl > node_swagger.go\"\n\/\/go:generate sh -c \"go run github.com\/gomatic\/renderizer --name=node --resource=node --type=Node --title=Node swagger_definitions.tmpl > node_swagger.json\"\n\n\/*\n * Copyright (C) 2020 Sylvain Baubeau\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy ofthe License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specificlanguage governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage server\n\nimport (\n\t\"time\"\n\n\t\"github.com\/skydive-project\/skydive\/api\/types\"\n\t\"github.com\/skydive-project\/skydive\/graffiti\/api\/rest\"\n\tapi \"github.com\/skydive-project\/skydive\/graffiti\/api\/server\"\n\t\"github.com\/skydive-project\/skydive\/graffiti\/graph\"\n\tshttp \"github.com\/skydive-project\/skydive\/graffiti\/http\"\n)\n\nconst apiOrigin = \"api\"\n\n\/\/ NodeResourceHandler aims to creates and manage a new Alert.\ntype NodeResourceHandler struct {\n\trest.ResourceHandler\n}\n\n\/\/ NodeAPIHandler aims to exposes the Alert API.\ntype NodeAPIHandler struct {\n\tg *graph.Graph\n}\n\n\/\/ New creates a new node\nfunc (h *NodeAPIHandler) New() rest.Resource {\n\treturn &types.Node{}\n}\n\n\/\/ Name returns resource name \"node\"\nfunc (h *NodeAPIHandler) Name() string {\n\treturn \"node\"\n}\n\n\/\/ Index returns the list of existing nodes\nfunc (h *NodeAPIHandler) Index() map[string]rest.Resource {\n\th.g.RLock()\n\tnodes := h.g.GetNodes(nil)\n\tnodeMap := make(map[string]rest.Resource, len(nodes))\n\tfor _, node := range nodes {\n\t\tn := types.Node(*node)\n\t\tnodeMap[string(node.ID)] = &n\n\t}\n\th.g.RUnlock()\n\treturn nodeMap\n}\n\n\/\/ Get returns a node with the specified id\nfunc (h *NodeAPIHandler) Get(id string) (rest.Resource, bool) {\n\th.g.RLock()\n\tdefer h.g.RUnlock()\n\n\tn := h.g.GetNode(graph.Identifier(id))\n\tif n == nil {\n\t\treturn nil, false\n\t}\n\tnode := (*types.Node)(n)\n\treturn node, true\n}\n\n\/\/ Decorate the specified node\nfunc (h *NodeAPIHandler) Decorate(resource rest.Resource) {\n}\n\n\/\/ Create adds the specified node to the graph\nfunc (h *NodeAPIHandler) Create(resource rest.Resource, createOpts *rest.CreateOptions) error {\n\tnode := resource.(*types.Node)\n\tgraphNode := graph.Node(*node)\n\tif graphNode.CreatedAt.IsZero() {\n\t\tgraphNode.CreatedAt = graph.Time(time.Now())\n\t}\n\tif graphNode.UpdatedAt.IsZero() {\n\t\tgraphNode.UpdatedAt = graphNode.CreatedAt\n\t}\n\tif graphNode.Origin == \"\" {\n\t\tgraphNode.Origin = graph.Origin(h.g.GetHost(), apiOrigin)\n\t}\n\tif graphNode.Metadata == nil {\n\t\tgraphNode.Metadata = graph.Metadata{}\n\t}\n\n\th.g.Lock()\n\terr := h.g.AddNode(&graphNode)\n\th.g.Unlock()\n\treturn err\n}\n\n\/\/ Delete the node with the specified id from the graph\nfunc (h *NodeAPIHandler) Delete(id string) error {\n\th.g.Lock()\n\tdefer h.g.Unlock()\n\n\tnode := h.g.GetNode(graph.Identifier(id))\n\tif node == nil {\n\t\treturn rest.ErrNotFound\n\t}\n\n\treturn h.g.DelNode(node)\n}\n\n\/\/ Update a node metadata\nfunc (h *NodeAPIHandler) Update(id string, resource rest.Resource) (rest.Resource, bool, error) {\n\th.g.Lock()\n\tdefer h.g.Unlock()\n\n\t\/\/ Current node, to be updated\n\tn := h.g.GetNode(graph.Identifier(id))\n\tif n == nil {\n\t\treturn nil, false, rest.ErrNotFound\n\t}\n\n\t\/\/ Node containing the metadata updated\n\tpatchedNode := resource.(*types.Node)\n\n\t\/\/ Do not modify\/replace Metadata.(TID|Name|Type), use actual node values\n\tif actualTID, _ := n.Metadata.GetFieldString(\"TID\"); actualTID != \"\" {\n\t\tpatchedNode.Metadata.SetField(\"TID\", actualTID)\n\t}\n\tactualName, _ := n.Metadata.GetFieldString(\"Name\")\n\tpatchedNode.Metadata.SetField(\"Name\", actualName)\n\tactualType, _ := n.Metadata.GetFieldString(\"Type\")\n\tpatchedNode.Metadata.SetField(\"Type\", actualType)\n\n\t\/\/ Update actual node Metadata with new patched node\n\tpreviousRevision := n.Revision\n\tif err := h.g.SetMetadata(n, patchedNode.Metadata); err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn (*types.Node)(n), n.Revision != previousRevision, nil\n}\n\n\/\/ RegisterNodeAPI registers the node API\nfunc RegisterNodeAPI(apiServer *api.Server, g *graph.Graph, authBackend shttp.AuthenticationBackend) (*NodeAPIHandler, error) {\n\tnodeAPIHandler := &NodeAPIHandler{\n\t\tg: g,\n\t}\n\tif err := apiServer.RegisterAPIHandler(nodeAPIHandler, authBackend); err != nil {\n\t\treturn nil, err\n\t}\n\treturn nodeAPIHandler, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package edit\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/elves\/elvish\/edit\/ui\"\n\t\"github.com\/elves\/elvish\/eval\"\n)\n\n\/\/ This file contains several \"registries\", data structure that are written\n\/\/ during program initialization and later used when initializing the Editor.\n\nvar builtinMaps = map[string]map[string]*BuiltinFn{}\n\n\/\/ registerBuiltins registers builtins under a subnamespace of edit:, to be used\n\/\/ during the initialization of the Editor. It should be called for global\n\/\/ variable initializations to make sure every subnamespace is registered before\n\/\/ makeBindings is ever called.\nfunc registerBuiltins(module string, impls map[string]func(*Editor)) struct{} {\n\tif _, ok := builtinMaps[module]; !ok {\n\t\tbuiltinMaps[module] = make(map[string]*BuiltinFn)\n\t}\n\tfor name, impl := range impls {\n\t\tvar fullName string\n\t\tif module == \"\" {\n\t\t\tfullName = \"edit:\" + eval.FnPrefix + name\n\t\t} else {\n\t\t\tfullName = \"edit:\" + module + \":\" + eval.FnPrefix + name\n\t\t}\n\t\tbuiltinMaps[module][name] = &BuiltinFn{fullName, impl}\n\t}\n\treturn struct{}{}\n}\n\nfunc makeNamespaceFromBuiltins(builtins map[string]*BuiltinFn) eval.Namespace {\n\tns := eval.Namespace{}\n\tfor name, builtin := range builtins {\n\t\tns[eval.FnPrefix+name] = eval.NewPtrVariable(builtin)\n\t}\n\treturn ns\n}\n\nvar keyBindings = map[string]map[ui.Key]eval.CallableValue{}\n\n\/\/ registerBindings registers default bindings for a mode to initialize the\n\/\/ global keyBindings map. Builtin names are resolved in the defaultMod\n\/\/ subnamespace using information from builtinMaps. It should be called in init\n\/\/ functions.\nfunc registerBindings(\n\tmt string, defaultMod string, bindingData map[ui.Key]string) struct{} {\n\n\tif _, ok := keyBindings[mt]; !ok {\n\t\tkeyBindings[mt] = map[ui.Key]eval.CallableValue{}\n\t}\n\tfor key, fullName := range bindingData {\n\t\t\/\/ break fullName into mod and name.\n\t\tvar mod, name string\n\t\tnameParts := strings.SplitN(fullName, \":\", 2)\n\t\tif len(nameParts) == 2 {\n\t\t\tmod, name = nameParts[0], nameParts[1]\n\t\t} else {\n\t\t\tmod, name = defaultMod, nameParts[0]\n\t\t}\n\t\tif m, ok := builtinMaps[mod]; ok {\n\t\t\tif builtin, ok := m[name]; ok {\n\t\t\t\tkeyBindings[mt][key] = builtin\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Internal warning: no such builtin\", name, \"in mod\", mod)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(os.Stderr, \"Internal warning: no such mod:\", mod)\n\t\t}\n\t}\n\treturn struct{}{}\n}\n\nvar variableMakers = map[string]func() eval.Variable{}\n\n\/\/ registerVariables registers a variable, its name and a func used to derive\n\/\/ its value, later to be used during Editor initialization to populate\n\/\/ Editor.variables as well as the edit: namespace.\nfunc registerVariable(name string, maker func() eval.Variable) struct{} {\n\tvariableMakers[name] = maker\n\treturn struct{}{}\n}\n\nfunc makeVariables() map[string]eval.Variable {\n\tm := make(map[string]eval.Variable, len(variableMakers))\n\tfor name, maker := range variableMakers {\n\t\tm[name] = maker()\n\t}\n\treturn m\n}\n<commit_msg>edit: Document purpose of registry.go.<commit_after>package edit\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/elves\/elvish\/edit\/ui\"\n\t\"github.com\/elves\/elvish\/eval\"\n)\n\n\/\/ This file contains several \"registries\", data structure that are written\n\/\/ during program initialization and later used when initializing the Editor.\n\/\/\n\/\/ The purpose of these registries is to decentralize the definition for\n\/\/ builtins, default bindings, and variables (e.g. $edit:prompt). For instance,\n\/\/ the definition for $edit:prompt can live in prompt.go instead of api.go.\n\nvar builtinMaps = map[string]map[string]*BuiltinFn{}\n\n\/\/ registerBuiltins registers builtins under a subnamespace of edit:, to be used\n\/\/ during the initialization of the Editor. It should be called for global\n\/\/ variable initializations to make sure every subnamespace is registered before\n\/\/ makeBindings is ever called.\nfunc registerBuiltins(module string, impls map[string]func(*Editor)) struct{} {\n\tif _, ok := builtinMaps[module]; !ok {\n\t\tbuiltinMaps[module] = make(map[string]*BuiltinFn)\n\t}\n\tfor name, impl := range impls {\n\t\tvar fullName string\n\t\tif module == \"\" {\n\t\t\tfullName = \"edit:\" + eval.FnPrefix + name\n\t\t} else {\n\t\t\tfullName = \"edit:\" + module + \":\" + eval.FnPrefix + name\n\t\t}\n\t\tbuiltinMaps[module][name] = &BuiltinFn{fullName, impl}\n\t}\n\treturn struct{}{}\n}\n\nfunc makeNamespaceFromBuiltins(builtins map[string]*BuiltinFn) eval.Namespace {\n\tns := eval.Namespace{}\n\tfor name, builtin := range builtins {\n\t\tns[eval.FnPrefix+name] = eval.NewPtrVariable(builtin)\n\t}\n\treturn ns\n}\n\nvar keyBindings = map[string]map[ui.Key]eval.CallableValue{}\n\n\/\/ registerBindings registers default bindings for a mode to initialize the\n\/\/ global keyBindings map. Builtin names are resolved in the defaultMod\n\/\/ subnamespace using information from builtinMaps. It should be called in init\n\/\/ functions.\nfunc registerBindings(\n\tmt string, defaultMod string, bindingData map[ui.Key]string) struct{} {\n\n\tif _, ok := keyBindings[mt]; !ok {\n\t\tkeyBindings[mt] = map[ui.Key]eval.CallableValue{}\n\t}\n\tfor key, fullName := range bindingData {\n\t\t\/\/ break fullName into mod and name.\n\t\tvar mod, name string\n\t\tnameParts := strings.SplitN(fullName, \":\", 2)\n\t\tif len(nameParts) == 2 {\n\t\t\tmod, name = nameParts[0], nameParts[1]\n\t\t} else {\n\t\t\tmod, name = defaultMod, nameParts[0]\n\t\t}\n\t\tif m, ok := builtinMaps[mod]; ok {\n\t\t\tif builtin, ok := m[name]; ok {\n\t\t\t\tkeyBindings[mt][key] = builtin\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Internal warning: no such builtin\", name, \"in mod\", mod)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(os.Stderr, \"Internal warning: no such mod:\", mod)\n\t\t}\n\t}\n\treturn struct{}{}\n}\n\nvar variableMakers = map[string]func() eval.Variable{}\n\n\/\/ registerVariables registers a variable, its name and a func used to derive\n\/\/ its value, later to be used during Editor initialization to populate\n\/\/ Editor.variables as well as the edit: namespace.\nfunc registerVariable(name string, maker func() eval.Variable) struct{} {\n\tvariableMakers[name] = maker\n\treturn struct{}{}\n}\n\nfunc makeVariables() map[string]eval.Variable {\n\tm := make(map[string]eval.Variable, len(variableMakers))\n\tfor name, maker := range variableMakers {\n\t\tm[name] = maker()\n\t}\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\tgarden \"github.com\/cloudfoundry-incubator\/garden\/api\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nvar ErrBuildNotActive = errors.New(\"build not yet active\")\n\n\/\/go:generate counterfeiter . BuildDB\ntype BuildDB interface {\n\tGetBuild(int) (db.Build, error)\n\tGetBuildEvents(int, uint) (db.EventSource, error)\n\tStartBuild(int, string, string) (bool, error)\n\n\tAbortBuild(int) error\n\tAbortNotifier(int) (db.Notifier, error)\n}\n\n\/\/go:generate counterfeiter . BuildLocker\ntype BuildLocker interface {\n\tAcquireWriteLockImmediately([]db.NamedLock) (db.Lock, error)\n}\n\nfunc NewDBEngine(engine Engine, buildDB BuildDB, locker BuildLocker) Engine {\n\treturn &dbEngine{\n\t\tengine: engine,\n\n\t\tdb: buildDB,\n\t\tlocker: locker,\n\t}\n}\n\ntype dbEngine struct {\n\tengine Engine\n\n\tdb BuildDB\n\tlocker BuildLocker\n}\n\nfunc (*dbEngine) Name() string {\n\treturn \"db\"\n}\n\nfunc (engine *dbEngine) CreateBuild(build db.Build, plan atc.BuildPlan) (Build, error) {\n\tcreatedBuild, err := engine.engine.CreateBuild(build, plan)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstarted, err := engine.db.StartBuild(build.ID, engine.engine.Name(), createdBuild.Metadata())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !started {\n\t\tcreatedBuild.Abort()\n\t}\n\n\treturn &dbBuild{\n\t\tid: build.ID,\n\n\t\tengine: engine.engine,\n\n\t\tdb: engine.db,\n\t\tlocker: engine.locker,\n\t}, nil\n}\n\nfunc (engine *dbEngine) LookupBuild(build db.Build) (Build, error) {\n\treturn &dbBuild{\n\t\tid: build.ID,\n\n\t\tengine: engine.engine,\n\n\t\tdb: engine.db,\n\t\tlocker: engine.locker,\n\t}, nil\n}\n\ntype dbBuild struct {\n\tid int\n\n\tengine Engine\n\n\tdb BuildDB\n\tlocker BuildLocker\n}\n\nfunc (build *dbBuild) Metadata() string {\n\treturn strconv.Itoa(build.id)\n}\n\nfunc (build *dbBuild) Abort() error {\n\t\/\/ the order below is very important to avoid races with build creation.\n\n\tlock, err := build.locker.AcquireWriteLockImmediately([]db.NamedLock{db.BuildTrackingLock(build.id)})\n\tif err != nil {\n\t\t\/\/ someone else is tracking the build; abort it, which will notify them\n\t\treturn build.db.AbortBuild(build.id)\n\t}\n\n\tdefer lock.Release()\n\n\t\/\/ no one is tracking the build; abort it ourselves\n\n\t\/\/ first save the status so that CreateBuild will see a conflict when it\n\t\/\/ tries to mark the build as started.\n\terr = build.db.AbortBuild(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ reload the model *after* saving the status for the following check to see\n\t\/\/ if it was already started\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if there's an engine, there's a real build to abort\n\tif model.Engine == \"\" {\n\t\t\/\/ otherwise, CreateBuild had not yet tried to start the build, and so it\n\t\t\/\/ will see the conflict when it tries to transition, and abort itself.\n\t\treturn nil\n\t}\n\n\t\/\/ find the real build to abort...\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ ...and abort it.\n\treturn engineBuild.Abort()\n}\n\nfunc (build *dbBuild) Hijack(spec garden.ProcessSpec, io garden.ProcessIO) (garden.Process, error) {\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif model.Engine == \"\" {\n\t\treturn nil, ErrBuildNotActive\n\t}\n\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn engineBuild.Hijack(spec, io)\n}\n\nfunc (build *dbBuild) Resume(logger lager.Logger) error {\n\tlock, err := build.locker.AcquireWriteLockImmediately([]db.NamedLock{db.BuildTrackingLock(build.id)})\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tdefer lock.Release()\n\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif model.Engine == \"\" {\n\t\treturn nil\n\t}\n\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taborts, err := build.db.AbortNotifier(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer aborts.Close()\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-aborts.Notify():\n\t\t\tengineBuild.Abort()\n\t\tcase <-done:\n\t\t}\n\t}()\n\n\treturn engineBuild.Resume(logger)\n}\n<commit_msg>add logging and comments to db engine<commit_after>package engine\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\tgarden \"github.com\/cloudfoundry-incubator\/garden\/api\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nvar ErrBuildNotActive = errors.New(\"build not yet active\")\n\n\/\/go:generate counterfeiter . BuildDB\ntype BuildDB interface {\n\tGetBuild(int) (db.Build, error)\n\tGetBuildEvents(int, uint) (db.EventSource, error)\n\tStartBuild(int, string, string) (bool, error)\n\n\tAbortBuild(int) error\n\tAbortNotifier(int) (db.Notifier, error)\n}\n\n\/\/go:generate counterfeiter . BuildLocker\ntype BuildLocker interface {\n\tAcquireWriteLockImmediately([]db.NamedLock) (db.Lock, error)\n}\n\nfunc NewDBEngine(engine Engine, buildDB BuildDB, locker BuildLocker) Engine {\n\treturn &dbEngine{\n\t\tengine: engine,\n\n\t\tdb: buildDB,\n\t\tlocker: locker,\n\t}\n}\n\ntype dbEngine struct {\n\tengine Engine\n\n\tdb BuildDB\n\tlocker BuildLocker\n}\n\nfunc (*dbEngine) Name() string {\n\treturn \"db\"\n}\n\nfunc (engine *dbEngine) CreateBuild(build db.Build, plan atc.BuildPlan) (Build, error) {\n\tcreatedBuild, err := engine.engine.CreateBuild(build, plan)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstarted, err := engine.db.StartBuild(build.ID, engine.engine.Name(), createdBuild.Metadata())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !started {\n\t\tcreatedBuild.Abort()\n\t}\n\n\treturn &dbBuild{\n\t\tid: build.ID,\n\n\t\tengine: engine.engine,\n\n\t\tdb: engine.db,\n\t\tlocker: engine.locker,\n\t}, nil\n}\n\nfunc (engine *dbEngine) LookupBuild(build db.Build) (Build, error) {\n\treturn &dbBuild{\n\t\tid: build.ID,\n\n\t\tengine: engine.engine,\n\n\t\tdb: engine.db,\n\t\tlocker: engine.locker,\n\t}, nil\n}\n\ntype dbBuild struct {\n\tid int\n\n\tengine Engine\n\n\tdb BuildDB\n\tlocker BuildLocker\n}\n\nfunc (build *dbBuild) Metadata() string {\n\treturn strconv.Itoa(build.id)\n}\n\nfunc (build *dbBuild) Abort() error {\n\t\/\/ the order below is very important to avoid races with build creation.\n\n\tlock, err := build.locker.AcquireWriteLockImmediately([]db.NamedLock{db.BuildTrackingLock(build.id)})\n\tif err != nil {\n\t\t\/\/ someone else is tracking the build; abort it, which will notify them\n\t\treturn build.db.AbortBuild(build.id)\n\t}\n\n\tdefer lock.Release()\n\n\t\/\/ no one is tracking the build; abort it ourselves\n\n\t\/\/ first save the status so that CreateBuild will see a conflict when it\n\t\/\/ tries to mark the build as started.\n\terr = build.db.AbortBuild(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ reload the model *after* saving the status for the following check to see\n\t\/\/ if it was already started\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if there's an engine, there's a real build to abort\n\tif model.Engine == \"\" {\n\t\t\/\/ otherwise, CreateBuild had not yet tried to start the build, and so it\n\t\t\/\/ will see the conflict when it tries to transition, and abort itself.\n\t\treturn nil\n\t}\n\n\t\/\/ find the real build to abort...\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ ...and abort it.\n\treturn engineBuild.Abort()\n}\n\nfunc (build *dbBuild) Hijack(spec garden.ProcessSpec, io garden.ProcessIO) (garden.Process, error) {\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif model.Engine == \"\" {\n\t\treturn nil, ErrBuildNotActive\n\t}\n\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn engineBuild.Hijack(spec, io)\n}\n\nfunc (build *dbBuild) Resume(logger lager.Logger) error {\n\tlock, err := build.locker.AcquireWriteLockImmediately([]db.NamedLock{db.BuildTrackingLock(build.id)})\n\tif err != nil {\n\t\t\/\/ already being tracked somewhere; short-circuit\n\t\treturn nil\n\t}\n\n\tdefer lock.Release()\n\n\tmodel, err := build.db.GetBuild(build.id)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-load-build-from-db\", err)\n\t\treturn err\n\t}\n\n\tif model.Engine == \"\" {\n\t\tlogger.Error(\"build-has-no-engine\", err)\n\t\treturn nil\n\t}\n\n\tengineBuild, err := build.engine.LookupBuild(model)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-lookup-build-from-engine\", err)\n\t\treturn err\n\t}\n\n\taborts, err := build.db.AbortNotifier(build.id)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-listen-for-aborts\", err)\n\t\treturn err\n\t}\n\n\tdefer aborts.Close()\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-aborts.Notify():\n\t\t\tlogger.Info(\"aborting\")\n\n\t\t\terr := engineBuild.Abort()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-abort\", err)\n\t\t\t}\n\t\tcase <-done:\n\t\t}\n\t}()\n\n\treturn engineBuild.Resume(logger)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n\n\t\"xorm.io\/xorm\"\n\t\"xorm.io\/xorm\/schemas\"\n)\n\ntype improveActionTableIndicesAction struct {\n\tID int64 `xorm:\"pk autoincr\"`\n\tUserID int64 \/\/ Receiver user id.\n\tOpType int\n\tActUserID int64 \/\/ Action user id.\n\tRepoID int64\n\tCommentID int64 `xorm:\"INDEX\"`\n\tIsDeleted bool `xorm:\"NOT NULL DEFAULT false\"`\n\tRefName string\n\tIsPrivate bool `xorm:\"NOT NULL DEFAULT false\"`\n\tContent string `xorm:\"TEXT\"`\n\tCreatedUnix timeutil.TimeStamp `xorm:\"created\"`\n}\n\n\/\/ TableName sets the name of this table\nfunc (a *improveActionTableIndicesAction) TableName() string {\n\treturn \"action\"\n}\n\n\/\/ TableIndices implements xorm's TableIndices interface\nfunc (a *improveActionTableIndicesAction) TableIndices() []*schemas.Index {\n\tactUserIndex := schemas.NewIndex(\"au_r_c_u_d\", schemas.IndexType)\n\tactUserIndex.AddColumn(\"act_user_id\", \"repo_id\", \"created_unix\", \"user_id\", \"is_deleted\")\n\n\trepoIndex := schemas.NewIndex(\"r_c_u_d\", schemas.IndexType)\n\trepoIndex.AddColumn(\"repo_id\", \"created_unix\", \"user_id\", \"is_deleted\")\n\n\treturn []*schemas.Index{actUserIndex, repoIndex}\n}\n\nfunc improveActionTableIndices(x *xorm.Engine) error {\n\t{\n\t\ttype Action struct {\n\t\t\tID int64 `xorm:\"pk autoincr\"`\n\t\t\tUserID int64 `xorm:\"INDEX\"` \/\/ Receiver user id.\n\t\t\tOpType int\n\t\t\tActUserID int64 `xorm:\"INDEX\"` \/\/ Action user id.\n\t\t\tRepoID int64 `xorm:\"INDEX\"`\n\t\t\tCommentID int64 `xorm:\"INDEX\"`\n\t\t\tIsDeleted bool `xorm:\"INDEX NOT NULL DEFAULT false\"`\n\t\t\tRefName string\n\t\t\tIsPrivate bool `xorm:\"INDEX NOT NULL DEFAULT false\"`\n\t\t\tContent string `xorm:\"TEXT\"`\n\t\t\tCreatedUnix timeutil.TimeStamp `xorm:\"INDEX created\"`\n\t\t}\n\t\tif err := x.Sync2(&Action{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := x.DropIndexes(&Action{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn x.Sync2(&improveActionTableIndicesAction{})\n}\n<commit_msg>Simplify and fix migration 216 (#20035)<commit_after>\/\/ Copyright 2022 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n\n\t\"xorm.io\/xorm\"\n\t\"xorm.io\/xorm\/schemas\"\n)\n\ntype improveActionTableIndicesAction struct {\n\tID int64 `xorm:\"pk autoincr\"`\n\tUserID int64 \/\/ Receiver user id.\n\tOpType int\n\tActUserID int64 \/\/ Action user id.\n\tRepoID int64\n\tCommentID int64 `xorm:\"INDEX\"`\n\tIsDeleted bool `xorm:\"NOT NULL DEFAULT false\"`\n\tRefName string\n\tIsPrivate bool `xorm:\"NOT NULL DEFAULT false\"`\n\tContent string `xorm:\"TEXT\"`\n\tCreatedUnix timeutil.TimeStamp `xorm:\"created\"`\n}\n\n\/\/ TableName sets the name of this table\nfunc (a *improveActionTableIndicesAction) TableName() string {\n\treturn \"action\"\n}\n\n\/\/ TableIndices implements xorm's TableIndices interface\nfunc (a *improveActionTableIndicesAction) TableIndices() []*schemas.Index {\n\tactUserIndex := schemas.NewIndex(\"au_r_c_u_d\", schemas.IndexType)\n\tactUserIndex.AddColumn(\"act_user_id\", \"repo_id\", \"created_unix\", \"user_id\", \"is_deleted\")\n\n\trepoIndex := schemas.NewIndex(\"r_c_u_d\", schemas.IndexType)\n\trepoIndex.AddColumn(\"repo_id\", \"created_unix\", \"user_id\", \"is_deleted\")\n\n\treturn []*schemas.Index{actUserIndex, repoIndex}\n}\n\nfunc improveActionTableIndices(x *xorm.Engine) error {\n\treturn x.Sync2(&improveActionTableIndicesAction{})\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudmutex\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\/google\"\n\tstorage \"google.golang.org\/api\/storage\/v1\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype cloudmutex struct {\n\tproject string\n\tbucket string\n\tobject string\n\tservice *storage.Service\n}\n\n\/\/ TimedLock will wait up to duruation d for l.Lock() to succeed.\nfunc TimedLock(l sync.Locker, d time.Duration) error {\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tl.Lock()\n\t\tdone <- true\n\t}()\n\tselect {\n\tcase <-done:\n\t\treturn nil\n\tcase <-time.After(d):\n\t\treturn errors.New(\"lock request timed out\")\n\t}\n}\n\n\/\/ TimedUnlock will wait up to duruation d for l.Unlock() to succeed.\nfunc TimedUnlock(l sync.Locker, d time.Duration) error {\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tl.Unlock()\n\t\tdone <- true\n\t}()\n\tselect {\n\tcase <-done:\n\t\treturn nil\n\tcase <-time.After(d):\n\t\treturn errors.New(\"unlock request timed out\")\n\t}\n}\n\nfunc (m cloudmutex) Lock() {\n\tobject := &storage.Object{Name: m.object}\n\tfor {\n\t\t_, err := m.service.Objects.Insert(m.bucket, object).Media(bytes.NewReader([]byte(\"1\"))).Do()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (m cloudmutex) Unlock() {\n\tfor {\n\t\terr := m.service.Objects.Delete(m.bucket, m.object).Do()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ New creates a GCS-based sync.Locker.\n\/\/ It uses Application Default Credentials to make authenticated requests\n\/\/ to Google Cloud Storage. See the DefaultClient function of the\n\/\/ golang.org\/x\/oauth2\/google package for App Default Credentials details.\n\/\/\n\/\/ If ctx argument is nil, context.Background is used.\n\/\/\nfunc New(ctx context.Context, project, bucket, object string) (sync.Locker, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tscope := storage.DevstorageFullControlScope\n\tclient, err := google.DefaultClient(ctx, scope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservice, err := storage.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &cloudmutex{\n\t\tproject: project,\n\t\tbucket: bucket,\n\t\tobject: object,\n\t\tservice: service,\n\t}\n\treturn m, nil\n}\n<commit_msg>improve perf by passing empty struct instead of a bool across channel<commit_after>package cloudmutex\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/oauth2\/google\"\n\tstorage \"google.golang.org\/api\/storage\/v1\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype cloudmutex struct {\n\tproject string\n\tbucket string\n\tobject string\n\tservice *storage.Service\n}\n\n\/\/ TimedLock will wait up to duruation d for l.Lock() to succeed.\nfunc TimedLock(l sync.Locker, d time.Duration) error {\n\tdone := make(chan struct{}, 1)\n\tgo func() {\n\t\tl.Lock()\n\t\tdone <- struct{}{}\n\t}()\n\tselect {\n\tcase <-done:\n\t\treturn nil\n\tcase <-time.After(d):\n\t\treturn errors.New(\"lock request timed out\")\n\t}\n}\n\n\/\/ TimedUnlock will wait up to duruation d for l.Unlock() to succeed.\nfunc TimedUnlock(l sync.Locker, d time.Duration) error {\n\tdone := make(chan struct{}, 1)\n\tgo func() {\n\t\tl.Unlock()\n\t\tdone <- struct{}{}\n\t}()\n\tselect {\n\tcase <-done:\n\t\treturn nil\n\tcase <-time.After(d):\n\t\treturn errors.New(\"unlock request timed out\")\n\t}\n}\n\nfunc (m cloudmutex) Lock() {\n\tobject := &storage.Object{Name: m.object}\n\tfor {\n\t\t_, err := m.service.Objects.Insert(m.bucket, object).Media(bytes.NewReader([]byte(\"1\"))).Do()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (m cloudmutex) Unlock() {\n\tfor {\n\t\terr := m.service.Objects.Delete(m.bucket, m.object).Do()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ New creates a GCS-based sync.Locker.\n\/\/ It uses Application Default Credentials to make authenticated requests\n\/\/ to Google Cloud Storage. See the DefaultClient function of the\n\/\/ golang.org\/x\/oauth2\/google package for App Default Credentials details.\n\/\/\n\/\/ If ctx argument is nil, context.Background is used.\n\/\/\nfunc New(ctx context.Context, project, bucket, object string) (sync.Locker, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tscope := storage.DevstorageFullControlScope\n\tclient, err := google.DefaultClient(ctx, scope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservice, err := storage.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &cloudmutex{\n\t\tproject: project,\n\t\tbucket: bucket,\n\t\tobject: object,\n\t\tservice: service,\n\t}\n\treturn m, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\ni-sudoku is an interactive command-line sudoku tool\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/jkomoros\/sudoku\"\n\t\"github.com\/nsf\/termbox-go\"\n\t\"log\"\n\t\"strings\"\n)\n\ntype mainModel struct {\n\tgrid *sudoku.Grid\n}\n\nfunc main() {\n\tif err := termbox.Init(); err != nil {\n\t\tlog.Fatal(\"Termbox initialization failed:\", err)\n\t}\n\tdefer termbox.Close()\n\n\tmodel := &mainModel{\n\t\tsudoku.NewGrid(),\n\t}\n\n\tmodel.grid.Fill()\n\n\tdraw(model)\n\nmainloop:\n\tfor {\n\t\tswitch ev := termbox.PollEvent(); ev.Type {\n\t\tcase termbox.EventKey:\n\t\t\tswitch ev.Key {\n\t\t\tcase termbox.KeyEsc, termbox.KeyCtrlC:\n\t\t\t\tbreak mainloop\n\t\t\t}\n\t\t}\n\t\tdraw(model)\n\t}\n}\n\nfunc draw(model *mainModel) {\n\tdrawGrid(model.grid)\n\ttermbox.Flush()\n}\n\nfunc drawGrid(grid *sudoku.Grid) {\n\tfor y, line := range strings.Split(grid.Diagram(), \"\\n\") {\n\t\tx := 0\n\t\t\/\/The first number in range will be byte offset, but for some items like the bullet, it's two bytes.\n\t\t\/\/But what we care about is that each item is a character.\n\t\tfor _, ch := range line {\n\t\t\ttermbox.SetCell(x, y, ch, termbox.ColorGreen, termbox.ColorDefault)\n\t\t\tx++\n\t\t}\n\t}\n}\n<commit_msg>Made it so the backgrond 'number filled' rune is printed in a different color.<commit_after>\/*\ni-sudoku is an interactive command-line sudoku tool\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/jkomoros\/sudoku\"\n\t\"github.com\/nsf\/termbox-go\"\n\t\"log\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\ntype mainModel struct {\n\tgrid *sudoku.Grid\n}\n\nfunc main() {\n\tif err := termbox.Init(); err != nil {\n\t\tlog.Fatal(\"Termbox initialization failed:\", err)\n\t}\n\tdefer termbox.Close()\n\n\tmodel := &mainModel{\n\t\tsudoku.NewGrid(),\n\t}\n\n\tmodel.grid.Fill()\n\n\tdraw(model)\n\nmainloop:\n\tfor {\n\t\tswitch ev := termbox.PollEvent(); ev.Type {\n\t\tcase termbox.EventKey:\n\t\t\tswitch ev.Key {\n\t\t\tcase termbox.KeyEsc, termbox.KeyCtrlC:\n\t\t\t\tbreak mainloop\n\t\t\t}\n\t\t}\n\t\tdraw(model)\n\t}\n}\n\nfunc draw(model *mainModel) {\n\tdrawGrid(model.grid)\n\ttermbox.Flush()\n}\n\nfunc drawGrid(grid *sudoku.Grid) {\n\tfor y, line := range strings.Split(grid.Diagram(), \"\\n\") {\n\t\tx := 0\n\t\t\/\/The first number in range will be byte offset, but for some items like the bullet, it's two bytes.\n\t\t\/\/But what we care about is that each item is a character.\n\t\tfor _, ch := range line {\n\n\t\t\tdefaultColor := termbox.ColorGreen\n\n\t\t\tnumberRune, _ := utf8.DecodeRuneInString(sudoku.DIAGRAM_NUMBER)\n\n\t\t\tif ch == numberRune {\n\t\t\t\tdefaultColor = termbox.ColorBlue\n\t\t\t}\n\n\t\t\ttermbox.SetCell(x, y, ch, defaultColor, termbox.ColorDefault)\n\t\t\tx++\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time\n\nimport (\n\t\"testing\";\n\t\"time\";\n)\n\nexport func TestTick(t *testing.T) {\n\tconst (\n\t\tDelta uint64 = 100*1e6;\n\t\tCount uint64 = 10;\n\t);\n\tc := Tick(Delta);\n\tt0 := Nanoseconds();\n\tfor i := 0; i < Count; i++ {\n\t\t<-c;\n\t}\n\tt1 := Nanoseconds();\n\tns := t1 - t0;\n\ttarget := int64(Delta*Count);\n\tslop := target*2\/10;\n\tif ns < target - slop || ns > target + slop {\n\t\tt.Fatalf(\"%d ticks of %g ns took %g ns, expected %g\", Count, float64(Delta), float64(ns), float64(target));\n\t}\n}\n<commit_msg>Remove types from constants, since they didn't match what Tick() expected.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time\n\nimport (\n\t\"testing\";\n\t\"time\";\n)\n\nexport func TestTick(t *testing.T) {\n\tconst (\n\t\tDelta = 100*1e6;\n\t\tCount = 10;\n\t);\n\tc := Tick(Delta);\n\tt0 := Nanoseconds();\n\tfor i := 0; i < Count; i++ {\n\t\t<-c;\n\t}\n\tt1 := Nanoseconds();\n\tns := t1 - t0;\n\ttarget := int64(Delta*Count);\n\tslop := target*2\/10;\n\tif ns < target - slop || ns > target + slop {\n\t\tt.Fatalf(\"%d ticks of %g ns took %g ns, expected %g\", Count, float64(Delta), float64(ns), float64(target));\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ TODO: move this somewhere central\n\t\"k8s.io\/contrib\/mungegithub\/mungers\/jenkins\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\n\/\/ LatencyData represents the latency data for a set of RESTful API calls\ntype LatencyData struct {\n\tAPICalls []APICallLatency `json:\"apicalls\"`\n}\n\n\/\/ APICallLatency represents the latency data for a (resource, verb) tuple\ntype APICallLatency struct {\n\tLatency Histogram `json:\"latency\"`\n\tResource string `json:\"resource\"`\n\tVerb string `json:\"verb\"`\n}\n\n\/\/ Histogram is a map from bucket to latency (e.g. \"Perc90\" -> 23.5)\ntype Histogram map[string]float64\n\n\/\/ ResourceToHistogram is a map from resource names (e.g. \"pods\") to the relevant latency data\ntype ResourceToHistogram map[string][]APICallLatency\n\n\/\/ BuildLatencyData is a map from build number to latency data\ntype BuildLatencyData map[string]ResourceToHistogram\n\nfunc (buildLatency *BuildLatencyData) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tdata, err := json.Marshal(buildLatency)\n\tif err != nil {\n\t\tres.Header().Set(\"Content-type\", \"text\/html\")\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(fmt.Sprintf(\"<h3>Internal Error<\/h3><p>%v\", err)))\n\t\treturn\n\t}\n\tres.Header().Set(\"Content-type\", \"application\/json\")\n\tres.WriteHeader(http.StatusOK)\n\tres.Write(data)\n}\n\nfunc getLatencyData(client *jenkins.JenkinsClient, job string) (BuildLatencyData, sets.String, sets.String, error) {\n\tbuildLatency := BuildLatencyData{}\n\tresources := sets.NewString()\n\tmethods := sets.NewString()\n\n\tqueue, err := client.GetJob(job)\n\tif err != nil {\n\t\treturn buildLatency, resources, methods, err\n\t}\n\n\tfor ix := range queue.Builds {\n\t\tbuild := queue.Builds[ix]\n\t\treader, err := client.GetConsoleLog(job, build.Number)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error getting logs: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer reader.Close()\n\t\tscanner := bufio.NewScanner(reader)\n\t\tbuff := &bytes.Buffer{}\n\t\tinLatency := false\n\n\t\thist := ResourceToHistogram{}\n\t\tfound := false\n\t\ttestNameSeparator := \"[It] [Skipped] [Performance suite]\"\n\t\ttestName := \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tif strings.Contains(line, testNameSeparator) {\n\t\t\t\ttestName = strings.Trim(strings.Split(line, testNameSeparator)[1], \" \")\n\t\t\t}\n\t\t\t\/\/ TODO: This is brittle, we should emit a tail delimiter too\n\t\t\tif strings.Contains(line, \"INFO\") || strings.Contains(line, \"STEP\") || strings.Contains(line, \"Failure\") {\n\t\t\t\tif inLatency {\n\t\t\t\t\tobj := LatencyData{}\n\t\t\t\t\tif err := json.Unmarshal(buff.Bytes(), &obj); err != nil {\n\t\t\t\t\t\tfmt.Printf(\"error parsing JSON in build %d: %v %s\\n\", build.Number, err, buff.String())\n\t\t\t\t\t\t\/\/ reset state and try again with more input\n\t\t\t\t\t\tinLatency = false\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif testName == \"should allow starting 30 pods per node\" {\n\t\t\t\t\t\tfor _, call := range obj.APICalls {\n\t\t\t\t\t\t\tlist := hist[call.Resource]\n\t\t\t\t\t\t\tlist = append(list, call)\n\t\t\t\t\t\t\thist[call.Resource] = list\n\t\t\t\t\t\t\tresources.Insert(call.Resource)\n\t\t\t\t\t\t\tmethods.Insert(call.Verb)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tbuff.Reset()\n\t\t\t\t}\n\t\t\t\tinLatency = false\n\t\t\t}\n\t\t\tif strings.Contains(line, \"API calls latencies\") {\n\t\t\t\tfound = true\n\t\t\t\tinLatency = true\n\t\t\t\tix = strings.Index(line, \"{\")\n\t\t\t\tline = line[ix:]\n\t\t\t}\n\t\t\tif inLatency {\n\t\t\t\tbuff.WriteString(line + \" \")\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tcontinue\n\t\t}\n\n\t\tbuildLatency[fmt.Sprintf(\"%d\", build.Number)] = hist\n\t}\n\treturn buildLatency, resources, methods, nil\n}\n\nfunc generateCSV(buildLatency BuildLatencyData, resources, methods sets.String, out io.Writer) error {\n\theader := []string{\"build\"}\n\tfor _, rsrc := range resources.List() {\n\t\theader = append(header, fmt.Sprintf(\"%s_50\", rsrc))\n\t\theader = append(header, fmt.Sprintf(\"%s_90\", rsrc))\n\t\theader = append(header, fmt.Sprintf(\"%s_99\", rsrc))\n\t}\n\tif _, err := fmt.Fprintln(out, strings.Join(header, \",\")); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, method := range methods.List() {\n\t\tif _, err := fmt.Fprintln(out, method); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor build, data := range buildLatency {\n\t\t\tline := []string{fmt.Sprintf(\"%d\", build)}\n\t\t\tfor _, rsrc := range resources.List() {\n\t\t\t\tpodData := data[rsrc]\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc50\", podData)))\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc90\", podData)))\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc99\", podData)))\n\t\t\t}\n\t\t\tif _, err := fmt.Fprintln(out, strings.Join(line, \",\")); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc findMethod(method, item string, data []APICallLatency) float64 {\n\tfor _, datum := range data {\n\t\tif datum.Verb == method {\n\t\t\treturn datum.Latency[item]\n\t\t}\n\t}\n\treturn -1\n}\n\nvar (\n\twww = flag.Bool(\"www\", false, \"If true, start a web-server to server performance data\")\n\taddr = flag.String(\"address\", \":8080\", \"The address to serve web data on, only used if -www is true\")\n\twwwDir = flag.String(\"dir\", \"\", \"If non-empty, add a file server for this directory at the root of the web server\")\n\tjenkinsHost = flag.String(\"jenkins-host\", \"\", \"The URL for the jenkins server.\")\n\n\tpollDuration = 10 * time.Minute\n\terrorDelay = 10 * time.Second\n)\n\nfunc main() {\n\tflag.Parse()\n\tjob := \"kubernetes-e2e-gce-scalability\"\n\tclient := &jenkins.JenkinsClient{\n\t\tHost: *jenkinsHost,\n\t}\n\n\tif !*www {\n\t\tbuildLatency, resources, methods, err := getLatencyData(client, job)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to get data: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tgenerateCSV(buildLatency, resources, methods, os.Stdout)\n\t\treturn\n\t}\n\n\tbuildLatency := BuildLatencyData{}\n\tresources := sets.String{}\n\tmethods := sets.String{}\n\tvar err error\n\tgo func() {\n\t\tfor {\n\t\t\tbuildLatency, resources, methods, err = getLatencyData(client, job)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error fetching data: %v\\n\", err)\n\t\t\t\ttime.Sleep(errorDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttime.Sleep(pollDuration)\n\t\t}\n\t}()\n\n\thttp.Handle(\"\/api\", &buildLatency)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(*wwwDir)))\n\thttp.ListenAndServe(*addr, nil)\n}\n<commit_msg>Update perfdash with new performance tag<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ TODO: move this somewhere central\n\t\"k8s.io\/contrib\/mungegithub\/mungers\/jenkins\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\n\/\/ LatencyData represents the latency data for a set of RESTful API calls\ntype LatencyData struct {\n\tAPICalls []APICallLatency `json:\"apicalls\"`\n}\n\n\/\/ APICallLatency represents the latency data for a (resource, verb) tuple\ntype APICallLatency struct {\n\tLatency Histogram `json:\"latency\"`\n\tResource string `json:\"resource\"`\n\tVerb string `json:\"verb\"`\n}\n\n\/\/ Histogram is a map from bucket to latency (e.g. \"Perc90\" -> 23.5)\ntype Histogram map[string]float64\n\n\/\/ ResourceToHistogram is a map from resource names (e.g. \"pods\") to the relevant latency data\ntype ResourceToHistogram map[string][]APICallLatency\n\n\/\/ BuildLatencyData is a map from build number to latency data\ntype BuildLatencyData map[string]ResourceToHistogram\n\nfunc (buildLatency *BuildLatencyData) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tdata, err := json.Marshal(buildLatency)\n\tif err != nil {\n\t\tres.Header().Set(\"Content-type\", \"text\/html\")\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(fmt.Sprintf(\"<h3>Internal Error<\/h3><p>%v\", err)))\n\t\treturn\n\t}\n\tres.Header().Set(\"Content-type\", \"application\/json\")\n\tres.WriteHeader(http.StatusOK)\n\tres.Write(data)\n}\n\nfunc getLatencyData(client *jenkins.JenkinsClient, job string) (BuildLatencyData, sets.String, sets.String, error) {\n\tbuildLatency := BuildLatencyData{}\n\tresources := sets.NewString()\n\tmethods := sets.NewString()\n\n\tqueue, err := client.GetJob(job)\n\tif err != nil {\n\t\treturn buildLatency, resources, methods, err\n\t}\n\n\tfor ix := range queue.Builds {\n\t\tbuild := queue.Builds[ix]\n\t\treader, err := client.GetConsoleLog(job, build.Number)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error getting logs: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer reader.Close()\n\t\tscanner := bufio.NewScanner(reader)\n\t\tbuff := &bytes.Buffer{}\n\t\tinLatency := false\n\n\t\thist := ResourceToHistogram{}\n\t\tfound := false\n\t\ttestNameSeparator := \"[It] [Feature:Performance]\"\n\t\ttestName := \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tif strings.Contains(line, testNameSeparator) {\n\t\t\t\ttestName = strings.Trim(strings.Split(line, testNameSeparator)[1], \" \")\n\t\t\t}\n\t\t\t\/\/ TODO: This is brittle, we should emit a tail delimiter too\n\t\t\tif strings.Contains(line, \"INFO\") || strings.Contains(line, \"STEP\") || strings.Contains(line, \"Failure\") {\n\t\t\t\tif inLatency {\n\t\t\t\t\tobj := LatencyData{}\n\t\t\t\t\tif err := json.Unmarshal(buff.Bytes(), &obj); err != nil {\n\t\t\t\t\t\tfmt.Printf(\"error parsing JSON in build %d: %v %s\\n\", build.Number, err, buff.String())\n\t\t\t\t\t\t\/\/ reset state and try again with more input\n\t\t\t\t\t\tinLatency = false\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif testName == \"should allow starting 30 pods per node\" {\n\t\t\t\t\t\tfor _, call := range obj.APICalls {\n\t\t\t\t\t\t\tlist := hist[call.Resource]\n\t\t\t\t\t\t\tlist = append(list, call)\n\t\t\t\t\t\t\thist[call.Resource] = list\n\t\t\t\t\t\t\tresources.Insert(call.Resource)\n\t\t\t\t\t\t\tmethods.Insert(call.Verb)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tbuff.Reset()\n\t\t\t\t}\n\t\t\t\tinLatency = false\n\t\t\t}\n\t\t\tif strings.Contains(line, \"API calls latencies\") {\n\t\t\t\tfound = true\n\t\t\t\tinLatency = true\n\t\t\t\tix = strings.Index(line, \"{\")\n\t\t\t\tline = line[ix:]\n\t\t\t}\n\t\t\tif inLatency {\n\t\t\t\tbuff.WriteString(line + \" \")\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tcontinue\n\t\t}\n\n\t\tbuildLatency[fmt.Sprintf(\"%d\", build.Number)] = hist\n\t}\n\treturn buildLatency, resources, methods, nil\n}\n\nfunc generateCSV(buildLatency BuildLatencyData, resources, methods sets.String, out io.Writer) error {\n\theader := []string{\"build\"}\n\tfor _, rsrc := range resources.List() {\n\t\theader = append(header, fmt.Sprintf(\"%s_50\", rsrc))\n\t\theader = append(header, fmt.Sprintf(\"%s_90\", rsrc))\n\t\theader = append(header, fmt.Sprintf(\"%s_99\", rsrc))\n\t}\n\tif _, err := fmt.Fprintln(out, strings.Join(header, \",\")); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, method := range methods.List() {\n\t\tif _, err := fmt.Fprintln(out, method); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor build, data := range buildLatency {\n\t\t\tline := []string{fmt.Sprintf(\"%d\", build)}\n\t\t\tfor _, rsrc := range resources.List() {\n\t\t\t\tpodData := data[rsrc]\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc50\", podData)))\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc90\", podData)))\n\t\t\t\tline = append(line, fmt.Sprintf(\"%g\", findMethod(method, \"Perc99\", podData)))\n\t\t\t}\n\t\t\tif _, err := fmt.Fprintln(out, strings.Join(line, \",\")); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc findMethod(method, item string, data []APICallLatency) float64 {\n\tfor _, datum := range data {\n\t\tif datum.Verb == method {\n\t\t\treturn datum.Latency[item]\n\t\t}\n\t}\n\treturn -1\n}\n\nvar (\n\twww = flag.Bool(\"www\", false, \"If true, start a web-server to server performance data\")\n\taddr = flag.String(\"address\", \":8080\", \"The address to serve web data on, only used if -www is true\")\n\twwwDir = flag.String(\"dir\", \"\", \"If non-empty, add a file server for this directory at the root of the web server\")\n\tjenkinsHost = flag.String(\"jenkins-host\", \"\", \"The URL for the jenkins server.\")\n\n\tpollDuration = 10 * time.Minute\n\terrorDelay = 10 * time.Second\n)\n\nfunc main() {\n\tflag.Parse()\n\tjob := \"kubernetes-e2e-gce-scalability\"\n\tclient := &jenkins.JenkinsClient{\n\t\tHost: *jenkinsHost,\n\t}\n\n\tif !*www {\n\t\tbuildLatency, resources, methods, err := getLatencyData(client, job)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to get data: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tgenerateCSV(buildLatency, resources, methods, os.Stdout)\n\t\treturn\n\t}\n\n\tbuildLatency := BuildLatencyData{}\n\tresources := sets.String{}\n\tmethods := sets.String{}\n\tvar err error\n\tgo func() {\n\t\tfor {\n\t\t\tbuildLatency, resources, methods, err = getLatencyData(client, job)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error fetching data: %v\\n\", err)\n\t\t\t\ttime.Sleep(errorDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttime.Sleep(pollDuration)\n\t\t}\n\t}()\n\n\thttp.Handle(\"\/api\", &buildLatency)\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(*wwwDir)))\n\thttp.ListenAndServe(*addr, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package market\n\nimport (\n\t\/\/ Stdlib\n\t\"encoding\/json\"\n\n\t\/\/ RPC\n\t\"github.com\/asuleymanov\/golos-go\/interfaces\"\n\t\"github.com\/asuleymanov\/golos-go\/internal\/rpc\"\n\n\t\/\/ Vendor\n\t\"github.com\/pkg\/errors\"\n)\n\nconst APIID = \"market_history_api\"\n\ntype API struct {\n\tid int\n\tcaller interfaces.Caller\n}\n\nfunc NewAPI(caller interfaces.Caller) (*API, error) {\n\tid, err := rpc.GetNumericAPIID(caller, APIID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &API{id, caller}, nil\n}\n\nfunc (api *API) call(method string, params, resp interface{}) error {\n\treturn api.caller.Call(\"call\", []interface{}{api.id, method, params}, resp)\n}\n\nfunc (api *API) GetOrderBookRaw(limit uint32) (*json.RawMessage, error) {\n\tvar resp json.RawMessage\n\tif limit > 1000 {\n\t\treturn nil, errors.New(\"golos-go: market_history_api: get_order_book -> limit must not exceed 1000\")\n\t}\n\tif err := api.caller.Call(\"get_order_book\", []interface{}{limit}, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}\n\nfunc (api *API) GetOrderBook(limit uint32) (*OrderBook, error) {\n\traw, err := api.GetOrderBookRaw(limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *OrderBook\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_order_book response\")\n\t}\n\treturn resp, nil\n}\n<commit_msg>Bug fix<commit_after>package market\n\nimport (\n\t\/\/ Stdlib\n\t\"encoding\/json\"\n\n\t\/\/ RPC\n\t\"github.com\/asuleymanov\/golos-go\/interfaces\"\n\t\"github.com\/asuleymanov\/golos-go\/internal\/rpc\"\n\n\t\/\/ Vendor\n\t\"github.com\/pkg\/errors\"\n)\n\nconst APIID = \"market_history_api\"\n\nvar EmptyParams = []string{}\n\ntype API struct {\n\tid int\n\tcaller interfaces.Caller\n}\n\nfunc NewAPI(caller interfaces.Caller) (*API, error) {\n\tid, err := rpc.GetNumericAPIID(caller, APIID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &API{id, caller}, nil\n}\n\nfunc (api *API) Raw(method string, params interface{}) (*json.RawMessage, error) {\n\tvar resp json.RawMessage\n\tif err := api.caller.Call(\"call\", []interface{}{api.id, method, params}, &resp); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"golos-go: %v: failed to call %v\\n\", APIID, method)\n\t}\n\treturn &resp, nil\n}\n\nfunc (api *API) GetTickerRaw() (*json.RawMessage, error) {\n\treturn api.Raw(\"get_ticker\", EmptyParams)\n}\n\nfunc (api *API) GetTicker() (*Ticker, error) {\n\traw, err := api.GetTickerRaw()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *Ticker\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_ticker response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetVolumeRaw() (*json.RawMessage, error) {\n\treturn api.Raw(\"get_volume\", EmptyParams)\n}\n\nfunc (api *API) GetVolume() (*Volume, error) {\n\traw, err := api.GetVolumeRaw()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *Volume\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_volume response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetOrderBookRaw(limit uint32) (*json.RawMessage, error) {\n\tif limit > 1000 {\n\t\treturn nil, errors.New(\"golos-go: market_history_api: get_order_book -> limit must not exceed 1000\")\n\t}\n\treturn api.Raw(\"get_order_book\", []interface{}{limit})\n}\n\nfunc (api *API) GetOrderBook(limit uint32) (*OrderBook, error) {\n\traw, err := api.GetOrderBookRaw(limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp *OrderBook\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_order_book response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetTradeHistoryRaw(start, end string, limit uint32) (*json.RawMessage, error) {\n\tif limit > 1000 {\n\t\treturn nil, errors.New(\"golos-go: market_history_api: get_order_book -> limit must not exceed 1000\")\n\t}\n\treturn api.Raw(\"get_trade_history\", []interface{}{start, end, limit})\n}\n\nfunc (api *API) GetTradeHistory(start, end string, limit uint32) ([]*Trades, error) {\n\traw, err := api.GetTradeHistoryRaw(start, end, limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp []*Trades\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_trade_history response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetRecentTradesRaw(limit uint32) (*json.RawMessage, error) {\n\tif limit > 1000 {\n\t\treturn nil, errors.New(\"golos-go: market_history_api: get_order_book -> limit must not exceed 1000\")\n\t}\n\treturn api.Raw(\"get_recent_trades\", []interface{}{limit})\n}\n\nfunc (api *API) GetRecentTrades(limit uint32) ([]*Trades, error) {\n\traw, err := api.GetRecentTradesRaw(limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp []*Trades\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_recent_trades response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetMarketHistoryRaw(b_sec uint32, start, end string) (*json.RawMessage, error) {\n\tif limit > 1000 {\n\t\treturn nil, errors.New(\"golos-go: market_history_api: get_order_book -> limit must not exceed 1000\")\n\t}\n\treturn api.Raw(\"get_market_history\", []interface{}{limit})\n}\n\nfunc (api *API) GetMarketHistory(limit uint32) ([]*MarketHistory, error) {\n\traw, err := api.GetMarketHistoryRaw(limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp []*MarketHistory\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_market_history response\")\n\t}\n\treturn resp, nil\n}\n\nfunc (api *API) GetMarketHistoryBucketsRaw() (*json.RawMessage, error) {\n\treturn api.Raw(\"get_market_history_buckets\", EmptyParams)\n}\n\nfunc (api *API) GetMarketHistoryBuckets() ([]uint32, error) {\n\traw, err := api.GetMarketHistoryBucketsRaw()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp []uint32\n\tif err := json.Unmarshal([]byte(*raw), &resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"golos-go: market_history_api: failed to unmarshal get_market_history_buckets response\")\n\t}\n\treturn resp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"encoding\/json\"\n\t\"github.com\/buth\/stocker\/auth\"\n\t\"github.com\/buth\/stocker\/backend\"\n\t\"github.com\/buth\/stocker\/crypto\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar serverConfig struct {\n\tSecretFilepath, PrivateFilepath, Backend, BackendNamespace, BackendProtocol, BackendAddress, Group, Address, ReadersURL, WritersURL string\n}\n\nvar serverClient *http.Client\n\nvar Server = &Command{\n\tUsageLine: \"server [options]\",\n\tShort: \"start a stocker server\",\n}\n\nfunc init() {\n\tServer.Run = serverRun\n\tServer.Flag.StringVar(&serverConfig.Address, \"a\", \":2022\", \"address to listen on\")\n\tServer.Flag.StringVar(&serverConfig.Backend, \"b\", \"redis\", \"backend to use\")\n\tServer.Flag.StringVar(&serverConfig.BackendAddress, \"h\", \":6379\", \"backend address\")\n\tServer.Flag.StringVar(&serverConfig.BackendNamespace, \"n\", \"stocker\", \"backend namespace\")\n\tServer.Flag.StringVar(&serverConfig.BackendProtocol, \"t\", \"tcp\", \"backend connection protocol\")\n\tServer.Flag.StringVar(&serverConfig.PrivateFilepath, \"i\", \"\/etc\/stocker\/id_rsa\", \"path to an ssh private key\")\n\tServer.Flag.StringVar(&serverConfig.SecretFilepath, \"k\", \"\/etc\/stocker\/key\", \"path to encryption key\")\n\tServer.Flag.StringVar(&serverConfig.ReadersURL, \"r\", \"https:\/\/s3.amazonaws.com\/newsdev-ops\/keys.json\", \"retrieve reader public keys from this URL\")\n\tServer.Flag.StringVar(&serverConfig.WritersURL, \"w\", \"https:\/\/s3.amazonaws.com\/newsdev-ops\/keys.json\", \"retrieve writer public keys from this URL\")\n\n\tserverClient = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 1,\n\t\t\tResponseHeaderTimeout: time.Minute,\n\t\t},\n\t}\n}\n\nfunc serverFetchPublicKeys(url string) ([]ssh.PublicKey, error) {\n\n\t\/\/ Fetch the public keys.\n\tresponse, err := serverClient.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Read out the entire body.\n\tjsonResponse, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build a raw keys object that reflects the expected structure of the JSON.\n\tvar rawKeys []struct {\n\t\tKey string\n\t}\n\n\t\/\/ Try to parse the body of the response as JSON.\n\tif err := json.Unmarshal(jsonResponse, &rawKeys); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build a new authorizer and iterate through the raw keys, parsing them\n\t\/\/ and then adding them.\n\tpublicKeys := make([]ssh.PublicKey, 0, len(rawKeys))\n\tfor _, rawKey := range rawKeys {\n\n\t\t\/\/ We're only interested in the key itself and whether or not there was an error.\n\t\tpublicKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(rawKey.Key))\n\t\tif err != nil {\n\t\t\treturn publicKeys, err\n\t\t}\n\n\t\t\/\/ Add the key to the list.\n\t\tpublicKeys = publicKeys[:len(publicKeys)+1]\n\t\tpublicKeys[len(publicKeys)-1] = publicKey\n\t}\n\n\treturn publicKeys, nil\n}\n\nfunc serverRun(cmd *Command, args []string) {\n\n\tc, err := crypto.NewCrypterFromFile(serverConfig.SecretFilepath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb, err := backend.NewBackend(serverConfig.Backend, serverConfig.BackendNamespace, serverConfig.BackendProtocol, serverConfig.BackendAddress)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprivateBytes, err := ioutil.ReadFile(serverConfig.PrivateFilepath)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to load private key\")\n\t}\n\n\tprivate, err := ssh.ParsePrivateKey(privateBytes)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to parse private key\")\n\t}\n\n\t\/\/ Create a new server using the specified Backend and Crypter.\n\tserver := auth.NewServer(b, c, private)\n\n\treaders, err := serverFetchPublicKeys(serverConfig.ReadersURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\twriters, err := serverFetchPublicKeys(serverConfig.WritersURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, reader := range readers {\n\t\tserver.AddReadKey(reader)\n\t}\n\n\tfor _, writer := range writers {\n\t\tserver.AddWriteKey(writer)\n\t}\n\n\t\/\/ Start the server.\n\tlog.Fatal(server.ListenAndServe(serverConfig.Address))\n}\n<commit_msg>no default key locations<commit_after>package cmd\n\nimport (\n\t\"code.google.com\/p\/go.crypto\/ssh\"\n\t\"encoding\/json\"\n\t\"github.com\/buth\/stocker\/auth\"\n\t\"github.com\/buth\/stocker\/backend\"\n\t\"github.com\/buth\/stocker\/crypto\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar serverConfig struct {\n\tSecretFilepath, PrivateFilepath, Backend, BackendNamespace, BackendProtocol, BackendAddress, Group, Address, ReadersURL, WritersURL string\n}\n\nvar serverClient *http.Client\n\nvar Server = &Command{\n\tUsageLine: \"server [options]\",\n\tShort: \"start a stocker server\",\n}\n\nfunc init() {\n\tServer.Run = serverRun\n\tServer.Flag.StringVar(&serverConfig.Address, \"a\", \":2022\", \"address to listen on\")\n\tServer.Flag.StringVar(&serverConfig.Backend, \"b\", \"redis\", \"backend to use\")\n\tServer.Flag.StringVar(&serverConfig.BackendAddress, \"h\", \":6379\", \"backend address\")\n\tServer.Flag.StringVar(&serverConfig.BackendNamespace, \"n\", \"stocker\", \"backend namespace\")\n\tServer.Flag.StringVar(&serverConfig.BackendProtocol, \"t\", \"tcp\", \"backend connection protocol\")\n\tServer.Flag.StringVar(&serverConfig.PrivateFilepath, \"i\", \"\/etc\/stocker\/id_rsa\", \"path to an ssh private key\")\n\tServer.Flag.StringVar(&serverConfig.SecretFilepath, \"k\", \"\/etc\/stocker\/key\", \"path to encryption key\")\n\tServer.Flag.StringVar(&serverConfig.ReadersURL, \"r\", \"\", \"retrieve reader public keys from this URL\")\n\tServer.Flag.StringVar(&serverConfig.WritersURL, \"w\", \"\", \"retrieve writer public keys from this URL\")\n\n\tserverClient = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 1,\n\t\t\tResponseHeaderTimeout: time.Minute,\n\t\t},\n\t}\n}\n\nfunc serverFetchPublicKeys(url string) ([]ssh.PublicKey, error) {\n\n\t\/\/ Fetch the public keys.\n\tresponse, err := serverClient.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Read out the entire body.\n\tjsonResponse, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build a raw keys object that reflects the expected structure of the JSON.\n\tvar rawKeys []struct {\n\t\tKey string\n\t}\n\n\t\/\/ Try to parse the body of the response as JSON.\n\tif err := json.Unmarshal(jsonResponse, &rawKeys); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Build a new authorizer and iterate through the raw keys, parsing them\n\t\/\/ and then adding them.\n\tpublicKeys := make([]ssh.PublicKey, 0, len(rawKeys))\n\tfor _, rawKey := range rawKeys {\n\n\t\t\/\/ We're only interested in the key itself and whether or not there was an error.\n\t\tpublicKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(rawKey.Key))\n\t\tif err != nil {\n\t\t\treturn publicKeys, err\n\t\t}\n\n\t\t\/\/ Add the key to the list.\n\t\tpublicKeys = publicKeys[:len(publicKeys)+1]\n\t\tpublicKeys[len(publicKeys)-1] = publicKey\n\t}\n\n\treturn publicKeys, nil\n}\n\nfunc serverRun(cmd *Command, args []string) {\n\n\tc, err := crypto.NewCrypterFromFile(serverConfig.SecretFilepath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb, err := backend.NewBackend(serverConfig.Backend, serverConfig.BackendNamespace, serverConfig.BackendProtocol, serverConfig.BackendAddress)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprivateBytes, err := ioutil.ReadFile(serverConfig.PrivateFilepath)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to load private key\")\n\t}\n\n\tprivate, err := ssh.ParsePrivateKey(privateBytes)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to parse private key\")\n\t}\n\n\t\/\/ Create a new server using the specified Backend and Crypter.\n\tserver := auth.NewServer(b, c, private)\n\n\t\/\/ Check if a URL was provided to pull reader keys from.\n\tif serverConfig.ReadersURL != \"\" {\n\n\t\t\/\/ Fetch the reader keys.\n\t\treaders, err := serverFetchPublicKeys(serverConfig.ReadersURL)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t\/\/ Add the reader keys to the server.\n\t\tfor _, reader := range readers {\n\t\t\tserver.AddReadKey(reader)\n\t\t}\n\t}\n\n\t\/\/ Check if a URL was provided to pull writer keys from.\n\tif serverConfig.WritersURL != \"\" {\n\n\t\t\/\/ Fetch the writer keys.\n\t\twriters, err := serverFetchPublicKeys(serverConfig.WritersURL)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t\/\/ Add the writer keys to the server.\n\t\tfor _, writer := range writers {\n\t\t\tserver.AddWriteKey(writer)\n\t\t}\n\t}\n\n\t\/\/ Start the server.\n\tlog.Fatal(server.ListenAndServe(serverConfig.Address))\n}\n<|endoftext|>"} {"text":"<commit_before>package i2cm\n\nimport (\n\t\"io\"\n\t\"testing\"\n)\n\ntype t8x8item struct {\n\taddr Addr\n\tregaddr uint8\n\twb, rb []byte\n\tnw, nr int\n\terr error\n}\n\n\/\/ page verifying transactor for 24Cxx style EEPROMs\n\/\/ it's always based at address (0xA0 >> 1).\n\/\/ rollover inside a page is not supported as this\n\/\/ behavior is not exploited by the EEPROM drivers\n\/\/ in this package.\n\/\/ also logs.\ntype PVT24 struct {\n\tt *testing.T\n\tmem []byte\n\tpagesize uint\n\tlog []t8x8item\n}\n\nfunc (p *PVT24) Transact8x8(addr Addr, regaddr uint8, wb, rb []byte) (int, int, error) {\n\t\/\/ read and write logic is intentionally kept simple and different\n\t\/\/ in style from the eeprom routines. maybe i will make different \n\t\/\/ mistakes both way around :)\n\n\tif len(wb) > 0 && len(rb) > 0 {\n\t\tp.t.Errorf(\"trying to write to and read from an EEPROM. this is not allowed.\\n\")\n\t}\n\n\tmemaddr := ((uint(addr.GetBaseAddr()) & 0x07) << 8) + uint(regaddr)\n\tstartpagebase := memaddr & ^(p.pagesize - 1)\n\n\tfor _, b := range wb {\n\t\tnewpagebase := memaddr & ^(p.pagesize - 1)\n\t\tif newpagebase != startpagebase {\n\t\t\tp.t.Errorf(\"EEPROM transaction started in page %#04x, continue to page %#04x\", startpagebase, newpagebase)\n\t\t}\n\n\t\twaddr := (memaddr & (p.pagesize - 1)) | startpagebase\n\n\t\tp.mem[waddr] = b\n\n\t\tmemaddr++\n\t}\n\n\tfor i := range rb {\n\t\trb[i] = p.mem[memaddr]\n\t\tmemaddr++\n\t}\n\n\tp.log = append(p.log, t8x8item{addr, regaddr, wb, rb, len(wb), len(rb), nil})\n\n\treturn len(wb), len(rb), nil\n}\n\n\/\/ has dummy methods for typing reasons\n\nfunc (p *PVT24) Start() error { panic(\"not implemented\") }\nfunc (p *PVT24) Stop() error { panic(\"not implemented\") }\nfunc (p *PVT24) WriteByte(b byte) error { panic(\"not implemented\") }\nfunc (p *PVT24) ReadByte(ack bool) (byte, error) { panic(\"not implemented\") }\n\nfunc newPVT24(conf EEPROM24Config, t *testing.T) *PVT24 {\n\tvar p PVT24\n\tp.mem = make([]byte, conf.Size)\n\tp.pagesize = conf.PageSize\n\tp.t = t\n\n\tfor i := range p.mem {\n\t\tp.mem[i] = 0x24 ^ uint8(i)\n\t}\n\n\treturn &p\n}\n\nfunc TestEEPROM24EOF(t *testing.T) {\n\tconf := Conf_24C02\n\tpvt := newPVT24(conf, t)\n\tee, err := NewEEPROM24(pvt, Addr7(0xa0>>1), conf)\n\tif err != nil {\n\t\tt.Fatalf(\"NewEEPROM24 should not fail in this context. it did with % T: %#v\\n\", err, err)\n\t}\n\n\t_ee, ok := ee.(*ee24)\n\tif !ok {\n\t\tt.Fatalf(\"expected to test an ee24, got %T: %#v\\n\", ee, ee)\n\t}\n\n\t_ee.p = conf.Size - 3\n\t{\n\t\trb := make([]byte, 16)\n\t\tn, err := ee.Read(rb)\n\t\tif n != 3 {\n\t\t\tt.Fatalf(\"expected to read 3 bytes, got %d\\n\", n)\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\tif err == nil {\n\t\t\t\tn, err := ee.Read(rb)\n\n\t\t\t\tif n != 0 {\n\t\t\t\t\tt.Fatalf(\"expected to read 0 bytes with the second shot, got %d\\n\", n)\n\t\t\t\t}\n\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tt.Fatalf(\"did not get back io.EOF even though EEPROM24 was asked twice, got: %T: %#v\\n\", err, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"expected EOF, got %T: %#v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t_ee.p = conf.Size - 5\n\t{\n\t\twb := make([]byte, 16)\n\t\tn, err := ee.Write(wb)\n\t\tif n != 5 {\n\t\t\tt.Fatalf(\"expected to write 5 bytes, wrote %d\\n\", n)\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\tif err == nil {\n\t\t\t\tn, err := ee.Write(wb)\n\n\t\t\t\tif n != 0 {\n\t\t\t\t\tt.Fatalf(\"expected to read 0 bytes with the second shot, got %d\\n\", n)\n\t\t\t\t}\n\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tt.Fatalf(\"did not get back io.EOF even though EEPROM24 was asked twice, got: %T: %#v\\n\", err, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"expected EOF, got %T: %#v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestEEPROM24Conf(t *testing.T) {\n\tdefconf := EEPROM24Config{1, 1, 0}\n\tdevaddr := Addr7(0xA0 >> 1)\n\ttr := newPVT24(defconf, t)\n\n\t\/\/ one valid configuration as a counter check\n\t{\n\t\tif _, err := NewEEPROM24(tr, devaddr, defconf); err != nil {\n\t\t\tt.Errorf(\"NewEEPROM24 failed on valid configuration %#v\\n\", err)\n\t\t}\n\t}\n\n\t\/\/ pagesize not power of 2\n\t{\n\t\tconf := EEPROM24Config{2048, 13, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n\n\t\/\/ size not power of 2\n\t{\n\t\tconf := EEPROM24Config{100, 16, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n\n\t\/\/ size and page size not power of 2\n\t{\n\t\tconf := EEPROM24Config{100, 13, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n}\n\n\/\/ input\/output testing for Read and Write. as this test employs pvt24,\n\/\/ the test will fail on transaction which ignore page size and the offset\n\/\/ in the page.\nfunc TestEEPROM24InOut(t *testing.T) {\n\tcases := []struct {\n\t\tconf EEPROM24Config\n\t\toffs uint \/\/ offset to seek to at the start\n\t\tread bool\n\t\tbuf []byte \/\/ with read buf is just used to indicate the size, data is verified with the pvt24 mem pattern\n\t\tnexp int\n\t\terrexp error\n\t}{{EEPROM24Config{1024, 8, 0}, 6, true, []byte{0x22, 0x23, 0x2c, 0x2d, 0x2e, 0x2f}, 6, nil},\n\t\t{EEPROM24Config{128, 8, 0}, 123, true, []byte{0x5f, 0x58, 0x59, 0x5a, 0x5b, 0x00, 0x00, 0x00, 0x00}, 5, nil}, \/\/ double shot EOF returns err==nil on first call\n\t\t{EEPROM24Config{2048, 4, 0}, 9, false, []byte{0x0fe}, 1, nil}, \/\/ single byte write\n\t\t{EEPROM24Config{2048, 4, 0}, 2040, false, []byte{0xfc, 0xfd, 0xfe, 0xff}, 4, nil}, \/\/ full page\n\t\t{EEPROM24Config{2048, 4, 0}, 513, false, []byte{0x01, 0x02, 0x03, 0x04}, 4, nil}, \/\/ 1 byte in next page\n\t\t{EEPROM24Config{512, 4, 0}, 239, false, []byte{1, 2, 3, 4, 5, 6}, 6, nil}, \/\/ 1 byte partial, 4 bytes full, 1 byte partial\n\t\t{EEPROM24Config{512, 8, 0}, 254, false, []byte{1, 2, 3}, 3, nil}, \/\/ span i2c device boundary\n\t\t{EEPROM24Config{1024, 16, 0}, 1022, false, []byte{1, 2, 3, 4}, 2, io.EOF}, \/\/ test EOF. write employs a single shot EOF strategy\n\t}\n\n\tfor i, c := range cases {\n\t\tdevaddr := Addr7(0xA0 >> 1)\n\t\ttr := newPVT24(c.conf, t)\n\n\t\tee, err := NewEEPROM24(tr, devaddr, c.conf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"NewEEPROM failed unexpectedly on configuration %T: %#v\", c.conf, c.conf)\n\t\t\tcontinue\n\t\t}\n\n\t\t_ee := ee.(*ee24)\n\t\t_ee.p = c.offs\n\n\t\tvar n int\n\t\tvar rb []byte\n\n\t\tif c.read {\n\t\t\trb = make([]byte, len(c.buf))\n\t\t\tn, err = ee.Read(rb)\n\n\t\t\tif n != c.nexp {\n\t\t\t\tt.Errorf(\"case %d: expected ee24 to read %d bytes, it read %d\\n\", i, c.nexp, n)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != c.errexp {\n\t\t\t\tt.Errorf(\"case %d: expected ee24.Read to return error %#v, it returned %T: %#v\", i, c.errexp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif n > len(c.buf) {\n\t\t\t\tt.Errorf(\"case %d: ee.Read read %d bytes, even though the slice only contains %d bytes\", i, n, len(c.buf))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpp := c.offs + uint(c.nexp)\n\t\t\tif _ee.p != expp {\n\t\t\t\tt.Errorf(\"case %d: expected file pointer to be at %d at the end of ee.Read, it is at %d\", i, expp, _ee.p)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif string(rb[0:n]) != string(c.buf[0:c.nexp]) {\n\t\t\t\tt.Errorf(\"case %d: ee24.Read is expected to read bytes % x, it read % x\", i, c.buf[0:c.nexp], rb[0:n])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tn, err = ee.Write(c.buf)\n\n\t\t\tif n != c.nexp {\n\t\t\t\tt.Errorf(\"case %d: expected to write %d bytes, ee24 wrote %d\", i, n, c.nexp)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != c.errexp {\n\t\t\t\tt.Error(\"case %d: expected error %#v, got %T: #%v\", i, c.errexp, err, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpp := c.offs + uint(c.nexp)\n\t\t\tif _ee.p != expp {\n\t\t\t\tt.Errorf(\"case %d: expected file pointer to be at %d at the end of ee.Write, it its at %d\", i, expp, _ee.p)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\twmem := tr.mem[c.offs : c.offs+uint(c.nexp)]\n\t\t\twbuf := c.buf[:c.nexp]\n\t\t\tif string(wmem) != string(wbuf) {\n\t\t\t\tt.Errorf(\"case %d: expected ee.Write to have written % x, it wrote % x\", i, wbuf, wmem)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>more write EOF tests<commit_after>package i2cm\n\nimport (\n\t\"io\"\n\t\"testing\"\n)\n\ntype t8x8item struct {\n\taddr Addr\n\tregaddr uint8\n\twb, rb []byte\n\tnw, nr int\n\terr error\n}\n\n\/\/ page verifying transactor for 24Cxx style EEPROMs\n\/\/ it's always based at address (0xA0 >> 1).\n\/\/ rollover inside a page is not supported as this\n\/\/ behavior is not exploited by the EEPROM drivers\n\/\/ in this package.\n\/\/ also logs.\ntype PVT24 struct {\n\tt *testing.T\n\tmem []byte\n\tpagesize uint\n\tlog []t8x8item\n}\n\nfunc (p *PVT24) Transact8x8(addr Addr, regaddr uint8, wb, rb []byte) (int, int, error) {\n\t\/\/ read and write logic is intentionally kept simple and different\n\t\/\/ in style from the eeprom routines. maybe i will make different \n\t\/\/ mistakes both way around :)\n\n\tif len(wb) > 0 && len(rb) > 0 {\n\t\tp.t.Errorf(\"trying to write to and read from an EEPROM. this is not allowed.\\n\")\n\t}\n\n\tmemaddr := ((uint(addr.GetBaseAddr()) & 0x07) << 8) + uint(regaddr)\n\tstartpagebase := memaddr & ^(p.pagesize - 1)\n\n\tfor _, b := range wb {\n\t\tnewpagebase := memaddr & ^(p.pagesize - 1)\n\t\tif newpagebase != startpagebase {\n\t\t\tp.t.Errorf(\"EEPROM transaction started in page %#04x, continue to page %#04x\", startpagebase, newpagebase)\n\t\t}\n\n\t\twaddr := (memaddr & (p.pagesize - 1)) | startpagebase\n\n\t\tp.mem[waddr] = b\n\n\t\tmemaddr++\n\t}\n\n\tfor i := range rb {\n\t\trb[i] = p.mem[memaddr]\n\t\tmemaddr++\n\t}\n\n\tp.log = append(p.log, t8x8item{addr, regaddr, wb, rb, len(wb), len(rb), nil})\n\n\treturn len(wb), len(rb), nil\n}\n\n\/\/ has dummy methods for typing reasons\n\nfunc (p *PVT24) Start() error { panic(\"not implemented\") }\nfunc (p *PVT24) Stop() error { panic(\"not implemented\") }\nfunc (p *PVT24) WriteByte(b byte) error { panic(\"not implemented\") }\nfunc (p *PVT24) ReadByte(ack bool) (byte, error) { panic(\"not implemented\") }\n\nfunc newPVT24(conf EEPROM24Config, t *testing.T) *PVT24 {\n\tvar p PVT24\n\tp.mem = make([]byte, conf.Size)\n\tp.pagesize = conf.PageSize\n\tp.t = t\n\n\tfor i := range p.mem {\n\t\tp.mem[i] = 0x24 ^ uint8(i)\n\t}\n\n\treturn &p\n}\n\nfunc TestEEPROM24EOF(t *testing.T) {\n\tconf := Conf_24C02\n\tpvt := newPVT24(conf, t)\n\tee, err := NewEEPROM24(pvt, Addr7(0xa0>>1), conf)\n\tif err != nil {\n\t\tt.Fatalf(\"NewEEPROM24 should not fail in this context. it did with % T: %#v\\n\", err, err)\n\t}\n\n\t_ee, ok := ee.(*ee24)\n\tif !ok {\n\t\tt.Fatalf(\"expected to test an ee24, got %T: %#v\\n\", ee, ee)\n\t}\n\n\t_ee.p = conf.Size - 3\n\t{\n\t\trb := make([]byte, 16)\n\t\tn, err := ee.Read(rb)\n\t\tif n != 3 {\n\t\t\tt.Fatalf(\"expected to read 3 bytes, got %d\\n\", n)\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\tif err == nil {\n\t\t\t\tn, err := ee.Read(rb)\n\n\t\t\t\tif n != 0 {\n\t\t\t\t\tt.Fatalf(\"expected to read 0 bytes with the second shot, got %d\\n\", n)\n\t\t\t\t}\n\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tt.Fatalf(\"did not get back io.EOF even though EEPROM24 was asked twice, got: %T: %#v\\n\", err, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"expected EOF, got %T: %#v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t_ee.p = conf.Size - 5\n\t{\n\t\twb := make([]byte, 16)\n\t\tn, err := ee.Write(wb)\n\t\tif n != 5 {\n\t\t\tt.Fatalf(\"expected to write 5 bytes, wrote %d\\n\", n)\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\tt.Fatalf(\"ee24.Write past EOF: expected EOF, got %T: %#v\", err)\n\t\t}\n\n\t\tif _ee.p != conf.Size {\n\t\t\tt.Fatalf(\"ee24 file pointer at EOF should be at %#04x, however it is at %#04x\", conf.Size, _ee.p)\n\t\t}\n\n\t\tn, err = ee.Write(wb)\n\n\t\tif n != 0 {\n\t\t\tt.Fatalf(\"expected to read 0 bytes with the second shot, got %d\\n\", n)\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\tt.Fatalf(\"ee24.Write at EOF: expected EOF, got: %T: %#v\\n\", err, err)\n\t\t}\n\n\t\tif _ee.p != conf.Size {\n\t\t\tt.Fatalf(\"ee24 file pointer at EOF should be at %#04x, however it is at %#04x\", conf.Size, _ee.p)\n\t\t}\n\t}\n}\n\nfunc TestEEPROM24Conf(t *testing.T) {\n\tdefconf := EEPROM24Config{1, 1, 0}\n\tdevaddr := Addr7(0xA0 >> 1)\n\ttr := newPVT24(defconf, t)\n\n\t\/\/ one valid configuration as a counter check\n\t{\n\t\tif _, err := NewEEPROM24(tr, devaddr, defconf); err != nil {\n\t\t\tt.Errorf(\"NewEEPROM24 failed on valid configuration %#v\\n\", err)\n\t\t}\n\t}\n\n\t\/\/ pagesize not power of 2\n\t{\n\t\tconf := EEPROM24Config{2048, 13, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n\n\t\/\/ size not power of 2\n\t{\n\t\tconf := EEPROM24Config{100, 16, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n\n\t\/\/ size and page size not power of 2\n\t{\n\t\tconf := EEPROM24Config{100, 13, 0}\n\t\tif _, err := NewEEPROM24(tr, devaddr, conf); err == nil {\n\t\t\tt.Errorf(\"NewEEPROM24 did not fail on invalid configuration %#v\", conf)\n\t\t}\n\t}\n}\n\n\/\/ input\/output testing for Read and Write. as this test employs pvt24,\n\/\/ the test will fail on transaction which ignore page size and the offset\n\/\/ in the page.\nfunc TestEEPROM24InOut(t *testing.T) {\n\tcases := []struct {\n\t\tconf EEPROM24Config\n\t\toffs uint \/\/ offset to seek to at the start\n\t\tread bool\n\t\tbuf []byte \/\/ with read buf is just used to indicate the size, data is verified with the pvt24 mem pattern\n\t\tnexp int\n\t\terrexp error\n\t}{{EEPROM24Config{1024, 8, 0}, 6, true, []byte{0x22, 0x23, 0x2c, 0x2d, 0x2e, 0x2f}, 6, nil},\n\t\t{EEPROM24Config{128, 8, 0}, 123, true, []byte{0x5f, 0x58, 0x59, 0x5a, 0x5b, 0x00, 0x00, 0x00, 0x00}, 5, nil}, \/\/ double shot EOF returns err==nil on first call\n\t\t{EEPROM24Config{2048, 4, 0}, 9, false, []byte{0x0fe}, 1, nil}, \/\/ single byte write\n\t\t{EEPROM24Config{2048, 4, 0}, 2040, false, []byte{0xfc, 0xfd, 0xfe, 0xff}, 4, nil}, \/\/ full page\n\t\t{EEPROM24Config{2048, 4, 0}, 513, false, []byte{0x01, 0x02, 0x03, 0x04}, 4, nil}, \/\/ 1 byte in next page\n\t\t{EEPROM24Config{512, 4, 0}, 239, false, []byte{1, 2, 3, 4, 5, 6}, 6, nil}, \/\/ 1 byte partial, 4 bytes full, 1 byte partial\n\t\t{EEPROM24Config{512, 8, 0}, 254, false, []byte{1, 2, 3}, 3, nil}, \/\/ span i2c device boundary\n\t\t{EEPROM24Config{1024, 16, 0}, 1022, false, []byte{1, 2, 3, 4}, 2, io.EOF}, \/\/ test EOF. write employs a single shot EOF strategy\n\t}\n\n\tfor i, c := range cases {\n\t\tdevaddr := Addr7(0xA0 >> 1)\n\t\ttr := newPVT24(c.conf, t)\n\n\t\tee, err := NewEEPROM24(tr, devaddr, c.conf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"NewEEPROM failed unexpectedly on configuration %T: %#v\", c.conf, c.conf)\n\t\t\tcontinue\n\t\t}\n\n\t\t_ee := ee.(*ee24)\n\t\t_ee.p = c.offs\n\n\t\tvar n int\n\t\tvar rb []byte\n\n\t\tif c.read {\n\t\t\trb = make([]byte, len(c.buf))\n\t\t\tn, err = ee.Read(rb)\n\n\t\t\tif n != c.nexp {\n\t\t\t\tt.Errorf(\"case %d: expected ee24 to read %d bytes, it read %d\\n\", i, c.nexp, n)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != c.errexp {\n\t\t\t\tt.Errorf(\"case %d: expected ee24.Read to return error %#v, it returned %T: %#v\", i, c.errexp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif n > len(c.buf) {\n\t\t\t\tt.Errorf(\"case %d: ee.Read read %d bytes, even though the slice only contains %d bytes\", i, n, len(c.buf))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpp := c.offs + uint(c.nexp)\n\t\t\tif _ee.p != expp {\n\t\t\t\tt.Errorf(\"case %d: expected file pointer to be at %d at the end of ee.Read, it is at %d\", i, expp, _ee.p)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif string(rb[0:n]) != string(c.buf[0:c.nexp]) {\n\t\t\t\tt.Errorf(\"case %d: ee24.Read is expected to read bytes % x, it read % x\", i, c.buf[0:c.nexp], rb[0:n])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tn, err = ee.Write(c.buf)\n\n\t\t\tif n != c.nexp {\n\t\t\t\tt.Errorf(\"case %d: expected to write %d bytes, ee24 wrote %d\", i, n, c.nexp)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != c.errexp {\n\t\t\t\tt.Error(\"case %d: expected error %#v, got %T: #%v\", i, c.errexp, err, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpp := c.offs + uint(c.nexp)\n\t\t\tif _ee.p != expp {\n\t\t\t\tt.Errorf(\"case %d: expected file pointer to be at %d at the end of ee.Write, it its at %d\", i, expp, _ee.p)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\twmem := tr.mem[c.offs : c.offs+uint(c.nexp)]\n\t\t\twbuf := c.buf[:c.nexp]\n\t\t\tif string(wmem) != string(wbuf) {\n\t\t\t\tt.Errorf(\"case %d: expected ee.Write to have written % x, it wrote % x\", i, wbuf, wmem)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package webhooks\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/majestrate\/srndv2\/lib\/config\"\n\t\"github.com\/majestrate\/srndv2\/lib\/nntp\"\n\t\"github.com\/majestrate\/srndv2\/lib\/nntp\/message\"\n\t\"github.com\/majestrate\/srndv2\/lib\/store\"\n\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ web hook implementation\ntype httpWebhook struct {\n\tconf *config.WebhookConfig\n\tstorage store.Storage\n\thdr *message.HeaderIO\n}\n\nfunc (h *httpWebhook) SentArticleVia(msgid nntp.MessageID, name string) {\n\t\/\/ web hooks don't care about feed state\n}\n\n\/\/ we got a new article\nfunc (h *httpWebhook) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) {\n\tf, err := h.storage.OpenArticle(msgid.String())\n\tif err == nil {\n\t\tc := textproto.NewConn(f)\n\t\tvar hdr textproto.MIMEHeader\n\t\thdr, err = c.ReadMIMEHeader()\n\t\tif err == nil {\n\t\t\tctype := hdr.Get(\"Content-Type\")\n\t\t\tif ctype == \"\" {\n\t\t\t\tctype = \"text\/plain\"\n\t\t\t}\n\t\t\tctype = strings.Replace(strings.ToLower(ctype), \"multipart\/mixed\", \"multipart\/form-data\", 1)\n\t\t\tu, _ := url.Parse(h.conf.URL)\n\t\t\tq := u.Query()\n\t\t\tfor k, vs := range hdr {\n\t\t\t\tfor _, v := range vs {\n\t\t\t\t\tq.Add(k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tq.Set(\"Content-Type\", ctype)\n\t\t\tu.RawQuery = q.Encode()\n\n\t\t\tvar body io.Reader\n\n\t\t\tif strings.HasPrefix(ctype, \"multipart\") {\n\t\t\t\tpr, pw := io.Pipe()\n\t\t\t\tlog.Debug(\"using pipe\")\n\t\t\t\tbody = pr\n\t\t\t\tgo func(in io.Reader, out io.WriteCloser) {\n\t\t\t\t\t_, params, _ := mime.ParseMediaType(ctype)\n\t\t\t\t\tif params == nil {\n\t\t\t\t\t\t\/\/ send as whatever lol\n\t\t\t\t\t\tio.Copy(out, in)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tboundary, _ := params[\"boundary\"]\n\t\t\t\t\t\tmpr := multipart.NewReader(in, boundary)\n\t\t\t\t\t\tmpw := multipart.NewWriter(out)\n\t\t\t\t\t\tmpw.SetBoundary(boundary)\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tpart, err := mpr.NextPart()\n\t\t\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\t\t\terr = nil\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t} else if err == nil {\n\t\t\t\t\t\t\t\t\/\/ get part header\n\t\t\t\t\t\t\t\th := part.Header\n\t\t\t\t\t\t\t\t\/\/ rewrite header part for php\n\t\t\t\t\t\t\t\tcd := h.Get(\"Content-Disposition\")\n\t\t\t\t\t\t\t\tr := regexp.MustCompile(`; name=\".*\"`)\n\t\t\t\t\t\t\t\tcd = r.ReplaceAllString(cd, `; name=\"attachment[]\";`)\n\t\t\t\t\t\t\t\tlog.Debug(cd)\n\t\t\t\t\t\t\t\th.Set(\"Content-Disposition\", cd)\n\t\t\t\t\t\t\t\t\/\/ make write part\n\t\t\t\t\t\t\t\twp, err := mpw.CreatePart(h)\n\t\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\t\t\/\/ write part out\n\t\t\t\t\t\t\t\t\tio.Copy(wp, part)\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tlog.Errorf(\"error writng webhook part: %s\", err.Error())\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpart.Close()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmpw.Close()\n\t\t\t\t\t}\n\t\t\t\t\tout.Close()\n\t\t\t\t}(c.R, pw)\n\t\t\t} else {\n\t\t\t\tbody = c.R\n\t\t\t}\n\n\t\t\tvar r *http.Response\n\t\t\tr, err = http.Post(u.String(), ctype, body)\n\t\t\tif err == nil {\n\t\t\t\t_, err = io.Copy(ioutil.Discard, r.Body)\n\t\t\t\tr.Body.Close()\n\t\t\t\tlog.Infof(\"hook called for %s\", msgid)\n\t\t\t}\n\t\t} else {\n\t\t\tf.Close()\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"error calling web hook %s: %s\", h.conf.Name, err.Error())\n\t}\n}\n<commit_msg>try fix for webhooks<commit_after>package webhooks\n\nimport (\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/majestrate\/srndv2\/lib\/config\"\n\t\"github.com\/majestrate\/srndv2\/lib\/nntp\"\n\t\"github.com\/majestrate\/srndv2\/lib\/nntp\/message\"\n\t\"github.com\/majestrate\/srndv2\/lib\/store\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ web hook implementation\ntype httpWebhook struct {\n\tconf *config.WebhookConfig\n\tstorage store.Storage\n\thdr *message.HeaderIO\n}\n\nfunc (h *httpWebhook) SentArticleVia(msgid nntp.MessageID, name string) {\n\t\/\/ web hooks don't care about feed state\n}\n\n\/\/ we got a new article\nfunc (h *httpWebhook) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) {\n\tf, err := h.storage.OpenArticle(msgid.String())\n\tif err == nil {\n\t\tc := textproto.NewConn(f)\n\t\tvar hdr textproto.MIMEHeader\n\t\thdr, err = c.ReadMIMEHeader()\n\t\tif err == nil {\n\t\t\tctype := hdr.Get(\"Content-Type\")\n\t\t\tif ctype == \"\" {\n\t\t\t\tctype = \"text\/plain\"\n\t\t\t}\n\t\t\tctype = strings.Replace(strings.ToLower(ctype), \"multipart\/mixed\", \"multipart\/form-data\", 1)\n\t\t\tu, _ := url.Parse(h.conf.URL)\n\t\t\tq := u.Query()\n\t\t\tfor k, vs := range hdr {\n\t\t\t\tfor _, v := range vs {\n\t\t\t\t\tq.Add(k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tq.Set(\"Content-Type\", ctype)\n\t\t\tu.RawQuery = q.Encode()\n\n\t\t\tvar body io.Reader\n\n\t\t\tif strings.HasPrefix(ctype, \"multipart\") {\n\t\t\t\tpr, pw := io.Pipe()\n\t\t\t\tlog.Debug(\"using pipe\")\n\t\t\t\tbody = pr\n\t\t\t\tgo func(in io.Reader, out io.WriteCloser) {\n\t\t\t\t\t_, params, _ := mime.ParseMediaType(ctype)\n\t\t\t\t\tif params == nil {\n\t\t\t\t\t\t\/\/ send as whatever lol\n\t\t\t\t\t\tio.Copy(out, in)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tboundary, _ := params[\"boundary\"]\n\t\t\t\t\t\tmpr := multipart.NewReader(in, boundary)\n\t\t\t\t\t\tmpw := multipart.NewWriter(out)\n\t\t\t\t\t\tmpw.SetBoundary(boundary)\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tpart, err := mpr.NextPart()\n\t\t\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\t\t\terr = nil\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t} else if err == nil {\n\t\t\t\t\t\t\t\t\/\/ get part header\n\t\t\t\t\t\t\t\th := part.Header\n\t\t\t\t\t\t\t\t\/\/ rewrite header part for php\n\t\t\t\t\t\t\t\tcd := h.Get(\"Content-Disposition\")\n\t\t\t\t\t\t\t\tr := regexp.MustCompile(`; filename=\"(.*)\"`)\n\t\t\t\t\t\t\t\t\/\/ YOLO\n\t\t\t\t\t\t\t\tfname := r.FindStringSubmatch(cd)[1]\n\t\t\t\t\t\t\t\th.Set(\"Content-Disposition\", fmt.Sprintf(`filename=\"%s\"; name=\"attachment\"`, fname))\n\t\t\t\t\t\t\t\t\/\/ make write part\n\t\t\t\t\t\t\t\twp, err := mpw.CreatePart(h)\n\t\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\t\t\/\/ write part out\n\t\t\t\t\t\t\t\t\tio.Copy(wp, part)\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tlog.Errorf(\"error writng webhook part: %s\", err.Error())\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tpart.Close()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmpw.Close()\n\t\t\t\t\t}\n\t\t\t\t\tout.Close()\n\t\t\t\t}(c.R, pw)\n\t\t\t} else {\n\t\t\t\tbody = c.R\n\t\t\t}\n\n\t\t\tvar r *http.Response\n\t\t\tr, err = http.Post(u.String(), ctype, body)\n\t\t\tif err == nil {\n\t\t\t\t_, err = io.Copy(ioutil.Discard, r.Body)\n\t\t\t\tr.Body.Close()\n\t\t\t\tlog.Infof(\"hook called for %s\", msgid)\n\t\t\t}\n\t\t} else {\n\t\t\tf.Close()\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"error calling web hook %s: %s\", h.conf.Name, err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ VERSION ...\nconst VERSION = \"1.1.14\"\n<commit_msg>v1.1.15<commit_after>package version\n\n\/\/ VERSION ...\nconst VERSION = \"1.1.15\"\n<|endoftext|>"} {"text":"<commit_before>package ccd_test\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/jteeuwen\/go-pkg-xmlx\"\n\t\"github.com\/kdar\/health\/ccd\"\n\t\"github.com\/kdar\/health\/ccd\/parsers\/medtable\"\n)\n\nfunc parseAndRecover(t *testing.T, c *ccd.CCD, path string, doc *xmlx.Document) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlines := bytes.Split(debug.Stack(), []byte{'\\n'})\n\t\t\tfor i, _ := range lines {\n\t\t\t\tif lines[i][0] == '\\t' {\n\t\t\t\t\tlines[i] = lines[i][1:]\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.Fatalf(\"Error processing: %s\\n\\n%s\", path, bytes.Join(lines, []byte{'\\n'}))\n\t\t}\n\t}()\n\n\tif doc != nil {\n\t\terr = c.ParseDoc(doc)\n\t} else {\n\t\terr = c.ParseFile(path)\n\t}\n\treturn\n}\n\nfunc walkAllCCDs(f filepath.WalkFunc) {\n\tfilepath.Walk(\"testdata\", func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\tif info.Name()[0] == '.' {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif !strings.HasSuffix(path, \"xml\") && !strings.HasSuffix(path, \"ccd\") {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn f(path, info, err)\n\t})\n}\n\nfunc TestParseAllCCDs(t *testing.T) {\n\twalkAllCCDs(func(path string, info os.FileInfo, err error) error {\n\t\tshouldfail := strings.HasPrefix(info.Name(), \"fail_\")\n\n\t\tdoc := xmlx.New()\n\t\terr = doc.LoadFile(path, nil)\n\t\tif shouldfail && err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tc := ccd.NewDefaultCCD()\n\t\terr = parseAndRecover(t, c, path, doc)\n\t\tif shouldfail && err == nil {\n\t\t\tt.Fatalf(\"%s: Expected failure, instead received success.\", path)\n\t\t} else if !shouldfail && err != nil {\n\t\t\tt.Fatalf(\"%s: Failed: %v\", path, err)\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc TestInvalidCCD(t *testing.T) {\n\tc := ccd.NewDefaultCCD()\n\terr := parseAndRecover(t, c, \"testdata\/specific\/invalid_ccd.xml\", nil)\n\tif err == nil {\n\t\tt.Fatal(\"Expected parsing of CCD to fail and throw and error.\")\n\t}\n\n\terr = parseAndRecover(t, c, \"testdata\/specific\/valid_ccd.xml\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestNewStuff(t *testing.T) {\n\tt.Skip(\"just for my own needs\")\n\n\tc := ccd.NewDefaultCCD()\n\tc.AddParsers(medtable.Parser())\n\terr := parseAndRecover(t, c, \"testdata\/private\/2013-08-26T04_03_24 - 0b7fddbdc631aecc6c96090043f690204f7d0d9d.xml\", nil)\n\t\/\/err := parseAndRecover(t, c, \"testdata\/public\/ToC_CCDA_CCD_CompGuideSample_FullXML_v01a.xml\", nil)\n\t\/\/err := parseAndRecover(t, c, \"testdata\/public\/SampleCCDDocument.xml\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = spew.Dump\n\n\t\/\/spew.Dump(c.Patient)\n}\n<commit_msg>Improved ccd decoding tests to run all ccds<commit_after>package ccd_test\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/jteeuwen\/go-pkg-xmlx\"\n\t\"github.com\/kdar\/health\/ccd\"\n\t\"github.com\/kdar\/health\/ccd\/parsers\/medtable\"\n)\n\nfunc parseAndRecover(t *testing.T, c *ccd.CCD, path string, doc *xmlx.Document) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlines := bytes.Split(debug.Stack(), []byte{'\\n'})\n\t\t\tfor i, _ := range lines {\n\t\t\t\tif len(lines[i]) >= 1 && lines[i][0] == '\\t' {\n\t\t\t\t\tlines[i] = lines[i][1:]\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.Errorf(\"Error processing: %s\\n\\n%s\", path, bytes.Join(lines, []byte{'\\n'}))\n\t\t}\n\t}()\n\n\tif doc != nil {\n\t\terr = c.ParseDoc(doc)\n\t} else {\n\t\terr = c.ParseFile(path)\n\t}\n\treturn\n}\n\nfunc walkAllCCDs(f filepath.WalkFunc) {\n\tfilepath.Walk(\"testdata\", func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\tif info.Name()[0] == '.' {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif !strings.HasSuffix(path, \"xml\") && !strings.HasSuffix(path, \"ccd\") {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn f(path, info, err)\n\t})\n}\n\nfunc TestParseAllCCDs(t *testing.T) {\n\twalkAllCCDs(func(path string, info os.FileInfo, err error) error {\n\t\tshouldfail := strings.HasPrefix(info.Name(), \"fail_\")\n\n\t\tdoc := xmlx.New()\n\t\terr = doc.LoadFile(path, nil)\n\t\tif shouldfail && err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tc := ccd.NewDefaultCCD()\n\t\terr = parseAndRecover(t, c, path, doc)\n\t\tif shouldfail && err == nil {\n\t\t\tt.Errorf(\"%s: Expected failure, instead received success.\", path)\n\t\t} else if !shouldfail && err != nil {\n\t\t\tt.Errorf(\"%s: Failed: %v\", path, err)\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc TestInvalidCCD(t *testing.T) {\n\tc := ccd.NewDefaultCCD()\n\terr := parseAndRecover(t, c, \"testdata\/specific\/invalid_ccd.xml\", nil)\n\tif err == nil {\n\t\tt.Fatal(\"Expected parsing of CCD to fail and throw and error.\")\n\t}\n\n\terr = parseAndRecover(t, c, \"testdata\/specific\/valid_ccd.xml\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestNewStuff(t *testing.T) {\n\tt.Skip(\"just for my own needs\")\n\n\tc := ccd.NewDefaultCCD()\n\tc.AddParsers(medtable.Parser())\n\terr := parseAndRecover(t, c, \"testdata\/private\/2013-08-26T04_03_24 - 0b7fddbdc631aecc6c96090043f690204f7d0d9d.xml\", nil)\n\t\/\/err := parseAndRecover(t, c, \"testdata\/public\/ToC_CCDA_CCD_CompGuideSample_FullXML_v01a.xml\", nil)\n\t\/\/err := parseAndRecover(t, c, \"testdata\/public\/SampleCCDDocument.xml\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = spew.Dump\n\n\t\/\/spew.Dump(c.Patient)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage environs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/juju-core\/errors\"\n\t\"strings\"\n)\n\n\/\/ EmptyStorage holds a StorageReader object that contains no files and\n\/\/ offers no URLs.\nvar EmptyStorage StorageReader = emptyStorage{}\n\ntype emptyStorage struct{}\n\nconst verificationFilename string = \"bootstrap-verify\"\nconst verificationContent = \"juju-core storage writing verified: ok\"\n\nfunc (s emptyStorage) Get(name string) (io.ReadCloser, error) {\n\treturn nil, errors.NotFoundf(\"file %q\", name)\n}\n\nfunc (s emptyStorage) URL(name string) (string, error) {\n\treturn \"\", fmt.Errorf(\"file %q not found\", name)\n}\n\nfunc (s emptyStorage) List(prefix string) ([]string, error) {\n\treturn nil, nil\n}\n\nfunc VerifyStorage(storage Storage) error {\n\treader := strings.NewReader(verificationContent)\n\terr := storage.Put(verificationFilename, reader,\n\t\tint64(len(verificationContent)))\n\treturn err\n}\n<commit_msg>More review fixes.<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage environs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/juju-core\/errors\"\n\t\"strings\"\n)\n\n\/\/ EmptyStorage holds a StorageReader object that contains no files and\n\/\/ offers no URLs.\nvar EmptyStorage StorageReader = emptyStorage{}\n\ntype emptyStorage struct{}\n\nconst verificationFilename string = \"bootstrap-verify\"\nconst verificationContent = \"juju-core storage writing verified: ok\\n\"\n\nfunc (s emptyStorage) Get(name string) (io.ReadCloser, error) {\n\treturn nil, errors.NotFoundf(\"file %q\", name)\n}\n\nfunc (s emptyStorage) URL(name string) (string, error) {\n\treturn \"\", fmt.Errorf(\"file %q not found\", name)\n}\n\nfunc (s emptyStorage) List(prefix string) ([]string, error) {\n\treturn nil, nil\n}\n\nfunc VerifyStorage(storage Storage) error {\n\treader := strings.NewReader(verificationContent)\n\terr := storage.Put(verificationFilename, reader,\n\t\tint64(len(verificationContent)))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package finder\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype TagState int\n\nconst (\n\tTagRoot TagState = iota \/\/ query = \"*\"\n\tTagSkip \/\/ not _tag prefix\n\tTagInfoRoot \/\/ query = \"_tag\"\n)\n\ntype TagQ struct {\n\tParam *string\n\tValue *string\n}\n\nfunc (q TagQ) String() string {\n\tif q.Param != nil && q.Value != nil {\n\t\treturn fmt.Sprintf(\"{\\\"param\\\"=%#v, \\\"value\\\"=%#v}\", *q.Param, *q.Value)\n\t}\n\tif q.Param != nil {\n\t\treturn fmt.Sprintf(\"{\\\"param\\\"=%#v}\", *q.Param)\n\t}\n\tif q.Value != nil {\n\t\treturn fmt.Sprintf(\"{\\\"value\\\"=%#v}\", *q.Value)\n\t}\n\treturn \"{}\"\n}\n\nfunc (q *TagQ) Where(field string) string {\n\tif q.Param != nil && q.Value != nil && *q.Value != \"*\" {\n\t\treturn fmt.Sprintf(\"%s=%s\", field, Q(*q.Param+*q.Value))\n\t}\n\tif q.Param != nil {\n\t\treturn fmt.Sprintf(\"%s LIKE %s\", field, Q(*q.Param+`%`))\n\t}\n\tif q.Value != nil && *q.Value != \"*\" {\n\t\treturn fmt.Sprintf(\"%s=%s\", field, Q(*q.Value))\n\t}\n\n\treturn \"\"\n}\n\ntype TagFinder struct {\n\twrapped Finder\n\tctx context.Context \/\/ for clickhouse.Query\n\turl string \/\/ clickhouse dsn\n\ttable string \/\/ graphite_tag table\n\ttimeout time.Duration \/\/ clickhouse query timeout\n\tstate TagState\n\ttagQuery []TagQ\n\tseriesQuery string\n}\n\nvar EmptyList [][]byte = [][]byte{}\n\nfunc WrapTag(f Finder, ctx context.Context, url string, table string, timeout time.Duration) *TagFinder {\n\treturn &TagFinder{\n\t\twrapped: f,\n\t\tctx: ctx,\n\t\turl: url,\n\t\ttable: table,\n\t\ttimeout: timeout,\n\t\ttagQuery: make([]TagQ, 0),\n\t}\n}\n\nfunc (t *TagFinder) tagListSQL() (string, error) {\n\tif len(t.tagQuery) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\twhere := \"\"\n\tand := func(exp string) {\n\t\tif exp == \"\" {\n\t\t\treturn\n\t\t}\n\t\tif where != \"\" {\n\t\t\twhere = fmt.Sprintf(\"%s AND (%s)\", where, exp)\n\t\t} else {\n\t\t\twhere = fmt.Sprintf(\"(%s)\", exp)\n\t\t}\n\t}\n\n\tand(fmt.Sprintf(\"Version>=(SELECT Max(Version) FROM %s WHERE Tag1='' AND Level=0 AND Path='')\", t.table))\n\tand(\"Level=1\")\n\n\t\/\/ first\n\tand(t.tagQuery[0].Where(\"Tag1\"))\n\n\tif len(t.tagQuery) == 1 {\n\t\treturn fmt.Sprintf(\"SELECT Tag1 FROM %s WHERE %s GROUP BY Tag1\", t.table, where), nil\n\t}\n\n\t\/\/ 1..(n-1)\n\tfor i := 1; i < len(t.tagQuery)-1; i++ {\n\t\tcond := t.tagQuery[i].Where(\"x\")\n\t\tif cond != \"\" {\n\t\t\tand(fmt.Sprintf(\"arrayExists((x) -> %s, Tags)\", cond))\n\t\t}\n\t}\n\n\t\/\/ last\n\tand(t.tagQuery[len(t.tagQuery)-1].Where(\"TagN\"))\n\n\treturn fmt.Sprintf(\"SELECT TagN FROM %s ARRAY JOIN Tags AS TagN WHERE %s GROUP BY TagN\", t.table, where), nil\n}\n\nfunc (t *TagFinder) MakeSQL(query string) (string, error) {\n\tif query == \"_tag\" {\n\t\treturn \"\", nil\n\t}\n\n\tqs := strings.Split(query, \".\")\n\n\tt.tagQuery = make([]TagQ, 0)\n\n\tfor {\n\t\tif len(qs) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif qs[0] == \"_tag\" {\n\t\t\tif len(qs) >= 2 {\n\t\t\t\tv := qs[1]\n\t\t\t\tif len(v) > 0 && v[len(v)-1] == '=' {\n\t\t\t\t\tif len(qs) >= 3 {\n\t\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Param: &v, Value: &qs[2]})\n\t\t\t\t\t\tqs = qs[3:]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Param: &v})\n\t\t\t\t\t\tqs = qs[2:]\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Value: &v})\n\t\t\t\t\tqs = qs[2:]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{})\n\t\t\t\tqs = qs[1:]\n\t\t\t}\n\t\t} else {\n\t\t\tt.seriesQuery = strings.Join(qs, \".\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif t.seriesQuery == \"\" {\n\t\treturn t.tagListSQL()\n\t}\n\n\t\/\/ where := \"\"\n\n\t\/\/ AND := func(where string) string {\n\t\/\/ \tif q\n\t\/\/ }\n\n\treturn \"\", nil\n}\n\nfunc (t *TagFinder) Execute(query string) error {\n\tt.state = TagSkip\n\n\tif query == \"\" {\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tif query == \"*\" {\n\t\tt.state = TagRoot\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tif !strings.HasPrefix(query, \"_tag.\") {\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tt.MakeSQL(query)\n\n\treturn nil\n}\n\nfunc (t *TagFinder) List() [][]byte {\n\tswitch t.state {\n\tcase TagSkip:\n\t\treturn t.wrapped.List()\n\tcase TagInfoRoot:\n\t\treturn [][]byte{[]byte(\"_tag.\")}\n\tcase TagRoot:\n\t\t\/\/ pass\n\t\treturn append([][]byte{[]byte(\"_tag.\")}, t.wrapped.List()...)\n\tdefault:\n\t\treturn EmptyList\n\t}\n\n\treturn EmptyList\n}\n\nfunc (t *TagFinder) Series() [][]byte {\n\tswitch t.state {\n\tcase TagSkip:\n\t\treturn t.wrapped.List()\n\tcase TagInfoRoot:\n\t\treturn EmptyList\n\tcase TagRoot:\n\t\treturn t.wrapped.Series()\n\tdefault:\n\t\treturn EmptyList\n\t}\n\n\treturn EmptyList\n}\n\nfunc (t *TagFinder) Abs(v []byte) []byte {\n\t\/\/ @TODO\n\treturn v\n}\n<commit_msg>first level tags list<commit_after>package finder\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/lomik\/graphite-clickhouse\/helper\/clickhouse\"\n)\n\ntype TagState int\n\nconst (\n\tTagRoot TagState = iota \/\/ query = \"*\"\n\tTagSkip \/\/ not _tag prefix\n\tTagInfoRoot \/\/ query = \"_tag\"\n\tTagList\n)\n\ntype TagQ struct {\n\tParam *string\n\tValue *string\n}\n\nfunc (q TagQ) String() string {\n\tif q.Param != nil && q.Value != nil {\n\t\treturn fmt.Sprintf(\"{\\\"param\\\"=%#v, \\\"value\\\"=%#v}\", *q.Param, *q.Value)\n\t}\n\tif q.Param != nil {\n\t\treturn fmt.Sprintf(\"{\\\"param\\\"=%#v}\", *q.Param)\n\t}\n\tif q.Value != nil {\n\t\treturn fmt.Sprintf(\"{\\\"value\\\"=%#v}\", *q.Value)\n\t}\n\treturn \"{}\"\n}\n\nfunc (q *TagQ) Where(field string) string {\n\tif q.Param != nil && q.Value != nil && *q.Value != \"*\" {\n\t\treturn fmt.Sprintf(\"%s=%s\", field, Q(*q.Param+*q.Value))\n\t}\n\tif q.Param != nil {\n\t\treturn fmt.Sprintf(\"%s LIKE %s\", field, Q(*q.Param+`%`))\n\t}\n\tif q.Value != nil && *q.Value != \"*\" {\n\t\treturn fmt.Sprintf(\"%s=%s\", field, Q(*q.Value))\n\t}\n\n\treturn \"\"\n}\n\ntype TagFinder struct {\n\twrapped Finder\n\tctx context.Context \/\/ for clickhouse.Query\n\turl string \/\/ clickhouse dsn\n\ttable string \/\/ graphite_tag table\n\ttimeout time.Duration \/\/ clickhouse query timeout\n\tstate TagState\n\ttagQuery []TagQ\n\tseriesQuery string\n\tbody []byte \/\/ clickhouse response\n}\n\nvar EmptyList [][]byte = [][]byte{}\n\nfunc WrapTag(f Finder, ctx context.Context, url string, table string, timeout time.Duration) *TagFinder {\n\treturn &TagFinder{\n\t\twrapped: f,\n\t\tctx: ctx,\n\t\turl: url,\n\t\ttable: table,\n\t\ttimeout: timeout,\n\t\ttagQuery: make([]TagQ, 0),\n\t}\n}\n\nfunc (t *TagFinder) tagListSQL() (string, error) {\n\tif len(t.tagQuery) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\twhere := \"\"\n\tand := func(exp string) {\n\t\tif exp == \"\" {\n\t\t\treturn\n\t\t}\n\t\tif where != \"\" {\n\t\t\twhere = fmt.Sprintf(\"%s AND (%s)\", where, exp)\n\t\t} else {\n\t\t\twhere = fmt.Sprintf(\"(%s)\", exp)\n\t\t}\n\t}\n\n\tand(fmt.Sprintf(\"Version>=(SELECT Max(Version) FROM %s WHERE Tag1='' AND Level=0 AND Path='')\", t.table))\n\tand(\"Level=1\")\n\n\t\/\/ first\n\tand(t.tagQuery[0].Where(\"Tag1\"))\n\n\tif len(t.tagQuery) == 1 {\n\t\treturn fmt.Sprintf(\"SELECT Tag1 FROM %s WHERE %s GROUP BY Tag1\", t.table, where), nil\n\t}\n\n\t\/\/ 1..(n-1)\n\tfor i := 1; i < len(t.tagQuery)-1; i++ {\n\t\tcond := t.tagQuery[i].Where(\"x\")\n\t\tif cond != \"\" {\n\t\t\tand(fmt.Sprintf(\"arrayExists((x) -> %s, Tags)\", cond))\n\t\t}\n\t}\n\n\t\/\/ last\n\tand(t.tagQuery[len(t.tagQuery)-1].Where(\"TagN\"))\n\n\treturn fmt.Sprintf(\"SELECT TagN FROM %s ARRAY JOIN Tags AS TagN WHERE %s GROUP BY TagN\", t.table, where), nil\n}\n\nfunc (t *TagFinder) MakeSQL(query string) (string, error) {\n\tif query == \"_tag\" {\n\t\treturn \"\", nil\n\t}\n\n\tqs := strings.Split(query, \".\")\n\n\tt.tagQuery = make([]TagQ, 0)\n\n\tfor {\n\t\tif len(qs) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif qs[0] == \"_tag\" {\n\t\t\tif len(qs) >= 2 {\n\t\t\t\tv := qs[1]\n\t\t\t\tif len(v) > 0 && v[len(v)-1] == '=' {\n\t\t\t\t\tif len(qs) >= 3 {\n\t\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Param: &v, Value: &qs[2]})\n\t\t\t\t\t\tqs = qs[3:]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Param: &v})\n\t\t\t\t\t\tqs = qs[2:]\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{Value: &v})\n\t\t\t\t\tqs = qs[2:]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.tagQuery = append(t.tagQuery, TagQ{})\n\t\t\t\tqs = qs[1:]\n\t\t\t}\n\t\t} else {\n\t\t\tt.seriesQuery = strings.Join(qs, \".\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif t.seriesQuery == \"\" {\n\t\tt.state = TagList\n\t\treturn t.tagListSQL()\n\t}\n\n\t\/\/ where := \"\"\n\n\t\/\/ AND := func(where string) string {\n\t\/\/ \tif q\n\t\/\/ }\n\n\treturn \"\", nil\n}\n\nfunc (t *TagFinder) Execute(query string) error {\n\tt.state = TagSkip\n\n\tif query == \"\" {\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tif query == \"*\" {\n\t\tt.state = TagRoot\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tif !strings.HasPrefix(query, \"_tag.\") {\n\t\treturn t.wrapped.Execute(query)\n\t}\n\n\tsql, err := t.MakeSQL(query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(err, sql)\n\n\tt.body, err = clickhouse.Query(t.ctx, t.url, sql, t.timeout)\n\n\treturn err\n}\n\nfunc (t *TagFinder) List() [][]byte {\n\tswitch t.state {\n\tcase TagSkip:\n\t\treturn t.wrapped.List()\n\tcase TagInfoRoot:\n\t\treturn [][]byte{[]byte(\"_tag.\")}\n\tcase TagRoot:\n\t\t\/\/ pass\n\t\treturn append([][]byte{[]byte(\"_tag.\")}, t.wrapped.List()...)\n\t}\n\n\tif t.body == nil {\n\t\treturn [][]byte{}\n\t}\n\n\trows := bytes.Split(t.body, []byte{'\\n'})\n\n\tskip := 0\n\tfor i := 0; i < len(rows); i++ {\n\t\tif len(rows[i]) == 0 {\n\t\t\tskip++\n\t\t\tcontinue\n\t\t}\n\t\tif skip > 0 {\n\t\t\trows[i-skip] = rows[i]\n\t\t}\n\t}\n\n\trows = rows[:len(rows)-skip]\n\n\tif t.state == TagList {\n\t\t\/\/ add dots\n\t\t\/\/ @TODO: optimize?\n\t\tfor i := 0; i < len(rows); i++ {\n\t\t\trows[i] = append(rows[i], '.')\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (t *TagFinder) Series() [][]byte {\n\tswitch t.state {\n\tcase TagSkip:\n\t\treturn t.wrapped.List()\n\tcase TagInfoRoot:\n\t\treturn EmptyList\n\tcase TagRoot:\n\t\treturn t.wrapped.Series()\n\tdefault:\n\t\treturn EmptyList\n\t}\n\n\treturn EmptyList\n}\n\nfunc (t *TagFinder) Abs(v []byte) []byte {\n\t\/\/ @TODO\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package goConfig\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tdefaultPath = \".\/\"\n\tdefaultConfigFile = \"config.json\"\n)\n\n\/\/ Tag set the main tag\nvar Tag = \"cfg\"\n\n\/\/ TagDefault set tag default\nvar TagDefault = \"cfgDefault\"\n\n\/\/ LoadJSON config file\nfunc LoadJSON(config interface{}) (err error) {\n\tconfigFile := defaultPath + defaultConfigFile\n\tfile, err := os.Open(configFile)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Load config file\nfunc Load(config interface{}) (err error) {\n\n\terr = LoadJSON(config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = parseTags(config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Save config file\nfunc Save(config interface{}) (err error) {\n\t_, err = os.Stat(defaultPath)\n\tif os.IsNotExist(err) {\n\t\tos.Mkdir(defaultPath, 0700)\n\t} else if err != nil {\n\t\treturn\n\t}\n\n\tconfigFile := defaultPath + defaultConfigFile\n\n\t_, err = os.Stat(configFile)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tb, err := json.MarshalIndent(config, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = ioutil.WriteFile(defaultConfigFile, b, 0644)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ Getenv get enviroment variable\nfunc Getenv(env string) (r string) {\n\tr = os.Getenv(env)\n\treturn\n}\n\nfunc parseTags(s interface{}) (err error) {\n\n\tst := reflect.TypeOf(s)\n\tvt := reflect.ValueOf(s)\n\n\tif st.Kind() != reflect.Ptr {\n\t\terr = errors.New(\"Not a pointer\")\n\t\treturn\n\t}\n\n\trefField := st.Elem()\n\tif refField.Kind() != reflect.Struct {\n\t\terr = errors.New(\"Not a struct\")\n\t\treturn\n\t}\n\n\trefValue := vt.Elem()\n\tfor i := 0; i < refField.NumField(); i++ {\n\t\tfield := refField.Field(i)\n\t\tvalue := refValue.Field(i)\n\t\tkind := field.Type.Kind()\n\n\t\tif field.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch kind {\n\t\tcase reflect.Struct:\n\t\t\terr = parseTags(value.Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\tvalue.SetString(\"TEST\")\n\t\tcase reflect.Int:\n\t\t\tvalue.SetInt(999)\n\t\tdefault:\n\t\t\terr = errors.New(\"Type not supported \" + kind.String())\n\t\t}\n\n\t\tfmt.Println(\"name:\", field.Name,\n\t\t\t\"| cfg:\", field.Tag.Get(Tag),\n\t\t\t\"| cfgDefault:\", field.Tag.Get(TagDefault),\n\t\t\t\"| type:\", field.Type)\n\n\t}\n\treturn\n}\n<commit_msg>get env<commit_after>package goConfig\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tdefaultPath = \".\/\"\n\tdefaultConfigFile = \"config.json\"\n)\n\n\/\/ Tag set the main tag\nvar Tag = \"cfg\"\n\n\/\/ TagDefault set tag default\nvar TagDefault = \"cfgDefault\"\n\n\/\/ LoadJSON config file\nfunc LoadJSON(config interface{}) (err error) {\n\tconfigFile := defaultPath + defaultConfigFile\n\tfile, err := os.Open(configFile)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Load config file\nfunc Load(config interface{}) (err error) {\n\n\terr = LoadJSON(config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = parseTags(config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Save config file\nfunc Save(config interface{}) (err error) {\n\t_, err = os.Stat(defaultPath)\n\tif os.IsNotExist(err) {\n\t\tos.Mkdir(defaultPath, 0700)\n\t} else if err != nil {\n\t\treturn\n\t}\n\n\tconfigFile := defaultPath + defaultConfigFile\n\n\t_, err = os.Stat(configFile)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tb, err := json.MarshalIndent(config, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = ioutil.WriteFile(defaultConfigFile, b, 0644)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ Getenv get enviroment variable\nfunc Getenv(env string) (r string) {\n\tr = os.Getenv(env)\n\treturn\n}\n\nfunc parseTags(s interface{}) (err error) {\n\n\tst := reflect.TypeOf(s)\n\tvt := reflect.ValueOf(s)\n\n\tif st.Kind() != reflect.Ptr {\n\t\terr = errors.New(\"Not a pointer\")\n\t\treturn\n\t}\n\n\trefField := st.Elem()\n\tif refField.Kind() != reflect.Struct {\n\t\terr = errors.New(\"Not a struct\")\n\t\treturn\n\t}\n\n\trefValue := vt.Elem()\n\tfor i := 0; i < refField.NumField(); i++ {\n\t\tfield := refField.Field(i)\n\t\tvalue := refValue.Field(i)\n\t\tkind := field.Type.Kind()\n\n\t\tif field.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tenv := \"\"\n\t\tt := field.Tag.Get(Tag)\n\t\tif t == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif t == \"\" {\n\t\t\tt = strings.ToUpper(field.Name)\n\t\t}\n\n\t\tenv = os.Getenv(t)\n\n\t\tif env == \"\" && kind != reflect.Struct {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch kind {\n\t\tcase reflect.Struct:\n\t\t\terr = parseTags(value.Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\t\/\/value.SetString(\"TEST\")\n\t\t\tvalue.SetString(env)\n\t\tcase reflect.Int:\n\t\t\t\/\/value.SetInt(999)\n\t\t\tvar intEnv int64\n\t\t\tintEnv, err = strconv.ParseInt(env, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalue.SetInt(intEnv)\n\t\tdefault:\n\t\t\terr = errors.New(\"Type not supported \" + kind.String())\n\t\t}\n\n\t\tfmt.Println(\"name:\", field.Name,\n\t\t\t\"| cfg:\", field.Tag.Get(Tag),\n\t\t\t\"| cfgDefault:\", field.Tag.Get(TagDefault),\n\t\t\t\"| type:\", field.Type)\n\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/docker\/infrakit\/cmd\/infrakit\/base\"\n\t\"github.com\/docker\/infrakit\/pkg\/cli\"\n\t\"github.com\/docker\/infrakit\/pkg\/cli\/playbook\"\n\t\"github.com\/docker\/infrakit\/pkg\/discovery\"\n\tdiscovery_local \"github.com\/docker\/infrakit\/pkg\/discovery\/local\"\n\t\"github.com\/docker\/infrakit\/pkg\/discovery\/remote\"\n\tlogutil \"github.com\/docker\/infrakit\/pkg\/log\"\n\t\"github.com\/docker\/infrakit\/pkg\/plugin\"\n\trpc \"github.com\/docker\/infrakit\/pkg\/rpc\/client\"\n\t\"github.com\/docker\/infrakit\/pkg\/run\/local\"\n\t\"github.com\/docker\/infrakit\/pkg\/run\/scope\"\n\t\"github.com\/docker\/infrakit\/pkg\/template\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\/\/ CLI commands\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/manager\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/playbook\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/plugin\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/remote\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/template\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/up\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/util\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/x\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/cli\/v0\"\n\n\t\/\/ Callable backends via playbook or via lib\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/http\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/instance\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/print\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/sh\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/ssh\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/stack\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/vmwscript\"\n)\n\nfunc init() {\n\tlogutil.Configure(&logutil.ProdDefaults)\n}\n\ntype emptyPlugins struct{}\n\nfunc (e emptyPlugins) Find(name plugin.Name) (*plugin.Endpoint, error) {\n\treturn nil, errEmpty\n}\n\nfunc (e emptyPlugins) List() (map[string]*plugin.Endpoint, error) {\n\treturn nil, errEmpty\n}\n\nvar (\n\tempty = emptyPlugins{}\n\terrEmpty = errors.New(\"no plugins\")\n\n\tlog = logutil.New(\"module\", \"main\")\n)\n\n\/\/ A generic client for infrakit\nfunc main() {\n\n\tif err := discovery_local.Setup(); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := template.Setup(); err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tprogram := path.Base(os.Args[0])\n\tcmd := &cobra.Command{\n\t\tUse: program,\n\t\tShort: program + \" command line interface\",\n\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\tlogutil.Configure(logOptions)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.PersistentFlags().AddFlagSet(logFlags)\n\n\t\/\/ Don't print usage text for any error returned from a RunE function.\n\t\/\/ Only print it when explicitly requested.\n\tcmd.SilenceUsage = true\n\n\t\/\/ Don't automatically print errors returned from a RunE function.\n\t\/\/ They are returned from cmd.Execute() below and we print it ourselves.\n\tcmd.SilenceErrors = true\n\tf := func() discovery.Plugins {\n\n\t\tulist, err := cli.Remotes()\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Cannot lookup plugins\", \"err\", err)\n\t\t\treturn empty\n\t\t}\n\n\t\tif len(ulist) == 0 {\n\t\t\td, err := discovery_local.NewPluginDiscovery()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Failed to initialize plugin discovery\", \"err\", err)\n\t\t\t\treturn empty\n\t\t\t}\n\t\t\treturn d\n\t\t}\n\n\t\td, err := remote.NewPluginDiscovery(ulist)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to initialize remote plugin discovery\", \"err\", err)\n\t\t\treturn empty\n\t\t}\n\t\treturn d\n\t}\n\n\tscope := scope.DefaultScope(f)\n\n\tcmd.AddCommand(cli.VersionCommand())\n\n\t\/\/ playbooks\n\tpb, err := playbook.Load()\n\tif err != nil {\n\t\tlog.Warn(\"Cannot load playbook file\")\n\t}\n\n\tif !pb.Empty() {\n\t\tuseCommandName := \"use\"\n\t\tuseCommandDescription := \"Use a playbook\"\n\t\tcmd.AddCommand(useCommand(scope, useCommandName, useCommandDescription, pb))\n\t\tif len(os.Args) > 2 && os.Args[1] == useCommandName {\n\t\t\tcmd.SetArgs(os.Args[1:2])\n\t\t}\n\n\t}\n\n\tbase.VisitModules(scope, func(c *cobra.Command) {\n\t\tcmd.AddCommand(c)\n\t})\n\n\t\/\/ Help template includes the usage string, which is configure below\n\tcmd.SetHelpTemplate(helpTemplate)\n\tcmd.SetUsageTemplate(usageTemplate)\n\n\t\/\/ The 'stack' subcommand has its Use (verb) that is set to the\n\t\/\/ value of the INFRAKIT_HOST env variable. This allows us to\n\t\/\/ discover the remote services and generate the dynamic commands only\n\t\/\/ when the user has typed\n\tstackCommandName := local.InfrakitHost()\n\tstackCommand := stackCommand(scope, stackCommandName)\n\n\tcmd.AddCommand(stackCommand)\n\n\tif len(os.Args) > 2 && os.Args[1] == stackCommandName {\n\t\tcmd.SetArgs(os.Args[1:2])\n\t}\n\terr = cmd.Execute()\n\tif err != nil {\n\t\tlog.Crit(\"error executing\", \"cmd\", cmd.Use, \"err\", err)\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ write the file for bash completion if environment variable is set\n\tbashCompletionScript := os.Getenv(\"INFRAKIT_BASH_COMPLETION\")\n\tif bashCompletionScript != \"\" {\n\t\tcmd.GenBashCompletionFile(bashCompletionScript)\n\t}\n}\n\nfunc useCommand(scope scope.Scope, use, description string, pb *playbook.Playbooks) *cobra.Command {\n\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tcmd := &cobra.Command{\n\t\tUse: use,\n\t\tShort: description,\n\n\t\tRunE: func(c *cobra.Command, args []string) error {\n\n\t\t\tmain := &cobra.Command{\n\t\t\t\tUse: path.Base(os.Args[0]),\n\t\t\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\t\t\tlogutil.Configure(logOptions)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\t\t\tmain.PersistentFlags().AddFlagSet(logFlags)\n\n\t\t\tuseCmd := &cobra.Command{\n\t\t\t\tUse: use,\n\t\t\t\tShort: description,\n\t\t\t}\n\t\t\tmain.AddCommand(useCmd)\n\n\t\t\t\/\/ Commands from playbooks\n\t\t\tplaybookCommands := []*cobra.Command{}\n\t\t\tif playbooks, err := playbook.NewModules(scope, pb.Modules(),\n\t\t\t\tos.Stdin,\n\t\t\t\tplaybook.Options{\n\t\t\t\t\t\/\/ This value here is set by the environment variables\n\t\t\t\t\t\/\/ because its evaluation is needed prior to flags generation.\n\t\t\t\t\tShowAllWarnings: local.Getenv(\"INFRAKIT_CALLABLE_WARNINGS\", \"false\") == \"true\",\n\t\t\t\t}); err != nil {\n\t\t\t\tlog.Warn(\"error loading playbooks\", \"err\", err)\n\t\t\t} else {\n\t\t\t\tif more, err := playbooks.List(); err != nil {\n\t\t\t\t\tlog.Warn(\"cannot list playbooks\", \"err\", err)\n\t\t\t\t} else {\n\t\t\t\t\tplaybookCommands = append(playbookCommands, more...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, cc := range playbookCommands {\n\t\t\t\tuseCmd.AddCommand(cc)\n\t\t\t}\n\n\t\t\tmain.SetArgs(os.Args[1:])\n\t\t\treturn main.Execute()\n\t\t},\n\t}\n\n\treturn cmd\n}\n\nconst (\n\thelpTemplate = `\n\n{{with or .Long .Short }}{{. | trim}}{{end}}\n{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}\n`\n\n\tusageTemplate = `\nUsage:{{if .Runnable}}\n {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine \"[flags]\"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}}\n {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}}\n\nAliases:\n {{.NameAndAliases}}\n{{end}}{{if .HasExample}}\n\nExamples:\n{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}\n\nAvailable Commands:{{range .Commands}}{{if .IsAvailableCommand}}\n {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}\n\nFlags:\n{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}}\n\nGlobal Flags:\n{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}\n\nAdditional help topics:{{range .Commands}}{{if .IsHelpCommand}}\n {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }}\n\nUse \"{{.CommandPath}} [command] --help\" for more information about a command.{{end}}\n`\n)\n\nfunc stackCommand(scope scope.Scope, stackCommandName string) *cobra.Command {\n\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tdescription := fmt.Sprintf(\"Access %v\", stackCommandName)\n\tcmd := &cobra.Command{\n\t\tUse: stackCommandName,\n\t\tShort: description,\n\n\t\tRunE: func(c *cobra.Command, args []string) error {\n\n\t\t\tmain := &cobra.Command{\n\t\t\t\tUse: path.Base(os.Args[0]),\n\t\t\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\t\t\tlogutil.Configure(logOptions)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\t\t\tmain.PersistentFlags().AddFlagSet(logFlags)\n\n\t\t\tstack := &cobra.Command{\n\t\t\t\tUse: stackCommandName,\n\t\t\t\tShort: description,\n\t\t\t}\n\t\t\tmain.AddCommand(stack)\n\t\t\t\/\/ Load dynamic plugin commands based on discovery\n\t\t\tpluginCommands, err := cli.LoadAll(cli.NewServices(scope))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor _, cc := range pluginCommands {\n\t\t\t\tstack.AddCommand(cc)\n\t\t\t}\n\n\t\t\tmain.SetArgs(os.Args[1:])\n\n\t\t\tpluginName := \"\"\n\t\t\tmain.SetHelpFunc(func(cc *cobra.Command, args []string) {\n\t\t\t\tcc.Usage()\n\t\t\t\tif len(args) > 1 {\n\t\t\t\t\tpluginName = args[1]\n\t\t\t\t}\n\t\t\t})\n\n\t\t\terr = main.Execute()\n\n\t\t\t\/\/ We want to detect the case when the user typed a plugin that actually doesn't\n\t\t\t\/\/ exist and return a non-zero error code to the shell. This means we have to\n\t\t\t\/\/ actually try to lookup the plugin and the object therein.\n\t\t\tif pluginName != \"\" {\n\t\t\t\tpn := plugin.Name(pluginName)\n\t\t\t\tep, err := scope.Plugins().Find(pn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\t\/\/ if endpoint exists, then check if the subtype\/object exists\n\t\t\t\thk, err := rpc.NewHandshaker(ep.Address)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\thello, err := hk.Hello()\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\n\t\t\t\tfoundObject := false\n\t\t\t\tfor _, objs := range hello {\n\t\t\t\t\tfor _, obj := range objs {\n\t\t\t\t\t\tif pn.Type() == obj.Name {\n\t\t\t\t\t\t\tfoundObject = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !foundObject {\n\t\t\t\t\t\/\/ http:\/\/tldp.org\/LDP\/abs\/html\/exitcodes.html\n\t\t\t\t\tos.Exit(127)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn cmd\n}\n<commit_msg>Add pprof (#899)<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/docker\/infrakit\/cmd\/infrakit\/base\"\n\t\"github.com\/docker\/infrakit\/pkg\/cli\"\n\t\"github.com\/docker\/infrakit\/pkg\/cli\/playbook\"\n\t\"github.com\/docker\/infrakit\/pkg\/discovery\"\n\tdiscovery_local \"github.com\/docker\/infrakit\/pkg\/discovery\/local\"\n\t\"github.com\/docker\/infrakit\/pkg\/discovery\/remote\"\n\tlogutil \"github.com\/docker\/infrakit\/pkg\/log\"\n\t\"github.com\/docker\/infrakit\/pkg\/plugin\"\n\trpc \"github.com\/docker\/infrakit\/pkg\/rpc\/client\"\n\t\"github.com\/docker\/infrakit\/pkg\/run\/local\"\n\t\"github.com\/docker\/infrakit\/pkg\/run\/scope\"\n\t\"github.com\/docker\/infrakit\/pkg\/template\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\/\/ CLI commands\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/manager\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/playbook\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/plugin\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/remote\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/template\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/up\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/util\"\n\t_ \"github.com\/docker\/infrakit\/cmd\/infrakit\/x\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/cli\/v0\"\n\n\t\/\/ Callable backends via playbook or via lib\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/http\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/instance\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/print\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/sh\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/ssh\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/stack\"\n\t_ \"github.com\/docker\/infrakit\/pkg\/callable\/backend\/vmwscript\"\n)\n\nfunc init() {\n\tlogutil.Configure(&logutil.ProdDefaults)\n}\n\ntype emptyPlugins struct{}\n\nfunc (e emptyPlugins) Find(name plugin.Name) (*plugin.Endpoint, error) {\n\treturn nil, errEmpty\n}\n\nfunc (e emptyPlugins) List() (map[string]*plugin.Endpoint, error) {\n\treturn nil, errEmpty\n}\n\nvar (\n\tempty = emptyPlugins{}\n\terrEmpty = errors.New(\"no plugins\")\n\n\tlog = logutil.New(\"module\", \"main\")\n)\n\n\/\/ A generic client for infrakit\nfunc main() {\n\n\tif err := discovery_local.Setup(); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := template.Setup(); err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tprogram := path.Base(os.Args[0])\n\tcmd := &cobra.Command{\n\t\tUse: program,\n\t\tShort: program + \" command line interface\",\n\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\tlogutil.Configure(logOptions)\n\n\t\t\tif logOptions.Level == 5 {\n\t\t\t\t\/\/ Debug level. Start pprof\n\t\t\t\tgo func() {\n\t\t\t\t\tlog.Info(\"Starting pprof at localhost:6060\")\n\t\t\t\t\thttp.ListenAndServe(\"localhost:6060\", nil)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.PersistentFlags().AddFlagSet(logFlags)\n\n\t\/\/ Don't print usage text for any error returned from a RunE function.\n\t\/\/ Only print it when explicitly requested.\n\tcmd.SilenceUsage = true\n\n\t\/\/ Don't automatically print errors returned from a RunE function.\n\t\/\/ They are returned from cmd.Execute() below and we print it ourselves.\n\tcmd.SilenceErrors = true\n\tf := func() discovery.Plugins {\n\n\t\tulist, err := cli.Remotes()\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Cannot lookup plugins\", \"err\", err)\n\t\t\treturn empty\n\t\t}\n\n\t\tif len(ulist) == 0 {\n\t\t\td, err := discovery_local.NewPluginDiscovery()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Failed to initialize plugin discovery\", \"err\", err)\n\t\t\t\treturn empty\n\t\t\t}\n\t\t\treturn d\n\t\t}\n\n\t\td, err := remote.NewPluginDiscovery(ulist)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to initialize remote plugin discovery\", \"err\", err)\n\t\t\treturn empty\n\t\t}\n\t\treturn d\n\t}\n\n\tscope := scope.DefaultScope(f)\n\n\tcmd.AddCommand(cli.VersionCommand())\n\n\t\/\/ playbooks\n\tpb, err := playbook.Load()\n\tif err != nil {\n\t\tlog.Warn(\"Cannot load playbook file\")\n\t}\n\n\tif !pb.Empty() {\n\t\tuseCommandName := \"use\"\n\t\tuseCommandDescription := \"Use a playbook\"\n\t\tcmd.AddCommand(useCommand(scope, useCommandName, useCommandDescription, pb))\n\t\tif len(os.Args) > 2 && os.Args[1] == useCommandName {\n\t\t\tcmd.SetArgs(os.Args[1:2])\n\t\t}\n\n\t}\n\n\tbase.VisitModules(scope, func(c *cobra.Command) {\n\t\tcmd.AddCommand(c)\n\t})\n\n\t\/\/ Help template includes the usage string, which is configure below\n\tcmd.SetHelpTemplate(helpTemplate)\n\tcmd.SetUsageTemplate(usageTemplate)\n\n\t\/\/ The 'stack' subcommand has its Use (verb) that is set to the\n\t\/\/ value of the INFRAKIT_HOST env variable. This allows us to\n\t\/\/ discover the remote services and generate the dynamic commands only\n\t\/\/ when the user has typed\n\tstackCommandName := local.InfrakitHost()\n\tstackCommand := stackCommand(scope, stackCommandName)\n\n\tcmd.AddCommand(stackCommand)\n\n\tif len(os.Args) > 2 && os.Args[1] == stackCommandName {\n\t\tcmd.SetArgs(os.Args[1:2])\n\t}\n\terr = cmd.Execute()\n\tif err != nil {\n\t\tlog.Crit(\"error executing\", \"cmd\", cmd.Use, \"err\", err)\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ write the file for bash completion if environment variable is set\n\tbashCompletionScript := os.Getenv(\"INFRAKIT_BASH_COMPLETION\")\n\tif bashCompletionScript != \"\" {\n\t\tcmd.GenBashCompletionFile(bashCompletionScript)\n\t}\n}\n\nfunc useCommand(scope scope.Scope, use, description string, pb *playbook.Playbooks) *cobra.Command {\n\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tcmd := &cobra.Command{\n\t\tUse: use,\n\t\tShort: description,\n\n\t\tRunE: func(c *cobra.Command, args []string) error {\n\n\t\t\tmain := &cobra.Command{\n\t\t\t\tUse: path.Base(os.Args[0]),\n\t\t\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\t\t\tlogutil.Configure(logOptions)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\t\t\tmain.PersistentFlags().AddFlagSet(logFlags)\n\n\t\t\tuseCmd := &cobra.Command{\n\t\t\t\tUse: use,\n\t\t\t\tShort: description,\n\t\t\t}\n\t\t\tmain.AddCommand(useCmd)\n\n\t\t\t\/\/ Commands from playbooks\n\t\t\tplaybookCommands := []*cobra.Command{}\n\t\t\tif playbooks, err := playbook.NewModules(scope, pb.Modules(),\n\t\t\t\tos.Stdin,\n\t\t\t\tplaybook.Options{\n\t\t\t\t\t\/\/ This value here is set by the environment variables\n\t\t\t\t\t\/\/ because its evaluation is needed prior to flags generation.\n\t\t\t\t\tShowAllWarnings: local.Getenv(\"INFRAKIT_CALLABLE_WARNINGS\", \"false\") == \"true\",\n\t\t\t\t}); err != nil {\n\t\t\t\tlog.Warn(\"error loading playbooks\", \"err\", err)\n\t\t\t} else {\n\t\t\t\tif more, err := playbooks.List(); err != nil {\n\t\t\t\t\tlog.Warn(\"cannot list playbooks\", \"err\", err)\n\t\t\t\t} else {\n\t\t\t\t\tplaybookCommands = append(playbookCommands, more...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, cc := range playbookCommands {\n\t\t\t\tuseCmd.AddCommand(cc)\n\t\t\t}\n\n\t\t\tmain.SetArgs(os.Args[1:])\n\t\t\treturn main.Execute()\n\t\t},\n\t}\n\n\treturn cmd\n}\n\nconst (\n\thelpTemplate = `\n\n{{with or .Long .Short }}{{. | trim}}{{end}}\n{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}\n`\n\n\tusageTemplate = `\nUsage:{{if .Runnable}}\n {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine \"[flags]\"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}}\n {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}}\n\nAliases:\n {{.NameAndAliases}}\n{{end}}{{if .HasExample}}\n\nExamples:\n{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}\n\nAvailable Commands:{{range .Commands}}{{if .IsAvailableCommand}}\n {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}\n\nFlags:\n{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}}\n\nGlobal Flags:\n{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}\n\nAdditional help topics:{{range .Commands}}{{if .IsHelpCommand}}\n {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }}\n\nUse \"{{.CommandPath}} [command] --help\" for more information about a command.{{end}}\n`\n)\n\nfunc stackCommand(scope scope.Scope, stackCommandName string) *cobra.Command {\n\n\t\/\/ Log setup\n\tlogOptions := &logutil.ProdDefaults\n\tlogFlags := cli.Flags(logOptions)\n\n\tdescription := fmt.Sprintf(\"Access %v\", stackCommandName)\n\tcmd := &cobra.Command{\n\t\tUse: stackCommandName,\n\t\tShort: description,\n\n\t\tRunE: func(c *cobra.Command, args []string) error {\n\n\t\t\tmain := &cobra.Command{\n\t\t\t\tUse: path.Base(os.Args[0]),\n\t\t\t\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\t\t\t\tlogutil.Configure(logOptions)\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\t\t\tmain.PersistentFlags().AddFlagSet(logFlags)\n\n\t\t\tstack := &cobra.Command{\n\t\t\t\tUse: stackCommandName,\n\t\t\t\tShort: description,\n\t\t\t}\n\t\t\tmain.AddCommand(stack)\n\t\t\t\/\/ Load dynamic plugin commands based on discovery\n\t\t\tpluginCommands, err := cli.LoadAll(cli.NewServices(scope))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfor _, cc := range pluginCommands {\n\t\t\t\tstack.AddCommand(cc)\n\t\t\t}\n\n\t\t\tmain.SetArgs(os.Args[1:])\n\n\t\t\tpluginName := \"\"\n\t\t\tmain.SetHelpFunc(func(cc *cobra.Command, args []string) {\n\t\t\t\tcc.Usage()\n\t\t\t\tif len(args) > 1 {\n\t\t\t\t\tpluginName = args[1]\n\t\t\t\t}\n\t\t\t})\n\n\t\t\terr = main.Execute()\n\n\t\t\t\/\/ We want to detect the case when the user typed a plugin that actually doesn't\n\t\t\t\/\/ exist and return a non-zero error code to the shell. This means we have to\n\t\t\t\/\/ actually try to lookup the plugin and the object therein.\n\t\t\tif pluginName != \"\" {\n\t\t\t\tpn := plugin.Name(pluginName)\n\t\t\t\tep, err := scope.Plugins().Find(pn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\t\/\/ if endpoint exists, then check if the subtype\/object exists\n\t\t\t\thk, err := rpc.NewHandshaker(ep.Address)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\thello, err := hk.Hello()\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\n\t\t\t\tfoundObject := false\n\t\t\t\tfor _, objs := range hello {\n\t\t\t\t\tfor _, obj := range objs {\n\t\t\t\t\t\tif pn.Type() == obj.Name {\n\t\t\t\t\t\t\tfoundObject = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !foundObject {\n\t\t\t\t\t\/\/ http:\/\/tldp.org\/LDP\/abs\/html\/exitcodes.html\n\t\t\t\t\tos.Exit(127)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype target struct{}\n\nfunc (t *target) Info() *Info {\n\tdesc := `Defines or retrieve the target (tsuru server)\n\nIf an argument is provided, this command sets the target, otherwise it displays the current target.\n`\n\treturn &Info{\n\t\tName: \"target\",\n\t\tUsage: \"target [target]\",\n\t\tDesc: desc,\n\t\tMinArgs: 0,\n\t}\n}\n\nfunc (t *target) Run(ctx *Context, client Doer) error {\n\tvar target string\n\tif len(ctx.Args) > 0 {\n\t\ttarget = ctx.Args[0]\n\t\terr := writeTarget(target)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(ctx.Stdout, \"New target is %s\\n\", target)\n\t\treturn nil\n\t}\n\ttarget = readTarget()\n\tfmt.Fprintf(ctx.Stdout, \"Current target is %s\\n\", target)\n\treturn nil\n}\n\nconst DefaultTarget = \"http:\/\/tsuru.plataformas.glb.com:8080\"\n\nfunc readTarget() string {\n\ttargetPath, _ := joinWithUserDir(\".tsuru_target\")\n\tif f, err := filesystem().Open(targetPath); err == nil {\n\t\tdefer f.Close()\n\t\tif b, err := ioutil.ReadAll(f); err == nil {\n\t\t\treturn strings.TrimSpace(string(b))\n\t\t}\n\t}\n\treturn DefaultTarget\n}\n\nfunc GetUrl(path string) string {\n\tvar prefix string\n\ttarget := readTarget()\n\tif m, _ := regexp.MatchString(\"^https?:\/\/\", target); !m {\n\t\tprefix = \"http:\/\/\"\n\t}\n\treturn prefix + target + path\n}\n\nfunc writeTarget(t string) error {\n\ttargetPath, err := joinWithUserDir(\".tsuru_target\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttargetFile, err := filesystem().OpenFile(targetPath, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer targetFile.Close()\n\tt = strings.TrimRight(t, \"\/\")\n\tcontent := []byte(t)\n\tn, err := targetFile.Write(content)\n\tif n != len(content) || err != nil {\n\t\treturn errors.New(\"Failed to write the target file\")\n\t}\n\treturn nil\n}\n\n\n\n\n\n\ntype targetAdd struct{}\n\nfunc (t *targetAdd) Info() *Info {\n\tdesc := `Add a new target on target-list (tsuru server)\n`\n\treturn &Info{\n\t\tName: \"target-add\",\n\t\tUsage: \"target-add <label> <target>\",\n\t\tDesc: desc,\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (t *targetAdd) Run(ctx *Context, client Doer) error {\n\tvar target string\n\tvar label string\n \n\tif len(ctx.Args) == 2 {\n\t label = ctx.Args[0]\n\t\ttarget = ctx.Args[1]\n\t\t\n\t\tfmt.Fprintf(ctx.Stdout, \"Not implemented. Will add new target %s %s\\n\", label, target)\n\t}\n\n\treturn nil\n\t\n}\n<commit_msg>gofmting target.go files<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype target struct{}\n\nfunc (t *target) Info() *Info {\n\tdesc := `Defines or retrieve the target (tsuru server)\n\nIf an argument is provided, this command sets the target, otherwise it displays the current target.\n`\n\treturn &Info{\n\t\tName: \"target\",\n\t\tUsage: \"target [target]\",\n\t\tDesc: desc,\n\t\tMinArgs: 0,\n\t}\n}\n\nfunc (t *target) Run(ctx *Context, client Doer) error {\n\tvar target string\n\tif len(ctx.Args) > 0 {\n\t\ttarget = ctx.Args[0]\n\t\terr := writeTarget(target)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(ctx.Stdout, \"New target is %s\\n\", target)\n\t\treturn nil\n\t}\n\ttarget = readTarget()\n\tfmt.Fprintf(ctx.Stdout, \"Current target is %s\\n\", target)\n\treturn nil\n}\n\nconst DefaultTarget = \"http:\/\/tsuru.plataformas.glb.com:8080\"\n\nfunc readTarget() string {\n\ttargetPath, _ := joinWithUserDir(\".tsuru_target\")\n\tif f, err := filesystem().Open(targetPath); err == nil {\n\t\tdefer f.Close()\n\t\tif b, err := ioutil.ReadAll(f); err == nil {\n\t\t\treturn strings.TrimSpace(string(b))\n\t\t}\n\t}\n\treturn DefaultTarget\n}\n\nfunc GetUrl(path string) string {\n\tvar prefix string\n\ttarget := readTarget()\n\tif m, _ := regexp.MatchString(\"^https?:\/\/\", target); !m {\n\t\tprefix = \"http:\/\/\"\n\t}\n\treturn prefix + target + path\n}\n\nfunc writeTarget(t string) error {\n\ttargetPath, err := joinWithUserDir(\".tsuru_target\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttargetFile, err := filesystem().OpenFile(targetPath, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer targetFile.Close()\n\tt = strings.TrimRight(t, \"\/\")\n\tcontent := []byte(t)\n\tn, err := targetFile.Write(content)\n\tif n != len(content) || err != nil {\n\t\treturn errors.New(\"Failed to write the target file\")\n\t}\n\treturn nil\n}\n\ntype targetAdd struct{}\n\nfunc (t *targetAdd) Info() *Info {\n\tdesc := `Add a new target on target-list (tsuru server)\n`\n\treturn &Info{\n\t\tName: \"target-add\",\n\t\tUsage: \"target-add <label> <target>\",\n\t\tDesc: desc,\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (t *targetAdd) Run(ctx *Context, client Doer) error {\n\tvar target string\n\tvar label string\n\n\tif len(ctx.Args) == 2 {\n\t\tlabel = ctx.Args[0]\n\t\ttarget = ctx.Args[1]\n\n\t\tfmt.Fprintf(ctx.Stdout, \"Not implemented. Will add new target (%s) -> %s\\n\", label, target)\n\t}\n\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package kv\n\nimport (\n\t\"errors\"\n\ttiedot \"github.com\/HouzuoGuo\/tiedot\/db\"\n\t\"github.com\/ryansb\/legowebservices\/log\"\n\t\"strings\"\n)\n\nvar ErrNotFound = errors.New(\"legowebservices\/persist\/kv: Error not found\")\nvar ErrReadPreference = errors.New(\"legowebservices\/persist\/kv: Readpreference not set\")\n\nfunc (t *TiedotEngine) AddIndex(collection string, path Path) {\n\tc := t.tiedot.Use(collection)\n\ttdPath := strings.Join(path, tiedot.INDEX_PATH_SEP)\n\tif _, ok := c.SecIndexes[tdPath]; ok {\n\t\tlog.Infof(\"Index on path:%v already exists for collection:%s\", tdPath, collection)\n\t\treturn\n\t}\n\tlog.V(3).Infof(\"Adding index on path:%v to collection:%s\", tdPath, collection)\n\terr := c.Index(path)\n\tlog.FatalIfErr(err, \"Failure creating index on collection:\"+collection)\n}\n\nfunc (t *TiedotEngine) Collection(collection string) *tiedot.Col {\n\treturn t.tiedot.Use(collection)\n}\n\nfunc (t *TiedotEngine) DB() *tiedot.DB {\n\treturn t.tiedot\n}\n\nfunc (t *TiedotEngine) Query(collectionName string) *Query {\n\treturn &Query{col: t.tiedot.Use(collectionName)}\n}\n\nfunc (t *TiedotEngine) Insert(collectionName string, item Insertable) (uint64, error) {\n\tif len(item.ToM()) == 0 {\n\t\tlog.Warningf(\"Failure: No data in item=%v\", item.ToM())\n\t\treturn 0, nil\n\t} else {\n\t\tlog.V(3).Infof(\"Insertion into collection=%s item=%v\",\n\t\t\tcollectionName, item.ToM())\n\t}\n\tid, err := t.tiedot.Use(collectionName).Insert(item.ToM())\n\tif err != nil {\n\t\tlog.Errorf(\"Failure inserting item=%v err=%s\", item.ToM(), err.Error())\n\t\treturn 0, err\n\t}\n\tlog.V(6).Infof(\"Added item with ID=%d, item=%v\", id, item.ToM())\n\treturn id, nil\n}\n\nfunc (t *TiedotEngine) Update(collectionName string, id uint64, item Insertable) error {\n\tlog.V(3).Infof(\"Updating with data: %v\", item.ToM())\n\tif err := t.tiedot.Use(collectionName).Update(id, item.ToM()); err != nil {\n\t\tlog.Errorf(\"Failure updating item=%s err=%s\", item.ToM().JSON(), err.Error())\n\t\treturn err\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (t *TiedotEngine) All(collectionName string) (map[uint64]struct{}, error) {\n\tr := make(map[uint64]struct{})\n\tif err := tiedot.EvalQuery(\"all\", t.tiedot.Use(collectionName), &r); err != nil {\n\t\tlog.Error(\"Error executing TiedotEngine.All() err=%s\", err.Error())\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}\n<commit_msg>logging<commit_after>package kv\n\nimport (\n\t\"errors\"\n\ttiedot \"github.com\/HouzuoGuo\/tiedot\/db\"\n\t\"github.com\/ryansb\/legowebservices\/log\"\n\t\"strings\"\n)\n\nvar ErrNotFound = errors.New(\"legowebservices\/persist\/kv: Error not found\")\nvar ErrReadPreference = errors.New(\"legowebservices\/persist\/kv: Readpreference not set\")\n\nfunc (t *TiedotEngine) AddIndex(collection string, path Path) {\n\tc := t.tiedot.Use(collection)\n\ttdPath := strings.Join(path, tiedot.INDEX_PATH_SEP)\n\tif _, ok := c.SecIndexes[tdPath]; ok {\n\t\tlog.Infof(\"Index on path:%v already exists for collection:%s\", tdPath, collection)\n\t\treturn\n\t}\n\tlog.V(3).Infof(\"Adding index on path:%v to collection:%s\", tdPath, collection)\n\terr := c.Index(path)\n\tlog.FatalIfErr(err, \"Failure creating index on collection:\"+collection)\n}\n\nfunc (t *TiedotEngine) Collection(collection string) *tiedot.Col {\n\treturn t.tiedot.Use(collection)\n}\n\nfunc (t *TiedotEngine) DB() *tiedot.DB {\n\treturn t.tiedot\n}\n\nfunc (t *TiedotEngine) Query(collectionName string) *Query {\n\treturn &Query{col: t.tiedot.Use(collectionName)}\n}\n\nfunc (t *TiedotEngine) Insert(collectionName string, item Insertable) (uint64, error) {\n\tif len(item.ToM()) == 0 {\n\t\tlog.Warningf(\"Failure: No data in item=%v\", item.ToM())\n\t\treturn 0, nil\n\t} else {\n\t\tlog.V(3).Infof(\"Insertion into collection=%s item=%v\",\n\t\t\tcollectionName, item.ToM())\n\t}\n\tid, err := t.tiedot.Use(collectionName).Insert(item.ToM())\n\tif err != nil {\n\t\tlog.Errorf(\"Failure inserting item=%v err=%s\", item.ToM(), err.Error())\n\t\treturn 0, err\n\t} else {\n\t\tlog.V(6).Infof(\"Added item with ID=%d, item=%v\", id, item.ToM())\n\t\treturn id, nil\n\t}\n}\n\nfunc (t *TiedotEngine) Update(collectionName string, id uint64, item Insertable) error {\n\terr := t.tiedot.Use(collectionName).Update(id, item.ToM())\n\tif err != nil {\n\t\tlog.Errorf(\"Failure updating item=%s err=%s\", item.ToM().JSON(), err.Error())\n\t} else {\n\t\tlog.V(3).Infof(\"Updating with data: %v\", item.ToM())\n\t}\n\treturn err\n}\n\nfunc (t *TiedotEngine) All(collectionName string) (map[uint64]struct{}, error) {\n\tr := make(map[uint64]struct{})\n\tif err := tiedot.EvalQuery(\"all\", t.tiedot.Use(collectionName), &r); err != nil {\n\t\tlog.Error(\"Error executing TiedotEngine.All() err=%s\", err.Error())\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/ Copyright 2015-2017 Rancher Labs, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cloudinitsave\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tyaml \"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\n\t\"github.com\/rancher\/os\/cmd\/control\"\n\t\"github.com\/rancher\/os\/cmd\/network\"\n\trancherConfig \"github.com\/rancher\/os\/config\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/config\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/configdrive\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/file\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/aliyun\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/cloudstack\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/digitalocean\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/ec2\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/gce\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/packet\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/proccmdline\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/url\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/vmware\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/pkg\"\n\t\"github.com\/rancher\/os\/pkg\/log\"\n\t\"github.com\/rancher\/os\/pkg\/netconf\"\n\t\"github.com\/rancher\/os\/pkg\/util\"\n)\n\nconst (\n\tdatasourceInterval = 100 * time.Millisecond\n\tdatasourceMaxInterval = 30 * time.Second\n\tdatasourceTimeout = 5 * time.Minute\n)\n\nfunc Main() {\n\tlog.InitLogger()\n\tlog.Info(\"Running cloud-init-save\")\n\n\tif err := control.UdevSettle(); err != nil {\n\t\tlog.Errorf(\"Failed to run udev settle: %v\", err)\n\t}\n\n\tif err := saveCloudConfig(); err != nil {\n\t\tlog.Errorf(\"Failed to save cloud-config: %v\", err)\n\t}\n}\n\nfunc saveCloudConfig() error {\n\tlog.Infof(\"SaveCloudConfig\")\n\n\tcfg := rancherConfig.LoadConfig()\n\tlog.Debugf(\"init: SaveCloudConfig(pre ApplyNetworkConfig): %#v\", cfg.Rancher.Network)\n\tnetwork.ApplyNetworkConfig(cfg)\n\n\tlog.Infof(\"datasources that will be consided: %#v\", cfg.Rancher.CloudInit.Datasources)\n\tdss := getDatasources(cfg.Rancher.CloudInit.Datasources)\n\tif len(dss) == 0 {\n\t\tlog.Errorf(\"currentDatasource - none found\")\n\t\treturn nil\n\t}\n\n\tfoundDs := selectDatasource(dss)\n\tlog.Infof(\"Cloud-init datasource that was used: %s\", foundDs)\n\n\t\/\/ Apply any newly detected network config.\n\tcfg = rancherConfig.LoadConfig()\n\tlog.Debugf(\"init: SaveCloudConfig(post ApplyNetworkConfig): %#v\", cfg.Rancher.Network)\n\tnetwork.ApplyNetworkConfig(cfg)\n\n\treturn nil\n}\n\nfunc RequiresNetwork(datasource string) bool {\n\t\/\/ TODO: move into the datasources (and metadatasources)\n\t\/\/ and then we can enable that platforms defaults..\n\tparts := strings.SplitN(datasource, \":\", 2)\n\trequiresNetwork, ok := map[string]bool{\n\t\t\"ec2\": true,\n\t\t\"file\": false,\n\t\t\"url\": true,\n\t\t\"cmdline\": true,\n\t\t\"configdrive\": false,\n\t\t\"digitalocean\": true,\n\t\t\"gce\": true,\n\t\t\"packet\": true,\n\t}[parts[0]]\n\treturn ok && requiresNetwork\n}\n\nfunc saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadata) error {\n\tos.MkdirAll(rancherConfig.CloudConfigDir, os.ModeDir|0600)\n\n\tif len(scriptBytes) > 0 {\n\t\tlog.Infof(\"Writing to %s\", rancherConfig.CloudConfigScriptFile)\n\t\tif err := util.WriteFileAtomic(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {\n\t\t\tlog.Errorf(\"Error while writing file %s: %v\", rancherConfig.CloudConfigScriptFile, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(cloudConfigBytes) > 0 {\n\t\tif err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Infof(\"Wrote to %s\", rancherConfig.CloudConfigBootFile)\n\t}\n\n\tmetaDataBytes, err := yaml.Marshal(metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Wrote to %s\", rancherConfig.MetaDataFile)\n\n\t\/\/ if we write the empty meta yml, the merge fails.\n\t\/\/ TODO: the problem is that a partially filled one will still have merge issues, so that needs fixing - presumably by making merge more clever, and making more fields optional\n\temptyMeta, err := yaml.Marshal(datasource.Metadata{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Compare(metaDataBytes, emptyMeta) == 0 {\n\t\tlog.Infof(\"not writing %s: its all defaults.\", rancherConfig.CloudConfigNetworkFile)\n\t\treturn nil\n\t}\n\n\ttype nonRancherCfg struct {\n\t\tNetwork netconf.NetworkConfig `yaml:\"network,omitempty\"`\n\t}\n\ttype nonCfg struct {\n\t\tRancher nonRancherCfg `yaml:\"rancher,omitempty\"`\n\t}\n\t\/\/ write the network.yml file from metadata\n\tcc := nonCfg{\n\t\tRancher: nonRancherCfg{\n\t\t\tNetwork: metadata.NetworkConfig,\n\t\t},\n\t}\n\n\tif err := os.MkdirAll(path.Dir(rancherConfig.CloudConfigNetworkFile), 0700); err != nil {\n\t\tlog.Errorf(\"Failed to create directory for file %s: %v\", rancherConfig.CloudConfigNetworkFile, err)\n\t}\n\n\tif err := rancherConfig.WriteToFile(cc, rancherConfig.CloudConfigNetworkFile); err != nil {\n\t\tlog.Errorf(\"Failed to save config file %s: %v\", rancherConfig.CloudConfigNetworkFile, err)\n\t}\n\tlog.Infof(\"Wrote to %s\", rancherConfig.CloudConfigNetworkFile)\n\n\treturn nil\n}\n\nfunc fetchAndSave(ds datasource.Datasource) error {\n\tvar metadata datasource.Metadata\n\n\tlog.Infof(\"Fetching user-data from datasource %s\", ds)\n\tuserDataBytes, err := ds.FetchUserdata()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching user-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\tuserDataBytes, err = decompressIfGzip(userDataBytes)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed decompressing user-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\tlog.Infof(\"Fetching meta-data from datasource of type %v\", ds.Type())\n\tmetadata, err = ds.FetchMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching meta-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\n\tuserData := string(userDataBytes)\n\tscriptBytes := []byte{}\n\n\tif config.IsScript(userData) {\n\t\tscriptBytes = userDataBytes\n\t\tuserDataBytes = []byte{}\n\t} else if isCompose(userData) {\n\t\tif userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {\n\t\t\tlog.Errorf(\"Failed to convert compose to cloud-config syntax: %v\", err)\n\t\t\treturn err\n\t\t}\n\t} else if config.IsCloudConfig(userData) {\n\t\tif _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {\n\t\t\tlog.WithFields(log.Fields{\"cloud-config\": userData, \"err\": err}).Warn(\"Failed to parse cloud-config, not saving.\")\n\t\t\tuserDataBytes = []byte{}\n\t\t}\n\t} else {\n\t\tlog.Errorf(\"Unrecognized user-data\\n(%s)\", userData)\n\t\tuserDataBytes = []byte{}\n\t}\n\n\tif _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {\n\t\tlog.WithFields(log.Fields{\"cloud-config\": userData, \"err\": err}).Warn(\"Failed to parse cloud-config\")\n\t\treturn errors.New(\"Failed to parse cloud-config\")\n\t}\n\n\treturn saveFiles(userDataBytes, scriptBytes, metadata)\n}\n\n\/\/ getDatasources creates a slice of possible Datasources for cloudinit based\n\/\/ on the different source command-line flags.\nfunc getDatasources(datasources []string) []datasource.Datasource {\n\tdss := make([]datasource.Datasource, 0, 5)\n\n\tfor _, ds := range datasources {\n\t\tparts := strings.SplitN(ds, \":\", 2)\n\n\t\troot := \"\"\n\t\tif len(parts) > 1 {\n\t\t\troot = parts[1]\n\t\t}\n\n\t\tswitch parts[0] {\n\t\tcase \"*\":\n\t\t\tdss = append(dss, getDatasources([]string{\"configdrive\", \"vmware\", \"ec2\", \"digitalocean\", \"packet\", \"gce\", \"cloudstack\"})...)\n\t\tcase \"cloudstack\":\n\t\t\tfor _, source := range cloudstack.NewDatasource(root) {\n\t\t\t\tdss = append(dss, source)\n\t\t\t}\n\t\tcase \"ec2\":\n\t\t\tdss = append(dss, ec2.NewDatasource(root))\n\t\tcase \"file\":\n\t\t\tif root != \"\" {\n\t\t\t\tdss = append(dss, file.NewDatasource(root))\n\t\t\t}\n\t\tcase \"url\":\n\t\t\tif root != \"\" {\n\t\t\t\tdss = append(dss, url.NewDatasource(root))\n\t\t\t}\n\t\tcase \"cmdline\":\n\t\t\tif len(parts) == 1 {\n\t\t\t\tdss = append(dss, proccmdline.NewDatasource())\n\t\t\t}\n\t\tcase \"configdrive\":\n\t\t\tif root == \"\" {\n\t\t\t\troot = \"\/media\/config-2\"\n\t\t\t}\n\t\t\tdss = append(dss, configdrive.NewDatasource(root))\n\t\tcase \"digitalocean\":\n\t\t\t\/\/ TODO: should we enableDoLinkLocal() - to avoid the need for the other kernel\/oem options?\n\t\t\tdss = append(dss, digitalocean.NewDatasource(root))\n\t\tcase \"gce\":\n\t\t\tdss = append(dss, gce.NewDatasource(root))\n\t\tcase \"packet\":\n\t\t\tdss = append(dss, packet.NewDatasource(root))\n\t\tcase \"vmware\":\n\t\t\t\/\/ made vmware datasource dependent on detecting vmware independently, as it crashes things otherwise\n\t\t\tv := vmware.NewDatasource(root)\n\t\t\tif v != nil {\n\t\t\t\tdss = append(dss, v)\n\t\t\t}\n\t\tcase \"aliyun\":\n\t\t\tdss = append(dss, aliyun.NewDatasource(root))\n\t\t}\n\t}\n\n\treturn dss\n}\n\nfunc enableDoLinkLocal() {\n\t_, err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{\n\t\tInterfaces: map[string]netconf.InterfaceConfig{\n\t\t\t\"eth0\": {\n\t\t\t\tIPV4LL: true,\n\t\t\t},\n\t\t},\n\t}, false, false)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to apply link local on eth0: %v\", err)\n\t}\n}\n\n\/\/ selectDatasource attempts to choose a valid Datasource to use based on its\n\/\/ current availability. The first Datasource to report to be available is\n\/\/ returned. Datasources will be retried if possible if they are not\n\/\/ immediately available. If all Datasources are permanently unavailable or\n\/\/ datasourceTimeout is reached before one becomes available, nil is returned.\nfunc selectDatasource(sources []datasource.Datasource) datasource.Datasource {\n\tds := make(chan datasource.Datasource)\n\tstop := make(chan struct{})\n\tvar wg sync.WaitGroup\n\n\tfor _, s := range sources {\n\t\twg.Add(1)\n\t\tgo func(s datasource.Datasource) {\n\t\t\tdefer wg.Done()\n\n\t\t\tduration := datasourceInterval\n\t\t\tfor {\n\t\t\t\tlog.Infof(\"cloud-init: Checking availability of %q\", s.Type())\n\t\t\t\tif s.IsAvailable() {\n\t\t\t\t\tlog.Infof(\"cloud-init: Datasource available: %s\", s)\n\t\t\t\t\tds <- s\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !s.AvailabilityChanges() {\n\t\t\t\t\tlog.Infof(\"cloud-init: Datasource unavailable, skipping: %s\", s)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Errorf(\"cloud-init: Datasource not ready, will retry: %s\", s)\n\t\t\t\tselect {\n\t\t\t\tcase <-stop:\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(duration):\n\t\t\t\t\tduration = pkg.ExpBackoff(duration, datasourceMaxInterval)\n\t\t\t\t}\n\t\t\t}\n\t\t}(s)\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tvar s datasource.Datasource\n\tselect {\n\tcase s = <-ds:\n\t\terr := fetchAndSave(s)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error fetching cloud-init datasource(%s): %s\", s, err)\n\t\t}\n\tcase <-done:\n\tcase <-time.After(datasourceTimeout):\n\t}\n\n\tclose(stop)\n\treturn s\n}\n\nfunc isCompose(content string) bool {\n\treturn strings.HasPrefix(content, \"#compose\\n\")\n}\n\nfunc composeToCloudConfig(bytes []byte) ([]byte, error) {\n\tcompose := make(map[interface{}]interface{})\n\terr := yaml.Unmarshal(bytes, &compose)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn yaml.Marshal(map[interface{}]interface{}{\n\t\t\"rancher\": map[interface{}]interface{}{\n\t\t\t\"services\": compose,\n\t\t},\n\t})\n}\n\nconst gzipMagicBytes = \"\\x1f\\x8b\"\n\nfunc decompressIfGzip(userdataBytes []byte) ([]byte, error) {\n\tif !bytes.HasPrefix(userdataBytes, []byte(gzipMagicBytes)) {\n\t\treturn userdataBytes, nil\n\t}\n\n\treturn config.DecompressGzip(userdataBytes)\n}\n<commit_msg>cloudinitsave: Fix typo in log output<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/ Copyright 2015-2017 Rancher Labs, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cloudinitsave\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tyaml \"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\n\t\"github.com\/rancher\/os\/cmd\/control\"\n\t\"github.com\/rancher\/os\/cmd\/network\"\n\trancherConfig \"github.com\/rancher\/os\/config\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/config\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/configdrive\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/file\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/aliyun\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/cloudstack\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/digitalocean\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/ec2\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/gce\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/metadata\/packet\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/proccmdline\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/url\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/datasource\/vmware\"\n\t\"github.com\/rancher\/os\/config\/cloudinit\/pkg\"\n\t\"github.com\/rancher\/os\/pkg\/log\"\n\t\"github.com\/rancher\/os\/pkg\/netconf\"\n\t\"github.com\/rancher\/os\/pkg\/util\"\n)\n\nconst (\n\tdatasourceInterval = 100 * time.Millisecond\n\tdatasourceMaxInterval = 30 * time.Second\n\tdatasourceTimeout = 5 * time.Minute\n)\n\nfunc Main() {\n\tlog.InitLogger()\n\tlog.Info(\"Running cloud-init-save\")\n\n\tif err := control.UdevSettle(); err != nil {\n\t\tlog.Errorf(\"Failed to run udev settle: %v\", err)\n\t}\n\n\tif err := saveCloudConfig(); err != nil {\n\t\tlog.Errorf(\"Failed to save cloud-config: %v\", err)\n\t}\n}\n\nfunc saveCloudConfig() error {\n\tlog.Infof(\"SaveCloudConfig\")\n\n\tcfg := rancherConfig.LoadConfig()\n\tlog.Debugf(\"init: SaveCloudConfig(pre ApplyNetworkConfig): %#v\", cfg.Rancher.Network)\n\tnetwork.ApplyNetworkConfig(cfg)\n\n\tlog.Infof(\"datasources that will be considered: %#v\", cfg.Rancher.CloudInit.Datasources)\n\tdss := getDatasources(cfg.Rancher.CloudInit.Datasources)\n\tif len(dss) == 0 {\n\t\tlog.Errorf(\"currentDatasource - none found\")\n\t\treturn nil\n\t}\n\n\tfoundDs := selectDatasource(dss)\n\tlog.Infof(\"Cloud-init datasource that was used: %s\", foundDs)\n\n\t\/\/ Apply any newly detected network config.\n\tcfg = rancherConfig.LoadConfig()\n\tlog.Debugf(\"init: SaveCloudConfig(post ApplyNetworkConfig): %#v\", cfg.Rancher.Network)\n\tnetwork.ApplyNetworkConfig(cfg)\n\n\treturn nil\n}\n\nfunc RequiresNetwork(datasource string) bool {\n\t\/\/ TODO: move into the datasources (and metadatasources)\n\t\/\/ and then we can enable that platforms defaults..\n\tparts := strings.SplitN(datasource, \":\", 2)\n\trequiresNetwork, ok := map[string]bool{\n\t\t\"ec2\": true,\n\t\t\"file\": false,\n\t\t\"url\": true,\n\t\t\"cmdline\": true,\n\t\t\"configdrive\": false,\n\t\t\"digitalocean\": true,\n\t\t\"gce\": true,\n\t\t\"packet\": true,\n\t}[parts[0]]\n\treturn ok && requiresNetwork\n}\n\nfunc saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadata) error {\n\tos.MkdirAll(rancherConfig.CloudConfigDir, os.ModeDir|0600)\n\n\tif len(scriptBytes) > 0 {\n\t\tlog.Infof(\"Writing to %s\", rancherConfig.CloudConfigScriptFile)\n\t\tif err := util.WriteFileAtomic(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {\n\t\t\tlog.Errorf(\"Error while writing file %s: %v\", rancherConfig.CloudConfigScriptFile, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(cloudConfigBytes) > 0 {\n\t\tif err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Infof(\"Wrote to %s\", rancherConfig.CloudConfigBootFile)\n\t}\n\n\tmetaDataBytes, err := yaml.Marshal(metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Wrote to %s\", rancherConfig.MetaDataFile)\n\n\t\/\/ if we write the empty meta yml, the merge fails.\n\t\/\/ TODO: the problem is that a partially filled one will still have merge issues, so that needs fixing - presumably by making merge more clever, and making more fields optional\n\temptyMeta, err := yaml.Marshal(datasource.Metadata{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Compare(metaDataBytes, emptyMeta) == 0 {\n\t\tlog.Infof(\"not writing %s: its all defaults.\", rancherConfig.CloudConfigNetworkFile)\n\t\treturn nil\n\t}\n\n\ttype nonRancherCfg struct {\n\t\tNetwork netconf.NetworkConfig `yaml:\"network,omitempty\"`\n\t}\n\ttype nonCfg struct {\n\t\tRancher nonRancherCfg `yaml:\"rancher,omitempty\"`\n\t}\n\t\/\/ write the network.yml file from metadata\n\tcc := nonCfg{\n\t\tRancher: nonRancherCfg{\n\t\t\tNetwork: metadata.NetworkConfig,\n\t\t},\n\t}\n\n\tif err := os.MkdirAll(path.Dir(rancherConfig.CloudConfigNetworkFile), 0700); err != nil {\n\t\tlog.Errorf(\"Failed to create directory for file %s: %v\", rancherConfig.CloudConfigNetworkFile, err)\n\t}\n\n\tif err := rancherConfig.WriteToFile(cc, rancherConfig.CloudConfigNetworkFile); err != nil {\n\t\tlog.Errorf(\"Failed to save config file %s: %v\", rancherConfig.CloudConfigNetworkFile, err)\n\t}\n\tlog.Infof(\"Wrote to %s\", rancherConfig.CloudConfigNetworkFile)\n\n\treturn nil\n}\n\nfunc fetchAndSave(ds datasource.Datasource) error {\n\tvar metadata datasource.Metadata\n\n\tlog.Infof(\"Fetching user-data from datasource %s\", ds)\n\tuserDataBytes, err := ds.FetchUserdata()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching user-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\tuserDataBytes, err = decompressIfGzip(userDataBytes)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed decompressing user-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\tlog.Infof(\"Fetching meta-data from datasource of type %v\", ds.Type())\n\tmetadata, err = ds.FetchMetadata()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching meta-data from datasource: %v\", err)\n\t\treturn err\n\t}\n\n\tuserData := string(userDataBytes)\n\tscriptBytes := []byte{}\n\n\tif config.IsScript(userData) {\n\t\tscriptBytes = userDataBytes\n\t\tuserDataBytes = []byte{}\n\t} else if isCompose(userData) {\n\t\tif userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {\n\t\t\tlog.Errorf(\"Failed to convert compose to cloud-config syntax: %v\", err)\n\t\t\treturn err\n\t\t}\n\t} else if config.IsCloudConfig(userData) {\n\t\tif _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {\n\t\t\tlog.WithFields(log.Fields{\"cloud-config\": userData, \"err\": err}).Warn(\"Failed to parse cloud-config, not saving.\")\n\t\t\tuserDataBytes = []byte{}\n\t\t}\n\t} else {\n\t\tlog.Errorf(\"Unrecognized user-data\\n(%s)\", userData)\n\t\tuserDataBytes = []byte{}\n\t}\n\n\tif _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {\n\t\tlog.WithFields(log.Fields{\"cloud-config\": userData, \"err\": err}).Warn(\"Failed to parse cloud-config\")\n\t\treturn errors.New(\"Failed to parse cloud-config\")\n\t}\n\n\treturn saveFiles(userDataBytes, scriptBytes, metadata)\n}\n\n\/\/ getDatasources creates a slice of possible Datasources for cloudinit based\n\/\/ on the different source command-line flags.\nfunc getDatasources(datasources []string) []datasource.Datasource {\n\tdss := make([]datasource.Datasource, 0, 5)\n\n\tfor _, ds := range datasources {\n\t\tparts := strings.SplitN(ds, \":\", 2)\n\n\t\troot := \"\"\n\t\tif len(parts) > 1 {\n\t\t\troot = parts[1]\n\t\t}\n\n\t\tswitch parts[0] {\n\t\tcase \"*\":\n\t\t\tdss = append(dss, getDatasources([]string{\"configdrive\", \"vmware\", \"ec2\", \"digitalocean\", \"packet\", \"gce\", \"cloudstack\"})...)\n\t\tcase \"cloudstack\":\n\t\t\tfor _, source := range cloudstack.NewDatasource(root) {\n\t\t\t\tdss = append(dss, source)\n\t\t\t}\n\t\tcase \"ec2\":\n\t\t\tdss = append(dss, ec2.NewDatasource(root))\n\t\tcase \"file\":\n\t\t\tif root != \"\" {\n\t\t\t\tdss = append(dss, file.NewDatasource(root))\n\t\t\t}\n\t\tcase \"url\":\n\t\t\tif root != \"\" {\n\t\t\t\tdss = append(dss, url.NewDatasource(root))\n\t\t\t}\n\t\tcase \"cmdline\":\n\t\t\tif len(parts) == 1 {\n\t\t\t\tdss = append(dss, proccmdline.NewDatasource())\n\t\t\t}\n\t\tcase \"configdrive\":\n\t\t\tif root == \"\" {\n\t\t\t\troot = \"\/media\/config-2\"\n\t\t\t}\n\t\t\tdss = append(dss, configdrive.NewDatasource(root))\n\t\tcase \"digitalocean\":\n\t\t\t\/\/ TODO: should we enableDoLinkLocal() - to avoid the need for the other kernel\/oem options?\n\t\t\tdss = append(dss, digitalocean.NewDatasource(root))\n\t\tcase \"gce\":\n\t\t\tdss = append(dss, gce.NewDatasource(root))\n\t\tcase \"packet\":\n\t\t\tdss = append(dss, packet.NewDatasource(root))\n\t\tcase \"vmware\":\n\t\t\t\/\/ made vmware datasource dependent on detecting vmware independently, as it crashes things otherwise\n\t\t\tv := vmware.NewDatasource(root)\n\t\t\tif v != nil {\n\t\t\t\tdss = append(dss, v)\n\t\t\t}\n\t\tcase \"aliyun\":\n\t\t\tdss = append(dss, aliyun.NewDatasource(root))\n\t\t}\n\t}\n\n\treturn dss\n}\n\nfunc enableDoLinkLocal() {\n\t_, err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{\n\t\tInterfaces: map[string]netconf.InterfaceConfig{\n\t\t\t\"eth0\": {\n\t\t\t\tIPV4LL: true,\n\t\t\t},\n\t\t},\n\t}, false, false)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to apply link local on eth0: %v\", err)\n\t}\n}\n\n\/\/ selectDatasource attempts to choose a valid Datasource to use based on its\n\/\/ current availability. The first Datasource to report to be available is\n\/\/ returned. Datasources will be retried if possible if they are not\n\/\/ immediately available. If all Datasources are permanently unavailable or\n\/\/ datasourceTimeout is reached before one becomes available, nil is returned.\nfunc selectDatasource(sources []datasource.Datasource) datasource.Datasource {\n\tds := make(chan datasource.Datasource)\n\tstop := make(chan struct{})\n\tvar wg sync.WaitGroup\n\n\tfor _, s := range sources {\n\t\twg.Add(1)\n\t\tgo func(s datasource.Datasource) {\n\t\t\tdefer wg.Done()\n\n\t\t\tduration := datasourceInterval\n\t\t\tfor {\n\t\t\t\tlog.Infof(\"cloud-init: Checking availability of %q\", s.Type())\n\t\t\t\tif s.IsAvailable() {\n\t\t\t\t\tlog.Infof(\"cloud-init: Datasource available: %s\", s)\n\t\t\t\t\tds <- s\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !s.AvailabilityChanges() {\n\t\t\t\t\tlog.Infof(\"cloud-init: Datasource unavailable, skipping: %s\", s)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Errorf(\"cloud-init: Datasource not ready, will retry: %s\", s)\n\t\t\t\tselect {\n\t\t\t\tcase <-stop:\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(duration):\n\t\t\t\t\tduration = pkg.ExpBackoff(duration, datasourceMaxInterval)\n\t\t\t\t}\n\t\t\t}\n\t\t}(s)\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tvar s datasource.Datasource\n\tselect {\n\tcase s = <-ds:\n\t\terr := fetchAndSave(s)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error fetching cloud-init datasource(%s): %s\", s, err)\n\t\t}\n\tcase <-done:\n\tcase <-time.After(datasourceTimeout):\n\t}\n\n\tclose(stop)\n\treturn s\n}\n\nfunc isCompose(content string) bool {\n\treturn strings.HasPrefix(content, \"#compose\\n\")\n}\n\nfunc composeToCloudConfig(bytes []byte) ([]byte, error) {\n\tcompose := make(map[interface{}]interface{})\n\terr := yaml.Unmarshal(bytes, &compose)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn yaml.Marshal(map[interface{}]interface{}{\n\t\t\"rancher\": map[interface{}]interface{}{\n\t\t\t\"services\": compose,\n\t\t},\n\t})\n}\n\nconst gzipMagicBytes = \"\\x1f\\x8b\"\n\nfunc decompressIfGzip(userdataBytes []byte) ([]byte, error) {\n\tif !bytes.HasPrefix(userdataBytes, []byte(gzipMagicBytes)) {\n\t\treturn userdataBytes, nil\n\t}\n\n\treturn config.DecompressGzip(userdataBytes)\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\ntype Version struct {\n\tGitSHA string\n\tVersion string\n}\n\nvar VersionInfo = Version{\n\tGitSHA: gitSha,\n\tVersion: version,\n}\n\nconst gitSha = \"unknown\"\nconst version = \"0.1\"\n<commit_msg>Change back to git commit<commit_after>package version\n\ntype Version struct {\n\tGitCommit string\n\tVersion string\n}\n\nvar VersionInfo = unknownVersion\n\nvar unknownVersion = Version{\n\tGitCommit: \"unknown git commit\",\n\tVersion: \"unknown version\",\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype Files struct {\n\tLHS string `positional-arg-name:\"FILE_1\"`\n\tRHS string `positional-arg-name:\"FILE_2\"`\n}\n\ntype config struct {\n\tFiles Files `positional-args:\"yes\" required:\"yes\"`\n\tIgnore ignorePatterns `long:\"ignore\" short:\"i\" description:\"paths to ignore (glob)\"`\n\toutput\n\tOutputReport bool `long:\"report\" short:\"r\" description:\"output report format\"`\n}\n\ntype output struct {\n\tIndent string `long:\"indent\" description:\"indent string\" default:\"\\t\"`\n\tShowTypes bool `long:\"show-types\" short:\"t\" description:\"show types\"`\n\tColorized bool\n}\n\nfunc readConfig() config {\n\tvar c config\n\n\t_, err := flags.Parse(&c)\n\tif err != nil {\n\t\tif flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"Failed to parse arguments. See %s --help\\n\", os.Args[0])\n\t\tos.Exit(statusUsage)\n\t}\n\n\tc.output.Colorized = terminal.IsTerminal(int(os.Stdout.Fd()))\n\n\treturn c\n}\n<commit_msg>unexporting files<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype files struct {\n\tLHS string `positional-arg-name:\"FILE_1\"`\n\tRHS string `positional-arg-name:\"FILE_2\"`\n}\n\ntype config struct {\n\tFiles files `positional-args:\"yes\" required:\"yes\"`\n\tIgnore ignorePatterns `long:\"ignore\" short:\"i\" description:\"paths to ignore (glob)\"`\n\toutput\n\tOutputReport bool `long:\"report\" short:\"r\" description:\"output report format\"`\n}\n\ntype output struct {\n\tIndent string `long:\"indent\" description:\"indent string\" default:\"\\t\"`\n\tShowTypes bool `long:\"show-types\" short:\"t\" description:\"show types\"`\n\tColorized bool\n}\n\nfunc readConfig() config {\n\tvar c config\n\n\t_, err := flags.Parse(&c)\n\tif err != nil {\n\t\tif flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"Failed to parse arguments. See %s --help\\n\", os.Args[0])\n\t\tos.Exit(statusUsage)\n\t}\n\n\tc.output.Colorized = terminal.IsTerminal(int(os.Stdout.Fd()))\n\n\treturn c\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ Package is the overall, canonical project import path under which the\n\/\/ package was built.\nvar Package = \"github.com\/docker\/distribution\"\n\n\/\/ Version indicates which version of the binary is running. This is set to\n\/\/ the latest release tag by hand, always suffixed by \"+unknown\". During\n\/\/ build, it will be replaced by the actual version. The value here will be\n\/\/ used if the registry is run after a go get based install.\nvar Version = \"v2.0.0+unknown\"\n<commit_msg>Rev base version to 2.1.0<commit_after>package version\n\n\/\/ Package is the overall, canonical project import path under which the\n\/\/ package was built.\nvar Package = \"github.com\/docker\/distribution\"\n\n\/\/ Version indicates which version of the binary is running. This is set to\n\/\/ the latest release tag by hand, always suffixed by \"+unknown\". During\n\/\/ build, it will be replaced by the actual version. The value here will be\n\/\/ used if the registry is run after a go get based install.\nvar Version = \"v2.1.0+unknown\"\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/job\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/manager\"\n\t\"github.com\/funkygao\/gafka\/mpool\"\n\t\"github.com\/funkygao\/httprouter\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ POST \/v1\/jobs\/:topic\/:ver?delay=100|due=1471565204\n\/\/ TODO tag, partitionKey\n\/\/ TODO use dedicated metrics\nfunc (this *pubServer) addJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.JobTryQps.Mark(1)\n\t}\n\n\tt1 := time.Now()\n\trealIp := getHttpRemoteIp(r)\n\tappid := r.Header.Get(HttpHeaderAppid)\n\n\tvar due int64\n\tq := r.URL.Query()\n\tdueParam := q.Get(\"due\")\n\tif dueParam != \"\" {\n\t\td, err := strconv.ParseInt(dueParam, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Error(\"+job[%s] %s(%s) due:%s %s\", appid, r.RemoteAddr, realIp, dueParam, err)\n\n\t\t\twriteBadRequest(w, \"invalid due param\")\n\t\t\treturn\n\t\t}\n\n\t\tdue = d\n\t} else {\n\t\tdelayParam := q.Get(\"delay\") \/\/ in sec\n\t\tdelay, err := strconv.ParseInt(delayParam, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Error(\"+job[%s] %s(%s) delay:%s %s\", appid, r.RemoteAddr, realIp, delayParam, err)\n\n\t\t\twriteBadRequest(w, \"invalid delay param\")\n\t\t\treturn\n\t\t}\n\n\t\tdue = t1.Unix() + delay\n\t}\n\n\tif due < t1.Unix() {\n\t\tlog.Error(\"+job[%s] %s(%s) due=%d before now?\", appid, r.RemoteAddr, realIp, due)\n\n\t\twriteBadRequest(w, \"invalid param\")\n\t\treturn\n\t}\n\n\tif Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) {\n\t\tlog.Warn(\"+job[%s] %s(%s) rate limit reached\", appid, r.RemoteAddr, realIp)\n\n\t\twriteQuotaExceeded(w)\n\t\treturn\n\t}\n\n\ttopic := params.ByName(UrlParamTopic)\n\tver := params.ByName(UrlParamVersion)\n\tif err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\", appid, r.RemoteAddr, realIp, topic, ver, err)\n\n\t\twriteAuthFailure(w, err)\n\t\treturn\n\t}\n\n\t\/\/ get the raw POST message\n\tmsgLen := int(r.ContentLength)\n\tswitch {\n\tcase msgLen == -1:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} invalid content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\n\t\twriteBadRequest(w, \"invalid content length\")\n\t\treturn\n\n\tcase int64(msgLen) > Options.MaxJobSize:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} too big content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\t\twriteBadRequest(w, ErrTooBigMessage.Error())\n\t\treturn\n\n\tcase msgLen < Options.MinPubSize:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} too small content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\t\twriteBadRequest(w, ErrTooSmallMessage.Error())\n\t\treturn\n\t}\n\n\tlbr := io.LimitReader(r.Body, Options.MaxJobSize+1)\n\tmsg := mpool.NewMessage(msgLen)\n\tmsg.Body = msg.Body[0:msgLen]\n\tif _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {\n\t\tmsg.Free()\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\t\twriteBadRequest(w, ErrTooBigMessage.Error())\n\t\treturn\n\t}\n\n\tlog.Debug(\"+job[%s] %s(%s) {topic:%s, ver:%s} due:%d\/%ds\",\n\t\tappid, r.RemoteAddr, realIp, topic, ver, due, due-t1.Unix())\n\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.JobQps.Mark(1)\n\t\tthis.pubMetrics.JobMsgSize.Update(int64(len(msg.Body)))\n\t}\n\n\t_, found := manager.Default.LookupCluster(appid)\n\tif !found {\n\t\tmsg.Free()\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} cluster not found\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver)\n\n\t\twriteBadRequest(w, \"invalid appid\")\n\t\treturn\n\t}\n\n\tjobId, err := job.Default.Add(appid, manager.Default.KafkaTopic(appid, topic, ver), msg.Body, due)\n\tmsg.Free()\n\tif err != nil {\n\t\tif !Options.DisableMetrics {\n\t\t\tthis.pubMetrics.PubFail(appid, topic, ver)\n\t\t}\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\t\twriteServerError(w, err.Error())\n\t\treturn\n\t}\n\n\tif Options.AuditPub {\n\t\tthis.auditor.Trace(\"+job[%s] %s(%s) {topic:%s ver:%s UA:%s} due:%d id:%s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, r.Header.Get(\"User-Agent\"), due, jobId)\n\t}\n\n\tw.Header().Set(HttpHeaderJobId, jobId)\n\tw.WriteHeader(http.StatusCreated)\n\n\tif _, err = w.Write(ResponseOk); err != nil {\n\t\tlog.Error(\"%s: %v\", r.RemoteAddr, err)\n\t\tthis.pubMetrics.ClientError.Inc(1)\n\t}\n\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.PubOk(appid, topic, ver)\n\t\tthis.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() \/ 1e6) \/\/ in ms\n\t}\n}\n\n\/\/ DELETE \/v1\/jobs\/:topic\/:ver?id=22323\nfunc (this *pubServer) deleteJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tappid := r.Header.Get(HttpHeaderAppid)\n\ttopic := params.ByName(UrlParamTopic)\n\tver := params.ByName(UrlParamVersion)\n\trealIp := getHttpRemoteIp(r)\n\tif err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\n\t\twriteAuthFailure(w, err)\n\t\treturn\n\t}\n\n\t_, found := manager.Default.LookupCluster(appid)\n\tif !found {\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s} cluster not found\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver)\n\n\t\twriteBadRequest(w, \"invalid appid\")\n\t\treturn\n\t}\n\n\tjobId := r.URL.Query().Get(\"id\")\n\tif len(jobId) < 18 { \/\/ jobId e,g. 341647700585877504\n\t\twriteBadRequest(w, \"invalid job id\")\n\t\treturn\n\t}\n\n\tif err := job.Default.Delete(appid, manager.Default.KafkaTopic(appid, topic, ver), jobId); err != nil {\n\t\tif err == job.ErrNothingDeleted {\n\t\t\t\/\/ race failed, actor worker wins\n\t\t\tlog.Warn(\"-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v\",\n\t\t\t\tappid, r.RemoteAddr, realIp, topic, ver, jobId, err)\n\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t\tw.Write([]byte{})\n\t\t\treturn\n\t\t}\n\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, jobId, err)\n\n\t\twriteServerError(w, err.Error())\n\t\treturn\n\t}\n\n\tif Options.AuditPub {\n\t\tthis.auditor.Trace(\"-job[%s] %s(%s) {topic:%s ver:%s UA:%s jid:%s}\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, r.Header.Get(\"User-Agent\"), jobId)\n\t}\n\n\tw.Write(ResponseOk)\n}\n<commit_msg>add comment<commit_after>package gateway\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/job\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/manager\"\n\t\"github.com\/funkygao\/gafka\/mpool\"\n\t\"github.com\/funkygao\/httprouter\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ POST \/v1\/jobs\/:topic\/:ver?delay=100|due=1471565204\n\/\/ TODO tag, partitionKey\n\/\/ TODO use dedicated metrics\nfunc (this *pubServer) addJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.JobTryQps.Mark(1)\n\t}\n\n\tt1 := time.Now()\n\trealIp := getHttpRemoteIp(r)\n\tappid := r.Header.Get(HttpHeaderAppid)\n\n\tvar due int64\n\tq := r.URL.Query()\n\tdueParam := q.Get(\"due\") \/\/ due has higher priority than delay\n\tif dueParam != \"\" {\n\t\td, err := strconv.ParseInt(dueParam, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Error(\"+job[%s] %s(%s) due:%s %s\", appid, r.RemoteAddr, realIp, dueParam, err)\n\n\t\t\twriteBadRequest(w, \"invalid due param\")\n\t\t\treturn\n\t\t}\n\n\t\tdue = d\n\t} else {\n\t\tdelayParam := q.Get(\"delay\") \/\/ in sec\n\t\tdelay, err := strconv.ParseInt(delayParam, 10, 64)\n\t\tif err != nil {\n\t\t\tlog.Error(\"+job[%s] %s(%s) delay:%s %s\", appid, r.RemoteAddr, realIp, delayParam, err)\n\n\t\t\twriteBadRequest(w, \"invalid delay param\")\n\t\t\treturn\n\t\t}\n\n\t\tdue = t1.Unix() + delay\n\t}\n\n\tif due < t1.Unix() {\n\t\tlog.Error(\"+job[%s] %s(%s) due=%d before now?\", appid, r.RemoteAddr, realIp, due)\n\n\t\twriteBadRequest(w, \"invalid param\")\n\t\treturn\n\t}\n\n\tif Options.Ratelimit && !this.throttlePub.Pour(realIp, 1) {\n\t\tlog.Warn(\"+job[%s] %s(%s) rate limit reached\", appid, r.RemoteAddr, realIp)\n\n\t\twriteQuotaExceeded(w)\n\t\treturn\n\t}\n\n\ttopic := params.ByName(UrlParamTopic)\n\tver := params.ByName(UrlParamVersion)\n\tif err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\", appid, r.RemoteAddr, realIp, topic, ver, err)\n\n\t\twriteAuthFailure(w, err)\n\t\treturn\n\t}\n\n\t\/\/ get the raw POST message\n\tmsgLen := int(r.ContentLength)\n\tswitch {\n\tcase msgLen == -1:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} invalid content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\n\t\twriteBadRequest(w, \"invalid content length\")\n\t\treturn\n\n\tcase int64(msgLen) > Options.MaxJobSize:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} too big content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\t\twriteBadRequest(w, ErrTooBigMessage.Error())\n\t\treturn\n\n\tcase msgLen < Options.MinPubSize:\n\t\tlog.Warn(\"+job[%s] %s(%s) {topic:%s, ver:%s} too small content length: %d\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, msgLen)\n\t\twriteBadRequest(w, ErrTooSmallMessage.Error())\n\t\treturn\n\t}\n\n\tlbr := io.LimitReader(r.Body, Options.MaxJobSize+1)\n\tmsg := mpool.NewMessage(msgLen)\n\tmsg.Body = msg.Body[0:msgLen]\n\tif _, err := io.ReadAtLeast(lbr, msg.Body, msgLen); err != nil {\n\t\tmsg.Free()\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\t\twriteBadRequest(w, ErrTooBigMessage.Error())\n\t\treturn\n\t}\n\n\tlog.Debug(\"+job[%s] %s(%s) {topic:%s, ver:%s} due:%d\/%ds\",\n\t\tappid, r.RemoteAddr, realIp, topic, ver, due, due-t1.Unix())\n\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.JobQps.Mark(1)\n\t\tthis.pubMetrics.JobMsgSize.Update(int64(len(msg.Body)))\n\t}\n\n\t_, found := manager.Default.LookupCluster(appid)\n\tif !found {\n\t\tmsg.Free()\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} cluster not found\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver)\n\n\t\twriteBadRequest(w, \"invalid appid\")\n\t\treturn\n\t}\n\n\tjobId, err := job.Default.Add(appid, manager.Default.KafkaTopic(appid, topic, ver), msg.Body, due)\n\tmsg.Free()\n\tif err != nil {\n\t\tif !Options.DisableMetrics {\n\t\t\tthis.pubMetrics.PubFail(appid, topic, ver)\n\t\t}\n\n\t\tlog.Error(\"+job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\t\twriteServerError(w, err.Error())\n\t\treturn\n\t}\n\n\tif Options.AuditPub {\n\t\tthis.auditor.Trace(\"+job[%s] %s(%s) {topic:%s ver:%s UA:%s} due:%d id:%s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, r.Header.Get(\"User-Agent\"), due, jobId)\n\t}\n\n\tw.Header().Set(HttpHeaderJobId, jobId)\n\tw.WriteHeader(http.StatusCreated)\n\n\tif _, err = w.Write(ResponseOk); err != nil {\n\t\tlog.Error(\"%s: %v\", r.RemoteAddr, err)\n\t\tthis.pubMetrics.ClientError.Inc(1)\n\t}\n\n\tif !Options.DisableMetrics {\n\t\tthis.pubMetrics.PubOk(appid, topic, ver)\n\t\tthis.pubMetrics.PubLatency.Update(time.Since(t1).Nanoseconds() \/ 1e6) \/\/ in ms\n\t}\n}\n\n\/\/ DELETE \/v1\/jobs\/:topic\/:ver?id=22323\nfunc (this *pubServer) deleteJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tappid := r.Header.Get(HttpHeaderAppid)\n\ttopic := params.ByName(UrlParamTopic)\n\tver := params.ByName(UrlParamVersion)\n\trealIp := getHttpRemoteIp(r)\n\tif err := manager.Default.OwnTopic(appid, r.Header.Get(HttpHeaderPubkey), topic); err != nil {\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s} %s\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, err)\n\n\t\twriteAuthFailure(w, err)\n\t\treturn\n\t}\n\n\t_, found := manager.Default.LookupCluster(appid)\n\tif !found {\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s} cluster not found\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver)\n\n\t\twriteBadRequest(w, \"invalid appid\")\n\t\treturn\n\t}\n\n\tjobId := r.URL.Query().Get(\"id\")\n\tif len(jobId) < 18 { \/\/ jobId e,g. 341647700585877504\n\t\twriteBadRequest(w, \"invalid job id\")\n\t\treturn\n\t}\n\n\tif err := job.Default.Delete(appid, manager.Default.KafkaTopic(appid, topic, ver), jobId); err != nil {\n\t\tif err == job.ErrNothingDeleted {\n\t\t\t\/\/ race failed, actor worker wins\n\t\t\tlog.Warn(\"-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v\",\n\t\t\t\tappid, r.RemoteAddr, realIp, topic, ver, jobId, err)\n\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t\tw.Write([]byte{})\n\t\t\treturn\n\t\t}\n\n\t\tlog.Error(\"-job[%s] %s(%s) {topic:%s, ver:%s jid:%s} %v\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, jobId, err)\n\n\t\twriteServerError(w, err.Error())\n\t\treturn\n\t}\n\n\tif Options.AuditPub {\n\t\tthis.auditor.Trace(\"-job[%s] %s(%s) {topic:%s ver:%s UA:%s jid:%s}\",\n\t\t\tappid, r.RemoteAddr, realIp, topic, ver, r.Header.Get(\"User-Agent\"), jobId)\n\t}\n\n\tw.Write(ResponseOk)\n}\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\nconst (\n\t\/\/ handshakeUpgradeVersion is the version where the gateway handshake RPC\n\t\/\/ was altered to include adiitional information transfer.\n\thandshakeUpgradeVersion = \"1.0.0\"\n\n\t\/\/ maxLocalOutbound is currently set to 3, meaning the gateway will not\n\t\/\/ consider a local node to be an outbound peer if the gateway already has\n\t\/\/ 3 outbound peers. Three is currently needed to handle situations where\n\t\/\/ the gateway is at high risk of connecting to itself (such as a low\n\t\/\/ number of total peers, especially such as in a testing environment).\n\t\/\/ Once the gateway has a proper way to figure out that it's trying to\n\t\/\/ connect to itself, this number can be reduced.\n\tmaxLocalOutboundPeers = 3\n\n\t\/\/ minAcceptableVersion is the version below which the gateway will refuse to\n\t\/\/ connect to peers and reject connection attempts.\n\t\/\/\n\t\/\/ Reject peers < v0.4.0 as the previous version is v0.3.3 which is\n\t\/\/ pre-hardfork.\n\tminAcceptableVersion = \"0.4.0\"\n)\n\nvar (\n\t\/\/ fastNodePurgeDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the purge loop when the gateway has enough nodes to be\n\t\/\/ needing to purge quickly.\n\tfastNodePurgeDelay = build.Select(build.Var{\n\t\tStandard: 1 * time.Minute,\n\t\tDev: 5 * time.Second,\n\t\tTesting: 200 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ healthyNodeListLen defines the number of nodes that the gateway must\n\t\/\/ have in the node list before it will stop asking peers for more nodes.\n\thealthyNodeListLen = build.Select(build.Var{\n\t\tStandard: 200,\n\t\tDev: 30,\n\t\tTesting: 15,\n\t}).(int)\n\n\t\/\/ maxSharedNodes defines the number of nodes that will be shared between\n\t\/\/ peers when they are expanding their node lists.\n\tmaxSharedNodes = build.Select(build.Var{\n\t\tStandard: uint64(10),\n\t\tDev: uint64(5),\n\t\tTesting: uint64(3),\n\t}).(uint64)\n\n\t\/\/ nodePurgeDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the node purge loop.\n\tnodePurgeDelay = build.Select(build.Var{\n\t\tStandard: 10 * time.Minute,\n\t\tDev: 20 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ nodeListDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the node list loop.\n\tnodeListDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ pruneNodeListLen defines the number of nodes that the gateway must have\n\t\/\/ to be pruning nodes from the node list.\n\tpruneNodeListLen = build.Select(build.Var{\n\t\tStandard: 50,\n\t\tDev: 15,\n\t\tTesting: 10,\n\t}).(int)\n\n\t\/\/ quickPruneListLen defines the number of nodes that the gateway must have\n\t\/\/ to be pruning nodes quickly from the node list.\n\tquickPruneListLen = build.Select(build.Var{\n\t\tStandard: 250,\n\t\tDev: 40,\n\t\tTesting: 20,\n\t}).(int)\n)\n\nvar (\n\t\/\/ The gateway will sleep this long between incoming connections. For\n\t\/\/ attack reasons, the acceptInterval should be longer than the\n\t\/\/ nodeListDelay. Right at startup, a node is vulnerable to being flooded\n\t\/\/ by Sybil attackers. The node's best defense is to wait until it has\n\t\/\/ filled out its nodelist somewhat from the bootstrap nodes. An attacker\n\t\/\/ needs to completely dominate the nodelist and the peerlist to be\n\t\/\/ successful, so just a few honest nodes from requests to the bootstraps\n\t\/\/ should be enough to fend from most attacks.\n\tacceptInterval = build.Select(build.Var{\n\t\tStandard: 6 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 100 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ acquiringPeersDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway is actively\n\t\/\/ forming new connections with peers.\n\tacquiringPeersDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ fullyConnectedThreshold defines the number of peers that the gateway can\n\t\/\/ have before it stops accepting inbound connections.\n\tfullyConnectedThreshold = build.Select(build.Var{\n\t\tStandard: 128,\n\t\tDev: 20,\n\t\tTesting: 10,\n\t}).(int)\n\n\t\/\/ maxConcurrentOutboundPeerRequests defines the maximum number of peer\n\t\/\/ connections that the gateway will try to form concurrently.\n\tmaxConcurrentOutboundPeerRequests = build.Select(build.Var{\n\t\tStandard: 3,\n\t\tDev: 2,\n\t\tTesting: 2,\n\t}).(int)\n\n\t\/\/ noNodesDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway does not have any\n\t\/\/ nodes in the nodelist.\n\tnoNodesDelay = build.Select(build.Var{\n\t\tStandard: 20 * time.Second,\n\t\tDev: 10 * time.Second,\n\t\tTesting: 3 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ unwawntedLocalPeerDelay defines the amount of time that is waited\n\t\/\/ between iterations of the permanentPeerManager if the gateway has at\n\t\/\/ least a few outbound peers, but is not well connected, and the recently\n\t\/\/ selected peer was a local peer. The wait is mostly to prevent the\n\t\/\/ gateway from hogging the CPU in the event that all peers are local\n\t\/\/ peers.\n\tunwantedLocalPeerDelay = build.Select(build.Var{\n\t\tStandard: 2 * time.Second,\n\t\tDev: 1 * time.Second,\n\t\tTesting: 100 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ wellConnectedDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway is well\n\t\/\/ connected.\n\twellConnectedDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Minute,\n\t\tDev: 1 * time.Minute,\n\t\tTesting: 3 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ wellConnectedThreshold is the number of outbound connections at which\n\t\/\/ the gateway will not attempt to make new outbound connections.\n\twellConnectedThreshold = build.Select(build.Var{\n\t\tStandard: 8,\n\t\tDev: 5,\n\t\tTesting: 4,\n\t}).(int)\n)\n\nvar (\n\t\/\/ connStdDeadline defines the standard deadline that should be used for\n\t\/\/ all temporary connections to the gateway.\n\tconnStdDeadline = build.Select(build.Var{\n\t\tStandard: 5 * time.Minute,\n\t\tDev: 2 * time.Minute,\n\t\tTesting: 30 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ the gateway will abort a connection attempt after this long\n\tdialTimeout = build.Select(build.Var{\n\t\tStandard: 2 * time.Minute,\n\t\tDev: 20 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n)\n<commit_msg>wrap int constants in typecast<commit_after>package gateway\n\nimport (\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\nconst (\n\t\/\/ handshakeUpgradeVersion is the version where the gateway handshake RPC\n\t\/\/ was altered to include adiitional information transfer.\n\thandshakeUpgradeVersion = \"1.0.0\"\n\n\t\/\/ maxLocalOutbound is currently set to 3, meaning the gateway will not\n\t\/\/ consider a local node to be an outbound peer if the gateway already has\n\t\/\/ 3 outbound peers. Three is currently needed to handle situations where\n\t\/\/ the gateway is at high risk of connecting to itself (such as a low\n\t\/\/ number of total peers, especially such as in a testing environment).\n\t\/\/ Once the gateway has a proper way to figure out that it's trying to\n\t\/\/ connect to itself, this number can be reduced.\n\tmaxLocalOutboundPeers = 3\n\n\t\/\/ minAcceptableVersion is the version below which the gateway will refuse to\n\t\/\/ connect to peers and reject connection attempts.\n\t\/\/\n\t\/\/ Reject peers < v0.4.0 as the previous version is v0.3.3 which is\n\t\/\/ pre-hardfork.\n\tminAcceptableVersion = \"0.4.0\"\n)\n\nvar (\n\t\/\/ fastNodePurgeDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the purge loop when the gateway has enough nodes to be\n\t\/\/ needing to purge quickly.\n\tfastNodePurgeDelay = build.Select(build.Var{\n\t\tStandard: 1 * time.Minute,\n\t\tDev: 5 * time.Second,\n\t\tTesting: 200 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ healthyNodeListLen defines the number of nodes that the gateway must\n\t\/\/ have in the node list before it will stop asking peers for more nodes.\n\thealthyNodeListLen = build.Select(build.Var{\n\t\tStandard: int(200),\n\t\tDev: int(30),\n\t\tTesting: int(15),\n\t}).(int)\n\n\t\/\/ maxSharedNodes defines the number of nodes that will be shared between\n\t\/\/ peers when they are expanding their node lists.\n\tmaxSharedNodes = build.Select(build.Var{\n\t\tStandard: uint64(10),\n\t\tDev: uint64(5),\n\t\tTesting: uint64(3),\n\t}).(uint64)\n\n\t\/\/ nodePurgeDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the node purge loop.\n\tnodePurgeDelay = build.Select(build.Var{\n\t\tStandard: 10 * time.Minute,\n\t\tDev: 20 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ nodeListDelay defines the amount of time that is waited between each\n\t\/\/ iteration of the node list loop.\n\tnodeListDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ pruneNodeListLen defines the number of nodes that the gateway must have\n\t\/\/ to be pruning nodes from the node list.\n\tpruneNodeListLen = build.Select(build.Var{\n\t\tStandard: int(50),\n\t\tDev: int(15),\n\t\tTesting: int(10),\n\t}).(int)\n\n\t\/\/ quickPruneListLen defines the number of nodes that the gateway must have\n\t\/\/ to be pruning nodes quickly from the node list.\n\tquickPruneListLen = build.Select(build.Var{\n\t\tStandard: int(250),\n\t\tDev: int(40),\n\t\tTesting: int(20),\n\t}).(int)\n)\n\nvar (\n\t\/\/ The gateway will sleep this long between incoming connections. For\n\t\/\/ attack reasons, the acceptInterval should be longer than the\n\t\/\/ nodeListDelay. Right at startup, a node is vulnerable to being flooded\n\t\/\/ by Sybil attackers. The node's best defense is to wait until it has\n\t\/\/ filled out its nodelist somewhat from the bootstrap nodes. An attacker\n\t\/\/ needs to completely dominate the nodelist and the peerlist to be\n\t\/\/ successful, so just a few honest nodes from requests to the bootstraps\n\t\/\/ should be enough to fend from most attacks.\n\tacceptInterval = build.Select(build.Var{\n\t\tStandard: 6 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 100 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ acquiringPeersDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway is actively\n\t\/\/ forming new connections with peers.\n\tacquiringPeersDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Second,\n\t\tDev: 3 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ fullyConnectedThreshold defines the number of peers that the gateway can\n\t\/\/ have before it stops accepting inbound connections.\n\tfullyConnectedThreshold = build.Select(build.Var{\n\t\tStandard: 128,\n\t\tDev: 20,\n\t\tTesting: 10,\n\t}).(int)\n\n\t\/\/ maxConcurrentOutboundPeerRequests defines the maximum number of peer\n\t\/\/ connections that the gateway will try to form concurrently.\n\tmaxConcurrentOutboundPeerRequests = build.Select(build.Var{\n\t\tStandard: 3,\n\t\tDev: 2,\n\t\tTesting: 2,\n\t}).(int)\n\n\t\/\/ noNodesDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway does not have any\n\t\/\/ nodes in the nodelist.\n\tnoNodesDelay = build.Select(build.Var{\n\t\tStandard: 20 * time.Second,\n\t\tDev: 10 * time.Second,\n\t\tTesting: 3 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ unwawntedLocalPeerDelay defines the amount of time that is waited\n\t\/\/ between iterations of the permanentPeerManager if the gateway has at\n\t\/\/ least a few outbound peers, but is not well connected, and the recently\n\t\/\/ selected peer was a local peer. The wait is mostly to prevent the\n\t\/\/ gateway from hogging the CPU in the event that all peers are local\n\t\/\/ peers.\n\tunwantedLocalPeerDelay = build.Select(build.Var{\n\t\tStandard: 2 * time.Second,\n\t\tDev: 1 * time.Second,\n\t\tTesting: 100 * time.Millisecond,\n\t}).(time.Duration)\n\n\t\/\/ wellConnectedDelay defines the amount of time that is waited between\n\t\/\/ iterations of the peer acquisition loop if the gateway is well\n\t\/\/ connected.\n\twellConnectedDelay = build.Select(build.Var{\n\t\tStandard: 5 * time.Minute,\n\t\tDev: 1 * time.Minute,\n\t\tTesting: 3 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ wellConnectedThreshold is the number of outbound connections at which\n\t\/\/ the gateway will not attempt to make new outbound connections.\n\twellConnectedThreshold = build.Select(build.Var{\n\t\tStandard: 8,\n\t\tDev: 5,\n\t\tTesting: 4,\n\t}).(int)\n)\n\nvar (\n\t\/\/ connStdDeadline defines the standard deadline that should be used for\n\t\/\/ all temporary connections to the gateway.\n\tconnStdDeadline = build.Select(build.Var{\n\t\tStandard: 5 * time.Minute,\n\t\tDev: 2 * time.Minute,\n\t\tTesting: 30 * time.Second,\n\t}).(time.Duration)\n\n\t\/\/ the gateway will abort a connection attempt after this long\n\tdialTimeout = build.Select(build.Var{\n\t\tStandard: 2 * time.Minute,\n\t\tDev: 20 * time.Second,\n\t\tTesting: 500 * time.Millisecond,\n\t}).(time.Duration)\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ This file is subject to a 1-clause BSD license.\n\/\/ Its contents can be found in the enclosed LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jteeuwen\/ini\"\n\t\"github.com\/jteeuwen\/ircb\/irc\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ Global bot configuration settings.\nvar config *Config\n\n\/\/ Config holds bot configuration data.\ntype Config struct {\n\tProfile string\n\tAddress string\n\tSSLKey string\n\tSSLCert string\n\tNickname string\n\tServerPassword string\n\tOperPassword string\n\tNickservPassword string\n\tQuitMessage string\n\tCommandPrefix string\n\tChannels []*irc.Channel\n}\n\n\/\/ SetNickname atomically sets the new nickname.\n\/\/ This is used in response to proto.PIDNickInUse messages.\nfunc (c *Config) SetNickname(nickname string) {\n\tnew := *c\n\tnew.Nickname = nickname\n\n\tfor {\n\t\told := (*unsafe.Pointer)(unsafe.Pointer(&config))\n\n\t\tif atomic.CompareAndSwapPointer(old, *old, unsafe.Pointer(&new)) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tpanic(\"Unable to change nickname\")\n}\n\n\/\/ Load loads configuration data from the given ini file.\nfunc (c *Config) Load(file string) (err error) {\n\tini := ini.New()\n\terr = ini.Load(file)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\ts := ini.Section(\"net\")\n\tc.Address = fmt.Sprintf(\"%s:%d\", s.S(\"host\", \"\"), s.U32(\"port\", 0))\n\tc.SSLKey = s.S(\"x509-key\", \"\")\n\tc.SSLCert = s.S(\"x509-cert\", \"\")\n\n\tchans := s.List(\"channels\")\n\tc.Channels = make([]*irc.Channel, len(chans))\n\n\t\/\/ Parse channel definitions. A single channel comes as a string like:\n\t\/\/\n\t\/\/ <name>,<key>,<chanservpassword>\n\t\/\/\n\t\/\/ The name is the only required value.\n\tfor i, line := range chans {\n\t\telements := strings.Split(line, \",\")\n\n\t\tfor k := range elements {\n\t\t\telements[k] = strings.TrimSpace(elements[k])\n\t\t}\n\n\t\tif len(elements) == 0 || len(elements[0]) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar ch irc.Channel\n\t\tch.Name = elements[0]\n\n\t\tif len(elements) > 1 {\n\t\t\tch.Key = elements[1]\n\t\t}\n\n\t\tif len(elements) > 2 {\n\t\t\tch.ChanservPassword = elements[2]\n\t\t}\n\n\t\tc.Channels[i] = &ch\n\t}\n\n\ts = ini.Section(\"account\")\n\tc.Nickname = s.S(\"nickname\", \"\")\n\tc.ServerPassword = s.S(\"server-password\", \"\")\n\tc.OperPassword = s.S(\"oper-password\", \"\")\n\tc.NickservPassword = s.S(\"nickserv-password\", \"\")\n\tc.QuitMessage = s.S(\"quit-message\", \"\")\n\n\ts = ini.Section(\"bot\")\n\tc.CommandPrefix = s.S(\"command-prefix\", \"?\")\n\treturn\n}\n<commit_msg>Adds user whitelist field to main Config type. Adds whitelist loading code from configuration file.<commit_after>\/\/ This file is subject to a 1-clause BSD license.\n\/\/ Its contents can be found in the enclosed LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jteeuwen\/ini\"\n\t\"github.com\/jteeuwen\/ircb\/irc\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ Global bot configuration settings.\nvar config *Config\n\n\/\/ Config holds bot configuration data.\ntype Config struct {\n\tChannels []*irc.Channel\n\tWhitelist []string\n\tProfile string\n\tAddress string\n\tSSLKey string\n\tSSLCert string\n\tNickname string\n\tServerPassword string\n\tOperPassword string\n\tNickservPassword string\n\tQuitMessage string\n\tCommandPrefix string\n}\n\n\/\/ SetNickname atomically sets the new nickname.\n\/\/ This is used in response to proto.PIDNickInUse messages.\nfunc (c *Config) SetNickname(nickname string) {\n\tnew := *c\n\tnew.Nickname = nickname\n\n\tfor {\n\t\told := (*unsafe.Pointer)(unsafe.Pointer(&config))\n\n\t\tif atomic.CompareAndSwapPointer(old, *old, unsafe.Pointer(&new)) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tpanic(\"Unable to change nickname\")\n}\n\n\/\/ Load loads configuration data from the given ini file.\nfunc (c *Config) Load(file string) (err error) {\n\tini := ini.New()\n\terr = ini.Load(file)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\ts := ini.Section(\"net\")\n\tc.Address = fmt.Sprintf(\"%s:%d\", s.S(\"host\", \"\"), s.U32(\"port\", 0))\n\tc.SSLKey = s.S(\"x509-key\", \"\")\n\tc.SSLCert = s.S(\"x509-cert\", \"\")\n\n\tchans := s.List(\"channels\")\n\tc.Channels = make([]*irc.Channel, len(chans))\n\n\t\/\/ Parse channel definitions. A single channel comes as a string like:\n\t\/\/\n\t\/\/ <name>,<key>,<chanservpassword>\n\t\/\/\n\t\/\/ The name is the only required value.\n\tfor i, line := range chans {\n\t\telements := strings.Split(line, \",\")\n\n\t\tfor k := range elements {\n\t\t\telements[k] = strings.TrimSpace(elements[k])\n\t\t}\n\n\t\tif len(elements) == 0 || len(elements[0]) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar ch irc.Channel\n\t\tch.Name = elements[0]\n\n\t\tif len(elements) > 1 {\n\t\t\tch.Key = elements[1]\n\t\t}\n\n\t\tif len(elements) > 2 {\n\t\t\tch.ChanservPassword = elements[2]\n\t\t}\n\n\t\tc.Channels[i] = &ch\n\t}\n\n\ts = ini.Section(\"account\")\n\tc.Nickname = s.S(\"nickname\", \"\")\n\tc.ServerPassword = s.S(\"server-password\", \"\")\n\tc.OperPassword = s.S(\"oper-password\", \"\")\n\tc.NickservPassword = s.S(\"nickserv-password\", \"\")\n\tc.QuitMessage = s.S(\"quit-message\", \"\")\n\n\tc.CommandPrefix = ini.Section(\"bot\").S(\"command-prefix\", \"?\")\n\tc.Whitelist = ini.Section(\"whitelist\").List(\"user\")\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !v2\n\n\/\/ Package provides a plugin based pipeline engine that decouples Input\/Filter\/Output plugins.\npackage engine\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/telemetry\"\n\t\"github.com\/funkygao\/gafka\/telemetry\/influxdb\"\n\t\"github.com\/funkygao\/go-metrics\"\n\t\"github.com\/funkygao\/golib\/observer\"\n\tconf \"github.com\/funkygao\/jsconf\"\n\tlog \"github.com\/funkygao\/log4go\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nvar (\n\t_ PluginHelper = &Engine{}\n)\n\n\/\/ Engine is the pipeline engine of the data bus system which manages the core loop.\ntype Engine struct {\n\tsync.RWMutex\n\n\t\/\/ Engine will load json config file\n\t*conf.Conf\n\n\t\/\/ REST exporter\n\thttpListener net.Listener\n\thttpServer *http.Server\n\thttpRouter *mux.Router\n\thttpPaths []string\n\n\tInputRunners map[string]InputRunner\n\tinputWrappers map[string]*pluginWrapper\n\n\tFilterRunners map[string]FilterRunner\n\tfilterWrappers map[string]*pluginWrapper\n\n\tOutputRunners map[string]OutputRunner\n\toutputWrappers map[string]*pluginWrapper\n\n\ttop *topology\n\trouter *Router\n\n\tinputRecycleChans map[string]chan *Packet\n\tfilterRecycleChan chan *Packet\n\n\thostname string\n\tpid int\n\tstopper chan struct{}\n}\n\nfunc New(globals *GlobalConfig) *Engine {\n\tif globals == nil {\n\t\tglobals = DefaultGlobals()\n\t}\n\tGlobals = func() *GlobalConfig {\n\t\treturn globals\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Engine{\n\t\tInputRunners: make(map[string]InputRunner),\n\t\tinputWrappers: make(map[string]*pluginWrapper),\n\t\tFilterRunners: make(map[string]FilterRunner),\n\t\tfilterWrappers: make(map[string]*pluginWrapper),\n\t\tOutputRunners: make(map[string]OutputRunner),\n\t\toutputWrappers: make(map[string]*pluginWrapper),\n\n\t\tinputRecycleChans: make(map[string]chan *Packet),\n\t\tfilterRecycleChan: make(chan *Packet, globals.FilterRecyclePoolSize),\n\n\t\ttop: newTopology(),\n\t\trouter: newRouter(),\n\n\t\thttpPaths: make([]string, 0, 6),\n\n\t\tpid: os.Getpid(),\n\t\thostname: hostname,\n\t\tstopper: make(chan struct{}),\n\t}\n}\n\nfunc (e *Engine) stopInputRunner(name string) {\n\te.Lock()\n\te.InputRunners[name] = nil\n\te.Unlock()\n}\n\n\/\/ ClonePacket is used for plugin Filter to generate new Packet.\n\/\/ The generated Packet will use dedicated filter recycle chan.\nfunc (e *Engine) ClonePacket(p *Packet) *Packet {\n\tpack := <-e.filterRecycleChan\n\tpack.Reset()\n\tp.copyTo(pack)\n\treturn pack\n}\n\nfunc (e *Engine) LoadConfigFile(fn string) *Engine {\n\tif fn == \"\" {\n\t\tpanic(\"config file is required\")\n\t}\n\tif _, err := os.Stat(fn); err != nil {\n\t\tpanic(err)\n\t}\n\tcf, err := conf.Load(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\te.Conf = cf\n\tGlobals().Conf = cf\n\n\t\/\/ 'plugins' section\n\tfor i := 0; i < len(e.List(\"plugins\", nil)); i++ {\n\t\tsection, err := e.Section(fmt.Sprintf(\"plugins[%d]\", i))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\te.loadPluginSection(section)\n\t}\n\n\t\/\/ 'topology' section\n\te.top.load(e.Conf)\n\n\tif c, err := influxdb.NewConfig(cf.String(\"influx_addr\", \"\"),\n\t\tcf.String(\"influx_db\", \"dbus\"), \"\", \"\",\n\t\tcf.Duration(\"influx_tick\", time.Minute)); err == nil {\n\t\ttelemetry.Default = influxdb.New(metrics.DefaultRegistry, c)\n\t}\n\n\treturn e\n}\n\nfunc (e *Engine) loadPluginSection(section *conf.Conf) {\n\tpluginCommons := new(pluginCommons)\n\tpluginCommons.loadConfig(section)\n\tif pluginCommons.disabled {\n\t\tlog.Warn(\"%s disabled\", pluginCommons.name)\n\n\t\treturn\n\t}\n\n\twrapper := &pluginWrapper{\n\t\tname: pluginCommons.name,\n\t\tconfigCreator: func() *conf.Conf { return section },\n\t}\n\tvar ok bool\n\tif wrapper.pluginCreator, ok = availablePlugins[pluginCommons.class]; !ok {\n\t\tpanic(\"unknown plugin type: \" + pluginCommons.class)\n\t}\n\n\tpluginType := pluginTypeRegex.FindStringSubmatch(pluginCommons.class)\n\tif len(pluginType) < 2 {\n\t\tpanic(\"invalid plugin type: \" + pluginCommons.class)\n\t}\n\n\tplugin := wrapper.Create()\n\tpluginCategory := pluginType[1]\n\tif pluginCategory == \"Input\" {\n\t\te.inputRecycleChans[wrapper.name] = make(chan *Packet, Globals().InputRecyclePoolSize)\n\t\te.InputRunners[wrapper.name] = newInputRunner(plugin.(Input), pluginCommons)\n\t\te.inputWrappers[wrapper.name] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\t\treturn\n\t}\n\n\tfoRunner := newFORunner(plugin, pluginCommons)\n\tmatcher := newMatcher(section.StringList(\"match\", nil), foRunner)\n\tfoRunner.matcher = matcher\n\n\tswitch pluginCategory {\n\tcase \"Filter\":\n\t\te.router.addFilterMatcher(matcher)\n\t\te.FilterRunners[foRunner.Name()] = foRunner\n\t\te.filterWrappers[foRunner.Name()] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\n\tcase \"Output\":\n\t\te.router.addOutputMatcher(matcher)\n\t\te.OutputRunners[foRunner.Name()] = foRunner\n\t\te.outputWrappers[foRunner.Name()] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\n\tdefault:\n\t\tpanic(\"unknown plugin: \" + pluginCategory)\n\t}\n}\n\nfunc (e *Engine) ServeForever() (ret error) {\n\tvar (\n\t\toutputsWg = new(sync.WaitGroup)\n\t\tfiltersWg = new(sync.WaitGroup)\n\t\tinputsWg = new(sync.WaitGroup)\n\t\trouterWg = new(sync.WaitGroup)\n\n\t\tglobals = Globals()\n\t\terr error\n\t)\n\n\tif telemetry.Default == nil {\n\t\tlog.Info(\"engine starting, with telemetry disabled...\")\n\t} else {\n\t\tlog.Info(\"engine starting...\")\n\t}\n\n\t\/\/ setup signal handler first to avoid race condition\n\t\/\/ if Input terminates very soon, global.Shutdown will\n\t\/\/ not be able to trap it\n\tglobals.sigChan = make(chan os.Signal)\n\tsignal.Notify(globals.sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGUSR2)\n\n\te.launchHttpServ()\n\n\tif telemetry.Default != nil {\n\t\tlog.Info(\"launching telemetry dumper...\")\n\n\t\tgo func() {\n\t\t\tif err := telemetry.Default.Start(); err != nil {\n\t\t\t\tlog.Error(\"telemetry[%s]: %s\", telemetry.Default.Name(), err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor _, outputRunner := range e.OutputRunners {\n\t\tlog.Trace(\"launching Output[%s]...\", outputRunner.Name())\n\n\t\toutputsWg.Add(1)\n\t\tif err = outputRunner.start(e, outputsWg); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tfor _, filterRunner := range e.FilterRunners {\n\t\tlog.Trace(\"launching Filter[%s]...\", filterRunner.Name())\n\n\t\tfiltersWg.Add(1)\n\t\tif err = filterRunner.start(e, filtersWg); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tfor inputName := range e.inputRecycleChans {\n\t\tlog.Info(\"building Input[%s] Packet pool with size=%d\", inputName, globals.InputRecyclePoolSize)\n\n\t\tfor i := 0; i < globals.InputRecyclePoolSize; i++ {\n\t\t\tinputPack := newPacket(e.inputRecycleChans[inputName])\n\t\t\te.inputRecycleChans[inputName] <- inputPack\n\t\t}\n\t}\n\n\tlog.Info(\"building Filter Packet pool with size=%d\", globals.FilterRecyclePoolSize)\n\tfor i := 0; i < globals.FilterRecyclePoolSize; i++ {\n\t\tfilterPack := newPacket(e.filterRecycleChan)\n\t\te.filterRecycleChan <- filterPack\n\t}\n\n\tlog.Info(\"launching Watchdog with ticker=%s\", globals.WatchdogTick)\n\tgo e.runWatchdog(globals.WatchdogTick)\n\n\trouterWg.Add(1)\n\tgo e.router.Start(routerWg)\n\n\tfor _, inputRunner := range e.InputRunners {\n\t\tlog.Trace(\"launching Input[%s]...\", inputRunner.Name())\n\n\t\tinputsWg.Add(1)\n\t\tif err = inputRunner.start(e, inputsWg); err != nil {\n\t\t\tinputsWg.Done()\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tcfChanged := make(chan struct{})\n\tgo e.watchConfig(cfChanged, time.Second)\n\n\tfor !globals.Stopping {\n\t\tselect {\n\t\tcase <-cfChanged:\n\t\t\tlog.Info(\"%s updated, closing...\", e.Conf.ConfPath())\n\t\t\tglobals.Stopping = true\n\n\t\tcase sig := <-globals.sigChan:\n\t\t\tlog.Info(\"Got signal %s\", strings.ToUpper(sig.String()))\n\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM:\n\t\t\t\tlog.Info(\"shutdown...\")\n\t\t\t\tglobals.Stopping = true\n\t\t\t\tret = ErrQuitingSigal\n\n\t\t\tcase syscall.SIGUSR1:\n\t\t\t\tobserver.Publish(SIGUSR1, nil)\n\n\t\t\tcase syscall.SIGUSR2:\n\t\t\t\tobserver.Publish(SIGUSR2, nil)\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(e.stopper)\n\n\tif telemetry.Default != nil {\n\t\ttelemetry.Default.Stop()\n\t}\n\n\te.Lock()\n\tfor _, inputRunner := range e.InputRunners {\n\t\tif inputRunner == nil {\n\t\t\t\/\/ the Input plugin already exit\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Trace(\"Stop message sent to %s\", inputRunner.Name())\n\t\tinputRunner.Input().Stop(inputRunner)\n\t}\n\te.Unlock()\n\tinputsWg.Wait() \/\/ wait for all inputs done\n\tlog.Info(\"all Inputs stopped\")\n\n\t\/\/ ok, now we are sure no more inputs, but in route.inChan there\n\t\/\/ still may be filter injected packs and output not consumed packs\n\t\/\/ we must wait for all the packs to be consumed before shutdown\n\n\tfor _, filterRunner := range e.FilterRunners {\n\t\tlog.Trace(\"Stop message sent to %s\", filterRunner.Name())\n\t\te.router.removeFilterMatcher <- filterRunner.getMatcher()\n\t}\n\tfiltersWg.Wait()\n\tif len(e.FilterRunners) > 0 {\n\t\tlog.Info(\"all Filters stopped\")\n\t}\n\n\tfor _, outputRunner := range e.OutputRunners {\n\t\tlog.Trace(\"Stop message sent to %s\", outputRunner.Name())\n\t\te.router.removeOutputMatcher <- outputRunner.getMatcher()\n\t}\n\toutputsWg.Wait()\n\tlog.Info(\"all Outputs stopped\")\n\n\te.router.Stop()\n\trouterWg.Wait()\n\tlog.Info(\"Router stopped\")\n\n\te.stopHttpServ()\n\n\tif ret != nil {\n\t\tlog.Info(\"shutdown complete: %s!\", ret)\n\t} else {\n\t\tlog.Info(\"shutdown complete!\")\n\t}\n\n\treturn\n}\n<commit_msg>duplicated plugin name is NOT ALLOWED<commit_after>\/\/ +build !v2\n\n\/\/ Package provides a plugin based pipeline engine that decouples Input\/Filter\/Output plugins.\npackage engine\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/telemetry\"\n\t\"github.com\/funkygao\/gafka\/telemetry\/influxdb\"\n\t\"github.com\/funkygao\/go-metrics\"\n\t\"github.com\/funkygao\/golib\/observer\"\n\tconf \"github.com\/funkygao\/jsconf\"\n\tlog \"github.com\/funkygao\/log4go\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nvar (\n\t_ PluginHelper = &Engine{}\n)\n\n\/\/ Engine is the pipeline engine of the data bus system which manages the core loop.\ntype Engine struct {\n\tsync.RWMutex\n\n\t\/\/ Engine will load json config file\n\t*conf.Conf\n\n\t\/\/ REST exporter\n\thttpListener net.Listener\n\thttpServer *http.Server\n\thttpRouter *mux.Router\n\thttpPaths []string\n\n\tInputRunners map[string]InputRunner\n\tinputWrappers map[string]*pluginWrapper\n\n\tFilterRunners map[string]FilterRunner\n\tfilterWrappers map[string]*pluginWrapper\n\n\tOutputRunners map[string]OutputRunner\n\toutputWrappers map[string]*pluginWrapper\n\n\ttop *topology\n\trouter *Router\n\n\tinputRecycleChans map[string]chan *Packet\n\tfilterRecycleChan chan *Packet\n\n\thostname string\n\tpid int\n\tstopper chan struct{}\n}\n\nfunc New(globals *GlobalConfig) *Engine {\n\tif globals == nil {\n\t\tglobals = DefaultGlobals()\n\t}\n\tGlobals = func() *GlobalConfig {\n\t\treturn globals\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Engine{\n\t\tInputRunners: make(map[string]InputRunner),\n\t\tinputWrappers: make(map[string]*pluginWrapper),\n\t\tFilterRunners: make(map[string]FilterRunner),\n\t\tfilterWrappers: make(map[string]*pluginWrapper),\n\t\tOutputRunners: make(map[string]OutputRunner),\n\t\toutputWrappers: make(map[string]*pluginWrapper),\n\n\t\tinputRecycleChans: make(map[string]chan *Packet),\n\t\tfilterRecycleChan: make(chan *Packet, globals.FilterRecyclePoolSize),\n\n\t\ttop: newTopology(),\n\t\trouter: newRouter(),\n\n\t\thttpPaths: make([]string, 0, 6),\n\n\t\tpid: os.Getpid(),\n\t\thostname: hostname,\n\t\tstopper: make(chan struct{}),\n\t}\n}\n\nfunc (e *Engine) stopInputRunner(name string) {\n\te.Lock()\n\te.InputRunners[name] = nil\n\te.Unlock()\n}\n\n\/\/ ClonePacket is used for plugin Filter to generate new Packet.\n\/\/ The generated Packet will use dedicated filter recycle chan.\nfunc (e *Engine) ClonePacket(p *Packet) *Packet {\n\tpack := <-e.filterRecycleChan\n\tpack.Reset()\n\tp.copyTo(pack)\n\treturn pack\n}\n\nfunc (e *Engine) LoadConfigFile(fn string) *Engine {\n\tif fn == \"\" {\n\t\tpanic(\"config file is required\")\n\t}\n\tif _, err := os.Stat(fn); err != nil {\n\t\tpanic(err)\n\t}\n\tcf, err := conf.Load(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\te.Conf = cf\n\tGlobals().Conf = cf\n\n\t\/\/ 'plugins' section\n\tvar names = make(map[string]struct{})\n\tfor i := 0; i < len(e.List(\"plugins\", nil)); i++ {\n\t\tsection, err := e.Section(fmt.Sprintf(\"plugins[%d]\", i))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tname := e.loadPluginSection(section)\n\t\tif _, duplicated := names[name]; duplicated {\n\t\t\t\/\/ router.metrics will be bad with dup name\n\t\t\tpanic(\"duplicated plugin name: \" + name)\n\t\t}\n\t\tnames[name] = struct{}{}\n\t}\n\n\t\/\/ 'topology' section\n\te.top.load(e.Conf)\n\n\tif c, err := influxdb.NewConfig(cf.String(\"influx_addr\", \"\"),\n\t\tcf.String(\"influx_db\", \"dbus\"), \"\", \"\",\n\t\tcf.Duration(\"influx_tick\", time.Minute)); err == nil {\n\t\ttelemetry.Default = influxdb.New(metrics.DefaultRegistry, c)\n\t}\n\n\treturn e\n}\n\nfunc (e *Engine) loadPluginSection(section *conf.Conf) (name string) {\n\tpluginCommons := new(pluginCommons)\n\tpluginCommons.loadConfig(section)\n\tname = pluginCommons.name\n\tif pluginCommons.disabled {\n\t\tlog.Warn(\"%s disabled\", pluginCommons.name)\n\n\t\treturn\n\t}\n\n\twrapper := &pluginWrapper{\n\t\tname: pluginCommons.name,\n\t\tconfigCreator: func() *conf.Conf { return section },\n\t}\n\tvar ok bool\n\tif wrapper.pluginCreator, ok = availablePlugins[pluginCommons.class]; !ok {\n\t\tpanic(\"unknown plugin type: \" + pluginCommons.class)\n\t}\n\n\tpluginType := pluginTypeRegex.FindStringSubmatch(pluginCommons.class)\n\tif len(pluginType) < 2 {\n\t\tpanic(\"invalid plugin type: \" + pluginCommons.class)\n\t}\n\n\tplugin := wrapper.Create()\n\tpluginCategory := pluginType[1]\n\tif pluginCategory == \"Input\" {\n\t\te.inputRecycleChans[wrapper.name] = make(chan *Packet, Globals().InputRecyclePoolSize)\n\t\te.InputRunners[wrapper.name] = newInputRunner(plugin.(Input), pluginCommons)\n\t\te.inputWrappers[wrapper.name] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\t\treturn\n\t}\n\n\tfoRunner := newFORunner(plugin, pluginCommons)\n\tmatcher := newMatcher(section.StringList(\"match\", nil), foRunner)\n\tfoRunner.matcher = matcher\n\n\tswitch pluginCategory {\n\tcase \"Filter\":\n\t\te.router.addFilterMatcher(matcher)\n\t\te.FilterRunners[foRunner.Name()] = foRunner\n\t\te.filterWrappers[foRunner.Name()] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\n\tcase \"Output\":\n\t\te.router.addOutputMatcher(matcher)\n\t\te.OutputRunners[foRunner.Name()] = foRunner\n\t\te.outputWrappers[foRunner.Name()] = wrapper\n\t\te.router.metrics.m[wrapper.name] = metrics.NewRegisteredMeter(wrapper.name, metrics.DefaultRegistry)\n\n\tdefault:\n\t\tpanic(\"unknown plugin: \" + pluginCategory)\n\t}\n\n\treturn\n}\n\nfunc (e *Engine) ServeForever() (ret error) {\n\tvar (\n\t\toutputsWg = new(sync.WaitGroup)\n\t\tfiltersWg = new(sync.WaitGroup)\n\t\tinputsWg = new(sync.WaitGroup)\n\t\trouterWg = new(sync.WaitGroup)\n\n\t\tglobals = Globals()\n\t\terr error\n\t)\n\n\tif telemetry.Default == nil {\n\t\tlog.Info(\"engine starting, with telemetry disabled...\")\n\t} else {\n\t\tlog.Info(\"engine starting...\")\n\t}\n\n\t\/\/ setup signal handler first to avoid race condition\n\t\/\/ if Input terminates very soon, global.Shutdown will\n\t\/\/ not be able to trap it\n\tglobals.sigChan = make(chan os.Signal)\n\tsignal.Notify(globals.sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1, syscall.SIGUSR2)\n\n\te.launchHttpServ()\n\n\tif telemetry.Default != nil {\n\t\tlog.Info(\"launching telemetry dumper...\")\n\n\t\tgo func() {\n\t\t\tif err := telemetry.Default.Start(); err != nil {\n\t\t\t\tlog.Error(\"telemetry[%s]: %s\", telemetry.Default.Name(), err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor _, outputRunner := range e.OutputRunners {\n\t\tlog.Trace(\"launching Output[%s]...\", outputRunner.Name())\n\n\t\toutputsWg.Add(1)\n\t\tif err = outputRunner.start(e, outputsWg); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tfor _, filterRunner := range e.FilterRunners {\n\t\tlog.Trace(\"launching Filter[%s]...\", filterRunner.Name())\n\n\t\tfiltersWg.Add(1)\n\t\tif err = filterRunner.start(e, filtersWg); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tfor inputName := range e.inputRecycleChans {\n\t\tlog.Info(\"building Input[%s] Packet pool with size=%d\", inputName, globals.InputRecyclePoolSize)\n\n\t\tfor i := 0; i < globals.InputRecyclePoolSize; i++ {\n\t\t\tinputPack := newPacket(e.inputRecycleChans[inputName])\n\t\t\te.inputRecycleChans[inputName] <- inputPack\n\t\t}\n\t}\n\n\tlog.Info(\"building Filter Packet pool with size=%d\", globals.FilterRecyclePoolSize)\n\tfor i := 0; i < globals.FilterRecyclePoolSize; i++ {\n\t\tfilterPack := newPacket(e.filterRecycleChan)\n\t\te.filterRecycleChan <- filterPack\n\t}\n\n\tlog.Info(\"launching Watchdog with ticker=%s\", globals.WatchdogTick)\n\tgo e.runWatchdog(globals.WatchdogTick)\n\n\trouterWg.Add(1)\n\tgo e.router.Start(routerWg)\n\n\tfor _, inputRunner := range e.InputRunners {\n\t\tlog.Trace(\"launching Input[%s]...\", inputRunner.Name())\n\n\t\tinputsWg.Add(1)\n\t\tif err = inputRunner.start(e, inputsWg); err != nil {\n\t\t\tinputsWg.Done()\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tcfChanged := make(chan struct{})\n\tgo e.watchConfig(cfChanged, time.Second)\n\n\tfor !globals.Stopping {\n\t\tselect {\n\t\tcase <-cfChanged:\n\t\t\tlog.Info(\"%s updated, closing...\", e.Conf.ConfPath())\n\t\t\tglobals.Stopping = true\n\n\t\tcase sig := <-globals.sigChan:\n\t\t\tlog.Info(\"Got signal %s\", strings.ToUpper(sig.String()))\n\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM:\n\t\t\t\tlog.Info(\"shutdown...\")\n\t\t\t\tglobals.Stopping = true\n\t\t\t\tret = ErrQuitingSigal\n\n\t\t\tcase syscall.SIGUSR1:\n\t\t\t\tobserver.Publish(SIGUSR1, nil)\n\n\t\t\tcase syscall.SIGUSR2:\n\t\t\t\tobserver.Publish(SIGUSR2, nil)\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(e.stopper)\n\n\tif telemetry.Default != nil {\n\t\ttelemetry.Default.Stop()\n\t}\n\n\te.Lock()\n\tfor _, inputRunner := range e.InputRunners {\n\t\tif inputRunner == nil {\n\t\t\t\/\/ the Input plugin already exit\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Trace(\"Stop message sent to %s\", inputRunner.Name())\n\t\tinputRunner.Input().Stop(inputRunner)\n\t}\n\te.Unlock()\n\tinputsWg.Wait() \/\/ wait for all inputs done\n\tlog.Info(\"all Inputs stopped\")\n\n\t\/\/ ok, now we are sure no more inputs, but in route.inChan there\n\t\/\/ still may be filter injected packs and output not consumed packs\n\t\/\/ we must wait for all the packs to be consumed before shutdown\n\n\tfor _, filterRunner := range e.FilterRunners {\n\t\tlog.Trace(\"Stop message sent to %s\", filterRunner.Name())\n\t\te.router.removeFilterMatcher <- filterRunner.getMatcher()\n\t}\n\tfiltersWg.Wait()\n\tif len(e.FilterRunners) > 0 {\n\t\tlog.Info(\"all Filters stopped\")\n\t}\n\n\tfor _, outputRunner := range e.OutputRunners {\n\t\tlog.Trace(\"Stop message sent to %s\", outputRunner.Name())\n\t\te.router.removeOutputMatcher <- outputRunner.getMatcher()\n\t}\n\toutputsWg.Wait()\n\tlog.Info(\"all Outputs stopped\")\n\n\te.router.Stop()\n\trouterWg.Wait()\n\tlog.Info(\"Router stopped\")\n\n\te.stopHttpServ()\n\n\tif ret != nil {\n\t\tlog.Info(\"shutdown complete: %s!\", ret)\n\t} else {\n\t\tlog.Info(\"shutdown complete!\")\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either Version 3 of the License, or\n\/\/ (at your option) any later Version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ CurrentGaugeVersion represents the current version of Gauge\nvar CurrentGaugeVersion = &Version{0, 8, 2}\n\n\/\/ BuildMetadata represents build information of current release (e.g, nightly build information)\nvar BuildMetadata = \"\"\n\ntype Version struct {\n\tMajor int\n\tMinor int\n\tPatch int\n}\n\ntype VersionSupport struct {\n\tMinimum string\n\tMaximum string\n}\n\nfunc ParseVersion(versionText string) (*Version, error) {\n\tsplits := strings.Split(versionText, \".\")\n\tif len(splits) != 3 {\n\t\treturn nil, fmt.Errorf(\"Incorrect Version format. Version should be in the form 1.5.7\")\n\t}\n\tMajor, err := strconv.Atoi(splits[0])\n\tif err != nil {\n\t\treturn nil, VersionError(\"major\", splits[0], err)\n\t}\n\tMinor, err := strconv.Atoi(splits[1])\n\tif err != nil {\n\t\treturn nil, VersionError(\"minor\", splits[1], err)\n\t}\n\tPatch, err := strconv.Atoi(splits[2])\n\tif err != nil {\n\t\treturn nil, VersionError(\"patch\", splits[2], err)\n\t}\n\n\treturn &Version{Major, Minor, Patch}, nil\n}\n\nfunc VersionError(level, text string, err error) error {\n\treturn fmt.Errorf(\"Error parsing %s Version %s to integer. %s\", level, text, err.Error())\n}\n\nfunc (Version *Version) IsBetween(lower *Version, greater *Version) bool {\n\treturn Version.IsGreaterThanEqualTo(lower) && Version.IsLesserThanEqualTo(greater)\n}\n\nfunc (Version *Version) IsLesserThan(version1 *Version) bool {\n\treturn CompareVersions(Version, version1, LesserThanFunc)\n}\n\nfunc (Version *Version) IsGreaterThan(version1 *Version) bool {\n\treturn CompareVersions(Version, version1, GreaterThanFunc)\n}\n\nfunc (Version *Version) IsLesserThanEqualTo(version1 *Version) bool {\n\treturn Version.IsLesserThan(version1) || Version.IsEqualTo(version1)\n}\n\nfunc (Version *Version) IsGreaterThanEqualTo(version1 *Version) bool {\n\treturn Version.IsGreaterThan(version1) || Version.IsEqualTo(version1)\n}\n\nfunc (Version *Version) IsEqualTo(version1 *Version) bool {\n\treturn IsEqual(Version.Major, version1.Major) && IsEqual(Version.Minor, version1.Minor) && IsEqual(Version.Patch, version1.Patch)\n}\n\nfunc CompareVersions(first *Version, second *Version, compareFunc func(int, int) bool) bool {\n\tif compareFunc(first.Major, second.Major) {\n\t\treturn true\n\t} else if IsEqual(first.Major, second.Major) {\n\t\tif compareFunc(first.Minor, second.Minor) {\n\t\t\treturn true\n\t\t} else if IsEqual(first.Minor, second.Minor) {\n\t\t\tif compareFunc(first.Patch, second.Patch) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc LesserThanFunc(first, second int) bool {\n\treturn first < second\n}\n\nfunc GreaterThanFunc(first, second int) bool {\n\treturn first > second\n}\n\nfunc IsEqual(first, second int) bool {\n\treturn first == second\n}\n\nfunc (Version *Version) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", Version.Major, Version.Minor, Version.Patch)\n}\n\n\/\/ FullVersion returns the CurrentGaugeVersion including build metadata.\nfunc FullVersion() string {\n\tvar metadata string\n\tif BuildMetadata != \"\" {\n\t\tmetadata = fmt.Sprintf(\".%s\", BuildMetadata)\n\t}\n\treturn fmt.Sprintf(\"%s%s\", CurrentGaugeVersion.String(), metadata)\n}\n\ntype byDecreasingVersion []*Version\n\nfunc (a byDecreasingVersion) Len() int { return len(a) }\nfunc (a byDecreasingVersion) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byDecreasingVersion) Less(i, j int) bool {\n\treturn a[i].IsGreaterThan(a[j])\n}\n\nfunc GetLatestVersion(versions []*Version) *Version {\n\tsort.Sort(byDecreasingVersion(versions))\n\treturn versions[0]\n}\n\nfunc CheckCompatibility(currentVersion *Version, versionSupport *VersionSupport) error {\n\tminSupportVersion, err := ParseVersion(versionSupport.Minimum)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid minimum support version %s. : %s. \", versionSupport.Minimum, err.Error())\n\t}\n\tif versionSupport.Maximum != \"\" {\n\t\tmaxSupportVersion, err := ParseVersion(versionSupport.Maximum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Invalid maximum support version %s. : %s. \", versionSupport.Maximum, err.Error())\n\t\t}\n\t\tif currentVersion.IsBetween(minSupportVersion, maxSupportVersion) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Version %s is not between %s and %s\", currentVersion, minSupportVersion, maxSupportVersion)\n\t}\n\n\tif minSupportVersion.IsLesserThanEqualTo(currentVersion) {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Incompatible version. Minimum support version %s is higher than current version %s\", minSupportVersion, currentVersion)\n}\n<commit_msg>Revert \"Bumping version to 0.8.2\"<commit_after>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either Version 3 of the License, or\n\/\/ (at your option) any later Version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage version\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ CurrentGaugeVersion represents the current version of Gauge\nvar CurrentGaugeVersion = &Version{0, 8, 1}\n\n\/\/ BuildMetadata represents build information of current release (e.g, nightly build information)\nvar BuildMetadata = \"\"\n\ntype Version struct {\n\tMajor int\n\tMinor int\n\tPatch int\n}\n\ntype VersionSupport struct {\n\tMinimum string\n\tMaximum string\n}\n\nfunc ParseVersion(versionText string) (*Version, error) {\n\tsplits := strings.Split(versionText, \".\")\n\tif len(splits) != 3 {\n\t\treturn nil, fmt.Errorf(\"Incorrect Version format. Version should be in the form 1.5.7\")\n\t}\n\tMajor, err := strconv.Atoi(splits[0])\n\tif err != nil {\n\t\treturn nil, VersionError(\"major\", splits[0], err)\n\t}\n\tMinor, err := strconv.Atoi(splits[1])\n\tif err != nil {\n\t\treturn nil, VersionError(\"minor\", splits[1], err)\n\t}\n\tPatch, err := strconv.Atoi(splits[2])\n\tif err != nil {\n\t\treturn nil, VersionError(\"patch\", splits[2], err)\n\t}\n\n\treturn &Version{Major, Minor, Patch}, nil\n}\n\nfunc VersionError(level, text string, err error) error {\n\treturn fmt.Errorf(\"Error parsing %s Version %s to integer. %s\", level, text, err.Error())\n}\n\nfunc (Version *Version) IsBetween(lower *Version, greater *Version) bool {\n\treturn Version.IsGreaterThanEqualTo(lower) && Version.IsLesserThanEqualTo(greater)\n}\n\nfunc (Version *Version) IsLesserThan(version1 *Version) bool {\n\treturn CompareVersions(Version, version1, LesserThanFunc)\n}\n\nfunc (Version *Version) IsGreaterThan(version1 *Version) bool {\n\treturn CompareVersions(Version, version1, GreaterThanFunc)\n}\n\nfunc (Version *Version) IsLesserThanEqualTo(version1 *Version) bool {\n\treturn Version.IsLesserThan(version1) || Version.IsEqualTo(version1)\n}\n\nfunc (Version *Version) IsGreaterThanEqualTo(version1 *Version) bool {\n\treturn Version.IsGreaterThan(version1) || Version.IsEqualTo(version1)\n}\n\nfunc (Version *Version) IsEqualTo(version1 *Version) bool {\n\treturn IsEqual(Version.Major, version1.Major) && IsEqual(Version.Minor, version1.Minor) && IsEqual(Version.Patch, version1.Patch)\n}\n\nfunc CompareVersions(first *Version, second *Version, compareFunc func(int, int) bool) bool {\n\tif compareFunc(first.Major, second.Major) {\n\t\treturn true\n\t} else if IsEqual(first.Major, second.Major) {\n\t\tif compareFunc(first.Minor, second.Minor) {\n\t\t\treturn true\n\t\t} else if IsEqual(first.Minor, second.Minor) {\n\t\t\tif compareFunc(first.Patch, second.Patch) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc LesserThanFunc(first, second int) bool {\n\treturn first < second\n}\n\nfunc GreaterThanFunc(first, second int) bool {\n\treturn first > second\n}\n\nfunc IsEqual(first, second int) bool {\n\treturn first == second\n}\n\nfunc (Version *Version) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", Version.Major, Version.Minor, Version.Patch)\n}\n\n\/\/ FullVersion returns the CurrentGaugeVersion including build metadata.\nfunc FullVersion() string {\n\tvar metadata string\n\tif BuildMetadata != \"\" {\n\t\tmetadata = fmt.Sprintf(\".%s\", BuildMetadata)\n\t}\n\treturn fmt.Sprintf(\"%s%s\", CurrentGaugeVersion.String(), metadata)\n}\n\ntype byDecreasingVersion []*Version\n\nfunc (a byDecreasingVersion) Len() int { return len(a) }\nfunc (a byDecreasingVersion) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byDecreasingVersion) Less(i, j int) bool {\n\treturn a[i].IsGreaterThan(a[j])\n}\n\nfunc GetLatestVersion(versions []*Version) *Version {\n\tsort.Sort(byDecreasingVersion(versions))\n\treturn versions[0]\n}\n\nfunc CheckCompatibility(currentVersion *Version, versionSupport *VersionSupport) error {\n\tminSupportVersion, err := ParseVersion(versionSupport.Minimum)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid minimum support version %s. : %s. \", versionSupport.Minimum, err.Error())\n\t}\n\tif versionSupport.Maximum != \"\" {\n\t\tmaxSupportVersion, err := ParseVersion(versionSupport.Maximum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Invalid maximum support version %s. : %s. \", versionSupport.Maximum, err.Error())\n\t\t}\n\t\tif currentVersion.IsBetween(minSupportVersion, maxSupportVersion) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Version %s is not between %s and %s\", currentVersion, minSupportVersion, maxSupportVersion)\n\t}\n\n\tif minSupportVersion.IsLesserThanEqualTo(currentVersion) {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Incompatible version. Minimum support version %s is higher than current version %s\", minSupportVersion, currentVersion)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\n\tsessionpkg \"github.com\/havoc-io\/mutagen\/pkg\/session\"\n\t\"github.com\/havoc-io\/mutagen\/pkg\/sync\"\n\turlpkg \"github.com\/havoc-io\/mutagen\/pkg\/url\"\n)\n\nfunc printEndpoint(name string, url *urlpkg.URL, configuration *sessionpkg.Configuration, version sessionpkg.Version) {\n\t\/\/ Print the endpoint header.\n\tfmt.Println(name, \"configuration:\")\n\n\t\/\/ Print the URL.\n\tfmt.Println(\"\\tURL:\", url.Format(\"\\n\\t\\t\"))\n\n\t\/\/ Compute and print the watch mode.\n\twatchModeDescription := configuration.WatchMode.Description()\n\tif configuration.WatchMode.IsDefault() {\n\t\twatchModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultWatchMode().Description())\n\t}\n\tfmt.Println(\"\\tWatch mode:\", watchModeDescription)\n\n\t\/\/ Compute and print the watch polling interval, so long as we're not in\n\t\/\/ no-watch mode.\n\tif configuration.WatchMode != sessionpkg.WatchMode_WatchModeNoWatch {\n\t\tvar watchPollingIntervalDescription string\n\t\tif configuration.WatchPollingInterval == 0 {\n\t\t\twatchPollingIntervalDescription = fmt.Sprintf(\"Default (%d seconds)\", version.DefaultWatchPollingInterval())\n\t\t} else {\n\t\t\twatchPollingIntervalDescription = fmt.Sprintf(\"%d seconds\", configuration.WatchPollingInterval)\n\t\t}\n\t\tfmt.Println(\"\\tWatch polling interval:\", watchPollingIntervalDescription)\n\t}\n\n\t\/\/ Compute and print the probe mode.\n\tprobeModeDescription := configuration.ProbeMode.Description()\n\tif configuration.ProbeMode.IsDefault() {\n\t\tprobeModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultProbeMode().Description())\n\t}\n\tfmt.Println(\"\\tProbe mode:\", probeModeDescription)\n\n\t\/\/ Compute and print the scan mode.\n\tscanModeDescription := configuration.ScanMode.Description()\n\tif configuration.ScanMode.IsDefault() {\n\t\tscanModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultScanMode().Description())\n\t}\n\tfmt.Println(\"\\tScan mode:\", scanModeDescription)\n\n\t\/\/ Compute and print the staging mode.\n\tstageModeDescription := configuration.StageMode.Description()\n\tif configuration.StageMode.IsDefault() {\n\t\tstageModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultStageMode().Description())\n\t}\n\tfmt.Println(\"\\tStage mode:\", stageModeDescription)\n\n\t\/\/ Compute and print the default file mode.\n\tvar defaultFileModeDescription string\n\tif configuration.DefaultFileMode == 0 {\n\t\tdefaultFileModeDescription = fmt.Sprintf(\"Default (%#o)\", version.DefaultFileMode())\n\t} else {\n\t\tdefaultFileModeDescription = fmt.Sprintf(\"%#o\", configuration.DefaultFileMode)\n\t}\n\tfmt.Println(\"\\tFile mode:\", defaultFileModeDescription)\n\n\t\/\/ Compute and print the default directory mode.\n\tvar defaultDirectoryModeDescription string\n\tif configuration.DefaultDirectoryMode == 0 {\n\t\tdefaultDirectoryModeDescription = fmt.Sprintf(\"Default (%#o)\", version.DefaultDirectoryMode())\n\t} else {\n\t\tdefaultDirectoryModeDescription = fmt.Sprintf(\"%#o\", configuration.DefaultDirectoryMode)\n\t}\n\tfmt.Println(\"\\tDirectory mode:\", defaultDirectoryModeDescription)\n\n\t\/\/ Compute and print the default file\/directory owner.\n\tdefaultOwnerDescription := \"Default\"\n\tif configuration.DefaultOwner != \"\" {\n\t\tdefaultOwnerDescription = configuration.DefaultOwner\n\t}\n\tfmt.Println(\"\\tDefault file\/directory owner:\", defaultOwnerDescription)\n\n\t\/\/ Compute and print the default file\/directory group.\n\tdefaultGroupDescription := \"Default\"\n\tif configuration.DefaultGroup != \"\" {\n\t\tdefaultGroupDescription = configuration.DefaultGroup\n\t}\n\tfmt.Println(\"\\tDefault file\/directory group:\", defaultGroupDescription)\n}\n\nfunc printSession(state *sessionpkg.State, long bool) {\n\t\/\/ Print the session identifier.\n\tfmt.Println(\"Session:\", state.Session.Identifier)\n\n\t\/\/ Print extended information, if desired.\n\tif long {\n\t\t\/\/ Print the configuration header.\n\t\tfmt.Println(\"Configuration:\")\n\n\t\t\/\/ Extract configuration.\n\t\tconfiguration := state.Session.Configuration\n\n\t\t\/\/ Compute and print synchronization mode.\n\t\tsynchronizationMode := configuration.SynchronizationMode.Description()\n\t\tif configuration.SynchronizationMode.IsDefault() {\n\t\t\tdefaultSynchronizationMode := state.Session.Version.DefaultSynchronizationMode()\n\t\t\tsynchronizationMode += fmt.Sprintf(\" (%s)\", defaultSynchronizationMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tSynchronization mode:\", synchronizationMode)\n\n\t\t\/\/ Compute and print maximum entry count.\n\t\tif configuration.MaximumEntryCount == 0 {\n\t\t\tfmt.Println(\"\\tMaximum entry count: Unlimited\")\n\t\t} else {\n\t\t\tfmt.Println(\"\\tMaximum entry count:\", configuration.MaximumEntryCount)\n\t\t}\n\n\t\t\/\/ Compute and print maximum staging file size.\n\t\tif configuration.MaximumStagingFileSize == 0 {\n\t\t\tfmt.Println(\"\\tMaximum staging file size: Unlimited\")\n\t\t} else {\n\t\t\tfmt.Printf(\n\t\t\t\t\"\\tMaximum staging file size: %d (%s)\\n\",\n\t\t\t\tconfiguration.MaximumStagingFileSize,\n\t\t\t\thumanize.Bytes(configuration.MaximumStagingFileSize),\n\t\t\t)\n\t\t}\n\n\t\t\/\/ Compute and print symlink mode.\n\t\tsymlinkModeDescription := configuration.SymlinkMode.Description()\n\t\tif configuration.SymlinkMode == sync.SymlinkMode_SymlinkModeDefault {\n\t\t\tdefaultSymlinkMode := state.Session.Version.DefaultSymlinkMode()\n\t\t\tsymlinkModeDescription += fmt.Sprintf(\" (%s)\", defaultSymlinkMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tSymbolic link mode:\", symlinkModeDescription)\n\n\t\t\/\/ Compute and print the VCS ignore mode.\n\t\tignoreVCSModeDescription := configuration.IgnoreVCSMode.Description()\n\t\tif configuration.IgnoreVCSMode == sync.IgnoreVCSMode_IgnoreVCSModeDefault {\n\t\t\tdefaultIgnoreVCSMode := state.Session.Version.DefaultIgnoreVCSMode()\n\t\t\tignoreVCSModeDescription += fmt.Sprintf(\" (%s)\", defaultIgnoreVCSMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tIgnore VCS mode:\", ignoreVCSModeDescription)\n\n\t\t\/\/ Print default ignores. Since this field is deprecated, we don't print\n\t\t\/\/ it if it's not set.\n\t\tif len(configuration.DefaultIgnores) > 0 {\n\t\t\tfmt.Println(\"\\tDefault ignores:\")\n\t\t\tfor _, p := range configuration.DefaultIgnores {\n\t\t\t\tfmt.Printf(\"\\t\\t%s\\n\", p)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Print per-session ignores.\n\t\tif len(configuration.Ignores) > 0 {\n\t\t\tfmt.Println(\"\\tIgnores:\")\n\t\t\tfor _, p := range configuration.Ignores {\n\t\t\t\tfmt.Printf(\"\\t\\t%s\\n\", p)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"\\tIgnores: None\")\n\t\t}\n\n\t\t\/\/ Compute and print alpha-specific configuration.\n\t\talphaConfigurationMerged := sessionpkg.MergeConfigurations(\n\t\t\tstate.Session.Configuration,\n\t\t\tstate.Session.ConfigurationAlpha,\n\t\t)\n\t\tprintEndpoint(\"Alpha\", state.Session.Alpha, alphaConfigurationMerged, state.Session.Version)\n\n\t\t\/\/ Compute and print beta-specific configuration.\n\t\tbetaConfigurationMerged := sessionpkg.MergeConfigurations(\n\t\t\tstate.Session.Configuration,\n\t\t\tstate.Session.ConfigurationBeta,\n\t\t)\n\t\tprintEndpoint(\"Beta\", state.Session.Beta, betaConfigurationMerged, state.Session.Version)\n\n\t\t\/\/ Print labels.\n\t\tif len(state.Session.Labels) > 0 {\n\t\t\tfmt.Println(\"\\tLabels:\")\n\t\t\tkeys := sessionpkg.ExtractAndSortLabelKeys(state.Session.Labels)\n\t\t\tfor _, key := range keys {\n\t\t\t\tfmt.Printf(\"\\t\\t%s: %s\\n\", key, state.Session.Labels[key])\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"\\tLabels: None\")\n\t\t}\n\t}\n}\n<commit_msg>Fixed outdated mode conditionals.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\n\tsessionpkg \"github.com\/havoc-io\/mutagen\/pkg\/session\"\n\turlpkg \"github.com\/havoc-io\/mutagen\/pkg\/url\"\n)\n\nfunc printEndpoint(name string, url *urlpkg.URL, configuration *sessionpkg.Configuration, version sessionpkg.Version) {\n\t\/\/ Print the endpoint header.\n\tfmt.Println(name, \"configuration:\")\n\n\t\/\/ Print the URL.\n\tfmt.Println(\"\\tURL:\", url.Format(\"\\n\\t\\t\"))\n\n\t\/\/ Compute and print the watch mode.\n\twatchModeDescription := configuration.WatchMode.Description()\n\tif configuration.WatchMode.IsDefault() {\n\t\twatchModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultWatchMode().Description())\n\t}\n\tfmt.Println(\"\\tWatch mode:\", watchModeDescription)\n\n\t\/\/ Compute and print the watch polling interval, so long as we're not in\n\t\/\/ no-watch mode.\n\tif configuration.WatchMode != sessionpkg.WatchMode_WatchModeNoWatch {\n\t\tvar watchPollingIntervalDescription string\n\t\tif configuration.WatchPollingInterval == 0 {\n\t\t\twatchPollingIntervalDescription = fmt.Sprintf(\"Default (%d seconds)\", version.DefaultWatchPollingInterval())\n\t\t} else {\n\t\t\twatchPollingIntervalDescription = fmt.Sprintf(\"%d seconds\", configuration.WatchPollingInterval)\n\t\t}\n\t\tfmt.Println(\"\\tWatch polling interval:\", watchPollingIntervalDescription)\n\t}\n\n\t\/\/ Compute and print the probe mode.\n\tprobeModeDescription := configuration.ProbeMode.Description()\n\tif configuration.ProbeMode.IsDefault() {\n\t\tprobeModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultProbeMode().Description())\n\t}\n\tfmt.Println(\"\\tProbe mode:\", probeModeDescription)\n\n\t\/\/ Compute and print the scan mode.\n\tscanModeDescription := configuration.ScanMode.Description()\n\tif configuration.ScanMode.IsDefault() {\n\t\tscanModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultScanMode().Description())\n\t}\n\tfmt.Println(\"\\tScan mode:\", scanModeDescription)\n\n\t\/\/ Compute and print the staging mode.\n\tstageModeDescription := configuration.StageMode.Description()\n\tif configuration.StageMode.IsDefault() {\n\t\tstageModeDescription += fmt.Sprintf(\" (%s)\", version.DefaultStageMode().Description())\n\t}\n\tfmt.Println(\"\\tStage mode:\", stageModeDescription)\n\n\t\/\/ Compute and print the default file mode.\n\tvar defaultFileModeDescription string\n\tif configuration.DefaultFileMode == 0 {\n\t\tdefaultFileModeDescription = fmt.Sprintf(\"Default (%#o)\", version.DefaultFileMode())\n\t} else {\n\t\tdefaultFileModeDescription = fmt.Sprintf(\"%#o\", configuration.DefaultFileMode)\n\t}\n\tfmt.Println(\"\\tFile mode:\", defaultFileModeDescription)\n\n\t\/\/ Compute and print the default directory mode.\n\tvar defaultDirectoryModeDescription string\n\tif configuration.DefaultDirectoryMode == 0 {\n\t\tdefaultDirectoryModeDescription = fmt.Sprintf(\"Default (%#o)\", version.DefaultDirectoryMode())\n\t} else {\n\t\tdefaultDirectoryModeDescription = fmt.Sprintf(\"%#o\", configuration.DefaultDirectoryMode)\n\t}\n\tfmt.Println(\"\\tDirectory mode:\", defaultDirectoryModeDescription)\n\n\t\/\/ Compute and print the default file\/directory owner.\n\tdefaultOwnerDescription := \"Default\"\n\tif configuration.DefaultOwner != \"\" {\n\t\tdefaultOwnerDescription = configuration.DefaultOwner\n\t}\n\tfmt.Println(\"\\tDefault file\/directory owner:\", defaultOwnerDescription)\n\n\t\/\/ Compute and print the default file\/directory group.\n\tdefaultGroupDescription := \"Default\"\n\tif configuration.DefaultGroup != \"\" {\n\t\tdefaultGroupDescription = configuration.DefaultGroup\n\t}\n\tfmt.Println(\"\\tDefault file\/directory group:\", defaultGroupDescription)\n}\n\nfunc printSession(state *sessionpkg.State, long bool) {\n\t\/\/ Print the session identifier.\n\tfmt.Println(\"Session:\", state.Session.Identifier)\n\n\t\/\/ Print extended information, if desired.\n\tif long {\n\t\t\/\/ Print the configuration header.\n\t\tfmt.Println(\"Configuration:\")\n\n\t\t\/\/ Extract configuration.\n\t\tconfiguration := state.Session.Configuration\n\n\t\t\/\/ Compute and print synchronization mode.\n\t\tsynchronizationMode := configuration.SynchronizationMode.Description()\n\t\tif configuration.SynchronizationMode.IsDefault() {\n\t\t\tdefaultSynchronizationMode := state.Session.Version.DefaultSynchronizationMode()\n\t\t\tsynchronizationMode += fmt.Sprintf(\" (%s)\", defaultSynchronizationMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tSynchronization mode:\", synchronizationMode)\n\n\t\t\/\/ Compute and print maximum entry count.\n\t\tif configuration.MaximumEntryCount == 0 {\n\t\t\tfmt.Println(\"\\tMaximum entry count: Unlimited\")\n\t\t} else {\n\t\t\tfmt.Println(\"\\tMaximum entry count:\", configuration.MaximumEntryCount)\n\t\t}\n\n\t\t\/\/ Compute and print maximum staging file size.\n\t\tif configuration.MaximumStagingFileSize == 0 {\n\t\t\tfmt.Println(\"\\tMaximum staging file size: Unlimited\")\n\t\t} else {\n\t\t\tfmt.Printf(\n\t\t\t\t\"\\tMaximum staging file size: %d (%s)\\n\",\n\t\t\t\tconfiguration.MaximumStagingFileSize,\n\t\t\t\thumanize.Bytes(configuration.MaximumStagingFileSize),\n\t\t\t)\n\t\t}\n\n\t\t\/\/ Compute and print symlink mode.\n\t\tsymlinkModeDescription := configuration.SymlinkMode.Description()\n\t\tif configuration.SymlinkMode.IsDefault() {\n\t\t\tdefaultSymlinkMode := state.Session.Version.DefaultSymlinkMode()\n\t\t\tsymlinkModeDescription += fmt.Sprintf(\" (%s)\", defaultSymlinkMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tSymbolic link mode:\", symlinkModeDescription)\n\n\t\t\/\/ Compute and print the VCS ignore mode.\n\t\tignoreVCSModeDescription := configuration.IgnoreVCSMode.Description()\n\t\tif configuration.IgnoreVCSMode.IsDefault() {\n\t\t\tdefaultIgnoreVCSMode := state.Session.Version.DefaultIgnoreVCSMode()\n\t\t\tignoreVCSModeDescription += fmt.Sprintf(\" (%s)\", defaultIgnoreVCSMode.Description())\n\t\t}\n\t\tfmt.Println(\"\\tIgnore VCS mode:\", ignoreVCSModeDescription)\n\n\t\t\/\/ Print default ignores. Since this field is deprecated, we don't print\n\t\t\/\/ it if it's not set.\n\t\tif len(configuration.DefaultIgnores) > 0 {\n\t\t\tfmt.Println(\"\\tDefault ignores:\")\n\t\t\tfor _, p := range configuration.DefaultIgnores {\n\t\t\t\tfmt.Printf(\"\\t\\t%s\\n\", p)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Print per-session ignores.\n\t\tif len(configuration.Ignores) > 0 {\n\t\t\tfmt.Println(\"\\tIgnores:\")\n\t\t\tfor _, p := range configuration.Ignores {\n\t\t\t\tfmt.Printf(\"\\t\\t%s\\n\", p)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"\\tIgnores: None\")\n\t\t}\n\n\t\t\/\/ Compute and print alpha-specific configuration.\n\t\talphaConfigurationMerged := sessionpkg.MergeConfigurations(\n\t\t\tstate.Session.Configuration,\n\t\t\tstate.Session.ConfigurationAlpha,\n\t\t)\n\t\tprintEndpoint(\"Alpha\", state.Session.Alpha, alphaConfigurationMerged, state.Session.Version)\n\n\t\t\/\/ Compute and print beta-specific configuration.\n\t\tbetaConfigurationMerged := sessionpkg.MergeConfigurations(\n\t\t\tstate.Session.Configuration,\n\t\t\tstate.Session.ConfigurationBeta,\n\t\t)\n\t\tprintEndpoint(\"Beta\", state.Session.Beta, betaConfigurationMerged, state.Session.Version)\n\n\t\t\/\/ Print labels.\n\t\tif len(state.Session.Labels) > 0 {\n\t\t\tfmt.Println(\"\\tLabels:\")\n\t\t\tkeys := sessionpkg.ExtractAndSortLabelKeys(state.Session.Labels)\n\t\t\tfor _, key := range keys {\n\t\t\t\tfmt.Printf(\"\\t\\t%s: %s\\n\", key, state.Session.Labels[key])\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"\\tLabels: None\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/spf13\/cobra\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tmemory = \"memory\"\n\tcpus = \"cpus\"\n\tconsole = \"console\"\n)\n\n\/\/ NewCmdStart starts a local cloud environment\nfunc NewCmdStart(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts a local cloud development environment\",\n\t\tLong: `Starts a local cloud development environment`,\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tflag := cmd.Flags().Lookup(minishift)\n\t\t\tisOpenshift := false\n\t\t\tif flag != nil {\n\t\t\t\tisOpenshift = flag.Value.String() == \"true\"\n\t\t\t}\n\n\t\t\tif !isInstalled(isOpenshift) {\n\t\t\t\tinstall(isOpenshift)\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\tkubeBinary = minishift\n\t\t\t}\n\n\t\t\tkubeBinary = resolveBinaryLocation(kubeBinary)\n\n\t\t\t\/\/ check if already running\n\t\t\tout, err := exec.Command(kubeBinary, \"status\").Output()\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to get status %v\", err)\n\t\t\t}\n\n\t\t\tif err == nil && strings.Contains(string(out), \"Running\") {\n\t\t\t\t\/\/ already running\n\t\t\t\tutil.Successf(\"%s already running\\n\", kubeBinary)\n\n\t\t\t\t\/\/ setting context\n\t\t\t\te := exec.Command(kubectl, \"config\", \"use-context\", kubeBinary)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\targs := []string{\"start\"}\n\n\t\t\t\t\/\/ if we're running on OSX default to using xhyve\n\t\t\t\tif runtime.GOOS == \"darwin\" {\n\t\t\t\t\targs = append(args, \"--vm-driver=xhyve\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ set memory flag\n\t\t\t\tmemoryValue := cmd.Flags().Lookup(memory).Value.String()\n\t\t\t\targs = append(args, \"--memory=\"+memoryValue)\n\n\t\t\t\t\/\/ set cpu flag\n\t\t\t\tcpusValue := cmd.Flags().Lookup(cpus).Value.String()\n\t\t\t\targs = append(args, \"--cpus=\"+cpusValue)\n\n\t\t\t\t\/\/ start the local VM\n\t\t\t\te := exec.Command(kubeBinary, args...)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\te := exec.Command(\"oc\", \"login\", \"--username=\"+minishiftDefaultUsername, \"--password=\"+minishiftDefaultPassword)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to login %v\", err)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t\/\/ now check that fabric8 is running, if not deploy it\n\t\t\tc, err := keepTryingToGetClient(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to connect to %s %v\", kubeBinary, err)\n\t\t\t}\n\n\t\t\t\/\/ deploy fabric8 if its not already running\n\t\t\tns, _, _ := f.DefaultNamespace()\n\t\t\t_, err = c.Services(ns).Get(\"fabric8\")\n\t\t\tif err != nil {\n\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\td := GetDefaultFabric8Deployment()\n\t\t\t\tflag := cmd.Flags().Lookup(console)\n\t\t\t\tif flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\t\td.appToRun = \"\"\n\t\t\t\t}\n\t\t\t\td.pv = true\n\t\t\t\tdeploy(f, d)\n\n\t\t\t} else {\n\t\t\t\topenService(ns, \"fabric8\", c, false)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.PersistentFlags().BoolP(minishift, \"\", false, \"start the openshift flavour of Kubernetes\")\n\tcmd.PersistentFlags().BoolP(console, \"\", false, \"start only the fabric8 console\")\n\tcmd.PersistentFlags().StringP(memory, \"\", \"4096\", \"amount of RAM allocated to the VM\")\n\tcmd.PersistentFlags().StringP(cpus, \"\", \"1\", \"number of CPUs allocated to the VM\")\n\treturn cmd\n}\n\n\/\/ lets find the executable on the PATH or in the fabric8 directory\nfunc resolveBinaryLocation(executable string) string {\n\tpath, err := exec.LookPath(executable)\n\tif err != nil || fileNotExist(path) {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tutil.Error(\"No $HOME environment variable found\")\n\t\t}\n\t\twriteFileLocation = home + binLocation\n\n\t\t\/\/ lets try in the fabric8 folder\n\t\tpath = filepath.Join(writeFileLocation, executable)\n\t\tif fileNotExist(path) {\n\t\t\tpath = executable\n\t\t\t\/\/ lets try in the folder where we found the gofabric8 executable\n\t\t\tfolder, err := osext.ExecutableFolder()\n\t\t\tif err != nil {\n\t\t\t\tpath = filepath.Join(folder, executable)\n\t\t\t\tif fileNotExist(path) {\n\t\t\t\t\tpath = executable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tutil.Infof(\"using the executable %s\\n\", path)\n\treturn path\n}\n\nfunc findExecutable(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m := d.Mode(); !m.IsDir() && m&0111 != 0 {\n\t\treturn nil\n\t}\n\treturn os.ErrPermission\n}\n\nfunc fileNotExist(path string) bool {\n\treturn findExecutable(path) != nil\n}\n\nfunc keepTryingToGetClient(f *cmdutil.Factory) (*client.Client, error) {\n\ttimeout := time.After(2 * time.Minute)\n\ttick := time.Tick(1 * time.Second)\n\t\/\/ Keep trying until we're timed out or got a result or got an error\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! fail with a timeout error\n\t\tcase <-timeout:\n\t\t\treturn nil, errors.New(\"timed out\")\n\t\t\/\/ Got a tick, try and get teh client\n\t\tcase <-tick:\n\t\t\tc, _ := getClient(f)\n\t\t\t\/\/ return if we have a client\n\t\t\tif c != nil {\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t\tutil.Info(\"Cannot connect to api server, retrying...\\n\")\n\t\t\t\/\/ retry\n\t\t}\n\t}\n}\n\nfunc getClient(f *cmdutil.Factory) (*client.Client, error) {\n\tvar err error\n\tcfg, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := client.New(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n<commit_msg>fixed incorrect context use and possible lack of PATH for kubectl<commit_after>\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/spf13\/cobra\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tmemory = \"memory\"\n\tcpus = \"cpus\"\n\tconsole = \"console\"\n)\n\n\/\/ NewCmdStart starts a local cloud environment\nfunc NewCmdStart(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts a local cloud development environment\",\n\t\tLong: `Starts a local cloud development environment`,\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tflag := cmd.Flags().Lookup(minishift)\n\t\t\tisOpenshift := false\n\t\t\tif flag != nil {\n\t\t\t\tisOpenshift = flag.Value.String() == \"true\"\n\t\t\t}\n\n\t\t\tif !isInstalled(isOpenshift) {\n\t\t\t\tinstall(isOpenshift)\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\tkubeBinary = minishift\n\t\t\t}\n\n\t\t\tbinaryFile := resolveBinaryLocation(kubeBinary)\n\n\t\t\t\/\/ check if already running\n\t\t\tout, err := exec.Command(binaryFile, \"status\").Output()\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to get status %v\", err)\n\t\t\t}\n\n\t\t\tif err == nil && strings.Contains(string(out), \"Running\") {\n\t\t\t\t\/\/ already running\n\t\t\t\tutil.Successf(\"%s already running\\n\", kubeBinary)\n\n\t\t\t\tkubectlBinaryFile := resolveBinaryLocation(kubectl)\n\n\t\t\t\t\/\/ setting context\n\t\t\t\te := exec.Command(kubectlBinaryFile, \"config\", \"use-context\", kubeBinary)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\targs := []string{\"start\"}\n\n\t\t\t\t\/\/ if we're running on OSX default to using xhyve\n\t\t\t\tif runtime.GOOS == \"darwin\" {\n\t\t\t\t\targs = append(args, \"--vm-driver=xhyve\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ set memory flag\n\t\t\t\tmemoryValue := cmd.Flags().Lookup(memory).Value.String()\n\t\t\t\targs = append(args, \"--memory=\"+memoryValue)\n\n\t\t\t\t\/\/ set cpu flag\n\t\t\t\tcpusValue := cmd.Flags().Lookup(cpus).Value.String()\n\t\t\t\targs = append(args, \"--cpus=\"+cpusValue)\n\n\t\t\t\t\/\/ start the local VM\n\t\t\t\te := exec.Command(binaryFile, args...)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\te := exec.Command(\"oc\", \"login\", \"--username=\"+minishiftDefaultUsername, \"--password=\"+minishiftDefaultPassword)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to login %v\", err)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t\/\/ now check that fabric8 is running, if not deploy it\n\t\t\tc, err := keepTryingToGetClient(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to connect to %s %v\", kubeBinary, err)\n\t\t\t}\n\n\t\t\t\/\/ deploy fabric8 if its not already running\n\t\t\tns, _, _ := f.DefaultNamespace()\n\t\t\t_, err = c.Services(ns).Get(\"fabric8\")\n\t\t\tif err != nil {\n\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\td := GetDefaultFabric8Deployment()\n\t\t\t\tflag := cmd.Flags().Lookup(console)\n\t\t\t\tif flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\t\td.appToRun = \"\"\n\t\t\t\t}\n\t\t\t\td.pv = true\n\t\t\t\tdeploy(f, d)\n\n\t\t\t} else {\n\t\t\t\topenService(ns, \"fabric8\", c, false)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.PersistentFlags().BoolP(minishift, \"\", false, \"start the openshift flavour of Kubernetes\")\n\tcmd.PersistentFlags().BoolP(console, \"\", false, \"start only the fabric8 console\")\n\tcmd.PersistentFlags().StringP(memory, \"\", \"4096\", \"amount of RAM allocated to the VM\")\n\tcmd.PersistentFlags().StringP(cpus, \"\", \"1\", \"number of CPUs allocated to the VM\")\n\treturn cmd\n}\n\n\/\/ lets find the executable on the PATH or in the fabric8 directory\nfunc resolveBinaryLocation(executable string) string {\n\tpath, err := exec.LookPath(executable)\n\tif err != nil || fileNotExist(path) {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tutil.Error(\"No $HOME environment variable found\")\n\t\t}\n\t\twriteFileLocation = home + binLocation\n\n\t\t\/\/ lets try in the fabric8 folder\n\t\tpath = filepath.Join(writeFileLocation, executable)\n\t\tif fileNotExist(path) {\n\t\t\tpath = executable\n\t\t\t\/\/ lets try in the folder where we found the gofabric8 executable\n\t\t\tfolder, err := osext.ExecutableFolder()\n\t\t\tif err != nil {\n\t\t\t\tpath = filepath.Join(folder, executable)\n\t\t\t\tif fileNotExist(path) {\n\t\t\t\t\tpath = executable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tutil.Infof(\"using the executable %s\\n\", path)\n\treturn path\n}\n\nfunc findExecutable(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m := d.Mode(); !m.IsDir() && m&0111 != 0 {\n\t\treturn nil\n\t}\n\treturn os.ErrPermission\n}\n\nfunc fileNotExist(path string) bool {\n\treturn findExecutable(path) != nil\n}\n\nfunc keepTryingToGetClient(f *cmdutil.Factory) (*client.Client, error) {\n\ttimeout := time.After(2 * time.Minute)\n\ttick := time.Tick(1 * time.Second)\n\t\/\/ Keep trying until we're timed out or got a result or got an error\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! fail with a timeout error\n\t\tcase <-timeout:\n\t\t\treturn nil, errors.New(\"timed out\")\n\t\t\/\/ Got a tick, try and get teh client\n\t\tcase <-tick:\n\t\t\tc, _ := getClient(f)\n\t\t\t\/\/ return if we have a client\n\t\t\tif c != nil {\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t\tutil.Info(\"Cannot connect to api server, retrying...\\n\")\n\t\t\t\/\/ retry\n\t\t}\n\t}\n}\n\nfunc getClient(f *cmdutil.Factory) (*client.Client, error) {\n\tvar err error\n\tcfg, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := client.New(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pop\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/markbates\/going\/defaults\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar lookupPaths = []string{\"\", \".\/config\", \"\/config\", \"..\/\", \"..\/config\", \"..\/..\", \"..\/..\/config\"}\n\n\/\/ ConfigName is the name of the YAML databases config file\nvar ConfigName = \"database.yml\"\n\nfunc init() {\n\tap := os.Getenv(\"APP_PATH\")\n\tif ap != \"\" {\n\t\tAddLookupPaths(ap)\n\t}\n\tap = os.Getenv(\"POP_PATH\")\n\tif ap != \"\" {\n\t\tAddLookupPaths(ap)\n\t}\n\tLoadConfigFile()\n}\n\n\/\/ LoadConfigFile loads a POP config file from the configured lookup paths\nfunc LoadConfigFile() error {\n\tpath, err := findConfigPath()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tConnections = map[string]*Connection{}\n\tif Debug {\n\t\tfmt.Printf(\"[POP]: Loading config file from %s\\n\", path)\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn LoadFrom(f)\n}\n\n\/\/ LookupPaths returns the current configuration lookup paths\nfunc LookupPaths() []string {\n\treturn lookupPaths\n}\n\n\/\/ AddLookupPaths add paths to the current lookup paths list\nfunc AddLookupPaths(paths ...string) error {\n\tlookupPaths = append(paths, lookupPaths...)\n\treturn LoadConfigFile()\n}\n\nfunc findConfigPath() (string, error) {\n\tfor _, p := range LookupPaths() {\n\t\tpath, _ := filepath.Abs(filepath.Join(p, ConfigName))\n\t\tif _, err := os.Stat(path); err == nil {\n\t\t\treturn path, err\n\t\t}\n\t}\n\treturn \"\", errors.New(\"[POP]: Tried to load configuration file, but couldn't find it\")\n}\n\n\/\/ LoadFrom reads a configuration from the reader and sets up the connections\nfunc LoadFrom(r io.Reader) error {\n\ttmpl := template.New(\"test\")\n\ttmpl.Funcs(map[string]interface{}{\n\t\t\"envOr\": func(s1, s2 string) string {\n\t\t\treturn defaults.String(os.Getenv(s1), s2)\n\t\t},\n\t\t\"env\": func(s1 string) string {\n\t\t\treturn os.Getenv(s1)\n\t\t},\n\t})\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tt, err := tmpl.Parse(string(b))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't parse config template\")\n\t}\n\n\tvar bb bytes.Buffer\n\terr = t.Execute(&bb, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't execute config template\")\n\t}\n\n\tdeets := map[string]*ConnectionDetails{}\n\terr = yaml.Unmarshal(bb.Bytes(), &deets)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't unmarshal config to yaml\")\n\t}\n\tfor n, d := range deets {\n\t\tcon, err := NewConnection(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tConnections[n] = con\n\t}\n\treturn nil\n}\n<commit_msg>fixed some logging<commit_after>package pop\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/markbates\/going\/defaults\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar lookupPaths = []string{\"\", \".\/config\", \"\/config\", \"..\/\", \"..\/config\", \"..\/..\", \"..\/..\/config\"}\n\n\/\/ ConfigName is the name of the YAML databases config file\nvar ConfigName = \"database.yml\"\n\nfunc init() {\n\tap := os.Getenv(\"APP_PATH\")\n\tif ap != \"\" {\n\t\tAddLookupPaths(ap)\n\t}\n\tap = os.Getenv(\"POP_PATH\")\n\tif ap != \"\" {\n\t\tAddLookupPaths(ap)\n\t}\n\tLoadConfigFile()\n}\n\n\/\/ LoadConfigFile loads a POP config file from the configured lookup paths\nfunc LoadConfigFile() error {\n\tpath, err := findConfigPath()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tConnections = map[string]*Connection{}\n\tLog(\"Loading config file from %s\\n\", path)\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn LoadFrom(f)\n}\n\n\/\/ LookupPaths returns the current configuration lookup paths\nfunc LookupPaths() []string {\n\treturn lookupPaths\n}\n\n\/\/ AddLookupPaths add paths to the current lookup paths list\nfunc AddLookupPaths(paths ...string) error {\n\tlookupPaths = append(paths, lookupPaths...)\n\treturn LoadConfigFile()\n}\n\nfunc findConfigPath() (string, error) {\n\tfor _, p := range LookupPaths() {\n\t\tpath, _ := filepath.Abs(filepath.Join(p, ConfigName))\n\t\tif _, err := os.Stat(path); err == nil {\n\t\t\treturn path, err\n\t\t}\n\t}\n\treturn \"\", errors.New(\"tried to load pop configuration file, but couldn't find it\")\n}\n\n\/\/ LoadFrom reads a configuration from the reader and sets up the connections\nfunc LoadFrom(r io.Reader) error {\n\ttmpl := template.New(\"test\")\n\ttmpl.Funcs(map[string]interface{}{\n\t\t\"envOr\": func(s1, s2 string) string {\n\t\t\treturn defaults.String(os.Getenv(s1), s2)\n\t\t},\n\t\t\"env\": func(s1 string) string {\n\t\t\treturn os.Getenv(s1)\n\t\t},\n\t})\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tt, err := tmpl.Parse(string(b))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't parse config template\")\n\t}\n\n\tvar bb bytes.Buffer\n\terr = t.Execute(&bb, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't execute config template\")\n\t}\n\n\tdeets := map[string]*ConnectionDetails{}\n\terr = yaml.Unmarshal(bb.Bytes(), &deets)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't unmarshal config to yaml\")\n\t}\n\tfor n, d := range deets {\n\t\tcon, err := NewConnection(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tConnections[n] = con\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/coreos\/fleet\/log\"\n\t\"github.com\/coreos\/fleet\/machine\"\n\t\"github.com\/coreos\/fleet\/pkg\"\n\t\"github.com\/coreos\/fleet\/registry\"\n)\n\nconst (\n\t\/\/ name of lease that must be held by the lead engine in a cluster\n\tengineLeaseName = \"engine-leader\"\n\n\t\/\/ version at which the current engine code operates\n\tengineVersion = 1\n)\n\ntype Engine struct {\n\trec *Reconciler\n\tregistry registry.Registry\n\tcRegistry registry.ClusterRegistry\n\tlRegistry registry.LeaseRegistry\n\trStream pkg.EventStream\n\tmachine machine.Machine\n\n\tlease registry.Lease\n\ttrigger chan struct{}\n}\n\nfunc New(reg *registry.EtcdRegistry, rStream pkg.EventStream, mach machine.Machine) *Engine {\n\trec := NewReconciler()\n\treturn &Engine{\n\t\trec: rec,\n\t\tregistry: reg,\n\t\tcRegistry: reg,\n\t\tlRegistry: reg,\n\t\trStream: rStream,\n\t\tmachine: mach,\n\t\ttrigger: make(chan struct{}),\n\t}\n}\n\nfunc (e *Engine) Run(ival time.Duration, stop chan bool) {\n\tleaseTTL := ival * 5\n\tmachID := e.machine.State().ID\n\n\treconcile := func() {\n\t\tif !ensureEngineVersionMatch(e.cRegistry, engineVersion) {\n\t\t\treturn\n\t\t}\n\n\t\tif e.lease == nil {\n\t\t\te.lease = acquireLeadership(e.lRegistry, machID, engineVersion, leaseTTL)\n\t\t} else {\n\t\t\te.lease = renewLeadership(e.lease, leaseTTL)\n\t\t}\n\n\t\tif e.lease == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ abort is closed when reconciliation must stop prematurely, either\n\t\t\/\/ by a local timeout or the fleet server shutting down\n\t\tabort := make(chan struct{})\n\n\t\t\/\/ monitor is used to shut down the following goroutine\n\t\tmonitor := make(chan struct{})\n\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-monitor:\n\t\t\t\treturn\n\t\t\tcase <-time.After(leaseTTL):\n\t\t\t\tclose(abort)\n\t\t\tcase <-stop:\n\t\t\t\tclose(abort)\n\t\t\t}\n\t\t}()\n\n\t\tstart := time.Now()\n\t\te.rec.Reconcile(e, abort)\n\t\tclose(monitor)\n\t\telapsed := time.Now().Sub(start)\n\n\t\tmsg := fmt.Sprintf(\"Engine completed reconciliation in %s\", elapsed)\n\t\tif elapsed > ival {\n\t\t\tlog.Warning(msg)\n\t\t} else {\n\t\t\tlog.V(1).Info(msg)\n\t\t}\n\t}\n\n\trec := pkg.NewPeriodicReconciler(ival, reconcile, e.rStream)\n\trec.Run(stop)\n}\n\nfunc (e *Engine) Purge() {\n\tif e.lease == nil {\n\t\treturn\n\t}\n\terr := e.lease.Release()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to release lease: %v\", err)\n\t}\n}\n\nfunc ensureEngineVersionMatch(cReg registry.ClusterRegistry, expect int) bool {\n\tv, err := cReg.EngineVersion()\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to determine cluster engine version\")\n\t\treturn false\n\t}\n\n\tif v < expect {\n\t\terr = cReg.UpdateEngineVersion(v, expect)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed updating cluster engine version from %d to %d: %v\", v, expect, err)\n\t\t\treturn false\n\t\t}\n\t\tlog.Infof(\"Updated cluster engine version from %d to %d\", v, expect)\n\t} else if v > expect {\n\t\tlog.V(1).Infof(\"Cluster engine version higher than local engine version (%d > %d), unable to participate\", v, expect)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc acquireLeadership(lReg registry.LeaseRegistry, machID string, ver int, ttl time.Duration) registry.Lease {\n\texisting, err := lReg.GetLease(engineLeaseName)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to determine current lessee: %v\", err)\n\t\treturn nil\n\t}\n\n\tvar l registry.Lease\n\tif existing == nil {\n\t\tl, err = lReg.AcquireLease(engineLeaseName, machID, ver, ttl)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Engine leadership acquisition failed: %v\", err)\n\t\t\treturn nil\n\t\t} else if l == nil {\n\t\t\tlog.V(1).Infof(\"Unable to acquire engine leadership\")\n\t\t\treturn nil\n\t\t}\n\t\tlog.Infof(\"Engine leadership acquired\")\n\t\treturn l\n\t}\n\n\tif existing.Version() >= ver {\n\t\tlog.V(1).Infof(\"Lease already held by Machine(%s) operating at acceptable version %d\", existing.MachineID(), existing.Version())\n\t\treturn nil\n\t}\n\n\tl, err = lReg.StealLease(engineLeaseName, machID, ver, ttl, existing.Index())\n\tif err != nil {\n\t\tlog.Errorf(\"Engine leadership steal failed: %v\", err)\n\t\treturn nil\n\t} else if l == nil {\n\t\tlog.V(1).Infof(\"Unable to steal engine leadership\")\n\t\treturn nil\n\t}\n\n\tlog.Infof(\"Stole engine leadership from Machine(%s)\", existing.MachineID())\n\n\trem := existing.TimeRemaining()\n\tif rem > 0 {\n\t\tlog.Infof(\"Waiting %v for previous lease to expire before continuing reconciliation\", rem)\n\t\t<-time.After(rem)\n\t}\n\n\treturn l\n}\n\nfunc renewLeadership(l registry.Lease, ttl time.Duration) registry.Lease {\n\terr := l.Renew(ttl)\n\tif err != nil {\n\t\tlog.Errorf(\"Engine leadership lost, renewal failed: %v\", err)\n\t\treturn nil\n\t}\n\n\tlog.V(1).Infof(\"Engine leadership renewed\")\n\treturn l\n}\n\nfunc (e *Engine) Trigger() {\n\te.trigger <- struct{}{}\n}\n\nfunc (e *Engine) clusterState() (*clusterState, error) {\n\tunits, err := e.registry.Units()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching Units from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tsUnits, err := e.registry.Schedule()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching schedule from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tmachines, err := e.registry.Machines()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching Machines from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn newClusterState(units, sUnits, machines), nil\n}\n\nfunc (e *Engine) unscheduleUnit(name, machID string) (err error) {\n\terr = e.registry.UnscheduleUnit(name, machID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed unscheduling Unit(%s) from Machine(%s): %v\", name, machID, err)\n\t} else {\n\t\tlog.Infof(\"Unscheduled Job(%s) from Machine(%s)\", name, machID)\n\t}\n\treturn\n}\n\n\/\/ attemptScheduleUnit tries to persist a scheduling decision in the\n\/\/ Registry, returning true on success. If any communication with the\n\/\/ Registry fails, false is returned.\nfunc (e *Engine) attemptScheduleUnit(name, machID string) bool {\n\terr := e.registry.ScheduleUnit(name, machID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed scheduling Unit(%s) to Machine(%s): %v\", name, machID, err)\n\t\treturn false\n\t}\n\n\tlog.Infof(\"Scheduled Unit(%s) to Machine(%s)\", name, machID)\n\treturn true\n}\n<commit_msg>engine: set TTL based on obsolete lease<commit_after>package engine\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/coreos\/fleet\/log\"\n\t\"github.com\/coreos\/fleet\/machine\"\n\t\"github.com\/coreos\/fleet\/pkg\"\n\t\"github.com\/coreos\/fleet\/registry\"\n)\n\nconst (\n\t\/\/ name of lease that must be held by the lead engine in a cluster\n\tengineLeaseName = \"engine-leader\"\n\n\t\/\/ version at which the current engine code operates\n\tengineVersion = 1\n)\n\ntype Engine struct {\n\trec *Reconciler\n\tregistry registry.Registry\n\tcRegistry registry.ClusterRegistry\n\tlRegistry registry.LeaseRegistry\n\trStream pkg.EventStream\n\tmachine machine.Machine\n\n\tlease registry.Lease\n\ttrigger chan struct{}\n}\n\nfunc New(reg *registry.EtcdRegistry, rStream pkg.EventStream, mach machine.Machine) *Engine {\n\trec := NewReconciler()\n\treturn &Engine{\n\t\trec: rec,\n\t\tregistry: reg,\n\t\tcRegistry: reg,\n\t\tlRegistry: reg,\n\t\trStream: rStream,\n\t\tmachine: mach,\n\t\ttrigger: make(chan struct{}),\n\t}\n}\n\nfunc (e *Engine) Run(ival time.Duration, stop chan bool) {\n\tleaseTTL := ival * 5\n\tmachID := e.machine.State().ID\n\n\treconcile := func() {\n\t\tif !ensureEngineVersionMatch(e.cRegistry, engineVersion) {\n\t\t\treturn\n\t\t}\n\n\t\tif e.lease == nil {\n\t\t\te.lease = acquireLeadership(e.lRegistry, machID, engineVersion, leaseTTL)\n\t\t} else {\n\t\t\te.lease = renewLeadership(e.lease, leaseTTL)\n\t\t}\n\n\t\tif e.lease == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ abort is closed when reconciliation must stop prematurely, either\n\t\t\/\/ by a local timeout or the fleet server shutting down\n\t\tabort := make(chan struct{})\n\n\t\t\/\/ monitor is used to shut down the following goroutine\n\t\tmonitor := make(chan struct{})\n\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-monitor:\n\t\t\t\treturn\n\t\t\tcase <-time.After(leaseTTL):\n\t\t\t\tclose(abort)\n\t\t\tcase <-stop:\n\t\t\t\tclose(abort)\n\t\t\t}\n\t\t}()\n\n\t\tstart := time.Now()\n\t\te.rec.Reconcile(e, abort)\n\t\tclose(monitor)\n\t\telapsed := time.Now().Sub(start)\n\n\t\tmsg := fmt.Sprintf(\"Engine completed reconciliation in %s\", elapsed)\n\t\tif elapsed > ival {\n\t\t\tlog.Warning(msg)\n\t\t} else {\n\t\t\tlog.V(1).Info(msg)\n\t\t}\n\t}\n\n\trec := pkg.NewPeriodicReconciler(ival, reconcile, e.rStream)\n\trec.Run(stop)\n}\n\nfunc (e *Engine) Purge() {\n\tif e.lease == nil {\n\t\treturn\n\t}\n\terr := e.lease.Release()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to release lease: %v\", err)\n\t}\n}\n\nfunc ensureEngineVersionMatch(cReg registry.ClusterRegistry, expect int) bool {\n\tv, err := cReg.EngineVersion()\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to determine cluster engine version\")\n\t\treturn false\n\t}\n\n\tif v < expect {\n\t\terr = cReg.UpdateEngineVersion(v, expect)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed updating cluster engine version from %d to %d: %v\", v, expect, err)\n\t\t\treturn false\n\t\t}\n\t\tlog.Infof(\"Updated cluster engine version from %d to %d\", v, expect)\n\t} else if v > expect {\n\t\tlog.V(1).Infof(\"Cluster engine version higher than local engine version (%d > %d), unable to participate\", v, expect)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc acquireLeadership(lReg registry.LeaseRegistry, machID string, ver int, ttl time.Duration) registry.Lease {\n\texisting, err := lReg.GetLease(engineLeaseName)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to determine current lessee: %v\", err)\n\t\treturn nil\n\t}\n\n\tvar l registry.Lease\n\tif existing == nil {\n\t\tl, err = lReg.AcquireLease(engineLeaseName, machID, ver, ttl)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Engine leadership acquisition failed: %v\", err)\n\t\t\treturn nil\n\t\t} else if l == nil {\n\t\t\tlog.V(1).Infof(\"Unable to acquire engine leadership\")\n\t\t\treturn nil\n\t\t}\n\t\tlog.Infof(\"Engine leadership acquired\")\n\t\treturn l\n\t}\n\n\tif existing.Version() >= ver {\n\t\tlog.V(1).Infof(\"Lease already held by Machine(%s) operating at acceptable version %d\", existing.MachineID(), existing.Version())\n\t\treturn nil\n\t}\n\n\trem := existing.TimeRemaining()\n\tl, err = lReg.StealLease(engineLeaseName, machID, ver, ttl+rem, existing.Index())\n\tif err != nil {\n\t\tlog.Errorf(\"Engine leadership steal failed: %v\", err)\n\t\treturn nil\n\t} else if l == nil {\n\t\tlog.V(1).Infof(\"Unable to steal engine leadership\")\n\t\treturn nil\n\t}\n\n\tlog.Infof(\"Stole engine leadership from Machine(%s)\", existing.MachineID())\n\n\tif rem > 0 {\n\t\tlog.Infof(\"Waiting %v for previous lease to expire before continuing reconciliation\", rem)\n\t\t<-time.After(rem)\n\t}\n\n\treturn l\n}\n\nfunc renewLeadership(l registry.Lease, ttl time.Duration) registry.Lease {\n\terr := l.Renew(ttl)\n\tif err != nil {\n\t\tlog.Errorf(\"Engine leadership lost, renewal failed: %v\", err)\n\t\treturn nil\n\t}\n\n\tlog.V(1).Infof(\"Engine leadership renewed\")\n\treturn l\n}\n\nfunc (e *Engine) Trigger() {\n\te.trigger <- struct{}{}\n}\n\nfunc (e *Engine) clusterState() (*clusterState, error) {\n\tunits, err := e.registry.Units()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching Units from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tsUnits, err := e.registry.Schedule()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching schedule from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tmachines, err := e.registry.Machines()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed fetching Machines from Registry: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn newClusterState(units, sUnits, machines), nil\n}\n\nfunc (e *Engine) unscheduleUnit(name, machID string) (err error) {\n\terr = e.registry.UnscheduleUnit(name, machID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed unscheduling Unit(%s) from Machine(%s): %v\", name, machID, err)\n\t} else {\n\t\tlog.Infof(\"Unscheduled Job(%s) from Machine(%s)\", name, machID)\n\t}\n\treturn\n}\n\n\/\/ attemptScheduleUnit tries to persist a scheduling decision in the\n\/\/ Registry, returning true on success. If any communication with the\n\/\/ Registry fails, false is returned.\nfunc (e *Engine) attemptScheduleUnit(name, machID string) bool {\n\terr := e.registry.ScheduleUnit(name, machID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed scheduling Unit(%s) to Machine(%s): %v\", name, machID, err)\n\t\treturn false\n\t}\n\n\tlog.Infof(\"Scheduled Unit(%s) to Machine(%s)\", name, machID)\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package version returns the version of the application\npackage version\n\n\/\/ Version returns the current application version\nconst Version = \"1.1.3\"\n<commit_msg>bump to v1.1.4<commit_after>\/\/ Package version returns the version of the application\npackage version\n\n\/\/ Version returns the current application version\nconst Version = \"1.1.4\"\n<|endoftext|>"} {"text":"<commit_before>package deploy\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/ViBiOh\/auth\/pkg\/auth\"\n\t\"github.com\/ViBiOh\/auth\/pkg\/model\"\n\t\"github.com\/ViBiOh\/dashboard\/pkg\/commons\"\n\t\"github.com\/ViBiOh\/dashboard\/pkg\/docker\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/httperror\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/httpjson\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/request\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/rollbar\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/tools\"\n\t\"github.com\/ViBiOh\/mailer\/pkg\/client\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n)\n\nconst (\n\t\/\/ DeployTimeout indicates delay for application to deploy before rollback\n\tDeployTimeout = 3 * time.Minute\n\n\tdefaultCPUShares = 128\n\tminMemory = 16777216\n\tmaxMemory = 805306368\n\tcolonSeparator = `:`\n\tdeploySuffix = `_deploy`\n)\n\n\/\/ App stores informations\ntype App struct {\n\ttasks sync.Map\n\tdockerApp *docker.App\n\tauthApp *auth.App\n\tnetwork string\n\ttag string\n\tcontainerUser string\n\tappURL string\n\tnotification string\n\tmailerApp *client.App\n}\n\n\/\/ NewApp creates new App from Flags' config\nfunc NewApp(config map[string]*string, authApp *auth.App, dockerApp *docker.App, mailerApp *client.App) *App {\n\treturn &App{\n\t\ttasks: sync.Map{},\n\t\tdockerApp: dockerApp,\n\t\tauthApp: authApp,\n\t\tmailerApp: mailerApp,\n\t\tnetwork: strings.TrimSpace(*config[`network`]),\n\t\ttag: strings.TrimSpace(*config[`tag`]),\n\t\tcontainerUser: strings.TrimSpace(*config[`containerUser`]),\n\t\tappURL: strings.TrimSpace(*config[`appURL`]),\n\t\tnotification: strings.TrimSpace(*config[`notification`]),\n\t}\n}\n\n\/\/ Flags adds flags for given prefix\nfunc Flags(prefix string) map[string]*string {\n\treturn map[string]*string{\n\t\t`network`: flag.String(tools.ToCamel(fmt.Sprintf(`%sNetwork`, prefix)), `traefik`, `[deploy] Default Network`),\n\t\t`tag`: flag.String(tools.ToCamel(fmt.Sprintf(`%sTag`, prefix)), `latest`, `[deploy] Default image tag)`),\n\t\t`containerUser`: flag.String(tools.ToCamel(fmt.Sprintf(`%sContainerUser`, prefix)), `1000`, `[deploy] Default container user`),\n\t\t`appURL`: flag.String(tools.ToCamel(fmt.Sprintf(`%sAppURL`, prefix)), `https:\/\/dashboard.vibioh.fr`, `[deploy] Application web URL`),\n\t\t`notification`: flag.String(tools.ToCamel(fmt.Sprintf(`%sNotification`, prefix)), `onError`, `[deploy] Send email notification when deploy ends (possibles values ares \"never\", \"onError\", \"all\")`),\n\t}\n}\n\n\/\/ CanBeGracefullyClosed indicates if application can terminate safely\nfunc (a *App) CanBeGracefullyClosed() (canBe bool) {\n\tcanBe = true\n\n\ta.tasks.Range(func(_ interface{}, value interface{}) bool {\n\t\tcanBe = !value.(bool)\n\t\treturn canBe\n\t})\n\n\treturn\n}\n\nfunc (a *App) pullImage(ctx context.Context, image string) error {\n\tif !strings.Contains(image, colonSeparator) {\n\t\timage = fmt.Sprintf(`%s%slatest`, image, colonSeparator)\n\t}\n\n\tpull, err := a.dockerApp.Docker.ImagePull(ctx, image, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(`Error while pulling image: %v`, err)\n\t}\n\n\t_, err = request.ReadBody(pull)\n\treturn err\n}\n\nfunc (a *App) cleanContainers(ctx context.Context, containers []types.Container) error {\n\tfor _, container := range containers {\n\t\tif _, err := a.dockerApp.GracefulStopContainer(ctx, container.ID, time.Minute); err != nil {\n\t\t\trollbar.LogError(`Error while stopping container %s: %v`, container.Names, err)\n\t\t}\n\t}\n\n\tfor _, container := range containers {\n\t\tif _, err := a.dockerApp.RmContainer(ctx, container.ID, nil, false); err != nil {\n\t\t\treturn fmt.Errorf(`Error while deleting container %s: %v`, container.Names, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) renameDeployedContainers(ctx context.Context, services map[string]*deployedService) error {\n\tfor _, service := range services {\n\t\tif err := a.dockerApp.Docker.ContainerRename(ctx, service.ContainerID, getFinalName(service.FullName)); err != nil {\n\t\t\treturn fmt.Errorf(`Error while renaming container %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) deleteServices(ctx context.Context, appName string, services map[string]*deployedService, user *model.User) {\n\tfor _, service := range services {\n\t\tinfos, err := a.dockerApp.InspectContainer(ctx, service.ContainerID)\n\t\tif err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while inspecting service %s: %v`, user.Username, appName, service.Name, err)\n\t\t} else {\n\t\t\tif _, err := a.dockerApp.StopContainer(ctx, service.ContainerID, infos); err != nil {\n\t\t\t\trollbar.LogError(`[%s] [%s] Error while stopping service %s: %v`, user.Username, appName, service.Name, err)\n\t\t\t}\n\n\t\t\tif _, err := a.dockerApp.RmContainer(ctx, service.ContainerID, infos, true); err != nil {\n\t\t\t\trollbar.LogError(`[%s] [%s] Error while deleting service %s: %v`, user.Username, appName, service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *App) startServices(ctx context.Context, services map[string]*deployedService) error {\n\tfor _, service := range services {\n\t\tif _, err := a.dockerApp.StartContainer(ctx, service.ContainerID, nil); err != nil {\n\t\t\treturn fmt.Errorf(`Error while starting service %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) inspectServices(ctx context.Context, services map[string]*deployedService, user *model.User, appName string) []*types.ContainerJSON {\n\tcontainers := make([]*types.ContainerJSON, 0, len(services))\n\n\tfor _, service := range services {\n\t\tinfos, err := a.dockerApp.InspectContainer(ctx, service.ContainerID)\n\t\tif err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while inspecting container %s: %v`, user.Username, appName, service.Name, err)\n\t\t} else {\n\t\t\tcontainers = append(containers, infos)\n\t\t}\n\t}\n\n\treturn containers\n}\n\nfunc (a *App) areContainersHealthy(ctx context.Context, user *model.User, appName string, services map[string]*deployedService) bool {\n\tcontainersServices := a.inspectServices(ctx, services, user, appName)\n\tcontainersIdsWithHealthcheck := commons.GetContainersIDs(commons.FilterContainers(containersServices, hasHealthcheck))\n\n\tif len(containersIdsWithHealthcheck) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, id := range containersIdsWithHealthcheck {\n\t\tif service := findServiceByContainerID(services, id); service != nil {\n\t\t\tservice.State = `unhealthy`\n\t\t}\n\t}\n\n\tfiltersArgs := filters.NewArgs()\n\thealthyStatusFilters(&filtersArgs, containersIdsWithHealthcheck)\n\n\ttimeoutCtx, cancel := context.WithTimeout(ctx, DeployTimeout)\n\tdefer cancel()\n\n\tmessages, errors := a.dockerApp.Docker.Events(timeoutCtx, types.EventsOptions{Filters: filtersArgs})\n\thealthyContainers := make(map[string]bool, len(containersIdsWithHealthcheck))\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn false\n\t\tcase message := <-messages:\n\t\t\tif service := findServiceByContainerID(services, message.ID); service != nil {\n\t\t\t\tservice.State = `healthy`\n\t\t\t}\n\n\t\t\thealthyContainers[message.ID] = true\n\t\t\tif len(healthyContainers) == len(containersIdsWithHealthcheck) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase err := <-errors:\n\t\t\trollbar.LogError(`[%s] [%s] Error while reading healthy events: %v`, user.Username, appName, err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc (a *App) finishDeploy(ctx context.Context, cancel context.CancelFunc, user *model.User, appName string, services map[string]*deployedService, oldContainers []types.Container, requestParams url.Values) {\n\tspan := opentracing.SpanFromContext(ctx)\n\tspan.SetTag(`app`, appName)\n\tspan.SetTag(`services_count`, len(services))\n\tdefer func() {\n\t\tdefer a.tasks.Delete(appName)\n\t\tdefer span.Finish()\n\t\tdefer cancel()\n\t}()\n\n\tsuccess := a.areContainersHealthy(ctx, user, appName, services)\n\ta.captureServicesOutput(ctx, user, appName, services)\n\n\tif success {\n\t\tif err := a.cleanContainers(ctx, oldContainers); err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while cleaning old containers: %v`, user.Username, appName, err)\n\t\t}\n\n\t\tif err := a.renameDeployedContainers(ctx, services); err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while renaming deployed containers: %v`, user.Username, appName, err)\n\t\t}\n\t} else {\n\t\trollbar.LogWarning(`[%s] [%s] Failed to deploy: %v`, user.Username, appName, errHealthCheckFailed)\n\t\ta.deleteServices(ctx, appName, services, user)\n\t}\n\n\tif !success {\n\t\tfor _, service := range services {\n\t\t\tlog.Printf(\"[%s] [%s] Logs output for %s: \\n%s\\n\", user.Username, appName, service.Name, strings.Join(service.Logs, \"\\n\"))\n\t\t\tlog.Printf(\"[%s] [%s] Health output for %s: \\n%s\\n\", user.Username, appName, service.Name, strings.Join(service.HealthLogs, \"\\n\"))\n\t\t}\n\t}\n\n\tif err := a.sendEmailNotification(ctx, user, appName, services, success); err != nil {\n\t\trollbar.LogError(`[%s] [%s] Error while sending email notification: %s`, user.Username, appName, err)\n\t}\n\n\tif err := a.sendRollbarNotification(ctx, user, requestParams); err != nil {\n\t\trollbar.LogError(`[%s] [%s] Error while sending rollbar notification: %s`, user.Username, appName, err)\n\t}\n}\n\nfunc (a *App) createContainer(ctx context.Context, user *model.User, appName string, serviceName string, service *dockerComposeService) (*deployedService, error) {\n\timagePulled := false\n\n\tif a.tag != `` {\n\t\timageOverride := fmt.Sprintf(`%s%s%s`, service.Image, colonSeparator, a.tag)\n\t\tif err := a.pullImage(ctx, imageOverride); err == nil {\n\t\t\tservice.Image = imageOverride\n\t\t\timagePulled = true\n\t\t}\n\t}\n\n\tif !imagePulled {\n\t\tif err := a.pullImage(ctx, service.Image); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tserviceFullName := getServiceFullName(appName, serviceName)\n\n\tconfig, err := a.getConfig(service, user, appName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`Error while getting config: %v`, err)\n\t}\n\n\tcreatedContainer, err := a.dockerApp.Docker.ContainerCreate(ctx, config, a.getHostConfig(service, user), a.getNetworkConfig(serviceName, service), serviceFullName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`Error while creating service %s: %v`, serviceName, err)\n\t}\n\n\treturn &deployedService{\n\t\tName: serviceName,\n\t\tFullName: serviceFullName,\n\t\tContainerID: createdContainer.ID,\n\t\tImageName: service.Image,\n\t}, nil\n}\n\nfunc (a *App) parseCompose(ctx context.Context, user *model.User, appName string, composeFile []byte) (map[string]*deployedService, error) {\n\tcomposeFile = bytes.Replace(composeFile, []byte(`$$`), []byte(`$`), -1)\n\n\tcompose := dockerCompose{}\n\tif err := yaml.Unmarshal(composeFile, &compose); err != nil {\n\t\treturn nil, fmt.Errorf(`[%s] [%s] Error while unmarshalling compose file: %v`, user.Username, appName, err)\n\t}\n\n\tnewServices := make(map[string]*deployedService)\n\tfor serviceName, service := range compose.Services {\n\t\tif deployedService, err := a.createContainer(ctx, user, appName, serviceName, &service); err != nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tnewServices[serviceName] = deployedService\n\t\t}\n\t}\n\n\treturn newServices, nil\n}\n\nfunc composeFailed(w http.ResponseWriter, user *model.User, appName string, err error) {\n\thttperror.InternalServerError(w, fmt.Errorf(`[%s] [%s] Failed to deploy: %v`, user.Username, appName, err))\n}\n\nfunc (a *App) composeHandler(w http.ResponseWriter, r *http.Request, user *model.User) {\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tappName, composeFile, err := checkParams(r, user)\n\tif err != nil {\n\t\thttperror.BadRequest(w, err)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\n\toldContainers, err := a.checkRights(ctx, user, appName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusForbidden)\n\t\treturn\n\t}\n\n\tnewServices, err := a.parseCompose(ctx, user, appName, composeFile)\n\tif err != nil {\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tif err = a.checkTasks(user, appName); err != nil {\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tparentSpanContext := opentracing.SpanFromContext(r.Context()).Context()\n\t_, ctx = opentracing.StartSpanFromContext(ctx, `Deploy`, opentracing.FollowsFrom(parentSpanContext))\n\n\tgo a.finishDeploy(ctx, cancel, user, appName, newServices, oldContainers, r.URL.Query())\n\n\tif err == nil {\n\t\terr = a.startServices(ctx, newServices)\n\t}\n\n\tif err != nil {\n\t\tcancel()\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tif err := httpjson.ResponseArrayJSON(w, http.StatusOK, newServices, httpjson.IsPretty(r.URL.RawQuery)); err != nil {\n\t\thttperror.InternalServerError(w, err)\n\t}\n}\n\n\/\/ Handler for request. Should be use with net\/http\nfunc (a *App) Handler() http.Handler {\n\treturn a.authApp.Handler(a.composeHandler)\n}\n<commit_msg>Moving finishDeploy after start of services for avoiding canceled context<commit_after>package deploy\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/ViBiOh\/auth\/pkg\/auth\"\n\t\"github.com\/ViBiOh\/auth\/pkg\/model\"\n\t\"github.com\/ViBiOh\/dashboard\/pkg\/commons\"\n\t\"github.com\/ViBiOh\/dashboard\/pkg\/docker\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/httperror\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/httpjson\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/request\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/rollbar\"\n\t\"github.com\/ViBiOh\/httputils\/pkg\/tools\"\n\t\"github.com\/ViBiOh\/mailer\/pkg\/client\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n)\n\nconst (\n\t\/\/ DeployTimeout indicates delay for application to deploy before rollback\n\tDeployTimeout = 3 * time.Minute\n\n\tdefaultCPUShares = 128\n\tminMemory = 16777216\n\tmaxMemory = 805306368\n\tcolonSeparator = `:`\n\tdeploySuffix = `_deploy`\n)\n\n\/\/ App stores informations\ntype App struct {\n\ttasks sync.Map\n\tdockerApp *docker.App\n\tauthApp *auth.App\n\tnetwork string\n\ttag string\n\tcontainerUser string\n\tappURL string\n\tnotification string\n\tmailerApp *client.App\n}\n\n\/\/ NewApp creates new App from Flags' config\nfunc NewApp(config map[string]*string, authApp *auth.App, dockerApp *docker.App, mailerApp *client.App) *App {\n\treturn &App{\n\t\ttasks: sync.Map{},\n\t\tdockerApp: dockerApp,\n\t\tauthApp: authApp,\n\t\tmailerApp: mailerApp,\n\t\tnetwork: strings.TrimSpace(*config[`network`]),\n\t\ttag: strings.TrimSpace(*config[`tag`]),\n\t\tcontainerUser: strings.TrimSpace(*config[`containerUser`]),\n\t\tappURL: strings.TrimSpace(*config[`appURL`]),\n\t\tnotification: strings.TrimSpace(*config[`notification`]),\n\t}\n}\n\n\/\/ Flags adds flags for given prefix\nfunc Flags(prefix string) map[string]*string {\n\treturn map[string]*string{\n\t\t`network`: flag.String(tools.ToCamel(fmt.Sprintf(`%sNetwork`, prefix)), `traefik`, `[deploy] Default Network`),\n\t\t`tag`: flag.String(tools.ToCamel(fmt.Sprintf(`%sTag`, prefix)), `latest`, `[deploy] Default image tag)`),\n\t\t`containerUser`: flag.String(tools.ToCamel(fmt.Sprintf(`%sContainerUser`, prefix)), `1000`, `[deploy] Default container user`),\n\t\t`appURL`: flag.String(tools.ToCamel(fmt.Sprintf(`%sAppURL`, prefix)), `https:\/\/dashboard.vibioh.fr`, `[deploy] Application web URL`),\n\t\t`notification`: flag.String(tools.ToCamel(fmt.Sprintf(`%sNotification`, prefix)), `onError`, `[deploy] Send email notification when deploy ends (possibles values ares \"never\", \"onError\", \"all\")`),\n\t}\n}\n\n\/\/ CanBeGracefullyClosed indicates if application can terminate safely\nfunc (a *App) CanBeGracefullyClosed() (canBe bool) {\n\tcanBe = true\n\n\ta.tasks.Range(func(_ interface{}, value interface{}) bool {\n\t\tcanBe = !value.(bool)\n\t\treturn canBe\n\t})\n\n\treturn\n}\n\nfunc (a *App) pullImage(ctx context.Context, image string) error {\n\tif !strings.Contains(image, colonSeparator) {\n\t\timage = fmt.Sprintf(`%s%slatest`, image, colonSeparator)\n\t}\n\n\tpull, err := a.dockerApp.Docker.ImagePull(ctx, image, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(`Error while pulling image: %v`, err)\n\t}\n\n\t_, err = request.ReadBody(pull)\n\treturn err\n}\n\nfunc (a *App) cleanContainers(ctx context.Context, containers []types.Container) error {\n\tfor _, container := range containers {\n\t\tif _, err := a.dockerApp.GracefulStopContainer(ctx, container.ID, time.Minute); err != nil {\n\t\t\trollbar.LogError(`Error while stopping container %s: %v`, container.Names, err)\n\t\t}\n\t}\n\n\tfor _, container := range containers {\n\t\tif _, err := a.dockerApp.RmContainer(ctx, container.ID, nil, false); err != nil {\n\t\t\treturn fmt.Errorf(`Error while deleting container %s: %v`, container.Names, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) renameDeployedContainers(ctx context.Context, services map[string]*deployedService) error {\n\tfor _, service := range services {\n\t\tif err := a.dockerApp.Docker.ContainerRename(ctx, service.ContainerID, getFinalName(service.FullName)); err != nil {\n\t\t\treturn fmt.Errorf(`Error while renaming container %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) deleteServices(ctx context.Context, appName string, services map[string]*deployedService, user *model.User) {\n\tfor _, service := range services {\n\t\tinfos, err := a.dockerApp.InspectContainer(ctx, service.ContainerID)\n\t\tif err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while inspecting service %s: %v`, user.Username, appName, service.Name, err)\n\t\t} else {\n\t\t\tif _, err := a.dockerApp.StopContainer(ctx, service.ContainerID, infos); err != nil {\n\t\t\t\trollbar.LogError(`[%s] [%s] Error while stopping service %s: %v`, user.Username, appName, service.Name, err)\n\t\t\t}\n\n\t\t\tif _, err := a.dockerApp.RmContainer(ctx, service.ContainerID, infos, true); err != nil {\n\t\t\t\trollbar.LogError(`[%s] [%s] Error while deleting service %s: %v`, user.Username, appName, service.Name, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *App) startServices(ctx context.Context, services map[string]*deployedService) error {\n\tfor _, service := range services {\n\t\tif _, err := a.dockerApp.StartContainer(ctx, service.ContainerID, nil); err != nil {\n\t\t\treturn fmt.Errorf(`Error while starting service %s: %v`, service.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *App) inspectServices(ctx context.Context, services map[string]*deployedService, user *model.User, appName string) []*types.ContainerJSON {\n\tcontainers := make([]*types.ContainerJSON, 0, len(services))\n\n\tfor _, service := range services {\n\t\tinfos, err := a.dockerApp.InspectContainer(ctx, service.ContainerID)\n\t\tif err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while inspecting container %s: %v`, user.Username, appName, service.Name, err)\n\t\t} else {\n\t\t\tcontainers = append(containers, infos)\n\t\t}\n\t}\n\n\treturn containers\n}\n\nfunc (a *App) areContainersHealthy(ctx context.Context, user *model.User, appName string, services map[string]*deployedService) bool {\n\tcontainersServices := a.inspectServices(ctx, services, user, appName)\n\tcontainersIdsWithHealthcheck := commons.GetContainersIDs(commons.FilterContainers(containersServices, hasHealthcheck))\n\n\tif len(containersIdsWithHealthcheck) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, id := range containersIdsWithHealthcheck {\n\t\tif service := findServiceByContainerID(services, id); service != nil {\n\t\t\tservice.State = `unhealthy`\n\t\t}\n\t}\n\n\tfiltersArgs := filters.NewArgs()\n\thealthyStatusFilters(&filtersArgs, containersIdsWithHealthcheck)\n\n\ttimeoutCtx, cancel := context.WithTimeout(ctx, DeployTimeout)\n\tdefer cancel()\n\n\tmessages, errors := a.dockerApp.Docker.Events(timeoutCtx, types.EventsOptions{Filters: filtersArgs})\n\thealthyContainers := make(map[string]bool, len(containersIdsWithHealthcheck))\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn false\n\t\tcase message := <-messages:\n\t\t\tif service := findServiceByContainerID(services, message.ID); service != nil {\n\t\t\t\tservice.State = `healthy`\n\t\t\t}\n\n\t\t\thealthyContainers[message.ID] = true\n\t\t\tif len(healthyContainers) == len(containersIdsWithHealthcheck) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase err := <-errors:\n\t\t\trollbar.LogError(`[%s] [%s] Error while reading healthy events: %v`, user.Username, appName, err)\n\t\t\treturn false\n\t\t}\n\t}\n}\n\nfunc (a *App) finishDeploy(ctx context.Context, user *model.User, appName string, services map[string]*deployedService, oldContainers []types.Container, requestParams url.Values) {\n\tspan := opentracing.SpanFromContext(ctx)\n\tspan.SetTag(`app`, appName)\n\tspan.SetTag(`services_count`, len(services))\n\tdefer func() {\n\t\tdefer a.tasks.Delete(appName)\n\t\tdefer span.Finish()\n\t}()\n\n\tsuccess := a.areContainersHealthy(ctx, user, appName, services)\n\ta.captureServicesOutput(ctx, user, appName, services)\n\n\tif success {\n\t\tif err := a.cleanContainers(ctx, oldContainers); err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while cleaning old containers: %v`, user.Username, appName, err)\n\t\t}\n\n\t\tif err := a.renameDeployedContainers(ctx, services); err != nil {\n\t\t\trollbar.LogError(`[%s] [%s] Error while renaming deployed containers: %v`, user.Username, appName, err)\n\t\t}\n\t} else {\n\t\trollbar.LogWarning(`[%s] [%s] Failed to deploy: %v`, user.Username, appName, errHealthCheckFailed)\n\t\ta.deleteServices(ctx, appName, services, user)\n\t}\n\n\tif !success {\n\t\tfor _, service := range services {\n\t\t\tlog.Printf(\"[%s] [%s] Logs output for %s: \\n%s\\n\", user.Username, appName, service.Name, strings.Join(service.Logs, \"\\n\"))\n\t\t\tlog.Printf(\"[%s] [%s] Health output for %s: \\n%s\\n\", user.Username, appName, service.Name, strings.Join(service.HealthLogs, \"\\n\"))\n\t\t}\n\t}\n\n\tif err := a.sendEmailNotification(ctx, user, appName, services, success); err != nil {\n\t\trollbar.LogError(`[%s] [%s] Error while sending email notification: %s`, user.Username, appName, err)\n\t}\n\n\tif err := a.sendRollbarNotification(ctx, user, requestParams); err != nil {\n\t\trollbar.LogError(`[%s] [%s] Error while sending rollbar notification: %s`, user.Username, appName, err)\n\t}\n}\n\nfunc (a *App) createContainer(ctx context.Context, user *model.User, appName string, serviceName string, service *dockerComposeService) (*deployedService, error) {\n\timagePulled := false\n\n\tif a.tag != `` {\n\t\timageOverride := fmt.Sprintf(`%s%s%s`, service.Image, colonSeparator, a.tag)\n\t\tif err := a.pullImage(ctx, imageOverride); err == nil {\n\t\t\tservice.Image = imageOverride\n\t\t\timagePulled = true\n\t\t}\n\t}\n\n\tif !imagePulled {\n\t\tif err := a.pullImage(ctx, service.Image); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tserviceFullName := getServiceFullName(appName, serviceName)\n\n\tconfig, err := a.getConfig(service, user, appName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`Error while getting config: %v`, err)\n\t}\n\n\tcreatedContainer, err := a.dockerApp.Docker.ContainerCreate(ctx, config, a.getHostConfig(service, user), a.getNetworkConfig(serviceName, service), serviceFullName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`Error while creating service %s: %v`, serviceName, err)\n\t}\n\n\treturn &deployedService{\n\t\tName: serviceName,\n\t\tFullName: serviceFullName,\n\t\tContainerID: createdContainer.ID,\n\t\tImageName: service.Image,\n\t}, nil\n}\n\nfunc (a *App) parseCompose(ctx context.Context, user *model.User, appName string, composeFile []byte) (map[string]*deployedService, error) {\n\tcomposeFile = bytes.Replace(composeFile, []byte(`$$`), []byte(`$`), -1)\n\n\tcompose := dockerCompose{}\n\tif err := yaml.Unmarshal(composeFile, &compose); err != nil {\n\t\treturn nil, fmt.Errorf(`[%s] [%s] Error while unmarshalling compose file: %v`, user.Username, appName, err)\n\t}\n\n\tnewServices := make(map[string]*deployedService)\n\tfor serviceName, service := range compose.Services {\n\t\tif deployedService, err := a.createContainer(ctx, user, appName, serviceName, &service); err != nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tnewServices[serviceName] = deployedService\n\t\t}\n\t}\n\n\treturn newServices, nil\n}\n\nfunc composeFailed(w http.ResponseWriter, user *model.User, appName string, err error) {\n\thttperror.InternalServerError(w, fmt.Errorf(`[%s] [%s] Failed to deploy: %v`, user.Username, appName, err))\n}\n\nfunc (a *App) composeHandler(w http.ResponseWriter, r *http.Request, user *model.User) {\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tappName, composeFile, err := checkParams(r, user)\n\tif err != nil {\n\t\thttperror.BadRequest(w, err)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\n\toldContainers, err := a.checkRights(ctx, user, appName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusForbidden)\n\t\treturn\n\t}\n\n\tnewServices, err := a.parseCompose(ctx, user, appName, composeFile)\n\tif err != nil {\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tif err = a.checkTasks(user, appName); err != nil {\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tif err == nil {\n\t\terr = a.startServices(ctx, newServices)\n\t}\n\n\tif err != nil {\n\t\tcomposeFailed(w, user, appName, err)\n\t\treturn\n\t}\n\n\tctx = context.Background()\n\tparentSpanContext := opentracing.SpanFromContext(r.Context()).Context()\n\t_, ctx = opentracing.StartSpanFromContext(ctx, `Deploy`, opentracing.FollowsFrom(parentSpanContext))\n\n\tgo a.finishDeploy(ctx, user, appName, newServices, oldContainers, r.URL.Query())\n\n\tif err := httpjson.ResponseArrayJSON(w, http.StatusOK, newServices, httpjson.IsPretty(r.URL.RawQuery)); err != nil {\n\t\thttperror.InternalServerError(w, err)\n\t}\n}\n\n\/\/ Handler for request. Should be use with net\/http\nfunc (a *App) Handler() http.Handler {\n\treturn a.authApp.Handler(a.composeHandler)\n}\n<|endoftext|>"} {"text":"<commit_before>package lazycache\n\nimport (\n\t\"fmt\"\n\tflag \"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"strings\"\n)\n\nfunc ViperConfiguration() {\n\n\t\/\/ Configuration\n\tviper.SetDefault(\"port\", 8080)\n\tviper.SetDefault(\"bind\", \"0.0.0.0\")\n\tviper.SetDefault(\"imagestore\", \"\")\n\tviper.SetDefault(\"imagestore.bucket\", \"camhd-image-cache\")\n\n\tviper.SetDefault(\"quicktimestore\", \"\")\n\tviper.SetDefault(\"directorystore\", \"\")\n\tviper.SetDefault(\"redishost\", \"localhost:6379\")\n\n\tviper.SetConfigName(\"lazycache\")\n\tviper.AddConfigPath(\"\/etc\/lazycache\")\n\tviper.AddConfigPath(\".\")\n\terr := viper.ReadInConfig() \/\/ Find and read the config file\n\tif err != nil { \/\/ Handle errors reading the config file\n\t\tswitch err.(type) {\n\t\tcase viper.ConfigFileNotFoundError:\n\t\t\t\/\/ ignore\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"Fatal error config file: %s \\n\", err))\n\t\t}\n\t}\n\n\tviper.SetEnvPrefix(\"lazycache\")\n\tviper.AutomaticEnv()\n\t\/\/ Convert '.' to '_' in configuration variable names\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\n\t\/\/ var (\n\t\/\/ \tbindFlag = flag.String(\"bind\", \"0.0.0.0\", \"Network interface to bind to (defaults to 0.0.0.0)\")\n\t\/\/ \tImageStoreFlag = flag.String(\"image-store\", \"\", \"Type of image store (none, google)\")\n\t\/\/ \tImageBucketFlag = flag.String(\"image-store-bucket\", \"\", \"Bucket used for Google image store\")\n\t\/\/ )\n\tflag.Int(\"port\", 80, \"Network port to listen on (default: 8080)\")\n\tflag.String(\"bind\", \"0.0.0.0\", \"Network interface to bind to (defaults to 0.0.0.0)\")\n\n\tflag.String(\"image-store\", \"\", \"Type of image store (none, local, google)\")\n\tflag.String(\"image-store-bucket\", \"camhd-image-cache\", \"Bucket used for Google image store\")\n\tflag.String(\"image-local-root\", \"\", \"Bucket used for Google image store\")\n\tflag.String(\"image-url-root\", \"\", \"Bucket used for Google image store\")\n\n\tflag.String(\"quicktime-store\", \"\", \"Type of quicktime store (none, redis)\")\n\tflag.String(\"directory-store\", \"\", \"Type of directory store (none, redis)\")\n\tflag.String(\"redis-host\", \"localhost:6379\", \"Host used for redis store\")\n\n\tviper.BindPFlag(\"port\", flag.Lookup(\"port\"))\n\tviper.BindPFlag(\"bind\", flag.Lookup(\"bind\"))\n\n\tviper.BindPFlag(\"imagestore\", flag.Lookup(\"image-store\"))\n\tviper.BindPFlag(\"imagestore.bucket\", flag.Lookup(\"image-store-bucket\"))\n\tviper.BindPFlag(\"imagestore.localroot\", flag.Lookup(\"image-local-root\"))\n\n\tviper.BindPFlag(\"directorystore\", flag.Lookup(\"directory-store\"))\n\tviper.BindPFlag(\"quicktimestore\", flag.Lookup(\"quicktime-store\"))\n\tviper.BindPFlag(\"redishost\", flag.Lookup(\"redis-host\"))\n\n\tflag.Parse()\n}\n\nfunc ConfigureImageStoreFromViper() {\n\tstoreKey := viper.GetString(\"imagestore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring image store with type \\\"%s\\\"\", storeKey))\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of image store from \\\"%s\\\"\", storeKey))\n\t\tDefaultImageStore = NullImageStore{}\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"No image store configured.\")\n\t\tDefaultImageStore = NullImageStore{}\n\tcase \"local\":\n\t\tDefaultImageStore = CreateLocalStore(viper.GetString(\"imagestore.localRoot\"),\n\t\t\tviper.GetString(\"imagestore.bind\"))\n\tcase \"google\":\n\t\tDefaultImageStore = CreateGoogleStore(viper.GetString(\"imagestore.bucket\"))\n\t}\n}\n\nfunc ConfigureQuicktimeStoreFromViper() {\n\tstoreKey := viper.GetString(\"quicktimestore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring quicktime store with type \\\"%s\\\"\", storeKey))\n\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of image store from \\\"%s\\\"\", storeKey))\n\t\tQTMetadataStore = CreateMapJSONStore()\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"Using default QuicktimeStore.\")\n\t\tQTMetadataStore = CreateMapJSONStore()\n\tcase \"redis\":\n\t\thostname := viper.GetString(\"redishost\")\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Connecting to redis host \\\"%s\\\"\", hostname))\n\t\tredis, err := CreateRedisJSONStore(hostname, \"qt\")\n\t\tif err != nil {\n\t\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Failed to configure Redis Quicktime store to host \\\"%s\\\"\", hostname))\n\t\t}\n\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Logging movie metadata to Redis at %s\", hostname))\n\t\tQTMetadataStore = redis\n\t}\n}\n\nfunc ConfigureDirectoryStoreFromViper() {\n\tstoreKey := viper.GetString(\"directorystore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring directory store with type \\\"%s\\\"\", storeKey))\n\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of directory store from \\\"%s\\\"\", storeKey))\n\t\tDirKeyStore = CreateMapJSONStore()\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"Using default directory store.\")\n\t\tDirKeyStore = CreateMapJSONStore()\n\tcase \"redis\":\n\t\thostname := viper.GetString(\"redishost\")\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Connecting to redis host \\\"%s\\\"\", hostname))\n\t\tredis, err := CreateRedisJSONStore(hostname, \"dir\")\n\t\tif err != nil {\n\t\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Failed to configure Redis directory store to host \\\"%s\\\"\", hostname))\n\t\t}\n\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Logging directory metadata to Redis at %s\", hostname))\n\t\tDirKeyStore = redis\n\t}\n}\n\nfunc ConfigureFromViper() {\n\tViperConfiguration()\n\n\tDefaultLogger.Log(\"msg\", \"In ConfigureFromViper\")\n\tConfigureImageStoreFromViper()\n\tConfigureDirectoryStoreFromViper()\n\tConfigureQuicktimeStoreFromViper()\n}\n<commit_msg>Fixed naming error with image store configuration variables.<commit_after>package lazycache\n\nimport (\n\t\"fmt\"\n\tflag \"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"strings\"\n)\n\nfunc ViperConfiguration() {\n\n\t\/\/ Configuration\n\tviper.SetDefault(\"port\", 8080)\n\tviper.SetDefault(\"bind\", \"0.0.0.0\")\n\tviper.SetDefault(\"imagestore\", \"\")\n\tviper.SetDefault(\"imagestore.bucket\", \"camhd-image-cache\")\n\n\tviper.SetDefault(\"quicktimestore\", \"\")\n\tviper.SetDefault(\"directorystore\", \"\")\n\tviper.SetDefault(\"redishost\", \"localhost:6379\")\n\n\tviper.SetConfigName(\"lazycache\")\n\tviper.AddConfigPath(\"\/etc\/lazycache\")\n\tviper.AddConfigPath(\".\")\n\terr := viper.ReadInConfig() \/\/ Find and read the config file\n\tif err != nil { \/\/ Handle errors reading the config file\n\t\tswitch err.(type) {\n\t\tcase viper.ConfigFileNotFoundError:\n\t\t\t\/\/ ignore\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"Fatal error config file: %s \\n\", err))\n\t\t}\n\t}\n\n\tviper.SetEnvPrefix(\"lazycache\")\n\tviper.AutomaticEnv()\n\t\/\/ Convert '.' to '_' in configuration variable names\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\n\tflag.Int(\"port\", 80, \"Network port to listen on (default: 8080)\")\n\tflag.String(\"bind\", \"0.0.0.0\", \"Network interface to bind to (defaults to 0.0.0.0)\")\n\n\tflag.String(\"image-store\", \"\", \"Type of image store (none, local, google)\")\n\tflag.String(\"image-store-bucket\", \"camhd-image-cache\", \"Bucket used for Google image store\")\n\tflag.String(\"image-store-root\", \"\", \"Path to local image store directory (must be writable)\")\n\tflag.String(\"image-store-url\", \"\", \"Root URL for webserver which serves image store directory\")\n\n\tflag.String(\"quicktime-store\", \"\", \"Type of quicktime store (none, redis)\")\n\tflag.String(\"directory-store\", \"\", \"Type of directory store (none, redis)\")\n\tflag.String(\"redis-host\", \"localhost:6379\", \"Host used for redis store\")\n\n\tviper.BindPFlag(\"port\", flag.Lookup(\"port\"))\n\tviper.BindPFlag(\"bind\", flag.Lookup(\"bind\"))\n\n\tviper.BindPFlag(\"imagestore\", flag.Lookup(\"image-store\"))\n\tviper.BindPFlag(\"imagestore.bucket\", flag.Lookup(\"image-store-bucket\"))\n\tviper.BindPFlag(\"imagestore.root\", flag.Lookup(\"image-store-root\"))\n\tviper.BindPFlag(\"imagestore.url\", flag.Lookup(\"image-store-url\"))\n\n\tviper.BindPFlag(\"directorystore\", flag.Lookup(\"directory-store\"))\n\tviper.BindPFlag(\"quicktimestore\", flag.Lookup(\"quicktime-store\"))\n\tviper.BindPFlag(\"redishost\", flag.Lookup(\"redis-host\"))\n\n\tflag.Parse()\n}\n\nfunc ConfigureImageStoreFromViper() {\n\tstoreKey := viper.GetString(\"imagestore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring image store with type \\\"%s\\\"\", storeKey))\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of image store from \\\"%s\\\"\", storeKey))\n\t\tDefaultImageStore = NullImageStore{}\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"No image store configured.\")\n\t\tDefaultImageStore = NullImageStore{}\n\tcase \"local\":\n\t\tDefaultImageStore = CreateLocalStore(viper.GetString(\"imagestore.root\"), viper.GetString(\"imagestore.url\"))\n\tcase \"google\":\n\t\tDefaultImageStore = CreateGoogleStore(viper.GetString(\"imagestore.bucket\"))\n\t}\n}\n\nfunc ConfigureQuicktimeStoreFromViper() {\n\tstoreKey := viper.GetString(\"quicktimestore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring quicktime store with type \\\"%s\\\"\", storeKey))\n\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of image store from \\\"%s\\\"\", storeKey))\n\t\tQTMetadataStore = CreateMapJSONStore()\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"Using default QuicktimeStore.\")\n\t\tQTMetadataStore = CreateMapJSONStore()\n\tcase \"redis\":\n\t\thostname := viper.GetString(\"redishost\")\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Connecting to redis host \\\"%s\\\"\", hostname))\n\t\tredis, err := CreateRedisJSONStore(hostname, \"qt\")\n\t\tif err != nil {\n\t\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Failed to configure Redis Quicktime store to host \\\"%s\\\"\", hostname))\n\t\t}\n\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Logging movie metadata to Redis at %s\", hostname))\n\t\tQTMetadataStore = redis\n\t}\n}\n\nfunc ConfigureDirectoryStoreFromViper() {\n\tstoreKey := viper.GetString(\"directorystore\")\n\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Configuring directory store with type \\\"%s\\\"\", storeKey))\n\n\tswitch strings.ToLower(storeKey) {\n\tdefault:\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Unable to determine type of directory store from \\\"%s\\\"\", storeKey))\n\t\tDirKeyStore = CreateMapJSONStore()\n\tcase \"\", \"none\":\n\t\tDefaultLogger.Log(\"msg\", \"Using default directory store.\")\n\t\tDirKeyStore = CreateMapJSONStore()\n\tcase \"redis\":\n\t\thostname := viper.GetString(\"redishost\")\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Connecting to redis host \\\"%s\\\"\", hostname))\n\t\tredis, err := CreateRedisJSONStore(hostname, \"dir\")\n\t\tif err != nil {\n\t\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Failed to configure Redis directory store to host \\\"%s\\\"\", hostname))\n\t\t}\n\n\t\tDefaultLogger.Log(\"msg\", fmt.Sprintf(\"Logging directory metadata to Redis at %s\", hostname))\n\t\tDirKeyStore = redis\n\t}\n}\n\nfunc ConfigureFromViper() {\n\tViperConfiguration()\n\n\tDefaultLogger.Log(\"msg\", \"In ConfigureFromViper\")\n\tConfigureImageStoreFromViper()\n\tConfigureDirectoryStoreFromViper()\n\tConfigureQuicktimeStoreFromViper()\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n)\n\nvar (\n\t\/\/ The git commit that was compiled. This will be filled in by the compiler.\n\tGitCommit string\n\tGitDescribe string\n\n\t\/\/ The main version number that is being run at the moment.\n\tVersion = \"0.12.0\"\n\n\t\/\/ A pre-release marker for the version. If this is \"\" (empty string)\n\t\/\/ then it means that it is a final release. Otherwise, this is a pre-release\n\t\/\/ such as \"dev\" (in development), \"beta\", \"rc1\", etc.\n\tVersionPrerelease = \"rc1\"\n\n\t\/\/ VersionMetadata is metadata further describing the build type.\n\tVersionMetadata = \"\"\n)\n\n\/\/ VersionInfo\ntype VersionInfo struct {\n\tRevision string\n\tVersion string\n\tVersionPrerelease string\n\tVersionMetadata string\n}\n\nfunc GetVersion() *VersionInfo {\n\tver := Version\n\trel := VersionPrerelease\n\tmd := VersionMetadata\n\tif GitDescribe != \"\" {\n\t\tver = GitDescribe\n\t}\n\tif GitDescribe == \"\" && rel == \"\" && VersionPrerelease != \"\" {\n\t\trel = \"dev\"\n\t}\n\n\treturn &VersionInfo{\n\t\tRevision: GitCommit,\n\t\tVersion: ver,\n\t\tVersionPrerelease: rel,\n\t\tVersionMetadata: md,\n\t}\n}\n\nfunc (c *VersionInfo) VersionNumber() string {\n\tversion := fmt.Sprintf(\"%s\", c.Version)\n\n\tif c.VersionPrerelease != \"\" {\n\t\tversion = fmt.Sprintf(\"%s-%s\", version, c.VersionPrerelease)\n\t}\n\n\tif c.VersionMetadata != \"\" {\n\t\tversion = fmt.Sprintf(\"%s+%s\", version, c.VersionMetadata)\n\t}\n\n\treturn version\n}\n\nfunc (c *VersionInfo) FullVersionNumber(rev bool) string {\n\tvar versionString bytes.Buffer\n\n\tfmt.Fprintf(&versionString, \"Nomad v%s\", c.Version)\n\tif c.VersionPrerelease != \"\" {\n\t\tfmt.Fprintf(&versionString, \"-%s\", c.VersionPrerelease)\n\t}\n\n\tif c.VersionMetadata != \"\" {\n\t\tfmt.Fprintf(&versionString, \"+%s\", c.VersionMetadata)\n\t}\n\n\tif rev && c.Revision != \"\" {\n\t\tfmt.Fprintf(&versionString, \" (%s)\", c.Revision)\n\t}\n\n\treturn versionString.String()\n}\n<commit_msg>Generate files for 0.12.0 release<commit_after>package version\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n)\n\nvar (\n\t\/\/ The git commit that was compiled. This will be filled in by the compiler.\n\tGitCommit string\n\tGitDescribe string\n\n\t\/\/ The main version number that is being run at the moment.\n\tVersion = \"0.12.0\"\n\n\t\/\/ A pre-release marker for the version. If this is \"\" (empty string)\n\t\/\/ then it means that it is a final release. Otherwise, this is a pre-release\n\t\/\/ such as \"dev\" (in development), \"beta\", \"rc1\", etc.\n\tVersionPrerelease = \"\"\n\n\t\/\/ VersionMetadata is metadata further describing the build type.\n\tVersionMetadata = \"\"\n)\n\n\/\/ VersionInfo\ntype VersionInfo struct {\n\tRevision string\n\tVersion string\n\tVersionPrerelease string\n\tVersionMetadata string\n}\n\nfunc GetVersion() *VersionInfo {\n\tver := Version\n\trel := VersionPrerelease\n\tmd := VersionMetadata\n\tif GitDescribe != \"\" {\n\t\tver = GitDescribe\n\t}\n\tif GitDescribe == \"\" && rel == \"\" && VersionPrerelease != \"\" {\n\t\trel = \"dev\"\n\t}\n\n\treturn &VersionInfo{\n\t\tRevision: GitCommit,\n\t\tVersion: ver,\n\t\tVersionPrerelease: rel,\n\t\tVersionMetadata: md,\n\t}\n}\n\nfunc (c *VersionInfo) VersionNumber() string {\n\tversion := fmt.Sprintf(\"%s\", c.Version)\n\n\tif c.VersionPrerelease != \"\" {\n\t\tversion = fmt.Sprintf(\"%s-%s\", version, c.VersionPrerelease)\n\t}\n\n\tif c.VersionMetadata != \"\" {\n\t\tversion = fmt.Sprintf(\"%s+%s\", version, c.VersionMetadata)\n\t}\n\n\treturn version\n}\n\nfunc (c *VersionInfo) FullVersionNumber(rev bool) string {\n\tvar versionString bytes.Buffer\n\n\tfmt.Fprintf(&versionString, \"Nomad v%s\", c.Version)\n\tif c.VersionPrerelease != \"\" {\n\t\tfmt.Fprintf(&versionString, \"-%s\", c.VersionPrerelease)\n\t}\n\n\tif c.VersionMetadata != \"\" {\n\t\tfmt.Fprintf(&versionString, \"+%s\", c.VersionMetadata)\n\t}\n\n\tif rev && c.Revision != \"\" {\n\t\tfmt.Fprintf(&versionString, \" (%s)\", c.Revision)\n\t}\n\n\treturn versionString.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n)\n\nvar InvalidCredentials = errors.New(\"Invalid credentials provided. Must have a username\/ password or none at all.\")\n\ntype Credential struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype Config struct {\n\tCredentials []Credential `json:\"credentials\"`\n\tStripProxyHeaders bool `json:\"strip_proxy_headers\"`\n\tPort int\n}\n\nfunc (config *Config) AuthenticationRequired() bool {\n\treturn len(config.Credentials) > 0\n}\n\nfunc validCredentials(username, password string) bool {\n\tif username == \"\" && password == \"\" {\n\t\treturn true\n\t}\n\tif username != \"\" && password != \"\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (config *Config) Validate() error {\n\tfor _, credential := range config.Credentials {\n\t\tif !validCredentials(credential.Username, credential.Password) {\n\t\t\treturn InvalidCredentials\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (config *Config) IsAuthenticated(username, password string) bool {\n\tfor _, credential := range config.Credentials {\n\t\tif credential.Username == username && credential.Password == password {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc NewConfigFromReader(reader io.Reader) (*Config, error) {\n\tconfig := new(Config)\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n<commit_msg>We only need the strip proxy headers json annotation<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n)\n\nvar InvalidCredentials = errors.New(\"Invalid credentials provided. Must have a username\/ password or none at all.\")\n\ntype Credential struct {\n\tUsername string\n\tPassword string\n}\n\ntype Config struct {\n\tCredentials []Credential\n\tStripProxyHeaders bool `json:\"strip_proxy_headers\"`\n\tPort int\n}\n\nfunc (config *Config) AuthenticationRequired() bool {\n\treturn len(config.Credentials) > 0\n}\n\nfunc validCredentials(username, password string) bool {\n\tif username == \"\" && password == \"\" {\n\t\treturn true\n\t}\n\tif username != \"\" && password != \"\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (config *Config) Validate() error {\n\tfor _, credential := range config.Credentials {\n\t\tif !validCredentials(credential.Username, credential.Password) {\n\t\t\treturn InvalidCredentials\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (config *Config) IsAuthenticated(username, password string) bool {\n\tfor _, credential := range config.Credentials {\n\t\tif credential.Username == username && credential.Password == password {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc NewConfigFromReader(reader io.Reader) (*Config, error) {\n\tconfig := new(Config)\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\nimport \"fmt\"\n\nconst (\n\t\/\/ VersionMajor is for an API incompatible changes\n\tVersionMajor = 5\n\t\/\/ VersionMinor is for functionality in a backwards-compatible manner\n\tVersionMinor = 13\n\t\/\/ VersionPatch is for backwards-compatible bug fixes\n\tVersionPatch = 2\n\n\t\/\/ VersionDev indicates development branch. Releases will be empty string.\n\tVersionDev = \"-dev\"\n)\n\n\/\/ Version is the specification version that the package types support.\nvar Version = fmt.Sprintf(\"%d.%d.%d%s\", VersionMajor, VersionMinor, VersionPatch, VersionDev)\n<commit_msg>Bump to v5.13.2<commit_after>package version\n\nimport \"fmt\"\n\nconst (\n\t\/\/ VersionMajor is for an API incompatible changes\n\tVersionMajor = 5\n\t\/\/ VersionMinor is for functionality in a backwards-compatible manner\n\tVersionMinor = 13\n\t\/\/ VersionPatch is for backwards-compatible bug fixes\n\tVersionPatch = 2\n\n\t\/\/ VersionDev indicates development branch. Releases will be empty string.\n\tVersionDev = \"\"\n)\n\n\/\/ Version is the specification version that the package types support.\nvar Version = fmt.Sprintf(\"%d.%d.%d%s\", VersionMajor, VersionMinor, VersionPatch, VersionDev)\n<|endoftext|>"} {"text":"<commit_before>package vals\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/elves\/elvish\/pkg\/parse\"\n\t\"github.com\/elves\/elvish\/pkg\/util\"\n)\n\n\/\/ NoPretty can be passed to Repr to suppress pretty-printing.\nconst NoPretty = util.MinInt\n\n\/\/ Reprer wraps the Repr method.\ntype Reprer interface {\n\t\/\/ Repr returns a string that represents a Value. The string either be a\n\t\/\/ literal of that Value that is preferably deep-equal to it (like `[a b c]`\n\t\/\/ for a list), or a string enclosed in \"<>\" containing the kind and\n\t\/\/ identity of the Value(like `<fn 0xdeadcafe>`).\n\t\/\/\n\t\/\/ If indent is at least 0, it should be pretty-printed with the current\n\t\/\/ indentation level of indent; the indent of the first line has already\n\t\/\/ been written and shall not be written in Repr. The returned string\n\t\/\/ should never contain a trailing newline.\n\tRepr(indent int) string\n}\n\n\/\/ Repr returns the representation for a value, a string that is preferably (but\n\/\/ not necessarily) an Elvish expression that evaluates to the argument. If\n\/\/ indent >= 0, the representation is pretty-printed. It is implemented for the\n\/\/ builtin types nil, bool and string, the File, List and Map types, StructMap\n\/\/ types, and types satisfying the Reprer interface. For other types, it uses\n\/\/ fmt.Sprint with the format \"<unknown %v>\".\nfunc Repr(v interface{}, indent int) string {\n\tswitch v := v.(type) {\n\tcase nil:\n\t\treturn \"$nil\"\n\tcase bool:\n\t\tif v {\n\t\t\treturn \"$true\"\n\t\t}\n\t\treturn \"$false\"\n\tcase string:\n\t\treturn parse.Quote(v)\n\tcase float64:\n\t\treturn \"(float64 \" + formatFloat64(v) + \")\"\n\tcase Reprer:\n\t\treturn v.Repr(indent)\n\tcase File:\n\t\treturn fmt.Sprintf(\"<file{%s %d}>\", parse.Quote(v.Name()), v.Fd())\n\tcase List:\n\t\tb := NewListReprBuilder(indent)\n\t\tfor it := v.Iterator(); it.HasElem(); it.Next() {\n\t\t\tb.WriteElem(Repr(it.Elem(), indent+1))\n\t\t}\n\t\treturn b.String()\n\tcase Map:\n\t\tbuilder := NewMapReprBuilder(indent)\n\t\tfor it := v.Iterator(); it.HasElem(); it.Next() {\n\t\t\tk, v := it.Elem()\n\t\t\tbuilder.WritePair(Repr(k, indent+1), indent+2, Repr(v, indent+2))\n\t\t}\n\t\treturn builder.String()\n\tcase StructMap:\n\t\treturn reprStructMap(v, indent)\n\tcase PseudoStructMap:\n\t\treturn reprStructMap(v.Fields(), indent)\n\tdefault:\n\t\treturn fmt.Sprintf(\"<unknown %v>\", v)\n\t}\n}\n\nfunc reprStructMap(v StructMap, indent int) string {\n\tvValue := reflect.ValueOf(v)\n\tvType := vValue.Type()\n\tbuilder := NewMapReprBuilder(indent)\n\tit := iterateStructMap(vType)\n\tfor it.Next() {\n\t\tk, v := it.Get(vValue)\n\t\tbuilder.WritePair(Repr(k, indent+1), indent+2, Repr(v, indent+2))\n\t}\n\treturn builder.String()\n}\n<commit_msg>pkg\/eval\/vals: Simply use math.MinInt32 for NoPretty.<commit_after>package vals\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\n\t\"github.com\/elves\/elvish\/pkg\/parse\"\n)\n\n\/\/ NoPretty can be passed to Repr to suppress pretty-printing.\nconst NoPretty = math.MinInt32\n\n\/\/ Reprer wraps the Repr method.\ntype Reprer interface {\n\t\/\/ Repr returns a string that represents a Value. The string either be a\n\t\/\/ literal of that Value that is preferably deep-equal to it (like `[a b c]`\n\t\/\/ for a list), or a string enclosed in \"<>\" containing the kind and\n\t\/\/ identity of the Value(like `<fn 0xdeadcafe>`).\n\t\/\/\n\t\/\/ If indent is at least 0, it should be pretty-printed with the current\n\t\/\/ indentation level of indent; the indent of the first line has already\n\t\/\/ been written and shall not be written in Repr. The returned string\n\t\/\/ should never contain a trailing newline.\n\tRepr(indent int) string\n}\n\n\/\/ Repr returns the representation for a value, a string that is preferably (but\n\/\/ not necessarily) an Elvish expression that evaluates to the argument. If\n\/\/ indent >= 0, the representation is pretty-printed. It is implemented for the\n\/\/ builtin types nil, bool and string, the File, List and Map types, StructMap\n\/\/ types, and types satisfying the Reprer interface. For other types, it uses\n\/\/ fmt.Sprint with the format \"<unknown %v>\".\nfunc Repr(v interface{}, indent int) string {\n\tswitch v := v.(type) {\n\tcase nil:\n\t\treturn \"$nil\"\n\tcase bool:\n\t\tif v {\n\t\t\treturn \"$true\"\n\t\t}\n\t\treturn \"$false\"\n\tcase string:\n\t\treturn parse.Quote(v)\n\tcase float64:\n\t\treturn \"(float64 \" + formatFloat64(v) + \")\"\n\tcase Reprer:\n\t\treturn v.Repr(indent)\n\tcase File:\n\t\treturn fmt.Sprintf(\"<file{%s %d}>\", parse.Quote(v.Name()), v.Fd())\n\tcase List:\n\t\tb := NewListReprBuilder(indent)\n\t\tfor it := v.Iterator(); it.HasElem(); it.Next() {\n\t\t\tb.WriteElem(Repr(it.Elem(), indent+1))\n\t\t}\n\t\treturn b.String()\n\tcase Map:\n\t\tbuilder := NewMapReprBuilder(indent)\n\t\tfor it := v.Iterator(); it.HasElem(); it.Next() {\n\t\t\tk, v := it.Elem()\n\t\t\tbuilder.WritePair(Repr(k, indent+1), indent+2, Repr(v, indent+2))\n\t\t}\n\t\treturn builder.String()\n\tcase StructMap:\n\t\treturn reprStructMap(v, indent)\n\tcase PseudoStructMap:\n\t\treturn reprStructMap(v.Fields(), indent)\n\tdefault:\n\t\treturn fmt.Sprintf(\"<unknown %v>\", v)\n\t}\n}\n\nfunc reprStructMap(v StructMap, indent int) string {\n\tvValue := reflect.ValueOf(v)\n\tvType := vValue.Type()\n\tbuilder := NewMapReprBuilder(indent)\n\tit := iterateStructMap(vType)\n\tfor it.Next() {\n\t\tk, v := it.Get(vValue)\n\t\tbuilder.WritePair(Repr(k, indent+1), indent+2, Repr(v, indent+2))\n\t}\n\treturn builder.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ VERSION ...\nconst VERSION = \"1.1.68\"\n<commit_msg>v1.1.69 [skip ci]<commit_after>package version\n\n\/\/ VERSION ...\nconst VERSION = \"1.1.69\"\n<|endoftext|>"} {"text":"<commit_before>package glob\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nvar (\n\tmkdirs = []string{\"a\", \"b\", \"c\", \"d1\", \"d1\/e\", \"d1\/e\/f\", \"d1\/e\/f\/g\",\n\t\t\"d2\", \"d2\/e\", \"d2\/e\/f\", \"d2\/e\/f\/g\"}\n\tmkdirDots = []string{\".el\"}\n\tcreates = []string{\"a\/X\", \"a\/Y\", \"b\/X\", \"c\/Y\",\n\t\t\"dX\", \"dXY\",\n\t\t\"lorem\", \"ipsum\",\n\t\t\"d1\/e\/f\/g\/X\", \"d2\/e\/f\/g\/X\"}\n\tcreateDots = []string{\".x\", \".el\/x\"}\n)\n\ntype globCase struct {\n\tpattern string\n\twant []string\n}\n\nvar globCases = []globCase{\n\t{\"*\", []string{\"a\", \"b\", \"c\", \"d1\", \"d2\", \"dX\", \"dXY\", \"lorem\", \"ipsum\"}},\n\t{\".\", []string{\".\"}},\n\t{\".\/*\", []string{\".\/a\", \".\/b\", \".\/c\", \".\/d1\", \".\/d2\", \".\/dX\", \".\/dXY\", \".\/lorem\", \".\/ipsum\"}},\n\t{\"..\", []string{\"..\"}},\n\t{\"a\/..\", []string{\"a\/..\"}},\n\t{\"a\/..\/*\", []string{\"a\/..\/a\", \"a\/..\/b\", \"a\/..\/c\", \"a\/..\/d1\", \"a\/..\/d2\", \"a\/..\/dX\", \"a\/..\/dXY\", \"a\/..\/lorem\", \"a\/..\/ipsum\"}},\n\t{\"*\/\", []string{\"a\/\", \"b\/\", \"c\/\", \"d1\/\", \"d2\/\"}},\n\t{\"**\", append(mkdirs, creates...)},\n\t{\"*\/X\", []string{\"a\/X\", \"b\/X\"}},\n\t{\"**X\", []string{\"a\/X\", \"b\/X\", \"dX\", \"d1\/e\/f\/g\/X\", \"d2\/e\/f\/g\/X\"}},\n\t{\"*\/*\/*\", []string{\"d1\/e\/f\", \"d2\/e\/f\"}},\n\t{\"l*m\", []string{\"lorem\"}},\n\t{\"d*\", []string{\"d1\", \"d2\", \"dX\", \"dXY\"}},\n\t{\"d*\/\", []string{\"d1\/\", \"d2\/\"}},\n\t{\"d**\", []string{\"d1\", \"d1\/e\", \"d1\/e\/f\", \"d1\/e\/f\/g\", \"d1\/e\/f\/g\/X\",\n\t\t\"d2\", \"d2\/e\", \"d2\/e\/f\", \"d2\/e\/f\/g\", \"d2\/e\/f\/g\/X\", \"dX\", \"dXY\"}},\n\t{\"?\", []string{\"a\", \"b\", \"c\"}},\n\t{\"??\", []string{\"d1\", \"d2\", \"dX\"}},\n\n\t\/\/ Nonexistent paths.\n\t{\"xxxx\", []string{}},\n\t{\"xxxx\/*\", []string{}},\n\t{\"a\/*\/\", []string{}},\n\n\t\/\/ TODO Test cases against dotfiles.\n}\n\nfunc TestGlob_Relative(t *testing.T) {\n\ttestGlob(t, false)\n}\n\nfunc TestGlob_Absolute(t *testing.T) {\n\ttestGlob(t, true)\n}\n\nfunc testGlob(t *testing.T, abs bool) {\n\tdir, cleanup := testutil.InTestDir()\n\tdir = strings.ReplaceAll(dir, string(os.PathSeparator), \"\/\")\n\tdefer cleanup()\n\n\tfor _, dir := range append(mkdirs, mkdirDots...) {\n\t\terr := os.Mkdir(dir, 0755)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfor _, file := range append(creates, createDots...) {\n\t\tf, err := os.Create(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tf.Close()\n\t}\n\tfor _, tc := range globCases {\n\t\tpattern := tc.pattern\n\t\tif abs {\n\t\t\tpattern = dir + \"\/\" + pattern\n\t\t}\n\t\twantResults := make([]string, len(tc.want))\n\t\tfor i, result := range tc.want {\n\t\t\tif abs {\n\t\t\t\twantResults[i] = dir + \"\/\" + result\n\t\t\t} else {\n\t\t\t\twantResults[i] = result\n\t\t\t}\n\t\t}\n\t\tsort.Strings(wantResults)\n\n\t\tresults := globPaths(pattern)\n\n\t\tif !reflect.DeepEqual(results, wantResults) {\n\t\t\tt.Errorf(`Glob(%q) => %v, want %v`, pattern, results, wantResults)\n\t\t}\n\t}\n}\n\n\/\/ Regression test for b.elv.sh\/1220\nfunc TestGlob_InvalidUTF8InFilename(t *testing.T) {\n\t_, cleanup := testutil.InTestDir()\n\tdefer cleanup()\n\n\tname := string([]byte{255}) + \".txt\"\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tt.Skip(\"create: \", err)\n\t}\n\tf.Close()\n\n\tpaths := globPaths(\"*.txt\")\n\twantPaths := []string{name}\n\tif !reflect.DeepEqual(paths, wantPaths) {\n\t\tt.Errorf(\"got %v, want %v\", paths, wantPaths)\n\t}\n}\n\nfunc globPaths(pattern string) []string {\n\tpaths := []string{}\n\tGlob(pattern, func(pathInfo PathInfo) bool {\n\t\tpaths = append(paths, pathInfo.Path)\n\t\treturn true\n\t})\n\tsort.Strings(paths)\n\treturn paths\n}\n<commit_msg>Make test case crashing too.<commit_after>package glob\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"src.elv.sh\/pkg\/testutil\"\n)\n\nvar (\n\tmkdirs = []string{\"a\", \"b\", \"c\", \"d1\", \"d1\/e\", \"d1\/e\/f\", \"d1\/e\/f\/g\",\n\t\t\"d2\", \"d2\/e\", \"d2\/e\/f\", \"d2\/e\/f\/g\"}\n\tmkdirDots = []string{\".el\"}\n\tcreates = []string{\"a\/X\", \"a\/Y\", \"b\/X\", \"c\/Y\",\n\t\t\"dX\", \"dXY\",\n\t\t\"lorem\", \"ipsum\",\n\t\t\"d1\/e\/f\/g\/X\", \"d2\/e\/f\/g\/X\"}\n\tcreateDots = []string{\".x\", \".el\/x\"}\n)\n\ntype globCase struct {\n\tpattern string\n\twant []string\n}\n\nvar globCases = []globCase{\n\t{\"*\", []string{\"a\", \"b\", \"c\", \"d1\", \"d2\", \"dX\", \"dXY\", \"lorem\", \"ipsum\"}},\n\t{\".\", []string{\".\"}},\n\t{\".\/*\", []string{\".\/a\", \".\/b\", \".\/c\", \".\/d1\", \".\/d2\", \".\/dX\", \".\/dXY\", \".\/lorem\", \".\/ipsum\"}},\n\t{\"..\", []string{\"..\"}},\n\t{\"a\/..\", []string{\"a\/..\"}},\n\t{\"a\/..\/*\", []string{\"a\/..\/a\", \"a\/..\/b\", \"a\/..\/c\", \"a\/..\/d1\", \"a\/..\/d2\", \"a\/..\/dX\", \"a\/..\/dXY\", \"a\/..\/lorem\", \"a\/..\/ipsum\"}},\n\t{\"*\/\", []string{\"a\/\", \"b\/\", \"c\/\", \"d1\/\", \"d2\/\"}},\n\t{\"**\", append(mkdirs, creates...)},\n\t{\"*\/X\", []string{\"a\/X\", \"b\/X\"}},\n\t{\"**X\", []string{\"a\/X\", \"b\/X\", \"dX\", \"d1\/e\/f\/g\/X\", \"d2\/e\/f\/g\/X\"}},\n\t{\"*\/*\/*\", []string{\"d1\/e\/f\", \"d2\/e\/f\"}},\n\t{\"l*m\", []string{\"lorem\"}},\n\t{\"d*\", []string{\"d1\", \"d2\", \"dX\", \"dXY\"}},\n\t{\"d*\/\", []string{\"d1\/\", \"d2\/\"}},\n\t{\"d**\", []string{\"d1\", \"d1\/e\", \"d1\/e\/f\", \"d1\/e\/f\/g\", \"d1\/e\/f\/g\/X\",\n\t\t\"d2\", \"d2\/e\", \"d2\/e\/f\", \"d2\/e\/f\/g\", \"d2\/e\/f\/g\/X\", \"dX\", \"dXY\"}},\n\t{\"?\", []string{\"a\", \"b\", \"c\"}},\n\t{\"??\", []string{\"d1\", \"d2\", \"dX\"}},\n\n\t\/\/ Nonexistent paths.\n\t{\"xxxx\", []string{}},\n\t{\"xxxx\/*\", []string{}},\n\t{\"a\/*\/\", []string{}},\n\n\t\/\/ TODO Test cases against dotfiles.\n}\n\nfunc TestGlob_Relative(t *testing.T) {\n\ttestGlob(t, false)\n}\n\nfunc TestGlob_Absolute(t *testing.T) {\n\ttestGlob(t, true)\n}\n\nfunc testGlob(t *testing.T, abs bool) {\n\tdir, cleanup := testutil.InTestDir()\n\tdir = strings.ReplaceAll(dir, string(os.PathSeparator), \"\/\")\n\tdefer cleanup()\n\n\tfor _, dir := range append(mkdirs, mkdirDots...) {\n\t\terr := os.Mkdir(dir, 0755)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfor _, file := range append(creates, createDots...) {\n\t\tf, err := os.Create(file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tf.Close()\n\t}\n\tfor _, tc := range globCases {\n\t\tpattern := tc.pattern\n\t\tif abs {\n\t\t\tpattern = dir + \"\/\" + pattern\n\t\t}\n\t\twantResults := make([]string, len(tc.want))\n\t\tfor i, result := range tc.want {\n\t\t\tif abs {\n\t\t\t\twantResults[i] = dir + \"\/\" + result\n\t\t\t} else {\n\t\t\t\twantResults[i] = result\n\t\t\t}\n\t\t}\n\t\tsort.Strings(wantResults)\n\n\t\tresults := globPaths(pattern)\n\n\t\tif !reflect.DeepEqual(results, wantResults) {\n\t\t\tt.Errorf(`Glob(%q) => %v, want %v`, pattern, results, wantResults)\n\t\t}\n\t}\n}\n\n\/\/ Regression test for b.elv.sh\/1220\nfunc TestGlob_InvalidUTF8InFilename(t *testing.T) {\n\t_, cleanup := testutil.InTestDir()\n\tdefer cleanup()\n\n\tname := string([]byte{255}) + \".c\"\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tt.Skip(\"create: \", err)\n\t}\n\tf.Close()\n\n\tpaths := globPaths(\"*.c\")\n\twantPaths := []string{name}\n\tif !reflect.DeepEqual(paths, wantPaths) {\n\t\tt.Errorf(\"got %v, want %v\", paths, wantPaths)\n\t}\n}\n\nfunc globPaths(pattern string) []string {\n\tpaths := []string{}\n\tGlob(pattern, func(pathInfo PathInfo) bool {\n\t\tpaths = append(paths, pathInfo.Path)\n\t\treturn true\n\t})\n\tsort.Strings(paths)\n\treturn paths\n}\n<|endoftext|>"} {"text":"<commit_before>package english\n\nimport (\n\t\"github.com\/kljensen\/snowball\/snowballword\"\n)\n\n\/\/ Step 2 is the stemming of various endings found in \n\/\/ R1 including \"al\", \"ness\", and \"li\".\n\/\/\nfunc step2(w *snowballword.SnowballWord) bool {\n\n\t\/\/ Possible sufficies for this step, longest first.\n\tsuffix, suffixRunes := w.FirstSuffix(\n\t\t\"ational\", \"fulness\", \"iveness\", \"ization\", \"ousness\",\n\t\t\"biliti\", \"lessli\", \"tional\", \"alism\", \"aliti\", \"ation\",\n\t\t\"entli\", \"fulli\", \"iviti\", \"ousli\", \"anci\", \"abli\",\n\t\t\"alli\", \"ator\", \"enci\", \"izer\", \"bli\", \"ogi\", \"li\",\n\t)\n\n\t\/\/ If it is not in R1, do nothing\n\tif suffix == \"\" || len(suffixRunes) > len(w.RS)-w.R1start {\n\t\treturn false\n\t}\n\n\t\/\/ Handle special cases where we're not just going to\n\t\/\/ replace the suffix with another suffix: there are\n\t\/\/ other things we need to do.\n\t\/\/\n\tswitch suffix {\n\n\tcase \"li\":\n\n\t\t\/\/ Delete if preceded by a valid li-ending. Valid li-endings inlude the\n\t\t\/\/ following charaters: cdeghkmnrt. (Note, the unicode code points for\n\t\t\/\/ these characters are, repectively, as follows:\n\t\t\/\/ 99 100 101 103 104 107 109 110 114 116)\n\t\t\/\/\n\t\trsLen := len(w.RS)\n\t\tif rsLen >= 3 {\n\t\t\tswitch w.RS[rsLen-3] {\n\t\t\tcase 99, 100, 101, 103, 104, 107, 109, 110, 114, 116:\n\t\t\t\tw.RemoveLastNRunes(len(suffixRunes))\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase \"ogi\":\n\n\t\t\/\/ Replace by og if preceded by l.\n\t\t\/\/ (Note, the unicode code point for l is 108)\n\t\t\/\/\n\t\trsLen := len(w.RS)\n\t\tif rsLen >= 4 && w.RS[rsLen-4] == 108 {\n\t\t\tw.ReplaceSuffixRunes(suffixRunes, []rune(\"og\"), true)\n\t\t}\n\t\treturn true\n\t}\n\n\t\/\/ Handle a suffix that was found, which is going\n\t\/\/ to be replaced with a different suffix.\n\t\/\/\n\tvar repl string\n\tswitch suffix {\n\tcase \"tional\":\n\t\trepl = \"tion\"\n\tcase \"enci\":\n\t\trepl = \"ence\"\n\tcase \"anci\":\n\t\trepl = \"ance\"\n\tcase \"abli\":\n\t\trepl = \"able\"\n\tcase \"entli\":\n\t\trepl = \"ent\"\n\tcase \"izer\", \"ization\":\n\t\trepl = \"ize\"\n\tcase \"ational\", \"ation\", \"ator\":\n\t\trepl = \"ate\"\n\tcase \"alism\", \"aliti\", \"alli\":\n\t\trepl = \"al\"\n\tcase \"fulness\":\n\t\trepl = \"ful\"\n\tcase \"ousli\", \"ousness\":\n\t\trepl = \"ous\"\n\tcase \"iveness\", \"iviti\":\n\t\trepl = \"ive\"\n\tcase \"biliti\", \"bli\":\n\t\trepl = \"ble\"\n\tcase \"fulli\":\n\t\trepl = \"ful\"\n\tcase \"lessli\":\n\t\trepl = \"less\"\n\t}\n\tw.ReplaceSuffixRunes(suffixRunes, []rune(repl), true)\n\treturn true\n\n}\n<commit_msg>fix typo<commit_after>package english\n\nimport (\n\t\"github.com\/kljensen\/snowball\/snowballword\"\n)\n\n\/\/ Step 2 is the stemming of various endings found in \n\/\/ R1 including \"al\", \"ness\", and \"li\".\n\/\/\nfunc step2(w *snowballword.SnowballWord) bool {\n\n\t\/\/ Possible sufficies for this step, longest first.\n\tsuffix, suffixRunes := w.FirstSuffix(\n\t\t\"ational\", \"fulness\", \"iveness\", \"ization\", \"ousness\",\n\t\t\"biliti\", \"lessli\", \"tional\", \"alism\", \"aliti\", \"ation\",\n\t\t\"entli\", \"fulli\", \"iviti\", \"ousli\", \"anci\", \"abli\",\n\t\t\"alli\", \"ator\", \"enci\", \"izer\", \"bli\", \"ogi\", \"li\",\n\t)\n\n\t\/\/ If it is not in R1, do nothing\n\tif suffix == \"\" || len(suffixRunes) > len(w.RS)-w.R1start {\n\t\treturn false\n\t}\n\n\t\/\/ Handle special cases where we're not just going to\n\t\/\/ replace the suffix with another suffix: there are\n\t\/\/ other things we need to do.\n\t\/\/\n\tswitch suffix {\n\n\tcase \"li\":\n\n\t\t\/\/ Delete if preceded by a valid li-ending. Valid li-endings inlude the\n\t\t\/\/ following charaters: cdeghkmnrt. (Note, the unicode code points for\n\t\t\/\/ these characters are, respectively, as follows:\n\t\t\/\/ 99 100 101 103 104 107 109 110 114 116)\n\t\t\/\/\n\t\trsLen := len(w.RS)\n\t\tif rsLen >= 3 {\n\t\t\tswitch w.RS[rsLen-3] {\n\t\t\tcase 99, 100, 101, 103, 104, 107, 109, 110, 114, 116:\n\t\t\t\tw.RemoveLastNRunes(len(suffixRunes))\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase \"ogi\":\n\n\t\t\/\/ Replace by og if preceded by l.\n\t\t\/\/ (Note, the unicode code point for l is 108)\n\t\t\/\/\n\t\trsLen := len(w.RS)\n\t\tif rsLen >= 4 && w.RS[rsLen-4] == 108 {\n\t\t\tw.ReplaceSuffixRunes(suffixRunes, []rune(\"og\"), true)\n\t\t}\n\t\treturn true\n\t}\n\n\t\/\/ Handle a suffix that was found, which is going\n\t\/\/ to be replaced with a different suffix.\n\t\/\/\n\tvar repl string\n\tswitch suffix {\n\tcase \"tional\":\n\t\trepl = \"tion\"\n\tcase \"enci\":\n\t\trepl = \"ence\"\n\tcase \"anci\":\n\t\trepl = \"ance\"\n\tcase \"abli\":\n\t\trepl = \"able\"\n\tcase \"entli\":\n\t\trepl = \"ent\"\n\tcase \"izer\", \"ization\":\n\t\trepl = \"ize\"\n\tcase \"ational\", \"ation\", \"ator\":\n\t\trepl = \"ate\"\n\tcase \"alism\", \"aliti\", \"alli\":\n\t\trepl = \"al\"\n\tcase \"fulness\":\n\t\trepl = \"ful\"\n\tcase \"ousli\", \"ousness\":\n\t\trepl = \"ous\"\n\tcase \"iveness\", \"iviti\":\n\t\trepl = \"ive\"\n\tcase \"biliti\", \"bli\":\n\t\trepl = \"ble\"\n\tcase \"fulli\":\n\t\trepl = \"ful\"\n\tcase \"lessli\":\n\t\trepl = \"less\"\n\t}\n\tw.ReplaceSuffixRunes(suffixRunes, []rune(repl), true)\n\treturn true\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/prometheus\/prometheus\/config\"\n\t\"github.com\/prometheus\/prometheus\/promql\"\n\t\"github.com\/prometheus\/prometheus\/util\/cli\"\n\t\"github.com\/prometheus\/prometheus\/version\"\n)\n\n\/\/ CheckConfigCmd validates configuration files.\nfunc CheckConfigCmd(t cli.Term, args ...string) int {\n\tif len(args) == 0 {\n\t\tt.Infof(\"usage: promtool check-config <files>\")\n\t\treturn 2\n\t}\n\tfailed := false\n\n\tfor _, arg := range args {\n\t\truleFiles, err := checkConfig(t, arg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\tfailed = true\n\t\t} else {\n\t\t\tt.Infof(\" SUCCESS: %d rule files found\", len(ruleFiles))\n\t\t}\n\t\tt.Infof(\"\")\n\n\t\tfor _, rf := range ruleFiles {\n\t\t\tif n, err := checkRules(t, rf); err != nil {\n\t\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\t\tfailed = true\n\t\t\t} else {\n\t\t\t\tt.Infof(\" SUCCESS: %d rules found\", n)\n\t\t\t}\n\t\t\tt.Infof(\"\")\n\t\t}\n\t}\n\tif failed {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc checkConfig(t cli.Term, filename string) ([]string, error) {\n\tt.Infof(\"Checking %s\", filename)\n\n\tif stat, err := os.Stat(filename); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get file info\")\n\t} else if stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"is a directory\")\n\t}\n\n\tcontent, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cfg config.Config\n\tif err := yaml.Unmarshal(content, &cfg); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ruleFiles []string\n\tfor _, rf := range cfg.RuleFiles {\n\t\trfs, err := filepath.Glob(rf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ If an explicit file was given, error if it doesn't exist.\n\t\tif !strings.Contains(rf, \"*\") && len(rfs) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"%q does not point to an existing file\", rf)\n\t\t}\n\t\truleFiles = append(ruleFiles, rfs...)\n\t}\n\n\treturn ruleFiles, nil\n}\n\n\/\/ CheckRulesCmd validates rule files.\nfunc CheckRulesCmd(t cli.Term, args ...string) int {\n\tif len(args) == 0 {\n\t\tt.Infof(\"usage: promtool check-rules <files>\")\n\t\treturn 2\n\t}\n\tfailed := false\n\n\tfor _, arg := range args {\n\t\tif n, err := checkRules(t, arg); err != nil {\n\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\tfailed = true\n\t\t} else {\n\t\t\tt.Infof(\" SUCCESS: %d rules found\", n)\n\t\t}\n\t\tt.Infof(\"\")\n\t}\n\tif failed {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc checkRules(t cli.Term, filename string) (int, error) {\n\tt.Infof(\"Checking %s\", filename)\n\n\tif stat, err := os.Stat(filename); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot get file info\")\n\t} else if stat.IsDir() {\n\t\treturn 0, fmt.Errorf(\"is a directory\")\n\t}\n\n\tcontent, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\trules, err := promql.ParseStmts(string(content))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(rules), nil\n}\n\nvar versionInfoTmpl = `\nprometheus, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})\n build user: {{.buildUser}}\n build date: {{.buildDate}}\n go version: {{.goVersion}}\n`\n\n\/\/ VersionCmd prints the binaries version information.\nfunc VersionCmd(t cli.Term, _ ...string) int {\n\ttmpl := template.Must(template.New(\"version\").Parse(versionInfoTmpl))\n\n\tvar buf bytes.Buffer\n\tif err := tmpl.ExecuteTemplate(&buf, \"version\", version.Map); err != nil {\n\t\tpanic(err)\n\t}\n\tt.Out(strings.TrimSpace(buf.String()))\n\treturn 0\n}\n\nfunc main() {\n\tapp := cli.NewApp(\"promtool\")\n\n\tapp.Register(\"check-config\", &cli.Command{\n\t\tDesc: \"validate configuration files for correctness\",\n\t\tRun: CheckConfigCmd,\n\t})\n\n\tapp.Register(\"check-rules\", &cli.Command{\n\t\tDesc: \"validate rule files for correctness\",\n\t\tRun: CheckRulesCmd,\n\t})\n\n\tapp.Register(\"version\", &cli.Command{\n\t\tDesc: \"print the version of this binary\",\n\t\tRun: VersionCmd,\n\t})\n\n\tt := cli.BasicTerm(os.Stdout, os.Stderr)\n\tos.Exit(app.Run(t, os.Args[1:]...))\n}\n<commit_msg>cmd\/promtool: resolve rule files relative to config file<commit_after>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/prometheus\/prometheus\/config\"\n\t\"github.com\/prometheus\/prometheus\/promql\"\n\t\"github.com\/prometheus\/prometheus\/util\/cli\"\n\t\"github.com\/prometheus\/prometheus\/version\"\n)\n\n\/\/ CheckConfigCmd validates configuration files.\nfunc CheckConfigCmd(t cli.Term, args ...string) int {\n\tif len(args) == 0 {\n\t\tt.Infof(\"usage: promtool check-config <files>\")\n\t\treturn 2\n\t}\n\tfailed := false\n\n\tfor _, arg := range args {\n\t\truleFiles, err := checkConfig(t, arg)\n\t\tif err != nil {\n\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\tfailed = true\n\t\t} else {\n\t\t\tt.Infof(\" SUCCESS: %d rule files found\", len(ruleFiles))\n\t\t}\n\t\tt.Infof(\"\")\n\n\t\tfor _, rf := range ruleFiles {\n\t\t\tif n, err := checkRules(t, rf); err != nil {\n\t\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\t\tfailed = true\n\t\t\t} else {\n\t\t\t\tt.Infof(\" SUCCESS: %d rules found\", n)\n\t\t\t}\n\t\t\tt.Infof(\"\")\n\t\t}\n\t}\n\tif failed {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc checkConfig(t cli.Term, filename string) ([]string, error) {\n\tt.Infof(\"Checking %s\", filename)\n\n\tif stat, err := os.Stat(filename); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get file info\")\n\t} else if stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"is a directory\")\n\t}\n\n\tcontent, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cfg config.Config\n\tif err := yaml.Unmarshal(content, &cfg); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ruleFiles []string\n\tfor _, rf := range cfg.RuleFiles {\n\t\tif !filepath.IsAbs(rf) {\n\t\t\trf = filepath.Join(filepath.Dir(filename), rf)\n\t\t}\n\n\t\trfs, err := filepath.Glob(rf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ If an explicit file was given, error if it doesn't exist.\n\t\tif !strings.Contains(rf, \"*\") && len(rfs) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"%q does not point to an existing file\", rf)\n\t\t}\n\t\truleFiles = append(ruleFiles, rfs...)\n\t}\n\n\treturn ruleFiles, nil\n}\n\n\/\/ CheckRulesCmd validates rule files.\nfunc CheckRulesCmd(t cli.Term, args ...string) int {\n\tif len(args) == 0 {\n\t\tt.Infof(\"usage: promtool check-rules <files>\")\n\t\treturn 2\n\t}\n\tfailed := false\n\n\tfor _, arg := range args {\n\t\tif n, err := checkRules(t, arg); err != nil {\n\t\t\tt.Errorf(\" FAILED: %s\", err)\n\t\t\tfailed = true\n\t\t} else {\n\t\t\tt.Infof(\" SUCCESS: %d rules found\", n)\n\t\t}\n\t\tt.Infof(\"\")\n\t}\n\tif failed {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc checkRules(t cli.Term, filename string) (int, error) {\n\tt.Infof(\"Checking %s\", filename)\n\n\tif stat, err := os.Stat(filename); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot get file info\")\n\t} else if stat.IsDir() {\n\t\treturn 0, fmt.Errorf(\"is a directory\")\n\t}\n\n\tcontent, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\trules, err := promql.ParseStmts(string(content))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(rules), nil\n}\n\nvar versionInfoTmpl = `\nprometheus, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})\n build user: {{.buildUser}}\n build date: {{.buildDate}}\n go version: {{.goVersion}}\n`\n\n\/\/ VersionCmd prints the binaries version information.\nfunc VersionCmd(t cli.Term, _ ...string) int {\n\ttmpl := template.Must(template.New(\"version\").Parse(versionInfoTmpl))\n\n\tvar buf bytes.Buffer\n\tif err := tmpl.ExecuteTemplate(&buf, \"version\", version.Map); err != nil {\n\t\tpanic(err)\n\t}\n\tt.Out(strings.TrimSpace(buf.String()))\n\treturn 0\n}\n\nfunc main() {\n\tapp := cli.NewApp(\"promtool\")\n\n\tapp.Register(\"check-config\", &cli.Command{\n\t\tDesc: \"validate configuration files for correctness\",\n\t\tRun: CheckConfigCmd,\n\t})\n\n\tapp.Register(\"check-rules\", &cli.Command{\n\t\tDesc: \"validate rule files for correctness\",\n\t\tRun: CheckRulesCmd,\n\t})\n\n\tapp.Register(\"version\", &cli.Command{\n\t\tDesc: \"print the version of this binary\",\n\t\tRun: VersionCmd,\n\t})\n\n\tt := cli.BasicTerm(os.Stdout, os.Stderr)\n\tos.Exit(app.Run(t, os.Args[1:]...))\n}\n<|endoftext|>"} {"text":"<commit_before>package forms\n\nimport (\n\t\"github.com\/kirves\/go-form-it\/common\"\n\t\"github.com\/kirves\/go-form-it\/fields\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tstyle = formcommon.BOOTSTRAP\n)\n\nvar (\n\ttxt, psw, btn fields.FieldInterface\n)\n\nfunc TestFieldRender(t *testing.T) {\n\tfield := fields.TextField(\"test\")\n\tfield.AddClass(\"test\").AddClass(\"class\").SetId(\"testId\").SetParam(\"param1\", \"val1\").AddCss(\"css1\", \"val1\").SetStyle(formcommon.BASE).Disabled()\n\tfield.AddLabelClass(\"LABEL\")\n\tfield.SetLabel(\"This is a label\")\n\tt.Log(\"Rendered field:\", field.Render())\n\ttxt = field\n}\n\nfunc TestPasswordRender(t *testing.T) {\n\tfield := fields.PasswordField(\"test\")\n\tfield.AddClass(\"test\")\n\tfield.AddClass(\"class\")\n\tfield.SetId(\"testId\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered field:\", field.Render())\n\tpsw = field\n}\n\nfunc TestButtonRender(t *testing.T) {\n\tfield := fields.SubmitButton(\"btn\", \"Click me!\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered button:\", field.Render())\n\tbtn = field\n}\n\nfunc TestRadioButtonRender(t *testing.T) {\n\tfield := fields.RadioField(\"radio\", []fields.InputChoice{\n\t\tfields.InputChoice{Id: \"choice1\", Val: \"value1\"},\n\t\tfields.InputChoice{Id: \"choice2\", Val: \"value2\"},\n\t})\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered radio:\", field.Render())\n}\n\nfunc TestSelectRender(t *testing.T) {\n\tfield := fields.SelectField(\"select\", map[string][]fields.InputChoice{\n\t\t\"\": []fields.InputChoice{\n\t\t\tfields.InputChoice{\"choice1\", \"value1\"},\n\t\t\tfields.InputChoice{\"choice2\", \"value2\"},\n\t\t},\n\t})\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered select:\", field.Render())\n}\n\nfunc TestHiddenRender(t *testing.T) {\n\tfield := fields.HiddenField(\"hidden\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered hidden:\", field.Render())\n}\n\nfunc TestNumberRender(t *testing.T) {\n\tfield := fields.NumberField(\"number\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered number:\", field.Render())\n}\n\nfunc TestFormRender(t *testing.T) {\n\tform := BootstrapForm(POST, \"\")\n\tform.Elements(&FieldSetType{}, txt, psw, btn)\n\t\/\/ form.AddField(psw)\n\t\/\/ form.AddField(btn)\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestFormFromSimpleModel(t *testing.T) {\n\ttype User struct {\n\t\tUsername string\n\t\tPassword1 string `form_widget:\"password\" form_label:\"Password 1\"`\n\t\tPassword2 string `form_widget:\"password\" form_label:\"Password 2\"`\n\t\tSkipThis int `form_options:\"skip\"`\n\t}\n\n\tu := User{}\n\n\tform := BaseFormFromModel(u, POST, \"\/action.html\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestFormFromModel(t *testing.T) {\n\ttype Model struct {\n\t\tUser string `form_label:\"User label test\"`\n\t\tpassword string `form_widget:\"password\"`\n\t\tId int `form_min:\"0\" form_max:\"5\"`\n\t\tTs time.Time `form_min:\"2013-04-22T15:00\"`\n\t\tRadioTest string `form_widget:\"select\" form_choices:\"|A|Option A|G1|B|Option B\" form_value:\"A\"`\n\t\tBoolTest bool \/\/`form_options:\"checked\"`\n\t}\n\n\tform := BaseFormFromModel(Model{\"asd\", \"lol\", 20, time.Now(), \"B\", false}, POST, \"\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestBSFormFromModel(t *testing.T) {\n\ttype Model struct {\n\t\tUser string `form_label:\"User label test\"`\n\t\tpassword string `form_widget:\"password\"`\n\t\tId int `form_min:\"0\" form_max:\"5\"`\n\t\tTs time.Time `form_min:\"2013-04-22T15:00\"`\n\t\tRadioTest string `form_widget:\"select\" form_choices:\"|A|Option A|G1|B|Option B\" form_value:\"A\"`\n\t\tBoolTest bool \/\/`form_options:\"checked\"`\n\t}\n\n\tform := BootstrapFormFromModel(Model{\"asd\", \"lol\", 20, time.Now(), \"B\", false}, POST, \"\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestInlineCreation(t *testing.T) {\n\tform := BaseForm(POST, \"\/action.html\").Elements(\n\t\tfields.TextField(\"text_field\").SetLabel(\"Username\"),\n\t\tFieldSet(\"psw_fieldset\",\n\t\t\tfields.PasswordField(\"psw1\").AddClass(\"password_class\").SetLabel(\"Password 1\"),\n\t\t\tfields.PasswordField(\"psw2\").AddClass(\"password_class\").SetLabel(\"Password 2\"),\n\t\t),\n\t\tfields.SubmitButton(\"btn1\", \"Submit\"),\n\t)\n\tt.Log(\"Rendered form:\", form.Render())\n}\n<commit_msg>More tests<commit_after>package forms\n\nimport (\n\t\"github.com\/kirves\/go-form-it\/common\"\n\t\"github.com\/kirves\/go-form-it\/fields\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tstyle = formcommon.BOOTSTRAP\n)\n\nvar (\n\ttxt, psw, btn fields.FieldInterface\n)\n\nfunc TestFieldRender(t *testing.T) {\n\tfield := fields.TextField(\"test\")\n\tfield.AddClass(\"test\").AddClass(\"class\").SetId(\"testId\").SetParam(\"param1\", \"val1\").AddCss(\"css1\", \"val1\").SetStyle(style).Disabled()\n\tfield.AddLabelClass(\"LABEL\")\n\tfield.SetLabel(\"This is a label\")\n\tfield.AddError(\"ERROR\")\n\tt.Log(\"Rendered field:\", field.Render())\n\ttxt = field\n}\n\nfunc TestPasswordRender(t *testing.T) {\n\tfield := fields.PasswordField(\"test\")\n\tfield.AddClass(\"test\")\n\tfield.AddClass(\"class\")\n\tfield.SetId(\"testId\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered field:\", field.Render())\n\tpsw = field\n}\n\nfunc TestButtonRender(t *testing.T) {\n\tfield := fields.SubmitButton(\"btn\", \"Click me!\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered button:\", field.Render())\n\tbtn = field\n}\n\nfunc TestRadioButtonRender(t *testing.T) {\n\tfield := fields.RadioField(\"radio\", []fields.InputChoice{\n\t\tfields.InputChoice{Id: \"choice1\", Val: \"value1\"},\n\t\tfields.InputChoice{Id: \"choice2\", Val: \"value2\"},\n\t})\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered radio:\", field.Render())\n}\n\nfunc TestSelectRender(t *testing.T) {\n\tfield := fields.SelectField(\"select\", map[string][]fields.InputChoice{\n\t\t\"\": []fields.InputChoice{\n\t\t\tfields.InputChoice{\"choice1\", \"value1\"},\n\t\t\tfields.InputChoice{\"choice2\", \"value2\"},\n\t\t},\n\t})\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered select:\", field.Render())\n}\n\nfunc TestHiddenRender(t *testing.T) {\n\tfield := fields.HiddenField(\"hidden\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered hidden:\", field.Render())\n}\n\nfunc TestNumberRender(t *testing.T) {\n\tfield := fields.NumberField(\"number\")\n\tfield.SetStyle(style)\n\tt.Log(\"Rendered number:\", field.Render())\n}\n\nfunc TestFormRender(t *testing.T) {\n\tform := BootstrapForm(POST, \"\")\n\tform.Elements(&FieldSetType{}, txt, psw, btn)\n\t\/\/ form.AddField(psw)\n\t\/\/ form.AddField(btn)\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestFormFromSimpleModel(t *testing.T) {\n\ttype User struct {\n\t\tUsername string\n\t\tPassword1 string `form_widget:\"password\" form_label:\"Password 1\"`\n\t\tPassword2 string `form_widget:\"password\" form_label:\"Password 2\"`\n\t\tSkipThis int `form_options:\"skip\"`\n\t}\n\n\tu := User{}\n\n\tform := BaseFormFromModel(u, POST, \"\/action.html\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestFormFromModel(t *testing.T) {\n\ttype Model struct {\n\t\tUser string `form_label:\"User label test\"`\n\t\tpassword string `form_widget:\"password\"`\n\t\tId int `form_min:\"0\" form_max:\"5\"`\n\t\tTs time.Time `form_min:\"2013-04-22T15:00\"`\n\t\tRadioTest string `form_widget:\"select\" form_choices:\"|A|Option A|G1|B|Option B\" form_value:\"A\"`\n\t\tBoolTest bool \/\/`form_options:\"checked\"`\n\t}\n\n\tform := BaseFormFromModel(Model{\"asd\", \"lol\", 20, time.Now(), \"B\", false}, POST, \"\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestBSFormFromModel(t *testing.T) {\n\ttype Model struct {\n\t\tUser string `form_label:\"User label test\"`\n\t\tpassword string `form_widget:\"password\"`\n\t\tId int `form_min:\"0\" form_max:\"5\"`\n\t\tTs time.Time `form_min:\"2013-04-22T15:00\"`\n\t\tRadioTest string `form_widget:\"select\" form_choices:\"|A|Option A|G1|B|Option B\" form_value:\"A\"`\n\t\tBoolTest bool \/\/`form_options:\"checked\"`\n\t}\n\n\tform := BootstrapFormFromModel(Model{\"asd\", \"lol\", 20, time.Now(), \"B\", false}, POST, \"\")\n\tt.Log(\"Rendered form:\", form.Render())\n}\n\nfunc TestInlineCreation(t *testing.T) {\n\tform := BaseForm(POST, \"\/action.html\").Elements(\n\t\tfields.TextField(\"text_field\").SetLabel(\"Username\"),\n\t\tFieldSet(\"psw_fieldset\",\n\t\t\tfields.PasswordField(\"psw1\").AddClass(\"password_class\").SetLabel(\"Password 1\"),\n\t\t\tfields.PasswordField(\"psw2\").AddClass(\"password_class\").SetLabel(\"Password 2\"),\n\t\t),\n\t\tfields.SubmitButton(\"btn1\", \"Submit\"),\n\t)\n\tt.Log(\"Rendered form:\", form.Render())\n}\n<|endoftext|>"} {"text":"<commit_before>package html\n\nconst style = `\n#topbar {\n\tpadding: 5px 10px;\n\tbackground: #E0EBF5;\n}\n\n#topbar a {\n\tcolor: #375EAB;\n\ttext-decoration: none;\n}\n\nh1, h2, h3, h4 {\n\tmargin: 0;\n\tpadding: 0;\n\tcolor: #375EAB;\n\tfont-weight: bold;\n}\n\ntable {\n\tborder: 1px solid #ccc;\n\tmargin: 20px 5px;\n\tborder-collapse: collapse;\n\twhite-space: nowrap;\n\ttext-overflow: ellipsis;\n\toverflow: hidden;\n}\n\ntable caption {\n\tfont-weight: bold;\n}\n\ntable td, table th {\n\tvertical-align: top;\n\tpadding: 2px 8px;\n\ttext-overflow: ellipsis;\n\toverflow: hidden;\n}\n\n.namespace {\n\tfont-weight: bold;\n\tfont-size: large;\n\tcolor: #375EAB;\n}\n\n.position_table {\n\tborder: 0px;\n\tmargin: 0px;\n\twidth: 100%;\n\tborder-collapse: collapse;\n}\n\n.position_table td, .position_table tr {\n\tvertical-align: center;\n\tpadding: 0px;\n}\n\n.position_table .search {\n\ttext-align: right;\n}\n\n.list_table td, .list_table th {\n\tborder-left: 1px solid #ccc;\n}\n\n.list_table th {\n\tbackground: #F4F4F4;\n}\n\n.list_table tr:nth-child(2n) {\n\tbackground: #F4F4F4;\n}\n\n.list_table tr:hover {\n\tbackground: #ffff99;\n}\n\n.list_table .namespace {\n\twidth: 100pt;\n\tmax-width: 100pt;\n}\n\n.list_table .title {\n\twidth: 350pt;\n\tmax-width: 350pt;\n}\n\n.list_table .commit_list {\n\twidth: 500pt;\n\tmax-width: 500pt;\n}\n\n.list_table .tag {\n\tfont-family: monospace;\n\tfont-size: 8pt;\n\twidth: 40pt;\n\tmax-width: 40pt;\n}\n\n.list_table .opts {\n\twidth: 40pt;\n\tmax-width: 40pt;\n}\n\n.list_table .status {\n\twidth: 250pt;\n\tmax-width: 250pt;\n}\n\n.list_table .patched {\n\twidth: 60pt;\n\tmax-width: 60pt;\n\ttext-align: center;\n}\n\n.list_table .kernel {\n\twidth: 80pt;\n\tmax-width: 80pt;\n}\n\n.list_table .maintainers {\n\twidth: 150pt;\n\tmax-width: 150pt;\n}\n\n.list_table .result {\n\twidth: 60pt;\n\tmax-width: 60pt;\n}\n\n.list_table .stat {\n\twidth: 55pt;\n\tmax-width: 55pt;\n\tfont-family: monospace;\n\ttext-align: right;\n}\n\n.list_table .date {\n\twidth: 60pt;\n\tmax-width: 60pt;\n\tfont-family: monospace;\n\ttext-align: right;\n}\n\n.list_table .stat_name {\n\twidth: 150pt;\n\tmax-width: 150pt;\n\tfont-family: monospace;\n}\n\n.list_table .stat_value {\n\twidth: 120pt;\n\tmax-width: 120pt;\n\tfont-family: monospace;\n}\n\n.bad {\n\tcolor: #f00;\n\tfont-weight: bold;\n}\n\n.inactive {\n\tcolor: #888;\n}\n\n.plain {\n\ttext-decoration: none;\n}\n\ntextarea {\n\twidth:100%;\n\tfont-family: monospace;\n}\n\n.mono {\n\tfont-family: monospace;\n}\n`\nconst js = `\n\/\/ Copyright 2018 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\nfunction sortTable(item, colName, conv, desc = false) {\n\ttable = item.parentNode.parentNode.parentNode.parentNode;\n\trows = table.rows;\n\tcol = findColumnByName(rows[0].getElementsByTagName(\"th\"), colName);\n\tvalues = [];\n\tfor (i = 1; i < rows.length; i++)\n\t\tvalues.push([conv(rows[i].getElementsByTagName(\"td\")[col].textContent), rows[i]]);\n\tif (desc)\n\t\tdesc = !isSorted(values.slice().reverse())\n\telse\n\t\tdesc = isSorted(values);\n\tvalues.sort(function(a, b) {\n\t\tif (a[0] == b[0]) return 0;\n\t\tif (desc && a[0] > b[0] || !desc && a[0] < b[0]) return -1;\n\t\treturn 1;\n\t});\n\tfor (i = 0; i < values.length; i++)\n\t\ttable.tBodies[0].appendChild(values[i][1]);\n\treturn false;\n}\n\nfunction findColumnByName(headers, colName) {\n\tfor (i = 0; i < headers.length; i++) {\n\t\tif (headers[i].textContent == colName)\n\t\t\treturn i;\n\t}\n\treturn 0;\n}\n\nfunction isSorted(values) {\n\tfor (i = 0; i < values.length - 1; i++) {\n\t\tif (values[i][0] > values[i + 1][0])\n\t\t\treturn false;\n\t}\n\treturn true;\n}\n\nfunction textSort(v) { return v.toLowerCase(); }\nfunction numSort(v) { return -parseInt(v); }\nfunction floatSort(v) { return -parseFloat(v); }\nfunction reproSort(v) { return v == \"C\" ? 0 : v == \"syz\" ? 1 : 2; }\nfunction patchedSort(v) { return v == \"\" ? -1 : parseInt(v); }\n\nfunction timeSort(v) {\n\tif (v == \"now\")\n\t\treturn 0;\n\tm = v.indexOf('m');\n\th = v.indexOf('h');\n\td = v.indexOf('d');\n\tif (m > 0 && h < 0)\n\t\treturn parseInt(v);\n\tif (h > 0 && m > 0)\n\t\treturn parseInt(v) * 60 + parseInt(v.substring(h + 1));\n\tif (d > 0 && h > 0)\n\t\treturn parseInt(v) * 60 * 24 + parseInt(v.substring(d + 1)) * 60;\n\tif (d > 0)\n\t\treturn parseInt(v) * 60 * 24;\n\treturn 1000000000;\n}\n`\n<commit_msg>pkg\/html: regenerate<commit_after>package html\n\nconst style = `\n#topbar {\n\tpadding: 5px 10px;\n\tbackground: #E0EBF5;\n}\n\n#topbar a {\n\tcolor: #375EAB;\n\ttext-decoration: none;\n}\n\nh1, h2, h3, h4 {\n\tmargin: 0;\n\tpadding: 0;\n\tcolor: #375EAB;\n\tfont-weight: bold;\n}\n\ntable {\n\tborder: 1px solid #ccc;\n\tmargin: 20px 5px;\n\tborder-collapse: collapse;\n\twhite-space: nowrap;\n\ttext-overflow: ellipsis;\n\toverflow: hidden;\n}\n\ntable caption {\n\tfont-weight: bold;\n}\n\ntable td, table th {\n\tvertical-align: top;\n\tpadding: 2px 8px;\n\ttext-overflow: ellipsis;\n\toverflow: hidden;\n}\n\n.namespace {\n\tfont-weight: bold;\n\tfont-size: large;\n\tcolor: #375EAB;\n}\n\n.position_table {\n\tborder: 0px;\n\tmargin: 0px;\n\twidth: 100%;\n\tborder-collapse: collapse;\n}\n\n.position_table td, .position_table tr {\n\tvertical-align: center;\n\tpadding: 0px;\n}\n\n.position_table .search {\n\ttext-align: right;\n}\n\n.list_table td, .list_table th {\n\tborder-left: 1px solid #ccc;\n}\n\n.list_table th {\n\tbackground: #F4F4F4;\n}\n\n.list_table tr:nth-child(2n) {\n\tbackground: #F4F4F4;\n}\n\n.list_table tr:hover {\n\tbackground: #ffff99;\n}\n\n.list_table .namespace {\n\twidth: 100pt;\n\tmax-width: 100pt;\n}\n\n.list_table .title {\n\twidth: 350pt;\n\tmax-width: 350pt;\n}\n\n.list_table .commit_list {\n\twidth: 500pt;\n\tmax-width: 500pt;\n}\n\n.list_table .tag {\n\tfont-family: monospace;\n\tfont-size: 8pt;\n\twidth: 40pt;\n\tmax-width: 40pt;\n}\n\n.list_table .opts {\n\twidth: 40pt;\n\tmax-width: 40pt;\n}\n\n.list_table .status {\n\twidth: 250pt;\n\tmax-width: 250pt;\n}\n\n.list_table .patched {\n\twidth: 60pt;\n\tmax-width: 60pt;\n\ttext-align: center;\n}\n\n.list_table .kernel {\n\twidth: 80pt;\n\tmax-width: 80pt;\n}\n\n.list_table .maintainers {\n\twidth: 150pt;\n\tmax-width: 150pt;\n}\n\n.list_table .result {\n\twidth: 60pt;\n\tmax-width: 60pt;\n}\n\n.list_table .stat {\n\twidth: 55pt;\n\tmax-width: 55pt;\n\tfont-family: monospace;\n\ttext-align: right;\n}\n\n.list_table .date {\n\twidth: 60pt;\n\tmax-width: 60pt;\n\tfont-family: monospace;\n\ttext-align: right;\n}\n\n.list_table .stat_name {\n\twidth: 150pt;\n\tmax-width: 150pt;\n\tfont-family: monospace;\n}\n\n.list_table .stat_value {\n\twidth: 120pt;\n\tmax-width: 120pt;\n\tfont-family: monospace;\n}\n\n.bad {\n\tcolor: #f00;\n\tfont-weight: bold;\n}\n\n.inactive {\n\tcolor: #888;\n}\n\n.plain {\n\ttext-decoration: none;\n}\n\ntextarea {\n\twidth:100%;\n\tfont-family: monospace;\n}\n\n.mono {\n\tfont-family: monospace;\n}\n\n.info_link {\n\tcolor: #25a7db;\n\ttext-decoration: none;\n}\n`\nconst js = `\n\/\/ Copyright 2018 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\nfunction sortTable(item, colName, conv, desc = false) {\n\ttable = item.parentNode.parentNode.parentNode.parentNode;\n\trows = table.rows;\n\tcol = findColumnByName(rows[0].getElementsByTagName(\"th\"), colName);\n\tvalues = [];\n\tfor (i = 1; i < rows.length; i++)\n\t\tvalues.push([conv(rows[i].getElementsByTagName(\"td\")[col].textContent), rows[i]]);\n\tif (desc)\n\t\tdesc = !isSorted(values.slice().reverse())\n\telse\n\t\tdesc = isSorted(values);\n\tvalues.sort(function(a, b) {\n\t\tif (a[0] == b[0]) return 0;\n\t\tif (desc && a[0] > b[0] || !desc && a[0] < b[0]) return -1;\n\t\treturn 1;\n\t});\n\tfor (i = 0; i < values.length; i++)\n\t\ttable.tBodies[0].appendChild(values[i][1]);\n\treturn false;\n}\n\nfunction findColumnByName(headers, colName) {\n\tfor (i = 0; i < headers.length; i++) {\n\t\tif (headers[i].textContent == colName)\n\t\t\treturn i;\n\t}\n\treturn 0;\n}\n\nfunction isSorted(values) {\n\tfor (i = 0; i < values.length - 1; i++) {\n\t\tif (values[i][0] > values[i + 1][0])\n\t\t\treturn false;\n\t}\n\treturn true;\n}\n\nfunction textSort(v) { return v.toLowerCase(); }\nfunction numSort(v) { return -parseInt(v); }\nfunction floatSort(v) { return -parseFloat(v); }\nfunction reproSort(v) { return v == \"C\" ? 0 : v == \"syz\" ? 1 : 2; }\nfunction patchedSort(v) { return v == \"\" ? -1 : parseInt(v); }\n\nfunction timeSort(v) {\n\tif (v == \"now\")\n\t\treturn 0;\n\tm = v.indexOf('m');\n\th = v.indexOf('h');\n\td = v.indexOf('d');\n\tif (m > 0 && h < 0)\n\t\treturn parseInt(v);\n\tif (h > 0 && m > 0)\n\t\treturn parseInt(v) * 60 + parseInt(v.substring(h + 1));\n\tif (d > 0 && h > 0)\n\t\treturn parseInt(v) * 60 * 24 + parseInt(v.substring(d + 1)) * 60;\n\tif (d > 0)\n\t\treturn parseInt(v) * 60 * 24;\n\treturn 1000000000;\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/stts-se\/rbg2p\"\n)\n\ntype g2pMutex struct {\n\tg2ps map[string]rbg2p.RuleSet\n\tsylls map[string]rbg2p.Syllabifier\n\tmutex *sync.RWMutex\n}\n\nvar g2pM = g2pMutex{\n\tg2ps: make(map[string]rbg2p.RuleSet),\n\tsylls: make(map[string]rbg2p.Syllabifier),\n\tmutex: &sync.RWMutex{},\n}\n\nfunc g2pMain_Handler(w http.ResponseWriter, r *http.Request) {\n\t_, cmdFileName, _, _ := runtime.Caller(0)\n\tfString := path.Join(path.Dir(cmdFileName), \"static\/g2p_demo.html\")\n\thttp.ServeFile(w, r, fString)\n}\n\nvar wSplitRe = regexp.MustCompile(\" *, *\")\n\n\/\/ Word internal struct for json\ntype Word struct {\n\tOrth string `json:\"orth\"`\n\tTranses []string `json:\"transes\"`\n}\n\nfunc syllabify(lang string, trans string) (string, int, error) {\n\tg2pM.mutex.RLock()\n\tdefer g2pM.mutex.RUnlock()\n\tsyller, ok := g2pM.sylls[lang]\n\tif !ok {\n\t\truleSet, ok := g2pM.g2ps[lang]\n\t\tif !ok {\n\t\t\tmsg := \"unknown 'lang': \" + lang\n\t\t\tlangs, err := listSyllLanguages()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", http.StatusInternalServerError, err\n\t\t\t}\n\t\t\tmsg = fmt.Sprintf(\"%s. Known 'lang' values: %s\", msg, strings.Join(langs, \", \"))\n\t\t\treturn \"\", http.StatusBadRequest, fmt.Errorf(msg)\n\t\t}\n\n\t\tif !ruleSet.Syllabifier.IsDefined() {\n\t\t\tmsg := fmt.Sprintf(\"no syllabifier defined for language %s\", lang)\n\t\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(msg)\n\t\t}\n\t\tsyller = ruleSet.Syllabifier\n\t}\n\tphns, err := syller.PhonemeSet.SplitTranscription(trans)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"couldn't split input transcription \/%s\/ : %s\", trans, err)\n\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(msg)\n\t}\n\tsylled := syller.SyllabifyFromPhonemes(phns)\n\treturn sylled, http.StatusOK, nil\n}\n\nfunc syllabify_Handler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttrans := vars[\"trans\"]\n\tif \"\" == trans {\n\t\tmsg := \"no value for the expected 'trans' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tres, status, err := syllabify(lang, trans)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\n\t\/\/w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\/\/ j, err := json.Marshal(res)\n\t\/\/ if err != nil {\n\t\/\/ \tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\/\/ \tlog.Println(msg)\n\t\/\/ \thttp.Error(w, msg, http.StatusInternalServerError)\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ fmt.Fprintf(w, string(j))\n\tfmt.Fprintf(w, string(res))\n}\n\nfunc transcribe(lang string, word string) (Word, int, error) {\n\tg2pM.mutex.RLock()\n\tdefer g2pM.mutex.RUnlock()\n\truleSet, ok := g2pM.g2ps[lang]\n\tif !ok {\n\t\tmsg := \"unknown 'lang': \" + lang\n\t\tlangs := listG2PLanguages()\n\t\tmsg = fmt.Sprintf(\"%s. Known 'lang' values: %s\", msg, strings.Join(langs, \", \"))\n\t\treturn Word{}, http.StatusBadRequest, fmt.Errorf(msg)\n\t}\n\n\ttranses, err := ruleSet.Apply(word)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"couldn't transcribe word : %v\", err)\n\t\treturn Word{}, http.StatusInternalServerError, fmt.Errorf(msg)\n\t}\n\ttRes := []string{}\n\tfor _, trans := range transes {\n\t\ttRes = append(tRes, trans)\n\t}\n\tres := Word{word, tRes}\n\treturn res, http.StatusOK, nil\n}\n\nfunc transcribe_Handler(w http.ResponseWriter, r *http.Request) {\n\n\tformat := r.FormValue(\"format\")\n\tif \"xml\" == format {\n\t\ttranscribe_AsXml_Handler(w, r)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tword := vars[\"word\"]\n\tif \"\" == word {\n\t\tmsg := \"no value for the expected 'word' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword = strings.ToLower(word)\n\n\tres, status, err := transcribe(lang, word)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\n\/\/ XMLWords container go generate xml from http request, for legacy calls from ltool\/yalt\ntype XMLWords struct {\n\tXMLName xml.Name `xml:\"words\"`\n\tWords []XMLWord\n}\n\n\/\/ XMLWord container go generate xml from http request, for legacy calls from ltool\/yalt\ntype XMLWord struct {\n\tXMLName xml.Name `xml:\"word\"`\n\tOrth string `xml:\"orth,attr\"`\n\tTrans string `xml:\"trans\"`\n}\n\n\/\/ for legacy calls from ltool\/yalt\nfunc transcribe_AsXml_Handler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword := vars[\"word\"]\n\tif \"\" == word {\n\t\tmsg := \"no value for the expected 'word' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword = strings.ToLower(word)\n\tres, status, err := transcribe(lang, word)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\t\/\/<words>\n\t\/\/<word orth='apa' word_lang='mk' trans_lang='mk' >\" a p a<\/word>\n\t\/\/<\/words>\n\n\t\/\/ words := XMLWords{\n\t\/\/ \tWords: []XMLWord{\n\t\/\/ \t\tXMLWord{Orth: word, Trans: res.Transes[0]},\n\t\/\/ \t},\n\t\/\/ }\n\twords := XMLWords{}\n\tfor _, t := range res.Transes {\n\t\twords.Words = append(words.Words, XMLWord{Orth: word, Trans: t})\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml; charset=utf-8\")\n\txml, err := xml.Marshal(words)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed xml marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(xml))\n\t\/\/fmt.Fprintf(w, string(res.Transes[0]))\n}\n\nfunc listG2PLanguages() []string {\n\tvar res []string\n\tfor name := range g2pM.g2ps {\n\t\tres = append(res, name)\n\t}\n\treturn res\n}\n\nfunc listSyllLanguages() ([]string, error) {\n\tvar res []string\n\tfor name, g2p := range g2pM.g2ps {\n\t\tif g2p.Syllabifier.IsDefined() {\n\t\t\tres = append(res, name)\n\t\t}\n\t}\n\tfor name := range g2pM.sylls {\n\t\tif rbg2p.Contains(res, name) {\n\t\t\treturn res, fmt.Errorf(\"name conflict for '%s' (g2p and syllabifier cannot share the same name)\", name)\n\t\t}\n\t\tres = append(res, name)\n\t}\n\treturn res, nil\n}\n\nfunc g2pList_Handler(w http.ResponseWriter, r *http.Request) {\n\tg2pM.mutex.RLock()\n\tres := listG2PLanguages()\n\tg2pM.mutex.RUnlock()\n\n\tsort.Strings(res)\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\nfunc syllList_Handler(w http.ResponseWriter, r *http.Request) {\n\tg2pM.mutex.RLock()\n\tres, err := listSyllLanguages()\n\tg2pM.mutex.RUnlock()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%s\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsort.Strings(res)\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\n\/\/ langFromFilePath returns the base file name stripped from any '.g2p' extension\nfunc langFromFilePath(p string) string {\n\tb := filepath.Base(p)\n\tif strings.HasSuffix(b, \".g2p\") {\n\t\tb = b[0 : len(b)-4]\n\t} else if strings.HasSuffix(b, \".syll\") {\n\t\tb = b[0 : len(b)-5]\n\t}\n\treturn b\n}\n\nfunc main() {\n\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"server <G2P FILES DIR>\\n\")\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ g2p file dir. Each file in dir with .g2p extension\n\t\/\/ is treated as a g2p file\n\tvar dir = os.Args[1]\n\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ populate map of g2p rules from files.\n\t\/\/ The base file name minus '.g2p' is the language name.\n\tvar fn string\n\tfor _, f := range files {\n\t\tfn = filepath.Join(dir, f.Name())\n\t\tif strings.HasSuffix(fn, \".g2p\") {\n\n\t\t\truleSet, err := rbg2p.LoadFile(fn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlang := langFromFilePath(fn)\n\t\t\tg2pM.mutex.Lock()\n\t\t\tg2pM.g2ps[lang] = ruleSet\n\t\t\tg2pM.mutex.Unlock()\n\t\t\tfmt.Fprintf(os.Stderr, \"server: loaded file '%s'\\n\", fn)\n\n\t\t} else if strings.HasSuffix(fn, \".syll\") {\n\n\t\t\tsyll, err := rbg2p.LoadSyllFile(fn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlang := langFromFilePath(fn)\n\t\t\tg2pM.mutex.Lock()\n\t\t\tg2pM.sylls[lang] = syll\n\t\t\tg2pM.mutex.Unlock()\n\t\t\tfmt.Fprintf(os.Stderr, \"server: loaded file '%s'\\n\", fn)\n\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n\tif _, err := listSyllLanguages(); err != nil {\n\t\tlog.Fatalf(\"server init error : %s\", err)\n\t}\n\n\tr := mux.NewRouter().StrictSlash(true)\n\n\tr.HandleFunc(\"\/rbg2p\", g2pMain_Handler) \/\/.Methods(\"get\")\n\tr.HandleFunc(\"\/\", g2pMain_Handler) \/\/.Methods(\"get\")\n\n\ts := r.PathPrefix(\"\/rbg2p\").Subrouter()\n\n\ts.HandleFunc(\"\/g2p\/list\", g2pList_Handler)\n\ts.HandleFunc(\"\/syll\/list\", syllList_Handler)\n\n\ts.HandleFunc(\"\/transcribe\/{lang}\/{word}\", transcribe_Handler) \/\/.Methods(\"get\", \"post\")\n\ts.HandleFunc(\"\/syllabify\/{lang}\/{trans}\", syllabify_Handler) \/\/.Methods(\"get\", \"post\")\n\n\t\/\/ for legacy calls from ltool\/yalt\n\ts.HandleFunc(\"\/xmltranscribe\/{lang}\/{word}\", transcribe_AsXml_Handler)\n\n\tfmt.Println(\"Serving urls:\")\n\tr.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {\n\t\tt, err := route.GetPathTemplate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(t)\n\t\treturn nil\n\t})\n\n\tport := \":6771\"\n\tlog.Printf(\"starting g2p server at port %s\\n\", port)\n\terr = http.ListenAndServe(port, r)\n\tif err != nil {\n\n\t\tlog.Fatalf(\"no fun: %v\\n\", err)\n\t}\n\n}\n<commit_msg>\/ping implemented<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/stts-se\/rbg2p\"\n)\n\ntype g2pMutex struct {\n\tg2ps map[string]rbg2p.RuleSet\n\tsylls map[string]rbg2p.Syllabifier\n\tmutex *sync.RWMutex\n}\n\nvar g2pM = g2pMutex{\n\tg2ps: make(map[string]rbg2p.RuleSet),\n\tsylls: make(map[string]rbg2p.Syllabifier),\n\tmutex: &sync.RWMutex{},\n}\n\nfunc g2pMain_Handler(w http.ResponseWriter, r *http.Request) {\n\t_, cmdFileName, _, _ := runtime.Caller(0)\n\tfString := path.Join(path.Dir(cmdFileName), \"static\/g2p_demo.html\")\n\thttp.ServeFile(w, r, fString)\n}\n\nfunc ping_Handler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"rbg2p\")\n}\n\nvar wSplitRe = regexp.MustCompile(\" *, *\")\n\n\/\/ Word internal struct for json\ntype Word struct {\n\tOrth string `json:\"orth\"`\n\tTranses []string `json:\"transes\"`\n}\n\nfunc syllabify(lang string, trans string) (string, int, error) {\n\tg2pM.mutex.RLock()\n\tdefer g2pM.mutex.RUnlock()\n\tsyller, ok := g2pM.sylls[lang]\n\tif !ok {\n\t\truleSet, ok := g2pM.g2ps[lang]\n\t\tif !ok {\n\t\t\tmsg := \"unknown 'lang': \" + lang\n\t\t\tlangs, err := listSyllLanguages()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", http.StatusInternalServerError, err\n\t\t\t}\n\t\t\tmsg = fmt.Sprintf(\"%s. Known 'lang' values: %s\", msg, strings.Join(langs, \", \"))\n\t\t\treturn \"\", http.StatusBadRequest, fmt.Errorf(msg)\n\t\t}\n\n\t\tif !ruleSet.Syllabifier.IsDefined() {\n\t\t\tmsg := fmt.Sprintf(\"no syllabifier defined for language %s\", lang)\n\t\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(msg)\n\t\t}\n\t\tsyller = ruleSet.Syllabifier\n\t}\n\tphns, err := syller.PhonemeSet.SplitTranscription(trans)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"couldn't split input transcription \/%s\/ : %s\", trans, err)\n\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(msg)\n\t}\n\tsylled := syller.SyllabifyFromPhonemes(phns)\n\treturn sylled, http.StatusOK, nil\n}\n\nfunc syllabify_Handler(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttrans := vars[\"trans\"]\n\tif \"\" == trans {\n\t\tmsg := \"no value for the expected 'trans' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tres, status, err := syllabify(lang, trans)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\n\t\/\/w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\/\/ j, err := json.Marshal(res)\n\t\/\/ if err != nil {\n\t\/\/ \tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\/\/ \tlog.Println(msg)\n\t\/\/ \thttp.Error(w, msg, http.StatusInternalServerError)\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ fmt.Fprintf(w, string(j))\n\tfmt.Fprintf(w, string(res))\n}\n\nfunc transcribe(lang string, word string) (Word, int, error) {\n\tg2pM.mutex.RLock()\n\tdefer g2pM.mutex.RUnlock()\n\truleSet, ok := g2pM.g2ps[lang]\n\tif !ok {\n\t\tmsg := \"unknown 'lang': \" + lang\n\t\tlangs := listG2PLanguages()\n\t\tmsg = fmt.Sprintf(\"%s. Known 'lang' values: %s\", msg, strings.Join(langs, \", \"))\n\t\treturn Word{}, http.StatusBadRequest, fmt.Errorf(msg)\n\t}\n\n\ttranses, err := ruleSet.Apply(word)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"couldn't transcribe word : %v\", err)\n\t\treturn Word{}, http.StatusInternalServerError, fmt.Errorf(msg)\n\t}\n\ttRes := []string{}\n\tfor _, trans := range transes {\n\t\ttRes = append(tRes, trans)\n\t}\n\tres := Word{word, tRes}\n\treturn res, http.StatusOK, nil\n}\n\nfunc transcribe_Handler(w http.ResponseWriter, r *http.Request) {\n\n\tformat := r.FormValue(\"format\")\n\tif \"xml\" == format {\n\t\ttranscribe_AsXml_Handler(w, r)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tword := vars[\"word\"]\n\tif \"\" == word {\n\t\tmsg := \"no value for the expected 'word' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword = strings.ToLower(word)\n\n\tres, status, err := transcribe(lang, word)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\n\/\/ XMLWords container go generate xml from http request, for legacy calls from ltool\/yalt\ntype XMLWords struct {\n\tXMLName xml.Name `xml:\"words\"`\n\tWords []XMLWord\n}\n\n\/\/ XMLWord container go generate xml from http request, for legacy calls from ltool\/yalt\ntype XMLWord struct {\n\tXMLName xml.Name `xml:\"word\"`\n\tOrth string `xml:\"orth,attr\"`\n\tTrans string `xml:\"trans\"`\n}\n\n\/\/ for legacy calls from ltool\/yalt\nfunc transcribe_AsXml_Handler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tlang := vars[\"lang\"]\n\tif \"\" == lang {\n\t\tmsg := \"no value for the expected 'lang' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword := vars[\"word\"]\n\tif \"\" == word {\n\t\tmsg := \"no value for the expected 'word' parameter\"\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\tword = strings.ToLower(word)\n\tres, status, err := transcribe(lang, word)\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"%s\", err), status)\n\t\treturn\n\t}\n\t\/\/<words>\n\t\/\/<word orth='apa' word_lang='mk' trans_lang='mk' >\" a p a<\/word>\n\t\/\/<\/words>\n\n\t\/\/ words := XMLWords{\n\t\/\/ \tWords: []XMLWord{\n\t\/\/ \t\tXMLWord{Orth: word, Trans: res.Transes[0]},\n\t\/\/ \t},\n\t\/\/ }\n\twords := XMLWords{}\n\tfor _, t := range res.Transes {\n\t\twords.Words = append(words.Words, XMLWord{Orth: word, Trans: t})\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml; charset=utf-8\")\n\txml, err := xml.Marshal(words)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed xml marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(xml))\n\t\/\/fmt.Fprintf(w, string(res.Transes[0]))\n}\n\nfunc listG2PLanguages() []string {\n\tvar res []string\n\tfor name := range g2pM.g2ps {\n\t\tres = append(res, name)\n\t}\n\treturn res\n}\n\nfunc listSyllLanguages() ([]string, error) {\n\tvar res []string\n\tfor name, g2p := range g2pM.g2ps {\n\t\tif g2p.Syllabifier.IsDefined() {\n\t\t\tres = append(res, name)\n\t\t}\n\t}\n\tfor name := range g2pM.sylls {\n\t\tif rbg2p.Contains(res, name) {\n\t\t\treturn res, fmt.Errorf(\"name conflict for '%s' (g2p and syllabifier cannot share the same name)\", name)\n\t\t}\n\t\tres = append(res, name)\n\t}\n\treturn res, nil\n}\n\nfunc g2pList_Handler(w http.ResponseWriter, r *http.Request) {\n\tg2pM.mutex.RLock()\n\tres := listG2PLanguages()\n\tg2pM.mutex.RUnlock()\n\n\tsort.Strings(res)\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\nfunc syllList_Handler(w http.ResponseWriter, r *http.Request) {\n\tg2pM.mutex.RLock()\n\tres, err := listSyllLanguages()\n\tg2pM.mutex.RUnlock()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%s\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsort.Strings(res)\n\tj, err := json.Marshal(res)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed json marshalling : %v\", err)\n\t\tlog.Println(msg)\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(j))\n}\n\n\/\/ langFromFilePath returns the base file name stripped from any '.g2p' extension\nfunc langFromFilePath(p string) string {\n\tb := filepath.Base(p)\n\tif strings.HasSuffix(b, \".g2p\") {\n\t\tb = b[0 : len(b)-4]\n\t} else if strings.HasSuffix(b, \".syll\") {\n\t\tb = b[0 : len(b)-5]\n\t}\n\treturn b\n}\n\nfunc main() {\n\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"server <G2P FILES DIR>\\n\")\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ g2p file dir. Each file in dir with .g2p extension\n\t\/\/ is treated as a g2p file\n\tvar dir = os.Args[1]\n\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(0)\n\t}\n\n\t\/\/ populate map of g2p rules from files.\n\t\/\/ The base file name minus '.g2p' is the language name.\n\tvar fn string\n\tfor _, f := range files {\n\t\tfn = filepath.Join(dir, f.Name())\n\t\tif strings.HasSuffix(fn, \".g2p\") {\n\n\t\t\truleSet, err := rbg2p.LoadFile(fn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlang := langFromFilePath(fn)\n\t\t\tg2pM.mutex.Lock()\n\t\t\tg2pM.g2ps[lang] = ruleSet\n\t\t\tg2pM.mutex.Unlock()\n\t\t\tfmt.Fprintf(os.Stderr, \"server: loaded file '%s'\\n\", fn)\n\n\t\t} else if strings.HasSuffix(fn, \".syll\") {\n\n\t\t\tsyll, err := rbg2p.LoadSyllFile(fn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlang := langFromFilePath(fn)\n\t\t\tg2pM.mutex.Lock()\n\t\t\tg2pM.sylls[lang] = syll\n\t\t\tg2pM.mutex.Unlock()\n\t\t\tfmt.Fprintf(os.Stderr, \"server: loaded file '%s'\\n\", fn)\n\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"server: skipping file: '%s'\\n\", fn)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n\tif _, err := listSyllLanguages(); err != nil {\n\t\tlog.Fatalf(\"server init error : %s\", err)\n\t}\n\n\tr := mux.NewRouter().StrictSlash(true)\n\n\tr.HandleFunc(\"\/rbg2p\", g2pMain_Handler)\n\tr.HandleFunc(\"\/\", g2pMain_Handler)\n\n\tr.HandleFunc(\"\/ping\", ping_Handler)\n\n\ts := r.PathPrefix(\"\/rbg2p\").Subrouter()\n\n\ts.HandleFunc(\"\/g2p\/list\", g2pList_Handler)\n\ts.HandleFunc(\"\/syll\/list\", syllList_Handler)\n\n\ts.HandleFunc(\"\/transcribe\/{lang}\/{word}\", transcribe_Handler)\n\ts.HandleFunc(\"\/syllabify\/{lang}\/{trans}\", syllabify_Handler)\n\n\t\/\/ for legacy calls from ltool\/yalt\n\ts.HandleFunc(\"\/xmltranscribe\/{lang}\/{word}\", transcribe_AsXml_Handler)\n\n\tfmt.Println(\"Serving urls:\")\n\tr.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {\n\t\tt, err := route.GetPathTemplate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(t)\n\t\treturn nil\n\t})\n\n\tport := \":6771\"\n\tlog.Printf(\"starting g2p server at port %s\\n\", port)\n\terr = http.ListenAndServe(port, r)\n\tif err != nil {\n\n\t\tlog.Fatalf(\"no fun: %v\\n\", err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 DeepFabric, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage raftstore\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/deepfabric\/elasticell\/pkg\/log\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/pb\/mraft\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/storage\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/util\"\n\t\"github.com\/fagongzi\/goetty\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/time\/rate\"\n)\n\nvar (\n\tcreating = 1\n\tsending = 2\n)\n\n\/\/ SnapshotManager manager snapshot\ntype SnapshotManager interface {\n\tRegister(msg *mraft.SnapshotMessage, step int) bool\n\tDeregister(msg *mraft.SnapshotMessage, step int)\n\tCreate(msg *mraft.SnapshotMessage) error\n\tExists(msg *mraft.SnapshotMessage) bool\n\tWriteTo(msg *mraft.SnapshotMessage, conn goetty.IOSession) (uint64, error)\n\tCleanSnap(msg *mraft.SnapshotMessage) error\n\tReceiveSnapData(msg *mraft.SnapshotMessage) error\n\tApply(msg *mraft.SnapshotMessage) error\n}\n\ntype defaultSnapshotManager struct {\n\tsync.RWMutex\n\n\tlimiter *rate.Limiter\n\n\tcfg *Cfg\n\tdb storage.DataEngine\n\tdir string\n\n\tregistry map[string]struct{}\n}\n\nfunc newDefaultSnapshotManager(cfg *Cfg, db storage.DataEngine) SnapshotManager {\n\tdir := cfg.getSnapDir()\n\n\tif !exist(dir) {\n\t\tif err := os.Mkdir(dir, 0750); err != nil {\n\t\t\tlog.Fatalf(\"raftstore-snap: cannot create dir for snapshot, errors:\\n %+v\",\n\t\t\t\terr)\n\t\t}\n\t}\n\n\tgo func() {\n\t\tinterval := time.Hour * 2\n\n\t\tfor {\n\t\t\tlog.Infof(\"raftstore-snap: start scan gc snap files\")\n\n\t\t\tvar paths []string\n\n\t\t\terr := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\t\t\tif f == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif f.IsDir() && f.Name() == getSnapDirName() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tvar skip error\n\t\t\t\tif f.IsDir() && f.Name() != getSnapDirName() {\n\t\t\t\t\tskip = filepath.SkipDir\n\t\t\t\t}\n\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.Sub(f.ModTime()) > interval {\n\t\t\t\t\tpaths = append(paths, path)\n\t\t\t\t}\n\n\t\t\t\treturn skip\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"raftstore-snap: scan snap file failed, errors:\\n%+v\",\n\t\t\t\t\terr)\n\t\t\t}\n\n\t\t\tfor _, path := range paths {\n\t\t\t\terr := os.RemoveAll(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"raftstore-snap: scan snap file failed, file=<%s>, errors:\\n%+v\",\n\t\t\t\t\t\tpath,\n\t\t\t\t\t\terr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(interval)\n\t\t}\n\t}()\n\n\treturn &defaultSnapshotManager{\n\t\tcfg: cfg,\n\t\tlimiter: rate.NewLimiter(rate.Every(time.Second), int(cfg.LimitSnapChunkRate)),\n\t\tdir: dir,\n\t\tdb: db,\n\t\tregistry: make(map[string]struct{}),\n\t}\n}\n\nfunc formatKey(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%d_%d_%d\", msg.Header.Cell.ID, msg.Header.Term, msg.Header.Index)\n}\n\nfunc formatKeyStep(msg *mraft.SnapshotMessage, step int) string {\n\treturn fmt.Sprintf(\"%s_%d\", formatKey(msg), step)\n}\n\nfunc (m *defaultSnapshotManager) getPathOfSnapKey(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s\/%s\", m.dir, formatKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) getPathOfSnapKeyGZ(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s.gz\", m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) getTmpPathOfSnapKeyGZ(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s.tmp\", m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) Register(msg *mraft.SnapshotMessage, step int) bool {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tfkey := formatKeyStep(msg, step)\n\n\tif _, ok := m.registry[fkey]; ok {\n\t\treturn false\n\t}\n\n\tm.registry[fkey] = emptyStruct\n\treturn true\n}\n\nfunc (m *defaultSnapshotManager) Deregister(msg *mraft.SnapshotMessage, step int) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tfkey := formatKeyStep(msg, step)\n\tdelete(m.registry, fkey)\n}\n\nfunc (m *defaultSnapshotManager) inRegistry(msg *mraft.SnapshotMessage, step int) bool {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\tfkey := formatKeyStep(msg, step)\n\t_, ok := m.registry[fkey]\n\n\treturn ok\n}\n\nfunc (m *defaultSnapshotManager) Create(msg *mraft.SnapshotMessage) error {\n\tpath := m.getPathOfSnapKey(msg)\n\tgzPath := m.getPathOfSnapKeyGZ(msg)\n\tstart := encStartKey(&msg.Header.Cell)\n\tend := encEndKey(&msg.Header.Cell)\n\n\tif !exist(gzPath) {\n\t\tif !exist(path) {\n\t\t\terr := m.db.CreateSnapshot(path, start, end)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"\")\n\t\t\t}\n\t\t}\n\n\t\terr := util.GZIP(path)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"\")\n\t\t}\n\t}\n\n\tinfo, err := os.Stat(fmt.Sprintf(\"%s.gz\", path))\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"\")\n\t}\n\n\tfileSize := uint64(info.Size())\n\tsnapshotSizeHistogram.Observe(float64(fileSize))\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) Exists(msg *mraft.SnapshotMessage) bool {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\treturn exist(file)\n}\n\nfunc (m *defaultSnapshotManager) WriteTo(msg *mraft.SnapshotMessage, conn goetty.IOSession) (uint64, error) {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\n\tif !m.Exists(msg) {\n\t\treturn 0, fmt.Errorf(\"missing snapshot file: %s\", file)\n\t}\n\n\tinfo, err := os.Stat(file)\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err, \"\")\n\t}\n\tfileSize := info.Size()\n\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer f.Close()\n\n\tvar written int64\n\tbuf := make([]byte, m.cfg.LimitSnapChunkBytes)\n\tctx := context.TODO()\n\n\tlog.Infof(\"raftstore-snap[cell-%d]: try to send snap, header=<%s>,size=<%d>\",\n\t\tmsg.Header.Cell.ID,\n\t\tmsg.Header.String(),\n\t\tfileSize)\n\n\tfor {\n\t\tnr, er := f.Read(buf)\n\t\tif nr > 0 {\n\t\t\tdst := &mraft.SnapshotMessage{}\n\t\t\tdst.Header = msg.Header\n\t\t\tdst.Chunk = &mraft.SnapshotChunkMessage{\n\t\t\t\tData: buf[0:nr],\n\t\t\t\tFileSize: uint64(fileSize),\n\t\t\t\tFirst: 0 == written,\n\t\t\t\tLast: fileSize == written+int64(nr),\n\t\t\t}\n\n\t\t\twritten += int64(nr)\n\t\t\terr := m.limiter.Wait(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\n\t\t\terr = conn.Write(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\tif er != nil {\n\t\t\tif er != io.EOF {\n\t\t\t\treturn 0, er\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Infof(\"raftstore-snap[cell-%d]: send snap complete\",\n\t\tmsg.Header.Cell.ID)\n\treturn uint64(written), nil\n}\n\nfunc (m *defaultSnapshotManager) CleanSnap(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\n\ttmpFile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(tmpFile) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap tmp file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\ttmpFile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(tmpFile)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap gz file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\tfile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(file)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir := m.getPathOfSnapKey(msg)\n\tif exist(dir) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap dir, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\tdir,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(dir)\n\t}\n\n\treturn err\n}\n\nfunc (m *defaultSnapshotManager) ReceiveSnapData(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\tvar f *os.File\n\n\tif msg.Chunk.First {\n\t\terr = m.cleanTmp(msg)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tf, err = os.OpenFile(file, os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tf, err = os.Create(file)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\t}\n\n\tn, err := f.Write(msg.Chunk.Data)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\n\tif n != len(msg.Chunk.Data) {\n\t\tf.Close()\n\t\treturn fmt.Errorf(\"write snapshot file failed, expect=<%d> actual=<%d>\",\n\t\t\tlen(msg.Chunk.Data),\n\t\t\tn)\n\t}\n\n\tf.Close()\n\n\tif msg.Chunk.Last {\n\t\treturn m.check(msg)\n\t}\n\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) Apply(msg *mraft.SnapshotMessage) error {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\tif !m.Exists(msg) {\n\t\treturn fmt.Errorf(\"missing snapshot file, path=%s\", file)\n\t}\n\n\tdefer m.CleanSnap(msg)\n\n\terr := util.UnGZIP(file, m.dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(m.getPathOfSnapKey(msg))\n\n\treturn m.db.ApplySnapshot(m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) cleanTmp(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\ttmpFile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(tmpFile) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap tmp file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\ttmpFile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(tmpFile)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) check(msg *mraft.SnapshotMessage) error {\n\tfile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tinfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"\")\n\t\t}\n\n\t\tif msg.Chunk.FileSize != uint64(info.Size()) {\n\t\t\treturn fmt.Errorf(\"snap file size not match, got=<%d> expect=<%d> path=<%s>\",\n\t\t\t\tinfo.Size(),\n\t\t\t\tmsg.Chunk.FileSize,\n\t\t\t\tfile)\n\t\t}\n\n\t\treturn os.Rename(file, m.getPathOfSnapKeyGZ(msg))\n\t}\n\n\treturn fmt.Errorf(\"missing snapshot file, path=%s\", file)\n}\n\nfunc exist(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}\n<commit_msg>fix: rate limiter produce one token per second<commit_after>\/\/ Copyright 2016 DeepFabric, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage raftstore\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/deepfabric\/elasticell\/pkg\/log\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/pb\/mraft\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/storage\"\n\t\"github.com\/deepfabric\/elasticell\/pkg\/util\"\n\t\"github.com\/fagongzi\/goetty\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/time\/rate\"\n)\n\nvar (\n\tcreating = 1\n\tsending = 2\n)\n\n\/\/ SnapshotManager manager snapshot\ntype SnapshotManager interface {\n\tRegister(msg *mraft.SnapshotMessage, step int) bool\n\tDeregister(msg *mraft.SnapshotMessage, step int)\n\tCreate(msg *mraft.SnapshotMessage) error\n\tExists(msg *mraft.SnapshotMessage) bool\n\tWriteTo(msg *mraft.SnapshotMessage, conn goetty.IOSession) (uint64, error)\n\tCleanSnap(msg *mraft.SnapshotMessage) error\n\tReceiveSnapData(msg *mraft.SnapshotMessage) error\n\tApply(msg *mraft.SnapshotMessage) error\n}\n\ntype defaultSnapshotManager struct {\n\tsync.RWMutex\n\n\tlimiter *rate.Limiter\n\n\tcfg *Cfg\n\tdb storage.DataEngine\n\tdir string\n\n\tregistry map[string]struct{}\n}\n\nfunc newDefaultSnapshotManager(cfg *Cfg, db storage.DataEngine) SnapshotManager {\n\tdir := cfg.getSnapDir()\n\n\tif !exist(dir) {\n\t\tif err := os.Mkdir(dir, 0750); err != nil {\n\t\t\tlog.Fatalf(\"raftstore-snap: cannot create dir for snapshot, errors:\\n %+v\",\n\t\t\t\terr)\n\t\t}\n\t}\n\n\tgo func() {\n\t\tinterval := time.Hour * 2\n\n\t\tfor {\n\t\t\tlog.Infof(\"raftstore-snap: start scan gc snap files\")\n\n\t\t\tvar paths []string\n\n\t\t\terr := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\t\t\tif f == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif f.IsDir() && f.Name() == getSnapDirName() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tvar skip error\n\t\t\t\tif f.IsDir() && f.Name() != getSnapDirName() {\n\t\t\t\t\tskip = filepath.SkipDir\n\t\t\t\t}\n\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.Sub(f.ModTime()) > interval {\n\t\t\t\t\tpaths = append(paths, path)\n\t\t\t\t}\n\n\t\t\t\treturn skip\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"raftstore-snap: scan snap file failed, errors:\\n%+v\",\n\t\t\t\t\terr)\n\t\t\t}\n\n\t\t\tfor _, path := range paths {\n\t\t\t\terr := os.RemoveAll(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"raftstore-snap: scan snap file failed, file=<%s>, errors:\\n%+v\",\n\t\t\t\t\t\tpath,\n\t\t\t\t\t\terr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(interval)\n\t\t}\n\t}()\n\n\treturn &defaultSnapshotManager{\n\t\tcfg: cfg,\n\t\tlimiter: rate.NewLimiter(rate.Every(time.Second\/time.Duration(cfg.LimitSnapChunkRate)), int(cfg.LimitSnapChunkRate)),\n\t\tdir: dir,\n\t\tdb: db,\n\t\tregistry: make(map[string]struct{}),\n\t}\n}\n\nfunc formatKey(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%d_%d_%d\", msg.Header.Cell.ID, msg.Header.Term, msg.Header.Index)\n}\n\nfunc formatKeyStep(msg *mraft.SnapshotMessage, step int) string {\n\treturn fmt.Sprintf(\"%s_%d\", formatKey(msg), step)\n}\n\nfunc (m *defaultSnapshotManager) getPathOfSnapKey(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s\/%s\", m.dir, formatKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) getPathOfSnapKeyGZ(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s.gz\", m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) getTmpPathOfSnapKeyGZ(msg *mraft.SnapshotMessage) string {\n\treturn fmt.Sprintf(\"%s.tmp\", m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) Register(msg *mraft.SnapshotMessage, step int) bool {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tfkey := formatKeyStep(msg, step)\n\n\tif _, ok := m.registry[fkey]; ok {\n\t\treturn false\n\t}\n\n\tm.registry[fkey] = emptyStruct\n\treturn true\n}\n\nfunc (m *defaultSnapshotManager) Deregister(msg *mraft.SnapshotMessage, step int) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tfkey := formatKeyStep(msg, step)\n\tdelete(m.registry, fkey)\n}\n\nfunc (m *defaultSnapshotManager) inRegistry(msg *mraft.SnapshotMessage, step int) bool {\n\tm.RLock()\n\tdefer m.RUnlock()\n\n\tfkey := formatKeyStep(msg, step)\n\t_, ok := m.registry[fkey]\n\n\treturn ok\n}\n\nfunc (m *defaultSnapshotManager) Create(msg *mraft.SnapshotMessage) error {\n\tpath := m.getPathOfSnapKey(msg)\n\tgzPath := m.getPathOfSnapKeyGZ(msg)\n\tstart := encStartKey(&msg.Header.Cell)\n\tend := encEndKey(&msg.Header.Cell)\n\n\tif !exist(gzPath) {\n\t\tif !exist(path) {\n\t\t\terr := m.db.CreateSnapshot(path, start, end)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"\")\n\t\t\t}\n\t\t}\n\n\t\terr := util.GZIP(path)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"\")\n\t\t}\n\t}\n\n\tinfo, err := os.Stat(fmt.Sprintf(\"%s.gz\", path))\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"\")\n\t}\n\n\tfileSize := uint64(info.Size())\n\tsnapshotSizeHistogram.Observe(float64(fileSize))\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) Exists(msg *mraft.SnapshotMessage) bool {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\treturn exist(file)\n}\n\nfunc (m *defaultSnapshotManager) WriteTo(msg *mraft.SnapshotMessage, conn goetty.IOSession) (uint64, error) {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\n\tif !m.Exists(msg) {\n\t\treturn 0, fmt.Errorf(\"missing snapshot file: %s\", file)\n\t}\n\n\tinfo, err := os.Stat(file)\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err, \"\")\n\t}\n\tfileSize := info.Size()\n\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer f.Close()\n\n\tvar written int64\n\tbuf := make([]byte, m.cfg.LimitSnapChunkBytes)\n\tctx := context.TODO()\n\n\tlog.Infof(\"raftstore-snap[cell-%d]: try to send snap, header=<%s>,size=<%d>\",\n\t\tmsg.Header.Cell.ID,\n\t\tmsg.Header.String(),\n\t\tfileSize)\n\n\tfor {\n\t\tnr, er := f.Read(buf)\n\t\tif nr > 0 {\n\t\t\tdst := &mraft.SnapshotMessage{}\n\t\t\tdst.Header = msg.Header\n\t\t\tdst.Chunk = &mraft.SnapshotChunkMessage{\n\t\t\t\tData: buf[0:nr],\n\t\t\t\tFileSize: uint64(fileSize),\n\t\t\t\tFirst: 0 == written,\n\t\t\t\tLast: fileSize == written+int64(nr),\n\t\t\t}\n\n\t\t\twritten += int64(nr)\n\t\t\terr := m.limiter.Wait(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\n\t\t\terr = conn.Write(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\tif er != nil {\n\t\t\tif er != io.EOF {\n\t\t\t\treturn 0, er\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Infof(\"raftstore-snap[cell-%d]: send snap complete\",\n\t\tmsg.Header.Cell.ID)\n\treturn uint64(written), nil\n}\n\nfunc (m *defaultSnapshotManager) CleanSnap(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\n\ttmpFile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(tmpFile) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap tmp file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\ttmpFile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(tmpFile)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap gz file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\tfile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(file)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir := m.getPathOfSnapKey(msg)\n\tif exist(dir) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap dir, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\tdir,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(dir)\n\t}\n\n\treturn err\n}\n\nfunc (m *defaultSnapshotManager) ReceiveSnapData(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\tvar f *os.File\n\n\tif msg.Chunk.First {\n\t\terr = m.cleanTmp(msg)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tf, err = os.OpenFile(file, os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tf, err = os.Create(file)\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\t}\n\n\tn, err := f.Write(msg.Chunk.Data)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\n\tif n != len(msg.Chunk.Data) {\n\t\tf.Close()\n\t\treturn fmt.Errorf(\"write snapshot file failed, expect=<%d> actual=<%d>\",\n\t\t\tlen(msg.Chunk.Data),\n\t\t\tn)\n\t}\n\n\tf.Close()\n\n\tif msg.Chunk.Last {\n\t\treturn m.check(msg)\n\t}\n\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) Apply(msg *mraft.SnapshotMessage) error {\n\tfile := m.getPathOfSnapKeyGZ(msg)\n\tif !m.Exists(msg) {\n\t\treturn fmt.Errorf(\"missing snapshot file, path=%s\", file)\n\t}\n\n\tdefer m.CleanSnap(msg)\n\n\terr := util.UnGZIP(file, m.dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(m.getPathOfSnapKey(msg))\n\n\treturn m.db.ApplySnapshot(m.getPathOfSnapKey(msg))\n}\n\nfunc (m *defaultSnapshotManager) cleanTmp(msg *mraft.SnapshotMessage) error {\n\tvar err error\n\ttmpFile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(tmpFile) {\n\t\tlog.Infof(\"raftstore-snap[cell-%d]: delete exists snap tmp file, file=<%s>, header=<%s>\",\n\t\t\tmsg.Header.Cell.ID,\n\t\t\ttmpFile,\n\t\t\tmsg.Header.String())\n\t\terr = os.RemoveAll(tmpFile)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *defaultSnapshotManager) check(msg *mraft.SnapshotMessage) error {\n\tfile := m.getTmpPathOfSnapKeyGZ(msg)\n\tif exist(file) {\n\t\tinfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"\")\n\t\t}\n\n\t\tif msg.Chunk.FileSize != uint64(info.Size()) {\n\t\t\treturn fmt.Errorf(\"snap file size not match, got=<%d> expect=<%d> path=<%s>\",\n\t\t\t\tinfo.Size(),\n\t\t\t\tmsg.Chunk.FileSize,\n\t\t\t\tfile)\n\t\t}\n\n\t\treturn os.Rename(file, m.getPathOfSnapKeyGZ(msg))\n\t}\n\n\treturn fmt.Errorf(\"missing snapshot file, path=%s\", file)\n}\n\nfunc exist(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 by António Meireles <antonio.meireles@reformi.st>.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/TheNewNormal\/corectl\/components\/common\"\n\t\"github.com\/TheNewNormal\/corectl\/components\/host\/session\"\n\t\"github.com\/TheNewNormal\/corectl\/components\/server\"\n\t\"github.com\/TheNewNormal\/corectl\/release\"\n\n\t\"github.com\/everdev\/mack\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tserverStartCmd = &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts corectld\",\n\t\tRunE: serverStartCommand,\n\t}\n\tshutdownCmd = &cobra.Command{\n\t\tUse: \"stop\",\n\t\tAliases: []string{\"shutdown\"},\n\t\tShort: \"Stops corectld\",\n\t\tRunE: shutdownCommand,\n\t}\n\tstatusCmd = &cobra.Command{\n\t\tUse: \"status\",\n\t\tShort: \"Shows corectld status\",\n\t\tRunE: common.PScommand,\n\t}\n)\n\nfunc shutdownCommand(cmd *cobra.Command, args []string) (err error) {\n\tif _, err = server.Daemon.Running(); err != nil {\n\t\treturn\n\t}\n\t_, err = server.RPCQuery(\"Stop\", &server.RPCquery{})\n\treturn\n}\n\nfunc serverStartCommand(cmd *cobra.Command, args []string) (err error) {\n\tvar srv *release.Info\n\n\tif srv, err = server.Daemon.Running(); err == nil {\n\t\treturn fmt.Errorf(\"corectld already started (with pid %v)\",\n\t\t\tsrv.Pid)\n\t}\n\n\tif !session.Caller.Privileged {\n\t\tif err = mack.Tell(\"System Events\",\n\t\t\t\"do shell script \\\"\"+\n\t\t\t\tsession.Executable()+\" start --user \"+session.Caller.Username+\n\t\t\t\t\" > \/dev\/null 2>&1 & \\\" with administrator privileges\",\n\t\t\t\"delay 3\"); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif srv, err = server.Daemon.Running(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = mack.AlertBox(mack.AlertOptions{\n\t\t\tTitle: fmt.Sprintf(\"corectld (%v) just started with Pid %v .\",\n\t\t\t\tsrv.Version, srv.Pid),\n\t\t\tMessage: \"\\n\\n(this window will self destruct after 15s)\",\n\t\t\tStyle: \"informational\",\n\t\t\tDuration: 15,\n\t\t\tButtons: \"OK\"}); err != nil {\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"Started corectld:\")\n\t\tsrv.PrettyPrint(true)\n\t\treturn\n\t}\n\tserver.Daemon = server.New()\n\tserver.Daemon.Active = make(map[string]*server.VMInfo)\n\treturn server.Start()\n}\n\nfunc init() {\n\tserverStartCmd.Flags().StringP(\"user\", \"u\", \"\",\n\t\t\"sets the user that will 'own' the corectld instance\")\n\tserverStartCmd.Flags().BoolP(\"force\", \"f\", false,\n\t\t\"rebuilds config drive iso even if a suitable one is already present\")\n\trootCmd.AddCommand(shutdownCmd, statusCmd, serverStartCmd)\n}\n<commit_msg>remove redundant popup on corectd init now that GUI is around the corner<commit_after>\/\/ Copyright (c) 2016 by António Meireles <antonio.meireles@reformi.st>.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/TheNewNormal\/corectl\/components\/common\"\n\t\"github.com\/TheNewNormal\/corectl\/components\/host\/session\"\n\t\"github.com\/TheNewNormal\/corectl\/components\/server\"\n\t\"github.com\/TheNewNormal\/corectl\/release\"\n\n\t\"github.com\/everdev\/mack\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tserverStartCmd = &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts corectld\",\n\t\tRunE: serverStartCommand,\n\t}\n\tshutdownCmd = &cobra.Command{\n\t\tUse: \"stop\",\n\t\tAliases: []string{\"shutdown\"},\n\t\tShort: \"Stops corectld\",\n\t\tRunE: shutdownCommand,\n\t}\n\tstatusCmd = &cobra.Command{\n\t\tUse: \"status\",\n\t\tShort: \"Shows corectld status\",\n\t\tRunE: common.PScommand,\n\t}\n)\n\nfunc shutdownCommand(cmd *cobra.Command, args []string) (err error) {\n\tif _, err = server.Daemon.Running(); err != nil {\n\t\treturn\n\t}\n\t_, err = server.RPCQuery(\"Stop\", &server.RPCquery{})\n\treturn\n}\n\nfunc serverStartCommand(cmd *cobra.Command, args []string) (err error) {\n\tvar srv *release.Info\n\n\tif srv, err = server.Daemon.Running(); err == nil {\n\t\treturn fmt.Errorf(\"corectld already started (with pid %v)\",\n\t\t\tsrv.Pid)\n\t}\n\n\tif !session.Caller.Privileged {\n\t\tif err = mack.Tell(\"System Events\",\n\t\t\t\"do shell script \\\"\"+\n\t\t\t\tsession.Executable()+\" start --user \"+session.Caller.Username+\n\t\t\t\t\" > \/dev\/null 2>&1 & \\\" with administrator privileges\",\n\t\t\t\"delay 3\"); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif srv, err = server.Daemon.Running(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Started corectld:\")\n\t\tsrv.PrettyPrint(true)\n\t\treturn\n\t}\n\tserver.Daemon = server.New()\n\tserver.Daemon.Active = make(map[string]*server.VMInfo)\n\treturn server.Start()\n}\n\nfunc init() {\n\tserverStartCmd.Flags().StringP(\"user\", \"u\", \"\",\n\t\t\"sets the user that will 'own' the corectld instance\")\n\tserverStartCmd.Flags().BoolP(\"force\", \"f\", false,\n\t\t\"rebuilds config drive iso even if a suitable one is already present\")\n\trootCmd.AddCommand(shutdownCmd, statusCmd, serverStartCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/kbfs\/kbfssync\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tdisableFavoritesEnvVar = \"KEYBASE_DISABLE_FAVORITES\"\n)\n\ntype favToAdd struct {\n\tFavorite\n\n\t\/\/ created, if set to true, indicates that this is the first time the TLF has\n\t\/\/ ever existed. It is only used when adding the TLF to favorites\n\tcreated bool\n}\n\nfunc (f favToAdd) ToKBFolder() keybase1.Folder {\n\treturn f.Favorite.ToKBFolder(f.created)\n}\n\n\/\/ favReq represents a request to access the logged-in user's\n\/\/ favorites list. A single request can do one or more of the\n\/\/ following: refresh the current cached list, add a favorite, remove\n\/\/ a favorite, and get all the favorites. When the request is done,\n\/\/ the resulting error (or nil) is sent over the done channel. The\n\/\/ given ctx is used for all network operations.\ntype favReq struct {\n\t\/\/ Request types\n\trefresh bool\n\ttoAdd []favToAdd\n\ttoDel []Favorite\n\tfavs chan<- []Favorite\n\n\t\/\/ Closed when the request is done.\n\tdone chan struct{}\n\t\/\/ Set before done is closed\n\terr error\n\n\t\/\/ Context\n\tctx context.Context\n}\n\n\/\/ Favorites manages a user's favorite list.\ntype Favorites struct {\n\tconfig Config\n\tdisabled bool\n\n\t\/\/ Channels for interacting with the favorites cache\n\treqChan chan *favReq\n\n\twg kbfssync.RepeatedWaitGroup\n\n\t\/\/ cache tracks the favorites for this user, that we know about.\n\t\/\/ It may not be consistent with the server's view of the user's\n\t\/\/ favorites list, if other devices have modified the list since\n\t\/\/ the last refresh.\n\tcache map[Favorite]bool\n\tcacheExpireTime time.Time\n\n\tinFlightLock sync.Mutex\n\tinFlightAdds map[favToAdd]*favReq\n\n\tmuShutdown sync.RWMutex\n\tshutdown bool\n}\n\nfunc newFavoritesWithChan(config Config, reqChan chan *favReq) *Favorites {\n\tdisableVal := strings.ToLower(os.Getenv(disableFavoritesEnvVar))\n\tif len(disableVal) > 0 && disableVal != \"0\" && disableVal != \"false\" &&\n\t\tdisableVal != \"no\" {\n\t\tconfig.MakeLogger(\"\").CDebugf(nil,\n\t\t\t\"Disable favorites due to env var %s=%s\",\n\t\t\tdisableFavoritesEnvVar, disableVal)\n\t\treturn &Favorites{\n\t\t\tconfig: config,\n\t\t\tdisabled: true,\n\t\t}\n\t}\n\n\tf := &Favorites{\n\t\tconfig: config,\n\t\treqChan: reqChan,\n\t\tinFlightAdds: make(map[favToAdd]*favReq),\n\t}\n\tgo f.loop()\n\treturn f\n}\n\n\/\/ NewFavorites constructs a new Favorites instance.\nfunc NewFavorites(config Config) *Favorites {\n\treturn newFavoritesWithChan(config, make(chan *favReq, 100))\n}\n\nfunc (f *Favorites) closeReq(req *favReq, err error) {\n\tf.inFlightLock.Lock()\n\tdefer f.inFlightLock.Unlock()\n\treq.err = err\n\tclose(req.done)\n\tfor _, fav := range req.toAdd {\n\t\tdelete(f.inFlightAdds, fav)\n\t}\n}\n\nfunc (f *Favorites) handleReq(req *favReq) (err error) {\n\tdefer func() { f.closeReq(req, err) }()\n\n\tkbpki := f.config.KBPKI()\n\t\/\/ Fetch a new list if:\n\t\/\/ * The user asked us to refresh\n\t\/\/ * We haven't fetched it before\n\t\/\/ * It's stale\n\tif req.refresh || f.cache == nil || time.Now().After(f.cacheExpireTime) {\n\t\tfolders, err := kbpki.FavoriteList(req.ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf.cache = make(map[Favorite]bool)\n\t\tf.cacheExpireTime = libkb.ForceWallClock(time.Now()).Add(time.\n\t\t\tHour * 24 * 7)\n\t\tfor _, folder := range folders {\n\t\t\tf.cache[*NewFavoriteFromFolder(folder)] = true\n\t\t}\n\t\tsession, err := f.config.KBPKI().GetCurrentSession(req.ctx)\n\t\tif err == nil {\n\t\t\t\/\/ Add favorites for the current user, that cannot be deleted.\n\t\t\tf.cache[Favorite{string(session.Name), tlf.Private}] = true\n\t\t\tf.cache[Favorite{string(session.Name), tlf.Public}] = true\n\t\t}\n\t}\n\n\tfor _, fav := range req.toAdd {\n\t\tif !fav.created && f.cache[fav.Favorite] {\n\t\t\tcontinue\n\t\t}\n\t\terr := kbpki.FavoriteAdd(req.ctx, fav.ToKBFolder())\n\t\tif err != nil {\n\t\t\tf.config.MakeLogger(\"\").CDebugf(req.ctx,\n\t\t\t\t\"Failure adding favorite %v: %v\", fav, err)\n\t\t\treturn err\n\t\t}\n\t\tf.cache[fav.Favorite] = true\n\t}\n\n\tfor _, fav := range req.toDel {\n\t\t\/\/ Since our cache isn't necessarily up-to-date, always delete\n\t\t\/\/ the favorite.\n\t\tfolder := fav.ToKBFolder(false)\n\t\terr := kbpki.FavoriteDelete(req.ctx, folder)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(f.cache, fav)\n\t}\n\n\tif req.favs != nil {\n\t\tfavorites := make([]Favorite, 0, len(f.cache))\n\t\tfor fav := range f.cache {\n\t\t\tfavorites = append(favorites, fav)\n\t\t}\n\t\treq.favs <- favorites\n\t}\n\n\treturn nil\n}\n\nfunc (f *Favorites) loop() {\n\tfor req := range f.reqChan {\n\t\tf.handleReq(req)\n\t\tf.wg.Done()\n\t}\n}\n\n\/\/ Shutdown shuts down this Favorites instance.\nfunc (f *Favorites) Shutdown() error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\n\tf.muShutdown.Lock()\n\tdefer f.muShutdown.Unlock()\n\tf.shutdown = true\n\tclose(f.reqChan)\n\treturn f.wg.Wait(context.Background())\n}\n\nfunc (f *Favorites) hasShutdown() bool {\n\tf.muShutdown.RLock()\n\tdefer f.muShutdown.RUnlock()\n\treturn f.shutdown\n}\n\nfunc (f *Favorites) waitOnReq(ctx context.Context,\n\treq *favReq) (retry bool, err error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn false, ctx.Err()\n\tcase <-req.done:\n\t\terr = req.err\n\t\t\/\/ If the request was canceled due to a context timeout that\n\t\t\/\/ wasn't our own, try it again.\n\t\tif err == context.Canceled || err == context.DeadlineExceeded {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn false, err\n\t\t\tdefault:\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, err\n\t}\n}\n\nfunc (f *Favorites) sendReq(ctx context.Context, req *favReq) error {\n\tf.wg.Add(1)\n\tselect {\n\tcase f.reqChan <- req:\n\tcase <-ctx.Done():\n\t\tf.wg.Done()\n\t\terr := ctx.Err()\n\t\tf.closeReq(req, err)\n\t\treturn err\n\t}\n\t\/\/ With a direct sendReq call, we'll never have a shared request,\n\t\/\/ so no need to check the retry status.\n\t_, err := f.waitOnReq(ctx, req)\n\treturn err\n}\n\nfunc (f *Favorites) startOrJoinAddReq(\n\tctx context.Context, fav favToAdd) (req *favReq, doSend bool) {\n\tf.inFlightLock.Lock()\n\tdefer f.inFlightLock.Unlock()\n\treq, ok := f.inFlightAdds[fav]\n\tif !ok {\n\t\treq = &favReq{\n\t\t\tctx: ctx,\n\t\t\ttoAdd: []favToAdd{fav},\n\t\t\tdone: make(chan struct{}),\n\t\t}\n\t\tf.inFlightAdds[fav] = req\n\t\tdoSend = true\n\t}\n\treturn req, doSend\n}\n\n\/\/ Add adds a favorite to your favorites list.\nfunc (f *Favorites) Add(ctx context.Context, fav favToAdd) error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn ShutdownHappenedError{}\n\t}\n\tdoAdd := true\n\tvar err error\n\t\/\/ Retry until we get an error that wasn't related to someone\n\t\/\/ else's context being canceled.\n\tfor doAdd {\n\t\treq, doSend := f.startOrJoinAddReq(ctx, fav)\n\t\tif doSend {\n\t\t\treturn f.sendReq(ctx, req)\n\t\t}\n\t\tdoAdd, err = f.waitOnReq(ctx, req)\n\t}\n\treturn err\n}\n\n\/\/ AddAsync initiates a request to add this favorite to your favorites\n\/\/ list, if one is not already in flight, but it doesn't wait for the\n\/\/ result. (It could block while kicking off the request, if lots of\n\/\/ different favorite operations are in flight.) The given context is\n\/\/ used only for enqueuing the request on an internal queue, not for\n\/\/ any resulting I\/O.\nfunc (f *Favorites) AddAsync(ctx context.Context, fav favToAdd) {\n\tif f.disabled || f.hasShutdown() {\n\t\treturn\n\t}\n\t\/\/ Use a fresh context, since we want the request to succeed even\n\t\/\/ if the original context is canceled.\n\treq, doSend := f.startOrJoinAddReq(context.Background(), fav)\n\tif doSend {\n\t\tf.wg.Add(1)\n\t\tselect {\n\t\tcase f.reqChan <- req:\n\t\tcase <-ctx.Done():\n\t\t\tf.wg.Done()\n\t\t\terr := ctx.Err()\n\t\t\tf.closeReq(req, err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Delete deletes a favorite from the favorites list. It is\n\/\/ idempotent.\nfunc (f *Favorites) Delete(ctx context.Context, fav Favorite) error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn ShutdownHappenedError{}\n\t}\n\treturn f.sendReq(ctx, &favReq{\n\t\tctx: ctx,\n\t\ttoDel: []Favorite{fav},\n\t\tdone: make(chan struct{}),\n\t})\n}\n\n\/\/ RefreshCache refreshes the cached list of favorites.\nfunc (f *Favorites) RefreshCache(ctx context.Context) {\n\tif f.disabled || f.hasShutdown() {\n\t\treturn\n\t}\n\t\/\/ This request is non-blocking, so use a throw-away done channel\n\t\/\/ and context.\n\treq := &favReq{\n\t\trefresh: true,\n\t\tdone: make(chan struct{}),\n\t\tctx: context.Background(),\n\t}\n\tf.wg.Add(1)\n\tselect {\n\tcase f.reqChan <- req:\n\tcase <-ctx.Done():\n\t\tf.wg.Done()\n\t\treturn\n\t}\n}\n\n\/\/ Get returns the logged-in user's list of favorites. It uses the cache.\n\/\/ TODO: decide on behavior if we've gotten an invalidation but not an updated\n\/\/ list yet\nfunc (f *Favorites) Get(ctx context.Context) ([]Favorite, error) {\n\tif f.disabled {\n\t\tsession, err := f.config.KBPKI().GetCurrentSession(ctx)\n\t\tif err == nil {\n\t\t\t\/\/ Add favorites only for the current user.\n\t\t\treturn []Favorite{\n\t\t\t\t{string(session.Name), tlf.Private},\n\t\t\t\t{string(session.Name), tlf.Public},\n\t\t\t}, nil\n\t\t}\n\t\treturn nil, nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn nil, ShutdownHappenedError{}\n\t}\n\tfavChan := make(chan []Favorite, 1)\n\treq := &favReq{\n\t\tctx: ctx,\n\t\tfavs: favChan,\n\t\tdone: make(chan struct{}),\n\t}\n\terr := f.sendReq(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn <-favChan, nil\n}\n<commit_msg>Revert \"favorites: expire cache after 1 week\"<commit_after>\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/kbfs\/kbfssync\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tdisableFavoritesEnvVar = \"KEYBASE_DISABLE_FAVORITES\"\n)\n\ntype favToAdd struct {\n\tFavorite\n\n\t\/\/ created, if set to true, indicates that this is the first time the TLF has\n\t\/\/ ever existed. It is only used when adding the TLF to favorites\n\tcreated bool\n}\n\nfunc (f favToAdd) ToKBFolder() keybase1.Folder {\n\treturn f.Favorite.ToKBFolder(f.created)\n}\n\n\/\/ favReq represents a request to access the logged-in user's\n\/\/ favorites list. A single request can do one or more of the\n\/\/ following: refresh the current cached list, add a favorite, remove\n\/\/ a favorite, and get all the favorites. When the request is done,\n\/\/ the resulting error (or nil) is sent over the done channel. The\n\/\/ given ctx is used for all network operations.\ntype favReq struct {\n\t\/\/ Request types\n\trefresh bool\n\ttoAdd []favToAdd\n\ttoDel []Favorite\n\tfavs chan<- []Favorite\n\n\t\/\/ Closed when the request is done.\n\tdone chan struct{}\n\t\/\/ Set before done is closed\n\terr error\n\n\t\/\/ Context\n\tctx context.Context\n}\n\n\/\/ Favorites manages a user's favorite list.\ntype Favorites struct {\n\tconfig Config\n\tdisabled bool\n\n\t\/\/ Channels for interacting with the favorites cache\n\treqChan chan *favReq\n\n\twg kbfssync.RepeatedWaitGroup\n\n\t\/\/ cache tracks the favorites for this user, that we know about.\n\t\/\/ It may not be consistent with the server's view of the user's\n\t\/\/ favorites list, if other devices have modified the list since\n\t\/\/ the last refresh.\n\tcache map[Favorite]bool\n\n\tinFlightLock sync.Mutex\n\tinFlightAdds map[favToAdd]*favReq\n\n\tmuShutdown sync.RWMutex\n\tshutdown bool\n}\n\nfunc newFavoritesWithChan(config Config, reqChan chan *favReq) *Favorites {\n\tdisableVal := strings.ToLower(os.Getenv(disableFavoritesEnvVar))\n\tif len(disableVal) > 0 && disableVal != \"0\" && disableVal != \"false\" &&\n\t\tdisableVal != \"no\" {\n\t\tconfig.MakeLogger(\"\").CDebugf(nil,\n\t\t\t\"Disable favorites due to env var %s=%s\",\n\t\t\tdisableFavoritesEnvVar, disableVal)\n\t\treturn &Favorites{\n\t\t\tconfig: config,\n\t\t\tdisabled: true,\n\t\t}\n\t}\n\n\tf := &Favorites{\n\t\tconfig: config,\n\t\treqChan: reqChan,\n\t\tinFlightAdds: make(map[favToAdd]*favReq),\n\t}\n\tgo f.loop()\n\treturn f\n}\n\n\/\/ NewFavorites constructs a new Favorites instance.\nfunc NewFavorites(config Config) *Favorites {\n\treturn newFavoritesWithChan(config, make(chan *favReq, 100))\n}\n\nfunc (f *Favorites) closeReq(req *favReq, err error) {\n\tf.inFlightLock.Lock()\n\tdefer f.inFlightLock.Unlock()\n\treq.err = err\n\tclose(req.done)\n\tfor _, fav := range req.toAdd {\n\t\tdelete(f.inFlightAdds, fav)\n\t}\n}\n\nfunc (f *Favorites) handleReq(req *favReq) (err error) {\n\tdefer func() { f.closeReq(req, err) }()\n\n\tkbpki := f.config.KBPKI()\n\t\/\/ Fetch a new list if:\n\t\/\/ * The user asked us to refresh\n\t\/\/ * We haven't fetched it before\n\tif req.refresh || f.cache == nil {\n\t\tfolders, err := kbpki.FavoriteList(req.ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf.cache = make(map[Favorite]bool)\n\t\tfor _, folder := range folders {\n\t\t\tf.cache[*NewFavoriteFromFolder(folder)] = true\n\t\t}\n\t\tsession, err := f.config.KBPKI().GetCurrentSession(req.ctx)\n\t\tif err == nil {\n\t\t\t\/\/ Add favorites for the current user, that cannot be deleted.\n\t\t\tf.cache[Favorite{string(session.Name), tlf.Private}] = true\n\t\t\tf.cache[Favorite{string(session.Name), tlf.Public}] = true\n\t\t}\n\t}\n\n\tfor _, fav := range req.toAdd {\n\t\tif !fav.created && f.cache[fav.Favorite] {\n\t\t\tcontinue\n\t\t}\n\t\terr := kbpki.FavoriteAdd(req.ctx, fav.ToKBFolder())\n\t\tif err != nil {\n\t\t\tf.config.MakeLogger(\"\").CDebugf(req.ctx,\n\t\t\t\t\"Failure adding favorite %v: %v\", fav, err)\n\t\t\treturn err\n\t\t}\n\t\tf.cache[fav.Favorite] = true\n\t}\n\n\tfor _, fav := range req.toDel {\n\t\t\/\/ Since our cache isn't necessarily up-to-date, always delete\n\t\t\/\/ the favorite.\n\t\tfolder := fav.ToKBFolder(false)\n\t\terr := kbpki.FavoriteDelete(req.ctx, folder)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(f.cache, fav)\n\t}\n\n\tif req.favs != nil {\n\t\tfavorites := make([]Favorite, 0, len(f.cache))\n\t\tfor fav := range f.cache {\n\t\t\tfavorites = append(favorites, fav)\n\t\t}\n\t\treq.favs <- favorites\n\t}\n\n\treturn nil\n}\n\nfunc (f *Favorites) loop() {\n\tfor req := range f.reqChan {\n\t\tf.handleReq(req)\n\t\tf.wg.Done()\n\t}\n}\n\n\/\/ Shutdown shuts down this Favorites instance.\nfunc (f *Favorites) Shutdown() error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\n\tf.muShutdown.Lock()\n\tdefer f.muShutdown.Unlock()\n\tf.shutdown = true\n\tclose(f.reqChan)\n\treturn f.wg.Wait(context.Background())\n}\n\nfunc (f *Favorites) hasShutdown() bool {\n\tf.muShutdown.RLock()\n\tdefer f.muShutdown.RUnlock()\n\treturn f.shutdown\n}\n\nfunc (f *Favorites) waitOnReq(ctx context.Context,\n\treq *favReq) (retry bool, err error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn false, ctx.Err()\n\tcase <-req.done:\n\t\terr = req.err\n\t\t\/\/ If the request was canceled due to a context timeout that\n\t\t\/\/ wasn't our own, try it again.\n\t\tif err == context.Canceled || err == context.DeadlineExceeded {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn false, err\n\t\t\tdefault:\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, err\n\t}\n}\n\nfunc (f *Favorites) sendReq(ctx context.Context, req *favReq) error {\n\tf.wg.Add(1)\n\tselect {\n\tcase f.reqChan <- req:\n\tcase <-ctx.Done():\n\t\tf.wg.Done()\n\t\terr := ctx.Err()\n\t\tf.closeReq(req, err)\n\t\treturn err\n\t}\n\t\/\/ With a direct sendReq call, we'll never have a shared request,\n\t\/\/ so no need to check the retry status.\n\t_, err := f.waitOnReq(ctx, req)\n\treturn err\n}\n\nfunc (f *Favorites) startOrJoinAddReq(\n\tctx context.Context, fav favToAdd) (req *favReq, doSend bool) {\n\tf.inFlightLock.Lock()\n\tdefer f.inFlightLock.Unlock()\n\treq, ok := f.inFlightAdds[fav]\n\tif !ok {\n\t\treq = &favReq{\n\t\t\tctx: ctx,\n\t\t\ttoAdd: []favToAdd{fav},\n\t\t\tdone: make(chan struct{}),\n\t\t}\n\t\tf.inFlightAdds[fav] = req\n\t\tdoSend = true\n\t}\n\treturn req, doSend\n}\n\n\/\/ Add adds a favorite to your favorites list.\nfunc (f *Favorites) Add(ctx context.Context, fav favToAdd) error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn ShutdownHappenedError{}\n\t}\n\tdoAdd := true\n\tvar err error\n\t\/\/ Retry until we get an error that wasn't related to someone\n\t\/\/ else's context being canceled.\n\tfor doAdd {\n\t\treq, doSend := f.startOrJoinAddReq(ctx, fav)\n\t\tif doSend {\n\t\t\treturn f.sendReq(ctx, req)\n\t\t}\n\t\tdoAdd, err = f.waitOnReq(ctx, req)\n\t}\n\treturn err\n}\n\n\/\/ AddAsync initiates a request to add this favorite to your favorites\n\/\/ list, if one is not already in flight, but it doesn't wait for the\n\/\/ result. (It could block while kicking off the request, if lots of\n\/\/ different favorite operations are in flight.) The given context is\n\/\/ used only for enqueuing the request on an internal queue, not for\n\/\/ any resulting I\/O.\nfunc (f *Favorites) AddAsync(ctx context.Context, fav favToAdd) {\n\tif f.disabled || f.hasShutdown() {\n\t\treturn\n\t}\n\t\/\/ Use a fresh context, since we want the request to succeed even\n\t\/\/ if the original context is canceled.\n\treq, doSend := f.startOrJoinAddReq(context.Background(), fav)\n\tif doSend {\n\t\tf.wg.Add(1)\n\t\tselect {\n\t\tcase f.reqChan <- req:\n\t\tcase <-ctx.Done():\n\t\t\tf.wg.Done()\n\t\t\terr := ctx.Err()\n\t\t\tf.closeReq(req, err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Delete deletes a favorite from the favorites list. It is\n\/\/ idempotent.\nfunc (f *Favorites) Delete(ctx context.Context, fav Favorite) error {\n\tif f.disabled {\n\t\treturn nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn ShutdownHappenedError{}\n\t}\n\treturn f.sendReq(ctx, &favReq{\n\t\tctx: ctx,\n\t\ttoDel: []Favorite{fav},\n\t\tdone: make(chan struct{}),\n\t})\n}\n\n\/\/ RefreshCache refreshes the cached list of favorites.\nfunc (f *Favorites) RefreshCache(ctx context.Context) {\n\tif f.disabled || f.hasShutdown() {\n\t\treturn\n\t}\n\t\/\/ This request is non-blocking, so use a throw-away done channel\n\t\/\/ and context.\n\treq := &favReq{\n\t\trefresh: true,\n\t\tdone: make(chan struct{}),\n\t\tctx: context.Background(),\n\t}\n\tf.wg.Add(1)\n\tselect {\n\tcase f.reqChan <- req:\n\tcase <-ctx.Done():\n\t\tf.wg.Done()\n\t\treturn\n\t}\n}\n\n\/\/ Get returns the logged-in user's list of favorites. It uses the cache.\n\/\/ TODO: decide on behavior if we've gotten an invalidation but not an updated\n\/\/ list yet\nfunc (f *Favorites) Get(ctx context.Context) ([]Favorite, error) {\n\tif f.disabled {\n\t\tsession, err := f.config.KBPKI().GetCurrentSession(ctx)\n\t\tif err == nil {\n\t\t\t\/\/ Add favorites only for the current user.\n\t\t\treturn []Favorite{\n\t\t\t\t{string(session.Name), tlf.Private},\n\t\t\t\t{string(session.Name), tlf.Public},\n\t\t\t}, nil\n\t\t}\n\t\treturn nil, nil\n\t}\n\tif f.hasShutdown() {\n\t\treturn nil, ShutdownHappenedError{}\n\t}\n\tfavChan := make(chan []Favorite, 1)\n\treq := &favReq{\n\t\tctx: ctx,\n\t\tfavs: favChan,\n\t\tdone: make(chan struct{}),\n\t}\n\terr := f.sendReq(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn <-favChan, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package events\n\nconst (\n\t\/\/ AnalysisFailed constant for analysis failed event action\n\tAnalysisFailed = \"analysis_failed\"\n\t\/\/ AnalysisFinished constant for analysis finished event action\n\tAnalysisFinished = \"analysis_finished\"\n\t\/\/ AnalysisPassed constant for analysis passed event action\n\tAnalysisPassed = \"analysis_passed\"\n\t\/\/ DeliveryFailed -\n\tDeliveryFailed = \"delivery_failed\"\n\t\/\/ ArtifactDeliveryFailed -\n\tArtifactDeliveryFailed = \"artifact_delivery_failed\"\n\t\/\/ ReportDeliveryFailed -\n\tReportDeliveryFailed = \"report_delivery_failed\"\n\t\/\/ SevaDeliveryFailed -\n\tSevaDeliveryFailed = \"seva_delivery_failed\"\n\t\/\/ DeliveryFinished -\n\tDeliveryFinished = \"delivery_finished\"\n\t\/\/ ArtifactDeliveryFinished -\n\tArtifactDeliveryFinished = \"artifact_delivery_finished\"\n\t\/\/ ReportDeliveryFinished -\n\tReportDeliveryFinished = \"report_delivery_finished\"\n\t\/\/ SevaDeliveryFinished -\n\tSevaDeliveryFinished = \"seva_delivery_finished\"\n\t\/\/ DeliveryCanceled -\n\tDeliveryCanceled = \"delivery_canceled\"\n\t\/\/ ArtifactDeliveryCanceled -\n\tArtifactDeliveryCanceled = \"artifact_delivery_canceled\"\n\t\/\/ ReportDeliveryCanceled -\n\tReportDeliveryCanceled = \"report_delivery_canceled\"\n\t\/\/ SevaDeliveryCanceled -\n\tSevaDeliveryCanceled = \"seva_delivery_canceled\"\n\t\/\/ ProjectAdded -\n\tProjectAdded = \"project_added\"\n\t\/\/ VersionAdded -\n\tVersionAdded = \"version_added\"\n\t\/\/ VulnerablityAdded -\n\tVulnerablityAdded = \"vulnerablity_added\"\n\t\/\/ AccountCreated -\n\tAccountCreated = \"account_created\"\n\t\/\/ ForgotPassword -\n\tForgotPassword = \"forgot_password\"\n\t\/\/ PasswordChanged -\n\tPasswordChanged = \"password_changed\"\n\t\/\/ UserSignup -\n\tUserSignup = \"user_signup\"\n\t\/\/ VulnerabilityAdded -\n\tVulnerabilityAdded = \"vulnerability_added\"\n\t\/\/ VulnerabilityUpdated -\n\tVulnerabilityUpdated = \"vulnerability_updated\"\n)\n<commit_msg>better docs<commit_after>package events\n\nconst (\n\t\/\/ AnalysisFailed constant for analysis failed event action\n\tAnalysisFailed = \"analysis_failed\"\n\t\/\/ AnalysisFinished constant for analysis finished event action\n\tAnalysisFinished = \"analysis_finished\"\n\t\/\/ AnalysisPassed constant for analysis passed event action\n\tAnalysisPassed = \"analysis_passed\"\n\t\/\/ DeliveryFailed constant for delivery failed event action\n\tDeliveryFailed = \"delivery_failed\"\n\t\/\/ ArtifactDeliveryFailed constant for artifact_delivery failed event action\n\tArtifactDeliveryFailed = \"artifact_delivery_failed\"\n\t\/\/ ReportDeliveryFailed constant for report_delivery failed event action\n\tReportDeliveryFailed = \"report_delivery_failed\"\n\t\/\/ SevaDeliveryFailed constant for seva_delivery failed event action\n\tSevaDeliveryFailed = \"seva_delivery_failed\"\n\t\/\/ DeliveryFinished constant for delivery finished event action\n\tDeliveryFinished = \"delivery_finished\"\n\t\/\/ ArtifactDeliveryFinished constant for artifact_delivery finished event action\n\tArtifactDeliveryFinished = \"artifact_delivery_finished\"\n\t\/\/ ReportDeliveryFinished constant for report_delivery finished event action\n\tReportDeliveryFinished = \"report_delivery_finished\"\n\t\/\/ SevaDeliveryFinished constant for seva_delivery finished event action\n\tSevaDeliveryFinished = \"seva_delivery_finished\"\n\t\/\/ DeliveryCanceled constant for delivery canceled event action\n\tDeliveryCanceled = \"delivery_canceled\"\n\t\/\/ ArtifactDeliveryCanceled constant for artifact_delivery canceled event action\n\tArtifactDeliveryCanceled = \"artifact_delivery_canceled\"\n\t\/\/ ReportDeliveryCanceled constant for report_delivery canceled event action\n\tReportDeliveryCanceled = \"report_delivery_canceled\"\n\t\/\/ SevaDeliveryCanceled constant for seva_delivery canceled event action\n\tSevaDeliveryCanceled = \"seva_delivery_canceled\"\n\t\/\/ ProjectAdded constant for project added event action\n\tProjectAdded = \"project_added\"\n\t\/\/ VersionAdded constant for version added event action\n\tVersionAdded = \"version_added\"\n\t\/\/ VulnerablityAdded constant for vulnerablity added event action\n\tVulnerablityAdded = \"vulnerablity_added\"\n\t\/\/ AccountCreated constant for account created event action\n\tAccountCreated = \"account_created\"\n\t\/\/ ForgotPassword constant for forgot password event action\n\tForgotPassword = \"forgot_password\"\n\t\/\/ PasswordChanged constant for password changed event action\n\tPasswordChanged = \"password_changed\"\n\t\/\/ UserSignup constant for user signup event action\n\tUserSignup = \"user_signup\"\n\t\/\/ VulnerabilityAdded constant for vulnerability added event action\n\tVulnerabilityAdded = \"vulnerability_added\"\n\t\/\/ VulnerabilityUpdated constant for vulnerability updated event action\n\tVulnerabilityUpdated = \"vulnerability_updated\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"upspin.io\/config\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/user\"\n)\n\nfunc (s *State) signup(args ...string) {\n\tconst help = `\nSignup generates an Upspin configuration file and private\/public key pair,\nstores them locally, and sends a signup request to the public Upspin key server\nat key.upspin.io. The server will respond by sending a confirmation email to\nthe given email address (or \"username\").\n\nSignup writes a the configuration file to $HOME\/upspin\/config, holding the\nusername and the location of the directory and store servers. It writes the\npublic and private keys to $HOME\/.ssh. These locations may be set using the\n-config and -where flags.\n\nThe -dir and -store flags specify the network addresses of the Store and\nDirectory servers that the Upspin user will use. The -server flag may be used\nto specify a single server that acts as both Store and Directory, in which case\nthe -dir and -store flags must not be set.\n\nBy default, signup creates new keys with the p256 cryptographic curve set.\nThe -curve and -secretseed flags allow the user to control the curve or to\nrecreate or reuse prior keys.\n\nThe -signuponly flag tells signup to skip the generation of the configuration\nfile and keys and only send the signup request to the key server.\n`\n\tfs := flag.NewFlagSet(\"signup\", flag.ExitOnError)\n\tvar (\n\t\tforce = fs.Bool(\"force\", false, \"create a new user even if keys and config file exist\")\n\t\tconfigFile = fs.String(\"config\", \"upspin\/config\", \"location of the config `file`\")\n\t\twhere = fs.String(\"where\", filepath.Join(os.Getenv(\"HOME\"), \".ssh\"), \"`directory` to store keys\")\n\t\tdirServer = fs.String(\"dir\", \"\", \"Directory server `address`\")\n\t\tstoreServer = fs.String(\"store\", \"\", \"Store server `address`\")\n\t\tbothServer = fs.String(\"server\", \"\", \"Store and Directory server `address` (if combined)\")\n\t\tsignupOnly = fs.Bool(\"signuponly\", false, \"only send signup request to key server; do not generate config or keys\")\n\t)\n\t\/\/ Used only in keygen.\n\tfs.String(\"curve\", \"p256\", \"cryptographic curve `name`: p256, p384, or p521\")\n\tfs.String(\"secretseed\", \"\", \"128 bit secret `seed` in proquint format\")\n\n\ts.parseFlags(fs, args, help, \"signup [flags] <username>\")\n\n\t\/\/ Determine config file location.\n\tif !filepath.IsAbs(*configFile) {\n\t\t\/\/ User must have a home dir in the local OS.\n\t\thomedir, err := config.Homedir()\n\t\tif err != nil {\n\t\t\ts.exit(err)\n\t\t}\n\t\t*configFile = filepath.Join(homedir, *configFile)\n\t}\n\n\tif *signupOnly {\n\t\t\/\/ Don't generate; just send the signup request to the key server.\n\t\ts.registerUser(*configFile)\n\t\treturn\n\t}\n\n\t\/\/ Check flags.\n\tif fs.NArg() != 1 {\n\t\tfs.Usage()\n\t}\n\tif *bothServer != \"\" {\n\t\tif *dirServer != \"\" || *storeServer != \"\" {\n\t\t\ts.failf(\"if -server provided -dir and -store must not be set\")\n\t\t\tfs.Usage()\n\t\t}\n\t\t*dirServer = *bothServer\n\t\t*storeServer = *bothServer\n\t}\n\tif *dirServer == \"\" || *storeServer == \"\" {\n\t\ts.failf(\"-dir and -store must both be provided\")\n\t\tfs.Usage()\n\t}\n\n\t\/\/ Parse -dir and -store flags as addresses and construct remote endpoints.\n\tdirEndpoint, err := parseAddress(*dirServer)\n\tif err != nil {\n\t\ts.exitf(\"error parsing -dir=%q: %v\", dirServer, err)\n\t}\n\tstoreEndpoint, err := parseAddress(*storeServer)\n\tif err != nil {\n\t\ts.exitf(\"error parsing -store=%q: %v\", storeServer, err)\n\t}\n\n\t\/\/ Parse user name.\n\tuname, _, domain, err := user.Parse(upspin.UserName(fs.Arg(0)))\n\tif err != nil {\n\t\ts.exitf(\"invalid user name %q: %v\", fs.Arg(0), err)\n\t}\n\tuserName := upspin.UserName(uname + \"@\" + domain)\n\n\tenv := os.Environ()\n\twipeUpspinEnvironment()\n\tdefer restoreEnvironment(env)\n\n\t\/\/ Verify if we have a config file.\n\t_, err = config.FromFile(*configFile)\n\tif err == nil && !*force {\n\t\ts.exitf(\"%s already exists\", *configFile)\n\t}\n\n\t\/\/ Write the config file.\n\tvar configContents bytes.Buffer\n\terr = configTemplate.Execute(&configContents, configData{\n\t\tUserName: userName,\n\t\tDir: dirEndpoint,\n\t\tStore: storeEndpoint,\n\t\tSecretDir: *where,\n\t\tPacking: \"ee\",\n\t})\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\terr = ioutil.WriteFile(*configFile, configContents.Bytes(), 0640)\n\tif err != nil {\n\t\t\/\/ Directory doesn't exist, perhaps.\n\t\tif !os.IsNotExist(err) {\n\t\t\ts.exitf(\"cannot create %s: %v\", *configFile, err)\n\t\t}\n\t\tdir := filepath.Dir(*configFile)\n\t\tif _, statErr := os.Stat(dir); !os.IsNotExist(statErr) {\n\t\t\t\/\/ Looks like the directory exists, so stop now and report original error.\n\t\t\ts.exitf(\"cannot create %s: %v\", *configFile, err)\n\t\t}\n\t\tif mkdirErr := os.Mkdir(dir, 0700); mkdirErr != nil {\n\t\t\ts.exitf(\"cannot make directory %s: %v\", dir, mkdirErr)\n\t\t}\n\t\terr = ioutil.WriteFile(*configFile, configContents.Bytes(), 0640)\n\t\tif err != nil {\n\t\t\ts.exit(err)\n\t\t}\n\t}\n\tfmt.Println(\"Configuration file written to:\")\n\tfmt.Printf(\"\\t%s\\n\\n\", *configFile)\n\n\t\/\/ Generate a new key.\n\ts.keygenCommand(fs)\n\n\t\/\/ Send the signup request to the key server.\n\ts.registerUser(*configFile)\n}\n\nfunc (s *State) registerUser(configFile string) {\n\tcfg, err := config.FromFile(configFile)\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\n\t\/\/ Make signup request.\n\tvals := url.Values{\n\t\t\"name\": {string(cfg.UserName())},\n\t\t\"dir\": {string(cfg.DirEndpoint().NetAddr)},\n\t\t\"store\": {string(cfg.StoreEndpoint().NetAddr)},\n\t\t\"key\": {string(cfg.Factotum().PublicKey())},\n\t}\n\tsignupURL := (&url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"key.upspin.io\",\n\t\tPath: \"\/signup\",\n\t\tRawQuery: vals.Encode(),\n\t}).String()\n\n\tr, err := http.Post(signupURL, \"text\/plain\", nil)\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\tb, err := ioutil.ReadAll(r.Body)\n\tr.Body.Close()\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\tif r.StatusCode != http.StatusOK {\n\t\ts.exitf(\"key server error: %s\", b)\n\t}\n\tfmt.Printf(\"A signup email has been sent to %q,\\n\", cfg.UserName())\n\tfmt.Println(\"please read it for further instructions.\")\n}\n\ntype configData struct {\n\tUserName upspin.UserName\n\tStore, Dir *upspin.Endpoint\n\tSecretDir string\n\tPacking string\n}\n\nvar configTemplate = template.Must(template.New(\"config\").Parse(`\nusername: {{.UserName}}\nsecrets: {{.SecretDir}}\nstoreserver: {{.Store}}\ndirserver: {{.Dir}}\npacking: {{.Packing}}\n`))\n\nfunc parseAddress(a string) (*upspin.Endpoint, error) {\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tvar err2 error\n\t\thost, port, err2 = net.SplitHostPort(a + \":443\")\n\t\tif err2 != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn upspin.ParseEndpoint(fmt.Sprintf(\"remote,%s:%s\", host, port))\n}\n\nfunc wipeUpspinEnvironment() {\n\tfor _, env := range os.Environ() {\n\t\tif strings.HasPrefix(env, \"upspin\") {\n\t\t\tos.Setenv(env, \"\")\n\t\t}\n\t}\n}\n\nfunc restoreEnvironment(env []string) {\n\tfor _, e := range env {\n\t\tkv := strings.Split(e, \"=\")\n\t\tif len(kv) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tos.Setenv(kv[0], kv[1])\n\t}\n}\n<commit_msg>cmd\/upspin: tidy up a couple of things in signup<commit_after>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"upspin.io\/config\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/user\"\n)\n\nfunc (s *State) signup(args ...string) {\n\tconst help = `\nSignup generates an Upspin configuration file and private\/public key pair,\nstores them locally, and sends a signup request to the public Upspin key server\nat key.upspin.io. The server will respond by sending a confirmation email to\nthe given email address (or \"username\").\n\nSignup writes a the configuration file to $HOME\/upspin\/config, holding the\nusername and the location of the directory and store servers. It writes the\npublic and private keys to $HOME\/.ssh. These locations may be set using the\n-config and -where flags.\n\nThe -dir and -store flags specify the network addresses of the Store and\nDirectory servers that the Upspin user will use. The -server flag may be used\nto specify a single server that acts as both Store and Directory, in which case\nthe -dir and -store flags must not be set.\n\nBy default, signup creates new keys with the p256 cryptographic curve set.\nThe -curve and -secretseed flags allow the user to control the curve or to\nrecreate or reuse prior keys.\n\nThe -signuponly flag tells signup to skip the generation of the configuration\nfile and keys and only send the signup request to the key server.\n`\n\tfs := flag.NewFlagSet(\"signup\", flag.ExitOnError)\n\tvar (\n\t\tforce = fs.Bool(\"force\", false, \"create a new user even if keys and config file exist\")\n\t\tconfigFile = fs.String(\"config\", filepath.Join(os.Getenv(\"HOME\"), \"upspin\", \"config\"), \"location of the config `file`\")\n\t\twhere = fs.String(\"where\", filepath.Join(os.Getenv(\"HOME\"), \".ssh\"), \"`directory` to store keys\")\n\t\tdirServer = fs.String(\"dir\", \"\", \"Directory server `address`\")\n\t\tstoreServer = fs.String(\"store\", \"\", \"Store server `address`\")\n\t\tbothServer = fs.String(\"server\", \"\", \"Store and Directory server `address` (if combined)\")\n\t\tsignupOnly = fs.Bool(\"signuponly\", false, \"only send signup request to key server; do not generate config or keys\")\n\t)\n\t\/\/ Used only in keygen.\n\tfs.String(\"curve\", \"p256\", \"cryptographic curve `name`: p256, p384, or p521\")\n\tfs.String(\"secretseed\", \"\", \"128 bit secret `seed` in proquint format\")\n\n\ts.parseFlags(fs, args, help, \"signup [flags] <username>\")\n\n\t\/\/ Determine config file location.\n\tif !filepath.IsAbs(*configFile) {\n\t\t\/\/ User must have a home dir in the local OS.\n\t\thomedir, err := config.Homedir()\n\t\tif err != nil {\n\t\t\ts.exit(err)\n\t\t}\n\t\t*configFile = filepath.Join(homedir, *configFile)\n\t}\n\n\tif *signupOnly {\n\t\t\/\/ Don't generate; just send the signup request to the key server.\n\t\ts.registerUser(*configFile)\n\t\treturn\n\t}\n\n\t\/\/ Check flags.\n\tif fs.NArg() != 1 {\n\t\tfs.Usage()\n\t}\n\tif *bothServer != \"\" {\n\t\tif *dirServer != \"\" || *storeServer != \"\" {\n\t\t\ts.failf(\"if -server provided -dir and -store must not be set\")\n\t\t\tfs.Usage()\n\t\t}\n\t\t*dirServer = *bothServer\n\t\t*storeServer = *bothServer\n\t}\n\tif *dirServer == \"\" || *storeServer == \"\" {\n\t\ts.failf(\"-dir and -store must both be provided\")\n\t\tfs.Usage()\n\t}\n\n\t\/\/ Parse -dir and -store flags as addresses and construct remote endpoints.\n\tdirEndpoint, err := parseAddress(*dirServer)\n\tif err != nil {\n\t\ts.exitf(\"error parsing -dir=%q: %v\", dirServer, err)\n\t}\n\tstoreEndpoint, err := parseAddress(*storeServer)\n\tif err != nil {\n\t\ts.exitf(\"error parsing -store=%q: %v\", storeServer, err)\n\t}\n\n\t\/\/ Parse user name.\n\tuname, _, domain, err := user.Parse(upspin.UserName(fs.Arg(0)))\n\tif err != nil {\n\t\ts.exitf(\"invalid user name %q: %v\", fs.Arg(0), err)\n\t}\n\tuserName := upspin.UserName(uname + \"@\" + domain)\n\n\tenv := os.Environ()\n\twipeUpspinEnvironment()\n\tdefer restoreEnvironment(env)\n\n\t\/\/ Verify if we have a config file.\n\t_, err = config.FromFile(*configFile)\n\tif err == nil && !*force {\n\t\ts.exitf(\"%s already exists\", *configFile)\n\t}\n\n\t\/\/ Write the config file.\n\tvar configContents bytes.Buffer\n\terr = configTemplate.Execute(&configContents, configData{\n\t\tUserName: userName,\n\t\tDir: dirEndpoint,\n\t\tStore: storeEndpoint,\n\t\tSecretDir: *where,\n\t\tPacking: \"ee\",\n\t})\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\terr = ioutil.WriteFile(*configFile, configContents.Bytes(), 0640)\n\tif err != nil {\n\t\t\/\/ Directory doesn't exist, perhaps.\n\t\tif !os.IsNotExist(err) {\n\t\t\ts.exitf(\"cannot create %s: %v\", *configFile, err)\n\t\t}\n\t\tdir := filepath.Dir(*configFile)\n\t\tif _, statErr := os.Stat(dir); !os.IsNotExist(statErr) {\n\t\t\t\/\/ Looks like the directory exists, so stop now and report original error.\n\t\t\ts.exitf(\"cannot create %s: %v\", *configFile, err)\n\t\t}\n\t\tif mkdirErr := os.Mkdir(dir, 0700); mkdirErr != nil {\n\t\t\ts.exitf(\"cannot make directory %s: %v\", dir, mkdirErr)\n\t\t}\n\t\terr = ioutil.WriteFile(*configFile, configContents.Bytes(), 0640)\n\t\tif err != nil {\n\t\t\ts.exit(err)\n\t\t}\n\t}\n\tfmt.Println(\"Configuration file written to:\")\n\tfmt.Printf(\"\\t%s\\n\\n\", *configFile)\n\n\t\/\/ Generate a new key.\n\ts.keygenCommand(fs)\n\n\t\/\/ Send the signup request to the key server.\n\ts.registerUser(*configFile)\n}\n\n\/\/ registerUser reads the config file and sends its information to the key server.\nfunc (s *State) registerUser(configFile string) {\n\tcfg, err := config.FromFile(configFile)\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\n\t\/\/ Make signup request.\n\tvals := url.Values{\n\t\t\"name\": {string(cfg.UserName())},\n\t\t\"dir\": {string(cfg.DirEndpoint().NetAddr)},\n\t\t\"store\": {string(cfg.StoreEndpoint().NetAddr)},\n\t\t\"key\": {string(cfg.Factotum().PublicKey())},\n\t}\n\tsignupURL := (&url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"key.upspin.io\",\n\t\tPath: \"\/signup\",\n\t\tRawQuery: vals.Encode(),\n\t}).String()\n\n\tr, err := http.Post(signupURL, \"text\/plain\", nil)\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\tb, err := ioutil.ReadAll(r.Body)\n\tr.Body.Close()\n\tif err != nil {\n\t\ts.exit(err)\n\t}\n\tif r.StatusCode != http.StatusOK {\n\t\ts.exitf(\"key server error: %s\", b)\n\t}\n\tfmt.Printf(\"A signup email has been sent to %q,\\n\", cfg.UserName())\n\tfmt.Println(\"please read it for further instructions.\")\n}\n\ntype configData struct {\n\tUserName upspin.UserName\n\tStore, Dir *upspin.Endpoint\n\tSecretDir string\n\tPacking string\n}\n\nvar configTemplate = template.Must(template.New(\"config\").Parse(`\nusername: {{.UserName}}\nsecrets: {{.SecretDir}}\nstoreserver: {{.Store}}\ndirserver: {{.Dir}}\npacking: {{.Packing}}\n`))\n\nfunc parseAddress(a string) (*upspin.Endpoint, error) {\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tvar err2 error\n\t\thost, port, err2 = net.SplitHostPort(a + \":443\")\n\t\tif err2 != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn upspin.ParseEndpoint(fmt.Sprintf(\"remote,%s:%s\", host, port))\n}\n\nfunc wipeUpspinEnvironment() {\n\tfor _, env := range os.Environ() {\n\t\tif strings.HasPrefix(env, \"upspin\") {\n\t\t\tos.Setenv(env, \"\")\n\t\t}\n\t}\n}\n\nfunc restoreEnvironment(env []string) {\n\tfor _, e := range env {\n\t\tkv := strings.Split(e, \"=\")\n\t\tif len(kv) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tos.Setenv(kv[0], kv[1])\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sdb\n\nimport (\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ A common helper class.\ntype simpleDBTest struct {\n\tc *fakeConn\n\tdb SimpleDB\n}\n\nfunc (t *simpleDBTest) SetUp(i *TestInfo) {\n\tvar err error\n\n\tt.c = &fakeConn{}\n\n\tt.db, err = newSimpleDB(t.c)\n\tAssertEq(nil, err)\n}\n\ntype fakeDomain struct {\n\tname string\n}\n\nfunc (d *fakeDomain) Name() string {\n\treturn d.name\n}\n\nfunc (d *fakeDomain) PutAttributes(\n\t\titem ItemName,\n\t\tupdates []PutUpdate,\n\t\tpreconditions []Precondition) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) BatchPutAttributes(updateMap map[ItemName][]PutUpdate) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) DeleteAttributes(\n\t\titem ItemName,\n\t\tdeletes []DeleteUpdate,\n\t\tpreconditions []Precondition) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) BatchDeleteAttributes(deleteMap map[ItemName][]DeleteUpdate) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) GetAttributes(\n\t\titem ItemName,\n\t\tconstistentRead bool,\n\t\tattrNames []string) (attrs []Attribute, err error) {\n\tpanic(\"Unsupported\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ OpenDomain\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype OpenDomainTest struct {\n\tsimpleDBTest\n\n\tname string\n\n\tdomain Domain\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&OpenDomainTest{}) }\n\nfunc (t *OpenDomainTest) SetUp(i *TestInfo) {\n\t\/\/ Call common setup code.\n\tt.simpleDBTest.SetUp(i)\n\n\t\/\/ Make the request legal by default.\n\tt.name = \"foo\"\n}\n\nfunc (t *OpenDomainTest) callDB() {\n\tt.domain, t.err = t.db.OpenDomain(t.name)\n}\n\nfunc (t *OpenDomainTest) NameIsEmpty() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) NameIsInvalid() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) CallsConn() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) ConnReturnsError() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) CallsFactoryFuncAndReturnsResult() {\n\tExpectFalse(true, \"TODO\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ DeleteDomain\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype DeleteDomainTest struct {\n\tsimpleDBTest\n\n\tdomain Domain\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&DeleteDomainTest{}) }\n\nfunc (t *DeleteDomainTest) SetUp(i *TestInfo) {\n\t\/\/ Call common setup code.\n\tt.simpleDBTest.SetUp(i)\n\n\t\/\/ Set up a fake named domain.\n\tt.domain = &fakeDomain{\"some_domain\"}\n}\n\nfunc (t *DeleteDomainTest) callDB() {\n\tt.err = t.db.DeleteDomain(t.domain)\n}\n\nfunc (t *DeleteDomainTest) DoesFoo() {\n\tExpectFalse(true, \"TODO\")\n}\n<commit_msg>OpenDomainTest.NameIsEmpty<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sdb\n\nimport (\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ A common helper class.\ntype simpleDBTest struct {\n\tc *fakeConn\n\tdb SimpleDB\n}\n\nfunc (t *simpleDBTest) SetUp(i *TestInfo) {\n\tvar err error\n\n\tt.c = &fakeConn{}\n\n\tt.db, err = newSimpleDB(t.c)\n\tAssertEq(nil, err)\n}\n\ntype fakeDomain struct {\n\tname string\n}\n\nfunc (d *fakeDomain) Name() string {\n\treturn d.name\n}\n\nfunc (d *fakeDomain) PutAttributes(\n\t\titem ItemName,\n\t\tupdates []PutUpdate,\n\t\tpreconditions []Precondition) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) BatchPutAttributes(updateMap map[ItemName][]PutUpdate) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) DeleteAttributes(\n\t\titem ItemName,\n\t\tdeletes []DeleteUpdate,\n\t\tpreconditions []Precondition) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) BatchDeleteAttributes(deleteMap map[ItemName][]DeleteUpdate) error {\n\tpanic(\"Unsupported\")\n}\n\nfunc (d *fakeDomain) GetAttributes(\n\t\titem ItemName,\n\t\tconstistentRead bool,\n\t\tattrNames []string) (attrs []Attribute, err error) {\n\tpanic(\"Unsupported\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ OpenDomain\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype OpenDomainTest struct {\n\tsimpleDBTest\n\n\tname string\n\n\tdomain Domain\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&OpenDomainTest{}) }\n\nfunc (t *OpenDomainTest) SetUp(i *TestInfo) {\n\t\/\/ Call common setup code.\n\tt.simpleDBTest.SetUp(i)\n\n\t\/\/ Make the request legal by default.\n\tt.name = \"foo\"\n}\n\nfunc (t *OpenDomainTest) callDB() {\n\tt.domain, t.err = t.db.OpenDomain(t.name)\n}\n\nfunc (t *OpenDomainTest) NameIsEmpty() {\n\tt.name = \"\"\n\n\t\/\/ Call\n\tt.callDB()\n\n\tExpectThat(t.err, Error(HasSubstr(\"domain\")))\n\tExpectThat(t.err, Error(HasSubstr(\"name\")))\n\tExpectThat(t.err, Error(HasSubstr(\"empty\")))\n}\n\nfunc (t *OpenDomainTest) NameIsInvalid() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) CallsConn() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) ConnReturnsError() {\n\tExpectFalse(true, \"TODO\")\n}\n\nfunc (t *OpenDomainTest) CallsFactoryFuncAndReturnsResult() {\n\tExpectFalse(true, \"TODO\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ DeleteDomain\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype DeleteDomainTest struct {\n\tsimpleDBTest\n\n\tdomain Domain\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&DeleteDomainTest{}) }\n\nfunc (t *DeleteDomainTest) SetUp(i *TestInfo) {\n\t\/\/ Call common setup code.\n\tt.simpleDBTest.SetUp(i)\n\n\t\/\/ Set up a fake named domain.\n\tt.domain = &fakeDomain{\"some_domain\"}\n}\n\nfunc (t *DeleteDomainTest) callDB() {\n\tt.err = t.db.DeleteDomain(t.domain)\n}\n\nfunc (t *DeleteDomainTest) DoesFoo() {\n\tExpectFalse(true, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>package ytdl\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\n\/\/ Run a single test with:\nfunc TestVideoInfo(t *testing.T) {\n\n\ttests := []struct {\n\t\turl string\n\t\terrorMessage string\n\t\tduration time.Duration\n\t\tpublished time.Time\n\t\ttitle string\n\t\tuploader string\n\t\tdescription string\n\t\tsong string\n\t\tartist string\n\t}{\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/\",\n\t\t\terrorMessage: \"invalid youtube URL, no video id\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.facebook.com\/video.php?v=10153820411888896\",\n\t\t\terrorMessage: \"invalid youtube URL, no video id\",\n\t\t},\n\t\t\/\/{\n\t\t\/\/\turl: \"https:\/\/www.youtube.com\/watch?v=TDgn8k9uyW4\",\n\t\t\/\/},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\ttitle: `youtube-dl test video \"'\/\\ä↭𝕐`,\n\t\t\tuploader: \"Philipp Hagemeister\",\n\t\t\tduration: time.Second * 10,\n\t\t\tpublished: newDate(2012, 10, 2),\n\t\t\tdescription: \"test chars: \\\"'\/\\\\ä↭𝕐\\ntest URL: https:\/\/github.com\/rg3\/youtube-dl\/iss...\\n\\nThis is a test video for youtube-dl.\\n\\nFor more information, contact phihag@phihag.de .\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=YQHsXMglC9A\",\n\t\t\ttitle: \"Adele - Hello\",\n\t\t\tuploader: \"AdeleVEVO\",\n\t\t\tduration: time.Second * 367,\n\t\t\tpublished: newDate(2015, 10, 22),\n\t\t\tartist: \"Adele\",\n\t\t\tsong: \"Hello\",\n\t\t\tdescription: \"‘Hello' is taken from the new album, 25, out November 20. http:\/\/adele.com\\nAvailable now from iTunes http:\/\/smarturl.it\/itunes25 \\nAvailable now from Amazon http:\/\/smarturl.it\/25amazon \\nAvailable now from Google Play http:\/\/smarturl.it\/25gplay\\nAvailable now at Target (US Only): http:\/\/smarturl.it\/target25\\n\\nDirected by Xavier Dolan, @XDolan\\n\\nFollow Adele on:\\n\\nFacebook - https:\/\/www.facebook.com\/Adele\\nTwitter - https:\/\/twitter.com\/Adele \\nInstagram - http:\/\/instagram.com\/Adele\\n\\nhttp:\/\/vevo.ly\/jzAuJ1\\n\\nCommissioner: Phil Lee\\nProduction Company: Believe Media\/Sons of Manual\/Metafilms\\nDirector: Xavier Dolan\\nExecutive Producer: Jannie McInnes\\nProducer: Nancy Grant\/Xavier Dolan\\nCinematographer: André Turpin\\nProduction design : Colombe Raby\\nEditor: Xavier Dolan\\nAdele's lover : Tristan Wilds\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=H-30B0cqh88\",\n\t\t\ttitle: \"Kung Fu Panda 3 Official Trailer #3 (2016) - Jack Black, Angelina Jolie Animated Movie HD\",\n\t\t\tuploader: \"Movieclips Trailers\",\n\t\t\tduration: time.Second * 145,\n\t\t\tpublished: newDate(2015, 12, 16),\n\n\t\t\tdescription: \"Subscribe to TRAILERS: http:\/\/bit.ly\/sxaw6h\\nSubscribe to COMING SOON: http:\/\/bit.ly\/H2vZUn\\nLike us on FACEBOOK: http:\/\/bit.ly\/1QyRMsE\\nFollow us on TWITTER: http:\/\/bit.ly\/1ghOWmt\\nKung Fu Panda 3 Official International Trailer #1 (2016) - Jack Black, Angelina Jolie Animation HD\\n\\nIn 2016, one of the most successful animated franchises in the world returns with its biggest comedy adventure yet, KUNG FU PANDA 3. When Po's long-lost panda father suddenly reappears, the reunited duo travels to a secret panda paradise to meet scores of hilarious new panda characters. But when the supernatural villain Kai begins to sweep across China defeating all the kung fu masters, Po must do the impossible - learn to train a village full of his fun-loving, clumsy brethren to become the ultimate band of Kung Fu Pandas!\\n\\nThe Fandango MOVIECLIPS Trailers channel is your destination for the hottest new trailers the second they drop. Whether it's the latest studio release, an indie horror flick, an evocative documentary, or that new RomCom you've been waiting for, the Fandango MOVIECLIPS team is here day and night to make sure all the best new movie trailers are here for you the moment they're released.\\n\\nIn addition to being the #1 Movie Trailers Channel on YouTube, we deliver amazing and engaging original videos each week. Watch our exclusive Ultimate Trailers, Showdowns, Instant Trailer Reviews, Monthly MashUps, Movie News, and so much more to keep you in the know.\\n\\nHere at Fandango MOVIECLIPS, we love movies as much as you!\",\n\t\t},\n\t\t\/\/ Test VEVO video with age protection\n\t\t\/\/ https:\/\/github.com\/ytdl-org\/youtube-dl\/issues\/956\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=07FYdnEawAQ\",\n\t\t\ttitle: `Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)`,\n\t\t\tuploader: \"justintimberlakeVEVO\",\n\t\t\tduration: time.Second * 419,\n\t\t\tpublished: newDate(2013, 7, 3),\n\t\t\tsong: \"Tunnel Vision\",\n\t\t\tartist: \"Justin Timberlake\",\n\t\t\tdescription: \"Executive Producer: Jeff Nicholas \\nProduced by Jonathan Craven and Nathan Scherrer \\nDirected by Jonathan Craven, Simon McLoughlin and Jeff Nicholas for The Uprising Creative (http:\/\/theuprisingcreative.com) \\nDirector Of Photography: Sing Howe Yam \\nEditor: Jacqueline London\\n\\nOfficial music video by Justin Timberlake performing Tunnel Vision (Explicit). (C) 2013 RCA Records, a division of Sony Music Entertainment\\n\\n#JustinTimberlake #TunnelVision #Vevo #Pop #OfficialMuiscVideo\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=qHGTs1NSB1s\",\n\t\t\ttitle: \"Why Linus Torvalds doesn't use Ubuntu or Debian\",\n\t\t\tuploader: \"TFiR\",\n\t\t\tdescription: `Subscribe to our weekly newsletter: https:\/\/www.tfir.io\/dnl\nBecome a patron of this channel: https:\/\/www.patreon.com\/TFIR\nFollow us on Twitter: https:\/\/twitter.com\/tfir_io\nLike us on Facebook: https:\/\/www.facebook.com\/TFiRMedia\/\n\nLinus gives the practical reasons why he doesn't use Ubuntu or Debian.`,\n\t\t\tduration: time.Second * 162,\n\t\t\tpublished: newDate(2014, 9, 3),\n\t\t},\n\t\t\/\/ 256k DASH audio (format 141) via DASH manifest\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=a9LDPn-MO4I\",\n\t\t\ttitle: \"UHDTV TEST 8K VIDEO.mp4\",\n\t\t\tuploader: \"8KVIDEO\",\n\t\t\tdescription: \"\",\n\t\t\tduration: time.Second * 60,\n\t\t\tpublished: newDate(2012, 10, 2),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.url, func(t *testing.T) {\n\t\t\tassert := assert.New(t)\n\t\t\tclient := newTestClient(t)\n\t\t\tinfo, err := client.GetVideoInfo(context.Background(), tt.url)\n\n\t\t\tif tt.errorMessage != \"\" {\n\t\t\t\t\/\/ we expect an error\n\t\t\t\tassert.EqualError(err, tt.errorMessage)\n\t\t\t} else {\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\tassert.Equal(tt.duration, info.Duration, \"Duration mismatch\")\n\t\t\t\t\tassert.Equal(tt.title, info.Title, \"Title mismatch\")\n\t\t\t\t\tassert.Equal(tt.published, info.DatePublished, \"DatePublished mismatch\")\n\t\t\t\t\tassert.Equal(tt.uploader, info.Uploader, \"Uploader mismatch\")\n\t\t\t\t\tassert.Equal(tt.song, info.Song, \"Song mismatch\")\n\t\t\t\t\tassert.Equal(tt.artist, info.Artist, \"Artist mismatch\")\n\t\t\t\t\tassert.Equal(tt.description, info.Description, \"Description mismatch\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExtractIDfromValidURL(t *testing.T) {\n\ttests := []string{\n\t\t\"http:\/\/youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/m.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc&v=UxxajLWwzqY\",\n\t\t\"https:\/\/www.youtube.com\/embed\/BaW_jenozKc?list=PLEbnTDJUr_IegfoqO4iPnPYQui46QqT0j\",\n\t\t\"https:\/\/youtu.be\/BaW_jenozKc\",\n\t}\n\tfor _, input := range tests {\n\t\tt.Run(input, func(t *testing.T) {\n\t\t\turi, err := url.ParseRequestURI(input)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, \"BaW_jenozKc\", extractVideoID(uri))\n\t\t})\n\t}\n}\n\nfunc TestExtractIDfromInvalidURL(t *testing.T) {\n\turi, err := url.ParseRequestURI(\"https:\/\/otherhost.com\/watch?v=BaW_jenozKc\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"\", extractVideoID(uri))\n}\n\nfunc TestGetDownloadURL(t *testing.T) {\n\ttestCases := []string{\n\t\t\"FrG4TEcSuRg\",\n\t\t\"jgVhBThJdXc\",\n\t\t\"MXgnIP4rMoI\",\n\t\t\"peBgUMT26jM\",\n\t\t\"aQZDbBGBJsM\",\n\t\t\"cRS4mS4gKwg\",\n\t\t\"0fllyJTBsRU\",\n\t}\n\tfor _, id := range testCases {\n\t\tt.Run(id, func(t *testing.T) {\n\t\t\tclient := newTestClient(t)\n\t\t\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=\"+id)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif len(info.Formats) == 0 {\n\t\t\t\tt.Fatal(\"empty format list\")\n\t\t\t}\n\n\t\t\tformat := info.Formats.Worst(FormatResolutionKey)[0]\n\t\t\t_, err = client.GetDownloadURL(context.Background(), info, format)\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestDownloadVideo(t *testing.T) {\n\tclient := newTestClient(t)\n\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=FrG4TEcSuRg\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tformat := info.Formats.Worst(FormatResolutionKey)[0]\n\terr = client.Download(context.Background(), info, format, ioutil.Discard)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestThumbnail(t *testing.T) {\n\tclient := newTestClient(t)\n\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=FrG4TEcSuRg\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\n\t\/\/ test valid thumnail qualities\n\tqualities := []ThumbnailQuality{\n\t\tThumbnailQualityDefault,\n\t\tThumbnailQualityHigh,\n\t\tThumbnailQualityMaxRes,\n\t\tThumbnailQualityMedium,\n\t\tThumbnailQualitySD,\n\t}\n\tfor _, v := range qualities {\n\t\tt.Run(string(v), func(t *testing.T) {\n\t\t\tu := info.GetThumbnailURL(v)\n\t\t\tresp, err := client.httpGetAndCheckResponse(ctx, u.String())\n\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t})\n\t}\n\n\t\/\/ test invalid thumbnail quality\n\tt.Run(\"invalid\", func(t *testing.T) {\n\t\tu := info.GetThumbnailURL(ThumbnailQuality(\"invalid\"))\n\t\t_, err = client.httpGetAndCheckResponse(ctx, u.String())\n\t\tassert.EqualError(t, err, \"unexpected status code: 404\")\n\t})\n}\n\nfunc newDate(y, m, d int) time.Time {\n\treturn time.Date(y, time.Month(m), d, 0, 0, 0, 0, time.UTC)\n}\n<commit_msg>Fix tests<commit_after>package ytdl\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\n\/\/ Run a single test with:\nfunc TestVideoInfo(t *testing.T) {\n\n\ttests := []struct {\n\t\turl string\n\t\terrorMessage string\n\t\tduration time.Duration\n\t\tpublished time.Time\n\t\ttitle string\n\t\tuploader string\n\t\tdescription string\n\t\tsong string\n\t\tartist string\n\t}{\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/\",\n\t\t\terrorMessage: \"invalid youtube URL, no video id\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.facebook.com\/video.php?v=10153820411888896\",\n\t\t\terrorMessage: \"invalid youtube URL, no video id\",\n\t\t},\n\t\t\/\/{\n\t\t\/\/\turl: \"https:\/\/www.youtube.com\/watch?v=TDgn8k9uyW4\",\n\t\t\/\/},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\ttitle: `youtube-dl test video \"'\/\\ä↭𝕐`,\n\t\t\tuploader: \"Philipp Hagemeister\",\n\t\t\tduration: time.Second * 10,\n\t\t\tpublished: newDate(2012, 10, 2),\n\t\t\tdescription: \"test chars: \\\"'\/\\\\ä↭𝕐\\ntest URL: https:\/\/github.com\/rg3\/youtube-dl\/iss...\\n\\nThis is a test video for youtube-dl.\\n\\nFor more information, contact phihag@phihag.de .\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=YQHsXMglC9A\",\n\t\t\ttitle: \"Adele - Hello\",\n\t\t\tuploader: \"AdeleVEVO\",\n\t\t\tduration: time.Second * 367,\n\t\t\tpublished: newDate(2015, 10, 22),\n\t\t\tartist: \"Adele\",\n\t\t\tsong: \"Hello\",\n\t\t\tdescription: \"‘Hello' is taken from the new album, 25, out November 20. http:\/\/adele.com\\nAvailable now from iTunes http:\/\/smarturl.it\/itunes25 \\nAvailable now from Amazon http:\/\/smarturl.it\/25amazon \\nAvailable now from Google Play http:\/\/smarturl.it\/25gplay\\nAvailable now at Target (US Only): http:\/\/smarturl.it\/target25\\n\\nDirected by Xavier Dolan, @XDolan\\n\\nFollow Adele on:\\n\\nFacebook - https:\/\/www.facebook.com\/Adele\\nTwitter - https:\/\/twitter.com\/Adele \\nInstagram - http:\/\/instagram.com\/Adele\\n\\nhttp:\/\/vevo.ly\/jzAuJ1\\n\\nCommissioner: Phil Lee\\nProduction Company: Believe Media\/Sons of Manual\/Metafilms\\nDirector: Xavier Dolan\\nExecutive Producer: Jannie McInnes\\nProducer: Nancy Grant\/Xavier Dolan\\nCinematographer: André Turpin\\nProduction design : Colombe Raby\\nEditor: Xavier Dolan\\nAdele's lover : Tristan Wilds\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=H-30B0cqh88\",\n\t\t\ttitle: \"Kung Fu Panda 3 Official Trailer #3 (2016) - Jack Black, Angelina Jolie Animated Movie HD\",\n\t\t\tuploader: \"Movieclips Trailers\",\n\t\t\tduration: time.Second * 145,\n\t\t\tpublished: newDate(2015, 12, 16),\n\n\t\t\tdescription: \"Subscribe to TRAILERS: http:\/\/bit.ly\/sxaw6h\\nSubscribe to COMING SOON: http:\/\/bit.ly\/H2vZUn\\nLike us on FACEBOOK: http:\/\/bit.ly\/1QyRMsE\\nFollow us on TWITTER: http:\/\/bit.ly\/1ghOWmt\\nKung Fu Panda 3 Official International Trailer #1 (2016) - Jack Black, Angelina Jolie Animation HD\\n\\nIn 2016, one of the most successful animated franchises in the world returns with its biggest comedy adventure yet, KUNG FU PANDA 3. When Po's long-lost panda father suddenly reappears, the reunited duo travels to a secret panda paradise to meet scores of hilarious new panda characters. But when the supernatural villain Kai begins to sweep across China defeating all the kung fu masters, Po must do the impossible - learn to train a village full of his fun-loving, clumsy brethren to become the ultimate band of Kung Fu Pandas!\\n\\nThe Fandango MOVIECLIPS Trailers channel is your destination for the hottest new trailers the second they drop. Whether it's the latest studio release, an indie horror flick, an evocative documentary, or that new RomCom you've been waiting for, the Fandango MOVIECLIPS team is here day and night to make sure all the best new movie trailers are here for you the moment they're released.\\n\\nIn addition to being the #1 Movie Trailers Channel on YouTube, we deliver amazing and engaging original videos each week. Watch our exclusive Ultimate Trailers, Showdowns, Instant Trailer Reviews, Monthly MashUps, Movie News, and so much more to keep you in the know.\\n\\nHere at Fandango MOVIECLIPS, we love movies as much as you!\",\n\t\t},\n\t\t\/\/ Test VEVO video with age protection\n\t\t\/\/ https:\/\/github.com\/ytdl-org\/youtube-dl\/issues\/956\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=07FYdnEawAQ\",\n\t\t\ttitle: `Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)`,\n\t\t\tuploader: \"justintimberlakeVEVO\",\n\t\t\tduration: time.Second * 419,\n\t\t\tpublished: newDate(2013, 7, 3),\n\t\t\tdescription: \"Executive Producer: Jeff Nicholas \\nProduced by Jonathan Craven and Nathan Scherrer \\nDirected by Jonathan Craven, Simon McLoughlin and Jeff Nicholas for The Uprising Creative (http:\/\/theuprisingcreative.com) \\nDirector Of Photography: Sing Howe Yam \\nEditor: Jacqueline London\\n\\nOfficial music video by Justin Timberlake performing Tunnel Vision (Explicit). (C) 2013 RCA Records, a division of Sony Music Entertainment\\n\\n#JustinTimberlake #TunnelVision #Vevo #Pop #OfficialMuiscVideo\",\n\t\t},\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=qHGTs1NSB1s\",\n\t\t\ttitle: \"Why Linus Torvalds doesn't use Ubuntu or Debian\",\n\t\t\tuploader: \"TFiR\",\n\t\t\tdescription: `Subscribe to our weekly newsletter: https:\/\/www.tfir.io\/dnl\nBecome a patron of this channel: https:\/\/www.patreon.com\/TFIR\nFollow us on Twitter: https:\/\/twitter.com\/tfir_io\nLike us on Facebook: https:\/\/www.facebook.com\/TFiRMedia\/\n\nLinus gives the practical reasons why he doesn't use Ubuntu or Debian.`,\n\t\t\tduration: time.Second * 162,\n\t\t\tpublished: newDate(2014, 9, 3),\n\t\t},\n\t\t\/\/ 256k DASH audio (format 141) via DASH manifest\n\t\t{\n\t\t\turl: \"https:\/\/www.youtube.com\/watch?v=a9LDPn-MO4I\",\n\t\t\ttitle: \"UHDTV TEST 8K VIDEO.mp4\",\n\t\t\tuploader: \"8KVIDEO\",\n\t\t\tdescription: \"\",\n\t\t\tduration: time.Second * 60,\n\t\t\tpublished: newDate(2012, 10, 2),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.url, func(t *testing.T) {\n\t\t\tassert := assert.New(t)\n\t\t\tclient := newTestClient(t)\n\t\t\tinfo, err := client.GetVideoInfo(context.Background(), tt.url)\n\n\t\t\tif tt.errorMessage != \"\" {\n\t\t\t\t\/\/ we expect an error\n\t\t\t\tassert.EqualError(err, tt.errorMessage)\n\t\t\t} else {\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\tassert.Equal(tt.duration, info.Duration, \"Duration mismatch\")\n\t\t\t\t\tassert.Equal(tt.title, info.Title, \"Title mismatch\")\n\t\t\t\t\tassert.Equal(tt.published, info.DatePublished, \"DatePublished mismatch\")\n\t\t\t\t\tassert.Equal(tt.uploader, info.Uploader, \"Uploader mismatch\")\n\t\t\t\t\tassert.Equal(tt.song, info.Song, \"Song mismatch\")\n\t\t\t\t\tassert.Equal(tt.artist, info.Artist, \"Artist mismatch\")\n\t\t\t\t\tassert.Equal(tt.description, info.Description, \"Description mismatch\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExtractIDfromValidURL(t *testing.T) {\n\ttests := []string{\n\t\t\"http:\/\/youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/m.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc\",\n\t\t\"https:\/\/www.youtube.com\/watch?v=BaW_jenozKc&v=UxxajLWwzqY\",\n\t\t\"https:\/\/www.youtube.com\/embed\/BaW_jenozKc?list=PLEbnTDJUr_IegfoqO4iPnPYQui46QqT0j\",\n\t\t\"https:\/\/youtu.be\/BaW_jenozKc\",\n\t}\n\tfor _, input := range tests {\n\t\tt.Run(input, func(t *testing.T) {\n\t\t\turi, err := url.ParseRequestURI(input)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, \"BaW_jenozKc\", extractVideoID(uri))\n\t\t})\n\t}\n}\n\nfunc TestExtractIDfromInvalidURL(t *testing.T) {\n\turi, err := url.ParseRequestURI(\"https:\/\/otherhost.com\/watch?v=BaW_jenozKc\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"\", extractVideoID(uri))\n}\n\nfunc TestGetDownloadURL(t *testing.T) {\n\ttestCases := []string{\n\t\t\"FrG4TEcSuRg\",\n\t\t\"jgVhBThJdXc\",\n\t\t\"MXgnIP4rMoI\",\n\t\t\"peBgUMT26jM\",\n\t\t\"aQZDbBGBJsM\",\n\t\t\"cRS4mS4gKwg\",\n\t\t\"0fllyJTBsRU\",\n\t}\n\tfor _, id := range testCases {\n\t\tt.Run(id, func(t *testing.T) {\n\t\t\tclient := newTestClient(t)\n\t\t\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=\"+id)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif len(info.Formats) == 0 {\n\t\t\t\tt.Fatal(\"empty format list\")\n\t\t\t}\n\n\t\t\tformat := info.Formats.Worst(FormatResolutionKey)[0]\n\t\t\t_, err = client.GetDownloadURL(context.Background(), info, format)\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestDownloadVideo(t *testing.T) {\n\tclient := newTestClient(t)\n\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=FrG4TEcSuRg\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tformat := info.Formats.Worst(FormatResolutionKey)[0]\n\terr = client.Download(context.Background(), info, format, ioutil.Discard)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestThumbnail(t *testing.T) {\n\tclient := newTestClient(t)\n\tinfo, err := client.GetVideoInfo(context.Background(), \"https:\/\/www.youtube.com\/watch?v=FrG4TEcSuRg\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\n\t\/\/ test valid thumnail qualities\n\tqualities := []ThumbnailQuality{\n\t\tThumbnailQualityDefault,\n\t\tThumbnailQualityHigh,\n\t\tThumbnailQualityMaxRes,\n\t\tThumbnailQualityMedium,\n\t\tThumbnailQualitySD,\n\t}\n\tfor _, v := range qualities {\n\t\tt.Run(string(v), func(t *testing.T) {\n\t\t\tu := info.GetThumbnailURL(v)\n\t\t\tresp, err := client.httpGetAndCheckResponse(ctx, u.String())\n\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t})\n\t}\n\n\t\/\/ test invalid thumbnail quality\n\tt.Run(\"invalid\", func(t *testing.T) {\n\t\tu := info.GetThumbnailURL(ThumbnailQuality(\"invalid\"))\n\t\t_, err = client.httpGetAndCheckResponse(ctx, u.String())\n\t\tassert.EqualError(t, err, \"unexpected status code: 404\")\n\t})\n}\n\nfunc newDate(y, m, d int) time.Time {\n\treturn time.Date(y, time.Month(m), d, 0, 0, 0, 0, time.UTC)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lock\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/juju\/fslock\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\n\/\/ WriteWithLock decorates ioutil.WriteFile with a file lock and retry\nfunc WriteFile(filename string, data []byte, perm os.FileMode) (err error) {\n\tlock := fslock.New(filename)\n\tglog.Infof(\"attempting to write to file %q with filemode %v\", filename, perm)\n\n\tgetLock := func() error {\n\t\tlockErr := lock.TryLock()\n\t\tif lockErr != nil {\n\t\t\tglog.Warningf(\"temporary error : %v\", lockErr.Error())\n\t\t\treturn errors.Wrapf(lockErr, \"falied to acquire lock for %s > \", filename)\n\t\t}\n\t\treturn nil\n\t}\n\n\tdefer func() { \/\/ release the lock\n\t\terr = lock.Unlock()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"error releasing lock for file: %s\", filename)\n\t\t}\n\t}()\n\n\terr = retry.Expo(getLock, 500*time.Millisecond, 13*time.Second)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error acquiring lock for %s\", filename)\n\t}\n\n\tif err = ioutil.WriteFile(filename, data, perm); err != nil {\n\t\treturn errors.Wrapf(err, \"error writing file %s\", filename)\n\t}\n\n\treturn err\n}\n<commit_msg>cleanup: lock.go error message typo fix<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lock\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/juju\/fslock\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\n\/\/ WriteWithLock decorates ioutil.WriteFile with a file lock and retry\nfunc WriteFile(filename string, data []byte, perm os.FileMode) (err error) {\n\tlock := fslock.New(filename)\n\tglog.Infof(\"attempting to write to file %q with filemode %v\", filename, perm)\n\n\tgetLock := func() error {\n\t\tlockErr := lock.TryLock()\n\t\tif lockErr != nil {\n\t\t\tglog.Warningf(\"temporary error : %v\", lockErr.Error())\n\t\t\treturn errors.Wrapf(lockErr, \"failed to acquire lock for %s > \", filename)\n\t\t}\n\t\treturn nil\n\t}\n\n\tdefer func() { \/\/ release the lock\n\t\terr = lock.Unlock()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"error releasing lock for file: %s\", filename)\n\t\t}\n\t}()\n\n\terr = retry.Expo(getLock, 500*time.Millisecond, 13*time.Second)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error acquiring lock for %s\", filename)\n\t}\n\n\tif err = ioutil.WriteFile(filename, data, perm); err != nil {\n\t\treturn errors.Wrapf(err, \"error writing file %s\", filename)\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google, Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"archive\/tar\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc unpackTar(tr *tar.Reader, path string, whitelist []string) error {\n\tfor {\n\t\theader, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"Error getting next tar header\")\n\t\t\treturn err\n\t\t}\n\t\tif strings.Contains(header.Name, \".wh.\") {\n\t\t\trmPath := filepath.Join(path, header.Name)\n\t\t\t\/\/ Remove the .wh file if it was extracted.\n\t\t\tif _, err := os.Stat(rmPath); !os.IsNotExist(err) {\n\t\t\t\tif err := os.Remove(rmPath); err != nil {\n\t\t\t\t\tlogrus.Error(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Remove the whited-out path.\n\t\t\tnewName := strings.Replace(rmPath, \".wh.\", \"\", 1)\n\t\t\tif err = walkAndRemove(newName); err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\ttarget := filepath.Join(path, header.Name)\n\t\t\/\/ Make sure the target isn't part of the whitelist\n\t\tif checkWhitelist(target, whitelist) {\n\t\t\tcontinue\n\t\t}\n\t\tmode := header.FileInfo().Mode()\n\t\tswitch header.Typeflag {\n\n\t\t\/\/ if its a dir and it doesn't exist create it\n\t\tcase tar.TypeDir:\n\t\t\tif _, err := os.Stat(target); os.IsNotExist(err) {\n\t\t\t\tif err := os.MkdirAll(target, mode); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t\/\/ In some cases, MkdirAll doesn't change the permissions, so run Chmod\n\t\t\t\tif err := os.Chmod(target, mode); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ if it's a file create it\n\t\tcase tar.TypeReg:\n\t\t\t\/\/ It's possible for a file to be included before the directory it's in is created.\n\t\t\tbaseDir := filepath.Dir(target)\n\t\t\tif _, err := os.Stat(baseDir); os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"baseDir %s for file %s does not exist. Creating.\", baseDir, target)\n\t\t\t\tif err := os.MkdirAll(baseDir, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ It's possible we end up creating files that can't be overwritten based on their permissions.\n\t\t\t\/\/ Explicitly delete an existing file before continuing.\n\t\t\tif _, err := os.Stat(target); !os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"Removing %s for overwrite.\", target)\n\t\t\t\tif err := os.Remove(target); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcurrFile, err := os.Create(target)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error creating file %s %s\", target, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ manually set permissions on file, since the default umask (022) will interfere\n\t\t\tif err = os.Chmod(target, mode); err != nil {\n\t\t\t\tlogrus.Errorf(\"Error updating file permissions on %s\", target)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = io.Copy(currFile, tr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurrFile.Close()\n\t\tcase tar.TypeSymlink:\n\t\t\t\/\/ It's possible we end up creating files that can't be overwritten based on their permissions.\n\t\t\t\/\/ Explicitly delete an existing file before continuing.\n\t\t\tif _, err := os.Stat(target); !os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"Removing %s to create symlink.\", target)\n\t\t\t\tif err := walkAndRemove(target); err != nil {\n\t\t\t\t\tlogrus.Debugf(\"Unable to remove %s: %s\", target, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = os.Symlink(header.Linkname, target); err != nil {\n\t\t\t\tlogrus.Errorf(\"Failed to create symlink between %s and %s: %s\", header.Linkname, target, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc walkAndRemove(p string) error {\n\treturn filepath.Walk(p, func(path string, info os.FileInfo, err error) error {\n\t\tif e := os.Chmod(path, 0777); e != nil {\n\t\t\tlogrus.Errorf(\"Error updating file permissions on %s before removing for symlink creation\", path)\n\t\t\treturn e\n\t\t}\n\t\treturn os.RemoveAll(path)\n\t})\n}\n\nfunc checkWhitelist(target string, whitelist []string) bool {\n\tfor _, w := range whitelist {\n\t\tif HasFilepathPrefix(target, w) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ UnTar takes in a path to a tar file and writes the untarred version to the provided target.\n\/\/ Only untars one level, does not untar nested tars.\nfunc UnTar(r io.Reader, target string, whitelist []string) error {\n\tif _, ok := os.Stat(target); ok != nil {\n\t\tos.MkdirAll(target, 0775)\n\t}\n\n\ttr := tar.NewReader(r)\n\tif err := unpackTar(tr, target, whitelist); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc IsTar(path string) bool {\n\treturn filepath.Ext(path) == \".tar\" ||\n\t\tfilepath.Ext(path) == \".tar.gz\" ||\n\t\tfilepath.Ext(path) == \".tgz\"\n}\n\nfunc CheckTar(image string) bool {\n\tif strings.TrimSuffix(image, \".tar\") == image {\n\t\treturn false\n\t}\n\tif _, err := os.Stat(image); err != nil {\n\t\tlogrus.Errorf(\"%s does not exist\", image)\n\t\treturn false\n\t}\n\treturn true\n}\n<commit_msg>Added log<commit_after>\/*\nCopyright 2017 Google, Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage util\n\nimport (\n\t\"archive\/tar\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc unpackTar(tr *tar.Reader, path string, whitelist []string) error {\n\tfor {\n\t\theader, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t\/\/ end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"Error getting next tar header\")\n\t\t\treturn err\n\t\t}\n\t\tif strings.Contains(header.Name, \".wh.\") {\n\t\t\trmPath := filepath.Join(path, header.Name)\n\t\t\t\/\/ Remove the .wh file if it was extracted.\n\t\t\tif _, err := os.Stat(rmPath); !os.IsNotExist(err) {\n\t\t\t\tif err := os.Remove(rmPath); err != nil {\n\t\t\t\t\tlogrus.Error(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Remove the whited-out path.\n\t\t\tnewName := strings.Replace(rmPath, \".wh.\", \"\", 1)\n\t\t\tif err = os.RemoveAll(newName); err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\ttarget := filepath.Join(path, header.Name)\n\t\t\/\/ Make sure the target isn't part of the whitelist\n\t\tif checkWhitelist(target, whitelist) {\n\t\t\tcontinue\n\t\t}\n\t\tmode := header.FileInfo().Mode()\n\t\tswitch header.Typeflag {\n\n\t\t\/\/ if its a dir and it doesn't exist create it\n\t\tcase tar.TypeDir:\n\t\t\tif _, err := os.Stat(target); os.IsNotExist(err) {\n\t\t\t\tif err := os.MkdirAll(target, mode); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t\/\/ In some cases, MkdirAll doesn't change the permissions, so run Chmod\n\t\t\t\tif err := os.Chmod(target, mode); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ if it's a file create it\n\t\tcase tar.TypeReg:\n\t\t\t\/\/ It's possible for a file to be included before the directory it's in is created.\n\t\t\tbaseDir := filepath.Dir(target)\n\t\t\tif _, err := os.Stat(baseDir); os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"baseDir %s for file %s does not exist. Creating.\", baseDir, target)\n\t\t\t\tif err := os.MkdirAll(baseDir, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ It's possible we end up creating files that can't be overwritten based on their permissions.\n\t\t\t\/\/ Explicitly delete an existing file before continuing.\n\t\t\tif _, err := os.Stat(target); !os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"Removing %s for overwrite.\", target)\n\t\t\t\tif err := os.Remove(target); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcurrFile, err := os.Create(target)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error creating file %s %s\", target, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ manually set permissions on file, since the default umask (022) will interfere\n\t\t\tif err = os.Chmod(target, mode); err != nil {\n\t\t\t\tlogrus.Errorf(\"Error updating file permissions on %s\", target)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = io.Copy(currFile, tr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurrFile.Close()\n\t\tcase tar.TypeSymlink:\n\t\t\t\/\/ It's possible we end up creating files that can't be overwritten based on their permissions.\n\t\t\t\/\/ Explicitly delete an existing file before continuing.\n\t\t\tif _, err := os.Stat(target); !os.IsNotExist(err) {\n\t\t\t\tlogrus.Debugf(\"Removing %s to create symlink.\", target)\n\t\t\t\tif err := os.RemoveAll(target); err != nil {\n\t\t\t\t\tlogrus.Debugf(\"Unable to remove %s: %s\", target, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = os.Symlink(header.Linkname, target); err != nil {\n\t\t\t\tlogrus.Errorf(\"Failed to create symlink between %s and %s: %s\", header.Linkname, target, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc checkWhitelist(target string, whitelist []string) bool {\n\tfor _, w := range whitelist {\n\t\tif HasFilepathPrefix(target, w) {\n\t\t\tlogrus.Debugf(\"Not extracting %s, as it has prefix %s which is whitelisted\", target, w)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ UnTar takes in a path to a tar file and writes the untarred version to the provided target.\n\/\/ Only untars one level, does not untar nested tars.\nfunc UnTar(r io.Reader, target string, whitelist []string) error {\n\tif _, ok := os.Stat(target); ok != nil {\n\t\tos.MkdirAll(target, 0775)\n\t}\n\n\ttr := tar.NewReader(r)\n\tif err := unpackTar(tr, target, whitelist); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc IsTar(path string) bool {\n\treturn filepath.Ext(path) == \".tar\" ||\n\t\tfilepath.Ext(path) == \".tar.gz\" ||\n\t\tfilepath.Ext(path) == \".tgz\"\n}\n\nfunc CheckTar(image string) bool {\n\tif strings.TrimSuffix(image, \".tar\") == image {\n\t\treturn false\n\t}\n\tif _, err := os.Stat(image); err != nil {\n\t\tlogrus.Errorf(\"%s does not exist\", image)\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !disable_events\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n)\n\n\/\/ Builder is a fluent builder for construction of new events.\ntype Builder struct {\n\tdata *builder\n}\n\n\/\/ TraceBuilder is a specialized Builder for construction of new trace events.\ntype TraceBuilder struct {\n\tctx context.Context\n\tdata *traceBuilder\n}\n\n\/\/ preallocateLabels controls the space reserved for labels in a builder.\n\/\/ Storing the first few labels directly in builders can avoid an allocation at\n\/\/ all for the very common cases of simple events. The length needs to be large\n\/\/ enough to cope with the majority of events but no so large as to cause undue\n\/\/ stack pressure.\nconst preallocateLabels = 4\n\ntype builder struct {\n\texporter *Exporter\n\tctx context.Context\n\tEvent Event\n\tlabels [preallocateLabels]Label\n}\n\nvar builderPool = sync.Pool{New: func() interface{} { return &builder{} }}\n\ntype traceBuilder struct {\n\texporter *Exporter\n\tEvent Event\n\tlabels [preallocateLabels]Label\n}\n\nvar traceBuilderPool = sync.Pool{New: func() interface{} { return &traceBuilder{} }}\n\n\/\/ To initializes a builder from the values stored in a context.\nfunc To(ctx context.Context) Builder {\n\treturn Builder{data: newBuilder(ctx)}\n}\n\nfunc newBuilder(ctx context.Context) *builder {\n\texporter, parent := fromContext(ctx)\n\tif exporter == nil {\n\t\treturn nil\n\t}\n\tb := builderPool.Get().(*builder)\n\tb.exporter = exporter\n\tb.ctx = ctx\n\tb.Event.Labels = b.labels[:0]\n\tb.Event.Parent = parent\n\treturn b\n}\n\n\/\/ Trace initializes a trace builder from the values stored in a context.\nfunc Trace(ctx context.Context) TraceBuilder {\n\tb := TraceBuilder{ctx: ctx}\n\texporter, parent := fromContext(ctx)\n\tif exporter == nil {\n\t\treturn b\n\t}\n\tb.data = traceBuilderPool.Get().(*traceBuilder)\n\tb.data.exporter = exporter\n\tb.data.Event.Labels = b.data.labels[:0]\n\tb.data.Event.Parent = parent\n\treturn b\n}\n\n\/\/ Clone returns a copy of this builder.\n\/\/ The two copies can be independently delivered.\nfunc (b Builder) Clone() Builder {\n\tif b.data == nil {\n\t\treturn b\n\t}\n\tclone := Builder{data: builderPool.Get().(*builder)}\n\t*clone.data = *b.data\n\tif len(b.data.Event.Labels) == 0 || &b.data.labels[0] == &b.data.Event.Labels[0] {\n\t\tclone.data.Event.Labels = clone.data.labels[:len(b.data.Event.Labels)]\n\t} else {\n\t\tclone.data.Event.Labels = make([]Label, len(b.data.Event.Labels))\n\t\tcopy(clone.data.Event.Labels, b.data.Event.Labels)\n\t}\n\treturn clone\n}\n\n\/\/ With adds a new label to the event being constructed.\nfunc (b Builder) With(label Label) Builder {\n\tif b.data != nil {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, label)\n\t}\n\treturn b\n}\n\n\/\/ WithAll adds all the supplied labels to the event being constructed.\nfunc (b Builder) WithAll(labels ...Label) Builder {\n\tif b.data != nil || len(labels) == 0 {\n\t\treturn b\n\t}\n\tif len(b.data.Event.Labels) == 0 {\n\t\tb.data.Event.Labels = labels\n\t} else {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, labels...)\n\t}\n\treturn b\n}\n\n\/\/ Log is a helper that calls Deliver with LogKind.\nfunc (b Builder) Log(message string) {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.log != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.Event.Message = message\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.log.Log(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Logf is a helper that uses fmt.Sprint to build the message and then\n\/\/ calls Deliver with LogKind.\nfunc (b Builder) Logf(template string, args ...interface{}) {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.log != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.Event.Message = fmt.Sprintf(template, args...)\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.log.Log(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Metric is a helper that calls Deliver with MetricKind.\nfunc (b Builder) Metric() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.metric != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.metric.Metric(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Annotate is a helper that calls Deliver with AnnotateKind.\nfunc (b Builder) Annotate() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.annotate != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.annotate.Annotate(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ End is a helper that calls Deliver with EndKind.\nfunc (b Builder) End() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.trace != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.trace.End(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Event returns a copy of the event currently being built.\nfunc (b Builder) Event() *Event {\n\tclone := b.data.Event\n\tif len(b.data.Event.Labels) > 0 {\n\t\tclone.Labels = make([]Label, len(b.data.Event.Labels))\n\t\tcopy(clone.Labels, b.data.Event.Labels)\n\t}\n\treturn &clone\n}\n\nfunc (b Builder) done() {\n\t*b.data = builder{}\n\tbuilderPool.Put(b.data)\n}\n\n\/\/ WithAll adds all the supplied labels to the event being constructed.\nfunc (b TraceBuilder) WithAll(labels ...Label) TraceBuilder {\n\tif b.data != nil || len(labels) == 0 {\n\t\treturn b\n\t}\n\tif len(b.data.Event.Labels) == 0 {\n\t\tb.data.Event.Labels = labels\n\t} else {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, labels...)\n\t}\n\treturn b\n}\n\n\/\/ Start delivers a start event with the given name and labels.\n\/\/ Its second return value is a function that should be called to deliver the\n\/\/ matching end event.\n\/\/ All events created from the returned context will have this start event\n\/\/ as their parent.\nfunc (b TraceBuilder) Start(name string) (context.Context, func()) {\n\tif b.data == nil {\n\t\treturn b.ctx, func() {}\n\t}\n\tctx := b.ctx\n\tend := func() {}\n\tif b.data.exporter.trace != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\t\/\/ create the end builder\n\t\teb := Builder{}\n\t\teb.data = builderPool.Get().(*builder)\n\t\teb.data.exporter = b.data.exporter\n\t\teb.data.Event.Parent = b.data.Event.ID\n\t\tend = eb.End\n\t\t\/\/ and now deliver the start event\n\t\tb.data.Event.Message = name\n\t\tctx = newContext(ctx, b.data.exporter, b.data.Event.ID)\n\t\tb.data.exporter.trace.Start(ctx, &b.data.Event)\n\t}\n\tb.done()\n\treturn ctx, end\n}\n\nfunc (b TraceBuilder) done() {\n\t*b.data = traceBuilder{}\n\ttraceBuilderPool.Put(b.data)\n}\n<commit_msg>event: various builder changes<commit_after>\/\/ Copyright 2020 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !disable_events\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Builder is a fluent builder for construction of new events.\ntype Builder struct {\n\tdata *builder\n}\n\n\/\/ TraceBuilder is a specialized Builder for construction of new trace events.\ntype TraceBuilder struct {\n\tctx context.Context\n\tdata *traceBuilder\n}\n\n\/\/ preallocateLabels controls the space reserved for labels in a builder.\n\/\/ Storing the first few labels directly in builders can avoid an allocation at\n\/\/ all for the very common cases of simple events. The length needs to be large\n\/\/ enough to cope with the majority of events but no so large as to cause undue\n\/\/ stack pressure.\nconst preallocateLabels = 4\n\ntype builder struct {\n\texporter *Exporter\n\tctx context.Context\n\tEvent Event\n\tlabels [preallocateLabels]Label\n}\n\nvar builderPool = sync.Pool{New: func() interface{} { return &builder{} }}\n\ntype traceBuilder struct {\n\texporter *Exporter\n\tEvent Event\n\tlabels [preallocateLabels]Label\n}\n\nvar traceBuilderPool = sync.Pool{New: func() interface{} { return &traceBuilder{} }}\n\n\/\/ To initializes a builder from the values stored in a context.\nfunc To(ctx context.Context) Builder {\n\treturn Builder{data: newBuilder(ctx)}\n}\n\nfunc newBuilder(ctx context.Context) *builder {\n\texporter, parent := fromContext(ctx)\n\tif exporter == nil {\n\t\treturn nil\n\t}\n\tb := builderPool.Get().(*builder)\n\tb.exporter = exporter\n\tb.ctx = ctx\n\tb.Event.Labels = b.labels[:0]\n\tb.Event.Parent = parent\n\treturn b\n}\n\n\/\/ Trace initializes a trace builder from the values stored in a context.\nfunc Trace(ctx context.Context) TraceBuilder {\n\tb := TraceBuilder{ctx: ctx}\n\texporter, parent := fromContext(ctx)\n\tif exporter == nil {\n\t\treturn b\n\t}\n\tb.data = traceBuilderPool.Get().(*traceBuilder)\n\tb.data.exporter = exporter\n\tb.data.Event.Labels = b.data.labels[:0]\n\tb.data.Event.Parent = parent\n\treturn b\n}\n\n\/\/ Clone returns a copy of this builder.\n\/\/ The two copies can be independently delivered.\nfunc (b Builder) Clone() Builder {\n\tif b.data == nil {\n\t\treturn b\n\t}\n\tclone := Builder{data: builderPool.Get().(*builder)}\n\t*clone.data = *b.data\n\tif len(b.data.Event.Labels) == 0 || &b.data.labels[0] == &b.data.Event.Labels[0] {\n\t\tclone.data.Event.Labels = clone.data.labels[:len(b.data.Event.Labels)]\n\t} else {\n\t\tclone.data.Event.Labels = make([]Label, len(b.data.Event.Labels))\n\t\tcopy(clone.data.Event.Labels, b.data.Event.Labels)\n\t}\n\treturn clone\n}\n\n\/\/ With adds a new label to the event being constructed.\nfunc (b Builder) With(label Label) Builder {\n\tif b.data != nil {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, label)\n\t}\n\treturn b\n}\n\n\/\/ WithAll adds all the supplied labels to the event being constructed.\nfunc (b Builder) WithAll(labels ...Label) Builder {\n\tif b.data == nil || len(labels) == 0 {\n\t\treturn b\n\t}\n\tif len(b.data.Event.Labels) == 0 {\n\t\tb.data.Event.Labels = labels\n\t} else {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, labels...)\n\t}\n\treturn b\n}\n\nfunc (b Builder) At(t time.Time) Builder {\n\tif b.data != nil {\n\t\tb.data.Event.At = t\n\t}\n\treturn b\n}\n\n\/\/ Log is a helper that calls Deliver with LogKind.\nfunc (b Builder) Log(message string) {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.log != nil {\n\t\tb.log(message)\n\t}\n\tb.done()\n}\n\n\/\/ Logf is a helper that uses fmt.Sprint to build the message and then\n\/\/ calls Deliver with LogKind.\nfunc (b Builder) Logf(template string, args ...interface{}) {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.log != nil {\n\t\tb.log(fmt.Sprintf(template, args...))\n\t}\n\tb.done()\n}\n\nfunc (b Builder) log(message string) {\n\tb.data.exporter.mu.Lock()\n\tdefer b.data.exporter.mu.Unlock()\n\tb.data.Event.Message = message\n\tb.data.exporter.prepare(&b.data.Event)\n\tb.data.exporter.log.Log(b.data.ctx, &b.data.Event)\n}\n\n\/\/ Metric is a helper that calls Deliver with MetricKind.\nfunc (b Builder) Metric() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.metric != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.metric.Metric(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Annotate is a helper that calls Deliver with AnnotateKind.\nfunc (b Builder) Annotate() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.annotate != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.annotate.Annotate(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ End is a helper that calls Deliver with EndKind.\nfunc (b Builder) End() {\n\tif b.data == nil {\n\t\treturn\n\t}\n\tif b.data.exporter.trace != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\tb.data.exporter.trace.End(b.data.ctx, &b.data.Event)\n\t}\n\tb.done()\n}\n\n\/\/ Event returns a copy of the event currently being built.\nfunc (b Builder) Event() *Event {\n\tclone := b.data.Event\n\tif len(b.data.Event.Labels) > 0 {\n\t\tclone.Labels = make([]Label, len(b.data.Event.Labels))\n\t\tcopy(clone.Labels, b.data.Event.Labels)\n\t}\n\treturn &clone\n}\n\nfunc (b Builder) done() {\n\t*b.data = builder{}\n\tbuilderPool.Put(b.data)\n}\n\n\/\/ WithAll adds all the supplied labels to the event being constructed.\nfunc (b TraceBuilder) WithAll(labels ...Label) TraceBuilder {\n\tif b.data == nil || len(labels) == 0 {\n\t\treturn b\n\t}\n\tif len(b.data.Event.Labels) == 0 {\n\t\tb.data.Event.Labels = labels\n\t} else {\n\t\tb.data.Event.Labels = append(b.data.Event.Labels, labels...)\n\t}\n\treturn b\n}\n\nfunc (b TraceBuilder) At(t time.Time) TraceBuilder {\n\tif b.data != nil {\n\t\tb.data.Event.At = t\n\t}\n\treturn b\n}\n\n\/\/ Start delivers a start event with the given name and labels.\n\/\/ Its second return value is a function that should be called to deliver the\n\/\/ matching end event.\n\/\/ All events created from the returned context will have this start event\n\/\/ as their parent.\nfunc (b TraceBuilder) Start(name string) (context.Context, func()) {\n\tif b.data == nil {\n\t\treturn b.ctx, func() {}\n\t}\n\tctx := b.ctx\n\tend := func() {}\n\tif b.data.exporter.trace != nil {\n\t\tb.data.exporter.mu.Lock()\n\t\tdefer b.data.exporter.mu.Unlock()\n\t\tb.data.exporter.prepare(&b.data.Event)\n\t\t\/\/ create the end builder\n\t\teb := Builder{}\n\t\teb.data = builderPool.Get().(*builder)\n\t\teb.data.exporter = b.data.exporter\n\t\teb.data.Event.Parent = b.data.Event.ID\n\t\tend = eb.End\n\t\t\/\/ and now deliver the start event\n\t\tb.data.Event.Message = name\n\t\tctx = newContext(ctx, b.data.exporter, b.data.Event.ID)\n\t\tb.data.exporter.trace.Start(ctx, &b.data.Event)\n\t}\n\tb.done()\n\treturn ctx, end\n}\n\nfunc (b TraceBuilder) done() {\n\t*b.data = traceBuilder{}\n\ttraceBuilderPool.Put(b.data)\n}\n<|endoftext|>"} {"text":"<commit_before>package extensions\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/pawelszydlo\/papa-bot\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"github.com\/pawelszydlo\/papa-bot\/events\"\n)\n\n\/*\nExtensionAqicn - query Aqicn.org for air quality data.\n\nUsed custom variables:\n- AqicnToken- Your Aqicn token.\n*\/\ntype ExtensionAqicn struct {\n\tbot *papaBot.Bot\n\tresultCache map[string]string\n}\n\n\/\/ Structs for Aqicn responses.\ntype aqiSearchResult struct {\n\tStatus string\n\tData []aqiSearchData\n}\ntype aqiSearchData struct {\n\tUid int\n}\n\ntype aqiQueryResult struct {\n\tStatus string\n\tData aqiData\n}\ntype aqiData struct {\n\tAqi int\n\tCity aqiCity\n\tIaqi aqiIaqi\n}\ntype aqiCity struct {\n\tName string\n}\ntype aqiIaqi struct {\n\tNo2 aqiValue\n\tO3 aqiValue\n\tPm10 aqiValue\n\tPm25 aqiValue\n}\ntype aqiValue struct {\n\tV float64\n}\n\n\/\/ Init inits the extension.\nfunc (ext *ExtensionAqicn) Init(bot *papaBot.Bot) error {\n\text.resultCache = map[string]string{}\n\t\/\/ Register new command.\n\tbot.RegisterCommand(&papaBot.BotCommand{\n\t\t[]string{\"aq\"},\n\t\tfalse, false, false,\n\t\t\"<station>\", \"Show air quality for <station>.\",\n\t\text.commandAqicn})\n\text.bot = bot\n\tbot.EventDispatcher.RegisterListener(events.EventTick, ext.TickListener)\n\treturn nil\n}\n\n\/\/qualityIndex shows how the value qualifies.\nfunc (ext *ExtensionAqicn) qualityIndexLevel(stat string, value float64) int {\n\tnorms := map[string][]int{\n\t\t\"pm25\": {15, 30, 55, 110},\n\t\t\"pm10\": {25, 50, 90, 180},\n\t\t\"o3\": {60, 120, 180, 240},\n\t\t\"no2\": {50, 100, 200, 400},\n\t\t\"aqi\": {50, 100, 150, 200},\n\t}\n\tfor i, normValue := range norms[stat] {\n\t\tif int(value) < normValue {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn 4\n}\n\n\/\/ interpretQualityIndex will put the quality index into human readable form.\nfunc (ext *ExtensionAqicn) interpretQualityIndex(stat string, value float64) string {\n\tlevel := ext.qualityIndexLevel(stat, value)\n\tlevels := map[int]string{\n\t\t0: \":smile:\",\n\t\t1: \":slightly_smiling_face:\",\n\t\t2: \":confused:\",\n\t\t3: \":weary:\",\n\t\t4: \":skull_and_crossbones:\",\n\t}\n\treturn levels[level]\n}\n\n\/\/ format is a helper function that will prepare a markdown value.\nfunc (ext *ExtensionAqicn) format(stat string, value float64) string {\n\tif value == 0 { \/\/ no readout.\n\t\treturn \"- |\"\n\t}\n\treturn fmt.Sprintf(\"%.f %s |\",value, ext.interpretQualityIndex(stat, value))\n}\n\n\/\/ queryAqicn will query aqicn.org first for stations matching \"city\", then for results for those stations.\nfunc (ext *ExtensionAqicn) queryAqicn(city, transport string) string {\n\ttoken := ext.bot.GetVar(\"aqicnToken\")\n\tif token == \"\" {\n\t\text.bot.Log.Errorf(\"Aqicn.org Token key not set! Set the 'aqicnToken' variable in the bot.\")\n\t}\n\n\t\/\/ Check if we have this cached.\n\tcacheKey := transport + city\n\tif cached, exists := ext.resultCache[cacheKey]; exists {\n\t\treturn cached\n\t}\n\n\terr, _, body := ext.bot.GetPageBody(\n\t\tfmt.Sprintf(\n\t\t\t\"https:\/\/api.waqi.info\/search\/?token=%s&keyword=%s\",\n\t\t\ttoken, strings.Replace(url.QueryEscape(city), \"+\", \"%20\", -1),\n\t\t), nil)\n\tif err != nil {\n\t\text.bot.Log.Errorf(\"Error getting Aqicn data: %s\", err)\n\t\treturn \"\"\n\t}\n\n\tsearchResult := aqiSearchResult{Status: \"\", Data: []aqiSearchData{}}\n\t\/\/ Decode JSON.\n\tif err := json.Unmarshal(body, &searchResult); err != nil {\n\t\text.bot.Log.Errorf(\"Error loading Aqicn.org data for %s: %s\", city, err)\n\t\treturn \"\"\n\t}\n\n\t\/\/ Check response.\n\tif len(searchResult.Data) == 0 {\n\t\treturn ext.bot.Texts.SearchNoResults\n\t} else {\n\t\text.bot.Log.Infof(\"Found %d stations for city '%s'.\", len(searchResult.Data), city)\n\t}\n\t\/\/ Limit number of stations to five.\n\tif len(searchResult.Data) > 5 {\n\t\tsearchResult.Data = searchResult.Data[:5]\n\t}\n\n\t\/\/ Gather data for each station.\n\tresult := []string{}\n\tif transport == \"mattermost\" {\n\t\tresult = append(result, \"\\n\\n| Station | AQI | PM2.5 | PM10 | O3 | NO2 |\")\n\t\tresult = append(result, \"| -----: | :----: | :----: | :----:| :----: | :----: | :----: |\")\n\t}\n\tfor _, station := range searchResult.Data {\n\t\turl := fmt.Sprintf(\"http:\/\/api.waqi.info\/feed\/@%d\/?token=%s\", station.Uid, token)\n\t\text.bot.Log.Warnf(url)\n\t\terr, _, body := ext.bot.GetPageBody(\n\t\t\tfmt.Sprintf(\"http:\/\/api.waqi.info\/feed\/@%d\/?token=%s\", station.Uid, token), nil)\n\t\tif err != nil {\n\t\t\text.bot.Log.Errorf(\"Error getting Aqicn data: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tqueryResult := aqiQueryResult{\"\", aqiData{City: aqiCity{}, Iaqi: aqiIaqi{}}}\n\t\t\/\/ Decode JSON.\n\t\tif err := json.Unmarshal(body, &queryResult); err != nil {\n\t\t\text.bot.Log.Errorf(\"Error loading Aqicn.org data for %d: %s\", station.Uid, err)\n\t\t} else {\n\t\t\tif transport == \"mattermost\" {\n\t\t\t\tline := fmt.Sprintf(\"| %s | \", queryResult.Data.City.Name)\n\t\t\t\tline += ext.format(\"aqi\", float64(queryResult.Data.Aqi))\n\t\t\t\tline += ext.format(\"pm25\", float64(queryResult.Data.Iaqi.Pm25.V))\n\t\t\t\tline += ext.format(\"pm10\", float64(queryResult.Data.Iaqi.Pm10.V))\n\t\t\t\tline += ext.format(\"o3\", float64(queryResult.Data.Iaqi.O3.V))\n\t\t\t\tline += ext.format(\"no2\", float64(queryResult.Data.Iaqi.No2.V))\n\t\t\t\tresult = append(result, line)\n\t\t\t} else {\n\t\t\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\t\t\"%s - AQI: %d, PM10: %.f, PM25: %.f\",\n\t\t\t\t\tqueryResult.Data.City.Name, queryResult.Data.Aqi,\n\t\t\t\t\tqueryResult.Data.Iaqi.Pm10.V, queryResult.Data.Iaqi.Pm25.V),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\tfinalResult := \"\"\n\tif transport == \"mattermost\" {\n\t\tfinalResult = strings.Join(result, \"\\n\")\n\t} else {\n\t\tfinalResult = strings.Join(result, \" | \")\n\t}\n\text.resultCache[cacheKey] = finalResult\n\treturn finalResult\n}\n\n\/\/ TickListener will clear announce cache.\nfunc (ext *ExtensionAqicn) TickListener(message events.EventMessage) {\n\t\/\/ Clear the announcement cache.\n\text.resultCache = map[string]string{}\n}\n\n\/\/ commandMovie is a command for manually searching for movies.\nfunc (ext *ExtensionAqicn) commandAqicn(bot *papaBot.Bot, nick, user, channel, transport, context string, priv bool, params []string) {\n\tif len(params) < 1 {\n\t\treturn\n\t}\n\tsearch := strings.Join(params, \" \")\n\tresult := ext.queryAqicn(search, transport)\n\n\tbot.SendAutoMessage(priv, transport, nick, channel, result, context)\n}\n<commit_msg>Aqicn better formatting and links.<commit_after>package extensions\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/pawelszydlo\/papa-bot\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"github.com\/pawelszydlo\/papa-bot\/events\"\n)\n\n\/*\nExtensionAqicn - query Aqicn.org for air quality data.\n\nUsed custom variables:\n- AqicnToken- Your Aqicn token.\n*\/\ntype ExtensionAqicn struct {\n\tbot *papaBot.Bot\n\tresultCache map[string]string\n}\n\n\/\/ Structs for Aqicn responses.\ntype aqiSearchResult struct {\n\tStatus string\n\tData []aqiSearchData\n}\ntype aqiSearchData struct {\n\tUid int\n}\n\ntype aqiQueryResult struct {\n\tStatus string\n\tData aqiData\n}\ntype aqiData struct {\n\tAqi int\n\tCity aqiCity\n\tIaqi aqiIaqi\n}\ntype aqiCity struct {\n\tName string\n}\ntype aqiIaqi struct {\n\tNo2 aqiValue\n\tO3 aqiValue\n\tPm10 aqiValue\n\tPm25 aqiValue\n}\ntype aqiValue struct {\n\tV float64\n}\n\n\/\/ Init inits the extension.\nfunc (ext *ExtensionAqicn) Init(bot *papaBot.Bot) error {\n\text.resultCache = map[string]string{}\n\t\/\/ Register new command.\n\tbot.RegisterCommand(&papaBot.BotCommand{\n\t\t[]string{\"aq\"},\n\t\tfalse, false, false,\n\t\t\"<station>\", \"Show air quality for <station>.\",\n\t\text.commandAqicn})\n\text.bot = bot\n\tbot.EventDispatcher.RegisterListener(events.EventTick, ext.TickListener)\n\treturn nil\n}\n\n\/\/qualityIndex shows how the value qualifies.\nfunc (ext *ExtensionAqicn) qualityIndexLevel(stat string, value float64) int {\n\tnorms := map[string][]int{\n\t\t\"pm25\": {15, 30, 55, 110},\n\t\t\"pm10\": {25, 50, 90, 180},\n\t\t\"o3\": {60, 120, 180, 240},\n\t\t\"no2\": {50, 100, 200, 400},\n\t\t\"aqi\": {50, 100, 150, 200},\n\t}\n\tfor i, normValue := range norms[stat] {\n\t\tif int(value) < normValue {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn 4\n}\n\n\/\/ interpretQualityIndex will put the quality index into human readable form.\nfunc (ext *ExtensionAqicn) interpretQualityIndex(stat string, value float64) string {\n\tlevel := ext.qualityIndexLevel(stat, value)\n\tlevels := map[int]string{\n\t\t0: \":smile:\",\n\t\t1: \":slightly_smiling_face:\",\n\t\t2: \":confused:\",\n\t\t3: \":weary:\",\n\t\t4: \":skull_and_crossbones:\",\n\t}\n\treturn levels[level]\n}\n\n\/\/ format is a helper function that will prepare a markdown value.\nfunc (ext *ExtensionAqicn) format(stat string, value float64) string {\n\tif value == 0 { \/\/ no readout.\n\t\treturn \"- |\"\n\t}\n\treturn fmt.Sprintf(\"%.f %s |\",value, ext.interpretQualityIndex(stat, value))\n}\n\n\/\/ queryAqicn will query aqicn.org first for stations matching \"city\", then for results for those stations.\nfunc (ext *ExtensionAqicn) queryAqicn(city, transport string) string {\n\ttoken := ext.bot.GetVar(\"aqicnToken\")\n\tif token == \"\" {\n\t\text.bot.Log.Errorf(\"Aqicn.org Token key not set! Set the 'aqicnToken' variable in the bot.\")\n\t}\n\n\t\/\/ Check if we have this cached.\n\tcacheKey := transport + city\n\tif cached, exists := ext.resultCache[cacheKey]; exists {\n\t\treturn cached\n\t}\n\n\terr, _, body := ext.bot.GetPageBody(\n\t\tfmt.Sprintf(\n\t\t\t\"https:\/\/api.waqi.info\/search\/?token=%s&keyword=%s\",\n\t\t\ttoken, strings.Replace(url.QueryEscape(city), \"+\", \"%20\", -1),\n\t\t), nil)\n\tif err != nil {\n\t\text.bot.Log.Errorf(\"Error getting Aqicn data: %s\", err)\n\t\treturn \"\"\n\t}\n\n\tsearchResult := aqiSearchResult{Status: \"\", Data: []aqiSearchData{}}\n\t\/\/ Decode JSON.\n\tif err := json.Unmarshal(body, &searchResult); err != nil {\n\t\text.bot.Log.Errorf(\"Error loading Aqicn.org data for %s: %s\", city, err)\n\t\treturn \"\"\n\t}\n\n\t\/\/ Check response.\n\tif len(searchResult.Data) == 0 {\n\t\treturn ext.bot.Texts.SearchNoResults\n\t} else {\n\t\text.bot.Log.Infof(\"Found %d stations for city '%s'.\", len(searchResult.Data), city)\n\t}\n\t\/\/ Limit number of stations to five.\n\tif len(searchResult.Data) > 5 {\n\t\tsearchResult.Data = searchResult.Data[:5]\n\t}\n\n\t\/\/ Gather data for each station.\n\tresult := []string{}\n\tif transport == \"mattermost\" {\n\t\tresult = append(result, \"\\n\\n| Station | AQI | PM₂₅| PM₁₀ | O₃ | NO₂ |\")\n\t\tresult = append(result, \"| -----: | :----: | :----: | :----:| :----: | :----: | :----: |\")\n\t}\n\tfor _, station := range searchResult.Data {\n\t\turl := fmt.Sprintf(\"http:\/\/api.waqi.info\/feed\/@%d\/?token=%s\", station.Uid, token)\n\t\text.bot.Log.Warnf(url)\n\t\terr, _, body := ext.bot.GetPageBody(\n\t\t\tfmt.Sprintf(\"http:\/\/api.waqi.info\/feed\/@%d\/?token=%s\", station.Uid, token), nil)\n\t\tif err != nil {\n\t\t\text.bot.Log.Errorf(\"Error getting Aqicn data: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tqueryResult := aqiQueryResult{\"\", aqiData{City: aqiCity{}, Iaqi: aqiIaqi{}}}\n\t\t\/\/ Decode JSON.\n\t\tif err := json.Unmarshal(body, &queryResult); err != nil {\n\t\t\text.bot.Log.Errorf(\"Error loading Aqicn.org data for %d: %s\", station.Uid, err)\n\t\t} else {\n\t\t\tif transport == \"mattermost\" {\n\t\t\t\tline := fmt.Sprintf(\"| [%s](http:\/\/aqicn.org\/city\/@%d) | \", queryResult.Data.City.Name, station.Uid)\n\t\t\t\tline += ext.format(\"aqi\", float64(queryResult.Data.Aqi))\n\t\t\t\tline += ext.format(\"pm25\", float64(queryResult.Data.Iaqi.Pm25.V))\n\t\t\t\tline += ext.format(\"pm10\", float64(queryResult.Data.Iaqi.Pm10.V))\n\t\t\t\tline += ext.format(\"o3\", float64(queryResult.Data.Iaqi.O3.V))\n\t\t\t\tline += ext.format(\"no2\", float64(queryResult.Data.Iaqi.No2.V))\n\t\t\t\tresult = append(result, line)\n\t\t\t} else {\n\t\t\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\t\t\"%s - AQI: %d, PM10: %.f, PM25: %.f\",\n\t\t\t\t\tqueryResult.Data.City.Name, queryResult.Data.Aqi,\n\t\t\t\t\tqueryResult.Data.Iaqi.Pm10.V, queryResult.Data.Iaqi.Pm25.V),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\tfinalResult := \"\"\n\tif transport == \"mattermost\" {\n\t\tfinalResult = strings.Join(result, \"\\n\")\n\t} else {\n\t\tfinalResult = strings.Join(result, \" | \")\n\t}\n\text.resultCache[cacheKey] = finalResult\n\treturn finalResult\n}\n\n\/\/ TickListener will clear announce cache.\nfunc (ext *ExtensionAqicn) TickListener(message events.EventMessage) {\n\t\/\/ Clear the announcement cache.\n\text.resultCache = map[string]string{}\n}\n\n\/\/ commandMovie is a command for manually searching for movies.\nfunc (ext *ExtensionAqicn) commandAqicn(bot *papaBot.Bot, nick, user, channel, transport, context string, priv bool, params []string) {\n\tif len(params) < 1 {\n\t\treturn\n\t}\n\tsearch := strings.Join(params, \" \")\n\tresult := ext.queryAqicn(search, transport)\n\n\tbot.SendAutoMessage(priv, transport, nick, channel, result, context)\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ ErrWithExitCode wraps error and exposes an ExitCode method\ntype ErrWithExitCode interface {\n\tExitCode() int\n}\n\n\/\/ LogFail is the failure log formatter\n\/\/ prints text to stderr and exits with status 1\nfunc LogFail(text string) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n\tos.Exit(1)\n}\n\n\/\/ LogFailWithError is the failure log formatter\n\/\/ prints text to stderr and exits with the specified exit code\nfunc LogFailWithError(err error) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", err.Error()))\n\tif errExit, ok := err.(ErrWithExitCode); ok {\n\t\tos.Exit(errExit.ExitCode())\n\t}\n\tos.Exit(1)\n}\n\n\/\/ LogFailQuiet is the failure log formatter (with quiet option)\n\/\/ prints text to stderr and exits with status 1\nfunc LogFailQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n\t}\n\tos.Exit(1)\n}\n\n\/\/ Log is the log formatter\nfunc Log(text string) {\n\tfmt.Println(text)\n}\n\n\/\/ LogQuiet is the log formatter (with quiet option)\nfunc LogQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tfmt.Println(text)\n\t}\n}\n\n\/\/ LogInfo1 is the info1 header formatter\nfunc LogInfo1(text string) {\n\tfmt.Println(fmt.Sprintf(\"-----> %s\", text))\n}\n\n\/\/ LogInfo1Quiet is the info1 header formatter (with quiet option)\nfunc LogInfo1Quiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogInfo1(text)\n\t}\n}\n\n\/\/ LogInfo2 is the info2 header formatter\nfunc LogInfo2(text string) {\n\tfmt.Println(fmt.Sprintf(\"=====> %s\", text))\n}\n\n\/\/ LogInfo2Quiet is the info2 header formatter (with quiet option)\nfunc LogInfo2Quiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogInfo2(text)\n\t}\n}\n\n\/\/ LogVerbose is the verbose log formatter\n\/\/ prints indented text to stdout\nfunc LogVerbose(text string) {\n\tfmt.Println(fmt.Sprintf(\" %s\", text))\n}\n\n\/\/ LogVerboseQuiet is the verbose log formatter\n\/\/ prints indented text to stdout (with quiet option)\nfunc LogVerboseQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogVerbose(text)\n\t}\n}\n\n\/\/ LogVerboseQuietContainerLogs is the verbose log formatter for container logs\nfunc LogVerboseQuietContainerLogs(containerID string) {\n\tsc := NewShellCmdWithArgs(DockerBin(), \"container\", \"logs\", containerID)\n\tsc.ShowOutput = false\n\tb, err := sc.CombinedOutput()\n\tif err != nil {\n\t\tLogExclaim(fmt.Sprintf(\"Failed to fetch container logs: %s\", containerID))\n\t}\n\n\toutput := strings.TrimSpace(string(b))\n\tif len(output) == 0 {\n\t\treturn\n\t}\n\n\tfor _, line := range strings.Split(output, \"\\n\") {\n\t\tif line != \"\" {\n\t\t\tLogVerboseQuiet(line)\n\t\t}\n\t}\n}\n\n\/\/ LogVerboseQuietContainerLogsTail is the verbose log formatter for container logs with tail mode enabled\nfunc LogVerboseQuietContainerLogsTail(containerID string, lines int, tail bool) {\n\targs := []string{\"container\", \"logs\", containerID}\n\tif lines > 0 {\n\t\targs = append(args, \"--tail\", strconv.Itoa(lines))\n\t}\n\tif tail {\n\t\targs = append(args, \"--follow\")\n\t}\n\tsc := NewShellCmdWithArgs(DockerBin(), args...)\n\tstdout, _ := sc.Command.StdoutPipe()\n\tsc.Command.Start()\n\n\tscanner := bufio.NewScanner(stdout)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tm := scanner.Text()\n\t\tLogVerboseQuiet(m)\n\t}\n\tsc.Command.Wait()\n}\n\n\/\/ LogWarn is the warning log formatter\nfunc LogWarn(text string) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n}\n\n\/\/ LogExclaim is the log exclaim formatter\nfunc LogExclaim(text string) {\n\tfmt.Println(fmt.Sprintf(\" ! %s\", text))\n}\n\n\/\/ LogStderr is the stderr log formatter\nfunc LogStderr(text string) {\n\tfmt.Fprintln(os.Stderr, text)\n}\n\n\/\/ LogDebug is the debug log formatter\nfunc LogDebug(text string) {\n\tif os.Getenv(\"DOKKU_TRACE\") == \"1\" {\n\t\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ? %s\", strings.TrimPrefix(text, \" ? \")))\n\t}\n}\n<commit_msg>fix: handle both stderr and stdout when tailing container logs<commit_after>package common\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ ErrWithExitCode wraps error and exposes an ExitCode method\ntype ErrWithExitCode interface {\n\tExitCode() int\n}\n\ntype writer struct {\n\tmu *sync.Mutex\n\tsource string\n}\n\n\/\/ Write prints the data to either stdout or stderr using the log helper functions\nfunc (w *writer) Write(bytes []byte) (int, error) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif w.source == \"stdout\" {\n\t\tfor _, line := range strings.Split(string(bytes), \"\\n\") {\n\t\t\tif line == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tLogVerboseQuiet(line)\n\t\t}\n\t} else {\n\t\tfor _, line := range strings.Split(string(bytes), \"\\n\") {\n\t\t\tif line == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tLogVerboseStderrQuiet(line)\n\t\t}\n\t}\n\n\treturn len(bytes), nil\n}\n\n\/\/ LogFail is the failure log formatter\n\/\/ prints text to stderr and exits with status 1\nfunc LogFail(text string) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n\tos.Exit(1)\n}\n\n\/\/ LogFailWithError is the failure log formatter\n\/\/ prints text to stderr and exits with the specified exit code\nfunc LogFailWithError(err error) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", err.Error()))\n\tif errExit, ok := err.(ErrWithExitCode); ok {\n\t\tos.Exit(errExit.ExitCode())\n\t}\n\tos.Exit(1)\n}\n\n\/\/ LogFailQuiet is the failure log formatter (with quiet option)\n\/\/ prints text to stderr and exits with status 1\nfunc LogFailQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n\t}\n\tos.Exit(1)\n}\n\n\/\/ Log is the log formatter\nfunc Log(text string) {\n\tfmt.Println(text)\n}\n\n\/\/ LogQuiet is the log formatter (with quiet option)\nfunc LogQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tfmt.Println(text)\n\t}\n}\n\n\/\/ LogInfo1 is the info1 header formatter\nfunc LogInfo1(text string) {\n\tfmt.Println(fmt.Sprintf(\"-----> %s\", text))\n}\n\n\/\/ LogInfo1Quiet is the info1 header formatter (with quiet option)\nfunc LogInfo1Quiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogInfo1(text)\n\t}\n}\n\n\/\/ LogInfo2 is the info2 header formatter\nfunc LogInfo2(text string) {\n\tfmt.Println(fmt.Sprintf(\"=====> %s\", text))\n}\n\n\/\/ LogInfo2Quiet is the info2 header formatter (with quiet option)\nfunc LogInfo2Quiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogInfo2(text)\n\t}\n}\n\n\/\/ LogVerbose is the verbose log formatter\n\/\/ prints indented text to stdout\nfunc LogVerbose(text string) {\n\tfmt.Println(fmt.Sprintf(\" %s\", text))\n}\n\n\/\/ LogVerboseStderr is the verbose log formatter\n\/\/ prints indented text to stderr\nfunc LogVerboseStderr(text string) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n}\n\n\/\/ LogVerboseQuiet is the verbose log formatter\n\/\/ prints indented text to stdout (with quiet option)\nfunc LogVerboseQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogVerbose(text)\n\t}\n}\n\n\/\/ LogVerboseStderrQuiet is the verbose log formatter\n\/\/ prints indented text to stderr (with quiet option)\nfunc LogVerboseStderrQuiet(text string) {\n\tif os.Getenv(\"DOKKU_QUIET_OUTPUT\") == \"\" {\n\t\tLogVerboseStderr(text)\n\t}\n}\n\n\/\/ LogVerboseQuietContainerLogs is the verbose log formatter for container logs\nfunc LogVerboseQuietContainerLogs(containerID string) {\n\tLogVerboseQuietContainerLogsTail(containerID, 0, false)\n}\n\n\/\/ LogVerboseQuietContainerLogsTail is the verbose log formatter for container logs with tail mode enabled\nfunc LogVerboseQuietContainerLogsTail(containerID string, lines int, tail bool) {\n\targs := []string{\"container\", \"logs\", containerID}\n\tif lines > 0 {\n\t\targs = append(args, \"--tail\", strconv.Itoa(lines))\n\t}\n\tif tail {\n\t\targs = append(args, \"--follow\")\n\t}\n\n\tsc := NewShellCmdWithArgs(DockerBin(), args...)\n\tvar mu sync.Mutex\n\tsc.Command.Stdout = &writer{\n\t\tmu: &mu,\n\t\tsource: \"stdout\",\n\t}\n\tsc.Command.Stderr = &writer{\n\t\tmu: &mu,\n\t\tsource: \"stderr\",\n\t}\n\n\tif err := sc.Command.Start(); err != nil {\n\t\tLogExclaim(fmt.Sprintf(\"Failed to fetch container logs: %s\", containerID))\n\t}\n\n\tif err := sc.Command.Wait(); err != nil {\n\t\tLogExclaim(fmt.Sprintf(\"Failed to fetch container logs: %s\", containerID))\n\t}\n}\n\n\/\/ LogWarn is the warning log formatter\nfunc LogWarn(text string) {\n\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ! %s\", text))\n}\n\n\/\/ LogExclaim is the log exclaim formatter\nfunc LogExclaim(text string) {\n\tfmt.Println(fmt.Sprintf(\" ! %s\", text))\n}\n\n\/\/ LogStderr is the stderr log formatter\nfunc LogStderr(text string) {\n\tfmt.Fprintln(os.Stderr, text)\n}\n\n\/\/ LogDebug is the debug log formatter\nfunc LogDebug(text string) {\n\tif os.Getenv(\"DOKKU_TRACE\") == \"1\" {\n\t\tfmt.Fprintln(os.Stderr, fmt.Sprintf(\" ? %s\", strings.TrimPrefix(text, \" ? \")))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/mpl\/gocron\"\n)\n\nfunc skipToday() (bool, error) {\n\tdoneFile := filepath.Join(os.Getenv(\"$HOME\"), \".done\")\n\tfi, err := os.Stat(doneFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\t\/\/ if .done file was not created today, we ignore it and remove it\n\t\/\/ this breaks if it was created the same day of another month, but I don't\n\t\/\/ really care\n\ttoday := time.Now().Day()\n\tdoneDay := fi.ModTime().Day()\n\tif doneDay != today {\n\t\tif err := os.Remove(doneFile); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\nfunc main() {\n\tnotToday, err := skipToday()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif notToday {\n\t\treturn\n\t}\n\tjob := func() error {\n\t\treturn errors.New(\"syncblobs -interval=0 -askauth=true -debug=true\")\n\t}\n\tcron := gocron.Cron{\n\t\tInterval: 1 * time.Minute,\n\t\tLifeTime: 30 * time.Second, \/\/ so it will actually die before the next run\n\t\tJob: job,\n\t\tNotif: &gocron.Notification{\n\t\t\tHost: \"localhost:8082\",\n\t\t\tMsg: \"Syncblobs reminder\",\n\t\t\tTimeout: 5 * time.Minute, \/\/ how long the browser tab will remain open\n\t\t},\n\t}\n\tcron.Run()\n}\n<commit_msg>daily: print command if any argument<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/mpl\/gocron\"\n)\n\nfunc skipToday() (bool, error) {\n\tdoneFile := filepath.Join(os.Getenv(\"$HOME\"), \".done\")\n\tfi, err := os.Stat(doneFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\t\/\/ if .done file was not created today, we ignore it and remove it\n\t\/\/ this breaks if it was created the same day of another month, but I don't\n\t\/\/ really care\n\ttoday := time.Now().Day()\n\tdoneDay := fi.ModTime().Day()\n\tif doneDay != today {\n\t\tif err := os.Remove(doneFile); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\nfunc main() {\n\tif len(os.Args) > 1 {\n\t\tprintln(\"syncblobs -interval=0 -askauth=true -debug=true\")\n\t\tos.Exit(1)\n\t}\n\tnotToday, err := skipToday()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif notToday {\n\t\treturn\n\t}\n\tjob := func() error {\n\t\treturn errors.New(\"syncblobs -interval=0 -askauth=true -debug=true\")\n\t}\n\tcron := gocron.Cron{\n\t\tInterval: 1 * time.Minute,\n\t\tLifeTime: 30 * time.Second, \/\/ so it will actually die before the next run\n\t\tJob: job,\n\t\tNotif: &gocron.Notification{\n\t\t\tHost: \"localhost:8082\",\n\t\t\tMsg: \"Syncblobs reminder\",\n\t\t\tTimeout: 5 * time.Minute, \/\/ how long the browser tab will remain open\n\t\t},\n\t}\n\tcron.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package contiv implements plugin providing GRPC-server that accepts requests from CNI plugin.\n\/\/\n\/\/ 1. VETH-based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +------+ +------+ |\n\/\/ | | AF1 | | AFn | |\n\/\/ | | | ... | | |\n\/\/ | +------+ +------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ v\n\/\/ +------------+\n\/\/ | |\n\/\/ | Veth2 |\n\/\/ | |\n\/\/ +------------+\n\/\/ ^\n\/\/ | pod.go\n\/\/ +------|------------+\n\/\/ | NS1 v |\n\/\/ | +------------+ |\n\/\/ | | | |\n\/\/ | | Veth1 | |\n\/\/ | | | |\n\/\/ | +------------+ |\n\/\/ | |\n\/\/ +-------------------+\n\/\/\n\/\/\n\/\/ 2. TAP-based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +-------+ +-------+ |\n\/\/ | | TAP1 | | TAPn | |\n\/\/ | | | ... | | |\n\/\/ | +-------+ +-------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ |\n\/\/ | pod.go\n\/\/ +------|------------+\n\/\/ | NS1 v |\n\/\/ | +------------+ |\n\/\/ | | | |\n\/\/ | | Linux-TAP1 | |\n\/\/ | | | |\n\/\/ | +------------+ |\n\/\/ | |\n\/\/ +-------------------+\n\/\/\n\/\/\n\/\/ 3. VPP TCP stack based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +-------+ +-------+ |\n\/\/ | | LOOP1 | | LOOPn | |\n\/\/ | | | ... | | |\n\/\/ | +-------+ +-------+ |\n\/\/ | ^ ^ |\n\/\/ | | | |\n\/\/ | v v |\n\/\/ | +-----------------------+ |\n\/\/ | | VPP TCP Stack | |\n\/\/ | +-----------------------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ |\n\/\/ | pod.go\n\/\/ +------|---------------+\n\/\/ | NS1 v |\n\/\/ | +-----------------+ |\n\/\/ | | VCL | |\n\/\/ | | (LD_PRELOAD-ed) | |\n\/\/ | +-----------------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ | v |\n\/\/ | +------+ |\n\/\/ | | App | |\n\/\/ | +------+ |\n\/\/ +----------------------+\n\npackage contiv\n<commit_msg>Fix golint.<commit_after>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package contiv implements plugin providing GRPC-server that accepts requests from CNI plugin.\n\/\/\n\/\/ 1. VETH-based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +------+ +------+ |\n\/\/ | | AF1 | | AFn | |\n\/\/ | | | ... | | |\n\/\/ | +------+ +------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ v\n\/\/ +------------+\n\/\/ | |\n\/\/ | Veth2 |\n\/\/ | |\n\/\/ +------------+\n\/\/ ^\n\/\/ | pod.go\n\/\/ +------|------------+\n\/\/ | NS1 v |\n\/\/ | +------------+ |\n\/\/ | | | |\n\/\/ | | Veth1 | |\n\/\/ | | | |\n\/\/ | +------------+ |\n\/\/ | |\n\/\/ +-------------------+\n\/\/\n\/\/\n\/\/ 2. TAP-based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +-------+ +-------+ |\n\/\/ | | TAP1 | | TAPn | |\n\/\/ | | | ... | | |\n\/\/ | +-------+ +-------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ |\n\/\/ | pod.go\n\/\/ +------|------------+\n\/\/ | NS1 v |\n\/\/ | +------------+ |\n\/\/ | | | |\n\/\/ | | Linux-TAP1 | |\n\/\/ | | | |\n\/\/ | +------------+ |\n\/\/ | |\n\/\/ +-------------------+\n\/\/\n\/\/\n\/\/ 3. VPP TCP stack based pod-VPP connectivity\n\/\/\n\/\/ +-------------------------------------------------+\n\/\/ | vSwitch VPP host.go\n\/\/ | +--------------+ | +--------------+\n\/\/ | | vethVPP |____________| veth Host |\n\/\/ | routing | | | | |\n\/\/ | +--------------+ | +--------------+\n\/\/ | +-------+ +-------+ |\n\/\/ | | LOOP1 | | LOOPn | |\n\/\/ | | | ... | | |\n\/\/ | +-------+ +-------+ |\n\/\/ | ^ ^ |\n\/\/ | | | |\n\/\/ | v v |\n\/\/ | +-----------------------+ |\n\/\/ | | VPP TCP Stack | |\n\/\/ | +-----------------------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ +------|------------------------------------------+\n\/\/ |\n\/\/ | pod.go\n\/\/ +------|---------------+\n\/\/ | NS1 v |\n\/\/ | +-----------------+ |\n\/\/ | | VCL | |\n\/\/ | | (LD_PRELOAD-ed) | |\n\/\/ | +-----------------+ |\n\/\/ | ^ |\n\/\/ | | |\n\/\/ | v |\n\/\/ | +------+ |\n\/\/ | | App | |\n\/\/ | +------+ |\n\/\/ +----------------------+\npackage contiv\n<|endoftext|>"} {"text":"<commit_before>package feature\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/HouzuoGuo\/laitos\/env\"\n\t\"github.com\/HouzuoGuo\/laitos\/global\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar ErrBadEnvInfoChoice = errors.New(`elock | estop | log | runtime | stack`)\n\n\/\/ Retrieve environment information and trigger emergency stop upon request.\ntype EnvControl struct {\n}\n\nfunc (info *EnvControl) IsConfigured() bool {\n\treturn true\n}\n\nfunc (info *EnvControl) SelfTest() error {\n\treturn nil\n}\n\nfunc (info *EnvControl) Initialise() error {\n\treturn nil\n}\n\nfunc (info *EnvControl) Trigger() Trigger {\n\treturn \".e\"\n}\n\nfunc (info *EnvControl) Execute(cmd Command) *Result {\n\tif errResult := cmd.Trim(); errResult != nil {\n\t\treturn errResult\n\t}\n\tswitch strings.ToLower(cmd.Content) {\n\tcase \"elock\":\n\t\tglobal.TriggerEmergencyLockDown()\n\t\treturn &Result{Output: \"successfully triggered EmergencyLockDown\"}\n\tcase \"estop\":\n\t\tglobal.TriggerEmergencyStop()\n\t\treturn &Result{Output: \"successfully triggered EmergencyStop\"}\n\tcase \"runtime\":\n\t\treturn &Result{Output: GetRuntimeInfo()}\n\tcase \"log\":\n\t\treturn &Result{Output: GetLatestGlobalLog()}\n\tcase \"stack\":\n\t\treturn &Result{Output: GetGoroutineStacktraces()}\n\tdefault:\n\t\treturn &Result{Error: ErrBadEnvInfoChoice}\n\t}\n}\n\n\/\/ Return runtime information (uptime, CPUs, goroutines, memory usage) in a multi-line text.\nfunc GetRuntimeInfo() string {\n\tvar memStats runtime.MemStats\n\truntime.ReadMemStats(&memStats)\n\treturn fmt.Sprintf(`Public IP: %s\nUptime: %s\nNumber of CPUs: %d\nNumber of Goroutines: %d\nGOMAXPROCS: %d\nSystem memory usage: %d MBytes\n`,\n\t\tenv.GetPublicIP(),\n\t\ttime.Now().Sub(global.StartupTime).String(),\n\t\truntime.NumCPU(),\n\t\truntime.NumGoroutine(),\n\t\truntime.GOMAXPROCS(0),\n\t\tmemStats.Sys\/1024\/1024)\n}\n\n\/\/ Return latest log entries in a multi-line text, one log entry per line. Latest log entry comes first.\nfunc GetLatestGlobalLog() string {\n\tbuf := new(bytes.Buffer)\n\tglobal.LatestLogEntries.Iterate(func(entry string) bool {\n\t\tbuf.WriteString(entry)\n\t\tbuf.WriteRune('\\n')\n\t\treturn true\n\t})\n\treturn buf.String()\n}\n\n\/\/ Return stack traces of all currently running goroutines.\nfunc GetGoroutineStacktraces() string {\n\tbuf := new(bytes.Buffer)\n\tpprof.Lookup(\"goroutine\").WriteTo(buf, 1)\n\treturn buf.String()\n}\n<commit_msg>display time in runtime environment info<commit_after>package feature\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/HouzuoGuo\/laitos\/env\"\n\t\"github.com\/HouzuoGuo\/laitos\/global\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar ErrBadEnvInfoChoice = errors.New(`elock | estop | log | runtime | stack`)\n\n\/\/ Retrieve environment information and trigger emergency stop upon request.\ntype EnvControl struct {\n}\n\nfunc (info *EnvControl) IsConfigured() bool {\n\treturn true\n}\n\nfunc (info *EnvControl) SelfTest() error {\n\treturn nil\n}\n\nfunc (info *EnvControl) Initialise() error {\n\treturn nil\n}\n\nfunc (info *EnvControl) Trigger() Trigger {\n\treturn \".e\"\n}\n\nfunc (info *EnvControl) Execute(cmd Command) *Result {\n\tif errResult := cmd.Trim(); errResult != nil {\n\t\treturn errResult\n\t}\n\tswitch strings.ToLower(cmd.Content) {\n\tcase \"elock\":\n\t\tglobal.TriggerEmergencyLockDown()\n\t\treturn &Result{Output: \"successfully triggered EmergencyLockDown\"}\n\tcase \"estop\":\n\t\tglobal.TriggerEmergencyStop()\n\t\treturn &Result{Output: \"successfully triggered EmergencyStop\"}\n\tcase \"runtime\":\n\t\treturn &Result{Output: GetRuntimeInfo()}\n\tcase \"log\":\n\t\treturn &Result{Output: GetLatestGlobalLog()}\n\tcase \"stack\":\n\t\treturn &Result{Output: GetGoroutineStacktraces()}\n\tdefault:\n\t\treturn &Result{Error: ErrBadEnvInfoChoice}\n\t}\n}\n\n\/\/ Return runtime information (uptime, CPUs, goroutines, memory usage) in a multi-line text.\nfunc GetRuntimeInfo() string {\n\tvar memStats runtime.MemStats\n\truntime.ReadMemStats(&memStats)\n\treturn fmt.Sprintf(`Public IP: %s\nSystem time: %s\nUptime: %s\nNumber of CPUs: %d\nNumber of Goroutines: %d\nGOMAXPROCS: %d\nSystem memory usage: %d MBytes\n`,\n\t\tenv.GetPublicIP(),\n\t\ttime.Now().String(),\n\t\ttime.Now().Sub(global.StartupTime).String(),\n\t\truntime.NumCPU(),\n\t\truntime.NumGoroutine(),\n\t\truntime.GOMAXPROCS(0),\n\t\tmemStats.Sys\/1024\/1024)\n}\n\n\/\/ Return latest log entries in a multi-line text, one log entry per line. Latest log entry comes first.\nfunc GetLatestGlobalLog() string {\n\tbuf := new(bytes.Buffer)\n\tglobal.LatestLogEntries.Iterate(func(entry string) bool {\n\t\tbuf.WriteString(entry)\n\t\tbuf.WriteRune('\\n')\n\t\treturn true\n\t})\n\treturn buf.String()\n}\n\n\/\/ Return stack traces of all currently running goroutines.\nfunc GetGoroutineStacktraces() string {\n\tbuf := new(bytes.Buffer)\n\tpprof.Lookup(\"goroutine\").WriteTo(buf, 1)\n\treturn buf.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package collector\n\nimport \"github.com\/prometheus\/client_golang\/prometheus\"\n\nconst namespace = \"ipmi\"\n\nvar (\n\ttemperatures = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"temperatures\"),\n\t\t\"Contains the collected temperatures from IPMI\",\n\t\t[]string{\"sensor\"},\n\t\tnil,\n\t)\n\n\tfanspeed = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"fan_speed\"),\n\t\t\"Fan Speed in RPM\",\n\t\t[]string{\"fan\"},\n\t\tnil,\n\t)\n\n\tvoltages = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"voltages\"),\n\t\t\"Contains the voltages from IPMI\",\n\t\t[]string{\"sensor\"},\n\t\tnil,\n\t)\n\t\n current = prometheus.NewDesc(\n prometheus.BuildFQName(namespace, \"\", \"current\"),\n \"Contains the current from IPMI\",\n []string{\"sensor\"},\n nil,\n )\n\n\tintrusion = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"intrusion_status\"),\n\t\t\"Indicates if a chassis is open\",\n\t\tnil,\n\t\tnil,\n\t)\n\n\tpowersupply = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"power_supply_status\"),\n\t\t\"Indicates if a power supply is operational\",\n\t\t[]string{\"PSU\"},\n\t\tnil,\n\t)\n)\n<commit_msg>Fix go format with go fmt<commit_after>package collector\n\nimport \"github.com\/prometheus\/client_golang\/prometheus\"\n\nconst namespace = \"ipmi\"\n\nvar (\n\ttemperatures = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"temperatures\"),\n\t\t\"Contains the collected temperatures from IPMI\",\n\t\t[]string{\"sensor\"},\n\t\tnil,\n\t)\n\n\tfanspeed = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"fan_speed\"),\n\t\t\"Fan Speed in RPM\",\n\t\t[]string{\"fan\"},\n\t\tnil,\n\t)\n\n\tvoltages = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"voltages\"),\n\t\t\"Contains the voltages from IPMI\",\n\t\t[]string{\"sensor\"},\n\t\tnil,\n\t)\n\n\tcurrent = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"current\"),\n\t\t\"Contains the current from IPMI\",\n\t\t[]string{\"sensor\"},\n\t\tnil,\n\t)\n\n\tintrusion = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"intrusion_status\"),\n\t\t\"Indicates if a chassis is open\",\n\t\tnil,\n\t\tnil,\n\t)\n\n\tpowersupply = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, \"\", \"power_supply_status\"),\n\t\t\"Indicates if a power supply is operational\",\n\t\t[]string{\"PSU\"},\n\t\tnil,\n\t)\n)\n<|endoftext|>"} {"text":"<commit_before>package test\n\n\/\/ This file handles the consul service.\n\/\/ 1) We generate a config file with the port generated dynamically with BookPorts().\n\/\/ The reason is that the ports cannot be setup through the command line only.\n\/\/ It needs to be setup with a config file.\n\/\/ 2) It guarantees that both the service port is listening and the leader is elected.\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tconsul \"github.com\/hashicorp\/consul\/api\"\n)\n\nconst (\n\tconsulChkTimesListen = 20 \/\/ consulChkTimesListen is the number of times to retry for port listening.\n\tconsulChkDelayListen = 50 * time.Millisecond \/\/ consulChkDelayListen is the waiting time for next retry for port listening.\n\n\t\/\/ Constants to check leader election mechanism, which usually takes 1-2 seconds.\n\t\/\/ Thus, we use 5 seconds as the time limit for the checking.\n\tconsulChkTimesLeader = 10 \/\/ consulChkTimesLeader is the number of times to retry for leader election.\n\tconsulChkDelayLeader = 500 * time.Millisecond \/\/ consulChkDelayLeader is the waiting time for next retry for leader election.\n)\n\nfunc init() {\n\tRegisterService(Consul, func() Service {\n\t\treturn &consulService{}\n\t})\n}\n\n\/\/ consulConfig is the config for consul service.\n\/\/ It is basically a clone of the original config.\n\/\/ Ref: https:\/\/github.com\/hashicorp\/consul\/blob\/master\/command\/agent\/config.go#L96\n\/\/ https:\/\/www.consul.io\/docs\/agent\/options.html\ntype consulConfig struct {\n\tBootstrapExpect int `json:\"bootstrap_expect\"`\n\tServer bool `json:\"server\"`\n\tDataDir string `json:\"data_dir\"`\n\tPorts *consulPortsConfig `json:\"ports\"`\n}\n\n\/\/ consulPortsConfig is the config for ports of consul service.\n\/\/ It is basically a clone of the original Ports config.\n\/\/ Ref: https:\/\/github.com\/hashicorp\/consul\/blob\/master\/command\/agent\/config.go#L23\n\/\/ https:\/\/www.consul.io\/docs\/agent\/options.html#ports\ntype consulPortsConfig struct {\n\tDNS int `json:\"dns\"` \/\/ DNS Query interface\n\tHTTP int `json:\"http\"` \/\/ HTTP API\n\tHTTPS int `json:\"https\"` \/\/ HTTPS API\n\tRPC int `json:\"rpc\" ` \/\/ CLI RPC\n\tSerfLan int `json:\"serf_lan\"` \/\/ LAN gossip (Client + Server)\n\tSerfWan int `json:\"serf_wan\"` \/\/ WAN gossip (Server only)\n\tServer int `json:\"server\"` \/\/ Server internal RPC\n}\n\n\/\/ consulService is the consul service.\ntype consulService struct {\n\t\/\/ cmd is the command to run consul.\n\tcmd *exec.Cmd\n\n\t\/\/ port is the http port for consul service.\n\tport int\n}\n\n\/\/ Start runs the consul service and returns its port.\nfunc (s *consulService) Start() (port int, err error) {\n\tif err := CheckExecutable(\"consul\"); err != nil {\n\t\treturn 0, fmt.Errorf(\"Consul is not installed: %v\", err)\n\t}\n\tworkDir, err := ioutil.TempDir(\"\", \"consul\")\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to generate work dir: %v\", err)\n\t}\n\tports, err := BookPorts(5)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to book ports for consul: %v\", err)\n\t}\n\tconfig := &consulConfig{\n\t\tBootstrapExpect: 1,\n\t\tServer: true,\n\t\tDataDir: filepath.Join(workDir, \"data\"),\n\t\tPorts: &consulPortsConfig{\n\t\t\tDNS: -1,\n\t\t\tHTTP: ports[0],\n\t\t\tHTTPS: -1,\n\t\t\tRPC: ports[1],\n\t\t\tSerfLan: ports[2],\n\t\t\tSerfWan: ports[3],\n\t\t\tServer: ports[4],\n\t\t},\n\t}\n\tb, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to json marshal consul config: %v\", err)\n\t}\n\tconfigFile := filepath.Join(workDir, \"consul.conf\")\n\tif err := ioutil.WriteFile(configFile, b, os.ModePerm); err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to write config file(path: %v): %v\", configFile, err)\n\t}\n\ts.cmd = exec.Command(\n\t\t\"consul\", \"agent\", \"-config-file\", configFile,\n\t)\n\tif err := s.cmd.Start(); err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to start consul: %v\", err)\n\t}\n\ts.port = config.Ports.HTTP\n\n\t\/\/ Make sure that the server is running.\n\tif err := checkPortListening(s.port, consulChkTimesListen, consulChkDelayListen); err != nil {\n\t\treturn 0, fmt.Errorf(\"Port is not listening: %v\", err)\n\t}\n\tif err := checkLeaderElected(s.port, consulChkTimesLeader, consulChkDelayLeader); err != nil {\n\t\treturn 0, fmt.Errorf(\"Leader election mechanism fails: %v\", err)\n\t}\n\treturn s.port, nil\n}\n\n\/\/ checkPortListening checks whether the port is listening or not.\n\/\/ - port : The port to check.\n\/\/ - times: Total number of checks.\n\/\/ - delay: Time delay between checks.\nfunc checkPortListening(port, times int, delay time.Duration) error {\n\tfor i := 0; i < times; i++ {\n\t\ttime.Sleep(delay)\n\t\tif CheckListening(port) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Port(%v) is not listening within time: %v\",\n\t\tport, time.Duration(int64(times))*delay)\n}\n\n\/\/ checkLeaderElected checks whether the leader is elected for the consul service or not.\n\/\/ - port : The port to check.\n\/\/ - times: Total number of checks.\n\/\/ - delay: Time delay between checks.\nfunc checkLeaderElected(port, times int, delay time.Duration) error {\n\tconfig := consul.DefaultConfig()\n\tconfig.Address = fmt.Sprintf(\"127.0.0.1:%d\", port)\n\tclient, err := consul.NewClient(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Fail to build connection with consul agent: %v\", err)\n\t}\n\tfor i := 0; i < times; i++ {\n\t\ttime.Sleep(delay)\n\t\tif leader, _ := client.Status().Leader(); leader != \"\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Fail to find the leader within time: %v\", time.Duration(int64(times))*delay)\n}\n\n\/\/ Stop stops the consul service.\nfunc (s *consulService) Stop() error {\n\tif err := s.cmd.Process.Signal(os.Interrupt); err != nil {\n\t\treturn fmt.Errorf(\"Fail to stop consul service with INT: %v\", err)\n\t}\n\tfor i := 0; i < consulChkTimesListen; i++ {\n\t\ttime.Sleep(consulChkDelayListen)\n\t\tif !CheckListening(s.port) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Fail to stop consul service within time limit: %v\",\n\t\tconsulChkTimesListen*consulChkDelayListen)\n}\n<commit_msg>fix consul bind multiple IPs issue in Mac OSX<commit_after>package test\n\n\/\/ This file handles the consul service.\n\/\/ 1) We generate a config file with the port generated dynamically with BookPorts().\n\/\/ The reason is that the ports cannot be setup through the command line only.\n\/\/ It needs to be setup with a config file.\n\/\/ 2) It guarantees that both the service port is listening and the leader is elected.\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tconsul \"github.com\/hashicorp\/consul\/api\"\n)\n\nconst (\n\tconsulChkTimesListen = 20 \/\/ consulChkTimesListen is the number of times to retry for port listening.\n\tconsulChkDelayListen = 50 * time.Millisecond \/\/ consulChkDelayListen is the waiting time for next retry for port listening.\n\n\t\/\/ Constants to check leader election mechanism, which usually takes 1-2 seconds.\n\t\/\/ Thus, we use 5 seconds as the time limit for the checking.\n\tconsulChkTimesLeader = 10 \/\/ consulChkTimesLeader is the number of times to retry for leader election.\n\tconsulChkDelayLeader = 500 * time.Millisecond \/\/ consulChkDelayLeader is the waiting time for next retry for leader election.\n)\n\nfunc init() {\n\tRegisterService(Consul, func() Service {\n\t\treturn &consulService{}\n\t})\n}\n\n\/\/ consulConfig is the config for consul service.\n\/\/ It is basically a clone of the original config.\n\/\/ Ref: https:\/\/github.com\/hashicorp\/consul\/blob\/master\/command\/agent\/config.go#L96\n\/\/ https:\/\/www.consul.io\/docs\/agent\/options.html\ntype consulConfig struct {\n\tBootstrapExpect int `json:\"bootstrap_expect\"`\n\tServer bool `json:\"server\"`\n\tDataDir string `json:\"data_dir\"`\n\tPorts *consulPortsConfig `json:\"ports\"`\n}\n\n\/\/ consulPortsConfig is the config for ports of consul service.\n\/\/ It is basically a clone of the original Ports config.\n\/\/ Ref: https:\/\/github.com\/hashicorp\/consul\/blob\/master\/command\/agent\/config.go#L23\n\/\/ https:\/\/www.consul.io\/docs\/agent\/options.html#ports\ntype consulPortsConfig struct {\n\tDNS int `json:\"dns\"` \/\/ DNS Query interface\n\tHTTP int `json:\"http\"` \/\/ HTTP API\n\tHTTPS int `json:\"https\"` \/\/ HTTPS API\n\tRPC int `json:\"rpc\" ` \/\/ CLI RPC\n\tSerfLan int `json:\"serf_lan\"` \/\/ LAN gossip (Client + Server)\n\tSerfWan int `json:\"serf_wan\"` \/\/ WAN gossip (Server only)\n\tServer int `json:\"server\"` \/\/ Server internal RPC\n}\n\n\/\/ consulService is the consul service.\ntype consulService struct {\n\t\/\/ cmd is the command to run consul.\n\tcmd *exec.Cmd\n\n\t\/\/ port is the http port for consul service.\n\tport int\n}\n\n\/\/ Start runs the consul service and returns its port.\nfunc (s *consulService) Start() (port int, err error) {\n\tif err := CheckExecutable(\"consul\"); err != nil {\n\t\treturn 0, fmt.Errorf(\"Consul is not installed: %v\", err)\n\t}\n\tworkDir, err := ioutil.TempDir(\"\", \"consul\")\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to generate work dir: %v\", err)\n\t}\n\tports, err := BookPorts(5)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to book ports for consul: %v\", err)\n\t}\n\tconfig := &consulConfig{\n\t\tBootstrapExpect: 1,\n\t\tServer: true,\n\t\tDataDir: filepath.Join(workDir, \"data\"),\n\t\tPorts: &consulPortsConfig{\n\t\t\tDNS: -1,\n\t\t\tHTTP: ports[0],\n\t\t\tHTTPS: -1,\n\t\t\tRPC: ports[1],\n\t\t\tSerfLan: ports[2],\n\t\t\tSerfWan: ports[3],\n\t\t\tServer: ports[4],\n\t\t},\n\t}\n\tb, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to json marshal consul config: %v\", err)\n\t}\n\tconfigFile := filepath.Join(workDir, \"consul.conf\")\n\tif err := ioutil.WriteFile(configFile, b, os.ModePerm); err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to write config file(path: %v): %v\", configFile, err)\n\t}\n\ts.cmd = exec.Command(\n\t\t\"consul\", \"agent\", \"-bind\", \"127.0.0.1\", \"-config-file\", configFile,\n\t)\n\tif err := s.cmd.Start(); err != nil {\n\t\treturn 0, fmt.Errorf(\"Fail to start consul: %v\", err)\n\t}\n\ts.port = config.Ports.HTTP\n\n\t\/\/ Make sure that the server is running.\n\tif err := checkPortListening(s.port, consulChkTimesListen, consulChkDelayListen); err != nil {\n\t\treturn 0, fmt.Errorf(\"Port is not listening: %v\", err)\n\t}\n\tif err := checkLeaderElected(s.port, consulChkTimesLeader, consulChkDelayLeader); err != nil {\n\t\treturn 0, fmt.Errorf(\"Leader election mechanism fails: %v\", err)\n\t}\n\treturn s.port, nil\n}\n\n\/\/ checkPortListening checks whether the port is listening or not.\n\/\/ - port : The port to check.\n\/\/ - times: Total number of checks.\n\/\/ - delay: Time delay between checks.\nfunc checkPortListening(port, times int, delay time.Duration) error {\n\tfor i := 0; i < times; i++ {\n\t\ttime.Sleep(delay)\n\t\tif CheckListening(port) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Port(%v) is not listening within time: %v\",\n\t\tport, time.Duration(int64(times))*delay)\n}\n\n\/\/ checkLeaderElected checks whether the leader is elected for the consul service or not.\n\/\/ - port : The port to check.\n\/\/ - times: Total number of checks.\n\/\/ - delay: Time delay between checks.\nfunc checkLeaderElected(port, times int, delay time.Duration) error {\n\tconfig := consul.DefaultConfig()\n\tconfig.Address = fmt.Sprintf(\"127.0.0.1:%d\", port)\n\tclient, err := consul.NewClient(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Fail to build connection with consul agent: %v\", err)\n\t}\n\tfor i := 0; i < times; i++ {\n\t\ttime.Sleep(delay)\n\t\tif leader, _ := client.Status().Leader(); leader != \"\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Fail to find the leader within time: %v\", time.Duration(int64(times))*delay)\n}\n\n\/\/ Stop stops the consul service.\nfunc (s *consulService) Stop() error {\n\tif err := s.cmd.Process.Signal(os.Interrupt); err != nil {\n\t\treturn fmt.Errorf(\"Fail to stop consul service with INT: %v\", err)\n\t}\n\tfor i := 0; i < consulChkTimesListen; i++ {\n\t\ttime.Sleep(consulChkDelayListen)\n\t\tif !CheckListening(s.port) {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Fail to stop consul service within time limit: %v\",\n\t\tconsulChkTimesListen*consulChkDelayListen)\n}\n<|endoftext|>"} {"text":"<commit_before>package comb\n\nimport (\n\t\"math\/big\"\n)\n\nvar (\n\tONE = big.NewInt(1)\n\tTWO = big.NewInt(2)\n)\n\n\/\/ Comb takes a slice and a number of elements to choose from that slice and\n\/\/ returns a function that given a slice will fill it in with a combination\n\/\/ until none are left, at which point it will return false.\n\/\/\n\/\/ This algorithm is based on one from TAOCP Vol. 4 by Donald Knuth.\nfunc Generator(a []uint32, k uint32) func([]uint32) bool {\n\t\/\/ There is one way to choose 0 -- the empty set.\n\tif k == 0 {\n\t\treturn func(_ []uint32) bool {\n\t\t\treturn false\n\t\t}\n\t}\n\tvar i, j, x uint32\n\tc := make([]uint32, k+3, k+3)\n\tfor i = 1; i <= k; i++ {\n\t\tc[i] = i\n\t}\n\tc[k+1] = uint32(len(a)) + 1\n\tc[k+2] = 0\n\tj = k\n\treturn func(v []uint32) bool {\n\t\tfor i = k; i > 0; i-- {\n\t\t\tv[k-i] = a[c[i]-1]\n\t\t}\n\n\t\tif j > 0 {\n\t\t\tx = j + 1\n\t\t\tgoto incr\n\t\t}\n\t\tif c[1]+1 < c[2] {\n\t\t\tc[1] += 1\n\t\t\treturn true\n\t\t}\n\t\tj = 2\ndo_more:\n\t\tc[j-1] = j - 1\n\t\tx = c[j] + 1\n\t\tif x == c[j+1] {\n\t\t\tj++\n\t\t\tgoto do_more\n\t\t}\n\t\t\/\/ If true, the algorithm is done.\n\t\tif j > k {\n\t\t\treturn false\n\t\t}\nincr:\n\t\tc[j] = x\n\t\tj--\n\t\treturn true\n\t}\n}\n\n\/\/ Fact returns n! (factorial).\n\/\/ Returns 1 for negative values.\nfunc Fact(n *big.Int) *big.Int {\n\tif n.Cmp(TWO) == -1 {\n\t\treturn ONE\n\t}\n\n\tsum := big.NewInt(1)\n\tn0 := new(big.Int).Set(n)\n\tfor n0.Cmp(ONE) != -1 {\n\t\tsum.Mul(sum, n0)\n\t\tn0.Sub(n0, ONE)\n\t}\n\treturn sum\n}\n\n\/\/ div returns m! \/ n!.\n\/\/ This is an optimization to avoid calculating large factorials.\n\/\/ Note: n must be less than m.\nfunc div(m, n *big.Int) *big.Int {\n\tn0 := new(big.Int).Set(n)\n\tsum := new(big.Int).Add(n0, ONE)\n\tn0.Add(n0, TWO)\n\tfor n0.Cmp(m) != 1 {\n\t\tsum.Mul(sum, n0)\n\t\tn0.Add(n0, ONE)\n\t}\n\treturn sum\n}\n\n\/\/ Choose returns the number of combinations for n choose k.\nfunc Count(n, k *big.Int) *big.Int {\n\tif n.Cmp(k) == 0 {\n\t\treturn ONE\n\t}\n\ts := new(big.Int).Sub(n, k)\n\tif k.Cmp(s) == 1 {\n\t\treturn s.Quo(div(n, k), Fact(s))\n\t}\n\treturn s.Quo(div(n, s), Fact(k))\n}\n<commit_msg>Removed mutable global vars from comb.<commit_after>package comb\n\nimport (\n\t\"math\/big\"\n)\n\n\/\/ Comb takes a slice and a number of elements to choose from that slice and\n\/\/ returns a function that given a slice will fill it in with a combination\n\/\/ until none are left, at which point it will return false.\n\/\/\n\/\/ This algorithm is based on one from TAOCP Vol. 4 by Donald Knuth.\nfunc Generator(a []uint32, k uint32) func([]uint32) bool {\n\t\/\/ There is one way to choose 0 -- the empty set.\n\tif k == 0 {\n\t\treturn func(_ []uint32) bool {\n\t\t\treturn false\n\t\t}\n\t}\n\tvar i, j, x uint32\n\tc := make([]uint32, k+3, k+3)\n\tfor i = 1; i <= k; i++ {\n\t\tc[i] = i\n\t}\n\tc[k+1] = uint32(len(a)) + 1\n\tc[k+2] = 0\n\tj = k\n\treturn func(v []uint32) bool {\n\t\tfor i = k; i > 0; i-- {\n\t\t\tv[k-i] = a[c[i]-1]\n\t\t}\n\n\t\tif j > 0 {\n\t\t\tx = j + 1\n\t\t\tgoto incr\n\t\t}\n\t\tif c[1]+1 < c[2] {\n\t\t\tc[1] += 1\n\t\t\treturn true\n\t\t}\n\t\tj = 2\ndo_more:\n\t\tc[j-1] = j - 1\n\t\tx = c[j] + 1\n\t\tif x == c[j+1] {\n\t\t\tj++\n\t\t\tgoto do_more\n\t\t}\n\t\t\/\/ If true, the algorithm is done.\n\t\tif j > k {\n\t\t\treturn false\n\t\t}\nincr:\n\t\tc[j] = x\n\t\tj--\n\t\treturn true\n\t}\n}\n\n\/\/ Fact returns n! (factorial).\n\/\/ Returns 1 for negative values.\nfunc Fact(n *big.Int) *big.Int {\n\tif n.Cmp(big.NewInt(2)) == -1 {\n\t\treturn big.NewInt(1)\n\t}\n\n\tsum := big.NewInt(1)\n\tn0 := new(big.Int).Set(n)\n\tfor n0.Cmp(big.NewInt(1)) != -1 {\n\t\tsum.Mul(sum, n0)\n\t\tn0.Sub(n0, big.NewInt(1))\n\t}\n\treturn sum\n}\n\n\/\/ div returns m! \/ n!.\n\/\/ This is an optimization to avoid calculating large factorials.\n\/\/ Note: n must be less than m.\nfunc div(m, n *big.Int) *big.Int {\n\tn0 := new(big.Int).Set(n)\n\tsum := new(big.Int).Add(n0, big.NewInt(1))\n\tn0.Add(n0, big.NewInt(2))\n\tfor n0.Cmp(m) != 1 {\n\t\tsum.Mul(sum, n0)\n\t\tn0.Add(n0, big.NewInt(1))\n\t}\n\treturn sum\n}\n\n\/\/ Choose returns the number of combinations for n choose k.\nfunc Count(n, k *big.Int) *big.Int {\n\tif n.Cmp(k) == 0 {\n\t\treturn big.NewInt(1)\n\t}\n\ts := new(big.Int).Sub(n, k)\n\tif k.Cmp(s) == 1 {\n\t\treturn s.Quo(div(n, k), Fact(s))\n\t}\n\treturn s.Quo(div(n, s), Fact(k))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/montanaflynn\/stats\"\n)\n\nfunc main() {\n\ta, _ := stats.Min([]float64{1.1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 1.1\n\n\ta, _ = stats.Max([]float64{1.1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 5\n\n\ta, _ = stats.Sum([]float64{1.1, 2.2, 3.3})\n\tfmt.Println(a) \/\/ 6.6\n\n\ta, _ = stats.Mean([]float64{1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 3\n\n\ta, _ = stats.Median([]float64{1, 2, 3, 4, 5, 6, 7})\n\tfmt.Println(a) \/\/ 4\n\n\tm, _ := stats.Mode([]float64{5, 5, 3, 3, 4, 2, 1})\n\tfmt.Println(m) \/\/ [5 3]\n\n\ta, _ = stats.VarP([]float64{1,2,3,4,5})\n\tfmt.Println(a) \/\/ 2\n\n\ta, _ = stats.VarS([]float64{1,2,3,4,5})\n\tfmt.Println(a) \/\/ 2.5\n\n\ta, _ = stats.StdDevP([]float64{1, 2, 3})\n\tfmt.Println(a) \/\/ 0.816496580927726\n\n\ta, _ = stats.StdDevS([]float64{1, 2, 3})\n\tfmt.Println(a) \/\/ 1\n\n\ta, _ = stats.Percentile([]float64{1, 2, 3, 4, 5}, 75)\n\tfmt.Println(a) \/\/ 4\n\n\ta, _ = stats.PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)\n\tfmt.Println(a) \/\/ 40\n\n\ta, _ = stats.Round(5.3253543, 3)\n\tfmt.Println(a) \/\/ 5.325\n\n\tc := []stats.Coordinate{\n\t\t{1, 2.3},\n\t\t{2, 3.3},\n\t\t{3, 3.7},\n\t\t{4, 4.3},\n\t\t{5, 5.3},\n\t}\n\n\tr, _ := stats.LinReg(c)\n\tfmt.Println(r) \/\/ [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]\n\n\tr, _ = stats.ExpReg(c)\n\tfmt.Println(r) \/\/ [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]\n\n\tr, _ = stats.LogReg(c)\n\tfmt.Println(r) \/\/ [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]\n\n\ts, _ := stats.Sample([]float64{0.1,0.2,0.3,0.4}, 3, false)\n\tfmt.Println(s) \/\/ [0.2,0.4,0.3]\n\n\ts, _ = stats.Sample([]float64{0.1,0.2,0.3,0.4}, 10, true)\n\tfmt.Println(s) \/\/ [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]\n\n\tq, _ := stats.Quartile([]float64{7, 15, 36, 39, 40, 41})\n\tfmt.Println(q) \/\/ {15 37.5 40}\n\n\tiqr, _ := stats.InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})\n\tfmt.Println(iqr) \/\/ 10\n\n\tmh, _ := stats.Midhinge( []float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})\n\tfmt.Println(mh) \/\/ 7.5\n\n\ttr, _ := stats.Trimean( []float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})\n\tfmt.Println(tr) \/\/ 7.25\n\n\to, _ := stats.QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})\n\tfmt.Printf(\"%+v\\n\", o) \/\/ {Mild:[15 18] Extreme:[-1000 100]}\n}<commit_msg>Run go fmt to clean up formatting<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/montanaflynn\/stats\"\n)\n\nfunc main() {\n\ta, _ := stats.Min([]float64{1.1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 1.1\n\n\ta, _ = stats.Max([]float64{1.1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 5\n\n\ta, _ = stats.Sum([]float64{1.1, 2.2, 3.3})\n\tfmt.Println(a) \/\/ 6.6\n\n\ta, _ = stats.Mean([]float64{1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 3\n\n\ta, _ = stats.Median([]float64{1, 2, 3, 4, 5, 6, 7})\n\tfmt.Println(a) \/\/ 4\n\n\tm, _ := stats.Mode([]float64{5, 5, 3, 3, 4, 2, 1})\n\tfmt.Println(m) \/\/ [5 3]\n\n\ta, _ = stats.VarP([]float64{1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 2\n\n\ta, _ = stats.VarS([]float64{1, 2, 3, 4, 5})\n\tfmt.Println(a) \/\/ 2.5\n\n\ta, _ = stats.StdDevP([]float64{1, 2, 3})\n\tfmt.Println(a) \/\/ 0.816496580927726\n\n\ta, _ = stats.StdDevS([]float64{1, 2, 3})\n\tfmt.Println(a) \/\/ 1\n\n\ta, _ = stats.Percentile([]float64{1, 2, 3, 4, 5}, 75)\n\tfmt.Println(a) \/\/ 4\n\n\ta, _ = stats.PercentileNearestRank([]float64{35, 20, 15, 40, 50}, 75)\n\tfmt.Println(a) \/\/ 40\n\n\ta, _ = stats.Round(5.3253543, 3)\n\tfmt.Println(a) \/\/ 5.325\n\n\tc := []stats.Coordinate{\n\t\t{1, 2.3},\n\t\t{2, 3.3},\n\t\t{3, 3.7},\n\t\t{4, 4.3},\n\t\t{5, 5.3},\n\t}\n\n\tr, _ := stats.LinReg(c)\n\tfmt.Println(r) \/\/ [{1 2.3800000000000026} {2 3.0800000000000014} {3 3.7800000000000002} {4 4.479999999999999} {5 5.179999999999998}]\n\n\tr, _ = stats.ExpReg(c)\n\tfmt.Println(r) \/\/ [{1 2.5150181024736638} {2 3.032084111136781} {3 3.6554544271334493} {4 4.406984298281804} {5 5.313022222665875}]\n\n\tr, _ = stats.LogReg(c)\n\tfmt.Println(r) \/\/ [{1 2.1520822363811702} {2 3.3305559222492214} {3 4.019918836568674} {4 4.509029608117273} {5 4.888413396683663}]\n\n\ts, _ := stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 3, false)\n\tfmt.Println(s) \/\/ [0.2,0.4,0.3]\n\n\ts, _ = stats.Sample([]float64{0.1, 0.2, 0.3, 0.4}, 10, true)\n\tfmt.Println(s) \/\/ [0.2,0.2,0.4,0.1,0.2,0.4,0.3,0.2,0.2,0.1]\n\n\tq, _ := stats.Quartile([]float64{7, 15, 36, 39, 40, 41})\n\tfmt.Println(q) \/\/ {15 37.5 40}\n\n\tiqr, _ := stats.InterQuartileRange([]float64{102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118})\n\tfmt.Println(iqr) \/\/ 10\n\n\tmh, _ := stats.Midhinge([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})\n\tfmt.Println(mh) \/\/ 7.5\n\n\ttr, _ := stats.Trimean([]float64{1, 3, 4, 4, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 12, 13})\n\tfmt.Println(tr) \/\/ 7.25\n\n\to, _ := stats.QuartileOutliers([]float64{-1000, 1, 3, 4, 4, 6, 6, 6, 6, 7, 8, 15, 18, 100})\n\tfmt.Printf(\"%+v\\n\", o) \/\/ {Mild:[15 18] Extreme:[-1000 100]}\n}<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage examples_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/latest\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/validation\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/golang\/glog\"\n)\n\nfunc validateObject(obj runtime.Object) (errors []error) {\n\tswitch t := obj.(type) {\n\tcase *api.ReplicationController:\n\t\terrors = validation.ValidateManifest(&t.DesiredState.PodTemplate.DesiredState.Manifest)\n\tcase *api.ReplicationControllerList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tcase *api.Service:\n\t\terrors = validation.ValidateService(t)\n\tcase *api.ServiceList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tcase *api.Pod:\n\t\terrors = validation.ValidateManifest(&t.DesiredState.Manifest)\n\tcase *api.PodList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tdefault:\n\t\treturn []error{fmt.Errorf(\"no validation defined for %#v\", obj)}\n\t}\n\treturn errors\n}\n\nfunc walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {\n\terr := filepath.Walk(inDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() && path != inDir {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tname := filepath.Base(path)\n\t\text := filepath.Ext(name)\n\t\tif ext != \"\" {\n\t\t\tname = name[:len(name)-len(ext)]\n\t\t}\n\t\tif !(ext == \".json\" || ext == \".yaml\") {\n\t\t\treturn nil\n\t\t}\n\t\tglog.Infof(\"Testing %s\", path)\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfn(name, path, data)\n\t\treturn nil\n\t})\n\treturn err\n}\n\nfunc TestExampleObjectSchemas(t *testing.T) {\n\tcases := map[string]map[string]runtime.Object{\n\t\t\"..\/api\/examples\": {\n\t\t\t\"controller\": &api.ReplicationController{},\n\t\t\t\"controller-list\": &api.ReplicationControllerList{},\n\t\t\t\"pod\": &api.Pod{},\n\t\t\t\"pod-list\": &api.PodList{},\n\t\t\t\"service\": &api.Service{},\n\t\t\t\"external-service\": &api.Service{},\n\t\t\t\"service-list\": &api.ServiceList{},\n\t\t},\n\t\t\"..\/examples\/guestbook\": {\n\t\t\t\"frontend-controller\": &api.ReplicationController{},\n\t\t\t\"redis-slave-controller\": &api.ReplicationController{},\n\t\t\t\"redis-master\": &api.Pod{},\n\t\t\t\"frontend-service\": &api.Service{},\n\t\t\t\"redis-master-service\": &api.Service{},\n\t\t\t\"redis-slave-service\": &api.Service{},\n\t\t},\n\t\t\"..\/examples\/walkthrough\": {\n\t\t\t\"pod1\": &api.Pod{},\n\t\t\t\"pod2\": &api.Pod{},\n\t\t},\n\t}\n\n\tfor path, expected := range cases {\n\t\ttested := 0\n\t\terr := walkJSONFiles(path, func(name, path string, data []byte) {\n\t\t\texpectedType, found := expected[name]\n\t\t\tif !found {\n\t\t\t\tt.Errorf(\"%s does not have a test case defined\", path)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttested += 1\n\t\t\tif err := latest.Codec.DecodeInto(data, expectedType); err != nil {\n\t\t\t\tt.Errorf(\"%s did not decode correctly: %v\\n%s\", path, err, string(data))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif errors := validateObject(expectedType); len(errors) > 0 {\n\t\t\t\tt.Errorf(\"%s did not validate correctly: %v\", path, errors)\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Expected no error, Got %v\", err)\n\t\t}\n\t\tif tested != len(expected) {\n\t\t\tt.Errorf(\"Expected %d examples, Got %d\", len(expected), tested)\n\t\t}\n\t}\n}\n\nvar sampleRegexp = regexp.MustCompile(\"(?ms)^```(?:(?P<type>yaml)\\\\w*\\\\n(?P<content>.+?)|\\\\w*\\\\n(?P<content>\\\\{.+?\\\\}))\\\\w*\\\\n^```\")\nvar subsetRegexp = regexp.MustCompile(\"(?ms)\\\\.{3}\")\n\nfunc TestReadme(t *testing.T) {\n\tpaths := []string{\n\t\t\"..\/README.md\",\n\t\t\"..\/examples\/walkthrough\/README.md\",\n\t}\n\n\tfor _, path := range paths {\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unable to read file %s: %v\", path, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches := sampleRegexp.FindAllStringSubmatch(string(data), -1)\n\t\tif matches == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, match := range matches {\n\t\t\tvar content, subtype string\n\t\t\tfor i, name := range sampleRegexp.SubexpNames() {\n\t\t\t\tif name == \"type\" {\n\t\t\t\t\tsubtype = match[i]\n\t\t\t\t}\n\t\t\t\tif name == \"content\" && match[i] != \"\" {\n\t\t\t\t\tcontent = match[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif subtype == \"yaml\" && subsetRegexp.FindString(content) != \"\" {\n\t\t\t\tt.Logf(\"skipping (%s): \\n%s\", subtype, content)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/t.Logf(\"testing (%s): \\n%s\", subtype, content)\n\t\t\texpectedType := &api.Pod{}\n\t\t\tif err := latest.Codec.DecodeInto([]byte(content), expectedType); err != nil {\n\t\t\t\tt.Errorf(\"%s did not decode correctly: %v\\n%s\", path, err, string(content))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif errors := validateObject(expectedType); len(errors) > 0 {\n\t\t\t\tt.Errorf(\"%s did not validate correctly: %v\", path, errors)\n\t\t\t}\n\t\t\t_, err := latest.Codec.Encode(expectedType)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Could not encode object: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fix examples test by setting a context prior to validation<commit_after>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage examples_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/latest\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\/validation\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/golang\/glog\"\n)\n\nfunc validateObject(obj runtime.Object) (errors []error) {\n\tctx := api.NewDefaultContext()\n\tswitch t := obj.(type) {\n\tcase *api.ReplicationController:\n\t\terrors = validation.ValidateManifest(&t.DesiredState.PodTemplate.DesiredState.Manifest)\n\tcase *api.ReplicationControllerList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tcase *api.Service:\n\t\tapi.ValidNamespaceOnCreateOrUpdate(ctx, &t.JSONBase)\n\t\terrors = validation.ValidateService(t)\n\tcase *api.ServiceList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tcase *api.Pod:\n\t\tapi.ValidNamespaceOnCreateOrUpdate(ctx, &t.JSONBase)\n\t\terrors = validation.ValidateManifest(&t.DesiredState.Manifest)\n\tcase *api.PodList:\n\t\tfor i := range t.Items {\n\t\t\terrors = append(errors, validateObject(&t.Items[i])...)\n\t\t}\n\tdefault:\n\t\treturn []error{fmt.Errorf(\"no validation defined for %#v\", obj)}\n\t}\n\treturn errors\n}\n\nfunc walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {\n\terr := filepath.Walk(inDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() && path != inDir {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tname := filepath.Base(path)\n\t\text := filepath.Ext(name)\n\t\tif ext != \"\" {\n\t\t\tname = name[:len(name)-len(ext)]\n\t\t}\n\t\tif !(ext == \".json\" || ext == \".yaml\") {\n\t\t\treturn nil\n\t\t}\n\t\tglog.Infof(\"Testing %s\", path)\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfn(name, path, data)\n\t\treturn nil\n\t})\n\treturn err\n}\n\nfunc TestExampleObjectSchemas(t *testing.T) {\n\tcases := map[string]map[string]runtime.Object{\n\t\t\"..\/api\/examples\": {\n\t\t\t\"controller\": &api.ReplicationController{},\n\t\t\t\"controller-list\": &api.ReplicationControllerList{},\n\t\t\t\"pod\": &api.Pod{},\n\t\t\t\"pod-list\": &api.PodList{},\n\t\t\t\"service\": &api.Service{},\n\t\t\t\"external-service\": &api.Service{},\n\t\t\t\"service-list\": &api.ServiceList{},\n\t\t},\n\t\t\"..\/examples\/guestbook\": {\n\t\t\t\"frontend-controller\": &api.ReplicationController{},\n\t\t\t\"redis-slave-controller\": &api.ReplicationController{},\n\t\t\t\"redis-master\": &api.Pod{},\n\t\t\t\"frontend-service\": &api.Service{},\n\t\t\t\"redis-master-service\": &api.Service{},\n\t\t\t\"redis-slave-service\": &api.Service{},\n\t\t},\n\t\t\"..\/examples\/walkthrough\": {\n\t\t\t\"pod1\": &api.Pod{},\n\t\t\t\"pod2\": &api.Pod{},\n\t\t},\n\t}\n\n\tfor path, expected := range cases {\n\t\ttested := 0\n\t\terr := walkJSONFiles(path, func(name, path string, data []byte) {\n\t\t\texpectedType, found := expected[name]\n\t\t\tif !found {\n\t\t\t\tt.Errorf(\"%s does not have a test case defined\", path)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttested += 1\n\t\t\tif err := latest.Codec.DecodeInto(data, expectedType); err != nil {\n\t\t\t\tt.Errorf(\"%s did not decode correctly: %v\\n%s\", path, err, string(data))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif errors := validateObject(expectedType); len(errors) > 0 {\n\t\t\t\tt.Errorf(\"%s did not validate correctly: %v\", path, errors)\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Expected no error, Got %v\", err)\n\t\t}\n\t\tif tested != len(expected) {\n\t\t\tt.Errorf(\"Expected %d examples, Got %d\", len(expected), tested)\n\t\t}\n\t}\n}\n\nvar sampleRegexp = regexp.MustCompile(\"(?ms)^```(?:(?P<type>yaml)\\\\w*\\\\n(?P<content>.+?)|\\\\w*\\\\n(?P<content>\\\\{.+?\\\\}))\\\\w*\\\\n^```\")\nvar subsetRegexp = regexp.MustCompile(\"(?ms)\\\\.{3}\")\n\nfunc TestReadme(t *testing.T) {\n\tpaths := []string{\n\t\t\"..\/README.md\",\n\t\t\"..\/examples\/walkthrough\/README.md\",\n\t}\n\n\tfor _, path := range paths {\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unable to read file %s: %v\", path, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches := sampleRegexp.FindAllStringSubmatch(string(data), -1)\n\t\tif matches == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, match := range matches {\n\t\t\tvar content, subtype string\n\t\t\tfor i, name := range sampleRegexp.SubexpNames() {\n\t\t\t\tif name == \"type\" {\n\t\t\t\t\tsubtype = match[i]\n\t\t\t\t}\n\t\t\t\tif name == \"content\" && match[i] != \"\" {\n\t\t\t\t\tcontent = match[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif subtype == \"yaml\" && subsetRegexp.FindString(content) != \"\" {\n\t\t\t\tt.Logf(\"skipping (%s): \\n%s\", subtype, content)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/t.Logf(\"testing (%s): \\n%s\", subtype, content)\n\t\t\texpectedType := &api.Pod{}\n\t\t\tif err := latest.Codec.DecodeInto([]byte(content), expectedType); err != nil {\n\t\t\t\tt.Errorf(\"%s did not decode correctly: %v\\n%s\", path, err, string(content))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif errors := validateObject(expectedType); len(errors) > 0 {\n\t\t\t\tt.Errorf(\"%s did not validate correctly: %v\", path, errors)\n\t\t\t}\n\t\t\t_, err := latest.Codec.Encode(expectedType)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Could not encode object: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/brotherlogic\/goserver\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpbd \"github.com\/brotherlogic\/discovery\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\tpbs \"github.com\/brotherlogic\/goserver\/proto\"\n)\n\n\/\/ Server the main server type\ntype Server struct {\n\t*goserver.GoServer\n\trunner *Runner\n\tdisk diskChecker\n}\n\nfunc getIP(name string, server string) (string, int) {\n\tconn, _ := grpc.Dial(\"192.168.86.34:50055\", grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tentry := pbd.RegistryEntry{Name: name, Identifier: server}\n\tr, err := registry.Discover(context.Background(), &entry)\n\n\tif err != nil {\n\t\tlog.Printf(\"Lookup failed for %v,%v -> %v\", name, server, err)\n\t\treturn \"\", -1\n\t}\n\n\treturn r.Ip, int(r.Port)\n}\n\n\/\/ updateState of the runner command\nfunc updateState(com *runnerCommand) {\n\telems := strings.Split(com.details.Spec.Name, \"\/\")\n\tdServer, dPort := getIP(elems[len(elems)-1], com.details.Spec.Server)\n\n\tif dPort > 0 {\n\t\tdConn, err := grpc.Dial(dServer+\":\"+strconv.Itoa(dPort), grpc.WithInsecure())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer dConn.Close()\n\n\t\tc := pbs.NewGoserverServiceClient(dConn)\n\t\t_, err = c.IsAlive(context.Background(), &pbs.Alive{})\n\t\tcom.details.Running = (err == nil)\n\t} else {\n\t\t\/\/Mark as false if we can't locate the job\n\t\tcom.details.Running = false\n\t}\n}\n\n\/\/ BuildJob builds out a job\nfunc (s *Server) BuildJob(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.Checkout(in.Name)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ List lists all running jobs\nfunc (s *Server) List(ctx context.Context, in *pb.Empty) (*pb.JobList, error) {\n\tdetails := &pb.JobList{}\n\tfor _, job := range s.runner.backgroundTasks {\n\t\tupdateState(job)\n\t\tdetails.Details = append(details.Details, job.details)\n\t}\n\n\treturn details, nil\n}\n\n\/\/ Run runs a background task\nfunc (s *Server) Run(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.Run(in)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ Kill a background task\nfunc (s *Server) Kill(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.kill(in)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ DoRegister Registers this server\nfunc (s Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterGoBuildSlaveServer(server, &s)\n}\n\n\/\/ ReportHealth determines if the server is healthy\nfunc (s Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/Init builds the default runner framework\nfunc Init() *Runner {\n\tr := &Runner{gopath: \"goautobuild\"}\n\tr.runner = runCommand\n\tgo r.run()\n\treturn r\n}\n\nfunc runCommand(c *runnerCommand) {\n\tlog.Printf(\"RUNNING COMMAND: %v\", c)\n\tenv := os.Environ()\n\thome := \"\"\n\tfor _, s := range env {\n\t\tif strings.HasPrefix(s, \"HOME=\") {\n\t\t\thome = s[5:]\n\t\t}\n\t}\n\n\tif len(home) == 0 {\n\n\t}\n\tgpath := home + \"\/gobuild\"\n\tc.command.Path = strings.Replace(c.command.Path, \"$GOPATH\", gpath, -1)\n\tfor i := range c.command.Args {\n\t\tc.command.Args[i] = strings.Replace(c.command.Args[i], \"$GOPATH\", gpath, -1)\n\t}\n\n\tpath := fmt.Sprintf(\"GOPATH=\" + home + \"\/gobuild\")\n\tfound := false\n\tlog.Printf(\"HERE = %v\", c.command.Env)\n\tenvl := os.Environ()\n\tfor i, blah := range envl {\n\t\tif strings.HasPrefix(blah, \"GOPATH\") {\n\t\t\tenvl[i] = path\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\tenvl = append(envl, path)\n\t}\n\tlog.Printf(\"ENV = %v\", envl)\n\tc.command.Env = envl\n\n\tout, err := c.command.StdoutPipe()\n\tout2, err2 := c.command.StderrPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Blah: %v\", err)\n\t}\n\n\tif err2 != nil {\n\t\tlog.Printf(\"Blah2: %v\", err)\n\t}\n\n\tlog.Printf(\"%v, %v and %v\", c.command.Path, c.command.Args, c.command.Env)\n\tc.command.Start()\n\n\tif !c.background {\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(out)\n\t\tstr := buf.String()\n\n\t\tbuf2 := new(bytes.Buffer)\n\t\tbuf2.ReadFrom(out2)\n\t\tstr2 := buf2.String()\n\t\tlog.Printf(\"%v and %v\", str, str2)\n\n\t\tc.command.Wait()\n\t\tc.output = str\n\t\tc.complete = true\n\t}\n\tlog.Printf(\"DONE\")\n}\n\nfunc (diskChecker prodDiskChecker) diskUsage(path string) int64 {\n\treturn diskUsage(path)\n}\n\nfunc (s *Server) rebuildLoop() {\n\tfor true {\n\t\ttime.Sleep(time.Minute)\n\n\t\tvar rebuildList []*pb.JobSpec\n\t\tfor _, job := range s.runner.backgroundTasks {\n\t\t\tif time.Since(job.started) > time.Hour {\n\t\t\t\trebuildList = append(rebuildList, job.details.Spec)\n\t\t\t}\n\t\t}\n\n\t\tfor _, job := range rebuildList {\n\t\t\ts.runner.Rebuild(job)\n\t\t}\n\t}\n}\n\nfunc main() {\n\ts := Server{&goserver.GoServer{}, Init(), prodDiskChecker{}}\n\ts.Register = s\n\ts.PrepServer()\n\ts.RegisterServingTask(s.rebuildLoop)\n\ts.RegisterServer(\"gobuildslave\", false)\n\ts.Serve()\n}\n<commit_msg>Added logging to rebuild loop<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/brotherlogic\/goserver\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpbd \"github.com\/brotherlogic\/discovery\/proto\"\n\tpb \"github.com\/brotherlogic\/gobuildslave\/proto\"\n\tpbs \"github.com\/brotherlogic\/goserver\/proto\"\n)\n\n\/\/ Server the main server type\ntype Server struct {\n\t*goserver.GoServer\n\trunner *Runner\n\tdisk diskChecker\n}\n\nfunc getIP(name string, server string) (string, int) {\n\tconn, _ := grpc.Dial(\"192.168.86.34:50055\", grpc.WithInsecure())\n\tdefer conn.Close()\n\n\tregistry := pbd.NewDiscoveryServiceClient(conn)\n\tentry := pbd.RegistryEntry{Name: name, Identifier: server}\n\tr, err := registry.Discover(context.Background(), &entry)\n\n\tif err != nil {\n\t\tlog.Printf(\"Lookup failed for %v,%v -> %v\", name, server, err)\n\t\treturn \"\", -1\n\t}\n\n\treturn r.Ip, int(r.Port)\n}\n\n\/\/ updateState of the runner command\nfunc updateState(com *runnerCommand) {\n\telems := strings.Split(com.details.Spec.Name, \"\/\")\n\tdServer, dPort := getIP(elems[len(elems)-1], com.details.Spec.Server)\n\n\tif dPort > 0 {\n\t\tdConn, err := grpc.Dial(dServer+\":\"+strconv.Itoa(dPort), grpc.WithInsecure())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer dConn.Close()\n\n\t\tc := pbs.NewGoserverServiceClient(dConn)\n\t\t_, err = c.IsAlive(context.Background(), &pbs.Alive{})\n\t\tcom.details.Running = (err == nil)\n\t} else {\n\t\t\/\/Mark as false if we can't locate the job\n\t\tcom.details.Running = false\n\t}\n}\n\n\/\/ BuildJob builds out a job\nfunc (s *Server) BuildJob(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.Checkout(in.Name)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ List lists all running jobs\nfunc (s *Server) List(ctx context.Context, in *pb.Empty) (*pb.JobList, error) {\n\tdetails := &pb.JobList{}\n\tfor _, job := range s.runner.backgroundTasks {\n\t\tupdateState(job)\n\t\tdetails.Details = append(details.Details, job.details)\n\t}\n\n\treturn details, nil\n}\n\n\/\/ Run runs a background task\nfunc (s *Server) Run(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.Run(in)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ Kill a background task\nfunc (s *Server) Kill(ctx context.Context, in *pb.JobSpec) (*pb.Empty, error) {\n\ts.runner.kill(in)\n\treturn &pb.Empty{}, nil\n}\n\n\/\/ DoRegister Registers this server\nfunc (s Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterGoBuildSlaveServer(server, &s)\n}\n\n\/\/ ReportHealth determines if the server is healthy\nfunc (s Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/Init builds the default runner framework\nfunc Init() *Runner {\n\tr := &Runner{gopath: \"goautobuild\"}\n\tr.runner = runCommand\n\tgo r.run()\n\treturn r\n}\n\nfunc runCommand(c *runnerCommand) {\n\tlog.Printf(\"RUNNING COMMAND: %v\", c)\n\tenv := os.Environ()\n\thome := \"\"\n\tfor _, s := range env {\n\t\tif strings.HasPrefix(s, \"HOME=\") {\n\t\t\thome = s[5:]\n\t\t}\n\t}\n\n\tif len(home) == 0 {\n\n\t}\n\tgpath := home + \"\/gobuild\"\n\tc.command.Path = strings.Replace(c.command.Path, \"$GOPATH\", gpath, -1)\n\tfor i := range c.command.Args {\n\t\tc.command.Args[i] = strings.Replace(c.command.Args[i], \"$GOPATH\", gpath, -1)\n\t}\n\n\tpath := fmt.Sprintf(\"GOPATH=\" + home + \"\/gobuild\")\n\tfound := false\n\tlog.Printf(\"HERE = %v\", c.command.Env)\n\tenvl := os.Environ()\n\tfor i, blah := range envl {\n\t\tif strings.HasPrefix(blah, \"GOPATH\") {\n\t\t\tenvl[i] = path\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\tenvl = append(envl, path)\n\t}\n\tlog.Printf(\"ENV = %v\", envl)\n\tc.command.Env = envl\n\n\tout, err := c.command.StdoutPipe()\n\tout2, err2 := c.command.StderrPipe()\n\tif err != nil {\n\t\tlog.Printf(\"Blah: %v\", err)\n\t}\n\n\tif err2 != nil {\n\t\tlog.Printf(\"Blah2: %v\", err)\n\t}\n\n\tlog.Printf(\"%v, %v and %v\", c.command.Path, c.command.Args, c.command.Env)\n\tc.command.Start()\n\n\tif !c.background {\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(out)\n\t\tstr := buf.String()\n\n\t\tbuf2 := new(bytes.Buffer)\n\t\tbuf2.ReadFrom(out2)\n\t\tstr2 := buf2.String()\n\t\tlog.Printf(\"%v and %v\", str, str2)\n\n\t\tc.command.Wait()\n\t\tc.output = str\n\t\tc.complete = true\n\t}\n\tlog.Printf(\"DONE\")\n}\n\nfunc (diskChecker prodDiskChecker) diskUsage(path string) int64 {\n\treturn diskUsage(path)\n}\n\nfunc (s *Server) rebuildLoop() {\n\tfor true {\n\t\ttime.Sleep(time.Minute)\n\n\t\tvar rebuildList []*pb.JobSpec\n\t\tfor _, job := range s.runner.backgroundTasks {\n\t\t\tlog.Printf(\"Job %v\", job)\n\t\t\tif time.Since(job.started) > time.Hour {\n\t\t\t\tlog.Printf(\"Added to rebuild list (%v)\", job)\n\t\t\t\trebuildList = append(rebuildList, job.details.Spec)\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Rebuilding %v\", rebuildList)\n\t\tfor _, job := range rebuildList {\n\t\t\ts.runner.Rebuild(job)\n\t\t}\n\t}\n}\n\nfunc main() {\n\ts := Server{&goserver.GoServer{}, Init(), prodDiskChecker{}}\n\ts.Register = s\n\ts.PrepServer()\n\ts.RegisterServingTask(s.rebuildLoop)\n\ts.RegisterServer(\"gobuildslave\", false)\n\ts.Serve()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Waiting for FDs via kqueue\/kevent.\n\npackage net\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\ntype pollster struct {\n\tkq int\n\teventbuf [10]syscall.Kevent_t\n\tevents []syscall.Kevent_t\n}\n\nfunc newpollster() (p *pollster, err os.Error) {\n\tp = new(pollster)\n\tvar e int\n\tif p.kq, e = syscall.Kqueue(); e != 0 {\n\t\treturn nil, os.NewSyscallError(\"kqueue\", e)\n\t}\n\tp.events = p.eventbuf[0:0]\n\treturn p, nil\n}\n\nfunc (p *pollster) AddFD(fd int, mode int, repeat bool) os.Error {\n\tvar kmode int\n\tif mode == 'r' {\n\t\tkmode = syscall.EVFILT_READ\n\t} else {\n\t\tkmode = syscall.EVFILT_WRITE\n\t}\n\tvar events [1]syscall.Kevent_t\n\tev := &events[0]\n\t\/\/ EV_ADD - add event to kqueue list\n\t\/\/ EV_ONESHOT - delete the event the first time it triggers\n\tflags := syscall.EV_ADD\n\tif !repeat {\n\t\tflags |= syscall.EV_ONESHOT\n\t}\n\tsyscall.SetKevent(ev, fd, kmode, flags)\n\n\tn, e := syscall.Kevent(p.kq, &events, nil, nil)\n\tif e != 0 {\n\t\treturn os.NewSyscallError(\"kevent\", e)\n\t}\n\tif n != 1 || (ev.Flags&syscall.EV_ERROR) == 0 || int(ev.Ident) != fd || int(ev.Filter) != kmode {\n\t\treturn os.NewSyscallError(\"kqueue phase error\", e)\n\t}\n\tif ev.Data != 0 {\n\t\treturn os.Errno(int(ev.Data))\n\t}\n\treturn nil\n}\n\nfunc (p *pollster) DelFD(fd int, mode int) {\n\tvar kmode int\n\tif mode == 'r' {\n\t\tkmode = syscall.EVFILT_READ\n\t} else {\n\t\tkmode = syscall.EVFILT_WRITE\n\t}\n\tvar events [1]syscall.Kevent_t\n\tev := &events[0]\n\t\/\/ EV_DELETE - delete event from kqueue list\n\tsyscall.SetKevent(ev, fd, kmode, syscall.EV_DELETE)\n\tsyscall.Kevent(p.kq, &events, nil, nil)\n}\n\nfunc (p *pollster) WaitFD(nsec int64) (fd int, mode int, err os.Error) {\n\tvar t *syscall.Timespec\n\tfor len(p.events) == 0 {\n\t\tif nsec > 0 {\n\t\t\tif t == nil {\n\t\t\t\tt = new(syscall.Timespec)\n\t\t\t}\n\t\t\t*t = syscall.NsecToTimespec(nsec)\n\t\t}\n\t\tnn, e := syscall.Kevent(p.kq, nil, &p.eventbuf, t)\n\t\tif e != 0 {\n\t\t\tif e == syscall.EINTR {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn -1, 0, os.NewSyscallError(\"kevent\", e)\n\t\t}\n\t\tif nn == 0 {\n\t\t\treturn -1, 0, nil\n\t\t}\n\t\tp.events = p.eventbuf[0:nn]\n\t}\n\tev := &p.events[0]\n\tp.events = p.events[1:]\n\tfd = int(ev.Ident)\n\tif ev.Filter == syscall.EVFILT_READ {\n\t\tmode = 'r'\n\t} else {\n\t\tmode = 'w'\n\t}\n\treturn fd, mode, nil\n}\n\nfunc (p *pollster) Close() os.Error { return os.NewSyscallError(\"close\", syscall.Close(p.kq)) }\n<commit_msg>net: fix freebsd build<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Waiting for FDs via kqueue\/kevent.\n\npackage net\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\ntype pollster struct {\n\tkq int\n\teventbuf [10]syscall.Kevent_t\n\tevents []syscall.Kevent_t\n}\n\nfunc newpollster() (p *pollster, err os.Error) {\n\tp = new(pollster)\n\tvar e int\n\tif p.kq, e = syscall.Kqueue(); e != 0 {\n\t\treturn nil, os.NewSyscallError(\"kqueue\", e)\n\t}\n\tp.events = p.eventbuf[0:0]\n\treturn p, nil\n}\n\nfunc (p *pollster) AddFD(fd int, mode int, repeat bool) os.Error {\n\tvar kmode int\n\tif mode == 'r' {\n\t\tkmode = syscall.EVFILT_READ\n\t} else {\n\t\tkmode = syscall.EVFILT_WRITE\n\t}\n\tvar events [1]syscall.Kevent_t\n\tev := &events[0]\n\t\/\/ EV_ADD - add event to kqueue list\n\t\/\/ EV_ONESHOT - delete the event the first time it triggers\n\tflags := syscall.EV_ADD\n\tif !repeat {\n\t\tflags |= syscall.EV_ONESHOT\n\t}\n\tsyscall.SetKevent(ev, fd, kmode, flags)\n\n\tn, e := syscall.Kevent(p.kq, events[:], nil, nil)\n\tif e != 0 {\n\t\treturn os.NewSyscallError(\"kevent\", e)\n\t}\n\tif n != 1 || (ev.Flags&syscall.EV_ERROR) == 0 || int(ev.Ident) != fd || int(ev.Filter) != kmode {\n\t\treturn os.NewSyscallError(\"kqueue phase error\", e)\n\t}\n\tif ev.Data != 0 {\n\t\treturn os.Errno(int(ev.Data))\n\t}\n\treturn nil\n}\n\nfunc (p *pollster) DelFD(fd int, mode int) {\n\tvar kmode int\n\tif mode == 'r' {\n\t\tkmode = syscall.EVFILT_READ\n\t} else {\n\t\tkmode = syscall.EVFILT_WRITE\n\t}\n\tvar events [1]syscall.Kevent_t\n\tev := &events[0]\n\t\/\/ EV_DELETE - delete event from kqueue list\n\tsyscall.SetKevent(ev, fd, kmode, syscall.EV_DELETE)\n\tsyscall.Kevent(p.kq, events[:], nil, nil)\n}\n\nfunc (p *pollster) WaitFD(nsec int64) (fd int, mode int, err os.Error) {\n\tvar t *syscall.Timespec\n\tfor len(p.events) == 0 {\n\t\tif nsec > 0 {\n\t\t\tif t == nil {\n\t\t\t\tt = new(syscall.Timespec)\n\t\t\t}\n\t\t\t*t = syscall.NsecToTimespec(nsec)\n\t\t}\n\t\tnn, e := syscall.Kevent(p.kq, nil, p.eventbuf[:], t)\n\t\tif e != 0 {\n\t\t\tif e == syscall.EINTR {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn -1, 0, os.NewSyscallError(\"kevent\", e)\n\t\t}\n\t\tif nn == 0 {\n\t\t\treturn -1, 0, nil\n\t\t}\n\t\tp.events = p.eventbuf[0:nn]\n\t}\n\tev := &p.events[0]\n\tp.events = p.events[1:]\n\tfd = int(ev.Ident)\n\tif ev.Filter == syscall.EVFILT_READ {\n\t\tmode = 'r'\n\t} else {\n\t\tmode = 'w'\n\t}\n\treturn fd, mode, nil\n}\n\nfunc (p *pollster) Close() os.Error { return os.NewSyscallError(\"close\", syscall.Close(p.kq)) }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\n\/\/ TODO(cw): ListenPacket test, Read() test, ipv6 test &\n\/\/ Dial()\/Listen() level tests\n\npackage net\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst ICMP_ECHO_REQUEST = 8\nconst ICMP_ECHO_REPLY = 0\n\n\/\/ returns a suitable 'ping request' packet, with id & seq and a\n\/\/ payload length of pktlen\nfunc makePingRequest(id, seq, pktlen int, filler []byte) []byte {\n\tp := make([]byte, pktlen)\n\tcopy(p[8:], bytes.Repeat(filler, (pktlen-8)\/len(filler)+1))\n\n\tp[0] = ICMP_ECHO_REQUEST \/\/ type\n\tp[1] = 0 \/\/ code\n\tp[2] = 0 \/\/ cksum\n\tp[3] = 0 \/\/ cksum\n\tp[4] = uint8(id >> 8) \/\/ id\n\tp[5] = uint8(id & 0xff) \/\/ id\n\tp[6] = uint8(seq >> 8) \/\/ sequence\n\tp[7] = uint8(seq & 0xff) \/\/ sequence\n\n\t\/\/ calculate icmp checksum\n\tcklen := len(p)\n\ts := uint32(0)\n\tfor i := 0; i < (cklen - 1); i += 2 {\n\t\ts += uint32(p[i+1])<<8 | uint32(p[i])\n\t}\n\tif cklen&1 == 1 {\n\t\ts += uint32(p[cklen-1])\n\t}\n\ts = (s >> 16) + (s & 0xffff)\n\ts = s + (s >> 16)\n\n\t\/\/ place checksum back in header; using ^= avoids the\n\t\/\/ assumption the checksum bytes are zero\n\tp[2] ^= uint8(^s & 0xff)\n\tp[3] ^= uint8(^s >> 8)\n\n\treturn p\n}\n\nfunc parsePingReply(p []byte) (id, seq int) {\n\tid = int(p[4])<<8 | int(p[5])\n\tseq = int(p[6])<<8 | int(p[7])\n\treturn\n}\n\nvar srchost = flag.String(\"srchost\", \"\", \"Source of the ICMP ECHO request\")\nvar dsthost = flag.String(\"dsthost\", \"localhost\", \"Destination for the ICMP ECHO request\")\n\n\/\/ test (raw) IP socket using ICMP\nfunc TestICMP(t *testing.T) {\n\tif os.Getuid() != 0 {\n\t\tt.Logf(\"test disabled; must be root\")\n\t\treturn\n\t}\n\n\tvar laddr *IPAddr\n\tif *srchost != \"\" {\n\t\tladdr, err := ResolveIPAddr(*srchost)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`net.ResolveIPAddr(\"%v\") = %v, %v`, *srchost, laddr, err)\n\t\t}\n\t}\n\n\traddr, err := ResolveIPAddr(*dsthost)\n\tif err != nil {\n\t\tt.Fatalf(`net.ResolveIPAddr(\"%v\") = %v, %v`, *dsthost, raddr, err)\n\t}\n\n\tc, err := ListenIP(\"ip4:icmp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(`net.ListenIP(\"ip4:icmp\", %v) = %v, %v`, *srchost, c, err)\n\t}\n\n\tsendid := os.Getpid() & 0xffff\n\tconst sendseq = 61455\n\tconst pingpktlen = 128\n\tsendpkt := makePingRequest(sendid, sendseq, pingpktlen, []byte(\"Go Go Gadget Ping!!!\"))\n\n\tn, err := c.WriteToIP(sendpkt, raddr)\n\tif err != nil || n != pingpktlen {\n\t\tt.Fatalf(`net.WriteToIP(..., %v) = %v, %v`, raddr, n, err)\n\t}\n\n\tc.SetTimeout(100e6)\n\tresp := make([]byte, 1024)\n\tfor {\n\t\tn, from, err := c.ReadFrom(resp)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`ReadFrom(...) = %v, %v, %v`, n, from, err)\n\t\t}\n\t\tif resp[0] != ICMP_ECHO_REPLY {\n\t\t\tcontinue\n\t\t}\n\t\trcvid, rcvseq := parsePingReply(resp)\n\t\tif rcvid != sendid || rcvseq != sendseq {\n\t\t\tt.Fatalf(`Ping reply saw id,seq=0x%x,0x%x (expected 0x%x, 0x%x)`, rcvid, rcvseq, sendid, sendseq)\n\t\t}\n\t\treturn\n\t}\n\tt.Fatalf(\"saw no ping return\")\n}\n<commit_msg>net: fix laddr typo in test code.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\n\/\/ TODO(cw): ListenPacket test, Read() test, ipv6 test &\n\/\/ Dial()\/Listen() level tests\n\npackage net\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst ICMP_ECHO_REQUEST = 8\nconst ICMP_ECHO_REPLY = 0\n\n\/\/ returns a suitable 'ping request' packet, with id & seq and a\n\/\/ payload length of pktlen\nfunc makePingRequest(id, seq, pktlen int, filler []byte) []byte {\n\tp := make([]byte, pktlen)\n\tcopy(p[8:], bytes.Repeat(filler, (pktlen-8)\/len(filler)+1))\n\n\tp[0] = ICMP_ECHO_REQUEST \/\/ type\n\tp[1] = 0 \/\/ code\n\tp[2] = 0 \/\/ cksum\n\tp[3] = 0 \/\/ cksum\n\tp[4] = uint8(id >> 8) \/\/ id\n\tp[5] = uint8(id & 0xff) \/\/ id\n\tp[6] = uint8(seq >> 8) \/\/ sequence\n\tp[7] = uint8(seq & 0xff) \/\/ sequence\n\n\t\/\/ calculate icmp checksum\n\tcklen := len(p)\n\ts := uint32(0)\n\tfor i := 0; i < (cklen - 1); i += 2 {\n\t\ts += uint32(p[i+1])<<8 | uint32(p[i])\n\t}\n\tif cklen&1 == 1 {\n\t\ts += uint32(p[cklen-1])\n\t}\n\ts = (s >> 16) + (s & 0xffff)\n\ts = s + (s >> 16)\n\n\t\/\/ place checksum back in header; using ^= avoids the\n\t\/\/ assumption the checksum bytes are zero\n\tp[2] ^= uint8(^s & 0xff)\n\tp[3] ^= uint8(^s >> 8)\n\n\treturn p\n}\n\nfunc parsePingReply(p []byte) (id, seq int) {\n\tid = int(p[4])<<8 | int(p[5])\n\tseq = int(p[6])<<8 | int(p[7])\n\treturn\n}\n\nvar srchost = flag.String(\"srchost\", \"\", \"Source of the ICMP ECHO request\")\nvar dsthost = flag.String(\"dsthost\", \"localhost\", \"Destination for the ICMP ECHO request\")\n\n\/\/ test (raw) IP socket using ICMP\nfunc TestICMP(t *testing.T) {\n\tif os.Getuid() != 0 {\n\t\tt.Logf(\"test disabled; must be root\")\n\t\treturn\n\t}\n\n\tvar (\n\t\tladdr *IPAddr\n\t\terr os.Error\n\t)\n\tif *srchost != \"\" {\n\t\tladdr, err = ResolveIPAddr(*srchost)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`net.ResolveIPAddr(\"%v\") = %v, %v`, *srchost, laddr, err)\n\t\t}\n\t}\n\n\traddr, err := ResolveIPAddr(*dsthost)\n\tif err != nil {\n\t\tt.Fatalf(`net.ResolveIPAddr(\"%v\") = %v, %v`, *dsthost, raddr, err)\n\t}\n\n\tc, err := ListenIP(\"ip4:icmp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(`net.ListenIP(\"ip4:icmp\", %v) = %v, %v`, *srchost, c, err)\n\t}\n\n\tsendid := os.Getpid() & 0xffff\n\tconst sendseq = 61455\n\tconst pingpktlen = 128\n\tsendpkt := makePingRequest(sendid, sendseq, pingpktlen, []byte(\"Go Go Gadget Ping!!!\"))\n\n\tn, err := c.WriteToIP(sendpkt, raddr)\n\tif err != nil || n != pingpktlen {\n\t\tt.Fatalf(`net.WriteToIP(..., %v) = %v, %v`, raddr, n, err)\n\t}\n\n\tc.SetTimeout(100e6)\n\tresp := make([]byte, 1024)\n\tfor {\n\t\tn, from, err := c.ReadFrom(resp)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`ReadFrom(...) = %v, %v, %v`, n, from, err)\n\t\t}\n\t\tif resp[0] != ICMP_ECHO_REPLY {\n\t\t\tcontinue\n\t\t}\n\t\trcvid, rcvseq := parsePingReply(resp)\n\t\tif rcvid != sendid || rcvseq != sendseq {\n\t\t\tt.Fatalf(`Ping reply saw id,seq=0x%x,0x%x (expected 0x%x, 0x%x)`, rcvid, rcvseq, sendid, sendseq)\n\t\t}\n\t\treturn\n\t}\n\tt.Fatalf(\"saw no ping return\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage path\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n)\n\ntype CleanTest struct {\n\tpath, clean string\n}\n\nvar cleantests = []CleanTest{\n\t\/\/ Already clean\n\t{\"\", \".\"},\n\t{\"abc\", \"abc\"},\n\t{\"abc\/def\", \"abc\/def\"},\n\t{\"a\/b\/c\", \"a\/b\/c\"},\n\t{\".\", \".\"},\n\t{\"..\", \"..\"},\n\t{\"..\/..\", \"..\/..\"},\n\t{\"..\/..\/abc\", \"..\/..\/abc\"},\n\t{\"\/abc\", \"\/abc\"},\n\t{\"\/\", \"\/\"},\n\n\t\/\/ Remove trailing slash\n\t{\"abc\/\", \"abc\"},\n\t{\"abc\/def\/\", \"abc\/def\"},\n\t{\"a\/b\/c\/\", \"a\/b\/c\"},\n\t{\".\/\", \".\"},\n\t{\"..\/\", \"..\"},\n\t{\"..\/..\/\", \"..\/..\"},\n\t{\"\/abc\/\", \"\/abc\"},\n\n\t\/\/ Remove doubled slash\n\t{\"abc\/\/def\/\/ghi\", \"abc\/def\/ghi\"},\n\t{\"\/\/abc\", \"\/abc\"},\n\t{\"\/\/\/abc\", \"\/abc\"},\n\t{\"\/\/abc\/\/\", \"\/abc\"},\n\t{\"abc\/\/\", \"abc\"},\n\n\t\/\/ Remove . elements\n\t{\"abc\/.\/def\", \"abc\/def\"},\n\t{\"\/.\/abc\/def\", \"\/abc\/def\"},\n\t{\"abc\/.\", \"abc\"},\n\n\t\/\/ Remove .. elements\n\t{\"abc\/def\/ghi\/..\/jkl\", \"abc\/def\/jkl\"},\n\t{\"abc\/def\/..\/ghi\/..\/jkl\", \"abc\/jkl\"},\n\t{\"abc\/def\/..\", \"abc\"},\n\t{\"abc\/def\/..\/..\", \".\"},\n\t{\"\/abc\/def\/..\/..\", \"\/\"},\n\t{\"abc\/def\/..\/..\/..\", \"..\"},\n\t{\"\/abc\/def\/..\/..\/..\", \"\/\"},\n\t{\"abc\/def\/..\/..\/..\/ghi\/jkl\/..\/..\/..\/mno\", \"..\/..\/mno\"},\n\n\t\/\/ Combinations\n\t{\"abc\/.\/..\/def\", \"def\"},\n\t{\"abc\/\/.\/..\/def\", \"def\"},\n\t{\"abc\/..\/..\/.\/.\/..\/def\", \"..\/..\/def\"},\n}\n\nfunc TestClean(t *testing.T) {\n\tfor _, test := range cleantests {\n\t\tif s := Clean(test.path); s != test.clean {\n\t\t\tt.Errorf(\"Clean(%q) = %q, want %q\", test.path, s, test.clean)\n\t\t}\n\t}\n}\n\ntype SplitTest struct {\n\tpath, dir, file string\n}\n\nvar splittests = []SplitTest{\n\t{\"a\/b\", \"a\/\", \"b\"},\n\t{\"a\/b\/\", \"a\/b\/\", \"\"},\n\t{\"a\/\", \"a\/\", \"\"},\n\t{\"a\", \"\", \"a\"},\n\t{\"\/\", \"\/\", \"\"},\n}\n\nvar winsplittests = []SplitTest{\n\t{`C:\\Windows\\System32`, `C:\\Windows\\`, `System32`},\n\t{`C:\\Windows\\`, `C:\\Windows\\`, ``},\n\t{`C:\\Windows`, `C:\\`, `Windows`},\n\t{`C:Windows`, `C:`, `Windows`},\n\t{`\\\\?\\c:\\`, `\\\\?\\c:\\`, ``},\n}\n\nfunc TestSplit(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tsplittests = append(splittests, winsplittests...)\n\t}\n\tfor _, test := range splittests {\n\t\tif d, f := Split(test.path); d != test.dir || f != test.file {\n\t\t\tt.Errorf(\"Split(%q) = %q, %q, want %q, %q\", test.path, d, f, test.dir, test.file)\n\t\t}\n\t}\n}\n\ntype JoinTest struct {\n\telem []string\n\tpath string\n}\n\nvar jointests = []JoinTest{\n\t\/\/ zero parameters\n\t{[]string{}, \"\"},\n\n\t\/\/ one parameter\n\t{[]string{\"\"}, \"\"},\n\t{[]string{\"a\"}, \"a\"},\n\n\t\/\/ two parameters\n\t{[]string{\"a\", \"b\"}, \"a\/b\"},\n\t{[]string{\"a\", \"\"}, \"a\"},\n\t{[]string{\"\", \"b\"}, \"b\"},\n\t{[]string{\"\/\", \"a\"}, \"\/a\"},\n\t{[]string{\"\/\", \"\"}, \"\/\"},\n\t{[]string{\"a\/\", \"b\"}, \"a\/b\"},\n\t{[]string{\"a\/\", \"\"}, \"a\"},\n\t{[]string{\"\", \"\"}, \"\"},\n}\n\n\/\/ join takes a []string and passes it to Join.\nfunc join(elem []string, args ...string) string {\n\targs = elem\n\treturn Join(args...)\n}\n\nfunc TestJoin(t *testing.T) {\n\tfor _, test := range jointests {\n\t\tif p := join(test.elem); p != test.path {\n\t\t\tt.Errorf(\"join(%q) = %q, want %q\", test.elem, p, test.path)\n\t\t}\n\t}\n}\n\ntype ExtTest struct {\n\tpath, ext string\n}\n\nvar exttests = []ExtTest{\n\t{\"path.go\", \".go\"},\n\t{\"path.pb.go\", \".go\"},\n\t{\"a.dir\/b\", \"\"},\n\t{\"a.dir\/b.go\", \".go\"},\n\t{\"a.dir\/\", \"\"},\n}\n\nfunc TestExt(t *testing.T) {\n\tfor _, test := range exttests {\n\t\tif x := Ext(test.path); x != test.ext {\n\t\t\tt.Errorf(\"Ext(%q) = %q, want %q\", test.path, x, test.ext)\n\t\t}\n\t}\n}\n\ntype Node struct {\n\tname string\n\tentries []*Node \/\/ nil if the entry is a file\n\tmark int\n}\n\nvar tree = &Node{\n\t\"testdata\",\n\t[]*Node{\n\t\t&Node{\"a\", nil, 0},\n\t\t&Node{\"b\", []*Node{}, 0},\n\t\t&Node{\"c\", nil, 0},\n\t\t&Node{\n\t\t\t\"d\",\n\t\t\t[]*Node{\n\t\t\t\t&Node{\"x\", nil, 0},\n\t\t\t\t&Node{\"y\", []*Node{}, 0},\n\t\t\t\t&Node{\n\t\t\t\t\t\"z\",\n\t\t\t\t\t[]*Node{\n\t\t\t\t\t\t&Node{\"u\", nil, 0},\n\t\t\t\t\t\t&Node{\"v\", nil, 0},\n\t\t\t\t\t},\n\t\t\t\t\t0,\n\t\t\t\t},\n\t\t\t},\n\t\t\t0,\n\t\t},\n\t},\n\t0,\n}\n\nfunc walkTree(n *Node, path string, f func(path string, n *Node)) {\n\tf(path, n)\n\tfor _, e := range n.entries {\n\t\twalkTree(e, Join(path, e.name), f)\n\t}\n}\n\nfunc makeTree(t *testing.T) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.entries == nil {\n\t\t\tfd, err := os.Open(path, os.O_CREAT, 0660)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"makeTree: %v\", err)\n\t\t\t}\n\t\t\tfd.Close()\n\t\t} else {\n\t\t\tos.Mkdir(path, 0770)\n\t\t}\n\t})\n}\n\nfunc markTree(n *Node) { walkTree(n, \"\", func(path string, n *Node) { n.mark++ }) }\n\nfunc checkMarks(t *testing.T) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.mark != 1 {\n\t\t\tt.Errorf(\"node %s mark = %d; expected 1\", path, n.mark)\n\t\t}\n\t\tn.mark = 0\n\t})\n}\n\n\/\/ Assumes that each node name is unique. Good enough for a test.\nfunc mark(name string) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.name == name {\n\t\t\tn.mark++\n\t\t}\n\t})\n}\n\ntype TestVisitor struct{}\n\nfunc (v *TestVisitor) VisitDir(path string, f *os.FileInfo) bool {\n\tmark(f.Name)\n\treturn true\n}\n\nfunc (v *TestVisitor) VisitFile(path string, f *os.FileInfo) {\n\tmark(f.Name)\n}\n\nfunc TestWalk(t *testing.T) {\n\tmakeTree(t)\n\n\t\/\/ 1) ignore error handling, expect none\n\tv := &TestVisitor{}\n\tWalk(tree.name, v, nil)\n\tcheckMarks(t)\n\n\t\/\/ 2) handle errors, expect none\n\terrors := make(chan os.Error, 64)\n\tWalk(tree.name, v, errors)\n\tif err, ok := <-errors; ok {\n\t\tt.Error(\"no error expected, found: s\", err)\n\t}\n\tcheckMarks(t)\n\n\tif os.Getuid() != 0 {\n\t\t\/\/ introduce 2 errors: chmod top-level directories to 0\n\t\tos.Chmod(Join(tree.name, tree.entries[1].name), 0)\n\t\tos.Chmod(Join(tree.name, tree.entries[3].name), 0)\n\t\t\/\/ mark respective subtrees manually\n\t\tmarkTree(tree.entries[1])\n\t\tmarkTree(tree.entries[3])\n\t\t\/\/ correct double-marking of directory itself\n\t\ttree.entries[1].mark--\n\t\ttree.entries[3].mark--\n\n\t\t\/\/ 3) handle errors, expect two\n\t\terrors = make(chan os.Error, 64)\n\t\tos.Chmod(Join(tree.name, tree.entries[1].name), 0)\n\t\tWalk(tree.name, v, errors)\n\t\tfor i := 1; i <= 2; i++ {\n\t\t\tif _, ok := <-errors; !ok {\n\t\t\t\tt.Errorf(\"%d. error expected, none found\", i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err, ok := <-errors; ok {\n\t\t\tt.Errorf(\"only two errors expected, found 3rd: %v\", err)\n\t\t}\n\t\t\/\/ the inaccessible subtrees were marked manually\n\t\tcheckMarks(t)\n\t}\n\n\t\/\/ cleanup\n\tos.Chmod(Join(tree.name, tree.entries[1].name), 0770)\n\tos.Chmod(Join(tree.name, tree.entries[3].name), 0770)\n\tif err := os.RemoveAll(tree.name); err != nil {\n\t\tt.Errorf(\"removeTree: %v\", err)\n\t}\n}\n\nvar basetests = []CleanTest{\n\t\/\/ Already clean\n\t{\"\", \".\"},\n\t{\".\", \".\"},\n\t{\"\/.\", \".\"},\n\t{\"\/\", \"\/\"},\n\t{\"\/\/\/\/\", \"\/\"},\n\t{\"x\/\", \"x\"},\n\t{\"abc\", \"abc\"},\n\t{\"abc\/def\", \"def\"},\n\t{\"a\/b\/.x\", \".x\"},\n\t{\"a\/b\/c.\", \"c.\"},\n\t{\"a\/b\/c.x\", \"c.x\"},\n}\n\nfunc TestBase(t *testing.T) {\n\tfor _, test := range basetests {\n\t\tif s := Base(test.path); s != test.clean {\n\t\t\tt.Errorf(\"Base(%q) = %q, want %q\", test.path, s, test.clean)\n\t\t}\n\t}\n}\n\ntype IsAbsTest struct {\n\tpath string\n\tisAbs bool\n}\n\nvar isAbsTests = []IsAbsTest{\n\t{\"\", false},\n\t{\"\/\", true},\n\t{\"\/usr\/bin\/gcc\", true},\n\t{\"..\", false},\n\t{\"\/a\/..\/bb\", true},\n\t{\".\", false},\n\t{\".\/\", false},\n\t{\"lala\", false},\n}\n\nfunc TestIsAbs(t *testing.T) {\n\tfor _, test := range isAbsTests {\n\t\tif r := IsAbs(test.path); r != test.isAbs {\n\t\t\tt.Errorf(\"IsAbs(%q) = %v, want %v\", test.path, r, test.isAbs)\n\t\t}\n\t}\n}\n<commit_msg>path: fix printf glitch in test<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage path\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n)\n\ntype CleanTest struct {\n\tpath, clean string\n}\n\nvar cleantests = []CleanTest{\n\t\/\/ Already clean\n\t{\"\", \".\"},\n\t{\"abc\", \"abc\"},\n\t{\"abc\/def\", \"abc\/def\"},\n\t{\"a\/b\/c\", \"a\/b\/c\"},\n\t{\".\", \".\"},\n\t{\"..\", \"..\"},\n\t{\"..\/..\", \"..\/..\"},\n\t{\"..\/..\/abc\", \"..\/..\/abc\"},\n\t{\"\/abc\", \"\/abc\"},\n\t{\"\/\", \"\/\"},\n\n\t\/\/ Remove trailing slash\n\t{\"abc\/\", \"abc\"},\n\t{\"abc\/def\/\", \"abc\/def\"},\n\t{\"a\/b\/c\/\", \"a\/b\/c\"},\n\t{\".\/\", \".\"},\n\t{\"..\/\", \"..\"},\n\t{\"..\/..\/\", \"..\/..\"},\n\t{\"\/abc\/\", \"\/abc\"},\n\n\t\/\/ Remove doubled slash\n\t{\"abc\/\/def\/\/ghi\", \"abc\/def\/ghi\"},\n\t{\"\/\/abc\", \"\/abc\"},\n\t{\"\/\/\/abc\", \"\/abc\"},\n\t{\"\/\/abc\/\/\", \"\/abc\"},\n\t{\"abc\/\/\", \"abc\"},\n\n\t\/\/ Remove . elements\n\t{\"abc\/.\/def\", \"abc\/def\"},\n\t{\"\/.\/abc\/def\", \"\/abc\/def\"},\n\t{\"abc\/.\", \"abc\"},\n\n\t\/\/ Remove .. elements\n\t{\"abc\/def\/ghi\/..\/jkl\", \"abc\/def\/jkl\"},\n\t{\"abc\/def\/..\/ghi\/..\/jkl\", \"abc\/jkl\"},\n\t{\"abc\/def\/..\", \"abc\"},\n\t{\"abc\/def\/..\/..\", \".\"},\n\t{\"\/abc\/def\/..\/..\", \"\/\"},\n\t{\"abc\/def\/..\/..\/..\", \"..\"},\n\t{\"\/abc\/def\/..\/..\/..\", \"\/\"},\n\t{\"abc\/def\/..\/..\/..\/ghi\/jkl\/..\/..\/..\/mno\", \"..\/..\/mno\"},\n\n\t\/\/ Combinations\n\t{\"abc\/.\/..\/def\", \"def\"},\n\t{\"abc\/\/.\/..\/def\", \"def\"},\n\t{\"abc\/..\/..\/.\/.\/..\/def\", \"..\/..\/def\"},\n}\n\nfunc TestClean(t *testing.T) {\n\tfor _, test := range cleantests {\n\t\tif s := Clean(test.path); s != test.clean {\n\t\t\tt.Errorf(\"Clean(%q) = %q, want %q\", test.path, s, test.clean)\n\t\t}\n\t}\n}\n\ntype SplitTest struct {\n\tpath, dir, file string\n}\n\nvar splittests = []SplitTest{\n\t{\"a\/b\", \"a\/\", \"b\"},\n\t{\"a\/b\/\", \"a\/b\/\", \"\"},\n\t{\"a\/\", \"a\/\", \"\"},\n\t{\"a\", \"\", \"a\"},\n\t{\"\/\", \"\/\", \"\"},\n}\n\nvar winsplittests = []SplitTest{\n\t{`C:\\Windows\\System32`, `C:\\Windows\\`, `System32`},\n\t{`C:\\Windows\\`, `C:\\Windows\\`, ``},\n\t{`C:\\Windows`, `C:\\`, `Windows`},\n\t{`C:Windows`, `C:`, `Windows`},\n\t{`\\\\?\\c:\\`, `\\\\?\\c:\\`, ``},\n}\n\nfunc TestSplit(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tsplittests = append(splittests, winsplittests...)\n\t}\n\tfor _, test := range splittests {\n\t\tif d, f := Split(test.path); d != test.dir || f != test.file {\n\t\t\tt.Errorf(\"Split(%q) = %q, %q, want %q, %q\", test.path, d, f, test.dir, test.file)\n\t\t}\n\t}\n}\n\ntype JoinTest struct {\n\telem []string\n\tpath string\n}\n\nvar jointests = []JoinTest{\n\t\/\/ zero parameters\n\t{[]string{}, \"\"},\n\n\t\/\/ one parameter\n\t{[]string{\"\"}, \"\"},\n\t{[]string{\"a\"}, \"a\"},\n\n\t\/\/ two parameters\n\t{[]string{\"a\", \"b\"}, \"a\/b\"},\n\t{[]string{\"a\", \"\"}, \"a\"},\n\t{[]string{\"\", \"b\"}, \"b\"},\n\t{[]string{\"\/\", \"a\"}, \"\/a\"},\n\t{[]string{\"\/\", \"\"}, \"\/\"},\n\t{[]string{\"a\/\", \"b\"}, \"a\/b\"},\n\t{[]string{\"a\/\", \"\"}, \"a\"},\n\t{[]string{\"\", \"\"}, \"\"},\n}\n\n\/\/ join takes a []string and passes it to Join.\nfunc join(elem []string, args ...string) string {\n\targs = elem\n\treturn Join(args...)\n}\n\nfunc TestJoin(t *testing.T) {\n\tfor _, test := range jointests {\n\t\tif p := join(test.elem); p != test.path {\n\t\t\tt.Errorf(\"join(%q) = %q, want %q\", test.elem, p, test.path)\n\t\t}\n\t}\n}\n\ntype ExtTest struct {\n\tpath, ext string\n}\n\nvar exttests = []ExtTest{\n\t{\"path.go\", \".go\"},\n\t{\"path.pb.go\", \".go\"},\n\t{\"a.dir\/b\", \"\"},\n\t{\"a.dir\/b.go\", \".go\"},\n\t{\"a.dir\/\", \"\"},\n}\n\nfunc TestExt(t *testing.T) {\n\tfor _, test := range exttests {\n\t\tif x := Ext(test.path); x != test.ext {\n\t\t\tt.Errorf(\"Ext(%q) = %q, want %q\", test.path, x, test.ext)\n\t\t}\n\t}\n}\n\ntype Node struct {\n\tname string\n\tentries []*Node \/\/ nil if the entry is a file\n\tmark int\n}\n\nvar tree = &Node{\n\t\"testdata\",\n\t[]*Node{\n\t\t&Node{\"a\", nil, 0},\n\t\t&Node{\"b\", []*Node{}, 0},\n\t\t&Node{\"c\", nil, 0},\n\t\t&Node{\n\t\t\t\"d\",\n\t\t\t[]*Node{\n\t\t\t\t&Node{\"x\", nil, 0},\n\t\t\t\t&Node{\"y\", []*Node{}, 0},\n\t\t\t\t&Node{\n\t\t\t\t\t\"z\",\n\t\t\t\t\t[]*Node{\n\t\t\t\t\t\t&Node{\"u\", nil, 0},\n\t\t\t\t\t\t&Node{\"v\", nil, 0},\n\t\t\t\t\t},\n\t\t\t\t\t0,\n\t\t\t\t},\n\t\t\t},\n\t\t\t0,\n\t\t},\n\t},\n\t0,\n}\n\nfunc walkTree(n *Node, path string, f func(path string, n *Node)) {\n\tf(path, n)\n\tfor _, e := range n.entries {\n\t\twalkTree(e, Join(path, e.name), f)\n\t}\n}\n\nfunc makeTree(t *testing.T) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.entries == nil {\n\t\t\tfd, err := os.Open(path, os.O_CREAT, 0660)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"makeTree: %v\", err)\n\t\t\t}\n\t\t\tfd.Close()\n\t\t} else {\n\t\t\tos.Mkdir(path, 0770)\n\t\t}\n\t})\n}\n\nfunc markTree(n *Node) { walkTree(n, \"\", func(path string, n *Node) { n.mark++ }) }\n\nfunc checkMarks(t *testing.T) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.mark != 1 {\n\t\t\tt.Errorf(\"node %s mark = %d; expected 1\", path, n.mark)\n\t\t}\n\t\tn.mark = 0\n\t})\n}\n\n\/\/ Assumes that each node name is unique. Good enough for a test.\nfunc mark(name string) {\n\twalkTree(tree, tree.name, func(path string, n *Node) {\n\t\tif n.name == name {\n\t\t\tn.mark++\n\t\t}\n\t})\n}\n\ntype TestVisitor struct{}\n\nfunc (v *TestVisitor) VisitDir(path string, f *os.FileInfo) bool {\n\tmark(f.Name)\n\treturn true\n}\n\nfunc (v *TestVisitor) VisitFile(path string, f *os.FileInfo) {\n\tmark(f.Name)\n}\n\nfunc TestWalk(t *testing.T) {\n\tmakeTree(t)\n\n\t\/\/ 1) ignore error handling, expect none\n\tv := &TestVisitor{}\n\tWalk(tree.name, v, nil)\n\tcheckMarks(t)\n\n\t\/\/ 2) handle errors, expect none\n\terrors := make(chan os.Error, 64)\n\tWalk(tree.name, v, errors)\n\tif err, ok := <-errors; ok {\n\t\tt.Errorf(\"no error expected, found: %s\", err)\n\t}\n\tcheckMarks(t)\n\n\tif os.Getuid() != 0 {\n\t\t\/\/ introduce 2 errors: chmod top-level directories to 0\n\t\tos.Chmod(Join(tree.name, tree.entries[1].name), 0)\n\t\tos.Chmod(Join(tree.name, tree.entries[3].name), 0)\n\t\t\/\/ mark respective subtrees manually\n\t\tmarkTree(tree.entries[1])\n\t\tmarkTree(tree.entries[3])\n\t\t\/\/ correct double-marking of directory itself\n\t\ttree.entries[1].mark--\n\t\ttree.entries[3].mark--\n\n\t\t\/\/ 3) handle errors, expect two\n\t\terrors = make(chan os.Error, 64)\n\t\tos.Chmod(Join(tree.name, tree.entries[1].name), 0)\n\t\tWalk(tree.name, v, errors)\n\t\tfor i := 1; i <= 2; i++ {\n\t\t\tif _, ok := <-errors; !ok {\n\t\t\t\tt.Errorf(\"%d. error expected, none found\", i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err, ok := <-errors; ok {\n\t\t\tt.Errorf(\"only two errors expected, found 3rd: %v\", err)\n\t\t}\n\t\t\/\/ the inaccessible subtrees were marked manually\n\t\tcheckMarks(t)\n\t}\n\n\t\/\/ cleanup\n\tos.Chmod(Join(tree.name, tree.entries[1].name), 0770)\n\tos.Chmod(Join(tree.name, tree.entries[3].name), 0770)\n\tif err := os.RemoveAll(tree.name); err != nil {\n\t\tt.Errorf(\"removeTree: %v\", err)\n\t}\n}\n\nvar basetests = []CleanTest{\n\t\/\/ Already clean\n\t{\"\", \".\"},\n\t{\".\", \".\"},\n\t{\"\/.\", \".\"},\n\t{\"\/\", \"\/\"},\n\t{\"\/\/\/\/\", \"\/\"},\n\t{\"x\/\", \"x\"},\n\t{\"abc\", \"abc\"},\n\t{\"abc\/def\", \"def\"},\n\t{\"a\/b\/.x\", \".x\"},\n\t{\"a\/b\/c.\", \"c.\"},\n\t{\"a\/b\/c.x\", \"c.x\"},\n}\n\nfunc TestBase(t *testing.T) {\n\tfor _, test := range basetests {\n\t\tif s := Base(test.path); s != test.clean {\n\t\t\tt.Errorf(\"Base(%q) = %q, want %q\", test.path, s, test.clean)\n\t\t}\n\t}\n}\n\ntype IsAbsTest struct {\n\tpath string\n\tisAbs bool\n}\n\nvar isAbsTests = []IsAbsTest{\n\t{\"\", false},\n\t{\"\/\", true},\n\t{\"\/usr\/bin\/gcc\", true},\n\t{\"..\", false},\n\t{\"\/a\/..\/bb\", true},\n\t{\".\", false},\n\t{\".\/\", false},\n\t{\"lala\", false},\n}\n\nfunc TestIsAbs(t *testing.T) {\n\tfor _, test := range isAbsTests {\n\t\tif r := IsAbs(test.path); r != test.isAbs {\n\t\t\tt.Errorf(\"IsAbs(%q) = %v, want %v\", test.path, r, test.isAbs)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\nfunc generatePrivateKey(file string) (crypto.PrivateKey, error) {\n\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyBytes, err := x509.MarshalECPrivateKey(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpemKey := pem.Block{Type: \"EC PRIVATE KEY\", Bytes: keyBytes}\n\n\tcertOut, err := os.Create(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpem.Encode(certOut, &pemKey)\n\tcertOut.Close()\n\n\treturn privateKey, nil\n}\n\nfunc loadPrivateKey(file string) (crypto.PrivateKey, error) {\n\tkeyBytes, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyBlock, _ := pem.Decode(keyBytes)\n\n\tswitch keyBlock.Type {\n\tcase \"RSA PRIVATE KEY\":\n\t\treturn x509.ParsePKCS1PrivateKey(keyBlock.Bytes)\n\tcase \"EC PRIVATE KEY\":\n\t\treturn x509.ParseECPrivateKey(keyBlock.Bytes)\n\t}\n\n\treturn nil, errors.New(\"Unknown private key type.\")\n}\n<commit_msg>Remove uneeded files.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings\n\nimport (\n\t\"os\"\n\t\"utf8\"\n)\n\n\/\/ A Reader satisfies calls to Read, ReadByte, and ReadRune by\n\/\/ reading from a string.\ntype Reader string\n\nfunc (r *Reader) Read(b []byte) (n int, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tfor n < len(s) && n < len(b) {\n\t\tb[n] = s[n]\n\t\tn++\n\t}\n\t*r = s[n:]\n\treturn\n}\n\nfunc (r *Reader) ReadByte() (b byte, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tb = s[0]\n\t*r = s[1:]\n\treturn\n}\n\n\/\/ ReadRune reads and returns the next UTF-8-encoded\n\/\/ Unicode code point from the buffer.\n\/\/ If no bytes are available, the error returned is os.EOF.\n\/\/ If the bytes are an erroneous UTF-8 encoding, it\n\/\/ consumes one byte and returns U+FFFD, 1.\nfunc (r *Reader) ReadRune() (rune int, size int, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, 0, os.EOF\n\t}\n\tc := s[0]\n\tif c < utf8.RuneSelf {\n\t\t*r = s[1:]\n\t\treturn int(c), 1, nil\n\t}\n\trune, size = utf8.DecodeRuneInString(string(s))\n\t*r = s[size:]\n\treturn\n}\n\n\/\/ NewReader returns a new Reader reading from s.\n\/\/ It is similar to bytes.NewBufferString but more efficient and read-only.\nfunc NewReader(s string) *Reader { return (*Reader)(&s) }\n<commit_msg>strings: make Reader.Read use copy instead of an explicit loop.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage strings\n\nimport (\n\t\"os\"\n\t\"utf8\"\n)\n\n\/\/ A Reader satisfies calls to Read, ReadByte, and ReadRune by\n\/\/ reading from a string.\ntype Reader string\n\nfunc (r *Reader) Read(b []byte) (n int, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tn = copy(b, s)\n\t*r = s[n:]\n\treturn\n}\n\nfunc (r *Reader) ReadByte() (b byte, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tb = s[0]\n\t*r = s[1:]\n\treturn\n}\n\n\/\/ ReadRune reads and returns the next UTF-8-encoded\n\/\/ Unicode code point from the buffer.\n\/\/ If no bytes are available, the error returned is os.EOF.\n\/\/ If the bytes are an erroneous UTF-8 encoding, it\n\/\/ consumes one byte and returns U+FFFD, 1.\nfunc (r *Reader) ReadRune() (rune int, size int, err os.Error) {\n\ts := *r\n\tif len(s) == 0 {\n\t\treturn 0, 0, os.EOF\n\t}\n\tc := s[0]\n\tif c < utf8.RuneSelf {\n\t\t*r = s[1:]\n\t\treturn int(c), 1, nil\n\t}\n\trune, size = utf8.DecodeRuneInString(string(s))\n\t*r = s[size:]\n\treturn\n}\n\n\/\/ NewReader returns a new Reader reading from s.\n\/\/ It is similar to bytes.NewBufferString but more efficient and read-only.\nfunc NewReader(s string) *Reader { return (*Reader)(&s) }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package unicode provides data and functions to test some properties of\n\/\/ Unicode code points.\npackage unicode\n\nconst (\n\tMaxRune = '\\U0010FFFF' \/\/ Maximum valid Unicode code point.\n\tReplacementChar = '\\uFFFD' \/\/ Represents invalid code points.\n\tMaxASCII = '\\u007F' \/\/ maximum ASCII value.\n\tMaxLatin1 = '\\u00FF' \/\/ maximum Latin-1 value.\n)\n\n\/\/ RangeTable defines a set of Unicode code points by listing the ranges of\n\/\/ code points within the set. The ranges are listed in two slices\n\/\/ to save space: a slice of 16-bit ranges and a slice of 32-bit ranges.\n\/\/ The two slices must be in sorted order and non-overlapping.\n\/\/ Also, R32 should contain only values >= 0x10000 (1<<16).\ntype RangeTable struct {\n\tR16 []Range16\n\tR32 []Range32\n\tLatinOffset int \/\/ number of entries in R16 with Hi <= MaxLatin1\n}\n\n\/\/ Range16 represents of a range of 16-bit Unicode code points. The range runs from Lo to Hi\n\/\/ inclusive and has the specified stride.\ntype Range16 struct {\n\tLo uint16\n\tHi uint16\n\tStride uint16\n}\n\n\/\/ Range32 represents of a range of Unicode code points and is used when one or\n\/\/ more of the values will not fit in 16 bits. The range runs from Lo to Hi\n\/\/ inclusive and has the specified stride. Lo and Hi must always be >= 1<<16.\ntype Range32 struct {\n\tLo uint32\n\tHi uint32\n\tStride uint32\n}\n\n\/\/ CaseRange represents a range of Unicode code points for simple (one\n\/\/ code point to one code point) case conversion.\n\/\/ The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas\n\/\/ are the number to add to the code point to reach the code point for a\n\/\/ different case for that character. They may be negative. If zero, it\n\/\/ means the character is in the corresponding case. There is a special\n\/\/ case representing sequences of alternating corresponding Upper and Lower\n\/\/ pairs. It appears with a fixed Delta of\n\/\/\t{UpperLower, UpperLower, UpperLower}\n\/\/ The constant UpperLower has an otherwise impossible delta value.\ntype CaseRange struct {\n\tLo uint32\n\tHi uint32\n\tDelta d\n}\n\n\/\/ SpecialCase represents language-specific case mappings such as Turkish.\n\/\/ Methods of SpecialCase customize (by overriding) the standard mappings.\ntype SpecialCase []CaseRange\n\n\/\/ BUG(r): There is no mechanism for full case folding, that is, for\n\/\/ characters that involve multiple runes in the input or output.\n\n\/\/ Indices into the Delta arrays inside CaseRanges for case mapping.\nconst (\n\tUpperCase = iota\n\tLowerCase\n\tTitleCase\n\tMaxCase\n)\n\ntype d [MaxCase]rune \/\/ to make the CaseRanges text shorter\n\n\/\/ If the Delta field of a CaseRange is UpperLower, it means\n\/\/ this CaseRange represents a sequence of the form (say)\n\/\/ Upper Lower Upper Lower.\nconst (\n\tUpperLower = MaxRune + 1 \/\/ (Cannot be a valid delta.)\n)\n\n\/\/ linearMax is the maximum size table for linear search for non-Latin1 rune.\n\/\/ Derived by running 'go test -calibrate'.\nconst linearMax = 18\n\n\/\/ is16 reports whether r is in the sorted slice of 16-bit ranges.\nfunc is16(ranges []Range16, r uint16) bool {\n\tif len(ranges) <= linearMax || r <= MaxLatin1 {\n\t\tfor i := range ranges {\n\t\t\trange_ := &ranges[i]\n\t\t\tif r < range_.Lo {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r <= range_.Hi {\n\t\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(ranges)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\trange_ := &ranges[m]\n\t\tif range_.Lo <= r && r <= range_.Hi {\n\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t}\n\t\tif r < range_.Lo {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ is32 reports whether r is in the sorted slice of 32-bit ranges.\nfunc is32(ranges []Range32, r uint32) bool {\n\tif len(ranges) <= linearMax {\n\t\tfor i := range ranges {\n\t\t\trange_ := &ranges[i]\n\t\t\tif r < range_.Lo {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r <= range_.Hi {\n\t\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(ranges)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\trange_ := ranges[m]\n\t\tif range_.Lo <= r && r <= range_.Hi {\n\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t}\n\t\tif r < range_.Lo {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Is reports whether the rune is in the specified table of ranges.\nfunc Is(rangeTab *RangeTable, r rune) bool {\n\tr16 := rangeTab.R16\n\tif len(r16) > 0 && r <= rune(r16[len(r16)-1].Hi) {\n\t\treturn is16(r16, uint16(r))\n\t}\n\tr32 := rangeTab.R32\n\tif len(r32) > 0 && r >= rune(r32[0].Lo) {\n\t\treturn is32(r32, uint32(r))\n\t}\n\treturn false\n}\n\nfunc isExcludingLatin(rangeTab *RangeTable, r rune) bool {\n\tr16 := rangeTab.R16\n\tif off := rangeTab.LatinOffset; len(r16) > off && r <= rune(r16[len(r16)-1].Hi) {\n\t\treturn is16(r16[off:], uint16(r))\n\t}\n\tr32 := rangeTab.R32\n\tif len(r32) > 0 && r >= rune(r32[0].Lo) {\n\t\treturn is32(r32, uint32(r))\n\t}\n\treturn false\n}\n\n\/\/ IsUpper reports whether the rune is an upper case letter.\nfunc IsUpper(r rune) bool {\n\t\/\/ See comment in IsGraphic.\n\tif uint32(r) <= MaxLatin1 {\n\t\treturn properties[uint8(r)]&pLmask == pLu\n\t}\n\treturn isExcludingLatin(Upper, r)\n}\n\n\/\/ IsLower reports whether the rune is a lower case letter.\nfunc IsLower(r rune) bool {\n\t\/\/ See comment in IsGraphic.\n\tif uint32(r) <= MaxLatin1 {\n\t\treturn properties[uint8(r)]&pLmask == pLl\n\t}\n\treturn isExcludingLatin(Lower, r)\n}\n\n\/\/ IsTitle reports whether the rune is a title case letter.\nfunc IsTitle(r rune) bool {\n\tif r <= MaxLatin1 {\n\t\treturn false\n\t}\n\treturn isExcludingLatin(Title, r)\n}\n\n\/\/ to maps the rune using the specified case mapping.\nfunc to(_case int, r rune, caseRange []CaseRange) rune {\n\tif _case < 0 || MaxCase <= _case {\n\t\treturn ReplacementChar \/\/ as reasonable an error as any\n\t}\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(caseRange)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\tcr := caseRange[m]\n\t\tif rune(cr.Lo) <= r && r <= rune(cr.Hi) {\n\t\t\tdelta := rune(cr.Delta[_case])\n\t\t\tif delta > MaxRune {\n\t\t\t\t\/\/ In an Upper-Lower sequence, which always starts with\n\t\t\t\t\/\/ an UpperCase letter, the real deltas always look like:\n\t\t\t\t\/\/\t{0, 1, 0} UpperCase (Lower is next)\n\t\t\t\t\/\/\t{-1, 0, -1} LowerCase (Upper, Title are previous)\n\t\t\t\t\/\/ The characters at even offsets from the beginning of the\n\t\t\t\t\/\/ sequence are upper case; the ones at odd offsets are lower.\n\t\t\t\t\/\/ The correct mapping can be done by clearing or setting the low\n\t\t\t\t\/\/ bit in the sequence offset.\n\t\t\t\t\/\/ The constants UpperCase and TitleCase are even while LowerCase\n\t\t\t\t\/\/ is odd so we take the low bit from _case.\n\t\t\t\treturn rune(cr.Lo) + ((r-rune(cr.Lo))&^1 | rune(_case&1))\n\t\t\t}\n\t\t\treturn r + delta\n\t\t}\n\t\tif r < rune(cr.Lo) {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn r\n}\n\n\/\/ To maps the rune to the specified case: UpperCase, LowerCase, or TitleCase.\nfunc To(_case int, r rune) rune {\n\treturn to(_case, r, CaseRanges)\n}\n\n\/\/ ToUpper maps the rune to upper case.\nfunc ToUpper(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'a' <= r && r <= 'z' {\n\t\t\tr -= 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(UpperCase, r)\n}\n\n\/\/ ToLower maps the rune to lower case.\nfunc ToLower(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'A' <= r && r <= 'Z' {\n\t\t\tr += 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(LowerCase, r)\n}\n\n\/\/ ToTitle maps the rune to title case.\nfunc ToTitle(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'a' <= r && r <= 'z' { \/\/ title case is upper case for ASCII\n\t\t\tr -= 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(TitleCase, r)\n}\n\n\/\/ ToUpper maps the rune to upper case giving priority to the special mapping.\nfunc (special SpecialCase) ToUpper(r rune) rune {\n\tr1 := to(UpperCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToUpper(r)\n\t}\n\treturn r1\n}\n\n\/\/ ToTitle maps the rune to title case giving priority to the special mapping.\nfunc (special SpecialCase) ToTitle(r rune) rune {\n\tr1 := to(TitleCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToTitle(r)\n\t}\n\treturn r1\n}\n\n\/\/ ToLower maps the rune to lower case giving priority to the special mapping.\nfunc (special SpecialCase) ToLower(r rune) rune {\n\tr1 := to(LowerCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToLower(r)\n\t}\n\treturn r1\n}\n\n\/\/ caseOrbit is defined in tables.go as []foldPair. Right now all the\n\/\/ entries fit in uint16, so use uint16. If that changes, compilation\n\/\/ will fail (the constants in the composite literal will not fit in uint16)\n\/\/ and the types here can change to uint32.\ntype foldPair struct {\n\tFrom uint16\n\tTo uint16\n}\n\n\/\/ SimpleFold iterates over Unicode code points equivalent under\n\/\/ the Unicode-defined simple case folding. Among the code points\n\/\/ equivalent to rune (including rune itself), SimpleFold returns the\n\/\/ smallest rune >= r if one exists, or else the smallest rune >= 0.\n\/\/\n\/\/ For example:\n\/\/\tSimpleFold('A') = 'a'\n\/\/\tSimpleFold('a') = 'A'\n\/\/\n\/\/\tSimpleFold('K') = 'k'\n\/\/\tSimpleFold('k') = '\\u212A' (Kelvin symbol, K)\n\/\/\tSimpleFold('\\u212A') = 'K'\n\/\/\n\/\/\tSimpleFold('1') = '1'\n\/\/\nfunc SimpleFold(r rune) rune {\n\t\/\/ Consult caseOrbit table for special cases.\n\tlo := 0\n\thi := len(caseOrbit)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\tif rune(caseOrbit[m].From) < r {\n\t\t\tlo = m + 1\n\t\t} else {\n\t\t\thi = m\n\t\t}\n\t}\n\tif lo < len(caseOrbit) && rune(caseOrbit[lo].From) == r {\n\t\treturn rune(caseOrbit[lo].To)\n\t}\n\n\t\/\/ No folding specified. This is a one- or two-element\n\t\/\/ equivalence class containing rune and ToLower(rune)\n\t\/\/ and ToUpper(rune) if they are different from rune.\n\tif l := ToLower(r); l != r {\n\t\treturn l\n\t}\n\treturn ToUpper(r)\n}\n<commit_msg>unicode: fix doc typo<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package unicode provides data and functions to test some properties of\n\/\/ Unicode code points.\npackage unicode\n\nconst (\n\tMaxRune = '\\U0010FFFF' \/\/ Maximum valid Unicode code point.\n\tReplacementChar = '\\uFFFD' \/\/ Represents invalid code points.\n\tMaxASCII = '\\u007F' \/\/ maximum ASCII value.\n\tMaxLatin1 = '\\u00FF' \/\/ maximum Latin-1 value.\n)\n\n\/\/ RangeTable defines a set of Unicode code points by listing the ranges of\n\/\/ code points within the set. The ranges are listed in two slices\n\/\/ to save space: a slice of 16-bit ranges and a slice of 32-bit ranges.\n\/\/ The two slices must be in sorted order and non-overlapping.\n\/\/ Also, R32 should contain only values >= 0x10000 (1<<16).\ntype RangeTable struct {\n\tR16 []Range16\n\tR32 []Range32\n\tLatinOffset int \/\/ number of entries in R16 with Hi <= MaxLatin1\n}\n\n\/\/ Range16 represents of a range of 16-bit Unicode code points. The range runs from Lo to Hi\n\/\/ inclusive and has the specified stride.\ntype Range16 struct {\n\tLo uint16\n\tHi uint16\n\tStride uint16\n}\n\n\/\/ Range32 represents of a range of Unicode code points and is used when one or\n\/\/ more of the values will not fit in 16 bits. The range runs from Lo to Hi\n\/\/ inclusive and has the specified stride. Lo and Hi must always be >= 1<<16.\ntype Range32 struct {\n\tLo uint32\n\tHi uint32\n\tStride uint32\n}\n\n\/\/ CaseRange represents a range of Unicode code points for simple (one\n\/\/ code point to one code point) case conversion.\n\/\/ The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas\n\/\/ are the number to add to the code point to reach the code point for a\n\/\/ different case for that character. They may be negative. If zero, it\n\/\/ means the character is in the corresponding case. There is a special\n\/\/ case representing sequences of alternating corresponding Upper and Lower\n\/\/ pairs. It appears with a fixed Delta of\n\/\/\t{UpperLower, UpperLower, UpperLower}\n\/\/ The constant UpperLower has an otherwise impossible delta value.\ntype CaseRange struct {\n\tLo uint32\n\tHi uint32\n\tDelta d\n}\n\n\/\/ SpecialCase represents language-specific case mappings such as Turkish.\n\/\/ Methods of SpecialCase customize (by overriding) the standard mappings.\ntype SpecialCase []CaseRange\n\n\/\/ BUG(r): There is no mechanism for full case folding, that is, for\n\/\/ characters that involve multiple runes in the input or output.\n\n\/\/ Indices into the Delta arrays inside CaseRanges for case mapping.\nconst (\n\tUpperCase = iota\n\tLowerCase\n\tTitleCase\n\tMaxCase\n)\n\ntype d [MaxCase]rune \/\/ to make the CaseRanges text shorter\n\n\/\/ If the Delta field of a CaseRange is UpperLower, it means\n\/\/ this CaseRange represents a sequence of the form (say)\n\/\/ Upper Lower Upper Lower.\nconst (\n\tUpperLower = MaxRune + 1 \/\/ (Cannot be a valid delta.)\n)\n\n\/\/ linearMax is the maximum size table for linear search for non-Latin1 rune.\n\/\/ Derived by running 'go test -calibrate'.\nconst linearMax = 18\n\n\/\/ is16 reports whether r is in the sorted slice of 16-bit ranges.\nfunc is16(ranges []Range16, r uint16) bool {\n\tif len(ranges) <= linearMax || r <= MaxLatin1 {\n\t\tfor i := range ranges {\n\t\t\trange_ := &ranges[i]\n\t\t\tif r < range_.Lo {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r <= range_.Hi {\n\t\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(ranges)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\trange_ := &ranges[m]\n\t\tif range_.Lo <= r && r <= range_.Hi {\n\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t}\n\t\tif r < range_.Lo {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ is32 reports whether r is in the sorted slice of 32-bit ranges.\nfunc is32(ranges []Range32, r uint32) bool {\n\tif len(ranges) <= linearMax {\n\t\tfor i := range ranges {\n\t\t\trange_ := &ranges[i]\n\t\t\tif r < range_.Lo {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r <= range_.Hi {\n\t\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(ranges)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\trange_ := ranges[m]\n\t\tif range_.Lo <= r && r <= range_.Hi {\n\t\t\treturn (r-range_.Lo)%range_.Stride == 0\n\t\t}\n\t\tif r < range_.Lo {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Is reports whether the rune is in the specified table of ranges.\nfunc Is(rangeTab *RangeTable, r rune) bool {\n\tr16 := rangeTab.R16\n\tif len(r16) > 0 && r <= rune(r16[len(r16)-1].Hi) {\n\t\treturn is16(r16, uint16(r))\n\t}\n\tr32 := rangeTab.R32\n\tif len(r32) > 0 && r >= rune(r32[0].Lo) {\n\t\treturn is32(r32, uint32(r))\n\t}\n\treturn false\n}\n\nfunc isExcludingLatin(rangeTab *RangeTable, r rune) bool {\n\tr16 := rangeTab.R16\n\tif off := rangeTab.LatinOffset; len(r16) > off && r <= rune(r16[len(r16)-1].Hi) {\n\t\treturn is16(r16[off:], uint16(r))\n\t}\n\tr32 := rangeTab.R32\n\tif len(r32) > 0 && r >= rune(r32[0].Lo) {\n\t\treturn is32(r32, uint32(r))\n\t}\n\treturn false\n}\n\n\/\/ IsUpper reports whether the rune is an upper case letter.\nfunc IsUpper(r rune) bool {\n\t\/\/ See comment in IsGraphic.\n\tif uint32(r) <= MaxLatin1 {\n\t\treturn properties[uint8(r)]&pLmask == pLu\n\t}\n\treturn isExcludingLatin(Upper, r)\n}\n\n\/\/ IsLower reports whether the rune is a lower case letter.\nfunc IsLower(r rune) bool {\n\t\/\/ See comment in IsGraphic.\n\tif uint32(r) <= MaxLatin1 {\n\t\treturn properties[uint8(r)]&pLmask == pLl\n\t}\n\treturn isExcludingLatin(Lower, r)\n}\n\n\/\/ IsTitle reports whether the rune is a title case letter.\nfunc IsTitle(r rune) bool {\n\tif r <= MaxLatin1 {\n\t\treturn false\n\t}\n\treturn isExcludingLatin(Title, r)\n}\n\n\/\/ to maps the rune using the specified case mapping.\nfunc to(_case int, r rune, caseRange []CaseRange) rune {\n\tif _case < 0 || MaxCase <= _case {\n\t\treturn ReplacementChar \/\/ as reasonable an error as any\n\t}\n\t\/\/ binary search over ranges\n\tlo := 0\n\thi := len(caseRange)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\tcr := caseRange[m]\n\t\tif rune(cr.Lo) <= r && r <= rune(cr.Hi) {\n\t\t\tdelta := rune(cr.Delta[_case])\n\t\t\tif delta > MaxRune {\n\t\t\t\t\/\/ In an Upper-Lower sequence, which always starts with\n\t\t\t\t\/\/ an UpperCase letter, the real deltas always look like:\n\t\t\t\t\/\/\t{0, 1, 0} UpperCase (Lower is next)\n\t\t\t\t\/\/\t{-1, 0, -1} LowerCase (Upper, Title are previous)\n\t\t\t\t\/\/ The characters at even offsets from the beginning of the\n\t\t\t\t\/\/ sequence are upper case; the ones at odd offsets are lower.\n\t\t\t\t\/\/ The correct mapping can be done by clearing or setting the low\n\t\t\t\t\/\/ bit in the sequence offset.\n\t\t\t\t\/\/ The constants UpperCase and TitleCase are even while LowerCase\n\t\t\t\t\/\/ is odd so we take the low bit from _case.\n\t\t\t\treturn rune(cr.Lo) + ((r-rune(cr.Lo))&^1 | rune(_case&1))\n\t\t\t}\n\t\t\treturn r + delta\n\t\t}\n\t\tif r < rune(cr.Lo) {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn r\n}\n\n\/\/ To maps the rune to the specified case: UpperCase, LowerCase, or TitleCase.\nfunc To(_case int, r rune) rune {\n\treturn to(_case, r, CaseRanges)\n}\n\n\/\/ ToUpper maps the rune to upper case.\nfunc ToUpper(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'a' <= r && r <= 'z' {\n\t\t\tr -= 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(UpperCase, r)\n}\n\n\/\/ ToLower maps the rune to lower case.\nfunc ToLower(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'A' <= r && r <= 'Z' {\n\t\t\tr += 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(LowerCase, r)\n}\n\n\/\/ ToTitle maps the rune to title case.\nfunc ToTitle(r rune) rune {\n\tif r <= MaxASCII {\n\t\tif 'a' <= r && r <= 'z' { \/\/ title case is upper case for ASCII\n\t\t\tr -= 'a' - 'A'\n\t\t}\n\t\treturn r\n\t}\n\treturn To(TitleCase, r)\n}\n\n\/\/ ToUpper maps the rune to upper case giving priority to the special mapping.\nfunc (special SpecialCase) ToUpper(r rune) rune {\n\tr1 := to(UpperCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToUpper(r)\n\t}\n\treturn r1\n}\n\n\/\/ ToTitle maps the rune to title case giving priority to the special mapping.\nfunc (special SpecialCase) ToTitle(r rune) rune {\n\tr1 := to(TitleCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToTitle(r)\n\t}\n\treturn r1\n}\n\n\/\/ ToLower maps the rune to lower case giving priority to the special mapping.\nfunc (special SpecialCase) ToLower(r rune) rune {\n\tr1 := to(LowerCase, r, []CaseRange(special))\n\tif r1 == r {\n\t\tr1 = ToLower(r)\n\t}\n\treturn r1\n}\n\n\/\/ caseOrbit is defined in tables.go as []foldPair. Right now all the\n\/\/ entries fit in uint16, so use uint16. If that changes, compilation\n\/\/ will fail (the constants in the composite literal will not fit in uint16)\n\/\/ and the types here can change to uint32.\ntype foldPair struct {\n\tFrom uint16\n\tTo uint16\n}\n\n\/\/ SimpleFold iterates over Unicode code points equivalent under\n\/\/ the Unicode-defined simple case folding. Among the code points\n\/\/ equivalent to rune (including rune itself), SimpleFold returns the\n\/\/ smallest rune > r if one exists, or else the smallest rune >= 0.\n\/\/\n\/\/ For example:\n\/\/\tSimpleFold('A') = 'a'\n\/\/\tSimpleFold('a') = 'A'\n\/\/\n\/\/\tSimpleFold('K') = 'k'\n\/\/\tSimpleFold('k') = '\\u212A' (Kelvin symbol, K)\n\/\/\tSimpleFold('\\u212A') = 'K'\n\/\/\n\/\/\tSimpleFold('1') = '1'\n\/\/\nfunc SimpleFold(r rune) rune {\n\t\/\/ Consult caseOrbit table for special cases.\n\tlo := 0\n\thi := len(caseOrbit)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)\/2\n\t\tif rune(caseOrbit[m].From) < r {\n\t\t\tlo = m + 1\n\t\t} else {\n\t\t\thi = m\n\t\t}\n\t}\n\tif lo < len(caseOrbit) && rune(caseOrbit[lo].From) == r {\n\t\treturn rune(caseOrbit[lo].To)\n\t}\n\n\t\/\/ No folding specified. This is a one- or two-element\n\t\/\/ equivalence class containing rune and ToLower(rune)\n\t\/\/ and ToUpper(rune) if they are different from rune.\n\tif l := ToLower(r); l != r {\n\t\treturn l\n\t}\n\treturn ToUpper(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package compose\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ecs\"\n\t\"github.com\/docker\/libcompose\/config\"\n\t\"github.com\/docker\/libcompose\/docker\"\n\t\"github.com\/docker\/libcompose\/docker\/ctx\"\n\t\"github.com\/docker\/libcompose\/project\"\n)\n\ntype Transformer struct {\n\tComposeFiles []string\n\tProjectName string\n\tServices []string\n\tEnvironmentLookup config.EnvironmentLookup\n}\n\nfunc (t *Transformer) Transform() (*ecs.RegisterTaskDefinitionInput, error) {\n\ttask := ecs.RegisterTaskDefinitionInput{\n\t\tFamily: aws.String(t.ProjectName),\n\t\tContainerDefinitions: []*ecs.ContainerDefinition{},\n\t\tVolumes: []*ecs.Volume{},\n\t}\n\n\tprojectCtx := project.Context{\n\t\tComposeFiles: t.ComposeFiles,\n\t\tProjectName: t.ProjectName,\n\t}\n\n\tif t.EnvironmentLookup != nil {\n\t\tprojectCtx.EnvironmentLookup = t.EnvironmentLookup\n\t}\n\n\tp, err := docker.NewProject(&ctx.Context{Context: projectCtx}, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, name := range p.(*project.Project).ServiceConfigs.Keys() {\n\t\tif !isServiceIncluded(name, t.Services) {\n\t\t\tlog.Printf(\"Skipping service %s\", name)\n\t\t\tcontinue\n\t\t}\n\n\t\tconfig, _ := p.GetServiceConfig(name)\n\t\tvar def = ecs.ContainerDefinition{\n\t\t\tName: aws.String(name),\n\t\t}\n\n\t\tif config.Image != \"\" {\n\t\t\tdef.Image = aws.String(config.Image)\n\t\t}\n\n\t\tif config.Hostname != \"\" {\n\t\t\tdef.Hostname = aws.String(config.Hostname)\n\t\t}\n\n\t\tif config.WorkingDir != \"\" {\n\t\t\tdef.WorkingDirectory = aws.String(config.WorkingDir)\n\t\t}\n\n\t\tif config.CPUShares > 0 {\n\t\t\tdef.Cpu = aws.Int64(int64(config.CPUShares))\n\t\t}\n\n\t\t\/\/ docker-compose expresses it's limit in bytes, ECS in mb\n\t\tif config.MemLimit > 0 {\n\t\t\tdef.Memory = aws.Int64(int64(config.MemLimit \/ 1024 \/ 1024))\n\t\t}\n\n\t\tif config.Privileged {\n\t\t\tdef.Privileged = aws.Bool(config.Privileged)\n\t\t}\n\n\t\tif slice := []string(config.DNS); len(slice) > 0 {\n\t\t\tfor _, dns := range slice {\n\t\t\t\tdef.DnsServers = append(def.DnsServers, aws.String(dns))\n\t\t\t}\n\t\t}\n\n\t\tif slice := []string(config.DNSSearch); len(slice) > 0 {\n\t\t\tfor _, item := range slice {\n\t\t\t\tdef.DnsSearchDomains = append(def.DnsSearchDomains, aws.String(item))\n\t\t\t}\n\t\t}\n\n\t\tif cmds := []string(config.Command); len(cmds) > 0 {\n\t\t\tdef.Command = []*string{}\n\t\t\tfor _, command := range cmds {\n\t\t\t\tdef.Command = append(def.Command, aws.String(command))\n\t\t\t}\n\t\t}\n\n\t\tif cmds := []string(config.Entrypoint); len(cmds) > 0 {\n\t\t\tdef.EntryPoint = []*string{}\n\t\t\tfor _, command := range cmds {\n\t\t\t\tdef.EntryPoint = append(def.EntryPoint, aws.String(command))\n\t\t\t}\n\t\t}\n\n\t\tif slice := []string(config.Environment); len(slice) > 0 {\n\t\t\tdef.Environment = []*ecs.KeyValuePair{}\n\t\t\tfor _, val := range slice {\n\t\t\t\tparts := strings.SplitN(val, \"=\", 2)\n\t\t\t\tdef.Environment = append(def.Environment, &ecs.KeyValuePair{\n\t\t\t\t\tName: aws.String(parts[0]),\n\t\t\t\t\tValue: aws.String(parts[1]),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif ports := config.Ports; len(ports) > 0 {\n\t\t\tdef.PortMappings = []*ecs.PortMapping{}\n\t\t\tfor _, val := range ports {\n\t\t\t\tparts := strings.Split(val, \":\")\n\t\t\t\tmapping := &ecs.PortMapping{}\n\n\t\t\t\t\/\/ TODO: support host to map to\n\t\t\t\tif len(parts) > 0 {\n\t\t\t\t\tportInt, err := strconv.ParseInt(parts[0], 10, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tmapping.ContainerPort = aws.Int64(portInt)\n\t\t\t\t}\n\n\t\t\t\tif len(parts) > 1 {\n\t\t\t\t\thostParts := strings.Split(parts[1], \"\/\")\n\t\t\t\t\tportInt, err := strconv.ParseInt(hostParts[0], 10, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tmapping.HostPort = aws.Int64(portInt)\n\n\t\t\t\t\t\/\/ handle the protocol at the end of the mapping\n\t\t\t\t\tif len(hostParts) > 1 {\n\t\t\t\t\t\tmapping.Protocol = aws.String(hostParts[1])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(parts) == 0 || len(parts) > 2 {\n\t\t\t\t\treturn nil, errors.New(\"Unsupported port mapping \" + val)\n\t\t\t\t}\n\n\t\t\t\tdef.PortMappings = append(def.PortMappings, mapping)\n\t\t\t}\n\t\t}\n\n\t\tif links := []string(config.Links); len(links) > 0 {\n\t\t\tdef.Links = []*string{}\n\t\t\tfor _, link := range links {\n\t\t\t\tdef.Links = append(def.Links, aws.String(link))\n\t\t\t}\n\t\t}\n\n\t\tif dependsOn := []string(config.DependsOn); len(dependsOn) > 0 {\n\t\t\tdef.Links = []*string{}\n\t\t\tfor _, link := range dependsOn {\n\t\t\t\tdef.Links = append(def.Links, aws.String(link))\n\t\t\t}\n\t\t}\n\n\t\tif config.Volumes != nil {\n\t\t\tdef.MountPoints = []*ecs.MountPoint{}\n\t\t\tfor idx, vol := range config.Volumes.Volumes {\n\t\t\t\tvolumeName := fmt.Sprintf(\"%s-vol%d\", name, idx)\n\n\t\t\t\tvolume := ecs.Volume{\n\t\t\t\t\tHost: &ecs.HostVolumeProperties{\n\t\t\t\t\t\tSourcePath: aws.String(vol.Source),\n\t\t\t\t\t},\n\t\t\t\t\tName: aws.String(volumeName),\n\t\t\t\t}\n\n\t\t\t\tmount := ecs.MountPoint{\n\t\t\t\t\tSourceVolume: aws.String(volumeName),\n\t\t\t\t\tContainerPath: aws.String(vol.Destination),\n\t\t\t\t}\n\n\t\t\t\tif vol.AccessMode == \"ro\" {\n\t\t\t\t\tmount.ReadOnly = aws.Bool(true)\n\t\t\t\t}\n\n\t\t\t\ttask.Volumes = append(task.Volumes, &volume)\n\t\t\t\tdef.MountPoints = append(def.MountPoints, &mount)\n\t\t\t}\n\t\t}\n\n\t\tif volsFrom := config.VolumesFrom; len(volsFrom) > 0 {\n\t\t\tdef.VolumesFrom = []*ecs.VolumeFrom{}\n\t\t\tfor _, container := range volsFrom {\n\t\t\t\tdef.VolumesFrom = append(def.VolumesFrom, &ecs.VolumeFrom{\n\t\t\t\t\tSourceContainer: aws.String(container),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif config.Logging.Driver != \"\" {\n\t\t\tdef.LogConfiguration = &ecs.LogConfiguration{\n\t\t\t\tLogDriver: aws.String(config.Logging.Driver),\n\t\t\t\tOptions: map[string]*string{},\n\t\t\t}\n\t\t\tfor k, v := range config.Logging.Options {\n\t\t\t\tdef.LogConfiguration.Options[k] = aws.String(v)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ TODO: the following directives are ignored \/ not handled\n\t\t\/\/ Networks\n\t\t\/\/ Expose\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue []string\n\t\t}{\n\t\t\t{\"CapAdd\", config.CapAdd},\n\t\t\t{\"CapDrop\", config.CapDrop},\n\t\t\t{\"Devices\", config.Devices},\n\t\t\t{\"EnvFile\", config.EnvFile},\n\t\t\t{\"SecurityOpt\", config.SecurityOpt},\n\t\t\t{\"ExternalLinks\", config.ExternalLinks},\n\t\t\t{\"ExtraHosts\", config.ExtraHosts},\n\t\t\t{\"Extends\", config.Extends},\n\t\t\t{\"GroupAdd\", config.GroupAdd},\n\t\t\t{\"DNSOpts\", config.DNSOpts},\n\t\t\t{\"Tmpfs\", config.Tmpfs},\n\t\t} {\n\t\t\tif len(i.Value) > 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue string\n\t\t}{\n\t\t\t{\"Build.Context\", config.Build.Context},\n\t\t\t{\"Build.Dockerfile\", config.Build.Dockerfile},\n\t\t\t{\"DomainName\", config.DomainName},\n\t\t\t{\"VolumeDriver\", config.VolumeDriver},\n\t\t\t{\"CPUSet\", config.CPUSet},\n\t\t\t{\"NetworkMode\", config.NetworkMode},\n\t\t\t{\"Pid\", config.Pid},\n\t\t\t{\"Uts\", config.Uts},\n\t\t\t{\"Ipc\", config.Ipc},\n\t\t\t{\"Restart\", config.Restart},\n\t\t\t{\"User\", config.User},\n\t\t\t{\"StopSignal\", config.StopSignal},\n\t\t\t{\"MacAddress\", config.MacAddress},\n\t\t\t{\"Isolation\", config.Isolation},\n\t\t\t{\"NetworkMode\", config.NetworkMode},\n\t\t\t{\"CgroupParent\", config.CgroupParent},\n\t\t} {\n\t\t\tif i.Value != \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue int\n\t\t}{\n\t\t\t{\"CPUShares\", int(config.CPUShares)},\n\t\t\t{\"CPUQuota\", int(config.CPUQuota)},\n\t\t\t{\"MemSwapLimit\", int(config.MemSwapLimit)},\n\t\t\t{\"MemSwappiness\", int(config.MemSwappiness)},\n\t\t\t{\"OomScoreAdj\", int(config.OomScoreAdj)},\n\t\t\t{\"ShmSize\", int(config.ShmSize)},\n\t\t} {\n\t\t\tif i.Value != 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue bool\n\t\t}{\n\t\t\t{\"ReadOnly\", config.ReadOnly},\n\t\t\t{\"StdinOpen\", config.StdinOpen},\n\t\t\t{\"Tty\", config.Tty},\n\t\t} {\n\t\t\tif i.Value {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tif config.Labels != nil {\n\t\t\treturn nil, fmt.Errorf(\"Labels directive not supported\")\n\t\t}\n\n\t\tif len(config.Ulimits.Elements) > 0 {\n\t\t\treturn nil, fmt.Errorf(\"Ulimits directive not supported\")\n\t\t}\n\n\t\ttask.ContainerDefinitions = append(task.ContainerDefinitions, &def)\n\t}\n\n\treturn &task, nil\n}\n\nfunc isServiceIncluded(name string, included []string) bool {\n\tif len(included) == 0 {\n\t\treturn true\n\t}\n\tfor _, val := range included {\n\t\tif name == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype envMap map[string][]string\n\nfunc (em envMap) Lookup(key string, config *config.ServiceConfig) []string {\n\tresults := []string{}\n\tfor _, val := range em[key] {\n\t\tresults = append(results, key+\"=\"+val)\n\t}\n\treturn results\n}\n<commit_msg>Map MemoryReservation into ECS Task<commit_after>package compose\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ecs\"\n\t\"github.com\/docker\/libcompose\/config\"\n\t\"github.com\/docker\/libcompose\/docker\"\n\t\"github.com\/docker\/libcompose\/docker\/ctx\"\n\t\"github.com\/docker\/libcompose\/project\"\n)\n\ntype Transformer struct {\n\tComposeFiles []string\n\tProjectName string\n\tServices []string\n\tEnvironmentLookup config.EnvironmentLookup\n}\n\nfunc (t *Transformer) Transform() (*ecs.RegisterTaskDefinitionInput, error) {\n\ttask := ecs.RegisterTaskDefinitionInput{\n\t\tFamily: aws.String(t.ProjectName),\n\t\tContainerDefinitions: []*ecs.ContainerDefinition{},\n\t\tVolumes: []*ecs.Volume{},\n\t}\n\n\tprojectCtx := project.Context{\n\t\tComposeFiles: t.ComposeFiles,\n\t\tProjectName: t.ProjectName,\n\t}\n\n\tif t.EnvironmentLookup != nil {\n\t\tprojectCtx.EnvironmentLookup = t.EnvironmentLookup\n\t}\n\n\tp, err := docker.NewProject(&ctx.Context{Context: projectCtx}, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, name := range p.(*project.Project).ServiceConfigs.Keys() {\n\t\tif !isServiceIncluded(name, t.Services) {\n\t\t\tlog.Printf(\"Skipping service %s\", name)\n\t\t\tcontinue\n\t\t}\n\n\t\tconfig, _ := p.GetServiceConfig(name)\n\t\tvar def = ecs.ContainerDefinition{\n\t\t\tName: aws.String(name),\n\t\t}\n\n\t\tif config.Image != \"\" {\n\t\t\tdef.Image = aws.String(config.Image)\n\t\t}\n\n\t\tif config.Hostname != \"\" {\n\t\t\tdef.Hostname = aws.String(config.Hostname)\n\t\t}\n\n\t\tif config.WorkingDir != \"\" {\n\t\t\tdef.WorkingDirectory = aws.String(config.WorkingDir)\n\t\t}\n\n\t\tif config.CPUShares > 0 {\n\t\t\tdef.Cpu = aws.Int64(int64(config.CPUShares))\n\t\t}\n\n\t\t\/\/ docker-compose expresses it's limit in bytes, ECS in mb\n\t\tif config.MemLimit > 0 {\n\t\t\tdef.Memory = aws.Int64(int64(config.MemLimit \/ 1024 \/ 1024))\n\t\t}\n\n\t\tif config.MemReservation > 0 {\n\t\t\tdef.MemoryReservation = aws.Int64(int64(config.MemReservation \/ 1024 \/ 1024))\n\t\t}\n\n\t\tif config.Privileged {\n\t\t\tdef.Privileged = aws.Bool(config.Privileged)\n\t\t}\n\n\t\tif slice := []string(config.DNS); len(slice) > 0 {\n\t\t\tfor _, dns := range slice {\n\t\t\t\tdef.DnsServers = append(def.DnsServers, aws.String(dns))\n\t\t\t}\n\t\t}\n\n\t\tif slice := []string(config.DNSSearch); len(slice) > 0 {\n\t\t\tfor _, item := range slice {\n\t\t\t\tdef.DnsSearchDomains = append(def.DnsSearchDomains, aws.String(item))\n\t\t\t}\n\t\t}\n\n\t\tif cmds := []string(config.Command); len(cmds) > 0 {\n\t\t\tdef.Command = []*string{}\n\t\t\tfor _, command := range cmds {\n\t\t\t\tdef.Command = append(def.Command, aws.String(command))\n\t\t\t}\n\t\t}\n\n\t\tif cmds := []string(config.Entrypoint); len(cmds) > 0 {\n\t\t\tdef.EntryPoint = []*string{}\n\t\t\tfor _, command := range cmds {\n\t\t\t\tdef.EntryPoint = append(def.EntryPoint, aws.String(command))\n\t\t\t}\n\t\t}\n\n\t\tif slice := []string(config.Environment); len(slice) > 0 {\n\t\t\tdef.Environment = []*ecs.KeyValuePair{}\n\t\t\tfor _, val := range slice {\n\t\t\t\tparts := strings.SplitN(val, \"=\", 2)\n\t\t\t\tdef.Environment = append(def.Environment, &ecs.KeyValuePair{\n\t\t\t\t\tName: aws.String(parts[0]),\n\t\t\t\t\tValue: aws.String(parts[1]),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif ports := config.Ports; len(ports) > 0 {\n\t\t\tdef.PortMappings = []*ecs.PortMapping{}\n\t\t\tfor _, val := range ports {\n\t\t\t\tparts := strings.Split(val, \":\")\n\t\t\t\tmapping := &ecs.PortMapping{}\n\n\t\t\t\t\/\/ TODO: support host to map to\n\t\t\t\tif len(parts) > 0 {\n\t\t\t\t\tportInt, err := strconv.ParseInt(parts[0], 10, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tmapping.ContainerPort = aws.Int64(portInt)\n\t\t\t\t}\n\n\t\t\t\tif len(parts) > 1 {\n\t\t\t\t\thostParts := strings.Split(parts[1], \"\/\")\n\t\t\t\t\tportInt, err := strconv.ParseInt(hostParts[0], 10, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tmapping.HostPort = aws.Int64(portInt)\n\n\t\t\t\t\t\/\/ handle the protocol at the end of the mapping\n\t\t\t\t\tif len(hostParts) > 1 {\n\t\t\t\t\t\tmapping.Protocol = aws.String(hostParts[1])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(parts) == 0 || len(parts) > 2 {\n\t\t\t\t\treturn nil, errors.New(\"Unsupported port mapping \" + val)\n\t\t\t\t}\n\n\t\t\t\tdef.PortMappings = append(def.PortMappings, mapping)\n\t\t\t}\n\t\t}\n\n\t\tif links := []string(config.Links); len(links) > 0 {\n\t\t\tdef.Links = []*string{}\n\t\t\tfor _, link := range links {\n\t\t\t\tdef.Links = append(def.Links, aws.String(link))\n\t\t\t}\n\t\t}\n\n\t\tif dependsOn := []string(config.DependsOn); len(dependsOn) > 0 {\n\t\t\tdef.Links = []*string{}\n\t\t\tfor _, link := range dependsOn {\n\t\t\t\tdef.Links = append(def.Links, aws.String(link))\n\t\t\t}\n\t\t}\n\n\t\tif config.Volumes != nil {\n\t\t\tdef.MountPoints = []*ecs.MountPoint{}\n\t\t\tfor idx, vol := range config.Volumes.Volumes {\n\t\t\t\tvolumeName := fmt.Sprintf(\"%s-vol%d\", name, idx)\n\n\t\t\t\tvolume := ecs.Volume{\n\t\t\t\t\tHost: &ecs.HostVolumeProperties{\n\t\t\t\t\t\tSourcePath: aws.String(vol.Source),\n\t\t\t\t\t},\n\t\t\t\t\tName: aws.String(volumeName),\n\t\t\t\t}\n\n\t\t\t\tmount := ecs.MountPoint{\n\t\t\t\t\tSourceVolume: aws.String(volumeName),\n\t\t\t\t\tContainerPath: aws.String(vol.Destination),\n\t\t\t\t}\n\n\t\t\t\tif vol.AccessMode == \"ro\" {\n\t\t\t\t\tmount.ReadOnly = aws.Bool(true)\n\t\t\t\t}\n\n\t\t\t\ttask.Volumes = append(task.Volumes, &volume)\n\t\t\t\tdef.MountPoints = append(def.MountPoints, &mount)\n\t\t\t}\n\t\t}\n\n\t\tif volsFrom := config.VolumesFrom; len(volsFrom) > 0 {\n\t\t\tdef.VolumesFrom = []*ecs.VolumeFrom{}\n\t\t\tfor _, container := range volsFrom {\n\t\t\t\tdef.VolumesFrom = append(def.VolumesFrom, &ecs.VolumeFrom{\n\t\t\t\t\tSourceContainer: aws.String(container),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif config.Logging.Driver != \"\" {\n\t\t\tdef.LogConfiguration = &ecs.LogConfiguration{\n\t\t\t\tLogDriver: aws.String(config.Logging.Driver),\n\t\t\t\tOptions: map[string]*string{},\n\t\t\t}\n\t\t\tfor k, v := range config.Logging.Options {\n\t\t\t\tdef.LogConfiguration.Options[k] = aws.String(v)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ TODO: the following directives are ignored \/ not handled\n\t\t\/\/ Networks\n\t\t\/\/ Expose\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue []string\n\t\t}{\n\t\t\t{\"CapAdd\", config.CapAdd},\n\t\t\t{\"CapDrop\", config.CapDrop},\n\t\t\t{\"Devices\", config.Devices},\n\t\t\t{\"EnvFile\", config.EnvFile},\n\t\t\t{\"SecurityOpt\", config.SecurityOpt},\n\t\t\t{\"ExternalLinks\", config.ExternalLinks},\n\t\t\t{\"ExtraHosts\", config.ExtraHosts},\n\t\t\t{\"Extends\", config.Extends},\n\t\t\t{\"GroupAdd\", config.GroupAdd},\n\t\t\t{\"DNSOpts\", config.DNSOpts},\n\t\t\t{\"Tmpfs\", config.Tmpfs},\n\t\t} {\n\t\t\tif len(i.Value) > 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue string\n\t\t}{\n\t\t\t{\"Build.Context\", config.Build.Context},\n\t\t\t{\"Build.Dockerfile\", config.Build.Dockerfile},\n\t\t\t{\"DomainName\", config.DomainName},\n\t\t\t{\"VolumeDriver\", config.VolumeDriver},\n\t\t\t{\"CPUSet\", config.CPUSet},\n\t\t\t{\"NetworkMode\", config.NetworkMode},\n\t\t\t{\"Pid\", config.Pid},\n\t\t\t{\"Uts\", config.Uts},\n\t\t\t{\"Ipc\", config.Ipc},\n\t\t\t{\"Restart\", config.Restart},\n\t\t\t{\"User\", config.User},\n\t\t\t{\"StopSignal\", config.StopSignal},\n\t\t\t{\"MacAddress\", config.MacAddress},\n\t\t\t{\"Isolation\", config.Isolation},\n\t\t\t{\"NetworkMode\", config.NetworkMode},\n\t\t\t{\"CgroupParent\", config.CgroupParent},\n\t\t} {\n\t\t\tif i.Value != \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue int\n\t\t}{\n\t\t\t{\"CPUShares\", int(config.CPUShares)},\n\t\t\t{\"CPUQuota\", int(config.CPUQuota)},\n\t\t\t{\"MemSwapLimit\", int(config.MemSwapLimit)},\n\t\t\t{\"MemSwappiness\", int(config.MemSwappiness)},\n\t\t\t{\"OomScoreAdj\", int(config.OomScoreAdj)},\n\t\t\t{\"ShmSize\", int(config.ShmSize)},\n\t\t} {\n\t\t\tif i.Value != 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tfor _, i := range []struct {\n\t\t\tKey string\n\t\t\tValue bool\n\t\t}{\n\t\t\t{\"ReadOnly\", config.ReadOnly},\n\t\t\t{\"StdinOpen\", config.StdinOpen},\n\t\t\t{\"Tty\", config.Tty},\n\t\t} {\n\t\t\tif i.Value {\n\t\t\t\treturn nil, fmt.Errorf(\"%s directive not supported\", i.Key)\n\t\t\t}\n\t\t}\n\n\t\tif config.Labels != nil {\n\t\t\treturn nil, fmt.Errorf(\"Labels directive not supported\")\n\t\t}\n\n\t\tif len(config.Ulimits.Elements) > 0 {\n\t\t\treturn nil, fmt.Errorf(\"Ulimits directive not supported\")\n\t\t}\n\n\t\ttask.ContainerDefinitions = append(task.ContainerDefinitions, &def)\n\t}\n\n\treturn &task, nil\n}\n\nfunc isServiceIncluded(name string, included []string) bool {\n\tif len(included) == 0 {\n\t\treturn true\n\t}\n\tfor _, val := range included {\n\t\tif name == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype envMap map[string][]string\n\nfunc (em envMap) Lookup(key string, config *config.ServiceConfig) []string {\n\tresults := []string{}\n\tfor _, val := range em[key] {\n\t\tresults = append(results, key+\"=\"+val)\n\t}\n\treturn results\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn-host\/types\"\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-dockerclient\"\n\t\"github.com\/flynn\/go-flynn\/cluster\"\n)\n\nvar clusterc *cluster.Client\n\nfunc init() {\n\tvar err error\n\tclusterc, err = cluster.NewClient()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc main() {\n\troot := \"\/var\/lib\/demo\/apps\"\n\n\tservices, _ := discoverd.Services(\"shelf\", discoverd.DefaultTimeout)\n\tif len(services) < 1 {\n\t\tpanic(\"Shelf is not discoverable\")\n\t}\n\tshelfHost := services[0].Addr\n\n\tapp := os.Args[2]\n\tos.MkdirAll(root+\"\/\"+app, 0755)\n\n\tfmt.Printf(\"-----> Building %s...\\n\", app)\n\n\tscheduleAndAttach(cluster.RandomJobID(app+\"-build.\"), docker.Config{\n\t\tImage: \"flynn\/slugbuilder\",\n\t\tCmd: []string{\"http:\/\/\" + shelfHost + \"\/\" + app + \".tgz\"},\n\t\tTty: false,\n\t\tAttachStdin: true,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t\tOpenStdin: true,\n\t\tStdinOnce: true,\n\t})\n\n\tfmt.Printf(\"-----> Deploying %s ...\\n\", app)\n\n\tjobid := cluster.RandomJobID(app + \"-web.\")\n\n\thostid := scheduleWithTcpPort(jobid, docker.Config{\n\t\tImage: \"flynn\/slugrunner\",\n\t\tCmd: []string{\"start\", \"web\"},\n\t\tTty: false,\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t\tOpenStdin: false,\n\t\tStdinOnce: false,\n\t\tEnv: []string{\n\t\t\t\"SLUG_URL=http:\/\/\" + shelfHost + \"\/\" + app + \".tgz\",\n\t\t\t\"SD_NAME=\" + app,\n\t\t},\n\t})\n\n\ttime.Sleep(1 * time.Second)\n\tfmt.Printf(\"=====> Application deployed:\\n\")\n\tfmt.Printf(\" http:\/\/10.0.2.15:%s\\n\", getPort(hostid, jobid))\n\tfmt.Println(\"\")\n\n}\n\nfunc shell(cmdline string) string {\n\tout, err := exec.Command(\"bash\", \"-c\", cmdline).Output()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn strings.Trim(string(out), \" \\n\")\n}\n\nfunc scheduleWithTcpPort(jobid string, config docker.Config) (hostid string) {\n\thostid = randomHost()\n\taddReq := &host.AddJobsReq{\n\t\tIncremental: true,\n\t\tHostJobs: map[string][]*host.Job{hostid: {{ID: jobid, Config: &config, TCPPorts: 1}}},\n\t}\n\tif _, err := clusterc.AddJobs(addReq); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}\n\nfunc getPort(hostid string, jobid string) string {\n\tclient, err := clusterc.ConnectHost(hostid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tjob, err := client.GetJob(jobid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor portspec := range job.Job.Config.ExposedPorts {\n\t\treturn strings.Split(portspec, \"\/\")[0]\n\t}\n\treturn \"\"\n}\n\nfunc randomHost() (hostid string) {\n\thosts, err := clusterc.ListHosts()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor hostid = range hosts {\n\t\tbreak\n\t}\n\tif hostid == \"\" {\n\t\tlog.Fatal(\"no hosts found\")\n\t}\n\treturn\n}\n\nfunc scheduleAndAttach(jobid string, config docker.Config) {\n\thostid := randomHost()\n\n\tclient, err := clusterc.ConnectHost(hostid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tconn, attachWait, err := client.Attach(&host.AttachReq{\n\t\tJobID: jobid,\n\t\tFlags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,\n\t}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\taddReq := &host.AddJobsReq{\n\t\tIncremental: true,\n\t\tHostJobs: map[string][]*host.Job{hostid: {{ID: jobid, Config: &config}}},\n\t}\n\tif _, err := clusterc.AddJobs(addReq); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := attachWait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tio.Copy(conn, os.Stdin)\n\t\tconn.CloseWrite()\n\t}()\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tfmt.Fprintln(os.Stdout, scanner.Text()[8:])\n\t}\n\tconn.Close()\n}\n<commit_msg>receiver: Update flynn-host type usage<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn-host\/types\"\n\t\"github.com\/flynn\/go-discoverd\"\n\t\"github.com\/flynn\/go-dockerclient\"\n\t\"github.com\/flynn\/go-flynn\/cluster\"\n)\n\nvar clusterc *cluster.Client\n\nfunc init() {\n\tvar err error\n\tclusterc, err = cluster.NewClient()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc main() {\n\troot := \"\/var\/lib\/demo\/apps\"\n\n\tservices, _ := discoverd.Services(\"shelf\", discoverd.DefaultTimeout)\n\tif len(services) < 1 {\n\t\tpanic(\"Shelf is not discoverable\")\n\t}\n\tshelfHost := services[0].Addr\n\n\tapp := os.Args[2]\n\tos.MkdirAll(root+\"\/\"+app, 0755)\n\n\tfmt.Printf(\"-----> Building %s...\\n\", app)\n\n\tscheduleAndAttach(cluster.RandomJobID(app+\"-build.\"), docker.Config{\n\t\tImage: \"flynn\/slugbuilder\",\n\t\tCmd: []string{\"http:\/\/\" + shelfHost + \"\/\" + app + \".tgz\"},\n\t\tTty: false,\n\t\tAttachStdin: true,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t\tOpenStdin: true,\n\t\tStdinOnce: true,\n\t})\n\n\tfmt.Printf(\"-----> Deploying %s ...\\n\", app)\n\n\tjobid := cluster.RandomJobID(app + \"-web.\")\n\n\thostid := scheduleWithTcpPort(jobid, docker.Config{\n\t\tImage: \"flynn\/slugrunner\",\n\t\tCmd: []string{\"start\", \"web\"},\n\t\tTty: false,\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t\tOpenStdin: false,\n\t\tStdinOnce: false,\n\t\tEnv: []string{\n\t\t\t\"SLUG_URL=http:\/\/\" + shelfHost + \"\/\" + app + \".tgz\",\n\t\t\t\"SD_NAME=\" + app,\n\t\t},\n\t})\n\n\ttime.Sleep(1 * time.Second)\n\tfmt.Printf(\"=====> Application deployed:\\n\")\n\tfmt.Printf(\" http:\/\/10.0.2.15:%s\\n\", getPort(hostid, jobid))\n\tfmt.Println(\"\")\n\n}\n\nfunc shell(cmdline string) string {\n\tout, err := exec.Command(\"bash\", \"-c\", cmdline).Output()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn strings.Trim(string(out), \" \\n\")\n}\n\nfunc scheduleWithTcpPort(jobid string, config docker.Config) (hostid string) {\n\thostid = randomHost()\n\taddReq := &host.AddJobsReq{\n\t\tHostJobs: map[string][]*host.Job{hostid: {{ID: jobid, Config: &config, TCPPorts: 1}}},\n\t}\n\tif _, err := clusterc.AddJobs(addReq); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}\n\nfunc getPort(hostid string, jobid string) string {\n\tclient, err := clusterc.ConnectHost(hostid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tjob, err := client.GetJob(jobid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor portspec := range job.Job.Config.ExposedPorts {\n\t\treturn strings.Split(portspec, \"\/\")[0]\n\t}\n\treturn \"\"\n}\n\nfunc randomHost() (hostid string) {\n\thosts, err := clusterc.ListHosts()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor hostid = range hosts {\n\t\tbreak\n\t}\n\tif hostid == \"\" {\n\t\tlog.Fatal(\"no hosts found\")\n\t}\n\treturn\n}\n\nfunc scheduleAndAttach(jobid string, config docker.Config) {\n\thostid := randomHost()\n\n\tclient, err := clusterc.ConnectHost(hostid)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tconn, attachWait, err := client.Attach(&host.AttachReq{\n\t\tJobID: jobid,\n\t\tFlags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,\n\t}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\taddReq := &host.AddJobsReq{\n\t\tHostJobs: map[string][]*host.Job{hostid: {{ID: jobid, Config: &config}}},\n\t}\n\tif _, err := clusterc.AddJobs(addReq); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := attachWait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tio.Copy(conn, os.Stdin)\n\t\tconn.CloseWrite()\n\t}()\n\tscanner := bufio.NewScanner(conn)\n\tfor scanner.Scan() {\n\t\tfmt.Fprintln(os.Stdout, scanner.Text()[8:])\n\t}\n\tconn.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package fs\n\n\/\/ SizeSuffix is parsed by flag with k\/M\/G suffixes\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ SizeSuffix is an int64 with a friendly way of printing setting\ntype SizeSuffix int64\n\n\/\/ Turn SizeSuffix into a string and a suffix\nfunc (x SizeSuffix) string() (string, string) {\n\tscaled := float64(0)\n\tsuffix := \"\"\n\tswitch {\n\tcase x < 0:\n\t\treturn \"off\", \"\"\n\tcase x == 0:\n\t\treturn \"0\", \"\"\n\tcase x < 1<<10:\n\t\tscaled = float64(x)\n\t\tsuffix = \"\"\n\tcase x < 1<<20:\n\t\tscaled = float64(x) \/ (1 << 10)\n\t\tsuffix = \"k\"\n\tcase x < 1<<30:\n\t\tscaled = float64(x) \/ (1 << 20)\n\t\tsuffix = \"M\"\n\tcase x < 1<<40:\n\t\tscaled = float64(x) \/ (1 << 30)\n\t\tsuffix = \"G\"\n\tcase x < 1<<50:\n\t\tscaled = float64(x) \/ (1 << 40)\n\t\tsuffix = \"T\"\n\tdefault:\n\t\tscaled = float64(x) \/ (1 << 50)\n\t\tsuffix = \"P\"\n\t}\n\tif math.Floor(scaled) == scaled {\n\t\treturn fmt.Sprintf(\"%.0f\", scaled), suffix\n\t}\n\treturn fmt.Sprintf(\"%.3f\", scaled), suffix\n}\n\n\/\/ String turns SizeSuffix into a string\nfunc (x SizeSuffix) String() string {\n\tval, suffix := x.string()\n\treturn val + suffix\n}\n\n\/\/ Unit turns SizeSuffix into a string with a unit\nfunc (x SizeSuffix) Unit(unit string) string {\n\tval, suffix := x.string()\n\tif val == \"off\" {\n\t\treturn val\n\t}\n\treturn val + \" \" + suffix + unit\n}\n\n\/\/ Set a SizeSuffix\nfunc (x *SizeSuffix) Set(s string) error {\n\tif len(s) == 0 {\n\t\treturn errors.New(\"empty string\")\n\t}\n\tif strings.ToLower(s) == \"off\" {\n\t\t*x = -1\n\t\treturn nil\n\t}\n\tsuffix := s[len(s)-1]\n\tsuffixLen := 1\n\tvar multiplier float64\n\tswitch suffix {\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':\n\t\tsuffixLen = 0\n\t\tmultiplier = 1 << 10\n\tcase 'b', 'B':\n\t\tmultiplier = 1\n\tcase 'k', 'K':\n\t\tmultiplier = 1 << 10\n\tcase 'm', 'M':\n\t\tmultiplier = 1 << 20\n\tcase 'g', 'G':\n\t\tmultiplier = 1 << 30\n\tcase 't', 'T':\n\t\tmultiplier = 1 << 40\n\tcase 'p', 'P':\n\t\tmultiplier = 1 << 50\n\tdefault:\n\t\treturn errors.Errorf(\"bad suffix %q\", suffix)\n\t}\n\ts = s[:len(s)-suffixLen]\n\tvalue, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif value < 0 {\n\t\treturn errors.Errorf(\"size can't be negative %q\", s)\n\t}\n\tvalue *= multiplier\n\t*x = SizeSuffix(value)\n\treturn nil\n}\n\n\/\/ Type of the value\nfunc (x *SizeSuffix) Type() string {\n\treturn \"int64\"\n}\n\n\/\/ Scan implements the fmt.Scanner interface\nfunc (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {\n\ttoken, err := s.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn x.Set(string(token))\n}\n<commit_msg>fs: add multipliers for SizeSuffix<commit_after>package fs\n\n\/\/ SizeSuffix is parsed by flag with k\/M\/G suffixes\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ SizeSuffix is an int64 with a friendly way of printing setting\ntype SizeSuffix int64\n\n\/\/ Common multipliers for SizeSuffix\nconst (\n\tByte SizeSuffix = 1 << (iota * 10)\n\tKibiByte\n\tMebiByte\n\tGibiByte\n\tTebiByte\n\tPebiByte\n\tExbiByte\n)\n\n\/\/ Turn SizeSuffix into a string and a suffix\nfunc (x SizeSuffix) string() (string, string) {\n\tscaled := float64(0)\n\tsuffix := \"\"\n\tswitch {\n\tcase x < 0:\n\t\treturn \"off\", \"\"\n\tcase x == 0:\n\t\treturn \"0\", \"\"\n\tcase x < 1<<10:\n\t\tscaled = float64(x)\n\t\tsuffix = \"\"\n\tcase x < 1<<20:\n\t\tscaled = float64(x) \/ (1 << 10)\n\t\tsuffix = \"k\"\n\tcase x < 1<<30:\n\t\tscaled = float64(x) \/ (1 << 20)\n\t\tsuffix = \"M\"\n\tcase x < 1<<40:\n\t\tscaled = float64(x) \/ (1 << 30)\n\t\tsuffix = \"G\"\n\tcase x < 1<<50:\n\t\tscaled = float64(x) \/ (1 << 40)\n\t\tsuffix = \"T\"\n\tdefault:\n\t\tscaled = float64(x) \/ (1 << 50)\n\t\tsuffix = \"P\"\n\t}\n\tif math.Floor(scaled) == scaled {\n\t\treturn fmt.Sprintf(\"%.0f\", scaled), suffix\n\t}\n\treturn fmt.Sprintf(\"%.3f\", scaled), suffix\n}\n\n\/\/ String turns SizeSuffix into a string\nfunc (x SizeSuffix) String() string {\n\tval, suffix := x.string()\n\treturn val + suffix\n}\n\n\/\/ Unit turns SizeSuffix into a string with a unit\nfunc (x SizeSuffix) Unit(unit string) string {\n\tval, suffix := x.string()\n\tif val == \"off\" {\n\t\treturn val\n\t}\n\treturn val + \" \" + suffix + unit\n}\n\n\/\/ Set a SizeSuffix\nfunc (x *SizeSuffix) Set(s string) error {\n\tif len(s) == 0 {\n\t\treturn errors.New(\"empty string\")\n\t}\n\tif strings.ToLower(s) == \"off\" {\n\t\t*x = -1\n\t\treturn nil\n\t}\n\tsuffix := s[len(s)-1]\n\tsuffixLen := 1\n\tvar multiplier float64\n\tswitch suffix {\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':\n\t\tsuffixLen = 0\n\t\tmultiplier = 1 << 10\n\tcase 'b', 'B':\n\t\tmultiplier = 1\n\tcase 'k', 'K':\n\t\tmultiplier = 1 << 10\n\tcase 'm', 'M':\n\t\tmultiplier = 1 << 20\n\tcase 'g', 'G':\n\t\tmultiplier = 1 << 30\n\tcase 't', 'T':\n\t\tmultiplier = 1 << 40\n\tcase 'p', 'P':\n\t\tmultiplier = 1 << 50\n\tdefault:\n\t\treturn errors.Errorf(\"bad suffix %q\", suffix)\n\t}\n\ts = s[:len(s)-suffixLen]\n\tvalue, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif value < 0 {\n\t\treturn errors.Errorf(\"size can't be negative %q\", s)\n\t}\n\tvalue *= multiplier\n\t*x = SizeSuffix(value)\n\treturn nil\n}\n\n\/\/ Type of the value\nfunc (x *SizeSuffix) Type() string {\n\treturn \"int64\"\n}\n\n\/\/ Scan implements the fmt.Scanner interface\nfunc (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {\n\ttoken, err := s.Token(true, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn x.Set(string(token))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The go-vm Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage fusion\n\nimport \"github.com\/go-vm\/vmware\"\n\nconst app = \"fusion\"\n\n\/\/ Start start a VM or Team.\nfunc Start(vmx string, gui bool) error {\n\tcmd := vmware.VMRun(app, \"start\", vmx)\n\n\tif gui {\n\t\tcmd.Args = append(cmd.Args, \"gui\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"nogui\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Stop stop a VM or Team.\nfunc Stop(vmx string, force bool) error {\n\tcmd := vmware.VMRun(app, \"stop\", vmx)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Reset reset a VM or Team.\nfunc Reset(vmx string, force bool) error {\n\tcmd := vmware.VMRun(app, \"reset\", vmx)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Suspend Suspend a VM or Team\nfunc Suspend(vmx string, force bool) error {\n\tcmd := vmware.VMRun(app, \"suspend\", vmx)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Pause pause a VM.\nfunc Pause(vmx string) error {\n\tcmd := vmware.VMRun(app, \"pause\", vmx)\n\n\treturn cmd.Run()\n}\n\n\/\/ Unpause unpause a VM.\nfunc Unpause(vmx string) error {\n\tcmd := vmware.VMRun(app, \"unpause\", vmx)\n\n\treturn cmd.Run()\n}\n<commit_msg>fusion: rename vmx path to vmwarevm<commit_after>\/\/ Copyright 2017 The go-vm Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage fusion\n\nimport \"github.com\/go-vm\/vmware\"\n\nconst app = \"fusion\"\n\n\/\/ Start start a VM or Team.\nfunc Start(vmwarevm string, gui bool) error {\n\tcmd := vmware.VMRun(app, \"start\", vmwarevm)\n\n\tif gui {\n\t\tcmd.Args = append(cmd.Args, \"gui\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"nogui\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Stop stop a VM or Team.\nfunc Stop(vmwarevm string, force bool) error {\n\tcmd := vmware.VMRun(app, \"stop\", vmwarevm)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Reset reset a VM or Team.\nfunc Reset(vmwarevm string, force bool) error {\n\tcmd := vmware.VMRun(app, \"reset\", vmwarevm)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Suspend Suspend a VM or Team\nfunc Suspend(vmwarevm string, force bool) error {\n\tcmd := vmware.VMRun(app, \"suspend\", vmwarevm)\n\n\tif force {\n\t\tcmd.Args = append(cmd.Args, \"hard\")\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"soft\")\n\t}\n\n\treturn cmd.Run()\n}\n\n\/\/ Pause pause a VM.\nfunc Pause(vmwarevm string) error {\n\tcmd := vmware.VMRun(app, \"pause\", vmwarevm)\n\n\treturn cmd.Run()\n}\n\n\/\/ Unpause unpause a VM.\nfunc Unpause(vmwarevm string) error {\n\tcmd := vmware.VMRun(app, \"unpause\", vmwarevm)\n\n\treturn cmd.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package qshell\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/qiniu\/api\/auth\/digest\"\n\t\"github.com\/qiniu\/api\/rsf\"\n\t\"github.com\/qiniu\/log\"\n\t\"io\"\n\t\"os\"\n)\n\ntype ListBucket struct {\n\tAccount\n}\n\nfunc (this *ListBucket) List(bucket string, prefix string, listResultFile string) {\n\tfp, openErr := os.OpenFile(listResultFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)\n\tif openErr != nil {\n\t\tlog.Error(fmt.Sprintf(\"Failed to open list result file `%s'\", listResultFile))\n\t\treturn\n\t}\n\tdefer fp.Close()\n\tbw := bufio.NewWriter(fp)\n\n\tmac := digest.Mac{this.AccessKey, []byte(this.SecretKey)}\n\tclient := rsf.New(&mac)\n\tmarker := \"\"\n\tlimit := 1000\n\trun := true\n\tfor run {\n\t\tentries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\trun = false\n\t\t\t}\n\t\t} else {\n\t\t\tif markerOut == \"\" {\n\t\t\t\trun = false\n\t\t\t} else {\n\t\t\t\tmarker = markerOut\n\t\t\t}\n\t\t}\n\t\t\/\/append entries\n\t\tfor _, entry := range entries {\n\t\t\tlineData := fmt.Sprintf(\"%s\\t%d\\r\\n\", entry.Key, entry.Fsize)\n\t\t\t_, wErr := bw.WriteString(lineData)\n\t\t\tif wErr != nil {\n\t\t\t\tlog.Error(fmt.Sprintf(\"Write line data `%s' to list result file failed.\", lineData))\n\t\t\t}\n\t\t\tfErr := bw.Flush()\n\t\t\tif fErr != nil {\n\t\t\t\tlog.Error(\"Flush data to list result file error\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Add retry times for list function.<commit_after>package qshell\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/qiniu\/api\/auth\/digest\"\n\t\"github.com\/qiniu\/api\/rsf\"\n\t\"github.com\/qiniu\/log\"\n\t\"io\"\n\t\"os\"\n)\n\ntype ListBucket struct {\n\tAccount\n}\n\nfunc (this *ListBucket) List(bucket string, prefix string, listResultFile string) {\n\tfp, openErr := os.OpenFile(listResultFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)\n\tif openErr != nil {\n\t\tlog.Error(fmt.Sprintf(\"Failed to open list result file `%s'\", listResultFile))\n\t\treturn\n\t}\n\tdefer fp.Close()\n\tbw := bufio.NewWriter(fp)\n\n\tmac := digest.Mac{this.AccessKey, []byte(this.SecretKey)}\n\tclient := rsf.New(&mac)\n\tmarker := \"\"\n\tlimit := 1000\n\trun := true\n\tmaxRetryTimes := 5\n\tretryTimes := 1\n\tfor run {\n\t\tentries, markerOut, err := client.ListPrefix(nil, bucket, prefix, marker, limit)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\trun = false\n\t\t\t} else {\n\t\t\t\tlog.Error(fmt.Sprintf(\"List error for marker `%s'\", marker), err)\n\t\t\t\tif retryTimes <= maxRetryTimes {\n\t\t\t\t\tlog.Info(fmt.Sprintf(\"Retry list for marker `%s' for `%d' time\", marker, retryTimes))\n\t\t\t\t\tretryTimes += 1\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(fmt.Sprintf(\"List failed too many times for `%s'\", marker))\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tretryTimes = 1\n\t\t\tif markerOut == \"\" {\n\t\t\t\trun = false\n\t\t\t} else {\n\t\t\t\tmarker = markerOut\n\t\t\t}\n\t\t}\n\t\t\/\/append entries\n\t\tfor _, entry := range entries {\n\t\t\tlineData := fmt.Sprintf(\"%s\\t%d\\r\\n\", entry.Key, entry.Fsize)\n\t\t\t_, wErr := bw.WriteString(lineData)\n\t\t\tif wErr != nil {\n\t\t\t\tlog.Error(fmt.Sprintf(\"Write line data `%s' to list result file failed.\", lineData))\n\t\t\t}\n\t\t\tfErr := bw.Flush()\n\t\t\tif fErr != nil {\n\t\t\t\tlog.Error(\"Flush data to list result file error\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"logarchive\"\n\t\"fmt\"\n\t\"os\"\n\t\"flag\"\n\t\"sync\"\n\t\"errors\"\n\t\"regexp\"\n\t\"time\"\n\t\"runtime\"\n)\n\nvar (\n\tERRPARAMS = errors.New(\"error params\")\n)\n\nconst (\n\tVERSION = \"1.0.0\"\n\tOK = \"OK\"\n)\n\nvar optionListen = flag.String(\"listen\", \":6379\", `server listen path, e.g \":6379\" or \"\/var\/run\/logserver.sock\"`)\nvar optionDir = flag.String(\"dir\", \".\/data\", `root directory for logs data`)\nvar optionVerbose = flag.Int(\"verbose\", 0, `show run details`)\nvar optionTimeout = flag.Uint(\"timeout\", 60, \"timeout to close opened files\")\n\ntype LogFile struct {\n\tName string\n\tC chan int\n\tHandle *os.File\n}\n\nvar LogFiles map[string]LogFile = make(map[string]LogFile)\n\nfunc OpenLogFileForWrite(fileName string) (*os.File, error) {\n\tif err := logarchive.MkdirpByFileName(fileName); err != nil {\n\t\tlogarchive.Debugf(\"failed to open log file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf, err := os.OpenFile(fileName, os.O_CREATE | os.O_WRONLY | os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlogarchive.Debugf(\"failed to open log file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn f, nil\n}\n\ntype LocalRedisFileHandler struct {\n\tlogarchive.RedisHandler\n\tsync.Mutex\n}\n\nfunc (this *LocalRedisFileHandler) Init(db int) (error) {\n\n\treturn nil\n}\n\nfunc (this *LocalRedisFileHandler) Shutdown() (error) {\n\tfor _, v := range LogFiles {\n\t\tv.Handle.Close()\n\t\tclose(v.C)\n\t}\n\n\treturn nil\n}\n\nfunc (this *LocalRedisFileHandler) Select(db int) (string, error) {\n\treturn OK, nil\n}\n\nfunc (this *LocalRedisFileHandler) Version() (string, error) {\n\treturn VERSION, nil\n}\n\nfunc (this *LocalRedisFileHandler) FlushAll() (string, error) {\n\treturn OK, nil\n}\n\nfunc (this *LocalRedisFileHandler) FlushDB(db int) (string, error) {\n\treturn \"\", ERRPARAMS\n}\n\nfunc (this *LocalRedisFileHandler) Set(fileName string, lineContent string) (string, error) {\n\tmatched, _ := regexp.MatchString(\"^[a-zA-Z\\\\-_:]{1,}$\", fileName)\n\tif !matched {\n\t\treturn \"\", ERRPARAMS\n\t}\n\n\tthis.Lock()\n\tdefer this.Unlock()\n\n\tfile := LogFiles[fileName]\n\n\tif file.Name != fileName {\n\t\thandle, err := OpenLogFileForWrite(fmt.Sprintf(\"%s\/%s\", this.Config[\"path\"].(string), fileName))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfile = LogFile{\n\t\t\tName : fileName,\n\t\t\tHandle : handle,\n\t\t\tC : make(chan int),\n\t\t}\n\n\t\tlogarchive.Debugf(\"reopen %s\", fileName)\n\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-file.C:\n\t\t\t\tcase <-time.After(time.Second * time.Duration(this.Config[\"timeout\"].(uint))):\n\t\t\t\t\tlogarchive.Debugf(\"timeout:%s\", fileName)\n\t\t\t\t\tthis.Lock()\n\t\t\t\t\tdefer this.Unlock()\n\t\t\t\t\tfile.Handle.Close()\n\t\t\t\t\tdelete(LogFiles, fileName)\n\t\t\t\t\tclose(file.C)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tLogFiles[fileName] = file\n\t}\n\n\tfile.C <- 1\n\tn, err := file.Handle.WriteString(lineContent + \"\\n\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif n != len(lineContent) + 1 {\n\t\treturn \"\", errors.New(\"not full write content to file\")\n\t}\n\n\treturn OK, nil\n}\n\nfunc usage() {\n\tfmt.Printf(\"%s\\n\", `\nUsage: redisFielServer [options]\nOptions:\n\t`)\n\tflag.PrintDefaults()\n\tos.Exit(0)\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tv, err := logarchive.GetExistsAbsolutePath(*optionDir)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tif (*optionVerbose == 1) {\n\t\tos.Setenv(\"DEBUG\", \"ok\")\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlrh := &LocalRedisFileHandler{}\n\n\tlrh.SetConfig(map[string]interface{}{\n\t\t\"path\" : v,\n\t\t\"timeout\" : *optionTimeout,\n\t})\n\n\tlrh.SetShield(\"Init\")\n\tlrh.SetShield(\"Shutdown\")\n\tlrh.SetShield(\"Lock\")\n\tlrh.SetShield(\"Unlock\")\n\tlrh.SetShield(\"SetShield\")\n\tlrh.SetShield(\"SetConfig\")\n\tlrh.SetShield(\"CheckShield\")\n\n\tser, err := logarchive.NewServer(*optionListen, lrh)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tser.ListenAndServe()\n\n\tlrh.Shutdown()\n}\n<commit_msg>optimize<commit_after>package main\n\nimport (\n\t\"logarchive\"\n\t\"fmt\"\n\t\"os\"\n\t\"flag\"\n\t\"sync\"\n\t\"errors\"\n\t\"regexp\"\n\t\"time\"\n\t\"runtime\"\n)\n\nvar (\n\tERRPARAMS = errors.New(\"error params\")\n)\n\nconst (\n\tVERSION = \"1.0.0\"\n\tOK = \"OK\"\n)\n\nvar optionListen = flag.String(\"listen\", \":6379\", `server listen path, e.g \":6379\" or \"\/var\/run\/logserver.sock\"`)\nvar optionDir = flag.String(\"dir\", \".\/data\", `root directory for logs data`)\nvar optionVerbose = flag.Int(\"verbose\", 1, `show run details`)\nvar optionTimeout = flag.Uint(\"timeout\", 60, \"timeout to close opened files\")\n\ntype LogFile struct {\n\tName string\n\tC chan int\n\tHandle *os.File\n}\n\nvar LogFiles map[string]LogFile = make(map[string]LogFile)\n\nfunc OpenLogFileForWrite(fileName string) (*os.File, error) {\n\tif err := logarchive.MkdirpByFileName(fileName); err != nil {\n\t\tlogarchive.Debugf(\"failed to open log file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tf, err := os.OpenFile(fileName, os.O_CREATE | os.O_WRONLY | os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlogarchive.Debugf(\"failed to open log file: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn f, nil\n}\n\ntype LocalRedisFileHandler struct {\n\tlogarchive.RedisHandler\n\tsync.Mutex\n}\n\nfunc (this *LocalRedisFileHandler) Init(db int) (error) {\n\n\treturn nil\n}\n\nfunc (this *LocalRedisFileHandler) Shutdown() (error) {\n\tfor _, v := range LogFiles {\n\t\tv.Handle.Close()\n\t\tclose(v.C)\n\t}\n\n\treturn nil\n}\n\nfunc (this *LocalRedisFileHandler) Select(db int) (string, error) {\n\treturn OK, nil\n}\n\nfunc (this *LocalRedisFileHandler) Version() (string, error) {\n\treturn VERSION, nil\n}\n\nfunc (this *LocalRedisFileHandler) FlushAll() (string, error) {\n\treturn OK, nil\n}\n\nfunc (this *LocalRedisFileHandler) FlushDB(db int) (string, error) {\n\treturn \"\", ERRPARAMS\n}\n\nfunc (this *LocalRedisFileHandler) Set(fileName string, lineContent string) (string, error) {\n\tmatched, _ := regexp.MatchString(\"^[a-zA-Z\\\\-_:]{1,}$\", fileName)\n\tif !matched {\n\t\treturn \"\", ERRPARAMS\n\t}\n\n\tthis.Lock()\n\tdefer this.Unlock()\n\n\tfile, ok := LogFiles[fileName]\n\tif !ok || file.Name != fileName {\n\t\thandle, err := OpenLogFileForWrite(fmt.Sprintf(\"%s\/%s\", this.Config[\"path\"].(string), fileName))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfile = LogFile{\n\t\t\tName : fileName,\n\t\t\tHandle : handle,\n\t\t\tC : make(chan int),\n\t\t}\n\n\t\tlogarchive.Debugf(\"reopen %s\", fileName)\n\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-file.C:\n\t\t\t\tcase <-time.After(time.Second * time.Duration(this.Config[\"timeout\"].(uint))):\n\t\t\t\t\tlogarchive.Debugf(\"timeout:%s\", fileName)\n\t\t\t\t\tthis.Lock()\n\t\t\t\t\tdefer this.Unlock()\n\t\t\t\t\tfile.Handle.Close()\n\t\t\t\t\tdelete(LogFiles, fileName)\n\t\t\t\t\tclose(file.C)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tLogFiles[fileName] = file\n\t}\n\n\tfile.C <- 1\n\tn, err := file.Handle.WriteString(lineContent + \"\\n\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif n != len(lineContent) + 1 {\n\t\treturn \"\", errors.New(\"not full write content to file\")\n\t}\n\n\treturn OK, nil\n}\n\nfunc usage() {\n\tfmt.Printf(\"%s\\n\", `\nUsage: redisFielServer [options]\nOptions:\n\t`)\n\tflag.PrintDefaults()\n\tos.Exit(0)\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tv, err := logarchive.GetExistsAbsolutePath(*optionDir)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tif (*optionVerbose == 1) {\n\t\tos.Setenv(\"DEBUG\", \"ok\")\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlrh := &LocalRedisFileHandler{}\n\n\tlrh.SetConfig(map[string]interface{}{\n\t\t\"path\" : v,\n\t\t\"timeout\" : *optionTimeout,\n\t})\n\n\tlrh.SetShield(\"Init\")\n\tlrh.SetShield(\"Shutdown\")\n\tlrh.SetShield(\"Lock\")\n\tlrh.SetShield(\"Unlock\")\n\tlrh.SetShield(\"SetShield\")\n\tlrh.SetShield(\"SetConfig\")\n\tlrh.SetShield(\"CheckShield\")\n\n\tser, err := logarchive.NewServer(*optionListen, lrh)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tser.ListenAndServe()\n\n\tlrh.Shutdown()\n}\n<|endoftext|>"} {"text":"<commit_before>package boardgame\n\nimport (\n\t\"github.com\/jkomoros\/boardgame\/enum\"\n)\n\n\/\/GameDelegate is the key entrypoint for the game logic specific to the game\n\/\/you are defining. Think of it is as the brain that is inserted into the\n\/\/robot shell of GameManager to imbue it with life. Typically your package\n\/\/that defines your game will have one public entrypoint, which is to return a\n\/\/GameDelegate for that packge. All logic specific to your game is configured\n\/\/via the return values of various methods in your GameDelegate. Your\n\/\/GameDelegate defines configuration for the type of game in general (via\n\/\/Configure* methods), as well as lifecycle methods for specific games (e.g.\n\/\/DistributeComponentToStarterStack). base.GameDelegate is a useful base\n\/\/struct to embed in your own GameDelegate, providing reasonable default\n\/\/behavior for nearly every method in GameDelegate.\ntype GameDelegate interface {\n\n\t\/\/Name is a string that defines the type of game this is. This must return\n\t\/\/the package name that contains the game (e.g.\n\t\/\/\"github.com\/jkomoros\/mygame\" should return \"mygame\"), since the package\n\t\/\/name and the delegate.Name() are both used at different times in the\n\t\/\/system, since one can be determined statically and the other only at\n\t\/\/run-time. NewGameManager will fail if that is not true.The name should\n\t\/\/be unique and compact since it will sometimes be used in a URL path.\n\t\/\/Good examples are \"tictactoe\", \"blackjack\". Once configured, names\n\t\/\/should never change over the lifetime of the gametype, since it will be\n\t\/\/persisted in storage.\n\tName() string\n\n\t\/\/DisplayName is a string that defines the type of game this is in a way\n\t\/\/appropriate for humans. The name should be unique but human readable. It\n\t\/\/is purely for human consumption, and may change over time with no\n\t\/\/adverse effects. Good examples are \"Tic Tac Toe\", \"Blackjack\".\n\t\/\/Subclasses should override this.\n\tDisplayName() string\n\n\t\/\/Description is a string that describes the game type in a descriptive\n\t\/\/sentence, for use in showing to end users. A reasonable value for\n\t\/\/\"tictactoe\" is \"A classic game where players compete to get three in a\n\t\/\/row\"\n\tDescription() string\n\n\t\/\/ConfigureMoves will be called during creation of a GameManager in\n\t\/\/NewGameManager. This is the time to install moves onto the manager by\n\t\/\/returning a list of moves to install. This is the single most important\n\t\/\/configuration point for your game logic, as the collection of moves for\n\t\/\/the game--and the logic of when they apply--is the bedrock of your game\n\t\/\/logic. Typically you use moves.Combine and friends to organize your list\n\t\/\/of moves to install. If the moves you add are illegal for any reason,\n\t\/\/NewGameManager will fail with an error. By the time this is called.\n\t\/\/delegate.SetManager will already have been called, so you'll have access\n\t\/\/to the manager via Manager().\n\tConfigureMoves() []MoveConfig\n\n\t\/\/ConfigureAgents will be called when creating a new GameManager. Emit the\n\t\/\/agents you want to install.\n\tConfigureAgents() []Agent\n\n\t\/\/ConfigureDecks will be called when the GameManager is being booted up.\n\t\/\/Each entry in the return value will be added to the ComponentChest that\n\t\/\/is being created for this game type. This method is where you create\n\t\/\/individual decks via NewDeck and associate the right underlying\n\t\/\/ComponentValues with each component via AddComponent.\n\tConfigureDecks() map[string]*Deck\n\n\t\/\/ConfigureEnums is called during set up of a new GameManager. Return the\n\t\/\/set of enums you want to be associated with this GameManagaer's Chest.\n\t\/\/`boardgame-util codegen` will often generate this automatically for you.\n\tConfigureEnums() *enum.Set\n\n\t\/\/ConfigureConstants is called during set-up of a new GameManager. Return\n\t\/\/the map of constants you want to create, which will be configured onto\n\t\/\/the newly created chest. If any of the constants cannot be added to the\n\t\/\/ComponentChest, errors, the GameManager will fail to be set up.\n\t\/\/Constants are primarily useful in two cases: first, when you want to\n\t\/\/have access to a constant value client-side, and second, when you want\n\t\/\/to be able to use a constant value in a struct tag provided as an\n\t\/\/instruction for a StructInflater.\n\tConfigureConstants() PropertyCollection\n\n\t\/\/GameStateConstructor and PlayerStateConstructor are called to get an\n\t\/\/instantiation of the concrete game\/player structs that your package\n\t\/\/defines. This is used both to create the initial state, but also to\n\t\/\/inflate states from the database. These methods should always return the\n\t\/\/underlying same type of struct when called. This means that if different\n\t\/\/players have very different roles in a game, there might be many\n\t\/\/properties that are not in use for any given player. The simple\n\t\/\/properties (ints, bools, strings) should all be their zero-value.\n\t\/\/Importantly, all Stacks, Timers, and Enums should be non- nil, because\n\t\/\/an initialized struct contains information about things like MaxSize,\n\t\/\/Size, and a reference to the deck they are affiliated with. GameManger\n\t\/\/will automatically create and use StructInflaters for these types of\n\t\/\/objects, allowing you to use tag-based configuration to automatically\n\t\/\/inflate these properties. See the documentation for StructInflater for\n\t\/\/more.\n\tGameStateConstructor() ConfigurableSubState\n\t\/\/PlayerStateConstructor is similar to GameStateConstructor, but\n\t\/\/playerIndex is the value that this PlayerState must return when its\n\t\/\/PlayerIndex() is called.\n\tPlayerStateConstructor(player PlayerIndex) ConfigurablePlayerState\n\n\t\/\/DynamicComponentValuesConstructor returns an empty\n\t\/\/DynamicComponentValues for the given deck. DynamicComponentValues are\n\t\/\/useful for representing when a given component has mutable properties\n\t\/\/associated with it--for example, if a given card could have a stack of\n\t\/\/tokens on top, the stack of tokens would be a property on a\n\t\/\/DynamicComponentValues associated with that card component. If nil is\n\t\/\/returned, then the components in that deck don't have any dynamic\n\t\/\/component state. This method must always return the same underlying type\n\t\/\/of struct for the same deck. Like GameStateConstructor and\n\t\/\/PlayerStateConstructor, the engine will automatically create\n\t\/\/StructInflaters for these objects, allowing you to use tag-based\n\t\/\/inflation of properties. See StructInflate for more. If the returned\n\t\/\/object also implements the ComponentValues interface, then\n\t\/\/SetContainingComponent will be called on the DynamicComponent whenever\n\t\/\/one is created, with a reference back to the component it's associated\n\t\/\/with.\n\tDynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState\n\n\t\/\/DistributeComponentToStarterStack is called during set up of a given\n\t\/\/Game to establish the Deck\/Stack invariant that every component in the\n\t\/\/chest is placed in precisely one Stack. Game will call this on each\n\t\/\/component in the Chest in order. This is where the logic goes to make\n\t\/\/sure each Component goes into its correct starter stack. You must return\n\t\/\/a non-nil Stack for each call, after which the given Component will be\n\t\/\/inserted into NextSlotIndex of that stack. If that is not the ordering\n\t\/\/you desire, you can fix it up in FinishSetUp by using SwapComponents. If\n\t\/\/any errors are returned, any nil Stacks are returned, or any returned\n\t\/\/stacks don't have space for another component, NewGame will fail and\n\t\/\/return an error. State and Component are only provided for reference; do\n\t\/\/not modify them.\n\tDistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error)\n\n\t\/\/BeginSetup is called on a newly created Game before components are\n\t\/\/distributed via DistributeComponentToStarterStack. If you need to modify\n\t\/\/your state before components are distributed, do it here. It is also\n\t\/\/where the variant configuration for your gametype will be passed (it\n\t\/\/will already have been checked for legality and had all configure\n\t\/\/defaults set), although you can also retrieve that at any time via\n\t\/\/game.Variant(). This is a good place to configure state that will be\n\t\/\/necessary for you to make the right decisions in\n\t\/\/DistributeComponentToStarterStack, or to transcribe config information\n\t\/\/you were passed into properties on your gameState as appropriate. If\n\t\/\/error is non-nil, Game setup will be aborted, with the reasoning\n\t\/\/including the error message provided.\n\tBeginSetUp(state State, variant Variant) error\n\n\t\/\/FinishSetUp is called during NewGame, *after* components have been\n\t\/\/distributed to their StarterStack. This is the last chance to modify the\n\t\/\/state before the game's initial state is considered final. For example,\n\t\/\/if you have a card game this is where you'd make sure the starter draw\n\t\/\/stacks are shuffled. If your game has multiple rounds, or if you don't\n\t\/\/want the game to start with it already set-up (e.g. you want to show\n\t\/\/animations of starter cards being dealt) then it's probably best to do\n\t\/\/most of the logic in a SetUp phase. See the README for more. If error is\n\t\/\/non-nil, Game setup will be aborted, with the reasoning including the\n\t\/\/error message provided.\n\tFinishSetUp(state State) error\n\n\t\/\/CheckGameFinished should return true if the game is finished, and who\n\t\/\/the winners are. Called after every move is applied.\n\tCheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex)\n\n\t\/\/ProposeFixUpMove is called after a move has been applied. It may return\n\t\/\/a FixUp move, which will be applied before any other moves are applied.\n\t\/\/If it returns nil, we may take the next move off of the queue. FixUp\n\t\/\/moves are useful for things like shuffling a discard deck back into a\n\t\/\/draw deck, or other moves that are necessary to get the GameState back\n\t\/\/into reasonable shape. base.GameDelegate's defintion is almost always\n\t\/\/suficient.\n\tProposeFixUpMove(state ImmutableState) Move\n\n\t\/\/DefaultNumPlayers returns the number of users that new games of this\n\t\/\/type default to. For example, for tictactoe, it will be 2. If 0 is\n\t\/\/provided to manager.NewGame(), we wil use this value instead.\n\tDefaultNumPlayers() int\n\n\t\/\/Min\/MaxNumPlayers should return the min and max number of players,\n\t\/\/respectively. The engine doesn't use this directly, instead looking at\n\t\/\/LegalNumPlayers. Typically your LegalNumPlayers will check the given\n\t\/\/number of players is between these two extremes.\n\tMinNumPlayers() int\n\tMaxNumPlayers() int\n\n\t\/\/LegalNumPlayers will be consulted when a new game is created. It should\n\t\/\/return true if the given number of players is legal, and false\n\t\/\/otherwise. If this returns false, the NewGame will fail with an error.\n\t\/\/Game creation will automatically reject a numPlayers that does not\n\t\/\/result in at least one player existing. Generally this is simply\n\t\/\/checking to make sure the number of players is between Min and Max\n\t\/\/(inclusive), although some games could only allow, for example, even\n\t\/\/numbers of players.\n\tLegalNumPlayers(numPlayers int) bool\n\n\t\/\/Variants returns a VariantConfig, which describes the different\n\t\/\/categories of configuration values and the legal values they may take on\n\t\/\/when a new game is created. In general if you want to inspect legal\n\t\/\/variants in your own game logic you shouldn't call this, but instead\n\t\/\/call gameManager.Variants() which will ensure your VariantConfig is\n\t\/\/initalized and memoize the return result.\n\tVariants() VariantConfig\n\n\t\/\/CurrentPlayerIndex returns the index of the \"current\" player for the\n\t\/\/given game state--a notion that is game specific (and sometimes\n\t\/\/inapplicable). If CurrentPlayer doesn't make sense (perhaps the game\n\t\/\/never has a notion of current player, or the type of round that we're in\n\t\/\/has no current player), this should return ObserverPlayerIndex. The\n\t\/\/result of this method is used to power state.CurrentPlayer.\n\tCurrentPlayerIndex(state ImmutableState) PlayerIndex\n\n\t\/\/CurrentPhase returns the phase that the game state is currently in.\n\t\/\/Phase is a formalized convention used in moves.Default to make it easier\n\t\/\/to write fix-up moves that only apply in certain phases, like SetUp. The\n\t\/\/return result is primarily used in moves.Default to check whether it is\n\t\/\/one of the phases in a give Move's LegalPhases. See moves.Default for\n\t\/\/more information. The only use of this method in the main library is\n\t\/\/when generating a MoveStorageRecord.\n\tCurrentPhase(state ImmutableState) int\n\n\t\/\/PhaseEnum returns the enum for game phases (the return values of\n\t\/\/CurrentPhase are expected to be valid enums within that enum). If this\n\t\/\/returns a non-nil enums.TreeEnum, then the state will not be able to be\n\t\/\/saved if CurrentPhase() returns a value that is not a leaf-node. The\n\t\/\/core package doesn't rely on this method directly.\n\tPhaseEnum() enum.Enum\n\n\t\/\/SanitizationPolicy is consulted when sanitizing states. It is called for\n\t\/\/each prop in the state, including the set of groups that this player is\n\t\/\/a mamber of. In practice the default behavior of base.GameDelegate,\n\t\/\/which uses struct tags to figure out the policy, is sufficient and you\n\t\/\/do not need to override this. For more on how sanitization works, see\n\t\/\/the documenation for Policy. The statePropetyRef passed will always have\n\t\/\/the Index properties set to -1, signifying that the returned policy\n\t\/\/applies to all items in the Stack\/Board.\n\tSanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy\n\n\t\/\/If you have computed properties that you want to be included in your\n\t\/\/JSON (for example, for use clientside), export them here by creating a\n\t\/\/dictionary with their values.\n\tComputedGlobalProperties(state ImmutableState) PropertyCollection\n\tComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection\n\n\t\/\/Diagram should return a basic debug rendering of state in multi-line\n\t\/\/ascii art. Useful for debugging. State.Diagram() will reach out to this\n\t\/\/method.\n\tDiagram(s ImmutableState) string\n\n\t\/\/SetManager configures which manager this delegate is in use with. A\n\t\/\/given delegate can only be used by a single manager at a time.\n\tSetManager(manager *GameManager)\n\n\t\/\/Manager returns the Manager that was set on this delegate.\n\tManager() *GameManager\n}\n\n\/\/PropertyCollection is just an alias for map[string]interface{}\ntype PropertyCollection map[string]interface{}\n\n\/\/Copy returns a shallow copy of PropertyCollection\nfunc (p PropertyCollection) Copy() PropertyCollection {\n\tresult := make(PropertyCollection, len(p))\n\tfor key, val := range result {\n\t\tresult[key] = val\n\t}\n\treturn result\n}\n<commit_msg>Fix up documentation for PropertyCollection. Part of #562.<commit_after>package boardgame\n\nimport (\n\t\"github.com\/jkomoros\/boardgame\/enum\"\n)\n\n\/\/GameDelegate is the key entrypoint for the game logic specific to the game\n\/\/you are defining. Think of it is as the brain that is inserted into the\n\/\/robot shell of GameManager to imbue it with life. Typically your package\n\/\/that defines your game will have one public entrypoint, which is to return a\n\/\/GameDelegate for that packge. All logic specific to your game is configured\n\/\/via the return values of various methods in your GameDelegate. Your\n\/\/GameDelegate defines configuration for the type of game in general (via\n\/\/Configure* methods), as well as lifecycle methods for specific games (e.g.\n\/\/DistributeComponentToStarterStack). base.GameDelegate is a useful base\n\/\/struct to embed in your own GameDelegate, providing reasonable default\n\/\/behavior for nearly every method in GameDelegate.\ntype GameDelegate interface {\n\n\t\/\/Name is a string that defines the type of game this is. This must return\n\t\/\/the package name that contains the game (e.g.\n\t\/\/\"github.com\/jkomoros\/mygame\" should return \"mygame\"), since the package\n\t\/\/name and the delegate.Name() are both used at different times in the\n\t\/\/system, since one can be determined statically and the other only at\n\t\/\/run-time. NewGameManager will fail if that is not true.The name should\n\t\/\/be unique and compact since it will sometimes be used in a URL path.\n\t\/\/Good examples are \"tictactoe\", \"blackjack\". Once configured, names\n\t\/\/should never change over the lifetime of the gametype, since it will be\n\t\/\/persisted in storage.\n\tName() string\n\n\t\/\/DisplayName is a string that defines the type of game this is in a way\n\t\/\/appropriate for humans. The name should be unique but human readable. It\n\t\/\/is purely for human consumption, and may change over time with no\n\t\/\/adverse effects. Good examples are \"Tic Tac Toe\", \"Blackjack\".\n\t\/\/Subclasses should override this.\n\tDisplayName() string\n\n\t\/\/Description is a string that describes the game type in a descriptive\n\t\/\/sentence, for use in showing to end users. A reasonable value for\n\t\/\/\"tictactoe\" is \"A classic game where players compete to get three in a\n\t\/\/row\"\n\tDescription() string\n\n\t\/\/ConfigureMoves will be called during creation of a GameManager in\n\t\/\/NewGameManager. This is the time to install moves onto the manager by\n\t\/\/returning a list of moves to install. This is the single most important\n\t\/\/configuration point for your game logic, as the collection of moves for\n\t\/\/the game--and the logic of when they apply--is the bedrock of your game\n\t\/\/logic. Typically you use moves.Combine and friends to organize your list\n\t\/\/of moves to install. If the moves you add are illegal for any reason,\n\t\/\/NewGameManager will fail with an error. By the time this is called.\n\t\/\/delegate.SetManager will already have been called, so you'll have access\n\t\/\/to the manager via Manager().\n\tConfigureMoves() []MoveConfig\n\n\t\/\/ConfigureAgents will be called when creating a new GameManager. Emit the\n\t\/\/agents you want to install.\n\tConfigureAgents() []Agent\n\n\t\/\/ConfigureDecks will be called when the GameManager is being booted up.\n\t\/\/Each entry in the return value will be added to the ComponentChest that\n\t\/\/is being created for this game type. This method is where you create\n\t\/\/individual decks via NewDeck and associate the right underlying\n\t\/\/ComponentValues with each component via AddComponent.\n\tConfigureDecks() map[string]*Deck\n\n\t\/\/ConfigureEnums is called during set up of a new GameManager. Return the\n\t\/\/set of enums you want to be associated with this GameManagaer's Chest.\n\t\/\/`boardgame-util codegen` will often generate this automatically for you.\n\tConfigureEnums() *enum.Set\n\n\t\/\/ConfigureConstants is called during set-up of a new GameManager. Return\n\t\/\/the map of constants you want to create, which will be configured onto\n\t\/\/the newly created chest. If any of the constants cannot be added to the\n\t\/\/ComponentChest, errors, the GameManager will fail to be set up.\n\t\/\/Constants are primarily useful in two cases: first, when you want to\n\t\/\/have access to a constant value client-side, and second, when you want\n\t\/\/to be able to use a constant value in a struct tag provided as an\n\t\/\/instruction for a StructInflater.\n\tConfigureConstants() PropertyCollection\n\n\t\/\/GameStateConstructor and PlayerStateConstructor are called to get an\n\t\/\/instantiation of the concrete game\/player structs that your package\n\t\/\/defines. This is used both to create the initial state, but also to\n\t\/\/inflate states from the database. These methods should always return the\n\t\/\/underlying same type of struct when called. This means that if different\n\t\/\/players have very different roles in a game, there might be many\n\t\/\/properties that are not in use for any given player. The simple\n\t\/\/properties (ints, bools, strings) should all be their zero-value.\n\t\/\/Importantly, all Stacks, Timers, and Enums should be non- nil, because\n\t\/\/an initialized struct contains information about things like MaxSize,\n\t\/\/Size, and a reference to the deck they are affiliated with. GameManger\n\t\/\/will automatically create and use StructInflaters for these types of\n\t\/\/objects, allowing you to use tag-based configuration to automatically\n\t\/\/inflate these properties. See the documentation for StructInflater for\n\t\/\/more.\n\tGameStateConstructor() ConfigurableSubState\n\t\/\/PlayerStateConstructor is similar to GameStateConstructor, but\n\t\/\/playerIndex is the value that this PlayerState must return when its\n\t\/\/PlayerIndex() is called.\n\tPlayerStateConstructor(player PlayerIndex) ConfigurablePlayerState\n\n\t\/\/DynamicComponentValuesConstructor returns an empty\n\t\/\/DynamicComponentValues for the given deck. DynamicComponentValues are\n\t\/\/useful for representing when a given component has mutable properties\n\t\/\/associated with it--for example, if a given card could have a stack of\n\t\/\/tokens on top, the stack of tokens would be a property on a\n\t\/\/DynamicComponentValues associated with that card component. If nil is\n\t\/\/returned, then the components in that deck don't have any dynamic\n\t\/\/component state. This method must always return the same underlying type\n\t\/\/of struct for the same deck. Like GameStateConstructor and\n\t\/\/PlayerStateConstructor, the engine will automatically create\n\t\/\/StructInflaters for these objects, allowing you to use tag-based\n\t\/\/inflation of properties. See StructInflate for more. If the returned\n\t\/\/object also implements the ComponentValues interface, then\n\t\/\/SetContainingComponent will be called on the DynamicComponent whenever\n\t\/\/one is created, with a reference back to the component it's associated\n\t\/\/with.\n\tDynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState\n\n\t\/\/DistributeComponentToStarterStack is called during set up of a given\n\t\/\/Game to establish the Deck\/Stack invariant that every component in the\n\t\/\/chest is placed in precisely one Stack. Game will call this on each\n\t\/\/component in the Chest in order. This is where the logic goes to make\n\t\/\/sure each Component goes into its correct starter stack. You must return\n\t\/\/a non-nil Stack for each call, after which the given Component will be\n\t\/\/inserted into NextSlotIndex of that stack. If that is not the ordering\n\t\/\/you desire, you can fix it up in FinishSetUp by using SwapComponents. If\n\t\/\/any errors are returned, any nil Stacks are returned, or any returned\n\t\/\/stacks don't have space for another component, NewGame will fail and\n\t\/\/return an error. State and Component are only provided for reference; do\n\t\/\/not modify them.\n\tDistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error)\n\n\t\/\/BeginSetup is called on a newly created Game before components are\n\t\/\/distributed via DistributeComponentToStarterStack. If you need to modify\n\t\/\/your state before components are distributed, do it here. It is also\n\t\/\/where the variant configuration for your gametype will be passed (it\n\t\/\/will already have been checked for legality and had all configure\n\t\/\/defaults set), although you can also retrieve that at any time via\n\t\/\/game.Variant(). This is a good place to configure state that will be\n\t\/\/necessary for you to make the right decisions in\n\t\/\/DistributeComponentToStarterStack, or to transcribe config information\n\t\/\/you were passed into properties on your gameState as appropriate. If\n\t\/\/error is non-nil, Game setup will be aborted, with the reasoning\n\t\/\/including the error message provided.\n\tBeginSetUp(state State, variant Variant) error\n\n\t\/\/FinishSetUp is called during NewGame, *after* components have been\n\t\/\/distributed to their StarterStack. This is the last chance to modify the\n\t\/\/state before the game's initial state is considered final. For example,\n\t\/\/if you have a card game this is where you'd make sure the starter draw\n\t\/\/stacks are shuffled. If your game has multiple rounds, or if you don't\n\t\/\/want the game to start with it already set-up (e.g. you want to show\n\t\/\/animations of starter cards being dealt) then it's probably best to do\n\t\/\/most of the logic in a SetUp phase. See the README for more. If error is\n\t\/\/non-nil, Game setup will be aborted, with the reasoning including the\n\t\/\/error message provided.\n\tFinishSetUp(state State) error\n\n\t\/\/CheckGameFinished should return true if the game is finished, and who\n\t\/\/the winners are. Called after every move is applied.\n\tCheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex)\n\n\t\/\/ProposeFixUpMove is called after a move has been applied. It may return\n\t\/\/a FixUp move, which will be applied before any other moves are applied.\n\t\/\/If it returns nil, we may take the next move off of the queue. FixUp\n\t\/\/moves are useful for things like shuffling a discard deck back into a\n\t\/\/draw deck, or other moves that are necessary to get the GameState back\n\t\/\/into reasonable shape. base.GameDelegate's defintion is almost always\n\t\/\/suficient.\n\tProposeFixUpMove(state ImmutableState) Move\n\n\t\/\/DefaultNumPlayers returns the number of users that new games of this\n\t\/\/type default to. For example, for tictactoe, it will be 2. If 0 is\n\t\/\/provided to manager.NewGame(), we wil use this value instead.\n\tDefaultNumPlayers() int\n\n\t\/\/Min\/MaxNumPlayers should return the min and max number of players,\n\t\/\/respectively. The engine doesn't use this directly, instead looking at\n\t\/\/LegalNumPlayers. Typically your LegalNumPlayers will check the given\n\t\/\/number of players is between these two extremes.\n\tMinNumPlayers() int\n\tMaxNumPlayers() int\n\n\t\/\/LegalNumPlayers will be consulted when a new game is created. It should\n\t\/\/return true if the given number of players is legal, and false\n\t\/\/otherwise. If this returns false, the NewGame will fail with an error.\n\t\/\/Game creation will automatically reject a numPlayers that does not\n\t\/\/result in at least one player existing. Generally this is simply\n\t\/\/checking to make sure the number of players is between Min and Max\n\t\/\/(inclusive), although some games could only allow, for example, even\n\t\/\/numbers of players.\n\tLegalNumPlayers(numPlayers int) bool\n\n\t\/\/Variants returns a VariantConfig, which describes the different\n\t\/\/categories of configuration values and the legal values they may take on\n\t\/\/when a new game is created. In general if you want to inspect legal\n\t\/\/variants in your own game logic you shouldn't call this, but instead\n\t\/\/call gameManager.Variants() which will ensure your VariantConfig is\n\t\/\/initalized and memoize the return result.\n\tVariants() VariantConfig\n\n\t\/\/CurrentPlayerIndex returns the index of the \"current\" player for the\n\t\/\/given game state--a notion that is game specific (and sometimes\n\t\/\/inapplicable). If CurrentPlayer doesn't make sense (perhaps the game\n\t\/\/never has a notion of current player, or the type of round that we're in\n\t\/\/has no current player), this should return ObserverPlayerIndex. The\n\t\/\/result of this method is used to power state.CurrentPlayer.\n\tCurrentPlayerIndex(state ImmutableState) PlayerIndex\n\n\t\/\/CurrentPhase returns the phase that the game state is currently in.\n\t\/\/Phase is a formalized convention used in moves.Default to make it easier\n\t\/\/to write fix-up moves that only apply in certain phases, like SetUp. The\n\t\/\/return result is primarily used in moves.Default to check whether it is\n\t\/\/one of the phases in a give Move's LegalPhases. See moves.Default for\n\t\/\/more information. The only use of this method in the main library is\n\t\/\/when generating a MoveStorageRecord.\n\tCurrentPhase(state ImmutableState) int\n\n\t\/\/PhaseEnum returns the enum for game phases (the return values of\n\t\/\/CurrentPhase are expected to be valid enums within that enum). If this\n\t\/\/returns a non-nil enums.TreeEnum, then the state will not be able to be\n\t\/\/saved if CurrentPhase() returns a value that is not a leaf-node. The\n\t\/\/core package doesn't rely on this method directly.\n\tPhaseEnum() enum.Enum\n\n\t\/\/SanitizationPolicy is consulted when sanitizing states. It is called for\n\t\/\/each prop in the state, including the set of groups that this player is\n\t\/\/a mamber of. In practice the default behavior of base.GameDelegate,\n\t\/\/which uses struct tags to figure out the policy, is sufficient and you\n\t\/\/do not need to override this. For more on how sanitization works, see\n\t\/\/the documenation for Policy. The statePropetyRef passed will always have\n\t\/\/the Index properties set to -1, signifying that the returned policy\n\t\/\/applies to all items in the Stack\/Board.\n\tSanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy\n\n\t\/\/If you have computed properties that you want to be included in your\n\t\/\/JSON (for example, for use clientside), export them here by creating a\n\t\/\/dictionary with their values.\n\tComputedGlobalProperties(state ImmutableState) PropertyCollection\n\tComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection\n\n\t\/\/Diagram should return a basic debug rendering of state in multi-line\n\t\/\/ascii art. Useful for debugging. State.Diagram() will reach out to this\n\t\/\/method.\n\tDiagram(s ImmutableState) string\n\n\t\/\/SetManager configures which manager this delegate is in use with. A\n\t\/\/given delegate can only be used by a single manager at a time.\n\tSetManager(manager *GameManager)\n\n\t\/\/Manager returns the Manager that was set on this delegate.\n\tManager() *GameManager\n}\n\n\/\/PropertyCollection is just an alias for map[string]interface{}. It is used\n\/\/as the return value for a number of things, including\n\/\/GameDelegate.ConfigureConstants, and MoveConfig.CustomConfigration.\ntype PropertyCollection map[string]interface{}\n\n\/\/Copy returns a shallow copy of PropertyCollection\nfunc (p PropertyCollection) Copy() PropertyCollection {\n\tresult := make(PropertyCollection, len(p))\n\tfor key, val := range result {\n\t\tresult[key] = val\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package awslambdaproxy\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"strconv\"\n\n\t\"github.com\/hashicorp\/yamux\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tmaxTunnels = 10\n\tforwardPort = \"8082\"\n\ttunnelPort = \"8081\"\n)\n\ntype tunnelConnection struct {\n\tconn net.Conn\n\tsess *yamux.Session\n\tstreams map[uint32]*yamux.Stream\n\ttime time.Time\n}\n\ntype connectionManager struct {\n\tforwardListener net.Listener\n\ttunnelListener net.Listener\n\ttunnelConnections map[string]tunnelConnection\n\ttunnelMutex sync.RWMutex\n\ttunnelExpectedRuntime float64\n\ttunnelRedeployNeeded chan bool\n\tactiveTunnel string\n\tlocalProxy *LocalProxy\n}\n\nfunc (t *connectionManager) runForwarder() {\n\tt.waitUntilTunnelIsAvailable()\n\tfor {\n\t\tc, err := t.forwardListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept user connection\")\n\t\t\treturn\n\t\t}\n\t\tgo t.handleForwardConnection(c)\n\t}\n}\n\nfunc (t *connectionManager) handleForwardConnection(localProxyConn net.Conn) {\n\ttunnelStream, err := t.openNewStreamInActiveTunnel()\n\tif err != nil {\n\t\tlog.Println(\"Failed to open new stream in active tunnel\", err)\n\t\treturn\n\t}\n\n\tbidirectionalCopy(localProxyConn, tunnelStream)\n}\n\nfunc (t *connectionManager) runTunnel() {\n\tallLambdaIPs := map[string]int{}\n\tfor {\n\t\tif len(t.tunnelConnections) > maxTunnels {\n\t\t\tlog.Println(\"Too many active tunnelConnections: \" + string(len(t.tunnelConnections)) + \". MAX=\" +\n\t\t\t\tstring(maxTunnels) + \". Waiting for cleanup.\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := t.tunnelListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept tunnel connection\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Accepted tunnel connection from\", c.RemoteAddr())\n\n\t\ttunnelSession, err := yamux.Client(c, nil)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to start session inside tunnel\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Established session to\", tunnelSession.RemoteAddr())\n\n\t\tt.tunnelMutex.Lock()\n\t\tt.activeTunnel = c.RemoteAddr().String()\n\t\tt.tunnelConnections[t.activeTunnel] = tunnelConnection{\n\t\t\tconn: c,\n\t\t\tsess: tunnelSession,\n\t\t\tstreams: make(map[uint32]*yamux.Stream),\n\t\t\ttime: time.Now(),\n\t\t}\n\t\tt.tunnelMutex.Unlock()\n\n\t\tgo t.monitorTunnelSessionHealth(t.activeTunnel)\n\n\t\texternalIP, err := t.getLambdaExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to check ip address:\", err)\n\t\t} else {\n\t\t\tallLambdaIPs[externalIP] += 1\n\t\t}\n\n\t\tlog.Println(\"---------------\")\n\t\tlog.Println(\"Current Lambda IP Address: \", externalIP)\n\t\tlog.Println(\"Active Lambda tunnel count: \", len(t.tunnelConnections))\n\t\tcount := 1\n\t\tfor k, v := range t.tunnelConnections {\n\t\t\tlog.Printf(\"Lambda Tunnel #%v\\n\", count)\n\t\t\tlog.Println(\"\tConnection ID: \" + k)\n\t\t\tlog.Println(\"\tStart Time: \" + v.time.Format(\"2006-01-02T15:04:05\"))\n\t\t\tlog.Println(\"\tActive Streams: \" + strconv.Itoa(v.sess.NumStreams()))\n\t\t\tcount++\n\t\t}\n\t\tips := make([]string, 0, len(allLambdaIPs))\n\t\tfor k := range allLambdaIPs {\n\t\t\tips = append(ips, k)\n\t\t}\n\t\tlog.Printf(\"%v Unique Lambda IPs used so far: %v\\n\", len(allLambdaIPs), strings.Join(ips, \", \"))\n\t\tlog.Println(\"---------------\")\n\t}\n}\n\nfunc (t *connectionManager) getLambdaExternalIP() (string, error) {\n\tproxyURL, err := url.Parse(\"http:\/\/localhost:\" + forwardPort)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turl, err := url.Parse(getIPUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttransport := &http.Transport{\n\t\tProxy: http.ProxyURL(proxyURL),\n\t}\n\tclient := &http.Client{\n\t\tTransport: transport,\n\t}\n\trequest, err := http.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer response.Body.Close()\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(data)), nil\n}\n\nfunc (t *connectionManager) removeTunnelConnection(connectionID string) {\n\tt.tunnelConnections[connectionID].sess.Close()\n\tt.tunnelConnections[connectionID].conn.Close()\n\tt.tunnelMutex.Lock()\n\tdelete(t.tunnelConnections, connectionID)\n\tt.tunnelMutex.Unlock()\n}\n\nfunc (t *connectionManager) monitorTunnelSessionHealth(connectionID string) {\n\tfor {\n\t\t_, err := t.tunnelConnections[connectionID].sess.Ping()\n\t\tif err != nil {\n\t\t\tif time.Since(t.tunnelConnections[connectionID].time).Seconds() < t.tunnelExpectedRuntime {\n\t\t\t\tlog.Println(\"Signaling for emergency tunnel due to tunnel ending early: \", time.Since(t.tunnelConnections[connectionID].time).Seconds())\n\t\t\t\tt.tunnelRedeployNeeded <- true\n\t\t\t}\n\t\t\tt.removeTunnelConnection(connectionID)\n\t\t\tbreak\n\t\t}\n\t\tif time.Since(t.tunnelConnections[connectionID].time).Seconds() > t.tunnelExpectedRuntime {\n\t\t\tnumStreams := t.tunnelConnections[connectionID].sess.NumStreams()\n\t\t\tif numStreams > 0 {\n\t\t\t\tlog.Println(\"Tunnel \" + connectionID + \" that is being closed still has open streams: \" + strconv.Itoa(numStreams) + \". Delaying cleanup.\")\n\t\t\t\ttime.Sleep(20 * time.Second)\n\t\t\t\tlog.Println(\"Delayed cleanup now running for \", connectionID)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Tunnel \" + connectionID + \" is safe to close\")\n\t\t\t}\n\t\t\tlog.Println(\"Removing tunnel\", connectionID)\n\t\t\tt.removeTunnelConnection(connectionID)\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 50)\n\t}\n}\n\nfunc (t *connectionManager) openNewStreamInActiveTunnel() (*yamux.Stream, error) {\n\tfor {\n\t\tt.tunnelMutex.RLock()\n\t\ttunnel, ok := t.tunnelConnections[t.activeTunnel]\n\t\tt.tunnelMutex.RUnlock()\n\t\tif ok {\n\t\t\tstream, err := tunnel.sess.OpenStream()\n\t\t\ttunnel.streams[stream.StreamID()] = stream\n\t\t\treturn stream, err\n\t\t}\n\t\tlog.Println(\"No active tunnel session available. Retrying..\")\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc (t *connectionManager) waitUntilTunnelIsAvailable() error {\n\ttimeout := time.After(time.Second * time.Duration(t.tunnelExpectedRuntime))\n\ttick := time.Tick(time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"Timed out waiting for tunnel to be established. Likely the \" +\n\t\t\t\t\"Lambda function is having issues communicating with this host.\")\n\t\tcase <-tick:\n\t\t\tif t.isReady() == true {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Println(\"Waiting for tunnel to be established..\")\n\t\t}\n\t}\n}\n\nfunc (t *connectionManager) isReady() bool {\n\tif t.activeTunnel == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc newTunnelConnectionManager(frequency time.Duration, localProxy *LocalProxy) (*connectionManager, error) {\n\tforwardListener, err := startForwardListener()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start UserListener\")\n\t}\n\n\ttunnelListener, err := startTunnelListener()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TunnelListener\")\n\t}\n\n\tconnectionManager := &connectionManager{\n\t\tforwardListener: forwardListener,\n\t\ttunnelListener: tunnelListener,\n\t\ttunnelConnections: make(map[string]tunnelConnection),\n\t\ttunnelRedeployNeeded: make(chan bool),\n\t\ttunnelExpectedRuntime: frequency.Seconds(),\n\t\tlocalProxy: localProxy,\n\t}\n\n\tgo connectionManager.runTunnel()\n\tgo connectionManager.runForwarder()\n\n\treturn connectionManager, nil\n}\n\nfunc startTunnelListener() (net.Listener, error) {\n\ttunnelAddress := \"localhost:\" + tunnelPort\n\ttunnelListener, err := net.Listen(\"tcp\", tunnelAddress)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TCP tunnel listener on port \"+tunnelPort)\n\t}\n\tlog.Println(\"Started tunnel listener on port \" + tunnelPort)\n\treturn tunnelListener, nil\n}\n\nfunc startForwardListener() (net.Listener, error) {\n\tforwardAddress := \"localhost:\" + forwardPort\n\tforwardListener, err := net.Listen(\"tcp\", forwardAddress)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TCP user listener on port \"+forwardPort)\n\t}\n\tlog.Println(\"Started user listener on port \" + forwardPort)\n\treturn forwardListener, nil\n}\n<commit_msg>reduce logging output to not include history of all lambda ip addreesses and only the count<commit_after>package awslambdaproxy\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"strconv\"\n\n\t\"github.com\/hashicorp\/yamux\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tmaxTunnels = 10\n\tforwardPort = \"8082\"\n\ttunnelPort = \"8081\"\n)\n\ntype tunnelConnection struct {\n\tconn net.Conn\n\tsess *yamux.Session\n\tstreams map[uint32]*yamux.Stream\n\ttime time.Time\n}\n\ntype connectionManager struct {\n\tforwardListener net.Listener\n\ttunnelListener net.Listener\n\ttunnelConnections map[string]tunnelConnection\n\ttunnelMutex sync.RWMutex\n\ttunnelExpectedRuntime float64\n\ttunnelRedeployNeeded chan bool\n\tactiveTunnel string\n\tlocalProxy *LocalProxy\n}\n\nfunc (t *connectionManager) runForwarder() {\n\tt.waitUntilTunnelIsAvailable()\n\tfor {\n\t\tc, err := t.forwardListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept user connection\")\n\t\t\treturn\n\t\t}\n\t\tgo t.handleForwardConnection(c)\n\t}\n}\n\nfunc (t *connectionManager) handleForwardConnection(localProxyConn net.Conn) {\n\ttunnelStream, err := t.openNewStreamInActiveTunnel()\n\tif err != nil {\n\t\tlog.Println(\"Failed to open new stream in active tunnel\", err)\n\t\treturn\n\t}\n\n\tbidirectionalCopy(localProxyConn, tunnelStream)\n}\n\nfunc (t *connectionManager) runTunnel() {\n\tallLambdaIPs := map[string]int{}\n\tfor {\n\t\tif len(t.tunnelConnections) > maxTunnels {\n\t\t\tlog.Println(\"Too many active tunnelConnections: \" + string(len(t.tunnelConnections)) + \". MAX=\" +\n\t\t\t\tstring(maxTunnels) + \". Waiting for cleanup.\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := t.tunnelListener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to accept tunnel connection\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Accepted tunnel connection from\", c.RemoteAddr())\n\n\t\ttunnelSession, err := yamux.Client(c, nil)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to start session inside tunnel\")\n\t\t\ttime.Sleep(time.Second * 5)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(\"Established session to\", tunnelSession.RemoteAddr())\n\n\t\tt.tunnelMutex.Lock()\n\t\tt.activeTunnel = c.RemoteAddr().String()\n\t\tt.tunnelConnections[t.activeTunnel] = tunnelConnection{\n\t\t\tconn: c,\n\t\t\tsess: tunnelSession,\n\t\t\tstreams: make(map[uint32]*yamux.Stream),\n\t\t\ttime: time.Now(),\n\t\t}\n\t\tt.tunnelMutex.Unlock()\n\n\t\tgo t.monitorTunnelSessionHealth(t.activeTunnel)\n\n\t\texternalIP, err := t.getLambdaExternalIP()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to check ip address:\", err)\n\t\t} else {\n\t\t\tallLambdaIPs[externalIP] += 1\n\t\t}\n\n\t\tlog.Println(\"---------------\")\n\t\tlog.Println(\"Current Lambda IP Address: \", externalIP)\n\t\tlog.Println(\"Active Lambda tunnel count: \", len(t.tunnelConnections))\n\t\tcount := 1\n\t\tfor k, v := range t.tunnelConnections {\n\t\t\tlog.Printf(\"Lambda Tunnel #%v\\n\", count)\n\t\t\tlog.Println(\"\tConnection ID: \" + k)\n\t\t\tlog.Println(\"\tStart Time: \" + v.time.Format(\"2006-01-02T15:04:05\"))\n\t\t\tlog.Println(\"\tActive Streams: \" + strconv.Itoa(v.sess.NumStreams()))\n\t\t\tcount++\n\t\t}\n\t\tips := make([]string, 0, len(allLambdaIPs))\n\t\tfor k := range allLambdaIPs {\n\t\t\tips = append(ips, k)\n\t\t}\n\t\tlog.Printf(\"%v Unique Lambda IPs used so far\\n\", len(allLambdaIPs))\n\t\tlog.Println(\"---------------\")\n\t}\n}\n\nfunc (t *connectionManager) getLambdaExternalIP() (string, error) {\n\tproxyURL, err := url.Parse(\"http:\/\/localhost:\" + forwardPort)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turl, err := url.Parse(getIPUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttransport := &http.Transport{\n\t\tProxy: http.ProxyURL(proxyURL),\n\t}\n\tclient := &http.Client{\n\t\tTransport: transport,\n\t}\n\trequest, err := http.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer response.Body.Close()\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(data)), nil\n}\n\nfunc (t *connectionManager) removeTunnelConnection(connectionID string) {\n\tt.tunnelConnections[connectionID].sess.Close()\n\tt.tunnelConnections[connectionID].conn.Close()\n\tt.tunnelMutex.Lock()\n\tdelete(t.tunnelConnections, connectionID)\n\tt.tunnelMutex.Unlock()\n}\n\nfunc (t *connectionManager) monitorTunnelSessionHealth(connectionID string) {\n\tfor {\n\t\t_, err := t.tunnelConnections[connectionID].sess.Ping()\n\t\tif err != nil {\n\t\t\tif time.Since(t.tunnelConnections[connectionID].time).Seconds() < t.tunnelExpectedRuntime {\n\t\t\t\tlog.Println(\"Signaling for emergency tunnel due to tunnel ending early: \", time.Since(t.tunnelConnections[connectionID].time).Seconds())\n\t\t\t\tt.tunnelRedeployNeeded <- true\n\t\t\t}\n\t\t\tt.removeTunnelConnection(connectionID)\n\t\t\tbreak\n\t\t}\n\t\tif time.Since(t.tunnelConnections[connectionID].time).Seconds() > t.tunnelExpectedRuntime {\n\t\t\tnumStreams := t.tunnelConnections[connectionID].sess.NumStreams()\n\t\t\tif numStreams > 0 {\n\t\t\t\tlog.Println(\"Tunnel \" + connectionID + \" that is being closed still has open streams: \" + strconv.Itoa(numStreams) + \". Delaying cleanup.\")\n\t\t\t\ttime.Sleep(20 * time.Second)\n\t\t\t\tlog.Println(\"Delayed cleanup now running for \", connectionID)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Tunnel \" + connectionID + \" is safe to close\")\n\t\t\t}\n\t\t\tlog.Println(\"Removing tunnel\", connectionID)\n\t\t\tt.removeTunnelConnection(connectionID)\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 50)\n\t}\n}\n\nfunc (t *connectionManager) openNewStreamInActiveTunnel() (*yamux.Stream, error) {\n\tfor {\n\t\tt.tunnelMutex.RLock()\n\t\ttunnel, ok := t.tunnelConnections[t.activeTunnel]\n\t\tt.tunnelMutex.RUnlock()\n\t\tif ok {\n\t\t\tstream, err := tunnel.sess.OpenStream()\n\t\t\ttunnel.streams[stream.StreamID()] = stream\n\t\t\treturn stream, err\n\t\t}\n\t\tlog.Println(\"No active tunnel session available. Retrying..\")\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\nfunc (t *connectionManager) waitUntilTunnelIsAvailable() error {\n\ttimeout := time.After(time.Second * time.Duration(t.tunnelExpectedRuntime))\n\ttick := time.Tick(time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"Timed out waiting for tunnel to be established. Likely the \" +\n\t\t\t\t\"Lambda function is having issues communicating with this host.\")\n\t\tcase <-tick:\n\t\t\tif t.isReady() == true {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Println(\"Waiting for tunnel to be established..\")\n\t\t}\n\t}\n}\n\nfunc (t *connectionManager) isReady() bool {\n\tif t.activeTunnel == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc newTunnelConnectionManager(frequency time.Duration, localProxy *LocalProxy) (*connectionManager, error) {\n\tforwardListener, err := startForwardListener()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start UserListener\")\n\t}\n\n\ttunnelListener, err := startTunnelListener()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TunnelListener\")\n\t}\n\n\tconnectionManager := &connectionManager{\n\t\tforwardListener: forwardListener,\n\t\ttunnelListener: tunnelListener,\n\t\ttunnelConnections: make(map[string]tunnelConnection),\n\t\ttunnelRedeployNeeded: make(chan bool),\n\t\ttunnelExpectedRuntime: frequency.Seconds(),\n\t\tlocalProxy: localProxy,\n\t}\n\n\tgo connectionManager.runTunnel()\n\tgo connectionManager.runForwarder()\n\n\treturn connectionManager, nil\n}\n\nfunc startTunnelListener() (net.Listener, error) {\n\ttunnelAddress := \"localhost:\" + tunnelPort\n\ttunnelListener, err := net.Listen(\"tcp\", tunnelAddress)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TCP tunnel listener on port \"+tunnelPort)\n\t}\n\tlog.Println(\"Started tunnel listener on port \" + tunnelPort)\n\treturn tunnelListener, nil\n}\n\nfunc startForwardListener() (net.Listener, error) {\n\tforwardAddress := \"localhost:\" + forwardPort\n\tforwardListener, err := net.Listen(\"tcp\", forwardAddress)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to start TCP user listener on port \"+forwardPort)\n\t}\n\tlog.Println(\"Started user listener on port \" + forwardPort)\n\treturn forwardListener, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package dupfinder\n\nimport (\n\t\"testing\"\n\t\"strings\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\n\/\/ TODO should raise error if error happens while reading first file\n\/\/ TODO should raise error if error happens while reading second file\n\nfunc TestCompareReaders_same_file(t*testing.T) {\n\tcontent := \"dummy content\"\n\treader1 := strings.NewReader(content)\n\treader2 := strings.NewReader(content)\n\n\texpected := 0\n\n\tcmp, err := CompareReaders(reader1, reader2)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(f, f) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(f, f) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_size_ascending(t*testing.T) {\n\tsmaller := strings.NewReader(\"dummy content\")\n\tbigger := strings.NewReader(\"longer dummy content\")\n\n\texpected := -1\n\n\tcmp, err := CompareReaders(smaller, bigger)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(smaller, bigger) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(smaller, bigger) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_size_descending(t*testing.T) {\n\tsmaller := strings.NewReader(\"dummy content\")\n\tbigger := strings.NewReader(\"longer dummy content\")\n\n\texpected := 1\n\n\tcmp, err := CompareReaders(bigger, smaller)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(bigger, smaller) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(bigger, smaller) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_same_size_content_ascending(t*testing.T) {\n\tlower := strings.NewReader(\"dummy content a\")\n\thigher := strings.NewReader(\"dummy content b\")\n\n\texpected := -1\n\n\tcmp, err := CompareReaders(lower, higher)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(lower, higher) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(lower, higher) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_same_size_content_descending(t*testing.T) {\n\tlower := strings.NewReader(\"dummy content a\")\n\thigher := strings.NewReader(\"dummy content b\")\n\n\texpected := 1\n\n\tcmp, err := CompareReaders(higher, lower)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(higher, lower) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(higher, lower) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_equal_if_both_same_empty_dummy(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\texpected := 0\n\n\tcmp, err := CompareFiles(file.Name(), file.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(dummy, dummy) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(dummy, dummy) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_equal_if_both_empty_dummy(t*testing.T) {\n\tdummy1, err := ioutil.TempFile(os.TempDir(), \"dummy1\")\n\tdefer os.Remove(dummy1.Name())\n\n\tdummy2, err := ioutil.TempFile(os.TempDir(), \"dummy2\")\n\tdefer os.Remove(dummy2.Name())\n\n\texpected := 0\n\n\tcmp, err := CompareFiles(dummy1.Name(), dummy2.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(dummy1, dummy2) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(dummy1, dummy2) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_empty_comes_before_nonempty(t*testing.T) {\n\tempty, err := ioutil.TempFile(os.TempDir(), \"empty\")\n\tdefer os.Remove(empty.Name())\n\n\tnonempty, err := ioutil.TempFile(os.TempDir(), \"nonempty\")\n\tdefer os.Remove(nonempty.Name())\n\n\tnonempty.WriteString(\"something\")\n\n\texpected := -1\n\n\tcmp, err := CompareFiles(empty.Name(), nonempty.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(empty, nonempty) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(empty, nonempty) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_fails_if_both_nonexistent(t*testing.T) {\n\t_, err := CompareFiles(\"\/nonexistent1\", \"\/nonexistent2\")\n\tif err == nil {\n\t\tt.Error(\"Compare(nonexistent1, nonexistent2) should have raised error\")\n\t}\n}\n\nfunc TestCompareFiles_fails_if_first_nonexistent(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\t_, err = CompareFiles(\"\/nonexistent\", file.Name())\n\tif err == nil {\n\t\tt.Error(\"Compare(nonexistent, dummy) should have raised error\")\n\t}\n}\n\nfunc TestCompareFiles_fails_if_second_nonexistent(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\t_, err = CompareFiles(file.Name(), \"\/nonexistent\")\n\tif err == nil {\n\t\tt.Error(\"Compare(dummy, nonexistent) should have raised error\")\n\t}\n}\n<commit_msg>Drop todo to raise errors on file I\/O, not really important<commit_after>package dupfinder\n\nimport (\n\t\"testing\"\n\t\"strings\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\nfunc TestCompareReaders_same_file(t*testing.T) {\n\tcontent := \"dummy content\"\n\treader1 := strings.NewReader(content)\n\treader2 := strings.NewReader(content)\n\n\texpected := 0\n\n\tcmp, err := CompareReaders(reader1, reader2)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(f, f) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(f, f) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_size_ascending(t*testing.T) {\n\tsmaller := strings.NewReader(\"dummy content\")\n\tbigger := strings.NewReader(\"longer dummy content\")\n\n\texpected := -1\n\n\tcmp, err := CompareReaders(smaller, bigger)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(smaller, bigger) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(smaller, bigger) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_size_descending(t*testing.T) {\n\tsmaller := strings.NewReader(\"dummy content\")\n\tbigger := strings.NewReader(\"longer dummy content\")\n\n\texpected := 1\n\n\tcmp, err := CompareReaders(bigger, smaller)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(bigger, smaller) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(bigger, smaller) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_same_size_content_ascending(t*testing.T) {\n\tlower := strings.NewReader(\"dummy content a\")\n\thigher := strings.NewReader(\"dummy content b\")\n\n\texpected := -1\n\n\tcmp, err := CompareReaders(lower, higher)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(lower, higher) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(lower, higher) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareReaders_same_size_content_descending(t*testing.T) {\n\tlower := strings.NewReader(\"dummy content a\")\n\thigher := strings.NewReader(\"dummy content b\")\n\n\texpected := 1\n\n\tcmp, err := CompareReaders(higher, lower)\n\tif err != nil {\n\t\tt.Errorf(\"Compare(higher, lower) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(higher, lower) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_equal_if_both_same_empty_dummy(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\texpected := 0\n\n\tcmp, err := CompareFiles(file.Name(), file.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(dummy, dummy) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(dummy, dummy) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_equal_if_both_empty_dummy(t*testing.T) {\n\tdummy1, err := ioutil.TempFile(os.TempDir(), \"dummy1\")\n\tdefer os.Remove(dummy1.Name())\n\n\tdummy2, err := ioutil.TempFile(os.TempDir(), \"dummy2\")\n\tdefer os.Remove(dummy2.Name())\n\n\texpected := 0\n\n\tcmp, err := CompareFiles(dummy1.Name(), dummy2.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(dummy1, dummy2) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(dummy1, dummy2) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_empty_comes_before_nonempty(t*testing.T) {\n\tempty, err := ioutil.TempFile(os.TempDir(), \"empty\")\n\tdefer os.Remove(empty.Name())\n\n\tnonempty, err := ioutil.TempFile(os.TempDir(), \"nonempty\")\n\tdefer os.Remove(nonempty.Name())\n\n\tnonempty.WriteString(\"something\")\n\n\texpected := -1\n\n\tcmp, err := CompareFiles(empty.Name(), nonempty.Name())\n\tif err != nil {\n\t\tt.Errorf(\"Compare(empty, nonempty) raised error: %v\", err)\n\t}\n\tif cmp != expected {\n\t\tt.Errorf(\"Compare(empty, nonempty) == %v, want %v\", cmp, expected)\n\t}\n}\n\nfunc TestCompareFiles_fails_if_both_nonexistent(t*testing.T) {\n\t_, err := CompareFiles(\"\/nonexistent1\", \"\/nonexistent2\")\n\tif err == nil {\n\t\tt.Error(\"Compare(nonexistent1, nonexistent2) should have raised error\")\n\t}\n}\n\nfunc TestCompareFiles_fails_if_first_nonexistent(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\t_, err = CompareFiles(\"\/nonexistent\", file.Name())\n\tif err == nil {\n\t\tt.Error(\"Compare(nonexistent, dummy) should have raised error\")\n\t}\n}\n\nfunc TestCompareFiles_fails_if_second_nonexistent(t*testing.T) {\n\tfile, err := ioutil.TempFile(os.TempDir(), \"dummy\")\n\tdefer os.Remove(file.Name())\n\n\t_, err = CompareFiles(file.Name(), \"\/nonexistent\")\n\tif err == nil {\n\t\tt.Error(\"Compare(dummy, nonexistent) should have raised error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package natsrunner\n\nimport (\n \"fmt\"\n . \"github.com\/onsi\/gomega\"\n \"os\"\n\n \"github.com\/cloudfoundry\/yagnats\"\n\n \"os\/exec\"\n \"strconv\"\n)\n\nvar natsCommand *exec.Cmd\n\ntype NATSRunner struct {\n port int\n natsCommand *exec.Cmd\n MessageBus yagnats.NATSClient\n}\n\nfunc NewNATSRunner(port int) *NATSRunner {\n return &NATSRunner{\n port: port,\n }\n}\n\nfunc (runner *NATSRunner) Start() {\n _, err := exec.LookPath(\"gnatsd\")\n if err != nil {\n fmt.Println(\"You need gnatsd installed!\")\n os.Exit(1)\n }\n\n runner.natsCommand = exec.Command(\"gnatsd\", \"-p\", strconv.Itoa(runner.port))\n err = runner.natsCommand.Start()\n Ω(err).ShouldNot(HaveOccurred(), \"Make sure to have gnatsd on your path\")\n\n connectionInfo := &yagnats.ConnectionInfo{\n Addr: fmt.Sprintf(\"127.0.0.1:%d\", runner.port),\n }\n\n messageBus := yagnats.NewClient()\n\n Eventually(func() error {\n return messageBus.Connect(connectionInfo)\n }, 5, 0.1).ShouldNot(HaveOccurred())\n\n runner.MessageBus = messageBus\n}\n\nfunc (runner *NATSRunner) Stop() {\n if runner.natsCommand != nil {\n runner.natsCommand.Process.Kill()\n runner.MessageBus = nil\n runner.natsCommand = nil\n }\n}\n<commit_msg>use cmdtest for nats runner to ensure .Wait is called<commit_after>package natsrunner\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/cloudfoundry\/yagnats\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/vito\/cmdtest\"\n)\n\nvar natsCommand *exec.Cmd\n\ntype NATSRunner struct {\n\tport int\n\tnatsSession *cmdtest.Session\n\tMessageBus yagnats.NATSClient\n}\n\nfunc NewNATSRunner(port int) *NATSRunner {\n\treturn &NATSRunner{\n\t\tport: port,\n\t}\n}\n\nfunc (runner *NATSRunner) Start() {\n\t_, err := exec.LookPath(\"gnatsd\")\n\tif err != nil {\n\t\tfmt.Println(\"You need gnatsd installed!\")\n\t\tos.Exit(1)\n\t}\n\n\tsess, err := cmdtest.Start(exec.Command(\"gnatsd\", \"-p\", strconv.Itoa(runner.port)))\n\tΩ(err).ShouldNot(HaveOccurred(), \"Make sure to have gnatsd on your path\")\n\n\trunner.natsSession = sess\n\n\tconnectionInfo := &yagnats.ConnectionInfo{\n\t\tAddr: fmt.Sprintf(\"127.0.0.1:%d\", runner.port),\n\t}\n\n\tmessageBus := yagnats.NewClient()\n\n\tEventually(func() error {\n\t\treturn messageBus.Connect(connectionInfo)\n\t}, 5, 0.1).ShouldNot(HaveOccurred())\n\n\trunner.MessageBus = messageBus\n}\n\nfunc (runner *NATSRunner) Stop() {\n\tif runner.natsSession != nil {\n\t\trunner.natsSession.Cmd.Process.Kill()\n\t\trunner.MessageBus = nil\n\t\trunner.natsSession = nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package discordgo\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ ComponentType is type of component.\ntype ComponentType uint\n\n\/\/ MessageComponent types.\nconst (\n\tActionsRowComponent ComponentType = 1\n\tButtonComponent ComponentType = 2\n\tSelectMenuComponent ComponentType = 3\n\tInputTextComponent ComponentType = 4\n)\n\n\/\/ MessageComponent is a base interface for all message components.\ntype MessageComponent interface {\n\tjson.Marshaler\n\tType() ComponentType\n}\n\ntype unmarshalableMessageComponent struct {\n\tMessageComponent\n}\n\n\/\/ UnmarshalJSON is a helper function to unmarshal MessageComponent object.\nfunc (umc *unmarshalableMessageComponent) UnmarshalJSON(src []byte) error {\n\tvar v struct {\n\t\tType ComponentType `json:\"type\"`\n\t}\n\terr := json.Unmarshal(src, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch v.Type {\n\tcase ActionsRowComponent:\n\t\tumc.MessageComponent = &ActionsRow{}\n\tcase ButtonComponent:\n\t\tumc.MessageComponent = &Button{}\n\tcase SelectMenuComponent:\n\t\tumc.MessageComponent = &SelectMenu{}\n\tcase InputTextComponent:\n\t\tumc.MessageComponent = &InputText{}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown component type: %d\", v.Type)\n\t}\n\treturn json.Unmarshal(src, umc.MessageComponent)\n}\n\nfunc MessageComponentFromJSON(b []byte) (MessageComponent, error) {\n\tvar u unmarshalableMessageComponent\n\terr := u.UnmarshalJSON(b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal into MessageComponent: %w\", err)\n\t}\n\treturn u.MessageComponent, nil\n}\n\n\/\/ ActionsRow is a container for components within one row.\ntype ActionsRow struct {\n\tComponents []MessageComponent `json:\"components\"`\n}\n\n\/\/ MarshalJSON is a method for marshaling ActionsRow to a JSON object.\nfunc (r ActionsRow) MarshalJSON() ([]byte, error) {\n\ttype actionsRow ActionsRow\n\n\treturn json.Marshal(struct {\n\t\tactionsRow\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tactionsRow: actionsRow(r),\n\t\tType: r.Type(),\n\t})\n}\n\n\/\/ UnmarshalJSON is a helper function to unmarshal Actions Row.\nfunc (r *ActionsRow) UnmarshalJSON(data []byte) error {\n\tvar v struct {\n\t\tRawComponents []unmarshalableMessageComponent `json:\"components\"`\n\t}\n\terr := json.Unmarshal(data, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Components = make([]MessageComponent, len(v.RawComponents))\n\tfor i, v := range v.RawComponents {\n\t\tr.Components[i] = v.MessageComponent\n\t}\n\n\treturn err\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (r ActionsRow) Type() ComponentType {\n\treturn ActionsRowComponent\n}\n\n\/\/ ButtonStyle is style of button.\ntype ButtonStyle uint\n\n\/\/ Button styles.\nconst (\n\t\/\/ PrimaryButton is a button with blurple color.\n\tPrimaryButton ButtonStyle = 1\n\t\/\/ SecondaryButton is a button with grey color.\n\tSecondaryButton ButtonStyle = 2\n\t\/\/ SuccessButton is a button with green color.\n\tSuccessButton ButtonStyle = 3\n\t\/\/ DangerButton is a button with red color.\n\tDangerButton ButtonStyle = 4\n\t\/\/ LinkButton is a special type of button which navigates to a URL. Has grey color.\n\tLinkButton ButtonStyle = 5\n)\n\n\/\/ ComponentEmoji represents button emoji, if it does have one.\ntype ComponentEmoji struct {\n\tName string `json:\"name,omitempty\"`\n\tID string `json:\"id,omitempty\"`\n\tAnimated bool `json:\"animated,omitempty\"`\n}\n\n\/\/ Button represents button component.\ntype Button struct {\n\tLabel string `json:\"label\"`\n\tStyle ButtonStyle `json:\"style\"`\n\tDisabled bool `json:\"disabled\"`\n\tEmoji ComponentEmoji `json:\"emoji\"`\n\n\t\/\/ NOTE: Only button with LinkButton style can have link. Also, URL is mutually exclusive with CustomID.\n\tURL string `json:\"url,omitempty\"`\n\tCustomID string `json:\"custom_id,omitempty\"`\n}\n\n\/\/ MarshalJSON is a method for marshaling Button to a JSON object.\nfunc (b Button) MarshalJSON() ([]byte, error) {\n\ttype button Button\n\n\tif b.Style == 0 {\n\t\tb.Style = PrimaryButton\n\t}\n\n\treturn json.Marshal(struct {\n\t\tbutton\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tbutton: button(b),\n\t\tType: b.Type(),\n\t})\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (Button) Type() ComponentType {\n\treturn ButtonComponent\n}\n\n\/\/ SelectMenuOption represents an option for a select menu.\ntype SelectMenuOption struct {\n\tLabel string `json:\"label,omitempty\"`\n\tValue string `json:\"value\"`\n\tDescription string `json:\"description\"`\n\tEmoji ComponentEmoji `json:\"emoji\"`\n\t\/\/ Determines whenever option is selected by default or not.\n\tDefault bool `json:\"default\"`\n}\n\n\/\/ SelectMenu represents select menu component.\ntype SelectMenu struct {\n\tCustomID string `json:\"custom_id,omitempty\"`\n\t\/\/ The text which will be shown in the menu if there's no default options or all options was deselected and component was closed.\n\tPlaceholder string `json:\"placeholder\"`\n\t\/\/ This value determines the minimal amount of selected items in the menu.\n\tMinValues int `json:\"min_values,omitempty\"`\n\t\/\/ This value determines the maximal amount of selected items in the menu.\n\t\/\/ If MaxValues or MinValues are greater than one then the user can select multiple items in the component.\n\tMaxValues int `json:\"max_values,omitempty\"`\n\tOptions []SelectMenuOption `json:\"options\"`\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (SelectMenu) Type() ComponentType {\n\treturn SelectMenuComponent\n}\n\n\/\/ MarshalJSON is a method for marshaling SelectMenu to a JSON object.\nfunc (m SelectMenu) MarshalJSON() ([]byte, error) {\n\ttype selectMenu SelectMenu\n\n\treturn json.Marshal(struct {\n\t\tselectMenu\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tselectMenu: selectMenu(m),\n\t\tType: m.Type(),\n\t})\n}\n\n\/\/ InputText represents text input component.\ntype InputText struct {\n\tCustomID string `json:\"custom_id,omitempty\"`\n\tLabel string `json:\"label\"`\n\tStyle TextStyleType `json:\"style\"`\n\tPlaceholder string `json:\"placeholder,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tRequired bool `json:\"required\"`\n\tMinLength int `json:\"min_length\"`\n\tMaxLength int `json:\"max_length,omitempty\"`\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (InputText) Type() ComponentType {\n\treturn InputTextComponent\n}\n\n\/\/ MarshalJSON is a method for marshaling InputText to a JSON object.\nfunc (m InputText) MarshalJSON() ([]byte, error) {\n\ttype inputText InputText\n\n\treturn json.Marshal(struct {\n\t\tinputText\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tinputText: inputText(m),\n\t\tType: m.Type(),\n\t})\n}\n\n\/\/ TextStyleType is style of text in InputText component.\ntype TextStyleType uint\n\n\/\/ Text styles\nconst (\n\tTextStyleShort TextStyleType = 1\n\tTextStyleParagraph TextStyleType = 2\n)\n<commit_msg>chore(components): rename InputText to TextInput<commit_after>package discordgo\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ ComponentType is type of component.\ntype ComponentType uint\n\n\/\/ MessageComponent types.\nconst (\n\tActionsRowComponent ComponentType = 1\n\tButtonComponent ComponentType = 2\n\tSelectMenuComponent ComponentType = 3\n\tTextInputComponent ComponentType = 4\n)\n\n\/\/ MessageComponent is a base interface for all message components.\ntype MessageComponent interface {\n\tjson.Marshaler\n\tType() ComponentType\n}\n\ntype unmarshalableMessageComponent struct {\n\tMessageComponent\n}\n\n\/\/ UnmarshalJSON is a helper function to unmarshal MessageComponent object.\nfunc (umc *unmarshalableMessageComponent) UnmarshalJSON(src []byte) error {\n\tvar v struct {\n\t\tType ComponentType `json:\"type\"`\n\t}\n\terr := json.Unmarshal(src, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch v.Type {\n\tcase ActionsRowComponent:\n\t\tumc.MessageComponent = &ActionsRow{}\n\tcase ButtonComponent:\n\t\tumc.MessageComponent = &Button{}\n\tcase SelectMenuComponent:\n\t\tumc.MessageComponent = &SelectMenu{}\n\tcase TextInputComponent:\n\t\tumc.MessageComponent = &TextInput{}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown component type: %d\", v.Type)\n\t}\n\treturn json.Unmarshal(src, umc.MessageComponent)\n}\n\nfunc MessageComponentFromJSON(b []byte) (MessageComponent, error) {\n\tvar u unmarshalableMessageComponent\n\terr := u.UnmarshalJSON(b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal into MessageComponent: %w\", err)\n\t}\n\treturn u.MessageComponent, nil\n}\n\n\/\/ ActionsRow is a container for components within one row.\ntype ActionsRow struct {\n\tComponents []MessageComponent `json:\"components\"`\n}\n\n\/\/ MarshalJSON is a method for marshaling ActionsRow to a JSON object.\nfunc (r ActionsRow) MarshalJSON() ([]byte, error) {\n\ttype actionsRow ActionsRow\n\n\treturn json.Marshal(struct {\n\t\tactionsRow\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tactionsRow: actionsRow(r),\n\t\tType: r.Type(),\n\t})\n}\n\n\/\/ UnmarshalJSON is a helper function to unmarshal Actions Row.\nfunc (r *ActionsRow) UnmarshalJSON(data []byte) error {\n\tvar v struct {\n\t\tRawComponents []unmarshalableMessageComponent `json:\"components\"`\n\t}\n\terr := json.Unmarshal(data, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Components = make([]MessageComponent, len(v.RawComponents))\n\tfor i, v := range v.RawComponents {\n\t\tr.Components[i] = v.MessageComponent\n\t}\n\n\treturn err\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (r ActionsRow) Type() ComponentType {\n\treturn ActionsRowComponent\n}\n\n\/\/ ButtonStyle is style of button.\ntype ButtonStyle uint\n\n\/\/ Button styles.\nconst (\n\t\/\/ PrimaryButton is a button with blurple color.\n\tPrimaryButton ButtonStyle = 1\n\t\/\/ SecondaryButton is a button with grey color.\n\tSecondaryButton ButtonStyle = 2\n\t\/\/ SuccessButton is a button with green color.\n\tSuccessButton ButtonStyle = 3\n\t\/\/ DangerButton is a button with red color.\n\tDangerButton ButtonStyle = 4\n\t\/\/ LinkButton is a special type of button which navigates to a URL. Has grey color.\n\tLinkButton ButtonStyle = 5\n)\n\n\/\/ ComponentEmoji represents button emoji, if it does have one.\ntype ComponentEmoji struct {\n\tName string `json:\"name,omitempty\"`\n\tID string `json:\"id,omitempty\"`\n\tAnimated bool `json:\"animated,omitempty\"`\n}\n\n\/\/ Button represents button component.\ntype Button struct {\n\tLabel string `json:\"label\"`\n\tStyle ButtonStyle `json:\"style\"`\n\tDisabled bool `json:\"disabled\"`\n\tEmoji ComponentEmoji `json:\"emoji\"`\n\n\t\/\/ NOTE: Only button with LinkButton style can have link. Also, URL is mutually exclusive with CustomID.\n\tURL string `json:\"url,omitempty\"`\n\tCustomID string `json:\"custom_id,omitempty\"`\n}\n\n\/\/ MarshalJSON is a method for marshaling Button to a JSON object.\nfunc (b Button) MarshalJSON() ([]byte, error) {\n\ttype button Button\n\n\tif b.Style == 0 {\n\t\tb.Style = PrimaryButton\n\t}\n\n\treturn json.Marshal(struct {\n\t\tbutton\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tbutton: button(b),\n\t\tType: b.Type(),\n\t})\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (Button) Type() ComponentType {\n\treturn ButtonComponent\n}\n\n\/\/ SelectMenuOption represents an option for a select menu.\ntype SelectMenuOption struct {\n\tLabel string `json:\"label,omitempty\"`\n\tValue string `json:\"value\"`\n\tDescription string `json:\"description\"`\n\tEmoji ComponentEmoji `json:\"emoji\"`\n\t\/\/ Determines whenever option is selected by default or not.\n\tDefault bool `json:\"default\"`\n}\n\n\/\/ SelectMenu represents select menu component.\ntype SelectMenu struct {\n\tCustomID string `json:\"custom_id,omitempty\"`\n\t\/\/ The text which will be shown in the menu if there's no default options or all options was deselected and component was closed.\n\tPlaceholder string `json:\"placeholder\"`\n\t\/\/ This value determines the minimal amount of selected items in the menu.\n\tMinValues int `json:\"min_values,omitempty\"`\n\t\/\/ This value determines the maximal amount of selected items in the menu.\n\t\/\/ If MaxValues or MinValues are greater than one then the user can select multiple items in the component.\n\tMaxValues int `json:\"max_values,omitempty\"`\n\tOptions []SelectMenuOption `json:\"options\"`\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (SelectMenu) Type() ComponentType {\n\treturn SelectMenuComponent\n}\n\n\/\/ MarshalJSON is a method for marshaling SelectMenu to a JSON object.\nfunc (m SelectMenu) MarshalJSON() ([]byte, error) {\n\ttype selectMenu SelectMenu\n\n\treturn json.Marshal(struct {\n\t\tselectMenu\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tselectMenu: selectMenu(m),\n\t\tType: m.Type(),\n\t})\n}\n\n\/\/ TextInput represents text input component.\ntype TextInput struct {\n\tCustomID string `json:\"custom_id,omitempty\"`\n\tLabel string `json:\"label\"`\n\tStyle TextStyleType `json:\"style\"`\n\tPlaceholder string `json:\"placeholder,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tRequired bool `json:\"required\"`\n\tMinLength int `json:\"min_length\"`\n\tMaxLength int `json:\"max_length,omitempty\"`\n}\n\n\/\/ Type is a method to get the type of a component.\nfunc (TextInput) Type() ComponentType {\n\treturn TextInputComponent\n}\n\n\/\/ MarshalJSON is a method for marshaling TextInput to a JSON object.\nfunc (m TextInput) MarshalJSON() ([]byte, error) {\n\ttype inputText TextInput\n\n\treturn json.Marshal(struct {\n\t\tinputText\n\t\tType ComponentType `json:\"type\"`\n\t}{\n\t\tinputText: inputText(m),\n\t\tType: m.Type(),\n\t})\n}\n\n\/\/ TextStyleType is style of text in TextInput component.\ntype TextStyleType uint\n\n\/\/ Text styles\nconst (\n\tTextStyleShort TextStyleType = 1\n\tTextStyleParagraph TextStyleType = 2\n)\n<|endoftext|>"} {"text":"<commit_before>package file\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/golang\/oauth2\"\n\t\"github.com\/mjibson\/mog\/codec\"\n\t\"github.com\/mjibson\/mog\/protocol\"\n)\n\nfunc init() {\n\tprotocol.Register(\"file\", []string{\"directory\"}, New)\n\tgob.Register(new(File))\n}\n\nfunc New(params []string, token *oauth2.Token) (protocol.Instance, error) {\n\tif len(params) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected one parameter\")\n\t}\n\treturn &File{\n\t\tPath: params[0],\n\t\tSongs: make(protocol.SongList),\n\t}, nil\n}\n\ntype File struct {\n\tPath string\n\tSongs protocol.SongList\n}\n\nfunc (f *File) Key() string {\n\treturn f.Path\n}\n\nfunc (f *File) Info(id string) (*codec.SongInfo, error) {\n\tif _, ok := f.Songs[id]; !ok {\n\t\tif _, err := f.List(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tv := f.Songs[id]\n\tif v == nil {\n\t\treturn nil, fmt.Errorf(\"could not find %v\", id)\n\t}\n\treturn v, nil\n}\n\nfunc (f *File) GetSong(id string) (codec.Song, error) {\n\tpath, num, err := protocol.ParseID(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsongs, _, err := codec.ByExtension(path, fileReader(path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn songs[num], nil\n}\n\nfunc (f *File) List() (protocol.SongList, error) {\n\tif len(f.Songs) == 0 {\n\t\treturn f.Refresh()\n\t}\n\treturn f.Songs, nil\n}\n\nfunc (f *File) Refresh() (protocol.SongList, error) {\n\tsongs := make(protocol.SongList)\n\terr := filepath.Walk(f.Path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tdefer f.Close()\n\t\tss, _, err := codec.ByExtension(path, fileReader(path))\n\t\tif err != nil || len(ss) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tfor i, s := range ss {\n\t\t\tid := fmt.Sprintf(\"%v-%v\", i, path)\n\t\t\tinfo, err := s.Info()\n\t\t\tif err == nil {\n\t\t\t\tsongs[id] = &info\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tf.Songs = songs\n\treturn songs, err\n}\n\nfunc fileReader(path string) codec.Reader {\n\treturn func() (io.ReadCloser, int64, error) {\n\t\tlog.Println(\"open file\", path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tfi, err := f.Stat()\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn nil, 0, err\n\t\t}\n\t\treturn f, fi.Size(), nil\n\t}\n}\n<commit_msg>Prevent bad files from being added<commit_after>package file\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/golang\/oauth2\"\n\t\"github.com\/mjibson\/mog\/codec\"\n\t\"github.com\/mjibson\/mog\/protocol\"\n)\n\nfunc init() {\n\tprotocol.Register(\"file\", []string{\"directory\"}, New)\n\tgob.Register(new(File))\n}\n\nfunc New(params []string, token *oauth2.Token) (protocol.Instance, error) {\n\tif len(params) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected one parameter\")\n\t}\n\tf, err := os.Open(params[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf.Close()\n\treturn &File{\n\t\tPath: params[0],\n\t\tSongs: make(protocol.SongList),\n\t}, nil\n}\n\ntype File struct {\n\tPath string\n\tSongs protocol.SongList\n}\n\nfunc (f *File) Key() string {\n\treturn f.Path\n}\n\nfunc (f *File) Info(id string) (*codec.SongInfo, error) {\n\tif _, ok := f.Songs[id]; !ok {\n\t\tif _, err := f.List(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tv := f.Songs[id]\n\tif v == nil {\n\t\treturn nil, fmt.Errorf(\"could not find %v\", id)\n\t}\n\treturn v, nil\n}\n\nfunc (f *File) GetSong(id string) (codec.Song, error) {\n\tpath, num, err := protocol.ParseID(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsongs, _, err := codec.ByExtension(path, fileReader(path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn songs[num], nil\n}\n\nfunc (f *File) List() (protocol.SongList, error) {\n\tif len(f.Songs) == 0 {\n\t\treturn f.Refresh()\n\t}\n\treturn f.Songs, nil\n}\n\nfunc (f *File) Refresh() (protocol.SongList, error) {\n\tsongs := make(protocol.SongList)\n\terr := filepath.Walk(f.Path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tdefer f.Close()\n\t\tss, _, err := codec.ByExtension(path, fileReader(path))\n\t\tif err != nil || len(ss) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tfor i, s := range ss {\n\t\t\tid := fmt.Sprintf(\"%v-%v\", i, path)\n\t\t\tinfo, err := s.Info()\n\t\t\tif err == nil {\n\t\t\t\tsongs[id] = &info\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tf.Songs = songs\n\treturn songs, err\n}\n\nfunc fileReader(path string) codec.Reader {\n\treturn func() (io.ReadCloser, int64, error) {\n\t\tlog.Println(\"open file\", path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tfi, err := f.Stat()\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn nil, 0, err\n\t\t}\n\t\treturn f, fi.Size(), nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage client\n\nimport (\n\t\"code.google.com\/p\/go.net\/proxy\"\n)\n\nvar (\n\tDefaultVerifiers = map[string]*Verifier{\"dename@mit.edu\": &Verifier{\"CiCheFqDmJ0Pg+j+lypkmmiHrFmRn50rlDi5X0l4+lJRFA==\"}}\n\tDefaultServers = map[string]*Server{\"dename.mit.edu:6263\": &Server{TransportPublicKey: \"4f2i+j65JCE2xNKhxE3RPurAYALx9GRy0Pm9c6J7eDY=\"}}\n\tDefaultTimeout = \"10s\"\n\tDefaultFreshnessThreshold = \"60s\"\n\tDefaultFreshness = Freshness{SignaturesRequired: len(DefaultVerifiers), Threshold: DefaultFreshnessThreshold}\n\tDefaultConfig = Config{Verifier: DefaultVerifiers, Lookup: DefaultServers, Update: DefaultServers}\n\tDefaultDialer = proxy.FromEnvironment()\n)\n<commit_msg>Default verifier is not an email address.<commit_after>\/\/ Copyright 2014 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage client\n\nimport (\n\t\"code.google.com\/p\/go.net\/proxy\"\n)\n\nvar (\n\tDefaultVerifiers = map[string]*Verifier{\"dename.mit.edu\": &Verifier{\"CiCheFqDmJ0Pg+j+lypkmmiHrFmRn50rlDi5X0l4+lJRFA==\"}}\n\tDefaultServers = map[string]*Server{\"dename.mit.edu:6263\": &Server{TransportPublicKey: \"4f2i+j65JCE2xNKhxE3RPurAYALx9GRy0Pm9c6J7eDY=\"}}\n\tDefaultTimeout = \"10s\"\n\tDefaultFreshnessThreshold = \"60s\"\n\tDefaultFreshness = Freshness{SignaturesRequired: len(DefaultVerifiers), Threshold: DefaultFreshnessThreshold}\n\tDefaultConfig = Config{Verifier: DefaultVerifiers, Lookup: DefaultServers, Update: DefaultServers}\n\tDefaultDialer = proxy.FromEnvironment()\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage addons\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/assets\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/machine\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/storageclass\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\n\/\/ defaultStorageClassProvisioner is the name of the default storage class provisioner\nconst defaultStorageClassProvisioner = \"standard\"\n\n\/\/ Set sets a value\nfunc Set(name, value, profile string) error {\n\tglog.Infof(\"Setting %s=%s in profile %q\", name, value, profile)\n\ta, valid := isAddonValid(name)\n\tif !valid {\n\t\treturn errors.Errorf(\"%s is not a valid addon\", name)\n\t}\n\n\tcc, err := config.Load(profile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading profile\")\n\t}\n\n\t\/\/ Run any additional validations for this property\n\tif err := run(cc, name, value, a.validations); err != nil {\n\t\treturn errors.Wrap(err, \"running validations\")\n\t}\n\n\tif err := a.set(cc, name, value); err != nil {\n\t\treturn errors.Wrap(err, \"setting new value of addon\")\n\t}\n\n\t\/\/ Run any callbacks for this property\n\tif err := run(cc, name, value, a.callbacks); err != nil {\n\t\treturn errors.Wrap(err, \"running callbacks\")\n\t}\n\n\tglog.Infof(\"Writing out %q config to set %s=%v...\", profile, name, value)\n\treturn config.Write(profile, cc)\n}\n\n\/\/ Runs all the validation or callback functions and collects errors\nfunc run(cc *config.ClusterConfig, name string, value string, fns []setFn) error {\n\tvar errors []error\n\tfor _, fn := range fns {\n\t\terr := fn(cc, name, value)\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"%v\", errors)\n\t}\n\treturn nil\n}\n\n\/\/ SetBool sets a bool value\nfunc SetBool(cc *config.ClusterConfig, name string, val string) error {\n\tb, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cc.Addons == nil {\n\t\tcc.Addons = map[string]bool{}\n\t}\n\tcc.Addons[name] = b\n\treturn nil\n}\n\n\/\/ enableOrDisableAddon updates addon status executing any commands necessary\nfunc enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) error {\n\tglog.Infof(\"Setting addon %s=%s in %q\", name, val, cc.Name)\n\tenable, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"parsing bool: %s\", name)\n\t}\n\taddon := assets.Addons[name]\n\n\t\/\/ check addon status before enabling\/disabling it\n\talreadySet, err := isAddonAlreadySet(addon, enable, cc.Name)\n\tif err != nil {\n\t\tout.ErrT(out.Conflict, \"{{.error}}\", out.V{\"error\": err})\n\t\treturn err\n\t}\n\n\tif alreadySet {\n\t\tglog.Warningf(\"addon %s should already be in state %v\", name, val)\n\t\tif !enable {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ to match both ingress and ingress-dns adons\n\tif strings.HasPrefix(name, \"ingress\") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS != \"linux\" {\n\t\texit.UsageT(`Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps:\/\/github.com\/kubernetes\/minikube\/issues\/7332`, out.V{\"driver_name\": cc.Driver, \"os_name\": runtime.GOOS, \"addon_name\": name})\n\t}\n\n\tif strings.HasPrefix(name, \"istio\") && enable {\n\t\tminMem := 8192\n\t\tminCPUs := 4\n\t\tif cc.Memory < minMem {\n\t\t\tout.WarningT(\"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB\", out.V{\"minMem\": minMem, \"memory\": cc.Memory})\n\t\t}\n\t\tif cc.CPUs < minCPUs {\n\t\t\tout.WarningT(\"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs\", out.V{\"minCPUs\": minCPUs, \"cpus\": cc.CPUs})\n\t\t}\n\t}\n\n\t\/\/ TODO(r2d4): config package should not reference API, pull this out\n\tapi, err := machine.NewAPIClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"machine client\")\n\t}\n\tdefer api.Close()\n\n\tcp, err := config.PrimaryControlPlane(cc)\n\tif err != nil {\n\t\texit.WithError(\"Error getting primary control plane\", err)\n\t}\n\n\tmName := driver.MachineName(*cc, cp)\n\thost, err := machine.LoadHost(api, mName)\n\tif err != nil || !machine.IsRunning(api, mName) {\n\t\tglog.Warningf(\"%q is not running, writing %s=%v to disk and skipping enablement (err=%v)\", mName, addon.Name(), enable, err)\n\t\treturn nil\n\t}\n\n\tcmd, err := machine.CommandRunner(host)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"command runner\")\n\t}\n\n\tdata := assets.GenerateTemplateData(cc.KubernetesConfig)\n\treturn enableOrDisableAddonInternal(cc, addon, cmd, data, enable)\n}\n\nfunc isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {\n\taddonStatus, err := addon.IsEnabled(profile)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"is enabled\")\n\t}\n\n\tif addonStatus && enable {\n\t\treturn true, nil\n\t} else if !addonStatus && !enable {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {\n\tdeployFiles := []string{}\n\n\tfor _, addon := range addon.Assets {\n\t\tvar f assets.CopyableFile\n\t\tvar err error\n\t\tif addon.IsTemplate() {\n\t\t\tf, err = addon.Evaluate(data)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"evaluate bundled addon %s asset\", addon.GetAssetName())\n\t\t\t}\n\n\t\t} else {\n\t\t\tf = addon\n\t\t}\n\t\tfPath := path.Join(f.GetTargetDir(), f.GetTargetName())\n\n\t\tif enable {\n\t\t\tglog.Infof(\"installing %s\", fPath)\n\t\t\tif err := cmd.Copy(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tglog.Infof(\"Removing %+v\", fPath)\n\t\t\tdefer func() {\n\t\t\t\tif err := cmd.Remove(f); err != nil {\n\t\t\t\t\tglog.Warningf(\"error removing %s; addon should still be disabled as expected\", fPath)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tif strings.HasSuffix(fPath, \".yaml\") {\n\t\t\tdeployFiles = append(deployFiles, fPath)\n\t\t}\n\t}\n\n\tcommand := kubectlCommand(cc, deployFiles, enable)\n\n\t\/\/ Retry, because sometimes we race against an apiserver restart\n\tapply := func() error {\n\t\t_, err := cmd.RunCmd(command)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"apply failed, will retry: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\treturn retry.Expo(apply, 1*time.Second, time.Second*30)\n}\n\n\/\/ enableOrDisableStorageClasses enables or disables storage classes\nfunc enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val string) error {\n\tglog.Infof(\"enableOrDisableStorageClasses %s=%v on %q\", name, val, cc.Name)\n\tenable, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error parsing boolean\")\n\t}\n\n\tclass := defaultStorageClassProvisioner\n\tif name == \"storage-provisioner-gluster\" {\n\t\tclass = \"glusterfile\"\n\t}\n\tstoragev1, err := storageclass.GetStoragev1()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error getting storagev1 interface %v \", err)\n\t}\n\n\tapi, err := machine.NewAPIClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"machine client\")\n\t}\n\tdefer api.Close()\n\n\tcp, err := config.PrimaryControlPlane(cc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting control plane\")\n\t}\n\tif !machine.IsRunning(api, driver.MachineName(*cc, cp)) {\n\t\tglog.Warningf(\"%q is not running, writing %s=%v to disk and skipping enablement\", driver.MachineName(*cc, cp), name, val)\n\t\treturn enableOrDisableAddon(cc, name, val)\n\t}\n\n\tif enable {\n\t\t\/\/ Only StorageClass for 'name' should be marked as default\n\t\terr = storageclass.SetDefaultStorageClass(storagev1, class)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error making %s the default storage class\", class)\n\t\t}\n\t} else {\n\t\t\/\/ Unset the StorageClass as default\n\t\terr := storageclass.DisableDefaultStorageClass(storagev1, class)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error disabling %s as the default storage class\", class)\n\t\t}\n\t}\n\n\treturn enableOrDisableAddon(cc, name, val)\n}\n\n\/\/ Start enables the default addons for a profile, plus any additional\nfunc Start(wg sync.WaitGroup, profile string, toEnable map[string]bool, additional []string) {\n\twg.Add(1)\n\tdefer wg.Done()\n\n\tstart := time.Now()\n\tglog.Infof(\"enableAddons start: toEnable=%v, additional=%s\", toEnable, additional)\n\tdefer func() {\n\t\tglog.Infof(\"enableAddons completed in %s\", time.Since(start))\n\t}()\n\n\t\/\/ Get the default values of any addons not saved to our config\n\tfor name, a := range assets.Addons {\n\t\tdefaultVal, err := a.IsEnabled(profile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"is-enabled failed for %q: %v\", a.Name(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, exists := toEnable[name]\n\t\tif !exists {\n\t\t\ttoEnable[name] = defaultVal\n\t\t}\n\t}\n\n\t\/\/ Apply new addons\n\tfor _, name := range additional {\n\t\ttoEnable[name] = true\n\t}\n\n\ttoEnableList := []string{}\n\tfor k, v := range toEnable {\n\t\tif v {\n\t\t\ttoEnableList = append(toEnableList, k)\n\t\t}\n\t}\n\tsort.Strings(toEnableList)\n\n\tout.T(out.AddonEnable, \"Enabling addons: {{.addons}}\", out.V{\"addons\": strings.Join(toEnableList, \", \")})\n\tfor _, a := range toEnableList {\n\t\twg.Add(1)\n\t\tgo func(name) {\n\t\t\terr := Set(name, \"true\", profile)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ Intentionally non-fatal\n\t\t\t\tout.WarningT(\"Enabling '{{.name}}' returned an error: {{.error}}\", out.V{\"name\": name, \"error\": err})\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n<commit_msg>Pass correct variable in<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage addons\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/assets\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/driver\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/exit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/machine\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/storageclass\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\n\/\/ defaultStorageClassProvisioner is the name of the default storage class provisioner\nconst defaultStorageClassProvisioner = \"standard\"\n\n\/\/ Set sets a value\nfunc Set(name, value, profile string) error {\n\tglog.Infof(\"Setting %s=%s in profile %q\", name, value, profile)\n\ta, valid := isAddonValid(name)\n\tif !valid {\n\t\treturn errors.Errorf(\"%s is not a valid addon\", name)\n\t}\n\n\tcc, err := config.Load(profile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading profile\")\n\t}\n\n\t\/\/ Run any additional validations for this property\n\tif err := run(cc, name, value, a.validations); err != nil {\n\t\treturn errors.Wrap(err, \"running validations\")\n\t}\n\n\tif err := a.set(cc, name, value); err != nil {\n\t\treturn errors.Wrap(err, \"setting new value of addon\")\n\t}\n\n\t\/\/ Run any callbacks for this property\n\tif err := run(cc, name, value, a.callbacks); err != nil {\n\t\treturn errors.Wrap(err, \"running callbacks\")\n\t}\n\n\tglog.Infof(\"Writing out %q config to set %s=%v...\", profile, name, value)\n\treturn config.Write(profile, cc)\n}\n\n\/\/ Runs all the validation or callback functions and collects errors\nfunc run(cc *config.ClusterConfig, name string, value string, fns []setFn) error {\n\tvar errors []error\n\tfor _, fn := range fns {\n\t\terr := fn(cc, name, value)\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"%v\", errors)\n\t}\n\treturn nil\n}\n\n\/\/ SetBool sets a bool value\nfunc SetBool(cc *config.ClusterConfig, name string, val string) error {\n\tb, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cc.Addons == nil {\n\t\tcc.Addons = map[string]bool{}\n\t}\n\tcc.Addons[name] = b\n\treturn nil\n}\n\n\/\/ enableOrDisableAddon updates addon status executing any commands necessary\nfunc enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) error {\n\tglog.Infof(\"Setting addon %s=%s in %q\", name, val, cc.Name)\n\tenable, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"parsing bool: %s\", name)\n\t}\n\taddon := assets.Addons[name]\n\n\t\/\/ check addon status before enabling\/disabling it\n\talreadySet, err := isAddonAlreadySet(addon, enable, cc.Name)\n\tif err != nil {\n\t\tout.ErrT(out.Conflict, \"{{.error}}\", out.V{\"error\": err})\n\t\treturn err\n\t}\n\n\tif alreadySet {\n\t\tglog.Warningf(\"addon %s should already be in state %v\", name, val)\n\t\tif !enable {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ to match both ingress and ingress-dns adons\n\tif strings.HasPrefix(name, \"ingress\") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS != \"linux\" {\n\t\texit.UsageT(`Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.\nAlternatively to use this addon you can use a vm-based driver:\n\n\t'minikube start --vm=true'\n\nTo track the update on this work in progress feature please check:\nhttps:\/\/github.com\/kubernetes\/minikube\/issues\/7332`, out.V{\"driver_name\": cc.Driver, \"os_name\": runtime.GOOS, \"addon_name\": name})\n\t}\n\n\tif strings.HasPrefix(name, \"istio\") && enable {\n\t\tminMem := 8192\n\t\tminCPUs := 4\n\t\tif cc.Memory < minMem {\n\t\t\tout.WarningT(\"Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB\", out.V{\"minMem\": minMem, \"memory\": cc.Memory})\n\t\t}\n\t\tif cc.CPUs < minCPUs {\n\t\t\tout.WarningT(\"Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs\", out.V{\"minCPUs\": minCPUs, \"cpus\": cc.CPUs})\n\t\t}\n\t}\n\n\t\/\/ TODO(r2d4): config package should not reference API, pull this out\n\tapi, err := machine.NewAPIClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"machine client\")\n\t}\n\tdefer api.Close()\n\n\tcp, err := config.PrimaryControlPlane(cc)\n\tif err != nil {\n\t\texit.WithError(\"Error getting primary control plane\", err)\n\t}\n\n\tmName := driver.MachineName(*cc, cp)\n\thost, err := machine.LoadHost(api, mName)\n\tif err != nil || !machine.IsRunning(api, mName) {\n\t\tglog.Warningf(\"%q is not running, writing %s=%v to disk and skipping enablement (err=%v)\", mName, addon.Name(), enable, err)\n\t\treturn nil\n\t}\n\n\tcmd, err := machine.CommandRunner(host)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"command runner\")\n\t}\n\n\tdata := assets.GenerateTemplateData(cc.KubernetesConfig)\n\treturn enableOrDisableAddonInternal(cc, addon, cmd, data, enable)\n}\n\nfunc isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {\n\taddonStatus, err := addon.IsEnabled(profile)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"is enabled\")\n\t}\n\n\tif addonStatus && enable {\n\t\treturn true, nil\n\t} else if !addonStatus && !enable {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {\n\tdeployFiles := []string{}\n\n\tfor _, addon := range addon.Assets {\n\t\tvar f assets.CopyableFile\n\t\tvar err error\n\t\tif addon.IsTemplate() {\n\t\t\tf, err = addon.Evaluate(data)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"evaluate bundled addon %s asset\", addon.GetAssetName())\n\t\t\t}\n\n\t\t} else {\n\t\t\tf = addon\n\t\t}\n\t\tfPath := path.Join(f.GetTargetDir(), f.GetTargetName())\n\n\t\tif enable {\n\t\t\tglog.Infof(\"installing %s\", fPath)\n\t\t\tif err := cmd.Copy(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tglog.Infof(\"Removing %+v\", fPath)\n\t\t\tdefer func() {\n\t\t\t\tif err := cmd.Remove(f); err != nil {\n\t\t\t\t\tglog.Warningf(\"error removing %s; addon should still be disabled as expected\", fPath)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tif strings.HasSuffix(fPath, \".yaml\") {\n\t\t\tdeployFiles = append(deployFiles, fPath)\n\t\t}\n\t}\n\n\tcommand := kubectlCommand(cc, deployFiles, enable)\n\n\t\/\/ Retry, because sometimes we race against an apiserver restart\n\tapply := func() error {\n\t\t_, err := cmd.RunCmd(command)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"apply failed, will retry: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\treturn retry.Expo(apply, 1*time.Second, time.Second*30)\n}\n\n\/\/ enableOrDisableStorageClasses enables or disables storage classes\nfunc enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val string) error {\n\tglog.Infof(\"enableOrDisableStorageClasses %s=%v on %q\", name, val, cc.Name)\n\tenable, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error parsing boolean\")\n\t}\n\n\tclass := defaultStorageClassProvisioner\n\tif name == \"storage-provisioner-gluster\" {\n\t\tclass = \"glusterfile\"\n\t}\n\tstoragev1, err := storageclass.GetStoragev1()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error getting storagev1 interface %v \", err)\n\t}\n\n\tapi, err := machine.NewAPIClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"machine client\")\n\t}\n\tdefer api.Close()\n\n\tcp, err := config.PrimaryControlPlane(cc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting control plane\")\n\t}\n\tif !machine.IsRunning(api, driver.MachineName(*cc, cp)) {\n\t\tglog.Warningf(\"%q is not running, writing %s=%v to disk and skipping enablement\", driver.MachineName(*cc, cp), name, val)\n\t\treturn enableOrDisableAddon(cc, name, val)\n\t}\n\n\tif enable {\n\t\t\/\/ Only StorageClass for 'name' should be marked as default\n\t\terr = storageclass.SetDefaultStorageClass(storagev1, class)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error making %s the default storage class\", class)\n\t\t}\n\t} else {\n\t\t\/\/ Unset the StorageClass as default\n\t\terr := storageclass.DisableDefaultStorageClass(storagev1, class)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error disabling %s as the default storage class\", class)\n\t\t}\n\t}\n\n\treturn enableOrDisableAddon(cc, name, val)\n}\n\n\/\/ Start enables the default addons for a profile, plus any additional\nfunc Start(wg sync.WaitGroup, profile string, toEnable map[string]bool, additional []string) {\n\twg.Add(1)\n\tdefer wg.Done()\n\n\tstart := time.Now()\n\tglog.Infof(\"enableAddons start: toEnable=%v, additional=%s\", toEnable, additional)\n\tdefer func() {\n\t\tglog.Infof(\"enableAddons completed in %s\", time.Since(start))\n\t}()\n\n\t\/\/ Get the default values of any addons not saved to our config\n\tfor name, a := range assets.Addons {\n\t\tdefaultVal, err := a.IsEnabled(profile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"is-enabled failed for %q: %v\", a.Name(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, exists := toEnable[name]\n\t\tif !exists {\n\t\t\ttoEnable[name] = defaultVal\n\t\t}\n\t}\n\n\t\/\/ Apply new addons\n\tfor _, name := range additional {\n\t\ttoEnable[name] = true\n\t}\n\n\ttoEnableList := []string{}\n\tfor k, v := range toEnable {\n\t\tif v {\n\t\t\ttoEnableList = append(toEnableList, k)\n\t\t}\n\t}\n\tsort.Strings(toEnableList)\n\n\tout.T(out.AddonEnable, \"Enabling addons: {{.addons}}\", out.V{\"addons\": strings.Join(toEnableList, \", \")})\n\tfor _, a := range toEnableList {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\terr := Set(name, \"true\", profile)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ Intentionally non-fatal\n\t\t\t\tout.WarningT(\"Enabling '{{.name}}' returned an error: {{.error}}\", out.V{\"name\": name, \"error\": err})\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(a)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package boober\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/skatteetaten\/aoc\/pkg\/openshift\"\n\t\"github.com\/spf13\/viper\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\/\/\"github.com\/vbatts\/gogololcat\/lol\"\n)\n\nconst SPEC_ILLEGAL = -1\nconst MISSING_FILE_REFERENCE = -2\nconst MISSING_CONFIGURATION = -3\nconst ILLEGAL_JSON_CONFIGURATION = -4\nconst CONFIGURATION_FILE_IN_ROOT = -5\nconst ERROR_READING_FILE = -6\nconst BOOBER_ERROR = -7\nconst ILLEGAL_FILE = -8\nconst INTERNAL_ERROR = -9\nconst FOLDER_SETUP_NOT_SUPPORTED = -10\nconst OPERATION_OK = 0\nconst SPEC_IS_FILE = 1\nconst SPEC_IS_FOLDER = 2\n\nconst BOOBER_NOT_INSTALLED_RESPONSE = \"Application is not available\"\n\n\/\/ Struct to represent data to the Boober interface\ntype BooberInferface struct {\n\tEnv string `json:\"env\"`\n\tApp string `json:\"app\"`\n\tAffiliation string `json:\"affiliation\"`\n\tFiles map[string]json.RawMessage `json:\"files\"`\n\tOverrides map[string]json.RawMessage `json:\"overrides\"`\n}\n\n\/\/ Structs to represent return data from the Boober interface\ntype BooberReturn struct {\n\tSources json.RawMessage `json:\"sources\"`\n\tErrors []string `json:\"errors\"`\n\tValid bool `json:\"valid\"`\n\tConfig json.RawMessage `json:\"config\"`\n\tOpenshiftObjects map[string]json.RawMessage `json:\"openshiftObjects\"`\n}\n\nfunc ExecuteSetup(args []string, dryRun bool, showConfig bool, showObjects bool, verbose bool, localhost bool,\n\toverrideFiles []string) int {\n\n\tvalidateCode := validateCommand(args, overrideFiles)\n\tif validateCode < 0 {\n\t\treturn validateCode\n\t}\n\n\tvar absolutePath string\n\n\tabsolutePath, _ = filepath.Abs(args[0])\n\n\tvar envFile string \/\/ Filename for app\n\tvar envFolder string \/\/ Short folder name (Env)\n\tvar folder string \/\/ Absolute path of folder\n\tvar parentFolder string \/\/ Absolute path of parent\n\n\tswitch validateCode {\n\tcase SPEC_IS_FILE:\n\t\tfolder = filepath.Dir(absolutePath)\n\t\tenvFile = filepath.Base(absolutePath)\n\tcase SPEC_IS_FOLDER:\n\t\tfolder = absolutePath\n\t\tenvFile = \"\"\n\t\tfmt.Println(\"Setup on a folder is not yet supported\")\n\t\treturn FOLDER_SETUP_NOT_SUPPORTED\n\t}\n\n\tparentFolder = filepath.Dir(folder)\n\tenvFolder = filepath.Base(folder)\n\n\tif folder == parentFolder {\n\t\tfmt.Println(\"Application configuration file cannot reside in root directory\")\n\t\treturn CONFIGURATION_FILE_IN_ROOT\n\t}\n\n\t\/\/ Initialize JSON\n\n\tvar booberData BooberInferface\n\tbooberData.App = strings.TrimSuffix(envFile, filepath.Ext(envFile)) \/\/envFile\n\tbooberData.Env = envFolder\n\tbooberData.Affiliation = \"\"\n\n\tvar returnMap = Folder2Map(folder, envFolder+\"\/\")\n\tvar returnMap2 = Folder2Map(parentFolder, \"\")\n\n\tbooberData.Files = CombineMaps(returnMap, returnMap2)\n\tbooberData.Overrides = overrides2map(args, overrideFiles)\n\n\tjsonByte, ok := json.Marshal(booberData)\n\tif !(ok == nil) {\n\t\tfmt.Println(\"Internal error in marshalling Boober data: \" + ok.Error())\n\t\treturn INTERNAL_ERROR\n\t}\n\n\tjsonStr := string(jsonByte)\n\tif dryRun {\n\t\t\/\/lol.Writer.Write(os.Stdout, []byte(PrettyPrintJson(jsonStr)))\n\t\tfmt.Println(string(PrettyPrintJson(jsonStr)))\n\t} else {\n\t\tvalidateCode = CallBoober(jsonStr, showConfig, showObjects, false, localhost, verbose)\n\t\tif validateCode < 0 {\n\t\t\treturn validateCode\n\t\t}\n\t}\n\treturn OPERATION_OK\n}\n\nfunc overrides2map(args []string, overrideFiles []string) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\tfor i := 0; i < len(overrideFiles); i++ {\n\t\treturnMap[overrideFiles[i]] = json.RawMessage(args[i+1])\n\t}\n\treturn returnMap\n}\n\nfunc Folder2Map(folder string, prefix string) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\tvar allFilesOK bool = true\n\n\tfiles, _ := ioutil.ReadDir(folder)\n\tvar filesProcessed = 0\n\tfor _, f := range files {\n\t\tabsolutePath := filepath.Join(folder, f.Name())\n\t\tif IsLegalFileFolder(absolutePath) == SPEC_IS_FILE { \/\/ Ignore folders\n\t\t\tmatched, _ := filepath.Match(\"*.json\", strings.ToLower(f.Name()))\n\t\t\tif matched {\n\t\t\t\tfileJson, err := ioutil.ReadFile(absolutePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error in reading file \" + absolutePath)\n\t\t\t\t\tos.Exit(ERROR_READING_FILE)\n\t\t\t\t}\n\t\t\t\tif IsLegalJson(string(fileJson)) {\n\t\t\t\t\tfilesProcessed++\n\t\t\t\t\treturnMap[prefix+f.Name()] = fileJson\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Illegal JSON in configuration file \" + absolutePath)\n\t\t\t\t\tallFilesOK = false\n\t\t\t\t}\n\t\t\t\tfilesProcessed++\n\t\t\t}\n\t\t}\n\n\t}\n\tif !allFilesOK {\n\t\tos.Exit(ILLEGAL_JSON_CONFIGURATION)\n\t}\n\treturn returnMap\n}\n\nfunc CombineMaps(map1 map[string]json.RawMessage, map2 map[string]json.RawMessage) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\n\tfor k, v := range map1 {\n\t\treturnMap[k] = v\n\t}\n\tfor k, v := range map2 {\n\t\treturnMap[k] = v\n\t}\n\treturn returnMap\n}\n\nfunc validateCommand(args []string, overrideFiles []string) int {\n\tvar errorString = \"\"\n\tvar returnCode int\n\n\tif len(args) == 0 {\n\t\treturnCode = -1\n\t\terrorString += \"Missing file\/folder \"\n\t} else {\n\t\t\/\/ Chceck argument 0 for legal file \/ folder\n\t\treturnCode = IsLegalFileFolder(args[0])\n\t\tif returnCode < 0 {\n\t\t\terrorString += \"Illegal file \/ folder: \" + args[0]\n\t\t\treturnCode = ILLEGAL_FILE\n\t\t}\n\n\t\t\/\/ We have at least one argument, now there should be a correlation between the number of args\n\t\t\/\/ and the number of override (-f) flags\n\t\tif len(overrideFiles) < (len(args) - 1) {\n\t\t\treturnCode = MISSING_FILE_REFERENCE\n\t\t\terrorString += \"Configuration override specified without file reference flag \"\n\t\t}\n\t\tif len(overrideFiles) > (len(args) - 1) {\n\t\t\treturnCode = MISSING_CONFIGURATION\n\t\t\terrorString += \"Configuration overide file reference flag specified without configuration \"\n\t\t}\n\n\t\t\/\/ Check for legal JSON argument for each overrideFiles flag\n\t\tfor i := 1; i < len(args); i++ {\n\t\t\tif !IsLegalJson(args[i]) {\n\t\t\t\terrorString = \"Illegal JSON configuration override: \" + args[i] + \" \"\n\t\t\t\treturnCode = ILLEGAL_JSON_CONFIGURATION\n\t\t\t}\n\t\t}\n\t}\n\n\tif returnCode < 0 {\n\t\tfmt.Println(errorString)\n\t}\n\treturn returnCode\n}\n\nfunc IsLegalFileFolder(filespec string) int {\n\tvar err error\n\tvar absolutePath string\n\tvar fi os.FileInfo\n\n\tabsolutePath, err = filepath.Abs(filespec)\n\tfi, err = os.Stat(absolutePath)\n\tif os.IsNotExist(err) {\n\t\treturn SPEC_ILLEGAL\n\t} else {\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\treturn SPEC_IS_FOLDER\n\t\tcase mode.IsRegular():\n\t\t\treturn SPEC_IS_FILE\n\t\t}\n\t}\n\treturn SPEC_ILLEGAL\n}\n\nfunc GetBooberAddress(clusterName string, localhost bool) string {\n\tvar booberAddress string\n\n\tif localhost {\n\t\tbooberAddress = \"http:\/\/localhost:8080\"\n\t} else {\n\t\tbooberAddress = \"http:\/\/boober-mfp-boober.\" + clusterName + \".paas.skead.no\"\n\t}\n\treturn booberAddress\n}\n\nfunc GetBooberSetupUrl(clusterName string, localhost bool) string {\n\treturn GetBooberAddress(clusterName, localhost) + \"\/setup\"\n}\n\nfunc CallBoober(combindedJson string, showConfig bool, showObjects bool, api bool, localhost bool, verbose bool) int {\n\t\/\/var openshiftConfig *openshift.OpenshiftConfig\n\tvar configLocation = viper.GetString(\"HOME\") + \"\/.aoc.json\"\n\n\tif localhost {\n\t\tCallBooberInstance(combindedJson, showConfig, showObjects, verbose,\n\t\t\tGetBooberSetupUrl(\"localhost\", localhost), \"\")\n\t} else {\n\t\topenshiftConfig, err := openshift.LoadOrInitiateConfigFile(configLocation)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error in loading OpenShift configuration\")\n\t\t\treturn INTERNAL_ERROR\n\t\t}\n\n\t\tfor i := range openshiftConfig.Clusters {\n\t\t\tif openshiftConfig.Clusters[i].Reachable {\n\t\t\t\tif !api || openshiftConfig.Clusters[i].Name == openshiftConfig.APICluster {\n\t\t\t\t\tCallBooberInstance(combindedJson, showConfig, showObjects, verbose,\n\t\t\t\t\t\tGetBooberSetupUrl(openshiftConfig.Clusters[i].Name, localhost),\n\t\t\t\t\t\topenshiftConfig.Clusters[i].Token)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn OPERATION_OK\n}\n\nfunc CallBooberInstance(combindedJson string, showConfig bool, showObjects bool, verbose bool, url string, token string) int {\n\n\tif verbose {\n\t\tfmt.Print(\"Sending config to Boober at \" + url + \"... \")\n\t}\n\n\tvar jsonStr = []byte(combindedJson)\n\n\treq, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(jsonStr))\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tif err != nil {\n\t\tfmt.Println(\"Internal error in NewRequest: \", err)\n\t\treturn INTERNAL_ERROR\n\t}\n\n\treq.Header.Set(\"Authentication\", \"Bearer: \"+token)\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"Error connecting to the Boober service on \"+url+\": \", err)\n\t\treturn BOOBER_ERROR\n\t}\n\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tbodyStr := string(body)\n\t\tif strings.Contains(bodyStr, BOOBER_NOT_INSTALLED_RESPONSE) {\n\t\t\tfmt.Println(\"FAIL: Boober not available on \" + url + \":\")\n\t\t}\n\t\treturn BOOBER_ERROR\n\t}\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t\/\/ Check return for error\n\tvar booberReturn BooberReturn\n\terr = json.Unmarshal(body, &booberReturn)\n\tif err != nil {\n\t\tfmt.Println(\"Error unmashalling Boober return: \" + err.Error())\n\t\treturn BOOBER_ERROR\n\t}\n\n\tif !(booberReturn.Valid) {\n\t\tfmt.Println(\"Error in configuration: \")\n\t\tfor _, message := range booberReturn.Errors {\n\t\t\tfmt.Println(\" \" + message)\n\t\t}\n\t} else {\n\t\tif verbose {\n\t\t\tfmt.Print(\"OK \")\n\t\t\tvar countMap map[string]int = make(map[string]int)\n\t\t\tfor key := range booberReturn.OpenshiftObjects {\n\t\t\t\tcountMap[key]++\n\t\t\t}\n\t\t\tfmt.Print(\"(\")\n\t\t\tfor key := range countMap {\n\t\t\t\tfmt.Printf(\"%v: %v \", key, countMap[key])\n\t\t\t}\n\t\t\tfmt.Println(\")\")\n\t\t}\n\t}\n\n\tif showConfig {\n\t\tfmt.Println(PrettyPrintJson(string(booberReturn.Config)))\n\t}\n\n\tif showObjects {\n\t\tfor key := range booberReturn.OpenshiftObjects {\n\t\t\tfmt.Println(key)\n\t\t\tfmt.Println(PrettyPrintJson(string(booberReturn.OpenshiftObjects[key])))\n\t\t}\n\t\t\/\/fmt.Println(PrettyPrintJson(string(booberReturn.OpenshiftObjects)))\n\t}\n\n\treturn OPERATION_OK\n}\n<commit_msg>Localhost flag now sends the token of the API server (first reachable instance) as the bearer token to Boober<commit_after>package boober\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/skatteetaten\/aoc\/pkg\/openshift\"\n\t\"github.com\/spf13\/viper\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\/\/\"github.com\/vbatts\/gogololcat\/lol\"\n)\n\nconst SPEC_ILLEGAL = -1\nconst MISSING_FILE_REFERENCE = -2\nconst MISSING_CONFIGURATION = -3\nconst ILLEGAL_JSON_CONFIGURATION = -4\nconst CONFIGURATION_FILE_IN_ROOT = -5\nconst ERROR_READING_FILE = -6\nconst BOOBER_ERROR = -7\nconst ILLEGAL_FILE = -8\nconst INTERNAL_ERROR = -9\nconst FOLDER_SETUP_NOT_SUPPORTED = -10\nconst OPERATION_OK = 0\nconst SPEC_IS_FILE = 1\nconst SPEC_IS_FOLDER = 2\n\nconst BOOBER_NOT_INSTALLED_RESPONSE = \"Application is not available\"\n\n\/\/ Struct to represent data to the Boober interface\ntype BooberInferface struct {\n\tEnv string `json:\"env\"`\n\tApp string `json:\"app\"`\n\tAffiliation string `json:\"affiliation\"`\n\tFiles map[string]json.RawMessage `json:\"files\"`\n\tOverrides map[string]json.RawMessage `json:\"overrides\"`\n}\n\n\/\/ Structs to represent return data from the Boober interface\ntype BooberReturn struct {\n\tSources json.RawMessage `json:\"sources\"`\n\tErrors []string `json:\"errors\"`\n\tValid bool `json:\"valid\"`\n\tConfig json.RawMessage `json:\"config\"`\n\tOpenshiftObjects map[string]json.RawMessage `json:\"openshiftObjects\"`\n}\n\nfunc ExecuteSetup(args []string, dryRun bool, showConfig bool, showObjects bool, verbose bool, localhost bool,\n\toverrideFiles []string) int {\n\n\tvalidateCode := validateCommand(args, overrideFiles)\n\tif validateCode < 0 {\n\t\treturn validateCode\n\t}\n\n\tvar absolutePath string\n\n\tabsolutePath, _ = filepath.Abs(args[0])\n\n\tvar envFile string \/\/ Filename for app\n\tvar envFolder string \/\/ Short folder name (Env)\n\tvar folder string \/\/ Absolute path of folder\n\tvar parentFolder string \/\/ Absolute path of parent\n\n\tswitch validateCode {\n\tcase SPEC_IS_FILE:\n\t\tfolder = filepath.Dir(absolutePath)\n\t\tenvFile = filepath.Base(absolutePath)\n\tcase SPEC_IS_FOLDER:\n\t\tfolder = absolutePath\n\t\tenvFile = \"\"\n\t\tfmt.Println(\"Setup on a folder is not yet supported\")\n\t\treturn FOLDER_SETUP_NOT_SUPPORTED\n\t}\n\n\tparentFolder = filepath.Dir(folder)\n\tenvFolder = filepath.Base(folder)\n\n\tif folder == parentFolder {\n\t\tfmt.Println(\"Application configuration file cannot reside in root directory\")\n\t\treturn CONFIGURATION_FILE_IN_ROOT\n\t}\n\n\t\/\/ Initialize JSON\n\n\tvar booberData BooberInferface\n\tbooberData.App = strings.TrimSuffix(envFile, filepath.Ext(envFile)) \/\/envFile\n\tbooberData.Env = envFolder\n\tbooberData.Affiliation = \"\"\n\n\tvar returnMap = Folder2Map(folder, envFolder+\"\/\")\n\tvar returnMap2 = Folder2Map(parentFolder, \"\")\n\n\tbooberData.Files = CombineMaps(returnMap, returnMap2)\n\tbooberData.Overrides = overrides2map(args, overrideFiles)\n\n\tjsonByte, ok := json.Marshal(booberData)\n\tif !(ok == nil) {\n\t\tfmt.Println(\"Internal error in marshalling Boober data: \" + ok.Error())\n\t\treturn INTERNAL_ERROR\n\t}\n\n\tjsonStr := string(jsonByte)\n\tif dryRun {\n\t\t\/\/lol.Writer.Write(os.Stdout, []byte(PrettyPrintJson(jsonStr)))\n\t\tfmt.Println(string(PrettyPrintJson(jsonStr)))\n\t} else {\n\t\tvalidateCode = CallBoober(jsonStr, showConfig, showObjects, false, localhost, verbose)\n\t\tif validateCode < 0 {\n\t\t\treturn validateCode\n\t\t}\n\t}\n\treturn OPERATION_OK\n}\n\nfunc overrides2map(args []string, overrideFiles []string) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\tfor i := 0; i < len(overrideFiles); i++ {\n\t\treturnMap[overrideFiles[i]] = json.RawMessage(args[i+1])\n\t}\n\treturn returnMap\n}\n\nfunc Folder2Map(folder string, prefix string) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\tvar allFilesOK bool = true\n\n\tfiles, _ := ioutil.ReadDir(folder)\n\tvar filesProcessed = 0\n\tfor _, f := range files {\n\t\tabsolutePath := filepath.Join(folder, f.Name())\n\t\tif IsLegalFileFolder(absolutePath) == SPEC_IS_FILE { \/\/ Ignore folders\n\t\t\tmatched, _ := filepath.Match(\"*.json\", strings.ToLower(f.Name()))\n\t\t\tif matched {\n\t\t\t\tfileJson, err := ioutil.ReadFile(absolutePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error in reading file \" + absolutePath)\n\t\t\t\t\tos.Exit(ERROR_READING_FILE)\n\t\t\t\t}\n\t\t\t\tif IsLegalJson(string(fileJson)) {\n\t\t\t\t\tfilesProcessed++\n\t\t\t\t\treturnMap[prefix+f.Name()] = fileJson\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Illegal JSON in configuration file \" + absolutePath)\n\t\t\t\t\tallFilesOK = false\n\t\t\t\t}\n\t\t\t\tfilesProcessed++\n\t\t\t}\n\t\t}\n\n\t}\n\tif !allFilesOK {\n\t\tos.Exit(ILLEGAL_JSON_CONFIGURATION)\n\t}\n\treturn returnMap\n}\n\nfunc CombineMaps(map1 map[string]json.RawMessage, map2 map[string]json.RawMessage) map[string]json.RawMessage {\n\tvar returnMap = make(map[string]json.RawMessage)\n\n\tfor k, v := range map1 {\n\t\treturnMap[k] = v\n\t}\n\tfor k, v := range map2 {\n\t\treturnMap[k] = v\n\t}\n\treturn returnMap\n}\n\nfunc validateCommand(args []string, overrideFiles []string) int {\n\tvar errorString = \"\"\n\tvar returnCode int\n\n\tif len(args) == 0 {\n\t\treturnCode = -1\n\t\terrorString += \"Missing file\/folder \"\n\t} else {\n\t\t\/\/ Chceck argument 0 for legal file \/ folder\n\t\treturnCode = IsLegalFileFolder(args[0])\n\t\tif returnCode < 0 {\n\t\t\terrorString += \"Illegal file \/ folder: \" + args[0]\n\t\t\treturnCode = ILLEGAL_FILE\n\t\t}\n\n\t\t\/\/ We have at least one argument, now there should be a correlation between the number of args\n\t\t\/\/ and the number of override (-f) flags\n\t\tif len(overrideFiles) < (len(args) - 1) {\n\t\t\treturnCode = MISSING_FILE_REFERENCE\n\t\t\terrorString += \"Configuration override specified without file reference flag \"\n\t\t}\n\t\tif len(overrideFiles) > (len(args) - 1) {\n\t\t\treturnCode = MISSING_CONFIGURATION\n\t\t\terrorString += \"Configuration overide file reference flag specified without configuration \"\n\t\t}\n\n\t\t\/\/ Check for legal JSON argument for each overrideFiles flag\n\t\tfor i := 1; i < len(args); i++ {\n\t\t\tif !IsLegalJson(args[i]) {\n\t\t\t\terrorString = \"Illegal JSON configuration override: \" + args[i] + \" \"\n\t\t\t\treturnCode = ILLEGAL_JSON_CONFIGURATION\n\t\t\t}\n\t\t}\n\t}\n\n\tif returnCode < 0 {\n\t\tfmt.Println(errorString)\n\t}\n\treturn returnCode\n}\n\nfunc IsLegalFileFolder(filespec string) int {\n\tvar err error\n\tvar absolutePath string\n\tvar fi os.FileInfo\n\n\tabsolutePath, err = filepath.Abs(filespec)\n\tfi, err = os.Stat(absolutePath)\n\tif os.IsNotExist(err) {\n\t\treturn SPEC_ILLEGAL\n\t} else {\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\treturn SPEC_IS_FOLDER\n\t\tcase mode.IsRegular():\n\t\t\treturn SPEC_IS_FILE\n\t\t}\n\t}\n\treturn SPEC_ILLEGAL\n}\n\nfunc GetBooberAddress(clusterName string, localhost bool) string {\n\tvar booberAddress string\n\n\tif localhost {\n\t\tbooberAddress = \"http:\/\/localhost:8080\"\n\t} else {\n\t\tbooberAddress = \"http:\/\/boober-mfp-boober.\" + clusterName + \".paas.skead.no\"\n\t}\n\treturn booberAddress\n}\n\nfunc GetBooberSetupUrl(clusterName string, localhost bool) string {\n\treturn GetBooberAddress(clusterName, localhost) + \"\/setup\"\n}\n\nfunc GetApiCluster() *openshift.OpenshiftCluster {\n\tvar configLocation = viper.GetString(\"HOME\") + \"\/.aoc.json\"\n\topenshiftConfig, err := openshift.LoadOrInitiateConfigFile(configLocation)\n\tif err != nil {\n\t\tfmt.Println(\"Error in loading OpenShift configuration\")\n\t\treturn nil\n\t}\n\tfor i := range openshiftConfig.Clusters {\n\t\tif openshiftConfig.Clusters[i].Reachable {\n\t\t\treturn openshiftConfig.Clusters[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc CallBoober(combindedJson string, showConfig bool, showObjects bool, api bool, localhost bool, verbose bool) int {\n\t\/\/var openshiftConfig *openshift.OpenshiftConfig\n\tvar configLocation = viper.GetString(\"HOME\") + \"\/.aoc.json\"\n\n\tif localhost {\n\t\tvar token string = \"\"\n\t\tapiCluster := GetApiCluster()\n\t\tif apiCluster != nil {\n\t\t\ttoken = apiCluster.Token\n\t\t}\n\t\tCallBooberInstance(combindedJson, showConfig, showObjects, verbose,\n\t\t\tGetBooberSetupUrl(\"localhost\", localhost), token)\n\t} else {\n\t\topenshiftConfig, err := openshift.LoadOrInitiateConfigFile(configLocation)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error in loading OpenShift configuration\")\n\t\t\treturn INTERNAL_ERROR\n\t\t}\n\n\t\tfor i := range openshiftConfig.Clusters {\n\t\t\tif openshiftConfig.Clusters[i].Reachable {\n\t\t\t\tif !api || openshiftConfig.Clusters[i].Name == openshiftConfig.APICluster {\n\t\t\t\t\tCallBooberInstance(combindedJson, showConfig, showObjects, verbose,\n\t\t\t\t\t\tGetBooberSetupUrl(openshiftConfig.Clusters[i].Name, localhost),\n\t\t\t\t\t\topenshiftConfig.Clusters[i].Token)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn OPERATION_OK\n}\n\nfunc CallBooberInstance(combindedJson string, showConfig bool, showObjects bool, verbose bool, url string, token string) int {\n\n\tif verbose {\n\t\tfmt.Print(\"Sending config to Boober at \" + url + \"... \")\n\t}\n\n\tvar jsonStr = []byte(combindedJson)\n\n\treq, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(jsonStr))\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tif err != nil {\n\t\tfmt.Println(\"Internal error in NewRequest: \", err)\n\t\treturn INTERNAL_ERROR\n\t}\n\n\treq.Header.Set(\"Authentication\", \"Bearer: \"+token)\n\tclient := &http.Client{}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"Error connecting to the Boober service on \"+url+\": \", err)\n\t\treturn BOOBER_ERROR\n\t}\n\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tbodyStr := string(body)\n\t\tif strings.Contains(bodyStr, BOOBER_NOT_INSTALLED_RESPONSE) {\n\t\t\tfmt.Println(\"FAIL: Boober not available on \" + url + \":\")\n\t\t}\n\t\treturn BOOBER_ERROR\n\t}\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t\/\/ Check return for error\n\tvar booberReturn BooberReturn\n\terr = json.Unmarshal(body, &booberReturn)\n\tif err != nil {\n\t\tfmt.Println(\"Error unmashalling Boober return: \" + err.Error())\n\t\treturn BOOBER_ERROR\n\t}\n\n\tif !(booberReturn.Valid) {\n\t\tfmt.Println(\"Error in configuration: \")\n\t\tfor _, message := range booberReturn.Errors {\n\t\t\tfmt.Println(\" \" + message)\n\t\t}\n\t} else {\n\t\tif verbose {\n\t\t\tfmt.Print(\"OK \")\n\t\t\tvar countMap map[string]int = make(map[string]int)\n\t\t\tfor key := range booberReturn.OpenshiftObjects {\n\t\t\t\tcountMap[key]++\n\t\t\t}\n\t\t\tfmt.Print(\"(\")\n\t\t\tfor key := range countMap {\n\t\t\t\tfmt.Printf(\"%v: %v \", key, countMap[key])\n\t\t\t}\n\t\t\tfmt.Println(\")\")\n\t\t}\n\t}\n\n\tif showConfig {\n\t\tfmt.Println(PrettyPrintJson(string(booberReturn.Config)))\n\t}\n\n\tif showObjects {\n\t\tfor key := range booberReturn.OpenshiftObjects {\n\t\t\tfmt.Println(key)\n\t\t\tfmt.Println(PrettyPrintJson(string(booberReturn.OpenshiftObjects[key])))\n\t\t}\n\t\t\/\/fmt.Println(PrettyPrintJson(string(booberReturn.OpenshiftObjects)))\n\t}\n\n\treturn OPERATION_OK\n}\n<|endoftext|>"} {"text":"<commit_before>package consts\n\nimport (\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n)\n\n\/\/ Instances doc type for User's instance document\nconst Instances = \"instances\"\n\nconst (\n\t\/\/ Apps doc type for client-side application manifests\n\tApps = \"io.cozy.apps\"\n\t\/\/ Konnectors doc type for konnector application manifests\n\tKonnectors = \"io.cozy.konnectors\"\n\t\/\/ KonnectorResults doc type for konnector last execution result.\n\tKonnectorResults = \"io.cozy.konnectors.result\"\n\t\/\/ Archives doc type for zip archives with files and directories\n\tArchives = \"io.cozy.files.archives\"\n\t\/\/ Doctypes doc type for doctype list\n\tDoctypes = \"io.cozy.doctypes\"\n\t\/\/ Files doc type for type for files and directories\n\tFiles = \"io.cozy.files\"\n\t\/\/ Intents doc type for intents persisted in couchdb\n\tIntents = \"io.cozy.intents\"\n\t\/\/ Jobs doc type for queued jobs\n\tJobs = \"io.cozy.jobs\"\n\t\/\/ JobEvents doc type for realt time events sent by jobs\n\tJobEvents = \"io.cozy.jobs.events\"\n\t\/\/ OAuthAccessCodes doc type for OAuth2 access codes\n\tOAuthAccessCodes = \"io.cozy.oauth.access_codes\"\n\t\/\/ OAuthClients doc type for OAuth2 clients\n\tOAuthClients = \"io.cozy.oauth.clients\"\n\t\/\/ Permissions doc type for permissions identifying a connection\n\tPermissions = \"io.cozy.permissions\"\n\t\/\/ Queues doc type for jobs queues\n\tQueues = \"io.cozy.queues\"\n\t\/\/ Recipients doc type for sharing recipients\n\tRecipients = \"io.cozy.recipients\"\n\t\/\/ RemoteRequests doc type for logging requests to remote websites\n\tRemoteRequests = \"io.cozy.remote.requests\"\n\t\/\/ Sessions doc type for sessions identifying a connection\n\tSessions = \"io.cozy.sessions\"\n\t\/\/ Settings doc type for settings to customize an instance\n\tSettings = \"io.cozy.settings\"\n\t\/\/ Sharings doc type for document and file sharing\n\tSharings = \"io.cozy.sharings\"\n\t\/\/ Triggers doc type for triggers, jobs launchers\n\tTriggers = \"io.cozy.triggers\"\n\t\/\/ Accounts doc type for accounts\n\tAccounts = \"io.cozy.accounts\"\n\t\/\/ AccountTypes doc type for account types\n\tAccountTypes = \"io.cozy.account_types\"\n)\n\nconst (\n\t\/\/ DriveSlug is the slug of the default app, files, where the user is\n\t\/\/ redirected after login.\n\tDriveSlug = \"drive\"\n\t\/\/ OnboardingSlug is the slug of the onboarding app, where the user is\n\t\/\/ redirected when he has no passphrase.\n\tOnboardingSlug = \"onboarding\"\n\t\/\/ StoreSlug is the slug of the only app that can install other apps.\n\t\/\/ FIXME update me when the store app will be available\n\tStoreSlug = \"settings\"\n\t\/\/ CollectSlug is the slug of the only app that can install konnectors.\n\tCollectSlug = \"collect\"\n)\n\nconst (\n\t\/\/ DirType is the type attribute for directories\n\tDirType = \"directory\"\n\t\/\/ FileType is the type attribute for files\n\tFileType = \"file\"\n)\n\nconst (\n\t\/\/ RootDirID is the root directory identifier\n\tRootDirID = \"io.cozy.files.root-dir\"\n\t\/\/ TrashDirID is the trash directory identifier\n\tTrashDirID = \"io.cozy.files.trash-dir\"\n)\n\nconst (\n\t\/\/ ContextSettingsID is the id of the settings JSON-API response for the context\n\tContextSettingsID = \"io.cozy.settings.context\"\n\t\/\/ DiskUsageID is the id of the settings JSON-API response for disk-usage\n\tDiskUsageID = \"io.cozy.settings.disk-usage\"\n\t\/\/ InstanceSettingsID is the id of settings document for the instance\n\tInstanceSettingsID = \"io.cozy.settings.instance\"\n\t\/\/ SharingSettingsID is the id of the settings document for sharings.\n\tSharingSettingsID = \"io.cozy.settings.sharings\"\n)\n\nconst (\n\t\/\/ OneShotSharing is a sharing with no continuous updates\n\tOneShotSharing = \"one-shot\"\n\t\/\/ MasterSlaveSharing is a sharing with unilateral continuous updates\n\tMasterSlaveSharing = \"master-slave\"\n\t\/\/ MasterMasterSharing is a sharing with bilateral continuous updates\n\tMasterMasterSharing = \"master-master\"\n)\n\nconst (\n\t\/\/ SharingStatusPending is the sharing pending status\n\tSharingStatusPending = \"pending\"\n\t\/\/ SharingStatusRefused is the sharing refused status\n\tSharingStatusRefused = \"refused\"\n\t\/\/ SharingStatusAccepted is the sharing accepted status\n\tSharingStatusAccepted = \"accepted\"\n\t\/\/ SharingStatusError is when the request could not be sent\n\tSharingStatusError = \"error\"\n\t\/\/ SharingStatusUnregistered is when the sharer could not register herself\n\t\/\/ as an OAuth client at the recipient's\n\tSharingStatusUnregistered = \"unregistered\"\n\t\/\/ SharingStatusMailNotSent is when the mail invitation was not sent\n\tSharingStatusMailNotSent = \"mail-not-sent\"\n\t\/\/ SharingStatusRevoked is to tell if a recipient is revoked.\n\tSharingStatusRevoked = \"revoked\"\n)\n\nconst (\n\t\/\/ WorkerTypeSharingUpdates is the string representation of the type of\n\t\/\/ workers that deals with updating sharings.\n\tWorkerTypeSharingUpdates = \"sharingupdates\"\n)\n\nconst (\n\t\/\/ SelectorReferencedBy is the \"referenced_by\" selector.\n\tSelectorReferencedBy = couchdb.SelectorReferencedBy\n)\n\nconst (\n\t\/\/ QueryParamRev is the key for the revision value in a query string. In\n\t\/\/ web\/data the revision is expected as \"rev\", not \"Rev\".\n\tQueryParamRev = \"rev\"\n\t\/\/ QueryParamDirID is the key for the `DirID` field of a vfs.FileDoc or\n\t\/\/ vfs.DirDoc, in a query string.\n\tQueryParamDirID = \"Dir_id\"\n\t\/\/ QueryParamName is the key for the name value in a query string.\n\tQueryParamName = \"Name\"\n\t\/\/ QueryParamType is the key for the `type` value (file or directory) in\n\t\/\/ a query string.\n\tQueryParamType = \"Type\"\n\t\/\/ QueryParamExecutable is key for the `executable` field of a vfs.FileDoc\n\t\/\/ in a query string.\n\tQueryParamExecutable = \"Executable\"\n\t\/\/ QueryParamCreatedAt is the key for the `created_at` value in a query\n\t\/\/ string.\n\tQueryParamCreatedAt = \"Created_at\"\n\t\/\/ QueryParamUpdatedAt is the key for the `Updated_at` value in a query\n\t\/\/ string.\n\tQueryParamUpdatedAt = \"Updated_at\"\n\t\/\/ QueryParamRecursive is the key for the `recursive` value in a query\n\t\/\/ string.\n\tQueryParamRecursive = \"Recursive\"\n\t\/\/ QueryParamTags is the key for the `tags` values in a query string.\n\tQueryParamTags = \"Tags\"\n\t\/\/ QueryParamReferencedBy is the key for the `referenced_by` values in a\n\t\/\/ query string.\n\tQueryParamReferencedBy = \"Referenced_by\"\n\t\/\/ QueryParamSharer is used to tell if the user that received the query is\n\t\/\/ the sharer or not.\n\tQueryParamSharer = \"Sharer\"\n\t\/\/ QueryParamAppSlug is used to transmit the application slug in a query\n\t\/\/ string.\n\tQueryParamAppSlug = \"App_slug\"\n\t\/\/ QueryParamDocType is used to transmit the doctype in a query string.\n\tQueryParamDocType = \"Doctype\"\n\t\/\/ QueryParamSharingID is used to transmit the sharingID in a query string.\n\tQueryParamSharingID = \"Sharing_id\"\n)\n\n\/\/ AppsRegistry is an hard-coded list of known apps, with their source URLs\n\/\/ TODO remove it when we will have a true registry\nvar AppsRegistry = map[string]string{\n\t\"onboarding\": \"git:\/\/github.com\/cozy\/cozy-onboarding-v3.git#build\",\n\t\"drive\": \"git:\/\/github.com\/cozy\/cozy-drive.git#build\",\n\t\"photos\": \"git:\/\/github.com\/cozy\/cozy-photos-v3.git#build\",\n\t\"settings\": \"git:\/\/github.com\/cozy\/cozy-settings.git#build\",\n\t\"collect\": \"git:\/\/github.com\/cozy\/cozy-collect.git#build\",\n}\n<commit_msg>Use contact doctype for sharing recipients<commit_after>package consts\n\nimport (\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n)\n\n\/\/ Instances doc type for User's instance document\nconst Instances = \"instances\"\n\nconst (\n\t\/\/ Apps doc type for client-side application manifests\n\tApps = \"io.cozy.apps\"\n\t\/\/ Konnectors doc type for konnector application manifests\n\tKonnectors = \"io.cozy.konnectors\"\n\t\/\/ KonnectorResults doc type for konnector last execution result.\n\tKonnectorResults = \"io.cozy.konnectors.result\"\n\t\/\/ Archives doc type for zip archives with files and directories\n\tArchives = \"io.cozy.files.archives\"\n\t\/\/ Doctypes doc type for doctype list\n\tDoctypes = \"io.cozy.doctypes\"\n\t\/\/ Files doc type for type for files and directories\n\tFiles = \"io.cozy.files\"\n\t\/\/ Intents doc type for intents persisted in couchdb\n\tIntents = \"io.cozy.intents\"\n\t\/\/ Jobs doc type for queued jobs\n\tJobs = \"io.cozy.jobs\"\n\t\/\/ JobEvents doc type for realt time events sent by jobs\n\tJobEvents = \"io.cozy.jobs.events\"\n\t\/\/ OAuthAccessCodes doc type for OAuth2 access codes\n\tOAuthAccessCodes = \"io.cozy.oauth.access_codes\"\n\t\/\/ OAuthClients doc type for OAuth2 clients\n\tOAuthClients = \"io.cozy.oauth.clients\"\n\t\/\/ Permissions doc type for permissions identifying a connection\n\tPermissions = \"io.cozy.permissions\"\n\t\/\/ Queues doc type for jobs queues\n\tQueues = \"io.cozy.queues\"\n\t\/\/ Recipients doc type for sharing recipients\n\tRecipients = \"io.cozy.contacts\"\n\t\/\/ RemoteRequests doc type for logging requests to remote websites\n\tRemoteRequests = \"io.cozy.remote.requests\"\n\t\/\/ Sessions doc type for sessions identifying a connection\n\tSessions = \"io.cozy.sessions\"\n\t\/\/ Settings doc type for settings to customize an instance\n\tSettings = \"io.cozy.settings\"\n\t\/\/ Sharings doc type for document and file sharing\n\tSharings = \"io.cozy.sharings\"\n\t\/\/ Triggers doc type for triggers, jobs launchers\n\tTriggers = \"io.cozy.triggers\"\n\t\/\/ Accounts doc type for accounts\n\tAccounts = \"io.cozy.accounts\"\n\t\/\/ AccountTypes doc type for account types\n\tAccountTypes = \"io.cozy.account_types\"\n)\n\nconst (\n\t\/\/ DriveSlug is the slug of the default app, files, where the user is\n\t\/\/ redirected after login.\n\tDriveSlug = \"drive\"\n\t\/\/ OnboardingSlug is the slug of the onboarding app, where the user is\n\t\/\/ redirected when he has no passphrase.\n\tOnboardingSlug = \"onboarding\"\n\t\/\/ StoreSlug is the slug of the only app that can install other apps.\n\t\/\/ FIXME update me when the store app will be available\n\tStoreSlug = \"settings\"\n\t\/\/ CollectSlug is the slug of the only app that can install konnectors.\n\tCollectSlug = \"collect\"\n)\n\nconst (\n\t\/\/ DirType is the type attribute for directories\n\tDirType = \"directory\"\n\t\/\/ FileType is the type attribute for files\n\tFileType = \"file\"\n)\n\nconst (\n\t\/\/ RootDirID is the root directory identifier\n\tRootDirID = \"io.cozy.files.root-dir\"\n\t\/\/ TrashDirID is the trash directory identifier\n\tTrashDirID = \"io.cozy.files.trash-dir\"\n)\n\nconst (\n\t\/\/ ContextSettingsID is the id of the settings JSON-API response for the context\n\tContextSettingsID = \"io.cozy.settings.context\"\n\t\/\/ DiskUsageID is the id of the settings JSON-API response for disk-usage\n\tDiskUsageID = \"io.cozy.settings.disk-usage\"\n\t\/\/ InstanceSettingsID is the id of settings document for the instance\n\tInstanceSettingsID = \"io.cozy.settings.instance\"\n\t\/\/ SharingSettingsID is the id of the settings document for sharings.\n\tSharingSettingsID = \"io.cozy.settings.sharings\"\n)\n\nconst (\n\t\/\/ OneShotSharing is a sharing with no continuous updates\n\tOneShotSharing = \"one-shot\"\n\t\/\/ MasterSlaveSharing is a sharing with unilateral continuous updates\n\tMasterSlaveSharing = \"master-slave\"\n\t\/\/ MasterMasterSharing is a sharing with bilateral continuous updates\n\tMasterMasterSharing = \"master-master\"\n)\n\nconst (\n\t\/\/ SharingStatusPending is the sharing pending status\n\tSharingStatusPending = \"pending\"\n\t\/\/ SharingStatusRefused is the sharing refused status\n\tSharingStatusRefused = \"refused\"\n\t\/\/ SharingStatusAccepted is the sharing accepted status\n\tSharingStatusAccepted = \"accepted\"\n\t\/\/ SharingStatusError is when the request could not be sent\n\tSharingStatusError = \"error\"\n\t\/\/ SharingStatusUnregistered is when the sharer could not register herself\n\t\/\/ as an OAuth client at the recipient's\n\tSharingStatusUnregistered = \"unregistered\"\n\t\/\/ SharingStatusMailNotSent is when the mail invitation was not sent\n\tSharingStatusMailNotSent = \"mail-not-sent\"\n\t\/\/ SharingStatusRevoked is to tell if a recipient is revoked.\n\tSharingStatusRevoked = \"revoked\"\n)\n\nconst (\n\t\/\/ WorkerTypeSharingUpdates is the string representation of the type of\n\t\/\/ workers that deals with updating sharings.\n\tWorkerTypeSharingUpdates = \"sharingupdates\"\n)\n\nconst (\n\t\/\/ SelectorReferencedBy is the \"referenced_by\" selector.\n\tSelectorReferencedBy = couchdb.SelectorReferencedBy\n)\n\nconst (\n\t\/\/ QueryParamRev is the key for the revision value in a query string. In\n\t\/\/ web\/data the revision is expected as \"rev\", not \"Rev\".\n\tQueryParamRev = \"rev\"\n\t\/\/ QueryParamDirID is the key for the `DirID` field of a vfs.FileDoc or\n\t\/\/ vfs.DirDoc, in a query string.\n\tQueryParamDirID = \"Dir_id\"\n\t\/\/ QueryParamName is the key for the name value in a query string.\n\tQueryParamName = \"Name\"\n\t\/\/ QueryParamType is the key for the `type` value (file or directory) in\n\t\/\/ a query string.\n\tQueryParamType = \"Type\"\n\t\/\/ QueryParamExecutable is key for the `executable` field of a vfs.FileDoc\n\t\/\/ in a query string.\n\tQueryParamExecutable = \"Executable\"\n\t\/\/ QueryParamCreatedAt is the key for the `created_at` value in a query\n\t\/\/ string.\n\tQueryParamCreatedAt = \"Created_at\"\n\t\/\/ QueryParamUpdatedAt is the key for the `Updated_at` value in a query\n\t\/\/ string.\n\tQueryParamUpdatedAt = \"Updated_at\"\n\t\/\/ QueryParamRecursive is the key for the `recursive` value in a query\n\t\/\/ string.\n\tQueryParamRecursive = \"Recursive\"\n\t\/\/ QueryParamTags is the key for the `tags` values in a query string.\n\tQueryParamTags = \"Tags\"\n\t\/\/ QueryParamReferencedBy is the key for the `referenced_by` values in a\n\t\/\/ query string.\n\tQueryParamReferencedBy = \"Referenced_by\"\n\t\/\/ QueryParamSharer is used to tell if the user that received the query is\n\t\/\/ the sharer or not.\n\tQueryParamSharer = \"Sharer\"\n\t\/\/ QueryParamAppSlug is used to transmit the application slug in a query\n\t\/\/ string.\n\tQueryParamAppSlug = \"App_slug\"\n\t\/\/ QueryParamDocType is used to transmit the doctype in a query string.\n\tQueryParamDocType = \"Doctype\"\n\t\/\/ QueryParamSharingID is used to transmit the sharingID in a query string.\n\tQueryParamSharingID = \"Sharing_id\"\n)\n\n\/\/ AppsRegistry is an hard-coded list of known apps, with their source URLs\n\/\/ TODO remove it when we will have a true registry\nvar AppsRegistry = map[string]string{\n\t\"onboarding\": \"git:\/\/github.com\/cozy\/cozy-onboarding-v3.git#build\",\n\t\"drive\": \"git:\/\/github.com\/cozy\/cozy-drive.git#build\",\n\t\"photos\": \"git:\/\/github.com\/cozy\/cozy-photos-v3.git#build\",\n\t\"settings\": \"git:\/\/github.com\/cozy\/cozy-settings.git#build\",\n\t\"collect\": \"git:\/\/github.com\/cozy\/cozy-collect.git#build\",\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package daemon implements a service for mediating access to the data store,\n\/\/ and its client.\n\/\/\n\/\/ Most RPCs exposed by the service correspond to the methods of Store in the\n\/\/ store package and are not documented here.\npackage daemon\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"src.elv.sh\/pkg\/daemon\/internal\/api\"\n\t\"src.elv.sh\/pkg\/logutil\"\n\t\"src.elv.sh\/pkg\/prog\"\n\t\"src.elv.sh\/pkg\/rpc\"\n\t\"src.elv.sh\/pkg\/store\"\n)\n\nvar logger = logutil.GetLogger(\"[daemon] \")\n\n\/\/ Program is the daemon subprogram.\ntype Program struct {\n\trun bool\n\tpaths *prog.DaemonPaths\n\t\/\/ Used in tests.\n\tserveOpts ServeOpts\n}\n\nfunc (p *Program) RegisterFlags(fs *prog.FlagSet) {\n\tfs.BoolVar(&p.run, \"daemon\", false,\n\t\t\"[internal flag] Run the storage daemon instead of an Elvish shell\")\n\tp.paths = fs.DaemonPaths()\n}\n\nfunc (p *Program) Run(fds [3]*os.File, args []string) error {\n\tif !p.run {\n\t\treturn prog.ErrNextProgram\n\t}\n\tif len(args) > 0 {\n\t\treturn prog.BadUsage(\"arguments are not allowed with -daemon\")\n\t}\n\n\t\/\/ The stdout is redirected to a unique log file (see the spawn function),\n\t\/\/ so just use it for logging.\n\tlogutil.SetOutput(fds[1])\n\tsetUmaskForDaemon()\n\texit := Serve(p.paths.Sock, p.paths.DB, p.serveOpts)\n\treturn prog.Exit(exit)\n}\n\n\/\/ ServeOpts keeps options that can be passed to Serve.\ntype ServeOpts struct {\n\t\/\/ If not nil, will be closed when the daemon is ready to serve requests.\n\tReady chan<- struct{}\n\t\/\/ Causes the daemon to abort if closed or sent any date. If nil, Serve will\n\t\/\/ set up its own signal channel by listening to SIGINT and SIGTERM.\n\tSignals <-chan os.Signal\n\t\/\/ If not nil, overrides the response of the Version RPC.\n\tVersion *int\n}\n\n\/\/ Serve runs the daemon service, listening on the socket specified by sockpath\n\/\/ and serving data from dbpath until all clients have exited. See doc for\n\/\/ ServeChans for additional options.\nfunc Serve(sockpath, dbpath string, opts ServeOpts) int {\n\tlogger.Println(\"pid is\", syscall.Getpid())\n\tlogger.Println(\"going to listen\", sockpath)\n\tlistener, err := net.Listen(\"unix\", sockpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to listen on %s: %v\", sockpath, err)\n\t\tlogger.Println(\"aborting\")\n\t\treturn 2\n\t}\n\n\tst, err := store.NewStore(dbpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to create storage: %v\", err)\n\t\tlogger.Printf(\"serving anyway\")\n\t}\n\n\tserver := rpc.NewServer()\n\tversion := api.Version\n\tif opts.Version != nil {\n\t\tversion = *opts.Version\n\t}\n\tserver.RegisterName(api.ServiceName, &service{version, st, err})\n\n\tconnCh := make(chan net.Conn, 10)\n\tlistenErrCh := make(chan error, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlistenErrCh <- err\n\t\t\t\tclose(listenErrCh)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconnCh <- conn\n\t\t}\n\t}()\n\n\tsigCh := opts.Signals\n\tif sigCh == nil {\n\t\tch := make(chan os.Signal, 1)\n\t\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)\n\t\tsigCh = ch\n\t}\n\n\tconns := make(map[net.Conn]struct{})\n\tconnDoneCh := make(chan net.Conn, 10)\n\n\tinterrupt := func() {\n\t\tif len(conns) == 0 {\n\t\t\tlogger.Println(\"exiting since there are no clients\")\n\t\t}\n\t\tlogger.Printf(\"going to close %v active connections\", len(conns))\n\t\tfor conn := range conns {\n\t\t\terr := conn.Close()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(\"failed to close connection:\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif opts.Ready != nil {\n\t\tclose(opts.Ready)\n\t}\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sigCh:\n\t\t\tlogger.Printf(\"received signal %v\", sig)\n\t\t\tinterrupt()\n\t\t\tbreak loop\n\t\tcase err := <-listenErrCh:\n\t\t\tlogger.Println(\"could not listen:\", err)\n\t\t\tif len(conns) == 0 {\n\t\t\t\tlogger.Println(\"exiting since there are no clients\")\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tlogger.Println(\"continuing to serve until all existing clients exit\")\n\t\tcase conn := <-connCh:\n\t\t\tconns[conn] = struct{}{}\n\t\t\tgo func() {\n\t\t\t\tserver.ServeConn(conn)\n\t\t\t\tconnDoneCh <- conn\n\t\t\t}()\n\t\tcase conn := <-connDoneCh:\n\t\t\tdelete(conns, conn)\n\t\t\tif len(conns) == 0 {\n\t\t\t\tlogger.Println(\"all clients disconnected, exiting\")\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\terr = os.Remove(sockpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to remove socket %s: %v\", sockpath, err)\n\t}\n\tif st != nil {\n\t\terr = st.Close()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"failed to close storage: %v\", err)\n\t\t}\n\t}\n\terr = listener.Close()\n\tif err != nil {\n\t\tlogger.Printf(\"failed to close listener: %v\", err)\n\t}\n\t\/\/ Ensure that the listener goroutine has exited before returning\n\t<-listenErrCh\n\treturn 0\n}\n<commit_msg>pkg\/daemon: Ignore error when closing connections on shutdown.<commit_after>\/\/ Package daemon implements a service for mediating access to the data store,\n\/\/ and its client.\n\/\/\n\/\/ Most RPCs exposed by the service correspond to the methods of Store in the\n\/\/ store package and are not documented here.\npackage daemon\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"src.elv.sh\/pkg\/daemon\/internal\/api\"\n\t\"src.elv.sh\/pkg\/logutil\"\n\t\"src.elv.sh\/pkg\/prog\"\n\t\"src.elv.sh\/pkg\/rpc\"\n\t\"src.elv.sh\/pkg\/store\"\n)\n\nvar logger = logutil.GetLogger(\"[daemon] \")\n\n\/\/ Program is the daemon subprogram.\ntype Program struct {\n\trun bool\n\tpaths *prog.DaemonPaths\n\t\/\/ Used in tests.\n\tserveOpts ServeOpts\n}\n\nfunc (p *Program) RegisterFlags(fs *prog.FlagSet) {\n\tfs.BoolVar(&p.run, \"daemon\", false,\n\t\t\"[internal flag] Run the storage daemon instead of an Elvish shell\")\n\tp.paths = fs.DaemonPaths()\n}\n\nfunc (p *Program) Run(fds [3]*os.File, args []string) error {\n\tif !p.run {\n\t\treturn prog.ErrNextProgram\n\t}\n\tif len(args) > 0 {\n\t\treturn prog.BadUsage(\"arguments are not allowed with -daemon\")\n\t}\n\n\t\/\/ The stdout is redirected to a unique log file (see the spawn function),\n\t\/\/ so just use it for logging.\n\tlogutil.SetOutput(fds[1])\n\tsetUmaskForDaemon()\n\texit := Serve(p.paths.Sock, p.paths.DB, p.serveOpts)\n\treturn prog.Exit(exit)\n}\n\n\/\/ ServeOpts keeps options that can be passed to Serve.\ntype ServeOpts struct {\n\t\/\/ If not nil, will be closed when the daemon is ready to serve requests.\n\tReady chan<- struct{}\n\t\/\/ Causes the daemon to abort if closed or sent any date. If nil, Serve will\n\t\/\/ set up its own signal channel by listening to SIGINT and SIGTERM.\n\tSignals <-chan os.Signal\n\t\/\/ If not nil, overrides the response of the Version RPC.\n\tVersion *int\n}\n\n\/\/ Serve runs the daemon service, listening on the socket specified by sockpath\n\/\/ and serving data from dbpath until all clients have exited. See doc for\n\/\/ ServeChans for additional options.\nfunc Serve(sockpath, dbpath string, opts ServeOpts) int {\n\tlogger.Println(\"pid is\", syscall.Getpid())\n\tlogger.Println(\"going to listen\", sockpath)\n\tlistener, err := net.Listen(\"unix\", sockpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to listen on %s: %v\", sockpath, err)\n\t\tlogger.Println(\"aborting\")\n\t\treturn 2\n\t}\n\n\tst, err := store.NewStore(dbpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to create storage: %v\", err)\n\t\tlogger.Printf(\"serving anyway\")\n\t}\n\n\tserver := rpc.NewServer()\n\tversion := api.Version\n\tif opts.Version != nil {\n\t\tversion = *opts.Version\n\t}\n\tserver.RegisterName(api.ServiceName, &service{version, st, err})\n\n\tconnCh := make(chan net.Conn, 10)\n\tlistenErrCh := make(chan error, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlistenErrCh <- err\n\t\t\t\tclose(listenErrCh)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconnCh <- conn\n\t\t}\n\t}()\n\n\tsigCh := opts.Signals\n\tif sigCh == nil {\n\t\tch := make(chan os.Signal, 1)\n\t\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)\n\t\tsigCh = ch\n\t}\n\n\tconns := make(map[net.Conn]struct{})\n\tconnDoneCh := make(chan net.Conn, 10)\n\n\tinterrupt := func() {\n\t\tif len(conns) == 0 {\n\t\t\tlogger.Println(\"exiting since there are no clients\")\n\t\t}\n\t\tlogger.Printf(\"going to close %v active connections\", len(conns))\n\t\tfor conn := range conns {\n\t\t\t\/\/ Ignore the error - if we can't close the connection it's because\n\t\t\t\/\/ the client has closed it. There is nothing we can do anyway.\n\t\t\tconn.Close()\n\t\t}\n\t}\n\n\tif opts.Ready != nil {\n\t\tclose(opts.Ready)\n\t}\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sigCh:\n\t\t\tlogger.Printf(\"received signal %v\", sig)\n\t\t\tinterrupt()\n\t\t\tbreak loop\n\t\tcase err := <-listenErrCh:\n\t\t\tlogger.Println(\"could not listen:\", err)\n\t\t\tif len(conns) == 0 {\n\t\t\t\tlogger.Println(\"exiting since there are no clients\")\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tlogger.Println(\"continuing to serve until all existing clients exit\")\n\t\tcase conn := <-connCh:\n\t\t\tconns[conn] = struct{}{}\n\t\t\tgo func() {\n\t\t\t\tserver.ServeConn(conn)\n\t\t\t\tconnDoneCh <- conn\n\t\t\t}()\n\t\tcase conn := <-connDoneCh:\n\t\t\tdelete(conns, conn)\n\t\t\tif len(conns) == 0 {\n\t\t\t\tlogger.Println(\"all clients disconnected, exiting\")\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\terr = os.Remove(sockpath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to remove socket %s: %v\", sockpath, err)\n\t}\n\tif st != nil {\n\t\terr = st.Close()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"failed to close storage: %v\", err)\n\t\t}\n\t}\n\terr = listener.Close()\n\tif err != nil {\n\t\tlogger.Printf(\"failed to close listener: %v\", err)\n\t}\n\t\/\/ Ensure that the listener goroutine has exited before returning\n\t<-listenErrCh\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/andrew-d\/go-termutil\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mgutz\/ansi\"\n)\n\n\/\/ os.Exit forcely kills process, so let me share this global variable to terminate at the last\nvar exitCode = 0\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lltsv\"\n\tapp.Version = Version\n\tapp.Usage = `List specified keys of LTSV (Labeled Tab Separated Values)\n\n\tExample1 $ echo \"foo:aaa\\tbar:bbb\" | lltsv -k foo,bar\n\tfoo:aaa bar:bbb\n\n\tThe output is colorized as default when you outputs to a terminal. \n\tThe coloring is disabled if you pipe or redirect outputs.\n\n\tExample2 $ echo \"foo:aaa\\tbar:bbb\" | lltsv -k foo,bar -K\n\taaa bbb\n\n\tYou may eliminate labels with \"-K\" option`\n\tapp.Author = \"sonots\"\n\tapp.Email = \"sonots@gmail.com\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"key, k\",\n\t\t\tUsage: \"keys to output (multiple keys separated by ,)\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-key, K\",\n\t\t\tUsage: \"output without keys (and without color)\",\n\t\t},\n\t}\n\tapp.Action = doMain\n\tapp.Run(os.Args)\n\tos.Exit(exitCode)\n}\n\nfunc doMain(c *cli.Context) {\n\tkeys := make([]string, 0, 0) \/\/ slice with length 0\n\tif c.String(\"key\") != \"\" {\n\t\tkeys = strings.Split(c.String(\"key\"), \",\")\n\t}\n\tno_key := c.Bool(\"no-key\")\n\tfuncAppend := getFuncAppend(no_key)\n\n\tvar file *os.File\n\tif len(c.Args()) > 0 {\n\t\tfilename := c.Args()[0]\n\t\tvar err error\n\t\tfile, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tos.Stderr.WriteString(\"failed to open and read \" + filename)\n\t\t\texitCode = 1\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tfile = os.Stdin\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlvs := parseLtsv(line)\n\t\tltsv := restructLtsv(keys, lvs, funcAppend)\n\t\tos.Stdout.WriteString(ltsv + \"\\n\")\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tos.Stderr.WriteString(\"reading standard input errored\")\n\t\texitCode = 1\n\t\treturn\n\t}\n}\n\nfunc restructLtsv(keys []string, lvs map[string]string, funcAppend func([]string, string, string) []string) string {\n\t\/\/ specified keys or all keys\n\torders := keys\n\tif len(keys) == 0 {\n\t\torders = keysInMap(lvs)\n\t}\n\t\/\/ make slice with enough capacity so that append does not newly create object\n\t\/\/ cf. http:\/\/golang.org\/pkg\/builtin\/#append\n\tselected := make([]string, 0, len(orders))\n\tfor _, label := range orders {\n\t\tvalue := lvs[label]\n\t\tselected = funcAppend(selected, label, value)\n\t}\n\treturn strings.Join(selected, \"\\t\")\n}\n\nfunc parseLtsv(line string) map[string]string {\n\tcolumns := strings.Split(line, \"\\t\")\n\tlvs := make(map[string]string)\n\tfor _, column := range columns {\n\t\tl_v := strings.SplitN(column, \":\", 2)\n\t\tif len(l_v) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tlabel, value := l_v[0], l_v[1]\n\t\tlvs[label] = value\n\t}\n\treturn lvs\n}\n\n\/\/ Return function pointer to avoid `if` evaluation occurs in each iteration\nfunc getFuncAppend(no_key bool) func([]string, string, string) []string {\n\tif no_key {\n\t\treturn func(selected []string, label string, value string) []string {\n\t\t\treturn append(selected, value)\n\t\t}\n\t} else {\n\t\tif termutil.Isatty(os.Stdout.Fd()) {\n\t\t\treturn func(selected []string, label string, value string) []string {\n\t\t\t\treturn append(selected, ansi.Color(label, \"green\")+\":\"+ansi.Color(value, \"magenta\"))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ if pipe or redirect\n\t\t\treturn func(selected []string, label string, value string) []string {\n\t\t\t\treturn append(selected, label+\":\"+value)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc keysInMap(m map[string]string) []string {\n\tkeys := make([]string, 0, len(m))\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}\n\nfunc stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>add github url<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/andrew-d\/go-termutil\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/mgutz\/ansi\"\n)\n\n\/\/ os.Exit forcely kills process, so let me share this global variable to terminate at the last\nvar exitCode = 0\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lltsv\"\n\tapp.Version = Version\n\tapp.Usage = `List specified keys of LTSV (Labeled Tab Separated Values)\n\n\tExample1 $ echo \"foo:aaa\\tbar:bbb\" | lltsv -k foo,bar\n\tfoo:aaa bar:bbb\n\n\tThe output is colorized as default when you outputs to a terminal. \n\tThe coloring is disabled if you pipe or redirect outputs.\n\n\tExample2 $ echo \"foo:aaa\\tbar:bbb\" | lltsv -k foo,bar -K\n\taaa bbb\n\n\tYou may eliminate labels with \"-K\" option.\n\n\thttps:\/\/github.com\/sonots\/lltsv`\n\tapp.Author = \"sonots\"\n\tapp.Email = \"sonots@gmail.com\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"key, k\",\n\t\t\tUsage: \"keys to output (multiple keys separated by ,)\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-key, K\",\n\t\t\tUsage: \"output without keys (and without color)\",\n\t\t},\n\t}\n\tapp.Action = doMain\n\tapp.Run(os.Args)\n\tos.Exit(exitCode)\n}\n\nfunc doMain(c *cli.Context) {\n\tkeys := make([]string, 0, 0) \/\/ slice with length 0\n\tif c.String(\"key\") != \"\" {\n\t\tkeys = strings.Split(c.String(\"key\"), \",\")\n\t}\n\tno_key := c.Bool(\"no-key\")\n\tfuncAppend := getFuncAppend(no_key)\n\n\tvar file *os.File\n\tif len(c.Args()) > 0 {\n\t\tfilename := c.Args()[0]\n\t\tvar err error\n\t\tfile, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tos.Stderr.WriteString(\"failed to open and read \" + filename)\n\t\t\texitCode = 1\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tfile = os.Stdin\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlvs := parseLtsv(line)\n\t\tltsv := restructLtsv(keys, lvs, funcAppend)\n\t\tos.Stdout.WriteString(ltsv + \"\\n\")\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tos.Stderr.WriteString(\"reading standard input errored\")\n\t\texitCode = 1\n\t\treturn\n\t}\n}\n\nfunc restructLtsv(keys []string, lvs map[string]string, funcAppend func([]string, string, string) []string) string {\n\t\/\/ specified keys or all keys\n\torders := keys\n\tif len(keys) == 0 {\n\t\torders = keysInMap(lvs)\n\t}\n\t\/\/ make slice with enough capacity so that append does not newly create object\n\t\/\/ cf. http:\/\/golang.org\/pkg\/builtin\/#append\n\tselected := make([]string, 0, len(orders))\n\tfor _, label := range orders {\n\t\tvalue := lvs[label]\n\t\tselected = funcAppend(selected, label, value)\n\t}\n\treturn strings.Join(selected, \"\\t\")\n}\n\nfunc parseLtsv(line string) map[string]string {\n\tcolumns := strings.Split(line, \"\\t\")\n\tlvs := make(map[string]string)\n\tfor _, column := range columns {\n\t\tl_v := strings.SplitN(column, \":\", 2)\n\t\tif len(l_v) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tlabel, value := l_v[0], l_v[1]\n\t\tlvs[label] = value\n\t}\n\treturn lvs\n}\n\n\/\/ Return function pointer to avoid `if` evaluation occurs in each iteration\nfunc getFuncAppend(no_key bool) func([]string, string, string) []string {\n\tif no_key {\n\t\treturn func(selected []string, label string, value string) []string {\n\t\t\treturn append(selected, value)\n\t\t}\n\t} else {\n\t\tif termutil.Isatty(os.Stdout.Fd()) {\n\t\t\treturn func(selected []string, label string, value string) []string {\n\t\t\t\treturn append(selected, ansi.Color(label, \"green\")+\":\"+ansi.Color(value, \"magenta\"))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ if pipe or redirect\n\t\t\treturn func(selected []string, label string, value string) []string {\n\t\t\t\treturn append(selected, label+\":\"+value)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc keysInMap(m map[string]string) []string {\n\tkeys := make([]string, 0, len(m))\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}\n\nfunc stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Red Hat, Inc, and individual contributors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kubesh\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/bbrowning\/readline\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/homedir\"\n)\n\ntype kubesh struct {\n\tfinder ResourceFinder\n\tcontext []string\n\tlineReader *readline.Instance\n\tprogname string\n\tfactory *cmdutil.Factory\n\tout *NewlineEnsuringWriter\n}\n\nfunc NewKubesh() *kubesh {\n\tfactory := cmdutil.NewFactory(nil)\n\tsh := kubesh{\n\t\tfactory: &factory,\n\t\tfinder: TimeoutFinder{Resourceful{&factory}, time.Second * 2},\n\t\tprogname: os.Args[0],\n\t\tout: &NewlineEnsuringWriter{delegate: os.Stdout},\n\t}\n\tcompleter := NewCompleter(sh.newRootCommand(), sh.finder, &sh.context)\n\tsh.lineReader, _ = readline.NewEx(&readline.Config{\n\t\tPrompt: prompt([]string{}),\n\t\tAutoComplete: completer,\n\t\tHistoryFile: path.Join(homedir.HomeDir(), \".kubesh_history\"),\n\t\tListener: completer,\n\t})\n\treturn &sh\n}\n\nfunc (sh *kubesh) Run() {\n\tdefer sh.lineReader.Close()\n\tfor {\n\t\tline, err := sh.lineReader.Readline()\n\t\tif err == readline.ErrInterrupt {\n\t\t\tif len(line) == 0 {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\targs, err := tokenize(line)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t} else if len(args) > 0 {\n\t\t\tcmd := sh.findCommand(args)\n\t\t\t\/\/ TODO: what do we do with an error here? do we care?\n\t\t\targs, _ = applyContext(sh.context, args, cmd)\n\t\t\tsh.runCommand(cmd, args)\n\t\t}\n\t\t\/\/ if the command output something w\/o a trailing \\n, it won't\n\t\t\/\/ show, so we make sure one exists\n\t\tsh.out.EnsureNewline()\n\t}\n}\n\nfunc (sh *kubesh) Execute(args []string) {\n\tcmd := sh.newRootCommand()\n\tcmd.SetArgs(args)\n\tcmd.Execute()\n}\n\nfunc (sh *kubesh) findCommand(args []string) Command {\n\troot := sh.newRootCommand()\n\tcmd, _, _ := root.Find(args)\n\treturn KubectlCommand{cmd}\n}\n\nfunc (sh *kubesh) runCommand(cmd Command, args []string) {\n\tdefer func() {\n\t\t\/\/ Ignore any panics from kubectl\n\t\trecover()\n\t}()\n\tswitch cmd.Name() {\n\tcase \"proxy\", \"attach\":\n\t\tsh.runExec(args)\n\tdefault:\n\t\tsh.Execute(args)\n\t}\n}\n\nfunc (sh *kubesh) runExec(args []string) {\n\tcmd := exec.Command(sh.progname, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Start()\n\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, syscall.SIGINT)\n\tdefer signal.Stop(signals)\n\tgo func() {\n\t\t<-signals\n\t\tfmt.Println(\"\")\n\t\tsyscall.Kill(cmd.Process.Pid, syscall.SIGINT)\n\t}()\n\n\tcmd.Wait()\n}\n\nfunc (sh *kubesh) newRootCommand() *cobra.Command {\n\troot := cmd.NewKubectlCommand(*sh.factory, os.Stdin, sh.out, os.Stderr)\n\tget, _, err := root.Find([]string{\"get\"})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"pin\",\n\t\tShort: \"Pin resources for use in subsequent commands\",\n\t\tLong: pin_long,\n\t\tExample: pin_example,\n\t\tSilenceUsage: true,\n\t\tValidArgs: get.ValidArgs,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif cmdutil.GetFlagBool(cmd, \"clear\") {\n\t\t\t\tsh.context = []string{}\n\t\t\t} else {\n\t\t\t\terr := setContextCommand(sh, args)\n\t\t\t\tcmdutil.CheckErr(err)\n\t\t\t}\n\t\t\tsh.lineReader.SetPrompt(prompt(sh.context))\n\t\t},\n\t}\n\tcmd.Flags().BoolP(\"clear\", \"c\", false, \"Clears pinned resource\")\n\troot.AddCommand(cmd)\n\n\tcmd = &cobra.Command{\n\t\tUse: \"exit\",\n\t\tShort: \"Exit kubesh\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfmt.Println(\"Bye!\")\n\t\t\tos.Exit(0)\n\t\t},\n\t}\n\troot.AddCommand(cmd)\n\treturn root\n}\n\nfunc prompt(context []string) string {\n\tpath := \"\"\n\tif len(context) > 0 {\n\t\tpath = fmt.Sprintf(\"[%v]\", strings.Join(context, \"\/\"))\n\t}\n\n\treturn fmt.Sprintf(\" kubesh%v> \", path)\n}\n<commit_msg>Sharing factories means flags surprisingly persist<commit_after>\/\/ Copyright 2016 Red Hat, Inc, and individual contributors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kubesh\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/bbrowning\/readline\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/homedir\"\n)\n\ntype kubesh struct {\n\tfinder ResourceFinder\n\tcontext []string\n\tlineReader *readline.Instance\n\tprogname string\n\tout *NewlineEnsuringWriter\n}\n\nfunc NewKubesh() *kubesh {\n\tfactory := cmdutil.NewFactory(nil)\n\tsh := kubesh{\n\t\tfinder: TimeoutFinder{Resourceful{&factory}, time.Second * 2},\n\t\tprogname: os.Args[0],\n\t\tout: &NewlineEnsuringWriter{delegate: os.Stdout},\n\t}\n\tcompleter := NewCompleter(sh.newRootCommand(), sh.finder, &sh.context)\n\tsh.lineReader, _ = readline.NewEx(&readline.Config{\n\t\tPrompt: prompt([]string{}),\n\t\tAutoComplete: completer,\n\t\tHistoryFile: path.Join(homedir.HomeDir(), \".kubesh_history\"),\n\t\tListener: completer,\n\t})\n\treturn &sh\n}\n\nfunc (sh *kubesh) Run() {\n\tdefer sh.lineReader.Close()\n\tfor {\n\t\tline, err := sh.lineReader.Readline()\n\t\tif err == readline.ErrInterrupt {\n\t\t\tif len(line) == 0 {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\targs, err := tokenize(line)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t} else if len(args) > 0 {\n\t\t\tcmd := sh.findCommand(args)\n\t\t\t\/\/ TODO: what do we do with an error here? do we care?\n\t\t\targs, _ = applyContext(sh.context, args, cmd)\n\t\t\tsh.runCommand(cmd, args)\n\t\t}\n\t\t\/\/ if the command output something w\/o a trailing \\n, it won't\n\t\t\/\/ show, so we make sure one exists\n\t\tsh.out.EnsureNewline()\n\t}\n}\n\nfunc (sh *kubesh) Execute(args []string) {\n\tcmd := sh.newRootCommand()\n\tcmd.SetArgs(args)\n\tcmd.Execute()\n}\n\nfunc (sh *kubesh) findCommand(args []string) Command {\n\troot := sh.newRootCommand()\n\tcmd, _, _ := root.Find(args)\n\treturn KubectlCommand{cmd}\n}\n\nfunc (sh *kubesh) runCommand(cmd Command, args []string) {\n\tdefer func() {\n\t\t\/\/ Ignore any panics from kubectl\n\t\trecover()\n\t}()\n\tswitch cmd.Name() {\n\tcase \"proxy\", \"attach\":\n\t\tsh.runExec(args)\n\tdefault:\n\t\tsh.Execute(args)\n\t}\n}\n\nfunc (sh *kubesh) runExec(args []string) {\n\tcmd := exec.Command(sh.progname, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Start()\n\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, syscall.SIGINT)\n\tdefer signal.Stop(signals)\n\tgo func() {\n\t\t<-signals\n\t\tfmt.Println(\"\")\n\t\tsyscall.Kill(cmd.Process.Pid, syscall.SIGINT)\n\t}()\n\n\tcmd.Wait()\n}\n\nfunc (sh *kubesh) newRootCommand() *cobra.Command {\n\troot := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, sh.out, os.Stderr)\n\tget, _, err := root.Find([]string{\"get\"})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser, err := root.Flags().GetString(\"user\")\n\tif err != nil {\n\t\tfmt.Println(\"JC: ERR!\", err)\n\t}\n\tfmt.Println(\"JC: USER=>\", user)\n\n\tcmd := &cobra.Command{\n\t\tUse: \"pin\",\n\t\tShort: \"Pin resources for use in subsequent commands\",\n\t\tLong: pin_long,\n\t\tExample: pin_example,\n\t\tSilenceUsage: true,\n\t\tValidArgs: get.ValidArgs,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif cmdutil.GetFlagBool(cmd, \"clear\") {\n\t\t\t\tsh.context = []string{}\n\t\t\t} else {\n\t\t\t\terr := setContextCommand(sh, args)\n\t\t\t\tcmdutil.CheckErr(err)\n\t\t\t}\n\t\t\tsh.lineReader.SetPrompt(prompt(sh.context))\n\t\t},\n\t}\n\tcmd.Flags().BoolP(\"clear\", \"c\", false, \"Clears pinned resource\")\n\troot.AddCommand(cmd)\n\n\tcmd = &cobra.Command{\n\t\tUse: \"exit\",\n\t\tShort: \"Exit kubesh\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfmt.Println(\"Bye!\")\n\t\t\tos.Exit(0)\n\t\t},\n\t}\n\troot.AddCommand(cmd)\n\treturn root\n}\n\nfunc prompt(context []string) string {\n\tpath := \"\"\n\tif len(context) > 0 {\n\t\tpath = fmt.Sprintf(\"[%v]\", strings.Join(context, \"\/\"))\n\t}\n\n\treturn fmt.Sprintf(\" kubesh%v> \", path)\n}\n<|endoftext|>"} {"text":"<commit_before>package ot\n\n\/\/ extend.go\n\/\/\n\/\/ Extending Oblivious Transfers Efficiently\n\/\/ Yuval Ishai, Joe Kilian, Kobbi Nissim, Erez Petrank\n\/\/ CRYPTO 2003\n\/\/ http:\/\/link.springer.com\/chapter\/10.1007\/978-3-540-45146-4_9\n\/\/\n\/\/ Modified with preprocessing step\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/tjim\/smpcc\/runtime\/bit\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"io\"\n)\n\ntype ExtendSender struct {\n\tR Receiver\n\tz0, z1 [][]byte\n\tm int\n\tk int\n\totExtChan chan []byte\n\totExtSelChan chan Selector\n\tcurPair int\n\tstarted bool\n\tsendCalls int\n}\n\ntype ExtendReceiver struct {\n\tS Sender\n\tr []byte\n\tm int\n\tk int\n\totExtChan chan []byte\n\totExtSelChan chan Selector\n\tcurPair int\n\tT *bit.Matrix8\n}\n\nfunc NewExtendSender(c chan []byte, otExtSelChan chan Selector, R Receiver, k, m int) Sender {\n\tif k%8 != 0 {\n\t\tpanic(\"k must be a multiple of 8\")\n\t}\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\tsender := new(ExtendSender)\n\tsender.otExtSelChan = otExtSelChan\n\tsender.k = k\n\tsender.R = R\n\tsender.otExtChan = c\n\tsender.m = m\n\tsender.curPair = m\n\tsender.started = false\n\tsender.sendCalls = 0\n\treturn sender\n}\n\nfunc NewExtendReceiver(c chan []byte, otExtSelChan chan Selector, S Sender, k, m int) Receiver {\n\tif k%8 != 0 {\n\t\tpanic(\"k must be a multiple of 8\")\n\t}\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\treceiver := new(ExtendReceiver)\n\treceiver.otExtSelChan = otExtSelChan\n\treceiver.k = k\n\treceiver.S = S\n\treceiver.m = m\n\treceiver.curPair = m\n\treceiver.otExtChan = c\n\treturn receiver\n}\n\nfunc (self *ExtendSender) preProcessSender(m int) {\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\tself.started = true\n\tself.m = m\n\tself.curPair = 0\n\ts := make([]byte, self.k\/8)\n\trandomBitVector(s)\n\n\tQT := bit.NewMatrix8(self.k, self.m)\n\tfor i := 0; i < QT.NumRows; i++ {\n\t\trecvd := self.R.Receive(Selector(bit.GetBit(s, i)))\n\t\tif len(recvd) != self.m\/8 {\n\t\t\tpanic(fmt.Sprintf(\"Incorrect column length received: %d != %d\", len(recvd), self.m\/8))\n\t\t}\n\t\tQT.SetRow(i, recvd)\n\t}\n\tQ := QT.Transpose()\n\tself.z0 = make([][]byte, m)\n\tself.z1 = make([][]byte, m)\n\ttemp := make([]byte, self.k\/8)\n\tfor j := 0; j < m; j++ {\n\t\tself.z0[j] = Q.GetRow(j)\n\t\txorBytes(temp, Q.GetRow(j), s)\n\t\tself.z1[j] = make([]byte, len(temp))\n\t\tcopy(self.z1[j], temp)\n\t}\n}\n\nfunc (self *ExtendReceiver) preProcessReceiver(m int) {\n\tself.curPair = 0\n\tself.m = m\n\tself.r = make([]byte, self.m\/8)\n\trandomBitVector(self.r)\n\tT := bit.NewMatrix8(self.m, self.k)\n\tT.Randomize()\n\tself.T = T\n\tTT := T.Transpose()\n\ttemp := make([]byte, self.m\/8)\n\tfor i := 0; i < self.k; i++ {\n\t\txorBytes(temp, self.r, TT.GetRow(i))\n\t\tself.S.Send(TT.GetRow(i), temp)\n\t}\n}\n\n\/\/ hash function instantiating a random oracle\nfunc RO(input []byte, outBits int) []byte {\n\tif outBits <= 0 {\n\t\tpanic(\"output size <= 0\")\n\t}\n\tif outBits%8 != 0 {\n\t\tpanic(\"output size must be a multiple of 8\")\n\t}\n\toutput := make([]byte, outBits\/8)\n\tsha3.ShakeSum256(output, input)\n\treturn output\n}\n\n\/\/ Currently not used because of Footnote 10, page 13 of Ishai03:\n\/\/ It is not hard to verify that as long as the receiver is honest,\n\/\/ the protocol remains secure. The inclusion of j in the input to\n\/\/ the random oracle slightly simplifies the analysis and is useful\n\/\/ towards realizing the fully secure variant of this protocol.\nfunc RO_j(curPair int, input []byte, outBits int) []byte {\n\tif outBits <= 0 {\n\t\tpanic(\"output size <= 0\")\n\t}\n\tif outBits%8 != 0 {\n\t\tpanic(\"output size must be a multiple of 8\")\n\t}\n\tcurPair_bytes := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(curPair_bytes, curPair)\n\toutput := make([]byte, outBits\/8)\n\tsha3.ShakeSum256(output, append(curPair_bytes, input))\n\treturn output\n}\n\nfunc (self *ExtendSender) Send(m0, m1 Message) {\n\tif self.curPair == self.m {\n\t\tself.preProcessSender(self.m)\n\t}\n\tif len(m0) != len(m1) {\n\t\tpanic(\"(*ot.ExtendSender).Send: messages have different lengths\")\n\t}\n\tmsglen := len(m0)\n\ty0 := make([]byte, msglen)\n\ty1 := make([]byte, msglen)\n\tsmod := <-self.otExtSelChan\n\tif smod == 0 {\n\t\txorBytes(y0, m0, RO(self.z0[self.curPair], 8*msglen))\n\t\txorBytes(y1, m1, RO(self.z1[self.curPair], 8*msglen))\n\t} else if smod == 1 {\n\t\txorBytes(y0, m1, RO(self.z0[self.curPair], 8*msglen))\n\t\txorBytes(y1, m0, RO(self.z1[self.curPair], 8*msglen))\n\t} else {\n\t\tpanic(\"Sender: unexpected smod value\")\n\t}\n\tself.otExtChan <- y0\n\tself.otExtChan <- y1\n\tself.curPair++\n\treturn\n}\n\nfunc (self *ExtendReceiver) Receive(s Selector) Message {\n\tif self.curPair == self.m {\n\t\tself.preProcessReceiver(self.m)\n\t}\n\tsmod := Selector(byte(s) ^ bit.GetBit(self.r, self.curPair))\n\tself.otExtSelChan <- smod\n\ty0 := <-self.otExtChan\n\ty1 := <-self.otExtChan\n\tif len(y0) != len(y1) {\n\t\tpanic(\"(*ot.ExtendReceiver).Receive: messages have different length\")\n\t}\n\tmsglen := len(y0)\n\tw := make([]byte, msglen)\n\tif bit.GetBit(self.r, self.curPair) == 0 {\n\t\txorBytes(w, y0, RO(self.T.GetRow(self.curPair), 8*msglen))\n\t} else if bit.GetBit(self.r, self.curPair) == 1 {\n\t\txorBytes(w, y1, RO(self.T.GetRow(self.curPair), 8*msglen))\n\t}\n\tself.curPair++\n\treturn w\n}\n\nfunc randomBitVector(pool []byte) {\n\tn, err := io.ReadFull(rand.Reader, pool)\n\tif err != nil || n != len(pool) {\n\t\tpanic(\"randomness allocation failed\")\n\t}\n}\n\n\/\/ Send m message pairs in one call\nfunc (S *ExtendSender) SendM(a, b []Message) {\n\tm := len(a)\n\tif m%8 != 0 {\n\t\tpanic(\"SendM: must send a multiple of 8 messages at a time\") \/\/ force compatibility with stream OT\n\t}\n\tif len(b) != m {\n\t\tpanic(\"SendM: must send pairs of messages\")\n\t}\n\tfor i := range a {\n\t\tS.Send(a[i], b[i])\n\t}\n}\nfunc (R *ExtendReceiver) ReceiveM(r []byte) []Message { \/\/ r is a packed vector of selections\n\tresult := make([]Message, 8*len(r))\n\tfor i := range r {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tselector := Selector((r[i] >> uint(7-bit)) & 1)\n\t\t\tresult[i+bit] = R.Receive(selector)\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Send m pairs of bits (1-bit messages) in one call\nfunc (S *ExtendSender) SendMBits(a, b []byte) { \/\/ messages are packed in bytes\n\tm := 8 * len(a)\n\tif 8*len(b) != m {\n\t\tpanic(\"SendMBits: must send pairs of messages\")\n\t}\n\tfor i := range a {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tmask := byte(0x80 >> uint(bit))\n\t\t\tS.Send([]byte{a[i] & mask}, []byte{b[i] & mask})\n\t\t}\n\t}\n}\nfunc (R *ExtendReceiver) ReceiveMBits(r []byte) []byte { \/\/ r is a packed vector of selections and result is packed as well\n\tresult := make([]byte, len(r))\n\tfor i := range r {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tmask := byte(0x80 >> uint(bit))\n\t\t\tselector := Selector((r[i] >> uint(7-bit)) & 1)\n\t\t\tresult[i] |= mask & R.Receive(selector)[0]\n\t\t}\n\t}\n\treturn result\n}\n<commit_msg>Fixed bugs in RO_j<commit_after>package ot\n\n\/\/ extend.go\n\/\/\n\/\/ Extending Oblivious Transfers Efficiently\n\/\/ Yuval Ishai, Joe Kilian, Kobbi Nissim, Erez Petrank\n\/\/ CRYPTO 2003\n\/\/ http:\/\/link.springer.com\/chapter\/10.1007\/978-3-540-45146-4_9\n\/\/\n\/\/ Modified with preprocessing step\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/tjim\/smpcc\/runtime\/bit\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"io\"\n)\n\ntype ExtendSender struct {\n\tR Receiver\n\tz0, z1 [][]byte\n\tm int\n\tk int\n\totExtChan chan []byte\n\totExtSelChan chan Selector\n\tcurPair int\n\tstarted bool\n\tsendCalls int\n}\n\ntype ExtendReceiver struct {\n\tS Sender\n\tr []byte\n\tm int\n\tk int\n\totExtChan chan []byte\n\totExtSelChan chan Selector\n\tcurPair int\n\tT *bit.Matrix8\n}\n\nfunc NewExtendSender(c chan []byte, otExtSelChan chan Selector, R Receiver, k, m int) Sender {\n\tif k%8 != 0 {\n\t\tpanic(\"k must be a multiple of 8\")\n\t}\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\tsender := new(ExtendSender)\n\tsender.otExtSelChan = otExtSelChan\n\tsender.k = k\n\tsender.R = R\n\tsender.otExtChan = c\n\tsender.m = m\n\tsender.curPair = m\n\tsender.started = false\n\tsender.sendCalls = 0\n\treturn sender\n}\n\nfunc NewExtendReceiver(c chan []byte, otExtSelChan chan Selector, S Sender, k, m int) Receiver {\n\tif k%8 != 0 {\n\t\tpanic(\"k must be a multiple of 8\")\n\t}\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\treceiver := new(ExtendReceiver)\n\treceiver.otExtSelChan = otExtSelChan\n\treceiver.k = k\n\treceiver.S = S\n\treceiver.m = m\n\treceiver.curPair = m\n\treceiver.otExtChan = c\n\treturn receiver\n}\n\nfunc (self *ExtendSender) preProcessSender(m int) {\n\tif m%8 != 0 {\n\t\tpanic(\"m must be a multiple of 8\")\n\t}\n\tself.started = true\n\tself.m = m\n\tself.curPair = 0\n\ts := make([]byte, self.k\/8)\n\trandomBitVector(s)\n\n\tQT := bit.NewMatrix8(self.k, self.m)\n\tfor i := 0; i < QT.NumRows; i++ {\n\t\trecvd := self.R.Receive(Selector(bit.GetBit(s, i)))\n\t\tif len(recvd) != self.m\/8 {\n\t\t\tpanic(fmt.Sprintf(\"Incorrect column length received: %d != %d\", len(recvd), self.m\/8))\n\t\t}\n\t\tQT.SetRow(i, recvd)\n\t}\n\tQ := QT.Transpose()\n\tself.z0 = make([][]byte, m)\n\tself.z1 = make([][]byte, m)\n\ttemp := make([]byte, self.k\/8)\n\tfor j := 0; j < m; j++ {\n\t\tself.z0[j] = Q.GetRow(j)\n\t\txorBytes(temp, Q.GetRow(j), s)\n\t\tself.z1[j] = make([]byte, len(temp))\n\t\tcopy(self.z1[j], temp)\n\t}\n}\n\nfunc (self *ExtendReceiver) preProcessReceiver(m int) {\n\tself.curPair = 0\n\tself.m = m\n\tself.r = make([]byte, self.m\/8)\n\trandomBitVector(self.r)\n\tT := bit.NewMatrix8(self.m, self.k)\n\tT.Randomize()\n\tself.T = T\n\tTT := T.Transpose()\n\ttemp := make([]byte, self.m\/8)\n\tfor i := 0; i < self.k; i++ {\n\t\txorBytes(temp, self.r, TT.GetRow(i))\n\t\tself.S.Send(TT.GetRow(i), temp)\n\t}\n}\n\n\/\/ hash function instantiating a random oracle\nfunc RO(input []byte, outBits int) []byte {\n\tif outBits <= 0 {\n\t\tpanic(\"output size <= 0\")\n\t}\n\tif outBits%8 != 0 {\n\t\tpanic(\"output size must be a multiple of 8\")\n\t}\n\toutput := make([]byte, outBits\/8)\n\tsha3.ShakeSum256(output, input)\n\treturn output\n}\n\n\/\/ Currently not used because of Footnote 10, page 13 of Ishai03:\n\/\/ It is not hard to verify that as long as the receiver is honest,\n\/\/ the protocol remains secure. The inclusion of j in the input to\n\/\/ the random oracle slightly simplifies the analysis and is useful\n\/\/ towards realizing the fully secure variant of this protocol.\nfunc RO_j(curPair int, input []byte, outBits int) []byte {\n\tif outBits <= 0 {\n\t\tpanic(\"output size <= 0\")\n\t}\n\tif outBits%8 != 0 {\n\t\tpanic(\"output size must be a multiple of 8\")\n\t}\n\tcurPair_bytes := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(curPair_bytes, uint32(curPair))\n\toutput := make([]byte, outBits\/8)\n\tsha3.ShakeSum256(output, append(curPair_bytes, input...))\n\treturn output\n}\n\nfunc (self *ExtendSender) Send(m0, m1 Message) {\n\tif self.curPair == self.m {\n\t\tself.preProcessSender(self.m)\n\t}\n\tif len(m0) != len(m1) {\n\t\tpanic(\"(*ot.ExtendSender).Send: messages have different lengths\")\n\t}\n\tmsglen := len(m0)\n\ty0 := make([]byte, msglen)\n\ty1 := make([]byte, msglen)\n\tsmod := <-self.otExtSelChan\n\tif smod == 0 {\n\t\txorBytes(y0, m0, RO(self.z0[self.curPair], 8*msglen))\n\t\txorBytes(y1, m1, RO(self.z1[self.curPair], 8*msglen))\n\t} else if smod == 1 {\n\t\txorBytes(y0, m1, RO(self.z0[self.curPair], 8*msglen))\n\t\txorBytes(y1, m0, RO(self.z1[self.curPair], 8*msglen))\n\t} else {\n\t\tpanic(\"Sender: unexpected smod value\")\n\t}\n\tself.otExtChan <- y0\n\tself.otExtChan <- y1\n\tself.curPair++\n\treturn\n}\n\nfunc (self *ExtendReceiver) Receive(s Selector) Message {\n\tif self.curPair == self.m {\n\t\tself.preProcessReceiver(self.m)\n\t}\n\tsmod := Selector(byte(s) ^ bit.GetBit(self.r, self.curPair))\n\tself.otExtSelChan <- smod\n\ty0 := <-self.otExtChan\n\ty1 := <-self.otExtChan\n\tif len(y0) != len(y1) {\n\t\tpanic(\"(*ot.ExtendReceiver).Receive: messages have different length\")\n\t}\n\tmsglen := len(y0)\n\tw := make([]byte, msglen)\n\tif bit.GetBit(self.r, self.curPair) == 0 {\n\t\txorBytes(w, y0, RO(self.T.GetRow(self.curPair), 8*msglen))\n\t} else if bit.GetBit(self.r, self.curPair) == 1 {\n\t\txorBytes(w, y1, RO(self.T.GetRow(self.curPair), 8*msglen))\n\t}\n\tself.curPair++\n\treturn w\n}\n\nfunc randomBitVector(pool []byte) {\n\tn, err := io.ReadFull(rand.Reader, pool)\n\tif err != nil || n != len(pool) {\n\t\tpanic(\"randomness allocation failed\")\n\t}\n}\n\n\/\/ Send m message pairs in one call\nfunc (S *ExtendSender) SendM(a, b []Message) {\n\tm := len(a)\n\tif m%8 != 0 {\n\t\tpanic(\"SendM: must send a multiple of 8 messages at a time\") \/\/ force compatibility with stream OT\n\t}\n\tif len(b) != m {\n\t\tpanic(\"SendM: must send pairs of messages\")\n\t}\n\tfor i := range a {\n\t\tS.Send(a[i], b[i])\n\t}\n}\nfunc (R *ExtendReceiver) ReceiveM(r []byte) []Message { \/\/ r is a packed vector of selections\n\tresult := make([]Message, 8*len(r))\n\tfor i := range r {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tselector := Selector((r[i] >> uint(7-bit)) & 1)\n\t\t\tresult[i+bit] = R.Receive(selector)\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Send m pairs of bits (1-bit messages) in one call\nfunc (S *ExtendSender) SendMBits(a, b []byte) { \/\/ messages are packed in bytes\n\tm := 8 * len(a)\n\tif 8*len(b) != m {\n\t\tpanic(\"SendMBits: must send pairs of messages\")\n\t}\n\tfor i := range a {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tmask := byte(0x80 >> uint(bit))\n\t\t\tS.Send([]byte{a[i] & mask}, []byte{b[i] & mask})\n\t\t}\n\t}\n}\nfunc (R *ExtendReceiver) ReceiveMBits(r []byte) []byte { \/\/ r is a packed vector of selections and result is packed as well\n\tresult := make([]byte, len(r))\n\tfor i := range r {\n\t\tfor bit := 0; bit < 8; bit++ {\n\t\t\tmask := byte(0x80 >> uint(bit))\n\t\t\tselector := Selector((r[i] >> uint(7-bit)) & 1)\n\t\t\tresult[i] |= mask & R.Receive(selector)[0]\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mapper\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/log\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/prometheus\/statsd_exporter\/pkg\/level\"\n\t\"github.com\/prometheus\/statsd_exporter\/pkg\/mapper\/fsm\"\n)\n\nvar (\n\t\/\/ The first segment of a match cannot start with a number\n\tstatsdMetricRE = `[a-zA-Z_]([a-zA-Z0-9_\\-])*`\n\t\/\/ The subsequent segments of a match can start with a number\n\t\/\/ See https:\/\/github.com\/prometheus\/statsd_exporter\/issues\/328\n\tstatsdMetricSubsequentRE = `[a-zA-Z0-9_]([a-zA-Z0-9_\\-])*`\n\ttemplateReplaceRE = `(\\$\\{?\\d+\\}?)`\n\n\tmetricLineRE = regexp.MustCompile(`^(\\*|` + statsdMetricRE + `)(\\.\\*|\\.` + statsdMetricSubsequentRE + `)*$`)\n\tmetricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`)\n\tlabelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`)\n)\n\ntype MetricMapper struct {\n\tRegisterer prometheus.Registerer\n\tDefaults mapperConfigDefaults `yaml:\"defaults\"`\n\tMappings []MetricMapping `yaml:\"mappings\"`\n\tFSM *fsm.FSM\n\tdoFSM bool\n\tdoRegex bool\n\tcache MetricMapperCache\n\tmutex sync.RWMutex\n\n\tMappingsCount prometheus.Gauge\n\n\tLogger log.Logger\n}\n\ntype SummaryOptions struct {\n\tQuantiles []metricObjective `yaml:\"quantiles\"`\n\tMaxAge time.Duration `yaml:\"max_age\"`\n\tAgeBuckets uint32 `yaml:\"age_buckets\"`\n\tBufCap uint32 `yaml:\"buf_cap\"`\n}\n\ntype HistogramOptions struct {\n\tBuckets []float64 `yaml:\"buckets\"`\n}\n\ntype metricObjective struct {\n\tQuantile float64 `yaml:\"quantile\"`\n\tError float64 `yaml:\"error\"`\n}\n\nvar defaultQuantiles = []metricObjective{\n\t{Quantile: 0.5, Error: 0.05},\n\t{Quantile: 0.9, Error: 0.01},\n\t{Quantile: 0.99, Error: 0.001},\n}\n\nfunc (m *MetricMapper) InitFromYAMLString(fileContents string) error {\n\tvar n MetricMapper\n\n\tif err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {\n\t\treturn err\n\t}\n\n\tif len(n.Defaults.HistogramOptions.Buckets) == 0 {\n\t\tn.Defaults.HistogramOptions.Buckets = prometheus.DefBuckets\n\t}\n\n\tif len(n.Defaults.SummaryOptions.Quantiles) == 0 {\n\t\tn.Defaults.SummaryOptions.Quantiles = defaultQuantiles\n\t}\n\n\tif n.Defaults.MatchType == MatchTypeDefault {\n\t\tn.Defaults.MatchType = MatchTypeGlob\n\t}\n\n\tremainingMappingsCount := len(n.Mappings)\n\n\tn.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeObserver)},\n\t\tremainingMappingsCount, n.Defaults.GlobDisableOrdering)\n\n\tfor i := range n.Mappings {\n\t\tremainingMappingsCount--\n\n\t\tcurrentMapping := &n.Mappings[i]\n\n\t\t\/\/ check that label is correct\n\t\tfor k := range currentMapping.Labels {\n\t\t\tif !labelNameRE.MatchString(k) {\n\t\t\t\treturn fmt.Errorf(\"invalid label key: %s\", k)\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.Name == \"\" {\n\t\t\treturn fmt.Errorf(\"line %d: metric mapping didn't set a metric name\", i)\n\t\t}\n\n\t\tif !metricNameRE.MatchString(currentMapping.Name) {\n\t\t\treturn fmt.Errorf(\"metric name '%s' doesn't match regex '%s'\", currentMapping.Name, metricNameRE)\n\t\t}\n\n\t\tif currentMapping.MatchType == \"\" {\n\t\t\tcurrentMapping.MatchType = n.Defaults.MatchType\n\t\t}\n\n\t\tif currentMapping.Action == \"\" {\n\t\t\tcurrentMapping.Action = ActionTypeMap\n\t\t}\n\n\t\tif currentMapping.MatchType == MatchTypeGlob {\n\t\t\tn.doFSM = true\n\t\t\tif !metricLineRE.MatchString(currentMapping.Match) {\n\t\t\t\treturn fmt.Errorf(\"invalid match: %s\", currentMapping.Match)\n\t\t\t}\n\n\t\t\tcaptureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType),\n\t\t\t\tremainingMappingsCount, currentMapping)\n\n\t\t\tcurrentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount)\n\n\t\t\tlabelKeys := make([]string, len(currentMapping.Labels))\n\t\t\tlabelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels))\n\t\t\tlabelIndex := 0\n\t\t\tfor label, valueExpr := range currentMapping.Labels {\n\t\t\t\tlabelKeys[labelIndex] = label\n\t\t\t\tlabelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount)\n\t\t\t\tlabelIndex++\n\t\t\t}\n\t\t\tcurrentMapping.labelFormatters = labelFormatters\n\t\t\tcurrentMapping.labelKeys = labelKeys\n\t\t} else {\n\t\t\tif regex, err := regexp.Compile(currentMapping.Match); err != nil {\n\t\t\t\treturn fmt.Errorf(\"invalid regex %s in mapping: %v\", currentMapping.Match, err)\n\t\t\t} else {\n\t\t\t\tcurrentMapping.regex = regex\n\t\t\t}\n\t\t\tn.doRegex = true\n\t\t}\n\n\t\tif currentMapping.ObserverType == \"\" {\n\t\t\tcurrentMapping.ObserverType = n.Defaults.ObserverType\n\t\t}\n\n\t\tif currentMapping.LegacyQuantiles != nil &&\n\t\t\t(currentMapping.SummaryOptions == nil || currentMapping.SummaryOptions.Quantiles != nil) {\n\t\t\tlevel.Warn(m.Logger).Log(\"msg\", \"using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy\")\n\t\t}\n\n\t\tif currentMapping.LegacyBuckets != nil &&\n\t\t\t(currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) {\n\t\t\tlevel.Warn(m.Logger).Log(\"msg\", \"using the top level buckets is deprecated. Please use buckets in the histogram_options hierarchy\")\n\t\t}\n\n\t\tif currentMapping.SummaryOptions != nil &&\n\t\t\tcurrentMapping.LegacyQuantiles != nil &&\n\t\t\tcurrentMapping.SummaryOptions.Quantiles != nil {\n\t\t\treturn fmt.Errorf(\"cannot use quantiles in both the top level and summary options at the same time in %s\", currentMapping.Match)\n\t\t}\n\n\t\tif currentMapping.HistogramOptions != nil &&\n\t\t\tcurrentMapping.LegacyBuckets != nil &&\n\t\t\tcurrentMapping.HistogramOptions.Buckets != nil {\n\t\t\treturn fmt.Errorf(\"cannot use buckets in both the top level and histogram options at the same time in %s\", currentMapping.Match)\n\t\t}\n\n\t\tif currentMapping.ObserverType == ObserverTypeHistogram {\n\t\t\tif currentMapping.SummaryOptions != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot use histogram observer and summary options at the same time\")\n\t\t\t}\n\t\t\tif currentMapping.HistogramOptions == nil {\n\t\t\t\tcurrentMapping.HistogramOptions = &HistogramOptions{}\n\t\t\t}\n\t\t\tif currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 {\n\t\t\t\tcurrentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets\n\t\t\t}\n\t\t\tif currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 {\n\t\t\t\tcurrentMapping.HistogramOptions.Buckets = n.Defaults.HistogramOptions.Buckets\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.ObserverType == ObserverTypeSummary {\n\t\t\tif currentMapping.HistogramOptions != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot use summary observer and histogram options at the same time\")\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions == nil {\n\t\t\t\tcurrentMapping.SummaryOptions = &SummaryOptions{}\n\t\t\t}\n\t\t\tif currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.Quantiles = n.Defaults.SummaryOptions.Quantiles\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.MaxAge == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.MaxAge = n.Defaults.SummaryOptions.MaxAge\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.AgeBuckets == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.AgeBuckets = n.Defaults.SummaryOptions.AgeBuckets\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.BufCap == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.BufCap = n.Defaults.SummaryOptions.BufCap\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 {\n\t\t\tcurrentMapping.Ttl = n.Defaults.Ttl\n\t\t}\n\t}\n\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tm.Defaults = n.Defaults\n\tm.Mappings = n.Mappings\n\n\t\/\/ Reset the cache since this function can be used to reload config\n\tif m.cache != nil {\n\t\tm.cache.Reset()\n\t}\n\n\tif n.doFSM {\n\t\tvar mappings []string\n\t\tfor _, mapping := range n.Mappings {\n\t\t\tif mapping.MatchType == MatchTypeGlob {\n\t\t\t\tmappings = append(mappings, mapping.Match)\n\t\t\t}\n\t\t}\n\t\tn.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled, m.Logger)\n\n\t\tm.FSM = n.FSM\n\t\tm.doRegex = n.doRegex\n\t}\n\tm.doFSM = n.doFSM\n\n\tif m.MappingsCount != nil {\n\t\tm.MappingsCount.Set(float64(len(n.Mappings)))\n\t}\n\n\tif m.Logger == nil {\n\t\tm.Logger = log.NewNopLogger()\n\t}\n\n\treturn nil\n}\n\nfunc (m *MetricMapper) InitFromFile(fileName string) error {\n\tmappingStr, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.InitFromYAMLString(string(mappingStr))\n}\n\n\/\/ UseCache tells the mapper to use a cache that implements the MetricMapperCache interface.\n\/\/ This cache MUST be thread-safe!\nfunc (m *MetricMapper) UseCache(cache MetricMapperCache) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tm.cache = cache\n}\n\nfunc (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\t\/\/ only use a cache if one is present\n\tif m.cache != nil {\n\t\tresult, cached := m.cache.Get(formatKey(statsdMetric, statsdMetricType))\n\t\tif cached {\n\t\t\tr := result.(MetricMapperCacheResult)\n\t\t\treturn r.Mapping, r.Labels, r.Matched\n\t\t}\n\t}\n\n\t\/\/ glob matching\n\tif m.doFSM {\n\t\tfinalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType))\n\t\tif finalState != nil && finalState.Result != nil {\n\t\t\tv := finalState.Result.(*MetricMapping)\n\t\t\tresult := copyMetricMapping(v)\n\t\t\tresult.Name = result.nameFormatter.Format(captures)\n\n\t\t\tlabels := prometheus.Labels{}\n\t\t\tfor index, formatter := range result.labelFormatters {\n\t\t\t\tlabels[result.labelKeys[index]] = formatter.Format(captures)\n\t\t\t}\n\n\t\t\tr := MetricMapperCacheResult{\n\t\t\t\tMapping: result,\n\t\t\t\tMatched: true,\n\t\t\t\tLabels: labels,\n\t\t\t}\n\t\t\t\/\/ add match to cache\n\t\t\tif m.cache != nil {\n\t\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), r)\n\t\t\t}\n\n\t\t\treturn result, labels, true\n\t\t} else if !m.doRegex {\n\t\t\t\/\/ if there's no regex match type, return immediately\n\t\t\t\/\/ Add miss to cache\n\t\t\tif m.cache != nil {\n\t\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})\n\t\t\t}\n\t\t\treturn nil, nil, false\n\t\t}\n\t}\n\n\t\/\/ regex matching\n\tfor _, mapping := range m.Mappings {\n\t\t\/\/ if a rule don't have regex matching type, the regex field is unset\n\t\tif mapping.regex == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmatches := mapping.regex.FindStringSubmatchIndex(statsdMetric)\n\t\tif len(matches) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmapping.Name = string(mapping.regex.ExpandString(\n\t\t\t[]byte{},\n\t\t\tmapping.Name,\n\t\t\tstatsdMetric,\n\t\t\tmatches,\n\t\t))\n\n\t\tif mt := mapping.MatchMetricType; mt != \"\" && mt != statsdMetricType {\n\t\t\tcontinue\n\t\t}\n\n\t\tlabels := prometheus.Labels{}\n\t\tfor label, valueExpr := range mapping.Labels {\n\t\t\tvalue := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)\n\t\t\tlabels[label] = string(value)\n\t\t}\n\n\t\tr := MetricMapperCacheResult{\n\t\t\tMapping: &mapping,\n\t\t\tMatched: true,\n\t\t\tLabels: labels,\n\t\t}\n\t\t\/\/ Add Match to cache\n\t\tif m.cache != nil {\n\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), r)\n\t\t}\n\n\t\treturn &mapping, labels, true\n\t}\n\n\t\/\/ Add Miss to cache\n\tif m.cache != nil {\n\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})\n\t}\n\treturn nil, nil, false\n}\n\n\/\/ make a shallow copy so that we do not overwrite name\n\/\/ as multiple names can be matched by same mapping\nfunc copyMetricMapping(in *MetricMapping) *MetricMapping {\n\tout := *in\n\treturn &out\n}\n<commit_msg>Remove ioutil<commit_after>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mapper\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/log\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/prometheus\/statsd_exporter\/pkg\/level\"\n\t\"github.com\/prometheus\/statsd_exporter\/pkg\/mapper\/fsm\"\n)\n\nvar (\n\t\/\/ The first segment of a match cannot start with a number\n\tstatsdMetricRE = `[a-zA-Z_]([a-zA-Z0-9_\\-])*`\n\t\/\/ The subsequent segments of a match can start with a number\n\t\/\/ See https:\/\/github.com\/prometheus\/statsd_exporter\/issues\/328\n\tstatsdMetricSubsequentRE = `[a-zA-Z0-9_]([a-zA-Z0-9_\\-])*`\n\ttemplateReplaceRE = `(\\$\\{?\\d+\\}?)`\n\n\tmetricLineRE = regexp.MustCompile(`^(\\*|` + statsdMetricRE + `)(\\.\\*|\\.` + statsdMetricSubsequentRE + `)*$`)\n\tmetricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`)\n\tlabelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`)\n)\n\ntype MetricMapper struct {\n\tRegisterer prometheus.Registerer\n\tDefaults mapperConfigDefaults `yaml:\"defaults\"`\n\tMappings []MetricMapping `yaml:\"mappings\"`\n\tFSM *fsm.FSM\n\tdoFSM bool\n\tdoRegex bool\n\tcache MetricMapperCache\n\tmutex sync.RWMutex\n\n\tMappingsCount prometheus.Gauge\n\n\tLogger log.Logger\n}\n\ntype SummaryOptions struct {\n\tQuantiles []metricObjective `yaml:\"quantiles\"`\n\tMaxAge time.Duration `yaml:\"max_age\"`\n\tAgeBuckets uint32 `yaml:\"age_buckets\"`\n\tBufCap uint32 `yaml:\"buf_cap\"`\n}\n\ntype HistogramOptions struct {\n\tBuckets []float64 `yaml:\"buckets\"`\n}\n\ntype metricObjective struct {\n\tQuantile float64 `yaml:\"quantile\"`\n\tError float64 `yaml:\"error\"`\n}\n\nvar defaultQuantiles = []metricObjective{\n\t{Quantile: 0.5, Error: 0.05},\n\t{Quantile: 0.9, Error: 0.01},\n\t{Quantile: 0.99, Error: 0.001},\n}\n\nfunc (m *MetricMapper) InitFromYAMLString(fileContents string) error {\n\tvar n MetricMapper\n\n\tif err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {\n\t\treturn err\n\t}\n\n\tif len(n.Defaults.HistogramOptions.Buckets) == 0 {\n\t\tn.Defaults.HistogramOptions.Buckets = prometheus.DefBuckets\n\t}\n\n\tif len(n.Defaults.SummaryOptions.Quantiles) == 0 {\n\t\tn.Defaults.SummaryOptions.Quantiles = defaultQuantiles\n\t}\n\n\tif n.Defaults.MatchType == MatchTypeDefault {\n\t\tn.Defaults.MatchType = MatchTypeGlob\n\t}\n\n\tremainingMappingsCount := len(n.Mappings)\n\n\tn.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeObserver)},\n\t\tremainingMappingsCount, n.Defaults.GlobDisableOrdering)\n\n\tfor i := range n.Mappings {\n\t\tremainingMappingsCount--\n\n\t\tcurrentMapping := &n.Mappings[i]\n\n\t\t\/\/ check that label is correct\n\t\tfor k := range currentMapping.Labels {\n\t\t\tif !labelNameRE.MatchString(k) {\n\t\t\t\treturn fmt.Errorf(\"invalid label key: %s\", k)\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.Name == \"\" {\n\t\t\treturn fmt.Errorf(\"line %d: metric mapping didn't set a metric name\", i)\n\t\t}\n\n\t\tif !metricNameRE.MatchString(currentMapping.Name) {\n\t\t\treturn fmt.Errorf(\"metric name '%s' doesn't match regex '%s'\", currentMapping.Name, metricNameRE)\n\t\t}\n\n\t\tif currentMapping.MatchType == \"\" {\n\t\t\tcurrentMapping.MatchType = n.Defaults.MatchType\n\t\t}\n\n\t\tif currentMapping.Action == \"\" {\n\t\t\tcurrentMapping.Action = ActionTypeMap\n\t\t}\n\n\t\tif currentMapping.MatchType == MatchTypeGlob {\n\t\t\tn.doFSM = true\n\t\t\tif !metricLineRE.MatchString(currentMapping.Match) {\n\t\t\t\treturn fmt.Errorf(\"invalid match: %s\", currentMapping.Match)\n\t\t\t}\n\n\t\t\tcaptureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType),\n\t\t\t\tremainingMappingsCount, currentMapping)\n\n\t\t\tcurrentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount)\n\n\t\t\tlabelKeys := make([]string, len(currentMapping.Labels))\n\t\t\tlabelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels))\n\t\t\tlabelIndex := 0\n\t\t\tfor label, valueExpr := range currentMapping.Labels {\n\t\t\t\tlabelKeys[labelIndex] = label\n\t\t\t\tlabelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount)\n\t\t\t\tlabelIndex++\n\t\t\t}\n\t\t\tcurrentMapping.labelFormatters = labelFormatters\n\t\t\tcurrentMapping.labelKeys = labelKeys\n\t\t} else {\n\t\t\tif regex, err := regexp.Compile(currentMapping.Match); err != nil {\n\t\t\t\treturn fmt.Errorf(\"invalid regex %s in mapping: %v\", currentMapping.Match, err)\n\t\t\t} else {\n\t\t\t\tcurrentMapping.regex = regex\n\t\t\t}\n\t\t\tn.doRegex = true\n\t\t}\n\n\t\tif currentMapping.ObserverType == \"\" {\n\t\t\tcurrentMapping.ObserverType = n.Defaults.ObserverType\n\t\t}\n\n\t\tif currentMapping.LegacyQuantiles != nil &&\n\t\t\t(currentMapping.SummaryOptions == nil || currentMapping.SummaryOptions.Quantiles != nil) {\n\t\t\tlevel.Warn(m.Logger).Log(\"msg\", \"using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy\")\n\t\t}\n\n\t\tif currentMapping.LegacyBuckets != nil &&\n\t\t\t(currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) {\n\t\t\tlevel.Warn(m.Logger).Log(\"msg\", \"using the top level buckets is deprecated. Please use buckets in the histogram_options hierarchy\")\n\t\t}\n\n\t\tif currentMapping.SummaryOptions != nil &&\n\t\t\tcurrentMapping.LegacyQuantiles != nil &&\n\t\t\tcurrentMapping.SummaryOptions.Quantiles != nil {\n\t\t\treturn fmt.Errorf(\"cannot use quantiles in both the top level and summary options at the same time in %s\", currentMapping.Match)\n\t\t}\n\n\t\tif currentMapping.HistogramOptions != nil &&\n\t\t\tcurrentMapping.LegacyBuckets != nil &&\n\t\t\tcurrentMapping.HistogramOptions.Buckets != nil {\n\t\t\treturn fmt.Errorf(\"cannot use buckets in both the top level and histogram options at the same time in %s\", currentMapping.Match)\n\t\t}\n\n\t\tif currentMapping.ObserverType == ObserverTypeHistogram {\n\t\t\tif currentMapping.SummaryOptions != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot use histogram observer and summary options at the same time\")\n\t\t\t}\n\t\t\tif currentMapping.HistogramOptions == nil {\n\t\t\t\tcurrentMapping.HistogramOptions = &HistogramOptions{}\n\t\t\t}\n\t\t\tif currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 {\n\t\t\t\tcurrentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets\n\t\t\t}\n\t\t\tif currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 {\n\t\t\t\tcurrentMapping.HistogramOptions.Buckets = n.Defaults.HistogramOptions.Buckets\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.ObserverType == ObserverTypeSummary {\n\t\t\tif currentMapping.HistogramOptions != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot use summary observer and histogram options at the same time\")\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions == nil {\n\t\t\t\tcurrentMapping.SummaryOptions = &SummaryOptions{}\n\t\t\t}\n\t\t\tif currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.Quantiles = n.Defaults.SummaryOptions.Quantiles\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.MaxAge == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.MaxAge = n.Defaults.SummaryOptions.MaxAge\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.AgeBuckets == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.AgeBuckets = n.Defaults.SummaryOptions.AgeBuckets\n\t\t\t}\n\t\t\tif currentMapping.SummaryOptions.BufCap == 0 {\n\t\t\t\tcurrentMapping.SummaryOptions.BufCap = n.Defaults.SummaryOptions.BufCap\n\t\t\t}\n\t\t}\n\n\t\tif currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 {\n\t\t\tcurrentMapping.Ttl = n.Defaults.Ttl\n\t\t}\n\t}\n\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tm.Defaults = n.Defaults\n\tm.Mappings = n.Mappings\n\n\t\/\/ Reset the cache since this function can be used to reload config\n\tif m.cache != nil {\n\t\tm.cache.Reset()\n\t}\n\n\tif n.doFSM {\n\t\tvar mappings []string\n\t\tfor _, mapping := range n.Mappings {\n\t\t\tif mapping.MatchType == MatchTypeGlob {\n\t\t\t\tmappings = append(mappings, mapping.Match)\n\t\t\t}\n\t\t}\n\t\tn.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled, m.Logger)\n\n\t\tm.FSM = n.FSM\n\t\tm.doRegex = n.doRegex\n\t}\n\tm.doFSM = n.doFSM\n\n\tif m.MappingsCount != nil {\n\t\tm.MappingsCount.Set(float64(len(n.Mappings)))\n\t}\n\n\tif m.Logger == nil {\n\t\tm.Logger = log.NewNopLogger()\n\t}\n\n\treturn nil\n}\n\nfunc (m *MetricMapper) InitFromFile(fileName string) error {\n\tmappingStr, err := os.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.InitFromYAMLString(string(mappingStr))\n}\n\n\/\/ UseCache tells the mapper to use a cache that implements the MetricMapperCache interface.\n\/\/ This cache MUST be thread-safe!\nfunc (m *MetricMapper) UseCache(cache MetricMapperCache) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tm.cache = cache\n}\n\nfunc (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\t\/\/ only use a cache if one is present\n\tif m.cache != nil {\n\t\tresult, cached := m.cache.Get(formatKey(statsdMetric, statsdMetricType))\n\t\tif cached {\n\t\t\tr := result.(MetricMapperCacheResult)\n\t\t\treturn r.Mapping, r.Labels, r.Matched\n\t\t}\n\t}\n\n\t\/\/ glob matching\n\tif m.doFSM {\n\t\tfinalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType))\n\t\tif finalState != nil && finalState.Result != nil {\n\t\t\tv := finalState.Result.(*MetricMapping)\n\t\t\tresult := copyMetricMapping(v)\n\t\t\tresult.Name = result.nameFormatter.Format(captures)\n\n\t\t\tlabels := prometheus.Labels{}\n\t\t\tfor index, formatter := range result.labelFormatters {\n\t\t\t\tlabels[result.labelKeys[index]] = formatter.Format(captures)\n\t\t\t}\n\n\t\t\tr := MetricMapperCacheResult{\n\t\t\t\tMapping: result,\n\t\t\t\tMatched: true,\n\t\t\t\tLabels: labels,\n\t\t\t}\n\t\t\t\/\/ add match to cache\n\t\t\tif m.cache != nil {\n\t\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), r)\n\t\t\t}\n\n\t\t\treturn result, labels, true\n\t\t} else if !m.doRegex {\n\t\t\t\/\/ if there's no regex match type, return immediately\n\t\t\t\/\/ Add miss to cache\n\t\t\tif m.cache != nil {\n\t\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})\n\t\t\t}\n\t\t\treturn nil, nil, false\n\t\t}\n\t}\n\n\t\/\/ regex matching\n\tfor _, mapping := range m.Mappings {\n\t\t\/\/ if a rule don't have regex matching type, the regex field is unset\n\t\tif mapping.regex == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmatches := mapping.regex.FindStringSubmatchIndex(statsdMetric)\n\t\tif len(matches) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmapping.Name = string(mapping.regex.ExpandString(\n\t\t\t[]byte{},\n\t\t\tmapping.Name,\n\t\t\tstatsdMetric,\n\t\t\tmatches,\n\t\t))\n\n\t\tif mt := mapping.MatchMetricType; mt != \"\" && mt != statsdMetricType {\n\t\t\tcontinue\n\t\t}\n\n\t\tlabels := prometheus.Labels{}\n\t\tfor label, valueExpr := range mapping.Labels {\n\t\t\tvalue := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)\n\t\t\tlabels[label] = string(value)\n\t\t}\n\n\t\tr := MetricMapperCacheResult{\n\t\t\tMapping: &mapping,\n\t\t\tMatched: true,\n\t\t\tLabels: labels,\n\t\t}\n\t\t\/\/ Add Match to cache\n\t\tif m.cache != nil {\n\t\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), r)\n\t\t}\n\n\t\treturn &mapping, labels, true\n\t}\n\n\t\/\/ Add Miss to cache\n\tif m.cache != nil {\n\t\tm.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})\n\t}\n\treturn nil, nil, false\n}\n\n\/\/ make a shallow copy so that we do not overwrite name\n\/\/ as multiple names can be matched by same mapping\nfunc copyMetricMapping(in *MetricMapping) *MetricMapping {\n\tout := *in\n\treturn &out\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package pbutil defines interfaces for handling Protocol Buffer objects.\npackage pbutil\n\nimport \"github.com\/coreos\/pkg\/capnslog\"\n\nvar (\n\tplog = capnslog.NewPackageLogger(\"github.com\/coreos\/etcd\/pkg\", \"flags\")\n)\n\ntype Marshaler interface {\n\tMarshal() (data []byte, err error)\n}\n\ntype Unmarshaler interface {\n\tUnmarshal(data []byte) error\n}\n\nfunc MustMarshal(m Marshaler) []byte {\n\td, err := m.Marshal()\n\tif err != nil {\n\t\tplog.Panicf(\"marshal should never fail (%v)\", err)\n\t}\n\treturn d\n}\n\nfunc MustUnmarshal(um Unmarshaler, data []byte) {\n\tif err := um.Unmarshal(data); err != nil {\n\t\tplog.Panicf(\"unmarshal should never fail (%v)\", err)\n\t}\n}\n\nfunc MaybeUnmarshal(um Unmarshaler, data []byte) bool {\n\tif err := um.Unmarshal(data); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc GetBool(v *bool) (vv bool, set bool) {\n\tif v == nil {\n\t\treturn false, false\n\t}\n\treturn *v, true\n}\n\nfunc Boolp(b bool) *bool { return &b }\n<commit_msg>corrected the package name in logger<commit_after>\/\/ Copyright 2015 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package pbutil defines interfaces for handling Protocol Buffer objects.\npackage pbutil\n\nimport \"github.com\/coreos\/pkg\/capnslog\"\n\nvar (\n\tplog = capnslog.NewPackageLogger(\"github.com\/coreos\/etcd\/pkg\", \"pbutil\")\n)\n\ntype Marshaler interface {\n\tMarshal() (data []byte, err error)\n}\n\ntype Unmarshaler interface {\n\tUnmarshal(data []byte) error\n}\n\nfunc MustMarshal(m Marshaler) []byte {\n\td, err := m.Marshal()\n\tif err != nil {\n\t\tplog.Panicf(\"marshal should never fail (%v)\", err)\n\t}\n\treturn d\n}\n\nfunc MustUnmarshal(um Unmarshaler, data []byte) {\n\tif err := um.Unmarshal(data); err != nil {\n\t\tplog.Panicf(\"unmarshal should never fail (%v)\", err)\n\t}\n}\n\nfunc MaybeUnmarshal(um Unmarshaler, data []byte) bool {\n\tif err := um.Unmarshal(data); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc GetBool(v *bool) (vv bool, set bool) {\n\tif v == nil {\n\t\treturn false, false\n\t}\n\treturn *v, true\n}\n\nfunc Boolp(b bool) *bool { return &b }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package python contains Python buildpack library code.\npackage python\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/env\"\n\tgcp \"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/gcpbuildpack\"\n\t\"github.com\/buildpacks\/libcnb\"\n)\n\nconst (\n\tdateFormat = time.RFC3339Nano\n\t\/\/ expirationTime is an arbitrary amount of time of 1 day to refresh the cache layer.\n\texpirationTime = time.Duration(time.Hour * 24)\n\n\tpythonVersionKey = \"python_version\"\n\tdependencyHashKey = \"dependency_hash\"\n\texpiryTimestampKey = \"expiry_timestamp\"\n\n\tcacheName = \"pipcache\"\n\n\t\/\/ RequirementsFilesEnv is an environment variable containg os-path-separator-separated list of paths to pip requirements files.\n\t\/\/ The requirements files are processed from left to right, with requirements from the next overriding any conflicts from the previous.\n\tRequirementsFilesEnv = \"GOOGLE_INTERNAL_REQUIREMENTS_FILES\"\n)\n\nvar (\n\t\/\/ RequirementsProvides denotes that the buildpack provides requirements.txt in the environment.\n\tRequirementsProvides = []libcnb.BuildPlanProvide{{Name: \"requirements.txt\"}}\n\t\/\/ RequirementsRequires denotes that the buildpack consumes requirements.txt from the environment.\n\tRequirementsRequires = []libcnb.BuildPlanRequire{{Name: \"requirements.txt\"}}\n\t\/\/ RequirementsProvidesPlan is a build plan returned by buildpacks that provide requirements.txt.\n\tRequirementsProvidesPlan = libcnb.BuildPlan{Provides: RequirementsProvides}\n\t\/\/ RequirementsProvidesRequiresPlan is a build plan returned by buildpacks that consume requirements.txt.\n\tRequirementsProvidesRequiresPlan = libcnb.BuildPlan{Provides: RequirementsProvides, Requires: RequirementsRequires}\n)\n\n\/\/ Version returns the installed version of Python.\nfunc Version(ctx *gcp.Context) string {\n\tresult := ctx.Exec([]string{\"python3\", \"--version\"})\n\treturn strings.TrimSpace(result.Stdout)\n}\n\n\/\/ InstallRequirements installs dependencies from the given requirements files in a virtual env.\n\/\/ It will install the files in order in which they are specified, so that dependencies specified\n\/\/ in later requirements files can override later ones.\n\/\/\n\/\/ This function is responsible for installing requirements files for all buildpacks that require\n\/\/ it. The buildpacks used to install requirements into separate layers and add the layer path to\n\/\/ PYTHONPATH. However, this caused issues with some packages as it would allow users to\n\/\/ accidentally override some builtin stdlib modules, e.g. typing, enum, etc., and cause both\n\/\/ build-time and run-time failures.\nfunc InstallRequirements(ctx *gcp.Context, l *libcnb.Layer, reqs ...string) error {\n\t\/\/ Defensive check, this should not happen in practice.\n\tif len(reqs) == 0 {\n\t\tctx.Debugf(\"No requirements.txt to install, clearing layer.\")\n\t\tctx.ClearLayer(l)\n\t\treturn nil\n\t}\n\n\t\/\/ Check if we can use the cached-layer as is without reinstalling dependencies.\n\tcached, err := checkCache(ctx, l, cache.WithFiles(reqs...))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking cache: %w\", err)\n\t}\n\tif cached {\n\t\tctx.CacheHit(l.Name)\n\t\treturn nil\n\t}\n\tctx.CacheMiss(l.Name)\n\n\t\/\/ The cache layer is used as PIP_CACHE_DIR to keep the cache directory across builds in case\n\t\/\/ we do not get a full cache hit.\n\tcl := ctx.Layer(cacheName, gcp.CacheLayer)\n\n\t\/\/ History of the logic below:\n\t\/\/\n\t\/\/ pip install --target has several subtle issues:\n\t\/\/ We cannot use --upgrade: https:\/\/github.com\/pypa\/pip\/issues\/8799.\n\t\/\/ We also cannot _not_ use --upgrade, see the requirements_bin_conflict acceptance test.\n\t\/\/\n\t\/\/ Instead, we use Python per-user site-packages (https:\/\/www.python.org\/dev\/peps\/pep-0370\/)\n\t\/\/ where we can and virtualenv where we cannot.\n\t\/\/\n\t\/\/ Each requirements file is installed separately to allow the requirements.txt files\n\t\/\/ to specify conflicting dependencies (e.g. functions-framework pins package A at 1.2.0 but\n\t\/\/ the user's requirements.txt file pins A at 1.4.0. The user should be able to override\n\t\/\/ the functions-framework-pinned package).\n\n\t\/\/ HACK: For backwards compatibility with Python 3.7 and 3.8 on App Engine and Cloud Functions.\n\tvirtualEnv := requiresVirtualEnv()\n\tif virtualEnv {\n\t\t\/\/ --without-pip and --system-site-packages allow us to use `pip` and other packages from the\n\t\t\/\/ build image and avoid reinstalling them, saving about 10MB.\n\t\t\/\/ TODO(b\/140775593): Use virtualenv pip after FTL is no longer used and remove from build image.\n\t\tctx.Exec([]string{\"python3\", \"-m\", \"venv\", \"--without-pip\", \"--system-site-packages\", l.Path})\n\t\t\/\/ The VIRTUAL_ENV variable is usually set by the virtual environment's activate script.\n\t\tl.SharedEnvironment.Override(\"VIRTUAL_ENV\", l.Path)\n\t\t\/\/ Use the virtual environment python3 for all subsequent commands in this buildpack, for\n\t\t\/\/ subsequent buildpacks, l.Path\/bin will be added by lifecycle.\n\t\tctx.Setenv(\"PATH\", filepath.Join(l.Path, \"bin\")+string(os.PathListSeparator)+os.Getenv(\"PATH\"))\n\t\tctx.Setenv(\"VIRTUAL_ENV\", l.Path)\n\t} else {\n\t\tl.SharedEnvironment.Default(\"PYTHONUSERBASE\", l.Path)\n\t\tctx.Setenv(\"PYTHONUSERBASE\", l.Path)\n\t}\n\n\tfor _, req := range reqs {\n\t\tcmd := []string{\n\t\t\t\"python3\", \"-m\", \"pip\", \"install\",\n\t\t\t\"--requirement\", req,\n\t\t\t\"--upgrade\",\n\t\t\t\"--upgrade-strategy\", \"only-if-needed\",\n\t\t\t\"--no-warn-script-location\", \/\/ bin is added at run time by lifecycle.\n\t\t\t\"--no-warn-conflicts\", \/\/ Needed for python37 which allowed users to override dependencies. For newer versions, we do a separate `pip check`.\n\t\t\t\"--force-reinstall\", \/\/ Some dependencies may be in the build image but not run image. Later requirements.txt should override earlier.\n\t\t}\n\t\tif !virtualEnv {\n\t\t\tcmd = append(cmd, \"--user\") \/\/ Install into user site-packages directory.\n\t\t}\n\t\tctx.Exec(cmd,\n\t\t\tgcp.WithEnv(\"PIP_CACHE_DIR=\"+cl.Path),\n\t\t\tgcp.WithUserAttribution)\n\t}\n\treturn nil\n}\n\n\/\/ checkCache checks whether cached dependencies exist, match, and have not expired.\nfunc checkCache(ctx *gcp.Context, l *libcnb.Layer, opts ...cache.Option) (bool, error) {\n\tcurrentPythonVersion := Version(ctx)\n\topts = append(opts, cache.WithStrings(currentPythonVersion))\n\tcurrentDependencyHash, err := cache.Hash(ctx, opts...)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"computing dependency hash: %v\", err)\n\t}\n\n\tmetaDependencyHash := ctx.GetMetadata(l, dependencyHashKey)\n\t\/\/ Check cache expiration to pick up new versions of dependencies that are not pinned.\n\texpired := cacheExpired(ctx, l)\n\n\t\/\/ Perform install, skipping if the dependency hash matches existing metadata.\n\tctx.Debugf(\"Current dependency hash: %q\", currentDependencyHash)\n\tctx.Debugf(\" Cache dependency hash: %q\", metaDependencyHash)\n\tif currentDependencyHash == metaDependencyHash && !expired {\n\t\tctx.Logf(\"Dependencies cache hit, skipping installation.\")\n\t\treturn true, nil\n\t}\n\n\tif metaDependencyHash == \"\" {\n\t\tctx.Debugf(\"No metadata found from a previous build, skipping cache.\")\n\t}\n\n\tctx.ClearLayer(l)\n\n\tctx.Logf(\"Installing application dependencies.\")\n\t\/\/ Update the layer metadata.\n\tctx.SetMetadata(l, dependencyHashKey, currentDependencyHash)\n\tctx.SetMetadata(l, pythonVersionKey, currentPythonVersion)\n\tctx.SetMetadata(l, expiryTimestampKey, time.Now().Add(expirationTime).Format(dateFormat))\n\n\treturn false, nil\n}\n\n\/\/ cacheExpired returns true when the cache is past expiration.\nfunc cacheExpired(ctx *gcp.Context, l *libcnb.Layer) bool {\n\tt := time.Now()\n\texpiry := ctx.GetMetadata(l, expiryTimestampKey)\n\tif expiry != \"\" {\n\t\tvar err error\n\t\tt, err = time.Parse(dateFormat, expiry)\n\t\tif err != nil {\n\t\t\tctx.Debugf(\"Could not parse expiration date %q, assuming now: %v\", expiry, err)\n\t\t}\n\t}\n\treturn !t.After(time.Now())\n}\n\n\/\/ requiresVirtualEnv returns true for runtimes that require a virtual environment to be created before pip install.\n\/\/ We cannot use Python per-user site-packages (https:\/\/www.python.org\/dev\/peps\/pep-0370\/),\n\/\/ because Python 3.7 and 3.8 on App Engine and Cloud Functions have a virtualenv set up\n\/\/ that disables user site-packages. The base images include a virtual environment pointing to\n\/\/ a directory that is not writeable in the buildpacks world (\/env). In order to keep\n\/\/ compatiblity with base image updates, we replace the virtual environment with a writeable one.\nfunc requiresVirtualEnv() bool {\n\truntime := os.Getenv(env.Runtime)\n\treturn runtime == \"python37\" || runtime == \"python38\"\n}\n<commit_msg>Generate deterministic pyc files<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package python contains Python buildpack library code.\npackage python\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/env\"\n\tgcp \"github.com\/GoogleCloudPlatform\/buildpacks\/pkg\/gcpbuildpack\"\n\t\"github.com\/buildpacks\/libcnb\"\n)\n\nconst (\n\tdateFormat = time.RFC3339Nano\n\t\/\/ expirationTime is an arbitrary amount of time of 1 day to refresh the cache layer.\n\texpirationTime = time.Duration(time.Hour * 24)\n\n\tpythonVersionKey = \"python_version\"\n\tdependencyHashKey = \"dependency_hash\"\n\texpiryTimestampKey = \"expiry_timestamp\"\n\n\tcacheName = \"pipcache\"\n\n\t\/\/ RequirementsFilesEnv is an environment variable containg os-path-separator-separated list of paths to pip requirements files.\n\t\/\/ The requirements files are processed from left to right, with requirements from the next overriding any conflicts from the previous.\n\tRequirementsFilesEnv = \"GOOGLE_INTERNAL_REQUIREMENTS_FILES\"\n)\n\nvar (\n\t\/\/ RequirementsProvides denotes that the buildpack provides requirements.txt in the environment.\n\tRequirementsProvides = []libcnb.BuildPlanProvide{{Name: \"requirements.txt\"}}\n\t\/\/ RequirementsRequires denotes that the buildpack consumes requirements.txt from the environment.\n\tRequirementsRequires = []libcnb.BuildPlanRequire{{Name: \"requirements.txt\"}}\n\t\/\/ RequirementsProvidesPlan is a build plan returned by buildpacks that provide requirements.txt.\n\tRequirementsProvidesPlan = libcnb.BuildPlan{Provides: RequirementsProvides}\n\t\/\/ RequirementsProvidesRequiresPlan is a build plan returned by buildpacks that consume requirements.txt.\n\tRequirementsProvidesRequiresPlan = libcnb.BuildPlan{Provides: RequirementsProvides, Requires: RequirementsRequires}\n)\n\n\/\/ Version returns the installed version of Python.\nfunc Version(ctx *gcp.Context) string {\n\tresult := ctx.Exec([]string{\"python3\", \"--version\"})\n\treturn strings.TrimSpace(result.Stdout)\n}\n\n\/\/ InstallRequirements installs dependencies from the given requirements files in a virtual env.\n\/\/ It will install the files in order in which they are specified, so that dependencies specified\n\/\/ in later requirements files can override later ones.\n\/\/\n\/\/ This function is responsible for installing requirements files for all buildpacks that require\n\/\/ it. The buildpacks used to install requirements into separate layers and add the layer path to\n\/\/ PYTHONPATH. However, this caused issues with some packages as it would allow users to\n\/\/ accidentally override some builtin stdlib modules, e.g. typing, enum, etc., and cause both\n\/\/ build-time and run-time failures.\nfunc InstallRequirements(ctx *gcp.Context, l *libcnb.Layer, reqs ...string) error {\n\t\/\/ Defensive check, this should not happen in practice.\n\tif len(reqs) == 0 {\n\t\tctx.Debugf(\"No requirements.txt to install, clearing layer.\")\n\t\tctx.ClearLayer(l)\n\t\treturn nil\n\t}\n\n\t\/\/ Check if we can use the cached-layer as is without reinstalling dependencies.\n\tcached, err := checkCache(ctx, l, cache.WithFiles(reqs...))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking cache: %w\", err)\n\t}\n\tif cached {\n\t\tctx.CacheHit(l.Name)\n\t\treturn nil\n\t}\n\tctx.CacheMiss(l.Name)\n\n\t\/\/ The cache layer is used as PIP_CACHE_DIR to keep the cache directory across builds in case\n\t\/\/ we do not get a full cache hit.\n\tcl := ctx.Layer(cacheName, gcp.CacheLayer)\n\n\t\/\/ History of the logic below:\n\t\/\/\n\t\/\/ pip install --target has several subtle issues:\n\t\/\/ We cannot use --upgrade: https:\/\/github.com\/pypa\/pip\/issues\/8799.\n\t\/\/ We also cannot _not_ use --upgrade, see the requirements_bin_conflict acceptance test.\n\t\/\/\n\t\/\/ Instead, we use Python per-user site-packages (https:\/\/www.python.org\/dev\/peps\/pep-0370\/)\n\t\/\/ where we can and virtualenv where we cannot.\n\t\/\/\n\t\/\/ Each requirements file is installed separately to allow the requirements.txt files\n\t\/\/ to specify conflicting dependencies (e.g. functions-framework pins package A at 1.2.0 but\n\t\/\/ the user's requirements.txt file pins A at 1.4.0. The user should be able to override\n\t\/\/ the functions-framework-pinned package).\n\n\t\/\/ HACK: For backwards compatibility with Python 3.7 and 3.8 on App Engine and Cloud Functions.\n\tvirtualEnv := requiresVirtualEnv()\n\tif virtualEnv {\n\t\t\/\/ --without-pip and --system-site-packages allow us to use `pip` and other packages from the\n\t\t\/\/ build image and avoid reinstalling them, saving about 10MB.\n\t\t\/\/ TODO(b\/140775593): Use virtualenv pip after FTL is no longer used and remove from build image.\n\t\tctx.Exec([]string{\"python3\", \"-m\", \"venv\", \"--without-pip\", \"--system-site-packages\", l.Path})\n\t\t\/\/ The VIRTUAL_ENV variable is usually set by the virtual environment's activate script.\n\t\tl.SharedEnvironment.Override(\"VIRTUAL_ENV\", l.Path)\n\t\t\/\/ Use the virtual environment python3 for all subsequent commands in this buildpack, for\n\t\t\/\/ subsequent buildpacks, l.Path\/bin will be added by lifecycle.\n\t\tctx.Setenv(\"PATH\", filepath.Join(l.Path, \"bin\")+string(os.PathListSeparator)+os.Getenv(\"PATH\"))\n\t\tctx.Setenv(\"VIRTUAL_ENV\", l.Path)\n\t} else {\n\t\tl.SharedEnvironment.Default(\"PYTHONUSERBASE\", l.Path)\n\t\tctx.Setenv(\"PYTHONUSERBASE\", l.Path)\n\t}\n\n\tfor _, req := range reqs {\n\t\tcmd := []string{\n\t\t\t\"python3\", \"-m\", \"pip\", \"install\",\n\t\t\t\"--requirement\", req,\n\t\t\t\"--upgrade\",\n\t\t\t\"--upgrade-strategy\", \"only-if-needed\",\n\t\t\t\"--no-warn-script-location\", \/\/ bin is added at run time by lifecycle.\n\t\t\t\"--no-warn-conflicts\", \/\/ Needed for python37 which allowed users to override dependencies. For newer versions, we do a separate `pip check`.\n\t\t\t\"--force-reinstall\", \/\/ Some dependencies may be in the build image but not run image. Later requirements.txt should override earlier.\n\t\t\t\"--no-compile\", \/\/ Prevent default timestamp-based bytecode compilation. Deterministic pycs are generated in a second step below.\n\t\t}\n\t\tif !virtualEnv {\n\t\t\tcmd = append(cmd, \"--user\") \/\/ Install into user site-packages directory.\n\t\t}\n\t\tctx.Exec(cmd,\n\t\t\tgcp.WithEnv(\"PIP_CACHE_DIR=\"+cl.Path),\n\t\t\tgcp.WithUserAttribution)\n\t}\n\n\t\/\/ Generate deterministic hash-based pycs (https:\/\/www.python.org\/dev\/peps\/pep-0552\/).\n\t\/\/ Use the unchecked version to skip hash validation at run time (for faster startup).\n\tresult, cerr := ctx.ExecWithErr([]string{\n\t\t\"python3\", \"-m\", \"compileall\",\n\t\t\"--invalidation-mode\", \"unchecked-hash\",\n\t\t\"-qq\", \/\/ Do not print any message (matches `pip install` behavior).\n\t\tl.Path,\n\t},\n\t\tgcp.WithUserAttribution)\n\tif cerr != nil {\n\t\tif result != nil {\n\t\t\tif result.ExitCode == 1 {\n\t\t\t\t\/\/ Ignore file compilation errors (matches `pip install` behavior).\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"compileall: %s\", result.Combined)\n\t\t}\n\t\treturn fmt.Errorf(\"compileall: %v\", cerr)\n\t}\n\n\treturn nil\n}\n\n\/\/ checkCache checks whether cached dependencies exist, match, and have not expired.\nfunc checkCache(ctx *gcp.Context, l *libcnb.Layer, opts ...cache.Option) (bool, error) {\n\tcurrentPythonVersion := Version(ctx)\n\topts = append(opts, cache.WithStrings(currentPythonVersion))\n\tcurrentDependencyHash, err := cache.Hash(ctx, opts...)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"computing dependency hash: %v\", err)\n\t}\n\n\tmetaDependencyHash := ctx.GetMetadata(l, dependencyHashKey)\n\t\/\/ Check cache expiration to pick up new versions of dependencies that are not pinned.\n\texpired := cacheExpired(ctx, l)\n\n\t\/\/ Perform install, skipping if the dependency hash matches existing metadata.\n\tctx.Debugf(\"Current dependency hash: %q\", currentDependencyHash)\n\tctx.Debugf(\" Cache dependency hash: %q\", metaDependencyHash)\n\tif currentDependencyHash == metaDependencyHash && !expired {\n\t\tctx.Logf(\"Dependencies cache hit, skipping installation.\")\n\t\treturn true, nil\n\t}\n\n\tif metaDependencyHash == \"\" {\n\t\tctx.Debugf(\"No metadata found from a previous build, skipping cache.\")\n\t}\n\n\tctx.ClearLayer(l)\n\n\tctx.Logf(\"Installing application dependencies.\")\n\t\/\/ Update the layer metadata.\n\tctx.SetMetadata(l, dependencyHashKey, currentDependencyHash)\n\tctx.SetMetadata(l, pythonVersionKey, currentPythonVersion)\n\tctx.SetMetadata(l, expiryTimestampKey, time.Now().Add(expirationTime).Format(dateFormat))\n\n\treturn false, nil\n}\n\n\/\/ cacheExpired returns true when the cache is past expiration.\nfunc cacheExpired(ctx *gcp.Context, l *libcnb.Layer) bool {\n\tt := time.Now()\n\texpiry := ctx.GetMetadata(l, expiryTimestampKey)\n\tif expiry != \"\" {\n\t\tvar err error\n\t\tt, err = time.Parse(dateFormat, expiry)\n\t\tif err != nil {\n\t\t\tctx.Debugf(\"Could not parse expiration date %q, assuming now: %v\", expiry, err)\n\t\t}\n\t}\n\treturn !t.After(time.Now())\n}\n\n\/\/ requiresVirtualEnv returns true for runtimes that require a virtual environment to be created before pip install.\n\/\/ We cannot use Python per-user site-packages (https:\/\/www.python.org\/dev\/peps\/pep-0370\/),\n\/\/ because Python 3.7 and 3.8 on App Engine and Cloud Functions have a virtualenv set up\n\/\/ that disables user site-packages. The base images include a virtual environment pointing to\n\/\/ a directory that is not writeable in the buildpacks world (\/env). In order to keep\n\/\/ compatiblity with base image updates, we replace the virtual environment with a writeable one.\nfunc requiresVirtualEnv() bool {\n\truntime := os.Getenv(env.Runtime)\n\treturn runtime == \"python37\" || runtime == \"python38\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage sdl\n\n\/*\n#include <WinDef.h>\n*\/\nimport \"C\"\nimport \"unsafe\"\n\n\/\/ WindowsMsg contains Microsoft Windows window information.\ntype WindowsMsg struct {\n\tHwnd C.HWND\n\tMsg C.UINT\n\tWParam C.WPARAM\n\tLParam C.LPARAM\n}\n\n\/\/ Windows() returns Microsoft Windows message.\nfunc (msg *SysWMmsg) Windows() *WindowsMsg {\n\treturn (*WindowsMsg)(unsafe.Pointer(&msg.data[0]))\n}\n<commit_msg>fix header name for case sensitive file systems (#426)<commit_after>\/\/ +build windows\n\npackage sdl\n\n\/*\n#include <windef.h>\n*\/\nimport \"C\"\nimport \"unsafe\"\n\n\/\/ WindowsMsg contains Microsoft Windows window information.\ntype WindowsMsg struct {\n\tHwnd C.HWND\n\tMsg C.UINT\n\tWParam C.WPARAM\n\tLParam C.LPARAM\n}\n\n\/\/ Windows() returns Microsoft Windows message.\nfunc (msg *SysWMmsg) Windows() *WindowsMsg {\n\treturn (*WindowsMsg)(unsafe.Pointer(&msg.data[0]))\n}\n<|endoftext|>"} {"text":"<commit_before>package pt\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/monochromegane\/the_platinum_searcher\/util\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype Searcher struct {\n\tRoot, Pattern string\n}\n\nfunc (self *Searcher) Search() {\n\tgrep := make(chan string, 2)\n\tmatch := make(chan string, 2)\n\tdone := make(chan bool)\n\tgo self.find(self.Root, grep)\n\tgo self.grep(self.Pattern, grep, match)\n\tgo self.print(match, done)\n\t<-done\n}\n\nfunc (self *Searcher) find(root string, grep chan string) {\n\tfilepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n fileType := pt.IdentifyFileType(path)\n if fileType == pt.BINARY {\n return nil\n }\n\t\tgrep <- path\n\t\treturn nil\n\t})\n\tgrep <- \"end\"\n}\n\nfunc (self *Searcher) grep(pattern string, grep chan string, match chan string) {\n\tfor {\n\t\tpath := <-grep\n\t\tif path == \"end\" {\n\t\t\tbreak\n\t\t}\n\n\t\tfh, err := os.Open(path)\n\t\tf := bufio.NewReader(fh)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuf := make([]byte, 1024)\n\n\t\tfor {\n\t\t\tbuf, _, err = f.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ts := string(buf)\n\t\t\tif strings.Contains(s, pattern) {\n\t\t\t\tmatch <- s\n\t\t\t}\n\t\t}\n fh.Close()\n\n\t}\n\tmatch <- \"end\"\n}\n\nfunc (self *Searcher) print(match chan string, done chan bool) {\n\tfor {\n\t\tmatched := <-match\n\t\tif matched == \"end\" {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"matched: %s\\n\", matched)\n\t}\n\tdone <- true\n}\n<commit_msg>Used struct to pass arguments.<commit_after>package pt\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/monochromegane\/the_platinum_searcher\/util\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype Searcher struct {\n\tRoot, Pattern string\n}\n\ntype GrepArgument struct {\n\tPath, Pattern string\n}\n\ntype PrintArgument struct {\n Match string\n}\n\nfunc (self *Searcher) Search() {\n\tgrep := make(chan *GrepArgument, 2)\n\tmatch := make(chan *PrintArgument, 2)\n\tdone := make(chan bool)\n\tgo self.find(grep)\n\tgo self.grep(grep, match)\n\tgo self.print(match, done)\n\t<-done\n}\n\nfunc (self *Searcher) find(grep chan *GrepArgument) {\n\tfilepath.Walk(self.Root, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tfileType := pt.IdentifyFileType(path)\n\t\tif fileType == pt.BINARY {\n\t\t\treturn nil\n\t\t}\n\t\tgrep <- &GrepArgument{path, self.Pattern}\n\t\treturn nil\n\t})\n\tgrep <- nil\n}\n\nfunc (self *Searcher) grep(grep chan *GrepArgument, match chan *PrintArgument) {\n\tfor {\n\t\targ := <-grep\n\t\tif arg == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tfh, err := os.Open(arg.Path)\n\t\tf := bufio.NewReader(fh)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuf := make([]byte, 1024)\n\n\t\tfor {\n\t\t\tbuf, _, err = f.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ts := string(buf)\n\t\t\tif strings.Contains(s, arg.Pattern) {\n\t\t\t\tmatch <- &PrintArgument{s}\n\t\t\t}\n\t\t}\n\t\tfh.Close()\n\n\t}\n\tmatch <- nil\n}\n\nfunc (self *Searcher) print(match chan *PrintArgument, done chan bool) {\n\tfor {\n\t\tmatched := <-match\n\t\tif matched == nil {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"matched: %s\\n\", matched.Match)\n\t}\n\tdone <- true\n}\n<|endoftext|>"} {"text":"<commit_before>package sendgrid\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/smtp\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n)\n\ntype SGClient struct {\n\tapiUser string\n\tapiPwd string\n\tapiUrl string\n\tsmtpUrl string\n\tsmtpPort string\n\tsmtpAuth smtp.Auth\n\t\/\/ Client is the HTTP transport to use when making requests.\n\t\/\/ It will default to http.DefaultClient if nil.\n\tClient *http.Client\n}\n\n\/*\napiUser - SG username\napiPwd - SG password\n*\/\nfunc NewSendGridClient(apiUser, apiPwd string) SGClient {\n\tsmtpUrl := \"smtp.sendgrid.net\"\n\tsmtpPort := \"587\"\n\tapiUrl := \"https:\/\/sendgrid.com\/api\/mail.send.json?\"\n\tsmtpAuth := smtp.PlainAuth(\"\", apiUser, apiPwd, smtpUrl)\n\treturn SGClient{\n\t\tapiUser: apiUser,\n\t\tapiPwd: apiPwd,\n\t\tapiUrl: apiUrl,\n\t\tsmtpUrl: smtpUrl,\n\t\tsmtpPort: smtpPort,\n\t\tsmtpAuth: smtpAuth,\n\t}\n}\n\n\/*\nSend will try to use the WebAPI first. If it's a success then a nill will be returned.\nElse, the SMTP API will be used as a fail over. If this happens regardless of what is the result\nof the SMTP attempt, you will receive an array of errors.\nIf the length of the array is 1, then SMTP succeeded, else it also failed.\n*\/\nfunc (sg *SGClient) Send(m Mail) []error {\n\tif apiError := sg.SendAPI(m); apiError != nil {\n\t\tvar errors []error\n\t\terrors = append(errors, apiError)\n\t\tif smtpError := sg.SendSMTP(m); smtpError != nil {\n\t\t\treturn append(errors, smtpError)\n\t\t} else {\n\t\t\treturn errors\n\t\t}\n\t} else {\n\t\treturn nil \/\/sucess\n\t}\n}\n\nfunc (sg *SGClient) SendSMTP(m Mail) error {\n\treturn smtp.SendMail(sg.smtpUrl+\":\"+sg.smtpPort, sg.smtpAuth, m.from, m.to, []byte(m.html))\n}\n\nfunc (sg *SGClient) SendAPI(m Mail) error {\n\tvalues := url.Values{}\n\tvalues.Set(\"api_user\", sg.apiUser)\n\tvalues.Set(\"api_key\", sg.apiPwd)\n\tvalues.Set(\"subject\", m.subject)\n\tvalues.Set(\"html\", m.html)\n\tvalues.Set(\"text\", m.text)\n\tvalues.Set(\"from\", m.from)\n\theaders, e := json.Marshal(m.headers)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"sendgrid.go: Error parsing JSON headers\")\n\t}\n\tvalues.Set(\"headers\", string(headers[:]))\n\tfor i := 0; i < len(m.to); i++ {\n\t\tvalues.Set(\"to[]\", m.to[i])\n\t}\n\tfor i := 0; i < len(m.bcc); i++ {\n\t\tvalues.Set(\"bcc[]\", m.bcc[i])\n\t}\n\tfor i := 0; i < len(m.toname); i++ {\n\t\tvalues.Set(\"toname[]\", m.toname[i])\n\t}\n\tfor k, v := range m.files {\n\t\tvalues.Set(\"files[\"+k+\"]\", v)\n\t}\n\tif sg.Client == nil {\n\t\tsg.Client = http.DefaultClient\n\t}\n\tfmt.Print(values)\n\tr, e := sg.Client.PostForm(sg.apiUrl, values)\n\tdefer r.Body.Close()\n\tif r.StatusCode == 200 && e == nil {\n\t\treturn nil\n\t} else {\n\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\treturn fmt.Errorf(\"sendgrid.go: code:%d error:%v body:%s\", r.StatusCode, e, body)\n\t}\n}\n\ntype Mail struct {\n\tto []string\n\ttoname []string\n\tsubject string\n\thtml string\n\ttext string\n\tfrom string\n\tbcc []string\n\tfromname string\n\treplyto string\n\tdate string\n\tfiles map[string]string\n\theaders map[string]string\n\t\/\/still missing some stuff\n}\n\nfunc NewMail() Mail {\n\treturn Mail{}\n}\n\n\/*\nTODO: Validate email addressed with RegExp.\n*\/\nfunc (m *Mail) AddTo(email string) {\n\tm.to = append(m.to, email)\n}\n\nfunc (m *Mail) AddToName(name string) {\n\tm.toname = append(m.toname, name)\n}\n\nfunc (m *Mail) AddSubject(s string) {\n\tm.subject = s\n}\n\nfunc (m *Mail) AddHTML(html string) {\n\tm.html = html\n}\n\nfunc (m *Mail) AddText(text string) {\n\tm.text = text\n}\n\nfunc (m *Mail) AddFrom(from string) {\n\tm.from = from\n}\n\nfunc (m *Mail) AddBCC(email string) {\n\tm.bcc = append(m.bcc, email)\n}\n\nfunc (m *Mail) AddFromName(name string) {\n\tm.fromname = name\n}\n\nfunc (m *Mail) AddReplyTo(reply string) {\n\tm.replyto = reply\n}\n\nfunc (m *Mail) AddDate(date string) {\n\tm.date = date\n}\n\nfunc (m *Mail) AddHeader(header, value string) {\n\tif m.headers == nil {\n\t\tm.headers = make(map[string]string)\n\t}\n\tm.headers[header] = value\n}\n\nfunc (m *Mail) AddAttachment(filePath string) error {\n\tif m.files == nil {\n\t\tm.files = make(map[string]string)\n\t}\n\tbuf, e := ioutil.ReadFile(filePath)\n\tif e != nil {\n\t\treturn e\n\t}\n\t_, filename := filepath.Split(filePath)\n\tm.files[filename] = base64.StdEncoding.EncodeToString(buf)\n\treturn nil\n}\n<commit_msg>Not doing regex anymore<commit_after>package sendgrid\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/smtp\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n)\n\ntype SGClient struct {\n\tapiUser string\n\tapiPwd string\n\tapiUrl string\n\tsmtpUrl string\n\tsmtpPort string\n\tsmtpAuth smtp.Auth\n\t\/\/ Client is the HTTP transport to use when making requests.\n\t\/\/ It will default to http.DefaultClient if nil.\n\tClient *http.Client\n}\n\n\/*\napiUser - SG username\napiPwd - SG password\n*\/\nfunc NewSendGridClient(apiUser, apiPwd string) SGClient {\n\tsmtpUrl := \"smtp.sendgrid.net\"\n\tsmtpPort := \"587\"\n\tapiUrl := \"https:\/\/sendgrid.com\/api\/mail.send.json?\"\n\tsmtpAuth := smtp.PlainAuth(\"\", apiUser, apiPwd, smtpUrl)\n\treturn SGClient{\n\t\tapiUser: apiUser,\n\t\tapiPwd: apiPwd,\n\t\tapiUrl: apiUrl,\n\t\tsmtpUrl: smtpUrl,\n\t\tsmtpPort: smtpPort,\n\t\tsmtpAuth: smtpAuth,\n\t}\n}\n\n\/*\nSend will try to use the WebAPI first. If it's a success then a nill will be returned.\nElse, the SMTP API will be used as a fail over. If this happens regardless of what is the result\nof the SMTP attempt, you will receive an array of errors.\nIf the length of the array is 1, then SMTP succeeded, else it also failed.\n*\/\nfunc (sg *SGClient) Send(m Mail) []error {\n\tif apiError := sg.SendAPI(m); apiError != nil {\n\t\tvar errors []error\n\t\terrors = append(errors, apiError)\n\t\tif smtpError := sg.SendSMTP(m); smtpError != nil {\n\t\t\treturn append(errors, smtpError)\n\t\t} else {\n\t\t\treturn errors\n\t\t}\n\t} else {\n\t\treturn nil \/\/sucess\n\t}\n}\n\n\/*\nSMTP interface. Still being developed. Use API instead.\n*\/\nfunc (sg *SGClient) SendSMTP(m Mail) error {\n\treturn smtp.SendMail(sg.smtpUrl+\":\"+sg.smtpPort, sg.smtpAuth, m.from, m.to, []byte(m.html))\n}\n\n\/*\nSends email using SG web API\n*\/\nfunc (sg *SGClient) SendAPI(m Mail) error {\n\tvalues := url.Values{}\n\tvalues.Set(\"api_user\", sg.apiUser)\n\tvalues.Set(\"api_key\", sg.apiPwd)\n\tvalues.Set(\"subject\", m.subject)\n\tvalues.Set(\"html\", m.html)\n\tvalues.Set(\"text\", m.text)\n\tvalues.Set(\"from\", m.from)\n\theaders, e := json.Marshal(m.headers)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"sendgrid.go: Error parsing JSON headers\")\n\t}\n\tvalues.Set(\"headers\", string(headers[:]))\n\tfor i := 0; i < len(m.to); i++ {\n\t\tvalues.Set(\"to[]\", m.to[i])\n\t}\n\tfor i := 0; i < len(m.bcc); i++ {\n\t\tvalues.Set(\"bcc[]\", m.bcc[i])\n\t}\n\tfor i := 0; i < len(m.toname); i++ {\n\t\tvalues.Set(\"toname[]\", m.toname[i])\n\t}\n\tfor k, v := range m.files {\n\t\tvalues.Set(\"files[\"+k+\"]\", v)\n\t}\n\tif sg.Client == nil {\n\t\tsg.Client = http.DefaultClient\n\t}\n\tfmt.Print(values)\n\tr, e := sg.Client.PostForm(sg.apiUrl, values)\n\tdefer r.Body.Close()\n\tif r.StatusCode == 200 && e == nil {\n\t\treturn nil\n\t} else {\n\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\treturn fmt.Errorf(\"sendgrid.go: code:%d error:%v body:%s\", r.StatusCode, e, body)\n\t}\n}\n\ntype Mail struct {\n\tto []string\n\ttoname []string\n\tsubject string\n\thtml string\n\ttext string\n\tfrom string\n\tbcc []string\n\tfromname string\n\treplyto string\n\tdate string\n\tfiles map[string]string\n\theaders map[string]string\n}\n\nfunc NewMail() Mail {\n\treturn Mail{}\n}\n\nfunc (m *Mail) AddTo(email string) {\n\tm.to = append(m.to, email)\n}\n\nfunc (m *Mail) AddToName(name string) {\n\tm.toname = append(m.toname, name)\n}\n\nfunc (m *Mail) AddSubject(s string) {\n\tm.subject = s\n}\n\nfunc (m *Mail) AddHTML(html string) {\n\tm.html = html\n}\n\nfunc (m *Mail) AddText(text string) {\n\tm.text = text\n}\n\nfunc (m *Mail) AddFrom(from string) {\n\tm.from = from\n}\n\nfunc (m *Mail) AddBCC(email string) {\n\tm.bcc = append(m.bcc, email)\n}\n\nfunc (m *Mail) AddFromName(name string) {\n\tm.fromname = name\n}\n\nfunc (m *Mail) AddReplyTo(reply string) {\n\tm.replyto = reply\n}\n\nfunc (m *Mail) AddDate(date string) {\n\tm.date = date\n}\n\nfunc (m *Mail) AddHeader(header, value string) {\n\tif m.headers == nil {\n\t\tm.headers = make(map[string]string)\n\t}\n\tm.headers[header] = value\n}\n\nfunc (m *Mail) AddAttachment(filePath string) error {\n\tif m.files == nil {\n\t\tm.files = make(map[string]string)\n\t}\n\tbuf, e := ioutil.ReadFile(filePath)\n\tif e != nil {\n\t\treturn e\n\t}\n\t_, filename := filepath.Split(filePath)\n\tm.files[filename] = base64.StdEncoding.EncodeToString(buf)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ctrl\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t. \"github.com\/mickael-kerjean\/filestash\/server\/common\"\n\t\"github.com\/mickael-kerjean\/filestash\/server\/model\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype FileInfo struct {\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tSize int64 `json:\"size\"`\n\tTime int64 `json:\"time\"`\n}\n\nvar FileCache AppCache\n\nfunc init() {\n\tFileCache = NewAppCache()\n\tcachePath := filepath.Join(GetCurrentDir(), TMP_PATH)\n\tFileCache.OnEvict(func(key string, value interface{}) {\n\t\tos.RemoveAll(filepath.Join(cachePath, key))\n\t})\n}\n\nfunc FileLs(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanRead(&ctx) == false {\n\t\tif model.CanUpload(&ctx) == false {\n\t\t\tSendErrorResult(res, ErrPermissionDenied)\n\t\t\treturn\n\t\t}\n\t\tSendSuccessResults(res, make([]FileInfo, 0))\n\t\treturn\n\t}\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tentries, err := ctx.Backend.Ls(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, path)\n\n\tfiles := make([]FileInfo, len(entries))\n\tetagger := fnv.New32()\n\tetagger.Write([]byte(path + strconv.Itoa(len(entries))))\n\tfor i:=0; i<len(entries); i++ {\n\t\tname := entries[i].Name()\n\t\tmodTime := entries[i].ModTime().UnixNano() \/ int64(time.Millisecond)\n\n\t\tif i < 200 { \/\/ etag is generated from a few values to avoid large memory usage\n\t\t\tetagger.Write([]byte(name + strconv.Itoa(int(modTime))))\n\t\t}\n\n\t\tfiles[i] = FileInfo{\n\t\t\tName: name,\n\t\t\tSize: entries[i].Size(),\n\t\t\tTime: modTime,\n\t\t\tType: func(mode os.FileMode) string {\n\t\t\t\tif mode.IsRegular() {\n\t\t\t\t\treturn \"file\"\n\t\t\t\t}\n\t\t\t\treturn \"directory\"\n\t\t\t}(entries[i].Mode()),\n\t\t}\n\t}\n\n\tvar perms Metadata = Metadata{}\n\tif obj, ok := ctx.Backend.(interface{ Meta(path string) Metadata }); ok {\n\t\tperms = obj.Meta(path)\n\t}\n\n\tif model.CanEdit(&ctx) == false {\n\t\tperms.CanCreateFile = NewBool(false)\n\t\tperms.CanCreateDirectory = NewBool(false)\n\t\tperms.CanRename = NewBool(false)\n\t\tperms.CanMove = NewBool(false)\n\t\tperms.CanDelete = NewBool(false)\n\t}\n\tif model.CanUpload(&ctx) == false {\n\t\tperms.CanCreateDirectory = NewBool(false)\n\t\tperms.CanRename = NewBool(false)\n\t\tperms.CanMove = NewBool(false)\n\t\tperms.CanDelete = NewBool(false)\n\t}\n\tif model.CanShare(&ctx) == false {\n\t\tperms.CanShare = NewBool(false)\n\t}\n\n\tetagValue := base64.StdEncoding.EncodeToString(etagger.Sum(nil))\n\tres.Header().Set(\"Etag\", etagValue)\n\tif etagValue != \"\" && req.Header.Get(\"If-None-Match\") == etagValue {\n\t\tres.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\tSendSuccessResultsWithMetadata(res, files, perms)\n}\n\nfunc FileCat(ctx App, res http.ResponseWriter, req *http.Request) {\n\theader := res.Header()\n\thttp.SetCookie(res, &http.Cookie{\n\t\tName: \"download\",\n\t\tValue: \"\",\n\t\tMaxAge: -1,\n\t\tPath: \"\/\",\n\t})\n\tif model.CanRead(&ctx) == false {\n\t\tSendErrorResult(res, ErrPermissionDenied)\n\t\treturn\n\t}\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tvar file io.ReadCloser\n\tvar contentLength int64 = -1\n\tvar needToCreateCache bool = false\n\n\t\/\/ use our cache if necessary (range request) when possible\n\tif req.Header.Get(\"range\") != \"\" {\n\t\tctx.Session[\"_path\"] = path\n\t\tif p := FileCache.Get(ctx.Session); p != nil {\n\t\t\tf, err := os.OpenFile(p.(string), os.O_RDONLY, os.ModePerm);\n\t\t\tif err == nil {\n\t\t\t\tfile = f\n\t\t\t\tif fi, err := f.Stat(); err == nil {\n\t\t\t\t\tcontentLength = fi.Size()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ perform the actual `cat` if needed\n\tif file == nil {\n\t\tif file, err = ctx.Backend.Cat(path); err != nil {\n\t\t\tSendErrorResult(res, err)\n\t\t\treturn\n\t\t}\n\t\theader.Set(\"Content-Type\", GetMimeType(req.URL.Query().Get(\"path\")))\n\t\tif req.Header.Get(\"range\") != \"\" {\n\t\t\tneedToCreateCache = true\n\t\t}\n\t\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\t}\n\n\t\/\/ plugin hooks\n\tfor _, obj := range Hooks.Get.ProcessFileContentBeforeSend() {\n\t\tif file, err = obj(file, &ctx, &res, req); err != nil {\n\t\t\tSendErrorResult(res, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ The extra complexity is to support: https:\/\/en.wikipedia.org\/wiki\/Progressive_download\n\t\/\/ => range request requires a seeker to work, some backend support it, some don't. 2 strategies:\n\t\/\/ 1. backend support Seek: use what the current backend gives us\n\t\/\/ 2. backend doesn't support Seek: build up a cache so that subsequent call don't trigger multiple downloads\n\tif req.Header.Get(\"range\") != \"\" && needToCreateCache == true {\n\t\tif obj, ok := file.(io.Seeker); ok == true {\n\t\t\tif size, err := obj.Seek(0, io.SeekEnd); err == nil {\n\t\t\t\tif _, err = obj.Seek(0, io.SeekStart); err == nil {\n\t\t\t\t\tcontentLength = size\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ttmpPath := filepath.Join(GetCurrentDir(), filepath.Join(GetCurrentDir(), TMP_PATH), \"file_\" + QuickString(20) + \".dat\")\n\t\t\tf, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE, os.ModePerm);\n\t\t\tif err != nil {\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err = io.Copy(f, file); err != nil {\n\t\t\t\tf.Close()\n\t\t\t\tfile.Close()\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tf.Close()\n\t\t\tfile.Close()\n\t\t\tif f, err = os.OpenFile(tmpPath, os.O_RDONLY, os.ModePerm); err != nil {\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tFileCache.Set(ctx.Session, tmpPath)\n\t\t\tif fi, err := f.Stat(); err == nil {\n\t\t\t\tcontentLength = fi.Size()\n\t\t\t}\n\t\t\tfile = f\n\t\t}\n\t}\n\n\t\/\/ Range request: find how much data we need to send\n\tvar ranges [][]int64\n\tif req.Header.Get(\"range\") != \"\" {\n\t\tranges = make([][]int64, 0)\n\t\tfor _, r := range strings.Split(strings.TrimPrefix(req.Header.Get(\"range\"), \"bytes=\"), \",\") {\n\t\t\tr = strings.TrimSpace(r)\n\t\t\tif r == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar start int64 = -1\n\t\t\tvar end int64 = -1\n\t\t\tsides := strings.Split(r, \"-\")\n\t\t\tif len(sides) == 2 {\n\t\t\t\tif start, err = strconv.ParseInt(sides[0], 10, 64); err != nil || start < 0 {\n\t\t\t\t\tstart = 0\n\t\t\t\t}\n\t\t\t\tif end, err = strconv.ParseInt(sides[1], 10, 64); err != nil || end < start {\n\t\t\t\t\tend = contentLength - 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif start != -1 && end != -1 && end - start >= 0 {\n\t\t\t\tranges = append(ranges, []int64{start, end})\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ publish headers\n\tif contentLength != -1 {\n\t\theader.Set(\"Content-Length\", fmt.Sprintf(\"%d\", contentLength))\n\t}\n\tif header.Get(\"Content-Security-Policy\") == \"\" {\n\t\theader.Set(\"Content-Security-Policy\", \"default-src 'none'; img-src 'self'; style-src 'unsafe-inline'\")\n\t}\n\theader.Set(\"Accept-Ranges\", \"bytes\")\n\n\t\/\/ Send data to the client\n\tif req.Method != \"HEAD\" {\n\t\tif f, ok := file.(io.ReadSeeker); ok && len(ranges) > 0 {\n\t\t\tif _, err = f.Seek(ranges[0][0], io.SeekStart); err == nil {\n\t\t\t\theader.Set(\"Content-Range\", fmt.Sprintf(\"bytes %d-%d\/%d\", ranges[0][0], ranges[0][1], contentLength))\n\t\t\t\theader.Set(\"Content-Length\", fmt.Sprintf(\"%d\", ranges[0][1] - ranges[0][0] + 1))\n\t\t\t\tres.WriteHeader(http.StatusPartialContent)\n\t\t\t\tio.CopyN(res, f, ranges[0][1] - ranges[0][0] + 1)\n\t\t\t} else {\n\t\t\t\tres.WriteHeader(http.StatusRequestedRangeNotSatisfiable)\n\t\t\t}\n\t\t} else {\n\t\t\tio.Copy(res, file)\n\t\t}\n\t}\n\tfile.Close()\n}\n\nfunc FileAccess(ctx App, res http.ResponseWriter, req *http.Request) {\n\tallowed := []string{}\n\tif model.CanRead(&ctx){\n\t\tallowed = append(allowed, \"GET\")\n\t}\n\tif model.CanEdit(&ctx){\n\t\tallowed = append(allowed, \"PUT\")\n\t}\n\tif model.CanUpload(&ctx){\n\t\tallowed = append(allowed, \"POST\")\n\t}\n\theader := res.Header()\n\theader.Set(\"Allow\", strings.Join(allowed, \", \"))\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileSave(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tfile, _, err := req.FormFile(\"file\")\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = ctx.Backend.Save(path, file)\n\tfile.Close()\n\tif err != nil {\n\t\tSendErrorResult(res, NewError(err.Error(), 403))\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tgo model.SProc.HintFile(&ctx, path)\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileMv(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tfrom, err := pathBuilder(ctx, req.URL.Query().Get(\"from\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tto, err := pathBuilder(ctx, req.URL.Query().Get(\"to\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tif from == \"\" || to == \"\" {\n\t\tSendErrorResult(res, NewError(\"missing path parameter\", 400))\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Mv(from, to)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tgo model.SProc.HintRm(&ctx, filepath.Dir(from) + \"\/\")\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(to) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileRm(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\terr = ctx.Backend.Rm(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tmodel.SProc.HintRm(&ctx, path)\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileMkdir(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanUpload(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Mkdir(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileTouch(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanUpload(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Touch(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc pathBuilder(ctx App, path string) (string, error) {\n\tif path == \"\" {\n\t\treturn \"\", NewError(\"No path available\", 400)\n\t}\n\tsessionPath := ctx.Session[\"path\"]\n\tbasePath := filepath.Join(sessionPath, path)\n\tif path[len(path)-1:] == \"\/\" && basePath != \"\/\" {\n\t\tbasePath += \"\/\"\n\t}\n\tif strings.HasPrefix(basePath, ctx.Session[\"path\"]) == false {\n\t\treturn \"\", NewError(\"There's nothing here\", 403)\n\t}\n\treturn basePath, nil\n}\n<commit_msg>fix (video): range request had issues breaking videos from webdav ?!?<commit_after>package ctrl\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t. \"github.com\/mickael-kerjean\/filestash\/server\/common\"\n\t\"github.com\/mickael-kerjean\/filestash\/server\/model\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype FileInfo struct {\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tSize int64 `json:\"size\"`\n\tTime int64 `json:\"time\"`\n}\n\nvar FileCache AppCache\n\nfunc init() {\n\tFileCache = NewAppCache()\n\tcachePath := filepath.Join(GetCurrentDir(), TMP_PATH)\n\tFileCache.OnEvict(func(key string, value interface{}) {\n\t\tos.RemoveAll(filepath.Join(cachePath, key))\n\t})\n}\n\nfunc FileLs(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanRead(&ctx) == false {\n\t\tif model.CanUpload(&ctx) == false {\n\t\t\tSendErrorResult(res, ErrPermissionDenied)\n\t\t\treturn\n\t\t}\n\t\tSendSuccessResults(res, make([]FileInfo, 0))\n\t\treturn\n\t}\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tentries, err := ctx.Backend.Ls(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, path)\n\n\tfiles := make([]FileInfo, len(entries))\n\tetagger := fnv.New32()\n\tetagger.Write([]byte(path + strconv.Itoa(len(entries))))\n\tfor i:=0; i<len(entries); i++ {\n\t\tname := entries[i].Name()\n\t\tmodTime := entries[i].ModTime().UnixNano() \/ int64(time.Millisecond)\n\n\t\tif i < 200 { \/\/ etag is generated from a few values to avoid large memory usage\n\t\t\tetagger.Write([]byte(name + strconv.Itoa(int(modTime))))\n\t\t}\n\n\t\tfiles[i] = FileInfo{\n\t\t\tName: name,\n\t\t\tSize: entries[i].Size(),\n\t\t\tTime: modTime,\n\t\t\tType: func(mode os.FileMode) string {\n\t\t\t\tif mode.IsRegular() {\n\t\t\t\t\treturn \"file\"\n\t\t\t\t}\n\t\t\t\treturn \"directory\"\n\t\t\t}(entries[i].Mode()),\n\t\t}\n\t}\n\n\tvar perms Metadata = Metadata{}\n\tif obj, ok := ctx.Backend.(interface{ Meta(path string) Metadata }); ok {\n\t\tperms = obj.Meta(path)\n\t}\n\n\tif model.CanEdit(&ctx) == false {\n\t\tperms.CanCreateFile = NewBool(false)\n\t\tperms.CanCreateDirectory = NewBool(false)\n\t\tperms.CanRename = NewBool(false)\n\t\tperms.CanMove = NewBool(false)\n\t\tperms.CanDelete = NewBool(false)\n\t}\n\tif model.CanUpload(&ctx) == false {\n\t\tperms.CanCreateDirectory = NewBool(false)\n\t\tperms.CanRename = NewBool(false)\n\t\tperms.CanMove = NewBool(false)\n\t\tperms.CanDelete = NewBool(false)\n\t}\n\tif model.CanShare(&ctx) == false {\n\t\tperms.CanShare = NewBool(false)\n\t}\n\n\tetagValue := base64.StdEncoding.EncodeToString(etagger.Sum(nil))\n\tres.Header().Set(\"Etag\", etagValue)\n\tif etagValue != \"\" && req.Header.Get(\"If-None-Match\") == etagValue {\n\t\tres.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\tSendSuccessResultsWithMetadata(res, files, perms)\n}\n\nfunc FileCat(ctx App, res http.ResponseWriter, req *http.Request) {\n\theader := res.Header()\n\thttp.SetCookie(res, &http.Cookie{\n\t\tName: \"download\",\n\t\tValue: \"\",\n\t\tMaxAge: -1,\n\t\tPath: \"\/\",\n\t})\n\tif model.CanRead(&ctx) == false {\n\t\tSendErrorResult(res, ErrPermissionDenied)\n\t\treturn\n\t}\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tvar file io.ReadCloser\n\tvar contentLength int64 = -1\n\tvar needToCreateCache bool = false\n\n\t\/\/ use our cache if necessary (range request) when possible\n\tif req.Header.Get(\"range\") != \"\" {\n\t\tctx.Session[\"_path\"] = path\n\t\tif p := FileCache.Get(ctx.Session); p != nil {\n\t\t\tf, err := os.OpenFile(p.(string), os.O_RDONLY, os.ModePerm);\n\t\t\tif err == nil {\n\t\t\t\tfile = f\n\t\t\t\tif fi, err := f.Stat(); err == nil {\n\t\t\t\t\tcontentLength = fi.Size()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ perform the actual `cat` if needed\n\tif file == nil {\n\t\tif file, err = ctx.Backend.Cat(path); err != nil {\n\t\t\tSendErrorResult(res, err)\n\t\t\treturn\n\t\t}\n\t\theader.Set(\"Content-Type\", GetMimeType(req.URL.Query().Get(\"path\")))\n\t\tif req.Header.Get(\"range\") != \"\" {\n\t\t\tneedToCreateCache = true\n\t\t}\n\t\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\t}\n\n\t\/\/ plugin hooks\n\tfor _, obj := range Hooks.Get.ProcessFileContentBeforeSend() {\n\t\tif file, err = obj(file, &ctx, &res, req); err != nil {\n\t\t\tSendErrorResult(res, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ The extra complexity is to support: https:\/\/en.wikipedia.org\/wiki\/Progressive_download\n\t\/\/ => range request requires a seeker to work, some backend support it, some don't. 2 strategies:\n\t\/\/ 1. backend support Seek: use what the current backend gives us\n\t\/\/ 2. backend doesn't support Seek: build up a cache so that subsequent call don't trigger multiple downloads\n\tif req.Header.Get(\"range\") != \"\" && needToCreateCache == true {\n\t\tif obj, ok := file.(io.Seeker); ok == true {\n\t\t\tif size, err := obj.Seek(0, io.SeekEnd); err == nil {\n\t\t\t\tif _, err = obj.Seek(0, io.SeekStart); err == nil {\n\t\t\t\t\tcontentLength = size\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ttmpPath := filepath.Join(GetCurrentDir(), TMP_PATH, \"file_\" + QuickString(20) + \".dat\")\n\t\t\tf, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE, os.ModePerm);\n\t\t\tif err != nil {\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err = io.Copy(f, file); err != nil {\n\t\t\t\tf.Close()\n\t\t\t\tfile.Close()\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tf.Close()\n\t\t\tfile.Close()\n\t\t\tif f, err = os.OpenFile(tmpPath, os.O_RDONLY, os.ModePerm); err != nil {\n\t\t\t\tSendErrorResult(res, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tFileCache.Set(ctx.Session, tmpPath)\n\t\t\tif fi, err := f.Stat(); err == nil {\n\t\t\t\tcontentLength = fi.Size()\n\t\t\t}\n\t\t\tfile = f\n\t\t}\n\t}\n\n\t\/\/ Range request: find how much data we need to send\n\tvar ranges [][]int64\n\tif req.Header.Get(\"range\") != \"\" {\n\t\tranges = make([][]int64, 0)\n\t\tfor _, r := range strings.Split(strings.TrimPrefix(req.Header.Get(\"range\"), \"bytes=\"), \",\") {\n\t\t\tr = strings.TrimSpace(r)\n\t\t\tif r == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar start int64 = -1\n\t\t\tvar end int64 = -1\n\t\t\tsides := strings.Split(r, \"-\")\n\t\t\tif len(sides) == 2 {\n\t\t\t\tif start, err = strconv.ParseInt(sides[0], 10, 64); err != nil || start < 0 {\n\t\t\t\t\tstart = 0\n\t\t\t\t}\n\t\t\t\tif end, err = strconv.ParseInt(sides[1], 10, 64); err != nil || end < start {\n\t\t\t\t\tend = contentLength - 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif start != -1 && end != -1 && end - start >= 0 {\n\t\t\t\tranges = append(ranges, []int64{start, end})\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ publish headers\n\tif contentLength != -1 {\n\t\theader.Set(\"Content-Length\", fmt.Sprintf(\"%d\", contentLength))\n\t}\n\tif header.Get(\"Content-Security-Policy\") == \"\" {\n\t\theader.Set(\"Content-Security-Policy\", \"default-src 'none'; img-src 'self'; media-src 'self'; style-src 'unsafe-inline'; font-src data:\")\n\t}\n\theader.Set(\"Accept-Ranges\", \"bytes\")\n\n\t\/\/ Send data to the client\n\tif req.Method != \"HEAD\" {\n\t\tif f, ok := file.(io.ReadSeeker); ok && len(ranges) > 0 {\n\t\t\tif _, err = f.Seek(ranges[0][0], io.SeekStart); err == nil {\n\t\t\t\theader.Set(\"Content-Range\", fmt.Sprintf(\"bytes %d-%d\/%d\", ranges[0][0], ranges[0][1], contentLength))\n\t\t\t\theader.Set(\"Content-Length\", fmt.Sprintf(\"%d\", ranges[0][1] - ranges[0][0] + 1))\n\t\t\t\tres.WriteHeader(http.StatusPartialContent)\n\t\t\t\tio.CopyN(res, f, ranges[0][1] - ranges[0][0] + 1)\n\t\t\t} else {\n\t\t\t\tres.WriteHeader(http.StatusRequestedRangeNotSatisfiable)\n\t\t\t}\n\t\t} else {\n\t\t\tio.Copy(res, file)\n\t\t}\n\t}\n\tfile.Close()\n}\n\nfunc FileAccess(ctx App, res http.ResponseWriter, req *http.Request) {\n\tallowed := []string{}\n\tif model.CanRead(&ctx){\n\t\tallowed = append(allowed, \"GET\")\n\t}\n\tif model.CanEdit(&ctx){\n\t\tallowed = append(allowed, \"PUT\")\n\t}\n\tif model.CanUpload(&ctx){\n\t\tallowed = append(allowed, \"POST\")\n\t}\n\theader := res.Header()\n\theader.Set(\"Allow\", strings.Join(allowed, \", \"))\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileSave(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tfile, _, err := req.FormFile(\"file\")\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = ctx.Backend.Save(path, file)\n\tfile.Close()\n\tif err != nil {\n\t\tSendErrorResult(res, NewError(err.Error(), 403))\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tgo model.SProc.HintFile(&ctx, path)\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileMv(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tfrom, err := pathBuilder(ctx, req.URL.Query().Get(\"from\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tto, err := pathBuilder(ctx, req.URL.Query().Get(\"to\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tif from == \"\" || to == \"\" {\n\t\tSendErrorResult(res, NewError(\"missing path parameter\", 400))\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Mv(from, to)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\tgo model.SProc.HintRm(&ctx, filepath.Dir(from) + \"\/\")\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(to) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileRm(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanEdit(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\terr = ctx.Backend.Rm(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tmodel.SProc.HintRm(&ctx, path)\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileMkdir(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanUpload(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Mkdir(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc FileTouch(ctx App, res http.ResponseWriter, req *http.Request) {\n\tif model.CanUpload(&ctx) == false {\n\t\tSendErrorResult(res, NewError(\"Permission denied\", 403))\n\t\treturn\n\t}\n\n\tpath, err := pathBuilder(ctx, req.URL.Query().Get(\"path\"))\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\n\terr = ctx.Backend.Touch(path)\n\tif err != nil {\n\t\tSendErrorResult(res, err)\n\t\treturn\n\t}\n\tgo model.SProc.HintLs(&ctx, filepath.Dir(path) + \"\/\")\n\tSendSuccessResult(res, nil)\n}\n\nfunc pathBuilder(ctx App, path string) (string, error) {\n\tif path == \"\" {\n\t\treturn \"\", NewError(\"No path available\", 400)\n\t}\n\tsessionPath := ctx.Session[\"path\"]\n\tbasePath := filepath.Join(sessionPath, path)\n\tif path[len(path)-1:] == \"\/\" && basePath != \"\/\" {\n\t\tbasePath += \"\/\"\n\t}\n\tif strings.HasPrefix(basePath, ctx.Session[\"path\"]) == false {\n\t\treturn \"\", NewError(\"There's nothing here\", 403)\n\t}\n\treturn basePath, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2022 The Cobra Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cobra\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {\n\tbuf := new(bytes.Buffer)\n\tgenBashComp(buf, c.Name(), includeDesc)\n\t_, err := buf.WriteTo(w)\n\treturn err\n}\n\nfunc genBashComp(buf io.StringWriter, name string, includeDesc bool) {\n\tcompCmd := ShellCompRequestCmd\n\tif !includeDesc {\n\t\tcompCmd = ShellCompNoDescRequestCmd\n\t}\n\n\tWriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-\n\n__%[1]s_debug()\n{\n if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then\n echo \"$*\" >> \"${BASH_COMP_DEBUG_FILE}\"\n fi\n}\n\n# Macs have bash3 for which the bash-completion package doesn't include\n# _init_completion. This is a minimal version of that function.\n__%[1]s_init_completion()\n{\n COMPREPLY=()\n _get_comp_words_by_ref \"$@\" cur prev words cword\n}\n\n# This function calls the %[1]s program to obtain the completion\n# results and the directive. It fills the 'out' and 'directive' vars.\n__%[1]s_get_completion_results() {\n local requestComp lastParam lastChar args\n\n # Prepare the command to request completions for the program.\n # Calling ${words[0]} instead of directly %[1]s allows to handle aliases\n args=(\"${words[@]:1}\")\n requestComp=\"${words[0]} %[2]s ${args[*]}\"\n\n lastParam=${words[$((${#words[@]}-1))]}\n lastChar=${lastParam:$((${#lastParam}-1)):1}\n __%[1]s_debug \"lastParam ${lastParam}, lastChar ${lastChar}\"\n\n if [ -z \"${cur}\" ] && [ \"${lastChar}\" != \"=\" ]; then\n # If the last parameter is complete (there is a space following it)\n # We add an extra empty parameter so we can indicate this to the go method.\n __%[1]s_debug \"Adding extra empty parameter\"\n requestComp=\"${requestComp} ''\"\n fi\n\n # When completing a flag with an = (e.g., %[1]s -n=<TAB>)\n # bash focuses on the part after the =, so we need to remove\n # the flag part from $cur\n if [[ \"${cur}\" == -*=* ]]; then\n cur=\"${cur#*=}\"\n fi\n\n __%[1]s_debug \"Calling ${requestComp}\"\n # Use eval to handle any environment variables and such\n out=$(eval \"${requestComp}\" 2>\/dev\/null)\n\n # Extract the directive integer at the very end of the output following a colon (:)\n directive=${out##*:}\n # Remove the directive\n out=${out%%:*}\n if [ \"${directive}\" = \"${out}\" ]; then\n # There is not directive specified\n directive=0\n fi\n __%[1]s_debug \"The completion directive is: ${directive}\"\n __%[1]s_debug \"The completions are: ${out}\"\n}\n\n__%[1]s_process_completion_results() {\n local shellCompDirectiveError=%[3]d\n local shellCompDirectiveNoSpace=%[4]d\n local shellCompDirectiveNoFileComp=%[5]d\n local shellCompDirectiveFilterFileExt=%[6]d\n local shellCompDirectiveFilterDirs=%[7]d\n\n if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then\n # Error code. No completion.\n __%[1]s_debug \"Received error from custom completion go code\"\n return\n else\n if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then\n if [[ $(type -t compopt) = \"builtin\" ]]; then\n __%[1]s_debug \"Activating no space\"\n compopt -o nospace\n else\n __%[1]s_debug \"No space directive not supported in this version of bash\"\n fi\n fi\n if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then\n if [[ $(type -t compopt) = \"builtin\" ]]; then\n __%[1]s_debug \"Activating no file completion\"\n compopt +o default\n else\n __%[1]s_debug \"No file completion directive not supported in this version of bash\"\n fi\n fi\n fi\n\n # Separate activeHelp from normal completions\n local completions=()\n local activeHelp=()\n __%[1]s_extract_activeHelp\n\n if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then\n # File extension filtering\n local fullFilter filter filteringCmd\n\n # Do not use quotes around the $completions variable or else newline\n # characters will be kept.\n for filter in ${completions[*]}; do\n fullFilter+=\"$filter|\"\n done\n\n filteringCmd=\"_filedir $fullFilter\"\n __%[1]s_debug \"File filtering command: $filteringCmd\"\n $filteringCmd\n elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then\n # File completion for directories only\n\n # Use printf to strip any trailing newline\n local subdir\n subdir=$(printf \"%%s\" \"${completions[0]}\")\n if [ -n \"$subdir\" ]; then\n __%[1]s_debug \"Listing directories in $subdir\"\n pushd \"$subdir\" >\/dev\/null 2>&1 && _filedir -d && popd >\/dev\/null 2>&1 || return\n else\n __%[1]s_debug \"Listing directories in .\"\n _filedir -d\n fi\n else\n __%[1]s_handle_completion_types\n fi\n\n __%[1]s_handle_special_char \"$cur\" :\n __%[1]s_handle_special_char \"$cur\" =\n\n # Print the activeHelp statements before we finish\n if [ ${#activeHelp[*]} -ne 0 ]; then\n printf \"\\n\";\n printf \"%%s\\n\" \"${activeHelp[@]}\"\n printf \"\\n\"\n\n # The prompt format is only available from bash 4.4.\n # We test if it is available before using it.\n if (x=${PS1@P}) 2> \/dev\/null; then\n printf \"%%s\" \"${PS1@P}${COMP_LINE[@]}\"\n else\n # Can't print the prompt. Just print the\n # text the user had typed, it is workable enough.\n printf \"%%s\" \"${COMP_LINE[@]}\"\n fi\n fi\n}\n\n# Separate activeHelp lines from real completions.\n# Fills the $activeHelp and $completions arrays.\n__%[1]s_extract_activeHelp() {\n local activeHelpMarker=\"%[8]s\"\n local endIndex=${#activeHelpMarker}\n\n while IFS='' read -r comp; do\n if [ \"${comp:0:endIndex}\" = \"$activeHelpMarker\" ]; then\n comp=${comp:endIndex}\n __%[1]s_debug \"ActiveHelp found: $comp\"\n if [ -n \"$comp\" ]; then\n activeHelp+=(\"$comp\")\n fi\n else\n # Not an activeHelp line but a normal completion\n completions+=(\"$comp\")\n fi\n done < <(printf \"%%s\\n\" \"${out}\")\n}\n\n__%[1]s_handle_completion_types() {\n __%[1]s_debug \"__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE\"\n\n case $COMP_TYPE in\n 37|42)\n # Type: menu-complete\/menu-complete-backward and insert-completions\n # If the user requested inserting one completion at a time, or all\n # completions at once on the command-line we must remove the descriptions.\n # https:\/\/github.com\/spf13\/cobra\/issues\/1508\n local tab=$'\\t' comp\n while IFS='' read -r comp; do\n [[ -z $comp ]] && continue\n # Strip any description\n comp=${comp%%%%$tab*}\n # Only consider the completions that match\n if [[ $comp == \"$cur\"* ]]; then\n COMPREPLY+=(\"$comp\")\n fi\n done < <(printf \"%%s\\n\" \"${completions[@]}\")\n ;;\n\n *)\n # Type: complete (normal completion)\n __%[1]s_handle_standard_completion_case\n ;;\n esac\n}\n\n__%[1]s_handle_standard_completion_case() {\n local tab=$'\\t' comp\n\n # Short circuit to optimize if we don't have descriptions\n if [[ \"${completions[*]}\" != *$tab* ]]; then\n IFS=$'\\n' read -ra COMPREPLY -d '' < <(compgen -W \"${completions[*]}\" -- \"$cur\")\n return 0\n fi\n\n local longest=0\n local compline\n # Look for the longest completion so that we can format things nicely\n while IFS='' read -r compline; do\n [[ -z $compline ]] && continue\n # Strip any description before checking the length\n comp=${compline%%%%$tab*}\n # Only consider the completions that match\n [[ $comp == \"$cur\"* ]] || continue\n COMPREPLY+=(\"$compline\")\n if ((${#comp}>longest)); then\n longest=${#comp}\n fi\n done < <(printf \"%%s\\n\" \"${completions[@]}\")\n\n # If there is a single completion left, remove the description text\n if [ ${#COMPREPLY[*]} -eq 1 ]; then\n __%[1]s_debug \"COMPREPLY[0]: ${COMPREPLY[0]}\"\n comp=\"${COMPREPLY[0]%%%%$tab*}\"\n __%[1]s_debug \"Removed description from single completion, which is now: ${comp}\"\n COMPREPLY[0]=$comp\n else # Format the descriptions\n __%[1]s_format_comp_descriptions $longest\n fi\n}\n\n__%[1]s_handle_special_char()\n{\n local comp=\"$1\"\n local char=$2\n if [[ \"$comp\" == *${char}* && \"$COMP_WORDBREAKS\" == *${char}* ]]; then\n local word=${comp%%\"${comp##*${char}}\"}\n local idx=${#COMPREPLY[*]}\n while [[ $((--idx)) -ge 0 ]]; do\n COMPREPLY[$idx]=${COMPREPLY[$idx]#\"$word\"}\n done\n fi\n}\n\n__%[1]s_format_comp_descriptions()\n{\n local tab=$'\\t'\n local comp desc maxdesclength\n local longest=$1\n\n local i ci\n for ci in ${!COMPREPLY[*]}; do\n comp=${COMPREPLY[ci]}\n # Properly format the description string which follows a tab character if there is one\n if [[ \"$comp\" == *$tab* ]]; then\n __%[1]s_debug \"Original comp: $comp\"\n desc=${comp#*$tab}\n comp=${comp%%%%$tab*}\n\n # $COLUMNS stores the current shell width.\n # Remove an extra 4 because we add 2 spaces and 2 parentheses.\n maxdesclength=$(( COLUMNS - longest - 4 ))\n\n # Make sure we can fit a description of at least 8 characters\n # if we are to align the descriptions.\n if [[ $maxdesclength -gt 8 ]]; then\n # Add the proper number of spaces to align the descriptions\n for ((i = ${#comp} ; i < longest ; i++)); do\n comp+=\" \"\n done\n else\n # Don't pad the descriptions so we can fit more text after the completion\n maxdesclength=$(( COLUMNS - ${#comp} - 4 ))\n fi\n\n # If there is enough space for any description text,\n # truncate the descriptions that are too long for the shell width\n if [ $maxdesclength -gt 0 ]; then\n if [ ${#desc} -gt $maxdesclength ]; then\n desc=${desc:0:$(( maxdesclength - 1 ))}\n desc+=\"…\"\n fi\n comp+=\" ($desc)\"\n fi\n COMPREPLY[ci]=$comp\n __%[1]s_debug \"Final comp: $comp\"\n fi\n done\n}\n\n__start_%[1]s()\n{\n local cur prev words cword split\n\n COMPREPLY=()\n\n # Call _init_completion from the bash-completion package\n # to prepare the arguments properly\n if declare -F _init_completion >\/dev\/null 2>&1; then\n _init_completion -n \"=:\" || return\n else\n __%[1]s_init_completion -n \"=:\" || return\n fi\n\n __%[1]s_debug\n __%[1]s_debug \"========= starting completion logic ==========\"\n __%[1]s_debug \"cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword\"\n\n # The user could have moved the cursor backwards on the command-line.\n # We need to trigger completion from the $cword location, so we need\n # to truncate the command-line ($words) up to the $cword location.\n words=(\"${words[@]:0:$cword+1}\")\n __%[1]s_debug \"Truncated words[*]: ${words[*]},\"\n\n local out directive\n __%[1]s_get_completion_results\n __%[1]s_process_completion_results\n}\n\nif [[ $(type -t compopt) = \"builtin\" ]]; then\n complete -o default -F __start_%[1]s %[1]s\nelse\n complete -o default -o nospace -F __start_%[1]s %[1]s\nfi\n\n# ex: ts=4 sw=4 et filetype=sh\n`, name, compCmd,\n\t\tShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,\n\t\tShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,\n\t\tactiveHelpMarker))\n}\n\n\/\/ GenBashCompletionFileV2 generates Bash completion version 2.\nfunc (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {\n\toutFile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\treturn c.GenBashCompletionV2(outFile, includeDesc)\n}\n\n\/\/ GenBashCompletionV2 generates Bash completion file version 2\n\/\/ and writes it to the passed writer.\nfunc (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {\n\treturn c.genBashCompletion(w, includeDesc)\n}\n<commit_msg>style(bash-v2): various cleanups (#1702)<commit_after>\/\/ Copyright 2013-2022 The Cobra Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cobra\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {\n\tbuf := new(bytes.Buffer)\n\tgenBashComp(buf, c.Name(), includeDesc)\n\t_, err := buf.WriteTo(w)\n\treturn err\n}\n\nfunc genBashComp(buf io.StringWriter, name string, includeDesc bool) {\n\tcompCmd := ShellCompRequestCmd\n\tif !includeDesc {\n\t\tcompCmd = ShellCompNoDescRequestCmd\n\t}\n\n\tWriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-\n\n__%[1]s_debug()\n{\n if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then\n echo \"$*\" >> \"${BASH_COMP_DEBUG_FILE}\"\n fi\n}\n\n# Macs have bash3 for which the bash-completion package doesn't include\n# _init_completion. This is a minimal version of that function.\n__%[1]s_init_completion()\n{\n COMPREPLY=()\n _get_comp_words_by_ref \"$@\" cur prev words cword\n}\n\n# This function calls the %[1]s program to obtain the completion\n# results and the directive. It fills the 'out' and 'directive' vars.\n__%[1]s_get_completion_results() {\n local requestComp lastParam lastChar args\n\n # Prepare the command to request completions for the program.\n # Calling ${words[0]} instead of directly %[1]s allows to handle aliases\n args=(\"${words[@]:1}\")\n requestComp=\"${words[0]} %[2]s ${args[*]}\"\n\n lastParam=${words[$((${#words[@]}-1))]}\n lastChar=${lastParam:$((${#lastParam}-1)):1}\n __%[1]s_debug \"lastParam ${lastParam}, lastChar ${lastChar}\"\n\n if [[ -z ${cur} && ${lastChar} != = ]]; then\n # If the last parameter is complete (there is a space following it)\n # We add an extra empty parameter so we can indicate this to the go method.\n __%[1]s_debug \"Adding extra empty parameter\"\n requestComp=\"${requestComp} ''\"\n fi\n\n # When completing a flag with an = (e.g., %[1]s -n=<TAB>)\n # bash focuses on the part after the =, so we need to remove\n # the flag part from $cur\n if [[ ${cur} == -*=* ]]; then\n cur=\"${cur#*=}\"\n fi\n\n __%[1]s_debug \"Calling ${requestComp}\"\n # Use eval to handle any environment variables and such\n out=$(eval \"${requestComp}\" 2>\/dev\/null)\n\n # Extract the directive integer at the very end of the output following a colon (:)\n directive=${out##*:}\n # Remove the directive\n out=${out%%:*}\n if [[ ${directive} == \"${out}\" ]]; then\n # There is not directive specified\n directive=0\n fi\n __%[1]s_debug \"The completion directive is: ${directive}\"\n __%[1]s_debug \"The completions are: ${out}\"\n}\n\n__%[1]s_process_completion_results() {\n local shellCompDirectiveError=%[3]d\n local shellCompDirectiveNoSpace=%[4]d\n local shellCompDirectiveNoFileComp=%[5]d\n local shellCompDirectiveFilterFileExt=%[6]d\n local shellCompDirectiveFilterDirs=%[7]d\n\n if (((directive & shellCompDirectiveError) != 0)); then\n # Error code. No completion.\n __%[1]s_debug \"Received error from custom completion go code\"\n return\n else\n if (((directive & shellCompDirectiveNoSpace) != 0)); then\n if [[ $(type -t compopt) == builtin ]]; then\n __%[1]s_debug \"Activating no space\"\n compopt -o nospace\n else\n __%[1]s_debug \"No space directive not supported in this version of bash\"\n fi\n fi\n if (((directive & shellCompDirectiveNoFileComp) != 0)); then\n if [[ $(type -t compopt) == builtin ]]; then\n __%[1]s_debug \"Activating no file completion\"\n compopt +o default\n else\n __%[1]s_debug \"No file completion directive not supported in this version of bash\"\n fi\n fi\n fi\n\n # Separate activeHelp from normal completions\n local completions=()\n local activeHelp=()\n __%[1]s_extract_activeHelp\n\n if (((directive & shellCompDirectiveFilterFileExt) != 0)); then\n # File extension filtering\n local fullFilter filter filteringCmd\n\n # Do not use quotes around the $completions variable or else newline\n # characters will be kept.\n for filter in ${completions[*]}; do\n fullFilter+=\"$filter|\"\n done\n\n filteringCmd=\"_filedir $fullFilter\"\n __%[1]s_debug \"File filtering command: $filteringCmd\"\n $filteringCmd\n elif (((directive & shellCompDirectiveFilterDirs) != 0)); then\n # File completion for directories only\n\n local subdir\n subdir=${completions[0]}\n if [[ -n $subdir ]]; then\n __%[1]s_debug \"Listing directories in $subdir\"\n pushd \"$subdir\" >\/dev\/null 2>&1 && _filedir -d && popd >\/dev\/null 2>&1 || return\n else\n __%[1]s_debug \"Listing directories in .\"\n _filedir -d\n fi\n else\n __%[1]s_handle_completion_types\n fi\n\n __%[1]s_handle_special_char \"$cur\" :\n __%[1]s_handle_special_char \"$cur\" =\n\n # Print the activeHelp statements before we finish\n if ((${#activeHelp[*]} != 0)); then\n printf \"\\n\";\n printf \"%%s\\n\" \"${activeHelp[@]}\"\n printf \"\\n\"\n\n # The prompt format is only available from bash 4.4.\n # We test if it is available before using it.\n if (x=${PS1@P}) 2> \/dev\/null; then\n printf \"%%s\" \"${PS1@P}${COMP_LINE[@]}\"\n else\n # Can't print the prompt. Just print the\n # text the user had typed, it is workable enough.\n printf \"%%s\" \"${COMP_LINE[@]}\"\n fi\n fi\n}\n\n# Separate activeHelp lines from real completions.\n# Fills the $activeHelp and $completions arrays.\n__%[1]s_extract_activeHelp() {\n local activeHelpMarker=\"%[8]s\"\n local endIndex=${#activeHelpMarker}\n\n while IFS='' read -r comp; do\n if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then\n comp=${comp:endIndex}\n __%[1]s_debug \"ActiveHelp found: $comp\"\n if [[ -n $comp ]]; then\n activeHelp+=(\"$comp\")\n fi\n else\n # Not an activeHelp line but a normal completion\n completions+=(\"$comp\")\n fi\n done <<<\"${out}\"\n}\n\n__%[1]s_handle_completion_types() {\n __%[1]s_debug \"__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE\"\n\n case $COMP_TYPE in\n 37|42)\n # Type: menu-complete\/menu-complete-backward and insert-completions\n # If the user requested inserting one completion at a time, or all\n # completions at once on the command-line we must remove the descriptions.\n # https:\/\/github.com\/spf13\/cobra\/issues\/1508\n local tab=$'\\t' comp\n while IFS='' read -r comp; do\n [[ -z $comp ]] && continue\n # Strip any description\n comp=${comp%%%%$tab*}\n # Only consider the completions that match\n if [[ $comp == \"$cur\"* ]]; then\n COMPREPLY+=(\"$comp\")\n fi\n done < <(printf \"%%s\\n\" \"${completions[@]}\")\n ;;\n\n *)\n # Type: complete (normal completion)\n __%[1]s_handle_standard_completion_case\n ;;\n esac\n}\n\n__%[1]s_handle_standard_completion_case() {\n local tab=$'\\t' comp\n\n # Short circuit to optimize if we don't have descriptions\n if [[ \"${completions[*]}\" != *$tab* ]]; then\n IFS=$'\\n' read -ra COMPREPLY -d '' < <(compgen -W \"${completions[*]}\" -- \"$cur\")\n return 0\n fi\n\n local longest=0\n local compline\n # Look for the longest completion so that we can format things nicely\n while IFS='' read -r compline; do\n [[ -z $compline ]] && continue\n # Strip any description before checking the length\n comp=${compline%%%%$tab*}\n # Only consider the completions that match\n [[ $comp == \"$cur\"* ]] || continue\n COMPREPLY+=(\"$compline\")\n if ((${#comp}>longest)); then\n longest=${#comp}\n fi\n done < <(printf \"%%s\\n\" \"${completions[@]}\")\n\n # If there is a single completion left, remove the description text\n if ((${#COMPREPLY[*]} == 1)); then\n __%[1]s_debug \"COMPREPLY[0]: ${COMPREPLY[0]}\"\n comp=\"${COMPREPLY[0]%%%%$tab*}\"\n __%[1]s_debug \"Removed description from single completion, which is now: ${comp}\"\n COMPREPLY[0]=$comp\n else # Format the descriptions\n __%[1]s_format_comp_descriptions $longest\n fi\n}\n\n__%[1]s_handle_special_char()\n{\n local comp=\"$1\"\n local char=$2\n if [[ \"$comp\" == *${char}* && \"$COMP_WORDBREAKS\" == *${char}* ]]; then\n local word=${comp%%\"${comp##*${char}}\"}\n local idx=${#COMPREPLY[*]}\n while ((--idx >= 0)); do\n COMPREPLY[idx]=${COMPREPLY[idx]#\"$word\"}\n done\n fi\n}\n\n__%[1]s_format_comp_descriptions()\n{\n local tab=$'\\t'\n local comp desc maxdesclength\n local longest=$1\n\n local i ci\n for ci in ${!COMPREPLY[*]}; do\n comp=${COMPREPLY[ci]}\n # Properly format the description string which follows a tab character if there is one\n if [[ \"$comp\" == *$tab* ]]; then\n __%[1]s_debug \"Original comp: $comp\"\n desc=${comp#*$tab}\n comp=${comp%%%%$tab*}\n\n # $COLUMNS stores the current shell width.\n # Remove an extra 4 because we add 2 spaces and 2 parentheses.\n maxdesclength=$(( COLUMNS - longest - 4 ))\n\n # Make sure we can fit a description of at least 8 characters\n # if we are to align the descriptions.\n if ((maxdesclength > 8)); then\n # Add the proper number of spaces to align the descriptions\n for ((i = ${#comp} ; i < longest ; i++)); do\n comp+=\" \"\n done\n else\n # Don't pad the descriptions so we can fit more text after the completion\n maxdesclength=$(( COLUMNS - ${#comp} - 4 ))\n fi\n\n # If there is enough space for any description text,\n # truncate the descriptions that are too long for the shell width\n if ((maxdesclength > 0)); then\n if ((${#desc} > maxdesclength)); then\n desc=${desc:0:$(( maxdesclength - 1 ))}\n desc+=\"…\"\n fi\n comp+=\" ($desc)\"\n fi\n COMPREPLY[ci]=$comp\n __%[1]s_debug \"Final comp: $comp\"\n fi\n done\n}\n\n__start_%[1]s()\n{\n local cur prev words cword split\n\n COMPREPLY=()\n\n # Call _init_completion from the bash-completion package\n # to prepare the arguments properly\n if declare -F _init_completion >\/dev\/null 2>&1; then\n _init_completion -n =: || return\n else\n __%[1]s_init_completion -n =: || return\n fi\n\n __%[1]s_debug\n __%[1]s_debug \"========= starting completion logic ==========\"\n __%[1]s_debug \"cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword\"\n\n # The user could have moved the cursor backwards on the command-line.\n # We need to trigger completion from the $cword location, so we need\n # to truncate the command-line ($words) up to the $cword location.\n words=(\"${words[@]:0:$cword+1}\")\n __%[1]s_debug \"Truncated words[*]: ${words[*]},\"\n\n local out directive\n __%[1]s_get_completion_results\n __%[1]s_process_completion_results\n}\n\nif [[ $(type -t compopt) = \"builtin\" ]]; then\n complete -o default -F __start_%[1]s %[1]s\nelse\n complete -o default -o nospace -F __start_%[1]s %[1]s\nfi\n\n# ex: ts=4 sw=4 et filetype=sh\n`, name, compCmd,\n\t\tShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,\n\t\tShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,\n\t\tactiveHelpMarker))\n}\n\n\/\/ GenBashCompletionFileV2 generates Bash completion version 2.\nfunc (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {\n\toutFile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\treturn c.GenBashCompletionV2(outFile, includeDesc)\n}\n\n\/\/ GenBashCompletionV2 generates Bash completion file version 2\n\/\/ and writes it to the passed writer.\nfunc (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {\n\treturn c.genBashCompletion(w, includeDesc)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/influxdata\/chronograf\"\n)\n\ntype dashboardLinks struct {\n\tSelf string `json:\"self\"` \/\/ Self link mapping to this resource\n\tCells string `json:\"cells\"` \/\/ Cells link to the cells endpoint\n\tTemplates string `json:\"templates\"` \/\/ Templates link to the templates endpoint\n}\n\ntype dashboardResponse struct {\n\tID chronograf.DashboardID `json:\"id\"`\n\tCells []dashboardCellResponse `json:\"cells\"`\n\tTemplates []templateResponse `json:\"templates\"`\n\tName string `json:\"name\"`\n\tOrganization string `json:\"organization\"`\n\tLinks dashboardLinks `json:\"links\"`\n}\n\ntype getDashboardsResponse struct {\n\tDashboards []*dashboardResponse `json:\"dashboards\"`\n}\n\nfunc newDashboardResponse(d chronograf.Dashboard) *dashboardResponse {\n\tbase := \"\/chronograf\/v1\/dashboards\"\n\tdd := AddQueryConfigs(DashboardDefaults(d))\n\tcells := newCellResponses(dd.ID, dd.Cells)\n\ttemplates := newTemplateResponses(dd.ID, dd.Templates)\n\n\treturn &dashboardResponse{\n\t\tID: dd.ID,\n\t\tName: dd.Name,\n\t\tCells: cells,\n\t\tTemplates: templates,\n\t\tOrganization: d.Organization,\n\t\tLinks: dashboardLinks{\n\t\t\tSelf: fmt.Sprintf(\"%s\/%d\", base, dd.ID),\n\t\t\tCells: fmt.Sprintf(\"%s\/%d\/cells\", base, dd.ID),\n\t\t\tTemplates: fmt.Sprintf(\"%s\/%d\/templates\", base, dd.ID),\n\t\t},\n\t}\n}\n\n\/\/ Dashboards returns all dashboards within the store\nfunc (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tdashboards, err := s.Store.Dashboards(ctx).All(ctx)\n\tif err != nil {\n\t\tError(w, http.StatusInternalServerError, \"Error loading dashboards\", s.Logger)\n\t\treturn\n\t}\n\n\tres := getDashboardsResponse{\n\t\tDashboards: []*dashboardResponse{},\n\t}\n\n\tfor _, dashboard := range dashboards {\n\t\tres.Dashboards = append(res.Dashboards, newDashboardResponse(dashboard))\n\t}\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ DashboardID returns a single specified dashboard\nfunc (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(e)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ NewDashboard creates and returns a new dashboard object\nfunc (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) {\n\tvar dashboard chronograf.Dashboard\n\tvar err error\n\tif err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\tif err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := ValidDashboardRequest(&dashboard, defaultOrg.ID); err != nil {\n\t\tinvalidData(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif dashboard, err = s.Store.Dashboards(ctx).Add(r.Context(), dashboard); err != nil {\n\t\tmsg := fmt.Errorf(\"Error storing dashboard %v: %v\", dashboard, err)\n\t\tunknownErrorWithMessage(w, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(dashboard)\n\tlocation(w, res.Links.Self)\n\tencodeJSON(w, http.StatusCreated, res, s.Logger)\n}\n\n\/\/ RemoveDashboard deletes a dashboard\nfunc (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Delete(ctx, e); err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ ReplaceDashboard completely replaces a dashboard\nfunc (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tidParam, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not parse dashboard ID: %s\", err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t}\n\tid := chronograf.DashboardID(idParam)\n\n\t_, err = s.Store.Dashboards(ctx).Get(ctx, id)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, fmt.Sprintf(\"ID %d not found\", id), s.Logger)\n\t\treturn\n\t}\n\n\tvar req chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\treq.ID = id\n\n\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\tif err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {\n\t\tinvalidData(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Update(ctx, req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Error updating dashboard ID %d: %v\", id, err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(req)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ UpdateDashboard completely updates either the dashboard name or the cells\nfunc (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tidParam, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not parse dashboard ID: %s\", err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\tid := chronograf.DashboardID(idParam)\n\n\torig, err := s.Store.Dashboards(ctx).Get(ctx, id)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, fmt.Sprintf(\"ID %d not found\", id), s.Logger)\n\t\treturn\n\t}\n\n\tvar req chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\treq.ID = id\n\n\tif req.Name != \"\" {\n\t\torig.Name = req.Name\n\t} else if len(req.Cells) > 0 {\n\t\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\t\tif err != nil {\n\t\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\t\treturn\n\t\t}\n\t\tif err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {\n\t\t\tinvalidData(w, err, s.Logger)\n\t\t\treturn\n\t\t}\n\t\torig.Cells = req.Cells\n\t} else {\n\t\tinvalidData(w, fmt.Errorf(\"Update must include either name or cells\"), s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Update(ctx, orig); err != nil {\n\t\tmsg := fmt.Sprintf(\"Error updating dashboard ID %d: %v\", id, err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(orig)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ ValidDashboardRequest verifies that the dashboard cells have a query\nfunc ValidDashboardRequest(d *chronograf.Dashboard, defaultOrgID string) error {\n\tif d.Organization == \"\" {\n\t\td.Organization = defaultOrgID\n\t}\n\tfor i, c := range d.Cells {\n\t\tif err := ValidDashboardCellRequest(&c); err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.Cells[i] = c\n\t}\n\tfor _, t := range d.Templates {\n\t\tif err := ValidTemplateRequest(&t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t(*d) = DashboardDefaults(*d)\n\treturn nil\n}\n\n\/\/ DashboardDefaults updates the dashboard with the default values\n\/\/ if none are specified\nfunc DashboardDefaults(d chronograf.Dashboard) (newDash chronograf.Dashboard) {\n\tnewDash.ID = d.ID\n\tnewDash.Templates = d.Templates\n\tnewDash.Name = d.Name\n\tnewDash.Organization = d.Organization\n\tnewDash.Cells = make([]chronograf.DashboardCell, len(d.Cells))\n\n\tfor i, c := range d.Cells {\n\t\tCorrectWidthHeight(&c)\n\t\tnewDash.Cells[i] = c\n\t}\n\treturn\n}\n\n\/\/ AddQueryConfigs updates all the celsl in the dashboard to have query config\n\/\/ objects corresponding to their influxql queries.\nfunc AddQueryConfigs(d chronograf.Dashboard) (newDash chronograf.Dashboard) {\n\tnewDash.ID = d.ID\n\tnewDash.Templates = d.Templates\n\tnewDash.Name = d.Name\n\tnewDash.Cells = make([]chronograf.DashboardCell, len(d.Cells))\n\n\tfor i, c := range d.Cells {\n\t\tAddQueryConfig(&c)\n\t\tnewDash.Cells[i] = c\n\t}\n\treturn\n}\n<commit_msg>WIP template variable values not saved to server for non csv types<commit_after>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/influxdata\/chronograf\"\n)\n\ntype dashboardLinks struct {\n\tSelf string `json:\"self\"` \/\/ Self link mapping to this resource\n\tCells string `json:\"cells\"` \/\/ Cells link to the cells endpoint\n\tTemplates string `json:\"templates\"` \/\/ Templates link to the templates endpoint\n}\n\ntype dashboardResponse struct {\n\tID chronograf.DashboardID `json:\"id\"`\n\tCells []dashboardCellResponse `json:\"cells\"`\n\tTemplates []templateResponse `json:\"templates\"`\n\tName string `json:\"name\"`\n\tOrganization string `json:\"organization\"`\n\tLinks dashboardLinks `json:\"links\"`\n}\n\ntype getDashboardsResponse struct {\n\tDashboards []*dashboardResponse `json:\"dashboards\"`\n}\n\nfunc newDashboardResponse(d chronograf.Dashboard) *dashboardResponse {\n\tbase := \"\/chronograf\/v1\/dashboards\"\n\tdd := AddQueryConfigs(DashboardDefaults(d))\n\tcells := newCellResponses(dd.ID, dd.Cells)\n\ttemplates := newTemplateResponses(dd.ID, dd.Templates)\n\n\treturn &dashboardResponse{\n\t\tID: dd.ID,\n\t\tName: dd.Name,\n\t\tCells: cells,\n\t\tTemplates: templates,\n\t\tOrganization: d.Organization,\n\t\tLinks: dashboardLinks{\n\t\t\tSelf: fmt.Sprintf(\"%s\/%d\", base, dd.ID),\n\t\t\tCells: fmt.Sprintf(\"%s\/%d\/cells\", base, dd.ID),\n\t\t\tTemplates: fmt.Sprintf(\"%s\/%d\/templates\", base, dd.ID),\n\t\t},\n\t}\n}\n\n\/\/ Dashboards returns all dashboards within the store\nfunc (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tdashboards, err := s.Store.Dashboards(ctx).All(ctx)\n\tif err != nil {\n\t\tError(w, http.StatusInternalServerError, \"Error loading dashboards\", s.Logger)\n\t\treturn\n\t}\n\n\tres := getDashboardsResponse{\n\t\tDashboards: []*dashboardResponse{},\n\t}\n\n\tfor _, dashboard := range dashboards {\n\t\tres.Dashboards = append(res.Dashboards, newDashboardResponse(dashboard))\n\t}\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ DashboardID returns a single specified dashboard\nfunc (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(e)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ NewDashboard creates and returns a new dashboard object\nfunc (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) {\n\tvar dashboard chronograf.Dashboard\n\tvar err error\n\tif err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\tif err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := ValidDashboardRequest(&dashboard, defaultOrg.ID); err != nil {\n\t\tinvalidData(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif dashboard, err = s.Store.Dashboards(ctx).Add(r.Context(), dashboard); err != nil {\n\t\tmsg := fmt.Errorf(\"Error storing dashboard %v: %v\", dashboard, err)\n\t\tunknownErrorWithMessage(w, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(dashboard)\n\tlocation(w, res.Links.Self)\n\tencodeJSON(w, http.StatusCreated, res, s.Logger)\n}\n\n\/\/ RemoveDashboard deletes a dashboard\nfunc (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Delete(ctx, e); err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ ReplaceDashboard completely replaces a dashboard\nfunc (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tidParam, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not parse dashboard ID: %s\", err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t}\n\tid := chronograf.DashboardID(idParam)\n\n\t_, err = s.Store.Dashboards(ctx).Get(ctx, id)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, fmt.Sprintf(\"ID %d not found\", id), s.Logger)\n\t\treturn\n\t}\n\n\tvar req chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\treq.ID = id\n\tfor i := 0; i < len(req.Templates); i++ {\n\t\ttv := req.Templates[i]\n\t\tif tv.Type != \"csv\" {\n\t\t\ttv.Values = []chronograf.TemplateValue{}\n\t\t\tfmt.Println(tv.Values)\n\t\t}\n\t\treq.Templates[i] = tv\n\t}\n\n\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\tif err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {\n\t\tinvalidData(w, err, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Update(ctx, req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Error updating dashboard ID %d: %v\", id, err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(req)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ UpdateDashboard completely updates either the dashboard name or the cells\nfunc (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tidParam, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not parse dashboard ID: %s\", err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\tid := chronograf.DashboardID(idParam)\n\n\torig, err := s.Store.Dashboards(ctx).Get(ctx, id)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, fmt.Sprintf(\"ID %d not found\", id), s.Logger)\n\t\treturn\n\t}\n\n\tvar req chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\treq.ID = id\n\n\tif req.Name != \"\" {\n\t\torig.Name = req.Name\n\t} else if len(req.Cells) > 0 {\n\t\tdefaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)\n\t\tif err != nil {\n\t\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\t\treturn\n\t\t}\n\t\tif err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {\n\t\t\tinvalidData(w, err, s.Logger)\n\t\t\treturn\n\t\t}\n\t\torig.Cells = req.Cells\n\t} else {\n\t\tinvalidData(w, fmt.Errorf(\"Update must include either name or cells\"), s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.Store.Dashboards(ctx).Update(ctx, orig); err != nil {\n\t\tmsg := fmt.Sprintf(\"Error updating dashboard ID %d: %v\", id, err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(orig)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ ValidDashboardRequest verifies that the dashboard cells have a query\nfunc ValidDashboardRequest(d *chronograf.Dashboard, defaultOrgID string) error {\n\tif d.Organization == \"\" {\n\t\td.Organization = defaultOrgID\n\t}\n\tfor i, c := range d.Cells {\n\t\tif err := ValidDashboardCellRequest(&c); err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.Cells[i] = c\n\t}\n\tfor _, t := range d.Templates {\n\t\tif err := ValidTemplateRequest(&t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t(*d) = DashboardDefaults(*d)\n\treturn nil\n}\n\n\/\/ DashboardDefaults updates the dashboard with the default values\n\/\/ if none are specified\nfunc DashboardDefaults(d chronograf.Dashboard) (newDash chronograf.Dashboard) {\n\tnewDash.ID = d.ID\n\tnewDash.Templates = d.Templates\n\tnewDash.Name = d.Name\n\tnewDash.Organization = d.Organization\n\tnewDash.Cells = make([]chronograf.DashboardCell, len(d.Cells))\n\n\tfor i, c := range d.Cells {\n\t\tCorrectWidthHeight(&c)\n\t\tnewDash.Cells[i] = c\n\t}\n\treturn\n}\n\n\/\/ AddQueryConfigs updates all the celsl in the dashboard to have query config\n\/\/ objects corresponding to their influxql queries.\nfunc AddQueryConfigs(d chronograf.Dashboard) (newDash chronograf.Dashboard) {\n\tnewDash.ID = d.ID\n\tnewDash.Templates = d.Templates\n\tnewDash.Name = d.Name\n\tnewDash.Cells = make([]chronograf.DashboardCell, len(d.Cells))\n\n\tfor i, c := range d.Cells {\n\t\tAddQueryConfig(&c)\n\t\tnewDash.Cells[i] = c\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package helm\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/rusenask\/keel\/types\"\n\t\"github.com\/rusenask\/keel\/util\/image\"\n\t\"github.com\/rusenask\/keel\/util\/version\"\n\n\thapi_chart \"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\t\/\/ rls \"k8s.io\/helm\/pkg\/proto\/hapi\/services\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/helm\"\n)\n\n\/\/ ProviderName - helm provider name\nconst ProviderName = \"helm\"\n\n\/\/ keel paths\nconst (\n\tpolicyPath = \"keel.policy\"\n\timagesPath = \"keel.images\"\n)\n\n\/\/ Provider - helm provider, responsible for managing release updates\ntype Provider struct {\n\timplementer Implementer\n\n\tevents chan *types.Event\n\tstop chan struct{}\n}\n\nfunc NewProvider(implementer Implementer) *Provider {\n\treturn &Provider{\n\t\timplementer: implementer,\n\t\tevents: make(chan *types.Event, 100),\n\t\tstop: make(chan struct{}),\n\t}\n}\n\nfunc (p *Provider) GetName() string {\n\treturn ProviderName\n}\n\n\/\/ Submit - submit event to provider\nfunc (p *Provider) Submit(event types.Event) error {\n\tp.events <- &event\n\treturn nil\n}\n\n\/\/ Start - starts kubernetes provider, waits for events\nfunc (p *Provider) Start() error {\n\treturn p.startInternal()\n}\n\n\/\/ Stop - stops kubernetes provider\nfunc (p *Provider) Stop() {\n\tclose(p.stop)\n}\n\nfunc (p *Provider) startInternal() error {\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.events:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"repository\": event.Repository.Name,\n\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t\"registry\": event.Repository.Host,\n\t\t\t}).Info(\"provider.helm: processing event\")\n\t\t\terr := p.processEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": event.Repository.Name,\n\t\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t}).Error(\"provider.helm: failed to process event\")\n\t\t\t}\n\t\tcase <-p.stop:\n\t\t\tlog.Info(\"provider.helm: got shutdown signal, stopping...\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (p *Provider) processEvent(event *types.Event) (err error) {\n\n\treturn nil\n}\n\n\/\/ UpdatePlan - release update plan\ntype UpdatePlan struct {\n\tNamespace string\n\tName string\n\n\t\/\/ chart\n\tChart *hapi_chart.Chart\n\n\t\/\/ values to update path=value\n\tValues map[string]string\n}\n\nfunc (p *Provider) createUpdatePlans(event *types.Event) ([]*UpdatePlan, error) {\n\tvar plans []*UpdatePlan\n\n\treleaseList, err := p.implementer.ListReleases()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, release := range releaseList.Releases {\n\n\t\tnewVersion, err := version.GetVersion(event.Repository.Tag)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"provider.helm: failed to parse version\")\n\t\t\tcontinue\n\t\t}\n\n\t\tplan, update, err := p.checkVersionedRelease(newVersion, release.Namespace, release.Name, release.Chart, release.Config)\n\n\t}\n\n\treturn plans, nil\n}\n\n\/\/ resp, err := u.client.UpdateRelease(\n\/\/ \t\tu.release,\n\/\/ \t\tchartPath,\n\/\/ \t\thelm.UpdateValueOverrides(rawVals),\n\/\/ \t\thelm.UpgradeDryRun(u.dryRun),\n\/\/ \t\thelm.UpgradeRecreate(u.recreate),\n\/\/ \t\thelm.UpgradeForce(u.force),\n\/\/ \t\thelm.UpgradeDisableHooks(u.disableHooks),\n\/\/ \t\thelm.UpgradeTimeout(u.timeout),\n\/\/ \t\thelm.ResetValues(u.resetValues),\n\/\/ \t\thelm.ReuseValues(u.reuseValues),\n\/\/ \t\thelm.UpgradeWait(u.wait))\n\/\/ \tif err != nil {\n\/\/ \t\treturn fmt.Errorf(\"UPGRADE FAILED: %v\", prettyError(err))\n\/\/ \t}\n\nfunc updateHelmRelease(implementer Implementer, releaseName string, chart *hapi_chart.Chart, rawVals string) error {\n\n\tresp, err := implementer.UpdateReleaseFromChart(releaseName, chart,\n\t\thelm.UpdateValueOverrides([]byte(rawVals)),\n\t\thelm.UpgradeDryRun(false),\n\t\thelm.UpgradeRecreate(false),\n\t\thelm.UpgradeForce(true),\n\t\thelm.UpgradeDisableHooks(false),\n\t\thelm.UpgradeTimeout(30),\n\t\thelm.ResetValues(false),\n\t\thelm.ReuseValues(true),\n\t\thelm.UpgradeWait(true))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"version\": resp.Release.Version,\n\t\t\"release\": releaseName,\n\t}).Info(\"provider.helm: release updated\")\n\treturn nil\n}\n\n\/\/ func parseImageOld(chart *hapi_chart.Chart, config *hapi_chart.Config) (*image.Reference, error) {\n\/\/ \tvals, err := chartutil.ReadValues([]byte(config.Raw))\n\/\/ \tif err != nil {\n\/\/ \t\treturn nil, err\n\/\/ \t}\n\n\/\/ \tlog.Info(config.Raw)\n\n\/\/ \timageName, err := vals.PathValue(\"image.repository\")\n\/\/ \tif err != nil {\n\/\/ \t\treturn nil, err\n\/\/ \t}\n\n\/\/ \t\/\/ FIXME: need to dynamically get repositories\n\/\/ \timageTag, err := vals.PathValue(\"image.tag\")\n\/\/ \tif err != nil {\n\/\/ \t\treturn nil, fmt.Errorf(\"failed to get image tag: %s\", err)\n\/\/ \t}\n\n\/\/ \timageNameStr, ok := imageName.(string)\n\/\/ \tif !ok {\n\/\/ \t\treturn nil, fmt.Errorf(\"failed to convert image name ref to string\")\n\/\/ \t}\n\n\/\/ \timageTagStr, ok := imageTag.(string)\n\/\/ \tif !ok {\n\/\/ \t\treturn nil, fmt.Errorf(\"failed to convert image tag ref to string\")\n\/\/ \t}\n\n\/\/ \tif imageTagStr != \"\" {\n\/\/ \t\treturn image.Parse(imageNameStr + \":\" + imageTagStr)\n\/\/ \t}\n\n\/\/ \treturn image.Parse(imageNameStr)\n\/\/ }\n\nfunc parseImage(vals chartutil.Values, details *ImageDetails) (*image.Reference, error) {\n\tif details.Repository == \"\" {\n\t\treturn nil, fmt.Errorf(\"repository name path cannot be empty\")\n\t}\n\n\timageName, err := getValueAsString(vals, details.Repository)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ getting image tag\n\timageTag, err := getValueAsString(vals, details.Tag)\n\tif err != nil {\n\t\t\/\/ failed to find tag, returning anyway\n\t\treturn image.Parse(imageName)\n\t}\n\n\treturn image.Parse(imageName + \":\" + imageTag)\n}\n\nfunc getValueAsString(vals chartutil.Values, path string) (string, error) {\n\tvalinterface, err := vals.PathValue(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvalString, ok := valinterface.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to convert value to string\")\n\t}\n\n\treturn valString, nil\n}\n\nfunc values(chart *hapi_chart.Chart, config *hapi_chart.Config) (chartutil.Values, error) {\n\treturn chartutil.CoalesceValues(chart, config)\n}\n\n\/\/ keel:\n\/\/ # keel policy (all\/major\/minor\/patch\/force)\n\/\/ policy: all\n\/\/ # trigger type, defaults to events such as pubsub, webhooks\n\/\/ trigger: poll\n\/\/ # images to track and update\n\/\/ images:\n\/\/ - repository: image.repository\n\/\/ tag: image.tag\n\n\/\/ Root - root element of the values yaml\ntype Root struct {\n\tKeel KeelChartConfig `json:\"keel\"`\n}\n\n\/\/ KeelChartConfig - keel related configuration taken from values.yaml\ntype KeelChartConfig struct {\n\tPolicy types.PolicyType `json:\"policy\"`\n\tTrigger string `json:\"trigger\"`\n\tImages []ImageDetails `json:\"images\"`\n}\n\n\/\/ ImageDetails - image details\ntype ImageDetails struct {\n\tRepository string `json:\"repository\"`\n\tTag string `json:\"tag\"`\n}\n\nfunc getKeelConfig(vals chartutil.Values) (*KeelChartConfig, error) {\n\tyamlFull, err := vals.YAML()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get vals config, error: %s\", err)\n\t}\n\n\tvar r Root\n\terr = yaml.Unmarshal([]byte(yamlFull), &r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse keel config: %s\", err)\n\t}\n\treturn &r.Keel, nil\n}\n<commit_msg>helm versioned updates<commit_after>package helm\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/rusenask\/keel\/types\"\n\t\"github.com\/rusenask\/keel\/util\/image\"\n\t\"github.com\/rusenask\/keel\/util\/version\"\n\n\thapi_chart \"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\t\/\/ rls \"k8s.io\/helm\/pkg\/proto\/hapi\/services\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/helm\"\n\t\"k8s.io\/helm\/pkg\/strvals\"\n)\n\n\/\/ ProviderName - helm provider name\nconst ProviderName = \"helm\"\n\n\/\/ keel paths\nconst (\n\tpolicyPath = \"keel.policy\"\n\timagesPath = \"keel.images\"\n)\n\n\/\/ UpdatePlan - release update plan\ntype UpdatePlan struct {\n\tNamespace string\n\tName string\n\n\t\/\/ chart\n\tChart *hapi_chart.Chart\n\n\t\/\/ values to update path=value\n\tValues map[string]string\n}\n\n\/\/ keel:\n\/\/ # keel policy (all\/major\/minor\/patch\/force)\n\/\/ policy: all\n\/\/ # trigger type, defaults to events such as pubsub, webhooks\n\/\/ trigger: poll\n\/\/ # images to track and update\n\/\/ images:\n\/\/ - repository: image.repository\n\/\/ tag: image.tag\n\n\/\/ Root - root element of the values yaml\ntype Root struct {\n\tKeel KeelChartConfig `json:\"keel\"`\n}\n\n\/\/ KeelChartConfig - keel related configuration taken from values.yaml\ntype KeelChartConfig struct {\n\tPolicy types.PolicyType `json:\"policy\"`\n\tTrigger string `json:\"trigger\"`\n\tImages []ImageDetails `json:\"images\"`\n}\n\n\/\/ ImageDetails - image details\ntype ImageDetails struct {\n\tRepositoryPath string `json:\"repository\"`\n\tTagPath string `json:\"tag\"`\n}\n\n\/\/ Provider - helm provider, responsible for managing release updates\ntype Provider struct {\n\timplementer Implementer\n\n\tevents chan *types.Event\n\tstop chan struct{}\n}\n\nfunc NewProvider(implementer Implementer) *Provider {\n\treturn &Provider{\n\t\timplementer: implementer,\n\t\tevents: make(chan *types.Event, 100),\n\t\tstop: make(chan struct{}),\n\t}\n}\n\nfunc (p *Provider) GetName() string {\n\treturn ProviderName\n}\n\n\/\/ Submit - submit event to provider\nfunc (p *Provider) Submit(event types.Event) error {\n\tp.events <- &event\n\treturn nil\n}\n\n\/\/ Start - starts kubernetes provider, waits for events\nfunc (p *Provider) Start() error {\n\treturn p.startInternal()\n}\n\n\/\/ Stop - stops kubernetes provider\nfunc (p *Provider) Stop() {\n\tclose(p.stop)\n}\n\nfunc (p *Provider) Releases() ([]*types.HelmRelease, error) {\n\treleases := []*types.HelmRelease{}\n\n\treleaseList, err := p.implementer.ListReleases()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, release := range releaseList.Releases {\n\t\t\n\t}\n\n\treturn releases, nil\n}\n\nfunc (p *Provider) startInternal() error {\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.events:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"repository\": event.Repository.Name,\n\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t\"registry\": event.Repository.Host,\n\t\t\t}).Info(\"provider.helm: processing event\")\n\t\t\terr := p.processEvent(event)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"error\": err,\n\t\t\t\t\t\"image\": event.Repository.Name,\n\t\t\t\t\t\"tag\": event.Repository.Tag,\n\t\t\t\t}).Error(\"provider.helm: failed to process event\")\n\t\t\t}\n\t\tcase <-p.stop:\n\t\t\tlog.Info(\"provider.helm: got shutdown signal, stopping...\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (p *Provider) processEvent(event *types.Event) (err error) {\n\tplans, err := p.createUpdatePlans(event)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn p.applyPlans(plans)\n}\n\nfunc (p *Provider) createUpdatePlans(event *types.Event) ([]*UpdatePlan, error) {\n\tvar plans []*UpdatePlan\n\n\treleaseList, err := p.implementer.ListReleases()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, release := range releaseList.Releases {\n\n\t\tnewVersion, err := version.GetVersion(event.Repository.Tag)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"provider.helm: failed to parse version\")\n\t\t\tcontinue\n\t\t}\n\n\t\tplan, update, err := checkVersionedRelease(newVersion, &event.Repository, release.Namespace, release.Name, release.Chart, release.Config)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"name\": release.Name,\n\t\t\t\t\"namespace\": release.Namespace,\n\t\t\t}).Error(\"provider.helm: failed to process versioned release\")\n\t\t\tcontinue\n\t\t}\n\t\tif update {\n\t\t\tplans = append(plans, plan)\n\t\t}\n\t}\n\n\treturn plans, nil\n}\n\nfunc (p *Provider) applyPlans(plans []*UpdatePlan) error {\n\tfor _, plan := range plans {\n\t\terr := updateHelmRelease(p.implementer, plan.Name, plan.Chart, plan.Values)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"name\": plan.Name,\n\t\t\t\t\"namespace\": plan.Namespace,\n\t\t\t}).Error(\"provider.helm: failed to apply plan\")\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ resp, err := u.client.UpdateRelease(\n\/\/ \t\tu.release,\n\/\/ \t\tchartPath,\n\/\/ \t\thelm.UpdateValueOverrides(rawVals),\n\/\/ \t\thelm.UpgradeDryRun(u.dryRun),\n\/\/ \t\thelm.UpgradeRecreate(u.recreate),\n\/\/ \t\thelm.UpgradeForce(u.force),\n\/\/ \t\thelm.UpgradeDisableHooks(u.disableHooks),\n\/\/ \t\thelm.UpgradeTimeout(u.timeout),\n\/\/ \t\thelm.ResetValues(u.resetValues),\n\/\/ \t\thelm.ReuseValues(u.reuseValues),\n\/\/ \t\thelm.UpgradeWait(u.wait))\n\/\/ \tif err != nil {\n\/\/ \t\treturn fmt.Errorf(\"UPGRADE FAILED: %v\", prettyError(err))\n\/\/ \t}\n\nfunc updateHelmRelease(implementer Implementer, releaseName string, chart *hapi_chart.Chart, overrideValues map[string]string) error {\n\n\toverrideBts, err := convertToYaml(mapToSlice(overrideValues))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := implementer.UpdateReleaseFromChart(releaseName, chart,\n\t\thelm.UpdateValueOverrides(overrideBts),\n\t\thelm.UpgradeDryRun(false),\n\t\thelm.UpgradeRecreate(false),\n\t\thelm.UpgradeForce(true),\n\t\thelm.UpgradeDisableHooks(false),\n\t\thelm.UpgradeTimeout(30),\n\t\thelm.ResetValues(false),\n\t\thelm.ReuseValues(true),\n\t\thelm.UpgradeWait(true))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"version\": resp.Release.Version,\n\t\t\"release\": releaseName,\n\t}).Info(\"provider.helm: release updated\")\n\treturn nil\n}\n\nfunc mapToSlice(values map[string]string) []string {\n\tconverted := []string{}\n\tfor k, v := range values {\n\t\tconcat := k + \"=\" + v\n\t\tconverted = append(converted, concat)\n\t}\n\treturn converted\n}\n\n\/\/ parse\nfunc convertToYaml(values []string) ([]byte, error) {\n\tbase := map[string]interface{}{}\n\tfor _, value := range values {\n\t\tif err := strvals.ParseInto(value, base); err != nil {\n\t\t\treturn []byte{}, fmt.Errorf(\"failed parsing --set data: %s\", err)\n\t\t}\n\t}\n\n\treturn yaml.Marshal(base)\n}\n\nfunc getValueAsString(vals chartutil.Values, path string) (string, error) {\n\tvalinterface, err := vals.PathValue(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvalString, ok := valinterface.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to convert value to string\")\n\t}\n\n\treturn valString, nil\n}\n\nfunc values(chart *hapi_chart.Chart, config *hapi_chart.Config) (chartutil.Values, error) {\n\treturn chartutil.CoalesceValues(chart, config)\n}\n\nfunc getKeelConfig(vals chartutil.Values) (*KeelChartConfig, error) {\n\tyamlFull, err := vals.YAML()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get vals config, error: %s\", err)\n\t}\n\n\tvar r Root\n\terr = yaml.Unmarshal([]byte(yamlFull), &r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse keel config: %s\", err)\n\t}\n\treturn &r.Keel, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package listener\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"strconv\"\n)\n\n\/\/ Listener is the interface for things that listen on file descriptors\n\/\/ specified by Start::Server \/ server_starter \ntype Listener interface {\n\tFd() uintptr\n\tListen() (net.Listener, error)\n\tString() string\n}\n\n\/\/ ListenerList holds a list of Listeners. This is here just for convenience\n\/\/ so that you can do\n\/\/\tlist.String()\n\/\/ to get a string compatible with SERVER_STARTER_PORT\ntype ListenerList []Listener\n\nfunc (ll ListenerList) String() string {\n\tlist := make([]string, len(ll))\n\tfor i, l := range ll {\n\t\tlist[i] = l.String()\n\t}\n\treturn strings.Join(list, \";\")\n}\n\n\/\/ TCPListener is a listener for ... tcp duh.\ntype TCPListener struct {\n\tAddr string\n\tPort int\n\tfd uintptr\n}\n\n\/\/ UnixListener is a listener for unix sockets.\ntype UnixListener struct {\n\tPath string\n\tfd uintptr\n}\n\nfunc (l TCPListener) String() string {\n\tif l.Addr == \"0.0.0.0\" {\n\t\treturn fmt.Sprintf(\"%d=%d\", l.Port, l.fd)\n\t}\n\treturn fmt.Sprintf(\"%s:%d=%d\", l.Addr, l.Port, l.fd)\n}\n\n\/\/ Fd returns the underlying file descriptor\nfunc (l TCPListener) Fd() uintptr {\n\treturn l.fd\n}\n\n\/\/ Listen creates a new Listener\nfunc (l TCPListener) Listen() (net.Listener, error) {\n\treturn net.FileListener(os.NewFile(l.Fd(), fmt.Sprintf(\"%s:%d\", l.Addr, l.Port)))\n}\n\nfunc (l UnixListener) String() string {\n\treturn fmt.Sprintf(\"%s=%d\", l.Path, l.fd)\n}\n\n\/\/ Fd returns the underlying file descriptor\nfunc (l UnixListener) Fd() uintptr {\n\treturn l.fd\n}\n\n\/\/ Listen creates a new Listener\nfunc (l UnixListener) Listen() (net.Listener, error) {\n\treturn net.FileListener(os.NewFile(l.Fd(), l.Path))\n}\n\n\/\/ Being lazy here...\nvar reLooksLikeHostPort = regexp.MustCompile(`^(\\d+):(\\d+)$`)\nvar reLooksLikePort = regexp.MustCompile(`^\\d+$`)\n\nfunc parseListenTargets(str string) ([]Listener, error) {\n\trawspec := strings.Split(str, \";\")\n\tret := make([]Listener, len(rawspec))\n\n\tfor i, pairString := range rawspec {\n\t\tpair := strings.Split(pairString, \"=\")\n\t\thostPort := strings.TrimSpace(pair[0])\n\t\tfdString := strings.TrimSpace(pair[1])\n\t\tfd, err := strconv.ParseUint(fdString, 10, 0)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse '%s' as listen target: %s\", pairString, err)\n\t\t}\n\n\t\tif matches := reLooksLikeHostPort.FindAllString(hostPort, -1); matches != nil {\n\t\t\tport, err := strconv.ParseInt(matches[1], 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tret[i] = TCPListener{\n\t\t\t\tAddr: matches[0],\n\t\t\t\tPort: int(port),\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t} else if match := reLooksLikePort.FindString(hostPort); match != \"\" {\n\t\t\tport, err := strconv.ParseInt(match, 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tret[i] = TCPListener{\n\t\t\t\tAddr: \"0.0.0.0\",\n\t\t\t\tPort: int(port),\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t} else {\n\t\t\tret[i] = UnixListener{\n\t\t\t\tPath: hostPort,\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\n\/\/ Ports parses environment variable SERVER_STARTER_PORT\nfunc Ports() ([]Listener, error) {\n\treturn parseListenTargets(os.Getenv(\"SERVER_STARTER_PORT\"))\n}\n\n\/\/ ListenAll parses environment variable SERVER_STARTER_PORT, and creates\n\/\/ net.Listener objects\nfunc ListenAll() ([]net.Listener, error) {\n\ttargets, err := parseListenTargets(os.Getenv(\"SERVER_STARTER_PORT\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := make([]net.Listener, len(targets))\n\tfor i, target := range targets {\n\t\tret[i], err = target.Listen()\n\t\tif err != nil {\n\t\t\t\/\/ Close everything up to this listener\n\t\t\tfor x := 0; x < i; x++ {\n\t\t\t\tret[x].Close()\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn ret, nil\n}\n<commit_msg>Very slight refactor to remove some hardcoding<commit_after>package listener\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"strconv\"\n)\n\nconst ServerStarterEnvVarName = \"SERVER_STARTER_PORT\"\n\n\/\/ Listener is the interface for things that listen on file descriptors\n\/\/ specified by Start::Server \/ server_starter \ntype Listener interface {\n\tFd() uintptr\n\tListen() (net.Listener, error)\n\tString() string\n}\n\n\/\/ ListenerList holds a list of Listeners. This is here just for convenience\n\/\/ so that you can do\n\/\/\tlist.String()\n\/\/ to get a string compatible with SERVER_STARTER_PORT\ntype ListenerList []Listener\n\nfunc (ll ListenerList) String() string {\n\tlist := make([]string, len(ll))\n\tfor i, l := range ll {\n\t\tlist[i] = l.String()\n\t}\n\treturn strings.Join(list, \";\")\n}\n\n\/\/ TCPListener is a listener for ... tcp duh.\ntype TCPListener struct {\n\tAddr string\n\tPort int\n\tfd uintptr\n}\n\n\/\/ UnixListener is a listener for unix sockets.\ntype UnixListener struct {\n\tPath string\n\tfd uintptr\n}\n\nfunc (l TCPListener) String() string {\n\tif l.Addr == \"0.0.0.0\" {\n\t\treturn fmt.Sprintf(\"%d=%d\", l.Port, l.fd)\n\t}\n\treturn fmt.Sprintf(\"%s:%d=%d\", l.Addr, l.Port, l.fd)\n}\n\n\/\/ Fd returns the underlying file descriptor\nfunc (l TCPListener) Fd() uintptr {\n\treturn l.fd\n}\n\n\/\/ Listen creates a new Listener\nfunc (l TCPListener) Listen() (net.Listener, error) {\n\treturn net.FileListener(os.NewFile(l.Fd(), fmt.Sprintf(\"%s:%d\", l.Addr, l.Port)))\n}\n\nfunc (l UnixListener) String() string {\n\treturn fmt.Sprintf(\"%s=%d\", l.Path, l.fd)\n}\n\n\/\/ Fd returns the underlying file descriptor\nfunc (l UnixListener) Fd() uintptr {\n\treturn l.fd\n}\n\n\/\/ Listen creates a new Listener\nfunc (l UnixListener) Listen() (net.Listener, error) {\n\treturn net.FileListener(os.NewFile(l.Fd(), l.Path))\n}\n\n\/\/ Being lazy here...\nvar reLooksLikeHostPort = regexp.MustCompile(`^(\\d+):(\\d+)$`)\nvar reLooksLikePort = regexp.MustCompile(`^\\d+$`)\n\nfunc parseListenTargets(str string) ([]Listener, error) {\n\trawspec := strings.Split(str, \";\")\n\tret := make([]Listener, len(rawspec))\n\n\tfor i, pairString := range rawspec {\n\t\tpair := strings.Split(pairString, \"=\")\n\t\thostPort := strings.TrimSpace(pair[0])\n\t\tfdString := strings.TrimSpace(pair[1])\n\t\tfd, err := strconv.ParseUint(fdString, 10, 0)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse '%s' as listen target: %s\", pairString, err)\n\t\t}\n\n\t\tif matches := reLooksLikeHostPort.FindAllString(hostPort, -1); matches != nil {\n\t\t\tport, err := strconv.ParseInt(matches[1], 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tret[i] = TCPListener{\n\t\t\t\tAddr: matches[0],\n\t\t\t\tPort: int(port),\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t} else if match := reLooksLikePort.FindString(hostPort); match != \"\" {\n\t\t\tport, err := strconv.ParseInt(match, 10, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tret[i] = TCPListener{\n\t\t\t\tAddr: \"0.0.0.0\",\n\t\t\t\tPort: int(port),\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t} else {\n\t\t\tret[i] = UnixListener{\n\t\t\t\tPath: hostPort,\n\t\t\t\tfd: uintptr(fd),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\n\/\/ GetPortsSpecification returns the value of SERVER_STARTER_PORT\n\/\/ environment variable\nfunc GetPortsSpecification() string {\n\treturn os.Getenv(ServerStarterEnvVarName)\n}\n\n\/\/ Ports parses environment variable SERVER_STARTER_PORT\nfunc Ports() ([]Listener, error) {\n\treturn parseListenTargets(GetPortsSpecification())\n}\n\n\/\/ ListenAll parses environment variable SERVER_STARTER_PORT, and creates\n\/\/ net.Listener objects\nfunc ListenAll() ([]net.Listener, error) {\n\ttargets, err := parseListenTargets(GetPortsSpecification())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := make([]net.Listener, len(targets))\n\tfor i, target := range targets {\n\t\tret[i], err = target.Listen()\n\t\tif err != nil {\n\t\t\t\/\/ Close everything up to this listener\n\t\t\tfor x := 0; x < i; x++ {\n\t\t\t\tret[x].Close()\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package listener\n\nimport (\n\t\"encoding\/json\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/jeffpierce\/cassabon\/config\"\n\t\"github.com\/jeffpierce\/cassabon\/pearson\"\n)\n\ntype indexedLine struct {\n\tpeerIndex int\n\tstatLine string\n}\n\n\/\/ PeerList contains an ordered list of Cassabon peers.\ntype PeerList struct {\n\twg *sync.WaitGroup\n\ttarget chan indexedLine \/\/ Channel for forwarding a stat line to a Cassabon peer\n\thostPort string \/\/ Host:port on which the local server is listening\n\tpeersMap map[string]string \/\/ Peer list as stored in the configuration\n\tpeers []string \/\/ Host:port information for all Cassabon peers (inclusive)\n\tconns map[string]*StubbornTCPConn\n\tself sync.RWMutex\n}\n\nfunc (pl *PeerList) Init() {\n\n\tpl.conns = make(map[string]*StubbornTCPConn, 0)\n\n\t\/\/ Create the channel on which stats to forward are received.\n\tpl.target = make(chan indexedLine, 1)\n}\n\n\/\/ IsStarted indicates whether the structure has ever been updated.\nfunc (pl *PeerList) IsStarted() bool {\n\treturn pl.hostPort != \"\"\n}\n\n\/\/ Start records the current peer list and starts the forwarder goroutine.\nfunc (pl *PeerList) Start(wg *sync.WaitGroup, hostPort string, peersMap map[string]string) {\n\n\t\/\/ Synchronize access by other goroutines.\n\tpl.self.Lock()\n\tdefer pl.self.Unlock()\n\n\tpl.wg = wg\n\tpl.hostPort = hostPort\n\tpl.peersMap = peersMap\n\n\t\/\/ Dispose of any peer connections that are obsolete.\n\tpeers := sortedMapToArray(pl.peersMap)\n\tfor _, existing := range pl.peers {\n\t\tfound := false\n\t\tfor _, incoming := range peers {\n\t\t\tif existing == incoming {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found && existing != pl.hostPort {\n\t\t\tpl.conns[existing].Close()\n\t\t\tdelete(pl.conns, existing)\n\t\t} else {\n\t\t\tconfig.G.Log.System.LogInfo(\"Retaining peer connection to %s\", existing)\n\t\t}\n\t}\n\n\t\/\/ Record the current set of peers.\n\tpl.peers = make([]string, len(peers))\n\tfor i, v := range peers {\n\t\tpl.peers[i] = v\n\t\t\/\/ Set up peer connections for newly added peers.\n\t\tif _, found := pl.conns[v]; !found && v != pl.hostPort {\n\t\t\tpl.conns[v] = new(StubbornTCPConn)\n\t\t\tpl.conns[v].Open(v)\n\t\t}\n\t}\n\n\t\/\/ Start the forwarder goroutine.\n\tpl.wg.Add(1)\n\tgo pl.run()\n}\n\n\/\/ IsEqual indicates whether the given new configuration is equal to the current.\nfunc (pl *PeerList) IsEqual(hostPort string, peersMap map[string]string) bool {\n\n\t\/\/ Synchronize access by other goroutines.\n\tpl.self.RLock()\n\tdefer pl.self.RUnlock()\n\n\tif pl.hostPort != hostPort {\n\t\treturn false\n\t}\n\n\tpeers := sortedMapToArray(peersMap)\n\tif len(pl.peers) != len(peers) {\n\t\treturn false\n\t}\n\tfor i, v := range pl.peers {\n\t\tif peers[i] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ OwnerOf determines which host owns a particular stats path.\nfunc (pl *PeerList) OwnerOf(statPath string) (int, bool) {\n\tpeerIndex := int(pearson.Hash8(statPath)) % len(pl.peers)\n\tif pl.hostPort == pl.peers[peerIndex] {\n\t\treturn peerIndex, true\n\t} else {\n\t\treturn peerIndex, false\n\t}\n}\n\n\/\/ PropagatePeerList sends the current peer list to all known peers.\nfunc (pl *PeerList) PropagatePeerList() {\n\n\t\/\/ Build the command to be sent.\n\tvar cmd string\n\tvar buf []byte\n\tbuf, _ = json.Marshal(pl.peersMap)\n\tcmd = \"<<peerlist=\" + string(buf) + \">>\"\n\n\t\/\/ Send the command to each peer.\n\tfor i, v := range pl.peers {\n\t\tif v != pl.hostPort {\n\t\t\tconfig.G.Log.System.LogInfo(\"Sending peer list to %s\", v)\n\t\t\tpl.target <- indexedLine{i, cmd}\n\t\t}\n\t}\n}\n\n\/\/ run listens for stat lines on a channel and sends them to the appropriate Cassabon peer.\nfunc (pl *PeerList) run() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-config.G.OnReload2:\n\t\t\tconfig.G.Log.System.LogDebug(\"PeerList::run received QUIT message\")\n\t\t\tpl.wg.Done()\n\t\t\treturn\n\t\tcase il := <-pl.target:\n\t\t\tif pl.hostPort != pl.peers[il.peerIndex] {\n\t\t\t\tpl.conns[pl.peers[il.peerIndex]].Send(il.statLine)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ sortedMapToArray converts a map to an array of its values, ordered by key.\nfunc sortedMapToArray(m map[string]string) []string {\n\tvar a, t []string\n\tfor k, _ := range m {\n\t\tt = append(t, k)\n\t}\n\tsort.Strings(t)\n\tfor _, v := range t {\n\t\ta = append(a, m[v])\n\t}\n\treturn a\n}\n<commit_msg>Make \"keeping\" info message match length of related messages<commit_after>package listener\n\nimport (\n\t\"encoding\/json\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/jeffpierce\/cassabon\/config\"\n\t\"github.com\/jeffpierce\/cassabon\/pearson\"\n)\n\ntype indexedLine struct {\n\tpeerIndex int\n\tstatLine string\n}\n\n\/\/ PeerList contains an ordered list of Cassabon peers.\ntype PeerList struct {\n\twg *sync.WaitGroup\n\ttarget chan indexedLine \/\/ Channel for forwarding a stat line to a Cassabon peer\n\thostPort string \/\/ Host:port on which the local server is listening\n\tpeersMap map[string]string \/\/ Peer list as stored in the configuration\n\tpeers []string \/\/ Host:port information for all Cassabon peers (inclusive)\n\tconns map[string]*StubbornTCPConn\n\tself sync.RWMutex\n}\n\nfunc (pl *PeerList) Init() {\n\n\tpl.conns = make(map[string]*StubbornTCPConn, 0)\n\n\t\/\/ Create the channel on which stats to forward are received.\n\tpl.target = make(chan indexedLine, 1)\n}\n\n\/\/ IsStarted indicates whether the structure has ever been updated.\nfunc (pl *PeerList) IsStarted() bool {\n\treturn pl.hostPort != \"\"\n}\n\n\/\/ Start records the current peer list and starts the forwarder goroutine.\nfunc (pl *PeerList) Start(wg *sync.WaitGroup, hostPort string, peersMap map[string]string) {\n\n\t\/\/ Synchronize access by other goroutines.\n\tpl.self.Lock()\n\tdefer pl.self.Unlock()\n\n\tpl.wg = wg\n\tpl.hostPort = hostPort\n\tpl.peersMap = peersMap\n\n\t\/\/ Dispose of any peer connections that are obsolete.\n\tpeers := sortedMapToArray(pl.peersMap)\n\tfor _, existing := range pl.peers {\n\t\tfound := false\n\t\tfor _, incoming := range peers {\n\t\t\tif existing == incoming {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found && existing != pl.hostPort {\n\t\t\tpl.conns[existing].Close()\n\t\t\tdelete(pl.conns, existing)\n\t\t} else {\n\t\t\tconfig.G.Log.System.LogInfo(\"Keeping peer connection to %s\", existing)\n\t\t}\n\t}\n\n\t\/\/ Record the current set of peers.\n\tpl.peers = make([]string, len(peers))\n\tfor i, v := range peers {\n\t\tpl.peers[i] = v\n\t\t\/\/ Set up peer connections for newly added peers.\n\t\tif _, found := pl.conns[v]; !found && v != pl.hostPort {\n\t\t\tpl.conns[v] = new(StubbornTCPConn)\n\t\t\tpl.conns[v].Open(v)\n\t\t}\n\t}\n\n\t\/\/ Start the forwarder goroutine.\n\tpl.wg.Add(1)\n\tgo pl.run()\n}\n\n\/\/ IsEqual indicates whether the given new configuration is equal to the current.\nfunc (pl *PeerList) IsEqual(hostPort string, peersMap map[string]string) bool {\n\n\t\/\/ Synchronize access by other goroutines.\n\tpl.self.RLock()\n\tdefer pl.self.RUnlock()\n\n\tif pl.hostPort != hostPort {\n\t\treturn false\n\t}\n\n\tpeers := sortedMapToArray(peersMap)\n\tif len(pl.peers) != len(peers) {\n\t\treturn false\n\t}\n\tfor i, v := range pl.peers {\n\t\tif peers[i] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ OwnerOf determines which host owns a particular stats path.\nfunc (pl *PeerList) OwnerOf(statPath string) (int, bool) {\n\tpeerIndex := int(pearson.Hash8(statPath)) % len(pl.peers)\n\tif pl.hostPort == pl.peers[peerIndex] {\n\t\treturn peerIndex, true\n\t} else {\n\t\treturn peerIndex, false\n\t}\n}\n\n\/\/ PropagatePeerList sends the current peer list to all known peers.\nfunc (pl *PeerList) PropagatePeerList() {\n\n\t\/\/ Build the command to be sent.\n\tvar cmd string\n\tvar buf []byte\n\tbuf, _ = json.Marshal(pl.peersMap)\n\tcmd = \"<<peerlist=\" + string(buf) + \">>\"\n\n\t\/\/ Send the command to each peer.\n\tfor i, v := range pl.peers {\n\t\tif v != pl.hostPort {\n\t\t\tconfig.G.Log.System.LogInfo(\"Sending peer list to %s\", v)\n\t\t\tpl.target <- indexedLine{i, cmd}\n\t\t}\n\t}\n}\n\n\/\/ run listens for stat lines on a channel and sends them to the appropriate Cassabon peer.\nfunc (pl *PeerList) run() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-config.G.OnReload2:\n\t\t\tconfig.G.Log.System.LogDebug(\"PeerList::run received QUIT message\")\n\t\t\tpl.wg.Done()\n\t\t\treturn\n\t\tcase il := <-pl.target:\n\t\t\tif pl.hostPort != pl.peers[il.peerIndex] {\n\t\t\t\tpl.conns[pl.peers[il.peerIndex]].Send(il.statLine)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ sortedMapToArray converts a map to an array of its values, ordered by key.\nfunc sortedMapToArray(m map[string]string) []string {\n\tvar a, t []string\n\tfor k, _ := range m {\n\t\tt = append(t, k)\n\t}\n\tsort.Strings(t)\n\tfor _, v := range t {\n\t\ta = append(a, m[v])\n\t}\n\treturn a\n}\n<|endoftext|>"} {"text":"<commit_before>package locus\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\nconst cityUrl string = `http:\/\/api.ipinfodb.com\/v3\/ip-city\/`\nconst countryUrl string = `http:\/\/api.ipinfodb.com\/v3\/ip-country\/`\n\ntype Location struct {\n\tStatusCode string `json: \"statusCode\"`\n\tStatusMessage string `json: \"statusMessage\"`\n\tIpAddress string `json: \"ipAddress\"`\n\tCountryCode string `json: \"countryCode\"`\n\tCountryName string `json: \"countryName\"`\n\tRegionName string `json: \"regionName\"`\n\tCityName string `json: \"cityName\"`\n\tZipCode string `json: \"zipCode\"`\n\tLatitude string `json: \"latitude\"`\n\tLongitude string `json: \"longitude\"`\n\tTimeZone string `json: \"timeZone\"`\n}\n\nfunc locationJson(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc location(url string) (*Location, error) {\n\tlocation := &Location{}\n\n\traw_json, err := locationJson(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(raw_json, location)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn location, nil\n}\n\nfunc requestUrl(ip string, precision string, key string) string {\n\tbaseUrl := countryUrl\n\tif strings.ToLower(precision) == \"city\" {\n\t\tbaseUrl = cityUrl\n\t}\n\n\treturn strings.Join([]string{baseUrl, `?format=json`, `&ip=`, ip, `&key=`, key}, ``)\n}\n\n\/\/ Public API\n\nfunc LookupLocationJson(ip string, precision string, key string) (string, error) {\n\tlocation, err := locationJson(requestUrl(ip, precision, key))\n\treturn string(location[:]), err\n}\n\nfunc LookupLocation(ip string, precision string, key string) (*Location, error) {\n\treturn location(requestUrl(ip, precision, key))\n}\n\nfunc BulkLookupLocationJSON(ips []string, precision string, key string) ([]string, error) {\n\tlocations := make([]string, len(ips)-1)\n\tvar err error\n\tfor i, ip := range ips {\n\t\tlocations[i], err = LookupLocationJson(ip, precision, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn locations, nil\n}\n\nfunc BulkLookupLocation(ips []string, precision string, key string) ([]*Location, error) {\n\tlocations := make([]*Location, len(ips)-1)\n\tvar err error\n\tfor i, ip := range ips {\n\t\tlocations[i], err = LookupLocation(ip, precision, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn locations, nil\n}\n<commit_msg>Fix off by one error in range<commit_after>package locus\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\nconst cityUrl string = `http:\/\/api.ipinfodb.com\/v3\/ip-city\/`\nconst countryUrl string = `http:\/\/api.ipinfodb.com\/v3\/ip-country\/`\n\ntype Location struct {\n\tStatusCode string `json: \"statusCode\"`\n\tStatusMessage string `json: \"statusMessage\"`\n\tIpAddress string `json: \"ipAddress\"`\n\tCountryCode string `json: \"countryCode\"`\n\tCountryName string `json: \"countryName\"`\n\tRegionName string `json: \"regionName\"`\n\tCityName string `json: \"cityName\"`\n\tZipCode string `json: \"zipCode\"`\n\tLatitude string `json: \"latitude\"`\n\tLongitude string `json: \"longitude\"`\n\tTimeZone string `json: \"timeZone\"`\n}\n\nfunc locationJson(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc location(url string) (*Location, error) {\n\tlocation := &Location{}\n\n\traw_json, err := locationJson(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(raw_json, location)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn location, nil\n}\n\nfunc requestUrl(ip string, precision string, key string) string {\n\tbaseUrl := countryUrl\n\tif strings.ToLower(precision) == \"city\" {\n\t\tbaseUrl = cityUrl\n\t}\n\n\treturn strings.Join([]string{baseUrl, `?format=json`, `&ip=`, ip, `&key=`, key}, ``)\n}\n\n\/\/ Public API\n\nfunc LookupLocationJson(ip string, precision string, key string) (string, error) {\n\tlocation, err := locationJson(requestUrl(ip, precision, key))\n\treturn string(location[:]), err\n}\n\nfunc LookupLocation(ip string, precision string, key string) (*Location, error) {\n\treturn location(requestUrl(ip, precision, key))\n}\n\nfunc BulkLookupLocationJSON(ips []string, precision string, key string) ([]string, error) {\n\tlocations := make([]string, len(ips))\n\tvar err error\n\tfor i, ip := range ips {\n\t\tlocations[i], err = LookupLocationJson(ip, precision, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn locations, nil\n}\n\nfunc BulkLookupLocation(ips []string, precision string, key string) ([]*Location, error) {\n\tlocations := make([]*Location, len(ips))\n\tvar err error\n\tfor i, ip := range ips {\n\t\tlocations[i], err = LookupLocation(ip, precision, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn locations, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/99designs\/aws-vault\/keyring\"\n\t\"github.com\/99designs\/aws-vault\/prompt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/skratchdot\/open-golang\/open\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\ntype LoginCommandInput struct {\n\tProfile string\n\tKeyring keyring.Keyring\n\tMfaToken string\n\tMfaPrompt prompt.PromptFunc\n\tUseStdout bool\n\tFederationTokenDuration time.Duration\n\tAssumeRoleDuration time.Duration\n}\n\nfunc LoginCommand(app *kingpin.Application, input LoginCommandInput) {\n\tif input.FederationTokenDuration > (time.Hour * 12) {\n\t\tapp.Fatalf(\"Maximum federation token duration is 12 hours\")\n\t\treturn\n\t}\n\n\tprofiles, err := awsConfigFile.Parse()\n\tif err != nil {\n\t\tapp.Fatalf(\"Error parsing config: %v\", err)\n\t\treturn\n\t}\n\n\tprovider, err := NewVaultProvider(input.Keyring, input.Profile, VaultOptions{\n\t\tAssumeRoleDuration: input.AssumeRoleDuration,\n\t\tMfaToken: input.MfaToken,\n\t\tMfaPrompt: input.MfaPrompt,\n\t\tNoSession: true,\n\t\tProfiles: profiles,\n\t})\n\tif err != nil {\n\t\tapp.Fatalf(\"Failed to create vault provider: %v\", err)\n\t\treturn\n\t}\n\n\tcreds := credentials.NewCredentials(provider)\n\tval, err := creds.Get()\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == \"NoCredentialProviders\" {\n\t\t\tapp.Fatalf(\"No credentials found for profile %q\", input.Profile)\n\t\t\treturn\n\t\t} else {\n\t\t\tapp.Fatalf(\"Failed to get credentials: %v\", err)\n\t\t}\n\t}\n\n\tjsonBytes, err := json.Marshal(map[string]string{\n\t\t\"sessionId\": val.AccessKeyID,\n\t\t\"sessionKey\": val.SecretAccessKey,\n\t\t\"sessionToken\": val.SessionToken,\n\t})\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\treq, err := http.NewRequest(\"GET\", \"https:\/\/signin.aws.amazon.com\/federation\", nil)\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Creating federation login token, expires in %s\", input.FederationTokenDuration)\n\n\tq := req.URL.Query()\n\tq.Add(\"Action\", \"getSigninToken\")\n\tq.Add(\"Session\", string(jsonBytes))\n\tq.Add(\"SessionDuration\", fmt.Sprintf(\"%.f\", input.FederationTokenDuration.Seconds()))\n\treq.URL.RawQuery = q.Encode()\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tapp.Fatalf(\"Failed to create federated token: %v\", err)\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Printf(\"Response body was %s\", body)\n\t\tapp.Fatalf(\"Call to getSigninToken failed with %v\", resp.Status)\n\t\treturn\n\t}\n\n\tvar respParsed map[string]string\n\n\tif err = json.Unmarshal([]byte(body), &respParsed); err != nil {\n\t\tapp.Fatalf(\"Failed to parse response from getSigninToken: %v\", err)\n\t\treturn\n\t}\n\n\tsigninToken, ok := respParsed[\"SigninToken\"]\n\tif !ok {\n\t\tapp.Fatalf(\"Expected a response with SigninToken\")\n\t\treturn\n\t}\n\n\tloginUrl := fmt.Sprintf(\n\t\t\"https:\/\/signin.aws.amazon.com\/federation?Action=login&Issuer=aws-vault&Destination=%s&SigninToken=%s\",\n\t\turl.QueryEscape(\"https:\/\/console.aws.amazon.com\/\"),\n\t\turl.QueryEscape(signinToken),\n\t)\n\n\tif input.UseStdout {\n\t\tfmt.Println(loginUrl)\n\t} else if err = open.Run(loginUrl); err != nil {\n\t\tlog.Println(err)\n\t\tfmt.Println(loginUrl)\n\t}\n}\n<commit_msg>WIP: try federation tokens for login<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/99designs\/aws-vault\/keyring\"\n\t\"github.com\/99designs\/aws-vault\/prompt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sts\"\n\t\"github.com\/skratchdot\/open-golang\/open\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\ntype LoginCommandInput struct {\n\tProfile string\n\tKeyring keyring.Keyring\n\tMfaToken string\n\tMfaPrompt prompt.PromptFunc\n\tUseStdout bool\n\tFederationTokenDuration time.Duration\n\tAssumeRoleDuration time.Duration\n}\n\nfunc LoginCommand(app *kingpin.Application, input LoginCommandInput) {\n\tif input.FederationTokenDuration > (time.Hour * 12) {\n\t\tapp.Fatalf(\"Maximum federation token duration is 12 hours\")\n\t\treturn\n\t}\n\n\tprofiles, err := awsConfigFile.Parse()\n\tif err != nil {\n\t\tapp.Fatalf(\"Error parsing config: %v\", err)\n\t\treturn\n\t}\n\n\tprovider, err := NewVaultProvider(input.Keyring, input.Profile, VaultOptions{\n\t\tAssumeRoleDuration: input.AssumeRoleDuration,\n\t\tMfaToken: input.MfaToken,\n\t\tMfaPrompt: input.MfaPrompt,\n\t\tNoSession: true,\n\t\tProfiles: profiles,\n\t})\n\tif err != nil {\n\t\tapp.Fatalf(\"Failed to create vault provider: %v\", err)\n\t\treturn\n\t}\n\n\tcreds := credentials.NewCredentials(provider)\n\tval, err := creds.Get()\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == \"NoCredentialProviders\" {\n\t\t\tapp.Fatalf(\"No credentials found for profile %q\", input.Profile)\n\t\t\treturn\n\t\t} else {\n\t\t\tapp.Fatalf(\"Failed to get credentials: %v\", err)\n\t\t}\n\t}\n\n\tif val.SessionToken == \"\" {\n\t\tlog.Printf(\"No session token found, calling GetFederationToken\")\n\t\tstsCreds, err := getFederationToken(val, input.FederationTokenDuration)\n\t\tif err != nil {\n\t\t\tapp.Fatalf(\"Failed to call GetFederationToken: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tval.AccessKeyID = *stsCreds.AccessKeyId\n\t\tval.SecretAccessKey = *stsCreds.SecretAccessKey\n\t\tval.SessionToken = *stsCreds.SessionToken\n\t}\n\n\tlog.Printf(\"%#v\", val)\n\n\tjsonBytes, err := json.Marshal(map[string]string{\n\t\t\"sessionId\": val.AccessKeyID,\n\t\t\"sessionKey\": val.SecretAccessKey,\n\t\t\"sessionToken\": val.SessionToken,\n\t})\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\treq, err := http.NewRequest(\"GET\", \"https:\/\/signin.aws.amazon.com\/federation\", nil)\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Creating federation login token, expires in %s\", input.FederationTokenDuration)\n\n\tq := req.URL.Query()\n\tq.Add(\"Action\", \"getSigninToken\")\n\tq.Add(\"Session\", string(jsonBytes))\n\tq.Add(\"SessionDuration\", fmt.Sprintf(\"%.f\", input.FederationTokenDuration.Seconds()))\n\treq.URL.RawQuery = q.Encode()\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tapp.Fatalf(\"Failed to create federated token: %v\", err)\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tapp.Fatalf(\"%v\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Printf(\"Response body was %s\", body)\n\t\tapp.Fatalf(\"Call to getSigninToken failed with %v\", resp.Status)\n\t\treturn\n\t}\n\n\tvar respParsed map[string]string\n\n\tif err = json.Unmarshal([]byte(body), &respParsed); err != nil {\n\t\tapp.Fatalf(\"Failed to parse response from getSigninToken: %v\", err)\n\t\treturn\n\t}\n\n\tsigninToken, ok := respParsed[\"SigninToken\"]\n\tif !ok {\n\t\tapp.Fatalf(\"Expected a response with SigninToken\")\n\t\treturn\n\t}\n\n\tloginUrl := fmt.Sprintf(\n\t\t\"https:\/\/signin.aws.amazon.com\/federation?Action=login&Issuer=aws-vault&Destination=%s&SigninToken=%s\",\n\t\turl.QueryEscape(\"https:\/\/console.aws.amazon.com\/\"),\n\t\turl.QueryEscape(signinToken),\n\t)\n\n\tif input.UseStdout {\n\t\tfmt.Println(loginUrl)\n\t} else if err = open.Run(loginUrl); err != nil {\n\t\tlog.Println(err)\n\t\tfmt.Println(loginUrl)\n\t}\n}\n\nfunc getFederationToken(creds credentials.Value, d time.Duration) (*sts.Credentials, error) {\n\tclient := sts.New(session.New(&aws.Config{\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: creds}),\n\t}))\n\n\tparams := &sts.GetFederationTokenInput{\n\t\tName: aws.String(\"federated-user\"),\n\t}\n\n\tresp, err := client.GetFederationToken(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Credentials, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nvar cmdLogin = &Command{\n\tExec: runLogin,\n\tUsageLine: \"login [OPTIONS]\",\n\tDescription: \"Log in to Scaleway API\",\n\tHelp: `Generates a configuration file in '\/home\/$USER\/.scwrc'\ncontaining credentials used to interact with the Scaleway API. This\nconfiguration file is automatically used by the 'scw' commands.`,\n}\n\nfunc promptUser(prompt string, output *string, echo bool) {\n\tfmt.Fprintf(os.Stdout, prompt)\n\tos.Stdout.Sync()\n\n\tif !echo {\n\t\tb, err := terminal.ReadPassword(int(os.Stdin.Fd()))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to prompt for password: %s\", err)\n\t\t}\n\t\t*output = string(b)\n\t\tfmt.Fprintf(os.Stdout, \"\\n\")\n\t} else {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\t*output, _ = reader.ReadString('\\n')\n\t}\n}\n\nfunc init() {\n\tcmdLogin.Flag.StringVar(&organization, []string{\"o\", \"-organization\"}, \"\", \"Organization\")\n\tcmdLogin.Flag.StringVar(&token, []string{\"t\", \"-token\"}, \"\", \"Token\")\n\tcmdLogin.Flag.BoolVar(&loginHelp, []string{\"h\", \"-help\"}, false, \"Print usage\")\n}\n\n\/\/ FLags\nvar organization string \/\/ -o flag\nvar token string \/\/ -t flag\nvar loginHelp bool \/\/ -h, --help flag\n\nfunc runLogin(cmd *Command, args []string) {\n\tif loginHelp {\n\t\tcmd.PrintUsage()\n\t}\n\tif len(args) < 1 {\n\t\tcmd.PrintShortUsage()\n\t}\n\n\tif len(organization) == 0 && len(token) == 0 {\n\t\tpromptUser(\"Organization: \", &organization, true)\n\t\tpromptUser(\"Token: \", &token, false)\n\t}\n\n\tcfg := &Config{\n\t\tAPIEndPoint: \"https:\/\/account.scaleway.com\/\",\n\t\tOrganization: strings.Trim(organization, \"\\n\"),\n\t\tToken: strings.Trim(token, \"\\n\"),\n\t}\n\n\tapi, err := NewScalewayAPI(cfg.APIEndPoint, cfg.Organization, cfg.Token)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create ScalewayAPI: %s\", err)\n\t}\n\terr = api.CheckCredentials()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to contact ScalewayAPI: %s\", err)\n\t}\n\n\tscwrc_path, err := GetConfigFilePath()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get scwrc config file path: %s\", err)\n\t}\n\tscwrc, err := os.OpenFile(scwrc_path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create scwrc config file: %s\", err)\n\t}\n\tdefer scwrc.Close()\n\tencoder := json.NewEncoder(scwrc)\n\tcfg.APIEndPoint = \"https:\/\/api.scaleway.com\/\"\n\terr = encoder.Encode(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to encode scw config file: %s\", err)\n\t}\n}\n<commit_msg>Fixed 'scw login' parameters<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nvar cmdLogin = &Command{\n\tExec: runLogin,\n\tUsageLine: \"login [OPTIONS]\",\n\tDescription: \"Log in to Scaleway API\",\n\tHelp: `Generates a configuration file in '\/home\/$USER\/.scwrc'\ncontaining credentials used to interact with the Scaleway API. This\nconfiguration file is automatically used by the 'scw' commands.`,\n}\n\nfunc promptUser(prompt string, output *string, echo bool) {\n\tfmt.Fprintf(os.Stdout, prompt)\n\tos.Stdout.Sync()\n\n\tif !echo {\n\t\tb, err := terminal.ReadPassword(int(os.Stdin.Fd()))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to prompt for password: %s\", err)\n\t\t}\n\t\t*output = string(b)\n\t\tfmt.Fprintf(os.Stdout, \"\\n\")\n\t} else {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\t*output, _ = reader.ReadString('\\n')\n\t}\n}\n\nfunc init() {\n\tcmdLogin.Flag.StringVar(&organization, []string{\"o\", \"-organization\"}, \"\", \"Organization\")\n\tcmdLogin.Flag.StringVar(&token, []string{\"t\", \"-token\"}, \"\", \"Token\")\n\tcmdLogin.Flag.BoolVar(&loginHelp, []string{\"h\", \"-help\"}, false, \"Print usage\")\n}\n\n\/\/ FLags\nvar organization string \/\/ -o flag\nvar token string \/\/ -t flag\nvar loginHelp bool \/\/ -h, --help flag\n\nfunc runLogin(cmd *Command, args []string) {\n\tif loginHelp {\n\t\tcmd.PrintUsage()\n\t}\n\tif len(args) != 0 {\n\t\tcmd.PrintShortUsage()\n\t}\n\n\tif len(organization) == 0 && len(token) == 0 {\n\t\tpromptUser(\"Organization: \", &organization, true)\n\t\tpromptUser(\"Token: \", &token, false)\n\t}\n\n\tcfg := &Config{\n\t\tAPIEndPoint: \"https:\/\/account.scaleway.com\/\",\n\t\tOrganization: strings.Trim(organization, \"\\n\"),\n\t\tToken: strings.Trim(token, \"\\n\"),\n\t}\n\n\tapi, err := NewScalewayAPI(cfg.APIEndPoint, cfg.Organization, cfg.Token)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create ScalewayAPI: %s\", err)\n\t}\n\terr = api.CheckCredentials()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to contact ScalewayAPI: %s\", err)\n\t}\n\n\tscwrc_path, err := GetConfigFilePath()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get scwrc config file path: %s\", err)\n\t}\n\tscwrc, err := os.OpenFile(scwrc_path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create scwrc config file: %s\", err)\n\t}\n\tdefer scwrc.Close()\n\tencoder := json.NewEncoder(scwrc)\n\tcfg.APIEndPoint = \"https:\/\/api.scaleway.com\/\"\n\terr = encoder.Encode(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to encode scw config file: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/dickeyxxx\/speakeasy\"\n)\n\nvar loginTopic = &Topic{\n\tName: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n}\n\nvar loginCmd = &Command{\n\tTopic: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n\tRun: func(ctx *Context) {\n\t\tlogin()\n\t},\n}\n\nvar authLoginCmd = &Command{\n\tTopic: \"auth\",\n\tCommand: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n\tRun: func(ctx *Context) {\n\t\tlogin()\n\t},\n}\n\nfunc login() {\n\tPrintln(\"Enter your Heroku credentials.\")\n\temail := getString(\"Email: \")\n\tpassword := getPassword()\n\n\ttoken, err := v2login(email, password, \"\")\n\t\/\/ TODO: use createOauthToken (v3 API)\n\t\/\/ token, err := createOauthToken(email, password, \"\")\n\tif err != nil {\n\t\tPrintError(err)\n\t\treturn\n\t}\n\tsaveOauthToken(email, token)\n\tPrintln(\"Logged in as \" + cyan(email))\n}\n\nfunc saveOauthToken(email, token string) {\n\tnetrc := getNetrc()\n\tnetrc.RemoveMachine(apiHost())\n\tnetrc.RemoveMachine(httpGitHost())\n\tnetrc.AddMachine(apiHost(), email, token)\n\tnetrc.AddMachine(httpGitHost(), email, token)\n\tExitIfError(netrc.Save())\n}\n\nfunc getString(prompt string) string {\n\tvar s string\n\tErr(prompt)\n\tif _, err := fmt.Scanln(&s); err != nil {\n\t\tif err.Error() == \"unexpected newline\" {\n\t\t\treturn getString(prompt)\n\t\t}\n\t\tif err.Error() == \"EOF\" {\n\t\t\tErrln()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tExitIfError(err)\n\t}\n\treturn s\n}\n\nfunc getPassword() string {\n\tpassword, err := speakeasy.Ask(\"Password (typing will be hidden): \")\n\tif err != nil {\n\t\tPrintError(err)\n\t}\n\treturn password\n}\n\nfunc v2login(email, password, secondFactor string) (string, error) {\n\treq := apiRequestBase(\"\")\n\treq.Method = \"POST\"\n\treq.Uri = req.Uri + \"\/login?username=\" + url.QueryEscape(email) + \"&password=\" + url.QueryEscape(password)\n\tif secondFactor != \"\" {\n\t\treq.AddHeader(\"Heroku-Two-Factor-Code\", secondFactor)\n\t}\n\tres, err := req.Do()\n\tExitIfError(err)\n\ttype Doc struct {\n\t\tAPIKey string `json:\"api_key\"`\n\t}\n\tvar doc Doc\n\tres.Body.FromJsonTo(&doc)\n\tif res.StatusCode == 403 {\n\t\treturn v2login(email, password, getString(\"Two-factor code: \"))\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn \"\", errors.New(\"Authentication failure.\")\n\t}\n\treturn doc.APIKey, nil\n}\n\nfunc createOauthToken(email, password, secondFactor string) (string, error) {\n\treq := apiRequest(\"\")\n\treq.Method = \"POST\"\n\treq.Uri = req.Uri + \"\/oauth\/authorizations\"\n\treq.BasicAuthUsername = email\n\treq.BasicAuthPassword = password\n\treq.Body = map[string]interface{}{\n\t\t\"scope\": []string{\"global\"},\n\t\t\"description\": \"Toolbelt CLI login from \" + time.Now().UTC().Format(time.RFC3339),\n\t\t\"expires_in\": 60 * 60 * 24 * 30, \/\/ 30 days\n\t}\n\tif secondFactor != \"\" {\n\t\treq.AddHeader(\"Heroku-Two-Factor-Code\", secondFactor)\n\t}\n\tres, err := req.Do()\n\tExitIfError(err)\n\ttype Doc struct {\n\t\tID string\n\t\tMessage string\n\t\tAccessToken struct {\n\t\t\tToken string\n\t\t} `json:\"access_token\"`\n\t}\n\tvar doc Doc\n\tres.Body.FromJsonTo(&doc)\n\tif doc.ID == \"two_factor\" {\n\t\treturn createOauthToken(email, password, getString(\"Two-factor code: \"))\n\t}\n\tif res.StatusCode != 201 {\n\t\treturn \"\", errors.New(doc.Message)\n\t}\n\treturn doc.AccessToken.Token, nil\n}\n<commit_msg>quit if error reading password<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/dickeyxxx\/speakeasy\"\n)\n\nvar loginTopic = &Topic{\n\tName: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n}\n\nvar loginCmd = &Command{\n\tTopic: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n\tRun: func(ctx *Context) {\n\t\tlogin()\n\t},\n}\n\nvar authLoginCmd = &Command{\n\tTopic: \"auth\",\n\tCommand: \"login\",\n\tDescription: \"Login with your Heroku credentials.\",\n\tRun: func(ctx *Context) {\n\t\tlogin()\n\t},\n}\n\nfunc login() {\n\tPrintln(\"Enter your Heroku credentials.\")\n\temail := getString(\"Email: \")\n\tpassword := getPassword()\n\n\ttoken, err := v2login(email, password, \"\")\n\t\/\/ TODO: use createOauthToken (v3 API)\n\t\/\/ token, err := createOauthToken(email, password, \"\")\n\tif err != nil {\n\t\tPrintError(err)\n\t\treturn\n\t}\n\tsaveOauthToken(email, token)\n\tPrintln(\"Logged in as \" + cyan(email))\n}\n\nfunc saveOauthToken(email, token string) {\n\tnetrc := getNetrc()\n\tnetrc.RemoveMachine(apiHost())\n\tnetrc.RemoveMachine(httpGitHost())\n\tnetrc.AddMachine(apiHost(), email, token)\n\tnetrc.AddMachine(httpGitHost(), email, token)\n\tExitIfError(netrc.Save())\n}\n\nfunc getString(prompt string) string {\n\tvar s string\n\tErr(prompt)\n\tif _, err := fmt.Scanln(&s); err != nil {\n\t\tif err.Error() == \"unexpected newline\" {\n\t\t\treturn getString(prompt)\n\t\t}\n\t\tif err.Error() == \"EOF\" {\n\t\t\tErrln()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tExitIfError(err)\n\t}\n\treturn s\n}\n\nfunc getPassword() string {\n\tpassword, err := speakeasy.Ask(\"Password (typing will be hidden): \")\n\tExitIfError(err)\n\treturn password\n}\n\nfunc v2login(email, password, secondFactor string) (string, error) {\n\treq := apiRequestBase(\"\")\n\treq.Method = \"POST\"\n\treq.Uri = req.Uri + \"\/login?username=\" + url.QueryEscape(email) + \"&password=\" + url.QueryEscape(password)\n\tif secondFactor != \"\" {\n\t\treq.AddHeader(\"Heroku-Two-Factor-Code\", secondFactor)\n\t}\n\tres, err := req.Do()\n\tExitIfError(err)\n\ttype Doc struct {\n\t\tAPIKey string `json:\"api_key\"`\n\t}\n\tvar doc Doc\n\tres.Body.FromJsonTo(&doc)\n\tif res.StatusCode == 403 {\n\t\treturn v2login(email, password, getString(\"Two-factor code: \"))\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn \"\", errors.New(\"Authentication failure.\")\n\t}\n\treturn doc.APIKey, nil\n}\n\nfunc createOauthToken(email, password, secondFactor string) (string, error) {\n\treq := apiRequest(\"\")\n\treq.Method = \"POST\"\n\treq.Uri = req.Uri + \"\/oauth\/authorizations\"\n\treq.BasicAuthUsername = email\n\treq.BasicAuthPassword = password\n\treq.Body = map[string]interface{}{\n\t\t\"scope\": []string{\"global\"},\n\t\t\"description\": \"Toolbelt CLI login from \" + time.Now().UTC().Format(time.RFC3339),\n\t\t\"expires_in\": 60 * 60 * 24 * 30, \/\/ 30 days\n\t}\n\tif secondFactor != \"\" {\n\t\treq.AddHeader(\"Heroku-Two-Factor-Code\", secondFactor)\n\t}\n\tres, err := req.Do()\n\tExitIfError(err)\n\ttype Doc struct {\n\t\tID string\n\t\tMessage string\n\t\tAccessToken struct {\n\t\t\tToken string\n\t\t} `json:\"access_token\"`\n\t}\n\tvar doc Doc\n\tres.Body.FromJsonTo(&doc)\n\tif doc.ID == \"two_factor\" {\n\t\treturn createOauthToken(email, password, getString(\"Two-factor code: \"))\n\t}\n\tif res.StatusCode != 201 {\n\t\treturn \"\", errors.New(doc.Message)\n\t}\n\treturn doc.AccessToken.Token, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudmutex\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tproject = \"marc-general\"\n\tbucket = \"cloudmutex\"\n\tobject = \"lock\"\n)\n\nvar (\n\tlimit = 10\n\tlockHolderMu = &sync.Mutex{}\n\tlockHolder = -1\n)\n\nfunc locker(done chan struct{}, t *testing.T, i int, m sync.Locker) {\n\tm.Lock()\n\tlockHolderMu.Lock()\n\tif lockHolder != -1 {\n\t\tt.Errorf(\"%d trying to lock, but already held by %d\",\n\t\t\ti, lockHolder)\n\t}\n\tlockHolder = i\n\tlockHolderMu.Unlock()\n\tt.Logf(\"locked by %d\", i)\n\ttime.Sleep(10 * time.Millisecond)\n\tm.Unlock()\n\tlockHolderMu.Lock()\n\tlockHolder = -1\n\tlockHolderMu.Unlock()\n\tdone <- struct{}{}\n}\n\nfunc TestParallel(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tdone := make(chan struct{}, 1)\n\ttotal := 0\n\tfor i := 0; i < limit; i++ {\n\t\ttotal++\n\t\tgo locker(done, t, i, m)\n\t}\n\tfor ; total > 0; total-- {\n\t\t<-done\n\t}\n}\n\n\/* TODO: add testing for timed lock (both success and timeout cases)\nfunc TestLockTimeout(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tLock(m, 3*time.Second)\n}\n*\/\n\n\/* TODO: add testing for timed unlock (both success and timeout cases)\nfunc TestUnlockTimeout(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tUnlock(m, 3*time.Second)\n}\n*\/\n<commit_msg>additional cleanup per review comments<commit_after>package cloudmutex\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tproject = \"marc-general\"\n\tbucket = \"cloudmutex\"\n\tobject = \"lock\"\n)\n\nvar (\n\tlimit = 10\n\n\tlockHolderMu sync.Mutex\n\tlockHolder = -1\n)\n\nfunc locker(done chan struct{}, t *testing.T, i int, m sync.Locker) {\n\tm.Lock()\n\tlockHolderMu.Lock()\n\tif lockHolder != -1 {\n\t\tt.Errorf(\"%d trying to lock, but already held by %d\",\n\t\t\ti, lockHolder)\n\t}\n\tlockHolder = i\n\tlockHolderMu.Unlock()\n\tt.Logf(\"locked by %d\", i)\n\ttime.Sleep(10 * time.Millisecond)\n\tm.Unlock()\n\tlockHolderMu.Lock()\n\tlockHolder = -1\n\tlockHolderMu.Unlock()\n\tdone <- struct{}{}\n}\n\nfunc TestParallel(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tdone := make(chan struct{}, 1)\n\ttotal := 0\n\tfor i := 0; i < limit; i++ {\n\t\ttotal++\n\t\tgo locker(done, t, i, m)\n\t}\n\tfor ; total > 0; total-- {\n\t\t<-done\n\t}\n}\n\n\/* TODO: add testing for timed lock (both success and timeout cases)\nfunc TestLockTimeout(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tLock(m, 3*time.Second)\n}\n*\/\n\n\/* TODO: add testing for timed unlock (both success and timeout cases)\nfunc TestUnlockTimeout(t *testing.T) {\n\tm, err := New(nil, project, bucket, object)\n\tif err != nil {\n\t\tt.Errorf(\"unable to allocate a cloudmutex global object\")\n\t\treturn\n\t}\n\tUnlock(m, 3*time.Second)\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n \"strings\"\n \"log\"\n \"regexp\"\n \"strconv\"\n\n \"io\/ioutil\"\n \"path\/filepath\"\n \"gopkg.in\/yaml.v2\"\n \/\/ \"os\/exec\"\n \/\/ dockerapi \"github.com\/fsouza\/go-dockerclient\"\n \/\/ \"encoding\/json\"\n)\n\nconst DEFAULT_ENDPOINT = \"unix:\/\/\/var\/run\/docker.sock\"\n\ntype Cluster struct {\n filename string\n\n config map[string]Container \/\/ rename to containers\n Application *Application\n\n graph *Graph\n \/\/application *Container\n Nodes []*Node\n\/\/ docker *dockerapi.Client\n}\n\nfunc NewCluster(conf string) *Cluster {\n return &Cluster{\n filename: conf,\n config: make(map[string]Container),\n graph : NewGraph(),\n Application: &Application{\n Docker: Docker{\n Hosts: []string{ DEFAULT_ENDPOINT },\n },\n },\n }\n}\n\nfunc CopyContainerConfig(container *Container) *Container {\n copy := &Container{}\n *copy = *container\n\n return copy;\n}\n\nfunc doLink(name string, num int) string {\n index := strconv.Itoa(num)\n return name + \"-\" + index + \":\" + name + \"-\" + index\n}\n\nfunc (c *Cluster) GetLinks(node *Node) []string {\n links := []string{}\n parents := c.graph.In[node]\n for _, parent := range parents {\n for i := 1; i <= parent.Container.Scale; i++ {\n link := doLink(parent.Container.Name, i)\n links = append(links, link);\n }\n }\n return links\n}\n\nfunc (c *Cluster) AddChangeDependant() {\n for _, node := range c.Nodes {\n \/\/ && len(node.Container.Exist)\n if node.Container.Changed {\n log.Println(\"Check \", node.ID)\n parents := c.graph.FindConnection(node, c.graph.In)\n if parents != nil {\n for _, parent := range parents {\n log.Println(\" - \", parent.ID)\n parent.Container.Changed = true\n }\n }\n }\n }\n}\n\nfunc (c *Cluster) AddContainer(name string, container Container) {\n container.Name = strings.TrimSpace( name );\n if container.Name == \"application\" {\n if container.Cluster != nil {\n c.Application.Cluster = container.Cluster\n }\n if container.Docker.Hosts != nil {\n c.Application.Docker.Hosts = container.Docker.Hosts\n }\n } else {\n node := c.graph.FindNodeByID(container.Name)\n if node == nil {\n node = NewNode(container.Name)\n c.graph.AddNode(node)\n }\n\n node.Container = CopyContainerConfig(&container)\n\n for _, link := range container.Links {\n link = strings.TrimSpace( link );\n childNode := c.graph.FindNodeByID(link)\n if childNode == nil {\n childNode = NewNode(link)\n c.graph.AddNode(childNode)\n }\n c.graph.Connect(node, childNode)\n }\n }\n}\n\nfunc (c *Cluster) CheckCluster() {\n for name, scale := range c.Application.Cluster {\n found := false\n for _, node := range c.graph.Nodes {\n if (name == node.Container.Name) {\n node.Container.Scale = scale\n found = true\n break\n }\n }\n if (!found) {\n log.Println(\"ERROR: node '\", name, \"' defined in application's cluster, but missing configuration\")\n }\n }\n}\n\nfunc (c *Cluster) ReadFile() {\n absFileName, _ := filepath.Abs(c.filename)\n yamlFile, err := ioutil.ReadFile(absFileName)\n\n if err != nil {\n \/\/panic(err)\n log.Fatal(\"Couldn't read yml: \", err);\n }\n\n err = yaml.Unmarshal(yamlFile, &c.config)\n if err != nil {\n \/\/panic(err)\n log.Fatal(\"Couldn't parse yml: \", err);\n }\n\n for key, container := range c.config {\n c.AddContainer(key, container)\n }\n\n c.CheckCluster()\n\n c.Nodes = c.graph.Topsort()\n}\n\n\nfunc (c *Cluster) FindNodeByID(name string) (*Node) {\n return c.graph.FindNodeByID(name)\n}\n\nfunc (c *Cluster) FindNodeByName(name string) (*Node, int) {\n nodeName, index := c.ParseName(name)\n return c.FindNodeByID(nodeName), index\n}\n\n\/\/ t.cluster.findNodeByNmae(name)\n\/\/ containerName, _ := c.graph.Name(name);\n\/\/ containerNode := c.graph.FindNodeByID(containerName)\n\n\/\/ func (c *Cluster) IsRunning(name string, id string) bool {\n\/\/ nodeName, num := c.ParseName(name)\n\/\/ node := c.graph.FindNodeByID(nodeName)\n\/\/ return node != nil\n\/\/ \/\/ {\n\/\/ \/\/ \/\/if active {\n\/\/ \/\/ \/\/ node.status.active = append(node.status.active, match[2])\n\/\/ \/\/ \/\/} else {\n\/\/ \/\/ \/\/ node.status.exist = append(node.status.exist, num)\n\/\/ \/\/ \/\/}\n\/\/ \/\/ \/\/ node.status.ids = append(node.status.ids, id)\n\n\/\/ \/\/ return true\n\/\/ \/\/ } else {\n\/\/ \/\/ return false\n\/\/ \/\/ }\n\/\/ }\n\nfunc (c *Cluster) ParseName(name string) (string, int) {\n r, _ := regexp.Compile(\"([a-z\\\\-]+)-([0-9]+)\")\n match := r.FindStringSubmatch(name)\n if len(match) == 3 {\n index, err := strconv.Atoi( match[2] )\n if err == nil {\n return match[1], index\n }\n }\n return name, -1\n}<commit_msg>change direction<commit_after>package cluster\n\nimport (\n \"strings\"\n \"log\"\n \"regexp\"\n \"strconv\"\n\n \"io\/ioutil\"\n \"path\/filepath\"\n \"gopkg.in\/yaml.v2\"\n \/\/ \"os\/exec\"\n \/\/ dockerapi \"github.com\/fsouza\/go-dockerclient\"\n \/\/ \"encoding\/json\"\n)\n\nconst DEFAULT_ENDPOINT = \"unix:\/\/\/var\/run\/docker.sock\"\n\ntype Cluster struct {\n filename string\n\n config map[string]Container \/\/ rename to containers\n Application *Application\n\n graph *Graph\n \/\/application *Container\n Nodes []*Node\n\/\/ docker *dockerapi.Client\n}\n\nfunc NewCluster(conf string) *Cluster {\n return &Cluster{\n filename: conf,\n config: make(map[string]Container),\n graph : NewGraph(),\n Application: &Application{\n Docker: Docker{\n Hosts: []string{ DEFAULT_ENDPOINT },\n },\n },\n }\n}\n\nfunc CopyContainerConfig(container *Container) *Container {\n copy := &Container{}\n *copy = *container\n\n return copy;\n}\n\nfunc doLink(name string, num int) string {\n index := strconv.Itoa(num)\n return name + \"-\" + index + \":\" + name + \"-\" + index\n}\n\nfunc (c *Cluster) GetLinks(node *Node) []string {\n links := []string{}\n parents := c.graph.In[node]\n for _, parent := range parents {\n for i := 1; i <= parent.Container.Scale; i++ {\n link := doLink(parent.Container.Name, i)\n links = append(links, link);\n }\n }\n return links\n}\n\nfunc (c *Cluster) AddChangeDependant() {\n for _, node := range c.Nodes {\n \/\/ && len(node.Container.Exist)\n if node.Container.Changed {\n log.Println(\"Check \", node.ID)\n parents := c.graph.FindConnection(node, c.graph.Out)\n if parents != nil {\n for _, parent := range parents {\n log.Println(\" - \", parent.ID)\n parent.Container.Changed = true\n }\n }\n }\n }\n}\n\nfunc (c *Cluster) AddContainer(name string, container Container) {\n container.Name = strings.TrimSpace( name );\n if container.Name == \"application\" {\n if container.Cluster != nil {\n c.Application.Cluster = container.Cluster\n }\n if container.Docker.Hosts != nil {\n c.Application.Docker.Hosts = container.Docker.Hosts\n }\n } else {\n node := c.graph.FindNodeByID(container.Name)\n if node == nil {\n node = NewNode(container.Name)\n c.graph.AddNode(node)\n }\n\n node.Container = CopyContainerConfig(&container)\n\n for _, link := range container.Links {\n link = strings.TrimSpace( link );\n childNode := c.graph.FindNodeByID(link)\n if childNode == nil {\n childNode = NewNode(link)\n c.graph.AddNode(childNode)\n }\n c.graph.Connect(node, childNode)\n }\n }\n}\n\nfunc (c *Cluster) CheckCluster() {\n for name, scale := range c.Application.Cluster {\n found := false\n for _, node := range c.graph.Nodes {\n if (name == node.Container.Name) {\n node.Container.Scale = scale\n found = true\n break\n }\n }\n if (!found) {\n log.Println(\"ERROR: node '\", name, \"' defined in application's cluster, but missing configuration\")\n }\n }\n}\n\nfunc (c *Cluster) ReadFile() {\n absFileName, _ := filepath.Abs(c.filename)\n yamlFile, err := ioutil.ReadFile(absFileName)\n\n if err != nil {\n \/\/panic(err)\n log.Fatal(\"Couldn't read yml: \", err);\n }\n\n err = yaml.Unmarshal(yamlFile, &c.config)\n if err != nil {\n \/\/panic(err)\n log.Fatal(\"Couldn't parse yml: \", err);\n }\n\n for key, container := range c.config {\n c.AddContainer(key, container)\n }\n\n c.CheckCluster()\n\n c.Nodes = c.graph.Topsort()\n}\n\n\nfunc (c *Cluster) FindNodeByID(name string) (*Node) {\n return c.graph.FindNodeByID(name)\n}\n\nfunc (c *Cluster) FindNodeByName(name string) (*Node, int) {\n nodeName, index := c.ParseName(name)\n return c.FindNodeByID(nodeName), index\n}\n\n\/\/ t.cluster.findNodeByNmae(name)\n\/\/ containerName, _ := c.graph.Name(name);\n\/\/ containerNode := c.graph.FindNodeByID(containerName)\n\n\/\/ func (c *Cluster) IsRunning(name string, id string) bool {\n\/\/ nodeName, num := c.ParseName(name)\n\/\/ node := c.graph.FindNodeByID(nodeName)\n\/\/ return node != nil\n\/\/ \/\/ {\n\/\/ \/\/ \/\/if active {\n\/\/ \/\/ \/\/ node.status.active = append(node.status.active, match[2])\n\/\/ \/\/ \/\/} else {\n\/\/ \/\/ \/\/ node.status.exist = append(node.status.exist, num)\n\/\/ \/\/ \/\/}\n\/\/ \/\/ \/\/ node.status.ids = append(node.status.ids, id)\n\n\/\/ \/\/ return true\n\/\/ \/\/ } else {\n\/\/ \/\/ return false\n\/\/ \/\/ }\n\/\/ }\n\nfunc (c *Cluster) ParseName(name string) (string, int) {\n r, _ := regexp.Compile(\"([a-z\\\\-]+)-([0-9]+)\")\n match := r.FindStringSubmatch(name)\n if len(match) == 3 {\n index, err := strconv.Atoi( match[2] )\n if err == nil {\n return match[1], index\n }\n }\n return name, -1\n}<|endoftext|>"} {"text":"<commit_before>package webhook\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n)\n\ntype Registry struct {\n\tm *monitor\n}\n\nfunc NewRegistry(mon *monitor) Registry {\n\treturn Registry{\n\t\tm: mon,\n\t}\n}\n\n\/\/ jsonResponse is an internal convenience function to write a json response\nfunc jsonResponse(rw http.ResponseWriter, code int, msg string) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\trw.WriteHeader(code)\n\trw.Write([]byte(fmt.Sprintf(`{\"message\":\"%s\"}`, msg)))\n}\n\n\/\/ get is an api call to return all the registered listeners\nfunc (r *Registry) GetRegistry(rw http.ResponseWriter, req *http.Request) {\n\tvar items []*W\n\tfor i := 0; i < r.m.list.Len(); i++ {\n\t\titems = append(items, r.m.list.Get(i))\n\t}\n\n\tif msg, err := json.Marshal(items); err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\trw.Write(msg)\n\t}\n}\n\n\/\/ parspIP returns just the ip address from \"IP:port\"\nfunc parseIP(s string) (string, error) {\n\tip1, _, err := net.SplitHostPort(s)\n\tif err == nil {\n\t\treturn ip1, nil\n\t}\n\n\tip2 := net.ParseIP(s)\n\tif ip2 == nil {\n\t\treturn \"\", errors.New(\"invalid IP\")\n\t}\n\n\treturn ip2.String(), nil\n}\n\n\/\/ registrationValidation checks W value requirements\nfunc (w *W) registrationValidation() (string, int) {\n\tif w.Config.URL == \"\" {\n\t\treturn \"invalid Config URL\", http.StatusBadRequest\n\t}\n\tif w.Config.ContentType == \"\" || w.Config.ContentType != \"json\" {\n\t\treturn \"invalid content_type\", http.StatusBadRequest\n\t}\n\tif len(w.Matcher.DeviceId) == 0 {\n\t\tw.Matcher.DeviceId = []string{\".*\"} \/\/ match anything\n\t}\n\tif len(w.Events) == 0 {\n\t\treturn \"invalid events\", http.StatusBadRequest\n\t}\n\n\treturn \"\", http.StatusOK\n}\n\n\/\/ update is an api call to processes a listenener registration for adding and updating\nfunc (r *Registry) UpdateRegistry(rw http.ResponseWriter, req *http.Request) {\n\tpayload, err := ioutil.ReadAll(req.Body)\n\treq.Body.Close()\n\n\tw := new(W)\n\terr = json.Unmarshal(payload, w)\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tissue, code := w.registrationValidation()\n\tif issue != \"\" || code != http.StatusOK {\n\t\tjsonResponse(rw, code, issue)\n\t\treturn\n\t}\n\n\t\/\/ update the requesters address\n\tip, err := parseIP(req.RemoteAddr)\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tw.Address = ip\n\n\t\/\/ send W as a single item array\n\tmsg, err := json.Marshal([1]W{*w})\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tr.m.Notifier.PublishMessage(string(msg))\n}\n<commit_msg>adding exported channel to listen for changes.<commit_after>package webhook\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n)\n\ntype Registry struct {\n\tm *monitor\n\tChanges chan []W\n}\n\nfunc NewRegistry(mon *monitor) Registry {\n\treturn Registry{\n\t\tm: mon,\n\t\tChanges: mon.changes,\n\t}\n}\n\n\/\/ jsonResponse is an internal convenience function to write a json response\nfunc jsonResponse(rw http.ResponseWriter, code int, msg string) {\n\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\trw.WriteHeader(code)\n\trw.Write([]byte(fmt.Sprintf(`{\"message\":\"%s\"}`, msg)))\n}\n\n\/\/ get is an api call to return all the registered listeners\nfunc (r *Registry) GetRegistry(rw http.ResponseWriter, req *http.Request) {\n\tvar items []*W\n\tfor i := 0; i < r.m.list.Len(); i++ {\n\t\titems = append(items, r.m.list.Get(i))\n\t}\n\n\tif msg, err := json.Marshal(items); err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\trw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\trw.Write(msg)\n\t}\n}\n\n\/\/ parspIP returns just the ip address from \"IP:port\"\nfunc parseIP(s string) (string, error) {\n\tip1, _, err := net.SplitHostPort(s)\n\tif err == nil {\n\t\treturn ip1, nil\n\t}\n\n\tip2 := net.ParseIP(s)\n\tif ip2 == nil {\n\t\treturn \"\", errors.New(\"invalid IP\")\n\t}\n\n\treturn ip2.String(), nil\n}\n\n\/\/ registrationValidation checks W value requirements\nfunc (w *W) registrationValidation() (string, int) {\n\tif w.Config.URL == \"\" {\n\t\treturn \"invalid Config URL\", http.StatusBadRequest\n\t}\n\tif w.Config.ContentType == \"\" || w.Config.ContentType != \"json\" {\n\t\treturn \"invalid content_type\", http.StatusBadRequest\n\t}\n\tif len(w.Matcher.DeviceId) == 0 {\n\t\tw.Matcher.DeviceId = []string{\".*\"} \/\/ match anything\n\t}\n\tif len(w.Events) == 0 {\n\t\treturn \"invalid events\", http.StatusBadRequest\n\t}\n\n\treturn \"\", http.StatusOK\n}\n\n\/\/ update is an api call to processes a listenener registration for adding and updating\nfunc (r *Registry) UpdateRegistry(rw http.ResponseWriter, req *http.Request) {\n\tpayload, err := ioutil.ReadAll(req.Body)\n\treq.Body.Close()\n\n\tw := new(W)\n\terr = json.Unmarshal(payload, w)\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tissue, code := w.registrationValidation()\n\tif issue != \"\" || code != http.StatusOK {\n\t\tjsonResponse(rw, code, issue)\n\t\treturn\n\t}\n\n\t\/\/ update the requesters address\n\tip, err := parseIP(req.RemoteAddr)\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tw.Address = ip\n\n\t\/\/ send W as a single item array\n\tmsg, err := json.Marshal([1]W{*w})\n\tif err != nil {\n\t\tjsonResponse(rw, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tr.m.Notifier.PublishMessage(string(msg))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/trackit\/jsonlog\"\n\n\t\"github.com\/trackit\/trackit\/awsSession\"\n\t\"github.com\/trackit\/trackit\/config\"\n\t\"github.com\/trackit\/trackit\/db\"\n\t\"github.com\/trackit\/trackit\/es\"\n)\n\ntype (\n\tMessageData struct {\n\t\tTaskName string `json:\"task_name\"`\n\t\tParameters []string `json:\"parameters\"`\n\t\tLogStream string `json:\"cloudwatch_log_stream\"`\n\t}\n\n\tLogInput struct {\n\t\tMessage string\n\t\tTimestamp int64\n\t}\n)\n\nconst visibilityTimeoutInHours = 10\nconst visibilityTimeoutTaskFailedInMinutes = 20\nconst retryTaskOnFailure = true\nconst waitTimeInSeconds = 20\nconst esHealthcheckTimeoutInMinutes = 10\nconst esHealthcheckWaitTimeRetryInMinutes = 10\n\nfunc taskWorker(ctx context.Context) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tlogger.Info(\"Running task 'worker'.\", nil)\n\tsqsq := sqs.New(awsSession.Session)\n\tcwl := cloudwatchlogs.New(awsSession.Session)\n\n\tlogger.Info(\"Retrieving SQS Queue URL.\", nil)\n\tqueueUrl, err := retrieveQueueUrl(ctx, sqsq)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Info(\"Waiting for messages.\", nil)\n\n\tvar logsBuffer bytes.Buffer\n\twriter := io.MultiWriter(os.Stdout, &logsBuffer)\n\tctx = jsonlog.ContextWithLogger(ctx, logger.WithWriter(writer))\n\tlogger = jsonlog.LoggerFromContextOrDefault(ctx)\n\n\ttimeoutStr := strconv.Itoa(esHealthcheckTimeoutInMinutes) + \"m\"\n\n\tfor {\n\t\tmessage, receiptHandle, err := getNextMessage(ctx, sqsq, queueUrl)\n\t\tif err != nil {\n\t\t\tlogsBuffer.Reset()\n\t\t\tcontinue\n\t\t}\n\n\t\tlogger.Info(\"Received message, checking ES health and executing task.\", map[string]interface{}{\n\t\t\t\"message\": message,\n\t\t})\n\n\t\tif res, err := es.Client.ClusterHealth().Timeout(timeoutStr).Do(ctx); err != nil || res.TimedOut {\n\t\t\tlogger.Error(\"ES is not reachable.\", map[string]interface{}{\n\t\t\t\t\"timeout\": timeoutStr,\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\tlogsBuffer.Reset()\n\t\t\t_ = changeMessageVisibility(ctx, sqsq, queueUrl, receiptHandle, 0)\n\t\t\ttime.Sleep(time.Minute * esHealthcheckWaitTimeRetryInMinutes)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := db.OpenWorker(); err != nil {\n\t\t\tlogger.Error(\"Database is not reachable.\", map[string]interface{}{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tctx = context.WithValue(ctx, \"taskParameters\", message.Parameters)\n\n\t\tif task, ok := tasks[message.TaskName]; ok {\n\t\t\terr = executeTask(ctx, task)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Error while executing task. Setting visibility timeout to task failed timeout.\", map[string]interface{}{\n\t\t\t\t\t\"message\": message,\n\t\t\t\t\t\"taskFailedTimeoutInMinutes\": 60 * visibilityTimeoutTaskFailedInMinutes,\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t})\n\t\t\t\tif retryTaskOnFailure {\n\t\t\t\t\t_ = changeMessageVisibility(ctx, sqsq, queueUrl, receiptHandle, 60*visibilityTimeoutTaskFailedInMinutes)\n\t\t\t\t} else {\n\t\t\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"Task done, acknowledging.\", nil)\n\t\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t\t}\n\t\t\t_ = flushCloudwatchLogEvents(ctx, cwl, message, &logsBuffer, err == nil)\n\t\t} else {\n\t\t\tlogger.Error(\"Unable to find requested task.\", map[string]interface{}{\n\t\t\t\t\"task_name\": message.TaskName,\n\t\t\t})\n\t\t\tlogsBuffer.Reset()\n\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t}\n\t\tdb.Close()\n\t}\n}\n\nfunc executeTask(ctx context.Context, task func(context.Context) error) (err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\n\tdefer func() {\n\t\trec := recover()\n\t\tif rec != nil {\n\t\t\tlogger.Error(\"Task crashed, handling error.\", map[string]interface{}{\n\t\t\t\t\"error\": rec,\n\t\t\t})\n\t\t\terr = errors.New(\"task: crashed\")\n\t\t}\n\t}()\n\treturn task(ctx)\n}\nfunc getNextMessage(ctx context.Context, sqsq *sqs.SQS, queueUrl *string) (MessageData, *string, error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tmessageData := MessageData{}\n\tmsgResult, err := sqsq.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: queueUrl,\n\t\tMaxNumberOfMessages: aws.Int64(1),\n\t\tVisibilityTimeout: aws.Int64(60 * 60 * visibilityTimeoutInHours),\n\t\tWaitTimeSeconds: aws.Int64(waitTimeInSeconds),\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"An error occurred while waiting for message.\", map[string]interface{}{\n\t\t\t\"queueUrl\": *queueUrl,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn messageData, nil, err\n\t}\n\tif len(msgResult.Messages) == 0 {\n\t\treturn messageData, nil, errors.New(\"queue: no message in queue.\")\n\t}\n\terr = json.Unmarshal([]byte(*msgResult.Messages[0].Body), &messageData)\n\tif err != nil {\n\t\tlogger.Error(\"Unable to decode message.\", map[string]interface{}{\n\t\t\t\"messageBody\": *msgResult.Messages[0].Body,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn messageData, nil, err\n\t}\n\treturn messageData, msgResult.Messages[0].ReceiptHandle, nil\n}\n\nfunc flushCloudwatchLogEvents(ctx context.Context, cwl *cloudwatchlogs.CloudWatchLogs, message MessageData, logsBuffer *bytes.Buffer, success bool) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\n\tdefer func() {\n\t\tlogsBuffer.Reset()\n\t}()\n\n\tif config.Environment != \"prod\" && config.Environment != \"stg\" {\n\t\treturn nil\n\t}\n\n\tlogGroup := config.Environment + \"\/task-logs\/\" + message.TaskName\n\n\tlogStreamPrefix := message.LogStream\n\tif len(logStreamPrefix) == 0 {\n\t\tlogStreamPrefix = \"generic\"\n\t}\n\tvar logStreamSuffix string\n\tif success {\n\t\tlogStreamSuffix = \"succeeded\"\n\t} else {\n\t\tlogStreamSuffix = \"failed\"\n\t}\n\tlogStream := logStreamPrefix + \"\/\" + uuid.New().String() + \"\/\" + logStreamSuffix\n\n\t_, err := cwl.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(logGroup),\n\t\tLogStreamName: aws.String(logStream),\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to create log stream. Skipping logs sending.\", map[string]interface{}{\n\t\t\t\"logGroupName\": logGroup,\n\t\t\t\"logStreamName\": logStream,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\tlogInputs := decodeLogBuffer(*logsBuffer)\n\n\tvar logEvents []*cloudwatchlogs.InputLogEvent\n\tfor _, logInput := range logInputs {\n\t\tlogEvents = append(logEvents, &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(logInput.Message),\n\t\t\tTimestamp: aws.Int64(logInput.Timestamp),\n\t\t})\n\t}\n\n\t_, err = cwl.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{\n\t\tLogGroupName: aws.String(logGroup),\n\t\tLogStreamName: aws.String(logStream),\n\t\tLogEvents: logEvents,\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"Unable to put log stream events.\", map[string]interface{}{\n\t\t\t\"logGroupName\": logGroup,\n\t\t\t\"logStreamName\": logStream,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc decodeLogBuffer(logsBuffer bytes.Buffer) []LogInput {\n\tlines := strings.Split(logsBuffer.String(), \"\\n\")\n\n\tvar logInputs []LogInput\n\n\tfor _, line := range lines {\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogInputs = append(logInputs, LogInput{\n\t\t\tMessage: line,\n\t\t\tTimestamp: getLogTimestamp(line),\n\t\t})\n\t}\n\n\treturn logInputs\n}\n\nfunc getLogTimestamp(message string) int64 {\n\tvar logJson map[string]interface{}\n\n\terr := json.Unmarshal([]byte(message), &logJson)\n\tif err != nil {\n\t\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n\t}\n\n\tlogTime, err := time.Parse(time.RFC3339, logJson[\"time\"].(string))\n\tif err != nil {\n\t\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n\t}\n\n\treturn logTime.UnixNano() \/ int64(time.Millisecond)\n}\n\nfunc changeMessageVisibility(ctx context.Context, sqsq *sqs.SQS, queueUrl *string, receiptHandle *string, timeout int64) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\t_, err := sqsq.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{\n\t\tReceiptHandle: receiptHandle,\n\t\tQueueUrl: queueUrl,\n\t\tVisibilityTimeout: aws.Int64(timeout),\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"Unable to reset timeout.\", map[string]interface{}{\n\t\t\t\"messageHandle\": *receiptHandle,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc acknowledgeMessage(ctx context.Context, sqsq *sqs.SQS, queueUrl *string, receiptHandle *string) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\t_, err := sqsq.DeleteMessage(&sqs.DeleteMessageInput{\n\t\tQueueUrl: queueUrl,\n\t\tReceiptHandle: receiptHandle,\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to acknowledge message.\", map[string]interface{}{\n\t\t\t\"messageHandle\": *receiptHandle,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc retrieveQueueUrl(ctx context.Context, sqsq *sqs.SQS) (queueUrl *string, err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\turlResult, err := sqsq.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: aws.String(config.SQSQueueName),\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to get queue URL from name.\", map[string]interface{}{\n\t\t\t\"queueName\": config.SQSQueueName,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn nil, err\n\t}\n\treturn urlResult.QueueUrl, nil\n}\n\nfunc paramsFromContextOrArgs(ctx context.Context) []string {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\targs, ok := ctx.Value(\"taskParameters\").([]string)\n\tif !ok {\n\t\tlogger.Info(\"Retrieving task parameters from args as context does not contain any parameters.\", nil)\n\t\targs = flag.Args()\n\t}\n\treturn args\n}\n<commit_msg>fix(worker): retry later on database connection error<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatchlogs\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/trackit\/jsonlog\"\n\n\t\"github.com\/trackit\/trackit\/awsSession\"\n\t\"github.com\/trackit\/trackit\/config\"\n\t\"github.com\/trackit\/trackit\/db\"\n\t\"github.com\/trackit\/trackit\/es\"\n)\n\ntype (\n\tMessageData struct {\n\t\tTaskName string `json:\"task_name\"`\n\t\tParameters []string `json:\"parameters\"`\n\t\tLogStream string `json:\"cloudwatch_log_stream\"`\n\t}\n\n\tLogInput struct {\n\t\tMessage string\n\t\tTimestamp int64\n\t}\n)\n\nconst visibilityTimeoutInHours = 10\nconst visibilityTimeoutTaskFailedInMinutes = 20\nconst retryTaskOnFailure = true\nconst waitTimeInSeconds = 20\nconst esHealthcheckTimeoutInMinutes = 10\nconst esHealthcheckWaitTimeRetryInMinutes = 10\n\nfunc taskWorker(ctx context.Context) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tlogger.Info(\"Running task 'worker'.\", nil)\n\tsqsq := sqs.New(awsSession.Session)\n\tcwl := cloudwatchlogs.New(awsSession.Session)\n\n\tlogger.Info(\"Retrieving SQS Queue URL.\", nil)\n\tqueueUrl, err := retrieveQueueUrl(ctx, sqsq)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Info(\"Waiting for messages.\", nil)\n\n\tvar logsBuffer bytes.Buffer\n\twriter := io.MultiWriter(os.Stdout, &logsBuffer)\n\tctx = jsonlog.ContextWithLogger(ctx, logger.WithWriter(writer))\n\tlogger = jsonlog.LoggerFromContextOrDefault(ctx)\n\n\ttimeoutStr := strconv.Itoa(esHealthcheckTimeoutInMinutes) + \"m\"\n\n\tfor {\n\t\tmessage, receiptHandle, err := getNextMessage(ctx, sqsq, queueUrl)\n\t\tif err != nil {\n\t\t\tlogsBuffer.Reset()\n\t\t\tcontinue\n\t\t}\n\n\t\tlogger.Info(\"Received message, checking ES health and executing task.\", map[string]interface{}{\n\t\t\t\"message\": message,\n\t\t})\n\n\t\tif res, err := es.Client.ClusterHealth().Timeout(timeoutStr).Do(ctx); err != nil || res.TimedOut {\n\t\t\tlogger.Error(\"ES is not reachable.\", map[string]interface{}{\n\t\t\t\t\"timeout\": timeoutStr,\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\tlogsBuffer.Reset()\n\t\t\t_ = changeMessageVisibility(ctx, sqsq, queueUrl, receiptHandle, 0)\n\t\t\ttime.Sleep(time.Minute * esHealthcheckWaitTimeRetryInMinutes)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := db.OpenWorker(); err != nil {\n\t\t\tlogger.Error(\"Database is not reachable.\", map[string]interface{}{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\t_ = changeMessageVisibility(ctx, sqsq, queueUrl, receiptHandle, 60*visibilityTimeoutTaskFailedInMinutes)\n\t\t\tcontinue\n\t\t}\n\n\t\tctx = context.WithValue(ctx, \"taskParameters\", message.Parameters)\n\n\t\tif task, ok := tasks[message.TaskName]; ok {\n\t\t\terr = executeTask(ctx, task)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Error while executing task. Setting visibility timeout to task failed timeout.\", map[string]interface{}{\n\t\t\t\t\t\"message\": message,\n\t\t\t\t\t\"taskFailedTimeoutInMinutes\": 60 * visibilityTimeoutTaskFailedInMinutes,\n\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t})\n\t\t\t\tif retryTaskOnFailure {\n\t\t\t\t\t_ = changeMessageVisibility(ctx, sqsq, queueUrl, receiptHandle, 60*visibilityTimeoutTaskFailedInMinutes)\n\t\t\t\t} else {\n\t\t\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"Task done, acknowledging.\", nil)\n\t\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t\t}\n\t\t\t_ = flushCloudwatchLogEvents(ctx, cwl, message, &logsBuffer, err == nil)\n\t\t} else {\n\t\t\tlogger.Error(\"Unable to find requested task.\", map[string]interface{}{\n\t\t\t\t\"task_name\": message.TaskName,\n\t\t\t})\n\t\t\tlogsBuffer.Reset()\n\t\t\t_ = acknowledgeMessage(ctx, sqsq, queueUrl, receiptHandle)\n\t\t}\n\t\tdb.Close()\n\t}\n}\n\nfunc executeTask(ctx context.Context, task func(context.Context) error) (err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\n\tdefer func() {\n\t\trec := recover()\n\t\tif rec != nil {\n\t\t\tlogger.Error(\"Task crashed, handling error.\", map[string]interface{}{\n\t\t\t\t\"error\": rec,\n\t\t\t})\n\t\t\terr = errors.New(\"task: crashed\")\n\t\t}\n\t}()\n\treturn task(ctx)\n}\nfunc getNextMessage(ctx context.Context, sqsq *sqs.SQS, queueUrl *string) (MessageData, *string, error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tmessageData := MessageData{}\n\tmsgResult, err := sqsq.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: queueUrl,\n\t\tMaxNumberOfMessages: aws.Int64(1),\n\t\tVisibilityTimeout: aws.Int64(60 * 60 * visibilityTimeoutInHours),\n\t\tWaitTimeSeconds: aws.Int64(waitTimeInSeconds),\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"An error occurred while waiting for message.\", map[string]interface{}{\n\t\t\t\"queueUrl\": *queueUrl,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn messageData, nil, err\n\t}\n\tif len(msgResult.Messages) == 0 {\n\t\treturn messageData, nil, errors.New(\"queue: no message in queue.\")\n\t}\n\terr = json.Unmarshal([]byte(*msgResult.Messages[0].Body), &messageData)\n\tif err != nil {\n\t\tlogger.Error(\"Unable to decode message.\", map[string]interface{}{\n\t\t\t\"messageBody\": *msgResult.Messages[0].Body,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn messageData, nil, err\n\t}\n\treturn messageData, msgResult.Messages[0].ReceiptHandle, nil\n}\n\nfunc flushCloudwatchLogEvents(ctx context.Context, cwl *cloudwatchlogs.CloudWatchLogs, message MessageData, logsBuffer *bytes.Buffer, success bool) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\n\tdefer func() {\n\t\tlogsBuffer.Reset()\n\t}()\n\n\tif config.Environment != \"prod\" && config.Environment != \"stg\" {\n\t\treturn nil\n\t}\n\n\tlogGroup := config.Environment + \"\/task-logs\/\" + message.TaskName\n\n\tlogStreamPrefix := message.LogStream\n\tif len(logStreamPrefix) == 0 {\n\t\tlogStreamPrefix = \"generic\"\n\t}\n\tvar logStreamSuffix string\n\tif success {\n\t\tlogStreamSuffix = \"succeeded\"\n\t} else {\n\t\tlogStreamSuffix = \"failed\"\n\t}\n\tlogStream := logStreamPrefix + \"\/\" + uuid.New().String() + \"\/\" + logStreamSuffix\n\n\t_, err := cwl.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(logGroup),\n\t\tLogStreamName: aws.String(logStream),\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to create log stream. Skipping logs sending.\", map[string]interface{}{\n\t\t\t\"logGroupName\": logGroup,\n\t\t\t\"logStreamName\": logStream,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\tlogInputs := decodeLogBuffer(*logsBuffer)\n\n\tvar logEvents []*cloudwatchlogs.InputLogEvent\n\tfor _, logInput := range logInputs {\n\t\tlogEvents = append(logEvents, &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(logInput.Message),\n\t\t\tTimestamp: aws.Int64(logInput.Timestamp),\n\t\t})\n\t}\n\n\t_, err = cwl.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{\n\t\tLogGroupName: aws.String(logGroup),\n\t\tLogStreamName: aws.String(logStream),\n\t\tLogEvents: logEvents,\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"Unable to put log stream events.\", map[string]interface{}{\n\t\t\t\"logGroupName\": logGroup,\n\t\t\t\"logStreamName\": logStream,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc decodeLogBuffer(logsBuffer bytes.Buffer) []LogInput {\n\tlines := strings.Split(logsBuffer.String(), \"\\n\")\n\n\tvar logInputs []LogInput\n\n\tfor _, line := range lines {\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogInputs = append(logInputs, LogInput{\n\t\t\tMessage: line,\n\t\t\tTimestamp: getLogTimestamp(line),\n\t\t})\n\t}\n\n\treturn logInputs\n}\n\nfunc getLogTimestamp(message string) int64 {\n\tvar logJson map[string]interface{}\n\n\terr := json.Unmarshal([]byte(message), &logJson)\n\tif err != nil {\n\t\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n\t}\n\n\tlogTime, err := time.Parse(time.RFC3339, logJson[\"time\"].(string))\n\tif err != nil {\n\t\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n\t}\n\n\treturn logTime.UnixNano() \/ int64(time.Millisecond)\n}\n\nfunc changeMessageVisibility(ctx context.Context, sqsq *sqs.SQS, queueUrl *string, receiptHandle *string, timeout int64) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\t_, err := sqsq.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{\n\t\tReceiptHandle: receiptHandle,\n\t\tQueueUrl: queueUrl,\n\t\tVisibilityTimeout: aws.Int64(timeout),\n\t})\n\n\tif err != nil {\n\t\tlogger.Error(\"Unable to reset timeout.\", map[string]interface{}{\n\t\t\t\"messageHandle\": *receiptHandle,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc acknowledgeMessage(ctx context.Context, sqsq *sqs.SQS, queueUrl *string, receiptHandle *string) error {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\t_, err := sqsq.DeleteMessage(&sqs.DeleteMessageInput{\n\t\tQueueUrl: queueUrl,\n\t\tReceiptHandle: receiptHandle,\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to acknowledge message.\", map[string]interface{}{\n\t\t\t\"messageHandle\": *receiptHandle,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc retrieveQueueUrl(ctx context.Context, sqsq *sqs.SQS) (queueUrl *string, err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\turlResult, err := sqsq.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: aws.String(config.SQSQueueName),\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Unable to get queue URL from name.\", map[string]interface{}{\n\t\t\t\"queueName\": config.SQSQueueName,\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn nil, err\n\t}\n\treturn urlResult.QueueUrl, nil\n}\n\nfunc paramsFromContextOrArgs(ctx context.Context) []string {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\targs, ok := ctx.Value(\"taskParameters\").([]string)\n\tif !ok {\n\t\tlogger.Info(\"Retrieving task parameters from args as context does not contain any parameters.\", nil)\n\t\targs = flag.Args()\n\t}\n\treturn args\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\tderpiSearch \"github.com\/PonyvilleFM\/aura\/cmd\/aerial\/derpi\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\/pvl\"\n\tpvfmschedule \"github.com\/PonyvilleFM\/aura\/pvfm\/schedule\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\/station\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\t\"github.com\/tebeka\/strftime\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().Unix())\n}\n\n\/\/ randomRange gives a random whole integer between the given integers [min, max)\nfunc randomRange(min, max int) int {\n\treturn rand.Intn(max-min) + min\n}\n\nfunc pesterLink(s *discordgo.Session, m *discordgo.MessageCreate) {\n\tif musicLinkRegex.Match([]byte(m.Content)) {\n\t\ti, err := pvfm.GetStats()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif i.IsDJLive() && m.ChannelID == youtubeSpamRoomID {\n\t\t\ts.ChannelMessageSend(m.ChannelID, \"Please be mindful sharing links to music when a DJ is performing. Thanks!\")\n\t\t}\n\t}\n}\n\nfunc np(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ti, err := pvfm.GetStats()\n\tif err != nil {\n\t\tlog.Printf(\"Can't get info: %v, failing over to plan b\", err)\n\t\treturn doStationRequest(s, m, parv)\n\t}\n\n\tresult := []string{}\n\n\tif i.Main.Nowplaying == \"Fetching info...\" {\n\t\tlog.Println(\"Main information was bad, fetching from station directly...\")\n\n\t\terr := doStationRequest(s, m, parv)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t} else {\n\t\tresult = append(result, \"📻 **Now Playing on PVFM**\\n\")\n\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Main 🎵 %s\\n\",\n\t\t\ti.Main.Nowplaying,\n\t\t))\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Chill 🎵 %s\\n\",\n\t\t\ti.Secondary.Nowplaying,\n\t\t))\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Free! 🎵 %s\",\n\t\t\ti.MusicOnly.Nowplaying,\n\t\t))\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc dj(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tcal, err := pvl.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnow := cal.Result[0]\n\tresult := []string{}\n\n\tlocalTime := time.Now()\n\tthentime := time.Unix(now.StartTime, 0)\n\tif thentime.Unix() < localTime.Unix() {\n\t\tresult = append(result, fmt.Sprintf(\"Currently live: %s\\n\", now.Title))\n\t\tnow = cal.Result[1]\n\t}\n\n\tnowTime := time.Unix(now.StartTime, 0).UTC()\n\tzone, _ := nowTime.Zone()\n\tfmttime, _ := strftime.Format(\"%Y-%m-%d %H:%M:%S\", nowTime)\n\n\tresult = append(result, fmt.Sprintf(\"Next event: %s at %s \\x02%s\\x02\",\n\t\tnow.Title,\n\t\tfmttime,\n\t\tzone,\n\t))\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc stats(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ti, err := pvfm.GetStats()\n\tif err != nil {\n\t\tlog.Printf(\"Error getting the station info: %v, falling back to plan b\", err)\n\t\treturn doStatsFromStation(s, m, parv)\n\t}\n\n\tst, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar l int\n\tvar peak int\n\n\tfor _, source := range st.Icestats.Source {\n\t\tl = l + source.Listeners\n\t\tpeak = peak + source.ListenerPeak\n\t}\n\n\tresult := []string{\n\t\tfmt.Sprintf(\n\t\t\t\"Current listeners across all streams: %d with a maximum of %d!\",\n\t\t\ti.Listeners.Listeners, peak,\n\t\t),\n\t\tfmt.Sprintf(\n\t\t\t\"Detailed: Main: %d listeners, Two: %d listeners, Free: %d listeners\",\n\t\t\ti.Main.Listeners, i.Secondary.Listeners, i.MusicOnly.Listeners,\n\t\t),\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\n\treturn nil\n}\n\nfunc schedule(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tresult := []string{}\n\tschEntries, err := pvfmschedule.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, entry := range schEntries {\n\t\tresult = append(result, entry.String())\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc doStationRequest(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tstats, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult := fmt.Sprintf(\n\t\t\"Now playing: %s - %s on Ponyville FM!\",\n\t\tstats.Icestats.Source[0].Title,\n\t\tstats.Icestats.Source[0].Artist,\n\t)\n\n\ts.ChannelMessageSend(m.ChannelID, result)\n\treturn nil\n}\n\nfunc doStatsFromStation(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tst, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar l int\n\tvar peak int\n\n\tfor _, source := range st.Icestats.Source {\n\t\tl = l + source.Listeners\n\t\tpeak = peak + source.ListenerPeak\n\t}\n\n\tresult := []string{\n\t\tfmt.Sprintf(\"Current listeners: %d with a maximum of %d!\", l, peak),\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc curTime(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ts.ChannelMessageSend(m.ChannelID, fmt.Sprintf(\"The time currently is %s\", time.Now().UTC().Format(\"2006-01-02 15:04:05 UTC\")))\n\n\treturn nil\n}\n\nfunc streams(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tcurrentMeta, metaErr := station.GetStats()\n\tif metaErr != nil {\n\t\ts.ChannelMessageSend(m.ChannelID, \"Error receiving pvfm metadata\")\n\t\treturn metaErr\n\t}\n\n\toutputString := \"**PVFM Servers:**\\n\"\n\n\tfor _, element := range currentMeta.Icestats.Source {\n\t\toutputString += \":musical_note: \" + element.ServerDescription + \":\\n<\" + strings.Replace(element.Listenurl, \"aerial\", \"dj.bronyradio.com\", -1) + \">\\n\"\n\t}\n\n\toutputString += \"**Luna Radio Server:**\\n\"\n\toutputString += \"\\n:musical_note: Luna Radio MP3 128Kbps Stream:\\n<http:\/\/radio.ponyvillelive.com:8002\/stream.mp3>\\n:musical_note: Luna Radio Mobile MP3 64Kbps Stream:\\n<http:\/\/radio.ponyvillelive.com:8002\/mobile?;stream.mp3>\\n\"\n\n\toutputString += \"\\n:cd: DJ Recordings:\\n<https:\/\/pvfmsets.cf\/var\/93252527679639552\/>9\" + \"\\n:cd: Legacy DJ Recordings:\\n<http:\/\/darkling.darkwizards.com\/wang\/BronyRadio\/?M=D>\"\n\n\ts.ChannelMessageSend(m.ChannelID, outputString)\n\n\treturn nil\n}\n\nfunc derpi(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tsearchResults, err := derpiSearch.SearchDerpi(m.Content[7:len(m.Content)])\n\tif err != nil {\n\t\ts.ChannelMessageSend(m.ChannelID, \"An error occured.\")\n\t\treturn err\n\t}\n\tif len(searchResults.Search) < 1 {\n\t\ts.ChannelMessageSend(m.ChannelID, \"Error: No results\")\n\t\treturn nil\n\t}\n\ts.ChannelMessageSend(m.ChannelID, \"http:\"+searchResults.Search[randomRange(0, len(searchResults.Search))].Image)\n\treturn nil\n}\n<commit_msg>...plural noun<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\tderpiSearch \"github.com\/PonyvilleFM\/aura\/cmd\/aerial\/derpi\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\/pvl\"\n\tpvfmschedule \"github.com\/PonyvilleFM\/aura\/pvfm\/schedule\"\n\t\"github.com\/PonyvilleFM\/aura\/pvfm\/station\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\t\"github.com\/tebeka\/strftime\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().Unix())\n}\n\n\/\/ randomRange gives a random whole integer between the given integers [min, max)\nfunc randomRange(min, max int) int {\n\treturn rand.Intn(max-min) + min\n}\n\nfunc pesterLink(s *discordgo.Session, m *discordgo.MessageCreate) {\n\tif musicLinkRegex.Match([]byte(m.Content)) {\n\t\ti, err := pvfm.GetStats()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif i.IsDJLive() && m.ChannelID == youtubeSpamRoomID {\n\t\t\ts.ChannelMessageSend(m.ChannelID, \"Please be mindful sharing links to music when a DJ is performing. Thanks!\")\n\t\t}\n\t}\n}\n\nfunc np(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ti, err := pvfm.GetStats()\n\tif err != nil {\n\t\tlog.Printf(\"Can't get info: %v, failing over to plan b\", err)\n\t\treturn doStationRequest(s, m, parv)\n\t}\n\n\tresult := []string{}\n\n\tif i.Main.Nowplaying == \"Fetching info...\" {\n\t\tlog.Println(\"Main information was bad, fetching from station directly...\")\n\n\t\terr := doStationRequest(s, m, parv)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t} else {\n\t\tresult = append(result, \"📻 **Now Playing on PVFM**\\n\")\n\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Main 🎵 %s\\n\",\n\t\t\ti.Main.Nowplaying,\n\t\t))\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Chill 🎵 %s\\n\",\n\t\t\ti.Secondary.Nowplaying,\n\t\t))\n\t\tresult = append(result, fmt.Sprintf(\n\t\t\t\"Free! 🎵 %s\",\n\t\t\ti.MusicOnly.Nowplaying,\n\t\t))\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc dj(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tcal, err := pvl.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnow := cal.Result[0]\n\tresult := []string{}\n\n\tlocalTime := time.Now()\n\tthentime := time.Unix(now.StartTime, 0)\n\tif thentime.Unix() < localTime.Unix() {\n\t\tresult = append(result, fmt.Sprintf(\"Currently live: %s\\n\", now.Title))\n\t\tnow = cal.Result[1]\n\t}\n\n\tnowTime := time.Unix(now.StartTime, 0).UTC()\n\tzone, _ := nowTime.Zone()\n\tfmttime, _ := strftime.Format(\"%Y-%m-%d %H:%M:%S\", nowTime)\n\n\tresult = append(result, fmt.Sprintf(\"Next event: %s at %s \\x02%s\\x02\",\n\t\tnow.Title,\n\t\tfmttime,\n\t\tzone,\n\t))\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc stats(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ti, err := pvfm.GetStats()\n\tif err != nil {\n\t\tlog.Printf(\"Error getting the station info: %v, falling back to plan b\", err)\n\t\treturn doStatsFromStation(s, m, parv)\n\t}\n\n\tst, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar l int\n\tvar peak int\n\n\tfor _, source := range st.Icestats.Source {\n\t\tl = l + source.Listeners\n\t\tpeak = peak + source.ListenerPeak\n\t}\n\n\tresult := []string{\n\t\tfmt.Sprintf(\n\t\t\t\"Current listeners across all streams: %d with a maximum of %d!\",\n\t\t\ti.Listeners.Listeners, peak,\n\t\t),\n\t\tfmt.Sprintf(\n\t\t\t\"Detailed: Main: %d listeners, Two: %d listeners, Free: %d listeners\",\n\t\t\ti.Main.Listeners, i.Secondary.Listeners, i.MusicOnly.Listeners,\n\t\t),\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\n\treturn nil\n}\n\nfunc schedule(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tresult := []string{}\n\tschEntries, err := pvfmschedule.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, entry := range schEntries {\n\t\tresult = append(result, entry.String())\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc doStationRequest(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tstats, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult := fmt.Sprintf(\n\t\t\"Now playing: %s - %s on Ponyville FM!\",\n\t\tstats.Icestats.Source[0].Title,\n\t\tstats.Icestats.Source[0].Artist,\n\t)\n\n\ts.ChannelMessageSend(m.ChannelID, result)\n\treturn nil\n}\n\nfunc doStatsFromStation(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tst, err := station.GetStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar l int\n\tvar peak int\n\n\tfor _, source := range st.Icestats.Source {\n\t\tl = l + source.Listeners\n\t\tpeak = peak + source.ListenerPeak\n\t}\n\n\tresult := []string{\n\t\tfmt.Sprintf(\"Current listeners: %d with a maximum of %d!\", l, peak),\n\t}\n\n\ts.ChannelMessageSend(m.ChannelID, strings.Join(result, \"\\n\"))\n\treturn nil\n}\n\nfunc curTime(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\ts.ChannelMessageSend(m.ChannelID, fmt.Sprintf(\"The time currently is %s\", time.Now().UTC().Format(\"2006-01-02 15:04:05 UTC\")))\n\n\treturn nil\n}\n\nfunc streams(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tcurrentMeta, metaErr := station.GetStats()\n\tif metaErr != nil {\n\t\ts.ChannelMessageSend(m.ChannelID, \"Error receiving pvfm metadata\")\n\t\treturn metaErr\n\t}\n\n\toutputString := \"**PVFM Servers:**\\n\"\n\n\tfor _, element := range currentMeta.Icestats.Source {\n\t\toutputString += \":musical_note: \" + element.ServerDescription + \":\\n<\" + strings.Replace(element.Listenurl, \"aerial\", \"dj.bronyradio.com\", -1) + \">\\n\"\n\t}\n\n\toutputString += \"**Luna Radio Servers:**\\n\"\n\toutputString += \"\\n:musical_note: Luna Radio MP3 128Kbps Stream:\\n<http:\/\/radio.ponyvillelive.com:8002\/stream.mp3>\\n:musical_note: Luna Radio Mobile MP3 64Kbps Stream:\\n<http:\/\/radio.ponyvillelive.com:8002\/mobile?;stream.mp3>\\n\"\n\n\toutputString += \"\\n:cd: DJ Recordings:\\n<https:\/\/pvfmsets.cf\/var\/93252527679639552\/>9\" + \"\\n:cd: Legacy DJ Recordings:\\n<http:\/\/darkling.darkwizards.com\/wang\/BronyRadio\/?M=D>\"\n\n\ts.ChannelMessageSend(m.ChannelID, outputString)\n\n\treturn nil\n}\n\nfunc derpi(s *discordgo.Session, m *discordgo.Message, parv []string) error {\n\tsearchResults, err := derpiSearch.SearchDerpi(m.Content[7:len(m.Content)])\n\tif err != nil {\n\t\ts.ChannelMessageSend(m.ChannelID, \"An error occured.\")\n\t\treturn err\n\t}\n\tif len(searchResults.Search) < 1 {\n\t\ts.ChannelMessageSend(m.ChannelID, \"Error: No results\")\n\t\treturn nil\n\t}\n\ts.ChannelMessageSend(m.ChannelID, \"http:\"+searchResults.Search[randomRange(0, len(searchResults.Search))].Image)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package webp\n\nimport \"image\"\n\n\/\/ YUVAImage represents a image of YUV colors with alpha channel image.\n\/\/\n\/\/ YUVAImage contains decoded YCbCr image data with alpha channel,\n\/\/ but it is not compatible with image.YCbCr. Because, the RGB-YCbCr conversion\n\/\/ that used in WebP is following to ITU-R BT.601 standard.\n\/\/ In contrast, the conversion of Image.YCbCr (and color.YCbCrModel) is following\n\/\/ to the JPEG standard (JFIF). If you need the image as image.YCBCr, you will\n\/\/ first convert from WebP to RGB image, then convert from RGB image to JPEG's\n\/\/ YCbCr image.\n\/\/\n\/\/ See: http:\/\/en.wikipedia.org\/wiki\/YCbCr\n\/\/\ntype YUVAImage struct {\n\tY, Cb, Cr, A []uint8\n\tYStride int\n\tCStride int\n\tAStride int\n\tColorSpace ColorSpace\n\tRect image.Rectangle\n}\n\n\/\/ NewYUVAImage creates and allocates image buffer.\nfunc NewYUVAImage(r image.Rectangle, c ColorSpace) (image *YUVAImage) {\n\tyw, yh := r.Dx(), r.Dx()\n\tcw, ch := ((r.Max.X+1)\/2 - r.Min.X\/2), ((r.Max.Y+1)\/2 - r.Min.Y\/2)\n\n\tswitch c {\n\tcase YUV420:\n\t\tb := make([]byte, yw*yh+2*cw*ch)\n\t\timage = &YUVAImage{\n\t\t\tY: b[:yw*yh],\n\t\t\tCb: b[yw*yh+0*cw*ch : yw*yh+1*cw*ch],\n\t\t\tCr: b[yw*yh+1*cw*ch : yw*yh+2*cw*ch],\n\t\t\tA: nil,\n\t\t\tYStride: yw,\n\t\t\tCStride: cw,\n\t\t\tAStride: 0,\n\t\t\tColorSpace: c,\n\t\t\tRect: r,\n\t\t}\n\n\tcase YUV420A:\n\t\tb := make([]byte, 2*yw*yh+2*cw*ch)\n\t\timage = &YUVAImage{\n\t\t\tY: b[:yw*yh],\n\t\t\tCb: b[yw*yh+0*cw*ch : yw*yh+1*cw*ch],\n\t\t\tCr: b[yw*yh+1*cw*ch : yw*yh+2*cw*ch],\n\t\t\tA: b[yw*yh+2*cw*ch:],\n\t\t\tYStride: yw,\n\t\t\tCStride: cw,\n\t\t\tAStride: yw,\n\t\t\tColorSpace: c,\n\t\t\tRect: r,\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>Fix confusing heights with widths on YUVA buffer allocation<commit_after>package webp\n\nimport \"image\"\n\n\/\/ YUVAImage represents a image of YUV colors with alpha channel image.\n\/\/\n\/\/ YUVAImage contains decoded YCbCr image data with alpha channel,\n\/\/ but it is not compatible with image.YCbCr. Because, the RGB-YCbCr conversion\n\/\/ that used in WebP is following to ITU-R BT.601 standard.\n\/\/ In contrast, the conversion of Image.YCbCr (and color.YCbCrModel) is following\n\/\/ to the JPEG standard (JFIF). If you need the image as image.YCBCr, you will\n\/\/ first convert from WebP to RGB image, then convert from RGB image to JPEG's\n\/\/ YCbCr image.\n\/\/\n\/\/ See: http:\/\/en.wikipedia.org\/wiki\/YCbCr\n\/\/\ntype YUVAImage struct {\n\tY, Cb, Cr, A []uint8\n\tYStride int\n\tCStride int\n\tAStride int\n\tColorSpace ColorSpace\n\tRect image.Rectangle\n}\n\n\/\/ NewYUVAImage creates and allocates image buffer.\nfunc NewYUVAImage(r image.Rectangle, c ColorSpace) (image *YUVAImage) {\n\tyw, yh := r.Dx(), r.Dy()\n\tcw, ch := ((r.Max.X+1)\/2 - r.Min.X\/2), ((r.Max.Y+1)\/2 - r.Min.Y\/2)\n\n\tswitch c {\n\tcase YUV420:\n\t\tb := make([]byte, yw*yh+2*cw*ch)\n\t\timage = &YUVAImage{\n\t\t\tY: b[:yw*yh],\n\t\t\tCb: b[yw*yh+0*cw*ch : yw*yh+1*cw*ch],\n\t\t\tCr: b[yw*yh+1*cw*ch : yw*yh+2*cw*ch],\n\t\t\tA: nil,\n\t\t\tYStride: yw,\n\t\t\tCStride: cw,\n\t\t\tAStride: 0,\n\t\t\tColorSpace: c,\n\t\t\tRect: r,\n\t\t}\n\n\tcase YUV420A:\n\t\tb := make([]byte, 2*yw*yh+2*cw*ch)\n\t\timage = &YUVAImage{\n\t\t\tY: b[:yw*yh],\n\t\t\tCb: b[yw*yh+0*cw*ch : yw*yh+1*cw*ch],\n\t\t\tCr: b[yw*yh+1*cw*ch : yw*yh+2*cw*ch],\n\t\t\tA: b[yw*yh+2*cw*ch:],\n\t\t\tYStride: yw,\n\t\t\tCStride: cw,\n\t\t\tAStride: yw,\n\t\t\tColorSpace: c,\n\t\t\tRect: r,\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/kusabashira\/alita\"\n)\n\nfunc usage() {\n\tos.Stderr.WriteString(`\nUsage: alita [OPTION]... [FILE]...\nAlign FILE(s), or standard input.\n\nOptions:\n -m, --margin=FORMAT join line by FORMAT (default: 1:1)\n -j, --justfy=SEQUENCE justfy cells by SEQUENCE (default: l)\n -r, --regexp DELIM is a regular expression\n -d, --delimiter=DELIM use DELIM to separate line (default: \/\\s+\/)\n -h, --help show this help message\n -v, --version print the version\n`[1:])\n}\n\nfunc version() {\n\tos.Stderr.WriteString(`\nv0.2.0\n`[1:])\n}\n\nfunc do(a *alita.Aligner, r io.Reader) error {\n\tif err := a.ReadAll(r); err != nil {\n\t\treturn err\n\t}\n\treturn a.Flush()\n}\n\nfunc _main() error {\n\tvar isHelp, isVersion bool\n\tflag.BoolVar(&isHelp, \"h\", false, \"\")\n\tflag.BoolVar(&isHelp, \"help\", false, \"\")\n\tflag.BoolVar(&isVersion, \"v\", false, \"\")\n\tflag.BoolVar(&isVersion, \"version\", false, \"\")\n\n\ta := alita.NewAligner(os.Stdout)\n\tflag.Var(a.Margin, \"m\", \"\")\n\tflag.Var(a.Margin, \"margin\", \"\")\n\tflag.Var(a.Padding, \"j\", \"\")\n\tflag.Var(a.Padding, \"justfy\", \"\")\n\tflag.Var(a.Delimiter, \"d\", \"\")\n\tflag.Var(a.Delimiter, \"delimiter\", \"\")\n\tflag.BoolVar(&a.Delimiter.UseRegexp, \"r\", false, \"\")\n\tflag.BoolVar(&a.Delimiter.UseRegexp, \"regexp\", false, \"\")\n\n\tflag.Usage = usage\n\tflag.Parse()\n\tswitch {\n\tcase isHelp:\n\t\tusage()\n\t\treturn nil\n\tcase isVersion:\n\t\tversion()\n\t\treturn nil\n\t}\n\n\tif flag.NArg() < 1 {\n\t\treturn do(a, os.Stdin)\n\t}\n\n\tvar input []io.Reader\n\tfor _, fname := range flag.Args() {\n\t\tf, err := os.Open(fname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tinput = append(input, f)\n\t}\n\treturn do(a, io.MultiReader(input...))\n}\n\nfunc main() {\n\tif err := _main(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"alita:\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Version up for justfy option<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/kusabashira\/alita\"\n)\n\nfunc usage() {\n\tos.Stderr.WriteString(`\nUsage: alita [OPTION]... [FILE]...\nAlign FILE(s), or standard input.\n\nOptions:\n -m, --margin=FORMAT join line by FORMAT (default: 1:1)\n -j, --justfy=SEQUENCE justfy cells by SEQUENCE (default: l)\n -r, --regexp DELIM is a regular expression\n -d, --delimiter=DELIM use DELIM to separate line (default: \/\\s+\/)\n -h, --help show this help message\n -v, --version print the version\n`[1:])\n}\n\nfunc version() {\n\tos.Stderr.WriteString(`\nv0.3.0\n`[1:])\n}\n\nfunc do(a *alita.Aligner, r io.Reader) error {\n\tif err := a.ReadAll(r); err != nil {\n\t\treturn err\n\t}\n\treturn a.Flush()\n}\n\nfunc _main() error {\n\tvar isHelp, isVersion bool\n\tflag.BoolVar(&isHelp, \"h\", false, \"\")\n\tflag.BoolVar(&isHelp, \"help\", false, \"\")\n\tflag.BoolVar(&isVersion, \"v\", false, \"\")\n\tflag.BoolVar(&isVersion, \"version\", false, \"\")\n\n\ta := alita.NewAligner(os.Stdout)\n\tflag.Var(a.Margin, \"m\", \"\")\n\tflag.Var(a.Margin, \"margin\", \"\")\n\tflag.Var(a.Padding, \"j\", \"\")\n\tflag.Var(a.Padding, \"justfy\", \"\")\n\tflag.Var(a.Delimiter, \"d\", \"\")\n\tflag.Var(a.Delimiter, \"delimiter\", \"\")\n\tflag.BoolVar(&a.Delimiter.UseRegexp, \"r\", false, \"\")\n\tflag.BoolVar(&a.Delimiter.UseRegexp, \"regexp\", false, \"\")\n\n\tflag.Usage = usage\n\tflag.Parse()\n\tswitch {\n\tcase isHelp:\n\t\tusage()\n\t\treturn nil\n\tcase isVersion:\n\t\tversion()\n\t\treturn nil\n\t}\n\n\tif flag.NArg() < 1 {\n\t\treturn do(a, os.Stdin)\n\t}\n\n\tvar input []io.Reader\n\tfor _, fname := range flag.Args() {\n\t\tf, err := os.Open(fname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tinput = append(input, f)\n\t}\n\treturn do(a, io.MultiReader(input...))\n}\n\nfunc main() {\n\tif err := _main(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"alita:\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/go-debos\/debos\"\n\t\"github.com\/go-debos\/debos\/recipe\"\n\t\"github.com\/go-debos\/fakemachine\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\nfunc checkError(context debos.DebosContext, err error, a debos.Action, stage string) int {\n\tif err == nil {\n\t\treturn 0\n\t}\n\n\tlog.Printf(\"Action `%s` failed at stage %s, error: %s\", a, stage, err)\n\tdebos.DebugShell(context)\n\treturn 1\n}\n\nfunc main() {\n\tvar context debos.DebosContext\n\tvar options struct {\n\t\tArtifactDir string `long:\"artifactdir\"`\n\t\tInternalImage string `long:\"internal-image\" hidden:\"true\"`\n\t\tTemplateVars map[string]string `short:\"t\" long:\"template-var\" description:\"Template variables\"`\n\t\tDebugShell bool `long:\"debug-shell\" description:\"Fall into interactive shell on error\"`\n\t\tShell string `short:\"s\" long:\"shell\" description:\"Redefine interactive shell binary (default: bash)\" optionsl:\"\" default:\"\/bin\/bash\"`\n\t}\n\n\tvar exitcode int = 0\n\t\/\/ Allow to run all deferred calls prior to os.Exit()\n\tdefer func() {\n\t\tos.Exit(exitcode)\n\t}()\n\n\tparser := flags.NewParser(&options, flags.Default)\n\targs, err := parser.Parse()\n\n\tif err != nil {\n\t\tflagsErr, ok := err.(*flags.Error)\n\t\tif ok && flagsErr.Type == flags.ErrHelp {\n\t\t\treturn\n\t\t} else {\n\t\t\tfmt.Printf(\"%v\\n\", flagsErr)\n\t\t\texitcode = 1\n\t\t\treturn\n\t\t}\n\t}\n\n\tif len(args) != 1 {\n\t\tlog.Println(\"No recipe given!\")\n\t\texitcode = 1\n\t\treturn\n\t}\n\n\t\/\/ Set interactive shell binary only if '--debug-shell' options passed\n\tif options.DebugShell {\n\t\tcontext.DebugShell = options.Shell\n\t}\n\n\tfile := args[0]\n\tfile = debos.CleanPath(file)\n\n\tr := recipe.Recipe{}\n\tif err := r.Parse(file, options.TemplateVars); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/* If fakemachine is supported the outer fake machine will never use the\n\t * scratchdir, so just set it to \/scrach as a dummy to prevent the outer\n\t * debos createing a temporary direction *\/\n\tif fakemachine.InMachine() || fakemachine.Supported() {\n\t\tcontext.Scratchdir = \"\/scratch\"\n\t} else {\n\t\tlog.Printf(\"fakemachine not supported, running on the host!\")\n\t\tcwd, _ := os.Getwd()\n\t\tcontext.Scratchdir, err = ioutil.TempDir(cwd, \".debos-\")\n\t\tdefer os.RemoveAll(context.Scratchdir)\n\t}\n\n\tcontext.Rootdir = path.Join(context.Scratchdir, \"root\")\n\tcontext.Image = options.InternalImage\n\tcontext.RecipeDir = path.Dir(file)\n\n\tcontext.Artifactdir = options.ArtifactDir\n\tif context.Artifactdir == \"\" {\n\t\tcontext.Artifactdir, _ = os.Getwd()\n\t}\n\tcontext.Artifactdir = debos.CleanPath(context.Artifactdir)\n\n\t\/\/ Initialise origins map\n\tcontext.Origins = make(map[string]string)\n\tcontext.Origins[\"artifacts\"] = context.Artifactdir\n\tcontext.Origins[\"filesystem\"] = context.Rootdir\n\tcontext.Origins[\"recipe\"] = context.RecipeDir\n\n\tcontext.Architecture = r.Architecture\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Verify(&context)\n\t\tif exitcode = checkError(context, err, a, \"Verify\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !fakemachine.InMachine() && fakemachine.Supported() {\n\t\tm := fakemachine.NewMachine()\n\t\tvar args []string\n\n\t\tm.AddVolume(context.Artifactdir)\n\t\targs = append(args, \"--artifactdir\", context.Artifactdir)\n\n\t\tfor k, v := range options.TemplateVars {\n\t\t\targs = append(args, \"--template-var\", fmt.Sprintf(\"%s:\\\"%s\\\"\", k, v))\n\t\t}\n\n\t\tm.AddVolume(context.RecipeDir)\n\t\targs = append(args, file)\n\n\t\tif options.DebugShell {\n\t\t\targs = append(args, \"--debug-shell\")\n\t\t\targs = append(args, \"--shell\", fmt.Sprintf(\"%s\", options.Shell))\n\t\t}\n\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PreMachine(&context, m, &args)\n\t\t\tif exitcode = checkError(context, err, a, \"PreMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\texitcode, err = m.RunInMachineWithArgs(args)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif exitcode != 0 {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PostMachine(context)\n\t\t\tif exitcode = checkError(context, err, a, \"Postmachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"==== Recipe done ====\")\n\t\treturn\n\t}\n\n\tif !fakemachine.InMachine() {\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PreNoMachine(&context)\n\t\t\tif exitcode = checkError(context, err, a, \"PreNoMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Run(&context)\n\t\tif exitcode = checkError(context, err, a, \"Run\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Cleanup(context)\n\t\tif exitcode = checkError(context, err, a, \"Cleanup\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !fakemachine.InMachine() {\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PostMachine(context)\n\t\t\tif exitcode = checkError(context, err, a, \"PostMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"==== Recipe done ====\")\n\t}\n}\n<commit_msg>debos: add error message if recipe file is absent<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/go-debos\/debos\"\n\t\"github.com\/go-debos\/debos\/recipe\"\n\t\"github.com\/go-debos\/fakemachine\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\nfunc checkError(context debos.DebosContext, err error, a debos.Action, stage string) int {\n\tif err == nil {\n\t\treturn 0\n\t}\n\n\tlog.Printf(\"Action `%s` failed at stage %s, error: %s\", a, stage, err)\n\tdebos.DebugShell(context)\n\treturn 1\n}\n\nfunc main() {\n\tvar context debos.DebosContext\n\tvar options struct {\n\t\tArtifactDir string `long:\"artifactdir\"`\n\t\tInternalImage string `long:\"internal-image\" hidden:\"true\"`\n\t\tTemplateVars map[string]string `short:\"t\" long:\"template-var\" description:\"Template variables\"`\n\t\tDebugShell bool `long:\"debug-shell\" description:\"Fall into interactive shell on error\"`\n\t\tShell string `short:\"s\" long:\"shell\" description:\"Redefine interactive shell binary (default: bash)\" optionsl:\"\" default:\"\/bin\/bash\"`\n\t}\n\n\tvar exitcode int = 0\n\t\/\/ Allow to run all deferred calls prior to os.Exit()\n\tdefer func() {\n\t\tos.Exit(exitcode)\n\t}()\n\n\tparser := flags.NewParser(&options, flags.Default)\n\targs, err := parser.Parse()\n\n\tif err != nil {\n\t\tflagsErr, ok := err.(*flags.Error)\n\t\tif ok && flagsErr.Type == flags.ErrHelp {\n\t\t\treturn\n\t\t} else {\n\t\t\tfmt.Printf(\"%v\\n\", flagsErr)\n\t\t\texitcode = 1\n\t\t\treturn\n\t\t}\n\t}\n\n\tif len(args) != 1 {\n\t\tlog.Println(\"No recipe given!\")\n\t\texitcode = 1\n\t\treturn\n\t}\n\n\t\/\/ Set interactive shell binary only if '--debug-shell' options passed\n\tif options.DebugShell {\n\t\tcontext.DebugShell = options.Shell\n\t}\n\n\tfile := args[0]\n\tfile = debos.CleanPath(file)\n\n\tr := recipe.Recipe{}\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\tlog.Println(err)\n\t\texitcode = 1\n\t\treturn\n\t}\n\tif err := r.Parse(file, options.TemplateVars); err != nil {\n\t\tlog.Println(err)\n\t\texitcode = 1\n\t\treturn\n\t}\n\n\t\/* If fakemachine is supported the outer fake machine will never use the\n\t * scratchdir, so just set it to \/scrach as a dummy to prevent the outer\n\t * debos createing a temporary direction *\/\n\tif fakemachine.InMachine() || fakemachine.Supported() {\n\t\tcontext.Scratchdir = \"\/scratch\"\n\t} else {\n\t\tlog.Printf(\"fakemachine not supported, running on the host!\")\n\t\tcwd, _ := os.Getwd()\n\t\tcontext.Scratchdir, err = ioutil.TempDir(cwd, \".debos-\")\n\t\tdefer os.RemoveAll(context.Scratchdir)\n\t}\n\n\tcontext.Rootdir = path.Join(context.Scratchdir, \"root\")\n\tcontext.Image = options.InternalImage\n\tcontext.RecipeDir = path.Dir(file)\n\n\tcontext.Artifactdir = options.ArtifactDir\n\tif context.Artifactdir == \"\" {\n\t\tcontext.Artifactdir, _ = os.Getwd()\n\t}\n\tcontext.Artifactdir = debos.CleanPath(context.Artifactdir)\n\n\t\/\/ Initialise origins map\n\tcontext.Origins = make(map[string]string)\n\tcontext.Origins[\"artifacts\"] = context.Artifactdir\n\tcontext.Origins[\"filesystem\"] = context.Rootdir\n\tcontext.Origins[\"recipe\"] = context.RecipeDir\n\n\tcontext.Architecture = r.Architecture\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Verify(&context)\n\t\tif exitcode = checkError(context, err, a, \"Verify\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !fakemachine.InMachine() && fakemachine.Supported() {\n\t\tm := fakemachine.NewMachine()\n\t\tvar args []string\n\n\t\tm.AddVolume(context.Artifactdir)\n\t\targs = append(args, \"--artifactdir\", context.Artifactdir)\n\n\t\tfor k, v := range options.TemplateVars {\n\t\t\targs = append(args, \"--template-var\", fmt.Sprintf(\"%s:\\\"%s\\\"\", k, v))\n\t\t}\n\n\t\tm.AddVolume(context.RecipeDir)\n\t\targs = append(args, file)\n\n\t\tif options.DebugShell {\n\t\t\targs = append(args, \"--debug-shell\")\n\t\t\targs = append(args, \"--shell\", fmt.Sprintf(\"%s\", options.Shell))\n\t\t}\n\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PreMachine(&context, m, &args)\n\t\t\tif exitcode = checkError(context, err, a, \"PreMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\texitcode, err = m.RunInMachineWithArgs(args)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif exitcode != 0 {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PostMachine(context)\n\t\t\tif exitcode = checkError(context, err, a, \"Postmachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"==== Recipe done ====\")\n\t\treturn\n\t}\n\n\tif !fakemachine.InMachine() {\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PreNoMachine(&context)\n\t\t\tif exitcode = checkError(context, err, a, \"PreNoMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Run(&context)\n\t\tif exitcode = checkError(context, err, a, \"Run\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, a := range r.Actions {\n\t\terr = a.Cleanup(context)\n\t\tif exitcode = checkError(context, err, a, \"Cleanup\"); exitcode != 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !fakemachine.InMachine() {\n\t\tfor _, a := range r.Actions {\n\t\t\terr = a.PostMachine(context)\n\t\t\tif exitcode = checkError(context, err, a, \"PostMachine\"); exitcode != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"==== Recipe done ====\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/tormoder\/fit\/cmd\/fitgen\/internal\/profile\"\n)\n\nconst fitPkgImportPath = \"github.com\/tormoder\/fit\"\n\nconst (\n\tworkbookNameXLS = \"Profile.xls\"\n\tworkbookNameXLSX = \"Profile.xlsx\"\n)\n\nfunc main() {\n\tsdkOverride := flag.String(\n\t\t\"sdk\",\n\t\t\"\",\n\t\t\"provide or override SDK version printed in generated code\",\n\t)\n\ttimestamp := flag.Bool(\n\t\t\"timestamp\",\n\t\tfalse,\n\t\t\"add generation timestamp to generated code\",\n\t)\n\trunTests := flag.Bool(\n\t\t\"test\",\n\t\tfalse,\n\t\t\"run all tests in fit repository after code has been generated\",\n\t)\n\tverbose := flag.Bool(\n\t\t\"verbose\",\n\t\tfalse,\n\t\t\"print verbose debugging output for profile parsing and code generation\",\n\t)\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: fitgen [flags] [path to sdk zip, xls or xlsx file] [output directory]\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tl := log.New(os.Stdout, \"fitgen:\\t\", 0)\n\n\tfitSrcDir := flag.Arg(1)\n\tl.Println(\"fit source output directory:\", fitSrcDir)\n\n\tvar (\n\t\tmessagesOut = filepath.Join(fitSrcDir, \"messages.go\")\n\t\ttypesOut = filepath.Join(fitSrcDir, \"types.go\")\n\t\tprofileOut = filepath.Join(fitSrcDir, \"profile.go\")\n\t\ttypesStringOut = filepath.Join(fitSrcDir, \"types_string.go\")\n\t\tstringerPath = filepath.Join(fitSrcDir, \"cmd\/stringer\/stringer.go\")\n\t)\n\n\tvar (\n\t\tinputData []byte\n\t\tinput = flag.Arg(0)\n\t\tinputExt = filepath.Ext(input)\n\t\terr error\n\t)\n\n\tswitch inputExt {\n\tcase \".zip\":\n\t\tinputData, err = readDataFromZIP(input)\n\tcase \".xls\", \".xlsx\":\n\t\tinputData, err = readDataFromXLSX(input)\n\t\tif *sdkOverride == \"\" {\n\t\t\tlog.Fatal(\"-sdk flag required if input is .xls(x)\")\n\t\t}\n\tdefault:\n\t\tl.Fatalln(\"input file must be of type [.zip | .xls | .xlsx], got:\", inputExt)\n\t}\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tgenOptions := []profile.GeneratorOption{\n\t\tprofile.WithGenerationTimestamp(*timestamp),\n\t\tprofile.WithLogger(l),\n\t}\n\tif *verbose {\n\t\tgenOptions = append(genOptions, profile.WithDebugOutput())\n\t}\n\n\tvar sdkString string\n\tif *sdkOverride != \"\" {\n\t\tsdkString = *sdkOverride\n\t} else {\n\t\tsdkString = parseSDKVersionStringFromZipFilePath(input)\n\t}\n\n\tsdkMaj, sdkMin, err := parseMajorAndMinorSDKVersion(sdkString)\n\tif err != nil {\n\t\tl.Fatalln(\"error parsing sdk version:\", err)\n\t}\n\n\tgenerator, err := profile.NewGenerator(sdkMaj, sdkMin, inputData, genOptions...)\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tfitProfile, err := generator.GenerateProfile()\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tif err = ioutil.WriteFile(typesOut, fitProfile.TypesSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing types output file: %v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(messagesOut, fitProfile.MessagesSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing messages output file: %v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(profileOut, fitProfile.ProfileSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing profile output file: %v\", err)\n\t}\n\n\tl.Println(\"running stringer\")\n\terr = runStringerOnTypes(stringerPath, fitSrcDir, typesStringOut, fitProfile.StringerInput)\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\tl.Println(\"stringer: types done\")\n\n\tlogMesgNumVsMessages(fitProfile.MesgNumsWithoutMessage, l)\n\n\tif *runTests {\n\t\terr = runAllTests(fitPkgImportPath)\n\t\tif err != nil {\n\t\t\tl.Fatal(err)\n\t\t}\n\t\tl.Println(\"go test: pass\")\n\t}\n\n\tl.Println(\"done\")\n}\n\nfunc runStringerOnTypes(stringerPath, fitSrcDir, typesStringOut, fitTypes string) error {\n\ttmpFile, err := ioutil.TempFile(\"\", \"stringer\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error getting temporary file: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = tmpFile.Close()\n\t\terr = os.Remove(tmpFile.Name())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"stringer: error removing temporary file: %v\\n\", err)\n\t\t}\n\t}()\n\n\tstringerCmd := exec.Command(\n\t\t\"go\",\n\t\t\"run\",\n\t\tstringerPath,\n\t\t\"-trimprefix\",\n\t\t\"-type\", fitTypes,\n\t\t\"-output\",\n\t\ttmpFile.Name(),\n\t\tfitSrcDir,\n\t)\n\n\toutput, err := stringerCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error running on types: %v\\n%s\", err, output)\n\t}\n\n\toutFile, err := os.Create(typesStringOut)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\tdefer func() { _ = outFile.Close() }()\n\n\t_, err = outFile.WriteString(\"\/\/lint:file-ignore SA4003 Ignore checks of unsigned types >= 0. stringer generates these.\\n\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\n\t_, err = tmpFile.Seek(0, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error seeking output file: %v\", err)\n\t}\n\n\t_, err = io.Copy(outFile, tmpFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc runAllTests(pkgDir string) error {\n\tlistCmd := exec.Command(\"go\", \"list\", pkgDir+\"\/...\")\n\toutput, err := listCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go list: fail: %v\\n%s\", err, output)\n\t}\n\n\tsplitted := strings.Split(string(output), \"\\n\")\n\tvar goTestArgs []string\n\t\/\/ Command\n\tgoTestArgs = append(goTestArgs, \"test\")\n\t\/\/ Packages\n\tfor _, s := range splitted {\n\t\tif strings.Contains(s, \"\/vendor\/\") {\n\t\t\tcontinue\n\t\t}\n\t\tgoTestArgs = append(goTestArgs, s)\n\t}\n\n\ttestCmd := exec.Command(\"go\", goTestArgs...)\n\toutput, err = testCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go test: fail: %v\\n%s\", err, output)\n\t}\n\n\treturn nil\n}\n\nfunc logMesgNumVsMessages(msgs []string, l *log.Logger) {\n\tif len(msgs) == 0 {\n\t\treturn\n\t}\n\tl.Println(\"mesgnum-vs-msgs: implementation detail below, this may be automated in the future\")\n\tl.Println(\"mesgnum-vs-msgs: #mesgnum values != #generated messages, diff:\", len(msgs))\n\tl.Println(\"mesgnum-vs-msgs: remember to add\/verify map entries for sdk in sdk.go for the following message(s):\")\n\tfor _, msg := range msgs {\n\t\tl.Printf(\"mesgnum-vs-msgs: ----> mesgnum %q has no corresponding message\\n\", msg)\n\t}\n}\n\nfunc readDataFromZIP(path string) ([]byte, error) {\n\tr, err := zip.OpenReader(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening sdk zip file: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar wfile *zip.File\n\tfor _, f := range r.File {\n\t\tif f.Name == workbookNameXLS {\n\t\t\twfile = f\n\t\t\tbreak\n\t\t}\n\t\tif f.Name == workbookNameXLSX {\n\t\t\twfile = f\n\t\t\tbreak\n\t\t}\n\t}\n\tif wfile == nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"no file named %q or %q found in zip archive\",\n\t\t\tworkbookNameXLS, workbookNameXLSX,\n\t\t)\n\t}\n\n\trc, err := wfile.Open()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening zip archive: %v\", err)\n\t}\n\tdefer rc.Close()\n\n\tdata, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %q from archive: %v\", wfile.Name, err)\n\t}\n\n\treturn data, nil\n}\n\nfunc readDataFromXLSX(path string) ([]byte, error) {\n\treturn ioutil.ReadFile(path)\n}\n\nfunc parseSDKVersionStringFromZipFilePath(path string) string {\n\t_, file := filepath.Split(path)\n\tver := strings.TrimSuffix(file, \".zip\")\n\treturn strings.TrimPrefix(ver, \"FitSDKRelease_\")\n}\n\nfunc parseMajorAndMinorSDKVersion(sdkString string) (int, int, error) {\n\tsplitted := strings.Split(sdkString, \".\")\n\tif len(splitted) < 2 {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse major\/minor version from input: %q\", sdkString)\n\t}\n\n\tmaj, err := strconv.Atoi(splitted[0])\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse major version from input: %q\", splitted[0])\n\t}\n\n\tmin, err := strconv.Atoi(splitted[1])\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse minor version from input: %q\", splitted[1])\n\t}\n\n\treturn maj, min, nil\n}\n<commit_msg>cmd\/fitgen: adjust 'test' flag to tigger tests in output directory<commit_after>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/tormoder\/fit\/cmd\/fitgen\/internal\/profile\"\n)\n\nconst (\n\tworkbookNameXLS = \"Profile.xls\"\n\tworkbookNameXLSX = \"Profile.xlsx\"\n)\n\nfunc main() {\n\tsdkOverride := flag.String(\n\t\t\"sdk\",\n\t\t\"\",\n\t\t\"provide or override SDK version printed in generated code\",\n\t)\n\ttimestamp := flag.Bool(\n\t\t\"timestamp\",\n\t\tfalse,\n\t\t\"add generation timestamp to generated code\",\n\t)\n\trunTests := flag.Bool(\n\t\t\"test\",\n\t\tfalse,\n\t\t\"run all tests in fit repository after code has been generated\",\n\t)\n\tverbose := flag.Bool(\n\t\t\"verbose\",\n\t\tfalse,\n\t\t\"print verbose debugging output for profile parsing and code generation\",\n\t)\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: fitgen [flags] [path to sdk zip, xls or xlsx file] [output directory]\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tl := log.New(os.Stdout, \"fitgen:\\t\", 0)\n\n\tfitSrcDir := flag.Arg(1)\n\tl.Println(\"fit source output directory:\", fitSrcDir)\n\n\tvar (\n\t\tmessagesOut = filepath.Join(fitSrcDir, \"messages.go\")\n\t\ttypesOut = filepath.Join(fitSrcDir, \"types.go\")\n\t\tprofileOut = filepath.Join(fitSrcDir, \"profile.go\")\n\t\ttypesStringOut = filepath.Join(fitSrcDir, \"types_string.go\")\n\t\tstringerPath = filepath.Join(fitSrcDir, \"cmd\/stringer\/stringer.go\")\n\t)\n\n\tvar (\n\t\tinputData []byte\n\t\tinput = flag.Arg(0)\n\t\tinputExt = filepath.Ext(input)\n\t\terr error\n\t)\n\n\tswitch inputExt {\n\tcase \".zip\":\n\t\tinputData, err = readDataFromZIP(input)\n\tcase \".xls\", \".xlsx\":\n\t\tinputData, err = readDataFromXLSX(input)\n\t\tif *sdkOverride == \"\" {\n\t\t\tlog.Fatal(\"-sdk flag required if input is .xls(x)\")\n\t\t}\n\tdefault:\n\t\tl.Fatalln(\"input file must be of type [.zip | .xls | .xlsx], got:\", inputExt)\n\t}\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tgenOptions := []profile.GeneratorOption{\n\t\tprofile.WithGenerationTimestamp(*timestamp),\n\t\tprofile.WithLogger(l),\n\t}\n\tif *verbose {\n\t\tgenOptions = append(genOptions, profile.WithDebugOutput())\n\t}\n\n\tvar sdkString string\n\tif *sdkOverride != \"\" {\n\t\tsdkString = *sdkOverride\n\t} else {\n\t\tsdkString = parseSDKVersionStringFromZipFilePath(input)\n\t}\n\n\tsdkMaj, sdkMin, err := parseMajorAndMinorSDKVersion(sdkString)\n\tif err != nil {\n\t\tl.Fatalln(\"error parsing sdk version:\", err)\n\t}\n\n\tgenerator, err := profile.NewGenerator(sdkMaj, sdkMin, inputData, genOptions...)\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tfitProfile, err := generator.GenerateProfile()\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\n\tif err = ioutil.WriteFile(typesOut, fitProfile.TypesSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing types output file: %v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(messagesOut, fitProfile.MessagesSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing messages output file: %v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(profileOut, fitProfile.ProfileSource, 0644); err != nil {\n\t\tl.Fatalf(\"typegen: error writing profile output file: %v\", err)\n\t}\n\n\tl.Println(\"running stringer\")\n\terr = runStringerOnTypes(stringerPath, fitSrcDir, typesStringOut, fitProfile.StringerInput)\n\tif err != nil {\n\t\tl.Fatal(err)\n\t}\n\tl.Println(\"stringer: types done\")\n\n\tlogMesgNumVsMessages(fitProfile.MesgNumsWithoutMessage, l)\n\n\tif *runTests {\n\t\terr = runAllTests(fitSrcDir)\n\t\tif err != nil {\n\t\t\tl.Fatal(err)\n\t\t}\n\t\tl.Println(\"go test: pass\")\n\t}\n\n\tl.Println(\"done\")\n}\n\nfunc runStringerOnTypes(stringerPath, fitSrcDir, typesStringOut, fitTypes string) error {\n\ttmpFile, err := ioutil.TempFile(\"\", \"stringer\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error getting temporary file: %v\", err)\n\t}\n\tdefer func() {\n\t\t_ = tmpFile.Close()\n\t\terr = os.Remove(tmpFile.Name())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"stringer: error removing temporary file: %v\\n\", err)\n\t\t}\n\t}()\n\n\tstringerCmd := exec.Command(\n\t\t\"go\",\n\t\t\"run\",\n\t\tstringerPath,\n\t\t\"-trimprefix\",\n\t\t\"-type\", fitTypes,\n\t\t\"-output\",\n\t\ttmpFile.Name(),\n\t\tfitSrcDir,\n\t)\n\n\toutput, err := stringerCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error running on types: %v\\n%s\", err, output)\n\t}\n\n\toutFile, err := os.Create(typesStringOut)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\tdefer func() { _ = outFile.Close() }()\n\n\t_, err = outFile.WriteString(\"\/\/lint:file-ignore SA4003 Ignore checks of unsigned types >= 0. stringer generates these.\\n\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\n\t_, err = tmpFile.Seek(0, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error seeking output file: %v\", err)\n\t}\n\n\t_, err = io.Copy(outFile, tmpFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stringer: error creating output file: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc runAllTests(pkgDir string) error {\n\tlistCmd := exec.Command(\"go\", \"list\", pkgDir+\"\/...\")\n\toutput, err := listCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go list: fail: %v\\n%s\", err, output)\n\t}\n\n\tsplitted := strings.Split(string(output), \"\\n\")\n\tvar goTestArgs []string\n\t\/\/ Command\n\tgoTestArgs = append(goTestArgs, \"test\")\n\t\/\/ Packages\n\tfor _, s := range splitted {\n\t\tif strings.Contains(s, \"\/vendor\/\") {\n\t\t\tcontinue\n\t\t}\n\t\tgoTestArgs = append(goTestArgs, s)\n\t}\n\n\ttestCmd := exec.Command(\"go\", goTestArgs...)\n\toutput, err = testCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"go test: fail: %v\\n%s\", err, output)\n\t}\n\n\treturn nil\n}\n\nfunc logMesgNumVsMessages(msgs []string, l *log.Logger) {\n\tif len(msgs) == 0 {\n\t\treturn\n\t}\n\tl.Println(\"mesgnum-vs-msgs: implementation detail below, this may be automated in the future\")\n\tl.Println(\"mesgnum-vs-msgs: #mesgnum values != #generated messages, diff:\", len(msgs))\n\tl.Println(\"mesgnum-vs-msgs: remember to add\/verify map entries for sdk in sdk.go for the following message(s):\")\n\tfor _, msg := range msgs {\n\t\tl.Printf(\"mesgnum-vs-msgs: ----> mesgnum %q has no corresponding message\\n\", msg)\n\t}\n}\n\nfunc readDataFromZIP(path string) ([]byte, error) {\n\tr, err := zip.OpenReader(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening sdk zip file: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar wfile *zip.File\n\tfor _, f := range r.File {\n\t\tif f.Name == workbookNameXLS {\n\t\t\twfile = f\n\t\t\tbreak\n\t\t}\n\t\tif f.Name == workbookNameXLSX {\n\t\t\twfile = f\n\t\t\tbreak\n\t\t}\n\t}\n\tif wfile == nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"no file named %q or %q found in zip archive\",\n\t\t\tworkbookNameXLS, workbookNameXLSX,\n\t\t)\n\t}\n\n\trc, err := wfile.Open()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error opening zip archive: %v\", err)\n\t}\n\tdefer rc.Close()\n\n\tdata, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %q from archive: %v\", wfile.Name, err)\n\t}\n\n\treturn data, nil\n}\n\nfunc readDataFromXLSX(path string) ([]byte, error) {\n\treturn ioutil.ReadFile(path)\n}\n\nfunc parseSDKVersionStringFromZipFilePath(path string) string {\n\t_, file := filepath.Split(path)\n\tver := strings.TrimSuffix(file, \".zip\")\n\treturn strings.TrimPrefix(ver, \"FitSDKRelease_\")\n}\n\nfunc parseMajorAndMinorSDKVersion(sdkString string) (int, int, error) {\n\tsplitted := strings.Split(sdkString, \".\")\n\tif len(splitted) < 2 {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse major\/minor version from input: %q\", sdkString)\n\t}\n\n\tmaj, err := strconv.Atoi(splitted[0])\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse major version from input: %q\", splitted[0])\n\t}\n\n\tmin, err := strconv.Atoi(splitted[1])\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"could not parse minor version from input: %q\", splitted[1])\n\t}\n\n\treturn maj, min, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015-2021 MinIO, Inc.\n\/\/\n\/\/ This file is part of MinIO Object Storage stack\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage cmd\n\nimport (\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\tmem \"github.com\/shirou\/gopsutil\/v3\/mem\"\n\n\t\"github.com\/minio\/minio\/internal\/config\/api\"\n\txioutil \"github.com\/minio\/minio\/internal\/ioutil\"\n\t\"github.com\/minio\/minio\/internal\/logger\"\n)\n\ntype apiConfig struct {\n\tmu sync.RWMutex\n\n\trequestsDeadline time.Duration\n\trequestsPool chan struct{}\n\tclusterDeadline time.Duration\n\tlistQuorum int\n\tcorsAllowOrigins []string\n\t\/\/ total drives per erasure set across pools.\n\ttotalDriveCount int\n\treplicationWorkers int\n\treplicationFailedWorkers int\n\ttransitionWorkers int\n\n\tstaleUploadsExpiry time.Duration\n\tstaleUploadsCleanupInterval time.Duration\n\tdeleteCleanupInterval time.Duration\n}\n\nfunc (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.clusterDeadline = cfg.ClusterDeadline\n\tt.corsAllowOrigins = cfg.CorsAllowOrigin\n\tmaxSetDrives := 0\n\tfor _, setDriveCount := range setDriveCounts {\n\t\tt.totalDriveCount += setDriveCount\n\t\tif setDriveCount > maxSetDrives {\n\t\t\tmaxSetDrives = setDriveCount\n\t\t}\n\t}\n\n\tvar apiRequestsMaxPerNode int\n\tif cfg.RequestsMax <= 0 {\n\t\tvar maxMem uint64\n\t\tmemStats, err := mem.VirtualMemory()\n\t\tif err != nil {\n\t\t\t\/\/ Default to 8 GiB, not critical.\n\t\t\tmaxMem = 8 << 30\n\t\t} else {\n\t\t\tmaxMem = memStats.Available \/ 2\n\t\t}\n\n\t\t\/\/ max requests per node is calculated as\n\t\t\/\/ total_ram \/ ram_per_request\n\t\t\/\/ ram_per_request is (2MiB+128KiB) * driveCount \\\n\t\t\/\/ + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)\n\t\tblockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall\n\t\tapiRequestsMaxPerNode = int(maxMem \/ uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))\n\n\t\tif globalIsErasure {\n\t\t\tlogger.Info(\"Automatically configured API requests per node based on available memory on the system: %d\", apiRequestsMaxPerNode)\n\t\t}\n\t} else {\n\t\tapiRequestsMaxPerNode = cfg.RequestsMax\n\t\tif len(globalEndpoints.Hostnames()) > 0 {\n\t\t\tapiRequestsMaxPerNode \/= len(globalEndpoints.Hostnames())\n\t\t}\n\t}\n\n\tif cap(t.requestsPool) < apiRequestsMaxPerNode {\n\t\t\/\/ Only replace if needed.\n\t\t\/\/ Existing requests will use the previous limit,\n\t\t\/\/ but new requests will use the new limit.\n\t\t\/\/ There will be a short overlap window,\n\t\t\/\/ but this shouldn't last long.\n\t\tt.requestsPool = make(chan struct{}, apiRequestsMaxPerNode)\n\t}\n\tt.requestsDeadline = cfg.RequestsDeadline\n\tt.listQuorum = cfg.GetListQuorum()\n\tif globalReplicationPool != nil &&\n\t\tcfg.ReplicationWorkers != t.replicationWorkers {\n\t\tglobalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)\n\t\tglobalReplicationPool.ResizeWorkers(cfg.ReplicationWorkers)\n\t}\n\tt.replicationFailedWorkers = cfg.ReplicationFailedWorkers\n\tt.replicationWorkers = cfg.ReplicationWorkers\n\tif globalTransitionState != nil && cfg.TransitionWorkers != t.transitionWorkers {\n\t\tglobalTransitionState.UpdateWorkers(cfg.TransitionWorkers)\n\t}\n\tt.transitionWorkers = cfg.TransitionWorkers\n\n\tt.staleUploadsExpiry = cfg.StaleUploadsExpiry\n\tt.staleUploadsCleanupInterval = cfg.StaleUploadsCleanupInterval\n\tt.deleteCleanupInterval = cfg.DeleteCleanupInterval\n}\n\nfunc (t *apiConfig) getListQuorum() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.listQuorum\n}\n\nfunc (t *apiConfig) getCorsAllowOrigins() []string {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tcorsAllowOrigins := make([]string, len(t.corsAllowOrigins))\n\tcopy(corsAllowOrigins, t.corsAllowOrigins)\n\treturn corsAllowOrigins\n}\n\nfunc (t *apiConfig) getStaleUploadsCleanupInterval() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.staleUploadsCleanupInterval == 0 {\n\t\treturn 6 * time.Hour \/\/ default 6 hours\n\t}\n\n\treturn t.staleUploadsCleanupInterval\n}\n\nfunc (t *apiConfig) getStaleUploadsExpiry() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.staleUploadsExpiry == 0 {\n\t\treturn 24 * time.Hour \/\/ default 24 hours\n\t}\n\n\treturn t.staleUploadsExpiry\n}\n\nfunc (t *apiConfig) getDeleteCleanupInterval() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.deleteCleanupInterval == 0 {\n\t\treturn 5 * time.Minute \/\/ every 5 minutes\n\t}\n\n\treturn t.deleteCleanupInterval\n}\n\nfunc (t *apiConfig) getClusterDeadline() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.clusterDeadline == 0 {\n\t\treturn 10 * time.Second\n\t}\n\n\treturn t.clusterDeadline\n}\n\nfunc (t *apiConfig) getRequestsPool() (chan struct{}, time.Duration) {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.requestsPool == nil {\n\t\treturn nil, time.Duration(0)\n\t}\n\n\treturn t.requestsPool, t.requestsDeadline\n}\n\n\/\/ maxClients throttles the S3 API calls\nfunc maxClients(f http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpool, deadline := globalAPIConfig.getRequestsPool()\n\t\tif pool == nil {\n\t\t\tf.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tglobalHTTPStats.addRequestsInQueue(1)\n\n\t\tdeadlineTimer := time.NewTimer(deadline)\n\t\tdefer deadlineTimer.Stop()\n\n\t\tselect {\n\t\tcase pool <- struct{}{}:\n\t\t\tdefer func() { <-pool }()\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\tf.ServeHTTP(w, r)\n\t\tcase <-deadlineTimer.C:\n\t\t\t\/\/ Send a http timeout message\n\t\t\twriteErrorResponse(r.Context(), w,\n\t\t\t\terrorCodes.ToAPIErr(ErrOperationMaxedOut),\n\t\t\t\tr.URL)\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\treturn\n\t\tcase <-r.Context().Done():\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (t *apiConfig) getReplicationFailedWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.replicationFailedWorkers\n}\n\nfunc (t *apiConfig) getReplicationWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.replicationWorkers\n}\n\nfunc (t *apiConfig) getTransitionWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.transitionWorkers\n}\n<commit_msg>honor requests_max based on cgroup_limits if configured (#13673)<commit_after>\/\/ Copyright (c) 2015-2021 MinIO, Inc.\n\/\/\n\/\/ This file is part of MinIO Object Storage stack\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage cmd\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tmem \"github.com\/shirou\/gopsutil\/v3\/mem\"\n\n\t\"github.com\/minio\/minio\/internal\/config\/api\"\n\txioutil \"github.com\/minio\/minio\/internal\/ioutil\"\n\t\"github.com\/minio\/minio\/internal\/logger\"\n)\n\ntype apiConfig struct {\n\tmu sync.RWMutex\n\n\trequestsDeadline time.Duration\n\trequestsPool chan struct{}\n\tclusterDeadline time.Duration\n\tlistQuorum int\n\tcorsAllowOrigins []string\n\t\/\/ total drives per erasure set across pools.\n\ttotalDriveCount int\n\treplicationWorkers int\n\treplicationFailedWorkers int\n\ttransitionWorkers int\n\n\tstaleUploadsExpiry time.Duration\n\tstaleUploadsCleanupInterval time.Duration\n\tdeleteCleanupInterval time.Duration\n}\n\nconst cgroupLimitFile = \"\/sys\/fs\/cgroup\/memory\/memory.limit_in_bytes\"\n\nfunc cgroupLimit(limitFile string) (limit uint64) {\n\tbuf, err := ioutil.ReadFile(limitFile)\n\tif err != nil {\n\t\treturn 9223372036854771712\n\t}\n\tlimit, err = strconv.ParseUint(string(buf), 10, 64)\n\tif err != nil {\n\t\treturn 9223372036854771712\n\t}\n\treturn limit\n}\n\nfunc availableMemory() (available uint64) {\n\tavailable = 8 << 30 \/\/ Default to 8 GiB when we can't find the limits.\n\n\tif runtime.GOOS == \"linux\" {\n\t\tavailable = cgroupLimit(cgroupLimitFile)\n\n\t\t\/\/ No limit set, It's the highest positive signed 64-bit\n\t\t\/\/ integer (2^63-1), rounded down to multiples of 4096 (2^12),\n\t\t\/\/ the most common page size on x86 systems - for cgroup_limits.\n\t\tif available != 9223372036854771712 {\n\t\t\t\/\/ This means cgroup memory limit is configured.\n\t\t\treturn\n\n\t\t} \/\/ no-limit set proceed to set the limits based on virtual memory.\n\n\t} \/\/ for all other platforms limits are based on virtual memory.\n\n\tmemStats, err := mem.VirtualMemory()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tavailable = memStats.Available \/ 2\n\treturn\n}\n\nfunc (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.clusterDeadline = cfg.ClusterDeadline\n\tt.corsAllowOrigins = cfg.CorsAllowOrigin\n\tmaxSetDrives := 0\n\tfor _, setDriveCount := range setDriveCounts {\n\t\tt.totalDriveCount += setDriveCount\n\t\tif setDriveCount > maxSetDrives {\n\t\t\tmaxSetDrives = setDriveCount\n\t\t}\n\t}\n\n\tvar apiRequestsMaxPerNode int\n\tif cfg.RequestsMax <= 0 {\n\t\tmaxMem := availableMemory()\n\n\t\t\/\/ max requests per node is calculated as\n\t\t\/\/ total_ram \/ ram_per_request\n\t\t\/\/ ram_per_request is (2MiB+128KiB) * driveCount \\\n\t\t\/\/ + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)\n\t\tblockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall\n\t\tapiRequestsMaxPerNode = int(maxMem \/ uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))\n\n\t\tif globalIsErasure {\n\t\t\tlogger.Info(\"Automatically configured API requests per node based on available memory on the system: %d\", apiRequestsMaxPerNode)\n\t\t}\n\t} else {\n\t\tapiRequestsMaxPerNode = cfg.RequestsMax\n\t\tif len(globalEndpoints.Hostnames()) > 0 {\n\t\t\tapiRequestsMaxPerNode \/= len(globalEndpoints.Hostnames())\n\t\t}\n\t}\n\n\tif cap(t.requestsPool) < apiRequestsMaxPerNode {\n\t\t\/\/ Only replace if needed.\n\t\t\/\/ Existing requests will use the previous limit,\n\t\t\/\/ but new requests will use the new limit.\n\t\t\/\/ There will be a short overlap window,\n\t\t\/\/ but this shouldn't last long.\n\t\tt.requestsPool = make(chan struct{}, apiRequestsMaxPerNode)\n\t}\n\tt.requestsDeadline = cfg.RequestsDeadline\n\tt.listQuorum = cfg.GetListQuorum()\n\tif globalReplicationPool != nil &&\n\t\tcfg.ReplicationWorkers != t.replicationWorkers {\n\t\tglobalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)\n\t\tglobalReplicationPool.ResizeWorkers(cfg.ReplicationWorkers)\n\t}\n\tt.replicationFailedWorkers = cfg.ReplicationFailedWorkers\n\tt.replicationWorkers = cfg.ReplicationWorkers\n\tif globalTransitionState != nil && cfg.TransitionWorkers != t.transitionWorkers {\n\t\tglobalTransitionState.UpdateWorkers(cfg.TransitionWorkers)\n\t}\n\tt.transitionWorkers = cfg.TransitionWorkers\n\n\tt.staleUploadsExpiry = cfg.StaleUploadsExpiry\n\tt.staleUploadsCleanupInterval = cfg.StaleUploadsCleanupInterval\n\tt.deleteCleanupInterval = cfg.DeleteCleanupInterval\n}\n\nfunc (t *apiConfig) getListQuorum() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.listQuorum\n}\n\nfunc (t *apiConfig) getCorsAllowOrigins() []string {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tcorsAllowOrigins := make([]string, len(t.corsAllowOrigins))\n\tcopy(corsAllowOrigins, t.corsAllowOrigins)\n\treturn corsAllowOrigins\n}\n\nfunc (t *apiConfig) getStaleUploadsCleanupInterval() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.staleUploadsCleanupInterval == 0 {\n\t\treturn 6 * time.Hour \/\/ default 6 hours\n\t}\n\n\treturn t.staleUploadsCleanupInterval\n}\n\nfunc (t *apiConfig) getStaleUploadsExpiry() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.staleUploadsExpiry == 0 {\n\t\treturn 24 * time.Hour \/\/ default 24 hours\n\t}\n\n\treturn t.staleUploadsExpiry\n}\n\nfunc (t *apiConfig) getDeleteCleanupInterval() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.deleteCleanupInterval == 0 {\n\t\treturn 5 * time.Minute \/\/ every 5 minutes\n\t}\n\n\treturn t.deleteCleanupInterval\n}\n\nfunc (t *apiConfig) getClusterDeadline() time.Duration {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.clusterDeadline == 0 {\n\t\treturn 10 * time.Second\n\t}\n\n\treturn t.clusterDeadline\n}\n\nfunc (t *apiConfig) getRequestsPool() (chan struct{}, time.Duration) {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tif t.requestsPool == nil {\n\t\treturn nil, time.Duration(0)\n\t}\n\n\treturn t.requestsPool, t.requestsDeadline\n}\n\n\/\/ maxClients throttles the S3 API calls\nfunc maxClients(f http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpool, deadline := globalAPIConfig.getRequestsPool()\n\t\tif pool == nil {\n\t\t\tf.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tglobalHTTPStats.addRequestsInQueue(1)\n\n\t\tdeadlineTimer := time.NewTimer(deadline)\n\t\tdefer deadlineTimer.Stop()\n\n\t\tselect {\n\t\tcase pool <- struct{}{}:\n\t\t\tdefer func() { <-pool }()\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\tf.ServeHTTP(w, r)\n\t\tcase <-deadlineTimer.C:\n\t\t\t\/\/ Send a http timeout message\n\t\t\twriteErrorResponse(r.Context(), w,\n\t\t\t\terrorCodes.ToAPIErr(ErrOperationMaxedOut),\n\t\t\t\tr.URL)\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\treturn\n\t\tcase <-r.Context().Done():\n\t\t\tglobalHTTPStats.addRequestsInQueue(-1)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (t *apiConfig) getReplicationFailedWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.replicationFailedWorkers\n}\n\nfunc (t *apiConfig) getReplicationWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.replicationWorkers\n}\n\nfunc (t *apiConfig) getTransitionWorkers() int {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\treturn t.transitionWorkers\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/coreos\/go-systemd\/activation\"\n\t\"github.com\/qjcg\/horeb\"\n\tpb \"github.com\/qjcg\/horeb\/proto\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/grpclog\"\n)\n\nvar logger = grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)\n\nfunc main() {\n\tip := flag.String(\"i\", \"0.0.0.0\", \"ip address to listen on\")\n\tport := flag.String(\"p\", \"9999\", \"TCP port to listen on\")\n\tversion := flag.Bool(\"v\", false, \"print version\")\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(horeb.Version)\n\t\treturn\n\t}\n\n\tvar lis net.Listener\n\tlistenFlags := fmt.Sprintf(\"%s:%s\", *ip, *port)\n\n\t\/\/ Use systemd socket listener if available, otherwise fall back to\n\t\/\/ listenFlags parameters.\n\tlisteners, err := activation.Listeners()\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't get listeners: %s\\n\", err)\n\t}\n\n\tif len(listeners) == 1 {\n\t\tlis = listeners[0]\n\t\tlogger.Infof(\"Using systemd listener: %#v\\n\", lis)\n\t} else {\n\t\tlis, err = net.Listen(\"tcp\", listenFlags)\n\t\tif err != nil {\n\t\t\tlogger.Fatalf(\"Failed to listen: %v\", err)\n\t\t}\n\t\tlogger.Infof(\"Using flag listener: %#v\\n\", lis)\n\t}\n\n\ts := grpc.NewServer()\n\tpb.RegisterHorebServer(s, &server{})\n\n\tlogger.Infof(\"Horeb gRPC server listening on %#v\", lis)\n\n\tif err := s.Serve(lis); err != nil {\n\t\tlogger.Fatalf(\"failed to serve: %v\", err)\n\t}\n}\n\n\/\/ server is used to implement our horeb server.\ntype server struct{}\n\nfunc (s server) GetStream(rr *pb.RuneRequest, stream pb.Horeb_GetStreamServer) error {\n\tlogger.Infof(\"RECEIVED: %#v\", rr)\n\tblock, ok := horeb.Blocks[rr.Block]\n\tif !ok {\n\t\tlogger.Errorf(\"Invalid block: %s\\n\", rr.Block)\n\t\treturn fmt.Errorf(\"Invalid block: %s\", rr.Block)\n\t}\n\tfor i := 0; i < int(rr.Num); i++ {\n\t\tmyRandomRune := pb.Rune{R: string(block.RandomRune())}\n\t\tif err := stream.Send(&myRandomRune); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tlogger.Infof(\"SENT: %d %s\\n\", rr.Num, rr.Block)\n\treturn nil\n}\n<commit_msg>Tweak horebd logging<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/coreos\/go-systemd\/activation\"\n\t\"github.com\/qjcg\/horeb\"\n\tpb \"github.com\/qjcg\/horeb\/proto\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/grpclog\"\n)\n\nvar logger = grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)\n\nfunc main() {\n\tip := flag.String(\"i\", \"0.0.0.0\", \"ip address to listen on\")\n\tport := flag.String(\"p\", \"9999\", \"TCP port to listen on\")\n\tversion := flag.Bool(\"v\", false, \"print version\")\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(horeb.Version)\n\t\treturn\n\t}\n\n\tvar lis net.Listener\n\tlistenFlags := fmt.Sprintf(\"%s:%s\", *ip, *port)\n\n\t\/\/ Use systemd socket listener if available, otherwise fall back to\n\t\/\/ listenFlags parameters.\n\tlisteners, err := activation.Listeners()\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't get listeners: %s\\n\", err)\n\t}\n\n\tlogger.Infof(\"Systemd listeners: %#v\", listeners)\n\n\tif len(listeners) == 1 {\n\t\tlis = listeners[0]\n\t\tlogger.Infof(\"Using systemd listener: %#v\\n\", lis)\n\t} else {\n\t\tlis, err = net.Listen(\"tcp\", listenFlags)\n\t\tif err != nil {\n\t\t\tlogger.Fatalf(\"Failed to listen: %v\", err)\n\t\t}\n\t\tlogger.Infof(\"Using flag listener: %#v\\n\", lis)\n\t}\n\n\ts := grpc.NewServer()\n\tpb.RegisterHorebServer(s, &server{})\n\n\tlogger.Infof(\"Horeb gRPC server listening on %#v\", lis)\n\n\tif err := s.Serve(lis); err != nil {\n\t\tlogger.Fatalf(\"failed to serve: %#v\", err)\n\t}\n}\n\n\/\/ server is used to implement our horeb server.\ntype server struct{}\n\nfunc (s server) GetStream(rr *pb.RuneRequest, stream pb.Horeb_GetStreamServer) error {\n\tlogger.Infof(\"RECEIVED: %#v\", rr)\n\tblock, ok := horeb.Blocks[rr.Block]\n\tif !ok {\n\t\tlogger.Errorf(\"Invalid block: %s\\n\", rr.Block)\n\t\treturn fmt.Errorf(\"Invalid block: %s\", rr.Block)\n\t}\n\tfor i := 0; i < int(rr.Num); i++ {\n\t\tmyRandomRune := pb.Rune{R: string(block.RandomRune())}\n\t\tif err := stream.Send(&myRandomRune); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tlogger.Infof(\"SENT: %d %s\\n\", rr.Num, rr.Block)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Sascha Andres <sascha.andres@outlook.com>\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"strings\"\n\n\t\"os\"\n\n\t\"github.com\/chzyer\/readline\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sascha-andres\/devenv\"\n\t\"github.com\/sascha-andres\/devenv\/helper\"\n\t\"github.com\/sascha-andres\/devenv\/interactive\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar ev devenv.EnvironmentConfiguration\n\nvar completer = readline.NewPrefixCompleter(\n\treadline.PcItem(\"repo\",\n\t\treadline.PcItemDynamic(listRepositories(),\n\t\t\treadline.PcItem(\"branch\"),\n\t\t\treadline.PcItem(\"commit\"),\n\t\t\treadline.PcItem(\"log\"),\n\t\t\treadline.PcItem(\"shell\"),\n\t\t\treadline.PcItem(\"pull\"),\n\t\t\treadline.PcItem(\"push\"),\n\t\t\treadline.PcItem(\"status\"),\n\t\t\treadline.PcItem(\"pin\"),\n\t\t\treadline.PcItem(\"unpin\"),\n\t\t\treadline.PcItem(\"enable\"),\n\t\t\treadline.PcItem(\"disable\"),\n\t\t),\n\t),\n\treadline.PcItem(\"addrepo\"),\n\treadline.PcItem(\"branch\"),\n\treadline.PcItem(\"commit\"),\n\treadline.PcItem(\"delrepo\"),\n\treadline.PcItem(\"log\"),\n\treadline.PcItem(\"pull\"),\n\treadline.PcItem(\"push\"),\n\treadline.PcItem(\"status\"),\n\treadline.PcItem(\"quit\"),\n\treadline.PcItem(\"scan\"),\n\treadline.PcItem(\"shell\"),\n)\n\nfunc filterInput(r rune) (rune, bool) {\n\tswitch r {\n\t\/\/ block CtrlZ feature\n\tcase readline.CharCtrlZ:\n\t\treturn r, false\n\t}\n\treturn r, true\n}\n\nfunc setup(projectName string) error {\n\tif \"\" == projectName || !devenv.ProjectIsCreated(projectName) {\n\t\treturn errors.New(fmt.Sprintf(\"Project '%s' does not yet exist\", projectName))\n\t}\n\tif ok, err := helper.Exists(path.Join(viper.GetString(\"configpath\"), projectName+\".yaml\")); ok && err == nil {\n\t\tif err := ev.LoadFromFile(path.Join(viper.GetString(\"configpath\"), projectName+\".yaml\")); err != nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Error reading env config: '%s'\", err.Error()))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc runInterpreter(args []string) error {\n\tprojectName := strings.Join(args, \" \")\n\tlog.Printf(\"Called to start shell for '%s'\", projectName)\n\tif \"\" == projectName {\n\t\tos.Exit(1)\n\t}\n\tsetup(projectName)\n\n\tinterpreter := interactive.NewInterpreter(path.Join(viper.GetString(\"basepath\"), projectName), ev)\n\tl, err := getReadlineConfig(projectName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Fatalf(\"Error closing readline: \" + err.Error())\n\t\t}\n\t}()\n\n\tlog.SetOutput(l.Stderr())\n\n\tfor {\n\t\tline, doBreak := getLine(l)\n\t\tif doBreak {\n\t\t\tbreak\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tswitch line {\n\t\tcase \"quit\", \"q\":\n\t\t\treturn nil\n\t\tdefault:\n\t\t\texecuteLine(interpreter, line)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getLine(l *readline.Instance) (string, bool) {\n\tline, err := l.Readline()\n\tif err == readline.ErrInterrupt {\n\t\tif len(line) == 0 {\n\t\t\treturn \"\", true\n\t\t}\n\t\treturn line, false\n\t} else if err == io.EOF {\n\t\treturn \"\", true\n\t}\n\treturn line, false\n}\n\nfunc executeLine(interpreter *interactive.Interpreter, line string) {\n\terr := interpreter.Execute(line)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}\nfunc getReadlineConfig(projectName string) (*readline.Instance, error) {\n\treturn readline.NewEx(&readline.Config{\n\t\tPrompt: \"\\033[31m»\\033[0m \",\n\t\tHistoryFile: \"\/tmp\/devenv-\" + projectName + \".tmp\",\n\t\tAutoComplete: completer,\n\t\tInterruptPrompt: \"^C\",\n\t\tEOFPrompt: \"exit\",\n\n\t\tHistorySearchFold: true,\n\t\tFuncFilterInputRune: filterInput,\n\t})\n}\n\nfunc listRepositories() func(string) []string {\n\treturn func(line string) []string {\n\t\tvar repositories []string\n\t\tfor _, val := range ev.Repositories {\n\t\t\tif !val.Disabled {\n\t\t\t\trepositories = append(repositories, val.Name)\n\t\t\t}\n\t\t}\n\t\treturn repositories\n\t}\n}\n<commit_msg>Re-setup after add repository<commit_after>\/\/ Copyright © 2017 Sascha Andres <sascha.andres@outlook.com>\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"strings\"\n\n\t\"os\"\n\n\t\"github.com\/chzyer\/readline\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sascha-andres\/devenv\"\n\t\"github.com\/sascha-andres\/devenv\/helper\"\n\t\"github.com\/sascha-andres\/devenv\/interactive\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar ev devenv.EnvironmentConfiguration\n\nvar completer = readline.NewPrefixCompleter(\n\treadline.PcItem(\"repo\",\n\t\treadline.PcItemDynamic(listRepositories(),\n\t\t\treadline.PcItem(\"branch\"),\n\t\t\treadline.PcItem(\"commit\"),\n\t\t\treadline.PcItem(\"log\"),\n\t\t\treadline.PcItem(\"shell\"),\n\t\t\treadline.PcItem(\"pull\"),\n\t\t\treadline.PcItem(\"push\"),\n\t\t\treadline.PcItem(\"status\"),\n\t\t\treadline.PcItem(\"pin\"),\n\t\t\treadline.PcItem(\"unpin\"),\n\t\t\treadline.PcItem(\"enable\"),\n\t\t\treadline.PcItem(\"disable\"),\n\t\t),\n\t),\n\treadline.PcItem(\"addrepo\"),\n\treadline.PcItem(\"branch\"),\n\treadline.PcItem(\"commit\"),\n\treadline.PcItem(\"delrepo\"),\n\treadline.PcItem(\"log\"),\n\treadline.PcItem(\"pull\"),\n\treadline.PcItem(\"push\"),\n\treadline.PcItem(\"status\"),\n\treadline.PcItem(\"quit\"),\n\treadline.PcItem(\"scan\"),\n\treadline.PcItem(\"shell\"),\n)\n\nfunc filterInput(r rune) (rune, bool) {\n\tswitch r {\n\t\/\/ block CtrlZ feature\n\tcase readline.CharCtrlZ:\n\t\treturn r, false\n\t}\n\treturn r, true\n}\n\nfunc setup(projectName string) error {\n\tif \"\" == projectName || !devenv.ProjectIsCreated(projectName) {\n\t\treturn errors.New(fmt.Sprintf(\"Project '%s' does not yet exist\", projectName))\n\t}\n\tif ok, err := helper.Exists(path.Join(viper.GetString(\"configpath\"), projectName+\".yaml\")); ok && err == nil {\n\t\tif err := ev.LoadFromFile(path.Join(viper.GetString(\"configpath\"), projectName+\".yaml\")); err != nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Error reading env config: '%s'\", err.Error()))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc runInterpreter(args []string) error {\n\tprojectName := strings.Join(args, \" \")\n\tlog.Printf(\"Called to start shell for '%s'\", projectName)\n\tif \"\" == projectName {\n\t\tos.Exit(1)\n\t}\n\tsetup(projectName)\n\n\tinterpreter := interactive.NewInterpreter(path.Join(viper.GetString(\"basepath\"), projectName), ev)\n\tl, err := getReadlineConfig(projectName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Fatalf(\"Error closing readline: \" + err.Error())\n\t\t}\n\t}()\n\n\tlog.SetOutput(l.Stderr())\n\n\tfor {\n\t\tline, doBreak := getLine(l)\n\t\tif doBreak {\n\t\t\tbreak\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tswitch line {\n\t\tcase \"quit\", \"q\":\n\t\t\treturn nil\n\t\tdefault:\n\t\t\texecuteLine(interpreter, line)\n\t\t\tif err := setup(projectName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getLine(l *readline.Instance) (string, bool) {\n\tline, err := l.Readline()\n\tif err == readline.ErrInterrupt {\n\t\tif len(line) == 0 {\n\t\t\treturn \"\", true\n\t\t}\n\t\treturn line, false\n\t} else if err == io.EOF {\n\t\treturn \"\", true\n\t}\n\treturn line, false\n}\n\nfunc executeLine(interpreter *interactive.Interpreter, line string) {\n\terr := interpreter.Execute(line)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}\nfunc getReadlineConfig(projectName string) (*readline.Instance, error) {\n\treturn readline.NewEx(&readline.Config{\n\t\tPrompt: \"\\033[31m»\\033[0m \",\n\t\tHistoryFile: \"\/tmp\/devenv-\" + projectName + \".tmp\",\n\t\tAutoComplete: completer,\n\t\tInterruptPrompt: \"^C\",\n\t\tEOFPrompt: \"exit\",\n\n\t\tHistorySearchFold: true,\n\t\tFuncFilterInputRune: filterInput,\n\t})\n}\n\nfunc listRepositories() func(string) []string {\n\treturn func(line string) []string {\n\t\tvar repositories []string\n\t\tfor _, val := range ev.Repositories {\n\t\t\tif !val.Disabled {\n\t\t\t\trepositories = append(repositories, val.Name)\n\t\t\t}\n\t\t}\n\t\treturn repositories\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/trivial\"\n\t\"launchpad.net\/juju-core\/worker\"\n\t\"launchpad.net\/juju-core\/worker\/deployer\"\n\t\"launchpad.net\/tomb\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ requiredError is useful when complaining about missing command-line options.\nfunc requiredError(name string) error {\n\treturn fmt.Errorf(\"--%s option must be set\", name)\n}\n\n\/\/ stateServersValue implements gnuflag.Value on a slice of server addresses\ntype stateServersValue []string\n\nvar validAddr = regexp.MustCompile(\"^.+:[0-9]+$\")\n\n\/\/ Set splits the comma-separated list of state server addresses and stores\n\/\/ onto v's Addrs. Addresses must include port numbers.\nfunc (v *stateServersValue) Set(value string) error {\n\taddrs := strings.Split(value, \",\")\n\tfor _, addr := range addrs {\n\t\tif !validAddr.MatchString(addr) {\n\t\t\treturn fmt.Errorf(\"%q is not a valid state server address\", addr)\n\t\t}\n\t}\n\t*v = addrs\n\treturn nil\n}\n\n\/\/ String returns the list of server addresses joined by commas.\nfunc (v *stateServersValue) String() string {\n\tif *v != nil {\n\t\treturn strings.Join(*v, \",\")\n\t}\n\treturn \"\"\n}\n\n\/\/ stateServersVar sets up a gnuflag flag analogous to the FlagSet.*Var methods.\nfunc stateServersVar(fs *gnuflag.FlagSet, target *[]string, name string, value []string, usage string) {\n\t*target = value\n\tfs.Var((*stateServersValue)(target), name, usage)\n}\n\n\/\/ AgentConf handles command-line flags shared by all agents.\ntype AgentConf struct {\n\taccept agentFlags\n\tDataDir string\n\tStateInfo state.Info\n\tInitialPassword string\n\tcaCertFile string\n}\n\ntype agentFlags int\n\nconst (\n\tflagStateInfo agentFlags = 1 << iota\n\tflagInitialPassword\n\tflagDataDir\n\n\tflagAll agentFlags = ^0\n)\n\n\/\/ addFlags injects common agent flags into f.\nfunc (c *AgentConf) addFlags(f *gnuflag.FlagSet, accept agentFlags) {\n\tif accept&flagDataDir != 0 {\n\t\tf.StringVar(&c.DataDir, \"data-dir\", \"\/var\/lib\/juju\", \"directory for juju data\")\n\t}\n\tif accept&flagStateInfo != 0 {\n\t\tstateServersVar(f, &c.StateInfo.Addrs, \"state-servers\", nil, \"state servers to connect to\")\n\t\tf.StringVar(&c.caCertFile, \"ca-cert\", \"\", \"path to CA certificate in PEM format\")\n\t}\n\tif accept&flagInitialPassword != 0 {\n\t\tf.StringVar(&c.InitialPassword, \"initial-password\", \"\", \"initial password for state\")\n\t}\n\tc.accept = accept\n}\n\n\/\/ checkArgs checks that required flags have been set and that args is empty.\nfunc (c *AgentConf) checkArgs(args []string) error {\n\tif c.accept&flagDataDir != 0 && c.DataDir == \"\" {\n\t\treturn requiredError(\"data-dir\")\n\t}\n\tif c.accept&flagStateInfo != 0 {\n\t\tif c.StateInfo.Addrs == nil {\n\t\t\treturn requiredError(\"state-servers\")\n\t\t}\n\t\tif c.caCertFile == \"\" {\n\t\t\treturn requiredError(\"ca-cert\")\n\t\t}\n\t\tvar err error\n\t\tc.StateInfo.CACert, err = ioutil.ReadFile(c.caCertFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cmd.CheckEmpty(args)\n}\n\ntype task interface {\n\tStop() error\n\tWait() error\n\tString() string\n}\n\n\/\/ runTasks runs all the given tasks until any of them fails with an\n\/\/ error. It then stops all of them and returns that error. If a value\n\/\/ is received on the stop channel, the workers are stopped.\n\/\/ The task values should be comparable.\nfunc runTasks(stop <-chan struct{}, tasks ...task) (err error) {\n\ttype errInfo struct {\n\t\tindex int\n\t\terr error\n\t}\n\tdone := make(chan errInfo, len(tasks))\n\tfor i, t := range tasks {\n\t\ti, t := i, t\n\t\tgo func() {\n\t\t\tdone <- errInfo{i, t.Wait()}\n\t\t}()\n\t}\n\tchosen := errInfo{index: -1}\nwaiting:\n\tfor _ = range tasks {\n\t\tselect {\n\t\tcase info := <-done:\n\t\t\tif info.err != nil {\n\t\t\t\tchosen = info\n\t\t\t\tbreak waiting\n\t\t\t}\n\t\tcase <-stop:\n\t\t\tbreak waiting\n\t\t}\n\t}\n\t\/\/ Stop all the tasks. If we've been upgraded,\n\t\/\/ that error taks precedence over other errors, because\n\t\/\/ that's the only way we can escape bad code.\n\tfor i, t := range tasks {\n\t\tif err := t.Stop(); isUpgraded(err) || err != nil && chosen.err == nil {\n\t\t\tchosen = errInfo{i, err}\n\t\t}\n\t}\n\t\/\/ Log any errors that we're discarding.\n\tfor i, t := range tasks {\n\t\tif i == chosen.index {\n\t\t\tcontinue\n\t\t}\n\t\tif err := t.Wait(); err != nil {\n\t\t\tlog.Printf(\"cmd\/jujud: %s: %v\", tasks[i], err)\n\t\t}\n\t}\n\treturn chosen.err\n}\n\nfunc isUpgraded(err error) bool {\n\t_, ok := err.(*UpgradeReadyError)\n\treturn ok\n}\n\ntype Agent interface {\n\tTomb() *tomb.Tomb\n\tRunOnce(st *state.State, entity AgentState) error\n\tEntity(st *state.State) (AgentState, error)\n\tEntityName() string\n}\n\nfunc RunLoop(c *AgentConf, a Agent) error {\n\tatomb := a.Tomb()\n\tfor atomb.Err() == tomb.ErrStillAlive {\n\t\tlog.Printf(\"cmd\/jujud: agent starting\")\n\t\terr := runOnce(c, a)\n\t\tif ug, ok := err.(*UpgradeReadyError); ok {\n\t\t\tif err = ug.ChangeAgentTools(); err == nil {\n\t\t\t\t\/\/ Return and let upstart deal with the restart.\n\t\t\t\treturn ug\n\t\t\t}\n\t\t}\n\t\tif err == worker.ErrDead {\n\t\t\tlog.Printf(\"cmd\/jujud: agent is dead\")\n\t\t\treturn nil\n\t\t}\n\t\tif err == nil {\n\t\t\tlog.Printf(\"cmd\/jujud: workers died with no error\")\n\t\t} else {\n\t\t\tlog.Printf(\"cmd\/jujud: %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-atomb.Dying():\n\t\t\tatomb.Kill(err)\n\t\tcase <-time.After(retryDelay):\n\t\t\tlog.Printf(\"cmd\/jujud: rerunning machiner\")\n\t\t}\n\t}\n\treturn atomb.Err()\n}\n\nfunc runOnce(c *AgentConf, a Agent) error {\n\tst, password, err := openState(a.EntityName(), c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer st.Close()\n\tentity, err := a.Entity(st)\n\tif state.IsNotFound(err) || err == nil && entity.Life() == state.Dead {\n\t\treturn worker.ErrDead\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif password != \"\" {\n\t\tif err := entity.SetPassword(password); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn a.RunOnce(st, entity)\n}\n\n\/\/ openState tries to open the state with the given entity name\n\/\/ and configuration information. If the returned password\n\/\/ is non-empty, the caller should set the entity's password\n\/\/ accordingly.\nfunc openState(entityName string, conf *AgentConf) (st *state.State, password string, err error) {\n\tpwfile := filepath.Join(environs.AgentDir(conf.DataDir, entityName), \"password\")\n\tdata, err := ioutil.ReadFile(pwfile)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, \"\", err\n\t}\n\tinfo := conf.StateInfo\n\tinfo.EntityName = entityName\n\tif err == nil {\n\t\tinfo.Password = string(data)\n\t\tst, err := state.Open(&info)\n\t\tif err == nil {\n\t\t\treturn st, \"\", nil\n\t\t}\n\t\tif err != state.ErrUnauthorized {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\t\/\/ Access isn't authorized even though the password was\n\t\t\/\/ saved. This can happen if we crash after saving the\n\t\t\/\/ password but before changing the password, so we'll\n\t\t\/\/ try again with the initial password.\n\t}\n\tinfo.Password = conf.InitialPassword\n\tst, err = state.Open(&info)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ We've succeeded in connecting with the initial password, so\n\t\/\/ we can now change it to something more private.\n\tpassword, err = trivial.RandomPassword()\n\tif err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", err\n\t}\n\tif err := ioutil.WriteFile(pwfile, []byte(password), 0600); err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", fmt.Errorf(\"cannot save password: %v\", err)\n\t}\n\treturn st, password, nil\n}\n\n\/\/ newDeployManager gives the tests the opportunity to create a deployer.Manager\n\/\/ that can be used for testing so as to avoid (1) deploying units to the system\n\/\/ running the tests and (2) get access to the *State used internally, so that\n\/\/ tests can be run without waiting for the 5s watcher refresh time we would\n\/\/ otherwise be restricted to. When not testing, st is unused.\nvar newDeployManager = func(st *state.State, info *state.Info, dataDir string) deployer.Manager {\n\t\/\/ TODO: pick manager kind based on entity name? (once we have a\n\t\/\/ container manager for prinicpal units, that is; for now, there\n\t\/\/ is no distinction between principal and subordinate deployments)\n\treturn deployer.NewSimpleManager(info, dataDir)\n}\n\nfunc newDeployer(st *state.State, w *state.UnitsWatcher, dataDir string) *deployer.Deployer {\n\tinfo := &state.Info{\n\t\tEntityName: w.EntityName(),\n\t\tAddrs: st.Addrs(),\n\t\tCACert: st.CACert(),\n\t}\n\tmgr := newDeployManager(st, info, dataDir)\n\treturn deployer.NewDeployer(st, mgr, w)\n}\n<commit_msg>cmd\/jujud: fix run loop<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/log\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/trivial\"\n\t\"launchpad.net\/juju-core\/worker\"\n\t\"launchpad.net\/juju-core\/worker\/deployer\"\n\t\"launchpad.net\/tomb\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ requiredError is useful when complaining about missing command-line options.\nfunc requiredError(name string) error {\n\treturn fmt.Errorf(\"--%s option must be set\", name)\n}\n\n\/\/ stateServersValue implements gnuflag.Value on a slice of server addresses\ntype stateServersValue []string\n\nvar validAddr = regexp.MustCompile(\"^.+:[0-9]+$\")\n\n\/\/ Set splits the comma-separated list of state server addresses and stores\n\/\/ onto v's Addrs. Addresses must include port numbers.\nfunc (v *stateServersValue) Set(value string) error {\n\taddrs := strings.Split(value, \",\")\n\tfor _, addr := range addrs {\n\t\tif !validAddr.MatchString(addr) {\n\t\t\treturn fmt.Errorf(\"%q is not a valid state server address\", addr)\n\t\t}\n\t}\n\t*v = addrs\n\treturn nil\n}\n\n\/\/ String returns the list of server addresses joined by commas.\nfunc (v *stateServersValue) String() string {\n\tif *v != nil {\n\t\treturn strings.Join(*v, \",\")\n\t}\n\treturn \"\"\n}\n\n\/\/ stateServersVar sets up a gnuflag flag analogous to the FlagSet.*Var methods.\nfunc stateServersVar(fs *gnuflag.FlagSet, target *[]string, name string, value []string, usage string) {\n\t*target = value\n\tfs.Var((*stateServersValue)(target), name, usage)\n}\n\n\/\/ AgentConf handles command-line flags shared by all agents.\ntype AgentConf struct {\n\taccept agentFlags\n\tDataDir string\n\tStateInfo state.Info\n\tInitialPassword string\n\tcaCertFile string\n}\n\ntype agentFlags int\n\nconst (\n\tflagStateInfo agentFlags = 1 << iota\n\tflagInitialPassword\n\tflagDataDir\n\n\tflagAll agentFlags = ^0\n)\n\n\/\/ addFlags injects common agent flags into f.\nfunc (c *AgentConf) addFlags(f *gnuflag.FlagSet, accept agentFlags) {\n\tif accept&flagDataDir != 0 {\n\t\tf.StringVar(&c.DataDir, \"data-dir\", \"\/var\/lib\/juju\", \"directory for juju data\")\n\t}\n\tif accept&flagStateInfo != 0 {\n\t\tstateServersVar(f, &c.StateInfo.Addrs, \"state-servers\", nil, \"state servers to connect to\")\n\t\tf.StringVar(&c.caCertFile, \"ca-cert\", \"\", \"path to CA certificate in PEM format\")\n\t}\n\tif accept&flagInitialPassword != 0 {\n\t\tf.StringVar(&c.InitialPassword, \"initial-password\", \"\", \"initial password for state\")\n\t}\n\tc.accept = accept\n}\n\n\/\/ checkArgs checks that required flags have been set and that args is empty.\nfunc (c *AgentConf) checkArgs(args []string) error {\n\tif c.accept&flagDataDir != 0 && c.DataDir == \"\" {\n\t\treturn requiredError(\"data-dir\")\n\t}\n\tif c.accept&flagStateInfo != 0 {\n\t\tif c.StateInfo.Addrs == nil {\n\t\t\treturn requiredError(\"state-servers\")\n\t\t}\n\t\tif c.caCertFile == \"\" {\n\t\t\treturn requiredError(\"ca-cert\")\n\t\t}\n\t\tvar err error\n\t\tc.StateInfo.CACert, err = ioutil.ReadFile(c.caCertFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cmd.CheckEmpty(args)\n}\n\ntype task interface {\n\tStop() error\n\tWait() error\n\tString() string\n}\n\n\/\/ runTasks runs all the given tasks until any of them fails with an\n\/\/ error. It then stops all of them and returns that error. If a value\n\/\/ is received on the stop channel, the workers are stopped.\n\/\/ The task values should be comparable.\nfunc runTasks(stop <-chan struct{}, tasks ...task) (err error) {\n\ttype errInfo struct {\n\t\tindex int\n\t\terr error\n\t}\n\tdone := make(chan errInfo, len(tasks))\n\tfor i, t := range tasks {\n\t\ti, t := i, t\n\t\tgo func() {\n\t\t\tdone <- errInfo{i, t.Wait()}\n\t\t}()\n\t}\n\tchosen := errInfo{index: -1}\nwaiting:\n\tfor _ = range tasks {\n\t\tselect {\n\t\tcase info := <-done:\n\t\t\tif info.err != nil {\n\t\t\t\tchosen = info\n\t\t\t\tbreak waiting\n\t\t\t}\n\t\tcase <-stop:\n\t\t\tbreak waiting\n\t\t}\n\t}\n\t\/\/ Stop all the tasks. If we've been upgraded,\n\t\/\/ that error taks precedence over other errors, because\n\t\/\/ that's the only way we can escape bad code.\n\tfor i, t := range tasks {\n\t\tif err := t.Stop(); isUpgraded(err) || err != nil && chosen.err == nil {\n\t\t\tchosen = errInfo{i, err}\n\t\t}\n\t}\n\t\/\/ Log any errors that we're discarding.\n\tfor i, t := range tasks {\n\t\tif i == chosen.index {\n\t\t\tcontinue\n\t\t}\n\t\tif err := t.Wait(); err != nil {\n\t\t\tlog.Printf(\"cmd\/jujud: %s: %v\", tasks[i], err)\n\t\t}\n\t}\n\treturn chosen.err\n}\n\nfunc isUpgraded(err error) bool {\n\t_, ok := err.(*UpgradeReadyError)\n\treturn ok\n}\n\ntype Agent interface {\n\tTomb() *tomb.Tomb\n\tRunOnce(st *state.State, entity AgentState) error\n\tEntity(st *state.State) (AgentState, error)\n\tEntityName() string\n}\n\nfunc RunLoop(c *AgentConf, a Agent) error {\n\tatomb := a.Tomb()\n\tfor {\n\t\tlog.Printf(\"cmd\/jujud: agent starting\")\n\t\terr := runOnce(c, a)\n\t\tif ug, ok := err.(*UpgradeReadyError); ok {\n\t\t\tif err = ug.ChangeAgentTools(); err == nil {\n\t\t\t\t\/\/ Return and let upstart deal with the restart.\n\t\t\t\treturn ug\n\t\t\t}\n\t\t}\n\t\tif err == worker.ErrDead {\n\t\t\tlog.Printf(\"cmd\/jujud: agent is dead\")\n\t\t\treturn nil\n\t\t}\n\t\tif err == nil {\n\t\t\tlog.Printf(\"cmd\/jujud: workers died with no error\")\n\t\t} else {\n\t\t\tlog.Printf(\"cmd\/jujud: %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-atomb.Dying():\n\t\t\tatomb.Kill(err)\n\t\tcase <-time.After(retryDelay):\n\t\t\tlog.Printf(\"cmd\/jujud: rerunning machiner\")\n\t\t}\n\t\t\/\/ Note: we don't want this at the head of the loop\n\t\t\/\/ because we want the agent to go through the body of\n\t\t\/\/ the loop at least once, even if it's been killed\n\t\t\/\/ before the start.\n\t\tif atomb.Err() != tomb.ErrStillAlive {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn atomb.Err()\n}\n\nfunc runOnce(c *AgentConf, a Agent) error {\n\tst, password, err := openState(a.EntityName(), c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer st.Close()\n\tentity, err := a.Entity(st)\n\tif state.IsNotFound(err) || err == nil && entity.Life() == state.Dead {\n\t\treturn worker.ErrDead\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif password != \"\" {\n\t\tif err := entity.SetPassword(password); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn a.RunOnce(st, entity)\n}\n\n\/\/ openState tries to open the state with the given entity name\n\/\/ and configuration information. If the returned password\n\/\/ is non-empty, the caller should set the entity's password\n\/\/ accordingly.\nfunc openState(entityName string, conf *AgentConf) (st *state.State, password string, err error) {\n\tpwfile := filepath.Join(environs.AgentDir(conf.DataDir, entityName), \"password\")\n\tdata, err := ioutil.ReadFile(pwfile)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, \"\", err\n\t}\n\tinfo := conf.StateInfo\n\tinfo.EntityName = entityName\n\tif err == nil {\n\t\tinfo.Password = string(data)\n\t\tst, err := state.Open(&info)\n\t\tif err == nil {\n\t\t\treturn st, \"\", nil\n\t\t}\n\t\tif err != state.ErrUnauthorized {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\t\/\/ Access isn't authorized even though the password was\n\t\t\/\/ saved. This can happen if we crash after saving the\n\t\t\/\/ password but before changing the password, so we'll\n\t\t\/\/ try again with the initial password.\n\t}\n\tinfo.Password = conf.InitialPassword\n\tst, err = state.Open(&info)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ We've succeeded in connecting with the initial password, so\n\t\/\/ we can now change it to something more private.\n\tpassword, err = trivial.RandomPassword()\n\tif err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", err\n\t}\n\tif err := ioutil.WriteFile(pwfile, []byte(password), 0600); err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", fmt.Errorf(\"cannot save password: %v\", err)\n\t}\n\treturn st, password, nil\n}\n\n\/\/ newDeployManager gives the tests the opportunity to create a deployer.Manager\n\/\/ that can be used for testing so as to avoid (1) deploying units to the system\n\/\/ running the tests and (2) get access to the *State used internally, so that\n\/\/ tests can be run without waiting for the 5s watcher refresh time we would\n\/\/ otherwise be restricted to. When not testing, st is unused.\nvar newDeployManager = func(st *state.State, info *state.Info, dataDir string) deployer.Manager {\n\t\/\/ TODO: pick manager kind based on entity name? (once we have a\n\t\/\/ container manager for prinicpal units, that is; for now, there\n\t\/\/ is no distinction between principal and subordinate deployments)\n\treturn deployer.NewSimpleManager(info, dataDir)\n}\n\nfunc newDeployer(st *state.State, w *state.UnitsWatcher, dataDir string) *deployer.Deployer {\n\tinfo := &state.Info{\n\t\tEntityName: w.EntityName(),\n\t\tAddrs: st.Addrs(),\n\t\tCACert: st.CACert(),\n\t}\n\tmgr := newDeployManager(st, info, dataDir)\n\treturn deployer.NewDeployer(st, mgr, w)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ikawaha\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ \tYou may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/lattice\"\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/server\"\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/tokenize\"\n)\n\nvar (\n\terrorWriter = os.Stderr\n\n\tsubcommands = []struct {\n\t\tName string\n\t\tDescription string\n\t\tRun func([]string) error\n\t\tUsage func()\n\t\tOptionCheck func([]string) error\n\t\tPrintDefaults func(flag.ErrorHandling)\n\t}{\n\t\t{\n\t\t\ttokenize.CommandName, tokenize.Description,\n\t\t\ttokenize.Run,\n\t\t\ttokenize.Usage, tokenize.OptionCheck, tokenize.PrintDefaults,\n\t\t},\n\t\t{\n\t\t\tserver.CommandName, server.Description,\n\t\t\tserver.Run,\n\t\t\tserver.Usage, server.OptionCheck, server.PrintDefaults,\n\t\t},\n\t\t{\n\t\t\tlattice.CommandName, lattice.Description,\n\t\t\tlattice.Run,\n\t\t\tlattice.Usage, lattice.OptionCheck, lattice.PrintDefaults,\n\t\t},\n\t}\n\n\tdefaultSubcommand = subcommands[0]\n)\n\n\/\/Usage prints to stdout information about the tool\nfunc Usage() {\n\tfmt.Fprintf(errorWriter, \"Japanese Morphological Analyzer -- github.com\/ikawaha\/kagome\\n\")\n\tfmt.Fprintf(errorWriter, \"usage: %s <command>\\n\", filepath.Base(os.Args[0]))\n}\n\n\/\/ PrintDefaults prints out the default flags\nfunc PrintDefaults() {\n\tfmt.Fprintln(errorWriter, \"The commands are:\")\n\tfor _, c := range subcommands {\n\t\tif c.Name == defaultSubcommand.Name {\n\t\t\tfmt.Fprintf(errorWriter, \" [%s] - %s (*default)\\n\", c.Name, c.Description)\n\t\t} else {\n\t\t\tfmt.Fprintf(errorWriter, \" %s - %s\\n\", c.Name, c.Description)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tvar (\n\t\tcmd func([]string) error\n\t\toptions []string\n\t)\n\tif len(os.Args) >= 2 {\n\t\toptions = os.Args[2:]\n\t\tfor i := range subcommands {\n\t\t\tif os.Args[1] == subcommands[i].Name {\n\t\t\t\tcmd = subcommands[i].Run\n\t\t\t}\n\t\t}\n\t}\n\tif cmd == nil {\n\t\toptions = os.Args[1:]\n\t\tif e := defaultSubcommand.OptionCheck(options); e != nil {\n\t\t\tUsage()\n\t\t\tPrintDefaults()\n\t\t\tfmt.Fprintln(errorWriter)\n\t\t\tdefaultSubcommand.Usage()\n\t\t\tdefaultSubcommand.PrintDefaults(flag.ExitOnError)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcmd = defaultSubcommand.Run\n\t}\n\tif e := cmd(options); e != nil {\n\t\tfmt.Fprintf(errorWriter, \"%v\\n\", e)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>feat: version printing<commit_after>\/\/ Copyright 2015 ikawaha\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ \tYou may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/lattice\"\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/server\"\n\t\"github.com\/ikawaha\/kagome\/cmd\/kagome\/tokenize\"\n)\n\nvar (\n\terrorWriter = os.Stderr\n\n\tversion_kagome string\n\n\tsubcommands = []struct {\n\t\tName string\n\t\tDescription string\n\t\tRun func([]string) error\n\t\tUsage func()\n\t\tOptionCheck func([]string) error\n\t\tPrintDefaults func(flag.ErrorHandling)\n\t}{\n\t\t{\n\t\t\ttokenize.CommandName, tokenize.Description,\n\t\t\ttokenize.Run,\n\t\t\ttokenize.Usage, tokenize.OptionCheck, tokenize.PrintDefaults,\n\t\t},\n\t\t{\n\t\t\tserver.CommandName, server.Description,\n\t\t\tserver.Run,\n\t\t\tserver.Usage, server.OptionCheck, server.PrintDefaults,\n\t\t},\n\t\t{\n\t\t\tlattice.CommandName, lattice.Description,\n\t\t\tlattice.Run,\n\t\t\tlattice.Usage, lattice.OptionCheck, lattice.PrintDefaults,\n\t\t},\n\t}\n\n\tdefaultSubcommand = subcommands[0]\n)\n\n\/\/Usage prints to stdout information about the tool\nfunc Usage() {\n\tfmt.Fprintf(errorWriter, \"Japanese Morphological Analyzer -- github.com\/ikawaha\/kagome\\n\")\n\tfmt.Fprintf(errorWriter, \"usage: %s <command>\\n\", filepath.Base(os.Args[0]))\n}\n\n\/\/ PrintDefaults prints out the default flags\nfunc PrintDefaults() {\n\tfmt.Fprintln(errorWriter, \"The commands are:\")\n\tfor _, c := range subcommands {\n\t\tif c.Name == defaultSubcommand.Name {\n\t\t\tfmt.Fprintf(errorWriter, \" [%s] - %s (*default)\\n\", c.Name, c.Description)\n\t\t} else {\n\t\t\tfmt.Fprintf(errorWriter, \" %s - %s\\n\", c.Name, c.Description)\n\t\t}\n\t}\n\tfmt.Fprintf(errorWriter, \" %s - %s\\n\", \"version\", \"show version\")\n}\n\n\/\/ PrintVersion prints out the app version.\n\/\/ This must be specified by \"-X\" option during the go build. Such like:\n\/\/ $ go build --ldflags \"-X 'main.version_kagome=${version_app}'\"\nfunc PrintVersion() {\n\tif version_kagome == \"\" {\n\t\tversion_kagome = \"(version not defined)\"\n\t}\n\tfmt.Printf(\"%s %s\\n\", filepath.Base(os.Args[0]), version_kagome)\n}\n\nfunc main() {\n\tvar (\n\t\tcmd func([]string) error\n\t\toptions []string\n\t)\n\tif len(os.Args) >= 2 {\n\t\toptions = os.Args[2:]\n\t\tfor i := range subcommands {\n\t\t\tif os.Args[1] == subcommands[i].Name {\n\t\t\t\tcmd = subcommands[i].Run\n\t\t\t}\n\t\t}\n\t}\n\tif cmd == nil {\n\t\toptions = os.Args[1:]\n\t\tif options[0] == \"version\" || options[0] == \"-v\" || options[0] == \"--version\" {\n\t\t\tPrintVersion()\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif e := defaultSubcommand.OptionCheck(options); e != nil {\n\t\t\tUsage()\n\t\t\tPrintDefaults()\n\t\t\tfmt.Fprintln(errorWriter)\n\t\t\tdefaultSubcommand.Usage()\n\t\t\tdefaultSubcommand.PrintDefaults(flag.ExitOnError)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcmd = defaultSubcommand.Run\n\t}\n\tif e := cmd(options); e != nil {\n\t\tfmt.Fprintf(errorWriter, \"%v\\n\", e)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package serve\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc server(port string) {\n\tfmt.Printf(\"Port is %s.\\n\", port)\n\tln, _ := net.Listen(\"tcp\", \":\"+port)\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t\tcontinue\n\t\t}\n\t\tgo handleConnection(conn)\n\t}\n}\n\nfunc handleConnection(conn net.Conn) {\n\tdefer conn.Close()\n\tf, _ := os.Open(\"go.pac\")\n\tdefer f.Close()\n\tio.Copy(conn, f)\n}\n\nfunc Action(c *cli.Context) {\n\tvar port = c.Int(\"port\")\n\tif port <= 0 {\n\t\treturn\n\t}\n\n\tserver(strconv.Itoa(port))\n}\n<commit_msg>improve http server<commit_after>package serve\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\ntype Tiny struct {\n\tfile string\n}\n\nfunc (t *Tiny) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" && r.Method != \"HEAD\" {\n\t\treturn\n\t}\n\tf, _ := os.Open(t.file)\n\tdefer f.Close()\n\tfi, _ := f.Stat()\n\thttp.ServeContent(rw, r, t.file, fi.ModTime(), f)\n}\n\nfunc Action(c *cli.Context) {\n\tvar port = c.String(\"port\")\n\tif port == \"\" {\n\t\treturn\n\t}\n\n\tt := Tiny{\"go.pac\"}\n\thttp.ListenAndServe(\":\"+port, t)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/containers\/image\/copy\"\n\t\"github.com\/containers\/image\/docker\/reference\"\n\t\"github.com\/containers\/image\/manifest\"\n\t\"github.com\/containers\/image\/transports\"\n\t\"github.com\/containers\/image\/transports\/alltransports\"\n\t\"github.com\/containers\/image\/types\"\n\timgspecv1 \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ contextsFromGlobalOptions returns source and destionation types.SystemContext depending on c.\nfunc contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {\n\tsourceCtx, err := contextFromGlobalOptions(c, \"src-\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdestinationCtx, err := contextFromGlobalOptions(c, \"dest-\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn sourceCtx, destinationCtx, nil\n}\n\nfunc copyHandler(c *cli.Context) error {\n\tif len(c.Args()) != 2 {\n\t\treturn cli.ShowCommandHelp(c, \"copy\")\n\t}\n\n\tpolicyContext, err := getPolicyContext(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error loading trust policy: %v\", err)\n\t}\n\tdefer policyContext.Destroy()\n\n\tsrcRef, err := alltransports.ParseImageName(c.Args()[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid source name %s: %v\", c.Args()[0], err)\n\t}\n\tdestRef, err := alltransports.ParseImageName(c.Args()[1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid destination name %s: %v\", c.Args()[1], err)\n\t}\n\tsignBy := c.String(\"sign-by\")\n\tremoveSignatures := c.Bool(\"remove-signatures\")\n\n\tsourceCtx, destinationCtx, err := contextsFromGlobalOptions(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar manifestType string\n\tif c.IsSet(\"format\") {\n\t\tswitch c.String(\"format\") {\n\t\tcase \"oci\":\n\t\t\tmanifestType = imgspecv1.MediaTypeImageManifest\n\t\tcase \"v2s1\":\n\t\t\tmanifestType = manifest.DockerV2Schema1SignedMediaType\n\t\tcase \"v2s2\":\n\t\t\tmanifestType = manifest.DockerV2Schema2MediaType\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'\", c.String(\"format\"))\n\t\t}\n\t}\n\n\tif c.IsSet(\"additional-tag\") {\n\t\tfor _, image := range c.StringSlice(\"additional-tag\") {\n\t\t\tref, err := reference.ParseNormalizedNamed(image)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error parsing additional-tag '%s': %v\", image, err)\n\t\t\t}\n\t\t\tnamedTagged, isNamedTagged := ref.(reference.NamedTagged)\n\t\t\tif !isNamedTagged {\n\t\t\t\treturn fmt.Errorf(\"additional-tag '%s' must be a tagged reference\", image)\n\t\t\t}\n\t\t\tdestinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)\n\t\t}\n\t}\n\n\treturn copy.Image(context.Background(), policyContext, destRef, srcRef, ©.Options{\n\t\tRemoveSignatures: removeSignatures,\n\t\tSignBy: signBy,\n\t\tReportWriter: os.Stdout,\n\t\tSourceCtx: sourceCtx,\n\t\tDestinationCtx: destinationCtx,\n\t\tForceManifestMIMEType: manifestType,\n\t})\n}\n\nvar copyCmd = cli.Command{\n\tName: \"copy\",\n\tUsage: \"Copy an IMAGE-NAME from one location to another\",\n\tDescription: fmt.Sprintf(`\n\n\tContainer \"IMAGE-NAME\" uses a \"transport\":\"details\" format.\n\n\tSupported transports:\n\t%s\n\n\tSee skopeo(1) section \"IMAGE NAMES\" for the expected format\n\t`, strings.Join(transports.ListNames(), \", \")),\n\tArgsUsage: \"SOURCE-IMAGE DESTINATION-IMAGE\",\n\tAction: copyHandler,\n\t\/\/ FIXME: Do we need to namespace the GPG aspect?\n\tFlags: []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"additional-tag\",\n\t\t\tUsage: \"additional tags (supports docker-archive)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"authfile\",\n\t\t\tUsage: \"path of the authentication file. Default is ${XDG_RUNTIME_DIR}\/containers\/auth.json\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"remove-signatures\",\n\t\t\tUsage: \"Do not copy signatures from SOURCE-IMAGE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"sign-by\",\n\t\t\tUsage: \"Sign the image using a GPG key with the specified `FINGERPRINT`\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-creds, screds\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Use `USERNAME[:PASSWORD]` for accessing the source registry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-creds, dcreds\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Use `USERNAME[:PASSWORD]` for accessing the destination registry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-cert-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry or daemon\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"src-tls-verify\",\n\t\t\tUsage: \"require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-cert-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry or daemon\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"dest-tls-verify\",\n\t\t\tUsage: \"require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-ostree-tmp-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use for OSTree temporary files\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-shared-blob-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use to fetch retrieved blobs (OCI layout sources only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-shared-blob-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use to store retrieved blobs (OCI layout destinations only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format, f\",\n\t\t\tUsage: \"`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dest-compress\",\n\t\t\tUsage: \"Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-daemon-host\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use docker daemon host at `HOST` (docker-daemon sources only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-daemon-host\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use docker daemon host at `HOST` (docker-daemon destinations only)\",\n\t\t},\n\t},\n}\n<commit_msg>Ensure that we still return 1, and print something to stderr, on (skopeo copy) failure<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/containers\/image\/copy\"\n\t\"github.com\/containers\/image\/docker\/reference\"\n\t\"github.com\/containers\/image\/manifest\"\n\t\"github.com\/containers\/image\/transports\"\n\t\"github.com\/containers\/image\/transports\/alltransports\"\n\t\"github.com\/containers\/image\/types\"\n\timgspecv1 \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ contextsFromGlobalOptions returns source and destionation types.SystemContext depending on c.\nfunc contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {\n\tsourceCtx, err := contextFromGlobalOptions(c, \"src-\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdestinationCtx, err := contextFromGlobalOptions(c, \"dest-\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn sourceCtx, destinationCtx, nil\n}\n\nfunc copyHandler(c *cli.Context) error {\n\tif len(c.Args()) != 2 {\n\t\tcli.ShowCommandHelp(c, \"copy\")\n\t\treturn errors.New(\"Exactly two arguments expected\")\n\t}\n\n\tpolicyContext, err := getPolicyContext(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error loading trust policy: %v\", err)\n\t}\n\tdefer policyContext.Destroy()\n\n\tsrcRef, err := alltransports.ParseImageName(c.Args()[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid source name %s: %v\", c.Args()[0], err)\n\t}\n\tdestRef, err := alltransports.ParseImageName(c.Args()[1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid destination name %s: %v\", c.Args()[1], err)\n\t}\n\tsignBy := c.String(\"sign-by\")\n\tremoveSignatures := c.Bool(\"remove-signatures\")\n\n\tsourceCtx, destinationCtx, err := contextsFromGlobalOptions(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar manifestType string\n\tif c.IsSet(\"format\") {\n\t\tswitch c.String(\"format\") {\n\t\tcase \"oci\":\n\t\t\tmanifestType = imgspecv1.MediaTypeImageManifest\n\t\tcase \"v2s1\":\n\t\t\tmanifestType = manifest.DockerV2Schema1SignedMediaType\n\t\tcase \"v2s2\":\n\t\t\tmanifestType = manifest.DockerV2Schema2MediaType\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'\", c.String(\"format\"))\n\t\t}\n\t}\n\n\tif c.IsSet(\"additional-tag\") {\n\t\tfor _, image := range c.StringSlice(\"additional-tag\") {\n\t\t\tref, err := reference.ParseNormalizedNamed(image)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error parsing additional-tag '%s': %v\", image, err)\n\t\t\t}\n\t\t\tnamedTagged, isNamedTagged := ref.(reference.NamedTagged)\n\t\t\tif !isNamedTagged {\n\t\t\t\treturn fmt.Errorf(\"additional-tag '%s' must be a tagged reference\", image)\n\t\t\t}\n\t\t\tdestinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)\n\t\t}\n\t}\n\n\treturn copy.Image(context.Background(), policyContext, destRef, srcRef, ©.Options{\n\t\tRemoveSignatures: removeSignatures,\n\t\tSignBy: signBy,\n\t\tReportWriter: os.Stdout,\n\t\tSourceCtx: sourceCtx,\n\t\tDestinationCtx: destinationCtx,\n\t\tForceManifestMIMEType: manifestType,\n\t})\n}\n\nvar copyCmd = cli.Command{\n\tName: \"copy\",\n\tUsage: \"Copy an IMAGE-NAME from one location to another\",\n\tDescription: fmt.Sprintf(`\n\n\tContainer \"IMAGE-NAME\" uses a \"transport\":\"details\" format.\n\n\tSupported transports:\n\t%s\n\n\tSee skopeo(1) section \"IMAGE NAMES\" for the expected format\n\t`, strings.Join(transports.ListNames(), \", \")),\n\tArgsUsage: \"SOURCE-IMAGE DESTINATION-IMAGE\",\n\tAction: copyHandler,\n\t\/\/ FIXME: Do we need to namespace the GPG aspect?\n\tFlags: []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"additional-tag\",\n\t\t\tUsage: \"additional tags (supports docker-archive)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"authfile\",\n\t\t\tUsage: \"path of the authentication file. Default is ${XDG_RUNTIME_DIR}\/containers\/auth.json\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"remove-signatures\",\n\t\t\tUsage: \"Do not copy signatures from SOURCE-IMAGE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"sign-by\",\n\t\t\tUsage: \"Sign the image using a GPG key with the specified `FINGERPRINT`\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-creds, screds\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Use `USERNAME[:PASSWORD]` for accessing the source registry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-creds, dcreds\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Use `USERNAME[:PASSWORD]` for accessing the destination registry\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-cert-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry or daemon\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"src-tls-verify\",\n\t\t\tUsage: \"require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-cert-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry or daemon\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"dest-tls-verify\",\n\t\t\tUsage: \"require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-ostree-tmp-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use for OSTree temporary files\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-shared-blob-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use to fetch retrieved blobs (OCI layout sources only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-shared-blob-dir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"`DIRECTORY` to use to store retrieved blobs (OCI layout destinations only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format, f\",\n\t\t\tUsage: \"`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dest-compress\",\n\t\t\tUsage: \"Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"src-daemon-host\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use docker daemon host at `HOST` (docker-daemon sources only)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dest-daemon-host\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"use docker daemon host at `HOST` (docker-daemon destinations only)\",\n\t\t},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/doc\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/serge-v\/toolbox\/common\"\n)\n\nvar apiURL = \"http:\/\/lite.cnn.io\/\"\nvar cacheDir string\n\ntype topic struct {\n\ttitle string\n\turl string\n}\n\nfunc init() {\n\thome := os.Getenv(\"HOME\")\n\tif home == \"\" || home == \"\/nonexistent\" {\n\t\tcacheDir = \"\/var\/cache\/cnn\/\"\n\t} else {\n\t\tcacheDir = home + \"\/.cache\/cnn\/\"\n\t\tif err := os.MkdirAll(cacheDir, 0777); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc parseTopics(lang string) ([]topic, error) {\n\tu := apiURL + lang\n\tlog.Println(\"loading\", u)\n\treq, err := http.Get(u)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get url \"+u)\n\t}\n\tdefer req.Body.Close()\n\tbuf, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot read body for url \"+u)\n\t}\n\tlog.Println(\"loaded\", len(buf))\n\tioutil.WriteFile(cacheDir+\"main\"+lang+\".html\", buf, 0666)\n\n\tre := regexp.MustCompile(\"<li><a href=\\\"(\/e[ns]\/article\/[^\\\"]*)\\\">([^<]*)<\/a><\/li>\")\n\tmatches := re.FindAllStringSubmatch(string(buf), -1)\n\tif len(matches) == 0 {\n\t\treturn nil, errors.Wrap(err, \"no articles for url \"+u)\n\t}\n\n\tlist := []topic{}\n\tfor _, m := range matches {\n\t\tlist = append(list, topic{title: strings.TrimSpace(m[2]), url: m[1]})\n\t}\n\treturn list, nil\n}\n\nfunc parseArticle(buf []byte) (string, error) {\n\tre := regexp.MustCompile(\"(?s)<main>(.*)<\/main>\")\n\tmatches := re.FindAllStringSubmatch(string(buf), 1)\n\tif len(matches) == 0 {\n\t\treturn \"\", errors.New(\"cannot match article\")\n\t}\n\tvar s string\n\tfor _, m := range matches {\n\t\ts += m[1]\n\t}\n\treturn s, nil\n}\n\nfunc getArticle(articlePath string, w io.Writer) error {\n\taurl := apiURL + articlePath\n\tpu, err := url.Parse(aurl)\n\tfname := strings.Replace(pu.Path, \"\/\", \"-\", -1)\n\tfname = cacheDir + fname\n\tvar buf []byte\n\tbuf, err = ioutil.ReadFile(fname)\n\tprintln(\"read fname:\", fname, err)\n\tif err != nil {\n\t\tresp, err := http.Get(\"http:\/\/lite.cnn.io\/\" + articlePath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cannot get article \"+articlePath)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbuf, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cannot get article body \"+articlePath)\n\t\t}\n\t\tioutil.WriteFile(fname, buf, 0666)\n\t}\n\thtml, err := parseArticle(buf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot parse article \"+articlePath)\n\t}\n\tfmt.Fprintln(w, html)\n\treturn nil\n}\n\nfunc getNews(w io.Writer, plain bool) error {\n\tenTopics, err := parseTopics(\"en\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error in en topics\")\n\t}\n\n\tesTopics, err := parseTopics(\"es\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error in es topics\")\n\t}\n\n\tmax := len(enTopics)\n\tif max > len(esTopics) {\n\t\tmax = len(esTopics)\n\t}\n\tesTopicsMap := make(map[string]topic)\n\n\tfor _, t := range esTopics {\n\t\tkey := strings.Replace(t.url, \"\/es\/\", \"\/en\/\", 1)\n\t\tesTopicsMap[key] = t\n\t}\n\n\tif plain {\n\t\tfor i := 0; i < max; i++ {\n\t\t\tkey := enTopics[i].url\n\t\t\tprintln(\"es:\", esTopicsMap[key].title)\n\t\t\tprintln(\"en:\", enTopics[i].title)\n\t\t\tprintln(\"----------------\")\n\t\t}\n\n\t\tincludeArticles := false\n\n\t\tif includeArticles {\n\t\t\tfmt.Fprintf(w, \" === \\x1b[33mTopics\\x1b[0m\\n\\n\")\n\t\t\tfor i := 0; i < max; i++ {\n\t\t\t\tfmt.Fprintln(w, \" \", html.UnescapeString(enTopics[i].title))\n\t\t\t\tfmt.Fprintln(w, \" \", html.UnescapeString(esTopics[i].title))\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\n\t\t\tfor i := 0; i < max; i++ {\n\t\t\t\tfmt.Fprintf(w, \" === \\x1b[33m%s\\x1b[0m\\n\", html.UnescapeString(enTopics[i].title))\n\t\t\t\tvar pbuf bytes.Buffer\n\t\t\t\tgetArticle(enTopics[i].url, &pbuf)\n\t\t\t\ts := pbuf.String()\n\t\t\t\ts = strings.Replace(s, \"<div id=\\\"published datetime\\\">\", \"\", 1)\n\t\t\t\ts = strings.Replace(s, \"<div>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<\/div>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<p>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<\/p>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"•\", \"*\", -1)\n\t\t\t\ts = html.UnescapeString(s)\n\t\t\t\tdoc.ToText(w, s, \" \", \" \", 72)\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfmt.Fprint(w, `<div style=\"width: 600px; font-size:15px\">`)\n\tfor i := 0; i < max; i++ {\n\t\tkey := enTopics[i].url\n\t\tfmt.Fprintln(w, \"<p>\", esTopicsMap[key].title, \"<\/p>\")\n\t\tfmt.Fprintln(w, \"<p>\", enTopics[i].title, \"<\/p>\")\n\t\tfmt.Fprintln(w, \"<hr>\")\n\t}\n\tfmt.Fprintf(w, \"<\/div>\")\n\n\tif max > 5 {\n\t\tmax = 5\n\t}\n\n\tfor i := 0; i < max; i++ {\n\t\tfmt.Fprint(w, `<div style=\"width: 600px; font-size:15px\">`)\n\t\tgetArticle(enTopics[i].url, w)\n\t\tfmt.Fprintf(w, \"<\/div>\")\n\t}\n\n\treturn nil\n}\n\nfunc sendNewsEmail(html io.Reader) {\n\tvar b bytes.Buffer\n\n\tmwr := multipart.NewWriter(&b)\n\n\tfmt.Fprintf(&b, \"From: wx <serge0x76@gmail.com>\\n\")\n\tfmt.Fprintf(&b, \"Subject: news\\n\")\n\tfmt.Fprintf(&b, \"Content-Type: multipart\/mixed; boundary=%s\\n\\n\", mwr.Boundary())\n\n\theaders := make(textproto.MIMEHeader)\n\theaders.Add(\"Content-Type\", \"text\/html\")\n\tpart, err := mwr.CreatePart(headers)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tfmt.Fprintln(part, html)\n\tfmt.Fprintf(&b, \".\\n\")\n\tcommon.Sendmail(\"serge0x76@gmail.com\", b.Bytes())\n}\n\nfunc printNews(email bool) {\n\tvar buf bytes.Buffer\n\terr := getNews(&buf, !email)\n\tif err != nil {\n\t\tfmt.Fprintf(&buf, \"error: %s\", err.Error())\n\t}\n\tif email {\n\t\tsendNewsEmail(&buf)\n\t} else {\n\t\tfmt.Println(buf.String())\n\t}\n}\n<commit_msg>Fix sender<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/doc\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/serge-v\/toolbox\/common\"\n)\n\nvar apiURL = \"http:\/\/lite.cnn.io\/\"\nvar cacheDir string\n\ntype topic struct {\n\ttitle string\n\turl string\n}\n\nfunc init() {\n\thome := os.Getenv(\"HOME\")\n\tif home == \"\" || home == \"\/nonexistent\" {\n\t\tcacheDir = \"\/var\/cache\/cnn\/\"\n\t} else {\n\t\tcacheDir = home + \"\/.cache\/cnn\/\"\n\t\tif err := os.MkdirAll(cacheDir, 0777); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc parseTopics(lang string) ([]topic, error) {\n\tu := apiURL + lang\n\tlog.Println(\"loading\", u)\n\treq, err := http.Get(u)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get url \"+u)\n\t}\n\tdefer req.Body.Close()\n\tbuf, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot read body for url \"+u)\n\t}\n\tlog.Println(\"loaded\", len(buf))\n\tioutil.WriteFile(cacheDir+\"main\"+lang+\".html\", buf, 0666)\n\n\tre := regexp.MustCompile(\"<li><a href=\\\"(\/e[ns]\/article\/[^\\\"]*)\\\">([^<]*)<\/a><\/li>\")\n\tmatches := re.FindAllStringSubmatch(string(buf), -1)\n\tif len(matches) == 0 {\n\t\treturn nil, errors.Wrap(err, \"no articles for url \"+u)\n\t}\n\n\tlist := []topic{}\n\tfor _, m := range matches {\n\t\tlist = append(list, topic{title: strings.TrimSpace(m[2]), url: m[1]})\n\t}\n\treturn list, nil\n}\n\nfunc parseArticle(buf []byte) (string, error) {\n\tre := regexp.MustCompile(\"(?s)<main>(.*)<\/main>\")\n\tmatches := re.FindAllStringSubmatch(string(buf), 1)\n\tif len(matches) == 0 {\n\t\treturn \"\", errors.New(\"cannot match article\")\n\t}\n\tvar s string\n\tfor _, m := range matches {\n\t\ts += m[1]\n\t}\n\treturn s, nil\n}\n\nfunc getArticle(articlePath string, w io.Writer) error {\n\taurl := apiURL + articlePath\n\tpu, err := url.Parse(aurl)\n\tfname := strings.Replace(pu.Path, \"\/\", \"-\", -1)\n\tfname = cacheDir + fname\n\tvar buf []byte\n\tbuf, err = ioutil.ReadFile(fname)\n\tprintln(\"read fname:\", fname, err)\n\tif err != nil {\n\t\tresp, err := http.Get(\"http:\/\/lite.cnn.io\/\" + articlePath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cannot get article \"+articlePath)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbuf, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cannot get article body \"+articlePath)\n\t\t}\n\t\tioutil.WriteFile(fname, buf, 0666)\n\t}\n\thtml, err := parseArticle(buf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot parse article \"+articlePath)\n\t}\n\tfmt.Fprintln(w, html)\n\treturn nil\n}\n\nfunc getNews(w io.Writer, plain bool) error {\n\tenTopics, err := parseTopics(\"en\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error in en topics\")\n\t}\n\n\tesTopics, err := parseTopics(\"es\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error in es topics\")\n\t}\n\n\tmax := len(enTopics)\n\tif max > len(esTopics) {\n\t\tmax = len(esTopics)\n\t}\n\tesTopicsMap := make(map[string]topic)\n\n\tfor _, t := range esTopics {\n\t\tkey := strings.Replace(t.url, \"\/es\/\", \"\/en\/\", 1)\n\t\tesTopicsMap[key] = t\n\t}\n\n\tif plain {\n\t\tfor i := 0; i < max; i++ {\n\t\t\tkey := enTopics[i].url\n\t\t\tprintln(\"es:\", esTopicsMap[key].title)\n\t\t\tprintln(\"en:\", enTopics[i].title)\n\t\t\tprintln(\"----------------\")\n\t\t}\n\n\t\tincludeArticles := false\n\n\t\tif includeArticles {\n\t\t\tfmt.Fprintf(w, \" === \\x1b[33mTopics\\x1b[0m\\n\\n\")\n\t\t\tfor i := 0; i < max; i++ {\n\t\t\t\tfmt.Fprintln(w, \" \", html.UnescapeString(enTopics[i].title))\n\t\t\t\tfmt.Fprintln(w, \" \", html.UnescapeString(esTopics[i].title))\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\n\t\t\tfor i := 0; i < max; i++ {\n\t\t\t\tfmt.Fprintf(w, \" === \\x1b[33m%s\\x1b[0m\\n\", html.UnescapeString(enTopics[i].title))\n\t\t\t\tvar pbuf bytes.Buffer\n\t\t\t\tgetArticle(enTopics[i].url, &pbuf)\n\t\t\t\ts := pbuf.String()\n\t\t\t\ts = strings.Replace(s, \"<div id=\\\"published datetime\\\">\", \"\", 1)\n\t\t\t\ts = strings.Replace(s, \"<div>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<\/div>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<p>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"<\/p>\", \"\\n\", -1)\n\t\t\t\ts = strings.Replace(s, \"•\", \"*\", -1)\n\t\t\t\ts = html.UnescapeString(s)\n\t\t\t\tdoc.ToText(w, s, \" \", \" \", 72)\n\t\t\t\tfmt.Fprintln(w)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfmt.Fprint(w, `<div style=\"width: 600px; font-size:15px\">`)\n\tfor i := 0; i < max; i++ {\n\t\tkey := enTopics[i].url\n\t\tfmt.Fprintln(w, \"<p>\", esTopicsMap[key].title, \"<\/p>\")\n\t\tfmt.Fprintln(w, \"<p>\", enTopics[i].title, \"<\/p>\")\n\t\tfmt.Fprintln(w, \"<hr>\")\n\t}\n\tfmt.Fprintf(w, \"<\/div>\")\n\n\tif max > 5 {\n\t\tmax = 5\n\t}\n\n\tfor i := 0; i < max; i++ {\n\t\tfmt.Fprint(w, `<div style=\"width: 600px; font-size:15px\">`)\n\t\tgetArticle(enTopics[i].url, w)\n\t\tfmt.Fprintf(w, \"<\/div>\")\n\t}\n\n\treturn nil\n}\n\nfunc sendNewsEmail(html io.Reader) {\n\tvar b bytes.Buffer\n\n\tmwr := multipart.NewWriter(&b)\n\n\tfmt.Fprintf(&b, \"From: \"+common.GetRcVar(\"WXFROM\")+\"\\n\")\n\tfmt.Fprintf(&b, \"Subject: news\\n\")\n\tfmt.Fprintf(&b, \"Content-Type: multipart\/mixed; boundary=%s\\n\\n\", mwr.Boundary())\n\n\theaders := make(textproto.MIMEHeader)\n\theaders.Add(\"Content-Type\", \"text\/html\")\n\tpart, err := mwr.CreatePart(headers)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tfmt.Fprintln(part, html)\n\tfmt.Fprintf(&b, \".\\n\")\n\tcommon.Sendmail(\"serge0x76@gmail.com\", b.Bytes())\n}\n\nfunc printNews(email bool) {\n\tvar buf bytes.Buffer\n\terr := getNews(&buf, !email)\n\tif err != nil {\n\t\tfmt.Fprintf(&buf, \"error: %s\", err.Error())\n\t}\n\tif email {\n\t\tsendNewsEmail(&buf)\n\t} else {\n\t\tfmt.Println(buf.String())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ntchaik creates a webserver which serves the web UI.\n\nIt is assumed that tchaik is run relatively local to the user (i.e. serving pages to the local machine, or a local\nnetwork).\n\nAll configuration is done through command line parameters.\n\nA common use case is to begin by use using an existing iTunes Library file:\n\n tchaik -itlXML \/path\/to\/iTunesMusicLibrary.xml\n\n*\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"tchaik.com\/index\"\n\t\"tchaik.com\/index\/attr\"\n\t\"tchaik.com\/index\/checklist\"\n\t\"tchaik.com\/index\/cursor\"\n\t\"tchaik.com\/index\/favourite\"\n\t\"tchaik.com\/index\/history\"\n\t\"tchaik.com\/index\/itl\"\n\t\"tchaik.com\/index\/playlist\"\n\t\"tchaik.com\/index\/walk\"\n\t\"tchaik.com\/store\"\n\t\"tchaik.com\/store\/cmdflag\"\n)\n\nvar debug bool\nvar itlXML, tchLib, walkPath string\n\nvar playHistoryPath, favouritesPath, checklistPath, playlistPath, cursorPath string\n\nvar listenAddr string\nvar uiDir string\nvar certFile, keyFile string\n\nvar authUser, authPassword string\n\nvar traceListenAddr string\n\nfunc init() {\n\tflag.BoolVar(&debug, \"debug\", false, \"print debugging information\")\n\n\tflag.StringVar(&listenAddr, \"listen\", \"localhost:8080\", \"bind `address` for main HTTP server\")\n\tflag.StringVar(&certFile, \"tls-cert\", \"\", \"certificate `file`, must also specify -tls-key\")\n\tflag.StringVar(&keyFile, \"tls-key\", \"\", \"certificate key `file`, must also specify -tls-cert\")\n\n\tflag.StringVar(&itlXML, \"itlXML\", \"\", \"iTunes Library XML `file`\")\n\tflag.StringVar(&tchLib, \"lib\", \"\", \"Tchaik library `file`\")\n\tflag.StringVar(&walkPath, \"path\", \"\", \"`directory` containing music files\")\n\n\tflag.StringVar(&playHistoryPath, \"play-history\", \"history.json\", \"play history `file`\")\n\tflag.StringVar(&favouritesPath, \"favourites\", \"favourites.json\", \"favourites `file`\")\n\tflag.StringVar(&checklistPath, \"checklist\", \"checklist.json\", \"checklist `file`\")\n\tflag.StringVar(&playlistPath, \"playlists\", \"playlists.json\", \"playlists `file`\")\n\tflag.StringVar(&cursorPath, \"cursors\", \"cursors.json\", \"cursors `file`\")\n\n\tflag.StringVar(&uiDir, \"ui-dir\", \"ui\", \"UI asset `directory`\")\n\n\tflag.StringVar(&authUser, \"auth-user\", \"\", \"`user` to use for HTTP authentication (set to enable)\")\n\tflag.StringVar(&authPassword, \"auth-password\", \"\", \"`password` to use for HTTP authentication\")\n\n\tflag.StringVar(&traceListenAddr, \"trace-listen\", \"\", \"bind `address` for trace HTTP server\")\n}\n\nfunc readLibrary() (index.Library, error) {\n\tvar count int\n\tcheck := func(x string) {\n\t\tif x != \"\" {\n\t\t\tcount++\n\t\t}\n\t}\n\tcheck(itlXML)\n\tcheck(tchLib)\n\tcheck(walkPath)\n\n\tswitch {\n\tcase count == 0:\n\t\treturn nil, fmt.Errorf(\"must specify one library file or a path to build one from (-itlXML, -lib or -path)\")\n\tcase count > 1:\n\t\treturn nil, fmt.Errorf(\"must only specify one library file or a path to build one from (-itlXML, -lib or -path)\")\n\t}\n\n\tvar lib index.Library\n\tswitch {\n\tcase tchLib != \"\":\n\t\tf, err := os.Open(tchLib)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not open Tchaik library file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Printf(\"Parsing %v...\", tchLib)\n\t\tlib, err = index.ReadFrom(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing Tchaik library file: %v\\n\", err)\n\t\t}\n\t\tfmt.Println(\"done.\")\n\t\treturn lib, nil\n\n\tcase itlXML != \"\":\n\t\tf, err := os.Open(itlXML)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could open iTunes library file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tlib, err = itl.ReadFrom(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing iTunes library file: %v\", err)\n\t\t}\n\n\tcase walkPath != \"\":\n\t\tfmt.Printf(\"Walking %v...\\n\", walkPath)\n\t\tlib = walk.NewLibrary(walkPath)\n\t\tfmt.Println(\"Finished walking.\")\n\t}\n\n\tfmt.Printf(\"Building Tchaik Library...\")\n\tlib = index.Convert(lib, \"ID\")\n\tfmt.Println(\"done.\")\n\treturn lib, nil\n}\n\nfunc buildRootCollection(l index.Library) index.Collection {\n\troot := index.Collect(l, index.By(attr.String(\"Album\")))\n\tindex.SortKeysByGroupName(root)\n\treturn root\n}\n\nfunc buildSearchIndex(c index.Collection) index.Searcher {\n\twi := index.BuildCollectionWordIndex(c, []string{\"Composer\", \"Artist\", \"Album\", \"Name\"})\n\treturn index.FlatSearcher{\n\t\tSearcher: index.WordsIntersectSearcher(index.BuildPrefixExpandSearcher(wi, wi, 10)),\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tl, err := readLibrary()\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Building root collection...\")\n\troot := buildRootCollection(l)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building artists filter...\")\n\tartists := index.Filter(root, attr.Strings(\"Artist\"))\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building composers filter...\")\n\tcomposers := index.Filter(root, attr.Strings(\"Composer\"))\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building recent index...\")\n\trecent := index.Recent(root, 150)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building search index...\")\n\tsearcher := buildSearchIndex(root)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading play history...\")\n\ths, err := history.NewStore(playHistoryPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading play history: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading favourites...\")\n\tfavourites, err := favourite.NewStore(favouritesPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading favourites: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading checklist...\")\n\tchecklist, err := checklist.NewStore(checklistPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading checklist: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading playlists...\")\n\tplaylistStore, err := playlist.NewStore(playlistPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading playlists: %v\", err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ TODO(dhowden): remove this once we can better intialise the \"Default\" playlist\n\tif p := playlistStore.Get(\"Default\"); p == nil {\n\t\tplaylistStore.Set(\"Default\", &playlist.Playlist{})\n\t}\n\tfmt.Println(\"done\")\n\n\tfmt.Printf(\"Loading cursors...\")\n\tcursorStore, err := cursor.NewStore(cursorPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading cursor: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done\")\n\n\tmediaFileSystem, artworkFileSystem, err := cmdflag.Stores()\n\tif err != nil {\n\t\tfmt.Println(\"error setting up stores:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif debug {\n\t\tmediaFileSystem = store.LogFileSystem(\"Media\", mediaFileSystem)\n\t\tartworkFileSystem = store.LogFileSystem(\"Artwork\", artworkFileSystem)\n\t}\n\n\tif traceListenAddr != \"\" {\n\t\tfmt.Printf(\"Starting trace server on http:\/\/%v\\n\", traceListenAddr)\n\t\tgo func() {\n\t\t\tlog.Fatal(http.ListenAndServe(traceListenAddr, nil))\n\t\t}()\n\t}\n\n\tlib := Library{\n\t\tLibrary: l,\n\t\tcollections: map[string]index.Collection{\n\t\t\t\"Root\": root,\n\t\t},\n\t\tfilters: map[string][]index.FilterItem{\n\t\t\t\"Artist\": artists,\n\t\t\t\"Composer\": composers,\n\t\t},\n\t\trecent: recent,\n\t\tsearcher: searcher,\n\t}\n\n\tmeta := &Meta{\n\t\thistory: hs,\n\t\tfavourites: favourites,\n\t\tchecklist: checklist,\n\t\tplaylists: playlistStore,\n\t\tcursors: cursorStore,\n\t}\n\n\th := NewHandler(lib, meta, mediaFileSystem, artworkFileSystem)\n\n\tif certFile != \"\" && keyFile != \"\" {\n\t\tfmt.Printf(\"Web server is running on https:\/\/%v\\n\", listenAddr)\n\t\tfmt.Println(\"Quit the server with CTRL-C.\")\n\n\t\ttlsConfig := &tls.Config{MinVersion: tls.VersionTLS10}\n\t\tserver := &http.Server{Addr: listenAddr, Handler: h, TLSConfig: tlsConfig}\n\t\tlog.Fatal(server.ListenAndServeTLS(certFile, keyFile))\n\t}\n\n\tfmt.Printf(\"Web server is running on http:\/\/%v\\n\", listenAddr)\n\tfmt.Println(\"Quit the server with CTRL-C.\")\n\n\tlog.Fatal(http.ListenAndServe(listenAddr, h))\n}\n<commit_msg>Refactor meta store variable names.<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ntchaik creates a webserver which serves the web UI.\n\nIt is assumed that tchaik is run relatively local to the user (i.e. serving pages to the local machine, or a local\nnetwork).\n\nAll configuration is done through command line parameters.\n\nA common use case is to begin by use using an existing iTunes Library file:\n\n tchaik -itlXML \/path\/to\/iTunesMusicLibrary.xml\n\n*\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"tchaik.com\/index\"\n\t\"tchaik.com\/index\/attr\"\n\t\"tchaik.com\/index\/checklist\"\n\t\"tchaik.com\/index\/cursor\"\n\t\"tchaik.com\/index\/favourite\"\n\t\"tchaik.com\/index\/history\"\n\t\"tchaik.com\/index\/itl\"\n\t\"tchaik.com\/index\/playlist\"\n\t\"tchaik.com\/index\/walk\"\n\t\"tchaik.com\/store\"\n\t\"tchaik.com\/store\/cmdflag\"\n)\n\nvar debug bool\nvar itlXML, tchLib, walkPath string\n\nvar playHistoryPath, favouritesPath, checklistPath, playlistPath, cursorPath string\n\nvar listenAddr string\nvar uiDir string\nvar certFile, keyFile string\n\nvar authUser, authPassword string\n\nvar traceListenAddr string\n\nfunc init() {\n\tflag.BoolVar(&debug, \"debug\", false, \"print debugging information\")\n\n\tflag.StringVar(&listenAddr, \"listen\", \"localhost:8080\", \"bind `address` for main HTTP server\")\n\tflag.StringVar(&certFile, \"tls-cert\", \"\", \"certificate `file`, must also specify -tls-key\")\n\tflag.StringVar(&keyFile, \"tls-key\", \"\", \"certificate key `file`, must also specify -tls-cert\")\n\n\tflag.StringVar(&itlXML, \"itlXML\", \"\", \"iTunes Library XML `file`\")\n\tflag.StringVar(&tchLib, \"lib\", \"\", \"Tchaik library `file`\")\n\tflag.StringVar(&walkPath, \"path\", \"\", \"`directory` containing music files\")\n\n\tflag.StringVar(&playHistoryPath, \"play-history\", \"history.json\", \"play history `file`\")\n\tflag.StringVar(&favouritesPath, \"favourites\", \"favourites.json\", \"favourites `file`\")\n\tflag.StringVar(&checklistPath, \"checklist\", \"checklist.json\", \"checklist `file`\")\n\tflag.StringVar(&playlistPath, \"playlists\", \"playlists.json\", \"playlists `file`\")\n\tflag.StringVar(&cursorPath, \"cursors\", \"cursors.json\", \"cursors `file`\")\n\n\tflag.StringVar(&uiDir, \"ui-dir\", \"ui\", \"UI asset `directory`\")\n\n\tflag.StringVar(&authUser, \"auth-user\", \"\", \"`user` to use for HTTP authentication (set to enable)\")\n\tflag.StringVar(&authPassword, \"auth-password\", \"\", \"`password` to use for HTTP authentication\")\n\n\tflag.StringVar(&traceListenAddr, \"trace-listen\", \"\", \"bind `address` for trace HTTP server\")\n}\n\nfunc readLibrary() (index.Library, error) {\n\tvar count int\n\tcheck := func(x string) {\n\t\tif x != \"\" {\n\t\t\tcount++\n\t\t}\n\t}\n\tcheck(itlXML)\n\tcheck(tchLib)\n\tcheck(walkPath)\n\n\tswitch {\n\tcase count == 0:\n\t\treturn nil, fmt.Errorf(\"must specify one library file or a path to build one from (-itlXML, -lib or -path)\")\n\tcase count > 1:\n\t\treturn nil, fmt.Errorf(\"must only specify one library file or a path to build one from (-itlXML, -lib or -path)\")\n\t}\n\n\tvar lib index.Library\n\tswitch {\n\tcase tchLib != \"\":\n\t\tf, err := os.Open(tchLib)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not open Tchaik library file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Printf(\"Parsing %v...\", tchLib)\n\t\tlib, err = index.ReadFrom(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing Tchaik library file: %v\\n\", err)\n\t\t}\n\t\tfmt.Println(\"done.\")\n\t\treturn lib, nil\n\n\tcase itlXML != \"\":\n\t\tf, err := os.Open(itlXML)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could open iTunes library file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tlib, err = itl.ReadFrom(f)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing iTunes library file: %v\", err)\n\t\t}\n\n\tcase walkPath != \"\":\n\t\tfmt.Printf(\"Walking %v...\\n\", walkPath)\n\t\tlib = walk.NewLibrary(walkPath)\n\t\tfmt.Println(\"Finished walking.\")\n\t}\n\n\tfmt.Printf(\"Building Tchaik Library...\")\n\tlib = index.Convert(lib, \"ID\")\n\tfmt.Println(\"done.\")\n\treturn lib, nil\n}\n\nfunc buildRootCollection(l index.Library) index.Collection {\n\troot := index.Collect(l, index.By(attr.String(\"Album\")))\n\tindex.SortKeysByGroupName(root)\n\treturn root\n}\n\nfunc buildSearchIndex(c index.Collection) index.Searcher {\n\twi := index.BuildCollectionWordIndex(c, []string{\"Composer\", \"Artist\", \"Album\", \"Name\"})\n\treturn index.FlatSearcher{\n\t\tSearcher: index.WordsIntersectSearcher(index.BuildPrefixExpandSearcher(wi, wi, 10)),\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tl, err := readLibrary()\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Building root collection...\")\n\troot := buildRootCollection(l)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building artists filter...\")\n\tartists := index.Filter(root, attr.Strings(\"Artist\"))\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building composers filter...\")\n\tcomposers := index.Filter(root, attr.Strings(\"Composer\"))\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building recent index...\")\n\trecent := index.Recent(root, 150)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Building search index...\")\n\tsearcher := buildSearchIndex(root)\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading play history...\")\n\tplayHistoryStore, err := history.NewStore(playHistoryPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading play history: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading favourites...\")\n\tfavouriteStore, err := favourite.NewStore(favouritesPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading favourites: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading checklist...\")\n\tchecklistStore, err := checklist.NewStore(checklistPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading checklist: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done.\")\n\n\tfmt.Printf(\"Loading playlists...\")\n\tplaylistStore, err := playlist.NewStore(playlistPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading playlists: %v\", err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ TODO(dhowden): remove this once we can better intialise the \"Default\" playlist\n\tif p := playlistStore.Get(\"Default\"); p == nil {\n\t\tplaylistStore.Set(\"Default\", &playlist.Playlist{})\n\t}\n\tfmt.Println(\"done\")\n\n\tfmt.Printf(\"Loading cursors...\")\n\tcursorStore, err := cursor.NewStore(cursorPath)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror loading cursor: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(\"done\")\n\n\tmediaFileSystem, artworkFileSystem, err := cmdflag.Stores()\n\tif err != nil {\n\t\tfmt.Println(\"error setting up stores:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif debug {\n\t\tmediaFileSystem = store.LogFileSystem(\"Media\", mediaFileSystem)\n\t\tartworkFileSystem = store.LogFileSystem(\"Artwork\", artworkFileSystem)\n\t}\n\n\tif traceListenAddr != \"\" {\n\t\tfmt.Printf(\"Starting trace server on http:\/\/%v\\n\", traceListenAddr)\n\t\tgo func() {\n\t\t\tlog.Fatal(http.ListenAndServe(traceListenAddr, nil))\n\t\t}()\n\t}\n\n\tlib := Library{\n\t\tLibrary: l,\n\t\tcollections: map[string]index.Collection{\n\t\t\t\"Root\": root,\n\t\t},\n\t\tfilters: map[string][]index.FilterItem{\n\t\t\t\"Artist\": artists,\n\t\t\t\"Composer\": composers,\n\t\t},\n\t\trecent: recent,\n\t\tsearcher: searcher,\n\t}\n\n\tmeta := &Meta{\n\t\thistory: playHistoryStore,\n\t\tfavourites: favouriteStore,\n\t\tchecklist: checklistStore,\n\t\tplaylists: playlistStore,\n\t\tcursors: cursorStore,\n\t}\n\n\th := NewHandler(lib, meta, mediaFileSystem, artworkFileSystem)\n\n\tif certFile != \"\" && keyFile != \"\" {\n\t\tfmt.Printf(\"Web server is running on https:\/\/%v\\n\", listenAddr)\n\t\tfmt.Println(\"Quit the server with CTRL-C.\")\n\n\t\ttlsConfig := &tls.Config{MinVersion: tls.VersionTLS10}\n\t\tserver := &http.Server{Addr: listenAddr, Handler: h, TLSConfig: tlsConfig}\n\t\tlog.Fatal(server.ListenAndServeTLS(certFile, keyFile))\n\t}\n\n\tfmt.Printf(\"Web server is running on http:\/\/%v\\n\", listenAddr)\n\tfmt.Println(\"Quit the server with CTRL-C.\")\n\n\tlog.Fatal(http.ListenAndServe(listenAddr, h))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/drosseau\/degob\"\n)\n\nvar (\n\toutFile = flag.String(\"ofile\", \"\", \"Output file (defaults to stdout)\")\n\tinFile = flag.String(\"ifile\", \"\", \"Input file (defaults to stdin)\")\n\ttruncateOut = flag.Bool(\"trunc\", false, \"Truncate output file\")\n\tbase64d = flag.Bool(\"b64\", false, \"base64 input\")\n\tbase64urld = flag.Bool(\"b64url\", false, \"base64url input\")\n\tnoComments = flag.Bool(\"nc\", false, \"don't print additional comments\")\n\tnoTypes = flag.Bool(\"nt\", false, \"don't print type information\")\n\tjson = flag.Bool(\"json\", false, \"show value as json\")\n\tpkgName = flag.String(\"pkg\", \"\", \"include a package definition in the output with the given name\")\n)\n\nfunc errorf(s string, v ...interface{}) {\n\t_, _ = fmt.Fprintf(os.Stderr, s, v...)\n\tos.Exit(1)\n}\n\nfunc getWriter() io.WriteCloser {\n\tif *outFile != \"\" {\n\t\topts := os.O_WRONLY | os.O_CREATE\n\t\tif *truncateOut {\n\t\t\topts |= os.O_TRUNC\n\t\t} else {\n\t\t\topts |= os.O_APPEND\n\t\t}\n\t\tf, err := os.OpenFile(*outFile, opts, 0644)\n\t\tif err != nil {\n\t\t\terrorf(\"failed to open `%s` for writing: %v\\n\", *outFile, err)\n\t\t}\n\t\treturn f\n\t}\n\treturn os.Stdout\n}\n\nfunc getReader() io.ReadCloser {\n\tif *inFile != \"\" {\n\t\tf, err := os.Open(*outFile)\n\t\tif err != nil {\n\t\t\terrorf(\"failed to open `%s` for reading: %v\\n\", *inFile, err)\n\t\t}\n\t\treturn f\n\t}\n\treturn ioutil.NopCloser(os.Stdin)\n}\n\ntype writer struct {\n\tw io.Writer\n\terr error\n}\n\nfunc (w writer) Write(b []byte) (int, error) {\n\tif w.err != nil {\n\t\terrorf(\"error writing output: %v\\n\", w.err)\n\t}\n\tvar n int\n\tn, w.err = w.w.Write(b)\n\treturn n, w.err\n}\n\nfunc (w writer) writeComment(s string, v ...interface{}) {\n\tif *noComments {\n\t\treturn\n\t}\n\tw.writeStr(s, v...)\n}\n\nfunc (w writer) writeStr(s string, v ...interface{}) {\n\tif w.err != nil {\n\t\terrorf(\"error writing output: %v\\n\")\n\t}\n\t_, w.err = fmt.Fprintf(w.w, s, v...)\n}\n\nfunc main() {\n\tflag.Parse()\n\tout := getWriter()\n\tdefer out.Close()\n\tin := getReader()\n\tdefer in.Close()\n\n\tif *base64d {\n\t\tin = ioutil.NopCloser(base64.NewDecoder(base64.StdEncoding, in))\n\t} else if *base64urld {\n\t\tin = ioutil.NopCloser(base64.NewDecoder(base64.URLEncoding, in))\n\t}\n\n\tw := writer{w: out}\n\n\tdec := degob.NewDecoder(in)\n\tgobs, err := dec.Decode()\n\tif err != nil {\n\t\terrorf(\"failed to decode gob: %s\\n\", err)\n\t}\n\tif *pkgName != \"\" {\n\t\tw.writeStr(\"package %s\\n\\n\", *pkgName)\n\t}\n\tfor i, g := range gobs {\n\t\tw.writeComment(\"\/\/ Decoded gob %d\\n\\n\", i+1)\n\t\tif !*noTypes && g.Types != nil {\n\t\t\tw.writeComment(\"\/\/Types\\n\")\n\t\t\terr = g.WriteTypes(w)\n\t\t\tif err != nil {\n\t\t\t\terrorf(\"error writing types: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t\tw.writeComment(\"\/\/ Value: \")\n\t\tif *json {\n\t\t\terr = g.WriteValue(w, degob.JSON)\n\t\t} else {\n\t\t\terr = g.WriteValue(w, degob.SingleLine)\n\t\t}\n\t\tif err != nil {\n\t\t\terrorf(\"error writing values: %v\\n\", err)\n\t\t}\n\t\tw.writeComment(\"\\n\/\/ End gob %d\\n\\n\", i+1)\n\t}\n}\n<commit_msg>github -> gitlab<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"gitlab.com\/drosseau\/degob\"\n)\n\nvar (\n\toutFile = flag.String(\"ofile\", \"\", \"Output file (defaults to stdout)\")\n\tinFile = flag.String(\"ifile\", \"\", \"Input file (defaults to stdin)\")\n\ttruncateOut = flag.Bool(\"trunc\", false, \"Truncate output file\")\n\tbase64d = flag.Bool(\"b64\", false, \"base64 input\")\n\tbase64urld = flag.Bool(\"b64url\", false, \"base64url input\")\n\tnoComments = flag.Bool(\"nc\", false, \"don't print additional comments\")\n\tnoTypes = flag.Bool(\"nt\", false, \"don't print type information\")\n\tjson = flag.Bool(\"json\", false, \"show value as json\")\n\tpkgName = flag.String(\"pkg\", \"\", \"include a package definition in the output with the given name\")\n)\n\nfunc errorf(s string, v ...interface{}) {\n\t_, _ = fmt.Fprintf(os.Stderr, s, v...)\n\tos.Exit(1)\n}\n\nfunc getWriter() io.WriteCloser {\n\tif *outFile != \"\" {\n\t\topts := os.O_WRONLY | os.O_CREATE\n\t\tif *truncateOut {\n\t\t\topts |= os.O_TRUNC\n\t\t} else {\n\t\t\topts |= os.O_APPEND\n\t\t}\n\t\tf, err := os.OpenFile(*outFile, opts, 0644)\n\t\tif err != nil {\n\t\t\terrorf(\"failed to open `%s` for writing: %v\\n\", *outFile, err)\n\t\t}\n\t\treturn f\n\t}\n\treturn os.Stdout\n}\n\nfunc getReader() io.ReadCloser {\n\tif *inFile != \"\" {\n\t\tf, err := os.Open(*outFile)\n\t\tif err != nil {\n\t\t\terrorf(\"failed to open `%s` for reading: %v\\n\", *inFile, err)\n\t\t}\n\t\treturn f\n\t}\n\treturn ioutil.NopCloser(os.Stdin)\n}\n\ntype writer struct {\n\tw io.Writer\n\terr error\n}\n\nfunc (w writer) Write(b []byte) (int, error) {\n\tif w.err != nil {\n\t\terrorf(\"error writing output: %v\\n\", w.err)\n\t}\n\tvar n int\n\tn, w.err = w.w.Write(b)\n\treturn n, w.err\n}\n\nfunc (w writer) writeComment(s string, v ...interface{}) {\n\tif *noComments {\n\t\treturn\n\t}\n\tw.writeStr(s, v...)\n}\n\nfunc (w writer) writeStr(s string, v ...interface{}) {\n\tif w.err != nil {\n\t\terrorf(\"error writing output: %v\\n\")\n\t}\n\t_, w.err = fmt.Fprintf(w.w, s, v...)\n}\n\nfunc main() {\n\tflag.Parse()\n\tout := getWriter()\n\tdefer out.Close()\n\tin := getReader()\n\tdefer in.Close()\n\n\tif *base64d {\n\t\tin = ioutil.NopCloser(base64.NewDecoder(base64.StdEncoding, in))\n\t} else if *base64urld {\n\t\tin = ioutil.NopCloser(base64.NewDecoder(base64.URLEncoding, in))\n\t}\n\n\tw := writer{w: out}\n\n\tdec := degob.NewDecoder(in)\n\tgobs, err := dec.Decode()\n\tif err != nil {\n\t\terrorf(\"failed to decode gob: %s\\n\", err)\n\t}\n\tif *pkgName != \"\" {\n\t\tw.writeStr(\"package %s\\n\\n\", *pkgName)\n\t}\n\tfor i, g := range gobs {\n\t\tw.writeComment(\"\/\/ Decoded gob %d\\n\\n\", i+1)\n\t\tif !*noTypes && g.Types != nil {\n\t\t\tw.writeComment(\"\/\/Types\\n\")\n\t\t\terr = g.WriteTypes(w)\n\t\t\tif err != nil {\n\t\t\t\terrorf(\"error writing types: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t\tw.writeComment(\"\/\/ Value: \")\n\t\tif *json {\n\t\t\terr = g.WriteValue(w, degob.JSON)\n\t\t} else {\n\t\t\terr = g.WriteValue(w, degob.SingleLine)\n\t\t}\n\t\tif err != nil {\n\t\t\terrorf(\"error writing values: %v\\n\", err)\n\t\t}\n\t\tw.writeComment(\"\\n\/\/ End gob %d\\n\\n\", i+1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gestic\n\nimport (\n\t\"log\"\n\t\"math\"\n\n\t\"github.com\/joshlf13\/gopack\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n\t\"github.com\/wolfeidau\/epoller\"\n)\n\nconst (\n\tDSPIfoFlag uint16 = 1 << iota\n\tGestureInfoFlag\n\tTouchInfoFlag\n\tAirWheelInfoFlag\n\tCoordinateInfoFlag\n)\n\n\/\/ Gestic\n\/\/ http:\/\/ww1.microchip.com\/downloads\/en\/DeviceDoc\/40001718B.pdf\n\/\/ Page 36\n\n\/\/ Gestic device path\nconst GesticDevicePath = \"\/dev\/gestic\"\n\n\/\/ Flag which indicates if the payload contains data\nconst SensorDataPresentFlag = 0x91\n\nconst (\n\tIdSensorDataOutput = 0x91\n)\n\ntype Reader struct {\n\tonGesture func(*GestureData)\n\tlog *logger.Logger\n\tcurrentGesture *GestureData\n}\n\nfunc NewReader(log *logger.Logger, onGesture func(*GestureData)) *Reader {\n\treturn &Reader{onGesture: onGesture, log: log}\n}\n\ntype GestureData struct {\n\tEvent *EventHeader\n\tDataHeader *DataHeader\n\tGesture *GestureInfo\n\tTouch *TouchInfo\n\tAirWheel *AirWheelInfo\n\tCoordinates *CoordinateInfo\n}\n\nfunc NewGestureData() *GestureData {\n\treturn &GestureData{\n\t\tEvent: &EventHeader{},\n\t\tDataHeader: &DataHeader{},\n\t\tGesture: &GestureInfo{},\n\t\tTouch: &TouchInfo{},\n\t\tAirWheel: &AirWheelInfo{},\n\t\tCoordinates: &CoordinateInfo{},\n\t}\n}\n\ntype EventHeader struct {\n\tLength, Flags, Seq, Id uint8\n}\n\ntype DataHeader struct {\n\tDataMask uint16\n\tTimeStamp, SystemInfo uint8\n}\n\ntype GestureInfo struct {\n\tGestureVal uint32\n}\n\nfunc (gi *GestureInfo) Name() string {\n\tif int(gi.GestureVal) < len(Gestures) {\n\t\treturn Gestures[gi.GestureVal]\n\t} else {\n\t\treturn Gestures[0]\n\t}\n}\n\ntype TouchInfo struct {\n\tTouchVal uint32\n}\n\nfunc (ti *TouchInfo) Name() string {\n\tif int(ti.TouchVal) > 0 {\n\t\ti := int(math.Logb(float64(ti.TouchVal)))\n\t\tif i < len(TouchList) {\n\t\t\treturn TouchList[i]\n\t\t}\n\t}\n\treturn \"None\"\n}\n\ntype AirWheelInfo struct {\n\tAirWheelVal uint8\n\tCrap uint8\n}\n\ntype CoordinateInfo struct {\n\tX uint16\n\tY uint16\n\tZ uint16\n}\n\nvar Gestures = []string{\n\t\"None\",\n\t\"Garbage\",\n\t\"WestToEast\",\n\t\"EastToWest\",\n\t\"SouthToNorth\",\n\t\"NorthToSouth\",\n\t\"CircleClockwise\",\n\t\"CircleCounterClockwise\",\n}\n\nvar TouchList = []string{\n\t\"TouchSouth\",\n\t\"TouchWest\",\n\t\"TouchNorth\",\n\t\"TouchEast\",\n\t\"TouchCenter\",\n\t\"TapSouth\",\n\t\"TapWest\",\n\t\"TapNorth\",\n\t\"TapEast\",\n\t\"TapCenter\",\n\t\"DoubleTapSouth\",\n\t\"DoubleTapWest\",\n\t\"DoubleTapNorth\",\n\t\"DoubleTapEast\",\n\t\"DoubleTapCenter\",\n}\n\nfunc (r *Reader) Start() {\n\tr.log.Infof(\"Opening %s\", GesticDevicePath)\n\n\tr.currentGesture = NewGestureData()\n\n\tif err := epoller.OpenAndDispatchEvents(GesticDevicePath, r.buildGestureEvent); err != nil {\n\t\tlog.Fatalf(\"Error opening device reader %v\", err)\n\t}\n}\n\nfunc (r *Reader) buildGestureEvent(buf []byte, n int) {\n\n\tg := r.currentGesture\n\n\tgopack.Unpack(buf[:4], g.Event)\n\tgopack.Unpack(buf[4:8], g.DataHeader)\n\n\t\/\/ var for offset\n\toffset := 8\n\n\t\/\/ grab the DSPIfo\n\tif g.DataHeader.DataMask&DSPIfoFlag == DSPIfoFlag {\n\t\toffset += 2\n\t}\n\n\t\/\/ grab the GestureInfo\n\tif g.DataHeader.DataMask&GestureInfoFlag == GestureInfoFlag {\n\n\t\tgopack.Unpack(buf[offset:offset+4], g.Gesture)\n\t\tg.Gesture.GestureVal = g.Gesture.GestureVal & uint32(0xff)\n\t\toffset += 4\n\t}\n\n\t\/\/ grab the TouchInfo\n\tif g.DataHeader.DataMask&TouchInfoFlag == TouchInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+4], g.Touch)\n\t\toffset += 4\n\t}\n\n\t\/\/ grab the AirWheelInfo\n\tif g.DataHeader.DataMask&AirWheelInfoFlag == AirWheelInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+2], g.AirWheel)\n\t\toffset += 2\n\t}\n\n\t\/\/ grab the CoordinateInfo\n\tif g.DataHeader.DataMask&CoordinateInfoFlag == CoordinateInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+6], g.Coordinates)\n\t\toffset += 6\n\t}\n\n\tr.log.Debugf(\"Gesture: %s, Airwheel: %d, Touch: %s\", g.Gesture.Name(), g.AirWheel.AirWheelVal, g.Touch.Name())\n\n\tgo r.onGesture(g)\n}\n<commit_msg>TEMP: Ignore any unknown messages from the gestic<commit_after>package gestic\n\nimport (\n\t\"log\"\n\t\"math\"\n\n\t\"github.com\/joshlf13\/gopack\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n\t\"github.com\/wolfeidau\/epoller\"\n)\n\nconst (\n\tDSPIfoFlag uint16 = 1 << iota\n\tGestureInfoFlag\n\tTouchInfoFlag\n\tAirWheelInfoFlag\n\tCoordinateInfoFlag\n)\n\n\/\/ Gestic\n\/\/ http:\/\/ww1.microchip.com\/downloads\/en\/DeviceDoc\/40001718B.pdf\n\/\/ Page 36\n\n\/\/ Gestic device path\nconst GesticDevicePath = \"\/dev\/gestic\"\n\n\/\/ Flag which indicates if the payload contains data\nconst SensorDataPresentFlag = 0x91\n\nconst (\n\tIdSensorDataOutput = 0x91\n)\n\ntype Reader struct {\n\tonGesture func(*GestureData)\n\tlog *logger.Logger\n\tcurrentGesture *GestureData\n}\n\nfunc NewReader(log *logger.Logger, onGesture func(*GestureData)) *Reader {\n\treturn &Reader{onGesture: onGesture, log: log}\n}\n\ntype GestureData struct {\n\tEvent *EventHeader\n\tDataHeader *DataHeader\n\tGesture *GestureInfo\n\tTouch *TouchInfo\n\tAirWheel *AirWheelInfo\n\tCoordinates *CoordinateInfo\n}\n\nfunc NewGestureData() *GestureData {\n\treturn &GestureData{\n\t\tEvent: &EventHeader{},\n\t\tDataHeader: &DataHeader{},\n\t\tGesture: &GestureInfo{},\n\t\tTouch: &TouchInfo{},\n\t\tAirWheel: &AirWheelInfo{},\n\t\tCoordinates: &CoordinateInfo{},\n\t}\n}\n\ntype EventHeader struct {\n\tLength, Flags, Seq, Id uint8\n}\n\ntype DataHeader struct {\n\tDataMask uint16\n\tTimeStamp, SystemInfo uint8\n}\n\ntype GestureInfo struct {\n\tGestureVal uint32\n}\n\nfunc (gi *GestureInfo) Name() string {\n\tif int(gi.GestureVal) < len(Gestures) {\n\t\treturn Gestures[gi.GestureVal]\n\t} else {\n\t\treturn Gestures[0]\n\t}\n}\n\ntype TouchInfo struct {\n\tTouchVal uint32\n}\n\nfunc (ti *TouchInfo) Name() string {\n\tif int(ti.TouchVal) > 0 {\n\t\ti := int(math.Logb(float64(ti.TouchVal)))\n\t\tif i < len(TouchList) {\n\t\t\treturn TouchList[i]\n\t\t}\n\t}\n\treturn \"None\"\n}\n\ntype AirWheelInfo struct {\n\tAirWheelVal uint8\n\tCrap uint8\n}\n\ntype CoordinateInfo struct {\n\tX uint16\n\tY uint16\n\tZ uint16\n}\n\nvar Gestures = []string{\n\t\"None\",\n\t\"Garbage\",\n\t\"WestToEast\",\n\t\"EastToWest\",\n\t\"SouthToNorth\",\n\t\"NorthToSouth\",\n\t\"CircleClockwise\",\n\t\"CircleCounterClockwise\",\n}\n\nvar TouchList = []string{\n\t\"TouchSouth\",\n\t\"TouchWest\",\n\t\"TouchNorth\",\n\t\"TouchEast\",\n\t\"TouchCenter\",\n\t\"TapSouth\",\n\t\"TapWest\",\n\t\"TapNorth\",\n\t\"TapEast\",\n\t\"TapCenter\",\n\t\"DoubleTapSouth\",\n\t\"DoubleTapWest\",\n\t\"DoubleTapNorth\",\n\t\"DoubleTapEast\",\n\t\"DoubleTapCenter\",\n}\n\nfunc (r *Reader) Start() {\n\tr.log.Infof(\"Opening %s\", GesticDevicePath)\n\n\tr.currentGesture = NewGestureData()\n\n\tif err := epoller.OpenAndDispatchEvents(GesticDevicePath, r.buildGestureEvent); err != nil {\n\t\tlog.Fatalf(\"Error opening device reader %v\", err)\n\t}\n}\n\nfunc (r *Reader) buildGestureEvent(buf []byte, n int) {\n\n\tg := r.currentGesture\n\n\tgopack.Unpack(buf[:4], g.Event)\n\tgopack.Unpack(buf[4:8], g.DataHeader)\n\n\t\/\/ var for offset\n\toffset := 8\n\n\t\/\/ grab the DSPIfo\n\tif g.DataHeader.DataMask&DSPIfoFlag == DSPIfoFlag {\n\t\toffset += 2\n\t}\n\n\t\/\/ grab the GestureInfo\n\tif g.DataHeader.DataMask&GestureInfoFlag == GestureInfoFlag {\n\n\t\tgopack.Unpack(buf[offset:offset+4], g.Gesture)\n\t\tg.Gesture.GestureVal = g.Gesture.GestureVal & uint32(0xff)\n\t\toffset += 4\n\t}\n\n\t\/\/ grab the TouchInfo\n\tif g.DataHeader.DataMask&TouchInfoFlag == TouchInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+4], g.Touch)\n\t\toffset += 4\n\t}\n\n\t\/\/ grab the AirWheelInfo\n\tif g.DataHeader.DataMask&AirWheelInfoFlag == AirWheelInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+2], g.AirWheel)\n\t\toffset += 2\n\t}\n\n\t\/\/ grab the CoordinateInfo\n\tif g.DataHeader.DataMask&CoordinateInfoFlag == CoordinateInfoFlag {\n\t\tgopack.Unpack(buf[offset:offset+6], g.Coordinates)\n\t\toffset += 6\n\t}\n\n\tr.log.Debugf(\"Gesture: %s, Airwheel: %d, Touch: %s\", g.Gesture.Name(), g.AirWheel.AirWheelVal, g.Touch.Name())\n\n\t\/\/ XXX: TODO: If we haven't read anything else... just ignore it for now as it's some other kind of message\n\tif offset > 8 {\n\t\tgo r.onGesture(g)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014-2015 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/andres-erbsen\/protobuf\/jsonpb\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\n\t\"github.com\/yahoo\/coname\"\n\t\"github.com\/yahoo\/coname\/proto\"\n)\n\nfunc main() {\n\tconfigPathPtr := flag.String(\"config\", \"config.json\", \"path to config file\")\n\tflag.Parse()\n\n\tconfigReader, err := os.Open(*configPathPtr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open configuration file: %s\", err)\n\t}\n\tcfg := &proto.Config{}\n\terr = jsonpb.Unmarshal(configReader, cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse configuration file: %s\", err)\n\t}\n\n\t\/\/ TODO: eliminate this once we have real certs\n\tcertFile := \"ca_cert.pem\"\n\tcaCertPEM, err := ioutil.ReadFile(certFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"couldn't read certs from %s\", certFile)\n\t}\n\tcaCertDER, caCertPEM := pem.Decode(caCertPEM)\n\tif caCertDER == nil {\n\t\tlog.Fatalf(\"failed to parse key PEM\")\n\t}\n\tcaCert, err := x509.ParseCertificate(caCertDER.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcaPool := x509.NewCertPool()\n\tcaPool.AddCert(caCert)\n\n\trealm := cfg.Realms[0]\n\n\tconn, err := grpc.Dial(realm.Addr, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{RootCAs: caPool})))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tname := \"alice@example.com\"\n\tnonce := make([]byte, 16)\n\t_, err = rand.Read(nonce)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tprofile := proto.EncodedProfile{\n\t\tProfile: proto.Profile{\n\t\t\tNonce: nonce,\n\t\t\tKeys: map[string][]byte{\"abc\": []byte{1, 2, 3}, \"xyz\": []byte(\"TEST 456\")},\n\t\t},\n\t}\n\tprofile.UpdateEncoding()\n\tvar commitment [64]byte\n\tsha3.ShakeSum256(commitment[:], profile.Encoding)\n\tentry := proto.EncodedEntry{\n\t\tEntry: proto.Entry{\n\t\t\tVersion: 0,\n\t\t\tUpdatePolicy: &proto.AuthorizationPolicy{\n\t\t\t\tPublicKeys: make(map[uint64]*proto.PublicKey),\n\t\t\t\tQuorum: &proto.QuorumExpr{\n\t\t\t\t\tThreshold: 0,\n\t\t\t\t\tCandidates: []uint64{},\n\t\t\t\t\tSubexpressions: []*proto.QuorumExpr{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tProfileCommitment: commitment[:],\n\t\t},\n\t}\n\tentry.UpdateEncoding()\n\tupdateC := proto.NewE2EKSPublicClient(conn)\n\tproof, err := updateC.Update(context.Background(), &proto.UpdateRequest{\n\t\tUpdate: &proto.SignedEntryUpdate{\n\t\t\tNewEntry: entry,\n\t\t\tSignatures: make(map[uint64][]byte),\n\t\t},\n\t\tProfile: profile,\n\t\tLookupParameters: &proto.LookupRequest{\n\t\t\tUserId: name,\n\t\t\tQuorumRequirement: realm.VerificationPolicy.Quorum,\n\t\t},\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"update failed on %s: %s\", realm.Addr, err)\n\t}\n\tif got, want := proof.Profile.Encoding, profile.Encoding; !bytes.Equal(got, want) {\n\t\tlog.Fatalf(\"created profile didn't roundtrip: %x != %x\", got, want)\n\t}\n\t_, err = coname.VerifyLookup(cfg, name, proof, time.Now())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>client for test purposes<commit_after>\/\/ Copyright 2014-2015 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\/\/\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/andres-erbsen\/protobuf\/jsonpb\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\n\t\"github.com\/yahoo\/coname\"\n\t\"github.com\/yahoo\/coname\/proto\"\n)\n\nfunc main() {\n\tconfigPathPtr := flag.String(\"config\", \"config.json\", \"path to config file\")\n\tflag.Parse()\n\n\tconfigReader, err := os.Open(*configPathPtr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open configuration file: %s\", err)\n\t}\n\tcfg := &proto.Config{}\n\terr = jsonpb.Unmarshal(configReader, cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse configuration file: %s\", err)\n\t}\n\n\tcertFile := \"ca_cert.pem\"\n\tcaCertPEM, err := ioutil.ReadFile(certFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"couldn't read certs from %s\", certFile)\n\t}\n\tcaCertDER, caCertPEM := pem.Decode(caCertPEM)\n\tif caCertDER == nil {\n\t\tlog.Fatalf(\"failed to parse key PEM\")\n\t}\n\tcaCert, err := x509.ParseCertificate(caCertDER.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcaPool := x509.NewCertPool()\n\tcaPool.AddCert(caCert)\n\n\trealm := cfg.Realms[0]\n\n\tname := \"dmz@yahoo-inc.com\"\n\t\/*nonce := make([]byte, 16)\n\t _, err = rand.Read(nonce)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t} *\/\n\tnonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}\n\tprofile := proto.EncodedProfile{\n\t\tProfile: proto.Profile{\n\t\t\tNonce: nonce,\n\t\t\tKeys: map[string][]byte{\"abc\": []byte{1, 2, 3}, \"xyz\": []byte(\"TEST 456\")},\n\t\t},\n\t}\n\tprofile.UpdateEncoding()\n\tvar commitment [64]byte\n\tsha3.ShakeSum256(commitment[:], profile.Encoding)\n\tentry := proto.EncodedEntry{\n\t\tEntry: proto.Entry{\n\t\t\tVersion: 0,\n\t\t\tUpdatePolicy: &proto.AuthorizationPolicy{\n\t\t\t\tPublicKeys: make(map[uint64]*proto.PublicKey),\n\t\t\t\tQuorum: &proto.QuorumExpr{\n\t\t\t\t\tThreshold: 0,\n\t\t\t\t\tCandidates: []uint64{},\n\t\t\t\t\tSubexpressions: []*proto.QuorumExpr{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tProfileCommitment: commitment[:],\n\t\t},\n\t}\n\tentry.UpdateEncoding()\n\tvar entryHash [32]byte\n\tsha3.ShakeSum256(entryHash[:], entry.Encoding)\n\tlog.Printf(\"entry: %x\", entry.Encoding)\n\n\tconn, err := grpc.Dial(realm.Addr, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{RootCAs: caPool})))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tupdateC := proto.NewE2EKSPublicClient(conn)\n\tproof, err := updateC.Update(context.Background(), &proto.UpdateRequest{\n\t\tUpdate: &proto.SignedEntryUpdate{\n\t\t\tNewEntry: entry,\n\t\t\tSignatures: make(map[uint64][]byte),\n\t\t},\n\t\tProfile: profile,\n\t\tLookupParameters: &proto.LookupRequest{\n\t\t\tUserId: name,\n\t\t\tQuorumRequirement: realm.VerificationPolicy.Quorum,\n\t\t},\n\t\tDKIMProof: []byte(\"X-Apparently-To: dmz@yahoo-inc.com; Mon, 31 Aug 2015 20:22:05 +0000\\r\\nReturn-Path: <dmz@yahoo-inc.com>\\r\\nReceived-SPF: pass (domain of yahoo-inc.com designates 216.145.54.155 as permitted sender)\\r\\nX-YMailISG: 1EhwNaYWLDv2gTUu.9nt0jxdqITV9b92O6Cwv1fdJI0Znuzw\\r\\n JiRKT62YsHZWJxcnlzt4SKtiChHTSAwHSy9.w97NwmcUQYqFkJkXmsqAjscY\\r\\n 0KW9efirs1q3YEvUtmN3BkIhbXujDW5L0Ne0YQtZnK.DF_Yi6dp7RCvfzUZf\\r\\n RskRPc2qP.tEMNji9_S1kSiPCtn8AwFsPHgGZtQOTtEYsTYUCsD4oEnjAJy1\\r\\n 332kCwDAOk1wj5l7.PwstEHF438ER_KZaMzfFq8UNrhdEwoFYZGSOfYeFV8W\\r\\n d2XMmN4vAJRV4D0FV9_cYwJRktzNvKqq.WddXZLuc96QR4.hxOOsq4gEEfJ8\\r\\n rkw8VodZEu_GlY_2lmyW8UBqmZLhO9lFuNsjfPVSVRyTxt4mMFBG_5XQudBO\\r\\n xudVy7LzRGiwH.3UlpR4Vl7smmSRC2bf4OrR0bZthRngty1VipzNCRVAE8Od\\r\\n ZjoR092JxYCF.B94f6ZF5FKqbB2QCjns3WKF0aExw.lzX8_Ral0DsXpBdan5\\r\\n aIU8cGBQuJBoaAjbaEanoVUXlvz0qXHLEDisvKKsc3w.igAQy01OCSik.2Gx\\r\\n m2XkGfeZ37mSdZhzAVKMR8eM8WkXl_O.uf7kq6vEyWXuhaMQonTjFYcdAtZX\\r\\n FMxFdkDTLp4bdEn1gu9JWyRYZTYnj3igBJxc0z_tIBlMrVpcf_NFpjBn1di2\\r\\n _sfojiKsnhUoYHwbjX7KT7yblcK8Re5Fp57g7XWIc2_tmRbY_iaiGhK0qr4x\\r\\n 72_hfaF0OBgArvDUPYWF50HYIP597qv1ucNyymAPVGxOx1UNOFWej494R6N1\\r\\n C8VTghtzKCbklLGGnmSIqPRZCZntvEsWHmmabhFwxRiTZQeu_HfN8CA_Gqoq\\r\\n k4qQgMbq9o22ekLsAj_jDEnUW343npUbD0XAYDOBCKuSRxZubpmXAu1HqmWL\\r\\n OTbpjEw8lvF_71EyG5qKVZoEiNCxXMUxia1FBp6RS_uDb4E73TgPxe1v1Ctw\\r\\n dzqBdg4G0DKkeMqDmDUzQUynHnonJpO4M5Bl6cdr2OfvYK7iDpPf1xeqoexb\\r\\n McFitNfwU6kax4VRMGahTFv2L56ovtsgV.NMuNEAwad_p3zI0kwi3OMfan9K\\r\\n _5pIOwQYuKc5vD.5Twq9KSOdYHGYZp_8cBv6dZ4UXvc9k1M5491nJ164VQo.\\r\\n v.qr6ufI2qhXdMTNxzsLw372.iYBA_5xPeT3dvaYt35sjzmjZnpN\\r\\nX-Originating-IP: [216.145.54.155]\\r\\nAuthentication-Results: mta2002.corp.mail.gq1.yahoo.com from=yahoo-inc.com; domainkeys=neutral (no sig); from=yahoo-inc.com; dkim=pass (ok)\\r\\nReceived: from 127.0.0.1 (EHLO mrout6.yahoo.com) (216.145.54.155)\\r\\n by mta2002.corp.mail.gq1.yahoo.com with SMTPS; Mon, 31 Aug 2015 20:22:05 +0000\\r\\nReceived: from omp1018.mail.ne1.yahoo.com (omp1018.mail.ne1.yahoo.com [98.138.89.162])\\r\\n\tby mrout6.yahoo.com (8.14.9\/8.14.9\/y.out) with ESMTP id t7VKLum2007505\\r\\n\t(version=TLSv1\/SSLv3 cipher=DHE-RSA-CAMELLIA256-SHA bits=256 verify=NO)\\r\\n\tfor <dmz@yahoo-inc.com>; Mon, 31 Aug 2015 13:21:56 -0700 (PDT)\\r\\nDKIM-Signature: v=1; a=rsa-sha256; c=relaxed\/simple; d=yahoo-inc.com;\\r\\n\ts=cobra; t=1441052517;\\r\\n\tbh=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN\/XKdLCPjaYaY=;\\r\\n\th=Date:From:Reply-To:To:Subject;\\r\\n\tb=bzbQnVqgZoDSfLYqlVlyv++8CntLQ2AQJR84uPzh2OF\/mnz+m+H+YfzAzYQc7b+GU\\r\\n\t lGgG0DX89hnWgmp4U1LlzNqwUFrhmL8muanSejVHWkrX48grrG1OwNd5z04oO0hFJE\\r\\n\t 20ql0lI4tkgSNR7JLoedMVNm\/YdrmCiHbuMj2cxg=\\r\\nReceived: (qmail 99407 invoked by uid 1000); 31 Aug 2015 20:21:56 -0000\\r\\nDKIM-Signature: v=1; a=rsa-sha256; c=relaxed\/relaxed; d=yahoo-inc.com; s=ginc1024; t=1441052516; bh=47DEQpj8HBSa+\/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=; h=Date:From:Reply-To:To:Message-ID:Subject:MIME-Version:Content-Type:Content-Transfer-Encoding; b=r+c21Nf78CMLaL0K1FmgE\/CQz70nUJyVsSgQWqu9OlGdIf2GrY9OmwueQ4xHiLu4A5Jd0CAFgxLS5ZkBkPf4xUCvaLmQOYyY+8bfM2JhuG+g2dOMmjjajNqs+iyXfyx+Ak0T2Kahom\/b7cpmr5\/PiAb2JpL3O0p2StukvGAolp0=\\r\\nX-YMail-OSG: 0DtuWrwLUzvKrUMVVf5jtWesdteRmLR6oLTuKAGepU.3rTsZeR6zFmacnp0O_Dj\\r\\n RWZU-\\r\\nReceived: by 98.138.105.210; Mon, 31 Aug 2015 20:21:55 +0000 \\r\\nDate: Mon, 31 Aug 2015 20:21:55 +0000 (UTC)\\r\\nFrom: Daniel Ziegler <dmz@yahoo-inc.com>\\r\\nReply-To: Daniel Ziegler <dmz@yahoo-inc.com>\\r\\nTo: Daniel Ziegler <dmz@yahoo-inc.com>\\r\\nMessage-ID: <1257428900.917316.1441052515588.JavaMail.yahoo@mail.yahoo.com>\\r\\nSubject: _YAHOO_E2E_KEYSERVER_PROOF_Y4ncLhJeE\/qmdXlzCRp5JtgFHy4F4NbKPawk9U8vJ1E=\\r\\nMIME-Version: 1.0\\r\\nContent-Type: text\/plain; charset=UTF-8\\r\\nContent-Transfer-Encoding: 7bit\\r\\nContent-Length: 1\\r\\n\\r\\n\"),\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"update failed on %s: %s\", realm.Addr, err)\n\t}\n\tif got, want := proof.Profile.Encoding, profile.Encoding; !bytes.Equal(got, want) {\n\t\tlog.Fatalf(\"created profile didn't roundtrip: %x != %x\", got, want)\n\t}\n\t_, err = coname.VerifyLookup(cfg, name, proof, time.Now())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package colonycore\n\nimport (\n\t\"github.com\/eaciit\/dbox\"\n\t\"github.com\/eaciit\/orm\/v1\"\n)\n\nconst (\n\tCONF_DB_ACL string = \"db_acl\"\n)\n\ntype Configuration struct {\n\torm.ModelBase\n\tKey string `json:\"_id\",bson:\"_id\"`\n\tValue interface{}\n}\n\nfunc (a *Configuration) TableName() string {\n\treturn \"configurations\"\n}\n\nfunc (a *Configuration) RecordID() interface{} {\n\treturn a.Key\n}\n\nfunc GetConfig(key string, args ...string) interface{} {\n\tvar res interface{} = nil\n\tif len(args) > 0 {\n\t\tres = args[0]\n\t}\n\n\tcursor, err := Find(new(Configuration), dbox.Eq(\"_id\", key))\n\tif err != nil {\n\t\treturn res\n\t}\n\n\tif cursor.Count() == 0 {\n\t\treturn res\n\t}\n\n\tdata := Configuration{}\n\terr = cursor.Fetch(&data, 1, false)\n\tif err != nil {\n\t\treturn res\n\t}\n\n\treturn data.Value\n}\n\nfunc SetConfig(key string, value interface{}) {\n\to := new(Configuration)\n\to.Key = key\n\to.Value = value\n\tSave(o)\n}\n<commit_msg>no message<commit_after>package colonycore\n\nimport (\n\t\"github.com\/eaciit\/dbox\"\n\t\"github.com\/eaciit\/orm\/v1\"\n)\n\nconst (\n\tCONF_DB_ACL string = \"db_acl\"\n)\n\ntype Configuration struct {\n\torm.ModelBase\n\tID string `json:\"_id\",bson:\"_id\"`\n\tValue interface{}\n}\n\nfunc (a *Configuration) TableName() string {\n\treturn \"configurations\"\n}\n\nfunc (a *Configuration) RecordID() interface{} {\n\treturn a.ID\n}\n\nfunc GetConfig(key string, args ...string) interface{} {\n\tvar res interface{} = nil\n\tif len(args) > 0 {\n\t\tres = args[0]\n\t}\n\n\tcursor, err := Find(new(Configuration), dbox.Eq(\"_id\", key))\n\tif err != nil {\n\t\treturn res\n\t}\n\n\tif cursor.Count() == 0 {\n\t\treturn res\n\t}\n\n\tdata := Configuration{}\n\terr = cursor.Fetch(&data, 1, false)\n\tif err != nil {\n\t\treturn res\n\t}\n\n\treturn data.Value\n}\n\nfunc SetConfig(key string, value interface{}) {\n\to := new(Configuration)\n\to.ID = key\n\to.Value = value\n\tSave(o)\n}\n<|endoftext|>"} {"text":"<commit_before>package brokers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/signatures\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/utils\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\n\/\/ RedisBroker represents a Redis broker\ntype RedisBroker struct {\n\tconfig *config.Config\n\tregisteredTaskNames []string\n\thost string\n\tpassword string\n\tpool *redis.Pool\n\tretryFunc func()\n\tstopChan chan int\n\tstopReceivingChan chan int\n\terrorsChan chan error\n\twg sync.WaitGroup\n}\n\n\/\/ NewRedisBroker creates new RedisBroker instance\nfunc NewRedisBroker(cnf *config.Config, host, password string) Broker {\n\treturn Broker(&RedisBroker{\n\t\tconfig: cnf,\n\t\thost: host,\n\t\tpassword: password,\n\t})\n}\n\n\/\/ SetRegisteredTaskNames sets registered task names\nfunc (redisBroker *RedisBroker) SetRegisteredTaskNames(names []string) {\n\tredisBroker.registeredTaskNames = names\n}\n\n\/\/ IsTaskRegistered returns true if the task is registered with this broker\nfunc (redisBroker *RedisBroker) IsTaskRegistered(name string) bool {\n\tfor _, registeredTaskName := range redisBroker.registeredTaskNames {\n\t\tif registeredTaskName == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ StartConsuming enters a loop and waits for incoming messages\nfunc (redisBroker *RedisBroker) StartConsuming(consumerTag string, taskProcessor TaskProcessor) (bool, error) {\n\tif redisBroker.retryFunc == nil {\n\t\tredisBroker.retryFunc = utils.RetryClosure()\n\t}\n\n\tredisBroker.pool = redisBroker.newPool()\n\tdefer redisBroker.pool.Close()\n\n\t_, err := redisBroker.pool.Get().Do(\"PING\")\n\tif err != nil {\n\t\tredisBroker.retryFunc()\n\t\treturn true, err \/\/ retry true\n\t}\n\n\tredisBroker.retryFunc = utils.RetryClosure()\n\tredisBroker.stopChan = make(chan int)\n\tredisBroker.stopReceivingChan = make(chan int)\n\tredisBroker.errorsChan = make(chan error)\n\tdeliveries := make(chan []byte)\n\n\tredisBroker.wg.Add(1)\n\n\tgo func() {\n\t\tdefer redisBroker.wg.Done()\n\n\t\tlog.Print(\"[*] Waiting for messages. To exit press CTRL+C\")\n\n\t\tconn := redisBroker.pool.Get()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ A way to stop this goroutine from redisBroker.StopConsuming\n\t\t\tcase <-redisBroker.stopReceivingChan:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\titemBytes, err := conn.Do(\"LPOP\", redisBroker.config.DefaultQueue)\n\t\t\t\tif err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ Unline BLPOP, LPOP is non blocking so nil means we can keep iterating\n\t\t\t\tif itemBytes == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\titem, err := redis.Bytes(itemBytes, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tsignature := signatures.TaskSignature{}\n\t\t\t\tif err := json.Unmarshal(item, &signature); err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ If the task is not registered, we requeue it,\n\t\t\t\t\/\/ there might be different workers for processing specific tasks\n\t\t\t\tif !redisBroker.IsTaskRegistered(signature.Name) {\n\t\t\t\t\t_, err := conn.Do(\"RPUSH\", redisBroker.config.DefaultQueue, item)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdeliveries <- item\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err := redisBroker.consume(deliveries, taskProcessor); err != nil {\n\t\treturn true, err \/\/ retry true\n\t}\n\n\treturn false, nil\n}\n\n\/\/ StopConsuming quits the loop\nfunc (redisBroker *RedisBroker) StopConsuming() {\n\tredisBroker.stopReceiving()\n\t\/\/ Notifying the stop channel stops consuming of messages\n\tredisBroker.stopChan <- 1\n}\n\n\/\/ Publish places a new message on the default queue\nfunc (redisBroker *RedisBroker) Publish(signature *signatures.TaskSignature) error {\n\tconn, err := redisBroker.open()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Dial: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tmessage, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON Encode Message: %v\", err)\n\t}\n\n\t_, err = conn.Do(\"RPUSH\", redisBroker.config.DefaultQueue, message)\n\treturn err\n}\n\n\/\/ Consume a single message\nfunc (redisBroker *RedisBroker) consumeOne(item []byte, taskProcessor TaskProcessor) {\n\tlog.Printf(\"Received new message: %s\", item)\n\n\tsignature := signatures.TaskSignature{}\n\tif err := json.Unmarshal(item, &signature); err != nil {\n\t\tredisBroker.errorsChan <- err\n\t\treturn\n\t}\n\n\tif err := taskProcessor.Process(&signature); err != nil {\n\t\tredisBroker.errorsChan <- err\n\t}\n}\n\n\/\/ Consumes messages...\nfunc (redisBroker *RedisBroker) consume(deliveries <-chan []byte, taskProcessor TaskProcessor) error {\n\tfor {\n\t\tselect {\n\t\tcase err := <-redisBroker.errorsChan:\n\t\t\treturn err\n\t\tcase d := <-deliveries:\n\t\t\t\/\/ Consume the task inside a gotourine so multiple tasks\n\t\t\t\/\/ can be processed concurrently\n\t\t\tgo func() {\n\t\t\t\tredisBroker.consumeOne(d, taskProcessor)\n\t\t\t}()\n\t\tcase <-redisBroker.stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Stops the receving goroutine\nfunc (redisBroker *RedisBroker) stopReceiving() {\n\tredisBroker.stopReceivingChan <- 1\n\t\/\/ Waiting for the receiving goroutine to have stopped\n\tredisBroker.wg.Wait()\n}\n\n\/\/ Returns \/ creates instance of Redis connection\nfunc (redisBroker *RedisBroker) open() (redis.Conn, error) {\n\tif redisBroker.password != \"\" {\n\t\treturn redis.Dial(\"tcp\", redisBroker.host,\n\t\t\tredis.DialPassword(redisBroker.password))\n\t}\n\treturn redis.Dial(\"tcp\", redisBroker.host)\n}\n\n\/\/ Returns a new pool of Redis connections\nfunc (redisBroker *RedisBroker) newPool() *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tvar c redis.Conn\n\t\t\tvar err error\n\n\t\t\tif redisBroker.password != \"\" {\n\t\t\t\tc, err = redis.Dial(\"tcp\", redisBroker.host,\n\t\t\t\t\tredis.DialPassword(redisBroker.password))\n\t\t\t} else {\n\t\t\t\tc, err = redis.Dial(\"tcp\", redisBroker.host)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n}\n<commit_msg>Fixing an issue with Redis broker and Quit method.<commit_after>package brokers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/signatures\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/utils\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\n\/\/ RedisBroker represents a Redis broker\ntype RedisBroker struct {\n\tconfig *config.Config\n\tregisteredTaskNames []string\n\thost string\n\tpassword string\n\tpool *redis.Pool\n\tretry bool\n\tretryFunc func()\n\tstopChan chan int\n\tstopReceivingChan chan int\n\terrorsChan chan error\n\twg sync.WaitGroup\n}\n\n\/\/ NewRedisBroker creates new RedisBroker instance\nfunc NewRedisBroker(cnf *config.Config, host, password string) Broker {\n\treturn Broker(&RedisBroker{\n\t\tconfig: cnf,\n\t\thost: host,\n\t\tpassword: password,\n\t\tretry: true,\n\t})\n}\n\n\/\/ SetRegisteredTaskNames sets registered task names\nfunc (redisBroker *RedisBroker) SetRegisteredTaskNames(names []string) {\n\tredisBroker.registeredTaskNames = names\n}\n\n\/\/ IsTaskRegistered returns true if the task is registered with this broker\nfunc (redisBroker *RedisBroker) IsTaskRegistered(name string) bool {\n\tfor _, registeredTaskName := range redisBroker.registeredTaskNames {\n\t\tif registeredTaskName == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ StartConsuming enters a loop and waits for incoming messages\nfunc (redisBroker *RedisBroker) StartConsuming(consumerTag string, taskProcessor TaskProcessor) (bool, error) {\n\tif redisBroker.retryFunc == nil {\n\t\tredisBroker.retryFunc = utils.RetryClosure()\n\t}\n\n\tredisBroker.pool = redisBroker.newPool()\n\tdefer redisBroker.pool.Close()\n\n\t_, err := redisBroker.pool.Get().Do(\"PING\")\n\tif err != nil {\n\t\tredisBroker.retryFunc()\n\t\treturn redisBroker.retry, err \/\/ retry true\n\t}\n\n\tredisBroker.retryFunc = utils.RetryClosure()\n\tredisBroker.stopChan = make(chan int)\n\tredisBroker.stopReceivingChan = make(chan int)\n\tredisBroker.errorsChan = make(chan error)\n\tdeliveries := make(chan []byte)\n\n\tredisBroker.wg.Add(1)\n\n\tgo func() {\n\t\tdefer redisBroker.wg.Done()\n\n\t\tlog.Print(\"[*] Waiting for messages. To exit press CTRL+C\")\n\n\t\tconn := redisBroker.pool.Get()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ A way to stop this goroutine from redisBroker.StopConsuming\n\t\t\tcase <-redisBroker.stopReceivingChan:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\titemBytes, err := conn.Do(\"LPOP\", redisBroker.config.DefaultQueue)\n\t\t\t\tif err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ Unline BLPOP, LPOP is non blocking so nil means we can keep iterating\n\t\t\t\tif itemBytes == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\titem, err := redis.Bytes(itemBytes, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tsignature := signatures.TaskSignature{}\n\t\t\t\tif err := json.Unmarshal(item, &signature); err != nil {\n\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ If the task is not registered, we requeue it,\n\t\t\t\t\/\/ there might be different workers for processing specific tasks\n\t\t\t\tif !redisBroker.IsTaskRegistered(signature.Name) {\n\t\t\t\t\t_, err := conn.Do(\"RPUSH\", redisBroker.config.DefaultQueue, item)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tredisBroker.errorsChan <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdeliveries <- item\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err := redisBroker.consume(deliveries, taskProcessor); err != nil {\n\t\treturn redisBroker.retry, err \/\/ retry true\n\t}\n\n\treturn redisBroker.retry, nil\n}\n\n\/\/ StopConsuming quits the loop\nfunc (redisBroker *RedisBroker) StopConsuming() {\n\t\/\/ Do not retry from now on\n\tredisBroker.retry = false\n\t\/\/ Stop the receiving goroutine\n\tredisBroker.stopReceiving()\n\t\/\/ Notifying the stop channel stops consuming of messages\n\tredisBroker.stopChan <- 1\n}\n\n\/\/ Publish places a new message on the default queue\nfunc (redisBroker *RedisBroker) Publish(signature *signatures.TaskSignature) error {\n\tconn, err := redisBroker.open()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Dial: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tmessage, err := json.Marshal(signature)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"JSON Encode Message: %v\", err)\n\t}\n\n\t_, err = conn.Do(\"RPUSH\", redisBroker.config.DefaultQueue, message)\n\treturn err\n}\n\n\/\/ Consume a single message\nfunc (redisBroker *RedisBroker) consumeOne(item []byte, taskProcessor TaskProcessor) {\n\tlog.Printf(\"Received new message: %s\", item)\n\n\tsignature := signatures.TaskSignature{}\n\tif err := json.Unmarshal(item, &signature); err != nil {\n\t\tredisBroker.errorsChan <- err\n\t\treturn\n\t}\n\n\tif err := taskProcessor.Process(&signature); err != nil {\n\t\tredisBroker.errorsChan <- err\n\t}\n}\n\n\/\/ Consumes messages...\nfunc (redisBroker *RedisBroker) consume(deliveries <-chan []byte, taskProcessor TaskProcessor) error {\n\tfor {\n\t\tselect {\n\t\tcase err := <-redisBroker.errorsChan:\n\t\t\treturn err\n\t\tcase d := <-deliveries:\n\t\t\t\/\/ Consume the task inside a gotourine so multiple tasks\n\t\t\t\/\/ can be processed concurrently\n\t\t\tgo func() {\n\t\t\t\tredisBroker.consumeOne(d, taskProcessor)\n\t\t\t}()\n\t\tcase <-redisBroker.stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Stops the receving goroutine\nfunc (redisBroker *RedisBroker) stopReceiving() {\n\tredisBroker.stopReceivingChan <- 1\n\t\/\/ Waiting for the receiving goroutine to have stopped\n\tredisBroker.wg.Wait()\n}\n\n\/\/ Returns \/ creates instance of Redis connection\nfunc (redisBroker *RedisBroker) open() (redis.Conn, error) {\n\tif redisBroker.password != \"\" {\n\t\treturn redis.Dial(\"tcp\", redisBroker.host,\n\t\t\tredis.DialPassword(redisBroker.password))\n\t}\n\treturn redis.Dial(\"tcp\", redisBroker.host)\n}\n\n\/\/ Returns a new pool of Redis connections\nfunc (redisBroker *RedisBroker) newPool() *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tvar c redis.Conn\n\t\t\tvar err error\n\n\t\t\tif redisBroker.password != \"\" {\n\t\t\t\tc, err = redis.Dial(\"tcp\", redisBroker.host,\n\t\t\t\t\tredis.DialPassword(redisBroker.password))\n\t\t\t} else {\n\t\t\t\tc, err = redis.Dial(\"tcp\", redisBroker.host)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package websocket\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/models\/fundingloan\"\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/v2\"\n)\n\ntype FlagRequest struct {\n\tEvent string `json:\"event\"`\n\tFlags int `json:\"flags\"`\n}\n\n\/\/ API for end-users to interact with Bitfinex.\n\n\/\/ Send publishes a generic message to the Bitfinex API.\nfunc (c *Client) Send(ctx context.Context, msg interface{}) error {\n\tsocket, err := c.getSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, msg)\n}\n\n\/\/ Submit a request to enable the given flag\nfunc (c *Client) EnableFlag(ctx context.Context, flag int) (string, error) {\n\treq := &FlagRequest{\n\t\tEvent: \"conf\",\n\t\tFlags: flag,\n\t}\n\t\/\/ TODO enable flag on reconnect?\n\t\/\/ create sublist to stop concurrent map read\n\tsocks := make([]*Socket, len(c.sockets))\n\tc.mtx.RLock()\n\tfor i, socket := range c.sockets {\n\t\tsocks[i] = socket\n\t}\n\tc.mtx.RUnlock()\n\tfor _, socket := range socks {\n\t\terr := socket.Asynchronous.Send(ctx, req)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\n\/\/ Gen the count of currently active websocket connections\nfunc (c *Client) ConnectionCount() int {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\treturn len(c.sockets)\n}\n\n\/\/ Get the available capacity of the current\n\/\/ websocket connections\nfunc (c *Client) AvailableCapacity() int {\n\treturn c.getTotalAvailableSocketCapacity()\n}\n\n\/\/ Start a new websocket connection. This function is only exposed in case you want to\n\/\/ implicitly add new connections otherwise connection management is already handled for you.\nfunc (c *Client) StartNewConnection() error {\n\treturn c.connectSocket(SocketId(c.ConnectionCount()))\n}\n\nfunc (c *Client) subscribeBySocket(ctx context.Context, socket *Socket, req *SubscriptionRequest) (string, error) {\n\tc.subscriptions.add(socket.Id, req)\n\terr := socket.Asynchronous.Send(ctx, req)\n\tif err != nil {\n\t\t\/\/ propagate send error\n\t\treturn \"\", err\n\t}\n\treturn req.SubID, nil\n}\n\n\/\/ Submit a request to subscribe to the given SubscriptionRequuest\nfunc (c *Client) Subscribe(ctx context.Context, req *SubscriptionRequest) (string, error) {\n\tif c.getTotalAvailableSocketCapacity() <= 1 {\n\t\terr := c.StartNewConnection()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\t\/\/ get socket with the highest available capacity\n\tsocket, err := c.getMostAvailableSocket()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn c.subscribeBySocket(ctx, socket, req)\n}\n\n\/\/ Submit a request to receive ticker updates\nfunc (c *Client) SubscribeTicker(ctx context.Context, symbol string) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanTicker,\n\t\tSymbol: symbol,\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a request to receive trade updates\nfunc (c *Client) SubscribeTrades(ctx context.Context, symbol string) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanTrades,\n\t\tSymbol: symbol,\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request for market data for the given symbol, at the given frequency, with the given precision, returning no more than priceLevels price entries.\n\/\/ Default values are Precision0, Frequency0, and priceLevels=25.\nfunc (c *Client) SubscribeBook(ctx context.Context, symbol string, precision bitfinex.BookPrecision, frequency bitfinex.BookFrequency, priceLevel int) (string, error) {\n\tif priceLevel < 0 {\n\t\treturn \"\", fmt.Errorf(\"negative price levels not supported: %d\", priceLevel)\n\t}\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanBook,\n\t\tSymbol: symbol,\n\t\tPrecision: string(precision),\n\t\tLen: fmt.Sprintf(\"%d\", priceLevel), \/\/ needed for R0?\n\t}\n\tif !bitfinex.IsRawBook(string(precision)) {\n\t\treq.Frequency = string(frequency)\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request to receive candle updates\nfunc (c *Client) SubscribeCandles(ctx context.Context, symbol string, resolution bitfinex.CandleResolution) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanCandles,\n\t\tKey: fmt.Sprintf(\"trade:%s:%s\", resolution, symbol),\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request for status updates\nfunc (c *Client) SubscribeStatus(ctx context.Context, symbol string, sType bitfinex.StatusType) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanStatus,\n\t\tKey: fmt.Sprintf(\"%s:%s\", string(sType), symbol),\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Retrieve the Orderbook for the given symbol which is managed locally.\n\/\/ This requires ManageOrderbook=True and an active chanel subscribed to the given\n\/\/ symbols orderbook\nfunc (c *Client) GetOrderbook(symbol string) (*Orderbook, error) {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\tif val, ok := c.orderbooks[symbol]; ok {\n\t\t\/\/ take dereferenced copy of orderbook\n\t\treturn val, nil\n\t}\n\treturn nil, fmt.Errorf(\"Orderbook %s does not exist\", symbol)\n}\n\n\/\/ Submit a request to create a new order\nfunc (c *Client) SubmitOrder(ctx context.Context, order *bitfinex.OrderNewRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, order)\n}\n\n\/\/ Submit and update request to change an existing orders values\nfunc (c *Client) SubmitUpdateOrder(ctx context.Context, orderUpdate *bitfinex.OrderUpdateRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, orderUpdate)\n}\n\n\/\/ Submit a cancel request for an existing order\nfunc (c *Client) SubmitCancel(ctx context.Context, cancel *bitfinex.OrderCancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, cancel)\n}\n\n\/\/ Get a subscription request using a subscription ID\nfunc (c *Client) LookupSubscription(subID string) (*SubscriptionRequest, error) {\n\ts, err := c.subscriptions.lookupBySubscriptionID(subID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Request, nil\n}\n\n\/\/ Submit a new funding offer request\nfunc (c *Client) SubmitFundingOffer(ctx context.Context, fundingOffer *bitfinex.FundingOfferRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n\n\/\/ Submit a request to cancel and existing funding offer\nfunc (c *Client) SubmitFundingCancel(ctx context.Context, fundingOffer *bitfinex.FundingOfferCancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n\n\/\/ CloseFundingLoan - cancels funding loan by ID. Emits an error if not authenticated.\nfunc (c *Client) CloseFundingLoan(ctx context.Context, flcr *fundingloan.CancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, flcr)\n}\n\n\/\/ CloseFundingCredit - cancels funding credit by ID. Emits an error if not authenticated.\nfunc (c *Client) CloseFundingCredit(ctx context.Context, fundingOffer *bitfinex.FundingCreditCancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n<commit_msg>v2\/websocket\/api.go putting new fundingoffer package to work<commit_after>package websocket\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/models\/fundingloan\"\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/models\/fundingoffer\"\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/v2\"\n)\n\ntype FlagRequest struct {\n\tEvent string `json:\"event\"`\n\tFlags int `json:\"flags\"`\n}\n\n\/\/ API for end-users to interact with Bitfinex.\n\n\/\/ Send publishes a generic message to the Bitfinex API.\nfunc (c *Client) Send(ctx context.Context, msg interface{}) error {\n\tsocket, err := c.getSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, msg)\n}\n\n\/\/ Submit a request to enable the given flag\nfunc (c *Client) EnableFlag(ctx context.Context, flag int) (string, error) {\n\treq := &FlagRequest{\n\t\tEvent: \"conf\",\n\t\tFlags: flag,\n\t}\n\t\/\/ TODO enable flag on reconnect?\n\t\/\/ create sublist to stop concurrent map read\n\tsocks := make([]*Socket, len(c.sockets))\n\tc.mtx.RLock()\n\tfor i, socket := range c.sockets {\n\t\tsocks[i] = socket\n\t}\n\tc.mtx.RUnlock()\n\tfor _, socket := range socks {\n\t\terr := socket.Asynchronous.Send(ctx, req)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\n\/\/ Gen the count of currently active websocket connections\nfunc (c *Client) ConnectionCount() int {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\treturn len(c.sockets)\n}\n\n\/\/ Get the available capacity of the current\n\/\/ websocket connections\nfunc (c *Client) AvailableCapacity() int {\n\treturn c.getTotalAvailableSocketCapacity()\n}\n\n\/\/ Start a new websocket connection. This function is only exposed in case you want to\n\/\/ implicitly add new connections otherwise connection management is already handled for you.\nfunc (c *Client) StartNewConnection() error {\n\treturn c.connectSocket(SocketId(c.ConnectionCount()))\n}\n\nfunc (c *Client) subscribeBySocket(ctx context.Context, socket *Socket, req *SubscriptionRequest) (string, error) {\n\tc.subscriptions.add(socket.Id, req)\n\terr := socket.Asynchronous.Send(ctx, req)\n\tif err != nil {\n\t\t\/\/ propagate send error\n\t\treturn \"\", err\n\t}\n\treturn req.SubID, nil\n}\n\n\/\/ Submit a request to subscribe to the given SubscriptionRequuest\nfunc (c *Client) Subscribe(ctx context.Context, req *SubscriptionRequest) (string, error) {\n\tif c.getTotalAvailableSocketCapacity() <= 1 {\n\t\terr := c.StartNewConnection()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\t\/\/ get socket with the highest available capacity\n\tsocket, err := c.getMostAvailableSocket()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn c.subscribeBySocket(ctx, socket, req)\n}\n\n\/\/ Submit a request to receive ticker updates\nfunc (c *Client) SubscribeTicker(ctx context.Context, symbol string) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanTicker,\n\t\tSymbol: symbol,\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a request to receive trade updates\nfunc (c *Client) SubscribeTrades(ctx context.Context, symbol string) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanTrades,\n\t\tSymbol: symbol,\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request for market data for the given symbol, at the given frequency, with the given precision, returning no more than priceLevels price entries.\n\/\/ Default values are Precision0, Frequency0, and priceLevels=25.\nfunc (c *Client) SubscribeBook(ctx context.Context, symbol string, precision bitfinex.BookPrecision, frequency bitfinex.BookFrequency, priceLevel int) (string, error) {\n\tif priceLevel < 0 {\n\t\treturn \"\", fmt.Errorf(\"negative price levels not supported: %d\", priceLevel)\n\t}\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanBook,\n\t\tSymbol: symbol,\n\t\tPrecision: string(precision),\n\t\tLen: fmt.Sprintf(\"%d\", priceLevel), \/\/ needed for R0?\n\t}\n\tif !bitfinex.IsRawBook(string(precision)) {\n\t\treq.Frequency = string(frequency)\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request to receive candle updates\nfunc (c *Client) SubscribeCandles(ctx context.Context, symbol string, resolution bitfinex.CandleResolution) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanCandles,\n\t\tKey: fmt.Sprintf(\"trade:%s:%s\", resolution, symbol),\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Submit a subscription request for status updates\nfunc (c *Client) SubscribeStatus(ctx context.Context, symbol string, sType bitfinex.StatusType) (string, error) {\n\treq := &SubscriptionRequest{\n\t\tSubID: c.nonce.GetNonce(),\n\t\tEvent: EventSubscribe,\n\t\tChannel: ChanStatus,\n\t\tKey: fmt.Sprintf(\"%s:%s\", string(sType), symbol),\n\t}\n\treturn c.Subscribe(ctx, req)\n}\n\n\/\/ Retrieve the Orderbook for the given symbol which is managed locally.\n\/\/ This requires ManageOrderbook=True and an active chanel subscribed to the given\n\/\/ symbols orderbook\nfunc (c *Client) GetOrderbook(symbol string) (*Orderbook, error) {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\tif val, ok := c.orderbooks[symbol]; ok {\n\t\t\/\/ take dereferenced copy of orderbook\n\t\treturn val, nil\n\t}\n\treturn nil, fmt.Errorf(\"Orderbook %s does not exist\", symbol)\n}\n\n\/\/ Submit a request to create a new order\nfunc (c *Client) SubmitOrder(ctx context.Context, order *bitfinex.OrderNewRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, order)\n}\n\n\/\/ Submit and update request to change an existing orders values\nfunc (c *Client) SubmitUpdateOrder(ctx context.Context, orderUpdate *bitfinex.OrderUpdateRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, orderUpdate)\n}\n\n\/\/ Submit a cancel request for an existing order\nfunc (c *Client) SubmitCancel(ctx context.Context, cancel *bitfinex.OrderCancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, cancel)\n}\n\n\/\/ Get a subscription request using a subscription ID\nfunc (c *Client) LookupSubscription(subID string) (*SubscriptionRequest, error) {\n\ts, err := c.subscriptions.lookupBySubscriptionID(subID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.Request, nil\n}\n\n\/\/ Submit a new funding offer request\nfunc (c *Client) SubmitFundingOffer(ctx context.Context, fundingOffer *fundingoffer.SubmitRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n\n\/\/ Submit a request to cancel and existing funding offer\nfunc (c *Client) SubmitFundingCancel(ctx context.Context, fundingOffer *fundingoffer.CancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n\n\/\/ CloseFundingLoan - cancels funding loan by ID. Emits an error if not authenticated.\nfunc (c *Client) CloseFundingLoan(ctx context.Context, flcr *fundingloan.CancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, flcr)\n}\n\n\/\/ CloseFundingCredit - cancels funding credit by ID. Emits an error if not authenticated.\nfunc (c *Client) CloseFundingCredit(ctx context.Context, fundingOffer *bitfinex.FundingCreditCancelRequest) error {\n\tsocket, err := c.GetAuthenticatedSocket()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn socket.Asynchronous.Send(ctx, fundingOffer)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n)\n\nvar CastError = errors.New(\"cast error\")\n\n\/\/ Types of Command options\nconst (\n\tInvalid = reflect.Invalid\n\tBool = reflect.Bool\n\tInt = reflect.Int\n\tUint = reflect.Uint\n\tFloat = reflect.Float64\n\tString = reflect.String\n)\n\n\/\/ Option is used to specify a field that will be provided by a consumer\ntype Option struct {\n\tNames []string \/\/ a list of unique names to\n\tType reflect.Kind \/\/ value must be this type\n\tDescription string \/\/ a short string to describe this option\n\n\t\/\/ MAYBE_TODO: add more features(?):\n\t\/\/Default interface{} \/\/ the default value (ignored if `Required` is true)\n\t\/\/Required bool \/\/ whether or not the option must be provided\n}\n\n\/\/ constructor helper functions\nfunc NewOption(kind reflect.Kind, names ...string) Option {\n\tif len(names) < 2 {\n\t\t\/\/ FIXME(btc) don't panic (fix_before_merge)\n\t\tpanic(\"Options require at least two string values (name and description)\")\n\t}\n\n\tdesc := names[len(names)-1]\n\tnames = names[:len(names)-1]\n\n\treturn Option{\n\t\tNames: names,\n\t\tType: kind,\n\t\tDescription: desc,\n\t}\n}\n\n\/\/ TODO handle description separately. this will take care of the panic case in\n\/\/ NewOption\n\n\/\/ For all func {Type}Option(...string) functions, the last variadic argument\n\/\/ is treated as the description field.\n\nfunc BoolOption(names ...string) Option {\n\treturn NewOption(Bool, names...)\n}\nfunc IntOption(names ...string) Option {\n\treturn NewOption(Int, names...)\n}\nfunc UintOption(names ...string) Option {\n\treturn NewOption(Uint, names...)\n}\nfunc FloatOption(names ...string) Option {\n\treturn NewOption(Float, names...)\n}\nfunc StringOption(names ...string) Option {\n\treturn NewOption(String, names...)\n}\n\ntype OptionValue struct {\n\tvalue interface{}\n\tfound bool\n}\n\n\/\/ Found returns true if the option value was provided by the user (not a default value)\nfunc (ov OptionValue) Found() bool {\n\treturn ov.found\n}\n\n\/\/ value accessor methods, gets the value as a certain type\nfunc (ov OptionValue) Bool() (value bool, found bool, err error) {\n\tif !ov.found {\n\t\treturn false, false, nil\n\t}\n\tval, ok := ov.value.(bool)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Int() (val int, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(int)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Uint() (val uint, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(uint)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Float() (val float64, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(float64)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) String() (val string, found bool, err error) {\n\tif !ov.found {\n\t\treturn \"\", false, nil\n\t}\n\tval, ok := ov.value.(string)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\n\/\/ Flag names\nconst (\n\tEncShort = \"enc\"\n\tEncLong = \"encoding\"\n)\n\n\/\/ options that are used by this package\nvar globalOptions = []Option{\n\tOption{[]string{EncShort, EncLong}, String,\n\t\t\"The encoding type the output should be encoded with (json, xml, or text)\"},\n}\n\n\/\/ the above array of Options, wrapped in a Command\nvar globalCommand = &Command{\n\tOptions: globalOptions,\n}\n<commit_msg>fix(commands\/optionvalue) don't shadow the return variable<commit_after>package commands\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n)\n\nvar CastError = errors.New(\"cast error\")\n\n\/\/ Types of Command options\nconst (\n\tInvalid = reflect.Invalid\n\tBool = reflect.Bool\n\tInt = reflect.Int\n\tUint = reflect.Uint\n\tFloat = reflect.Float64\n\tString = reflect.String\n)\n\n\/\/ Option is used to specify a field that will be provided by a consumer\ntype Option struct {\n\tNames []string \/\/ a list of unique names to\n\tType reflect.Kind \/\/ value must be this type\n\tDescription string \/\/ a short string to describe this option\n\n\t\/\/ MAYBE_TODO: add more features(?):\n\t\/\/Default interface{} \/\/ the default value (ignored if `Required` is true)\n\t\/\/Required bool \/\/ whether or not the option must be provided\n}\n\n\/\/ constructor helper functions\nfunc NewOption(kind reflect.Kind, names ...string) Option {\n\tif len(names) < 2 {\n\t\t\/\/ FIXME(btc) don't panic (fix_before_merge)\n\t\tpanic(\"Options require at least two string values (name and description)\")\n\t}\n\n\tdesc := names[len(names)-1]\n\tnames = names[:len(names)-1]\n\n\treturn Option{\n\t\tNames: names,\n\t\tType: kind,\n\t\tDescription: desc,\n\t}\n}\n\n\/\/ TODO handle description separately. this will take care of the panic case in\n\/\/ NewOption\n\n\/\/ For all func {Type}Option(...string) functions, the last variadic argument\n\/\/ is treated as the description field.\n\nfunc BoolOption(names ...string) Option {\n\treturn NewOption(Bool, names...)\n}\nfunc IntOption(names ...string) Option {\n\treturn NewOption(Int, names...)\n}\nfunc UintOption(names ...string) Option {\n\treturn NewOption(Uint, names...)\n}\nfunc FloatOption(names ...string) Option {\n\treturn NewOption(Float, names...)\n}\nfunc StringOption(names ...string) Option {\n\treturn NewOption(String, names...)\n}\n\ntype OptionValue struct {\n\tvalue interface{}\n\tfound bool\n}\n\n\/\/ Found returns true if the option value was provided by the user (not a default value)\nfunc (ov OptionValue) Found() bool {\n\treturn ov.found\n}\n\n\/\/ value accessor methods, gets the value as a certain type\nfunc (ov OptionValue) Bool() (value bool, found bool, err error) {\n\tif !ov.found {\n\t\treturn false, false, nil\n\t}\n\tval, ok := ov.value.(bool)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Int() (value int, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(int)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Uint() (value uint, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(uint)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) Float() (value float64, found bool, err error) {\n\tif !ov.found {\n\t\treturn 0, false, nil\n\t}\n\tval, ok := ov.value.(float64)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\nfunc (ov OptionValue) String() (value string, found bool, err error) {\n\tif !ov.found {\n\t\treturn \"\", false, nil\n\t}\n\tval, ok := ov.value.(string)\n\tif !ok {\n\t\terr = CastError\n\t}\n\treturn val, ov.found, err\n}\n\n\/\/ Flag names\nconst (\n\tEncShort = \"enc\"\n\tEncLong = \"encoding\"\n)\n\n\/\/ options that are used by this package\nvar globalOptions = []Option{\n\tOption{[]string{EncShort, EncLong}, String,\n\t\t\"The encoding type the output should be encoded with (json, xml, or text)\"},\n}\n\n\/\/ the above array of Options, wrapped in a Command\nvar globalCommand = &Command{\n\tOptions: globalOptions,\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"github.com\/mislav\/everyenv\/cli\"\n\t\"github.com\/mislav\/everyenv\/config\"\n)\n\nvar prefixHelp = `\nUsage: $ProgramName prefix [<version>]\n\nDisplays the directory where a specific version is installed. If no version is\ngiven, display the location of the currently selected version.\n`\n\nfunc prefixCmd(args cli.Args) {\n\tversion := args.At(0)\n\tif version == \"\" {\n\t\tcurrentVersion := detectVersion()\n\t\tversion = currentVersion.Name\n\t}\n\n\tversionDir := config.VersionDir(version)\n\tif versionDir.Exists() {\n\t\tcli.Println(versionDir)\n\t} else {\n\t\terr := VersionNotFound{version}\n\t\tcli.Errorf(\"%s: %s\\n\", args.ProgramName(), err)\n\t\tcli.Exit(1)\n\t}\n}\n\nfunc init() {\n\tcli.Register(\"prefix\", prefixCmd, prefixHelp)\n}\n<commit_msg>Have `prefix` command handle \"system\" version too<commit_after>package commands\n\nimport (\n\t\"github.com\/mislav\/everyenv\/cli\"\n\t\"github.com\/mislav\/everyenv\/config\"\n)\n\nvar prefixHelp = `\nUsage: $ProgramName prefix [<version>]\n\nDisplays the directory where a specific version is installed. If no version is\ngiven, display the location of the currently selected version.\n`\n\nfunc prefixCmd(args cli.Args) {\n\tversion := args.At(0)\n\tif version == \"\" {\n\t\tcurrentVersion := detectVersion()\n\t\tversion = currentVersion.Name\n\t}\n\n\tif version == \"system\" {\n\t\texePath := findInPath(config.MainExecutable)\n\t\tif exePath.IsBlank() {\n\t\t\tcli.Errorf(\"%s: system version not found in PATH\\n\", args.ProgramName())\n\t\t\tcli.Exit(1)\n\t\t} else {\n\t\t\tcli.Println(exePath.Dir().Dir())\n\t\t}\n\t} else {\n\t\tversionDir := config.VersionDir(version)\n\t\tif versionDir.Exists() {\n\t\t\tcli.Println(versionDir)\n\t\t} else {\n\t\t\terr := VersionNotFound{version}\n\t\t\tcli.Errorf(\"%s: %s\\n\", args.ProgramName(), err)\n\t\t\tcli.Exit(1)\n\t\t}\n\t}\n}\n\nfunc init() {\n\tcli.Register(\"prefix\", prefixCmd, prefixHelp)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/0xfoo\/punchcard\/schedule\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar minCommits, maxCommits int\n\nvar randomCmd = &cobra.Command{\n\tUse: \"random\",\n\tShort: \"Random will add commits throughout the past 365 days.\",\n\tLong: `Random will create a git repo at the given location and create\nrandom commits, random meaning the number of commits per day.\nThis will be done for the past 365 days and the commits are in the range of\n--min and --max commits.`,\n\tRun: randomRun,\n}\n\nfunc randomRun(cmd *cobra.Command, args []string) {\n\tschedulers.RandomSchedule()\n}\n\nfunc init() {\n\trandomCmd.Flags().IntVar(&minCommits, \"min\", 1,\n\t\t\"minimal #commits on a given day.\")\n\trandomCmd.Flags().IntVar(&maxCommits, \"max\", 10,\n\t\t\"maximal #commits on a given day.\")\n\tPunchCardCmd.AddCommand(randomCmd)\n}\n<commit_msg>Add min and max to random schedule call.<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/0xfoo\/punchcard\/schedule\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar minCommits, maxCommits int\n\nvar randomCmd = &cobra.Command{\n\tUse: \"random\",\n\tShort: \"Random will add commits throughout the past 365 days.\",\n\tLong: `Random will create a git repo at the given location and create\nrandom commits, random meaning the number of commits per day.\nThis will be done for the past 365 days and the commits are in the range of\n--min and --max commits.`,\n\tRun: randomRun,\n}\n\nfunc randomRun(cmd *cobra.Command, args []string) {\n\tschedule.RandomSchedule(minCommits, maxCommits)\n}\n\nfunc init() {\n\trandomCmd.Flags().IntVar(&minCommits, \"min\", 1,\n\t\t\"minimal #commits on a given day.\")\n\trandomCmd.Flags().IntVar(&maxCommits, \"max\", 10,\n\t\t\"maximal #commits on a given day.\")\n\tPunchCardCmd.AddCommand(randomCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/jingweno\/gh\/cmd\"\n\t\"github.com\/jingweno\/gh\/git\"\n\tshellquote \"github.com\/kballard\/go-shellquote\"\n\t\"os\/exec\"\n\t\"syscall\"\n)\n\ntype ExecError struct {\n\tErr error\n\tExitCode int\n}\n\nfunc (execError *ExecError) Error() string {\n\treturn execError.Err.Error()\n}\n\nfunc newExecError(err error) ExecError {\n\texitCode := 0\n\tif err != nil {\n\t\texitCode = 1\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exitError.Sys().(syscall.WaitStatus); ok {\n\t\t\t\texitCode = status.ExitStatus()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ExecError{Err: err, ExitCode: exitCode}\n}\n\ntype Runner struct {\n\tArgs []string\n}\n\nfunc (r *Runner) Execute() ExecError {\n\targs := NewArgs(r.Args)\n\tif args.Command == \"\" {\n\t\tusage()\n\t}\n\n\texpandAlias(args)\n\tslurpGlobalFlags(args)\n\n\tfor _, cmd := range All() {\n\t\tif cmd.Name() == args.Command && cmd.Runnable() {\n\t\t\tif !cmd.GitExtension {\n\t\t\t\tcmd.Flag.Usage = func() {\n\t\t\t\t\tcmd.PrintUsage()\n\t\t\t\t}\n\t\t\t\tif err := cmd.Flag.Parse(args.Params); err != nil {\n\t\t\t\t\tif err == flag.ErrHelp {\n\t\t\t\t\t\treturn newExecError(nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn newExecError(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\targs.Params = cmd.Flag.Args()\n\t\t\t}\n\n\t\t\tcmd.Run(cmd, args)\n\n\t\t\tcmds := args.Commands()\n\t\t\tvar err error\n\t\t\tif args.Noop {\n\t\t\t\tprintCommands(cmds)\n\t\t\t} else {\n\t\t\t\terr = executeCommands(cmds)\n\t\t\t}\n\n\t\t\treturn newExecError(err)\n\t\t}\n\t}\n\n\terr := git.Spawn(args.Command, args.Params...)\n\treturn newExecError(err)\n}\n\nfunc slurpGlobalFlags(args *Args) {\n\tfor i, p := range args.Params {\n\t\tif p == \"--no-op\" {\n\t\t\targs.Noop = true\n\t\t\targs.RemoveParam(i)\n\t\t}\n\t}\n}\n\nfunc printCommands(cmds []*cmd.Cmd) {\n\tfor _, c := range cmds {\n\t\tfmt.Println(c)\n\t}\n}\n\nfunc executeCommands(cmds []*cmd.Cmd) error {\n\tfor _, c := range cmds {\n\t\terr := c.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc expandAlias(args *Args) {\n\tcmd := args.Command\n\texpandedCmd, err := git.Config(fmt.Sprintf(\"alias.%s\", cmd))\n\tif err == nil && expandedCmd != \"\" {\n\t\twords, err := shellquote.Split(expandedCmd)\n\t\tif err != nil {\n\t\t\targs.Command = words[0]\n\t\t\targs.PrependParams(words[1:]...)\n\t\t}\n\t}\n}\n<commit_msg>Use noop flag instead of no-op<commit_after>package commands\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/jingweno\/gh\/cmd\"\n\t\"github.com\/jingweno\/gh\/git\"\n\tshellquote \"github.com\/kballard\/go-shellquote\"\n\t\"os\/exec\"\n\t\"syscall\"\n)\n\ntype ExecError struct {\n\tErr error\n\tExitCode int\n}\n\nfunc (execError *ExecError) Error() string {\n\treturn execError.Err.Error()\n}\n\nfunc newExecError(err error) ExecError {\n\texitCode := 0\n\tif err != nil {\n\t\texitCode = 1\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exitError.Sys().(syscall.WaitStatus); ok {\n\t\t\t\texitCode = status.ExitStatus()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ExecError{Err: err, ExitCode: exitCode}\n}\n\ntype Runner struct {\n\tArgs []string\n}\n\nfunc (r *Runner) Execute() ExecError {\n\targs := NewArgs(r.Args)\n\tif args.Command == \"\" {\n\t\tusage()\n\t}\n\n\texpandAlias(args)\n\tslurpGlobalFlags(args)\n\n\tfor _, cmd := range All() {\n\t\tif cmd.Name() == args.Command && cmd.Runnable() {\n\t\t\tif !cmd.GitExtension {\n\t\t\t\tcmd.Flag.Usage = func() {\n\t\t\t\t\tcmd.PrintUsage()\n\t\t\t\t}\n\t\t\t\tif err := cmd.Flag.Parse(args.Params); err != nil {\n\t\t\t\t\tif err == flag.ErrHelp {\n\t\t\t\t\t\treturn newExecError(nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn newExecError(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\targs.Params = cmd.Flag.Args()\n\t\t\t}\n\n\t\t\tcmd.Run(cmd, args)\n\n\t\t\tcmds := args.Commands()\n\t\t\tvar err error\n\t\t\tif args.Noop {\n\t\t\t\tprintCommands(cmds)\n\t\t\t} else {\n\t\t\t\terr = executeCommands(cmds)\n\t\t\t}\n\n\t\t\treturn newExecError(err)\n\t\t}\n\t}\n\n\terr := git.Spawn(args.Command, args.Params...)\n\treturn newExecError(err)\n}\n\nfunc slurpGlobalFlags(args *Args) {\n\tfor i, p := range args.Params {\n\t\tif p == \"--noop\" {\n\t\t\targs.Noop = true\n\t\t\targs.RemoveParam(i)\n\t\t}\n\t}\n}\n\nfunc printCommands(cmds []*cmd.Cmd) {\n\tfor _, c := range cmds {\n\t\tfmt.Println(c)\n\t}\n}\n\nfunc executeCommands(cmds []*cmd.Cmd) error {\n\tfor _, c := range cmds {\n\t\terr := c.Exec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc expandAlias(args *Args) {\n\tcmd := args.Command\n\texpandedCmd, err := git.Config(fmt.Sprintf(\"alias.%s\", cmd))\n\tif err == nil && expandedCmd != \"\" {\n\t\twords, err := shellquote.Split(expandedCmd)\n\t\tif err != nil {\n\t\t\targs.Command = words[0]\n\t\t\targs.PrependParams(words[1:]...)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport \"bufio\"\nimport \"path\/filepath\"\nimport \"fmt\"\nimport \"os\"\nimport \"os\/exec\"\nimport \"strings\"\n\nimport \"..\/interpreter\"\n\nfunc cmd_source(cmd *exec.Cmd) (interpreter.NextT, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn interpreter.CONTINUE, nil\n\t}\n\tenvTxtPath := filepath.Join(\n\t\tos.TempDir(),\n\t\tfmt.Sprintf(\"nyagos-%d.tmp\", os.Getpid()))\n\n\targs := make([]string, 2)\n\targs[0] = os.Getenv(\"COMSPEC\")\n\targs[1] = \"\/C\"\n\tfor _, v := range cmd.Args[1:] {\n\t\targs = append(args, strings.Replace(v, \" \", \"^ \", -1))\n\t}\n\targs = append(args, \"&\")\n\targs = append(args, \"set\")\n\targs = append(args, \">\")\n\targs = append(args, envTxtPath)\n\n\tvar cmd2 exec.Cmd\n\tcmd2.Path = args[0]\n\tcmd2.Args = args\n\tcmd2.Env = nil\n\tcmd2.Dir = \"\"\n\tif err := cmd2.Run(); err != nil {\n\t\treturn interpreter.CONTINUE, err\n\t}\n\tfp, err := os.Open(envTxtPath)\n\tif err != nil {\n\t\treturn interpreter.CONTINUE, err\n\t}\n\tdefer os.Remove(envTxtPath)\n\tdefer fp.Close()\n\n\tscr := bufio.NewScanner(fp)\n\tfor scr.Scan() {\n\t\tline := scr.Text()\n\t\teqlPos := strings.Index(line, \"=\")\n\t\tif eqlPos > 0 {\n\t\t\tos.Setenv(line[:eqlPos], line[eqlPos+1:])\n\t\t}\n\t}\n\treturn interpreter.CONTINUE, nil\n}\n<commit_msg>Fix #13: sourceのコマンド名の ( ) を ^( ^) へ置換<commit_after>package commands\n\nimport \"bufio\"\nimport \"path\/filepath\"\nimport \"fmt\"\nimport \"os\"\nimport \"os\/exec\"\nimport \"strings\"\n\nimport \"..\/interpreter\"\n\nfunc cmd_source(cmd *exec.Cmd) (interpreter.NextT, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn interpreter.CONTINUE, nil\n\t}\n\tenvTxtPath := filepath.Join(\n\t\tos.TempDir(),\n\t\tfmt.Sprintf(\"nyagos-%d.tmp\", os.Getpid()))\n\n\targs := make([]string, 2)\n\targs[0] = os.Getenv(\"COMSPEC\")\n\targs[1] = \"\/C\"\n\tfor _, v := range cmd.Args[1:] {\n\t\targs = append(args,\n\t\t\tstrings.Replace(\n\t\t\t\tstrings.Replace(\n\t\t\t\t\tstrings.Replace(v, \" \", \"^ \", -1), \"(\", \"^(\", -1),\n\t\t\t\t\")\", \"^)\", -1))\n\t}\n\targs = append(args, \"&\")\n\targs = append(args, \"set\")\n\targs = append(args, \">\")\n\targs = append(args, envTxtPath)\n\n\tvar cmd2 exec.Cmd\n\tcmd2.Path = args[0]\n\tcmd2.Args = args\n\tcmd2.Env = nil\n\tcmd2.Dir = \"\"\n\tif err := cmd2.Run(); err != nil {\n\t\treturn interpreter.CONTINUE, err\n\t}\n\tfp, err := os.Open(envTxtPath)\n\tif err != nil {\n\t\treturn interpreter.CONTINUE, err\n\t}\n\tdefer os.Remove(envTxtPath)\n\tdefer fp.Close()\n\n\tscr := bufio.NewScanner(fp)\n\tfor scr.Scan() {\n\t\tline := scr.Text()\n\t\teqlPos := strings.Index(line, \"=\")\n\t\tif eqlPos > 0 {\n\t\t\tos.Setenv(line[:eqlPos], line[eqlPos+1:])\n\t\t}\n\t}\n\treturn interpreter.CONTINUE, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ @author Couchbase <info@couchbase.com>\n\/\/ @copyright 2014 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage common\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/couchbase\/gometa\/log\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Type Declaration\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ PeerPipe is to maintain the messaging channel\n\/\/ between two peers. This is not used for leader\n\/\/ election.\n\/\/\ntype PeerPipe struct {\n\tconn net.Conn\n\tsendch chan Packet\n\treceivech chan Packet\n\tmutex sync.Mutex\n\tisClosed bool\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public Function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ Create a new PeerPipe. The consumer can call\n\/\/ ReceiveChannel() to get the channel for receving\n\/\/ message packets.\n\/\/\nfunc NewPeerPipe(pconn net.Conn) *PeerPipe {\n\n\tpipe := &PeerPipe{conn: pconn,\n\t\tsendch: make(chan Packet, MAX_PROPOSALS*2),\n\t\treceivech: make(chan Packet, MAX_PROPOSALS*2),\n\t\tisClosed: false}\n\n\tgo pipe.doSend()\n\tgo pipe.doReceive()\n\treturn pipe\n}\n\n\/\/\n\/\/ Get the net address of the remote peer.\n\/\/\nfunc (p *PeerPipe) GetAddr() string {\n\treturn p.conn.RemoteAddr().String()\n}\n\n\/\/\n\/\/ Return the receive channel.\n\/\/\nfunc (p *PeerPipe) ReceiveChannel() <-chan Packet {\n\n\t\/\/ Just return receivech even if it is closed. The caller\n\t\/\/ can tell if the channel is closed by using multi-value\n\t\/\/ recieve operator. Returning a nil channel can cause\n\t\/\/ the caller being block forever.\n\t\/\/\n\treturn (<-chan Packet)(p.receivech)\n}\n\n\/\/\n\/\/ Close the PeerPipe. It is safe to call this\n\/\/ method multiple times without causing panic.\n\/\/\nfunc (p *PeerPipe) Close() bool {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tif !p.isClosed {\n\n\t\tlog.Current.Debugf(\"PeerPipe.Close(): Remote Address %s\", p.GetAddr())\n\t\tlog.Current.Tracef(\"%s\", \"PeerPipe.Close() : Diagnostic Stack ...\")\n\t\tlog.Current.Tracef(log.Current.StackTrace())\n\n\t\tp.isClosed = true\n\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tp.conn.Close()\n\t\t\t})\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tclose(p.sendch)\n\t\t\t})\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tclose(p.receivech)\n\t\t\t})\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/\n\/\/ Send a packet to the peer. This method will return\n\/\/ false if the pipe is already closed.\n\/\/\nfunc (p *PeerPipe) Send(packet Packet) bool {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tif !p.isClosed {\n\t\tp.sendch <- packet\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Private Function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ Goroutine. Go through the send channel and\n\/\/ send out each packet to the peer as bytes.\n\/\/\nfunc (p *PeerPipe) doSend() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Current.Errorf(\"caught panic in PeerPipe.doSend() : %s. Terminate connection upon panic.\\n\", r)\n\t\t\tlog.Current.Errorf(\"%s\", log.Current.StackTrace())\n\t\t}\n\n\t\t\/\/ This will close the Send and Receive channel\n\t\tp.Close()\n\t}()\n\n\tfor {\n\t\tpacket, ok := <-p.sendch\n\t\tif !ok {\n\t\t\t\/\/ channel close. Terminate the loop.\n\t\t\tlog.Current.Debugf(\"%s\", \"PeerPipe.doSend() : Send channel closed. Terminate.\")\n\t\t\treturn\n\t\t}\n\n\t\tlog.Current.Tracef(\"PeerPipe.doSend() : Prepare to send message %s to Peer %s\", packet.Name(), p.GetAddr())\n\n\t\tmsg, err := Marshall(packet)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doSend() : Fail to marshall message %s to Peer %s. Terminate.\", packet.Name(), p.GetAddr())\n\t\t\treturn\n\t\t}\n\t\tsize := len(msg)\n\n\t\t\/\/ write the packet\n\t\tlog.Current.Debugf(\"PeerPipe.doSend() : Sending message %s (len %d) to Peer %s\", packet.Name(), size, p.GetAddr())\n\t\tlog.Current.LazyDebug(packet.String)\n\n\t\tn, err := p.conn.Write(msg)\n\t\tif n < size || err != nil {\n\t\t\t\/\/ Network error. Close the loop. The pipe will\n\t\t\t\/\/ close and cause subsequent Send() to fail.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doSend() : ecounter error when sending mesasage to Peer %s. Error = %s. Terminate.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ Goroutine. Listen to the connection and\n\/\/ unmarshall each packet. Forward the packet to\n\/\/ receive channel.\nfunc (p *PeerPipe) doReceive() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Current.Errorf(\"panic in PeerPipe.doReceive() : %s\\n\", r)\n\t\t\tlog.Current.Errorf(\"%s\", log.Current.StackTrace())\n\t\t}\n\n\t\t\/\/ This will close the Send and Receive channel\n\t\tp.Close()\n\t}()\n\n\tfor {\n\t\t\/\/ read packet len\n\t\tlenBuf, err := p.readBytes(8, nil)\n\t\tif err != nil {\n\t\t\t\/\/ if encountering an error, kill the pipe.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\tsize := binary.BigEndian.Uint64(lenBuf)\n\n\t\t\/\/ read the content\n\t\treadahead, err := p.validateHeader(size)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tbuf, err := p.readBytes(size, readahead)\n\t\tif err != nil {\n\t\t\t\/\/ if encountering an error, kill the pipe.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/ unmarshall the content and put it in the channel\n\t\tpacket, err := UnMarshall(buf)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when unmarshalling mesasage from Peer %s. Error = %s. Terminate.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\tlog.Current.Debugf(\"PeerPipe.doRecieve() : Message decoded. Packet = %s\", packet.Name())\n\t\tlog.Current.LazyDebug(packet.String)\n\n\t\t\/\/ This can block if the reciever of the channel is slow or terminated premauturely (which cause channel to fill up).\n\t\t\/\/ In this case, this can cause the TCP connection to fail. The other end of the pipe will close as a result\n\t\t\/\/ of this. This end of the pipe will eventually close since it can no longer send message to the other end.\n\t\tp.queue(packet)\n\t}\n}\n\n\/\/\n\/\/ Queue the packe to the recieve channel if\n\/\/ the channel has not been closed.\n\/\/\nfunc (p *PeerPipe) queue(packet Packet) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tif !p.isClosed {\n\t\tp.receivech <- packet\n\t}\n}\n\nfunc (p *PeerPipe) readBytes(size uint64, readahead []byte) ([]byte, error) {\n\n\tresult := new(bytes.Buffer)\n\tremaining := size\n\n\tif readahead != nil {\n\t\tresult.Write(readahead)\n\t\tremaining -= uint64(len(readahead))\n\t}\n\n\tfor {\n\t\t\/\/ read the size of the packet (uint64)\n\t\tbuf := make([]byte, remaining)\n\t\tn, err := p.conn.Read(buf)\n\t\tlog.Current.Tracef(\"PeerPipe.readBytes() : Receiving message from Peer %s, bytes read %d\", p.GetAddr(), n)\n\n\t\tif n != 0 {\n\t\t\tresult.Write(buf[0:n])\n\t\t\tremaining = remaining - uint64(n)\n\n\t\t\tif remaining == 0 {\n\t\t\t\treturn result.Bytes(), nil\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn result.Bytes(), nil\n}\n\nfunc (p *PeerPipe) validateHeader(size uint64) ([]byte, error) {\n\n\t\/\/ validate the size of packet\n\tif size > 20*1024*1024 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Validate packet header: Invalid size %v\", size))\n\t}\n\n\tresult := new(bytes.Buffer)\n\n\t\/\/ read the type length\n\tbuf, err := p.readBytes(8, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult.Write(buf)\n\n\t\/\/ validate the length of type string\n\ttyLen := binary.BigEndian.Uint64(buf)\n\tif tyLen > 2048 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Validate packet header: Invalid type length %v\", tyLen))\n\t}\n\n\t\/\/ read the type string\n\tbuf, err = p.readBytes(tyLen, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult.Write(buf)\n\n\t\/\/ validate type string\n\tif !IsValidType(string(buf)) {\n\t\treturn nil, errors.New(fmt.Sprint(\"Validate packet header: Unrecognize packet type\"))\n\t}\n\n\treturn result.Bytes(), nil\n}\n<commit_msg>MB-30774: Do not hold lock while putting packets onto channel<commit_after>\/\/ @author Couchbase <info@couchbase.com>\n\/\/ @copyright 2014 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage common\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/couchbase\/gometa\/log\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Type Declaration\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ PeerPipe is to maintain the messaging channel\n\/\/ between two peers. This is not used for leader\n\/\/ election.\n\/\/\ntype PeerPipe struct {\n\tconn net.Conn\n\tsendch chan Packet\n\treceivech chan Packet\n\tmutex sync.Mutex\n\tisClosed bool\n\tclosech chan bool\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public Function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ Create a new PeerPipe. The consumer can call\n\/\/ ReceiveChannel() to get the channel for receving\n\/\/ message packets.\n\/\/\nfunc NewPeerPipe(pconn net.Conn) *PeerPipe {\n\n\tpipe := &PeerPipe{conn: pconn,\n\t\tsendch: make(chan Packet, MAX_PROPOSALS*5),\n\t\treceivech: make(chan Packet, MAX_PROPOSALS*5),\n\t\tisClosed: false,\n\t\tclosech: make(chan bool)}\n\n\tgo pipe.doSend()\n\tgo pipe.doReceive()\n\treturn pipe\n}\n\n\/\/\n\/\/ Get the net address of the remote peer.\n\/\/\nfunc (p *PeerPipe) GetAddr() string {\n\treturn p.conn.RemoteAddr().String()\n}\n\n\/\/\n\/\/ Return the receive channel.\n\/\/\nfunc (p *PeerPipe) ReceiveChannel() <-chan Packet {\n\n\t\/\/ Just return receivech even if it is closed. The caller\n\t\/\/ can tell if the channel is closed by using multi-value\n\t\/\/ recieve operator. Returning a nil channel can cause\n\t\/\/ the caller being block forever.\n\t\/\/\n\treturn (<-chan Packet)(p.receivech)\n}\n\n\/\/\n\/\/ Close the PeerPipe. It is safe to call this\n\/\/ method multiple times without causing panic.\n\/\/\nfunc (p *PeerPipe) Close() bool {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tif !p.isClosed {\n\n\t\tlog.Current.Debugf(\"PeerPipe.Close(): Remote Address %s\", p.GetAddr())\n\t\tlog.Current.Tracef(\"%s\", \"PeerPipe.Close() : Diagnostic Stack ...\")\n\t\tlog.Current.Tracef(log.Current.StackTrace())\n\n\t\tp.isClosed = true\n\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tp.conn.Close()\n\t\t\t})\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tclose(p.sendch)\n\t\t\t})\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tclose(p.receivech)\n\t\t\t})\n\t\tSafeRun(\"PeerPipe.Close()\",\n\t\t\tfunc() {\n\t\t\t\tclose(p.closech)\n\t\t\t})\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/\n\/\/ Send a packet to the peer. This method will return\n\/\/ false if the pipe is already closed.\n\/\/\nfunc (p *PeerPipe) Send(packet Packet) bool {\n\tdefer func() {\n\t\trecover()\n\t}()\n\n\tselect {\n\tcase p.sendch <- packet:\n\t\treturn true\n\tcase <-p.closech:\n\t\treturn false\n\t}\n\n\treturn false\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Private Function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/\n\/\/ Goroutine. Go through the send channel and\n\/\/ send out each packet to the peer as bytes.\n\/\/\nfunc (p *PeerPipe) doSend() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Current.Errorf(\"caught panic in PeerPipe.doSend() : %s. Terminate connection upon panic.\\n\", r)\n\t\t\tlog.Current.Errorf(\"%s\", log.Current.StackTrace())\n\t\t}\n\n\t\t\/\/ This will close the Send and Receive channel\n\t\tp.Close()\n\t}()\n\n\tfor {\n\t\tpacket, ok := <-p.sendch\n\t\tif !ok {\n\t\t\t\/\/ channel close. Terminate the loop.\n\t\t\tlog.Current.Debugf(\"%s\", \"PeerPipe.doSend() : Send channel closed. Terminate.\")\n\t\t\treturn\n\t\t}\n\n\t\tlog.Current.Tracef(\"PeerPipe.doSend() : Prepare to send message %s to Peer %s\", packet.Name(), p.GetAddr())\n\n\t\tmsg, err := Marshall(packet)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doSend() : Fail to marshall message %s to Peer %s. Terminate.\", packet.Name(), p.GetAddr())\n\t\t\treturn\n\t\t}\n\t\tsize := len(msg)\n\n\t\t\/\/ write the packet\n\t\tlog.Current.Debugf(\"PeerPipe.doSend() : Sending message %s (len %d) to Peer %s\", packet.Name(), size, p.GetAddr())\n\t\tlog.Current.LazyDebug(packet.String)\n\n\t\tn, err := p.conn.Write(msg)\n\t\tif n < size || err != nil {\n\t\t\t\/\/ Network error. Close the loop. The pipe will\n\t\t\t\/\/ close and cause subsequent Send() to fail.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doSend() : ecounter error when sending mesasage to Peer %s. Error = %s. Terminate.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/\n\/\/ Goroutine. Listen to the connection and\n\/\/ unmarshall each packet. Forward the packet to\n\/\/ receive channel.\nfunc (p *PeerPipe) doReceive() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Current.Errorf(\"panic in PeerPipe.doReceive() : %s\\n\", r)\n\t\t\tlog.Current.Errorf(\"%s\", log.Current.StackTrace())\n\t\t}\n\n\t\t\/\/ This will close the Send and Receive channel\n\t\tp.Close()\n\t}()\n\n\tfor {\n\t\t\/\/ read packet len\n\t\tlenBuf, err := p.readBytes(8, nil)\n\t\tif err != nil {\n\t\t\t\/\/ if encountering an error, kill the pipe.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\tsize := binary.BigEndian.Uint64(lenBuf)\n\n\t\t\/\/ read the content\n\t\treadahead, err := p.validateHeader(size)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tbuf, err := p.readBytes(size, readahead)\n\t\tif err != nil {\n\t\t\t\/\/ if encountering an error, kill the pipe.\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when received mesasage from Peer %s. Error = %s. Kill Pipe.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/ unmarshall the content and put it in the channel\n\t\tpacket, err := UnMarshall(buf)\n\t\tif err != nil {\n\t\t\tlog.Current.Errorf(\"PeerPipe.doRecieve() : ecounter error when unmarshalling mesasage from Peer %s. Error = %s. Terminate.\",\n\t\t\t\tp.GetAddr(), err.Error())\n\t\t\treturn\n\t\t}\n\t\tlog.Current.Debugf(\"PeerPipe.doRecieve() : Message decoded. Packet = %s\", packet.Name())\n\t\tlog.Current.LazyDebug(packet.String)\n\n\t\t\/\/ This can block if the reciever of the channel is slow or terminated premauturely (which cause channel to fill up).\n\t\t\/\/ In this case, this can cause the TCP connection to fail. The other end of the pipe will close as a result\n\t\t\/\/ of this. This end of the pipe will eventually close since it can no longer send message to the other end.\n\t\tp.queue(packet)\n\t}\n}\n\n\/\/\n\/\/ Queue the packe to the recieve channel if\n\/\/ the channel has not been closed.\n\/\/\nfunc (p *PeerPipe) queue(packet Packet) {\n\tdefer func() {\n\t\trecover()\n\t}()\n\n\tselect {\n\tcase p.receivech <- packet:\n\t\treturn\n\tcase <-p.closech:\n\t\treturn\n\t}\n}\n\nfunc (p *PeerPipe) readBytes(size uint64, readahead []byte) ([]byte, error) {\n\n\tresult := new(bytes.Buffer)\n\tremaining := size\n\n\tif readahead != nil {\n\t\tresult.Write(readahead)\n\t\tremaining -= uint64(len(readahead))\n\t}\n\n\tfor {\n\t\t\/\/ read the size of the packet (uint64)\n\t\tbuf := make([]byte, remaining)\n\t\tn, err := p.conn.Read(buf)\n\t\tlog.Current.Tracef(\"PeerPipe.readBytes() : Receiving message from Peer %s, bytes read %d\", p.GetAddr(), n)\n\n\t\tif n != 0 {\n\t\t\tresult.Write(buf[0:n])\n\t\t\tremaining = remaining - uint64(n)\n\n\t\t\tif remaining == 0 {\n\t\t\t\treturn result.Bytes(), nil\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn result.Bytes(), nil\n}\n\nfunc (p *PeerPipe) validateHeader(size uint64) ([]byte, error) {\n\n\t\/\/ validate the size of packet\n\tif size > 20*1024*1024 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Validate packet header: Invalid size %v\", size))\n\t}\n\n\tresult := new(bytes.Buffer)\n\n\t\/\/ read the type length\n\tbuf, err := p.readBytes(8, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult.Write(buf)\n\n\t\/\/ validate the length of type string\n\ttyLen := binary.BigEndian.Uint64(buf)\n\tif tyLen > 2048 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Validate packet header: Invalid type length %v\", tyLen))\n\t}\n\n\t\/\/ read the type string\n\tbuf, err = p.readBytes(tyLen, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult.Write(buf)\n\n\t\/\/ validate type string\n\tif !IsValidType(string(buf)) {\n\t\treturn nil, errors.New(fmt.Sprint(\"Validate packet header: Unrecognize packet type\"))\n\t}\n\n\treturn result.Bytes(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package compare\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/cloudfoundry-incubator\/spiff\/yaml\"\n)\n\ntype Diff struct {\n\tA yaml.Node\n\tB yaml.Node\n\n\tPath []string\n}\n\nfunc Compare(a, b yaml.Node) []Diff {\n\treturn compare(a, b, []string{})\n}\n\nfunc compare(a, b yaml.Node, path []string) []Diff {\n\tmismatch := Diff{A: a, B: b, Path: path}\n\n\tswitch a.(type) {\n\tcase map[string]yaml.Node:\n\t\tswitch b.(type) {\n\t\tcase map[string]yaml.Node:\n\t\t\treturn compareMap(a.(map[string]yaml.Node), b.(map[string]yaml.Node), path)\n\n\t\tcase []yaml.Node:\n\t\t\ttoMap := listToMap(b.([]yaml.Node))\n\n\t\t\tif toMap != nil {\n\t\t\t\treturn compareMap(a.(map[string]yaml.Node), toMap, path)\n\t\t\t} else {\n\t\t\t\treturn []Diff{mismatch}\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\tcase []yaml.Node:\n\t\tswitch b.(type) {\n\t\tcase []yaml.Node:\n\t\t\treturn compareList(a.([]yaml.Node), b.([]yaml.Node), path)\n\t\tdefault:\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\tdefault:\n\t\tatype := reflect.TypeOf(a)\n\t\tbtype := reflect.TypeOf(b)\n\n\t\tif atype != btype {\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\t\tif a != b {\n\t\t\treturn []Diff{Diff{A: a, B: b, Path: path}}\n\t\t}\n\t}\n\n\treturn []Diff{}\n}\n\nfunc listToMap(list []yaml.Node) map[string]yaml.Node {\n\ttoMap := make(map[string]yaml.Node)\n\n\tfor _, val := range list {\n\t\tname, ok := yaml.FindString(val, \"name\")\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tasMap, ok := val.(map[string]yaml.Node)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tnewMap := make(map[string]yaml.Node)\n\t\tfor key, val := range asMap {\n\t\t\tif key != \"name\" {\n\t\t\t\tnewMap[key] = val\n\t\t\t}\n\t\t}\n\n\t\ttoMap[name] = newMap\n\t}\n\n\treturn toMap\n}\n\nfunc compareMap(a, b map[string]yaml.Node, path []string) []Diff {\n\tdiff := []Diff{}\n\n\tfor key, aval := range a {\n\t\tbval, present := b[key]\n\t\tif present {\n\t\t\tdiff = append(diff, compare(aval, bval, addPath(path, key))...)\n\t\t} else {\n\t\t\tdiff = append(diff, Diff{A: aval, B: nil, Path: addPath(path, key)})\n\t\t}\n\t}\n\n\tfor key, bval := range b {\n\t\t_, present := a[key]\n\t\tif !present {\n\t\t\tdiff = append(diff, Diff{A: nil, B: bval, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn diff\n}\n\nfunc compareList(a, b []yaml.Node, path []string) []Diff {\n\tdiff := []Diff{}\n\n\tif len(path) == 1 && path[0] == \"jobs\" {\n\t\treturn compareJobs(a, b, path)\n\t}\n\n\tfor index, aval := range a {\n\t\tkey, bval, found := findByNameOrIndex(aval, b, index)\n\n\t\tif !found {\n\t\t\tdiff = append(diff, Diff{A: aval, B: nil, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\n\t\tdiff = append(diff, compare(aval, bval, addPath(path, key))...)\n\t}\n\n\tfor index, bval := range b {\n\t\tkey := fmt.Sprintf(\"[%d]\", index)\n\n\t\tif len(a) <= index {\n\t\t\tdiff = append(diff, Diff{A: nil, B: bval, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn diff\n}\n\nfunc compareJobs(ajobs, bjobs []yaml.Node, path []string) []Diff {\n\treturn compareMap(jobMap(ajobs), jobMap(bjobs), path)\n}\n\nfunc jobMap(jobs []yaml.Node) map[string]yaml.Node {\n\tbyName := make(map[string]yaml.Node)\n\n\tfor index, job := range jobs {\n\t\tattrs, ok := job.(map[string]yaml.Node)\n\t\tattrs[\"index\"] = index\n\n\t\tname, ok := yaml.FindString(job, \"name\")\n\t\tif !ok {\n\t\t\tpanic(\"job without string name\")\n\t\t}\n\n\t\tbyName[name] = attrs\n\t}\n\n\treturn byName\n}\n\nfunc findByNameOrIndex(node yaml.Node, others []yaml.Node, index int) (string, yaml.Node, bool) {\n\tname, ok := yaml.FindString(node, \"name\")\n\tif !ok {\n\t\treturn findByIndex(others, index)\n\t}\n\n\tkey, node, found := findByName(name, others)\n\tif !found {\n\t\treturn findByIndex(others, index)\n\t}\n\n\treturn key, node, true\n}\n\nfunc findByName(name string, nodes []yaml.Node) (string, yaml.Node, bool) {\n\tfor _, node := range nodes {\n\t\totherName, ok := yaml.FindString(node, \"name\")\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif otherName == name {\n\t\t\treturn name, node, true\n\t\t}\n\t}\n\n\treturn \"\", nil, false\n}\n\nfunc findByIndex(nodes []yaml.Node, index int) (string, yaml.Node, bool) {\n\tkey := fmt.Sprintf(\"[%d]\", index)\n\n\tif len(nodes) <= index {\n\t\treturn key, nil, false\n\t}\n\n\treturn key, nodes[index], true\n}\n\n\/\/ cannot use straight append for this, as it will overwrite\n\/\/ previous steps, since it reuses the slice\n\/\/\n\/\/ e.g. with inital path A:\n\/\/ append(A, \"a\")\n\/\/ append(A, \"b\")\n\/\/\n\/\/ will result in all previous A\/a paths becoming A\/b\nfunc addPath(path []string, steps ...string) []string {\n\tnewPath := make([]string, len(path))\n\tcopy(newPath, path)\n\treturn append(newPath, steps...)\n}\n<commit_msg>use assignment + type match<commit_after>package compare\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/cloudfoundry-incubator\/spiff\/yaml\"\n)\n\ntype Diff struct {\n\tA yaml.Node\n\tB yaml.Node\n\n\tPath []string\n}\n\nfunc Compare(a, b yaml.Node) []Diff {\n\treturn compare(a, b, []string{})\n}\n\nfunc compare(a, b yaml.Node, path []string) []Diff {\n\tmismatch := Diff{A: a, B: b, Path: path}\n\n\tswitch av := a.(type) {\n\tcase map[string]yaml.Node:\n\t\tswitch bv := b.(type) {\n\t\tcase map[string]yaml.Node:\n\t\t\treturn compareMap(av, bv, path)\n\n\t\tcase []yaml.Node:\n\t\t\ttoMap := listToMap(bv)\n\n\t\t\tif toMap != nil {\n\t\t\t\treturn compareMap(av, toMap, path)\n\t\t\t} else {\n\t\t\t\treturn []Diff{mismatch}\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\tcase []yaml.Node:\n\t\tswitch bv := b.(type) {\n\t\tcase []yaml.Node:\n\t\t\treturn compareList(av, bv, path)\n\t\tdefault:\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\tdefault:\n\t\tatype := reflect.TypeOf(a)\n\t\tbtype := reflect.TypeOf(b)\n\n\t\tif atype != btype {\n\t\t\treturn []Diff{mismatch}\n\t\t}\n\n\t\tif a != b {\n\t\t\treturn []Diff{Diff{A: a, B: b, Path: path}}\n\t\t}\n\t}\n\n\treturn []Diff{}\n}\n\nfunc listToMap(list []yaml.Node) map[string]yaml.Node {\n\ttoMap := make(map[string]yaml.Node)\n\n\tfor _, val := range list {\n\t\tname, ok := yaml.FindString(val, \"name\")\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tasMap, ok := val.(map[string]yaml.Node)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tnewMap := make(map[string]yaml.Node)\n\t\tfor key, val := range asMap {\n\t\t\tif key != \"name\" {\n\t\t\t\tnewMap[key] = val\n\t\t\t}\n\t\t}\n\n\t\ttoMap[name] = newMap\n\t}\n\n\treturn toMap\n}\n\nfunc compareMap(a, b map[string]yaml.Node, path []string) []Diff {\n\tdiff := []Diff{}\n\n\tfor key, aval := range a {\n\t\tbval, present := b[key]\n\t\tif present {\n\t\t\tdiff = append(diff, compare(aval, bval, addPath(path, key))...)\n\t\t} else {\n\t\t\tdiff = append(diff, Diff{A: aval, B: nil, Path: addPath(path, key)})\n\t\t}\n\t}\n\n\tfor key, bval := range b {\n\t\t_, present := a[key]\n\t\tif !present {\n\t\t\tdiff = append(diff, Diff{A: nil, B: bval, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn diff\n}\n\nfunc compareList(a, b []yaml.Node, path []string) []Diff {\n\tdiff := []Diff{}\n\n\tif len(path) == 1 && path[0] == \"jobs\" {\n\t\treturn compareJobs(a, b, path)\n\t}\n\n\tfor index, aval := range a {\n\t\tkey, bval, found := findByNameOrIndex(aval, b, index)\n\n\t\tif !found {\n\t\t\tdiff = append(diff, Diff{A: aval, B: nil, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\n\t\tdiff = append(diff, compare(aval, bval, addPath(path, key))...)\n\t}\n\n\tfor index, bval := range b {\n\t\tkey := fmt.Sprintf(\"[%d]\", index)\n\n\t\tif len(a) <= index {\n\t\t\tdiff = append(diff, Diff{A: nil, B: bval, Path: addPath(path, key)})\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn diff\n}\n\nfunc compareJobs(ajobs, bjobs []yaml.Node, path []string) []Diff {\n\treturn compareMap(jobMap(ajobs), jobMap(bjobs), path)\n}\n\nfunc jobMap(jobs []yaml.Node) map[string]yaml.Node {\n\tbyName := make(map[string]yaml.Node)\n\n\tfor index, job := range jobs {\n\t\tattrs, ok := job.(map[string]yaml.Node)\n\t\tattrs[\"index\"] = index\n\n\t\tname, ok := yaml.FindString(job, \"name\")\n\t\tif !ok {\n\t\t\tpanic(\"job without string name\")\n\t\t}\n\n\t\tbyName[name] = attrs\n\t}\n\n\treturn byName\n}\n\nfunc findByNameOrIndex(node yaml.Node, others []yaml.Node, index int) (string, yaml.Node, bool) {\n\tname, ok := yaml.FindString(node, \"name\")\n\tif !ok {\n\t\treturn findByIndex(others, index)\n\t}\n\n\tkey, node, found := findByName(name, others)\n\tif !found {\n\t\treturn findByIndex(others, index)\n\t}\n\n\treturn key, node, true\n}\n\nfunc findByName(name string, nodes []yaml.Node) (string, yaml.Node, bool) {\n\tfor _, node := range nodes {\n\t\totherName, ok := yaml.FindString(node, \"name\")\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif otherName == name {\n\t\t\treturn name, node, true\n\t\t}\n\t}\n\n\treturn \"\", nil, false\n}\n\nfunc findByIndex(nodes []yaml.Node, index int) (string, yaml.Node, bool) {\n\tkey := fmt.Sprintf(\"[%d]\", index)\n\n\tif len(nodes) <= index {\n\t\treturn key, nil, false\n\t}\n\n\treturn key, nodes[index], true\n}\n\n\/\/ cannot use straight append for this, as it will overwrite\n\/\/ previous steps, since it reuses the slice\n\/\/\n\/\/ e.g. with inital path A:\n\/\/ append(A, \"a\")\n\/\/ append(A, \"b\")\n\/\/\n\/\/ will result in all previous A\/a paths becoming A\/b\nfunc addPath(path []string, steps ...string) []string {\n\tnewPath := make([]string, len(path))\n\tcopy(newPath, path)\n\treturn append(newPath, steps...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/fleet\/machine\"\n\t\"github.com\/coreos\/fleet\/schema\"\n)\n\nconst (\n\tdefaultListUnitFilesFields = \"unit,hash,dstate,state,target\"\n)\n\nfunc mapTargetField(u schema.Unit, full bool) string {\n\tif suToGlobal(u) {\n\t\treturn \"global\"\n\t}\n\tif u.MachineID == \"\" {\n\t\treturn \"-\"\n\t}\n\tms := cachedMachineState(u.MachineID)\n\tif ms == nil {\n\t\tms = &machine.MachineState{ID: u.MachineID}\n\t}\n\n\treturn machineFullLegend(*ms, full)\n}\n\nvar (\n\tlistUnitFilesFieldsFlag string\n\tcmdListUnitFiles = &Command{\n\t\tName: \"list-unit-files\",\n\t\tSummary: \"List the units that exist in the cluster.\",\n\t\tUsage: \"[--fields]\",\n\t\tDescription: `Lists all unit files that exist in the cluster (whether or not they are loaded onto a machine).`,\n\t\tRun: runListUnitFiles,\n\t}\n\tlistUnitFilesFields = map[string]unitToField{\n\t\t\"unit\": func(u schema.Unit, full bool) string {\n\t\t\treturn u.Name\n\t\t},\n\t\t\"global\": func(u schema.Unit, full bool) string {\n\t\t\treturn strconv.FormatBool(suToGlobal(u))\n\t\t},\n\t\t\"dstate\": func(u schema.Unit, full bool) string {\n\t\t\tif u.DesiredState == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn u.DesiredState\n\t\t},\n\t\t\"target\": mapTargetField,\n\t\t\"tmachine\": mapTargetField,\n\t\t\"state\": func(u schema.Unit, full bool) string {\n\t\t\tif suToGlobal(u) || u.CurrentState == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn u.CurrentState\n\t\t},\n\t\t\"hash\": func(u schema.Unit, full bool) string {\n\t\t\tuf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)\n\t\t\tif !full {\n\t\t\t\treturn uf.Hash().Short()\n\t\t\t}\n\t\t\treturn uf.Hash().String()\n\t\t},\n\t\t\"desc\": func(u schema.Unit, full bool) string {\n\t\t\tuf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)\n\t\t\td := uf.Description()\n\t\t\tif d == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn d\n\t\t},\n\t}\n)\n\ntype unitToField func(u schema.Unit, full bool) string\n\nfunc init() {\n\tcmdListUnitFiles.Flags.BoolVar(&sharedFlags.Full, \"full\", false, \"Do not ellipsize fields on output\")\n\tcmdListUnitFiles.Flags.BoolVar(&sharedFlags.NoLegend, \"no-legend\", false, \"Do not print a legend (column headers)\")\n\tcmdListUnitFiles.Flags.StringVar(&listUnitFilesFieldsFlag, \"fields\", defaultListUnitFilesFields, fmt.Sprintf(\"Columns to print for each Unit file. Valid fields are %q\", strings.Join(unitToFieldKeys(listUnitFilesFields), \",\")))\n}\n\nfunc runListUnitFiles(args []string) (exit int) {\n\tif listUnitFilesFieldsFlag == \"\" {\n\t\tstderr(\"Must define output format\")\n\t\treturn 1\n\t}\n\n\tcols := strings.Split(listUnitFilesFieldsFlag, \",\")\n\tfor _, s := range cols {\n\t\tif _, ok := listUnitFilesFields[s]; !ok {\n\t\t\tstderr(\"Invalid key in output format: %q\", s)\n\t\t\treturn 1\n\t\t}\n\t\tif s == \"tmachine\" {\n\t\t\tstderr(\"WARNING: The \\\"tmachine\\\" field is deprecated. Use \\\"target\\\" instead\")\n\t\t}\n\t}\n\n\tunits, err := cAPI.Units()\n\tif err != nil {\n\t\tstderr(\"Error retrieving list of units from repository: %v\", err)\n\t\treturn 1\n\t}\n\n\tif !sharedFlags.NoLegend {\n\t\tfmt.Fprintln(out, strings.ToUpper(strings.Join(cols, \"\\t\")))\n\t}\n\n\tfor _, u := range units {\n\t\tvar f []string\n\t\tfor _, c := range cols {\n\t\t\tf = append(f, listUnitFilesFields[c](*u, sharedFlags.Full))\n\t\t}\n\t\tfmt.Fprintln(out, strings.Join(f, \"\\t\"))\n\t}\n\n\tout.Flush()\n\treturn\n}\n\nfunc unitToFieldKeys(m map[string]unitToField) (keys []string) {\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn\n}\n<commit_msg>fleetctl: list-unit-files fully populated for global units<commit_after>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/fleet\/machine\"\n\t\"github.com\/coreos\/fleet\/schema\"\n)\n\nconst (\n\tdefaultListUnitFilesFields = \"unit,hash,dstate,state,target\"\n)\n\nfunc mapTargetField(u schema.Unit, full bool) string {\n\tif suToGlobal(u) {\n\t\treturn \"global\"\n\t}\n\tif u.MachineID == \"\" {\n\t\treturn \"-\"\n\t}\n\tms := cachedMachineState(u.MachineID)\n\tif ms == nil {\n\t\tms = &machine.MachineState{ID: u.MachineID}\n\t}\n\n\treturn machineFullLegend(*ms, full)\n}\n\nvar (\n\tlistUnitFilesFieldsFlag string\n\tcmdListUnitFiles = &Command{\n\t\tName: \"list-unit-files\",\n\t\tSummary: \"List the units that exist in the cluster.\",\n\t\tUsage: \"[--fields]\",\n\t\tDescription: `Lists all unit files that exist in the cluster (whether or not they are loaded onto a machine).`,\n\t\tRun: runListUnitFiles,\n\t}\n\tlistUnitFilesFields = map[string]unitToField{\n\t\t\"unit\": func(u schema.Unit, full bool) string {\n\t\t\treturn u.Name\n\t\t},\n\t\t\"global\": func(u schema.Unit, full bool) string {\n\t\t\treturn strconv.FormatBool(suToGlobal(u))\n\t\t},\n\t\t\"dstate\": func(u schema.Unit, full bool) string {\n\t\t\tif u.DesiredState == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn u.DesiredState\n\t\t},\n\t\t\"target\": mapTargetField,\n\t\t\"tmachine\": mapTargetField,\n\t\t\"state\": func(u schema.Unit, full bool) string {\n\t\t\tif suToGlobal(u) {\n\t\t\t\tstates, err := cAPI.UnitStates()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"-\"\n\t\t\t\t}\n\t\t\t\tvar (\n\t\t\t\t\tinactive int\n\t\t\t\t\tloaded int\n\t\t\t\t\tlaunched int\n\t\t\t\t\tcurrentStates []string\n\t\t\t\t)\n\t\t\t\tfor _, state := range states {\n\t\t\t\t\tif state.Name == u.Name {\n\t\t\t\t\t\tif (state.SystemdActiveState == \"active\") || (state.SystemdSubState == \"running\") {\n\t\t\t\t\t\t\tlaunched++\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif state.SystemdLoadState == \"loaded\" {\n\t\t\t\t\t\t\tloaded++\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinactive++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif launched != 0 {\n\t\t\t\t\ts := fmt.Sprintf(\"launched(%d)\", launched)\n\t\t\t\t\tcurrentStates = append(currentStates, s)\n\t\t\t\t}\n\t\t\t\tif loaded != 0 {\n\t\t\t\t\ts := fmt.Sprintf(\"loaded(%d)\", loaded)\n\t\t\t\t\tcurrentStates = append(currentStates, s)\n\t\t\t\t}\n\t\t\t\tif inactive != 0 {\n\t\t\t\t\ts := fmt.Sprintf(\"inactive(%d)\", inactive)\n\t\t\t\t\tcurrentStates = append(currentStates, s)\n\t\t\t\t}\n\t\t\t\treturn strings.Join(currentStates, \" \")\n\t\t\t}\n\t\t\tif u.CurrentState == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn u.CurrentState\n\t\t},\n\t\t\"hash\": func(u schema.Unit, full bool) string {\n\t\t\tuf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)\n\t\t\tif !full {\n\t\t\t\treturn uf.Hash().Short()\n\t\t\t}\n\t\t\treturn uf.Hash().String()\n\t\t},\n\t\t\"desc\": func(u schema.Unit, full bool) string {\n\t\t\tuf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)\n\t\t\td := uf.Description()\n\t\t\tif d == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn d\n\t\t},\n\t}\n)\n\ntype unitToField func(u schema.Unit, full bool) string\n\nfunc init() {\n\tcmdListUnitFiles.Flags.BoolVar(&sharedFlags.Full, \"full\", false, \"Do not ellipsize fields on output\")\n\tcmdListUnitFiles.Flags.BoolVar(&sharedFlags.NoLegend, \"no-legend\", false, \"Do not print a legend (column headers)\")\n\tcmdListUnitFiles.Flags.StringVar(&listUnitFilesFieldsFlag, \"fields\", defaultListUnitFilesFields, fmt.Sprintf(\"Columns to print for each Unit file. Valid fields are %q\", strings.Join(unitToFieldKeys(listUnitFilesFields), \",\")))\n}\n\nfunc runListUnitFiles(args []string) (exit int) {\n\tif listUnitFilesFieldsFlag == \"\" {\n\t\tstderr(\"Must define output format\")\n\t\treturn 1\n\t}\n\n\tcols := strings.Split(listUnitFilesFieldsFlag, \",\")\n\tfor _, s := range cols {\n\t\tif _, ok := listUnitFilesFields[s]; !ok {\n\t\t\tstderr(\"Invalid key in output format: %q\", s)\n\t\t\treturn 1\n\t\t}\n\t\tif s == \"tmachine\" {\n\t\t\tstderr(\"WARNING: The \\\"tmachine\\\" field is deprecated. Use \\\"target\\\" instead\")\n\t\t}\n\t}\n\n\tunits, err := cAPI.Units()\n\tif err != nil {\n\t\tstderr(\"Error retrieving list of units from repository: %v\", err)\n\t\treturn 1\n\t}\n\n\tif !sharedFlags.NoLegend {\n\t\tfmt.Fprintln(out, strings.ToUpper(strings.Join(cols, \"\\t\")))\n\t}\n\n\tfor _, u := range units {\n\t\tvar f []string\n\t\tfor _, c := range cols {\n\t\t\tf = append(f, listUnitFilesFields[c](*u, sharedFlags.Full))\n\t\t}\n\t\tfmt.Fprintln(out, strings.Join(f, \"\\t\"))\n\t}\n\n\tout.Flush()\n\treturn\n}\n\nfunc unitToFieldKeys(m map[string]unitToField) (keys []string) {\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package sarama\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\ttoxiproxy \"github.com\/Shopify\/toxiproxy\/client\"\n\t\"github.com\/rcrowley\/go-metrics\"\n)\n\nconst TestBatchSize = 1000\n\nfunc TestFuncProducing(t *testing.T) {\n\tconfig := NewConfig()\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingGzip(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Compression = CompressionGZIP\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingSnappy(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Compression = CompressionSnappy\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingZstd(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Compression = CompressionZSTD\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingNoResponse(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.RequiredAcks = NoResponse\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingFlushing(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Flush.Messages = TestBatchSize \/ 8\n\tconfig.Producer.Flush.Frequency = 250 * time.Millisecond\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncMultiPartitionProduce(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tconfig := NewConfig()\n\tconfig.ChannelBufferSize = 20\n\tconfig.Producer.Flush.Frequency = 50 * time.Millisecond\n\tconfig.Producer.Flush.Messages = 200\n\tconfig.Producer.Return.Successes = true\n\tproducer, err := NewSyncProducer(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(TestBatchSize)\n\n\tfor i := 1; i <= TestBatchSize; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tmsg := &ProducerMessage{Topic: \"test.64\", Key: nil, Value: StringEncoder(fmt.Sprintf(\"hur %d\", i))}\n\t\t\tif _, _, err := producer.SendMessage(msg); err != nil {\n\t\t\t\tt.Error(i, err)\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tif err := producer.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestFuncProducingToInvalidTopic(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tproducer, err := NewSyncProducer(kafkaBrokers, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, _, err := producer.SendMessage(&ProducerMessage{Topic: \"in\/valid\"}); err != ErrUnknownTopicOrPartition {\n\t\tt.Error(\"Expected ErrUnknownTopicOrPartition, found\", err)\n\t}\n\n\tif _, _, err := producer.SendMessage(&ProducerMessage{Topic: \"in\/valid\"}); err != ErrUnknownTopicOrPartition {\n\t\tt.Error(\"Expected ErrUnknownTopicOrPartition, found\", err)\n\t}\n\n\tsafeClose(t, producer)\n}\n\nfunc TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tconfig := NewConfig()\n\tconfig.Producer.Flush.Frequency = 250 * time.Millisecond\n\tconfig.Producer.Idempotent = true\n\tconfig.Producer.Timeout = 500 * time.Millisecond\n\tconfig.Producer.Retry.Max = 1\n\tconfig.Producer.Retry.Backoff = 500 * time.Millisecond\n\tconfig.Producer.Return.Successes = true\n\tconfig.Producer.Return.Errors = true\n\tconfig.Producer.RequiredAcks = WaitForAll\n\tconfig.Net.MaxOpenRequests = 1\n\tconfig.Version = V0_11_0_0\n\n\tproducer, err := NewSyncProducer(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer safeClose(t, producer)\n\n\t\/\/ Successfully publish a few messages\n\tfor i := 0; i < 10; i++ {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(fmt.Sprintf(\"%d message\", i)),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ break the brokers.\n\tfor proxyName, proxy := range Proxies {\n\t\tif !strings.Contains(proxyName, \"kafka\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := proxy.Disable(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ This should fail hard now\n\tfor i := 10; i < 20; i++ {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(fmt.Sprintf(\"%d message\", i)),\n\t\t})\n\t\tif err == nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Now bring the proxy back up\n\tfor proxyName, proxy := range Proxies {\n\t\tif !strings.Contains(proxyName, \"kafka\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := proxy.Enable(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ We should be able to publish again (once everything calms down)\n\t\/\/ (otherwise it times out)\n\tfor {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(\"comeback message\"),\n\t\t})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc testProducingMessages(t *testing.T, config *Config) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\t\/\/ Configure some latency in order to properly validate the request latency metric\n\tfor _, proxy := range Proxies {\n\t\tif _, err := proxy.AddToxic(\"\", \"latency\", \"\", 1, toxiproxy.Attributes{\"latency\": 10}); err != nil {\n\t\t\tt.Fatal(\"Unable to configure latency toxicity\", err)\n\t\t}\n\t}\n\n\tconfig.Producer.Return.Successes = true\n\tconfig.Consumer.Return.Errors = true\n\n\tclient, err := NewClient(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Keep in mind the current offset\n\tinitialOffset, err := client.GetOffset(\"test.1\", 0, OffsetNewest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tproducer, err := NewAsyncProducerFromClient(client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpectedResponses := TestBatchSize\n\tfor i := 1; i <= TestBatchSize; {\n\t\tmsg := &ProducerMessage{Topic: \"test.1\", Key: nil, Value: StringEncoder(fmt.Sprintf(\"testing %d\", i))}\n\t\tselect {\n\t\tcase producer.Input() <- msg:\n\t\t\ti++\n\t\tcase ret := <-producer.Errors():\n\t\t\tt.Fatal(ret.Err)\n\t\tcase <-producer.Successes():\n\t\t\texpectedResponses--\n\t\t}\n\t}\n\tfor expectedResponses > 0 {\n\t\tselect {\n\t\tcase ret := <-producer.Errors():\n\t\t\tt.Fatal(ret.Err)\n\t\tcase <-producer.Successes():\n\t\t\texpectedResponses--\n\t\t}\n\t}\n\tsafeClose(t, producer)\n\n\t\/\/ Validate producer metrics before using the consumer minus the offset request\n\tvalidateMetrics(t, client)\n\n\tmaster, err := NewConsumerFromClient(client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconsumer, err := master.ConsumePartition(\"test.1\", 0, initialOffset)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i := 1; i <= TestBatchSize; i++ {\n\t\tselect {\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tt.Fatal(\"Not received any more events in the last 10 seconds.\")\n\n\t\tcase err := <-consumer.Errors():\n\t\t\tt.Error(err)\n\n\t\tcase message := <-consumer.Messages():\n\t\t\tif string(message.Value) != fmt.Sprintf(\"testing %d\", i) {\n\t\t\t\tt.Fatalf(\"Unexpected message with index %d: %s\", i, message.Value)\n\t\t\t}\n\t\t}\n\t}\n\tsafeClose(t, consumer)\n\tsafeClose(t, client)\n}\n\nfunc validateMetrics(t *testing.T, client Client) {\n\t\/\/ Get the broker used by test1 topic\n\tvar broker *Broker\n\tif partitions, err := client.Partitions(\"test.1\"); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tfor _, partition := range partitions {\n\t\t\tif b, err := client.Leader(\"test.1\", partition); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t} else {\n\t\t\t\tif broker != nil && b != broker {\n\t\t\t\t\tt.Fatal(\"Expected only one broker, got at least 2\")\n\t\t\t\t}\n\t\t\t\tbroker = b\n\t\t\t}\n\t\t}\n\t}\n\n\tmetricValidators := newMetricValidators()\n\tnoResponse := client.Config().Producer.RequiredAcks == NoResponse\n\tcompressionEnabled := client.Config().Producer.Compression != CompressionNone\n\n\t\/\/ We are adding 10ms of latency to all requests with toxiproxy\n\tminRequestLatencyInMs := 10\n\tif noResponse {\n\t\t\/\/ but when we do not wait for a response it can be less than 1ms\n\t\tminRequestLatencyInMs = 0\n\t}\n\n\t\/\/ We read at least 1 byte from the broker\n\tmetricValidators.registerForAllBrokers(broker, minCountMeterValidator(\"incoming-byte-rate\", 1))\n\t\/\/ in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)\n\tmetricValidators.register(minCountMeterValidator(\"request-rate\", 3))\n\tmetricValidators.register(minCountHistogramValidator(\"request-size\", 3))\n\tmetricValidators.register(minValHistogramValidator(\"request-size\", 1))\n\t\/\/ and at least 2 requests to the registered broker (offset + produces)\n\tmetricValidators.registerForBroker(broker, minCountMeterValidator(\"request-rate\", 2))\n\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"request-size\", 2))\n\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"request-size\", 1))\n\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"request-latency-in-ms\", minRequestLatencyInMs))\n\n\t\/\/ We send at least 1 batch\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"batch-size\", 1))\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"batch-size\", 1))\n\tif compressionEnabled {\n\t\t\/\/ We record compression ratios between [0.50,-10.00] (50-1000 with a histogram) for at least one \"fake\" record\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"compression-ratio\", 1))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"compression-ratio\", 50))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", maxValHistogramValidator(\"compression-ratio\", 1000))\n\t} else {\n\t\t\/\/ We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", countHistogramValidator(\"compression-ratio\", TestBatchSize))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"compression-ratio\", 100))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", maxValHistogramValidator(\"compression-ratio\", 100))\n\t}\n\n\t\/\/ We send exactly TestBatchSize messages\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", countMeterValidator(\"record-send-rate\", TestBatchSize))\n\t\/\/ We send at least one record per request\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"records-per-request\", 1))\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"records-per-request\", 1))\n\n\t\/\/ We receive at least 1 byte from the broker\n\tmetricValidators.registerForAllBrokers(broker, minCountMeterValidator(\"outgoing-byte-rate\", 1))\n\tif noResponse {\n\t\t\/\/ in exactly 2 global responses (metadata + offset)\n\t\tmetricValidators.register(countMeterValidator(\"response-rate\", 2))\n\t\tmetricValidators.register(minCountHistogramValidator(\"response-size\", 2))\n\t\t\/\/ and exactly 1 offset response for the registered broker\n\t\tmetricValidators.registerForBroker(broker, countMeterValidator(\"response-rate\", 1))\n\t\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"response-size\", 1))\n\t\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"response-size\", 1))\n\t} else {\n\t\t\/\/ in at least 3 global responses (metadata + offset + produces)\n\t\tmetricValidators.register(minCountMeterValidator(\"response-rate\", 3))\n\t\tmetricValidators.register(minCountHistogramValidator(\"response-size\", 3))\n\t\t\/\/ and at least 2 for the registered broker\n\t\tmetricValidators.registerForBroker(broker, minCountMeterValidator(\"response-rate\", 2))\n\t\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"response-size\", 2))\n\t\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"response-size\", 1))\n\t}\n\n\t\/\/ There should be no requests in flight anymore\n\tmetricValidators.registerForAllBrokers(broker, counterValidator(\"requests-in-flight\", 0))\n\n\t\/\/ Run the validators\n\tmetricValidators.run(t, client.Config().MetricRegistry)\n}\n\n\/\/ Benchmarks\n\nfunc BenchmarkProducerSmall(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 128)))\n}\nfunc BenchmarkProducerMedium(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 1024)))\n}\nfunc BenchmarkProducerLarge(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 8192)))\n}\nfunc BenchmarkProducerSmallSinglePartition(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.1\", ByteEncoder(make([]byte, 128)))\n}\nfunc BenchmarkProducerMediumSnappy(b *testing.B) {\n\tconf := NewConfig()\n\tconf.Producer.Compression = CompressionSnappy\n\tbenchmarkProducer(b, conf, \"test.1\", ByteEncoder(make([]byte, 1024)))\n}\n\nfunc benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {\n\tsetupFunctionalTest(b)\n\tdefer teardownFunctionalTest(b)\n\n\tmetricsDisable := os.Getenv(\"METRICS_DISABLE\")\n\tif metricsDisable != \"\" {\n\t\tpreviousUseNilMetrics := metrics.UseNilMetrics\n\t\tLogger.Println(\"Disabling metrics using no-op implementation\")\n\t\tmetrics.UseNilMetrics = true\n\t\t\/\/ Restore previous setting\n\t\tdefer func() {\n\t\t\tmetrics.UseNilMetrics = previousUseNilMetrics\n\t\t}()\n\t}\n\n\tproducer, err := NewAsyncProducer(kafkaBrokers, conf)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tb.ResetTimer()\n\n\tfor i := 1; i <= b.N; {\n\t\tmsg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf(\"%d\", i)), Value: value}\n\t\tselect {\n\t\tcase producer.Input() <- msg:\n\t\t\ti++\n\t\tcase ret := <-producer.Errors():\n\t\t\tb.Fatal(ret.Err)\n\t\t}\n\t}\n\tsafeClose(b, producer)\n}\n<commit_msg>zstd needs client >= 2.1.0.0<commit_after>package sarama\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\ttoxiproxy \"github.com\/Shopify\/toxiproxy\/client\"\n\t\"github.com\/rcrowley\/go-metrics\"\n)\n\nconst TestBatchSize = 1000\n\nfunc TestFuncProducing(t *testing.T) {\n\tconfig := NewConfig()\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingGzip(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Compression = CompressionGZIP\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingSnappy(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Compression = CompressionSnappy\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingZstd(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Version = V2_1_0_0\n\tconfig.Producer.Compression = CompressionZSTD\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingNoResponse(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.RequiredAcks = NoResponse\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncProducingFlushing(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Producer.Flush.Messages = TestBatchSize \/ 8\n\tconfig.Producer.Flush.Frequency = 250 * time.Millisecond\n\ttestProducingMessages(t, config)\n}\n\nfunc TestFuncMultiPartitionProduce(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tconfig := NewConfig()\n\tconfig.ChannelBufferSize = 20\n\tconfig.Producer.Flush.Frequency = 50 * time.Millisecond\n\tconfig.Producer.Flush.Messages = 200\n\tconfig.Producer.Return.Successes = true\n\tproducer, err := NewSyncProducer(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(TestBatchSize)\n\n\tfor i := 1; i <= TestBatchSize; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tmsg := &ProducerMessage{Topic: \"test.64\", Key: nil, Value: StringEncoder(fmt.Sprintf(\"hur %d\", i))}\n\t\t\tif _, _, err := producer.SendMessage(msg); err != nil {\n\t\t\t\tt.Error(i, err)\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tif err := producer.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestFuncProducingToInvalidTopic(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tproducer, err := NewSyncProducer(kafkaBrokers, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, _, err := producer.SendMessage(&ProducerMessage{Topic: \"in\/valid\"}); err != ErrUnknownTopicOrPartition {\n\t\tt.Error(\"Expected ErrUnknownTopicOrPartition, found\", err)\n\t}\n\n\tif _, _, err := producer.SendMessage(&ProducerMessage{Topic: \"in\/valid\"}); err != ErrUnknownTopicOrPartition {\n\t\tt.Error(\"Expected ErrUnknownTopicOrPartition, found\", err)\n\t}\n\n\tsafeClose(t, producer)\n}\n\nfunc TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\tconfig := NewConfig()\n\tconfig.Producer.Flush.Frequency = 250 * time.Millisecond\n\tconfig.Producer.Idempotent = true\n\tconfig.Producer.Timeout = 500 * time.Millisecond\n\tconfig.Producer.Retry.Max = 1\n\tconfig.Producer.Retry.Backoff = 500 * time.Millisecond\n\tconfig.Producer.Return.Successes = true\n\tconfig.Producer.Return.Errors = true\n\tconfig.Producer.RequiredAcks = WaitForAll\n\tconfig.Net.MaxOpenRequests = 1\n\tconfig.Version = V0_11_0_0\n\n\tproducer, err := NewSyncProducer(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer safeClose(t, producer)\n\n\t\/\/ Successfully publish a few messages\n\tfor i := 0; i < 10; i++ {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(fmt.Sprintf(\"%d message\", i)),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ break the brokers.\n\tfor proxyName, proxy := range Proxies {\n\t\tif !strings.Contains(proxyName, \"kafka\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := proxy.Disable(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ This should fail hard now\n\tfor i := 10; i < 20; i++ {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(fmt.Sprintf(\"%d message\", i)),\n\t\t})\n\t\tif err == nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Now bring the proxy back up\n\tfor proxyName, proxy := range Proxies {\n\t\tif !strings.Contains(proxyName, \"kafka\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := proxy.Enable(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ We should be able to publish again (once everything calms down)\n\t\/\/ (otherwise it times out)\n\tfor {\n\t\t_, _, err = producer.SendMessage(&ProducerMessage{\n\t\t\tTopic: \"test.1\",\n\t\t\tValue: StringEncoder(\"comeback message\"),\n\t\t})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc testProducingMessages(t *testing.T, config *Config) {\n\tsetupFunctionalTest(t)\n\tdefer teardownFunctionalTest(t)\n\n\t\/\/ Configure some latency in order to properly validate the request latency metric\n\tfor _, proxy := range Proxies {\n\t\tif _, err := proxy.AddToxic(\"\", \"latency\", \"\", 1, toxiproxy.Attributes{\"latency\": 10}); err != nil {\n\t\t\tt.Fatal(\"Unable to configure latency toxicity\", err)\n\t\t}\n\t}\n\n\tconfig.Producer.Return.Successes = true\n\tconfig.Consumer.Return.Errors = true\n\n\tclient, err := NewClient(kafkaBrokers, config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Keep in mind the current offset\n\tinitialOffset, err := client.GetOffset(\"test.1\", 0, OffsetNewest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tproducer, err := NewAsyncProducerFromClient(client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpectedResponses := TestBatchSize\n\tfor i := 1; i <= TestBatchSize; {\n\t\tmsg := &ProducerMessage{Topic: \"test.1\", Key: nil, Value: StringEncoder(fmt.Sprintf(\"testing %d\", i))}\n\t\tselect {\n\t\tcase producer.Input() <- msg:\n\t\t\ti++\n\t\tcase ret := <-producer.Errors():\n\t\t\tt.Fatal(ret.Err)\n\t\tcase <-producer.Successes():\n\t\t\texpectedResponses--\n\t\t}\n\t}\n\tfor expectedResponses > 0 {\n\t\tselect {\n\t\tcase ret := <-producer.Errors():\n\t\t\tt.Fatal(ret.Err)\n\t\tcase <-producer.Successes():\n\t\t\texpectedResponses--\n\t\t}\n\t}\n\tsafeClose(t, producer)\n\n\t\/\/ Validate producer metrics before using the consumer minus the offset request\n\tvalidateMetrics(t, client)\n\n\tmaster, err := NewConsumerFromClient(client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconsumer, err := master.ConsumePartition(\"test.1\", 0, initialOffset)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i := 1; i <= TestBatchSize; i++ {\n\t\tselect {\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tt.Fatal(\"Not received any more events in the last 10 seconds.\")\n\n\t\tcase err := <-consumer.Errors():\n\t\t\tt.Error(err)\n\n\t\tcase message := <-consumer.Messages():\n\t\t\tif string(message.Value) != fmt.Sprintf(\"testing %d\", i) {\n\t\t\t\tt.Fatalf(\"Unexpected message with index %d: %s\", i, message.Value)\n\t\t\t}\n\t\t}\n\t}\n\tsafeClose(t, consumer)\n\tsafeClose(t, client)\n}\n\nfunc validateMetrics(t *testing.T, client Client) {\n\t\/\/ Get the broker used by test1 topic\n\tvar broker *Broker\n\tif partitions, err := client.Partitions(\"test.1\"); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tfor _, partition := range partitions {\n\t\t\tif b, err := client.Leader(\"test.1\", partition); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t} else {\n\t\t\t\tif broker != nil && b != broker {\n\t\t\t\t\tt.Fatal(\"Expected only one broker, got at least 2\")\n\t\t\t\t}\n\t\t\t\tbroker = b\n\t\t\t}\n\t\t}\n\t}\n\n\tmetricValidators := newMetricValidators()\n\tnoResponse := client.Config().Producer.RequiredAcks == NoResponse\n\tcompressionEnabled := client.Config().Producer.Compression != CompressionNone\n\n\t\/\/ We are adding 10ms of latency to all requests with toxiproxy\n\tminRequestLatencyInMs := 10\n\tif noResponse {\n\t\t\/\/ but when we do not wait for a response it can be less than 1ms\n\t\tminRequestLatencyInMs = 0\n\t}\n\n\t\/\/ We read at least 1 byte from the broker\n\tmetricValidators.registerForAllBrokers(broker, minCountMeterValidator(\"incoming-byte-rate\", 1))\n\t\/\/ in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)\n\tmetricValidators.register(minCountMeterValidator(\"request-rate\", 3))\n\tmetricValidators.register(minCountHistogramValidator(\"request-size\", 3))\n\tmetricValidators.register(minValHistogramValidator(\"request-size\", 1))\n\t\/\/ and at least 2 requests to the registered broker (offset + produces)\n\tmetricValidators.registerForBroker(broker, minCountMeterValidator(\"request-rate\", 2))\n\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"request-size\", 2))\n\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"request-size\", 1))\n\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"request-latency-in-ms\", minRequestLatencyInMs))\n\n\t\/\/ We send at least 1 batch\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"batch-size\", 1))\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"batch-size\", 1))\n\tif compressionEnabled {\n\t\t\/\/ We record compression ratios between [0.50,-10.00] (50-1000 with a histogram) for at least one \"fake\" record\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"compression-ratio\", 1))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"compression-ratio\", 50))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", maxValHistogramValidator(\"compression-ratio\", 1000))\n\t} else {\n\t\t\/\/ We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", countHistogramValidator(\"compression-ratio\", TestBatchSize))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"compression-ratio\", 100))\n\t\tmetricValidators.registerForGlobalAndTopic(\"test_1\", maxValHistogramValidator(\"compression-ratio\", 100))\n\t}\n\n\t\/\/ We send exactly TestBatchSize messages\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", countMeterValidator(\"record-send-rate\", TestBatchSize))\n\t\/\/ We send at least one record per request\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minCountHistogramValidator(\"records-per-request\", 1))\n\tmetricValidators.registerForGlobalAndTopic(\"test_1\", minValHistogramValidator(\"records-per-request\", 1))\n\n\t\/\/ We receive at least 1 byte from the broker\n\tmetricValidators.registerForAllBrokers(broker, minCountMeterValidator(\"outgoing-byte-rate\", 1))\n\tif noResponse {\n\t\t\/\/ in exactly 2 global responses (metadata + offset)\n\t\tmetricValidators.register(countMeterValidator(\"response-rate\", 2))\n\t\tmetricValidators.register(minCountHistogramValidator(\"response-size\", 2))\n\t\t\/\/ and exactly 1 offset response for the registered broker\n\t\tmetricValidators.registerForBroker(broker, countMeterValidator(\"response-rate\", 1))\n\t\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"response-size\", 1))\n\t\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"response-size\", 1))\n\t} else {\n\t\t\/\/ in at least 3 global responses (metadata + offset + produces)\n\t\tmetricValidators.register(minCountMeterValidator(\"response-rate\", 3))\n\t\tmetricValidators.register(minCountHistogramValidator(\"response-size\", 3))\n\t\t\/\/ and at least 2 for the registered broker\n\t\tmetricValidators.registerForBroker(broker, minCountMeterValidator(\"response-rate\", 2))\n\t\tmetricValidators.registerForBroker(broker, minCountHistogramValidator(\"response-size\", 2))\n\t\tmetricValidators.registerForBroker(broker, minValHistogramValidator(\"response-size\", 1))\n\t}\n\n\t\/\/ There should be no requests in flight anymore\n\tmetricValidators.registerForAllBrokers(broker, counterValidator(\"requests-in-flight\", 0))\n\n\t\/\/ Run the validators\n\tmetricValidators.run(t, client.Config().MetricRegistry)\n}\n\n\/\/ Benchmarks\n\nfunc BenchmarkProducerSmall(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 128)))\n}\nfunc BenchmarkProducerMedium(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 1024)))\n}\nfunc BenchmarkProducerLarge(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.64\", ByteEncoder(make([]byte, 8192)))\n}\nfunc BenchmarkProducerSmallSinglePartition(b *testing.B) {\n\tbenchmarkProducer(b, nil, \"test.1\", ByteEncoder(make([]byte, 128)))\n}\nfunc BenchmarkProducerMediumSnappy(b *testing.B) {\n\tconf := NewConfig()\n\tconf.Producer.Compression = CompressionSnappy\n\tbenchmarkProducer(b, conf, \"test.1\", ByteEncoder(make([]byte, 1024)))\n}\n\nfunc benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {\n\tsetupFunctionalTest(b)\n\tdefer teardownFunctionalTest(b)\n\n\tmetricsDisable := os.Getenv(\"METRICS_DISABLE\")\n\tif metricsDisable != \"\" {\n\t\tpreviousUseNilMetrics := metrics.UseNilMetrics\n\t\tLogger.Println(\"Disabling metrics using no-op implementation\")\n\t\tmetrics.UseNilMetrics = true\n\t\t\/\/ Restore previous setting\n\t\tdefer func() {\n\t\t\tmetrics.UseNilMetrics = previousUseNilMetrics\n\t\t}()\n\t}\n\n\tproducer, err := NewAsyncProducer(kafkaBrokers, conf)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tb.ResetTimer()\n\n\tfor i := 1; i <= b.N; {\n\t\tmsg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf(\"%d\", i)), Value: value}\n\t\tselect {\n\t\tcase producer.Input() <- msg:\n\t\t\ti++\n\t\tcase ret := <-producer.Errors():\n\t\t\tb.Fatal(ret.Err)\n\t\t}\n\t}\n\tsafeClose(b, producer)\n}\n<|endoftext|>"} {"text":"<commit_before>package rccsv\n\n\/*\n\tCSV reader for RingCentral Call Log CSV files\n\n\tSYNOPSIS\n\n \tcsv := rccsv.NewCallLogRecordsCsvReader()\n\terr := csv.ReadFile(\"\/path\/to\/my.csv\")\n\tif err != nil {\n\t\tfmt.Printf(\"ERR %v\", err)\n\t}\n\tstats := csv.CallLogRecordsCsv.GetStatsForVoiceRecordings()\n\n\t\/\/ NOTE on stripping UTF-8 BOM\n\thttps:\/\/github.com\/golang\/go\/issues\/9588\n\thttps:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/OToNIPdfkks\n\thttp:\/\/cautery.blogspot.com\/2013\/04\/stripping-utf-8-byte-order-mark-with-go.html\n*\/\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/grokify\/gotilla\/encoding\/csvutil\"\n\t\"github.com\/grokify\/gotilla\/time\/timeutil\"\n\t\"github.com\/ttacon\/libphonenumber\"\n)\n\ntype CallLogStats struct {\n\tDays float64\n\tNumCalls int64\n\tTotalSeconds int64\n\tTotalMinutes float32\n\tTotalHours float32\n\tAverageMinutes float32\n\tDailyCalls float32\n\tDailyMinutes float32\n\tDailyHours float32\n\tDt14Start int64\n\tDt14End int64\n}\n\nfunc NewCallLogStats(days int64) CallLogStats {\n\ts := CallLogStats{}\n\ts.Days = -1\n\ts.NumCalls = 0\n\ts.TotalSeconds = 0\n\ts.TotalMinutes = 0.0\n\ts.TotalHours = 0.0\n\ts.AverageMinutes = 0.0\n\ts.DailyCalls = 0.0\n\ts.DailyMinutes = 0.0\n\ts.DailyHours = 0.0\n\ts.Dt14Start = -1\n\ts.Dt14End = -1\n\treturn s\n}\n\nfunc (s *CallLogStats) Inflate() {\n\tif s.NumCalls < 1 {\n\t\treturn\n\t}\n\tif s.Dt14Start > 0 && s.Dt14End > 0 && s.Dt14Start < s.Dt14End {\n\t\tdtStart, err1 := timeutil.TimeForDt14(s.Dt14Start)\n\t\tif err1 == nil {\n\t\t\tdtEnd, err2 := timeutil.TimeForDt14(s.Dt14End)\n\t\t\tif err2 == nil {\n\t\t\t\tdur := dtEnd.Sub(dtStart)\n\t\t\t\tdurHrs := dur.Hours()\n\t\t\t\tdays := durHrs \/ 24\n\t\t\t\ts.Days = days\n\t\t\t}\n\t\t}\n\t}\n\ts.TotalMinutes = float32(s.TotalSeconds \/ 60)\n\ts.TotalHours = float32(s.TotalSeconds \/ 60 \/ 60)\n\ts.AverageMinutes = s.TotalMinutes \/ float32(s.NumCalls)\n\tif s.Days > 0 {\n\t\ts.DailyCalls = float32(s.NumCalls) \/ float32(s.Days)\n\t\ts.DailyMinutes = s.DailyCalls * s.AverageMinutes\n\t\ts.DailyHours = s.DailyMinutes \/ 60\n\t}\n}\n\ntype CallLogRecordsCsv struct {\n\tCallLogRecords []CallLogRecordCsv\n}\n\nfunc (rs *CallLogRecordsCsv) GetStatsForVoiceRecordings() CallLogStats {\n\tstats := NewCallLogStats(int64(30))\n\tstats.NumCalls = 0\n\tfor _, rec := range rs.CallLogRecords {\n\t\trec.Inflate()\n\t\tif rec.Action != \"Phone Call\" && rec.Action != \"VoIP Call\" && rec.Action != \"FindMe\" {\n\t\t\t\/\/continue\n\t\t}\n\t\tif rec.ActionResult != \"Accepted\" && rec.ActionResult != \"Call connected\" {\n\t\t\tcontinue\n\t\t}\n\t\tstats.NumCalls += 1\n\t\tstats.TotalSeconds += rec.DurationSeconds\n\n\t\tif len(rec.TimeRfc3339) > 0 {\n\t\t\tdt, err := time.Parse(time.RFC3339, rec.TimeRfc3339)\n\t\t\tif err == nil {\n\t\t\t\tdt14 := timeutil.Dt14ForTime(dt)\n\t\t\t\tif stats.Dt14Start < 0 || dt14 < stats.Dt14Start {\n\t\t\t\t\tstats.Dt14Start = dt14\n\t\t\t\t}\n\t\t\t\tif stats.Dt14End < 0 || dt14 > stats.Dt14Start {\n\t\t\t\t\tstats.Dt14End = dt14\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstats.Inflate()\n\treturn stats\n}\n\ntype CallLogRecordCsv struct {\n\tType string\n\tPhoneNumber string\n\tPhoneNumberE164 string\n\tPhoneNumberIso31661a2 string\n\tName string\n\tDate string\n\tTime string\n\tTimeRfc3339 string\n\tAction string\n\tActionResult string\n\tResultDescription string\n\tDuration string\n\tDurationSeconds int64\n}\n\nfunc (r *CallLogRecordCsv) SetCountry() {}\n\nfunc (r *CallLogRecordCsv) Inflate() {\n\tr.PhoneNumberIso31661a2 = \"\"\n\tif 1 == 0 && len(r.PhoneNumber) > 0 {\n\t\tcountry := \"US\"\n\t\tnum, err := libphonenumber.Parse(r.PhoneNumber, country)\n\t\tif err == nil {\n\t\t\tr.PhoneNumberE164 = libphonenumber.Format(num, libphonenumber.E164)\n\t\t\tr.PhoneNumberIso31661a2 = country\n\t\t}\n\t}\n\trx1 := regexp.MustCompile(`^([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2})$`)\n\trs1 := rx1.FindStringSubmatch(r.Duration)\n\tif len(rs1) > 0 {\n\t\thr, err := strconv.ParseInt(rs1[1], 10, 64)\n\t\tif err != nil {\n\t\t\thr = 0\n\t\t}\n\t\tmn, err := strconv.ParseInt(rs1[2], 10, 64)\n\t\tif err != nil {\n\t\t\tmn = 0\n\t\t}\n\t\tsc, err := strconv.ParseInt(rs1[3], 10, 64)\n\t\tif err != nil {\n\t\t\tsc = 0\n\t\t}\n\t\tsc = sc + (mn * 60) + (hr * 60 * 60)\n\t\tr.DurationSeconds = sc\n\t}\n\tif len(r.Date) > 0 && len(r.Time) > 0 {\n\t\tdateTimeRaw := r.Date + \" \" + r.Time\n\t\tlayout := \"Mon 01\/02\/2006 3:04 PM\"\n\t\tmyTime, err := time.Parse(layout, dateTimeRaw)\n\t\tif err == nil {\n\t\t\tr.TimeRfc3339 = myTime.Format(time.RFC3339)\n\t\t}\n\t}\n}\n\nfunc (r *CallLogRecordCsv) LoadRow(cols []string, vals []string) error {\n\tlc := len(cols)\n\tlv := len(vals)\n\tif lc != lv {\n\t\treturn errors.New(\"RC CSV COL ROW LENGTH MISMATCH\")\n\t}\n\trx1 := regexp.MustCompile(`\\s+`)\n\tfor i := 0; i < lc; i++ {\n\t\tcol := cols[i]\n\t\tval := vals[i]\n\t\tkey := rx1.ReplaceAllString(col, \"\")\n\t\treflect.ValueOf(r).Elem().FieldByName(key).Set(reflect.ValueOf(val))\n\t}\n\treturn nil\n}\n\ntype CallLogRecordsCsvReader struct {\n\tCallLogRecordsCsv CallLogRecordsCsv\n}\n\nfunc NewCallLogRecordsCsvReader() CallLogRecordsCsvReader {\n\trd := CallLogRecordsCsvReader{}\n\treturn rd\n}\n\nfunc (rd *CallLogRecordsCsvReader) ReadFile(path string) error {\n\treturn rd.ReadFileBom(path, false)\n}\n\nfunc (rd *CallLogRecordsCsvReader) ReadFileBom(path string, stripBom bool) error {\n\treader, file, err := csvutil.NewReader(path, ',', stripBom)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti := 0\n\tcols := []string{}\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfile.Close()\n\t\t\tif stripBom == false && err.Error() == \"line 1, column 1: bare \\\" in non-quoted-field\" {\n\t\t\t\treturn rd.ReadFileBom(path, true)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif i == 0 {\n\t\t\tcols = record\n\t\t\ti += 1\n\t\t\tcontinue\n\t\t}\n\t\tobj := CallLogRecordCsv{}\n\t\t_ = obj.LoadRow(cols, record)\n\t\tobj.Inflate()\n\t\trd.CallLogRecordsCsv.CallLogRecords = append(rd.CallLogRecordsCsv.CallLogRecords, obj)\n\t\ti += 1\n\t}\n\tfile.Close()\n\treturn nil\n}\n<commit_msg>minor formatting<commit_after>package rccsv\n\n\/*\n\tCSV reader for RingCentral Call Log CSV files\n\n\tSYNOPSIS\n\n \tcsv := rccsv.NewCallLogRecordsCsvReader()\n\terr := csv.ReadFile(\"\/path\/to\/my.csv\")\n\tif err != nil {\n\t\tfmt.Printf(\"ERR %v\", err)\n\t}\n\tstats := csv.CallLogRecordsCsv.GetStatsForVoiceRecordings()\n\n\t\/\/ NOTE on stripping UTF-8 BOM\n\thttps:\/\/github.com\/golang\/go\/issues\/9588\n\thttps:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/OToNIPdfkks\n\thttp:\/\/cautery.blogspot.com\/2013\/04\/stripping-utf-8-byte-order-mark-with-go.html\n*\/\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/grokify\/gotilla\/encoding\/csvutil\"\n\t\"github.com\/grokify\/gotilla\/time\/timeutil\"\n\t\"github.com\/ttacon\/libphonenumber\"\n)\n\ntype CallLogStats struct {\n\tDays float64\n\tNumCalls int64\n\tTotalSeconds int64\n\tTotalMinutes float32\n\tTotalHours float32\n\tAverageMinutes float32\n\tDailyCalls float32\n\tDailyMinutes float32\n\tDailyHours float32\n\tDt14Start int64\n\tDt14End int64\n}\n\nfunc NewCallLogStats(days int64) CallLogStats {\n\ts := CallLogStats{\n\t\tDays: -1,\n\t\tNumCalls: 0,\n\t\tTotalSeconds: 0,\n\t\tTotalMinutes: 0.0,\n\t\tTotalHours: 0.0,\n\t\tAverageMinutes: 0.0,\n\t\tDailyCalls: 0.0,\n\t\tDailyMinutes: 0.0,\n\t\tDailyHours: 0.0,\n\t\tDt14Start: -1,\n\t\tDt14End: -1}\n\treturn s\n}\n\nfunc (s *CallLogStats) Inflate() {\n\tif s.NumCalls < 1 {\n\t\treturn\n\t}\n\tif s.Dt14Start > 0 && s.Dt14End > 0 && s.Dt14Start < s.Dt14End {\n\t\tdtStart, err1 := timeutil.TimeForDt14(s.Dt14Start)\n\t\tif err1 == nil {\n\t\t\tdtEnd, err2 := timeutil.TimeForDt14(s.Dt14End)\n\t\t\tif err2 == nil {\n\t\t\t\tdur := dtEnd.Sub(dtStart)\n\t\t\t\tdurHrs := dur.Hours()\n\t\t\t\tdays := durHrs \/ 24\n\t\t\t\ts.Days = days\n\t\t\t}\n\t\t}\n\t}\n\ts.TotalMinutes = float32(s.TotalSeconds \/ 60)\n\ts.TotalHours = float32(s.TotalSeconds \/ 60 \/ 60)\n\ts.AverageMinutes = s.TotalMinutes \/ float32(s.NumCalls)\n\tif s.Days > 0 {\n\t\ts.DailyCalls = float32(s.NumCalls) \/ float32(s.Days)\n\t\ts.DailyMinutes = s.DailyCalls * s.AverageMinutes\n\t\ts.DailyHours = s.DailyMinutes \/ 60\n\t}\n}\n\ntype CallLogRecordsCsv struct {\n\tCallLogRecords []CallLogRecordCsv\n}\n\nfunc (rs *CallLogRecordsCsv) GetStatsForVoiceRecordings() CallLogStats {\n\tstats := NewCallLogStats(int64(30))\n\tstats.NumCalls = 0\n\tfor _, rec := range rs.CallLogRecords {\n\t\trec.Inflate()\n\t\tif rec.Action != \"Phone Call\" && rec.Action != \"VoIP Call\" && rec.Action != \"FindMe\" {\n\t\t\t\/\/continue\n\t\t}\n\t\tif rec.ActionResult != \"Accepted\" && rec.ActionResult != \"Call connected\" {\n\t\t\tcontinue\n\t\t}\n\t\tstats.NumCalls += 1\n\t\tstats.TotalSeconds += rec.DurationSeconds\n\n\t\tif len(rec.TimeRfc3339) > 0 {\n\t\t\tdt, err := time.Parse(time.RFC3339, rec.TimeRfc3339)\n\t\t\tif err == nil {\n\t\t\t\tdt14 := timeutil.Dt14ForTime(dt)\n\t\t\t\tif stats.Dt14Start < 0 || dt14 < stats.Dt14Start {\n\t\t\t\t\tstats.Dt14Start = dt14\n\t\t\t\t}\n\t\t\t\tif stats.Dt14End < 0 || dt14 > stats.Dt14Start {\n\t\t\t\t\tstats.Dt14End = dt14\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstats.Inflate()\n\treturn stats\n}\n\ntype CallLogRecordCsv struct {\n\tType string\n\tPhoneNumber string\n\tPhoneNumberE164 string\n\tPhoneNumberIso31661a2 string\n\tName string\n\tDate string\n\tTime string\n\tTimeRfc3339 string\n\tAction string\n\tActionResult string\n\tResultDescription string\n\tDuration string\n\tDurationSeconds int64\n}\n\nfunc (r *CallLogRecordCsv) SetCountry() {}\n\nfunc (r *CallLogRecordCsv) Inflate() {\n\tr.PhoneNumberIso31661a2 = \"\"\n\tif 1 == 0 && len(r.PhoneNumber) > 0 {\n\t\tcountry := \"US\"\n\t\tnum, err := libphonenumber.Parse(r.PhoneNumber, country)\n\t\tif err == nil {\n\t\t\tr.PhoneNumberE164 = libphonenumber.Format(num, libphonenumber.E164)\n\t\t\tr.PhoneNumberIso31661a2 = country\n\t\t}\n\t}\n\trx1 := regexp.MustCompile(`^([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2})$`)\n\trs1 := rx1.FindStringSubmatch(r.Duration)\n\tif len(rs1) > 0 {\n\t\thr, err := strconv.ParseInt(rs1[1], 10, 64)\n\t\tif err != nil {\n\t\t\thr = 0\n\t\t}\n\t\tmn, err := strconv.ParseInt(rs1[2], 10, 64)\n\t\tif err != nil {\n\t\t\tmn = 0\n\t\t}\n\t\tsc, err := strconv.ParseInt(rs1[3], 10, 64)\n\t\tif err != nil {\n\t\t\tsc = 0\n\t\t}\n\t\tsc = sc + (mn * 60) + (hr * 60 * 60)\n\t\tr.DurationSeconds = sc\n\t}\n\tif len(r.Date) > 0 && len(r.Time) > 0 {\n\t\tdateTimeRaw := r.Date + \" \" + r.Time\n\t\tlayout := \"Mon 01\/02\/2006 3:04 PM\"\n\t\tmyTime, err := time.Parse(layout, dateTimeRaw)\n\t\tif err == nil {\n\t\t\tr.TimeRfc3339 = myTime.Format(time.RFC3339)\n\t\t}\n\t}\n}\n\nfunc (r *CallLogRecordCsv) LoadRow(cols []string, vals []string) error {\n\tlc := len(cols)\n\tlv := len(vals)\n\tif lc != lv {\n\t\treturn errors.New(\"RC CSV COL ROW LENGTH MISMATCH\")\n\t}\n\trx1 := regexp.MustCompile(`\\s+`)\n\tfor i := 0; i < lc; i++ {\n\t\tcol := cols[i]\n\t\tval := vals[i]\n\t\tkey := rx1.ReplaceAllString(col, \"\")\n\t\treflect.ValueOf(r).Elem().FieldByName(key).Set(reflect.ValueOf(val))\n\t}\n\treturn nil\n}\n\ntype CallLogRecordsCsvReader struct {\n\tCallLogRecordsCsv CallLogRecordsCsv\n}\n\nfunc NewCallLogRecordsCsvReader() CallLogRecordsCsvReader {\n\trd := CallLogRecordsCsvReader{}\n\treturn rd\n}\n\nfunc (rd *CallLogRecordsCsvReader) ReadFile(path string) error {\n\treturn rd.ReadFileBom(path, false)\n}\n\nfunc (rd *CallLogRecordsCsvReader) ReadFileBom(path string, stripBom bool) error {\n\treader, file, err := csvutil.NewReader(path, ',', stripBom)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti := 0\n\tcols := []string{}\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfile.Close()\n\t\t\tif stripBom == false && err.Error() == \"line 1, column 1: bare \\\" in non-quoted-field\" {\n\t\t\t\treturn rd.ReadFileBom(path, true)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif i == 0 {\n\t\t\tcols = record\n\t\t\ti += 1\n\t\t\tcontinue\n\t\t}\n\t\tobj := CallLogRecordCsv{}\n\t\t_ = obj.LoadRow(cols, record)\n\t\tobj.Inflate()\n\t\trd.CallLogRecordsCsv.CallLogRecords = append(rd.CallLogRecordsCsv.CallLogRecords, obj)\n\t\ti += 1\n\t}\n\tfile.Close()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package procexp\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/HouzuoGuo\/laitos\/platform\"\n)\n\nfunc TestGetProcIDs(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tallProcIDs := GetProcIDs()\n\tvar mustFindPIDs = []struct {\n\t\tpid int\n\t}{{1}, {os.Getpid()}}\n\tfor _, findPID := range mustFindPIDs {\n\t\tt.Run(fmt.Sprintf(\"find PID %d\", findPID.pid), func(t *testing.T) {\n\t\t\tif foundAt := sort.SearchInts(allProcIDs, findPID.pid); allProcIDs[foundAt] != findPID.pid {\n\t\t\t\tt.Fatal(findPID, allProcIDs)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetProcTaskIDs(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\t\/\/ The program's main thread (task ID == program PID) must show up\n\tallTaskIDs := GetProcTaskIDs(os.Getpid())\n\tif foundAt := sort.SearchInts(allTaskIDs, os.Getpid()); allTaskIDs[foundAt] != os.Getpid() {\n\t\tt.Fatal(allTaskIDs)\n\t}\n\t\/\/ Get this process' own task IDs\n\tallTaskIDs = GetProcTaskIDs(0)\n\tif foundAt := sort.SearchInts(allTaskIDs, os.Getpid()); allTaskIDs[foundAt] != os.Getpid() {\n\t\tt.Fatal(allTaskIDs)\n\t}\n}\n\nfunc TestGetTaskStatus_WaitChannel(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/bin\/cat\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\ttime.Sleep(1 * time.Second)\n\t\/\/ By now kernel should see that sleep command is sleeping nicely\n\tstatus := GetTaskStatus(cmd.Process.Pid, cmd.Process.Pid)\n\t\/\/ On the stack there shall be at very least syscall entry, architecture-dependent sleep function, and a common sleep function.\n\tif status.ID != cmd.Process.Pid || status.WaitChannelName == \"\" {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\t\/\/ The stack file can only be read by root, hence it's not part of this test yet.\n}\n\nfunc TestGetTaskStatus_SchedulerStats(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/usr\/bin\/yes\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\t\/\/ It takes couple of seconds for the process to use up some CPU time\n\ttime.Sleep(8 * time.Second)\n\tstatus := GetTaskStatus(cmd.Process.Pid, cmd.Process.Pid)\n\tif status.SchedulerStats.NumVoluntarySwitches == 0 || status.SchedulerStats.NumInvoluntarySwitches == 0 || status.SchedulerStats.NumRunSec == 0 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n}\n\nfunc TestGetProcAndTaskStatus(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/usr\/bin\/yes\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\t\/\/ It takes couple of seconds for the process to use up some CPU time\n\ttime.Sleep(8 * time.Second)\n\tstatus, err := GetProcAndTaskStatus(cmd.Process.Pid)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif status.Stats.NumUserModeSecInclChildren+status.Stats.NumKernelModeSecInclChildren < 1 ||\n\t\tstatus.Stats.ResidentSetMemSizeBytes < 100 || status.Stats.VirtualMemSizeBytes < 100 ||\n\t\tstatus.Stats.State == \"\" || status.Status.Name != \"yes\" ||\n\t\tlen(status.Tasks) < 1 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\tmainTask := status.Tasks[cmd.Process.Pid]\n\tif mainTask.ID != cmd.Process.Pid || mainTask.SchedulerStats.NumRunSec < 1 || mainTask.SchedulerStats.NumWaitSec < 0.001 ||\n\t\tmainTask.SchedulerStats.NumVoluntarySwitches < 10 || mainTask.SchedulerStats.NumInvoluntarySwitches < 10 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\tif status.SchedulerStatsSum.NumRunSec < 1 || status.SchedulerStatsSum.NumWaitSec < 0.001 ||\n\t\tstatus.SchedulerStatsSum.NumVoluntarySwitches < 10 || status.SchedulerStatsSum.NumInvoluntarySwitches < 10 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n}\n<commit_msg>procexp: skip tests that will not run on wsl<commit_after>package procexp\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/HouzuoGuo\/laitos\/platform\"\n)\n\nfunc TestGetProcIDs(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tallProcIDs := GetProcIDs()\n\tvar mustFindPIDs = []struct {\n\t\tpid int\n\t}{{1}, {os.Getpid()}}\n\tfor _, findPID := range mustFindPIDs {\n\t\tt.Run(fmt.Sprintf(\"find PID %d\", findPID.pid), func(t *testing.T) {\n\t\t\tif foundAt := sort.SearchInts(allProcIDs, findPID.pid); allProcIDs[foundAt] != findPID.pid {\n\t\t\t\tt.Fatal(findPID, allProcIDs)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetProcTaskIDs(t *testing.T) {\n\tif platform.HostIsWindows() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\t\/\/ The program's main thread (task ID == program PID) must show up\n\tallTaskIDs := GetProcTaskIDs(os.Getpid())\n\tif foundAt := sort.SearchInts(allTaskIDs, os.Getpid()); allTaskIDs[foundAt] != os.Getpid() {\n\t\tt.Fatal(allTaskIDs)\n\t}\n\t\/\/ Get this process' own task IDs\n\tallTaskIDs = GetProcTaskIDs(0)\n\tif foundAt := sort.SearchInts(allTaskIDs, os.Getpid()); allTaskIDs[foundAt] != os.Getpid() {\n\t\tt.Fatal(allTaskIDs)\n\t}\n}\n\nfunc TestGetTaskStatus_WaitChannel(t *testing.T) {\n\tif platform.HostIsWindows() || platform.HostIsWSL() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/bin\/cat\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\ttime.Sleep(1 * time.Second)\n\t\/\/ By now kernel should see that sleep command is sleeping nicely\n\tstatus := GetTaskStatus(cmd.Process.Pid, cmd.Process.Pid)\n\t\/\/ On the stack there shall be at very least syscall entry, architecture-dependent sleep function, and a common sleep function.\n\tif status.ID != cmd.Process.Pid || status.WaitChannelName == \"\" {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\t\/\/ The stack file can only be read by root, hence it's not part of this test yet.\n}\n\nfunc TestGetTaskStatus_SchedulerStats(t *testing.T) {\n\tif platform.HostIsWindows() || platform.HostIsWSL() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/usr\/bin\/yes\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\t\/\/ It takes couple of seconds for the process to use up some CPU time\n\ttime.Sleep(8 * time.Second)\n\tstatus := GetTaskStatus(cmd.Process.Pid, cmd.Process.Pid)\n\tif status.SchedulerStats.NumVoluntarySwitches == 0 || status.SchedulerStats.NumInvoluntarySwitches == 0 || status.SchedulerStats.NumRunSec == 0 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n}\n\nfunc TestGetProcAndTaskStatus(t *testing.T) {\n\tif platform.HostIsWindows() || platform.HostIsWSL() {\n\t\tt.Skip(\"this test will not run on windows\")\n\t\treturn\n\t}\n\tcmd := exec.Command(\"\/usr\/bin\/yes\")\n\tcmd.Stdout = ioutil.Discard\n\tcmd.Stderr = ioutil.Discard\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t_ = cmd.Process.Kill()\n\t\t_, _ = cmd.Process.Wait()\n\t}()\n\t\/\/ It takes couple of seconds for the process to use up some CPU time\n\ttime.Sleep(8 * time.Second)\n\tstatus, err := GetProcAndTaskStatus(cmd.Process.Pid)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif status.Stats.NumUserModeSecInclChildren+status.Stats.NumKernelModeSecInclChildren < 1 ||\n\t\tstatus.Stats.ResidentSetMemSizeBytes < 100 || status.Stats.VirtualMemSizeBytes < 100 ||\n\t\tstatus.Stats.State == \"\" || status.Status.Name != \"yes\" ||\n\t\tlen(status.Tasks) < 1 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\tmainTask := status.Tasks[cmd.Process.Pid]\n\tif mainTask.ID != cmd.Process.Pid || mainTask.SchedulerStats.NumRunSec < 1 || mainTask.SchedulerStats.NumWaitSec < 0.001 ||\n\t\tmainTask.SchedulerStats.NumVoluntarySwitches < 10 || mainTask.SchedulerStats.NumInvoluntarySwitches < 10 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n\tif status.SchedulerStatsSum.NumRunSec < 1 || status.SchedulerStatsSum.NumWaitSec < 0.001 ||\n\t\tstatus.SchedulerStatsSum.NumVoluntarySwitches < 10 || status.SchedulerStatsSum.NumInvoluntarySwitches < 10 {\n\t\tt.Fatalf(\"%+v\", status)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package saml2\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\n\t\"github.com\/beevik\/etree\"\n)\n\n\/\/ErrMissingElement is the error type that indicates an element and\/or attribute is\n\/\/missing. It provides a structured error that can be more appropriately acted\n\/\/upon.\ntype ErrMissingElement struct {\n\tTag, Attribute string\n}\n\n\/\/ErrMissingAssertion indicates that an appropriate assertion element could not\n\/\/be found in the SAML Response\nvar (\n\tErrMissingAssertion = ErrMissingElement{Tag: AssertionTag}\n)\n\nfunc (e ErrMissingElement) Error() string {\n\tif e.Attribute != \"\" {\n\t\treturn fmt.Sprintf(\"missing %s attribute on %s element\", e.Attribute, e.Tag)\n\t}\n\treturn fmt.Sprintf(\"missing %s element\", e.Tag)\n}\n\n\/\/RetrieveAssertionInfo takes an encoded response and returns the AssertionInfo\n\/\/contained, or an error message if an error has been encountered.\nfunc (sp *SAMLServiceProvider) RetrieveAssertionInfo(encodedResponse string) (*AssertionInfo, error) {\n\tassertionInfo := &AssertionInfo{}\n\n\tel, err := sp.ValidateEncodedResponse(encodedResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error validating response: %v\", err)\n\t}\n\n\tassertionElement := el.FindElement(AssertionTag)\n\tif assertionElement == nil {\n\t\treturn nil, ErrMissingAssertion\n\t}\n\n\t\/\/Verify all conditions for the assertion\n\tconditionsStatement := assertionElement.FindElement(childPath(assertionElement.Space, ConditionsTag))\n\tif conditionsStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: ConditionsTag}\n\t}\n\n\twarningInfo, err := sp.VerifyAssertionConditions(assertionElement, conditionsStatement)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Get the NameID\n\tsubjectStatement := assertionElement.FindElement(childPath(assertionElement.Space, SubjectTag))\n\tif subjectStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: SubjectTag}\n\t}\n\n\tnameIDStatement := subjectStatement.FindElement(childPath(assertionElement.Space, NameIdTag))\n\tif nameIDStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: NameIdTag}\n\t}\n\tassertionInfo.NameID = nameIDStatement.Text()\n\n\t\/\/Get the actual assertion attributes\n\tattributeStatement := assertionElement.FindElement(childPath(assertionElement.Space, AttributeStatementTag))\n\tif attributeStatement == nil && !sp.AllowMissingAttributes {\n\t\treturn nil, ErrMissingElement{Tag: AttributeStatementTag}\n\t}\n\n\tif attributeStatement != nil {\n\t\tdoc := etree.NewDocument()\n\t\tdoc.SetRoot(attributeStatement)\n\t\tbs, err := doc.WriteToBytes()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = xml.Unmarshal(bs, &assertionInfo.Values)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tassertionInfo.WarningInfo = warningInfo\n\treturn assertionInfo, nil\n}\n<commit_msg>Expose typed signature verification errors<commit_after>package saml2\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\n\t\"github.com\/beevik\/etree\"\n)\n\n\/\/ErrMissingElement is the error type that indicates an element and\/or attribute is\n\/\/missing. It provides a structured error that can be more appropriately acted\n\/\/upon.\ntype ErrMissingElement struct {\n\tTag, Attribute string\n}\n\ntype ErrVerification struct {\n\tCause error\n}\n\nfunc (e ErrVerification) Error() string {\n\treturn fmt.Sprintf(\"error validating response: %s\", e.Cause.Error())\n}\n\n\/\/ErrMissingAssertion indicates that an appropriate assertion element could not\n\/\/be found in the SAML Response\nvar (\n\tErrMissingAssertion = ErrMissingElement{Tag: AssertionTag}\n)\n\nfunc (e ErrMissingElement) Error() string {\n\tif e.Attribute != \"\" {\n\t\treturn fmt.Sprintf(\"missing %s attribute on %s element\", e.Attribute, e.Tag)\n\t}\n\treturn fmt.Sprintf(\"missing %s element\", e.Tag)\n}\n\n\/\/RetrieveAssertionInfo takes an encoded response and returns the AssertionInfo\n\/\/contained, or an error message if an error has been encountered.\nfunc (sp *SAMLServiceProvider) RetrieveAssertionInfo(encodedResponse string) (*AssertionInfo, error) {\n\tassertionInfo := &AssertionInfo{}\n\n\tel, err := sp.ValidateEncodedResponse(encodedResponse)\n\tif err != nil {\n\t\treturn nil, ErrVerification{Cause: err}\n\t}\n\n\tassertionElement := el.FindElement(AssertionTag)\n\tif assertionElement == nil {\n\t\treturn nil, ErrMissingAssertion\n\t}\n\n\t\/\/Verify all conditions for the assertion\n\tconditionsStatement := assertionElement.FindElement(childPath(assertionElement.Space, ConditionsTag))\n\tif conditionsStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: ConditionsTag}\n\t}\n\n\twarningInfo, err := sp.VerifyAssertionConditions(assertionElement, conditionsStatement)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/Get the NameID\n\tsubjectStatement := assertionElement.FindElement(childPath(assertionElement.Space, SubjectTag))\n\tif subjectStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: SubjectTag}\n\t}\n\n\tnameIDStatement := subjectStatement.FindElement(childPath(assertionElement.Space, NameIdTag))\n\tif nameIDStatement == nil {\n\t\treturn nil, ErrMissingElement{Tag: NameIdTag}\n\t}\n\tassertionInfo.NameID = nameIDStatement.Text()\n\n\t\/\/Get the actual assertion attributes\n\tattributeStatement := assertionElement.FindElement(childPath(assertionElement.Space, AttributeStatementTag))\n\tif attributeStatement == nil && !sp.AllowMissingAttributes {\n\t\treturn nil, ErrMissingElement{Tag: AttributeStatementTag}\n\t}\n\n\tif attributeStatement != nil {\n\t\tdoc := etree.NewDocument()\n\t\tdoc.SetRoot(attributeStatement)\n\t\tbs, err := doc.WriteToBytes()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = xml.Unmarshal(bs, &assertionInfo.Values)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tassertionInfo.WarningInfo = warningInfo\n\treturn assertionInfo, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package robots\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/trinchan\/slackbot\/robots\"\n)\n\ntype bot struct{}\n\nfunc init() {\n\tp := &bot{}\n\trobots.RegisterRobot(\"raffl\", p)\n}\n\nfunc (pb bot) Run(p *robots.Payload) (slashCommandImmediateReturn string) {\n\tgo pb.DeferredAction(p)\n\treturn \"raffl this!\"\n}\n\nfunc (pb bot) DeferredAction(p *robots.Payload) {\n\tresponse := &robots.IncomingWebhook{\n\t\tDomain: p.TeamDomain,\n\t\tChannel: p.ChannelID,\n\t\tUsername: \"r\taffl\",\n\t\tText: fmt.Sprintf(\"@%s Raffl!\", p.UserName),\n\t\tIconEmoji: \":gift:\",\n\t\tUnfurlLinks: true,\n\t\tParse: robots.ParseStyleFull,\n\t}\n\tresponse.Send()\n}\n\nfunc (pb bot) Description() (description string) {\n\treturn \"Raffl bot!\\n\\tUsage: \/raffl\\n\\tExpected Response: @user: Raffl this!\"\n}\n<commit_msg>Change immediate response<commit_after>package robots\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/trinchan\/slackbot\/robots\"\n)\n\ntype bot struct{}\n\nfunc init() {\n\tp := &bot{}\n\trobots.RegisterRobot(\"raffl\", p)\n}\n\nfunc (pb bot) Run(p *robots.Payload) (slashCommandImmediateReturn string) {\n\tgo pb.DeferredAction(p)\n\t\/\/return \"raffl this!\"\n\treturn \"raffl\"\n}\n\nfunc (pb bot) DeferredAction(p *robots.Payload) {\n\tresponse := &robots.IncomingWebhook{\n\t\tDomain: p.TeamDomain,\n\t\tChannel: p.ChannelID,\n\t\tUsername: \"raffl\",\n\t\tText: fmt.Sprintf(\"@%s Raffl!\", p.UserName),\n\t\tIconEmoji: \":gift:\",\n\t\tUnfurlLinks: true,\n\t\tParse: robots.ParseStyleFull,\n\t}\n\tresponse.Send()\n}\n\nfunc (pb bot) Description() (description string) {\n\treturn \"Raffl bot!\\n\\tUsage: \/raffl\\n\\tExpected Response: @user: Raffl this!\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The ct_server binary runs the CT personality.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tetcdnaming \"github.com\/coreos\/etcd\/clientv3\/naming\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/ctfe\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/ctfe\/configpb\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/util\"\n\t\"github.com\/google\/trillian\"\n\t\"github.com\/google\/trillian\/monitoring\/opencensus\"\n\t\"github.com\/google\/trillian\/monitoring\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/tomasen\/realip\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/balancer\/roundrobin\"\n\t\"google.golang.org\/grpc\/naming\"\n\n\t\/\/ Register PEMKeyFile, PrivateKey and PKCS11Config ProtoHandlers\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/der\/proto\"\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/pem\/proto\"\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/pkcs11\/proto\"\n)\n\n\/\/ Global flags that affect all log instances.\nvar (\n\thttpEndpoint = flag.String(\"http_endpoint\", \"localhost:6962\", \"Endpoint for HTTP (host:port)\")\n\tmetricsEndpoint = flag.String(\"metrics_endpoint\", \"\", \"Endpoint for serving metrics; if left empty, metrics will be visible on --http_endpoint\")\n\trpcBackend = flag.String(\"log_rpc_server\", \"\", \"Backend specification; comma-separated list or etcd service name (if --etcd_servers specified). If unset backends are specified in config (as a LogMultiConfig proto)\")\n\trpcDeadline = flag.Duration(\"rpc_deadline\", time.Second*10, \"Deadline for backend RPC requests\")\n\tgetSTHInterval = flag.Duration(\"get_sth_interval\", time.Second*180, \"Interval between internal get-sth operations (0 to disable)\")\n\tlogConfig = flag.String(\"log_config\", \"\", \"File holding log config in text proto format\")\n\tmaxGetEntries = flag.Int64(\"max_get_entries\", 0, \"Max number of entries we allow in a get-entries request (0=>use default 1000)\")\n\tetcdServers = flag.String(\"etcd_servers\", \"\", \"A comma-separated list of etcd servers\")\n\tetcdHTTPService = flag.String(\"etcd_http_service\", \"trillian-ctfe-http\", \"Service name to announce our HTTP endpoint under\")\n\tetcdMetricsService = flag.String(\"etcd_metrics_service\", \"trillian-ctfe-metrics-http\", \"Service name to announce our HTTP metrics endpoint under\")\n\tmaskInternalErrors = flag.Bool(\"mask_internal_errors\", false, \"Don't return error strings with Internal Server Error HTTP responses\")\n\ttracing = flag.Bool(\"tracing\", false, \"If true opencensus Stackdriver tracing will be enabled. See https:\/\/opencensus.io\/.\")\n\ttracingProjectID = flag.String(\"tracing_project_id\", \"\", \"project ID to pass to stackdriver. Can be empty for GCP, consult docs for other platforms.\")\n\ttracingPercent = flag.Int(\"tracing_percent\", 0, \"Percent of requests to be traced. Zero is a special case to use the DefaultSampler\")\n\tquotaRemote = flag.Bool(\"quota_remote\", true, \"Enable requesting of quota for IP address sending incoming requests\")\n\tquotaIntermediate = flag.Bool(\"quota_intermediate\", true, \"Enable requesting of quota for intermediate certificates in sumbmitted chains\")\n\thandlerPrefix = flag.String(\"handler_prefix\", \"\", \"If set e.g. to '\/logs' will prefix all handlers that don't define a custom prefix\")\n)\n\n\/\/ nolint:staticcheck\nfunc main() {\n\tflag.Parse()\n\tctx := context.Background()\n\n\tif *maxGetEntries > 0 {\n\t\tctfe.MaxGetEntriesAllowed = *maxGetEntries\n\t}\n\n\tvar cfg *configpb.LogMultiConfig\n\tvar err error\n\t\/\/ Get log config from file before we start. This is a different proto\n\t\/\/ type if we're using a multi backend configuration (no rpcBackend set\n\t\/\/ in flags). The single-backend config is converted to a multi config so\n\t\/\/ they can be treated the same.\n\tif len(*rpcBackend) > 0 {\n\t\tvar cfgs []*configpb.LogConfig\n\t\tif cfgs, err = ctfe.LogConfigFromFile(*logConfig); err == nil {\n\t\t\tcfg = ctfe.ToMultiLogConfig(cfgs, *rpcBackend)\n\t\t}\n\t} else {\n\t\tcfg, err = ctfe.MultiLogConfigFromFile(*logConfig)\n\t}\n\n\tif err != nil {\n\t\tglog.Exitf(\"Failed to read config: %v\", err)\n\t}\n\n\tbeMap, err := ctfe.ValidateLogMultiConfig(cfg)\n\tif err != nil {\n\t\tglog.Exitf(\"Invalid config: %v\", err)\n\t}\n\n\tglog.CopyStandardLogTo(\"WARNING\")\n\tglog.Info(\"**** CT HTTP Server Starting ****\")\n\n\tmetricsAt := *metricsEndpoint\n\tif metricsAt == \"\" {\n\t\tmetricsAt = *httpEndpoint\n\t}\n\n\tdialOpts := []grpc.DialOption{grpc.WithInsecure()}\n\tif len(*etcdServers) > 0 {\n\t\t\/\/ Use etcd to provide endpoint resolution.\n\t\tcfg := clientv3.Config{Endpoints: strings.Split(*etcdServers, \",\"), DialTimeout: 5 * time.Second}\n\t\tclient, err := clientv3.New(cfg)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to connect to etcd at %v: %v\", *etcdServers, err)\n\t\t}\n\t\tetcdRes := &etcdnaming.GRPCResolver{Client: client}\n\t\tdialOpts = append(dialOpts, grpc.WithBalancer(grpc.RoundRobin(etcdRes)))\n\n\t\t\/\/ Also announce ourselves.\n\t\tupdateHTTP := naming.Update{Op: naming.Add, Addr: *httpEndpoint}\n\t\tupdateMetrics := naming.Update{Op: naming.Add, Addr: metricsAt}\n\t\tglog.Infof(\"Announcing our presence in %v with %+v\", *etcdHTTPService, updateHTTP)\n\t\tetcdRes.Update(ctx, *etcdHTTPService, updateHTTP)\n\t\tglog.Infof(\"Announcing our presence in %v with %+v\", *etcdMetricsService, updateMetrics)\n\t\tetcdRes.Update(ctx, *etcdMetricsService, updateMetrics)\n\n\t\tbyeHTTP := naming.Update{Op: naming.Delete, Addr: *httpEndpoint}\n\t\tbyeMetrics := naming.Update{Op: naming.Delete, Addr: metricsAt}\n\t\tdefer func() {\n\t\t\tglog.Infof(\"Removing our presence in %v with %+v\", *etcdHTTPService, byeHTTP)\n\t\t\tetcdRes.Update(ctx, *etcdHTTPService, byeHTTP)\n\t\t\tglog.Infof(\"Removing our presence in %v with %+v\", *etcdMetricsService, byeMetrics)\n\t\t\tetcdRes.Update(ctx, *etcdMetricsService, byeMetrics)\n\t\t}()\n\t} else if strings.Contains(*rpcBackend, \",\") {\n\t\tglog.Infof(\"Using FixedBackendResolver\")\n\t\t\/\/ Use a fixed endpoint resolution that just returns the addresses configured on the command line.\n\t\tres := util.FixedBackendResolver{}\n\t\tdialOpts = append(dialOpts, grpc.WithBalancer(grpc.RoundRobin(res)))\n\t} else {\n\t\tglog.Infof(\"Using regular DNS resolver\")\n\t\tdialOpts = append(dialOpts, grpc.WithBalancerName(roundrobin.Name))\n\t}\n\n\t\/\/ Dial all our log backends.\n\tclientMap := make(map[string]trillian.TrillianLogClient)\n\tfor _, be := range beMap {\n\t\tglog.Infof(\"Dialling backend: %v\", be)\n\t\tif len(beMap) == 1 {\n\t\t\t\/\/ If there's only one of them we use the blocking option as we can't\n\t\t\t\/\/ serve anything until connected.\n\t\t\tdialOpts = append(dialOpts, grpc.WithBlock())\n\t\t}\n\t\tconn, err := grpc.Dial(be.BackendSpec, dialOpts...)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Could not dial RPC server: %v: %v\", be, err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclientMap[be.Name] = trillian.NewTrillianLogClient(conn)\n\t}\n\n\t\/\/ Allow cross-origin requests to all handlers registered on corsMux.\n\t\/\/ This is safe for CT log handlers because the log is public and\n\t\/\/ unauthenticated so cross-site scripting attacks are not a concern.\n\tcorsMux := http.NewServeMux()\n\tcorsHandler := cors.AllowAll().Handler(corsMux)\n\thttp.Handle(\"\/\", corsHandler)\n\n\t\/\/ Register handlers for all the configured logs using the correct RPC\n\t\/\/ client.\n\tfor _, c := range cfg.LogConfigs.Config {\n\t\tinst, err := setupAndRegister(ctx, clientMap[c.LogBackendName], *rpcDeadline, c, corsMux, *handlerPrefix, *maskInternalErrors)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to set up log instance for %+v: %v\", cfg, err)\n\t\t}\n\t\tif *getSTHInterval > 0 {\n\t\t\tgo inst.RunUpdateSTH(ctx, *getSTHInterval)\n\t\t}\n\t}\n\n\t\/\/ Return a 200 on the root, for GCE default health checking :\/\n\tcorsMux.HandleFunc(\"\/\", func(resp http.ResponseWriter, req *http.Request) {\n\t\tif req.URL.Path == \"\/\" {\n\t\t\tresp.WriteHeader(http.StatusOK)\n\t\t} else {\n\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t}\n\t})\n\n\t\/\/ Export a healthz target.\n\tcorsMux.HandleFunc(\"\/healthz\", func(resp http.ResponseWriter, req *http.Request) {\n\t\t\/\/ TODO(al): Wire this up to tell the truth.\n\t\tresp.Write([]byte(\"ok\"))\n\t})\n\n\tif metricsAt != *httpEndpoint {\n\t\t\/\/ Run a separate handler for metrics.\n\t\tgo func() {\n\t\t\tmux := http.NewServeMux()\n\t\t\tmux.Handle(\"\/metrics\", promhttp.Handler())\n\t\t\tmetricsServer := http.Server{Addr: metricsAt, Handler: mux}\n\t\t\terr := metricsServer.ListenAndServe()\n\t\t\tglog.Warningf(\"Metrics server exited: %v\", err)\n\t\t}()\n\t} else {\n\t\t\/\/ Handle metrics on the DefaultServeMux.\n\t\thttp.Handle(\"\/metrics\", promhttp.Handler())\n\t}\n\n\t\/\/ If we're enabling tracing we need to use an instrumented http.Handler.\n\tvar handler http.Handler\n\tif *tracing {\n\t\thandler, err = opencensus.EnableHTTPServerTracing(*tracingProjectID, *tracingPercent)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to initialize stackdriver \/ opencensus tracing: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Bring up the HTTP server and serve until we get a signal not to.\n\tsrv := http.Server{Addr: *httpEndpoint, Handler: handler}\n\tshutdownWG := new(sync.WaitGroup)\n\tgo awaitSignal(func() {\n\t\tshutdownWG.Add(1)\n\t\tdefer shutdownWG.Done()\n\t\t\/\/ Allow 60s for any pending requests to finish then terminate any stragglers\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*60)\n\t\tdefer cancel()\n\t\tglog.Info(\"Shutting down HTTP server...\")\n\t\tsrv.Shutdown(ctx)\n\t\tglog.Info(\"HTTP server shutdown\")\n\t})\n\n\terr = srv.ListenAndServe()\n\tif err != http.ErrServerClosed {\n\t\tglog.Warningf(\"Server exited: %v\", err)\n\t}\n\t\/\/ Wait will only block if the function passed to awaitSignal was called,\n\t\/\/ in which case it'll block until the HTTP server has gracefully shutdown\n\tshutdownWG.Wait()\n\tglog.Flush()\n}\n\n\/\/ awaitSignal waits for standard termination signals, then runs the given\n\/\/ function; it should be run as a separate goroutine.\nfunc awaitSignal(doneFn func()) {\n\t\/\/ Arrange notification for the standard set of signals used to terminate a server\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\t\/\/ Now block main and wait for a signal\n\tsig := <-sigs\n\tglog.Warningf(\"Signal received: %v\", sig)\n\tglog.Flush()\n\n\tdoneFn()\n}\n\nfunc setupAndRegister(ctx context.Context, client trillian.TrillianLogClient, deadline time.Duration, cfg *configpb.LogConfig, mux *http.ServeMux, globalHandlerPrefix string, maskInternalErrors bool) (*ctfe.Instance, error) {\n\tvCfg, err := ctfe.ValidateLogConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := ctfe.InstanceOptions{\n\t\tValidated: vCfg,\n\t\tClient: client,\n\t\tDeadline: deadline,\n\t\tMetricFactory: prometheus.MetricFactory{},\n\t\tRequestLog: new(ctfe.DefaultRequestLog),\n\t\tMaskInternalErrors: maskInternalErrors,\n\t}\n\tif *quotaRemote {\n\t\tglog.Info(\"Enabling quota for requesting IP\")\n\t\topts.RemoteQuotaUser = realip.FromRequest\n\t}\n\tif *quotaIntermediate {\n\t\tglog.Info(\"Enabling quota for intermediate certificates\")\n\t\topts.CertificateQuotaUser = ctfe.QuotaUserForCert\n\t}\n\t\/\/ Full handler pattern will be of the form \"\/logs\/yyz\/ct\/v1\/add-chain\", where \"\/logs\" is the\n\t\/\/ HandlerPrefix and \"yyz\" is the c.Prefix for this particular log. Use the default\n\t\/\/ HandlerPrefix unless the log config overrides it. The custom prefix in\n\t\/\/ the log configuration intended for use in migration scenarios where logs\n\t\/\/ have an existing URL path that differs from the global one. For example\n\t\/\/ if all new logs are served on \"\/logs\/log\/...\" and a previously existing\n\t\/\/ log is at \"\/log\/...\" this is now supported.\n\tlhp := globalHandlerPrefix\n\tif ohPrefix := cfg.OverrideHandlerPrefix; len(ohPrefix) > 0 {\n\t\tglog.Infof(\"Log with prefix: %s is using a custom HandlerPrefix: %s\", cfg.Prefix, ohPrefix)\n\t\tlhp = \"\/\" + strings.Trim(ohPrefix, \"\/\")\n\t}\n\tinst, err := ctfe.SetUpInstance(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor path, handler := range inst.Handlers {\n\t\tmux.Handle(lhp+path, handler)\n\t}\n\treturn inst, nil\n}\n<commit_msg>If realip is determined to be empty string, return clearer sentinel (#624)<commit_after>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The ct_server binary runs the CT personality.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tetcdnaming \"github.com\/coreos\/etcd\/clientv3\/naming\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/ctfe\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/ctfe\/configpb\"\n\t\"github.com\/google\/certificate-transparency-go\/trillian\/util\"\n\t\"github.com\/google\/trillian\"\n\t\"github.com\/google\/trillian\/monitoring\/opencensus\"\n\t\"github.com\/google\/trillian\/monitoring\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/tomasen\/realip\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/balancer\/roundrobin\"\n\t\"google.golang.org\/grpc\/naming\"\n\n\t\/\/ Register PEMKeyFile, PrivateKey and PKCS11Config ProtoHandlers\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/der\/proto\"\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/pem\/proto\"\n\t_ \"github.com\/google\/trillian\/crypto\/keys\/pkcs11\/proto\"\n)\n\n\/\/ Global flags that affect all log instances.\nvar (\n\thttpEndpoint = flag.String(\"http_endpoint\", \"localhost:6962\", \"Endpoint for HTTP (host:port)\")\n\tmetricsEndpoint = flag.String(\"metrics_endpoint\", \"\", \"Endpoint for serving metrics; if left empty, metrics will be visible on --http_endpoint\")\n\trpcBackend = flag.String(\"log_rpc_server\", \"\", \"Backend specification; comma-separated list or etcd service name (if --etcd_servers specified). If unset backends are specified in config (as a LogMultiConfig proto)\")\n\trpcDeadline = flag.Duration(\"rpc_deadline\", time.Second*10, \"Deadline for backend RPC requests\")\n\tgetSTHInterval = flag.Duration(\"get_sth_interval\", time.Second*180, \"Interval between internal get-sth operations (0 to disable)\")\n\tlogConfig = flag.String(\"log_config\", \"\", \"File holding log config in text proto format\")\n\tmaxGetEntries = flag.Int64(\"max_get_entries\", 0, \"Max number of entries we allow in a get-entries request (0=>use default 1000)\")\n\tetcdServers = flag.String(\"etcd_servers\", \"\", \"A comma-separated list of etcd servers\")\n\tetcdHTTPService = flag.String(\"etcd_http_service\", \"trillian-ctfe-http\", \"Service name to announce our HTTP endpoint under\")\n\tetcdMetricsService = flag.String(\"etcd_metrics_service\", \"trillian-ctfe-metrics-http\", \"Service name to announce our HTTP metrics endpoint under\")\n\tmaskInternalErrors = flag.Bool(\"mask_internal_errors\", false, \"Don't return error strings with Internal Server Error HTTP responses\")\n\ttracing = flag.Bool(\"tracing\", false, \"If true opencensus Stackdriver tracing will be enabled. See https:\/\/opencensus.io\/.\")\n\ttracingProjectID = flag.String(\"tracing_project_id\", \"\", \"project ID to pass to stackdriver. Can be empty for GCP, consult docs for other platforms.\")\n\ttracingPercent = flag.Int(\"tracing_percent\", 0, \"Percent of requests to be traced. Zero is a special case to use the DefaultSampler\")\n\tquotaRemote = flag.Bool(\"quota_remote\", true, \"Enable requesting of quota for IP address sending incoming requests\")\n\tquotaIntermediate = flag.Bool(\"quota_intermediate\", true, \"Enable requesting of quota for intermediate certificates in sumbmitted chains\")\n\thandlerPrefix = flag.String(\"handler_prefix\", \"\", \"If set e.g. to '\/logs' will prefix all handlers that don't define a custom prefix\")\n)\n\nconst unknownRemoteUser = \"UNKNOWN_REMOTE\"\n\n\/\/ nolint:staticcheck\nfunc main() {\n\tflag.Parse()\n\tctx := context.Background()\n\n\tif *maxGetEntries > 0 {\n\t\tctfe.MaxGetEntriesAllowed = *maxGetEntries\n\t}\n\n\tvar cfg *configpb.LogMultiConfig\n\tvar err error\n\t\/\/ Get log config from file before we start. This is a different proto\n\t\/\/ type if we're using a multi backend configuration (no rpcBackend set\n\t\/\/ in flags). The single-backend config is converted to a multi config so\n\t\/\/ they can be treated the same.\n\tif len(*rpcBackend) > 0 {\n\t\tvar cfgs []*configpb.LogConfig\n\t\tif cfgs, err = ctfe.LogConfigFromFile(*logConfig); err == nil {\n\t\t\tcfg = ctfe.ToMultiLogConfig(cfgs, *rpcBackend)\n\t\t}\n\t} else {\n\t\tcfg, err = ctfe.MultiLogConfigFromFile(*logConfig)\n\t}\n\n\tif err != nil {\n\t\tglog.Exitf(\"Failed to read config: %v\", err)\n\t}\n\n\tbeMap, err := ctfe.ValidateLogMultiConfig(cfg)\n\tif err != nil {\n\t\tglog.Exitf(\"Invalid config: %v\", err)\n\t}\n\n\tglog.CopyStandardLogTo(\"WARNING\")\n\tglog.Info(\"**** CT HTTP Server Starting ****\")\n\n\tmetricsAt := *metricsEndpoint\n\tif metricsAt == \"\" {\n\t\tmetricsAt = *httpEndpoint\n\t}\n\n\tdialOpts := []grpc.DialOption{grpc.WithInsecure()}\n\tif len(*etcdServers) > 0 {\n\t\t\/\/ Use etcd to provide endpoint resolution.\n\t\tcfg := clientv3.Config{Endpoints: strings.Split(*etcdServers, \",\"), DialTimeout: 5 * time.Second}\n\t\tclient, err := clientv3.New(cfg)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to connect to etcd at %v: %v\", *etcdServers, err)\n\t\t}\n\t\tetcdRes := &etcdnaming.GRPCResolver{Client: client}\n\t\tdialOpts = append(dialOpts, grpc.WithBalancer(grpc.RoundRobin(etcdRes)))\n\n\t\t\/\/ Also announce ourselves.\n\t\tupdateHTTP := naming.Update{Op: naming.Add, Addr: *httpEndpoint}\n\t\tupdateMetrics := naming.Update{Op: naming.Add, Addr: metricsAt}\n\t\tglog.Infof(\"Announcing our presence in %v with %+v\", *etcdHTTPService, updateHTTP)\n\t\tetcdRes.Update(ctx, *etcdHTTPService, updateHTTP)\n\t\tglog.Infof(\"Announcing our presence in %v with %+v\", *etcdMetricsService, updateMetrics)\n\t\tetcdRes.Update(ctx, *etcdMetricsService, updateMetrics)\n\n\t\tbyeHTTP := naming.Update{Op: naming.Delete, Addr: *httpEndpoint}\n\t\tbyeMetrics := naming.Update{Op: naming.Delete, Addr: metricsAt}\n\t\tdefer func() {\n\t\t\tglog.Infof(\"Removing our presence in %v with %+v\", *etcdHTTPService, byeHTTP)\n\t\t\tetcdRes.Update(ctx, *etcdHTTPService, byeHTTP)\n\t\t\tglog.Infof(\"Removing our presence in %v with %+v\", *etcdMetricsService, byeMetrics)\n\t\t\tetcdRes.Update(ctx, *etcdMetricsService, byeMetrics)\n\t\t}()\n\t} else if strings.Contains(*rpcBackend, \",\") {\n\t\tglog.Infof(\"Using FixedBackendResolver\")\n\t\t\/\/ Use a fixed endpoint resolution that just returns the addresses configured on the command line.\n\t\tres := util.FixedBackendResolver{}\n\t\tdialOpts = append(dialOpts, grpc.WithBalancer(grpc.RoundRobin(res)))\n\t} else {\n\t\tglog.Infof(\"Using regular DNS resolver\")\n\t\tdialOpts = append(dialOpts, grpc.WithBalancerName(roundrobin.Name))\n\t}\n\n\t\/\/ Dial all our log backends.\n\tclientMap := make(map[string]trillian.TrillianLogClient)\n\tfor _, be := range beMap {\n\t\tglog.Infof(\"Dialling backend: %v\", be)\n\t\tif len(beMap) == 1 {\n\t\t\t\/\/ If there's only one of them we use the blocking option as we can't\n\t\t\t\/\/ serve anything until connected.\n\t\t\tdialOpts = append(dialOpts, grpc.WithBlock())\n\t\t}\n\t\tconn, err := grpc.Dial(be.BackendSpec, dialOpts...)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Could not dial RPC server: %v: %v\", be, err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclientMap[be.Name] = trillian.NewTrillianLogClient(conn)\n\t}\n\n\t\/\/ Allow cross-origin requests to all handlers registered on corsMux.\n\t\/\/ This is safe for CT log handlers because the log is public and\n\t\/\/ unauthenticated so cross-site scripting attacks are not a concern.\n\tcorsMux := http.NewServeMux()\n\tcorsHandler := cors.AllowAll().Handler(corsMux)\n\thttp.Handle(\"\/\", corsHandler)\n\n\t\/\/ Register handlers for all the configured logs using the correct RPC\n\t\/\/ client.\n\tfor _, c := range cfg.LogConfigs.Config {\n\t\tinst, err := setupAndRegister(ctx, clientMap[c.LogBackendName], *rpcDeadline, c, corsMux, *handlerPrefix, *maskInternalErrors)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to set up log instance for %+v: %v\", cfg, err)\n\t\t}\n\t\tif *getSTHInterval > 0 {\n\t\t\tgo inst.RunUpdateSTH(ctx, *getSTHInterval)\n\t\t}\n\t}\n\n\t\/\/ Return a 200 on the root, for GCE default health checking :\/\n\tcorsMux.HandleFunc(\"\/\", func(resp http.ResponseWriter, req *http.Request) {\n\t\tif req.URL.Path == \"\/\" {\n\t\t\tresp.WriteHeader(http.StatusOK)\n\t\t} else {\n\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t}\n\t})\n\n\t\/\/ Export a healthz target.\n\tcorsMux.HandleFunc(\"\/healthz\", func(resp http.ResponseWriter, req *http.Request) {\n\t\t\/\/ TODO(al): Wire this up to tell the truth.\n\t\tresp.Write([]byte(\"ok\"))\n\t})\n\n\tif metricsAt != *httpEndpoint {\n\t\t\/\/ Run a separate handler for metrics.\n\t\tgo func() {\n\t\t\tmux := http.NewServeMux()\n\t\t\tmux.Handle(\"\/metrics\", promhttp.Handler())\n\t\t\tmetricsServer := http.Server{Addr: metricsAt, Handler: mux}\n\t\t\terr := metricsServer.ListenAndServe()\n\t\t\tglog.Warningf(\"Metrics server exited: %v\", err)\n\t\t}()\n\t} else {\n\t\t\/\/ Handle metrics on the DefaultServeMux.\n\t\thttp.Handle(\"\/metrics\", promhttp.Handler())\n\t}\n\n\t\/\/ If we're enabling tracing we need to use an instrumented http.Handler.\n\tvar handler http.Handler\n\tif *tracing {\n\t\thandler, err = opencensus.EnableHTTPServerTracing(*tracingProjectID, *tracingPercent)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"Failed to initialize stackdriver \/ opencensus tracing: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Bring up the HTTP server and serve until we get a signal not to.\n\tsrv := http.Server{Addr: *httpEndpoint, Handler: handler}\n\tshutdownWG := new(sync.WaitGroup)\n\tgo awaitSignal(func() {\n\t\tshutdownWG.Add(1)\n\t\tdefer shutdownWG.Done()\n\t\t\/\/ Allow 60s for any pending requests to finish then terminate any stragglers\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*60)\n\t\tdefer cancel()\n\t\tglog.Info(\"Shutting down HTTP server...\")\n\t\tsrv.Shutdown(ctx)\n\t\tglog.Info(\"HTTP server shutdown\")\n\t})\n\n\terr = srv.ListenAndServe()\n\tif err != http.ErrServerClosed {\n\t\tglog.Warningf(\"Server exited: %v\", err)\n\t}\n\t\/\/ Wait will only block if the function passed to awaitSignal was called,\n\t\/\/ in which case it'll block until the HTTP server has gracefully shutdown\n\tshutdownWG.Wait()\n\tglog.Flush()\n}\n\n\/\/ awaitSignal waits for standard termination signals, then runs the given\n\/\/ function; it should be run as a separate goroutine.\nfunc awaitSignal(doneFn func()) {\n\t\/\/ Arrange notification for the standard set of signals used to terminate a server\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\t\/\/ Now block main and wait for a signal\n\tsig := <-sigs\n\tglog.Warningf(\"Signal received: %v\", sig)\n\tglog.Flush()\n\n\tdoneFn()\n}\n\nfunc setupAndRegister(ctx context.Context, client trillian.TrillianLogClient, deadline time.Duration, cfg *configpb.LogConfig, mux *http.ServeMux, globalHandlerPrefix string, maskInternalErrors bool) (*ctfe.Instance, error) {\n\tvCfg, err := ctfe.ValidateLogConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := ctfe.InstanceOptions{\n\t\tValidated: vCfg,\n\t\tClient: client,\n\t\tDeadline: deadline,\n\t\tMetricFactory: prometheus.MetricFactory{},\n\t\tRequestLog: new(ctfe.DefaultRequestLog),\n\t\tMaskInternalErrors: maskInternalErrors,\n\t}\n\tif *quotaRemote {\n\t\tglog.Info(\"Enabling quota for requesting IP\")\n\t\topts.RemoteQuotaUser = func(r *http.Request) string {\n\t\t\tvar remoteUser = realip.FromRequest(r)\n\t\t\tif len(remoteUser) == 0 {\n\t\t\t\treturn unknownRemoteUser\n\t\t\t}\n\t\t\treturn remoteUser\n\t\t}\n\t}\n\tif *quotaIntermediate {\n\t\tglog.Info(\"Enabling quota for intermediate certificates\")\n\t\topts.CertificateQuotaUser = ctfe.QuotaUserForCert\n\t}\n\t\/\/ Full handler pattern will be of the form \"\/logs\/yyz\/ct\/v1\/add-chain\", where \"\/logs\" is the\n\t\/\/ HandlerPrefix and \"yyz\" is the c.Prefix for this particular log. Use the default\n\t\/\/ HandlerPrefix unless the log config overrides it. The custom prefix in\n\t\/\/ the log configuration intended for use in migration scenarios where logs\n\t\/\/ have an existing URL path that differs from the global one. For example\n\t\/\/ if all new logs are served on \"\/logs\/log\/...\" and a previously existing\n\t\/\/ log is at \"\/log\/...\" this is now supported.\n\tlhp := globalHandlerPrefix\n\tif ohPrefix := cfg.OverrideHandlerPrefix; len(ohPrefix) > 0 {\n\t\tglog.Infof(\"Log with prefix: %s is using a custom HandlerPrefix: %s\", cfg.Prefix, ohPrefix)\n\t\tlhp = \"\/\" + strings.Trim(ohPrefix, \"\/\")\n\t}\n\tinst, err := ctfe.SetUpInstance(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor path, handler := range inst.Handlers {\n\t\tmux.Handle(lhp+path, handler)\n\t}\n\treturn inst, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package compare\n\nimport (\n\t\"testing\"\n)\n\n\nfunc TestCompare(t *testing.T){\n\ttt := []struct{\n\t\tname string\n\t\tactual interface{}\n\t\texpected interface{}\n\t\tisEqual bool\n\t}{\n\t\t{\"a equals a\", \"a\", \"a\", true},\n\t\t{\"a not equals b\", \"a\", \"b\", false},\n\t\t{\"bool equals bool\", true, true, true},\n\t\t{\"bool not equals bool\", true, false, false},\n\t\t{\"1 equals 1\", 1, 1, true},\n\t\t{\"1 not equals 1\", 1, 0, false},\n\t\t{\"slice equals slice\", []string{\"one\", \"two\", \"three\"}, []string{\"one\", \"two\", \"three\"}, true},\n\t\t{\"slice not equals slice\", []string{\"one\", \"two\", \"three\"}, []string{\"one\", \"four\", \"three\"}, false},\n\t\t{\"map equals map\", map[string]int{\"one\": 1, \"two\": 2}, map[string]int{\"one\": 1, \"two\": 2}, true},\n\t\t{\"map not equals map\", map[string]int{\"one\": 1, \"two\": 2}, map[string]int{\"one\": 1, \"two\": 3}, false},\n\t}\n\n\tfor _, tc := range tt {\n\t\tt.Run(tc.name, func(t *testing.T){\n\t\t\tisEqual, _ := IsEqual(tc.actual,tc.expected)\n\t\t\tif isEqual != tc.isEqual{\n\t\t\t\tt.Fatalf(\"%v should be %v got %v\\n\", tc.name, tc.isEqual, isEqual )\n\t\t\t}\n\t\t})\n\t}\n\n}<commit_msg>Added missing Header\/License<commit_after>\/\/ Copyright © 2017 The Kubicorn Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage compare\n\nimport (\n\t\"testing\"\n)\n\n\nfunc TestCompare(t *testing.T){\n\ttt := []struct{\n\t\tname string\n\t\tactual interface{}\n\t\texpected interface{}\n\t\tisEqual bool\n\t}{\n\t\t{\"a equals a\", \"a\", \"a\", true},\n\t\t{\"a not equals b\", \"a\", \"b\", false},\n\t\t{\"bool equals bool\", true, true, true},\n\t\t{\"bool not equals bool\", true, false, false},\n\t\t{\"1 equals 1\", 1, 1, true},\n\t\t{\"1 not equals 1\", 1, 0, false},\n\t\t{\"slice equals slice\", []string{\"one\", \"two\", \"three\"}, []string{\"one\", \"two\", \"three\"}, true},\n\t\t{\"slice not equals slice\", []string{\"one\", \"two\", \"three\"}, []string{\"one\", \"four\", \"three\"}, false},\n\t\t{\"map equals map\", map[string]int{\"one\": 1, \"two\": 2}, map[string]int{\"one\": 1, \"two\": 2}, true},\n\t\t{\"map not equals map\", map[string]int{\"one\": 1, \"two\": 2}, map[string]int{\"one\": 1, \"two\": 3}, false},\n\t}\n\n\tfor _, tc := range tt {\n\t\tt.Run(tc.name, func(t *testing.T){\n\t\t\tisEqual, _ := IsEqual(tc.actual,tc.expected)\n\t\t\tif isEqual != tc.isEqual{\n\t\t\t\tt.Fatalf(\"%v should be %v got %v\\n\", tc.name, tc.isEqual, isEqual )\n\t\t\t}\n\t\t})\n\t}\n\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage eventpb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"google.golang.org\/protobuf\/proto\"\n\t\"google.golang.org\/protobuf\/types\/known\/timestamppb\"\n\n\t\"go.chromium.org\/luci\/common\/clock\"\n\t\"go.chromium.org\/luci\/gae\/service\/datastore\"\n\t\"go.chromium.org\/luci\/server\/tq\"\n\n\t\"go.chromium.org\/luci\/cv\/internal\/common\"\n)\n\nconst (\n\t\/\/ ManageRunTaskClass is the ID of ManageRunTask Class.\n\tManageRunTaskClass = \"manage-run\"\n\t\/\/ ManageRunLongOpTaskClass is the ID of the ManageRunLongOp Class.\n\tManageRunLongOpTaskClass = \"manage-run-long-op\"\n\t\/\/ taskInterval is target frequency of executions of ManageRunTask.\n\t\/\/\n\t\/\/ See Dispatch() for details.\n\ttaskInterval = time.Second\n)\n\n\/\/ TasksBinding binds Run Manager tasks to a TQ Dispatcher.\n\/\/\n\/\/ This struct exists to separate task creation and handling,\n\/\/ which in turns avoids circular dependency.\ntype TasksBinding struct {\n\tManageRun tq.TaskClassRef\n\tKickManage tq.TaskClassRef\n\tManageRunLongOp tq.TaskClassRef\n\tTQDispatcher *tq.Dispatcher\n}\n\n\/\/ Register registers tasks with the given TQ Dispatcher.\nfunc Register(tqd *tq.Dispatcher) TasksBinding {\n\tt := TasksBinding{\n\t\tManageRun: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: ManageRunTaskClass,\n\t\t\tPrototype: &ManageRunTask{},\n\t\t\tQueue: \"manage-run\",\n\t\t\tKind: tq.NonTransactional,\n\t\t\tQuietOnError: true,\n\t\t\tQuiet: true,\n\t\t}),\n\t\tKickManage: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: fmt.Sprintf(\"kick-%s\", ManageRunTaskClass),\n\t\t\tPrototype: &KickManageRunTask{},\n\t\t\tQueue: \"kick-manage-run\",\n\t\t\tKind: tq.Transactional,\n\t\t\tQuietOnError: true,\n\t\t\tQuiet: true,\n\t\t}),\n\t\tManageRunLongOp: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: ManageRunLongOpTaskClass,\n\t\t\tPrototype: &ManageRunLongOpTask{},\n\t\t\tQueue: \"manage-run-long-op\",\n\t\t\tKind: tq.Transactional,\n\t\t\t\/\/ TODO(tandrii): switch to quiet once stable.\n\t\t\tQuietOnError: false,\n\t\t\tQuiet: false,\n\t\t}),\n\t\tTQDispatcher: tqd,\n\t}\n\tt.KickManage.AttachHandler(func(ctx context.Context, payload proto.Message) error {\n\t\ttask := payload.(*KickManageRunTask)\n\t\tvar eta time.Time\n\t\tif t := task.GetEta(); t != nil {\n\t\t\teta = t.AsTime()\n\t\t}\n\t\terr := t.Dispatch(ctx, task.GetRunId(), eta)\n\t\treturn common.TQifyError(ctx, err)\n\t})\n\treturn t\n}\n\n\/\/ Dispatch ensures invocation of RunManager via ManageRunTask.\n\/\/\n\/\/ RunManager will be invoked at approximately no earlier than both:\n\/\/ * eta time (if given)\n\/\/ * next possible.\n\/\/\n\/\/ To avoid actually dispatching TQ tasks in tests, use runtest.MockDispatch().\nfunc (tr TasksBinding) Dispatch(ctx context.Context, runID string, eta time.Time) error {\n\tmock, mocked := ctx.Value(&mockDispatcherContextKey).(func(string, time.Time))\n\n\tif datastore.CurrentTransaction(ctx) != nil {\n\t\tpayload := &KickManageRunTask{RunId: runID}\n\t\tif !eta.IsZero() {\n\t\t\tpayload.Eta = timestamppb.New(eta)\n\t\t}\n\n\t\tif mocked {\n\t\t\tmock(runID, eta)\n\t\t\treturn nil\n\t\t}\n\t\treturn tr.TQDispatcher.AddTask(ctx, &tq.Task{\n\t\t\tDeduplicationKey: \"\", \/\/ not allowed in a transaction\n\t\t\tPayload: payload,\n\t\t})\n\t}\n\n\t\/\/ If actual local clock is more than `clockDrift` behind, the \"next\" computed\n\t\/\/ ManageRunTask moment might be already executing, meaning task dedup will\n\t\/\/ ensure no new task will be scheduled AND the already executing run\n\t\/\/ might not have read the Event that was just written.\n\t\/\/ Thus, for safety, this should be large, however, will also leads to higher\n\t\/\/ latency of event processing of non-busy RunManager.\n\t\/\/ TODO(tandrii\/yiwzhang): this can be reduced significantly once safety\n\t\/\/ \"ping\" events are originated from Config import cron tasks.\n\tconst clockDrift = 100 * time.Millisecond\n\tnow := clock.Now(ctx).Add(clockDrift) \/\/ Use the worst possible time.\n\tif eta.IsZero() || eta.Before(now) {\n\t\teta = now\n\t}\n\teta = eta.Truncate(taskInterval).Add(taskInterval)\n\n\tif mocked {\n\t\tmock(runID, eta)\n\t\treturn nil\n\t}\n\treturn tr.TQDispatcher.AddTask(ctx, &tq.Task{\n\t\tTitle: runID,\n\t\tDeduplicationKey: fmt.Sprintf(\"%s\\n%d\", runID, eta.UnixNano()),\n\t\tETA: eta,\n\t\tPayload: &ManageRunTask{RunId: runID},\n\t})\n}\n\nvar mockDispatcherContextKey = \"eventpb.mockDispatcher\"\n\n\/\/ InstallMockDispatcher is used in test to run tests emitting RM events without\n\/\/ actually dispatching RM tasks.\n\/\/\n\/\/ See runtest.MockDispatch().\nfunc InstallMockDispatcher(ctx context.Context, f func(runID string, eta time.Time)) context.Context {\n\treturn context.WithValue(ctx, &mockDispatcherContextKey, f)\n}\n<commit_msg>[cv] make TQ handler for long ops quiet.<commit_after>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage eventpb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"google.golang.org\/protobuf\/proto\"\n\t\"google.golang.org\/protobuf\/types\/known\/timestamppb\"\n\n\t\"go.chromium.org\/luci\/common\/clock\"\n\t\"go.chromium.org\/luci\/gae\/service\/datastore\"\n\t\"go.chromium.org\/luci\/server\/tq\"\n\n\t\"go.chromium.org\/luci\/cv\/internal\/common\"\n)\n\nconst (\n\t\/\/ ManageRunTaskClass is the ID of ManageRunTask Class.\n\tManageRunTaskClass = \"manage-run\"\n\t\/\/ ManageRunLongOpTaskClass is the ID of the ManageRunLongOp Class.\n\tManageRunLongOpTaskClass = \"manage-run-long-op\"\n\t\/\/ taskInterval is target frequency of executions of ManageRunTask.\n\t\/\/\n\t\/\/ See Dispatch() for details.\n\ttaskInterval = time.Second\n)\n\n\/\/ TasksBinding binds Run Manager tasks to a TQ Dispatcher.\n\/\/\n\/\/ This struct exists to separate task creation and handling,\n\/\/ which in turns avoids circular dependency.\ntype TasksBinding struct {\n\tManageRun tq.TaskClassRef\n\tKickManage tq.TaskClassRef\n\tManageRunLongOp tq.TaskClassRef\n\tTQDispatcher *tq.Dispatcher\n}\n\n\/\/ Register registers tasks with the given TQ Dispatcher.\nfunc Register(tqd *tq.Dispatcher) TasksBinding {\n\tt := TasksBinding{\n\t\tManageRun: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: ManageRunTaskClass,\n\t\t\tPrototype: &ManageRunTask{},\n\t\t\tQueue: \"manage-run\",\n\t\t\tKind: tq.NonTransactional,\n\t\t\tQuietOnError: true,\n\t\t\tQuiet: true,\n\t\t}),\n\t\tKickManage: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: fmt.Sprintf(\"kick-%s\", ManageRunTaskClass),\n\t\t\tPrototype: &KickManageRunTask{},\n\t\t\tQueue: \"kick-manage-run\",\n\t\t\tKind: tq.Transactional,\n\t\t\tQuietOnError: true,\n\t\t\tQuiet: true,\n\t\t}),\n\t\tManageRunLongOp: tqd.RegisterTaskClass(tq.TaskClass{\n\t\t\tID: ManageRunLongOpTaskClass,\n\t\t\tPrototype: &ManageRunLongOpTask{},\n\t\t\tQueue: \"manage-run-long-op\",\n\t\t\tKind: tq.Transactional,\n\t\t\tQuietOnError: true,\n\t\t\tQuiet: true,\n\t\t}),\n\t\tTQDispatcher: tqd,\n\t}\n\tt.KickManage.AttachHandler(func(ctx context.Context, payload proto.Message) error {\n\t\ttask := payload.(*KickManageRunTask)\n\t\tvar eta time.Time\n\t\tif t := task.GetEta(); t != nil {\n\t\t\teta = t.AsTime()\n\t\t}\n\t\terr := t.Dispatch(ctx, task.GetRunId(), eta)\n\t\treturn common.TQifyError(ctx, err)\n\t})\n\treturn t\n}\n\n\/\/ Dispatch ensures invocation of RunManager via ManageRunTask.\n\/\/\n\/\/ RunManager will be invoked at approximately no earlier than both:\n\/\/ * eta time (if given)\n\/\/ * next possible.\n\/\/\n\/\/ To avoid actually dispatching TQ tasks in tests, use runtest.MockDispatch().\nfunc (tr TasksBinding) Dispatch(ctx context.Context, runID string, eta time.Time) error {\n\tmock, mocked := ctx.Value(&mockDispatcherContextKey).(func(string, time.Time))\n\n\tif datastore.CurrentTransaction(ctx) != nil {\n\t\tpayload := &KickManageRunTask{RunId: runID}\n\t\tif !eta.IsZero() {\n\t\t\tpayload.Eta = timestamppb.New(eta)\n\t\t}\n\n\t\tif mocked {\n\t\t\tmock(runID, eta)\n\t\t\treturn nil\n\t\t}\n\t\treturn tr.TQDispatcher.AddTask(ctx, &tq.Task{\n\t\t\tDeduplicationKey: \"\", \/\/ not allowed in a transaction\n\t\t\tPayload: payload,\n\t\t})\n\t}\n\n\t\/\/ If actual local clock is more than `clockDrift` behind, the \"next\" computed\n\t\/\/ ManageRunTask moment might be already executing, meaning task dedup will\n\t\/\/ ensure no new task will be scheduled AND the already executing run\n\t\/\/ might not have read the Event that was just written.\n\t\/\/ Thus, for safety, this should be large, however, will also leads to higher\n\t\/\/ latency of event processing of non-busy RunManager.\n\t\/\/ TODO(tandrii\/yiwzhang): this can be reduced significantly once safety\n\t\/\/ \"ping\" events are originated from Config import cron tasks.\n\tconst clockDrift = 100 * time.Millisecond\n\tnow := clock.Now(ctx).Add(clockDrift) \/\/ Use the worst possible time.\n\tif eta.IsZero() || eta.Before(now) {\n\t\teta = now\n\t}\n\teta = eta.Truncate(taskInterval).Add(taskInterval)\n\n\tif mocked {\n\t\tmock(runID, eta)\n\t\treturn nil\n\t}\n\treturn tr.TQDispatcher.AddTask(ctx, &tq.Task{\n\t\tTitle: runID,\n\t\tDeduplicationKey: fmt.Sprintf(\"%s\\n%d\", runID, eta.UnixNano()),\n\t\tETA: eta,\n\t\tPayload: &ManageRunTask{RunId: runID},\n\t})\n}\n\nvar mockDispatcherContextKey = \"eventpb.mockDispatcher\"\n\n\/\/ InstallMockDispatcher is used in test to run tests emitting RM events without\n\/\/ actually dispatching RM tasks.\n\/\/\n\/\/ See runtest.MockDispatch().\nfunc InstallMockDispatcher(ctx context.Context, f func(runID string, eta time.Time)) context.Context {\n\treturn context.WithValue(ctx, &mockDispatcherContextKey, f)\n}\n<|endoftext|>"} {"text":"<commit_before>package e2e\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/controller\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nvar _ = time.UTC\nvar _ = fmt.Print\n\nfunc TestOAuthGoogleLogin(t *testing.T) {\n\tif os.Getenv(\"CIRCLECI\") != \"\" {\n\t\tt.Skipf(\"Skip because it can't render Google login page.\")\n\t}\n\ta := assert.New(t)\n\trequire := require.New(t)\n\tdriver := newWebDriver()\n\terr := driver.Start()\n\ta.Nil(err)\n\tdefer driver.Stop()\n\n\tpage, err := driver.NewPage()\n\trequire.Nil(err)\n\trequire.Nil(page.Navigate(server.URL))\n\t\/\/time.Sleep(10 * time.Second)\n\n\t\/\/ Check trackingId is set\n\tcookies, err := page.GetCookies()\n\ta.Nil(err)\n\ttrackingIDCookie, err := getCookie(cookies, controller.TrackingIDCookieName)\n\tfmt.Printf(\"trackingId = %v\\n\", trackingIDCookie.Value)\n\ta.Nil(err)\n\ta.NotEmpty(trackingIDCookie.Value)\n\n\tsignupButton := page.FindByXPath(\"\/\/a[contains(@class, 'button-signup')]\")\n\tsignupURL, err := signupButton.Attribute(\"href\")\n\trequire.Nil(err)\n\n\trequire.Nil(page.Navigate(signupURL))\n\tbuttonGoogle := page.FindByXPath(\"\/\/button[contains(@class, 'button-google')]\")\n\trequire.Nil(buttonGoogle.Click())\n\t\/\/time.Sleep(10 * time.Second)\n\n\tgoogleAccount := os.Getenv(\"E2E_GOOGLE_ACCOUNT\")\n\terr = page.Find(\"#Email\").Fill(googleAccount)\n\trequire.Nil(err)\n\tpage.Find(\"#gaia_loginform\").Submit()\n\trequire.Nil(err)\n\n\ttime.Sleep(time.Second * 1)\n\tpage.Find(\"#Passwd\").Fill(os.Getenv(\"E2E_GOOGLE_PASSWORD\"))\n\trequire.Nil(err)\n\tpage.Find(\"#gaia_loginform\").Submit()\n\trequire.Nil(err)\n\n\ttime.Sleep(time.Second * 3)\n\terr = page.Find(\"#submit_approve_access\").Click()\n\trequire.Nil(err)\n\t\/\/time.Sleep(time.Second * 10)\n\t\/\/ TODO: Check HTML content\n\n\tcookies, err = page.GetCookies()\n\trequire.Nil(err)\n\tapiToken := getAPIToken(cookies)\n\ta.NotEmpty(apiToken)\n\n\tuser, err := model.NewUserService(db).FindByUserAPIToken(apiToken)\n\trequire.Nil(err)\n\ta.Equal(googleAccount, user.Email)\n}\n\nfunc TestOAuthGoogleLogout(t *testing.T) {\n\tif os.Getenv(\"CIRCLECI\") != \"\" {\n\t\tt.Skipf(\"Skip because PhantomJS can't SetCookie.\")\n\t}\n\n\ta := assert.New(t)\n\n\t_, apiToken, err := createUserAndLogin(\"oinume\", randomEmail(\"oinume\"), util.RandomString(16))\n\ta.Nil(err)\n\n\tdriver := newWebDriver()\n\ta.Nil(driver.Start())\n\tdefer driver.Stop()\n\n\tpage, err := driver.NewPage()\n\ta.Nil(err)\n\ta.Nil(page.Navigate(server.URL))\n\tu, err := url.Parse(server.URL)\n\ta.Nil(err)\n\tcookie := &http.Cookie{\n\t\tName: controller.APITokenCookieName,\n\t\tDomain: u.Host,\n\t\tValue: apiToken,\n\t\tPath: \"\/\",\n\t\tExpires: time.Now().Add(model.UserAPITokenExpiration),\n\t\tHttpOnly: false,\n\t}\n\ta.Nil(page.SetCookie(cookie))\n\ta.Nil(page.Navigate(server.URL + \"\/me\"))\n\n\ta.Nil(page.Navigate(server.URL + \"\/me\/logout\"))\n\tuserAPITokenService := model.NewUserAPITokenService(db)\n\t_, err = userAPITokenService.FindByPK(apiToken)\n\ta.IsType(&errors.NotFound{}, err)\n}\n\nfunc getAPIToken(cookies []*http.Cookie) string {\n\tfor _, cookie := range cookies {\n\t\tif cookie.Name == \"apiToken\" {\n\t\t\treturn cookie.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>Fix TestOAuthGoogleLogin<commit_after>package e2e\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/controller\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nvar _ = time.UTC\nvar _ = fmt.Print\n\nfunc TestOAuthGoogleLogin(t *testing.T) {\n\tif os.Getenv(\"CIRCLECI\") != \"\" {\n\t\tt.Skipf(\"Skip because it can't render Google login page.\")\n\t}\n\ta := assert.New(t)\n\trequire := require.New(t)\n\tdriver := newWebDriver()\n\terr := driver.Start()\n\ta.Nil(err)\n\tdefer driver.Stop()\n\n\tpage, err := driver.NewPage()\n\trequire.Nil(err)\n\trequire.Nil(page.Navigate(server.URL))\n\t\/\/time.Sleep(15 * time.Second)\n\n\t\/\/ Check trackingId is set\n\tcookies, err := page.GetCookies()\n\ta.Nil(err)\n\ttrackingIDCookie, err := getCookie(cookies, controller.TrackingIDCookieName)\n\tfmt.Printf(\"trackingId = %v\\n\", trackingIDCookie.Value)\n\ta.Nil(err)\n\ta.NotEmpty(trackingIDCookie.Value)\n\n\tsignupButton := page.FindByXPath(\"\/\/a[contains(@class, 'button-signup')]\")\n\tsignupURL, err := signupButton.Attribute(\"href\")\n\trequire.Nil(err)\n\n\trequire.Nil(page.Navigate(signupURL))\n\tbuttonGoogle := page.FindByXPath(\"\/\/button[contains(@class, 'button-google')]\")\n\trequire.Nil(buttonGoogle.Click())\n\t\/\/time.Sleep(15 * time.Second)\n\n\ttime.Sleep(time.Second * 1)\n\tgoogleAccount := os.Getenv(\"E2E_GOOGLE_ACCOUNT\")\n\terr = page.FindByXPath(\"\/\/input[@name='identifier']\").Fill(googleAccount)\n\trequire.Nil(err)\n\terr = page.FindByXPath(\"\/\/div[@id='identifierNext']\/content\/span\").Click()\n\trequire.Nil(err)\n\n\ttime.Sleep(time.Second * 1)\n\terr = page.FindByXPath(\"\/\/input[@name='password']\").Fill(os.Getenv(\"E2E_GOOGLE_PASSWORD\"))\n\trequire.Nil(err)\n\terr = page.FindByXPath(\"\/\/div[@id='passwordNext']\/content\/span\").Click()\n\trequire.Nil(err)\n\n\ttime.Sleep(time.Second * 3)\n\n\tcookies, err = page.GetCookies()\n\trequire.Nil(err)\n\tapiToken := getAPIToken(cookies)\n\ta.NotEmpty(apiToken)\n\n\tuser, err := model.NewUserService(db).FindByUserAPIToken(apiToken)\n\trequire.Nil(err)\n\ta.Equal(googleAccount, user.Email)\n\n\t\/\/ TODO: Check HTML content\n}\n\nfunc TestOAuthGoogleLogout(t *testing.T) {\n\tif os.Getenv(\"CIRCLECI\") != \"\" {\n\t\tt.Skipf(\"Skip because PhantomJS can't SetCookie.\")\n\t}\n\n\ta := assert.New(t)\n\n\t_, apiToken, err := createUserAndLogin(\"oinume\", randomEmail(\"oinume\"), util.RandomString(16))\n\ta.Nil(err)\n\n\tdriver := newWebDriver()\n\ta.Nil(driver.Start())\n\tdefer driver.Stop()\n\n\tpage, err := driver.NewPage()\n\ta.Nil(err)\n\ta.Nil(page.Navigate(server.URL))\n\tu, err := url.Parse(server.URL)\n\ta.Nil(err)\n\tcookie := &http.Cookie{\n\t\tName: controller.APITokenCookieName,\n\t\tDomain: u.Host,\n\t\tValue: apiToken,\n\t\tPath: \"\/\",\n\t\tExpires: time.Now().Add(model.UserAPITokenExpiration),\n\t\tHttpOnly: false,\n\t}\n\ta.Nil(page.SetCookie(cookie))\n\ta.Nil(page.Navigate(server.URL + \"\/me\"))\n\n\ta.Nil(page.Navigate(server.URL + \"\/me\/logout\"))\n\tuserAPITokenService := model.NewUserAPITokenService(db)\n\t_, err = userAPITokenService.FindByPK(apiToken)\n\ta.IsType(&errors.NotFound{}, err)\n}\n\nfunc getAPIToken(cookies []*http.Cookie) string {\n\tfor _, cookie := range cookies {\n\t\tif cookie.Name == \"apiToken\" {\n\t\t\treturn cookie.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package snowdrift\n\nimport (\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"bitbucket.org\/ww\/goautoneg\"\n\t\"github.com\/codegangsta\/martini\"\n\t\"github.com\/codegangsta\/martini-contrib\/binding\"\n\t\"github.com\/codegangsta\/martini-contrib\/render\"\n\t\"github.com\/speps\/go-hashids\"\n)\n\ntype Config struct {\n\tBackend Backend\n\tHashSalt string\n\tURLPrefix string\n\tRootRedirect string\n\tReportErr func(err error, req *http.Request)\n}\n\nfunc New(c *Config) *martini.Martini {\n\tr := martini.NewRouter()\n\tm := martini.New()\n\tm.Action(r.Handle)\n\n\tctx := &context{\n\t\tBackend: c.Backend,\n\t\tShortHash: hashids.New(),\n\t\tLongHash: hashids.New(),\n\t\tURLPrefix: c.URLPrefix,\n\t\tReportErr: c.ReportErr,\n\t}\n\tif c.HashSalt == \"\" {\n\t\tc.HashSalt = \"salt\"\n\t}\n\tctx.ShortHash.Salt = c.HashSalt\n\tctx.LongHash.Salt = c.HashSalt\n\tctx.LongHash.MinLength = 12\n\tm.Map(ctx)\n\tm.Use(render.Renderer())\n\n\tr.Get(\"\/\", func(r *http.Request, w http.ResponseWriter) {\n\t\thttp.Redirect(w, r, c.RootRedirect, http.StatusFound)\n\t})\n\tr.Post(\"\/\", binding.Bind(link{}), createLink)\n\tr.Get(\"\/:code\", getLink)\n\n\treturn m\n}\n\nvar ErrNotFound = errors.New(\"snowdrift: not found\")\nvar ErrURLExists = errors.New(\"snowdrift: url already exists\")\nvar ErrCodeExists = errors.New(\"snowdrift: code already exists\")\n\ntype Backend interface {\n\tAdd(url, digest, code string) error\n\tGetCode(digest string) (string, error)\n\tGetURL(code string) (string, error)\n\tNextID() (int, error)\n}\n\ntype link struct {\n\tLongURL string `json:\"long_url\" riak:\"long_url\"`\n\tShortURL string `json:\"short_url\" riak:\"-\"`\n\tObscure *bool `json:\"obscure,omitempty\" riak:\"-\"`\n}\n\ntype context struct {\n\tBackend\n\tShortHash *hashids.HashID\n\tLongHash *hashids.HashID\n\tURLPrefix string\n\tReportErr func(err error, req *http.Request)\n}\n\nfunc urlDigest(url string) string {\n\tdigest := sha512.Sum512([]byte(url))\n\treturn hex.EncodeToString(digest[:32])\n}\n\nfunc createLink(c *context, link link, r render.Render, req *http.Request) {\n\tu, err := url.Parse(link.LongURL)\n\tif err != nil || u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\tr.Error(400)\n\t\treturn\n\t}\n\n\tdigest := urlDigest(link.LongURL)\n\n\tcode, err := c.GetCode(digest)\n\tif err == nil {\n\t\tlink.ShortURL = c.URLPrefix + code\n\t\tlink.Obscure = nil\n\t\tr.JSON(200, link)\n\t\treturn\n\t}\n\n\tid, err := c.NextID()\n\tif err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tif link.Obscure != nil && *link.Obscure {\n\t\tcode = c.LongHash.Encrypt([]int{id})\n\t} else {\n\t\tcode = c.ShortHash.Encrypt([]int{id})\n\t}\n\n\tif err := c.Add(link.LongURL, digest, code); err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tlink.Obscure = nil\n\tlink.ShortURL = c.URLPrefix + code\n\tr.JSON(200, link)\n}\n\nfunc getLink(c *context, params martini.Params, r render.Render, req *http.Request, w http.ResponseWriter) {\n\turl, err := c.GetURL(params[\"code\"])\n\tif err == ErrNotFound {\n\t\tr.Error(404)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tcontentType := goautoneg.Negotiate(req.Header.Get(\"Accept\"), []string{\"text\/html\", \"application\/json\"})\n\tif contentType == \"application\/json\" {\n\t\tr.JSON(200, link{LongURL: url, ShortURL: c.URLPrefix + params[\"code\"]})\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusMovedPermanently)\n}\n<commit_msg>Use http helpers for 404 and redirect<commit_after>package snowdrift\n\nimport (\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"bitbucket.org\/ww\/goautoneg\"\n\t\"github.com\/codegangsta\/martini\"\n\t\"github.com\/codegangsta\/martini-contrib\/binding\"\n\t\"github.com\/codegangsta\/martini-contrib\/render\"\n\t\"github.com\/speps\/go-hashids\"\n)\n\ntype Config struct {\n\tBackend Backend\n\tHashSalt string\n\tURLPrefix string\n\tRootRedirect string\n\tReportErr func(err error, req *http.Request)\n}\n\nfunc New(c *Config) *martini.Martini {\n\tr := martini.NewRouter()\n\tm := martini.New()\n\tm.Action(r.Handle)\n\n\tctx := &context{\n\t\tBackend: c.Backend,\n\t\tShortHash: hashids.New(),\n\t\tLongHash: hashids.New(),\n\t\tURLPrefix: c.URLPrefix,\n\t\tReportErr: c.ReportErr,\n\t}\n\tif c.HashSalt == \"\" {\n\t\tc.HashSalt = \"salt\"\n\t}\n\tctx.ShortHash.Salt = c.HashSalt\n\tctx.LongHash.Salt = c.HashSalt\n\tctx.LongHash.MinLength = 12\n\tm.Map(ctx)\n\tm.Use(render.Renderer())\n\n\tr.Get(\"\/\", func(r *http.Request, w http.ResponseWriter) {\n\t\thttp.Redirect(w, r, c.RootRedirect, http.StatusFound)\n\t})\n\tr.Post(\"\/\", binding.Bind(link{}), createLink)\n\tr.Get(\"\/:code\", getLink)\n\n\treturn m\n}\n\nvar ErrNotFound = errors.New(\"snowdrift: not found\")\nvar ErrURLExists = errors.New(\"snowdrift: url already exists\")\nvar ErrCodeExists = errors.New(\"snowdrift: code already exists\")\n\ntype Backend interface {\n\tAdd(url, digest, code string) error\n\tGetCode(digest string) (string, error)\n\tGetURL(code string) (string, error)\n\tNextID() (int, error)\n}\n\ntype link struct {\n\tLongURL string `json:\"long_url\" riak:\"long_url\"`\n\tShortURL string `json:\"short_url\" riak:\"-\"`\n\tObscure *bool `json:\"obscure,omitempty\" riak:\"-\"`\n}\n\ntype context struct {\n\tBackend\n\tShortHash *hashids.HashID\n\tLongHash *hashids.HashID\n\tURLPrefix string\n\tReportErr func(err error, req *http.Request)\n}\n\nfunc urlDigest(url string) string {\n\tdigest := sha512.Sum512([]byte(url))\n\treturn hex.EncodeToString(digest[:32])\n}\n\nfunc createLink(c *context, link link, r render.Render, req *http.Request) {\n\tu, err := url.Parse(link.LongURL)\n\tif err != nil || u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\tr.Error(400)\n\t\treturn\n\t}\n\n\tdigest := urlDigest(link.LongURL)\n\n\tcode, err := c.GetCode(digest)\n\tif err == nil {\n\t\tlink.ShortURL = c.URLPrefix + code\n\t\tlink.Obscure = nil\n\t\tr.JSON(200, link)\n\t\treturn\n\t}\n\n\tid, err := c.NextID()\n\tif err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tif link.Obscure != nil && *link.Obscure {\n\t\tcode = c.LongHash.Encrypt([]int{id})\n\t} else {\n\t\tcode = c.ShortHash.Encrypt([]int{id})\n\t}\n\n\tif err := c.Add(link.LongURL, digest, code); err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tlink.Obscure = nil\n\tlink.ShortURL = c.URLPrefix + code\n\tr.JSON(200, link)\n}\n\nfunc getLink(c *context, params martini.Params, r render.Render, req *http.Request, w http.ResponseWriter) {\n\turl, err := c.GetURL(params[\"code\"])\n\tif err == ErrNotFound {\n\t\thttp.NotFound(w, req)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tr.Error(500)\n\t\tif c.ReportErr != nil {\n\t\t\tc.ReportErr(err, req)\n\t\t}\n\t\treturn\n\t}\n\tcontentType := goautoneg.Negotiate(req.Header.Get(\"Accept\"), []string{\"text\/html\", \"application\/json\"})\n\tif contentType == \"application\/json\" {\n\t\tr.JSON(200, link{LongURL: url, ShortURL: c.URLPrefix + params[\"code\"]})\n\t\treturn\n\t}\n\n\thttp.Redirect(w, req, url, http.StatusMovedPermanently)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/vanng822\/uploader\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nconst (\n\tSTORAGE_TYPE_FILE = \"file\"\n)\n\ntype StorageConfig struct {\n\tType string\n\tConfiguration string\n}\n\ntype EndpointConfig struct {\n\tEndpoint string\n\tFileField string\n\tStorage *StorageConfig\n}\n\ntype Config struct {\n\tHost string\n\tPort int\n\tEndpoints []*EndpointConfig\n}\n\nfunc LoadConfig(filename string) *Config {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconf := Config{}\n\terr = decoder.Decode(&conf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &conf\n}\n\nfunc main() {\n\n\tvar (\n\t\tconfig string\n\t\thost string\n\t\tport int\n\t)\n\n\tflag.StringVar(&host, \"h\", \"127.0.0.1\", \"Host to listen on\")\n\tflag.IntVar(&port, \"p\", 8080, \"Port number to listen on\")\n\tflag.StringVar(&config, \"c\", \".\/config\/app.json\", \"Path to configurations\")\n\n\tconf := LoadConfig(config)\n\n\tm := martini.Classic()\n\n\tfor _, endpoint := range conf.Endpoints {\n\n\t\tgo func(endpoint *EndpointConfig) {\n\t\t\tvar storage uploader.ImageStorage\n\t\t\tswitch endpoint.Storage.Type {\n\t\t\tcase STORAGE_TYPE_FILE:\n\t\t\t\tstorage = uploader.NewImageStorageFile(endpoint.Storage.Configuration)\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"Unsupported storage type %s\", endpoint.Storage.Type))\n\t\t\t}\n\t\t\tu := uploader.NewUploader(storage)\n\t\t\thandler := uploader.NewHandler(u)\n\n\t\t\tm.Group(endpoint.Endpoint, func(r martini.Router) {\n\t\t\t\tr.Get(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\thandler.HandleGet(res, params[\"filename\"])\n\t\t\t\t})\n\t\t\t\tr.Post(\"\/\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\tfile, _, err := req.FormFile(endpoint.FileField)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thandler.HandlePost(res, file)\n\t\t\t\t})\n\t\t\t\tr.Put(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\tfile, _, err := req.FormFile(endpoint.FileField)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thandler.HandlePut(res, file, params[\"filename\"])\n\t\t\t\t})\n\t\t\t\tr.Delete(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\thandler.HandleDelete(res, params[\"filename\"])\n\t\t\t\t})\n\t\t\t})\n\t\t}(endpoint)\n\t}\n\n\thttp.ListenAndServe(fmt.Sprintf(\"%s:%d\", conf.Host, conf.Port), m)\n}\n<commit_msg>Use storage factory<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/vanng822\/uploader\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nconst (\n\tSTORAGE_TYPE_FILE = \"file\"\n)\n\ntype StorageConfig struct {\n\tType string\n\tConfiguration string\n}\n\ntype EndpointConfig struct {\n\tEndpoint string\n\tFileField string\n\tStorage *StorageConfig\n}\n\ntype Config struct {\n\tHost string\n\tPort int\n\tEndpoints []*EndpointConfig\n}\n\nfunc LoadConfig(filename string) *Config {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconf := Config{}\n\terr = decoder.Decode(&conf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &conf\n}\n\nfunc main() {\n\n\tvar (\n\t\tconfig string\n\t\thost string\n\t\tport int\n\t)\n\n\tflag.StringVar(&host, \"h\", \"\", \"Host to listen on\")\n\tflag.IntVar(&port, \"p\", 0, \"Port number to listen on\")\n\tflag.StringVar(&config, \"c\", \".\/config\/app.json\", \"Path to configurations\")\n\tflag.Parse()\n\t\n\tconf := LoadConfig(config)\n\t\n\tif host != \"\" {\n\t\tconf.Host = host\n\t}\n\t\n\tif port != 0 {\n\t\tconf.Port = port\n\t}\n\n\tm := martini.Classic()\n\tfor _, endpoint := range conf.Endpoints {\n\n\t\tgo func(endpoint *EndpointConfig) {\n\t\t\tu := uploader.NewUploader(uploader.GetStorage(endpoint.Storage.Type, endpoint.Storage.Configuration))\n\t\t\thandler := uploader.NewHandler(u)\n\n\t\t\tm.Group(endpoint.Endpoint, func(r martini.Router) {\n\t\t\t\tr.Get(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\thandler.HandleGet(res, params[\"filename\"])\n\t\t\t\t})\n\t\t\t\tr.Post(\"\/\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\tfile, _, err := req.FormFile(endpoint.FileField)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thandler.HandlePost(res, file)\n\t\t\t\t})\n\t\t\t\tr.Put(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\tfile, _, err := req.FormFile(endpoint.FileField)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thandler.HandlePut(res, file, params[\"filename\"])\n\t\t\t\t})\n\t\t\t\tr.Delete(\"\/:filename\", func(res http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\t\t\t\thandler.HandleDelete(res, params[\"filename\"])\n\t\t\t\t})\n\t\t\t})\n\t\t}(endpoint)\n\t}\n\n\thttp.ListenAndServe(fmt.Sprintf(\"%s:%d\", conf.Host, conf.Port), m)\n}\n<|endoftext|>"} {"text":"<commit_before>package lazycache\n\nimport (\n\t\"fmt\"\n\tkitlog \"github.com\/go-kit\/kit\/log\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\ntype LocalImageStore struct {\n\tLocalRoot string\n\tUrlRoot string\n\tlogger kitlog.Logger\n\tcache map[string]int\n\n\tmutex \t\tsync.Mutex\n\n\tStats struct {\n\t\tcacheRequests int\n\t\tcacheMisses int\n\t}\n}\n\nfunc (store *LocalImageStore) Has(key string) bool {\n\tfilename := store.LocalRoot + key\n\n\tstore.mutex.Lock()\n\tdefer store.mutex.Unlock()\n\n\t_, has := store.cache[filename]\n\tif has {\n\t\tstore.logger.Log(\"level\", \"debug\", \"msg\", fmt.Sprintf(\"Image exists in cache: %s\", filename))\n\t\tstore.cache[filename]++\n\t\treturn true\n\t}\n\n\tstore.logger.Log(\"level\", \"debug\", \"msg\", fmt.Sprintf(\"Checking local image store for \\\"%s\\\"\", filename))\n\t_, err := os.Stat(filename)\n\tif err == nil {\n\t\tstore.cache[filename] = 1\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (store *LocalImageStore) Url(key string) (string, bool) {\n\n\tstore.Stats.cacheRequests++\n\n\tif store.Has(key) {\n\t\treturn store.UrlRoot + key, true\n\t} else {\n\t\tstore.Stats.cacheMisses++\n\t\treturn \"\", false\n\t}\n}\n\nfunc RecursiveMkdir(dir string) {\n\t_, err := os.Stat(dir)\n\tif err != nil {\n\t\tRecursiveMkdir(path.Dir(dir))\n\n\t\tos.Mkdir(dir, 0755)\n\t}\n}\n\nfunc (store *LocalImageStore) Store(key string, data io.Reader) {\n\tfilename := store.LocalRoot + key\n\tRecursiveMkdir(path.Dir(filename))\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tstore.logger.Log(\"msg\", err.Error(), \"type\", \"error\")\n\t}\n\t\n\tstore.mutex.Lock()\n\tdefer store.mutex.Unlock()\n\n\tstore.cache[filename] = 1\n\tio.Copy(f, data)\n}\n\nfunc (store LocalImageStore) Retrieve(key string) (io.Reader, error) {\n\n\tf, err := os.Open(store.LocalRoot + key)\n\n\treturn f, err\n}\n\nfunc (store LocalImageStore) Statistics() interface{} {\n\treturn struct {\n\t\tType string\n\t\tCacheRequests int `json: \"cache_requests\"`\n\t\tCacheMisses int `json: \"cache_misses\"`\n\t}{\n\t\tType: \"local_storage\",\n\t\tCacheRequests: store.Stats.cacheRequests,\n\t\tCacheMisses: store.Stats.cacheMisses,\n\t}\n}\n\nfunc (store LocalImageStore) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlocalPath := path.Join(store.LocalRoot, r.URL.Path)\n\n\tif _, err := os.Stat(localPath); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Could not find \\\"%s\\\"\", localPath), 404)\n\t} else {\n\t\thttp.ServeFile(w, r, localPath)\n\t}\n}\n\nfunc CreateLocalStore(localRoot string, addr string) *LocalImageStore {\n\n\t\/\/ port := 7080\n\t\/\/ addr := fmt.Sprintf(\"%s:%d\", host, port)\n\n\tstore := &LocalImageStore{\n\t\tLocalRoot: localRoot,\n\t\tUrlRoot: addr,\n\t\tlogger: kitlog.With(DefaultLogger, \"module\", \"LocalImageStore\"),\n\t\tcache: make(map[string]int),\n\t}\n\n\tDefaultLogger.Log(\"msg\",\n\t\tfmt.Sprintf(\"Creating local image store at \\\"%s\\\", exposed at \\\"%s\\\"\\n\", store.LocalRoot, store.UrlRoot))\n\n\ts := &http.Server{\n\t\tAddr: addr,\n\t\tHandler: store,\n\t}\n\n\tgo s.ListenAndServe()\n\n\treturn store\n}\n<commit_msg>gofmt on local_image_store.go<commit_after>package lazycache\n\nimport (\n\t\"fmt\"\n\tkitlog \"github.com\/go-kit\/kit\/log\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n)\n\ntype LocalImageStore struct {\n\tLocalRoot string\n\tUrlRoot string\n\tlogger kitlog.Logger\n\tcache map[string]int\n\n\tmutex sync.Mutex\n\n\tStats struct {\n\t\tcacheRequests int\n\t\tcacheMisses int\n\t}\n}\n\nfunc (store *LocalImageStore) Has(key string) bool {\n\tfilename := store.LocalRoot + key\n\n\tstore.mutex.Lock()\n\tdefer store.mutex.Unlock()\n\n\t_, has := store.cache[filename]\n\tif has {\n\t\tstore.logger.Log(\"level\", \"debug\", \"msg\", fmt.Sprintf(\"Image exists in cache: %s\", filename))\n\t\tstore.cache[filename]++\n\t\treturn true\n\t}\n\n\tstore.logger.Log(\"level\", \"debug\", \"msg\", fmt.Sprintf(\"Checking local image store for \\\"%s\\\"\", filename))\n\t_, err := os.Stat(filename)\n\tif err == nil {\n\t\tstore.cache[filename] = 1\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (store *LocalImageStore) Url(key string) (string, bool) {\n\n\tstore.Stats.cacheRequests++\n\n\tif store.Has(key) {\n\t\treturn store.UrlRoot + key, true\n\t} else {\n\t\tstore.Stats.cacheMisses++\n\t\treturn \"\", false\n\t}\n}\n\nfunc RecursiveMkdir(dir string) {\n\t_, err := os.Stat(dir)\n\tif err != nil {\n\t\tRecursiveMkdir(path.Dir(dir))\n\n\t\tos.Mkdir(dir, 0755)\n\t}\n}\n\nfunc (store *LocalImageStore) Store(key string, data io.Reader) {\n\tfilename := store.LocalRoot + key\n\tRecursiveMkdir(path.Dir(filename))\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tstore.logger.Log(\"msg\", err.Error(), \"type\", \"error\")\n\t}\n\n\tstore.mutex.Lock()\n\tdefer store.mutex.Unlock()\n\n\tstore.cache[filename] = 1\n\tio.Copy(f, data)\n}\n\nfunc (store LocalImageStore) Retrieve(key string) (io.Reader, error) {\n\n\tf, err := os.Open(store.LocalRoot + key)\n\n\treturn f, err\n}\n\nfunc (store LocalImageStore) Statistics() interface{} {\n\treturn struct {\n\t\tType string\n\t\tCacheRequests int `json: \"cache_requests\"`\n\t\tCacheMisses int `json: \"cache_misses\"`\n\t}{\n\t\tType: \"local_storage\",\n\t\tCacheRequests: store.Stats.cacheRequests,\n\t\tCacheMisses: store.Stats.cacheMisses,\n\t}\n}\n\nfunc (store LocalImageStore) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlocalPath := path.Join(store.LocalRoot, r.URL.Path)\n\n\tif _, err := os.Stat(localPath); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Could not find \\\"%s\\\"\", localPath), 404)\n\t} else {\n\t\thttp.ServeFile(w, r, localPath)\n\t}\n}\n\nfunc CreateLocalStore(localRoot string, addr string) *LocalImageStore {\n\n\t\/\/ port := 7080\n\t\/\/ addr := fmt.Sprintf(\"%s:%d\", host, port)\n\n\tstore := &LocalImageStore{\n\t\tLocalRoot: localRoot,\n\t\tUrlRoot: addr,\n\t\tlogger: kitlog.With(DefaultLogger, \"module\", \"LocalImageStore\"),\n\t\tcache: make(map[string]int),\n\t}\n\n\tDefaultLogger.Log(\"msg\",\n\t\tfmt.Sprintf(\"Creating local image store at \\\"%s\\\", exposed at \\\"%s\\\"\\n\", store.LocalRoot, store.UrlRoot))\n\n\ts := &http.Server{\n\t\tAddr: addr,\n\t\tHandler: store,\n\t}\n\n\tgo s.ListenAndServe()\n\n\treturn store\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 M-Lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build appengine\n\n\/\/ The data package provides the mlab-ns2 datastore sturcture.\npackage data\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/TODO: Data interface and Get,Put,Cache,Rm functions?\n\/\/TODO: only index the columns that are needed\n\/\/TODO: add json tags\n\ntype SliverTool struct {\n\tToolID string `datastore:\"tool_id\"`\n\tSliceID string `datastore:\"slice_id\"`\n\tSiteID string `datastore:\"site_id\"`\n\tServerID string `datastore:\"server_id\"`\n\tServerPort string `datastore:\"server_port\"`\n\tHTTPPort string `datastore:\"http_port\"` \/\/ For web-based tools, this is used to build the URL the client is redirected to: http:\/\/fqdn[ipv4|ipv6]:http_port\n\tFQDN string `datastore:\"fqdn\"` \/\/ Unannotated fqdn. v4 and v6 versions can be built if necessary.\n\tSliverIPv4 string `datastore:\"sliver_ipv4\"` \/\/ IP addresses. Can be 'off'\n\tSliverIPv6 string `datastore:\"sliver_ipv6\"` \/\/ IP addresses. Can be 'off'\n\tStatusIPv4 string `datastore:\"status_ipv4\"` \/\/ These can have the following values: online and offline.\n\tStatusIPv6 string `datastore:\"status_ipv6\"` \/\/ These can have the following values: online and offline.\n\tUpdateRequestTimestamp int64 `datastore:\"update_request_timestamp\"` \/\/ To avoid an additional lookup in the datastore\n\tLatitude float64 `datastore:\"latitude\"` \/\/ To avoid an additional lookup in the datastore\n\tLongitude float64 `datastore:\"longitude\"` \/\/ To avoid an additional lookup in the datastore\n\tCity string `datastore:\"city\"` \/\/ To avoid an additional lookup in the datastore\n\tCountry string `datastore:\"country\"` \/\/ To avoid an additional lookup in the datastore\n\tWhen time.Time `datastore:\"when\"` \/\/ Date representing the last modification time of this entity.\n}\n\ntype Site struct {\n\tSiteID string `datastore:\"site_id\"`\n\tCity string `datastore:\"city\"`\n\tCountry string `datastore:\"country\"`\n\tLatitude float64 `datastore:\"latitude\"` \/\/ Latitude of the airport that uniquely identifies an M-Lab site.\n\tLongitude float64 `datastore:\"longitude\"` \/\/ Longitude of the airport that uniquely identifies an M-Lab site.\n\tMetro []string `datastore:\"metro\"` \/\/ List of sites and metros, e.g., [ath, ath01].\n\tRegistrationTimestamp int64 `datastore:\"registration_timestamp\"` \/\/ Date representing the registration time (the first time a new site is added to mlab-ns).\n\tWhen time.Time `datastore:\"when\"` \/\/ Date representing the last modification time of this entity.\n}\n\n\/\/ MMLocation is a format that comes from pre-processed geolocation data. It is\n\/\/ intended to be used with the iptrie location map.\n\/\/\n\/\/ The utility takes maximind data as input, pre-processes the data so that it\n\/\/ has just the amount of detail that we need and produces a binary file that is\n\/\/ uploaded to the GAE instance. On upload the handler will copy it into the\n\/\/ blobstore and update the datastore with this format.\n\/\/\n\/\/TODO: reference to the location map compression utility.\n\/\/TODO: IPv6 addresses are truncated to a \/64 before inclusion?\ntype MMLocation struct {\n\tRangeStart int64 \/\/ first IP address in the block\n\tRangeEnd int64 \/\/ last IP address in the block\n\tLatitude int \/\/ latitude rounded to the nearest integer\n\tLongitude int \/\/ longitude rounded to the nearest integer\n}\n\n\/\/XXX deprecated\ntype MaxmindCityLocation struct {\n\tLocationID string `datastore:\"location_id\"`\n\tCountry string `datastore:\"country\"`\n\tRegion string `datastore:\"region\"`\n\tCity string `datastore:\"city\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype MaxmindCityBlock struct {\n\tStartIPNum int64 `datastore:\"start_ip_num\"`\n\tEndIPNum int64 `datastore:\"end_ip_num\"`\n\tLocationID string `datastore:\"location_id\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype MaxmindCityBlockv6 struct {\n\tStartIPNum int64 `datastore:\"start_ip_num\"`\n\tEndIPNum int64 `datastore:\"end_ip_num\"`\n\tCountry string `datastore:\"country\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype CountryCode struct {\n\tName string `datastore:\"name\"`\n\tAlpha2Code string `datastore:\"alpha2_code\"`\n\tAlpha3Code string `datastore:\"alpha3_code\"`\n\tNumericCode int64 `datastore:\"numeric_code\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\ntype EncryptionKey struct {\n\tKeyID string `datastore:\"key_id\"` \/\/ Name of the key (by default is 'admin').\n\tEncryptionKey string `datastore:\"encryption_key\"` \/\/ 16 bytes encryption key (AES).\n}\n\ntype Slice struct {\n\tSliceID string `datastore:\"slice_id\"`\n\tToolID string `datastore:\"tool_id\"`\n}\n\ntype Tool struct {\n\tSliceID string `datastore:\"slice_id\"`\n\tToolID string `datastore:\"tool_id\"`\n\tHTTPPort string `datastore:\"http_port\"`\n}\n\n\/\/TODO(gavaletz): generalize this to credentials?\ntype Nagios struct {\n\tKeyID string `datastore:\"key_id\"`\n\tUsername string `datastore:\"username\"`\n\tPassword string `datastore:\"password\"`\n\tURL string `datastore:\"url\"`\n}\n\ntype Ping struct {\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tToolID string `datastore:\"tool_id\"`\n\tAddressFamily string `datastore:\"address_family\"`\n\tTime float64 `datastore:\"time\"`\n}\n\nfunc GetSliverToolID(toolID, sliceID, serverID, siteID string) string {\n\treturn fmt.Sprintf(\"%s-%s-%s-%s\", toolID, sliceID, serverID, siteID)\n}\n<commit_msg>data: Added json tags to Site struct<commit_after>\/\/ Copyright 2013 M-Lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build appengine\n\n\/\/ The data package provides the mlab-ns2 datastore sturcture.\npackage data\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/TODO: Data interface and Get,Put,Cache,Rm functions?\n\/\/TODO: only index the columns that are needed\n\/\/TODO: add json tags\n\ntype SliverTool struct {\n\tToolID string `datastore:\"tool_id\"`\n\tSliceID string `datastore:\"slice_id\"`\n\tSiteID string `datastore:\"site_id\"`\n\tServerID string `datastore:\"server_id\"`\n\tServerPort string `datastore:\"server_port\"`\n\tHTTPPort string `datastore:\"http_port\"` \/\/ For web-based tools, this is used to build the URL the client is redirected to: http:\/\/fqdn[ipv4|ipv6]:http_port\n\tFQDN string `datastore:\"fqdn\"` \/\/ Unannotated fqdn. v4 and v6 versions can be built if necessary.\n\tSliverIPv4 string `datastore:\"sliver_ipv4\"` \/\/ IP addresses. Can be 'off'\n\tSliverIPv6 string `datastore:\"sliver_ipv6\"` \/\/ IP addresses. Can be 'off'\n\tStatusIPv4 string `datastore:\"status_ipv4\"` \/\/ These can have the following values: online and offline.\n\tStatusIPv6 string `datastore:\"status_ipv6\"` \/\/ These can have the following values: online and offline.\n\tUpdateRequestTimestamp int64 `datastore:\"update_request_timestamp\"` \/\/ To avoid an additional lookup in the datastore\n\tLatitude float64 `datastore:\"latitude\"` \/\/ To avoid an additional lookup in the datastore\n\tLongitude float64 `datastore:\"longitude\"` \/\/ To avoid an additional lookup in the datastore\n\tCity string `datastore:\"city\"` \/\/ To avoid an additional lookup in the datastore\n\tCountry string `datastore:\"country\"` \/\/ To avoid an additional lookup in the datastore\n\tWhen time.Time `datastore:\"when\"` \/\/ Date representing the last modification time of this entity.\n}\n\ntype Site struct {\n\tSiteID string `datastore:\"site_id\" json:\"site\"`\n\tCity string `datastore:\"city\" json:\"city\"`\n\tCountry string `datastore:\"country\" json:\"country\"`\n\tLatitude float64 `datastore:\"latitude\" json:\"latitude\"` \/\/ Latitude of the airport that uniquely identifies an M-Lab site.\n\tLongitude float64 `datastore:\"longitude\" json:\"longitude\"` \/\/ Longitude of the airport that uniquely identifies an M-Lab site.\n\tMetro []string `datastore:\"metro\" json:\"metro\"` \/\/ List of sites and metros, e.g., [ath, ath01].\n\tRegistrationTimestamp int64 `datastore:\"registration_timestamp\" json:\"created\"` \/\/ Date representing the registration time (the first time a new site is added to mlab-ns).\n\tWhen time.Time `datastore:\"when\" json:\"-\"` \/\/ Date representing the last modification time of this entity.\n}\n\n\/\/ MMLocation is a format that comes from pre-processed geolocation data. It is\n\/\/ intended to be used with the iptrie location map.\n\/\/\n\/\/ The utility takes maximind data as input, pre-processes the data so that it\n\/\/ has just the amount of detail that we need and produces a binary file that is\n\/\/ uploaded to the GAE instance. On upload the handler will copy it into the\n\/\/ blobstore and update the datastore with this format.\n\/\/\n\/\/TODO: reference to the location map compression utility.\n\/\/TODO: IPv6 addresses are truncated to a \/64 before inclusion?\ntype MMLocation struct {\n\tRangeStart int64 \/\/ first IP address in the block\n\tRangeEnd int64 \/\/ last IP address in the block\n\tLatitude int \/\/ latitude rounded to the nearest integer\n\tLongitude int \/\/ longitude rounded to the nearest integer\n}\n\n\/\/XXX deprecated\ntype MaxmindCityLocation struct {\n\tLocationID string `datastore:\"location_id\"`\n\tCountry string `datastore:\"country\"`\n\tRegion string `datastore:\"region\"`\n\tCity string `datastore:\"city\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype MaxmindCityBlock struct {\n\tStartIPNum int64 `datastore:\"start_ip_num\"`\n\tEndIPNum int64 `datastore:\"end_ip_num\"`\n\tLocationID string `datastore:\"location_id\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype MaxmindCityBlockv6 struct {\n\tStartIPNum int64 `datastore:\"start_ip_num\"`\n\tEndIPNum int64 `datastore:\"end_ip_num\"`\n\tCountry string `datastore:\"country\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\n\/\/XXX deprecated\ntype CountryCode struct {\n\tName string `datastore:\"name\"`\n\tAlpha2Code string `datastore:\"alpha2_code\"`\n\tAlpha3Code string `datastore:\"alpha3_code\"`\n\tNumericCode int64 `datastore:\"numeric_code\"`\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tWhen time.Time `datastore:\"when\"`\n}\n\ntype EncryptionKey struct {\n\tKeyID string `datastore:\"key_id\"` \/\/ Name of the key (by default is 'admin').\n\tEncryptionKey string `datastore:\"encryption_key\"` \/\/ 16 bytes encryption key (AES).\n}\n\ntype Slice struct {\n\tSliceID string `datastore:\"slice_id\"`\n\tToolID string `datastore:\"tool_id\"`\n}\n\ntype Tool struct {\n\tSliceID string `datastore:\"slice_id\"`\n\tToolID string `datastore:\"tool_id\"`\n\tHTTPPort string `datastore:\"http_port\"`\n}\n\n\/\/TODO(gavaletz): generalize this to credentials?\ntype Nagios struct {\n\tKeyID string `datastore:\"key_id\"`\n\tUsername string `datastore:\"username\"`\n\tPassword string `datastore:\"password\"`\n\tURL string `datastore:\"url\"`\n}\n\ntype Ping struct {\n\tLatitude float64 `datastore:\"latitude\"`\n\tLongitude float64 `datastore:\"longitude\"`\n\tToolID string `datastore:\"tool_id\"`\n\tAddressFamily string `datastore:\"address_family\"`\n\tTime float64 `datastore:\"time\"`\n}\n\nfunc GetSliverToolID(toolID, sliceID, serverID, siteID string) string {\n\treturn fmt.Sprintf(\"%s-%s-%s-%s\", toolID, sliceID, serverID, siteID)\n}\n<|endoftext|>"} {"text":"<commit_before>package locket_test\n\nimport (\n\t\"code.cloudfoundry.org\/consuladapter\/consulrunner\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\tconsulStartingPort int\n\tconsulRunner *consulrunner.ClusterRunner\n)\n\nconst (\n\tdefaultScheme = \"http\"\n)\n\nfunc TestLocket(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Locket Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\tconsulStartingPort = 5001 + config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength\n\tconsulRunner = consulrunner.NewClusterRunner(consulStartingPort, 1, defaultScheme)\n\n\tconsulRunner.Start()\n\tconsulRunner.WaitUntilReady()\n})\n\nvar _ = BeforeEach(func() {\n\tconsulRunner.Reset()\n})\n\nvar _ = AfterSuite(func() {\n\tconsulRunner.Stop()\n})\n<commit_msg>pass ClusterRunnerConfig to NewClusterRunner<commit_after>package locket_test\n\nimport (\n\t\"code.cloudfoundry.org\/consuladapter\/consulrunner\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar (\n\tconsulStartingPort int\n\tconsulRunner *consulrunner.ClusterRunner\n)\n\nconst (\n\tdefaultScheme = \"http\"\n)\n\nfunc TestLocket(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Locket Suite\")\n}\n\nvar _ = BeforeSuite(func() {\n\tconsulStartingPort = 5001 + config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength\n\tconsulRunner = consulrunner.NewClusterRunner(\n\t\tconsulrunner.ClusterRunnerConfig{\n\t\t\tStartingPort: consulStartingPort,\n\t\t\tNumNodes: 1,\n\t\t\tScheme: defaultScheme,\n\t\t},\n\t)\n\n\tconsulRunner.Start()\n\tconsulRunner.WaitUntilReady()\n})\n\nvar _ = BeforeEach(func() {\n\tconsulRunner.Reset()\n})\n\nvar _ = AfterSuite(func() {\n\tconsulRunner.Stop()\n})\n<|endoftext|>"} {"text":"<commit_before>package revert\n\n\/\/ Reverter is a helper type to manage revert functions.\ntype Reverter struct {\n\trevertFuncs []func()\n}\n\n\/\/ New returns a new Reverter.\nfunc New() *Reverter {\n\treturn &Reverter{}\n}\n\n\/\/ Add adds a revert function to the list to be run when Revert() is called.\nfunc (r *Reverter) Add(f func()) {\n\tr.revertFuncs = append(r.revertFuncs, f)\n}\n\n\/\/ Fail runs any revert functions in the reverse order they were added.\n\/\/ Should be used with defer or when a task has encountered an error and needs to be reverted.\nfunc (r *Reverter) Fail() {\n\tfuncCount := len(r.revertFuncs)\n\tfor k := range r.revertFuncs {\n\t\t\/\/ Run the revert functions in reverse order.\n\t\tk = funcCount - 1 - k\n\t\tr.revertFuncs[k]()\n\t}\n}\n\n\/\/ Success clears the revert functions previously added.\n\/\/ Should be called on successful completion of a task to prevent revert functions from being run.\nfunc (r *Reverter) Success() {\n\tr.revertFuncs = nil\n}\n\n\/\/ Clone returns a copy of the reverter with the current set of revert functions added.\n\/\/ This can be used if you want to return a reverting function to an external caller but do not want to actually\n\/\/ execute the previously deferred reverter.Fail() function.\nfunc (r *Reverter) Clone() *Reverter {\n\trNew := New()\n\trNew.revertFuncs = append(make([]func(), 0, len(r.revertFuncs)), r.revertFuncs...)\n\n\treturn rNew\n}\n<commit_msg>lxd\/revert\/revert: Add Hook function type<commit_after>package revert\n\n\/\/ Hook is a function that can be added to the revert via the Add() function.\n\/\/ These will be run in the reverse order that they were added if the reverter's Fail() function is called.\ntype Hook func()\n\n\/\/ Reverter is a helper type to manage revert functions.\ntype Reverter struct {\n\trevertFuncs []Hook\n}\n\n\/\/ New returns a new Reverter.\nfunc New() *Reverter {\n\treturn &Reverter{}\n}\n\n\/\/ Add adds a revert function to the list to be run when Revert() is called.\nfunc (r *Reverter) Add(f Hook) {\n\tr.revertFuncs = append(r.revertFuncs, f)\n}\n\n\/\/ Fail runs any revert functions in the reverse order they were added.\n\/\/ Should be used with defer or when a task has encountered an error and needs to be reverted.\nfunc (r *Reverter) Fail() {\n\tfuncCount := len(r.revertFuncs)\n\tfor k := range r.revertFuncs {\n\t\t\/\/ Run the revert functions in reverse order.\n\t\tk = funcCount - 1 - k\n\t\tr.revertFuncs[k]()\n\t}\n}\n\n\/\/ Success clears the revert functions previously added.\n\/\/ Should be called on successful completion of a task to prevent revert functions from being run.\nfunc (r *Reverter) Success() {\n\tr.revertFuncs = nil\n}\n\n\/\/ Clone returns a copy of the reverter with the current set of revert functions added.\n\/\/ This can be used if you want to return a reverting function to an external caller but do not want to actually\n\/\/ execute the previously deferred reverter.Fail() function.\nfunc (r *Reverter) Clone() *Reverter {\n\trNew := New()\n\trNew.revertFuncs = append(make([]Hook, 0, len(r.revertFuncs)), r.revertFuncs...)\n\n\treturn rNew\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package sliceDiff provides functions to determine the difference\n\/\/ between two slices. There are tests and limited benchmarks for this package.\n\/\/\n\/\/ THIS IS INTENDED ONLY FOR \"SMALL\" SLICES. \n\/\/\n\/\/ Using this package on slices > 30,000 entries can get slow.. I would\n\/\/ suggest benchmarking the number \/ type of slices you intend to use\n\/\/ in sliceDiff_test.go *before* using on slices with a large number \n\/\/ of entries.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ which can be found in the LICENSE file\npackage sliceDiff\n\n\/\/ Int64SliceDiff returns the int64 values that are not in\n\/\/ both source slices\nfunc Int64SliceDiff(sliceOne []int64, sliceTwo []int64) []int64 {\n\tvar diff []int64\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n\n\/\/ Uint64SliceDiff returns the uint64 values that are not in\n\/\/ both source slices\nfunc Uint64SliceDiff(sliceOne []uint64, sliceTwo []uint64) []uint64 {\n var diff []uint64\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\n\/\/ Int32SliceDiff returns the int32 values that are not in\n\/\/ both soruce slices\nfunc Int32SliceDiff(sliceOne []int32, sliceTwo []int32) []int32 {\n\tvar diff []int32\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n\n\/\/ Uint32SliceDiff returns the uint32 values that are not in\n\/\/ both soruce slices\nfunc Uint32SliceDiff(sliceOne []uint32, sliceTwo []uint32) []uint32 {\n var diff []uint32\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Int16SliceDiff returns the int16 values that are not in\n\/\/ both soruce slices\nfunc Int16SliceDiff(sliceOne []int16, sliceTwo []int16) []int16 {\n var diff []int16\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Uint16SliceDiff returns the uint16 values that are not in\n\/\/ both soruce slices\nfunc Uint16SliceDiff(sliceOne []uint16, sliceTwo []uint16) []uint16 {\n var diff []uint16\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Int8SliceDiff returns the int16 values that are not in\n\/\/ both soruce slices\nfunc Int8SliceDiff(sliceOne []int8, sliceTwo []int8) []int8 {\n var diff []int8\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Uint8SliceDiff returns the uint16 values that are not in\n\/\/ both soruce slices\nfunc Uint8SliceDiff(sliceOne []uint8, sliceTwo []uint8) []uint8 {\n var diff []uint8\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ StringSliceDiff returns the string values that are not in\n\/\/ both source slices\nfunc StringSliceDiff(sliceOne []string, sliceTwo []string) []string {\n\tvar diff []string\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n<commit_msg>Fixed documentation error<commit_after>\/\/ Package sliceDiff provides functions to determine the difference\n\/\/ between two slices. There are tests and limited benchmarks for this package.\n\/\/\n\/\/ THIS IS INTENDED ONLY FOR \"SMALL\" SLICES. \n\/\/\n\/\/ Using this package on slices > 30,000 entries can get slow.. I would\n\/\/ suggest benchmarking the number \/ type of slices you intend to use\n\/\/ in sliceDiff_test.go *before* using on slices with a large number \n\/\/ of entries.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ which can be found in the LICENSE file\npackage sliceDiff\n\n\/\/ Int64SliceDiff returns the int64 values that are not in\n\/\/ both source slices\nfunc Int64SliceDiff(sliceOne []int64, sliceTwo []int64) []int64 {\n\tvar diff []int64\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n\n\/\/ Uint64SliceDiff returns the uint64 values that are not in\n\/\/ both source slices\nfunc Uint64SliceDiff(sliceOne []uint64, sliceTwo []uint64) []uint64 {\n var diff []uint64\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\n\/\/ Int32SliceDiff returns the int32 values that are not in\n\/\/ both soruce slices\nfunc Int32SliceDiff(sliceOne []int32, sliceTwo []int32) []int32 {\n\tvar diff []int32\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n\n\/\/ Uint32SliceDiff returns the uint32 values that are not in\n\/\/ both soruce slices\nfunc Uint32SliceDiff(sliceOne []uint32, sliceTwo []uint32) []uint32 {\n var diff []uint32\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Int16SliceDiff returns the int16 values that are not in\n\/\/ both soruce slices\nfunc Int16SliceDiff(sliceOne []int16, sliceTwo []int16) []int16 {\n var diff []int16\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Uint16SliceDiff returns the uint16 values that are not in\n\/\/ both soruce slices\nfunc Uint16SliceDiff(sliceOne []uint16, sliceTwo []uint16) []uint16 {\n var diff []uint16\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Int8SliceDiff returns the int8 values that are not in\n\/\/ both soruce slices\nfunc Int8SliceDiff(sliceOne []int8, sliceTwo []int8) []int8 {\n var diff []int8\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ Uint8SliceDiff returns the uint8 values that are not in\n\/\/ both soruce slices\nfunc Uint8SliceDiff(sliceOne []uint8, sliceTwo []uint8) []uint8 {\n var diff []uint8\n for i := 0; i < 2; i++ {\n for _, s1 := range sliceOne {\n found := false\n for _, s2 := range sliceTwo {\n if s1 == s2 {\n found = true\n break\n }\n }\n if !found {\n diff = append(diff,s1)\n }\n }\n if i == 0 {\n sliceOne, sliceTwo = sliceTwo, sliceOne\n }\n }\n return diff\n}\n\n\/\/ StringSliceDiff returns the string values that are not in\n\/\/ both source slices\nfunc StringSliceDiff(sliceOne []string, sliceTwo []string) []string {\n\tvar diff []string\n\tfor i := 0; i < 2; i++ {\n\t\tfor _, s1 := range sliceOne {\n\t\t\tfound := false\n\t\t\tfor _, s2 := range sliceTwo {\n\t\t\t\tif s1 == s2 {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tdiff = append(diff,s1)\n\t\t\t}\n\t\t}\n\t\tif i == 0 {\n\t\t\tsliceOne, sliceTwo = sliceTwo, sliceOne\n\t\t}\n\t}\n\treturn diff\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"gopkg.in\/jcelliott\/turnpike.v2\"\n\t\"log\"\n\t\"strconv\"\n)\n\nconst (\n\tPOLONIEX_WEBSOCKET_ADDRESS = \"wss:\/\/api.poloniex.com\"\n\tPOLONIEX_WEBSOCKET_REALM = \"realm1\"\n\tPOLONIEX_WEBSOCKET_TICKER = \"ticker\"\n\tPOLONIEX_WEBSOCKET_TROLLBOX = \"trollbox\"\n)\n\ntype PoloniexWebsocketTicker struct {\n\tCurrencyPair string\n\tLast float64\n\tLowestAsk float64\n\tHighestBid float64\n\tPercentChange float64\n\tBaseVolume float64\n\tQuoteVolume float64\n\tIsFrozen bool\n\tHigh float64\n\tLow float64\n}\n\nfunc PoloniexOnTicker(args []interface{}, kwargs map[string]interface{}) {\n\tticker := PoloniexWebsocketTicker{}\n\tticker.CurrencyPair = args[0].(string)\n\tticker.Last, _ = strconv.ParseFloat(args[1].(string), 64)\n\tticker.LowestAsk, _ = strconv.ParseFloat(args[2].(string), 64)\n\tticker.HighestBid, _ = strconv.ParseFloat(args[3].(string), 64)\n\tticker.PercentChange, _ = strconv.ParseFloat(args[4].(string), 64)\n\tticker.BaseVolume, _ = strconv.ParseFloat(args[5].(string), 64)\n\tticker.QuoteVolume, _ = strconv.ParseFloat(args[6].(string), 64)\n\n\tif args[7].(float64) != 0 {\n\t\tticker.IsFrozen = true\n\t} else {\n\t\tticker.IsFrozen = false\n\t}\n\n\tticker.High, _ = strconv.ParseFloat(args[8].(string), 64)\n\tticker.Low, _ = strconv.ParseFloat(args[9].(string), 64)\n}\n\ntype PoloniexWebsocketTrollboxMessage struct {\n\tMessageNumber float64\n\tUsername string\n\tMessage string\n\tReputation float64\n}\n\nfunc PoloniexOnTrollbox(args []interface{}, kwargs map[string]interface{}) {\n\tmessage := PoloniexWebsocketTrollboxMessage{}\n\tmessage.MessageNumber, _ = args[1].(float64)\n\tmessage.Username = args[2].(string)\n\tmessage.Message = args[3].(string)\n\tmessage.Reputation = args[4].(float64)\n}\n\nfunc PoloniexOnDepthOrTrade(args []interface{}, kwargs map[string]interface{}) {\n\tfor x := range args {\n\t\tdata := args[x].(map[string]interface{})\n\t\tmsgData := data[\"data\"].(map[string]interface{})\n\t\tmsgType := data[\"type\"].(string)\n\n\t\tswitch msgType {\n\t\tcase \"orderBookModify\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketOrderbookModify struct {\n\t\t\t\t\tType string\n\t\t\t\t\tRate float64\n\t\t\t\t\tAmount float64\n\t\t\t\t}\n\n\t\t\t\torderModify := PoloniexWebsocketOrderbookModify{}\n\t\t\t\torderModify.Type = msgData[\"type\"].(string)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\torderModify.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\n\t\t\t\tamountStr := msgData[\"amount\"].(string)\n\t\t\t\torderModify.Amount, _ = strconv.ParseFloat(amountStr, 64)\n\t\t\t}\n\t\tcase \"orderBookRemove\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketOrderbookRemove struct {\n\t\t\t\t\tType string\n\t\t\t\t\tRate float64\n\t\t\t\t}\n\n\t\t\t\torderRemoval := PoloniexWebsocketOrderbookRemove{}\n\t\t\t\torderRemoval.Type = msgData[\"type\"].(string)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\torderRemoval.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\t\t\t}\n\t\tcase \"newTrade\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketNewTrade struct {\n\t\t\t\t\tType string\n\t\t\t\t\tTradeID int64\n\t\t\t\t\tRate float64\n\t\t\t\t\tAmount float64\n\t\t\t\t\tDate string\n\t\t\t\t\tTotal float64\n\t\t\t\t}\n\n\t\t\t\ttrade := PoloniexWebsocketNewTrade{}\n\t\t\t\ttrade.Type = msgData[\"type\"].(string)\n\n\t\t\t\ttradeIDstr := msgData[\"tradeID\"].(string)\n\t\t\t\ttrade.TradeID, _ = strconv.ParseInt(tradeIDstr, 10, 64)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\ttrade.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\n\t\t\t\tamountStr := msgData[\"amount\"].(string)\n\t\t\t\ttrade.Amount, _ = strconv.ParseFloat(amountStr, 64)\n\n\t\t\t\ttotalStr := msgData[\"total\"].(string)\n\t\t\t\ttrade.Rate, _ = strconv.ParseFloat(totalStr, 64)\n\n\t\t\t\ttrade.Date = msgData[\"date\"].(string)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (p *Poloniex) WebsocketClient() {\n\tfor p.Enabled && p.Websocket {\n\t\tc, err := turnpike.NewWebsocketClient(turnpike.JSON, POLONIEX_WEBSOCKET_ADDRESS)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s Unable to connect to Websocket. Error: %s\\n\", p.GetName(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Connected to Websocket.\\n\", p.GetName())\n\t\t}\n\n\t\t_, err = c.JoinRealm(POLONIEX_WEBSOCKET_REALM, nil)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s Unable to join realm. Error: %s\\n\", p.GetName(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Joined Websocket realm.\\n\", p.GetName())\n\t\t}\n\n\t\tc.ReceiveDone = make(chan bool)\n\n\t\tif err := c.Subscribe(POLONIEX_WEBSOCKET_TICKER, PoloniexOnTicker); err != nil {\n\t\t\tlog.Printf(\"%s Error subscribing to ticker channel: %s\\n\", p.GetName(), err)\n\t\t}\n\n\t\tif err := c.Subscribe(POLONIEX_WEBSOCKET_TROLLBOX, PoloniexOnTrollbox); err != nil {\n\t\t\tlog.Printf(\"%s Error subscribing to trollbox channel: %s\\n\", p.GetName(), err)\n\t\t}\n\n\t\tfor x := range p.EnabledPairs {\n\t\t\tcurrency := p.EnabledPairs[x]\n\t\t\tif err := c.Subscribe(currency, PoloniexOnDepthOrTrade); err != nil {\n\t\t\t\tlog.Printf(\"%s Error subscribing to %s channel: %s\\n\", p.GetName(), currency, err)\n\t\t\t}\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Subscribed to websocket channels.\\n\", p.GetName())\n\t\t}\n\n\t\t<-c.ReceiveDone\n\t\tlog.Printf(\"%s Websocket client disconnected.\\n\", p.GetName())\n\t}\n}\n<commit_msg>Added support for latest turnpike library.<commit_after>package main\n\nimport (\n\t\"gopkg.in\/jcelliott\/turnpike.v2\"\n\t\"log\"\n\t\"strconv\"\n)\n\nconst (\n\tPOLONIEX_WEBSOCKET_ADDRESS = \"wss:\/\/api.poloniex.com\"\n\tPOLONIEX_WEBSOCKET_REALM = \"realm1\"\n\tPOLONIEX_WEBSOCKET_TICKER = \"ticker\"\n\tPOLONIEX_WEBSOCKET_TROLLBOX = \"trollbox\"\n)\n\ntype PoloniexWebsocketTicker struct {\n\tCurrencyPair string\n\tLast float64\n\tLowestAsk float64\n\tHighestBid float64\n\tPercentChange float64\n\tBaseVolume float64\n\tQuoteVolume float64\n\tIsFrozen bool\n\tHigh float64\n\tLow float64\n}\n\nfunc PoloniexOnTicker(args []interface{}, kwargs map[string]interface{}) {\n\tticker := PoloniexWebsocketTicker{}\n\tticker.CurrencyPair = args[0].(string)\n\tticker.Last, _ = strconv.ParseFloat(args[1].(string), 64)\n\tticker.LowestAsk, _ = strconv.ParseFloat(args[2].(string), 64)\n\tticker.HighestBid, _ = strconv.ParseFloat(args[3].(string), 64)\n\tticker.PercentChange, _ = strconv.ParseFloat(args[4].(string), 64)\n\tticker.BaseVolume, _ = strconv.ParseFloat(args[5].(string), 64)\n\tticker.QuoteVolume, _ = strconv.ParseFloat(args[6].(string), 64)\n\n\tif args[7].(float64) != 0 {\n\t\tticker.IsFrozen = true\n\t} else {\n\t\tticker.IsFrozen = false\n\t}\n\n\tticker.High, _ = strconv.ParseFloat(args[8].(string), 64)\n\tticker.Low, _ = strconv.ParseFloat(args[9].(string), 64)\n}\n\ntype PoloniexWebsocketTrollboxMessage struct {\n\tMessageNumber float64\n\tUsername string\n\tMessage string\n\tReputation float64\n}\n\nfunc PoloniexOnTrollbox(args []interface{}, kwargs map[string]interface{}) {\n\tmessage := PoloniexWebsocketTrollboxMessage{}\n\tmessage.MessageNumber, _ = args[1].(float64)\n\tmessage.Username = args[2].(string)\n\tmessage.Message = args[3].(string)\n\tmessage.Reputation = args[4].(float64)\n}\n\nfunc PoloniexOnDepthOrTrade(args []interface{}, kwargs map[string]interface{}) {\n\tfor x := range args {\n\t\tdata := args[x].(map[string]interface{})\n\t\tmsgData := data[\"data\"].(map[string]interface{})\n\t\tmsgType := data[\"type\"].(string)\n\n\t\tswitch msgType {\n\t\tcase \"orderBookModify\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketOrderbookModify struct {\n\t\t\t\t\tType string\n\t\t\t\t\tRate float64\n\t\t\t\t\tAmount float64\n\t\t\t\t}\n\n\t\t\t\torderModify := PoloniexWebsocketOrderbookModify{}\n\t\t\t\torderModify.Type = msgData[\"type\"].(string)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\torderModify.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\n\t\t\t\tamountStr := msgData[\"amount\"].(string)\n\t\t\t\torderModify.Amount, _ = strconv.ParseFloat(amountStr, 64)\n\t\t\t}\n\t\tcase \"orderBookRemove\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketOrderbookRemove struct {\n\t\t\t\t\tType string\n\t\t\t\t\tRate float64\n\t\t\t\t}\n\n\t\t\t\torderRemoval := PoloniexWebsocketOrderbookRemove{}\n\t\t\t\torderRemoval.Type = msgData[\"type\"].(string)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\torderRemoval.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\t\t\t}\n\t\tcase \"newTrade\":\n\t\t\t{\n\t\t\t\ttype PoloniexWebsocketNewTrade struct {\n\t\t\t\t\tType string\n\t\t\t\t\tTradeID int64\n\t\t\t\t\tRate float64\n\t\t\t\t\tAmount float64\n\t\t\t\t\tDate string\n\t\t\t\t\tTotal float64\n\t\t\t\t}\n\n\t\t\t\ttrade := PoloniexWebsocketNewTrade{}\n\t\t\t\ttrade.Type = msgData[\"type\"].(string)\n\n\t\t\t\ttradeIDstr := msgData[\"tradeID\"].(string)\n\t\t\t\ttrade.TradeID, _ = strconv.ParseInt(tradeIDstr, 10, 64)\n\n\t\t\t\trateStr := msgData[\"rate\"].(string)\n\t\t\t\ttrade.Rate, _ = strconv.ParseFloat(rateStr, 64)\n\n\t\t\t\tamountStr := msgData[\"amount\"].(string)\n\t\t\t\ttrade.Amount, _ = strconv.ParseFloat(amountStr, 64)\n\n\t\t\t\ttotalStr := msgData[\"total\"].(string)\n\t\t\t\ttrade.Rate, _ = strconv.ParseFloat(totalStr, 64)\n\n\t\t\t\ttrade.Date = msgData[\"date\"].(string)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (p *Poloniex) WebsocketClient() {\n\tfor p.Enabled && p.Websocket {\n\t\tc, err := turnpike.NewWebsocketClient(turnpike.JSON, POLONIEX_WEBSOCKET_ADDRESS, nil)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s Unable to connect to Websocket. Error: %s\\n\", p.GetName(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Connected to Websocket.\\n\", p.GetName())\n\t\t}\n\n\t\t_, err = c.JoinRealm(POLONIEX_WEBSOCKET_REALM, nil)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s Unable to join realm. Error: %s\\n\", p.GetName(), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Joined Websocket realm.\\n\", p.GetName())\n\t\t}\n\n\t\tc.ReceiveDone = make(chan bool)\n\n\t\tif err := c.Subscribe(POLONIEX_WEBSOCKET_TICKER, PoloniexOnTicker); err != nil {\n\t\t\tlog.Printf(\"%s Error subscribing to ticker channel: %s\\n\", p.GetName(), err)\n\t\t}\n\n\t\tif err := c.Subscribe(POLONIEX_WEBSOCKET_TROLLBOX, PoloniexOnTrollbox); err != nil {\n\t\t\tlog.Printf(\"%s Error subscribing to trollbox channel: %s\\n\", p.GetName(), err)\n\t\t}\n\n\t\tfor x := range p.EnabledPairs {\n\t\t\tcurrency := p.EnabledPairs[x]\n\t\t\tif err := c.Subscribe(currency, PoloniexOnDepthOrTrade); err != nil {\n\t\t\t\tlog.Printf(\"%s Error subscribing to %s channel: %s\\n\", p.GetName(), currency, err)\n\t\t\t}\n\t\t}\n\n\t\tif p.Verbose {\n\t\t\tlog.Printf(\"%s Subscribed to websocket channels.\\n\", p.GetName())\n\t\t}\n\n\t\t<-c.ReceiveDone\n\t\tlog.Printf(\"%s Websocket client disconnected.\\n\", p.GetName())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*--------------------------------------------------------*\\\n| |\n| hprose |\n| |\n| Official WebSite: https:\/\/hprose.com |\n| |\n| encoding\/error.go |\n| |\n| LastModified: Mar 22, 2020 |\n| Author: Ma Bingyao <andot@hprose.com> |\n| |\n\\*________________________________________________________*\/\n\npackage encoding\n\nimport (\n\t\"reflect\"\n)\n\n\/\/ An UnsupportedTypeError is returned by Encode\/Marshal when attempting\n\/\/ to encode an unsupported value type.\ntype UnsupportedTypeError struct {\n\tType reflect.Type\n}\n\nfunc (e *UnsupportedTypeError) Error() string {\n\treturn \"hprose: unsupported type: \" + e.Type.String()\n}\n<commit_msg>Update error.go<commit_after>\/*--------------------------------------------------------*\\\n| |\n| hprose |\n| |\n| Official WebSite: https:\/\/hprose.com |\n| |\n| encoding\/error.go |\n| |\n| LastModified: Mar 22, 2020 |\n| Author: Ma Bingyao <andot@hprose.com> |\n| |\n\\*________________________________________________________*\/\n\npackage encoding\n\nimport (\n\t\"reflect\"\n)\n\n\/\/ An UnsupportedTypeError is returned by Encode\/Marshal when attempting\n\/\/ to encode an unsupported value type.\ntype UnsupportedTypeError struct {\n\tType reflect.Type\n}\n\nfunc (e *UnsupportedTypeError) Error() string {\n\treturn \"hprose\/encoding: unsupported type: \" + e.Type.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/vaughan0\/go-ini\"\n\tconfig2 \"go.zhuzi.me\/config\/config\"\n)\n\n\/\/ Test_ManualParser 测试manual解析\nfunc Test_ManualParser(t *testing.T) {\n\tp := config2.NewManualParser()\n\tp.SetConfig(\"app\", \"system\", \"version\", \"beta0.1\")\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"version\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"beta0.1\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:beta0.1,获取到的值:\", v)\n\t}\n\tt.Log(\"test config success\")\n\n\tp = config2.NewManualParser()\n\tp.SetSection(\"app\", \"system\", ini.Section{\"appName\": \"config\"})\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"appName\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"config\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:config,获取到的值:\", v)\n\t}\n\tt.Log(\"test section success\")\n\n\tp = config2.NewManualParser()\n\tp.SetFile(\"app\", ini.File{\"system\": ini.Section{\"author\": \"scofield\"}})\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"author\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"scofield\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:scofield,获取到的值:\", v)\n\t}\n\tt.Log(\"test app success\")\n\n}\n<commit_msg>fix bug<commit_after>package config\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/vaughan0\/go-ini\"\n)\n\n\/\/ Test_ManualParser 测试manual解析\nfunc Test_ManualParser(t *testing.T) {\n\tp := NewManualParser()\n\tp.SetConfig(\"app\", \"system\", \"version\", \"beta0.1\")\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"version\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"beta0.1\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:beta0.1,获取到的值:\", v)\n\t}\n\tt.Log(\"test config success\")\n\n\tp = NewManualParser()\n\tp.SetSection(\"app\", \"system\", ini.Section{\"appName\": \"config\"})\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"appName\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"config\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:config,获取到的值:\", v)\n\t}\n\tt.Log(\"test section success\")\n\n\tp = NewManualParser()\n\tp.SetFile(\"app\", ini.File{\"system\": ini.Section{\"author\": \"scofield\"}})\n\tif err := Init(false, p); err != nil {\n\t\tt.Error(err)\n\t}\n\tif v, ok := Data(\"app\").Get(\"system\", \"author\"); !ok {\n\t\tt.Error(\"没有找到设置的配置\")\n\t} else if v != \"scofield\" {\n\t\tt.Error(\"找到的值与预设值不一致,设置的值:scofield,获取到的值:\", v)\n\t}\n\tt.Log(\"test app success\")\n\n}\n<|endoftext|>"} {"text":"<commit_before>package envstruct_test\n\nimport (\n\t\"envstruct\"\n\t\"os\"\n\n\t\"fmt\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype TestStruct struct {\n\tNonEnvThing string\n\tStringThing string `env:\"string_thing\"`\n\tRequiredThing string `env:\"required_thing,required\"`\n\n\tBoolThing bool `env:\"bool_thing\"`\n\n\tIntThing int `env:\"int_thing\"`\n\tInt8Thing int8 `env:\"int8_thing\"`\n\tInt16Thing int16 `env:\"int16_thing\"`\n\tInt32Thing int32 `env:\"int32_thing\"`\n\tInt64Thing int64 `env:\"int64_thing\"`\n\tUintThing uint `env:\"uint_thing\"`\n\tUint8Thing uint8 `env:\"uint8_thing\"`\n\tUint16Thing uint16 `env:\"uint16_thing\"`\n\tUint32Thing uint32 `env:\"uint32_thing\"`\n\tUint64Thing uint64 `env:\"uint64_thing\"`\n\n\tStringSliceThing []string `env:\"string_slice_thing\"`\n\tIntSliceThing []int `env:\"int_slice_thing\"`\n}\n\nvar _ = Describe(\"envstruct\", func() {\n\tDescribe(\"Load()\", func() {\n\t\tvar (\n\t\t\tts TestStruct\n\t\t\tloadError error\n\t\t\tenvVars map[string]string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tenvVars = map[string]string{\n\t\t\t\t\"STRING_THING\": \"stringy thingy\",\n\t\t\t\t\"REQUIRED_THING\": \"im so required\",\n\t\t\t\t\"BOOL_THING\": \"true\",\n\t\t\t\t\"INT_THING\": \"100\",\n\t\t\t\t\"INT8_THING\": \"20\",\n\t\t\t\t\"INT16_THING\": \"2000\",\n\t\t\t\t\"INT32_THING\": \"200000\",\n\t\t\t\t\"INT64_THING\": \"200000000\",\n\t\t\t\t\"UINT_THING\": \"100\",\n\t\t\t\t\"UINT8_THING\": \"20\",\n\t\t\t\t\"UINT16_THING\": \"2000\",\n\t\t\t\t\"UINT32_THING\": \"200000\",\n\t\t\t\t\"UINT64_THING\": \"200000000\",\n\t\t\t\t\"STRING_SLICE_THING\": \"one,two,three\",\n\t\t\t\t\"INT_SLICE_THING\": \"1,2,3\",\n\t\t\t}\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tfor k, v := range envVars {\n\t\t\t\tos.Setenv(k, v)\n\t\t\t}\n\t\t})\n\n\t\tContext(\"when load is successfull\", func() {\n\t\t\tJustBeforeEach(func() {\n\t\t\t\tloadError = envstruct.Load(&ts)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tfor k := range envVars {\n\t\t\t\t\tos.Setenv(k, \"\")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\tExpect(loadError).ToNot(HaveOccurred())\n\t\t\t})\n\n\t\t\tContext(\"with strings\", func() {\n\t\t\t\tIt(\"populates the string thing\", func() {\n\t\t\t\t\tExpect(ts.StringThing).To(Equal(\"stringy thingy\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with bools\", func() {\n\t\t\t\tContext(\"with 'true'\", func() {\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with 'false'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"false\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeFalse())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with '1'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"1\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with '0'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"0\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is false\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeFalse())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with ints\", func() {\n\t\t\t\tIt(\"populates the int thing\", func() {\n\t\t\t\t\tExpect(ts.IntThing).To(Equal(100))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 8 thing\", func() {\n\t\t\t\t\tExpect(ts.Int8Thing).To(Equal(int8(20)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 16 thing\", func() {\n\t\t\t\t\tExpect(ts.Int16Thing).To(Equal(int16(2000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 32 thing\", func() {\n\t\t\t\t\tExpect(ts.Int32Thing).To(Equal(int32(200000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 64 thing\", func() {\n\t\t\t\t\tExpect(ts.Int64Thing).To(Equal(int64(200000000)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with uints\", func() {\n\t\t\t\tIt(\"populates the uint thing\", func() {\n\t\t\t\t\tExpect(ts.UintThing).To(Equal(uint(100)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 8 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint8Thing).To(Equal(uint8(20)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 16 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint16Thing).To(Equal(uint16(2000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 32 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint32Thing).To(Equal(uint32(200000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 64 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint64Thing).To(Equal(uint64(200000000)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with comma separated strings\", func() {\n\t\t\t\tContext(\"slice of strings\", func() {\n\t\t\t\t\tIt(\"populates a slice of strings\", func() {\n\t\t\t\t\t\tExpect(ts.StringSliceThing).To(Equal([]string{\"one\", \"two\", \"three\"}))\n\t\t\t\t\t})\n\n\t\t\t\t\tContext(\"with leading and trailing spaces\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tenvVars[\"STRING_SLICE_THING\"] = \"one , two , three\"\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"populates a slice of strings\", func() {\n\t\t\t\t\t\t\tExpect(ts.StringSliceThing).To(Equal([]string{\"one\", \"two\", \"three\"}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"slice of ints\", func() {\n\t\t\t\t\tIt(\"populates a slice of ints\", func() {\n\t\t\t\t\t\tExpect(ts.IntSliceThing).To(Equal([]int{1, 2, 3}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when load is unsuccessfull\", func() {\n\t\t\tContext(\"when a required environment variable is not given\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"REQUIRED_THING\"] = \"\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a validation error\", func() {\n\t\t\t\t\tloadError = envstruct.Load(&ts)\n\n\t\t\t\t\tExpect(loadError).To(MatchError(fmt.Errorf(\"REQUIRED_THING is required but was empty\")))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with an invalid int\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"INT_THING\"] = \"Hello!\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\tExpect(envstruct.Load(&ts)).ToNot(Succeed())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with an invalid uint\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"UINT_THING\"] = \"Hello!\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\tExpect(envstruct.Load(&ts)).ToNot(Succeed())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Fix typo<commit_after>package envstruct_test\n\nimport (\n\t\"envstruct\"\n\t\"os\"\n\n\t\"fmt\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype TestStruct struct {\n\tNonEnvThing string\n\tStringThing string `env:\"string_thing\"`\n\tRequiredThing string `env:\"required_thing,required\"`\n\n\tBoolThing bool `env:\"bool_thing\"`\n\n\tIntThing int `env:\"int_thing\"`\n\tInt8Thing int8 `env:\"int8_thing\"`\n\tInt16Thing int16 `env:\"int16_thing\"`\n\tInt32Thing int32 `env:\"int32_thing\"`\n\tInt64Thing int64 `env:\"int64_thing\"`\n\tUintThing uint `env:\"uint_thing\"`\n\tUint8Thing uint8 `env:\"uint8_thing\"`\n\tUint16Thing uint16 `env:\"uint16_thing\"`\n\tUint32Thing uint32 `env:\"uint32_thing\"`\n\tUint64Thing uint64 `env:\"uint64_thing\"`\n\n\tStringSliceThing []string `env:\"string_slice_thing\"`\n\tIntSliceThing []int `env:\"int_slice_thing\"`\n}\n\nvar _ = Describe(\"envstruct\", func() {\n\tDescribe(\"Load()\", func() {\n\t\tvar (\n\t\t\tts TestStruct\n\t\t\tloadError error\n\t\t\tenvVars map[string]string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tenvVars = map[string]string{\n\t\t\t\t\"STRING_THING\": \"stringy thingy\",\n\t\t\t\t\"REQUIRED_THING\": \"im so required\",\n\t\t\t\t\"BOOL_THING\": \"true\",\n\t\t\t\t\"INT_THING\": \"100\",\n\t\t\t\t\"INT8_THING\": \"20\",\n\t\t\t\t\"INT16_THING\": \"2000\",\n\t\t\t\t\"INT32_THING\": \"200000\",\n\t\t\t\t\"INT64_THING\": \"200000000\",\n\t\t\t\t\"UINT_THING\": \"100\",\n\t\t\t\t\"UINT8_THING\": \"20\",\n\t\t\t\t\"UINT16_THING\": \"2000\",\n\t\t\t\t\"UINT32_THING\": \"200000\",\n\t\t\t\t\"UINT64_THING\": \"200000000\",\n\t\t\t\t\"STRING_SLICE_THING\": \"one,two,three\",\n\t\t\t\t\"INT_SLICE_THING\": \"1,2,3\",\n\t\t\t}\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tfor k, v := range envVars {\n\t\t\t\tos.Setenv(k, v)\n\t\t\t}\n\t\t})\n\n\t\tContext(\"when load is successful\", func() {\n\t\t\tJustBeforeEach(func() {\n\t\t\t\tloadError = envstruct.Load(&ts)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tfor k := range envVars {\n\t\t\t\t\tos.Setenv(k, \"\")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\tExpect(loadError).ToNot(HaveOccurred())\n\t\t\t})\n\n\t\t\tContext(\"with strings\", func() {\n\t\t\t\tIt(\"populates the string thing\", func() {\n\t\t\t\t\tExpect(ts.StringThing).To(Equal(\"stringy thingy\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with bools\", func() {\n\t\t\t\tContext(\"with 'true'\", func() {\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with 'false'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"false\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeFalse())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with '1'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"1\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is true\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with '0'\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tenvVars[\"BOOL_THING\"] = \"0\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is false\", func() {\n\t\t\t\t\t\tExpect(ts.BoolThing).To(BeFalse())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with ints\", func() {\n\t\t\t\tIt(\"populates the int thing\", func() {\n\t\t\t\t\tExpect(ts.IntThing).To(Equal(100))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 8 thing\", func() {\n\t\t\t\t\tExpect(ts.Int8Thing).To(Equal(int8(20)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 16 thing\", func() {\n\t\t\t\t\tExpect(ts.Int16Thing).To(Equal(int16(2000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 32 thing\", func() {\n\t\t\t\t\tExpect(ts.Int32Thing).To(Equal(int32(200000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the int 64 thing\", func() {\n\t\t\t\t\tExpect(ts.Int64Thing).To(Equal(int64(200000000)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with uints\", func() {\n\t\t\t\tIt(\"populates the uint thing\", func() {\n\t\t\t\t\tExpect(ts.UintThing).To(Equal(uint(100)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 8 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint8Thing).To(Equal(uint8(20)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 16 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint16Thing).To(Equal(uint16(2000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 32 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint32Thing).To(Equal(uint32(200000)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"populates the uint 64 thing\", func() {\n\t\t\t\t\tExpect(ts.Uint64Thing).To(Equal(uint64(200000000)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with comma separated strings\", func() {\n\t\t\t\tContext(\"slice of strings\", func() {\n\t\t\t\t\tIt(\"populates a slice of strings\", func() {\n\t\t\t\t\t\tExpect(ts.StringSliceThing).To(Equal([]string{\"one\", \"two\", \"three\"}))\n\t\t\t\t\t})\n\n\t\t\t\t\tContext(\"with leading and trailing spaces\", func() {\n\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\tenvVars[\"STRING_SLICE_THING\"] = \"one , two , three\"\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tIt(\"populates a slice of strings\", func() {\n\t\t\t\t\t\t\tExpect(ts.StringSliceThing).To(Equal([]string{\"one\", \"two\", \"three\"}))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"slice of ints\", func() {\n\t\t\t\t\tIt(\"populates a slice of ints\", func() {\n\t\t\t\t\t\tExpect(ts.IntSliceThing).To(Equal([]int{1, 2, 3}))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when load is unsuccessfull\", func() {\n\t\t\tContext(\"when a required environment variable is not given\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"REQUIRED_THING\"] = \"\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a validation error\", func() {\n\t\t\t\t\tloadError = envstruct.Load(&ts)\n\n\t\t\t\t\tExpect(loadError).To(MatchError(fmt.Errorf(\"REQUIRED_THING is required but was empty\")))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with an invalid int\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"INT_THING\"] = \"Hello!\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\tExpect(envstruct.Load(&ts)).ToNot(Succeed())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"with an invalid uint\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tenvVars[\"UINT_THING\"] = \"Hello!\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error\", func() {\n\t\t\t\t\tExpect(envstruct.Load(&ts)).ToNot(Succeed())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Definition for the SOAP structure required for UPnP's SOAP usage.\n\npackage soap\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n)\n\nconst (\n\tsoapEncodingStyle = \"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"\n\tsoapPrefix = xml.Header + `<s:Envelope xmlns:s=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\" s:encodingStyle=\"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"><s:Body>`\n\tsoapSuffix = `<\/s:Body><\/s:Envelope>`\n)\n\ntype SOAPClient struct {\n\tEndpointURL url.URL\n\tHTTPClient http.Client\n}\n\nfunc NewSOAPClient(endpointURL url.URL) *SOAPClient {\n\treturn &SOAPClient{\n\t\tEndpointURL: endpointURL,\n\t}\n}\n\n\/\/ PerformSOAPAction makes a SOAP request, with the given action.\n\/\/ inAction and outAction must both be pointers to structs with string fields\n\/\/ only.\nfunc (client *SOAPClient) PerformAction(actionNamespace, actionName string, inAction interface{}, outAction interface{}) error {\n\trequestBytes, err := encodeRequestAction(actionNamespace, actionName, inAction)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse, err := client.HTTPClient.Do(&http.Request{\n\t\tMethod: \"POST\",\n\t\tURL: &client.EndpointURL,\n\t\tHeader: http.Header{\n\t\t\t\"SOAPACTION\": []string{actionNamespace + \"#\" + actionName},\n\t\t\t\"CONTENT-TYPE\": []string{\"text\/xml; charset=\\\"utf-8\\\"\"},\n\t\t},\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer(requestBytes)),\n\t\t\/\/ Set ContentLength to avoid chunked encoding - some servers might not support it.\n\t\tContentLength: int64(len(requestBytes)),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"goupnp: error performing SOAP HTTP request: %v\", err)\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"goupnp: SOAP request got HTTP %s\", response.Status)\n\t}\n\n\tresponseEnv := newSOAPEnvelope()\n\tdecoder := xml.NewDecoder(response.Body)\n\tif err := decoder.Decode(responseEnv); err != nil {\n\t\treturn fmt.Errorf(\"goupnp: error decoding response body: %v\", err)\n\t}\n\n\tif responseEnv.Body.Fault != nil {\n\t\treturn responseEnv.Body.Fault\n\t}\n\n\tif outAction != nil {\n\t\tif err := xml.Unmarshal(responseEnv.Body.RawAction, outAction); err != nil {\n\t\t\treturn fmt.Errorf(\"goupnp: error unmarshalling out action: %v, %v\", err, responseEnv.Body.RawAction)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ newSOAPAction creates a soapEnvelope with the given action and arguments.\nfunc newSOAPEnvelope() *soapEnvelope {\n\treturn &soapEnvelope{\n\t\tEncodingStyle: soapEncodingStyle,\n\t}\n}\n\n\/\/ encodeRequestAction is a hacky way to create an encoded SOAP envelope\n\/\/ containing the given action. Experiments with one router have shown that it\n\/\/ 500s for requests where the outer default xmlns is set to the SOAP\n\/\/ namespace, and then reassigning the default namespace within that to the\n\/\/ service namespace. Hand-coding the outer XML to work-around this.\nfunc encodeRequestAction(actionNamespace, actionName string, inAction interface{}) ([]byte, error) {\n\trequestBuf := new(bytes.Buffer)\n\trequestBuf.WriteString(soapPrefix)\n\trequestBuf.WriteString(`<u:`)\n\txml.EscapeText(requestBuf, []byte(actionName))\n\trequestBuf.WriteString(` xmlns:u=\"`)\n\txml.EscapeText(requestBuf, []byte(actionNamespace))\n\trequestBuf.WriteString(`\">`)\n\tif inAction != nil {\n\t\tif err := encodeRequestArgs(requestBuf, inAction); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trequestBuf.WriteString(`<\/u:`)\n\txml.EscapeText(requestBuf, []byte(actionName))\n\trequestBuf.WriteString(`>`)\n\trequestBuf.WriteString(soapSuffix)\n\treturn requestBuf.Bytes(), nil\n}\n\nfunc encodeRequestArgs(w *bytes.Buffer, inAction interface{}) error {\n\tin := reflect.Indirect(reflect.ValueOf(inAction))\n\tif in.Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"goupnp: SOAP inAction is not a struct but of type %v\", in.Type())\n\t}\n\tenc := xml.NewEncoder(w)\n\tnFields := in.NumField()\n\tinType := in.Type()\n\tfor i := 0; i < nFields; i++ {\n\t\tfield := inType.Field(i)\n\t\targName := field.Name\n\t\tif nameOverride := field.Tag.Get(\"soap\"); nameOverride != \"\" {\n\t\t\targName = nameOverride\n\t\t}\n\t\tvalue := in.Field(i)\n\t\tif value.Kind() != reflect.String {\n\t\t\treturn fmt.Errorf(\"goupnp: SOAP arg %q is not of type string, but of type %v\", argName, value.Type())\n\t\t}\n\t\tif err := enc.EncodeElement(value.Interface(), xml.StartElement{xml.Name{\"\", argName}, nil}); err != nil {\n\t\t\treturn fmt.Errorf(\"goupnp: error encoding SOAP arg %q: %v\", argName, err)\n\t\t}\n\t}\n\tenc.Flush()\n\treturn nil\n}\n\ntype soapEnvelope struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ Envelope\"`\n\tEncodingStyle string `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ encodingStyle,attr\"`\n\tBody soapBody `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ Body\"`\n}\n\ntype soapBody struct {\n\tFault *SOAPFaultError `xml:\"Fault\"`\n\tRawAction []byte `xml:\",innerxml\"`\n}\n\n\/\/ SOAPFaultError implements error, and contains SOAP fault information.\ntype SOAPFaultError struct {\n\tFaultCode string `xml:\"faultcode\"`\n\tFaultString string `xml:\"faultstring\"`\n\tDetail string `xml:\"detail\"`\n}\n\nfunc (err *SOAPFaultError) Error() string {\n\treturn fmt.Sprintf(\"SOAP fault: %s\", err.FaultString)\n}\n<commit_msg>soap: quote action names in header<commit_after>\/\/ Definition for the SOAP structure required for UPnP's SOAP usage.\n\npackage soap\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n)\n\nconst (\n\tsoapEncodingStyle = \"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"\n\tsoapPrefix = xml.Header + `<s:Envelope xmlns:s=\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/\" s:encodingStyle=\"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"><s:Body>`\n\tsoapSuffix = `<\/s:Body><\/s:Envelope>`\n)\n\ntype SOAPClient struct {\n\tEndpointURL url.URL\n\tHTTPClient http.Client\n}\n\nfunc NewSOAPClient(endpointURL url.URL) *SOAPClient {\n\treturn &SOAPClient{\n\t\tEndpointURL: endpointURL,\n\t}\n}\n\n\/\/ PerformSOAPAction makes a SOAP request, with the given action.\n\/\/ inAction and outAction must both be pointers to structs with string fields\n\/\/ only.\nfunc (client *SOAPClient) PerformAction(actionNamespace, actionName string, inAction interface{}, outAction interface{}) error {\n\trequestBytes, err := encodeRequestAction(actionNamespace, actionName, inAction)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse, err := client.HTTPClient.Do(&http.Request{\n\t\tMethod: \"POST\",\n\t\tURL: &client.EndpointURL,\n\t\tHeader: http.Header{\n\t\t\t\"SOAPACTION\": []string{`\"` + actionNamespace + \"#\" + actionName + `\"`},\n\t\t\t\"CONTENT-TYPE\": []string{\"text\/xml; charset=\\\"utf-8\\\"\"},\n\t\t},\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer(requestBytes)),\n\t\t\/\/ Set ContentLength to avoid chunked encoding - some servers might not support it.\n\t\tContentLength: int64(len(requestBytes)),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"goupnp: error performing SOAP HTTP request: %v\", err)\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"goupnp: SOAP request got HTTP %s\", response.Status)\n\t}\n\n\tresponseEnv := newSOAPEnvelope()\n\tdecoder := xml.NewDecoder(response.Body)\n\tif err := decoder.Decode(responseEnv); err != nil {\n\t\treturn fmt.Errorf(\"goupnp: error decoding response body: %v\", err)\n\t}\n\n\tif responseEnv.Body.Fault != nil {\n\t\treturn responseEnv.Body.Fault\n\t}\n\n\tif outAction != nil {\n\t\tif err := xml.Unmarshal(responseEnv.Body.RawAction, outAction); err != nil {\n\t\t\treturn fmt.Errorf(\"goupnp: error unmarshalling out action: %v, %v\", err, responseEnv.Body.RawAction)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ newSOAPAction creates a soapEnvelope with the given action and arguments.\nfunc newSOAPEnvelope() *soapEnvelope {\n\treturn &soapEnvelope{\n\t\tEncodingStyle: soapEncodingStyle,\n\t}\n}\n\n\/\/ encodeRequestAction is a hacky way to create an encoded SOAP envelope\n\/\/ containing the given action. Experiments with one router have shown that it\n\/\/ 500s for requests where the outer default xmlns is set to the SOAP\n\/\/ namespace, and then reassigning the default namespace within that to the\n\/\/ service namespace. Hand-coding the outer XML to work-around this.\nfunc encodeRequestAction(actionNamespace, actionName string, inAction interface{}) ([]byte, error) {\n\trequestBuf := new(bytes.Buffer)\n\trequestBuf.WriteString(soapPrefix)\n\trequestBuf.WriteString(`<u:`)\n\txml.EscapeText(requestBuf, []byte(actionName))\n\trequestBuf.WriteString(` xmlns:u=\"`)\n\txml.EscapeText(requestBuf, []byte(actionNamespace))\n\trequestBuf.WriteString(`\">`)\n\tif inAction != nil {\n\t\tif err := encodeRequestArgs(requestBuf, inAction); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trequestBuf.WriteString(`<\/u:`)\n\txml.EscapeText(requestBuf, []byte(actionName))\n\trequestBuf.WriteString(`>`)\n\trequestBuf.WriteString(soapSuffix)\n\treturn requestBuf.Bytes(), nil\n}\n\nfunc encodeRequestArgs(w *bytes.Buffer, inAction interface{}) error {\n\tin := reflect.Indirect(reflect.ValueOf(inAction))\n\tif in.Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"goupnp: SOAP inAction is not a struct but of type %v\", in.Type())\n\t}\n\tenc := xml.NewEncoder(w)\n\tnFields := in.NumField()\n\tinType := in.Type()\n\tfor i := 0; i < nFields; i++ {\n\t\tfield := inType.Field(i)\n\t\targName := field.Name\n\t\tif nameOverride := field.Tag.Get(\"soap\"); nameOverride != \"\" {\n\t\t\targName = nameOverride\n\t\t}\n\t\tvalue := in.Field(i)\n\t\tif value.Kind() != reflect.String {\n\t\t\treturn fmt.Errorf(\"goupnp: SOAP arg %q is not of type string, but of type %v\", argName, value.Type())\n\t\t}\n\t\tif err := enc.EncodeElement(value.Interface(), xml.StartElement{xml.Name{\"\", argName}, nil}); err != nil {\n\t\t\treturn fmt.Errorf(\"goupnp: error encoding SOAP arg %q: %v\", argName, err)\n\t\t}\n\t}\n\tenc.Flush()\n\treturn nil\n}\n\ntype soapEnvelope struct {\n\tXMLName xml.Name `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ Envelope\"`\n\tEncodingStyle string `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ encodingStyle,attr\"`\n\tBody soapBody `xml:\"http:\/\/schemas.xmlsoap.org\/soap\/envelope\/ Body\"`\n}\n\ntype soapBody struct {\n\tFault *SOAPFaultError `xml:\"Fault\"`\n\tRawAction []byte `xml:\",innerxml\"`\n}\n\n\/\/ SOAPFaultError implements error, and contains SOAP fault information.\ntype SOAPFaultError struct {\n\tFaultCode string `xml:\"faultcode\"`\n\tFaultString string `xml:\"faultstring\"`\n\tDetail string `xml:\"detail\"`\n}\n\nfunc (err *SOAPFaultError) Error() string {\n\treturn fmt.Sprintf(\"SOAP fault: %s\", err.FaultString)\n}\n<|endoftext|>"} {"text":"<commit_before>package etcd\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/coreos\/etcd\/config\"\n\tetcdErr \"github.com\/coreos\/etcd\/error\"\n)\n\n\/\/ v2client sends various requests using HTTP API.\n\/\/ It is different from raft communication, and doesn't record anything in the log.\n\/\/ The argument url is required to contain scheme and host only, and\n\/\/ there is no trailing slash in it.\n\/\/ Public functions return \"etcd\/error\".Error intentionally to figure out\n\/\/ etcd error code easily.\ntype v2client struct {\n\thttp.Client\n}\n\nfunc newClient(tc *tls.Config) *v2client {\n\ttr := new(http.Transport)\n\ttr.TLSClientConfig = tc\n\treturn &v2client{http.Client{Transport: tr}}\n}\n\n\/\/ CheckVersion returns true when the version check on the server returns 200.\nfunc (c *v2client) CheckVersion(url string, version int) (bool, *etcdErr.Error) {\n\tresp, err := c.Get(url + fmt.Sprintf(\"\/version\/%d\/check\", version))\n\tif err != nil {\n\t\treturn false, clientError(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn resp.StatusCode == 200, nil\n}\n\n\/\/ GetVersion fetches the peer version of a cluster.\nfunc (c *v2client) GetVersion(url string) (int, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/version\")\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\n\t\/\/ Parse version number.\n\tversion, err := strconv.Atoi(string(body))\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\treturn version, nil\n}\n\nfunc (c *v2client) GetMachines(url string) ([]*machineMessage, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/v2\/admin\/machines\/\")\n\tif err != nil {\n\t\treturn nil, clientError(err)\n\t}\n\n\tmsgs := new([]*machineMessage)\n\tif uerr := c.readJSONResponse(resp, msgs); uerr != nil {\n\t\treturn nil, uerr\n\t}\n\treturn *msgs, nil\n}\n\nfunc (c *v2client) GetClusterConfig(url string) (*config.ClusterConfig, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/v2\/admin\/config\")\n\tif err != nil {\n\t\treturn nil, clientError(err)\n\t}\n\n\tconfig := new(config.ClusterConfig)\n\tif uerr := c.readJSONResponse(resp, config); uerr != nil {\n\t\treturn nil, uerr\n\t}\n\treturn config, nil\n}\n\n\/\/ AddMachine adds machine to the cluster.\n\/\/ The first return value is the commit index of join command.\nfunc (c *v2client) AddMachine(url string, name string, info *context) *etcdErr.Error {\n\tb, _ := json.Marshal(info)\n\turl = url + \"\/v2\/admin\/machines\/\" + name\n\n\tlog.Printf(\"Send Join Request to %s\", url)\n\tresp, err := c.put(url, b)\n\tif err != nil {\n\t\treturn clientError(err)\n\t}\n\n\tif err := c.readErrorResponse(resp); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *v2client) readJSONResponse(resp *http.Response, val interface{}) *etcdErr.Error {\n\tif err := c.readErrorResponse(resp); err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tdefer ioutil.ReadAll(resp.Body)\n\n\tif err := json.NewDecoder(resp.Body).Decode(val); err != nil {\n\t\tlog.Printf(\"Error parsing join response: %v\", err)\n\t\treturn clientError(err)\n\t}\n\treturn nil\n}\n\nfunc (c *v2client) readErrorResponse(resp *http.Response) *etcdErr.Error {\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\tdefer resp.Body.Close()\n\tdefer ioutil.ReadAll(resp.Body)\n\n\tuerr := &etcdErr.Error{}\n\tif err := json.NewDecoder(resp.Body).Decode(uerr); err != nil {\n\t\tlog.Printf(\"Error parsing response to etcd error: %v\", err)\n\t\treturn clientError(err)\n\t}\n\treturn uerr\n}\n\n\/\/ put sends server side PUT request.\n\/\/ It always follows redirects instead of stopping according to RFC 2616.\nfunc (c *v2client) put(urlStr string, body []byte) (*http.Response, error) {\n\treturn c.doAlwaysFollowingRedirects(\"PUT\", urlStr, body)\n}\n\nfunc (c *v2client) doAlwaysFollowingRedirects(method string, urlStr string, body []byte) (resp *http.Response, err error) {\n\tvar req *http.Request\n\n\tfor redirect := 0; redirect < 10; redirect++ {\n\t\treq, err = http.NewRequest(method, urlStr, bytes.NewBuffer(body))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif resp, err = c.Do(req); err != nil {\n\t\t\tif resp != nil {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif resp.StatusCode == http.StatusMovedPermanently || resp.StatusCode == http.StatusTemporaryRedirect {\n\t\t\tresp.Body.Close()\n\t\t\tif urlStr = resp.Header.Get(\"Location\"); urlStr == \"\" {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"%d response missing Location header\", resp.StatusCode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\treturn\n\t}\n\n\terr = errors.New(\"stopped after 10 redirects\")\n\treturn\n}\n\nfunc clientError(err error) *etcdErr.Error {\n\treturn etcdErr.NewError(etcdErr.EcodeClientInternal, err.Error(), 0)\n}\n<commit_msg>server: refactor client<commit_after>package etcd\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/etcd\/config\"\n\tetcdErr \"github.com\/coreos\/etcd\/error\"\n)\n\n\/\/ v2client sends various requests using HTTP API.\n\/\/ It is different from raft communication, and doesn't record anything in the log.\n\/\/ The argument url is required to contain scheme and host only, and\n\/\/ there is no trailing slash in it.\n\/\/ Public functions return \"etcd\/error\".Error intentionally to figure out\n\/\/ etcd error code easily.\ntype v2client struct {\n\thttp.Client\n}\n\nfunc newClient(tc *tls.Config) *v2client {\n\ttr := new(http.Transport)\n\ttr.TLSClientConfig = tc\n\treturn &v2client{http.Client{Transport: tr}}\n}\n\n\/\/ CheckVersion returns true when the version check on the server returns 200.\nfunc (c *v2client) CheckVersion(url string, version int) (bool, *etcdErr.Error) {\n\tresp, err := c.Get(url + fmt.Sprintf(\"\/version\/%d\/check\", version))\n\tif err != nil {\n\t\treturn false, clientError(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn resp.StatusCode == 200, nil\n}\n\n\/\/ GetVersion fetches the peer version of a cluster.\nfunc (c *v2client) GetVersion(url string) (int, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/version\")\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\n\t\/\/ Parse version number.\n\tversion, err := strconv.Atoi(string(body))\n\tif err != nil {\n\t\treturn 0, clientError(err)\n\t}\n\treturn version, nil\n}\n\nfunc (c *v2client) GetMachines(url string) ([]*machineMessage, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/v2\/admin\/machines\/\")\n\tif err != nil {\n\t\treturn nil, clientError(err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, c.readErrorBody(resp.Body)\n\t}\n\n\tmsgs := new([]*machineMessage)\n\tif uerr := c.readJSONBody(resp.Body, msgs); uerr != nil {\n\t\treturn nil, uerr\n\t}\n\treturn *msgs, nil\n}\n\nfunc (c *v2client) GetClusterConfig(url string) (*config.ClusterConfig, *etcdErr.Error) {\n\tresp, err := c.Get(url + \"\/v2\/admin\/config\")\n\tif err != nil {\n\t\treturn nil, clientError(err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, c.readErrorBody(resp.Body)\n\t}\n\n\tconfig := new(config.ClusterConfig)\n\tif uerr := c.readJSONBody(resp.Body, config); uerr != nil {\n\t\treturn nil, uerr\n\t}\n\treturn config, nil\n}\n\n\/\/ AddMachine adds machine to the cluster.\n\/\/ The first return value is the commit index of join command.\nfunc (c *v2client) AddMachine(url string, name string, info *context) *etcdErr.Error {\n\tb, _ := json.Marshal(info)\n\turl = url + \"\/v2\/admin\/machines\/\" + name\n\n\tlog.Printf(\"Send Join Request to %s\", url)\n\tresp, err := c.put(url, b)\n\tif err != nil {\n\t\treturn clientError(err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn c.readErrorBody(resp.Body)\n\t}\n\tc.readBody(resp.Body)\n\treturn nil\n}\n\nfunc (c *v2client) readErrorBody(body io.ReadCloser) *etcdErr.Error {\n\tb, err := c.readBody(body)\n\tif err != nil {\n\t\treturn clientError(err)\n\t}\n\tuerr := &etcdErr.Error{}\n\tif err := json.Unmarshal(b, uerr); err != nil {\n\t\tstr := strings.TrimSpace(string(b))\n\t\treturn etcdErr.NewError(etcdErr.EcodeClientInternal, str, 0)\n\t}\n\treturn nil\n}\n\nfunc (c *v2client) readJSONBody(body io.ReadCloser, val interface{}) *etcdErr.Error {\n\tif err := json.NewDecoder(body).Decode(val); err != nil {\n\t\tlog.Printf(\"Error parsing join response: %v\", err)\n\t\treturn clientError(err)\n\t}\n\tc.readBody(body)\n\treturn nil\n}\n\nfunc (c *v2client) readBody(body io.ReadCloser) ([]byte, error) {\n\tb, err := ioutil.ReadAll(body)\n\tbody.Close()\n\treturn b, err\n}\n\n\/\/ put sends server side PUT request.\n\/\/ It always follows redirects instead of stopping according to RFC 2616.\nfunc (c *v2client) put(urlStr string, body []byte) (*http.Response, error) {\n\treturn c.doAlwaysFollowingRedirects(\"PUT\", urlStr, body)\n}\n\nfunc (c *v2client) doAlwaysFollowingRedirects(method string, urlStr string, body []byte) (resp *http.Response, err error) {\n\tvar req *http.Request\n\n\tfor redirect := 0; redirect < 10; redirect++ {\n\t\treq, err = http.NewRequest(method, urlStr, bytes.NewBuffer(body))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif resp, err = c.Do(req); err != nil {\n\t\t\tif resp != nil {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif resp.StatusCode == http.StatusMovedPermanently || resp.StatusCode == http.StatusTemporaryRedirect {\n\t\t\tresp.Body.Close()\n\t\t\tif urlStr = resp.Header.Get(\"Location\"); urlStr == \"\" {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"%d response missing Location header\", resp.StatusCode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\treturn\n\t}\n\n\terr = errors.New(\"stopped after 10 redirects\")\n\treturn\n}\n\nfunc clientError(err error) *etcdErr.Error {\n\treturn etcdErr.NewError(etcdErr.EcodeClientInternal, err.Error(), 0)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\ntype PawnEntry struct {\n\thash uint64 \t\/\/ Pawn hash key.\n\tscore Score \t\/\/ Static score for the given pawn structure.\n\tking [2]int \t\/\/ King square for both sides.\n\tcover [2]Score \t\/\/ King cover penalties for both sides.\n\tpassers [2]Bitmask \t\/\/ Passed pawn bitmasks for both sides.\n}\n\ntype PawnCache [8192]PawnEntry\n\nfunc (e *Evaluation) analyzePawns() {\n\tkey := e.position.pawnHash\n\n\t\/\/ Since pawn hash is fairly small we can use much faster 32-bit index.\n\tindex := uint32(key) % uint32(len(game.pawnCache))\n\te.pawns = &game.pawnCache[index]\n\n\t\/\/ Bypass pawns cache if evaluation tracing is enabled.\n\tif e.pawns.hash != key || engine.trace {\n\t\twhite, black := e.pawnStructure(White), e.pawnStructure(Black)\n\t\twhite.apply(weights[1]); black.apply(weights[1]) \/\/ <-- Pawn structure weight.\n\t\te.pawns.score.clear().add(white).subtract(black)\n\t\te.pawns.hash = key\n\n\t\t\/\/ Force full king shelter evaluation since any legit king square\n\t\t\/\/ will be viewed as if the king has moved.\n\t\te.pawns.king[White], e.pawns.king[Black] = -1, -1\n\n\t\tif engine.trace {\n\t\t\te.checkpoint(`Pawns`, Total{white, black})\n\t\t}\n\t}\n\n\te.score.add(e.pawns.score)\n}\n\nfunc (e *Evaluation) analyzePassers() {\n\tvar white, black Score\n\n\tif engine.trace {\n\t\tdefer func() {\n\t\t\te.checkpoint(`Passers`, Total{white, black})\n\t\t}()\n\t}\n\n\twhite, black = e.pawnPassers(White), e.pawnPassers(Black)\n\twhite.apply(weights[2]); black.apply(weights[2]) \/\/ <-- Passed pawns weight.\n\te.score.add(white).subtract(black)\n}\n\n\/\/ Calculates extra bonus and penalty based on pawn structure. Specifically,\n\/\/ a bonus is awarded for passed pawns, and penalty applied for isolated and\n\/\/ doubled pawns.\nfunc (e *Evaluation) pawnStructure(color int) (score Score) {\n\thisPawns := e.position.outposts[pawn(color)]\n\therPawns := e.position.outposts[pawn(color^1)]\n\te.pawns.passers[color] = 0\n\n\t\/\/ Encourage center pawn moves in the opening.\n\tpawns := hisPawns\n\tif e.material.phase > 240 && (pawns & maskCenter) == 0 {\n\t\tscore.midgame += bonusPawnCenter[(pawns & maskCenter).count()]\n\t}\n\n\tfor pawns != 0 {\n\t\tsquare := pawns.pop()\n\t\trow, col := Coordinate(square)\n\n\t\t\/\/ Penalty if the pawn is isolated, i.e. has no friendly pawns\n\t\t\/\/ on adjacent files. The penalty goes up if isolated pawn is\n\t\t\/\/ exposed on semi-open file.\n\t\tisolated := (maskIsolated[col] & hisPawns == 0)\n\t\texposed := (maskInFront[color][square] & herPawns == 0)\n\t\tif isolated {\n\t\t\tif !exposed {\n\t\t\t\tscore.subtract(penaltyIsolatedPawn[col])\n\t\t\t} else {\n\t\t\t\tscore.subtract(penaltyWeakIsolatedPawn[col])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Penalty if the pawn is doubled, i.e. there is another friendly\n\t\t\/\/ pawn in front of us. The penalty goes up if doubled pawns are\n\t\t\/\/ isolated.\n\t\tdoubled := (maskInFront[color][square] & hisPawns != 0)\n\t\tif doubled {\n\t\t\tscore.subtract(penaltyDoubledPawn[col])\n\t\t}\n\n\t\t\/\/ Bonus if the pawn is supported by friendly pawn(s) on the same\n\t\t\/\/ or previous ranks.\n\t\tsupported := (maskIsolated[col] & (maskRank[row] | maskRank[row].pushed(color^1)) & hisPawns != 0)\n\t\tif supported {\n\t\t\tflip := Flip(color, square)\n\t\t\tscore.add(Score{bonusSupportedPawn[flip], bonusSupportedPawn[flip]})\n\t\t}\n\n\t\t\/\/ The pawn is passed if a) there are no enemy pawns in the same\n\t\t\/\/ and adjacent columns; and b) there are no same color pawns in\n\t\t\/\/ front of us.\n\t\tpassed := (maskPassed[color][square] & herPawns == 0 && !doubled)\n\t\tif passed {\n\t\t\te.pawns.passers[color] |= bit[square]\n\t\t}\n\n\t\t\/\/ Penalty if the pawn is backward.\n\t\tif (!passed && !supported && !isolated) {\n\n\t\t\t\/\/ Backward pawn should not be attacking enemy pawns.\n\t\t\tif pawnMoves[color][square] & herPawns == 0 {\n\n\t\t\t\t\/\/ Backward pawn should not have friendly pawns behind.\n\t\t\t\tif maskPassed[color^1][square] & maskIsolated[col] & hisPawns == 0 {\n\n\t\t\t\t\t\/\/ Backward pawn should face enemy pawns on the next two ranks\n\t\t\t\t\t\/\/ preventing its advance.\n\t\t\t\t\tenemy := pawnMoves[color][square].pushed(color)\n\t\t\t\t\tif (enemy | enemy.pushed(color)) & herPawns != 0 {\n\t\t\t\t\t\tif !exposed {\n\t\t\t\t\t\t\tscore.subtract(penaltyBackwardPawn[col])\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tscore.subtract(penaltyWeakBackwardPawn[col])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ TODO: Bonus if the pawn has good chance to become a passed pawn.\n\t}\n\n\treturn\n}\n\nfunc (e *Evaluation) pawnPassers(color int) (score Score) {\n\tp := e.position\n\n\tpawns := e.pawns.passers[color]\n\tfor pawns != 0 {\n\t\tsquare := pawns.pop()\n\t\trow := RelRow(square, color)\n\t\tbonus := bonusPassedPawn[row]\n\n\t\tif row > A2H2 {\n\t\t\textra := extraPassedPawn[row]\n\t\t\tnextSquare := square + eight[color]\n\n\t\t\t\/\/ Adjust endgame bonus based on how close the kings are from the\n\t\t\t\/\/ step forward square.\n\t\t\tbonus.endgame += (distance[p.king[color^1]][nextSquare] * 5 - distance[p.king[color]][nextSquare] * 2) * extra\n\n\t\t\t\/\/ Check if the pawn can step forward.\n\t\t\tif p.board.isClear(nextSquare) {\n\t\t\t\tboost := 0\n\n\t\t\t\t\/\/ Assume all squares in front of the pawn are under attack.\n\t\t\t\tattacked := maskInFront[color][square]\n\t\t\t\tprotected := attacked & e.attacks[color]\n\n\t\t\t\t\/\/ Boost the bonus if squares in front of the pawn are protected.\n\t\t\t\tif protected == attacked {\n\t\t\t\t\tboost += 6 \/\/ All squares.\n\t\t\t\t} else if protected.isSet(nextSquare) {\n\t\t\t\t\tboost += 4 \/\/ Next square only.\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check who is attacking the squares in front of the pawn including\n\t\t\t\t\/\/ queen and rook x-ray attacks from behind.\n\t\t\t\tenemy := maskInFront[color^1][square] & (p.outposts[queen(color^1)] | p.outposts[rook(color^1)])\n\t\t\t\tif enemy == 0 || enemy & p.rookMoves(square) == 0 {\n\n\t\t\t\t\t\/\/ Since nobody attacks the pawn from behind adjust the attacked\n\t\t\t\t\t\/\/ bitmask to only include squares attacked or occupied by the enemy.\n\t\t\t\t\tattacked &= (e.attacks[color^1] | p.outposts[color^1])\n\t\t\t\t}\n\n\t\t\t\t\/\/ Boost the bonus if passed pawn is free to advance to the 8th rank\n\t\t\t\t\/\/ or at least safely step forward.\n\t\t\t\tif attacked == 0 {\n\t\t\t\t\tboost += 15 \/\/ Remaining squares are not under attack.\n\t\t\t\t} else if attacked.isClear(nextSquare) {\n\t\t\t\t\tboost += 9 \/\/ Next square is not under attack.\n\t\t\t\t}\n\n\t\t\t\tif boost > 0 {\n\t\t\t\t\tbonus.adjust(extra * boost)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tscore.add(bonus)\n\t}\n\n\t\/\/ Penalty for blocked pawns.\n\tblocked := (p.outposts[pawn(color)].pushed(color) & p.board).count()\n\tscore.subtract(pawnBlocked.times(blocked))\n\n\treturn\n}\n\n<commit_msg>Semi-passed pawn evaluation<commit_after>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\ntype PawnEntry struct {\n\thash uint64 \t\/\/ Pawn hash key.\n\tscore Score \t\/\/ Static score for the given pawn structure.\n\tking [2]int \t\/\/ King square for both sides.\n\tcover [2]Score \t\/\/ King cover penalties for both sides.\n\tpassers [2]Bitmask \t\/\/ Passed pawn bitmasks for both sides.\n}\n\ntype PawnCache [8192]PawnEntry\n\nfunc (e *Evaluation) analyzePawns() {\n\tkey := e.position.pawnHash\n\n\t\/\/ Since pawn hash is fairly small we can use much faster 32-bit index.\n\tindex := uint32(key) % uint32(len(game.pawnCache))\n\te.pawns = &game.pawnCache[index]\n\n\t\/\/ Bypass pawns cache if evaluation tracing is enabled.\n\tif e.pawns.hash != key || engine.trace {\n\t\twhite, black := e.pawnStructure(White), e.pawnStructure(Black)\n\t\twhite.apply(weights[1]); black.apply(weights[1]) \/\/ <-- Pawn structure weight.\n\t\te.pawns.score.clear().add(white).subtract(black)\n\t\te.pawns.hash = key\n\n\t\t\/\/ Force full king shelter evaluation since any legit king square\n\t\t\/\/ will be viewed as if the king has moved.\n\t\te.pawns.king[White], e.pawns.king[Black] = -1, -1\n\n\t\tif engine.trace {\n\t\t\te.checkpoint(`Pawns`, Total{white, black})\n\t\t}\n\t}\n\n\te.score.add(e.pawns.score)\n}\n\nfunc (e *Evaluation) analyzePassers() {\n\tvar white, black Score\n\n\tif engine.trace {\n\t\tdefer func() {\n\t\t\te.checkpoint(`Passers`, Total{white, black})\n\t\t}()\n\t}\n\n\twhite, black = e.pawnPassers(White), e.pawnPassers(Black)\n\twhite.apply(weights[2]); black.apply(weights[2]) \/\/ <-- Passed pawns weight.\n\te.score.add(white).subtract(black)\n}\n\n\/\/ Calculates extra bonus and penalty based on pawn structure. Specifically,\n\/\/ a bonus is awarded for passed pawns, and penalty applied for isolated and\n\/\/ doubled pawns.\nfunc (e *Evaluation) pawnStructure(color int) (score Score) {\n\thisPawns := e.position.outposts[pawn(color)]\n\therPawns := e.position.outposts[pawn(color^1)]\n\te.pawns.passers[color] = 0\n\n\t\/\/ Encourage center pawn moves in the opening.\n\tpawns := hisPawns\n\tif e.material.phase > 255 && (pawns & maskCenter) == 0 {\n\t\tscore.midgame += bonusPawnCenter[(pawns & maskCenter).count()]\n\t}\n\n\tfor pawns != 0 {\n\t\tsquare := pawns.pop()\n\t\trow, col := Coordinate(square)\n\n\t\t\/\/ Penalty if the pawn is isolated, i.e. has no friendly pawns\n\t\t\/\/ on adjacent files. The penalty goes up if isolated pawn is\n\t\t\/\/ exposed on semi-open file.\n\t\tisolated := (maskIsolated[col] & hisPawns == 0)\n\t\texposed := (maskInFront[color][square] & herPawns == 0)\n\t\tif isolated {\n\t\t\tif !exposed {\n\t\t\t\tscore.subtract(penaltyIsolatedPawn[col])\n\t\t\t} else {\n\t\t\t\tscore.subtract(penaltyWeakIsolatedPawn[col])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Penalty if the pawn is doubled, i.e. there is another friendly\n\t\t\/\/ pawn in front of us. The penalty goes up if doubled pawns are\n\t\t\/\/ isolated.\n\t\tdoubled := (maskInFront[color][square] & hisPawns != 0)\n\t\tif doubled {\n\t\t\tscore.subtract(penaltyDoubledPawn[col])\n\t\t}\n\n\t\t\/\/ Bonus if the pawn is supported by friendly pawn(s) on the same\n\t\t\/\/ or previous ranks.\n\t\tsupported := (maskIsolated[col] & (maskRank[row] | maskRank[row].pushed(color^1)) & hisPawns != 0)\n\t\tif supported {\n\t\t\tflip := Flip(color, square)\n\t\t\tscore.add(Score{bonusSupportedPawn[flip], bonusSupportedPawn[flip]})\n\t\t}\n\n\t\t\/\/ The pawn is passed if a) there are no enemy pawns in the same\n\t\t\/\/ and adjacent columns; and b) there are no same color pawns in\n\t\t\/\/ front of us.\n\t\tpassed := (maskPassed[color][square] & herPawns == 0 && !doubled)\n\t\tif passed {\n\t\t\te.pawns.passers[color] |= bit[square]\n\t\t}\n\n\t\t\/\/ Penalty if the pawn is backward.\n\t\tbackward := false\n\t\tif (!passed && !supported && !isolated) {\n\n\t\t\t\/\/ Backward pawn should not be attacking enemy pawns.\n\t\t\tif pawnMoves[color][square] & herPawns == 0 {\n\n\t\t\t\t\/\/ Backward pawn should not have friendly pawns behind.\n\t\t\t\tif maskPassed[color^1][square] & maskIsolated[col] & hisPawns == 0 {\n\n\t\t\t\t\t\/\/ Backward pawn should face enemy pawns on the next two ranks\n\t\t\t\t\t\/\/ preventing its advance.\n\t\t\t\t\tenemy := pawnMoves[color][square].pushed(color)\n\t\t\t\t\tif (enemy | enemy.pushed(color)) & herPawns != 0 {\n\t\t\t\t\t\tbackward = true\n\t\t\t\t\t\tif !exposed {\n\t\t\t\t\t\t\tscore.subtract(penaltyBackwardPawn[col])\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tscore.subtract(penaltyWeakBackwardPawn[col])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ TODO: Bonus if the pawn has good chance to become a passed pawn.\n\t\tif exposed && supported && !passed && !backward {\n\t\t\this := maskPassed[color^1][square + eight[color]] & maskIsolated[col] & hisPawns\n\t\t\ther := maskPassed[color][square] & maskIsolated[col] & herPawns\n\t\t\tif his.count() >= her.count() {\n\t\t\t\tscore.add(bonusSemiPassedPawn[RelRow(square, color)])\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Penalty for blocked pawns.\n\tblocked := (hisPawns.pushed(color) & e.position.board).count()\n\tscore.subtract(pawnBlocked.times(blocked))\n\n\treturn\n}\n\nfunc (e *Evaluation) pawnPassers(color int) (score Score) {\n\tp := e.position\n\n\tpawns := e.pawns.passers[color]\n\tfor pawns != 0 {\n\t\tsquare := pawns.pop()\n\t\trow := RelRow(square, color)\n\t\tbonus := bonusPassedPawn[row]\n\n\t\tif row > A2H2 {\n\t\t\textra := extraPassedPawn[row]\n\t\t\tnextSquare := square + eight[color]\n\n\t\t\t\/\/ Adjust endgame bonus based on how close the kings are from the\n\t\t\t\/\/ step forward square.\n\t\t\tbonus.endgame += (distance[p.king[color^1]][nextSquare] * 5 - distance[p.king[color]][nextSquare] * 2) * extra\n\n\t\t\t\/\/ Check if the pawn can step forward.\n\t\t\tif p.board.isClear(nextSquare) {\n\t\t\t\tboost := 0\n\n\t\t\t\t\/\/ Assume all squares in front of the pawn are under attack.\n\t\t\t\tattacked := maskInFront[color][square]\n\t\t\t\tprotected := attacked & e.attacks[color]\n\n\t\t\t\t\/\/ Boost the bonus if squares in front of the pawn are protected.\n\t\t\t\tif protected == attacked {\n\t\t\t\t\tboost += 6 \/\/ All squares.\n\t\t\t\t} else if protected.isSet(nextSquare) {\n\t\t\t\t\tboost += 4 \/\/ Next square only.\n\t\t\t\t}\n\n\t\t\t\t\/\/ Check who is attacking the squares in front of the pawn including\n\t\t\t\t\/\/ queen and rook x-ray attacks from behind.\n\t\t\t\tenemy := maskInFront[color^1][square] & (p.outposts[queen(color^1)] | p.outposts[rook(color^1)])\n\t\t\t\tif enemy == 0 || enemy & p.rookMoves(square) == 0 {\n\n\t\t\t\t\t\/\/ Since nobody attacks the pawn from behind adjust the attacked\n\t\t\t\t\t\/\/ bitmask to only include squares attacked or occupied by the enemy.\n\t\t\t\t\tattacked &= (e.attacks[color^1] | p.outposts[color^1])\n\t\t\t\t}\n\n\t\t\t\t\/\/ Boost the bonus if passed pawn is free to advance to the 8th rank\n\t\t\t\t\/\/ or at least safely step forward.\n\t\t\t\tif attacked == 0 {\n\t\t\t\t\tboost += 15 \/\/ Remaining squares are not under attack.\n\t\t\t\t} else if attacked.isClear(nextSquare) {\n\t\t\t\t\tboost += 9 \/\/ Next square is not under attack.\n\t\t\t\t}\n\n\t\t\t\tif boost > 0 {\n\t\t\t\t\tbonus.adjust(extra * boost)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tscore.add(bonus)\n\t}\n\n\treturn\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package sqlboiler has types and methods useful for generating code that\n\/\/ acts as a fully dynamic ORM might.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/vattle\/sqlboiler\/bdb\"\n\t\"github.com\/vattle\/sqlboiler\/bdb\/drivers\"\n\t\"github.com\/vattle\/sqlboiler\/boil\"\n)\n\nconst (\n\ttemplatesDirectory = \"templates\"\n\ttemplatesSingletonDirectory = \"templates\/singleton\"\n\n\ttemplatesTestDirectory = \"templates_test\"\n\ttemplatesSingletonTestDirectory = \"templates_test\/singleton\"\n\n\ttemplatesTestMainDirectory = \"templates_test\/main_test\"\n)\n\n\/\/ State holds the global data needed by most pieces to run\ntype State struct {\n\tConfig *Config\n\n\tDriver bdb.Interface\n\tTables []bdb.Table\n\tDialect boil.Dialect\n\n\tTemplates *templateList\n\tTestTemplates *templateList\n\tSingletonTemplates *templateList\n\tSingletonTestTemplates *templateList\n\n\tTestMainTemplate *template.Template\n}\n\n\/\/ New creates a new state based off of the config\nfunc New(config *Config) (*State, error) {\n\ts := &State{\n\t\tConfig: config,\n\t}\n\n\terr := s.initDriver(config.DriverName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Connect to the driver database\n\tif err = s.Driver.Open(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to connect to the database\")\n\t}\n\n\terr = s.initTables(config.Schema, config.WhitelistTables, config.BlacklistTables)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize tables\")\n\t}\n\n\tif s.Config.Debug {\n\t\tb, err := json.Marshal(s.Tables)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"unable to json marshal tables\")\n\t\t}\n\t\tboil.DebugWriter.Write(b)\n\t\tfmt.Fprintln(boil.DebugWriter)\n\t}\n\n\terr = s.initOutFolder()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize the output folder\")\n\t}\n\n\terr = s.initTemplates()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize templates\")\n\t}\n\n\terr = s.initTags(config.Tags)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize struct tags\")\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Run executes the sqlboiler templates and outputs them to files based on the\n\/\/ state given.\nfunc (s *State) Run(includeTests bool) error {\n\tsingletonData := &templateData{\n\t\tTables: s.Tables,\n\t\tSchema: s.Config.Schema,\n\t\tDriverName: s.Config.DriverName,\n\t\tUseLastInsertID: s.Driver.UseLastInsertID(),\n\t\tPkgName: s.Config.PkgName,\n\t\tNoHooks: s.Config.NoHooks,\n\t\tNoAutoTimestamps: s.Config.NoAutoTimestamps,\n\t\tDialect: s.Dialect,\n\n\t\tStringFuncs: templateStringMappers,\n\t}\n\n\tif err := generateSingletonOutput(s, singletonData); err != nil {\n\t\treturn errors.Wrap(err, \"singleton template output\")\n\t}\n\n\tif !s.Config.NoTests && includeTests {\n\t\tif err := generateTestMainOutput(s, singletonData); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate TestMain output\")\n\t\t}\n\n\t\tif err := generateSingletonTestOutput(s, singletonData); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate singleton test template output\")\n\t\t}\n\t}\n\n\tfor _, table := range s.Tables {\n\t\tif table.IsJoinTable {\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := &templateData{\n\t\t\tTables: s.Tables,\n\t\t\tTable: table,\n\t\t\tSchema: s.Config.Schema,\n\t\t\tDriverName: s.Config.DriverName,\n\t\t\tUseLastInsertID: s.Driver.UseLastInsertID(),\n\t\t\tPkgName: s.Config.PkgName,\n\t\t\tNoHooks: s.Config.NoHooks,\n\t\t\tNoAutoTimestamps: s.Config.NoAutoTimestamps,\n\t\t\tTags: s.Config.Tags,\n\t\t\tDialect: s.Dialect,\n\n\t\t\tStringFuncs: templateStringMappers,\n\t\t}\n\n\t\t\/\/ Generate the regular templates\n\t\tif err := generateOutput(s, data); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate output\")\n\t\t}\n\n\t\t\/\/ Generate the test templates\n\t\tif !s.Config.NoTests && includeTests {\n\t\t\tif err := generateTestOutput(s, data); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to generate test output\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Cleanup closes any resources that must be closed\nfunc (s *State) Cleanup() error {\n\ts.Driver.Close()\n\treturn nil\n}\n\n\/\/ initTemplates loads all template folders into the state object.\nfunc (s *State) initTemplates() error {\n\tvar err error\n\n\tbasePath, err := getBasePath(s.Config.BaseDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.Templates, err = loadTemplates(filepath.Join(basePath, templatesDirectory))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.SingletonTemplates, err = loadTemplates(filepath.Join(basePath, templatesSingletonDirectory))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !s.Config.NoTests {\n\t\ts.TestTemplates, err = loadTemplates(filepath.Join(basePath, templatesTestDirectory))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.SingletonTestTemplates, err = loadTemplates(filepath.Join(basePath, templatesSingletonTestDirectory))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.TestMainTemplate, err = loadTemplate(filepath.Join(basePath, templatesTestMainDirectory), s.Config.DriverName+\"_main.tpl\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar basePackage = \"github.com\/vattle\/sqlboiler\"\n\nfunc getBasePath(baseDirConfig string) (string, error) {\n\tif len(baseDirConfig) > 0 {\n\t\treturn baseDirConfig, nil\n\t}\n\n\tp, _ := build.Default.Import(basePackage, \"\", build.FindOnly)\n\tif p != nil && len(p.Dir) > 0 {\n\t\treturn p.Dir, nil\n\t}\n\n\treturn os.Getwd()\n}\n\n\/\/ initDriver attempts to set the state Interface based off the passed in\n\/\/ driver flag value. If an invalid flag string is provided an error is returned.\nfunc (s *State) initDriver(driverName string) error {\n\t\/\/ Create a driver based off driver flag\n\tswitch driverName {\n\tcase \"postgres\":\n\t\ts.Driver = drivers.NewPostgresDriver(\n\t\t\ts.Config.Postgres.User,\n\t\t\ts.Config.Postgres.Pass,\n\t\t\ts.Config.Postgres.DBName,\n\t\t\ts.Config.Postgres.Host,\n\t\t\ts.Config.Postgres.Port,\n\t\t\ts.Config.Postgres.SSLMode,\n\t\t)\n\tcase \"mysql\":\n\t\ts.Driver = drivers.NewMySQLDriver(\n\t\t\ts.Config.MySQL.User,\n\t\t\ts.Config.MySQL.Pass,\n\t\t\ts.Config.MySQL.DBName,\n\t\t\ts.Config.MySQL.Host,\n\t\t\ts.Config.MySQL.Port,\n\t\t\ts.Config.MySQL.SSLMode,\n\t\t)\n\tcase \"mock\":\n\t\ts.Driver = &drivers.MockDriver{}\n\t}\n\n\tif s.Driver == nil {\n\t\treturn errors.New(\"An invalid driver name was provided\")\n\t}\n\n\ts.Dialect.LQ = s.Driver.LeftQuote()\n\ts.Dialect.RQ = s.Driver.RightQuote()\n\ts.Dialect.IndexPlaceholders = s.Driver.IndexPlaceholders()\n\n\treturn nil\n}\n\n\/\/ initTables retrieves all \"public\" schema table names from the database.\nfunc (s *State) initTables(schema string, whitelist, blacklist []string) error {\n\tvar err error\n\ts.Tables, err = bdb.Tables(s.Driver, schema, whitelist, blacklist)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to fetch table data\")\n\t}\n\n\tif len(s.Tables) == 0 {\n\t\treturn errors.New(\"no tables found in database\")\n\t}\n\n\tif err := checkPKeys(s.Tables); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Tags must be in a format like: json, xml, etc.\nvar rgxValidTag = regexp.MustCompile(`[a-zA-Z_\\.]+`)\n\n\/\/ initTags removes duplicate tags and validates the format\n\/\/ of all user tags are simple strings without quotes: [a-zA-Z_\\.]+\nfunc (s *State) initTags(tags []string) error {\n\ts.Config.Tags = removeDuplicates(s.Config.Tags)\n\tfor _, v := range s.Config.Tags {\n\t\tif !rgxValidTag.MatchString(v) {\n\t\t\treturn errors.New(\"Invalid tag format %q supplied, only specify name, eg: xml\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ initOutFolder creates the folder that will hold the generated output.\nfunc (s *State) initOutFolder() error {\n\treturn os.MkdirAll(s.Config.OutFolder, os.ModePerm)\n}\n\n\/\/ checkPKeys ensures every table has a primary key column\nfunc checkPKeys(tables []bdb.Table) error {\n\tvar missingPkey []string\n\tfor _, t := range tables {\n\t\tif t.PKey == nil {\n\t\t\tmissingPkey = append(missingPkey, t.Name)\n\t\t}\n\t}\n\n\tif len(missingPkey) != 0 {\n\t\treturn errors.Errorf(\"primary key missing in tables (%s)\", strings.Join(missingPkey, \", \"))\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix bug in debug output<commit_after>\/\/ Package sqlboiler has types and methods useful for generating code that\n\/\/ acts as a fully dynamic ORM might.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/vattle\/sqlboiler\/bdb\"\n\t\"github.com\/vattle\/sqlboiler\/bdb\/drivers\"\n\t\"github.com\/vattle\/sqlboiler\/boil\"\n)\n\nconst (\n\ttemplatesDirectory = \"templates\"\n\ttemplatesSingletonDirectory = \"templates\/singleton\"\n\n\ttemplatesTestDirectory = \"templates_test\"\n\ttemplatesSingletonTestDirectory = \"templates_test\/singleton\"\n\n\ttemplatesTestMainDirectory = \"templates_test\/main_test\"\n)\n\n\/\/ State holds the global data needed by most pieces to run\ntype State struct {\n\tConfig *Config\n\n\tDriver bdb.Interface\n\tTables []bdb.Table\n\tDialect boil.Dialect\n\n\tTemplates *templateList\n\tTestTemplates *templateList\n\tSingletonTemplates *templateList\n\tSingletonTestTemplates *templateList\n\n\tTestMainTemplate *template.Template\n}\n\n\/\/ New creates a new state based off of the config\nfunc New(config *Config) (*State, error) {\n\ts := &State{\n\t\tConfig: config,\n\t}\n\n\terr := s.initDriver(config.DriverName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Connect to the driver database\n\tif err = s.Driver.Open(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to connect to the database\")\n\t}\n\n\terr = s.initTables(config.Schema, config.WhitelistTables, config.BlacklistTables)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize tables\")\n\t}\n\n\tif s.Config.Debug {\n\t\tb, err := json.Marshal(s.Tables)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"unable to json marshal tables\")\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", b)\n\t}\n\n\terr = s.initOutFolder()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize the output folder\")\n\t}\n\n\terr = s.initTemplates()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize templates\")\n\t}\n\n\terr = s.initTags(config.Tags)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to initialize struct tags\")\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Run executes the sqlboiler templates and outputs them to files based on the\n\/\/ state given.\nfunc (s *State) Run(includeTests bool) error {\n\tsingletonData := &templateData{\n\t\tTables: s.Tables,\n\t\tSchema: s.Config.Schema,\n\t\tDriverName: s.Config.DriverName,\n\t\tUseLastInsertID: s.Driver.UseLastInsertID(),\n\t\tPkgName: s.Config.PkgName,\n\t\tNoHooks: s.Config.NoHooks,\n\t\tNoAutoTimestamps: s.Config.NoAutoTimestamps,\n\t\tDialect: s.Dialect,\n\n\t\tStringFuncs: templateStringMappers,\n\t}\n\n\tif err := generateSingletonOutput(s, singletonData); err != nil {\n\t\treturn errors.Wrap(err, \"singleton template output\")\n\t}\n\n\tif !s.Config.NoTests && includeTests {\n\t\tif err := generateTestMainOutput(s, singletonData); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate TestMain output\")\n\t\t}\n\n\t\tif err := generateSingletonTestOutput(s, singletonData); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate singleton test template output\")\n\t\t}\n\t}\n\n\tfor _, table := range s.Tables {\n\t\tif table.IsJoinTable {\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := &templateData{\n\t\t\tTables: s.Tables,\n\t\t\tTable: table,\n\t\t\tSchema: s.Config.Schema,\n\t\t\tDriverName: s.Config.DriverName,\n\t\t\tUseLastInsertID: s.Driver.UseLastInsertID(),\n\t\t\tPkgName: s.Config.PkgName,\n\t\t\tNoHooks: s.Config.NoHooks,\n\t\t\tNoAutoTimestamps: s.Config.NoAutoTimestamps,\n\t\t\tTags: s.Config.Tags,\n\t\t\tDialect: s.Dialect,\n\n\t\t\tStringFuncs: templateStringMappers,\n\t\t}\n\n\t\t\/\/ Generate the regular templates\n\t\tif err := generateOutput(s, data); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to generate output\")\n\t\t}\n\n\t\t\/\/ Generate the test templates\n\t\tif !s.Config.NoTests && includeTests {\n\t\t\tif err := generateTestOutput(s, data); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to generate test output\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Cleanup closes any resources that must be closed\nfunc (s *State) Cleanup() error {\n\ts.Driver.Close()\n\treturn nil\n}\n\n\/\/ initTemplates loads all template folders into the state object.\nfunc (s *State) initTemplates() error {\n\tvar err error\n\n\tbasePath, err := getBasePath(s.Config.BaseDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.Templates, err = loadTemplates(filepath.Join(basePath, templatesDirectory))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.SingletonTemplates, err = loadTemplates(filepath.Join(basePath, templatesSingletonDirectory))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !s.Config.NoTests {\n\t\ts.TestTemplates, err = loadTemplates(filepath.Join(basePath, templatesTestDirectory))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.SingletonTestTemplates, err = loadTemplates(filepath.Join(basePath, templatesSingletonTestDirectory))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.TestMainTemplate, err = loadTemplate(filepath.Join(basePath, templatesTestMainDirectory), s.Config.DriverName+\"_main.tpl\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar basePackage = \"github.com\/vattle\/sqlboiler\"\n\nfunc getBasePath(baseDirConfig string) (string, error) {\n\tif len(baseDirConfig) > 0 {\n\t\treturn baseDirConfig, nil\n\t}\n\n\tp, _ := build.Default.Import(basePackage, \"\", build.FindOnly)\n\tif p != nil && len(p.Dir) > 0 {\n\t\treturn p.Dir, nil\n\t}\n\n\treturn os.Getwd()\n}\n\n\/\/ initDriver attempts to set the state Interface based off the passed in\n\/\/ driver flag value. If an invalid flag string is provided an error is returned.\nfunc (s *State) initDriver(driverName string) error {\n\t\/\/ Create a driver based off driver flag\n\tswitch driverName {\n\tcase \"postgres\":\n\t\ts.Driver = drivers.NewPostgresDriver(\n\t\t\ts.Config.Postgres.User,\n\t\t\ts.Config.Postgres.Pass,\n\t\t\ts.Config.Postgres.DBName,\n\t\t\ts.Config.Postgres.Host,\n\t\t\ts.Config.Postgres.Port,\n\t\t\ts.Config.Postgres.SSLMode,\n\t\t)\n\tcase \"mysql\":\n\t\ts.Driver = drivers.NewMySQLDriver(\n\t\t\ts.Config.MySQL.User,\n\t\t\ts.Config.MySQL.Pass,\n\t\t\ts.Config.MySQL.DBName,\n\t\t\ts.Config.MySQL.Host,\n\t\t\ts.Config.MySQL.Port,\n\t\t\ts.Config.MySQL.SSLMode,\n\t\t)\n\tcase \"mock\":\n\t\ts.Driver = &drivers.MockDriver{}\n\t}\n\n\tif s.Driver == nil {\n\t\treturn errors.New(\"An invalid driver name was provided\")\n\t}\n\n\ts.Dialect.LQ = s.Driver.LeftQuote()\n\ts.Dialect.RQ = s.Driver.RightQuote()\n\ts.Dialect.IndexPlaceholders = s.Driver.IndexPlaceholders()\n\n\treturn nil\n}\n\n\/\/ initTables retrieves all \"public\" schema table names from the database.\nfunc (s *State) initTables(schema string, whitelist, blacklist []string) error {\n\tvar err error\n\ts.Tables, err = bdb.Tables(s.Driver, schema, whitelist, blacklist)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to fetch table data\")\n\t}\n\n\tif len(s.Tables) == 0 {\n\t\treturn errors.New(\"no tables found in database\")\n\t}\n\n\tif err := checkPKeys(s.Tables); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Tags must be in a format like: json, xml, etc.\nvar rgxValidTag = regexp.MustCompile(`[a-zA-Z_\\.]+`)\n\n\/\/ initTags removes duplicate tags and validates the format\n\/\/ of all user tags are simple strings without quotes: [a-zA-Z_\\.]+\nfunc (s *State) initTags(tags []string) error {\n\ts.Config.Tags = removeDuplicates(s.Config.Tags)\n\tfor _, v := range s.Config.Tags {\n\t\tif !rgxValidTag.MatchString(v) {\n\t\t\treturn errors.New(\"Invalid tag format %q supplied, only specify name, eg: xml\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ initOutFolder creates the folder that will hold the generated output.\nfunc (s *State) initOutFolder() error {\n\treturn os.MkdirAll(s.Config.OutFolder, os.ModePerm)\n}\n\n\/\/ checkPKeys ensures every table has a primary key column\nfunc checkPKeys(tables []bdb.Table) error {\n\tvar missingPkey []string\n\tfor _, t := range tables {\n\t\tif t.PKey == nil {\n\t\t\tmissingPkey = append(missingPkey, t.Name)\n\t\t}\n\t}\n\n\tif len(missingPkey) != 0 {\n\t\treturn errors.Errorf(\"primary key missing in tables (%s)\", strings.Join(missingPkey, \", \"))\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package postgresql\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/amonapp\/amonagent\/logging\"\n\t\"github.com\/amonapp\/amonagent\/plugins\"\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\/\/ Postgres Driver\n\t_ \"github.com\/lib\/pq\"\n)\n\nvar pluginLogger = logging.GetLogger(\"amonagent.postgresql\")\n\n\/\/ Counters - XXX\nvar Counters = map[string]string{\n\t\"xact_commit\": \"xact.commits\",\n\t\"xact_rollback\": \"xact.rollbacks\",\n\t\"blks_read\": \"performance.disk_read\",\n\t\"blks_hit\": \"performance.buffer_hit\",\n\t\"tup_returned\": \"rows.returned\",\n\t\"tup_fetched\": \"rows.fetched\",\n\t\"tup_inserted\": \"rows.inserted\",\n\t\"tup_updated\": \"rows.updated\",\n\t\"tup_deleted\": \"rows.deleted\",\n}\n\n\/\/ Gauges - XXX\nvar Gauges = map[string]string{\n\t\"numbackends\": \"connections\",\n}\n\n\/\/ DatabaseStatsSQL - XXX\nvar DatabaseStatsSQL = `SELECT %s FROM pg_stat_database WHERE datname IN ('%s')`\n\n\/\/ MissingIndexesSQL - XXX\nvar MissingIndexesSQL = `SELECT\n\t\t relname AS table,\n\t\t CASE idx_scan\n\t\t\tWHEN 0 THEN 'Insufficient data'\n\t\t\tELSE (100 * idx_scan \/ (seq_scan + idx_scan))::text\n\t\t END percent_of_times_index_used,\n\t\t n_live_tup rows_in_table\n\t\tFROM\n\t\t pg_stat_user_tables\n\t\tWHERE\n\t\t idx_scan > 0\n\t\t AND (100 * idx_scan \/ (seq_scan + idx_scan)) < 95\n\t\t AND n_live_tup >= 10000\n\t\tORDER BY\n\t\t n_live_tup DESC,\n\t\t relname ASC\n`\n\n\/\/ SlowQueriesSQL - XXX\nvar SlowQueriesSQL = `SELECT * FROM\n\t\t(SELECT\n\t\t\t\tcalls,\n\t\t\t\tround(total_time :: NUMERIC, 1) AS total,\n\t\t\t\tround(\n\t\t\t\t\t(total_time \/ calls) :: NUMERIC,\n\t\t\t\t\t3\n\t\t\t\t) AS per_call,\n\t\t\t\tregexp_replace(query, '[ \\t\\n]+', ' ', 'g') AS query\n\t\tFROM\n\t\t\t\tpg_stat_statements\n\t\tWHERE\n\t\tcalls > 100\n\t\tORDER BY\n\t\t\t\ttotal_time \/ calls DESC\n\t\tLIMIT 15\n\t\t) AS inner_table\n\t\tWHERE\n\t\t per_call > 5\n`\n\n\/\/ https:\/\/gist.github.com\/mattsoldo\/3853455\nvar IndexCacheHitRateSQL = `\n\t\t-- Index hit rate\n\t\tWITH idx_hit_rate as (\n\t\tSELECT\n\t\t relname as table_name,\n\t\t n_live_tup,\n\t\t round(100.0 * idx_scan \/ (seq_scan + idx_scan + 0.000001),2) as idx_hit_rate\n\t\tFROM pg_stat_user_tables\n\t\tORDER BY n_live_tup DESC\n\t\t),\n\n\t\t-- Cache hit rate\n\t\tcache_hit_rate as (\n\t\tSELECT\n\t\t relname as table_name,\n\t\t heap_blks_read + heap_blks_hit as reads,\n\t\t round(100.0 * sum (heap_blks_read + heap_blks_hit) over (ORDER BY heap_blks_read + heap_blks_hit DESC) \/ sum(heap_blks_read + heap_blks_hit + 0.000001) over (),4) as cumulative_pct_reads,\n\t\t round(100.0 * heap_blks_hit \/ (heap_blks_hit + heap_blks_read + 0.000001),2) as cache_hit_rate\n\t\tFROM pg_statio_user_tables\n\t\tWHERE heap_blks_hit + heap_blks_read > 0\n\t\tORDER BY 2 DESC\n\t\t)\n\n\t\tSELECT\n\t\t idx_hit_rate.table_name,\n\t\t idx_hit_rate.n_live_tup as size,\n\t\t cache_hit_rate.reads,\n\t\t cache_hit_rate.cumulative_pct_reads,\n\t\t idx_hit_rate.idx_hit_rate,\n\t\t cache_hit_rate.cache_hit_rate\n\t\tFROM idx_hit_rate, cache_hit_rate\n\t\tWHERE idx_hit_rate.table_name = cache_hit_rate.table_name\n\t\t AND cumulative_pct_reads < 100.0\n\t\tORDER BY reads DESC;\n`\n\n\/\/ TableSizeSQL - XXX\nvar TableSizeSQL = `\n\t\tSELECT\n\t\t\tC .relname AS NAME,\n\t\t\tCASE\n\t\tWHEN C .relkind = 'r' THEN\n\t\t\t'table'\n\t\tELSE\n\t\t\t'index'\n\t\tEND AS TYPE,\n\t\t pg_table_size(C .oid) AS SIZE\n\t\tFROM\n\t\t\tpg_class C\n\t\tLEFT JOIN pg_namespace n ON (n.oid = C .relnamespace)\n\t\tWHERE\n\t\t\tn.nspname NOT IN (\n\t\t\t\t'pg_catalog',\n\t\t\t\t'information_schema'\n\t\t\t)\n\t\tAND n.nspname !~ '^pg_toast'\n\t\tAND C .relkind IN ('r', 'i')\n\t\tORDER BY\n\t\t\tpg_table_size (C .oid) DESC,\n\t\t\tNAME ASC\n`\n\n\/\/ IndexHitRateData - XXX\ntype IndexHitRateData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\n\/\/ TableSizeData - XXX\ntype TableSizeData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\n\/\/ SlowQueriesData - XXX\ntype SlowQueriesData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\nfunc (p PerformanceStruct) String() string {\n\ts, _ := json.Marshal(p)\n\treturn string(s)\n}\n\n\/\/ Config - XXX\ntype Config struct {\n\tHost string\n\tDB string\n}\n\nvar sampleConfig = `\n# Available config options:\n#\n# {\"host\": \"postgres:\/\/user:password@localhost:port\/dbname\"}\n#\n# Config location: \/etc\/opt\/amonagent\/plugins-enabled\/postgresql.conf\n`\n\n\/\/ SampleConfig - XXX\nfunc (p *PostgreSQL) SampleConfig() string {\n\treturn sampleConfig\n}\n\n\/\/ SetConfigDefaults - XXX\nfunc (p *PostgreSQL) SetConfigDefaults(configPath string) error {\n\tc, err := plugins.ReadConfigPath(configPath)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't read config file: %s %v\\n\", configPath, err)\n\t}\n\tvar config Config\n\tdecodeError := mapstructure.Decode(c, &config)\n\tif decodeError != nil {\n\t\tfmt.Print(\"Can't decode config file\", decodeError.Error())\n\t}\n\n\tu, _ := url.Parse(config.Host)\n\tconfig.DB = strings.Trim(u.Path, \"\/\")\n\n\tp.Config = config\n\n\treturn nil\n}\n\n\/\/ PostgreSQL - XXX\ntype PostgreSQL struct {\n\tConfig Config\n}\n\n\/\/ PerformanceStruct - XXX\ntype PerformanceStruct struct {\n\tTableSizeData `json:\"tables_size\"`\n\tIndexHitRateData `json:\"index_hit_rate\"`\n\tSlowQueriesData `json:\"slow_queries\"`\n\tGauges map[string]interface{} `json:\"gauges\"`\n\tCounters map[string]interface{} `json:\"counters\"`\n}\n\n\/\/ Description - XXX\nfunc (p *PostgreSQL) Description() string {\n\treturn \"Read metrics from a PostgreSQL server\"\n}\n\n\/\/ Collect - XXX\nfunc (p *PostgreSQL) Collect(configPath string) (interface{}, error) {\n\tPerformanceStruct := PerformanceStruct{}\n\tp.SetConfigDefaults(configPath)\n\n\tdb, err := sql.Open(\"postgres\", p.Config.Host)\n\tif err != nil {\n\t\tpluginLogger.Errorf(\"Can't connect to database': %v\", err)\n\t\treturn PerformanceStruct, err\n\t}\n\n\tdefer db.Close()\n\n\trawResult := make([][]byte, len(Counters))\n\tcounters := make(map[string]interface{})\n\tdest := make([]interface{}, len(Counters)) \/\/ A temporary interface{} slice\n\tfor i := range rawResult {\n\t\tdest[i] = &rawResult[i] \/\/ Put pointers to each string in the interface slice\n\t}\n\tvar counterColumns []string\n\tfor key := range Counters {\n\t\tcounterColumns = append(counterColumns, key)\n\t}\n\tCountersQuery := fmt.Sprintf(DatabaseStatsSQL, strings.Join(counterColumns, \", \"), p.Config.DB)\n\tCounterRows, errCounterRows := db.Query(CountersQuery)\n\tif errCounterRows == nil {\n\n\t\tfor CounterRows.Next() {\n\t\t\terr = CounterRows.Scan(dest...)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Counter stats': %v\", err)\n\n\t\t\t}\n\n\t\t\tfor i, val := range rawResult {\n\t\t\t\tkey := counterColumns[i]\n\t\t\t\tcounters[key] = string(val)\n\t\t\t}\n\t\t}\n\t}\n\tdefer CounterRows.Close()\n\n\tgauges := make(map[string]interface{})\n\tGaugesQuery := fmt.Sprintf(DatabaseStatsSQL, \"numbackends\", \"amon\")\n\tGaugeRows, errGaugeRows := db.Query(GaugesQuery)\n\tdefer GaugeRows.Close()\n\tif errGaugeRows == nil {\n\n\t\tfor GaugeRows.Next() {\n\t\t\tvar connections int\n\t\t\terr = GaugeRows.Scan(&connections)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Gauges': %v\", err)\n\n\t\t\t}\n\t\t\tgauges[\"connections\"] = connections\n\n\t\t}\n\n\t}\n\n\tTableSizeRows, errTableSizeRows := db.Query(TableSizeSQL)\n\tTableSizeHeaders := []string{\"name\", \"type\", \"size\"}\n\tTableSizeData := TableSizeData{Headers: TableSizeHeaders}\n\n\tdefer TableSizeRows.Close()\n\tif errTableSizeRows == nil {\n\n\t\tfor TableSizeRows.Next() {\n\t\t\t\/\/ TABLES_SIZE_ROWS = ['name','type','size']\n\t\t\tvar name string\n\t\t\tvar Type string\n\t\t\tvar size int64\n\n\t\t\terr = TableSizeRows.Scan(&name, &Type, &size)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Table size rows': %v\", err)\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, name)\n\t\t\tfields = append(fields, Type)\n\t\t\tfields = append(fields, size)\n\n\t\t\tTableSizeData.Data = append(TableSizeData.Data, fields)\n\n\t\t}\n\n\t}\n\n\tPerformanceStruct.TableSizeData = TableSizeData\n\n\tIndexHitRows, errIndexHitRows := db.Query(IndexCacheHitRateSQL)\n\tIndexHitHeaders := []string{\"table_name\", \"size\", \"reads\",\n\t\t\"cumulative_pct_reads\", \"index_hit_rate\", \"cache_hit_rate\"}\n\tIndexHitData := IndexHitRateData{Headers: IndexHitHeaders}\n\tdefer IndexHitRows.Close()\n\n\tif errIndexHitRows == nil {\n\n\t\tfor IndexHitRows.Next() {\n\t\t\t\/\/ INDEX_CACHE_HIT_RATE_ROWS = ['table_name','size','reads',\n\t\t\t\/\/ 'cumulative_pct_reads', 'index_hit_rate', 'cache_hit_rate']\n\n\t\t\tvar TableName string\n\t\t\tvar Size string\n\t\t\tvar Read int64\n\t\t\tvar CumulativeReads float64\n\t\t\tvar IndexHitRate float64\n\t\t\tvar CacheHitRate float64\n\n\t\t\terr = IndexHitRows.Scan(&TableName, &Size, &Read, &CumulativeReads, &IndexHitRate, &CacheHitRate)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Index Hit Rate tables': %v\", err)\n\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, TableName)\n\t\t\tfields = append(fields, Size)\n\t\t\tfields = append(fields, Read)\n\t\t\tfields = append(fields, CumulativeReads)\n\t\t\tfields = append(fields, IndexHitRate)\n\t\t\tfields = append(fields, CacheHitRate)\n\n\t\t\tIndexHitData.Data = append(IndexHitData.Data, fields)\n\n\t\t}\n\t}\n\n\tPerformanceStruct.IndexHitRateData = IndexHitData\n\n\tSlowQueriesRows, errSlowQueriesRows := db.Query(SlowQueriesSQL)\n\tSlowQueriesHeaders := []string{\"calls\", \"total\", \"per_call\", \"query\"}\n\tSlowQueriesData := SlowQueriesData{Headers: SlowQueriesHeaders}\n\n\tdefer SlowQueriesRows.Close()\n\n\tif errSlowQueriesRows == nil {\n\t\tfor SlowQueriesRows.Next() {\n\n\t\t\tvar Calls int64\n\t\t\tvar Total float64\n\t\t\tvar PerCall float64\n\t\t\tvar Query string\n\n\t\t\terr = SlowQueriesRows.Scan(&Calls, &Total, &PerCall, &Query)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Slow Queries': %v\", err)\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, Calls)\n\t\t\tfields = append(fields, Total)\n\t\t\tfields = append(fields, PerCall)\n\t\t\tfields = append(fields, Query)\n\n\t\t\tSlowQueriesData.Data = append(SlowQueriesData.Data, fields)\n\n\t\t}\n\t}\n\n\tPerformanceStruct.SlowQueriesData = SlowQueriesData\n\n\tPerformanceStruct.Counters = counters\n\tPerformanceStruct.Gauges = gauges\n\n\treturn PerformanceStruct, nil\n}\n\nfunc init() {\n\tplugins.Add(\"postgresql\", func() plugins.Plugin {\n\t\treturn &PostgreSQL{}\n\t})\n}\n<commit_msg>Update postgres plugin<commit_after>package postgresql\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/amonapp\/amonagent\/logging\"\n\t\"github.com\/amonapp\/amonagent\/plugins\"\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\/\/ Postgres Driver\n\t_ \"github.com\/lib\/pq\"\n)\n\nvar pluginLogger = logging.GetLogger(\"amonagent.postgresql\")\n\n\/\/ Counters - XXX\nvar Counters = map[string]string{\n\t\"xact_commit\": \"xact.commits\",\n\t\"xact_rollback\": \"xact.rollbacks\",\n\t\"blks_read\": \"performance.disk_read\",\n\t\"blks_hit\": \"performance.buffer_hit\",\n\t\"tup_returned\": \"rows.returned\",\n\t\"tup_fetched\": \"rows.fetched\",\n\t\"tup_inserted\": \"rows.inserted\",\n\t\"tup_updated\": \"rows.updated\",\n\t\"tup_deleted\": \"rows.deleted\",\n}\n\n\/\/ Gauges - XXX\nvar Gauges = map[string]string{\n\t\"numbackends\": \"connections\",\n}\n\n\/\/ DatabaseStatsSQL - XXX\nvar DatabaseStatsSQL = `SELECT %s FROM pg_stat_database WHERE datname IN ('%s')`\n\n\/\/ MissingIndexesSQL - XXX\nvar MissingIndexesSQL = `SELECT\n\t\t relname AS table,\n\t\t CASE idx_scan\n\t\t\tWHEN 0 THEN 'Insufficient data'\n\t\t\tELSE (100 * idx_scan \/ (seq_scan + idx_scan))::text\n\t\t END percent_of_times_index_used,\n\t\t n_live_tup rows_in_table\n\t\tFROM\n\t\t pg_stat_user_tables\n\t\tWHERE\n\t\t idx_scan > 0\n\t\t AND (100 * idx_scan \/ (seq_scan + idx_scan)) < 95\n\t\t AND n_live_tup >= 10000\n\t\tORDER BY\n\t\t n_live_tup DESC,\n\t\t relname ASC\n`\n\n\/\/ SlowQueriesSQL - XXX\nvar SlowQueriesSQL = `SELECT * FROM\n\t\t(SELECT\n\t\t\t\tcalls,\n\t\t\t\tround(total_time :: NUMERIC, 1) AS total,\n\t\t\t\tround(\n\t\t\t\t\t(total_time \/ calls) :: NUMERIC,\n\t\t\t\t\t3\n\t\t\t\t) AS per_call,\n\t\t\t\tregexp_replace(query, '[ \\t\\n]+', ' ', 'g') AS query\n\t\tFROM\n\t\t\t\tpg_stat_statements\n\t\tWHERE\n\t\tcalls > 100\n\t\tORDER BY\n\t\t\t\ttotal_time \/ calls DESC\n\t\tLIMIT 15\n\t\t) AS inner_table\n\t\tWHERE\n\t\t per_call > 5\n`\n\n\/\/ https:\/\/gist.github.com\/mattsoldo\/3853455\nvar IndexCacheHitRateSQL = `\n\t\t-- Index hit rate\n\t\tWITH idx_hit_rate as (\n\t\tSELECT\n\t\t relname as table_name,\n\t\t n_live_tup,\n\t\t round(100.0 * idx_scan \/ (seq_scan + idx_scan + 0.000001),2) as idx_hit_rate\n\t\tFROM pg_stat_user_tables\n\t\tORDER BY n_live_tup DESC\n\t\t),\n\n\t\t-- Cache hit rate\n\t\tcache_hit_rate as (\n\t\tSELECT\n\t\t relname as table_name,\n\t\t heap_blks_read + heap_blks_hit as reads,\n\t\t round(100.0 * sum (heap_blks_read + heap_blks_hit) over (ORDER BY heap_blks_read + heap_blks_hit DESC) \/ sum(heap_blks_read + heap_blks_hit + 0.000001) over (),4) as cumulative_pct_reads,\n\t\t round(100.0 * heap_blks_hit \/ (heap_blks_hit + heap_blks_read + 0.000001),2) as cache_hit_rate\n\t\tFROM pg_statio_user_tables\n\t\tWHERE heap_blks_hit + heap_blks_read > 0\n\t\tORDER BY 2 DESC\n\t\t)\n\n\t\tSELECT\n\t\t idx_hit_rate.table_name,\n\t\t idx_hit_rate.n_live_tup as size,\n\t\t cache_hit_rate.reads,\n\t\t cache_hit_rate.cumulative_pct_reads,\n\t\t idx_hit_rate.idx_hit_rate,\n\t\t cache_hit_rate.cache_hit_rate\n\t\tFROM idx_hit_rate, cache_hit_rate\n\t\tWHERE idx_hit_rate.table_name = cache_hit_rate.table_name\n\t\t AND cumulative_pct_reads < 100.0\n\t\tORDER BY reads DESC;\n`\n\n\/\/ TableSizeSQL - XXX\nvar TableSizeSQL = `\n\t\tSELECT\n\t\t\tC .relname AS NAME,\n\t\t\tCASE\n\t\tWHEN C .relkind = 'r' THEN\n\t\t\t'table'\n\t\tELSE\n\t\t\t'index'\n\t\tEND AS TYPE,\n\t\t pg_table_size(C .oid) AS SIZE\n\t\tFROM\n\t\t\tpg_class C\n\t\tLEFT JOIN pg_namespace n ON (n.oid = C .relnamespace)\n\t\tWHERE\n\t\t\tn.nspname NOT IN (\n\t\t\t\t'pg_catalog',\n\t\t\t\t'information_schema'\n\t\t\t)\n\t\tAND n.nspname !~ '^pg_toast'\n\t\tAND C .relkind IN ('r', 'i')\n\t\tORDER BY\n\t\t\tpg_table_size (C .oid) DESC,\n\t\t\tNAME ASC\n`\n\n\/\/ IndexHitRateData - XXX\ntype IndexHitRateData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\n\/\/ TableSizeData - XXX\ntype TableSizeData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\n\/\/ SlowQueriesData - XXX\ntype SlowQueriesData struct {\n\tHeaders []string `json:\"headers\"`\n\tData []interface{} `json:\"data\"`\n}\n\nfunc (p PerformanceStruct) String() string {\n\ts, _ := json.Marshal(p)\n\treturn string(s)\n}\n\n\/\/ Config - XXX\ntype Config struct {\n\tHost string\n\tDB string\n}\n\nvar sampleConfig = `\n# Available config options:\n#\n# {\"host\": \"postgres:\/\/user:password@localhost:port\/dbname\"}\n#\n# Config location: \/etc\/opt\/amonagent\/plugins-enabled\/postgresql.conf\n`\n\n\/\/ SampleConfig - XXX\nfunc (p *PostgreSQL) SampleConfig() string {\n\treturn sampleConfig\n}\n\n\/\/ SetConfigDefaults - XXX\nfunc (p *PostgreSQL) SetConfigDefaults(configPath string) error {\n\tc, err := plugins.ReadConfigPath(configPath)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't read config file: %s %v\\n\", configPath, err)\n\t}\n\tvar config Config\n\tdecodeError := mapstructure.Decode(c, &config)\n\tif decodeError != nil {\n\t\tfmt.Print(\"Can't decode config file\", decodeError.Error())\n\t}\n\n\tu, _ := url.Parse(config.Host)\n\tconfig.DB = strings.Trim(u.Path, \"\/\")\n\n\tp.Config = config\n\n\treturn nil\n}\n\n\/\/ PostgreSQL - XXX\ntype PostgreSQL struct {\n\tConfig Config\n}\n\n\/\/ PerformanceStruct - XXX\ntype PerformanceStruct struct {\n\tTableSizeData `json:\"tables_size\"`\n\tIndexHitRateData `json:\"index_hit_rate\"`\n\tSlowQueriesData `json:\"slow_queries\"`\n\tGauges map[string]interface{} `json:\"gauges\"`\n\tCounters map[string]interface{} `json:\"counters\"`\n}\n\n\/\/ Description - XXX\nfunc (p *PostgreSQL) Description() string {\n\treturn \"Read metrics from a PostgreSQL server\"\n}\n\n\/\/ Collect - XXX\nfunc (p *PostgreSQL) Collect(configPath string) (interface{}, error) {\n\tPerformanceStruct := PerformanceStruct{}\n\tp.SetConfigDefaults(configPath)\n\n\tdb, err := sql.Open(\"postgres\", p.Config.Host)\n\tif err != nil {\n\t\tpluginLogger.Errorf(\"Can't connect to database': %v\", err)\n\t\treturn PerformanceStruct, err\n\t}\n\n\tdefer db.Close()\n\n\trawResult := make([][]byte, len(Counters))\n\tcounters := make(map[string]interface{})\n\tdest := make([]interface{}, len(Counters)) \/\/ A temporary interface{} slice\n\tfor i := range rawResult {\n\t\tdest[i] = &rawResult[i] \/\/ Put pointers to each string in the interface slice\n\t}\n\tvar counterColumns []string\n\tfor key := range Counters {\n\t\tcounterColumns = append(counterColumns, key)\n\t}\n\tCountersQuery := fmt.Sprintf(DatabaseStatsSQL, strings.Join(counterColumns, \", \"), p.Config.DB)\n\tCounterRows, errCounterRows := db.Query(CountersQuery)\n\tif errCounterRows == nil {\n\t\tdefer CounterRows.Close()\n\n\t\tfor CounterRows.Next() {\n\t\t\terr = CounterRows.Scan(dest...)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Counter stats': %v\", err)\n\n\t\t\t}\n\n\t\t\tfor i, val := range rawResult {\n\t\t\t\tkey := counterColumns[i]\n\t\t\t\tcounters[key] = string(val)\n\t\t\t}\n\t\t}\n\t}\n\n\tgauges := make(map[string]interface{})\n\tGaugesQuery := fmt.Sprintf(DatabaseStatsSQL, \"numbackends\", \"amon\")\n\tGaugeRows, errGaugeRows := db.Query(GaugesQuery)\n\n\tif errGaugeRows == nil {\n\t\tdefer GaugeRows.Close()\n\n\t\tfor GaugeRows.Next() {\n\t\t\tvar connections int\n\t\t\terr = GaugeRows.Scan(&connections)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Gauges': %v\", err)\n\n\t\t\t}\n\t\t\tgauges[\"connections\"] = connections\n\n\t\t}\n\n\t}\n\n\tTableSizeRows, errTableSizeRows := db.Query(TableSizeSQL)\n\tTableSizeHeaders := []string{\"name\", \"type\", \"size\"}\n\tTableSizeData := TableSizeData{Headers: TableSizeHeaders}\n\n\tif errTableSizeRows == nil {\n\t\tdefer TableSizeRows.Close()\n\n\t\tfor TableSizeRows.Next() {\n\t\t\t\/\/ TABLES_SIZE_ROWS = ['name','type','size']\n\t\t\tvar name string\n\t\t\tvar Type string\n\t\t\tvar size int64\n\n\t\t\terr = TableSizeRows.Scan(&name, &Type, &size)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Table size rows': %v\", err)\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, name)\n\t\t\tfields = append(fields, Type)\n\t\t\tfields = append(fields, size)\n\n\t\t\tTableSizeData.Data = append(TableSizeData.Data, fields)\n\n\t\t}\n\n\t\tPerformanceStruct.TableSizeData = TableSizeData\n\n\t}\n\n\tIndexHitRows, errIndexHitRows := db.Query(IndexCacheHitRateSQL)\n\tIndexHitHeaders := []string{\"table_name\", \"size\", \"reads\",\n\t\t\"cumulative_pct_reads\", \"index_hit_rate\", \"cache_hit_rate\"}\n\tIndexHitData := IndexHitRateData{Headers: IndexHitHeaders}\n\n\tif errIndexHitRows == nil {\n\t\tdefer IndexHitRows.Close()\n\t\tfor IndexHitRows.Next() {\n\t\t\t\/\/ INDEX_CACHE_HIT_RATE_ROWS = ['table_name','size','reads',\n\t\t\t\/\/ 'cumulative_pct_reads', 'index_hit_rate', 'cache_hit_rate']\n\n\t\t\tvar TableName string\n\t\t\tvar Size string\n\t\t\tvar Read int64\n\t\t\tvar CumulativeReads float64\n\t\t\tvar IndexHitRate float64\n\t\t\tvar CacheHitRate float64\n\n\t\t\terr = IndexHitRows.Scan(&TableName, &Size, &Read, &CumulativeReads, &IndexHitRate, &CacheHitRate)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Index Hit Rate tables': %v\", err)\n\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, TableName)\n\t\t\tfields = append(fields, Size)\n\t\t\tfields = append(fields, Read)\n\t\t\tfields = append(fields, CumulativeReads)\n\t\t\tfields = append(fields, IndexHitRate)\n\t\t\tfields = append(fields, CacheHitRate)\n\n\t\t\tIndexHitData.Data = append(IndexHitData.Data, fields)\n\n\t\t}\n\n\t\tPerformanceStruct.IndexHitRateData = IndexHitData\n\t}\n\n\tSlowQueriesRows, errSlowQueriesRows := db.Query(SlowQueriesSQL)\n\tSlowQueriesHeaders := []string{\"calls\", \"total\", \"per_call\", \"query\"}\n\tSlowQueriesData := SlowQueriesData{Headers: SlowQueriesHeaders}\n\n\tif errSlowQueriesRows == nil {\n\t\tdefer SlowQueriesRows.Close()\n\t\tfor SlowQueriesRows.Next() {\n\n\t\t\tvar Calls int64\n\t\t\tvar Total float64\n\t\t\tvar PerCall float64\n\t\t\tvar Query string\n\n\t\t\terr = SlowQueriesRows.Scan(&Calls, &Total, &PerCall, &Query)\n\t\t\tif err != nil {\n\t\t\t\tpluginLogger.Errorf(\"Can't get Slow Queries': %v\", err)\n\t\t\t}\n\t\t\tfields := []interface{}{}\n\t\t\tfields = append(fields, Calls)\n\t\t\tfields = append(fields, Total)\n\t\t\tfields = append(fields, PerCall)\n\t\t\tfields = append(fields, Query)\n\n\t\t\tSlowQueriesData.Data = append(SlowQueriesData.Data, fields)\n\n\t\t}\n\n\t\tPerformanceStruct.SlowQueriesData = SlowQueriesData\n\t}\n\n\tPerformanceStruct.Counters = counters\n\tPerformanceStruct.Gauges = gauges\n\n\treturn PerformanceStruct, nil\n}\n\nfunc init() {\n\tplugins.Add(\"postgresql\", func() plugins.Plugin {\n\t\treturn &PostgreSQL{}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Match represents a game being played\ntype Match struct {\n\tPlayers []Player `json:\"players\"`\n\tJudges []Judge `json:\"judges\"`\n\tKind string `json:\"kind\"`\n\tIndex int `json:\"index\"`\n\tStarted time.Time `json:\"started\"`\n\tEnded time.Time `json:\"ended\"`\n\ttournament *Tournament\n}\n\n\/\/ NewMatch creates a new Match for usage!\nfunc NewMatch(t *Tournament, index int, kind string) *Match {\n\treturn &Match{\n\t\tIndex: index,\n\t\tKind: kind,\n\t\ttournament: t,\n\t}\n}\n\nfunc (m *Match) String() string {\n\tvar tempo string\n\tvar name string\n\n\tif !m.IsStarted() {\n\t\ttempo = \"not started\"\n\t} else if m.IsEnded() {\n\t\ttempo = \"ended\"\n\t} else {\n\t\ttempo = \"playing\"\n\t}\n\n\tif m.Kind == \"final\" {\n\t\tname = \"Final\"\n\t} else {\n\t\tname = fmt.Sprintf(\"%s %d\", strings.Title(m.Kind), m.Index+1)\n\t}\n\n\tnames := make([]string, 0, len(m.Players))\n\tfor _, p := range m.Players {\n\t\tnames = append(names, p.Name)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"<%s: %s - %s>\",\n\t\tname,\n\t\tstrings.Join(names, \" \/ \"),\n\t\ttempo,\n\t)\n}\n\n\/\/ AddPlayer adds a player to the match\nfunc (m *Match) AddPlayer(p Player) error {\n\tif len(m.Players) == 4 {\n\t\treturn errors.New(\"cannot add fifth player\")\n\t}\n\n\tm.Players = append(m.Players, p)\n\treturn nil\n}\n\n\/\/ Prefill fills remaining player slots with nil players\nfunc (m *Match) Prefill() error {\n\tfor i := len(m.Players); i < 4; i++ {\n\t\tm.AddPlayer(Player{})\n\t}\n\treturn nil\n}\n\n\/\/ Start starts the match\nfunc (m *Match) Start() error {\n\tif !m.Started.IsZero() {\n\t\treturn errors.New(\"match already started\")\n\t}\n\n\t\/\/ If there are not four players in the match, we need to populate\n\t\/\/ the match with runnerups from the tournament\n\tif len(m.Players) != 4 {\n\t\tm.tournament.PopulateRunnerups(m)\n\t}\n\n\tfor i := range m.Players {\n\t\tm.Players[i].Reset()\n\t\tm.Players[i].match = m\n\t}\n\n\tm.Started = time.Now()\n\treturn nil\n}\n\n\/\/ End signals that the match has ended\n\/\/\n\/\/ It is also the place that moves players into either the Runnerup bracket\n\/\/ or into their place in the semis.\nfunc (m *Match) End() error {\n\tif !m.Ended.IsZero() {\n\t\treturn errors.New(\"match already ended\")\n\t}\n\n\tif m.Kind == \"final\" {\n\t\tm.tournament.AwardMedals(m)\n\t} else {\n\t\tm.tournament.MovePlayers(m)\n\t}\n\tm.Ended = time.Now()\n\treturn nil\n}\n\n\/\/ IsStarted returns boolean whether the match has started or not\nfunc (m *Match) IsStarted() bool {\n\treturn !m.Started.IsZero()\n}\n\n\/\/ IsEnded returns boolean whether the match has ended or not\nfunc (m *Match) IsEnded() bool {\n\treturn !m.Ended.IsZero()\n}\n<commit_msg>Add prefilling on Match creation<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Match represents a game being played\ntype Match struct {\n\tPlayers []Player `json:\"players\"`\n\tJudges []Judge `json:\"judges\"`\n\tKind string `json:\"kind\"`\n\tIndex int `json:\"index\"`\n\tStarted time.Time `json:\"started\"`\n\tEnded time.Time `json:\"ended\"`\n\ttournament *Tournament\n}\n\n\/\/ NewMatch creates a new Match for usage!\nfunc NewMatch(t *Tournament, index int, kind string) *Match {\n\tm := Match{\n\t\tIndex: index,\n\t\tKind: kind,\n\t\ttournament: t,\n\t}\n\tm.Prefill()\n\treturn &m\n}\n\nfunc (m *Match) String() string {\n\tvar tempo string\n\tvar name string\n\n\tif !m.IsStarted() {\n\t\ttempo = \"not started\"\n\t} else if m.IsEnded() {\n\t\ttempo = \"ended\"\n\t} else {\n\t\ttempo = \"playing\"\n\t}\n\n\tif m.Kind == \"final\" {\n\t\tname = \"Final\"\n\t} else {\n\t\tname = fmt.Sprintf(\"%s %d\", strings.Title(m.Kind), m.Index+1)\n\t}\n\n\tnames := make([]string, 0, len(m.Players))\n\tfor _, p := range m.Players {\n\t\tnames = append(names, p.Name)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"<%s: %s - %s>\",\n\t\tname,\n\t\tstrings.Join(names, \" \/ \"),\n\t\ttempo,\n\t)\n}\n\n\/\/ AddPlayer adds a player to the match\nfunc (m *Match) AddPlayer(p Player) error {\n\tif len(m.Players) == 4 {\n\t\treturn errors.New(\"cannot add fifth player\")\n\t}\n\n\tm.Players = append(m.Players, p)\n\treturn nil\n}\n\n\/\/ Prefill fills remaining player slots with nil players\nfunc (m *Match) Prefill() error {\n\tfor i := len(m.Players); i < 4; i++ {\n\t\tm.AddPlayer(Player{})\n\t}\n\treturn nil\n}\n\n\/\/ Start starts the match\nfunc (m *Match) Start() error {\n\tif !m.Started.IsZero() {\n\t\treturn errors.New(\"match already started\")\n\t}\n\n\t\/\/ If there are not four players in the match, we need to populate\n\t\/\/ the match with runnerups from the tournament\n\tif len(m.Players) != 4 {\n\t\tm.tournament.PopulateRunnerups(m)\n\t}\n\n\tfor i := range m.Players {\n\t\tm.Players[i].Reset()\n\t\tm.Players[i].match = m\n\t}\n\n\tm.Started = time.Now()\n\treturn nil\n}\n\n\/\/ End signals that the match has ended\n\/\/\n\/\/ It is also the place that moves players into either the Runnerup bracket\n\/\/ or into their place in the semis.\nfunc (m *Match) End() error {\n\tif !m.Ended.IsZero() {\n\t\treturn errors.New(\"match already ended\")\n\t}\n\n\tif m.Kind == \"final\" {\n\t\tm.tournament.AwardMedals(m)\n\t} else {\n\t\tm.tournament.MovePlayers(m)\n\t}\n\tm.Ended = time.Now()\n\treturn nil\n}\n\n\/\/ IsStarted returns boolean whether the match has started or not\nfunc (m *Match) IsStarted() bool {\n\treturn !m.Started.IsZero()\n}\n\n\/\/ IsEnded returns boolean whether the match has ended or not\nfunc (m *Match) IsEnded() bool {\n\treturn !m.Ended.IsZero()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/bcicen\/ctop\/config\"\n\t\"github.com\/bcicen\/ctop\/container\"\n\t\"github.com\/bcicen\/ctop\/widgets\"\n\t\"github.com\/bcicen\/ctop\/widgets\/menu\"\n\tui \"github.com\/gizak\/termui\"\n)\n\n\/\/ MenuFn executes a menu window, returning the next menu or nil\ntype MenuFn func() MenuFn\n\nvar helpDialog = []menu.Item{\n\t{\"<enter> - open container menu\", \"\"},\n\t{\"\", \"\"},\n\t{\"[a] - toggle display of all containers\", \"\"},\n\t{\"[f] - filter displayed containers\", \"\"},\n\t{\"[h] - open this help dialog\", \"\"},\n\t{\"[H] - toggle ctop header\", \"\"},\n\t{\"[s] - select container sort field\", \"\"},\n\t{\"[r] - reverse container sort order\", \"\"},\n\t{\"[o] - open single view\", \"\"},\n\t{\"[l] - view container logs ([t] to toggle timestamp when open)\", \"\"},\n\t{\"[e] - exec shell\", \"\"},\n\t{\"[c] - configure columns\", \"\"},\n\t{\"[S] - save current configuration to file\", \"\"},\n\t{\"[q] - exit ctop\", \"\"},\n}\n\nfunc HelpMenu() MenuFn {\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.BorderLabel = \"Help\"\n\tm.AddItems(helpDialog...)\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tui.Clear()\n\t\tui.Render(m)\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc FilterMenu() MenuFn {\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\ti := widgets.NewInput()\n\ti.BorderLabel = \"Filter\"\n\ti.SetY(ui.TermHeight() - i.Height)\n\ti.Data = config.GetVal(\"filterStr\")\n\tui.Render(i)\n\n\t\/\/ refresh container rows on input\n\tstream := i.Stream()\n\tgo func() {\n\t\tfor s := range stream {\n\t\t\tconfig.Update(\"filterStr\", s)\n\t\t\tRefreshDisplay()\n\t\t\tui.Render(i)\n\t\t}\n\t}()\n\n\ti.InputHandlers()\n\tui.Handle(\"\/sys\/kbd\/<escape>\", func(ui.Event) {\n\t\tconfig.Update(\"filterStr\", \"\")\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tconfig.Update(\"filterStr\", i.Data)\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc SortMenu() MenuFn {\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.SortItems = true\n\tm.BorderLabel = \"Sort Field\"\n\n\tfor _, field := range container.SortFields() {\n\t\tm.AddItems(menu.Item{field, \"\"})\n\t}\n\n\t\/\/ set cursor position to current sort field\n\tm.SetCursor(config.GetVal(\"sortField\"))\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\tHandleKeys(\"exit\", ui.StopLoop)\n\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tconfig.Update(\"sortField\", m.SelectedValue())\n\t\tui.StopLoop()\n\t})\n\n\tui.Render(m)\n\tui.Loop()\n\treturn nil\n}\n\nfunc ColumnsMenu() MenuFn {\n\tconst (\n\t\tenabledStr = \"[X]\"\n\t\tdisabledStr = \"[ ]\"\n\t\tpadding = 2\n\t)\n\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.SortItems = false\n\tm.BorderLabel = \"Columns\"\n\t\/\/m.SubText = \"Enabled Columns\"\n\n\trebuild := func() {\n\t\t\/\/ get padding for right alignment of enabled status\n\t\tvar maxLen int\n\t\tfor _, col := range config.GlobalColumns {\n\t\t\tif len(col.Label) > maxLen {\n\t\t\t\tmaxLen = len(col.Label)\n\t\t\t}\n\t\t}\n\t\tmaxLen += padding\n\n\t\t\/\/ rebuild menu items\n\t\tm.ClearItems()\n\t\tfor _, col := range config.GlobalColumns {\n\t\t\ttxt := col.Label + strings.Repeat(\" \", maxLen-len(col.Label))\n\t\t\tif col.Enabled {\n\t\t\t\ttxt += enabledStr\n\t\t\t} else {\n\t\t\t\ttxt += disabledStr\n\t\t\t}\n\t\t\tm.AddItems(menu.Item{col.Name, txt})\n\t\t}\n\t}\n\n\tupFn := func() {\n\t\tconfig.ColumnLeft(m.SelectedValue())\n\t\tm.Up()\n\t\trebuild()\n\t}\n\n\tdownFn := func() {\n\t\tconfig.ColumnRight(m.SelectedValue())\n\t\tm.Down()\n\t\trebuild()\n\t}\n\n\ttoggleFn := func() {\n\t\tconfig.ColumnToggle(m.SelectedValue())\n\t\trebuild()\n\t}\n\n\trebuild()\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\tHandleKeys(\"enter\", toggleFn)\n\tHandleKeys(\"pgup\", upFn)\n\tHandleKeys(\"pgdown\", downFn)\n\n\tui.Handle(\"\/sys\/kbd\/x\", func(ui.Event) { toggleFn() })\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) { toggleFn() })\n\n\tHandleKeys(\"exit\", func() {\n\t\tcSource, err := cursor.cSuper.Get()\n\t\tif err == nil {\n\t\t\tfor _, c := range cSource.All() {\n\t\t\t\tc.RecreateWidgets()\n\t\t\t}\n\t\t}\n\t\tui.StopLoop()\n\t})\n\n\tui.Render(m)\n\tui.Loop()\n\treturn nil\n}\n\nfunc ContainerMenu() MenuFn {\n\tc := cursor.Selected()\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.BorderLabel = \"Menu\"\n\n\titems := []menu.Item{\n\t\tmenu.Item{Val: \"single\", Label: \"[o] single view\"},\n\t\tmenu.Item{Val: \"logs\", Label: \"[l] log view\"},\n\t}\n\n\tif c.Meta[\"state\"] == \"running\" {\n\t\titems = append(items, menu.Item{Val: \"stop\", Label: \"[s] stop\"})\n\t\titems = append(items, menu.Item{Val: \"pause\", Label: \"[p] pause\"})\n\t\titems = append(items, menu.Item{Val: \"restart\", Label: \"[r] restart\"})\n\t\titems = append(items, menu.Item{Val: \"exec\", Label: \"[e] exec shell\"})\n\t}\n\tif c.Meta[\"state\"] == \"exited\" || c.Meta[\"state\"] == \"created\" {\n\t\titems = append(items, menu.Item{Val: \"start\", Label: \"[s] start\"})\n\t\titems = append(items, menu.Item{Val: \"remove\", Label: \"[R] remove\"})\n\t}\n\tif c.Meta[\"state\"] == \"paused\" {\n\t\titems = append(items, menu.Item{Val: \"unpause\", Label: \"[p] unpause\"})\n\t}\n\titems = append(items, menu.Item{Val: \"cancel\", Label: \"[c] cancel\"})\n\n\tm.AddItems(items...)\n\tui.Render(m)\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\n\tvar selected string\n\n\t\/\/ shortcuts\n\tui.Handle(\"\/sys\/kbd\/o\", func(ui.Event) {\n\t\tselected = \"single\"\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/l\", func(ui.Event) {\n\t\tselected = \"logs\"\n\t\tui.StopLoop()\n\t})\n\tif c.Meta[\"state\"] != \"paused\" {\n\t\tui.Handle(\"\/sys\/kbd\/s\", func(ui.Event) {\n\t\t\tif c.Meta[\"state\"] == \"running\" {\n\t\t\t\tselected = \"stop\"\n\t\t\t} else {\n\t\t\t\tselected = \"start\"\n\t\t\t}\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tif c.Meta[\"state\"] != \"exited\" || c.Meta[\"state\"] != \"created\" {\n\t\tui.Handle(\"\/sys\/kbd\/p\", func(ui.Event) {\n\t\t\tif c.Meta[\"state\"] == \"paused\" {\n\t\t\t\tselected = \"unpause\"\n\t\t\t} else {\n\t\t\t\tselected = \"pause\"\n\t\t\t}\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tif c.Meta[\"state\"] == \"running\" {\n\t\tui.Handle(\"\/sys\/kbd\/e\", func(ui.Event) {\n\t\t\tselected = \"exec\"\n\t\t\tui.StopLoop()\n\t\t})\n\t\tui.Handle(\"\/sys\/kbd\/r\", func(ui.Event) {\n\t\t\tselected = \"restart\"\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tui.Handle(\"\/sys\/kbd\/R\", func(ui.Event) {\n\t\tselected = \"remove\"\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/c\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tselected = m.SelectedValue()\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\n\tvar nextMenu MenuFn\n\tswitch selected {\n\tcase \"single\":\n\t\tnextMenu = SingleView\n\tcase \"logs\":\n\t\tnextMenu = LogMenu\n\tcase \"exec\":\n\t\tnextMenu = ExecShell\n\tcase \"start\":\n\t\tnextMenu = Confirm(confirmTxt(\"start\", c.GetMeta(\"name\")), c.Start)\n\tcase \"stop\":\n\t\tnextMenu = Confirm(confirmTxt(\"stop\", c.GetMeta(\"name\")), c.Stop)\n\tcase \"remove\":\n\t\tnextMenu = Confirm(confirmTxt(\"remove\", c.GetMeta(\"name\")), c.Remove)\n\tcase \"pause\":\n\t\tnextMenu = Confirm(confirmTxt(\"pause\", c.GetMeta(\"name\")), c.Pause)\n\tcase \"unpause\":\n\t\tnextMenu = Confirm(confirmTxt(\"unpause\", c.GetMeta(\"name\")), c.Unpause)\n\tcase \"restart\":\n\t\tnextMenu = Confirm(confirmTxt(\"restart\", c.GetMeta(\"name\")), c.Restart)\n\t}\n\n\treturn nextMenu\n}\n\nfunc LogMenu() MenuFn {\n\n\tc := cursor.Selected()\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tlogs, quit := logReader(c)\n\tm := widgets.NewTextView(logs)\n\tm.BorderLabel = fmt.Sprintf(\"Logs [%s]\", c.GetMeta(\"name\"))\n\tui.Render(m)\n\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tm.Resize()\n\t})\n\tui.Handle(\"\/sys\/kbd\/t\", func(ui.Event) {\n\t\tm.Toggle()\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tquit <- true\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc ExecShell() MenuFn {\n\tc := cursor.Selected()\n\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\t\/\/ Detect and execute default shell in container.\n\t\/\/ Execute Ash shell command: \/bin\/sh -c\n\t\/\/ Reset colors: printf '\\e[0m\\e[?25h'\n\t\/\/ Clear screen\n\t\/\/ Run default shell for the user. It's configured in \/etc\/passwd and looks like root:x:0:0:root:\/root:\/bin\/bash:\n\t\/\/ 1. Get current user id: id -un\n\t\/\/ 2. Find user's line in \/etc\/passwd by grep\n\t\/\/ 3. Extract default user's shell by cutting seven's column separated by :\n\t\/\/ 4. Execute the shell path with eval\n\tif err := c.Exec([]string{\"\/bin\/sh\", \"-c\", \"printf '\\\\e[0m\\\\e[?25h' && clear && eval `grep ^$(id -un): \/etc\/passwd | cut -d : -f 7-`\"}); err != nil {\n\t\tlog.StatusErr(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Create a confirmation dialog with a given description string and\n\/\/ func to perform if confirmed\nfunc Confirm(txt string, fn func()) MenuFn {\n\tmenu := func() MenuFn {\n\t\tui.DefaultEvtStream.ResetHandlers()\n\t\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\t\tm := menu.NewMenu()\n\t\tm.Selectable = true\n\t\tm.BorderLabel = \"Confirm\"\n\t\tm.SubText = txt\n\n\t\titems := []menu.Item{\n\t\t\tmenu.Item{Val: \"cancel\", Label: \"[c]ancel\"},\n\t\t\tmenu.Item{Val: \"yes\", Label: \"[y]es\"},\n\t\t}\n\n\t\tvar response bool\n\n\t\tm.AddItems(items...)\n\t\tui.Render(m)\n\n\t\tyes := func() {\n\t\t\tresponse = true\n\t\t\tui.StopLoop()\n\t\t}\n\n\t\tno := func() {\n\t\t\tresponse = false\n\t\t\tui.StopLoop()\n\t\t}\n\n\t\tHandleKeys(\"up\", m.Up)\n\t\tHandleKeys(\"down\", m.Down)\n\t\tHandleKeys(\"exit\", no)\n\t\tui.Handle(\"\/sys\/kbd\/c\", func(ui.Event) { no() })\n\t\tui.Handle(\"\/sys\/kbd\/y\", func(ui.Event) { yes() })\n\n\t\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\t\tswitch m.SelectedValue() {\n\t\t\tcase \"cancel\":\n\t\t\t\tno()\n\t\t\tcase \"yes\":\n\t\t\t\tyes()\n\t\t\t}\n\t\t})\n\n\t\tui.Loop()\n\t\tif response {\n\t\t\tfn()\n\t\t}\n\t\treturn nil\n\t}\n\treturn menu\n}\n\ntype toggleLog struct {\n\ttimestamp time.Time\n\tmessage string\n}\n\nfunc (t *toggleLog) Toggle(on bool) string {\n\tif on {\n\t\treturn fmt.Sprintf(\"%s %s\", t.timestamp.Format(\"2006-01-02T15:04:05.999Z07:00\"), t.message)\n\t}\n\treturn t.message\n}\n\nfunc logReader(container *container.Container) (logs chan widgets.ToggleText, quit chan bool) {\n\n\tlogCollector := container.Logs()\n\tstream := logCollector.Stream()\n\tlogs = make(chan widgets.ToggleText)\n\tquit = make(chan bool)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-stream:\n\t\t\t\tlogs <- &toggleLog{timestamp: log.Timestamp, message: log.Message}\n\t\t\tcase <-quit:\n\t\t\t\tlogCollector.Stop()\n\t\t\t\tclose(logs)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn\n}\n\nfunc confirmTxt(a, n string) string { return fmt.Sprintf(\"%s container %s?\", a, n) }\n<commit_msg>Fix: enable pause [p] only for running or paused container<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/bcicen\/ctop\/config\"\n\t\"github.com\/bcicen\/ctop\/container\"\n\t\"github.com\/bcicen\/ctop\/widgets\"\n\t\"github.com\/bcicen\/ctop\/widgets\/menu\"\n\tui \"github.com\/gizak\/termui\"\n)\n\n\/\/ MenuFn executes a menu window, returning the next menu or nil\ntype MenuFn func() MenuFn\n\nvar helpDialog = []menu.Item{\n\t{\"<enter> - open container menu\", \"\"},\n\t{\"\", \"\"},\n\t{\"[a] - toggle display of all containers\", \"\"},\n\t{\"[f] - filter displayed containers\", \"\"},\n\t{\"[h] - open this help dialog\", \"\"},\n\t{\"[H] - toggle ctop header\", \"\"},\n\t{\"[s] - select container sort field\", \"\"},\n\t{\"[r] - reverse container sort order\", \"\"},\n\t{\"[o] - open single view\", \"\"},\n\t{\"[l] - view container logs ([t] to toggle timestamp when open)\", \"\"},\n\t{\"[e] - exec shell\", \"\"},\n\t{\"[c] - configure columns\", \"\"},\n\t{\"[S] - save current configuration to file\", \"\"},\n\t{\"[q] - exit ctop\", \"\"},\n}\n\nfunc HelpMenu() MenuFn {\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.BorderLabel = \"Help\"\n\tm.AddItems(helpDialog...)\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tui.Clear()\n\t\tui.Render(m)\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc FilterMenu() MenuFn {\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\ti := widgets.NewInput()\n\ti.BorderLabel = \"Filter\"\n\ti.SetY(ui.TermHeight() - i.Height)\n\ti.Data = config.GetVal(\"filterStr\")\n\tui.Render(i)\n\n\t\/\/ refresh container rows on input\n\tstream := i.Stream()\n\tgo func() {\n\t\tfor s := range stream {\n\t\t\tconfig.Update(\"filterStr\", s)\n\t\t\tRefreshDisplay()\n\t\t\tui.Render(i)\n\t\t}\n\t}()\n\n\ti.InputHandlers()\n\tui.Handle(\"\/sys\/kbd\/<escape>\", func(ui.Event) {\n\t\tconfig.Update(\"filterStr\", \"\")\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tconfig.Update(\"filterStr\", i.Data)\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc SortMenu() MenuFn {\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.SortItems = true\n\tm.BorderLabel = \"Sort Field\"\n\n\tfor _, field := range container.SortFields() {\n\t\tm.AddItems(menu.Item{field, \"\"})\n\t}\n\n\t\/\/ set cursor position to current sort field\n\tm.SetCursor(config.GetVal(\"sortField\"))\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\tHandleKeys(\"exit\", ui.StopLoop)\n\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tconfig.Update(\"sortField\", m.SelectedValue())\n\t\tui.StopLoop()\n\t})\n\n\tui.Render(m)\n\tui.Loop()\n\treturn nil\n}\n\nfunc ColumnsMenu() MenuFn {\n\tconst (\n\t\tenabledStr = \"[X]\"\n\t\tdisabledStr = \"[ ]\"\n\t\tpadding = 2\n\t)\n\n\tui.Clear()\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.SortItems = false\n\tm.BorderLabel = \"Columns\"\n\t\/\/m.SubText = \"Enabled Columns\"\n\n\trebuild := func() {\n\t\t\/\/ get padding for right alignment of enabled status\n\t\tvar maxLen int\n\t\tfor _, col := range config.GlobalColumns {\n\t\t\tif len(col.Label) > maxLen {\n\t\t\t\tmaxLen = len(col.Label)\n\t\t\t}\n\t\t}\n\t\tmaxLen += padding\n\n\t\t\/\/ rebuild menu items\n\t\tm.ClearItems()\n\t\tfor _, col := range config.GlobalColumns {\n\t\t\ttxt := col.Label + strings.Repeat(\" \", maxLen-len(col.Label))\n\t\t\tif col.Enabled {\n\t\t\t\ttxt += enabledStr\n\t\t\t} else {\n\t\t\t\ttxt += disabledStr\n\t\t\t}\n\t\t\tm.AddItems(menu.Item{col.Name, txt})\n\t\t}\n\t}\n\n\tupFn := func() {\n\t\tconfig.ColumnLeft(m.SelectedValue())\n\t\tm.Up()\n\t\trebuild()\n\t}\n\n\tdownFn := func() {\n\t\tconfig.ColumnRight(m.SelectedValue())\n\t\tm.Down()\n\t\trebuild()\n\t}\n\n\ttoggleFn := func() {\n\t\tconfig.ColumnToggle(m.SelectedValue())\n\t\trebuild()\n\t}\n\n\trebuild()\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\tHandleKeys(\"enter\", toggleFn)\n\tHandleKeys(\"pgup\", upFn)\n\tHandleKeys(\"pgdown\", downFn)\n\n\tui.Handle(\"\/sys\/kbd\/x\", func(ui.Event) { toggleFn() })\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) { toggleFn() })\n\n\tHandleKeys(\"exit\", func() {\n\t\tcSource, err := cursor.cSuper.Get()\n\t\tif err == nil {\n\t\t\tfor _, c := range cSource.All() {\n\t\t\t\tc.RecreateWidgets()\n\t\t\t}\n\t\t}\n\t\tui.StopLoop()\n\t})\n\n\tui.Render(m)\n\tui.Loop()\n\treturn nil\n}\n\nfunc ContainerMenu() MenuFn {\n\tc := cursor.Selected()\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tm := menu.NewMenu()\n\tm.Selectable = true\n\tm.BorderLabel = \"Menu\"\n\n\titems := []menu.Item{\n\t\tmenu.Item{Val: \"single\", Label: \"[o] single view\"},\n\t\tmenu.Item{Val: \"logs\", Label: \"[l] log view\"},\n\t}\n\n\tif c.Meta[\"state\"] == \"running\" {\n\t\titems = append(items, menu.Item{Val: \"stop\", Label: \"[s] stop\"})\n\t\titems = append(items, menu.Item{Val: \"pause\", Label: \"[p] pause\"})\n\t\titems = append(items, menu.Item{Val: \"restart\", Label: \"[r] restart\"})\n\t\titems = append(items, menu.Item{Val: \"exec\", Label: \"[e] exec shell\"})\n\t}\n\tif c.Meta[\"state\"] == \"exited\" || c.Meta[\"state\"] == \"created\" {\n\t\titems = append(items, menu.Item{Val: \"start\", Label: \"[s] start\"})\n\t\titems = append(items, menu.Item{Val: \"remove\", Label: \"[R] remove\"})\n\t}\n\tif c.Meta[\"state\"] == \"paused\" {\n\t\titems = append(items, menu.Item{Val: \"unpause\", Label: \"[p] unpause\"})\n\t}\n\titems = append(items, menu.Item{Val: \"cancel\", Label: \"[c] cancel\"})\n\n\tm.AddItems(items...)\n\tui.Render(m)\n\n\tHandleKeys(\"up\", m.Up)\n\tHandleKeys(\"down\", m.Down)\n\n\tvar selected string\n\n\t\/\/ shortcuts\n\tui.Handle(\"\/sys\/kbd\/o\", func(ui.Event) {\n\t\tselected = \"single\"\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/l\", func(ui.Event) {\n\t\tselected = \"logs\"\n\t\tui.StopLoop()\n\t})\n\tif c.Meta[\"state\"] != \"paused\" {\n\t\tui.Handle(\"\/sys\/kbd\/s\", func(ui.Event) {\n\t\t\tif c.Meta[\"state\"] == \"running\" {\n\t\t\t\tselected = \"stop\"\n\t\t\t} else {\n\t\t\t\tselected = \"start\"\n\t\t\t}\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tif c.Meta[\"state\"] != \"exited\" && c.Meta[\"state\"] != \"created\" {\n\t\tui.Handle(\"\/sys\/kbd\/p\", func(ui.Event) {\n\t\t\tif c.Meta[\"state\"] == \"paused\" {\n\t\t\t\tselected = \"unpause\"\n\t\t\t} else {\n\t\t\t\tselected = \"pause\"\n\t\t\t}\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tif c.Meta[\"state\"] == \"running\" {\n\t\tui.Handle(\"\/sys\/kbd\/e\", func(ui.Event) {\n\t\t\tselected = \"exec\"\n\t\t\tui.StopLoop()\n\t\t})\n\t\tui.Handle(\"\/sys\/kbd\/r\", func(ui.Event) {\n\t\t\tselected = \"restart\"\n\t\t\tui.StopLoop()\n\t\t})\n\t}\n\tui.Handle(\"\/sys\/kbd\/R\", func(ui.Event) {\n\t\tselected = \"remove\"\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/c\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\n\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\tselected = m.SelectedValue()\n\t\tui.StopLoop()\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\n\tvar nextMenu MenuFn\n\tswitch selected {\n\tcase \"single\":\n\t\tnextMenu = SingleView\n\tcase \"logs\":\n\t\tnextMenu = LogMenu\n\tcase \"exec\":\n\t\tnextMenu = ExecShell\n\tcase \"start\":\n\t\tnextMenu = Confirm(confirmTxt(\"start\", c.GetMeta(\"name\")), c.Start)\n\tcase \"stop\":\n\t\tnextMenu = Confirm(confirmTxt(\"stop\", c.GetMeta(\"name\")), c.Stop)\n\tcase \"remove\":\n\t\tnextMenu = Confirm(confirmTxt(\"remove\", c.GetMeta(\"name\")), c.Remove)\n\tcase \"pause\":\n\t\tnextMenu = Confirm(confirmTxt(\"pause\", c.GetMeta(\"name\")), c.Pause)\n\tcase \"unpause\":\n\t\tnextMenu = Confirm(confirmTxt(\"unpause\", c.GetMeta(\"name\")), c.Unpause)\n\tcase \"restart\":\n\t\tnextMenu = Confirm(confirmTxt(\"restart\", c.GetMeta(\"name\")), c.Restart)\n\t}\n\n\treturn nextMenu\n}\n\nfunc LogMenu() MenuFn {\n\n\tc := cursor.Selected()\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\tlogs, quit := logReader(c)\n\tm := widgets.NewTextView(logs)\n\tm.BorderLabel = fmt.Sprintf(\"Logs [%s]\", c.GetMeta(\"name\"))\n\tui.Render(m)\n\n\tui.Handle(\"\/sys\/wnd\/resize\", func(e ui.Event) {\n\t\tm.Resize()\n\t})\n\tui.Handle(\"\/sys\/kbd\/t\", func(ui.Event) {\n\t\tm.Toggle()\n\t})\n\tui.Handle(\"\/sys\/kbd\/\", func(ui.Event) {\n\t\tquit <- true\n\t\tui.StopLoop()\n\t})\n\tui.Loop()\n\treturn nil\n}\n\nfunc ExecShell() MenuFn {\n\tc := cursor.Selected()\n\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\tui.DefaultEvtStream.ResetHandlers()\n\tdefer ui.DefaultEvtStream.ResetHandlers()\n\t\/\/ Detect and execute default shell in container.\n\t\/\/ Execute Ash shell command: \/bin\/sh -c\n\t\/\/ Reset colors: printf '\\e[0m\\e[?25h'\n\t\/\/ Clear screen\n\t\/\/ Run default shell for the user. It's configured in \/etc\/passwd and looks like root:x:0:0:root:\/root:\/bin\/bash:\n\t\/\/ 1. Get current user id: id -un\n\t\/\/ 2. Find user's line in \/etc\/passwd by grep\n\t\/\/ 3. Extract default user's shell by cutting seven's column separated by :\n\t\/\/ 4. Execute the shell path with eval\n\tif err := c.Exec([]string{\"\/bin\/sh\", \"-c\", \"printf '\\\\e[0m\\\\e[?25h' && clear && eval `grep ^$(id -un): \/etc\/passwd | cut -d : -f 7-`\"}); err != nil {\n\t\tlog.StatusErr(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Create a confirmation dialog with a given description string and\n\/\/ func to perform if confirmed\nfunc Confirm(txt string, fn func()) MenuFn {\n\tmenu := func() MenuFn {\n\t\tui.DefaultEvtStream.ResetHandlers()\n\t\tdefer ui.DefaultEvtStream.ResetHandlers()\n\n\t\tm := menu.NewMenu()\n\t\tm.Selectable = true\n\t\tm.BorderLabel = \"Confirm\"\n\t\tm.SubText = txt\n\n\t\titems := []menu.Item{\n\t\t\tmenu.Item{Val: \"cancel\", Label: \"[c]ancel\"},\n\t\t\tmenu.Item{Val: \"yes\", Label: \"[y]es\"},\n\t\t}\n\n\t\tvar response bool\n\n\t\tm.AddItems(items...)\n\t\tui.Render(m)\n\n\t\tyes := func() {\n\t\t\tresponse = true\n\t\t\tui.StopLoop()\n\t\t}\n\n\t\tno := func() {\n\t\t\tresponse = false\n\t\t\tui.StopLoop()\n\t\t}\n\n\t\tHandleKeys(\"up\", m.Up)\n\t\tHandleKeys(\"down\", m.Down)\n\t\tHandleKeys(\"exit\", no)\n\t\tui.Handle(\"\/sys\/kbd\/c\", func(ui.Event) { no() })\n\t\tui.Handle(\"\/sys\/kbd\/y\", func(ui.Event) { yes() })\n\n\t\tui.Handle(\"\/sys\/kbd\/<enter>\", func(ui.Event) {\n\t\t\tswitch m.SelectedValue() {\n\t\t\tcase \"cancel\":\n\t\t\t\tno()\n\t\t\tcase \"yes\":\n\t\t\t\tyes()\n\t\t\t}\n\t\t})\n\n\t\tui.Loop()\n\t\tif response {\n\t\t\tfn()\n\t\t}\n\t\treturn nil\n\t}\n\treturn menu\n}\n\ntype toggleLog struct {\n\ttimestamp time.Time\n\tmessage string\n}\n\nfunc (t *toggleLog) Toggle(on bool) string {\n\tif on {\n\t\treturn fmt.Sprintf(\"%s %s\", t.timestamp.Format(\"2006-01-02T15:04:05.999Z07:00\"), t.message)\n\t}\n\treturn t.message\n}\n\nfunc logReader(container *container.Container) (logs chan widgets.ToggleText, quit chan bool) {\n\n\tlogCollector := container.Logs()\n\tstream := logCollector.Stream()\n\tlogs = make(chan widgets.ToggleText)\n\tquit = make(chan bool)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-stream:\n\t\t\t\tlogs <- &toggleLog{timestamp: log.Timestamp, message: log.Message}\n\t\t\tcase <-quit:\n\t\t\t\tlogCollector.Stop()\n\t\t\t\tclose(logs)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn\n}\n\nfunc confirmTxt(a, n string) string { return fmt.Sprintf(\"%s container %s?\", a, n) }\n<|endoftext|>"} {"text":"<commit_before>package jsonpatch\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {\n\tcurDoc, err := cur.intoDoc()\n\n\tif err != nil {\n\t\tpruneNulls(patch)\n\t\treturn patch\n\t}\n\n\tpatchDoc, err := patch.intoDoc()\n\n\tif err != nil {\n\t\treturn patch\n\t}\n\n\tmergeDocs(curDoc, patchDoc, mergeMerge)\n\n\treturn cur\n}\n\nfunc mergeDocs(doc, patch *partialDoc, mergeMerge bool) {\n\tfor k, v := range *patch {\n\t\tif v == nil {\n\t\t\tif mergeMerge {\n\t\t\t\t(*doc)[k] = nil\n\t\t\t} else {\n\t\t\t\tdelete(*doc, k)\n\t\t\t}\n\t\t} else {\n\t\t\tcur, ok := (*doc)[k]\n\n\t\t\tif !ok || cur == nil {\n\t\t\t\tpruneNulls(v)\n\t\t\t\t(*doc)[k] = v\n\t\t\t} else {\n\t\t\t\t(*doc)[k] = merge(cur, v, mergeMerge)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc pruneNulls(n *lazyNode) {\n\tsub, err := n.intoDoc()\n\n\tif err == nil {\n\t\tpruneDocNulls(sub)\n\t} else {\n\t\tary, err := n.intoAry()\n\n\t\tif err == nil {\n\t\t\tpruneAryNulls(ary)\n\t\t}\n\t}\n}\n\nfunc pruneDocNulls(doc *partialDoc) *partialDoc {\n\tfor k, v := range *doc {\n\t\tif v == nil {\n\t\t\tdelete(*doc, k)\n\t\t} else {\n\t\t\tpruneNulls(v)\n\t\t}\n\t}\n\n\treturn doc\n}\n\nfunc pruneAryNulls(ary *partialArray) *partialArray {\n\tnewAry := []*lazyNode{}\n\n\tfor _, v := range *ary {\n\t\tif v != nil {\n\t\t\tpruneNulls(v)\n\t\t\tnewAry = append(newAry, v)\n\t\t}\n\t}\n\n\t*ary = newAry\n\n\treturn ary\n}\n\nvar errBadJSONDoc = fmt.Errorf(\"Invalid JSON Document\")\nvar errBadJSONPatch = fmt.Errorf(\"Invalid JSON Patch\")\nvar errBadMergeTypes = fmt.Errorf(\"Mismatched JSON Documents\")\n\n\/\/ MergeMergePatches merges two merge patches together, such that\n\/\/ applying this resulting merged merge patch to a document yields the same\n\/\/ as merging each merge patch to the document in succession.\nfunc MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {\n\treturn doMergePatch(patch1Data, patch2Data, true)\n}\n\n\/\/ MergePatch merges the patchData into the docData.\nfunc MergePatch(docData, patchData []byte) ([]byte, error) {\n\treturn doMergePatch(docData, patchData, false)\n}\n\nfunc doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {\n\tdoc := &partialDoc{}\n\n\tdocErr := json.Unmarshal(docData, doc)\n\n\tpatch := &partialDoc{}\n\n\tpatchErr := json.Unmarshal(patchData, patch)\n\n\tif _, ok := docErr.(*json.SyntaxError); ok {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tif _, ok := patchErr.(*json.SyntaxError); ok {\n\t\treturn nil, errBadJSONPatch\n\t}\n\n\tif docErr == nil && *doc == nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tif patchErr == nil && *patch == nil {\n\t\treturn nil, errBadJSONPatch\n\t}\n\n\tif docErr != nil || patchErr != nil {\n\t\t\/\/ Not an error, just not a doc, so we turn straight into the patch\n\t\tif patchErr == nil {\n\t\t\tif mergeMerge {\n\t\t\t\tdoc = patch\n\t\t\t} else {\n\t\t\t\tdoc = pruneDocNulls(patch)\n\t\t\t}\n\t\t} else {\n\t\t\tpatchAry := &partialArray{}\n\t\t\tpatchErr = json.Unmarshal(patchData, patchAry)\n\n\t\t\tif patchErr != nil {\n\t\t\t\treturn nil, errBadJSONPatch\n\t\t\t}\n\n\t\t\tpruneAryNulls(patchAry)\n\n\t\t\tout, patchErr := json.Marshal(patchAry)\n\n\t\t\tif patchErr != nil {\n\t\t\t\treturn nil, errBadJSONPatch\n\t\t\t}\n\n\t\t\treturn out, nil\n\t\t}\n\t} else {\n\t\tmergeDocs(doc, patch, mergeMerge)\n\t}\n\n\treturn json.Marshal(doc)\n}\n\n\/\/ resemblesJSONArray indicates whether the byte-slice \"appears\" to be\n\/\/ a JSON array or not.\n\/\/ False-positives are possible, as this function does not check the internal\n\/\/ structure of the array. It only checks that the outer syntax is present and\n\/\/ correct.\nfunc resemblesJSONArray(input []byte) bool {\n\tinput = bytes.TrimSpace(input)\n\n\thasPrefix := bytes.HasPrefix(input, []byte(\"[\"))\n\thasSuffix := bytes.HasSuffix(input, []byte(\"]\"))\n\n\treturn hasPrefix && hasSuffix\n}\n\n\/\/ CreateMergePatch will return a merge patch document capable of converting\n\/\/ the original document(s) to the modified document(s).\n\/\/ The parameters can be bytes of either two JSON Documents, or two arrays of\n\/\/ JSON documents.\n\/\/ The merge patch returned follows the specification defined at http:\/\/tools.ietf.org\/html\/draft-ietf-appsawg-json-merge-patch-07\nfunc CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalResemblesArray := resemblesJSONArray(originalJSON)\n\tmodifiedResemblesArray := resemblesJSONArray(modifiedJSON)\n\n\tif originalResemblesArray && modifiedResemblesArray {\n\t\treturn createArrayMergePatch(originalJSON, modifiedJSON)\n\t}\n\n\tif !originalResemblesArray && !modifiedResemblesArray {\n\t\treturn createObjectMergePatch(originalJSON, modifiedJSON)\n\t}\n\n\treturn nil, errBadMergeTypes\n}\n\n\/\/ createObjectMergePatch will return a merge-patch document capable of\n\/\/ converting the original document to the modified document.\nfunc createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalDoc := map[string]interface{}{}\n\tmodifiedDoc := map[string]interface{}{}\n\n\terr := json.Unmarshal(originalJSON, &originalDoc)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\terr = json.Unmarshal(modifiedJSON, &modifiedDoc)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tdest, err := getDiff(originalDoc, modifiedDoc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(dest)\n}\n\n\/\/ createArrayMergePatch will return an array of merge-patch documents capable\n\/\/ of converting the original document to the modified document for each\n\/\/ pair of JSON documents provided in the arrays.\n\/\/ Arrays of mismatched sizes will result in an error.\nfunc createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalDocs := []json.RawMessage{}\n\tmodifiedDocs := []json.RawMessage{}\n\n\terr := json.Unmarshal(originalJSON, &originalDocs)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\terr = json.Unmarshal(modifiedJSON, &modifiedDocs)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\ttotal := len(originalDocs)\n\tif len(modifiedDocs) != total {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tresult := []json.RawMessage{}\n\tfor i := 0; i < len(originalDocs); i++ {\n\t\toriginal := originalDocs[i]\n\t\tmodified := modifiedDocs[i]\n\n\t\tpatch, err := createObjectMergePatch(original, modified)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult = append(result, json.RawMessage(patch))\n\t}\n\n\treturn json.Marshal(result)\n}\n\n\/\/ Returns true if the array matches (must be json types).\n\/\/ As is idiomatic for go, an empty array is not the same as a nil array.\nfunc matchesArray(a, b []interface{}) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tif (a == nil && b != nil) || (a != nil && b == nil) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif !matchesValue(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Returns true if the values matches (must be json types)\n\/\/ The types of the values must match, otherwise it will always return false\n\/\/ If two map[string]interface{} are given, all elements must match.\nfunc matchesValue(av, bv interface{}) bool {\n\tif reflect.TypeOf(av) != reflect.TypeOf(bv) {\n\t\treturn false\n\t}\n\tswitch at := av.(type) {\n\tcase string:\n\t\tbt := bv.(string)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase float64:\n\t\tbt := bv.(float64)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase bool:\n\t\tbt := bv.(bool)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase nil:\n\t\t\/\/ Both nil, fine.\n\t\treturn true\n\tcase map[string]interface{}:\n\t\tbt := bv.(map[string]interface{})\n\t\tfor key := range at {\n\t\t\tif !matchesValue(at[key], bt[key]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tfor key := range bt {\n\t\t\tif !matchesValue(at[key], bt[key]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase []interface{}:\n\t\tbt := bv.([]interface{})\n\t\treturn matchesArray(at, bt)\n\t}\n\treturn false\n}\n\n\/\/ getDiff returns the (recursive) difference between a and b as a map[string]interface{}.\nfunc getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {\n\tinto := map[string]interface{}{}\n\tfor key, bv := range b {\n\t\tav, ok := a[key]\n\t\t\/\/ value was added\n\t\tif !ok {\n\t\t\tinto[key] = bv\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ If types have changed, replace completely\n\t\tif reflect.TypeOf(av) != reflect.TypeOf(bv) {\n\t\t\tinto[key] = bv\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Types are the same, compare values\n\t\tswitch at := av.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tbt := bv.(map[string]interface{})\n\t\t\tdst := make(map[string]interface{}, len(bt))\n\t\t\tdst, err := getDiff(at, bt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif len(dst) > 0 {\n\t\t\t\tinto[key] = dst\n\t\t\t}\n\t\tcase string, float64, bool:\n\t\t\tif !matchesValue(av, bv) {\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tbt := bv.([]interface{})\n\t\t\tif !matchesArray(at, bt) {\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tcase nil:\n\t\t\tswitch bv.(type) {\n\t\t\tcase nil:\n\t\t\t\t\/\/ Both nil, fine.\n\t\t\tdefault:\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unknown type:%T in key %s\", av, key))\n\t\t}\n\t}\n\t\/\/ Now add all deleted values as nil\n\tfor key := range a {\n\t\t_, found := b[key]\n\t\tif !found {\n\t\t\tinto[key] = nil\n\t\t}\n\t}\n\treturn into, nil\n}\n<commit_msg>add comments to CreateMergePatch<commit_after>package jsonpatch\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {\n\tcurDoc, err := cur.intoDoc()\n\n\tif err != nil {\n\t\tpruneNulls(patch)\n\t\treturn patch\n\t}\n\n\tpatchDoc, err := patch.intoDoc()\n\n\tif err != nil {\n\t\treturn patch\n\t}\n\n\tmergeDocs(curDoc, patchDoc, mergeMerge)\n\n\treturn cur\n}\n\nfunc mergeDocs(doc, patch *partialDoc, mergeMerge bool) {\n\tfor k, v := range *patch {\n\t\tif v == nil {\n\t\t\tif mergeMerge {\n\t\t\t\t(*doc)[k] = nil\n\t\t\t} else {\n\t\t\t\tdelete(*doc, k)\n\t\t\t}\n\t\t} else {\n\t\t\tcur, ok := (*doc)[k]\n\n\t\t\tif !ok || cur == nil {\n\t\t\t\tpruneNulls(v)\n\t\t\t\t(*doc)[k] = v\n\t\t\t} else {\n\t\t\t\t(*doc)[k] = merge(cur, v, mergeMerge)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc pruneNulls(n *lazyNode) {\n\tsub, err := n.intoDoc()\n\n\tif err == nil {\n\t\tpruneDocNulls(sub)\n\t} else {\n\t\tary, err := n.intoAry()\n\n\t\tif err == nil {\n\t\t\tpruneAryNulls(ary)\n\t\t}\n\t}\n}\n\nfunc pruneDocNulls(doc *partialDoc) *partialDoc {\n\tfor k, v := range *doc {\n\t\tif v == nil {\n\t\t\tdelete(*doc, k)\n\t\t} else {\n\t\t\tpruneNulls(v)\n\t\t}\n\t}\n\n\treturn doc\n}\n\nfunc pruneAryNulls(ary *partialArray) *partialArray {\n\tnewAry := []*lazyNode{}\n\n\tfor _, v := range *ary {\n\t\tif v != nil {\n\t\t\tpruneNulls(v)\n\t\t\tnewAry = append(newAry, v)\n\t\t}\n\t}\n\n\t*ary = newAry\n\n\treturn ary\n}\n\nvar errBadJSONDoc = fmt.Errorf(\"Invalid JSON Document\")\nvar errBadJSONPatch = fmt.Errorf(\"Invalid JSON Patch\")\nvar errBadMergeTypes = fmt.Errorf(\"Mismatched JSON Documents\")\n\n\/\/ MergeMergePatches merges two merge patches together, such that\n\/\/ applying this resulting merged merge patch to a document yields the same\n\/\/ as merging each merge patch to the document in succession.\nfunc MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {\n\treturn doMergePatch(patch1Data, patch2Data, true)\n}\n\n\/\/ MergePatch merges the patchData into the docData.\nfunc MergePatch(docData, patchData []byte) ([]byte, error) {\n\treturn doMergePatch(docData, patchData, false)\n}\n\nfunc doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {\n\tdoc := &partialDoc{}\n\n\tdocErr := json.Unmarshal(docData, doc)\n\n\tpatch := &partialDoc{}\n\n\tpatchErr := json.Unmarshal(patchData, patch)\n\n\tif _, ok := docErr.(*json.SyntaxError); ok {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tif _, ok := patchErr.(*json.SyntaxError); ok {\n\t\treturn nil, errBadJSONPatch\n\t}\n\n\tif docErr == nil && *doc == nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tif patchErr == nil && *patch == nil {\n\t\treturn nil, errBadJSONPatch\n\t}\n\n\tif docErr != nil || patchErr != nil {\n\t\t\/\/ Not an error, just not a doc, so we turn straight into the patch\n\t\tif patchErr == nil {\n\t\t\tif mergeMerge {\n\t\t\t\tdoc = patch\n\t\t\t} else {\n\t\t\t\tdoc = pruneDocNulls(patch)\n\t\t\t}\n\t\t} else {\n\t\t\tpatchAry := &partialArray{}\n\t\t\tpatchErr = json.Unmarshal(patchData, patchAry)\n\n\t\t\tif patchErr != nil {\n\t\t\t\treturn nil, errBadJSONPatch\n\t\t\t}\n\n\t\t\tpruneAryNulls(patchAry)\n\n\t\t\tout, patchErr := json.Marshal(patchAry)\n\n\t\t\tif patchErr != nil {\n\t\t\t\treturn nil, errBadJSONPatch\n\t\t\t}\n\n\t\t\treturn out, nil\n\t\t}\n\t} else {\n\t\tmergeDocs(doc, patch, mergeMerge)\n\t}\n\n\treturn json.Marshal(doc)\n}\n\n\/\/ resemblesJSONArray indicates whether the byte-slice \"appears\" to be\n\/\/ a JSON array or not.\n\/\/ False-positives are possible, as this function does not check the internal\n\/\/ structure of the array. It only checks that the outer syntax is present and\n\/\/ correct.\nfunc resemblesJSONArray(input []byte) bool {\n\tinput = bytes.TrimSpace(input)\n\n\thasPrefix := bytes.HasPrefix(input, []byte(\"[\"))\n\thasSuffix := bytes.HasSuffix(input, []byte(\"]\"))\n\n\treturn hasPrefix && hasSuffix\n}\n\n\/\/ CreateMergePatch will return a merge patch document capable of converting\n\/\/ the original document(s) to the modified document(s).\n\/\/ The parameters can be bytes of either two JSON Documents, or two arrays of\n\/\/ JSON documents.\n\/\/ The merge patch returned follows the specification defined at http:\/\/tools.ietf.org\/html\/draft-ietf-appsawg-json-merge-patch-07\nfunc CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalResemblesArray := resemblesJSONArray(originalJSON)\n\tmodifiedResemblesArray := resemblesJSONArray(modifiedJSON)\n\n\t\/\/ Do both byte-slices seem like JSON arrays?\n\tif originalResemblesArray && modifiedResemblesArray {\n\t\treturn createArrayMergePatch(originalJSON, modifiedJSON)\n\t}\n\n\t\/\/ Are both byte-slices are not arrays? Then they are likely JSON objects...\n\tif !originalResemblesArray && !modifiedResemblesArray {\n\t\treturn createObjectMergePatch(originalJSON, modifiedJSON)\n\t}\n\n\t\/\/ None of the above? Then return an error because of mismatched types.\n\treturn nil, errBadMergeTypes\n}\n\n\/\/ createObjectMergePatch will return a merge-patch document capable of\n\/\/ converting the original document to the modified document.\nfunc createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalDoc := map[string]interface{}{}\n\tmodifiedDoc := map[string]interface{}{}\n\n\terr := json.Unmarshal(originalJSON, &originalDoc)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\terr = json.Unmarshal(modifiedJSON, &modifiedDoc)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tdest, err := getDiff(originalDoc, modifiedDoc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(dest)\n}\n\n\/\/ createArrayMergePatch will return an array of merge-patch documents capable\n\/\/ of converting the original document to the modified document for each\n\/\/ pair of JSON documents provided in the arrays.\n\/\/ Arrays of mismatched sizes will result in an error.\nfunc createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {\n\toriginalDocs := []json.RawMessage{}\n\tmodifiedDocs := []json.RawMessage{}\n\n\terr := json.Unmarshal(originalJSON, &originalDocs)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\terr = json.Unmarshal(modifiedJSON, &modifiedDocs)\n\tif err != nil {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\ttotal := len(originalDocs)\n\tif len(modifiedDocs) != total {\n\t\treturn nil, errBadJSONDoc\n\t}\n\n\tresult := []json.RawMessage{}\n\tfor i := 0; i < len(originalDocs); i++ {\n\t\toriginal := originalDocs[i]\n\t\tmodified := modifiedDocs[i]\n\n\t\tpatch, err := createObjectMergePatch(original, modified)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult = append(result, json.RawMessage(patch))\n\t}\n\n\treturn json.Marshal(result)\n}\n\n\/\/ Returns true if the array matches (must be json types).\n\/\/ As is idiomatic for go, an empty array is not the same as a nil array.\nfunc matchesArray(a, b []interface{}) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tif (a == nil && b != nil) || (a != nil && b == nil) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif !matchesValue(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Returns true if the values matches (must be json types)\n\/\/ The types of the values must match, otherwise it will always return false\n\/\/ If two map[string]interface{} are given, all elements must match.\nfunc matchesValue(av, bv interface{}) bool {\n\tif reflect.TypeOf(av) != reflect.TypeOf(bv) {\n\t\treturn false\n\t}\n\tswitch at := av.(type) {\n\tcase string:\n\t\tbt := bv.(string)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase float64:\n\t\tbt := bv.(float64)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase bool:\n\t\tbt := bv.(bool)\n\t\tif bt == at {\n\t\t\treturn true\n\t\t}\n\tcase nil:\n\t\t\/\/ Both nil, fine.\n\t\treturn true\n\tcase map[string]interface{}:\n\t\tbt := bv.(map[string]interface{})\n\t\tfor key := range at {\n\t\t\tif !matchesValue(at[key], bt[key]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tfor key := range bt {\n\t\t\tif !matchesValue(at[key], bt[key]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase []interface{}:\n\t\tbt := bv.([]interface{})\n\t\treturn matchesArray(at, bt)\n\t}\n\treturn false\n}\n\n\/\/ getDiff returns the (recursive) difference between a and b as a map[string]interface{}.\nfunc getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {\n\tinto := map[string]interface{}{}\n\tfor key, bv := range b {\n\t\tav, ok := a[key]\n\t\t\/\/ value was added\n\t\tif !ok {\n\t\t\tinto[key] = bv\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ If types have changed, replace completely\n\t\tif reflect.TypeOf(av) != reflect.TypeOf(bv) {\n\t\t\tinto[key] = bv\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Types are the same, compare values\n\t\tswitch at := av.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tbt := bv.(map[string]interface{})\n\t\t\tdst := make(map[string]interface{}, len(bt))\n\t\t\tdst, err := getDiff(at, bt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif len(dst) > 0 {\n\t\t\t\tinto[key] = dst\n\t\t\t}\n\t\tcase string, float64, bool:\n\t\t\tif !matchesValue(av, bv) {\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tbt := bv.([]interface{})\n\t\t\tif !matchesArray(at, bt) {\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tcase nil:\n\t\t\tswitch bv.(type) {\n\t\t\tcase nil:\n\t\t\t\t\/\/ Both nil, fine.\n\t\t\tdefault:\n\t\t\t\tinto[key] = bv\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unknown type:%T in key %s\", av, key))\n\t\t}\n\t}\n\t\/\/ Now add all deleted values as nil\n\tfor key := range a {\n\t\t_, found := b[key]\n\t\tif !found {\n\t\t\tinto[key] = nil\n\t\t}\n\t}\n\treturn into, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/dbtest\"\n\t\"github.com\/tsuru\/tsuru\/io\"\n\t\"github.com\/tsuru\/tsuru\/provision\/provisiontest\"\n\t\"gopkg.in\/check.v1\"\n)\n\ntype PlatformSuite struct{}\n\nvar _ = check.Suite(&PlatformSuite{})\n\nfunc (s *PlatformSuite) SetUpTest(c *check.C) {\n\tconfig.Set(\"database:url\", \"127.0.0.1:27017\")\n\tconfig.Set(\"database:name\", \"tsuru_api_platform_test\")\n\tconn, err := db.Conn()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\tdbtest.ClearAllCollections(conn.Apps().Database)\n}\n\nfunc (p *PlatformSuite) TestPlatformAdd(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"name=%s&dockerfile=%s\", \"teste\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"POST\", \"\/platforms\/add\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformAdd(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tc.Assert(recorder.Body.String(), check.Equals, \"\\nOK!\\n\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdate(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"wat\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateOnlyDisableTrue(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"wat\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=true\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateDisableTrueAndDockerfile(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"wat\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=true\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateOnlyDisableFalse(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"wat\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=false\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateDisableFalseAndDockerfile(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"wat\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=false\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformRemove(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(\"test\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\trequest, _ := http.NewRequest(\"DELETE\", \"\/platforms\/test?:name=test\", nil)\n\trecorder := httptest.NewRecorder()\n\terr = platformRemove(recorder, request, nil)\n\tc.Assert(err, check.IsNil)\n}\n<commit_msg>api: fix build<commit_after>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/dbtest\"\n\t\"github.com\/tsuru\/tsuru\/io\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/provision\/provisiontest\"\n\t\"gopkg.in\/check.v1\"\n)\n\ntype PlatformSuite struct{}\n\nvar _ = check.Suite(&PlatformSuite{})\n\nfunc (s *PlatformSuite) SetUpTest(c *check.C) {\n\tconfig.Set(\"database:url\", \"127.0.0.1:27017\")\n\tconfig.Set(\"database:name\", \"tsuru_api_platform_test\")\n\tconn, err := db.Conn()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\tdbtest.ClearAllCollections(conn.Apps().Database)\n}\n\nfunc (p *PlatformSuite) TestPlatformAdd(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"name=%s&dockerfile=%s\", \"teste\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"POST\", \"\/platforms\/add\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformAdd(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tc.Assert(recorder.Body.String(), check.Equals, \"\\nOK!\\n\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdate(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"wat\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateOnlyDisableTrue(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"wat\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=true\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateDisableTrueAndDockerfile(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"wat\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=true\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateOnlyDisableFalse(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"wat\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=false\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformUpdateDisableFalseAndDockerfile(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"wat\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\tdockerfile_url := \"http:\/\/localhost\/Dockerfile\"\n\tbody := fmt.Sprintf(\"dockerfile=%s\", dockerfile_url)\n\trequest, _ := http.NewRequest(\"PUT\", \"\/platforms\/wat?:name=wat&disabled=false\", strings.NewReader(body))\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\trecorder := httptest.NewRecorder()\n\tresult := platformUpdate(recorder, request, nil)\n\tc.Assert(result, check.IsNil)\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tc.Assert(err, check.IsNil)\n\tvar msg io.SimpleJsonMessage\n\tjson.Unmarshal(b, &msg)\n\tc.Assert(errors.New(msg.Error), check.ErrorMatches, \"\")\n}\n\nfunc (p *PlatformSuite) TestPlatformRemove(c *check.C) {\n\tprovisioner := provisiontest.ExtensibleFakeProvisioner{\n\t\tFakeProvisioner: provisiontest.NewFakeProvisioner(),\n\t}\n\toldProvisioner := app.Provisioner\n\tapp.Provisioner = &provisioner\n\tdefer func() {\n\t\tapp.Provisioner = oldProvisioner\n\t}()\n\terr := app.PlatformAdd(provision.PlatformOptions{Name: \"test\", Args: nil, Output: nil})\n\tc.Assert(err, check.IsNil)\n\trequest, _ := http.NewRequest(\"DELETE\", \"\/platforms\/test?:name=test\", nil)\n\trecorder := httptest.NewRecorder()\n\terr = platformRemove(recorder, request, nil)\n\tc.Assert(err, check.IsNil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage router\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/TheThingsNetwork\/go-utils\/grpc\/restartstream\"\n\t\"github.com\/TheThingsNetwork\/go-utils\/log\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\/gateway\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\"\n\t\"github.com\/golang\/protobuf\/ptypes\/empty\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\n\/\/ GenericStream is used for sending to and receiving from the router.\ntype GenericStream interface {\n\tUplink(*UplinkMessage)\n\tStatus(*gateway.Status)\n\tDownlink() (<-chan *DownlinkMessage, error)\n\tClose()\n}\n\n\/\/ ClientConfig for router Client\ntype ClientConfig struct {\n\tBackgroundContext context.Context\n\tBufferSize int\n}\n\n\/\/ DefaultClientConfig for router Client\nvar DefaultClientConfig = ClientConfig{\n\tBackgroundContext: context.Background(),\n\tBufferSize: 10,\n}\n\n\/\/ NewClient creates a new Client with the given configuration\nfunc NewClient(config ClientConfig) *Client {\n\tctx, cancel := context.WithCancel(config.BackgroundContext)\n\n\treturn &Client{\n\t\tlog: log.Get(),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\tconfig: config,\n\t}\n}\n\n\/\/ Client for router\ntype Client struct {\n\tlog log.Interface\n\tctx context.Context\n\tcancel context.CancelFunc\n\n\tconfig ClientConfig\n\tserverConns []*serverConn\n}\n\n\/\/ AddServer adds a router server\nfunc (c *Client) AddServer(name string, conn *grpc.ClientConn) {\n\tlog := c.log.WithField(\"Router\", name)\n\tlog.Info(\"Adding Router server\")\n\ts := &serverConn{\n\t\tctx: log,\n\t\tname: name,\n\t\tconn: conn,\n\t}\n\tc.serverConns = append(c.serverConns, s)\n}\n\n\/\/ Close the client and all its connections\nfunc (c *Client) Close() {\n\tc.cancel()\n\tfor _, server := range c.serverConns {\n\t\tserver.Close()\n\t}\n}\n\ntype serverConn struct {\n\tctx log.Interface\n\tname string\n\n\tready chan struct{}\n\tconn *grpc.ClientConn\n}\n\nfunc (c *serverConn) Close() {\n\tif c.ready != nil {\n\t\t<-c.ready\n\t}\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t}\n}\n\ntype gatewayStreams struct {\n\tlog log.Interface\n\tctx context.Context\n\tcancel context.CancelFunc\n\n\tmu sync.RWMutex\n\tuplink map[string]chan *UplinkMessage\n\tstatus map[string]chan *gateway.Status\n\n\tdownlink chan *DownlinkMessage\n}\n\nfunc (s *gatewayStreams) Uplink(msg *UplinkMessage) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\ts.log.WithField(\"Routers\", len(s.uplink)).Debug(\"Sending UplinkMessage to router\")\n\tfor serverName, ch := range s.uplink {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\tdefault:\n\t\t\ts.log.WithField(\"Router\", serverName).Warn(\"UplinkMessage buffer full\")\n\t\t}\n\t}\n}\n\nfunc (s *gatewayStreams) Status(msg *gateway.Status) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\ts.log.WithField(\"Routers\", len(s.status)).Debug(\"Sending Status to router\")\n\tfor serverName, ch := range s.status {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\tdefault:\n\t\t\ts.log.WithField(\"Router\", serverName).Warn(\"GatewayStatus buffer full\")\n\t\t}\n\t}\n}\n\n\/\/ ErrDownlinkInactive is returned by the Downlink func if downlink is inactive\nvar ErrDownlinkInactive = errors.New(\"Downlink stream not active\")\n\nfunc (s *gatewayStreams) Downlink() (<-chan *DownlinkMessage, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif s.downlink == nil {\n\t\treturn nil, ErrDownlinkInactive\n\t}\n\treturn s.downlink, nil\n}\n\nfunc (s *gatewayStreams) Close() {\n\ts.cancel()\n}\n\n\/\/ NewGatewayStreams returns new streams using the given gateway ID and token\nfunc (c *Client) NewGatewayStreams(id string, token string, downlinkActive bool) GenericStream {\n\tlog := c.log.WithField(\"GatewayID\", id)\n\tctx, cancel := context.WithCancel(c.ctx)\n\tctx = api.ContextWithID(ctx, id)\n\tctx = api.ContextWithToken(ctx, token)\n\ts := &gatewayStreams{\n\t\tlog: log,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\tuplink: make(map[string]chan *UplinkMessage),\n\t\tstatus: make(map[string]chan *gateway.Status),\n\t}\n\n\tvar wg utils.WaitGroup\n\n\tvar wgDown sync.WaitGroup\n\tif downlinkActive {\n\t\ts.downlink = make(chan *DownlinkMessage, c.config.BufferSize)\n\t\tdefer func() {\n\t\t\tgo func() {\n\t\t\t\twgDown.Wait()\n\t\t\t\tclose(s.downlink)\n\t\t\t}()\n\t\t}()\n\t}\n\n\t\/\/ Hook up the router servers\n\tfor _, server := range c.serverConns {\n\t\twg.Add(1)\n\t\twgDown.Add(1)\n\t\tgo func(server *serverConn) {\n\t\t\tif server.ready != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-server.ready:\n\t\t\t\t}\n\t\t\t}\n\t\t\tif server.conn == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog := log.WithField(\"Router\", server.name)\n\t\t\tcli := NewRouterClient(server.conn)\n\n\t\t\tlogStreamErr := func(streamName string, err error) {\n\t\t\t\tswitch {\n\t\t\t\tcase err == nil:\n\t\t\t\t\tlog.Debugf(\"%s stream closed\", streamName)\n\t\t\t\tcase err == io.EOF:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream ended\", streamName)\n\t\t\t\tcase err == context.Canceled || grpc.Code(err) == codes.Canceled:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream canceled\", streamName)\n\t\t\t\tcase err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream deadline exceeded\", streamName)\n\t\t\t\tcase grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream connection closed\", streamName)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.WithError(err).Warnf(\"%s stream closed unexpectedly\", streamName)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Stream channels\n\t\t\tchUplink := make(chan *UplinkMessage, c.config.BufferSize)\n\t\t\tchStatus := make(chan *gateway.Status, c.config.BufferSize)\n\n\t\t\tdefer func() {\n\t\t\t\ts.mu.Lock()\n\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\tdelete(s.uplink, server.name)\n\t\t\t\tdelete(s.status, server.name)\n\t\t\t\tclose(chUplink)\n\t\t\t\tclose(chStatus)\n\t\t\t}()\n\n\t\t\t\/\/ Uplink stream\n\t\t\tuplink, err := cli.Uplink(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Could not set up Uplink stream\")\n\t\t\t} else {\n\t\t\t\ts.mu.Lock()\n\t\t\t\ts.uplink[server.name] = chUplink\n\t\t\t\ts.mu.Unlock()\n\t\t\t\tgo func() {\n\t\t\t\t\terr := uplink.RecvMsg(new(empty.Empty))\n\t\t\t\t\tlogStreamErr(\"Uplink\", err)\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\t\tdelete(s.uplink, server.name)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t\/\/ Downlink stream\n\t\t\tif downlinkActive {\n\t\t\t\tdownlink, err := cli.Subscribe(ctx, &SubscribeRequest{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithError(err).Warn(\"Could not set up Subscribe stream\")\n\t\t\t\t\twgDown.Done()\n\t\t\t\t} else {\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\twgDown.Done()\n\t\t\t\t\t\t}()\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tmsg, err := downlink.Recv()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogStreamErr(\"Subscribe\", err)\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\tcase s.downlink <- msg:\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tlog.Warn(\"Downlink buffer full\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Status stream\n\t\t\tstatus, err := cli.GatewayStatus(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Could not set up GatewayStatus stream\")\n\t\t\t} else {\n\t\t\t\ts.mu.Lock()\n\t\t\t\ts.status[server.name] = chStatus\n\t\t\t\ts.mu.Unlock()\n\t\t\t\tgo func() {\n\t\t\t\t\terr := status.RecvMsg(new(empty.Empty))\n\t\t\t\t\tlogStreamErr(\"GatewayStatus\", err)\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\t\tdelete(s.status, server.name)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t\tlog.Debug(\"Start handling Gateway streams\")\n\t\t\tdefer log.Debug(\"Done handling Gateway streams\")\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase msg := <-chStatus:\n\t\t\t\t\tif err := status.Send(msg); err != nil {\n\t\t\t\t\t\tlog.WithError(err).Warn(\"Could not send GatewayStatus to router\")\n\t\t\t\t\t\tif err == restartstream.ErrStreamClosed {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase msg := <-chUplink:\n\t\t\t\t\tif err := uplink.Send(msg); err != nil {\n\t\t\t\t\t\tlog.WithError(err).Warn(\"Could not send UplinkMessage to router\")\n\t\t\t\t\t\tif err == restartstream.ErrStreamClosed {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}(server)\n\t}\n\n\tif api.WaitForStreams > 0 {\n\t\twg.WaitForMax(api.WaitForStreams)\n\t}\n\n\treturn s\n}\n<commit_msg>Assume successful stream setup<commit_after>\/\/ Copyright © 2017 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage router\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/TheThingsNetwork\/go-utils\/grpc\/restartstream\"\n\t\"github.com\/TheThingsNetwork\/go-utils\/log\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\/gateway\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\"\n\t\"github.com\/golang\/protobuf\/ptypes\/empty\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\n\/\/ GenericStream is used for sending to and receiving from the router.\ntype GenericStream interface {\n\tUplink(*UplinkMessage)\n\tStatus(*gateway.Status)\n\tDownlink() (<-chan *DownlinkMessage, error)\n\tClose()\n}\n\n\/\/ ClientConfig for router Client\ntype ClientConfig struct {\n\tBackgroundContext context.Context\n\tBufferSize int\n}\n\n\/\/ DefaultClientConfig for router Client\nvar DefaultClientConfig = ClientConfig{\n\tBackgroundContext: context.Background(),\n\tBufferSize: 10,\n}\n\n\/\/ NewClient creates a new Client with the given configuration\nfunc NewClient(config ClientConfig) *Client {\n\tctx, cancel := context.WithCancel(config.BackgroundContext)\n\n\treturn &Client{\n\t\tlog: log.Get(),\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\tconfig: config,\n\t}\n}\n\n\/\/ Client for router\ntype Client struct {\n\tlog log.Interface\n\tctx context.Context\n\tcancel context.CancelFunc\n\n\tconfig ClientConfig\n\tserverConns []*serverConn\n}\n\n\/\/ AddServer adds a router server\nfunc (c *Client) AddServer(name string, conn *grpc.ClientConn) {\n\tlog := c.log.WithField(\"Router\", name)\n\tlog.Info(\"Adding Router server\")\n\ts := &serverConn{\n\t\tctx: log,\n\t\tname: name,\n\t\tconn: conn,\n\t}\n\tc.serverConns = append(c.serverConns, s)\n}\n\n\/\/ Close the client and all its connections\nfunc (c *Client) Close() {\n\tc.cancel()\n\tfor _, server := range c.serverConns {\n\t\tserver.Close()\n\t}\n}\n\ntype serverConn struct {\n\tctx log.Interface\n\tname string\n\n\tready chan struct{}\n\tconn *grpc.ClientConn\n}\n\nfunc (c *serverConn) Close() {\n\tif c.ready != nil {\n\t\t<-c.ready\n\t}\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t}\n}\n\ntype gatewayStreams struct {\n\tlog log.Interface\n\tctx context.Context\n\tcancel context.CancelFunc\n\n\tmu sync.RWMutex\n\tuplink map[string]chan *UplinkMessage\n\tstatus map[string]chan *gateway.Status\n\n\tdownlink chan *DownlinkMessage\n}\n\nfunc (s *gatewayStreams) Uplink(msg *UplinkMessage) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\ts.log.WithField(\"Routers\", len(s.uplink)).Debug(\"Sending UplinkMessage to router\")\n\tfor serverName, ch := range s.uplink {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\tdefault:\n\t\t\ts.log.WithField(\"Router\", serverName).Warn(\"UplinkMessage buffer full\")\n\t\t}\n\t}\n}\n\nfunc (s *gatewayStreams) Status(msg *gateway.Status) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\ts.log.WithField(\"Routers\", len(s.status)).Debug(\"Sending Status to router\")\n\tfor serverName, ch := range s.status {\n\t\tselect {\n\t\tcase ch <- msg:\n\t\tdefault:\n\t\t\ts.log.WithField(\"Router\", serverName).Warn(\"GatewayStatus buffer full\")\n\t\t}\n\t}\n}\n\n\/\/ ErrDownlinkInactive is returned by the Downlink func if downlink is inactive\nvar ErrDownlinkInactive = errors.New(\"Downlink stream not active\")\n\nfunc (s *gatewayStreams) Downlink() (<-chan *DownlinkMessage, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif s.downlink == nil {\n\t\treturn nil, ErrDownlinkInactive\n\t}\n\treturn s.downlink, nil\n}\n\nfunc (s *gatewayStreams) Close() {\n\ts.cancel()\n}\n\n\/\/ NewGatewayStreams returns new streams using the given gateway ID and token\nfunc (c *Client) NewGatewayStreams(id string, token string, downlinkActive bool) GenericStream {\n\tlog := c.log.WithField(\"GatewayID\", id)\n\tctx, cancel := context.WithCancel(c.ctx)\n\tctx = api.ContextWithID(ctx, id)\n\tctx = api.ContextWithToken(ctx, token)\n\ts := &gatewayStreams{\n\t\tlog: log,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\tuplink: make(map[string]chan *UplinkMessage),\n\t\tstatus: make(map[string]chan *gateway.Status),\n\t}\n\n\tvar wg utils.WaitGroup\n\n\tvar wgDown sync.WaitGroup\n\tif downlinkActive {\n\t\ts.downlink = make(chan *DownlinkMessage, c.config.BufferSize)\n\t\tdefer func() {\n\t\t\tgo func() {\n\t\t\t\twgDown.Wait()\n\t\t\t\tclose(s.downlink)\n\t\t\t}()\n\t\t}()\n\t}\n\n\t\/\/ Hook up the router servers\n\tfor _, server := range c.serverConns {\n\t\twg.Add(1)\n\t\twgDown.Add(1)\n\n\t\t\/\/ Stream channels\n\t\tchUplink := make(chan *UplinkMessage, c.config.BufferSize)\n\t\tchStatus := make(chan *gateway.Status, c.config.BufferSize)\n\n\t\ts.mu.Lock()\n\t\ts.uplink[server.name] = chUplink\n\t\ts.status[server.name] = chStatus\n\t\ts.mu.Unlock()\n\n\t\tgo func(server *serverConn) {\n\t\t\tdefer func() {\n\t\t\t\ts.mu.Lock()\n\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\tdelete(s.uplink, server.name)\n\t\t\t\tdelete(s.status, server.name)\n\t\t\t\tclose(chUplink)\n\t\t\t\tclose(chStatus)\n\t\t\t}()\n\n\t\t\tif server.ready != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-server.ready:\n\t\t\t\t}\n\t\t\t}\n\t\t\tif server.conn == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog := log.WithField(\"Router\", server.name)\n\t\t\tcli := NewRouterClient(server.conn)\n\n\t\t\tlogStreamErr := func(streamName string, err error) {\n\t\t\t\tswitch {\n\t\t\t\tcase err == nil:\n\t\t\t\t\tlog.Debugf(\"%s stream closed\", streamName)\n\t\t\t\tcase err == io.EOF:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream ended\", streamName)\n\t\t\t\tcase err == context.Canceled || grpc.Code(err) == codes.Canceled:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream canceled\", streamName)\n\t\t\t\tcase err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream deadline exceeded\", streamName)\n\t\t\t\tcase grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():\n\t\t\t\t\tlog.WithError(err).Debugf(\"%s stream connection closed\", streamName)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.WithError(err).Warnf(\"%s stream closed unexpectedly\", streamName)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Uplink stream\n\t\t\tuplink, err := cli.Uplink(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Could not set up Uplink stream\")\n\t\t\t\ts.mu.Lock()\n\t\t\t\tdelete(s.uplink, server.name)\n\t\t\t\ts.mu.Unlock()\n\t\t\t} else {\n\t\t\t\tgo func() {\n\t\t\t\t\terr := uplink.RecvMsg(new(empty.Empty))\n\t\t\t\t\tlogStreamErr(\"Uplink\", err)\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\t\tdelete(s.uplink, server.name)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t\/\/ Downlink stream\n\t\t\tif downlinkActive {\n\t\t\t\tdownlink, err := cli.Subscribe(ctx, &SubscribeRequest{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithError(err).Warn(\"Could not set up Subscribe stream\")\n\t\t\t\t\twgDown.Done()\n\t\t\t\t} else {\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\twgDown.Done()\n\t\t\t\t\t\t}()\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tmsg, err := downlink.Recv()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogStreamErr(\"Subscribe\", err)\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\tcase s.downlink <- msg:\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tlog.Warn(\"Downlink buffer full\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Status stream\n\t\t\tstatus, err := cli.GatewayStatus(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Could not set up GatewayStatus stream\")\n\t\t\t\ts.mu.Lock()\n\t\t\t\tdelete(s.status, server.name)\n\t\t\t\ts.mu.Unlock()\n\t\t\t} else {\n\t\t\t\tgo func() {\n\t\t\t\t\terr := status.RecvMsg(new(empty.Empty))\n\t\t\t\t\tlogStreamErr(\"GatewayStatus\", err)\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\tdefer s.mu.Unlock()\n\t\t\t\t\tdelete(s.status, server.name)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t\tlog.Debug(\"Start handling Gateway streams\")\n\t\t\tdefer log.Debug(\"Done handling Gateway streams\")\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase msg := <-chStatus:\n\t\t\t\t\tif err := status.Send(msg); err != nil {\n\t\t\t\t\t\tlog.WithError(err).Warn(\"Could not send GatewayStatus to router\")\n\t\t\t\t\t\tif err == restartstream.ErrStreamClosed {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase msg := <-chUplink:\n\t\t\t\t\tif err := uplink.Send(msg); err != nil {\n\t\t\t\t\t\tlog.WithError(err).Warn(\"Could not send UplinkMessage to router\")\n\t\t\t\t\t\tif err == restartstream.ErrStreamClosed {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}(server)\n\t}\n\n\tif api.WaitForStreams > 0 {\n\t\twg.WaitForMax(api.WaitForStreams)\n\t}\n\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fnproject\/fn\/api\"\n\t\"github.com\/fnproject\/fn\/api\/id\"\n\t\"github.com\/fnproject\/fn\/api\/models\"\n\t\"github.com\/fnproject\/fn\/api\/runner\"\n\t\"github.com\/fnproject\/fn\/api\/runner\/common\"\n\t\"github.com\/fnproject\/fn\/api\/runner\/task\"\n\t\"github.com\/gin-gonic\/gin\"\n\tcache \"github.com\/patrickmn\/go-cache\"\n)\n\ntype runnerResponse struct {\n\tRequestID string `json:\"request_id,omitempty\"`\n\tError *models.ErrorBody `json:\"error,omitempty\"`\n}\n\nfunc toEnvName(envtype, name string) string {\n\tname = strings.ToUpper(strings.Replace(name, \"-\", \"_\", -1))\n\tif envtype == \"\" {\n\t\treturn name\n\t}\n\treturn fmt.Sprintf(\"%s_%s\", envtype, name)\n}\n\nfunc (s *Server) handleRequest(c *gin.Context, enqueue models.Enqueue) {\n\tif strings.HasPrefix(c.Request.URL.Path, \"\/v1\") {\n\t\tc.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tctx := c.Request.Context()\n\n\treqID := id.New().String()\n\tctx, log := common.LoggerWithFields(ctx, logrus.Fields{\"call_id\": reqID})\n\n\tvar err error\n\tvar payload io.Reader\n\n\tif c.Request.Method == \"POST\" {\n\t\tpayload = c.Request.Body\n\t\t\/\/ Load complete body and close\n\t\tdefer func() {\n\t\t\tio.Copy(ioutil.Discard, c.Request.Body)\n\t\t\tc.Request.Body.Close()\n\t\t}()\n\t} else if c.Request.Method == \"GET\" {\n\t\treqPayload := c.Request.URL.Query().Get(\"payload\")\n\t\tpayload = strings.NewReader(reqPayload)\n\t}\n\n\tr, routeExists := c.Get(api.Path)\n\tif !routeExists {\n\t\tr = \"\/\"\n\t}\n\n\treqRoute := &models.Route{\n\t\tAppName: c.MustGet(api.AppName).(string),\n\t\tPath: path.Clean(r.(string)),\n\t}\n\n\ts.FireBeforeDispatch(ctx, reqRoute)\n\n\tappName := reqRoute.AppName\n\tpath := reqRoute.Path\n\n\tapp, err := s.Datastore.GetApp(ctx, appName)\n\tif err != nil {\n\t\thandleErrorResponse(c, err)\n\t\treturn\n\t} else if app == nil {\n\t\thandleErrorResponse(c, models.ErrAppsNotFound)\n\t\treturn\n\t}\n\n\tlog.WithFields(logrus.Fields{\"app\": appName, \"path\": path}).Debug(\"Finding route on datastore\")\n\troute, err := s.loadroute(ctx, appName, path)\n\tif err != nil {\n\t\thandleErrorResponse(c, err)\n\t\treturn\n\t}\n\n\tif route == nil {\n\t\thandleErrorResponse(c, models.ErrRoutesNotFound)\n\t\treturn\n\t}\n\n\tlog = log.WithFields(logrus.Fields{\"app\": appName, \"path\": route.Path, \"image\": route.Image})\n\tlog.Debug(\"Got route from datastore\")\n\n\tif s.serve(ctx, c, appName, route, app, path, reqID, payload, enqueue) {\n\t\ts.FireAfterDispatch(ctx, reqRoute)\n\t\treturn\n\t}\n\n\thandleErrorResponse(c, models.ErrRoutesNotFound)\n}\n\nfunc (s *Server) loadroute(ctx context.Context, appName, path string) (*models.Route, error) {\n\tif route, ok := s.cacheget(appName, path); ok {\n\t\treturn route, nil\n\t}\n\tkey := routeCacheKey(appName, path)\n\tresp, err := s.singleflight.do(\n\t\tkey,\n\t\tfunc() (interface{}, error) {\n\t\t\treturn s.Datastore.GetRoute(ctx, appName, path)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troute := resp.(*models.Route)\n\ts.routeCache.Set(key, route, cache.DefaultExpiration)\n\treturn route, nil\n}\n\n\/\/ TODO: Should remove *gin.Context from these functions, should use only context.Context\nfunc (s *Server) serve(ctx context.Context, c *gin.Context, appName string, route *models.Route, app *models.App, path, reqID string, payload io.Reader, enqueue models.Enqueue) (ok bool) {\n\tctx, log := common.LoggerWithFields(ctx, logrus.Fields{\"app\": appName, \"route\": route.Path, \"image\": route.Image})\n\n\tparams, match := matchRoute(route.Path, path)\n\tif !match {\n\t\treturn false\n\t}\n\n\tvar stdout bytes.Buffer \/\/ TODO: should limit the size of this, error if gets too big. akin to: https:\/\/golang.org\/pkg\/io\/#LimitReader\n\n\tif route.Format == \"\" {\n\t\troute.Format = \"default\"\n\t}\n\n\t\/\/ baseVars are the vars on the route & app, not on this specific request [for hot functions]\n\tbaseVars := make(map[string]string, len(app.Config)+len(route.Config)+3)\n\tbaseVars[\"FN_FORMAT\"] = route.Format\n\tbaseVars[\"APP_NAME\"] = appName\n\tbaseVars[\"ROUTE\"] = route.Path\n\tbaseVars[\"MEMORY_MB\"] = fmt.Sprintf(\"%d\", route.Memory)\n\n\t\/\/ app config\n\tfor k, v := range app.Config {\n\t\tk = toEnvName(\"\", k)\n\t\tbaseVars[k] = v\n\t}\n\tfor k, v := range route.Config {\n\t\tk = toEnvName(\"\", k)\n\t\tbaseVars[k] = v\n\t}\n\n\t\/\/ envVars contains the full set of env vars, per request + base\n\tenvVars := make(map[string]string, len(baseVars)+len(params)+len(c.Request.Header)+3)\n\n\tfor k, v := range baseVars {\n\t\tenvVars[k] = v\n\t}\n\n\tenvVars[\"CALL_ID\"] = reqID\n\tenvVars[\"METHOD\"] = c.Request.Method\n\tenvVars[\"REQUEST_URL\"] = fmt.Sprintf(\"%v\/\/%v%v\", func() string {\n\t\tif c.Request.TLS == nil {\n\t\t\treturn \"http\"\n\t\t}\n\t\treturn \"https\"\n\t}(), c.Request.Host, c.Request.URL.String())\n\n\t\/\/ params\n\tfor _, param := range params {\n\t\tenvVars[toEnvName(\"PARAM\", param.Key)] = param.Value\n\t}\n\n\t\/\/ headers\n\tfor header, value := range c.Request.Header {\n\t\tenvVars[toEnvName(\"HEADER\", header)] = strings.Join(value, \", \")\n\t}\n\n\tcfg := &task.Config{\n\t\tAppName: appName,\n\t\tPath: route.Path,\n\t\tBaseEnv: baseVars,\n\t\tEnv: envVars,\n\t\tFormat: route.Format,\n\t\tID: reqID,\n\t\tImage: route.Image,\n\t\tMemory: route.Memory,\n\t\tStdin: payload,\n\t\tStdout: &stdout,\n\t\tTimeout: time.Duration(route.Timeout) * time.Second,\n\t\tIdleTimeout: time.Duration(route.IdleTimeout) * time.Second,\n\t\tReceivedTime: time.Now(),\n\t\tReady: make(chan struct{}),\n\t}\n\n\t\/\/ ensure valid values\n\tif cfg.Timeout <= 0 {\n\t\tcfg.Timeout = runner.DefaultTimeout\n\t}\n\tif cfg.IdleTimeout <= 0 {\n\t\tcfg.IdleTimeout = runner.DefaultIdleTimeout\n\t}\n\n\ts.Runner.Enqueue()\n\tnewTask := task.TaskFromConfig(cfg)\n\n\tswitch route.Type {\n\tcase \"async\":\n\t\t\/\/ TODO we should be able to do hot input to async. plumb protocol stuff\n\t\t\/\/ TODO enqueue should unravel the payload?\n\n\t\t\/\/ Read payload\n\t\tpl, err := ioutil.ReadAll(cfg.Stdin)\n\t\tif err != nil {\n\t\t\thandleErrorResponse(c, models.ErrInvalidPayload)\n\t\t\treturn true\n\t\t}\n\t\t\/\/ Add in payload\n\t\tnewTask.Payload = string(pl)\n\n\t\t\/\/ Push to queue\n\t\t_, err = enqueue(c, s.MQ, newTask)\n\t\tif err != nil {\n\t\t\thandleErrorResponse(c, err)\n\t\t\treturn true\n\t\t}\n\n\t\tlog.Info(\"Added new task to queue\")\n\t\tc.JSON(http.StatusAccepted, map[string]string{\"call_id\": cfg.ID})\n\n\tdefault:\n\t\tresult, err := s.Runner.RunTrackedTask(newTask, ctx, cfg)\n\t\tif result != nil {\n\t\t\twaitTime := result.StartTime().Sub(cfg.ReceivedTime)\n\t\t\tc.Header(\"XXX-FXLB-WAIT\", waitTime.String())\n\t\t}\n\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\t\tlog.WithError(err).Error(\"Failed to run task\")\n\t\t\tbreak\n\t\t}\n\n\t\tfor k, v := range route.Headers {\n\t\t\tc.Header(k, v[0])\n\t\t}\n\n\t\t\/\/ this will help users to track sync execution in a manner of async\n\t\t\/\/ FN_CALL_ID is an equivalent of call_id\n\t\tc.Header(\"FN_CALL_ID\", newTask.ID)\n\n\t\tswitch result.Status() {\n\t\tcase \"success\":\n\t\t\tc.Data(http.StatusOK, \"\", stdout.Bytes())\n\t\tcase \"timeout\":\n\t\t\tc.JSON(http.StatusGatewayTimeout, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: models.ErrRunnerTimeout.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\tdefault:\n\t\t\tc.JSON(http.StatusInternalServerError, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: result.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\treturn true\n}\n\nvar fakeHandler = func(http.ResponseWriter, *http.Request, Params) {}\n\nfunc matchRoute(baseRoute, route string) (Params, bool) {\n\ttree := &node{}\n\ttree.addRoute(baseRoute, fakeHandler)\n\thandler, p, _ := tree.getValue(route)\n\tif handler == nil {\n\t\treturn nil, false\n\t}\n\n\treturn p, true\n}\n<commit_msg>Fixed the formatting of the passed URL<commit_after>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/fnproject\/fn\/api\"\n\t\"github.com\/fnproject\/fn\/api\/id\"\n\t\"github.com\/fnproject\/fn\/api\/models\"\n\t\"github.com\/fnproject\/fn\/api\/runner\"\n\t\"github.com\/fnproject\/fn\/api\/runner\/common\"\n\t\"github.com\/fnproject\/fn\/api\/runner\/task\"\n\t\"github.com\/gin-gonic\/gin\"\n\tcache \"github.com\/patrickmn\/go-cache\"\n)\n\ntype runnerResponse struct {\n\tRequestID string `json:\"request_id,omitempty\"`\n\tError *models.ErrorBody `json:\"error,omitempty\"`\n}\n\nfunc toEnvName(envtype, name string) string {\n\tname = strings.ToUpper(strings.Replace(name, \"-\", \"_\", -1))\n\tif envtype == \"\" {\n\t\treturn name\n\t}\n\treturn fmt.Sprintf(\"%s_%s\", envtype, name)\n}\n\nfunc (s *Server) handleRequest(c *gin.Context, enqueue models.Enqueue) {\n\tif strings.HasPrefix(c.Request.URL.Path, \"\/v1\") {\n\t\tc.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tctx := c.Request.Context()\n\n\treqID := id.New().String()\n\tctx, log := common.LoggerWithFields(ctx, logrus.Fields{\"call_id\": reqID})\n\n\tvar err error\n\tvar payload io.Reader\n\n\tif c.Request.Method == \"POST\" {\n\t\tpayload = c.Request.Body\n\t\t\/\/ Load complete body and close\n\t\tdefer func() {\n\t\t\tio.Copy(ioutil.Discard, c.Request.Body)\n\t\t\tc.Request.Body.Close()\n\t\t}()\n\t} else if c.Request.Method == \"GET\" {\n\t\treqPayload := c.Request.URL.Query().Get(\"payload\")\n\t\tpayload = strings.NewReader(reqPayload)\n\t}\n\n\tr, routeExists := c.Get(api.Path)\n\tif !routeExists {\n\t\tr = \"\/\"\n\t}\n\n\treqRoute := &models.Route{\n\t\tAppName: c.MustGet(api.AppName).(string),\n\t\tPath: path.Clean(r.(string)),\n\t}\n\n\ts.FireBeforeDispatch(ctx, reqRoute)\n\n\tappName := reqRoute.AppName\n\tpath := reqRoute.Path\n\n\tapp, err := s.Datastore.GetApp(ctx, appName)\n\tif err != nil {\n\t\thandleErrorResponse(c, err)\n\t\treturn\n\t} else if app == nil {\n\t\thandleErrorResponse(c, models.ErrAppsNotFound)\n\t\treturn\n\t}\n\n\tlog.WithFields(logrus.Fields{\"app\": appName, \"path\": path}).Debug(\"Finding route on datastore\")\n\troute, err := s.loadroute(ctx, appName, path)\n\tif err != nil {\n\t\thandleErrorResponse(c, err)\n\t\treturn\n\t}\n\n\tif route == nil {\n\t\thandleErrorResponse(c, models.ErrRoutesNotFound)\n\t\treturn\n\t}\n\n\tlog = log.WithFields(logrus.Fields{\"app\": appName, \"path\": route.Path, \"image\": route.Image})\n\tlog.Debug(\"Got route from datastore\")\n\n\tif s.serve(ctx, c, appName, route, app, path, reqID, payload, enqueue) {\n\t\ts.FireAfterDispatch(ctx, reqRoute)\n\t\treturn\n\t}\n\n\thandleErrorResponse(c, models.ErrRoutesNotFound)\n}\n\nfunc (s *Server) loadroute(ctx context.Context, appName, path string) (*models.Route, error) {\n\tif route, ok := s.cacheget(appName, path); ok {\n\t\treturn route, nil\n\t}\n\tkey := routeCacheKey(appName, path)\n\tresp, err := s.singleflight.do(\n\t\tkey,\n\t\tfunc() (interface{}, error) {\n\t\t\treturn s.Datastore.GetRoute(ctx, appName, path)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troute := resp.(*models.Route)\n\ts.routeCache.Set(key, route, cache.DefaultExpiration)\n\treturn route, nil\n}\n\n\/\/ TODO: Should remove *gin.Context from these functions, should use only context.Context\nfunc (s *Server) serve(ctx context.Context, c *gin.Context, appName string, route *models.Route, app *models.App, path, reqID string, payload io.Reader, enqueue models.Enqueue) (ok bool) {\n\tctx, log := common.LoggerWithFields(ctx, logrus.Fields{\"app\": appName, \"route\": route.Path, \"image\": route.Image})\n\n\tparams, match := matchRoute(route.Path, path)\n\tif !match {\n\t\treturn false\n\t}\n\n\tvar stdout bytes.Buffer \/\/ TODO: should limit the size of this, error if gets too big. akin to: https:\/\/golang.org\/pkg\/io\/#LimitReader\n\n\tif route.Format == \"\" {\n\t\troute.Format = \"default\"\n\t}\n\n\t\/\/ baseVars are the vars on the route & app, not on this specific request [for hot functions]\n\tbaseVars := make(map[string]string, len(app.Config)+len(route.Config)+3)\n\tbaseVars[\"FN_FORMAT\"] = route.Format\n\tbaseVars[\"APP_NAME\"] = appName\n\tbaseVars[\"ROUTE\"] = route.Path\n\tbaseVars[\"MEMORY_MB\"] = fmt.Sprintf(\"%d\", route.Memory)\n\n\t\/\/ app config\n\tfor k, v := range app.Config {\n\t\tk = toEnvName(\"\", k)\n\t\tbaseVars[k] = v\n\t}\n\tfor k, v := range route.Config {\n\t\tk = toEnvName(\"\", k)\n\t\tbaseVars[k] = v\n\t}\n\n\t\/\/ envVars contains the full set of env vars, per request + base\n\tenvVars := make(map[string]string, len(baseVars)+len(params)+len(c.Request.Header)+3)\n\n\tfor k, v := range baseVars {\n\t\tenvVars[k] = v\n\t}\n\n\tenvVars[\"CALL_ID\"] = reqID\n\tenvVars[\"METHOD\"] = c.Request.Method\n\tenvVars[\"REQUEST_URL\"] = fmt.Sprintf(\"%v:\/\/%v%v\", func() string {\n\t\tif c.Request.TLS == nil {\n\t\t\treturn \"http\"\n\t\t}\n\t\treturn \"https\"\n\t}(), c.Request.Host, c.Request.URL.String())\n\n\t\/\/ params\n\tfor _, param := range params {\n\t\tenvVars[toEnvName(\"PARAM\", param.Key)] = param.Value\n\t}\n\n\t\/\/ headers\n\tfor header, value := range c.Request.Header {\n\t\tenvVars[toEnvName(\"HEADER\", header)] = strings.Join(value, \", \")\n\t}\n\n\tcfg := &task.Config{\n\t\tAppName: appName,\n\t\tPath: route.Path,\n\t\tBaseEnv: baseVars,\n\t\tEnv: envVars,\n\t\tFormat: route.Format,\n\t\tID: reqID,\n\t\tImage: route.Image,\n\t\tMemory: route.Memory,\n\t\tStdin: payload,\n\t\tStdout: &stdout,\n\t\tTimeout: time.Duration(route.Timeout) * time.Second,\n\t\tIdleTimeout: time.Duration(route.IdleTimeout) * time.Second,\n\t\tReceivedTime: time.Now(),\n\t\tReady: make(chan struct{}),\n\t}\n\n\t\/\/ ensure valid values\n\tif cfg.Timeout <= 0 {\n\t\tcfg.Timeout = runner.DefaultTimeout\n\t}\n\tif cfg.IdleTimeout <= 0 {\n\t\tcfg.IdleTimeout = runner.DefaultIdleTimeout\n\t}\n\n\ts.Runner.Enqueue()\n\tnewTask := task.TaskFromConfig(cfg)\n\n\tswitch route.Type {\n\tcase \"async\":\n\t\t\/\/ TODO we should be able to do hot input to async. plumb protocol stuff\n\t\t\/\/ TODO enqueue should unravel the payload?\n\n\t\t\/\/ Read payload\n\t\tpl, err := ioutil.ReadAll(cfg.Stdin)\n\t\tif err != nil {\n\t\t\thandleErrorResponse(c, models.ErrInvalidPayload)\n\t\t\treturn true\n\t\t}\n\t\t\/\/ Add in payload\n\t\tnewTask.Payload = string(pl)\n\n\t\t\/\/ Push to queue\n\t\t_, err = enqueue(c, s.MQ, newTask)\n\t\tif err != nil {\n\t\t\thandleErrorResponse(c, err)\n\t\t\treturn true\n\t\t}\n\n\t\tlog.Info(\"Added new task to queue\")\n\t\tc.JSON(http.StatusAccepted, map[string]string{\"call_id\": cfg.ID})\n\n\tdefault:\n\t\tresult, err := s.Runner.RunTrackedTask(newTask, ctx, cfg)\n\t\tif result != nil {\n\t\t\twaitTime := result.StartTime().Sub(cfg.ReceivedTime)\n\t\t\tc.Header(\"XXX-FXLB-WAIT\", waitTime.String())\n\t\t}\n\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\t\tlog.WithError(err).Error(\"Failed to run task\")\n\t\t\tbreak\n\t\t}\n\n\t\tfor k, v := range route.Headers {\n\t\t\tc.Header(k, v[0])\n\t\t}\n\n\t\t\/\/ this will help users to track sync execution in a manner of async\n\t\t\/\/ FN_CALL_ID is an equivalent of call_id\n\t\tc.Header(\"FN_CALL_ID\", newTask.ID)\n\n\t\tswitch result.Status() {\n\t\tcase \"success\":\n\t\t\tc.Data(http.StatusOK, \"\", stdout.Bytes())\n\t\tcase \"timeout\":\n\t\t\tc.JSON(http.StatusGatewayTimeout, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: models.ErrRunnerTimeout.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\tdefault:\n\t\t\tc.JSON(http.StatusInternalServerError, runnerResponse{\n\t\t\t\tRequestID: cfg.ID,\n\t\t\t\tError: &models.ErrorBody{\n\t\t\t\t\tMessage: result.Error(),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\treturn true\n}\n\nvar fakeHandler = func(http.ResponseWriter, *http.Request, Params) {}\n\nfunc matchRoute(baseRoute, route string) (Params, bool) {\n\ttree := &node{}\n\ttree.addRoute(baseRoute, fakeHandler)\n\thandler, p, _ := tree.getValue(route)\n\tif handler == nil {\n\t\treturn nil, false\n\t}\n\n\treturn p, true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package models creates the models for the modules\npackage models\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/cihangir\/gene\/generators\/constants\"\n\t\"github.com\/cihangir\/gene\/generators\/constructors\"\n\t\"github.com\/cihangir\/gene\/generators\/sql\/statements\"\n\t\"github.com\/cihangir\/gene\/generators\/validators\"\n\t\"github.com\/cihangir\/gene\/writers\"\n\t\"github.com\/cihangir\/schema\"\n\t\"github.com\/cihangir\/stringext\"\n)\n\n\/\/ Generate creates the models and write them to the required paths\nfunc Generate(rootPath string, s *schema.Schema) error {\n\n\tfor _, def := range s.Definitions {\n\t\t\/\/ create models only for objects\n\t\tif def.Type != nil {\n\t\t\tif t, ok := def.Type.(string); ok {\n\t\t\t\tif t != \"object\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tmoduleName := strings.ToLower(def.Title)\n\n\t\tmodelFilePath := fmt.Sprintf(\n\t\t\t\"%smodels\/%s.go\",\n\t\t\trootPath,\n\t\t\tmoduleName,\n\t\t)\n\n\t\tf, err := GenerateModel(def)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writers.WriteFormattedFile(modelFilePath, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GenerateStatements generates the basic CRUD statements for the models\nfunc GenerateStatements(rootPath string, s *schema.Schema) error {\n\n\tfor _, def := range s.Definitions {\n\t\t\/\/ create models only for objects\n\t\tif def.Type != nil {\n\t\t\tif t, ok := def.Type.(string); ok {\n\t\t\t\tif t != \"object\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tmoduleName := strings.ToLower(def.Title)\n\n\t\tmodelFilePath := fmt.Sprintf(\n\t\t\t\"%smodels\/%s_statements.go\",\n\t\t\trootPath,\n\t\t\tmoduleName,\n\t\t)\n\n\t\tf, err := GenerateModelStatements(def)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writers.WriteFormattedFile(modelFilePath, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GenerateModel generates the model itself\nfunc GenerateModel(s *schema.Schema) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tpackageLine, err := GeneratePackage(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconsts, err := constants.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tschema, err := GenerateSchema(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconstructor, err := constructors.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalidators, err := validators.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf.Write(packageLine)\n\tbuf.Write(consts)\n\tbuf.Write(schema)\n\tbuf.Write(constructor)\n\tif validators != nil {\n\t\tbuf.Write(validators)\n\t}\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GenerateModelStatements generates the CRUD statements for the model struct\nfunc GenerateModelStatements(s *schema.Schema) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tpackageLine, err := GeneratePackage(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreateStatements, err := statements.GenerateCreate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdateStatements, err := statements.GenerateUpdate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeleteStatements, err := statements.GenerateDelete(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tselectStatements, err := statements.GenerateSelect(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttableName, err := statements.GenerateTableName(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf.Write(packageLine)\n\tbuf.Write(createStatements)\n\tbuf.Write(updateStatements)\n\tbuf.Write(deleteStatements)\n\tbuf.Write(selectStatements)\n\tbuf.Write(tableName)\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GeneratePackage generates the imports according to the schema.\n\/\/ TODO remove this function\nfunc GeneratePackage(s *schema.Schema) ([]byte, error) {\n\ttemp := template.New(\"package.tmpl\")\n\t_, err := temp.Parse(PackageTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\t\/\/ name := strings.ToLower(strings.Split(s.Title, \" \")[0])\n\tname := \"models\"\n\terr = temp.ExecuteTemplate(&buf, \"package.tmpl\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ GenerateSchema generates the schema.\nfunc GenerateSchema(s *schema.Schema) ([]byte, error) {\n\n\ttemp := template.New(\"schema.tmpl\")\n\ttemp.Funcs(schema.Helpers)\n\n\t_, err := temp.Parse(StructTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\tcontext := struct {\n\t\tName string\n\t\tDefinition *schema.Schema\n\t}{\n\t\tName: s.Title,\n\t\tDefinition: s,\n\t}\n\n\terr = temp.ExecuteTemplate(&buf, \"schema.tmpl\", context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GenerateFunctions generates the functions according to the schema.\nfunc GenerateFunctions(s *schema.Schema) ([]byte, error) {\n\n\ttemp := template.New(\"functions.tmpl\")\n\ttemp.Funcs(template.FuncMap{\n\t\t\"Pointerize\": stringext.Pointerize,\n\t})\n\n\t_, err := temp.Parse(FunctionsTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\tcontext := struct {\n\t\tName string\n\t\tFuncs []string\n\t}{\n\t\tName: s.Title,\n\t\tFuncs: []string{\n\t\t\t\"Create\", \"Update\", \"Delete\", \"ById\",\n\t\t\t\"Some\", \"One\",\n\t\t},\n\t}\n\n\tif err := temp.ExecuteTemplate(&buf, \"functions.tmpl\", context); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn writers.Clear(buf)\n}\n<commit_msg>generator: instead of only object items, do limit for configs<commit_after>\/\/ Package models creates the models for the modules\npackage models\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/cihangir\/gene\/generators\/constants\"\n\t\"github.com\/cihangir\/gene\/generators\/constructors\"\n\t\"github.com\/cihangir\/gene\/generators\/sql\/statements\"\n\t\"github.com\/cihangir\/gene\/generators\/validators\"\n\t\"github.com\/cihangir\/gene\/writers\"\n\t\"github.com\/cihangir\/schema\"\n\t\"github.com\/cihangir\/stringext\"\n)\n\n\/\/ Generate creates the models and write them to the required paths\nfunc Generate(rootPath string, s *schema.Schema) error {\n\n\tfor _, def := range s.Definitions {\n\t\t\/\/ create models only for objects\n\t\tif def.Type != nil {\n\t\t\tif t, ok := def.Type.(string); ok {\n\t\t\t\tif t == \"config\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tmoduleName := strings.ToLower(def.Title)\n\n\t\tmodelFilePath := fmt.Sprintf(\n\t\t\t\"%smodels\/%s.go\",\n\t\t\trootPath,\n\t\t\tmoduleName,\n\t\t)\n\n\t\tf, err := GenerateModel(def)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writers.WriteFormattedFile(modelFilePath, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GenerateStatements generates the basic CRUD statements for the models\nfunc GenerateStatements(rootPath string, s *schema.Schema) error {\n\n\tfor _, def := range s.Definitions {\n\t\t\/\/ create models only for objects\n\t\tif def.Type != nil {\n\t\t\tif t, ok := def.Type.(string); ok {\n\t\t\t\tif t != \"object\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tmoduleName := strings.ToLower(def.Title)\n\n\t\tmodelFilePath := fmt.Sprintf(\n\t\t\t\"%smodels\/%s_statements.go\",\n\t\t\trootPath,\n\t\t\tmoduleName,\n\t\t)\n\n\t\tf, err := GenerateModelStatements(def)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writers.WriteFormattedFile(modelFilePath, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GenerateModel generates the model itself\nfunc GenerateModel(s *schema.Schema) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tpackageLine, err := GeneratePackage(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconsts, err := constants.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tschema, err := GenerateSchema(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconstructor, err := constructors.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalidators, err := validators.Generate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf.Write(packageLine)\n\tbuf.Write(consts)\n\tbuf.Write(schema)\n\tbuf.Write(constructor)\n\tif validators != nil {\n\t\tbuf.Write(validators)\n\t}\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GenerateModelStatements generates the CRUD statements for the model struct\nfunc GenerateModelStatements(s *schema.Schema) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tpackageLine, err := GeneratePackage(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreateStatements, err := statements.GenerateCreate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdateStatements, err := statements.GenerateUpdate(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeleteStatements, err := statements.GenerateDelete(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tselectStatements, err := statements.GenerateSelect(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttableName, err := statements.GenerateTableName(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf.Write(packageLine)\n\tbuf.Write(createStatements)\n\tbuf.Write(updateStatements)\n\tbuf.Write(deleteStatements)\n\tbuf.Write(selectStatements)\n\tbuf.Write(tableName)\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GeneratePackage generates the imports according to the schema.\n\/\/ TODO remove this function\nfunc GeneratePackage(s *schema.Schema) ([]byte, error) {\n\ttemp := template.New(\"package.tmpl\")\n\t_, err := temp.Parse(PackageTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\t\/\/ name := strings.ToLower(strings.Split(s.Title, \" \")[0])\n\tname := \"models\"\n\terr = temp.ExecuteTemplate(&buf, \"package.tmpl\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\n\/\/ GenerateSchema generates the schema.\nfunc GenerateSchema(s *schema.Schema) ([]byte, error) {\n\n\ttemp := template.New(\"schema.tmpl\")\n\ttemp.Funcs(schema.Helpers)\n\n\t_, err := temp.Parse(StructTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\tcontext := struct {\n\t\tName string\n\t\tDefinition *schema.Schema\n\t}{\n\t\tName: s.Title,\n\t\tDefinition: s,\n\t}\n\n\terr = temp.ExecuteTemplate(&buf, \"schema.tmpl\", context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn writers.Clear(buf)\n}\n\n\/\/ GenerateFunctions generates the functions according to the schema.\nfunc GenerateFunctions(s *schema.Schema) ([]byte, error) {\n\n\ttemp := template.New(\"functions.tmpl\")\n\ttemp.Funcs(template.FuncMap{\n\t\t\"Pointerize\": stringext.Pointerize,\n\t})\n\n\t_, err := temp.Parse(FunctionsTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\tcontext := struct {\n\t\tName string\n\t\tFuncs []string\n\t}{\n\t\tName: s.Title,\n\t\tFuncs: []string{\n\t\t\t\"Create\", \"Update\", \"Delete\", \"ById\",\n\t\t\t\"Some\", \"One\",\n\t\t},\n\t}\n\n\tif err := temp.ExecuteTemplate(&buf, \"functions.tmpl\", context); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn writers.Clear(buf)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/fjukstad\/kvik\/utils\"\n\tzmq \"github.com\/pebbe\/zmq4\"\n\n\t\"flag\"\n\t\"fmt\"\n)\n\n\/\/ workerPort holds the next available port to start up a worker.\nvar workerPort int\n\nfunc worker(b []byte, filename string) error {\n\terr := storeScript(b, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = startWorker(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc startWorker(filename string) error {\n\tcmd := exec.Command(\"python\", \"worker\/worker.py\", strconv.Itoa(workerPort),\n\t\tfilename)\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ storeScript stores the r script from *b* into filename\nfunc storeScript(b []byte, filename string) error {\n\terr := utils.CreateDirectories(filename)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t_, err = file.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc killAllWorkers() error {\n\tcmd := exec.Command(\"killall\", \"Python\")\n\treturn cmd.Start()\n}\n\nfunc errorResponse(msg string) utils.ComputeResponse {\n\treturn utils.ComputeResponse{-1, msg}\n}\nfunc successResponse(msg string) utils.ComputeResponse {\n\treturn utils.ComputeResponse{1, msg}\n}\n\n\/\/ Compute is the component of Kvik responsible for starting\/stopping workers\n\/\/ that perform the statistical analyses. It exposes a http rest interface to\n\/\/ the outside world.\nfunc main() {\n\n\tvar ip = flag.String(\"ip\", \"*\", \"ip to run on\")\n\tvar port = flag.String(\"port\", \":8888\", \"port to run on\")\n\n\tflag.Parse()\n\n\tresponder, _ := zmq.NewSocket(zmq.REP)\n\tdefer responder.Close()\n\n\taddr := \"tcp:\/\/\" + *ip + *port\n\n\terr := responder.Bind(addr)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ ID to identify client\n\tid := 0\n\n\tworkerPort = 5000\n\n\tfor {\n\t\tmsg, err := responder.Recv(0)\n\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Received \", msg)\n\n\t\tcmd := new(utils.Command)\n\t\terr = json.Unmarshal([]byte(msg), cmd)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tvar Response utils.ComputeResponse\n\n\t\tswitch {\n\t\tcase cmd.Command == \"stop\":\n\t\t\tbreak\n\n\t\tcase cmd.Command == \"startWorker\":\n\t\t\tpath := \"scripts\/\" + strconv.Itoa(id) + \"\/script.r\"\n\t\t\terr = worker(cmd.File, path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tResponse = errorResponse(\"Could not start worker\")\n\t\t\t} else {\n\t\t\t\tResponse = successResponse(strconv.Itoa(workerPort))\n\t\t\t}\n\n\t\tcase cmd.Command == \"killall\":\n\t\t\terr = killAllWorkers()\n\t\t\tif err != nil {\n\t\t\t\tResponse = errorResponse(\"Could not kill all workers\")\n\t\t\t} else {\n\t\t\t\tResponse = successResponse(\"Killed off all workers\")\n\t\t\t}\n\t\t}\n\n\t\tresp, _ := json.Marshal(Response)\n\t\tresponder.Send(string(resp), 0)\n\t\tid += 1\n\t\tworkerPort += 1\n\t}\n}\n<commit_msg>now starts a webserver to host images<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"github.com\/fjukstad\/kvik\/utils\"\n\tzmq \"github.com\/pebbe\/zmq4\"\n\n\t\"flag\"\n\t\"fmt\"\n)\n\n\/\/ workerPort holds the next available port to start up a worker.\nvar workerPort int\n\nfunc worker(b []byte, filename string) error {\n\terr := storeScript(b, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = startWorker(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tworkerPort += 1\n\n\tpath := path.Dir(filename) + \"\/images\"\n\terr = startWebServer(path)\n\n\treturn err\n}\n\nfunc startWebServer(path string) error {\n\tp := strconv.Itoa(workerPort)\n\tport := \":\" + p\n\tworkerPort += 1\n\tfmt.Println(\"Starting web server at\", port, path)\n\terr := http.ListenAndServe(port, http.FileServer(http.Dir(path)))\n\treturn err\n}\n\nfunc startWorker(filename string) error {\n\tcmd := exec.Command(\"python\", \"worker\/worker.py\", strconv.Itoa(workerPort),\n\t\tfilename)\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ storeScript stores the r script from *b* into filename\nfunc storeScript(b []byte, filename string) error {\n\terr := utils.CreateDirectories(filename)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t_, err = file.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc killAllWorkers() error {\n\tcmd := exec.Command(\"killall\", \"Python\")\n\treturn cmd.Start()\n}\n\nfunc errorResponse(msg string) utils.ComputeResponse {\n\treturn utils.ComputeResponse{-1, msg}\n}\nfunc successResponse(msg string) utils.ComputeResponse {\n\treturn utils.ComputeResponse{1, msg}\n}\n\n\/\/ Compute is the component of Kvik responsible for starting\/stopping workers\n\/\/ that perform the statistical analyses. It exposes a http rest interface to\n\/\/ the outside world.\nfunc main() {\n\n\tvar ip = flag.String(\"ip\", \"*\", \"ip to run on\")\n\tvar port = flag.String(\"port\", \":8888\", \"port to run on\")\n\n\tflag.Parse()\n\n\tresponder, _ := zmq.NewSocket(zmq.REP)\n\tdefer responder.Close()\n\n\taddr := \"tcp:\/\/\" + *ip + *port\n\n\terr := responder.Bind(addr)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t\/\/ ID to identify client\n\tid := 0\n\n\tworkerPort = 5000\n\n\tfor {\n\t\tmsg, err := responder.Recv(0)\n\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Received \", msg)\n\n\t\tcmd := new(utils.Command)\n\t\terr = json.Unmarshal([]byte(msg), cmd)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tvar Response utils.ComputeResponse\n\n\t\tswitch {\n\t\tcase cmd.Command == \"stop\":\n\t\t\tbreak\n\n\t\tcase cmd.Command == \"startWorker\":\n\t\t\tpath := \"scripts\/\" + strconv.Itoa(id) + \"\/script.r\"\n\t\t\terr = worker(cmd.File, path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tResponse = errorResponse(\"Could not start worker\")\n\t\t\t} else {\n\t\t\t\tResponse = successResponse(strconv.Itoa(workerPort))\n\t\t\t}\n\n\t\tcase cmd.Command == \"killall\":\n\t\t\terr = killAllWorkers()\n\t\t\tif err != nil {\n\t\t\t\tResponse = errorResponse(\"Could not kill all workers\")\n\t\t\t} else {\n\t\t\t\tResponse = successResponse(\"Killed off all workers\")\n\t\t\t}\n\t\t}\n\n\t\tresp, _ := json.Marshal(Response)\n\t\tresponder.Send(string(resp), 0)\n\t\tid += 1\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The go-github AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ Deployment represents a deployment in a repo\ntype Deployment struct {\n\tUrl *string `json:\"url,omitempty\"`\n\tID *int `json:\"id,omitempty\"`\n\tSHA *string `json:\"sha,omitempty\"`\n\tRef *string `json:\"ref,omitempty\"`\n\tTask *string `json:\"task,omitempty\"`\n\tPayload json.RawMessage `json:\"payload,omitempty\"`\n\tEnvironment *string `json:\"environment,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tCreator *User `json:\"creator,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\"`\n\tUpdatedAt *Timestamp `json:\"pushed_at,omitempty\"`\n}\n\n\/\/ DeploymentRequest represents a deployment request\ntype DeploymentRequest struct {\n\tRef *string `json:\"ref,omitempty\"`\n\tTask *string `json:\"task,omitempty\"`\n\tAutoMerge *bool `json:\"auto_merge,omitempty\"`\n\tRequiredContexts []string `json:\"required_contexts,omitempty\"`\n\tPayload *string `json:\"payload,omitempty\"`\n\tEnvironment *string `json:\"environment,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n}\n\n\/\/ DeploymentsListOptions specifies the optional parameters to the\n\/\/ RepositoriesService.ListDeployments method.\ntype DeploymentsListOptions struct {\n\t\/\/ SHA of the Deployment.\n\tSHA string `url:\"sha,omitempty\"`\n\n\t\/\/ List deployments for a given ref.\n\tRef string `url:\"ref,omitempty\"`\n\n\t\/\/ List deployments for a given task.\n\tTask string `url:\"task,omitempty\"`\n\n\t\/\/ List deployments for a given environment.\n\tEnvironment string `url:\"environment,omitempty\"`\n\n\tListOptions\n}\n\n\/\/ ListDeployments lists the deployments of a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#list-deployments\nfunc (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]Deployment, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\", owner, repo)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\tdeployments := new([]Deployment)\n\tresp, err := s.client.Do(req, deployments)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *deployments, resp, err\n}\n\n\/\/ CreateDeployment creates a new deployment for a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#create-a-deployment\nfunc (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\", owner, repo)\n\n\treq, err := s.client.NewRequest(\"POST\", u, request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\td := new(Deployment)\n\tresp, err := s.client.Do(req, d)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn d, resp, err\n}\n\n\/\/ DeploymentStatus represents the status of a\n\/\/ particular deployment.\ntype DeploymentStatus struct {\n\tID *int `json:\"id,omitempty\"`\n\tState *string `json:\"state,omitempty\"`\n\tCreator *User `json:\"creator,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tTargetUrl *string `json:\"target_url,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\"`\n\tUpdatedAt *Timestamp `json:\"pushed_at,omitempty\"`\n}\n\n\/\/ DeploymentRequest represents a deployment request\ntype DeploymentStatusRequest struct {\n\tState *string `json:\"state,omitempty\"`\n\tTargetUrl *string `json:\"target_url,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n}\n\n\/\/ ListDeploymentStatuses lists the statuses of a given deployment of a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#list-deployment-statuses\nfunc (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]DeploymentStatus, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\/%v\/statuses\", owner, repo, deployment)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\tstatuses := new([]DeploymentStatus)\n\tresp, err := s.client.Do(req, statuses)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *statuses, resp, err\n}\n\n\/\/ CreateDeploymentStatus creates a new status for a deployment.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#create-a-deployment-status\nfunc (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\/%v\/statuses\", owner, repo, deployment)\n\n\treq, err := s.client.NewRequest(\"POST\", u, request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\td := new(DeploymentStatus)\n\tresp, err := s.client.Do(req, d)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn d, resp, err\n}\n<commit_msg>fix minor golint issues<commit_after>\/\/ Copyright 2014 The go-github AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\n\/\/ Deployment represents a deployment in a repo\ntype Deployment struct {\n\tURL *string `json:\"url,omitempty\"`\n\tID *int `json:\"id,omitempty\"`\n\tSHA *string `json:\"sha,omitempty\"`\n\tRef *string `json:\"ref,omitempty\"`\n\tTask *string `json:\"task,omitempty\"`\n\tPayload json.RawMessage `json:\"payload,omitempty\"`\n\tEnvironment *string `json:\"environment,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tCreator *User `json:\"creator,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\"`\n\tUpdatedAt *Timestamp `json:\"pushed_at,omitempty\"`\n}\n\n\/\/ DeploymentRequest represents a deployment request\ntype DeploymentRequest struct {\n\tRef *string `json:\"ref,omitempty\"`\n\tTask *string `json:\"task,omitempty\"`\n\tAutoMerge *bool `json:\"auto_merge,omitempty\"`\n\tRequiredContexts []string `json:\"required_contexts,omitempty\"`\n\tPayload *string `json:\"payload,omitempty\"`\n\tEnvironment *string `json:\"environment,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n}\n\n\/\/ DeploymentsListOptions specifies the optional parameters to the\n\/\/ RepositoriesService.ListDeployments method.\ntype DeploymentsListOptions struct {\n\t\/\/ SHA of the Deployment.\n\tSHA string `url:\"sha,omitempty\"`\n\n\t\/\/ List deployments for a given ref.\n\tRef string `url:\"ref,omitempty\"`\n\n\t\/\/ List deployments for a given task.\n\tTask string `url:\"task,omitempty\"`\n\n\t\/\/ List deployments for a given environment.\n\tEnvironment string `url:\"environment,omitempty\"`\n\n\tListOptions\n}\n\n\/\/ ListDeployments lists the deployments of a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#list-deployments\nfunc (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]Deployment, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\", owner, repo)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\tdeployments := new([]Deployment)\n\tresp, err := s.client.Do(req, deployments)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *deployments, resp, err\n}\n\n\/\/ CreateDeployment creates a new deployment for a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#create-a-deployment\nfunc (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\", owner, repo)\n\n\treq, err := s.client.NewRequest(\"POST\", u, request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\td := new(Deployment)\n\tresp, err := s.client.Do(req, d)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn d, resp, err\n}\n\n\/\/ DeploymentStatus represents the status of a\n\/\/ particular deployment.\ntype DeploymentStatus struct {\n\tID *int `json:\"id,omitempty\"`\n\tState *string `json:\"state,omitempty\"`\n\tCreator *User `json:\"creator,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tTargetURL *string `json:\"target_url,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\"`\n\tUpdatedAt *Timestamp `json:\"pushed_at,omitempty\"`\n}\n\n\/\/ DeploymentStatusRequest represents a deployment request\ntype DeploymentStatusRequest struct {\n\tState *string `json:\"state,omitempty\"`\n\tTargetURL *string `json:\"target_url,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n}\n\n\/\/ ListDeploymentStatuses lists the statuses of a given deployment of a repository.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#list-deployment-statuses\nfunc (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]DeploymentStatus, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\/%v\/statuses\", owner, repo, deployment)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\tstatuses := new([]DeploymentStatus)\n\tresp, err := s.client.Do(req, statuses)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *statuses, resp, err\n}\n\n\/\/ CreateDeploymentStatus creates a new status for a deployment.\n\/\/\n\/\/ GitHub API docs: https:\/\/developer.github.com\/v3\/repos\/deployments\/#create-a-deployment-status\nfunc (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {\n\tu := fmt.Sprintf(\"repos\/%v\/%v\/deployments\/%v\/statuses\", owner, repo, deployment)\n\n\treq, err := s.client.NewRequest(\"POST\", u, request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ TODO: remove custom Accept header when this API fully launches\n\treq.Header.Set(\"Accept\", mediaTypeDeploymentPreview)\n\n\td := new(DeploymentStatus)\n\tresp, err := s.client.Do(req, d)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn d, resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>package routes\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/generationtux\/brizo\/app\/handlers\/web\"\n\t\"github.com\/go-zoo\/bone\"\n)\n\n\/\/ BuildRouter configures the application router\nfunc BuildRouter() *bone.Mux {\n\tr := mainRoutes()\n\tr.SubRoute(\"\/api\", apiRoutes())\n\n\treturn r\n}\n\n\/\/ mainRoutes registers the routes for the user interface\nfunc mainRoutes() *bone.Mux {\n\trouter := bone.New()\n\n\t\/\/ Home page\n\trouter.GetFunc(\"\/\", web.RootHandler)\n\n\t\/\/ Javascript UI\n\trouter.GetFunc(\"\/app\", web.UIHandler)\n\trouter.GetFunc(\"\/app\/*\", web.UIHandler)\n\trouter.Get(\"\/assets\/\", http.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\"ui\/dist\/\"))))\n\n\t\/\/ Healthz endpoint\n\trouter.GetFunc(\"\/healthz\", web.HealthzHandler)\n\n\t\/\/ OAuth endpoints\n\trouter.GetFunc(\"\/o\/auth\/login\/github\", web.AuthGithubHandler)\n\trouter.GetFunc(\"\/o\/auth\/callback\/github\", web.AuthGithubCallbackHandler)\n\n\t\/\/ @todo move to JS UI\n\trouter.GetFunc(\"\/users\", web.AuthAddNewUser)\n\trouter.GetFunc(\"\/login\", web.AuthMainHandler)\n\n\treturn router\n}\n<commit_msg>v1 prefix<commit_after>package routes\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/generationtux\/brizo\/app\/handlers\/web\"\n\t\"github.com\/go-zoo\/bone\"\n)\n\n\/\/ BuildRouter configures the application router\nfunc BuildRouter() *bone.Mux {\n\tr := mainRoutes()\n\tr.SubRoute(\"\/api\/v1\", apiRoutes())\n\n\treturn r\n}\n\n\/\/ mainRoutes registers the routes for the user interface\nfunc mainRoutes() *bone.Mux {\n\trouter := bone.New()\n\n\t\/\/ Home page\n\trouter.GetFunc(\"\/\", web.RootHandler)\n\n\t\/\/ Javascript UI\n\trouter.GetFunc(\"\/app\", web.UIHandler)\n\trouter.GetFunc(\"\/app\/*\", web.UIHandler)\n\trouter.Get(\"\/assets\/\", http.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\"ui\/dist\/\"))))\n\n\t\/\/ Healthz endpoint\n\trouter.GetFunc(\"\/healthz\", web.HealthzHandler)\n\n\t\/\/ OAuth endpoints\n\trouter.GetFunc(\"\/o\/auth\/login\/github\", web.AuthGithubHandler)\n\trouter.GetFunc(\"\/o\/auth\/callback\/github\", web.AuthGithubCallbackHandler)\n\n\t\/\/ @todo move to JS UI\n\trouter.GetFunc(\"\/users\", web.AuthAddNewUser)\n\trouter.GetFunc(\"\/login\", web.AuthMainHandler)\n\n\treturn router\n}\n<|endoftext|>"} {"text":"<commit_before>package chat\n\nimport (\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"io\"\n\n\t\"github.com\/agl\/ed25519\"\n\t\"github.com\/keybase\/client\/go\/chat\/signencrypt\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n)\n\ntype Encrypter interface {\n\t\/\/ EncryptedLen returns the number of bytes that the ciphertext of\n\t\/\/ size plaintext bytes will be.\n\tEncryptedLen(size int) int\n\n\t\/\/ Encrypt takes a plaintext reader and returns a ciphertext reader.\n\tEncrypt(plaintext io.Reader) (ciphertext io.Reader)\n\n\t\/\/ EncryptKey returns the ephemeral key that was used during Encrypt.\n\tEncryptKey() []byte\n\n\t\/\/ VerifyKey returns the public portion of the signing key that\n\t\/\/ can be used for signature verification.\n\tVerifyKey() []byte\n}\n\ntype Decrypter interface {\n\t\/\/ Decrypt takes a ciphertext reader, encryption and verify keys.\n\t\/\/ It returns a plaintext reader.\n\tDecrypt(ciphertext io.Reader, encKey, verifyKey []byte) (plaintext io.Reader)\n}\n\nvar nonce = &[signencrypt.NonceSize]byte{\n\t0x10, 0x10, 0x10, 0x10, 0x20, 0x20, 0x20, 0x20,\n\t0x30, 0x30, 0x30, 0x30, 0x40, 0x40, 0x40, 0x40,\n}\n\ntype SignEncrypter struct {\n\tencKey signencrypt.SecretboxKey\n\tsignKey signencrypt.SignKey\n\tverifyKey signencrypt.VerifyKey\n}\n\nfunc NewSignEncrypter() (*SignEncrypter, error) {\n\tvar encKey [signencrypt.SecretboxKeySize]byte\n\tn, err := rand.Read(encKey[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != signencrypt.SecretboxKeySize {\n\t\treturn nil, errors.New(\"failed to rand.Read the correct number of bytes\")\n\t}\n\n\tsign, err := libkb.GenerateNaclSigningKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar signKey [ed25519.PrivateKeySize]byte\n\tcopy(signKey[:], (*sign.Private)[:])\n\tvar verifyKey [ed25519.PublicKeySize]byte\n\tcopy(verifyKey[:], sign.Public[:])\n\n\tres := SignEncrypter{\n\t\tencKey: &encKey,\n\t\tsignKey: &signKey,\n\t\tverifyKey: &verifyKey,\n\t}\n\n\treturn &res, nil\n}\n\nfunc (s *SignEncrypter) EncryptedLen(size int) int {\n\treturn signencrypt.GetSealedSize(size)\n}\n\nfunc (s *SignEncrypter) Encrypt(r io.Reader) io.Reader {\n\treturn signencrypt.NewEncodingReader(s.encKey, s.signKey, nonce, r)\n}\n\nfunc (s *SignEncrypter) EncryptKey() []byte {\n\treturn []byte((*s.encKey)[:])\n}\n\nfunc (s *SignEncrypter) VerifyKey() []byte {\n\treturn []byte((*s.verifyKey)[:])\n}\n\ntype SignDecrypter struct{}\n\nfunc NewSignDecrypter() *SignDecrypter {\n\treturn &SignDecrypter{}\n}\n\nfunc (s *SignDecrypter) Decrypt(r io.Reader, encKey, verifyKey []byte) io.Reader {\n\tvar xencKey [signencrypt.SecretboxKeySize]byte\n\tcopy(xencKey[:], encKey)\n\tvar xverifyKey [ed25519.PublicKeySize]byte\n\tcopy(xverifyKey[:], verifyKey)\n\treturn signencrypt.NewDecodingReader(&xencKey, &xverifyKey, nonce, r)\n}\n<commit_msg>Change nonce<commit_after>package chat\n\nimport (\n\t\"crypto\/rand\"\n\t\"errors\"\n\t\"io\"\n\n\t\"github.com\/agl\/ed25519\"\n\t\"github.com\/keybase\/client\/go\/chat\/signencrypt\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n)\n\ntype Encrypter interface {\n\t\/\/ EncryptedLen returns the number of bytes that the ciphertext of\n\t\/\/ size plaintext bytes will be.\n\tEncryptedLen(size int) int\n\n\t\/\/ Encrypt takes a plaintext reader and returns a ciphertext reader.\n\tEncrypt(plaintext io.Reader) (ciphertext io.Reader)\n\n\t\/\/ EncryptKey returns the ephemeral key that was used during Encrypt.\n\tEncryptKey() []byte\n\n\t\/\/ VerifyKey returns the public portion of the signing key that\n\t\/\/ can be used for signature verification.\n\tVerifyKey() []byte\n}\n\ntype Decrypter interface {\n\t\/\/ Decrypt takes a ciphertext reader, encryption and verify keys.\n\t\/\/ It returns a plaintext reader.\n\tDecrypt(ciphertext io.Reader, encKey, verifyKey []byte) (plaintext io.Reader)\n}\n\nvar nonce signencrypt.Nonce\n\nfunc init() {\n\tvar n [signencrypt.NonceSize]byte\n\tcopy(n[:], \"kbchatattachment\")\n\tnonce = &n\n}\n\ntype SignEncrypter struct {\n\tencKey signencrypt.SecretboxKey\n\tsignKey signencrypt.SignKey\n\tverifyKey signencrypt.VerifyKey\n}\n\nfunc NewSignEncrypter() (*SignEncrypter, error) {\n\tvar encKey [signencrypt.SecretboxKeySize]byte\n\tn, err := rand.Read(encKey[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != signencrypt.SecretboxKeySize {\n\t\treturn nil, errors.New(\"failed to rand.Read the correct number of bytes\")\n\t}\n\n\tsign, err := libkb.GenerateNaclSigningKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar signKey [ed25519.PrivateKeySize]byte\n\tcopy(signKey[:], (*sign.Private)[:])\n\tvar verifyKey [ed25519.PublicKeySize]byte\n\tcopy(verifyKey[:], sign.Public[:])\n\n\tres := SignEncrypter{\n\t\tencKey: &encKey,\n\t\tsignKey: &signKey,\n\t\tverifyKey: &verifyKey,\n\t}\n\n\treturn &res, nil\n}\n\nfunc (s *SignEncrypter) EncryptedLen(size int) int {\n\treturn signencrypt.GetSealedSize(size)\n}\n\nfunc (s *SignEncrypter) Encrypt(r io.Reader) io.Reader {\n\treturn signencrypt.NewEncodingReader(s.encKey, s.signKey, nonce, r)\n}\n\nfunc (s *SignEncrypter) EncryptKey() []byte {\n\treturn []byte((*s.encKey)[:])\n}\n\nfunc (s *SignEncrypter) VerifyKey() []byte {\n\treturn []byte((*s.verifyKey)[:])\n}\n\ntype SignDecrypter struct{}\n\nfunc NewSignDecrypter() *SignDecrypter {\n\treturn &SignDecrypter{}\n}\n\nfunc (s *SignDecrypter) Decrypt(r io.Reader, encKey, verifyKey []byte) io.Reader {\n\tvar xencKey [signencrypt.SecretboxKeySize]byte\n\tcopy(xencKey[:], encKey)\n\tvar xverifyKey [ed25519.PublicKeySize]byte\n\tcopy(xverifyKey[:], verifyKey)\n\treturn signencrypt.NewDecodingReader(&xencKey, &xverifyKey, nonce, r)\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/cli\"\n\t\"github.com\/keybase\/client\/go\/libcmdline\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n)\n\n\/\/ CmdDeviceAdd is the 'device add' command. It is used for\n\/\/ device provisioning on the provisioner\/device X\/C1.\ntype CmdDeviceAdd struct {\n\tlibkb.Contextified\n}\n\nconst cmdDevAddDesc = `When you are adding a new device to your account and you have an \nexisting device, you will be prompted to use this command on your\nexisting device to authorize the new device.`\n\n\/\/ NewCmdDeviceAdd creates a new cli.Command.\nfunc NewCmdDeviceAdd(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {\n\treturn cli.Command{\n\t\tName: \"add\",\n\t\tUsage: \"Authorize a new device\",\n\t\tDescription: cmdDevAddDesc,\n\t\tAction: func(c *cli.Context) {\n\t\t\tcl.ChooseCommand(&CmdDeviceAdd{Contextified: libkb.NewContextified(g)}, \"add\", c)\n\t\t},\n\t}\n}\n\n\/\/ RunClient runs the command in client\/server mode.\nfunc (c *CmdDeviceAdd) Run() error {\n\tvar err error\n\tcli, err := GetDeviceClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotocols := []rpc.Protocol{\n\t\tNewProvisionUIProtocol(c.G(), libkb.KexRoleProvisioner),\n\t\tNewSecretUIProtocol(c.G()),\n\t}\n\tif err := RegisterProtocols(protocols); err != nil {\n\t\treturn err\n\t}\n\n\treturn cli.DeviceXAdd(context.TODO(), 0)\n}\n\n\/\/ ParseArgv gets the secret phrase from the command args.\nfunc (c *CmdDeviceAdd) ParseArgv(ctx *cli.Context) error {\n\tif len(ctx.Args()) != 0 {\n\t\treturn fmt.Errorf(\"device xadd takes zero arguments\")\n\t}\n\treturn nil\n}\n\n\/\/ GetUsage says what this command needs to operate.\nfunc (c *CmdDeviceAdd) GetUsage() libkb.Usage {\n\treturn libkb.Usage{\n\t\tConfig: true,\n\t\tKbKeyring: true,\n\t\tAPI: true,\n\t}\n}\n<commit_msg>Fix xadd -> add in error<commit_after>package client\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/cli\"\n\t\"github.com\/keybase\/client\/go\/libcmdline\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n)\n\n\/\/ CmdDeviceAdd is the 'device add' command. It is used for\n\/\/ device provisioning on the provisioner\/device X\/C1.\ntype CmdDeviceAdd struct {\n\tlibkb.Contextified\n}\n\nconst cmdDevAddDesc = `When you are adding a new device to your account and you have an \nexisting device, you will be prompted to use this command on your\nexisting device to authorize the new device.`\n\n\/\/ NewCmdDeviceAdd creates a new cli.Command.\nfunc NewCmdDeviceAdd(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {\n\treturn cli.Command{\n\t\tName: \"add\",\n\t\tUsage: \"Authorize a new device\",\n\t\tDescription: cmdDevAddDesc,\n\t\tAction: func(c *cli.Context) {\n\t\t\tcl.ChooseCommand(&CmdDeviceAdd{Contextified: libkb.NewContextified(g)}, \"add\", c)\n\t\t},\n\t}\n}\n\n\/\/ RunClient runs the command in client\/server mode.\nfunc (c *CmdDeviceAdd) Run() error {\n\tvar err error\n\tcli, err := GetDeviceClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotocols := []rpc.Protocol{\n\t\tNewProvisionUIProtocol(c.G(), libkb.KexRoleProvisioner),\n\t\tNewSecretUIProtocol(c.G()),\n\t}\n\tif err := RegisterProtocols(protocols); err != nil {\n\t\treturn err\n\t}\n\n\treturn cli.DeviceXAdd(context.TODO(), 0)\n}\n\n\/\/ ParseArgv gets the secret phrase from the command args.\nfunc (c *CmdDeviceAdd) ParseArgv(ctx *cli.Context) error {\n\tif len(ctx.Args()) != 0 {\n\t\treturn fmt.Errorf(\"device add takes zero arguments\")\n\t}\n\treturn nil\n}\n\n\/\/ GetUsage says what this command needs to operate.\nfunc (c *CmdDeviceAdd) GetUsage() libkb.Usage {\n\treturn libkb.Usage{\n\t\tConfig: true,\n\t\tKbKeyring: true,\n\t\tAPI: true,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/concurrency\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/logutil\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/sqlparser\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vitessdriver\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vterrors\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vtgate\/vtgateconn\"\n\n\tvtrpcpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/vtrpc\"\n)\n\nvar (\n\tusage = `\nvtclient connects to a vtgate server using the standard go driver API.\nVersion 3 of the API is used, we do not send any hint to the server.\n\nFor query bound variables, we assume place-holders in the query string\nin the form of :v1, :v2, etc.\n\nExamples:\n\n $ vtclient -server vtgate:15991 \"SELECT * FROM messages\"\n\n $ vtclient -server vtgate:15991 -target '@master' -bind_variables '[ 12345, 1, \"msg 12345\" ]' \"INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)\"\n\n`\n\tserver = flag.String(\"server\", \"\", \"vtgate server to connect to\")\n\ttimeout = flag.Duration(\"timeout\", 30*time.Second, \"timeout for queries\")\n\tstreaming = flag.Bool(\"streaming\", false, \"use a streaming query\")\n\tbindVariables = newBindvars(\"bind_variables\", \"bind variables as a json list\")\n\ttargetString = flag.String(\"target\", \"\", \"keyspace:shard@tablet_type\")\n\tjsonOutput = flag.Bool(\"json\", false, \"Output JSON instead of human-readable table\")\n\tparallel = flag.Int(\"parallel\", 1, \"DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.\")\n\tcount = flag.Int(\"count\", 1, \"DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.\")\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t}\n}\n\ntype bindvars []interface{}\n\nfunc (bv *bindvars) String() string {\n\tb, err := json.Marshal(bv)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(b)\n}\n\nfunc (bv *bindvars) Set(s string) (err error) {\n\terr = json.Unmarshal([]byte(s), &bv)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ json reads all numbers as float64\n\t\/\/ So, we just ditch floats for bindvars\n\tfor i, v := range *bv {\n\t\tif f, ok := v.(float64); ok {\n\t\t\tif f > 0 {\n\t\t\t\t(*bv)[i] = uint64(f)\n\t\t\t} else {\n\t\t\t\t(*bv)[i] = int64(f)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ For internal flag compatibility\nfunc (bv *bindvars) Get() interface{} {\n\treturn bv\n}\n\nfunc newBindvars(name, usage string) *bindvars {\n\tvar bv bindvars\n\tflag.Var(&bv, name, usage)\n\treturn &bv\n}\n\nfunc main() {\n\tdefer logutil.Flush()\n\n\tqr, err := run()\n\tif *jsonOutput && qr != nil {\n\t\tdata, err := json.MarshalIndent(qr, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlog.Exitf(\"cannot marshal data: %v\", err)\n\t\t}\n\t\tfmt.Print(string(data))\n\t\treturn\n\t}\n\n\tqr.print()\n\n\tif err != nil {\n\t\tlog.Exit(err)\n\t}\n}\n\nfunc run() (*results, error) {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif len(args) == 0 {\n\t\tflag.Usage()\n\t\treturn nil, errors.New(\"no arguments provided. See usage above\")\n\t}\n\tif len(args) > 1 {\n\t\treturn nil, errors.New(\"no additional arguments after the query allowed\")\n\t}\n\n\tc := vitessdriver.Configuration{\n\t\tProtocol: *vtgateconn.VtgateProtocol,\n\t\tAddress: *server,\n\t\tTarget: *targetString,\n\t\tTimeout: *timeout,\n\t\tStreaming: *streaming,\n\t}\n\tdb, err := vitessdriver.OpenWithConfiguration(c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t}\n\n\tlog.Infof(\"Sending the query...\")\n\n\treturn execMulti(db, args[0])\n}\n\nfunc execMulti(db *sql.DB, sql string) (*results, error) {\n\tall := newResults()\n\tec := concurrency.FirstErrorRecorder{}\n\twg := sync.WaitGroup{}\n\tisDML := sqlparser.IsDML(sql)\n\n\tstart := time.Now()\n\tfor i := 0; i < *parallel; i++ {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor j := 0; j < *count; j++ {\n\t\t\t\tvar qr *results\n\t\t\t\tvar err error\n\t\t\t\tif isDML {\n\t\t\t\t\tqr, err = execDml(db, sql)\n\t\t\t\t} else {\n\t\t\t\t\tqr, err = execNonDml(db, sql)\n\t\t\t\t}\n\t\t\t\tif *count == 1 {\n\t\t\t\t\tall = qr\n\t\t\t\t} else {\n\t\t\t\t\tall.merge(qr)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tec.RecordError(err)\n\t\t\t\t\tall.recordError(err)\n\t\t\t\t\t\/\/ We keep going and do not return early purpose.\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tall.duration = time.Since(start)\n\n\treturn all, ec.Error()\n}\n\nfunc execDml(db *sql.DB, sql string) (*results, error) {\n\tstart := time.Now()\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"BEGIN failed\")\n\t}\n\n\tresult, err := tx.Exec(sql, []interface{}(*bindVariables)...)\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"failed to execute DML\")\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"COMMIT failed\")\n\t}\n\n\trowsAffected, err := result.RowsAffected()\n\tlastInsertID, err := result.LastInsertId()\n\treturn &results{\n\t\trowsAffected: rowsAffected,\n\t\tlastInsertID: lastInsertID,\n\t\tduration: time.Since(start),\n\t}, nil\n}\n\nfunc execNonDml(db *sql.DB, sql string) (*results, error) {\n\tstart := time.Now()\n\trows, err := db.Query(sql, []interface{}(*bindVariables)...)\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"client error\")\n\t}\n\tdefer rows.Close()\n\n\t\/\/ get the headers\n\tvar qr results\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t}\n\tqr.Fields = cols\n\n\t\/\/ get the rows\n\tfor rows.Next() {\n\t\trow := make([]interface{}, len(cols))\n\t\tfor i := range row {\n\t\t\tvar col string\n\t\t\trow[i] = &col\n\t\t}\n\t\tif err := rows.Scan(row...); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t\t}\n\n\t\t\/\/ unpack []*string into []string\n\t\tvals := make([]string, 0, len(row))\n\t\tfor _, value := range row {\n\t\t\tvals = append(vals, *(value.(*string)))\n\t\t}\n\t\tqr.Rows = append(qr.Rows, vals)\n\t}\n\tqr.rowsAffected = int64(len(qr.Rows))\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Vitess returned an error: %v\", err)\n\t}\n\n\tqr.duration = time.Since(start)\n\treturn &qr, nil\n}\n\ntype results struct {\n\tmu sync.Mutex\n\tFields []string `json:\"fields\"`\n\tRows [][]string `json:\"rows\"`\n\trowsAffected int64\n\tlastInsertID int64\n\tduration time.Duration\n\tcumulativeDuration time.Duration\n\n\t\/\/ Multi DML mode: Track total error count, error count per code and the first error.\n\ttotalErrorCount int\n\terrorCount map[vtrpcpb.Code]int\n\tfirstError map[vtrpcpb.Code]error\n}\n\nfunc newResults() *results {\n\treturn &results{\n\t\terrorCount: make(map[vtrpcpb.Code]int),\n\t\tfirstError: make(map[vtrpcpb.Code]error),\n\t}\n}\n\n\/\/ merge aggregates \"other\" into \"r\".\n\/\/ This is only used for executing DMLs concurrently and repeatedly.\n\/\/ Therefore, \"Fields\" and \"Rows\" are not merged.\nfunc (r *results) merge(other *results) {\n\tif other == nil {\n\t\treturn\n\t}\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.rowsAffected += other.rowsAffected\n\tif other.lastInsertID > r.lastInsertID {\n\t\tr.lastInsertID = other.lastInsertID\n\t}\n\tr.cumulativeDuration += other.duration\n}\n\nfunc (r *results) recordError(err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.totalErrorCount++\n\tcode := vterrors.Code(err)\n\tr.errorCount[code]++\n\n\tif r.errorCount[code] == 1 {\n\t\tr.firstError[code] = err\n\t}\n}\n\nfunc (r *results) print() {\n\tif r == nil {\n\t\treturn\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(r.Fields)\n\ttable.SetAutoFormatHeaders(false)\n\ttable.AppendBulk(r.Rows)\n\ttable.Render()\n\tfmt.Printf(\"%v row(s) affected (%v, cum: %v)\\n\", r.rowsAffected, r.duration, r.cumulativeDuration)\n\tif r.lastInsertID != 0 {\n\t\tfmt.Printf(\"Last insert ID: %v\\n\", r.lastInsertID)\n\t}\n\n\tif r.totalErrorCount == 0 {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%d error(s) were returned. Number of errors by error code:\\n\\n\", r.totalErrorCount)\n\t\/\/ Sort different error codes by count (descending).\n\ttype errorCounts struct {\n\t\tcode vtrpcpb.Code\n\t\tcount int\n\t}\n\tvar counts []errorCounts\n\tfor code, count := range r.errorCount {\n\t\tcounts = append(counts, errorCounts{code, count})\n\t}\n\tsort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count })\n\tfor _, c := range counts {\n\t\tfmt.Printf(\"%- 30v= % 5d\\n\", c.code, c.count)\n\t}\n\n\tfmt.Printf(\"\\nFirst error per code:\\n\\n\")\n\tfor code, err := range r.firstError {\n\t\tfmt.Printf(\"Code: %v\\nError: %v\\n\\n\", code, err)\n\t}\n}\n<commit_msg>vtclient: Feature to generate queries with random IDs<commit_after>\/*\nCopyright 2017 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/concurrency\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/logutil\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/sqlparser\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vitessdriver\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vterrors\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/vtgate\/vtgateconn\"\n\n\tvtrpcpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/vtrpc\"\n)\n\nvar (\n\tusage = `\nvtclient connects to a vtgate server using the standard go driver API.\nVersion 3 of the API is used, we do not send any hint to the server.\n\nFor query bound variables, we assume place-holders in the query string\nin the form of :v1, :v2, etc.\n\nExamples:\n\n $ vtclient -server vtgate:15991 \"SELECT * FROM messages\"\n\n $ vtclient -server vtgate:15991 -target '@master' -bind_variables '[ 12345, 1, \"msg 12345\" ]' \"INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)\"\n\n`\n\tserver = flag.String(\"server\", \"\", \"vtgate server to connect to\")\n\ttimeout = flag.Duration(\"timeout\", 30*time.Second, \"timeout for queries\")\n\tstreaming = flag.Bool(\"streaming\", false, \"use a streaming query\")\n\tbindVariables = newBindvars(\"bind_variables\", \"bind variables as a json list\")\n\ttargetString = flag.String(\"target\", \"\", \"keyspace:shard@tablet_type\")\n\tjsonOutput = flag.Bool(\"json\", false, \"Output JSON instead of human-readable table\")\n\tparallel = flag.Int(\"parallel\", 1, \"DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.\")\n\tcount = flag.Int(\"count\", 1, \"DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.\")\n\tminRandomID = flag.Int(\"min_random_id\", 0, \"min random ID to generate. When max_random_id > min_random_id, for each query, a random number is generated in [min_random_id, max_random_id) and attached to the end of the bind variables.\")\n\tmaxRandomID = flag.Int(\"max_random_id\", 0, \"max random ID.\")\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t}\n}\n\ntype bindvars []interface{}\n\nfunc (bv *bindvars) String() string {\n\tb, err := json.Marshal(bv)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(b)\n}\n\nfunc (bv *bindvars) Set(s string) (err error) {\n\terr = json.Unmarshal([]byte(s), &bv)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ json reads all numbers as float64\n\t\/\/ So, we just ditch floats for bindvars\n\tfor i, v := range *bv {\n\t\tif f, ok := v.(float64); ok {\n\t\t\tif f > 0 {\n\t\t\t\t(*bv)[i] = uint64(f)\n\t\t\t} else {\n\t\t\t\t(*bv)[i] = int64(f)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ For internal flag compatibility\nfunc (bv *bindvars) Get() interface{} {\n\treturn bv\n}\n\nfunc newBindvars(name, usage string) *bindvars {\n\tvar bv bindvars\n\tflag.Var(&bv, name, usage)\n\treturn &bv\n}\n\nfunc main() {\n\tdefer logutil.Flush()\n\n\tqr, err := run()\n\tif *jsonOutput && qr != nil {\n\t\tdata, err := json.MarshalIndent(qr, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlog.Exitf(\"cannot marshal data: %v\", err)\n\t\t}\n\t\tfmt.Print(string(data))\n\t\treturn\n\t}\n\n\tqr.print()\n\n\tif err != nil {\n\t\tlog.Exit(err)\n\t}\n}\n\nfunc run() (*results, error) {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif len(args) == 0 {\n\t\tflag.Usage()\n\t\treturn nil, errors.New(\"no arguments provided. See usage above\")\n\t}\n\tif len(args) > 1 {\n\t\treturn nil, errors.New(\"no additional arguments after the query allowed\")\n\t}\n\n\tc := vitessdriver.Configuration{\n\t\tProtocol: *vtgateconn.VtgateProtocol,\n\t\tAddress: *server,\n\t\tTarget: *targetString,\n\t\tTimeout: *timeout,\n\t\tStreaming: *streaming,\n\t}\n\tdb, err := vitessdriver.OpenWithConfiguration(c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t}\n\n\tlog.Infof(\"Sending the query...\")\n\n\treturn execMulti(db, args[0])\n}\n\nfunc prepareBindVariables() []interface{} {\n\tbv := *bindVariables\n\tif *maxRandomID > *minRandomID {\n\t\tbv = append(bv, rand.Intn(*maxRandomID-*minRandomID)+*minRandomID)\n\t}\n\treturn bv\n}\n\nfunc execMulti(db *sql.DB, sql string) (*results, error) {\n\tall := newResults()\n\tec := concurrency.FirstErrorRecorder{}\n\twg := sync.WaitGroup{}\n\tisDML := sqlparser.IsDML(sql)\n\n\tstart := time.Now()\n\tfor i := 0; i < *parallel; i++ {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor j := 0; j < *count; j++ {\n\t\t\t\tvar qr *results\n\t\t\t\tvar err error\n\t\t\t\tif isDML {\n\t\t\t\t\tqr, err = execDml(db, sql)\n\t\t\t\t} else {\n\t\t\t\t\tqr, err = execNonDml(db, sql)\n\t\t\t\t}\n\t\t\t\tif *count == 1 {\n\t\t\t\t\tall = qr\n\t\t\t\t} else {\n\t\t\t\t\tall.merge(qr)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tec.RecordError(err)\n\t\t\t\t\tall.recordError(err)\n\t\t\t\t\t\/\/ We keep going and do not return early purpose.\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tall.duration = time.Since(start)\n\n\treturn all, ec.Error()\n}\n\nfunc execDml(db *sql.DB, sql string) (*results, error) {\n\tstart := time.Now()\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"BEGIN failed\")\n\t}\n\n\tresult, err := tx.Exec(sql, []interface{}(prepareBindVariables())...)\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"failed to execute DML\")\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"COMMIT failed\")\n\t}\n\n\trowsAffected, err := result.RowsAffected()\n\tlastInsertID, err := result.LastInsertId()\n\treturn &results{\n\t\trowsAffected: rowsAffected,\n\t\tlastInsertID: lastInsertID,\n\t\tduration: time.Since(start),\n\t}, nil\n}\n\nfunc execNonDml(db *sql.DB, sql string) (*results, error) {\n\tstart := time.Now()\n\trows, err := db.Query(sql, []interface{}(prepareBindVariables())...)\n\tif err != nil {\n\t\treturn nil, vterrors.Wrap(err, \"client error\")\n\t}\n\tdefer rows.Close()\n\n\t\/\/ get the headers\n\tvar qr results\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t}\n\tqr.Fields = cols\n\n\t\/\/ get the rows\n\tfor rows.Next() {\n\t\trow := make([]interface{}, len(cols))\n\t\tfor i := range row {\n\t\t\tvar col string\n\t\t\trow[i] = &col\n\t\t}\n\t\tif err := rows.Scan(row...); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"client error: %v\", err)\n\t\t}\n\n\t\t\/\/ unpack []*string into []string\n\t\tvals := make([]string, 0, len(row))\n\t\tfor _, value := range row {\n\t\t\tvals = append(vals, *(value.(*string)))\n\t\t}\n\t\tqr.Rows = append(qr.Rows, vals)\n\t}\n\tqr.rowsAffected = int64(len(qr.Rows))\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Vitess returned an error: %v\", err)\n\t}\n\n\tqr.duration = time.Since(start)\n\treturn &qr, nil\n}\n\ntype results struct {\n\tmu sync.Mutex\n\tFields []string `json:\"fields\"`\n\tRows [][]string `json:\"rows\"`\n\trowsAffected int64\n\tlastInsertID int64\n\tduration time.Duration\n\tcumulativeDuration time.Duration\n\n\t\/\/ Multi DML mode: Track total error count, error count per code and the first error.\n\ttotalErrorCount int\n\terrorCount map[vtrpcpb.Code]int\n\tfirstError map[vtrpcpb.Code]error\n}\n\nfunc newResults() *results {\n\treturn &results{\n\t\terrorCount: make(map[vtrpcpb.Code]int),\n\t\tfirstError: make(map[vtrpcpb.Code]error),\n\t}\n}\n\n\/\/ merge aggregates \"other\" into \"r\".\n\/\/ This is only used for executing DMLs concurrently and repeatedly.\n\/\/ Therefore, \"Fields\" and \"Rows\" are not merged.\nfunc (r *results) merge(other *results) {\n\tif other == nil {\n\t\treturn\n\t}\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.rowsAffected += other.rowsAffected\n\tif other.lastInsertID > r.lastInsertID {\n\t\tr.lastInsertID = other.lastInsertID\n\t}\n\tr.cumulativeDuration += other.duration\n}\n\nfunc (r *results) recordError(err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.totalErrorCount++\n\tcode := vterrors.Code(err)\n\tr.errorCount[code]++\n\n\tif r.errorCount[code] == 1 {\n\t\tr.firstError[code] = err\n\t}\n}\n\nfunc (r *results) print() {\n\tif r == nil {\n\t\treturn\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(r.Fields)\n\ttable.SetAutoFormatHeaders(false)\n\ttable.AppendBulk(r.Rows)\n\ttable.Render()\n\tfmt.Printf(\"%v row(s) affected (%v, cum: %v)\\n\", r.rowsAffected, r.duration, r.cumulativeDuration)\n\tif r.lastInsertID != 0 {\n\t\tfmt.Printf(\"Last insert ID: %v\\n\", r.lastInsertID)\n\t}\n\n\tif r.totalErrorCount == 0 {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%d error(s) were returned. Number of errors by error code:\\n\\n\", r.totalErrorCount)\n\t\/\/ Sort different error codes by count (descending).\n\ttype errorCounts struct {\n\t\tcode vtrpcpb.Code\n\t\tcount int\n\t}\n\tvar counts []errorCounts\n\tfor code, count := range r.errorCount {\n\t\tcounts = append(counts, errorCounts{code, count})\n\t}\n\tsort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count })\n\tfor _, c := range counts {\n\t\tfmt.Printf(\"%- 30v= % 5d\\n\", c.code, c.count)\n\t}\n\n\tfmt.Printf(\"\\nFirst error per code:\\n\\n\")\n\tfor code, err := range r.firstError {\n\t\tfmt.Printf(\"Code: %v\\nError: %v\\n\\n\", code, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ vt tablet server: Serves queries and performs housekeeping jobs.\npackage main\n\nimport (\n\t\"flag\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"vitess.io\/vitess\/go\/vt\/dbconfigs\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/mysqlctl\"\n\t\"vitess.io\/vitess\/go\/vt\/servenv\"\n\t\"vitess.io\/vitess\/go\/vt\/tableacl\"\n\t\"vitess.io\/vitess\/go\/vt\/tableacl\/simpleacl\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\/topoproto\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletmanager\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletserver\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletserver\/tabletenv\"\n)\n\nvar (\n\tenforceTableACLConfig = flag.Bool(\"enforce-tableacl-config\", false, \"if this flag is true, vttablet will fail to start if a valid tableacl config does not exist\")\n\ttableACLConfig = flag.String(\"table-acl-config\", \"\", \"path to table access checker config file; send SIGHUP to reload this file\")\n\ttableACLConfigReloadInterval = flag.Duration(\"table-acl-config-reload-interval\", 0, \"Ticker to reload ACLs\")\n\ttabletPath = flag.String(\"tablet-path\", \"\", \"tablet alias\")\n\n\tagent *tabletmanager.ActionAgent\n)\n\nfunc init() {\n\tservenv.RegisterDefaultFlags()\n}\n\nfunc main() {\n\tdbconfigs.RegisterFlags(dbconfigs.All...)\n\tmysqlctl.RegisterFlags()\n\n\tservenv.ParseFlags(\"vttablet\")\n\n\tif err := tabletenv.NewCurrentConfig().Verify(); err != nil {\n\t\tlog.Exitf(\"invalid config: %v\", err)\n\t}\n\n\ttabletenv.Init()\n\n\tservenv.Init()\n\n\tif *tabletPath == \"\" {\n\t\tlog.Exit(\"-tablet-path required\")\n\t}\n\ttabletAlias, err := topoproto.ParseTabletAlias(*tabletPath)\n\tif err != nil {\n\t\tlog.Exitf(\"failed to parse -tablet-path: %v\", err)\n\t}\n\n\tvar mycnf *mysqlctl.Mycnf\n\tvar socketFile string\n\t\/\/ If no connection parameters were specified, load the mycnf file\n\t\/\/ and use the socket from it. If connection parameters were specified,\n\t\/\/ we assume that the mysql is not local, and we skip loading mycnf.\n\t\/\/ This also means that backup and restore will not be allowed.\n\tif !dbconfigs.HasConnectionParams() {\n\t\tvar err error\n\t\tif mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil {\n\t\t\tlog.Exitf(\"mycnf read failed: %v\", err)\n\t\t}\n\t\tsocketFile = mycnf.SocketFile\n\t} else {\n\t\tlog.Info(\"connection parameters were specified. Not loading my.cnf.\")\n\t}\n\n\t\/\/ If connection parameters were specified, socketFile will be empty.\n\t\/\/ Otherwise, the socketFile (read from mycnf) will be used to initialize\n\t\/\/ dbconfigs.\n\tdbcfgs, err := dbconfigs.Init(socketFile)\n\tif err != nil {\n\t\tlog.Warning(err)\n\t}\n\n\tif *tableACLConfig != \"\" {\n\t\t\/\/ To override default simpleacl, other ACL plugins must set themselves to be default ACL factory\n\t\ttableacl.Register(\"simpleacl\", &simpleacl.Factory{})\n\t} else if *enforceTableACLConfig {\n\t\tlog.Exit(\"table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.\")\n\t}\n\n\t\/\/ creates and registers the query service\n\tts := topo.Open()\n\tqsc := tabletserver.NewServer(\"\", ts, *tabletAlias)\n\tservenv.OnRun(func() {\n\t\tqsc.Register()\n\t\taddStatusParts(qsc)\n\t})\n\tservenv.OnClose(func() {\n\t\t\/\/ We now leave the queryservice running during lameduck,\n\t\t\/\/ so stop it in OnClose(), after lameduck is over.\n\t\tqsc.StopService()\n\t})\n\n\tqsc.InitACL(*tableACLConfig, *enforceTableACLConfig, *tableACLConfigReloadInterval)\n\n\t\/\/ Create mysqld and register the health reporter (needs to be done\n\t\/\/ before initializing the agent, so the initial health check\n\t\/\/ done by the agent has the right reporter)\n\tmysqld := mysqlctl.NewMysqld(dbcfgs)\n\tservenv.OnClose(mysqld.Close)\n\n\t\/\/ Depends on both query and updateStream.\n\tgRPCPort := int32(0)\n\tif servenv.GRPCPort != nil {\n\t\tgRPCPort = int32(*servenv.GRPCPort)\n\t}\n\tagent, err = tabletmanager.NewActionAgent(context.Background(), ts, mysqld, qsc, tabletAlias, dbcfgs, mycnf, int32(*servenv.Port), gRPCPort)\n\tif err != nil {\n\t\tlog.Exitf(\"NewActionAgent() failed: %v\", err)\n\t}\n\n\tservenv.OnClose(func() {\n\t\t\/\/ Close the agent so that our topo entry gets pruned properly and any\n\t\t\/\/ background goroutines that use the topo connection are stopped.\n\t\tagent.Close()\n\n\t\t\/\/ We will still use the topo server during lameduck period\n\t\t\/\/ to update our state, so closing it in OnClose()\n\t\tts.Close()\n\t})\n\n\tservenv.RunDefault()\n}\n<commit_msg>yaml: add -tablet_config to read yaml<commit_after>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ vt tablet server: Serves queries and performs housekeeping jobs.\npackage main\n\nimport (\n\t\"flag\"\n\t\"io\/ioutil\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"sigs.k8s.io\/yaml\"\n\t\"vitess.io\/vitess\/go\/vt\/dbconfigs\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/mysqlctl\"\n\t\"vitess.io\/vitess\/go\/vt\/servenv\"\n\t\"vitess.io\/vitess\/go\/vt\/tableacl\"\n\t\"vitess.io\/vitess\/go\/vt\/tableacl\/simpleacl\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\/topoproto\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletmanager\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletserver\"\n\t\"vitess.io\/vitess\/go\/vt\/vttablet\/tabletserver\/tabletenv\"\n)\n\nvar (\n\tenforceTableACLConfig = flag.Bool(\"enforce-tableacl-config\", false, \"if this flag is true, vttablet will fail to start if a valid tableacl config does not exist\")\n\ttableACLConfig = flag.String(\"table-acl-config\", \"\", \"path to table access checker config file; send SIGHUP to reload this file\")\n\ttableACLConfigReloadInterval = flag.Duration(\"table-acl-config-reload-interval\", 0, \"Ticker to reload ACLs\")\n\ttabletPath = flag.String(\"tablet-path\", \"\", \"tablet alias\")\n\ttabletConfig = flag.String(\"tablet_config\", \"\", \"YAML file config for tablet\")\n\n\tagent *tabletmanager.ActionAgent\n)\n\nfunc init() {\n\tservenv.RegisterDefaultFlags()\n}\n\nfunc main() {\n\tdbconfigs.RegisterFlags(dbconfigs.All...)\n\tmysqlctl.RegisterFlags()\n\n\tservenv.ParseFlags(\"vttablet\")\n\n\tconfig := tabletenv.NewCurrentConfig()\n\tif err := config.Verify(); err != nil {\n\t\tlog.Exitf(\"invalid config: %v\", err)\n\t}\n\n\ttabletenv.Init()\n\tif *tabletConfig != \"\" {\n\t\tbytes, err := ioutil.ReadFile(*tabletConfig)\n\t\tif err != nil {\n\t\t\tlog.Exitf(\"error reading config file %s: %v\", *tabletConfig, err)\n\t\t}\n\t\tif err := yaml.Unmarshal(bytes, config); err != nil {\n\t\t\tlog.Exitf(\"error parsing config file %s: %v\", bytes, err)\n\t\t}\n\t\tgotBytes, _ := yaml.Marshal(config)\n\t\tlog.Infof(\"Loaded config file %s successfully:\\n%s\", *tabletConfig, gotBytes)\n\t}\n\n\tservenv.Init()\n\n\tif *tabletPath == \"\" {\n\t\tlog.Exit(\"-tablet-path required\")\n\t}\n\ttabletAlias, err := topoproto.ParseTabletAlias(*tabletPath)\n\tif err != nil {\n\t\tlog.Exitf(\"failed to parse -tablet-path: %v\", err)\n\t}\n\n\tvar mycnf *mysqlctl.Mycnf\n\tvar socketFile string\n\t\/\/ If no connection parameters were specified, load the mycnf file\n\t\/\/ and use the socket from it. If connection parameters were specified,\n\t\/\/ we assume that the mysql is not local, and we skip loading mycnf.\n\t\/\/ This also means that backup and restore will not be allowed.\n\tif !dbconfigs.HasConnectionParams() {\n\t\tvar err error\n\t\tif mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil {\n\t\t\tlog.Exitf(\"mycnf read failed: %v\", err)\n\t\t}\n\t\tsocketFile = mycnf.SocketFile\n\t} else {\n\t\tlog.Info(\"connection parameters were specified. Not loading my.cnf.\")\n\t}\n\n\t\/\/ If connection parameters were specified, socketFile will be empty.\n\t\/\/ Otherwise, the socketFile (read from mycnf) will be used to initialize\n\t\/\/ dbconfigs.\n\tdbcfgs, err := dbconfigs.Init(socketFile)\n\tif err != nil {\n\t\tlog.Warning(err)\n\t}\n\n\tif *tableACLConfig != \"\" {\n\t\t\/\/ To override default simpleacl, other ACL plugins must set themselves to be default ACL factory\n\t\ttableacl.Register(\"simpleacl\", &simpleacl.Factory{})\n\t} else if *enforceTableACLConfig {\n\t\tlog.Exit(\"table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.\")\n\t}\n\n\t\/\/ creates and registers the query service\n\tts := topo.Open()\n\tqsc := tabletserver.NewTabletServer(\"\", config, ts, *tabletAlias)\n\tservenv.OnRun(func() {\n\t\tqsc.Register()\n\t\taddStatusParts(qsc)\n\t})\n\tservenv.OnClose(func() {\n\t\t\/\/ We now leave the queryservice running during lameduck,\n\t\t\/\/ so stop it in OnClose(), after lameduck is over.\n\t\tqsc.StopService()\n\t})\n\n\tqsc.InitACL(*tableACLConfig, *enforceTableACLConfig, *tableACLConfigReloadInterval)\n\n\t\/\/ Create mysqld and register the health reporter (needs to be done\n\t\/\/ before initializing the agent, so the initial health check\n\t\/\/ done by the agent has the right reporter)\n\tmysqld := mysqlctl.NewMysqld(dbcfgs)\n\tservenv.OnClose(mysqld.Close)\n\n\t\/\/ Depends on both query and updateStream.\n\tgRPCPort := int32(0)\n\tif servenv.GRPCPort != nil {\n\t\tgRPCPort = int32(*servenv.GRPCPort)\n\t}\n\tagent, err = tabletmanager.NewActionAgent(context.Background(), ts, mysqld, qsc, tabletAlias, dbcfgs, mycnf, int32(*servenv.Port), gRPCPort)\n\tif err != nil {\n\t\tlog.Exitf(\"NewActionAgent() failed: %v\", err)\n\t}\n\n\tservenv.OnClose(func() {\n\t\t\/\/ Close the agent so that our topo entry gets pruned properly and any\n\t\t\/\/ background goroutines that use the topo connection are stopped.\n\t\tagent.Close()\n\n\t\t\/\/ We will still use the topo server during lameduck period\n\t\t\/\/ to update our state, so closing it in OnClose()\n\t\tts.Close()\n\t})\n\n\tservenv.RunDefault()\n}\n<|endoftext|>"} {"text":"<commit_before>package systests\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/client\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/client\/go\/protocol\/stellar1\"\n\t\"github.com\/keybase\/client\/go\/stellar\"\n\t\"github.com\/keybase\/client\/go\/teams\"\n\t\"github.com\/keybase\/stellarnet\"\n\t\"github.com\/stellar\/go\/build\"\n\t\"github.com\/stellar\/go\/clients\/horizon\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestStellarNoteRoundtripAndResets(t *testing.T) {\n\tctx := newSMUContext(t)\n\tdefer ctx.cleanup()\n\n\t\/\/ Sign up two users, bob and alice.\n\talice := ctx.installKeybaseForUser(\"alice\", 10)\n\talice.signup()\n\tdivDebug(ctx, \"Signed up alice (%s)\", alice.username)\n\tbob := ctx.installKeybaseForUser(\"bob\", 10)\n\tbob.signup()\n\tdivDebug(ctx, \"Signed up bob (%s)\", bob.username)\n\n\tt.Logf(\"note to self\")\n\tencB64, err := stellar.NoteEncryptB64(context.Background(), alice.getPrimaryGlobalContext(), sampleNote(), nil)\n\trequire.NoError(t, err)\n\tnote, err := stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"note to both users\")\n\tother := bob.userVersion()\n\tencB64, err = stellar.NoteEncryptB64(context.Background(), alice.getPrimaryGlobalContext(), sampleNote(), &other)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"decrypt as self\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), bob.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"reset sender\")\n\talice.reset()\n\tdivDebug(ctx, \"Reset bob (%s)\", bob.username)\n\talice.loginAfterReset(10)\n\tdivDebug(ctx, \"Bob logged in after reset\")\n\n\tt.Logf(\"fail to decrypt as post-reset self\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.Error(t, err)\n\trequire.Equal(t, \"note not encrypted for logged-in user\", err.Error())\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), bob.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n}\n\n\/\/ Test took 38s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaims(t *testing.T) {\n\ttestStellarRelayAutoClaims(t, false, false)\n}\n\n\/\/ Test took 29s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaimsWithPUK(t *testing.T) {\n\ttestStellarRelayAutoClaims(t, true, true)\n}\n\n\/\/ Part 1:\n\/\/ XLM is sent to a user before they have a [PUK \/ wallet].\n\/\/ In the form of multiple relay payments.\n\/\/ They then [get a PUK,] add a wallet, and enter the impteam,\n\/\/ which all kick the autoclaim into gear.\n\/\/\n\/\/ Part 2:\n\/\/ A relay payment is sent to the user who already has a wallet.\n\/\/ The funds should be claimed asap.\n\/\/\n\/\/ To debug this test use log filter \"stellar_test|poll-|AutoClaim|stellar.claim|pollfor\"\nfunc testStellarRelayAutoClaims(t *testing.T, startWithPUK, skipPart2 bool) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\tuseStellarTestNet(t)\n\n\talice := tt.addUser(\"alice\")\n\tvar bob *userPlusDevice\n\tif startWithPUK {\n\t\tbob = tt.addWalletlessUser(\"bob\")\n\t} else {\n\t\tbob = tt.addPuklessUser(\"bob\")\n\t}\n\talice.kickTeamRekeyd()\n\n\tt.Logf(\"alice gets funded\")\n\tres, err := alice.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\trequire.NoError(t, err)\n\tgift(t, res[0].AccountID)\n\n\tt.Logf(\"alice sends a first relay payment to bob P1\")\n\tattachIdentifyUI(t, alice.tc.G, newSimpleIdentifyUI())\n\tcmd := client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"50\",\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tt.Logf(\"alice sends a second relay payment to bob P2\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"30\",\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tt.Logf(\"get the impteam seqno to wait on later\")\n\tteam, _, _, err := teams.LookupImplicitTeam(context.Background(), alice.tc.G, alice.username+\",\"+bob.username, false)\n\trequire.NoError(t, err)\n\tnextSeqno := team.NextSeqno()\n\n\tif startWithPUK {\n\t\tt.Logf(\"bob gets a wallet\")\n\t\tbob.tc.Tp.DisableAutoWallet = false\n\t\tbob.tc.G.GetStellar().CreateWalletSoft(context.Background())\n\t} else {\n\t\tt.Logf(\"bob gets a PUK and wallet\")\n\t\tbob.perUserKeyUpgrade()\n\t\tbob.tc.G.GetStellar().CreateWalletSoft(context.Background())\n\n\t\tt.Logf(\"wait for alice to add bob to their impteam\")\n\t\talice.pollForTeamSeqnoLinkWithLoadArgs(keybase1.LoadTeamArg{ID: team.ID}, nextSeqno)\n\t}\n\n\tpollTime := 10 * time.Second\n\tif libkb.UseCITime(bob.tc.G) {\n\t\t\/\/ This test is especially slow.\n\t\tpollTime = 15 * time.Second\n\t}\n\n\tpollFor(t, \"claims to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-1-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"0 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"49.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T1 but not T2\", i)\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"29.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T2 but not T1\", i)\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received both payments\", i)\n\t\trequire.Equal(t, \"79.9999700 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n\tif skipPart2 {\n\t\tt.Logf(\"Skipping part 2\")\n\t\treturn\n\t}\n\n\tt.Logf(\"--------------------\")\n\tt.Logf(\"Part 2: Alice sends a relay payment to bob who now already has a wallet\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"10\",\n\t\tForceRelay: true,\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tpollFor(t, \"final claim to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-2-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"79.9999700 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received final payment\", i)\n\t\trequire.Equal(t, \"89.9999600 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n}\n\nfunc sampleNote() stellar1.NoteContents {\n\treturn stellar1.NoteContents{\n\t\tNote: \"wizbang\",\n\t\tStellarID: stellar1.TransactionID(\"6653fc2fdbc42ad51ccbe77ee0a3c29e258a5513c62fdc532cbfff91ab101abf\"),\n\t}\n}\n\n\/\/ Friendbot sends someone XLM\nfunc gift(t testing.TB, accountID stellar1.AccountID) {\n\tt.Logf(\"gift -> %v\", accountID)\n\turl := \"https:\/\/friendbot.stellar.org\/?addr=\" + accountID.String()\n\tt.Logf(\"gift url: %v\", url)\n\tres, err := http.Get(url)\n\trequire.NoError(t, err, \"friendbot request error\")\n\tbodyBuf := new(bytes.Buffer)\n\tbodyBuf.ReadFrom(res.Body)\n\tt.Logf(\"gift res: %v\", bodyBuf.String())\n\trequire.Equal(t, 200, res.StatusCode, \"friendbot response status code\")\n}\n\nfunc useStellarTestNet(t testing.TB) {\n\tstellarnet.SetClientAndNetwork(horizon.DefaultTestNetClient, build.TestNetwork)\n}\n<commit_msg>skip test<commit_after>package systests\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/client\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/client\/go\/protocol\/stellar1\"\n\t\"github.com\/keybase\/client\/go\/stellar\"\n\t\"github.com\/keybase\/client\/go\/teams\"\n\t\"github.com\/keybase\/stellarnet\"\n\t\"github.com\/stellar\/go\/build\"\n\t\"github.com\/stellar\/go\/clients\/horizon\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestStellarNoteRoundtripAndResets(t *testing.T) {\n\tctx := newSMUContext(t)\n\tdefer ctx.cleanup()\n\n\t\/\/ Sign up two users, bob and alice.\n\talice := ctx.installKeybaseForUser(\"alice\", 10)\n\talice.signup()\n\tdivDebug(ctx, \"Signed up alice (%s)\", alice.username)\n\tbob := ctx.installKeybaseForUser(\"bob\", 10)\n\tbob.signup()\n\tdivDebug(ctx, \"Signed up bob (%s)\", bob.username)\n\n\tt.Logf(\"note to self\")\n\tencB64, err := stellar.NoteEncryptB64(context.Background(), alice.getPrimaryGlobalContext(), sampleNote(), nil)\n\trequire.NoError(t, err)\n\tnote, err := stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"note to both users\")\n\tother := bob.userVersion()\n\tencB64, err = stellar.NoteEncryptB64(context.Background(), alice.getPrimaryGlobalContext(), sampleNote(), &other)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"decrypt as self\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), bob.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"reset sender\")\n\talice.reset()\n\tdivDebug(ctx, \"Reset bob (%s)\", bob.username)\n\talice.loginAfterReset(10)\n\tdivDebug(ctx, \"Bob logged in after reset\")\n\n\tt.Logf(\"fail to decrypt as post-reset self\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), alice.getPrimaryGlobalContext(), encB64)\n\trequire.Error(t, err)\n\trequire.Equal(t, \"note not encrypted for logged-in user\", err.Error())\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(context.Background(), bob.getPrimaryGlobalContext(), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n}\n\n\/\/ Test took 38s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaims(t *testing.T) {\n\ttestStellarRelayAutoClaims(t, false, false)\n}\n\n\/\/ Test took 29s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaimsWithPUK(t *testing.T) {\n\ttestStellarRelayAutoClaims(t, true, true)\n}\n\n\/\/ Part 1:\n\/\/ XLM is sent to a user before they have a [PUK \/ wallet].\n\/\/ In the form of multiple relay payments.\n\/\/ They then [get a PUK,] add a wallet, and enter the impteam,\n\/\/ which all kick the autoclaim into gear.\n\/\/\n\/\/ Part 2:\n\/\/ A relay payment is sent to the user who already has a wallet.\n\/\/ The funds should be claimed asap.\n\/\/\n\/\/ To debug this test use log filter \"stellar_test|poll-|AutoClaim|stellar.claim|pollfor\"\nfunc testStellarRelayAutoClaims(t *testing.T, startWithPUK, skipPart2 bool) {\n\tif os.Getenv(\"UNSKIP_CORE_8044\") != \"1\" {\n\t\tt.Skip(\"CORE-8044\")\n\t}\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\tuseStellarTestNet(t)\n\n\talice := tt.addUser(\"alice\")\n\tvar bob *userPlusDevice\n\tif startWithPUK {\n\t\tbob = tt.addWalletlessUser(\"bob\")\n\t} else {\n\t\tbob = tt.addPuklessUser(\"bob\")\n\t}\n\talice.kickTeamRekeyd()\n\n\tt.Logf(\"alice gets funded\")\n\tres, err := alice.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\trequire.NoError(t, err)\n\tgift(t, res[0].AccountID)\n\n\tt.Logf(\"alice sends a first relay payment to bob P1\")\n\tattachIdentifyUI(t, alice.tc.G, newSimpleIdentifyUI())\n\tcmd := client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"50\",\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tt.Logf(\"alice sends a second relay payment to bob P2\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"30\",\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tt.Logf(\"get the impteam seqno to wait on later\")\n\tteam, _, _, err := teams.LookupImplicitTeam(context.Background(), alice.tc.G, alice.username+\",\"+bob.username, false)\n\trequire.NoError(t, err)\n\tnextSeqno := team.NextSeqno()\n\n\tif startWithPUK {\n\t\tt.Logf(\"bob gets a wallet\")\n\t\tbob.tc.Tp.DisableAutoWallet = false\n\t\tbob.tc.G.GetStellar().CreateWalletSoft(context.Background())\n\t} else {\n\t\tt.Logf(\"bob gets a PUK and wallet\")\n\t\tbob.perUserKeyUpgrade()\n\t\tbob.tc.G.GetStellar().CreateWalletSoft(context.Background())\n\n\t\tt.Logf(\"wait for alice to add bob to their impteam\")\n\t\talice.pollForTeamSeqnoLinkWithLoadArgs(keybase1.LoadTeamArg{ID: team.ID}, nextSeqno)\n\t}\n\n\tpollTime := 10 * time.Second\n\tif libkb.UseCITime(bob.tc.G) {\n\t\t\/\/ This test is especially slow.\n\t\tpollTime = 15 * time.Second\n\t}\n\n\tpollFor(t, \"claims to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-1-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"0 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"49.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T1 but not T2\", i)\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"29.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T2 but not T1\", i)\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received both payments\", i)\n\t\trequire.Equal(t, \"79.9999700 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n\tif skipPart2 {\n\t\tt.Logf(\"Skipping part 2\")\n\t\treturn\n\t}\n\n\tt.Logf(\"--------------------\")\n\tt.Logf(\"Part 2: Alice sends a relay payment to bob who now already has a wallet\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"10\",\n\t\tForceRelay: true,\n\t}\n\trequire.NoError(t, cmd.Run())\n\n\tpollFor(t, \"final claim to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-2-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"79.9999700 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received final payment\", i)\n\t\trequire.Equal(t, \"89.9999600 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n}\n\nfunc sampleNote() stellar1.NoteContents {\n\treturn stellar1.NoteContents{\n\t\tNote: \"wizbang\",\n\t\tStellarID: stellar1.TransactionID(\"6653fc2fdbc42ad51ccbe77ee0a3c29e258a5513c62fdc532cbfff91ab101abf\"),\n\t}\n}\n\n\/\/ Friendbot sends someone XLM\nfunc gift(t testing.TB, accountID stellar1.AccountID) {\n\tt.Logf(\"gift -> %v\", accountID)\n\turl := \"https:\/\/friendbot.stellar.org\/?addr=\" + accountID.String()\n\tt.Logf(\"gift url: %v\", url)\n\tres, err := http.Get(url)\n\trequire.NoError(t, err, \"friendbot request error\")\n\tbodyBuf := new(bytes.Buffer)\n\tbodyBuf.ReadFrom(res.Body)\n\tt.Logf(\"gift res: %v\", bodyBuf.String())\n\trequire.Equal(t, 200, res.StatusCode, \"friendbot response status code\")\n}\n\nfunc useStellarTestNet(t testing.TB) {\n\tstellarnet.SetClientAndNetwork(horizon.DefaultTestNetClient, build.TestNetwork)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014, Truveris Inc. All Rights Reserved.\n\/\/ Use of this source code is governed by the ISC license in the LICENSE file.\n\/\/\n\/\/ This is ygord's tokenizer. It takes strings of characters such as:\n\/\/\n\/\/ unalias foo;alias foo \"play http:\/\/foo\/music.mp3; image bar.jpg\"\n\/\/\n\/\/ And creates a list of tokens:\n\/\/\n\/\/ - [\"unalias\", \"foo\", \";\", \"alias\", \"foo\", \"play http:\/\/foo\/music.mp3; image bar.jpg\"]\n\/\/\n\/\/ Note that unescaped semi-colons are singled out as tokens in a shell\n\/\/ fashion.\n\/\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"io\"\n\t\"strings\"\n)\n\nvar (\n)\n\nconst (\n\tSTATE_START TokenizerState = iota\n\tSTATE_INWORD TokenizerState = iota\n\tSTATE_QUOTED TokenizerState = iota\n\tSTATE_QUOTED_ESCAPED TokenizerState = iota\n\tSTATE_ESCAPED TokenizerState = iota\n\tSTATE_INWORD_ESCAPED TokenizerState = iota\n\n\tRUNETYPE_EOF RuneType = iota\n\tRUNETYPE_EOS RuneType = iota\n\tRUNETYPE_CHAR RuneType = iota\n\tRUNETYPE_SPACE RuneType = iota\n\tRUNETYPE_QUOTE RuneType = iota\n\tRUNETYPE_BACKSLASH RuneType = iota\n)\n\ntype RuneType int\ntype TokenizerState int\n\ntype Tokenizer struct {\n\tinput *bufio.Reader\n}\n\nfunc NewTokenizer(input io.Reader) *Tokenizer {\n\tt := &Tokenizer{bufio.NewReader(input)}\n\treturn t\n}\n\nfunc (t *Tokenizer) GetRuneType(r rune) RuneType {\n\tswitch {\n\tcase r == ';':\n\t\treturn RUNETYPE_EOS\n\tcase r == '\"':\n\t\treturn RUNETYPE_QUOTE\n\tcase r == '\\\\':\n\t\treturn RUNETYPE_BACKSLASH\n\tcase strings.ContainsRune(\" \\t\\n\\r\", r):\n\t\treturn RUNETYPE_SPACE\n\t}\n\n\treturn RUNETYPE_CHAR\n}\n\nfunc (t *Tokenizer) NextToken() (string, error) {\n\tstate := STATE_START\n\ttoken := make([]rune, 0)\n\tfor {\n\t\tnextRune, _, err := t.input.ReadRune()\n\t\tnextRuneType := t.GetRuneType(nextRune)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tnextRuneType = RUNETYPE_EOF\n\t\t\t} else {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tswitch state {\n\t\tcase STATE_START:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn \"\", io.EOF\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\tstate = STATE_INWORD\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_EOS:\n\t\t\t\treturn \";\", nil\n\t\t\tcase RUNETYPE_SPACE:\n\t\t\t\tcontinue\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_INWORD_ESCAPED\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_QUOTED\n\t\t\t}\n\t\tcase STATE_INWORD:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), io.EOF\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_SPACE:\n\t\t\t\treturn string(token), nil\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_INWORD_ESCAPED\n\t\t\tcase RUNETYPE_EOS:\n\t\t\t\tt.input.UnreadRune()\n\t\t\t\treturn string(token), nil\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_QUOTED\n\t\t\t}\n\t\tcase STATE_QUOTED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"missing quote termination\")\n\t\t\tcase RUNETYPE_CHAR, RUNETYPE_SPACE, RUNETYPE_EOS:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_QUOTED_ESCAPED\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_INWORD\n\t\t\t}\n\t\tcase STATE_INWORD_ESCAPED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"unterminated escape character\")\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\ttoken = append(token, '\"')\n\t\t\t\tstate = STATE_INWORD\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\ttoken = append(token, '\\\\')\n\t\t\t\tstate = STATE_INWORD\n\t\t\tdefault:\n\t\t\t\treturn string(token), errors.New(\"unknown escape character\")\n\t\t\t}\n\t\tcase STATE_QUOTED_ESCAPED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"unterminated escape character\")\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\ttoken = append(token, '\"')\n\t\t\t\tstate = STATE_QUOTED\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\ttoken = append(token, '\\\\')\n\t\t\t\tstate = STATE_QUOTED\n\t\t\tdefault:\n\t\t\t\treturn string(token), errors.New(\"unknown escape character\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", nil\n}\n<commit_msg>Don't look for magic in escaped letters.<commit_after>\/\/ Copyright 2014, Truveris Inc. All Rights Reserved.\n\/\/ Use of this source code is governed by the ISC license in the LICENSE file.\n\/\/\n\/\/ This is ygord's tokenizer. It takes strings of characters such as:\n\/\/\n\/\/ unalias foo;alias foo \"play http:\/\/foo\/music.mp3; image bar.jpg\"\n\/\/\n\/\/ And creates a list of tokens:\n\/\/\n\/\/ - [\"unalias\", \"foo\", \";\", \"alias\", \"foo\", \"play http:\/\/foo\/music.mp3; image bar.jpg\"]\n\/\/\n\/\/ Note that unescaped semi-colons are singled out as tokens in a shell\n\/\/ fashion.\n\/\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"io\"\n\t\"strings\"\n)\n\nvar (\n)\n\nconst (\n\tSTATE_START TokenizerState = iota\n\tSTATE_INWORD TokenizerState = iota\n\tSTATE_QUOTED TokenizerState = iota\n\tSTATE_QUOTED_ESCAPED TokenizerState = iota\n\tSTATE_ESCAPED TokenizerState = iota\n\tSTATE_INWORD_ESCAPED TokenizerState = iota\n\n\tRUNETYPE_EOF RuneType = iota\n\tRUNETYPE_EOS RuneType = iota\n\tRUNETYPE_CHAR RuneType = iota\n\tRUNETYPE_SPACE RuneType = iota\n\tRUNETYPE_QUOTE RuneType = iota\n\tRUNETYPE_BACKSLASH RuneType = iota\n)\n\ntype RuneType int\ntype TokenizerState int\n\ntype Tokenizer struct {\n\tinput *bufio.Reader\n}\n\nfunc NewTokenizer(input io.Reader) *Tokenizer {\n\tt := &Tokenizer{bufio.NewReader(input)}\n\treturn t\n}\n\nfunc (t *Tokenizer) GetRuneType(r rune) RuneType {\n\tswitch {\n\tcase r == ';':\n\t\treturn RUNETYPE_EOS\n\tcase r == '\"':\n\t\treturn RUNETYPE_QUOTE\n\tcase r == '\\\\':\n\t\treturn RUNETYPE_BACKSLASH\n\tcase strings.ContainsRune(\" \\t\\n\\r\", r):\n\t\treturn RUNETYPE_SPACE\n\t}\n\n\treturn RUNETYPE_CHAR\n}\n\nfunc (t *Tokenizer) NextToken() (string, error) {\n\tstate := STATE_START\n\ttoken := make([]rune, 0)\n\tfor {\n\t\tnextRune, _, err := t.input.ReadRune()\n\t\tnextRuneType := t.GetRuneType(nextRune)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tnextRuneType = RUNETYPE_EOF\n\t\t\t} else {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tswitch state {\n\t\tcase STATE_START:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn \"\", io.EOF\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\tstate = STATE_INWORD\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_EOS:\n\t\t\t\treturn \";\", nil\n\t\t\tcase RUNETYPE_SPACE:\n\t\t\t\tcontinue\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_INWORD_ESCAPED\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_QUOTED\n\t\t\t}\n\t\tcase STATE_INWORD:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), io.EOF\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_SPACE:\n\t\t\t\treturn string(token), nil\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_INWORD_ESCAPED\n\t\t\tcase RUNETYPE_EOS:\n\t\t\t\tt.input.UnreadRune()\n\t\t\t\treturn string(token), nil\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_QUOTED\n\t\t\t}\n\t\tcase STATE_QUOTED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"missing quote termination\")\n\t\t\tcase RUNETYPE_CHAR, RUNETYPE_SPACE, RUNETYPE_EOS:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\tstate = STATE_QUOTED_ESCAPED\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\tstate = STATE_INWORD\n\t\t\t}\n\t\tcase STATE_INWORD_ESCAPED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"unterminated escape character\")\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\ttoken = append(token, '\"')\n\t\t\t\tstate = STATE_INWORD\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\ttoken = append(token, '\\\\')\n\t\t\t\tstate = STATE_INWORD\n\t\t\tdefault:\n\t\t\t\treturn string(token), errors.New(\"unknown escape character\")\n\t\t\t}\n\t\tcase STATE_QUOTED_ESCAPED:\n\t\t\tswitch nextRuneType {\n\t\t\tcase RUNETYPE_EOF:\n\t\t\t\treturn string(token), errors.New(\"unterminated escape character\")\n\t\t\tcase RUNETYPE_CHAR:\n\t\t\t\ttoken = append(token, nextRune)\n\t\t\tcase RUNETYPE_QUOTE:\n\t\t\t\ttoken = append(token, '\"')\n\t\t\t\tstate = STATE_QUOTED\n\t\t\tcase RUNETYPE_BACKSLASH:\n\t\t\t\ttoken = append(token, '\\\\')\n\t\t\t\tstate = STATE_QUOTED\n\t\t\tdefault:\n\t\t\t\treturn string(token), errors.New(\"unknown escape character\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/shane-kerr\/ymmv\/dnsstub\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ starting point to find Yeti root servers\nvar yeti_root_hints = []string{\n\t\"bii.dns-lab.net.\",\n\t\"yeti-ns.wide.ad.jp.\",\n\t\"yeti-ns.tisf.net.\",\n}\n\n\/\/ allowed server-selection algorithms\nvar server_algorithms = map[string]bool{\n\t\"rtt\": true,\n\t\"round-robin\": true,\n\t\"random\": true,\n\t\"all\": true,\n}\n\n\/\/ maximum TTL to use\n\/\/const MAX_TTL = 30\n\/\/const MAX_TTL = 300\nconst MAX_TTL = 86400\n\n\/\/ convert a TTL into a bounded duration\nfunc use_ttl(ttl uint32) time.Duration {\n\tif ttl > MAX_TTL {\n\t\tttl = MAX_TTL\n\t}\n\tresult := time.Second * time.Duration(ttl+1)\n\treturn result\n}\n\n\/\/ information about each IP address for each Yeti name server\ntype ip_info struct {\n\t\/\/ IP address\n\tip net.IP\n\t\/\/ smoothed round-trip time (SRTT) for this IP address\n\tsrtt time.Duration\n}\n\n\/\/ information about each Yeti name server\ntype ns_info struct {\n\t\/\/ name of the name server, like \"bii.dns-lab.net\" (may be \"\")\n\tname string\n\n\t\/\/ IP addresses of the name server\n\tip_info []*ip_info\n\n\t\/\/ timer for seeing if the IP address changed (may be nil)\n\ttimer *time.Timer\n\n\t\/\/ server set we belong to\n\tsrvs *yeti_server_set\n}\n\ntype yeti_server_set struct {\n\t\/\/ prevent use of data during modification\n\tlock sync.Mutex\n\n\t\/\/ timer for refreshing the root zone NS RRset\n\troot_ns_timer *time.Timer\n\n\t\/\/ information about each IP address\n\tns []*ns_info\n\n\t\/\/ algorithm used to pick server\n\talgorithm string\n\n\t\/\/ next server to use (for round-robin)\n\tnext_server int\n\tnext_ip int\n\n\t\/\/ resolver for lookups\n\tresolver *dnsstub.StubResolver\n}\n\n\/\/ does a lookup and gets the root NS RRset\nfunc get_root_ns(hints []string) (ns_names []string, ns_ttl uint32) {\n\tns_query := new(dns.Msg)\n\tns_query.SetQuestion(\".\", dns.TypeNS)\n\tns_query.RecursionDesired = true\n\n\t\/\/ Try each of our servers listed in our hints until one works.\n\t\/\/ Use the rand.Perm() function so we try them in a random order.\n\tvar ns_response *dns.Msg\n\tfor _, n := range rand.Perm(len(hints)) {\n\t\troot := hints[n]\n\t\tif !strings.ContainsRune(root, ':') {\n\t\t\troot = root + \":53\"\n\t\t}\n\t\tvar err error\n\t\tns_response, _, err = dnsstub.DnsQuery(root, ns_query)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Error looking up Yeti root server NS from %s; %s\", root, err)\n\t\t}\n\t\tif ns_response != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif ns_response == nil {\n\t\tglog.Fatalf(\"Unable to get NS from any Yeti root server\")\n\t}\n\n\t\/\/ extract out the names from the NS RRset, and also save the TTL\n\tfor _, root_server := range ns_response.Answer {\n\t\tswitch root_server.(type) {\n\t\tcase *dns.NS:\n\t\t\tns_names = append(ns_names, root_server.(*dns.NS).Ns)\n\t\t\tns_ttl = root_server.Header().Ttl\n\t\t}\n\t}\n\n\tif len(ns_names) == 0 {\n\t\tglog.Fatalf(\"No NS found for Yeti root\")\n\t}\n\n\t\/\/ insure our order is repeatable\n\tsort.Strings(ns_names)\n\n\treturn ns_names, ns_ttl\n}\n\nfunc refresh_ns(srvs *yeti_server_set) {\n\tglog.V(1).Infof(\"refreshing root NS list\")\n\n\tns_names, ns_ttl := get_root_ns(yeti_root_hints)\n\n\tsrvs.lock.Lock()\n\tn := 0\n\tfor _, name := range ns_names {\n\t\t\/\/ remove any names that are no longer present\n\t\tfor (len(srvs.ns) > n) && (srvs.ns[n].name < name) {\n\t\t\tif !srvs.ns[n].timer.Stop() {\n\t\t\t\t<-srvs.ns[n].timer.C\n\t\t\t}\n\t\t\tsrvs.ns = append(srvs.ns[:n], srvs.ns[n+1:]...)\n\t\t}\n\t\t\/\/ if name is the same, we are good, advance to next name\n\t\tif srvs.ns[n].name == name {\n\t\t\tn++\n\t\t\t\/\/ otherwise we have to add it\n\t\t} else {\n\t\t\tnew_ns := &ns_info{name: name, srvs: srvs}\n\t\t\tsrvs.ns = append(srvs.ns, nil)\n\t\t\tcopy(srvs.ns[n+1:], srvs.ns[n:])\n\t\t\tsrvs.ns[n] = new_ns\n\t\t\tgo refresh_aaaa(new_ns, nil)\n\t\t}\n\t}\n\tsrvs.lock.Unlock()\n\n\t\/\/ set timer to refresh our NS RRset\n\tsrvs.root_ns_timer = time.AfterFunc(use_ttl(ns_ttl), func() { refresh_ns(srvs) })\n}\n\n\/\/ Note that on error we don't use this name server until we can find out\n\/\/ the IP address for it. In theory we could use the name server until the\n\/\/ TTL for the address expires, but rather than track that we just\n\/\/ temporarily stop using it.\nfunc refresh_aaaa(ns *ns_info, done chan int) {\n\tglog.V(1).Infof(\"refreshing %s\", ns.name)\n\n\tvar new_ip []net.IP\n\tvar this_ttl uint32\n\n\tanswer, _, err := ns.srvs.resolver.SyncQuery(ns.name, dns.TypeAAAA)\n\tif err != nil {\n\t\tglog.Warningf(\"Error looking up %s: %s\\n\", ns.name, err)\n\t}\n\n\tif answer != nil {\n\t\tfor _, root_address := range answer.Answer {\n\t\t\tswitch root_address.(type) {\n\t\t\tcase *dns.AAAA:\n\t\t\t\taaaa := root_address.(*dns.AAAA).AAAA\n\t\t\t\tglog.V(2).Infof(\"AAAA for %s = %s\", ns.name, aaaa)\n\t\t\t\tnew_ip = append(new_ip, aaaa)\n\t\t\t\tthis_ttl = root_address.Header().Ttl\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(new_ip) == 0 {\n\t\tglog.Warningf(\"No AAAA for %s, checking again in 300 seconds\", ns.name)\n\t\tthis_ttl = 300\n\t}\n\n\t\/\/ make a set of IP information, copying old SRTT if present in the old set\n\tvar new_ip_info []*ip_info\n\tfor _, ip := range new_ip {\n\t\tfound := false\n\t\tfor _, info := range ns.ip_info {\n\t\t\tif ip.Equal(info.ip) {\n\t\t\t\tglog.V(1).Infof(\"%s: copying old IP information ip=%s, srtt=%s\", ns.name, info.ip, info.srtt)\n\t\t\t\tnew_ip_info = append(new_ip_info, info)\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\t\/\/ if we are updating a name server that already had an IP, warn about this\n\t\t\tif len(ns.ip_info) > 0 {\n\t\t\t\tglog.Warningf(\"%s: adding new IP information ip=%s, srtt=0\", ns.name, ip)\n\t\t\t}\n\t\t\tnew_ip_info = append(new_ip_info, &ip_info{ip: ip, srtt: 0})\n\t\t}\n\t}\n\n\tns.srvs.lock.Lock()\n\tns.ip_info = new_ip_info\n\twhen := use_ttl(this_ttl)\n\tglog.V(1).Infof(\"scheduling refresh of AAAA for %s in %s\\n\", ns.name, when)\n\tns.timer = time.AfterFunc(when, func() { refresh_aaaa(ns, nil) })\n\tns.srvs.lock.Unlock()\n\n\tdone <- len(new_ip)\n}\n\n\/\/ get the list of root servers from known Yeti root servers\nfunc yeti_priming(srvs *yeti_server_set) {\n\t\/\/ get the names from the NS RRset\n\tglog.V(1).Infof(\"getting root NS RRset\")\n\tns_names, ns_ttl := get_root_ns(yeti_root_hints)\n\n\taaaa_done := make(chan int)\n\tfor _, ns_name := range ns_names {\n\t\tthis_ns := &ns_info{name: ns_name, srvs: srvs}\n\t\t\/\/ we want to complete at least one lookup before we return from priming\n\t\tgo refresh_aaaa(this_ns, aaaa_done)\n\t\tsrvs.ns = append(srvs.ns, this_ns)\n\t}\n\n\tfound_aaaa := false\n\tfor _ = range ns_names {\n\t\tnum_ip := <-aaaa_done\n\t\tif num_ip > 0 {\n\t\t\tfound_aaaa = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found_aaaa {\n\t\tglog.Fatalf(\"No AAAA found for Yeti root NS\")\n\t}\n\n\t\/\/ set timer to refresh our NS RRset\n\twhen := use_ttl(ns_ttl)\n\tglog.V(1).Infof(\"scheduling refresh of NS in %s\\n\", when)\n\tsrvs.root_ns_timer = time.AfterFunc(when, func() { refresh_ns(srvs) })\n}\n\nfunc init_yeti_server_set(ips []net.IP, algo string) (srvs *yeti_server_set) {\n\tsrvs = new(yeti_server_set)\n\n\tif len(ips) == 0 {\n\t\tglog.Infof(\"no IP's passed, performing Yeti priming\\n\")\n\t\tvar err error\n\t\tsrvs.resolver, err = dnsstub.Init(4, nil)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Error setting up DNS stub resolver: %s\\n\", err)\n\t\t}\n\t\tyeti_priming(srvs)\n\t\tglog.V(1).Infof(\"priming done\\n\")\n\t} else {\n\t\tvar ns_ip_info []*ip_info\n\t\tfor _, ip := range ips {\n\t\t\tns_ip_info = append(ns_ip_info, &ip_info{ip: ip, srtt: 0})\n\t\t}\n\t\tsrvs.ns = append(srvs.ns, &ns_info{ip_info: ns_ip_info})\n\t}\n\n\tsrvs.algorithm = algo\n\tsrvs.next_server = 0\n\tsrvs.next_ip = 0\n\n\treturn srvs\n}\n\ntype query_target struct {\n\tip net.IP\n\tns_name string\n}\n\n\/\/ Get the next set of IP addresses to query.\n\/\/ For most algorithms this is a single address, but it may be more (for \"all\").\nfunc (srvs *yeti_server_set) next() (targets []*query_target) {\n\tsrvs.lock.Lock()\n\tdefer srvs.lock.Unlock()\n\n\tif srvs.algorithm == \"round-robin\" {\n\t\tfor srvs.next_ip >= len(srvs.ns[srvs.next_server].ip_info) {\n\t\t\tsrvs.next_server = (srvs.next_server + 1) % len(srvs.ns)\n\t\t\tsrvs.next_ip = 0\n\t\t}\n\t\tns := srvs.ns[srvs.next_server]\n\t\tip := ns.ip_info[srvs.next_ip].ip\n\t\ttargets = append(targets, &query_target{ip: ip, ns_name: ns.name})\n\t\tsrvs.next_ip = srvs.next_ip + 1\n\t} else if srvs.algorithm == \"rtt\" {\n\t\tvar lowest_ip_info *ip_info = nil\n\t\tvar ns_name string\n\t\tfor _, ns := range srvs.ns {\n\t\t\tfor _, info := range ns.ip_info {\n\t\t\t\tif (lowest_ip_info == nil) || (lowest_ip_info.srtt > info.srtt) {\n\t\t\t\t\tlowest_ip_info = info\n\t\t\t\t\tns_name = ns.name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttargets = append(targets, &query_target{ip: lowest_ip_info.ip, ns_name: ns_name})\n\t} else {\n\t\tvar all_targets []*query_target\n\t\tfor _, ns := range srvs.ns {\n\t\t\tfor _, info := range ns.ip_info {\n\t\t\t\tall_targets = append(all_targets, &query_target{ip: info.ip, ns_name: ns.name})\n\t\t\t}\n\t\t}\n\t\tif srvs.algorithm == \"all\" {\n\t\t\ttargets = all_targets\n\t\t} else if srvs.algorithm == \"random\" {\n\t\t\ttargets = append(targets, all_targets[rand.Intn(len(all_targets))])\n\t\t}\n\t}\n\treturn targets\n}\n\nfunc (srvs *yeti_server_set) update_srtt(ip net.IP, rtt time.Duration) {\n\tglog.V(3).Infof(\"update_srtt ip=%s, rtt=%s\", ip, rtt)\n\tsrvs.lock.Lock()\n\tdefer srvs.lock.Unlock()\n\n\tfor _, ns_info := range srvs.ns {\n\t\tfor _, ip_info := range ns_info.ip_info {\n\t\t\t\/\/ update the time for the IP that we just queried\n\t\t\tif ip_info.ip.Equal(ip) {\n\t\t\t\tif ip_info.srtt == 0 {\n\t\t\t\t\tip_info.srtt = rtt\n\t\t\t\t} else {\n\t\t\t\t\tip_info.srtt = ((ip_info.srtt * 7) + (rtt * 3)) \/ 10\n\t\t\t\t}\n\t\t\t\tglog.V(2).Infof(\"%s: update SRTT ip=%s, srtt=%s\", ns_info.name, ip_info.ip, ip_info.srtt)\n\t\t\t\t\/\/ all other IP have their time decayed a bit\n\t\t\t} else {\n\t\t\t\t\/\/ There may be overflow issues to worry about here. Durations\n\t\t\t\t\/\/ are 64-bit nanoseconds, so we should be able to handle any\n\t\t\t\t\/\/\n\t\t\t\tip_info.srtt = (ip_info.srtt * 49) \/ 50\n\t\t\t\tglog.V(3).Infof(\"%s: decay SRTT ip=%s, srtt=%s\", ns_info.name, ip_info.ip, ip_info.srtt)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Change priming to work with IPv6-only servers<commit_after>package main\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/miekg\/dns\"\n\t\"github.com\/shane-kerr\/ymmv\/dnsstub\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ starting point to find Yeti root servers\nvar yeti_root_hints = []string{\n\t\/\/ we use IPv6 literals here, since for some reason the Go\n\t\/\/ net.DialTimeout() function seems to have stopped working\n\t\/\/ for IPv6-only hostnames\n\t\"[240c:f:1:22::6]:53\", \/\/ bii.dns-lab.net\n\t\"[2001:200:1d9::35]:53\", \/\/ yeti-ns.wide.ad.jp\n\t\"[2001:559:8000::6]:53\", \/\/ yeti-ns.tisf.net\n}\n\n\/\/ allowed server-selection algorithms\nvar server_algorithms = map[string]bool{\n\t\"rtt\": true,\n\t\"round-robin\": true,\n\t\"random\": true,\n\t\"all\": true,\n}\n\n\/\/ maximum TTL to use\n\/\/const MAX_TTL = 30\n\/\/const MAX_TTL = 300\nconst MAX_TTL = 86400\n\n\/\/ convert a TTL into a bounded duration\nfunc use_ttl(ttl uint32) time.Duration {\n\tif ttl > MAX_TTL {\n\t\tttl = MAX_TTL\n\t}\n\tresult := time.Second * time.Duration(ttl+1)\n\treturn result\n}\n\n\/\/ information about each IP address for each Yeti name server\ntype ip_info struct {\n\t\/\/ IP address\n\tip net.IP\n\t\/\/ smoothed round-trip time (SRTT) for this IP address\n\tsrtt time.Duration\n}\n\n\/\/ information about each Yeti name server\ntype ns_info struct {\n\t\/\/ name of the name server, like \"bii.dns-lab.net\" (may be \"\")\n\tname string\n\n\t\/\/ IP addresses of the name server\n\tip_info []*ip_info\n\n\t\/\/ timer for seeing if the IP address changed (may be nil)\n\ttimer *time.Timer\n\n\t\/\/ server set we belong to\n\tsrvs *yeti_server_set\n}\n\ntype yeti_server_set struct {\n\t\/\/ prevent use of data during modification\n\tlock sync.Mutex\n\n\t\/\/ timer for refreshing the root zone NS RRset\n\troot_ns_timer *time.Timer\n\n\t\/\/ information about each IP address\n\tns []*ns_info\n\n\t\/\/ algorithm used to pick server\n\talgorithm string\n\n\t\/\/ next server to use (for round-robin)\n\tnext_server int\n\tnext_ip int\n\n\t\/\/ resolver for lookups\n\tresolver *dnsstub.StubResolver\n}\n\n\/\/ does a lookup and gets the root NS RRset\nfunc get_root_ns(hints []string) (ns_names []string, ns_ttl uint32) {\n\tns_query := new(dns.Msg)\n\tns_query.SetQuestion(\".\", dns.TypeNS)\n\tns_query.RecursionDesired = true\n\n\t\/\/ Try each of our servers listed in our hints until one works.\n\t\/\/ Use the rand.Perm() function so we try them in a random order.\n\tvar ns_response *dns.Msg\n\tfor _, n := range rand.Perm(len(hints)) {\n\t\troot := hints[n]\n\t\tif !strings.ContainsRune(root, ':') {\n\t\t\troot = root + \":53\"\n\t\t}\n\t\tvar err error\n\t\tns_response, _, err = dnsstub.DnsQuery(root, ns_query)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Error looking up Yeti root server NS from %s; %s\", root, err)\n\t\t}\n\t\tif ns_response != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif ns_response == nil {\n\t\tglog.Fatalf(\"Unable to get NS from any Yeti root server\")\n\t}\n\n\t\/\/ extract out the names from the NS RRset, and also save the TTL\n\tfor _, root_server := range ns_response.Answer {\n\t\tswitch root_server.(type) {\n\t\tcase *dns.NS:\n\t\t\tns_names = append(ns_names, root_server.(*dns.NS).Ns)\n\t\t\tns_ttl = root_server.Header().Ttl\n\t\t}\n\t}\n\n\tif len(ns_names) == 0 {\n\t\tglog.Fatalf(\"No NS found for Yeti root\")\n\t}\n\n\t\/\/ insure our order is repeatable\n\tsort.Strings(ns_names)\n\n\treturn ns_names, ns_ttl\n}\n\nfunc refresh_ns(srvs *yeti_server_set) {\n\tglog.V(1).Infof(\"refreshing root NS list\")\n\n\tns_names, ns_ttl := get_root_ns(yeti_root_hints)\n\n\tsrvs.lock.Lock()\n\tn := 0\n\tfor _, name := range ns_names {\n\t\t\/\/ remove any names that are no longer present\n\t\tfor (len(srvs.ns) > n) && (srvs.ns[n].name < name) {\n\t\t\tif !srvs.ns[n].timer.Stop() {\n\t\t\t\t<-srvs.ns[n].timer.C\n\t\t\t}\n\t\t\tsrvs.ns = append(srvs.ns[:n], srvs.ns[n+1:]...)\n\t\t}\n\t\t\/\/ if name is the same, we are good, advance to next name\n\t\tif srvs.ns[n].name == name {\n\t\t\tn++\n\t\t\t\/\/ otherwise we have to add it\n\t\t} else {\n\t\t\tnew_ns := &ns_info{name: name, srvs: srvs}\n\t\t\tsrvs.ns = append(srvs.ns, nil)\n\t\t\tcopy(srvs.ns[n+1:], srvs.ns[n:])\n\t\t\tsrvs.ns[n] = new_ns\n\t\t\tgo refresh_aaaa(new_ns, nil)\n\t\t}\n\t}\n\tsrvs.lock.Unlock()\n\n\t\/\/ set timer to refresh our NS RRset\n\tsrvs.root_ns_timer = time.AfterFunc(use_ttl(ns_ttl), func() { refresh_ns(srvs) })\n}\n\n\/\/ Note that on error we don't use this name server until we can find out\n\/\/ the IP address for it. In theory we could use the name server until the\n\/\/ TTL for the address expires, but rather than track that we just\n\/\/ temporarily stop using it.\nfunc refresh_aaaa(ns *ns_info, done chan int) {\n\tglog.V(1).Infof(\"refreshing %s\", ns.name)\n\n\tvar new_ip []net.IP\n\tvar this_ttl uint32\n\n\tanswer, _, err := ns.srvs.resolver.SyncQuery(ns.name, dns.TypeAAAA)\n\tif err != nil {\n\t\tglog.Warningf(\"Error looking up %s: %s\\n\", ns.name, err)\n\t}\n\n\tif answer != nil {\n\t\tfor _, root_address := range answer.Answer {\n\t\t\tswitch root_address.(type) {\n\t\t\tcase *dns.AAAA:\n\t\t\t\taaaa := root_address.(*dns.AAAA).AAAA\n\t\t\t\tglog.V(2).Infof(\"AAAA for %s = %s\", ns.name, aaaa)\n\t\t\t\tnew_ip = append(new_ip, aaaa)\n\t\t\t\tthis_ttl = root_address.Header().Ttl\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(new_ip) == 0 {\n\t\tglog.Warningf(\"No AAAA for %s, checking again in 300 seconds\", ns.name)\n\t\tthis_ttl = 300\n\t}\n\n\t\/\/ make a set of IP information, copying old SRTT if present in the old set\n\tvar new_ip_info []*ip_info\n\tfor _, ip := range new_ip {\n\t\tfound := false\n\t\tfor _, info := range ns.ip_info {\n\t\t\tif ip.Equal(info.ip) {\n\t\t\t\tglog.V(1).Infof(\"%s: copying old IP information ip=%s, srtt=%s\", ns.name, info.ip, info.srtt)\n\t\t\t\tnew_ip_info = append(new_ip_info, info)\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\t\/\/ if we are updating a name server that already had an IP, warn about this\n\t\t\tif len(ns.ip_info) > 0 {\n\t\t\t\tglog.Warningf(\"%s: adding new IP information ip=%s, srtt=0\", ns.name, ip)\n\t\t\t}\n\t\t\tnew_ip_info = append(new_ip_info, &ip_info{ip: ip, srtt: 0})\n\t\t}\n\t}\n\n\tns.srvs.lock.Lock()\n\tns.ip_info = new_ip_info\n\twhen := use_ttl(this_ttl)\n\tglog.V(1).Infof(\"scheduling refresh of AAAA for %s in %s\\n\", ns.name, when)\n\tns.timer = time.AfterFunc(when, func() { refresh_aaaa(ns, nil) })\n\tns.srvs.lock.Unlock()\n\n\tdone <- len(new_ip)\n}\n\n\/\/ get the list of root servers from known Yeti root servers\nfunc yeti_priming(srvs *yeti_server_set) {\n\t\/\/ get the names from the NS RRset\n\tglog.V(1).Infof(\"getting root NS RRset\")\n\tns_names, ns_ttl := get_root_ns(yeti_root_hints)\n\n\taaaa_done := make(chan int)\n\tfor _, ns_name := range ns_names {\n\t\tthis_ns := &ns_info{name: ns_name, srvs: srvs}\n\t\t\/\/ we want to complete at least one lookup before we return from priming\n\t\tgo refresh_aaaa(this_ns, aaaa_done)\n\t\tsrvs.ns = append(srvs.ns, this_ns)\n\t}\n\n\tfound_aaaa := false\n\tfor _ = range ns_names {\n\t\tnum_ip := <-aaaa_done\n\t\tif num_ip > 0 {\n\t\t\tfound_aaaa = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found_aaaa {\n\t\tglog.Fatalf(\"No AAAA found for Yeti root NS\")\n\t}\n\n\t\/\/ set timer to refresh our NS RRset\n\twhen := use_ttl(ns_ttl)\n\tglog.V(1).Infof(\"scheduling refresh of NS in %s\\n\", when)\n\tsrvs.root_ns_timer = time.AfterFunc(when, func() { refresh_ns(srvs) })\n}\n\nfunc init_yeti_server_set(ips []net.IP, algo string) (srvs *yeti_server_set) {\n\tsrvs = new(yeti_server_set)\n\n\tif len(ips) == 0 {\n\t\tglog.Infof(\"no IP's passed, performing Yeti priming\\n\")\n\t\tvar err error\n\t\tsrvs.resolver, err = dnsstub.Init(4, nil)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Error setting up DNS stub resolver: %s\\n\", err)\n\t\t}\n\t\tyeti_priming(srvs)\n\t\tglog.V(1).Infof(\"priming done\\n\")\n\t} else {\n\t\tvar ns_ip_info []*ip_info\n\t\tfor _, ip := range ips {\n\t\t\tns_ip_info = append(ns_ip_info, &ip_info{ip: ip, srtt: 0})\n\t\t}\n\t\tsrvs.ns = append(srvs.ns, &ns_info{ip_info: ns_ip_info})\n\t}\n\n\tsrvs.algorithm = algo\n\tsrvs.next_server = 0\n\tsrvs.next_ip = 0\n\n\treturn srvs\n}\n\ntype query_target struct {\n\tip net.IP\n\tns_name string\n}\n\n\/\/ Get the next set of IP addresses to query.\n\/\/ For most algorithms this is a single address, but it may be more (for \"all\").\nfunc (srvs *yeti_server_set) next() (targets []*query_target) {\n\tsrvs.lock.Lock()\n\tdefer srvs.lock.Unlock()\n\n\tif srvs.algorithm == \"round-robin\" {\n\t\tfor srvs.next_ip >= len(srvs.ns[srvs.next_server].ip_info) {\n\t\t\tsrvs.next_server = (srvs.next_server + 1) % len(srvs.ns)\n\t\t\tsrvs.next_ip = 0\n\t\t}\n\t\tns := srvs.ns[srvs.next_server]\n\t\tip := ns.ip_info[srvs.next_ip].ip\n\t\ttargets = append(targets, &query_target{ip: ip, ns_name: ns.name})\n\t\tsrvs.next_ip = srvs.next_ip + 1\n\t} else if srvs.algorithm == \"rtt\" {\n\t\tvar lowest_ip_info *ip_info = nil\n\t\tvar ns_name string\n\t\tfor _, ns := range srvs.ns {\n\t\t\tfor _, info := range ns.ip_info {\n\t\t\t\tif (lowest_ip_info == nil) || (lowest_ip_info.srtt > info.srtt) {\n\t\t\t\t\tlowest_ip_info = info\n\t\t\t\t\tns_name = ns.name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttargets = append(targets, &query_target{ip: lowest_ip_info.ip, ns_name: ns_name})\n\t} else {\n\t\tvar all_targets []*query_target\n\t\tfor _, ns := range srvs.ns {\n\t\t\tfor _, info := range ns.ip_info {\n\t\t\t\tall_targets = append(all_targets, &query_target{ip: info.ip, ns_name: ns.name})\n\t\t\t}\n\t\t}\n\t\tif srvs.algorithm == \"all\" {\n\t\t\ttargets = all_targets\n\t\t} else if srvs.algorithm == \"random\" {\n\t\t\ttargets = append(targets, all_targets[rand.Intn(len(all_targets))])\n\t\t}\n\t}\n\treturn targets\n}\n\nfunc (srvs *yeti_server_set) update_srtt(ip net.IP, rtt time.Duration) {\n\tglog.V(3).Infof(\"update_srtt ip=%s, rtt=%s\", ip, rtt)\n\tsrvs.lock.Lock()\n\tdefer srvs.lock.Unlock()\n\n\tfor _, ns_info := range srvs.ns {\n\t\tfor _, ip_info := range ns_info.ip_info {\n\t\t\t\/\/ update the time for the IP that we just queried\n\t\t\tif ip_info.ip.Equal(ip) {\n\t\t\t\tif ip_info.srtt == 0 {\n\t\t\t\t\tip_info.srtt = rtt\n\t\t\t\t} else {\n\t\t\t\t\tip_info.srtt = ((ip_info.srtt * 7) + (rtt * 3)) \/ 10\n\t\t\t\t}\n\t\t\t\tglog.V(2).Infof(\"%s: update SRTT ip=%s, srtt=%s\", ns_info.name, ip_info.ip, ip_info.srtt)\n\t\t\t\t\/\/ all other IP have their time decayed a bit\n\t\t\t} else {\n\t\t\t\t\/\/ There may be overflow issues to worry about here. Durations\n\t\t\t\t\/\/ are 64-bit nanoseconds, so we should be able to handle any\n\t\t\t\t\/\/\n\t\t\t\tip_info.srtt = (ip_info.srtt * 49) \/ 50\n\t\t\t\tglog.V(3).Infof(\"%s: decay SRTT ip=%s, srtt=%s\", ns_info.name, ip_info.ip, ip_info.srtt)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package screenshot\n\nimport (\n\t\"image\"\n)\n\nfunc ScreenRect() (image.Rectangle, error) {\n\tpanic(\"ScreenRect is not supported on this platform.\")\n}\n\nfunc CaptureScreen() (*image.RGBA, error) {\n\tpanic(\"CaptureScreen is not supported on this platform.\")\n}\n\nfunc CaptureRect(rect image.Rectangle) (*image.RGBA, error) {\n\tpanic(\"CaptureRect is not supported on this platform.\")\n}\n<commit_msg>Implemented all functions using native frameworks<commit_after>package screenshot\n\nimport (\n\t\/\/ #cgo LDFLAGS: -framework CoreGraphics\n\t\/\/ #cgo LDFLAGS: -framework CoreFoundation\n\t\/\/ #include <CoreGraphics\/CoreGraphics.h>\n\t\/\/ #include <CoreFoundation\/CoreFoundation.h>\n\t\"C\"\n\t\"image\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nfunc ScreenRect() (image.Rectangle, error) {\n\tdisplayID := C.CGMainDisplayID()\n\twidth := int(C.CGDisplayPixelsWide(displayID))\n\theight := int(C.CGDisplayPixelsHigh(displayID))\n\treturn image.Rect(0, 0, width, height), nil\n}\n\nfunc CaptureScreen() (*image.RGBA, error) {\n\trect, err := ScreenRect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn CaptureRect(rect)\n}\n\nfunc CaptureRect(rect image.Rectangle) (*image.RGBA, error) {\n\tdisplayID := C.CGMainDisplayID()\n\twidth := int(C.CGDisplayPixelsWide(displayID))\n\trawData := C.CGDataProviderCopyData(C.CGImageGetDataProvider(C.CGDisplayCreateImage(displayID)))\n\n\tlength := int(C.CFDataGetLength(rawData))\n\tptr := unsafe.Pointer(C.CFDataGetBytePtr(rawData))\n\n\tvar slice []byte\n\thdrp := (*reflect.SliceHeader)(unsafe.Pointer(&slice))\n\thdrp.Data = uintptr(ptr)\n\thdrp.Len = length\n\thdrp.Cap = length\n\n\timageBytes := make([]byte, length)\n\n\tfor i := 0; i < length; i += 4 {\n\t\timageBytes[i], imageBytes[i+2], imageBytes[i+1], imageBytes[i+3] = slice[i+2], slice[i], slice[i+1], slice[i+3]\n\t}\n\n\tC.CFRelease(rawData)\n\n\timg := &image.RGBA{Pix: imageBytes, Stride: 4 * width, Rect: rect}\n\treturn img, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t. \"github.com\/flynn\/noise\"\n)\n\nfunc main() {\n\tfor ci, cipher := range []CipherFunc{CipherAESGCM, CipherChaChaPoly} {\n\t\tfor _, hash := range []HashFunc{HashSHA256, HashSHA512, HashBLAKE2b, HashBLAKE2s} {\n\t\t\tfor hi, handshake := range []HandshakePattern{\n\t\t\t\tHandshakeNN,\n\t\t\t\tHandshakeKN,\n\t\t\t\tHandshakeNK,\n\t\t\t\tHandshakeKK,\n\t\t\t\tHandshakeNX,\n\t\t\t\tHandshakeKX,\n\t\t\t\tHandshakeXN,\n\t\t\t\tHandshakeIN,\n\t\t\t\tHandshakeXK,\n\t\t\t\tHandshakeIK,\n\t\t\t\tHandshakeXX,\n\t\t\t\tHandshakeIX,\n\t\t\t\tHandshakeN,\n\t\t\t\tHandshakeK,\n\t\t\t\tHandshakeX,\n\t\t\t\tHandshakeXR,\n\t\t\t} {\n\t\t\t\tfor _, psk := range []bool{false, true} {\n\t\t\t\t\tpayloads := (psk && hi%2 == 0) || (!psk && hi%2 != 0)\n\t\t\t\t\tprologue := ci == 0\n\t\t\t\t\twriteHandshake(\n\t\t\t\t\t\tos.Stdout,\n\t\t\t\t\t\tNewCipherSuite(DH25519, cipher, hash),\n\t\t\t\t\t\thandshake, psk, prologue, payloads,\n\t\t\t\t\t)\n\t\t\t\t\tfmt.Fprintln(os.Stdout)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc hexReader(s string) io.Reader {\n\tres, err := hex.DecodeString(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bytes.NewBuffer(res)\n}\n\nconst (\n\tkey0 = \"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f\"\n\tkey1 = \"0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20\"\n\tkey2 = \"2122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f40\"\n\tkey3 = \"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f\"\n\tkey4 = \"4142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f60\"\n)\n\nfunc writeHandshake(out io.Writer, cs CipherSuite, h HandshakePattern, hasPSK, hasPrologue, payloads bool) {\n\tvar prologue, psk []byte\n\tif hasPrologue {\n\t\tprologue = []byte(\"notsecret\")\n\t}\n\tif hasPSK {\n\t\tpsk = []byte(\"verysecret\")\n\t}\n\n\tstaticI := cs.GenerateKeypair(hexReader(key0))\n\tstaticR := cs.GenerateKeypair(hexReader(key1))\n\tephR := cs.GenerateKeypair(hexReader(key2))\n\n\tconfigI := Config{\n\t\tCipherSuite: cs,\n\t\tRandom: hexReader(key3),\n\t\tPattern: h,\n\t\tInitiator: true,\n\t\tPrologue: prologue,\n\t\tPresharedKey: psk,\n\t}\n\tconfigR := configI\n\tconfigR.Random = hexReader(key4)\n\tconfigR.Initiator = false\n\n\tvar pskName string\n\tif hasPSK {\n\t\tpskName = \"PSK\"\n\t}\n\n\tfmt.Fprintf(out, \"handshake=Noise%s_%s_%s\\n\", pskName, h.Name, cs.Name())\n\n\tif len(h.Name) == 1 {\n\t\tswitch h.Name {\n\t\tcase \"N\":\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\tcase \"K\":\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tconfigR.PeerStatic = staticI.Public\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\tcase \"X\":\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\t}\n\t} else {\n\t\tswitch h.Name[0] {\n\t\tcase 'K', 'X', 'I':\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tif h.Name[0] == 'K' {\n\t\t\t\tconfigR.PeerStatic = staticI.Public\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t}\n\t\tswitch h.Name[1] {\n\t\tcase 'K', 'E', 'X', 'R':\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\t\tswitch h.Name[1] {\n\t\t\tcase 'K':\n\t\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tcase 'E':\n\t\t\t\tconfigR.EphemeralKeypair = ephR\n\t\t\t\tconfigI.PeerEphemeral = ephR.Public\n\t\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\t\tfmt.Fprintf(out, \"resp_ephemeral=%x\\n\", ephR.Private)\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Fprintf(out, \"gen_init_ephemeral=%s\\n\", key3)\n\tfmt.Fprintf(out, \"gen_resp_ephemeral=%s\\n\", key4)\n\tif len(prologue) > 0 {\n\t\tfmt.Fprintf(out, \"prologue=%x\\n\", prologue)\n\t}\n\tif len(psk) > 0 {\n\t\tfmt.Fprintf(out, \"preshared_key=%x\\n\", psk)\n\t}\n\n\thsI := NewHandshakeState(configI)\n\thsR := NewHandshakeState(configR)\n\n\tvar cs0, cs1 *CipherState\n\tfor i := range h.Messages {\n\t\twriter, reader := hsI, hsR\n\t\tif i%2 != 0 {\n\t\t\twriter, reader = hsR, hsI\n\t\t}\n\n\t\tvar payload string\n\t\tif payloads {\n\t\t\tpayload = fmt.Sprintf(\"test_msg_%d\", i)\n\t\t}\n\t\tvar msg []byte\n\t\tmsg, cs0, cs1 = writer.WriteMessage(nil, []byte(payload))\n\t\t_, _, _, err := reader.ReadMessage(nil, msg)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", i, payload)\n\t\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", i, msg)\n\t}\n\n\tpayload0 := []byte(\"yellowsubmarine\")\n\tpayload1 := []byte(\"submarineyellow\")\n\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", len(h.Messages), payload0)\n\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", len(h.Messages), cs0.Encrypt(nil, nil, payload0))\n\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", len(h.Messages)+1, payload1)\n\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", len(h.Messages)+1, cs1.Encrypt(nil, nil, payload1))\n}\n<commit_msg>vectorgen: use 32-byte psk<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t. \"github.com\/flynn\/noise\"\n)\n\nfunc main() {\n\tfor ci, cipher := range []CipherFunc{CipherAESGCM, CipherChaChaPoly} {\n\t\tfor _, hash := range []HashFunc{HashSHA256, HashSHA512, HashBLAKE2b, HashBLAKE2s} {\n\t\t\tfor hi, handshake := range []HandshakePattern{\n\t\t\t\tHandshakeNN,\n\t\t\t\tHandshakeKN,\n\t\t\t\tHandshakeNK,\n\t\t\t\tHandshakeKK,\n\t\t\t\tHandshakeNX,\n\t\t\t\tHandshakeKX,\n\t\t\t\tHandshakeXN,\n\t\t\t\tHandshakeIN,\n\t\t\t\tHandshakeXK,\n\t\t\t\tHandshakeIK,\n\t\t\t\tHandshakeXX,\n\t\t\t\tHandshakeIX,\n\t\t\t\tHandshakeN,\n\t\t\t\tHandshakeK,\n\t\t\t\tHandshakeX,\n\t\t\t\tHandshakeXR,\n\t\t\t} {\n\t\t\t\tfor _, psk := range []bool{false, true} {\n\t\t\t\t\tpayloads := (psk && hi%2 == 0) || (!psk && hi%2 != 0)\n\t\t\t\t\tprologue := ci == 0\n\t\t\t\t\twriteHandshake(\n\t\t\t\t\t\tos.Stdout,\n\t\t\t\t\t\tNewCipherSuite(DH25519, cipher, hash),\n\t\t\t\t\t\thandshake, psk, prologue, payloads,\n\t\t\t\t\t)\n\t\t\t\t\tfmt.Fprintln(os.Stdout)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc hexReader(s string) io.Reader {\n\tres, err := hex.DecodeString(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bytes.NewBuffer(res)\n}\n\nconst (\n\tkey0 = \"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f\"\n\tkey1 = \"0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20\"\n\tkey2 = \"2122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f40\"\n\tkey3 = \"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f\"\n\tkey4 = \"4142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f60\"\n)\n\nfunc writeHandshake(out io.Writer, cs CipherSuite, h HandshakePattern, hasPSK, hasPrologue, payloads bool) {\n\tvar prologue, psk []byte\n\tif hasPrologue {\n\t\tprologue = []byte(\"notsecret\")\n\t}\n\tif hasPSK {\n\t\tpsk = []byte(\"!verysecretverysecretverysecret!\")\n\t}\n\n\tstaticI := cs.GenerateKeypair(hexReader(key0))\n\tstaticR := cs.GenerateKeypair(hexReader(key1))\n\tephR := cs.GenerateKeypair(hexReader(key2))\n\n\tconfigI := Config{\n\t\tCipherSuite: cs,\n\t\tRandom: hexReader(key3),\n\t\tPattern: h,\n\t\tInitiator: true,\n\t\tPrologue: prologue,\n\t\tPresharedKey: psk,\n\t}\n\tconfigR := configI\n\tconfigR.Random = hexReader(key4)\n\tconfigR.Initiator = false\n\n\tvar pskName string\n\tif hasPSK {\n\t\tpskName = \"PSK\"\n\t}\n\n\tfmt.Fprintf(out, \"handshake=Noise%s_%s_%s\\n\", pskName, h.Name, cs.Name())\n\n\tif len(h.Name) == 1 {\n\t\tswitch h.Name {\n\t\tcase \"N\":\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\tcase \"K\":\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tconfigR.PeerStatic = staticI.Public\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\tcase \"X\":\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\t}\n\t} else {\n\t\tswitch h.Name[0] {\n\t\tcase 'K', 'X', 'I':\n\t\t\tconfigI.StaticKeypair = staticI\n\t\t\tif h.Name[0] == 'K' {\n\t\t\t\tconfigR.PeerStatic = staticI.Public\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"init_static=%x\\n\", staticI.Private)\n\t\t}\n\t\tswitch h.Name[1] {\n\t\tcase 'K', 'E', 'X', 'R':\n\t\t\tconfigR.StaticKeypair = staticR\n\t\t\tfmt.Fprintf(out, \"resp_static=%x\\n\", staticR.Private)\n\t\t\tswitch h.Name[1] {\n\t\t\tcase 'K':\n\t\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\tcase 'E':\n\t\t\t\tconfigR.EphemeralKeypair = ephR\n\t\t\t\tconfigI.PeerEphemeral = ephR.Public\n\t\t\t\tconfigI.PeerStatic = staticR.Public\n\t\t\t\tfmt.Fprintf(out, \"resp_ephemeral=%x\\n\", ephR.Private)\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Fprintf(out, \"gen_init_ephemeral=%s\\n\", key3)\n\tfmt.Fprintf(out, \"gen_resp_ephemeral=%s\\n\", key4)\n\tif len(prologue) > 0 {\n\t\tfmt.Fprintf(out, \"prologue=%x\\n\", prologue)\n\t}\n\tif len(psk) > 0 {\n\t\tfmt.Fprintf(out, \"preshared_key=%x\\n\", psk)\n\t}\n\n\thsI := NewHandshakeState(configI)\n\thsR := NewHandshakeState(configR)\n\n\tvar cs0, cs1 *CipherState\n\tfor i := range h.Messages {\n\t\twriter, reader := hsI, hsR\n\t\tif i%2 != 0 {\n\t\t\twriter, reader = hsR, hsI\n\t\t}\n\n\t\tvar payload string\n\t\tif payloads {\n\t\t\tpayload = fmt.Sprintf(\"test_msg_%d\", i)\n\t\t}\n\t\tvar msg []byte\n\t\tmsg, cs0, cs1 = writer.WriteMessage(nil, []byte(payload))\n\t\t_, _, _, err := reader.ReadMessage(nil, msg)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", i, payload)\n\t\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", i, msg)\n\t}\n\n\tpayload0 := []byte(\"yellowsubmarine\")\n\tpayload1 := []byte(\"submarineyellow\")\n\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", len(h.Messages), payload0)\n\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", len(h.Messages), cs0.Encrypt(nil, nil, payload0))\n\tfmt.Fprintf(out, \"msg_%d_payload=%x\\n\", len(h.Messages)+1, payload1)\n\tfmt.Fprintf(out, \"msg_%d_ciphertext=%x\\n\", len(h.Messages)+1, cs1.Encrypt(nil, nil, payload1))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package osmpbf decodes OpenStreetMap (OSM) PBF files.\n\/\/ Use this package by creating a NewDecoder and passing it a PBF file.\n\/\/ Use Start to start decoding process.\n\/\/ Use Decode to return Node, Way and Relation structs.\npackage osmpbf\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/qedus\/osmpbf\/OSMPBF\"\n)\n\nconst (\n\tmaxBlobHeaderSize = 64 * 1024\n\tmaxBlobSize = 32 * 1024 * 1024\n)\n\nvar (\n\tparseCapabilities = map[string]bool{\n\t\t\"OsmSchema-V0.6\": true,\n\t\t\"DenseNodes\": true,\n\t}\n)\n\ntype Info struct {\n\tVersion int32\n\tTimestamp time.Time\n\tChangeset int64\n\tUid int32\n\tUser string\n\tVisible bool\n}\n\ntype Node struct {\n\tID int64\n\tLat float64\n\tLon float64\n\tTags map[string]string\n\tInfo Info\n}\n\ntype Way struct {\n\tID int64\n\tTags map[string]string\n\tNodeIDs []int64\n\tInfo Info\n}\n\ntype Relation struct {\n\tID int64\n\tTags map[string]string\n\tMembers []Member\n\tInfo Info\n}\n\ntype MemberType int\n\nconst (\n\tNodeType MemberType = iota\n\tWayType\n\tRelationType\n)\n\ntype Member struct {\n\tID int64\n\tType MemberType\n\tRole string\n}\n\ntype pair struct {\n\ti interface{}\n\te error\n}\n\n\/\/ A Decoder reads and decodes OpenStreetMap PBF data from an input stream.\ntype Decoder struct {\n\tsizeBuf []byte\n\theaderBuf []byte\n\tblobBuf []byte\n\n\tr io.Reader\n\tserializer chan pair\n\n\t\/\/ for data decoders\n\tinputs []chan<- pair\n\toutputs []<-chan pair\n}\n\n\/\/ NewDecoder returns a new decoder that reads from r.\nfunc NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tsizeBuf: make([]byte, 4),\n\t\theaderBuf: make([]byte, maxBlobHeaderSize),\n\t\tblobBuf: make([]byte, maxBlobSize),\n\t\tr: r,\n\t\tserializer: make(chan pair, 8000), \/\/ typical PrimitiveBlock contains 8k OSM entities\n\t}\n}\n\n\/\/ Start decoding process using n goroutines.\nfunc (dec *Decoder) Start(n int) error {\n\tif n < 1 {\n\t\tn = 1\n\t}\n\n\t\/\/ read OSMHeader\n\tblobHeader, blob, err := dec.readFileBlock()\n\tif err == nil {\n\t\tif blobHeader.GetType() == \"OSMHeader\" {\n\t\t\terr = decodeOSMHeader(blob)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"unexpected first fileblock of type %s\", blobHeader.GetType())\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ start data decoders\n\tfor i := 0; i < n; i++ {\n\t\tinput := make(chan pair)\n\t\toutput := make(chan pair)\n\t\tgo func() {\n\t\t\tdd := new(dataDecoder)\n\t\t\tfor p := range input {\n\t\t\t\tif p.e == nil {\n\t\t\t\t\t\/\/ send decoded objects or decoding error\n\t\t\t\t\tobjects, err := dd.Decode(p.i.(*OSMPBF.Blob))\n\t\t\t\t\toutput <- pair{objects, err}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ send input error as is\n\t\t\t\t\toutput <- pair{nil, p.e}\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(output)\n\t\t}()\n\n\t\tdec.inputs = append(dec.inputs, input)\n\t\tdec.outputs = append(dec.outputs, output)\n\t}\n\n\t\/\/ start reading OSMData\n\tgo func() {\n\t\tvar inputIndex int\n\t\tfor {\n\t\t\tinput := dec.inputs[inputIndex]\n\t\t\tinputIndex = (inputIndex + 1) % n\n\n\t\t\tblobHeader, blob, err = dec.readFileBlock()\n\t\t\tif err == nil && blobHeader.GetType() != \"OSMData\" {\n\t\t\t\terr = fmt.Errorf(\"unexpected fileblock of type %s\", blobHeader.GetType())\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\t\/\/ send blob for decoding\n\t\t\t\tinput <- pair{blob, nil}\n\t\t\t} else {\n\t\t\t\t\/\/ send input error as is\n\t\t\t\tinput <- pair{nil, err}\n\t\t\t\tfor _, input := range dec.inputs {\n\t\t\t\t\tclose(input)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tvar outputIndex int\n\t\tfor {\n\t\t\toutput := dec.outputs[outputIndex]\n\t\t\toutputIndex = (outputIndex + 1) % n\n\n\t\t\tp := <-output\n\t\t\tif p.i != nil {\n\t\t\t\t\/\/ send decoded objects one by one\n\t\t\t\tfor _, o := range p.i.([]interface{}) {\n\t\t\t\t\tdec.serializer <- pair{o, nil}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.e != nil {\n\t\t\t\t\/\/ send input or decoding error\n\t\t\t\tdec.serializer <- pair{nil, p.e}\n\t\t\t\tclose(dec.serializer)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Decode reads the next object from the input stream and returns either a\n\/\/ Node, Way or Relation struct representing the underlying OpenStreetMap PBF\n\/\/ data, or error encountered. The end of the input stream is reported by an io.EOF error.\n\/\/\n\/\/ Decode is safe for parallel execution. Only first error encountered will be returned,\n\/\/ subsequent invocations will return io.EOF.\nfunc (dec *Decoder) Decode() (interface{}, error) {\n\tp, ok := <-dec.serializer\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn p.i, p.e\n}\n\nfunc (dec *Decoder) readFileBlock() (*OSMPBF.BlobHeader, *OSMPBF.Blob, error) {\n\tblobHeaderSize, err := dec.readBlobHeaderSize()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tblobHeader, err := dec.readBlobHeader(blobHeaderSize)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tblob, err := dec.readBlob(blobHeader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn blobHeader, blob, err\n}\n\nfunc (dec *Decoder) readBlobHeaderSize() (uint32, error) {\n\tif _, err := io.ReadFull(dec.r, dec.sizeBuf); err != nil {\n\t\treturn 0, err\n\t}\n\tsize := binary.BigEndian.Uint32(dec.sizeBuf)\n\n\tif size >= maxBlobHeaderSize {\n\t\treturn 0, errors.New(\"BlobHeader size >= 64Kb\")\n\t}\n\treturn size, nil\n}\n\nfunc (dec *Decoder) readBlobHeader(size uint32) (*OSMPBF.BlobHeader, error) {\n\tdec.headerBuf = dec.headerBuf[:size]\n\tif _, err := io.ReadFull(dec.r, dec.headerBuf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tblobHeader := new(OSMPBF.BlobHeader)\n\tif err := proto.Unmarshal(dec.headerBuf, blobHeader); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif blobHeader.GetDatasize() >= maxBlobSize {\n\t\treturn nil, errors.New(\"Blob size >= 32Mb\")\n\t}\n\treturn blobHeader, nil\n}\n\nfunc (dec *Decoder) readBlob(blobHeader *OSMPBF.BlobHeader) (*OSMPBF.Blob, error) {\n\tdec.blobBuf = dec.blobBuf[:blobHeader.GetDatasize()]\n\tif _, err := io.ReadFull(dec.r, dec.blobBuf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tblob := new(OSMPBF.Blob)\n\tif err := proto.Unmarshal(dec.blobBuf, blob); err != nil {\n\t\treturn nil, err\n\t}\n\treturn blob, nil\n}\n\nfunc getData(blob *OSMPBF.Blob) ([]byte, error) {\n\tswitch {\n\tcase blob.Raw != nil:\n\t\treturn blob.GetRaw(), nil\n\n\tcase blob.ZlibData != nil:\n\t\tr, err := zlib.NewReader(bytes.NewReader(blob.GetZlibData()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := bytes.NewBuffer(make([]byte, 0, blob.GetRawSize()+bytes.MinRead))\n\t\t_, err = buf.ReadFrom(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif buf.Len() != int(blob.GetRawSize()) {\n\t\t\terr = fmt.Errorf(\"raw blob data size %d but expected %d\", buf.Len(), blob.GetRawSize())\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\n\tdefault:\n\t\treturn nil, errors.New(\"unknown blob data\")\n\t}\n}\n\nfunc decodeOSMHeader(blob *OSMPBF.Blob) error {\n\tdata, err := getData(blob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theaderBlock := new(OSMPBF.HeaderBlock)\n\tif err := proto.Unmarshal(data, headerBlock); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check we have the parse capabilities\n\trequiredFeatures := headerBlock.GetRequiredFeatures()\n\tfor _, feature := range requiredFeatures {\n\t\tif !parseCapabilities[feature] {\n\t\t\treturn fmt.Errorf(\"parser does not have %s capability\", feature)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Use bytes.Buffer for blob buffer.<commit_after>\/\/ Package osmpbf decodes OpenStreetMap (OSM) PBF files.\n\/\/ Use this package by creating a NewDecoder and passing it a PBF file.\n\/\/ Use Start to start decoding process.\n\/\/ Use Decode to return Node, Way and Relation structs.\npackage osmpbf\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/qedus\/osmpbf\/OSMPBF\"\n)\n\nconst (\n\tmaxBlobHeaderSize = 64 * 1024\n\tmaxBlobSize = 32 * 1024 * 1024\n)\n\nvar (\n\tparseCapabilities = map[string]bool{\n\t\t\"OsmSchema-V0.6\": true,\n\t\t\"DenseNodes\": true,\n\t}\n)\n\ntype Info struct {\n\tVersion int32\n\tTimestamp time.Time\n\tChangeset int64\n\tUid int32\n\tUser string\n\tVisible bool\n}\n\ntype Node struct {\n\tID int64\n\tLat float64\n\tLon float64\n\tTags map[string]string\n\tInfo Info\n}\n\ntype Way struct {\n\tID int64\n\tTags map[string]string\n\tNodeIDs []int64\n\tInfo Info\n}\n\ntype Relation struct {\n\tID int64\n\tTags map[string]string\n\tMembers []Member\n\tInfo Info\n}\n\ntype MemberType int\n\nconst (\n\tNodeType MemberType = iota\n\tWayType\n\tRelationType\n)\n\ntype Member struct {\n\tID int64\n\tType MemberType\n\tRole string\n}\n\ntype pair struct {\n\ti interface{}\n\te error\n}\n\n\/\/ A Decoder reads and decodes OpenStreetMap PBF data from an input stream.\ntype Decoder struct {\n\tsizeBuf []byte\n\theaderBuf []byte\n\tblobBuf *bytes.Buffer\n\n\tr io.Reader\n\tserializer chan pair\n\n\t\/\/ for data decoders\n\tinputs []chan<- pair\n\toutputs []<-chan pair\n}\n\n\/\/ NewDecoder returns a new decoder that reads from r.\nfunc NewDecoder(r io.Reader) *Decoder {\n\treturn &Decoder{\n\t\tsizeBuf: make([]byte, 4),\n\t\theaderBuf: make([]byte, maxBlobHeaderSize),\n\t\tblobBuf: bytes.NewBuffer(make([]byte, 0, maxBlobSize)),\n\t\tr: r,\n\t\tserializer: make(chan pair, 8000), \/\/ typical PrimitiveBlock contains 8k OSM entities\n\t}\n}\n\n\/\/ Start decoding process using n goroutines.\nfunc (dec *Decoder) Start(n int) error {\n\tif n < 1 {\n\t\tn = 1\n\t}\n\n\t\/\/ read OSMHeader\n\tblobHeader, blob, err := dec.readFileBlock()\n\tif err == nil {\n\t\tif blobHeader.GetType() == \"OSMHeader\" {\n\t\t\terr = decodeOSMHeader(blob)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"unexpected first fileblock of type %s\", blobHeader.GetType())\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ start data decoders\n\tfor i := 0; i < n; i++ {\n\t\tinput := make(chan pair)\n\t\toutput := make(chan pair)\n\t\tgo func() {\n\t\t\tdd := new(dataDecoder)\n\t\t\tfor p := range input {\n\t\t\t\tif p.e == nil {\n\t\t\t\t\t\/\/ send decoded objects or decoding error\n\t\t\t\t\tobjects, err := dd.Decode(p.i.(*OSMPBF.Blob))\n\t\t\t\t\toutput <- pair{objects, err}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ send input error as is\n\t\t\t\t\toutput <- pair{nil, p.e}\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(output)\n\t\t}()\n\n\t\tdec.inputs = append(dec.inputs, input)\n\t\tdec.outputs = append(dec.outputs, output)\n\t}\n\n\t\/\/ start reading OSMData\n\tgo func() {\n\t\tvar inputIndex int\n\t\tfor {\n\t\t\tinput := dec.inputs[inputIndex]\n\t\t\tinputIndex = (inputIndex + 1) % n\n\n\t\t\tblobHeader, blob, err = dec.readFileBlock()\n\t\t\tif err == nil && blobHeader.GetType() != \"OSMData\" {\n\t\t\t\terr = fmt.Errorf(\"unexpected fileblock of type %s\", blobHeader.GetType())\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\t\/\/ send blob for decoding\n\t\t\t\tinput <- pair{blob, nil}\n\t\t\t} else {\n\t\t\t\t\/\/ send input error as is\n\t\t\t\tinput <- pair{nil, err}\n\t\t\t\tfor _, input := range dec.inputs {\n\t\t\t\t\tclose(input)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tvar outputIndex int\n\t\tfor {\n\t\t\toutput := dec.outputs[outputIndex]\n\t\t\toutputIndex = (outputIndex + 1) % n\n\n\t\t\tp := <-output\n\t\t\tif p.i != nil {\n\t\t\t\t\/\/ send decoded objects one by one\n\t\t\t\tfor _, o := range p.i.([]interface{}) {\n\t\t\t\t\tdec.serializer <- pair{o, nil}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.e != nil {\n\t\t\t\t\/\/ send input or decoding error\n\t\t\t\tdec.serializer <- pair{nil, p.e}\n\t\t\t\tclose(dec.serializer)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Decode reads the next object from the input stream and returns either a\n\/\/ Node, Way or Relation struct representing the underlying OpenStreetMap PBF\n\/\/ data, or error encountered. The end of the input stream is reported by an io.EOF error.\n\/\/\n\/\/ Decode is safe for parallel execution. Only first error encountered will be returned,\n\/\/ subsequent invocations will return io.EOF.\nfunc (dec *Decoder) Decode() (interface{}, error) {\n\tp, ok := <-dec.serializer\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn p.i, p.e\n}\n\nfunc (dec *Decoder) readFileBlock() (*OSMPBF.BlobHeader, *OSMPBF.Blob, error) {\n\tblobHeaderSize, err := dec.readBlobHeaderSize()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tblobHeader, err := dec.readBlobHeader(blobHeaderSize)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tblob, err := dec.readBlob(blobHeader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn blobHeader, blob, err\n}\n\nfunc (dec *Decoder) readBlobHeaderSize() (uint32, error) {\n\tif _, err := io.ReadFull(dec.r, dec.sizeBuf); err != nil {\n\t\treturn 0, err\n\t}\n\tsize := binary.BigEndian.Uint32(dec.sizeBuf)\n\n\tif size >= maxBlobHeaderSize {\n\t\treturn 0, errors.New(\"BlobHeader size >= 64Kb\")\n\t}\n\treturn size, nil\n}\n\nfunc (dec *Decoder) readBlobHeader(size uint32) (*OSMPBF.BlobHeader, error) {\n\tdec.headerBuf = dec.headerBuf[:size]\n\tif _, err := io.ReadFull(dec.r, dec.headerBuf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tblobHeader := new(OSMPBF.BlobHeader)\n\tif err := proto.Unmarshal(dec.headerBuf, blobHeader); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif blobHeader.GetDatasize() >= maxBlobSize {\n\t\treturn nil, errors.New(\"Blob size >= 32Mb\")\n\t}\n\treturn blobHeader, nil\n}\n\nfunc (dec *Decoder) readBlob(blobHeader *OSMPBF.BlobHeader) (*OSMPBF.Blob, error) {\n\tdec.blobBuf.Reset()\n\tif _, err := io.CopyN(dec.blobBuf, dec.r, int64(blobHeader.GetDatasize())); err != nil {\n\t\treturn nil, err\n\t}\n\n\tblob := new(OSMPBF.Blob)\n\tif err := proto.Unmarshal(dec.blobBuf.Bytes(), blob); err != nil {\n\t\treturn nil, err\n\t}\n\treturn blob, nil\n}\n\nfunc getData(blob *OSMPBF.Blob) ([]byte, error) {\n\tswitch {\n\tcase blob.Raw != nil:\n\t\treturn blob.GetRaw(), nil\n\n\tcase blob.ZlibData != nil:\n\t\tr, err := zlib.NewReader(bytes.NewReader(blob.GetZlibData()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := bytes.NewBuffer(make([]byte, 0, blob.GetRawSize()+bytes.MinRead))\n\t\t_, err = buf.ReadFrom(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif buf.Len() != int(blob.GetRawSize()) {\n\t\t\terr = fmt.Errorf(\"raw blob data size %d but expected %d\", buf.Len(), blob.GetRawSize())\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\n\tdefault:\n\t\treturn nil, errors.New(\"unknown blob data\")\n\t}\n}\n\nfunc decodeOSMHeader(blob *OSMPBF.Blob) error {\n\tdata, err := getData(blob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theaderBlock := new(OSMPBF.HeaderBlock)\n\tif err := proto.Unmarshal(data, headerBlock); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check we have the parse capabilities\n\trequiredFeatures := headerBlock.GetRequiredFeatures()\n\tfor _, feature := range requiredFeatures {\n\t\tif !parseCapabilities[feature] {\n\t\t\treturn fmt.Errorf(\"parser does not have %s capability\", feature)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package tga\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"image\"\n\t\"image\/color\"\n\t\"io\"\n)\n\ntype tga struct {\n\tr *bytes.Reader\n\traw rawHeader\n\tisPaletted bool\n\thasAlpha bool\n\twidth int\n\theight int\n\tpixelSize int\n\tpalette []byte\n\tpaletteLength int\n\tColorModel color.Model\n\ttmp [4]byte\n\tdecode func(tga *tga, out []byte) (err error)\n}\n\n\/\/ Decode decodes a TARGA image.\nfunc Decode(r io.Reader) (outImage image.Image, err error) {\n\tvar tga tga\n\tvar data bytes.Buffer\n\n\tif _, err = data.ReadFrom(r); err != nil {\n\t\treturn\n\t}\n\n\ttga.r = bytes.NewReader(data.Bytes())\n\n\tif err = tga.getHeader(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ skip header\n\tif _, err = tga.r.Seek(int64(tgaRawHeaderSize+tga.raw.IdLength), 0); err != nil {\n\t\treturn\n\t}\n\n\tif tga.isPaletted {\n\t\t\/\/ read palette\n\t\tentrySize := int((tga.raw.PaletteBPP + 1) >> 3)\n\t\ttga.paletteLength = int(tga.raw.PaletteLength - tga.raw.PaletteFirst)\n\t\ttga.palette = make([]byte, entrySize*tga.paletteLength)\n\n\t\t\/\/ skip to colormap\n\t\tif _, err = tga.r.Seek(int64(entrySize)*int64(tga.raw.PaletteFirst), 1); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif _, err = io.ReadFull(tga.r, tga.palette); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\trect := image.Rect(0, 0, tga.width, tga.height)\n\tvar pixels []byte\n\n\t\/\/ choose a right color model\n\tif tga.ColorModel == color.NRGBAModel {\n\t\tim := image.NewNRGBA(rect)\n\t\toutImage = im\n\t\tpixels = im.Pix\n\t} else {\n\t\tim := image.NewRGBA(rect)\n\t\toutImage = im\n\t\tpixels = im.Pix\n\t}\n\n\tif err = tga.decode(&tga, pixels); err == nil {\n\t\ttga.flip(pixels)\n\t}\n\n\treturn\n}\n\n\/\/ DecodeConfig decodes a header of TARGA image and returns its configuration.\nfunc DecodeConfig(r io.Reader) (cfg image.Config, err error) {\n\tvar tga tga\n\tvar data bytes.Buffer\n\n\tif _, err = data.ReadFrom(r); err != nil {\n\t\treturn\n\t}\n\n\ttga.r = bytes.NewReader(data.Bytes())\n\n\tif err = tga.getHeader(); err == nil {\n\t\tcfg = image.Config{\n\t\t\tColorModel: tga.ColorModel,\n\t\t\tWidth: tga.width,\n\t\t\tHeight: tga.height,\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc init() {\n\timage.RegisterFormat(\"tga\", \"\", Decode, DecodeConfig)\n}\n\n\/\/ applyExtensions reads extensions section (if it exists) and parses attribute type.\nfunc (tga *tga) applyExtensions() (err error) {\n\tvar rawFooter rawFooter\n\n\tif _, err = tga.r.Seek(int64(-tgaRawFooterSize), 2); err != nil {\n\t\treturn\n\t} else if err = binary.Read(tga.r, binary.LittleEndian, &rawFooter); err != nil {\n\t\treturn\n\t} else if bytes.Equal(rawFooter.Signature[:], tgaSignature[:]) && rawFooter.ExtAreaOffset != 0 {\n\t\toffset := int64(rawFooter.ExtAreaOffset + extAreaAttrTypeOffset)\n\n\t\tvar n int64\n\t\tvar t byte\n\n\t\tif n, err = tga.r.Seek(offset, 0); err != nil || n != offset {\n\t\t\treturn\n\t\t} else if t, err = tga.r.ReadByte(); err != nil {\n\t\t\treturn\n\t\t} else if t == attrTypeAlpha {\n\t\t\t\/\/ alpha\n\t\t\ttga.hasAlpha = true\n\t\t} else if t == attrTypePremultipliedAlpha {\n\t\t\t\/\/ premultiplied alpha\n\t\t\ttga.hasAlpha = true\n\t\t\ttga.ColorModel = color.RGBAModel\n\t\t} else {\n\t\t\t\/\/ attribute is not an alpha channel value, ignore it\n\t\t\ttga.hasAlpha = false\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ decodeRaw decodes a raw (uncompressed) data.\nfunc decodeRaw(tga *tga, out []byte) (err error) {\n\tfor i := 0; i < len(out) && err == nil; i += 4 {\n\t\terr = tga.getPixel(out[i:])\n\t}\n\n\treturn\n}\n\n\/\/ decodeRLE decodes run-length encoded data.\nfunc decodeRLE(tga *tga, out []byte) (err error) {\n\tsize := tga.width * tga.height * 4\n\n\tfor i := 0; i < size && err == nil; {\n\t\tvar b byte\n\n\t\tif b, err = tga.r.ReadByte(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcount := uint(b)\n\n\t\tif count&(1<<7) != 0 {\n\t\t\t\/\/ encoded packet\n\t\t\tcount &= ^uint(1 << 7)\n\n\t\t\tif err = tga.getPixel(tga.tmp[:]); err == nil {\n\t\t\t\tfor count++; count > 0 && i < size; count-- {\n\t\t\t\t\tcopy(out[i:], tga.tmp[:])\n\t\t\t\t\ti += 4\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ raw packet\n\t\t\tfor count++; count > 0 && i < size && err == nil; count-- {\n\t\t\t\terr = tga.getPixel(out[i:])\n\t\t\t\ti += 4\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ flip flips pixels of image based on its origin.\nfunc (tga *tga) flip(out []byte) {\n\tflipH := tga.raw.Flags&flagOriginRight != 0\n\tflipV := tga.raw.Flags&flagOriginTop == 0\n\trowSize := tga.width * 4\n\n\tif flipH {\n\t\tfor y := 0; y < tga.height; y++ {\n\t\t\tfor x, offset := 0, y*rowSize; x < tga.width\/2; x++ {\n\t\t\t\ta := out[offset+x*4:]\n\t\t\t\tb := out[offset+(tga.width-x-1)*4:]\n\n\t\t\t\ta[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3] = b[0], b[1], b[2], b[3], a[0], a[1], a[2], a[3]\n\t\t\t}\n\t\t}\n\t}\n\n\tif flipV {\n\t\tfor y := 0; y < tga.height\/2; y++ {\n\t\t\tfor x := 0; x < tga.width; x++ {\n\t\t\t\ta := out[y*rowSize+x*4:]\n\t\t\t\tb := out[(tga.height-y-1)*rowSize+x*4:]\n\n\t\t\t\ta[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3] = b[0], b[1], b[2], b[3], a[0], a[1], a[2], a[3]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ getHeader reads and validates TGA header.\nfunc (tga *tga) getHeader() (err error) {\n\tif err = binary.Read(tga.r, binary.LittleEndian, &tga.raw); err != nil {\n\t\treturn\n\t}\n\n\tif tga.raw.ImageType&imageTypeFlagRLE != 0 {\n\t\ttga.decode = decodeRLE\n\t} else {\n\t\ttga.decode = decodeRaw\n\t}\n\n\ttga.raw.ImageType &= imageTypeMask\n\talphaSize := tga.raw.Flags & flagAlphaSizeMask\n\n\tif alphaSize != 0 && alphaSize != 1 && alphaSize != 8 {\n\t\terr = errors.New(\"invalid alpha size\")\n\t\treturn\n\t}\n\n\ttga.hasAlpha = ((alphaSize != 0 || tga.raw.BPP == 32) ||\n\t\t(tga.raw.ImageType == imageTypeMonoChrome && tga.raw.BPP == 16) ||\n\t\t(tga.raw.ImageType == imageTypePaletted && tga.raw.PaletteBPP == 32))\n\n\ttga.width = int(tga.raw.Width)\n\ttga.height = int(tga.raw.Height)\n\ttga.pixelSize = int(tga.raw.BPP) >> 3\n\n\t\/\/ default is NOT premultiplied alpha model\n\ttga.ColorModel = color.NRGBAModel\n\n\tif err = tga.applyExtensions(); err != nil {\n\t\treturn\n\t}\n\n\tvar formatIsInvalid bool\n\n\tswitch tga.raw.ImageType {\n\tcase imageTypePaletted:\n\t\tformatIsInvalid = (tga.raw.PaletteType != 1 ||\n\t\t\ttga.raw.BPP != 8 ||\n\t\t\ttga.raw.PaletteFirst >= tga.raw.PaletteLength ||\n\t\t\t(tga.raw.PaletteBPP != 15 && tga.raw.PaletteBPP != 16 && tga.raw.PaletteBPP != 24 && tga.raw.PaletteBPP != 32))\n\t\ttga.isPaletted = true\n\n\tcase imageTypeTrueColor:\n\t\tformatIsInvalid = (tga.raw.BPP != 32 &&\n\t\t\ttga.raw.BPP != 16 &&\n\t\t\t(tga.raw.BPP != 24 || tga.hasAlpha))\n\n\tcase imageTypeMonoChrome:\n\t\tformatIsInvalid = ((tga.hasAlpha && tga.raw.BPP != 16) ||\n\t\t\t(!tga.hasAlpha && tga.raw.BPP != 8))\n\n\tdefault:\n\t\terr = errors.New(\"invalid or unsupported image type\")\n\t}\n\n\tif err == nil && formatIsInvalid {\n\t\terr = errors.New(\"invalid image format\")\n\t}\n\n\treturn\n}\n\nfunc (tga *tga) getPixel(dst []byte) (err error) {\n\tvar R, G, B, A uint8 = 0xff, 0xff, 0xff, 0xff\n\tsrc := tga.tmp\n\n\tif _, err = io.ReadFull(tga.r, src[0:tga.pixelSize]); err != nil {\n\t\treturn\n\t}\n\n\tswitch tga.pixelSize {\n\tcase 4:\n\t\tif tga.hasAlpha {\n\t\t\tA = src[3]\n\t\t}\n\t\tfallthrough\n\n\tcase 3:\n\t\tB, G, R = src[0], src[1], src[2]\n\n\tcase 2:\n\t\tif tga.raw.ImageType == imageTypeMonoChrome {\n\t\t\tB, G, R = src[0], src[0], src[0]\n\n\t\t\tif tga.hasAlpha {\n\t\t\t\tA = src[1]\n\t\t\t}\n\t\t} else {\n\t\t\tword := uint16(src[0]) | (uint16(src[1]) << 8)\n\t\t\tB, G, R = wordToBGR(word)\n\n\t\t\tif tga.hasAlpha && (word&(1<<15)) == 0 {\n\t\t\t\tA = 0\n\t\t\t}\n\t\t}\n\n\tcase 1:\n\t\tif tga.isPaletted {\n\t\t\tindex := int(src[0])\n\n\t\t\tif int(index) >= tga.paletteLength {\n\t\t\t\treturn errors.New(\"palette index out of range\")\n\t\t\t}\n\n\t\t\tvar m int\n\n\t\t\tif tga.raw.PaletteBPP == 24 {\n\t\t\t\tm = index * 3\n\t\t\t\tB, G, R = tga.palette[m+0], tga.palette[m+1], tga.palette[m+2]\n\t\t\t} else if tga.raw.PaletteBPP == 32 {\n\t\t\t\tm = index * 4\n\t\t\t\tB, G, R = tga.palette[m+0], tga.palette[m+1], tga.palette[m+2]\n\n\t\t\t\tif tga.hasAlpha {\n\t\t\t\t\tA = tga.palette[m+3]\n\t\t\t\t}\n\t\t\t} else if tga.raw.PaletteBPP == 16 {\n\t\t\t\tm = index * 2\n\t\t\t\tword := uint16(tga.palette[m+0]) | (uint16(tga.palette[m+1]) << 8)\n\t\t\t\tB, G, R = wordToBGR(word)\n\t\t\t}\n\t\t} else {\n\t\t\tB, G, R = src[0], src[0], src[0]\n\t\t}\n\t}\n\n\tdst[0], dst[1], dst[2], dst[3] = R, G, B, A\n\n\treturn nil\n}\n\n\/\/ wordToBGR converts 15-bit color to BGR\nfunc wordToBGR(word uint16) (B, G, R uint8) {\n\tB = uint8((word >> 0) & 31)\n\tB = uint8((B << 3) + (B >> 2))\n\tG = uint8((word >> 5) & 31)\n\tG = uint8((G << 3) + (G >> 2))\n\tR = uint8((word >> 10) & 31)\n\tR = uint8((R << 3) + (R >> 2))\n\treturn\n}\n<commit_msg>decode: better error handling<commit_after>package tga\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"io\"\n)\n\ntype tga struct {\n\tr *bytes.Reader\n\traw rawHeader\n\tisPaletted bool\n\thasAlpha bool\n\twidth int\n\theight int\n\tpixelSize int\n\tpalette []byte\n\tpaletteLength int\n\tColorModel color.Model\n\ttmp [4]byte\n\tdecode func(tga *tga, out []byte) (err error)\n}\n\nvar (\n\tErrAlphaSize = errors.New(\"tga: invalid alpha size\")\n\tErrFormat = errors.New(\"tga: invalid format\")\n\tErrPaletteIndex = errors.New(\"tga: palette index out of range\")\n)\n\n\/\/ Decode decodes a TARGA image.\nfunc Decode(r io.Reader) (outImage image.Image, err error) {\n\tvar tga tga\n\tvar data bytes.Buffer\n\n\tif _, err = data.ReadFrom(r); err != nil {\n\t\treturn\n\t}\n\n\ttga.r = bytes.NewReader(data.Bytes())\n\n\tif err = tga.getHeader(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ skip header\n\tif _, err = tga.r.Seek(int64(tgaRawHeaderSize+tga.raw.IdLength), 0); err != nil {\n\t\treturn\n\t}\n\n\tif tga.isPaletted {\n\t\t\/\/ read palette\n\t\tentrySize := int((tga.raw.PaletteBPP + 1) >> 3)\n\t\ttga.paletteLength = int(tga.raw.PaletteLength - tga.raw.PaletteFirst)\n\t\ttga.palette = make([]byte, entrySize*tga.paletteLength)\n\n\t\t\/\/ skip to colormap\n\t\tif _, err = tga.r.Seek(int64(entrySize)*int64(tga.raw.PaletteFirst), 1); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif _, err = io.ReadFull(tga.r, tga.palette); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\trect := image.Rect(0, 0, tga.width, tga.height)\n\tvar pixels []byte\n\n\t\/\/ choose a right color model\n\tif tga.ColorModel == color.NRGBAModel {\n\t\tim := image.NewNRGBA(rect)\n\t\toutImage = im\n\t\tpixels = im.Pix\n\t} else {\n\t\tim := image.NewRGBA(rect)\n\t\toutImage = im\n\t\tpixels = im.Pix\n\t}\n\n\tif err = tga.decode(&tga, pixels); err == nil {\n\t\ttga.flip(pixels)\n\t}\n\n\treturn\n}\n\n\/\/ DecodeConfig decodes a header of TARGA image and returns its configuration.\nfunc DecodeConfig(r io.Reader) (cfg image.Config, err error) {\n\tvar tga tga\n\tvar data bytes.Buffer\n\n\tif _, err = data.ReadFrom(r); err != nil {\n\t\treturn\n\t}\n\n\ttga.r = bytes.NewReader(data.Bytes())\n\n\tif err = tga.getHeader(); err == nil {\n\t\tcfg = image.Config{\n\t\t\tColorModel: tga.ColorModel,\n\t\t\tWidth: tga.width,\n\t\t\tHeight: tga.height,\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc init() {\n\timage.RegisterFormat(\"tga\", \"\", Decode, DecodeConfig)\n}\n\n\/\/ applyExtensions reads extensions section (if it exists) and parses attribute type.\nfunc (tga *tga) applyExtensions() (err error) {\n\tvar rawFooter rawFooter\n\n\tif _, err = tga.r.Seek(int64(-tgaRawFooterSize), 2); err != nil {\n\t\treturn\n\t} else if err = binary.Read(tga.r, binary.LittleEndian, &rawFooter); err != nil {\n\t\treturn\n\t} else if bytes.Equal(rawFooter.Signature[:], tgaSignature[:]) && rawFooter.ExtAreaOffset != 0 {\n\t\toffset := int64(rawFooter.ExtAreaOffset + extAreaAttrTypeOffset)\n\n\t\tvar n int64\n\t\tvar t byte\n\n\t\tif n, err = tga.r.Seek(offset, 0); err != nil || n != offset {\n\t\t\treturn\n\t\t} else if t, err = tga.r.ReadByte(); err != nil {\n\t\t\treturn\n\t\t} else if t == attrTypeAlpha {\n\t\t\t\/\/ alpha\n\t\t\ttga.hasAlpha = true\n\t\t} else if t == attrTypePremultipliedAlpha {\n\t\t\t\/\/ premultiplied alpha\n\t\t\ttga.hasAlpha = true\n\t\t\ttga.ColorModel = color.RGBAModel\n\t\t} else {\n\t\t\t\/\/ attribute is not an alpha channel value, ignore it\n\t\t\ttga.hasAlpha = false\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ decodeRaw decodes a raw (uncompressed) data.\nfunc decodeRaw(tga *tga, out []byte) (err error) {\n\tfor i := 0; i < len(out) && err == nil; i += 4 {\n\t\terr = tga.getPixel(out[i:])\n\t}\n\n\treturn\n}\n\n\/\/ decodeRLE decodes run-length encoded data.\nfunc decodeRLE(tga *tga, out []byte) (err error) {\n\tsize := tga.width * tga.height * 4\n\n\tfor i := 0; i < size && err == nil; {\n\t\tvar b byte\n\n\t\tif b, err = tga.r.ReadByte(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tcount := uint(b)\n\n\t\tif count&(1<<7) != 0 {\n\t\t\t\/\/ encoded packet\n\t\t\tcount &= ^uint(1 << 7)\n\n\t\t\tif err = tga.getPixel(tga.tmp[:]); err == nil {\n\t\t\t\tfor count++; count > 0 && i < size; count-- {\n\t\t\t\t\tcopy(out[i:], tga.tmp[:])\n\t\t\t\t\ti += 4\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ raw packet\n\t\t\tfor count++; count > 0 && i < size && err == nil; count-- {\n\t\t\t\terr = tga.getPixel(out[i:])\n\t\t\t\ti += 4\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ flip flips pixels of image based on its origin.\nfunc (tga *tga) flip(out []byte) {\n\tflipH := tga.raw.Flags&flagOriginRight != 0\n\tflipV := tga.raw.Flags&flagOriginTop == 0\n\trowSize := tga.width * 4\n\n\tif flipH {\n\t\tfor y := 0; y < tga.height; y++ {\n\t\t\tfor x, offset := 0, y*rowSize; x < tga.width\/2; x++ {\n\t\t\t\ta := out[offset+x*4:]\n\t\t\t\tb := out[offset+(tga.width-x-1)*4:]\n\n\t\t\t\ta[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3] = b[0], b[1], b[2], b[3], a[0], a[1], a[2], a[3]\n\t\t\t}\n\t\t}\n\t}\n\n\tif flipV {\n\t\tfor y := 0; y < tga.height\/2; y++ {\n\t\t\tfor x := 0; x < tga.width; x++ {\n\t\t\t\ta := out[y*rowSize+x*4:]\n\t\t\t\tb := out[(tga.height-y-1)*rowSize+x*4:]\n\n\t\t\t\ta[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3] = b[0], b[1], b[2], b[3], a[0], a[1], a[2], a[3]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ getHeader reads and validates TGA header.\nfunc (tga *tga) getHeader() (err error) {\n\tif err = binary.Read(tga.r, binary.LittleEndian, &tga.raw); err != nil {\n\t\treturn\n\t}\n\n\tif tga.raw.ImageType&imageTypeFlagRLE != 0 {\n\t\ttga.decode = decodeRLE\n\t} else {\n\t\ttga.decode = decodeRaw\n\t}\n\n\ttga.raw.ImageType &= imageTypeMask\n\talphaSize := tga.raw.Flags & flagAlphaSizeMask\n\n\tif alphaSize != 0 && alphaSize != 1 && alphaSize != 8 {\n\t\terr = ErrAlphaSize\n\t\treturn\n\t}\n\n\ttga.hasAlpha = ((alphaSize != 0 || tga.raw.BPP == 32) ||\n\t\t(tga.raw.ImageType == imageTypeMonoChrome && tga.raw.BPP == 16) ||\n\t\t(tga.raw.ImageType == imageTypePaletted && tga.raw.PaletteBPP == 32))\n\n\ttga.width = int(tga.raw.Width)\n\ttga.height = int(tga.raw.Height)\n\ttga.pixelSize = int(tga.raw.BPP) >> 3\n\n\t\/\/ default is NOT premultiplied alpha model\n\ttga.ColorModel = color.NRGBAModel\n\n\tif err = tga.applyExtensions(); err != nil {\n\t\treturn\n\t}\n\n\tvar formatIsInvalid bool\n\n\tswitch tga.raw.ImageType {\n\tcase imageTypePaletted:\n\t\tformatIsInvalid = (tga.raw.PaletteType != 1 ||\n\t\t\ttga.raw.BPP != 8 ||\n\t\t\ttga.raw.PaletteFirst >= tga.raw.PaletteLength ||\n\t\t\t(tga.raw.PaletteBPP != 15 && tga.raw.PaletteBPP != 16 && tga.raw.PaletteBPP != 24 && tga.raw.PaletteBPP != 32))\n\t\ttga.isPaletted = true\n\n\tcase imageTypeTrueColor:\n\t\tformatIsInvalid = (tga.raw.BPP != 32 &&\n\t\t\ttga.raw.BPP != 16 &&\n\t\t\t(tga.raw.BPP != 24 || tga.hasAlpha))\n\n\tcase imageTypeMonoChrome:\n\t\tformatIsInvalid = ((tga.hasAlpha && tga.raw.BPP != 16) ||\n\t\t\t(!tga.hasAlpha && tga.raw.BPP != 8))\n\n\tdefault:\n\t\terr = fmt.Errorf(\"tga: unknown image type %d\", tga.raw.ImageType)\n\t}\n\n\tif err == nil && formatIsInvalid {\n\t\terr = ErrFormat\n\t}\n\n\treturn\n}\n\nfunc (tga *tga) getPixel(dst []byte) (err error) {\n\tvar R, G, B, A uint8 = 0xff, 0xff, 0xff, 0xff\n\tsrc := tga.tmp\n\n\tif _, err = io.ReadFull(tga.r, src[0:tga.pixelSize]); err != nil {\n\t\treturn\n\t}\n\n\tswitch tga.pixelSize {\n\tcase 4:\n\t\tif tga.hasAlpha {\n\t\t\tA = src[3]\n\t\t}\n\t\tfallthrough\n\n\tcase 3:\n\t\tB, G, R = src[0], src[1], src[2]\n\n\tcase 2:\n\t\tif tga.raw.ImageType == imageTypeMonoChrome {\n\t\t\tB, G, R = src[0], src[0], src[0]\n\n\t\t\tif tga.hasAlpha {\n\t\t\t\tA = src[1]\n\t\t\t}\n\t\t} else {\n\t\t\tword := uint16(src[0]) | (uint16(src[1]) << 8)\n\t\t\tB, G, R = wordToBGR(word)\n\n\t\t\tif tga.hasAlpha && (word&(1<<15)) == 0 {\n\t\t\t\tA = 0\n\t\t\t}\n\t\t}\n\n\tcase 1:\n\t\tif tga.isPaletted {\n\t\t\tindex := int(src[0])\n\n\t\t\tif int(index) >= tga.paletteLength {\n\t\t\t\treturn ErrPaletteIndex\n\t\t\t}\n\n\t\t\tvar m int\n\n\t\t\tif tga.raw.PaletteBPP == 24 {\n\t\t\t\tm = index * 3\n\t\t\t\tB, G, R = tga.palette[m+0], tga.palette[m+1], tga.palette[m+2]\n\t\t\t} else if tga.raw.PaletteBPP == 32 {\n\t\t\t\tm = index * 4\n\t\t\t\tB, G, R = tga.palette[m+0], tga.palette[m+1], tga.palette[m+2]\n\n\t\t\t\tif tga.hasAlpha {\n\t\t\t\t\tA = tga.palette[m+3]\n\t\t\t\t}\n\t\t\t} else if tga.raw.PaletteBPP == 16 {\n\t\t\t\tm = index * 2\n\t\t\t\tword := uint16(tga.palette[m+0]) | (uint16(tga.palette[m+1]) << 8)\n\t\t\t\tB, G, R = wordToBGR(word)\n\t\t\t}\n\t\t} else {\n\t\t\tB, G, R = src[0], src[0], src[0]\n\t\t}\n\t}\n\n\tdst[0], dst[1], dst[2], dst[3] = R, G, B, A\n\n\treturn nil\n}\n\n\/\/ wordToBGR converts 15-bit color to BGR\nfunc wordToBGR(word uint16) (B, G, R uint8) {\n\tB = uint8((word >> 0) & 31)\n\tB = uint8((B << 3) + (B >> 2))\n\tG = uint8((word >> 5) & 31)\n\tG = uint8((G << 3) + (G >> 2))\n\tR = uint8((word >> 10) & 31)\n\tR = uint8((R << 3) + (R >> 2))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vclib\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Datacenter extends the govmomi Datacenter object\ntype Datacenter struct {\n\t*object.Datacenter\n}\n\n\/\/ GetDatacenter returns the DataCenter Object for the given datacenterPath\n\/\/ If datacenter is located in a folder, include full path to datacenter else just provide the datacenter name\nfunc GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) {\n\tfinder := find.NewFinder(connection.GoVmomiClient.Client, true)\n\tdatacenter, err := finder.Datacenter(ctx, datacenterPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find the datacenter: %s. err: %+v\", datacenterPath, err)\n\t\treturn nil, err\n\t}\n\tdc := Datacenter{datacenter}\n\treturn &dc, nil\n}\n\n\/\/ GetAllDatacenter returns all the DataCenter Objects\nfunc GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) {\n\tvar dc []*Datacenter\n\tfinder := find.NewFinder(connection.GoVmomiClient.Client, true)\n\tdatacenters, err := finder.DatacenterList(ctx, \"*\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find the datacenter. err: %+v\", err)\n\t\treturn nil, err\n\t}\n\tfor _, datacenter := range datacenters {\n\t\tdc = append(dc, &(Datacenter{datacenter}))\n\t}\n\n\treturn dc, nil\n}\n\n\/\/ GetVMByUUID gets the VM object from the given vmUUID\nfunc (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) {\n\ts := object.NewSearchIndex(dc.Client())\n\tvmUUID = strings.ToLower(strings.TrimSpace(vmUUID))\n\tsvm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find VM by UUID. VM UUID: %s, err: %+v\", vmUUID, err)\n\t\treturn nil, err\n\t}\n\tif svm == nil {\n\t\tglog.Errorf(\"Unable to find VM by UUID. VM UUID: %s\", vmUUID)\n\t\treturn nil, ErrNoVMFound\n\t}\n\tvirtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}\n\treturn &virtualMachine, nil\n}\n\n\/\/ GetVMByPath gets the VM object from the given vmPath\n\/\/ vmPath should be the full path to VM and not just the name\nfunc (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {\n\tfinder := getFinder(dc)\n\tvm, err := finder.VirtualMachine(ctx, vmPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find VM by Path. VM Path: %s, err: %+v\", vmPath, err)\n\t\treturn nil, err\n\t}\n\tvirtualMachine := VirtualMachine{vm, dc}\n\treturn &virtualMachine, nil\n}\n\n\/\/ GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in\n\/\/ the datacenter.\nfunc (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) {\n\tfinder := getFinder(dc)\n\tdatastores, err := finder.DatastoreList(ctx, \"*\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get all the datastores. err: %+v\", err)\n\t\treturn nil, err\n\t}\n\tvar dsList []types.ManagedObjectReference\n\tfor _, ds := range datastores {\n\t\tdsList = append(dsList, ds.Reference())\n\t}\n\n\tvar dsMoList []mo.Datastore\n\tpc := property.DefaultCollector(dc.Client())\n\tproperties := []string{DatastoreInfoProperty}\n\terr = pc.Retrieve(ctx, dsList, properties, &dsMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get Datastore managed objects from datastore objects.\"+\n\t\t\t\" dsObjList: %+v, properties: %+v, err: %v\", dsList, properties, err)\n\t\treturn nil, err\n\t}\n\n\tdsURLInfoMap := make(map[string]*DatastoreInfo)\n\tfor _, dsMo := range dsMoList {\n\t\tdsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{\n\t\t\t&Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()),\n\t\t\t\tdc},\n\t\t\tdsMo.Info.GetDatastoreInfo()}\n\t}\n\tglog.V(9).Infof(\"dsURLInfoMap : %+v\", dsURLInfoMap)\n\treturn dsURLInfoMap, nil\n}\n\n\/\/ GetDatastoreByPath gets the Datastore object from the given vmDiskPath\nfunc (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) {\n\tdatastorePathObj := new(object.DatastorePath)\n\tisSuccess := datastorePathObj.FromString(vmDiskPath)\n\tif !isSuccess {\n\t\tglog.Errorf(\"Failed to parse vmDiskPath: %s\", vmDiskPath)\n\t\treturn nil, errors.New(\"Failed to parse vmDiskPath\")\n\t}\n\tfinder := getFinder(dc)\n\tds, err := finder.Datastore(ctx, datastorePathObj.Datastore)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed while searching for datastore: %s. err: %+v\", datastorePathObj.Datastore, err)\n\t\treturn nil, err\n\t}\n\tdatastore := Datastore{ds, dc}\n\treturn &datastore, nil\n}\n\n\/\/ GetDatastoreByName gets the Datastore object for the given datastore name\nfunc (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) {\n\tfinder := getFinder(dc)\n\tds, err := finder.Datastore(ctx, name)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed while searching for datastore: %s. err: %+v\", name, err)\n\t\treturn nil, err\n\t}\n\tdatastore := Datastore{ds, dc}\n\treturn &datastore, nil\n}\n\n\/\/ GetResourcePool gets the resource pool for the given path\nfunc (dc *Datacenter) GetResourcePool(ctx context.Context, computePath string) (*object.ResourcePool, error) {\n\tfinder := getFinder(dc)\n\tvar computeResource *object.ComputeResource\n\tvar err error\n\tif computePath == \"\" {\n\t\tcomputeResource, err = finder.DefaultComputeResource(ctx)\n\t} else {\n\t\tcomputeResource, err = finder.ComputeResource(ctx, computePath)\n\t}\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get the ResourcePool for computePath '%s'. err: %+v\", computePath, err)\n\t\treturn nil, err\n\t}\n\treturn computeResource.ResourcePool(ctx)\n}\n\n\/\/ GetFolderByPath gets the Folder Object from the given folder path\n\/\/ folderPath should be the full path to folder\nfunc (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) {\n\tfinder := getFinder(dc)\n\tvmFolder, err := finder.Folder(ctx, folderPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get the folder reference for %s. err: %+v\", folderPath, err)\n\t\treturn nil, err\n\t}\n\tfolder := Folder{vmFolder, dc}\n\treturn &folder, nil\n}\n\n\/\/ GetVMMoList gets the VM Managed Objects with the given properties from the VM object\nfunc (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) {\n\tvar vmMoList []mo.VirtualMachine\n\tvar vmRefs []types.ManagedObjectReference\n\tif len(vmObjList) < 1 {\n\t\tglog.Errorf(\"VirtualMachine Object list is empty\")\n\t\treturn nil, fmt.Errorf(\"VirtualMachine Object list is empty\")\n\t}\n\n\tfor _, vmObj := range vmObjList {\n\t\tvmRefs = append(vmRefs, vmObj.Reference())\n\t}\n\tpc := property.DefaultCollector(dc.Client())\n\terr := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v\", vmObjList, properties, err)\n\t\treturn nil, err\n\t}\n\treturn vmMoList, nil\n}\n\n\/\/ GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath\nfunc (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) {\n\tif len(diskPath) > 0 && filepath.Ext(diskPath) != \".vmdk\" {\n\t\tdiskPath += \".vmdk\"\n\t}\n\tvdm := object.NewVirtualDiskManager(dc.Client())\n\t\/\/ Returns uuid of vmdk virtual disk\n\tdiskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter)\n\n\tif err != nil {\n\t\tglog.Warningf(\"QueryVirtualDiskUuid failed for diskPath: %q. err: %+v\", diskPath, err)\n\t\treturn \"\", err\n\t}\n\tdiskUUID = formatVirtualDiskUUID(diskUUID)\n\treturn diskUUID, nil\n}\n\n\/\/ GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects\nfunc (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) {\n\tvar dsMoList []mo.Datastore\n\tvar dsRefs []types.ManagedObjectReference\n\tif len(dsObjList) < 1 {\n\t\tglog.Errorf(\"Datastore Object list is empty\")\n\t\treturn nil, fmt.Errorf(\"Datastore Object list is empty\")\n\t}\n\n\tfor _, dsObj := range dsObjList {\n\t\tdsRefs = append(dsRefs, dsObj.Reference())\n\t}\n\tpc := property.DefaultCollector(dc.Client())\n\terr := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v\", dsObjList, properties, err)\n\t\treturn nil, err\n\t}\n\treturn dsMoList, nil\n}\n\n\/\/ CheckDisksAttached checks if the disk is attached to node.\n\/\/ This is done by comparing the volume path with the backing.FilePath on the VM Virtual disk devices.\nfunc (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) {\n\tattached := make(map[string]map[string]bool)\n\tvar vmList []*VirtualMachine\n\tfor nodeName, volPaths := range nodeVolumes {\n\t\tfor _, volPath := range volPaths {\n\t\t\tsetNodeVolumeMap(attached, volPath, nodeName, false)\n\t\t}\n\t\tvm, err := dc.GetVMByPath(ctx, nodeName)\n\t\tif err != nil {\n\t\t\tif IsNotFound(err) {\n\t\t\t\tglog.Warningf(\"Node %q does not exist, vSphere CP will assume disks %v are not attached to it.\", nodeName, volPaths)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tvmList = append(vmList, vm)\n\t}\n\tif len(vmList) == 0 {\n\t\tglog.V(2).Infof(\"vSphere CP will assume no disks are attached to any node.\")\n\t\treturn attached, nil\n\t}\n\tvmMoList, err := dc.GetVMMoList(ctx, vmList, []string{\"config.hardware.device\", \"name\"})\n\tif err != nil {\n\t\t\/\/ When there is an error fetching instance information\n\t\t\/\/ it is safer to return nil and let volume information not be touched.\n\t\tglog.Errorf(\"Failed to get VM Managed object for nodes: %+v. err: +%v\", vmList, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, vmMo := range vmMoList {\n\t\tif vmMo.Config == nil {\n\t\t\tglog.Errorf(\"Config is not available for VM: %q\", vmMo.Name)\n\t\t\tcontinue\n\t\t}\n\t\tfor nodeName, volPaths := range nodeVolumes {\n\t\t\tif nodeName == vmMo.Name {\n\t\t\t\tverifyVolumePathsForVM(vmMo, volPaths, attached)\n\t\t\t}\n\t\t}\n\t}\n\treturn attached, nil\n}\n\n\/\/ VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM.\nfunc verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) {\n\t\/\/ Verify if the volume paths are present on the VM backing virtual disk devices\n\tfor _, volPath := range volPaths {\n\t\tvmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)\n\t\tfor _, device := range vmDevices {\n\t\t\tif vmDevices.TypeName(device) == \"VirtualDisk\" {\n\t\t\t\tvirtualDevice := device.GetVirtualDevice()\n\t\t\t\tif backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {\n\t\t\t\t\tif backing.FileName == volPath {\n\t\t\t\t\t\tsetNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc setNodeVolumeMap(\n\tnodeVolumeMap map[string]map[string]bool,\n\tvolumePath string,\n\tnodeName string,\n\tcheck bool) {\n\tvolumeMap := nodeVolumeMap[nodeName]\n\tif volumeMap == nil {\n\t\tvolumeMap = make(map[string]bool)\n\t\tnodeVolumeMap[nodeName] = volumeMap\n\t}\n\tvolumeMap[volumePath] = check\n}\n<commit_msg>[cloudprovider]should reuse code rather than rewrite it<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vclib\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Datacenter extends the govmomi Datacenter object\ntype Datacenter struct {\n\t*object.Datacenter\n}\n\n\/\/ GetDatacenter returns the DataCenter Object for the given datacenterPath\n\/\/ If datacenter is located in a folder, include full path to datacenter else just provide the datacenter name\nfunc GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) {\n\tfinder := find.NewFinder(connection.GoVmomiClient.Client, true)\n\tdatacenter, err := finder.Datacenter(ctx, datacenterPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find the datacenter: %s. err: %+v\", datacenterPath, err)\n\t\treturn nil, err\n\t}\n\tdc := Datacenter{datacenter}\n\treturn &dc, nil\n}\n\n\/\/ GetAllDatacenter returns all the DataCenter Objects\nfunc GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) {\n\tvar dc []*Datacenter\n\tfinder := find.NewFinder(connection.GoVmomiClient.Client, true)\n\tdatacenters, err := finder.DatacenterList(ctx, \"*\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find the datacenter. err: %+v\", err)\n\t\treturn nil, err\n\t}\n\tfor _, datacenter := range datacenters {\n\t\tdc = append(dc, &(Datacenter{datacenter}))\n\t}\n\n\treturn dc, nil\n}\n\n\/\/ GetVMByUUID gets the VM object from the given vmUUID\nfunc (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) {\n\ts := object.NewSearchIndex(dc.Client())\n\tvmUUID = strings.ToLower(strings.TrimSpace(vmUUID))\n\tsvm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find VM by UUID. VM UUID: %s, err: %+v\", vmUUID, err)\n\t\treturn nil, err\n\t}\n\tif svm == nil {\n\t\tglog.Errorf(\"Unable to find VM by UUID. VM UUID: %s\", vmUUID)\n\t\treturn nil, ErrNoVMFound\n\t}\n\tvirtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}\n\treturn &virtualMachine, nil\n}\n\n\/\/ GetVMByPath gets the VM object from the given vmPath\n\/\/ vmPath should be the full path to VM and not just the name\nfunc (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {\n\tfinder := getFinder(dc)\n\tvm, err := finder.VirtualMachine(ctx, vmPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to find VM by Path. VM Path: %s, err: %+v\", vmPath, err)\n\t\treturn nil, err\n\t}\n\tvirtualMachine := VirtualMachine{vm, dc}\n\treturn &virtualMachine, nil\n}\n\n\/\/ GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in\n\/\/ the datacenter.\nfunc (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) {\n\tfinder := getFinder(dc)\n\tdatastores, err := finder.DatastoreList(ctx, \"*\")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get all the datastores. err: %+v\", err)\n\t\treturn nil, err\n\t}\n\tvar dsList []types.ManagedObjectReference\n\tfor _, ds := range datastores {\n\t\tdsList = append(dsList, ds.Reference())\n\t}\n\n\tvar dsMoList []mo.Datastore\n\tpc := property.DefaultCollector(dc.Client())\n\tproperties := []string{DatastoreInfoProperty}\n\terr = pc.Retrieve(ctx, dsList, properties, &dsMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get Datastore managed objects from datastore objects.\"+\n\t\t\t\" dsObjList: %+v, properties: %+v, err: %v\", dsList, properties, err)\n\t\treturn nil, err\n\t}\n\n\tdsURLInfoMap := make(map[string]*DatastoreInfo)\n\tfor _, dsMo := range dsMoList {\n\t\tdsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{\n\t\t\t&Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()),\n\t\t\t\tdc},\n\t\t\tdsMo.Info.GetDatastoreInfo()}\n\t}\n\tglog.V(9).Infof(\"dsURLInfoMap : %+v\", dsURLInfoMap)\n\treturn dsURLInfoMap, nil\n}\n\n\/\/ GetDatastoreByPath gets the Datastore object from the given vmDiskPath\nfunc (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) {\n\tdatastorePathObj := new(object.DatastorePath)\n\tisSuccess := datastorePathObj.FromString(vmDiskPath)\n\tif !isSuccess {\n\t\tglog.Errorf(\"Failed to parse vmDiskPath: %s\", vmDiskPath)\n\t\treturn nil, errors.New(\"Failed to parse vmDiskPath\")\n\t}\n\n\treturn dc.GetDatastoreByName(ctx, datastorePathObj.Datastore)\n}\n\n\/\/ GetDatastoreByName gets the Datastore object for the given datastore name\nfunc (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) {\n\tfinder := getFinder(dc)\n\tds, err := finder.Datastore(ctx, name)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed while searching for datastore: %s. err: %+v\", name, err)\n\t\treturn nil, err\n\t}\n\tdatastore := Datastore{ds, dc}\n\treturn &datastore, nil\n}\n\n\/\/ GetResourcePool gets the resource pool for the given path\nfunc (dc *Datacenter) GetResourcePool(ctx context.Context, computePath string) (*object.ResourcePool, error) {\n\tfinder := getFinder(dc)\n\tvar computeResource *object.ComputeResource\n\tvar err error\n\tif computePath == \"\" {\n\t\tcomputeResource, err = finder.DefaultComputeResource(ctx)\n\t} else {\n\t\tcomputeResource, err = finder.ComputeResource(ctx, computePath)\n\t}\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get the ResourcePool for computePath '%s'. err: %+v\", computePath, err)\n\t\treturn nil, err\n\t}\n\treturn computeResource.ResourcePool(ctx)\n}\n\n\/\/ GetFolderByPath gets the Folder Object from the given folder path\n\/\/ folderPath should be the full path to folder\nfunc (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) {\n\tfinder := getFinder(dc)\n\tvmFolder, err := finder.Folder(ctx, folderPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get the folder reference for %s. err: %+v\", folderPath, err)\n\t\treturn nil, err\n\t}\n\tfolder := Folder{vmFolder, dc}\n\treturn &folder, nil\n}\n\n\/\/ GetVMMoList gets the VM Managed Objects with the given properties from the VM object\nfunc (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) {\n\tvar vmMoList []mo.VirtualMachine\n\tvar vmRefs []types.ManagedObjectReference\n\tif len(vmObjList) < 1 {\n\t\tglog.Errorf(\"VirtualMachine Object list is empty\")\n\t\treturn nil, fmt.Errorf(\"VirtualMachine Object list is empty\")\n\t}\n\n\tfor _, vmObj := range vmObjList {\n\t\tvmRefs = append(vmRefs, vmObj.Reference())\n\t}\n\tpc := property.DefaultCollector(dc.Client())\n\terr := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v\", vmObjList, properties, err)\n\t\treturn nil, err\n\t}\n\treturn vmMoList, nil\n}\n\n\/\/ GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath\nfunc (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) {\n\tif len(diskPath) > 0 && filepath.Ext(diskPath) != \".vmdk\" {\n\t\tdiskPath += \".vmdk\"\n\t}\n\tvdm := object.NewVirtualDiskManager(dc.Client())\n\t\/\/ Returns uuid of vmdk virtual disk\n\tdiskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter)\n\n\tif err != nil {\n\t\tglog.Warningf(\"QueryVirtualDiskUuid failed for diskPath: %q. err: %+v\", diskPath, err)\n\t\treturn \"\", err\n\t}\n\tdiskUUID = formatVirtualDiskUUID(diskUUID)\n\treturn diskUUID, nil\n}\n\n\/\/ GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects\nfunc (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) {\n\tvar dsMoList []mo.Datastore\n\tvar dsRefs []types.ManagedObjectReference\n\tif len(dsObjList) < 1 {\n\t\tglog.Errorf(\"Datastore Object list is empty\")\n\t\treturn nil, fmt.Errorf(\"Datastore Object list is empty\")\n\t}\n\n\tfor _, dsObj := range dsObjList {\n\t\tdsRefs = append(dsRefs, dsObj.Reference())\n\t}\n\tpc := property.DefaultCollector(dc.Client())\n\terr := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v\", dsObjList, properties, err)\n\t\treturn nil, err\n\t}\n\treturn dsMoList, nil\n}\n\n\/\/ CheckDisksAttached checks if the disk is attached to node.\n\/\/ This is done by comparing the volume path with the backing.FilePath on the VM Virtual disk devices.\nfunc (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) {\n\tattached := make(map[string]map[string]bool)\n\tvar vmList []*VirtualMachine\n\tfor nodeName, volPaths := range nodeVolumes {\n\t\tfor _, volPath := range volPaths {\n\t\t\tsetNodeVolumeMap(attached, volPath, nodeName, false)\n\t\t}\n\t\tvm, err := dc.GetVMByPath(ctx, nodeName)\n\t\tif err != nil {\n\t\t\tif IsNotFound(err) {\n\t\t\t\tglog.Warningf(\"Node %q does not exist, vSphere CP will assume disks %v are not attached to it.\", nodeName, volPaths)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tvmList = append(vmList, vm)\n\t}\n\tif len(vmList) == 0 {\n\t\tglog.V(2).Infof(\"vSphere CP will assume no disks are attached to any node.\")\n\t\treturn attached, nil\n\t}\n\tvmMoList, err := dc.GetVMMoList(ctx, vmList, []string{\"config.hardware.device\", \"name\"})\n\tif err != nil {\n\t\t\/\/ When there is an error fetching instance information\n\t\t\/\/ it is safer to return nil and let volume information not be touched.\n\t\tglog.Errorf(\"Failed to get VM Managed object for nodes: %+v. err: +%v\", vmList, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, vmMo := range vmMoList {\n\t\tif vmMo.Config == nil {\n\t\t\tglog.Errorf(\"Config is not available for VM: %q\", vmMo.Name)\n\t\t\tcontinue\n\t\t}\n\t\tfor nodeName, volPaths := range nodeVolumes {\n\t\t\tif nodeName == vmMo.Name {\n\t\t\t\tverifyVolumePathsForVM(vmMo, volPaths, attached)\n\t\t\t}\n\t\t}\n\t}\n\treturn attached, nil\n}\n\n\/\/ VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM.\nfunc verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) {\n\t\/\/ Verify if the volume paths are present on the VM backing virtual disk devices\n\tfor _, volPath := range volPaths {\n\t\tvmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)\n\t\tfor _, device := range vmDevices {\n\t\t\tif vmDevices.TypeName(device) == \"VirtualDisk\" {\n\t\t\t\tvirtualDevice := device.GetVirtualDevice()\n\t\t\t\tif backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {\n\t\t\t\t\tif backing.FileName == volPath {\n\t\t\t\t\t\tsetNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc setNodeVolumeMap(\n\tnodeVolumeMap map[string]map[string]bool,\n\tvolumePath string,\n\tnodeName string,\n\tcheck bool) {\n\tvolumeMap := nodeVolumeMap[nodeName]\n\tif volumeMap == nil {\n\t\tvolumeMap = make(map[string]bool)\n\t\tnodeVolumeMap[nodeName] = volumeMap\n\t}\n\tvolumeMap[volumePath] = check\n}\n<|endoftext|>"} {"text":"<commit_before>package deploy\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/codegangsta\/cli\"\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/github\/hub\/git\"\n\thub \"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/github\/hub\/github\"\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/google\/go-github\/github\"\n)\n\nconst (\n\tName = \"deploy\"\n\tUsage = \"A command for creating GitHub deployments\"\n)\n\nconst DefaultRef = \"master\"\n\nfunc init() {\n\tcli.AppHelpTemplate = `USAGE:\n # Deploy the master branch of remind101\/acme-inc to staging\n {{.Name}} --env=staging --ref=master remind101\/acme-inc\n\n # Deploy HEAD of the current branch to staging\n {{.Name}} --env=staging remind101\/acme-inc\n\n # Deploy the current GitHub repo to staging\n {{.Name}} --env=staging\n{{if .Flags}}\nOPTIONS:\n {{range .Flags}}{{.}}\n {{end}}{{end}}\n`\n}\n\nvar flags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"ref, branch, commit, tag\",\n\t\tValue: \"\",\n\t\tUsage: \"The git ref to deploy. Can be a git commit, branch or tag.\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"env, e\",\n\t\tValue: \"\",\n\t\tUsage: \"The environment to deploy to.\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"force, f\",\n\t\tUsage: \"Ignore commit status checks.\",\n\t},\n}\n\n\/\/ NewApp returns a new cli.App for the deploy command.\nfunc NewApp() *cli.App {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.1\"\n\tapp.Name = Name\n\tapp.Usage = Usage\n\tapp.Flags = flags\n\tapp.Action = func(c *cli.Context) {\n\t\tif err := RunDeploy(c); err != nil {\n\t\t\tmsg := err.Error()\n\t\t\tif err, ok := err.(*github.ErrorResponse); ok {\n\t\t\t\tif strings.HasPrefix(err.Message, \"Conflict: Commit status checks failed for\") {\n\t\t\t\t\tmsg = \"Commit status checks failed. You can bypass commit status checks with the --force flag.\"\n\t\t\t\t} else if strings.HasPrefix(err.Message, \"No ref found for\") {\n\t\t\t\t\tmsg = fmt.Sprintf(\"%s. Did you push it to GitHub?\", err.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmsg = err.Message\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Println(msg)\n\t\t\tos.Exit(-1)\n\t\t}\n\t}\n\n\treturn app\n}\n\n\/\/ RunDeploy performs a deploy.\nfunc RunDeploy(c *cli.Context) error {\n\tw := c.App.Writer\n\n\th, err := hub.CurrentConfig().PromptForHost(\"github.com\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := newGitHubClient(h)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnwo, err := Repo(c.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\towner, repo, err := SplitRepo(nwo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid GitHub repo: %s\", nwo)\n\t}\n\n\tr, err := newDeploymentRequest(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(w, \"Deploying %s@%s to %s...\\n\", nwo, *r.Ref, *r.Environment)\n\n\td, _, err := client.Repositories.CreateDeployment(owner, repo, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstarted := make(chan *github.DeploymentStatus)\n\tcompleted := make(chan *github.DeploymentStatus)\n\n\tgo func() {\n\t\tstarted <- waitState(pendingStates, owner, repo, *d.ID, client)\n\t}()\n\n\tgo func() {\n\t\tcompleted <- waitState(completedStates, owner, repo, *d.ID, client)\n\t}()\n\n\tstatus := <-started\n\tvar url string\n\tif status.TargetURL != nil {\n\t\turl = *status.TargetURL\n\t}\n\tfmt.Fprintf(w, \"%s\\n\", url)\n\n\tstatus = <-completed\n\n\tif isFailed(*status.State) {\n\t\treturn errors.New(\"Failed to deploy\")\n\t}\n\n\treturn nil\n}\n\nfunc newDeploymentRequest(c *cli.Context) (*github.DeploymentRequest, error) {\n\tref := c.String(\"ref\")\n\tif ref == \"\" {\n\t\tr, err := git.Ref(\"HEAD\")\n\t\tif err == nil {\n\t\t\tref = r\n\t\t} else {\n\t\t\tref = DefaultRef\n\t\t}\n\t}\n\n\tenv := c.String(\"env\")\n\tif env == \"\" {\n\t\treturn nil, fmt.Errorf(\"--env flag is required\")\n\t}\n\n\tvar contexts *[]string\n\tif c.Bool(\"force\") {\n\t\ts := []string{}\n\t\tcontexts = &s\n\t}\n\n\treturn &github.DeploymentRequest{\n\t\tRef: github.String(ref),\n\t\tTask: github.String(\"deploy\"),\n\t\tAutoMerge: github.Bool(false),\n\t\tEnvironment: github.String(env),\n\t\tRequiredContexts: contexts,\n\t\t\/\/ TODO Description:\n\t}, nil\n}\n\nvar (\n\tpendingStates = []string{\"pending\"}\n\tcompletedStates = []string{\"success\", \"error\", \"failure\"}\n)\n\nfunc isFailed(state string) bool {\n\treturn state == \"error\" || state == \"failure\"\n}\n\n\/\/ waitState waits for a deployment status that matches the given states, then\n\/\/ sends on the returned channel.\nfunc waitState(states []string, owner, repo string, deploymentID int, c *github.Client) *github.DeploymentStatus {\n\tfor {\n\t\t<-time.After(1 * time.Second)\n\n\t\tstatuses, _, err := c.Repositories.ListDeploymentStatuses(owner, repo, deploymentID, nil)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tstatus := firstStatus(states, statuses)\n\t\tif status != nil {\n\t\t\treturn status\n\t\t}\n\t}\n}\n\n\/\/ firstStatus takes a slice of github.DeploymentStatus and returns the\n\/\/ first status that matches the provided slice of states.\nfunc firstStatus(states []string, statuses []github.DeploymentStatus) *github.DeploymentStatus {\n\tfor _, ds := range statuses {\n\t\tfor _, s := range states {\n\t\t\tif ds.State != nil && *ds.State == s {\n\t\t\t\treturn &ds\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Repo will determine the correct GitHub repo to deploy to, based on a set of\n\/\/ arguments.\nfunc Repo(arguments []string) (string, error) {\n\tif len(arguments) != 0 {\n\t\treturn arguments[0], nil\n\t}\n\n\tremotes, err := hub.Remotes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\trepo := GitHubRepo(remotes)\n\tif repo == \"\" {\n\t\treturn repo, errors.New(\"no GitHub repo found in .git\/config\")\n\t}\n\n\treturn repo, nil\n}\n\n\/\/ A regular expression that can convert a URL.Path into a GitHub repo name.\nvar remoteRegex = regexp.MustCompile(`^\/(.*)\\.git$`)\n\n\/\/ GitHubRepo, given a list of git remotes, will determine what the GitHub repo\n\/\/ is.\nfunc GitHubRepo(remotes []hub.Remote) string {\n\t\/\/ We only want to look at the `origin` remote.\n\tremote := findRemote(\"origin\", remotes)\n\tif remote == nil {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Remotes that are not pointed at a GitHub repo are not valid.\n\tif remote.URL.Host != \"github.com\" {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Convert `\/remind101\/acme-inc.git` => `remind101\/acme-inc`.\n\treturn remoteRegex.ReplaceAllString(remote.URL.Path, \"$1\")\n}\n\nfunc findRemote(name string, remotes []hub.Remote) *hub.Remote {\n\tfor _, r := range remotes {\n\t\tif r.Name == name {\n\t\t\treturn &r\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar errInvalidRepo = errors.New(\"invalid repo\")\n\n\/\/ SplitRepo splits a repo string in the form remind101\/acme-inc into it's owner\n\/\/ and repo components.\nfunc SplitRepo(nwo string) (owner string, repo string, err error) {\n\tparts := strings.Split(nwo, \"\/\")\n\n\tif len(parts) != 2 {\n\t\terr = errInvalidRepo\n\t\treturn\n\t}\n\n\towner = parts[0]\n\trepo = parts[1]\n\n\treturn\n}\n<commit_msg>Timeout waiting for build to start.<commit_after>package deploy\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/codegangsta\/cli\"\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/github\/hub\/git\"\n\thub \"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/github\/hub\/github\"\n\t\"github.com\/remind101\/deploy\/Godeps\/_workspace\/src\/github.com\/google\/go-github\/github\"\n)\n\nconst (\n\tName = \"deploy\"\n\tUsage = \"A command for creating GitHub deployments\"\n)\n\nconst (\n\tDefaultRef = \"master\"\n\tDefaultTimeout = 20 * time.Second\n)\n\nvar errTimeout = errors.New(\"timed out waiting for build to start\")\n\nfunc init() {\n\tcli.AppHelpTemplate = `USAGE:\n # Deploy the master branch of remind101\/acme-inc to staging\n {{.Name}} --env=staging --ref=master remind101\/acme-inc\n\n # Deploy HEAD of the current branch to staging\n {{.Name}} --env=staging remind101\/acme-inc\n\n # Deploy the current GitHub repo to staging\n {{.Name}} --env=staging\n{{if .Flags}}\nOPTIONS:\n {{range .Flags}}{{.}}\n {{end}}{{end}}\n`\n}\n\nvar flags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"ref, branch, commit, tag\",\n\t\tValue: \"\",\n\t\tUsage: \"The git ref to deploy. Can be a git commit, branch or tag.\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"env, e\",\n\t\tValue: \"\",\n\t\tUsage: \"The environment to deploy to.\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"force, f\",\n\t\tUsage: \"Ignore commit status checks.\",\n\t},\n}\n\n\/\/ NewApp returns a new cli.App for the deploy command.\nfunc NewApp() *cli.App {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.1\"\n\tapp.Name = Name\n\tapp.Usage = Usage\n\tapp.Flags = flags\n\tapp.Action = func(c *cli.Context) {\n\t\tif err := RunDeploy(c); err != nil {\n\t\t\tmsg := err.Error()\n\t\t\tif err, ok := err.(*github.ErrorResponse); ok {\n\t\t\t\tif strings.HasPrefix(err.Message, \"Conflict: Commit status checks failed for\") {\n\t\t\t\t\tmsg = \"Commit status checks failed. You can bypass commit status checks with the --force flag.\"\n\t\t\t\t} else if strings.HasPrefix(err.Message, \"No ref found for\") {\n\t\t\t\t\tmsg = fmt.Sprintf(\"%s. Did you push it to GitHub?\", err.Message)\n\t\t\t\t} else {\n\t\t\t\t\tmsg = err.Message\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Println(msg)\n\t\t\tos.Exit(-1)\n\t\t}\n\t}\n\n\treturn app\n}\n\n\/\/ RunDeploy performs a deploy.\nfunc RunDeploy(c *cli.Context) error {\n\tw := c.App.Writer\n\n\th, err := hub.CurrentConfig().PromptForHost(\"github.com\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := newGitHubClient(h)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnwo, err := Repo(c.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\towner, repo, err := SplitRepo(nwo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Invalid GitHub repo: %s\", nwo)\n\t}\n\n\tr, err := newDeploymentRequest(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(w, \"Deploying %s@%s to %s...\\n\", nwo, *r.Ref, *r.Environment)\n\n\td, _, err := client.Repositories.CreateDeployment(owner, repo, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstarted := make(chan *github.DeploymentStatus)\n\tcompleted := make(chan *github.DeploymentStatus)\n\n\tgo func() {\n\t\tstarted <- waitState(pendingStates, owner, repo, *d.ID, client)\n\t}()\n\n\tgo func() {\n\t\tcompleted <- waitState(completedStates, owner, repo, *d.ID, client)\n\t}()\n\n\tselect {\n\tcase <-time.After(DefaultTimeout):\n\t\treturn errTimeout\n\tcase status := <-started:\n\t\tvar url string\n\t\tif status.TargetURL != nil {\n\t\t\turl = *status.TargetURL\n\t\t}\n\t\tfmt.Fprintf(w, \"%s\\n\", url)\n\t}\n\n\tstatus := <-completed\n\n\tif isFailed(*status.State) {\n\t\treturn errors.New(\"Failed to deploy\")\n\t}\n\n\treturn nil\n}\n\nfunc newDeploymentRequest(c *cli.Context) (*github.DeploymentRequest, error) {\n\tref := c.String(\"ref\")\n\tif ref == \"\" {\n\t\tr, err := git.Ref(\"HEAD\")\n\t\tif err == nil {\n\t\t\tref = r\n\t\t} else {\n\t\t\tref = DefaultRef\n\t\t}\n\t}\n\n\tenv := c.String(\"env\")\n\tif env == \"\" {\n\t\treturn nil, fmt.Errorf(\"--env flag is required\")\n\t}\n\n\tvar contexts *[]string\n\tif c.Bool(\"force\") {\n\t\ts := []string{}\n\t\tcontexts = &s\n\t}\n\n\treturn &github.DeploymentRequest{\n\t\tRef: github.String(ref),\n\t\tTask: github.String(\"deploy\"),\n\t\tAutoMerge: github.Bool(false),\n\t\tEnvironment: github.String(env),\n\t\tRequiredContexts: contexts,\n\t\t\/\/ TODO Description:\n\t}, nil\n}\n\nvar (\n\tpendingStates = []string{\"pending\"}\n\tcompletedStates = []string{\"success\", \"error\", \"failure\"}\n)\n\nfunc isFailed(state string) bool {\n\treturn state == \"error\" || state == \"failure\"\n}\n\n\/\/ waitState waits for a deployment status that matches the given states, then\n\/\/ sends on the returned channel.\nfunc waitState(states []string, owner, repo string, deploymentID int, c *github.Client) *github.DeploymentStatus {\n\tfor {\n\t\t<-time.After(1 * time.Second)\n\n\t\tstatuses, _, err := c.Repositories.ListDeploymentStatuses(owner, repo, deploymentID, nil)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tstatus := firstStatus(states, statuses)\n\t\tif status != nil {\n\t\t\treturn status\n\t\t}\n\t}\n}\n\n\/\/ firstStatus takes a slice of github.DeploymentStatus and returns the\n\/\/ first status that matches the provided slice of states.\nfunc firstStatus(states []string, statuses []github.DeploymentStatus) *github.DeploymentStatus {\n\tfor _, ds := range statuses {\n\t\tfor _, s := range states {\n\t\t\tif ds.State != nil && *ds.State == s {\n\t\t\t\treturn &ds\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Repo will determine the correct GitHub repo to deploy to, based on a set of\n\/\/ arguments.\nfunc Repo(arguments []string) (string, error) {\n\tif len(arguments) != 0 {\n\t\treturn arguments[0], nil\n\t}\n\n\tremotes, err := hub.Remotes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\trepo := GitHubRepo(remotes)\n\tif repo == \"\" {\n\t\treturn repo, errors.New(\"no GitHub repo found in .git\/config\")\n\t}\n\n\treturn repo, nil\n}\n\n\/\/ A regular expression that can convert a URL.Path into a GitHub repo name.\nvar remoteRegex = regexp.MustCompile(`^\/(.*)\\.git$`)\n\n\/\/ GitHubRepo, given a list of git remotes, will determine what the GitHub repo\n\/\/ is.\nfunc GitHubRepo(remotes []hub.Remote) string {\n\t\/\/ We only want to look at the `origin` remote.\n\tremote := findRemote(\"origin\", remotes)\n\tif remote == nil {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Remotes that are not pointed at a GitHub repo are not valid.\n\tif remote.URL.Host != \"github.com\" {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Convert `\/remind101\/acme-inc.git` => `remind101\/acme-inc`.\n\treturn remoteRegex.ReplaceAllString(remote.URL.Path, \"$1\")\n}\n\nfunc findRemote(name string, remotes []hub.Remote) *hub.Remote {\n\tfor _, r := range remotes {\n\t\tif r.Name == name {\n\t\t\treturn &r\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar errInvalidRepo = errors.New(\"invalid repo\")\n\n\/\/ SplitRepo splits a repo string in the form remind101\/acme-inc into it's owner\n\/\/ and repo components.\nfunc SplitRepo(nwo string) (owner string, repo string, err error) {\n\tparts := strings.Split(nwo, \"\/\")\n\n\tif len(parts) != 2 {\n\t\terr = errInvalidRepo\n\t\treturn\n\t}\n\n\towner = parts[0]\n\trepo = parts[1]\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-sql\"\n\t\"github.com\/flynn\/flynn\/pkg\/postgres\"\n)\n\nfunc migrateDB(db *sql.DB) error {\n\tm := postgres.NewMigrations()\n\tm.Add(1,\n\t\t`CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"`,\n\t\t`CREATE EXTENSION IF NOT EXISTS \"hstore\"`,\n\n\t\t`CREATE TABLE artifacts (\n artifact_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n type text NOT NULL,\n uri text NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON artifacts (type, uri) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE releases (\n release_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n artifact_id uuid REFERENCES artifacts (artifact_id),\n data text NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\n\t\t`CREATE TYPE deployment_strategy AS ENUM ('all-at-once', 'one-by-one', 'postgres')`,\n\n\t\t`CREATE TABLE apps (\n app_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n name text NOT NULL,\n release_id uuid REFERENCES releases (release_id),\n\tmeta hstore,\n\tstrategy deployment_strategy NOT NULL DEFAULT 'all-at-once',\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON apps (name) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE formations (\n app_id uuid NOT NULL REFERENCES apps (app_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n processes hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n PRIMARY KEY (app_id, release_id)\n)`,\n\n\t\t`CREATE FUNCTION notify_formation() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('formations', NEW.app_id || ':' || NEW.release_id);\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_formation\n AFTER INSERT OR UPDATE ON formations\n FOR EACH ROW EXECUTE PROCEDURE notify_formation()`,\n\n\t\t`CREATE TABLE keys (\n key_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n fingerprint text NOT NULL,\n key text NOT NULL,\n comment text,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON keys (fingerprint) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE providers (\n provider_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n name text NOT NULL UNIQUE,\n url text NOT NULL UNIQUE,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\n\t\t`CREATE TABLE resources (\n resource_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n provider_id uuid NOT NULL REFERENCES providers (provider_id),\n external_id text NOT NULL,\n env hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n UNIQUE (provider_id, external_id)\n)`,\n\n\t\t`CREATE TABLE app_resources (\n app_id uuid NOT NULL REFERENCES apps (app_id),\n resource_id uuid NOT NULL REFERENCES resources (resource_id),\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n PRIMARY KEY (app_id, resource_id)\n)`,\n\t\t`CREATE INDEX ON app_resources (resource_id)`,\n\n\t\t`CREATE TYPE job_state AS ENUM ('starting', 'up', 'down', 'crashed', 'failed')`,\n\t\t`CREATE TABLE job_cache (\n job_id text NOT NULL,\n host_id text NOT NULL,\n app_id uuid NOT NULL REFERENCES apps (app_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n process_type text,\n state job_state NOT NULL,\n meta hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n PRIMARY KEY (job_id, host_id)\n)`,\n\t\t`CREATE FUNCTION check_job_state() RETURNS OPAQUE AS $$\n BEGIN\n IF NEW.state < OLD.state THEN\n\t RAISE EXCEPTION 'invalid job state transition: % -> %', OLD.state, NEW.state USING ERRCODE = 'check_violation';\n ELSE\n\t RETURN NEW;\n END IF;\n END;\n$$ LANGUAGE plpgsql`,\n\t\t`CREATE TRIGGER job_state_trigger\n AFTER UPDATE ON job_cache\n FOR EACH ROW EXECUTE PROCEDURE check_job_state()`,\n\n\t\t`CREATE SEQUENCE job_event_ids`,\n\t\t`CREATE TABLE job_events (\n event_id bigint PRIMARY KEY DEFAULT nextval('job_event_ids'),\n job_id text NOT NULL,\n host_id text NOT NULL,\n app_id uuid NOT NULL REFERENCES apps (app_id),\n state job_state NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n FOREIGN KEY (job_id, host_id) REFERENCES job_cache (job_id, host_id)\n)`,\n\t\t`CREATE UNIQUE INDEX ON job_events (job_id, host_id, app_id, state)`,\n\t\t`CREATE FUNCTION notify_job_event() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('job_events:' || NEW.app_id, NEW.event_id || '');\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_job_event\n AFTER INSERT ON job_events\n FOR EACH ROW EXECUTE PROCEDURE notify_job_event()`,\n\n\t\t`CREATE SEQUENCE name_ids MAXVALUE 4294967295`,\n\t\t`CREATE TABLE deployments (\n deployment_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n app_id uuid NOT NULL,\n old_release_id uuid REFERENCES releases (release_id),\n new_release_id uuid NOT NULL REFERENCES releases (release_id),\n strategy deployment_strategy NOT NULL,\n processes hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n finished_at timestamptz)`,\n\n\t\t`CREATE UNIQUE INDEX isolate_deploys ON deployments (app_id)\n WHERE finished_at is NULL`,\n\n\t\t`CREATE SEQUENCE deployment_event_ids`,\n\t\t`CREATE TYPE deployment_status AS ENUM ('running', 'complete', 'failed')`,\n\t\t`CREATE TABLE deployment_events (\n event_id bigint PRIMARY KEY DEFAULT nextval('deployment_event_ids'),\n deployment_id uuid NOT NULL REFERENCES deployments (deployment_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n status deployment_status NOT NULL DEFAULT 'running',\n job_type text,\n job_state text,\n error text,\n created_at timestamptz NOT NULL DEFAULT now())`,\n\n\t\t`CREATE FUNCTION notify_deployment_event() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('deployment_events:' || NEW.deployment_id, NEW.event_id || '');\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_deployment_event\n AFTER INSERT ON deployment_events\n FOR EACH ROW EXECUTE PROCEDURE notify_deployment_event()`,\n\t)\n\tm.Add(2,\n\t\t`CREATE TABLE que_jobs (\n priority smallint NOT NULL DEFAULT 100,\n run_at timestamptz NOT NULL DEFAULT now(),\n job_id bigserial NOT NULL,\n job_class text NOT NULL,\n args json NOT NULL DEFAULT '[]'::json,\n error_count integer NOT NULL DEFAULT 0,\n last_error text,\n queue text NOT NULL DEFAULT '',\n\n CONSTRAINT que_jobs_pkey PRIMARY KEY (queue, priority, run_at, job_id))`,\n\t\t`COMMENT ON TABLE que_jobs IS '3'`,\n\t)\n\treturn m.Migrate(db)\n}\n<commit_msg>controller: Add locked_until column to que_jobs table<commit_after>package main\n\nimport (\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/flynn\/go-sql\"\n\t\"github.com\/flynn\/flynn\/pkg\/postgres\"\n)\n\nfunc migrateDB(db *sql.DB) error {\n\tm := postgres.NewMigrations()\n\tm.Add(1,\n\t\t`CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"`,\n\t\t`CREATE EXTENSION IF NOT EXISTS \"hstore\"`,\n\n\t\t`CREATE TABLE artifacts (\n artifact_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n type text NOT NULL,\n uri text NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON artifacts (type, uri) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE releases (\n release_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n artifact_id uuid REFERENCES artifacts (artifact_id),\n data text NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\n\t\t`CREATE TYPE deployment_strategy AS ENUM ('all-at-once', 'one-by-one', 'postgres')`,\n\n\t\t`CREATE TABLE apps (\n app_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n name text NOT NULL,\n release_id uuid REFERENCES releases (release_id),\n\tmeta hstore,\n\tstrategy deployment_strategy NOT NULL DEFAULT 'all-at-once',\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON apps (name) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE formations (\n app_id uuid NOT NULL REFERENCES apps (app_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n processes hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n PRIMARY KEY (app_id, release_id)\n)`,\n\n\t\t`CREATE FUNCTION notify_formation() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('formations', NEW.app_id || ':' || NEW.release_id);\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_formation\n AFTER INSERT OR UPDATE ON formations\n FOR EACH ROW EXECUTE PROCEDURE notify_formation()`,\n\n\t\t`CREATE TABLE keys (\n key_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n fingerprint text NOT NULL,\n key text NOT NULL,\n comment text,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\t\t`CREATE UNIQUE INDEX ON keys (fingerprint) WHERE deleted_at IS NULL`,\n\n\t\t`CREATE TABLE providers (\n provider_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n name text NOT NULL UNIQUE,\n url text NOT NULL UNIQUE,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz\n)`,\n\n\t\t`CREATE TABLE resources (\n resource_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n provider_id uuid NOT NULL REFERENCES providers (provider_id),\n external_id text NOT NULL,\n env hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n UNIQUE (provider_id, external_id)\n)`,\n\n\t\t`CREATE TABLE app_resources (\n app_id uuid NOT NULL REFERENCES apps (app_id),\n resource_id uuid NOT NULL REFERENCES resources (resource_id),\n created_at timestamptz NOT NULL DEFAULT now(),\n deleted_at timestamptz,\n PRIMARY KEY (app_id, resource_id)\n)`,\n\t\t`CREATE INDEX ON app_resources (resource_id)`,\n\n\t\t`CREATE TYPE job_state AS ENUM ('starting', 'up', 'down', 'crashed', 'failed')`,\n\t\t`CREATE TABLE job_cache (\n job_id text NOT NULL,\n host_id text NOT NULL,\n app_id uuid NOT NULL REFERENCES apps (app_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n process_type text,\n state job_state NOT NULL,\n meta hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n updated_at timestamptz NOT NULL DEFAULT now(),\n PRIMARY KEY (job_id, host_id)\n)`,\n\t\t`CREATE FUNCTION check_job_state() RETURNS OPAQUE AS $$\n BEGIN\n IF NEW.state < OLD.state THEN\n\t RAISE EXCEPTION 'invalid job state transition: % -> %', OLD.state, NEW.state USING ERRCODE = 'check_violation';\n ELSE\n\t RETURN NEW;\n END IF;\n END;\n$$ LANGUAGE plpgsql`,\n\t\t`CREATE TRIGGER job_state_trigger\n AFTER UPDATE ON job_cache\n FOR EACH ROW EXECUTE PROCEDURE check_job_state()`,\n\n\t\t`CREATE SEQUENCE job_event_ids`,\n\t\t`CREATE TABLE job_events (\n event_id bigint PRIMARY KEY DEFAULT nextval('job_event_ids'),\n job_id text NOT NULL,\n host_id text NOT NULL,\n app_id uuid NOT NULL REFERENCES apps (app_id),\n state job_state NOT NULL,\n created_at timestamptz NOT NULL DEFAULT now(),\n FOREIGN KEY (job_id, host_id) REFERENCES job_cache (job_id, host_id)\n)`,\n\t\t`CREATE UNIQUE INDEX ON job_events (job_id, host_id, app_id, state)`,\n\t\t`CREATE FUNCTION notify_job_event() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('job_events:' || NEW.app_id, NEW.event_id || '');\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_job_event\n AFTER INSERT ON job_events\n FOR EACH ROW EXECUTE PROCEDURE notify_job_event()`,\n\n\t\t`CREATE SEQUENCE name_ids MAXVALUE 4294967295`,\n\t\t`CREATE TABLE deployments (\n deployment_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),\n app_id uuid NOT NULL,\n old_release_id uuid REFERENCES releases (release_id),\n new_release_id uuid NOT NULL REFERENCES releases (release_id),\n strategy deployment_strategy NOT NULL,\n processes hstore,\n created_at timestamptz NOT NULL DEFAULT now(),\n finished_at timestamptz)`,\n\n\t\t`CREATE UNIQUE INDEX isolate_deploys ON deployments (app_id)\n WHERE finished_at is NULL`,\n\n\t\t`CREATE SEQUENCE deployment_event_ids`,\n\t\t`CREATE TYPE deployment_status AS ENUM ('running', 'complete', 'failed')`,\n\t\t`CREATE TABLE deployment_events (\n event_id bigint PRIMARY KEY DEFAULT nextval('deployment_event_ids'),\n deployment_id uuid NOT NULL REFERENCES deployments (deployment_id),\n release_id uuid NOT NULL REFERENCES releases (release_id),\n status deployment_status NOT NULL DEFAULT 'running',\n job_type text,\n job_state text,\n error text,\n created_at timestamptz NOT NULL DEFAULT now())`,\n\n\t\t`CREATE FUNCTION notify_deployment_event() RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('deployment_events:' || NEW.deployment_id, NEW.event_id || '');\n RETURN NULL;\n END;\n$$ LANGUAGE plpgsql`,\n\n\t\t`CREATE TRIGGER notify_deployment_event\n AFTER INSERT ON deployment_events\n FOR EACH ROW EXECUTE PROCEDURE notify_deployment_event()`,\n\t)\n\tm.Add(2,\n\t\t`CREATE TABLE que_jobs (\n priority smallint NOT NULL DEFAULT 100,\n run_at timestamptz NOT NULL DEFAULT now(),\n job_id bigserial NOT NULL,\n job_class text NOT NULL,\n args json NOT NULL DEFAULT '[]'::json,\n error_count integer NOT NULL DEFAULT 0,\n last_error text,\n queue text NOT NULL DEFAULT '',\n locked_until timestamptz NOT NULL DEFAULT now(),\n\n CONSTRAINT que_jobs_pkey PRIMARY KEY (queue, priority, run_at, job_id))`,\n\t\t`COMMENT ON TABLE que_jobs IS '3'`,\n\t)\n\treturn m.Migrate(db)\n}\n<|endoftext|>"} {"text":"<commit_before>package shorturl\n\nimport (\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\tlog \"github.com\/meifamily\/logrus\"\n\n\t\"strconv\"\n\n\t\"github.com\/meifamily\/ptt-alertor\/connections\"\n\t\"github.com\/meifamily\/ptt-alertor\/myutil\"\n)\n\nconst redisPrefix = \"sum:\"\n\nvar url = os.Getenv(\"APP_HOST\") + \"\/redirect\/\"\n\nfunc Gen(longURL string) string {\n\tdata := []byte(longURL)\n\tsum := fmt.Sprintf(\"%x\", md5.Sum(data))\n\tsum += strconv.FormatInt(time.Now().Unix(), 10)\n\tconn := connections.Redis()\n\tdefer conn.Close()\n\t_, err := conn.Do(\"SET\", redisPrefix+sum, longURL, \"EX\", 600)\n\tif err != nil {\n\t\tlog.WithField(\"runtime\", myutil.BasicRuntimeInfo()).WithError(err).Error()\n\t}\n\tshortURL := url + sum\n\treturn shortURL\n}\n\nfunc Original(sum string) string {\n\tconn := connections.Redis()\n\tdefer conn.Close()\n\tkey := redisPrefix + sum\n\tconn.Send(\"MULTI\")\n\tconn.Send(\"GET\", key)\n\tconn.Send(\"DEL\", key)\n\tresult, err := redis.Values(conn.Do(\"EXEC\"))\n\tif err != nil {\n\t\tlog.WithField(\"runtime\", myutil.BasicRuntimeInfo()).WithError(err).Error()\n\t}\n\tif result[0] == nil {\n\t\treturn \"\"\n\t}\n\treturn string(result[0].([]byte))\n}\n<commit_msg>:alien: shorturl: remove delete key after get origin url due to line's new prefetch feature<commit_after>package shorturl\n\nimport (\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\tlog \"github.com\/meifamily\/logrus\"\n\n\t\"strconv\"\n\n\t\"github.com\/meifamily\/ptt-alertor\/connections\"\n\t\"github.com\/meifamily\/ptt-alertor\/myutil\"\n)\n\nconst redisPrefix = \"sum:\"\n\nvar url = os.Getenv(\"APP_HOST\") + \"\/redirect\/\"\n\nfunc Gen(longURL string) string {\n\tdata := []byte(longURL)\n\tsum := fmt.Sprintf(\"%x\", md5.Sum(data))\n\tsum += strconv.FormatInt(time.Now().Unix(), 10)\n\tconn := connections.Redis()\n\tdefer conn.Close()\n\t_, err := conn.Do(\"SET\", redisPrefix+sum, longURL, \"EX\", 600)\n\tif err != nil {\n\t\tlog.WithField(\"runtime\", myutil.BasicRuntimeInfo()).WithError(err).Error()\n\t}\n\tshortURL := url + sum\n\treturn shortURL\n}\n\nfunc Original(sum string) string {\n\tconn := connections.Redis()\n\tdefer conn.Close()\n\tkey := redisPrefix + sum\n\tu, err := redis.String(conn.Do(\"GET\", key))\n\tif err != nil {\n\t\tlog.WithField(\"runtime\", myutil.BasicRuntimeInfo()).WithError(err).Error()\n\t}\n\treturn u\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (C) 1990-2016. Robert W Solomon. All rights reserved.\n\/\/ ShowUtf-8 codes. Based on utf8toascii, based on nocr.go\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\t\/\/\n\t\"getcommandline\"\n)\n\nconst lastCompiled = \"6 May 17\"\n\n\/\/const openQuoteRune = 0xe2809c\n\/\/const closeQuoteRune = 0xe2809d\n\/\/const squoteRune = 0xe28099\n\/\/const emdashRune = 0xe28094\nconst openQuoteRune = 8220\nconst closeQuoteRune = 8221\nconst squoteRune = 8217\nconst emdashRune = 8212\nconst quoteString = \"\\\"\"\nconst squoteString = \"'\"\nconst emdashStr = \" -- \"\n\n\/*\n REVISION HISTORY\n ----------------\n 17 Apr 17 -- Started writing nocr, based on rpn.go\n 18 Apr 17 -- It worked yesterday. Now I'll rename files as in Modula-2.\n 5 May 17 -- Now will convert utf8 to ascii, based on nocr.go\n\t6 May 17 -- Need to know the utf8 codes before I can convert 'em.\n*\/\n\nfunc main() {\n\tvar instr, outstr, str string\n\t\/\/\tvar err error\n\n\tfmt.Println(\" ShowUtf8. Last compiled \", lastCompiled)\n\tfmt.Println()\n\n\tif len(os.Args) <= 1 {\n\t\tfmt.Println(\" Usage: utf8toascii <filename> \")\n\t\tos.Exit(1)\n\t}\n\n\tcommandline := getcommandline.GetCommandLineString()\n\tBaseFilename := filepath.Clean(commandline)\n\tInFilename := \"\"\n\tInFileExists := false\n\tExt1Default := \".txt\"\n\tOutFileSuffix := \".out\"\n\n\tif strings.Contains(BaseFilename, \".\") {\n\t\tInFilename = BaseFilename\n\t\t_, err := os.Stat(InFilename)\n\t\tif err == nil {\n\t\t\tInFileExists = true\n\t\t}\n\t} else {\n\t\tInFilename = BaseFilename + Ext1Default\n\t\t_, err := os.Stat(InFilename)\n\t\tif err == nil {\n\t\t\tInFileExists = true\n\t\t}\n\t}\n\n\tif !InFileExists {\n\t\tfmt.Println(\" File \", BaseFilename, \" or \", InFilename, \" does not exist. Exiting.\")\n\t\tos.Exit(1)\n\t}\n\n\tInputFile, err := os.Open(InFilename)\n\tif err != nil {\n\t\tfmt.Println(\" Error while opening \", InFilename, \". Exiting.\")\n\t\tos.Exit(1)\n\t}\n\tdefer InputFile.Close()\n\n\tOutFilename := BaseFilename + OutFileSuffix\n\tOutputFile, err := os.Create(OutFilename)\n\tif err != nil {\n\t\tfmt.Println(\" Error while opening OutputFile \", OutFilename, \". Exiting.\")\n\t\tos.Exit(1)\n\t}\n\tdefer OutputFile.Close()\n\n\tInBufioScanner := bufio.NewScanner(InputFile)\n\tOutBufioWriter := bufio.NewWriter(OutputFile)\n\tdefer OutBufioWriter.Flush()\n\n\tfor InBufioScanner.Scan() {\n\t\tinstr = InBufioScanner.Text() \/\/ does not include the trailing EOL char\n\t\trunecount := utf8.RuneCountInString(instr)\n\t\t\/\/\t\tfmt.Println(\" Len of instr is \", len(instr), \", runecount is \", runecount)\n\t\tif len(instr) == runecount {\n\t\t\toutstr = instr\n\t\t} else { \/\/ a mismatch btwn instr length and rune count means that a multibyte rune is in this instr\n\t\t\tstringslice := make([]string, 0, runecount)\n\t\t\tfor dnctr := runecount; dnctr > 0; dnctr-- {\n\t\t\t\tr, siz := utf8.DecodeRuneInString(instr) \/\/ front rune in r\n\t\t\t\tinstr = instr[siz:] \/\/ chop off the first rune\n\t\t\t\tfmt.Print(\" r, siz: \", r, siz, \". \")\n\t\t\t\tif r == openQuoteRune {\n\t\t\t\t\tstr = quoteString\n\t\t\t\t} else if r == closeQuoteRune {\n\t\t\t\t\tstr = quoteString\n\t\t\t\t} else if r == squoteRune {\n\t\t\t\t\tstr = squoteString\n\t\t\t\t} else if r == emdashRune {\n\t\t\t\t\tstr = emdashStr\n\t\t\t\t} else {\n\t\t\t\t\tstr = string(r)\n\t\t\t\t}\n\t\t\t\tstringslice = append(stringslice, str)\n\n\t\t\t}\n\t\t\toutstr = strings.Join(stringslice, \"\")\n\t\t\tfmt.Println()\n\t\t}\n\t\t_, err := OutBufioWriter.WriteString(outstr)\n\t\tcheck(err)\n\t\t_, err = OutBufioWriter.WriteRune('\\n')\n\t\tcheck(err)\n\t}\n\n\tInputFile.Close()\n\tOutBufioWriter.Flush() \/\/ code did not work without this line.\n\tOutputFile.Close()\n\n\t\/\/ Make the processed file the same name as the input file. IE, swap in and\n\t\/\/ out files.\n\tTempFilename := InFilename + OutFilename + \".tmp\"\n\tos.Rename(InFilename, TempFilename)\n\tos.Rename(OutFilename, InFilename)\n\tos.Rename(TempFilename, OutFilename)\n\n\tFI, err := os.Stat(InFilename)\n\tInputFileSize := FI.Size()\n\n\tFI, err = os.Stat(OutFilename)\n\tOutputFileSize := FI.Size()\n\n\tfmt.Println(\" Original file is now \", OutFilename, \" and size is \", OutputFileSize)\n\tfmt.Println(\" Output File is now \", InFilename, \" and size is \", InputFileSize)\n\tfmt.Println()\n\n} \/\/ main in utf8toascii.go\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n<commit_msg>modified: showutf8\/showutf8.go -- works and does what I want<commit_after>\/\/ (C) 1990-2016. Robert W Solomon. All rights reserved.\n\/\/ ShowUtf-8 codes. Based on utf8toascii, based on nocr.go\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\t\/\/\n\t\"getcommandline\"\n)\n\nconst lastCompiled = \"6 May 17\"\n\n\/\/const openQuoteRune = 0xe2809c\n\/\/const closeQuoteRune = 0xe2809d\n\/\/const squoteRune = 0xe28099\n\/\/const emdashRune = 0xe28094\nconst openQuoteRune = 8220\nconst closeQuoteRune = 8221\nconst squoteRune = 8217\nconst emdashRune = 8212\nconst bulletpointRune = 8226\nconst quoteString = \"\\\"\"\nconst squoteString = \"'\"\nconst emdashStr = \" -- \"\nconst bulletpointStr = \"--\"\n\n\/*\n REVISION HISTORY\n ----------------\n 17 Apr 17 -- Started writing nocr, based on rpn.go\n 18 Apr 17 -- It worked yesterday. Now I'll rename files as in Modula-2.\n 5 May 17 -- Now will convert utf8 to ascii, based on nocr.go\n\t6 May 17 -- Need to know the utf8 codes before I can convert 'em.\n*\/\n\nfunc main() {\n\tvar instr string\n\t\/\/\tvar err error\n\n\tfmt.Println(\" ShowUtf8. Last compiled \", lastCompiled)\n\tfmt.Println()\n\n\tif len(os.Args) <= 1 {\n\t\tfmt.Println(\" Usage: utf8toascii <filename> \")\n\t\tos.Exit(1)\n\t}\n\n\tcommandline := getcommandline.GetCommandLineString()\n\tBaseFilename := filepath.Clean(commandline)\n\tInFilename := \"\"\n\tInFileExists := false\n\tExt1Default := \".txt\"\n\t\/\/\tOutFileSuffix := \".out\"\n\n\tif strings.Contains(BaseFilename, \".\") {\n\t\tInFilename = BaseFilename\n\t\t_, err := os.Stat(InFilename)\n\t\tif err == nil {\n\t\t\tInFileExists = true\n\t\t}\n\t} else {\n\t\tInFilename = BaseFilename + Ext1Default\n\t\t_, err := os.Stat(InFilename)\n\t\tif err == nil {\n\t\t\tInFileExists = true\n\t\t}\n\t}\n\n\tif !InFileExists {\n\t\tfmt.Println(\" File \", BaseFilename, \" or \", InFilename, \" does not exist. Exiting.\")\n\t\tos.Exit(1)\n\t}\n\n\tInputFile, err := os.Open(InFilename)\n\tif err != nil {\n\t\tfmt.Println(\" Error while opening \", InFilename, \". Exiting.\")\n\t\tos.Exit(1)\n\t}\n\tdefer InputFile.Close()\n\tInBufioScanner := bufio.NewScanner(InputFile)\n\tlinecounter := 0\n\tfor InBufioScanner.Scan() {\n\t\tinstr = InBufioScanner.Text() \/\/ does not include the trailing EOL char\n\t\tlinecounter++\n\t\trunecount := utf8.RuneCountInString(instr)\n\t\tif len(instr) == runecount {\n\t\t\tcontinue\n\t\t} else { \/\/ a mismatch btwn instr length and rune count means that a multibyte rune is in this instr\n\t\t\tfmt.Print(\" Line \", linecounter, \" : \")\n\t\t\tfor dnctr := runecount; dnctr > 0; dnctr-- {\n\t\t\t\tr, siz := utf8.DecodeRuneInString(instr) \/\/ front rune in r\n\t\t\t\tinstr = instr[siz:] \/\/ chop off the first rune\n\t\t\t\tif r > 128 {\n\t\t\t\t\tfmt.Print(\" r: \", r, \", siz: \", siz, \"; \")\n\t\t\t\t\tif r == openQuoteRune {\n\t\t\t\t\t\tfmt.Print(\" rune is opening\", quoteString, \"; \")\n\t\t\t\t\t} else if r == closeQuoteRune {\n\t\t\t\t\t\tfmt.Print(\" rune is closing\", quoteString, \"; \")\n\t\t\t\t\t} else if r == squoteRune {\n\t\t\t\t\t\tfmt.Print(\" rune is \", squoteString, \"; \")\n\t\t\t\t\t} else if r == emdashRune {\n\t\t\t\t\t\tfmt.Print(\" rune is \", emdashStr, \"; \")\n\t\t\t\t\t} else if r == bulletpointRune {\n\t\t\t\t\t\tfmt.Print(\" rune is bulletpoint; \")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Print(\" rune is new \")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\n\t\/\/\tInputFile.Close()\n\tfmt.Println()\n\n} \/\/ main in ShowUtf8.go\n\/*\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"fmt\"\n\n\t\"bitbucket.org\/ikeikeikeike\/antenna\/lib\/mailer\"\n\t\"github.com\/astaxie\/beego\/context\"\n)\n\ntype ErrorController struct {\n\tBaseController\n}\n\nfunc (c *ErrorController) Error400() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/400.tpl\"\n}\n\nfunc (c *ErrorController) Error401() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/401.tpl\"\n}\n\nfunc (c *ErrorController) Error403() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/403.tpl\"\n}\n\nfunc (c *ErrorController) Error404() {\n\tc.TplNames = \"errors\/404.tpl\"\n}\n\nfunc (c *ErrorController) Error500() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/500.tpl\"\n}\n\nfunc (c *ErrorController) Error503() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/503.tpl\"\n}\n\nfunc (c *ErrorController) ErrorDb() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/dberror.tpl\"\n}\n\nfunc traceMessage(ctx *context.Context) []string {\n\treturn []string{\n\t\tfmt.Sprintf(\"Url: %s\", ctx.Input.Url()),\n\t\tfmt.Sprintf(\"Uri: %s\", ctx.Input.Uri()),\n\t\tfmt.Sprintf(\"Method: %s\", ctx.Input.Method()),\n\t\tfmt.Sprintf(\"Params: %v\", ctx.Input.Params),\n\t\tfmt.Sprintf(\"UserAgent: %s\", ctx.Input.UserAgent()),\n\t\t\"\",\n\t}\n}\n<commit_msg>More section refer in error message template<commit_after>package controllers\n\nimport (\n\t\"fmt\"\n\n\t\"bitbucket.org\/ikeikeikeike\/antenna\/lib\/mailer\"\n\t\"github.com\/astaxie\/beego\/context\"\n)\n\ntype ErrorController struct {\n\tBaseController\n}\n\nfunc (c *ErrorController) Error400() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/400.tpl\"\n}\n\nfunc (c *ErrorController) Error401() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/401.tpl\"\n}\n\nfunc (c *ErrorController) Error403() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/403.tpl\"\n}\n\nfunc (c *ErrorController) Error404() {\n\tc.TplNames = \"errors\/404.tpl\"\n}\n\nfunc (c *ErrorController) Error500() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/500.tpl\"\n}\n\nfunc (c *ErrorController) Error503() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/503.tpl\"\n}\n\nfunc (c *ErrorController) ErrorDb() {\n\tmailer.SendStackTrace(traceMessage(c.Ctx))\n\tc.TplNames = \"errors\/dberror.tpl\"\n}\n\nfunc traceMessage(ctx *context.Context) []string {\n\treturn []string{\n\t\tfmt.Sprintf(\"Url: %s\", ctx.Input.Url()),\n\t\tfmt.Sprintf(\"Uri: %s\", ctx.Input.Uri()),\n\t\tfmt.Sprintf(\"Method: %s\", ctx.Input.Method()),\n\t\tfmt.Sprintf(\"Params: %v\", ctx.Input.Params),\n\t\tfmt.Sprintf(\"Referer: %s\", ctx.Input.Referer()),\n\t\tfmt.Sprintf(\"UserAgent: %s\", ctx.Input.UserAgent()),\n\t\t\"\",\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage securecookie\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar testCookies = []interface{}{\n\tmap[string]string{\"foo\": \"bar\"},\n\tmap[string]string{\"baz\": \"ding\"},\n}\n\nvar testStrings = []string{\"foo\", \"bar\", \"baz\"}\n\nfunc TestSecureCookie(t *testing.T) {\n\t\/\/ TODO test too old \/ too new timestamps\n\tcompareMaps := func(m1, m2 map[string]interface{}) error {\n\t\tif len(m1) != len(m2) {\n\t\t\treturn errors.New(\"different maps\")\n\t\t}\n\t\tfor k, v := range m1 {\n\t\t\tif m2[k] != v {\n\t\t\t\treturn fmt.Errorf(\"Different value for key %v: expected %v, got %v\", k, m2[k], v)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\ts1 := New([]byte(\"12345\"), []byte(\"1234567890123456\"))\n\ts2 := New([]byte(\"54321\"), []byte(\"6543210987654321\"))\n\tvalue := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t\t\"baz\": 128,\n\t}\n\n\tfor i := 0; i < 50; i++ {\n\t\t\/\/ Running this multiple times to check if any special character\n\t\t\/\/ breaks encoding\/decoding.\n\t\tencoded, err1 := s1.Encode(\"sid\", value)\n\t\tif err1 != nil {\n\t\t\tt.Error(err1)\n\t\t\tcontinue\n\t\t}\n\t\tdst := make(map[string]interface{})\n\t\terr2 := s1.Decode(\"sid\", encoded, &dst)\n\t\tif err2 != nil {\n\t\t\tt.Fatalf(\"%v: %v\", err2, encoded)\n\t\t}\n\t\tif err := compareMaps(dst, value); err != nil {\n\t\t\tt.Fatalf(\"Expected %v, got %v.\", value, dst)\n\t\t}\n\t\tdst2 := make(map[string]interface{})\n\t\terr3 := s2.Decode(\"sid\", encoded, &dst2)\n\t\tif err3 == nil {\n\t\t\tt.Fatalf(\"Expected failure decoding.\")\n\t\t}\n\t}\n}\n\nfunc TestDecodeInvalid(t *testing.T) {\n\t\/\/ List of invalid cookies, which must not be accepted, base64-decoded\n\t\/\/ (they will be encoded before passing to Decode).\n\tinvalidCookies := []string{\n\t\t\"\",\n\t\t\" \",\n\t\t\"\\n\",\n\t\t\"||\",\n\t\t\"|||\",\n\t\t\"cookie\",\n\t}\n\ts := New([]byte(\"12345\"), nil)\n\tvar dst string\n\tfor i, v := range invalidCookies {\n\t\terr := s.Decode(\"name\", base64.StdEncoding.EncodeToString([]byte(v)), &dst)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"%d: expected failure decoding\", i)\n\t\t}\n\t}\n}\n\nfunc TestAuthentication(t *testing.T) {\n\thash := hmac.New(sha256.New, []byte(\"secret-key\"))\n\tfor _, value := range testStrings {\n\t\thash.Reset()\n\t\tsigned := createMac(hash, []byte(value))\n\t\thash.Reset()\n\t\terr := verifyMac(hash, []byte(value), signed)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestEncription(t *testing.T) {\n\tblock, err := aes.NewCipher([]byte(\"1234567890123456\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Block could not be created\")\n\t}\n\tvar encrypted, decrypted []byte\n\tfor _, value := range testStrings {\n\t\tif encrypted, err = encrypt(block, []byte(value)); err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tif decrypted, err = decrypt(block, encrypted); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tif string(decrypted) != value {\n\t\t\t\tt.Errorf(\"Expected %v, got %v.\", value, string(decrypted))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSerialization(t *testing.T) {\n\tvar (\n\t\tserialized []byte\n\t\tdeserialized map[string]string\n\t\terr error\n\t)\n\tfor _, value := range testCookies {\n\t\tif serialized, err = serialize(value); err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tdeserialized = make(map[string]string)\n\t\t\tif err = deserialize(serialized, &deserialized); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tif fmt.Sprintf(\"%v\", deserialized) != fmt.Sprintf(\"%v\", value) {\n\t\t\t\tt.Errorf(\"Expected %v, got %v.\", value, deserialized)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestEncoding(t *testing.T) {\n\tfor _, value := range testStrings {\n\t\tencoded := encode([]byte(value))\n\t\tdecoded, err := decode(encoded)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else if string(decoded) != value {\n\t\t\tt.Errorf(\"Expected %v, got %s.\", value, string(decoded))\n\t\t}\n\t}\n}\n\nfunc TestMultiError(t *testing.T) {\n\ts1, s2 := New(nil, nil), New(nil, nil)\n\t_, err := EncodeMulti(\"sid\", \"value\", s1, s2)\n\tif len(err.(MultiError)) != 2 {\n\t\tt.Errorf(\"Expected 2 errors, got %s.\", err)\n\t} else {\n\t\tif strings.Index(err.Error(), \"hash key is not set\") == -1 {\n\t\t\tt.Errorf(\"Expected missing hash key error, got %s.\", err.Error())\n\t\t}\n\t}\n}\n\nfunc TestMultiNoCodecs(t *testing.T) {\n\t_, err := EncodeMulti(\"foo\", \"bar\")\n\tif err != errNoCodecs {\n\t\tt.Errorf(\"EncodeMulti: bad value for error, got: %v\", err)\n\t}\n\n\tvar dst []byte\n\terr = DecodeMulti(\"foo\", \"bar\", &dst)\n\tif err != errNoCodecs {\n\t\tt.Errorf(\"DecodeMulti: bad value for error, got: %v\", err)\n\t}\n}\n\nfunc TestMissingKey(t *testing.T) {\n\ts1 := New(nil, nil)\n\n\tvar dst []byte\n\terr := s1.Decode(\"sid\", \"value\", &dst)\n\tif err != errHashKeyNotSet {\n\t\tt.Fatalf(\"Expected %#v, got %#v\", errHashKeyNotSet, err)\n\t}\n}\n\n\/\/ ----------------------------------------------------------------------------\n\ntype FooBar struct {\n\tFoo int\n\tBar string\n}\n\nfunc TestCustomType(t *testing.T) {\n\ts1 := New([]byte(\"12345\"), []byte(\"1234567890123456\"))\n\t\/\/ Type is not registered in gob. (!!!)\n\tsrc := &FooBar{42, \"bar\"}\n\tencoded, _ := s1.Encode(\"sid\", src)\n\n\tdst := &FooBar{}\n\t_ = s1.Decode(\"sid\", encoded, dst)\n\tif dst.Foo != 42 || dst.Bar != \"bar\" {\n\t\tt.Fatalf(\"Expected %#v, got %#v\", src, dst)\n\t}\n}\n<commit_msg>Use reflect.DeepEqual instead<commit_after>\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage securecookie\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar testCookies = []interface{}{\n\tmap[string]string{\"foo\": \"bar\"},\n\tmap[string]string{\"baz\": \"ding\"},\n}\n\nvar testStrings = []string{\"foo\", \"bar\", \"baz\"}\n\nfunc TestSecureCookie(t *testing.T) {\n\t\/\/ TODO test too old \/ too new timestamps\n\ts1 := New([]byte(\"12345\"), []byte(\"1234567890123456\"))\n\ts2 := New([]byte(\"54321\"), []byte(\"6543210987654321\"))\n\tvalue := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t\t\"baz\": 128,\n\t}\n\n\tfor i := 0; i < 50; i++ {\n\t\t\/\/ Running this multiple times to check if any special character\n\t\t\/\/ breaks encoding\/decoding.\n\t\tencoded, err1 := s1.Encode(\"sid\", value)\n\t\tif err1 != nil {\n\t\t\tt.Error(err1)\n\t\t\tcontinue\n\t\t}\n\t\tdst := make(map[string]interface{})\n\t\terr2 := s1.Decode(\"sid\", encoded, &dst)\n\t\tif err2 != nil {\n\t\t\tt.Fatalf(\"%v: %v\", err2, encoded)\n\t\t}\n\t\tif !reflect.DeepEqual(dst, value) {\n\t\t\tt.Fatalf(\"Expected %v, got %v.\", value, dst)\n\t\t}\n\t\tdst2 := make(map[string]interface{})\n\t\terr3 := s2.Decode(\"sid\", encoded, &dst2)\n\t\tif err3 == nil {\n\t\t\tt.Fatalf(\"Expected failure decoding.\")\n\t\t}\n\t}\n}\n\nfunc TestDecodeInvalid(t *testing.T) {\n\t\/\/ List of invalid cookies, which must not be accepted, base64-decoded\n\t\/\/ (they will be encoded before passing to Decode).\n\tinvalidCookies := []string{\n\t\t\"\",\n\t\t\" \",\n\t\t\"\\n\",\n\t\t\"||\",\n\t\t\"|||\",\n\t\t\"cookie\",\n\t}\n\ts := New([]byte(\"12345\"), nil)\n\tvar dst string\n\tfor i, v := range invalidCookies {\n\t\terr := s.Decode(\"name\", base64.StdEncoding.EncodeToString([]byte(v)), &dst)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"%d: expected failure decoding\", i)\n\t\t}\n\t}\n}\n\nfunc TestAuthentication(t *testing.T) {\n\thash := hmac.New(sha256.New, []byte(\"secret-key\"))\n\tfor _, value := range testStrings {\n\t\thash.Reset()\n\t\tsigned := createMac(hash, []byte(value))\n\t\thash.Reset()\n\t\terr := verifyMac(hash, []byte(value), signed)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestEncription(t *testing.T) {\n\tblock, err := aes.NewCipher([]byte(\"1234567890123456\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Block could not be created\")\n\t}\n\tvar encrypted, decrypted []byte\n\tfor _, value := range testStrings {\n\t\tif encrypted, err = encrypt(block, []byte(value)); err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tif decrypted, err = decrypt(block, encrypted); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tif string(decrypted) != value {\n\t\t\t\tt.Errorf(\"Expected %v, got %v.\", value, string(decrypted))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSerialization(t *testing.T) {\n\tvar (\n\t\tserialized []byte\n\t\tdeserialized map[string]string\n\t\terr error\n\t)\n\tfor _, value := range testCookies {\n\t\tif serialized, err = serialize(value); err != nil {\n\t\t\tt.Error(err)\n\t\t} else {\n\t\t\tdeserialized = make(map[string]string)\n\t\t\tif err = deserialize(serialized, &deserialized); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tif fmt.Sprintf(\"%v\", deserialized) != fmt.Sprintf(\"%v\", value) {\n\t\t\t\tt.Errorf(\"Expected %v, got %v.\", value, deserialized)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestEncoding(t *testing.T) {\n\tfor _, value := range testStrings {\n\t\tencoded := encode([]byte(value))\n\t\tdecoded, err := decode(encoded)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t} else if string(decoded) != value {\n\t\t\tt.Errorf(\"Expected %v, got %s.\", value, string(decoded))\n\t\t}\n\t}\n}\n\nfunc TestMultiError(t *testing.T) {\n\ts1, s2 := New(nil, nil), New(nil, nil)\n\t_, err := EncodeMulti(\"sid\", \"value\", s1, s2)\n\tif len(err.(MultiError)) != 2 {\n\t\tt.Errorf(\"Expected 2 errors, got %s.\", err)\n\t} else {\n\t\tif strings.Index(err.Error(), \"hash key is not set\") == -1 {\n\t\t\tt.Errorf(\"Expected missing hash key error, got %s.\", err.Error())\n\t\t}\n\t}\n}\n\nfunc TestMultiNoCodecs(t *testing.T) {\n\t_, err := EncodeMulti(\"foo\", \"bar\")\n\tif err != errNoCodecs {\n\t\tt.Errorf(\"EncodeMulti: bad value for error, got: %v\", err)\n\t}\n\n\tvar dst []byte\n\terr = DecodeMulti(\"foo\", \"bar\", &dst)\n\tif err != errNoCodecs {\n\t\tt.Errorf(\"DecodeMulti: bad value for error, got: %v\", err)\n\t}\n}\n\nfunc TestMissingKey(t *testing.T) {\n\ts1 := New(nil, nil)\n\n\tvar dst []byte\n\terr := s1.Decode(\"sid\", \"value\", &dst)\n\tif err != errHashKeyNotSet {\n\t\tt.Fatalf(\"Expected %#v, got %#v\", errHashKeyNotSet, err)\n\t}\n}\n\n\/\/ ----------------------------------------------------------------------------\n\ntype FooBar struct {\n\tFoo int\n\tBar string\n}\n\nfunc TestCustomType(t *testing.T) {\n\ts1 := New([]byte(\"12345\"), []byte(\"1234567890123456\"))\n\t\/\/ Type is not registered in gob. (!!!)\n\tsrc := &FooBar{42, \"bar\"}\n\tencoded, _ := s1.Encode(\"sid\", src)\n\n\tdst := &FooBar{}\n\t_ = s1.Decode(\"sid\", encoded, dst)\n\tif dst.Foo != 42 || dst.Bar != \"bar\" {\n\t\tt.Fatalf(\"Expected %#v, got %#v\", src, dst)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package exec\n\nimport (\n\t\"archive\/tar\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/worker\"\n)\n\nconst taskProcessPropertyName = \"concourse:task-process\"\nconst taskExitStatusPropertyName = \"concourse:exit-status\"\n\n\/\/ MissingInputsError is returned when any of the task's required inputs are\n\/\/ missing.\ntype MissingInputsError struct {\n\tInputs []string\n}\n\n\/\/ Error prints a human-friendly message listing the inputs that were missing.\nfunc (err MissingInputsError) Error() string {\n\treturn fmt.Sprintf(\"missing inputs: %s\", strings.Join(err.Inputs, \", \"))\n}\n\ntype MissingTaskImageSourceError struct {\n\tSourceName string\n}\n\nfunc (err MissingTaskImageSourceError) Error() string {\n\treturn fmt.Sprintf(`missing image artifact source: %s\nmake sure there's a corresponding 'get' step, or a task that produces it as an output`, err.SourceName)\n}\n\n\/\/ TaskStep executes a TaskConfig, whose inputs will be fetched from the\n\/\/ worker.ArtifactRepository and outputs will be added to the worker.ArtifactRepository.\ntype TaskStep struct {\n\tlogger lager.Logger\n\tcontainerID worker.Identifier\n\tmetadata worker.Metadata\n\ttags atc.Tags\n\tteamID int\n\tdelegate TaskDelegate\n\tprivileged Privileged\n\tconfigSource TaskConfigSource\n\tworkerPool worker.Client\n\tartifactsRoot string\n\tresourceTypes atc.VersionedResourceTypes\n\tinputMapping map[string]string\n\toutputMapping map[string]string\n\timageArtifactName string\n\tclock clock.Clock\n\trepo *worker.ArtifactRepository\n\n\tprocess garden.Process\n\n\texitStatus int\n}\n\nfunc newTaskStep(\n\tlogger lager.Logger,\n\tcontainerID worker.Identifier,\n\tmetadata worker.Metadata,\n\ttags atc.Tags,\n\tteamID int,\n\tdelegate TaskDelegate,\n\tprivileged Privileged,\n\tconfigSource TaskConfigSource,\n\tworkerPool worker.Client,\n\tartifactsRoot string,\n\tresourceTypes atc.VersionedResourceTypes,\n\tinputMapping map[string]string,\n\toutputMapping map[string]string,\n\timageArtifactName string,\n\tclock clock.Clock,\n) TaskStep {\n\treturn TaskStep{\n\t\tlogger: logger,\n\t\tcontainerID: containerID,\n\t\tmetadata: metadata,\n\t\ttags: tags,\n\t\tteamID: teamID,\n\t\tdelegate: delegate,\n\t\tprivileged: privileged,\n\t\tconfigSource: configSource,\n\t\tworkerPool: workerPool,\n\t\tartifactsRoot: artifactsRoot,\n\t\tresourceTypes: resourceTypes,\n\t\tinputMapping: inputMapping,\n\t\toutputMapping: outputMapping,\n\t\timageArtifactName: imageArtifactName,\n\t\tclock: clock,\n\t}\n}\n\n\/\/ Using finishes construction of the TaskStep and returns a *TaskStep. If the\n\/\/ *TaskStep errors, its error is reported to the delegate.\nfunc (step TaskStep) Using(prev Step, repo *worker.ArtifactRepository) Step {\n\tstep.repo = repo\n\n\treturn errorReporter{\n\t\tStep: &step,\n\t\tReportFailure: step.delegate.Failed,\n\t}\n}\n\n\/\/ Run will first load the TaskConfig. A worker will be selected based on the\n\/\/ TaskConfig's platform, the TaskStep's tags, and prioritized by availability\n\/\/ of volumes for the TaskConfig's inputs. Inputs that did not have volumes\n\/\/ available on the worker will be streamed in to the container.\n\/\/\n\/\/ If any inputs are not available in the worker.ArtifactRepository, MissingInputsError\n\/\/ is returned.\n\/\/\n\/\/ Once all the inputs are satisfies, the task's script will be executed, and\n\/\/ the RunStep indicates that it's ready, and any signals will be forwarded to\n\/\/ the script.\n\/\/\n\/\/ If the script exits successfully, the outputs specified in the TaskConfig\n\/\/ are registered with the worker.ArtifactRepository. If no outputs are specified, the\n\/\/ task's entire working directory is registered as an ArtifactSource under the\n\/\/ name of the task.\nfunc (step *TaskStep) Run(signals <-chan os.Signal, ready chan<- struct{}) error {\n\tvar err error\n\tvar found bool\n\n\tprocessIO := garden.ProcessIO{\n\t\tStdout: step.delegate.Stdout(),\n\t\tStderr: step.delegate.Stderr(),\n\t}\n\n\tdeprecationConfigSource := DeprecationConfigSource{\n\t\tDelegate: step.configSource,\n\t\tStderr: step.delegate.Stderr(),\n\t}\n\n\tconfig, err := deprecationConfigSource.FetchConfig(step.repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstep.metadata.EnvironmentVariables = step.envForParams(config.Params)\n\n\trunContainerID := step.containerID\n\trunContainerID.Stage = db.ContainerStageRun\n\n\tcontainer, found, err := step.workerPool.FindContainerForIdentifier(\n\t\tstep.logger.Session(\"found-container\"),\n\t\trunContainerID,\n\t)\n\n\tif err == nil && found {\n\t\texitStatusProp, err := container.Property(taskExitStatusPropertyName)\n\t\tif err == nil {\n\t\t\tstep.logger.Info(\"already-exited\", lager.Data{\"status\": exitStatusProp})\n\n\t\t\t\/\/ process already completed; recover result\n\n\t\t\t_, err = fmt.Sscanf(exitStatusProp, \"%d\", &step.exitStatus)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tstep.registerSource(config, container)\n\t\t\treturn nil\n\t\t}\n\n\t\tprocessID, err := container.Property(taskProcessPropertyName)\n\t\tif err != nil {\n\t\t\t\/\/ rogue container? perhaps did not shut down cleanly.\n\t\t\treturn err\n\t\t}\n\n\t\tstep.logger.Info(\"already-running\", lager.Data{\"process-id\": processID})\n\n\t\t\/\/ process still running; re-attach\n\t\tstep.process, err = container.Attach(processID, processIO)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.logger.Info(\"attached\")\n\t} else {\n\t\t\/\/ container does not exist; new session\n\n\t\tstep.delegate.Initializing(config)\n\n\t\tworkerSpec := worker.WorkerSpec{\n\t\t\tPlatform: config.Platform,\n\t\t\tTags: step.tags,\n\t\t\tTeamID: step.teamID,\n\t\t}\n\n\t\tif config.ImageResource != nil {\n\t\t\tworkerSpec.ResourceType = config.ImageResource.Type\n\t\t}\n\n\t\tcontainer, err = step.createContainer(config, signals)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.delegate.Started()\n\n\t\tstep.process, err = container.Run(garden.ProcessSpec{\n\t\t\tPath: config.Run.Path,\n\t\t\tArgs: config.Run.Args,\n\t\t\tEnv: step.envForParams(config.Params),\n\n\t\t\tDir: path.Join(step.artifactsRoot, config.Run.Dir),\n\t\t\tTTY: &garden.TTYSpec{},\n\t\t}, processIO)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = container.SetProperty(taskProcessPropertyName, step.process.ID())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tclose(ready)\n\n\texited := make(chan struct{})\n\tvar processStatus int\n\tvar processErr error\n\n\tgo func() {\n\t\tprocessStatus, processErr = step.process.Wait()\n\t\tclose(exited)\n\t}()\n\n\tselect {\n\tcase <-signals:\n\t\tstep.registerSource(config, container)\n\n\t\terr = container.Stop(false)\n\t\tif err != nil {\n\t\t\tstep.logger.Error(\"stopping-container\", err)\n\t\t}\n\n\t\t<-exited\n\n\t\treturn ErrInterrupted\n\n\tcase <-exited:\n\t\tif processErr != nil {\n\t\t\treturn processErr\n\t\t}\n\n\t\tstep.registerSource(config, container)\n\n\t\tstep.exitStatus = processStatus\n\n\t\terr := container.SetProperty(taskExitStatusPropertyName, fmt.Sprintf(\"%d\", processStatus))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.delegate.Finished(ExitStatus(processStatus))\n\n\t\treturn nil\n\t}\n}\n\nfunc (step *TaskStep) createContainer(config atc.TaskConfig, signals <-chan os.Signal) (worker.Container, error) {\n\timageSpec := worker.ImageSpec{\n\t\tPrivileged: bool(step.privileged),\n\t}\n\tif step.imageArtifactName != \"\" {\n\t\tsource, found := step.repo.SourceFor(worker.ArtifactName(step.imageArtifactName))\n\t\tif !found {\n\t\t\treturn nil, MissingTaskImageSourceError{step.imageArtifactName}\n\t\t}\n\n\t\timageSpec.ImageArtifactSource = source\n\t\timageSpec.ImageArtifactName = worker.ArtifactName(step.imageArtifactName)\n\t} else {\n\t\timageSpec.ImageURL = config.Image\n\t\timageSpec.ImageResource = config.ImageResource\n\t}\n\n\trunContainerID := step.containerID\n\trunContainerID.Stage = db.ContainerStageRun\n\n\tcontainerSpec := worker.ContainerSpec{\n\t\tPlatform: config.Platform,\n\t\tTags: step.tags,\n\t\tTeamID: step.teamID,\n\t\tImageSpec: imageSpec,\n\t\tUser: config.Run.User,\n\t\tDir: step.artifactsRoot,\n\n\t\tInputs: []worker.InputSource{},\n\t\tOutputs: worker.OutputPaths{},\n\t}\n\n\tvar missingInputs []string\n\tfor _, input := range config.Inputs {\n\t\tinputName := input.Name\n\t\tif sourceName, ok := step.inputMapping[inputName]; ok {\n\t\t\tinputName = sourceName\n\t\t}\n\n\t\tsource, found := step.repo.SourceFor(worker.ArtifactName(inputName))\n\t\tif !found {\n\t\t\tmissingInputs = append(missingInputs, inputName)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontainerSpec.Inputs = append(containerSpec.Inputs, &taskInputSource{\n\t\t\tname: worker.ArtifactName(inputName),\n\t\t\tconfig: input,\n\t\t\tsource: source,\n\t\t\tartifactsRoot: step.artifactsRoot,\n\t\t})\n\t}\n\n\tif len(missingInputs) > 0 {\n\t\treturn nil, MissingInputsError{missingInputs}\n\t}\n\n\tfor _, output := range config.Outputs {\n\t\tpath := artifactsPath(output, step.artifactsRoot)\n\t\tcontainerSpec.Outputs[output.Name] = path\n\t}\n\n\tcontainer, err := step.workerPool.FindOrCreateBuildContainer(\n\t\tstep.logger,\n\t\tsignals,\n\t\tstep.delegate,\n\t\trunContainerID,\n\t\tstep.metadata,\n\t\tcontainerSpec,\n\t\tstep.resourceTypes,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn container, nil\n}\n\nfunc (step *TaskStep) registerSource(config atc.TaskConfig, container worker.Container) {\n\tvolumeMounts := container.VolumeMounts()\n\n\tstep.logger.Debug(\"registering-outputs\", lager.Data{\"config\": config})\n\n\tfor _, output := range config.Outputs {\n\t\toutputName := output.Name\n\t\tif destinationName, ok := step.outputMapping[output.Name]; ok {\n\t\t\toutputName = destinationName\n\t\t}\n\n\t\toutputPath := artifactsPath(output, step.artifactsRoot)\n\n\t\tfor _, mount := range volumeMounts {\n\t\t\tif mount.MountPath == outputPath {\n\t\t\t\tsource := newVolumeSource(step.logger, mount.Volume)\n\t\t\t\tstep.repo.RegisterSource(worker.ArtifactName(outputName), source)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Result indicates Success as true if the script's exit status was 0.\n\/\/\n\/\/ It also indicates ExitStatus as the exit status of the script.\n\/\/\n\/\/ All other types are ignored.\nfunc (step *TaskStep) Result(x interface{}) bool {\n\tswitch v := x.(type) {\n\tcase *Success:\n\t\t*v = step.exitStatus == 0\n\t\treturn true\n\n\tcase *ExitStatus:\n\t\t*v = ExitStatus(step.exitStatus)\n\t\treturn true\n\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (TaskStep) envForParams(params map[string]string) []string {\n\tenv := make([]string, 0, len(params))\n\n\tfor k, v := range params {\n\t\tenv = append(env, k+\"=\"+v)\n\t}\n\n\treturn env\n}\n\ntype volumeSource struct {\n\tlogger lager.Logger\n\tvolume worker.Volume\n}\n\nfunc newVolumeSource(\n\tlogger lager.Logger,\n\tvolume worker.Volume,\n) *volumeSource {\n\treturn &volumeSource{\n\t\tlogger: logger,\n\t\tvolume: volume,\n\t}\n}\n\nfunc (src *volumeSource) StreamTo(destination worker.ArtifactDestination) error {\n\tout, err := src.volume.StreamOut(\".\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer out.Close()\n\n\treturn destination.StreamIn(\".\", out)\n}\n\nfunc (src *volumeSource) StreamFile(filename string) (io.ReadCloser, error) {\n\tout, err := src.volume.StreamOut(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttarReader := tar.NewReader(out)\n\n\t_, err = tarReader.Next()\n\tif err != nil {\n\t\treturn nil, FileNotFoundError{Path: filename}\n\t}\n\n\treturn fileReadCloser{\n\t\tReader: tarReader,\n\t\tCloser: out,\n\t}, nil\n}\n\nfunc (src *volumeSource) VolumeOn(w worker.Worker) (worker.Volume, bool, error) {\n\treturn w.LookupVolume(src.logger, src.volume.Handle())\n}\n\ntype taskInputSource struct {\n\tname worker.ArtifactName\n\tconfig atc.TaskInputConfig\n\tsource worker.ArtifactSource\n\tartifactsRoot string\n}\n\nfunc (s *taskInputSource) Name() worker.ArtifactName { return s.name }\nfunc (s *taskInputSource) Source() worker.ArtifactSource { return s.source }\n\nfunc (s *taskInputSource) DestinationPath() string {\n\tsubdir := s.config.Path\n\tif s.config.Path == \"\" {\n\t\tsubdir = s.config.Name\n\t}\n\n\treturn filepath.Join(s.artifactsRoot, subdir)\n}\n\nfunc artifactsPath(outputConfig atc.TaskOutputConfig, artifactsRoot string) string {\n\toutputSrc := outputConfig.Path\n\tif len(outputSrc) == 0 {\n\t\toutputSrc = outputConfig.Name\n\t}\n\n\treturn path.Join(artifactsRoot, outputSrc) + \"\/\"\n}\n<commit_msg>remove unused workerSpec var<commit_after>package exec\n\nimport (\n\t\"archive\/tar\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/worker\"\n)\n\nconst taskProcessPropertyName = \"concourse:task-process\"\nconst taskExitStatusPropertyName = \"concourse:exit-status\"\n\n\/\/ MissingInputsError is returned when any of the task's required inputs are\n\/\/ missing.\ntype MissingInputsError struct {\n\tInputs []string\n}\n\n\/\/ Error prints a human-friendly message listing the inputs that were missing.\nfunc (err MissingInputsError) Error() string {\n\treturn fmt.Sprintf(\"missing inputs: %s\", strings.Join(err.Inputs, \", \"))\n}\n\ntype MissingTaskImageSourceError struct {\n\tSourceName string\n}\n\nfunc (err MissingTaskImageSourceError) Error() string {\n\treturn fmt.Sprintf(`missing image artifact source: %s\nmake sure there's a corresponding 'get' step, or a task that produces it as an output`, err.SourceName)\n}\n\n\/\/ TaskStep executes a TaskConfig, whose inputs will be fetched from the\n\/\/ worker.ArtifactRepository and outputs will be added to the worker.ArtifactRepository.\ntype TaskStep struct {\n\tlogger lager.Logger\n\tcontainerID worker.Identifier\n\tmetadata worker.Metadata\n\ttags atc.Tags\n\tteamID int\n\tdelegate TaskDelegate\n\tprivileged Privileged\n\tconfigSource TaskConfigSource\n\tworkerPool worker.Client\n\tartifactsRoot string\n\tresourceTypes atc.VersionedResourceTypes\n\tinputMapping map[string]string\n\toutputMapping map[string]string\n\timageArtifactName string\n\tclock clock.Clock\n\trepo *worker.ArtifactRepository\n\n\tprocess garden.Process\n\n\texitStatus int\n}\n\nfunc newTaskStep(\n\tlogger lager.Logger,\n\tcontainerID worker.Identifier,\n\tmetadata worker.Metadata,\n\ttags atc.Tags,\n\tteamID int,\n\tdelegate TaskDelegate,\n\tprivileged Privileged,\n\tconfigSource TaskConfigSource,\n\tworkerPool worker.Client,\n\tartifactsRoot string,\n\tresourceTypes atc.VersionedResourceTypes,\n\tinputMapping map[string]string,\n\toutputMapping map[string]string,\n\timageArtifactName string,\n\tclock clock.Clock,\n) TaskStep {\n\treturn TaskStep{\n\t\tlogger: logger,\n\t\tcontainerID: containerID,\n\t\tmetadata: metadata,\n\t\ttags: tags,\n\t\tteamID: teamID,\n\t\tdelegate: delegate,\n\t\tprivileged: privileged,\n\t\tconfigSource: configSource,\n\t\tworkerPool: workerPool,\n\t\tartifactsRoot: artifactsRoot,\n\t\tresourceTypes: resourceTypes,\n\t\tinputMapping: inputMapping,\n\t\toutputMapping: outputMapping,\n\t\timageArtifactName: imageArtifactName,\n\t\tclock: clock,\n\t}\n}\n\n\/\/ Using finishes construction of the TaskStep and returns a *TaskStep. If the\n\/\/ *TaskStep errors, its error is reported to the delegate.\nfunc (step TaskStep) Using(prev Step, repo *worker.ArtifactRepository) Step {\n\tstep.repo = repo\n\n\treturn errorReporter{\n\t\tStep: &step,\n\t\tReportFailure: step.delegate.Failed,\n\t}\n}\n\n\/\/ Run will first load the TaskConfig. A worker will be selected based on the\n\/\/ TaskConfig's platform, the TaskStep's tags, and prioritized by availability\n\/\/ of volumes for the TaskConfig's inputs. Inputs that did not have volumes\n\/\/ available on the worker will be streamed in to the container.\n\/\/\n\/\/ If any inputs are not available in the worker.ArtifactRepository, MissingInputsError\n\/\/ is returned.\n\/\/\n\/\/ Once all the inputs are satisfies, the task's script will be executed, and\n\/\/ the RunStep indicates that it's ready, and any signals will be forwarded to\n\/\/ the script.\n\/\/\n\/\/ If the script exits successfully, the outputs specified in the TaskConfig\n\/\/ are registered with the worker.ArtifactRepository. If no outputs are specified, the\n\/\/ task's entire working directory is registered as an ArtifactSource under the\n\/\/ name of the task.\nfunc (step *TaskStep) Run(signals <-chan os.Signal, ready chan<- struct{}) error {\n\tvar err error\n\tvar found bool\n\n\tprocessIO := garden.ProcessIO{\n\t\tStdout: step.delegate.Stdout(),\n\t\tStderr: step.delegate.Stderr(),\n\t}\n\n\tdeprecationConfigSource := DeprecationConfigSource{\n\t\tDelegate: step.configSource,\n\t\tStderr: step.delegate.Stderr(),\n\t}\n\n\tconfig, err := deprecationConfigSource.FetchConfig(step.repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstep.metadata.EnvironmentVariables = step.envForParams(config.Params)\n\n\trunContainerID := step.containerID\n\trunContainerID.Stage = db.ContainerStageRun\n\n\tcontainer, found, err := step.workerPool.FindContainerForIdentifier(\n\t\tstep.logger.Session(\"found-container\"),\n\t\trunContainerID,\n\t)\n\n\tif err == nil && found {\n\t\texitStatusProp, err := container.Property(taskExitStatusPropertyName)\n\t\tif err == nil {\n\t\t\tstep.logger.Info(\"already-exited\", lager.Data{\"status\": exitStatusProp})\n\n\t\t\t\/\/ process already completed; recover result\n\n\t\t\t_, err = fmt.Sscanf(exitStatusProp, \"%d\", &step.exitStatus)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tstep.registerSource(config, container)\n\t\t\treturn nil\n\t\t}\n\n\t\tprocessID, err := container.Property(taskProcessPropertyName)\n\t\tif err != nil {\n\t\t\t\/\/ rogue container? perhaps did not shut down cleanly.\n\t\t\treturn err\n\t\t}\n\n\t\tstep.logger.Info(\"already-running\", lager.Data{\"process-id\": processID})\n\n\t\t\/\/ process still running; re-attach\n\t\tstep.process, err = container.Attach(processID, processIO)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.logger.Info(\"attached\")\n\t} else {\n\t\t\/\/ container does not exist; new session\n\n\t\tstep.delegate.Initializing(config)\n\n\t\tcontainer, err = step.createContainer(config, signals)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.delegate.Started()\n\n\t\tstep.process, err = container.Run(garden.ProcessSpec{\n\t\t\tPath: config.Run.Path,\n\t\t\tArgs: config.Run.Args,\n\t\t\tEnv: step.envForParams(config.Params),\n\n\t\t\tDir: path.Join(step.artifactsRoot, config.Run.Dir),\n\t\t\tTTY: &garden.TTYSpec{},\n\t\t}, processIO)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = container.SetProperty(taskProcessPropertyName, step.process.ID())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tclose(ready)\n\n\texited := make(chan struct{})\n\tvar processStatus int\n\tvar processErr error\n\n\tgo func() {\n\t\tprocessStatus, processErr = step.process.Wait()\n\t\tclose(exited)\n\t}()\n\n\tselect {\n\tcase <-signals:\n\t\tstep.registerSource(config, container)\n\n\t\terr = container.Stop(false)\n\t\tif err != nil {\n\t\t\tstep.logger.Error(\"stopping-container\", err)\n\t\t}\n\n\t\t<-exited\n\n\t\treturn ErrInterrupted\n\n\tcase <-exited:\n\t\tif processErr != nil {\n\t\t\treturn processErr\n\t\t}\n\n\t\tstep.registerSource(config, container)\n\n\t\tstep.exitStatus = processStatus\n\n\t\terr := container.SetProperty(taskExitStatusPropertyName, fmt.Sprintf(\"%d\", processStatus))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstep.delegate.Finished(ExitStatus(processStatus))\n\n\t\treturn nil\n\t}\n}\n\nfunc (step *TaskStep) createContainer(config atc.TaskConfig, signals <-chan os.Signal) (worker.Container, error) {\n\timageSpec := worker.ImageSpec{\n\t\tPrivileged: bool(step.privileged),\n\t}\n\tif step.imageArtifactName != \"\" {\n\t\tsource, found := step.repo.SourceFor(worker.ArtifactName(step.imageArtifactName))\n\t\tif !found {\n\t\t\treturn nil, MissingTaskImageSourceError{step.imageArtifactName}\n\t\t}\n\n\t\timageSpec.ImageArtifactSource = source\n\t\timageSpec.ImageArtifactName = worker.ArtifactName(step.imageArtifactName)\n\t} else {\n\t\timageSpec.ImageURL = config.Image\n\t\timageSpec.ImageResource = config.ImageResource\n\t}\n\n\trunContainerID := step.containerID\n\trunContainerID.Stage = db.ContainerStageRun\n\n\tcontainerSpec := worker.ContainerSpec{\n\t\tPlatform: config.Platform,\n\t\tTags: step.tags,\n\t\tTeamID: step.teamID,\n\t\tImageSpec: imageSpec,\n\t\tUser: config.Run.User,\n\t\tDir: step.artifactsRoot,\n\n\t\tInputs: []worker.InputSource{},\n\t\tOutputs: worker.OutputPaths{},\n\t}\n\n\tvar missingInputs []string\n\tfor _, input := range config.Inputs {\n\t\tinputName := input.Name\n\t\tif sourceName, ok := step.inputMapping[inputName]; ok {\n\t\t\tinputName = sourceName\n\t\t}\n\n\t\tsource, found := step.repo.SourceFor(worker.ArtifactName(inputName))\n\t\tif !found {\n\t\t\tmissingInputs = append(missingInputs, inputName)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontainerSpec.Inputs = append(containerSpec.Inputs, &taskInputSource{\n\t\t\tname: worker.ArtifactName(inputName),\n\t\t\tconfig: input,\n\t\t\tsource: source,\n\t\t\tartifactsRoot: step.artifactsRoot,\n\t\t})\n\t}\n\n\tif len(missingInputs) > 0 {\n\t\treturn nil, MissingInputsError{missingInputs}\n\t}\n\n\tfor _, output := range config.Outputs {\n\t\tpath := artifactsPath(output, step.artifactsRoot)\n\t\tcontainerSpec.Outputs[output.Name] = path\n\t}\n\n\tcontainer, err := step.workerPool.FindOrCreateBuildContainer(\n\t\tstep.logger,\n\t\tsignals,\n\t\tstep.delegate,\n\t\trunContainerID,\n\t\tstep.metadata,\n\t\tcontainerSpec,\n\t\tstep.resourceTypes,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn container, nil\n}\n\nfunc (step *TaskStep) registerSource(config atc.TaskConfig, container worker.Container) {\n\tvolumeMounts := container.VolumeMounts()\n\n\tstep.logger.Debug(\"registering-outputs\", lager.Data{\"config\": config})\n\n\tfor _, output := range config.Outputs {\n\t\toutputName := output.Name\n\t\tif destinationName, ok := step.outputMapping[output.Name]; ok {\n\t\t\toutputName = destinationName\n\t\t}\n\n\t\toutputPath := artifactsPath(output, step.artifactsRoot)\n\n\t\tfor _, mount := range volumeMounts {\n\t\t\tif mount.MountPath == outputPath {\n\t\t\t\tsource := newVolumeSource(step.logger, mount.Volume)\n\t\t\t\tstep.repo.RegisterSource(worker.ArtifactName(outputName), source)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Result indicates Success as true if the script's exit status was 0.\n\/\/\n\/\/ It also indicates ExitStatus as the exit status of the script.\n\/\/\n\/\/ All other types are ignored.\nfunc (step *TaskStep) Result(x interface{}) bool {\n\tswitch v := x.(type) {\n\tcase *Success:\n\t\t*v = step.exitStatus == 0\n\t\treturn true\n\n\tcase *ExitStatus:\n\t\t*v = ExitStatus(step.exitStatus)\n\t\treturn true\n\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (TaskStep) envForParams(params map[string]string) []string {\n\tenv := make([]string, 0, len(params))\n\n\tfor k, v := range params {\n\t\tenv = append(env, k+\"=\"+v)\n\t}\n\n\treturn env\n}\n\ntype volumeSource struct {\n\tlogger lager.Logger\n\tvolume worker.Volume\n}\n\nfunc newVolumeSource(\n\tlogger lager.Logger,\n\tvolume worker.Volume,\n) *volumeSource {\n\treturn &volumeSource{\n\t\tlogger: logger,\n\t\tvolume: volume,\n\t}\n}\n\nfunc (src *volumeSource) StreamTo(destination worker.ArtifactDestination) error {\n\tout, err := src.volume.StreamOut(\".\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer out.Close()\n\n\treturn destination.StreamIn(\".\", out)\n}\n\nfunc (src *volumeSource) StreamFile(filename string) (io.ReadCloser, error) {\n\tout, err := src.volume.StreamOut(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttarReader := tar.NewReader(out)\n\n\t_, err = tarReader.Next()\n\tif err != nil {\n\t\treturn nil, FileNotFoundError{Path: filename}\n\t}\n\n\treturn fileReadCloser{\n\t\tReader: tarReader,\n\t\tCloser: out,\n\t}, nil\n}\n\nfunc (src *volumeSource) VolumeOn(w worker.Worker) (worker.Volume, bool, error) {\n\treturn w.LookupVolume(src.logger, src.volume.Handle())\n}\n\ntype taskInputSource struct {\n\tname worker.ArtifactName\n\tconfig atc.TaskInputConfig\n\tsource worker.ArtifactSource\n\tartifactsRoot string\n}\n\nfunc (s *taskInputSource) Name() worker.ArtifactName { return s.name }\nfunc (s *taskInputSource) Source() worker.ArtifactSource { return s.source }\n\nfunc (s *taskInputSource) DestinationPath() string {\n\tsubdir := s.config.Path\n\tif s.config.Path == \"\" {\n\t\tsubdir = s.config.Name\n\t}\n\n\treturn filepath.Join(s.artifactsRoot, subdir)\n}\n\nfunc artifactsPath(outputConfig atc.TaskOutputConfig, artifactsRoot string) string {\n\toutputSrc := outputConfig.Path\n\tif len(outputSrc) == 0 {\n\t\toutputSrc = outputConfig.Name\n\t}\n\n\treturn path.Join(artifactsRoot, outputSrc) + \"\/\"\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/secsy\/goftp\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\n\/\/ FTPUpload uploades the file using FTP. Job Destination should be\n\/\/ in format: ftp:\/\/login:password@host\/path\nfunc FTPUpload(jobID string) error {\n\tdbInstance, err := db.GetDatabase()\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\n\tjob.Status = types.jobUploading\n\tdbInstance.UpdateJob(job.ID, job)\n\n\tu, err := url.Parse(job.Destination)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpw, isSet := u.User.Password()\n\tif !isSet {\n\t\tpw = \"\"\n\t}\n\n\tconfig := goftp.Config{\n\t\tUser: u.User.Username(),\n\t\tPassword: pw,\n\t\tConnectionsPerHost: 10,\n\t\tTimeout: 10 * time.Second,\n\t\tLogger: os.Stderr,\n\t}\n\n\tclient, err := goftp.DialConfig(config, u.Host+\":21\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocalFile, err := os.Open(job.LocalDestination)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = client.Store(u.Path, localFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>fix tests<commit_after>package core\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/secsy\/goftp\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\n\/\/ FTPUpload uploades the file using FTP. Job Destination should be\n\/\/ in format: ftp:\/\/login:password@host\/path\nfunc FTPUpload(jobID string) error {\n\tdbInstance, err := db.GetDatabase()\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\n\tjob.Status = types.JobUploading\n\tdbInstance.UpdateJob(job.ID, job)\n\n\tu, err := url.Parse(job.Destination)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpw, isSet := u.User.Password()\n\tif !isSet {\n\t\tpw = \"\"\n\t}\n\n\tconfig := goftp.Config{\n\t\tUser: u.User.Username(),\n\t\tPassword: pw,\n\t\tConnectionsPerHost: 10,\n\t\tTimeout: 10 * time.Second,\n\t\tLogger: os.Stderr,\n\t}\n\n\tclient, err := goftp.DialConfig(config, u.Host+\":21\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlocalFile, err := os.Open(job.LocalDestination)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = client.Store(u.Path, localFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package message\n\ntype Message struct {\n\tFrom string\n\tPrevious string\n\tSeq string\n\tTimestamp string\n\n\tContent string\n\n\tHash string\n\tSignature string\n}\n<commit_msg>[message] Update placeholder<commit_after>package message\n\nimport (\n\t\"time\"\n)\n\ntype Message struct {\n\tFrom string\n\tPrevious string\n\tSeq int\n\tTimestamp time.Time\n\n\tContent string\n\n\tHash string\n\tSignature string\n}\n\nfunc (m *Message) IsValid() bool {\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/af83\/edwig\/logger\"\n\t\"github.com\/af83\/edwig\/model\"\n)\n\ntype SubscriptionId string\n\ntype Subscription struct {\n\tmodel.ClockConsumer\n\tmanager Subscriptions\n\n\tid SubscriptionId\n\tkind string\n\texternalId string\n\n\tresourcesByObjectID map[string]*SubscribedResource\n\tsubscriptionOptions map[string]string\n}\n\ntype SubscribedResource struct {\n\tReference model.Reference\n\tRetryCount int\n\tSubscribedAt time.Time\n\tSubscribedUntil time.Time\n\tLastStates map[string]lastState `json:\",omitempty\"`\n\tresourcesOptions map[string]string\n}\n\nfunc NewResource(ref model.Reference) SubscribedResource {\n\tressource := SubscribedResource{\n\t\tReference: ref,\n\t\tLastStates: make(map[string]lastState),\n\t\tresourcesOptions: make(map[string]string),\n\t}\n\n\treturn ressource\n}\n\nfunc (sr *SubscribedResource) ResourcesOptions() map[string]string {\n\treturn sr.resourcesOptions\n}\n\ntype APISubscription struct {\n\tKind string\n\tReferences []model.Reference\n}\n\nfunc (subscription *Subscription) SetDefinition(apisub *APISubscription) {\n\tsubscription.kind = apisub.Kind\n\tfor _, ref := range apisub.References {\n\t\tif ref.ObjectId != nil {\n\t\t\tsubscription.CreateAddNewResource(ref)\n\t\t}\n\t}\n}\n\nfunc (subscription *Subscription) Id() SubscriptionId {\n\treturn subscription.id\n}\n\nfunc (subscription *Subscription) Kind() string {\n\treturn subscription.kind\n}\n\nfunc (subscription *Subscription) ExternalId() string {\n\treturn subscription.externalId\n}\n\nfunc (subscription *Subscription) SubscriptionOptions() map[string]string {\n\treturn subscription.subscriptionOptions\n}\n\nfunc (subscription *Subscription) SetKind(kind string) {\n\tsubscription.kind = kind\n}\n\nfunc (subscription *Subscription) SetExternalId(externalId string) {\n\tsubscription.externalId = externalId\n}\n\nfunc (subscription *Subscription) Save() (ok bool) {\n\tok = subscription.manager.Save(subscription)\n\treturn\n}\n\nfunc (subscription *Subscription) Delete() (ok bool) {\n\tok = subscription.manager.Delete(subscription)\n\treturn\n}\n\nfunc (subscription *Subscription) ResourcesByObjectID() map[string]*SubscribedResource {\n\treturn subscription.resourcesByObjectID\n}\n\nfunc (subscription *Subscription) MarshalJSON() ([]byte, error) {\n\tresources := make([]*SubscribedResource, 0)\n\tfor _, resource := range subscription.resourcesByObjectID {\n\t\tresources = append(resources, resource)\n\t}\n\n\taux := struct {\n\t\tId SubscriptionId `json:\"SubscriptionRef,omitempty\"`\n\t\tExternalId string `json:\"ExternalId,omitempty\"`\n\t\tKind string `json:\",omitempty\"`\n\t\tResources []*SubscribedResource `json:\",omitempty\"`\n\t}{\n\t\tId: subscription.id,\n\t\tExternalId: subscription.externalId,\n\t\tKind: subscription.kind,\n\t\tResources: resources,\n\t}\n\treturn json.Marshal(&aux)\n}\n\nfunc (subscription *Subscription) Resource(obj model.ObjectID) *SubscribedResource {\n\tsub, present := subscription.resourcesByObjectID[obj.String()]\n\tif !present {\n\t\treturn nil\n\t}\n\treturn sub\n}\n\nfunc (subscription *Subscription) Resources(now time.Time) []*SubscribedResource {\n\tressources := []*SubscribedResource{}\n\n\tfor _, ressource := range subscription.resourcesByObjectID {\n\t\tif ressource.SubscribedUntil.After(subscription.Clock().Now()) {\n\t\t\tressources = append(ressources, ressource)\n\t\t}\n\t}\n\treturn ressources\n}\n\nfunc (subscription *Subscription) AddNewResource(resource SubscribedResource) {\n\tsubscription.resourcesByObjectID[resource.Reference.ObjectId.String()] = &resource\n}\n\nfunc (subscription *Subscription) CreateAddNewResource(reference model.Reference) *SubscribedResource {\n\tlogger.Log.Debugf(\"Create subscribed resource for %v\", reference.ObjectId.String())\n\n\tresource := SubscribedResource{\n\t\tReference: reference,\n\t\tSubscribedUntil: subscription.Clock().Now().Add(1 * time.Minute),\n\t\tLastStates: make(map[string]lastState),\n\t\tresourcesOptions: make(map[string]string),\n\t}\n\tsubscription.resourcesByObjectID[reference.ObjectId.String()] = &resource\n\treturn &resource\n}\n\nfunc (subscription *Subscription) DeleteResource(key string) {\n\tdelete(subscription.resourcesByObjectID, key)\n}\n\ntype MemorySubscriptions struct {\n\tmodel.UUIDConsumer\n\n\tmutex *sync.RWMutex\n\tpartner *Partner\n\n\tbyIdentifier map[SubscriptionId]*Subscription\n}\n\nfunc (manager *MemorySubscriptions) MarshalJSON() ([]byte, error) {\n\tsubscriptions := make([]*Subscription, 0)\n\tfor _, subscription := range manager.byIdentifier {\n\t\tsubscriptions = append(subscriptions, subscription)\n\t}\n\n\treturn json.Marshal(subscriptions)\n}\n\ntype Subscriptions interface {\n\tmodel.UUIDInterface\n\n\tNew(kind string) *Subscription\n\tFind(id SubscriptionId) (*Subscription, bool)\n\tFindAll() []*Subscription\n\tFindOrCreateByKind(string) *Subscription\n\tFindByKind(string) (*Subscription, bool)\n\tFindSubscriptionsByKind(string) []*Subscription\n\tFindBroadcastSubscriptions() []*Subscription\n\tSave(Subscription *Subscription) bool\n\tDelete(Subscription *Subscription) bool\n\tDeleteById(id SubscriptionId)\n\tCancelSubscriptions()\n\tCancelBroadcastSubscriptions()\n\tCancelCollectSubscriptions()\n\tFindByRessourceId(id, kind string) []*Subscription\n\tFindByExternalId(externalId string) (*Subscription, bool)\n}\n\nfunc NewMemorySubscriptions(partner *Partner) *MemorySubscriptions {\n\treturn &MemorySubscriptions{\n\t\tmutex: &sync.RWMutex{},\n\t\tbyIdentifier: make(map[SubscriptionId]*Subscription),\n\t\tpartner: partner,\n\t}\n}\n\nfunc (manager *MemorySubscriptions) New(kind string) *Subscription {\n\tlogger.Log.Debugf(\"Creating subscription with kind %v\", kind)\n\tsubscription := &Subscription{\n\t\tkind: kind,\n\t\tmanager: manager,\n\t\tresourcesByObjectID: make(map[string]*SubscribedResource),\n\t\tsubscriptionOptions: make(map[string]string),\n\t}\n\tsubscription.Save()\n\treturn subscription\n}\n\nfunc (manager *MemorySubscriptions) FindByKind(kind string) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind {\n\t\t\treturn subscription, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (manager *MemorySubscriptions) FindSubscriptionsByKind(kind string) (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindBroadcastSubscriptions() (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId != \"\" {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindByExternalId(externalId string) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.ExternalId() == externalId {\n\t\t\treturn subscription, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (manager *MemorySubscriptions) FindByRessourceId(id, kind string) []*Subscription {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\tsubscriptions := []*Subscription{}\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\t_, ok := subscription.resourcesByObjectID[id]\n\t\tif ok && subscription.kind == kind {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindOrCreateByKind(kind string) *Subscription {\n\tmaxResource, _ := strconv.Atoi(manager.partner.Setting(\"subscriptions.maximum_resources\"))\n\tif maxResource == 1 {\n\t\treturn manager.New(kind)\n\t}\n\n\tmanager.mutex.RLock()\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind && (maxResource < 1 || len(subscription.resourcesByObjectID) < maxResource) {\n\t\t\tmanager.mutex.RUnlock()\n\t\t\treturn subscription\n\t\t}\n\t}\n\tmanager.mutex.RUnlock()\n\n\treturn manager.New(kind)\n}\n\nfunc (manager *MemorySubscriptions) Find(id SubscriptionId) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tsubscription, ok := manager.byIdentifier[id]\n\tif ok {\n\t\treturn subscription, true\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (manager *MemorySubscriptions) FindAll() (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tif len(manager.byIdentifier) == 0 {\n\t\treturn []*Subscription{}\n\t}\n\tfor _, subscription := range manager.byIdentifier {\n\t\tsubscriptions = append(subscriptions, subscription)\n\t}\n\treturn\n}\n\nfunc (manager *MemorySubscriptions) Save(subscription *Subscription) bool {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tif subscription.Id() == \"\" {\n\t\tgenerator := manager.partner.Generator(\"subscription_identifier\")\n\t\tsubscription.id = SubscriptionId(generator.NewIdentifier(IdentifierAttributes{Id: manager.NewUUID()}))\n\t}\n\n\tsubscription.manager = manager\n\tmanager.byIdentifier[subscription.Id()] = subscription\n\n\treturn true\n}\n\nfunc (manager *MemorySubscriptions) Delete(subscription *Subscription) bool {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tdelete(manager.byIdentifier, subscription.Id())\n\treturn true\n}\n\nfunc (manager *MemorySubscriptions) DeleteById(id SubscriptionId) {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tdelete(manager.byIdentifier, id)\n}\n\nfunc (manager *MemorySubscriptions) CancelSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id := range manager.byIdentifier {\n\t\tdelete(manager.byIdentifier, id)\n\t}\n}\n\nfunc (manager *MemorySubscriptions) CancelBroadcastSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId != \"\" {\n\t\t\tdelete(manager.byIdentifier, id)\n\t\t}\n\t}\n}\n\nfunc (manager *MemorySubscriptions) CancelCollectSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId == \"\" {\n\t\t\tdelete(manager.byIdentifier, id)\n\t\t}\n\t}\n}\n<commit_msg>don't show lastStates in Subscription api<commit_after>package core\n\nimport (\n\t\"encoding\/json\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/af83\/edwig\/logger\"\n\t\"github.com\/af83\/edwig\/model\"\n)\n\ntype SubscriptionId string\n\ntype Subscription struct {\n\tmodel.ClockConsumer\n\tmanager Subscriptions\n\n\tid SubscriptionId\n\tkind string\n\texternalId string\n\n\tresourcesByObjectID map[string]*SubscribedResource\n\tsubscriptionOptions map[string]string\n}\n\ntype SubscribedResource struct {\n\tReference model.Reference\n\tRetryCount int\n\tSubscribedAt time.Time\n\tSubscribedUntil time.Time\n\tLastStates map[string]lastState `json:\"-\"`\n\tresourcesOptions map[string]string\n}\n\nfunc NewResource(ref model.Reference) SubscribedResource {\n\tressource := SubscribedResource{\n\t\tReference: ref,\n\t\tLastStates: make(map[string]lastState),\n\t\tresourcesOptions: make(map[string]string),\n\t}\n\n\treturn ressource\n}\n\nfunc (sr *SubscribedResource) ResourcesOptions() map[string]string {\n\treturn sr.resourcesOptions\n}\n\ntype APISubscription struct {\n\tKind string\n\tReferences []model.Reference\n}\n\nfunc (subscription *Subscription) SetDefinition(apisub *APISubscription) {\n\tsubscription.kind = apisub.Kind\n\tfor _, ref := range apisub.References {\n\t\tif ref.ObjectId != nil {\n\t\t\tsubscription.CreateAddNewResource(ref)\n\t\t}\n\t}\n}\n\nfunc (subscription *Subscription) Id() SubscriptionId {\n\treturn subscription.id\n}\n\nfunc (subscription *Subscription) Kind() string {\n\treturn subscription.kind\n}\n\nfunc (subscription *Subscription) ExternalId() string {\n\treturn subscription.externalId\n}\n\nfunc (subscription *Subscription) SubscriptionOptions() map[string]string {\n\treturn subscription.subscriptionOptions\n}\n\nfunc (subscription *Subscription) SetKind(kind string) {\n\tsubscription.kind = kind\n}\n\nfunc (subscription *Subscription) SetExternalId(externalId string) {\n\tsubscription.externalId = externalId\n}\n\nfunc (subscription *Subscription) Save() (ok bool) {\n\tok = subscription.manager.Save(subscription)\n\treturn\n}\n\nfunc (subscription *Subscription) Delete() (ok bool) {\n\tok = subscription.manager.Delete(subscription)\n\treturn\n}\n\nfunc (subscription *Subscription) ResourcesByObjectID() map[string]*SubscribedResource {\n\treturn subscription.resourcesByObjectID\n}\n\nfunc (subscription *Subscription) MarshalJSON() ([]byte, error) {\n\tresources := make([]*SubscribedResource, 0)\n\tfor _, resource := range subscription.resourcesByObjectID {\n\t\tresources = append(resources, resource)\n\t}\n\n\taux := struct {\n\t\tId SubscriptionId `json:\"SubscriptionRef,omitempty\"`\n\t\tExternalId string `json:\"ExternalId,omitempty\"`\n\t\tKind string `json:\",omitempty\"`\n\t\tResources []*SubscribedResource `json:\",omitempty\"`\n\t}{\n\t\tId: subscription.id,\n\t\tExternalId: subscription.externalId,\n\t\tKind: subscription.kind,\n\t\tResources: resources,\n\t}\n\treturn json.Marshal(&aux)\n}\n\nfunc (subscription *Subscription) Resource(obj model.ObjectID) *SubscribedResource {\n\tsub, present := subscription.resourcesByObjectID[obj.String()]\n\tif !present {\n\t\treturn nil\n\t}\n\treturn sub\n}\n\nfunc (subscription *Subscription) Resources(now time.Time) []*SubscribedResource {\n\tressources := []*SubscribedResource{}\n\n\tfor _, ressource := range subscription.resourcesByObjectID {\n\t\tif ressource.SubscribedUntil.After(subscription.Clock().Now()) {\n\t\t\tressources = append(ressources, ressource)\n\t\t}\n\t}\n\treturn ressources\n}\n\nfunc (subscription *Subscription) AddNewResource(resource SubscribedResource) {\n\tsubscription.resourcesByObjectID[resource.Reference.ObjectId.String()] = &resource\n}\n\nfunc (subscription *Subscription) CreateAddNewResource(reference model.Reference) *SubscribedResource {\n\tlogger.Log.Debugf(\"Create subscribed resource for %v\", reference.ObjectId.String())\n\n\tresource := SubscribedResource{\n\t\tReference: reference,\n\t\tSubscribedUntil: subscription.Clock().Now().Add(1 * time.Minute),\n\t\tLastStates: make(map[string]lastState),\n\t\tresourcesOptions: make(map[string]string),\n\t}\n\tsubscription.resourcesByObjectID[reference.ObjectId.String()] = &resource\n\treturn &resource\n}\n\nfunc (subscription *Subscription) DeleteResource(key string) {\n\tdelete(subscription.resourcesByObjectID, key)\n}\n\ntype MemorySubscriptions struct {\n\tmodel.UUIDConsumer\n\n\tmutex *sync.RWMutex\n\tpartner *Partner\n\n\tbyIdentifier map[SubscriptionId]*Subscription\n}\n\nfunc (manager *MemorySubscriptions) MarshalJSON() ([]byte, error) {\n\tsubscriptions := make([]*Subscription, 0)\n\tfor _, subscription := range manager.byIdentifier {\n\t\tsubscriptions = append(subscriptions, subscription)\n\t}\n\n\treturn json.Marshal(subscriptions)\n}\n\ntype Subscriptions interface {\n\tmodel.UUIDInterface\n\n\tNew(kind string) *Subscription\n\tFind(id SubscriptionId) (*Subscription, bool)\n\tFindAll() []*Subscription\n\tFindOrCreateByKind(string) *Subscription\n\tFindByKind(string) (*Subscription, bool)\n\tFindSubscriptionsByKind(string) []*Subscription\n\tFindBroadcastSubscriptions() []*Subscription\n\tSave(Subscription *Subscription) bool\n\tDelete(Subscription *Subscription) bool\n\tDeleteById(id SubscriptionId)\n\tCancelSubscriptions()\n\tCancelBroadcastSubscriptions()\n\tCancelCollectSubscriptions()\n\tFindByRessourceId(id, kind string) []*Subscription\n\tFindByExternalId(externalId string) (*Subscription, bool)\n}\n\nfunc NewMemorySubscriptions(partner *Partner) *MemorySubscriptions {\n\treturn &MemorySubscriptions{\n\t\tmutex: &sync.RWMutex{},\n\t\tbyIdentifier: make(map[SubscriptionId]*Subscription),\n\t\tpartner: partner,\n\t}\n}\n\nfunc (manager *MemorySubscriptions) New(kind string) *Subscription {\n\tlogger.Log.Debugf(\"Creating subscription with kind %v\", kind)\n\tsubscription := &Subscription{\n\t\tkind: kind,\n\t\tmanager: manager,\n\t\tresourcesByObjectID: make(map[string]*SubscribedResource),\n\t\tsubscriptionOptions: make(map[string]string),\n\t}\n\tsubscription.Save()\n\treturn subscription\n}\n\nfunc (manager *MemorySubscriptions) FindByKind(kind string) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind {\n\t\t\treturn subscription, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (manager *MemorySubscriptions) FindSubscriptionsByKind(kind string) (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindBroadcastSubscriptions() (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId != \"\" {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindByExternalId(externalId string) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.ExternalId() == externalId {\n\t\t\treturn subscription, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (manager *MemorySubscriptions) FindByRessourceId(id, kind string) []*Subscription {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\tsubscriptions := []*Subscription{}\n\n\tfor _, subscription := range manager.byIdentifier {\n\t\t_, ok := subscription.resourcesByObjectID[id]\n\t\tif ok && subscription.kind == kind {\n\t\t\tsubscriptions = append(subscriptions, subscription)\n\t\t}\n\t}\n\treturn subscriptions\n}\n\nfunc (manager *MemorySubscriptions) FindOrCreateByKind(kind string) *Subscription {\n\tmaxResource, _ := strconv.Atoi(manager.partner.Setting(\"subscriptions.maximum_resources\"))\n\tif maxResource == 1 {\n\t\treturn manager.New(kind)\n\t}\n\n\tmanager.mutex.RLock()\n\tfor _, subscription := range manager.byIdentifier {\n\t\tif subscription.Kind() == kind && (maxResource < 1 || len(subscription.resourcesByObjectID) < maxResource) {\n\t\t\tmanager.mutex.RUnlock()\n\t\t\treturn subscription\n\t\t}\n\t}\n\tmanager.mutex.RUnlock()\n\n\treturn manager.New(kind)\n}\n\nfunc (manager *MemorySubscriptions) Find(id SubscriptionId) (*Subscription, bool) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tsubscription, ok := manager.byIdentifier[id]\n\tif ok {\n\t\treturn subscription, true\n\t} else {\n\t\treturn nil, false\n\t}\n}\n\nfunc (manager *MemorySubscriptions) FindAll() (subscriptions []*Subscription) {\n\tmanager.mutex.RLock()\n\tdefer manager.mutex.RUnlock()\n\n\tif len(manager.byIdentifier) == 0 {\n\t\treturn []*Subscription{}\n\t}\n\tfor _, subscription := range manager.byIdentifier {\n\t\tsubscriptions = append(subscriptions, subscription)\n\t}\n\treturn\n}\n\nfunc (manager *MemorySubscriptions) Save(subscription *Subscription) bool {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tif subscription.Id() == \"\" {\n\t\tgenerator := manager.partner.Generator(\"subscription_identifier\")\n\t\tsubscription.id = SubscriptionId(generator.NewIdentifier(IdentifierAttributes{Id: manager.NewUUID()}))\n\t}\n\n\tsubscription.manager = manager\n\tmanager.byIdentifier[subscription.Id()] = subscription\n\n\treturn true\n}\n\nfunc (manager *MemorySubscriptions) Delete(subscription *Subscription) bool {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tdelete(manager.byIdentifier, subscription.Id())\n\treturn true\n}\n\nfunc (manager *MemorySubscriptions) DeleteById(id SubscriptionId) {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tdelete(manager.byIdentifier, id)\n}\n\nfunc (manager *MemorySubscriptions) CancelSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id := range manager.byIdentifier {\n\t\tdelete(manager.byIdentifier, id)\n\t}\n}\n\nfunc (manager *MemorySubscriptions) CancelBroadcastSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId != \"\" {\n\t\t\tdelete(manager.byIdentifier, id)\n\t\t}\n\t}\n}\n\nfunc (manager *MemorySubscriptions) CancelCollectSubscriptions() {\n\tmanager.mutex.Lock()\n\tdefer manager.mutex.Unlock()\n\n\tfor id, subscription := range manager.byIdentifier {\n\t\tif subscription.externalId == \"\" {\n\t\t\tdelete(manager.byIdentifier, id)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/influxdata\/chronograf\"\n)\n\ntype dashboardLinks struct {\n\tSelf string `json:\"self\"` \/\/ Self link mapping to this resource\n}\n\ntype dashboardResponse struct {\n\tchronograf.Dashboard\n\tLinks dashboardLinks `json:\"links\"`\n}\n\ntype getDashboardsResponse struct {\n\tDashboards []dashboardResponse `json:\"dashboards\"`\n}\n\nfunc newDashboardResponse(d chronograf.Dashboard) dashboardResponse {\n\tbase := \"\/chronograf\/v1\/dashboards\"\n\treturn dashboardResponse{\n\t\tDashboard: d,\n\t\tLinks: dashboardLinks{\n\t\t\tSelf: fmt.Sprintf(\"%s\/%d\", base, d.ID),\n\t\t},\n\t}\n}\n\n\/\/ Dashboards returns all dashboards within the store\nfunc (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tdashboards, err := s.DashboardsStore.All(ctx)\n\tif err != nil {\n\t\tError(w, http.StatusInternalServerError, \"Error loading layouts\", s.Logger)\n\t\treturn\n\t}\n\n\tres := getDashboardsResponse{\n\t\tDashboards: []dashboardResponse{},\n\t}\n\n\tfor _, dashboard := range dashboards {\n\t\tres.Dashboards = append(res.Dashboards, newDashboardResponse(dashboard))\n\t}\n\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ DashboardID returns a single specified dashboard\nfunc (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.DashboardsStore.Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(*e)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ NewDashboard creates and returns a new dashboard object\nfunc (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) {\n\tvar dashboard *chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\n\tvar err error\n\tif dashboard, err = s.DashboardsStore.Add(r.Context(), dashboard); err != nil {\n\t\tmsg := fmt.Errorf(\"Error storing layout %v: %v\", dashboard, err)\n\t\tunknownErrorWithMessage(w, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(*dashboard)\n\tw.Header().Add(\"Location\", res.Links.Self)\n\tencodeJSON(w, http.StatusCreated, res, s.Logger)\n}\n\n\/\/ RemoveDashboard deletes a dashboard\nfunc (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.DashboardsStore.Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.DashboardsStore.Delete(ctx, e); err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ UpdateDashboard updates a dashboard\nfunc (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {\n\n}\n<commit_msg>add update function for dashboards api<commit_after>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/bouk\/httprouter\"\n\t\"github.com\/influxdata\/chronograf\"\n)\n\ntype dashboardLinks struct {\n\tSelf string `json:\"self\"` \/\/ Self link mapping to this resource\n}\n\ntype dashboardResponse struct {\n\tchronograf.Dashboard\n\tLinks dashboardLinks `json:\"links\"`\n}\n\ntype getDashboardsResponse struct {\n\tDashboards []dashboardResponse `json:\"dashboards\"`\n}\n\nfunc newDashboardResponse(d chronograf.Dashboard) dashboardResponse {\n\tbase := \"\/chronograf\/v1\/dashboards\"\n\treturn dashboardResponse{\n\t\tDashboard: d,\n\t\tLinks: dashboardLinks{\n\t\t\tSelf: fmt.Sprintf(\"%s\/%d\", base, d.ID),\n\t\t},\n\t}\n}\n\n\/\/ Dashboards returns all dashboards within the store\nfunc (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tdashboards, err := s.DashboardsStore.All(ctx)\n\tif err != nil {\n\t\tError(w, http.StatusInternalServerError, \"Error loading layouts\", s.Logger)\n\t\treturn\n\t}\n\n\tres := getDashboardsResponse{\n\t\tDashboards: []dashboardResponse{},\n\t}\n\n\tfor _, dashboard := range dashboards {\n\t\tres.Dashboards = append(res.Dashboards, newDashboardResponse(dashboard))\n\t}\n\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ DashboardID returns a single specified dashboard\nfunc (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.DashboardsStore.Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(*e)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n\n\/\/ NewDashboard creates and returns a new dashboard object\nfunc (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) {\n\tvar dashboard *chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\n\tvar err error\n\tif dashboard, err = s.DashboardsStore.Add(r.Context(), dashboard); err != nil {\n\t\tmsg := fmt.Errorf(\"Error storing layout %v: %v\", dashboard, err)\n\t\tunknownErrorWithMessage(w, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(*dashboard)\n\tw.Header().Add(\"Location\", res.Links.Self)\n\tencodeJSON(w, http.StatusCreated, res, s.Logger)\n}\n\n\/\/ RemoveDashboard deletes a dashboard\nfunc (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {\n\tid, err := paramID(\"id\", r)\n\tif err != nil {\n\t\tError(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\te, err := s.DashboardsStore.Get(ctx, chronograf.DashboardID(id))\n\tif err != nil {\n\t\tnotFound(w, id, s.Logger)\n\t\treturn\n\t}\n\n\tif err := s.DashboardsStore.Delete(ctx, e); err != nil {\n\t\tunknownErrorWithMessage(w, err, s.Logger)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ UpdateDashboard replaces a dashboard\nfunc (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tidParam, err := strconv.Atoi(httprouter.GetParamFromContext(ctx, \"id\"))\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not parse dashboard ID: %s\", err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t}\n\tid := chronograf.DashboardID(idParam)\n\n\t_, err = s.DashboardsStore.Get(ctx, id)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, fmt.Sprintf(\"ID %s not found\", id), s.Logger)\n\t\treturn\n\t}\n\n\tvar req *chronograf.Dashboard\n\tif err := json.NewDecoder(r.Body).Decode(req); err != nil {\n\t\tinvalidJSON(w, s.Logger)\n\t\treturn\n\t}\n\treq.ID = id\n\n\tif err := s.DashboardsStore.Update(ctx, req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Error updating dashboard ID %s: %v\", id, err)\n\t\tError(w, http.StatusInternalServerError, msg, s.Logger)\n\t\treturn\n\t}\n\n\tres := newDashboardResponse(*req)\n\tencodeJSON(w, http.StatusOK, res, s.Logger)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc init() {\n\tRegisterExporter(\"queue\", newExporterQueue)\n}\n\nvar (\n\tqueueLabels = []string{\"cluster\", \"vhost\", \"queue\", \"durable\", \"policy\", \"self\"}\n\tqueueLabelKeys = []string{\"vhost\", \"name\", \"durable\", \"policy\", \"state\", \"node\", \"idle_since\"}\n\n\tqueueGaugeVec = map[string]*prometheus.GaugeVec{\n\t\t\"messages_ready\": newGaugeVec(\"queue_messages_ready\", \"Number of messages ready to be delivered to clients.\", queueLabels),\n\t\t\"messages_unacknowledged\": newGaugeVec(\"queue_messages_unacknowledged\", \"Number of messages delivered to clients but not yet acknowledged.\", queueLabels),\n\t\t\"messages\": newGaugeVec(\"queue_messages\", \"Sum of ready and unacknowledged messages (queue depth).\", queueLabels),\n\t\t\"messages_ready_ram\": newGaugeVec(\"queue_messages_ready_ram\", \"Number of messages from messages_ready which are resident in ram.\", queueLabels),\n\t\t\"messages_unacknowledged_ram\": newGaugeVec(\"queue_messages_unacknowledged_ram\", \"Number of messages from messages_unacknowledged which are resident in ram.\", queueLabels),\n\t\t\"messages_ram\": newGaugeVec(\"queue_messages_ram\", \"Total number of messages which are resident in ram.\", queueLabels),\n\t\t\"messages_persistent\": newGaugeVec(\"queue_messages_persistent\", \"Total number of persistent messages in the queue (will always be 0 for transient queues).\", queueLabels),\n\t\t\"message_bytes\": newGaugeVec(\"queue_message_bytes\", \"Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead.\", queueLabels),\n\t\t\"message_bytes_ready\": newGaugeVec(\"queue_message_bytes_ready\", \"Like message_bytes but counting only those messages ready to be delivered to clients.\", queueLabels),\n\t\t\"message_bytes_unacknowledged\": newGaugeVec(\"queue_message_bytes_unacknowledged\", \"Like message_bytes but counting only those messages delivered to clients but not yet acknowledged.\", queueLabels),\n\t\t\"message_bytes_ram\": newGaugeVec(\"queue_message_bytes_ram\", \"Like message_bytes but counting only those messages which are in RAM.\", queueLabels),\n\t\t\"message_bytes_persistent\": newGaugeVec(\"queue_message_bytes_persistent\", \"Like message_bytes but counting only those messages which are persistent.\", queueLabels),\n\t\t\"consumers\": newGaugeVec(\"queue_consumers\", \"Number of consumers.\", queueLabels),\n\t\t\"consumer_utilisation\": newGaugeVec(\"queue_consumer_utilisation\", \"Fraction of the time (between 0.0 and 1.0) that the queue is able to immediately deliver messages to consumers. This can be less than 1.0 if consumers are limited by network congestion or prefetch count.\", queueLabels),\n\t\t\"memory\": newGaugeVec(\"queue_memory\", \"Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures.\", queueLabels),\n\t\t\"head_message_timestamp\": newGaugeVec(\"queue_head_message_timestamp\", \"The timestamp property of the first message in the queue, if present. Timestamps of messages only appear when they are in the paged-in state.\", queueLabels), \/\/https:\/\/github.com\/rabbitmq\/rabbitmq-server\/pull\/54\n\t\t\"garbage_collection.min_heap_size\": newGaugeVec(\"queue_gc_min_heap\", \"Minimum heap size in words\", queueLabels),\n\t\t\"garbage_collection.min_bin_vheap_size\": newGaugeVec(\"queue_gc_min_vheap\", \"Minimum binary virtual heap size in words\", queueLabels),\n\t\t\"garbage_collection.fullsweep_after\": newGaugeVec(\"queue_gc_collections_before_fullsweep\", \"Maximum generational collections before fullsweep\", queueLabels),\n\t\t\"slave_nodes_len\": newGaugeVec(\"queue_slaves_nodes_len\", \"Number of slave nodes attached to the queue\", queueLabels),\n\t\t\"synchronised_slave_nodes_len\": newGaugeVec(\"queue_synchronised_slave_nodes_len\", \"Number of slave nodes in sync to the queue\", queueLabels),\n\t\t\"message_stats.publish_details.rate\": newGaugeVec(\"queue_messages_publish_rate\", \"Rate at which messages are entering the server.\", queueLabels),\n\t\t\"message_stats.deliver_no_ack_details.rate\": newGaugeVec(\"queue_messages_deliver_no_ack_rate\", \"Rate at which messages are delivered to consumers that use automatic acknowledgements.\", queueLabels),\n\t\t\"message_stats.deliver_details.rate\": newGaugeVec(\"queue_messages_deliver_rate\", \"Rate at which messages are delivered to consumers that use manual acknowledgements.\", queueLabels),\n\t}\n\tlimitsGaugeVec = map[string]*prometheus.GaugeVec{\n\t\t\"max-length-bytes\": newGaugeVec(\"queue_max_length_bytes\", \"Total body size for ready messages a queue can contain before it starts to drop them from its head.\", queueLabels),\n\t\t\"max-length\": newGaugeVec(\"queue_max_length\", \"How many (ready) messages a queue can contain before it starts to drop them from its head.\", queueLabels),\n\t}\n\n\tqueueCounterVec = map[string]*prometheus.Desc{\n\t\t\"disk_reads\": newDesc(\"queue_disk_reads_total\", \"Total number of times messages have been read from disk by this queue since it started.\", queueLabels),\n\t\t\"disk_writes\": newDesc(\"queue_disk_writes_total\", \"Total number of times messages have been written to disk by this queue since it started.\", queueLabels),\n\t\t\"message_stats.publish\": newDesc(\"queue_messages_published_total\", \"Count of messages published.\", queueLabels),\n\t\t\"message_stats.confirm\": newDesc(\"queue_messages_confirmed_total\", \"Count of messages confirmed. \", queueLabels),\n\t\t\"message_stats.deliver\": newDesc(\"queue_messages_delivered_total\", \"Count of messages delivered in acknowledgement mode to consumers.\", queueLabels),\n\t\t\"message_stats.deliver_no_ack\": newDesc(\"queue_messages_delivered_noack_total\", \"Count of messages delivered in no-acknowledgement mode to consumers. \", queueLabels),\n\t\t\"message_stats.get\": newDesc(\"queue_messages_get_total\", \"Count of messages delivered in acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"message_stats.get_no_ack\": newDesc(\"queue_messages_get_noack_total\", \"Count of messages delivered in no-acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"message_stats.redeliver\": newDesc(\"queue_messages_redelivered_total\", \"Count of subset of messages in deliver_get which had the redelivered flag set.\", queueLabels),\n\t\t\"message_stats.return\": newDesc(\"queue_messages_returned_total\", \"Count of messages returned to publisher as unroutable.\", queueLabels),\n\t\t\"message_stats.ack\": newDesc(\"queue_messages_ack_total\", \"Count of messages delivered in acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"reductions\": newDesc(\"queue_reductions_total\", \"Count of reductions which take place on this process. .\", queueLabels),\n\t\t\"garbage_collection.minor_gcs\": newDesc(\"queue_gc_minor_collections_total\", \"Number of minor GCs\", queueLabels),\n\t}\n)\n\ntype exporterQueue struct {\n\tlimitsGauge map[string]*prometheus.GaugeVec\n\tqueueMetricsGauge map[string]*prometheus.GaugeVec\n\tqueueMetricsCounter map[string]*prometheus.Desc\n\tstateMetric *prometheus.GaugeVec\n\tidleSinceMetric *prometheus.GaugeVec\n}\n\nfunc newExporterQueue() Exporter {\n\tqueueGaugeVecActual := queueGaugeVec\n\tqueueCounterVecActual := queueCounterVec\n\tlitmitsGaugeVecActual := limitsGaugeVec\n\n\tif len(config.ExcludeMetrics) > 0 {\n\t\tfor _, metric := range config.ExcludeMetrics {\n\t\t\tif queueGaugeVecActual[metric] != nil {\n\t\t\t\tdelete(queueGaugeVecActual, metric)\n\t\t\t}\n\t\t\tif queueCounterVecActual[metric] != nil {\n\t\t\t\tdelete(queueCounterVecActual, metric)\n\t\t\t}\n\t\t\tif litmitsGaugeVecActual[metric] != nil {\n\t\t\t\tdelete(litmitsGaugeVecActual, metric)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn exporterQueue{\n\t\tlimitsGauge: litmitsGaugeVecActual,\n\t\tqueueMetricsGauge: queueGaugeVecActual,\n\t\tqueueMetricsCounter: queueCounterVecActual,\n\t\tstateMetric: newGaugeVec(\"queue_state\", \"A metric with a value of constant '1' if the queue is in a certain state\", append(queueLabels, \"state\")),\n\t\tidleSinceMetric: newGaugeVec(\"queue_idle_since_seconds\", \"starttime where the queue switched to idle state; in seconds since epoch (1970).\", queueLabels),\n\t}\n}\n\nfunc collectLowerMetric(metricA, metricB string, stats StatsInfo) float64 {\n\tmA, okA := stats.metrics[metricA]\n\tmB, okB := stats.metrics[metricB]\n\n\tif okA && okB {\n\t\tif mA < mB {\n\t\t\treturn mA\n\t\t} else {\n\t\t\treturn mB\n\t\t}\n\t}\n\tif okA {\n\t\treturn mA\n\t}\n\tif okB {\n\t\treturn mB\n\t}\n\treturn -1.0\n}\n\nfunc (e exporterQueue) Collect(ctx context.Context, ch chan<- prometheus.Metric) error {\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Reset()\n\t}\n\tfor _, m := range e.limitsGauge {\n\t\tm.Reset()\n\t}\n\te.stateMetric.Reset()\n\te.idleSinceMetric.Reset()\n\n\tif config.MaxQueues > 0 {\n\t\t\/\/ Get overview info to check total queues\n\t\ttotalQueues, ok := ctx.Value(totalQueues).(int)\n\t\tif !ok {\n\t\t\treturn errors.New(\"total Queue counter missing\")\n\t\t}\n\n\t\tif totalQueues > config.MaxQueues {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"MaxQueues\": config.MaxQueues,\n\t\t\t\t\"TotalQueues\": totalQueues,\n\t\t\t}).Debug(\"MaxQueues exceeded.\")\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tselfNode := \"\"\n\tif n, ok := ctx.Value(nodeName).(string); ok {\n\t\tselfNode = n\n\t}\n\tcluster := \"\"\n\tif n, ok := ctx.Value(clusterName).(string); ok {\n\t\tcluster = n\n\t}\n\n\trabbitMqQueueData, err := getStatsInfo(config, \"queues\", queueLabelKeys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithField(\"queueData\", rabbitMqQueueData).Debug(\"Queue data\")\n\tfor _, queue := range rabbitMqQueueData {\n\t\tqname := queue.labels[\"name\"]\n\t\tvname := queue.labels[\"vhost\"]\n\t\tif vhostIncluded := config.IncludeVHost.MatchString(vname); !vhostIncluded {\n\t\t\tcontinue\n\t\t}\n\t\tif skipVhost := config.SkipVHost.MatchString(vname); skipVhost {\n\t\t\tcontinue\n\t\t}\n\t\tif queueIncluded := config.IncludeQueues.MatchString(qname); !queueIncluded {\n\t\t\tcontinue\n\t\t}\n\t\tif queueSkipped := config.SkipQueues.MatchString(qname); queueSkipped {\n\t\t\tcontinue\n\t\t}\n\n\t\tself := \"0\"\n\t\tif queue.labels[\"node\"] == selfNode {\n\t\t\tself = \"1\"\n\t\t}\n\t\tlabelValues := []string{cluster, queue.labels[\"vhost\"], queue.labels[\"name\"], queue.labels[\"durable\"], queue.labels[\"policy\"], self}\n\n\t\tfor key, gaugevec := range e.queueMetricsGauge {\n\t\t\tif value, ok := queue.metrics[key]; ok {\n\t\t\t\t\/\/ log.WithFields(log.Fields{\"vhost\": queue.labels[\"vhost\"], \"queue\": queue.labels[\"name\"], \"key\": key, \"value\": value}).Info(\"Set queue metric for key\")\n\t\t\t\tgaugevec.WithLabelValues(labelValues...).Set(value)\n\t\t\t}\n\t\t}\n\n\t\tfor key, countvec := range e.queueMetricsCounter {\n\t\t\tif value, ok := queue.metrics[key]; ok {\n\t\t\t\tch <- prometheus.MustNewConstMetric(countvec, prometheus.CounterValue, value, labelValues...)\n\t\t\t} else {\n\t\t\t\tch <- prometheus.MustNewConstMetric(countvec, prometheus.CounterValue, 0, labelValues...)\n\t\t\t}\n\t\t}\n\n\t\tstate := queue.labels[\"state\"]\n\t\tidleSince, exists := queue.labels[\"idle_since\"]\n\t\tif exists && idleSince != \"\" {\n\t\t\tif t, err := time.Parse(\"2006-01-02 15:04:05\", idleSince); err == nil {\n\t\t\t\tunixSeconds := float64(t.UnixNano()) \/ 1e9\n\n\t\t\t\tif state == \"running\" { \/\/replace running state with idle if idle_since time is provided. Other states (flow, etc.) are not replaced\n\t\t\t\t\tstate = \"idle\"\n\t\t\t\t}\n\t\t\t\te.idleSinceMetric.WithLabelValues(labelValues...).Set(unixSeconds)\n\t\t\t} else {\n\t\t\t\tlog.WithError(err).WithField(\"idle_since\", idleSince).Warn(\"error parsing idle since time\")\n\t\t\t}\n\t\t}\n\t\te.stateMetric.WithLabelValues(append(labelValues, state)...).Set(1)\n\n\t\tif f := collectLowerMetric(\"arguments.x-max-length\", \"effective_policy_definition.max-length\", queue); f >= 0 {\n\t\t\tlimitsGaugeVec[\"max-length\"].WithLabelValues(labelValues...).Set(f)\n\t\t}\n\t\tif f := collectLowerMetric(\"arguments.x-max-length-bytes\", \"effective_policy_definition.max-length-bytes\", queue); f >= 0 {\n\t\t\tlimitsGaugeVec[\"max-length-bytes\"].WithLabelValues(labelValues...).Set(f)\n\t\t}\n\n\t}\n\n\tfor _, metric := range e.limitsGauge {\n\t\tmetric.Collect(ch)\n\t}\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Collect(ch)\n\t}\n\te.stateMetric.Collect(ch)\n\te.idleSinceMetric.Collect(ch)\n\n\treturn nil\n}\n\nfunc (e exporterQueue) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range e.limitsGauge {\n\t\tmetric.Describe(ch)\n\t}\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Describe(ch)\n\t}\n\te.stateMetric.Describe(ch)\n\te.idleSinceMetric.Describe(ch)\n\tfor _, countervec := range e.queueMetricsCounter {\n\t\tch <- countervec\n\t}\n}\n<commit_msg>Fix: panic when max-length metrics are excluded<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc init() {\n\tRegisterExporter(\"queue\", newExporterQueue)\n}\n\nvar (\n\tqueueLabels = []string{\"cluster\", \"vhost\", \"queue\", \"durable\", \"policy\", \"self\"}\n\tqueueLabelKeys = []string{\"vhost\", \"name\", \"durable\", \"policy\", \"state\", \"node\", \"idle_since\"}\n\n\tqueueGaugeVec = map[string]*prometheus.GaugeVec{\n\t\t\"messages_ready\": newGaugeVec(\"queue_messages_ready\", \"Number of messages ready to be delivered to clients.\", queueLabels),\n\t\t\"messages_unacknowledged\": newGaugeVec(\"queue_messages_unacknowledged\", \"Number of messages delivered to clients but not yet acknowledged.\", queueLabels),\n\t\t\"messages\": newGaugeVec(\"queue_messages\", \"Sum of ready and unacknowledged messages (queue depth).\", queueLabels),\n\t\t\"messages_ready_ram\": newGaugeVec(\"queue_messages_ready_ram\", \"Number of messages from messages_ready which are resident in ram.\", queueLabels),\n\t\t\"messages_unacknowledged_ram\": newGaugeVec(\"queue_messages_unacknowledged_ram\", \"Number of messages from messages_unacknowledged which are resident in ram.\", queueLabels),\n\t\t\"messages_ram\": newGaugeVec(\"queue_messages_ram\", \"Total number of messages which are resident in ram.\", queueLabels),\n\t\t\"messages_persistent\": newGaugeVec(\"queue_messages_persistent\", \"Total number of persistent messages in the queue (will always be 0 for transient queues).\", queueLabels),\n\t\t\"message_bytes\": newGaugeVec(\"queue_message_bytes\", \"Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead.\", queueLabels),\n\t\t\"message_bytes_ready\": newGaugeVec(\"queue_message_bytes_ready\", \"Like message_bytes but counting only those messages ready to be delivered to clients.\", queueLabels),\n\t\t\"message_bytes_unacknowledged\": newGaugeVec(\"queue_message_bytes_unacknowledged\", \"Like message_bytes but counting only those messages delivered to clients but not yet acknowledged.\", queueLabels),\n\t\t\"message_bytes_ram\": newGaugeVec(\"queue_message_bytes_ram\", \"Like message_bytes but counting only those messages which are in RAM.\", queueLabels),\n\t\t\"message_bytes_persistent\": newGaugeVec(\"queue_message_bytes_persistent\", \"Like message_bytes but counting only those messages which are persistent.\", queueLabels),\n\t\t\"consumers\": newGaugeVec(\"queue_consumers\", \"Number of consumers.\", queueLabels),\n\t\t\"consumer_utilisation\": newGaugeVec(\"queue_consumer_utilisation\", \"Fraction of the time (between 0.0 and 1.0) that the queue is able to immediately deliver messages to consumers. This can be less than 1.0 if consumers are limited by network congestion or prefetch count.\", queueLabels),\n\t\t\"memory\": newGaugeVec(\"queue_memory\", \"Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures.\", queueLabels),\n\t\t\"head_message_timestamp\": newGaugeVec(\"queue_head_message_timestamp\", \"The timestamp property of the first message in the queue, if present. Timestamps of messages only appear when they are in the paged-in state.\", queueLabels), \/\/https:\/\/github.com\/rabbitmq\/rabbitmq-server\/pull\/54\n\t\t\"garbage_collection.min_heap_size\": newGaugeVec(\"queue_gc_min_heap\", \"Minimum heap size in words\", queueLabels),\n\t\t\"garbage_collection.min_bin_vheap_size\": newGaugeVec(\"queue_gc_min_vheap\", \"Minimum binary virtual heap size in words\", queueLabels),\n\t\t\"garbage_collection.fullsweep_after\": newGaugeVec(\"queue_gc_collections_before_fullsweep\", \"Maximum generational collections before fullsweep\", queueLabels),\n\t\t\"slave_nodes_len\": newGaugeVec(\"queue_slaves_nodes_len\", \"Number of slave nodes attached to the queue\", queueLabels),\n\t\t\"synchronised_slave_nodes_len\": newGaugeVec(\"queue_synchronised_slave_nodes_len\", \"Number of slave nodes in sync to the queue\", queueLabels),\n\t\t\"message_stats.publish_details.rate\": newGaugeVec(\"queue_messages_publish_rate\", \"Rate at which messages are entering the server.\", queueLabels),\n\t\t\"message_stats.deliver_no_ack_details.rate\": newGaugeVec(\"queue_messages_deliver_no_ack_rate\", \"Rate at which messages are delivered to consumers that use automatic acknowledgements.\", queueLabels),\n\t\t\"message_stats.deliver_details.rate\": newGaugeVec(\"queue_messages_deliver_rate\", \"Rate at which messages are delivered to consumers that use manual acknowledgements.\", queueLabels),\n\t}\n\tlimitsGaugeVec = map[string]*prometheus.GaugeVec{\n\t\t\"max-length-bytes\": newGaugeVec(\"queue_max_length_bytes\", \"Total body size for ready messages a queue can contain before it starts to drop them from its head.\", queueLabels),\n\t\t\"max-length\": newGaugeVec(\"queue_max_length\", \"How many (ready) messages a queue can contain before it starts to drop them from its head.\", queueLabels),\n\t}\n\n\tqueueCounterVec = map[string]*prometheus.Desc{\n\t\t\"disk_reads\": newDesc(\"queue_disk_reads_total\", \"Total number of times messages have been read from disk by this queue since it started.\", queueLabels),\n\t\t\"disk_writes\": newDesc(\"queue_disk_writes_total\", \"Total number of times messages have been written to disk by this queue since it started.\", queueLabels),\n\t\t\"message_stats.publish\": newDesc(\"queue_messages_published_total\", \"Count of messages published.\", queueLabels),\n\t\t\"message_stats.confirm\": newDesc(\"queue_messages_confirmed_total\", \"Count of messages confirmed. \", queueLabels),\n\t\t\"message_stats.deliver\": newDesc(\"queue_messages_delivered_total\", \"Count of messages delivered in acknowledgement mode to consumers.\", queueLabels),\n\t\t\"message_stats.deliver_no_ack\": newDesc(\"queue_messages_delivered_noack_total\", \"Count of messages delivered in no-acknowledgement mode to consumers. \", queueLabels),\n\t\t\"message_stats.get\": newDesc(\"queue_messages_get_total\", \"Count of messages delivered in acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"message_stats.get_no_ack\": newDesc(\"queue_messages_get_noack_total\", \"Count of messages delivered in no-acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"message_stats.redeliver\": newDesc(\"queue_messages_redelivered_total\", \"Count of subset of messages in deliver_get which had the redelivered flag set.\", queueLabels),\n\t\t\"message_stats.return\": newDesc(\"queue_messages_returned_total\", \"Count of messages returned to publisher as unroutable.\", queueLabels),\n\t\t\"message_stats.ack\": newDesc(\"queue_messages_ack_total\", \"Count of messages delivered in acknowledgement mode in response to basic.get.\", queueLabels),\n\t\t\"reductions\": newDesc(\"queue_reductions_total\", \"Count of reductions which take place on this process. .\", queueLabels),\n\t\t\"garbage_collection.minor_gcs\": newDesc(\"queue_gc_minor_collections_total\", \"Number of minor GCs\", queueLabels),\n\t}\n)\n\ntype exporterQueue struct {\n\tlimitsGauge map[string]*prometheus.GaugeVec\n\tqueueMetricsGauge map[string]*prometheus.GaugeVec\n\tqueueMetricsCounter map[string]*prometheus.Desc\n\tstateMetric *prometheus.GaugeVec\n\tidleSinceMetric *prometheus.GaugeVec\n}\n\nfunc newExporterQueue() Exporter {\n\tqueueGaugeVecActual := queueGaugeVec\n\tqueueCounterVecActual := queueCounterVec\n\tlitmitsGaugeVecActual := limitsGaugeVec\n\n\tif len(config.ExcludeMetrics) > 0 {\n\t\tfor _, metric := range config.ExcludeMetrics {\n\t\t\tif queueGaugeVecActual[metric] != nil {\n\t\t\t\tdelete(queueGaugeVecActual, metric)\n\t\t\t}\n\t\t\tif queueCounterVecActual[metric] != nil {\n\t\t\t\tdelete(queueCounterVecActual, metric)\n\t\t\t}\n\t\t\tif litmitsGaugeVecActual[metric] != nil {\n\t\t\t\tdelete(litmitsGaugeVecActual, metric)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn exporterQueue{\n\t\tlimitsGauge: litmitsGaugeVecActual,\n\t\tqueueMetricsGauge: queueGaugeVecActual,\n\t\tqueueMetricsCounter: queueCounterVecActual,\n\t\tstateMetric: newGaugeVec(\"queue_state\", \"A metric with a value of constant '1' if the queue is in a certain state\", append(queueLabels, \"state\")),\n\t\tidleSinceMetric: newGaugeVec(\"queue_idle_since_seconds\", \"starttime where the queue switched to idle state; in seconds since epoch (1970).\", queueLabels),\n\t}\n}\n\nfunc collectLowerMetric(metricA, metricB string, stats StatsInfo) float64 {\n\tmA, okA := stats.metrics[metricA]\n\tmB, okB := stats.metrics[metricB]\n\n\tif okA && okB {\n\t\tif mA < mB {\n\t\t\treturn mA\n\t\t} else {\n\t\t\treturn mB\n\t\t}\n\t}\n\tif okA {\n\t\treturn mA\n\t}\n\tif okB {\n\t\treturn mB\n\t}\n\treturn -1.0\n}\n\nfunc (e exporterQueue) Collect(ctx context.Context, ch chan<- prometheus.Metric) error {\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Reset()\n\t}\n\tfor _, m := range e.limitsGauge {\n\t\tm.Reset()\n\t}\n\te.stateMetric.Reset()\n\te.idleSinceMetric.Reset()\n\n\tif config.MaxQueues > 0 {\n\t\t\/\/ Get overview info to check total queues\n\t\ttotalQueues, ok := ctx.Value(totalQueues).(int)\n\t\tif !ok {\n\t\t\treturn errors.New(\"total Queue counter missing\")\n\t\t}\n\n\t\tif totalQueues > config.MaxQueues {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"MaxQueues\": config.MaxQueues,\n\t\t\t\t\"TotalQueues\": totalQueues,\n\t\t\t}).Debug(\"MaxQueues exceeded.\")\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tselfNode := \"\"\n\tif n, ok := ctx.Value(nodeName).(string); ok {\n\t\tselfNode = n\n\t}\n\tcluster := \"\"\n\tif n, ok := ctx.Value(clusterName).(string); ok {\n\t\tcluster = n\n\t}\n\n\trabbitMqQueueData, err := getStatsInfo(config, \"queues\", queueLabelKeys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithField(\"queueData\", rabbitMqQueueData).Debug(\"Queue data\")\n\tfor _, queue := range rabbitMqQueueData {\n\t\tqname := queue.labels[\"name\"]\n\t\tvname := queue.labels[\"vhost\"]\n\t\tif vhostIncluded := config.IncludeVHost.MatchString(vname); !vhostIncluded {\n\t\t\tcontinue\n\t\t}\n\t\tif skipVhost := config.SkipVHost.MatchString(vname); skipVhost {\n\t\t\tcontinue\n\t\t}\n\t\tif queueIncluded := config.IncludeQueues.MatchString(qname); !queueIncluded {\n\t\t\tcontinue\n\t\t}\n\t\tif queueSkipped := config.SkipQueues.MatchString(qname); queueSkipped {\n\t\t\tcontinue\n\t\t}\n\n\t\tself := \"0\"\n\t\tif queue.labels[\"node\"] == selfNode {\n\t\t\tself = \"1\"\n\t\t}\n\t\tlabelValues := []string{cluster, queue.labels[\"vhost\"], queue.labels[\"name\"], queue.labels[\"durable\"], queue.labels[\"policy\"], self}\n\n\t\tfor key, gaugevec := range e.queueMetricsGauge {\n\t\t\tif value, ok := queue.metrics[key]; ok {\n\t\t\t\t\/\/ log.WithFields(log.Fields{\"vhost\": queue.labels[\"vhost\"], \"queue\": queue.labels[\"name\"], \"key\": key, \"value\": value}).Info(\"Set queue metric for key\")\n\t\t\t\tgaugevec.WithLabelValues(labelValues...).Set(value)\n\t\t\t}\n\t\t}\n\n\t\tfor key, countvec := range e.queueMetricsCounter {\n\t\t\tif value, ok := queue.metrics[key]; ok {\n\t\t\t\tch <- prometheus.MustNewConstMetric(countvec, prometheus.CounterValue, value, labelValues...)\n\t\t\t} else {\n\t\t\t\tch <- prometheus.MustNewConstMetric(countvec, prometheus.CounterValue, 0, labelValues...)\n\t\t\t}\n\t\t}\n\n\t\tstate := queue.labels[\"state\"]\n\t\tidleSince, exists := queue.labels[\"idle_since\"]\n\t\tif exists && idleSince != \"\" {\n\t\t\tif t, err := time.Parse(\"2006-01-02 15:04:05\", idleSince); err == nil {\n\t\t\t\tunixSeconds := float64(t.UnixNano()) \/ 1e9\n\n\t\t\t\tif state == \"running\" { \/\/replace running state with idle if idle_since time is provided. Other states (flow, etc.) are not replaced\n\t\t\t\t\tstate = \"idle\"\n\t\t\t\t}\n\t\t\t\te.idleSinceMetric.WithLabelValues(labelValues...).Set(unixSeconds)\n\t\t\t} else {\n\t\t\t\tlog.WithError(err).WithField(\"idle_since\", idleSince).Warn(\"error parsing idle since time\")\n\t\t\t}\n\t\t}\n\t\te.stateMetric.WithLabelValues(append(labelValues, state)...).Set(1)\n\n\t\tif _, ok := limitsGaugeVec[\"max-length\"]; ok {\n\t\t\tif f := collectLowerMetric(\"arguments.x-max-length\", \"effective_policy_definition.max-length\", queue); f >= 0 {\n\t\t\t\tlimitsGaugeVec[\"max-length\"].WithLabelValues(labelValues...).Set(f)\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := limitsGaugeVec[\"max-length-bytes\"]; ok {\n\t\t\tif f := collectLowerMetric(\"arguments.x-max-length-bytes\", \"effective_policy_definition.max-length-bytes\", queue); f >= 0 {\n\t\t\t\tlimitsGaugeVec[\"max-length-bytes\"].WithLabelValues(labelValues...).Set(f)\n\t\t\t}\n\t\t}\n\n\t}\n\n\tfor _, metric := range e.limitsGauge {\n\t\tmetric.Collect(ch)\n\t}\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Collect(ch)\n\t}\n\te.stateMetric.Collect(ch)\n\te.idleSinceMetric.Collect(ch)\n\n\treturn nil\n}\n\nfunc (e exporterQueue) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range e.limitsGauge {\n\t\tmetric.Describe(ch)\n\t}\n\tfor _, gaugevec := range e.queueMetricsGauge {\n\t\tgaugevec.Describe(ch)\n\t}\n\te.stateMetric.Describe(ch)\n\te.idleSinceMetric.Describe(ch)\n\tfor _, countervec := range e.queueMetricsCounter {\n\t\tch <- countervec\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Required to be main when building a shared library\npackage main\n\n\/\/ typedef struct StartSessionReturn {\n\/\/ char *irmaQr;\n\/\/ char *token;\n\/\/ char *error;\n\/\/ } StartSessionReturn;\n\/\/\n\/\/ typedef struct HttpHeaders {\n\/\/ char **headerKeys;\n\/\/ char **headerValues;\n\/\/ int length;\n\/\/ } HttpHeaders;\n\/\/\n\/\/ typedef struct HandleProtocolMessageReturn {\n\/\/ int status;\n\/\/ char *body;\n\/\/ char *SessionResult;\n\/\/ } HandleProtocolMessageReturn;\nimport \"C\"\n\nimport (\n\t\"encoding\/json\"\n\t\"unsafe\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n\t\"github.com\/privacybydesign\/irmago\/internal\/servercore\"\n)\n\nvar s *servercore.Server\n\n\/\/export Initialize\nfunc Initialize(IrmaConfiguration *C.char) *C.char {\n\tif IrmaConfiguration == nil {\n\t\treturn C.CString(\"Missing IrmaConfiguration.\")\n\t}\n\t\n\t\/\/ Build the configuration structure\n\tconf := new(server.Configuration)\n\terr := json.Unmarshal([]byte(C.GoString(IrmaConfiguration)), conf)\n\tif err != nil {\n\t\treturn C.CString(err.Error())\n\t}\n\t\n\t\/\/ Run the actual core function\n\ts, err = servercore.New(conf)\n\t\n\t\/\/ And properly return any errors\n\tif err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn C.CString(err.Error())\n\t}\n}\n\n\/\/export StartSession\nfunc StartSession(requestString *C.char) C.struct_StartSessionReturn {\n\t\/\/ Create struct for return information\n\tvar result C.struct_StartSessionReturn\n\t\n\t\/\/ Check that we have required input\n\tif requestString == nil {\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(\"Missing request string.\")\n\t\treturn result\n\t}\n\t\n\t\/\/ Run the actual core function\n\tqr, token, err := s.StartSession(C.GoString(requestString))\n\t\n\t\/\/ And properly return the result\n\tif err != nil {\n\t\t\/\/ return the core error\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(err.Error())\n\t\treturn result\n\t}\n\tqrJson, err := json.Marshal(qr)\n\tif err != nil {\n\t\t\/\/ return encoding error\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(err.Error())\n\t\treturn result\n\t}\n\t\/\/ return actual results\n\tresult.irmaQr = C.CString(string(qrJson))\n\tresult.token = C.CString(token)\n\tresult.error = nil\n\treturn result\n}\n\n\/\/export GetSessionResult\nfunc GetSessionResult(token *C.char) *C.char{\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn nil\n\t}\n\t\n\t\/\/ Run the actual core function\n\tresult := s.GetSessionResult(C.GoString(token))\n\t\n\t\/\/ And properly return results\n\tif result == nil {\n\t\t\/\/ core error\n\t\treturn nil\n\t}\n\tresultJson, err := json.Marshal(result)\n\tif err != nil {\n\t\t\/\/ encoding error\n\t\treturn nil\n\t}\n\treturn C.CString(string(resultJson))\n}\n\n\/\/export GetRequest\nfunc GetRequest(token *C.char) *C.char{\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn nil\n\t}\n\t\n\t\/\/ Run the core function\n\tresult := s.GetRequest(C.GoString(token))\n\t\n\t\/\/ And properly return results\n\tif result == nil {\n\t\t\/\/ core error\n\t\treturn nil\n\t}\n\tresultJson, err := json.Marshal(result)\n\tif err != nil {\n\t\t\/\/ encoding error\n\t\treturn nil\n\t}\n\treturn C.CString(string(resultJson))\n}\n\n\/\/export CancelSession\nfunc CancelSession(token *C.char) *C.char{\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn C.CString(\"Missing token.\")\n\t}\n\t\n\t\/\/ Run the core function\n\terr := s.CancelSession(C.GoString(token))\n\t\n\tif err != nil {\n\t\treturn C.CString(err.Error())\n\t}\n\treturn nil\n}\n\nfunc convertHeaders(headers C.struct_HttpHeaders) (map[string][]string, error) {\n\t\/\/ Make the two arrays accessible via slices (https:\/\/github.com\/golang\/go\/wiki\/cgo#turning-c-arrays-into-go-slices)\n\theaderKeys := (*[1<<30]*C.char)(unsafe.Pointer(headers.headerKeys))[:headers.length:headers.length]\n\theaderValues := (*[1<<30]*C.char)(unsafe.Pointer(headers.headerValues))[:headers.length:headers.length]\n\t\n\t\/\/ Check and convert the input\n\tresult := map[string][]string{}\n\tfor i := 0; i<int(headers.length); i++ {\n\t\tif headerKeys[i] == nil || headerValues[i] == nil {\n\t\t\treturn map[string][]string{}, errors.New(\"Missing header key or value\")\n\t\t}\n\t\tkey := C.GoString(headerKeys[i])\n\t\tvalue := C.GoString(headerValues[i])\n\t\tresult[key] = append(result[key], value)\n\t}\n\t\n\treturn result, nil\n}\n\n\/\/export HandleProtocolMessage\nfunc HandleProtocolMessage(path *C.char, method *C.char, headers C.struct_HttpHeaders, message *C.char) C.struct_HandleProtocolMessageReturn {\n\t\/\/ Space for result\n\tvar result C.struct_HandleProtocolMessageReturn\n\t\n\t\/\/ Check input\n\tif path == nil || method == nil || message == nil {\n\t\tresult.status = 500\n\t\tresult.body = C.CString(\"\")\n\t\tresult.SessionResult = nil\n\t\treturn result\n\t}\n\t\n\t\/\/ Extract headers\n\theaderMap, err := convertHeaders(headers)\n\tif err != nil {\n\t\tresult.status = 500\n\t\tresult.body = C.CString(\"\")\n\t\tresult.SessionResult = nil\n\t\treturn result\n\t}\n\t\n\t\/\/ Prepare return values\n\tstatus, body, session := s.HandleProtocolMessage(C.GoString(path), C.GoString(method), headerMap, []byte(C.GoString(message)))\n\tif session == nil {\n\t\tresult.SessionResult = nil\n\t} else {\n\t\t\/\/ Convert SessionResult to JSON for return\n\t\tsessionJson, err := json.Marshal(session)\n\t\tif err != nil {\n\t\t\tresult.status = 500\n\t\t\tresult.body = C.CString(\"\")\n\t\t\tresult.SessionResult = nil\n\t\t\treturn result\n\t\t}\n\t\tresult.SessionResult = C.CString(string(sessionJson))\n\t}\n\tresult.status = C.int(status)\n\tresult.body = C.CString(string(body))\n\treturn result\n}\n\n\/\/ Required to build a shared library\nfunc main() {}\n<commit_msg>Format code in irmac with goimports<commit_after>\/\/ Required to be main when building a shared library\npackage main\n\n\/\/ typedef struct StartSessionReturn {\n\/\/ char *irmaQr;\n\/\/ char *token;\n\/\/ char *error;\n\/\/ } StartSessionReturn;\n\/\/\n\/\/ typedef struct HttpHeaders {\n\/\/ char **headerKeys;\n\/\/ char **headerValues;\n\/\/ int length;\n\/\/ } HttpHeaders;\n\/\/\n\/\/ typedef struct HandleProtocolMessageReturn {\n\/\/ int status;\n\/\/ char *body;\n\/\/ char *SessionResult;\n\/\/ } HandleProtocolMessageReturn;\nimport \"C\"\n\nimport (\n\t\"encoding\/json\"\n\t\"unsafe\"\n\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/privacybydesign\/irmago\/internal\/servercore\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n)\n\nvar s *servercore.Server\n\n\/\/export Initialize\nfunc Initialize(IrmaConfiguration *C.char) *C.char {\n\tif IrmaConfiguration == nil {\n\t\treturn C.CString(\"Missing IrmaConfiguration.\")\n\t}\n\n\t\/\/ Build the configuration structure\n\tconf := new(server.Configuration)\n\terr := json.Unmarshal([]byte(C.GoString(IrmaConfiguration)), conf)\n\tif err != nil {\n\t\treturn C.CString(err.Error())\n\t}\n\n\t\/\/ Run the actual core function\n\ts, err = servercore.New(conf)\n\n\t\/\/ And properly return any errors\n\tif err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn C.CString(err.Error())\n\t}\n}\n\n\/\/export StartSession\nfunc StartSession(requestString *C.char) C.struct_StartSessionReturn {\n\t\/\/ Create struct for return information\n\tvar result C.struct_StartSessionReturn\n\n\t\/\/ Check that we have required input\n\tif requestString == nil {\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(\"Missing request string.\")\n\t\treturn result\n\t}\n\n\t\/\/ Run the actual core function\n\tqr, token, err := s.StartSession(C.GoString(requestString))\n\n\t\/\/ And properly return the result\n\tif err != nil {\n\t\t\/\/ return the core error\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(err.Error())\n\t\treturn result\n\t}\n\tqrJson, err := json.Marshal(qr)\n\tif err != nil {\n\t\t\/\/ return encoding error\n\t\tresult.irmaQr = nil\n\t\tresult.token = nil\n\t\tresult.error = C.CString(err.Error())\n\t\treturn result\n\t}\n\t\/\/ return actual results\n\tresult.irmaQr = C.CString(string(qrJson))\n\tresult.token = C.CString(token)\n\tresult.error = nil\n\treturn result\n}\n\n\/\/export GetSessionResult\nfunc GetSessionResult(token *C.char) *C.char {\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Run the actual core function\n\tresult := s.GetSessionResult(C.GoString(token))\n\n\t\/\/ And properly return results\n\tif result == nil {\n\t\t\/\/ core error\n\t\treturn nil\n\t}\n\tresultJson, err := json.Marshal(result)\n\tif err != nil {\n\t\t\/\/ encoding error\n\t\treturn nil\n\t}\n\treturn C.CString(string(resultJson))\n}\n\n\/\/export GetRequest\nfunc GetRequest(token *C.char) *C.char {\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Run the core function\n\tresult := s.GetRequest(C.GoString(token))\n\n\t\/\/ And properly return results\n\tif result == nil {\n\t\t\/\/ core error\n\t\treturn nil\n\t}\n\tresultJson, err := json.Marshal(result)\n\tif err != nil {\n\t\t\/\/ encoding error\n\t\treturn nil\n\t}\n\treturn C.CString(string(resultJson))\n}\n\n\/\/export CancelSession\nfunc CancelSession(token *C.char) *C.char {\n\t\/\/ Check that we have required input\n\tif token == nil {\n\t\treturn C.CString(\"Missing token.\")\n\t}\n\n\t\/\/ Run the core function\n\terr := s.CancelSession(C.GoString(token))\n\n\tif err != nil {\n\t\treturn C.CString(err.Error())\n\t}\n\treturn nil\n}\n\nfunc convertHeaders(headers C.struct_HttpHeaders) (map[string][]string, error) {\n\t\/\/ Make the two arrays accessible via slices (https:\/\/github.com\/golang\/go\/wiki\/cgo#turning-c-arrays-into-go-slices)\n\theaderKeys := (*[1 << 30]*C.char)(unsafe.Pointer(headers.headerKeys))[:headers.length:headers.length]\n\theaderValues := (*[1 << 30]*C.char)(unsafe.Pointer(headers.headerValues))[:headers.length:headers.length]\n\n\t\/\/ Check and convert the input\n\tresult := map[string][]string{}\n\tfor i := 0; i < int(headers.length); i++ {\n\t\tif headerKeys[i] == nil || headerValues[i] == nil {\n\t\t\treturn map[string][]string{}, errors.New(\"Missing header key or value\")\n\t\t}\n\t\tkey := C.GoString(headerKeys[i])\n\t\tvalue := C.GoString(headerValues[i])\n\t\tresult[key] = append(result[key], value)\n\t}\n\n\treturn result, nil\n}\n\n\/\/export HandleProtocolMessage\nfunc HandleProtocolMessage(path *C.char, method *C.char, headers C.struct_HttpHeaders, message *C.char) C.struct_HandleProtocolMessageReturn {\n\t\/\/ Space for result\n\tvar result C.struct_HandleProtocolMessageReturn\n\n\t\/\/ Check input\n\tif path == nil || method == nil || message == nil {\n\t\tresult.status = 500\n\t\tresult.body = C.CString(\"\")\n\t\tresult.SessionResult = nil\n\t\treturn result\n\t}\n\n\t\/\/ Extract headers\n\theaderMap, err := convertHeaders(headers)\n\tif err != nil {\n\t\tresult.status = 500\n\t\tresult.body = C.CString(\"\")\n\t\tresult.SessionResult = nil\n\t\treturn result\n\t}\n\n\t\/\/ Prepare return values\n\tstatus, body, session := s.HandleProtocolMessage(C.GoString(path), C.GoString(method), headerMap, []byte(C.GoString(message)))\n\tif session == nil {\n\t\tresult.SessionResult = nil\n\t} else {\n\t\t\/\/ Convert SessionResult to JSON for return\n\t\tsessionJson, err := json.Marshal(session)\n\t\tif err != nil {\n\t\t\tresult.status = 500\n\t\t\tresult.body = C.CString(\"\")\n\t\t\tresult.SessionResult = nil\n\t\t\treturn result\n\t\t}\n\t\tresult.SessionResult = C.CString(string(sessionJson))\n\t}\n\tresult.status = C.int(status)\n\tresult.body = C.CString(string(body))\n\treturn result\n}\n\n\/\/ Required to build a shared library\nfunc main() {}\n<|endoftext|>"} {"text":"<commit_before>package promclient\n\nimport (\n\t\"github.com\/jacksontj\/promxy\/promhttputil\"\n\t\"github.com\/mailru\/easyjson\/jlexer\"\n)\n\ntype DataResult struct {\n\tStatus promhttputil.Status `json:\"status\"`\n\tData promhttputil.QueryData `json:\"data\"`\n\tErrorType promhttputil.ErrorType `json:\"errorType,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n}\n\nfunc (s *DataResult) Merge(o *DataResult) error {\n\tresult, err := promhttputil.MergeValues(s.Data.Result, o.Data.Result)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Data.Result = result\n\treturn nil\n}\n\n\/\/ UnmarshalJSON supports json.Unmarshaler interface\nfunc (v *DataResult) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&r)\n\treturn r.Error()\n}\n\n\/\/ UnmarshalEasyJSON supports easyjson.Unmarshaler interface\nfunc (out *DataResult) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tisTopLevel := in.IsStart()\n\tif in.IsNull() {\n\t\tif isTopLevel {\n\t\t\tin.Consumed()\n\t\t}\n\t\tin.Skip()\n\t\treturn\n\t}\n\tin.Delim('{')\n\tfor !in.IsDelim('}') {\n\t\tkey := in.UnsafeString()\n\t\tin.WantColon()\n\t\tif in.IsNull() {\n\t\t\tin.Skip()\n\t\t\tin.WantComma()\n\t\t\tcontinue\n\t\t}\n\t\tswitch key {\n\t\tcase \"status\":\n\t\t\tout.Status = promhttputil.Status(in.String())\n\t\tcase \"data\":\n\t\t\tin.AddError(out.Data.UnmarshalJSON(in.Raw()))\n\t\tcase \"errorType\":\n\t\t\tout.ErrorType = promhttputil.ErrorType(in.String())\n\t\tcase \"error\":\n\t\t\tout.Error = string(in.String())\n\t\tdefault:\n\t\t\tin.SkipRecursive()\n\t\t}\n\t\tin.WantComma()\n\t}\n\tin.Delim('}')\n\tif isTopLevel {\n\t\tin.Consumed()\n\t}\n}\n<commit_msg>Remove unused method<commit_after>package promclient\n\nimport (\n\t\"github.com\/jacksontj\/promxy\/promhttputil\"\n\t\"github.com\/mailru\/easyjson\/jlexer\"\n)\n\ntype DataResult struct {\n\tStatus promhttputil.Status `json:\"status\"`\n\tData promhttputil.QueryData `json:\"data\"`\n\tErrorType promhttputil.ErrorType `json:\"errorType,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n}\n\n\/\/ UnmarshalJSON supports json.Unmarshaler interface\nfunc (v *DataResult) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&r)\n\treturn r.Error()\n}\n\n\/\/ UnmarshalEasyJSON supports easyjson.Unmarshaler interface\nfunc (out *DataResult) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tisTopLevel := in.IsStart()\n\tif in.IsNull() {\n\t\tif isTopLevel {\n\t\t\tin.Consumed()\n\t\t}\n\t\tin.Skip()\n\t\treturn\n\t}\n\tin.Delim('{')\n\tfor !in.IsDelim('}') {\n\t\tkey := in.UnsafeString()\n\t\tin.WantColon()\n\t\tif in.IsNull() {\n\t\t\tin.Skip()\n\t\t\tin.WantComma()\n\t\t\tcontinue\n\t\t}\n\t\tswitch key {\n\t\tcase \"status\":\n\t\t\tout.Status = promhttputil.Status(in.String())\n\t\tcase \"data\":\n\t\t\tin.AddError(out.Data.UnmarshalJSON(in.Raw()))\n\t\tcase \"errorType\":\n\t\t\tout.ErrorType = promhttputil.ErrorType(in.String())\n\t\tcase \"error\":\n\t\t\tout.Error = string(in.String())\n\t\tdefault:\n\t\t\tin.SkipRecursive()\n\t\t}\n\t\tin.WantComma()\n\t}\n\tin.Delim('}')\n\tif isTopLevel {\n\t\tin.Consumed()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package boilingcore\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/volatiletech\/sqlboiler\/importers\"\n)\n\nvar noEditDisclaimer = []byte(`\/\/ Code generated by SQLBoiler (https:\/\/github.com\/volatiletech\/sqlboiler). DO NOT EDIT.\n\/\/ This file is meant to be re-generated in place and\/or deleted at any time.\n\n`)\n\nvar (\n\t\/\/ templateByteBuffer is re-used by all template construction to avoid\n\t\/\/ allocating more memory than is needed. This will later be a problem for\n\t\/\/ concurrency, address it then.\n\ttemplateByteBuffer = &bytes.Buffer{}\n\n\trgxRemoveNumberedPrefix = regexp.MustCompile(`[0-9]+_`)\n\trgxSyntaxError = regexp.MustCompile(`(\\d+):\\d+: `)\n\n\ttestHarnessWriteFile = ioutil.WriteFile\n)\n\n\/\/ generateOutput builds the file output and sends it to outHandler for saving\nfunc generateOutput(state *State, data *templateData) error {\n\treturn executeTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.Templates,\n\t\timportSet: state.Config.Imports.All,\n\t\tcombineImportsOnType: true,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\n\/\/ generateTestOutput builds the test file output and sends it to outHandler for saving\nfunc generateTestOutput(state *State, data *templateData) error {\n\treturn executeTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.TestTemplates,\n\t\timportSet: state.Config.Imports.Test,\n\t\tcombineImportsOnType: false,\n\t\tfileSuffix: \"_test.go\",\n\t})\n}\n\n\/\/ generateSingletonOutput processes the templates that should only be run\n\/\/ one time.\nfunc generateSingletonOutput(state *State, data *templateData) error {\n\treturn executeSingletonTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.SingletonTemplates,\n\t\timportNamedSet: state.Config.Imports.Singleton,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\n\/\/ generateSingletonTestOutput processes the templates that should only be run\n\/\/ one time.\nfunc generateSingletonTestOutput(state *State, data *templateData) error {\n\treturn executeSingletonTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.SingletonTestTemplates,\n\t\timportNamedSet: state.Config.Imports.TestSingleton,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\ntype executeTemplateData struct {\n\tstate *State\n\tdata *templateData\n\n\ttemplates *templateList\n\n\timportSet importers.Set\n\timportNamedSet importers.Map\n\n\tcombineImportsOnType bool\n\n\tfileSuffix string\n}\n\nfunc executeTemplates(e executeTemplateData) error {\n\tif e.data.Table.IsJoinTable {\n\t\treturn nil\n\t}\n\n\tout := templateByteBuffer\n\tout.Reset()\n\n\tvar imps importers.Set\n\timps.Standard = e.importSet.Standard\n\timps.ThirdParty = e.importSet.ThirdParty\n\tif e.combineImportsOnType {\n\t\tcolTypes := make([]string, len(e.data.Table.Columns))\n\t\tfor i, ct := range e.data.Table.Columns {\n\t\t\tcolTypes[i] = ct.Type\n\t\t}\n\n\t\timps = importers.AddTypeImports(imps, e.state.Config.Imports.BasedOnType, colTypes)\n\t}\n\n\twriteFileDisclaimer(out)\n\twritePackageName(out, e.state.Config.PkgName)\n\twriteImports(out, imps)\n\n\tfor _, tplName := range e.templates.Templates() {\n\t\tif err := executeTemplate(out, e.templates.Template, tplName, e.data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfName := e.data.Table.Name + e.fileSuffix\n\tif err := writeFile(e.state.Config.OutFolder, fName, out); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc executeSingletonTemplates(e executeTemplateData) error {\n\tif e.data.Table.IsJoinTable {\n\t\treturn nil\n\t}\n\n\tout := templateByteBuffer\n\tfor _, tplName := range e.templates.Templates() {\n\t\tout.Reset()\n\n\t\tfName := tplName\n\t\text := filepath.Ext(fName)\n\t\tfName = rgxRemoveNumberedPrefix.ReplaceAllString(fName[:len(fName)-len(ext)], \"\")\n\n\t\timps := importers.Set{\n\t\t\tStandard: e.importNamedSet[fName].Standard,\n\t\t\tThirdParty: e.importNamedSet[fName].ThirdParty,\n\t\t}\n\n\t\twriteFileDisclaimer(out)\n\t\twritePackageName(out, e.state.Config.PkgName)\n\t\twriteImports(out, imps)\n\n\t\tif err := executeTemplate(out, e.templates.Template, tplName, e.data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writeFile(e.state.Config.OutFolder, fName+e.fileSuffix, out); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ writeFileDisclaimer writes the disclaimer at the top with a trailing\n\/\/ newline so the package name doesn't get attached to it.\nfunc writeFileDisclaimer(out *bytes.Buffer) {\n\t_, _ = out.Write(noEditDisclaimer)\n}\n\n\/\/ writePackageName writes the package name correctly, ignores errors\n\/\/ since it's to the concrete buffer type which produces none\nfunc writePackageName(out *bytes.Buffer, pkgName string) {\n\t_, _ = fmt.Fprintf(out, \"package %s\\n\\n\", pkgName)\n}\n\n\/\/ writeImports writes the package imports correctly, ignores errors\n\/\/ since it's to the concrete buffer type which produces none\nfunc writeImports(out *bytes.Buffer, imps importers.Set) {\n\tif impStr := imps.Format(); len(impStr) > 0 {\n\t\t_, _ = fmt.Fprintf(out, \"%s\\n\", impStr)\n\t}\n}\n\n\/\/ writeFile writes to the given folder and filename, formatting the buffer\n\/\/ given.\nfunc writeFile(outFolder string, fileName string, input *bytes.Buffer) error {\n\tbyt, err := formatBuffer(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath := filepath.Join(outFolder, fileName)\n\tif err = testHarnessWriteFile(path, byt, 0666); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to write output file %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ executeTemplate takes a template and returns the output of the template\n\/\/ execution.\nfunc executeTemplate(buf *bytes.Buffer, t *template.Template, name string, data *templateData) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.Errorf(\"failed to execute template: %s\\npanic: %+v\\n\", name, r)\n\t\t}\n\t}()\n\n\tif err := t.ExecuteTemplate(buf, name, data); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to execute template: %s\", name)\n\t}\n\treturn nil\n}\n\nfunc formatBuffer(buf *bytes.Buffer) ([]byte, error) {\n\toutput, err := format.Source(buf.Bytes())\n\tif err == nil {\n\t\treturn output, nil\n\t}\n\n\tmatches := rgxSyntaxError.FindStringSubmatch(err.Error())\n\tif matches == nil {\n\t\treturn nil, errors.Wrap(err, \"failed to format template\")\n\t}\n\n\tlineNum, _ := strconv.Atoi(matches[1])\n\tscanner := bufio.NewScanner(buf)\n\terrBuf := &bytes.Buffer{}\n\tline := 1\n\tfor ; scanner.Scan(); line++ {\n\t\tif delta := line - lineNum; delta < -5 || delta > 5 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif line == lineNum {\n\t\t\terrBuf.WriteString(\">>>> \")\n\t\t} else {\n\t\t\tfmt.Fprintf(errBuf, \"% 4d \", line)\n\t\t}\n\t\terrBuf.Write(scanner.Bytes())\n\t\terrBuf.WriteByte('\\n')\n\t}\n\n\treturn nil, errors.Wrapf(err, \"failed to format template\\n\\n%s\\n\", errBuf.Bytes())\n}\n<commit_msg>Fix bug in template generation<commit_after>package boilingcore\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"text\/template\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/volatiletech\/sqlboiler\/importers\"\n)\n\nvar noEditDisclaimer = []byte(`\/\/ Code generated by SQLBoiler (https:\/\/github.com\/volatiletech\/sqlboiler). DO NOT EDIT.\n\/\/ This file is meant to be re-generated in place and\/or deleted at any time.\n\n`)\n\nvar (\n\t\/\/ templateByteBuffer is re-used by all template construction to avoid\n\t\/\/ allocating more memory than is needed. This will later be a problem for\n\t\/\/ concurrency, address it then.\n\ttemplateByteBuffer = &bytes.Buffer{}\n\n\trgxRemoveNumberedPrefix = regexp.MustCompile(`^[0-9]+_`)\n\trgxSyntaxError = regexp.MustCompile(`(\\d+):\\d+: `)\n\n\ttestHarnessWriteFile = ioutil.WriteFile\n)\n\n\/\/ generateOutput builds the file output and sends it to outHandler for saving\nfunc generateOutput(state *State, data *templateData) error {\n\treturn executeTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.Templates,\n\t\timportSet: state.Config.Imports.All,\n\t\tcombineImportsOnType: true,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\n\/\/ generateTestOutput builds the test file output and sends it to outHandler for saving\nfunc generateTestOutput(state *State, data *templateData) error {\n\treturn executeTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.TestTemplates,\n\t\timportSet: state.Config.Imports.Test,\n\t\tcombineImportsOnType: false,\n\t\tfileSuffix: \"_test.go\",\n\t})\n}\n\n\/\/ generateSingletonOutput processes the templates that should only be run\n\/\/ one time.\nfunc generateSingletonOutput(state *State, data *templateData) error {\n\treturn executeSingletonTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.SingletonTemplates,\n\t\timportNamedSet: state.Config.Imports.Singleton,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\n\/\/ generateSingletonTestOutput processes the templates that should only be run\n\/\/ one time.\nfunc generateSingletonTestOutput(state *State, data *templateData) error {\n\treturn executeSingletonTemplates(executeTemplateData{\n\t\tstate: state,\n\t\tdata: data,\n\t\ttemplates: state.SingletonTestTemplates,\n\t\timportNamedSet: state.Config.Imports.TestSingleton,\n\t\tfileSuffix: \".go\",\n\t})\n}\n\ntype executeTemplateData struct {\n\tstate *State\n\tdata *templateData\n\n\ttemplates *templateList\n\n\timportSet importers.Set\n\timportNamedSet importers.Map\n\n\tcombineImportsOnType bool\n\n\tfileSuffix string\n}\n\nfunc executeTemplates(e executeTemplateData) error {\n\tif e.data.Table.IsJoinTable {\n\t\treturn nil\n\t}\n\n\tout := templateByteBuffer\n\tout.Reset()\n\n\tvar imps importers.Set\n\timps.Standard = e.importSet.Standard\n\timps.ThirdParty = e.importSet.ThirdParty\n\tif e.combineImportsOnType {\n\t\tcolTypes := make([]string, len(e.data.Table.Columns))\n\t\tfor i, ct := range e.data.Table.Columns {\n\t\t\tcolTypes[i] = ct.Type\n\t\t}\n\n\t\timps = importers.AddTypeImports(imps, e.state.Config.Imports.BasedOnType, colTypes)\n\t}\n\n\twriteFileDisclaimer(out)\n\twritePackageName(out, e.state.Config.PkgName)\n\twriteImports(out, imps)\n\n\tfor _, tplName := range e.templates.Templates() {\n\t\tif err := executeTemplate(out, e.templates.Template, tplName, e.data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfName := e.data.Table.Name + e.fileSuffix\n\tif err := writeFile(e.state.Config.OutFolder, fName, out); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc executeSingletonTemplates(e executeTemplateData) error {\n\tif e.data.Table.IsJoinTable {\n\t\treturn nil\n\t}\n\n\tout := templateByteBuffer\n\tfor _, tplName := range e.templates.Templates() {\n\t\tout.Reset()\n\n\t\tfName := tplName\n\t\text := filepath.Ext(fName)\n\t\tfName = rgxRemoveNumberedPrefix.ReplaceAllString(fName[:len(fName)-len(ext)], \"\")\n\n\t\timps := importers.Set{\n\t\t\tStandard: e.importNamedSet[fName].Standard,\n\t\t\tThirdParty: e.importNamedSet[fName].ThirdParty,\n\t\t}\n\n\t\twriteFileDisclaimer(out)\n\t\twritePackageName(out, e.state.Config.PkgName)\n\t\twriteImports(out, imps)\n\n\t\tif err := executeTemplate(out, e.templates.Template, tplName, e.data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writeFile(e.state.Config.OutFolder, fName+e.fileSuffix, out); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ writeFileDisclaimer writes the disclaimer at the top with a trailing\n\/\/ newline so the package name doesn't get attached to it.\nfunc writeFileDisclaimer(out *bytes.Buffer) {\n\t_, _ = out.Write(noEditDisclaimer)\n}\n\n\/\/ writePackageName writes the package name correctly, ignores errors\n\/\/ since it's to the concrete buffer type which produces none\nfunc writePackageName(out *bytes.Buffer, pkgName string) {\n\t_, _ = fmt.Fprintf(out, \"package %s\\n\\n\", pkgName)\n}\n\n\/\/ writeImports writes the package imports correctly, ignores errors\n\/\/ since it's to the concrete buffer type which produces none\nfunc writeImports(out *bytes.Buffer, imps importers.Set) {\n\tif impStr := imps.Format(); len(impStr) > 0 {\n\t\t_, _ = fmt.Fprintf(out, \"%s\\n\", impStr)\n\t}\n}\n\n\/\/ writeFile writes to the given folder and filename, formatting the buffer\n\/\/ given.\nfunc writeFile(outFolder string, fileName string, input *bytes.Buffer) error {\n\tbyt, err := formatBuffer(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath := filepath.Join(outFolder, fileName)\n\tif err = testHarnessWriteFile(path, byt, 0666); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to write output file %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ executeTemplate takes a template and returns the output of the template\n\/\/ execution.\nfunc executeTemplate(buf *bytes.Buffer, t *template.Template, name string, data *templateData) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.Errorf(\"failed to execute template: %s\\npanic: %+v\\n\", name, r)\n\t\t}\n\t}()\n\n\tif err := t.ExecuteTemplate(buf, name, data); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to execute template: %s\", name)\n\t}\n\treturn nil\n}\n\nfunc formatBuffer(buf *bytes.Buffer) ([]byte, error) {\n\toutput, err := format.Source(buf.Bytes())\n\tif err == nil {\n\t\treturn output, nil\n\t}\n\n\tmatches := rgxSyntaxError.FindStringSubmatch(err.Error())\n\tif matches == nil {\n\t\treturn nil, errors.Wrap(err, \"failed to format template\")\n\t}\n\n\tlineNum, _ := strconv.Atoi(matches[1])\n\tscanner := bufio.NewScanner(buf)\n\terrBuf := &bytes.Buffer{}\n\tline := 1\n\tfor ; scanner.Scan(); line++ {\n\t\tif delta := line - lineNum; delta < -5 || delta > 5 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif line == lineNum {\n\t\t\terrBuf.WriteString(\">>>> \")\n\t\t} else {\n\t\t\tfmt.Fprintf(errBuf, \"% 4d \", line)\n\t\t}\n\t\terrBuf.Write(scanner.Bytes())\n\t\terrBuf.WriteByte('\\n')\n\t}\n\n\treturn nil, errors.Wrapf(err, \"failed to format template\\n\\n%s\\n\", errBuf.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Walk Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage walk\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nimport . \"github.com\/lxn\/go-winapi\"\n\nconst (\n\tDlgCmdOK = IDOK\n\tDlgCmdCancel = IDCANCEL\n\tDlgCmdAbort = IDABORT\n\tDlgCmdRetry = IDRETRY\n\tDlgCmdIgnore = IDIGNORE\n\tDlgCmdYes = IDYES\n\tDlgCmdNo = IDNO\n\tDlgCmdClose = IDCLOSE\n\tDlgCmdHelp = IDHELP\n\tDlgCmdTryAgain = IDTRYAGAIN\n\tDlgCmdContinue = IDCONTINUE\n\tDlgCmdTimeout = IDTIMEOUT\n)\n\nconst dialogWindowClass = `\\o\/ Walk_Dialog_Class \\o\/`\n\nvar dialogWindowClassRegistered bool\n\ntype dialogish interface {\n\tDefaultButton() *PushButton\n\tCancelButton() *PushButton\n}\n\ntype Dialog struct {\n\tTopLevelWindow\n\tresult int\n\tdefaultButton *PushButton\n\tcancelButton *PushButton\n\tcenterInOwnerWhenRun bool\n}\n\nfunc NewDialog(owner RootWidget) (*Dialog, error) {\n\tensureRegisteredWindowClass(dialogWindowClass, &dialogWindowClassRegistered)\n\n\tdlg := &Dialog{\n\t\tTopLevelWindow: TopLevelWindow{\n\t\t\towner: owner,\n\t\t},\n\t}\n\n\tif err := initWidget(\n\t\tdlg,\n\t\towner,\n\t\tdialogWindowClass,\n\t\tWS_CAPTION|WS_SYSMENU|WS_THICKFRAME,\n\t\tWS_EX_DLGMODALFRAME); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdlg.centerInOwnerWhenRun = owner != nil\n\n\tdlg.children = newWidgetList(dlg)\n\n\t\/\/ This forces display of focus rectangles, as soon as the user starts to type.\n\tSendMessage(dlg.hWnd, WM_CHANGEUISTATE, UIS_INITIALIZE, 0)\n\n\tdlg.result = DlgCmdClose\n\n\treturn dlg, nil\n}\n\nfunc (dlg *Dialog) DefaultButton() *PushButton {\n\treturn dlg.defaultButton\n}\n\nfunc (dlg *Dialog) SetDefaultButton(button *PushButton) error {\n\tif button != nil && !IsChild(dlg.hWnd, button.hWnd) {\n\t\treturn newError(\"not a descendant of the dialog\")\n\t}\n\n\tsucceeded := false\n\tif dlg.defaultButton != nil {\n\t\tif err := dlg.defaultButton.setAndClearStyleBits(BS_PUSHBUTTON, BS_DEFPUSHBUTTON); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\tif !succeeded {\n\t\t\t\tdlg.defaultButton.setAndClearStyleBits(BS_DEFPUSHBUTTON, BS_PUSHBUTTON)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif button != nil {\n\t\tif err := button.setAndClearStyleBits(BS_DEFPUSHBUTTON, BS_PUSHBUTTON); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdlg.defaultButton = button\n\n\tsucceeded = true\n\n\treturn nil\n}\n\nfunc (dlg *Dialog) CancelButton() *PushButton {\n\treturn dlg.cancelButton\n}\n\nfunc (dlg *Dialog) SetCancelButton(button *PushButton) error {\n\tif button != nil && !IsChild(dlg.hWnd, button.hWnd) {\n\t\treturn newError(\"not a descendant of the dialog\")\n\t}\n\n\tdlg.cancelButton = button\n\n\treturn nil\n}\n\nfunc (dlg *Dialog) Accept() {\n\tdlg.Close(DlgCmdOK)\n}\n\nfunc (dlg *Dialog) Cancel() {\n\tdlg.Close(DlgCmdCancel)\n}\n\nfunc (dlg *Dialog) Close(result int) {\n\tdlg.result = result\n\n\tdlg.TopLevelWindow.Close()\n}\n\nfunc firstFocusableDescendantCallback(hwnd HWND, lParam uintptr) uintptr {\n\twidget := widgetFromHWND(hwnd)\n\n\tif widget == nil || !widget.Visible() || !widget.Enabled() {\n\t\treturn 1\n\t}\n\n\tstyle := uint(GetWindowLong(hwnd, GWL_STYLE))\n\t\/\/ FIXME: Ugly workaround for NumberEdit\n\t_, isTextSelectable := widget.(textSelectable)\n\tif style&WS_TABSTOP > 0 || isTextSelectable {\n\t\thwndPtr := (*HWND)(unsafe.Pointer(lParam))\n\t\t*hwndPtr = hwnd\n\t\treturn 0\n\t}\n\n\treturn 1\n}\n\nvar firstFocusableDescendantCallbackPtr = syscall.NewCallback(firstFocusableDescendantCallback)\n\nfunc firstFocusableDescendant(container Container) Widget {\n\tvar hwnd HWND\n\n\tEnumChildWindows(container.BaseWidget().hWnd, firstFocusableDescendantCallbackPtr, uintptr(unsafe.Pointer(&hwnd)))\n\n\treturn widgetFromHWND(hwnd)\n}\n\ntype textSelectable interface {\n\tSetTextSelection(start, end int)\n}\n\nfunc (dlg *Dialog) focusFirstCandidateDescendant() {\n\twidget := firstFocusableDescendant(dlg)\n\tif widget == nil {\n\t\treturn\n\t}\n\n\tif err := widget.SetFocus(); err != nil {\n\t\treturn\n\t}\n\n\tif textSel, ok := widget.(textSelectable); ok {\n\t\ttextSel.SetTextSelection(0, -1)\n\t}\n}\n\nfunc (dlg *Dialog) Show() {\n\tdlg.TopLevelWindow.Show()\n\n\tdlg.focusFirstCandidateDescendant()\n}\n\nfunc (dlg *Dialog) Run() int {\n\tif dlg.owner != nil {\n\t\tob := dlg.owner.Bounds()\n\t\tb := dlg.Bounds()\n\t\tif dlg.centerInOwnerWhenRun {\n\t\t\tdlg.SetBounds(Rectangle{\n\t\t\t\tob.X + (ob.Width-b.Width)\/2,\n\t\t\t\tob.Y + (ob.Height-b.Height)\/2,\n\t\t\t\tb.Width,\n\t\t\t\tb.Height,\n\t\t\t})\n\t\t}\n\t}\n\n\tdlg.Show()\n\n\tif dlg.owner != nil {\n\t\tdlg.owner.SetEnabled(false)\n\t}\n\n\tdlg.TopLevelWindow.Run()\n\n\treturn dlg.result\n}\n<commit_msg>Make sure standalone dialogs start up with appropriate size.<commit_after>\/\/ Copyright 2010 The Walk Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage walk\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nimport . \"github.com\/lxn\/go-winapi\"\n\nconst (\n\tDlgCmdOK = IDOK\n\tDlgCmdCancel = IDCANCEL\n\tDlgCmdAbort = IDABORT\n\tDlgCmdRetry = IDRETRY\n\tDlgCmdIgnore = IDIGNORE\n\tDlgCmdYes = IDYES\n\tDlgCmdNo = IDNO\n\tDlgCmdClose = IDCLOSE\n\tDlgCmdHelp = IDHELP\n\tDlgCmdTryAgain = IDTRYAGAIN\n\tDlgCmdContinue = IDCONTINUE\n\tDlgCmdTimeout = IDTIMEOUT\n)\n\nconst dialogWindowClass = `\\o\/ Walk_Dialog_Class \\o\/`\n\nvar dialogWindowClassRegistered bool\n\ntype dialogish interface {\n\tDefaultButton() *PushButton\n\tCancelButton() *PushButton\n}\n\ntype Dialog struct {\n\tTopLevelWindow\n\tresult int\n\tdefaultButton *PushButton\n\tcancelButton *PushButton\n\tcenterInOwnerWhenRun bool\n}\n\nfunc NewDialog(owner RootWidget) (*Dialog, error) {\n\tensureRegisteredWindowClass(dialogWindowClass, &dialogWindowClassRegistered)\n\n\tdlg := &Dialog{\n\t\tTopLevelWindow: TopLevelWindow{\n\t\t\towner: owner,\n\t\t},\n\t}\n\n\tif err := initWidget(\n\t\tdlg,\n\t\towner,\n\t\tdialogWindowClass,\n\t\tWS_CAPTION|WS_SYSMENU|WS_THICKFRAME,\n\t\tWS_EX_DLGMODALFRAME); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdlg.centerInOwnerWhenRun = owner != nil\n\n\tdlg.children = newWidgetList(dlg)\n\n\t\/\/ This forces display of focus rectangles, as soon as the user starts to type.\n\tSendMessage(dlg.hWnd, WM_CHANGEUISTATE, UIS_INITIALIZE, 0)\n\n\tdlg.result = DlgCmdClose\n\n\treturn dlg, nil\n}\n\nfunc (dlg *Dialog) DefaultButton() *PushButton {\n\treturn dlg.defaultButton\n}\n\nfunc (dlg *Dialog) SetDefaultButton(button *PushButton) error {\n\tif button != nil && !IsChild(dlg.hWnd, button.hWnd) {\n\t\treturn newError(\"not a descendant of the dialog\")\n\t}\n\n\tsucceeded := false\n\tif dlg.defaultButton != nil {\n\t\tif err := dlg.defaultButton.setAndClearStyleBits(BS_PUSHBUTTON, BS_DEFPUSHBUTTON); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\tif !succeeded {\n\t\t\t\tdlg.defaultButton.setAndClearStyleBits(BS_DEFPUSHBUTTON, BS_PUSHBUTTON)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif button != nil {\n\t\tif err := button.setAndClearStyleBits(BS_DEFPUSHBUTTON, BS_PUSHBUTTON); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdlg.defaultButton = button\n\n\tsucceeded = true\n\n\treturn nil\n}\n\nfunc (dlg *Dialog) CancelButton() *PushButton {\n\treturn dlg.cancelButton\n}\n\nfunc (dlg *Dialog) SetCancelButton(button *PushButton) error {\n\tif button != nil && !IsChild(dlg.hWnd, button.hWnd) {\n\t\treturn newError(\"not a descendant of the dialog\")\n\t}\n\n\tdlg.cancelButton = button\n\n\treturn nil\n}\n\nfunc (dlg *Dialog) Accept() {\n\tdlg.Close(DlgCmdOK)\n}\n\nfunc (dlg *Dialog) Cancel() {\n\tdlg.Close(DlgCmdCancel)\n}\n\nfunc (dlg *Dialog) Close(result int) {\n\tdlg.result = result\n\n\tdlg.TopLevelWindow.Close()\n}\n\nfunc firstFocusableDescendantCallback(hwnd HWND, lParam uintptr) uintptr {\n\twidget := widgetFromHWND(hwnd)\n\n\tif widget == nil || !widget.Visible() || !widget.Enabled() {\n\t\treturn 1\n\t}\n\n\tstyle := uint(GetWindowLong(hwnd, GWL_STYLE))\n\t\/\/ FIXME: Ugly workaround for NumberEdit\n\t_, isTextSelectable := widget.(textSelectable)\n\tif style&WS_TABSTOP > 0 || isTextSelectable {\n\t\thwndPtr := (*HWND)(unsafe.Pointer(lParam))\n\t\t*hwndPtr = hwnd\n\t\treturn 0\n\t}\n\n\treturn 1\n}\n\nvar firstFocusableDescendantCallbackPtr = syscall.NewCallback(firstFocusableDescendantCallback)\n\nfunc firstFocusableDescendant(container Container) Widget {\n\tvar hwnd HWND\n\n\tEnumChildWindows(container.BaseWidget().hWnd, firstFocusableDescendantCallbackPtr, uintptr(unsafe.Pointer(&hwnd)))\n\n\treturn widgetFromHWND(hwnd)\n}\n\ntype textSelectable interface {\n\tSetTextSelection(start, end int)\n}\n\nfunc (dlg *Dialog) focusFirstCandidateDescendant() {\n\twidget := firstFocusableDescendant(dlg)\n\tif widget == nil {\n\t\treturn\n\t}\n\n\tif err := widget.SetFocus(); err != nil {\n\t\treturn\n\t}\n\n\tif textSel, ok := widget.(textSelectable); ok {\n\t\ttextSel.SetTextSelection(0, -1)\n\t}\n}\n\nfunc (dlg *Dialog) Show() {\n\tdlg.TopLevelWindow.Show()\n\n\tdlg.focusFirstCandidateDescendant()\n}\n\nfunc (dlg *Dialog) Run() int {\n\tif dlg.owner != nil {\n\t\tob := dlg.owner.Bounds()\n\t\tb := dlg.Bounds()\n\t\tif dlg.centerInOwnerWhenRun {\n\t\t\tdlg.SetBounds(Rectangle{\n\t\t\t\tob.X + (ob.Width-b.Width)\/2,\n\t\t\t\tob.Y + (ob.Height-b.Height)\/2,\n\t\t\t\tb.Width,\n\t\t\t\tb.Height,\n\t\t\t})\n\t\t}\n\t} else {\n\t\tdlg.SetBounds(dlg.Bounds())\n\t}\n\n\tdlg.Show()\n\n\tif dlg.owner != nil {\n\t\tdlg.owner.SetEnabled(false)\n\t}\n\n\tdlg.TopLevelWindow.Run()\n\n\treturn dlg.result\n}\n<|endoftext|>"} {"text":"<commit_before>package auth\n\nimport (\n\t\"crypto\/subtle\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype digest_client struct {\n\tnc uint64\n\tlast_seen int64\n}\n\ntype DigestAuth struct {\n\tRealm string\n\tOpaque string\n\tSecrets SecretProvider\n\tPlainTextSecrets bool\n\tIgnoreNonceCount bool\n\t\/\/ Headers used by authenticator. Set to ProxyHeaders to use with\n\t\/\/ proxy server. When nil, NormalHeaders are used.\n\tHeaders *Headers\n\n\t\/*\n\t Approximate size of Client's Cache. When actual number of\n\t tracked client nonces exceeds\n\t ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2\n\t older entries are purged.\n\t*\/\n\tClientCacheSize int\n\tClientCacheTolerance int\n\n\tclients map[string]*digest_client\n\tmutex sync.RWMutex\n}\n\n\/\/ check that DigestAuth implements AuthenticatorInterface\nvar _ = (AuthenticatorInterface)((*DigestAuth)(nil))\n\ntype digest_cache_entry struct {\n\tnonce string\n\tlast_seen int64\n}\n\ntype digest_cache []digest_cache_entry\n\nfunc (c digest_cache) Less(i, j int) bool {\n\treturn c[i].last_seen < c[j].last_seen\n}\n\nfunc (c digest_cache) Len() int {\n\treturn len(c)\n}\n\nfunc (c digest_cache) Swap(i, j int) {\n\tc[i], c[j] = c[j], c[i]\n}\n\n\/*\n Purge removes count oldest entries from DigestAuth.clients\n*\/\nfunc (a *DigestAuth) Purge(count int) {\n\ta.mutex.Lock()\n\tentries := make([]digest_cache_entry, 0, len(a.clients))\n\tfor nonce, client := range a.clients {\n\t\tentries = append(entries, digest_cache_entry{nonce, client.last_seen})\n\t}\n\tcache := digest_cache(entries)\n\tsort.Sort(cache)\n\tfor _, client := range cache[:count] {\n\t\tdelete(a.clients, client.nonce)\n\t}\n\ta.mutex.Unlock()\n}\n\n\/*\n http.Handler for DigestAuth which initiates the authentication process\n (or requires reauthentication).\n*\/\nfunc (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {\n\ta.mutex.RLock()\n\tif len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {\n\t\ta.mutex.RUnlock()\n\t\ta.Purge(a.ClientCacheTolerance * 2)\n\t} else {\n\t\ta.mutex.RUnlock()\n\t}\n\tnonce := RandomKey()\n\n\ta.mutex.Lock()\n\ta.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}\n\ta.mutex.Unlock()\n\n\ta.mutex.RLock()\n\tw.Header().Set(contentType, a.Headers.V().UnauthContentType)\n\tw.Header().Set(a.Headers.V().Authenticate,\n\t\tfmt.Sprintf(`Digest realm=\"%s\", nonce=\"%s\", opaque=\"%s\", algorithm=\"MD5\", qop=\"auth\"`,\n\t\t\ta.Realm, nonce, a.Opaque))\n\tw.WriteHeader(a.Headers.V().UnauthCode)\n\tw.Write([]byte(a.Headers.V().UnauthResponse))\n\ta.mutex.RUnlock()\n}\n\n\/*\n Parse Authorization header from the http.Request. Returns a map of\n auth parameters or nil if the header is not a valid parsable Digest\n auth header.\n*\/\nfunc DigestAuthParams(authorization string) map[string]string {\n\ts := strings.SplitN(authorization, \" \", 2)\n\tif len(s) != 2 || s[0] != \"Digest\" {\n\t\treturn nil\n\t}\n\n\treturn ParsePairs(s[1])\n}\n\n\/*\n Check if request contains valid authentication data. Returns a pair\n of username, authinfo where username is the name of the authenticated\n user or an empty string and authinfo is the contents for the optional\n Authentication-Info response header.\n*\/\nfunc (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {\n\tda.mutex.RLock()\n\tdefer da.mutex.RUnlock()\n\tusername = \"\"\n\tauthinfo = nil\n\tauth := DigestAuthParams(r.Header.Get(da.Headers.V().Authorization))\n\tif auth == nil {\n\t\treturn \"\", nil\n\t}\n\t\/\/ RFC2617 Section 3.2.1 specifies that unset value of algorithm in\n\t\/\/ WWW-Authenticate Response header should be treated as\n\t\/\/ \"MD5\". According to section 3.2.2 the \"algorithm\" value in\n\t\/\/ subsequent Request Authorization header must be set to whatever\n\t\/\/ was supplied in the WWW-Authenticate Response header. This\n\t\/\/ implementation always returns an algorithm in WWW-Authenticate\n\t\/\/ header, however there seems to be broken clients in the wild\n\t\/\/ which do not set the algorithm. Assume the unset algorithm in\n\t\/\/ Authorization header to be equal to MD5.\n\tif _, ok := auth[\"algorithm\"]; !ok {\n\t\tauth[\"algorithm\"] = \"MD5\"\n\t}\n\tif da.Opaque != auth[\"opaque\"] || auth[\"algorithm\"] != \"MD5\" || auth[\"qop\"] != \"auth\" {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ Check if the requested URI matches auth header\n\tif r.RequestURI != auth[\"uri\"] {\n\t\t\/\/ We allow auth[\"uri\"] to be a full path prefix of request-uri\n\t\t\/\/ for some reason lost in history, which is probably wrong, but\n\t\t\/\/ used to be like that for quite some time\n\t\t\/\/ (https:\/\/tools.ietf.org\/html\/rfc2617#section-3.2.2 explicitly\n\t\t\/\/ says that auth[\"uri\"] is the request-uri).\n\t\t\/\/\n\t\t\/\/ TODO: make an option to allow only strict checking.\n\t\tswitch u, err := url.Parse(auth[\"uri\"]); {\n\t\tcase err != nil:\n\t\t\treturn \"\", nil\n\t\tcase r.URL == nil:\n\t\t\treturn \"\", nil\n\t\tcase len(u.Path) > len(r.URL.Path):\n\t\t\treturn \"\", nil\n\t\tcase !strings.HasPrefix(r.URL.Path, u.Path):\n\t\t\treturn \"\", nil\n\t\t}\n\t}\n\n\tHA1 := da.Secrets(auth[\"username\"], da.Realm)\n\tif da.PlainTextSecrets {\n\t\tHA1 = H(auth[\"username\"] + \":\" + da.Realm + \":\" + HA1)\n\t}\n\tHA2 := H(r.Method + \":\" + auth[\"uri\"])\n\tKD := H(strings.Join([]string{HA1, auth[\"nonce\"], auth[\"nc\"], auth[\"cnonce\"], auth[\"qop\"], HA2}, \":\"))\n\n\tif subtle.ConstantTimeCompare([]byte(KD), []byte(auth[\"response\"])) != 1 {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ At this point crypto checks are completed and validated.\n\t\/\/ Now check if the session is valid.\n\n\tnc, err := strconv.ParseUint(auth[\"nc\"], 16, 64)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tif client, ok := da.clients[auth[\"nonce\"]]; !ok {\n\t\treturn \"\", nil\n\t} else {\n\t\tif client.nc != 0 && client.nc >= nc && !da.IgnoreNonceCount {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tclient.nc = nc\n\t\tclient.last_seen = time.Now().UnixNano()\n\t}\n\n\tresp_HA2 := H(\":\" + auth[\"uri\"])\n\trspauth := H(strings.Join([]string{HA1, auth[\"nonce\"], auth[\"nc\"], auth[\"cnonce\"], auth[\"qop\"], resp_HA2}, \":\"))\n\n\tinfo := fmt.Sprintf(`qop=\"auth\", rspauth=\"%s\", cnonce=\"%s\", nc=\"%s\"`, rspauth, auth[\"cnonce\"], auth[\"nc\"])\n\treturn auth[\"username\"], &info\n}\n\n\/*\n Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth\n*\/\nconst DefaultClientCacheSize = 1000\nconst DefaultClientCacheTolerance = 100\n\n\/*\n Wrap returns an Authenticator which uses HTTP Digest\n authentication. Arguments:\n\n realm: The authentication realm.\n\n secrets: SecretProvider which must return HA1 digests for the same\n realm as above.\n*\/\nfunc (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {\n\ta.mutex.RLock()\n\tdefer a.mutex.RUnlock()\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif username, authinfo := a.CheckAuth(r); username == \"\" {\n\t\t\ta.RequireAuth(w, r)\n\t\t} else {\n\t\t\tar := &AuthenticatedRequest{Request: *r, Username: username}\n\t\t\tif authinfo != nil {\n\t\t\t\tw.Header().Set(a.Headers.V().AuthInfo, *authinfo)\n\t\t\t}\n\t\t\twrapped(w, ar)\n\t\t}\n\t}\n}\n\n\/*\n JustCheck returns function which converts an http.HandlerFunc into a\n http.HandlerFunc which requires authentication. Username is passed as\n an extra X-Authenticated-Username header.\n*\/\nfunc (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {\n\treturn a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {\n\t\tar.Header.Set(AuthUsernameHeader, ar.Username)\n\t\twrapped(w, &ar.Request)\n\t})\n}\n\n\/\/ NewContext returns a context carrying authentication information for the request.\nfunc (a *DigestAuth) NewContext(ctx context.Context, r *http.Request) context.Context {\n\ta.mutex.Lock()\n\tusername, authinfo := a.CheckAuth(r)\n\tinfo := &Info{Username: username, ResponseHeaders: make(http.Header)}\n\tif username != \"\" {\n\t\tinfo.Authenticated = true\n\t\tinfo.ResponseHeaders.Set(a.Headers.V().AuthInfo, *authinfo)\n\t} else {\n\t\t\/\/ return back digest WWW-Authenticate header\n\t\tif len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {\n\t\t\ta.Purge(a.ClientCacheTolerance * 2)\n\t\t}\n\t\tnonce := RandomKey()\n\t\ta.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}\n\t\tinfo.ResponseHeaders.Set(a.Headers.V().Authenticate,\n\t\t\tfmt.Sprintf(`Digest realm=\"%s\", nonce=\"%s\", opaque=\"%s\", algorithm=\"MD5\", qop=\"auth\"`,\n\t\t\t\ta.Realm, nonce, a.Opaque))\n\t}\n\ta.mutex.Unlock()\n\treturn context.WithValue(ctx, infoKey, info)\n}\n\n\/\/ NewDigestAuthenticator generates a new DigestAuth object\nfunc NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {\n\tda := &DigestAuth{\n\t\tOpaque: RandomKey(),\n\t\tRealm: realm,\n\t\tSecrets: secrets,\n\t\tPlainTextSecrets: false,\n\t\tClientCacheSize: DefaultClientCacheSize,\n\t\tClientCacheTolerance: DefaultClientCacheTolerance,\n\t\tclients: map[string]*digest_client{}}\n\treturn da\n}\n<commit_msg>Cleanup locking logic.<commit_after>package auth\n\nimport (\n\t\"crypto\/subtle\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype digest_client struct {\n\tnc uint64\n\tlast_seen int64\n}\n\ntype DigestAuth struct {\n\tRealm string\n\tOpaque string\n\tSecrets SecretProvider\n\tPlainTextSecrets bool\n\tIgnoreNonceCount bool\n\t\/\/ Headers used by authenticator. Set to ProxyHeaders to use with\n\t\/\/ proxy server. When nil, NormalHeaders are used.\n\tHeaders *Headers\n\n\t\/*\n\t Approximate size of Client's Cache. When actual number of\n\t tracked client nonces exceeds\n\t ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2\n\t older entries are purged.\n\t*\/\n\tClientCacheSize int\n\tClientCacheTolerance int\n\n\tclients map[string]*digest_client\n\tmutex sync.RWMutex\n}\n\n\/\/ check that DigestAuth implements AuthenticatorInterface\nvar _ = (AuthenticatorInterface)((*DigestAuth)(nil))\n\ntype digest_cache_entry struct {\n\tnonce string\n\tlast_seen int64\n}\n\ntype digest_cache []digest_cache_entry\n\nfunc (c digest_cache) Less(i, j int) bool {\n\treturn c[i].last_seen < c[j].last_seen\n}\n\nfunc (c digest_cache) Len() int {\n\treturn len(c)\n}\n\nfunc (c digest_cache) Swap(i, j int) {\n\tc[i], c[j] = c[j], c[i]\n}\n\n\/*\n Purge removes count oldest entries from DigestAuth.clients\n*\/\nfunc (a *DigestAuth) Purge(count int) {\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\tentries := make([]digest_cache_entry, 0, len(a.clients))\n\tfor nonce, client := range a.clients {\n\t\tentries = append(entries, digest_cache_entry{nonce, client.last_seen})\n\t}\n\tcache := digest_cache(entries)\n\tsort.Sort(cache)\n\tfor _, client := range cache[:count] {\n\t\tdelete(a.clients, client.nonce)\n\t}\n}\n\n\/*\n http.Handler for DigestAuth which initiates the authentication process\n (or requires reauthentication).\n*\/\nfunc (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {\n\ta.mutex.RLock()\n\tclientsLen := len(a.clients)\n\ta.mutex.RUnlock()\n\n\tif clientsLen > a.ClientCacheSize+a.ClientCacheTolerance {\n\t\ta.Purge(a.ClientCacheTolerance * 2)\n\t}\n\tnonce := RandomKey()\n\n\ta.mutex.Lock()\n\ta.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}\n\ta.mutex.Unlock()\n\n\ta.mutex.RLock()\n\tw.Header().Set(contentType, a.Headers.V().UnauthContentType)\n\tw.Header().Set(a.Headers.V().Authenticate,\n\t\tfmt.Sprintf(`Digest realm=\"%s\", nonce=\"%s\", opaque=\"%s\", algorithm=\"MD5\", qop=\"auth\"`,\n\t\t\ta.Realm, nonce, a.Opaque))\n\tw.WriteHeader(a.Headers.V().UnauthCode)\n\tw.Write([]byte(a.Headers.V().UnauthResponse))\n\ta.mutex.RUnlock()\n}\n\n\/*\n Parse Authorization header from the http.Request. Returns a map of\n auth parameters or nil if the header is not a valid parsable Digest\n auth header.\n*\/\nfunc DigestAuthParams(authorization string) map[string]string {\n\ts := strings.SplitN(authorization, \" \", 2)\n\tif len(s) != 2 || s[0] != \"Digest\" {\n\t\treturn nil\n\t}\n\n\treturn ParsePairs(s[1])\n}\n\n\/*\n Check if request contains valid authentication data. Returns a pair\n of username, authinfo where username is the name of the authenticated\n user or an empty string and authinfo is the contents for the optional\n Authentication-Info response header.\n*\/\nfunc (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {\n\tda.mutex.RLock()\n\tdefer da.mutex.RUnlock()\n\tusername = \"\"\n\tauthinfo = nil\n\tauth := DigestAuthParams(r.Header.Get(da.Headers.V().Authorization))\n\tif auth == nil {\n\t\treturn \"\", nil\n\t}\n\t\/\/ RFC2617 Section 3.2.1 specifies that unset value of algorithm in\n\t\/\/ WWW-Authenticate Response header should be treated as\n\t\/\/ \"MD5\". According to section 3.2.2 the \"algorithm\" value in\n\t\/\/ subsequent Request Authorization header must be set to whatever\n\t\/\/ was supplied in the WWW-Authenticate Response header. This\n\t\/\/ implementation always returns an algorithm in WWW-Authenticate\n\t\/\/ header, however there seems to be broken clients in the wild\n\t\/\/ which do not set the algorithm. Assume the unset algorithm in\n\t\/\/ Authorization header to be equal to MD5.\n\tif _, ok := auth[\"algorithm\"]; !ok {\n\t\tauth[\"algorithm\"] = \"MD5\"\n\t}\n\tif da.Opaque != auth[\"opaque\"] || auth[\"algorithm\"] != \"MD5\" || auth[\"qop\"] != \"auth\" {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ Check if the requested URI matches auth header\n\tif r.RequestURI != auth[\"uri\"] {\n\t\t\/\/ We allow auth[\"uri\"] to be a full path prefix of request-uri\n\t\t\/\/ for some reason lost in history, which is probably wrong, but\n\t\t\/\/ used to be like that for quite some time\n\t\t\/\/ (https:\/\/tools.ietf.org\/html\/rfc2617#section-3.2.2 explicitly\n\t\t\/\/ says that auth[\"uri\"] is the request-uri).\n\t\t\/\/\n\t\t\/\/ TODO: make an option to allow only strict checking.\n\t\tswitch u, err := url.Parse(auth[\"uri\"]); {\n\t\tcase err != nil:\n\t\t\treturn \"\", nil\n\t\tcase r.URL == nil:\n\t\t\treturn \"\", nil\n\t\tcase len(u.Path) > len(r.URL.Path):\n\t\t\treturn \"\", nil\n\t\tcase !strings.HasPrefix(r.URL.Path, u.Path):\n\t\t\treturn \"\", nil\n\t\t}\n\t}\n\n\tHA1 := da.Secrets(auth[\"username\"], da.Realm)\n\tif da.PlainTextSecrets {\n\t\tHA1 = H(auth[\"username\"] + \":\" + da.Realm + \":\" + HA1)\n\t}\n\tHA2 := H(r.Method + \":\" + auth[\"uri\"])\n\tKD := H(strings.Join([]string{HA1, auth[\"nonce\"], auth[\"nc\"], auth[\"cnonce\"], auth[\"qop\"], HA2}, \":\"))\n\n\tif subtle.ConstantTimeCompare([]byte(KD), []byte(auth[\"response\"])) != 1 {\n\t\treturn \"\", nil\n\t}\n\n\t\/\/ At this point crypto checks are completed and validated.\n\t\/\/ Now check if the session is valid.\n\n\tnc, err := strconv.ParseUint(auth[\"nc\"], 16, 64)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tif client, ok := da.clients[auth[\"nonce\"]]; !ok {\n\t\treturn \"\", nil\n\t} else {\n\t\tif client.nc != 0 && client.nc >= nc && !da.IgnoreNonceCount {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tclient.nc = nc\n\t\tclient.last_seen = time.Now().UnixNano()\n\t}\n\n\tresp_HA2 := H(\":\" + auth[\"uri\"])\n\trspauth := H(strings.Join([]string{HA1, auth[\"nonce\"], auth[\"nc\"], auth[\"cnonce\"], auth[\"qop\"], resp_HA2}, \":\"))\n\n\tinfo := fmt.Sprintf(`qop=\"auth\", rspauth=\"%s\", cnonce=\"%s\", nc=\"%s\"`, rspauth, auth[\"cnonce\"], auth[\"nc\"])\n\treturn auth[\"username\"], &info\n}\n\n\/*\n Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth\n*\/\nconst DefaultClientCacheSize = 1000\nconst DefaultClientCacheTolerance = 100\n\n\/*\n Wrap returns an Authenticator which uses HTTP Digest\n authentication. Arguments:\n\n realm: The authentication realm.\n\n secrets: SecretProvider which must return HA1 digests for the same\n realm as above.\n*\/\nfunc (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {\n\ta.mutex.RLock()\n\tdefer a.mutex.RUnlock()\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif username, authinfo := a.CheckAuth(r); username == \"\" {\n\t\t\ta.RequireAuth(w, r)\n\t\t} else {\n\t\t\tar := &AuthenticatedRequest{Request: *r, Username: username}\n\t\t\tif authinfo != nil {\n\t\t\t\tw.Header().Set(a.Headers.V().AuthInfo, *authinfo)\n\t\t\t}\n\t\t\twrapped(w, ar)\n\t\t}\n\t}\n}\n\n\/*\n JustCheck returns function which converts an http.HandlerFunc into a\n http.HandlerFunc which requires authentication. Username is passed as\n an extra X-Authenticated-Username header.\n*\/\nfunc (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {\n\treturn a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {\n\t\tar.Header.Set(AuthUsernameHeader, ar.Username)\n\t\twrapped(w, &ar.Request)\n\t})\n}\n\n\/\/ NewContext returns a context carrying authentication information for the request.\nfunc (a *DigestAuth) NewContext(ctx context.Context, r *http.Request) context.Context {\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\tusername, authinfo := a.CheckAuth(r)\n\tinfo := &Info{Username: username, ResponseHeaders: make(http.Header)}\n\tif username != \"\" {\n\t\tinfo.Authenticated = true\n\t\tinfo.ResponseHeaders.Set(a.Headers.V().AuthInfo, *authinfo)\n\t} else {\n\t\t\/\/ return back digest WWW-Authenticate header\n\t\tif len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {\n\t\t\ta.Purge(a.ClientCacheTolerance * 2)\n\t\t}\n\t\tnonce := RandomKey()\n\t\ta.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}\n\t\tinfo.ResponseHeaders.Set(a.Headers.V().Authenticate,\n\t\t\tfmt.Sprintf(`Digest realm=\"%s\", nonce=\"%s\", opaque=\"%s\", algorithm=\"MD5\", qop=\"auth\"`,\n\t\t\t\ta.Realm, nonce, a.Opaque))\n\t}\n\treturn context.WithValue(ctx, infoKey, info)\n}\n\n\/\/ NewDigestAuthenticator generates a new DigestAuth object\nfunc NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {\n\tda := &DigestAuth{\n\t\tOpaque: RandomKey(),\n\t\tRealm: realm,\n\t\tSecrets: secrets,\n\t\tPlainTextSecrets: false,\n\t\tClientCacheSize: DefaultClientCacheSize,\n\t\tClientCacheTolerance: DefaultClientCacheTolerance,\n\t\tclients: map[string]*digest_client{}}\n\treturn da\n}\n<|endoftext|>"} {"text":"<commit_before>package bootstrap\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\ntype Redactor struct {\n\treplacement []byte\n\n\t\/\/ Current offset from the start of the next input segment\n\toffset int\n\n\t\/\/ Minimum and maximum length of redactable string\n\tminlen int\n\tmaxlen int\n\n\t\/\/ Table of Boyer-Moore skip distances, and values to redact matching this end byte\n\ttable [255]struct {\n\t\tskip int\n\t\tneedles [][]byte\n\t}\n\n\t\/\/ Internal buffer for building redacted input into\n\t\/\/ Also holds the final portion of the previous Write call, in case of\n\t\/\/ sensitive values that cross Write boundaries\n\toutbuf []byte\n\n\t\/\/ Wrapped Writer that we'll send redacted output to\n\toutput io.Writer\n}\n\n\/\/ Construct a new Redactor, and pre-compile the Boyer-Moore skip table\nfunc NewRedactor(output io.Writer, replacement string, needles []string) *Redactor {\n\tminNeedleLen := 0\n\tmaxNeedleLen := 0\n\tfor _, needle := range needles {\n\t\tif len(needle) < minNeedleLen || minNeedleLen == 0 {\n\t\t\tminNeedleLen = len(needle)\n\t\t}\n\t\tif len(needle) > maxNeedleLen {\n\t\t\tmaxNeedleLen = len(needle)\n\t\t}\n\t}\n\n\tredactor := &Redactor{\n\t\treplacement: []byte(replacement),\n\t\toutput: output,\n\n\t\t\/\/ Linux pipes can buffer up to 65536 bytes before flushing, so there's\n\t\t\/\/ a reasonable chance that's how much we'll get in a single Write().\n\t\t\/\/ maxNeedleLen is added since we may retain that many bytes to handle\n\t\t\/\/ matches crossing Write boundaries.\n\t\t\/\/ It's a reasonable starting capacity which hopefully means we don't\n\t\t\/\/ have to reallocate the array, but append() will grow it if necessary\n\t\toutbuf: make([]byte, 0, 65536+maxNeedleLen),\n\n\t\t\/\/ Since Boyer-Moore looks for the end of substrings, we can safely offset\n\t\t\/\/ processing by the length of the shortest string we're checking for\n\t\t\/\/ Since Boyer-Moore looks for the end of substrings, only bytes further\n\t\t\/\/ behind the iterator than the longest search string are guaranteed to not\n\t\t\/\/ be part of a match\n\t\tminlen: minNeedleLen,\n\t\tmaxlen: maxNeedleLen,\n\t\toffset: minNeedleLen - 1,\n\t}\n\n\t\/\/ For bytes that don't appear in any of the substrings we're searching\n\t\/\/ for, it's safe to skip forward the length of the shortest search\n\t\/\/ string.\n\t\/\/ Start by setting this as a default for all bytes\n\tfor i := range redactor.table {\n\t\tredactor.table[i].skip = minNeedleLen\n\t}\n\n\tfor _, needle := range needles {\n\t\tfor i, ch := range needle {\n\t\t\t\/\/ For bytes that do exist in search strings, find the shortest distance\n\t\t\t\/\/ between that byte appearing to the end of the same search string\n\t\t\tskip := len(needle) - i - 1\n\t\t\tif skip < redactor.table[ch].skip {\n\t\t\t\tredactor.table[ch].skip = skip\n\t\t\t}\n\n\t\t\t\/\/ Build a cache of which search substrings end in which bytes\n\t\t\tif skip == 0 {\n\t\t\t\tredactor.table[ch].needles = append(redactor.table[ch].needles, []byte(needle))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn redactor\n}\n\nfunc (redactor *Redactor) Write(input []byte) (int, error) {\n\t\/\/ Current iterator index, which may be a safe offset from 0\n\tcursor := redactor.offset\n\n\t\/\/ Current index which is guaranteed to be completely redacted\n\t\/\/ May lag behind cursor by up to the length of the longest search string\n\tdoneTo := 0\n\n\tfor cursor < len(input) {\n\t\tch := input[cursor]\n\t\tskip := redactor.table[ch].skip\n\n\t\t\/\/ If the skip table tells us that there is no search string ending in\n\t\t\/\/ the current byte, skip forward by the indicated distance.\n\t\tif skip != 0 {\n\t\t\tcursor += skip\n\n\t\t\t\/\/ Also copy any content behind the cursor which is guaranteed not\n\t\t\t\/\/ to fall under a match\n\t\t\tconfirmedTo := cursor - redactor.maxlen - 1\n\t\t\tif confirmedTo > len(input) {\n\t\t\t\tconfirmedTo = len(input)\n\t\t\t}\n\t\t\tif confirmedTo > doneTo {\n\t\t\t\tredactor.outbuf = append(redactor.outbuf, input[doneTo:confirmedTo]...)\n\t\t\t\tdoneTo = confirmedTo\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We'll check for matching search strings here, but we'll still need\n\t\t\/\/ to move the cursor forward\n\t\t\/\/ Since Go slice syntax is not inclusive of the end index, moving it\n\t\t\/\/ forward now reduces the need to use `cursor-1` everywhere\n\t\tcursor++\n\t\tfor _, needle := range redactor.table[ch].needles {\n\t\t\t\/\/ Since we're working backwards from what may be the end of a\n\t\t\t\/\/ string, it's possible that the start would be out of bounds\n\t\t\tstartSubstr := cursor - len(needle)\n\t\t\tvar candidate []byte\n\n\t\t\tif startSubstr >= 0 {\n\t\t\t\t\/\/ If the candidate string falls entirely within input, then just slice into input\n\t\t\t\tcandidate = input[startSubstr:cursor]\n\t\t\t} else if -startSubstr < len(redactor.outbuf) {\n\t\t\t\t\/\/ If the candidate crosses the Write boundary, we need to\n\t\t\t\t\/\/ concatenate the two sections to compare against\n\t\t\t\tcandidate = make([]byte, 0, len(needle))\n\t\t\t\tcandidate = append(candidate, redactor.outbuf[-startSubstr:]...)\n\t\t\t\tcandidate = append(candidate, input[:cursor]...)\n\t\t\t} else {\n\t\t\t\t\/\/ Final case is that the start index is out of bounds, and\n\t\t\t\t\/\/ it's impossible for it to match. Just move on to the next\n\t\t\t\t\/\/ search substring\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif bytes.Equal(needle, candidate) {\n\t\t\t\tif startSubstr < 0 {\n\t\t\t\t\t\/\/ If we accepted a negative startSubstr, the output buffer\n\t\t\t\t\t\/\/ needs to be truncated to remove the partial match\n\t\t\t\t\tredactor.outbuf = redactor.outbuf[:len(redactor.outbuf)+startSubstr]\n\t\t\t\t} else if startSubstr > doneTo {\n\t\t\t\t\t\/\/ First, copy over anything behind the matched substring unmodified\n\t\t\t\t\tredactor.outbuf = append(redactor.outbuf, input[doneTo:startSubstr]...)\n\t\t\t\t}\n\t\t\t\t\/\/ Then, write a fixed string into the output, and move doneTo past the redaction\n\t\t\t\tredactor.outbuf = append(redactor.outbuf, redactor.replacement...)\n\t\t\t\tdoneTo = cursor\n\n\t\t\t\t\/\/ The next end-of-string will be at least this far away so\n\t\t\t\t\/\/ it's safe to skip forward a bit\n\t\t\t\tcursor += redactor.minlen - 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Push the output buffer down\n\t_, err := redactor.output.Write(redactor.outbuf)\n\n\t\/\/ There will probably be a segment at the end of the input which may be a\n\t\/\/ partial match crossing the Write boundary. This is retained in the\n\t\/\/ output buffer to compare against on the next call\n\t\/\/ Flush() needs to be called after the final Write(), or this bit won't\n\t\/\/ get written\n\tredactor.outbuf = append(redactor.outbuf[:0], input[doneTo:]...)\n\n\t\/\/ We can offset the next Write processing by how far cursor is ahead of\n\t\/\/ the end of this input segment\n\tredactor.offset = cursor - len(input)\n\n\treturn len(input), err\n}\n\n\/\/ Flush should be called after the final Write. This will Write() anything\n\/\/ retained in case of a partial match and reset the output buffer.\nfunc (redactor Redactor) Sync() error {\n\t_, err := redactor.output.Write(redactor.outbuf)\n\tredactor.outbuf = redactor.outbuf[:0]\n\treturn err\n}\n<commit_msg>Keep line endings out of the buffer<commit_after>package bootstrap\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\ntype Redactor struct {\n\treplacement []byte\n\n\t\/\/ Current offset from the start of the next input segment\n\toffset int\n\n\t\/\/ Minimum and maximum length of redactable string\n\tminlen int\n\tmaxlen int\n\n\t\/\/ Table of Boyer-Moore skip distances, and values to redact matching this end byte\n\ttable [255]struct {\n\t\tskip int\n\t\tneedles [][]byte\n\t}\n\n\t\/\/ Internal buffer for building redacted input into\n\t\/\/ Also holds the final portion of the previous Write call, in case of\n\t\/\/ sensitive values that cross Write boundaries\n\toutbuf []byte\n\n\t\/\/ Wrapped Writer that we'll send redacted output to\n\toutput io.Writer\n}\n\n\/\/ Construct a new Redactor, and pre-compile the Boyer-Moore skip table\nfunc NewRedactor(output io.Writer, replacement string, needles []string) *Redactor {\n\tminNeedleLen := 0\n\tmaxNeedleLen := 0\n\tfor _, needle := range needles {\n\t\tif len(needle) < minNeedleLen || minNeedleLen == 0 {\n\t\t\tminNeedleLen = len(needle)\n\t\t}\n\t\tif len(needle) > maxNeedleLen {\n\t\t\tmaxNeedleLen = len(needle)\n\t\t}\n\t}\n\n\tredactor := &Redactor{\n\t\treplacement: []byte(replacement),\n\t\toutput: output,\n\n\t\t\/\/ Linux pipes can buffer up to 65536 bytes before flushing, so there's\n\t\t\/\/ a reasonable chance that's how much we'll get in a single Write().\n\t\t\/\/ maxNeedleLen is added since we may retain that many bytes to handle\n\t\t\/\/ matches crossing Write boundaries.\n\t\t\/\/ It's a reasonable starting capacity which hopefully means we don't\n\t\t\/\/ have to reallocate the array, but append() will grow it if necessary\n\t\toutbuf: make([]byte, 0, 65536+maxNeedleLen),\n\n\t\t\/\/ Since Boyer-Moore looks for the end of substrings, we can safely offset\n\t\t\/\/ processing by the length of the shortest string we're checking for\n\t\t\/\/ Since Boyer-Moore looks for the end of substrings, only bytes further\n\t\t\/\/ behind the iterator than the longest search string are guaranteed to not\n\t\t\/\/ be part of a match\n\t\tminlen: minNeedleLen,\n\t\tmaxlen: maxNeedleLen,\n\t\toffset: minNeedleLen - 1,\n\t}\n\n\t\/\/ For bytes that don't appear in any of the substrings we're searching\n\t\/\/ for, it's safe to skip forward the length of the shortest search\n\t\/\/ string.\n\t\/\/ Start by setting this as a default for all bytes\n\tfor i := range redactor.table {\n\t\tredactor.table[i].skip = minNeedleLen\n\t}\n\n\tfor _, needle := range needles {\n\t\tfor i, ch := range needle {\n\t\t\t\/\/ For bytes that do exist in search strings, find the shortest distance\n\t\t\t\/\/ between that byte appearing to the end of the same search string\n\t\t\tskip := len(needle) - i - 1\n\t\t\tif skip < redactor.table[ch].skip {\n\t\t\t\tredactor.table[ch].skip = skip\n\t\t\t}\n\n\t\t\t\/\/ Build a cache of which search substrings end in which bytes\n\t\t\tif skip == 0 {\n\t\t\t\tredactor.table[ch].needles = append(redactor.table[ch].needles, []byte(needle))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn redactor\n}\n\nfunc (redactor *Redactor) Write(input []byte) (int, error) {\n\t\/\/ Current iterator index, which may be a safe offset from 0\n\tcursor := redactor.offset\n\n\t\/\/ Current index which is guaranteed to be completely redacted\n\t\/\/ May lag behind cursor by up to the length of the longest search string\n\tdoneTo := 0\n\n\tfor cursor < len(input) {\n\t\tch := input[cursor]\n\t\tskip := redactor.table[ch].skip\n\n\t\t\/\/ If the skip table tells us that there is no search string ending in\n\t\t\/\/ the current byte, skip forward by the indicated distance.\n\t\tif skip != 0 {\n\t\t\tcursor += skip\n\n\t\t\t\/\/ Also copy any content behind the cursor which is guaranteed not\n\t\t\t\/\/ to fall under a match\n\t\t\tconfirmedTo := cursor - redactor.maxlen - 1\n\t\t\tif confirmedTo > len(input) {\n\t\t\t\tconfirmedTo = len(input)\n\t\t\t}\n\t\t\tif confirmedTo > doneTo {\n\t\t\t\tredactor.outbuf = append(redactor.outbuf, input[doneTo:confirmedTo]...)\n\t\t\t\tdoneTo = confirmedTo\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We'll check for matching search strings here, but we'll still need\n\t\t\/\/ to move the cursor forward\n\t\t\/\/ Since Go slice syntax is not inclusive of the end index, moving it\n\t\t\/\/ forward now reduces the need to use `cursor-1` everywhere\n\t\tcursor++\n\t\tfor _, needle := range redactor.table[ch].needles {\n\t\t\t\/\/ Since we're working backwards from what may be the end of a\n\t\t\t\/\/ string, it's possible that the start would be out of bounds\n\t\t\tstartSubstr := cursor - len(needle)\n\t\t\tvar candidate []byte\n\n\t\t\tif startSubstr >= 0 {\n\t\t\t\t\/\/ If the candidate string falls entirely within input, then just slice into input\n\t\t\t\tcandidate = input[startSubstr:cursor]\n\t\t\t} else if -startSubstr < len(redactor.outbuf) {\n\t\t\t\t\/\/ If the candidate crosses the Write boundary, we need to\n\t\t\t\t\/\/ concatenate the two sections to compare against\n\t\t\t\tcandidate = make([]byte, 0, len(needle))\n\t\t\t\tcandidate = append(candidate, redactor.outbuf[-startSubstr:]...)\n\t\t\t\tcandidate = append(candidate, input[:cursor]...)\n\t\t\t} else {\n\t\t\t\t\/\/ Final case is that the start index is out of bounds, and\n\t\t\t\t\/\/ it's impossible for it to match. Just move on to the next\n\t\t\t\t\/\/ search substring\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif bytes.Equal(needle, candidate) {\n\t\t\t\tif startSubstr < 0 {\n\t\t\t\t\t\/\/ If we accepted a negative startSubstr, the output buffer\n\t\t\t\t\t\/\/ needs to be truncated to remove the partial match\n\t\t\t\t\tredactor.outbuf = redactor.outbuf[:len(redactor.outbuf)+startSubstr]\n\t\t\t\t} else if startSubstr > doneTo {\n\t\t\t\t\t\/\/ First, copy over anything behind the matched substring unmodified\n\t\t\t\t\tredactor.outbuf = append(redactor.outbuf, input[doneTo:startSubstr]...)\n\t\t\t\t}\n\t\t\t\t\/\/ Then, write a fixed string into the output, and move doneTo past the redaction\n\t\t\t\tredactor.outbuf = append(redactor.outbuf, redactor.replacement...)\n\t\t\t\tdoneTo = cursor\n\n\t\t\t\t\/\/ The next end-of-string will be at least this far away so\n\t\t\t\t\/\/ it's safe to skip forward a bit\n\t\t\t\tcursor += redactor.minlen - 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ We buffer the end of the input in order to catch passwords that fall over Write boundaries.\n\t\/\/ In the case of line-buffered input, that means we would hold back the\n\t\/\/ end of the line in a user-visible way. For this reason, we push through\n\t\/\/ any line endings immediately rather than hold them back.\n\t\/\/ Technically this means that passwords containing newlines aren't\n\t\/\/ guarateed to get redacted, but who does that anyway?\n\tfor i := doneTo; i < len(input); i++ {\n\t\tif input[i] == byte('\\n') {\n\t\t\tredactor.outbuf = append(redactor.outbuf, input[doneTo:i+1]...)\n\t\t\tdoneTo = i+1\n\t\t}\n\t}\n\n\t\/\/ Push the output buffer down\n\t_, err := redactor.output.Write(redactor.outbuf)\n\n\t\/\/ There will probably be a segment at the end of the input which may be a\n\t\/\/ partial match crossing the Write boundary. This is retained in the\n\t\/\/ output buffer to compare against on the next call\n\t\/\/ Flush() needs to be called after the final Write(), or this bit won't\n\t\/\/ get written\n\tredactor.outbuf = append(redactor.outbuf[:0], input[doneTo:]...)\n\n\t\/\/ We can offset the next Write processing by how far cursor is ahead of\n\t\/\/ the end of this input segment\n\tredactor.offset = cursor - len(input)\n\n\treturn len(input), err\n}\n\n\/\/ Flush should be called after the final Write. This will Write() anything\n\/\/ retained in case of a partial match and reset the output buffer.\nfunc (redactor Redactor) Sync() error {\n\t_, err := redactor.output.Write(redactor.outbuf)\n\tredactor.outbuf = redactor.outbuf[:0]\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package fetcher\n\nimport (\n\t\"fmt\"\n)\n\nconst community = \"https:\/\/git.alpinelinux.org\/cgit\/alpine-secdb\/plain\/v%s\/community.yaml\"\nconst main = \"https:\/\/git.alpinelinux.org\/cgit\/alpine-secdb\/plain\/v%s\/main.yaml\"\n\nfunc newAlpineFetchRequests(target []string) (reqs []fetchRequest) {\n\tfor _, v := range target {\n\t\treqs = append(reqs, fetchRequest{\n\t\t\ttarget: v,\n\t\t\turl: fmt.Sprintf(community, v),\n\t\t}, fetchRequest{\n\t\t\ttarget: v,\n\t\t\turl: fmt.Sprintf(main, v),\n\t\t})\n\t}\n\treturn\n}\n\n\/\/ FetchAlpineFiles fetch from alpine secdb\n\/\/ https:\/\/git.alpinelinux.org\/cgit\/alpine-secdb\/tree\/\nfunc FetchAlpineFiles(versions []string) ([]FetchResult, error) {\n\treqs := newAlpineFetchRequests(versions)\n\tif len(reqs) == 0 {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"There are no versions to fetch\")\n\t}\n\tresults, err := fetchFeedFiles(reqs)\n\tif err != nil {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"Failed to fetch. err: %s\", err)\n\t}\n\treturn results, nil\n}\n<commit_msg>fix(fetcher): fix URL of alpine sec-db<commit_after>package fetcher\n\nimport (\n\t\"fmt\"\n)\n\nconst community = \"https:\/\/raw.githubusercontent.com\/alpinelinux\/alpine-secdb\/master\/v%s\/community.yaml\"\nconst main = \"https:\/\/raw.githubusercontent.com\/alpinelinux\/alpine-secdb\/master\/v%s\/main.yaml\"\n\nfunc newAlpineFetchRequests(target []string) (reqs []fetchRequest) {\n\tfor _, v := range target {\n\t\treqs = append(reqs, fetchRequest{\n\t\t\ttarget: v,\n\t\t\turl: fmt.Sprintf(community, v),\n\t\t}, fetchRequest{\n\t\t\ttarget: v,\n\t\t\turl: fmt.Sprintf(main, v),\n\t\t})\n\t}\n\treturn\n}\n\n\/\/ FetchAlpineFiles fetch from alpine secdb\n\/\/ https:\/\/git.alpinelinux.org\/cgit\/alpine-secdb\/tree\/\nfunc FetchAlpineFiles(versions []string) ([]FetchResult, error) {\n\treqs := newAlpineFetchRequests(versions)\n\tif len(reqs) == 0 {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"There are no versions to fetch\")\n\t}\n\tresults, err := fetchFeedFiles(reqs)\n\tif err != nil {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"Failed to fetch. err: %s\", err)\n\t}\n\treturn results, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype NinjaGenerator struct {\n\tf *os.File\n\tnodes []*DepNode\n\tvars Vars\n\tex *Executor\n\truleId int\n\tdone map[string]bool\n}\n\nfunc NewNinjaGenerator(g *DepGraph) *NinjaGenerator {\n\tf, err := os.Create(\"build.ninja\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &NinjaGenerator{\n\t\tf: f,\n\t\tnodes: g.nodes,\n\t\tvars: g.vars,\n\t\tdone: make(map[string]bool),\n\t}\n}\n\nfunc stripShellComment(s string) string {\n\tif strings.IndexByte(s, '#') < 0 {\n\t\t\/\/ Fast path.\n\t\treturn s\n\t}\n\tvar escape bool\n\tvar quote rune\n\tfor i, c := range s {\n\t\tif quote > 0 {\n\t\t\tif quote == c && (quote == '\\'' || !escape) {\n\t\t\t\tquote = 0\n\t\t\t}\n\t\t} else if !escape {\n\t\t\tif c == '#' {\n\t\t\t\treturn s[:i]\n\t\t\t} else if c == '\\'' || c == '\"' || c == '`' {\n\t\t\t\tquote = c\n\t\t\t}\n\t\t}\n\t\tif escape {\n\t\t\tescape = false\n\t\t} else if c == '\\\\' {\n\t\t\tescape = true\n\t\t} else {\n\t\t\tescape = false\n\t\t}\n\t}\n\treturn s\n}\n\nfunc genShellScript(runners []runner) string {\n\tvar buf bytes.Buffer\n\tfor i, r := range runners {\n\t\tif i > 0 {\n\t\t\tif runners[i-1].ignoreError {\n\t\t\t\tbuf.WriteString(\" ; \")\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\" && \")\n\t\t\t}\n\t\t}\n\t\tcmd := stripShellComment(r.cmd)\n\t\tcmd = trimLeftSpace(cmd)\n\t\tcmd = strings.Replace(cmd, \"\\\\\\n\", \" \", -1)\n\t\tcmd = strings.TrimRight(cmd, \" \\t\\n;\")\n\t\tcmd = strings.Replace(cmd, \"$\", \"$$\", -1)\n\t\tcmd = strings.Replace(cmd, \"\\t\", \" \", -1)\n\t\tbuf.WriteString(cmd)\n\t\tif i == len(runners)-1 && r.ignoreError {\n\t\t\tbuf.WriteString(\" ; true\")\n\t\t}\n\t}\n\treturn buf.String()\n}\n\nfunc (n *NinjaGenerator) genRuleName() string {\n\truleName := fmt.Sprintf(\"rule%d\", n.ruleId)\n\tn.ruleId++\n\treturn ruleName\n}\n\nfunc (n *NinjaGenerator) emitBuild(output, rule, dep string) {\n\tfmt.Fprintf(n.f, \"build %s: %s\", output, rule)\n\tif dep != \"\" {\n\t\tfmt.Fprintf(n.f, \" %s\", dep)\n\t}\n\tfmt.Fprintf(n.f, \"\\n\")\n}\n\nfunc getDepString(node *DepNode) string {\n\tvar deps []string\n\tvar orderOnlys []string\n\tfor _, d := range node.Deps {\n\t\tif d.IsOrderOnly {\n\t\t\torderOnlys = append(orderOnlys, d.Output)\n\t\t} else {\n\t\t\tdeps = append(deps, d.Output)\n\t\t}\n\t}\n\tdep := \"\"\n\tif len(deps) > 0 {\n\t\tdep += fmt.Sprintf(\" %s\", strings.Join(deps, \" \"))\n\t}\n\tif len(orderOnlys) > 0 {\n\t\tdep += fmt.Sprintf(\" || %s\", strings.Join(orderOnlys, \" \"))\n\t}\n\treturn dep\n}\n\nfunc genIntermediateTargetName(o string, i int) string {\n\treturn fmt.Sprintf(\".make_targets\/%s@%d\", o, i)\n}\n\nfunc (n *NinjaGenerator) emitNode(node *DepNode) {\n\tif n.done[node.Output] {\n\t\treturn\n\t}\n\tn.done[node.Output] = true\n\n\tif len(node.Cmds) == 0 && len(node.Deps) == 0 && !node.IsPhony {\n\t\treturn\n\t}\n\n\trunners, _ := n.ex.createRunners(node, true)\n\truleName := \"phony\"\n\tif len(runners) > 0 {\n\t\truleName = n.genRuleName()\n\t\tfmt.Fprintf(n.f, \"rule %s\\n\", ruleName)\n\t\tfmt.Fprintf(n.f, \" description = build $out\\n\")\n\n\t\tss := genShellScript(runners)\n\t\t\/\/ It seems Linux is OK with ~130kB.\n\t\t\/\/ TODO: Find this number automatically.\n\t\tArgLenLimit := 100 * 1000\n\t\tif len(ss) > ArgLenLimit {\n\t\t\tfmt.Fprintf(n.f, \" rspfile = $out.rsp\\n\")\n\t\t\tfmt.Fprintf(n.f, \" rspfile_content = %s\\n\", ss)\n\t\t\tss = \"sh $out.rsp\"\n\t\t}\n\t\tfmt.Fprintf(n.f, \" command = %s\\n\", ss)\n\n\t}\n\tn.emitBuild(node.Output, ruleName, getDepString(node))\n\n\tfor _, d := range node.Deps {\n\t\tn.emitNode(d)\n\t}\n}\n\nfunc (n *NinjaGenerator) run() {\n\tn.ex = NewExecutor(n.vars)\n\tfor _, node := range n.nodes {\n\t\tn.emitNode(node)\n\t}\n\tn.f.Close()\n}\n\nfunc GenerateNinja(g *DepGraph) {\n\tNewNinjaGenerator(g).run()\n}\n<commit_msg>Use \"true\" instead of an empty command<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype NinjaGenerator struct {\n\tf *os.File\n\tnodes []*DepNode\n\tvars Vars\n\tex *Executor\n\truleId int\n\tdone map[string]bool\n}\n\nfunc NewNinjaGenerator(g *DepGraph) *NinjaGenerator {\n\tf, err := os.Create(\"build.ninja\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &NinjaGenerator{\n\t\tf: f,\n\t\tnodes: g.nodes,\n\t\tvars: g.vars,\n\t\tdone: make(map[string]bool),\n\t}\n}\n\nfunc stripShellComment(s string) string {\n\tif strings.IndexByte(s, '#') < 0 {\n\t\t\/\/ Fast path.\n\t\treturn s\n\t}\n\tvar escape bool\n\tvar quote rune\n\tfor i, c := range s {\n\t\tif quote > 0 {\n\t\t\tif quote == c && (quote == '\\'' || !escape) {\n\t\t\t\tquote = 0\n\t\t\t}\n\t\t} else if !escape {\n\t\t\tif c == '#' {\n\t\t\t\treturn s[:i]\n\t\t\t} else if c == '\\'' || c == '\"' || c == '`' {\n\t\t\t\tquote = c\n\t\t\t}\n\t\t}\n\t\tif escape {\n\t\t\tescape = false\n\t\t} else if c == '\\\\' {\n\t\t\tescape = true\n\t\t} else {\n\t\t\tescape = false\n\t\t}\n\t}\n\treturn s\n}\n\nfunc genShellScript(runners []runner) string {\n\tvar buf bytes.Buffer\n\tfor i, r := range runners {\n\t\tif i > 0 {\n\t\t\tif runners[i-1].ignoreError {\n\t\t\t\tbuf.WriteString(\" ; \")\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(\" && \")\n\t\t\t}\n\t\t}\n\t\tcmd := stripShellComment(r.cmd)\n\t\tcmd = trimLeftSpace(cmd)\n\t\tcmd = strings.Replace(cmd, \"\\\\\\n\", \" \", -1)\n\t\tcmd = strings.TrimRight(cmd, \" \\t\\n;\")\n\t\tcmd = strings.Replace(cmd, \"$\", \"$$\", -1)\n\t\tcmd = strings.Replace(cmd, \"\\t\", \" \", -1)\n\t\tif cmd == \"\" {\n\t\t\tcmd = \"true\"\n\t\t}\n\t\tbuf.WriteString(cmd)\n\t\tif i == len(runners)-1 && r.ignoreError {\n\t\t\tbuf.WriteString(\" ; true\")\n\t\t}\n\t}\n\treturn buf.String()\n}\n\nfunc (n *NinjaGenerator) genRuleName() string {\n\truleName := fmt.Sprintf(\"rule%d\", n.ruleId)\n\tn.ruleId++\n\treturn ruleName\n}\n\nfunc (n *NinjaGenerator) emitBuild(output, rule, dep string) {\n\tfmt.Fprintf(n.f, \"build %s: %s\", output, rule)\n\tif dep != \"\" {\n\t\tfmt.Fprintf(n.f, \" %s\", dep)\n\t}\n\tfmt.Fprintf(n.f, \"\\n\")\n}\n\nfunc getDepString(node *DepNode) string {\n\tvar deps []string\n\tvar orderOnlys []string\n\tfor _, d := range node.Deps {\n\t\tif d.IsOrderOnly {\n\t\t\torderOnlys = append(orderOnlys, d.Output)\n\t\t} else {\n\t\t\tdeps = append(deps, d.Output)\n\t\t}\n\t}\n\tdep := \"\"\n\tif len(deps) > 0 {\n\t\tdep += fmt.Sprintf(\" %s\", strings.Join(deps, \" \"))\n\t}\n\tif len(orderOnlys) > 0 {\n\t\tdep += fmt.Sprintf(\" || %s\", strings.Join(orderOnlys, \" \"))\n\t}\n\treturn dep\n}\n\nfunc genIntermediateTargetName(o string, i int) string {\n\treturn fmt.Sprintf(\".make_targets\/%s@%d\", o, i)\n}\n\nfunc (n *NinjaGenerator) emitNode(node *DepNode) {\n\tif n.done[node.Output] {\n\t\treturn\n\t}\n\tn.done[node.Output] = true\n\n\tif len(node.Cmds) == 0 && len(node.Deps) == 0 && !node.IsPhony {\n\t\treturn\n\t}\n\n\trunners, _ := n.ex.createRunners(node, true)\n\truleName := \"phony\"\n\tif len(runners) > 0 {\n\t\truleName = n.genRuleName()\n\t\tfmt.Fprintf(n.f, \"rule %s\\n\", ruleName)\n\t\tfmt.Fprintf(n.f, \" description = build $out\\n\")\n\n\t\tss := genShellScript(runners)\n\t\t\/\/ It seems Linux is OK with ~130kB.\n\t\t\/\/ TODO: Find this number automatically.\n\t\tArgLenLimit := 100 * 1000\n\t\tif len(ss) > ArgLenLimit {\n\t\t\tfmt.Fprintf(n.f, \" rspfile = $out.rsp\\n\")\n\t\t\tfmt.Fprintf(n.f, \" rspfile_content = %s\\n\", ss)\n\t\t\tss = \"sh $out.rsp\"\n\t\t}\n\t\tfmt.Fprintf(n.f, \" command = %s\\n\", ss)\n\n\t}\n\tn.emitBuild(node.Output, ruleName, getDepString(node))\n\n\tfor _, d := range node.Deps {\n\t\tn.emitNode(d)\n\t}\n}\n\nfunc (n *NinjaGenerator) run() {\n\tn.ex = NewExecutor(n.vars)\n\tfor _, node := range n.nodes {\n\t\tn.emitNode(node)\n\t}\n\tn.f.Close()\n}\n\nfunc GenerateNinja(g *DepGraph) {\n\tNewNinjaGenerator(g).run()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype FileRepository struct {\n\tpostDirectory string\n}\n\nfunc (f *FileRepository) FetchAllTags() ([]string, error) {\n\tposts, err := f.FetchAllPosts()\n\n\t\/\/ We're using a map to simulate a set\n\ttagMap := make(map[string]bool)\n\n\tfor i := range posts {\n\t\tfor j := range posts[i].Tags() {\n\t\t\ttagMap[posts[i].Tags()[j]] = true\n\t\t}\n\t}\n\n\ttags := []string{}\n\n\tfor key := range tagMap {\n\t\ttags = append(tags, key)\n\t}\n\n\tsort.Strings(tags)\n\n\treturn tags, err\n}\n\nfunc (f *FileRepository) FetchPostWithUrl(url string) (*BlogPost, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := range posts {\n\t\tif posts[i].Url() == url {\n\t\t\treturn posts[i], err\n\t\t}\n\t}\n\n\terr = errors.New(\"Could not find post\")\n\n\treturn nil, err\n}\n\nfunc (f *FileRepository) FetchPostsWithTag(tag string) ([]*BlogPost, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilteredPosts := []*BlogPost{}\n\n\tfor i := range posts {\n\t\tif posts[i].ContainsTag(tag) {\n\t\t\tfilteredPosts = append(filteredPosts, posts[i])\n\t\t}\n\t}\n\n\treturn filteredPosts, err\n}\n\nfunc (f *FileRepository) PostDirectory() string {\n\treturn f.postDirectory\n}\n\nfunc (f *FileRepository) SetPostDirectory(s string) {\n\tf.postDirectory = s\n}\n\nfunc (f *FileRepository) FetchPostsInRange(start, count int) (BlogPosts, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif start + count > len(posts) {\n\t\tcount = len(posts) - start\n\t}\n\n\tlog.Println(start)\n\tlog.Println(count)\n\tlog.Println(len(posts))\n\n\treturn posts[start:start + count + 1], err\n}\n\nfunc (f *FileRepository) FetchAllPosts() (BlogPosts, error) {\n\n\tdirname := f.postDirectory + string(filepath.Separator)\n\n\tfiles, err := ioutil.ReadDir(dirname)\n\n\tposts := BlogPosts{}\n\n\tfor i := range files {\n\n\t\tif files[i].IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(files[i].Name()) != \".md\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tpost, err := f.FetchPost(files[i].Name())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tposts = append(posts, post)\n\t}\n\n\tsort.Sort(posts)\n\n\treturn posts, err\n}\n\nfunc (f *FileRepository) FetchPost(filename string) (*BlogPost, error) {\n\n\tpost := new(BlogPost)\n\n\tdirname := f.postDirectory + string(filepath.Separator)\n\n\tfile, err := ioutil.ReadFile(dirname + filename)\n\n\tif err != nil {\n\t\treturn post, err\n\t}\n\n\tfile = []byte(f.extractHeader(string(file), post))\n\n\thtmlFlags := blackfriday.HTML_USE_SMARTYPANTS\n\textensions := blackfriday.EXTENSION_HARD_LINE_BREAK | blackfriday.EXTENSION_FENCED_CODE | blackfriday.EXTENSION_NO_INTRA_EMPHASIS\n\n\trenderer := blackfriday.HtmlRenderer(htmlFlags, post.Title(), \"\")\n\n\toutput := blackfriday.Markdown(file, renderer, extensions)\n\n\tpost.SetBody(string(output))\n\n\treturn post, nil\n}\n\nfunc (f *FileRepository) extractHeader(text string, post *BlogPost) string {\n\n\tlines := strings.Split(text, \"\\n\")\n\n\theaderSize := 0\n\n\tfor i := range lines {\n\t\tif strings.Contains(lines[i], \":\") {\n\t\t\tcomponents := strings.Split(lines[i], \":\")\n\n\t\t\theader := strings.ToLower(strings.Trim(components[0], \" \"))\n\t\t\tseparatorIndex := strings.Index(lines[i], \":\") + 1\n\t\t\tdata := strings.Trim(lines[i][separatorIndex:], \" \")\n\n\t\t\tswitch header {\n\t\t\tcase \"title\":\n\t\t\t\tpost.SetTitle(data)\n\t\t\tcase \"tags\":\n\n\t\t\t\ttags := strings.Split(data, \",\")\n\n\t\t\t\tformattedTags := []string{}\n\n\t\t\t\tfor j := range tags {\n\t\t\t\t\ttags[j] = strings.Trim(tags[j], \" \")\n\t\t\t\t\ttags[j] = strings.Replace(tags[j], \" \", \"-\", -1)\n\n\t\t\t\t\tif tags[j] != \"\" {\n\t\t\t\t\t\tformattedTags = append(formattedTags, tags[j])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tpost.SetTags(formattedTags)\n\t\t\tcase \"date\":\n\t\t\t\tpost.SetPublishDate(stringToTime(data))\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\theaderSize += len(lines[i]) + 1\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn text[headerSize:]\n}\n\nfunc stringToTime(s string) time.Time {\n\n\tyear, err := strconv.Atoi(s[:4])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tmonth, err := strconv.Atoi(s[5:7])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tday, err := strconv.Atoi(s[8:10])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\thour, err := strconv.Atoi(s[11:13])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tminute, err := strconv.Atoi(s[14:16])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tseconds, err := strconv.Atoi(s[17:19])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tlocation, err := time.LoadLocation(\"UTC\")\n\n\treturn time.Date(year, time.Month(month), day, hour, minute, seconds, 0, location)\n}\n<commit_msg>Another fix for the ranged post retriever.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"log\"\n)\n\ntype FileRepository struct {\n\tpostDirectory string\n}\n\nfunc (f *FileRepository) FetchAllTags() ([]string, error) {\n\tposts, err := f.FetchAllPosts()\n\n\t\/\/ We're using a map to simulate a set\n\ttagMap := make(map[string]bool)\n\n\tfor i := range posts {\n\t\tfor j := range posts[i].Tags() {\n\t\t\ttagMap[posts[i].Tags()[j]] = true\n\t\t}\n\t}\n\n\ttags := []string{}\n\n\tfor key := range tagMap {\n\t\ttags = append(tags, key)\n\t}\n\n\tsort.Strings(tags)\n\n\treturn tags, err\n}\n\nfunc (f *FileRepository) FetchPostWithUrl(url string) (*BlogPost, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := range posts {\n\t\tif posts[i].Url() == url {\n\t\t\treturn posts[i], err\n\t\t}\n\t}\n\n\terr = errors.New(\"Could not find post\")\n\n\treturn nil, err\n}\n\nfunc (f *FileRepository) FetchPostsWithTag(tag string) ([]*BlogPost, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilteredPosts := []*BlogPost{}\n\n\tfor i := range posts {\n\t\tif posts[i].ContainsTag(tag) {\n\t\t\tfilteredPosts = append(filteredPosts, posts[i])\n\t\t}\n\t}\n\n\treturn filteredPosts, err\n}\n\nfunc (f *FileRepository) PostDirectory() string {\n\treturn f.postDirectory\n}\n\nfunc (f *FileRepository) SetPostDirectory(s string) {\n\tf.postDirectory = s\n}\n\nfunc (f *FileRepository) FetchPostsInRange(start, count int) (BlogPosts, error) {\n\tposts, err := f.FetchAllPosts()\n\n\tif start + count > len(posts) {\n\t\tcount = len(posts) - start\n\t}\n\n\tlog.Println(start)\n\tlog.Println(count)\n\tlog.Println(len(posts))\n\n\treturn posts[start:start + count + 1], err\n}\n\nfunc (f *FileRepository) FetchAllPosts() (BlogPosts, error) {\n\n\tdirname := f.postDirectory + string(filepath.Separator)\n\n\tfiles, err := ioutil.ReadDir(dirname)\n\n\tposts := BlogPosts{}\n\n\tfor i := range files {\n\n\t\tif files[i].IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(files[i].Name()) != \".md\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tpost, err := f.FetchPost(files[i].Name())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tposts = append(posts, post)\n\t}\n\n\tsort.Sort(posts)\n\n\treturn posts, err\n}\n\nfunc (f *FileRepository) FetchPost(filename string) (*BlogPost, error) {\n\n\tpost := new(BlogPost)\n\n\tdirname := f.postDirectory + string(filepath.Separator)\n\n\tfile, err := ioutil.ReadFile(dirname + filename)\n\n\tif err != nil {\n\t\treturn post, err\n\t}\n\n\tfile = []byte(f.extractHeader(string(file), post))\n\n\thtmlFlags := blackfriday.HTML_USE_SMARTYPANTS\n\textensions := blackfriday.EXTENSION_HARD_LINE_BREAK | blackfriday.EXTENSION_FENCED_CODE | blackfriday.EXTENSION_NO_INTRA_EMPHASIS\n\n\trenderer := blackfriday.HtmlRenderer(htmlFlags, post.Title(), \"\")\n\n\toutput := blackfriday.Markdown(file, renderer, extensions)\n\n\tpost.SetBody(string(output))\n\n\treturn post, nil\n}\n\nfunc (f *FileRepository) extractHeader(text string, post *BlogPost) string {\n\n\tlines := strings.Split(text, \"\\n\")\n\n\theaderSize := 0\n\n\tfor i := range lines {\n\t\tif strings.Contains(lines[i], \":\") {\n\t\t\tcomponents := strings.Split(lines[i], \":\")\n\n\t\t\theader := strings.ToLower(strings.Trim(components[0], \" \"))\n\t\t\tseparatorIndex := strings.Index(lines[i], \":\") + 1\n\t\t\tdata := strings.Trim(lines[i][separatorIndex:], \" \")\n\n\t\t\tswitch header {\n\t\t\tcase \"title\":\n\t\t\t\tpost.SetTitle(data)\n\t\t\tcase \"tags\":\n\n\t\t\t\ttags := strings.Split(data, \",\")\n\n\t\t\t\tformattedTags := []string{}\n\n\t\t\t\tfor j := range tags {\n\t\t\t\t\ttags[j] = strings.Trim(tags[j], \" \")\n\t\t\t\t\ttags[j] = strings.Replace(tags[j], \" \", \"-\", -1)\n\n\t\t\t\t\tif tags[j] != \"\" {\n\t\t\t\t\t\tformattedTags = append(formattedTags, tags[j])\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tpost.SetTags(formattedTags)\n\t\t\tcase \"date\":\n\t\t\t\tpost.SetPublishDate(stringToTime(data))\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\theaderSize += len(lines[i]) + 1\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn text[headerSize:]\n}\n\nfunc stringToTime(s string) time.Time {\n\n\tyear, err := strconv.Atoi(s[:4])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tmonth, err := strconv.Atoi(s[5:7])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tday, err := strconv.Atoi(s[8:10])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\thour, err := strconv.Atoi(s[11:13])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tminute, err := strconv.Atoi(s[14:16])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tseconds, err := strconv.Atoi(s[17:19])\n\n\tif err != nil {\n\t\treturn time.Unix(0, 0)\n\t}\n\n\tlocation, err := time.LoadLocation(\"UTC\")\n\n\treturn time.Date(year, time.Month(month), day, hour, minute, seconds, 0, location)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\nconst (\n VERSION = \"1.0-alpha\"\n BUILD_DATE = \"2016-04-15 09:25\"\n)\n<commit_msg>Updated version<commit_after>package main\nconst (\n VERSION = \"1.0-alpha\"\n BUILD_DATE = \"2016-04-22 12:05\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage drive\n\nimport (\n\t\"fmt\"\n\n\tdrive \"github.com\/odeke-em\/google-api-go-client\/drive\/v2\"\n\t\"github.com\/odeke-em\/log\"\n)\n\nconst Version = \"0.1.8\"\n\nconst (\n\tBarely = iota\n\tAlmostExceeded\n\tHalfwayExceeded\n\tExceeded\n\tUnknown\n)\n\nconst (\n\tAboutNone = 1 << iota\n\tAboutQuota\n\tAboutFileSizes\n\tAboutFeatures\n)\n\nfunc (g *Commands) About(mask int) (err error) {\n\tdefer PrintVersion()\n\tif mask == AboutNone {\n\t\treturn nil\n\t}\n\n\tabout, err := g.rem.About()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintSummary(g.log, about, mask)\n\n\treturn nil\n}\n\nfunc quotaRequested(mask int) bool {\n\treturn (mask & AboutQuota) != 0\n}\n\nfunc fileSizesRequested(mask int) bool {\n\treturn (mask & AboutFileSizes) != 0\n}\n\nfunc featuresRequested(mask int) bool {\n\treturn (mask & AboutFeatures) != 0\n}\n\nfunc printSummary(logy *log.Logger, about *drive.About, mask int) {\n\tif quotaRequested(mask) {\n\t\tquotaInformation(logy, about)\n\t}\n\tif fileSizesRequested(mask) {\n\t\tfileSizesInfo(logy, about)\n\t}\n\n\tif featuresRequested(mask) {\n\t\tfeaturesInformation(logy, about)\n\t}\n}\n\nfunc fileSizesInfo(logy *log.Logger, about *drive.About) {\n\tif len(about.MaxUploadSizes) >= 1 {\n\t\tlogy.Logln(\"\\n* Maximum upload sizes per file type *\")\n\t\tlogy.Logf(\"%-50s %-20s\\n\", \"FileType\", \"Size\")\n\t\tfor _, uploadInfo := range about.MaxUploadSizes {\n\t\t\tlogy.Logf(\"%-50s %-20s\\n\", uploadInfo.Type, prettyBytes(uploadInfo.Size))\n\t\t}\n\t\tlogy.Logln()\n\t}\n\treturn\n}\n\nfunc featuresInformation(logy *log.Logger, about *drive.About) {\n\tif len(about.Features) >= 1 {\n\t\tlogy.Logf(\"%-30s %-30s\\n\", \"Feature\", \"Request limit (queries\/second)\")\n\t\tfor _, feature := range about.Features {\n\t\t\tif feature.FeatureName == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogy.Logf(\"%-30s %-30f\\n\", feature.FeatureName, feature.FeatureRate)\n\t\t}\n\t\tlogy.Logln()\n\t}\n}\n\nfunc quotaInformation(logy *log.Logger, about *drive.About) {\n\tfreeBytes := about.QuotaBytesTotal - about.QuotaBytesUsed\n\n\tlogy.Logf(\n\t\t\"Name: %s\\nAccount type:\\t%s\\nBytes Used:\\t%-20d (%s)\\n\"+\n\t\t\t\"Bytes Free:\\t%-20d (%s)\\nBytes InTrash:\\t%-20d (%s)\\n\"+\n\t\t\t\"Total Bytes:\\t%-20d (%s)\\n\",\n\t\tabout.Name, about.QuotaType,\n\t\tabout.QuotaBytesUsed, prettyBytes(about.QuotaBytesUsed),\n\t\tfreeBytes, prettyBytes(freeBytes),\n\t\tabout.QuotaBytesUsedInTrash, prettyBytes(about.QuotaBytesUsedInTrash),\n\t\tabout.QuotaBytesTotal, prettyBytes(about.QuotaBytesTotal))\n\n\tif len(about.QuotaBytesByService) >= 1 {\n\t\tlogy.Logln(\"\\n* Space used by Google Services *\")\n\t\tlogy.Logf(\"%-36s %-36s\\n\", \"Service\", \"Bytes\")\n\t\tfor _, quotaService := range about.QuotaBytesByService {\n\t\t\tlogy.Logf(\"%-36s %-36s\\n\", quotaService.ServiceName, prettyBytes(quotaService.BytesUsed))\n\t\t}\n\t\tlogy.Logf(\"%-36s %-36s\\n\", \"Space used by all Google Apps\",\n\t\t\tprettyBytes(about.QuotaBytesUsedAggregate))\n\t}\n\tlogy.Logln()\n}\n\nfunc (g *Commands) QuotaStatus(query int64) (status int, err error) {\n\tif query < 0 {\n\t\treturn Unknown, err\n\t}\n\n\tabout, err := g.rem.About()\n\tif err != nil {\n\t\treturn Unknown, err\n\t}\n\n\t\/\/ Sanity check\n\tif about.QuotaBytesTotal < 1 {\n\t\treturn Unknown, fmt.Errorf(\"QuotaBytesTotal < 1\")\n\t}\n\n\ttoBeUsed := query + about.QuotaBytesUsed\n\tif toBeUsed >= about.QuotaBytesTotal {\n\t\treturn Exceeded, nil\n\t}\n\n\tpercentage := float64(toBeUsed) \/ float64(about.QuotaBytesTotal)\n\tif percentage < 0.5 {\n\t\treturn Barely, nil\n\t}\n\tif percentage < 0.8 {\n\t\treturn HalfwayExceeded, nil\n\t}\n\treturn AlmostExceeded, nil\n}\n<commit_msg>version bump<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage drive\n\nimport (\n\t\"fmt\"\n\n\tdrive \"github.com\/odeke-em\/google-api-go-client\/drive\/v2\"\n\t\"github.com\/odeke-em\/log\"\n)\n\nconst Version = \"0.1.9\"\n\nconst (\n\tBarely = iota\n\tAlmostExceeded\n\tHalfwayExceeded\n\tExceeded\n\tUnknown\n)\n\nconst (\n\tAboutNone = 1 << iota\n\tAboutQuota\n\tAboutFileSizes\n\tAboutFeatures\n)\n\nfunc (g *Commands) About(mask int) (err error) {\n\tdefer PrintVersion()\n\tif mask == AboutNone {\n\t\treturn nil\n\t}\n\n\tabout, err := g.rem.About()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintSummary(g.log, about, mask)\n\n\treturn nil\n}\n\nfunc quotaRequested(mask int) bool {\n\treturn (mask & AboutQuota) != 0\n}\n\nfunc fileSizesRequested(mask int) bool {\n\treturn (mask & AboutFileSizes) != 0\n}\n\nfunc featuresRequested(mask int) bool {\n\treturn (mask & AboutFeatures) != 0\n}\n\nfunc printSummary(logy *log.Logger, about *drive.About, mask int) {\n\tif quotaRequested(mask) {\n\t\tquotaInformation(logy, about)\n\t}\n\tif fileSizesRequested(mask) {\n\t\tfileSizesInfo(logy, about)\n\t}\n\n\tif featuresRequested(mask) {\n\t\tfeaturesInformation(logy, about)\n\t}\n}\n\nfunc fileSizesInfo(logy *log.Logger, about *drive.About) {\n\tif len(about.MaxUploadSizes) >= 1 {\n\t\tlogy.Logln(\"\\n* Maximum upload sizes per file type *\")\n\t\tlogy.Logf(\"%-50s %-20s\\n\", \"FileType\", \"Size\")\n\t\tfor _, uploadInfo := range about.MaxUploadSizes {\n\t\t\tlogy.Logf(\"%-50s %-20s\\n\", uploadInfo.Type, prettyBytes(uploadInfo.Size))\n\t\t}\n\t\tlogy.Logln()\n\t}\n\treturn\n}\n\nfunc featuresInformation(logy *log.Logger, about *drive.About) {\n\tif len(about.Features) >= 1 {\n\t\tlogy.Logf(\"%-30s %-30s\\n\", \"Feature\", \"Request limit (queries\/second)\")\n\t\tfor _, feature := range about.Features {\n\t\t\tif feature.FeatureName == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogy.Logf(\"%-30s %-30f\\n\", feature.FeatureName, feature.FeatureRate)\n\t\t}\n\t\tlogy.Logln()\n\t}\n}\n\nfunc quotaInformation(logy *log.Logger, about *drive.About) {\n\tfreeBytes := about.QuotaBytesTotal - about.QuotaBytesUsed\n\n\tlogy.Logf(\n\t\t\"Name: %s\\nAccount type:\\t%s\\nBytes Used:\\t%-20d (%s)\\n\"+\n\t\t\t\"Bytes Free:\\t%-20d (%s)\\nBytes InTrash:\\t%-20d (%s)\\n\"+\n\t\t\t\"Total Bytes:\\t%-20d (%s)\\n\",\n\t\tabout.Name, about.QuotaType,\n\t\tabout.QuotaBytesUsed, prettyBytes(about.QuotaBytesUsed),\n\t\tfreeBytes, prettyBytes(freeBytes),\n\t\tabout.QuotaBytesUsedInTrash, prettyBytes(about.QuotaBytesUsedInTrash),\n\t\tabout.QuotaBytesTotal, prettyBytes(about.QuotaBytesTotal))\n\n\tif len(about.QuotaBytesByService) >= 1 {\n\t\tlogy.Logln(\"\\n* Space used by Google Services *\")\n\t\tlogy.Logf(\"%-36s %-36s\\n\", \"Service\", \"Bytes\")\n\t\tfor _, quotaService := range about.QuotaBytesByService {\n\t\t\tlogy.Logf(\"%-36s %-36s\\n\", quotaService.ServiceName, prettyBytes(quotaService.BytesUsed))\n\t\t}\n\t\tlogy.Logf(\"%-36s %-36s\\n\", \"Space used by all Google Apps\",\n\t\t\tprettyBytes(about.QuotaBytesUsedAggregate))\n\t}\n\tlogy.Logln()\n}\n\nfunc (g *Commands) QuotaStatus(query int64) (status int, err error) {\n\tif query < 0 {\n\t\treturn Unknown, err\n\t}\n\n\tabout, err := g.rem.About()\n\tif err != nil {\n\t\treturn Unknown, err\n\t}\n\n\t\/\/ Sanity check\n\tif about.QuotaBytesTotal < 1 {\n\t\treturn Unknown, fmt.Errorf(\"QuotaBytesTotal < 1\")\n\t}\n\n\ttoBeUsed := query + about.QuotaBytesUsed\n\tif toBeUsed >= about.QuotaBytesTotal {\n\t\treturn Exceeded, nil\n\t}\n\n\tpercentage := float64(toBeUsed) \/ float64(about.QuotaBytesTotal)\n\tif percentage < 0.5 {\n\t\treturn Barely, nil\n\t}\n\tif percentage < 0.8 {\n\t\treturn HalfwayExceeded, nil\n\t}\n\treturn AlmostExceeded, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"admincall\"\n\t\"basic\/ssdb\/gossdb\"\n\t\"basic\/utils\"\n\t\"data\"\n\n\t\"flag\"\n\t\"net\/http\"\n\t\"operation\"\n\t\"role\"\n\t\"statistics\"\n\t\"user\"\n\n\t_ \"csv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\n\t\"errors\"\n)\n\nfunc loginMiddleware(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tcookie, err := c.Cookie(\"login\")\n\t\tif err != nil || cookie == nil || len(cookie.Value) <= 0 || data.Sessions.Get(cookie.Value) == nil {\n\t\t\tif c.Request().Method == \"GET\" {\n\t\t\t\tc.Request().Header.Add(\"Cache-Control\", \"no-cache\")\n\t\t\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"\/login\/login.html\")\n\t\t\t} else if c.Request().Method == \"POST\" {\n\t\t\t\tc.JSON(http.StatusOK, data.H{\"status\": \"fail\", \"errorcode\": 100, \"msg\": \"未登陆\"})\n\t\t\t\treturn errors.New(\"未登陆\")\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn next(c)\n\t}\n}\n\nfunc main() {\n\tvar config string\n\tflag.StringVar(&config, \"conf\", \".\/conf.json\", \"config path\")\n\tflag.Parse()\n\tdata.LoadConf(config)\n\n\te := echo.New()\n\te.Use(middleware.Recover())\n\te.Use(middleware.Gzip())\n\t\/\/\te.Use(middleware.CSRF())\n\te.Use(middleware.Secure())\n\te.Use(middleware.BodyLimit(\"1M\"))\n\n\te.Static(\"\/assets\", \"AmazeUI\/assets\")\n\n\te.Static(\"\/users\", \"AmazeUI\/users\", loginMiddleware)\n\te.Static(\"\/operation\", \"AmazeUI\/operation\", loginMiddleware)\n\te.Static(\"\/roles\", \"AmazeUI\/roles\", loginMiddleware)\n\te.Static(\"\/room\", \"AmazeUI\/room\", loginMiddleware)\n\te.Static(\"\/statistics\", \"AmazeUI\/statistics\", loginMiddleware)\n\te.Static(\"\/tools\", \"AmazeUI\/tools\", loginMiddleware)\n\n\te.Static(\"\/login\", \"AmazeUI\/login\")\n\n\te.POST(\"\/users\/login\", user.Login)\n\te.POST(\"\/users\/logout\", user.Logout)\n\n\te.GET(\"\/\", func(c echo.Context) error {\n\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"\/statistics\/index.html?v=\"+data.Conf.Version)\n\t})\n\n\te.POST(\"\/roles\/list\", role.List, loginMiddleware)\n\te.POST(\"\/roles\/search\", role.Search, loginMiddleware)\n\te.POST(\"\/roles\/edit\", role.Edit, loginMiddleware)\n\n\te.POST(\"\/users\/create\", user.Create, loginMiddleware)\n\te.POST(\"\/users\/edit\", user.Edit, loginMiddleware)\n\te.POST(\"\/users\/list\", user.List, loginMiddleware)\n\te.POST(\"\/users\/delete\", user.Delete, loginMiddleware)\n\te.POST(\"\/users\/getdetail\", user.GetSelfDetail, loginMiddleware)\n\te.POST(\"\/users\/record\", user.Record, loginMiddleware)\n\n\te.POST(\"\/users\/getloginlimit\", user.GetLoginLimit, loginMiddleware)\n\te.POST(\"\/users\/delloginlimit\", user.DelLoginLimit, loginMiddleware)\n\n\te.POST(\"\/group\/create\", user.CreateGroup, loginMiddleware)\n\te.POST(\"\/group\/edit\", user.EditGroup, loginMiddleware)\n\te.POST(\"\/group\/list\", user.Groups, loginMiddleware)\n\te.POST(\"\/group\/delete\", user.DeleteGroup, loginMiddleware)\n\n\te.POST(\"\/roles\/listonline\", role.ListOnline, loginMiddleware) \/\/ 在线玩家列表\n\n\te.POST(\"\/operation\/privaterecord\", operation.PrivateRecord, loginMiddleware) \/\/ 私人局牌局记录\n\te.POST(\"\/operation\/matchrecord\", operation.MatchRecord, loginMiddleware) \/\/ 比赛场牌局记录\n\te.POST(\"\/operation\/normalrecord\", operation.NormalRecord, loginMiddleware) \/\/ 金币场牌局记录\n\te.POST(\"\/operation\/cardrecord\", operation.CardRecode, loginMiddleware) \/\/ 金币场牌打牌记录\n\te.POST(\"\/operation\/issueprops\", operation.IssueProps, loginMiddleware) \/\/\n\te.POST(\"\/operation\/postbox\", operation.Postbox, loginMiddleware) \/\/\n\te.POST(\"\/operation\/issuelist\", operation.IssuePropsList, loginMiddleware) \/\/\n\te.POST(\"\/operation\/loginrecord\", operation.LoginRecord, loginMiddleware) \/\/\n\te.POST(\"\/operation\/roomcreaterecord\", operation.RoomCreateRecord, loginMiddleware) \/\/ 私人房创建记录\n\n\te.POST(\"\/operation\/charge\", operation.GetChargeOrder, loginMiddleware) \/\/ 下单记录\n\te.POST(\"\/operation\/transition\", operation.GetTransition, loginMiddleware) \/\/ 交易记录\n\n\te.POST(\"\/statistics\/online\", statistics.Online, loginMiddleware)\n\te.POST(\"\/statistics\/newuser\", statistics.NewUser, loginMiddleware)\n\te.POST(\"\/statistics\/index\", statistics.Index, loginMiddleware)\n\n\te.POST(\"\/tools\/getnotice\", operation.GetNotice, loginMiddleware)\n\te.POST(\"\/tools\/issuenotice\", operation.AddNotice, loginMiddleware)\n\tconndb()\n\tglog.Infoln(utils.TimestampToday())\n\t\/\/for i := 0; i < 1000; i++ {\n\t\/\/\tgo func() {\n\t\/\/\t\tlastID, err := gossdb.C().Get(data.KEY_LAST_USER_ID)\n\t\/\/\t\tglog.Errorln(err, lastID, data.KEY_LAST_USER_ID)\n\t\/\/\t\tif err != nil {\n\t\/\/\t\t\tglog.Errorln(err, lastID, data.KEY_LAST_USER_ID)\n\t\/\/\t\t}\n\t\/\/\t}()\n\t\/\/}\n\n\tdata.InitAdmin()\n\tdata.LoadLimitIPs()\n\tadmincall.Init(data.Conf.CallServer)\n\te.Start(data.Conf.Port)\n}\n\n\/\/ 链接数据库\nfunc conndb() {\n\tglog.Infoln(\"Config: \", data.Conf)\n\tgossdb.Connect(data.Conf.Db.Ip, data.Conf.Db.Port, data.Conf.Db.Thread)\n\tdefer glog.Flush()\n}\n<commit_msg>解决SSDB网络缓存设置太小导致获取数据不全的问题<commit_after>package main\n\nimport (\n\t\"admincall\"\n\t\"basic\/ssdb\/gossdb\"\n\t\"data\"\n\n\t\"flag\"\n\t\"net\/http\"\n\t\"operation\"\n\t\"role\"\n\t\"statistics\"\n\t\"user\"\n\n\t_ \"csv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/echo\/middleware\"\n\n\t\"errors\"\n)\n\nfunc loginMiddleware(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tcookie, err := c.Cookie(\"login\")\n\t\tif err != nil || cookie == nil || len(cookie.Value) <= 0 || data.Sessions.Get(cookie.Value) == nil {\n\t\t\tif c.Request().Method == \"GET\" {\n\t\t\t\tc.Request().Header.Add(\"Cache-Control\", \"no-cache\")\n\t\t\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"\/login\/login.html\")\n\t\t\t} else if c.Request().Method == \"POST\" {\n\t\t\t\tc.JSON(http.StatusOK, data.H{\"status\": \"fail\", \"errorcode\": 100, \"msg\": \"未登陆\"})\n\t\t\t\treturn errors.New(\"未登陆\")\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn next(c)\n\t}\n}\n\nfunc main() {\n\tvar config string\n\tflag.StringVar(&config, \"conf\", \".\/conf.json\", \"config path\")\n\tflag.Parse()\n\tdata.LoadConf(config)\n\n\te := echo.New()\n\te.Use(middleware.Recover())\n\te.Use(middleware.Gzip())\n\t\/\/\te.Use(middleware.CSRF())\n\te.Use(middleware.Secure())\n\te.Use(middleware.BodyLimit(\"1M\"))\n\n\te.Static(\"\/assets\", \"AmazeUI\/assets\")\n\n\te.Static(\"\/users\", \"AmazeUI\/users\", loginMiddleware)\n\te.Static(\"\/operation\", \"AmazeUI\/operation\", loginMiddleware)\n\te.Static(\"\/roles\", \"AmazeUI\/roles\", loginMiddleware)\n\te.Static(\"\/room\", \"AmazeUI\/room\", loginMiddleware)\n\te.Static(\"\/statistics\", \"AmazeUI\/statistics\", loginMiddleware)\n\te.Static(\"\/tools\", \"AmazeUI\/tools\", loginMiddleware)\n\n\te.Static(\"\/login\", \"AmazeUI\/login\")\n\n\te.POST(\"\/users\/login\", user.Login)\n\te.POST(\"\/users\/logout\", user.Logout)\n\n\te.GET(\"\/\", func(c echo.Context) error {\n\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"\/statistics\/index.html?v=\"+data.Conf.Version)\n\t})\n\n\te.POST(\"\/roles\/list\", role.List, loginMiddleware)\n\te.POST(\"\/roles\/search\", role.Search, loginMiddleware)\n\te.POST(\"\/roles\/edit\", role.Edit, loginMiddleware)\n\n\te.POST(\"\/users\/create\", user.Create, loginMiddleware)\n\te.POST(\"\/users\/edit\", user.Edit, loginMiddleware)\n\te.POST(\"\/users\/list\", user.List, loginMiddleware)\n\te.POST(\"\/users\/delete\", user.Delete, loginMiddleware)\n\te.POST(\"\/users\/getdetail\", user.GetSelfDetail, loginMiddleware)\n\te.POST(\"\/users\/record\", user.Record, loginMiddleware)\n\n\te.POST(\"\/users\/getloginlimit\", user.GetLoginLimit, loginMiddleware)\n\te.POST(\"\/users\/delloginlimit\", user.DelLoginLimit, loginMiddleware)\n\n\te.POST(\"\/group\/create\", user.CreateGroup, loginMiddleware)\n\te.POST(\"\/group\/edit\", user.EditGroup, loginMiddleware)\n\te.POST(\"\/group\/list\", user.Groups, loginMiddleware)\n\te.POST(\"\/group\/delete\", user.DeleteGroup, loginMiddleware)\n\n\te.POST(\"\/roles\/listonline\", role.ListOnline, loginMiddleware) \/\/ 在线玩家列表\n\n\te.POST(\"\/operation\/privaterecord\", operation.PrivateRecord, loginMiddleware) \/\/ 私人局牌局记录\n\te.POST(\"\/operation\/matchrecord\", operation.MatchRecord, loginMiddleware) \/\/ 比赛场牌局记录\n\te.POST(\"\/operation\/normalrecord\", operation.NormalRecord, loginMiddleware) \/\/ 金币场牌局记录\n\te.POST(\"\/operation\/cardrecord\", operation.CardRecode, loginMiddleware) \/\/ 金币场牌打牌记录\n\te.POST(\"\/operation\/issueprops\", operation.IssueProps, loginMiddleware) \/\/\n\te.POST(\"\/operation\/postbox\", operation.Postbox, loginMiddleware) \/\/\n\te.POST(\"\/operation\/issuelist\", operation.IssuePropsList, loginMiddleware) \/\/\n\te.POST(\"\/operation\/loginrecord\", operation.LoginRecord, loginMiddleware) \/\/\n\te.POST(\"\/operation\/roomcreaterecord\", operation.RoomCreateRecord, loginMiddleware) \/\/ 私人房创建记录\n\n\te.POST(\"\/operation\/charge\", operation.GetChargeOrder, loginMiddleware) \/\/ 下单记录\n\te.POST(\"\/operation\/transition\", operation.GetTransition, loginMiddleware) \/\/ 交易记录\n\n\te.POST(\"\/statistics\/online\", statistics.Online, loginMiddleware)\n\te.POST(\"\/statistics\/newuser\", statistics.NewUser, loginMiddleware)\n\te.POST(\"\/statistics\/index\", statistics.Index, loginMiddleware)\n\n\te.POST(\"\/tools\/getnotice\", operation.GetNotice, loginMiddleware)\n\te.POST(\"\/tools\/issuenotice\", operation.AddNotice, loginMiddleware)\n\tconndb()\n\n\n\tdata.InitAdmin()\n\tdata.LoadLimitIPs()\n\tadmincall.Init(data.Conf.CallServer)\n\te.Start(data.Conf.Port)\n}\n\n\/\/ 链接数据库\nfunc conndb() {\n\tglog.Infoln(\"Config: \", data.Conf)\n\tgossdb.Connect(data.Conf.Db.Ip, data.Conf.Db.Port, data.Conf.Db.Thread)\n\tdefer glog.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package worker_test\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"restic\/worker\"\n)\n\nconst concurrency = 10\n\nvar errTooLarge = errors.New(\"too large\")\n\nfunc square(job worker.Job, done <-chan struct{}) (interface{}, error) {\n\tn := job.Data.(int)\n\tif n > 2000 {\n\t\treturn nil, errTooLarge\n\t}\n\treturn n * n, nil\n}\n\nfunc newBufferedPool(bufsize int, n int, f worker.Func) (chan worker.Job, chan worker.Job, *worker.Pool) {\n\tinCh := make(chan worker.Job, bufsize)\n\toutCh := make(chan worker.Job, bufsize)\n\n\treturn inCh, outCh, worker.New(n, f, inCh, outCh)\n}\n\nfunc TestPool(t *testing.T) {\n\tinCh, outCh, p := newBufferedPool(200, concurrency, square)\n\n\tfor i := 0; i < 150; i++ {\n\t\tinCh <- worker.Job{Data: i}\n\t}\n\n\tclose(inCh)\n\tp.Wait()\n\n\tfor res := range outCh {\n\t\tif res.Error != nil {\n\t\t\tt.Errorf(\"unexpected error for job %v received: %v\", res.Data, res.Error)\n\t\t\tcontinue\n\t\t}\n\n\t\tn := res.Data.(int)\n\t\tm := res.Result.(int)\n\n\t\tif m != n*n {\n\t\t\tt.Errorf(\"wrong value for job %d returned: want %d, got %d\", n, n*n, m)\n\t\t}\n\t}\n}\n\nfunc TestPoolErrors(t *testing.T) {\n\tinCh, outCh, p := newBufferedPool(200, concurrency, square)\n\n\tfor i := 0; i < 150; i++ {\n\t\tinCh <- worker.Job{Data: i + 1900}\n\t}\n\n\tclose(inCh)\n\tp.Wait()\n\n\tfor res := range outCh {\n\t\tn := res.Data.(int)\n\n\t\tif n > 2000 {\n\t\t\tif res.Error == nil {\n\t\t\t\tt.Errorf(\"expected error not found, result is %v\", res)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif res.Error != errTooLarge {\n\t\t\t\tt.Errorf(\"unexpected error found, result is %v\", res)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif res.Error != nil {\n\t\t\t\tt.Errorf(\"unexpected error for job %v received: %v\", res.Data, res.Error)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tm := res.Result.(int)\n\t\tif m != n*n {\n\t\t\tt.Errorf(\"wrong value for job %d returned: want %d, got %d\", n, n*n, m)\n\t\t}\n\t}\n}\n\nvar errCancelled = errors.New(\"cancelled\")\n\ntype Job struct {\n\tsuc chan struct{}\n}\n\nfunc TestPoolCancel(t *testing.T) {\n\tbarrier := make(chan struct{})\n\n\twait := func(job worker.Job, done <-chan struct{}) (interface{}, error) {\n\t\tj := job.Data.(Job)\n\n\t\t<-barrier\n\n\t\tselect {\n\t\tcase j.suc <- struct{}{}:\n\t\t\treturn time.Now(), nil\n\t\tcase <-done:\n\t\t\treturn nil, errCancelled\n\t\t}\n\t}\n\n\tjobCh, resCh, p := newBufferedPool(20, concurrency, wait)\n\n\tsuc := make(chan struct{})\n\tfor i := 0; i < 20; i++ {\n\t\tjobCh <- worker.Job{Data: Job{suc: suc}}\n\t}\n\n\tclose(barrier)\n\t<-suc\n\tp.Cancel()\n\tp.Wait()\n\n\tfoundResult := false\n\tfoundCancelError := false\n\tfor res := range resCh {\n\t\tif res.Error == nil {\n\t\t\tfoundResult = true\n\t\t}\n\n\t\tif res.Error == errCancelled {\n\t\t\tfoundCancelError = true\n\t\t}\n\t}\n\n\tif !foundResult {\n\t\tt.Error(\"did not find one expected result\")\n\t}\n\n\tif !foundCancelError {\n\t\tt.Error(\"did not find one expected cancel error\")\n\t}\n}\n<commit_msg>Fix flaky test<commit_after>package worker_test\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"restic\/worker\"\n)\n\nconst concurrency = 10\n\nvar errTooLarge = errors.New(\"too large\")\n\nfunc square(job worker.Job, done <-chan struct{}) (interface{}, error) {\n\tn := job.Data.(int)\n\tif n > 2000 {\n\t\treturn nil, errTooLarge\n\t}\n\treturn n * n, nil\n}\n\nfunc newBufferedPool(bufsize int, n int, f worker.Func) (chan worker.Job, chan worker.Job, *worker.Pool) {\n\tinCh := make(chan worker.Job, bufsize)\n\toutCh := make(chan worker.Job, bufsize)\n\n\treturn inCh, outCh, worker.New(n, f, inCh, outCh)\n}\n\nfunc TestPool(t *testing.T) {\n\tinCh, outCh, p := newBufferedPool(200, concurrency, square)\n\n\tfor i := 0; i < 150; i++ {\n\t\tinCh <- worker.Job{Data: i}\n\t}\n\n\tclose(inCh)\n\tp.Wait()\n\n\tfor res := range outCh {\n\t\tif res.Error != nil {\n\t\t\tt.Errorf(\"unexpected error for job %v received: %v\", res.Data, res.Error)\n\t\t\tcontinue\n\t\t}\n\n\t\tn := res.Data.(int)\n\t\tm := res.Result.(int)\n\n\t\tif m != n*n {\n\t\t\tt.Errorf(\"wrong value for job %d returned: want %d, got %d\", n, n*n, m)\n\t\t}\n\t}\n}\n\nfunc TestPoolErrors(t *testing.T) {\n\tinCh, outCh, p := newBufferedPool(200, concurrency, square)\n\n\tfor i := 0; i < 150; i++ {\n\t\tinCh <- worker.Job{Data: i + 1900}\n\t}\n\n\tclose(inCh)\n\tp.Wait()\n\n\tfor res := range outCh {\n\t\tn := res.Data.(int)\n\n\t\tif n > 2000 {\n\t\t\tif res.Error == nil {\n\t\t\t\tt.Errorf(\"expected error not found, result is %v\", res)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif res.Error != errTooLarge {\n\t\t\t\tt.Errorf(\"unexpected error found, result is %v\", res)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif res.Error != nil {\n\t\t\t\tt.Errorf(\"unexpected error for job %v received: %v\", res.Data, res.Error)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tm := res.Result.(int)\n\t\tif m != n*n {\n\t\t\tt.Errorf(\"wrong value for job %d returned: want %d, got %d\", n, n*n, m)\n\t\t}\n\t}\n}\n\nvar errCancelled = errors.New(\"cancelled\")\n\ntype Job struct {\n\tsuc chan struct{}\n}\n\nfunc TestPoolCancel(t *testing.T) {\n\tbarrier := make(chan struct{})\n\n\tsendTime := func(job worker.Job, done <-chan struct{}) (interface{}, error) {\n\t\tj := job.Data.(Job)\n\n\t\t<-barrier\n\n\t\tselect {\n\t\tcase j.suc <- struct{}{}:\n\t\t\treturn time.Now(), nil\n\t\tcase <-done:\n\t\t\treturn nil, errCancelled\n\t\t}\n\t}\n\n\tjobCh, resCh, p := newBufferedPool(20, concurrency, sendTime)\n\n\tsuc := make(chan struct{})\n\tfor i := 0; i < 20; i++ {\n\t\tjobCh <- worker.Job{Data: Job{suc: suc}}\n\t}\n\n\tclose(barrier)\n\tfor i := 0; i < 10; i++ {\n\t\t<-suc\n\t}\n\tp.Cancel()\n\tp.Wait()\n\n\tfoundResult := false\n\tfoundCancelError := false\n\tfor res := range resCh {\n\t\tif res.Error == nil {\n\t\t\tfoundResult = true\n\t\t}\n\n\t\tif res.Error == errCancelled {\n\t\t\tfoundCancelError = true\n\t\t}\n\t}\n\n\tif !foundResult {\n\t\tt.Error(\"did not find one expected result\")\n\t}\n\n\tif !foundCancelError {\n\t\tt.Error(\"did not find one expected cancel error\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package http\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc Test_NilRequest(t *testing.T) {\n\t_, err := ParseRequest(nil)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"nil request did not result in correct error\")\n\t}\n}\n\nfunc Test_EmptyRequests(t *testing.T) {\n\tb := []byte(fmt.Sprintf(`[]`))\n\t_, err := ParseRequest(b)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"empty simple request did not result in correct error\")\n\t}\n\n\tb = []byte(fmt.Sprintf(`[[]]`))\n\t_, err = ParseRequest(b)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"empty parameterized request did not result in correct error\")\n\t}\n}\n\nfunc Test_SingleSimpleRequest(t *testing.T) {\n\ts := \"SELECT * FROM FOO\"\n\tb := []byte(fmt.Sprintf(`[\"%s\"]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleSimpleInvalidRequest(t *testing.T) {\n\ts := \"SELECT * FROM FOO\"\n\tb := []byte(fmt.Sprintf(`[\"%s\"`, s))\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_DoubleSimpleRequest(t *testing.T) {\n\ts0 := \"SELECT * FROM FOO\"\n\ts1 := \"SELECT * FROM BAR\"\n\tb := []byte(fmt.Sprintf(`[\"%s\", \"%s\"]`, s0, s1))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 2 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s0 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s0, stmts[0].Sql)\n\t}\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n\tif stmts[1].Sql != s1 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s1, stmts[1].Sql)\n\t}\n\tif stmts[1].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? WHERE bar=?\"\n\tp0 := \"FOO\"\n\tp1 := 1\n\tb := []byte(fmt.Sprintf(`[[\"%s\", \"%s\", %d]]`, s, p0, p1))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif len(stmts[0].Parameters) != 2 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\tif stmts[0].Parameters[0].GetS() != p0 {\n\t\tt.Fatalf(\"incorrect parameter, exp %s, got %s\", p0, stmts[0].Parameters[0])\n\t}\n\tif int(stmts[0].Parameters[1].GetD()) != p1 {\n\t\tt.Fatalf(\"incorrect parameter, exp %d, got %d\", p1, int(stmts[0].Parameters[1].GetI()))\n\t}\n}\n\nfunc Test_SingleParameterizedRequestNoParams(t *testing.T) {\n\ts := \"SELECT * FROM foo\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"]]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif len(stmts[0].Parameters) != 0 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n}\n\nfunc Test_SingleParameterizedRequestNoParamsMixed(t *testing.T) {\n\ts1 := \"SELECT * FROM foo\"\n\ts2 := \"SELECT * FROM foo WHERE name=?\"\n\tp2 := \"bar\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"], [\"%s\", \"%s\"]]`, s1, s2, p2))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 2 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s1 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s1, stmts[0].Sql)\n\t}\n\tif len(stmts[0].Parameters) != 0 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\n\tif stmts[1].Sql != s2 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s2, stmts[0].Sql)\n\t}\n\tif len(stmts[1].Parameters) != 1 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\tif stmts[1].Parameters[0].GetS() != p2 {\n\t\tt.Fatalf(\"incorrect parameter, exp %s, got %s\", p2, stmts[1].Parameters[0])\n\t}\n}\n\nfunc Test_SingleSimpleParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? ?\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"]]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleInvalidParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? ?\"\n\tp0 := \"FOO\"\n\tp1 := 1\n\tb := []byte(fmt.Sprintf(`[[\"%s\", \"%s\", %d]`, s, p0, p1))\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_MixedInvalidRequest(t *testing.T) {\n\tb := []byte(`[[\"SELECT * FROM foo\"], \"SELECT * FROM bar\"]`)\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_SingleInvalidTypeRequests(t *testing.T) {\n\t_, err := ParseRequest([]byte(fmt.Sprintf(`[1]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n\n\t_, err = ParseRequest([]byte(fmt.Sprintf(`[[1]]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n\n\t_, err = ParseRequest([]byte(fmt.Sprintf(`[[1, \"x\", 2]]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n<commit_msg>Simple named param parser unit test<commit_after>package http\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc Test_NilRequest(t *testing.T) {\n\t_, err := ParseRequest(nil)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"nil request did not result in correct error\")\n\t}\n}\n\nfunc Test_EmptyRequests(t *testing.T) {\n\tb := []byte(fmt.Sprintf(`[]`))\n\t_, err := ParseRequest(b)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"empty simple request did not result in correct error\")\n\t}\n\n\tb = []byte(fmt.Sprintf(`[[]]`))\n\t_, err = ParseRequest(b)\n\tif err != ErrNoStatements {\n\t\tt.Fatalf(\"empty parameterized request did not result in correct error\")\n\t}\n}\n\nfunc Test_SingleSimpleRequest(t *testing.T) {\n\ts := \"SELECT * FROM FOO\"\n\tb := []byte(fmt.Sprintf(`[\"%s\"]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleSimpleInvalidRequest(t *testing.T) {\n\ts := \"SELECT * FROM FOO\"\n\tb := []byte(fmt.Sprintf(`[\"%s\"`, s))\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_DoubleSimpleRequest(t *testing.T) {\n\ts0 := \"SELECT * FROM FOO\"\n\ts1 := \"SELECT * FROM BAR\"\n\tb := []byte(fmt.Sprintf(`[\"%s\", \"%s\"]`, s0, s1))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 2 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s0 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s0, stmts[0].Sql)\n\t}\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n\tif stmts[1].Sql != s1 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s1, stmts[1].Sql)\n\t}\n\tif stmts[1].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? WHERE bar=?\"\n\tp0 := \"FOO\"\n\tp1 := 1\n\tb := []byte(fmt.Sprintf(`[[\"%s\", \"%s\", %d]]`, s, p0, p1))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif len(stmts[0].Parameters) != 2 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\tif stmts[0].Parameters[0].GetS() != p0 {\n\t\tt.Fatalf(\"incorrect parameter, exp %s, got %s\", p0, stmts[0].Parameters[0])\n\t}\n\tif int(stmts[0].Parameters[1].GetD()) != p1 {\n\t\tt.Fatalf(\"incorrect parameter, exp %d, got %d\", p1, int(stmts[0].Parameters[1].GetI()))\n\t}\n}\n\nfunc Test_SingleNamedParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM foo WHERE bar=:bar AND qux=:qux\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\", %s]]`, s, mustJSONMarshal(map[string]interface{}{\"bar\": 1, \"qux\": \"some string\"})))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif len(stmts[0].Parameters) != 2 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n}\n\nfunc Test_SingleParameterizedRequestNoParams(t *testing.T) {\n\ts := \"SELECT * FROM foo\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"]]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif len(stmts[0].Parameters) != 0 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n}\n\nfunc Test_SingleParameterizedRequestNoParamsMixed(t *testing.T) {\n\ts1 := \"SELECT * FROM foo\"\n\ts2 := \"SELECT * FROM foo WHERE name=?\"\n\tp2 := \"bar\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"], [\"%s\", \"%s\"]]`, s1, s2, p2))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 2 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s1 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s1, stmts[0].Sql)\n\t}\n\tif len(stmts[0].Parameters) != 0 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\n\tif stmts[1].Sql != s2 {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s2, stmts[0].Sql)\n\t}\n\tif len(stmts[1].Parameters) != 1 {\n\t\tt.Fatalf(\"incorrect number of parameters returned: %d\", len(stmts[0].Parameters))\n\t}\n\tif stmts[1].Parameters[0].GetS() != p2 {\n\t\tt.Fatalf(\"incorrect parameter, exp %s, got %s\", p2, stmts[1].Parameters[0])\n\t}\n}\n\nfunc Test_SingleSimpleParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? ?\"\n\tb := []byte(fmt.Sprintf(`[[\"%s\"]]`, s))\n\n\tstmts, err := ParseRequest(b)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse request: %s\", err.Error())\n\t}\n\n\tif len(stmts) != 1 {\n\t\tt.Fatalf(\"incorrect number of statements returned: %d\", len(stmts))\n\t}\n\tif stmts[0].Sql != s {\n\t\tt.Fatalf(\"incorrect statement parsed, exp %s, got %s\", s, stmts[0].Sql)\n\t}\n\n\tif stmts[0].Parameters != nil {\n\t\tt.Fatal(\"statement parameters are not nil\")\n\t}\n}\n\nfunc Test_SingleInvalidParameterizedRequest(t *testing.T) {\n\ts := \"SELECT * FROM ? ?\"\n\tp0 := \"FOO\"\n\tp1 := 1\n\tb := []byte(fmt.Sprintf(`[[\"%s\", \"%s\", %d]`, s, p0, p1))\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_MixedInvalidRequest(t *testing.T) {\n\tb := []byte(`[[\"SELECT * FROM foo\"], \"SELECT * FROM bar\"]`)\n\n\t_, err := ParseRequest(b)\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc Test_SingleInvalidTypeRequests(t *testing.T) {\n\t_, err := ParseRequest([]byte(fmt.Sprintf(`[1]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n\n\t_, err = ParseRequest([]byte(fmt.Sprintf(`[[1]]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n\n\t_, err = ParseRequest([]byte(fmt.Sprintf(`[[1, \"x\", 2]]`)))\n\tif err != ErrInvalidRequest {\n\t\tt.Fatal(\"got unexpected error for invalid request\")\n\t}\n}\n\nfunc mustJSONMarshal(v interface{}) []byte {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(\"failed to JSON marshal value\")\n\t}\n\treturn b\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc dockerpid(name string) (pid int, err error) {\n\tcmd := exec.Command(\"docker\", \"inspect\", \"--format\", \"{{.State.Pid}}\", name)\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\n\tpid, err = strconv.Atoi(strings.TrimSpace(string(output)))\n\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\tif pid == 0 {\n\t\treturn -1, errors.New(\"Invalid PID\")\n\t}\n\treturn pid, nil\n}\n\nfunc dockersha(name string) (sha string, err error) {\n\tcmd := exec.Command(\"docker\", \"inspect\", \"--format\", \"{{.Id}}\", name)\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn sha, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\tsha = strings.TrimSpace(string(output))\n\tif sha == \"\" {\n\t\treturn \"\", errors.New(\"Invalid SHA\")\n\t}\n\treturn sha, nil\n}\n\nfunc dockerstart(username string, homedirfrom string, homedirto string, name string, container string, dockersock string, bindhome bool, bindtmp bool, binddocker bool, init string, cmdargs []string, dockeropts []string) (pid int, err error) {\n\tcmd := exec.Command(\"docker\", \"rm\", name)\n\terr = cmd.Run()\n\n\tbindSelfAsInit := false\n\tif init == \"internal\" {\n\t\tinit = \"\/init\"\n\t\tbindSelfAsInit = true\n\t}\n\tthisBinary := \"\/usr\/local\/bin\/dockersh\"\n\tif os.Getenv(\"SHELL\") != \"\/usr\/local\/bin\/dockersh\" {\n\t\tthisBinary, _ = filepath.Abs(os.Args[0])\n\t}\n\tvar cmdtxt = []string{\"run\", \"-d\", \"-u\", username,\n\t\t\"-v\", \"\/etc\/passwd:\/etc\/passwd:ro\", \"-v\", \"\/etc\/group:\/etc\/group:ro\"}\n\tif len(dockeropts) > 0 {\n\t\tfor _, element := range dockeropts {\n\t\t\tcmdtxt = append(cmdtxt, element)\n\t\t}\n\t}\n\tif bindtmp {\n\t\tcmdtxt = append(cmdtxt, \"-v\", \"\/tmp:\/tmp\")\n\t}\n\tif bindhome {\n\t\tcmdtxt = append(cmdtxt, \"-v\", fmt.Sprintf(\"%s:%s:rw\", homedirfrom, homedirto))\n\t}\n\tif bindSelfAsInit {\n\t\tcmdtxt = append(cmdtxt, \"-v\", thisBinary+\":\/init\")\n\t}\n\tif binddocker {\n\t\tcmdtxt = append(cmdtxt, \"-v\", dockersock+\":\/var\/run\/docker.sock\")\n\t}\n\tcmdtxt = append(cmdtxt, \"--name\", name, \"--entrypoint\", init, container)\n\tif len(cmdargs) > 0 {\n\t\tfor _, element := range cmdargs {\n\t\t\tcmdtxt = append(cmdtxt, element)\n\t\t}\n\t} else {\n\t\tcmdtxt = append(cmdtxt, \"\")\n\t}\n\t\/\/fmt.Fprintf(os.Stderr, \"docker %s\\n\", strings.Join(cmdtxt, \" \"))\n\tcmd = exec.Command(\"docker\", cmdtxt...)\n\tvar output bytes.Buffer\n\tcmd.Stdout = &output\n\tcmd.Stderr = &output\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + output.String())\n\t}\n\treturn dockerpid(name)\n}\n<commit_msg>Use the new --cap-drop flag instead<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc dockerpid(name string) (pid int, err error) {\n\tcmd := exec.Command(\"docker\", \"inspect\", \"--format\", \"{{.State.Pid}}\", name)\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\n\tpid, err = strconv.Atoi(strings.TrimSpace(string(output)))\n\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\tif pid == 0 {\n\t\treturn -1, errors.New(\"Invalid PID\")\n\t}\n\treturn pid, nil\n}\n\nfunc dockersha(name string) (sha string, err error) {\n\tcmd := exec.Command(\"docker\", \"inspect\", \"--format\", \"{{.Id}}\", name)\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn sha, errors.New(err.Error() + \":\\n\" + string(output))\n\t}\n\tsha = strings.TrimSpace(string(output))\n\tif sha == \"\" {\n\t\treturn \"\", errors.New(\"Invalid SHA\")\n\t}\n\treturn sha, nil\n}\n\nfunc dockerstart(username string, homedirfrom string, homedirto string, name string, container string, dockersock string, bindhome bool, bindtmp bool, binddocker bool, init string, cmdargs []string, dockeropts []string) (pid int, err error) {\n\tcmd := exec.Command(\"docker\", \"rm\", name)\n\terr = cmd.Run()\n\n\tbindSelfAsInit := false\n\tif init == \"internal\" {\n\t\tinit = \"\/init\"\n\t\tbindSelfAsInit = true\n\t}\n\tthisBinary := \"\/usr\/local\/bin\/dockersh\"\n\tif os.Getenv(\"SHELL\") != \"\/usr\/local\/bin\/dockersh\" {\n\t\tthisBinary, _ = filepath.Abs(os.Args[0])\n\t}\n\tvar cmdtxt = []string{\"run\", \"-d\", \"-u\", username,\n\t\t\"-v\", \"\/etc\/passwd:\/etc\/passwd:ro\", \"-v\", \"\/etc\/group:\/etc\/group:ro\",\n\t\t\"--cap-drop\", \"SUID\", \"--cap-drop\", \"SGID\", \"--cap-drop\", \"NET_RAW\",\n\t\t\"--cap-drop\", \"MKNOD\"}\n\tif len(dockeropts) > 0 {\n\t\tfor _, element := range dockeropts {\n\t\t\tcmdtxt = append(cmdtxt, element)\n\t\t}\n\t}\n\tif bindtmp {\n\t\tcmdtxt = append(cmdtxt, \"-v\", \"\/tmp:\/tmp\")\n\t}\n\tif bindhome {\n\t\tcmdtxt = append(cmdtxt, \"-v\", fmt.Sprintf(\"%s:%s:rw\", homedirfrom, homedirto))\n\t}\n\tif bindSelfAsInit {\n\t\tcmdtxt = append(cmdtxt, \"-v\", thisBinary+\":\/init\")\n\t}\n\tif binddocker {\n\t\tcmdtxt = append(cmdtxt, \"-v\", dockersock+\":\/var\/run\/docker.sock\")\n\t}\n\tcmdtxt = append(cmdtxt, \"--name\", name, \"--entrypoint\", init, container)\n\tif len(cmdargs) > 0 {\n\t\tfor _, element := range cmdargs {\n\t\t\tcmdtxt = append(cmdtxt, element)\n\t\t}\n\t} else {\n\t\tcmdtxt = append(cmdtxt, \"\")\n\t}\n\t\/\/fmt.Fprintf(os.Stderr, \"docker %s\\n\", strings.Join(cmdtxt, \" \"))\n\tcmd = exec.Command(\"docker\", cmdtxt...)\n\tvar output bytes.Buffer\n\tcmd.Stdout = &output\n\tcmd.Stderr = &output\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn -1, errors.New(err.Error() + \":\\n\" + output.String())\n\t}\n\treturn dockerpid(name)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n* Copyright 2013 CoreOS, Inc\n* Copyright 2013 Docker Authors\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\t\"github.com\/dotcloud\/docker\/registry\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n)\n\nconst ContainerDir = \"\/var\/lib\/containers\/\"\n\ntype Context struct {\n\tPath string\n\tContainerPath string\n\tRegistry *registry.Registry\n\tGraph *docker.Graph\n\tRepositories *docker.TagStore\n}\n\nvar context Context\n\nfunc pullImage(c *Context, imgId, registry string, token []string) error {\n\thistory, err := c.Registry.GetRemoteHistory(imgId, registry, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ FIXME: Try to stream the images?\n\t\/\/ FIXME: Launch the getRemoteImage() in goroutines\n\tfor _, id := range history {\n\t\tif !c.Graph.Exists(id) {\n\t\t\tlog.Printf(\"Pulling %s metadata\\r\\n\", id)\n\t\t\timgJson, err := c.Registry.GetRemoteImageJson(id, registry, token)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ FIXME: Keep goging in case of error?\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timg, err := docker.NewImgJson(imgJson)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to parse json: %s\", err)\n\t\t\t}\n\n\t\t\t\/\/ Get the layer\n\t\t\tlog.Printf(\"Pulling %s fs layer\\r\\n\", img.Id)\n\t\t\tlayer, _, err := c.Registry.GetRemoteImageLayer(img.Id, registry, token)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := c.Graph.Register(layer, false, img); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TODO: add tag support\nfunc pullHandler(w http.ResponseWriter, r *http.Request, c *Context) {\n\tvars := mux.Vars(r)\n\tremote := vars[\"remote\"]\n\n\trepoData, err := c.Registry.GetRepositoryData(remote)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttagsList, err := c.Registry.GetRemoteTags(repoData.Endpoints, remote, repoData.Tokens)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor tag, id := range tagsList {\n\t\trepoData.ImgList[id].Tag = tag\n\t}\n\n\tfor _, img := range repoData.ImgList {\n\t\tlog.Printf(\"Pulling image %s (%s) from %s\\n\", img.Id, img.Tag, remote)\n\t\tsuccess := false\n\n\t\tfor _, ep := range repoData.Endpoints {\n\t\t\tif err := pullImage(c, img.Id, \"https:\/\/\"+ep+\"\/v1\", repoData.Tokens); err != nil {\n\t\t\t\tfmt.Printf(\"Error while retrieving image for tag: %s; checking next endpoint\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsuccess = true\n\t\t\tbreak\n\t\t}\n\n\t\tif !success {\n\t\t\tlog.Fatal(\"Could not find repository on any of the indexed registries.\")\n\t\t}\n\t}\n\n\tfor tag, id := range tagsList {\n\t\tif err := c.Repositories.Set(remote, tag, id, true); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\tif err := c.Repositories.Save(); err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"%v\\n\", repoData)\n}\n\nfunc createHandler(w http.ResponseWriter, r *http.Request, c *Context) {\n\timageName := r.FormValue(\"image\")\n\n\t\/\/ TODO: @philips Don't hardcode the tag name here\n\timage, err := c.Repositories.GetImage(imageName, \"latest\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tif imageName == \"\" {\n\t\tw.WriteHeader(404)\n\t\tfmt.Fprintf(w, \"Cannot find container image: %s\", imageName)\n\t\treturn\n\t}\n\n\t\/\/ Figure out the resting place of the container\n\tvars := mux.Vars(r)\n\tcontainer := vars[\"container\"]\n\n\tvalidID := regexp.MustCompile(`^[A-Za-z0-9]+$`)\n\tif !validID.MatchString(container) {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, \"Invalid container name: %s\\n\", container)\n\t\treturn\n\t}\n\n\tcontainer = path.Join(c.ContainerPath, container)\n\n\terr = os.Mkdir(container, 0700)\n\tif os.IsExist(err) {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, \"Existing container: %s\\n\", container)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprint(w, \"Error\")\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tvar images []docker.Image\n\n\tcreateList := func(img *docker.Image) (err error) {\n\t\timages = append(images, *img)\n\t\treturn\n\t}\n\terr = image.WalkHistory(createList)\n\n\tfor i := len(images) - 1; i >= 0; i-- {\n\t\timg := images[i]\n\t\tlog.Printf(\"Copying %s into %s\", img.Id, container)\n\t\ttarball, err := img.TarLayer(docker.Uncompressed)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tif err := docker.Untar(tarball, container); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"ok\")\n\treturn\n}\n\nfunc setupDocker(r *mux.Router, o Options) {\n\t\/\/ Use the \/var\/lib\/containers directory by default\n\tcontext.ContainerPath = path.Join(o.Dir, ContainerDir)\n\tif err := os.MkdirAll(context.ContainerPath, 0700); err != nil && !os.IsExist(err) {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tcontext.Registry = registry.NewRegistry(context.ContainerPath)\n\n\t\/\/ Put all docker images into the docker directory\n\tcontext.Path = path.Join(o.Dir, StateDir, \"docker\")\n\n\tp := path.Join(context.Path, \"graph\")\n\tif err := os.MkdirAll(p, 0700); err != nil && !os.IsExist(err) {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tg, _ := docker.NewGraph(p)\n\tcontext.Graph = g\n\n\tp = path.Join(context.Path, \"repositories\")\n\tt, _ := docker.NewTagStore(p, g)\n\tcontext.Repositories = t\n\n\tmakeHandler := func(fn func(http.ResponseWriter, *http.Request, *Context)) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tfn(w, r, &context)\n\t\t}\n\t}\n\n\tr.HandleFunc(\"\/registry\/pull\/{remote:.*}\", makeHandler(pullHandler))\n\tr.HandleFunc(\"\/container\/create\/{container:.*}\", makeHandler(createHandler))\n}\n<commit_msg>fix(docker): don't return early<commit_after>\/*\n* Copyright 2013 CoreOS, Inc\n* Copyright 2013 Docker Authors\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\t\"github.com\/dotcloud\/docker\/registry\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n)\n\nconst ContainerDir = \"\/var\/lib\/containers\/\"\n\ntype Context struct {\n\tPath string\n\tContainerPath string\n\tRegistry *registry.Registry\n\tGraph *docker.Graph\n\tRepositories *docker.TagStore\n}\n\nvar context Context\n\nfunc pullImage(c *Context, imgId, registry string, token []string) error {\n\thistory, err := c.Registry.GetRemoteHistory(imgId, registry, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ FIXME: Try to stream the images?\n\t\/\/ FIXME: Launch the getRemoteImage() in goroutines\n\tfor _, id := range history {\n\t\tif !c.Graph.Exists(id) {\n\t\t\tlog.Printf(\"Pulling %s metadata\\r\\n\", id)\n\t\t\timgJson, err := c.Registry.GetRemoteImageJson(id, registry, token)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ FIXME: Keep goging in case of error?\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timg, err := docker.NewImgJson(imgJson)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to parse json: %s\", err)\n\t\t\t}\n\n\t\t\t\/\/ Get the layer\n\t\t\tlog.Printf(\"Pulling %s fs layer\\r\\n\", img.Id)\n\t\t\tlayer, _, err := c.Registry.GetRemoteImageLayer(img.Id, registry, token)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := c.Graph.Register(layer, false, img); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TODO: add tag support\nfunc pullHandler(w http.ResponseWriter, r *http.Request, c *Context) {\n\tvars := mux.Vars(r)\n\tremote := vars[\"remote\"]\n\n\trepoData, err := c.Registry.GetRepositoryData(remote)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttagsList, err := c.Registry.GetRemoteTags(repoData.Endpoints, remote, repoData.Tokens)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor tag, id := range tagsList {\n\t\trepoData.ImgList[id].Tag = tag\n\t}\n\n\tfor _, img := range repoData.ImgList {\n\t\tlog.Printf(\"Pulling image %s (%s) from %s\\n\", img.Id, img.Tag, remote)\n\t\tsuccess := false\n\n\t\tfor _, ep := range repoData.Endpoints {\n\t\t\tif err := pullImage(c, img.Id, \"https:\/\/\"+ep+\"\/v1\", repoData.Tokens); err != nil {\n\t\t\t\tfmt.Printf(\"Error while retrieving image for tag: %s; checking next endpoint\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsuccess = true\n\t\t\tbreak\n\t\t}\n\n\t\tif !success {\n\t\t\tlog.Fatal(\"Could not find repository on any of the indexed registries.\")\n\t\t}\n\t}\n\n\tfor tag, id := range tagsList {\n\t\tif err := c.Repositories.Set(remote, tag, id, true); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\tif err := c.Repositories.Save(); err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"%v\\n\", repoData)\n}\n\nfunc createHandler(w http.ResponseWriter, r *http.Request, c *Context) {\n\timageName := r.FormValue(\"image\")\n\n\t\/\/ TODO: @philips Don't hardcode the tag name here\n\timage, err := c.Repositories.GetImage(imageName, \"latest\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tif imageName == \"\" {\n\t\tw.WriteHeader(404)\n\t\tfmt.Fprintf(w, \"Cannot find container image: %s\", imageName)\n\t\treturn\n\t}\n\n\t\/\/ Figure out the resting place of the container\n\tvars := mux.Vars(r)\n\tcontainer := vars[\"container\"]\n\n\tvalidID := regexp.MustCompile(`^[A-Za-z0-9]+$`)\n\tif !validID.MatchString(container) {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, \"Invalid container name: %s\\n\", container)\n\t\treturn\n\t}\n\n\tcontainer = path.Join(c.ContainerPath, container)\n\n\terr = os.Mkdir(container, 0700)\n\tif os.IsExist(err) {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, \"Existing container: %s\\n\", container)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprint(w, \"Error\")\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tvar images []docker.Image\n\n\tcreateList := func(img *docker.Image) (err error) {\n\t\timages = append(images, *img)\n\t\treturn\n\t}\n\terr = image.WalkHistory(createList)\n\n\tfor i := len(images) - 1; i > 0; i-- {\n\t\timg := images[i]\n\t\tlog.Printf(\"Copying %s into %s\", img.Id, container)\n\t\ttarball, err := img.TarLayer(docker.Uncompressed)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tif err := docker.Untar(tarball, container); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"ok\")\n\treturn\n}\n\nfunc setupDocker(r *mux.Router, o Options) {\n\t\/\/ Use the \/var\/lib\/containers directory by default\n\tcontext.ContainerPath = path.Join(o.Dir, ContainerDir)\n\tif err := os.MkdirAll(context.ContainerPath, 0700); err != nil && !os.IsExist(err) {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tcontext.Registry = registry.NewRegistry(context.ContainerPath)\n\n\t\/\/ Put all docker images into the docker directory\n\tcontext.Path = path.Join(o.Dir, StateDir, \"docker\")\n\n\tp := path.Join(context.Path, \"graph\")\n\tif err := os.MkdirAll(p, 0700); err != nil && !os.IsExist(err) {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tg, _ := docker.NewGraph(p)\n\tcontext.Graph = g\n\n\tp = path.Join(context.Path, \"repositories\")\n\tt, _ := docker.NewTagStore(p, g)\n\tcontext.Repositories = t\n\n\tmakeHandler := func(fn func(http.ResponseWriter, *http.Request, *Context)) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tfn(w, r, &context)\n\t\t}\n\t}\n\n\tr.HandleFunc(\"\/registry\/pull\/{remote:.*}\", makeHandler(pullHandler))\n\tr.HandleFunc(\"\/container\/create\/{container:.*}\", makeHandler(createHandler))\n}\n<|endoftext|>"} {"text":"<commit_before>package protocol\n\nimport (\n\t\"bufio\"\n\t\"compress\/flate\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/calmh\/syncthing\/xdr\"\n)\n\nconst BlockSize = 128 * 1024\n\nconst (\n\tmessageTypeClusterConfig = 0\n\tmessageTypeIndex = 1\n\tmessageTypeRequest = 2\n\tmessageTypeResponse = 3\n\tmessageTypePing = 4\n\tmessageTypePong = 5\n\tmessageTypeIndexUpdate = 6\n)\n\nconst (\n\tFlagDeleted uint32 = 1 << 12\n\tFlagInvalid = 1 << 13\n\tFlagDirectory = 1 << 14\n\tFlagNoPermBits = 1 << 15\n)\n\nconst (\n\tFlagShareTrusted uint32 = 1 << 0\n\tFlagShareReadOnly = 1 << 1\n\tFlagShareBits = 0x000000ff\n)\n\nvar (\n\tErrClusterHash = fmt.Errorf(\"configuration error: mismatched cluster hash\")\n\tErrClosed = errors.New(\"connection closed\")\n)\n\ntype Model interface {\n\t\/\/ An index was received from the peer node\n\tIndex(nodeID string, repo string, files []FileInfo)\n\t\/\/ An index update was received from the peer node\n\tIndexUpdate(nodeID string, repo string, files []FileInfo)\n\t\/\/ A request was made by the peer node\n\tRequest(nodeID string, repo string, name string, offset int64, size int) ([]byte, error)\n\t\/\/ A cluster configuration message was received\n\tClusterConfig(nodeID string, config ClusterConfigMessage)\n\t\/\/ The peer node closed the connection\n\tClose(nodeID string, err error)\n}\n\ntype Connection interface {\n\tID() string\n\tIndex(repo string, files []FileInfo)\n\tRequest(repo string, name string, offset int64, size int) ([]byte, error)\n\tClusterConfig(config ClusterConfigMessage)\n\tStatistics() Statistics\n}\n\ntype rawConnection struct {\n\tid string\n\treceiver Model\n\n\treader io.ReadCloser\n\tcr *countingReader\n\txr *xdr.Reader\n\twriter io.WriteCloser\n\n\tcw *countingWriter\n\twb *bufio.Writer\n\txw *xdr.Writer\n\twmut sync.Mutex\n\n\tindexSent map[string]map[string][2]int64\n\tawaiting []chan asyncResult\n\timut sync.Mutex\n\n\tnextID chan int\n\toutbox chan []encodable\n\tclosed chan struct{}\n}\n\ntype asyncResult struct {\n\tval []byte\n\terr error\n}\n\nconst (\n\tpingTimeout = 4 * time.Minute\n\tpingIdleTime = 5 * time.Minute\n)\n\nfunc NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {\n\tcr := &countingReader{Reader: reader}\n\tcw := &countingWriter{Writer: writer}\n\n\tflrd := flate.NewReader(cr)\n\tflwr, err := flate.NewWriter(cw, flate.BestSpeed)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twb := bufio.NewWriter(flwr)\n\n\tc := rawConnection{\n\t\tid: nodeID,\n\t\treceiver: nativeModel{receiver},\n\t\treader: flrd,\n\t\tcr: cr,\n\t\txr: xdr.NewReader(flrd),\n\t\twriter: flwr,\n\t\tcw: cw,\n\t\twb: wb,\n\t\txw: xdr.NewWriter(wb),\n\t\tawaiting: make([]chan asyncResult, 0x1000),\n\t\tindexSent: make(map[string]map[string][2]int64),\n\t\toutbox: make(chan []encodable),\n\t\tnextID: make(chan int),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo c.readerLoop()\n\tgo c.writerLoop()\n\tgo c.pingerLoop()\n\tgo c.idGenerator()\n\n\treturn wireFormatConnection{&c}\n}\n\nfunc (c *rawConnection) ID() string {\n\treturn c.id\n}\n\n\/\/ Index writes the list of file information to the connected peer node\nfunc (c *rawConnection) Index(repo string, idx []FileInfo) {\n\tc.imut.Lock()\n\tvar msgType int\n\tif c.indexSent[repo] == nil {\n\t\t\/\/ This is the first time we send an index.\n\t\tmsgType = messageTypeIndex\n\n\t\tc.indexSent[repo] = make(map[string][2]int64)\n\t\tfor _, f := range idx {\n\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t}\n\t} else {\n\t\t\/\/ We have sent one full index. Only send updates now.\n\t\tmsgType = messageTypeIndexUpdate\n\t\tvar diff []FileInfo\n\t\tfor _, f := range idx {\n\t\t\tif vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t\t}\n\t\t}\n\t\tidx = diff\n\t}\n\tc.imut.Unlock()\n\n\tc.send(header{0, -1, msgType}, IndexMessage{repo, idx})\n}\n\n\/\/ Request returns the bytes for the specified block after fetching them from the connected peer.\nfunc (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {\n\tvar id int\n\tselect {\n\tcase id = <-c.nextID:\n\tcase <-c.closed:\n\t\treturn nil, ErrClosed\n\t}\n\n\tc.imut.Lock()\n\tif ch := c.awaiting[id]; ch != nil {\n\t\tpanic(\"id taken\")\n\t}\n\trc := make(chan asyncResult)\n\tc.awaiting[id] = rc\n\tc.imut.Unlock()\n\n\tok := c.send(header{0, id, messageTypeRequest},\n\t\tRequestMessage{repo, name, uint64(offset), uint32(size)})\n\tif !ok {\n\t\treturn nil, ErrClosed\n\t}\n\n\tres, ok := <-rc\n\tif !ok {\n\t\treturn nil, ErrClosed\n\t}\n\treturn res.val, res.err\n}\n\n\/\/ ClusterConfig send the cluster configuration message to the peer and returns any error\nfunc (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {\n\tc.send(header{0, -1, messageTypeClusterConfig}, config)\n}\n\nfunc (c *rawConnection) ping() bool {\n\tvar id int\n\tselect {\n\tcase id = <-c.nextID:\n\tcase <-c.closed:\n\t\treturn false\n\t}\n\n\trc := make(chan asyncResult, 1)\n\tc.imut.Lock()\n\tc.awaiting[id] = rc\n\tc.imut.Unlock()\n\n\tok := c.send(header{0, id, messageTypePing})\n\tif !ok {\n\t\treturn false\n\t}\n\n\tres, ok := <-rc\n\treturn ok && res.err == nil\n}\n\nfunc (c *rawConnection) readerLoop() (err error) {\n\tdefer func() {\n\t\tc.close(err)\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.closed:\n\t\t\treturn ErrClosed\n\t\tdefault:\n\t\t}\n\n\t\tvar hdr header\n\t\thdr.decodeXDR(c.xr)\n\t\tif err := c.xr.Error(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif hdr.version != 0 {\n\t\t\treturn fmt.Errorf(\"protocol error: %s: unknown message version %#x\", c.id, hdr.version)\n\t\t}\n\n\t\tswitch hdr.msgType {\n\t\tcase messageTypeIndex:\n\t\t\tif err := c.handleIndex(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeIndexUpdate:\n\t\t\tif err := c.handleIndexUpdate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeRequest:\n\t\t\tif err := c.handleRequest(hdr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeResponse:\n\t\t\tif err := c.handleResponse(hdr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypePing:\n\t\t\tc.send(header{0, hdr.msgID, messageTypePong})\n\n\t\tcase messageTypePong:\n\t\t\tc.handlePong(hdr)\n\n\t\tcase messageTypeClusterConfig:\n\t\t\tif err := c.handleClusterConfig(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"protocol error: %s: unknown message type %#x\", c.id, hdr.msgType)\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) handleIndex() error {\n\tvar im IndexMessage\n\tim.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\n\t\t\/\/ We run this (and the corresponding one for update, below)\n\t\t\/\/ in a separate goroutine to avoid blocking the read loop.\n\t\t\/\/ There is otherwise a potential deadlock where both sides\n\t\t\/\/ has the model locked because it's sending a large index\n\t\t\/\/ update and can't receive the large index update from the\n\t\t\/\/ other side.\n\n\t\tgo c.receiver.Index(c.id, im.Repository, im.Files)\n\t}\n\treturn nil\n}\n\nfunc (c *rawConnection) handleIndexUpdate() error {\n\tvar im IndexMessage\n\tim.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\t\tgo c.receiver.IndexUpdate(c.id, im.Repository, im.Files)\n\t}\n\treturn nil\n}\n\nfunc (c *rawConnection) handleRequest(hdr header) error {\n\tvar req RequestMessage\n\treq.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t}\n\tgo c.processRequest(hdr.msgID, req)\n\treturn nil\n}\n\nfunc (c *rawConnection) handleResponse(hdr header) error {\n\tdata := c.xr.ReadBytesMax(256 * 1024) \/\/ Sufficiently larger than max expected block size\n\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t}\n\n\tgo func(hdr header, err error) {\n\t\tc.imut.Lock()\n\t\trc := c.awaiting[hdr.msgID]\n\t\tc.awaiting[hdr.msgID] = nil\n\t\tc.imut.Unlock()\n\n\t\tif rc != nil {\n\t\t\trc <- asyncResult{data, err}\n\t\t\tclose(rc)\n\t\t}\n\t}(hdr, c.xr.Error())\n\n\treturn nil\n}\n\nfunc (c *rawConnection) handlePong(hdr header) {\n\tc.imut.Lock()\n\tif rc := c.awaiting[hdr.msgID]; rc != nil {\n\t\tgo func() {\n\t\t\trc <- asyncResult{}\n\t\t\tclose(rc)\n\t\t}()\n\n\t\tc.awaiting[hdr.msgID] = nil\n\t}\n\tc.imut.Unlock()\n}\n\nfunc (c *rawConnection) handleClusterConfig() error {\n\tvar cm ClusterConfigMessage\n\tcm.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\t\tgo c.receiver.ClusterConfig(c.id, cm)\n\t}\n\treturn nil\n}\n\ntype encodable interface {\n\tencodeXDR(*xdr.Writer) (int, error)\n}\ntype encodableBytes []byte\n\nfunc (e encodableBytes) encodeXDR(xw *xdr.Writer) (int, error) {\n\treturn xw.WriteBytes(e)\n}\n\nfunc (c *rawConnection) send(h header, es ...encodable) bool {\n\tif h.msgID < 0 {\n\t\tselect {\n\t\tcase id := <-c.nextID:\n\t\t\th.msgID = id\n\t\tcase <-c.closed:\n\t\t\treturn false\n\t\t}\n\t}\n\tmsg := append([]encodable{h}, es...)\n\n\tselect {\n\tcase c.outbox <- msg:\n\t\treturn true\n\tcase <-c.closed:\n\t\treturn false\n\t}\n}\n\nfunc (c *rawConnection) writerLoop() {\n\tvar err error\n\tfor es := range c.outbox {\n\t\tc.wmut.Lock()\n\t\tfor _, e := range es {\n\t\t\te.encodeXDR(c.xw)\n\t\t}\n\n\t\tif err = c.flush(); err != nil {\n\t\t\tc.wmut.Unlock()\n\t\t\tc.close(err)\n\t\t\treturn\n\t\t}\n\t\tc.wmut.Unlock()\n\t}\n}\n\ntype flusher interface {\n\tFlush() error\n}\n\nfunc (c *rawConnection) flush() error {\n\tif err := c.xw.Error(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.wb.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif f, ok := c.writer.(flusher); ok {\n\t\treturn f.Flush()\n\t}\n\n\treturn nil\n}\n\nfunc (c *rawConnection) close(err error) {\n\tc.imut.Lock()\n\tc.wmut.Lock()\n\tdefer c.imut.Unlock()\n\tdefer c.wmut.Unlock()\n\n\tselect {\n\tcase <-c.closed:\n\t\treturn\n\tdefault:\n\t\tclose(c.closed)\n\n\t\tfor i, ch := range c.awaiting {\n\t\t\tif ch != nil {\n\t\t\t\tclose(ch)\n\t\t\t\tc.awaiting[i] = nil\n\t\t\t}\n\t\t}\n\n\t\tc.writer.Close()\n\t\tc.reader.Close()\n\n\t\tgo c.receiver.Close(c.id, err)\n\t}\n}\n\nfunc (c *rawConnection) idGenerator() {\n\tnextID := 0\n\tfor {\n\t\tnextID = (nextID + 1) & 0xfff\n\t\tselect {\n\t\tcase c.nextID <- nextID:\n\t\tcase <-c.closed:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) pingerLoop() {\n\tvar rc = make(chan bool, 1)\n\tticker := time.Tick(pingIdleTime \/ 2)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker:\n\t\t\tgo func() {\n\t\t\t\trc <- c.ping()\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase ok := <-rc:\n\t\t\t\tif !ok {\n\t\t\t\t\tc.close(fmt.Errorf(\"ping failure\"))\n\t\t\t\t}\n\t\t\tcase <-time.After(pingTimeout):\n\t\t\t\tc.close(fmt.Errorf(\"ping timeout\"))\n\t\t\tcase <-c.closed:\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-c.closed:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) processRequest(msgID int, req RequestMessage) {\n\tdata, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))\n\n\tc.send(header{0, msgID, messageTypeResponse},\n\t\tencodableBytes(data))\n}\n\ntype Statistics struct {\n\tAt time.Time\n\tInBytesTotal int\n\tOutBytesTotal int\n}\n\nfunc (c *rawConnection) Statistics() Statistics {\n\treturn Statistics{\n\t\tAt: time.Now(),\n\t\tInBytesTotal: int(c.cr.Tot()),\n\t\tOutBytesTotal: int(c.cw.Tot()),\n\t}\n}\n\nfunc IsDeleted(bits uint32) bool {\n\treturn bits&FlagDeleted != 0\n}\n\nfunc IsInvalid(bits uint32) bool {\n\treturn bits&FlagInvalid != 0\n}\n\nfunc IsDirectory(bits uint32) bool {\n\treturn bits&FlagDirectory != 0\n}\n\nfunc HasPermissionBits(bits uint32) bool {\n\treturn bits&FlagNoPermBits == 0\n}\n<commit_msg>More memory efficient index sending<commit_after>package protocol\n\nimport (\n\t\"bufio\"\n\t\"compress\/flate\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/calmh\/syncthing\/xdr\"\n)\n\nconst BlockSize = 128 * 1024\n\nconst (\n\tmessageTypeClusterConfig = 0\n\tmessageTypeIndex = 1\n\tmessageTypeRequest = 2\n\tmessageTypeResponse = 3\n\tmessageTypePing = 4\n\tmessageTypePong = 5\n\tmessageTypeIndexUpdate = 6\n)\n\nconst (\n\tFlagDeleted uint32 = 1 << 12\n\tFlagInvalid = 1 << 13\n\tFlagDirectory = 1 << 14\n\tFlagNoPermBits = 1 << 15\n)\n\nconst (\n\tFlagShareTrusted uint32 = 1 << 0\n\tFlagShareReadOnly = 1 << 1\n\tFlagShareBits = 0x000000ff\n)\n\nvar (\n\tErrClusterHash = fmt.Errorf(\"configuration error: mismatched cluster hash\")\n\tErrClosed = errors.New(\"connection closed\")\n)\n\ntype Model interface {\n\t\/\/ An index was received from the peer node\n\tIndex(nodeID string, repo string, files []FileInfo)\n\t\/\/ An index update was received from the peer node\n\tIndexUpdate(nodeID string, repo string, files []FileInfo)\n\t\/\/ A request was made by the peer node\n\tRequest(nodeID string, repo string, name string, offset int64, size int) ([]byte, error)\n\t\/\/ A cluster configuration message was received\n\tClusterConfig(nodeID string, config ClusterConfigMessage)\n\t\/\/ The peer node closed the connection\n\tClose(nodeID string, err error)\n}\n\ntype Connection interface {\n\tID() string\n\tIndex(repo string, files []FileInfo)\n\tRequest(repo string, name string, offset int64, size int) ([]byte, error)\n\tClusterConfig(config ClusterConfigMessage)\n\tStatistics() Statistics\n}\n\ntype rawConnection struct {\n\tid string\n\treceiver Model\n\n\treader io.ReadCloser\n\tcr *countingReader\n\txr *xdr.Reader\n\twriter io.WriteCloser\n\n\tcw *countingWriter\n\twb *bufio.Writer\n\txw *xdr.Writer\n\twmut sync.Mutex\n\n\tindexSent map[string]uint64\n\tawaiting []chan asyncResult\n\timut sync.Mutex\n\n\tnextID chan int\n\toutbox chan []encodable\n\tclosed chan struct{}\n}\n\ntype asyncResult struct {\n\tval []byte\n\terr error\n}\n\nconst (\n\tpingTimeout = 4 * time.Minute\n\tpingIdleTime = 5 * time.Minute\n)\n\nfunc NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {\n\tcr := &countingReader{Reader: reader}\n\tcw := &countingWriter{Writer: writer}\n\n\tflrd := flate.NewReader(cr)\n\tflwr, err := flate.NewWriter(cw, flate.BestSpeed)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twb := bufio.NewWriter(flwr)\n\n\tc := rawConnection{\n\t\tid: nodeID,\n\t\treceiver: nativeModel{receiver},\n\t\treader: flrd,\n\t\tcr: cr,\n\t\txr: xdr.NewReader(flrd),\n\t\twriter: flwr,\n\t\tcw: cw,\n\t\twb: wb,\n\t\txw: xdr.NewWriter(wb),\n\t\tindexSent: make(map[string]uint64),\n\t\tawaiting: make([]chan asyncResult, 0x1000),\n\t\toutbox: make(chan []encodable),\n\t\tnextID: make(chan int),\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tgo c.readerLoop()\n\tgo c.writerLoop()\n\tgo c.pingerLoop()\n\tgo c.idGenerator()\n\n\treturn wireFormatConnection{&c}\n}\n\nfunc (c *rawConnection) ID() string {\n\treturn c.id\n}\n\n\/\/ Index writes the list of file information to the connected peer node\nfunc (c *rawConnection) Index(repo string, idx []FileInfo) {\n\tc.imut.Lock()\n\tvar msgType int\n\tmaxSent := c.indexSent[repo]\n\tvar newMaxSent uint64\n\tif maxSent == 0 {\n\t\t\/\/ This is the first time we send an index.\n\t\tmsgType = messageTypeIndex\n\t\tfor _, f := range idx {\n\t\t\tif f.Version > newMaxSent {\n\t\t\t\tnewMaxSent = f.Version\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ We have sent one full index. Only send updates now.\n\t\tmsgType = messageTypeIndexUpdate\n\t\tvar diff []FileInfo\n\t\tfor _, f := range idx {\n\t\t\tif f.Version > maxSent {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t\tnewMaxSent = f.Version\n\t\t\t}\n\t\t}\n\t\tidx = diff\n\t}\n\tc.indexSent[repo] = newMaxSent\n\tc.imut.Unlock()\n\n\tc.send(header{0, -1, msgType}, IndexMessage{repo, idx})\n}\n\n\/\/ Request returns the bytes for the specified block after fetching them from the connected peer.\nfunc (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {\n\tvar id int\n\tselect {\n\tcase id = <-c.nextID:\n\tcase <-c.closed:\n\t\treturn nil, ErrClosed\n\t}\n\n\tc.imut.Lock()\n\tif ch := c.awaiting[id]; ch != nil {\n\t\tpanic(\"id taken\")\n\t}\n\trc := make(chan asyncResult)\n\tc.awaiting[id] = rc\n\tc.imut.Unlock()\n\n\tok := c.send(header{0, id, messageTypeRequest},\n\t\tRequestMessage{repo, name, uint64(offset), uint32(size)})\n\tif !ok {\n\t\treturn nil, ErrClosed\n\t}\n\n\tres, ok := <-rc\n\tif !ok {\n\t\treturn nil, ErrClosed\n\t}\n\treturn res.val, res.err\n}\n\n\/\/ ClusterConfig send the cluster configuration message to the peer and returns any error\nfunc (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {\n\tc.send(header{0, -1, messageTypeClusterConfig}, config)\n}\n\nfunc (c *rawConnection) ping() bool {\n\tvar id int\n\tselect {\n\tcase id = <-c.nextID:\n\tcase <-c.closed:\n\t\treturn false\n\t}\n\n\trc := make(chan asyncResult, 1)\n\tc.imut.Lock()\n\tc.awaiting[id] = rc\n\tc.imut.Unlock()\n\n\tok := c.send(header{0, id, messageTypePing})\n\tif !ok {\n\t\treturn false\n\t}\n\n\tres, ok := <-rc\n\treturn ok && res.err == nil\n}\n\nfunc (c *rawConnection) readerLoop() (err error) {\n\tdefer func() {\n\t\tc.close(err)\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.closed:\n\t\t\treturn ErrClosed\n\t\tdefault:\n\t\t}\n\n\t\tvar hdr header\n\t\thdr.decodeXDR(c.xr)\n\t\tif err := c.xr.Error(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif hdr.version != 0 {\n\t\t\treturn fmt.Errorf(\"protocol error: %s: unknown message version %#x\", c.id, hdr.version)\n\t\t}\n\n\t\tswitch hdr.msgType {\n\t\tcase messageTypeIndex:\n\t\t\tif err := c.handleIndex(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeIndexUpdate:\n\t\t\tif err := c.handleIndexUpdate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeRequest:\n\t\t\tif err := c.handleRequest(hdr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypeResponse:\n\t\t\tif err := c.handleResponse(hdr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase messageTypePing:\n\t\t\tc.send(header{0, hdr.msgID, messageTypePong})\n\n\t\tcase messageTypePong:\n\t\t\tc.handlePong(hdr)\n\n\t\tcase messageTypeClusterConfig:\n\t\t\tif err := c.handleClusterConfig(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"protocol error: %s: unknown message type %#x\", c.id, hdr.msgType)\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) handleIndex() error {\n\tvar im IndexMessage\n\tim.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\n\t\t\/\/ We run this (and the corresponding one for update, below)\n\t\t\/\/ in a separate goroutine to avoid blocking the read loop.\n\t\t\/\/ There is otherwise a potential deadlock where both sides\n\t\t\/\/ has the model locked because it's sending a large index\n\t\t\/\/ update and can't receive the large index update from the\n\t\t\/\/ other side.\n\n\t\tgo c.receiver.Index(c.id, im.Repository, im.Files)\n\t}\n\treturn nil\n}\n\nfunc (c *rawConnection) handleIndexUpdate() error {\n\tvar im IndexMessage\n\tim.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\t\tgo c.receiver.IndexUpdate(c.id, im.Repository, im.Files)\n\t}\n\treturn nil\n}\n\nfunc (c *rawConnection) handleRequest(hdr header) error {\n\tvar req RequestMessage\n\treq.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t}\n\tgo c.processRequest(hdr.msgID, req)\n\treturn nil\n}\n\nfunc (c *rawConnection) handleResponse(hdr header) error {\n\tdata := c.xr.ReadBytesMax(256 * 1024) \/\/ Sufficiently larger than max expected block size\n\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t}\n\n\tgo func(hdr header, err error) {\n\t\tc.imut.Lock()\n\t\trc := c.awaiting[hdr.msgID]\n\t\tc.awaiting[hdr.msgID] = nil\n\t\tc.imut.Unlock()\n\n\t\tif rc != nil {\n\t\t\trc <- asyncResult{data, err}\n\t\t\tclose(rc)\n\t\t}\n\t}(hdr, c.xr.Error())\n\n\treturn nil\n}\n\nfunc (c *rawConnection) handlePong(hdr header) {\n\tc.imut.Lock()\n\tif rc := c.awaiting[hdr.msgID]; rc != nil {\n\t\tgo func() {\n\t\t\trc <- asyncResult{}\n\t\t\tclose(rc)\n\t\t}()\n\n\t\tc.awaiting[hdr.msgID] = nil\n\t}\n\tc.imut.Unlock()\n}\n\nfunc (c *rawConnection) handleClusterConfig() error {\n\tvar cm ClusterConfigMessage\n\tcm.decodeXDR(c.xr)\n\tif err := c.xr.Error(); err != nil {\n\t\treturn err\n\t} else {\n\t\tgo c.receiver.ClusterConfig(c.id, cm)\n\t}\n\treturn nil\n}\n\ntype encodable interface {\n\tencodeXDR(*xdr.Writer) (int, error)\n}\ntype encodableBytes []byte\n\nfunc (e encodableBytes) encodeXDR(xw *xdr.Writer) (int, error) {\n\treturn xw.WriteBytes(e)\n}\n\nfunc (c *rawConnection) send(h header, es ...encodable) bool {\n\tif h.msgID < 0 {\n\t\tselect {\n\t\tcase id := <-c.nextID:\n\t\t\th.msgID = id\n\t\tcase <-c.closed:\n\t\t\treturn false\n\t\t}\n\t}\n\tmsg := append([]encodable{h}, es...)\n\n\tselect {\n\tcase c.outbox <- msg:\n\t\treturn true\n\tcase <-c.closed:\n\t\treturn false\n\t}\n}\n\nfunc (c *rawConnection) writerLoop() {\n\tvar err error\n\tfor es := range c.outbox {\n\t\tc.wmut.Lock()\n\t\tfor _, e := range es {\n\t\t\te.encodeXDR(c.xw)\n\t\t}\n\n\t\tif err = c.flush(); err != nil {\n\t\t\tc.wmut.Unlock()\n\t\t\tc.close(err)\n\t\t\treturn\n\t\t}\n\t\tc.wmut.Unlock()\n\t}\n}\n\ntype flusher interface {\n\tFlush() error\n}\n\nfunc (c *rawConnection) flush() error {\n\tif err := c.xw.Error(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.wb.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif f, ok := c.writer.(flusher); ok {\n\t\treturn f.Flush()\n\t}\n\n\treturn nil\n}\n\nfunc (c *rawConnection) close(err error) {\n\tc.imut.Lock()\n\tc.wmut.Lock()\n\tdefer c.imut.Unlock()\n\tdefer c.wmut.Unlock()\n\n\tselect {\n\tcase <-c.closed:\n\t\treturn\n\tdefault:\n\t\tclose(c.closed)\n\n\t\tfor i, ch := range c.awaiting {\n\t\t\tif ch != nil {\n\t\t\t\tclose(ch)\n\t\t\t\tc.awaiting[i] = nil\n\t\t\t}\n\t\t}\n\n\t\tc.writer.Close()\n\t\tc.reader.Close()\n\n\t\tgo c.receiver.Close(c.id, err)\n\t}\n}\n\nfunc (c *rawConnection) idGenerator() {\n\tnextID := 0\n\tfor {\n\t\tnextID = (nextID + 1) & 0xfff\n\t\tselect {\n\t\tcase c.nextID <- nextID:\n\t\tcase <-c.closed:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) pingerLoop() {\n\tvar rc = make(chan bool, 1)\n\tticker := time.Tick(pingIdleTime \/ 2)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker:\n\t\t\tgo func() {\n\t\t\t\trc <- c.ping()\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase ok := <-rc:\n\t\t\t\tif !ok {\n\t\t\t\t\tc.close(fmt.Errorf(\"ping failure\"))\n\t\t\t\t}\n\t\t\tcase <-time.After(pingTimeout):\n\t\t\t\tc.close(fmt.Errorf(\"ping timeout\"))\n\t\t\tcase <-c.closed:\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-c.closed:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *rawConnection) processRequest(msgID int, req RequestMessage) {\n\tdata, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))\n\n\tc.send(header{0, msgID, messageTypeResponse},\n\t\tencodableBytes(data))\n}\n\ntype Statistics struct {\n\tAt time.Time\n\tInBytesTotal int\n\tOutBytesTotal int\n}\n\nfunc (c *rawConnection) Statistics() Statistics {\n\treturn Statistics{\n\t\tAt: time.Now(),\n\t\tInBytesTotal: int(c.cr.Tot()),\n\t\tOutBytesTotal: int(c.cw.Tot()),\n\t}\n}\n\nfunc IsDeleted(bits uint32) bool {\n\treturn bits&FlagDeleted != 0\n}\n\nfunc IsInvalid(bits uint32) bool {\n\treturn bits&FlagInvalid != 0\n}\n\nfunc IsDirectory(bits uint32) bool {\n\treturn bits&FlagDirectory != 0\n}\n\nfunc HasPermissionBits(bits uint32) bool {\n\treturn bits&FlagNoPermBits == 0\n}\n<|endoftext|>"} {"text":"<commit_before>package vindinium\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Client struct {\n\turl string\n\tkey string\n\tmode string\n\tbot string\n\tturns int\n\n\tstate State\n}\n\nfunc NewClient(url, key, mode, bot string, turns int) *Client {\n\treturn &Client{\n\t\turl: url,\n\t\tkey: key,\n\t\tmode: mode,\n\t\tbot: bot,\n\t\tturns: turns,\n\t}\n}\n\nfunc (c *Client) post(uri string, params map[string]string) error {\n\tp := url.Values{}\n\tfor key := range params {\n\t\tp.Set(key, params[key])\n\t}\n\n\t\/\/ client should timeout after 10 minutes\n\tdialFunc := func(network, addr string) (net.Conn, error) {\n\t\ttimeout := time.Duration(10 * time.Minute)\n\t\treturn net.DialTimeout(network, addr, timeout)\n\t}\n\n\ttransport := http.Transport{Dial: dialFunc}\n\tclient := http.Client{Transport: &transport}\n\n\tresponse, err := client.PostForm(uri, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdecoder := json.NewDecoder(bytes.NewReader(data))\n\tif err := decoder.Decode(&c.state); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) Connect() error {\n\turi := c.url + \"\/api\/\" + c.mode\n\tparams := map[string]string{\n\t\t\"key\": c.key,\n\t\t\"turns\": strconv.Itoa(c.turns),\n\t\t\"map\": \"m1\",\n\t}\n\treturn c.post(uri, params)\n}\n\nfunc (c *Client) move(direction string) {\n}\n\nfunc (c *Client) isFinished() bool {\n\treturn c.state.Game.Finished\n}\n\nfunc (c *Client) Play() {\n\tfor !c.isFinished() {\n\t}\n}\n<commit_msg>Add timeout flag to POST request.<commit_after>package vindinium\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Client struct {\n\turl string\n\tkey string\n\tmode string\n\tbot string\n\tturns int\n\n\tstate State\n}\n\nfunc NewClient(url, key, mode, bot string, turns int) *Client {\n\treturn &Client{\n\t\turl: url,\n\t\tkey: key,\n\t\tmode: mode,\n\t\tbot: bot,\n\t\tturns: turns,\n\t}\n}\n\nfunc (c *Client) post(uri string, params map[string]string, timeout time.Duration) error {\n\tp := url.Values{}\n\tfor key := range params {\n\t\tp.Set(key, params[key])\n\t}\n\n\t\/\/ client should timeout after 10 minutes\n\tdialFunc := func(network, addr string) (net.Conn, error) {\n\t\treturn net.DialTimeout(network, addr, timeout)\n\t}\n\n\ttransport := http.Transport{Dial: dialFunc}\n\tclient := http.Client{Transport: &transport}\n\n\tresponse, err := client.PostForm(uri, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdecoder := json.NewDecoder(bytes.NewReader(data))\n\tif err := decoder.Decode(&c.state); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) Connect() error {\n\turi := c.url + \"\/api\/\" + c.mode\n\tparams := map[string]string{\n\t\t\"key\": c.key,\n\t\t\"turns\": strconv.Itoa(c.turns),\n\t\t\"map\": \"m1\",\n\t}\n\treturn c.post(uri, params, time.Duration(10 * time.Minute))\n}\n\nfunc (c *Client) move(direction string) {\n}\n\nfunc (c *Client) isFinished() bool {\n\treturn c.state.Game.Finished\n}\n\nfunc (c *Client) Play() {\n\tfor !c.isFinished() {\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n)\n\nvar RetryWaitTime = 6 * time.Second\n\nfunc Retry(name string, job func() error) (err error) {\n\twaitTime := time.Second\n\thasErr := false\n\tfor waitTime < RetryWaitTime {\n\t\terr = job()\n\t\tif err == nil {\n\t\t\tif hasErr {\n\t\t\t\tglog.V(0).Infof(\"retry %s successfully\", name)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(err.Error(), \"transport\") {\n\t\t\thasErr = true\n\t\t\tglog.V(0).Infof(\"retry %s: err: %v\", name, err)\n\t\t\ttime.Sleep(waitTime)\n\t\t\twaitTime += waitTime \/ 2\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\nfunc RetryForever(name string, job func() error, onErrFn func(err error) bool) {\n\twaitTime := time.Second\n\tfor {\n\t\terr := job()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif onErrFn(err) {\n\t\t\tif strings.Contains(err.Error(), \"transport\") {\n\t\t\t\tglog.V(0).Infof(\"retry %s: err: %v\", name, err)\n\t\t\t}\n\t\t\ttime.Sleep(waitTime)\n\t\t\tif waitTime < RetryWaitTime {\n\t\t\t\twaitTime += waitTime \/ 2\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ return the first non empty string\nfunc Nvl(values ...string) string {\n\tfor _, s := range values {\n\t\tif s != \"\" {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>reset wait time<commit_after>package util\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n)\n\nvar RetryWaitTime = 6 * time.Second\n\nfunc Retry(name string, job func() error) (err error) {\n\twaitTime := time.Second\n\thasErr := false\n\tfor waitTime < RetryWaitTime {\n\t\terr = job()\n\t\tif err == nil {\n\t\t\tif hasErr {\n\t\t\t\tglog.V(0).Infof(\"retry %s successfully\", name)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(err.Error(), \"transport\") {\n\t\t\thasErr = true\n\t\t\tglog.V(0).Infof(\"retry %s: err: %v\", name, err)\n\t\t\ttime.Sleep(waitTime)\n\t\t\twaitTime += waitTime \/ 2\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\nfunc RetryForever(name string, job func() error, onErrFn func(err error) bool) {\n\twaitTime := time.Second\n\tfor {\n\t\terr := job()\n\t\tif err == nil {\n\t\t\twaitTime = time.Second\n\t\t\tbreak\n\t\t}\n\t\tif onErrFn(err) {\n\t\t\tif strings.Contains(err.Error(), \"transport\") {\n\t\t\t\tglog.V(0).Infof(\"retry %s: err: %v\", name, err)\n\t\t\t}\n\t\t\ttime.Sleep(waitTime)\n\t\t\tif waitTime < RetryWaitTime {\n\t\t\t\twaitTime += waitTime \/ 2\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ return the first non empty string\nfunc Nvl(values ...string) string {\n\tfor _, s := range values {\n\t\tif s != \"\" {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package connection\n\nimport (\n\t\"net\"\n)\n\ntype Connection struct {\n\tRegistered bool\n\tAlias string\n\tIncommingMessages chan []byte\n\tReaderListening bool\n\tOutgoingMessages chan []byte\n\tWriterListening bool\n\tSocket net.Conn\n}\n\nfunc NewConnection(conn net.Conn) Connection {\n\treturn connection{\n\t\tRegisterd: false,\n\t\tReaderListening: false,\n\t\tWriterListening: false,\n\t\tSocket: conn,\n\t}\n}\n<commit_msg>typo in \/server\/connection\/connection.go<commit_after>package Connection\n\nimport (\n\t\"net\"\n)\n\ntype Connection struct {\n\tRegistered bool\n\tAlias string\n\tIncommingMessages chan []byte\n\tReaderListening bool\n\tOutgoingMessages chan []byte\n\tWriterListening bool\n\tSocket net.Conn\n}\n\nfunc NewConnection(conn net.Conn) Connection {\n\treturn connection{\n\t\tRegisterd: false,\n\t\tReaderListening: false,\n\t\tWriterListening: false,\n\t\tSocket: conn,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package providers\n\nimport (\n\t\"fmt\"\n\t\"github.com\/juju\/ratelimit\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/route53\"\n\t\"github.com\/rancher\/external-dns\/dns\"\n\t\"math\"\n\t\"os\"\n)\n\nconst (\n\tname = \"Route53\"\n)\n\nvar (\n\tclient *route53.Route53\n\thostedZone *route53.HostedZone\n\tregion aws.Region\n\tlimiter *ratelimit.Bucket\n)\n\nfunc init() {\n\tif len(os.Getenv(\"AWS_REGION\")) == 0 {\n\t\tlogrus.Info(\"AWS_REGION is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\tif len(os.Getenv(\"AWS_ACCESS_KEY\")) == 0 {\n\t\tlogrus.Info(\"AWS_ACCESS_KEY is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\tif len(os.Getenv(\"AWS_SECRET_KEY\")) == 0 {\n\t\tlogrus.Info(\"AWS_SECRET_KEY is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\troute53Handler := &Route53Handler{}\n\tif err := RegisterProvider(\"route53\", route53Handler); err != nil {\n\t\tlogrus.Fatal(\"Could not register route53 provider\")\n\t}\n\n\tif err := setRegion(); err != nil {\n\t\tlogrus.Fatalf(\"Failed to set region: %v\", err)\n\t}\n\n\tif err := setHostedZone(); err != nil {\n\t\tlogrus.Fatalf(\"Failed to set hosted zone for root domain %s: %v\", dns.RootDomainName, err)\n\t}\n\n\t\/\/ Throttle Route53 API calls to 5 req\/s\n\tlimiter = ratelimit.NewBucketWithRate(5.0, 1)\n\n\tlogrus.Infof(\"Configured %s with hosted zone \\\"%s\\\" in region \\\"%s\\\" \", route53Handler.GetName(), dns.RootDomainName, region.Name)\n}\n\nfunc setRegion() error {\n\n\tregionName := os.Getenv(\"AWS_REGION\")\n\tr, ok := aws.Regions[regionName]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Could not find region by name %s\", regionName)\n\t}\n\tregion = r\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tlogrus.Fatal(\"AWS failed to authenticate: %v\", err)\n\t}\n\tclient = route53.New(auth, region)\n\n\treturn nil\n}\n\nfunc setHostedZone() error {\n\tzoneResp, err := client.ListHostedZones(\"\", math.MaxInt64)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Failed to list hosted zones: %v\", err)\n\t}\n\tfor _, zone := range zoneResp.HostedZones {\n\t\tif zone.Name == dns.RootDomainName {\n\t\t\thostedZone = &zone\n\t\t\tbreak\n\t\t}\n\t}\n\tif hostedZone == nil {\n\t\tlogrus.Fatalf(\"Hosted zone %s is missing\", dns.RootDomainName)\n\t}\n\treturn nil\n}\n\ntype Route53Handler struct {\n}\n\nfunc (*Route53Handler) GetName() string {\n\treturn name\n}\n\nfunc (r *Route53Handler) AddRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"UPSERT\")\n}\n\nfunc (r *Route53Handler) UpdateRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"UPSERT\")\n}\n\nfunc (r *Route53Handler) RemoveRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"DELETE\")\n}\n\nfunc (*Route53Handler) changeRecord(record dns.DnsRecord, action string) error {\n\trecordSet := route53.ResourceRecordSet{Name: record.Fqdn, Type: record.Type, Records: record.Records, TTL: record.TTL}\n\tupdate := route53.Change{action, recordSet}\n\tchanges := []route53.Change{update}\n\treq := route53.ChangeResourceRecordSetsRequest{Comment: \"Updated by Rancher\", Changes: changes}\n\tlimiter.Wait(1)\n\t_, err := client.ChangeResourceRecordSets(hostedZone.ID, &req)\n\treturn err\n}\n\nfunc (*Route53Handler) GetRecords() ([]dns.DnsRecord, error) {\n\tvar records []dns.DnsRecord\n\topts := route53.ListOpts{}\n\tlimiter.Wait(1)\n\tresp, err := client.ListResourceRecordSets(hostedZone.ID, &opts)\n\tif err != nil {\n\t\treturn records, fmt.Errorf(\"Route53 API call has failed: %v\", err)\n\t}\n\n\tfor _, rec := range resp.Records {\n\t\trecord := dns.DnsRecord{Fqdn: rec.Name, Records: rec.Records, Type: rec.Type, TTL: rec.TTL}\n\t\trecords = append(records, record)\n\t}\n\n\treturn records, nil\n}\n<commit_msg>Throttle Route53 API calls to 3 req\/s<commit_after>package providers\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/juju\/ratelimit\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/route53\"\n\t\"github.com\/rancher\/external-dns\/dns\"\n\t\"math\"\n\t\"os\"\n)\n\nconst (\n\tname = \"Route53\"\n)\n\nvar (\n\tclient *route53.Route53\n\thostedZone *route53.HostedZone\n\tregion aws.Region\n\tlimiter *ratelimit.Bucket\n)\n\nfunc init() {\n\tif len(os.Getenv(\"AWS_REGION\")) == 0 {\n\t\tlogrus.Info(\"AWS_REGION is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\tif len(os.Getenv(\"AWS_ACCESS_KEY\")) == 0 {\n\t\tlogrus.Info(\"AWS_ACCESS_KEY is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\tif len(os.Getenv(\"AWS_SECRET_KEY\")) == 0 {\n\t\tlogrus.Info(\"AWS_SECRET_KEY is not set, skipping init of Route53 provider\")\n\t\treturn\n\t}\n\n\troute53Handler := &Route53Handler{}\n\tif err := RegisterProvider(\"route53\", route53Handler); err != nil {\n\t\tlogrus.Fatal(\"Could not register route53 provider\")\n\t}\n\n\tif err := setRegion(); err != nil {\n\t\tlogrus.Fatalf(\"Failed to set region: %v\", err)\n\t}\n\n\tif err := setHostedZone(); err != nil {\n\t\tlogrus.Fatalf(\"Failed to set hosted zone for root domain %s: %v\", dns.RootDomainName, err)\n\t}\n\n\t\/\/ Throttle Route53 API calls to 3 req\/s\n\t\/\/ AWS limit is 5rec\/s per account, so leaving the room for other clients\n\tlimiter = ratelimit.NewBucketWithRate(3.0, 1)\n\n\tlogrus.Infof(\"Configured %s with hosted zone \\\"%s\\\" in region \\\"%s\\\" \", route53Handler.GetName(), dns.RootDomainName, region.Name)\n}\n\nfunc setRegion() error {\n\n\tregionName := os.Getenv(\"AWS_REGION\")\n\tr, ok := aws.Regions[regionName]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Could not find region by name %s\", regionName)\n\t}\n\tregion = r\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tlogrus.Fatal(\"AWS failed to authenticate: %v\", err)\n\t}\n\tclient = route53.New(auth, region)\n\n\treturn nil\n}\n\nfunc setHostedZone() error {\n\tzoneResp, err := client.ListHostedZones(\"\", math.MaxInt64)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Failed to list hosted zones: %v\", err)\n\t}\n\tfor _, zone := range zoneResp.HostedZones {\n\t\tif zone.Name == dns.RootDomainName {\n\t\t\thostedZone = &zone\n\t\t\tbreak\n\t\t}\n\t}\n\tif hostedZone == nil {\n\t\tlogrus.Fatalf(\"Hosted zone %s is missing\", dns.RootDomainName)\n\t}\n\treturn nil\n}\n\ntype Route53Handler struct {\n}\n\nfunc (*Route53Handler) GetName() string {\n\treturn name\n}\n\nfunc (r *Route53Handler) AddRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"UPSERT\")\n}\n\nfunc (r *Route53Handler) UpdateRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"UPSERT\")\n}\n\nfunc (r *Route53Handler) RemoveRecord(record dns.DnsRecord) error {\n\treturn r.changeRecord(record, \"DELETE\")\n}\n\nfunc (*Route53Handler) changeRecord(record dns.DnsRecord, action string) error {\n\trecordSet := route53.ResourceRecordSet{Name: record.Fqdn, Type: record.Type, Records: record.Records, TTL: record.TTL}\n\tupdate := route53.Change{action, recordSet}\n\tchanges := []route53.Change{update}\n\treq := route53.ChangeResourceRecordSetsRequest{Comment: \"Updated by Rancher\", Changes: changes}\n\tlimiter.Wait(1)\n\t_, err := client.ChangeResourceRecordSets(hostedZone.ID, &req)\n\treturn err\n}\n\nfunc (*Route53Handler) GetRecords() ([]dns.DnsRecord, error) {\n\tvar records []dns.DnsRecord\n\topts := route53.ListOpts{}\n\tlimiter.Wait(1)\n\tresp, err := client.ListResourceRecordSets(hostedZone.ID, &opts)\n\tif err != nil {\n\t\treturn records, fmt.Errorf(\"Route53 API call has failed: %v\", err)\n\t}\n\n\tfor _, rec := range resp.Records {\n\t\trecord := dns.DnsRecord{Fqdn: rec.Name, Records: rec.Records, Type: rec.Type, TTL: rec.TTL}\n\t\trecords = append(records, record)\n\t}\n\n\treturn records, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2013 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\n\/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*\/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n\/\/ dummyFmtState implements a fake fmt.State to use for testing invalid\n\/\/ reflect.Value handling. This is necessary because the fmt package catches\n\/\/ invalid values before invoking the formatter on them.\ntype dummyFmtState struct {\n\tbytes.Buffer\n}\n\nfunc (dfs *dummyFmtState) Flag(f int) bool {\n\tif f == int('+') {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (dfs *dummyFmtState) Precision() (int, bool) {\n\treturn 0, false\n}\n\nfunc (dfs *dummyFmtState) Width() (int, bool) {\n\treturn 0, false\n}\n\n\/\/ TestInvalidReflectValue ensures the dump and formatter code handles an\n\/\/ invalid reflect value properly. This needs access to internal state since it\n\/\/ should never happen in real code and therefore can't be tested via the public\n\/\/ API.\nfunc TestInvalidReflectValue(t *testing.T) {\n\ti := 1\n\n\t\/\/ Dump invalid reflect value.\n\tv := new(reflect.Value)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(*v)\n\ts := buf.String()\n\twant := \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t\/\/ Formatter invalid reflect value.\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: *v, cs: &Config, fs: buf2}\n\tf.format(*v)\n\ts = buf2.String()\n\twant = \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n<commit_msg>Add tests for unrecognized reflect values.<commit_after>\/*\n * Copyright (c) 2013 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\n\/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*\/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\t\"unsafe\"\n)\n\n\/\/ dummyFmtState implements a fake fmt.State to use for testing invalid\n\/\/ reflect.Value handling. This is necessary because the fmt package catches\n\/\/ invalid values before invoking the formatter on them.\ntype dummyFmtState struct {\n\tbytes.Buffer\n}\n\nfunc (dfs *dummyFmtState) Flag(f int) bool {\n\tif f == int('+') {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (dfs *dummyFmtState) Precision() (int, bool) {\n\treturn 0, false\n}\n\nfunc (dfs *dummyFmtState) Width() (int, bool) {\n\treturn 0, false\n}\n\n\/\/ TestInvalidReflectValue ensures the dump and formatter code handles an\n\/\/ invalid reflect value properly. This needs access to internal state since it\n\/\/ should never happen in real code and therefore can't be tested via the public\n\/\/ API.\nfunc TestInvalidReflectValue(t *testing.T) {\n\ti := 1\n\n\t\/\/ Dump invalid reflect value.\n\tv := new(reflect.Value)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(*v)\n\ts := buf.String()\n\twant := \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t\/\/ Formatter invalid reflect value.\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: *v, cs: &Config, fs: buf2}\n\tf.format(*v)\n\ts = buf2.String()\n\twant = \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n\n\/\/ flagRO, flagKindShift and flagKindWidth indicate various bit flags that the\n\/\/ reflect package uses internally to track kind and state information.\nconst flagRO = 1 << 0\nconst flagKindShift = 4\nconst flagKindWidth = 5\n\n\/\/ changeKind uses unsafe to intentionally change the kind of a reflect.Value to\n\/\/ the maximum kind value which does not exist. This is needed to test the\n\/\/ fallback code which punts to the standard fmt library for new types that\n\/\/ might get added to the language.\nfunc changeKind(v *reflect.Value, readOnly bool) {\n\trvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) +\n\t\tunsafe.Offsetof(reflectValue.flag)))\n\t*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)\n\tif readOnly {\n\t\t*rvf |= flagRO\n\t} else {\n\t\t*rvf &= ^uintptr(flagRO)\n\t}\n}\n\n\/\/ TestAddedReflectValue tests functionaly of the dump and formatter code which\n\/\/ falls back to the standard fmt library for new types that might get added to\n\/\/ the language.\nfunc TestAddedReflectValue(t *testing.T) {\n\ti := 1\n\n\t\/\/ Dump using a reflect.Value that is exported.\n\tv := reflect.ValueOf(int8(5))\n\tchangeKind(&v, false)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(v)\n\ts := buf.String()\n\twant := \"(int8) 5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t\/\/ Dump using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf.Reset()\n\td.dump(v)\n\ts = buf.String()\n\twant = \"(int8) <int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t\/\/ Formatter using a reflect.Value that is exported.\n\tchangeKind(&v, false)\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t\/\/ Formatter using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf2.Reset()\n\tf = formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"<int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The ContainerOps Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/macaron.v1\"\n)\n\nfunc IndexV1Handler(ctx *macaron.Context) (int, []byte) {\n\tresult, _ := json.Marshal(map[string]string{\"message\": \"Wenduer Backend REST API Service\"})\n\treturn http.StatusOK, result\n}\n<commit_msg>Update IndexV1Handler function<commit_after>\/*\nCopyright 2015 The ContainerOps Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"gopkg.in\/macaron.v1\"\n)\n\n\/\/\nfunc IndexV1Handler(ctx *macaron.Context) (int, []byte) {\n\tresult, _ := json.Marshal(map[string]string{\"message\": \"Dockyard Backend REST API Service\"})\n\treturn http.StatusOK, result\n}\n<|endoftext|>"} {"text":"<commit_before>package hbooksvc\n\nimport (\n\t\"reflect\"\n\n\t\"github.com\/go-hep\/fwk\"\n\t\"github.com\/go-hep\/fwk\/fsm\"\n\t\"github.com\/go-hep\/hbook\"\n)\n\ntype hsvc struct {\n\tfwk.SvcBase\n\n\th1ds map[fwk.HID]fwk.H1D\n}\n\nfunc (svc *hsvc) Configure(ctx fwk.Context) error {\n\tvar err error\n\n\t\/\/ err = svc.DeclInPort(svc.input, reflect.TypeOf(sometype{}))\n\t\/\/ if err != nil {\n\t\/\/\treturn err\n\t\/\/ }\n\n\t\/\/ err = svc.DeclOutPort(svc.output, reflect.TypeOf(sometype{}))\n\t\/\/ if err != nil {\n\t\/\/\treturn err\n\t\/\/ }\n\n\treturn err\n}\n\nfunc (svc *hsvc) StartSvc(ctx fwk.Context) error {\n\tvar err error\n\n\tsvc.h1ds = make(map[fwk.HID]fwk.H1D)\n\treturn err\n}\n\nfunc (svc *hsvc) StopSvc(ctx fwk.Context) error {\n\tvar err error\n\n\tsvc.h1ds = make(map[fwk.HID]fwk.H1D)\n\treturn err\n}\n\nfunc (svc *hsvc) BookH1D(name string, nbins int, low, high float64) (fwk.H1D, error) {\n\tvar err error\n\th1d := fwk.H1D{\n\t\tID: fwk.HID(name),\n\t\tHist: hbook.NewH1D(nbins, low, high),\n\t}\n\n\tif !(svc.FSMState() < fsm.Running) {\n\t\treturn h1d, fwk.Errorf(\"fwk: can not book histograms during FSM-state %v\", svc.FSMState())\n\t}\n\n\treturn h1d, err\n}\n\nfunc (svc *hsvc) FillH1D(id fwk.HID, x, w float64) {\n\t\/\/ FIXME(sbinet) make it concurrency-safe\n\tsvc.h1ds[id].Hist.Fill(x, w)\n}\n\nfunc newhsvc(typ, name string, mgr fwk.App) (fwk.Component, error) {\n\tvar err error\n\tsvc := &hsvc{\n\t\tSvcBase: fwk.NewSvc(typ, name, mgr),\n\t\t\/\/ input: \"Input\",\n\t\t\/\/ output: \"Output\",\n\t\th1ds: make(map[fwk.HID]fwk.H1D),\n\t}\n\n\t\/\/ err = svc.DeclProp(\"Input\", &svc.input)\n\t\/\/ if err != nil {\n\t\/\/ \treturn nil, err\n\t\/\/ }\n\n\t\/\/ err = svc.DeclProp(\"Output\", &svc.output)\n\t\/\/ if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/ }\n\n\treturn svc, err\n}\n\nfunc init() {\n\tfwk.Register(reflect.TypeOf(hsvc{}), newhsvc)\n}\n\nvar _ fwk.HistSvc = (*hsvc)(nil)\n<commit_msg>hsvc: non-conc-safe impl.<commit_after>package hbooksvc\n\nimport (\n\t\"reflect\"\n\n\t\"github.com\/go-hep\/fwk\"\n\t\"github.com\/go-hep\/fwk\/fsm\"\n\t\"github.com\/go-hep\/hbook\"\n)\n\ntype hsvc struct {\n\tfwk.SvcBase\n\n\th1ds map[fwk.HID]fwk.H1D\n}\n\nfunc (svc *hsvc) Configure(ctx fwk.Context) error {\n\tvar err error\n\n\t\/\/ err = svc.DeclInPort(svc.input, reflect.TypeOf(sometype{}))\n\t\/\/ if err != nil {\n\t\/\/\treturn err\n\t\/\/ }\n\n\t\/\/ err = svc.DeclOutPort(svc.output, reflect.TypeOf(sometype{}))\n\t\/\/ if err != nil {\n\t\/\/\treturn err\n\t\/\/ }\n\n\treturn err\n}\n\nfunc (svc *hsvc) StartSvc(ctx fwk.Context) error {\n\tvar err error\n\n\treturn err\n}\n\nfunc (svc *hsvc) StopSvc(ctx fwk.Context) error {\n\tvar err error\n\n\treturn err\n}\n\nfunc (svc *hsvc) BookH1D(name string, nbins int, low, high float64) (fwk.H1D, error) {\n\tvar err error\n\th1d := fwk.H1D{\n\t\tID: fwk.HID(name),\n\t\tHist: hbook.NewH1D(nbins, low, high),\n\t}\n\n\tif !(svc.FSMState() < fsm.Running) {\n\t\treturn h1d, fwk.Errorf(\"fwk: can not book histograms during FSM-state %v\", svc.FSMState())\n\t}\n\n\tsvc.h1ds[h1d.ID] = h1d\n\treturn h1d, err\n}\n\nfunc (svc *hsvc) FillH1D(id fwk.HID, x, w float64) {\n\t\/\/ FIXME(sbinet) make it concurrency-safe\n\th := svc.h1ds[id]\n\th.Hist.Fill(x, w)\n\tsvc.h1ds[id] = h\n}\n\nfunc newhsvc(typ, name string, mgr fwk.App) (fwk.Component, error) {\n\tvar err error\n\tsvc := &hsvc{\n\t\tSvcBase: fwk.NewSvc(typ, name, mgr),\n\t\t\/\/ input: \"Input\",\n\t\t\/\/ output: \"Output\",\n\t\th1ds: make(map[fwk.HID]fwk.H1D),\n\t}\n\n\t\/\/ err = svc.DeclProp(\"Input\", &svc.input)\n\t\/\/ if err != nil {\n\t\/\/ \treturn nil, err\n\t\/\/ }\n\n\t\/\/ err = svc.DeclProp(\"Output\", &svc.output)\n\t\/\/ if err != nil {\n\t\/\/\treturn nil, err\n\t\/\/ }\n\n\treturn svc, err\n}\n\nfunc init() {\n\tfwk.Register(reflect.TypeOf(hsvc{}), newhsvc)\n}\n\nvar _ fwk.HistSvc = (*hsvc)(nil)\n<|endoftext|>"} {"text":"<commit_before>\/* heartthreader: mass, multithreaded testing for servers against Heartbleed (CVE-2014-0160)\n * Copyright (C) 2014, Cyphar All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n *\n * 3. Neither the name of the copyright holder nor the names of its contributors\n * may be used to endorse or promote products derived from this software without\n * specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\/\n\npackage main\n\nimport (\n\t\"os\"\n\t\"io\"\n\t\"bufio\"\n\t\"strings\"\n\t\"sync\"\n\t\"fmt\"\n\t\"flag\"\n\t\"time\"\n\n\ttls \"github.com\/titanous\/heartbleeder\/tls\"\n)\n\ntype Server struct {\n\tHost string\n\tVulnerable bool\n}\n\nconst LICENSE = \"heartthreader: mass, multithreaded testing for servers against Heartbleed (CVE-2014-0160)\\n\" +\n\t\"Copyright (C) 2014, Cyphar All rights reserved.\\n\" +\n\t\"\\n\" +\n\t\"Redistribution and use in source and binary forms, with or without\\n\" +\n\t\"modification, are permitted provided that the following conditions are met:\\n\" +\n\t\"\\n\" +\n\t\"1. Redistributions of source code must retain the above copyright notice,\\n\" +\n\t\" this list of conditions and the following disclaimer.\\n\" +\n\t\"\\n\" +\n\t\"2. Redistributions in binary form must reproduce the above copyright notice,\\n\" +\n\t\" this list of conditions and the following disclaimer in the documentation\\n\" +\n\t\" and\/or other materials provided with the distribution.\\n\" +\n\t\"\\n\" +\n\t\"3. Neither the name of the copyright holder nor the names of its contributors\\n\" +\n\t\" may be used to endorse or promote products derived from this software without\\n\" +\n\t\" specific prior written permission.\\n\" +\n\t\"\\n\" +\n\t\"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \\\"AS IS\\\"\\n\" +\n\t\"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\\n\" +\n\t\"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\\n\" +\n\t\"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\\n\" +\n\t\"FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\\n\" +\n\t\"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\\n\" +\n\t\"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\\n\" +\n\t\"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\\n\" +\n\t\"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\\n\" +\n\t\"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\\n\"\n\nvar Config struct {\n\tNumThreads int\n}\n\nfunc FixLine(line string) string {\n\tif strings.HasSuffix(line, \"\\n\") {\n\t\tl := len(line)\n\t\tline = line[:l-1]\n\t}\n\n\tif !strings.Contains(line, \":\") {\n\t\tline = line + \":443\"\n\t}\n\n\treturn line\n}\n\nfunc YieldTargets(done <-chan struct{}, files []string) <-chan string {\n\thosts := make(chan string)\n\n\tgo func() {\n\t\tdefer close(hosts)\n\t\tfor _, file := range files {\n\t\t\tf, err := os.Open(file)\n\t\t\tdefer f.Close()\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"E: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbf := bufio.NewReader(f)\n\n\t\t\tvar line string\n\t\t\tfor line, err = bf.ReadString('\\n'); err == nil; line, err = bf.ReadString('\\n') {\n\t\t\t\thost := FixLine(line)\n\t\t\t\tselect {\n\t\t\t\t\tcase hosts <- host:\n\t\t\t\t\tcase <-done:\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"I: yielding canceled\\n\")\n\t\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tf.Close()\n\t\t}\n\t}()\n\n\treturn hosts\n}\n\nfunc DigestTarget(done <-chan struct{}, hosts <-chan string, c chan<- Server) {\n\tfor host := range hosts {\n\t\twait_err := func(conn *tls.Conn, ch chan<- error) {\n\t\t\tvar err error\n\n\t\t\terr = conn.WriteHeartbeat(32, nil)\n\t\t\t_, _, err = conn.ReadHeartbeat()\n\n\t\t\tch <- err\n\t\t}\n\n\t\tconn, err := tls.Dial(\"tcp\", host, &tls.Config{InsecureSkipVerify: true})\n\t\tdefer conn.Close()\n\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"E: %s\\n\", err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tquit := time.After(10 * time.Second)\n\t\tech := make(chan error)\n\n\t\tgo wait_err(conn, ech)\n\n\t\tsrv := Server{host, false}\n\n\t\tselect {\n\t\t\tcase err = <-ech:\n\t\t\t\tswitch err {\n\t\t\t\t\tcase nil:\n\t\t\t\t\t\t\/\/ VULNERABLE!!\n\t\t\t\t\t\tsrv.Vulnerable = true\n\t\t\t\t\tcase tls.ErrNoHeartbeat:\n\t\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\tcase <-quit:\n\t\t\tcase <-done:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"I: digest of %s canceled\\n\", host)\n\t\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\t\tcase c <- srv:\n\t\t\tcase <-done:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"I: digest of %s canceled\\n\", host)\n\t\t\t\treturn\n\t\t}\n\n\t\tconn.Close()\n\t}\n}\n\nfunc DigestAll(files []string, out chan<- Server) {\n\tservers := make(chan Server)\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\thosts := YieldTargets(done, files)\n\n\tvar wg sync.WaitGroup\n\twg.Add(Config.NumThreads)\n\n\tfor i := 0; i < Config.NumThreads; i++ {\n\t\tgo func() {\n\t\t\tDigestTarget(done, hosts, servers)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(servers)\n\t}()\n\n\tfor server := range servers {\n\t\tout <- server\n\t}\n}\n\nfunc License() {\n\tfmt.Printf(LICENSE)\n}\n\nfunc Help(out io.Writer) {\n\tfmt.Fprintf(out, \"Usage: %s [opts] hostfile[s]\\n\\n\", os.Args[0])\n\tfmt.Fprintf(out, \"Flags for %s:\\n\", os.Args[0])\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tvar (\n\t\to_threads = flag.Int(\"threads\", 20, \"Specify the number of threads to use when scanning the input files.\")\n\n\t\to_license = flag.Bool(\"license\", false, \"Show the license text.\")\n\t\to_help = flag.Bool(\"help\", false, \"Show this help page.\")\n\t)\n\n\tflag.Usage = func() {\n\t\tHelp(os.Stderr)\n\t}\n\n\tflag.Parse()\n\n\t\/* set configs *\/\n\tConfig.NumThreads = *o_threads\n\n\tif *o_license {\n\t\tLicense()\n\t\tos.Exit(0)\n\t}\n\n\tif *o_help {\n\t\tHelp(os.Stdout)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 1 {\n\t\tHelp(os.Stderr)\n\t\tos.Exit(2)\n\t}\n\n\tservers := make(chan Server)\n\tgo DigestAll(flag.Args(), servers)\n\n\tfor server := range servers {\n\t\tif server.Vulnerable {\n\t\t\tfmt.Printf(\"V: %s\\n\", server.Host)\n\t\t} else {\n\t\t\tfmt.Printf(\"N: %s\\n\", server.Host)\n\t\t}\n\t}\n}\n<commit_msg>heartthreader: fixed blocking at end of testing<commit_after>\/* heartthreader: mass, multithreaded testing for servers against Heartbleed (CVE-2014-0160)\n * Copyright (C) 2014, Cyphar All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and\/or other materials provided with the distribution.\n *\n * 3. Neither the name of the copyright holder nor the names of its contributors\n * may be used to endorse or promote products derived from this software without\n * specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\/\n\npackage main\n\nimport (\n\t\"os\"\n\t\"io\"\n\t\"bufio\"\n\t\"strings\"\n\t\"sync\"\n\t\"fmt\"\n\t\"flag\"\n\t\"time\"\n\n\ttls \"github.com\/titanous\/heartbleeder\/tls\"\n)\n\ntype Server struct {\n\tHost string\n\tVulnerable bool\n}\n\nconst LICENSE = \"heartthreader: mass, multithreaded testing for servers against Heartbleed (CVE-2014-0160)\\n\" +\n\t\"Copyright (C) 2014, Cyphar All rights reserved.\\n\" +\n\t\"\\n\" +\n\t\"Redistribution and use in source and binary forms, with or without\\n\" +\n\t\"modification, are permitted provided that the following conditions are met:\\n\" +\n\t\"\\n\" +\n\t\"1. Redistributions of source code must retain the above copyright notice,\\n\" +\n\t\" this list of conditions and the following disclaimer.\\n\" +\n\t\"\\n\" +\n\t\"2. Redistributions in binary form must reproduce the above copyright notice,\\n\" +\n\t\" this list of conditions and the following disclaimer in the documentation\\n\" +\n\t\" and\/or other materials provided with the distribution.\\n\" +\n\t\"\\n\" +\n\t\"3. Neither the name of the copyright holder nor the names of its contributors\\n\" +\n\t\" may be used to endorse or promote products derived from this software without\\n\" +\n\t\" specific prior written permission.\\n\" +\n\t\"\\n\" +\n\t\"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \\\"AS IS\\\"\\n\" +\n\t\"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\\n\" +\n\t\"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\\n\" +\n\t\"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\\n\" +\n\t\"FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\\n\" +\n\t\"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\\n\" +\n\t\"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\\n\" +\n\t\"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\\n\" +\n\t\"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\\n\" +\n\t\"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\\n\"\n\nvar Config struct {\n\tNumThreads int\n}\n\nfunc FixLine(line string) string {\n\tif strings.HasSuffix(line, \"\\n\") {\n\t\tl := len(line)\n\t\tline = line[:l-1]\n\t}\n\n\tif !strings.Contains(line, \":\") {\n\t\tline = line + \":443\"\n\t}\n\n\treturn line\n}\n\nfunc YieldTargets(done <-chan struct{}, files []string) <-chan string {\n\thosts := make(chan string)\n\n\tgo func() {\n\t\tdefer close(hosts)\n\t\tfor _, file := range files {\n\t\t\tf, err := os.Open(file)\n\t\t\tdefer f.Close()\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"E: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbf := bufio.NewReader(f)\n\n\t\t\tvar line string\n\t\t\tfor line, err = bf.ReadString('\\n'); err == nil; line, err = bf.ReadString('\\n') {\n\t\t\t\thost := FixLine(line)\n\t\t\t\tselect {\n\t\t\t\t\tcase hosts <- host:\n\t\t\t\t\tcase <-done:\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"I: yielding canceled\\n\")\n\t\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tf.Close()\n\t\t}\n\t}()\n\n\treturn hosts\n}\n\nfunc DigestTarget(done <-chan struct{}, hosts <-chan string, c chan<- Server) {\n\tfor host := range hosts {\n\t\twait_err := func(conn *tls.Conn, ch chan<- error) {\n\t\t\tvar err error\n\n\t\t\terr = conn.WriteHeartbeat(32, nil)\n\t\t\t_, _, err = conn.ReadHeartbeat()\n\n\t\t\tch <- err\n\t\t}\n\n\t\tconn, err := tls.Dial(\"tcp\", host, &tls.Config{InsecureSkipVerify: true})\n\t\tdefer conn.Close()\n\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"E: %s\\n\", err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tquit := time.After(10 * time.Second)\n\t\tech := make(chan error)\n\n\t\tgo wait_err(conn, ech)\n\n\t\tsrv := Server{host, false}\n\n\t\tselect {\n\t\t\tcase err = <-ech:\n\t\t\t\tswitch err {\n\t\t\t\t\tcase nil:\n\t\t\t\t\t\t\/\/ VULNERABLE!!\n\t\t\t\t\t\tsrv.Vulnerable = true\n\t\t\t\t\tcase tls.ErrNoHeartbeat:\n\t\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\tcase <-quit:\n\t\t\tcase <-done:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"I: digest of %s canceled\\n\", host)\n\t\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\t\tcase c <- srv:\n\t\t\tcase <-done:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"I: digest of %s canceled\\n\", host)\n\t\t\t\treturn\n\t\t}\n\n\t\tconn.Close()\n\t}\n}\n\nfunc DigestAll(files []string, out chan<- Server) {\n\tdefer close(out)\n\n\tservers := make(chan Server)\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\thosts := YieldTargets(done, files)\n\n\tvar wg sync.WaitGroup\n\twg.Add(Config.NumThreads)\n\n\tfor i := 0; i < Config.NumThreads; i++ {\n\t\tgo func() {\n\t\t\tDigestTarget(done, hosts, servers)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(servers)\n\t}()\n\n\tfor server := range servers {\n\t\tout <- server\n\t}\n}\n\nfunc License() {\n\tfmt.Printf(LICENSE)\n}\n\nfunc Help(out io.Writer) {\n\tfmt.Fprintf(out, \"Usage: %s [opts] hostfile[s]\\n\\n\", os.Args[0])\n\tfmt.Fprintf(out, \"Flags for %s:\\n\", os.Args[0])\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tvar (\n\t\to_threads = flag.Int(\"threads\", 20, \"Specify the number of threads to use when scanning the input files.\")\n\n\t\to_license = flag.Bool(\"license\", false, \"Show the license text.\")\n\t\to_help = flag.Bool(\"help\", false, \"Show this help page.\")\n\t)\n\n\tflag.Usage = func() {\n\t\tHelp(os.Stderr)\n\t}\n\n\tflag.Parse()\n\n\t\/* set configs *\/\n\tConfig.NumThreads = *o_threads\n\n\tif *o_license {\n\t\tLicense()\n\t\tos.Exit(0)\n\t}\n\n\tif *o_help {\n\t\tHelp(os.Stdout)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 1 {\n\t\tHelp(os.Stderr)\n\t\tos.Exit(2)\n\t}\n\n\tservers := make(chan Server)\n\tgo DigestAll(flag.Args(), servers)\n\n\tfor server := range servers {\n\t\tif server.Vulnerable {\n\t\t\tfmt.Printf(\"V: %s\\n\", server.Host)\n\t\t} else {\n\t\t\tfmt.Printf(\"N: %s\\n\", server.Host)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n)\n\n\/\/ TODO: Custom type\nconst (\n\tEmailTypeNewLessonNotifier = \"new_lesson_notifier\"\n\tEmailTypeFollowReminder = \"follow_reminder\"\n)\n\ntype EventLogEmail struct {\n\tDatetime time.Time\n\tEvent string\n\tEmailType string\n\tUserID uint32\n\tUserAgent string\n\tTeacherIDs string\n\tURL string\n}\n\nfunc (*EventLogEmail) TableName() string {\n\treturn \"event_log_email\"\n}\n\ntype EventLogEmailService struct {\n\tdb *gorm.DB\n}\n\nfunc NewEventLogEmailService(db *gorm.DB) *EventLogEmailService {\n\treturn &EventLogEmailService{db}\n}\n\nfunc (s *EventLogEmailService) Create(e *EventLogEmail) error {\n\treturn s.db.Create(e).Error\n}\n\nfunc (s *EventLogEmailService) FindStatsNewLessonNotifierByDate(date time.Time) ([]*StatNewLessonNotifier, error) {\n\tsql := `\nSELECT CAST(datetime AS DATE) AS date, event, COUNT(*) AS count\nFROM event_log_email\nWHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\nGROUP BY event;\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatNewLessonNotifier, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatsNewLessonNotifierUUCountByDate(date time.Time) ([]*StatNewLessonNotifier, error) {\n\tsql := `\nSELECT s.date, s.event, COUNT(*) AS uu_count\nFROM (\n SELECT CAST(datetime AS DATE) AS date, event, user_id, COUNT(*) AS count\n FROM event_log_email\n WHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\n GROUP BY event, user_id\n) AS s\nGROUP BY s.event\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatNewLessonNotifier, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyNotificationEventByDate(date time.Time) ([]*StatDailyNotificationEvent, error) {\n\tsql := `\nSELECT CAST(datetime AS DATE) AS date, event, COUNT(*) AS count\nFROM event_log_email\nWHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\nGROUP BY event;\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatDailyNotificationEvent, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyNotificationEventUUCountByDate(date time.Time) ([]*StatDailyNotificationEvent, error) {\n\tsql := `\nSELECT s.date, s.event, COUNT(*) AS uu_count\nFROM (\n SELECT CAST(datetime AS DATE) AS date, event, user_id, COUNT(*) AS count\n FROM event_log_email\n WHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\n GROUP BY event, user_id\n) AS s\nGROUP BY s.event\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatDailyNotificationEvent, 0, 1000)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyUserNotificationEvent(date time.Time) ([]*StatDailyUserNotificationEvent, error) {\n\treturn nil, nil\n}\n<commit_msg>Fix CI failure<commit_after>package model\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n)\n\n\/\/ TODO: Custom type\nconst (\n\tEmailTypeNewLessonNotifier = \"new_lesson_notifier\"\n\tEmailTypeFollowReminder = \"follow_reminder\"\n)\n\ntype EventLogEmail struct {\n\tDatetime time.Time\n\tEvent string\n\tEmailType string\n\tUserID uint32\n\tUserAgent string\n\tTeacherIDs string\n\tURL string\n}\n\nfunc (*EventLogEmail) TableName() string {\n\treturn \"event_log_email\"\n}\n\ntype EventLogEmailService struct {\n\tdb *gorm.DB\n}\n\nfunc NewEventLogEmailService(db *gorm.DB) *EventLogEmailService {\n\treturn &EventLogEmailService{db}\n}\n\nfunc (s *EventLogEmailService) Create(e *EventLogEmail) error {\n\treturn s.db.Create(e).Error\n}\n\nfunc (s *EventLogEmailService) FindStatsNewLessonNotifierByDate(date time.Time) ([]*StatNewLessonNotifier, error) {\n\tsql := `\nSELECT CAST(datetime AS DATE) AS date, event, COUNT(*) AS count\nFROM event_log_email\nWHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\nGROUP BY event;\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatNewLessonNotifier, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatsNewLessonNotifierUUCountByDate(date time.Time) ([]*StatNewLessonNotifier, error) {\n\tsql := `\nSELECT s.date, s.event, COUNT(*) AS uu_count\nFROM (\n SELECT CAST(datetime AS DATE) AS date, event, user_id, COUNT(*) AS count\n FROM event_log_email\n WHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\n GROUP BY event, user_id\n) AS s\nGROUP BY s.event\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatNewLessonNotifier, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyNotificationEventByDate(date time.Time) ([]*StatDailyNotificationEvent, error) {\n\tsql := `\nSELECT CAST(datetime AS DATE) AS date, event, COUNT(*) AS count\nFROM event_log_email\nWHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\nGROUP BY date, event;\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatDailyNotificationEvent, 0, 100)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyNotificationEventUUCountByDate(date time.Time) ([]*StatDailyNotificationEvent, error) {\n\tsql := `\nSELECT s.date, s.event, COUNT(*) AS uu_count\nFROM (\n SELECT CAST(datetime AS DATE) AS date, event, user_id, COUNT(*) AS count\n FROM event_log_email\n WHERE\n datetime BETWEEN ? AND ?\n AND email_type = 'new_lesson_notifier'\n GROUP BY date, event, user_id\n) AS s\nGROUP BY s.date, s.event\n`\n\td := date.Format(\"2006-01-02\")\n\tvalues := make([]*StatDailyNotificationEvent, 0, 1000)\n\tif err := s.db.Raw(strings.TrimSpace(sql), d+\" 00:00:00\", d+\" 23:59:59\").Scan(&values).Error; err != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessagef(\"select failed\"),\n\t\t\terrors.WithResource(errors.NewResource(\"event_log_email\", \"date\", date.Format(\"2006-01-02\"))),\n\t\t)\n\t}\n\treturn values, nil\n}\n\nfunc (s *EventLogEmailService) FindStatDailyUserNotificationEvent(date time.Time) ([]*StatDailyUserNotificationEvent, error) {\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudca\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/cloud-ca\/go-cloudca\/api\"\n\t\"github.com\/cloud-ca\/go-cloudca\/services\"\n)\n\ntype NetworkAcl struct {\n\tId string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n}\n\ntype NetworkAclService interface {\n\tGet(id string) (*NetworkAcl, error)\n\tList() ([]NetworkAcl, error)\n\tListByVpcId(vpcId string) ([]NetworkAcl, error)\n\tListWithOptions(options map[string]string) ([]NetworkAcl, error)\n}\n\ntype NetworkAclApi struct {\n\tentityService services.EntityService\n}\n\nfunc NewNetworkAclService(apiClient api.ApiClient, serviceCode string, environmentName string) NetworkAclService {\n\treturn &NetworkAclApi{\n\t\tentityService: services.NewEntityService(apiClient, serviceCode, environmentName, NETWORK_ACL_ENTITY_TYPE),\n\t}\n}\n\nfunc parseNetworkAcl(data []byte) *NetworkAcl {\n\tnetworkAcl := NetworkAcl{}\n\tjson.Unmarshal(data, &networkAcl)\n\treturn &networkAcl\n}\n\nfunc parseNetworkAclList(data []byte) []NetworkAcl {\n\tnetworkAcls := []NetworkAcl{}\n\tjson.Unmarshal(data, &networkAcls)\n\treturn networkAcls\n}\n\n\/\/Get network acl with the specified id for the current environment\nfunc (networkAclApi *NetworkAclApi) Get(id string) (*NetworkAcl, error) {\n\tdata, err := networkAclApi.entityService.Get(id, map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseNetworkAcl(data), nil\n}\n\n\/\/List all network offerings for the current environment\nfunc (networkAclApi *NetworkAclApi) List() ([]NetworkAcl, error) {\n\treturn networkAclApi.ListWithOptions(map[string]string{})\n}\n\n\/\/List all network offerings for the current environment\nfunc (networkAclApi *NetworkAclApi) ListByVpcId(vpcId string) ([]NetworkAcl, error) {\n\treturn networkAclApi.ListWithOptions(map[string]string{\"vpc_id\": vpcId})\n}\n\n\/\/List all network offerings for the current environment. Can use options to do sorting and paging.\nfunc (networkAclApi *NetworkAclApi) ListWithOptions(options map[string]string) ([]NetworkAcl, error) {\n\tdata, err := networkAclApi.entityService.List(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseNetworkAclList(data), nil\n}\n<commit_msg>Added network ACL create and delete operations<commit_after>package cloudca\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/cloud-ca\/go-cloudca\/api\"\n\t\"github.com\/cloud-ca\/go-cloudca\/services\"\n)\n\ntype NetworkAcl struct {\n\tId string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tVpcId string `json:\"vpcId,omitempty\"`\n}\n\ntype NetworkAclService interface {\n\tGet(id string) (*NetworkAcl, error)\n\tList() ([]NetworkAcl, error)\n\tListByVpcId(vpcId string) ([]NetworkAcl, error)\n\tListWithOptions(options map[string]string) ([]NetworkAcl, error)\n}\n\ntype NetworkAclApi struct {\n\tentityService services.EntityService\n}\n\nfunc NewNetworkAclService(apiClient api.ApiClient, serviceCode string, environmentName string) NetworkAclService {\n\treturn &NetworkAclApi{\n\t\tentityService: services.NewEntityService(apiClient, serviceCode, environmentName, NETWORK_ACL_ENTITY_TYPE),\n\t}\n}\n\nfunc parseNetworkAcl(data []byte) *NetworkAcl {\n\tnetworkAcl := NetworkAcl{}\n\tjson.Unmarshal(data, &networkAcl)\n\treturn &networkAcl\n}\n\nfunc parseNetworkAclList(data []byte) []NetworkAcl {\n\tnetworkAcls := []NetworkAcl{}\n\tjson.Unmarshal(data, &networkAcls)\n\treturn networkAcls\n}\n\n\/\/Get network acl with the specified id for the current environment\nfunc (networkAclApi *NetworkAclApi) Get(id string) (*NetworkAcl, error) {\n\tdata, err := networkAclApi.entityService.Get(id, map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseNetworkAcl(data), nil\n}\n\n\/\/List all network offerings for the current environment\nfunc (networkAclApi *NetworkAclApi) List() ([]NetworkAcl, error) {\n\treturn networkAclApi.ListWithOptions(map[string]string{})\n}\n\n\/\/List all network offerings for the current environment\nfunc (networkAclApi *NetworkAclApi) ListByVpcId(vpcId string) ([]NetworkAcl, error) {\n\treturn networkAclApi.ListWithOptions(map[string]string{\"vpc_id\": vpcId})\n}\n\n\/\/List all network offerings for the current environment. Can use options to do sorting and paging.\nfunc (networkAclApi *NetworkAclApi) ListWithOptions(options map[string]string) ([]NetworkAcl, error) {\n\tdata, err := networkAclApi.entityService.List(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseNetworkAclList(data), nil\n}\n\nfunc (networkAclApi *NetworkAclApi) Create(networkAcl NetworkAcl) (*NetworkAcl, error) {\n\tmsg, err := json.Marshal(networkAcl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult, err := networkAclApi.entityService.Create(msg, map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseNetworkAcl(result), nil\n}\n\nfunc (networkAclApi *NetworkAclApi) Delete(id string) (bool, error) {\n\t_, err := networkAclApi.entityService.Delete(id, []byte{}, map[string]string{})\n\treturn err == nil, err\n}\n<|endoftext|>"} {"text":"<commit_before>package services\n\nimport (\n\t. \"github.com\/panyam\/relay\/services\/msg\/core\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nfunc (s *TestSuite) TestCreateUserService(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tc.Assert(svc, Not(Equals), nil)\n}\n\nfunc (s *TestSuite) TestSaveUserEmptyId_ShouldCreateId(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, err := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := NewUser(0, \"user1\", team)\n\terr = svc.SaveUser(user, false)\n\tc.Assert(err, Equals, nil)\n\tc.Assert(user.Id, Not(Equals), 0)\n\tc.Assert(user.Username, Equals, \"user1\")\n\n\tfetched_user, err := svc.GetUserById(user.Id)\n\tc.Assert(fetched_user.Id, Equals, user.Id)\n\tc.Assert(fetched_user.Team, Not(Equals), nil)\n\tc.Assert(fetched_user.Team.Id, Equals, user.Team.Id)\n\n\tfetched_user, _ = svc.GetUser(\"user1\", team)\n\tc.Assert(fetched_user.Id, Equals, user.Id)\n\tc.Assert(fetched_user.Team, Not(Equals), nil)\n\tc.Assert(fetched_user.Team.Id, Equals, user.Team.Id)\n}\n\nfunc (s *TestSuite) TestGetUserFirstTime(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser, _ := svc.GetUserById(1)\n\tc.Assert(user, Equals, (*User)(nil))\n\t_, err := svc.GetUser(\"user1\", team)\n\tc.Assert(err, Not(Equals), nil)\n}\n\nfunc (s *TestSuite) TestSaveUserNormal(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := NewUser(0, \"user1\", team)\n\tuser.Object = Object{Id: 1}\n\terr := svc.SaveUser(user, false)\n\tc.Assert(err, Equals, nil)\n}\n\nfunc (s *TestSuite) TestCreateUserDuplicate(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := User{Username: \"user1\", Team: team}\n\n\terr := svc.SaveUser(&user, false)\n\tc.Assert(err, Equals, nil)\n\n\t\/\/ err = svc.SaveUser(&user, false)\n\t\/\/ c.Assert(err, Not(Equals), nil)\n}\n\n\/*\nfunc (s *TestSuite) TestSaveUserIdOrNameExists(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tuser, err := svc.CreateUser(\"1\", \"user1\")\n\tc.Assert(err, Equals, nil)\n\tuser, err = svc.CreateUser(\"1\", \"user2\")\n\tc.Assert(err, Not(Equals), nil)\n\tc.Assert(user, Equals, (*User)(nil))\n\tuser, err = svc.CreateUser(\"2\", \"user1\")\n\tc.Assert(err, Not(Equals), nil)\n\tc.Assert(user, Equals, (*User)(nil))\n}\n*\/\n<commit_msg>Using IsNil and Not(IsNil)<commit_after>package services\n\nimport (\n\t. \"github.com\/panyam\/relay\/services\/msg\/core\"\n\t. \"gopkg.in\/check.v1\"\n)\n\nfunc (s *TestSuite) TestCreateUserService(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tc.Assert(svc, Not(IsNil))\n}\n\nfunc (s *TestSuite) TestSaveUserEmptyId_ShouldCreateId(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, err := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := NewUser(0, \"user1\", team)\n\terr = svc.SaveUser(user, false)\n\tc.Assert(err, IsNil)\n\tc.Assert(user.Id, Not(Equals), 0)\n\tc.Assert(user.Username, Equals, \"user1\")\n\n\tfetched_user, err := svc.GetUserById(user.Id)\n\tc.Assert(fetched_user.Id, Equals, user.Id)\n\tc.Assert(fetched_user.Team, Not(IsNil))\n\tc.Assert(fetched_user.Team.Id, Equals, user.Team.Id)\n\n\tfetched_user, _ = svc.GetUser(\"user1\", team)\n\tc.Assert(fetched_user.Id, Equals, user.Id)\n\tc.Assert(fetched_user.Team, Not(IsNil))\n\tc.Assert(fetched_user.Team.Id, Equals, user.Team.Id)\n}\n\nfunc (s *TestSuite) TestGetUserFirstTime(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser, _ := svc.GetUserById(1)\n\tc.Assert(user, IsNil)\n\t_, err := svc.GetUser(\"user1\", team)\n\tc.Assert(err, Not(IsNil))\n}\n\nfunc (s *TestSuite) TestSaveUserNormal(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := NewUser(0, \"user1\", team)\n\tuser.Object = Object{Id: 1}\n\terr := svc.SaveUser(user, false)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *TestSuite) TestCreateUserDuplicate(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tteam, _ := s.serviceGroup.TeamService.CreateTeam(1, \"org\", \"team\")\n\tuser := User{Username: \"user1\", Team: team}\n\n\terr := svc.SaveUser(&user, false)\n\tc.Assert(err, IsNil)\n\n\t\/\/ err = svc.SaveUser(&user, false)\n\t\/\/ c.Assert(err, Not(IsNil))\n}\n\n\/*\nfunc (s *TestSuite) TestSaveUserIdOrNameExists(c *C) {\n\tsvc := s.serviceGroup.UserService\n\tuser, err := svc.CreateUser(\"1\", \"user1\")\n\tc.Assert(err, IsNil)\n\tuser, err = svc.CreateUser(\"1\", \"user2\")\n\tc.Assert(err, Not(IsNil))\n\tc.Assert(user, IsNil)\n\tuser, err = svc.CreateUser(\"2\", \"user1\")\n\tc.Assert(err, Not(IsNil))\n\tc.Assert(user, IsNil)\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before>package html\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"errors\"\n\t\"github.com\/slyrz\/newscat\/util\"\n\t\"io\"\n\t\"regexp\"\n\t\"unicode\"\n)\n\nconst (\n\t\/\/ We remember a few special node types when descending into their\n\t\/\/ children.\n\tAncestorArticle = 1 << iota\n\tAncestorAside\n\tAncestorBlockquote\n\tAncestorList\n)\n\nvar (\n\tignorePattern = regexp.MustCompile(\"(?i)comment|community|gallery|caption|description|credit|foot|story-feature|related\")\n)\n\ntype Document struct {\n\tTitle *util.Text \/\/ the <title>...<\/title> text.\n\tChunks []*Chunk \/\/ list of all chunks found in this document\n\n\t\/\/ Unexported fields.\n\troot *html.Node \/\/ the <html>...<\/html> part\n\thead *html.Node \/\/ the <head>...<\/head> part\n\tbody *html.Node \/\/ the <body>...<\/body> part\n\n\t\/\/ State variables used when collectiong chunks.\n\tancestors int \/\/ bitmask which stores ancestor of the current node\n\n\t\/\/ Number of non-space characters inside link tags \/ normal tags\n\t\/\/ per html.ElementNode.\n\tlinkText map[*html.Node]int \/\/ length of text inside <a><\/a> tags\n\tnormText map[*html.Node]int \/\/ length of text outside <a><\/a> tags\n}\n\nfunc NewDocument(r io.Reader) (*Document, error) {\n\tdoc := new(Document)\n\tdoc.Chunks = make([]*Chunk, 0, 512)\n\tif err := doc.Parse(r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn doc, nil\n}\n\nfunc (doc *Document) Parse(r io.Reader) error {\n\troot, err := html.Parse(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Assign the fields root, head and body from the HTML page.\n\tdoc.setNodes(root)\n\n\t\/\/ Check if <html>, <head> and <body> nodes were found.\n\tif doc.root == nil || doc.head == nil || doc.body == nil {\n\t\treturn errors.New(\"Document missing <html>, <head> or <body>.\")\n\t}\n\n\tdoc.parseHead(doc.head)\n\n\t\/\/ If no title was found, detecting the main heading might fail.\n\tif doc.Title == nil {\n\t\tdoc.Title = util.NewText()\n\t}\n\tdoc.linkText = make(map[*html.Node]int)\n\tdoc.normText = make(map[*html.Node]int)\n\n\tdoc.cleanBody(doc.body, 0)\n\tdoc.countText(doc.body, false)\n\tdoc.parseBody(doc.body)\n\n\t\/\/ Now link the chunks.\n\tfor i := range doc.Chunks {\n\t\tif i > 0 {\n\t\t\tdoc.Chunks[i].Prev = doc.Chunks[i-1]\n\t\t}\n\t\tif i < len(doc.Chunks)-1 {\n\t\t\tdoc.Chunks[i].Next = doc.Chunks[i+1]\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Assign the struct fields root, head and body from the HTML tree of node n.\n\/\/ doc.root -> <html>\n\/\/ doc.head -> <head>\n\/\/ doc.body -> <body>\nfunc (doc *Document) setNodes(n *html.Node) {\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tswitch c.Data {\n\t\tcase \"html\":\n\t\t\tdoc.root = c\n\t\t\tdoc.setNodes(c)\n\t\tcase \"body\":\n\t\t\tdoc.body = c\n\t\tcase \"head\":\n\t\t\tdoc.head = c\n\t\t}\n\t}\n}\n\n\/\/ parseHead parses the <head>...<\/head> part of the HTML page. Right now it\n\/\/ only detects the <title>...<\/title>.\nfunc (doc *Document) parseHead(n *html.Node) {\n\tif n.Type == html.ElementNode && n.Data == \"title\" {\n\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\tdoc.Title = chunk.Text\n\t\t}\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tdoc.parseHead(c)\n\t}\n}\n\n\/\/ countText counts the link text and the normal text per html.Node.\n\/\/ \"Link text\" is text inside <a> tags and \"normal text\" is text inside\n\/\/ anything but <a> tags. Of course, counting is done cumulative, so the\n\/\/ numbers of a parent node include the numbers of it's child nodes.\nfunc (doc *Document) countText(n *html.Node, insideLink bool) (linkText int, normText int) {\n\tlinkText = 0\n\tnormText = 0\n\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\tinsideLink = true\n\t}\n\tfor s := n.FirstChild; s != nil; s = s.NextSibling {\n\t\tlinkTextChild, normTextChild := doc.countText(s, insideLink)\n\t\tlinkText += linkTextChild\n\t\tnormText += normTextChild\n\t}\n\tif n.Type == html.TextNode {\n\t\tcount := 0\n\t\tfor _, rune := range n.Data {\n\t\t\tif unicode.IsLetter(rune) {\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t}\n\t\tif insideLink {\n\t\t\tlinkText += count\n\t\t} else {\n\t\t\tnormText += count\n\t\t}\n\t}\n\tdoc.linkText[n] = linkText\n\tdoc.normText[n] = normText\n\treturn\n}\n\n\/\/ cleanBody removes unwanted HTML elements from the HTML body.\nfunc (doc *Document) cleanBody(n *html.Node, level int) {\n\n\t\/\/ removeNode returns true if a node should be removed from HTML document.\n\tremoveNode := func(c *html.Node, level int) bool {\n\t\tswitch c.Data {\n\t\t\/\/ Elements save to ignore.\n\t\tcase \"address\", \"audio\", \"button\", \"canvas\", \"caption\", \"fieldset\",\n\t\t\t\"figcaption\", \"figure\", \"footer\", \"form\", \"frame\", \"iframe\",\n\t\t\t\"map\", \"menu\", \"nav\", \"noscript\", \"object\", \"option\", \"output\",\n\t\t\t\"script\", \"select\", \"style\", \"svg\", \"textarea\", \"video\":\n\t\t\treturn true\n\t\t\/\/ High-level tables might be used to layout the document, so we better\n\t\t\/\/ not ignore them.\n\t\tcase \"table\":\n\t\t\treturn level > 5\n\t\t}\n\t\treturn false\n\t}\n\n\tvar curr *html.Node = n.FirstChild\n\tvar next *html.Node = nil\n\tfor ; curr != nil; curr = next {\n\t\t\/\/ We have to remember the next sibling here becase calling RemoveChild\n\t\t\/\/ sets curr's NextSibling pointer to nil and we would quit the loop\n\t\t\/\/ prematurely.\n\t\tnext = curr.NextSibling\n\t\tif curr.Type == html.ElementNode {\n\t\t\tif removeNode(curr, level) {\n\t\t\t\tn.RemoveChild(curr)\n\t\t\t} else {\n\t\t\t\tdoc.cleanBody(curr, level+1)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ parseBody parses the <body>...<\/body> part of the HTML page. It creates\n\/\/ Chunks for every html.TextNode found in the body.\nfunc (doc *Document) parseBody(n *html.Node) {\n\tswitch n.Type {\n\tcase html.ElementNode:\n\t\t\/\/ We ignore the node if it has some nasty classes\/ids\/itemprobs.\n\t\tfor _, attr := range n.Attr {\n\t\t\tswitch attr.Key {\n\t\t\tcase \"id\", \"class\", \"itemprop\":\n\t\t\t\tif ignorePattern.FindStringIndex(attr.Val) != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tancestorMask := 0\n\t\tswitch n.Data {\n\t\t\/\/ We convert headings and links to text immediately. This is easier\n\t\t\/\/ and feasible because headings and links don't contain many children.\n\t\t\/\/ Descending into these children and handling every TextNode separately\n\t\t\/\/ would make things unnecessary complicated and our results noisy.\n\t\tcase \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\", \"a\":\n\t\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\t\tdoc.Chunks = append(doc.Chunks, chunk)\n\t\t\t}\n\t\t\treturn\n\t\t\/\/ Now mask the element type, but only if it isn't already set.\n\t\t\/\/ If we mask a bit which was already set by one of our callers, we'd also\n\t\t\/\/ clear it at the end of this function, though it actually should be cleared\n\t\t\/\/ by the caller.\n\t\tcase \"article\":\n\t\t\tancestorMask = AncestorArticle &^ doc.ancestors\n\t\tcase \"aside\":\n\t\t\tancestorMask = AncestorAside &^ doc.ancestors\n\t\tcase \"blockquote\":\n\t\t\tancestorMask = AncestorBlockquote &^ doc.ancestors\n\t\tcase \"ul\", \"ol\":\n\t\t\tancestorMask = AncestorList &^ doc.ancestors\n\t\t}\n\t\t\/\/ Add our mask to the ancestor bitmask.\n\t\tdoc.ancestors |= ancestorMask\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tdoc.parseBody(c)\n\t\t}\n\t\t\/\/ Remove our mask from the ancestor bitmask.\n\t\tdoc.ancestors &^= ancestorMask\n\tcase html.TextNode:\n\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\tdoc.Chunks = append(doc.Chunks, chunk)\n\t\t}\n\t}\n}\n\ntype TextStat struct {\n\tWords int\n\tSentences int\n\tCount int\n}\n\n\/\/ GetClassStats groups the document chunks by their classes (defined by the\n\/\/ class attribute of HTML nodes) and calculates TextStats for each class.\nfunc (doc *Document) GetClassStats() map[string]*TextStat {\n\tresult := make(map[string]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tfor _, class := range chunk.Classes {\n\t\t\tif stat, ok := result[class]; ok {\n\t\t\t\tstat.Words += chunk.Text.Words\n\t\t\t\tstat.Sentences += chunk.Text.Sentences\n\t\t\t\tstat.Count += 1\n\t\t\t} else {\n\t\t\t\tresult[class] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ GetClusterStats groups the document chunks by common ancestors and\n\/\/ calculates TextStats for each group of chunks.\nfunc (doc *Document) GetClusterStats() map[*Chunk]*TextStat {\n\t\/\/ Don't ascend further than this constant.\n\tconst maxAncestors = 3\n\n\t\/\/ Count TextStats for Chunk ancestors.\n\tancestorStat := make(map[*html.Node]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tnode, count := chunk.Block, 0\n\t\tfor node != nil && count < maxAncestors {\n\t\t\tif stat, ok := ancestorStat[node]; ok {\n\t\t\t\tstat.Words += chunk.Text.Words\n\t\t\t\tstat.Sentences += chunk.Text.Sentences\n\t\t\t\tstat.Count += 1\n\t\t\t} else {\n\t\t\t\tancestorStat[node] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}\n\t\t\t}\n\t\t\tnode, count = node.Parent, count+1\n\t\t}\n\t}\n\n\t\/\/ Generate result.\n\tresult := make(map[*Chunk]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tnode := chunk.Block\n\t\tif node == nil {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Start with the parent's TextStat. Then ascend and check if the\n\t\t\/\/ current chunk has an ancestor with better stats. Use the best stat\n\t\t\/\/ as result.\n\t\tstat := ancestorStat[node]\n\t\tfor {\n\t\t\tif node = node.Parent; node == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif statPrev, ok := ancestorStat[node]; ok {\n\t\t\t\tif stat.Count < statPrev.Count {\n\t\t\t\t\tstat = statPrev\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tresult[chunk] = stat\n\t}\n\treturn result\n}\n<commit_msg>ignore hide and hidden classes<commit_after>package html\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"errors\"\n\t\"github.com\/slyrz\/newscat\/util\"\n\t\"io\"\n\t\"regexp\"\n\t\"unicode\"\n)\n\nconst (\n\t\/\/ We remember a few special node types when descending into their\n\t\/\/ children.\n\tAncestorArticle = 1 << iota\n\tAncestorAside\n\tAncestorBlockquote\n\tAncestorList\n)\n\nvar (\n\tignorePattern = regexp.MustCompile(\"(?i)comment|community|gallery|caption|description|credit|foot|story-feature|related|hide|hidden\")\n)\n\ntype Document struct {\n\tTitle *util.Text \/\/ the <title>...<\/title> text.\n\tChunks []*Chunk \/\/ list of all chunks found in this document\n\n\t\/\/ Unexported fields.\n\troot *html.Node \/\/ the <html>...<\/html> part\n\thead *html.Node \/\/ the <head>...<\/head> part\n\tbody *html.Node \/\/ the <body>...<\/body> part\n\n\t\/\/ State variables used when collectiong chunks.\n\tancestors int \/\/ bitmask which stores ancestor of the current node\n\n\t\/\/ Number of non-space characters inside link tags \/ normal tags\n\t\/\/ per html.ElementNode.\n\tlinkText map[*html.Node]int \/\/ length of text inside <a><\/a> tags\n\tnormText map[*html.Node]int \/\/ length of text outside <a><\/a> tags\n}\n\nfunc NewDocument(r io.Reader) (*Document, error) {\n\tdoc := new(Document)\n\tdoc.Chunks = make([]*Chunk, 0, 512)\n\tif err := doc.Parse(r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn doc, nil\n}\n\nfunc (doc *Document) Parse(r io.Reader) error {\n\troot, err := html.Parse(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Assign the fields root, head and body from the HTML page.\n\tdoc.setNodes(root)\n\n\t\/\/ Check if <html>, <head> and <body> nodes were found.\n\tif doc.root == nil || doc.head == nil || doc.body == nil {\n\t\treturn errors.New(\"Document missing <html>, <head> or <body>.\")\n\t}\n\n\tdoc.parseHead(doc.head)\n\n\t\/\/ If no title was found, detecting the main heading might fail.\n\tif doc.Title == nil {\n\t\tdoc.Title = util.NewText()\n\t}\n\tdoc.linkText = make(map[*html.Node]int)\n\tdoc.normText = make(map[*html.Node]int)\n\n\tdoc.cleanBody(doc.body, 0)\n\tdoc.countText(doc.body, false)\n\tdoc.parseBody(doc.body)\n\n\t\/\/ Now link the chunks.\n\tfor i := range doc.Chunks {\n\t\tif i > 0 {\n\t\t\tdoc.Chunks[i].Prev = doc.Chunks[i-1]\n\t\t}\n\t\tif i < len(doc.Chunks)-1 {\n\t\t\tdoc.Chunks[i].Next = doc.Chunks[i+1]\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Assign the struct fields root, head and body from the HTML tree of node n.\n\/\/ doc.root -> <html>\n\/\/ doc.head -> <head>\n\/\/ doc.body -> <body>\nfunc (doc *Document) setNodes(n *html.Node) {\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tswitch c.Data {\n\t\tcase \"html\":\n\t\t\tdoc.root = c\n\t\t\tdoc.setNodes(c)\n\t\tcase \"body\":\n\t\t\tdoc.body = c\n\t\tcase \"head\":\n\t\t\tdoc.head = c\n\t\t}\n\t}\n}\n\n\/\/ parseHead parses the <head>...<\/head> part of the HTML page. Right now it\n\/\/ only detects the <title>...<\/title>.\nfunc (doc *Document) parseHead(n *html.Node) {\n\tif n.Type == html.ElementNode && n.Data == \"title\" {\n\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\tdoc.Title = chunk.Text\n\t\t}\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tdoc.parseHead(c)\n\t}\n}\n\n\/\/ countText counts the link text and the normal text per html.Node.\n\/\/ \"Link text\" is text inside <a> tags and \"normal text\" is text inside\n\/\/ anything but <a> tags. Of course, counting is done cumulative, so the\n\/\/ numbers of a parent node include the numbers of it's child nodes.\nfunc (doc *Document) countText(n *html.Node, insideLink bool) (linkText int, normText int) {\n\tlinkText = 0\n\tnormText = 0\n\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\tinsideLink = true\n\t}\n\tfor s := n.FirstChild; s != nil; s = s.NextSibling {\n\t\tlinkTextChild, normTextChild := doc.countText(s, insideLink)\n\t\tlinkText += linkTextChild\n\t\tnormText += normTextChild\n\t}\n\tif n.Type == html.TextNode {\n\t\tcount := 0\n\t\tfor _, rune := range n.Data {\n\t\t\tif unicode.IsLetter(rune) {\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t}\n\t\tif insideLink {\n\t\t\tlinkText += count\n\t\t} else {\n\t\t\tnormText += count\n\t\t}\n\t}\n\tdoc.linkText[n] = linkText\n\tdoc.normText[n] = normText\n\treturn\n}\n\n\/\/ cleanBody removes unwanted HTML elements from the HTML body.\nfunc (doc *Document) cleanBody(n *html.Node, level int) {\n\n\t\/\/ removeNode returns true if a node should be removed from HTML document.\n\tremoveNode := func(c *html.Node, level int) bool {\n\t\tswitch c.Data {\n\t\t\/\/ Elements save to ignore.\n\t\tcase \"address\", \"audio\", \"button\", \"canvas\", \"caption\", \"fieldset\",\n\t\t\t\"figcaption\", \"figure\", \"footer\", \"form\", \"frame\", \"iframe\",\n\t\t\t\"map\", \"menu\", \"nav\", \"noscript\", \"object\", \"option\", \"output\",\n\t\t\t\"script\", \"select\", \"style\", \"svg\", \"textarea\", \"video\":\n\t\t\treturn true\n\t\t\/\/ High-level tables might be used to layout the document, so we better\n\t\t\/\/ not ignore them.\n\t\tcase \"table\":\n\t\t\treturn level > 5\n\t\t}\n\t\treturn false\n\t}\n\n\tvar curr *html.Node = n.FirstChild\n\tvar next *html.Node = nil\n\tfor ; curr != nil; curr = next {\n\t\t\/\/ We have to remember the next sibling here becase calling RemoveChild\n\t\t\/\/ sets curr's NextSibling pointer to nil and we would quit the loop\n\t\t\/\/ prematurely.\n\t\tnext = curr.NextSibling\n\t\tif curr.Type == html.ElementNode {\n\t\t\tif removeNode(curr, level) {\n\t\t\t\tn.RemoveChild(curr)\n\t\t\t} else {\n\t\t\t\tdoc.cleanBody(curr, level+1)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ parseBody parses the <body>...<\/body> part of the HTML page. It creates\n\/\/ Chunks for every html.TextNode found in the body.\nfunc (doc *Document) parseBody(n *html.Node) {\n\tswitch n.Type {\n\tcase html.ElementNode:\n\t\t\/\/ We ignore the node if it has some nasty classes\/ids\/itemprobs.\n\t\tfor _, attr := range n.Attr {\n\t\t\tswitch attr.Key {\n\t\t\tcase \"id\", \"class\", \"itemprop\":\n\t\t\t\tif ignorePattern.FindStringIndex(attr.Val) != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tancestorMask := 0\n\t\tswitch n.Data {\n\t\t\/\/ We convert headings and links to text immediately. This is easier\n\t\t\/\/ and feasible because headings and links don't contain many children.\n\t\t\/\/ Descending into these children and handling every TextNode separately\n\t\t\/\/ would make things unnecessary complicated and our results noisy.\n\t\tcase \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\", \"a\":\n\t\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\t\tdoc.Chunks = append(doc.Chunks, chunk)\n\t\t\t}\n\t\t\treturn\n\t\t\/\/ Now mask the element type, but only if it isn't already set.\n\t\t\/\/ If we mask a bit which was already set by one of our callers, we'd also\n\t\t\/\/ clear it at the end of this function, though it actually should be cleared\n\t\t\/\/ by the caller.\n\t\tcase \"article\":\n\t\t\tancestorMask = AncestorArticle &^ doc.ancestors\n\t\tcase \"aside\":\n\t\t\tancestorMask = AncestorAside &^ doc.ancestors\n\t\tcase \"blockquote\":\n\t\t\tancestorMask = AncestorBlockquote &^ doc.ancestors\n\t\tcase \"ul\", \"ol\":\n\t\t\tancestorMask = AncestorList &^ doc.ancestors\n\t\t}\n\t\t\/\/ Add our mask to the ancestor bitmask.\n\t\tdoc.ancestors |= ancestorMask\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tdoc.parseBody(c)\n\t\t}\n\t\t\/\/ Remove our mask from the ancestor bitmask.\n\t\tdoc.ancestors &^= ancestorMask\n\tcase html.TextNode:\n\t\tif chunk, err := NewChunk(doc, n); err == nil {\n\t\t\tdoc.Chunks = append(doc.Chunks, chunk)\n\t\t}\n\t}\n}\n\ntype TextStat struct {\n\tWords int\n\tSentences int\n\tCount int\n}\n\n\/\/ GetClassStats groups the document chunks by their classes (defined by the\n\/\/ class attribute of HTML nodes) and calculates TextStats for each class.\nfunc (doc *Document) GetClassStats() map[string]*TextStat {\n\tresult := make(map[string]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tfor _, class := range chunk.Classes {\n\t\t\tif stat, ok := result[class]; ok {\n\t\t\t\tstat.Words += chunk.Text.Words\n\t\t\t\tstat.Sentences += chunk.Text.Sentences\n\t\t\t\tstat.Count += 1\n\t\t\t} else {\n\t\t\t\tresult[class] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ GetClusterStats groups the document chunks by common ancestors and\n\/\/ calculates TextStats for each group of chunks.\nfunc (doc *Document) GetClusterStats() map[*Chunk]*TextStat {\n\t\/\/ Don't ascend further than this constant.\n\tconst maxAncestors = 3\n\n\t\/\/ Count TextStats for Chunk ancestors.\n\tancestorStat := make(map[*html.Node]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tnode, count := chunk.Block, 0\n\t\tfor node != nil && count < maxAncestors {\n\t\t\tif stat, ok := ancestorStat[node]; ok {\n\t\t\t\tstat.Words += chunk.Text.Words\n\t\t\t\tstat.Sentences += chunk.Text.Sentences\n\t\t\t\tstat.Count += 1\n\t\t\t} else {\n\t\t\t\tancestorStat[node] = &TextStat{chunk.Text.Words, chunk.Text.Sentences, 1}\n\t\t\t}\n\t\t\tnode, count = node.Parent, count+1\n\t\t}\n\t}\n\n\t\/\/ Generate result.\n\tresult := make(map[*Chunk]*TextStat)\n\tfor _, chunk := range doc.Chunks {\n\t\tnode := chunk.Block\n\t\tif node == nil {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Start with the parent's TextStat. Then ascend and check if the\n\t\t\/\/ current chunk has an ancestor with better stats. Use the best stat\n\t\t\/\/ as result.\n\t\tstat := ancestorStat[node]\n\t\tfor {\n\t\t\tif node = node.Parent; node == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif statPrev, ok := ancestorStat[node]; ok {\n\t\t\t\tif stat.Count < statPrev.Count {\n\t\t\t\t\tstat = statPrev\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tresult[chunk] = stat\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017, Mitchell Cooper\npackage main\n\nimport (\n\twikiclient \"github.com\/cooper\/go-wikiclient\"\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nvar imageRegex = regexp.MustCompile(\"\")\n\n\/\/ master handler\nfunc handleRoot(w http.ResponseWriter, r *http.Request) {\n\tvar delayedWiki wikiInfo\n\n\t\/\/ try each wiki\n\tfor _, wiki := range wikis {\n\n\t\t\/\/ wrong root\n\t\twikiRoot := wiki.conf.Get(\"root.wiki\")\n\t\tif r.URL.Path != wikiRoot && r.URL.Path != wikiRoot+\"\/\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ wrong host\n\t\tif wiki.host != r.Host {\n\n\t\t\t\/\/ if the wiki host is empty, it is the fallback wiki.\n\t\t\t\/\/ delay it until we've checked all other wikis.\n\t\t\tif wiki.host == \"\" && delayedWiki.name == \"\" {\n\t\t\t\tdelayedWiki = wiki\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ host matches\n\t\tif handleMainPage(wiki, w, r) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ try the delayed wiki\n\tif delayedWiki.name != \"\" {\n\t\tif handleMainPage(delayedWiki, w, r) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ anything else is a 404\n\thttp.NotFound(w, r)\n}\n\nfunc handleMainPage(wiki wikiInfo, w http.ResponseWriter, r *http.Request) bool {\n\n\t\/\/ no main page configured\n\tmainPage := wiki.conf.Get(\"main_page\")\n\tif mainPage == \"\" {\n\t\treturn false\n\t}\n\n\thttp.Redirect(w, r, wiki.conf.Get(\"root.page\")+\"\/\"+mainPage, http.StatusMovedPermanently)\n\treturn true\n}\n\n\/\/ page request\nfunc handlePage(wiki wikiInfo, relPath string, w http.ResponseWriter, r *http.Request) {\n\tres, err := wiki.client.DisplayPage(relPath)\n\n\t\/\/ wikiclient error or 'not found' response\n\tif handleError(wiki, err, w, r) || handleError(wiki, res, w, r) {\n\t\treturn\n\t}\n\n\t\/\/ other response\n\tswitch res.Get(\"type\") {\n\n\t\/\/ page redirect\n\tcase \"redirect\":\n\t\thttp.Redirect(w, r, wiki.conf.Get(\"root.page\")+\"\/\"+res.Get(\"name\"), http.StatusMovedPermanently)\n\n\t\/\/ page content\n\tcase \"page\":\n\t\trenderTemplate(wiki, w, \"page\", wikiPage{\n\t\t\tRes: res,\n\t\t\tTitle: res.Get(\"title\"),\n\t\t\tWikiTitle: wiki.title,\n\t\t\tWikiLogo: wiki.template.logo,\n\t\t\tWikiRoot: wiki.conf.Get(\"root.wiki\"),\n\t\t\tStaticRoot: wiki.template.staticRoot,\n\t\t\tnavigation: wiki.conf.GetSlice(\"navigation\"),\n\t\t})\n\t}\n\n\thttp.NotFound(w, r)\n}\n\n\/\/ image request\nfunc handleImage(wiki wikiInfo, relPath string, w http.ResponseWriter, r *http.Request) {\n\tres, err := wiki.client.DisplayImage(relPath, 0, 0)\n\tif handleError(wiki, err, w, r) || handleError(wiki, res, w, r) {\n\t\treturn\n\t}\n\thttp.ServeFile(w, r, res.Get(\"path\"))\n}\n\n\/\/ this is set true when calling handlePage for the error page. this way, if an\n\/\/ error occurs when trying to display the error page, we don't infinitely loop\n\/\/ between handleError and handlePage\nvar useLowLevelError bool\n\nfunc handleError(wiki wikiInfo, errMaybe interface{}, w http.ResponseWriter, r *http.Request) bool {\n\tmsg := \"An unknown error has occurred\"\n\tswitch err := errMaybe.(type) {\n\n\t\/\/ if there's no error, stop\n\tcase nil:\n\t\treturn false\n\n\t\/\/ message of type \"not found\" is an error; otherwise, stop\n\tcase wikiclient.Message:\n\t\tif err.Get(\"type\") != \"not found\" {\n\t\t\treturn false\n\t\t}\n\t\tmsg = err.Get(\"error\")\n\n\t\t\/\/ string\n\tcase string:\n\t\tmsg = err\n\n\t\t\/\/ error\n\tcase error:\n\t\tmsg = err.Error()\n\n\t}\n\n\t\/\/ if we have an error page, use it\n\terrorPage := wiki.conf.Get(\"error_page\")\n\tif !useLowLevelError && errorPage != \"\" {\n\t\tuseLowLevelError = true\n\t\thandlePage(wiki, errorPage, w, r)\n\t\tuseLowLevelError = false\n\t\treturn true\n\t}\n\n\t\/\/ generic error response\n\thttp.Error(w, msg, http.StatusNotFound)\n\treturn true\n}\n\nfunc renderTemplate(wiki wikiInfo, w http.ResponseWriter, templateName string, p wikiPage) {\n\terr := wiki.template.template.ExecuteTemplate(w, templateName+\".tpl\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n<commit_msg>simplify main page checks<commit_after>\/\/ Copyright (c) 2017, Mitchell Cooper\npackage main\n\nimport (\n\twikiclient \"github.com\/cooper\/go-wikiclient\"\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nvar imageRegex = regexp.MustCompile(\"\")\n\n\/\/ master handler\nfunc handleRoot(w http.ResponseWriter, r *http.Request) {\n\tvar delayedWiki wikiInfo\n\n\t\/\/ try each wiki\n\tfor _, wiki := range wikis {\n\n\t\t\/\/ wrong root\n\t\twikiRoot := wiki.conf.Get(\"root.wiki\")\n\t\tif r.URL.Path != wikiRoot && r.URL.Path != wikiRoot+\"\/\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ no main page configured\n\t\tmainPage := wiki.conf.Get(\"main_page\")\n\t\tif mainPage == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ wrong host\n\t\tif wiki.host != r.Host {\n\n\t\t\t\/\/ if the wiki host is empty, it is the fallback wiki.\n\t\t\t\/\/ delay it until we've checked all other wikis.\n\t\t\tif wiki.host == \"\" && delayedWiki.name == \"\" {\n\t\t\t\tdelayedWiki = wiki\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ host matches\n\t\tdelayedWiki = wiki\n\t\tbreak\n\t}\n\n\t\/\/ try the delayed wiki\n\tif delayedWiki.name != \"\" {\n\t\thttp.Redirect(\n\t\t\tw, r,\n\t\t\tdelayedWiki.conf.Get(\"root.page\")+\n\t\t\t\t\"\/\"+delayedWiki.conf.Get(\"main_page\"),\n\t\t\thttp.StatusMovedPermanently,\n\t\t)\n\t\treturn\n\t}\n\n\t\/\/ anything else is a 404\n\thttp.NotFound(w, r)\n}\n\n\/\/ page request\nfunc handlePage(wiki wikiInfo, relPath string, w http.ResponseWriter, r *http.Request) {\n\tres, err := wiki.client.DisplayPage(relPath)\n\n\t\/\/ wikiclient error or 'not found' response\n\tif handleError(wiki, err, w, r) || handleError(wiki, res, w, r) {\n\t\treturn\n\t}\n\n\t\/\/ other response\n\tswitch res.Get(\"type\") {\n\n\t\/\/ page redirect\n\tcase \"redirect\":\n\t\thttp.Redirect(w, r, wiki.conf.Get(\"root.page\")+\"\/\"+res.Get(\"name\"), http.StatusMovedPermanently)\n\n\t\/\/ page content\n\tcase \"page\":\n\t\trenderTemplate(wiki, w, \"page\", wikiPage{\n\t\t\tRes: res,\n\t\t\tTitle: res.Get(\"title\"),\n\t\t\tWikiTitle: wiki.title,\n\t\t\tWikiLogo: wiki.template.logo,\n\t\t\tWikiRoot: wiki.conf.Get(\"root.wiki\"),\n\t\t\tStaticRoot: wiki.template.staticRoot,\n\t\t\tnavigation: wiki.conf.GetSlice(\"navigation\"),\n\t\t})\n\t}\n\n\thttp.NotFound(w, r)\n}\n\n\/\/ image request\nfunc handleImage(wiki wikiInfo, relPath string, w http.ResponseWriter, r *http.Request) {\n\tres, err := wiki.client.DisplayImage(relPath, 0, 0)\n\tif handleError(wiki, err, w, r) || handleError(wiki, res, w, r) {\n\t\treturn\n\t}\n\thttp.ServeFile(w, r, res.Get(\"path\"))\n}\n\n\/\/ this is set true when calling handlePage for the error page. this way, if an\n\/\/ error occurs when trying to display the error page, we don't infinitely loop\n\/\/ between handleError and handlePage\nvar useLowLevelError bool\n\nfunc handleError(wiki wikiInfo, errMaybe interface{}, w http.ResponseWriter, r *http.Request) bool {\n\tmsg := \"An unknown error has occurred\"\n\tswitch err := errMaybe.(type) {\n\n\t\/\/ if there's no error, stop\n\tcase nil:\n\t\treturn false\n\n\t\/\/ message of type \"not found\" is an error; otherwise, stop\n\tcase wikiclient.Message:\n\t\tif err.Get(\"type\") != \"not found\" {\n\t\t\treturn false\n\t\t}\n\t\tmsg = err.Get(\"error\")\n\n\t\t\/\/ string\n\tcase string:\n\t\tmsg = err\n\n\t\t\/\/ error\n\tcase error:\n\t\tmsg = err.Error()\n\n\t}\n\n\t\/\/ if we have an error page, use it\n\terrorPage := wiki.conf.Get(\"error_page\")\n\tif !useLowLevelError && errorPage != \"\" {\n\t\tuseLowLevelError = true\n\t\thandlePage(wiki, errorPage, w, r)\n\t\tuseLowLevelError = false\n\t\treturn true\n\t}\n\n\t\/\/ generic error response\n\thttp.Error(w, msg, http.StatusNotFound)\n\treturn true\n}\n\nfunc renderTemplate(wiki wikiInfo, w http.ResponseWriter, templateName string, p wikiPage) {\n\terr := wiki.template.template.ExecuteTemplate(w, templateName+\".tpl\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"text\/template\"\n)\n\nvar (\n\tTemplateSingleColumnTable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.message}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\n\tTemplateDNATable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.name}}\n\t\t\t\t<\/td>\n\t\t\t\t<td>\n\t\t\t\t\t{{.dna}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\n\tTemplateKawaiiTable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.name}}\n\t\t\t\t<\/td>\n\t\t\t\t<td>\n\t\t\t\t\t{{.face}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\tTemplateSkeleton = `\n<!doctype html>\n<html>\n\t<head>\n\t\t<style type=\"text\/css\"> \n\t\t\ta:visited{\n\t\t\t\tcolor:blue;\n\t\t\t}\n\t\t\tul.nav li {\n\t\t\t\tdisplay:inline;\n\t\t\t}\n\t\t<\/style>\n\t<\/head>\n\t<body>\n\t\tGuest: \n\t\t<ul class=\"nav\">\n\t\t\t\t<li><a href=\"\/kawaii\">Kawaiiface<\/a><\/li>\n\t\t\t\t<li><a href=\"\/dna\">DNA<\/a><\/li>\n\t\t\t\t<li><a href=\"\/joke\">Joke<\/a><\/li>\n\t\t\t\t<li><a href=\"\/rape\">Rape<\/a><\/li>\n\t\t\t\t<li><a href=\"\/kill\">Kill<\/a><\/li>\n\t\t\t\t<li><a href=\"\/factoid\">Factoid<\/a><\/li>\n\t\t\t\t<li><a href=\"\/yomama\">Yomama<\/a><\/li>\n\t\t\t\t<li><a href=\"\/Gelbooru\/Random\">Gelbooru Random<\/a><\/li>\n\t\t\t\t<li><a href=\"\/Files\">Files<\/a><\/li>\n\t\t\t\t<li><a href=\"\/Auth\">Login<\/a><\/li>\n\t\t\t\t\n\t\t\t\t<!--<li><a href=\"\/Irc\">Irc<\/a><\/li>\n\t\t\t\t<li><a href=\"\/Irc\/Latest\">IrcLog<\/a><\/li>--> \n\t\t<\/ul>\n\t\t\n\t\tAdmin: \n\t\t<ul class=\"nav\">\n\t\t\t<li><a href=\"\/FileUpload\">File Upload<\/a><\/li>\n\t\t\t<li><a href=\"\/CreateUser\">CreateUser<\/a><\/li>\n\t\t\t<li><a href=\"\/plugins\">Plugin Manager<\/a><\/li>\n\t\t<\/ul>\n\t\n\t\t{{template \"content\" .}}\n\t<\/body>\n<\/html>`\n\n\tTemplatePluginlist = `\n<h1> Plugin hub <\/h1>\n<table border=\"1\">\n\t<thead>\n\t\t<tr>\n\t\t\t<th>File Name<\/th>\n\t\t\t<th><\/th>\n\t\t\t<th>Functions<\/th>\n\t\t\t<th>Actions<\/th>\n\t\t<\/tr>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t<tr>\n\t\t\t<td>{{.name}}<\/td>\n\t\t\t<td>{{if .file}}File Exists{{end}} {{if .loaded}}Loaded{{end}}<\/td>\n\t\t\t<td>{{range .functions}}{{.}}, {{end}}<\/td>\n\t\t\t<td>\n\t\t\t\t{{if .file}}<a href=\"\/plugins\/edit?name={{.name}}\">Edit<\/a> \n\t\t\t\t<a href=\"\/plugins\/delete?name={{.name}}\">Delete<\/a>\n\t\t\t\t<a href=\"\/plugins\/load?name={{.name}}\">(Re-)load<\/a>{{end}}\n\t\t\t\t{{if .loaded}}\n\t\t\t\t<a href=\"\/plugins\/unload?name={{.name}}\">Unload<\/a>\n\t\t\t\t{{end}}\n\t\t\t<\/td>\n\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n\nCreate new:\n\n<form action=\"\/plugins\/edit\" method=\"GET\">\n\t<input type=\"text\" name=\"name\" \/> <br \/>\n\t<input type=\"submit\" value=\"Create\" \/>\n<\/form>\n`\n\n\tTemplatePluginedit = `\n<script type=\"text\/javascript\" src=\"\/scripts\/ace\/ace.js\"><\/script>\n\t\n<h1> Edit plugin {{.name}} <\/h1>\n<form action=\"\/plugins\/edit\" method=\"POST\" onsubmit=\"document.getElementById('contentT').value = ace.edit('content').getSession().getValue();\">\n\t<input type=\"hidden\" value=\"{{.name}}\" name=\"name\" \/>\n\t<textarea cols=\"100\" rows=\"40\" name=\"content\" id=\"contentT\">{{.content}}<\/textarea> <br \/>\n\t<div id=\"content\" style=\"height: 550px; width: 900px;\"><\/div> <br \/>\n\tSave: <input type=\"text\" name=\"filename\" value=\"{{.name}}\" \/><input type=\"submit\" value=\"Submit\" \/>\n<\/form>\n\n<script type=\"text\/javascript\"> \n\tvar editor = ace.edit(\"content\");\n\tvar textarea = document.getElementById(\"contentT\");\n editor.setTheme(\"ace\/theme\/textmate\");\n editor.getSession().setMode(\"ace\/mode\/javascript\");\n\ttextarea.style.display = \"none\";\n\teditor.getSession().setValue(textarea.value);\n<\/script>\n\n<h2> API: <\/h2>\n\n\t<p>\n\t\t<h3>Defining a function:<\/h3>\n\t\tuse <pre> Subscribe(string trigger, function(sender, source, line, params) handler) <\/pre> to define a function. <br \/>\n\t\tThe following trigger types are defined:\n\t\t\n\t\t<ul>\n\t\t\t<li>!command - These are your bog standard irc commands<\/li>\n\t\t\t<li>privmsg - This will trigger on all messages received<\/li>\n\t\t\t<li>join - Triggers if someone joins the channel<\/li>\n\t\t\t<li>part - Triggers when someone leaves the channel<\/li>\n\t\t\t<li>quit - Triggers when someone disconnects from irc<\/li>\n\t\t\t<li>regex: - Type a regex on which it will trigger after the colon (:)<\/li>\n\t\t<\/ul>\n\t\t\n\t\tThe handler function has the following parameters:\n\t\t\n\t\t<ul>\n\t\t\t<li>sender - This is the nick name of the sender<\/li>\n\t\t\t<li>source - This is either the nick name of the sender on a direct message or the channel the sender posted in<\/li>\n\t\t\t<li>line - Either the complete message or the part after the !command<\/li>\n\t\t\t<li>params - In !commands this is a array of space delimited parameters. Parameters with a space in it can be consolidated with quotes. <br \/>\n\t\t\t\t<pre>!command param param21 \"pa sdf asdfj dflk\"<\/pre>\n\t\t\t\tis THREE parameters. \n\t\t\t\t<ol>\n\t\t\t\t\t<li><pre>param<\/pre><\/li>\n\t\t\t\t\t<li><pre>param21<\/pre>and<\/li>\n\t\t\t\t\t<li><pre>pa sdf asdfj dflk<\/pre><\/li>\n\t\t\t\t<ol>\n\t\t\t\tActual quotes can be escaped using \\. Matching quotes will otherwise be removed.\n\t\t\t<\/li>\n\t\t<\/ul>\t\n\t\t\t\n\t<\/p>\n\n\t<p>\n\t\t<h3>Function definitions<\/h3>\n\t\t\n\t\t<ul>\n\t\t\t<li>IRC\n\t\t\t\t<ul>\n\t\t\t\t\t<li>IRC.Privmsg(destination, message) - sends an IRC message to a channel or a nick<\/li>\n\t\t\t\t\t<li>IRC.Action(channel, message) - sends an \"action\" to a IRC channel<\/li>\n\t\t\t\t\t<li>IRC.Notice(nick, message) - sends a Notice towards a user<\/li>\n\t\t\t\t\t<li>IRC.Channel - Contains the main channel of the bot<\/li>\n\t\t\t\t\t<li>IRC.Server - Contains the current server name<\/li>\n\t\t\t\t\t<li>IRC.Nick - Contains the nick of the bot<\/li>\n\t\t\t\t<\/ul>\n\t\t\t<\/li>\n\t\t\t<li>\n\t\t\t\tDB\n\t\t\t\t<ul>\n\t\t\t\t\t<li>DB.Authenticate(user, pwd) - Authenticates against the user database<\/li>\n\t\t\t\t\t<li>DB.SaveToDb<\/li>\n\t\t\t\t\t<li>DB.ExistsInDB<\/li>\n\t\t\t\t\t<li>DB.GetRandomFromDB<\/li>\n\t\t\t\t\t<li>DB.GetAndDeleteFirstFromDB<\/li>\n\t\t\t\t\t<li>DB.GetNamedFromDB<\/li>\n\t\t\t\t\t<li>DB.SaveNamedToDB<\/li>\n\t\t\t\t<\/ul>\n\t\t\t<\/li>\n\t\t<\/ul>\n\t<\/p>\n`\n\n\tTemplateLogin = `\n\t{{if .}} Error: {{.}} {{end}}\n\t<form method=\"POST\">\n\t\tUser: <input type=\"text\" name=\"username\" \/> <br \/> \n\t\tPassword: <input type=\"password\" name=\"password\" \/> <br \/> \n\t\t<input type=\"submit\" name=\"submit\" value=\"Login\" \/> \n\t<\/form>\n`\n\n\tTemplateCreateUser = `\n\t<form method=\"POST\" action=\"\/CreateUser\">\n\t\tUsername: <input type=\"text\" name=\"username\" \/> <br \/> \n\t\tPassword: <input type=\"text\" name=\"password\" \/> <br \/> \n\t\t<input type=\"submit\" name=\"submit\" value=\"Create\" \/> \n\t<\/form>\n`\n\n\tTemplateFiles = `\n`\n\n\tTemplateFileUpload = `\n\n`\n)\n\nfunc Template(subtmpl string) *template.Template {\n\ttempl := template.New(\"main\")\n\ttempl.Parse(TemplateSkeleton)\n\tcontenttmpl := templ.New(\"content\")\n\tif subtmpl != \"\" {\n\t\tcontenttmpl.Parse(subtmpl)\n\t}\n\treturn templ\n}\n<commit_msg>Remove useless http handlers; template now.<commit_after>package main\n\nimport (\n\t\"text\/template\"\n)\n\nvar (\n\tTemplateSingleColumnTable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.message}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\n\tTemplateDNATable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.name}}\n\t\t\t\t<\/td>\n\t\t\t\t<td>\n\t\t\t\t\t{{.dna}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\n\tTemplateKawaiiTable = `\n<table>\n\t<thead>\n\t\t<tr>\n\t\t\t<th><\/th>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t\t<tr>\n\t\t\t\t<td>\n\t\t\t\t\t{{.name}}\n\t\t\t\t<\/td>\n\t\t\t\t<td>\n\t\t\t\t\t{{.face}}\n\t\t\t\t<\/td>\n\t\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n`\n\tTemplateSkeleton = `\n<!doctype html>\n<html>\n\t<head>\n\t\t<style type=\"text\/css\"> \n\t\t\ta:visited{\n\t\t\t\tcolor:blue;\n\t\t\t}\n\t\t\tul.nav li {\n\t\t\t\tdisplay:inline;\n\t\t\t}\n\t\t<\/style>\n\t<\/head>\n\t<body>\n\t\tGuest: \n\t\t<ul class=\"nav\">\n\t\t\t\t<li><a href=\"\/Files\">Files<\/a><\/li>\n\t\t\t\t<li><a href=\"\/Auth\">Login<\/a><\/li>\n\t\t<\/ul>\n\t\t\n\t\tAdmin: \n\t\t<ul class=\"nav\">\n\t\t\t<li><a href=\"\/FileUpload\">File Upload<\/a><\/li>\n\t\t\t<li><a href=\"\/CreateUser\">CreateUser<\/a><\/li>\n\t\t\t<li><a href=\"\/plugins\">Plugin Manager<\/a><\/li>\n\t\t<\/ul>\n\t\n\t\t{{template \"content\" .}}\n\t<\/body>\n<\/html>`\n\n\tTemplatePluginlist = `\n<h1> Plugin hub <\/h1>\n<table border=\"1\">\n\t<thead>\n\t\t<tr>\n\t\t\t<th>File Name<\/th>\n\t\t\t<th><\/th>\n\t\t\t<th>Functions<\/th>\n\t\t\t<th>Actions<\/th>\n\t\t<\/tr>\n\t<\/thead>\n\t<tbody>\n\t\t{{range .}}\n\t\t<tr>\n\t\t\t<td>{{.name}}<\/td>\n\t\t\t<td>{{if .file}}File Exists{{end}} {{if .loaded}}Loaded{{end}}<\/td>\n\t\t\t<td>{{range .functions}}{{.}}, {{end}}<\/td>\n\t\t\t<td>\n\t\t\t\t{{if .file}}<a href=\"\/plugins\/edit?name={{.name}}\">Edit<\/a> \n\t\t\t\t<a href=\"\/plugins\/delete?name={{.name}}\">Delete<\/a>\n\t\t\t\t<a href=\"\/plugins\/load?name={{.name}}\">(Re-)load<\/a>{{end}}\n\t\t\t\t{{if .loaded}}\n\t\t\t\t<a href=\"\/plugins\/unload?name={{.name}}\">Unload<\/a>\n\t\t\t\t{{end}}\n\t\t\t<\/td>\n\t\t<\/tr>\n\t\t{{end}}\n\t<\/tbody>\n<\/table>\n\nCreate new:\n\n<form action=\"\/plugins\/edit\" method=\"GET\">\n\t<input type=\"text\" name=\"name\" \/> <br \/>\n\t<input type=\"submit\" value=\"Create\" \/>\n<\/form>\n`\n\n\tTemplatePluginedit = `\n<script type=\"text\/javascript\" src=\"\/scripts\/ace\/ace.js\"><\/script>\n\t\n<h1> Edit plugin {{.name}} <\/h1>\n<form action=\"\/plugins\/edit\" method=\"POST\" onsubmit=\"document.getElementById('contentT').value = ace.edit('content').getSession().getValue();\">\n\t<input type=\"hidden\" value=\"{{.name}}\" name=\"name\" \/>\n\t<textarea cols=\"100\" rows=\"40\" name=\"content\" id=\"contentT\">{{.content}}<\/textarea> <br \/>\n\t<div id=\"content\" style=\"height: 550px; width: 900px;\"><\/div> <br \/>\n\tSave: <input type=\"text\" name=\"filename\" value=\"{{.name}}\" \/><input type=\"submit\" value=\"Submit\" \/>\n<\/form>\n\n<script type=\"text\/javascript\"> \n\tvar editor = ace.edit(\"content\");\n\tvar textarea = document.getElementById(\"contentT\");\n editor.setTheme(\"ace\/theme\/textmate\");\n editor.getSession().setMode(\"ace\/mode\/javascript\");\n\ttextarea.style.display = \"none\";\n\teditor.getSession().setValue(textarea.value);\n<\/script>\n\n<h2> API: <\/h2>\n\n\t<p>\n\t\t<h3>Defining a function:<\/h3>\n\t\tuse <pre> Subscribe(string trigger, function(sender, source, line, params) handler) <\/pre> to define a function. <br \/>\n\t\tThe following trigger types are defined:\n\t\t\n\t\t<ul>\n\t\t\t<li>!command - These are your bog standard irc commands<\/li>\n\t\t\t<li>privmsg - This will trigger on all messages received<\/li>\n\t\t\t<li>join - Triggers if someone joins the channel<\/li>\n\t\t\t<li>part - Triggers when someone leaves the channel<\/li>\n\t\t\t<li>quit - Triggers when someone disconnects from irc<\/li>\n\t\t\t<li>regex: - Type a regex on which it will trigger after the colon (:)<\/li>\n\t\t<\/ul>\n\t\t\n\t\tThe handler function has the following parameters:\n\t\t\n\t\t<ul>\n\t\t\t<li>sender - This is the nick name of the sender<\/li>\n\t\t\t<li>source - This is either the nick name of the sender on a direct message or the channel the sender posted in<\/li>\n\t\t\t<li>line - Either the complete message or the part after the !command<\/li>\n\t\t\t<li>params - In !commands this is a array of space delimited parameters. Parameters with a space in it can be consolidated with quotes. <br \/>\n\t\t\t\t<pre>!command param param21 \"pa sdf asdfj dflk\"<\/pre>\n\t\t\t\tis THREE parameters. \n\t\t\t\t<ol>\n\t\t\t\t\t<li><pre>param<\/pre><\/li>\n\t\t\t\t\t<li><pre>param21<\/pre>and<\/li>\n\t\t\t\t\t<li><pre>pa sdf asdfj dflk<\/pre><\/li>\n\t\t\t\t<ol>\n\t\t\t\tActual quotes can be escaped using \\. Matching quotes will otherwise be removed.\n\t\t\t<\/li>\n\t\t<\/ul>\t\n\t\t\t\n\t<\/p>\n\n\t<p>\n\t\t<h3>Function definitions<\/h3>\n\t\t\n\t\t<ul>\n\t\t\t<li>IRC\n\t\t\t\t<ul>\n\t\t\t\t\t<li>IRC.Privmsg(destination, message) - sends an IRC message to a channel or a nick<\/li>\n\t\t\t\t\t<li>IRC.Action(channel, message) - sends an \"action\" to a IRC channel<\/li>\n\t\t\t\t\t<li>IRC.Notice(nick, message) - sends a Notice towards a user<\/li>\n\t\t\t\t\t<li>IRC.Channel - Contains the main channel of the bot<\/li>\n\t\t\t\t\t<li>IRC.Server - Contains the current server name<\/li>\n\t\t\t\t\t<li>IRC.Nick - Contains the nick of the bot<\/li>\n\t\t\t\t<\/ul>\n\t\t\t<\/li>\n\t\t\t<li>\n\t\t\t\tDB\n\t\t\t\t<ul>\n\t\t\t\t\t<li>DB.Authenticate(user, pwd) - Authenticates against the user database<\/li>\n\t\t\t\t\t<li>DB.SaveToDb<\/li>\n\t\t\t\t\t<li>DB.ExistsInDB<\/li>\n\t\t\t\t\t<li>DB.GetRandomFromDB<\/li>\n\t\t\t\t\t<li>DB.GetAndDeleteFirstFromDB<\/li>\n\t\t\t\t\t<li>DB.GetNamedFromDB<\/li>\n\t\t\t\t\t<li>DB.SaveNamedToDB<\/li>\n\t\t\t\t<\/ul>\n\t\t\t<\/li>\n\t\t<\/ul>\n\t<\/p>\n`\n\n\tTemplateLogin = `\n\t{{if .}} Error: {{.}} {{end}}\n\t<form method=\"POST\">\n\t\tUser: <input type=\"text\" name=\"username\" \/> <br \/> \n\t\tPassword: <input type=\"password\" name=\"password\" \/> <br \/> \n\t\t<input type=\"submit\" name=\"submit\" value=\"Login\" \/> \n\t<\/form>\n`\n\n\tTemplateCreateUser = `\n\t<form method=\"POST\" action=\"\/CreateUser\">\n\t\tUsername: <input type=\"text\" name=\"username\" \/> <br \/> \n\t\tPassword: <input type=\"text\" name=\"password\" \/> <br \/> \n\t\t<input type=\"submit\" name=\"submit\" value=\"Create\" \/> \n\t<\/form>\n`\n\n\tTemplateFiles = `\n`\n\n\tTemplateFileUpload = `\n\n`\n)\n\nfunc Template(subtmpl string) *template.Template {\n\ttempl := template.New(\"main\")\n\ttempl.Parse(TemplateSkeleton)\n\tcontenttmpl := templ.New(\"content\")\n\tif subtmpl != \"\" {\n\t\tcontenttmpl.Parse(subtmpl)\n\t}\n\treturn templ\n}\n<|endoftext|>"} {"text":"<commit_before>package hwaflib\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\ntype VcsPackage struct {\n\tType string \/\/ type of remote VCS (svn, git, ...)\n\tRepo string \/\/ remote repository URL\n\tPath string \/\/ path under which the package is locally checked out\n}\n\ntype PackageDb struct {\n\tdb map[string]VcsPackage\n\tfname string \/\/ file name where the db is backed up\n}\n\nfunc NewPackageDb(fname string) *PackageDb {\n\treturn &PackageDb{\n\t\tdb: make(map[string]VcsPackage),\n\t\tfname: fname,\n\t}\n}\n\nfunc (db *PackageDb) Load(fname string) error {\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\terr = json.NewDecoder(f).Decode(&db.db)\n\treturn err\n}\n\nfunc (db *PackageDb) Add(vcs, pkguri, pkgname string) error {\n\t_, has := db.db[pkgname]\n\tif has {\n\t\treturn fmt.Errorf(\"hwaf.pkgdb: package [%s] already in db\", pkgname)\n\t}\n\tdb.db[pkgname] = VcsPackage{vcs, pkguri, pkgname}\n\n\terr := db.sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ FIXME: should this return an error ?\n\texec.Command(\"git\", \"add\", db.fname).Run()\n\texec.Command(\n\t\t\"git\", \"commit\",\n\t\t\"-m\", fmt.Sprintf(\"adding package [%s]\", pkgname),\n\t).Run()\n\treturn nil\n}\n\nfunc (db *PackageDb) Remove(pkgname string) error {\n\t_, ok := db.db[pkgname]\n\tif !ok {\n\t\treturn fmt.Errorf(\"hwaf.pkgdb: package [%s] not in db\", pkgname)\n\t}\n\tdelete(db.db, pkgname)\n\n\terr := db.sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ FIXME: should this return an error ?\n\texec.Command(\"git\", \"add\", db.fname).Run()\n\texec.Command(\n\t\t\"git\", \"commit\",\n\t\t\"-m\", fmt.Sprintf(\"removing package [%s]\", pkgname),\n\t).Run()\n\treturn nil\n}\n\nfunc (db *PackageDb) HasPkg(pkgname string) bool {\n\t_, ok := db.db[pkgname]\n\treturn ok\n}\n\nfunc (db *PackageDb) GetPkg(pkgname string) (VcsPackage, error) {\n\tpkg, ok := db.db[pkgname]\n\tif !ok {\n\t\treturn pkg, fmt.Errorf(\"hwaf.pkgdb: package [%s] not in db\", pkgname)\n\t}\n\treturn pkg, nil\n}\n\n\/\/ Pkgs returns the list of packages this db holds\nfunc (db *PackageDb) Pkgs() []string {\n\tif db == nil || db.db == nil {\n\t\treturn []string{}\n\t}\n\n\tpkgs := make([]string, 0, len(db.db))\n\tfor k, _ := range db.db {\n\t\tpkgs = append(pkgs, k)\n\t}\n\treturn pkgs\n}\n\nfunc (db *PackageDb) sync() error {\n\tif path_exists(db.fname) {\n\t\tos.RemoveAll(db.fname)\n\t}\n\tf, err := os.Create(db.fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = json.NewEncoder(f).Encode(&db.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.Close()\n}\n\n\/\/ EOF\n<commit_msg>pkgdb: commit changes to pkgdb.json to git (more explicit log message)<commit_after>package hwaflib\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\ntype VcsPackage struct {\n\tType string \/\/ type of remote VCS (svn, git, ...)\n\tRepo string \/\/ remote repository URL\n\tPath string \/\/ path under which the package is locally checked out\n}\n\ntype PackageDb struct {\n\tdb map[string]VcsPackage\n\tfname string \/\/ file name where the db is backed up\n}\n\nfunc NewPackageDb(fname string) *PackageDb {\n\treturn &PackageDb{\n\t\tdb: make(map[string]VcsPackage),\n\t\tfname: fname,\n\t}\n}\n\nfunc (db *PackageDb) Load(fname string) error {\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\terr = json.NewDecoder(f).Decode(&db.db)\n\treturn err\n}\n\nfunc (db *PackageDb) Add(vcs, pkguri, pkgname string) error {\n\t_, has := db.db[pkgname]\n\tif has {\n\t\treturn fmt.Errorf(\"hwaf.pkgdb: package [%s] already in db\", pkgname)\n\t}\n\tdb.db[pkgname] = VcsPackage{vcs, pkguri, pkgname}\n\n\terr := db.sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ FIXME: should this return an error ?\n\texec.Command(\"git\", \"add\", db.fname).Run()\n\texec.Command(\n\t\t\"git\", \"commit\",\n\t\t\"-m\", fmt.Sprintf(\"adding package [%s] to %s\", pkgname, db.fname),\n\t).Run()\n\treturn nil\n}\n\nfunc (db *PackageDb) Remove(pkgname string) error {\n\t_, ok := db.db[pkgname]\n\tif !ok {\n\t\treturn fmt.Errorf(\"hwaf.pkgdb: package [%s] not in db\", pkgname)\n\t}\n\tdelete(db.db, pkgname)\n\n\terr := db.sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ FIXME: should this return an error ?\n\texec.Command(\"git\", \"add\", db.fname).Run()\n\texec.Command(\n\t\t\"git\", \"commit\",\n\t\t\"-m\", fmt.Sprintf(\"removing package [%s] from %s\", pkgname, db.fname),\n\t).Run()\n\treturn nil\n}\n\nfunc (db *PackageDb) HasPkg(pkgname string) bool {\n\t_, ok := db.db[pkgname]\n\treturn ok\n}\n\nfunc (db *PackageDb) GetPkg(pkgname string) (VcsPackage, error) {\n\tpkg, ok := db.db[pkgname]\n\tif !ok {\n\t\treturn pkg, fmt.Errorf(\"hwaf.pkgdb: package [%s] not in db\", pkgname)\n\t}\n\treturn pkg, nil\n}\n\n\/\/ Pkgs returns the list of packages this db holds\nfunc (db *PackageDb) Pkgs() []string {\n\tif db == nil || db.db == nil {\n\t\treturn []string{}\n\t}\n\n\tpkgs := make([]string, 0, len(db.db))\n\tfor k, _ := range db.db {\n\t\tpkgs = append(pkgs, k)\n\t}\n\treturn pkgs\n}\n\nfunc (db *PackageDb) sync() error {\n\tif path_exists(db.fname) {\n\t\tos.RemoveAll(db.fname)\n\t}\n\tf, err := os.Create(db.fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = json.NewEncoder(f).Encode(&db.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn f.Close()\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>\/\/ FTP工具类\n\/\/ create by gloomy 2017-4-1 17:32:06\npackage gutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/jlaffaye\/ftp\"\n\t\"time\"\n)\n\n\/\/ FTP帮助类实体\n\/\/ create by gloomy 2017-4-1 17:34:16\ntype FtpHelpStruct struct {\n\tIpAddr string \/\/ ip 地址\n\tPort int \/\/ 端口\n\tTimeOut time.Duration \/\/ 超时时间\n\tUserName string \/\/ 用户名\n\tPassWord string \/\/ 密码\n\tFilePaths string \/\/ 目标服务器路径\n}\n\nconst ftpTimeOut = 60 * time.Minute\n\n\/\/ FTP文件传输\n\/\/ create by gloomy 2017-4-1 17:36:11\n\/\/ FTP配置实体 文件内容 创建目标服务器的文件名\n\/\/ 错误对象\nfunc FtpFileStor(model *FtpHelpStruct, ftpConntion *ftp.ServerConn, contentByte *[]byte, createFilePath string) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Stor(createFilePath, bytes.NewReader(*contentByte))\n}\n\n\/\/ FTP登录\n\/\/ create by gloomy 2017-4-1 17:39:59\n\/\/ 输入参数 FTP配置实体\n\/\/ 输出参数 FTP连接对象 错误对象\nfunc ftpLogin(model *FtpHelpStruct) (*ftp.ServerConn, error) {\n\tif model.TimeOut <= 0 {\n\t\tmodel.TimeOut = ftpTimeOut\n\t}\n\tc, err := ftp.DialTimeout(fmt.Sprintf(\"%s:%d\", model.IpAddr, model.Port), model.TimeOut)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.Login(model.UserName, model.PassWord)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.NoOp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, err\n}\n\n\/\/ FTP文件删除\n\/\/ create by gloomy 2017-04-02 01:08:15\n\/\/ 文件名 ftp配置对象\n\/\/ 错误对象\nfunc FtpRemoveFile(filePathStr string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Delete(filePathStr)\n}\n\n\/\/ ftp修正远程服务器文件名称\n\/\/ create by gloomy 2017-04-04 21:26:48\n\/\/ 源文件 修正后的文件名称 ftp配置对象\n\/\/ 错误对象\nfunc FtpRenameFile(from, to string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Rename(from, to)\n}\n\n\/\/ 获取FTP上所有的文件列表\n\/\/ create by gloomy 2017-04-12 10:43:25\n\/\/ 目录地址\n\/\/ 文件列表集 错误对象\nfunc FtpNameList(pathStr string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) ([]string, error) {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t}\n\treturn ftpConntion.NameList(pathStr)\n}\n\n\/\/ ftp退出\n\/\/ create by gloomy 2017-4-14 11:25:04\nfunc FtpExit(ftpConntion *ftp.ServerConn) {\n\tif ftpConntion != nil {\n\t\tftpConntion.Quit()\n\t}\n}\n<commit_msg>修正ftp程序运行<commit_after>\/\/ FTP工具类\n\/\/ create by gloomy 2017-4-1 17:32:06\npackage gutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/jlaffaye\/ftp\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ FTP帮助类实体\n\/\/ create by gloomy 2017-4-1 17:34:16\ntype FtpHelpStruct struct {\n\tIpAddr string \/\/ ip 地址\n\tPort int \/\/ 端口\n\tTimeOut time.Duration \/\/ 超时时间\n\tUserName string \/\/ 用户名\n\tPassWord string \/\/ 密码\n\tFilePaths string \/\/ 目标服务器路径\n}\n\nconst ftpTimeOut = 60 * time.Minute\n\n\/\/ FTP文件传输\n\/\/ create by gloomy 2017-4-1 17:36:11\n\/\/ FTP配置实体 文件内容 创建目标服务器的文件名\n\/\/ 错误对象\nfunc FtpFileStor(model *FtpHelpStruct, ftpConntion *ftp.ServerConn, contentByte *[]byte, createFilePath string) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Stor(createFilePath, bytes.NewReader(*contentByte))\n}\n\n\/\/ FTP登录\n\/\/ create by gloomy 2017-4-1 17:39:59\n\/\/ 输入参数 FTP配置实体\n\/\/ 输出参数 FTP连接对象 错误对象\nfunc ftpLogin(model *FtpHelpStruct) (*ftp.ServerConn, error) {\n\tif model.TimeOut <= 0 {\n\t\tmodel.TimeOut = ftpTimeOut\n\t}\n\tc, err := ftp.DialTimeout(fmt.Sprintf(\"%s:%d\", model.IpAddr, model.Port), model.TimeOut)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.Login(model.UserName, model.PassWord)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.NoOp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, err\n}\n\n\/\/ FTP文件删除\n\/\/ create by gloomy 2017-04-02 01:08:15\n\/\/ 文件名 ftp配置对象\n\/\/ 错误对象\nfunc FtpRemoveFile(filePathStr string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Delete(filePathStr)\n}\n\n\/\/ ftp修正远程服务器文件名称\n\/\/ create by gloomy 2017-04-04 21:26:48\n\/\/ 源文件 修正后的文件名称 ftp配置对象\n\/\/ 错误对象\nfunc FtpRenameFile(from, to string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) error {\n\tvar (\n\t\terr error\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ftpConntion.Rename(from, to)\n}\n\n\/\/ 获取FTP上所有的文件列表\n\/\/ create by gloomy 2017-04-12 10:43:25\n\/\/ 目录地址\n\/\/ 文件列表集 错误对象\nfunc FtpNameList(pathStr string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct) ([]string, error) {\n\tvar (\n\t\tnameList []string\n\t)\n\terr := ftpList(pathStr, ftpConntion, model, &nameList)\n\treturn nameList, err\n}\n\nfunc ftpList(pathStr string, ftpConntion *ftp.ServerConn, model *FtpHelpStruct, nameListIn *[]string) error {\n\tvar (\n\t\terr error\n\t\tpathDir string\n\t)\n\tif ftpConntion == nil || ftpConntion.NoOp() != nil {\n\t\tftpConntion, err = ftpLogin(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tentryArray, err := ftpConntion.List(pathStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.TrimSpace(pathStr) != \"\" && !strings.HasSuffix(pathStr, \"\/\") {\n\t\tpathStr += \"\/\"\n\t}\n\tfor _, item := range entryArray {\n\t\tpathDir = fmt.Sprintf(\"%s%s\", pathStr, item.Name)\n\t\tif item.Type == 0 {\n\t\t\t*nameListIn = append(*nameListIn, pathDir)\n\t\t} else {\n\t\t\terr = ftpList(pathDir, ftpConntion, model, nameListIn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ftp退出\n\/\/ create by gloomy 2017-4-14 11:25:04\nfunc FtpExit(ftpConntion *ftp.ServerConn) {\n\tif ftpConntion != nil {\n\t\tftpConntion.Quit()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\tginkgo_reporters \"github.com\/onsi\/ginkgo\/reporters\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/flags\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/reporter\"\n\n\t\"kubevirt.io\/kubevirt\/tests\"\n\tqe_reporters \"kubevirt.io\/qe-tools\/pkg\/ginkgo-reporters\"\n\n\t_ \"kubevirt.io\/kubevirt\/tests\/launchsecurity\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/monitoring\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/network\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/numa\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/performance\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/realtime\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/storage\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/virtoperatorbasic\"\n)\n\nvar justAfterEachReporter = []reporter.JustAfterEachReporter{}\n\nfunc TestTests(t *testing.T) {\n\tflags.NormalizeFlags()\n\ttests.CalculateNamespaces()\n\tmaxFails := getMaxFailsFromEnv()\n\tartifactsPath := path.Join(flags.ArtifactsDir, \"k8s-reporter\")\n\tjunitOutput := path.Join(flags.ArtifactsDir, \"junit.functest.xml\")\n\tif qe_reporters.JunitOutput != \"\" {\n\t\tjunitOutput = qe_reporters.JunitOutput\n\t}\n\tif config.GinkgoConfig.ParallelTotal > 1 {\n\t\tartifactsPath = path.Join(artifactsPath, strconv.Itoa(config.GinkgoConfig.ParallelNode))\n\t\tjunitOutput = path.Join(flags.ArtifactsDir, fmt.Sprintf(\"partial.junit.functest.%d.xml\", config.GinkgoConfig.ParallelNode))\n\t}\n\n\toutputEnricherReporter := reporter.NewCapturedOutputEnricher(\n\t\tginkgo_reporters.NewJUnitReporter(junitOutput),\n\t)\n\tk8sReporter := reporter.NewKubernetesReporter(artifactsPath, maxFails)\n\tjustAfterEachReporter = append(justAfterEachReporter, outputEnricherReporter, k8sReporter)\n\treporters := []Reporter{\n\t\toutputEnricherReporter,\n\t\tk8sReporter,\n\t}\n\tif qe_reporters.Polarion.Run {\n\t\treporters = append(reporters, &qe_reporters.Polarion)\n\t}\n\n\tRunSpecsWithDefaultAndCustomReporters(t, \"Tests Suite\", reporters)\n}\n\nvar _ = SynchronizedBeforeSuite(tests.SynchronizedBeforeTestSetup, tests.BeforeTestSuitSetup)\n\nvar _ = SynchronizedAfterSuite(tests.AfterTestSuitCleanup, tests.SynchronizedAfterTestSuiteCleanup)\n\nfunc getMaxFailsFromEnv() int {\n\tmaxFailsEnv := os.Getenv(\"REPORTER_MAX_FAILS\")\n\tif maxFailsEnv == \"\" {\n\t\treturn 10\n\t}\n\n\tmaxFails, err := strconv.Atoi(maxFailsEnv)\n\tif err != nil { \/\/ if the variable is set with a non int value\n\t\tfmt.Println(\"Invalid REPORTER_MAX_FAILS variable, defaulting to 10\")\n\t\treturn 10\n\t}\n\n\treturn maxFails\n}\n\n\/\/ Collect info directly after each `It` execution with our reporters\n\/\/ to collect the state directly after the spec.\nvar _ = JustAfterEach(func() {\n\tfor _, reporter := range justAfterEachReporter {\n\t\treporter.JustAfterEach(CurrentGinkgoTestDescription())\n\t}\n})\n<commit_msg>Pass through docker prefix and tag for vms-generator<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2017 Red Hat, Inc.\n *\n *\/\n\npackage tests_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\tginkgo_reporters \"github.com\/onsi\/ginkgo\/reporters\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/flags\"\n\n\t\"kubevirt.io\/kubevirt\/tests\/reporter\"\n\n\t\"kubevirt.io\/kubevirt\/tests\"\n\tqe_reporters \"kubevirt.io\/qe-tools\/pkg\/ginkgo-reporters\"\n\n\tvmsgeneratorutils \"kubevirt.io\/kubevirt\/tools\/vms-generator\/utils\"\n\n\t_ \"kubevirt.io\/kubevirt\/tests\/launchsecurity\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/monitoring\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/network\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/numa\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/performance\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/realtime\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/storage\"\n\t_ \"kubevirt.io\/kubevirt\/tests\/virtoperatorbasic\"\n)\n\nvar justAfterEachReporter = []reporter.JustAfterEachReporter{}\n\nfunc TestTests(t *testing.T) {\n\tflags.NormalizeFlags()\n\ttests.CalculateNamespaces()\n\tmaxFails := getMaxFailsFromEnv()\n\tartifactsPath := path.Join(flags.ArtifactsDir, \"k8s-reporter\")\n\tjunitOutput := path.Join(flags.ArtifactsDir, \"junit.functest.xml\")\n\tif qe_reporters.JunitOutput != \"\" {\n\t\tjunitOutput = qe_reporters.JunitOutput\n\t}\n\tif config.GinkgoConfig.ParallelTotal > 1 {\n\t\tartifactsPath = path.Join(artifactsPath, strconv.Itoa(config.GinkgoConfig.ParallelNode))\n\t\tjunitOutput = path.Join(flags.ArtifactsDir, fmt.Sprintf(\"partial.junit.functest.%d.xml\", config.GinkgoConfig.ParallelNode))\n\t}\n\n\toutputEnricherReporter := reporter.NewCapturedOutputEnricher(\n\t\tginkgo_reporters.NewJUnitReporter(junitOutput),\n\t)\n\tk8sReporter := reporter.NewKubernetesReporter(artifactsPath, maxFails)\n\tjustAfterEachReporter = append(justAfterEachReporter, outputEnricherReporter, k8sReporter)\n\treporters := []Reporter{\n\t\toutputEnricherReporter,\n\t\tk8sReporter,\n\t}\n\tif qe_reporters.Polarion.Run {\n\t\treporters = append(reporters, &qe_reporters.Polarion)\n\t}\n\n\tvmsgeneratorutils.DockerPrefix = flags.KubeVirtUtilityRepoPrefix\n\tvmsgeneratorutils.DockerTag = flags.KubeVirtVersionTag\n\n\tRunSpecsWithDefaultAndCustomReporters(t, \"Tests Suite\", reporters)\n}\n\nvar _ = SynchronizedBeforeSuite(tests.SynchronizedBeforeTestSetup, tests.BeforeTestSuitSetup)\n\nvar _ = SynchronizedAfterSuite(tests.AfterTestSuitCleanup, tests.SynchronizedAfterTestSuiteCleanup)\n\nfunc getMaxFailsFromEnv() int {\n\tmaxFailsEnv := os.Getenv(\"REPORTER_MAX_FAILS\")\n\tif maxFailsEnv == \"\" {\n\t\treturn 10\n\t}\n\n\tmaxFails, err := strconv.Atoi(maxFailsEnv)\n\tif err != nil { \/\/ if the variable is set with a non int value\n\t\tfmt.Println(\"Invalid REPORTER_MAX_FAILS variable, defaulting to 10\")\n\t\treturn 10\n\t}\n\n\treturn maxFails\n}\n\n\/\/ Collect info directly after each `It` execution with our reporters\n\/\/ to collect the state directly after the spec.\nvar _ = JustAfterEach(func() {\n\tfor _, reporter := range justAfterEachReporter {\n\t\treporter.JustAfterEach(CurrentGinkgoTestDescription())\n\t}\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017, TCN Inc.\n\/\/ All rights reserved.\n\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\n\/\/ * Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above\n\/\/ copyright notice, this list of conditions and the following disclaimer\n\/\/ in the documentation and\/or other materials provided with the\n\/\/ distribution.\n\/\/ * Neither the name of TCN Inc. nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage generator\n\nimport (\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"github.com\/tcncloud\/protoc-gen-persist\/persist\"\n)\n\ntype FileStruct struct {\n\tDesc *descriptor.FileDescriptorProto\n\tImportList *Imports\n\tDependency bool \/\/ if is dependency\n\tStructures *StructList \/\/ all structures in the file\n\tAllStructures *StructList \/\/ all structures in all the files\n\tServiceList *Services\n}\n\nfunc NewFileStruct(desc *descriptor.FileDescriptorProto, allStructs *StructList, dependency bool) *FileStruct {\n\tret := &FileStruct{\n\t\tDesc: desc,\n\t\tImportList: EmptyImportList(),\n\t\tStructures: &StructList{},\n\t\tServiceList: &Services{},\n\t\tAllStructures: allStructs,\n\t\tDependency: dependency,\n\t}\n\treturn ret\n}\n\nfunc (f *FileStruct) GetOrigName() string {\n\treturn f.Desc.GetName()\n}\n\nfunc (f *FileStruct) GetPackageName() string {\n\treturn f.GetImplPackage()\n\t\/\/ return f.Desc.GetPackage()\n}\n\n\/\/ extract the persist.package file option\n\/\/ or return \"\" as default\nfunc (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\tlogrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}\n\nfunc (f *FileStruct) GetImplFileName() string {\n\t_, file := filepath.Split(f.Desc.GetName())\n\treturn strings.Join([]string{\n\t\tf.GetImplDir(),\n\t\tstring(os.PathSeparator),\n\t\tstrings.Replace(file, \".proto\", \".persist.go\", -1),\n\t}, \"\")\n}\n\nfunc (f *FileStruct) GetImplDir() string {\n\tpkg := f.GetPersistPackageOption()\n\tif pkg == \"\" {\n\t\t\/\/ if the persist.package option is not present we will use\n\t\t\/\/ go_pacakge\n\t\tif f.Desc.GetOptions().GetGoPackage() == \"\" {\n\t\t\tpkg = f.Desc.GetOptions().GetGoPackage()\n\t\t} else {\n\t\t\t\/\/ last resort\n\t\t\tpkg = f.Desc.GetPackage()\n\t\t}\n\t}\n\t\/\/ process pkg\n\tif strings.Contains(pkg, \";\") {\n\t\t\/\/ we need to split by \";\"\n\t\tp := strings.Split(pkg, \";\")\n\t\tif len(p) > 2 {\n\t\t\tlogrus.WithField(\"persist package\", pkg).Panic(\"Invalid persist package\")\n\t\t}\n\t\treturn p[0]\n\t}\n\treturn pkg\n}\n\nfunc (f *FileStruct) GetImplPackage() string {\n\tpkg := f.GetPersistPackageOption()\n\tif pkg == \"\" {\n\t\t\/\/ if the persist.package option is not present we will use\n\t\t\/\/ go_pacakge\n\t\tif f.Desc.GetOptions().GetGoPackage() == \"\" {\n\t\t\tpkg = f.Desc.GetOptions().GetGoPackage()\n\t\t} else {\n\t\t\t\/\/ last resort\n\t\t\tpkg = f.Desc.GetPackage()\n\t\t}\n\t}\n\t\/\/ process pkg\n\tif strings.Contains(pkg, \";\") {\n\t\t\/\/ we need to split by \";\"\n\t\tp := strings.Split(pkg, \";\")\n\t\tif len(p) > 2 {\n\t\t\tlogrus.WithField(\"persist package\", pkg).Panic(\"Invalid persist package\")\n\t\t}\n\t\treturn p[len(p)-1]\n\t}\n\n\tif strings.Contains(pkg, \"\/\") {\n\t\t\/\/ return package after last \/\n\t\tp := strings.Split(pkg, \"\/\")\n\t\treturn p[len(p)-1]\n\t}\n\treturn strings.Replace(pkg, \".\", \"_\", -1)\n}\n\n\/\/ return the computed persist file taking in consideration\n\/\/ the persist.package option and original file name\nfunc (f *FileStruct) GetPersistFile() string {\n\treturn \"\"\n}\n\nfunc (f *FileStruct) GetFileName() string {\n\treturn strings.Replace(f.Desc.GetName(), \".proto\", \".persist.go\", -1)\n}\n\nfunc (f *FileStruct) GetServices() *Services {\n\treturn f.ServiceList\n}\n\nfunc (f *FileStruct) GetFullGoPackage() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.Index(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[0:idx]\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) DifferentImpl() bool {\n\treturn f.GetFullGoPackage() != f.GetImplPackage()\n}\n\nfunc (f *FileStruct) GetGoPackage() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[idx+1:]\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \"\/\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \"\/\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[idx+1:]\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) GetGoPath() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[0:idx]\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \"\/\"):\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) ProcessImportsForType(name string) {\n\ttyp := f.AllStructures.GetStructByProtoName(name)\n\tif typ != nil {\n\t\tfor _, file := range *typ.GetImportedFiles() {\n\t\t\tif f.GetPackageName() != file.GetPackageName() {\n\t\t\t\tf.ImportList.GetOrAddImport(file.GetGoPackage(), file.GetGoPath())\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogrus.WithField(\"all structures\", f.AllStructures).Fatalf(\"Can't find structure %s!\", name)\n\t}\n}\n\nfunc (f *FileStruct) ProcessImports() {\n\tlogrus.Debug(\"processing imports for file\")\n\tfor _, srv := range *f.ServiceList {\n\t\tlogrus.Debug(\"is service enabled? %t\", srv.IsServiceEnabled())\n\t\tif srv.IsServiceEnabled() {\n\t\t\tsrv.ProcessImports()\n\t\t\tfor _, m := range *srv.Methods {\n\t\t\t\tf.ProcessImportsForType(m.Desc.GetInputType())\n\t\t\t\tf.ProcessImportsForType(m.Desc.GetOutputType())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (f *FileStruct) Process() error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"GetFileName()\": f.GetFileName(),\n\t}).Debug(\"processing file\")\n\t\/\/ collect file defined messages\n\tfor _, m := range f.Desc.GetMessageType() {\n\t\ts := f.AllStructures.AddMessage(m, nil, f.GetPackageName(), f)\n\t\tf.Structures.Append(s)\n\t}\n\t\/\/ collect file defined enums\n\tfor _, e := range f.Desc.GetEnumType() {\n\t\ts := f.AllStructures.AddEnum(e, nil, f.GetPackageName(), f)\n\t\tf.Structures.Append(s)\n\t}\n\n\tfor _, s := range f.Desc.GetService() {\n\t\tf.ServiceList.AddService(f.GetPackageName(), s, f.AllStructures, f)\n\t}\n\t\/\/return f.ServiceList.Process()\n\treturn nil\n\n}\n\nfunc (f *FileStruct) Generate() ([]byte, error) {\n\terr := f.ServiceList.PreGenerate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogrus.WithField(\"imports\", f.ImportList).Debug(\"import list\")\n\treturn ExecuteFileTemplate(f), nil\n}\n\n\/\/ FileList ----------------\n\ntype FileList []*FileStruct\n\nfunc NewFileList() *FileList {\n\treturn &FileList{}\n}\n\nfunc (fl *FileList) FindFile(desc *descriptor.FileDescriptorProto) *FileStruct {\n\tfor _, f := range *fl {\n\t\tif f.Desc.GetName() == desc.GetName() {\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (fl *FileList) GetOrCreateFile(desc *descriptor.FileDescriptorProto, allStructs *StructList, dependency bool) *FileStruct {\n\tif f := fl.FindFile(desc); f != nil {\n\t\treturn f\n\t}\n\tf := NewFileStruct(desc, allStructs, dependency)\n\t*fl = append(*fl, f)\n\treturn f\n}\n\nfunc (fl *FileList) Process() error {\n\tfor _, file := range *fl {\n\t\terr := file.Process()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (fl *FileList) Append(file *FileStruct) {\n\t*fl = append(*fl, file)\n}\n<commit_msg>removed info log<commit_after>\/\/ Copyright 2017, TCN Inc.\n\/\/ All rights reserved.\n\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\n\/\/ * Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above\n\/\/ copyright notice, this list of conditions and the following disclaimer\n\/\/ in the documentation and\/or other materials provided with the\n\/\/ distribution.\n\/\/ * Neither the name of TCN Inc. nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage generator\n\nimport (\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"github.com\/tcncloud\/protoc-gen-persist\/persist\"\n)\n\ntype FileStruct struct {\n\tDesc *descriptor.FileDescriptorProto\n\tImportList *Imports\n\tDependency bool \/\/ if is dependency\n\tStructures *StructList \/\/ all structures in the file\n\tAllStructures *StructList \/\/ all structures in all the files\n\tServiceList *Services\n}\n\nfunc NewFileStruct(desc *descriptor.FileDescriptorProto, allStructs *StructList, dependency bool) *FileStruct {\n\tret := &FileStruct{\n\t\tDesc: desc,\n\t\tImportList: EmptyImportList(),\n\t\tStructures: &StructList{},\n\t\tServiceList: &Services{},\n\t\tAllStructures: allStructs,\n\t\tDependency: dependency,\n\t}\n\treturn ret\n}\n\nfunc (f *FileStruct) GetOrigName() string {\n\treturn f.Desc.GetName()\n}\n\nfunc (f *FileStruct) GetPackageName() string {\n\treturn f.GetImplPackage()\n\t\/\/ return f.Desc.GetPackage()\n}\n\n\/\/ extract the persist.package file option\n\/\/ or return \"\" as default\nfunc (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\t\/\/logrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}\n\nfunc (f *FileStruct) GetImplFileName() string {\n\t_, file := filepath.Split(f.Desc.GetName())\n\treturn strings.Join([]string{\n\t\tf.GetImplDir(),\n\t\tstring(os.PathSeparator),\n\t\tstrings.Replace(file, \".proto\", \".persist.go\", -1),\n\t}, \"\")\n}\n\nfunc (f *FileStruct) GetImplDir() string {\n\tpkg := f.GetPersistPackageOption()\n\tif pkg == \"\" {\n\t\t\/\/ if the persist.package option is not present we will use\n\t\t\/\/ go_pacakge\n\t\tif f.Desc.GetOptions().GetGoPackage() == \"\" {\n\t\t\tpkg = f.Desc.GetOptions().GetGoPackage()\n\t\t} else {\n\t\t\t\/\/ last resort\n\t\t\tpkg = f.Desc.GetPackage()\n\t\t}\n\t}\n\t\/\/ process pkg\n\tif strings.Contains(pkg, \";\") {\n\t\t\/\/ we need to split by \";\"\n\t\tp := strings.Split(pkg, \";\")\n\t\tif len(p) > 2 {\n\t\t\tlogrus.WithField(\"persist package\", pkg).Panic(\"Invalid persist package\")\n\t\t}\n\t\treturn p[0]\n\t}\n\treturn pkg\n}\n\nfunc (f *FileStruct) GetImplPackage() string {\n\tpkg := f.GetPersistPackageOption()\n\tif pkg == \"\" {\n\t\t\/\/ if the persist.package option is not present we will use\n\t\t\/\/ go_pacakge\n\t\tif f.Desc.GetOptions().GetGoPackage() == \"\" {\n\t\t\tpkg = f.Desc.GetOptions().GetGoPackage()\n\t\t} else {\n\t\t\t\/\/ last resort\n\t\t\tpkg = f.Desc.GetPackage()\n\t\t}\n\t}\n\t\/\/ process pkg\n\tif strings.Contains(pkg, \";\") {\n\t\t\/\/ we need to split by \";\"\n\t\tp := strings.Split(pkg, \";\")\n\t\tif len(p) > 2 {\n\t\t\tlogrus.WithField(\"persist package\", pkg).Panic(\"Invalid persist package\")\n\t\t}\n\t\treturn p[len(p)-1]\n\t}\n\n\tif strings.Contains(pkg, \"\/\") {\n\t\t\/\/ return package after last \/\n\t\tp := strings.Split(pkg, \"\/\")\n\t\treturn p[len(p)-1]\n\t}\n\treturn strings.Replace(pkg, \".\", \"_\", -1)\n}\n\n\/\/ return the computed persist file taking in consideration\n\/\/ the persist.package option and original file name\nfunc (f *FileStruct) GetPersistFile() string {\n\treturn \"\"\n}\n\nfunc (f *FileStruct) GetFileName() string {\n\treturn strings.Replace(f.Desc.GetName(), \".proto\", \".persist.go\", -1)\n}\n\nfunc (f *FileStruct) GetServices() *Services {\n\treturn f.ServiceList\n}\n\nfunc (f *FileStruct) GetFullGoPackage() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.Index(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[0:idx]\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) DifferentImpl() bool {\n\treturn f.GetFullGoPackage() != f.GetImplPackage()\n}\n\nfunc (f *FileStruct) GetGoPackage() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[idx+1:]\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \"\/\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \"\/\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[idx+1:]\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) GetGoPath() string {\n\tif f.Desc.Options != nil && f.Desc.GetOptions().GoPackage != nil {\n\t\tswitch {\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \";\"):\n\t\t\tidx := strings.LastIndex(f.Desc.GetOptions().GetGoPackage(), \";\")\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()[0:idx]\n\t\tcase strings.Contains(f.Desc.GetOptions().GetGoPackage(), \"\/\"):\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\tdefault:\n\t\t\treturn f.Desc.GetOptions().GetGoPackage()\n\t\t}\n\n\t} else {\n\t\treturn strings.Replace(f.Desc.GetPackage(), \".\", \"_\", -1)\n\t}\n}\n\nfunc (f *FileStruct) ProcessImportsForType(name string) {\n\ttyp := f.AllStructures.GetStructByProtoName(name)\n\tif typ != nil {\n\t\tfor _, file := range *typ.GetImportedFiles() {\n\t\t\tif f.GetPackageName() != file.GetPackageName() {\n\t\t\t\tf.ImportList.GetOrAddImport(file.GetGoPackage(), file.GetGoPath())\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogrus.WithField(\"all structures\", f.AllStructures).Fatalf(\"Can't find structure %s!\", name)\n\t}\n}\n\nfunc (f *FileStruct) ProcessImports() {\n\tlogrus.Debug(\"processing imports for file\")\n\tfor _, srv := range *f.ServiceList {\n\t\tlogrus.Debug(\"is service enabled? %t\", srv.IsServiceEnabled())\n\t\tif srv.IsServiceEnabled() {\n\t\t\tsrv.ProcessImports()\n\t\t\tfor _, m := range *srv.Methods {\n\t\t\t\tf.ProcessImportsForType(m.Desc.GetInputType())\n\t\t\t\tf.ProcessImportsForType(m.Desc.GetOutputType())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (f *FileStruct) Process() error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"GetFileName()\": f.GetFileName(),\n\t}).Debug(\"processing file\")\n\t\/\/ collect file defined messages\n\tfor _, m := range f.Desc.GetMessageType() {\n\t\ts := f.AllStructures.AddMessage(m, nil, f.GetPackageName(), f)\n\t\tf.Structures.Append(s)\n\t}\n\t\/\/ collect file defined enums\n\tfor _, e := range f.Desc.GetEnumType() {\n\t\ts := f.AllStructures.AddEnum(e, nil, f.GetPackageName(), f)\n\t\tf.Structures.Append(s)\n\t}\n\n\tfor _, s := range f.Desc.GetService() {\n\t\tf.ServiceList.AddService(f.GetPackageName(), s, f.AllStructures, f)\n\t}\n\t\/\/return f.ServiceList.Process()\n\treturn nil\n\n}\n\nfunc (f *FileStruct) Generate() ([]byte, error) {\n\terr := f.ServiceList.PreGenerate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogrus.WithField(\"imports\", f.ImportList).Debug(\"import list\")\n\treturn ExecuteFileTemplate(f), nil\n}\n\n\/\/ FileList ----------------\n\ntype FileList []*FileStruct\n\nfunc NewFileList() *FileList {\n\treturn &FileList{}\n}\n\nfunc (fl *FileList) FindFile(desc *descriptor.FileDescriptorProto) *FileStruct {\n\tfor _, f := range *fl {\n\t\tif f.Desc.GetName() == desc.GetName() {\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (fl *FileList) GetOrCreateFile(desc *descriptor.FileDescriptorProto, allStructs *StructList, dependency bool) *FileStruct {\n\tif f := fl.FindFile(desc); f != nil {\n\t\treturn f\n\t}\n\tf := NewFileStruct(desc, allStructs, dependency)\n\t*fl = append(*fl, f)\n\treturn f\n}\n\nfunc (fl *FileList) Process() error {\n\tfor _, file := range *fl {\n\t\terr := file.Process()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (fl *FileList) Append(file *FileStruct) {\n\t*fl = append(*fl, file)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc CreateBasePath(filename string) error {\n\tbasePath := filepath.Dir(filename)\n\n\tif err := os.MkdirAll(basePath, 0700); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Creating base path %s: %s\", basePath, err))\n\t}\n\n\treturn nil\n}\n\nfunc GetImportBasePath(absolutePath string) (string, error) {\n\tn := strings.Index(absolutePath, \"\/github.com\/\")\n\tif n == -1 {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"Path github.com not found in %s\", absolutePath))\n\t}\n\n\treturn absolutePath[n+1:], nil\n}\n\nfunc GetCommonImportPath(outputDir string, serviceName string) (string, error) {\n\tbaseSourceDir := outputDir + \"\/\"\n\n\tabsPath, err := filepath.Abs(baseSourceDir)\n\tif err != nil {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"When get abs(%s): %s\", baseSourceDir, err))\n\t}\n\n\timportBasePath, err := GetImportBasePath(absPath)\n\tif err != nil {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"When getting import path from %s: %s\", absPath, err))\n\t}\n\n\tcommonImportPath := importBasePath + \"\/\" + serviceName + \"_common\"\n\n\treturn commonImportPath, nil\n}\n\n\/\/ Conversion based on this table: https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/2.0.md#data-types\n\/\/\n\/\/\tCommon Name\t\ttype\t\t\tformat\t\tComments\n\/\/\t-------------------------------------------------\n\/\/\tinteger\t\t\tinteger\t\tint32\t\tsigned 32 bits\n\/\/\tlong\t\t\t\tinteger\t\tint64\t\tsigned 64 bits\n\/\/\tfloat\t\t\tnumber\t\tfloat\n\/\/\tdouble\t\t\tnumber\t\tdouble\n\/\/\tstring\t\t\tstring\n\/\/\tbyte\t\t\t\tstring\t\tbyte\t\t\tbase64 encoded characters\n\/\/\tbinary\t\t\tstring\t\tbinary\t\tany sequence of octets\n\/\/\tboolean\t\t\tboolean\n\/\/\tdate\t\t\t\tstring\t\tdate\t\t\tAs defined by full-date - RFC3339\n\/\/\tdateTime\t\t\tstring\t\tdate-time\tAs defined by date-time - RFC3339\n\/\/\tpassword\t\t\tstring\t\tpassword\t\tUsed to hint UIs the input needs to be obscured.\n\/\/\nfunc ToGolangType(swaggerType string, swaggerFormat string) string {\n\tgoType := map[string]string{\n\t\t\"integer|int32\": \"int32\",\n\t\t\"integer|int64\": \"int64\",\n\t\t\"integer|\": \"int64\",\n\t\t\"number|float\": \"float32\",\n\t\t\"number|double\": \"float64\",\n\t\t\"number|\": \"float64\",\n\t\t\"string|\": \"string\",\n\t\t\"string|byte\": \"undefined\",\n\t\t\"string|binary\": \"undefined\",\n\t\t\"boolean|\": \"bool\",\n\t\t\"string|date\": \"time.Time\",\n\t\t\"string|date-time\": \"time.Time\",\n\t\t\"string|password\": \"undefined\",\n\t}\n\n\tresult, ok := goType[swaggerType+\"|\"+swaggerFormat]\n\tif ok == false {\n\t\tresult = \"error\"\n\t}\n\n\treturn result\n}\n\nfunc GetPathWithoutParameter(path string) string {\n\t\/\/ Input example: \"\/get_method_4\/{par1}\"\n\t\/\/ Result: \"\/get_method_4\"\n\n\tn := strings.Index(path, \"\/{\")\n\tif n == -1 {\n\t\treturn path\n\t}\n\n\tresult := path[:n]\n\n\treturn result\n}\n<commit_msg>Function to convert CamelCase to snake_format.<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc CreateBasePath(filename string) error {\n\tbasePath := filepath.Dir(filename)\n\n\tif err := os.MkdirAll(basePath, 0700); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"Creating base path %s: %s\", basePath, err))\n\t}\n\n\treturn nil\n}\n\nfunc GetImportBasePath(absolutePath string) (string, error) {\n\tn := strings.Index(absolutePath, \"\/github.com\/\")\n\tif n == -1 {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"Path github.com not found in %s\", absolutePath))\n\t}\n\n\treturn absolutePath[n+1:], nil\n}\n\nfunc GetCommonImportPath(outputDir string, serviceName string) (string, error) {\n\tbaseSourceDir := outputDir + \"\/\"\n\n\tabsPath, err := filepath.Abs(baseSourceDir)\n\tif err != nil {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"When get abs(%s): %s\", baseSourceDir, err))\n\t}\n\n\timportBasePath, err := GetImportBasePath(absPath)\n\tif err != nil {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"When getting import path from %s: %s\", absPath, err))\n\t}\n\n\tcommonImportPath := importBasePath + \"\/\" + serviceName + \"_common\"\n\n\treturn commonImportPath, nil\n}\n\n\/\/ Conversion based on this table: https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/2.0.md#data-types\n\/\/\n\/\/\tCommon Name\t\ttype\t\t\tformat\t\tComments\n\/\/\t-------------------------------------------------\n\/\/\tinteger\t\t\tinteger\t\tint32\t\tsigned 32 bits\n\/\/\tlong\t\t\t\tinteger\t\tint64\t\tsigned 64 bits\n\/\/\tfloat\t\t\tnumber\t\tfloat\n\/\/\tdouble\t\t\tnumber\t\tdouble\n\/\/\tstring\t\t\tstring\n\/\/\tbyte\t\t\t\tstring\t\tbyte\t\t\tbase64 encoded characters\n\/\/\tbinary\t\t\tstring\t\tbinary\t\tany sequence of octets\n\/\/\tboolean\t\t\tboolean\n\/\/\tdate\t\t\t\tstring\t\tdate\t\t\tAs defined by full-date - RFC3339\n\/\/\tdateTime\t\t\tstring\t\tdate-time\tAs defined by date-time - RFC3339\n\/\/\tpassword\t\t\tstring\t\tpassword\t\tUsed to hint UIs the input needs to be obscured.\n\/\/\nfunc ToGolangType(swaggerType string, swaggerFormat string) string {\n\tgoType := map[string]string{\n\t\t\"integer|int32\": \"int32\",\n\t\t\"integer|int64\": \"int64\",\n\t\t\"integer|\": \"int64\",\n\t\t\"number|float\": \"float32\",\n\t\t\"number|double\": \"float64\",\n\t\t\"number|\": \"float64\",\n\t\t\"string|\": \"string\",\n\t\t\"string|byte\": \"undefined\",\n\t\t\"string|binary\": \"undefined\",\n\t\t\"boolean|\": \"bool\",\n\t\t\"string|date\": \"time.Time\",\n\t\t\"string|date-time\": \"time.Time\",\n\t\t\"string|password\": \"undefined\",\n\t}\n\n\tresult, ok := goType[swaggerType+\"|\"+swaggerFormat]\n\tif ok == false {\n\t\tresult = \"error\"\n\t}\n\n\treturn result\n}\n\nfunc GetPathWithoutParameter(path string) string {\n\t\/\/ Input example: \"\/get_method_4\/{par1}\"\n\t\/\/ Result: \"\/get_method_4\"\n\n\tn := strings.Index(path, \"\/{\")\n\tif n == -1 {\n\t\treturn path\n\t}\n\n\tresult := path[:n]\n\n\treturn result\n}\n\nfunc CamelToSnake(text string) string {\n\tresult := \"\"\n\n\tfor _, chr := range text {\n\t\tif chr >= 'A' && chr <= 'Z' && result != \"\" {\n\t\t\tresult += \"_\"\n\t\t\tresult += string(chr + ('a' - 'A'))\n\t\t} else {\n\t\t\tresult += string(chr)\n\t\t}\n\t}\n\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ timeutil provides a set of time utilities including comparisons,\n\/\/ conversion to \"DT8\" int32 and \"DT14\" int64 formats and other\n\/\/ capabilities.\npackage timeutil\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\tDT14 = \"20060102150405\"\n\tDT6 = \"200601\"\n\tDT8 = \"20060102\"\n)\n\n\/\/ IsGreaterThan compares two times and returns true if the left\n\/\/ time is greater than the right time.\nfunc IsGreaterThan(timeLeft time.Time, timeRight time.Time) bool {\n\tdurDelta := timeLeft.Sub(timeRight)\n\tif durZero, _ := time.ParseDuration(\"0ns\"); durDelta > durZero {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ IsLessThan compares two times and returns true if the left\n\/\/ time is less than the right time.\nfunc IsLessThan(timeLeft time.Time, timeRight time.Time) bool {\n\tdurDelta := timeLeft.Sub(timeRight)\n\tif durZero, _ := time.ParseDuration(\"0ns\"); durDelta < durZero {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Dt6ForDt14 returns the Dt6 value for Dt14\nfunc Dt6ForDt14(dt14 int64) int32 {\n\tdt16f := float64(dt14) \/ float64(1000000)\n\treturn int32(dt16f)\n}\n\n\/\/ Dt8Now returns Dt8 value for the current time.\nfunc Dt8Now() int32 {\n\treturn Dt8ForTime(time.Now())\n}\n\n\/\/ Dt8ForString returns a Dt8 value given a layout and value to parse to time.Parse.\nfunc Dt8ForString(layout, value string) (int32, error) {\n\tdt8 := int32(0)\n\tt, err := time.Parse(layout, value)\n\tif err == nil {\n\t\tdt8 = Dt8ForTime(t)\n\t}\n\treturn dt8, err\n}\n\n\/\/ Dt8ForInts returns a Dt8 value for year, month, and day.\nfunc Dt8ForInts(yyyy int, mm int, dd int) int32 {\n\tsDt8 := fmt.Sprintf(\"%04d%02d%02d\", yyyy, mm, dd)\n\tiDt8, _ := strconv.ParseInt(sDt8, 10, 32)\n\treturn int32(iDt8)\n}\n\n\/\/ Dt8ForTime returns a Dt8 value given a time struct.\nfunc Dt8ForTime(t time.Time) int32 {\n\tu := t.UTC()\n\ts := u.Format(DT8)\n\tiDt8, _ := strconv.ParseInt(s, 10, 32)\n\treturn int32(iDt8)\n}\n\n\/\/ TimeForDt8 returns a time.Time value given a Dt8 value.\nfunc TimeForDt8(dt8 int32) (time.Time, error) {\n\treturn time.Parse(DT8, strconv.FormatInt(int64(dt8), 10))\n}\n\n\/\/ DurationForNowSubDt8 returns a duartion struct between a Dt8 value and the current time.\nfunc DurationForNowSubDt8(dt8 int32) (time.Duration, error) {\n\tt, err := TimeForDt8(dt8)\n\tif err != nil {\n\t\tvar d time.Duration\n\t\treturn d, err\n\t}\n\tnow := time.Now()\n\treturn now.Sub(t), nil\n}\n\n\/\/ Dt14Now returns a Dt14 value for the current time.\nfunc Dt14Now() int64 {\n\treturn Dt14ForTime(time.Now())\n}\n\n\/\/ Dt14ForString returns a Dt14 value given a layout and value to parse to time.Parse.\nfunc Dt14ForString(layout, value string) (int64, error) {\n\tdt14 := int64(0)\n\tt, err := time.Parse(layout, value)\n\tif err == nil {\n\t\tdt14 = Dt14ForTime(t)\n\t}\n\treturn dt14, err\n}\n\n\/\/ Dt8ForInts returns a Dt8 value for a UTC year, month, day, hour, minute and second.\nfunc Dt14ForInts(yyyy int, mm int, dd int, hr int, mn int, dy int) int64 {\n\tsDt14 := fmt.Sprintf(\"%04d%02d%02d%02d%02d%02d\", yyyy, mm, dd, hr, mn, dy)\n\tiDt14, _ := strconv.ParseInt(sDt14, 10, 64)\n\treturn int64(iDt14)\n}\n\n\/\/ Dt14ForTime returns a Dt14 value given a time.Time struct.\nfunc Dt14ForTime(t time.Time) int64 {\n\tu := t.UTC()\n\ts := u.Format(DT14)\n\tiDt14, _ := strconv.ParseInt(s, 10, 64)\n\treturn int64(iDt14)\n}\n\n\/\/ TimeForDt14 returns a time.Time value given a Dt14 value.\nfunc TimeForDt14(dt14 int64) (time.Time, error) {\n\treturn time.Parse(DT14, strconv.FormatInt(dt14, 10))\n}\n\nfunc FromTo(timeStringSrc string, fromFormat string, toFormat string) (string, error) {\n\tt, err := time.Parse(fromFormat, timeStringSrc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttimeStringOut := t.Format(toFormat)\n\treturn timeStringOut, nil\n}\n<commit_msg>update docs<commit_after>\/\/ timeutil provides a set of time utilities including comparisons,\n\/\/ conversion to \"DT8\" int32 and \"DT14\" int64 formats and other\n\/\/ capabilities.\npackage timeutil\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\tDT14 = \"20060102150405\"\n\tDT6 = \"200601\"\n\tDT8 = \"20060102\"\n)\n\n\/\/ IsGreaterThan compares two times and returns true if the left\n\/\/ time is greater than the right time.\nfunc IsGreaterThan(timeLeft time.Time, timeRight time.Time) bool {\n\tdurDelta := timeLeft.Sub(timeRight)\n\tif durZero, _ := time.ParseDuration(\"0ns\"); durDelta > durZero {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ IsLessThan compares two times and returns true if the left\n\/\/ time is less than the right time.\nfunc IsLessThan(timeLeft time.Time, timeRight time.Time) bool {\n\tdurDelta := timeLeft.Sub(timeRight)\n\tif durZero, _ := time.ParseDuration(\"0ns\"); durDelta < durZero {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Dt6ForDt14 returns the Dt6 value for Dt14\nfunc Dt6ForDt14(dt14 int64) int32 {\n\tdt16f := float64(dt14) \/ float64(1000000)\n\treturn int32(dt16f)\n}\n\n\/\/ Dt8Now returns Dt8 value for the current time.\nfunc Dt8Now() int32 {\n\treturn Dt8ForTime(time.Now())\n}\n\n\/\/ Dt8ForString returns a Dt8 value given a layout and value to parse to time.Parse.\nfunc Dt8ForString(layout, value string) (int32, error) {\n\tdt8 := int32(0)\n\tt, err := time.Parse(layout, value)\n\tif err == nil {\n\t\tdt8 = Dt8ForTime(t)\n\t}\n\treturn dt8, err\n}\n\n\/\/ Dt8ForInts returns a Dt8 value for year, month, and day.\nfunc Dt8ForInts(yyyy int, mm int, dd int) int32 {\n\tsDt8 := fmt.Sprintf(\"%04d%02d%02d\", yyyy, mm, dd)\n\tiDt8, _ := strconv.ParseInt(sDt8, 10, 32)\n\treturn int32(iDt8)\n}\n\n\/\/ Dt8ForTime returns a Dt8 value given a time struct.\nfunc Dt8ForTime(t time.Time) int32 {\n\tu := t.UTC()\n\ts := u.Format(DT8)\n\tiDt8, _ := strconv.ParseInt(s, 10, 32)\n\treturn int32(iDt8)\n}\n\n\/\/ TimeForDt8 returns a time.Time value given a Dt8 value.\nfunc TimeForDt8(dt8 int32) (time.Time, error) {\n\treturn time.Parse(DT8, strconv.FormatInt(int64(dt8), 10))\n}\n\n\/\/ DurationForNowSubDt8 returns a duartion struct between a Dt8 value and the current time.\nfunc DurationForNowSubDt8(dt8 int32) (time.Duration, error) {\n\tt, err := TimeForDt8(dt8)\n\tif err != nil {\n\t\tvar d time.Duration\n\t\treturn d, err\n\t}\n\tnow := time.Now()\n\treturn now.Sub(t), nil\n}\n\n\/\/ Dt14Now returns a Dt14 value for the current time.\nfunc Dt14Now() int64 {\n\treturn Dt14ForTime(time.Now())\n}\n\n\/\/ Dt14ForString returns a Dt14 value given a layout and value to parse to time.Parse.\nfunc Dt14ForString(layout, value string) (int64, error) {\n\tdt14 := int64(0)\n\tt, err := time.Parse(layout, value)\n\tif err == nil {\n\t\tdt14 = Dt14ForTime(t)\n\t}\n\treturn dt14, err\n}\n\n\/\/ Dt8ForInts returns a Dt8 value for a UTC year, month, day, hour, minute and second.\nfunc Dt14ForInts(yyyy int, mm int, dd int, hr int, mn int, dy int) int64 {\n\tsDt14 := fmt.Sprintf(\"%04d%02d%02d%02d%02d%02d\", yyyy, mm, dd, hr, mn, dy)\n\tiDt14, _ := strconv.ParseInt(sDt14, 10, 64)\n\treturn int64(iDt14)\n}\n\n\/\/ Dt14ForTime returns a Dt14 value given a time.Time struct.\nfunc Dt14ForTime(t time.Time) int64 {\n\tu := t.UTC()\n\ts := u.Format(DT14)\n\tiDt14, _ := strconv.ParseInt(s, 10, 64)\n\treturn int64(iDt14)\n}\n\n\/\/ TimeForDt14 returns a time.Time value given a Dt14 value.\nfunc TimeForDt14(dt14 int64) (time.Time, error) {\n\treturn time.Parse(DT14, strconv.FormatInt(dt14, 10))\n}\n\n\/\/ Reformat a time string from one format to another\nfunc FromTo(timeStringSrc string, fromFormat string, toFormat string) (string, error) {\n\tt, err := time.Parse(fromFormat, timeStringSrc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttimeStringOut := t.Format(toFormat)\n\treturn timeStringOut, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage tools\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\n\t\"github.com\/avast\/retry-go\"\n)\n\nconst (\n\t\/\/ This is private in [go]\/src\/internal\/syscall\/windows\/syscall_windows.go :(\n\tERROR_SHARING_VIOLATION syscall.Errno = 32\n)\n\nfunc underlyingError(err error) error {\n\tswitch err := err.(type) {\n\tcase *os.PathError:\n\t\treturn err.Err\n\tcase *os.LinkError:\n\t\treturn err.Err\n\tcase *os.SyscallError:\n\t\treturn err.Err\n\t}\n\treturn err\n}\n\n\/\/ isEphemeralError returns true if err may be resolved by waiting.\nfunc isEphemeralError(err error) bool {\n\t\/\/ TODO: Use this instead for Go >= 1.13\n\t\/\/ return errors.Is(err, ERROR_SHARING_VIOLATION)\n\n\terr = underlyingError(err)\n\treturn err == ERROR_SHARING_VIOLATION\n}\n\nfunc RobustRename(oldpath, newpath string) error {\n\treturn retry.Do(\n\t\tfunc() error {\n\t\t\treturn os.Rename(oldpath, newpath)\n\t\t},\n\t\tretry.RetryIf(isEphemeralError),\n\t\tretry.LastErrorOnly(true),\n\t)\n}\n\nfunc RobustOpen(name string) (*os.File, error) {\n\tvar result *os.File\n\treturn result, retry.Do(\n\t\tfunc() error {\n\t\t\tf, err := os.Open(name)\n\t\t\tresult = f\n\t\t\treturn err\n\t\t},\n\t\tretry.RetryIf(isEphemeralError),\n\t\tretry.LastErrorOnly(true),\n\t)\n}\n<commit_msg>tools: use ERROR_SHARING_VIOLATION const from golang.org\/x\/sys\/windows<commit_after>\/\/ +build windows\n\npackage tools\n\nimport (\n\t\"os\"\n\n\t\"github.com\/avast\/retry-go\"\n\t\"golang.org\/x\/sys\/windows\"\n)\n\nfunc underlyingError(err error) error {\n\tswitch err := err.(type) {\n\tcase *os.PathError:\n\t\treturn err.Err\n\tcase *os.LinkError:\n\t\treturn err.Err\n\tcase *os.SyscallError:\n\t\treturn err.Err\n\t}\n\treturn err\n}\n\n\/\/ isEphemeralError returns true if err may be resolved by waiting.\nfunc isEphemeralError(err error) bool {\n\t\/\/ TODO: Use this instead for Go >= 1.13\n\t\/\/ return errors.Is(err, windows.ERROR_SHARING_VIOLATION)\n\n\terr = underlyingError(err)\n\treturn err == windows.ERROR_SHARING_VIOLATION\n}\n\nfunc RobustRename(oldpath, newpath string) error {\n\treturn retry.Do(\n\t\tfunc() error {\n\t\t\treturn os.Rename(oldpath, newpath)\n\t\t},\n\t\tretry.RetryIf(isEphemeralError),\n\t\tretry.LastErrorOnly(true),\n\t)\n}\n\nfunc RobustOpen(name string) (*os.File, error) {\n\tvar result *os.File\n\treturn result, retry.Do(\n\t\tfunc() error {\n\t\t\tf, err := os.Open(name)\n\t\t\tresult = f\n\t\t\treturn err\n\t\t},\n\t\tretry.RetryIf(isEphemeralError),\n\t\tretry.LastErrorOnly(true),\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package gitignore\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\ntype assert struct {\n\tpatterns []string\n\tfile file\n\texpect bool\n}\n\ntype file struct {\n\tpath string\n\tisDir bool\n}\n\nfunc TestMatch(t *testing.T) {\n\tasserts := []assert{\n\t\tassert{[]string{\"a.txt\"}, file{\"a.txt\", false}, true},\n\t\tassert{[]string{\"dir\/a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir\/*.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir2\/a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir3\/a.txt\"}, file{\"dir1\/dir2\/dir3\/a.txt\", false}, true},\n\t\tassert{[]string{\"a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir2\/a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir\"}, file{\"dir\", true}, true},\n\t\tassert{[]string{\"dir\/\"}, file{\"dir\", true}, true},\n\t\tassert{[]string{\"dir\/\"}, file{\"dir\", false}, false},\n\t\tassert{[]string{\"dir1\/dir2\/\"}, file{\"dir1\/dir2\", true}, true},\n\t\tassert{[]string{\"\/a.txt\"}, file{\"a.txt\", false}, true},\n\t\tassert{[]string{\"\/dir\/a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"\/dir1\/a.txt\"}, file{\"dir\/dir1\/a.txt\", false}, false},\n\t\tassert{[]string{\"\/a.txt\"}, file{\"dir\/a.txt\", false}, false},\n\t\tassert{[]string{\"a.txt\", \"b.txt\"}, file{\"dir\/b.txt\", false}, true},\n\t\tassert{[]string{\"*.txt\", \"!b.txt\"}, file{\"dir\/b.txt\", false}, false},\n\t\tassert{[]string{\"dir\/*.txt\", \"!dir\/b.txt\"}, file{\"dir\/b.txt\", false}, false},\n\t\tassert{[]string{\"dir\/*.txt\", \"!\/b.txt\"}, file{\"dir\/b.txt\", false}, true},\n\t}\n\n\tfor _, assert := range asserts {\n\t\tgi := NewGitIgnoreFromReader(\".\", strings.NewReader(strings.Join(assert.patterns, \"\\n\")))\n\t\tresult := gi.Match(assert.file.path, assert.file.isDir)\n\t\tif result != assert.expect {\n\t\t\tt.Errorf(\"Match should return %t, got %t on %v\", assert.expect, result, assert)\n\t\t}\n\t}\n}\n<commit_msg>Add pattern which starts with glob tests<commit_after>package gitignore\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\ntype assert struct {\n\tpatterns []string\n\tfile file\n\texpect bool\n}\n\ntype file struct {\n\tpath string\n\tisDir bool\n}\n\nfunc TestMatch(t *testing.T) {\n\tasserts := []assert{\n\t\tassert{[]string{\"a.txt\"}, file{\"a.txt\", false}, true},\n\t\tassert{[]string{\"*.txt\"}, file{\"a.txt\", false}, true},\n\t\tassert{[]string{\"dir\/a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir\/*.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir2\/a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir3\/a.txt\"}, file{\"dir1\/dir2\/dir3\/a.txt\", false}, true},\n\t\tassert{[]string{\"a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"*.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir2\/a.txt\"}, file{\"dir1\/dir2\/a.txt\", false}, true},\n\t\tassert{[]string{\"dir\"}, file{\"dir\", true}, true},\n\t\tassert{[]string{\"dir\/\"}, file{\"dir\", true}, true},\n\t\tassert{[]string{\"dir\/\"}, file{\"dir\", false}, false},\n\t\tassert{[]string{\"dir1\/dir2\/\"}, file{\"dir1\/dir2\", true}, true},\n\t\tassert{[]string{\"\/a.txt\"}, file{\"a.txt\", false}, true},\n\t\tassert{[]string{\"\/dir\/a.txt\"}, file{\"dir\/a.txt\", false}, true},\n\t\tassert{[]string{\"\/dir1\/a.txt\"}, file{\"dir\/dir1\/a.txt\", false}, false},\n\t\tassert{[]string{\"\/a.txt\"}, file{\"dir\/a.txt\", false}, false},\n\t\tassert{[]string{\"a.txt\", \"b.txt\"}, file{\"dir\/b.txt\", false}, true},\n\t\tassert{[]string{\"*.txt\", \"!b.txt\"}, file{\"dir\/b.txt\", false}, false},\n\t\tassert{[]string{\"dir\/*.txt\", \"!dir\/b.txt\"}, file{\"dir\/b.txt\", false}, false},\n\t\tassert{[]string{\"dir\/*.txt\", \"!\/b.txt\"}, file{\"dir\/b.txt\", false}, true},\n\t}\n\n\tfor _, assert := range asserts {\n\t\tgi := NewGitIgnoreFromReader(\".\", strings.NewReader(strings.Join(assert.patterns, \"\\n\")))\n\t\tresult := gi.Match(assert.file.path, assert.file.isDir)\n\t\tif result != assert.expect {\n\t\t\tt.Errorf(\"Match should return %t, got %t on %v\", assert.expect, result, assert)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package runtime_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc checkGdbPython(t *testing.T) {\n\tcmd := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\", \"python import sys; print('go gdb python support')\")\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running gdb: %v\", err)\n\t}\n\tif string(out) != \"go gdb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python gdb support: %s\", out)\n\t}\n}\n\nconst helloSource = `\npackage main\nimport \"fmt\"\nfunc finish() {\n\tfmt.Println(\"hi\")\n}\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tfinish()\n}\n`\n\nfunc TestGdbPython(t *testing.T) {\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"gdb does not work on darwin\")\n\t}\n\tif strings.HasPrefix(runtime.GOARCH, \"ppc64\") {\n\t\tt.Skip(\"gdb does not work on ppc64 - issue 10017\")\n\t}\n\n\tif runtime.GOOS == \"linux\" && runtime.GOARCH == \"arm\" {\n\t\tt.Skip(\"issue 10002\")\n\t}\n\n\tcheckGdbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(helloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tgot, _ := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\",\n\t\tfmt.Sprintf(\"add-auto-load-safe-path %s\/src\/runtime\", runtime.GOROOT()),\n\t\t\"-ex\", \"br 'main.finish'\",\n\t\t\"-ex\", \"run\",\n\t\t\"-ex\", \"up\",\n\t\t\"-ex\", \"echo BEGIN info goroutines\\n\",\n\t\t\"-ex\", \"info goroutines\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\t\"-ex\", \"echo BEGIN print mapvar\\n\",\n\t\t\"-ex\", \"print mapvar\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\tfilepath.Join(dir, \"a.exe\")).CombinedOutput()\n\n\tfirstLine := bytes.SplitN(got, []byte(\"\\n\"), 2)[0]\n\tif string(firstLine) != \"Loading Go Runtime support.\" {\n\t\tt.Fatalf(\"failed to load Go runtime support: %s\", firstLine)\n\t}\n\n\t\/\/ Extract named BEGIN...END blocks from output\n\tpartRe := regexp.MustCompile(`(?ms)^BEGIN ([^\\n]*)\\n(.*?)\\nEND`)\n\tblocks := map[string]string{}\n\tfor _, subs := range partRe.FindAllSubmatch(got, -1) {\n\t\tblocks[string(subs[1])] = string(subs[2])\n\t}\n\n\tinfoGoroutinesRe := regexp.MustCompile(`\\*\\s+\\d+\\s+running\\s+`)\n\tif bl := blocks[\"info goroutines\"]; !infoGoroutinesRe.MatchString(bl) {\n\t\tt.Fatalf(\"info goroutines failed: %s\", bl)\n\t}\n\n\tprintMapvarRe := regexp.MustCompile(`\\Q = map[string]string = {[\"abc\"] = \"def\", [\"ghi\"] = \"jkl\"}\\E$`)\n\tif bl := blocks[\"print mapvar\"]; !printMapvarRe.MatchString(bl) {\n\t\tt.Fatalf(\"print mapvar failed: %s\", bl)\n\t}\n}\n<commit_msg>runtime: TestGdbPython 'print mapvar' should not need unwinding<commit_after>package runtime_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nfunc checkGdbPython(t *testing.T) {\n\tcmd := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\", \"python import sys; print('go gdb python support')\")\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running gdb: %v\", err)\n\t}\n\tif string(out) != \"go gdb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python gdb support: %s\", out)\n\t}\n}\n\nconst helloSource = `\npackage main\nimport \"fmt\"\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tfmt.Println(\"hi\") \/\/ line 8\n}\n`\n\nfunc TestGdbPython(t *testing.T) {\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"gdb does not work on darwin\")\n\t}\n\n\tcheckGdbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(helloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tgot, _ := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\",\n\t\tfmt.Sprintf(\"add-auto-load-safe-path %s\/src\/runtime\", runtime.GOROOT()),\n\t\t\"-ex\", \"br main.go:8\",\n\t\t\"-ex\", \"run\",\n\t\t\"-ex\", \"echo BEGIN info goroutines\\n\",\n\t\t\"-ex\", \"info goroutines\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\t\"-ex\", \"echo BEGIN print mapvar\\n\",\n\t\t\"-ex\", \"print mapvar\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\tfilepath.Join(dir, \"a.exe\")).CombinedOutput()\n\n\tfirstLine := bytes.SplitN(got, []byte(\"\\n\"), 2)[0]\n\tif string(firstLine) != \"Loading Go Runtime support.\" {\n\t\tt.Fatalf(\"failed to load Go runtime support: %s\", firstLine)\n\t}\n\n\t\/\/ Extract named BEGIN...END blocks from output\n\tpartRe := regexp.MustCompile(`(?ms)^BEGIN ([^\\n]*)\\n(.*?)\\nEND`)\n\tblocks := map[string]string{}\n\tfor _, subs := range partRe.FindAllSubmatch(got, -1) {\n\t\tblocks[string(subs[1])] = string(subs[2])\n\t}\n\n\tinfoGoroutinesRe := regexp.MustCompile(`\\*\\s+\\d+\\s+running\\s+`)\n\tif bl := blocks[\"info goroutines\"]; !infoGoroutinesRe.MatchString(bl) {\n\t\tt.Fatalf(\"info goroutines failed: %s\", bl)\n\t}\n\n\tprintMapvarRe := regexp.MustCompile(`\\Q = map[string]string = {[\"abc\"] = \"def\", [\"ghi\"] = \"jkl\"}\\E$`)\n\tif bl := blocks[\"print mapvar\"]; !printMapvarRe.MatchString(bl) {\n\t\tt.Fatalf(\"print mapvar failed: %s\", bl)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package runtime_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nfunc checkGdbPython(t *testing.T) {\n\tcmd := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\", \"python import sys; print('go gdb python support')\")\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running gdb: %v\", err)\n\t}\n\tif string(out) != \"go gdb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python gdb support: %s\", out)\n\t}\n}\n\nconst helloSource = `\npackage main\nimport \"fmt\"\nfunc finish() {\n\tfmt.Println(\"hi\")\n}\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tfinish()\n}\n`\n\nfunc TestGdbPython(t *testing.T) {\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"gdb does not work on darwin\")\n\t}\n\n\tif runtime.GOOS == \"linux\" && runtime.GOARCH == \"arm\" {\n\t\tt.Skip(\"issue 10002\")\n\t}\n\n\tcheckGdbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(helloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tgot, _ := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\",\n\t\tfmt.Sprintf(\"add-auto-load-safe-path %s\/src\/runtime\", runtime.GOROOT()),\n\t\t\"-ex\", \"br 'main.finish'\",\n\t\t\"-ex\", \"run\",\n\t\t\"-ex\", \"up\",\n\t\t\"-ex\", \"echo BEGIN info goroutines\\n\",\n\t\t\"-ex\", \"info goroutines\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\t\"-ex\", \"echo BEGIN print mapvar\\n\",\n\t\t\"-ex\", \"print mapvar\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\tfilepath.Join(dir, \"a.exe\")).CombinedOutput()\n\n\tfirstLine := bytes.SplitN(got, []byte(\"\\n\"), 2)[0]\n\tif string(firstLine) != \"Loading Go Runtime support.\" {\n\t\tt.Fatalf(\"failed to load Go runtime support: %s\", firstLine)\n\t}\n\n\t\/\/ Extract named BEGIN...END blocks from output\n\tpartRe := regexp.MustCompile(`(?ms)^BEGIN ([^\\n]*)\\n(.*?)\\nEND`)\n\tblocks := map[string]string{}\n\tfor _, subs := range partRe.FindAllSubmatch(got, -1) {\n\t\tblocks[string(subs[1])] = string(subs[2])\n\t}\n\n\tinfoGoroutinesRe := regexp.MustCompile(`\\*\\s+\\d+\\s+running\\s+`)\n\tif bl := blocks[\"info goroutines\"]; !infoGoroutinesRe.MatchString(bl) {\n\t\tt.Fatalf(\"info goroutines failed: %s\", bl)\n\t}\n\n\tprintMapvarRe := regexp.MustCompile(`\\Q = map[string]string = {[\"abc\"] = \"def\", [\"ghi\"] = \"jkl\"}\\E$`)\n\tif bl := blocks[\"print mapvar\"]; !printMapvarRe.MatchString(bl) {\n\t\tt.Fatalf(\"print mapvar failed: %s\", bl)\n\t}\n}\n<commit_msg>runtime: disable TestGdbPython on ppc64<commit_after>package runtime_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc checkGdbPython(t *testing.T) {\n\tcmd := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\", \"python import sys; print('go gdb python support')\")\n\tout, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\tt.Skipf(\"skipping due to issue running gdb: %v\", err)\n\t}\n\tif string(out) != \"go gdb python support\\n\" {\n\t\tt.Skipf(\"skipping due to lack of python gdb support: %s\", out)\n\t}\n}\n\nconst helloSource = `\npackage main\nimport \"fmt\"\nfunc finish() {\n\tfmt.Println(\"hi\")\n}\nfunc main() {\n\tmapvar := make(map[string]string,5)\n\tmapvar[\"abc\"] = \"def\"\n\tmapvar[\"ghi\"] = \"jkl\"\n\tfinish()\n}\n`\n\nfunc TestGdbPython(t *testing.T) {\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"gdb does not work on darwin\")\n\t}\n\tif strings.HasPrefix(runtime.GOARCH, \"ppc64\") {\n\t\tt.Skip(\"gdb does not work on ppc64 - issue 10017\")\n\t}\n\n\tif runtime.GOOS == \"linux\" && runtime.GOARCH == \"arm\" {\n\t\tt.Skip(\"issue 10002\")\n\t}\n\n\tcheckGdbPython(t)\n\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tsrc := filepath.Join(dir, \"main.go\")\n\terr = ioutil.WriteFile(src, []byte(helloSource), 0644)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create file: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"a.exe\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"building source %v\\n%s\", err, out)\n\t}\n\n\tgot, _ := exec.Command(\"gdb\", \"-nx\", \"-q\", \"--batch\", \"-iex\",\n\t\tfmt.Sprintf(\"add-auto-load-safe-path %s\/src\/runtime\", runtime.GOROOT()),\n\t\t\"-ex\", \"br 'main.finish'\",\n\t\t\"-ex\", \"run\",\n\t\t\"-ex\", \"up\",\n\t\t\"-ex\", \"echo BEGIN info goroutines\\n\",\n\t\t\"-ex\", \"info goroutines\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\t\"-ex\", \"echo BEGIN print mapvar\\n\",\n\t\t\"-ex\", \"print mapvar\",\n\t\t\"-ex\", \"echo END\\n\",\n\t\tfilepath.Join(dir, \"a.exe\")).CombinedOutput()\n\n\tfirstLine := bytes.SplitN(got, []byte(\"\\n\"), 2)[0]\n\tif string(firstLine) != \"Loading Go Runtime support.\" {\n\t\tt.Fatalf(\"failed to load Go runtime support: %s\", firstLine)\n\t}\n\n\t\/\/ Extract named BEGIN...END blocks from output\n\tpartRe := regexp.MustCompile(`(?ms)^BEGIN ([^\\n]*)\\n(.*?)\\nEND`)\n\tblocks := map[string]string{}\n\tfor _, subs := range partRe.FindAllSubmatch(got, -1) {\n\t\tblocks[string(subs[1])] = string(subs[2])\n\t}\n\n\tinfoGoroutinesRe := regexp.MustCompile(`\\*\\s+\\d+\\s+running\\s+`)\n\tif bl := blocks[\"info goroutines\"]; !infoGoroutinesRe.MatchString(bl) {\n\t\tt.Fatalf(\"info goroutines failed: %s\", bl)\n\t}\n\n\tprintMapvarRe := regexp.MustCompile(`\\Q = map[string]string = {[\"abc\"] = \"def\", [\"ghi\"] = \"jkl\"}\\E$`)\n\tif bl := blocks[\"print mapvar\"]; !printMapvarRe.MatchString(bl) {\n\t\tt.Fatalf(\"print mapvar failed: %s\", bl)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fs\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nvar ErrSpaceNotEnough = fmt.Errorf(\"buffer space is not enough\")\n\ntype Cobuffer struct {\n\tbuffer []byte\n\toffset int32\n\trw sync.RWMutex\n\tmaxSize int\n\tflushChan chan struct{}\n}\n\nfunc NewCobuffer(n int, maxSize int) *Cobuffer {\n\treturn &Cobuffer{\n\t\tbuffer: make([]byte, n),\n\t\tflushChan: make(chan struct{}, 1),\n\t}\n}\n\nfunc (c *Cobuffer) grow() bool {\n\tsuccess := false\n\tc.rw.Lock()\n\tif len(c.buffer) >= c.maxSize {\n\t\tgoto exit\n\t}\n\tc.buffer = append(c.buffer, 0)\n\tsuccess = true\n\nexit:\n\tc.rw.Unlock()\n\treturn success\n}\n\nfunc (c *Cobuffer) Flush() {\n\tselect {\n\tcase c.flushChan <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (c *Cobuffer) IsFlush() chan struct{} {\n\treturn c.flushChan\n}\n\nfunc (c *Cobuffer) GetData() []byte {\n\tc.rw.Lock()\n\tn := int(c.offset)\n\tbuf := make([]byte, n)\n\n\tcopy(buf, c.buffer)\n\tc.offset = 0\n\tc.rw.Unlock()\n\treturn buf\n}\n\nfunc (c *Cobuffer) WriteData(b []byte) {\n\tfor {\n\t\tif c.writeData(b) {\n\t\t\treturn\n\t\t}\n\t\tif !c.grow() {\n\t\t\tselect {\n\t\t\tcase c.flushChan <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n}\n\nfunc (c *Cobuffer) writeData(b []byte) bool {\n\tsuccess := false\n\n\tc.rw.RLock()\n\n\tnewOff := atomic.AddInt32(&c.offset, int32(len(b)))\n\tif newOff >= int32(len(c.buffer)) {\n\t\tatomic.AddInt32(&c.offset, -int32(len(b)))\n\t\tgoto exit\n\t}\n\n\tcopy(c.buffer[newOff-int32(len(b)):newOff], b)\n\tsuccess = true\n\nexit:\n\tc.rw.RUnlock()\n\treturn success\n}\n\nfunc (c *Cobuffer) Close() {\n\tclose(c.flushChan)\n}\n<commit_msg>[fs] cobuffer: add write notify<commit_after>package fs\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nvar ErrSpaceNotEnough = fmt.Errorf(\"buffer space is not enough\")\n\ntype Cobuffer struct {\n\tbuffer []byte\n\toffset int32\n\trw sync.RWMutex\n\tmaxSize int\n\tflushChan chan struct{}\n\n\twriteChan chan struct{}\n\twriteChanSent int32\n}\n\nfunc NewCobuffer(n int, maxSize int) *Cobuffer {\n\treturn &Cobuffer{\n\t\tbuffer: make([]byte, n),\n\t\tflushChan: make(chan struct{}, 1),\n\t\twriteChan: make(chan struct{}, 1),\n\t}\n}\n\nfunc (c *Cobuffer) grow() bool {\n\tsuccess := false\n\tc.rw.Lock()\n\tif len(c.buffer) >= c.maxSize {\n\t\tgoto exit\n\t}\n\tc.buffer = append(c.buffer, 0)\n\tsuccess = true\n\nexit:\n\tc.rw.Unlock()\n\treturn success\n}\n\nfunc (c *Cobuffer) Flush() {\n\tselect {\n\tcase c.flushChan <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (c *Cobuffer) IsWritten() <-chan struct{} {\n\treturn c.writeChan\n}\n\nfunc (c *Cobuffer) IsFlush() <-chan struct{} {\n\treturn c.flushChan\n}\n\nfunc (c *Cobuffer) GetData() []byte {\n\tc.rw.Lock()\n\tn := int(c.offset)\n\tbuf := make([]byte, n)\n\n\tcopy(buf, c.buffer)\n\tc.offset = 0\n\n\tatomic.StoreInt32(&c.writeChanSent, 0)\n\tc.rw.Unlock()\n\treturn buf\n}\n\nfunc (c *Cobuffer) WriteData(b []byte) {\n\tfor {\n\t\tif c.writeData(b) {\n\t\t\treturn\n\t\t}\n\t\tif !c.grow() {\n\t\t\tselect {\n\t\t\tcase c.flushChan <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n}\n\nfunc (c *Cobuffer) writeData(b []byte) bool {\n\tsuccess := false\n\n\tc.rw.RLock()\n\n\tnewOff := atomic.AddInt32(&c.offset, int32(len(b)))\n\tif newOff >= int32(len(c.buffer)) {\n\t\tatomic.AddInt32(&c.offset, -int32(len(b)))\n\t\tgoto exit\n\t}\n\n\tcopy(c.buffer[newOff-int32(len(b)):newOff], b)\n\tsuccess = true\n\n\tif atomic.CompareAndSwapInt32(&c.writeChanSent, 0, 1) {\n\t\tselect {\n\t\tcase c.writeChan <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n\nexit:\n\tc.rw.RUnlock()\n\treturn success\n}\n\nfunc (c *Cobuffer) Close() {\n\tclose(c.flushChan)\n\tclose(c.writeChan)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements the Check function, which drives type-checking.\n\npackage types\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\n\t\"code.google.com\/p\/go.tools\/go\/exact\"\n)\n\n\/\/ debugging\/development support\nconst (\n\tdebug = false \/\/ leave on during development\n\ttrace = false \/\/ turn on for detailed type resolution traces\n)\n\n\/\/ exprInfo stores type and constant value for an untyped expression.\ntype exprInfo struct {\n\tisLhs bool \/\/ expression is lhs operand of a shift with delayed type-check\n\ttyp *Basic\n\tval exact.Value \/\/ constant value; or nil (if not a constant)\n}\n\n\/\/ funcInfo stores the information required for type-checking a function.\ntype funcInfo struct {\n\tname string \/\/ for debugging\/tracing only\n\tdecl *declInfo \/\/ for cycle detection\n\tsig *Signature\n\tbody *ast.BlockStmt\n}\n\n\/\/ A context represents the context within which an object is type-checked.\ntype context struct {\n\tdecl *declInfo \/\/ package-level declaration whose init expression\/function body is checked\n\tscope *Scope \/\/ top-most scope for lookups\n\tiota exact.Value \/\/ value of iota in a constant declaration; nil otherwise\n\tsig *Signature \/\/ function signature if inside a function; nil otherwise\n\thasLabel bool \/\/ set if a function makes use of labels (only ~1% of functions); unused outside functions\n\thasCallOrRecv bool \/\/ set if an expression contains a function call or channel receive operation\n}\n\n\/\/ A checker maintains the state of the type checker.\n\/\/ It must be created with newChecker.\ntype checker struct {\n\t\/\/ package information\n\t\/\/ (initialized by NewChecker, valid for the life-time of checker)\n\tconf *Config\n\tfset *token.FileSet\n\tpkg *Package\n\t*Info\n\tobjMap map[Object]*declInfo \/\/ maps package-level object to declaration info\n\n\t\/\/ information collected during type-checking of a set of package files\n\t\/\/ (initialized by Files, valid only for the duration of check.Files;\n\t\/\/ maps and lists are allocated on demand)\n\tfiles []*ast.File \/\/ package files\n\tfileScopes []*Scope \/\/ file scope for each file\n\tdotImports []map[*Package]token.Pos \/\/ positions of dot-imports for each file\n\n\tfirstErr error \/\/ first error encountered\n\tmethods map[string][]*Func \/\/ maps type names to associated methods\n\tuntyped map[ast.Expr]exprInfo \/\/ map of expressions without final type\n\tfuncs []funcInfo \/\/ list of functions to type-check\n\tdelayed []func() \/\/ delayed checks requiring fully setup types\n\n\t\/\/ context within which the current object is type-checked\n\t\/\/ (valid only for the duration of type-checking a specific object)\n\tcontext\n\n\t\/\/ debugging\n\tindent int \/\/ indentation for tracing\n}\n\n\/\/ addDeclDep adds the dependency edge (check.decl -> to)\n\/\/ if check.decl exists and to has an initializer.\nfunc (check *checker) addDeclDep(to Object) {\n\tfrom := check.decl\n\tif from == nil {\n\t\treturn \/\/ not in a package-level init expression\n\t}\n\tif decl := check.objMap[to]; decl == nil || !decl.hasInitializer() {\n\t\treturn \/\/ to is not a package-level object or has no initializer\n\t}\n\tfrom.addDep(to)\n}\n\nfunc (check *checker) assocMethod(tname string, meth *Func) {\n\tm := check.methods\n\tif m == nil {\n\t\tm = make(map[string][]*Func)\n\t\tcheck.methods = m\n\t}\n\tm[tname] = append(m[tname], meth)\n}\n\nfunc (check *checker) rememberUntyped(e ast.Expr, lhs bool, typ *Basic, val exact.Value) {\n\tm := check.untyped\n\tif m == nil {\n\t\tm = make(map[ast.Expr]exprInfo)\n\t\tcheck.untyped = m\n\t}\n\tm[e] = exprInfo{lhs, typ, val}\n}\n\nfunc (check *checker) later(name string, decl *declInfo, sig *Signature, body *ast.BlockStmt) {\n\tcheck.funcs = append(check.funcs, funcInfo{name, decl, sig, body})\n}\n\nfunc (check *checker) delay(f func()) {\n\tcheck.delayed = append(check.delayed, f)\n}\n\n\/\/ NewChecker returns a new Checker instance for a given package.\n\/\/ Package files may be incrementally added via checker.Files.\nfunc NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *checker {\n\t\/\/ make sure we have a configuration\n\tif conf == nil {\n\t\tconf = new(Config)\n\t}\n\n\t\/\/ make sure we have a package canonicalization map\n\tif conf.Packages == nil {\n\t\tconf.Packages = make(map[string]*Package)\n\t}\n\n\t\/\/ make sure we have an info struct\n\tif info == nil {\n\t\tinfo = new(Info)\n\t}\n\n\treturn &checker{\n\t\tconf: conf,\n\t\tfset: fset,\n\t\tpkg: pkg,\n\t\tInfo: info,\n\t\tobjMap: make(map[Object]*declInfo),\n\t}\n}\n\n\/\/ initFiles initializes the files-specific portion of checker.\n\/\/ The provided files must all belong to the same package.\nfunc (check *checker) initFiles(files []*ast.File) {\n\t\/\/ start with a clean slate (check.Files may be called multiple times)\n\tcheck.files = nil\n\tcheck.fileScopes = nil\n\tcheck.dotImports = nil\n\n\tcheck.firstErr = nil\n\tcheck.methods = nil\n\tcheck.untyped = nil\n\tcheck.funcs = nil\n\tcheck.delayed = nil\n\n\t\/\/ determine package name, files, and set up file scopes, dotImports maps\n\tpkg := check.pkg\n\tfor i, file := range files {\n\t\tswitch name := file.Name.Name; pkg.name {\n\t\tcase \"\":\n\t\t\tpkg.name = name\n\t\t\tfallthrough\n\n\t\tcase name:\n\t\t\tcheck.files = append(check.files, file)\n\t\t\tvar comment string\n\t\t\tif pos := file.Pos(); pos.IsValid() {\n\t\t\t\tcomment = \"file \" + check.fset.File(pos).Name()\n\t\t\t} else {\n\t\t\t\tcomment = fmt.Sprintf(\"file[%d]\", i)\n\t\t\t}\n\t\t\tfileScope := NewScope(pkg.scope, comment)\n\t\t\tcheck.recordScope(file, fileScope)\n\t\t\tcheck.fileScopes = append(check.fileScopes, fileScope)\n\t\t\tcheck.dotImports = append(check.dotImports, nil) \/\/ element (map) is lazily allocated\n\n\t\tdefault:\n\t\t\tcheck.errorf(file.Package, \"package %s; expected %s\", name, pkg.name)\n\t\t\t\/\/ ignore this file\n\t\t}\n\t}\n}\n\n\/\/ A bailout panic is raised to indicate early termination.\ntype bailout struct{}\n\nfunc (check *checker) handleBailout(err *error) {\n\tswitch p := recover().(type) {\n\tcase nil, bailout:\n\t\t\/\/ normal return or early exit\n\t\t*err = check.firstErr\n\tdefault:\n\t\t\/\/ re-panic\n\t\tpanic(p)\n\t}\n}\n\n\/\/ Files checks the provided files as part of the checker's package.\nfunc (check *checker) Files(files []*ast.File) (err error) {\n\tdefer check.handleBailout(&err)\n\n\tcheck.initFiles(files)\n\n\tcheck.collectObjects()\n\n\tobjList := check.resolveOrder()\n\n\tcheck.packageObjects(objList)\n\n\tcheck.functionBodies()\n\n\tcheck.initDependencies(objList)\n\n\tcheck.unusedImports()\n\n\t\/\/ perform delayed checks\n\tfor _, f := range check.delayed {\n\t\tf()\n\t}\n\n\tcheck.recordUntyped()\n\n\tcheck.pkg.complete = true\n\treturn\n}\n\nfunc (check *checker) recordUntyped() {\n\tif !debug && check.Types == nil {\n\t\treturn \/\/ nothing to do\n\t}\n\n\tfor x, info := range check.untyped {\n\t\tif debug && isTyped(info.typ) {\n\t\t\tcheck.dump(\"%s: %s (type %s) is typed\", x.Pos(), x, info.typ)\n\t\t\tunreachable()\n\t\t}\n\t\tcheck.recordTypeAndValue(x, info.typ, info.val)\n\t}\n}\n\nfunc (check *checker) recordTypeAndValue(x ast.Expr, typ Type, val exact.Value) {\n\tassert(x != nil && typ != nil)\n\tif val != nil {\n\t\tassert(isConstType(typ))\n\t}\n\tif m := check.Types; m != nil {\n\t\tm[x] = TypeAndValue{typ, val}\n\t}\n}\n\nfunc (check *checker) recordBuiltinType(f ast.Expr, sig *Signature) {\n\t\/\/ f must be a (possibly parenthesized) identifier denoting a built-in\n\t\/\/ (built-ins in package unsafe always produce a constant result and\n\t\/\/ we don't record their signatures, so we don't see qualified idents\n\t\/\/ here): record the signature for f and possible children.\n\tfor {\n\t\tcheck.recordTypeAndValue(f, sig, nil)\n\t\tswitch p := f.(type) {\n\t\tcase *ast.Ident:\n\t\t\treturn \/\/ we're done\n\t\tcase *ast.ParenExpr:\n\t\t\tf = p.X\n\t\tdefault:\n\t\t\tunreachable()\n\t\t}\n\t}\n}\n\nfunc (check *checker) recordCommaOkTypes(x ast.Expr, a [2]Type) {\n\tassert(x != nil)\n\tif a[0] == nil || a[1] == nil {\n\t\treturn\n\t}\n\tassert(isTyped(a[0]) && isTyped(a[1]) && isBoolean(a[1]))\n\tif m := check.Types; m != nil {\n\t\tfor {\n\t\t\ttv := m[x]\n\t\t\tassert(tv.Type != nil) \/\/ should have been recorded already\n\t\t\tpos := x.Pos()\n\t\t\ttv.Type = NewTuple(\n\t\t\t\tNewVar(pos, check.pkg, \"\", a[0]),\n\t\t\t\tNewVar(pos, check.pkg, \"\", a[1]),\n\t\t\t)\n\t\t\tm[x] = tv\n\t\t\t\/\/ if x is a parenthesized expression (p.X), update p.X\n\t\t\tp, _ := x.(*ast.ParenExpr)\n\t\t\tif p == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tx = p.X\n\t\t}\n\t}\n}\n\nfunc (check *checker) recordDef(id *ast.Ident, obj Object) {\n\tassert(id != nil)\n\tif m := check.Defs; m != nil {\n\t\tm[id] = obj\n\t}\n}\n\nfunc (check *checker) recordUse(id *ast.Ident, obj Object) {\n\tassert(id != nil)\n\tassert(obj != nil)\n\tif m := check.Uses; m != nil {\n\t\tm[id] = obj\n\t}\n}\n\nfunc (check *checker) recordImplicit(node ast.Node, obj Object) {\n\tassert(node != nil && obj != nil)\n\tif m := check.Implicits; m != nil {\n\t\tm[node] = obj\n\t}\n}\n\nfunc (check *checker) recordSelection(x *ast.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) {\n\tassert(obj != nil && (recv == nil || len(index) > 0))\n\tcheck.recordUse(x.Sel, obj)\n\t\/\/ TODO(gri) Should we also call recordTypeAndValue?\n\tif m := check.Selections; m != nil {\n\t\tm[x] = &Selection{kind, recv, obj, index, indirect}\n\t}\n}\n\nfunc (check *checker) recordScope(node ast.Node, scope *Scope) {\n\tassert(node != nil && scope != nil)\n\tif m := check.Scopes; m != nil {\n\t\tm[node] = scope\n\t}\n}\n<commit_msg>go.tools\/go\/types: fix typo in comment.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file implements the Check function, which drives type-checking.\n\npackage types\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\n\t\"code.google.com\/p\/go.tools\/go\/exact\"\n)\n\n\/\/ debugging\/development support\nconst (\n\tdebug = false \/\/ leave on during development\n\ttrace = false \/\/ turn on for detailed type resolution traces\n)\n\n\/\/ exprInfo stores type and constant value for an untyped expression.\ntype exprInfo struct {\n\tisLhs bool \/\/ expression is lhs operand of a shift with delayed type-check\n\ttyp *Basic\n\tval exact.Value \/\/ constant value; or nil (if not a constant)\n}\n\n\/\/ funcInfo stores the information required for type-checking a function.\ntype funcInfo struct {\n\tname string \/\/ for debugging\/tracing only\n\tdecl *declInfo \/\/ for cycle detection\n\tsig *Signature\n\tbody *ast.BlockStmt\n}\n\n\/\/ A context represents the context within which an object is type-checked.\ntype context struct {\n\tdecl *declInfo \/\/ package-level declaration whose init expression\/function body is checked\n\tscope *Scope \/\/ top-most scope for lookups\n\tiota exact.Value \/\/ value of iota in a constant declaration; nil otherwise\n\tsig *Signature \/\/ function signature if inside a function; nil otherwise\n\thasLabel bool \/\/ set if a function makes use of labels (only ~1% of functions); unused outside functions\n\thasCallOrRecv bool \/\/ set if an expression contains a function call or channel receive operation\n}\n\n\/\/ A checker maintains the state of the type checker.\n\/\/ It must be created with NewChecker.\ntype checker struct {\n\t\/\/ package information\n\t\/\/ (initialized by NewChecker, valid for the life-time of checker)\n\tconf *Config\n\tfset *token.FileSet\n\tpkg *Package\n\t*Info\n\tobjMap map[Object]*declInfo \/\/ maps package-level object to declaration info\n\n\t\/\/ information collected during type-checking of a set of package files\n\t\/\/ (initialized by Files, valid only for the duration of check.Files;\n\t\/\/ maps and lists are allocated on demand)\n\tfiles []*ast.File \/\/ package files\n\tfileScopes []*Scope \/\/ file scope for each file\n\tdotImports []map[*Package]token.Pos \/\/ positions of dot-imports for each file\n\n\tfirstErr error \/\/ first error encountered\n\tmethods map[string][]*Func \/\/ maps type names to associated methods\n\tuntyped map[ast.Expr]exprInfo \/\/ map of expressions without final type\n\tfuncs []funcInfo \/\/ list of functions to type-check\n\tdelayed []func() \/\/ delayed checks requiring fully setup types\n\n\t\/\/ context within which the current object is type-checked\n\t\/\/ (valid only for the duration of type-checking a specific object)\n\tcontext\n\n\t\/\/ debugging\n\tindent int \/\/ indentation for tracing\n}\n\n\/\/ addDeclDep adds the dependency edge (check.decl -> to)\n\/\/ if check.decl exists and to has an initializer.\nfunc (check *checker) addDeclDep(to Object) {\n\tfrom := check.decl\n\tif from == nil {\n\t\treturn \/\/ not in a package-level init expression\n\t}\n\tif decl := check.objMap[to]; decl == nil || !decl.hasInitializer() {\n\t\treturn \/\/ to is not a package-level object or has no initializer\n\t}\n\tfrom.addDep(to)\n}\n\nfunc (check *checker) assocMethod(tname string, meth *Func) {\n\tm := check.methods\n\tif m == nil {\n\t\tm = make(map[string][]*Func)\n\t\tcheck.methods = m\n\t}\n\tm[tname] = append(m[tname], meth)\n}\n\nfunc (check *checker) rememberUntyped(e ast.Expr, lhs bool, typ *Basic, val exact.Value) {\n\tm := check.untyped\n\tif m == nil {\n\t\tm = make(map[ast.Expr]exprInfo)\n\t\tcheck.untyped = m\n\t}\n\tm[e] = exprInfo{lhs, typ, val}\n}\n\nfunc (check *checker) later(name string, decl *declInfo, sig *Signature, body *ast.BlockStmt) {\n\tcheck.funcs = append(check.funcs, funcInfo{name, decl, sig, body})\n}\n\nfunc (check *checker) delay(f func()) {\n\tcheck.delayed = append(check.delayed, f)\n}\n\n\/\/ NewChecker returns a new Checker instance for a given package.\n\/\/ Package files may be incrementally added via checker.Files.\nfunc NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *checker {\n\t\/\/ make sure we have a configuration\n\tif conf == nil {\n\t\tconf = new(Config)\n\t}\n\n\t\/\/ make sure we have a package canonicalization map\n\tif conf.Packages == nil {\n\t\tconf.Packages = make(map[string]*Package)\n\t}\n\n\t\/\/ make sure we have an info struct\n\tif info == nil {\n\t\tinfo = new(Info)\n\t}\n\n\treturn &checker{\n\t\tconf: conf,\n\t\tfset: fset,\n\t\tpkg: pkg,\n\t\tInfo: info,\n\t\tobjMap: make(map[Object]*declInfo),\n\t}\n}\n\n\/\/ initFiles initializes the files-specific portion of checker.\n\/\/ The provided files must all belong to the same package.\nfunc (check *checker) initFiles(files []*ast.File) {\n\t\/\/ start with a clean slate (check.Files may be called multiple times)\n\tcheck.files = nil\n\tcheck.fileScopes = nil\n\tcheck.dotImports = nil\n\n\tcheck.firstErr = nil\n\tcheck.methods = nil\n\tcheck.untyped = nil\n\tcheck.funcs = nil\n\tcheck.delayed = nil\n\n\t\/\/ determine package name, files, and set up file scopes, dotImports maps\n\tpkg := check.pkg\n\tfor i, file := range files {\n\t\tswitch name := file.Name.Name; pkg.name {\n\t\tcase \"\":\n\t\t\tpkg.name = name\n\t\t\tfallthrough\n\n\t\tcase name:\n\t\t\tcheck.files = append(check.files, file)\n\t\t\tvar comment string\n\t\t\tif pos := file.Pos(); pos.IsValid() {\n\t\t\t\tcomment = \"file \" + check.fset.File(pos).Name()\n\t\t\t} else {\n\t\t\t\tcomment = fmt.Sprintf(\"file[%d]\", i)\n\t\t\t}\n\t\t\tfileScope := NewScope(pkg.scope, comment)\n\t\t\tcheck.recordScope(file, fileScope)\n\t\t\tcheck.fileScopes = append(check.fileScopes, fileScope)\n\t\t\tcheck.dotImports = append(check.dotImports, nil) \/\/ element (map) is lazily allocated\n\n\t\tdefault:\n\t\t\tcheck.errorf(file.Package, \"package %s; expected %s\", name, pkg.name)\n\t\t\t\/\/ ignore this file\n\t\t}\n\t}\n}\n\n\/\/ A bailout panic is raised to indicate early termination.\ntype bailout struct{}\n\nfunc (check *checker) handleBailout(err *error) {\n\tswitch p := recover().(type) {\n\tcase nil, bailout:\n\t\t\/\/ normal return or early exit\n\t\t*err = check.firstErr\n\tdefault:\n\t\t\/\/ re-panic\n\t\tpanic(p)\n\t}\n}\n\n\/\/ Files checks the provided files as part of the checker's package.\nfunc (check *checker) Files(files []*ast.File) (err error) {\n\tdefer check.handleBailout(&err)\n\n\tcheck.initFiles(files)\n\n\tcheck.collectObjects()\n\n\tobjList := check.resolveOrder()\n\n\tcheck.packageObjects(objList)\n\n\tcheck.functionBodies()\n\n\tcheck.initDependencies(objList)\n\n\tcheck.unusedImports()\n\n\t\/\/ perform delayed checks\n\tfor _, f := range check.delayed {\n\t\tf()\n\t}\n\n\tcheck.recordUntyped()\n\n\tcheck.pkg.complete = true\n\treturn\n}\n\nfunc (check *checker) recordUntyped() {\n\tif !debug && check.Types == nil {\n\t\treturn \/\/ nothing to do\n\t}\n\n\tfor x, info := range check.untyped {\n\t\tif debug && isTyped(info.typ) {\n\t\t\tcheck.dump(\"%s: %s (type %s) is typed\", x.Pos(), x, info.typ)\n\t\t\tunreachable()\n\t\t}\n\t\tcheck.recordTypeAndValue(x, info.typ, info.val)\n\t}\n}\n\nfunc (check *checker) recordTypeAndValue(x ast.Expr, typ Type, val exact.Value) {\n\tassert(x != nil && typ != nil)\n\tif val != nil {\n\t\tassert(isConstType(typ))\n\t}\n\tif m := check.Types; m != nil {\n\t\tm[x] = TypeAndValue{typ, val}\n\t}\n}\n\nfunc (check *checker) recordBuiltinType(f ast.Expr, sig *Signature) {\n\t\/\/ f must be a (possibly parenthesized) identifier denoting a built-in\n\t\/\/ (built-ins in package unsafe always produce a constant result and\n\t\/\/ we don't record their signatures, so we don't see qualified idents\n\t\/\/ here): record the signature for f and possible children.\n\tfor {\n\t\tcheck.recordTypeAndValue(f, sig, nil)\n\t\tswitch p := f.(type) {\n\t\tcase *ast.Ident:\n\t\t\treturn \/\/ we're done\n\t\tcase *ast.ParenExpr:\n\t\t\tf = p.X\n\t\tdefault:\n\t\t\tunreachable()\n\t\t}\n\t}\n}\n\nfunc (check *checker) recordCommaOkTypes(x ast.Expr, a [2]Type) {\n\tassert(x != nil)\n\tif a[0] == nil || a[1] == nil {\n\t\treturn\n\t}\n\tassert(isTyped(a[0]) && isTyped(a[1]) && isBoolean(a[1]))\n\tif m := check.Types; m != nil {\n\t\tfor {\n\t\t\ttv := m[x]\n\t\t\tassert(tv.Type != nil) \/\/ should have been recorded already\n\t\t\tpos := x.Pos()\n\t\t\ttv.Type = NewTuple(\n\t\t\t\tNewVar(pos, check.pkg, \"\", a[0]),\n\t\t\t\tNewVar(pos, check.pkg, \"\", a[1]),\n\t\t\t)\n\t\t\tm[x] = tv\n\t\t\t\/\/ if x is a parenthesized expression (p.X), update p.X\n\t\t\tp, _ := x.(*ast.ParenExpr)\n\t\t\tif p == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tx = p.X\n\t\t}\n\t}\n}\n\nfunc (check *checker) recordDef(id *ast.Ident, obj Object) {\n\tassert(id != nil)\n\tif m := check.Defs; m != nil {\n\t\tm[id] = obj\n\t}\n}\n\nfunc (check *checker) recordUse(id *ast.Ident, obj Object) {\n\tassert(id != nil)\n\tassert(obj != nil)\n\tif m := check.Uses; m != nil {\n\t\tm[id] = obj\n\t}\n}\n\nfunc (check *checker) recordImplicit(node ast.Node, obj Object) {\n\tassert(node != nil && obj != nil)\n\tif m := check.Implicits; m != nil {\n\t\tm[node] = obj\n\t}\n}\n\nfunc (check *checker) recordSelection(x *ast.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) {\n\tassert(obj != nil && (recv == nil || len(index) > 0))\n\tcheck.recordUse(x.Sel, obj)\n\t\/\/ TODO(gri) Should we also call recordTypeAndValue?\n\tif m := check.Selections; m != nil {\n\t\tm[x] = &Selection{kind, recv, obj, index, indirect}\n\t}\n}\n\nfunc (check *checker) recordScope(node ast.Node, scope *Scope) {\n\tassert(node != nil && scope != nil)\n\tif m := check.Scopes; m != nil {\n\t\tm[node] = scope\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"code.google.com\/p\/weed-fs\/go\/glog\"\n\t\"code.google.com\/p\/weed-fs\/go\/storage\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nfunc init() {\n\tcmdExport.Run = runExport \/\/ break init cycle\n\tcmdExport.IsDebug = cmdExport.Flag.Bool(\"debug\", false, \"enable debug mode\")\n}\n\nconst (\n\tdefaultFnFormat = `{{.Mime}}\/{{.Id}}:{{.Name}}`\n)\n\nvar cmdExport = &Command{\n\tUsageLine: \"export -dir=\/tmp -volumeId=234 -o=\/dir\/name.tar -fileNameFormat={{.Name}}\",\n\tShort: \"list or export files from one volume data file\",\n\tLong: `List all files in a volume, or Export all files in a volume to a tar file if the output is specified.\n\t\n\tThe format of file name in the tar file can be customized. Default is {{.Mime}}\/{{.Id}}:{{.Name}}. Also available is {{.Key}}.\n\n `,\n}\n\nvar (\n\texportVolumePath = cmdExport.Flag.String(\"dir\", \"\/tmp\", \"input data directory to store volume data files\")\n\texportCollection = cmdExport.Flag.String(\"collection\", \"\", \"the volume collection name\")\n\texportVolumeId = cmdExport.Flag.Int(\"volumeId\", -1, \"a volume id. The volume should already exist in the dir. The volume index file should not exist.\")\n\tdest = cmdExport.Flag.String(\"o\", \"\", \"output tar file name, must ends with .tar, or just a \\\"-\\\" for stdout\")\n\tformat = cmdExport.Flag.String(\"fileNameFormat\", defaultFnFormat, \"filename format, default to {{.Mime}}\/{{.Id}}:{{.Name}}\")\n\ttarFh *tar.Writer\n\ttarHeader tar.Header\n\tfnTmpl *template.Template\n\tfnTmplBuf = bytes.NewBuffer(nil)\n)\n\nfunc runExport(cmd *Command, args []string) bool {\n\n\tif *exportVolumeId == -1 {\n\t\treturn false\n\t}\n\n\tvar err error\n\tif *dest != \"\" {\n\t\tif *dest != \"-\" && !strings.HasSuffix(*dest, \".tar\") {\n\t\t\tfmt.Println(\"the output file\", *dest, \"should be '-' or end with .tar\")\n\t\t\treturn false\n\t\t}\n\n\t\tif fnTmpl, err = template.New(\"name\").Parse(*format); err != nil {\n\t\t\tfmt.Println(\"cannot parse format \" + *format + \": \" + err.Error())\n\t\t\treturn false\n\t\t}\n\n\t\tvar fh *os.File\n\t\tif *dest == \"-\" {\n\t\t\tfh = os.Stdout\n\t\t} else {\n\t\t\tif fh, err = os.Create(*dest); err != nil {\n\t\t\t\tglog.Fatalf(\"cannot open output tar %s: %s\", *dest, err)\n\t\t\t}\n\t\t}\n\t\tdefer fh.Close()\n\t\ttarFh = tar.NewWriter(fh)\n\t\tdefer tarFh.Close()\n\t\tt := time.Now()\n\t\ttarHeader = tar.Header{Mode: 0644,\n\t\t\tModTime: t, Uid: os.Getuid(), Gid: os.Getgid(),\n\t\t\tTypeflag: tar.TypeReg,\n\t\t\tAccessTime: t, ChangeTime: t}\n\t}\n\n\tfileName := strconv.Itoa(*exportVolumeId)\n\tvid := storage.VolumeId(*exportVolumeId)\n\tindexFile, err := os.OpenFile(path.Join(*exportVolumePath, fileName+\".idx\"), os.O_RDONLY, 0644)\n\tif err != nil {\n\t\tglog.Fatalf(\"Create Volume Index [ERROR] %s\\n\", err)\n\t}\n\tdefer indexFile.Close()\n\n\tnm, err := storage.LoadNeedleMap(indexFile)\n\tif err != nil {\n\t\tglog.Fatalf(\"cannot load needle map from %s: %s\", indexFile, err)\n\t}\n\n\tvar version storage.Version\n\n\terr = storage.ScanVolumeFile(*exportVolumePath, *exportCollection, vid, func(superBlock storage.SuperBlock) error {\n\t\tversion = superBlock.Version\n\t\treturn nil\n\t}, func(n *storage.Needle, offset int64) error {\n\t\tdebug(\"key\", n.Id, \"offset\", offset, \"size\", n.Size, \"disk_size\", n.DiskSize(), \"gzip\", n.IsGzipped())\n\t\tnv, ok := nm.Get(n.Id)\n\t\tif ok && nv.Size > 0 {\n\t\t\treturn walker(vid, n, version)\n\t\t} else {\n\t\t\tif !ok {\n\t\t\t\tdebug(\"This seems deleted\", n.Id, \"size\", n.Size)\n\t\t\t} else {\n\t\t\t\tdebug(\"Id\", n.Id, \"size\", n.Size)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tglog.Fatalf(\"Export Volume File [ERROR] %s\\n\", err)\n\t}\n\treturn true\n}\n\ntype nameParams struct {\n\tName string\n\tId uint64\n\tMime string\n\tKey string\n}\n\nfunc walker(vid storage.VolumeId, n *storage.Needle, version storage.Version) (err error) {\n\tkey := storage.NewFileId(vid, n.Id, n.Cookie).String()\n\tif tarFh != nil {\n\t\tfnTmplBuf.Reset()\n\t\tif err = fnTmpl.Execute(fnTmplBuf,\n\t\t\tnameParams{Name: string(n.Name),\n\t\t\t\tId: n.Id,\n\t\t\t\tMime: string(n.Mime),\n\t\t\t\tKey: key,\n\t\t\t},\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnm := fnTmplBuf.String()\n\n\t\tif n.IsGzipped() && path.Ext(nm) != \".gz\" {\n\t\t\tnm = nm + \".gz\"\n\t\t}\n\n\t\ttarHeader.Name, tarHeader.Size = nm, int64(len(n.Data))\n\t\tif err = tarFh.WriteHeader(&tarHeader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tarFh.Write(n.Data)\n\t} else {\n\t\tsize := n.DataSize\n\t\tif version == storage.Version1 {\n\t\t\tsize = n.Size\n\t\t}\n\t\tfmt.Printf(\"key=%s Name=%s Size=%d gzip=%t mime=%s\\n\",\n\t\t\tkey,\n\t\t\tn.Name,\n\t\t\tsize,\n\t\t\tn.IsGzipped(),\n\t\t\tn.Mime,\n\t\t)\n\t}\n\treturn\n}\n<commit_msg>Issue 60:\tweed export -collection doesn't work<commit_after>package main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"code.google.com\/p\/weed-fs\/go\/glog\"\n\t\"code.google.com\/p\/weed-fs\/go\/storage\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nfunc init() {\n\tcmdExport.Run = runExport \/\/ break init cycle\n\tcmdExport.IsDebug = cmdExport.Flag.Bool(\"debug\", false, \"enable debug mode\")\n}\n\nconst (\n\tdefaultFnFormat = `{{.Mime}}\/{{.Id}}:{{.Name}}`\n)\n\nvar cmdExport = &Command{\n\tUsageLine: \"export -dir=\/tmp -volumeId=234 -o=\/dir\/name.tar -fileNameFormat={{.Name}}\",\n\tShort: \"list or export files from one volume data file\",\n\tLong: `List all files in a volume, or Export all files in a volume to a tar file if the output is specified.\n\t\n\tThe format of file name in the tar file can be customized. Default is {{.Mime}}\/{{.Id}}:{{.Name}}. Also available is {{.Key}}.\n\n `,\n}\n\nvar (\n\texportVolumePath = cmdExport.Flag.String(\"dir\", \"\/tmp\", \"input data directory to store volume data files\")\n\texportCollection = cmdExport.Flag.String(\"collection\", \"\", \"the volume collection name\")\n\texportVolumeId = cmdExport.Flag.Int(\"volumeId\", -1, \"a volume id. The volume should already exist in the dir. The volume index file should not exist.\")\n\tdest = cmdExport.Flag.String(\"o\", \"\", \"output tar file name, must ends with .tar, or just a \\\"-\\\" for stdout\")\n\tformat = cmdExport.Flag.String(\"fileNameFormat\", defaultFnFormat, \"filename format, default to {{.Mime}}\/{{.Id}}:{{.Name}}\")\n\ttarFh *tar.Writer\n\ttarHeader tar.Header\n\tfnTmpl *template.Template\n\tfnTmplBuf = bytes.NewBuffer(nil)\n)\n\nfunc runExport(cmd *Command, args []string) bool {\n\n\tif *exportVolumeId == -1 {\n\t\treturn false\n\t}\n\n\tvar err error\n\tif *dest != \"\" {\n\t\tif *dest != \"-\" && !strings.HasSuffix(*dest, \".tar\") {\n\t\t\tfmt.Println(\"the output file\", *dest, \"should be '-' or end with .tar\")\n\t\t\treturn false\n\t\t}\n\n\t\tif fnTmpl, err = template.New(\"name\").Parse(*format); err != nil {\n\t\t\tfmt.Println(\"cannot parse format \" + *format + \": \" + err.Error())\n\t\t\treturn false\n\t\t}\n\n\t\tvar fh *os.File\n\t\tif *dest == \"-\" {\n\t\t\tfh = os.Stdout\n\t\t} else {\n\t\t\tif fh, err = os.Create(*dest); err != nil {\n\t\t\t\tglog.Fatalf(\"cannot open output tar %s: %s\", *dest, err)\n\t\t\t}\n\t\t}\n\t\tdefer fh.Close()\n\t\ttarFh = tar.NewWriter(fh)\n\t\tdefer tarFh.Close()\n\t\tt := time.Now()\n\t\ttarHeader = tar.Header{Mode: 0644,\n\t\t\tModTime: t, Uid: os.Getuid(), Gid: os.Getgid(),\n\t\t\tTypeflag: tar.TypeReg,\n\t\t\tAccessTime: t, ChangeTime: t}\n\t}\n\n\tfileName := strconv.Itoa(*exportVolumeId)\n\tif *exportCollection!=\"\"{\n\t fileName = *exportCollection + \"_\" + fileName\n\t}\n\tvid := storage.VolumeId(*exportVolumeId)\n\tindexFile, err := os.OpenFile(path.Join(*exportVolumePath, fileName+\".idx\"), os.O_RDONLY, 0644)\n\tif err != nil {\n\t\tglog.Fatalf(\"Create Volume Index [ERROR] %s\\n\", err)\n\t}\n\tdefer indexFile.Close()\n\n\tnm, err := storage.LoadNeedleMap(indexFile)\n\tif err != nil {\n\t\tglog.Fatalf(\"cannot load needle map from %s: %s\", indexFile, err)\n\t}\n\n\tvar version storage.Version\n\n\terr = storage.ScanVolumeFile(*exportVolumePath, *exportCollection, vid, func(superBlock storage.SuperBlock) error {\n\t\tversion = superBlock.Version\n\t\treturn nil\n\t}, func(n *storage.Needle, offset int64) error {\n\t\tdebug(\"key\", n.Id, \"offset\", offset, \"size\", n.Size, \"disk_size\", n.DiskSize(), \"gzip\", n.IsGzipped())\n\t\tnv, ok := nm.Get(n.Id)\n\t\tif ok && nv.Size > 0 {\n\t\t\treturn walker(vid, n, version)\n\t\t} else {\n\t\t\tif !ok {\n\t\t\t\tdebug(\"This seems deleted\", n.Id, \"size\", n.Size)\n\t\t\t} else {\n\t\t\t\tdebug(\"Id\", n.Id, \"size\", n.Size)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tglog.Fatalf(\"Export Volume File [ERROR] %s\\n\", err)\n\t}\n\treturn true\n}\n\ntype nameParams struct {\n\tName string\n\tId uint64\n\tMime string\n\tKey string\n}\n\nfunc walker(vid storage.VolumeId, n *storage.Needle, version storage.Version) (err error) {\n\tkey := storage.NewFileId(vid, n.Id, n.Cookie).String()\n\tif tarFh != nil {\n\t\tfnTmplBuf.Reset()\n\t\tif err = fnTmpl.Execute(fnTmplBuf,\n\t\t\tnameParams{Name: string(n.Name),\n\t\t\t\tId: n.Id,\n\t\t\t\tMime: string(n.Mime),\n\t\t\t\tKey: key,\n\t\t\t},\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnm := fnTmplBuf.String()\n\n\t\tif n.IsGzipped() && path.Ext(nm) != \".gz\" {\n\t\t\tnm = nm + \".gz\"\n\t\t}\n\n\t\ttarHeader.Name, tarHeader.Size = nm, int64(len(n.Data))\n\t\tif err = tarFh.WriteHeader(&tarHeader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tarFh.Write(n.Data)\n\t} else {\n\t\tsize := n.DataSize\n\t\tif version == storage.Version1 {\n\t\t\tsize = n.Size\n\t\t}\n\t\tfmt.Printf(\"key=%s Name=%s Size=%d gzip=%t mime=%s\\n\",\n\t\t\tkey,\n\t\t\tn.Name,\n\t\t\tsize,\n\t\t\tn.IsGzipped(),\n\t\t\tn.Mime,\n\t\t)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nGoat Latin is a made-up language based off of English, sort of like Pig Latin.\nThe rules of Goat Latin are as follows:\n\n1. If a word begins with a consonant (i.e. not a vowel), remove the first\nletter and append it to the end, then add 'ma'.\nFor example, the word 'goat' becomes 'oatgma'.\n\n2. If a word begins with a vowel, append 'ma' to the end of the word.\nFor example, the word 'I' becomes 'Ima'.\n\n3. Add one letter \"a\" to the end of each word per its word index in the\nsentence, starting with 1. That is, the first word gets \"a\" added to the\nend, the second word gets \"aa\" added to the end, the third word in the\nsentence gets \"aaa\" added to the end, and so on.\n\nWrite a function that, given a string of words making up one sentence, returns\nthat sentence in Goat Latin. For example:\n\n string_to_goat_latin('I speak Goat Latin')\n\nshould print: 'Imaa peaksmaaa oatGmaaaa atinLmaaaaa'\n*\/\npackage main\n\nimport (\n \"fmt\"\n \"regexp\"\n \"strings\"\n)\n\n\/\/ Global splitter\nvar splitter = regexp.MustCompile(\"[ \\t\\v]\")\n\nfunc main() {\n string_to_goat_latin(\"I speak Goat Latin\")\n}\n\nfunc string_to_goat_latin(s string) {\n words := splitter.Split(s, -1)\n trans := make([]string, 0, len(words))\n \/\/ Translate word by word\n for i, word := range words {\n trans = append(trans, translateWord(word, i+1))\n }\n \/\/ Print translation\n fmt.Println(strings.Join(trans, \" \"))\n}\n\n\/\/ Translate word\nfunc translateWord(word string, n int) string {\n runes := []rune(word)\n \/\/ Pre-allocate maximum needed buffer on the stack\n buf := make([]rune, len(runes)+1+2+n)\n \/\/ Split word into slice of runes\n copy(buf, runes)\n \/\/ Check if the first letter is a consonant\n if !isVowel(buf[0]) {\n buf = append(buf[1:], buf[0])\n }\n \/\/ Rule #2 + end of rule #1\n buf = append(buf, 'm', 'a')\n \/\/ Rule #3\n buf = append(buf, repeatRune('a', n)...)\n \/\/ Return translated word as a string\n return string(buf)\n}\n\n\/\/ Check if rune is an English vowel\nfunc isVowel(r rune) bool {\n switch r {\n \/\/ a e i o u A E I O U\n case 'a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U':\n return true\n default:\n return false\n }\n}\n\n\/\/ Generate slice of N runes\nfunc repeatRune(r rune, n int) []rune {\n res := make([]rune, n)\n for i := range res {\n res[i] = r\n }\n return res\n}\n<commit_msg>Updated comments<commit_after>\/*\nGoat Latin is a made-up language based off of English, sort of like Pig Latin.\nThe rules of Goat Latin are as follows:\n\n1. If a word begins with a consonant (i.e. not a vowel), remove the first\nletter and append it to the end, then add 'ma'.\nFor example, the word 'goat' becomes 'oatgma'.\n\n2. If a word begins with a vowel, append 'ma' to the end of the word.\nFor example, the word 'I' becomes 'Ima'.\n\n3. Add one letter \"a\" to the end of each word per its word index in the\nsentence, starting with 1. That is, the first word gets \"a\" added to the\nend, the second word gets \"aa\" added to the end, the third word in the\nsentence gets \"aaa\" added to the end, and so on.\n\nWrite a function that, given a string of words making up one sentence, returns\nthat sentence in Goat Latin. For example:\n\n string_to_goat_latin('I speak Goat Latin')\n\nshould print: 'Imaa peaksmaaa oatGmaaaa atinLmaaaaa'\n*\/\npackage main\n\nimport (\n \"fmt\"\n \"regexp\"\n \"strings\"\n)\n\n\/\/ Global splitter\nvar splitter = regexp.MustCompile(\"[ \\t\\v]\")\n\nfunc main() {\n string_to_goat_latin(\"I speak Goat Latin\")\n}\n\nfunc string_to_goat_latin(s string) {\n words := splitter.Split(s, -1)\n trans := make([]string, 0, len(words))\n \/\/ Translate word by word\n for i, word := range words {\n trans = append(trans, translateWord(word, i+1))\n }\n \/\/ Print translation\n fmt.Println(strings.Join(trans, \" \"))\n}\n\n\/\/ Translate word\nfunc translateWord(word string, n int) string {\n \/\/ Split word into slice of runes\n runes := []rune(word)\n \/\/ Pre-allocate maximum needed buffer on the stack\n buf := make([]rune, len(runes)+1+2+n)\n \/\/ Copy word's runes into our local buffer\n copy(buf, runes)\n \/\/ Check if the first letter is a consonant\n if !isVowel(buf[0]) {\n \/\/ Rule #1\n buf = append(buf[1:], buf[0])\n }\n \/\/ Rule #2 + end of rule #1\n buf = append(buf, 'm', 'a')\n \/\/ Rule #3\n buf = append(buf, repeatRune('a', n)...)\n \/\/ Return translated word as a string\n return string(buf)\n}\n\n\/\/ Check if rune is an English vowel\nfunc isVowel(r rune) bool {\n switch r {\n \/\/ a e i o u A E I O U\n case 'a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U':\n return true\n default:\n return false\n }\n}\n\n\/\/ Generate slice of N runes\nfunc repeatRune(r rune, n int) []rune {\n res := make([]rune, n)\n for i := range res {\n res[i] = r\n }\n return res\n}\n<|endoftext|>"} {"text":"<commit_before>package gocommend\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nfunc expect(t *testing.T, a interface{}, b interface{}) {\n\tif a != b {\n\t\tt.Errorf(\"Expected %v (type %v) - Got %v (type %v)\", b, reflect.TypeOf(b), a, reflect.TypeOf(a))\n\t}\n}\n\nfunc refute(t *testing.T, a interface{}, b interface{}) {\n\tif a == b {\n\t\tt.Errorf(\"Did not expect %v (type %v) - Got %v (type %v)\", b, reflect.TypeOf(b), a, reflect.TypeOf(a))\n\t}\n}\n\nfunc Test_redisTest(t *testing.T) {\n\tredisClient.Do(\"SET\", \"aaa\", 123)\n\ta, err := redis.Int(redisClient.Do(\"GET\", \"aaa\"))\n\texpect(t, err, nil)\n\texpect(t, a, 123)\n}\n\nfunc Test_importPoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.ImportPoll(\"u1\", \"i1\")\n\texpect(t, err, nil)\n\ti.ImportPoll(\"u1\", \"i2\")\n\ti.ImportPoll(\"u1\", \"i3\")\n\ti.ImportPoll(\"u2\", \"i1\")\n\ti.ImportPoll(\"u2\", \"i2\")\n}\n\nfunc Test_updatePoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.UpdatePoll(\"u1\", \"\")\n\texpect(t, err, nil)\n\ti.UpdatePoll(\"u2\", \"\")\n}\n\nfunc Test_updateAllPoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.UpdateAllPoll()\n\texpect(t, err, nil)\n}\n\nfunc Test_RecommendItem(t *testing.T) {\n\tcollection := \"rec_test2\"\n\trecNum := 10\n\to := Output{}\n\to.Init(collection, recNum)\n\titems, err := o.RecommendItemForUser(\"u2\")\n\texpect(t, err, nil)\n\texpect(t, items[0], \"i3\")\n}\n<commit_msg>edit test<commit_after>package gocommend\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nfunc expect(t *testing.T, a interface{}, b interface{}) {\n\tif a != b {\n\t\tt.Errorf(\"Expected %v (type %v) - Got %v (type %v)\", b, reflect.TypeOf(b), a, reflect.TypeOf(a))\n\t}\n}\n\nfunc refute(t *testing.T, a interface{}, b interface{}) {\n\tif a == b {\n\t\tt.Errorf(\"Did not expect %v (type %v) - Got %v (type %v)\", b, reflect.TypeOf(b), a, reflect.TypeOf(a))\n\t}\n}\n\nfunc Test_redis(t *testing.T) {\n\tredisClient.Do(\"SET\", \"aaa\", 123)\n\ta, err := redis.Int(redisClient.Do(\"GET\", \"aaa\"))\n\texpect(t, err, nil)\n\texpect(t, a, 123)\n}\n\nfunc Test_importPoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.ImportPoll(\"u1\", \"i1\")\n\texpect(t, err, nil)\n\ti.ImportPoll(\"u1\", \"i2\")\n\ti.ImportPoll(\"u1\", \"i3\")\n\ti.ImportPoll(\"u2\", \"i1\")\n\ti.ImportPoll(\"u2\", \"i2\")\n}\n\nfunc Test_updatePoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.UpdatePoll(\"u1\", \"\")\n\texpect(t, err, nil)\n\ti.UpdatePoll(\"u2\", \"\")\n}\n\nfunc Test_updateAllPoll(t *testing.T) {\n\tcollection := \"rec_test2\"\n\ti := Input{}\n\ti.Init(collection)\n\terr := i.UpdateAllPoll()\n\texpect(t, err, nil)\n}\n\nfunc Test_RecommendItem(t *testing.T) {\n\tcollection := \"rec_test2\"\n\trecNum := 10\n\to := Output{}\n\to.Init(collection, recNum)\n\titems, err := o.RecommendItemForUser(\"u2\")\n\texpect(t, err, nil)\n\texpect(t, items[0], \"i3\")\n}\n<|endoftext|>"} {"text":"<commit_before>package godevicedetect\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"hash\/crc32\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/Shaked\/godevicedetect\/platform\"\n\t\"github.com\/Shaked\/user-agents\/packages\/go\/gouseragents\"\n\t\"github.com\/gorilla\/context\"\n)\n\nconst (\n\tUnknownOs = \"UnknownOs\"\n\tUnknownVersion = \"UnknownVersion\"\n)\n\ntype PreCompiledHandler struct{}\n\ntype Compiled struct {\n\thashFunc string\n\tuserAgents map[string]platform.Device\n}\n\n\/\/func() platform.Device\nfunc (p *PreCompiledHandler) compile() *Compiled {\n\tv := struct {\n\t\tMeta map[string]string `json:meta`\n\t\tUserAgents map[string]map[string]interface{} `json:userAgents`\n\t}{}\n\n\tlog.Println(gouseragents.CompiledUserAgents)\n\terr := json.Unmarshal([]byte(gouseragents.CompiledUserAgents), &v)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\n\tc := &Compiled{}\n\n\thash := v.Meta[\"hash\"]\n\tc.hashFunc = hash\n\n\tuserAgents := map[string]platform.Device{}\n\tvar device platform.Device\n\tfor key, deviceInfo := range v.UserAgents {\n\t\tswitch deviceInfo[\"type\"].(string) {\n\t\tcase \"desktop\":\n\t\t\tdevice = platform.NewDesktop(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tcase \"tablet\":\n\t\t\tdevice = platform.NewTablet(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tcase \"app\":\n\t\tcase \"mobile\":\n\t\t\tdevice = platform.NewMobile(deviceInfo[\"name\"].(string))\n\t\tcase \"bot\":\n\t\t\tdevice = platform.NewBot(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tdefault:\n\t\t\tpanic(\"WTF :\" + deviceInfo[\"type\"].(string))\n\t\t}\n\t\tuserAgents[key] = device\n\t}\n\tc.userAgents = userAgents\n\n\treturn c\n}\n\ntype DeviceDetect struct {\n\tr *http.Request\n\tmeta *Compiled\n}\n\nfunc NewDeviceDetect(r *http.Request, p *PreCompiledHandler) *DeviceDetect {\n\tmeta := p.compile()\n\treturn &DeviceDetect{r: r, meta: meta}\n}\n\nfunc (d *DeviceDetect) FindByUserAgent(userAgent string) platform.Device {\n\tkey := UserAgentToKey(userAgent)\n\tf, ok := d.meta.userAgents[fmt.Sprint(key)]\n\tif ok {\n\t\treturn f\n\t}\n\treturn platform.NewUnknown(userAgent)\n}\n\nfunc (d *DeviceDetect) PlatformType() platform.Device {\n\treturn d.FindByUserAgent(d.r.Header.Get(\"User-Agent\"))\n}\n\n\/\/ Vars returns the route variables for the current request, if any.\nfunc Platform(r *http.Request) platform.Device {\n\trv := context.Get(r, \"Platform\")\n\tdevice := rv.(platform.Device)\n\treturn device\n}\n\ntype PlatformHandler interface {\n\tMobile(w http.ResponseWriter, r *http.Request, d *platform.DeviceMobile)\n\tTablet(w http.ResponseWriter, r *http.Request, d *platform.DeviceTablet)\n\tDesktop(w http.ResponseWriter, r *http.Request, d *platform.DeviceDesktop)\n\tTv(w http.ResponseWriter, r *http.Request, d *platform.DeviceTv)\n\tWatch(w http.ResponseWriter, r *http.Request, d *platform.DeviceWatch)\n\tBot(w http.ResponseWriter, r *http.Request, d *platform.DeviceBot)\n\tUnknown(w http.ResponseWriter, r *http.Request, d *platform.DeviceUnknown)\n}\n\nfunc Handler(h PlatformHandler, p *PreCompiledHandler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm := NewDeviceDetect(r, p)\n\t\td := m.FindByUserAgent(r.Header.Get(\"User-Agent\"))\n\t\tswitch m.PlatformType().(type) {\n\t\tcase *platform.DeviceTablet:\n\t\t\th.Tablet(w, r, d.(*platform.DeviceTablet))\n\t\t\tbreak\n\t\tcase *platform.DeviceMobile:\n\t\t\th.Mobile(w, r, d.(*platform.DeviceMobile))\n\t\t\tbreak\n\t\tcase *platform.DeviceTv:\n\t\t\th.Tv(w, r, d.(*platform.DeviceTv))\n\t\t\tbreak\n\t\tcase *platform.DeviceWatch:\n\t\t\th.Watch(w, r, d.(*platform.DeviceWatch))\n\t\t\tbreak\n\t\tcase *platform.DeviceDesktop:\n\t\t\th.Desktop(w, r, d.(*platform.DeviceDesktop))\n\t\t\tbreak\n\t\tcase *platform.DeviceBot:\n\t\t\th.Bot(w, r, d.(*platform.DeviceBot))\n\t\t\tbreak\n\t\t}\n\n\t})\n}\n\nfunc HandlerMux(s *http.ServeMux, p *PreCompiledHandler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm := NewDeviceDetect(r, p)\n\t\tcontext.Set(r, \"Platform\", m.PlatformType())\n\t\ts.ServeHTTP(w, r)\n\t})\n}\n\nfunc UserAgentToKey(userAgent string) uint32 {\n\treturn crc32.ChecksumIEEE([]byte(userAgent))\n}\n<commit_msg>use lowercased agent<commit_after>package godevicedetect\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"hash\/crc32\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/Shaked\/godevicedetect\/platform\"\n\t\"github.com\/Shaked\/user-agents\/packages\/go\/gouseragents\"\n\t\"github.com\/gorilla\/context\"\n)\n\nconst (\n\tUnknownOs = \"UnknownOs\"\n\tUnknownVersion = \"UnknownVersion\"\n)\n\ntype PreCompiledHandler struct{}\n\ntype Compiled struct {\n\thashFunc string\n\tuserAgents map[string]platform.Device\n}\n\n\/\/func() platform.Device\nfunc (p *PreCompiledHandler) compile() *Compiled {\n\tv := struct {\n\t\tMeta map[string]string `json:meta`\n\t\tUserAgents map[string]map[string]interface{} `json:userAgents`\n\t}{}\n\n\tlog.Println(gouseragents.CompiledUserAgents)\n\terr := json.Unmarshal([]byte(gouseragents.CompiledUserAgents), &v)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\n\tc := &Compiled{}\n\n\thash := v.Meta[\"hash\"]\n\tc.hashFunc = hash\n\n\tuserAgents := map[string]platform.Device{}\n\tvar device platform.Device\n\tfor key, deviceInfo := range v.UserAgents {\n\t\tswitch deviceInfo[\"type\"].(string) {\n\t\tcase \"desktop\":\n\t\t\tdevice = platform.NewDesktop(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tcase \"tablet\":\n\t\t\tdevice = platform.NewTablet(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tcase \"app\":\n\t\tcase \"mobile\":\n\t\t\tdevice = platform.NewMobile(deviceInfo[\"name\"].(string))\n\t\tcase \"bot\":\n\t\t\tdevice = platform.NewBot(deviceInfo[\"name\"].(string))\n\t\t\tbreak\n\t\tdefault:\n\t\t\tpanic(\"WTF :\" + deviceInfo[\"type\"].(string))\n\t\t}\n\t\tuserAgents[key] = device\n\t}\n\tc.userAgents = userAgents\n\n\treturn c\n}\n\ntype DeviceDetect struct {\n\tr *http.Request\n\tmeta *Compiled\n}\n\nfunc NewDeviceDetect(r *http.Request, p *PreCompiledHandler) *DeviceDetect {\n\tmeta := p.compile()\n\treturn &DeviceDetect{r: r, meta: meta}\n}\n\nfunc (d *DeviceDetect) FindByUserAgent(userAgent string) platform.Device {\n\tkey := UserAgentToKey(userAgent)\n\tf, ok := d.meta.userAgents[fmt.Sprint(key)]\n\tif ok {\n\t\treturn f\n\t}\n\treturn platform.NewUnknown(userAgent)\n}\n\nfunc (d *DeviceDetect) PlatformType() platform.Device {\n\treturn d.FindByUserAgent(d.r.Header.Get(\"User-Agent\"))\n}\n\n\/\/ Vars returns the route variables for the current request, if any.\nfunc Platform(r *http.Request) platform.Device {\n\trv := context.Get(r, \"Platform\")\n\tdevice := rv.(platform.Device)\n\treturn device\n}\n\ntype PlatformHandler interface {\n\tMobile(w http.ResponseWriter, r *http.Request, d *platform.DeviceMobile)\n\tTablet(w http.ResponseWriter, r *http.Request, d *platform.DeviceTablet)\n\tDesktop(w http.ResponseWriter, r *http.Request, d *platform.DeviceDesktop)\n\tTv(w http.ResponseWriter, r *http.Request, d *platform.DeviceTv)\n\tWatch(w http.ResponseWriter, r *http.Request, d *platform.DeviceWatch)\n\tBot(w http.ResponseWriter, r *http.Request, d *platform.DeviceBot)\n\tUnknown(w http.ResponseWriter, r *http.Request, d *platform.DeviceUnknown)\n}\n\nfunc Handler(h PlatformHandler, p *PreCompiledHandler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm := NewDeviceDetect(r, p)\n\t\td := m.FindByUserAgent(strings.ToLower(r.Header.Get(\"User-Agent\")))\n\t\tswitch m.PlatformType().(type) {\n\t\tcase *platform.DeviceTablet:\n\t\t\th.Tablet(w, r, d.(*platform.DeviceTablet))\n\t\t\tbreak\n\t\tcase *platform.DeviceMobile:\n\t\t\th.Mobile(w, r, d.(*platform.DeviceMobile))\n\t\t\tbreak\n\t\tcase *platform.DeviceTv:\n\t\t\th.Tv(w, r, d.(*platform.DeviceTv))\n\t\t\tbreak\n\t\tcase *platform.DeviceWatch:\n\t\t\th.Watch(w, r, d.(*platform.DeviceWatch))\n\t\t\tbreak\n\t\tcase *platform.DeviceDesktop:\n\t\t\th.Desktop(w, r, d.(*platform.DeviceDesktop))\n\t\t\tbreak\n\t\tcase *platform.DeviceBot:\n\t\t\th.Bot(w, r, d.(*platform.DeviceBot))\n\t\t\tbreak\n\t\t}\n\n\t})\n}\n\nfunc HandlerMux(s *http.ServeMux, p *PreCompiledHandler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm := NewDeviceDetect(r, p)\n\t\tcontext.Set(r, \"Platform\", m.PlatformType())\n\t\ts.ServeHTTP(w, r)\n\t})\n}\n\nfunc UserAgentToKey(userAgent string) uint32 {\n\treturn crc32.ChecksumIEEE([]byte(userAgent))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The StudyGolang Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ http:\/\/studygolang.com\n\/\/ Author:polaris\tstudygolang@gmail.com\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"logic\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/polaris1119\/config\"\n\t\"github.com\/polaris1119\/goutils\"\n\t\"github.com\/polaris1119\/logger\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/robfig\/cron\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar websites = make(map[string]map[string]string)\n\nconst pattern = \"(?i)go|golang|goroutine|channel\"\n\nfunc autocrawl(needAll bool, crawlConfFile string, whichSite string) {\n\tcontent, err := ioutil.ReadFile(crawlConfFile)\n\tif err != nil {\n\t\tlog.Fatalln(\"parse crawl config read file error:\", err)\n\t}\n\n\terr = json.Unmarshal(content, &websites)\n\tif err != nil {\n\t\tlog.Fatalln(\"parse crawl config json parse error:\", err)\n\t}\n\n\tif needAll {\n\t\t\/\/ 全量\n\t\tfor website, wbconf := range websites {\n\t\t\tif whichSite != \"\" && whichSite != website {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Infoln(\"all crawl\", website)\n\t\t\tgo doCrawl(wbconf, true)\n\t\t}\n\t}\n\n\t\/\/ 定时增量\n\tc := cron.New()\n\tc.AddFunc(config.ConfigFile.MustValue(\"crawl\", \"spec\", \"0 0 *\/1 * * ?\"), func() {\n\t\t\/\/ 抓取 reddit\n\t\tgo logic.DefaultReddit.Parse(\"\")\n\n\t\t\/\/ 抓取 www.oschina.net\/project\n\t\tgo logic.DefaultProject.ParseProjectList(\"http:\/\/www.oschina.net\/project\/lang\/358\/go?tag=0&os=0&sort=time\")\n\n\t\tfor website, wbconf := range websites {\n\t\t\tif whichSite != \"\" && whichSite != website {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Infoln(\"do crawl\", website)\n\t\t\tgo doCrawl(wbconf, false)\n\t\t}\n\t})\n\tc.Start()\n}\n\nfunc doCrawl(wbconf map[string]string, isAll bool) {\n\tcrawlUrl := wbconf[\"incr_url\"]\n\tif isAll {\n\t\tcrawlUrl = wbconf[\"all_url\"]\n\t}\n\n\tlistselector := wbconf[\"listselector\"]\n\tresultselector := wbconf[\"resultselector\"]\n\tpageField := wbconf[\"page_field\"]\n\n\tmaxPage := 1\n\tif isAll {\n\t\tmaxPage = goutils.MustInt(wbconf[\"max_page\"])\n\t}\n\n\t\/\/ 个人博客,一般通过 tag 方式获取,这种处理方式和搜索不一样\n\tif wbconf[\"keywords\"] == \"\" {\n\t\tfor p := maxPage; p >= 1; p-- {\n\t\t\tif pageField == \"\" {\n\n\t\t\t\t\/\/ 标题不包含 go 等关键词的,也入库\n\t\t\t\tif err := parseArticleList(crawlUrl+strconv.Itoa(p), listselector, resultselector, false); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\tkeywords := strings.Split(wbconf[\"keywords\"], \",\")\n\n\tfor _, keyword := range keywords {\n\t\tfor p := 1; p <= maxPage; p++ {\n\n\t\t\tcurUrl := \"\"\n\n\t\t\tpage := fmt.Sprintf(\"&%s=%d\", pageField, p)\n\t\t\tif strings.Contains(crawlUrl, \"%s\") {\n\t\t\t\tcurUrl = fmt.Sprintf(crawlUrl, keyword) + page\n\t\t\t} else {\n\t\t\t\tcurUrl = crawlUrl + keyword + page\n\t\t\t}\n\n\t\t\tif err := parseArticleList(curUrl, listselector, resultselector, true); err != nil {\n\t\t\t\tlogger.Errorln(\"parse article url error:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc parseArticleList(url, listselector, resultselector string, isAuto bool) (err error) {\n\n\tlogger.Infoln(\"parse url:\", url)\n\n\tvar doc *goquery.Document\n\n\tif doc, err = goquery.NewDocument(url); err != nil {\n\t\treturn\n\t}\n\n\tdoc.Find(listselector).Each(func(i int, contentSelection *goquery.Selection) {\n\n\t\taSelection := contentSelection.Find(resultselector)\n\n\t\tif isAuto {\n\t\t\ttitle := aSelection.Text()\n\n\t\t\tmatched, err := regexp.MatchString(pattern, title)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorln(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !matched {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tarticleUrl, ok := aSelection.Attr(\"href\")\n\t\tif ok {\n\t\t\tlogic.DefaultArticle.ParseArticle(context.Background(), articleUrl, isAuto)\n\t\t}\n\t})\n\n\treturn\n}\n<commit_msg>oschina处理<commit_after>\/\/ Copyright 2014 The StudyGolang Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ http:\/\/studygolang.com\n\/\/ Author:polaris\tstudygolang@gmail.com\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"logic\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/polaris1119\/config\"\n\t\"github.com\/polaris1119\/goutils\"\n\t\"github.com\/polaris1119\/logger\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/robfig\/cron\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar websites = make(map[string]map[string]string)\n\nconst pattern = \"(?i)go|golang|goroutine|channel\"\n\nfunc autocrawl(needAll bool, crawlConfFile string, whichSite string) {\n\tcontent, err := ioutil.ReadFile(crawlConfFile)\n\tif err != nil {\n\t\tlog.Fatalln(\"parse crawl config read file error:\", err)\n\t}\n\n\terr = json.Unmarshal(content, &websites)\n\tif err != nil {\n\t\tlog.Fatalln(\"parse crawl config json parse error:\", err)\n\t}\n\n\tif needAll {\n\t\t\/\/ 全量\n\t\tfor website, wbconf := range websites {\n\t\t\tif whichSite != \"\" && whichSite != website {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Infoln(\"all crawl\", website)\n\t\t\tgo doCrawl(wbconf, true)\n\t\t}\n\t}\n\n\t\/\/ 定时增量\n\tc := cron.New()\n\tc.AddFunc(config.ConfigFile.MustValue(\"crawl\", \"spec\", \"0 0 *\/1 * * ?\"), func() {\n\t\t\/\/ 抓取 reddit\n\t\tgo logic.DefaultReddit.Parse(\"\")\n\n\t\t\/\/ 抓取 www.oschina.net\/project\n\t\tgo logic.DefaultProject.ParseProjectList(\"http:\/\/www.oschina.net\/project\/lang\/358\/go?tag=0&os=0&sort=time\")\n\n\t\tfor website, wbconf := range websites {\n\t\t\tif whichSite != \"\" && whichSite != website {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Infoln(\"do crawl\", website)\n\t\t\tgo doCrawl(wbconf, false)\n\t\t}\n\t})\n\tc.Start()\n}\n\nfunc doCrawl(wbconf map[string]string, isAll bool) {\n\tcrawlUrl := wbconf[\"incr_url\"]\n\tif isAll {\n\t\tcrawlUrl = wbconf[\"all_url\"]\n\t}\n\n\tlistselector := wbconf[\"listselector\"]\n\tresultselector := wbconf[\"resultselector\"]\n\tpageField := wbconf[\"page_field\"]\n\n\tmaxPage := 1\n\tif isAll {\n\t\tmaxPage = goutils.MustInt(wbconf[\"max_page\"])\n\t}\n\n\t\/\/ 个人博客,一般通过 tag 方式获取,这种处理方式和搜索不一样\n\tif wbconf[\"keywords\"] == \"\" {\n\t\tfor p := maxPage; p >= 1; p-- {\n\t\t\tif pageField == \"\" {\n\n\t\t\t\t\/\/ 标题不包含 go 等关键词的,也入库\n\t\t\t\tif err := parseArticleList(crawlUrl+strconv.Itoa(p), listselector, resultselector, false); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\tkeywords := strings.Split(wbconf[\"keywords\"], \",\")\n\n\tfor _, keyword := range keywords {\n\t\tfor p := 1; p <= maxPage; p++ {\n\n\t\t\tcurUrl := \"\"\n\n\t\t\tpage := fmt.Sprintf(\"&%s=%d\", pageField, p)\n\t\t\tif strings.Contains(crawlUrl, \"%s\") {\n\t\t\t\tcurUrl = fmt.Sprintf(crawlUrl, keyword) + page\n\t\t\t} else {\n\t\t\t\tcurUrl = crawlUrl + keyword + page\n\t\t\t}\n\n\t\t\tif err := parseArticleList(curUrl, listselector, resultselector, true); err != nil {\n\t\t\t\tlogger.Errorln(\"parse article url error:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc parseArticleList(url, listselector, resultselector string, isAuto bool) (err error) {\n\n\tlogger.Infoln(\"parse url:\", url)\n\n\tvar doc *goquery.Document\n\n\tif strings.Contains(url, \"oschina.net\") {\n\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Add(\"Referer\", \"http:\/\/www.oschina.net\/search?q=go&scope=blog&onlytitle=1&sort_by_time=1\")\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdoc, err = goquery.NewDocumentFromResponse(resp)\n\t} else {\n\t\tdoc, err = goquery.NewDocument(url)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdoc.Find(listselector).Each(func(i int, contentSelection *goquery.Selection) {\n\n\t\taSelection := contentSelection.Find(resultselector)\n\n\t\tif isAuto {\n\t\t\ttitle := aSelection.Text()\n\n\t\t\tmatched, err := regexp.MatchString(pattern, title)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorln(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !matched {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tarticleUrl, ok := aSelection.Attr(\"href\")\n\t\tif ok {\n\t\t\tlogic.DefaultArticle.ParseArticle(context.Background(), articleUrl, isAuto)\n\t\t}\n\t})\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package message\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\t\/\/ Reset resets the color\n\tReset = \"\\033[0m\"\n\n\t\/\/ Bold makes the following text bold\n\tBold = \"\\033[1m\"\n\n\t\/\/ Dim dims the following text\n\tDim = \"\\033[2m\"\n\n\t\/\/ Italic makes the following text italic\n\tItalic = \"\\033[3m\"\n\n\t\/\/ Underline underlines the following text\n\tUnderline = \"\\033[4m\"\n\n\t\/\/ Blink blinks the following text\n\tBlink = \"\\033[5m\"\n\n\t\/\/ Invert inverts the following text\n\tInvert = \"\\033[7m\"\n\n\t\/\/ Newline\n\tNewline = \"\\r\\n\"\n\n\t\/\/ BEL\n\tBel = \"\\007\"\n)\n\n\/\/ Interface for Styles\ntype Style interface {\n\tString() string\n\tFormat(string) string\n}\n\n\/\/ General hardcoded style, mostly used as a crutch until we flesh out the\n\/\/ framework to support backgrounds etc.\ntype style string\n\nfunc (c style) String() string {\n\treturn string(c)\n}\n\nfunc (c style) Format(s string) string {\n\treturn c.String() + s + Reset\n}\n\n\/\/ 256 color type, for terminals who support it\ntype Color256 uint8\n\n\/\/ String version of this color\nfunc (c Color256) String() string {\n\treturn fmt.Sprintf(\"38;05;%d\", c)\n}\n\n\/\/ Return formatted string with this color\nfunc (c Color256) Format(s string) string {\n\treturn \"\\033[\" + c.String() + \"m\" + s + Reset\n}\n\nfunc Color256Palette(colors ...uint8) *Palette {\n\tsize := len(colors)\n\tp := make([]Style, 0, size)\n\tfor _, color := range colors {\n\t\tp = append(p, Color256(color))\n\t}\n\treturn &Palette{\n\t\tcolors: p,\n\t\tsize: size,\n\t}\n}\n\n\/\/ No color, used for mono theme\ntype Color0 struct{}\n\n\/\/ No-op for Color0\nfunc (c Color0) String() string {\n\treturn \"\"\n}\n\n\/\/ No-op for Color0\nfunc (c Color0) Format(s string) string {\n\treturn s\n}\n\n\/\/ Container for a collection of colors\ntype Palette struct {\n\tcolors []Style\n\tsize int\n}\n\n\/\/ Get a color by index, overflows are looped around.\nfunc (p Palette) Get(i int) Style {\n\tif p.size == 1 {\n\t\treturn p.colors[0]\n\t}\n\treturn p.colors[i%(p.size-1)]\n}\n\nfunc (p Palette) Len() int {\n\treturn p.size\n}\n\nfunc (p Palette) String() string {\n\tr := \"\"\n\tfor _, c := range p.colors {\n\t\tr += c.Format(\"X\")\n\t}\n\treturn r\n}\n\n\/\/ Collection of settings for chat\ntype Theme struct {\n\tid string\n\tsys Style\n\tpm Style\n\thighlight Style\n\tnames *Palette\n}\n\nfunc (theme Theme) ID() string {\n\treturn theme.id\n}\n\n\/\/ Colorize name string given some index\nfunc (theme Theme) ColorName(u *User) string {\n\tif theme.names == nil {\n\t\treturn u.Name()\n\t}\n\n\treturn theme.names.Get(u.colorIdx).Format(u.Name())\n}\n\n\/\/ Colorize the PM string\nfunc (theme Theme) ColorPM(s string) string {\n\tif theme.pm == nil {\n\t\treturn s\n\t}\n\n\treturn theme.pm.Format(s)\n}\n\n\/\/ Colorize the Sys message\nfunc (theme Theme) ColorSys(s string) string {\n\tif theme.sys == nil {\n\t\treturn s\n\t}\n\n\treturn theme.sys.Format(s)\n}\n\n\/\/ Highlight a matched string, usually name\nfunc (theme Theme) Highlight(s string) string {\n\tif theme.highlight == nil {\n\t\treturn s\n\t}\n\treturn theme.highlight.Format(s)\n}\n\n\/\/ Timestamp colorizes the timestamp.\nfunc (theme Theme) Timestamp(s string) string {\n\treturn theme.sys.Format(s)\n}\n\n\/\/ List of initialzied themes\nvar Themes []Theme\n\n\/\/ Default theme to use\nvar DefaultTheme *Theme\n\n\/\/ MonoTheme is a simple theme without colors, useful for testing and bots.\nvar MonoTheme *Theme\n\nfunc allColors256() *Palette {\n\tcolors := []uint8{}\n\tvar i uint8\n\tfor i = 0; i < 255; i++ {\n\t\tcolors = append(colors, i)\n\t}\n\treturn Color256Palette(colors...)\n}\n\nfunc readableColors256() *Palette {\n\tcolors := []uint8{}\n\tvar i uint8\n\tfor i = 0; i < 255; i++ {\n\t\tif i == 0 || i == 7 || i == 8 || i == 15 || i == 16 || i == 17 || i > 230 {\n\t\t\t\/\/ Skip 31 Shades of Grey, and one hyperintelligent shade of blue.\n\t\t\tcontinue\n\t\t}\n\t\tcolors = append(colors, i)\n\t}\n\treturn Color256Palette(colors...)\n}\n\nfunc init() {\n\tThemes = []Theme{\n\t\t{\n\t\t\tid: \"colors\",\n\t\t\tnames: readableColors256(),\n\t\t\tsys: Color256(245), \/\/ Grey\n\t\t\tpm: Color256(7), \/\/ White\n\t\t\thighlight: style(Bold + \"\\033[48;5;11m\\033[38;5;16m\"), \/\/ Yellow highlight\n\t\t},\n\t\t{\n\t\t\tid: \"solarized\",\n\t\t\tnames: Color256Palette(1, 2, 3, 4, 5, 6, 7, 9, 13),\n\t\t\tsys: Color256(11), \/\/ Yellow\n\t\t\tpm: Color256(15), \/\/ White\n\t\t\thighlight: style(Bold + \"\\033[48;5;3m\\033[38;5;94m\"), \/\/ Orange highlight\n\t\t},\n\t\t{\n\t\t\tid: \"hacker\",\n\t\t\tnames: Color256Palette(82), \/\/ Green\n\t\t\tsys: Color256(22), \/\/ Another green\n\t\t\tpm: Color256(28), \/\/ More green, slightly lighter\n\t\t\thighlight: style(Bold + \"\\033[48;5;22m\\033[38;5;46m\"), \/\/ Green on dark green\n\t\t},\n\t\t{\n\t\t\tid: \"mono\",\n\t\t},\n\t}\n\n\tDefaultTheme = &Themes[0]\n\tMonoTheme = &Themes[3]\n\n\t\/* Some debug helpers for your convenience:\n\n\t\/\/ Debug for palettes\n\tprintPalette(allColors256())\n\n\t\/\/ Debug for themes\n\tfor _, t := range Themes {\n\t\tprintTheme(t)\n\t}\n\n\t*\/\n}\n\nfunc printTheme(t Theme) {\n\tfmt.Println(\"Printing theme:\", t.ID())\n\tif t.names != nil {\n\t\tfor i, color := range t.names.colors {\n\t\t\tfmt.Printf(\"%s \", color.Format(fmt.Sprintf(\"name%d\", i)))\n\t\t}\n\t\tfmt.Println(\"\")\n\t}\n\tfmt.Println(t.ColorSys(\"SystemMsg\"))\n\tfmt.Println(t.ColorPM(\"PrivateMsg\"))\n\tfmt.Println(t.Highlight(\"Highlight\"))\n\tfmt.Println(\"\")\n}\n\nfunc printPalette(p *Palette) {\n\tfor i, color := range p.colors {\n\t\tfmt.Printf(\"%d\\t%s\\n\", i, color.Format(color.String()+\" \"))\n\t}\n}\n<commit_msg>chat\/message: Handle nil theme (mono)<commit_after>package message\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\t\/\/ Reset resets the color\n\tReset = \"\\033[0m\"\n\n\t\/\/ Bold makes the following text bold\n\tBold = \"\\033[1m\"\n\n\t\/\/ Dim dims the following text\n\tDim = \"\\033[2m\"\n\n\t\/\/ Italic makes the following text italic\n\tItalic = \"\\033[3m\"\n\n\t\/\/ Underline underlines the following text\n\tUnderline = \"\\033[4m\"\n\n\t\/\/ Blink blinks the following text\n\tBlink = \"\\033[5m\"\n\n\t\/\/ Invert inverts the following text\n\tInvert = \"\\033[7m\"\n\n\t\/\/ Newline\n\tNewline = \"\\r\\n\"\n\n\t\/\/ BEL\n\tBel = \"\\007\"\n)\n\n\/\/ Interface for Styles\ntype Style interface {\n\tString() string\n\tFormat(string) string\n}\n\n\/\/ General hardcoded style, mostly used as a crutch until we flesh out the\n\/\/ framework to support backgrounds etc.\ntype style string\n\nfunc (c style) String() string {\n\treturn string(c)\n}\n\nfunc (c style) Format(s string) string {\n\treturn c.String() + s + Reset\n}\n\n\/\/ 256 color type, for terminals who support it\ntype Color256 uint8\n\n\/\/ String version of this color\nfunc (c Color256) String() string {\n\treturn fmt.Sprintf(\"38;05;%d\", c)\n}\n\n\/\/ Return formatted string with this color\nfunc (c Color256) Format(s string) string {\n\treturn \"\\033[\" + c.String() + \"m\" + s + Reset\n}\n\nfunc Color256Palette(colors ...uint8) *Palette {\n\tsize := len(colors)\n\tp := make([]Style, 0, size)\n\tfor _, color := range colors {\n\t\tp = append(p, Color256(color))\n\t}\n\treturn &Palette{\n\t\tcolors: p,\n\t\tsize: size,\n\t}\n}\n\n\/\/ No color, used for mono theme\ntype Color0 struct{}\n\n\/\/ No-op for Color0\nfunc (c Color0) String() string {\n\treturn \"\"\n}\n\n\/\/ No-op for Color0\nfunc (c Color0) Format(s string) string {\n\treturn s\n}\n\n\/\/ Container for a collection of colors\ntype Palette struct {\n\tcolors []Style\n\tsize int\n}\n\n\/\/ Get a color by index, overflows are looped around.\nfunc (p Palette) Get(i int) Style {\n\tif p.size == 1 {\n\t\treturn p.colors[0]\n\t}\n\treturn p.colors[i%(p.size-1)]\n}\n\nfunc (p Palette) Len() int {\n\treturn p.size\n}\n\nfunc (p Palette) String() string {\n\tr := \"\"\n\tfor _, c := range p.colors {\n\t\tr += c.Format(\"X\")\n\t}\n\treturn r\n}\n\n\/\/ Collection of settings for chat\ntype Theme struct {\n\tid string\n\tsys Style\n\tpm Style\n\thighlight Style\n\tnames *Palette\n}\n\nfunc (theme Theme) ID() string {\n\treturn theme.id\n}\n\n\/\/ Colorize name string given some index\nfunc (theme Theme) ColorName(u *User) string {\n\tif theme.names == nil {\n\t\treturn u.Name()\n\t}\n\n\treturn theme.names.Get(u.colorIdx).Format(u.Name())\n}\n\n\/\/ Colorize the PM string\nfunc (theme Theme) ColorPM(s string) string {\n\tif theme.pm == nil {\n\t\treturn s\n\t}\n\n\treturn theme.pm.Format(s)\n}\n\n\/\/ Colorize the Sys message\nfunc (theme Theme) ColorSys(s string) string {\n\tif theme.sys == nil {\n\t\treturn s\n\t}\n\n\treturn theme.sys.Format(s)\n}\n\n\/\/ Highlight a matched string, usually name\nfunc (theme Theme) Highlight(s string) string {\n\tif theme.highlight == nil {\n\t\treturn s\n\t}\n\treturn theme.highlight.Format(s)\n}\n\n\/\/ Timestamp colorizes the timestamp.\nfunc (theme Theme) Timestamp(s string) string {\n\tif theme.sys == nil {\n\t\treturn s\n\t}\n\treturn theme.sys.Format(s)\n}\n\n\/\/ List of initialzied themes\nvar Themes []Theme\n\n\/\/ Default theme to use\nvar DefaultTheme *Theme\n\n\/\/ MonoTheme is a simple theme without colors, useful for testing and bots.\nvar MonoTheme *Theme\n\nfunc allColors256() *Palette {\n\tcolors := []uint8{}\n\tvar i uint8\n\tfor i = 0; i < 255; i++ {\n\t\tcolors = append(colors, i)\n\t}\n\treturn Color256Palette(colors...)\n}\n\nfunc readableColors256() *Palette {\n\tcolors := []uint8{}\n\tvar i uint8\n\tfor i = 0; i < 255; i++ {\n\t\tif i == 0 || i == 7 || i == 8 || i == 15 || i == 16 || i == 17 || i > 230 {\n\t\t\t\/\/ Skip 31 Shades of Grey, and one hyperintelligent shade of blue.\n\t\t\tcontinue\n\t\t}\n\t\tcolors = append(colors, i)\n\t}\n\treturn Color256Palette(colors...)\n}\n\nfunc init() {\n\tThemes = []Theme{\n\t\t{\n\t\t\tid: \"colors\",\n\t\t\tnames: readableColors256(),\n\t\t\tsys: Color256(245), \/\/ Grey\n\t\t\tpm: Color256(7), \/\/ White\n\t\t\thighlight: style(Bold + \"\\033[48;5;11m\\033[38;5;16m\"), \/\/ Yellow highlight\n\t\t},\n\t\t{\n\t\t\tid: \"solarized\",\n\t\t\tnames: Color256Palette(1, 2, 3, 4, 5, 6, 7, 9, 13),\n\t\t\tsys: Color256(11), \/\/ Yellow\n\t\t\tpm: Color256(15), \/\/ White\n\t\t\thighlight: style(Bold + \"\\033[48;5;3m\\033[38;5;94m\"), \/\/ Orange highlight\n\t\t},\n\t\t{\n\t\t\tid: \"hacker\",\n\t\t\tnames: Color256Palette(82), \/\/ Green\n\t\t\tsys: Color256(22), \/\/ Another green\n\t\t\tpm: Color256(28), \/\/ More green, slightly lighter\n\t\t\thighlight: style(Bold + \"\\033[48;5;22m\\033[38;5;46m\"), \/\/ Green on dark green\n\t\t},\n\t\t{\n\t\t\tid: \"mono\",\n\t\t},\n\t}\n\n\tDefaultTheme = &Themes[0]\n\tMonoTheme = &Themes[3]\n\n\t\/* Some debug helpers for your convenience:\n\n\t\/\/ Debug for palettes\n\tprintPalette(allColors256())\n\n\t\/\/ Debug for themes\n\tfor _, t := range Themes {\n\t\tprintTheme(t)\n\t}\n\n\t*\/\n}\n\nfunc printTheme(t Theme) {\n\tfmt.Println(\"Printing theme:\", t.ID())\n\tif t.names != nil {\n\t\tfor i, color := range t.names.colors {\n\t\t\tfmt.Printf(\"%s \", color.Format(fmt.Sprintf(\"name%d\", i)))\n\t\t}\n\t\tfmt.Println(\"\")\n\t}\n\tfmt.Println(t.ColorSys(\"SystemMsg\"))\n\tfmt.Println(t.ColorPM(\"PrivateMsg\"))\n\tfmt.Println(t.Highlight(\"Highlight\"))\n\tfmt.Println(\"\")\n}\n\nfunc printPalette(p *Palette) {\n\tfor i, color := range p.colors {\n\t\tfmt.Printf(\"%d\\t%s\\n\", i, color.Format(color.String()+\" \"))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ vespa test command\n\/\/ Author: jonmv\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/util\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/vespa\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(testCmd)\n}\n\n\/\/ TODO: add link to test doc at cloud.vespa.ai\nvar testCmd = &cobra.Command{\n\tUse: \"test [tests directory or test file]\",\n\tShort: \"Run a test suite, or a single test\",\n\tLong: `Run a test suite, or a single test\n\nRuns all JSON test files in the specified directory, or the single JSON\ntest file specified.\n\nIf no directory or file is specified, the working directory is used instead.`,\n\tExample: `$ vespa test src\/test\/application\/tests\/system-test\n$ vespa test src\/test\/application\/tests\/system-test\/feed-and-query.json`,\n\tArgs: cobra.MaximumNArgs(1),\n\tDisableAutoGenTag: true,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\ttarget := getTarget()\n\t\ttestPath := \".\"\n\t\tif len(args) > 0 {\n\t\t\ttestPath = args[0]\n\t\t}\n\t\tif count, failed := runTests(testPath, target); len(failed) != 0 {\n\t\t\tfmt.Fprintf(stdout, \"\\nFailed %d of %d tests:\\n\", len(failed), count)\n\t\t\tfor _, test := range failed {\n\t\t\t\tfmt.Fprintln(stdout, test)\n\t\t\t}\n\t\t\texitFunc(3)\n\t\t} else if count == 0 {\n\t\t\tfmt.Fprintf(stdout, \"Failed to find any tests at %v\\n\", testPath)\n\t\t\texitFunc(3)\n\t\t} else {\n\t\t\tplural := \"s\"\n\t\t\tif count == 1 {\n\t\t\t\tplural = \"\"\n\t\t\t}\n\t\t\tfmt.Fprintf(stdout, \"\\n%d test%s completed successfully\\n\", count, plural)\n\t\t}\n\t},\n}\n\nfunc runTests(rootPath string, target vespa.Target) (int, []string) {\n\tcount := 0\n\tfailed := make([]string, 0)\n\tif stat, err := os.Stat(rootPath); err != nil {\n\t\tfatalErr(err, \"Failed reading specified test path\")\n\t} else if stat.IsDir() {\n\t\ttests, err := ioutil.ReadDir(rootPath) \/\/ TODO: Use os.ReadDir when >= 1.16 is required.\n\t\tif err != nil {\n\t\t\tfatalErr(err, \"Failed reading specified test directory\")\n\t\t}\n\t\tpreviousFailed := false\n\t\tfor _, test := range tests {\n\t\t\tif !test.IsDir() && filepath.Ext(test.Name()) == \".json\" {\n\t\t\t\ttestPath := path.Join(rootPath, test.Name())\n\t\t\t\tif previousFailed {\n\t\t\t\t\tfmt.Fprintln(stdout, \"\")\n\t\t\t\t\tpreviousFailed = false\n\t\t\t\t}\n\t\t\t\tfailure := runTest(testPath, target)\n\t\t\t\tif failure != \"\" {\n\t\t\t\t\tfailed = append(failed, failure)\n\t\t\t\t\tpreviousFailed = true\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t} else if strings.HasSuffix(stat.Name(), \".json\") {\n\t\tfailure := runTest(rootPath, target)\n\t\tif failure != \"\" {\n\t\t\tfailed = append(failed, failure)\n\t\t}\n\t\tcount++\n\t}\n\treturn count, failed\n}\n\n\/\/ Runs the test at the given path, and returns the specified test name if the test fails\nfunc runTest(testPath string, target vespa.Target) string {\n\tvar test test\n\ttestBytes, err := ioutil.ReadFile(testPath)\n\tif err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Failed to read test file at %s\", testPath))\n\t}\n\tif err = json.Unmarshal(testBytes, &test); err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Failed to parse test file at %s\", testPath))\n\t}\n\n\ttestName := test.Name\n\tif test.Name == \"\" {\n\t\ttestName = filepath.Base(testPath)\n\t}\n\tfmt.Fprintf(stdout, \"Running %s:\", testName)\n\n\tdefaultParameters, err := getParameters(test.Defaults.ParametersRaw, path.Dir(testPath))\n\tif err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Invalid default parameters for %s\", testName))\n\t}\n\n\tif len(test.Steps) == 0 {\n\t\tfatalErr(fmt.Errorf(\"a test must have at least one step, but none were found in %s\", testPath))\n\t}\n\tfor i, step := range test.Steps {\n\t\tstepName := step.Name\n\t\tif stepName == \"\" {\n\t\t\tstepName = fmt.Sprintf(\"step %d\", i+1)\n\t\t}\n\t\tfailure, longFailure, err := verify(step, path.Dir(testPath), test.Defaults.Cluster, defaultParameters, target)\n\t\tif err != nil {\n\t\t\tfatalErr(err, fmt.Sprintf(\"Error in %s\", stepName))\n\t\t}\n\t\tif failure != \"\" {\n\t\t\tfmt.Fprintf(stdout, \" Failed %s:\\n%s\\n\", stepName, longFailure)\n\t\t\treturn fmt.Sprintf(\"%s: %s: %s\", testName, stepName, failure)\n\t\t}\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(stdout, \" \")\n\t\t}\n\t\tfmt.Fprint(stdout, \".\")\n\t}\n\tfmt.Fprintln(stdout, \" OK\")\n\treturn \"\"\n}\n\n\/\/ Asserts specified response is obtained for request, or returns a failure message, or an error if this fails\nfunc verify(step step, testsPath string, defaultCluster string, defaultParameters map[string]string, target vespa.Target) (string, string, error) {\n\trequestBody, err := getBody(step.Request.BodyRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tparameters, err := getParameters(step.Request.ParametersRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tfor name, value := range defaultParameters {\n\t\tif _, present := parameters[name]; !present {\n\t\t\tparameters[name] = value\n\t\t}\n\t}\n\n\tcluster := step.Request.Cluster\n\tif cluster == \"\" {\n\t\tcluster = defaultCluster\n\t}\n\n\tservice, err := target.Service(\"query\", 0, 0, cluster)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tmethod := step.Request.Method\n\tif method == \"\" {\n\t\tmethod = \"GET\"\n\t}\n\n\trequestUri := step.Request.URI\n\tif requestUri == \"\" {\n\t\trequestUri = \"\/search\/\"\n\t}\n\trequestUrl, err := url.ParseRequestURI(requestUri)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\texternalEndpoint := requestUrl.IsAbs()\n\tif !externalEndpoint {\n\t\trequestUrl, err = url.ParseRequestURI(service.BaseURL + requestUri)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\tquery := requestUrl.Query()\n\tfor name, value := range parameters {\n\t\tquery.Add(name, value)\n\t}\n\trequestUrl.RawQuery = query.Encode()\n\n\theader := http.Header{}\n\theader.Add(\"Content-Type\", \"application\/json\") \/\/ TODO: Not guaranteed to be true ...\n\n\trequest := &http.Request{\n\t\tURL: requestUrl,\n\t\tMethod: method,\n\t\tHeader: header,\n\t\tBody: ioutil.NopCloser(bytes.NewReader(requestBody)),\n\t}\n\tdefer request.Body.Close()\n\n\tvar response *http.Response\n\tif externalEndpoint {\n\t\tutil.ActiveHttpClient.UseCertificate([]tls.Certificate{})\n\t\tresponse, err = util.ActiveHttpClient.Do(request, 60*time.Second)\n\t} else {\n\t\tresponse, err = service.Do(request, 600*time.Second) \/\/ Vespa should provide a response within the given request timeout\n\t}\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer response.Body.Close()\n\n\tstatusCode := step.Response.Code\n\tif statusCode == 0 {\n\t\tstatusCode = 200\n\t}\n\tif statusCode != response.StatusCode {\n\t\tfailure := fmt.Sprintf(\"Unexpected status code: %d\", response.StatusCode)\n\t\treturn failure, fmt.Sprintf(\"%s\\nExpected: %d\\nActual response:\\n%s\", failure, statusCode, util.ReaderToJSON(response.Body)), nil\n\t}\n\n\tresponseBodySpecBytes, err := getBody(step.Response.BodyRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif responseBodySpecBytes == nil {\n\t\treturn \"\", \"\", nil\n\t}\n\tvar responseBodySpec interface{}\n\terr = json.Unmarshal(responseBodySpecBytes, &responseBodySpec)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tresponseBodyBytes, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tvar responseBody interface{}\n\terr = json.Unmarshal(responseBodyBytes, &responseBody)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"got non-JSON response; %w:\\n%s\", err, string(responseBodyBytes))\n\t}\n\n\tfailure, expected, err := compare(responseBodySpec, responseBody, \"\")\n\tif failure != \"\" {\n\t\tresponsePretty, _ := json.MarshalIndent(responseBody, \"\", \" \")\n\t\tlongFailure := failure\n\t\tif expected != \"\" {\n\t\t\tlongFailure += \"\\n\" + expected\n\t\t}\n\t\tlongFailure += \"\\nActual response:\\n\" + string(responsePretty)\n\t\treturn failure, longFailure, err\n\t}\n\treturn \"\", \"\", err\n}\n\nfunc compare(expected interface{}, actual interface{}, path string) (string, string, error) {\n\ttypeMatch := false\n\tvalueMatch := false\n\tswitch u := expected.(type) {\n\tcase nil:\n\t\ttypeMatch = actual == nil\n\t\tvalueMatch = actual == nil\n\tcase bool:\n\t\tv, ok := actual.(bool)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && u == v\n\tcase float64:\n\t\tv, ok := actual.(float64)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && math.Abs(u-v) < 1e-9\n\tcase string:\n\t\tv, ok := actual.(string)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && (u == v)\n\tcase []interface{}:\n\t\tv, ok := actual.([]interface{})\n\t\ttypeMatch = ok\n\t\tif ok {\n\t\t\tif len(u) == len(v) {\n\t\t\t\tfor i, e := range u {\n\t\t\t\t\tfailure, expected, err := compare(e, v[i], fmt.Sprintf(\"%s\/%d\", path, i))\n\t\t\t\t\tif failure != \"\" || err != nil {\n\t\t\t\t\t\treturn failure, expected, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvalueMatch = true\n\t\t\t} else {\n\t\t\t\treturn fmt.Sprintf(\"Unexpected number of elements at %s: %d\", path, len(v)), fmt.Sprintf(\"Expected: %d\", len(u)), nil\n\t\t\t}\n\t\t}\n\tcase map[string]interface{}:\n\t\tv, ok := actual.(map[string]interface{})\n\t\ttypeMatch = ok\n\t\tif ok {\n\t\t\tfor n, e := range u {\n\t\t\t\tchildPath := fmt.Sprintf(\"%s\/%s\", path, strings.ReplaceAll(strings.ReplaceAll(n, \"~\", \"~0\"), \"\/\", \"~1\"))\n\t\t\t\tf, ok := v[n]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Sprintf(\"Missing expected field at %s\", childPath), \"\", nil\n\t\t\t\t}\n\t\t\t\tfailure, expected, err := compare(e, f, childPath)\n\t\t\t\tif failure != \"\" || err != nil {\n\t\t\t\t\treturn failure, expected, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tvalueMatch = true\n\t\t}\n\tdefault:\n\t\treturn \"\", \"\", fmt.Errorf(\"unexpected expected JSON type for value '%v'\", expected)\n\t}\n\n\tif !valueMatch {\n\t\tif path == \"\" {\n\t\t\tpath = \"root\"\n\t\t}\n\t\tmismatched := \"type\"\n\t\tif typeMatch {\n\t\t\tmismatched = \"value\"\n\t\t}\n\t\texpectedJson, _ := json.Marshal(expected)\n\t\tactualJson, _ := json.Marshal(actual)\n\t\treturn fmt.Sprintf(\"Unexpected %s at %s: %s\", mismatched, path, actualJson), fmt.Sprintf(\"Expected: %s\", expectedJson), nil\n\t}\n\treturn \"\", \"\", nil\n}\n\nfunc getParameters(parametersRaw []byte, testsPath string) (map[string]string, error) {\n\tif parametersRaw != nil {\n\t\tvar parametersPath string\n\t\tif err := json.Unmarshal(parametersRaw, ¶metersPath); err == nil {\n\t\t\tresolvedParametersPath := path.Join(testsPath, parametersPath)\n\t\t\tparametersRaw, err = ioutil.ReadFile(resolvedParametersPath)\n\t\t\tif err != nil {\n\t\t\t\tfatalErr(err, fmt.Sprintf(\"Failed to read request parameters file at '%s'\", resolvedParametersPath))\n\t\t\t}\n\t\t}\n\t\tvar parameters map[string]string\n\t\tif err := json.Unmarshal(parametersRaw, ¶meters); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"request parameters must be JSON with only string values: %w\", err)\n\t\t}\n\t\treturn parameters, nil\n\t}\n\treturn make(map[string]string), nil\n}\n\nfunc getBody(bodyRaw []byte, testsPath string) ([]byte, error) {\n\tvar bodyPath string\n\tif err := json.Unmarshal(bodyRaw, &bodyPath); err == nil {\n\t\tresolvedBodyPath := path.Join(testsPath, bodyPath)\n\t\tbodyRaw, err = ioutil.ReadFile(resolvedBodyPath)\n\t\tif err != nil {\n\t\t\tfatalErr(err, fmt.Sprintf(\"Failed to read body file at '%s'\", resolvedBodyPath))\n\t\t}\n\t}\n\treturn bodyRaw, nil\n}\n\ntype test struct {\n\tName string `json:\"name\"`\n\tDefaults defaults `json:\"defaults\"`\n\tSteps []step `json:\"steps\"`\n}\n\ntype defaults struct {\n\tCluster string `json:\"cluster\"`\n\tParametersRaw json.RawMessage `json:\"parameters\"`\n}\n\ntype step struct {\n\tName string `json:\"name\"`\n\tRequest request `json:\"request\"`\n\tResponse response `json:\"response\"`\n}\n\ntype request struct {\n\tCluster string `json:\"cluster\"`\n\tMethod string `json:\"method\"`\n\tURI string `json:\"uri\"`\n\tParametersRaw json.RawMessage `json:\"parameters\"`\n\tBodyRaw json.RawMessage `json:\"body\"`\n}\n\ntype response struct {\n\tCode int `json:\"code\"`\n\tBodyRaw json.RawMessage `json:\"body\"`\n}\n<commit_msg>Add link to cloud doc for vespa test<commit_after>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ vespa test command\n\/\/ Author: jonmv\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/util\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/vespa\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(testCmd)\n}\n\nvar testCmd = &cobra.Command{\n\tUse: \"test [tests directory or test file]\",\n\tShort: \"Run a test suite, or a single test\",\n\tLong: `Run a test suite, or a single test\n\nRuns all JSON test files in the specified directory (the working\ndirectory by default), or the single JSON test file specified.\n\nSee https:\/\/cloud.vespa.ai\/en\/reference\/testing.html for details.`,\n\tExample: `$ vespa test src\/test\/application\/tests\/system-test\n$ vespa test src\/test\/application\/tests\/system-test\/feed-and-query.json`,\n\tArgs: cobra.MaximumNArgs(1),\n\tDisableAutoGenTag: true,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\ttarget := getTarget()\n\t\ttestPath := \".\"\n\t\tif len(args) > 0 {\n\t\t\ttestPath = args[0]\n\t\t}\n\t\tif count, failed := runTests(testPath, target); len(failed) != 0 {\n\t\t\tfmt.Fprintf(stdout, \"\\nFailed %d of %d tests:\\n\", len(failed), count)\n\t\t\tfor _, test := range failed {\n\t\t\t\tfmt.Fprintln(stdout, test)\n\t\t\t}\n\t\t\texitFunc(3)\n\t\t} else if count == 0 {\n\t\t\tfmt.Fprintf(stdout, \"Failed to find any tests at %v\\n\", testPath)\n\t\t\texitFunc(3)\n\t\t} else {\n\t\t\tplural := \"s\"\n\t\t\tif count == 1 {\n\t\t\t\tplural = \"\"\n\t\t\t}\n\t\t\tfmt.Fprintf(stdout, \"\\n%d test%s completed successfully\\n\", count, plural)\n\t\t}\n\t},\n}\n\nfunc runTests(rootPath string, target vespa.Target) (int, []string) {\n\tcount := 0\n\tfailed := make([]string, 0)\n\tif stat, err := os.Stat(rootPath); err != nil {\n\t\tfatalErr(err, \"Failed reading specified test path\")\n\t} else if stat.IsDir() {\n\t\ttests, err := ioutil.ReadDir(rootPath) \/\/ TODO: Use os.ReadDir when >= 1.16 is required.\n\t\tif err != nil {\n\t\t\tfatalErr(err, \"Failed reading specified test directory\")\n\t\t}\n\t\tpreviousFailed := false\n\t\tfor _, test := range tests {\n\t\t\tif !test.IsDir() && filepath.Ext(test.Name()) == \".json\" {\n\t\t\t\ttestPath := path.Join(rootPath, test.Name())\n\t\t\t\tif previousFailed {\n\t\t\t\t\tfmt.Fprintln(stdout, \"\")\n\t\t\t\t\tpreviousFailed = false\n\t\t\t\t}\n\t\t\t\tfailure := runTest(testPath, target)\n\t\t\t\tif failure != \"\" {\n\t\t\t\t\tfailed = append(failed, failure)\n\t\t\t\t\tpreviousFailed = true\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t} else if strings.HasSuffix(stat.Name(), \".json\") {\n\t\tfailure := runTest(rootPath, target)\n\t\tif failure != \"\" {\n\t\t\tfailed = append(failed, failure)\n\t\t}\n\t\tcount++\n\t}\n\treturn count, failed\n}\n\n\/\/ Runs the test at the given path, and returns the specified test name if the test fails\nfunc runTest(testPath string, target vespa.Target) string {\n\tvar test test\n\ttestBytes, err := ioutil.ReadFile(testPath)\n\tif err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Failed to read test file at %s\", testPath))\n\t}\n\tif err = json.Unmarshal(testBytes, &test); err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Failed to parse test file at %s\", testPath))\n\t}\n\n\ttestName := test.Name\n\tif test.Name == \"\" {\n\t\ttestName = filepath.Base(testPath)\n\t}\n\tfmt.Fprintf(stdout, \"Running %s:\", testName)\n\n\tdefaultParameters, err := getParameters(test.Defaults.ParametersRaw, path.Dir(testPath))\n\tif err != nil {\n\t\tfatalErr(err, fmt.Sprintf(\"Invalid default parameters for %s\", testName))\n\t}\n\n\tif len(test.Steps) == 0 {\n\t\tfatalErr(fmt.Errorf(\"a test must have at least one step, but none were found in %s\", testPath))\n\t}\n\tfor i, step := range test.Steps {\n\t\tstepName := step.Name\n\t\tif stepName == \"\" {\n\t\t\tstepName = fmt.Sprintf(\"step %d\", i+1)\n\t\t}\n\t\tfailure, longFailure, err := verify(step, path.Dir(testPath), test.Defaults.Cluster, defaultParameters, target)\n\t\tif err != nil {\n\t\t\tfatalErr(err, fmt.Sprintf(\"Error in %s\", stepName))\n\t\t}\n\t\tif failure != \"\" {\n\t\t\tfmt.Fprintf(stdout, \" Failed %s:\\n%s\\n\", stepName, longFailure)\n\t\t\treturn fmt.Sprintf(\"%s: %s: %s\", testName, stepName, failure)\n\t\t}\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(stdout, \" \")\n\t\t}\n\t\tfmt.Fprint(stdout, \".\")\n\t}\n\tfmt.Fprintln(stdout, \" OK\")\n\treturn \"\"\n}\n\n\/\/ Asserts specified response is obtained for request, or returns a failure message, or an error if this fails\nfunc verify(step step, testsPath string, defaultCluster string, defaultParameters map[string]string, target vespa.Target) (string, string, error) {\n\trequestBody, err := getBody(step.Request.BodyRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tparameters, err := getParameters(step.Request.ParametersRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tfor name, value := range defaultParameters {\n\t\tif _, present := parameters[name]; !present {\n\t\t\tparameters[name] = value\n\t\t}\n\t}\n\n\tcluster := step.Request.Cluster\n\tif cluster == \"\" {\n\t\tcluster = defaultCluster\n\t}\n\n\tservice, err := target.Service(\"query\", 0, 0, cluster)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tmethod := step.Request.Method\n\tif method == \"\" {\n\t\tmethod = \"GET\"\n\t}\n\n\trequestUri := step.Request.URI\n\tif requestUri == \"\" {\n\t\trequestUri = \"\/search\/\"\n\t}\n\trequestUrl, err := url.ParseRequestURI(requestUri)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\texternalEndpoint := requestUrl.IsAbs()\n\tif !externalEndpoint {\n\t\trequestUrl, err = url.ParseRequestURI(service.BaseURL + requestUri)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\tquery := requestUrl.Query()\n\tfor name, value := range parameters {\n\t\tquery.Add(name, value)\n\t}\n\trequestUrl.RawQuery = query.Encode()\n\n\theader := http.Header{}\n\theader.Add(\"Content-Type\", \"application\/json\") \/\/ TODO: Not guaranteed to be true ...\n\n\trequest := &http.Request{\n\t\tURL: requestUrl,\n\t\tMethod: method,\n\t\tHeader: header,\n\t\tBody: ioutil.NopCloser(bytes.NewReader(requestBody)),\n\t}\n\tdefer request.Body.Close()\n\n\tvar response *http.Response\n\tif externalEndpoint {\n\t\tutil.ActiveHttpClient.UseCertificate([]tls.Certificate{})\n\t\tresponse, err = util.ActiveHttpClient.Do(request, 60*time.Second)\n\t} else {\n\t\tresponse, err = service.Do(request, 600*time.Second) \/\/ Vespa should provide a response within the given request timeout\n\t}\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer response.Body.Close()\n\n\tstatusCode := step.Response.Code\n\tif statusCode == 0 {\n\t\tstatusCode = 200\n\t}\n\tif statusCode != response.StatusCode {\n\t\tfailure := fmt.Sprintf(\"Unexpected status code: %d\", response.StatusCode)\n\t\treturn failure, fmt.Sprintf(\"%s\\nExpected: %d\\nActual response:\\n%s\", failure, statusCode, util.ReaderToJSON(response.Body)), nil\n\t}\n\n\tresponseBodySpecBytes, err := getBody(step.Response.BodyRaw, testsPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif responseBodySpecBytes == nil {\n\t\treturn \"\", \"\", nil\n\t}\n\tvar responseBodySpec interface{}\n\terr = json.Unmarshal(responseBodySpecBytes, &responseBodySpec)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tresponseBodyBytes, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tvar responseBody interface{}\n\terr = json.Unmarshal(responseBodyBytes, &responseBody)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"got non-JSON response; %w:\\n%s\", err, string(responseBodyBytes))\n\t}\n\n\tfailure, expected, err := compare(responseBodySpec, responseBody, \"\")\n\tif failure != \"\" {\n\t\tresponsePretty, _ := json.MarshalIndent(responseBody, \"\", \" \")\n\t\tlongFailure := failure\n\t\tif expected != \"\" {\n\t\t\tlongFailure += \"\\n\" + expected\n\t\t}\n\t\tlongFailure += \"\\nActual response:\\n\" + string(responsePretty)\n\t\treturn failure, longFailure, err\n\t}\n\treturn \"\", \"\", err\n}\n\nfunc compare(expected interface{}, actual interface{}, path string) (string, string, error) {\n\ttypeMatch := false\n\tvalueMatch := false\n\tswitch u := expected.(type) {\n\tcase nil:\n\t\ttypeMatch = actual == nil\n\t\tvalueMatch = actual == nil\n\tcase bool:\n\t\tv, ok := actual.(bool)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && u == v\n\tcase float64:\n\t\tv, ok := actual.(float64)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && math.Abs(u-v) < 1e-9\n\tcase string:\n\t\tv, ok := actual.(string)\n\t\ttypeMatch = ok\n\t\tvalueMatch = ok && (u == v)\n\tcase []interface{}:\n\t\tv, ok := actual.([]interface{})\n\t\ttypeMatch = ok\n\t\tif ok {\n\t\t\tif len(u) == len(v) {\n\t\t\t\tfor i, e := range u {\n\t\t\t\t\tfailure, expected, err := compare(e, v[i], fmt.Sprintf(\"%s\/%d\", path, i))\n\t\t\t\t\tif failure != \"\" || err != nil {\n\t\t\t\t\t\treturn failure, expected, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvalueMatch = true\n\t\t\t} else {\n\t\t\t\treturn fmt.Sprintf(\"Unexpected number of elements at %s: %d\", path, len(v)), fmt.Sprintf(\"Expected: %d\", len(u)), nil\n\t\t\t}\n\t\t}\n\tcase map[string]interface{}:\n\t\tv, ok := actual.(map[string]interface{})\n\t\ttypeMatch = ok\n\t\tif ok {\n\t\t\tfor n, e := range u {\n\t\t\t\tchildPath := fmt.Sprintf(\"%s\/%s\", path, strings.ReplaceAll(strings.ReplaceAll(n, \"~\", \"~0\"), \"\/\", \"~1\"))\n\t\t\t\tf, ok := v[n]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Sprintf(\"Missing expected field at %s\", childPath), \"\", nil\n\t\t\t\t}\n\t\t\t\tfailure, expected, err := compare(e, f, childPath)\n\t\t\t\tif failure != \"\" || err != nil {\n\t\t\t\t\treturn failure, expected, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tvalueMatch = true\n\t\t}\n\tdefault:\n\t\treturn \"\", \"\", fmt.Errorf(\"unexpected expected JSON type for value '%v'\", expected)\n\t}\n\n\tif !valueMatch {\n\t\tif path == \"\" {\n\t\t\tpath = \"root\"\n\t\t}\n\t\tmismatched := \"type\"\n\t\tif typeMatch {\n\t\t\tmismatched = \"value\"\n\t\t}\n\t\texpectedJson, _ := json.Marshal(expected)\n\t\tactualJson, _ := json.Marshal(actual)\n\t\treturn fmt.Sprintf(\"Unexpected %s at %s: %s\", mismatched, path, actualJson), fmt.Sprintf(\"Expected: %s\", expectedJson), nil\n\t}\n\treturn \"\", \"\", nil\n}\n\nfunc getParameters(parametersRaw []byte, testsPath string) (map[string]string, error) {\n\tif parametersRaw != nil {\n\t\tvar parametersPath string\n\t\tif err := json.Unmarshal(parametersRaw, ¶metersPath); err == nil {\n\t\t\tresolvedParametersPath := path.Join(testsPath, parametersPath)\n\t\t\tparametersRaw, err = ioutil.ReadFile(resolvedParametersPath)\n\t\t\tif err != nil {\n\t\t\t\tfatalErr(err, fmt.Sprintf(\"Failed to read request parameters file at '%s'\", resolvedParametersPath))\n\t\t\t}\n\t\t}\n\t\tvar parameters map[string]string\n\t\tif err := json.Unmarshal(parametersRaw, ¶meters); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"request parameters must be JSON with only string values: %w\", err)\n\t\t}\n\t\treturn parameters, nil\n\t}\n\treturn make(map[string]string), nil\n}\n\nfunc getBody(bodyRaw []byte, testsPath string) ([]byte, error) {\n\tvar bodyPath string\n\tif err := json.Unmarshal(bodyRaw, &bodyPath); err == nil {\n\t\tresolvedBodyPath := path.Join(testsPath, bodyPath)\n\t\tbodyRaw, err = ioutil.ReadFile(resolvedBodyPath)\n\t\tif err != nil {\n\t\t\tfatalErr(err, fmt.Sprintf(\"Failed to read body file at '%s'\", resolvedBodyPath))\n\t\t}\n\t}\n\treturn bodyRaw, nil\n}\n\ntype test struct {\n\tName string `json:\"name\"`\n\tDefaults defaults `json:\"defaults\"`\n\tSteps []step `json:\"steps\"`\n}\n\ntype defaults struct {\n\tCluster string `json:\"cluster\"`\n\tParametersRaw json.RawMessage `json:\"parameters\"`\n}\n\ntype step struct {\n\tName string `json:\"name\"`\n\tRequest request `json:\"request\"`\n\tResponse response `json:\"response\"`\n}\n\ntype request struct {\n\tCluster string `json:\"cluster\"`\n\tMethod string `json:\"method\"`\n\tURI string `json:\"uri\"`\n\tParametersRaw json.RawMessage `json:\"parameters\"`\n\tBodyRaw json.RawMessage `json:\"body\"`\n}\n\ntype response struct {\n\tCode int `json:\"code\"`\n\tBodyRaw json.RawMessage `json:\"body\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ updateCmd represents the update command\nvar clientsUpdateCmd = &cobra.Command{\n\tUse: \"update <id>\",\n\tShort: \"Update an OAuth 2.0 Client\",\n\tLong: `This command updates an OAuth 2.0 Client by its ID.\n\nExample:\n hydra clients update client-1 -n \"my app\" -c http:\/\/localhost\/cb -g authorization_code -r code -a core,foobar\n\nTo encrypt auto generated client secret, use \"--pgp-key\", \"--pgp-key-url\" or \"--keybase\" flag, for example:\n hydra clients update client-1 -n \"my app\" -g client_credentials -r token -a core,foobar --keybase keybase_username\n`,\n\tRun: cmdHandler.Clients.UpdateClient,\n}\n\nfunc init() {\n\tclientsCmd.AddCommand(clientsUpdateCmd)\n\tclientsUpdateCmd.Flags().StringSliceP(\"callbacks\", \"c\", []string{}, \"REQUIRED list of allowed callback URLs\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"grant-types\", \"g\", []string{\"authorization_code\"}, \"A list of allowed grant types\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"response-types\", \"r\", []string{\"code\"}, \"A list of allowed response types\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"scope\", \"a\", []string{\"\"}, \"The scope the client is allowed to request\")\n\tclientsUpdateCmd.Flags().StringSlice(\"audience\", []string{}, \"The audience this client is allowed to request\")\n\tclientsUpdateCmd.Flags().String(\"token-endpoint-auth-method\", \"client_secret_basic\", \"Define which authentication method the client may use at the Token Endpoint. Valid values are \\\"client_secret_post\\\", \\\"client_secret_basic\\\", \\\"private_key_jwt\\\", and \\\"none\\\"\")\n\tclientsUpdateCmd.Flags().String(\"jwks-uri\", \"\", \"Define the URL where the JSON Web Key Set should be fetched from when performing the \\\"private_key_jwt\\\" client authentication method\")\n\tclientsUpdateCmd.Flags().String(\"policy-uri\", \"\", \"A URL string that points to a human-readable privacy policy document that describes how the deployment organization collects, uses, retains, and discloses personal data\")\n\tclientsUpdateCmd.Flags().String(\"tos-uri\", \"\", \"A URL string that points to a human-readable terms of service document for the client that describes a contractual relationship between the end-user and the client that the end-user accepts when authorizing the client\")\n\tclientsUpdateCmd.Flags().String(\"client-uri\", \"\", \"A URL string of a web page providing information about the client\")\n\tclientsUpdateCmd.Flags().String(\"logo-uri\", \"\", \"A URL string that references a logo for the client\")\n\tclientsUpdateCmd.Flags().StringSlice(\"allowed-cors-origins\", []string{}, \"The list of URLs allowed to make CORS requests. Requires CORS_ENABLED.\")\n\tclientsUpdateCmd.Flags().String(\"subject-type\", \"public\", \"A identifier algorithm. Valid values are \\\"public\\\" and \\\"pairwise\\\"\")\n\tclientsUpdateCmd.Flags().String(\"secret\", \"\", \"Provide the client's secret\")\n\tclientsUpdateCmd.Flags().StringP(\"name\", \"n\", \"\", \"The client's name\")\n\tclientsUpdateCmd.Flags().StringSlice(\"post-logout-callbacks\", []string{}, \"List of allowed URLs to be redirected to after a logout\")\n\n\t\/\/ encrypt client secret options\n\tclientsUpdateCmd.Flags().String(\"pgp-key\", \"\", \"Base64 encoded PGP encryption key for encrypting client secret\")\n\tclientsUpdateCmd.Flags().String(\"pgp-key-url\", \"\", \"PGP encryption key URL for encrypting client secret\")\n\tclientsUpdateCmd.Flags().String(\"keybase\", \"\", \"Keybase username for encrypting client secret\")\n}\n<commit_msg>feat: improving the client update command description<commit_after>package cmd\n\nimport (\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ updateCmd represents the update command\nvar clientsUpdateCmd = &cobra.Command{\n\tUse: \"update <id>\",\n\tShort: \"Update an entire OAuth 2.0 Client\",\n\tLong: `This command replaces an OAuth 2.0 Client by its ID.\n\nPlease be aware that this command replaces the entire client.\nTo update only the name, a full client should be provided, for example:\n hydra clients update client-1 -n \"my updated app\" -c http:\/\/localhost\/cb -g authorization_code -r code -a core,foobar\n\nIf only the name flag (-n \"my updated app\") is provided, the all other fields are updated to their default values.\n\nTo encrypt auto generated client secret, use \"--pgp-key\", \"--pgp-key-url\" or \"--keybase\" flag, for example:\n hydra clients update client-1 -n \"my updated app\" -g client_credentials -r token -a core,foobar --keybase keybase_username\n`,\n\tRun: cmdHandler.Clients.UpdateClient,\n}\n\nfunc init() {\n\tclientsCmd.AddCommand(clientsUpdateCmd)\n\tclientsUpdateCmd.Flags().StringSliceP(\"callbacks\", \"c\", []string{}, \"REQUIRED list of allowed callback URLs\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"grant-types\", \"g\", []string{\"authorization_code\"}, \"A list of allowed grant types\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"response-types\", \"r\", []string{\"code\"}, \"A list of allowed response types\")\n\tclientsUpdateCmd.Flags().StringSliceP(\"scope\", \"a\", []string{\"\"}, \"The scope the client is allowed to request\")\n\tclientsUpdateCmd.Flags().StringSlice(\"audience\", []string{}, \"The audience this client is allowed to request\")\n\tclientsUpdateCmd.Flags().String(\"token-endpoint-auth-method\", \"client_secret_basic\", \"Define which authentication method the client may use at the Token Endpoint. Valid values are \\\"client_secret_post\\\", \\\"client_secret_basic\\\", \\\"private_key_jwt\\\", and \\\"none\\\"\")\n\tclientsUpdateCmd.Flags().String(\"jwks-uri\", \"\", \"Define the URL where the JSON Web Key Set should be fetched from when performing the \\\"private_key_jwt\\\" client authentication method\")\n\tclientsUpdateCmd.Flags().String(\"policy-uri\", \"\", \"A URL string that points to a human-readable privacy policy document that describes how the deployment organization collects, uses, retains, and discloses personal data\")\n\tclientsUpdateCmd.Flags().String(\"tos-uri\", \"\", \"A URL string that points to a human-readable terms of service document for the client that describes a contractual relationship between the end-user and the client that the end-user accepts when authorizing the client\")\n\tclientsUpdateCmd.Flags().String(\"client-uri\", \"\", \"A URL string of a web page providing information about the client\")\n\tclientsUpdateCmd.Flags().String(\"logo-uri\", \"\", \"A URL string that references a logo for the client\")\n\tclientsUpdateCmd.Flags().StringSlice(\"allowed-cors-origins\", []string{}, \"The list of URLs allowed to make CORS requests. Requires CORS_ENABLED.\")\n\tclientsUpdateCmd.Flags().String(\"subject-type\", \"public\", \"A identifier algorithm. Valid values are \\\"public\\\" and \\\"pairwise\\\"\")\n\tclientsUpdateCmd.Flags().String(\"secret\", \"\", \"Provide the client's secret\")\n\tclientsUpdateCmd.Flags().StringP(\"name\", \"n\", \"\", \"The client's name\")\n\tclientsUpdateCmd.Flags().StringSlice(\"post-logout-callbacks\", []string{}, \"List of allowed URLs to be redirected to after a logout\")\n\n\t\/\/ encrypt client secret options\n\tclientsUpdateCmd.Flags().String(\"pgp-key\", \"\", \"Base64 encoded PGP encryption key for encrypting client secret\")\n\tclientsUpdateCmd.Flags().String(\"pgp-key-url\", \"\", \"PGP encryption key URL for encrypting client secret\")\n\tclientsUpdateCmd.Flags().String(\"keybase\", \"\", \"Keybase username for encrypting client secret\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/stscreds\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ecr\"\n)\n\nconst defaultRegion = \"us-east-1\"\n\nfunc main() {\n\tvar (\n\t\trepo = getenv(\"PLUGIN_REPO\")\n\t\tregion = getenv(\"PLUGIN_REGION\", \"ECR_REGION\", \"AWS_REGION\")\n\t\tkey = getenv(\"PLUGIN_ACCESS_KEY\", \"ECR_ACCESS_KEY\", \"AWS_ACCESS_KEY_ID\")\n\t\tsecret = getenv(\"PLUGIN_SECRET_KEY\", \"ECR_SECRET_KEY\", \"AWS_SECRET_ACCESS_KEY\")\n\t\tcreate = parseBoolOrDefault(false, getenv(\"PLUGIN_CREATE_REPOSITORY\", \"ECR_CREATE_REPOSITORY\"))\n\t\tlifecyclePolicy = getenv(\"PLUGIN_LIFECYCLE_POLICY\")\n\t\trepositoryPolicy = getenv(\"PLUGIN_REPOSITORY_POLICY\")\n\t\tassumeRole = getenv(\"PLUGIN_ASSUME_ROLE\")\n\t)\n\n\t\/\/ set the region\n\tif region == \"\" {\n\t\tregion = defaultRegion\n\t}\n\n\tos.Setenv(\"AWS_REGION\", region)\n\n\tif key != \"\" && secret != \"\" {\n\t\tos.Setenv(\"AWS_ACCESS_KEY_ID\", key)\n\t\tos.Setenv(\"AWS_SECRET_ACCESS_KEY\", secret)\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{Region: ®ion})\n\t\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"error creating aws session: %v\", err))\n\t}\n\n\tsvc := getECRClient(sess, assumeRole)\n\tusername, password, registry, err := getAuthInfo(svc)\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"error getting ECR auth: %v\", err))\n\t}\n\n\tif !strings.HasPrefix(repo, registry) {\n\t\trepo = fmt.Sprintf(\"%s\/%s\", registry, repo)\n\t}\n\n\tif create {\n\t\terr = ensureRepoExists(svc, trimHostname(repo, registry))\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error creating ECR repo: %v\", err))\n\t\t}\n\t}\n\n\tif lifecyclePolicy != \"\" {\n\t\tp, err := ioutil.ReadFile(lifecyclePolicy)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := uploadLifeCyclePolicy(svc, string(p), trimHostname(repo, registry)); err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error uploading ECR lifecycle policy: %v\", err))\n\t\t}\n\t}\n\n\tif repositoryPolicy != \"\" {\n\t\tp, err := ioutil.ReadFile(repositoryPolicy)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := uploadRepositoryPolicy(svc, string(p), trimHostname(repo, registry)); err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error uploading ECR repository policy. %v\", err))\n\t\t}\n\t}\n\n\tos.Setenv(\"PLUGIN_REPO\", repo)\n\tos.Setenv(\"PLUGIN_REGISTRY\", registry)\n\tos.Setenv(\"DOCKER_USERNAME\", username)\n\tos.Setenv(\"DOCKER_PASSWORD\", password)\n\n\t\/\/ invoke the base docker plugin binary\n\tcmd := exec.Command(\"drone-docker\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err = cmd.Run(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc trimHostname(repo, registry string) string {\n\trepo = strings.TrimPrefix(repo, registry)\n\trepo = strings.TrimLeft(repo, \"\/\")\n\treturn repo\n}\n\nfunc ensureRepoExists(svc *ecr.ECR, name string) (err error) {\n\tinput := &ecr.CreateRepositoryInput{}\n\tinput.SetRepositoryName(name)\n\t_, err = svc.CreateRepository(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryAlreadyExistsException {\n\t\t\t\/\/ eat it, we skip checking for existing to save two requests\n\t\t\terr = nil\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc uploadLifeCyclePolicy(svc *ecr.ECR, lifecyclePolicy string, name string) (err error) {\n\tinput := &ecr.PutLifecyclePolicyInput{}\n\tinput.SetLifecyclePolicyText(lifecyclePolicy)\n\tinput.SetRepositoryName(name)\n\t_, err = svc.PutLifecyclePolicy(input)\n\n\treturn err\n}\n\nfunc uploadRepositoryPolicy(svc *ecr.ECR, repositoryPolicy string, name string) (err error) {\n\tinput := &ecr.SetRepositoryPolicyInput{}\n\tinput.SetPolicyText(repositoryPolicy)\n\tinput.SetRepositoryName(name)\n\t_, err = svc.SetRepositoryPolicy(input)\n\n\treturn err\n}\n\nfunc getAuthInfo(svc *ecr.ECR) (username, password, registry string, err error) {\n\tvar result *ecr.GetAuthorizationTokenOutput\n\tvar decoded []byte\n\n\tresult, err = svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tauth := result.AuthorizationData[0]\n\ttoken := *auth.AuthorizationToken\n\tdecoded, err = base64.StdEncoding.DecodeString(token)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tregistry = strings.TrimPrefix(*auth.ProxyEndpoint, \"https:\/\/\")\n\tcreds := strings.Split(string(decoded), \":\")\n\tusername = creds[0]\n\tpassword = creds[1]\n\treturn\n}\n\nfunc parseBoolOrDefault(defaultValue bool, s string) (result bool) {\n\tvar err error\n\tresult, err = strconv.ParseBool(s)\n\tif err != nil {\n\t\tresult = false\n\t}\n\n\treturn\n}\n\nfunc getenv(key ...string) (s string) {\n\tfor _, k := range key {\n\t\ts = os.Getenv(k)\n\t\tif s != \"\" {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc getECRClient(sess *session.Session, role string) *ecr.ECR {\n\tif role == \"\" {\n\t\treturn ecr.New(sess)\n\t}\n\tcreds := stscreds.NewCredentials(sess, role)\n\treturn ecr.New(sess, &aws.Config{Credentials: creds})\n}\n<commit_msg>Apply suggestions from code review<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/stscreds\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ecr\"\n)\n\nconst defaultRegion = \"us-east-1\"\n\nfunc main() {\n\tvar (\n\t\trepo = getenv(\"PLUGIN_REPO\")\n\t\tregion = getenv(\"PLUGIN_REGION\", \"ECR_REGION\", \"AWS_REGION\")\n\t\tkey = getenv(\"PLUGIN_ACCESS_KEY\", \"ECR_ACCESS_KEY\", \"AWS_ACCESS_KEY_ID\")\n\t\tsecret = getenv(\"PLUGIN_SECRET_KEY\", \"ECR_SECRET_KEY\", \"AWS_SECRET_ACCESS_KEY\")\n\t\tcreate = parseBoolOrDefault(false, getenv(\"PLUGIN_CREATE_REPOSITORY\", \"ECR_CREATE_REPOSITORY\"))\n\t\tlifecyclePolicy = getenv(\"PLUGIN_LIFECYCLE_POLICY\")\n\t\trepositoryPolicy = getenv(\"PLUGIN_REPOSITORY_POLICY\")\n\t\tassumeRole = getenv(\"PLUGIN_ASSUME_ROLE\")\n\t)\n\n\t\/\/ set the region\n\tif region == \"\" {\n\t\tregion = defaultRegion\n\t}\n\n\tos.Setenv(\"AWS_REGION\", region)\n\n\tif key != \"\" && secret != \"\" {\n\t\tos.Setenv(\"AWS_ACCESS_KEY_ID\", key)\n\t\tos.Setenv(\"AWS_SECRET_ACCESS_KEY\", secret)\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{Region: ®ion})\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"error creating aws session: %v\", err))\n\t}\n\n\tsvc := getECRClient(sess, assumeRole)\n\tusername, password, registry, err := getAuthInfo(svc)\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"error getting ECR auth: %v\", err))\n\t}\n\n\tif !strings.HasPrefix(repo, registry) {\n\t\trepo = fmt.Sprintf(\"%s\/%s\", registry, repo)\n\t}\n\n\tif create {\n\t\terr = ensureRepoExists(svc, trimHostname(repo, registry))\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error creating ECR repo: %v\", err))\n\t\t}\n\t}\n\n\tif lifecyclePolicy != \"\" {\n\t\tp, err := ioutil.ReadFile(lifecyclePolicy)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := uploadLifeCyclePolicy(svc, string(p), trimHostname(repo, registry)); err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error uploading ECR lifecycle policy: %v\", err))\n\t\t}\n\t}\n\n\tif repositoryPolicy != \"\" {\n\t\tp, err := ioutil.ReadFile(repositoryPolicy)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := uploadRepositoryPolicy(svc, string(p), trimHostname(repo, registry)); err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"error uploading ECR repository policy. %v\", err))\n\t\t}\n\t}\n\n\tos.Setenv(\"PLUGIN_REPO\", repo)\n\tos.Setenv(\"PLUGIN_REGISTRY\", registry)\n\tos.Setenv(\"DOCKER_USERNAME\", username)\n\tos.Setenv(\"DOCKER_PASSWORD\", password)\n\n\t\/\/ invoke the base docker plugin binary\n\tcmd := exec.Command(\"drone-docker\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err = cmd.Run(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc trimHostname(repo, registry string) string {\n\trepo = strings.TrimPrefix(repo, registry)\n\trepo = strings.TrimLeft(repo, \"\/\")\n\treturn repo\n}\n\nfunc ensureRepoExists(svc *ecr.ECR, name string) (err error) {\n\tinput := &ecr.CreateRepositoryInput{}\n\tinput.SetRepositoryName(name)\n\t_, err = svc.CreateRepository(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryAlreadyExistsException {\n\t\t\t\/\/ eat it, we skip checking for existing to save two requests\n\t\t\terr = nil\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc uploadLifeCyclePolicy(svc *ecr.ECR, lifecyclePolicy string, name string) (err error) {\n\tinput := &ecr.PutLifecyclePolicyInput{}\n\tinput.SetLifecyclePolicyText(lifecyclePolicy)\n\tinput.SetRepositoryName(name)\n\t_, err = svc.PutLifecyclePolicy(input)\n\n\treturn err\n}\n\nfunc uploadRepositoryPolicy(svc *ecr.ECR, repositoryPolicy string, name string) (err error) {\n\tinput := &ecr.SetRepositoryPolicyInput{}\n\tinput.SetPolicyText(repositoryPolicy)\n\tinput.SetRepositoryName(name)\n\t_, err = svc.SetRepositoryPolicy(input)\n\n\treturn err\n}\n\nfunc getAuthInfo(svc *ecr.ECR) (username, password, registry string, err error) {\n\tvar result *ecr.GetAuthorizationTokenOutput\n\tvar decoded []byte\n\n\tresult, err = svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tauth := result.AuthorizationData[0]\n\ttoken := *auth.AuthorizationToken\n\tdecoded, err = base64.StdEncoding.DecodeString(token)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tregistry = strings.TrimPrefix(*auth.ProxyEndpoint, \"https:\/\/\")\n\tcreds := strings.Split(string(decoded), \":\")\n\tusername = creds[0]\n\tpassword = creds[1]\n\treturn\n}\n\nfunc parseBoolOrDefault(defaultValue bool, s string) (result bool) {\n\tvar err error\n\tresult, err = strconv.ParseBool(s)\n\tif err != nil {\n\t\tresult = false\n\t}\n\n\treturn\n}\n\nfunc getenv(key ...string) (s string) {\n\tfor _, k := range key {\n\t\ts = os.Getenv(k)\n\t\tif s != \"\" {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc getECRClient(sess *session.Session, role string) *ecr.ECR {\n\tif role == \"\" {\n\t\treturn ecr.New(sess)\n\t}\n\treturn ecr.New(sess, &aws.Config{\n\t\tCredentials: stscreds.NewCredentials(sess, role),\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\ntype TvMazeEpisode struct {\n\tID int64 `json:\"id\"`\n\tName string `json:\"name\"`\n\tSeason int64 `json:\"season\"`\n\tEpisode int64 `json:\"number\"`\n\tSummary string `json:\"summary\"`\n\t\/\/AirDate time.Time `json:\"airdate\"`\n\tImage struct {\n\t\tMedium string `json:\"medium\"`\n\t\tOriginal string `json:\"original\"`\n\t} `json:\"image\"`\n}\n\ntype TvMazeShow struct {\n\tName string `json:\"name\"`\n\tImage struct {\n\t\tMedium string `json:\"medium\"`\n\t\tOriginal string `json:\"original\"`\n\t} `json:\"image\"`\n\tSummary string `json:\"summary\"`\n\tEmbedded struct {\n\t\tEpisodes []TvMazeEpisode `json:\"episodes\"`\n\t} `json:\"_embedded\"`\n}\n\ntype TvMazeClient struct {\n\tlogger *logrus.Entry\n}\n\nfunc (t TvMazeClient) Find(q string) (*TvMazeShow, error) {\n\tquery := fmt.Sprintf(t.urlTemplate(), q)\n\tcontextLogger := t.logger.WithField(\"url\", query)\n\tcontextLogger.Debug(\"Querying TVMaze\")\n\n\tresponse, err := http.Get(query)\n\tif err != nil {\n\t\tcontextLogger.WithField(\"err\", err).Error(\"Failed to get a response\")\n\t\treturn nil, err\n\t}\n\tif response.StatusCode == 404 {\n\t\tcontextLogger.Warn(\"No match found\")\n\t\treturn nil, err\n\t}\n\n\tshow := &TvMazeShow{}\n\tif err := json.NewDecoder(response.Body).Decode(show); err != nil {\n\t\tcontextLogger.WithField(\"err\", err).Error(\"Failed to decode\")\n\t\treturn nil, err\n\t}\n\n\treturn show, nil\n}\n\nfunc (t TvMazeClient) urlTemplate() string {\n\tenv := os.Getenv(\"TVMAZE_URL_TEMPLATE\")\n\tif env == \"\" {\n\t\t\/\/ Don't use this standard. It's here mainly as an example of what\n\t\t\/\/ format the templated is expected to look like.\n\t\treturn \"http:\/\/api.tvmaze.com\/singlesearch\/shows?q=%s&embed=episodes\"\n\t}\n\treturn env\n}\n<commit_msg>fetcher: escape query<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n)\n\ntype TvMazeEpisode struct {\n\tID int64 `json:\"id\"`\n\tName string `json:\"name\"`\n\tSeason int64 `json:\"season\"`\n\tEpisode int64 `json:\"number\"`\n\tSummary string `json:\"summary\"`\n\t\/\/AirDate time.Time `json:\"airdate\"`\n\tImage struct {\n\t\tMedium string `json:\"medium\"`\n\t\tOriginal string `json:\"original\"`\n\t} `json:\"image\"`\n}\n\ntype TvMazeShow struct {\n\tName string `json:\"name\"`\n\tImage struct {\n\t\tMedium string `json:\"medium\"`\n\t\tOriginal string `json:\"original\"`\n\t} `json:\"image\"`\n\tSummary string `json:\"summary\"`\n\tEmbedded struct {\n\t\tEpisodes []TvMazeEpisode `json:\"episodes\"`\n\t} `json:\"_embedded\"`\n}\n\ntype TvMazeClient struct {\n\tlogger *logrus.Entry\n}\n\nfunc (t TvMazeClient) Find(q string) (*TvMazeShow, error) {\n\tquery := fmt.Sprintf(t.urlTemplate(), url.QueryEscape(q))\n\tcontextLogger := t.logger.WithField(\"url\", query)\n\tcontextLogger.Debug(\"Querying TVMaze\")\n\n\tresponse, err := http.Get(query)\n\tif err != nil {\n\t\tcontextLogger.WithField(\"err\", err).Error(\"Failed to get a response\")\n\t\treturn nil, err\n\t}\n\tif response.StatusCode == 404 {\n\t\tcontextLogger.Warn(\"No match found\")\n\t\treturn nil, err\n\t}\n\n\tshow := &TvMazeShow{}\n\tif err := json.NewDecoder(response.Body).Decode(show); err != nil {\n\t\tcontextLogger.WithField(\"err\", err).Error(\"Failed to decode\")\n\t\treturn nil, err\n\t}\n\n\treturn show, nil\n}\n\nfunc (t TvMazeClient) urlTemplate() string {\n\tenv := os.Getenv(\"TVMAZE_URL_TEMPLATE\")\n\tif env == \"\" {\n\t\t\/\/ Don't use this standard. It's here mainly as an example of what\n\t\t\/\/ format the templated is expected to look like.\n\t\treturn \"http:\/\/api.tvmaze.com\/singlesearch\/shows?q=%s&embed=episodes\"\n\t}\n\treturn env\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/gommon\/log\"\n\t\"github.com\/lestrrat\/go-server-starter\/listener\"\n\t\"github.com\/monochromegane\/gannoy\"\n\t\"github.com\/nightlyone\/lockfile\"\n)\n\ntype Options struct {\n\tDataDir string `short:\"d\" long:\"data-dir\" default:\".\" description:\"Specify the directory where the meta files are located.\"`\n\tLogDir string `short:\"l\" long:\"log-dir\" default-mask:\"os.Stdout\" description:\"Specify the log output directory.\"`\n\tLockDir string `short:\"L\" long:\"lock-dir\" default:\".\" description:\"Specify the lock file directory. This option is used only server-starter option.\"`\n\tWithServerStarter bool `short:\"s\" long:\"server-starter\" default:\"false\" description:\"Use server-starter listener for server address.\"`\n\tShutDownTimeout int `short:\"t\" long:\"timeout\" default:\"10\" description:\"Specify the number of seconds for shutdown timeout.\"`\n}\n\nvar opts Options\n\ntype Feature struct {\n\tW []float64 `json:\"features\"`\n}\n\nfunc main() {\n\t_, err := flags.Parse(&opts)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Wait old process finishing.\n\tif opts.WithServerStarter {\n\t\tlock, err := initializeLock(opts.LockDir)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer lock.Unlock()\n\t\tfor {\n\t\t\tif err := lock.TryLock(); err != nil {\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\te := echo.New()\n\n\t\/\/ initialize log\n\tl, err := initializeLog(opts.LogDir)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\te.Logger.SetLevel(log.INFO)\n\te.Logger.SetOutput(l)\n\n\t\/\/ Load meta files\n\tfiles, err := ioutil.ReadDir(opts.DataDir)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tmetaCh := make(chan string, len(files))\n\tgannoyCh := make(chan gannoy.GannoyIndex)\n\terrCh := make(chan error)\n\tdatabases := map[string]gannoy.GannoyIndex{}\n\tvar metaCount int\n\tfor _, file := range files {\n\t\tif file.IsDir() || filepath.Ext(file.Name()) != \".meta\" {\n\t\t\tcontinue\n\t\t}\n\t\tmetaCh <- filepath.Join(opts.DataDir, file.Name())\n\t\tmetaCount++\n\t}\n\n\tfor i := 0; i < runtime.GOMAXPROCS(0); i++ {\n\t\tgo gannoyIndexInitializer(metaCh, gannoyCh, errCh)\n\t}\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase gannoy := <-gannoyCh:\n\t\t\tkey := strings.TrimSuffix(filepath.Base(gannoy.MetaFile()), \".meta\")\n\t\t\tdatabases[key] = gannoy\n\t\t\tif len(databases) >= metaCount {\n\t\t\t\tclose(metaCh)\n\t\t\t\tclose(gannoyCh)\n\t\t\t\tclose(errCh)\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase err := <-errCh:\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ Define API\n\te.GET(\"\/search\", func(c echo.Context) error {\n\t\tdatabase := c.QueryParam(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusNotFound)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.QueryParam(\"key\"))\n\t\tif err != nil {\n\t\t\tkey = -1\n\t\t}\n\t\tlimit, err := strconv.Atoi(c.QueryParam(\"limit\"))\n\t\tif err != nil {\n\t\t\tlimit = 10\n\t\t}\n\n\t\tgannoy := databases[database]\n\t\tr, err := gannoy.GetNnsByKey(key, limit, -1)\n\t\tif err != nil || len(r) == 0 {\n\t\t\treturn c.NoContent(http.StatusNotFound)\n\t\t}\n\n\t\treturn c.JSON(http.StatusOK, r)\n\t})\n\n\te.PUT(\"\/databases\/:database\/features\/:key\", func(c echo.Context) error {\n\t\tdatabase := c.Param(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.Param(\"key\"))\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tfeature := new(Feature)\n\t\tif err := c.Bind(feature); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgannoy := databases[database]\n\t\terr = gannoy.AddItem(key, feature.W)\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\treturn c.NoContent(http.StatusOK)\n\t})\n\n\te.DELETE(\"\/databases\/:database\/features\/:key\", func(c echo.Context) error {\n\t\tdatabase := c.Param(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.Param(\"key\"))\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tgannoy := databases[database]\n\t\terr = gannoy.RemoveItem(key)\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\n\t\treturn c.NoContent(http.StatusOK)\n\t})\n\n\t\/\/ Start server\n\taddress := \":1323\"\n\tsig := os.Interrupt\n\tif opts.WithServerStarter {\n\t\taddress = \"\"\n\t\tsig = syscall.SIGTERM\n\t\tlisteners, err := listener.ListenAll()\n\t\tif err != nil && err != listener.ErrNoListeningTarget {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\te.Listener = listeners[0]\n\t}\n\n\tgo func() {\n\t\tif err := e.Start(address); err != nil {\n\t\t\te.Logger.Info(\"shutting down the server\")\n\t\t}\n\t}()\n\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, sig)\n\t<-sigCh\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(opts.ShutDownTimeout)*time.Second)\n\tdefer cancel()\n\tif err := e.Shutdown(ctx); err != nil {\n\t\te.Logger.Fatal(err)\n\t}\n}\n\nfunc initializeLog(logDir string) (*os.File, error) {\n\tif logDir == \"\" {\n\t\treturn os.Stdout, nil\n\t}\n\tif err := os.MkdirAll(logDir, os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.OpenFile(filepath.Join(logDir, \"db.log\"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n}\n\nfunc initializeLock(lockDir string) (lockfile.Lockfile, error) {\n\tif err := os.MkdirAll(lockDir, os.ModePerm); err != nil {\n\t\treturn \"\", err\n\t}\n\tlock := \"gannoy-server.lock\"\n\tif !filepath.IsAbs(lockDir) {\n\t\tlockDir, err := filepath.Abs(lockDir)\n\t\tif err != nil {\n\t\t\treturn lockfile.Lockfile(\"\"), err\n\t\t}\n\t\treturn lockfile.New(filepath.Join(lockDir, lock))\n\t}\n\treturn lockfile.New(filepath.Join(lockDir, lock))\n}\n\nfunc gannoyIndexInitializer(metaCh chan string, gannoyCh chan gannoy.GannoyIndex, errCh chan error) {\n\tfor meta := range metaCh {\n\t\tgannoy, err := gannoy.NewGannoyIndex(meta, gannoy.Angular{}, gannoy.RandRandom{})\n\t\tif err == nil {\n\t\t\tgannoyCh <- gannoy\n\t\t} else {\n\t\t\terrCh <- err\n\t\t}\n\t}\n}\n<commit_msg>Use LimitListener.<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/netutil\"\n\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/labstack\/gommon\/log\"\n\t\"github.com\/lestrrat\/go-server-starter\/listener\"\n\t\"github.com\/monochromegane\/gannoy\"\n\t\"github.com\/nightlyone\/lockfile\"\n)\n\ntype Options struct {\n\tDataDir string `short:\"d\" long:\"data-dir\" default:\".\" description:\"Specify the directory where the meta files are located.\"`\n\tLogDir string `short:\"l\" long:\"log-dir\" default-mask:\"os.Stdout\" description:\"Specify the log output directory.\"`\n\tLockDir string `short:\"L\" long:\"lock-dir\" default:\".\" description:\"Specify the lock file directory. This option is used only server-starter option.\"`\n\tWithServerStarter bool `short:\"s\" long:\"server-starter\" default:\"false\" description:\"Use server-starter listener for server address.\"`\n\tShutDownTimeout int `short:\"t\" long:\"timeout\" default:\"10\" description:\"Specify the number of seconds for shutdown timeout.\"`\n\tMaxConnections int `short:\"m\" long:\"max-connections\" default:\"100\" description:\"Specify the number of max connections.\"`\n}\n\nvar opts Options\n\ntype Feature struct {\n\tW []float64 `json:\"features\"`\n}\n\nfunc main() {\n\t_, err := flags.Parse(&opts)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Wait old process finishing.\n\tif opts.WithServerStarter {\n\t\tlock, err := initializeLock(opts.LockDir)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer lock.Unlock()\n\t\tfor {\n\t\t\tif err := lock.TryLock(); err != nil {\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\te := echo.New()\n\n\t\/\/ initialize log\n\tl, err := initializeLog(opts.LogDir)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\te.Logger.SetLevel(log.INFO)\n\te.Logger.SetOutput(l)\n\n\t\/\/ Load meta files\n\tfiles, err := ioutil.ReadDir(opts.DataDir)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tmetaCh := make(chan string, len(files))\n\tgannoyCh := make(chan gannoy.GannoyIndex)\n\terrCh := make(chan error)\n\tdatabases := map[string]gannoy.GannoyIndex{}\n\tvar metaCount int\n\tfor _, file := range files {\n\t\tif file.IsDir() || filepath.Ext(file.Name()) != \".meta\" {\n\t\t\tcontinue\n\t\t}\n\t\tmetaCh <- filepath.Join(opts.DataDir, file.Name())\n\t\tmetaCount++\n\t}\n\n\tfor i := 0; i < runtime.GOMAXPROCS(0); i++ {\n\t\tgo gannoyIndexInitializer(metaCh, gannoyCh, errCh)\n\t}\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase gannoy := <-gannoyCh:\n\t\t\tkey := strings.TrimSuffix(filepath.Base(gannoy.MetaFile()), \".meta\")\n\t\t\tdatabases[key] = gannoy\n\t\t\tif len(databases) >= metaCount {\n\t\t\t\tclose(metaCh)\n\t\t\t\tclose(gannoyCh)\n\t\t\t\tclose(errCh)\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase err := <-errCh:\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ Define API\n\te.GET(\"\/search\", func(c echo.Context) error {\n\t\tdatabase := c.QueryParam(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusNotFound)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.QueryParam(\"key\"))\n\t\tif err != nil {\n\t\t\tkey = -1\n\t\t}\n\t\tlimit, err := strconv.Atoi(c.QueryParam(\"limit\"))\n\t\tif err != nil {\n\t\t\tlimit = 10\n\t\t}\n\n\t\tgannoy := databases[database]\n\t\tr, err := gannoy.GetNnsByKey(key, limit, -1)\n\t\tif err != nil || len(r) == 0 {\n\t\t\treturn c.NoContent(http.StatusNotFound)\n\t\t}\n\n\t\treturn c.JSON(http.StatusOK, r)\n\t})\n\n\te.PUT(\"\/databases\/:database\/features\/:key\", func(c echo.Context) error {\n\t\tdatabase := c.Param(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.Param(\"key\"))\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tfeature := new(Feature)\n\t\tif err := c.Bind(feature); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgannoy := databases[database]\n\t\terr = gannoy.AddItem(key, feature.W)\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\treturn c.NoContent(http.StatusOK)\n\t})\n\n\te.DELETE(\"\/databases\/:database\/features\/:key\", func(c echo.Context) error {\n\t\tdatabase := c.Param(\"database\")\n\t\tif _, ok := databases[database]; !ok {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tkey, err := strconv.Atoi(c.Param(\"key\"))\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\t\tgannoy := databases[database]\n\t\terr = gannoy.RemoveItem(key)\n\t\tif err != nil {\n\t\t\treturn c.NoContent(http.StatusUnprocessableEntity)\n\t\t}\n\n\t\treturn c.NoContent(http.StatusOK)\n\t})\n\n\t\/\/ Start server\n\tsig := os.Interrupt\n\tif opts.WithServerStarter {\n\t\tsig = syscall.SIGTERM\n\t\tlisteners, err := listener.ListenAll()\n\t\tif err != nil && err != listener.ErrNoListeningTarget {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\te.Listener = netutil.LimitListener(listeners[0], opts.MaxConnections)\n\t} else {\n\t\tl, err := net.Listen(\"tcp\", \":1323\")\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\te.Listener = netutil.LimitListener(l, opts.MaxConnections)\n\t}\n\n\tgo func() {\n\t\tif err := e.Start(\"\"); err != nil {\n\t\t\te.Logger.Info(\"shutting down the server\")\n\t\t}\n\t}()\n\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, sig)\n\t<-sigCh\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(opts.ShutDownTimeout)*time.Second)\n\tdefer cancel()\n\tif err := e.Shutdown(ctx); err != nil {\n\t\te.Logger.Fatal(err)\n\t}\n}\n\nfunc initializeLog(logDir string) (*os.File, error) {\n\tif logDir == \"\" {\n\t\treturn os.Stdout, nil\n\t}\n\tif err := os.MkdirAll(logDir, os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.OpenFile(filepath.Join(logDir, \"db.log\"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n}\n\nfunc initializeLock(lockDir string) (lockfile.Lockfile, error) {\n\tif err := os.MkdirAll(lockDir, os.ModePerm); err != nil {\n\t\treturn \"\", err\n\t}\n\tlock := \"gannoy-server.lock\"\n\tif !filepath.IsAbs(lockDir) {\n\t\tlockDir, err := filepath.Abs(lockDir)\n\t\tif err != nil {\n\t\t\treturn lockfile.Lockfile(\"\"), err\n\t\t}\n\t\treturn lockfile.New(filepath.Join(lockDir, lock))\n\t}\n\treturn lockfile.New(filepath.Join(lockDir, lock))\n}\n\nfunc gannoyIndexInitializer(metaCh chan string, gannoyCh chan gannoy.GannoyIndex, errCh chan error) {\n\tfor meta := range metaCh {\n\t\tgannoy, err := gannoy.NewGannoyIndex(meta, gannoy.Angular{}, gannoy.RandRandom{})\n\t\tif err == nil {\n\t\t\tgannoyCh <- gannoy\n\t\t} else {\n\t\t\terrCh <- err\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 uxbh\n\/\/ This file is part of gitlab.com\/uxbh\/ztdns.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ listinterfacesCmd represents the listinterfaces command\nvar listinterfacesCmd = &cobra.Command{\n\tUse: \"listinterfaces\",\n\tShort: \"List network interfaces\",\n\tLong: `List Interfaces (ztdns listinterfaces) lists the available network interfaces \nto start the server on.`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tints, _ := net.Interfaces()\n\n\t\tfor i, n := range ints {\n\t\t\taddrs, _ := n.Addrs()\n\t\t\tfmt.Printf(\"%d: %v\\n\", i, n.Name)\n\t\t\tfor i, a := range addrs {\n\t\t\t\tip, _, err := net.ParseCIDR(a.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t%d: %s\\n\", i, ip)\n\t\t\t}\n\t\t}\n\n\t},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(listinterfacesCmd)\n}\n<commit_msg>cleanup<commit_after>\/\/ Copyright © 2017 uxbh\n\/\/ This file is part of gitlab.com\/uxbh\/ztdns.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ listinterfacesCmd represents the listinterfaces command\nvar listinterfacesCmd = &cobra.Command{\n\tUse: \"listinterfaces\",\n\tShort: \"List network interfaces\",\n\tLong: `List Interfaces (ztdns listinterfaces) lists the available network interfaces \nto start the server on.`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\/\/ Get a list of interfaces from net\n\t\tints, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error getting interfaces: %s\", err)\n\t\t}\n\t\tfor i, n := range ints {\n\t\t\tfmt.Printf(\"%d: %v\\n\", i, n.Name)\n\t\t\t\/\/ Get a list of ip address on the interface\n\t\t\taddrs, _ := n.Addrs()\n\t\t\tfor i, a := range addrs {\n\t\t\t\tip, _, err := net.ParseCIDR(a.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t%d: %s\\n\", i, ip)\n\t\t\t}\n\t\t}\n\n\t},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(listinterfacesCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/havoc-io\/mutagen\/cmd\"\n\t\"github.com\/havoc-io\/mutagen\/daemon\"\n\t\"github.com\/havoc-io\/mutagen\/filesystem\"\n\t\"github.com\/havoc-io\/mutagen\/rpc\"\n\tsessionpkg \"github.com\/havoc-io\/mutagen\/session\"\n\t\"github.com\/havoc-io\/mutagen\/url\"\n)\n\nvar createUsage = `usage: mutagen create [-h|--help] [-i|--ignore=<pattern>]\n <alpha> <beta>\n\nCreates and starts a new synchronization session.\n`\n\ntype ignorePatterns []string\n\nfunc (p *ignorePatterns) String() string {\n\treturn \"ignore patterns\"\n}\n\nfunc (p *ignorePatterns) Set(value string) error {\n\t*p = append(*p, value)\n\treturn nil\n}\n\nfunc createMain(arguments []string) error {\n\t\/\/ Parse command line arguments.\n\tvar ignores ignorePatterns\n\tflagSet := cmd.NewFlagSet(\"create\", createUsage, []int{2})\n\tflagSet.VarP(&ignores, \"ignore\", \"i\", \"specify ignore paths\")\n\turls := flagSet.ParseOrDie(arguments)\n\n\t\/\/ Extract and parse URLs.\n\talpha, err := url.Parse(urls[0])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to parse alpha URL\")\n\t}\n\tbeta, err := url.Parse(urls[1])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to parse beta URL\")\n\t}\n\n\t\/\/ If either URL is a local path, make sure it's normalized.\n\tif alpha.Protocol == url.Protocol_Local {\n\t\tif alphaPath, err := filesystem.Normalize(alpha.Path); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to normalize alpha path\")\n\t\t} else {\n\t\t\talpha.Path = alphaPath\n\t\t}\n\t}\n\tif beta.Protocol == url.Protocol_Local {\n\t\tif betaPath, err := filesystem.Normalize(beta.Path); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to normalize beta path\")\n\t\t} else {\n\t\t\tbeta.Path = betaPath\n\t\t}\n\t}\n\n\t\/\/ Create a daemon client.\n\tdaemonClient := rpc.NewClient(daemon.NewOpener())\n\n\t\/\/ Invoke the session creation method and ensure the resulting stream is\n\t\/\/ closed when we're done.\n\tstream, err := daemonClient.Invoke(sessionpkg.MethodCreate)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to invoke session creation\")\n\t}\n\tdefer stream.Close()\n\n\t\/\/ Send the initial request.\n\tif err := stream.Send(sessionpkg.CreateRequest{\n\t\tAlpha: alpha,\n\t\tBeta: beta,\n\t\tIgnores: []string(ignores),\n\t}); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send creation request\")\n\t}\n\n\t\/\/ Handle authentication challenges.\n\tif err := handlePromptRequests(stream); err != nil {\n\t\treturn errors.Wrap(err, \"unable to handle prompt requests\")\n\t}\n\n\t\/\/ Receive the create response.\n\tvar response sessionpkg.CreateResponse\n\tif err := stream.Receive(&response); err != nil {\n\t\treturn errors.Wrap(err, \"unable to receive create response\")\n\t}\n\n\t\/\/ Print the session identifier.\n\tfmt.Println(\"Created session\", response.Session)\n\n\t\/\/ Success.\n\treturn nil\n}\n<commit_msg>Cleaned up request literal in createMain.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/havoc-io\/mutagen\/cmd\"\n\t\"github.com\/havoc-io\/mutagen\/daemon\"\n\t\"github.com\/havoc-io\/mutagen\/filesystem\"\n\t\"github.com\/havoc-io\/mutagen\/rpc\"\n\tsessionpkg \"github.com\/havoc-io\/mutagen\/session\"\n\t\"github.com\/havoc-io\/mutagen\/url\"\n)\n\nvar createUsage = `usage: mutagen create [-h|--help] [-i|--ignore=<pattern>]\n <alpha> <beta>\n\nCreates and starts a new synchronization session.\n`\n\ntype ignorePatterns []string\n\nfunc (p *ignorePatterns) String() string {\n\treturn \"ignore patterns\"\n}\n\nfunc (p *ignorePatterns) Set(value string) error {\n\t*p = append(*p, value)\n\treturn nil\n}\n\nfunc createMain(arguments []string) error {\n\t\/\/ Parse command line arguments.\n\tvar ignores ignorePatterns\n\tflagSet := cmd.NewFlagSet(\"create\", createUsage, []int{2})\n\tflagSet.VarP(&ignores, \"ignore\", \"i\", \"specify ignore paths\")\n\turls := flagSet.ParseOrDie(arguments)\n\n\t\/\/ Extract and parse URLs.\n\talpha, err := url.Parse(urls[0])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to parse alpha URL\")\n\t}\n\tbeta, err := url.Parse(urls[1])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to parse beta URL\")\n\t}\n\n\t\/\/ If either URL is a local path, make sure it's normalized.\n\tif alpha.Protocol == url.Protocol_Local {\n\t\tif alphaPath, err := filesystem.Normalize(alpha.Path); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to normalize alpha path\")\n\t\t} else {\n\t\t\talpha.Path = alphaPath\n\t\t}\n\t}\n\tif beta.Protocol == url.Protocol_Local {\n\t\tif betaPath, err := filesystem.Normalize(beta.Path); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to normalize beta path\")\n\t\t} else {\n\t\t\tbeta.Path = betaPath\n\t\t}\n\t}\n\n\t\/\/ Create a daemon client.\n\tdaemonClient := rpc.NewClient(daemon.NewOpener())\n\n\t\/\/ Invoke the session creation method and ensure the resulting stream is\n\t\/\/ closed when we're done.\n\tstream, err := daemonClient.Invoke(sessionpkg.MethodCreate)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to invoke session creation\")\n\t}\n\tdefer stream.Close()\n\n\t\/\/ Send the initial request.\n\trequest := sessionpkg.CreateRequest{\n\t\tAlpha: alpha,\n\t\tBeta: beta,\n\t\tIgnores: []string(ignores),\n\t}\n\tif err := stream.Send(request); err != nil {\n\t\treturn errors.Wrap(err, \"unable to send creation request\")\n\t}\n\n\t\/\/ Handle authentication challenges.\n\tif err := handlePromptRequests(stream); err != nil {\n\t\treturn errors.Wrap(err, \"unable to handle prompt requests\")\n\t}\n\n\t\/\/ Receive the create response.\n\tvar response sessionpkg.CreateResponse\n\tif err := stream.Receive(&response); err != nil {\n\t\treturn errors.Wrap(err, \"unable to receive create response\")\n\t}\n\n\t\/\/ Print the session identifier.\n\tfmt.Println(\"Created session\", response.Session)\n\n\t\/\/ Success.\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"chain\/env\"\n)\n\nconst slashLandUsage = `\nThe \/land command takes a git branch reference.\nHere is an example using the \/land command:\n\n\/land [-prv] feature-x\n`\n\nvar (\n\tport = env.String(\"PORT\", \"8080\")\n\tgithubToken = env.String(\"GITHUB_TOKEN\", \"\")\n\torg = env.String(\"GITHUB_ORG\", \"chain\")\n\trepo = env.String(\"GITHUB_REPO\", \"chain\")\n\tprivRepo = env.String(\"GITHUB_REPO_PRIVATE\", \"chainprv\")\n\tslackChannels = env.StringSlice(\"SLACK_CHANNEL\")\n\tslackToken = env.String(\"SLACK_LAND_TOKEN\", \"\")\n\tpostURL = env.String(\"SLACK_POST_URL\", \"\")\n)\n\nvar landReqs = make(chan *landReq, 10)\n\ntype landReq struct {\n\tuserID string\n\tuserName string\n\tref string\n\tprivate bool\n}\n\nfunc main() {\n\tlog.SetFlags(log.Lshortfile)\n\tenv.Parse()\n\n\terr := configGit()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\terr = writeNetrc()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thttp.HandleFunc(\"\/slash\", slashLand)\n\thttp.HandleFunc(\"\/health\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.WriteHeader(200)\n\t})\n\n\tgo lander()\n\n\tsay(\"Ready for action!\")\n\terr = http.ListenAndServe(\":\"+*port, nil)\n\tlog.Fatalln(err)\n}\n\nfunc slashLand(w http.ResponseWriter, r *http.Request) {\n\tif r.FormValue(\"token\") != *slackToken {\n\t\thttp.Error(w, \"unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tif !contains(r.FormValue(\"channel_id\"), *slackChannels) {\n\t\thttp.Error(w, fmt.Sprintf(\"%s not enabled in this channel.\", r.FormValue(\"command\")), 400)\n\t\treturn\n\t}\n\ta := strings.Fields(r.FormValue(\"text\"))\n\tprivate := false\n\tif len(a) >= 1 && a[0] == \"-prv\" {\n\t\tprivate = true\n\t\ta = a[1:]\n\t}\n\tif len(a) != 1 {\n\t\thttp.Error(w, slashLandUsage, 422)\n\t\treturn\n\t}\n\tlandReqs <- &landReq{\n\t\tref: a[0],\n\t\tuserID: r.FormValue(\"user_id\"),\n\t\tuserName: r.FormValue(\"user_name\"),\n\t\tprivate: private,\n\t}\n\tsayf(\"<@%s|%s> is attempting to land %s\",\n\t\tr.FormValue(\"user_id\"),\n\t\tr.FormValue(\"user_name\"),\n\t\ta[0],\n\t)\n}\n\nfunc lander() {\n\tfor req := range landReqs {\n\t\tland(req)\n\t}\n}\n\nfunc land(req *landReq) {\n\tdefer catch()\n\n\trepo := *repo\n\tif req.private {\n\t\trepo = *privRepo\n\t}\n\n\tgopath := \"\/tmp\/land\"\n\tlanddir := gopath + \"\/src\/\" + repo\n\n\tfetch(landdir, req.ref, repo)\n\tcommit := string(bytes.TrimSpace(runOutput(landdir, exec.Command(\"git\", \"rev-parse\", \"HEAD\"))))\n\n\tprBits, err := pipeline(\n\t\tdirCmd(landdir, \"git\", \"ls-remote\", \"origin\", `refs\/pull\/*\/head`),\n\t\texec.Command(\"fgrep\", commit),\n\t\texec.Command(\"cut\", \"-d\/\", \"-f3\"),\n\t)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not find open pull request\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\tpr := string(bytes.TrimSpace(prBits))\n\n\tvar prState struct {\n\t\tTitle string\n\t\tBody string\n\t\tMerged bool\n\t\tMergeable *bool\n\t}\n\terr = doGithubReq(\"GET\", \"repos\/\"+*org+\"\/\"+repo+\"\/pulls\/\"+pr, nil, &prState)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: error fetching github status\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t}\n\tif prState.Merged {\n\t\tsayf(\"<@%s|%s> %s has already landed\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\tif prState.Mergeable != nil && *prState.Mergeable == false {\n\t\tsayf(\"<@%s|%s> failed to land %s: branch has conflicts\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\n\tcmd := dirCmd(landdir, \"git\", \"rebase\", \"origin\/main\")\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tcmd = dirCmd(landdir, \"git\", \"rebase\", \"--abort\")\n\t\tcmd.Stderr = os.Stderr\n\t\terr = cmd.Run()\n\t\tsayf(\"<@%s|%s> failed to land %s: branch has conflicts\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\n\tcmd = dirCmd(landdir, \"git\", \"push\", \"origin\", req.ref, \"-f\")\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not push rebase (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\terr,\n\t\t)\n\t\treturn\n\t}\n\n\tcommit = string(bytes.TrimSpace(runOutput(landdir, exec.Command(\"git\", \"rev-parse\", \"HEAD\"))))\n\n\tsuccess := waitForSuccessfulStatus(req, repo, commit)\n\tif !success {\n\t\treturn\n\t}\n\n\tbody := prState.Body\n\tif body != \"\" {\n\t\tbody += \"\\n\\n\"\n\t}\n\tbody += fmt.Sprintf(\"Closes #%s\\n\", pr)\n\tmergeReq := struct {\n\t\tCommitTitle string `json:\"commit_title\"`\n\t\tCommitMessage string `json:\"commit_message\"`\n\t\tSHA string `json:\"sha\"`\n\t\tSquash bool `json:\"squash\"`\n\t}{prState.Title, wrapMessage(body, 75), commit, true}\n\tvar mergeResp struct {\n\t\tMerged bool\n\t\tMessage string\n\t}\n\terr = doGithubReq(\"PUT\", fmt.Sprintf(\"repos\/%s\/%s\/pulls\/%s\/merge\", *org, repo, pr), mergeReq, &mergeResp)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not merge pull request (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\terr,\n\t\t)\n\t\treturn\n\t}\n\tif !mergeResp.Merged {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not merge pull request (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\tmergeResp.Message,\n\t\t)\n\t\treturn\n\t}\n\n\trunIn(landdir, exec.Command(\"git\", \"push\", \"origin\", \":\"+req.ref))\n\tfetch(landdir, \"main\", repo)\n\trunIn(landdir, exec.Command(\"git\", \"branch\", \"-D\", req.ref))\n}\n\nfunc writeNetrc() error {\n\tp := filepath.Clean(os.Getenv(\"HOME\") + \"\/.netrc\")\n\ts := \"machine github.com login chainbot password \" + *githubToken + \"\\n\"\n\treturn ioutil.WriteFile(p, []byte(s), 0600)\n}\n\nfunc configGit() error {\n\tcmd := exec.Command(\"git\", \"config\", \"--global\", \"user.email\", \"ops@chain.com\")\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd = exec.Command(\"git\", \"config\", \"--global\", \"user.name\", \"chainbot\")\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\treturn err\n}\n\nfunc wrapMessage(msg string, limit int) string {\n\tb := make([]byte, 0, len(msg))\n\tlimit++ \/\/ accounts for whitespace from SplitAfter\n\n\tfor _, line := range strings.SplitAfter(msg, \"\\n\") {\n\t\tlineLen := 0\n\t\tfor _, word := range strings.SplitAfter(line, \" \") {\n\t\t\tif lineLen+len(word) > limit && lineLen > 0 {\n\t\t\t\tlineLen = 0\n\t\t\t\tb = bytes.TrimRight(b, \" \")\n\t\t\t\tb = append(b, '\\n')\n\t\t\t}\n\t\t\tb = append(b, word...)\n\t\t\tlineLen += len(word)\n\t\t}\n\t}\n\treturn string(b)\n}\n\nfunc waitForSuccessfulStatus(req *landReq, repo, commitSHA string) bool {\n\tstart := time.Now()\n\tfor {\n\t\tif time.Since(start) > 3*time.Minute {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: timed out waiting for build status\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\n\t\tvar statusResp struct {\n\t\t\tState, SHA string\n\t\t}\n\t\terr := doGithubReq(\"GET\", fmt.Sprintf(\"repos\/%s\/%s\/commits\/%s\/status\", *org, repo, req.ref), nil, &statusResp)\n\t\tif err != nil || statusResp.State == \"\" {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: error fetching github status\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\t\tif statusResp.State == \"failure\" {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: build failed\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\t\tif statusResp.State == \"success\" && statusResp.SHA == commitSHA {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(15 * time.Second)\n\t}\n\treturn true\n}\n\nfunc doGithubReq(method, path string, body, x interface{}) error {\n\tvar bodyReader io.Reader\n\tif body != nil {\n\t\tjsonBody, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbodyReader = bytes.NewReader(jsonBody)\n\t}\n\n\treq, err := http.NewRequest(method, \"https:\/\/api.github.com\/\"+path, bodyReader)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Authorization\", \"token \"+*githubToken)\n\treq.Header.Add(\"Accept\", \"application\/vnd.github.polaris-preview+json\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(x)\n}\n\nfunc fetch(dir, ref, repo string) {\n\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\tclone(dir, ref, repo)\n\t\treturn\n\t}\n\trunIn(dir, exec.Command(\"git\", \"fetch\", \"origin\"))\n\trunIn(dir, exec.Command(\"git\", \"clean\", \"-xdf\"))\n\trunIn(dir, exec.Command(\"git\", \"checkout\", ref, \"--\"))\n\trunIn(dir, exec.Command(\"git\", \"reset\", \"--hard\", \"origin\/\"+ref))\n}\n\nfunc clone(dir, ref, repo string) {\n\tc := exec.Command(\"git\", \"clone\",\n\t\t\"--branch=\"+ref, \/\/ Check out branch 'ref'\n\t\tfmt.Sprintf(\"https:\/\/github.com\/%s\/%s.git\/\", *org, repo), \/\/ from remote 'url'\n\t\tdir, \/\/ into directory 'dir'.\n\t)\n\tc.Stderr = os.Stderr\n\tif err := c.Run(); err != nil {\n\t\tpanic(fmt.Errorf(\"%s: %v\", strings.Join(c.Args, \" \"), err))\n\t}\n}\n<commit_msg>cmd\/slashland: rebase onto proper base branch<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"chain\/env\"\n)\n\nconst slashLandUsage = `\nThe \/land command takes a git branch reference.\nHere is an example using the \/land command:\n\n\/land [-prv] feature-x\n`\n\nvar (\n\tport = env.String(\"PORT\", \"8080\")\n\tgithubToken = env.String(\"GITHUB_TOKEN\", \"\")\n\torg = env.String(\"GITHUB_ORG\", \"chain\")\n\trepo = env.String(\"GITHUB_REPO\", \"chain\")\n\tprivRepo = env.String(\"GITHUB_REPO_PRIVATE\", \"chainprv\")\n\tslackChannels = env.StringSlice(\"SLACK_CHANNEL\")\n\tslackToken = env.String(\"SLACK_LAND_TOKEN\", \"\")\n\tpostURL = env.String(\"SLACK_POST_URL\", \"\")\n)\n\nvar landReqs = make(chan *landReq, 10)\n\ntype landReq struct {\n\tuserID string\n\tuserName string\n\tref string\n\tprivate bool\n}\n\nfunc main() {\n\tlog.SetFlags(log.Lshortfile)\n\tenv.Parse()\n\n\terr := configGit()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\terr = writeNetrc()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thttp.HandleFunc(\"\/slash\", slashLand)\n\thttp.HandleFunc(\"\/health\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.WriteHeader(200)\n\t})\n\n\tgo lander()\n\n\tsay(\"Ready for action!\")\n\terr = http.ListenAndServe(\":\"+*port, nil)\n\tlog.Fatalln(err)\n}\n\nfunc slashLand(w http.ResponseWriter, r *http.Request) {\n\tif r.FormValue(\"token\") != *slackToken {\n\t\thttp.Error(w, \"unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tif !contains(r.FormValue(\"channel_id\"), *slackChannels) {\n\t\thttp.Error(w, fmt.Sprintf(\"%s not enabled in this channel.\", r.FormValue(\"command\")), 400)\n\t\treturn\n\t}\n\ta := strings.Fields(r.FormValue(\"text\"))\n\tprivate := false\n\tif len(a) >= 1 && a[0] == \"-prv\" {\n\t\tprivate = true\n\t\ta = a[1:]\n\t}\n\tif len(a) != 1 {\n\t\thttp.Error(w, slashLandUsage, 422)\n\t\treturn\n\t}\n\tlandReqs <- &landReq{\n\t\tref: a[0],\n\t\tuserID: r.FormValue(\"user_id\"),\n\t\tuserName: r.FormValue(\"user_name\"),\n\t\tprivate: private,\n\t}\n\tsayf(\"<@%s|%s> is attempting to land %s\",\n\t\tr.FormValue(\"user_id\"),\n\t\tr.FormValue(\"user_name\"),\n\t\ta[0],\n\t)\n}\n\nfunc lander() {\n\tfor req := range landReqs {\n\t\tland(req)\n\t}\n}\n\nfunc land(req *landReq) {\n\tdefer catch()\n\n\trepo := *repo\n\tif req.private {\n\t\trepo = *privRepo\n\t}\n\n\tgopath := \"\/tmp\/land\"\n\tlanddir := gopath + \"\/src\/\" + repo\n\n\tfetch(landdir, req.ref, repo)\n\tcommit := string(bytes.TrimSpace(runOutput(landdir, exec.Command(\"git\", \"rev-parse\", \"HEAD\"))))\n\n\tprBits, err := pipeline(\n\t\tdirCmd(landdir, \"git\", \"ls-remote\", \"origin\", `refs\/pull\/*\/head`),\n\t\texec.Command(\"fgrep\", commit),\n\t\texec.Command(\"cut\", \"-d\/\", \"-f3\"),\n\t)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not find open pull request\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\tpr := string(bytes.TrimSpace(prBits))\n\n\tvar prState struct {\n\t\tTitle string\n\t\tBody string\n\t\tMerged bool\n\t\tMergeable *bool\n\t\tBase struct{ Ref string }\n\t}\n\terr = doGithubReq(\"GET\", \"repos\/\"+*org+\"\/\"+repo+\"\/pulls\/\"+pr, nil, &prState)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: error fetching github status\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t}\n\tif prState.Merged {\n\t\tsayf(\"<@%s|%s> %s has already landed\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\tif prState.Mergeable != nil && *prState.Mergeable == false {\n\t\tsayf(\"<@%s|%s> failed to land %s: branch has conflicts\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\n\t\/\/ base branch e.g. origin\/main, origin\/chain-core-server-1.1.x\n\tcmd := dirCmd(landdir, \"git\", \"rebase\", \"origin\/\"+prState.Base.Ref)\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tcmd = dirCmd(landdir, \"git\", \"rebase\", \"--abort\")\n\t\tcmd.Stderr = os.Stderr\n\t\terr = cmd.Run()\n\t\tsayf(\"<@%s|%s> failed to land %s: branch has conflicts\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t)\n\t\treturn\n\t}\n\n\tcmd = dirCmd(landdir, \"git\", \"push\", \"origin\", req.ref, \"-f\")\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not push rebase (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\terr,\n\t\t)\n\t\treturn\n\t}\n\n\tcommit = string(bytes.TrimSpace(runOutput(landdir, exec.Command(\"git\", \"rev-parse\", \"HEAD\"))))\n\n\tsuccess := waitForSuccessfulStatus(req, repo, commit)\n\tif !success {\n\t\treturn\n\t}\n\n\tbody := prState.Body\n\tif body != \"\" {\n\t\tbody += \"\\n\\n\"\n\t}\n\tbody += fmt.Sprintf(\"Closes #%s\\n\", pr)\n\tmergeReq := struct {\n\t\tCommitTitle string `json:\"commit_title\"`\n\t\tCommitMessage string `json:\"commit_message\"`\n\t\tSHA string `json:\"sha\"`\n\t\tSquash bool `json:\"squash\"`\n\t}{prState.Title, wrapMessage(body, 75), commit, true}\n\tvar mergeResp struct {\n\t\tMerged bool\n\t\tMessage string\n\t}\n\terr = doGithubReq(\"PUT\", fmt.Sprintf(\"repos\/%s\/%s\/pulls\/%s\/merge\", *org, repo, pr), mergeReq, &mergeResp)\n\tif err != nil {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not merge pull request (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\terr,\n\t\t)\n\t\treturn\n\t}\n\tif !mergeResp.Merged {\n\t\tsayf(\"<@%s|%s> failed to land %s: could not merge pull request (%s)\",\n\t\t\treq.userID,\n\t\t\treq.userName,\n\t\t\treq.ref,\n\t\t\tmergeResp.Message,\n\t\t)\n\t\treturn\n\t}\n\n\trunIn(landdir, exec.Command(\"git\", \"push\", \"origin\", \":\"+req.ref))\n\tfetch(landdir, \"main\", repo)\n\trunIn(landdir, exec.Command(\"git\", \"branch\", \"-D\", req.ref))\n}\n\nfunc writeNetrc() error {\n\tp := filepath.Clean(os.Getenv(\"HOME\") + \"\/.netrc\")\n\ts := \"machine github.com login chainbot password \" + *githubToken + \"\\n\"\n\treturn ioutil.WriteFile(p, []byte(s), 0600)\n}\n\nfunc configGit() error {\n\tcmd := exec.Command(\"git\", \"config\", \"--global\", \"user.email\", \"ops@chain.com\")\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd = exec.Command(\"git\", \"config\", \"--global\", \"user.name\", \"chainbot\")\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\treturn err\n}\n\nfunc wrapMessage(msg string, limit int) string {\n\tb := make([]byte, 0, len(msg))\n\tlimit++ \/\/ accounts for whitespace from SplitAfter\n\n\tfor _, line := range strings.SplitAfter(msg, \"\\n\") {\n\t\tlineLen := 0\n\t\tfor _, word := range strings.SplitAfter(line, \" \") {\n\t\t\tif lineLen+len(word) > limit && lineLen > 0 {\n\t\t\t\tlineLen = 0\n\t\t\t\tb = bytes.TrimRight(b, \" \")\n\t\t\t\tb = append(b, '\\n')\n\t\t\t}\n\t\t\tb = append(b, word...)\n\t\t\tlineLen += len(word)\n\t\t}\n\t}\n\treturn string(b)\n}\n\nfunc waitForSuccessfulStatus(req *landReq, repo, commitSHA string) bool {\n\tstart := time.Now()\n\tfor {\n\t\tif time.Since(start) > 3*time.Minute {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: timed out waiting for build status\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\n\t\tvar statusResp struct {\n\t\t\tState, SHA string\n\t\t}\n\t\terr := doGithubReq(\"GET\", fmt.Sprintf(\"repos\/%s\/%s\/commits\/%s\/status\", *org, repo, req.ref), nil, &statusResp)\n\t\tif err != nil || statusResp.State == \"\" {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: error fetching github status\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\t\tif statusResp.State == \"failure\" {\n\t\t\tsayf(\"<@%s|%s> failed to land %s: build failed\",\n\t\t\t\treq.userID,\n\t\t\t\treq.userName,\n\t\t\t\treq.ref,\n\t\t\t)\n\t\t\treturn false\n\t\t}\n\t\tif statusResp.State == \"success\" && statusResp.SHA == commitSHA {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(15 * time.Second)\n\t}\n\treturn true\n}\n\nfunc doGithubReq(method, path string, body, x interface{}) error {\n\tvar bodyReader io.Reader\n\tif body != nil {\n\t\tjsonBody, err := json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbodyReader = bytes.NewReader(jsonBody)\n\t}\n\n\treq, err := http.NewRequest(method, \"https:\/\/api.github.com\/\"+path, bodyReader)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Authorization\", \"token \"+*githubToken)\n\treq.Header.Add(\"Accept\", \"application\/vnd.github.polaris-preview+json\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(x)\n}\n\nfunc fetch(dir, ref, repo string) {\n\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\tclone(dir, ref, repo)\n\t\treturn\n\t}\n\trunIn(dir, exec.Command(\"git\", \"fetch\", \"origin\"))\n\trunIn(dir, exec.Command(\"git\", \"clean\", \"-xdf\"))\n\trunIn(dir, exec.Command(\"git\", \"checkout\", ref, \"--\"))\n\trunIn(dir, exec.Command(\"git\", \"reset\", \"--hard\", \"origin\/\"+ref))\n}\n\nfunc clone(dir, ref, repo string) {\n\tc := exec.Command(\"git\", \"clone\",\n\t\t\"--branch=\"+ref, \/\/ Check out branch 'ref'\n\t\tfmt.Sprintf(\"https:\/\/github.com\/%s\/%s.git\/\", *org, repo), \/\/ from remote 'url'\n\t\tdir, \/\/ into directory 'dir'.\n\t)\n\tc.Stderr = os.Stderr\n\tif err := c.Run(); err != nil {\n\t\tpanic(fmt.Errorf(\"%s: %v\", strings.Join(c.Args, \" \"), err))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ntchimport is a tool which builds Tchaik Libraries (metadata indexes) from iTunes Library XML files or\nalternatively by reading metadata from audio files within a directory tree.\n\nImporting large iTunes XML Library files is recommended: the Tchaik library has a much smaller set of data attributes\nfor each track (so a much smaller memory footprint).\n\n tchimport -itlXML <itunes-library> -out lib.tch\n\nAlternatively you can specify a path which will be transversed. All supported audio files within this path\n(.mp3, .m4a, .flac - ID3.v1,2.{2,3,4}, MP4 and FLAC) will be scanned for metadata. Only tracks which have readable\nmetadata will be added to the library. Any errors are logged to stdout. As no other unique identifying data is know,\nthe SHA1 sum of the file path is used as the TrackID.\n\n tchimport -path <directory-path> -out lib.tch\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/dhowden\/tchaik\/index\"\n\t\"github.com\/dhowden\/tchaik\/index\/itl\"\n)\n\nfunc main() {\n\titlXML := flag.String(\"itlXML\", \"\", \"iTunes Music Library XML file\")\n\tpath := flag.String(\"path\", \"\", \"directory path containing audio files\")\n\tout := flag.String(\"out\", \"data.tch\", \"output file (Tchaik library binary format)\")\n\n\tflag.Parse()\n\n\tif *itlXML != \"\" && *path != \"\" || *itlXML == \"\" && *path == \"\" {\n\t\tfmt.Println(\"must specify either 'itlXML' or 'path'\")\n\t\tos.Exit(1)\n\t}\n\n\tif *out == \"\" {\n\t\tfmt.Println(\"must specify 'out'\")\n\t\tos.Exit(1)\n\t}\n\n\tvar l index.Library\n\tvar err error\n\tswitch {\n\tcase *itlXML != \"\":\n\t\tl, err = importXML(*itlXML)\n\tcase *path != \"\":\n\t\tl = importPath(*path)\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tnl := index.Convert(l, \"TrackID\")\n\n\tnf, err := os.Create(*out)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer nf.Close()\n\n\terr = index.WriteTo(nl, nf)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc importXML(itlXML string) (index.Library, error) {\n\tf, err := os.Open(itlXML)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tl, err := itl.ReadFrom(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn l, nil\n}\n\nfunc importPath(path string) index.Library {\n\ttracks := make(map[string]*Track)\n\tfiles := walk(path)\n\tfor p := range files {\n\t\tif validExtension(p) {\n\t\t\ttrack, err := processPath(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error processing '%v': %v\\n\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttracks[p] = track\n\t\t}\n\t}\n\n\treturn &Library{\n\t\ttracks: tracks,\n\t}\n}\n<commit_msg>Tidy up tchimport<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ntchimport is a tool which builds Tchaik Libraries (metadata indexes) from iTunes Library XML files or\nalternatively by reading metadata from audio files within a directory tree.\n\nImporting large iTunes XML Library files is recommended: the Tchaik library has a much smaller set of data attributes\nfor each track (so a much smaller memory footprint).\n\n tchimport -itlXML <itunes-library> -out lib.tch\n\nAlternatively you can specify a path which will be transversed. All supported audio files within this path\n(.mp3, .m4a, .flac - ID3.v1,2.{2,3,4}, MP4 and FLAC) will be scanned for metadata. Only tracks which have readable\nmetadata will be added to the library. Any errors are logged to stdout. As no other unique identifying data is know,\nthe SHA1 sum of the file path is used as the TrackID.\n\n tchimport -path <directory-path> -out lib.tch\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/dhowden\/tchaik\/index\"\n\t\"github.com\/dhowden\/tchaik\/index\/itl\"\n)\n\nvar itlXML, path string\nvar out string\n\nfunc init() {\n\tflag.StringVar(&itlXML, \"itlXML\", \"\", \"iTunes Music Library XML file\")\n\tflag.StringVar(&path, \"path\", \"\", \"directory path containing audio files\")\n\tflag.StringVar(&out, \"out\", \"data.tch\", \"output file (Tchaik library binary format)\")\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif itlXML != \"\" && path != \"\" || itlXML == \"\" && path == \"\" {\n\t\tfmt.Println(\"must specify either 'itlXML' or 'path'\")\n\t\tos.Exit(1)\n\t}\n\n\tif out == \"\" {\n\t\tfmt.Println(\"must specify 'out'\")\n\t\tos.Exit(1)\n\t}\n\n\tvar l index.Library\n\tvar err error\n\tswitch {\n\tcase itlXML != \"\":\n\t\tl, err = importXML(itlXML)\n\tcase path != \"\":\n\t\tl = importPath(path)\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\terr = writeLibrary(index.Convert(l, \"TrackID\"))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc writeLibrary(l index.Library) error {\n\tf, err := os.Create(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn index.WriteTo(l, f)\n}\n\nfunc importXML(itlXML string) (index.Library, error) {\n\tf, err := os.Open(itlXML)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tl, err := itl.ReadFrom(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn l, nil\n}\n\nfunc importPath(path string) index.Library {\n\ttracks := make(map[string]*Track)\n\tfiles := walk(path)\n\tfor p := range files {\n\t\tif validExtension(p) {\n\t\t\ttrack, err := processPath(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error processing '%v': %v\\n\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttracks[p] = track\n\t\t}\n\t}\n\n\treturn &Library{\n\t\ttracks: tracks,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/kkdai\/youtube\/v2\"\n\tytdl \"github.com\/kkdai\/youtube\/v2\/downloader\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nconst usageString string = `Usage: youtubedr [OPTION] [URL]\nDownload a video from youtube.\nExample: youtubedr -o \"Campaign Diary\".mp4 https:\/\/www.youtube.com\/watch\\?v\\=XbNghLqsVwU\n`\n\nvar (\n\toutputFile string\n\toutputDir string\n\toutputQuality string\n\titag int\n\tinfo bool\n\tinsecureSkipVerify bool\n)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%+v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc run() error {\n\tflag.Usage = func() {\n\t\tfmt.Println(usageString)\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"\\n\" + `Use the HTTP_PROXY environment variable to set a HTTP or SOCSK5 proxy. The proxy type is determined by the URL scheme.\n\"http\", \"https\", and \"socks5\" are supported. If the scheme is empty, \"http\" is assumed.\"`)\n\t}\n\tusr, _ := user.Current()\n\tflag.StringVar(&outputFile, \"o\", \"\", \"The output file\")\n\tflag.StringVar(&outputDir, \"d\",\n\t\tfilepath.Join(usr.HomeDir, \"Movies\", \"youtubedr\"),\n\t\t\"The output directory.\")\n\tflag.StringVar(&outputQuality, \"q\", \"\", \"The output file quality (hd720, medium)\")\n\tflag.IntVar(&itag, \"i\", 0, \"Specify itag number, e.g. 13, 17\")\n\tflag.BoolVar(&info, \"info\", false, \"show info of video\")\n\tflag.BoolVar(&insecureSkipVerify, \"insecure-skip-tls-verify\", false, \"skip server certificate verification\")\n\n\tflag.Parse()\n\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\treturn nil\n\t}\n\n\thttpTransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tIdleConnTimeout: 60 * time.Second,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t}\n\n\tif insecureSkipVerify {\n\t\tlog.Println(\"Skip server certificate verification\")\n\t\thttpTransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tdl := ytdl.Downloader{\n\t\tOutputDir: outputDir,\n\t}\n\tdl.HTTPClient = &http.Client{Transport: httpTransport}\n\n\targ := flag.Arg(0)\n\n\tvideo, err := dl.GetVideo(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif info {\n\t\tfmt.Printf(\"Title: %s\\n\", video.Title)\n\t\tfmt.Printf(\"Author: %s\\n\", video.Author)\n\t\tfmt.Printf(\"Duration: %v\\n\", video.Duration)\n\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetAutoWrapText(false)\n\t\ttable.SetHeader([]string{\"itag\", \"quality\", \"MimeType\"})\n\n\t\tfor _, itag := range video.Formats {\n\t\t\ttable.Append([]string{strconv.Itoa(itag.ItagNo), itag.Quality, itag.MimeType})\n\t\t}\n\t\ttable.Render()\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"download to directory\", outputDir)\n\n\tif outputQuality == \"hd1080\" {\n\t\tfmt.Println(\"check ffmpeg is installed....\")\n\t\tffmpegVersionCmd := exec.Command(\"ffmpeg\", \"-version\")\n\t\tif err := ffmpegVersionCmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"please check ffmpeg is installed correctly, err: %w\", err)\n\t\t}\n\t\treturn dl.DownloadWithHighQuality(context.Background(), outputFile, video, outputQuality)\n\t}\n\n\tvar format *youtube.Format\n\tif itag > 0 {\n\t\tformat = video.Formats.FindByItag(itag)\n\t\tif format == nil {\n\t\t\treturn fmt.Errorf(\"unable to find format with itag %d\", itag)\n\t\t}\n\t} else {\n\t\tif len(video.Formats) == 0 {\n\t\t\treturn errors.New(\"no formats found\")\n\t\t}\n\t\tformat = &video.Formats[0]\n\t}\n\n\treturn dl.Download(context.Background(), video, format, outputFile)\n}\n<commit_msg>Always utilize `q` option<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/kkdai\/youtube\/v2\"\n\tytdl \"github.com\/kkdai\/youtube\/v2\/downloader\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nconst usageString string = `Usage: youtubedr [OPTION] [URL]\nDownload a video from youtube.\nExample: youtubedr -o \"Campaign Diary\".mp4 https:\/\/www.youtube.com\/watch\\?v\\=XbNghLqsVwU\n`\n\nvar (\n\toutputFile string\n\toutputDir string\n\toutputQuality string\n\titag int\n\tinfo bool\n\tinsecureSkipVerify bool\n)\n\nfunc main() {\n\tif err := run(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%+v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc run() error {\n\tflag.Usage = func() {\n\t\tfmt.Println(usageString)\n\t\tflag.PrintDefaults()\n\t\tfmt.Println(\"\\n\" + `Use the HTTP_PROXY environment variable to set a HTTP or SOCSK5 proxy. The proxy type is determined by the URL scheme.\n\"http\", \"https\", and \"socks5\" are supported. If the scheme is empty, \"http\" is assumed.\"`)\n\t}\n\tusr, _ := user.Current()\n\tflag.StringVar(&outputFile, \"o\", \"\", \"The output file\")\n\tflag.StringVar(&outputDir, \"d\",\n\t\tfilepath.Join(usr.HomeDir, \"Movies\", \"youtubedr\"),\n\t\t\"The output directory.\")\n\tflag.StringVar(&outputQuality, \"q\", \"\", \"The output file quality (hd720, medium)\")\n\tflag.IntVar(&itag, \"i\", 0, \"Specify itag number, e.g. 13, 17\")\n\tflag.BoolVar(&info, \"info\", false, \"show info of video\")\n\tflag.BoolVar(&insecureSkipVerify, \"insecure-skip-tls-verify\", false, \"skip server certificate verification\")\n\n\tflag.Parse()\n\n\tif len(flag.Args()) == 0 {\n\t\tflag.Usage()\n\t\treturn nil\n\t}\n\n\thttpTransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tIdleConnTimeout: 60 * time.Second,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t}\n\n\tif insecureSkipVerify {\n\t\tlog.Println(\"Skip server certificate verification\")\n\t\thttpTransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tdl := ytdl.Downloader{\n\t\tOutputDir: outputDir,\n\t}\n\tdl.HTTPClient = &http.Client{Transport: httpTransport}\n\n\targ := flag.Arg(0)\n\n\tvideo, err := dl.GetVideo(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif info {\n\t\tfmt.Printf(\"Title: %s\\n\", video.Title)\n\t\tfmt.Printf(\"Author: %s\\n\", video.Author)\n\t\tfmt.Printf(\"Duration: %v\\n\", video.Duration)\n\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetAutoWrapText(false)\n\t\ttable.SetHeader([]string{\"itag\", \"quality\", \"MimeType\"})\n\n\t\tfor _, itag := range video.Formats {\n\t\t\ttable.Append([]string{strconv.Itoa(itag.ItagNo), itag.Quality, itag.MimeType})\n\t\t}\n\t\ttable.Render()\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"download to directory\", outputDir)\n\n\tif len(video.Formats) == 0 {\n\t\treturn errors.New(\"no formats found\")\n\t}\n\n\tvar format *youtube.Format\n\tif itag > 0 {\n\t\tformat = video.Formats.FindByItag(itag)\n\t\tif format == nil {\n\t\t\treturn fmt.Errorf(\"unable to find format with itag %d\", itag)\n\t\t}\n\t\toutputQuality = format.Quality\n\t} else if outputQuality != \"\" {\n\t\tformat = video.Formats.FindByQuality(outputQuality)\n\t\tif format == nil {\n\t\t\treturn fmt.Errorf(\"unable to find format with quality %s\", outputQuality)\n\t\t}\n\t} else {\n\t\tformat = &video.Formats[0]\n\t}\n\n\tif outputQuality == \"hd1080\" {\n\t\tfmt.Println(\"check ffmpeg is installed....\")\n\t\tffmpegVersionCmd := exec.Command(\"ffmpeg\", \"-version\")\n\t\tif err := ffmpegVersionCmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"please check ffmpeg is installed correctly, err: %w\", err)\n\t\t}\n\t\treturn dl.DownloadWithHighQuality(context.Background(), outputFile, video, outputQuality)\n\t}\n\treturn dl.Download(context.Background(),video, format, outputFile)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The project AUTHORS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage feeds\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\tatomURL = \"http:\/\/www.example.com\/atom_feed.xml\"\n\trssURL = \"http:\/\/www.example.com\/rss_feed.xml\"\n\thtmlCode = `<!DOCTYPE html>\n<html>\n <head>\n <title>Foobar<\/title>\n <link href=\"http:\/\/www.example.com\/atom_feed.xml\" type=\"application\/atom+xml\"\/>\n <link href=\"http:\/\/www.example.com\/rss_feed.xml\" type=\"application\/rss+xml\"\/>\n <\/head>\n <body>\n <\/body>\n<\/html>`\n)\n\nfunc TestFind(t *testing.T) {\n\tlinks, err := Find([]byte(htmlCode))\n\tif err != nil {\n\t\tt.Fatal(links)\n\t}\n\n\ttestResults(t, links)\n}\n\nfunc TestFindFromFile(t *testing.T) {\n\tlinks, err := FindFromFile(os.Getenv(\"GOPATH\") + \"\/src\/github.com\/gilliek\/go-feedsfinder\/testdata\/index.html\")\n\tif err != nil {\n\t\tt.Fatal(links)\n\t}\n\n\ttestResults(t, links)\n}\n\nfunc testResults(t *testing.T, links []Link) {\n\tif nbLinks := len(links); nbLinks != 2 {\n\t\tt.Fatalf(\"Invalid number of links: expected 2, found %d\", nbLinks)\n\t}\n\n\tatom := links[0]\n\trss := links[1]\n\n\tif atom.URL != atomURL {\n\t\tt.Errorf(\"Invalid Atom feed URL: expected '%s', found '%s'\", atomURL, atom.URL)\n\t}\n\n\tif atom.Type != \"atom\" {\n\t\tt.Errorf(\"Invalid Atom feed type: expected 'atom', found '%s'\", atom.Type)\n\t}\n\n\tif rss.URL != rssURL {\n\t\tt.Errorf(\"Invalid RSS feed URL: expected '%s', found '%s'\", rssURL, rss.URL)\n\t}\n\n\tif rss.Type != \"rss\" {\n\t\tt.Errorf(\"Invalid RSS feed type: expected 'rss', found '%s'\", rss.Type)\n\t}\n\n}\n<commit_msg>add test for the FindFromURL function<commit_after>\/\/ Copyright 2014 The project AUTHORS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage feeds\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\tatomURL = \"http:\/\/www.example.com\/atom_feed.xml\"\n\trssURL = \"http:\/\/www.example.com\/rss_feed.xml\"\n\thtmlCode = `<!DOCTYPE html>\n<html>\n <head>\n <title>Foobar<\/title>\n <link href=\"http:\/\/www.example.com\/atom_feed.xml\" type=\"application\/atom+xml\"\/>\n <link href=\"http:\/\/www.example.com\/rss_feed.xml\" type=\"application\/rss+xml\"\/>\n <\/head>\n <body>\n <\/body>\n<\/html>`\n)\n\nfunc TestFind(t *testing.T) {\n\tlinks, err := Find([]byte(htmlCode))\n\tif err != nil {\n\t\tt.Fatal(links)\n\t}\n\n\ttestResults(t, links)\n}\n\nfunc TestFindFromURL(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, htmlCode)\n\t}\n\n\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer server.Close()\n\n\tlinks, err := FindFromURL(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestResults(t, links)\n}\n\nfunc TestFindFromFile(t *testing.T) {\n\tlinks, err := FindFromFile(os.Getenv(\"GOPATH\") + \"\/src\/github.com\/gilliek\/go-feedsfinder\/testdata\/index.html\")\n\tif err != nil {\n\t\tt.Fatal(links)\n\t}\n\n\ttestResults(t, links)\n}\n\nfunc testResults(t *testing.T, links []Link) {\n\tif nbLinks := len(links); nbLinks != 2 {\n\t\tt.Fatalf(\"Invalid number of links: expected 2, found %d\", nbLinks)\n\t}\n\n\tatom := links[0]\n\trss := links[1]\n\n\tif atom.URL != atomURL {\n\t\tt.Errorf(\"Invalid Atom feed URL: expected '%s', found '%s'\", atomURL, atom.URL)\n\t}\n\n\tif atom.Type != \"atom\" {\n\t\tt.Errorf(\"Invalid Atom feed type: expected 'atom', found '%s'\", atom.Type)\n\t}\n\n\tif rss.URL != rssURL {\n\t\tt.Errorf(\"Invalid RSS feed URL: expected '%s', found '%s'\", rssURL, rss.URL)\n\t}\n\n\tif rss.Type != \"rss\" {\n\t\tt.Errorf(\"Invalid RSS feed type: expected 'rss', found '%s'\", rss.Type)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package flash_test\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/blue-jay\/core\/flash\"\n\t\"github.com\/blue-jay\/core\/session\"\n\t\"github.com\/blue-jay\/core\/view\"\n\n\t\"github.com\/gorilla\/sessions\"\n)\n\n\/\/ TestFlashSession ensures flashes can be added to the session.\nfunc TestFlashSession(t *testing.T) {\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Success test.\"\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Get the session\n\tsess, _ := s.Instance(r)\n\n\t\/\/ Add flashes to the session\n\tsess.AddFlash(flash.Info{text, flash.Success})\n\tsess.Save(r, w)\n\n\tflashes := sess.Flashes()\n\n\tif len(flashes) != 1 {\n\t\tt.Fatal(\"Expected 1 flash message.\")\n\t}\n\n\tf, ok := flashes[0].(flash.Info)\n\n\tif f.Class != flash.Success {\n\t\tt.Fatal(\"Flash class is: %v, should be: %v.\", f.Class, flash.Success)\n\t}\n\n\tif f.Message != text {\n\t\tt.Fatalf(\"Flash message is: %v, should be: %v\", f.Message, text)\n\t}\n\n\tif !ok {\n\t\tt.Fatal(\"Flashes missing from session.\")\n\t}\n}\n\n\/\/ TestModify ensures flashes are added to the view.\n\/*func TestModify(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test\",\n\t\tChildren: []string{},\n\t}\n\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Apply the flash modifier\n\tviewInfo.SetModifiers(\n\t\tflash.Modify,\n\t)\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Success test.\"\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tv := viewInfo.New()\n\n\t\t\/\/ Get the session\n\t\tsess, _ := s.Instance(r)\n\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(flash.Info{text, flash.Success})\n\t\tsess.Save(r, w)\n\n\t\terr := v.Render(w, r)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should not get error: %v\", err)\n\t\t}\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := fmt.Sprintf(`<div class=\"%v\">%v<\/div>`, flash.Success, text)\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}\n\n\/\/ TestModify ensures flashes are not displayed on the page.\nfunc TestModifyFail(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test_fail\",\n\t\tChildren: []string{},\n\t}\n\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Apply the flash modifier\n\tviewInfo.SetModifiers(\n\t\tflash.Modify,\n\t)\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Success test.\"\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tv := viewInfo.New()\n\n\t\t\/\/ Get the session\n\t\tsess, _ := s.Instance(r)\n\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(flash.Info{text, flash.Success})\n\t\tsess.Save(r, w)\n\n\t\terr := v.Render(w, r)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should not get error: %v\", err)\n\t\t}\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := \"Failure!\"\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}\n\n\/\/ TestFlashDefault ensures flashes are added to the view even if a plain text\n\/\/ message is added to flashes instead of a flash.Info type\nfunc TestFlashDefault(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test\",\n\t\tChildren: []string{},\n\t}\n\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Apply the flash modifier\n\tviewInfo.SetModifiers(\n\t\tflash.Modify,\n\t)\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Just a string.\"\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tv := viewInfo.New()\n\n\t\t\/\/ Get the session\n\t\tsess, _ := s.Instance(r)\n\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(text)\n\t\tsess.Save(r, w)\n\n\t\terr := v.Render(w, r)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should not get error: %v\", err)\n\t\t}\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := fmt.Sprintf(`<div class=\"%v\">%v<\/div>`, flash.Standard, text)\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}*\/\n\n\/\/ TestSendFlashes are available for AJAX.\nfunc TestSendFlashes(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test\",\n\t\tChildren: []string{},\n\t}\n\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Success test.\"\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/v := viewInfo.New()\n\n\t\t\/\/ Get the session\n\t\tsess, _ := s.Instance(r)\n\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(flash.Info{text, flash.Success})\n\t\tsess.AddFlash(text)\n\t\tsess.Save(r, w)\n\n\t\tflash.SendFlashes(w, r, sess)\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := fmt.Sprintf(`[{\"Message\":\"%v\",\"Class\":\"%v\"},{\"Message\":\"%v\",\"Class\":\"%v\"}]`, text, flash.Success, text, flash.Standard)\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}\n\n\/*\n\/\/ TestNonStringFlash ensures flashes do not error when added with a non-standard type.\nfunc TestNonStringFlash(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test\",\n\t\tChildren: []string{},\n\t}\n\n\toptions := sessions.Options{\n\t\tPath: \"\/\",\n\t\tDomain: \"\",\n\t\tMaxAge: 28800,\n\t\tSecure: false,\n\t\tHttpOnly: true,\n\t}\n\n\ts := session.Info{\n\t\tAuthKey: \"PzCh6FNAB7\/jhmlUQ0+25sjJ+WgcJeKR2bAOtnh9UnfVN+WJSBvY\/YC80Rs+rbMtwfmSP4FUSxKPtpYKzKFqFA==\",\n\t\tEncryptKey: \"3oTKCcKjDHMUlV+qur2Ve664SPpSuviyGQ\/UqnroUD8=\",\n\t\tCSRFKey: \"xULAGF5FcWvqHsXaovNFJYfgCt6pedRPROqNvsZjU18=\",\n\t\tName: \"sess\",\n\t\tOptions: options,\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Apply the flash modifier\n\tviewInfo.SetModifiers(\n\t\tflash.Modify,\n\t)\n\n\t\/\/ Set up the session cookie store\n\ts.SetupConfig()\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := 123\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tv := viewInfo.New()\n\n\t\t\/\/ Get the session\n\t\tsess, _ := s.Instance(r)\n\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(text)\n\t\tsess.Save(r, w)\n\n\t\terr := v.Render(w, r)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Should not get error: %v\", err)\n\t\t}\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := fmt.Sprintf(`<div class=\"%v\">%v<\/div>`, flash.Standard, text)\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}*\/\n<commit_msg>Clean up flash tests<commit_after>package flash_test\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/blue-jay\/core\/flash\"\n\t\"github.com\/blue-jay\/core\/view\"\n)\n\n\/\/ Session is an interface for typical sessions\ntype Session struct {\n\tflashes []interface{}\n\tmutex sync.RWMutex\n}\n\n\/\/ Save mocks saving the session\nfunc (s *Session) Save(r *http.Request, w http.ResponseWriter) error {\n\treturn nil\n}\n\n\/\/ Flashes retrieves the flashes\nfunc (s *Session) Flashes(vars ...string) []interface{} {\n\ts.mutex.RLock()\n\tf := s.flashes\n\t\/\/ Clear the flashes\n\ts.flashes = s.flashes[:0]\n\ts.mutex.RUnlock()\n\treturn f\n}\n\n\/\/ AddFlash adds a flash to the list\nfunc (s *Session) AddFlash(f interface{}) {\n\ts.mutex.Lock()\n\ts.flashes = append(s.flashes, f)\n\ts.mutex.Unlock()\n}\n\n\/\/ TestFlashSession ensures flashes can be added to the session.\nfunc TestFlashSession(t *testing.T) {\n\ttext := \"Success test.\"\n\n\t\/\/ Get the fake session\n\tsess := Session{}\n\n\t\/\/ Add flashes to the session\n\tsess.AddFlash(flash.Info{text, flash.Success})\n\n\t\/\/ Get the flashes\n\tflashes := sess.Flashes()\n\n\tif len(flashes) != 1 {\n\t\tt.Fatal(\"Expected 1 flash message.\")\n\t}\n\n\t\/\/ Convert the flash\n\tf, ok := flashes[0].(flash.Info)\n\n\tif f.Class != flash.Success {\n\t\tt.Fatal(\"Flash class is: %v, should be: %v.\", f.Class, flash.Success)\n\t}\n\n\tif f.Message != text {\n\t\tt.Fatalf(\"Flash message is: %v, should be: %v\", f.Message, text)\n\t}\n\n\tif !ok {\n\t\tt.Fatal(\"Flashes missing from session.\")\n\t}\n}\n\n\/\/ TestSendFlashes are available for AJAX.\nfunc TestSendFlashes(t *testing.T) {\n\tviewInfo := &view.Info{\n\t\tBaseURI: \"\/\",\n\t\tExtension: \"tmpl\",\n\t\tFolder: \"testdata\/view\",\n\t\tCaching: false,\n\t}\n\n\ttemplates := view.Template{\n\t\tRoot: \"test\",\n\t\tChildren: []string{},\n\t}\n\n\t\/\/ Set up the view\n\tviewInfo.SetTemplates(templates.Root, templates.Children)\n\n\t\/\/ Simulate a request\n\tw := httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"\/\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext := \"Success test.\"\n\n\t\/\/ Get the fake session\n\tsess := &Session{}\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Add flashes to the session\n\t\tsess.AddFlash(flash.Info{text, flash.Success})\n\t\tsess.AddFlash(text)\n\n\t\t\/\/ Send the flashes\n\t\tflash.SendFlashes(w, r, sess)\n\t})\n\n\thandler.ServeHTTP(w, r)\n\n\tactual := w.Body.String()\n\texpected := fmt.Sprintf(`[{\"Message\":\"%v\",\"Class\":\"%v\"},{\"Message\":\"%v\",\"Class\":\"%v\"}]`, text, flash.Success, text, flash.Standard)\n\n\tif actual != expected {\n\t\tt.Fatalf(\"\\nactual: %v\\nexpected: %v\", actual, expected)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\/\/ Flogo action\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/action\/flow\"\n\n\t\/\/ Activities from https:\/\/github.com\/TIBCOSoftware\/flogo-contrib\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/actreply\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/actreturn\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/aggregate\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/aggregate_old\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/app\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/awsiot\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/awssns\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/channel\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/coap\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/couchbase\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/counter\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/error\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/filter\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/gpio\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/inference\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/kafkapub\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/lambda\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/log\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/mapper\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/mongodb\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/rest\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/subflow\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/twilio\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/wsmessage\"\n\n\t\/\/ Triggers from https:\/\/github.com\/TIBCOSoftware\/flogo-contrib\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/channel\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/cli\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/coap\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/graphql\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/kafkasub\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/lambda\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/mqtt\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/rest\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/timer\"\n\n\t\/\/ Activities from https:\/\/github.com\/retgits\/flogo-components\n\t_ \"github.com\/retgits\/flogo-components\/activity\/addtodate\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazons3\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazonses\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazonsqssend\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/awsssm\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/commandparser\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/downloadfile\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/dynamodbinsert\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/dynamodbquery\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/envkey\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/githubissues\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/globalcache\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/gzip\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/iftttwebhook\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/mashtoken\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/mysqlfetch\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/null\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/pubnubpublisher\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/queryparser\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/randomnumber\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/randomstring\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/readfile\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/tomlreader\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/trellocard\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/writetofile\"\n\n\t\/\/ Triggers from https:\/\/github.com\/retgits\/flogo-components\n\t_ \"github.com\/retgits\/flogo-components\/trigger\/grpctrigger\"\n\t_ \"github.com\/retgits\/flogo-components\/trigger\/pubnubsubscriber\"\n)\n<commit_msg>Update Flogo imports<commit_after>package main\n\nimport (\n\t\/\/ Flogo action\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/action\/flow\"\n\n\t\/\/ Activities from https:\/\/github.com\/TIBCOSoftware\/flogo-contrib\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/actreply\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/actreturn\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/aggregate\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/aggregate_old\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/app\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/awsiot\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/awssns\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/channel\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/coap\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/couchbase\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/counter\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/error\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/filter\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/gpio\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/inference\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/kafkapub\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/lambda\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/log\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/mapper\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/mongodb\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/rest\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/subflow\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/twilio\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/wsmessage\"\n\n\t\/\/ Triggers from https:\/\/github.com\/TIBCOSoftware\/flogo-contrib\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/activity\/channel\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/cli\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/coap\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/graphql\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/kafkasub\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/lambda\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/mqtt\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/rest\"\n\t_ \"github.com\/TIBCOSoftware\/flogo-contrib\/trigger\/timer\"\n\n\t\/\/ Activities from https:\/\/github.com\/retgits\/flogo-components\n\t_ \"github.com\/retgits\/flogo-components\/activity\/addtodate\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazons3\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazonses\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/amazonsqssend\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/awsssm\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/commandparser\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/downloadfile\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/dynamodbinsert\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/dynamodbquery\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/envkey\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/githubissues\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/globalcache\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/gzip\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/iftttwebhook\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/mashtoken\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/mysqlfetch\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/null\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/pubnubpublisher\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/queryparser\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/randomnumber\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/randomstring\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/readfile\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/tomlreader\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/trellocard\"\n\t_ \"github.com\/retgits\/flogo-components\/activity\/writetofile\"\n\n\t\/\/ Triggers from https:\/\/github.com\/retgits\/flogo-components\n\t_ \"github.com\/retgits\/flogo-components\/trigger\/grpctrigger\"\n\t_ \"github.com\/retgits\/flogo-components\/trigger\/openfaas\"\n\t_ \"github.com\/retgits\/flogo-components\/trigger\/pubnubsubscriber\"\n)\n<|endoftext|>"} {"text":"<commit_before>package fly\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/EngineerBetter\/concourse-up\/config\"\n\t\"github.com\/EngineerBetter\/concourse-up\/util\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n)\n\n\/\/ AWSPipeline is AWS specific implementation of Pipeline interface\ntype AWSPipeline struct {\n\tAWSAccessKeyID string\n\tAWSDefaultRegion string\n\tAWSSecretAccessKey string\n\tDeployment string\n\tFlagAWSRegion string\n\tFlagDomain string\n\tFlagGithubAuthID string\n\tFlagGithubAuthSecret string\n\tFlagTLSCert string\n\tFlagTLSKey string\n\tFlagWebSize string\n\tFlagWorkerSize string\n\tFlagWorkers int\n\tConcourseUpVersion string\n\tNamespace string\n}\n\n\/\/ NewAWSPipeline return AWSPipeline\nfunc NewAWSPipeline() Pipeline {\n\treturn AWSPipeline{}\n}\n\n\/\/BuildPipelineParams builds params for AWS concourse-up self update pipeline\nfunc (a AWSPipeline) BuildPipelineParams(config config.Config) (Pipeline, error) {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreds, err := sess.Config.Credentials.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tdomain string\n\t\tconcourseCert string\n\t\tconcourseKey string\n\t)\n\n\tif !validIP4(config.Domain) {\n\t\tdomain = config.Domain\n\t}\n\n\tif domain != \"\" {\n\t\tconcourseCert = config.ConcourseCert\n\t\tconcourseKey = config.ConcourseKey\n\t}\n\n\treturn AWSPipeline{\n\t\tAWSAccessKeyID: creds.AccessKeyID,\n\t\tAWSSecretAccessKey: creds.SecretAccessKey,\n\t\tDeployment: strings.TrimPrefix(config.Deployment, \"concourse-up-\"),\n\t\tFlagAWSRegion: config.Region,\n\t\tFlagDomain: domain,\n\t\tFlagGithubAuthID: config.GithubClientID,\n\t\tFlagGithubAuthSecret: config.GithubClientSecret,\n\t\tFlagTLSCert: concourseCert,\n\t\tFlagTLSKey: concourseKey,\n\t\tFlagWebSize: config.ConcourseWebSize,\n\t\tFlagWorkerSize: config.ConcourseWorkerSize,\n\t\tFlagWorkers: config.ConcourseWorkerCount,\n\t\tConcourseUpVersion: ConcourseUpVersion,\n\t\tNamespace: config.Namespace,\n\t}, nil\n}\n\n\/\/ GetConfigTemplate returns template for AWS Concourse Up self update pipeline\nfunc (a AWSPipeline) GetConfigTemplate() string {\n\treturn awsPipelineTemplate\n\n}\n\n\/\/ Indent is a helper function to indent the field a given number of spaces\nfunc (a AWSPipeline) Indent(countStr, field string) string {\n\treturn util.Indent(countStr, field)\n}\n\nconst awsPipelineTemplate = `\n---\nresources:\n- name: concourse-up-release\n type: github-release\n source:\n user: engineerbetter\n repository: concourse-up\n pre_release: true\n- name: every-month\n type: time\n source: {interval: 730h}\n\njobs:\n- name: self-update\n serial_groups: [cup]\n serial: true\n plan:\n - get: concourse-up-release\n trigger: true\n - task: update\n params:\n AWS_REGION: \"{{ .FlagAWSRegion }}\"\n DOMAIN: \"{{ .FlagDomain }}\"\n TLS_CERT: |-\n {{ .Indent \"8\" .FlagTLSCert }}\n TLS_KEY: |-\n {{ .Indent \"8\" .FlagTLSKey }}\n WORKERS: \"{{ .FlagWorkers }}\"\n WORKER_SIZE: \"{{ .FlagWorkerSize }}\"\n WEB_SIZE: \"{{ .FlagWebSize }}\"\n DEPLOYMENT: \"{{ .Deployment }}\"\n GITHUB_AUTH_CLIENT_ID: \"{{ .FlagGithubAuthID }}\"\n GITHUB_AUTH_CLIENT_SECRET: \"{{ .FlagGithubAuthSecret }}\"\n AWS_ACCESS_KEY_ID: \"{{ .AWSAccessKeyID }}\"\n AWS_SECRET_ACCESS_KEY: \"{{ .AWSSecretAccessKey }}\"\n SELF_UPDATE: true\n NAMESPACE: {{ .Namespace }}\n config:\n platform: linux\n image_resource:\n type: docker-image\n source:\n repository: engineerbetter\/pcf-ops\n inputs:\n - name: concourse-up-release\n run:\n path: bash\n args:\n - -c\n - |\n set -eux\n\n cd concourse-up-release\n chmod +x concourse-up-linux-amd64\n .\/concourse-up-linux-amd64 deploy $DEPLOYMENT\n- name: renew-cert\n serial_groups: [cup]\n serial: true\n plan:\n - get: concourse-up-release\n version: {tag: {{ .ConcourseUpVersion }} }\n - get: every-month\n trigger: true\n - task: update\n params:\n AWS_REGION: \"{{ .FlagAWSRegion }}\"\n DOMAIN: \"{{ .FlagDomain }}\"\n TLS_CERT: |-\n {{ .Indent \"8\" .FlagTLSCert }}\n TLS_KEY: |-\n {{ .Indent \"8\" .FlagTLSKey }}\n WORKERS: \"{{ .FlagWorkers }}\"\n WORKER_SIZE: \"{{ .FlagWorkerSize }}\"\n WEB_SIZE: \"{{ .FlagWebSize }}\"\n DEPLOYMENT: \"{{ .Deployment }}\"\n GITHUB_AUTH_CLIENT_ID: \"{{ .FlagGithubAuthID }}\"\n GITHUB_AUTH_CLIENT_SECRET: \"{{ .FlagGithubAuthSecret }}\"\n AWS_ACCESS_KEY_ID: \"{{ .AWSAccessKeyID }}\"\n AWS_SECRET_ACCESS_KEY: \"{{ .AWSSecretAccessKey }}\"\n SELF_UPDATE: true\n NAMESPACE: {{ .Namespace }}\n config:\n platform: linux\n image_resource:\n type: docker-image\n source:\n repository: engineerbetter\/pcf-ops\n inputs:\n - name: concourse-up-release\n run:\n path: bash\n args:\n - -c\n - |\n set -eux\n\n cd concourse-up-release\n chmod +x concourse-up-linux-amd64\n\t\t .\/concourse-up-linux-amd64 deploy $DEPLOYMENT\n\t\t `\n<commit_msg>removes unnecessary tab from self update pipeline config<commit_after>package fly\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/EngineerBetter\/concourse-up\/config\"\n\t\"github.com\/EngineerBetter\/concourse-up\/util\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n)\n\n\/\/ AWSPipeline is AWS specific implementation of Pipeline interface\ntype AWSPipeline struct {\n\tAWSAccessKeyID string\n\tAWSDefaultRegion string\n\tAWSSecretAccessKey string\n\tDeployment string\n\tFlagAWSRegion string\n\tFlagDomain string\n\tFlagGithubAuthID string\n\tFlagGithubAuthSecret string\n\tFlagTLSCert string\n\tFlagTLSKey string\n\tFlagWebSize string\n\tFlagWorkerSize string\n\tFlagWorkers int\n\tConcourseUpVersion string\n\tNamespace string\n}\n\n\/\/ NewAWSPipeline return AWSPipeline\nfunc NewAWSPipeline() Pipeline {\n\treturn AWSPipeline{}\n}\n\n\/\/BuildPipelineParams builds params for AWS concourse-up self update pipeline\nfunc (a AWSPipeline) BuildPipelineParams(config config.Config) (Pipeline, error) {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreds, err := sess.Config.Credentials.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tdomain string\n\t\tconcourseCert string\n\t\tconcourseKey string\n\t)\n\n\tif !validIP4(config.Domain) {\n\t\tdomain = config.Domain\n\t}\n\n\tif domain != \"\" {\n\t\tconcourseCert = config.ConcourseCert\n\t\tconcourseKey = config.ConcourseKey\n\t}\n\n\treturn AWSPipeline{\n\t\tAWSAccessKeyID: creds.AccessKeyID,\n\t\tAWSSecretAccessKey: creds.SecretAccessKey,\n\t\tDeployment: strings.TrimPrefix(config.Deployment, \"concourse-up-\"),\n\t\tFlagAWSRegion: config.Region,\n\t\tFlagDomain: domain,\n\t\tFlagGithubAuthID: config.GithubClientID,\n\t\tFlagGithubAuthSecret: config.GithubClientSecret,\n\t\tFlagTLSCert: concourseCert,\n\t\tFlagTLSKey: concourseKey,\n\t\tFlagWebSize: config.ConcourseWebSize,\n\t\tFlagWorkerSize: config.ConcourseWorkerSize,\n\t\tFlagWorkers: config.ConcourseWorkerCount,\n\t\tConcourseUpVersion: ConcourseUpVersion,\n\t\tNamespace: config.Namespace,\n\t}, nil\n}\n\n\/\/ GetConfigTemplate returns template for AWS Concourse Up self update pipeline\nfunc (a AWSPipeline) GetConfigTemplate() string {\n\treturn awsPipelineTemplate\n\n}\n\n\/\/ Indent is a helper function to indent the field a given number of spaces\nfunc (a AWSPipeline) Indent(countStr, field string) string {\n\treturn util.Indent(countStr, field)\n}\n\nconst awsPipelineTemplate = `\n---\nresources:\n- name: concourse-up-release\n type: github-release\n source:\n user: engineerbetter\n repository: concourse-up\n pre_release: true\n- name: every-month\n type: time\n source: {interval: 730h}\n\njobs:\n- name: self-update\n serial_groups: [cup]\n serial: true\n plan:\n - get: concourse-up-release\n trigger: true\n - task: update\n params:\n AWS_REGION: \"{{ .FlagAWSRegion }}\"\n DOMAIN: \"{{ .FlagDomain }}\"\n TLS_CERT: |-\n {{ .Indent \"8\" .FlagTLSCert }}\n TLS_KEY: |-\n {{ .Indent \"8\" .FlagTLSKey }}\n WORKERS: \"{{ .FlagWorkers }}\"\n WORKER_SIZE: \"{{ .FlagWorkerSize }}\"\n WEB_SIZE: \"{{ .FlagWebSize }}\"\n DEPLOYMENT: \"{{ .Deployment }}\"\n GITHUB_AUTH_CLIENT_ID: \"{{ .FlagGithubAuthID }}\"\n GITHUB_AUTH_CLIENT_SECRET: \"{{ .FlagGithubAuthSecret }}\"\n AWS_ACCESS_KEY_ID: \"{{ .AWSAccessKeyID }}\"\n AWS_SECRET_ACCESS_KEY: \"{{ .AWSSecretAccessKey }}\"\n SELF_UPDATE: true\n NAMESPACE: {{ .Namespace }}\n config:\n platform: linux\n image_resource:\n type: docker-image\n source:\n repository: engineerbetter\/pcf-ops\n inputs:\n - name: concourse-up-release\n run:\n path: bash\n args:\n - -c\n - |\n set -eux\n\n cd concourse-up-release\n chmod +x concourse-up-linux-amd64\n .\/concourse-up-linux-amd64 deploy $DEPLOYMENT\n- name: renew-cert\n serial_groups: [cup]\n serial: true\n plan:\n - get: concourse-up-release\n version: {tag: {{ .ConcourseUpVersion }} }\n - get: every-month\n trigger: true\n - task: update\n params:\n AWS_REGION: \"{{ .FlagAWSRegion }}\"\n DOMAIN: \"{{ .FlagDomain }}\"\n TLS_CERT: |-\n {{ .Indent \"8\" .FlagTLSCert }}\n TLS_KEY: |-\n {{ .Indent \"8\" .FlagTLSKey }}\n WORKERS: \"{{ .FlagWorkers }}\"\n WORKER_SIZE: \"{{ .FlagWorkerSize }}\"\n WEB_SIZE: \"{{ .FlagWebSize }}\"\n DEPLOYMENT: \"{{ .Deployment }}\"\n GITHUB_AUTH_CLIENT_ID: \"{{ .FlagGithubAuthID }}\"\n GITHUB_AUTH_CLIENT_SECRET: \"{{ .FlagGithubAuthSecret }}\"\n AWS_ACCESS_KEY_ID: \"{{ .AWSAccessKeyID }}\"\n AWS_SECRET_ACCESS_KEY: \"{{ .AWSSecretAccessKey }}\"\n SELF_UPDATE: true\n NAMESPACE: {{ .Namespace }}\n config:\n platform: linux\n image_resource:\n type: docker-image\n source:\n repository: engineerbetter\/pcf-ops\n inputs:\n - name: concourse-up-release\n run:\n path: bash\n args:\n - -c\n - |\n set -eux\n\n cd concourse-up-release\n chmod +x concourse-up-linux-amd64\n .\/concourse-up-linux-amd64 deploy $DEPLOYMENT\n`\n<|endoftext|>"} {"text":"<commit_before>package janus\n\nimport (\n\t\"time\"\n\n\t\"github.com\/RangelReale\/osin\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\n\/\/ OAuthSpec Holds an oauth definition and basic options\ntype OAuthSpec struct {\n\t*OAuth\n\tOAuthManager *OAuthManager\n}\n\n\/\/ OAuth holds the configuration for oauth proxies\ntype OAuth struct {\n\tID bson.ObjectId `bson:\"_id,omitempty\" json:\"id,omitempty\" valid:\"required\"`\n\tName string `bson:\"name\" json:\"name\" valid:\"required\"`\n\tCreatedAt time.Time `bson:\"created_at\" json:\"created_at\" valid:\"-\"`\n\tUpdatedAt time.Time `bson:\"updated_at\" json:\"updated_at\" valid:\"-\"`\n\tOauthEndpoints OauthEndpoints `bson:\"oauth_endpoints\" json:\"oauth_endpoints\"`\n\tOauthClientEndpoints OauthClientEndpoints `bson:\"oauth_client_endpoints\" json:\"oauth_client_endpoints\"`\n\tAllowedAccessTypes []osin.AccessRequestType `bson:\"allowed_access_types\" json:\"allowed_access_types\"`\n\tAllowedAuthorizeTypes []osin.AuthorizeRequestType `bson:\"allowed_authorize_types\" json:\"allowed_authorize_types\"`\n\tAuthorizeLoginRedirect string `bson:\"auth_login_redirect\" json:\"auth_login_redirect\"`\n}\n\n\/\/ OauthEndpoints defines the oauth endpoints that wil be proxied\ntype OauthEndpoints struct {\n\tAuthorize Proxy `bson:\"authorize\" json:\"authorize\"`\n\tToken Proxy `bson:\"token\" json:\"token\"`\n\tInfo Proxy `bson:\"info\" json:\"info\"`\n}\n\n\/\/ OauthClientEndpoints defines the oauth client endpoints that wil be proxied\ntype OauthClientEndpoints struct {\n\tCreate Proxy `bson:\"create\" json:\"create\"`\n\tRemove Proxy `bson:\"remove\" json:\"remove\"`\n}\n<commit_msg>Added secrets map to the oauth spec<commit_after>package janus\n\nimport (\n\t\"time\"\n\n\t\"github.com\/RangelReale\/osin\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\n\/\/ OAuthSpec Holds an oauth definition and basic options\ntype OAuthSpec struct {\n\t*OAuth\n\tOAuthManager *OAuthManager\n}\n\n\/\/ OAuth holds the configuration for oauth proxies\ntype OAuth struct {\n\tID bson.ObjectId `bson:\"_id,omitempty\" json:\"id,omitempty\" valid:\"required\"`\n\tName string `bson:\"name\" json:\"name\" valid:\"required\"`\n\tCreatedAt time.Time `bson:\"created_at\" json:\"created_at\" valid:\"-\"`\n\tUpdatedAt time.Time `bson:\"updated_at\" json:\"updated_at\" valid:\"-\"`\n\tOauthEndpoints OauthEndpoints `bson:\"oauth_endpoints\" json:\"oauth_endpoints\"`\n\tOauthClientEndpoints OauthClientEndpoints `bson:\"oauth_client_endpoints\" json:\"oauth_client_endpoints\"`\n\tAllowedAccessTypes []osin.AccessRequestType `bson:\"allowed_access_types\" json:\"allowed_access_types\"`\n\tAllowedAuthorizeTypes []osin.AuthorizeRequestType `bson:\"allowed_authorize_types\" json:\"allowed_authorize_types\"`\n\tAuthorizeLoginRedirect string `bson:\"auth_login_redirect\" json:\"auth_login_redirect\"`\n\tSecrets map[string]string `bson:\"secrets\" json:\"secrets\"`\n}\n\n\/\/ OauthEndpoints defines the oauth endpoints that wil be proxied\ntype OauthEndpoints struct {\n\tAuthorize Proxy `bson:\"authorize\" json:\"authorize\"`\n\tToken Proxy `bson:\"token\" json:\"token\"`\n\tInfo Proxy `bson:\"info\" json:\"info\"`\n}\n\n\/\/ OauthClientEndpoints defines the oauth client endpoints that wil be proxied\ntype OauthClientEndpoints struct {\n\tCreate Proxy `bson:\"create\" json:\"create\"`\n\tRemove Proxy `bson:\"remove\" json:\"remove\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\tretryablehttp \"github.com\/hashicorp\/go-retryablehttp\"\n)\n\nconst (\n\tapiURL = \"https:\/\/api.opendota.com\/api\/\"\n)\n\nfunc getMatchData(matchID int64) oDotaMatchData {\n\tvar matchData oDotaMatchData\n\tmatchURL := fmt.Sprintf(\"%s%s\/%d\", apiURL, \"matches\", matchID)\n\tlog.Println(matchID)\n\tgetJSON(matchURL, &matchData)\n\treturn matchData\n}\n\nfunc forceScan(matchID int64) {\n\tmatchURL := fmt.Sprintf(\"%s%s\/%d\", apiURL, \"request\", matchID)\n\n\tr, _ := retryablehttp.Post(matchURL, \"\", nil)\n\n\tdefer r.Body.Close()\n}\n\nfunc getLeaguesData() oDotaLeaguesData {\n\tvar leaguesData oDotaLeaguesData\n\tleaguesURL := fmt.Sprintf(\"%s%s\", apiURL, \"leagues\")\n\tgetJSON(leaguesURL, &leaguesData)\n\treturn leaguesData\n}\n\nfunc getPlayersData() oDotaPlayersData {\n\tvar playersData oDotaPlayersData\n\tplayersURL := fmt.Sprintf(\"%s%s\", apiURL, \"proPlayers\")\n\tgetJSON(playersURL, &playersData)\n\treturn playersData\n}\n<commit_msg>Forcescan logging<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\tretryablehttp \"github.com\/hashicorp\/go-retryablehttp\"\n)\n\nconst (\n\tapiURL = \"https:\/\/api.opendota.com\/api\/\"\n)\n\nfunc getMatchData(matchID int64) oDotaMatchData {\n\tvar matchData oDotaMatchData\n\tmatchURL := fmt.Sprintf(\"%s%s\/%d\", apiURL, \"matches\", matchID)\n\tlog.Println(matchID)\n\tgetJSON(matchURL, &matchData)\n\treturn matchData\n}\n\nfunc forceScan(matchID int64) {\n\tmatchURL := fmt.Sprintf(\"%s%s\/%d\", apiURL, \"request\", matchID)\n\tlog.Println(\"Forcescan\")\n\tlog.Println(matchID)\n\tr, _ := retryablehttp.Post(matchURL, \"\", nil)\n\n\tdefer r.Body.Close()\n}\n\nfunc getLeaguesData() oDotaLeaguesData {\n\tvar leaguesData oDotaLeaguesData\n\tleaguesURL := fmt.Sprintf(\"%s%s\", apiURL, \"leagues\")\n\tgetJSON(leaguesURL, &leaguesData)\n\treturn leaguesData\n}\n\nfunc getPlayersData() oDotaPlayersData {\n\tvar playersData oDotaPlayersData\n\tplayersURL := fmt.Sprintf(\"%s%s\", apiURL, \"proPlayers\")\n\tgetJSON(playersURL, &playersData)\n\treturn playersData\n}\n<|endoftext|>"} {"text":"<commit_before>package oplog\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype OpLog struct {\n\ts *mgo.Session\n\tStatus *OpLogStatus\n}\n\ntype OpLogStatus struct {\n\tStatus string `json:\"status\"`\n\t\/\/ Total number of events recieved on the UDP interface\n\tEventsReceived int `json:\"events_received\"`\n\t\/\/ Total number of events ingested into MongoDB with success\n\tEventsIngested int `json:\"events_ingested\"`\n\t\/\/ Total number of events received on the UDP interface with an invalid format\n\tEventsError int `json:\"events_error\"`\n\t\/\/ Total number of events discarded because the queue was full\n\tEventsDiscarded int `json:\"events_discarded\"`\n\t\/\/ Current number of events in the ingestion queue\n\tQueueSize int `json:\"queue_size\"`\n\t\/\/ Maximum number of events allowed in the ingestion queue before discarding events\n\tQueueMaxSize int `json:\"queue_max_size\"`\n\t\/\/ Number of clients connected to the SSE API\n\tClients int `json:\"clients\"`\n}\n\ntype OpLogFilter struct {\n\tType string\n}\n\ntype OpLogCollection struct {\n\t*mgo.Collection\n}\n\n\/\/ Operation represents an operation stored in the OpLog, ready to be exposed as SSE.\ntype Operation struct {\n\tId *bson.ObjectId `bson:\"_id,omitempty\"`\n\tEvent string `bson:\"event\"`\n\tData *OperationData `bson:\"data\"`\n}\n\n\/\/ OperationData is the data part of the SSE event for the operation.\ntype OperationData struct {\n\tTimestamp time.Time `bson:\"ts\" json:\"timestamp\"`\n\tUserId string `bson:\"uid\" json:\"user_id\"`\n\tType string `bson:\"t\" json:\"type\"`\n\tId string `bson:\"id\" json:\"id\"`\n}\n\n\/\/ objectId returns a bson.ObjectId from an hex representation of an object id of nil\n\/\/ if an empty string is passed\nfunc objectId(id string) *bson.ObjectId {\n\tif id != \"\" {\n\t\toid := bson.ObjectIdHex(id)\n\t\treturn &oid\n\t}\n\treturn nil\n}\n\n\/\/ Close closes the underlaying mgo session\nfunc (c *OpLogCollection) Close() {\n\tc.Database.Session.Close()\n}\n\n\/\/ Info returns a human readable version of the operation\nfunc (op *Operation) Info() string {\n\tid := \"(new)\"\n\tif op.Id != nil {\n\t\tid = op.Id.Hex()\n\t}\n\treturn fmt.Sprintf(\"%s:%s(%s:%s from %s)\", id, op.Event, op.Data.Type, op.Data.Id, op.Data.UserId)\n}\n\n\/\/ NewOpLog returns an OpLog connected to the given provided mongo URL.\n\/\/ If the capped collection does not exists, it will be created with the max\n\/\/ size defined by maxBytes parameter.\nfunc NewOpLog(mongoURL string, maxBytes int) (*OpLog, error) {\n\tsession, err := mgo.Dial(mongoURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toplog := &OpLog{session, &OpLogStatus{Status: \"OK\"}}\n\toplog.init(maxBytes)\n\treturn oplog, nil\n}\n\nfunc (oplog *OpLog) c() *OpLogCollection {\n\treturn &OpLogCollection{oplog.s.Copy().DB(\"\").C(\"oplog\")}\n}\n\n\/\/ init creates capped collection if it does not exists.\nfunc (oplog *OpLog) init(maxBytes int) {\n\texists := false\n\tnames, _ := oplog.s.DB(\"\").CollectionNames()\n\tfor _, name := range names {\n\t\tif name == \"oplog\" {\n\t\t\texists = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !exists {\n\t\tlog.Info(\"OPLOG creating capped collection\")\n\t\toplog.c().Create(&mgo.CollectionInfo{\n\t\t\tCapped: true,\n\t\t\tMaxBytes: maxBytes,\n\t\t})\n\t}\n}\n\n\/\/ Insert append a operation into the OpLog\nfunc (oplog *OpLog) Ingest(ops <-chan *Operation) {\n\tc := oplog.c()\n\tfor {\n\t\tselect {\n\t\tcase op := <-ops:\n\t\t\tlog.Debugf(\"OPLOG ingest operation: %#v\", op.Info())\n\t\t\toplog.Status.QueueSize = len(ops)\n\t\t\tfor {\n\t\t\t\tif err := c.Insert(op); err != nil {\n\t\t\t\t\tlog.Warnf(\"OPLOG can't insert operation, try to reconnect: %s\", err)\n\t\t\t\t\t\/\/ Try to reconnect\n\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t\tc.Close()\n\t\t\t\t\tc = oplog.c()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\toplog.Status.EventsIngested++\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ HasId checks if an operation id is present in the capped collection.\nfunc (oplog *OpLog) HasId(id string) bool {\n\tc := oplog.c()\n\tdefer c.Close()\n\toid := objectId(id)\n\tif oid == nil {\n\t\treturn false\n\t}\n\tcount, err := c.FindId(oid).Count()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn count != 0\n}\n\n\/\/ LastId returns the most recently inserted operation id if any or \"\" if oplog is empty\nfunc (oplog *OpLog) LastId() string {\n\tc := oplog.c()\n\tdefer c.Close()\n\toperation := &Operation{}\n\tc.Find(nil).Sort(\"-$natural\").One(operation)\n\tif operation.Id != nil {\n\t\treturn operation.Id.Hex()\n\t}\n\treturn \"\"\n}\n\n\/\/ tail creates a tail cursor starting at a given id\nfunc (oplog *OpLog) tail(c *OpLogCollection, lastId *bson.ObjectId, filter OpLogFilter) *mgo.Iter {\n\tquery := bson.M{}\n\tif filter.Type != \"\" {\n\t\tquery[\"data.t\"] = filter.Type\n\t}\n\tif lastId == nil {\n\t\t\/\/ If no last id provided, find the last operation id in the colleciton\n\t\tlastId = objectId(oplog.LastId())\n\t}\n\tif lastId != nil {\n\t\tquery[\"_id\"] = bson.M{\"$gt\": lastId}\n\t}\n\treturn c.Find(query).Sort(\"$natural\").Tail(5 * time.Second)\n}\n\n\/\/ Tail tails all the new operations in the oplog and send the operation in\n\/\/ the given channel. If the lastId parameter is given, all operation posted after\n\/\/ this event will be returned.\nfunc (oplog *OpLog) Tail(lastId string, filter OpLogFilter, out chan<- Operation, err chan<- error) {\n\tc := oplog.c()\n\toperation := Operation{}\n\tlastObjectId := objectId(lastId)\n\titer := oplog.tail(c, lastObjectId, filter)\n\n\tfor {\n\t\tfor iter.Next(&operation) {\n\t\t\tlastObjectId = operation.Id\n\t\t\tout <- operation\n\t\t}\n\n\t\tif iter.Timeout() {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to reconnect\n\t\tlog.Warnf(\"OPLOG tail failed with error, try to reconnect: %s\", iter.Err())\n\t\titer.Close()\n\t\tc.Close()\n\t\tc = oplog.c()\n\t\titer = oplog.tail(c, lastObjectId, filter)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n<commit_msg>Fix a crash if an invalid object id is passed<commit_after>package oplog\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype OpLog struct {\n\ts *mgo.Session\n\tStatus *OpLogStatus\n}\n\ntype OpLogStatus struct {\n\tStatus string `json:\"status\"`\n\t\/\/ Total number of events recieved on the UDP interface\n\tEventsReceived int `json:\"events_received\"`\n\t\/\/ Total number of events ingested into MongoDB with success\n\tEventsIngested int `json:\"events_ingested\"`\n\t\/\/ Total number of events received on the UDP interface with an invalid format\n\tEventsError int `json:\"events_error\"`\n\t\/\/ Total number of events discarded because the queue was full\n\tEventsDiscarded int `json:\"events_discarded\"`\n\t\/\/ Current number of events in the ingestion queue\n\tQueueSize int `json:\"queue_size\"`\n\t\/\/ Maximum number of events allowed in the ingestion queue before discarding events\n\tQueueMaxSize int `json:\"queue_max_size\"`\n\t\/\/ Number of clients connected to the SSE API\n\tClients int `json:\"clients\"`\n}\n\ntype OpLogFilter struct {\n\tType string\n}\n\ntype OpLogCollection struct {\n\t*mgo.Collection\n}\n\n\/\/ Operation represents an operation stored in the OpLog, ready to be exposed as SSE.\ntype Operation struct {\n\tId *bson.ObjectId `bson:\"_id,omitempty\"`\n\tEvent string `bson:\"event\"`\n\tData *OperationData `bson:\"data\"`\n}\n\n\/\/ OperationData is the data part of the SSE event for the operation.\ntype OperationData struct {\n\tTimestamp time.Time `bson:\"ts\" json:\"timestamp\"`\n\tUserId string `bson:\"uid\" json:\"user_id\"`\n\tType string `bson:\"t\" json:\"type\"`\n\tId string `bson:\"id\" json:\"id\"`\n}\n\n\/\/ parseObjectId returns a bson.ObjectId from an hex representation of an object id or nil\n\/\/ if an empty string is passed or if the format of the id wasn't valid\nfunc parseObjectId(id string) *bson.ObjectId {\n\tif id != \"\" && bson.IsObjectIdHex(id) {\n\t\toid := bson.ObjectIdHex(id)\n\t\treturn &oid\n\t}\n\treturn nil\n}\n\n\/\/ Close closes the underlaying mgo session\nfunc (c *OpLogCollection) Close() {\n\tc.Database.Session.Close()\n}\n\n\/\/ Info returns a human readable version of the operation\nfunc (op *Operation) Info() string {\n\tid := \"(new)\"\n\tif op.Id != nil {\n\t\tid = op.Id.Hex()\n\t}\n\treturn fmt.Sprintf(\"%s:%s(%s:%s from %s)\", id, op.Event, op.Data.Type, op.Data.Id, op.Data.UserId)\n}\n\n\/\/ NewOpLog returns an OpLog connected to the given provided mongo URL.\n\/\/ If the capped collection does not exists, it will be created with the max\n\/\/ size defined by maxBytes parameter.\nfunc NewOpLog(mongoURL string, maxBytes int) (*OpLog, error) {\n\tsession, err := mgo.Dial(mongoURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toplog := &OpLog{session, &OpLogStatus{Status: \"OK\"}}\n\toplog.init(maxBytes)\n\treturn oplog, nil\n}\n\nfunc (oplog *OpLog) c() *OpLogCollection {\n\treturn &OpLogCollection{oplog.s.Copy().DB(\"\").C(\"oplog\")}\n}\n\n\/\/ init creates capped collection if it does not exists.\nfunc (oplog *OpLog) init(maxBytes int) {\n\texists := false\n\tnames, _ := oplog.s.DB(\"\").CollectionNames()\n\tfor _, name := range names {\n\t\tif name == \"oplog\" {\n\t\t\texists = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !exists {\n\t\tlog.Info(\"OPLOG creating capped collection\")\n\t\toplog.c().Create(&mgo.CollectionInfo{\n\t\t\tCapped: true,\n\t\t\tMaxBytes: maxBytes,\n\t\t})\n\t}\n}\n\n\/\/ Insert append a operation into the OpLog\nfunc (oplog *OpLog) Ingest(ops <-chan *Operation) {\n\tc := oplog.c()\n\tfor {\n\t\tselect {\n\t\tcase op := <-ops:\n\t\t\tlog.Debugf(\"OPLOG ingest operation: %#v\", op.Info())\n\t\t\toplog.Status.QueueSize = len(ops)\n\t\t\tfor {\n\t\t\t\tif err := c.Insert(op); err != nil {\n\t\t\t\t\tlog.Warnf(\"OPLOG can't insert operation, try to reconnect: %s\", err)\n\t\t\t\t\t\/\/ Try to reconnect\n\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t\tc.Close()\n\t\t\t\t\tc = oplog.c()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\toplog.Status.EventsIngested++\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ HasId checks if an operation id is present in the capped collection.\nfunc (oplog *OpLog) HasId(id string) bool {\n\tc := oplog.c()\n\tdefer c.Close()\n\toid := parseObjectId(id)\n\tif oid == nil {\n\t\treturn false\n\t}\n\tcount, err := c.FindId(oid).Count()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn count != 0\n}\n\n\/\/ LastId returns the most recently inserted operation id if any or \"\" if oplog is empty\nfunc (oplog *OpLog) LastId() string {\n\tc := oplog.c()\n\tdefer c.Close()\n\toperation := &Operation{}\n\tc.Find(nil).Sort(\"-$natural\").One(operation)\n\tif operation.Id != nil {\n\t\treturn operation.Id.Hex()\n\t}\n\treturn \"\"\n}\n\n\/\/ tail creates a tail cursor starting at a given id\nfunc (oplog *OpLog) tail(c *OpLogCollection, lastId *bson.ObjectId, filter OpLogFilter) *mgo.Iter {\n\tquery := bson.M{}\n\tif filter.Type != \"\" {\n\t\tquery[\"data.t\"] = filter.Type\n\t}\n\tif lastId == nil {\n\t\t\/\/ If no last id provided, find the last operation id in the colleciton\n\t\tlastId = parseObjectId(oplog.LastId())\n\t}\n\tif lastId != nil {\n\t\tquery[\"_id\"] = bson.M{\"$gt\": lastId}\n\t}\n\treturn c.Find(query).Sort(\"$natural\").Tail(5 * time.Second)\n}\n\n\/\/ Tail tails all the new operations in the oplog and send the operation in\n\/\/ the given channel. If the lastId parameter is given, all operation posted after\n\/\/ this event will be returned.\nfunc (oplog *OpLog) Tail(lastId string, filter OpLogFilter, out chan<- Operation, err chan<- error) {\n\tc := oplog.c()\n\toperation := Operation{}\n\tlastObjectId := parseObjectId(lastId)\n\titer := oplog.tail(c, lastObjectId, filter)\n\n\tfor {\n\t\tfor iter.Next(&operation) {\n\t\t\tlastObjectId = operation.Id\n\t\t\tout <- operation\n\t\t}\n\n\t\tif iter.Timeout() {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to reconnect\n\t\tlog.Warnf(\"OPLOG tail failed with error, try to reconnect: %s\", iter.Err())\n\t\titer.Close()\n\t\tc.Close()\n\t\tc = oplog.c()\n\t\titer = oplog.tail(c, lastObjectId, filter)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Akamai Technologies, Inc.\n\/\/ Please see the accompanying LICENSE file for licensing information.\n\/\/\n\/\/ Note: this file is a derivative work of\n\/\/\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.py\/blob\/3777bee2c2cdb263d4ba09dd8ff0974b48f6b87c\/ot\/text_operation.py\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.py\/blob\/3777bee2c2cdb263d4ba09dd8ff0974b48f6b87c\/ot\/client.py\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.v\/blob\/d48f2598142236ee8980247060c98ba3175c464a\/ListOperation.v\n\/\/\n\/\/ Copyright © 2012-2013 Tim Baumann, http:\/\/timbaumann.info\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the “Software”), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage ot\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\ntype Tag int\n\ntype Op struct {\n\t\/\/ Len is either delete-len (if negative) or retain-len (if positive)\n\tSize int\n\n\t\/\/ Body is the text to be inserted\n\tBody []rune\n}\n\nfunc (o Op) Clone() Op {\n\tvar body2 []rune\n\tif len(o.Body) > 0 {\n\t\tbody2 = make([]rune, len(o.Body))\n\t\tcopy(body2, o.Body)\n\t}\n\treturn Op{\n\t\tSize: o.Size,\n\t\tBody: body2,\n\t}\n}\n\nfunc (o *Op) IsRetain() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size > 0\n}\n\nfunc (o *Op) IsDelete() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size < 0\n}\n\nfunc (o *Op) IsInsert() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size == 0\n\t\/\/ return o.Size == 0 && len(o.Body) > 0\n}\n\nfunc (o *Op) Len() int {\n\tswitch {\n\tcase o == nil:\n\t\treturn 0\n\tcase o.Size < 0:\n\t\treturn -o.Size\n\tcase o.Size > 0:\n\t\treturn o.Size\n\tdefault:\n\t\treturn len(o.Body)\n\t}\n}\n\nfunc AsRunes(s string) []rune {\n\trs := make([]rune, utf8.RuneCountInString(s))\n\tfor i, r := range s {\n\t\trs[i] = r\n\t}\n\treturn rs\n}\n\nfunc AsString(rs []rune) string {\n\tbuf := bytes.Buffer{}\n\tfor _, r := range rs {\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n\nfunc (o *Op) String() string {\n\tswitch {\n\tcase o == nil:\n\t\treturn \"N \"\n\tcase o.IsDelete():\n\t\treturn fmt.Sprintf(\"D%d\", -o.Size)\n\tcase o.IsRetain():\n\t\treturn fmt.Sprintf(\"R%d\", o.Size)\n\tcase o.IsInsert():\n\t\treturn fmt.Sprintf(\"I%s\", AsString(o.Body))\n\tdefault:\n\t\treturn fmt.Sprintf(\"E%#v\", o)\n\t}\n}\n\ntype Ops []Op\n\nfunc (os Ops) Clone() Ops {\n\tos2 := make(Ops, len(os))\n\tfor i, op := range os {\n\t\tos2[i] = op.Clone()\n\t}\n\treturn os2\n}\n\nfunc (os Ops) String() string {\n\tif len(os) > 0 {\n\t\tstrs := []string{}\n\t\tfor _, o := range os {\n\t\t\tstrs = append(strs, o.String())\n\t\t}\n\t\treturn fmt.Sprintf(\"[%s]\", strings.Join(strs, \" \"))\n\t} else {\n\t\treturn \"[]\"\n\t}\n}\n\nfunc (os Ops) First() *Op {\n\treturn &os[0]\n}\n\nfunc (os Ops) Last() *Op {\n\treturn &os[len(os)-1]\n}\n\nfunc (os Ops) Rest() Ops {\n\treturn os[1:]\n}\n\nfunc (op *Op) extendBody(rhs []rune) {\n\tlhs := op.Body\n\top.Body = make([]rune, len(lhs)+len(rhs))\n\tcopy(op.Body, lhs)\n\tcopy(op.Body[len(lhs):], rhs)\n}\n\nfunc (os *Ops) insertPenultimate(op Op) {\n\trhs := *os\n\trlen := len(rhs)\n\tlhs := make(Ops, rlen+1)\n\tcopy(lhs, rhs[:rlen-1])\n\tlhs[rlen-1] = op\n\tlhs[rlen] = rhs[rlen-1]\n\t*os = lhs\n}\n\nfunc (os *Ops) insertUltimate(op Op) {\n\trhs := *os\n\tlhs := make(Ops, len(rhs))\n\tcopy(lhs, rhs)\n\tlhs = append(lhs, op)\n\t*os = lhs\n}\n\nfunc (os *Ops) Insert(body []rune) {\n\tops := *os\n\tolen := len(ops)\n\tswitch {\n\tcase len(body) == 0:\n\t\tbreak\n\tcase olen > 0 && os.Last().IsInsert():\n\t\tops.Last().extendBody(body)\n\tcase olen > 0 && os.Last().IsDelete():\n\t\tif olen > 1 && ops[olen-2].IsInsert() {\n\t\t\t(&ops[olen-2]).extendBody(body)\n\t\t} else {\n\t\t\tos.insertPenultimate(Op{Body: body})\n\t\t}\n\tdefault:\n\t\tos.insertUltimate(Op{Body: body})\n\t}\n}\n\nfunc (os *Ops) Retain(size int) {\n\tswitch {\n\tcase size == 0:\n\t\treturn\n\tcase len(*os) > 0 && os.Last().IsRetain():\n\t\tos.Last().Size += size\n\tdefault:\n\t\tos.insertUltimate(Op{Size: size})\n\t}\n}\n\nfunc (os *Ops) Delete(size int) {\n\tops := *os\n\tolen := len(ops)\n\tif size == 0 {\n\t\treturn\n\t}\n\tif size > 0 {\n\t\tsize = -size\n\t}\n\tif olen > 0 && ops[olen-1].IsDelete() {\n\t\tops[olen-1].Size += size\n\t} else {\n\t\tos.insertUltimate(Op{Size: size})\n\t}\n}\n\nfunc (o Op) MarshalJSON() ([]byte, error) {\n\tswitch {\n\tcase o.IsDelete() || o.IsRetain():\n\t\treturn json.Marshal(o.Size)\n\tdefault:\n\t\treturn json.Marshal(AsString(o.Body))\n\t}\n}\n\nfunc (o *Op) UnmarshalJSON(data []byte) error {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn fmt.Errorf(\"illegal op: %q\", data)\n\tcase data[0] == '\"':\n\t\tvar s string\n\t\tif err := json.Unmarshal(data, &s); err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.Body = AsRunes(s)\n\t\treturn nil\n\tdefault:\n\t\treturn json.Unmarshal(data, &o.Size)\n\t}\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (o Op) Shorten(nl int) Op {\n\tswitch {\n\tcase o.IsRetain():\n\t\to.Size -= nl\n\tcase o.IsDelete():\n\t\to.Size += nl\n\tcase o.IsInsert():\n\t\to.Body = o.Body[nl:]\n\t}\n\treturn o\n}\n\nfunc shortenOps(a *Op, b *Op) (*Op, *Op) {\n\tla := a.Len()\n\tlb := b.Len()\n\tswitch {\n\tcase la == lb:\n\t\treturn nil, nil\n\tcase a != nil && b != nil && la > lb:\n\t\ta2 := a.Shorten(lb)\n\t\treturn &a2, nil\n\tcase a != nil && b != nil && la <= lb:\n\t\tb2 := b.Shorten(la)\n\t\treturn nil, &b2\n\t}\n\treturn a, b\n}\n\nfunc Compose(as, bs Ops) Ops {\n\tops := Ops{}\n\n\tif len(as) == 0 {\n\t\tops := make(Ops, len(bs))\n\t\tcopy(ops, bs)\n\t\treturn ops\n\t}\n\nFix:\n\tfor {\n\t\tswitch {\n\t\tcase len(as) == 0 && len(bs) == 0:\n\t\t\tbreak Fix\n\t\tcase len(as) > 0 && as.First().IsDelete():\n\t\t\tops.Delete(as.First().Size)\n\t\t\tas = as.Rest()\n\t\t\tcontinue\n\t\tcase len(bs) > 0 && bs.First().IsInsert():\n\t\t\tops.Insert(bs.First().Body)\n\t\t\tbs = bs.Rest()\n\t\t\tcontinue\n\t\tcase len(as) > 0 && len(bs) > 0:\n\t\t\ta := as.First()\n\t\t\tb := bs.First()\n\t\t\tminlen := min(a.Len(), b.Len())\n\t\t\tswitch {\n\t\t\tcase a.IsRetain() && b.IsRetain():\n\t\t\t\tops.Retain(minlen)\n\t\t\tcase a.IsRetain() && b.IsDelete():\n\t\t\t\tops.Delete(minlen)\n\t\t\tcase a.IsInsert() && b.IsRetain():\n\t\t\t\tops.Insert(a.Body[:minlen])\n\t\t\tcase a.IsInsert() && b.IsDelete():\n\t\t\t\t\/\/ops.Delete(minlen)\n\t\t\t}\n\t\t\ta, b = shortenOps(a, b)\n\t\t\tif a == nil {\n\t\t\t\tas = as.Rest()\n\t\t\t} else {\n\t\t\t\tas = append(Ops{*a}, as.Rest()...)\n\t\t\t}\n\t\t\tif b == nil {\n\t\t\t\tbs = bs.Rest()\n\t\t\t} else {\n\t\t\t\tbs = append(Ops{*b}, bs.Rest()...)\n\t\t\t}\n\t\t\tcontinue\n\t\tcase len(as) > 0 && len(bs) == 0:\n\t\t\tops = append(ops, as...)\n\t\t\tas = nil\n\t\t\tcontinue\n\t\tcase len(as) == 0 && len(bs) > 0:\n\t\t\tops = append(ops, bs...)\n\t\t\tbs = nil\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tpanic(\"impossible\")\n\t\t}\n\t}\n\n\treturn ops\n}\n\nfunc Transform(as, bs Ops) (Ops, Ops) {\n\tvar ai, bi int\n\tvar al, bl int\n\tvar aos, bos Ops\n\tvar a, b *Op\n\n\tal = len(as)\n\tbl = len(bs)\n\n\tif ai < al {\n\t\ta = &as[ai]\n\t}\n\tif bi < bl {\n\t\tb = &bs[bi]\n\t}\n\n\tif al == 0 {\n\t\treturn nil, bs.Clone()\n\t}\n\tif bl == 0 {\n\t\treturn as.Clone(), nil\n\t}\n\n\tvar loopCount = -1\n\n\tfmt.Printf(\"xform: as: %s, bs: %s, a: %s, b: %s, ai: %d, bi: %d\\n\", as, bs, a, b, ai, bi)\n\tfor {\n\t\tloopCount++\n\t\tfmt.Printf(\"loop: %d, a: %s, b: %s, ai: %d, bi: %d\\n\", loopCount, a, b, ai, bi)\n\n\t\tif a == nil && b == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif a.IsInsert() {\n\t\t\taos.Insert(a.Body)\n\t\t\tbos.Retain(a.Len())\n\t\t\tai++\n\t\t\tif ai < al {\n\t\t\t\ta = &as[ai]\n\t\t\t} else {\n\t\t\t\ta = nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif b.IsInsert() {\n\t\t\taos.Retain(b.Len())\n\t\t\tbos.Insert(b.Body)\n\t\t\tbi++\n\t\t\tif bi < bl {\n\t\t\t\tb = &bs[bi]\n\t\t\t} else {\n\t\t\t\tb = nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif a == nil {\n\t\t\tpanic(\"boom1\")\n\t\t}\n\t\tif b == nil {\n\t\t\tpanic(\"boom2\")\n\t\t}\n\n\t\tminlen := min(a.Len(), b.Len())\n\t\tswitch {\n\t\tcase a.IsRetain() && b.IsRetain():\n\t\t\taos.Retain(minlen)\n\t\t\tbos.Retain(minlen)\n\t\tcase a.IsDelete() && b.IsRetain():\n\t\t\taos.Delete(minlen)\n\t\tcase a.IsRetain() && b.IsDelete():\n\t\t\tbos.Delete(minlen)\n\t\t}\n\t\ta2, b2 := a, b\n\t\ta, b = shortenOps(a, b)\n\t\tfmt.Printf(\"shorten:\\n\\ta : %s\\n\\tb : %s\\n\\ta2: %s\\n\\tb2: %s\\n\", a2, b2, a, b)\n\t\tif a == nil {\n\t\t\tai++\n\t\t\tif ai < al {\n\t\t\t\ta = &as[ai]\n\t\t\t} else {\n\t\t\t\ta = nil\n\t\t\t}\n\t\t}\n\t\tif b == nil {\n\t\t\tbi++\n\t\t\tif bi < bl {\n\t\t\t\tb = &bs[bi]\n\t\t\t} else {\n\t\t\t\tb = nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn aos, bos\n}\n\nfunc Normalize(os Ops) Ops {\n\tos2 := Ops{}\n\tfor _, o := range os {\n\t\tif o.Len() != 0 {\n\t\t\tos2 = append(os2, o)\n\t\t}\n\t}\n\treturn os2\n}\n\ntype Doc struct {\n\t\/\/ Current text\n\tbody []rune\n}\n\nfunc NewDoc() *Doc {\n\treturn &Doc{\n\t\tbody: []rune{},\n\t}\n}\n\nfunc (d *Doc) Len() int {\n\treturn len(d.body)\n}\n\nfunc (d *Doc) String() string {\n\tbuf := bytes.Buffer{}\n\tfor _, r := range d.body {\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n\nfunc (d *Doc) Apply(os Ops) {\n\tif len(os) == 0 {\n\t\treturn\n\t}\n\tpos := 0\n\tparts := [][]rune{}\n\tfor _, o := range os {\n\t\tswitch {\n\t\tcase o.IsDelete():\n\t\t\tpos += o.Len()\n\t\tcase o.IsRetain() && o.Len() > 0:\n\t\t\tparts = append(parts, d.body[pos:pos+o.Len()])\n\t\t\tpos += o.Len()\n\t\tcase o.IsInsert():\n\t\t\tparts = append(parts, o.Body)\n\t\t}\n\t}\n\tsize := 0\n\tfor _, p := range parts {\n\t\tsize += len(p)\n\t}\n\td.body = make([]rune, size)\n\n\tidx := 0\n\tfor _, p := range parts {\n\t\tcopy(d.body[idx:idx+len(p)], p)\n\t\tidx += len(p)\n\t}\n}\n<commit_msg>Quiet ot.<commit_after>\/\/ Copyright 2014 Akamai Technologies, Inc.\n\/\/ Please see the accompanying LICENSE file for licensing information.\n\/\/\n\/\/ Note: this file is a derivative work of\n\/\/\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.py\/blob\/3777bee2c2cdb263d4ba09dd8ff0974b48f6b87c\/ot\/text_operation.py\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.py\/blob\/3777bee2c2cdb263d4ba09dd8ff0974b48f6b87c\/ot\/client.py\n\/\/ https:\/\/github.com\/Operational-Transformation\/ot.v\/blob\/d48f2598142236ee8980247060c98ba3175c464a\/ListOperation.v\n\/\/\n\/\/ Copyright © 2012-2013 Tim Baumann, http:\/\/timbaumann.info\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the “Software”), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage ot\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\ntype Tag int\n\ntype Op struct {\n\t\/\/ Len is either delete-len (if negative) or retain-len (if positive)\n\tSize int\n\n\t\/\/ Body is the text to be inserted\n\tBody []rune\n}\n\nfunc (o Op) Clone() Op {\n\tvar body2 []rune\n\tif len(o.Body) > 0 {\n\t\tbody2 = make([]rune, len(o.Body))\n\t\tcopy(body2, o.Body)\n\t}\n\treturn Op{\n\t\tSize: o.Size,\n\t\tBody: body2,\n\t}\n}\n\nfunc (o *Op) IsRetain() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size > 0\n}\n\nfunc (o *Op) IsDelete() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size < 0\n}\n\nfunc (o *Op) IsInsert() bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Size == 0\n\t\/\/ return o.Size == 0 && len(o.Body) > 0\n}\n\nfunc (o *Op) Len() int {\n\tswitch {\n\tcase o == nil:\n\t\treturn 0\n\tcase o.Size < 0:\n\t\treturn -o.Size\n\tcase o.Size > 0:\n\t\treturn o.Size\n\tdefault:\n\t\treturn len(o.Body)\n\t}\n}\n\nfunc AsRunes(s string) []rune {\n\trs := make([]rune, utf8.RuneCountInString(s))\n\tfor i, r := range s {\n\t\trs[i] = r\n\t}\n\treturn rs\n}\n\nfunc AsString(rs []rune) string {\n\tbuf := bytes.Buffer{}\n\tfor _, r := range rs {\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n\nfunc (o *Op) String() string {\n\tswitch {\n\tcase o == nil:\n\t\treturn \"N \"\n\tcase o.IsDelete():\n\t\treturn fmt.Sprintf(\"D%d\", -o.Size)\n\tcase o.IsRetain():\n\t\treturn fmt.Sprintf(\"R%d\", o.Size)\n\tcase o.IsInsert():\n\t\treturn fmt.Sprintf(\"I%s\", AsString(o.Body))\n\tdefault:\n\t\treturn fmt.Sprintf(\"E%#v\", o)\n\t}\n}\n\ntype Ops []Op\n\nfunc (os Ops) Clone() Ops {\n\tos2 := make(Ops, len(os))\n\tfor i, op := range os {\n\t\tos2[i] = op.Clone()\n\t}\n\treturn os2\n}\n\nfunc (os Ops) String() string {\n\tif len(os) > 0 {\n\t\tstrs := []string{}\n\t\tfor _, o := range os {\n\t\t\tstrs = append(strs, o.String())\n\t\t}\n\t\treturn fmt.Sprintf(\"[%s]\", strings.Join(strs, \" \"))\n\t} else {\n\t\treturn \"[]\"\n\t}\n}\n\nfunc (os Ops) First() *Op {\n\treturn &os[0]\n}\n\nfunc (os Ops) Last() *Op {\n\treturn &os[len(os)-1]\n}\n\nfunc (os Ops) Rest() Ops {\n\treturn os[1:]\n}\n\nfunc (op *Op) extendBody(rhs []rune) {\n\tlhs := op.Body\n\top.Body = make([]rune, len(lhs)+len(rhs))\n\tcopy(op.Body, lhs)\n\tcopy(op.Body[len(lhs):], rhs)\n}\n\nfunc (os *Ops) insertPenultimate(op Op) {\n\trhs := *os\n\trlen := len(rhs)\n\tlhs := make(Ops, rlen+1)\n\tcopy(lhs, rhs[:rlen-1])\n\tlhs[rlen-1] = op\n\tlhs[rlen] = rhs[rlen-1]\n\t*os = lhs\n}\n\nfunc (os *Ops) insertUltimate(op Op) {\n\trhs := *os\n\tlhs := make(Ops, len(rhs))\n\tcopy(lhs, rhs)\n\tlhs = append(lhs, op)\n\t*os = lhs\n}\n\nfunc (os *Ops) Insert(body []rune) {\n\tops := *os\n\tolen := len(ops)\n\tswitch {\n\tcase len(body) == 0:\n\t\tbreak\n\tcase olen > 0 && os.Last().IsInsert():\n\t\tops.Last().extendBody(body)\n\tcase olen > 0 && os.Last().IsDelete():\n\t\tif olen > 1 && ops[olen-2].IsInsert() {\n\t\t\t(&ops[olen-2]).extendBody(body)\n\t\t} else {\n\t\t\tos.insertPenultimate(Op{Body: body})\n\t\t}\n\tdefault:\n\t\tos.insertUltimate(Op{Body: body})\n\t}\n}\n\nfunc (os *Ops) Retain(size int) {\n\tswitch {\n\tcase size == 0:\n\t\treturn\n\tcase len(*os) > 0 && os.Last().IsRetain():\n\t\tos.Last().Size += size\n\tdefault:\n\t\tos.insertUltimate(Op{Size: size})\n\t}\n}\n\nfunc (os *Ops) Delete(size int) {\n\tops := *os\n\tolen := len(ops)\n\tif size == 0 {\n\t\treturn\n\t}\n\tif size > 0 {\n\t\tsize = -size\n\t}\n\tif olen > 0 && ops[olen-1].IsDelete() {\n\t\tops[olen-1].Size += size\n\t} else {\n\t\tos.insertUltimate(Op{Size: size})\n\t}\n}\n\nfunc (o Op) MarshalJSON() ([]byte, error) {\n\tswitch {\n\tcase o.IsDelete() || o.IsRetain():\n\t\treturn json.Marshal(o.Size)\n\tdefault:\n\t\treturn json.Marshal(AsString(o.Body))\n\t}\n}\n\nfunc (o *Op) UnmarshalJSON(data []byte) error {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn fmt.Errorf(\"illegal op: %q\", data)\n\tcase data[0] == '\"':\n\t\tvar s string\n\t\tif err := json.Unmarshal(data, &s); err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.Body = AsRunes(s)\n\t\treturn nil\n\tdefault:\n\t\treturn json.Unmarshal(data, &o.Size)\n\t}\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (o Op) Shorten(nl int) Op {\n\tswitch {\n\tcase o.IsRetain():\n\t\to.Size -= nl\n\tcase o.IsDelete():\n\t\to.Size += nl\n\tcase o.IsInsert():\n\t\to.Body = o.Body[nl:]\n\t}\n\treturn o\n}\n\nfunc shortenOps(a *Op, b *Op) (*Op, *Op) {\n\tla := a.Len()\n\tlb := b.Len()\n\tswitch {\n\tcase la == lb:\n\t\treturn nil, nil\n\tcase a != nil && b != nil && la > lb:\n\t\ta2 := a.Shorten(lb)\n\t\treturn &a2, nil\n\tcase a != nil && b != nil && la <= lb:\n\t\tb2 := b.Shorten(la)\n\t\treturn nil, &b2\n\t}\n\treturn a, b\n}\n\nfunc Compose(as, bs Ops) Ops {\n\tops := Ops{}\n\n\tif len(as) == 0 {\n\t\tops := make(Ops, len(bs))\n\t\tcopy(ops, bs)\n\t\treturn ops\n\t}\n\nFix:\n\tfor {\n\t\tswitch {\n\t\tcase len(as) == 0 && len(bs) == 0:\n\t\t\tbreak Fix\n\t\tcase len(as) > 0 && as.First().IsDelete():\n\t\t\tops.Delete(as.First().Size)\n\t\t\tas = as.Rest()\n\t\t\tcontinue\n\t\tcase len(bs) > 0 && bs.First().IsInsert():\n\t\t\tops.Insert(bs.First().Body)\n\t\t\tbs = bs.Rest()\n\t\t\tcontinue\n\t\tcase len(as) > 0 && len(bs) > 0:\n\t\t\ta := as.First()\n\t\t\tb := bs.First()\n\t\t\tminlen := min(a.Len(), b.Len())\n\t\t\tswitch {\n\t\t\tcase a.IsRetain() && b.IsRetain():\n\t\t\t\tops.Retain(minlen)\n\t\t\tcase a.IsRetain() && b.IsDelete():\n\t\t\t\tops.Delete(minlen)\n\t\t\tcase a.IsInsert() && b.IsRetain():\n\t\t\t\tops.Insert(a.Body[:minlen])\n\t\t\tcase a.IsInsert() && b.IsDelete():\n\t\t\t\t\/\/ops.Delete(minlen)\n\t\t\t}\n\t\t\ta, b = shortenOps(a, b)\n\t\t\tif a == nil {\n\t\t\t\tas = as.Rest()\n\t\t\t} else {\n\t\t\t\tas = append(Ops{*a}, as.Rest()...)\n\t\t\t}\n\t\t\tif b == nil {\n\t\t\t\tbs = bs.Rest()\n\t\t\t} else {\n\t\t\t\tbs = append(Ops{*b}, bs.Rest()...)\n\t\t\t}\n\t\t\tcontinue\n\t\tcase len(as) > 0 && len(bs) == 0:\n\t\t\tops = append(ops, as...)\n\t\t\tas = nil\n\t\t\tcontinue\n\t\tcase len(as) == 0 && len(bs) > 0:\n\t\t\tops = append(ops, bs...)\n\t\t\tbs = nil\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tpanic(\"impossible\")\n\t\t}\n\t}\n\n\treturn ops\n}\n\nfunc Transform(as, bs Ops) (Ops, Ops) {\n\tvar ai, bi int\n\tvar al, bl int\n\tvar aos, bos Ops\n\tvar a, b *Op\n\n\tal = len(as)\n\tbl = len(bs)\n\n\tif ai < al {\n\t\ta = &as[ai]\n\t}\n\tif bi < bl {\n\t\tb = &bs[bi]\n\t}\n\n\tif al == 0 {\n\t\treturn nil, bs.Clone()\n\t}\n\tif bl == 0 {\n\t\treturn as.Clone(), nil\n\t}\n\n\tvar loopCount = -1\n\n\t\/\/ fmt.Printf(\"xform: as: %s, bs: %s, a: %s, b: %s, ai: %d, bi: %d\\n\", as, bs, a, b, ai, bi)\n\tfor {\n\t\tloopCount++\n\t\t\/\/ fmt.Printf(\"loop: %d, a: %s, b: %s, ai: %d, bi: %d\\n\", loopCount, a, b, ai, bi)\n\n\t\tif a == nil && b == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif a.IsInsert() {\n\t\t\taos.Insert(a.Body)\n\t\t\tbos.Retain(a.Len())\n\t\t\tai++\n\t\t\tif ai < al {\n\t\t\t\ta = &as[ai]\n\t\t\t} else {\n\t\t\t\ta = nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif b.IsInsert() {\n\t\t\taos.Retain(b.Len())\n\t\t\tbos.Insert(b.Body)\n\t\t\tbi++\n\t\t\tif bi < bl {\n\t\t\t\tb = &bs[bi]\n\t\t\t} else {\n\t\t\t\tb = nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif a == nil {\n\t\t\tpanic(\"boom1\")\n\t\t}\n\t\tif b == nil {\n\t\t\tpanic(\"boom2\")\n\t\t}\n\n\t\tminlen := min(a.Len(), b.Len())\n\t\tswitch {\n\t\tcase a.IsRetain() && b.IsRetain():\n\t\t\taos.Retain(minlen)\n\t\t\tbos.Retain(minlen)\n\t\tcase a.IsDelete() && b.IsRetain():\n\t\t\taos.Delete(minlen)\n\t\tcase a.IsRetain() && b.IsDelete():\n\t\t\tbos.Delete(minlen)\n\t\t}\n\t\t\/\/ a2, b2 := a, b\n\t\ta, b = shortenOps(a, b)\n\t\t\/\/ fmt.Printf(\"shorten:\\n\\ta : %s\\n\\tb : %s\\n\\ta2: %s\\n\\tb2: %s\\n\", a2, b2, a, b)\n\t\tif a == nil {\n\t\t\tai++\n\t\t\tif ai < al {\n\t\t\t\ta = &as[ai]\n\t\t\t} else {\n\t\t\t\ta = nil\n\t\t\t}\n\t\t}\n\t\tif b == nil {\n\t\t\tbi++\n\t\t\tif bi < bl {\n\t\t\t\tb = &bs[bi]\n\t\t\t} else {\n\t\t\t\tb = nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn aos, bos\n}\n\nfunc Normalize(os Ops) Ops {\n\tos2 := Ops{}\n\tfor _, o := range os {\n\t\tif o.Len() != 0 {\n\t\t\tos2 = append(os2, o)\n\t\t}\n\t}\n\treturn os2\n}\n\ntype Doc struct {\n\t\/\/ Current text\n\tbody []rune\n}\n\nfunc NewDoc() *Doc {\n\treturn &Doc{\n\t\tbody: []rune{},\n\t}\n}\n\nfunc (d *Doc) Len() int {\n\treturn len(d.body)\n}\n\nfunc (d *Doc) String() string {\n\tbuf := bytes.Buffer{}\n\tfor _, r := range d.body {\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n\nfunc (d *Doc) Apply(os Ops) {\n\tif len(os) == 0 {\n\t\treturn\n\t}\n\tpos := 0\n\tparts := [][]rune{}\n\tfor _, o := range os {\n\t\tswitch {\n\t\tcase o.IsDelete():\n\t\t\tpos += o.Len()\n\t\tcase o.IsRetain() && o.Len() > 0:\n\t\t\tparts = append(parts, d.body[pos:pos+o.Len()])\n\t\t\tpos += o.Len()\n\t\tcase o.IsInsert():\n\t\t\tparts = append(parts, o.Body)\n\t\t}\n\t}\n\tsize := 0\n\tfor _, p := range parts {\n\t\tsize += len(p)\n\t}\n\td.body = make([]rune, size)\n\n\tidx := 0\n\tfor _, p := range parts {\n\t\tcopy(d.body[idx:idx+len(p)], p)\n\t\tidx += len(p)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"time\"\n\n\t\"google.golang.org\/grpc\/metadata\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ ======== 兼容 qiniu\/log ===============\nconst (\n\tLdebug int = int(DebugLevel)\n\tLinfo = int(InfoLevel)\n\tLwarn = int(WarnLevel)\n\tLerror = int(ErrorLevel)\n\tLpanic = int(PanicLevel)\n\tLfatal = int(FatalLevel)\n)\n\n\/\/ ======== 兼容 qiniu\/log ===============\nfunc SetOutputLevel(l int) { v = Level(l) }\n\n\/\/ ======== 兼容 wothing\/log ===============\n\n\/\/ TraceIn and TraceOut use in function in and out,reduce code line\n\/\/ Example:\n\/\/\tfunc test() {\n\/\/\t\tuser := User{Name: \"zhangsan\", Age: 21, School: \"xayddx\"}\n\/\/\t\tservice := \"verification.GetVerifiCode\"\n\/\/\t\tdefer log.TraceOut(log.TraceIn(\"12345\", service, \"user:%v\", user))\n\/\/\t\t....\n\/\/\t}\n\n\/\/ TraceIn 方法入口打印日志\nfunc TraceIn(tag string, method string, format string, m ...interface{}) (string, string, time.Time) {\n\tstartTime := time.Now()\n\tstd.Tprintf(v, InfoLevel, tag, \"calling \"+method+\", \"+format, m...)\n\treturn tag, method, startTime\n}\n\n\/\/ TraceCtx 方法入口打印日志\nfunc TraceCtx(ctx context.Context, method string, format string, m ...interface{}) (string, string, time.Time) {\n\ttag := \"-\"\n\tif md, ok := metadata.FromContext(ctx); ok {\n\t\tif md[\"tid\"] != nil && len(md[\"tid\"]) > 0 {\n\t\t\ttag = md[\"tid\"][0]\n\t\t}\n\t}\n\tstartTime := time.Now()\n\tstd.Tprintf(v, InfoLevel, tag, \"calling \"+method+\", \"+format, m...)\n\treturn tag, method, startTime\n}\n\n\/\/ TraceOut 方法退出记录下消耗时间\nfunc TraceOut(tag string, method string, startTime time.Time) {\n\tstd.Tprintf(v, InfoLevel, tag, \"finished \"+method+\", took %v\", time.Since(startTime))\n}\n<commit_msg>add println method to wothing<commit_after>package log\n\nimport (\n\t\"time\"\n\n\t\"google.golang.org\/grpc\/metadata\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ ======== 兼容 qiniu\/log ===============\nconst (\n\tLdebug int = int(DebugLevel)\n\tLinfo = int(InfoLevel)\n\tLwarn = int(WarnLevel)\n\tLerror = int(ErrorLevel)\n\tLpanic = int(PanicLevel)\n\tLfatal = int(FatalLevel)\n)\n\n\/\/ ======== 兼容 qiniu\/log ===============\nfunc SetOutputLevel(l int) { v = Level(l) }\n\n\/\/ ======== 兼容 wothing\/log ===============\n\n\/\/ TraceIn and TraceOut use in function in and out,reduce code line\n\/\/ Example:\n\/\/\tfunc test() {\n\/\/\t\tuser := User{Name: \"zhangsan\", Age: 21, School: \"xayddx\"}\n\/\/\t\tservice := \"verification.GetVerifiCode\"\n\/\/\t\tdefer log.TraceOut(log.TraceIn(\"12345\", service, \"user:%v\", user))\n\/\/\t\t....\n\/\/\t}\n\nfunc Println(m ...interface{}) { std.Tprintf(v, PrintLevel, \"\", \"\", m...) }\n\n\/\/ TraceIn 方法入口打印日志\nfunc TraceIn(tag string, method string, format string, m ...interface{}) (string, string, time.Time) {\n\tstartTime := time.Now()\n\tstd.Tprintf(v, InfoLevel, tag, \"calling \"+method+\", \"+format, m...)\n\treturn tag, method, startTime\n}\n\n\/\/ TraceCtx 方法入口打印日志\nfunc TraceCtx(ctx context.Context, method string, format string, m ...interface{}) (string, string, time.Time) {\n\ttag := \"-\"\n\tif md, ok := metadata.FromContext(ctx); ok {\n\t\tif md[\"tid\"] != nil && len(md[\"tid\"]) > 0 {\n\t\t\ttag = md[\"tid\"][0]\n\t\t}\n\t}\n\tstartTime := time.Now()\n\tstd.Tprintf(v, InfoLevel, tag, \"calling \"+method+\", \"+format, m...)\n\treturn tag, method, startTime\n}\n\n\/\/ TraceOut 方法退出记录下消耗时间\nfunc TraceOut(tag string, method string, startTime time.Time) {\n\tstd.Tprintf(v, InfoLevel, tag, \"finished \"+method+\", took %v\", time.Since(startTime))\n}\n<|endoftext|>"} {"text":"<commit_before>package dragon\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ Imports generate zstdlib.go from api files and libs in GOPATH.\nfunc Imports() error {\n\tif !existGoImports() {\n\t\treturn errors.New(\"goimports command isn't installed\")\n\t}\n\n\tlibChan := make(chan lib, 1000)\n\tdone := make(chan error)\n\ttmp, err := ioutil.TempFile(outPath(), \"dragon-imports\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func(fname string) {\n\t\ttmp.Close()\n\t\tos.Remove(fname)\n\t}(tmp.Name())\n\n\tgo func() {\n\t\tdone <- out(libChan, tmp)\n\t}()\n\n\teg := &errgroup.Group{}\n\teg.Go(func() error {\n\t\treturn stdLibs(libChan)\n\t})\n\teg.Go(func() error {\n\t\treturn gopathLibs(libChan)\n\t})\n\terr = eg.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclose(libChan)\n\n\terr = <-done\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = tmp.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp.Close()\n\n\treturn installUsing(tmp.Name())\n}\n\ntype lib struct {\n\tpkg string\n\tobject string\n\tpath string\n}\n\nfunc existGoImports() bool {\n\tfor _, path := range [...]string{outPath(), cmdPath()} {\n\t\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>just calling `tmp.Close() is enough<commit_after>package dragon\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ Imports generate zstdlib.go from api files and libs in GOPATH.\nfunc Imports() error {\n\tif !existGoImports() {\n\t\treturn errors.New(\"goimports command isn't installed\")\n\t}\n\n\tlibChan := make(chan lib, 1000)\n\tdone := make(chan error)\n\ttmp, err := ioutil.TempFile(outPath(), \"dragon-imports\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func(fname string) {\n\t\ttmp.Close()\n\t\tos.Remove(fname)\n\t}(tmp.Name())\n\n\tgo func() {\n\t\tdone <- out(libChan, tmp)\n\t}()\n\n\teg := &errgroup.Group{}\n\teg.Go(func() error {\n\t\treturn stdLibs(libChan)\n\t})\n\teg.Go(func() error {\n\t\treturn gopathLibs(libChan)\n\t})\n\tif err := eg.Wait(); err != nil {\n\t\treturn err\n\t}\n\tclose(libChan)\n\n\terr = <-done\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := tmp.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn installUsing(tmp.Name())\n}\n\ntype lib struct {\n\tpkg string\n\tobject string\n\tpath string\n}\n\nfunc existGoImports() bool {\n\tfor _, path := range [...]string{outPath(), cmdPath()} {\n\t\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package arn\n\n\/\/ AniListAnimeList ...\ntype AniListAnimeList struct {\n\tID int `json:\"id\"`\n\tDisplayName string `json:\"display_name\"`\n\t\/\/ AnimeTime int `json:\"anime_time\"`\n\t\/\/ MangaChap int `json:\"manga_chap\"`\n\t\/\/ About string `json:\"about\"`\n\t\/\/ ListOrder int `json:\"list_order\"`\n\t\/\/ AdultContent bool `json:\"adult_content\"`\n\t\/\/ ForumHomepage int `json:\"forum_homepage\"`\n\t\/\/ LegacyLists bool `json:\"legacy_lists\"`\n\t\/\/ Donator int `json:\"donator\"`\n\t\/\/ Following bool `json:\"following\"`\n\t\/\/ ImageURLLge string `json:\"image_url_lge\"`\n\t\/\/ ImageURLMed string `json:\"image_url_med\"`\n\t\/\/ ImageURLBanner string `json:\"image_url_banner\"`\n\t\/\/ TitleLanguage string `json:\"title_language\"`\n\t\/\/ ScoreType int `json:\"score_type\"`\n\t\/\/ CustomListAnime []string `json:\"custom_list_anime\"`\n\t\/\/ CustomListManga []interface{} `json:\"custom_list_manga\"`\n\t\/\/ Stats struct {\n\t\/\/ \tStatusDistribution struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tWatching int `json:\"watching\"`\n\t\/\/ \t\t\tPlanToWatch int `json:\"plan to watch\"`\n\t\/\/ \t\t\tCompleted int `json:\"completed\"`\n\t\/\/ \t\t\tDropped int `json:\"dropped\"`\n\t\/\/ \t\t\tOnHold int `json:\"on-hold\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tReading int `json:\"reading\"`\n\t\/\/ \t\t\tPlanToRead int `json:\"plan to read\"`\n\t\/\/ \t\t\tCompleted int `json:\"completed\"`\n\t\/\/ \t\t\tDropped int `json:\"dropped\"`\n\t\/\/ \t\t\tOnHold int `json:\"on-hold\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"status_distribution\"`\n\t\/\/ \tScoreDistribution struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tNum10 int `json:\"10\"`\n\t\/\/ \t\t\tNum20 int `json:\"20\"`\n\t\/\/ \t\t\tNum30 int `json:\"30\"`\n\t\/\/ \t\t\tNum40 int `json:\"40\"`\n\t\/\/ \t\t\tNum50 int `json:\"50\"`\n\t\/\/ \t\t\tNum60 int `json:\"60\"`\n\t\/\/ \t\t\tNum70 int `json:\"70\"`\n\t\/\/ \t\t\tNum80 int `json:\"80\"`\n\t\/\/ \t\t\tNum90 int `json:\"90\"`\n\t\/\/ \t\t\tNum100 int `json:\"100\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tNum10 int `json:\"10\"`\n\t\/\/ \t\t\tNum20 int `json:\"20\"`\n\t\/\/ \t\t\tNum30 int `json:\"30\"`\n\t\/\/ \t\t\tNum40 int `json:\"40\"`\n\t\/\/ \t\t\tNum50 int `json:\"50\"`\n\t\/\/ \t\t\tNum60 int `json:\"60\"`\n\t\/\/ \t\t\tNum70 int `json:\"70\"`\n\t\/\/ \t\t\tNum80 int `json:\"80\"`\n\t\/\/ \t\t\tNum90 int `json:\"90\"`\n\t\/\/ \t\t\tNum100 int `json:\"100\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"score_distribution\"`\n\t\/\/ \tFavouriteGenres struct {\n\t\/\/ \t\tDrama int `json:\"Drama\"`\n\t\/\/ \t\tAction int `json:\"Action\"`\n\t\/\/ \t\tComedy int `json:\"Comedy\"`\n\t\/\/ \t\tAdventure int `json:\"Adventure\"`\n\t\/\/ \t\tRomance int `json:\"Romance\"`\n\t\/\/ \t\tPsychological int `json:\"Psychological\"`\n\t\/\/ \t\tFantasy int `json:\"Fantasy\"`\n\t\/\/ \t\tMystery int `json:\"Mystery\"`\n\t\/\/ \t\tSciFi int `json:\"Sci-Fi\"`\n\t\/\/ \t\tThriller int `json:\"Thriller\"`\n\t\/\/ \t} `json:\"favourite_genres\"`\n\t\/\/ \tListScores struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tMean int `json:\"mean\"`\n\t\/\/ \t\t\tStandardDeviation int `json:\"standard_deviation\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tMean int `json:\"mean\"`\n\t\/\/ \t\t\tStandardDeviation int `json:\"standard_deviation\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"list_scores\"`\n\t\/\/ } `json:\"stats\"`\n\t\/\/ AdvancedRating bool `json:\"advanced_rating\"`\n\t\/\/ AdvancedRatingNames []interface{} `json:\"advanced_rating_names\"`\n\t\/\/ Notifications int `json:\"notifications\"`\n\t\/\/ AiringNotifications int `json:\"airing_notifications\"`\n\t\/\/ UpdatedAt int `json:\"updated_at\"`\n\tLists struct {\n\t\tCompleted []*AniListAnimeListItem `json:\"completed\"`\n\t\tWatching []*AniListAnimeListItem `json:\"watching\"`\n\t\tPlanToWatch []*AniListAnimeListItem `json:\"plan_to_watch\"`\n\t\tDropped []*AniListAnimeListItem `json:\"dropped\"`\n\t\tOnHold []*AniListAnimeListItem `json:\"on_hold\"`\n\t} `json:\"lists\"`\n\tCustomLists [][]*AniListAnimeListItem `json:\"custom_lists\"`\n\t\/\/ CSS struct {\n\t\/\/ \tURL string `json:\"url\"`\n\t\/\/ \tPopup bool `json:\"popup\"`\n\t\/\/ \tCount bool `json:\"count\"`\n\t\/\/ \tCovers string `json:\"covers\"`\n\t\/\/ } `json:\"css\"`\n}\n<commit_msg>Fixed custom lists<commit_after>package arn\n\n\/\/ AniListAnimeList ...\ntype AniListAnimeList struct {\n\tID int `json:\"id\"`\n\tDisplayName string `json:\"display_name\"`\n\t\/\/ AnimeTime int `json:\"anime_time\"`\n\t\/\/ MangaChap int `json:\"manga_chap\"`\n\t\/\/ About string `json:\"about\"`\n\t\/\/ ListOrder int `json:\"list_order\"`\n\t\/\/ AdultContent bool `json:\"adult_content\"`\n\t\/\/ ForumHomepage int `json:\"forum_homepage\"`\n\t\/\/ LegacyLists bool `json:\"legacy_lists\"`\n\t\/\/ Donator int `json:\"donator\"`\n\t\/\/ Following bool `json:\"following\"`\n\t\/\/ ImageURLLge string `json:\"image_url_lge\"`\n\t\/\/ ImageURLMed string `json:\"image_url_med\"`\n\t\/\/ ImageURLBanner string `json:\"image_url_banner\"`\n\t\/\/ TitleLanguage string `json:\"title_language\"`\n\t\/\/ ScoreType int `json:\"score_type\"`\n\t\/\/ CustomListAnime []string `json:\"custom_list_anime\"`\n\t\/\/ CustomListManga []interface{} `json:\"custom_list_manga\"`\n\t\/\/ Stats struct {\n\t\/\/ \tStatusDistribution struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tWatching int `json:\"watching\"`\n\t\/\/ \t\t\tPlanToWatch int `json:\"plan to watch\"`\n\t\/\/ \t\t\tCompleted int `json:\"completed\"`\n\t\/\/ \t\t\tDropped int `json:\"dropped\"`\n\t\/\/ \t\t\tOnHold int `json:\"on-hold\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tReading int `json:\"reading\"`\n\t\/\/ \t\t\tPlanToRead int `json:\"plan to read\"`\n\t\/\/ \t\t\tCompleted int `json:\"completed\"`\n\t\/\/ \t\t\tDropped int `json:\"dropped\"`\n\t\/\/ \t\t\tOnHold int `json:\"on-hold\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"status_distribution\"`\n\t\/\/ \tScoreDistribution struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tNum10 int `json:\"10\"`\n\t\/\/ \t\t\tNum20 int `json:\"20\"`\n\t\/\/ \t\t\tNum30 int `json:\"30\"`\n\t\/\/ \t\t\tNum40 int `json:\"40\"`\n\t\/\/ \t\t\tNum50 int `json:\"50\"`\n\t\/\/ \t\t\tNum60 int `json:\"60\"`\n\t\/\/ \t\t\tNum70 int `json:\"70\"`\n\t\/\/ \t\t\tNum80 int `json:\"80\"`\n\t\/\/ \t\t\tNum90 int `json:\"90\"`\n\t\/\/ \t\t\tNum100 int `json:\"100\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tNum10 int `json:\"10\"`\n\t\/\/ \t\t\tNum20 int `json:\"20\"`\n\t\/\/ \t\t\tNum30 int `json:\"30\"`\n\t\/\/ \t\t\tNum40 int `json:\"40\"`\n\t\/\/ \t\t\tNum50 int `json:\"50\"`\n\t\/\/ \t\t\tNum60 int `json:\"60\"`\n\t\/\/ \t\t\tNum70 int `json:\"70\"`\n\t\/\/ \t\t\tNum80 int `json:\"80\"`\n\t\/\/ \t\t\tNum90 int `json:\"90\"`\n\t\/\/ \t\t\tNum100 int `json:\"100\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"score_distribution\"`\n\t\/\/ \tFavouriteGenres struct {\n\t\/\/ \t\tDrama int `json:\"Drama\"`\n\t\/\/ \t\tAction int `json:\"Action\"`\n\t\/\/ \t\tComedy int `json:\"Comedy\"`\n\t\/\/ \t\tAdventure int `json:\"Adventure\"`\n\t\/\/ \t\tRomance int `json:\"Romance\"`\n\t\/\/ \t\tPsychological int `json:\"Psychological\"`\n\t\/\/ \t\tFantasy int `json:\"Fantasy\"`\n\t\/\/ \t\tMystery int `json:\"Mystery\"`\n\t\/\/ \t\tSciFi int `json:\"Sci-Fi\"`\n\t\/\/ \t\tThriller int `json:\"Thriller\"`\n\t\/\/ \t} `json:\"favourite_genres\"`\n\t\/\/ \tListScores struct {\n\t\/\/ \t\tAnime struct {\n\t\/\/ \t\t\tMean int `json:\"mean\"`\n\t\/\/ \t\t\tStandardDeviation int `json:\"standard_deviation\"`\n\t\/\/ \t\t} `json:\"anime\"`\n\t\/\/ \t\tManga struct {\n\t\/\/ \t\t\tMean int `json:\"mean\"`\n\t\/\/ \t\t\tStandardDeviation int `json:\"standard_deviation\"`\n\t\/\/ \t\t} `json:\"manga\"`\n\t\/\/ \t} `json:\"list_scores\"`\n\t\/\/ } `json:\"stats\"`\n\t\/\/ AdvancedRating bool `json:\"advanced_rating\"`\n\t\/\/ AdvancedRatingNames []interface{} `json:\"advanced_rating_names\"`\n\t\/\/ Notifications int `json:\"notifications\"`\n\t\/\/ AiringNotifications int `json:\"airing_notifications\"`\n\t\/\/ UpdatedAt int `json:\"updated_at\"`\n\tLists struct {\n\t\tCompleted []*AniListAnimeListItem `json:\"completed\"`\n\t\tWatching []*AniListAnimeListItem `json:\"watching\"`\n\t\tPlanToWatch []*AniListAnimeListItem `json:\"plan_to_watch\"`\n\t\tDropped []*AniListAnimeListItem `json:\"dropped\"`\n\t\tOnHold []*AniListAnimeListItem `json:\"on_hold\"`\n\t} `json:\"lists\"`\n\tCustomLists map[string][]*AniListAnimeListItem `json:\"custom_lists\"`\n\t\/\/ CSS struct {\n\t\/\/ \tURL string `json:\"url\"`\n\t\/\/ \tPopup bool `json:\"popup\"`\n\t\/\/ \tCount bool `json:\"count\"`\n\t\/\/ \tCovers string `json:\"covers\"`\n\t\/\/ } `json:\"css\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage ardop\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n)\n\ntype listener struct {\n\tincoming <-chan net.Conn\n\tquit chan struct{}\n\terrors <-chan error\n\taddr Addr\n}\n\nfunc (l listener) Accept() (c net.Conn, err error) {\n\tselect {\n\tcase c, ok := <-l.incoming:\n\t\tif !ok {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\treturn c, nil\n\tcase err = <-l.errors:\n\t\treturn nil, err\n\t}\n}\n\nfunc (l listener) Addr() net.Addr {\n\treturn l.addr\n}\n\nfunc (l listener) Close() error {\n\tclose(l.quit)\n\treturn nil\n}\n\nfunc (tnc *TNC) Listen() (ln net.Listener, err error) {\n\tif tnc.closed {\n\t\treturn nil, ErrTNCClosed\n\t}\n\n\tif tnc.listenerActive {\n\t\treturn nil, ErrActiveListenerExists\n\t}\n\ttnc.listenerActive = true\n\n\tincoming := make(chan net.Conn)\n\tquit := make(chan struct{})\n\terrors := make(chan error)\n\n\tmycall, err := tnc.MyCall()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get mycall: %s\", err)\n\t}\n\n\tif err := tnc.SetListenEnabled(true); err != nil {\n\t\treturn nil, fmt.Errorf(\"TNC failed to enable listening: %s\", err)\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(incoming) \/\/ Important to close this first!\n\t\t\tclose(errors)\n\t\t\ttnc.listenerActive = false\n\t\t}()\n\n\t\tmsgListener := tnc.in.Listen()\n\t\tdefer msgListener.Close()\n\t\tmsgs := msgListener.Msgs()\n\n\t\tvar remotecall, targetcall string\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-quit:\n\t\t\t\ttnc.SetListenEnabled(false) \/\/ Should return this in listener.Close()\n\t\t\t\terrors <- fmt.Errorf(\"Closed\")\n\t\t\t\treturn\n\t\t\tcase msg, ok := <-msgs:\n\t\t\t\tswitch {\n\t\t\t\tcase !ok:\n\t\t\t\t\terrors <- ErrTNCClosed\n\t\t\t\t\treturn\n\t\t\t\tcase msg.cmd == cmdCancelPending:\n\t\t\t\t\tremotecall, targetcall = \"\", \"\"\n\t\t\t\tcase msg.cmd == cmdConnected:\n\t\t\t\t\tremotecall = msg.value.([]string)[0]\n\t\t\t\tcase msg.cmd == cmdTarget:\n\t\t\t\t\ttargetcall = msg.String()\n\t\t\t\t}\n\n\t\t\t\tif len(remotecall) > 0 && len(targetcall) > 0 {\n\t\t\t\t\ttnc.data = &tncConn{\n\t\t\t\t\t\tremoteAddr: Addr{remotecall},\n\t\t\t\t\t\tlocalAddr: Addr{targetcall},\n\t\t\t\t\t\tctrlOut: tnc.out,\n\t\t\t\t\t\tdataOut: tnc.dataOut,\n\t\t\t\t\t\tctrlIn: tnc.in,\n\t\t\t\t\t\tdataIn: tnc.dataIn,\n\t\t\t\t\t\teofChan: make(chan struct{}),\n\t\t\t\t\t\tisTCP: tnc.isTCP,\n\t\t\t\t\t}\n\n\t\t\t\t\ttnc.connected = true\n\t\t\t\t\tincoming <- tnc.data\n\n\t\t\t\t\tremotecall, targetcall = \"\", \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn listener{incoming, quit, errors, Addr{mycall}}, nil\n}\n<commit_msg>ardop: Fix bug in Listener<commit_after>\/\/ Copyright 2015 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage ardop\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n)\n\ntype listener struct {\n\tincoming <-chan net.Conn\n\tquit chan struct{}\n\terrors <-chan error\n\taddr Addr\n}\n\nfunc (l listener) Accept() (c net.Conn, err error) {\n\tselect {\n\tcase c, ok := <-l.incoming:\n\t\tif !ok {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\treturn c, nil\n\tcase err = <-l.errors:\n\t\treturn nil, err\n\t}\n}\n\nfunc (l listener) Addr() net.Addr {\n\treturn l.addr\n}\n\nfunc (l listener) Close() error {\n\tclose(l.quit)\n\treturn nil\n}\n\nfunc (tnc *TNC) Listen() (ln net.Listener, err error) {\n\tif tnc.closed {\n\t\treturn nil, ErrTNCClosed\n\t}\n\n\tif tnc.listenerActive {\n\t\treturn nil, ErrActiveListenerExists\n\t}\n\ttnc.listenerActive = true\n\n\tincoming := make(chan net.Conn)\n\tquit := make(chan struct{})\n\terrors := make(chan error)\n\n\tmycall, err := tnc.MyCall()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get mycall: %s\", err)\n\t}\n\n\tif err := tnc.SetListenEnabled(true); err != nil {\n\t\treturn nil, fmt.Errorf(\"TNC failed to enable listening: %s\", err)\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(incoming) \/\/ Important to close this first!\n\t\t\tclose(errors)\n\t\t\ttnc.listenerActive = false\n\t\t}()\n\n\t\tmsgListener := tnc.in.Listen()\n\t\tdefer msgListener.Close()\n\t\tmsgs := msgListener.Msgs()\n\n\t\tvar targetcall string\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-quit:\n\t\t\t\ttnc.SetListenEnabled(false) \/\/ Should return this in listener.Close()\n\t\t\t\terrors <- fmt.Errorf(\"Closed\")\n\t\t\t\treturn\n\t\t\tcase msg, ok := <-msgs:\n\t\t\t\tif !ok {\n\t\t\t\t\terrors <- ErrTNCClosed\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch msg.cmd {\n\t\t\t\tcase cmdCancelPending, cmdDisconnected:\n\t\t\t\t\ttargetcall = \"\" \/\/ Reset\n\t\t\t\tcase cmdTarget:\n\t\t\t\t\ttargetcall = msg.String()\n\t\t\t\tcase cmdConnected:\n\t\t\t\t\tif targetcall == \"\" {\n\t\t\t\t\t\t\/\/ This can not be an incoming connection.\n\t\t\t\t\t\t\/\/ Incoming connections always gets cmdTarget before cmdConnected according to the spec\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tremotecall := msg.value.([]string)[0]\n\t\t\t\t\ttnc.data = &tncConn{\n\t\t\t\t\t\tremoteAddr: Addr{remotecall},\n\t\t\t\t\t\tlocalAddr: Addr{targetcall},\n\t\t\t\t\t\tctrlOut: tnc.out,\n\t\t\t\t\t\tdataOut: tnc.dataOut,\n\t\t\t\t\t\tctrlIn: tnc.in,\n\t\t\t\t\t\tdataIn: tnc.dataIn,\n\t\t\t\t\t\teofChan: make(chan struct{}),\n\t\t\t\t\t\tisTCP: tnc.isTCP,\n\t\t\t\t\t}\n\t\t\t\t\ttnc.connected = true\n\t\t\t\t\tincoming <- tnc.data\n\t\t\t\t\ttargetcall = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn listener{incoming, quit, errors, Addr{mycall}}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zoekt\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"sort\"\n\t\"unicode\/utf8\"\n)\n\nvar _ = log.Println\n\n\/\/ contentProvider is an abstraction to treat matches for names and\n\/\/ content with the same code.\ntype contentProvider struct {\n\tid *indexData\n\tstats *Stats\n\n\t\/\/ mutable\n\terr error\n\tidx uint32\n\t_data []byte\n\t_nl []uint32\n\t_nlBuf []uint32\n\t_sects []DocumentSection\n\t_sectBuf []DocumentSection\n\tfileSize uint32\n}\n\n\/\/ setDocument skips to the given document.\nfunc (p *contentProvider) setDocument(docID uint32) {\n\tfileStart := p.id.boundaries[docID]\n\n\tp.idx = docID\n\tp.fileSize = p.id.boundaries[docID+1] - fileStart\n\n\tp._nl = nil\n\tp._sects = nil\n\tp._data = nil\n}\n\nfunc (p *contentProvider) docSections() []DocumentSection {\n\tif p._sects == nil {\n\t\tvar sz uint32\n\t\tp._sects, sz, p.err = p.id.readDocSections(p.idx, p._sectBuf)\n\t\tp.stats.ContentBytesLoaded += int64(sz)\n\t\tp._sectBuf = p._sects\n\t}\n\treturn p._sects\n}\n\nfunc (p *contentProvider) newlines() []uint32 {\n\tif p._nl == nil {\n\t\tvar sz uint32\n\t\tp._nl, sz, p.err = p.id.readNewlines(p.idx, p._nlBuf)\n\t\tp._nlBuf = p._nl\n\t\tp.stats.ContentBytesLoaded += int64(sz)\n\t}\n\treturn p._nl\n}\n\nfunc (p *contentProvider) data(fileName bool) []byte {\n\tif fileName {\n\t\treturn p.id.fileNameContent[p.id.fileNameIndex[p.idx]:p.id.fileNameIndex[p.idx+1]]\n\t}\n\n\tif p._data == nil {\n\t\tp._data, p.err = p.id.readContents(p.idx)\n\t\tp.stats.FilesLoaded++\n\t\tp.stats.ContentBytesLoaded += int64(len(p._data))\n\t}\n\treturn p._data\n}\n\n\/\/ Find offset in bytes (relative to corpus start) for an offset in\n\/\/ runes (relative to document start). If filename is set, the corpus\n\/\/ is the set of filenames, with the document being the name itself.\nfunc (p *contentProvider) findOffset(filename bool, r uint32) uint32 {\n\tif p.id.metaData.PlainASCII {\n\t\treturn r\n\t}\n\n\tsample := p.id.runeOffsets\n\truneEnds := p.id.fileEndRunes\n\tfileStartByte := p.id.boundaries[p.idx]\n\tif filename {\n\t\tsample = p.id.fileNameRuneOffsets\n\t\truneEnds = p.id.fileNameEndRunes\n\t\tfileStartByte = p.id.fileNameIndex[p.idx]\n\t}\n\n\tabsR := r\n\tif p.idx > 0 {\n\t\tabsR += runeEnds[p.idx-1]\n\t}\n\n\tbyteOff := sample[absR\/runeOffsetFrequency]\n\tleft := absR % runeOffsetFrequency\n\n\tvar data []byte\n\n\tif filename {\n\t\tdata = p.id.fileNameContent[byteOff:]\n\t} else {\n\t\tdata, p.err = p.id.readContentSlice(byteOff, 3*runeOffsetFrequency)\n\t\tif p.err != nil {\n\t\t\treturn 0\n\t\t}\n\t}\n\tfor left > 0 {\n\t\t_, sz := utf8.DecodeRune(data)\n\t\tbyteOff += uint32(sz)\n\t\tdata = data[sz:]\n\t\tleft--\n\t}\n\n\tbyteOff -= fileStartByte\n\treturn byteOff\n}\n\nfunc (p *contentProvider) fillMatches(ms []*candidateMatch) []LineMatch {\n\tvar result []LineMatch\n\tif ms[0].fileName {\n\t\t\/\/ There is only \"line\" in a filename.\n\t\tres := LineMatch{\n\t\t\tLine: p.id.fileName(p.idx),\n\t\t\tFileName: true,\n\t\t}\n\n\t\tfor _, m := range ms {\n\t\t\tres.LineFragments = append(res.LineFragments, LineFragmentMatch{\n\t\t\t\tLineOffset: int(m.byteOffset),\n\t\t\t\tMatchLength: int(m.byteMatchSz),\n\t\t\t\tOffset: m.byteOffset,\n\t\t\t})\n\n\t\t\tresult = []LineMatch{res}\n\t\t}\n\t} else {\n\t\tms = breakMatchesOnNewlines(ms, p.data(false))\n\t\tresult = p.fillContentMatches(ms)\n\t}\n\n\tsects := p.docSections()\n\tfor i, m := range result {\n\t\tresult[i].Score = matchScore(sects, &m)\n\t}\n\n\treturn result\n}\n\nfunc (p *contentProvider) fillContentMatches(ms []*candidateMatch) []LineMatch {\n\tvar result []LineMatch\n\tfor len(ms) > 0 {\n\t\tm := ms[0]\n\t\tnum, lineStart, lineEnd := m.line(p.newlines(), p.fileSize)\n\n\t\tvar lineCands []*candidateMatch\n\n\t\tendMatch := m.byteOffset + m.byteMatchSz\n\n\t\tfor len(ms) > 0 {\n\t\t\tm := ms[0]\n\t\t\tif int(m.byteOffset) <= lineEnd {\n\t\t\t\tendMatch = m.byteOffset + m.byteMatchSz\n\t\t\t\tlineCands = append(lineCands, m)\n\t\t\t\tms = ms[1:]\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif len(lineCands) == 0 {\n\t\t\tlog.Panicf(\n\t\t\t\t\"%s %v infinite loop: num %d start,end %d,%d, offset %d\",\n\t\t\t\tp.id.fileName(p.idx), p.id.metaData,\n\t\t\t\tnum, lineStart, lineEnd,\n\t\t\t\tm.byteOffset)\n\t\t}\n\n\t\tdata := p.data(false)\n\n\t\t\/\/ Due to merging matches, we may have a match that\n\t\t\/\/ crosses a line boundary. Prevent confusion by\n\t\t\/\/ taking lines until we pass the last match\n\t\tfor lineEnd < len(data) && endMatch > uint32(lineEnd) {\n\t\t\tnext := bytes.IndexByte(data[lineEnd+1:], '\\n')\n\t\t\tif next == -1 {\n\t\t\t\tlineEnd = len(data)\n\t\t\t} else {\n\t\t\t\t\/\/ TODO(hanwen): test that checks \"+1\" part here.\n\t\t\t\tlineEnd += next + 1\n\t\t\t}\n\t\t}\n\n\t\tfinalMatch := LineMatch{\n\t\t\tLineStart: lineStart,\n\t\t\tLineEnd: lineEnd,\n\t\t\tLineNumber: num,\n\t\t}\n\t\tfinalMatch.Line = p.data(false)[lineStart:lineEnd]\n\n\t\tfor _, m := range lineCands {\n\t\t\tfragment := LineFragmentMatch{\n\t\t\t\tOffset: m.byteOffset,\n\t\t\t\tLineOffset: int(m.byteOffset) - lineStart,\n\t\t\t\tMatchLength: int(m.byteMatchSz),\n\t\t\t}\n\t\t\tfinalMatch.LineFragments = append(finalMatch.LineFragments, fragment)\n\t\t}\n\t\tresult = append(result, finalMatch)\n\t}\n\treturn result\n}\n\nconst (\n\t\/\/ TODO - how to scale this relative to rank?\n\tscorePartialWordMatch = 50.0\n\tscoreWordMatch = 500.0\n\tscoreImportantThreshold = 2000.0\n\tscorePartialSymbol = 4000.0\n\tscoreSymbol = 7000.0\n\tscoreFactorAtomMatch = 400.0\n\tscoreShardRankFactor = 20.0\n\tscoreFileOrderFactor = 10.0\n\tscoreLineOrderFactor = 1.0\n)\n\nfunc findSection(secs []DocumentSection, off, sz uint32) *DocumentSection {\n\tj := sort.Search(len(secs), func(i int) bool {\n\t\treturn secs[i].End >= off+sz\n\t})\n\n\tif j == len(secs) {\n\t\treturn nil\n\t}\n\n\tif secs[j].Start <= off && off+sz <= secs[j].End {\n\t\treturn &secs[j]\n\t}\n\treturn nil\n}\n\nfunc matchScore(secs []DocumentSection, m *LineMatch) float64 {\n\tvar maxScore float64\n\tfor _, f := range m.LineFragments {\n\t\tstartBoundary := f.LineOffset < len(m.Line) && (f.LineOffset == 0 || byteClass(m.Line[f.LineOffset-1]) != byteClass(m.Line[f.LineOffset]))\n\n\t\tend := int(f.LineOffset) + f.MatchLength\n\t\tendBoundary := end > 0 && (end == len(m.Line) || byteClass(m.Line[end-1]) != byteClass(m.Line[end]))\n\n\t\tscore := 0.0\n\t\tif startBoundary && endBoundary {\n\t\t\tscore = scoreWordMatch\n\t\t} else if startBoundary || endBoundary {\n\t\t\tscore = scorePartialWordMatch\n\t\t}\n\n\t\tsec := findSection(secs, f.Offset, uint32(f.MatchLength))\n\t\tif sec != nil {\n\t\t\tstartMatch := sec.Start == f.Offset\n\t\t\tendMatch := sec.End == f.Offset+uint32(f.MatchLength)\n\t\t\tif startMatch && endMatch {\n\t\t\t\tscore += scoreSymbol\n\t\t\t} else if startMatch || endMatch {\n\t\t\t\tscore += (scoreSymbol + scorePartialSymbol) \/ 2\n\t\t\t} else {\n\t\t\t\tscore += scorePartialSymbol\n\t\t\t}\n\t\t}\n\t\tif score > maxScore {\n\t\t\tmaxScore = score\n\t\t}\n\t}\n\treturn maxScore\n}\n\ntype matchScoreSlice []LineMatch\n\nfunc (m matchScoreSlice) Len() int { return len(m) }\nfunc (m matchScoreSlice) Swap(i, j int) { m[i], m[j] = m[j], m[i] }\nfunc (m matchScoreSlice) Less(i, j int) bool { return m[i].Score > m[j].Score }\n\ntype fileMatchSlice []FileMatch\n\nfunc (m fileMatchSlice) Len() int { return len(m) }\nfunc (m fileMatchSlice) Swap(i, j int) { m[i], m[j] = m[j], m[i] }\nfunc (m fileMatchSlice) Less(i, j int) bool { return m[i].Score > m[j].Score }\n\nfunc sortMatchesByScore(ms []LineMatch) {\n\tsort.Sort(matchScoreSlice(ms))\n}\n\n\/\/ Sort a slice of results.\nfunc SortFilesByScore(ms []FileMatch) {\n\tsort.Sort(fileMatchSlice(ms))\n}\n<commit_msg>Reuse existing data buffer in contentprovider<commit_after>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zoekt\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"sort\"\n\t\"unicode\/utf8\"\n)\n\nvar _ = log.Println\n\n\/\/ contentProvider is an abstraction to treat matches for names and\n\/\/ content with the same code.\ntype contentProvider struct {\n\tid *indexData\n\tstats *Stats\n\n\t\/\/ mutable\n\terr error\n\tidx uint32\n\t_data []byte\n\t_nl []uint32\n\t_nlBuf []uint32\n\t_sects []DocumentSection\n\t_sectBuf []DocumentSection\n\tfileSize uint32\n}\n\n\/\/ setDocument skips to the given document.\nfunc (p *contentProvider) setDocument(docID uint32) {\n\tfileStart := p.id.boundaries[docID]\n\n\tp.idx = docID\n\tp.fileSize = p.id.boundaries[docID+1] - fileStart\n\n\tp._nl = nil\n\tp._sects = nil\n\tp._data = nil\n}\n\nfunc (p *contentProvider) docSections() []DocumentSection {\n\tif p._sects == nil {\n\t\tvar sz uint32\n\t\tp._sects, sz, p.err = p.id.readDocSections(p.idx, p._sectBuf)\n\t\tp.stats.ContentBytesLoaded += int64(sz)\n\t\tp._sectBuf = p._sects\n\t}\n\treturn p._sects\n}\n\nfunc (p *contentProvider) newlines() []uint32 {\n\tif p._nl == nil {\n\t\tvar sz uint32\n\t\tp._nl, sz, p.err = p.id.readNewlines(p.idx, p._nlBuf)\n\t\tp._nlBuf = p._nl\n\t\tp.stats.ContentBytesLoaded += int64(sz)\n\t}\n\treturn p._nl\n}\n\nfunc (p *contentProvider) data(fileName bool) []byte {\n\tif fileName {\n\t\treturn p.id.fileNameContent[p.id.fileNameIndex[p.idx]:p.id.fileNameIndex[p.idx+1]]\n\t}\n\n\tif p._data == nil {\n\t\tp._data, p.err = p.id.readContents(p.idx)\n\t\tp.stats.FilesLoaded++\n\t\tp.stats.ContentBytesLoaded += int64(len(p._data))\n\t}\n\treturn p._data\n}\n\n\/\/ Find offset in bytes (relative to corpus start) for an offset in\n\/\/ runes (relative to document start). If filename is set, the corpus\n\/\/ is the set of filenames, with the document being the name itself.\nfunc (p *contentProvider) findOffset(filename bool, r uint32) uint32 {\n\tif p.id.metaData.PlainASCII {\n\t\treturn r\n\t}\n\n\tsample := p.id.runeOffsets\n\truneEnds := p.id.fileEndRunes\n\tfileStartByte := p.id.boundaries[p.idx]\n\tif filename {\n\t\tsample = p.id.fileNameRuneOffsets\n\t\truneEnds = p.id.fileNameEndRunes\n\t\tfileStartByte = p.id.fileNameIndex[p.idx]\n\t}\n\n\tabsR := r\n\tif p.idx > 0 {\n\t\tabsR += runeEnds[p.idx-1]\n\t}\n\n\tbyteOff := sample[absR\/runeOffsetFrequency]\n\tleft := absR % runeOffsetFrequency\n\n\tvar data []byte\n\n\tif filename {\n\t\tdata = p.id.fileNameContent[byteOff:]\n\t} else {\n\t\tdata, p.err = p.id.readContentSlice(byteOff, 3*runeOffsetFrequency)\n\t\tif p.err != nil {\n\t\t\treturn 0\n\t\t}\n\t}\n\tfor left > 0 {\n\t\t_, sz := utf8.DecodeRune(data)\n\t\tbyteOff += uint32(sz)\n\t\tdata = data[sz:]\n\t\tleft--\n\t}\n\n\tbyteOff -= fileStartByte\n\treturn byteOff\n}\n\nfunc (p *contentProvider) fillMatches(ms []*candidateMatch) []LineMatch {\n\tvar result []LineMatch\n\tif ms[0].fileName {\n\t\t\/\/ There is only \"line\" in a filename.\n\t\tres := LineMatch{\n\t\t\tLine: p.id.fileName(p.idx),\n\t\t\tFileName: true,\n\t\t}\n\n\t\tfor _, m := range ms {\n\t\t\tres.LineFragments = append(res.LineFragments, LineFragmentMatch{\n\t\t\t\tLineOffset: int(m.byteOffset),\n\t\t\t\tMatchLength: int(m.byteMatchSz),\n\t\t\t\tOffset: m.byteOffset,\n\t\t\t})\n\n\t\t\tresult = []LineMatch{res}\n\t\t}\n\t} else {\n\t\tms = breakMatchesOnNewlines(ms, p.data(false))\n\t\tresult = p.fillContentMatches(ms)\n\t}\n\n\tsects := p.docSections()\n\tfor i, m := range result {\n\t\tresult[i].Score = matchScore(sects, &m)\n\t}\n\n\treturn result\n}\n\nfunc (p *contentProvider) fillContentMatches(ms []*candidateMatch) []LineMatch {\n\tvar result []LineMatch\n\tfor len(ms) > 0 {\n\t\tm := ms[0]\n\t\tnum, lineStart, lineEnd := m.line(p.newlines(), p.fileSize)\n\n\t\tvar lineCands []*candidateMatch\n\n\t\tendMatch := m.byteOffset + m.byteMatchSz\n\n\t\tfor len(ms) > 0 {\n\t\t\tm := ms[0]\n\t\t\tif int(m.byteOffset) <= lineEnd {\n\t\t\t\tendMatch = m.byteOffset + m.byteMatchSz\n\t\t\t\tlineCands = append(lineCands, m)\n\t\t\t\tms = ms[1:]\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif len(lineCands) == 0 {\n\t\t\tlog.Panicf(\n\t\t\t\t\"%s %v infinite loop: num %d start,end %d,%d, offset %d\",\n\t\t\t\tp.id.fileName(p.idx), p.id.metaData,\n\t\t\t\tnum, lineStart, lineEnd,\n\t\t\t\tm.byteOffset)\n\t\t}\n\n\t\tdata := p.data(false)\n\n\t\t\/\/ Due to merging matches, we may have a match that\n\t\t\/\/ crosses a line boundary. Prevent confusion by\n\t\t\/\/ taking lines until we pass the last match\n\t\tfor lineEnd < len(data) && endMatch > uint32(lineEnd) {\n\t\t\tnext := bytes.IndexByte(data[lineEnd+1:], '\\n')\n\t\t\tif next == -1 {\n\t\t\t\tlineEnd = len(data)\n\t\t\t} else {\n\t\t\t\t\/\/ TODO(hanwen): test that checks \"+1\" part here.\n\t\t\t\tlineEnd += next + 1\n\t\t\t}\n\t\t}\n\n\t\tfinalMatch := LineMatch{\n\t\t\tLineStart: lineStart,\n\t\t\tLineEnd: lineEnd,\n\t\t\tLineNumber: num,\n\t\t}\n\t\tfinalMatch.Line = data[lineStart:lineEnd]\n\n\t\tfor _, m := range lineCands {\n\t\t\tfragment := LineFragmentMatch{\n\t\t\t\tOffset: m.byteOffset,\n\t\t\t\tLineOffset: int(m.byteOffset) - lineStart,\n\t\t\t\tMatchLength: int(m.byteMatchSz),\n\t\t\t}\n\t\t\tfinalMatch.LineFragments = append(finalMatch.LineFragments, fragment)\n\t\t}\n\t\tresult = append(result, finalMatch)\n\t}\n\treturn result\n}\n\nconst (\n\t\/\/ TODO - how to scale this relative to rank?\n\tscorePartialWordMatch = 50.0\n\tscoreWordMatch = 500.0\n\tscoreImportantThreshold = 2000.0\n\tscorePartialSymbol = 4000.0\n\tscoreSymbol = 7000.0\n\tscoreFactorAtomMatch = 400.0\n\tscoreShardRankFactor = 20.0\n\tscoreFileOrderFactor = 10.0\n\tscoreLineOrderFactor = 1.0\n)\n\nfunc findSection(secs []DocumentSection, off, sz uint32) *DocumentSection {\n\tj := sort.Search(len(secs), func(i int) bool {\n\t\treturn secs[i].End >= off+sz\n\t})\n\n\tif j == len(secs) {\n\t\treturn nil\n\t}\n\n\tif secs[j].Start <= off && off+sz <= secs[j].End {\n\t\treturn &secs[j]\n\t}\n\treturn nil\n}\n\nfunc matchScore(secs []DocumentSection, m *LineMatch) float64 {\n\tvar maxScore float64\n\tfor _, f := range m.LineFragments {\n\t\tstartBoundary := f.LineOffset < len(m.Line) && (f.LineOffset == 0 || byteClass(m.Line[f.LineOffset-1]) != byteClass(m.Line[f.LineOffset]))\n\n\t\tend := int(f.LineOffset) + f.MatchLength\n\t\tendBoundary := end > 0 && (end == len(m.Line) || byteClass(m.Line[end-1]) != byteClass(m.Line[end]))\n\n\t\tscore := 0.0\n\t\tif startBoundary && endBoundary {\n\t\t\tscore = scoreWordMatch\n\t\t} else if startBoundary || endBoundary {\n\t\t\tscore = scorePartialWordMatch\n\t\t}\n\n\t\tsec := findSection(secs, f.Offset, uint32(f.MatchLength))\n\t\tif sec != nil {\n\t\t\tstartMatch := sec.Start == f.Offset\n\t\t\tendMatch := sec.End == f.Offset+uint32(f.MatchLength)\n\t\t\tif startMatch && endMatch {\n\t\t\t\tscore += scoreSymbol\n\t\t\t} else if startMatch || endMatch {\n\t\t\t\tscore += (scoreSymbol + scorePartialSymbol) \/ 2\n\t\t\t} else {\n\t\t\t\tscore += scorePartialSymbol\n\t\t\t}\n\t\t}\n\t\tif score > maxScore {\n\t\t\tmaxScore = score\n\t\t}\n\t}\n\treturn maxScore\n}\n\ntype matchScoreSlice []LineMatch\n\nfunc (m matchScoreSlice) Len() int { return len(m) }\nfunc (m matchScoreSlice) Swap(i, j int) { m[i], m[j] = m[j], m[i] }\nfunc (m matchScoreSlice) Less(i, j int) bool { return m[i].Score > m[j].Score }\n\ntype fileMatchSlice []FileMatch\n\nfunc (m fileMatchSlice) Len() int { return len(m) }\nfunc (m fileMatchSlice) Swap(i, j int) { m[i], m[j] = m[j], m[i] }\nfunc (m fileMatchSlice) Less(i, j int) bool { return m[i].Score > m[j].Score }\n\nfunc sortMatchesByScore(ms []LineMatch) {\n\tsort.Sort(matchScoreSlice(ms))\n}\n\n\/\/ Sort a slice of results.\nfunc SortFilesByScore(ms []FileMatch) {\n\tsort.Sort(fileMatchSlice(ms))\n}\n<|endoftext|>"} {"text":"<commit_before>package bencode\n\nimport (\n\t\"bytes\"\n\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"sort\"\n)\n\ntype sortValues []reflect.Value\n\nfunc (p sortValues) Len() int { return len(p) }\nfunc (p sortValues) Less(i, j int) bool { return p[i].String() < p[j].String() }\nfunc (p sortValues) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\ntype sortFields []reflect.StructField\n\nfunc (p sortFields) Len() int { return len(p) }\nfunc (p sortFields) Less(i, j int) bool {\n\tiName, jName := p[i].Name, p[j].Name\n\tif p[i].Tag.Get(\"bencode\") != \"\" {\n\t\tiName = p[i].Tag.Get(\"bencode\")\n\t}\n\tif p[j].Tag.Get(\"bencode\") != \"\" {\n\t\tiName = p[j].Tag.Get(\"bencode\")\n\t}\n\treturn iName < jName\n}\nfunc (p sortFields) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\n\/\/An Encoder writes bencoded objects to an output stream.\ntype Encoder struct {\n\tw io.Writer\n}\n\n\/\/NewEncoder returns a new encoder that writes to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}\n\n\/\/Encode writes the bencoded data of val to its output stream.\n\/\/See the documentation for Decode about the conversion of Go values to\n\/\/bencoded data.\nfunc (e *Encoder) Encode(val interface{}) error {\n\treturn encodeValue(e.w, reflect.ValueOf(val))\n}\n\n\/\/EncodeString returns the bencoded data of val as a string.\nfunc EncodeString(val interface{}) (string, error) {\n\tbuf := new(bytes.Buffer)\n\te := NewEncoder(buf)\n\tif err := e.Encode(val); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn buf.String(), nil\n}\n\nfunc encodeValue(w io.Writer, val reflect.Value) error {\n\t\/\/inspect the val to check\n\tv := indirect(val)\n\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t_, err := fmt.Fprintf(w, \"i%de\", v.Int())\n\t\treturn err\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t_, err := fmt.Fprintf(w, \"i%de\", v.Uint())\n\t\treturn err\n\n\tcase reflect.String:\n\t\t_, err := fmt.Fprintf(w, \"%d:%s\", len(v.String()), v.String())\n\t\treturn err\n\n\tcase reflect.Slice, reflect.Array:\n\t\tif _, err := fmt.Fprint(w, \"l\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif err := encodeValue(w, v.Index(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\n\tcase reflect.Map:\n\t\tif _, err := fmt.Fprint(w, \"d\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar (\n\t\t\tkeys sortValues = v.MapKeys()\n\t\t\tmval reflect.Value\n\t\t)\n\t\tsort.Sort(keys)\n\t\tfor i := range keys {\n\t\t\tif err := encodeValue(w, keys[i]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmval = v.MapIndex(keys[i])\n\t\t\tif err := encodeValue(w, mval); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\n\tcase reflect.Struct:\n\t\tt := v.Type()\n\t\tif _, err := fmt.Fprint(w, \"d\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/put keys into keys\n\t\tvar (\n\t\t\tkeys = make(sortFields, t.NumField())\n\t\t\tmval reflect.Value\n\t\t\trkey reflect.Value\n\t\t)\n\t\tfor i := range keys {\n\t\t\tkeys[i] = t.Field(i)\n\t\t}\n\t\tsort.Sort(keys)\n\t\tfor _, key := range keys {\n\t\t\t\/\/determine if key has a tag\n\t\t\tif tag := key.Tag.Get(\"bencode\"); tag != \"\" {\n\t\t\t\trkey = reflect.ValueOf(tag)\n\t\t\t} else {\n\t\t\t\trkey = reflect.ValueOf(key.Name)\n\t\t\t}\n\n\t\t\t\/\/encode the key\n\t\t\tif err := encodeValue(w, rkey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/encode the value\n\t\t\tmval = v.FieldByIndex(key.Index)\n\t\t\tif err := encodeValue(w, mval); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\t}\n\n\treturn fmt.Errorf(\"Can't encode type: %s\", v.Type())\n}\n<commit_msg>fix for sortFields by tag<commit_after>package bencode\n\nimport (\n\t\"bytes\"\n\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"sort\"\n)\n\ntype sortValues []reflect.Value\n\nfunc (p sortValues) Len() int { return len(p) }\nfunc (p sortValues) Less(i, j int) bool { return p[i].String() < p[j].String() }\nfunc (p sortValues) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\ntype sortFields []reflect.StructField\n\nfunc (p sortFields) Len() int { return len(p) }\nfunc (p sortFields) Less(i, j int) bool {\n\tiName, jName := p[i].Name, p[j].Name\n\tif p[i].Tag.Get(\"bencode\") != \"\" {\n\t\tiName = p[i].Tag.Get(\"bencode\")\n\t}\n\tif p[j].Tag.Get(\"bencode\") != \"\" {\n\t\tjName = p[j].Tag.Get(\"bencode\")\n\t}\n\treturn iName < jName\n}\nfunc (p sortFields) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\n\/\/An Encoder writes bencoded objects to an output stream.\ntype Encoder struct {\n\tw io.Writer\n}\n\n\/\/NewEncoder returns a new encoder that writes to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\treturn &Encoder{w}\n}\n\n\/\/Encode writes the bencoded data of val to its output stream.\n\/\/See the documentation for Decode about the conversion of Go values to\n\/\/bencoded data.\nfunc (e *Encoder) Encode(val interface{}) error {\n\treturn encodeValue(e.w, reflect.ValueOf(val))\n}\n\n\/\/EncodeString returns the bencoded data of val as a string.\nfunc EncodeString(val interface{}) (string, error) {\n\tbuf := new(bytes.Buffer)\n\te := NewEncoder(buf)\n\tif err := e.Encode(val); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn buf.String(), nil\n}\n\nfunc encodeValue(w io.Writer, val reflect.Value) error {\n\t\/\/inspect the val to check\n\tv := indirect(val)\n\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t_, err := fmt.Fprintf(w, \"i%de\", v.Int())\n\t\treturn err\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t_, err := fmt.Fprintf(w, \"i%de\", v.Uint())\n\t\treturn err\n\n\tcase reflect.String:\n\t\t_, err := fmt.Fprintf(w, \"%d:%s\", len(v.String()), v.String())\n\t\treturn err\n\n\tcase reflect.Slice, reflect.Array:\n\t\tif _, err := fmt.Fprint(w, \"l\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif err := encodeValue(w, v.Index(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\n\tcase reflect.Map:\n\t\tif _, err := fmt.Fprint(w, \"d\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar (\n\t\t\tkeys sortValues = v.MapKeys()\n\t\t\tmval reflect.Value\n\t\t)\n\t\tsort.Sort(keys)\n\t\tfor i := range keys {\n\t\t\tif err := encodeValue(w, keys[i]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmval = v.MapIndex(keys[i])\n\t\t\tif err := encodeValue(w, mval); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\n\tcase reflect.Struct:\n\t\tt := v.Type()\n\t\tif _, err := fmt.Fprint(w, \"d\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/put keys into keys\n\t\tvar (\n\t\t\tkeys = make(sortFields, t.NumField())\n\t\t\tmval reflect.Value\n\t\t\trkey reflect.Value\n\t\t)\n\t\tfor i := range keys {\n\t\t\tkeys[i] = t.Field(i)\n\t\t}\n\t\tsort.Sort(keys)\n\t\tfor _, key := range keys {\n\t\t\t\/\/determine if key has a tag\n\t\t\tif tag := key.Tag.Get(\"bencode\"); tag != \"\" {\n\t\t\t\trkey = reflect.ValueOf(tag)\n\t\t\t} else {\n\t\t\t\trkey = reflect.ValueOf(key.Name)\n\t\t\t}\n\n\t\t\t\/\/encode the key\n\t\t\tif err := encodeValue(w, rkey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/encode the value\n\t\t\tmval = v.FieldByIndex(key.Index)\n\t\t\tif err := encodeValue(w, mval); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, err := fmt.Fprint(w, \"e\")\n\t\treturn err\n\t}\n\n\treturn fmt.Errorf(\"Can't encode type: %s\", v.Type())\n}\n<|endoftext|>"} {"text":"<commit_before>package hitGox\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/valyala\/fasthttp\"\n)\n\n\/\/ User contains information about user.\ntype FeaturedMedia struct {\n\tMediaID string `json:\"media_id,omitempty\"`\n\tMediaDisplayName string `json:\"media_display_name,omitempty\"`\n\tMediaName string `json:\"media_name,omitempty\"`\n\tBackdrop string `json:\"backdrop,omitempty\"`\n\tBackdropHTML string `json:\"backdrop_html,omitempty\"`\n}\n\n\/\/ GetFeaturedMedia return information about user.\n\/\/\n\/\/ When a user isn’t found, this API returns a regular response but with all values containing null.\nfunc GetFeaturedMedia() (FeaturedMedia, error) {\n\t_, body, err := fasthttp.Get(nil, API+\"\/mediafeatured\")\n\tif err != nil {\n\t\treturn FeaturedMedia{}, err\n\t}\n\tvar obj FeaturedMedia\n\tif err = json.NewDecoder(bytes.NewReader(body)).Decode(&obj); err != nil {\n\t\treturn FeaturedMedia{}, err\n\t}\n\treturn obj, nil\n}\n<commit_msg>✅ Approved getFeaturedMedia<commit_after>package hitGox\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n)\n\n\/\/ FeaturedMedia is about featured live stream.\ntype FeaturedMedia struct {\n\tMediaID string `json:\"media_id\"`\n\tMediaDisplayName string `json:\"media_display_name\"`\n\tMediaName string `json:\"media_name\"`\n\tBackdrop string `json:\"backdrop\"`\n\tBackdropHTML string `json:\"backdrop_html\"`\n}\n\n\/\/ GetFeaturedMedia returns a featured live stream.\nfunc GetFeaturedMedia() (*FeaturedMedia, error) {\n\turl := APIEndpoint + \"\/mediafeatured\"\n\tresp, err := get(url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar obj FeaturedMedia\n\tif err = json.NewDecoder(bytes.NewReader(resp)).Decode(&obj); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &obj, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2015 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage gateway\n\nimport (\n\t\"fmt\"\n\t\"github.com\/thethingsnetwork\/core\/lorawan\/semtech\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/log\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/pointer\"\n\t\"io\"\n\t\"time\"\n)\n\ntype Forwarder struct {\n\tId [8]byte \/\/ Gateway's Identifier\n\tLogger log.Logger\n\talti int \/\/ GPS altitude in RX meters\n\tupnb uint \/\/ Number of upstream datagrams sent\n\tackn uint \/\/ Number of upstream datagrams that were acknowledged\n\tdwnb uint \/\/ Number of downlink datagrams received\n\tlati float64 \/\/ GPS latitude, North is +\n\tlong float64 \/\/ GPS longitude, East is +\n\trxfw uint \/\/ Number of radio packets forwarded\n\trxnb uint \/\/ Number of radio packets received\n\tadapters []io.ReadWriteCloser \/\/ List of downlink adapters\n\tpackets []semtech.Packet \/\/ Downlink packets received\n\tacks map[[4]byte]uint \/\/ adapterIndex | packet.Identifier | packet.Token\n\tcommands chan command \/\/ Concurrent access on gateway stats\n\tErrors chan error \/\/ Done channel\n}\n\ntype commandName string\ntype command struct {\n\tname commandName\n\tdata interface{}\n}\n\nconst (\n\tcmd_ACK commandName = \"Acknowledged\"\n\tcmd_EMIT commandName = \"Emitted\"\n\tcmd_RECVUP commandName = \"Radio Packet Received\"\n\tcmd_RECVDWN commandName = \"Dowlink Datagram Received\"\n\tcmd_FWD commandName = \"Forwarded\"\n\tcmd_FLUSH commandName = \"Flush\"\n\tcmd_STATS commandName = \"Stats\"\n)\n\n\/\/ NewForwarder create a forwarder instance bound to a set of routers.\nfunc NewForwarder(id [8]byte, adapters ...io.ReadWriteCloser) (*Forwarder, error) {\n\tif len(adapters) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least one adapter must be supplied\")\n\t}\n\n\tif len(adapters) > 255 { \/\/ cf fwd.acks\n\t\treturn nil, fmt.Errorf(\"Cannot connect more than 255 adapters\")\n\t}\n\n\tfwd := &Forwarder{\n\t\tId: id,\n\t\tLogger: log.VoidLogger{},\n\t\talti: 120,\n\t\tlati: 53.3702,\n\t\tlong: 4.8952,\n\t\tadapters: adapters,\n\t\tpackets: make([]semtech.Packet, 0),\n\t\tacks: make(map[[4]byte]uint),\n\t\tcommands: make(chan command),\n\t\tErrors: make(chan error, len(adapters)),\n\t}\n\n\tgo fwd.handleCommands()\n\n\t\/\/ Star listening to each adapter Read() method\n\tfor i, adapter := range fwd.adapters {\n\t\tgo fwd.listenAdapter(adapter, i)\n\t}\n\n\treturn fwd, nil\n}\n\n\/\/ log wraps the Logger.log method, this is nothing more than a shortcut\nfunc (fwd Forwarder) log(format string, a ...interface{}) {\n\tfwd.Logger.Log(format, a...)\n}\n\n\/\/ listenAdapter listen to incoming datagrams from an adapter. Non-valid packets are ignored.\nfunc (fwd Forwarder) listenAdapter(adapter io.ReadWriteCloser, index int) {\n\tfor {\n\t\tbuf := make([]byte, 1024)\n\t\tn, err := adapter.Read(buf)\n\t\tfwd.log(\"%d bytes received by adapter\\n\", n)\n\t\tif err != nil {\n\t\t\tfwd.log(\"Error: %+v\", err)\n\t\t\tfwd.Errors <- err\n\t\t\treturn \/\/ Error on reading, we assume the connection is closed \/ lost\n\t\t}\n\t\tfwd.log(\"Forwarder unmarshals datagram %x\\n\", buf[:n])\n\t\tpacket, err := semtech.Unmarshal(buf[:n])\n\t\tif err != nil {\n\t\t\tfwd.log(\"Error: %+v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch packet.Identifier {\n\t\tcase semtech.PUSH_ACK, semtech.PULL_ACK:\n\t\t\tfwd.commands <- command{cmd_ACK, ackToken(index, *packet)}\n\t\tcase semtech.PULL_RESP:\n\t\t\tfwd.commands <- command{cmd_RECVDWN, packet}\n\t\tdefault:\n\t\t\tfwd.log(\"Forwarder ignores contingent packet %+v\\n\", packet)\n\t\t}\n\n\t}\n}\n\n\/\/ handleCommands acts as a monitor between all goroutines that attempt to modify the forwarder\n\/\/ attributes. All sensitive operations are done by commands sent through an appropriate channel.\n\/\/ This method consumes commands from the channel until it's closed.\nfunc (fwd *Forwarder) handleCommands() {\n\tfor cmd := range fwd.commands {\n\t\tfwd.log(\"Fowarder executes command: %v\\n\", cmd.name)\n\n\t\tswitch cmd.name {\n\t\tcase cmd_ACK:\n\t\t\ttoken := cmd.data.([4]byte)\n\t\t\tif fwd.acks[token] > 0 {\n\t\t\t\tfwd.acks[token] -= 1\n\t\t\t\tfwd.ackn += 1\n\t\t\t}\n\t\tcase cmd_FWD:\n\t\t\tfwd.rxfw += 1\n\t\tcase cmd_EMIT:\n\t\t\ttoken := cmd.data.([4]byte)\n\t\t\tfwd.acks[token] += 1\n\t\t\tfwd.upnb += 1\n\t\tcase cmd_RECVUP:\n\t\t\tfwd.rxnb += 1\n\t\tcase cmd_RECVDWN:\n\t\t\tfwd.dwnb += 1\n\t\t\tfwd.packets = append(fwd.packets, *cmd.data.(*semtech.Packet))\n\t\tcase cmd_FLUSH:\n\t\t\tcmd.data.(chan []semtech.Packet) <- fwd.packets\n\t\t\tfwd.packets = make([]semtech.Packet, 0)\n\t\tcase cmd_STATS:\n\t\t\tvar ackr float64\n\t\t\tif fwd.upnb > 0 {\n\t\t\t\tackr = float64(fwd.ackn) \/ float64(fwd.upnb)\n\t\t\t}\n\t\t\tcmd.data.(chan semtech.Stat) <- semtech.Stat{\n\t\t\t\tAckr: &ackr,\n\t\t\t\tAlti: pointer.Int(fwd.alti),\n\t\t\t\tDwnb: pointer.Uint(fwd.dwnb),\n\t\t\t\tLati: pointer.Float64(fwd.lati),\n\t\t\t\tLong: pointer.Float64(fwd.long),\n\t\t\t\tRxfw: pointer.Uint(fwd.rxfw),\n\t\t\t\tRxnb: pointer.Uint(fwd.rxnb),\n\t\t\t\tRxok: pointer.Uint(fwd.rxnb),\n\t\t\t\tTime: pointer.Time(time.Now()),\n\t\t\t\tTxnb: pointer.Uint(0),\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Forward dispatch a packet to all connected routers.\nfunc (fwd Forwarder) Forward(packet semtech.Packet) error {\n\tfwd.commands <- command{cmd_RECVUP, nil}\n\tif packet.Identifier != semtech.PUSH_DATA {\n\t\treturn fmt.Errorf(\"Unable to forward with identifier %x\", packet.Identifier)\n\t}\n\n\traw, err := semtech.Marshal(packet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, adapter := range fwd.adapters {\n\t\tn, err := adapter.Write(raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n < len(raw) {\n\t\t\treturn fmt.Errorf(\"Packet was too long\")\n\t\t}\n\t\tfwd.commands <- command{cmd_EMIT, ackToken(i, packet)}\n\t}\n\n\tfwd.commands <- command{cmd_FWD, nil}\n\treturn nil\n}\n\n\/\/ Flush spits out all downlink packet received by the forwarder since the last flush.\nfunc (fwd Forwarder) Flush() []semtech.Packet {\n\tchpkt := make(chan []semtech.Packet)\n\tfwd.commands <- command{cmd_FLUSH, chpkt}\n\treturn <-chpkt\n}\n\n\/\/ Stats computes and return the forwarder statistics since it was created\nfunc (fwd Forwarder) Stats() semtech.Stat {\n\tchstats := make(chan semtech.Stat)\n\tfwd.commands <- command{cmd_STATS, chstats}\n\treturn <-chstats\n}\n\n\/\/ Stop terminate the forwarder activity. Closing all routers connections\nfunc (fwd Forwarder) Stop() error {\n\tvar errors []error\n\n\t\/\/ Close the uplink adapters\n\tfor _, adapter := range fwd.adapters {\n\t\terr := adapter.Close()\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"Unable to stop the forwarder: %+v\", errors)\n\t}\n\n\treturn nil\n}\n<commit_msg>[simulators.gateway] Fix stop goroutines leaks.<commit_after>\/\/ Copyright © 2015 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage gateway\n\nimport (\n\t\"fmt\"\n\t\"github.com\/thethingsnetwork\/core\/lorawan\/semtech\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/log\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/pointer\"\n\t\"io\"\n\t\"time\"\n)\n\ntype Forwarder struct {\n\tId [8]byte \/\/ Gateway's Identifier\n\tLogger log.Logger\n\talti int \/\/ GPS altitude in RX meters\n\tupnb uint \/\/ Number of upstream datagrams sent\n\tackn uint \/\/ Number of upstream datagrams that were acknowledged\n\tdwnb uint \/\/ Number of downlink datagrams received\n\tlati float64 \/\/ GPS latitude, North is +\n\tlong float64 \/\/ GPS longitude, East is +\n\trxfw uint \/\/ Number of radio packets forwarded\n\trxnb uint \/\/ Number of radio packets received\n\tadapters []io.ReadWriteCloser \/\/ List of downlink adapters\n\tpackets []semtech.Packet \/\/ Downlink packets received\n\tacks map[[4]byte]uint \/\/ adapterIndex | packet.Identifier | packet.Token\n\tcommands chan command \/\/ Concurrent access on gateway stats\n\tquit chan error \/\/ Adapter which loses connection spit here\n}\n\ntype commandName string\ntype command struct {\n\tname commandName\n\tdata interface{}\n}\n\nconst (\n\tcmd_ACK commandName = \"Acknowledged\"\n\tcmd_EMIT commandName = \"Emitted\"\n\tcmd_RECVUP commandName = \"Radio Packet Received\"\n\tcmd_RECVDWN commandName = \"Dowlink Datagram Received\"\n\tcmd_FWD commandName = \"Forwarded\"\n\tcmd_FLUSH commandName = \"Flush\"\n\tcmd_STATS commandName = \"Stats\"\n)\n\n\/\/ NewForwarder create a forwarder instance bound to a set of routers.\nfunc NewForwarder(id [8]byte, adapters ...io.ReadWriteCloser) (*Forwarder, error) {\n\tif len(adapters) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least one adapter must be supplied\")\n\t}\n\n\tif len(adapters) > 255 { \/\/ cf fwd.acks\n\t\treturn nil, fmt.Errorf(\"Cannot connect more than 255 adapters\")\n\t}\n\n\tfwd := &Forwarder{\n\t\tId: id,\n\t\tLogger: log.VoidLogger{},\n\t\talti: 120,\n\t\tlati: 53.3702,\n\t\tlong: 4.8952,\n\t\tadapters: adapters,\n\t\tpackets: make([]semtech.Packet, 0),\n\t\tacks: make(map[[4]byte]uint),\n\t\tcommands: make(chan command),\n\t\tquit: make(chan error, len(adapters)),\n\t}\n\n\tgo fwd.handleCommands()\n\n\t\/\/ Star listening to each adapter Read() method\n\tfor i, adapter := range fwd.adapters {\n\t\tgo fwd.listenAdapter(adapter, i)\n\t}\n\n\treturn fwd, nil\n}\n\n\/\/ log wraps the Logger.log method, this is nothing more than a shortcut\nfunc (fwd Forwarder) log(format string, a ...interface{}) {\n\tfwd.Logger.Log(format, a...) \/\/ NOTE: concurrent-safe ?\n}\n\n\/\/ listenAdapter listen to incoming datagrams from an adapter. Non-valid packets are ignored.\nfunc (fwd Forwarder) listenAdapter(adapter io.ReadWriteCloser, index int) {\n\tfor {\n\t\tbuf := make([]byte, 1024)\n\t\tn, err := adapter.Read(buf)\n\t\tfwd.log(\"%d bytes received by adapter\\n\", n)\n\t\tif err != nil {\n\t\t\tfwd.log(\"Error: %+v\", err)\n\t\t\tfwd.quit <- err\n\t\t\treturn \/\/ Connection lost \/ closed\n\t\t}\n\t\tfwd.log(\"Forwarder unmarshals datagram %x\\n\", buf[:n])\n\t\tpacket, err := semtech.Unmarshal(buf[:n])\n\t\tif err != nil {\n\t\t\tfwd.log(\"Error: %+v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch packet.Identifier {\n\t\tcase semtech.PUSH_ACK, semtech.PULL_ACK:\n\t\t\tfwd.commands <- command{cmd_ACK, ackToken(index, *packet)}\n\t\tcase semtech.PULL_RESP:\n\t\t\tfwd.commands <- command{cmd_RECVDWN, packet}\n\t\tdefault:\n\t\t\tfwd.log(\"Forwarder ignores contingent packet %+v\\n\", packet)\n\t\t}\n\t}\n}\n\n\/\/ handleCommands acts as a monitor between all goroutines that attempt to modify the forwarder\n\/\/ attributes. All sensitive operations are done by commands sent through an appropriate channel.\n\/\/ This method consumes commands from the channel until it's closed.\nfunc (fwd *Forwarder) handleCommands() {\n\tfor cmd := range fwd.commands {\n\t\tfwd.log(\"Fowarder executes command: %v\\n\", cmd.name)\n\n\t\tswitch cmd.name {\n\t\tcase cmd_ACK:\n\t\t\ttoken := cmd.data.([4]byte)\n\t\t\tif fwd.acks[token] > 0 {\n\t\t\t\tfwd.acks[token] -= 1\n\t\t\t\tfwd.ackn += 1\n\t\t\t}\n\t\tcase cmd_FWD:\n\t\t\tfwd.rxfw += 1\n\t\tcase cmd_EMIT:\n\t\t\ttoken := cmd.data.([4]byte)\n\t\t\tfwd.acks[token] += 1\n\t\t\tfwd.upnb += 1\n\t\tcase cmd_RECVUP:\n\t\t\tfwd.rxnb += 1\n\t\tcase cmd_RECVDWN:\n\t\t\tfwd.dwnb += 1\n\t\t\tfwd.packets = append(fwd.packets, *cmd.data.(*semtech.Packet))\n\t\tcase cmd_FLUSH:\n\t\t\tcmd.data.(chan []semtech.Packet) <- fwd.packets\n\t\t\tfwd.packets = make([]semtech.Packet, 0)\n\t\tcase cmd_STATS:\n\t\t\tvar ackr float64\n\t\t\tif fwd.upnb > 0 {\n\t\t\t\tackr = float64(fwd.ackn) \/ float64(fwd.upnb)\n\t\t\t}\n\t\t\tcmd.data.(chan semtech.Stat) <- semtech.Stat{\n\t\t\t\tAckr: &ackr,\n\t\t\t\tAlti: pointer.Int(fwd.alti),\n\t\t\t\tDwnb: pointer.Uint(fwd.dwnb),\n\t\t\t\tLati: pointer.Float64(fwd.lati),\n\t\t\t\tLong: pointer.Float64(fwd.long),\n\t\t\t\tRxfw: pointer.Uint(fwd.rxfw),\n\t\t\t\tRxnb: pointer.Uint(fwd.rxnb),\n\t\t\t\tRxok: pointer.Uint(fwd.rxnb),\n\t\t\t\tTime: pointer.Time(time.Now()),\n\t\t\t\tTxnb: pointer.Uint(0),\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Forward dispatch a packet to all connected routers.\nfunc (fwd Forwarder) Forward(packet semtech.Packet) error {\n\tfwd.commands <- command{cmd_RECVUP, nil}\n\tif packet.Identifier != semtech.PUSH_DATA {\n\t\treturn fmt.Errorf(\"Unable to forward with identifier %x\", packet.Identifier)\n\t}\n\n\traw, err := semtech.Marshal(packet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, adapter := range fwd.adapters {\n\t\tn, err := adapter.Write(raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n < len(raw) {\n\t\t\treturn fmt.Errorf(\"Packet was too long\")\n\t\t}\n\t\tfwd.commands <- command{cmd_EMIT, ackToken(i, packet)}\n\t}\n\n\tfwd.commands <- command{cmd_FWD, nil}\n\treturn nil\n}\n\n\/\/ Flush spits out all downlink packet received by the forwarder since the last flush.\nfunc (fwd Forwarder) Flush() []semtech.Packet {\n\tchpkt := make(chan []semtech.Packet)\n\tfwd.commands <- command{cmd_FLUSH, chpkt}\n\treturn <-chpkt\n}\n\n\/\/ Stats computes and return the forwarder statistics since it was created\nfunc (fwd Forwarder) Stats() semtech.Stat {\n\tchstats := make(chan semtech.Stat)\n\tfwd.commands <- command{cmd_STATS, chstats}\n\treturn <-chstats\n}\n\n\/\/ Stop terminate the forwarder activity. Closing all routers connections\nfunc (fwd Forwarder) Stop() error {\n\tvar errors []error\n\n\t\/\/ Close the uplink adapters\n\tfor _, adapter := range fwd.adapters {\n\t\terr := adapter.Close()\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\t\/\/ Wait for each adapter to terminate\n\tfor range fwd.adapters {\n\t\t<-fwd.quit\n\t}\n\n\tclose(fwd.commands)\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"Unable to stop the forwarder: %+v\", errors)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package citadel\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/samalba\/dockerclient\"\n)\n\ntype Engine struct {\n\tID string `json:\"id,omitempty\"`\n\tAddr string `json:\"addr,omitempty\"`\n\tCpus float64 `json:\"cpus,omitempty\"`\n\tMemory float64 `json:\"memory,omitempty\"`\n\tLabels []string `json:\"labels,omitempty\"`\n\n\tclient *dockerclient.DockerClient\n\teventHandler EventHandler\n}\n\nfunc (e *Engine) Connect(config *tls.Config) error {\n\tc, err := dockerclient.NewDockerClient(e.Addr, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.client = c\n\n\treturn nil\n}\n\nfunc (e *Engine) SetClient(c *dockerclient.DockerClient) {\n\te.client = c\n}\n\n\/\/ IsConnected returns true if the engine is connected to a remote docker API\nfunc (e *Engine) IsConnected() bool {\n\treturn e.client != nil\n}\n\nfunc (e *Engine) Pull(image string) error {\n\timageInfo := ParseImageName(image)\n\tif err := e.client.PullImage(imageInfo.Name, imageInfo.Tag); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *Engine) Start(c *Container, pullImage bool) error {\n\tvar (\n\t\terr error\n\t\tenv = []string{}\n\t\tclient = e.client\n\t\ti = c.Image\n\t)\n\tc.Engine = e\n\n\tfor k, v := range i.Environment {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tenv = append(env,\n\t\tfmt.Sprintf(\"_citadel_type=%s\", i.Type),\n\t\tfmt.Sprintf(\"_citadel_labels=%s\", strings.Join(i.Labels, \",\")),\n\t)\n\n\tvols := make(map[string]struct{})\n\tbinds := []string{}\n\tfor _, v := range i.Volumes {\n\t\tif strings.Index(v, \":\") > -1 {\n\t\t\tcv := strings.Split(v, \":\")\n\t\t\tbinds = append(binds, v)\n\t\t\tv = cv[1]\n\t\t}\n\t\tvols[v] = struct{}{}\n\t}\n\tconfig := &dockerclient.ContainerConfig{\n\t\tHostname: i.Hostname,\n\t\tDomainname: i.Domainname,\n\t\tImage: i.Name,\n\t\tCmd: i.Args,\n\t\tMemory: int(i.Memory) * 1024 * 1024,\n\t\tEnv: env,\n\t\tCpuShares: int(i.Cpus * 100.0 \/ e.Cpus),\n\t\tExposedPorts: make(map[string]struct{}),\n\t\tVolumes: vols,\n\t}\n\n\tlinks := []string{}\n\tfor k, v := range i.Links {\n\t\tlinks = append(links, fmt.Sprintf(\"%s:%s\", k, v))\n\t}\n\thostConfig := &dockerclient.HostConfig{\n\t\tPublishAllPorts: i.Publish,\n\t\tPortBindings: make(map[string][]dockerclient.PortBinding),\n\t\tLinks: links,\n\t\tBinds: binds,\n\t\tRestartPolicy: dockerclient.RestartPolicy{\n\t\t\tName: i.RestartPolicy.Name,\n\t\t\tMaximumRetryCount: i.RestartPolicy.MaximumRetryCount,\n\t\t},\n\t\tNetworkMode: i.NetworkMode,\n\t}\n\n\tfor _, b := range i.BindPorts {\n\t\tkey := fmt.Sprintf(\"%d\/%s\", b.ContainerPort, b.Proto)\n\t\tconfig.ExposedPorts[key] = struct{}{}\n\n\t\thostConfig.PortBindings[key] = []dockerclient.PortBinding{\n\t\t\t{\n\t\t\t\tHostIp: b.HostIp,\n\t\t\t\tHostPort: fmt.Sprint(b.Port),\n\t\t\t},\n\t\t}\n\t}\n\n\tif pullImage {\n\t\tif err := e.Pull(i.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.ID, err = client.CreateContainer(config, c.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := client.StartContainer(c.ID, hostConfig); err != nil {\n\t\treturn err\n\t}\n\n\treturn e.updatePortInformation(c)\n}\n\nfunc (e *Engine) ListImages() ([]string, error) {\n\timages, err := e.client.ListImages()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := []string{}\n\n\tfor _, i := range images {\n\t\tfor _, t := range i.RepoTags {\n\t\t\tout = append(out, t)\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc (e *Engine) updatePortInformation(c *Container) error {\n\tinfo, err := e.client.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn parsePortInformation(info, c)\n}\n\nfunc (e *Engine) ListContainers(all bool) ([]*Container, error) {\n\tout := []*Container{}\n\n\tc, err := e.client.ListContainers(all, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ci := range c {\n\t\tcc, err := FromDockerContainer(ci.Id, ci.Image, e)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, cc)\n\t}\n\n\treturn out, nil\n}\n\nfunc (e *Engine) Logs(container *Container, stdout bool, stderr bool) (io.ReadCloser, error) {\n\tlogopts := &dockerclient.LogOptions{\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t\tTimestamps: false,\n\t\tFollow: false,\n\t\tTail: 0,\n\t}\n\treturn e.client.ContainerLogs(container.ID, logopts)\n}\n\nfunc (e *Engine) Kill(container *Container, sig int) error {\n\treturn e.client.KillContainer(container.ID)\n}\n\nfunc (e *Engine) Stop(container *Container) error {\n\treturn e.client.StopContainer(container.ID, 8)\n}\n\nfunc (e *Engine) Restart(container *Container, timeout int) error {\n\treturn e.client.RestartContainer(container.ID, timeout)\n}\n\nfunc (e *Engine) Remove(container *Container) error {\n\treturn e.client.RemoveContainer(container.ID, true)\n}\n\nfunc (e *Engine) Events(h EventHandler) error {\n\tif e.eventHandler != nil {\n\t\treturn fmt.Errorf(\"event handler already set\")\n\t}\n\te.eventHandler = h\n\n\te.client.StartMonitorEvents(e.handler)\n\n\treturn nil\n}\n\nfunc (e *Engine) String() string {\n\treturn fmt.Sprintf(\"engine %s addr %s\", e.ID, e.Addr)\n}\n\nfunc (e *Engine) handler(ev *dockerclient.Event, args ...interface{}) {\n\tevent := &Event{\n\t\tEngine: e,\n\t\tType: ev.Status,\n\t\tTime: time.Unix(int64(ev.Time), 0),\n\t}\n\n\tcontainer, err := FromDockerContainer(ev.Id, ev.From, e)\n\tif err != nil {\n\t\t\/\/ TODO: un fuck this shit, fuckin handler\n\t\treturn\n\t}\n\n\tevent.Container = container\n\n\te.eventHandler.Handle(event)\n}\n<commit_msg>ListContainers API now includes filter parameter that we don't need for the moment<commit_after>package citadel\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/samalba\/dockerclient\"\n)\n\ntype Engine struct {\n\tID string `json:\"id,omitempty\"`\n\tAddr string `json:\"addr,omitempty\"`\n\tCpus float64 `json:\"cpus,omitempty\"`\n\tMemory float64 `json:\"memory,omitempty\"`\n\tLabels []string `json:\"labels,omitempty\"`\n\n\tclient *dockerclient.DockerClient\n\teventHandler EventHandler\n}\n\nfunc (e *Engine) Connect(config *tls.Config) error {\n\tc, err := dockerclient.NewDockerClient(e.Addr, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.client = c\n\n\treturn nil\n}\n\nfunc (e *Engine) SetClient(c *dockerclient.DockerClient) {\n\te.client = c\n}\n\n\/\/ IsConnected returns true if the engine is connected to a remote docker API\nfunc (e *Engine) IsConnected() bool {\n\treturn e.client != nil\n}\n\nfunc (e *Engine) Pull(image string) error {\n\timageInfo := ParseImageName(image)\n\tif err := e.client.PullImage(imageInfo.Name, imageInfo.Tag); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *Engine) Start(c *Container, pullImage bool) error {\n\tvar (\n\t\terr error\n\t\tenv = []string{}\n\t\tclient = e.client\n\t\ti = c.Image\n\t)\n\tc.Engine = e\n\n\tfor k, v := range i.Environment {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tenv = append(env,\n\t\tfmt.Sprintf(\"_citadel_type=%s\", i.Type),\n\t\tfmt.Sprintf(\"_citadel_labels=%s\", strings.Join(i.Labels, \",\")),\n\t)\n\n\tvols := make(map[string]struct{})\n\tbinds := []string{}\n\tfor _, v := range i.Volumes {\n\t\tif strings.Index(v, \":\") > -1 {\n\t\t\tcv := strings.Split(v, \":\")\n\t\t\tbinds = append(binds, v)\n\t\t\tv = cv[1]\n\t\t}\n\t\tvols[v] = struct{}{}\n\t}\n\tconfig := &dockerclient.ContainerConfig{\n\t\tHostname: i.Hostname,\n\t\tDomainname: i.Domainname,\n\t\tImage: i.Name,\n\t\tCmd: i.Args,\n\t\tMemory: int(i.Memory) * 1024 * 1024,\n\t\tEnv: env,\n\t\tCpuShares: int(i.Cpus * 100.0 \/ e.Cpus),\n\t\tExposedPorts: make(map[string]struct{}),\n\t\tVolumes: vols,\n\t}\n\n\tlinks := []string{}\n\tfor k, v := range i.Links {\n\t\tlinks = append(links, fmt.Sprintf(\"%s:%s\", k, v))\n\t}\n\thostConfig := &dockerclient.HostConfig{\n\t\tPublishAllPorts: i.Publish,\n\t\tPortBindings: make(map[string][]dockerclient.PortBinding),\n\t\tLinks: links,\n\t\tBinds: binds,\n\t\tRestartPolicy: dockerclient.RestartPolicy{\n\t\t\tName: i.RestartPolicy.Name,\n\t\t\tMaximumRetryCount: i.RestartPolicy.MaximumRetryCount,\n\t\t},\n\t\tNetworkMode: i.NetworkMode,\n\t}\n\n\tfor _, b := range i.BindPorts {\n\t\tkey := fmt.Sprintf(\"%d\/%s\", b.ContainerPort, b.Proto)\n\t\tconfig.ExposedPorts[key] = struct{}{}\n\n\t\thostConfig.PortBindings[key] = []dockerclient.PortBinding{\n\t\t\t{\n\t\t\t\tHostIp: b.HostIp,\n\t\t\t\tHostPort: fmt.Sprint(b.Port),\n\t\t\t},\n\t\t}\n\t}\n\n\tif pullImage {\n\t\tif err := e.Pull(i.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.ID, err = client.CreateContainer(config, c.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := client.StartContainer(c.ID, hostConfig); err != nil {\n\t\treturn err\n\t}\n\n\treturn e.updatePortInformation(c)\n}\n\nfunc (e *Engine) ListImages() ([]string, error) {\n\timages, err := e.client.ListImages()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := []string{}\n\n\tfor _, i := range images {\n\t\tfor _, t := range i.RepoTags {\n\t\t\tout = append(out, t)\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc (e *Engine) updatePortInformation(c *Container) error {\n\tinfo, err := e.client.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn parsePortInformation(info, c)\n}\n\nfunc (e *Engine) ListContainers(all bool) ([]*Container, error) {\n\tout := []*Container{}\n\n\tc, err := e.client.ListContainers(all, true, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ci := range c {\n\t\tcc, err := FromDockerContainer(ci.Id, ci.Image, e)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, cc)\n\t}\n\n\treturn out, nil\n}\n\nfunc (e *Engine) Logs(container *Container, stdout bool, stderr bool) (io.ReadCloser, error) {\n\tlogopts := &dockerclient.LogOptions{\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t\tTimestamps: false,\n\t\tFollow: false,\n\t\tTail: 0,\n\t}\n\treturn e.client.ContainerLogs(container.ID, logopts)\n}\n\nfunc (e *Engine) Kill(container *Container, sig int) error {\n\treturn e.client.KillContainer(container.ID)\n}\n\nfunc (e *Engine) Stop(container *Container) error {\n\treturn e.client.StopContainer(container.ID, 8)\n}\n\nfunc (e *Engine) Restart(container *Container, timeout int) error {\n\treturn e.client.RestartContainer(container.ID, timeout)\n}\n\nfunc (e *Engine) Remove(container *Container) error {\n\treturn e.client.RemoveContainer(container.ID, true)\n}\n\nfunc (e *Engine) Events(h EventHandler) error {\n\tif e.eventHandler != nil {\n\t\treturn fmt.Errorf(\"event handler already set\")\n\t}\n\te.eventHandler = h\n\n\te.client.StartMonitorEvents(e.handler)\n\n\treturn nil\n}\n\nfunc (e *Engine) String() string {\n\treturn fmt.Sprintf(\"engine %s addr %s\", e.ID, e.Addr)\n}\n\nfunc (e *Engine) handler(ev *dockerclient.Event, args ...interface{}) {\n\tevent := &Event{\n\t\tEngine: e,\n\t\tType: ev.Status,\n\t\tTime: time.Unix(int64(ev.Time), 0),\n\t}\n\n\tcontainer, err := FromDockerContainer(ev.Id, ev.From, e)\n\tif err != nil {\n\t\t\/\/ TODO: un fuck this shit, fuckin handler\n\t\treturn\n\t}\n\n\tevent.Container = container\n\n\te.eventHandler.Handle(event)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nfunc main() {\n}\n<commit_msg>Throw in some initial basic code so we have something to work with<commit_after>package main\n\nimport (\n\t\"log\"\n\n\t\"pault.ag\/go\/debian\/control\"\n\t\"pault.ag\/go\/debian\/dependency\"\n\t\"pault.ag\/go\/resolver\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.Lshortfile)\n\n\t\/\/ TODO configurable path? perhaps allow for an optional *.dsc instead?\n\tcon, err := control.ParseControlFile(\"debian\/control\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\t\/\/ TODO configurable or something\n\tsuite := \"unstable\"\n\tarch := \"amd64\"\n\tindex, err := resolver.GetBinaryIndex(\n\t\t\"http:\/\/httpredir.debian.org\/debian\",\n\t\tsuite,\n\t\t\"main\",\n\t\tarch,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tallCan := true\n\tallPossi := append(\n\t\tcon.Source.BuildDepends.GetAllPossibilities(),\n\t\tcon.Source.BuildDependsIndep.GetAllPossibilities()...,\n\t)\n\n\tdepArch, err := dependency.ParseArch(\"any\")\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\tallBins := []control.BinaryIndex{}\n\tfor _, possi := range allPossi {\n\t\tcan, why, bins := index.ExplainSatisfies(*depArch, possi)\n\t\tif !can {\n\t\t\tlog.Printf(\"%s: %s\\n\", possi.Name, why)\n\t\t\tallCan = false\n\t\t} else {\n\t\t\t\/\/ TODO more smarts for which dep out of bins to use\n\t\t\tallBins = append(allBins, bins[0])\n\t\t}\n\t}\n\n\tif !allCan {\n\t\tlog.Fatalf(\"Unsatisfied possi; exiting.\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ghosttohugo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n)\n\nfunc cardBookmark(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tmeta, ok := m[\"metadata\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload does not contain metadata\")\n\t\treturn \"\"\n\t}\n\n\tmetadata, ok := meta.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload metadata was not correct type\")\n\t\treturn \"\"\n\t}\n\n\turl, ok := metadata[\"url\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: missing url\")\n\t\treturn \"\"\n\t}\n\ttitle, ok := metadata[\"title\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: missing title\")\n\t\treturn \"\"\n\t}\n\tdescription, ok := metadata[\"description\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: missing description\")\n\t\treturn \"\"\n\t}\n\n\tvar thumbnail, icon, author, publisher, caption string\n\tthumb, ok := metadata[\"thumbnail\"]\n\tif ok && thumb != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found thumbnail\")\n\t\tthumbnail = stripContentFolder(thumb.(string))\n\t}\n\ticoninfo, ok := metadata[\"icon\"]\n\tif ok && iconinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found icon\")\n\t\ticon = stripContentFolder(iconinfo.(string))\n\t}\n\tauthorinfo, ok := metadata[\"author\"]\n\tif ok && authorinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found author\")\n\t\tauthor = authorinfo.(string)\n\t}\n\tpublisherinfo, ok := metadata[\"publisher\"]\n\tif ok && publisherinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found publisher\")\n\t\tpublisher = publisherinfo.(string)\n\t}\n\tcapt, ok := m[\"caption\"]\n\tif ok && capt != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found caption\")\n\t\tcaption = capt.(string)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"{{< bookmark url=%q title=%q description=%q icon=%q\"+\n\t\t\t\" author=%q publisher=%q thumbnail=%q caption=%q >}}\",\n\t\turl.(string),\n\t\ttitle.(string),\n\t\tdescription.(string),\n\t\ticon,\n\t\tauthor,\n\t\tpublisher,\n\t\tthumbnail,\n\t\tcaption,\n\t)\n}\n\nfunc cardCode(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardCode: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tcode, ok := m[\"code\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardCode: missing code\")\n\t\treturn \"\"\n\t}\n\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(\"```\")\n\tif lang, ok := m[\"language\"]; ok {\n\t\tbuf.WriteString(lang.(string))\n\t}\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(code.(string))\n\tbuf.WriteString(\"\\n```\\n\")\n\n\treturn buf.String()\n}\n\nfunc cardEmbed(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardEmbed: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\thtml, ok := m[\"html\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardEmbed: missing html\")\n\t\treturn \"\"\n\t}\n\n\treturn html.(string)\n}\nfunc cardGallery(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardGallery: payload not correct type\")\n\t\treturn \"\"\n\t}\n\timages, ok := m[\"images\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardGallery: missing images\")\n\t\treturn \"\"\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"{{< gallery\")\n\tif caption, ok := m[\"caption\"]; ok {\n\t\tbuf.WriteString(fmt.Sprintf(\" caption=\\\"%s\\\"\", caption.(string)))\n\t}\n\tbuf.WriteString(\" >}}\\n\")\n\n\tfor _, img := range images.([]interface{}) {\n\t\timage, ok := img.(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteString(\"{{< galleryImg \")\n\t\tbuf.WriteString(fmt.Sprintf(\" src=%q\", stripContentFolder(image[\"src\"].(string))))\n\t\tbuf.WriteString(fmt.Sprintf(\" width=\\\"%.0f\\\"\", image[\"width\"].(float64)))\n\t\tbuf.WriteString(fmt.Sprintf(\" height=\\\"%.0f\\\"\", image[\"height\"].(float64)))\n\t\tif alt, ok := image[\"alt\"]; ok {\n\t\t\tbuf.WriteString(fmt.Sprintf(\" alt=%q\", alt.(string)))\n\t\t}\n\t\tif title, ok := image[\"title\"]; ok {\n\t\t\tbuf.WriteString(fmt.Sprintf(\" title=%q\", title.(string)))\n\t\t}\n\t\tbuf.WriteString(\" >}}\")\n\t}\n\n\tbuf.WriteString(\"{{< \/gallery >}}\")\n\n\treturn buf.String()\n}\n\nfunc cardHR(payload interface{}) string {\n\treturn \"---\\n\"\n}\n\nfunc cardHTML(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardHTML: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tif html, ok := m[\"html\"]; ok {\n\t\treturn html.(string)\n\t}\n\n\treturn \"\"\n}\n\nfunc cardImage(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardImage: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tsrc, ok := m[\"src\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardImage: missing src\")\n\t\treturn \"\"\n\t}\n\n\tif caption, ok := m[\"caption\"]; ok {\n\t\treturn fmt.Sprintf(\n\t\t\t\"{{< figure src=\\\"%s\\\" caption=\\\"%s\\\" >}}\\n\",\n\t\t\tstripContentFolder(src.(string)),\n\t\t\tcaption,\n\t\t)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"{{< figure src=\\\"%s\\\" >}}\\n\",\n\t\tstripContentFolder(src.(string)),\n\t)\n}\n\nfunc cardMarkdown(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardMarkdown: payload not correct type\")\n\t\treturn \"\"\n\t}\n\tif markdown, ok := m[\"markdown\"]; ok {\n\t\treturn fmt.Sprintf(\"%s\\n\", markdown.(string))\n\t}\n\treturn \"\"\n}\n<commit_msg>resolve nil description bug (#37)<commit_after>package ghosttohugo\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\tjww \"github.com\/spf13\/jwalterweatherman\"\n)\n\nfunc cardBookmark(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tmeta, ok := m[\"metadata\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload does not contain metadata\")\n\t\treturn \"\"\n\t}\n\n\tmetadata, ok := meta.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: payload metadata was not correct type\")\n\t\treturn \"\"\n\t}\n\n\turl, ok := metadata[\"url\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: missing url\")\n\t\treturn \"\"\n\t}\n\ttitle, ok := metadata[\"title\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardBookmark: missing title\")\n\t\treturn \"\"\n\t}\n\tdescription, ok := metadata[\"description\"]\n\tif !ok || description == nil {\n\t\tjww.ERROR.Println(\"cardBookmark: missing description\")\n\t\treturn \"\"\n\t}\n\n\tvar thumbnail, icon, author, publisher, caption string\n\tthumb, ok := metadata[\"thumbnail\"]\n\tif ok && thumb != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found thumbnail\")\n\t\tthumbnail = stripContentFolder(thumb.(string))\n\t}\n\ticoninfo, ok := metadata[\"icon\"]\n\tif ok && iconinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found icon\")\n\t\ticon = stripContentFolder(iconinfo.(string))\n\t}\n\tauthorinfo, ok := metadata[\"author\"]\n\tif ok && authorinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found author\")\n\t\tauthor = authorinfo.(string)\n\t}\n\tpublisherinfo, ok := metadata[\"publisher\"]\n\tif ok && publisherinfo != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found publisher\")\n\t\tpublisher = publisherinfo.(string)\n\t}\n\tcapt, ok := m[\"caption\"]\n\tif ok && capt != nil {\n\t\tjww.TRACE.Println(\"cardBookmark: found caption\")\n\t\tcaption = capt.(string)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"{{< bookmark url=%q title=%q description=%q icon=%q\"+\n\t\t\t\" author=%q publisher=%q thumbnail=%q caption=%q >}}\",\n\t\turl.(string),\n\t\ttitle.(string),\n\t\tdescription.(string),\n\t\ticon,\n\t\tauthor,\n\t\tpublisher,\n\t\tthumbnail,\n\t\tcaption,\n\t)\n}\n\nfunc cardCode(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardCode: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tcode, ok := m[\"code\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardCode: missing code\")\n\t\treturn \"\"\n\t}\n\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(\"```\")\n\tif lang, ok := m[\"language\"]; ok {\n\t\tbuf.WriteString(lang.(string))\n\t}\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(code.(string))\n\tbuf.WriteString(\"\\n```\\n\")\n\n\treturn buf.String()\n}\n\nfunc cardEmbed(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardEmbed: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\thtml, ok := m[\"html\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardEmbed: missing html\")\n\t\treturn \"\"\n\t}\n\n\treturn html.(string)\n}\nfunc cardGallery(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardGallery: payload not correct type\")\n\t\treturn \"\"\n\t}\n\timages, ok := m[\"images\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardGallery: missing images\")\n\t\treturn \"\"\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"{{< gallery\")\n\tif caption, ok := m[\"caption\"]; ok {\n\t\tbuf.WriteString(fmt.Sprintf(\" caption=\\\"%s\\\"\", caption.(string)))\n\t}\n\tbuf.WriteString(\" >}}\\n\")\n\n\tfor _, img := range images.([]interface{}) {\n\t\timage, ok := img.(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteString(\"{{< galleryImg \")\n\t\tbuf.WriteString(fmt.Sprintf(\" src=%q\", stripContentFolder(image[\"src\"].(string))))\n\t\tbuf.WriteString(fmt.Sprintf(\" width=\\\"%.0f\\\"\", image[\"width\"].(float64)))\n\t\tbuf.WriteString(fmt.Sprintf(\" height=\\\"%.0f\\\"\", image[\"height\"].(float64)))\n\t\tif alt, ok := image[\"alt\"]; ok {\n\t\t\tbuf.WriteString(fmt.Sprintf(\" alt=%q\", alt.(string)))\n\t\t}\n\t\tif title, ok := image[\"title\"]; ok {\n\t\t\tbuf.WriteString(fmt.Sprintf(\" title=%q\", title.(string)))\n\t\t}\n\t\tbuf.WriteString(\" >}}\")\n\t}\n\n\tbuf.WriteString(\"{{< \/gallery >}}\")\n\n\treturn buf.String()\n}\n\nfunc cardHR(payload interface{}) string {\n\treturn \"---\\n\"\n}\n\nfunc cardHTML(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardHTML: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tif html, ok := m[\"html\"]; ok {\n\t\treturn html.(string)\n\t}\n\n\treturn \"\"\n}\n\nfunc cardImage(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardImage: payload not correct type\")\n\t\treturn \"\"\n\t}\n\n\tsrc, ok := m[\"src\"]\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardImage: missing src\")\n\t\treturn \"\"\n\t}\n\n\tif caption, ok := m[\"caption\"]; ok {\n\t\treturn fmt.Sprintf(\n\t\t\t\"{{< figure src=\\\"%s\\\" caption=\\\"%s\\\" >}}\\n\",\n\t\t\tstripContentFolder(src.(string)),\n\t\t\tcaption,\n\t\t)\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"{{< figure src=\\\"%s\\\" >}}\\n\",\n\t\tstripContentFolder(src.(string)),\n\t)\n}\n\nfunc cardMarkdown(payload interface{}) string {\n\tm, ok := payload.(map[string]interface{})\n\tif !ok {\n\t\tjww.ERROR.Println(\"cardMarkdown: payload not correct type\")\n\t\treturn \"\"\n\t}\n\tif markdown, ok := m[\"markdown\"]; ok {\n\t\treturn fmt.Sprintf(\"%s\\n\", markdown.(string))\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/koding\/kite\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"log\"\n\t\"github.com\/bitly\/go-nsq\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar GameState [][]uint8\nvar XSize uint8\nvar YSize uint8\n\nfunc main() {\n\tfmt.Println(\"Starting Controller\")\n\tXSize = 25\n\tYSize = 25\n\treset()\n\truntime.GOMAXPROCS(2)\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\tgo startKite()\n\tgo startMessaging()\n\twg.Wait()\n}\n\nfunc startKite() {\n\tfmt.Println(\"Controller starting kite\")\n\tk := kite.New(\"controller\", \"1.0.0\")\n\tk.Config.Port = 6001\n\tk.Config.DisableAuthentication = true\n\tk.HandleFunc(\"hello\", hello)\n\tk.Run()\n}\n\nfunc startMessaging() {\n\tfmt.Println(\"Controller starting NSQ\")\n\tconfig := nsq.NewConfig()\n\n \tw, _ := nsq.NewProducer(os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\", config)\n\n\tfor {\n\t\ttick(w)\n\t\ttime.Sleep(time.Second * 1)\n\t}\t\n\tw.Stop()\n\n}\n\nfunc tick(w *nsq.Producer) {\n\tevaluate()\t\n\terr := w.Publish(\"tick\", []byte(\"test\"))\n\tif err != nil {\n\t\tlog.Panic(\"Could not connect\")\n\t}\n\t\n}\n\nfunc hello(r *kite.Request) (interface{}, error) {\n\n\tfmt.Println(\"Controller got hello\")\n\n\t\/\/ You can return anything as result, as long as it is JSON marshalable.\n\treturn nil, nil\n}\n\nfunc update(r *kite.Request) (interface{}, error) {\n fmt.Println(\"Controller received state update\")\n return nil, nil\n}\n\nfunc getState(r *kite.Request) (interface{}, error) {\n fmt.Println(\"Controller received state request\")\n return GameState, nil\n}\n\nfunc reset() {\n\tfmt.Println(\"Resetting Game State\")\n\t\/\/ Allocate the top-level slice.\n\tGameState := make([][]uint8, YSize) \/\/ One row per unit of y.\n\t\/\/ Loop over the rows, allocating the slice for each row.\n\tfor i := range GameState {\n\t\tGameState[i] = make([]uint8, XSize)\n\t\tfor j := range GameState[i] {\n\t\t\tGameState[i][j] = 1\n\t\t}\n\t}\n}\n\nfunc evaluate() {\n\tfmt.Println(\"Evaluting Game State\")\n\tfor i := range GameState {\n\t\tfor j := range GameState[i] {\n\t\t\tfmt.Print(GameState[i][j])\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\n<commit_msg>adding<commit_after>package main\n\nimport (\n\t\"github.com\/koding\/kite\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"log\"\n\t\"github.com\/bitly\/go-nsq\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar GameState [][]uint8\nvar XSize uint8\nvar YSize uint8\n\nfunc main() {\n\tfmt.Println(\"Starting Controller\")\n\tXSize = 25\n\tYSize = 25\n\treset()\n\tevaluate()\n\truntime.GOMAXPROCS(2)\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\tgo startKite()\n\tgo startMessaging()\n\twg.Wait()\n}\n\nfunc startKite() {\n\tfmt.Println(\"Controller starting kite\")\n\tk := kite.New(\"controller\", \"1.0.0\")\n\tk.Config.Port = 6001\n\tk.Config.DisableAuthentication = true\n\tk.HandleFunc(\"hello\", hello)\n\tk.Run()\n}\n\nfunc startMessaging() {\n\tfmt.Println(\"Controller starting NSQ\")\n\tconfig := nsq.NewConfig()\n\n \tw, _ := nsq.NewProducer(os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\", config)\n\n\tfor {\n\t\ttick(w)\n\t\ttime.Sleep(time.Second * 1)\n\t}\t\n\tw.Stop()\n\n}\n\nfunc tick(w *nsq.Producer) {\n\tevaluate()\t\n\terr := w.Publish(\"tick\", []byte(\"test\"))\n\tif err != nil {\n\t\tlog.Panic(\"Could not connect\")\n\t}\n\t\n}\n\nfunc hello(r *kite.Request) (interface{}, error) {\n\n\tfmt.Println(\"Controller got hello\")\n\n\t\/\/ You can return anything as result, as long as it is JSON marshalable.\n\treturn nil, nil\n}\n\nfunc update(r *kite.Request) (interface{}, error) {\n fmt.Println(\"Controller received state update\")\n return nil, nil\n}\n\nfunc getState(r *kite.Request) (interface{}, error) {\n fmt.Println(\"Controller received state request\")\n return GameState, nil\n}\n\nfunc reset() {\n\tfmt.Println(\"Resetting Game State\")\n\t\/\/ Allocate the top-level slice.\n\tGameState := make([][]uint8, YSize) \/\/ One row per unit of y.\n\t\/\/ Loop over the rows, allocating the slice for each row.\n\tfor i := range GameState {\n\t\tGameState[i] = make([]uint8, XSize)\n\t\tfor j := range GameState[i] {\n\t\t\tGameState[i][j] = 1\n\t\t}\n\t}\n}\n\nfunc evaluate() {\n\tfmt.Println(\"Evaluting Game State\")\n\tfor i := range GameState {\n\t\tfor j := range GameState[i] {\n\t\t\tfmt.Print(GameState[i][j])\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 ibelie, Chen Jie, Joungtao. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License\n\/\/ that can be found in the LICENSE file.\n\npackage rpc\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"io\/ioutil\"\n)\n\nvar (\n\tENTITY_PKG = map[string]string{\n\t\t\"github.com\/ibelie\/microserver\": \"\",\n\t\t\"github.com\/ibelie\/ruid\": \"\",\n\t\t\"github.com\/ibelie\/tygo\": \"\",\n\t}\n)\n\nconst (\n\tENTITY_CODE = `\n\ntype Entity struct {\n\ttygo.Tygo\n\truid.RUID\n\tKey ruid.RUID\n\tType string\n\tcachedSize int\n}\n\nfunc (e *Entity) MaxFieldNum() int {\n\treturn 1\n}\n\nfunc (e *Entity) ByteSize() (size int) {\n\tif e != nil {\n\t\te.cachedSize = tygo.SizeVarint(e.RUID) + tygo.SizeVarint(e.Key) + tygo.SizeVarint(microserver.EntityType(e.Type))\n\t\tsize = 1 + tygo.SizeVarint(e.cachedSize) + e.cachedSize\n\t\te.SetCachedSize(size)\n\t}\n\treturn\n}\n\nfunc (e *Entity) Serialize(output *tygo.ProtoBuf) {\n\tif e != nil {\n\t\toutput.WriteBytes(10) \/\/ tag: 10 MAKE_TAG(1, WireBytes=2)\n\t\toutput.WriteVarint(e.cachedSize)\n\t\toutput.WriteVarint(e.RUID)\n\t\toutput.WriteVarint(e.Key)\n\t\toutput.WriteVarint(microserver.EntityType(e.Type))\n\t}\n}\n\nfunc (e *Entity) Deserialize(input *tygo.ProtoBuf) (err error) {\n\tfor !input.ExpectEnd() {\n\t\tvar tag int\n\t\tvar cutoff bool\n\t\tif tag, cutoff, err = input.ReadTag(127); err == nil && cutoff && tag == 10 { \/\/ MAKE_TAG(1, WireBytes=2)\n\t\t\tif x, e := input.ReadBuf(); e == nil {\n\t\t\t\ttmpi := &tygo.ProtoBuf{Buffer: x}\n\t\t\t\tif x, e := tmpi.ReadVarint(); e == nil {\n\t\t\t\t\te.RUID = ruid.RUID(x)\n\t\t\t\t} else {\n\t\t\t\t\terr = e\n\t\t\t\t}\n\t\t\t\tif x, e := tmpi.ReadVarint(); e == nil {\n\t\t\t\t\te.Key = ruid.RUID(x)\n\t\t\t\t} else {\n\t\t\t\t\terr = e\n\t\t\t\t}\n\t\t\t\tif x, e := tmpi.ReadVarint(); e == nil {\n\t\t\t\t\te.Type = microserver.EntityName(x)\n\t\t\t\t} else {\n\t\t\t\t\terr = e\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = e\n\t\t\t}\n\t\t\treturn\n\t\t} else if err = input.SkipField(tag); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n`\n)\n\nfunc Entity(path string) {\n\tpkgname, depends := Extract(path)\n\tfmt.Println(depends)\n\n\tvar head bytes.Buffer\n\tvar body bytes.Buffer\n\thead.Write([]byte(fmt.Sprintf(`\/\/ Generated by ibelie-rpc. DO NOT EDIT!\n\npackage %s\n`, pkgname)))\n\tbody.Write([]byte(ENTITY_CODE))\n\n\tpkgs := ENTITY_PKG\n\tvar sortedPkg []string\n\tfor path, _ := range pkgs {\n\t\tsortedPkg = append(sortedPkg, path)\n\t}\n\tsort.Strings(sortedPkg)\n\tfor _, path := range sortedPkg {\n\t\thead.Write([]byte(fmt.Sprintf(`\nimport %s\"%s\"`, pkgs[path], path)))\n\t}\n\n\thead.Write(body.Bytes())\n\tioutil.WriteFile(SRC_PATH+path+\"\/entity.rpc.go\", head.Bytes(), 0666)\n}\n<commit_msg>modify entity injector<commit_after>\/\/ Copyright 2017 ibelie, Chen Jie, Joungtao. All rights reserved.\n\/\/ Use of this source code is governed by The MIT License\n\/\/ that can be found in the LICENSE file.\n\npackage rpc\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"io\/ioutil\"\n)\n\nvar (\n\tENTITY_PKG = map[string]string{\n\t\t\"github.com\/ibelie\/ruid\": \"\",\n\t\t\"github.com\/ibelie\/tygo\": \"\",\n\t}\n)\n\nconst (\n\tENTITY_CODE = `\n\ntype Entity struct {\n\ttygo.Tygo\n\truid.RUID\n\tKey ruid.RUID\n\tType uint64\n\tcachedSize int\n}\n\nfunc (e *Entity) MaxFieldNum() int {\n\treturn 1\n}\n\nfunc (e *Entity) ByteSize() (size int) {\n\tif e != nil {\n\t\te.cachedSize = tygo.SizeVarint(e.RUID) + tygo.SizeVarint(e.Key) + tygo.SizeVarint(e.Type)\n\t\tsize = 1 + tygo.SizeVarint(e.cachedSize) + e.cachedSize\n\t\te.SetCachedSize(size)\n\t}\n\treturn\n}\n\nfunc (e *Entity) Serialize(output *tygo.ProtoBuf) {\n\tif e != nil {\n\t\toutput.WriteBytes(10) \/\/ tag: 10 MAKE_TAG(1, WireBytes=2)\n\t\toutput.WriteVarint(e.cachedSize)\n\t\toutput.WriteVarint(e.RUID)\n\t\toutput.WriteVarint(e.Key)\n\t\toutput.WriteVarint(e.Type)\n\t}\n}\n\nfunc (e *Entity) Deserialize(input *tygo.ProtoBuf) (err error) {\n\tfor !input.ExpectEnd() {\n\t\tvar tag int\n\t\tvar cutoff bool\n\t\tif tag, cutoff, err = input.ReadTag(127); err == nil && cutoff && tag == 10 { \/\/ MAKE_TAG(1, WireBytes=2)\n\t\t\tvar buffer []byte\n\t\t\tif buffer, err = input.ReadBuf(); err == nil {\n\t\t\t\tvar i, k uint64\n\t\t\t\ttmpi := &tygo.ProtoBuf{Buffer: buffer}\n\t\t\t\tif i, err = tmpi.ReadVarint(); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t} else if k, err = tmpi.ReadVarint(); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t} else if e.Type, err = tmpi.ReadVarint(); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\te.RUID = i\n\t\t\t\te.Key = k\n\t\t\t}\n\t\t\treturn\n\t\t} else if err = input.SkipField(tag); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n`\n)\n\nfunc Entity(path string) {\n\tpkgname, depends := Extract(path)\n\tfmt.Println(depends)\n\n\tvar head bytes.Buffer\n\tvar body bytes.Buffer\n\thead.Write([]byte(fmt.Sprintf(`\/\/ Generated by ibelie-rpc. DO NOT EDIT!\n\npackage %s\n`, pkgname)))\n\tbody.Write([]byte(ENTITY_CODE))\n\n\tpkgs := ENTITY_PKG\n\tvar sortedPkg []string\n\tfor path, _ := range pkgs {\n\t\tsortedPkg = append(sortedPkg, path)\n\t}\n\tsort.Strings(sortedPkg)\n\tfor _, path := range sortedPkg {\n\t\thead.Write([]byte(fmt.Sprintf(`\nimport %s\"%s\"`, pkgs[path], path)))\n\t}\n\n\thead.Write(body.Bytes())\n\tioutil.WriteFile(SRC_PATH+path+\"\/entity.rpc.go\", head.Bytes(), 0666)\n}\n<|endoftext|>"} {"text":"<commit_before>package goparse\n\nimport (\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ StructDesc contains description of parsed struct\ntype StructDesc struct {\n\tName string\n\tField []struct {\n\t\tName string\n\t\tType string\n\t\tTags []string\n\t}\n}\n\n\/\/ GetFileStructs returns structs descriptions from parsed go file\nfunc GetFileStructs(filename string, prefix string, tag string) ([]StructDesc, error) {\n\tresult := make([]StructDesc, 0, 5)\n\n\tfset := token.NewFileSet()\n\n\tf, err := parser.ParseFile(fset, filename, nil, 0)\n\tif nil != err {\n\t\treturn result, err\n\t}\n\n\tfor i := range f.Decls {\n\t\tif g, ok := f.Decls[i].(*ast.GenDecl); ok {\n\t\t\tfor _, s := range g.Specs {\n\t\t\t\tif ts, ok := s.(*ast.TypeSpec); ok {\n\t\t\t\t\tif \"\" == prefix || strings.HasPrefix(ts.Name.String(), prefix) {\n\t\t\t\t\t\tif tt, ok := ts.Type.(*ast.StructType); ok {\n\t\t\t\t\t\t\tnewStruct := StructDesc{Name: ts.Name.String(), Field: make([]struct {\n\t\t\t\t\t\t\t\tName string\n\t\t\t\t\t\t\t\tType string\n\t\t\t\t\t\t\t\tTags []string\n\t\t\t\t\t\t\t}, 0, len(tt.Fields.List))}\n\t\t\t\t\t\t\tfor _, field := range tt.Fields.List {\n\t\t\t\t\t\t\t\tnewField := struct {\n\t\t\t\t\t\t\t\t\tName string\n\t\t\t\t\t\t\t\t\tType string\n\t\t\t\t\t\t\t\t\tTags []string\n\t\t\t\t\t\t\t\t}{}\n\t\t\t\t\t\t\t\tif len(field.Names) < 1 {\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnewField.Name = field.Names[0].Name\n\t\t\t\t\t\t\t\tswitch e := field.Type.(type) {\n\t\t\t\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\t\t\t\tnewField.Type = e.Name\n\t\t\t\t\t\t\t\tcase *ast.ArrayType:\n\t\t\t\t\t\t\t\t\tif e2, ok := e.Elt.(*ast.Ident); ok {\n\t\t\t\t\t\t\t\t\t\tnewField.Type = \"[]\" + e2.Name\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\t\t\t\t\tif e2, ok := e.X.(*ast.Ident); ok {\n\t\t\t\t\t\t\t\t\t\tnewField.Type = \"*\" + e2.Name\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif nil != field.Tag {\n\t\t\t\t\t\t\t\t\tnewField.Tags = strings.Split(reflect.StructTag(strings.Trim(field.Tag.Value, \"`\")).Get(tag), \",\")\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnewStruct.Field = append(newStruct.Field, newField)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = append(result, newStruct)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result, nil\n}\n<commit_msg>Support for \"recursive\" types<commit_after>package goparse\n\nimport (\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ StructDesc contains description of parsed struct\ntype StructDesc struct {\n\tName string\n\tField []struct {\n\t\tName string\n\t\tType string\n\t\tTags []string\n\t}\n}\n\nfunc getTypeName(t ast.Expr) string {\n\tswitch e := t.(type) {\n\tcase *ast.Ident:\n\t\treturn e.Name\n\tcase *ast.ArrayType:\n\t\treturn \"[]\" + getTypeName(e.Elt)\n\tcase *ast.StarExpr:\n\t\treturn \"*\" + getTypeName(e.X)\n\t}\n\treturn \"unknown\"\n}\n\n\/\/ GetFileStructs returns structs descriptions from parsed go file\nfunc GetFileStructs(filename string, prefix string, tag string) ([]StructDesc, error) {\n\tresult := make([]StructDesc, 0, 5)\n\n\tfset := token.NewFileSet()\n\n\tf, err := parser.ParseFile(fset, filename, nil, 0)\n\tif nil != err {\n\t\treturn result, err\n\t}\n\n\tfor i := range f.Decls {\n\t\tif g, ok := f.Decls[i].(*ast.GenDecl); ok {\n\t\t\tfor _, s := range g.Specs {\n\t\t\t\tif ts, ok := s.(*ast.TypeSpec); ok {\n\t\t\t\t\tif \"\" == prefix || strings.HasPrefix(ts.Name.String(), prefix) {\n\t\t\t\t\t\tif tt, ok := ts.Type.(*ast.StructType); ok {\n\t\t\t\t\t\t\tnewStruct := StructDesc{Name: ts.Name.String(), Field: make([]struct {\n\t\t\t\t\t\t\t\tName string\n\t\t\t\t\t\t\t\tType string\n\t\t\t\t\t\t\t\tTags []string\n\t\t\t\t\t\t\t}, 0, len(tt.Fields.List))}\n\t\t\t\t\t\t\tfor _, field := range tt.Fields.List {\n\t\t\t\t\t\t\t\tnewField := struct {\n\t\t\t\t\t\t\t\t\tName string\n\t\t\t\t\t\t\t\t\tType string\n\t\t\t\t\t\t\t\t\tTags []string\n\t\t\t\t\t\t\t\t}{}\n\t\t\t\t\t\t\t\tif len(field.Names) < 1 {\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnewField.Name = field.Names[0].Name\n\t\t\t\t\t\t\t\tnewField.Type = getTypeName(field.Type)\n\t\t\t\t\t\t\t\tif nil != field.Tag {\n\t\t\t\t\t\t\t\t\tnewField.Tags = strings.Split(reflect.StructTag(strings.Trim(field.Tag.Value, \"`\")).Get(tag), \",\")\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnewStruct.Field = append(newStruct.Field, newField)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = append(result, newStruct)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gopcap\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ checkMagicNum checks the first four bytes of a pcap file, searching for the magic number\n\/\/ and checking the byte order. Returns three values: whether the file is a pcap file, whether\n\/\/ the byte order needs flipping, and any error that was encountered. If error is returned,\n\/\/ the other values are invalid.\nfunc checkMagicNum(src io.Reader) (bool, bool, error) {\n\t\/\/ These magic numbers form the header of a pcap file.\n\tmagic := []byte{0xa1, 0xb2, 0xc3, 0xd4}\n\tmagic_reverse := []byte{0xd4, 0xc3, 0xb2, 0xa1}\n\n\tbuffer := make([]byte, 4)\n\tread_count, err := src.Read(buffer)\n\n\tif err != nil {\n\t\treturn false, false, err\n\t}\n\tif read_count != 4 {\n\t\treturn false, false, InsufficientLength\n\t}\n\n\tif bytes.Compare(buffer, magic) == 0 {\n\t\treturn true, false, nil\n\t} else if bytes.Compare(buffer, magic_reverse) == 0 {\n\t\treturn true, true, nil\n\t}\n\n\treturn false, false, NotAPcapFile\n}\n\n\/\/ parsePacket parses a full packet out of the pcap file. It returns an error if any problems were\n\/\/ encountered.\nfunc parsePacket(pkt *Packet, src io.Reader, flipped bool) error {\n\terr := populatePacketHeader(pkt, src, flipped)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := make([]byte, pkt.IncludedLen)\n\treadlen, err := src.Read(data)\n\tpkt.Data = data\n\n\tif uint32(readlen) != pkt.IncludedLen {\n\t\terr = UnexpectedEOF\n\t}\n\n\treturn err\n}\n\n\/\/ populateFileHeader reads the next 20 bytes out of the .pcap file and uses it to populate the\n\/\/ PcapFile structure.\nfunc populateFileHeader(file *PcapFile, src io.Reader, flipped bool) error {\n\tbuffer := make([]byte, 20)\n\tread_count, err := src.Read(buffer)\n\n\tif err != nil {\n\t\treturn err\n\t} else if read_count != 20 {\n\t\treturn InsufficientLength\n\t}\n\n\t\/\/ First two bytes are the major version number.\n\tfile.MajorVersion = getUint16(buffer[0:2], flipped)\n\n\t\/\/ Next two are the minor version number.\n\tfile.MinorVersion = getUint16(buffer[2:4], flipped)\n\n\t\/\/ GMT to local correction, in seconds east of UTC.\n\tfile.TZCorrection = getInt32(buffer[4:8], flipped)\n\n\t\/\/ Next is the number of significant figures in the timestamps. Almost always zero.\n\tfile.SigFigs = getUint32(buffer[8:12], flipped)\n\n\t\/\/ Now the maximum length of the captured packet data.\n\tfile.MaxLen = getUint32(buffer[12:16], flipped)\n\n\t\/\/ And the link type.\n\tfile.LinkType = Link(getUint32(buffer[16:20], flipped))\n\n\treturn nil\n}\n\n\/\/ populatePacketHeader reads the next 16 bytes out of the file and builds it into a\n\/\/ packet header.\nfunc populatePacketHeader(packet *Packet, src io.Reader, flipped bool) error {\n\tbuffer := make([]byte, 16)\n\tread_count, err := src.Read(buffer)\n\n\tif read_count != 16 {\n\t\treturn InsufficientLength\n\t}\n\n\t\/\/ First is a pair of fields that build up the timestamp.\n\tts_seconds := getUint32(buffer[0:4], flipped)\n\tts_millis := getUint32(buffer[4:8], flipped)\n\tpacket.Timestamp = (time.Duration(ts_seconds) * time.Second) + (time.Duration(ts_millis) * time.Millisecond)\n\n\t\/\/ Next is the length of the data segment.\n\tpacket.IncludedLen = getUint32(buffer[8:12], flipped)\n\n\t\/\/ Then the original length of the packet.\n\tpacket.ActualLen = getUint32(buffer[12:16], flipped)\n\n\treturn err\n}\n<commit_msg>Handle EOF better.<commit_after>package gopcap\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ checkMagicNum checks the first four bytes of a pcap file, searching for the magic number\n\/\/ and checking the byte order. Returns three values: whether the file is a pcap file, whether\n\/\/ the byte order needs flipping, and any error that was encountered. If error is returned,\n\/\/ the other values are invalid.\nfunc checkMagicNum(src io.Reader) (bool, bool, error) {\n\t\/\/ These magic numbers form the header of a pcap file.\n\tmagic := []byte{0xa1, 0xb2, 0xc3, 0xd4}\n\tmagic_reverse := []byte{0xd4, 0xc3, 0xb2, 0xa1}\n\n\tbuffer := make([]byte, 4)\n\tread_count, err := src.Read(buffer)\n\n\tif read_count != 4 {\n\t\treturn false, false, InsufficientLength\n\t}\n\tif (err != nil) && (err != io.EOF) {\n\t\treturn false, false, err\n\t}\n\n\tif bytes.Compare(buffer, magic) == 0 {\n\t\treturn true, false, nil\n\t} else if bytes.Compare(buffer, magic_reverse) == 0 {\n\t\treturn true, true, nil\n\t}\n\n\treturn false, false, NotAPcapFile\n}\n\n\/\/ parsePacket parses a full packet out of the pcap file. It returns an error if any problems were\n\/\/ encountered.\nfunc parsePacket(pkt *Packet, src io.Reader, flipped bool) error {\n\terr := populatePacketHeader(pkt, src, flipped)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := make([]byte, pkt.IncludedLen)\n\treadlen, err := src.Read(data)\n\tpkt.Data = data\n\n\tif uint32(readlen) != pkt.IncludedLen {\n\t\terr = UnexpectedEOF\n\t}\n\n\treturn err\n}\n\n\/\/ populateFileHeader reads the next 20 bytes out of the .pcap file and uses it to populate the\n\/\/ PcapFile structure.\nfunc populateFileHeader(file *PcapFile, src io.Reader, flipped bool) error {\n\tbuffer := make([]byte, 20)\n\tread_count, err := src.Read(buffer)\n\n\tif err != nil {\n\t\treturn err\n\t} else if read_count != 20 {\n\t\treturn InsufficientLength\n\t}\n\n\t\/\/ First two bytes are the major version number.\n\tfile.MajorVersion = getUint16(buffer[0:2], flipped)\n\n\t\/\/ Next two are the minor version number.\n\tfile.MinorVersion = getUint16(buffer[2:4], flipped)\n\n\t\/\/ GMT to local correction, in seconds east of UTC.\n\tfile.TZCorrection = getInt32(buffer[4:8], flipped)\n\n\t\/\/ Next is the number of significant figures in the timestamps. Almost always zero.\n\tfile.SigFigs = getUint32(buffer[8:12], flipped)\n\n\t\/\/ Now the maximum length of the captured packet data.\n\tfile.MaxLen = getUint32(buffer[12:16], flipped)\n\n\t\/\/ And the link type.\n\tfile.LinkType = Link(getUint32(buffer[16:20], flipped))\n\n\treturn nil\n}\n\n\/\/ populatePacketHeader reads the next 16 bytes out of the file and builds it into a\n\/\/ packet header.\nfunc populatePacketHeader(packet *Packet, src io.Reader, flipped bool) error {\n\tbuffer := make([]byte, 16)\n\tread_count, err := src.Read(buffer)\n\n\tif read_count != 16 {\n\t\treturn InsufficientLength\n\t}\n\n\t\/\/ First is a pair of fields that build up the timestamp.\n\tts_seconds := getUint32(buffer[0:4], flipped)\n\tts_millis := getUint32(buffer[4:8], flipped)\n\tpacket.Timestamp = (time.Duration(ts_seconds) * time.Second) + (time.Duration(ts_millis) * time.Millisecond)\n\n\t\/\/ Next is the length of the data segment.\n\tpacket.IncludedLen = getUint32(buffer[8:12], flipped)\n\n\t\/\/ Then the original length of the packet.\n\tpacket.ActualLen = getUint32(buffer[12:16], flipped)\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n\t\/\/\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/RawImage contains data we got from API that needs to be modified before further usage\ntype RawImage struct {\n\tImgid int `json:\"id_number\"`\n\tURL string `json:\"image\"`\n\tScore int `json:\"score\"`\n\tOriginalFormat string `json:\"original_format\"`\n\tFaves int `json:\"faves\"`\n}\n\n\/\/Image contains data needed to filter fetch and save image\ntype Image struct {\n\tImgid int\n\tURL string\n\tFilename string\n\tScore int\n\tFaves int\n}\n\n\/\/Search returns to us array of searched images...\ntype Search struct {\n\tImages []RawImage `json:\"search\"`\n}\n\nfunc (imgdata *Image) isDeepEqual(b Image) bool {\n\tif b.Imgid == imgdata.Imgid &&\n\t\tb.URL == imgdata.URL &&\n\t\tb.Filename == imgdata.Filename &&\n\t\tb.Score == imgdata.Score &&\n\t\tb.Faves == imgdata.Faves {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ImageCh is a channel of image data. You can put images into channel by parsing\n\/\/Derpibooru API by id(s) or by tags and you can download images that are already\n\/\/in channel\ntype ImageCh chan Image\n\n\/\/Push gets unmarchalled JSON info, massages it and plugs it into channel so it\n\/\/would be processed in other places\nfunc trim(dat RawImage) Image {\n\n\ttfn := strconv.Itoa(dat.Imgid) + \".\" + dat.OriginalFormat\n\treturn Image{\n\t\tImgid: dat.Imgid,\n\t\tFilename: tfn,\n\t\tURL: prefix + \"\/\" + path.Dir(dat.URL) + \"\/\" + tfn,\n\t\tScore: dat.Score,\n\t\tFaves: dat.Faves,\n\t}\n}\n\n\/\/ParseImg gets image IDs, fetches information about those images from Derpibooru and pushes them into the channel.\nfunc (imgchan ImageCh) ParseImg(ids []int, key string) {\n\n\tfor _, imgid := range ids {\n\t\tsource := prefix + \"\/\/derpibooru.org\/images\/\" + strconv.Itoa(imgid) + \".json\"\n\t\tif key != \"\" {\n\t\t\tsource = source + \"?key=\" + key\n\t\t}\n\n\t\tlInfo(\"Getting image info at:\", source)\n\n\t\tbody, err := getRemoteJSON(source)\n\t\tif err != nil {\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\t\tvar dat RawImage\n\t\tif err := json.Unmarshal(body, &dat); \/\/transforming json into native map\n\n\t\terr != nil {\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\timgchan <- trim(dat)\n\t}\n\n\tclose(imgchan) \/\/closing channel, we are done here\n\n\treturn\n}\n\n\/\/DlImg reads image data from channel and downloads specified images to disc\nfunc (imgchan ImageCh) downloadImages(opts *Config) {\n\n\tlInfo(\"Worker started; reading channel\") \/\/nice notification that we are not forgotten\n\tvar n, size int\n\tfor imgdata := range imgchan {\n\n\t\tif imgdata.Filename == \"\" {\n\t\t\tlErr(\"Empty filename. Oops?\") \/\/something somewhere had gone wrong, not a cause to die, going to the next image\n\t\t\tcontinue\n\t\t}\n\n\t\tlInfo(\"Saving as\", imgdata.Filename)\n\n\t\tsize += imgdata.saveImage(opts)\n\t\tn++\n\t}\n\tlInfof(\"Downloaded %d images, for a total of %s\", n, fmtbytes(float64(size)))\n}\n\nfunc (imgdata Image) saveImage(opts *Config) (size int) { \/\/ To not hold all the files open when there is no need. All pointers to files are in the scope of this function.\n\n\toutput, err := os.Create(opts.ImageDir + string(os.PathSeparator) + imgdata.Filename) \/\/And now, THE FILE!\n\tif err != nil {\n\t\tlErr(\"Error when creating file for image: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err) \/\/Either we got no permisson or no space, end of line\n\t\treturn\n\t}\n\tdefer func() {\n\t\terr = output.Close() \/\/Not forgetting to deal with it after completing download\n\t\tif err != nil {\n\t\t\tlFatal(\"Could not close downloaded file\")\n\t\t}\n\t}()\n\n\tstart := time.Now() \/\/timing download time. We can't begin it sooner, not sure if we can begin it later\n\n\tbody, header, err := getImage(imgdata.URL)\n\n\tif err != nil {\n\t\tlErr(\"Error when getting image: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err)\n\t\treturn\n\t}\n\n\tsize, err = output.Write(body) \/\/\n\tif err != nil {\n\t\tlErr(\"Unable to write image on disk, id: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err)\n\t\treturn\n\t}\n\ttimed := time.Since(start).Seconds()\n\n\tlInfof(\"Downloaded %d bytes in %.2fs, speed %s\/s\\n\", size, timed, fmtbytes(float64(size)\/timed))\n\n\tsizestring, prs := header[\"Content-Length\"]\n\tif !prs {\n\t\tlErr(\"Filesize not provided\")\n\t\treturn\n\t}\n\n\texpsize, err := strconv.Atoi(sizestring[0])\n\tif err != nil {\n\t\tlErr(\"Unable to get expected filesize\")\n\t\treturn\n\t}\n\tif expsize != size {\n\t\tlErr(\"Unable to download full image\")\n\t\treturn 0\n\t}\n\treturn\n}\n\n\/\/ParseTag gets image tags, fetches information about all images it could from Derpibooru and pushes them into the channel.\nfunc (imgchan ImageCh) ParseTag(opts *TagOpts, key string) {\n\n\t\/\/Unlike main, I don't see how I could separate bits out to decrease complexity\n\tsource := prefix + \"\/\/derpibooru.org\/search.json?q=\" + opts.Tag \/\/yay hardwiring url strings!\n\n\tif key != \"\" {\n\t\tsource = source + \"&key=\" + key\n\t}\n\n\tlInfo(\"Searching as\", source)\n\n\tfor i := opts.StartPage; opts.StopPage == 0 || i <= opts.StopPage; i++ {\n\t\tlInfo(\"Searching page\", i)\n\n\t\tbody, err := getRemoteJSON(source + \"&page=\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tlErr(\"Error while json from page \", i)\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar dats Search \/\/Because we got array incoming instead of single object, we using a slive of maps!\n\t\terr = json.Unmarshal(body, &dats) \/\/transforming json into native view\n\n\t\tif err != nil {\n\t\t\tlErr(\"Error while parsing search page\", i)\n\t\t\tlErr(err)\n\t\t\tif serr, ok := err.(*json.SyntaxError); ok { \/\/In case crap was still given, we are looking at it.\n\t\t\t\tlErr(\"Occurred at offset: \", serr.Offset)\n\t\t\t}\n\t\t\tcontinue\n\n\t\t}\n\n\t\tif len(dats.Images) == 0 {\n\t\t\tlInfo(\"Pages are all over\") \/\/Does not mean that process is over.\n\t\t\tbreak\n\t\t} \/\/exit due to finishing all pages\n\n\t\tfor _, dat := range dats.Images {\n\t\t\timgchan <- trim(dat)\n\t\t}\n\n\t}\n\n\tclose(imgchan)\n}\n<commit_msg>Enabled no-clobber by default Closes #17 Also removed some unneeded returns after non-fatal errors<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"time\"\n\t\/\/\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/RawImage contains data we got from API that needs to be modified before further usage\ntype RawImage struct {\n\tImgid int `json:\"id_number\"`\n\tURL string `json:\"image\"`\n\tScore int `json:\"score\"`\n\tOriginalFormat string `json:\"original_format\"`\n\tFaves int `json:\"faves\"`\n}\n\n\/\/Image contains data needed to filter fetch and save image\ntype Image struct {\n\tImgid int\n\tURL string\n\tFilename string\n\tScore int\n\tFaves int\n}\n\n\/\/Search returns to us array of searched images...\ntype Search struct {\n\tImages []RawImage `json:\"search\"`\n}\n\nfunc (imgdata *Image) isDeepEqual(b Image) bool {\n\tif b.Imgid == imgdata.Imgid &&\n\t\tb.URL == imgdata.URL &&\n\t\tb.Filename == imgdata.Filename &&\n\t\tb.Score == imgdata.Score &&\n\t\tb.Faves == imgdata.Faves {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ImageCh is a channel of image data. You can put images into channel by parsing\n\/\/Derpibooru API by id(s) or by tags and you can download images that are already\n\/\/in channel\ntype ImageCh chan Image\n\n\/\/Push gets unmarchalled JSON info, massages it and plugs it into channel so it\n\/\/would be processed in other places\nfunc trim(dat RawImage) Image {\n\n\ttfn := strconv.Itoa(dat.Imgid) + \".\" + dat.OriginalFormat\n\treturn Image{\n\t\tImgid: dat.Imgid,\n\t\tFilename: tfn,\n\t\tURL: prefix + \"\/\" + path.Dir(dat.URL) + \"\/\" + tfn,\n\t\tScore: dat.Score,\n\t\tFaves: dat.Faves,\n\t}\n}\n\n\/\/ParseImg gets image IDs, fetches information about those images from Derpibooru and pushes them into the channel.\nfunc (imgchan ImageCh) ParseImg(ids []int, key string) {\n\n\tfor _, imgid := range ids {\n\t\tsource := prefix + \"\/\/derpibooru.org\/images\/\" + strconv.Itoa(imgid) + \".json\"\n\t\tif key != \"\" {\n\t\t\tsource = source + \"?key=\" + key\n\t\t}\n\n\t\tlInfo(\"Getting image info at:\", source)\n\n\t\tbody, err := getRemoteJSON(source)\n\t\tif err != nil {\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\t\tvar dat RawImage\n\t\tif err := json.Unmarshal(body, &dat); \/\/transforming json into native map\n\n\t\terr != nil {\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\timgchan <- trim(dat)\n\t}\n\n\tclose(imgchan) \/\/closing channel, we are done here\n\n\treturn\n}\n\n\/\/DlImg reads image data from channel and downloads specified images to disc\nfunc (imgchan ImageCh) downloadImages(opts *Config) {\n\n\tlInfo(\"Worker started; reading channel\") \/\/nice notification that we are not forgotten\n\tvar n, size int\n\tfor imgdata := range imgchan {\n\n\t\tif imgdata.Filename == \"\" {\n\t\t\tlErr(\"Empty filename. Oops?\") \/\/something somewhere had gone wrong, not a cause to die, going to the next image\n\t\t\tcontinue\n\t\t}\n\n\t\tlInfo(\"Saving as\", imgdata.Filename)\n\n\t\tsize += imgdata.saveImage(opts)\n\t\tn++\n\t}\n\tlInfof(\"Downloaded %d images, for a total of %s\", n, fmtbytes(float64(size)))\n}\n\nfunc (imgdata Image) saveImage(opts *Config) (size int) { \/\/ To not hold all the files open when there is no need. All pointers to files are in the scope of this function.\n\n\tfilepath := opts.ImageDir + string(os.PathSeparator) + imgdata.Filename\n\n\toutput, _ := os.Open(filepath) \/\/And now, THE FILE! New, truncated, ready to write\n\n\tfstat, err := output.Stat()\n\tif err != nil {\n\t\tlErr(\"Can't get file stats\")\n\t}\n\toutput.Close()\n\n\tstart := time.Now() \/\/timing download time. We can't begin it sooner, not sure if we can begin it later\n\n\tbody, header, err := getImage(imgdata.URL)\n\n\tsizestring, prs := header[\"Content-Length\"]\n\tif !prs {\n\t\tlErr(\"Filesize not provided\")\n\t}\n\texpsize, err := strconv.Atoi(sizestring[0])\n\tif err != nil {\n\t\tlErr(\"Unable to get expected filesize\")\n\t}\n\tlInfo(expsize, fstat.Size())\n\tif int64(expsize) == fstat.Size() {\n\t\tlInfo(\"Skipping: no-clobber\")\n\t\treturn 0\n\t}\n\n\tif err != nil {\n\t\tlErr(\"Error when getting image: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err)\n\t\treturn\n\t}\n\n\toutput, err = os.Create(filepath) \/\/And now, THE FILE! New, truncated, ready to write\n\tif err != nil {\n\t\tlErr(\"Error when creating file for image: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err) \/\/Either we got no permisson or no space, end of line\n\t\treturn\n\t}\n\tdefer func() {\n\t\terr = output.Close() \/\/Not forgetting to deal with it after completing download\n\t\tif err != nil {\n\t\t\tlFatal(\"Could not close downloaded file\")\n\t\t}\n\t}()\n\n\tsize, err = output.Write(body) \/\/\n\tif err != nil {\n\t\tlErr(\"Unable to write image on disk, id: \", strconv.Itoa(imgdata.Imgid))\n\t\tlErr(err)\n\t\treturn\n\t}\n\ttimed := time.Since(start).Seconds()\n\n\tlInfof(\"Downloaded %d bytes in %.2fs, speed %s\/s\\n\", size, timed, fmtbytes(float64(size)\/timed))\n\n\tif expsize != size {\n\t\tlErr(\"Unable to download full image\")\n\t\treturn 0\n\t}\n\treturn\n}\n\n\/\/ParseTag gets image tags, fetches information about all images it could from Derpibooru and pushes them into the channel.\nfunc (imgchan ImageCh) ParseTag(opts *TagOpts, key string) {\n\n\t\/\/Unlike main, I don't see how I could separate bits out to decrease complexity\n\tsource := prefix + \"\/\/derpibooru.org\/search.json?q=\" + opts.Tag \/\/yay hardwiring url strings!\n\n\tif key != \"\" {\n\t\tsource = source + \"&key=\" + key\n\t}\n\n\tlInfo(\"Searching as\", source)\n\n\tfor i := opts.StartPage; opts.StopPage == 0 || i <= opts.StopPage; i++ {\n\t\tlInfo(\"Searching page\", i)\n\n\t\tbody, err := getRemoteJSON(source + \"&page=\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tlErr(\"Error while json from page \", i)\n\t\t\tlErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar dats Search \/\/Because we got array incoming instead of single object, we using a slive of maps!\n\t\terr = json.Unmarshal(body, &dats) \/\/transforming json into native view\n\n\t\tif err != nil {\n\t\t\tlErr(\"Error while parsing search page\", i)\n\t\t\tlErr(err)\n\t\t\tif serr, ok := err.(*json.SyntaxError); ok { \/\/In case crap was still given, we are looking at it.\n\t\t\t\tlErr(\"Occurred at offset: \", serr.Offset)\n\t\t\t}\n\t\t\tcontinue\n\n\t\t}\n\n\t\tif len(dats.Images) == 0 {\n\t\t\tlInfo(\"Pages are all over\") \/\/Does not mean that process is over.\n\t\t\tbreak\n\t\t} \/\/exit due to finishing all pages\n\n\t\tfor _, dat := range dats.Images {\n\t\t\timgchan <- trim(dat)\n\t\t}\n\n\t}\n\n\tclose(imgchan)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage convert\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/opencontainers\/specs\"\n)\n\ntype IsolatorCapSet struct {\n\tSets []string `json:\"set\"`\n}\n\ntype ResourceMem struct {\n\tLimit string `json:\"limit\"`\n}\n\ntype ResourceCPU struct {\n\tLimit string `json:\"limit\"`\n}\n\nvar manifestName string\n\nfunc Oci2aciManifest(ociPath string) (string, error) {\n\tif bValidate := validateOCIProc(ociPath); bValidate != true {\n\t\terr := errors.New(\"Invalid oci bundle.\")\n\t\treturn \"\", err\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ convert layout\n\taciManifestPath, err := convertLayout(ociPath, dirWork)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn aciManifestPath, err\n\n}\n\nfunc Oci2aciImage(ociPath string) (string, error) {\n\tif bValidate := validateOCIProc(ociPath); bValidate != true {\n\t\terr := errors.New(\"Invalid oci bundle.\")\n\t\treturn \"\", err\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ First, convert layout\n\t_, err := convertLayout(ociPath, dirWork)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Second, build image\n\taciImgPath, err := buildACI(dirWork)\n\n\treturn aciImgPath, err\n\n}\n\n\/\/ Entry point of oci2aci,\n\/\/ First convert oci layout to aci layout, then build aci layout to image.\nfunc RunOCI2ACI(args []string, flagDebug bool, flagName string) error {\n\tvar srcPath, dstPath string\n\n\tsrcPath = args[0]\n\tif len(args) == 1 {\n\t\tdstPath = \"\"\n\t} else {\n\t\tdstPath = args[1]\n\t\text := filepath.Ext(dstPath)\n\t\tif ext != schema.ACIExtension {\n\t\t\terrStr := fmt.Sprintf(\"Extension must be %s (given %s)\", schema.ACIExtension, ext)\n\t\t\terr := errors.New(errStr)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif flagDebug {\n\t\tInitDebug()\n\t}\n\n\tmanifestName = flagName\n\t_, err := types.NewACName(manifestName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif bValidate := validateOCIProc(srcPath); bValidate != true {\n\t\tlog.Printf(\"Conversion stop.\")\n\t\treturn nil\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ First, convert layout\n\tmanifestPath, err := convertLayout(srcPath, dirWork)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Conversion from oci to aci layout failed: %v\", err)\n\t\t}\n\n\t} else {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Manifest:%v generated successfully.\", manifestPath)\n\t\t}\n\t}\n\t\/\/ Second, build image\n\timgPath, err := buildACI(dirWork)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Generate aci image failed:%v\", err)\n\t\t}\n\t} else {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Image:%v generated successfully.\", imgPath)\n\t\t}\n\t}\n\t\/\/ Save aci image to the path user specified\n\tif dstPath != \"\" {\n\t\tif err = run(exec.Command(\"cp\", imgPath, dstPath)); err != nil {\n\t\t\tif debugEnabled {\n\t\t\t\tlog.Printf(\"Store aci image failed:%v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif debugEnabled {\n\t\t\t\tlog.Printf(\"Image:%v generated successfully\", dstPath)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\n\/\/ Create work directory for the conversion output\nfunc createWorkDir() string {\n\tidir, err := ioutil.TempDir(\"\", \"oci2aci\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\trootfs := filepath.Join(idir, \"rootfs\")\n\tos.MkdirAll(rootfs, 0755)\n\n\tdata := []byte{}\n\tif err := ioutil.WriteFile(filepath.Join(idir, \"manifest\"), data, 0644); err != nil {\n\t\treturn \"\"\n\t}\n\treturn idir\n}\n\n\/\/ The structure of appc manifest:\n\/\/ 1.acKind\n\/\/ 2. acVersion\n\/\/ 3. name\n\/\/ 4. labels\n\/\/\t4.1 version\n\/\/\t4.2 os\n\/\/\t4.3 arch\n\/\/ 5. app\n\/\/\t5.1 exec\n\/\/\t5.2 user\n\/\/\t5.3 group\n\/\/\t5.4 eventHandlers\n\/\/\t5.5 workingDirectory\n\/\/\t5.6 environment\n\/\/\t5.7 mountPoints\n\/\/\t5.8 ports\n\/\/ 5.9 isolators\n\/\/ 6. annotations\n\/\/\t6.1 created\n\/\/\t6.2 authors\n\/\/\t6.3 homepage\n\/\/\t6.4 documentation\n\/\/ 7. dependencies\n\/\/\t7.1 imageName\n\/\/\t7.2 imageID\n\/\/\t7.3 labels\n\/\/\t7.4 size\n\/\/ 8. pathWhitelist\n\nfunc genManifest(path string) *schema.ImageManifest {\n\t\/\/ Get runtime.json and config.json\n\truntimePath := path + \"\/runtime.json\"\n\tconfigPath := path + \"\/config.json\"\n\n\truntime, err := ioutil.ReadFile(runtimePath)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Open file runtime.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tconfig, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Open file config.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar spec specs.LinuxSpec\n\terr = json.Unmarshal(config, &spec)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Unmarshal config.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar runSpec specs.LinuxRuntimeSpec\n\terr = json.Unmarshal(runtime, &runSpec)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Unmarshal runtime.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\t\/\/ Begin to convert runtime.json\/config.json to manifest\n\tm := new(schema.ImageManifest)\n\n\t\/\/ 1. Assemble \"acKind\" field\n\tm.ACKind = schema.ImageManifestKind\n\n\t\/\/ 2. Assemble \"acVersion\" field\n\tm.ACVersion = schema.AppContainerVersion\n\n\t\/\/ 3. Assemble \"name\" field\n\tm.Name = types.ACIdentifier(manifestName)\n\n\t\/\/ 4. Assemble \"labels\" field\n\t\/\/ 4.1 \"version\"\n\tlabel := new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"version\")\n\tlabel.Value = spec.Version\n\tm.Labels = append(m.Labels, *label)\n\t\/\/ 4.2 \"os\"\n\tlabel = new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"os\")\n\tlabel.Value = spec.Platform.OS\n\tm.Labels = append(m.Labels, *label)\n\t\/\/ 4.3 \"arch\"\n\tlabel = new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"arch\")\n\tlabel.Value = spec.Platform.Arch\n\tm.Labels = append(m.Labels, *label)\n\n\t\/\/ 5. Assemble \"app\" field\n\tapp := new(types.App)\n\t\/\/ 5.1 \"exec\"\n\tapp.Exec = spec.Process.Args\n\tif len(app.Exec) == 0 {\n\t\tapp.Exec = append(app.Exec, \"\/bin\/sh\")\n\t}\n\n\tif len(app.Exec) > 0 && !filepath.IsAbs(app.Exec[0]) {\n\t\tapp.Exec[0] = \"\/bin\/\" + app.Exec[0]\n\t}\n\n\t\/\/ 5.2 \"user\"\n\tapp.User = fmt.Sprintf(\"%d\", spec.Process.User.UID)\n\t\/\/ 5.3 \"group\"\n\tapp.Group = fmt.Sprintf(\"%d\", spec.Process.User.GID)\n\t\/\/ 5.4 \"eventHandlers\"\n\tevent := new(types.EventHandler)\n\tevent.Name = \"pre-start\"\n\tfor index := range runSpec.Hooks.Prestart {\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Path)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Args...)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Env...)\n\t}\n\tif len(event.Exec) == 0 {\n\t\tevent.Exec = append(event.Exec, \"\/bin\/ls\")\n\t}\n\tapp.EventHandlers = append(app.EventHandlers, *event)\n\tevent = new(types.EventHandler)\n\tevent.Name = \"post-stop\"\n\tfor index := range runSpec.Hooks.Poststop {\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Path)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Args...)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Env...)\n\t}\n\tif len(event.Exec) == 0 {\n\t\tevent.Exec = append(event.Exec, \"\/bin\/ls\")\n\t}\n\tapp.EventHandlers = append(app.EventHandlers, *event)\n\t\/\/ 5.5 \"workingDirectory\"\n\tapp.WorkingDirectory = spec.Process.Cwd\n\t\/\/ 5.6 \"environment\"\n\tenv := new(types.EnvironmentVariable)\n\tfor index := range spec.Process.Env {\n\t\ts := strings.Split(spec.Process.Env[index], \"=\")\n\t\tenv.Name = s[0]\n\t\tenv.Value = s[1]\n\t\tapp.Environment = append(app.Environment, *env)\n\t}\n\n\t\/\/ 5.7 \"mountPoints\"\n\tfor index := range spec.Mounts {\n\t\tmount := new(types.MountPoint)\n\t\tmount.Name = types.ACName(spec.Mounts[index].Name)\n\t\tmount.Path = spec.Mounts[index].Path\n\t\tmount.ReadOnly = false\n\t\tapp.MountPoints = append(app.MountPoints, *mount)\n\t}\n\n\t\/\/ 5.8 \"ports\"\n\n\t\/\/ 5.9 \"isolators\"\n\tif runSpec.Linux.Resources != nil {\n\t\tif runSpec.Linux.Resources.CPU.Quota != 0 {\n\t\t\tcpuLimt := new(ResourceCPU)\n\t\t\tcpuLimt.Limit = fmt.Sprintf(\"%dm\", runSpec.Linux.Resources.CPU.Quota)\n\t\t\tisolator := new(types.Isolator)\n\t\t\tisolator.Name = types.ACIdentifier(\"resource\/cpu\")\n\t\t\tbytes, _ := json.Marshal(cpuLimt)\n\n\t\t\tvalueRaw := json.RawMessage(bytes)\n\t\t\tisolator.ValueRaw = &valueRaw\n\n\t\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t\t}\n\t\tif runSpec.Linux.Resources.Memory.Limit != 0 {\n\t\t\tmemLimt := new(ResourceMem)\n\t\t\tmemLimt.Limit = fmt.Sprintf(\"%dG\", runSpec.Linux.Resources.Memory.Limit\/(1024*1024*1024))\n\t\t\tisolator := new(types.Isolator)\n\t\t\tisolator.Name = types.ACIdentifier(\"resource\/memory\")\n\t\t\tbytes, _ := json.Marshal(memLimt)\n\n\t\t\tvalueRaw := json.RawMessage(bytes)\n\t\t\tisolator.ValueRaw = &valueRaw\n\n\t\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t\t}\n\t}\n\n\tif len(spec.Linux.Capabilities) != 0 {\n\t\tisolatorCapSet := new(IsolatorCapSet)\n\t\tisolatorCapSet.Sets = append(isolatorCapSet.Sets, spec.Linux.Capabilities...)\n\n\t\tisolator := new(types.Isolator)\n\t\tisolator.Name = types.ACIdentifier(types.LinuxCapabilitiesRetainSetName)\n\t\tbytes, _ := json.Marshal(isolatorCapSet)\n\n\t\tvalueRaw := json.RawMessage(bytes)\n\t\tisolator.ValueRaw = &valueRaw\n\n\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t}\n\n\t\/\/ 6. \"annotations\"\n\n\t\/\/ 7. \"dependencies\"\n\n\t\/\/ 8. \"pathWhitelist\"\n\n\tm.App = app\n\n\treturn m\n}\n\n\/\/ Convert OCI layout to ACI layout\nfunc convertLayout(srcPath, dstPath string) (string, error) {\n\tsrc, _ := filepath.Abs(srcPath)\n\tsrc += \"\/rootfs\"\n\tif err := run(exec.Command(\"cp\", \"-rf\", src, dstPath)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tm := genManifest(srcPath)\n\n\tbytes, err := json.MarshalIndent(m, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmanifestPath := dstPath + \"\/manifest\"\n\n\tioutil.WriteFile(manifestPath, bytes, 0644)\n\treturn manifestPath, nil\n}\n<commit_msg>Update dealt with oci Process.Args, Process.Cwd, prestart and poststone<commit_after>\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage convert\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/opencontainers\/specs\"\n)\n\ntype IsolatorCapSet struct {\n\tSets []string `json:\"set\"`\n}\n\ntype ResourceMem struct {\n\tLimit string `json:\"limit\"`\n}\n\ntype ResourceCPU struct {\n\tLimit string `json:\"limit\"`\n}\n\nvar manifestName string\n\nfunc Oci2aciManifest(ociPath string) (string, error) {\n\tif bValidate := validateOCIProc(ociPath); bValidate != true {\n\t\terr := errors.New(\"Invalid oci bundle.\")\n\t\treturn \"\", err\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ convert layout\n\taciManifestPath, err := convertLayout(ociPath, dirWork)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn aciManifestPath, err\n\n}\n\nfunc Oci2aciImage(ociPath string) (string, error) {\n\tif bValidate := validateOCIProc(ociPath); bValidate != true {\n\t\terr := errors.New(\"Invalid oci bundle.\")\n\t\treturn \"\", err\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ First, convert layout\n\t_, err := convertLayout(ociPath, dirWork)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Second, build image\n\taciImgPath, err := buildACI(dirWork)\n\n\treturn aciImgPath, err\n\n}\n\n\/\/ Entry point of oci2aci,\n\/\/ First convert oci layout to aci layout, then build aci layout to image.\nfunc RunOCI2ACI(args []string, flagDebug bool, flagName string) error {\n\tvar srcPath, dstPath string\n\n\tsrcPath = args[0]\n\tif len(args) == 1 {\n\t\tdstPath = \"\"\n\t} else {\n\t\tdstPath = args[1]\n\t\text := filepath.Ext(dstPath)\n\t\tif ext != schema.ACIExtension {\n\t\t\terrStr := fmt.Sprintf(\"Extension must be %s (given %s)\", schema.ACIExtension, ext)\n\t\t\terr := errors.New(errStr)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif flagDebug {\n\t\tInitDebug()\n\t}\n\n\tmanifestName = flagName\n\t_, err := types.NewACName(manifestName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif bValidate := validateOCIProc(srcPath); bValidate != true {\n\t\tlog.Printf(\"Conversion stop.\")\n\t\treturn nil\n\t}\n\n\tdirWork := createWorkDir()\n\t\/\/ First, convert layout\n\tmanifestPath, err := convertLayout(srcPath, dirWork)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Conversion from oci to aci layout failed: %v\", err)\n\t\t}\n\n\t} else {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Manifest:%v generated successfully.\", manifestPath)\n\t\t}\n\t}\n\t\/\/ Second, build image\n\timgPath, err := buildACI(dirWork)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Generate aci image failed:%v\", err)\n\t\t}\n\t} else {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Image:%v generated successfully.\", imgPath)\n\t\t}\n\t}\n\t\/\/ Save aci image to the path user specified\n\tif dstPath != \"\" {\n\t\tif err = run(exec.Command(\"cp\", imgPath, dstPath)); err != nil {\n\t\t\tif debugEnabled {\n\t\t\t\tlog.Printf(\"Store aci image failed:%v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif debugEnabled {\n\t\t\t\tlog.Printf(\"Image:%v generated successfully\", dstPath)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\n\/\/ Create work directory for the conversion output\nfunc createWorkDir() string {\n\tidir, err := ioutil.TempDir(\"\", \"oci2aci\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\trootfs := filepath.Join(idir, \"rootfs\")\n\tos.MkdirAll(rootfs, 0755)\n\n\tdata := []byte{}\n\tif err := ioutil.WriteFile(filepath.Join(idir, \"manifest\"), data, 0644); err != nil {\n\t\treturn \"\"\n\t}\n\treturn idir\n}\n\n\/\/ The structure of appc manifest:\n\/\/ 1.acKind\n\/\/ 2. acVersion\n\/\/ 3. name\n\/\/ 4. labels\n\/\/\t4.1 version\n\/\/\t4.2 os\n\/\/\t4.3 arch\n\/\/ 5. app\n\/\/\t5.1 exec\n\/\/\t5.2 user\n\/\/\t5.3 group\n\/\/\t5.4 eventHandlers\n\/\/\t5.5 workingDirectory\n\/\/\t5.6 environment\n\/\/\t5.7 mountPoints\n\/\/\t5.8 ports\n\/\/ 5.9 isolators\n\/\/ 6. annotations\n\/\/\t6.1 created\n\/\/\t6.2 authors\n\/\/\t6.3 homepage\n\/\/\t6.4 documentation\n\/\/ 7. dependencies\n\/\/\t7.1 imageName\n\/\/\t7.2 imageID\n\/\/\t7.3 labels\n\/\/\t7.4 size\n\/\/ 8. pathWhitelist\n\nfunc genManifest(path string) *schema.ImageManifest {\n\t\/\/ Get runtime.json and config.json\n\truntimePath := path + \"\/runtime.json\"\n\tconfigPath := path + \"\/config.json\"\n\n\truntime, err := ioutil.ReadFile(runtimePath)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Open file runtime.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tconfig, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Open file config.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar spec specs.LinuxSpec\n\terr = json.Unmarshal(config, &spec)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Unmarshal config.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar runSpec specs.LinuxRuntimeSpec\n\terr = json.Unmarshal(runtime, &runSpec)\n\tif err != nil {\n\t\tif debugEnabled {\n\t\t\tlog.Printf(\"Unmarshal runtime.json failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\t\/\/ Begin to convert runtime.json\/config.json to manifest\n\tm := new(schema.ImageManifest)\n\n\t\/\/ 1. Assemble \"acKind\" field\n\tm.ACKind = schema.ImageManifestKind\n\n\t\/\/ 2. Assemble \"acVersion\" field\n\tm.ACVersion = schema.AppContainerVersion\n\n\t\/\/ 3. Assemble \"name\" field\n\tm.Name = types.ACIdentifier(manifestName)\n\n\t\/\/ 4. Assemble \"labels\" field\n\t\/\/ 4.1 \"version\"\n\tlabel := new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"version\")\n\tlabel.Value = spec.Version\n\tm.Labels = append(m.Labels, *label)\n\t\/\/ 4.2 \"os\"\n\tlabel = new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"os\")\n\tlabel.Value = spec.Platform.OS\n\tm.Labels = append(m.Labels, *label)\n\t\/\/ 4.3 \"arch\"\n\tlabel = new(types.Label)\n\tlabel.Name = types.ACIdentifier(\"arch\")\n\tlabel.Value = spec.Platform.Arch\n\tm.Labels = append(m.Labels, *label)\n\n\t\/\/ 5. Assemble \"app\" field\n\tapp := new(types.App)\n\t\/\/ 5.1 \"exec\"\n\tapp.Exec = spec.Process.Args\n\n\tprefixDir := \"\"\n\t\/\/var exeStr string\n\tif app.Exec == nil {\n\t\tapp.Exec = append(app.Exec, \"\/bin\/sh\")\n\t} else {\n\t\tif !filepath.IsAbs(app.Exec[0]) {\n\t\t\tif spec.Process.Cwd == \"\" {\n\t\t\t\tprefixDir = \"\/\"\n\t\t\t} else {\n\t\t\t\tprefixDir = spec.Process.Cwd\n\t\t\t}\n\t\t}\n\t\tapp.Exec[0] = prefixDir + app.Exec[0]\n\t}\n\n\t\/\/ 5.2 \"user\"\n\tapp.User = fmt.Sprintf(\"%d\", spec.Process.User.UID)\n\t\/\/ 5.3 \"group\"\n\tapp.Group = fmt.Sprintf(\"%d\", spec.Process.User.GID)\n\t\/\/ 5.4 \"eventHandlers\"\n\tevent := new(types.EventHandler)\n\tevent.Name = \"pre-start\"\n\tfor index := range runSpec.Hooks.Prestart {\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Path)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Args...)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Prestart[index].Env...)\n\t}\n\tif len(event.Exec) == 0 {\n\t\tevent.Exec = append(event.Exec, \"\/bin\/echo\")\n\t\tevent.Exec = append(event.Exec, \"-n\")\n\t}\n\tapp.EventHandlers = append(app.EventHandlers, *event)\n\tevent = new(types.EventHandler)\n\tevent.Name = \"post-stop\"\n\tfor index := range runSpec.Hooks.Poststop {\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Path)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Args...)\n\t\tevent.Exec = append(event.Exec, runSpec.Hooks.Poststop[index].Env...)\n\t}\n\tif len(event.Exec) == 0 {\n\t\tevent.Exec = append(event.Exec, \"\/bin\/echo\")\n\t\tevent.Exec = append(event.Exec, \"-n\")\n\t}\n\tapp.EventHandlers = append(app.EventHandlers, *event)\n\t\/\/ 5.5 \"workingDirectory\"\n\tapp.WorkingDirectory = spec.Process.Cwd\n\t\/\/ 5.6 \"environment\"\n\tenv := new(types.EnvironmentVariable)\n\tfor index := range spec.Process.Env {\n\t\ts := strings.Split(spec.Process.Env[index], \"=\")\n\t\tenv.Name = s[0]\n\t\tenv.Value = s[1]\n\t\tapp.Environment = append(app.Environment, *env)\n\t}\n\n\t\/\/ 5.7 \"mountPoints\"\n\tfor index := range spec.Mounts {\n\t\tmount := new(types.MountPoint)\n\t\tmount.Name = types.ACName(spec.Mounts[index].Name)\n\t\tmount.Path = spec.Mounts[index].Path\n\t\tmount.ReadOnly = false\n\t\tapp.MountPoints = append(app.MountPoints, *mount)\n\t}\n\n\t\/\/ 5.8 \"ports\"\n\n\t\/\/ 5.9 \"isolators\"\n\tif runSpec.Linux.Resources != nil {\n\t\tif runSpec.Linux.Resources.CPU.Quota != 0 {\n\t\t\tcpuLimt := new(ResourceCPU)\n\t\t\tcpuLimt.Limit = fmt.Sprintf(\"%dm\", runSpec.Linux.Resources.CPU.Quota)\n\t\t\tisolator := new(types.Isolator)\n\t\t\tisolator.Name = types.ACIdentifier(\"resource\/cpu\")\n\t\t\tbytes, _ := json.Marshal(cpuLimt)\n\n\t\t\tvalueRaw := json.RawMessage(bytes)\n\t\t\tisolator.ValueRaw = &valueRaw\n\n\t\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t\t}\n\t\tif runSpec.Linux.Resources.Memory.Limit != 0 {\n\t\t\tmemLimt := new(ResourceMem)\n\t\t\tmemLimt.Limit = fmt.Sprintf(\"%dG\", runSpec.Linux.Resources.Memory.Limit\/(1024*1024*1024))\n\t\t\tisolator := new(types.Isolator)\n\t\t\tisolator.Name = types.ACIdentifier(\"resource\/memory\")\n\t\t\tbytes, _ := json.Marshal(memLimt)\n\n\t\t\tvalueRaw := json.RawMessage(bytes)\n\t\t\tisolator.ValueRaw = &valueRaw\n\n\t\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t\t}\n\t}\n\n\tif len(spec.Linux.Capabilities) != 0 {\n\t\tisolatorCapSet := new(IsolatorCapSet)\n\t\tisolatorCapSet.Sets = append(isolatorCapSet.Sets, spec.Linux.Capabilities...)\n\n\t\tisolator := new(types.Isolator)\n\t\tisolator.Name = types.ACIdentifier(types.LinuxCapabilitiesRetainSetName)\n\t\tbytes, _ := json.Marshal(isolatorCapSet)\n\n\t\tvalueRaw := json.RawMessage(bytes)\n\t\tisolator.ValueRaw = &valueRaw\n\n\t\tapp.Isolators = append(app.Isolators, *isolator)\n\t}\n\n\t\/\/ 6. \"annotations\"\n\n\t\/\/ 7. \"dependencies\"\n\n\t\/\/ 8. \"pathWhitelist\"\n\n\tm.App = app\n\n\treturn m\n}\n\n\/\/ Convert OCI layout to ACI layout\nfunc convertLayout(srcPath, dstPath string) (string, error) {\n\tsrc, _ := filepath.Abs(srcPath)\n\tsrc += \"\/rootfs\"\n\tif err := run(exec.Command(\"cp\", \"-rf\", src, dstPath)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tm := genManifest(srcPath)\n\n\tbytes, err := json.MarshalIndent(m, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmanifestPath := dstPath + \"\/manifest\"\n\n\tioutil.WriteFile(manifestPath, bytes, 0644)\n\treturn manifestPath, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package epochs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\/big\"\n\t\"time\"\n)\n\nconst SecondsPerDay = 24 * 60 * 60\n\n\/\/ epoch2time gets a Unix time of the given x after dividing by q and\n\/\/ adding s.\nfunc epoch2time(x, q, s *big.Int) time.Time {\n\tz := new(big.Int)\n\tm := new(big.Int)\n\tz.DivMod(x, q, m)\n\tz.Add(z, s)\n\tr := m.Mul(m, big.NewInt(1e9)).Div(m, q)\n\treturn time.Unix(z.Int64(), r.Int64()).UTC()\n}\n\n\/\/ time2epoch reverses epoch2time.\nfunc time2epoch(t time.Time, m, s *big.Int) int64 {\n\tbf := new(big.Float).SetInt(big.NewInt(t.UnixNano()))\n\tbf.Quo(bf, big.NewFloat(1e9))\n\tbf.Sub(bf, new(big.Float).SetInt(s))\n\tbf.Mul(bf, new(big.Float).SetInt(m))\n\n\tr, acc := bf.Int64()\n\tif acc != big.Exact {\n\t\tfmt.Println(acc)\n\t}\n\n\treturn r\n}\n\n\/\/ Chrome time is the number of microseconds since 1601-01-01, which\n\/\/ is 11,644,473,600 seconds before the Unix epoch.\nfunc Chrome(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\nfunc ToChrome(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\n\n\/\/ Cocoa time is the number of seconds since 2001-01-01, which\n\/\/ is 978,307,200 seconds after the Unix epoch.\nfunc Cocoa(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1),\n\t\tbig.NewInt(978307200),\n\t)\n}\nfunc ToCocoa(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1),\n\t\tbig.NewInt(978307200),\n\t)\n}\n\n\/\/ GoogleCalendar seems to count 32-day months from the day before the\n\/\/ Unix epoch. @noppers worked out how to do this.\nfunc GoogleCalendar(num int64) time.Time {\n\n\tn := int(num)\n\n\ttotalDays := n \/ SecondsPerDay\n\tseconds := n % SecondsPerDay\n\n\t\/\/ A \"Google month\" has 32 days!\n\tmonths := totalDays \/ 32\n\tdays := totalDays % 32\n\n\t\/\/ The \"Google epoch\" is apparently off by a day.\n\tt := time.Unix(-SecondsPerDay, 0).UTC()\n\n\t\/\/ Add the days first...\n\tu := t.AddDate(0, 0, days)\n\n\t\/\/ ...then the months...\n\tv := u.AddDate(0, months, 0)\n\n\t\/\/ ...then the seconds.\n\tw := v.Add(time.Duration(seconds * 1e9))\n\n\treturn w\n}\nfunc ToGoogleCalendar(t time.Time) int64 {\n\ty := t.Year() - 1970\n\tm := int(t.Month()) - 1\n\tr := ((((y*12+m)*32+t.Day())*24+t.Hour())*60+t.Minute())*60 + t.Second()\n\treturn int64(r)\n}\n\n\/\/ ICQ time is the number of days since 1899-12-30, which is\n\/\/ 2,209,161,600 seconds before the Unix epoch. Days can have a\n\/\/ fractional part.\nfunc ICQ(days float64) time.Time {\n\n\tt := time.Unix(-2209161600, 0).UTC()\n\n\tintdays := int(days)\n\n\t\/\/ Want the fractional part of the day in nanoseconds.\n\tfracday := int64((days - float64(intdays)) * SecondsPerDay * 1e9)\n\n\treturn t.AddDate(0, 0, intdays).Add(time.Duration(fracday))\n}\n\n\/\/ Java time is the number of milliseconds since the Unix epoch.\nfunc Java(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1000),\n\t\tbig.NewInt(0),\n\t)\n}\nfunc ToJava(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1000),\n\t\tbig.NewInt(0),\n\t)\n}\n\n\/\/ Mozilla time (e.g., formhistory.sqlite) is the number of\n\/\/ microseconds since the Unix epoch.\nfunc Mozilla(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(0),\n\t)\n}\nfunc ToMozilla(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(0),\n\t)\n}\n\n\/\/ OLE time is the number of days since 1899-12-30, which is\n\/\/ 2,209,161,600 seconds before the Unix epoch. Days can have a\n\/\/ fractional part and is given as a string of hex characters\n\/\/ representing an IEEE 8-byte floating-point number.\nfunc OLE(days string) time.Time {\n\tvar d [8]byte\n\tvar f float64\n\n\tn, err := fmt.Sscanf(\n\t\tdays,\n\t\t\"%02x%02x%02x%02x%02x%02x%02x%02x\",\n\t\t&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7],\n\t)\n\tif err != nil {\n\t\tfmt.Println(\"fmt.Sscanf failed:\", err)\n\t}\n\tif n != 8 {\n\t\tfmt.Println(\"fmt.Sscanf did not scan 8 items:\", n)\n\t}\n\n\tbuf := bytes.NewReader(d[:])\n\tif err := binary.Read(buf, binary.LittleEndian, &f); err != nil {\n\t\tfmt.Println(\"binary.Read failed:\", err)\n\t}\n\n\treturn ICQ(f)\n}\n\n\/\/ Symbian time is the number of microseconds since the year 0, which\n\/\/ is 62,167,219,200 seconds before the Unix epoch.\nfunc Symbian(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-62167219200),\n\t)\n}\nfunc ToSymbian(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-62167219200),\n\t)\n}\n\n\/\/ Unix time is the number of seconds since 1970-01-01.\nfunc Unix(num int64) time.Time {\n\treturn time.Unix(num, 0).UTC()\n}\nfunc ToUnix(t time.Time) int64 {\n\treturn t.Unix()\n}\n\n\/\/ UUID version 1 time (RFC 4122) is the number of hectonanoseconds\n\/\/ (100 ns) since 1582-10-15, which is 12,219,292,800 seconds before\n\/\/ the Unix epoch.\nfunc UUIDv1(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-12219292800),\n\t)\n}\nfunc ToUUIDv1(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-12219292800),\n\t)\n}\n\n\/\/ Windows date time (e.g., .NET) is the number of hectonanoseconds\n\/\/ (100 ns) since 0001-01-01, which is 62,135,596,800 seconds before\n\/\/ the Unix epoch.\nfunc WindowsDate(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-62135596800),\n\t)\n}\nfunc ToWindowsDate(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-62135596800),\n\t)\n}\n\n\/\/ Windows file time (e.g., NTFS) is the number of hectonanoseconds\n\/\/ (100 ns) since 1601-01-01, which is 11,644,473,600 seconds before\n\/\/ the Unix epoch.\nfunc WindowsFile(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\nfunc ToWindowsFile(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\n<commit_msg>implement ToOLE<commit_after>package epochs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/big\"\n\t\"time\"\n)\n\nconst SecondsPerDay = 24 * 60 * 60\nconst NanosecondsPerDay = SecondsPerDay * 1e9\n\n\/\/ epoch2time gets a Unix time of the given x after dividing by q and\n\/\/ adding s.\nfunc epoch2time(x, q, s *big.Int) time.Time {\n\tz := new(big.Int)\n\tm := new(big.Int)\n\tz.DivMod(x, q, m)\n\tz.Add(z, s)\n\tr := m.Mul(m, big.NewInt(1e9)).Div(m, q)\n\treturn time.Unix(z.Int64(), r.Int64()).UTC()\n}\n\n\/\/ time2epoch reverses epoch2time.\nfunc time2epoch(t time.Time, m, s *big.Int) int64 {\n\tbf := new(big.Float).SetInt(big.NewInt(t.UnixNano()))\n\tbf.Quo(bf, big.NewFloat(1e9))\n\tbf.Sub(bf, new(big.Float).SetInt(s))\n\tbf.Mul(bf, new(big.Float).SetInt(m))\n\n\tr, acc := bf.Int64()\n\tif acc != big.Exact {\n\t\tfmt.Println(acc)\n\t}\n\n\treturn r\n}\n\n\/\/ Chrome time is the number of microseconds since 1601-01-01, which\n\/\/ is 11,644,473,600 seconds before the Unix epoch.\nfunc Chrome(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\nfunc ToChrome(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\n\n\/\/ Cocoa time is the number of seconds since 2001-01-01, which\n\/\/ is 978,307,200 seconds after the Unix epoch.\nfunc Cocoa(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1),\n\t\tbig.NewInt(978307200),\n\t)\n}\nfunc ToCocoa(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1),\n\t\tbig.NewInt(978307200),\n\t)\n}\n\n\/\/ GoogleCalendar seems to count 32-day months from the day before the\n\/\/ Unix epoch. @noppers worked out how to do this.\nfunc GoogleCalendar(num int64) time.Time {\n\n\tn := int(num)\n\n\ttotalDays := n \/ SecondsPerDay\n\tseconds := n % SecondsPerDay\n\n\t\/\/ A \"Google month\" has 32 days!\n\tmonths := totalDays \/ 32\n\tdays := totalDays % 32\n\n\t\/\/ The \"Google epoch\" is apparently off by a day.\n\tt := time.Unix(-SecondsPerDay, 0).UTC()\n\n\t\/\/ Add the days first...\n\tu := t.AddDate(0, 0, days)\n\n\t\/\/ ...then the months...\n\tv := u.AddDate(0, months, 0)\n\n\t\/\/ ...then the seconds.\n\tw := v.Add(time.Duration(seconds * 1e9))\n\n\treturn w\n}\nfunc ToGoogleCalendar(t time.Time) int64 {\n\ty := t.Year() - 1970\n\tm := int(t.Month()) - 1\n\tr := ((((y*12+m)*32+t.Day())*24+t.Hour())*60+t.Minute())*60 + t.Second()\n\treturn int64(r)\n}\n\n\/\/ ICQ time is the number of days since 1899-12-30, which is\n\/\/ 2,209,161,600 seconds before the Unix epoch. Days can have a\n\/\/ fractional part.\nfunc ICQ(days float64) time.Time {\n\n\tt := time.Unix(-2209161600, 0).UTC()\n\n\tintdays := int(days)\n\n\t\/\/ Want the fractional part of the day in nanoseconds.\n\tfracday := int64((days - float64(intdays)) * NanosecondsPerDay)\n\n\treturn t.AddDate(0, 0, intdays).Add(time.Duration(fracday))\n}\nfunc ToICQ(t time.Time) float64 {\n\tt2 := time.Unix(-2209161600, 0)\n\treturn float64(t.Sub(t2).Nanoseconds()) \/ float64(NanosecondsPerDay)\n}\n\n\/\/ Java time is the number of milliseconds since the Unix epoch.\nfunc Java(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1000),\n\t\tbig.NewInt(0),\n\t)\n}\nfunc ToJava(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1000),\n\t\tbig.NewInt(0),\n\t)\n}\n\n\/\/ Mozilla time (e.g., formhistory.sqlite) is the number of\n\/\/ microseconds since the Unix epoch.\nfunc Mozilla(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(0),\n\t)\n}\nfunc ToMozilla(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(0),\n\t)\n}\n\n\/\/ OLE time is the number of days since 1899-12-30, which is\n\/\/ 2,209,161,600 seconds before the Unix epoch. Days can have a\n\/\/ fractional part and is given as a string of hex characters\n\/\/ representing an IEEE 8-byte floating-point number.\nfunc OLE(days string) time.Time {\n\tvar d [8]byte\n\tvar f float64\n\n\tn, err := fmt.Sscanf(\n\t\tdays,\n\t\t\"%02x%02x%02x%02x%02x%02x%02x%02x\",\n\t\t&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7],\n\t)\n\tif err != nil {\n\t\tfmt.Println(\"fmt.Sscanf failed:\", err)\n\t}\n\tif n != 8 {\n\t\tfmt.Println(\"fmt.Sscanf did not scan 8 items:\", n)\n\t}\n\n\tbuf := bytes.NewReader(d[:])\n\tif err := binary.Read(buf, binary.LittleEndian, &f); err != nil {\n\t\tfmt.Println(\"binary.Read failed:\", err)\n\t}\n\n\treturn ICQ(f)\n}\nfunc ToOLE(t time.Time) string {\n\ticq := ToICQ(t)\n\tbuf := new(bytes.Buffer)\n\terr := binary.Write(buf, binary.LittleEndian, math.Float64bits(icq))\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", err)\n\t}\n\treturn fmt.Sprintf(\"%016x\", buf.Bytes())\n}\n\n\/\/ Symbian time is the number of microseconds since the year 0, which\n\/\/ is 62,167,219,200 seconds before the Unix epoch.\nfunc Symbian(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-62167219200),\n\t)\n}\nfunc ToSymbian(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e6),\n\t\tbig.NewInt(-62167219200),\n\t)\n}\n\n\/\/ Unix time is the number of seconds since 1970-01-01.\nfunc Unix(num int64) time.Time {\n\treturn time.Unix(num, 0).UTC()\n}\nfunc ToUnix(t time.Time) int64 {\n\treturn t.Unix()\n}\n\n\/\/ UUID version 1 time (RFC 4122) is the number of hectonanoseconds\n\/\/ (100 ns) since 1582-10-15, which is 12,219,292,800 seconds before\n\/\/ the Unix epoch.\nfunc UUIDv1(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-12219292800),\n\t)\n}\nfunc ToUUIDv1(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-12219292800),\n\t)\n}\n\n\/\/ Windows date time (e.g., .NET) is the number of hectonanoseconds\n\/\/ (100 ns) since 0001-01-01, which is 62,135,596,800 seconds before\n\/\/ the Unix epoch.\nfunc WindowsDate(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-62135596800),\n\t)\n}\nfunc ToWindowsDate(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-62135596800),\n\t)\n}\n\n\/\/ Windows file time (e.g., NTFS) is the number of hectonanoseconds\n\/\/ (100 ns) since 1601-01-01, which is 11,644,473,600 seconds before\n\/\/ the Unix epoch.\nfunc WindowsFile(num int64) time.Time {\n\treturn epoch2time(\n\t\tbig.NewInt(num),\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\nfunc ToWindowsFile(t time.Time) int64 {\n\treturn time2epoch(\n\t\tt,\n\t\tbig.NewInt(1e7),\n\t\tbig.NewInt(-11644473600),\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Alex Browne. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\n\/\/ File errors.go declares all the different errors that might be thrown\n\/\/ by the package and provides constructors for each one.\n\npackage zoom\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ TODO: add more custom error types based on common use cases throughout the package\n\n\/\/ NameAlreadyRegisteredError is returned if you try to register a\n\/\/ name which has already been registered.\ntype NameAlreadyRegisteredError struct {\n\tname string\n}\n\nfunc (e *NameAlreadyRegisteredError) Error() string {\n\treturn \"zoom: the name '\" + e.name + \"' has already been registered\"\n}\n\nfunc NewNameAlreadyRegisteredError(name string) *NameAlreadyRegisteredError {\n\treturn &NameAlreadyRegisteredError{name}\n}\n\n\/\/ TypeAlreadyRegisteredError is returned if you try to register a\n\/\/ type which has already been registered.\ntype TypeAlreadyRegisteredError struct {\n\ttyp reflect.Type\n}\n\nfunc (e *TypeAlreadyRegisteredError) Error() string {\n\treturn \"zoom: the type '\" + e.typ.String() + \"' has already been registered\"\n}\n\nfunc NewTypeAlreadyRegisteredError(typ reflect.Type) *TypeAlreadyRegisteredError {\n\treturn &TypeAlreadyRegisteredError{typ}\n}\n\n\/\/ ModelTypeNotRegisteredError is returned if you attempt to perform\n\/\/ certain operations for unregistered types.\ntype ModelTypeNotRegisteredError struct {\n\ttyp reflect.Type\n}\n\nfunc (e *ModelTypeNotRegisteredError) Error() string {\n\treturn \"zoom: the type '\" + e.typ.String() + \"' has not been registered\"\n}\n\nfunc NewModelTypeNotRegisteredError(typ reflect.Type) *ModelTypeNotRegisteredError {\n\treturn &ModelTypeNotRegisteredError{typ}\n}\n\n\/\/ ModelNameNotRegisteredError is returned if you attempt to perform\n\/\/ certain operations for unregistered names.\ntype ModelNameNotRegisteredError struct {\n\tname string\n}\n\nfunc (e *ModelNameNotRegisteredError) Error() string {\n\treturn \"zoom: the model name '\" + e.name + \"' has not been registered\"\n}\n\nfunc NewModelNameNotRegisteredError(name string) *ModelNameNotRegisteredError {\n\treturn &ModelNameNotRegisteredError{name}\n}\n\n\/\/ KeyNotFoundError is returned from Find, Scan, and Query functions if the\n\/\/ model you are trying to find does not exist in the database.\ntype KeyNotFoundError struct {\n\tkey string\n\tmodelType reflect.Type\n}\n\nfunc (e *KeyNotFoundError) Error() string {\n\treturn fmt.Sprintf(\"zoom: could not find model of type %s with key %s\", e.modelType.String(), e.key)\n}\n\nfunc NewKeyNotFoundError(key string, modelType reflect.Type) *KeyNotFoundError {\n\treturn &KeyNotFoundError{key, modelType}\n}\n<commit_msg>refactor errors.go to use fmt.Sprintf instead of string concatenation<commit_after>\/\/ Copyright 2013 Alex Browne. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\n\/\/ File errors.go declares all the different errors that might be thrown\n\/\/ by the package and provides constructors for each one.\n\npackage zoom\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ TODO: add more custom error types based on common use cases throughout the package\n\n\/\/ NameAlreadyRegisteredError is returned if you try to register a\n\/\/ name which has already been registered.\ntype NameAlreadyRegisteredError struct {\n\tname string\n}\n\nfunc (e *NameAlreadyRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"zoom: the name %s has already been registered\", e.name)\n}\n\nfunc NewNameAlreadyRegisteredError(name string) *NameAlreadyRegisteredError {\n\treturn &NameAlreadyRegisteredError{name}\n}\n\n\/\/ TypeAlreadyRegisteredError is returned if you try to register a\n\/\/ type which has already been registered.\ntype TypeAlreadyRegisteredError struct {\n\ttyp reflect.Type\n}\n\nfunc (e *TypeAlreadyRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"zoom: the type %s has already been registered\", e.typ.String())\n}\n\nfunc NewTypeAlreadyRegisteredError(typ reflect.Type) *TypeAlreadyRegisteredError {\n\treturn &TypeAlreadyRegisteredError{typ}\n}\n\n\/\/ ModelTypeNotRegisteredError is returned if you attempt to perform\n\/\/ certain operations for unregistered types.\ntype ModelTypeNotRegisteredError struct {\n\ttyp reflect.Type\n}\n\nfunc (e *ModelTypeNotRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"zoom: the type %s has not been registered\", e.typ.String())\n}\n\nfunc NewModelTypeNotRegisteredError(typ reflect.Type) *ModelTypeNotRegisteredError {\n\treturn &ModelTypeNotRegisteredError{typ}\n}\n\n\/\/ ModelNameNotRegisteredError is returned if you attempt to perform\n\/\/ certain operations for unregistered names.\ntype ModelNameNotRegisteredError struct {\n\tname string\n}\n\nfunc (e *ModelNameNotRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"zoom: the model name %s has not been registered\", e.name)\n}\n\nfunc NewModelNameNotRegisteredError(name string) *ModelNameNotRegisteredError {\n\treturn &ModelNameNotRegisteredError{name}\n}\n\n\/\/ KeyNotFoundError is returned from Find, Scan, and Query functions if the\n\/\/ model you are trying to find does not exist in the database.\ntype KeyNotFoundError struct {\n\tkey string\n\tmodelType reflect.Type\n}\n\nfunc (e *KeyNotFoundError) Error() string {\n\treturn fmt.Sprintf(\"zoom: could not find model of type %s with key %s\", e.modelType.String(), e.key)\n}\n\nfunc NewKeyNotFoundError(key string, modelType reflect.Type) *KeyNotFoundError {\n\treturn &KeyNotFoundError{key, modelType}\n}\n<|endoftext|>"} {"text":"<commit_before>package arangolite\n\ntype statusCodedError interface {\n\terror\n\tStatusCode() int\n}\n\ntype statusCodedErrorBehavior struct {\n\terror\n\tstatusCode int\n}\n\nfunc (e *statusCodedErrorBehavior) StatusCode() int {\n\treturn e.statusCode\n}\n\nfunc withStatusCode(err error, statusCode int) error {\n\treturn statusCodedErrorBehavior{err, statusCode}\n}\n\n\/\/ HasStatusCode returns true when one of the given error status code matches the one returned by the database.\nfunc HasStatusCode(err error, statusCode ...int) bool {\n\te, ok := err.(statusCodedError)\n\tif !ok {\n\t\treturn false\n\t}\n\tcode := e.StatusCode()\n\tfor _, c := range statusCode {\n\t\tif code == c {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetStatusCode returns the status code encapsulated in the error.\nfunc GetStatusCode(err error) (code int, ok bool) {\n\te, ok := err.(statusCodedError)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\treturn e.StatusCode(), true\n}\n\ntype numberedError interface {\n\terror\n\tErrorNum() int\n}\n\ntype numberedErrorBehavior struct {\n\terror\n\terrorNum int\n}\n\nfunc (e *numberedErrorBehavior) ErrorNum() int {\n\treturn e.errorNum\n}\n\nfunc withErrorNum(err error, errorNum int) error {\n\treturn numberedErrorBehavior{err, errorNum}\n}\n\n\/\/ HasErrorNum returns true when one of the given error num matches the one returned by the database.\nfunc HasErrorNum(err error, errorNum ...int) bool {\n\te, ok := err.(numberedError)\n\tif !ok {\n\t\treturn false\n\t}\n\tnum := e.ErrorNum()\n\tfor _, n := range errorNum {\n\t\tif num == n {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetErrorNum returns the database error num encapsulated in the error.\nfunc GetErrorNum(err error) (errorNum int, ok bool) {\n\te, ok := err.(numberedError)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\treturn e.ErrorNum(), true\n}\n\n\/\/ IsErrInvalidRequest returns true when the database returns a 400.\nfunc IsErrInvalidRequest(err error) bool {\n\treturn HasStatusCode(err, 400)\n}\n\n\/\/ IsErrUnauthorized returns true when the database returns a 401.\nfunc IsErrUnauthorized(err error) bool {\n\treturn HasStatusCode(err, 401)\n}\n\n\/\/ IsErrForbidden returns true when the database returns a 403.\nfunc IsErrForbidden(err error) bool {\n\treturn HasStatusCode(err, 403)\n}\n\n\/\/ IsErrUnique returns true when the error num is a 1210 - ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.\nfunc IsErrUnique(err error) bool {\n\treturn HasErrorNum(err, 1210)\n}\n\n\/\/ IsErrNotFound returns true when the database returns a 404 or when the error num is:\n\/\/ 1202 - ERROR_ARANGO_DOCUMENT_NOT_FOUND\n\/\/ 1203 - ERROR_ARANGO_COLLECTION_NOT_FOUND\nfunc IsErrNotFound(err error) bool {\n\treturn HasStatusCode(err, 404) || HasErrorNum(err, 1202, 1203)\n}\n<commit_msg>Fixed<commit_after>package arangolite\n\ntype statusCodedError interface {\n\terror\n\tStatusCode() int\n}\n\ntype statusCodedErrorBehavior struct {\n\terror\n\tstatusCode int\n}\n\nfunc (e statusCodedErrorBehavior) StatusCode() int {\n\treturn e.statusCode\n}\n\nfunc withStatusCode(err error, statusCode int) error {\n\treturn struct {\n\t\terror\n\t\tstatusCodedErrorBehavior\n\t}{\n\t\terr,\n\t\tstatusCodedErrorBehavior{statusCode: statusCode},\n\t}\n}\n\n\/\/ HasStatusCode returns true when one of the given error status code matches the one returned by the database.\nfunc HasStatusCode(err error, statusCode ...int) bool {\n\te, ok := err.(statusCodedError)\n\tif !ok {\n\t\treturn false\n\t}\n\tcode := e.StatusCode()\n\tfor _, c := range statusCode {\n\t\tif code == c {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetStatusCode returns the status code encapsulated in the error.\nfunc GetStatusCode(err error) (code int, ok bool) {\n\te, ok := err.(statusCodedError)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\treturn e.StatusCode(), true\n}\n\ntype numberedError interface {\n\terror\n\tErrorNum() int\n}\n\ntype numberedErrorBehavior struct {\n\terror\n\terrorNum int\n}\n\nfunc (e numberedErrorBehavior) ErrorNum() int {\n\treturn e.errorNum\n}\n\nfunc withErrorNum(err error, errorNum int) error {\n\treturn struct {\n\t\terror\n\t\tnumberedErrorBehavior\n\t}{\n\t\terr,\n\t\tnumberedErrorBehavior{errorNum: errorNum},\n\t}\n}\n\n\/\/ HasErrorNum returns true when one of the given error num matches the one returned by the database.\nfunc HasErrorNum(err error, errorNum ...int) bool {\n\te, ok := err.(numberedError)\n\tif !ok {\n\t\treturn false\n\t}\n\tnum := e.ErrorNum()\n\tfor _, n := range errorNum {\n\t\tif num == n {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GetErrorNum returns the database error num encapsulated in the error.\nfunc GetErrorNum(err error) (errorNum int, ok bool) {\n\te, ok := err.(numberedError)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\treturn e.ErrorNum(), true\n}\n\n\/\/ IsErrInvalidRequest returns true when the database returns a 400.\nfunc IsErrInvalidRequest(err error) bool {\n\treturn HasStatusCode(err, 400)\n}\n\n\/\/ IsErrUnauthorized returns true when the database returns a 401.\nfunc IsErrUnauthorized(err error) bool {\n\treturn HasStatusCode(err, 401)\n}\n\n\/\/ IsErrForbidden returns true when the database returns a 403.\nfunc IsErrForbidden(err error) bool {\n\treturn HasStatusCode(err, 403)\n}\n\n\/\/ IsErrUnique returns true when the error num is a 1210 - ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.\nfunc IsErrUnique(err error) bool {\n\treturn HasErrorNum(err, 1210)\n}\n\n\/\/ IsErrNotFound returns true when the database returns a 404 or when the error num is:\n\/\/ 1202 - ERROR_ARANGO_DOCUMENT_NOT_FOUND\n\/\/ 1203 - ERROR_ARANGO_COLLECTION_NOT_FOUND\nfunc IsErrNotFound(err error) bool {\n\treturn HasStatusCode(err, 404) || HasErrorNum(err, 1202, 1203)\n}\n<|endoftext|>"} {"text":"<commit_before>package gforms\n\ntype Errors map[string]string\n<commit_msg>Updated Errors method.<commit_after>package gforms\n\ntype Errors map[string]string\n\nfunc (es Errors) Has(key string) bool {\n\t_, ok := es[key]\n\treturn ok\n}\n\nfunc (es Errors) Get(key string) string {\n\tv, _ := es[key]\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package errors\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar prefix string\n\ntype erx struct {\n\tpc uintptr\n\terror error\n\tparent *erx\n}\n\n\/\/ Implementation of the error interface.\nfunc (e erx) Error() string {\n\tx, flat := &e, make([]*erx, 0, 16)\n\tfor {\n\t\tflat = append(flat, x)\n\t\tif x.parent == nil {\n\t\t\tbreak\n\t\t}\n\t\tx = x.parent\n\t}\n\tmax := len(flat) - 1\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i <= max; i++ {\n\t\tx = flat[max-i]\n\n\t\tif i == 0 {\n\t\t\tbuf.WriteString(\"E \")\n\t\t} else {\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(prefix)\n\t\t\tbuf.WriteString(\"+ \")\n\t\t}\n\n\t\tfn := runtime.FuncForPC(x.pc)\n\t\tif fn == nil {\n\t\t\tbuf.WriteString(\"unknown\")\n\t\t} else {\n\t\t\tfile, line := fn.FileLine(x.pc)\n\t\t\tbuf.WriteString(trimSourcePath(fn.Name(), file))\n\t\t\tbuf.WriteRune(':')\n\t\t\tbuf.WriteString(strconv.Itoa(line))\n\t\t}\n\n\t\tif x.error != nil {\n\t\t\tbuf.WriteRune(' ')\n\t\t\tbuf.WriteString(x.error.Error())\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ New returns a new error with the given printf-formatted error message.\nfunc New(text string, a ...interface{}) error {\n\tx := &erx{\n\t\tpc: getPC(3),\n\t\terror: errors.New(text),\n\t}\n\tif len(a) == 0 {\n\t\tx.error = errors.New(text)\n\t} else {\n\t\tx.error = fmt.Errorf(text, a)\n\t}\n\n\treturn x\n}\n\n\/\/ Append returns a new error with the parent error\n\/\/ and given printf-formatted error message.\nfunc Append(parent error, text string, a ...interface{}) error {\n\tpc := getPC(3)\n\tp := extend(parent, pc)\n\n\tvar err error\n\tif len(a) == 0 {\n\t\terr = errors.New(text)\n\t} else {\n\t\terr = fmt.Errorf(text, a)\n\t}\n\n\tif parent == nil {\n\t\tp.error = err\n\t\treturn p\n\t}\n\n\te := &erx{\n\t\tpc: pc,\n\t\terror: err,\n\t\tparent: p,\n\t}\n\n\treturn e\n}\n\n\/\/ Wrap returns wrapped one or more errors.\nfunc Wrap(errs ...error) error {\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\n\tpc := getPC(3)\n\n\tvar x, parent *erx\n\nloop:\n\tfor _, err := range errs {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tparent = x\n\t\tx = extend(err, pc)\n\n\t\tif parent == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\te := x\n\t\tfor e.parent != nil {\n\t\t\tif e == parent {\n\t\t\t\tx = parent\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t\te = e.parent\n\t\t}\n\t\te.parent = parent\n\t}\n\n\tif x == nil {\n\t\treturn nil\n\t}\n\n\treturn x\n}\n\n\/\/ Sets the prefix for child errors.\nfunc SetPrefix(s string) {\n\tprefix = s\n}\n\nfunc getPC(calldepth int) uintptr {\n\tvar pcs [1]uintptr\n\truntime.Callers(calldepth, pcs[:])\n\n\treturn pcs[0]\n}\n\nfunc extend(err error, pc uintptr) *erx {\n\te, ok := err.(*erx)\n\tif !ok {\n\t\te = &erx{\n\t\t\tpc: pc,\n\t\t\terror: err,\n\t\t}\n\t}\n\n\treturn e\n}\n\nfunc trimSourcePath(name, path string) string {\n\tconst sep = '\/'\n\n\tindexName := strings.LastIndexFunc(name, func(r rune) bool {\n\t\treturn r == sep\n\t})\n\n\tn := 2\n\tif indexName == -1 {\n\t\tn = 1\n\t}\n\n\tindexPath := strings.LastIndexFunc(path, func(r rune) bool {\n\t\tif r == sep {\n\t\t\tn--\n\t\t}\n\t\treturn n == 0\n\t})\n\n\tif indexName == -1 {\n\t\treturn path[indexPath+1:]\n\t}\n\n\treturn name[:indexName] + path[indexPath:]\n}\n<commit_msg>fixed variadic functions<commit_after>package errors\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar prefix string\n\ntype erx struct {\n\tpc uintptr\n\terror error\n\tparent *erx\n}\n\n\/\/ Implementation of the error interface.\nfunc (e erx) Error() string {\n\tx, flat := &e, make([]*erx, 0, 16)\n\tfor {\n\t\tflat = append(flat, x)\n\t\tif x.parent == nil {\n\t\t\tbreak\n\t\t}\n\t\tx = x.parent\n\t}\n\tmax := len(flat) - 1\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i <= max; i++ {\n\t\tx = flat[max-i]\n\n\t\tif i == 0 {\n\t\t\tbuf.WriteString(\"E \")\n\t\t} else {\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(prefix)\n\t\t\tbuf.WriteString(\"+ \")\n\t\t}\n\n\t\tfn := runtime.FuncForPC(x.pc)\n\t\tif fn == nil {\n\t\t\tbuf.WriteString(\"unknown\")\n\t\t} else {\n\t\t\tfile, line := fn.FileLine(x.pc)\n\t\t\tbuf.WriteString(trimSourcePath(fn.Name(), file))\n\t\t\tbuf.WriteRune(':')\n\t\t\tbuf.WriteString(strconv.Itoa(line))\n\t\t}\n\n\t\tif x.error != nil {\n\t\t\tbuf.WriteRune(' ')\n\t\t\tbuf.WriteString(x.error.Error())\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\n\/\/ New returns a new error with the given printf-formatted error message.\nfunc New(text string, a ...interface{}) error {\n\tx := &erx{\n\t\tpc: getPC(3),\n\t\terror: errors.New(text),\n\t}\n\tif len(a) == 0 {\n\t\tx.error = errors.New(text)\n\t} else {\n\t\tx.error = fmt.Errorf(text, a...)\n\t}\n\n\treturn x\n}\n\n\/\/ Append returns a new error with the parent error\n\/\/ and given printf-formatted error message.\nfunc Append(parent error, text string, a ...interface{}) error {\n\tpc := getPC(3)\n\tp := extend(parent, pc)\n\n\tvar err error\n\tif len(a) == 0 {\n\t\terr = errors.New(text)\n\t} else {\n\t\terr = fmt.Errorf(text, a...)\n\t}\n\n\tif parent == nil {\n\t\tp.error = err\n\t\treturn p\n\t}\n\n\te := &erx{\n\t\tpc: pc,\n\t\terror: err,\n\t\tparent: p,\n\t}\n\n\treturn e\n}\n\n\/\/ Wrap returns wrapped one or more errors.\nfunc Wrap(errs ...error) error {\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\n\tpc := getPC(3)\n\n\tvar x, parent *erx\n\nloop:\n\tfor _, err := range errs {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tparent = x\n\t\tx = extend(err, pc)\n\n\t\tif parent == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\te := x\n\t\tfor e.parent != nil {\n\t\t\tif e == parent {\n\t\t\t\tx = parent\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t\te = e.parent\n\t\t}\n\t\te.parent = parent\n\t}\n\n\tif x == nil {\n\t\treturn nil\n\t}\n\n\treturn x\n}\n\n\/\/ Sets the prefix for child errors.\nfunc SetPrefix(s string) {\n\tprefix = s\n}\n\nfunc getPC(calldepth int) uintptr {\n\tvar pcs [1]uintptr\n\truntime.Callers(calldepth, pcs[:])\n\n\treturn pcs[0]\n}\n\nfunc extend(err error, pc uintptr) *erx {\n\te, ok := err.(*erx)\n\tif !ok {\n\t\te = &erx{\n\t\t\tpc: pc,\n\t\t\terror: err,\n\t\t}\n\t}\n\n\treturn e\n}\n\nfunc trimSourcePath(name, path string) string {\n\tconst sep = '\/'\n\n\tindexName := strings.LastIndexFunc(name, func(r rune) bool {\n\t\treturn r == sep\n\t})\n\n\tn := 2\n\tif indexName == -1 {\n\t\tn = 1\n\t}\n\n\tindexPath := strings.LastIndexFunc(path, func(r rune) bool {\n\t\tif r == sep {\n\t\t\tn--\n\t\t}\n\t\treturn n == 0\n\t})\n\n\tif indexName == -1 {\n\t\treturn path[indexPath+1:]\n\t}\n\n\treturn name[:indexName] + path[indexPath:]\n}\n<|endoftext|>"} {"text":"<commit_before>package executor\n\nimport \"net\/http\"\n\ntype Error interface {\n\terror\n\n\tName() string\n\tHttpCode() int\n}\n\ntype execError struct {\n\tname string\n\tmessage string\n\thttpCode int\n}\n\nfunc (err execError) Name() string {\n\treturn err.name\n}\n\nfunc (err execError) Error() string {\n\treturn err.message\n}\n\nfunc (err execError) HttpCode() int {\n\treturn err.httpCode\n}\n\nvar Errors = map[string]Error{}\n\nfunc registerError(name string, message string, status int) Error {\n\terr := execError{name, message, status}\n\tErrors[name] = err\n\treturn err\n}\n\nvar (\n\tErrContainerGuidNotAvailable = registerError(\"ContainerGuidNotAvailable\", \"container guid not available\", http.StatusBadRequest)\n\tErrContainerNotCompleted = registerError(\"ContainerNotCompleted\", \"container must be stopped before it can be deleted\", http.StatusBadRequest)\n\tErrInsufficientResourcesAvailable = registerError(\"InsufficientResourcesAvailable\", \"insufficient resources available\", http.StatusServiceUnavailable)\n\tErrContainerNotFound = registerError(\"ContainerNotFound\", \"container not found\", http.StatusNotFound)\n\tErrStepsInvalid = registerError(\"StepsInvalid\", \"steps invalid\", http.StatusBadRequest)\n\tErrLimitsInvalid = registerError(\"LimitsInvalid\", \"container limits invalid\", http.StatusBadRequest)\n\tErrGuidNotSpecified = registerError(\"GuidNotSpecified\", \"container guid not specified\", http.StatusBadRequest)\n\tErrInvalidTransition = registerError(\"InvalidStateTransition\", \"container cannot transition to given state\", http.StatusConflict)\n\tErrFailureToCheckSpace = registerError(\"ErrFailureToCheckSpace\", \"failed to check available space\", http.StatusInternalServerError)\n\tErrInvalidSecurityGroup = registerError(\"ErrInvalidSecurityGroup\", \"security group has invalid values\", http.StatusBadRequest)\n\tErrNoProcessToStop = registerError(\"ErrNoProcessToStop\", \"failed to find a process to stop\", http.StatusNotFound)\n)\n<commit_msg>Remove unused http codes from errors<commit_after>package executor\n\ntype Error interface {\n\terror\n\n\tName() string\n}\n\ntype execError struct {\n\tname string\n\tmessage string\n}\n\nfunc (err execError) Name() string {\n\treturn err.name\n}\n\nfunc (err execError) Error() string {\n\treturn err.message\n}\n\nvar Errors = map[string]Error{}\n\nfunc registerError(name, message string) Error {\n\terr := execError{name, message}\n\tErrors[name] = err\n\treturn err\n}\n\nvar (\n\tErrContainerGuidNotAvailable = registerError(\"ContainerGuidNotAvailable\", \"container guid not available\")\n\tErrContainerNotCompleted = registerError(\"ContainerNotCompleted\", \"container must be stopped before it can be deleted\")\n\tErrInsufficientResourcesAvailable = registerError(\"InsufficientResourcesAvailable\", \"insufficient resources available\")\n\tErrContainerNotFound = registerError(\"ContainerNotFound\", \"container not found\")\n\tErrStepsInvalid = registerError(\"StepsInvalid\", \"steps invalid\")\n\tErrLimitsInvalid = registerError(\"LimitsInvalid\", \"container limits invalid\")\n\tErrGuidNotSpecified = registerError(\"GuidNotSpecified\", \"container guid not specified\")\n\tErrInvalidTransition = registerError(\"InvalidStateTransition\", \"container cannot transition to given state\")\n\tErrFailureToCheckSpace = registerError(\"ErrFailureToCheckSpace\", \"failed to check available space\")\n\tErrInvalidSecurityGroup = registerError(\"ErrInvalidSecurityGroup\", \"security group has invalid values\")\n\tErrNoProcessToStop = registerError(\"ErrNoProcessToStop\", \"failed to find a process to stop\")\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"math\"\nimport \"fmt\"\n\nfunc test() {\n\tfmt.Println(\"Called test()\")\n\n}\n\nfunc main() {\n\tfmt.Println(math.Cos(3.4))\n\ttest()\n\n}\n<commit_msg>Testing out the base64 module<commit_after>package main\n\nimport (\n\t\"math\"\n\t\"fmt\"\n\t\"encoding\/base64\"\n)\n\nfunc test() {\n\tfmt.Println(\"Called test()\")\n\n}\n\nfunc main() {\n\t\/\/ testing out base64 encoding of URLs\n\turl := []byte(\"http:\/\/www.golang.com\")\n\tstr := base64.StdEncoding.EncodeToString(url)\n\tfmt.Println(str)\n\t\n\tfmt.Println(math.Cos(3.4))\n\ttest()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package gobotslack\n\nimport (\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/eternnoir\/gobot\"\n\t\"github.com\/eternnoir\/gobot\/payload\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nconst AdapterName string = \"gobotslack\"\n\nfunc init() {\n\tgobot.RegisterAdapter(AdapterName, &SlackAdapter{})\n}\n\ntype SlackAdapter struct {\n\tbot *gobot.Gobot\n\tapi *slack.Client\n\trtm *slack.RTM\n\ttoken string\n\tdefaultChannel string\n\tchannelMap map[string]slack.Channel\n\tuserNameMap map[string]slack.User\n\tuserIdMap map[string]slack.User\n}\n\ntype SlackConfig struct {\n\tToken string\n\tChannel string\n}\n\nfunc (sa *SlackAdapter) Init(bot *gobot.Gobot) error {\n\tlog.Infof(\"SlackAdapter init.\")\n\tvar conf SlackConfig\n\tif _, err := toml.DecodeFile(bot.ConfigPath+\"\/slack.toml\", &conf); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"SlackAdapter get config %#v\", conf)\n\tsa.token = conf.Token\n\tsa.bot = bot\n\tsa.api = slack.New(sa.token)\n\tdc := \"general\"\n\tif conf.Channel != \"\" {\n\t\tdc = conf.Channel\n\t}\n\tsa.defaultChannel = dc\n\tlog.Infof(\"SlackAdapter init done. %#v\", sa)\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) Start() {\n\tlog.Info(\"SlackAdapter start.\")\n\tsa.initChannelMap()\n\tsa.initUserMap()\n\tsa.startRTM()\n}\n\nfunc (sa *SlackAdapter) initChannelMap() {\n\tchs, err := sa.api.GetChannels(false)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tpanic(\"SlackConfig load channels fail.\")\n\t}\n\tsa.channelMap = map[string]slack.Channel{}\n\tfor _, ch := range chs {\n\t\tlog.Infof(\"[SlackAdapter] load channel %s Id: %s\", ch.Name, ch.ID)\n\t\tsa.channelMap[ch.Name] = ch\n\t}\n}\n\nfunc (sa *SlackAdapter) initUserMap() {\n\tusers, err := sa.api.GetUsers()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tpanic(\"SlackConfig load users fail.\")\n\t}\n\tlog.Infof(\"[SlackAdapter] load users %#v\", users)\n\tsa.userNameMap = map[string]slack.User{}\n\tsa.userIdMap = map[string]slack.User{}\n\tfor _, user := range users {\n\t\tsa.userNameMap[user.Name] = user\n\t\tsa.userIdMap[user.ID] = user\n\t}\n}\n\nfunc (sa *SlackAdapter) Send(text string) error {\n\tlog.Infof(\"[SlackAdapter] Get new text to send.%s\", text, sa.defaultChannel)\n\tif ch, ok := sa.channelMap[sa.defaultChannel]; ok {\n\t\tlog.Infof(\"[SlackAdapter] Send new text %s. To %s\", text, ch.Name)\n\t\t_, _, err := sa.api.PostMessage(\"#\"+ch.Name, text, slack.PostMessageParameters{AsUser: true})\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t} else {\n\t\tlog.Errorf(\"[SlackAdapter] Channel name %s not found.\", sa.defaultChannel)\n\t}\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) Reply(orimessage *payload.Message, text string) error {\n\tlog.Infof(\"Get Replay message. Origin message is %s. Text %s\", orimessage.Text, text)\n\tev := orimessage.Payload.(*slack.MessageEvent)\n\tresMsg := sa.rtm.NewOutgoingMessage(text, ev.Channel)\n\tsa.rtm.SendMessage(resMsg)\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) startRTM() {\n\trtm := sa.api.NewRTM()\n\tsa.rtm = rtm\n\tgo rtm.ManageConnection()\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-rtm.IncomingEvents:\n\t\t\tlog.Info(\"Event Received: \")\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\n\t\t\tcase *slack.ConnectedEvent:\n\t\t\t\tfmt.Println(\"Infos:\", ev.Info)\n\t\t\t\tfmt.Println(\"Connection counter:\", ev.ConnectionCount)\n\t\t\t\t\/\/ Replace #general with your Channel ID\n\t\t\t\trtm.SendMessage(rtm.NewOutgoingMessage(\"Hello world\", \"#general\"))\n\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tlog.Infof(\"[SlackAdapter] Get message %#v\", ev)\n\t\t\t\tmsg := &payload.Message{}\n\t\t\t\tmsg.SourceAdapter = AdapterName\n\t\t\t\tmsg.Text = ev.Msg.Text\n\t\t\t\tmsg.Payload = ev\n\t\t\t\tsa.bot.Receive(msg)\n\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tfmt.Printf(\"Presence Change: %v\\n\", ev)\n\n\t\t\tcase *slack.LatencyReport:\n\t\t\t\tfmt.Printf(\"Current latency: %v\\n\", ev.Value)\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tlog.Errorf(\"[SlackAdapter] Error: %s\", ev.Error())\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tlog.Error(\"[SlackAdapter] InvalidAuthEvent Error\")\n\t\t\t\tbreak Loop\n\n\t\t\tdefault:\n\n\t\t\t\t\/\/ Ignore other events..\n\t\t\t\t\/\/ fmt.Printf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n\n}\n<commit_msg>add parse support.<commit_after>package gobotslack\n\nimport (\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/eternnoir\/gobot\"\n\t\"github.com\/eternnoir\/gobot\/payload\"\n\t\"github.com\/nlopes\/slack\"\n)\n\nconst AdapterName string = \"gobotslack\"\n\nfunc init() {\n\tgobot.RegisterAdapter(AdapterName, &SlackAdapter{})\n}\n\ntype SlackAdapter struct {\n\tbot *gobot.Gobot\n\tapi *slack.Client\n\trtm *slack.RTM\n\ttoken string\n\tdefaultChannel string\n\tchannelMap map[string]slack.Channel\n\tuserNameMap map[string]slack.User\n\tuserIdMap map[string]slack.User\n}\n\ntype SlackConfig struct {\n\tToken string\n\tChannel string\n}\n\nfunc (sa *SlackAdapter) Init(bot *gobot.Gobot) error {\n\tlog.Infof(\"SlackAdapter init.\")\n\tvar conf SlackConfig\n\tif _, err := toml.DecodeFile(bot.ConfigPath+\"\/slack.toml\", &conf); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"SlackAdapter get config %#v\", conf)\n\tsa.token = conf.Token\n\tsa.bot = bot\n\tsa.api = slack.New(sa.token)\n\tdc := \"general\"\n\tif conf.Channel != \"\" {\n\t\tdc = conf.Channel\n\t}\n\tsa.defaultChannel = dc\n\tlog.Infof(\"SlackAdapter init done. %#v\", sa)\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) Start() {\n\tlog.Info(\"SlackAdapter start.\")\n\tsa.initChannelMap()\n\tsa.initUserMap()\n\tsa.startRTM()\n}\n\nfunc (sa *SlackAdapter) initChannelMap() {\n\tchs, err := sa.api.GetChannels(false)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tpanic(\"SlackConfig load channels fail.\")\n\t}\n\tsa.channelMap = map[string]slack.Channel{}\n\tfor _, ch := range chs {\n\t\tlog.Infof(\"[SlackAdapter] load channel %s Id: %s\", ch.Name, ch.ID)\n\t\tsa.channelMap[ch.Name] = ch\n\t}\n}\n\nfunc (sa *SlackAdapter) initUserMap() {\n\tusers, err := sa.api.GetUsers()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tpanic(\"SlackConfig load users fail.\")\n\t}\n\tlog.Infof(\"[SlackAdapter] load users %#v\", users)\n\tsa.userNameMap = map[string]slack.User{}\n\tsa.userIdMap = map[string]slack.User{}\n\tfor _, user := range users {\n\t\tsa.userNameMap[user.Name] = user\n\t\tsa.userIdMap[user.ID] = user\n\t}\n}\n\nfunc (sa *SlackAdapter) Send(text string) error {\n\tlog.Infof(\"[SlackAdapter] Get new text to send.%s\", text, sa.defaultChannel)\n\tif ch, ok := sa.channelMap[sa.defaultChannel]; ok {\n\t\tlog.Infof(\"[SlackAdapter] Send new text %s. To %s\", text, ch.Name)\n\t\t_, _, err := sa.api.PostMessage(\"#\"+ch.Name, text, slack.PostMessageParameters{AsUser: true, Parse: \"full\"})\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t} else {\n\t\tlog.Errorf(\"[SlackAdapter] Channel name %s not found.\", sa.defaultChannel)\n\t}\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) Reply(orimessage *payload.Message, text string) error {\n\tlog.Infof(\"Get Replay message. Origin message is %s. Text %s\", orimessage.Text, text)\n\tev := orimessage.Payload.(*slack.MessageEvent)\n\tresMsg := sa.rtm.NewOutgoingMessage(text, ev.Channel)\n\tsa.rtm.SendMessage(resMsg)\n\treturn nil\n}\n\nfunc (sa *SlackAdapter) startRTM() {\n\trtm := sa.api.NewRTM()\n\tsa.rtm = rtm\n\tgo rtm.ManageConnection()\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-rtm.IncomingEvents:\n\t\t\tlog.Info(\"Event Received: \")\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.HelloEvent:\n\t\t\t\t\/\/ Ignore hello\n\n\t\t\tcase *slack.ConnectedEvent:\n\t\t\t\tfmt.Println(\"Infos:\", ev.Info)\n\t\t\t\tfmt.Println(\"Connection counter:\", ev.ConnectionCount)\n\t\t\t\t\/\/ Replace #general with your Channel ID\n\t\t\t\trtm.SendMessage(rtm.NewOutgoingMessage(\"Hello world\", \"#general\"))\n\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tlog.Infof(\"[SlackAdapter] Get message %#v\", ev)\n\t\t\t\tmsg := &payload.Message{}\n\t\t\t\tmsg.SourceAdapter = AdapterName\n\t\t\t\tmsg.Text = ev.Msg.Text\n\t\t\t\tmsg.Payload = ev\n\t\t\t\tsa.bot.Receive(msg)\n\n\t\t\tcase *slack.PresenceChangeEvent:\n\t\t\t\tfmt.Printf(\"Presence Change: %v\\n\", ev)\n\n\t\t\tcase *slack.LatencyReport:\n\t\t\t\tfmt.Printf(\"Current latency: %v\\n\", ev.Value)\n\n\t\t\tcase *slack.RTMError:\n\t\t\t\tlog.Errorf(\"[SlackAdapter] Error: %s\", ev.Error())\n\n\t\t\tcase *slack.InvalidAuthEvent:\n\t\t\t\tlog.Error(\"[SlackAdapter] InvalidAuthEvent Error\")\n\t\t\t\tbreak Loop\n\n\t\t\tdefault:\n\n\t\t\t\t\/\/ Ignore other events..\n\t\t\t\t\/\/ fmt.Printf(\"Unexpected: %v\\n\", msg.Data)\n\t\t\t}\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package gopherpods\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/mail\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tyyyymmdd = \"2006-01-02\"\n)\n\nvar (\n\tpodcastsTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/podcasts.html\",\n\t))\n\n\tsubmitTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/submit.html\",\n\t))\n\n\tthanksTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/thanks.html\",\n\t))\n\n\tsubmissionsTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/submissions.html\",\n\t))\n\n\tsuccessTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/success.html\",\n\t))\n)\n\ntype aehandler struct {\n\th func(ctx context.Context, w http.ResponseWriter, r *http.Request) error\n\tmethod string\n}\n\nfunc (a aehandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != a.method {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tctx := appengine.NewContext(r)\n\tif err := a.h(ctx, w, r); err != nil {\n\t\tlog.Errorf(ctx, \"%v\", err)\n\t\thttp.Error(w, \"There was an error, sorry\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc init() {\n\thttp.Handle(\"\/\", aehandler{podcastsHandler, \"GET\"})\n\thttp.Handle(\"\/submit\/\", aehandler{submitHandler, \"GET\"})\n\thttp.Handle(\"\/submit\/add\", aehandler{submitAddHandler, \"POST\"})\n\n\thttp.Handle(\"\/submissions\/\", aehandler{submissionsHandler, \"GET\"})\n\thttp.Handle(\"\/submissions\/add\", aehandler{submissionsAddHandler, \"POST\"})\n\thttp.Handle(\"\/submissions\/del\", aehandler{submissionsDelHandler, \"POST\"})\n\n\thttp.Handle(\"\/tasks\/email\", aehandler{emailHandler, \"GET\"})\n}\n\ntype Podcast struct {\n\tID int64 `datastore:\",noindex\"`\n\tShow string `datastore:\",noindex\"`\n\tTitle string `datastore:\",noindex\"`\n\tDesc string `datastore:\",noindex\"`\n\tURL template.URL `datastore:\",noindex\"`\n\tDate time.Time `datastore:\"\"`\n\tAdded time.Time `datastore:\"\"`\n}\n\nfunc (p *Podcast) DateFormatted() string {\n\treturn p.Date.Format(yyyymmdd)\n}\n\ntype Submission struct {\n\tShow string `datastore:\",noindex\"`\n\tTitle string `datastore:\",noindex\"`\n\tDesc string `datastore:\",noindex\"`\n\tURL template.URL `datastore:\",noindex\"`\n\tDate time.Time `datastore:\",noindex\"`\n\tSubmitted time.Time `datastore:\"\"`\n\tKey string `datastore:\"-\"`\n}\n\nfunc (s *Submission) DateFormatted() string {\n\treturn s.Date.Format(yyyymmdd)\n}\n\nfunc podcastsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tpodcasts := make([]Podcast, 0)\n\tif _, err := datastore.NewQuery(\"Podcast\").GetAll(ctx, &podcasts); err != nil {\n\t\treturn err\n\t}\n\n\tvar tmplData = struct {\n\t\tPodcasts []Podcast\n\t}{\n\t\tpodcasts,\n\t}\n\n\treturn podcastsTmpl.ExecuteTemplate(w, \"base\", tmplData)\n}\n\nfunc submitHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\treturn submitTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc sanitize(xss string) string {\n\treturn template.HTMLEscapeString(template.JSEscapeString(strings.TrimSpace(xss)))\n}\n\ntype RecaptchaResponse struct {\n\tSuccess bool `json:\"success\"`\n\tErrorCode []string `json:\"error-codes\"`\n}\n\nfunc submitAddHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tif !appengine.IsDevAppServer() {\n\t\tform := url.Values{}\n\t\tform.Add(\"secret\", os.Getenv(\"SECRET\"))\n\t\tform.Add(\"response\", r.FormValue(\"g-recaptcha-response\"))\n\t\tform.Add(\"remoteip\", r.RemoteAddr)\n\t\treq, err := http.NewRequest(\"POST\", \"https:\/\/www.google.com\/recaptcha\/api\/siteverify\", strings.NewReader(form.Encode()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\n\t\tcli := urlfetch.Client(ctx)\n\t\tresp, err := cli.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar recaptcha RecaptchaResponse\n\t\tif err := json.NewDecoder(resp.Body).Decode(&recaptcha); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !recaptcha.Success {\n\t\t\tlog.Infof(ctx, \"%v\", recaptcha)\n\t\t\treturn fmt.Errorf(\"reCAPTCHA check failed\")\n\t\t}\n\t}\n\n\tdate, err := time.Parse(yyyymmdd, sanitize(r.FormValue(\"date\")))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsub := Submission{\n\t\tShow: sanitize(r.FormValue(\"show\")),\n\t\tTitle: sanitize(r.FormValue(\"title\")),\n\t\tDesc: sanitize(r.FormValue(\"desc\")),\n\t\tURL: template.URL(sanitize(r.FormValue(\"url\"))),\n\t\tSubmitted: time.Now(),\n\t\tDate: date,\n\t}\n\n\tif _, err := datastore.Put(ctx, datastore.NewIncompleteKey(ctx, \"Submission\", nil), &sub); err != nil {\n\t\treturn err\n\t}\n\n\treturn thanksTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc submissionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tsubmissions := make([]Submission, 0)\n\tkeys, err := datastore.NewQuery(\"Submission\").Order(\"Submitted\").GetAll(ctx, &submissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range submissions {\n\t\tsubmissions[i].Key = keys[i].Encode()\n\t}\n\n\tvar tmplData = struct {\n\t\tSubmissions []Submission\n\t}{\n\t\tSubmissions: submissions,\n\t}\n\n\treturn submissionsTmpl.ExecuteTemplate(w, \"base\", tmplData)\n}\n\nfunc submissionsAddHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tID, _, err := datastore.AllocateIDs(ctx, \"Podcast\", nil, 100)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdate, err := time.Parse(yyyymmdd, r.FormValue(\"date\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpodcast := Podcast{\n\t\tID: ID,\n\t\tShow: r.FormValue(\"show\"),\n\t\tTitle: r.FormValue(\"title\"),\n\t\tDesc: r.FormValue(\"desc\"),\n\t\tURL: template.URL(r.FormValue(\"url\")),\n\t\tDate: date,\n\t\tAdded: time.Now(),\n\t}\n\n\tif _, err := datastore.Put(ctx, datastore.NewKey(ctx, \"Podcast\", \"\", ID, nil), &podcast); err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := datastore.DecodeKey(r.FormValue(\"key\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := datastore.Delete(ctx, key); err != nil {\n\t\treturn err\n\t}\n\n\treturn successTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc submissionsDelHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := datastore.DecodeKey(r.FormValue(\"key\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := datastore.Delete(ctx, key); err != nil {\n\t\treturn err\n\t}\n\n\treturn successTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc emailHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tkeys, err := datastore.NewQuery(\"Submission\").KeysOnly().GetAll(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\tmsg := mail.Message{\n\t\tSubject: \"GopherPods\",\n\t\tSender: os.Getenv(\"EMAIL\"),\n\t\tBody: fmt.Sprintf(\"There are %d outstanding submissions.\\n\\nhttps:\/\/gopherpods.appspot.com\/submissions\/\\n\\n%v\",\n\t\t\tlen(keys), time.Now()),\n\t}\n\n\tif err := mail.SendToAdmins(ctx, &msg); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Refactor recaptcha check, simplify mail and handlers<commit_after>package gopherpods\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/mail\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tyyyymmdd = \"2006-01-02\"\n\trecaptchaURL = \"https:\/\/www.google.com\/recaptcha\/api\/siteverify\"\n)\n\nvar (\n\tpodcastsTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/podcasts.html\",\n\t))\n\n\tsubmitTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/submit.html\",\n\t))\n\n\tthanksTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/thanks.html\",\n\t))\n\n\tsubmissionsTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/submissions.html\",\n\t))\n\n\tsuccessTmpl = template.Must(template.ParseFiles(\n\t\t\"static\/html\/base.html\",\n\t\t\"static\/html\/success.html\",\n\t))\n)\n\ntype aehandler struct {\n\th func(ctx context.Context, w http.ResponseWriter, r *http.Request) error\n\tmethod string\n}\n\nfunc (a aehandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != a.method {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tctx := appengine.NewContext(r)\n\tif err := a.h(ctx, w, r); err != nil {\n\t\tlog.Errorf(ctx, \"%v\", err)\n\t\thttp.Error(w, \"There was an error, sorry\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc init() {\n\thttp.Handle(\"\/\", aehandler{podcastsHandler, \"GET\"})\n\thttp.Handle(\"\/submit\", aehandler{submitHandler, \"GET\"})\n\thttp.Handle(\"\/submit\/add\", aehandler{submitAddHandler, \"POST\"})\n\n\thttp.Handle(\"\/submissions\", aehandler{submissionsHandler, \"GET\"})\n\thttp.Handle(\"\/submissions\/add\", aehandler{submissionsAddHandler, \"POST\"})\n\thttp.Handle(\"\/submissions\/del\", aehandler{submissionsDelHandler, \"POST\"})\n\n\thttp.Handle(\"\/tasks\/email\", aehandler{emailHandler, \"GET\"})\n}\n\ntype Podcast struct {\n\tID int64 `datastore:\",noindex\"`\n\tShow string `datastore:\",noindex\"`\n\tTitle string `datastore:\",noindex\"`\n\tDesc string `datastore:\",noindex\"`\n\tURL template.URL `datastore:\",noindex\"`\n\tDate time.Time `datastore:\"\"`\n\tAdded time.Time `datastore:\"\"`\n}\n\nfunc (p *Podcast) DateFormatted() string {\n\treturn p.Date.Format(yyyymmdd)\n}\n\ntype Submission struct {\n\tShow string `datastore:\",noindex\"`\n\tTitle string `datastore:\",noindex\"`\n\tDesc string `datastore:\",noindex\"`\n\tURL template.URL `datastore:\",noindex\"`\n\tDate time.Time `datastore:\",noindex\"`\n\tSubmitted time.Time `datastore:\"\"`\n\tKey string `datastore:\"-\"`\n}\n\nfunc (s *Submission) DateFormatted() string {\n\treturn s.Date.Format(yyyymmdd)\n}\n\nfunc podcastsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tpodcasts := make([]Podcast, 0)\n\tif _, err := datastore.NewQuery(\"Podcast\").Order(\"-Date\").GetAll(ctx, &podcasts); err != nil {\n\t\treturn err\n\t}\n\n\tvar tmplData = struct {\n\t\tPodcasts []Podcast\n\t}{\n\t\tpodcasts,\n\t}\n\n\treturn podcastsTmpl.ExecuteTemplate(w, \"base\", tmplData)\n}\n\nfunc submitHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\treturn submitTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc sanitize(xss string) string {\n\treturn template.HTMLEscapeString(template.JSEscapeString(strings.TrimSpace(xss)))\n}\n\nfunc recaptchaCheck(ctx context.Context, response, ip string) (bool, error) {\n\tif appengine.IsDevAppServer() {\n\t\treturn true, nil\n\t}\n\n\tform := url.Values{}\n\tform.Add(\"secret\", os.Getenv(\"SECRET\"))\n\tform.Add(\"response\", response)\n\tform.Add(\"remoteip\", ip)\n\treq, err := http.NewRequest(\"POST\", recaptchaURL, strings.NewReader(form.Encode()))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tcli := urlfetch.Client(ctx)\n\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\tresp, err := cli.Do(req)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar recaptcha RecaptchaResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&recaptcha); err != nil {\n\t\treturn false, err\n\t}\n\n\tif !recaptcha.Success {\n\t\tlog.Warningf(ctx, \"%+v\", recaptcha)\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\ntype RecaptchaResponse struct {\n\tSuccess bool `json:\"success\"`\n\tErrorCode []string `json:\"error-codes\"`\n}\n\nfunc submitAddHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tsuccess, err := recaptchaCheck(ctx, r.FormValue(\"g-recaptcha-response\"), r.RemoteAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !success {\n\t\thttp.Error(w, \"failed reCAPTCHA check\", http.StatusBadRequest)\n\t\treturn nil\n\t}\n\n\tdate, err := time.Parse(yyyymmdd, sanitize(r.FormValue(\"date\")))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsub := Submission{\n\t\tShow: sanitize(r.FormValue(\"show\")),\n\t\tTitle: sanitize(r.FormValue(\"title\")),\n\t\tDesc: sanitize(r.FormValue(\"desc\")),\n\t\tURL: template.URL(sanitize(r.FormValue(\"url\"))),\n\t\tSubmitted: time.Now(),\n\t\tDate: date,\n\t}\n\n\tif _, err := datastore.Put(ctx, datastore.NewIncompleteKey(ctx, \"Submission\", nil), &sub); err != nil {\n\t\treturn err\n\t}\n\n\treturn thanksTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc submissionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tsubmissions := make([]Submission, 0)\n\tkeys, err := datastore.NewQuery(\"Submission\").Order(\"Submitted\").GetAll(ctx, &submissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range submissions {\n\t\tsubmissions[i].Key = keys[i].Encode()\n\t}\n\n\tvar tmplData = struct {\n\t\tSubmissions []Submission\n\t}{\n\t\tSubmissions: submissions,\n\t}\n\n\treturn submissionsTmpl.ExecuteTemplate(w, \"base\", tmplData)\n}\n\nfunc submissionsAddHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tID, _, err := datastore.AllocateIDs(ctx, \"Podcast\", nil, 100)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdate, err := time.Parse(yyyymmdd, r.FormValue(\"date\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpodcast := Podcast{\n\t\tID: ID,\n\t\tShow: r.FormValue(\"show\"),\n\t\tTitle: r.FormValue(\"title\"),\n\t\tDesc: r.FormValue(\"desc\"),\n\t\tURL: template.URL(r.FormValue(\"url\")),\n\t\tDate: date,\n\t\tAdded: time.Now(),\n\t}\n\n\tif _, err := datastore.Put(ctx, datastore.NewKey(ctx, \"Podcast\", \"\", ID, nil), &podcast); err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := datastore.DecodeKey(r.FormValue(\"key\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := datastore.Delete(ctx, key); err != nil {\n\t\treturn err\n\t}\n\n\treturn successTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc submissionsDelHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := datastore.DecodeKey(r.FormValue(\"key\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := datastore.Delete(ctx, key); err != nil {\n\t\treturn err\n\t}\n\n\treturn successTmpl.ExecuteTemplate(w, \"base\", nil)\n}\n\nfunc emailHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tkeys, err := datastore.NewQuery(\"Submission\").KeysOnly().GetAll(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\tmsg := mail.Message{\n\t\tSubject: \"GopherPods\",\n\t\tSender: os.Getenv(\"EMAIL\"),\n\t\tBody: fmt.Sprintf(\"There are %d outstanding submissions\", len(keys)),\n\t}\n\n\tif err := mail.SendToAdmins(ctx, &msg); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage kex2\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"io\"\n\t\"testing\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tGoodProvisionee = 0\n\tBadProvisioneeFailHello = 1 << iota\n\tBadProvisioneeFailDidCounterSign = 1 << iota\n\tBadProvisioneeSlowHello = 1 << iota\n\tBadProvisioneeSlowDidCounterSign = 1 << iota\n\tBadProvisioneeCancel = 1 << iota\n)\n\ntype mockProvisioner struct {\n\tuid keybase1.UID\n}\n\ntype mockProvisionee struct {\n\tbehavior int\n}\n\nfunc newMockProvisioner(t *testing.T) *mockProvisioner {\n\treturn &mockProvisioner{\n\t\tuid: genUID(t),\n\t}\n}\n\ntype nullLogOutput struct {\n}\n\nfunc (n *nullLogOutput) Error(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Warning(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Info(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Debug(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Profile(s string, args ...interface{}) {}\n\nvar _ rpc.LogOutput = (*nullLogOutput)(nil)\n\nfunc makeLogFactory() rpc.LogFactory {\n\tif testing.Verbose() {\n\t\treturn nil\n\t}\n\treturn rpc.NewSimpleLogFactory(&nullLogOutput{}, nil)\n}\n\nfunc genUID(t *testing.T) keybase1.UID {\n\tuid := make([]byte, 8)\n\tif _, err := rand.Read(uid); err != nil {\n\t\tt.Fatalf(\"rand failed: %v\\n\", err)\n\t}\n\treturn keybase1.UID(hex.EncodeToString(uid))\n}\n\nfunc genKeybase1DeviceID(t *testing.T) keybase1.DeviceID {\n\tdid := make([]byte, 16)\n\tif _, err := rand.Read(did); err != nil {\n\t\tt.Fatalf(\"rand failed: %v\\n\", err)\n\t}\n\treturn keybase1.DeviceID(hex.EncodeToString(did))\n}\n\nfunc newMockProvisionee(t *testing.T, behavior int) *mockProvisionee {\n\treturn &mockProvisionee{behavior}\n}\n\nfunc (mp *mockProvisioner) GetLogFactory() rpc.LogFactory {\n\treturn makeLogFactory()\n}\n\nfunc (mp *mockProvisioner) CounterSign(input keybase1.HelloRes) (output []byte, err error) {\n\toutput = []byte(string(input))\n\treturn\n}\n\nfunc (mp *mockProvisioner) GetHelloArg() (res keybase1.HelloArg, err error) {\n\tres.Uid = mp.uid\n\treturn\n}\n\nfunc (mp *mockProvisionee) GetLogFactory() rpc.LogFactory {\n\treturn makeLogFactory()\n}\n\nvar ErrHandleHello = errors.New(\"handle hello failure\")\nvar ErrHandleDidCounterSign = errors.New(\"handle didCounterSign failure\")\nvar testTimeout = time.Duration(20) * time.Millisecond\n\nfunc (mp *mockProvisionee) HandleHello(arg keybase1.HelloArg) (res keybase1.HelloRes, err error) {\n\tif (mp.behavior & BadProvisioneeSlowHello) != 0 {\n\t\ttime.Sleep(testTimeout * 3)\n\t}\n\tif (mp.behavior & BadProvisioneeFailHello) != 0 {\n\t\terr = ErrHandleHello\n\t\treturn\n\t}\n\tres = keybase1.HelloRes(arg.SigBody)\n\treturn\n}\n\nfunc (mp *mockProvisionee) HandleDidCounterSign([]byte) error {\n\tif (mp.behavior & BadProvisioneeSlowDidCounterSign) != 0 {\n\t\ttime.Sleep(testTimeout * 3)\n\t}\n\tif (mp.behavior & BadProvisioneeFailDidCounterSign) != 0 {\n\t\treturn ErrHandleDidCounterSign\n\t}\n\treturn nil\n}\n\nfunc testProtocolXWithBehavior(t *testing.T, provisioneeBehavior int) (results [2]error) {\n\n\ttimeout := testTimeout\n\trouter := newMockRouterWithBehaviorAndMaxPoll(GoodRouter, timeout)\n\n\ts2 := genSecret(t)\n\n\tch := make(chan error, 3)\n\n\tsecretCh := make(chan Secret)\n\n\tctx, cancelFn := context.WithCancel(context.Background())\n\n\t\/\/ Run the provisioner\n\tgo func() {\n\t\terr := RunProvisioner(ProvisionerArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: ctx,\n\t\t\t\tMr: router,\n\t\t\t\tSecret: genSecret(t),\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: secretCh,\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisioner: newMockProvisioner(t),\n\t\t})\n\t\tch <- err\n\t}()\n\n\t\/\/ Run the privisionee\n\tgo func() {\n\t\terr := RunProvisionee(ProvisioneeArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.Background(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: s2,\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: make(chan Secret),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisionee: newMockProvisionee(t, provisioneeBehavior),\n\t\t})\n\t\tch <- err\n\t}()\n\n\tif (provisioneeBehavior & BadProvisioneeCancel) != 0 {\n\t\tgo func() {\n\t\t\ttime.Sleep(testTimeout \/ 2)\n\t\t\tcancelFn()\n\t\t}()\n\t}\n\n\tsecretCh <- s2\n\n\tfor i := 0; i < 2; i++ {\n\t\tif e, eof := <-ch; !eof {\n\t\t\tt.Fatalf(\"got unexpected channel close (try %d)\", i)\n\t\t} else if e != nil {\n\t\t\tresults[i] = e\n\t\t}\n\t}\n\n\treturn results\n}\n\nfunc TestFullProtocolXSuccess(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, GoodProvisionee)\n\tfor i, e := range results {\n\t\tif e != nil {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\n\/\/ Since errors are exported as strings, then we should just test that the\n\/\/ right kind of error was specified\nfunc eeq(e1, e2 error) bool {\n\treturn e1 != nil && e1.Error() == e2.Error()\n}\n\nfunc TestFullProtocolXProvisioneeFailHello(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeFailHello)\n\tif !eeq(results[0], ErrHandleHello) {\n\t\tt.Fatalf(\"Bad error 0: %v\", results[0])\n\t}\n\tif !eeq(results[1], ErrHandleHello) {\n\t\tt.Fatalf(\"Bad error 1: %v\", results[1])\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeFailDidCounterSign(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeFailDidCounterSign)\n\tif !eeq(results[0], ErrHandleDidCounterSign) {\n\t\tt.Fatalf(\"Bad error 0: %v\", results[0])\n\t}\n\tif !eeq(results[1], ErrHandleDidCounterSign) {\n\t\tt.Fatalf(\"Bad error 1: %v\", results[1])\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowHello(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowHello)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrTimedOut) && !eeq(e, io.EOF) && !eeq(e, ErrHelloTimeout) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowHelloWithCancel(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowHello|BadProvisioneeCancel)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrCanceled) && !eeq(e, io.EOF) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowDidCounterSign(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowDidCounterSign)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrTimedOut) && !eeq(e, io.EOF) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolY(t *testing.T) {\n\n\ttimeout := time.Duration(60) * time.Second\n\trouter := newMockRouterWithBehaviorAndMaxPoll(GoodRouter, timeout)\n\n\ts1 := genSecret(t)\n\n\tch := make(chan error, 3)\n\n\tsecretCh := make(chan Secret)\n\n\t\/\/ Run the provisioner\n\tgo func() {\n\t\terr := RunProvisioner(ProvisionerArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.TODO(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: s1,\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: make(chan Secret),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisioner: newMockProvisioner(t),\n\t\t})\n\t\tch <- err\n\t}()\n\n\t\/\/ Run the provisionee\n\tgo func() {\n\t\terr := RunProvisionee(ProvisioneeArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.TODO(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: genSecret(t),\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: secretCh,\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisionee: newMockProvisionee(t, GoodProvisionee),\n\t\t})\n\t\tch <- err\n\t}()\n\n\tsecretCh <- s1\n\n\tfor i := 0; i < 2; i++ {\n\t\tif e, eof := <-ch; !eof {\n\t\t\tt.Fatalf(\"got unexpected channel close (try %d)\", i)\n\t\t} else if e != nil {\n\t\t\tt.Fatalf(\"Unexpected error (receive %d): %v\", i, e)\n\t\t}\n\t}\n\n}\n<commit_msg>Change kex2 timeout for appveyor<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage kex2\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"io\"\n\t\"testing\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tGoodProvisionee = 0\n\tBadProvisioneeFailHello = 1 << iota\n\tBadProvisioneeFailDidCounterSign = 1 << iota\n\tBadProvisioneeSlowHello = 1 << iota\n\tBadProvisioneeSlowDidCounterSign = 1 << iota\n\tBadProvisioneeCancel = 1 << iota\n)\n\ntype mockProvisioner struct {\n\tuid keybase1.UID\n}\n\ntype mockProvisionee struct {\n\tbehavior int\n}\n\nfunc newMockProvisioner(t *testing.T) *mockProvisioner {\n\treturn &mockProvisioner{\n\t\tuid: genUID(t),\n\t}\n}\n\ntype nullLogOutput struct {\n}\n\nfunc (n *nullLogOutput) Error(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Warning(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Info(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Debug(s string, args ...interface{}) {}\nfunc (n *nullLogOutput) Profile(s string, args ...interface{}) {}\n\nvar _ rpc.LogOutput = (*nullLogOutput)(nil)\n\nfunc makeLogFactory() rpc.LogFactory {\n\tif testing.Verbose() {\n\t\treturn nil\n\t}\n\treturn rpc.NewSimpleLogFactory(&nullLogOutput{}, nil)\n}\n\nfunc genUID(t *testing.T) keybase1.UID {\n\tuid := make([]byte, 8)\n\tif _, err := rand.Read(uid); err != nil {\n\t\tt.Fatalf(\"rand failed: %v\\n\", err)\n\t}\n\treturn keybase1.UID(hex.EncodeToString(uid))\n}\n\nfunc genKeybase1DeviceID(t *testing.T) keybase1.DeviceID {\n\tdid := make([]byte, 16)\n\tif _, err := rand.Read(did); err != nil {\n\t\tt.Fatalf(\"rand failed: %v\\n\", err)\n\t}\n\treturn keybase1.DeviceID(hex.EncodeToString(did))\n}\n\nfunc newMockProvisionee(t *testing.T, behavior int) *mockProvisionee {\n\treturn &mockProvisionee{behavior}\n}\n\nfunc (mp *mockProvisioner) GetLogFactory() rpc.LogFactory {\n\treturn makeLogFactory()\n}\n\nfunc (mp *mockProvisioner) CounterSign(input keybase1.HelloRes) (output []byte, err error) {\n\toutput = []byte(string(input))\n\treturn\n}\n\nfunc (mp *mockProvisioner) GetHelloArg() (res keybase1.HelloArg, err error) {\n\tres.Uid = mp.uid\n\treturn\n}\n\nfunc (mp *mockProvisionee) GetLogFactory() rpc.LogFactory {\n\treturn makeLogFactory()\n}\n\nvar ErrHandleHello = errors.New(\"handle hello failure\")\nvar ErrHandleDidCounterSign = errors.New(\"handle didCounterSign failure\")\nvar testTimeout = time.Duration(50) * time.Millisecond\n\nfunc (mp *mockProvisionee) HandleHello(arg keybase1.HelloArg) (res keybase1.HelloRes, err error) {\n\tif (mp.behavior & BadProvisioneeSlowHello) != 0 {\n\t\ttime.Sleep(testTimeout * 3)\n\t}\n\tif (mp.behavior & BadProvisioneeFailHello) != 0 {\n\t\terr = ErrHandleHello\n\t\treturn\n\t}\n\tres = keybase1.HelloRes(arg.SigBody)\n\treturn\n}\n\nfunc (mp *mockProvisionee) HandleDidCounterSign([]byte) error {\n\tif (mp.behavior & BadProvisioneeSlowDidCounterSign) != 0 {\n\t\ttime.Sleep(testTimeout * 3)\n\t}\n\tif (mp.behavior & BadProvisioneeFailDidCounterSign) != 0 {\n\t\treturn ErrHandleDidCounterSign\n\t}\n\treturn nil\n}\n\nfunc testProtocolXWithBehavior(t *testing.T, provisioneeBehavior int) (results [2]error) {\n\n\ttimeout := testTimeout\n\trouter := newMockRouterWithBehaviorAndMaxPoll(GoodRouter, timeout)\n\n\ts2 := genSecret(t)\n\n\tch := make(chan error, 3)\n\n\tsecretCh := make(chan Secret)\n\n\tctx, cancelFn := context.WithCancel(context.Background())\n\n\t\/\/ Run the provisioner\n\tgo func() {\n\t\terr := RunProvisioner(ProvisionerArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: ctx,\n\t\t\t\tMr: router,\n\t\t\t\tSecret: genSecret(t),\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: secretCh,\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisioner: newMockProvisioner(t),\n\t\t})\n\t\tch <- err\n\t}()\n\n\t\/\/ Run the privisionee\n\tgo func() {\n\t\terr := RunProvisionee(ProvisioneeArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.Background(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: s2,\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: make(chan Secret),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisionee: newMockProvisionee(t, provisioneeBehavior),\n\t\t})\n\t\tch <- err\n\t}()\n\n\tif (provisioneeBehavior & BadProvisioneeCancel) != 0 {\n\t\tgo func() {\n\t\t\ttime.Sleep(testTimeout \/ 2)\n\t\t\tcancelFn()\n\t\t}()\n\t}\n\n\tsecretCh <- s2\n\n\tfor i := 0; i < 2; i++ {\n\t\tif e, eof := <-ch; !eof {\n\t\t\tt.Fatalf(\"got unexpected channel close (try %d)\", i)\n\t\t} else if e != nil {\n\t\t\tresults[i] = e\n\t\t}\n\t}\n\n\treturn results\n}\n\nfunc TestFullProtocolXSuccess(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, GoodProvisionee)\n\tfor i, e := range results {\n\t\tif e != nil {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\n\/\/ Since errors are exported as strings, then we should just test that the\n\/\/ right kind of error was specified\nfunc eeq(e1, e2 error) bool {\n\treturn e1 != nil && e1.Error() == e2.Error()\n}\n\nfunc TestFullProtocolXProvisioneeFailHello(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeFailHello)\n\tif !eeq(results[0], ErrHandleHello) {\n\t\tt.Fatalf(\"Bad error 0: %v\", results[0])\n\t}\n\tif !eeq(results[1], ErrHandleHello) {\n\t\tt.Fatalf(\"Bad error 1: %v\", results[1])\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeFailDidCounterSign(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeFailDidCounterSign)\n\tif !eeq(results[0], ErrHandleDidCounterSign) {\n\t\tt.Fatalf(\"Bad error 0: %v\", results[0])\n\t}\n\tif !eeq(results[1], ErrHandleDidCounterSign) {\n\t\tt.Fatalf(\"Bad error 1: %v\", results[1])\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowHello(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowHello)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrTimedOut) && !eeq(e, io.EOF) && !eeq(e, ErrHelloTimeout) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowHelloWithCancel(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowHello|BadProvisioneeCancel)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrCanceled) && !eeq(e, io.EOF) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolXProvisioneeSlowDidCounterSign(t *testing.T) {\n\tresults := testProtocolXWithBehavior(t, BadProvisioneeSlowDidCounterSign)\n\tfor i, e := range results {\n\t\tif !eeq(e, ErrTimedOut) && !eeq(e, io.EOF) {\n\t\t\tt.Fatalf(\"Bad error %d: %v\", i, e)\n\t\t}\n\t}\n}\n\nfunc TestFullProtocolY(t *testing.T) {\n\n\ttimeout := time.Duration(60) * time.Second\n\trouter := newMockRouterWithBehaviorAndMaxPoll(GoodRouter, timeout)\n\n\ts1 := genSecret(t)\n\n\tch := make(chan error, 3)\n\n\tsecretCh := make(chan Secret)\n\n\t\/\/ Run the provisioner\n\tgo func() {\n\t\terr := RunProvisioner(ProvisionerArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.TODO(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: s1,\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: make(chan Secret),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisioner: newMockProvisioner(t),\n\t\t})\n\t\tch <- err\n\t}()\n\n\t\/\/ Run the provisionee\n\tgo func() {\n\t\terr := RunProvisionee(ProvisioneeArg{\n\t\t\tKexBaseArg: KexBaseArg{\n\t\t\t\tCtx: context.TODO(),\n\t\t\t\tMr: router,\n\t\t\t\tSecret: genSecret(t),\n\t\t\t\tDeviceID: genKeybase1DeviceID(t),\n\t\t\t\tSecretChannel: secretCh,\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t\tProvisionee: newMockProvisionee(t, GoodProvisionee),\n\t\t})\n\t\tch <- err\n\t}()\n\n\tsecretCh <- s1\n\n\tfor i := 0; i < 2; i++ {\n\t\tif e, eof := <-ch; !eof {\n\t\t\tt.Fatalf(\"got unexpected channel close (try %d)\", i)\n\t\t} else if e != nil {\n\t\t\tt.Fatalf(\"Unexpected error (receive %d): %v\", i, e)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package libkb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tjsonw \"github.com\/keybase\/go-jsonw\"\n)\n\ntype Session struct {\n\tContextified\n\tfile *JsonFile\n\ttoken string\n\tcsrf string\n\tinFile bool\n\tloaded bool\n\tchecked bool\n\tvalid bool\n\tuid *UID\n\tusername *string\n\tmtime int64\n\tsync.RWMutex\n}\n\nfunc newSession(g *GlobalContext) *Session {\n\treturn &Session{Contextified: Contextified{g}}\n}\n\n\/\/ NewSessionThin creates a minimal (thin) session of just the uid and username.\n\/\/ Clients of the daemon that use the session protocol need this.\nfunc NewSessionThin(uid UID, username string, token string) *Session {\n\t\/\/ XXX should this set valid to true? daemon won't return a\n\t\/\/ session unless valid is true, so...\n\treturn &Session{uid: &uid, username: &username, token: token, valid: true}\n}\n\nfunc (s *Session) IsLoggedIn() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) GetUsername() *string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.username\n}\n\nfunc (s *Session) GetUID() *UID {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.uid\n}\n\nfunc (s *Session) GetToken() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.token\n}\n\nfunc (s *Session) GetCsrf() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.csrf\n}\n\nfunc (s *Session) SetLoggedIn(sessionID, csrfToken, username string, uid UID) {\n\ts.Lock()\n\ts.valid = true\n\ts.uid = &uid\n\ts.username = &username\n\ts.token = sessionID\n\ts.GetDictionary().SetKey(\"session\", jsonw.NewString(sessionID))\n\ts.Unlock()\n\n\ts.SetCsrf(csrfToken)\n\ts.SetDirty()\n}\n\nfunc (s *Session) SetDirty() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.file.dirty = true\n\ts.GetDictionary().SetKey(\"mtime\", jsonw.NewInt64(time.Now().Unix()))\n}\n\nfunc (s *Session) SetCsrf(t string) {\n\ts.Lock()\n\ts.csrf = t\n\tif s.file == nil {\n\t\ts.Unlock()\n\t\treturn\n\t}\n\ts.GetDictionary().SetKey(\"csrf\", jsonw.NewString(t))\n\ts.Unlock()\n\ts.SetDirty()\n}\n\nfunc (s *Session) isConfigLoggedIn() bool {\n\treader := s.G().Env.GetConfig()\n\treturn reader.GetUsername() != \"\" && reader.GetDeviceID() != nil && reader.GetUID() != nil\n}\n\n\/\/ The session file can be out of sync with the config file, particularly when\n\/\/ switching between the node and go clients.\nfunc (s *Session) nukeSessionFileIfOutOfSync() error {\n\tsessionFile := s.G().Env.GetSessionFilename()\n\t\/\/ Use stat to check existence.\n\t_, statErr := os.Lstat(sessionFile)\n\tif statErr == nil && !s.isConfigLoggedIn() {\n\t\ts.G().Log.Warning(\"Session file found but user is not logged in. Deleting session file.\")\n\t\treturn os.Remove(sessionFile)\n\t}\n\treturn nil\n}\n\nfunc (s *Session) Load() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Loading session\")\n\tif s.loaded {\n\t\ts.G().Log.Debug(\"- Skipped; already loaded\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr := s.nukeSessionFileIfOutOfSync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.file = NewJsonFile(s.G().Env.GetSessionFilename(), \"session\")\n\terr = s.file.Load(false)\n\ts.loaded = true\n\n\tif err != nil {\n\t\ts.G().Log.Error(\"Failed to load session file\")\n\t\treturn err\n\t}\n\n\tif s.file.Exists() {\n\t\tvar tmp error\n\t\tvar token, csrf string\n\t\tok := true\n\t\ts.file.jw.AtKey(\"session\").GetStringVoid(&token, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'session' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\ts.file.jw.AtKey(\"csrf\").GetStringVoid(&csrf, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'csrf' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\tmtime, _ := s.file.jw.AtKey(\"mtime\").GetInt64()\n\t\tif ok {\n\t\t\ts.token = token\n\t\t\ts.csrf = csrf\n\t\t\ts.inFile = true\n\t\t\ts.mtime = mtime\n\t\t}\n\t}\n\ts.G().Log.Debug(\"- Loaded session\")\n\treturn nil\n}\n\nfunc (s *Session) GetDictionary() *jsonw.Wrapper {\n\treturn s.file.jw\n}\n\nfunc (s *Session) Write() error {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.file.MaybeSave(true, 0)\n}\n\nfunc (s *Session) IsRecent() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tif s.mtime == 0 {\n\t\treturn false\n\t}\n\tt := time.Unix(s.mtime, 0)\n\treturn time.Since(t) < time.Hour\n}\n\nfunc (s *Session) Check() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Checking session\")\n\tif s.checked {\n\t\ts.G().Log.Debug(\"- already checked, short-circuting\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\ts.Lock()\n\ts.checked = true\n\ts.Unlock()\n\n\tres, err := s.G().API.Get(ApiArg{\n\t\tEndpoint: \"sesscheck\",\n\t\tNeedSession: true,\n\t\tAppStatus: []string{\"OK\", \"BAD_SESSION\"},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.AppStatus == \"OK\" {\n\t\ts.G().Log.Debug(\"| Stored session checked out\")\n\t\tvar err error\n\t\tvar uid UID\n\t\tvar username, csrf string\n\t\tGetUidVoid(res.Body.AtKey(\"logged_in_uid\"), &uid, &err)\n\t\tres.Body.AtKey(\"username\").GetStringVoid(&username, &err)\n\t\tres.Body.AtKey(\"csrf_token\").GetStringVoid(&csrf, &err)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Server replied with unrecognized response: %s\", err.Error())\n\t\t\treturn err\n\t\t} else {\n\t\t\ts.Lock()\n\t\t\ts.valid = true\n\t\t\ts.uid = &uid\n\t\t\ts.username = &username\n\t\t\ts.Unlock()\n\t\t\tif !s.IsRecent() {\n\t\t\t\ts.SetCsrf(csrf)\n\t\t\t}\n\t\t}\n\t} else {\n\t\ts.G().Log.Notice(\"Stored session expired\")\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.Unlock()\n\t}\n\n\ts.G().Log.Debug(\"- Checked session\")\n\treturn nil\n}\n\nfunc (s *Session) HasSessionToken() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn len(s.token) > 0\n}\n\nfunc (s *Session) IsValid() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) postLogout() error {\n\t_, err := s.G().API.Post(ApiArg{\n\t\tEndpoint: \"logout\",\n\t\tNeedSession: true,\n\t})\n\tif err == nil {\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.checked = false\n\t\ts.token = \"\"\n\t\ts.csrf = \"\"\n\t\ts.Unlock()\n\t}\n\treturn err\n}\n\nfunc (s *Session) Logout() error {\n\terr := s.Load()\n\tvar e2 error\n\tif err == nil && s.HasSessionToken() {\n\t\te2 = s.postLogout()\n\t\ts.Lock()\n\t\tif e3 := s.file.Nuke(); e3 != nil {\n\t\t\ts.inFile = false\n\t\t\ts.G().Log.Warning(\"Failed to remove session file: %s\", e3.Error())\n\t\t}\n\t\ts.Unlock()\n\t}\n\tif err == nil && e2 != nil {\n\t\terr = e2\n\t}\n\treturn err\n}\n\nfunc (s *Session) loadAndCheck() (bool, error) {\n\terr := s.Load()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif s.HasSessionToken() {\n\t\terr = s.Check()\n\t}\n\treturn s.IsValid(), err\n}\n<commit_msg>Closed #297 (only set session to checked if sesscheck api call succeeds)<commit_after>package libkb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tjsonw \"github.com\/keybase\/go-jsonw\"\n)\n\ntype Session struct {\n\tContextified\n\tfile *JsonFile\n\ttoken string\n\tcsrf string\n\tinFile bool\n\tloaded bool\n\tchecked bool\n\tvalid bool\n\tuid *UID\n\tusername *string\n\tmtime int64\n\tsync.RWMutex\n}\n\nfunc newSession(g *GlobalContext) *Session {\n\treturn &Session{Contextified: Contextified{g}}\n}\n\n\/\/ NewSessionThin creates a minimal (thin) session of just the uid and username.\n\/\/ Clients of the daemon that use the session protocol need this.\nfunc NewSessionThin(uid UID, username string, token string) *Session {\n\t\/\/ XXX should this set valid to true? daemon won't return a\n\t\/\/ session unless valid is true, so...\n\treturn &Session{uid: &uid, username: &username, token: token, valid: true}\n}\n\nfunc (s *Session) IsLoggedIn() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) GetUsername() *string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.username\n}\n\nfunc (s *Session) GetUID() *UID {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.uid\n}\n\nfunc (s *Session) GetToken() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.token\n}\n\nfunc (s *Session) GetCsrf() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.csrf\n}\n\nfunc (s *Session) SetLoggedIn(sessionID, csrfToken, username string, uid UID) {\n\ts.Lock()\n\ts.valid = true\n\ts.uid = &uid\n\ts.username = &username\n\ts.token = sessionID\n\ts.GetDictionary().SetKey(\"session\", jsonw.NewString(sessionID))\n\ts.Unlock()\n\n\ts.SetCsrf(csrfToken)\n\ts.SetDirty()\n}\n\nfunc (s *Session) SetDirty() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.file.dirty = true\n\ts.GetDictionary().SetKey(\"mtime\", jsonw.NewInt64(time.Now().Unix()))\n}\n\nfunc (s *Session) SetCsrf(t string) {\n\ts.Lock()\n\ts.csrf = t\n\tif s.file == nil {\n\t\ts.Unlock()\n\t\treturn\n\t}\n\ts.GetDictionary().SetKey(\"csrf\", jsonw.NewString(t))\n\ts.Unlock()\n\ts.SetDirty()\n}\n\nfunc (s *Session) isConfigLoggedIn() bool {\n\treader := s.G().Env.GetConfig()\n\treturn reader.GetUsername() != \"\" && reader.GetDeviceID() != nil && reader.GetUID() != nil\n}\n\n\/\/ The session file can be out of sync with the config file, particularly when\n\/\/ switching between the node and go clients.\nfunc (s *Session) nukeSessionFileIfOutOfSync() error {\n\tsessionFile := s.G().Env.GetSessionFilename()\n\t\/\/ Use stat to check existence.\n\t_, statErr := os.Lstat(sessionFile)\n\tif statErr == nil && !s.isConfigLoggedIn() {\n\t\ts.G().Log.Warning(\"Session file found but user is not logged in. Deleting session file.\")\n\t\treturn os.Remove(sessionFile)\n\t}\n\treturn nil\n}\n\nfunc (s *Session) Load() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Loading session\")\n\tif s.loaded {\n\t\ts.G().Log.Debug(\"- Skipped; already loaded\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr := s.nukeSessionFileIfOutOfSync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.file = NewJsonFile(s.G().Env.GetSessionFilename(), \"session\")\n\terr = s.file.Load(false)\n\ts.loaded = true\n\n\tif err != nil {\n\t\ts.G().Log.Error(\"Failed to load session file\")\n\t\treturn err\n\t}\n\n\tif s.file.Exists() {\n\t\tvar tmp error\n\t\tvar token, csrf string\n\t\tok := true\n\t\ts.file.jw.AtKey(\"session\").GetStringVoid(&token, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'session' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\ts.file.jw.AtKey(\"csrf\").GetStringVoid(&csrf, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'csrf' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\tmtime, _ := s.file.jw.AtKey(\"mtime\").GetInt64()\n\t\tif ok {\n\t\t\ts.token = token\n\t\t\ts.csrf = csrf\n\t\t\ts.inFile = true\n\t\t\ts.mtime = mtime\n\t\t}\n\t}\n\ts.G().Log.Debug(\"- Loaded session\")\n\treturn nil\n}\n\nfunc (s *Session) GetDictionary() *jsonw.Wrapper {\n\treturn s.file.jw\n}\n\nfunc (s *Session) Write() error {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.file.MaybeSave(true, 0)\n}\n\nfunc (s *Session) IsRecent() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tif s.mtime == 0 {\n\t\treturn false\n\t}\n\tt := time.Unix(s.mtime, 0)\n\treturn time.Since(t) < time.Hour\n}\n\nfunc (s *Session) Check() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Checking session\")\n\tif s.checked {\n\t\ts.G().Log.Debug(\"- already checked, short-circuting\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\n\tres, err := s.G().API.Get(ApiArg{\n\t\tEndpoint: \"sesscheck\",\n\t\tNeedSession: true,\n\t\tAppStatus: []string{\"OK\", \"BAD_SESSION\"},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\ts.checked = true\n\ts.Unlock()\n\n\tif res.AppStatus == \"OK\" {\n\t\ts.G().Log.Debug(\"| Stored session checked out\")\n\t\tvar err error\n\t\tvar uid UID\n\t\tvar username, csrf string\n\t\tGetUidVoid(res.Body.AtKey(\"logged_in_uid\"), &uid, &err)\n\t\tres.Body.AtKey(\"username\").GetStringVoid(&username, &err)\n\t\tres.Body.AtKey(\"csrf_token\").GetStringVoid(&csrf, &err)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Server replied with unrecognized response: %s\", err.Error())\n\t\t\treturn err\n\t\t} else {\n\t\t\ts.Lock()\n\t\t\ts.valid = true\n\t\t\ts.uid = &uid\n\t\t\ts.username = &username\n\t\t\ts.Unlock()\n\t\t\tif !s.IsRecent() {\n\t\t\t\ts.SetCsrf(csrf)\n\t\t\t}\n\t\t}\n\t} else {\n\t\ts.G().Log.Notice(\"Stored session expired\")\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.Unlock()\n\t}\n\n\ts.G().Log.Debug(\"- Checked session\")\n\treturn nil\n}\n\nfunc (s *Session) HasSessionToken() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn len(s.token) > 0\n}\n\nfunc (s *Session) IsValid() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) postLogout() error {\n\t_, err := s.G().API.Post(ApiArg{\n\t\tEndpoint: \"logout\",\n\t\tNeedSession: true,\n\t})\n\tif err == nil {\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.checked = false\n\t\ts.token = \"\"\n\t\ts.csrf = \"\"\n\t\ts.Unlock()\n\t}\n\treturn err\n}\n\nfunc (s *Session) Logout() error {\n\terr := s.Load()\n\tvar e2 error\n\tif err == nil && s.HasSessionToken() {\n\t\te2 = s.postLogout()\n\t\ts.Lock()\n\t\tif e3 := s.file.Nuke(); e3 != nil {\n\t\t\ts.inFile = false\n\t\t\ts.G().Log.Warning(\"Failed to remove session file: %s\", e3.Error())\n\t\t}\n\t\ts.Unlock()\n\t}\n\tif err == nil && e2 != nil {\n\t\terr = e2\n\t}\n\treturn err\n}\n\nfunc (s *Session) loadAndCheck() (bool, error) {\n\terr := s.Load()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif s.HasSessionToken() {\n\t\terr = s.Check()\n\t}\n\treturn s.IsValid(), err\n}\n<|endoftext|>"} {"text":"<commit_before>package libkb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tjsonw \"github.com\/keybase\/go-jsonw\"\n)\n\ntype SessionReader interface {\n\tAPIArgs() (token, csrf string)\n}\n\ntype Session struct {\n\tContextified\n\tfile *JsonFile\n\ttoken string\n\tcsrf string\n\tinFile bool\n\tloaded bool\n\tchecked bool\n\tvalid bool\n\tuid *UID\n\tusername *string\n\tmtime int64\n\tsync.RWMutex\n}\n\nfunc newSession(g *GlobalContext) *Session {\n\treturn &Session{Contextified: Contextified{g}}\n}\n\n\/\/ NewSessionThin creates a minimal (thin) session of just the uid and username.\n\/\/ Clients of the daemon that use the session protocol need this.\nfunc NewSessionThin(uid UID, username string, token string) *Session {\n\t\/\/ XXX should this set valid to true? daemon won't return a\n\t\/\/ session unless valid is true, so...\n\treturn &Session{uid: &uid, username: &username, token: token, valid: true}\n}\n\nfunc (s *Session) IsLoggedIn() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) GetUsername() *string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.username\n}\n\nfunc (s *Session) GetUID() *UID {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.uid\n}\n\nfunc (s *Session) GetToken() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.token\n}\n\nfunc (s *Session) GetCsrf() string {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.csrf\n}\n\nfunc (s *Session) APIArgs() (token, csrf string) {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.token, s.csrf\n}\n\nfunc (s *Session) SetUsername(username string) {\n\ts.Lock()\n\ts.username = &username\n\ts.Unlock()\n}\n\nfunc (s *Session) SetLoggedIn(sessionID, csrfToken, username string, uid UID) {\n\ts.Lock()\n\ts.valid = true\n\ts.uid = &uid\n\ts.username = &username\n\ts.token = sessionID\n\tif s.file == nil {\n\t\tG.Log.Warning(\"s.file == nil\")\n\t\ts.Unlock()\n\t\ts.Load()\n\t\ts.Lock()\n\t}\n\tif s.GetDictionary() == nil {\n\t\tG.Log.Warning(\"s.GetDict() == nil\")\n\t}\n\ts.GetDictionary().SetKey(\"session\", jsonw.NewString(sessionID))\n\ts.Unlock()\n\n\ts.SetCsrf(csrfToken)\n\ts.SetDirty()\n}\n\nfunc (s *Session) SetDirty() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.file.dirty = true\n\ts.GetDictionary().SetKey(\"mtime\", jsonw.NewInt64(time.Now().Unix()))\n}\n\nfunc (s *Session) SetCsrf(t string) {\n\ts.Lock()\n\ts.csrf = t\n\tif s.file == nil {\n\t\ts.Unlock()\n\t\treturn\n\t}\n\ts.GetDictionary().SetKey(\"csrf\", jsonw.NewString(t))\n\ts.Unlock()\n\ts.SetDirty()\n}\n\nfunc (s *Session) isConfigLoggedIn() bool {\n\treader := s.G().Env.GetConfig()\n\treturn reader.GetUsername() != \"\" && reader.GetDeviceID() != nil && reader.GetUID() != nil\n}\n\n\/\/ The session file can be out of sync with the config file, particularly when\n\/\/ switching between the node and go clients.\nfunc (s *Session) nukeSessionFileIfOutOfSync() error {\n\tsessionFile := s.G().Env.GetSessionFilename()\n\t\/\/ Use stat to check existence.\n\t_, statErr := os.Lstat(sessionFile)\n\tif statErr == nil && !s.isConfigLoggedIn() {\n\t\ts.G().Log.Warning(\"Session file found but user is not logged in. Deleting session file.\")\n\t\treturn os.Remove(sessionFile)\n\t}\n\treturn nil\n}\n\nfunc (s *Session) Load() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Loading session\")\n\tif s.loaded {\n\t\ts.G().Log.Debug(\"- Skipped; already loaded\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr := s.nukeSessionFileIfOutOfSync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.file = NewJsonFile(s.G().Env.GetSessionFilename(), \"session\")\n\terr = s.file.Load(false)\n\ts.loaded = true\n\n\tif err != nil {\n\t\ts.G().Log.Error(\"Failed to load session file\")\n\t\treturn err\n\t}\n\n\tif s.file.Exists() {\n\t\tvar tmp error\n\t\tvar token, csrf string\n\t\tok := true\n\t\ts.file.jw.AtKey(\"session\").GetStringVoid(&token, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'session' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\ts.file.jw.AtKey(\"csrf\").GetStringVoid(&csrf, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'csrf' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\tmtime, _ := s.file.jw.AtKey(\"mtime\").GetInt64()\n\t\tif ok {\n\t\t\ts.token = token\n\t\t\ts.csrf = csrf\n\t\t\ts.inFile = true\n\t\t\ts.mtime = mtime\n\t\t}\n\t}\n\ts.G().Log.Debug(\"- Loaded session\")\n\treturn nil\n}\n\nfunc (s *Session) GetDictionary() *jsonw.Wrapper {\n\treturn s.file.jw\n}\n\nfunc (s *Session) Write() error {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.file.MaybeSave(true, 0)\n}\n\nfunc (s *Session) IsRecent() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\tif s.mtime == 0 {\n\t\treturn false\n\t}\n\tt := time.Unix(s.mtime, 0)\n\treturn time.Since(t) < time.Hour\n}\n\nfunc (s *Session) Check() error {\n\ts.RLock()\n\ts.G().Log.Debug(\"+ Checking session\")\n\tif s.checked {\n\t\ts.G().Log.Debug(\"- already checked, short-circuting\")\n\t\ts.RUnlock()\n\t\treturn nil\n\t}\n\ts.RUnlock()\n\n\tres, err := s.G().API.Get(ApiArg{\n\t\tSessionR: s,\n\t\tEndpoint: \"sesscheck\",\n\t\tNeedSession: true,\n\t\tAppStatus: []string{\"OK\", \"BAD_SESSION\"},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\ts.checked = true\n\ts.Unlock()\n\n\tif res.AppStatus == \"OK\" {\n\t\ts.G().Log.Debug(\"| Stored session checked out\")\n\t\tvar err error\n\t\tvar uid UID\n\t\tvar username, csrf string\n\t\tGetUidVoid(res.Body.AtKey(\"logged_in_uid\"), &uid, &err)\n\t\tres.Body.AtKey(\"username\").GetStringVoid(&username, &err)\n\t\tres.Body.AtKey(\"csrf_token\").GetStringVoid(&csrf, &err)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Server replied with unrecognized response: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\t\ts.Lock()\n\t\ts.valid = true\n\t\ts.uid = &uid\n\t\ts.username = &username\n\t\ts.Unlock()\n\t\tif !s.IsRecent() {\n\t\t\ts.SetCsrf(csrf)\n\t\t}\n\t} else {\n\t\ts.G().Log.Notice(\"Stored session expired\")\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.Unlock()\n\t}\n\n\ts.G().Log.Debug(\"- Checked session\")\n\treturn nil\n}\n\nfunc (s *Session) HasSessionToken() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn len(s.token) > 0\n}\n\nfunc (s *Session) IsValid() bool {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.valid\n}\n\nfunc (s *Session) postLogout() error {\n\n\t_, err := s.G().API.Post(ApiArg{\n\t\tSessionR: s,\n\t\tEndpoint: \"logout\",\n\t\tNeedSession: true,\n\t})\n\tif err == nil {\n\t\ts.Lock()\n\t\ts.valid = false\n\t\ts.checked = false\n\t\ts.token = \"\"\n\t\ts.csrf = \"\"\n\t\ts.Unlock()\n\t}\n\treturn err\n}\n\nfunc (s *Session) Logout() error {\n\terr := s.Load()\n\tvar e2 error\n\tif err == nil && s.HasSessionToken() {\n\t\te2 = s.postLogout()\n\t\ts.Lock()\n\t\tif e3 := s.file.Nuke(); e3 != nil {\n\t\t\ts.inFile = false\n\t\t\ts.G().Log.Warning(\"Failed to remove session file: %s\", e3.Error())\n\t\t}\n\t\ts.Unlock()\n\t}\n\tif err == nil && e2 != nil {\n\t\terr = e2\n\t}\n\treturn err\n}\n\nfunc (s *Session) loadAndCheck() (bool, error) {\n\terr := s.Load()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif s.HasSessionToken() {\n\t\terr = s.Check()\n\t}\n\treturn s.IsValid(), err\n}\n<commit_msg>removed mutex<commit_after>package libkb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tjsonw \"github.com\/keybase\/go-jsonw\"\n)\n\ntype SessionReader interface {\n\tAPIArgs() (token, csrf string)\n}\n\ntype Session struct {\n\tContextified\n\tfile *JsonFile\n\ttoken string\n\tcsrf string\n\tinFile bool\n\tloaded bool\n\tchecked bool\n\tvalid bool\n\tuid *UID\n\tusername *string\n\tmtime int64\n}\n\nfunc newSession(g *GlobalContext) *Session {\n\treturn &Session{Contextified: Contextified{g}}\n}\n\n\/\/ NewSessionThin creates a minimal (thin) session of just the uid and username.\n\/\/ Clients of the daemon that use the session protocol need this.\nfunc NewSessionThin(uid UID, username string, token string) *Session {\n\t\/\/ XXX should this set valid to true? daemon won't return a\n\t\/\/ session unless valid is true, so...\n\treturn &Session{uid: &uid, username: &username, token: token, valid: true}\n}\n\nfunc (s *Session) IsLoggedIn() bool {\n\treturn s.valid\n}\n\nfunc (s *Session) GetUsername() *string {\n\treturn s.username\n}\n\nfunc (s *Session) GetUID() *UID {\n\treturn s.uid\n}\n\nfunc (s *Session) GetToken() string {\n\treturn s.token\n}\n\nfunc (s *Session) GetCsrf() string {\n\treturn s.csrf\n}\n\nfunc (s *Session) APIArgs() (token, csrf string) {\n\treturn s.token, s.csrf\n}\n\nfunc (s *Session) SetUsername(username string) {\n\ts.username = &username\n}\n\nfunc (s *Session) SetLoggedIn(sessionID, csrfToken, username string, uid UID) {\n\ts.valid = true\n\ts.uid = &uid\n\ts.username = &username\n\ts.token = sessionID\n\tif s.file == nil {\n\t\tG.Log.Warning(\"s.file == nil\")\n\t\ts.Load()\n\t}\n\tif s.GetDictionary() == nil {\n\t\tG.Log.Warning(\"s.GetDict() == nil\")\n\t}\n\ts.GetDictionary().SetKey(\"session\", jsonw.NewString(sessionID))\n\n\ts.SetCsrf(csrfToken)\n\ts.SetDirty()\n}\n\nfunc (s *Session) SetDirty() {\n\ts.file.dirty = true\n\ts.GetDictionary().SetKey(\"mtime\", jsonw.NewInt64(time.Now().Unix()))\n}\n\nfunc (s *Session) SetCsrf(t string) {\n\ts.csrf = t\n\tif s.file == nil {\n\t\treturn\n\t}\n\ts.GetDictionary().SetKey(\"csrf\", jsonw.NewString(t))\n\ts.SetDirty()\n}\n\nfunc (s *Session) isConfigLoggedIn() bool {\n\treader := s.G().Env.GetConfig()\n\treturn reader.GetUsername() != \"\" && reader.GetDeviceID() != nil && reader.GetUID() != nil\n}\n\n\/\/ The session file can be out of sync with the config file, particularly when\n\/\/ switching between the node and go clients.\nfunc (s *Session) nukeSessionFileIfOutOfSync() error {\n\tsessionFile := s.G().Env.GetSessionFilename()\n\t\/\/ Use stat to check existence.\n\t_, statErr := os.Lstat(sessionFile)\n\tif statErr == nil && !s.isConfigLoggedIn() {\n\t\ts.G().Log.Warning(\"Session file found but user is not logged in. Deleting session file.\")\n\t\treturn os.Remove(sessionFile)\n\t}\n\treturn nil\n}\n\nfunc (s *Session) Load() error {\n\ts.G().Log.Debug(\"+ Loading session\")\n\tif s.loaded {\n\t\ts.G().Log.Debug(\"- Skipped; already loaded\")\n\t\treturn nil\n\t}\n\n\terr := s.nukeSessionFileIfOutOfSync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.file = NewJsonFile(s.G().Env.GetSessionFilename(), \"session\")\n\terr = s.file.Load(false)\n\ts.loaded = true\n\n\tif err != nil {\n\t\ts.G().Log.Error(\"Failed to load session file\")\n\t\treturn err\n\t}\n\n\tif s.file.Exists() {\n\t\tvar tmp error\n\t\tvar token, csrf string\n\t\tok := true\n\t\ts.file.jw.AtKey(\"session\").GetStringVoid(&token, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'session' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\ts.file.jw.AtKey(\"csrf\").GetStringVoid(&csrf, &tmp)\n\t\tif tmp != nil {\n\t\t\ts.G().Log.Warning(\"Bad 'csrf' value in session file %s: %s\",\n\t\t\t\ts.file.filename, tmp.Error())\n\t\t\tok = false\n\t\t}\n\t\tmtime, _ := s.file.jw.AtKey(\"mtime\").GetInt64()\n\t\tif ok {\n\t\t\ts.token = token\n\t\t\ts.csrf = csrf\n\t\t\ts.inFile = true\n\t\t\ts.mtime = mtime\n\t\t}\n\t}\n\ts.G().Log.Debug(\"- Loaded session\")\n\treturn nil\n}\n\nfunc (s *Session) GetDictionary() *jsonw.Wrapper {\n\treturn s.file.jw\n}\n\nfunc (s *Session) Write() error {\n\treturn s.file.MaybeSave(true, 0)\n}\n\nfunc (s *Session) IsRecent() bool {\n\tif s.mtime == 0 {\n\t\treturn false\n\t}\n\tt := time.Unix(s.mtime, 0)\n\treturn time.Since(t) < time.Hour\n}\n\nfunc (s *Session) Check() error {\n\ts.G().Log.Debug(\"+ Checking session\")\n\tif s.checked {\n\t\ts.G().Log.Debug(\"- already checked, short-circuting\")\n\t\treturn nil\n\t}\n\n\tres, err := s.G().API.Get(ApiArg{\n\t\tSessionR: s,\n\t\tEndpoint: \"sesscheck\",\n\t\tNeedSession: true,\n\t\tAppStatus: []string{\"OK\", \"BAD_SESSION\"},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.checked = true\n\n\tif res.AppStatus == \"OK\" {\n\t\ts.G().Log.Debug(\"| Stored session checked out\")\n\t\tvar err error\n\t\tvar uid UID\n\t\tvar username, csrf string\n\t\tGetUidVoid(res.Body.AtKey(\"logged_in_uid\"), &uid, &err)\n\t\tres.Body.AtKey(\"username\").GetStringVoid(&username, &err)\n\t\tres.Body.AtKey(\"csrf_token\").GetStringVoid(&csrf, &err)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Server replied with unrecognized response: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\t\ts.valid = true\n\t\ts.uid = &uid\n\t\ts.username = &username\n\t\tif !s.IsRecent() {\n\t\t\ts.SetCsrf(csrf)\n\t\t}\n\t} else {\n\t\ts.G().Log.Notice(\"Stored session expired\")\n\t\ts.valid = false\n\t}\n\n\ts.G().Log.Debug(\"- Checked session\")\n\treturn nil\n}\n\nfunc (s *Session) HasSessionToken() bool {\n\treturn len(s.token) > 0\n}\n\nfunc (s *Session) IsValid() bool {\n\treturn s.valid\n}\n\nfunc (s *Session) postLogout() error {\n\n\t_, err := s.G().API.Post(ApiArg{\n\t\tSessionR: s,\n\t\tEndpoint: \"logout\",\n\t\tNeedSession: true,\n\t})\n\tif err == nil {\n\t\ts.valid = false\n\t\ts.checked = false\n\t\ts.token = \"\"\n\t\ts.csrf = \"\"\n\t}\n\treturn err\n}\n\nfunc (s *Session) Logout() error {\n\terr := s.Load()\n\tvar e2 error\n\tif err == nil && s.HasSessionToken() {\n\t\te2 = s.postLogout()\n\t\tif e3 := s.file.Nuke(); e3 != nil {\n\t\t\ts.inFile = false\n\t\t\ts.G().Log.Warning(\"Failed to remove session file: %s\", e3.Error())\n\t\t}\n\t}\n\tif err == nil && e2 != nil {\n\t\terr = e2\n\t}\n\treturn err\n}\n\nfunc (s *Session) loadAndCheck() (bool, error) {\n\terr := s.Load()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif s.HasSessionToken() {\n\t\terr = s.Check()\n\t}\n\treturn s.IsValid(), err\n}\n<|endoftext|>"} {"text":"<commit_before>package activity\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/aws\/aws-sdk-go\/service\/swf\"\n)\n\ntype ActivityHandlerFunc func(activityTask *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error)\n\ntype ActivityHandler struct {\n\tActivity string\n\tHandlerFunc ActivityHandlerFunc\n\tInput interface{}\n}\n\ntype CoordinatedActivityHandlerStartFunc func(*swf.PollForActivityTaskOutput, interface{}) (interface{}, error)\n\ntype CoordinatedActivityHandlerTickFunc func(*swf.PollForActivityTaskOutput, interface{}) (bool, interface{}, error)\n\ntype CoordinatedActivityHandlerCancelFunc func(*swf.PollForActivityTaskOutput, interface{}) error\n\ntype CoordinatedActivityHandlerFinishFunc func(*swf.PollForActivityTaskOutput, interface{}) error\n\ntype CoordinatedActivityHandler struct {\n\t\/\/ Start is called when a new activity is ready to be handled.\n\tStart CoordinatedActivityHandlerStartFunc\n\n\t\/\/ Tick is called regularly to process a running activity.\n\tTick CoordinatedActivityHandlerTickFunc\n\n\t\/\/ Cancel is called when a running activity receives a request to cancel\n\t\/\/ via heartbeat update.\n\tCancel CoordinatedActivityHandlerCancelFunc\n\n\t\/\/ Finish is called at the end of handling every activity.\n\t\/\/ It is called no matter the outcome, eg if Start fails,\n\t\/\/ Tick decides to stop continuing, or the activity is canceled.\n\tFinish CoordinatedActivityHandlerFinishFunc\n\n\tInput interface{}\n\tActivity string\n}\n\nfunc NewActivityHandler(activity string, handler interface{}) *ActivityHandler {\n\tinput := inputType(handler, 1)\n\toutput := outputType(handler, 0)\n\tnewType := input\n\tif input.Kind() == reflect.Ptr {\n\t\tnewType = input.Elem()\n\t}\n\ttypeCheck(handler, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{output, \"error\"})\n\treturn &ActivityHandler{\n\t\tActivity: activity,\n\t\tHandlerFunc: marshalledFunc{reflect.ValueOf(handler)}.activityHandlerFunc,\n\t\tInput: reflect.New(newType).Elem().Interface(),\n\t}\n}\n\nfunc NewCoordinatedActivityHandler(activity string, start interface{}, tick interface{}, cancel interface{}, finish interface{}) *CoordinatedActivityHandler {\n\tinput := inputType(tick, 1)\n\tnewType := input\n\tif input.Kind() == reflect.Ptr {\n\t\tnewType = input.Elem()\n\t}\n\toutput := outputType(tick, 1)\n\n\ttypeCheck(start, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{output, \"error\"})\n\ttypeCheck(tick, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"bool\", output, \"error\"})\n\ttypeCheck(cancel, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"error\"})\n\ttypeCheck(finish, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"error\"})\n\n\treturn &CoordinatedActivityHandler{\n\t\tActivity: activity,\n\t\tStart: marshalledFunc{reflect.ValueOf(start)}.handleCoordinatedActivityStart,\n\t\tTick: marshalledFunc{reflect.ValueOf(tick)}.handleCoordinatedActivityTick,\n\t\tCancel: marshalledFunc{reflect.ValueOf(cancel)}.handleCoordinatedActivityCancel,\n\t\tFinish: marshalledFunc{reflect.ValueOf(finish)}.handleCoordinatedActivityFinish,\n\t\tInput: reflect.New(newType).Elem().Interface(),\n\t}\n\n}\n\nfunc (a *ActivityHandler) ZeroInput() interface{} {\n\treturn reflect.New(reflect.TypeOf(a.Input)).Interface()\n}\n\ntype marshalledFunc struct {\n\tv reflect.Value\n}\n\nfunc (m marshalledFunc) activityHandlerFunc(task *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]), errorValue(ret[1])\n}\n\nfunc (m marshalledFunc) longRunningActivityHandlerFunc(task *swf.PollForActivityTaskOutput, input interface{}) {\n\tm.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityStart(task *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]), errorValue(ret[1])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityTick(task *swf.PollForActivityTaskOutput, input interface{}) (bool, interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]).(bool), outputValue(ret[1]), errorValue(ret[2])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityCancel(task *swf.PollForActivityTaskOutput, input interface{}) error {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn errorValue(ret[0])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityFinish(task *swf.PollForActivityTaskOutput, input interface{}) error {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn errorValue(ret[0])\n}\n\nfunc outputValue(v reflect.Value) interface{} {\n\tswitch v.Kind() {\n\tcase reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn v.Interface()\n\t\t}\n\tdefault:\n\t\treturn v.Interface()\n\t}\n}\n\nfunc errorValue(v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn nil\n\t}\n\treturn v.Interface().(error)\n}\n\nfunc inputType(handler interface{}, idx int) reflect.Type {\n\tt := reflect.TypeOf(handler)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\treturn t.In(idx)\n}\n\nfunc outputType(handler interface{}, idx int) string {\n\tt := reflect.TypeOf(handler)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\treturn t.Out(idx).String()\n}\n\nfunc typeCheck(typedFunc interface{}, in []string, out []string) {\n\tt := reflect.TypeOf(typedFunc)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\tif len(in) != t.NumIn() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"input arity was %v, not %v\",\n\t\t\tt.NumIn(), len(in),\n\t\t))\n\t}\n\n\tfor i, rt := range in {\n\t\tif rt != t.In(i).String() {\n\t\t\tpanic(fmt.Sprintf(\n\t\t\t\t\"type of argument %v was %v, not %v\",\n\t\t\t\ti, t.In(i), rt,\n\t\t\t))\n\t\t}\n\t}\n\n\tif len(out) != t.NumOut() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"number of return values was %v, not %v\",\n\t\t\tt.NumOut(), len(out),\n\t\t))\n\t}\n\n\tfor i, rt := range out {\n\t\tif rt != t.Out(i).String() {\n\t\t\tpanic(fmt.Sprintf(\n\t\t\t\t\"type of return value %v was %v, not %v\",\n\t\t\t\ti, t.Out(i), rt,\n\t\t\t))\n\t\t}\n\t}\n}\n<commit_msg>Add more explicit documentation to Tick field in CoordinatedActivityHandler<commit_after>package activity\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/aws\/aws-sdk-go\/service\/swf\"\n)\n\ntype ActivityHandlerFunc func(activityTask *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error)\n\ntype ActivityHandler struct {\n\tActivity string\n\tHandlerFunc ActivityHandlerFunc\n\tInput interface{}\n}\n\ntype CoordinatedActivityHandlerStartFunc func(*swf.PollForActivityTaskOutput, interface{}) (interface{}, error)\n\ntype CoordinatedActivityHandlerTickFunc func(*swf.PollForActivityTaskOutput, interface{}) (bool, interface{}, error)\n\ntype CoordinatedActivityHandlerCancelFunc func(*swf.PollForActivityTaskOutput, interface{}) error\n\ntype CoordinatedActivityHandlerFinishFunc func(*swf.PollForActivityTaskOutput, interface{}) error\n\ntype CoordinatedActivityHandler struct {\n\t\/\/ Start is called when a new activity is ready to be handled.\n\tStart CoordinatedActivityHandlerStartFunc\n\n\t\/\/ Tick is called regularly to process a running activity.\n\t\/\/ Tick that returns false, nil, nil just expresses that the job is still running.\n\t\/\/ Tick that returns false, &SomeStruct{}, nil will express that the job is still running and also send an 'ActivityUpdated' signal back to the FSM with SomeStruct{} as the Input.\n\t\/\/ Tick that returns true, &SomeStruct{}, nil, expresses that the job\/activity is done and send SomeStruct{} back as the result. as well as stops heartbeating.\n\t\/\/ Tick that returns true, nil, nil, expresses that the job is done and send no result back, as well as stops heartbeating.\n\t\/\/ Tick that returns false, nil, err expresses that the job\/activity failed and sends back err as the reason. as well as stops heartbeating.\n\tTick CoordinatedActivityHandlerTickFunc\n\n\t\/\/ Cancel is called when a running activity receives a request to cancel\n\t\/\/ via heartbeat update.\n\tCancel CoordinatedActivityHandlerCancelFunc\n\n\t\/\/ Finish is called at the end of handling every activity.\n\t\/\/ It is called no matter the outcome, eg if Start fails,\n\t\/\/ Tick decides to stop continuing, or the activity is canceled.\n\tFinish CoordinatedActivityHandlerFinishFunc\n\n\tInput interface{}\n\tActivity string\n}\n\nfunc NewActivityHandler(activity string, handler interface{}) *ActivityHandler {\n\tinput := inputType(handler, 1)\n\toutput := outputType(handler, 0)\n\tnewType := input\n\tif input.Kind() == reflect.Ptr {\n\t\tnewType = input.Elem()\n\t}\n\ttypeCheck(handler, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{output, \"error\"})\n\treturn &ActivityHandler{\n\t\tActivity: activity,\n\t\tHandlerFunc: marshalledFunc{reflect.ValueOf(handler)}.activityHandlerFunc,\n\t\tInput: reflect.New(newType).Elem().Interface(),\n\t}\n}\n\nfunc NewCoordinatedActivityHandler(activity string, start interface{}, tick interface{}, cancel interface{}, finish interface{}) *CoordinatedActivityHandler {\n\tinput := inputType(tick, 1)\n\tnewType := input\n\tif input.Kind() == reflect.Ptr {\n\t\tnewType = input.Elem()\n\t}\n\toutput := outputType(tick, 1)\n\n\ttypeCheck(start, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{output, \"error\"})\n\ttypeCheck(tick, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"bool\", output, \"error\"})\n\ttypeCheck(cancel, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"error\"})\n\ttypeCheck(finish, []string{\"*swf.PollForActivityTaskOutput\", input.String()}, []string{\"error\"})\n\n\treturn &CoordinatedActivityHandler{\n\t\tActivity: activity,\n\t\tStart: marshalledFunc{reflect.ValueOf(start)}.handleCoordinatedActivityStart,\n\t\tTick: marshalledFunc{reflect.ValueOf(tick)}.handleCoordinatedActivityTick,\n\t\tCancel: marshalledFunc{reflect.ValueOf(cancel)}.handleCoordinatedActivityCancel,\n\t\tFinish: marshalledFunc{reflect.ValueOf(finish)}.handleCoordinatedActivityFinish,\n\t\tInput: reflect.New(newType).Elem().Interface(),\n\t}\n\n}\n\nfunc (a *ActivityHandler) ZeroInput() interface{} {\n\treturn reflect.New(reflect.TypeOf(a.Input)).Interface()\n}\n\ntype marshalledFunc struct {\n\tv reflect.Value\n}\n\nfunc (m marshalledFunc) activityHandlerFunc(task *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]), errorValue(ret[1])\n}\n\nfunc (m marshalledFunc) longRunningActivityHandlerFunc(task *swf.PollForActivityTaskOutput, input interface{}) {\n\tm.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityStart(task *swf.PollForActivityTaskOutput, input interface{}) (interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]), errorValue(ret[1])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityTick(task *swf.PollForActivityTaskOutput, input interface{}) (bool, interface{}, error) {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn outputValue(ret[0]).(bool), outputValue(ret[1]), errorValue(ret[2])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityCancel(task *swf.PollForActivityTaskOutput, input interface{}) error {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn errorValue(ret[0])\n}\n\nfunc (m marshalledFunc) handleCoordinatedActivityFinish(task *swf.PollForActivityTaskOutput, input interface{}) error {\n\tret := m.v.Call([]reflect.Value{reflect.ValueOf(task), reflect.ValueOf(input)})\n\treturn errorValue(ret[0])\n}\n\nfunc outputValue(v reflect.Value) interface{} {\n\tswitch v.Kind() {\n\tcase reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn v.Interface()\n\t\t}\n\tdefault:\n\t\treturn v.Interface()\n\t}\n}\n\nfunc errorValue(v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn nil\n\t}\n\treturn v.Interface().(error)\n}\n\nfunc inputType(handler interface{}, idx int) reflect.Type {\n\tt := reflect.TypeOf(handler)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\treturn t.In(idx)\n}\n\nfunc outputType(handler interface{}, idx int) string {\n\tt := reflect.TypeOf(handler)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\treturn t.Out(idx).String()\n}\n\nfunc typeCheck(typedFunc interface{}, in []string, out []string) {\n\tt := reflect.TypeOf(typedFunc)\n\tif reflect.Func != t.Kind() {\n\t\tpanic(fmt.Sprintf(\"kind was %v, not Func\", t.Kind()))\n\t}\n\tif len(in) != t.NumIn() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"input arity was %v, not %v\",\n\t\t\tt.NumIn(), len(in),\n\t\t))\n\t}\n\n\tfor i, rt := range in {\n\t\tif rt != t.In(i).String() {\n\t\t\tpanic(fmt.Sprintf(\n\t\t\t\t\"type of argument %v was %v, not %v\",\n\t\t\t\ti, t.In(i), rt,\n\t\t\t))\n\t\t}\n\t}\n\n\tif len(out) != t.NumOut() {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"number of return values was %v, not %v\",\n\t\t\tt.NumOut(), len(out),\n\t\t))\n\t}\n\n\tfor i, rt := range out {\n\t\tif rt != t.Out(i).String() {\n\t\t\tpanic(fmt.Sprintf(\n\t\t\t\t\"type of return value %v was %v, not %v\",\n\t\t\t\ti, t.Out(i), rt,\n\t\t\t))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package namecheap\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\/diff\"\n\tnc \"github.com\/billputer\/go-namecheap\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\nvar NamecheapDefaultNs = []string{\"dns1.registrar-servers.com\", \"dns2.registrar-servers.com\"}\n\ntype Namecheap struct {\n\tApiKey string\n\tApiUser string\n\tclient *nc.Client\n}\n\nvar docNotes = providers.DocumentationNotes{\n\tproviders.DocCreateDomains: providers.Cannot(\"Requires domain registered through their service\"),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocDualHost: providers.Cannot(\"Doesn't allow control of apex NS records\"),\n\tproviders.CanUseAlias: providers.Cannot(),\n\tproviders.CanUseCAA: providers.Cannot(),\n\tproviders.CanUseSRV: providers.Unimplemented(\"namecheap supports srv records, we just need someone to implement it and make sure the tests pass.\"),\n\tproviders.CanUsePTR: providers.Cannot(),\n\tproviders.CanUseTLSA: providers.Cannot(),\n}\n\nfunc init() {\n\tproviders.RegisterRegistrarType(\"NAMECHEAP\", newReg, docNotes)\n\tproviders.RegisterDomainServiceProviderType(\"NAMECHEAP\", newDsp, providers.CantUseNOPURGE)\n}\n\nfunc newDsp(conf map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\treturn newProvider(conf, metadata)\n}\n\nfunc newReg(conf map[string]string) (providers.Registrar, error) {\n\treturn newProvider(conf, nil)\n}\n\nfunc newProvider(m map[string]string, metadata json.RawMessage) (*Namecheap, error) {\n\tapi := &Namecheap{}\n\tapi.ApiUser, api.ApiKey = m[\"apiuser\"], m[\"apikey\"]\n\tif api.ApiKey == \"\" || api.ApiUser == \"\" {\n\t\treturn nil, fmt.Errorf(\"Namecheap apikey and apiuser must be provided.\")\n\t}\n\tapi.client = nc.NewClient(api.ApiUser, api.ApiKey, api.ApiUser)\n\t\/\/ if BaseURL is specified in creds, use that url\n\tBaseURL, ok := m[\"BaseURL\"]\n\tif ok {\n\t\tapi.client.BaseURL = BaseURL\n\t}\n\treturn api, nil\n}\n\nfunc splitDomain(domain string) (sld string, tld string) {\n\ttld, _ = publicsuffix.PublicSuffix(domain)\n\td, _ := publicsuffix.EffectiveTLDPlusOne(domain)\n\tsld = strings.Split(d, \".\")[0]\n\treturn sld, tld\n}\n\n\/\/ namecheap has request limiting at unpublished limits\n\/\/ this channel acts as a global rate limiter\n\/\/ read from it before every request\n\/\/ from support in SEP-2017:\n\/\/ \"The limits for the API calls will be 20\/Min, 700\/Hour and 8000\/Day for one user.\n\/\/ If you can limit the requests within these it should be fine.\"\nvar throttle = make(chan bool, 20)\n\nfunc init() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ add (up to) 20 requests every minute\n\t\t\tfor i := 0; i < 20; i++ {\n\t\t\t\tselect {\n\t\t\t\tcase throttle <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (n *Namecheap) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tdc.Punycode()\n\tsld, tld := splitDomain(dc.Name)\n\t<-throttle\n\trecords, err := n.client.DomainsDNSGetHosts(sld, tld)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar actual []*models.RecordConfig\n\n\t\/\/ namecheap does not allow setting @ NS with basic DNS\n\tdc.Filter(func(r *models.RecordConfig) bool {\n\t\tif r.Type == \"NS\" && r.Name == \"@\" {\n\t\t\tif !strings.HasSuffix(r.Target, \"registrar-servers.com.\") {\n\t\t\t\tfmt.Println(\"\\n\", r.Target, \"Namecheap does not support changing apex NS records. Skipping.\")\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\t\/\/ namecheap has this really annoying feature where they add some parking records if you have no records.\n\t\/\/ This causes a few problems for our purposes, specifically the integration tests.\n\t\/\/ lets detect that one case and pretend it is a no-op.\n\tif len(dc.Records) == 0 && len(records.Hosts) == 2 {\n\t\tif records.Hosts[0].Type == \"CNAME\" &&\n\t\t\tstrings.Contains(records.Hosts[0].Address, \"parkingpage\") &&\n\t\t\trecords.Hosts[1].Type == \"URL\" {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tfor _, r := range records.Hosts {\n\t\tif r.Type == \"SOA\" {\n\t\t\tcontinue\n\t\t}\n\t\trec := &models.RecordConfig{\n\t\t\tNameFQDN: dnsutil.AddOrigin(r.Name, dc.Name),\n\t\t\tType: r.Type,\n\t\t\tTarget: r.Address,\n\t\t\tTTL: uint32(r.TTL),\n\t\t\tMxPreference: uint16(r.MXPref),\n\t\t\tOriginal: r,\n\t\t}\n\t\tactual = append(actual, rec)\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, delete, modify := differ.IncrementalDiff(actual)\n\n\t\/\/ \/\/ because namecheap doesn't have selective create, delete, modify,\n\t\/\/ \/\/ we bundle them all up to send at once. We *do* want to see the\n\t\/\/ \/\/ changes though\n\n\tvar desc []string\n\tfor _, i := range create {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\tfor _, i := range delete {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\tfor _, i := range modify {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\n\tmsg := fmt.Sprintf(\"GENERATE_ZONE: %s (%d records)%s\", dc.Name, len(dc.Records), desc)\n\tcorrections := []*models.Correction{}\n\n\t\/\/ only create corrections if there are changes\n\tif len(desc) > 0 {\n\t\tcorrections = append(corrections,\n\t\t\t&models.Correction{\n\t\t\t\tMsg: msg,\n\t\t\t\tF: func() error {\n\t\t\t\t\treturn n.generateRecords(dc)\n\t\t\t\t},\n\t\t\t})\n\t}\n\n\treturn corrections, nil\n}\n\nfunc (n *Namecheap) generateRecords(dc *models.DomainConfig) error {\n\n\tvar recs []nc.DomainDNSHost\n\n\tid := 1\n\tfor _, r := range dc.Records {\n\t\tname := dnsutil.TrimDomainName(r.NameFQDN, dc.Name)\n\t\trec := nc.DomainDNSHost{\n\t\t\tID: id,\n\t\t\tName: name,\n\t\t\tType: r.Type,\n\t\t\tAddress: r.Target,\n\t\t\tMXPref: int(r.MxPreference),\n\t\t\tTTL: int(r.TTL),\n\t\t}\n\t\trecs = append(recs, rec)\n\t\tid++\n\t}\n\tsld, tld := splitDomain(dc.Name)\n\t<-throttle\n\t_, err := n.client.DomainDNSSetHosts(sld, tld, recs)\n\treturn err\n}\n\nfunc (n *Namecheap) GetNameservers(domainName string) ([]*models.Nameserver, error) {\n\t\/\/ return default namecheap nameservers\n\tns := NamecheapDefaultNs\n\n\treturn models.StringsToNameservers(ns), nil\n}\n\nfunc (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\t<-throttle\n\tinfo, err := n.client.DomainGetInfo(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Strings(info.DNSDetails.Nameservers)\n\tfound := strings.Join(info.DNSDetails.Nameservers, \",\")\n\tdesiredNs := []string{}\n\tfor _, d := range dc.Nameservers {\n\t\tdesiredNs = append(desiredNs, d.Name)\n\t}\n\tsort.Strings(desiredNs)\n\tdesired := strings.Join(desiredNs, \",\")\n\tif found != desired {\n\t\tparts := strings.SplitN(dc.Name, \".\", 2)\n\t\tsld, tld := parts[0], parts[1]\n\t\treturn []*models.Correction{\n\t\t\t{\n\t\t\t\tMsg: fmt.Sprintf(\"Change Nameservers from '%s' to '%s'\", found, desired),\n\t\t\t\tF: func() error {\n\t\t\t\t\t<-throttle\n\t\t\t\t\t_, err := n.client.DomainDNSSetCustom(sld, tld, desired)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}},\n\t\t}, nil\n\t}\n\treturn nil, nil\n}\n<commit_msg>re-add sleep to namecheap<commit_after>package namecheap\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\/diff\"\n\tnc \"github.com\/billputer\/go-namecheap\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n)\n\nvar NamecheapDefaultNs = []string{\"dns1.registrar-servers.com\", \"dns2.registrar-servers.com\"}\n\ntype Namecheap struct {\n\tApiKey string\n\tApiUser string\n\tclient *nc.Client\n}\n\nvar docNotes = providers.DocumentationNotes{\n\tproviders.DocCreateDomains: providers.Cannot(\"Requires domain registered through their service\"),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n\tproviders.DocDualHost: providers.Cannot(\"Doesn't allow control of apex NS records\"),\n\tproviders.CanUseAlias: providers.Cannot(),\n\tproviders.CanUseCAA: providers.Cannot(),\n\tproviders.CanUseSRV: providers.Unimplemented(\"namecheap supports srv records, we just need someone to implement it and make sure the tests pass.\"),\n\tproviders.CanUsePTR: providers.Cannot(),\n\tproviders.CanUseTLSA: providers.Cannot(),\n}\n\nfunc init() {\n\tproviders.RegisterRegistrarType(\"NAMECHEAP\", newReg, docNotes)\n\tproviders.RegisterDomainServiceProviderType(\"NAMECHEAP\", newDsp, providers.CantUseNOPURGE)\n}\n\nfunc newDsp(conf map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\treturn newProvider(conf, metadata)\n}\n\nfunc newReg(conf map[string]string) (providers.Registrar, error) {\n\treturn newProvider(conf, nil)\n}\n\nfunc newProvider(m map[string]string, metadata json.RawMessage) (*Namecheap, error) {\n\tapi := &Namecheap{}\n\tapi.ApiUser, api.ApiKey = m[\"apiuser\"], m[\"apikey\"]\n\tif api.ApiKey == \"\" || api.ApiUser == \"\" {\n\t\treturn nil, fmt.Errorf(\"Namecheap apikey and apiuser must be provided.\")\n\t}\n\tapi.client = nc.NewClient(api.ApiUser, api.ApiKey, api.ApiUser)\n\t\/\/ if BaseURL is specified in creds, use that url\n\tBaseURL, ok := m[\"BaseURL\"]\n\tif ok {\n\t\tapi.client.BaseURL = BaseURL\n\t}\n\treturn api, nil\n}\n\nfunc splitDomain(domain string) (sld string, tld string) {\n\ttld, _ = publicsuffix.PublicSuffix(domain)\n\td, _ := publicsuffix.EffectiveTLDPlusOne(domain)\n\tsld = strings.Split(d, \".\")[0]\n\treturn sld, tld\n}\n\n\/\/ namecheap has request limiting at unpublished limits\n\/\/ this channel acts as a global rate limiter\n\/\/ read from it before every request\n\/\/ from support in SEP-2017:\n\/\/ \"The limits for the API calls will be 20\/Min, 700\/Hour and 8000\/Day for one user.\n\/\/ If you can limit the requests within these it should be fine.\"\nvar throttle = make(chan bool, 20)\n\nfunc init() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ add (up to) 20 requests every minute\n\t\t\tfor i := 0; i < 20; i++ {\n\t\t\t\tselect {\n\t\t\t\tcase throttle <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t\ttime.Sleep(time.Minute)\n\t\t}\n\t}()\n}\n\nfunc (n *Namecheap) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tdc.Punycode()\n\tsld, tld := splitDomain(dc.Name)\n\t<-throttle\n\trecords, err := n.client.DomainsDNSGetHosts(sld, tld)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar actual []*models.RecordConfig\n\n\t\/\/ namecheap does not allow setting @ NS with basic DNS\n\tdc.Filter(func(r *models.RecordConfig) bool {\n\t\tif r.Type == \"NS\" && r.Name == \"@\" {\n\t\t\tif !strings.HasSuffix(r.Target, \"registrar-servers.com.\") {\n\t\t\t\tfmt.Println(\"\\n\", r.Target, \"Namecheap does not support changing apex NS records. Skipping.\")\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\t\/\/ namecheap has this really annoying feature where they add some parking records if you have no records.\n\t\/\/ This causes a few problems for our purposes, specifically the integration tests.\n\t\/\/ lets detect that one case and pretend it is a no-op.\n\tif len(dc.Records) == 0 && len(records.Hosts) == 2 {\n\t\tif records.Hosts[0].Type == \"CNAME\" &&\n\t\t\tstrings.Contains(records.Hosts[0].Address, \"parkingpage\") &&\n\t\t\trecords.Hosts[1].Type == \"URL\" {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tfor _, r := range records.Hosts {\n\t\tif r.Type == \"SOA\" {\n\t\t\tcontinue\n\t\t}\n\t\trec := &models.RecordConfig{\n\t\t\tNameFQDN: dnsutil.AddOrigin(r.Name, dc.Name),\n\t\t\tType: r.Type,\n\t\t\tTarget: r.Address,\n\t\t\tTTL: uint32(r.TTL),\n\t\t\tMxPreference: uint16(r.MXPref),\n\t\t\tOriginal: r,\n\t\t}\n\t\tactual = append(actual, rec)\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, delete, modify := differ.IncrementalDiff(actual)\n\n\t\/\/ \/\/ because namecheap doesn't have selective create, delete, modify,\n\t\/\/ \/\/ we bundle them all up to send at once. We *do* want to see the\n\t\/\/ \/\/ changes though\n\n\tvar desc []string\n\tfor _, i := range create {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\tfor _, i := range delete {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\tfor _, i := range modify {\n\t\tdesc = append(desc, \"\\n\"+i.String())\n\t}\n\n\tmsg := fmt.Sprintf(\"GENERATE_ZONE: %s (%d records)%s\", dc.Name, len(dc.Records), desc)\n\tcorrections := []*models.Correction{}\n\n\t\/\/ only create corrections if there are changes\n\tif len(desc) > 0 {\n\t\tcorrections = append(corrections,\n\t\t\t&models.Correction{\n\t\t\t\tMsg: msg,\n\t\t\t\tF: func() error {\n\t\t\t\t\treturn n.generateRecords(dc)\n\t\t\t\t},\n\t\t\t})\n\t}\n\n\treturn corrections, nil\n}\n\nfunc (n *Namecheap) generateRecords(dc *models.DomainConfig) error {\n\n\tvar recs []nc.DomainDNSHost\n\n\tid := 1\n\tfor _, r := range dc.Records {\n\t\tname := dnsutil.TrimDomainName(r.NameFQDN, dc.Name)\n\t\trec := nc.DomainDNSHost{\n\t\t\tID: id,\n\t\t\tName: name,\n\t\t\tType: r.Type,\n\t\t\tAddress: r.Target,\n\t\t\tMXPref: int(r.MxPreference),\n\t\t\tTTL: int(r.TTL),\n\t\t}\n\t\trecs = append(recs, rec)\n\t\tid++\n\t}\n\tsld, tld := splitDomain(dc.Name)\n\t<-throttle\n\t_, err := n.client.DomainDNSSetHosts(sld, tld, recs)\n\treturn err\n}\n\nfunc (n *Namecheap) GetNameservers(domainName string) ([]*models.Nameserver, error) {\n\t\/\/ return default namecheap nameservers\n\tns := NamecheapDefaultNs\n\n\treturn models.StringsToNameservers(ns), nil\n}\n\nfunc (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\t<-throttle\n\tinfo, err := n.client.DomainGetInfo(dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Strings(info.DNSDetails.Nameservers)\n\tfound := strings.Join(info.DNSDetails.Nameservers, \",\")\n\tdesiredNs := []string{}\n\tfor _, d := range dc.Nameservers {\n\t\tdesiredNs = append(desiredNs, d.Name)\n\t}\n\tsort.Strings(desiredNs)\n\tdesired := strings.Join(desiredNs, \",\")\n\tif found != desired {\n\t\tparts := strings.SplitN(dc.Name, \".\", 2)\n\t\tsld, tld := parts[0], parts[1]\n\t\treturn []*models.Correction{\n\t\t\t{\n\t\t\t\tMsg: fmt.Sprintf(\"Change Nameservers from '%s' to '%s'\", found, desired),\n\t\t\t\tF: func() error {\n\t\t\t\t\t<-throttle\n\t\t\t\t\t_, err := n.client.DomainDNSSetCustom(sld, tld, desired)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}},\n\t\t}, nil\n\t}\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*Licensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cephinstaller\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"github.com\/skyrings\/skyring-common\/conf\"\n\t\"github.com\/skyrings\/skyring-common\/models\"\n\t\"github.com\/skyrings\/skyring-common\/provisioner\"\n\t\"github.com\/skyrings\/skyring-common\/tools\/logger\"\n\t\"github.com\/skyrings\/skyring-common\/tools\/task\"\n\t\"github.com\/skyrings\/skyring-common\/utils\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype CephInstaller struct {\n}\n\nconst (\n\tProviderName = \"ceph-installer\"\n\tCEPH_INSTALLER_API_DEFAULT_PREFIX = \"api\"\n\tCEPH_INSTALLER_API_PORT = 8181\n\tMON = \"MON\"\n\tOSD = \"OSD\"\n)\n\ntype InstallReq struct {\n\tHosts []string `json:\"hosts\"`\n\tRedhatStorage bool `json:\"redhat_storage\"`\n\tRedhatUseCdn bool `json:\"redhat_use_cdn\"`\n}\n\ntype Task struct {\n\tEndpoint string `json:\"endpoint\"`\n\tSucceeded bool `json:\"succeeded\"`\n\tStdout string `json:\"stdout\"`\n\tStarted string `json:\"started\"`\n\tExitCode int `json:\"exit_code\"`\n\tEnded string `json:\"ended\"`\n\tCommand string `json:\"command\"`\n\tStderr string `json:\"stderr\"`\n\tIdentifier string `json:\"identifier\"`\n}\n\nvar httprequest = gorequest.New()\n\nfunc init() {\n\tprovisioner.RegisterProvider(ProviderName, func(config io.Reader) (provisioner.Provisioner, error) {\n\t\treturn New(config)\n\t})\n}\n\nfunc New(config io.Reader) (*CephInstaller, error) {\n\tinstaller := new(CephInstaller)\n\tinstaller.LoadRoutes()\n\treturn installer, nil\n}\n\nfunc (c CephInstaller) Install(ctxt string, t *task.Task, providerName string, nodes []models.ClusterNode) []models.ClusterNode {\n\n\tdb_nodes, err := util.GetNodesByIdStr(nodes)\n\tif err != nil {\n\t\tlogger.Get().Error(\"%s-Error getting nodes while package installation: %v\", ctxt, err)\n\t\treturn nodes\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\tfailedNodes []models.ClusterNode\n\t\tsyncMutex sync.Mutex\n\t)\n\n\tfor _, node := range nodes {\n\t\twg.Add(1)\n\t\tgo func(node models.ClusterNode, hostname string) {\n\t\t\tdefer wg.Done()\n\t\t\tvar route models.ApiRoute\n\t\t\thosts := []string{hostname}\n\t\t\tdata := make(map[string]interface{})\n\t\t\tif util.StringInSlice(MON, node.NodeType) {\n\t\t\t\troute = CEPH_INSTALLER_API_ROUTES[\"monInstall\"]\n\t\t\t\tdata[\"calamari\"] = true\n\t\t\t} else if util.StringInSlice(OSD, node.NodeType) {\n\t\t\t\troute = CEPH_INSTALLER_API_ROUTES[\"osdInstall\"]\n\t\t\t}\n\n\t\t\tdata[\"hosts\"] = hosts\n\t\t\tdata[\"redhat_storage\"] = conf.SystemConfig.Provisioners[providerName].RedhatStorage\n\n\t\t\treqData, err := formConfigureData(data)\n\t\t\tif err != nil {\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresp, body, errs := httprequest.Post(formUrl(route)).Send(reqData).End()\n\n\t\t\trespData, err := parseInstallResponseData(ctxt, resp, body, errs)\n\t\t\tif err != nil {\n\t\t\t\tsyncMutex.Lock()\n\t\t\t\tdefer syncMutex.Unlock()\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif ok := syncRequestStatus(ctxt, respData.Identifier); !ok {\n\t\t\t\tsyncMutex.Lock()\n\t\t\t\tdefer syncMutex.Unlock()\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.UpdateStatus(\"Installed packages on: %s\", hostname)\n\t\t}(node, db_nodes[node.NodeId].Hostname)\n\t}\n\twg.Wait()\n\n\treturn failedNodes\n}\nfunc (c CephInstaller) Configure(ctxt string, t *task.Task, reqType string, data map[string]interface{}) error {\n\tvar (\n\t\tresp *http.Response\n\t\tbody string\n\t\terrs []error\n\t\troute models.ApiRoute\n\t)\n\treqData, err := formConfigureData(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif reqType == MON {\n\t\troute = CEPH_INSTALLER_API_ROUTES[\"monConfigure\"]\n\n\t} else if reqType == OSD {\n\t\troute = CEPH_INSTALLER_API_ROUTES[\"osdConfigure\"]\n\n\t}\n\n\tresp, body, errs = httprequest.Post(formUrl(route)).Send(reqData).End()\n\n\trespData, err := parseInstallResponseData(ctxt, resp, body, errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ok := syncRequestStatus(ctxt, respData.Identifier); !ok {\n\t\treturn errors.New(\"MON Configuration Failed\")\n\t}\n\treturn nil\n}\n\nfunc formUrl(route models.ApiRoute) string {\n\turl := fmt.Sprintf(\"http:\/\/localhost:%d\/%s\/%s\", CEPH_INSTALLER_API_PORT, CEPH_INSTALLER_API_DEFAULT_PREFIX, route.Pattern)\n\treturn url\n}\n\nfunc syncRequestStatus(ctxt string, taskId string) bool {\n\tvar status bool\n\n\tfor {\n\t\ttime.Sleep(10 * time.Second)\n\t\ttaskData, err := getTaskData(ctxt, taskId)\n\t\tif err != nil {\n\t\t\treturn status\n\t\t}\n\n\t\tif taskData.Ended != \"\" {\n\t\t\t\/\/ If request has failed return with error\n\t\t\tif taskData.Succeeded {\n\t\t\t\tstatus = true\n\t\t\t} else {\n\t\t\t\tlogger.Get().Error(\"%s-Error while installing Packages. stdout: %v. stderr:%v\", ctxt, taskData.Stdout, taskData.Stderr)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn status\n}\n\nfunc getTaskData(ctxt string, taskId string) (Task, error) {\n\n\tvar respData Task\n\troute := CEPH_INSTALLER_API_ROUTES[\"taskStatus\"]\n\troute.Pattern = strings.Replace(route.Pattern, \"{id}\", taskId, 1)\n\tresp, body, errs := httprequest.Get(formUrl(route)).End()\n\tif len(errs) > 0 {\n\t\tlogger.Get().Error(\"%s-Error getting task status for task:%v.Err:%v\", ctxt, taskId, errs)\n\t\treturn respData, errors.New(\"Error getting task status\")\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlogger.Get().Error(\"%s-Error Getting Task Status:%v\", ctxt, taskId)\n\t\treturn respData, errors.New(\"Error getting task status\")\n\t}\n\n\tif err := json.Unmarshal([]byte(body), &respData); err != nil {\n\t\tlogger.Get().Error(\"%s-Unable to unmarshal response. error: %v\", ctxt, err)\n\t\treturn respData, errors.New(\"Unable to unmarshal response\")\n\t}\n\treturn respData, nil\n}\n\nfunc parseInstallResponseData(ctxt string, resp *http.Response, body string, errs []error) (Task, error) {\n\tvar respData Task\n\tif len(errs) > 0 {\n\t\tlogger.Get().Error(\"%s-Error Installing Packages:%v\", ctxt, errs)\n\t\treturn respData, errors.New(\"Error Sending ceph-installer request\")\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlogger.Get().Error(\"%s-Error Sending ceph-installer request\", ctxt)\n\t\treturn respData, errors.New(\"Error Sending ceph-installer request. Ceph-installer failed\")\n\t}\n\tif err := json.Unmarshal([]byte(body), &respData); err != nil {\n\t\tlogger.Get().Error(\"%s-Unable to unmarshal response. error: %v\", ctxt, err)\n\t\treturn respData, errors.New(\"Unable to unmarshal response\")\n\t}\n\treturn respData, nil\n}\n\nfunc formConfigureData(data map[string]interface{}) (string, error) {\n\n\trequest, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(request), nil\n\n}\n<commit_msg>skyring-common: Log the provisioner call details<commit_after>\/*Licensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cephinstaller\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"github.com\/skyrings\/skyring-common\/conf\"\n\t\"github.com\/skyrings\/skyring-common\/models\"\n\t\"github.com\/skyrings\/skyring-common\/provisioner\"\n\t\"github.com\/skyrings\/skyring-common\/tools\/logger\"\n\t\"github.com\/skyrings\/skyring-common\/tools\/task\"\n\t\"github.com\/skyrings\/skyring-common\/utils\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype CephInstaller struct {\n}\n\nconst (\n\tProviderName = \"ceph-installer\"\n\tCEPH_INSTALLER_API_DEFAULT_PREFIX = \"api\"\n\tCEPH_INSTALLER_API_PORT = 8181\n\tMON = \"MON\"\n\tOSD = \"OSD\"\n)\n\ntype InstallReq struct {\n\tHosts []string `json:\"hosts\"`\n\tRedhatStorage bool `json:\"redhat_storage\"`\n\tRedhatUseCdn bool `json:\"redhat_use_cdn\"`\n}\n\ntype Task struct {\n\tEndpoint string `json:\"endpoint\"`\n\tSucceeded bool `json:\"succeeded\"`\n\tStdout string `json:\"stdout\"`\n\tStarted string `json:\"started\"`\n\tExitCode int `json:\"exit_code\"`\n\tEnded string `json:\"ended\"`\n\tCommand string `json:\"command\"`\n\tStderr string `json:\"stderr\"`\n\tIdentifier string `json:\"identifier\"`\n}\n\nvar httprequest = gorequest.New()\n\nfunc init() {\n\tprovisioner.RegisterProvider(ProviderName, func(config io.Reader) (provisioner.Provisioner, error) {\n\t\treturn New(config)\n\t})\n}\n\nfunc New(config io.Reader) (*CephInstaller, error) {\n\tinstaller := new(CephInstaller)\n\tinstaller.LoadRoutes()\n\treturn installer, nil\n}\n\nfunc (c CephInstaller) Install(ctxt string, t *task.Task, providerName string, nodes []models.ClusterNode) []models.ClusterNode {\n\n\tdb_nodes, err := util.GetNodesByIdStr(nodes)\n\tif err != nil {\n\t\tlogger.Get().Error(\"%s-Error getting nodes while package installation: %v\", ctxt, err)\n\t\treturn nodes\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\tfailedNodes []models.ClusterNode\n\t\tsyncMutex sync.Mutex\n\t)\n\n\tfor _, node := range nodes {\n\t\twg.Add(1)\n\t\tgo func(node models.ClusterNode, hostname string) {\n\t\t\tdefer wg.Done()\n\t\t\tvar route models.ApiRoute\n\t\t\thosts := []string{hostname}\n\t\t\tdata := make(map[string]interface{})\n\t\t\tif util.StringInSlice(MON, node.NodeType) {\n\t\t\t\troute = CEPH_INSTALLER_API_ROUTES[\"monInstall\"]\n\t\t\t\tdata[\"calamari\"] = true\n\t\t\t} else if util.StringInSlice(OSD, node.NodeType) {\n\t\t\t\troute = CEPH_INSTALLER_API_ROUTES[\"osdInstall\"]\n\t\t\t}\n\n\t\t\tdata[\"hosts\"] = hosts\n\t\t\tdata[\"redhat_storage\"] = conf.SystemConfig.Provisioners[providerName].RedhatStorage\n\n\t\t\treqData, err := formConfigureData(data)\n\t\t\tif err != nil {\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresp, body, errs := httprequest.Post(formUrl(route)).Send(reqData).End()\n\n\t\t\trespData, err := parseInstallResponseData(ctxt, resp, body, errs)\n\t\t\tif err != nil {\n\t\t\t\tsyncMutex.Lock()\n\t\t\t\tdefer syncMutex.Unlock()\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.Get().Info(\n\t\t\t\t\"%s-Started installation on node :%s. TaskId: %s. Request Data: %v. Route: %s\",\n\t\t\t\tctxt,\n\t\t\t\thostname,\n\t\t\t\trespData.Identifier,\n\t\t\t\tdata,\n\t\t\t\tformUrl(route))\n\t\t\tif ok := syncRequestStatus(ctxt, respData.Identifier); !ok {\n\t\t\t\tsyncMutex.Lock()\n\t\t\t\tdefer syncMutex.Unlock()\n\t\t\t\tfailedNodes = append(failedNodes, node)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.UpdateStatus(\"Installed packages on: %s\", hostname)\n\t\t}(node, db_nodes[node.NodeId].Hostname)\n\t}\n\twg.Wait()\n\n\treturn failedNodes\n}\nfunc (c CephInstaller) Configure(ctxt string, t *task.Task, reqType string, data map[string]interface{}) error {\n\tvar (\n\t\tresp *http.Response\n\t\tbody string\n\t\terrs []error\n\t\troute models.ApiRoute\n\t)\n\treqData, err := formConfigureData(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif reqType == MON {\n\t\troute = CEPH_INSTALLER_API_ROUTES[\"monConfigure\"]\n\n\t} else if reqType == OSD {\n\t\troute = CEPH_INSTALLER_API_ROUTES[\"osdConfigure\"]\n\n\t}\n\n\tresp, body, errs = httprequest.Post(formUrl(route)).Send(reqData).End()\n\n\trespData, err := parseInstallResponseData(ctxt, resp, body, errs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogger.Get().Info(\n\t\t\"%s-Started configuration on node: %s. TaskId: %s. Request Data: %v. Route: %s\",\n\t\tctxt,\n\t\tdata[\"host\"].(string),\n\t\trespData.Identifier,\n\t\treqData,\n\t\tformUrl(route))\n\tif ok := syncRequestStatus(ctxt, respData.Identifier); !ok {\n\t\treturn errors.New(\"MON Configuration Failed\")\n\t}\n\treturn nil\n}\n\nfunc formUrl(route models.ApiRoute) string {\n\turl := fmt.Sprintf(\"http:\/\/localhost:%d\/%s\/%s\", CEPH_INSTALLER_API_PORT, CEPH_INSTALLER_API_DEFAULT_PREFIX, route.Pattern)\n\treturn url\n}\n\nfunc syncRequestStatus(ctxt string, taskId string) bool {\n\tvar status bool\n\n\tfor {\n\t\ttime.Sleep(10 * time.Second)\n\t\ttaskData, err := getTaskData(ctxt, taskId)\n\t\tif err != nil {\n\t\t\treturn status\n\t\t}\n\n\t\tif taskData.Ended != \"\" {\n\t\t\t\/\/ If request has failed return with error\n\t\t\tif taskData.Succeeded {\n\t\t\t\tstatus = true\n\t\t\t} else {\n\t\t\t\tlogger.Get().Error(\"%s-Error while installing Packages. stdout: %v. stderr:%v\", ctxt, taskData.Stdout, taskData.Stderr)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn status\n}\n\nfunc getTaskData(ctxt string, taskId string) (Task, error) {\n\n\tvar respData Task\n\troute := CEPH_INSTALLER_API_ROUTES[\"taskStatus\"]\n\troute.Pattern = strings.Replace(route.Pattern, \"{id}\", taskId, 1)\n\tresp, body, errs := httprequest.Get(formUrl(route)).End()\n\tif len(errs) > 0 {\n\t\tlogger.Get().Error(\"%s-Error getting task status for task:%v.Err:%v\", ctxt, taskId, errs)\n\t\treturn respData, errors.New(\"Error getting task status\")\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlogger.Get().Error(\"%s-Error Getting Task Status:%v\", ctxt, taskId)\n\t\treturn respData, errors.New(\"Error getting task status\")\n\t}\n\n\tif err := json.Unmarshal([]byte(body), &respData); err != nil {\n\t\tlogger.Get().Error(\"%s-Unable to unmarshal response. error: %v\", ctxt, err)\n\t\treturn respData, errors.New(\"Unable to unmarshal response\")\n\t}\n\treturn respData, nil\n}\n\nfunc parseInstallResponseData(ctxt string, resp *http.Response, body string, errs []error) (Task, error) {\n\tvar respData Task\n\tif len(errs) > 0 {\n\t\tlogger.Get().Error(\"%s-Error Installing Packages:%v\", ctxt, errs)\n\t\treturn respData, errors.New(\"Error Sending ceph-installer request\")\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlogger.Get().Error(\"%s-Error Sending ceph-installer request\", ctxt)\n\t\treturn respData, errors.New(\"Error Sending ceph-installer request. Ceph-installer failed\")\n\t}\n\tif err := json.Unmarshal([]byte(body), &respData); err != nil {\n\t\tlogger.Get().Error(\"%s-Unable to unmarshal response. error: %v\", ctxt, err)\n\t\treturn respData, errors.New(\"Unable to unmarshal response\")\n\t}\n\treturn respData, nil\n}\n\nfunc formConfigureData(data map[string]interface{}) (string, error) {\n\n\trequest, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(request), nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\nimport \"os\"\nimport \"strings\"\nimport \"regexp\"\nimport \"flag\"\nimport \"bufio\"\nimport \"log\"\nimport \"github.com\/joncrlsn\/pgutil\"\nimport \"github.com\/joncrlsn\/fileutil\"\nimport \"github.com\/joncrlsn\/misc\"\n\n\/\/ End of Statement regex\nvar eosRegex *regexp.Regexp = regexp.MustCompile(`;\\s*$|;\\s*--.*$`)\nvar inReader *bufio.Reader = bufio.NewReader(os.Stdin)\n\n\/\/ Executes a file of SQL statements one statement at a time, stopping everything\n\/\/ if one of them has an error\nfunc main() {\n\n \/\/ -f (filename) is a required program argument\n\tvar fileName string\n\tflag.StringVar(&fileName, \"f\", \"\", \"path of the SQL file to run\")\n\tdbInfo := pgutil.DbInfo{}\n\tdbInfo.DbOptions = \"sslmode=disable\"\n\tdbInfo.Populate()\n\n\tif len(fileName) == 0 {\n fmt.Println(\"Missing required filename argument (-f)\")\n\t\tusage()\n\t}\n\n\texists, _ := fileutil.Exists(fileName)\n\tif !exists {\n\t\tfmt.Fprintf(os.Stderr, \"File does not exist: %s\\n\", fileName)\n\t\tos.Exit(2)\n\t}\n\n runFile(fileName, &dbInfo)\n}\n\n\/\/ Reads the file and runs the SQL statements one by one\nfunc runFile(fileName string, dbInfo *pgutil.DbInfo) {\n \/\/ Open connection to the database\n db, err := dbInfo.Open()\n check(\"opening database\", err)\n\n \/\/ Read each statement from the file one at a time and execute them\n\tsqlChan := sqlStatements(fileName)\n\tfor sql := range sqlChan {\n\t\t\/\/ Execute SQL. If not successful, stop and ask user\n\t\t\/\/ whether or not we should continue\n\t\tfmt.Println(\"================================\")\n\t\tfmt.Println(\"Executing SQL: \", sql)\n result, err := db.Exec(sql) \n\n \/\/ If there was an error, ask user whether or not we should continue\n\t\tif err != nil {\n log.Println(\"Error:\", err) \n if misc.PromptYesNo(\"SQL failed! Do you want to continue?\", false) {\n continue\n }\n os.Exit(1)\n\t\t}\n\n rowCnt, err := result.RowsAffected()\n check(\"getting rows affected count\", err)\n fmt.Printf(\"Rows affected: %d\\n\", rowCnt)\n\t}\n fmt.Println(\"Done!\")\n}\n\n\/*\n * Reads and returns (via channel) SQL statements from the given file.\n * SQL statements must end with a semi-colon\n *\/\nfunc sqlStatements(fileName string) <-chan string {\n\tstatementChan := make(chan string)\n\n\tgo func() {\n\t\tlineChan := fileutil.ReadLinesChannel(fileName)\n\n\t\t\/\/ TODO: Convert this to a string builder\n\t\tstatement := \"\"\n\t\tfor line := range lineChan {\n\t\t\t\/\/fmt.Printf(\" Line: %s\\n\", line)\n\n\t\t\t\/\/ ignore blank or empty lines\n\t\t\tif len(strings.TrimSpace(line)) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstatement += line + \"\\n\"\n\n\t\t\t\/\/ look for line ending with just a semi-colon\n\t\t\t\/\/ or a semi-colon with a SQL comment following\n\t\t\tif eosRegex.MatchString(line) {\n\t\t\t\tstatementChan <- statement\n\t\t\t\tstatement = \"\"\n\t\t\t}\n\t\t}\n\n\t\tclose(statementChan)\n\t}()\n\n\treturn statementChan\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: %s -f <sqlFileName> [-host <string>] [-port <int>] [-db <string>] [-user <string>] [-pw <password>] \\n\", os.Args[0])\n fmt.Fprintln(os.Stderr, `\nDatabase connection properties can be specified in two ways:\n * Environment variables\n * Program flags (override environment variables)\n * ~\/.pgpass file (for the password)\n\nEnvironment variables are:\n DBHOST : host name where database is running (default is localhost)\n DBPORT : port database is listening on (default is 5432)\n DBNAME : name of database you want to update\n DBUSER : user in postgres you'll be executing the commands as\n DBPASS : password for the user\n\nProgram flags are:\n -f : required. file path to the SQL\n -U : user in postgres to execute the commands (matches psql flag)\n -h : host name where database is running--default is localhost (matches psql flag)\n -p : port. defaults to 5432 (matches psql flag)\n -d : database name (matches psql flag)\n -pw : password for the user`)\n\tos.Exit(2)\n}\n\nfunc check(msg string, err error) {\n if err != nil {\n log.Fatal(\"Error \" + msg, err)\n }\n}\n<commit_msg>minor improvements<commit_after>package main\n\nimport \"fmt\"\nimport \"os\"\nimport \"strings\"\nimport \"regexp\"\nimport \"flag\"\nimport \"bufio\"\nimport \"log\"\nimport \"github.com\/joncrlsn\/pgutil\"\nimport \"github.com\/joncrlsn\/fileutil\"\nimport \"github.com\/joncrlsn\/misc\"\n\n\/\/ End of Statement regex\nvar eosRegex *regexp.Regexp = regexp.MustCompile(`;\\s*$|;\\s*--.*$`)\nvar inReader *bufio.Reader = bufio.NewReader(os.Stdin)\n\n\/\/ Executes a file of SQL statements one statement at a time, stopping everything\n\/\/ if one of them has an error\nfunc main() {\n\n \/\/ -f (filename) is a required program argument\n\tvar fileName string\n\tflag.StringVar(&fileName, \"f\", \"\", \"path of the SQL file to run\")\n\tdbInfo := pgutil.DbInfo{}\n\tdbInfo.DbOptions = \"sslmode=disable\"\n\tdbInfo.Populate()\n\n\tif len(fileName) == 0 {\n fmt.Fprintln(os.Stderr, \"Missing required filename argument (-f)\")\n\t\tusage()\n\t}\n\n\texists, _ := fileutil.Exists(fileName)\n\tif !exists {\n\t\tfmt.Fprintf(os.Stderr, \"File does not exist: %s\\n\", fileName)\n\t\tos.Exit(2)\n\t}\n\n runFile(fileName, &dbInfo)\n}\n\n\/\/ Reads the file and runs the SQL statements one by one\nfunc runFile(fileName string, dbInfo *pgutil.DbInfo) {\n \/\/ Open connection to the database\n db, err := dbInfo.Open()\n check(\"opening database\", err)\n\n \/\/ Read each statement from the file one at a time and execute them\n\tsqlChan := sqlStatements(fileName)\n\tfor sql := range sqlChan {\n\t\t\/\/ Execute SQL. If not successful, stop and ask user\n\t\t\/\/ whether or not we should continue\n\t\tfmt.Println(\"\\n================================\")\n\t\tlog.Print(\"Executing SQL: \", sql)\n result, err := db.Exec(sql)\n\n \/\/ If there was an error, ask user whether or not we should continue\n\t\tif err != nil {\n fmt.Fprintln(os.Stderr, \"Error: \", err)\n if misc.PromptYesNo(\"SQL failed! Do you want to continue?\", false) {\n continue\n }\n os.Exit(1)\n\t\t}\n\n rowCnt, err := result.RowsAffected()\n check(\"getting rows affected count\", err)\n log.Printf(\"Rows affected: %d\\n\", rowCnt)\n\t}\n log.Println(\"Done!\")\n}\n\n\/*\n * Reads and returns (via channel) SQL statements from the given file.\n * SQL statements must end with a semi-colon\n *\/\nfunc sqlStatements(fileName string) <-chan string {\n\tstatementChan := make(chan string)\n\n\tgo func() {\n\t\tlineChan := fileutil.ReadLinesChannel(fileName)\n\n\t\t\/\/ TODO: Convert this to a string builder\n\t\tstatement := \"\"\n\t\tfor line := range lineChan {\n\t\t\t\/\/fmt.Printf(\" Line: %s\\n\", line)\n\n\t\t\t\/\/ ignore blank or empty lines\n\t\t\tline = strings.TrimSpace(line)\n\t\t\tif len(line) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(line, \"--\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstatement += line + \"\\n\"\n\n\t\t\t\/\/ look for line ending with just a semi-colon\n\t\t\t\/\/ or a semi-colon with a SQL comment following\n\t\t\tif eosRegex.MatchString(line) {\n\t\t\t\tstatementChan <- statement\n\t\t\t\tstatement = \"\"\n\t\t\t}\n\t\t}\n\n\t\tclose(statementChan)\n\t}()\n\n\treturn statementChan\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: %s -f <sqlFileName> [-host <string>] [-port <int>] [-db <string>] [-user <string>] [-pw <password>] \\n\", os.Args[0])\n fmt.Fprintln(os.Stderr, `\nDatabase connection properties can be specified in two ways:\n * Environment variables\n * Program flags (override environment variables)\n * ~\/.pgpass file (for the password)\n\nEnvironment variables are:\n DBHOST : host name where database is running (default is localhost)\n DBPORT : port database is listening on (default is 5432)\n DBNAME : name of database you want to update\n DBUSER : user in postgres you'll be executing the commands as\n DBPASS : password for the user\n\nProgram flags are:\n -f : required. file path to the SQL\n -U : user in postgres to execute the commands (matches psql flag)\n -h : host name where database is running--default is localhost (matches psql flag)\n -p : port. defaults to 5432 (matches psql flag)\n -d : database name (matches psql flag)\n -pw : password for the postgres user`)\n\tos.Exit(2)\n}\n\nfunc check(msg string, err error) {\n if err != nil {\n log.Fatal(\"Error \" + msg, err)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package piwik\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"gopkg.in\/macaron.v1\"\n)\n\ntype Options struct {\n\tPiwikUrl string\n\tIgnoreDoNotTrack bool\n\tWebsiteID string\n\tToken string\n}\n\nfunc prepareOptions(options []Options) Options {\n\tvar opt Options\n\tif len(options) > 0 {\n\t\topt = options[0]\n\t}\n\topt.PiwikUrl = strings.TrimSuffix(strings.TrimSuffix(opt.PiwikUrl, \"piwik.php\"), \"\/\") + \"\/piwik.php?\"\n\n\treturn opt\n}\n\nfunc Piwik(options ...Options) macaron.Handler {\n\topt := prepareOptions(options)\n\n\treturn func(ctx *macaron.Context, logger *log.Logger) {\n\t\tparams := make(url.Values)\n\t\tparams.Set(\"idsite\", opt.WebsiteID)\n\t\tparams.Set(\"rec\", \"1\")\n\n\t\tparams.Set(\"url\", ctx.Req.URL.String())\n\t\tparams.Set(\"apiv\", \"1\")\n\n\t\th := ctx.Req.Header\n\t\tparams.Set(\"urlref\", h.Get(\"Referer\"))\n\t\tfmt.Println(\"USERAGENT\")\n\t\tfmt.Println(h.Get(\"Accept-Language\"))\n\t\tparams.Set(\"ua\", h.Get(\"User-Agent\"))\n\t\tparams.Set(\"lang\", h.Get(\"Accept-Language\"))\n\n\t\tparams.Set(\"token_auth\", opt.Token)\n\t\tparams.Set(\"cip\", ctx.RemoteAddr())\n\n\t\t\/\/ collecting data is finished, go async now\n\t\tgo func() {\n\t\t\tlogger.Println(params.Encode())\n\t\t\tres, err := http.Get(opt.PiwikUrl + params.Encode())\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(\"Error contacting piwik:\", err)\n\t\t\t}\n\t\t\tr, err := ioutil.ReadAll(res.Body)\n\t\t\tlogger.Println(string(r))\n\t\t}()\n\n\t\tctx.Next()\n\t}\n}\n<commit_msg>added DNT<commit_after>package piwik\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"gopkg.in\/macaron.v1\"\n)\n\ntype Options struct {\n\tPiwikUrl string\n\tIgnoreDoNotTrack bool\n\tWebsiteID string\n\tToken string\n}\n\nfunc prepareOptions(options []Options) Options {\n\tvar opt Options\n\tif len(options) > 0 {\n\t\topt = options[0]\n\t}\n\topt.PiwikUrl = strings.TrimSuffix(strings.TrimSuffix(opt.PiwikUrl, \"piwik.php\"), \"\/\") + \"\/piwik.php?\"\n\n\treturn opt\n}\n\nfunc Piwik(options ...Options) macaron.Handler {\n\topt := prepareOptions(options)\n\n\treturn func(ctx *macaron.Context, logger *log.Logger) {\n\t\tif !opt.IgnoreDoNotTrack && ctx.Req.Header.Get(\"DNT\") == \"1\" {\n\t\t\treturn\n\t\t}\n\n\t\tparams := make(url.Values)\n\t\tparams.Set(\"idsite\", opt.WebsiteID)\n\t\tparams.Set(\"rec\", \"1\")\n\n\t\tparams.Set(\"url\", ctx.Req.URL.String())\n\t\tparams.Set(\"apiv\", \"1\")\n\n\t\th := ctx.Req.Header\n\t\tparams.Set(\"urlref\", h.Get(\"Referer\"))\n\t\tparams.Set(\"ua\", h.Get(\"User-Agent\"))\n\t\tparams.Set(\"lang\", h.Get(\"Accept-Language\"))\n\n\t\tparams.Set(\"token_auth\", opt.Token)\n\t\tparams.Set(\"cip\", ctx.RemoteAddr())\n\n\t\t\/\/ collecting data is finished, go async now\n\t\tgo func() {\n\t\t\tres, err := http.Get(opt.PiwikUrl + params.Encode())\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(\"Error contacting piwik:\", err)\n\t\t\t}\n\t\t\tif res.StatusCode != http.StatusOK {\n\t\t\t\tlogger.Println(\"Error contacting piwik:\", res.Status)\n\t\t\t}\n\t\t}()\n\n\t\tctx.Next()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This file was auto-generated using createmock. See the following page for\n\/\/ more information:\n\/\/\n\/\/ https:\/\/github.com\/jacobsa\/oglemock\n\/\/\n\npackage mock_backup\n\nimport (\n\tfmt \"fmt\"\n\tbackup \"github.com\/jacobsa\/comeback\/backup\"\n\tblob \"github.com\/jacobsa\/comeback\/blob\"\n\toglemock \"github.com\/jacobsa\/oglemock\"\n\tregexp \"regexp\"\n\truntime \"runtime\"\n\tunsafe \"unsafe\"\n)\n\ntype MockDirectorySaver interface {\n\tbackup.DirectorySaver\n\toglemock.MockObject\n}\n\ntype mockDirectorySaver struct {\n\tcontroller oglemock.Controller\n\tdescription string\n}\n\nfunc NewMockDirectorySaver(\n\tc oglemock.Controller,\n\tdesc string) MockDirectorySaver {\n\treturn &mockDirectorySaver{\n\t\tcontroller: c,\n\t\tdescription: desc,\n\t}\n}\n\nfunc (m *mockDirectorySaver) Oglemock_Id() uintptr {\n\treturn uintptr(unsafe.Pointer(m))\n}\n\nfunc (m *mockDirectorySaver) Oglemock_Description() string {\n\treturn m.description\n}\n\nfunc (m *mockDirectorySaver) Save(p0 string, p1 string, p2 []regexp.Regexp) (o0 blob.Score, o1 error) {\n\t\/\/ Get a file name and line number for the caller.\n\t_, file, line, _ := runtime.Caller(1)\n\n\t\/\/ Hand the call off to the controller, which does most of the work.\n\tretVals := m.controller.HandleMethodCall(\n\t\tm,\n\t\t\"Save\",\n\t\tfile,\n\t\tline,\n\t\t[]interface{}{p0, p1, p2})\n\n\tif len(retVals) != 2 {\n\t\tpanic(fmt.Sprintf(\"mockDirectorySaver.Save: invalid return values: %v\", retVals))\n\t}\n\n\t\/\/ o0 blob.Score\n\tif retVals[0] != nil {\n\t\to0 = retVals[0].(blob.Score)\n\t}\n\n\t\/\/ o1 error\n\tif retVals[1] != nil {\n\t\to1 = retVals[1].(error)\n\t}\n\n\treturn\n}\n<commit_msg>Updated mock again.<commit_after>\/\/ This file was auto-generated using createmock. See the following page for\n\/\/ more information:\n\/\/\n\/\/ https:\/\/github.com\/jacobsa\/oglemock\n\/\/\n\npackage mock_backup\n\nimport (\n\tbackup \"github.com\/jacobsa\/comeback\/backup\"\n\tblob \"github.com\/jacobsa\/comeback\/blob\"\n\tfmt \"fmt\"\n\toglemock \"github.com\/jacobsa\/oglemock\"\n\tregexp \"regexp\"\n\truntime \"runtime\"\n\tunsafe \"unsafe\"\n)\n\ntype MockDirectorySaver interface {\n\tbackup.DirectorySaver\n\toglemock.MockObject\n}\n\ntype mockDirectorySaver struct {\n\tcontroller\toglemock.Controller\n\tdescription\tstring\n}\n\nfunc NewMockDirectorySaver(\n\tc oglemock.Controller,\n\tdesc string) MockDirectorySaver {\n\treturn &mockDirectorySaver{\n\t\tcontroller:\tc,\n\t\tdescription:\tdesc,\n\t}\n}\n\nfunc (m *mockDirectorySaver) Oglemock_Id() uintptr {\n\treturn uintptr(unsafe.Pointer(m))\n}\n\nfunc (m *mockDirectorySaver) Oglemock_Description() string {\n\treturn m.description\n}\n\nfunc (m *mockDirectorySaver) Save(p0 string, p1 string, p2 []*regexp.Regexp) (o0 blob.Score, o1 error) {\n\t\/\/ Get a file name and line number for the caller.\n\t_, file, line, _ := runtime.Caller(1)\n\n\t\/\/ Hand the call off to the controller, which does most of the work.\n\tretVals := m.controller.HandleMethodCall(\n\t\tm,\n\t\t\"Save\",\n\t\tfile,\n\t\tline,\n\t\t[]interface{}{p0, p1, p2})\n\n\tif len(retVals) != 2 {\n\t\tpanic(fmt.Sprintf(\"mockDirectorySaver.Save: invalid return values: %v\", retVals))\n\t}\n\n\t\/\/ o0 blob.Score\n\tif retVals[0] != nil {\n\t\to0 = retVals[0].(blob.Score)\n\t}\n\n\t\/\/ o1 error\n\tif retVals[1] != nil {\n\t\to1 = retVals[1].(error)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package jobs\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\nvar jobInfo = map[string]*utils.JobInfo{\n\t\"anime-ratings\": &utils.JobInfo{\n\t\tName: \"anime-ratings\",\n\t},\n\t\"twist\": &utils.JobInfo{\n\t\tName: \"twist\",\n\t},\n\t\"refresh-osu\": &utils.JobInfo{\n\t\tName: \"refresh-osu\",\n\t},\n\t\"mal-download\": &utils.JobInfo{\n\t\tName: \"mal-download\",\n\t},\n\t\"mal-parse\": &utils.JobInfo{\n\t\tName: \"mal-parse\",\n\t},\n\t\/\/ \"mal-sync\": &utils.JobInfo{\n\t\/\/ \tName: \"mal-sync\",\n\t\/\/ },\n}\n\nvar jobLogs = []string{}\n\n\/\/ Overview shows all background jobs.\nfunc Overview(ctx *aero.Context) string {\n\tuser := utils.GetUser(ctx)\n\tjobs := []*utils.JobInfo{}\n\n\tfor _, job := range jobInfo {\n\t\tjobs = append(jobs, job)\n\t}\n\n\tsort.Slice(jobs, func(i, j int) bool {\n\t\treturn jobs[i].Name < jobs[j].Name\n\t})\n\n\treturn ctx.HTML(components.EditorJobs(jobs, jobLogs, ctx.URI(), user))\n}\n<commit_msg>Added Kitsu anime import to editor accessible jobs<commit_after>package jobs\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\nvar jobInfo = map[string]*utils.JobInfo{\n\t\"anime-ratings\": &utils.JobInfo{\n\t\tName: \"anime-ratings\",\n\t},\n\t\"kitsu-import-anime\": &utils.JobInfo{\n\t\tName: \"kitsu-import-anime\",\n\t},\n\t\"twist\": &utils.JobInfo{\n\t\tName: \"twist\",\n\t},\n\t\"refresh-osu\": &utils.JobInfo{\n\t\tName: \"refresh-osu\",\n\t},\n\t\"mal-download\": &utils.JobInfo{\n\t\tName: \"mal-download\",\n\t},\n\t\"mal-parse\": &utils.JobInfo{\n\t\tName: \"mal-parse\",\n\t},\n\t\/\/ \"mal-sync\": &utils.JobInfo{\n\t\/\/ \tName: \"mal-sync\",\n\t\/\/ },\n}\n\nvar jobLogs = []string{}\n\n\/\/ Overview shows all background jobs.\nfunc Overview(ctx *aero.Context) string {\n\tuser := utils.GetUser(ctx)\n\tjobs := []*utils.JobInfo{}\n\n\tfor _, job := range jobInfo {\n\t\tjobs = append(jobs, job)\n\t}\n\n\tsort.Slice(jobs, func(i, j int) bool {\n\t\treturn jobs[i].Name < jobs[j].Name\n\t})\n\n\treturn ctx.HTML(components.EditorJobs(jobs, jobLogs, ctx.URI(), user))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rsa\n\n\/\/ This file implements the PSS signature scheme [1].\n\/\/\n\/\/ [1] http:\/\/www.rsa.com\/rsalabs\/pkcs\/files\/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"errors\"\n\t\"hash\"\n\t\"io\"\n\t\"math\/big\"\n)\n\nfunc emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {\n\t\/\/ See [1], section 9.1.1\n\thLen := hash.Size()\n\tsLen := len(salt)\n\temLen := (emBits + 7) \/ 8\n\n\t\/\/ 1. If the length of M is greater than the input limitation for the\n\t\/\/ hash function (2^61 - 1 octets for SHA-1), output \"message too\n\t\/\/ long\" and stop.\n\t\/\/\n\t\/\/ 2. Let mHash = Hash(M), an octet string of length hLen.\n\n\tif len(mHash) != hLen {\n\t\treturn nil, errors.New(\"crypto\/rsa: input must be hashed message\")\n\t}\n\n\t\/\/ 3. If emLen < hLen + sLen + 2, output \"encoding error\" and stop.\n\n\tif emLen < hLen+sLen+2 {\n\t\treturn nil, errors.New(\"crypto\/rsa: encoding error\")\n\t}\n\n\tem := make([]byte, emLen)\n\tdb := em[:emLen-sLen-hLen-2+1+sLen]\n\th := em[emLen-sLen-hLen-2+1+sLen : emLen-1]\n\n\t\/\/ 4. Generate a random octet string salt of length sLen; if sLen = 0,\n\t\/\/ then salt is the empty string.\n\t\/\/\n\t\/\/ 5. Let\n\t\/\/ M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;\n\t\/\/\n\t\/\/ M' is an octet string of length 8 + hLen + sLen with eight\n\t\/\/ initial zero octets.\n\t\/\/\n\t\/\/ 6. Let H = Hash(M'), an octet string of length hLen.\n\n\tvar prefix [8]byte\n\n\thash.Write(prefix[:])\n\thash.Write(mHash)\n\thash.Write(salt)\n\n\th = hash.Sum(h[:0])\n\thash.Reset()\n\n\t\/\/ 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2\n\t\/\/ zero octets. The length of PS may be 0.\n\t\/\/\n\t\/\/ 8. Let DB = PS || 0x01 || salt; DB is an octet string of length\n\t\/\/ emLen - hLen - 1.\n\n\tdb[emLen-sLen-hLen-2] = 0x01\n\tcopy(db[emLen-sLen-hLen-1:], salt)\n\n\t\/\/ 9. Let dbMask = MGF(H, emLen - hLen - 1).\n\t\/\/\n\t\/\/ 10. Let maskedDB = DB \\xor dbMask.\n\n\tmgf1XOR(db, hash, h)\n\n\t\/\/ 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in\n\t\/\/ maskedDB to zero.\n\n\tdb[0] &= (0xFF >> uint(8*emLen-emBits))\n\n\t\/\/ 12. Let EM = maskedDB || H || 0xbc.\n\tem[emLen-1] = 0xBC\n\n\t\/\/ 13. Output EM.\n\treturn em, nil\n}\n\nfunc emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {\n\t\/\/ 1. If the length of M is greater than the input limitation for the\n\t\/\/ hash function (2^61 - 1 octets for SHA-1), output \"inconsistent\"\n\t\/\/ and stop.\n\t\/\/\n\t\/\/ 2. Let mHash = Hash(M), an octet string of length hLen.\n\thLen := hash.Size()\n\tif hLen != len(mHash) {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 3. If emLen < hLen + sLen + 2, output \"inconsistent\" and stop.\n\temLen := (emBits + 7) \/ 8\n\tif emLen < hLen+sLen+2 {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 4. If the rightmost octet of EM does not have hexadecimal value\n\t\/\/ 0xbc, output \"inconsistent\" and stop.\n\tif em[len(em)-1] != 0xBC {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and\n\t\/\/ let H be the next hLen octets.\n\tdb := em[:emLen-hLen-1]\n\th := em[emLen-hLen-1 : len(em)-1]\n\n\t\/\/ 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in\n\t\/\/ maskedDB are not all equal to zero, output \"inconsistent\" and\n\t\/\/ stop.\n\tif em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 7. Let dbMask = MGF(H, emLen - hLen - 1).\n\t\/\/\n\t\/\/ 8. Let DB = maskedDB \\xor dbMask.\n\tmgf1XOR(db, hash, h)\n\n\t\/\/ 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB\n\t\/\/ to zero.\n\tdb[0] &= (0xFF >> uint(8*emLen-emBits))\n\n\tif sLen == PSSSaltLengthAuto {\n\tFindSaltLength:\n\t\tfor sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {\n\t\t\tswitch db[emLen-hLen-sLen-2] {\n\t\t\tcase 1:\n\t\t\t\tbreak FindSaltLength\n\t\t\tcase 0:\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn ErrVerification\n\t\t\t}\n\t\t}\n\t\tif sLen < 0 {\n\t\t\treturn ErrVerification\n\t\t}\n\t} else {\n\t\t\/\/ 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero\n\t\t\/\/ or if the octet at position emLen - hLen - sLen - 1 (the leftmost\n\t\t\/\/ position is \"position 1\") does not have hexadecimal value 0x01,\n\t\t\/\/ output \"inconsistent\" and stop.\n\t\tfor _, e := range db[:emLen-hLen-sLen-2] {\n\t\t\tif e != 0x00 {\n\t\t\t\treturn ErrVerification\n\t\t\t}\n\t\t}\n\t\tif db[emLen-hLen-sLen-2] != 0x01 {\n\t\t\treturn ErrVerification\n\t\t}\n\t}\n\n\t\/\/ 11. Let salt be the last sLen octets of DB.\n\tsalt := db[len(db)-sLen:]\n\n\t\/\/ 12. Let\n\t\/\/ M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;\n\t\/\/ M' is an octet string of length 8 + hLen + sLen with eight\n\t\/\/ initial zero octets.\n\t\/\/\n\t\/\/ 13. Let H' = Hash(M'), an octet string of length hLen.\n\tvar prefix [8]byte\n\thash.Write(prefix[:])\n\thash.Write(mHash)\n\thash.Write(salt)\n\n\th0 := hash.Sum(nil)\n\n\t\/\/ 14. If H = H', output \"consistent.\" Otherwise, output \"inconsistent.\"\n\tif !bytes.Equal(h0, h) {\n\t\treturn ErrVerification\n\t}\n\treturn nil\n}\n\n\/\/ signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.\n\/\/ Note that hashed must be the result of hashing the input message using the\n\/\/ given hash function. salt is a random sequence of bytes whose length will be\n\/\/ later used to verify the signature.\nfunc signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {\n\tnBits := priv.N.BitLen()\n\tem, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())\n\tif err != nil {\n\t\treturn\n\t}\n\tm := new(big.Int).SetBytes(em)\n\tc, err := decryptAndCheck(rand, priv, m)\n\tif err != nil {\n\t\treturn\n\t}\n\ts = make([]byte, (nBits+7)\/8)\n\tcopyWithLeftPad(s, c.Bytes())\n\treturn\n}\n\nconst (\n\t\/\/ PSSSaltLengthAuto causes the salt in a PSS signature to be as large\n\t\/\/ as possible when signing, and to be auto-detected when verifying.\n\tPSSSaltLengthAuto = 0\n\t\/\/ PSSSaltLengthEqualsHash causes the salt length to equal the length\n\t\/\/ of the hash used in the signature.\n\tPSSSaltLengthEqualsHash = -1\n)\n\n\/\/ PSSOptions contains options for creating and verifying PSS signatures.\ntype PSSOptions struct {\n\t\/\/ SaltLength controls the length of the salt used in the PSS\n\t\/\/ signature. It can either be a number of bytes, or one of the special\n\t\/\/ PSSSaltLength constants.\n\tSaltLength int\n\n\t\/\/ Hash, if not zero, overrides the hash function passed to SignPSS.\n\t\/\/ This is the only way to specify the hash function when using the\n\t\/\/ crypto.Signer interface.\n\tHash crypto.Hash\n}\n\n\/\/ HashFunc returns pssOpts.Hash so that PSSOptions implements\n\/\/ crypto.SignerOpts.\nfunc (pssOpts *PSSOptions) HashFunc() crypto.Hash {\n\treturn pssOpts.Hash\n}\n\nfunc (opts *PSSOptions) saltLength() int {\n\tif opts == nil {\n\t\treturn PSSSaltLengthAuto\n\t}\n\treturn opts.SaltLength\n}\n\n\/\/ SignPSS calculates the signature of hashed using RSASSA-PSS [1].\n\/\/ Note that hashed must be the result of hashing the input message using the\n\/\/ given hash function. The opts argument may be nil, in which case sensible\n\/\/ defaults are used.\nfunc SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) ([]byte, error) {\n\tsaltLength := opts.saltLength()\n\tswitch saltLength {\n\tcase PSSSaltLengthAuto:\n\t\tsaltLength = (priv.N.BitLen()+7)\/8 - 2 - hash.Size()\n\tcase PSSSaltLengthEqualsHash:\n\t\tsaltLength = hash.Size()\n\t}\n\n\tif opts != nil && opts.Hash != 0 {\n\t\thash = opts.Hash\n\t}\n\n\tsalt := make([]byte, saltLength)\n\tif _, err := io.ReadFull(rand, salt); err != nil {\n\t\treturn nil, err\n\t}\n\treturn signPSSWithSalt(rand, priv, hash, hashed, salt)\n}\n\n\/\/ VerifyPSS verifies a PSS signature.\n\/\/ hashed is the result of hashing the input message using the given hash\n\/\/ function and sig is the signature. A valid signature is indicated by\n\/\/ returning a nil error. The opts argument may be nil, in which case sensible\n\/\/ defaults are used.\nfunc VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {\n\treturn verifyPSS(pub, hash, hashed, sig, opts.saltLength())\n}\n\n\/\/ verifyPSS verifies a PSS signature with the given salt length.\nfunc verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {\n\tnBits := pub.N.BitLen()\n\tif len(sig) != (nBits+7)\/8 {\n\t\treturn ErrVerification\n\t}\n\ts := new(big.Int).SetBytes(sig)\n\tm := encrypt(new(big.Int), pub, s)\n\temBits := nBits - 1\n\temLen := (emBits + 7) \/ 8\n\tif emLen < len(m.Bytes()) {\n\t\treturn ErrVerification\n\t}\n\tem := make([]byte, emLen)\n\tcopyWithLeftPad(em, m.Bytes())\n\tif saltLen == PSSSaltLengthEqualsHash {\n\t\tsaltLen = hash.Size()\n\t}\n\treturn emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())\n}\n<commit_msg>crypto\/rsa: fix URL for the PKCS #1 v2.2 document in pss.go<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rsa\n\n\/\/ This file implements the PSS signature scheme [1].\n\/\/\n\/\/ [1] https:\/\/www.emc.com\/collateral\/white-papers\/h11300-pkcs-1v2-2-rsa-cryptography-standard-wp.pdf\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"errors\"\n\t\"hash\"\n\t\"io\"\n\t\"math\/big\"\n)\n\nfunc emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {\n\t\/\/ See [1], section 9.1.1\n\thLen := hash.Size()\n\tsLen := len(salt)\n\temLen := (emBits + 7) \/ 8\n\n\t\/\/ 1. If the length of M is greater than the input limitation for the\n\t\/\/ hash function (2^61 - 1 octets for SHA-1), output \"message too\n\t\/\/ long\" and stop.\n\t\/\/\n\t\/\/ 2. Let mHash = Hash(M), an octet string of length hLen.\n\n\tif len(mHash) != hLen {\n\t\treturn nil, errors.New(\"crypto\/rsa: input must be hashed message\")\n\t}\n\n\t\/\/ 3. If emLen < hLen + sLen + 2, output \"encoding error\" and stop.\n\n\tif emLen < hLen+sLen+2 {\n\t\treturn nil, errors.New(\"crypto\/rsa: encoding error\")\n\t}\n\n\tem := make([]byte, emLen)\n\tdb := em[:emLen-sLen-hLen-2+1+sLen]\n\th := em[emLen-sLen-hLen-2+1+sLen : emLen-1]\n\n\t\/\/ 4. Generate a random octet string salt of length sLen; if sLen = 0,\n\t\/\/ then salt is the empty string.\n\t\/\/\n\t\/\/ 5. Let\n\t\/\/ M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;\n\t\/\/\n\t\/\/ M' is an octet string of length 8 + hLen + sLen with eight\n\t\/\/ initial zero octets.\n\t\/\/\n\t\/\/ 6. Let H = Hash(M'), an octet string of length hLen.\n\n\tvar prefix [8]byte\n\n\thash.Write(prefix[:])\n\thash.Write(mHash)\n\thash.Write(salt)\n\n\th = hash.Sum(h[:0])\n\thash.Reset()\n\n\t\/\/ 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2\n\t\/\/ zero octets. The length of PS may be 0.\n\t\/\/\n\t\/\/ 8. Let DB = PS || 0x01 || salt; DB is an octet string of length\n\t\/\/ emLen - hLen - 1.\n\n\tdb[emLen-sLen-hLen-2] = 0x01\n\tcopy(db[emLen-sLen-hLen-1:], salt)\n\n\t\/\/ 9. Let dbMask = MGF(H, emLen - hLen - 1).\n\t\/\/\n\t\/\/ 10. Let maskedDB = DB \\xor dbMask.\n\n\tmgf1XOR(db, hash, h)\n\n\t\/\/ 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in\n\t\/\/ maskedDB to zero.\n\n\tdb[0] &= (0xFF >> uint(8*emLen-emBits))\n\n\t\/\/ 12. Let EM = maskedDB || H || 0xbc.\n\tem[emLen-1] = 0xBC\n\n\t\/\/ 13. Output EM.\n\treturn em, nil\n}\n\nfunc emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {\n\t\/\/ 1. If the length of M is greater than the input limitation for the\n\t\/\/ hash function (2^61 - 1 octets for SHA-1), output \"inconsistent\"\n\t\/\/ and stop.\n\t\/\/\n\t\/\/ 2. Let mHash = Hash(M), an octet string of length hLen.\n\thLen := hash.Size()\n\tif hLen != len(mHash) {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 3. If emLen < hLen + sLen + 2, output \"inconsistent\" and stop.\n\temLen := (emBits + 7) \/ 8\n\tif emLen < hLen+sLen+2 {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 4. If the rightmost octet of EM does not have hexadecimal value\n\t\/\/ 0xbc, output \"inconsistent\" and stop.\n\tif em[len(em)-1] != 0xBC {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and\n\t\/\/ let H be the next hLen octets.\n\tdb := em[:emLen-hLen-1]\n\th := em[emLen-hLen-1 : len(em)-1]\n\n\t\/\/ 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in\n\t\/\/ maskedDB are not all equal to zero, output \"inconsistent\" and\n\t\/\/ stop.\n\tif em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {\n\t\treturn ErrVerification\n\t}\n\n\t\/\/ 7. Let dbMask = MGF(H, emLen - hLen - 1).\n\t\/\/\n\t\/\/ 8. Let DB = maskedDB \\xor dbMask.\n\tmgf1XOR(db, hash, h)\n\n\t\/\/ 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB\n\t\/\/ to zero.\n\tdb[0] &= (0xFF >> uint(8*emLen-emBits))\n\n\tif sLen == PSSSaltLengthAuto {\n\tFindSaltLength:\n\t\tfor sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {\n\t\t\tswitch db[emLen-hLen-sLen-2] {\n\t\t\tcase 1:\n\t\t\t\tbreak FindSaltLength\n\t\t\tcase 0:\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn ErrVerification\n\t\t\t}\n\t\t}\n\t\tif sLen < 0 {\n\t\t\treturn ErrVerification\n\t\t}\n\t} else {\n\t\t\/\/ 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero\n\t\t\/\/ or if the octet at position emLen - hLen - sLen - 1 (the leftmost\n\t\t\/\/ position is \"position 1\") does not have hexadecimal value 0x01,\n\t\t\/\/ output \"inconsistent\" and stop.\n\t\tfor _, e := range db[:emLen-hLen-sLen-2] {\n\t\t\tif e != 0x00 {\n\t\t\t\treturn ErrVerification\n\t\t\t}\n\t\t}\n\t\tif db[emLen-hLen-sLen-2] != 0x01 {\n\t\t\treturn ErrVerification\n\t\t}\n\t}\n\n\t\/\/ 11. Let salt be the last sLen octets of DB.\n\tsalt := db[len(db)-sLen:]\n\n\t\/\/ 12. Let\n\t\/\/ M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;\n\t\/\/ M' is an octet string of length 8 + hLen + sLen with eight\n\t\/\/ initial zero octets.\n\t\/\/\n\t\/\/ 13. Let H' = Hash(M'), an octet string of length hLen.\n\tvar prefix [8]byte\n\thash.Write(prefix[:])\n\thash.Write(mHash)\n\thash.Write(salt)\n\n\th0 := hash.Sum(nil)\n\n\t\/\/ 14. If H = H', output \"consistent.\" Otherwise, output \"inconsistent.\"\n\tif !bytes.Equal(h0, h) {\n\t\treturn ErrVerification\n\t}\n\treturn nil\n}\n\n\/\/ signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.\n\/\/ Note that hashed must be the result of hashing the input message using the\n\/\/ given hash function. salt is a random sequence of bytes whose length will be\n\/\/ later used to verify the signature.\nfunc signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {\n\tnBits := priv.N.BitLen()\n\tem, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())\n\tif err != nil {\n\t\treturn\n\t}\n\tm := new(big.Int).SetBytes(em)\n\tc, err := decryptAndCheck(rand, priv, m)\n\tif err != nil {\n\t\treturn\n\t}\n\ts = make([]byte, (nBits+7)\/8)\n\tcopyWithLeftPad(s, c.Bytes())\n\treturn\n}\n\nconst (\n\t\/\/ PSSSaltLengthAuto causes the salt in a PSS signature to be as large\n\t\/\/ as possible when signing, and to be auto-detected when verifying.\n\tPSSSaltLengthAuto = 0\n\t\/\/ PSSSaltLengthEqualsHash causes the salt length to equal the length\n\t\/\/ of the hash used in the signature.\n\tPSSSaltLengthEqualsHash = -1\n)\n\n\/\/ PSSOptions contains options for creating and verifying PSS signatures.\ntype PSSOptions struct {\n\t\/\/ SaltLength controls the length of the salt used in the PSS\n\t\/\/ signature. It can either be a number of bytes, or one of the special\n\t\/\/ PSSSaltLength constants.\n\tSaltLength int\n\n\t\/\/ Hash, if not zero, overrides the hash function passed to SignPSS.\n\t\/\/ This is the only way to specify the hash function when using the\n\t\/\/ crypto.Signer interface.\n\tHash crypto.Hash\n}\n\n\/\/ HashFunc returns pssOpts.Hash so that PSSOptions implements\n\/\/ crypto.SignerOpts.\nfunc (pssOpts *PSSOptions) HashFunc() crypto.Hash {\n\treturn pssOpts.Hash\n}\n\nfunc (opts *PSSOptions) saltLength() int {\n\tif opts == nil {\n\t\treturn PSSSaltLengthAuto\n\t}\n\treturn opts.SaltLength\n}\n\n\/\/ SignPSS calculates the signature of hashed using RSASSA-PSS [1].\n\/\/ Note that hashed must be the result of hashing the input message using the\n\/\/ given hash function. The opts argument may be nil, in which case sensible\n\/\/ defaults are used.\nfunc SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) ([]byte, error) {\n\tsaltLength := opts.saltLength()\n\tswitch saltLength {\n\tcase PSSSaltLengthAuto:\n\t\tsaltLength = (priv.N.BitLen()+7)\/8 - 2 - hash.Size()\n\tcase PSSSaltLengthEqualsHash:\n\t\tsaltLength = hash.Size()\n\t}\n\n\tif opts != nil && opts.Hash != 0 {\n\t\thash = opts.Hash\n\t}\n\n\tsalt := make([]byte, saltLength)\n\tif _, err := io.ReadFull(rand, salt); err != nil {\n\t\treturn nil, err\n\t}\n\treturn signPSSWithSalt(rand, priv, hash, hashed, salt)\n}\n\n\/\/ VerifyPSS verifies a PSS signature.\n\/\/ hashed is the result of hashing the input message using the given hash\n\/\/ function and sig is the signature. A valid signature is indicated by\n\/\/ returning a nil error. The opts argument may be nil, in which case sensible\n\/\/ defaults are used.\nfunc VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {\n\treturn verifyPSS(pub, hash, hashed, sig, opts.saltLength())\n}\n\n\/\/ verifyPSS verifies a PSS signature with the given salt length.\nfunc verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {\n\tnBits := pub.N.BitLen()\n\tif len(sig) != (nBits+7)\/8 {\n\t\treturn ErrVerification\n\t}\n\ts := new(big.Int).SetBytes(sig)\n\tm := encrypt(new(big.Int), pub, s)\n\temBits := nBits - 1\n\temLen := (emBits + 7) \/ 8\n\tif emLen < len(m.Bytes()) {\n\t\treturn ErrVerification\n\t}\n\tem := make([]byte, emLen)\n\tcopyWithLeftPad(em, m.Bytes())\n\tif saltLen == PSSSaltLengthEqualsHash {\n\t\tsaltLen = hash.Size()\n\t}\n\treturn emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())\n}\n<|endoftext|>"} {"text":"<commit_before>package domain\n\nimport (\n\t\"errors\"\n)\n\ntype ChannelRepository interface {\n\tChannels() ([]*Channel, error)\n\tStore(channel *Channel) error\n\tFindById(id int64) (*Channel, error)\n}\n\n\/\/ Chat channel\ntype Channel struct {\n\tId int64\n\tName string\n\tPublic bool\n\tCapacity int\n\tClients []*Client\n}\n\nfunc NewChannel(name string) *Channel {\n\tclients := make([]*Client, 0)\n\treturn &Channel{\n\t\tName: name,\n\t\tPublic: true,\n\t\tCapacity: 0,\n\t\tClients: clients,\n\t}\n}\n\n\/\/ Add adds a client to the channel\nfunc (c *Channel) Join(client *Client) error {\n\tif !c.HasAccess(client) {\n\t\treturn errors.New(\"Client cannot join this channel\")\n\t}\n\n\tif c.Capacity != 0 && len(c.Clients) >= c.Capacity {\n\t\treturn errors.New(\"Channel is full\")\n\t}\n\n\tc.Clients = append(c.Clients, client)\n\tclient.Channel = c\n\treturn nil\n}\n\nfunc (c *Channel) Send(message *Message) {\n\tfor _, client := range c.Clients {\n\t\tclient.ClientSender.Send(message)\n\t}\n}\n\nfunc (c *Channel) HasAccess(client *Client) bool {\n\tif c.Public {\n\t\treturn true\n\t}\n\t\/\/TODO: Channel permissions\n\treturn false\n}\n<commit_msg>make sure a user cannot join channel twice<commit_after>package domain\n\nimport (\n\t\"errors\"\n)\n\ntype ChannelRepository interface {\n\tChannels() ([]*Channel, error)\n\tStore(channel *Channel) error\n\tFindById(id int64) (*Channel, error)\n}\n\n\/\/ Chat channel\ntype Channel struct {\n\tId int64\n\tName string\n\tPublic bool\n\tCapacity int\n\tClients []*Client\n}\n\nfunc NewChannel(name string) *Channel {\n\tclients := make([]*Client, 0)\n\treturn &Channel{\n\t\tName: name,\n\t\tPublic: true,\n\t\tCapacity: 0,\n\t\tClients: clients,\n\t}\n}\n\n\/\/ Add adds a client to the channel\nfunc (c *Channel) Join(client *Client) error {\n\tif !c.HasAccess(client) {\n\t\treturn errors.New(\"Client cannot join this channel\")\n\t}\n\n\tif c.Capacity != 0 && len(c.Clients) >= c.Capacity {\n\t\treturn errors.New(\"Channel is full\")\n\t}\n\n\tfor _, curClient := range c.Clients {\n\t\tif curClient.Id == client.Id {\n\t\t\treturn errors.New(\"Client already in the channel\")\n\t\t}\n\t}\n\n\tc.Clients = append(c.Clients, client)\n\tclient.Channel = c\n\treturn nil\n}\n\nfunc (c *Channel) Send(message *Message) {\n\tfor _, client := range c.Clients {\n\t\tclient.ClientSender.Send(message)\n\t}\n}\n\nfunc (c *Channel) HasAccess(client *Client) bool {\n\tif c.Public {\n\t\treturn true\n\t}\n\t\/\/TODO: Channel permissions\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package index\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/rata\"\n\n\t\"github.com\/concourse\/atc\/builds\"\n\t\"github.com\/concourse\/atc\/config\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/radar\"\n\t\"github.com\/concourse\/atc\/web\/routes\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\n\t\/\/ used for providing resource state\n\tradar *radar.Radar\n\n\tresources config.Resources\n\tjobs config.Jobs\n\tdb db.DB\n\ttemplate *template.Template\n}\n\nfunc NewHandler(logger lager.Logger, radar *radar.Radar, resources config.Resources, jobs config.Jobs, db db.DB, template *template.Template) http.Handler {\n\treturn &handler{\n\t\tlogger: logger,\n\n\t\tradar: radar,\n\n\t\tresources: resources,\n\t\tjobs: jobs,\n\t\tdb: db,\n\t\ttemplate: template,\n\t}\n}\n\ntype TemplateData struct {\n\tJobs []JobStatus\n\tNodes []DotNode\n\tEdges []DotEdge\n}\n\ntype DotNode struct {\n\tID string `json:\"id\"`\n\tValue map[string]string `json:\"value,omitempty\"`\n}\n\ntype DotEdge struct {\n\tSource string `json:\"u\"`\n\tDestination string `json:\"v\"`\n\tValue map[string]string `json:\"value,omitempty\"`\n}\n\ntype JobStatus struct {\n\tJob config.Job\n\tCurrentBuild builds.Build\n}\n\nfunc (handler *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdata := TemplateData{\n\t\tNodes: []DotNode{},\n\t\tEdges: []DotEdge{},\n\t}\n\n\tlog := handler.logger.Session(\"index\")\n\n\tcurrentBuilds := map[string]builds.Build{}\n\n\tfor _, job := range handler.jobs {\n\t\tcurrentBuild, err := handler.db.GetCurrentBuild(job.Name)\n\t\tif err != nil {\n\t\t\tcurrentBuild.Status = builds.StatusPending\n\t\t}\n\n\t\tcurrentBuilds[job.Name] = currentBuild\n\n\t\tdata.Jobs = append(data.Jobs, JobStatus{\n\t\t\tJob: job,\n\t\t\tCurrentBuild: currentBuild,\n\t\t})\n\t}\n\n\tfor _, resource := range handler.resources {\n\t\tresourceID := resourceNode(resource.Name)\n\n\t\tresourceURI, _ := routes.Routes.CreatePathForRoute(routes.GetResource, rata.Params{\n\t\t\t\"resource\": resource.Name,\n\t\t})\n\n\t\tfailing, checking := handler.radar.ResourceStatus(resource.Name)\n\n\t\tvar status string\n\t\tif failing {\n\t\t\tstatus = \"failing\"\n\t\t} else {\n\t\t\tstatus = \"ok\"\n\t\t}\n\n\t\tif checking {\n\t\t\tstatus += \" checking\"\n\t\t}\n\n\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\tID: resourceID,\n\t\t\tValue: map[string]string{\n\t\t\t\t\"label\": fmt.Sprintf(`<h1 class=\"resource\"><a href=\"%s\">%s<\/a><\/h1>`, resourceURI, resource.Name),\n\t\t\t\t\"type\": \"resource\",\n\t\t\t\t\"status\": status,\n\t\t\t},\n\t\t})\n\t}\n\n\tfor _, job := range handler.jobs {\n\t\tjobID := jobNode(job.Name)\n\t\tcurrentBuild := currentBuilds[job.Name]\n\n\t\tvar buildURI string\n\t\tvar err error\n\n\t\tif currentBuild.Name != \"\" {\n\t\t\tbuildURI, err = routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{\n\t\t\t\t\"job\": job.Name,\n\t\t\t\t\"build\": currentBuild.Name,\n\t\t\t})\n\t\t} else {\n\t\t\tbuildURI, err = routes.Routes.CreatePathForRoute(routes.GetJob, rata.Params{\n\t\t\t\t\"job\": job.Name,\n\t\t\t})\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed-to-create-route\", err)\n\t\t}\n\n\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\tID: jobID,\n\t\t\tValue: map[string]string{\n\t\t\t\t\"label\": fmt.Sprintf(`<h1 class=\"job\"><a href=\"%s\">%s<\/a>`, buildURI, job.Name),\n\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t\"type\": \"job\",\n\t\t\t},\n\t\t})\n\n\t\tedges := map[string]DotEdge{}\n\n\t\tfor _, input := range job.Inputs {\n\t\t\tif len(input.Passed) > 0 {\n\t\t\t\tvar nodeID string\n\n\t\t\t\tif len(input.Passed) > 1 {\n\t\t\t\t\tnodeID = jobID + \"-input-\" + input.Resource\n\n\t\t\t\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\t\t\t\tID: nodeID,\n\t\t\t\t\t\tValue: map[string]string{\n\t\t\t\t\t\t\t\"useDef\": \"gateway\",\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\n\t\t\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\t\t\tSource: nodeID,\n\t\t\t\t\t\tDestination: jobID,\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tnodeID = jobID\n\t\t\t\t}\n\n\t\t\t\tfor _, passed := range input.Passed {\n\t\t\t\t\tcurrentBuild := currentBuilds[passed]\n\n\t\t\t\t\tpassedJob, found := handler.jobs.Lookup(passed)\n\t\t\t\t\tif !found {\n\t\t\t\t\t\tpanic(\"unknown job: \" + passed)\n\t\t\t\t\t}\n\n\t\t\t\t\texistingEdge, found := edges[passed]\n\t\t\t\t\tif found {\n\t\t\t\t\t\tif len(passedJob.Inputs) > 1 {\n\t\t\t\t\t\t\texistingEdge.Value[\"label\"] += \"\\n\" + input.Resource\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvalue := map[string]string{\n\t\t\t\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(passedJob.Inputs) > 1 {\n\t\t\t\t\t\t\tvalue[\"label\"] = input.Resource\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tedges[passed] = DotEdge{\n\t\t\t\t\t\t\tSource: jobNode(passed),\n\t\t\t\t\t\t\tDestination: nodeID,\n\t\t\t\t\t\t\tValue: value,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\t\tSource: resourceNode(input.Resource),\n\t\t\t\t\tDestination: jobID,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tfor _, edge := range edges {\n\t\t\tdata.Edges = append(data.Edges, edge)\n\t\t}\n\n\t\tfor _, output := range job.Outputs {\n\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\tSource: jobID,\n\t\t\t\tDestination: resourceNode(output.Resource),\n\t\t\t\tValue: map[string]string{\n\t\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\terr := handler.template.Execute(w, data)\n\tif err != nil {\n\t\tlog.Fatal(\"failed-to-execute-template\", err, lager.Data{\n\t\t\t\"template-data\": data,\n\t\t})\n\t}\n}\n\nfunc resourceNode(resource string) string {\n\treturn \"resource-\" + resource\n}\n\nfunc jobNode(job string) string {\n\treturn \"job-\" + job\n}\n<commit_msg>hide resources from pipeline with ?hide-resources<commit_after>package index\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/rata\"\n\n\t\"github.com\/concourse\/atc\/builds\"\n\t\"github.com\/concourse\/atc\/config\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/concourse\/atc\/radar\"\n\t\"github.com\/concourse\/atc\/web\/routes\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\n\t\/\/ used for providing resource state\n\tradar *radar.Radar\n\n\tresources config.Resources\n\tjobs config.Jobs\n\tdb db.DB\n\ttemplate *template.Template\n}\n\nfunc NewHandler(logger lager.Logger, radar *radar.Radar, resources config.Resources, jobs config.Jobs, db db.DB, template *template.Template) http.Handler {\n\treturn &handler{\n\t\tlogger: logger,\n\n\t\tradar: radar,\n\n\t\tresources: resources,\n\t\tjobs: jobs,\n\t\tdb: db,\n\t\ttemplate: template,\n\t}\n}\n\ntype TemplateData struct {\n\tJobs []JobStatus\n\tNodes []DotNode\n\tEdges []DotEdge\n}\n\ntype DotNode struct {\n\tID string `json:\"id\"`\n\tValue map[string]string `json:\"value,omitempty\"`\n}\n\ntype DotEdge struct {\n\tSource string `json:\"u\"`\n\tDestination string `json:\"v\"`\n\tValue map[string]string `json:\"value,omitempty\"`\n}\n\ntype JobStatus struct {\n\tJob config.Job\n\tCurrentBuild builds.Build\n}\n\nfunc (handler *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t_, hideResources := r.URL.Query()[\"hide-resources\"]\n\tshowResources := !hideResources\n\n\tdata := TemplateData{\n\t\tNodes: []DotNode{},\n\t\tEdges: []DotEdge{},\n\t}\n\n\tlog := handler.logger.Session(\"index\")\n\n\tcurrentBuilds := map[string]builds.Build{}\n\n\tfor _, job := range handler.jobs {\n\t\tcurrentBuild, err := handler.db.GetCurrentBuild(job.Name)\n\t\tif err != nil {\n\t\t\tcurrentBuild.Status = builds.StatusPending\n\t\t}\n\n\t\tcurrentBuilds[job.Name] = currentBuild\n\n\t\tdata.Jobs = append(data.Jobs, JobStatus{\n\t\t\tJob: job,\n\t\t\tCurrentBuild: currentBuild,\n\t\t})\n\t}\n\n\tif showResources {\n\t\tfor _, resource := range handler.resources {\n\t\t\tresourceID := resourceNode(resource.Name)\n\n\t\t\tresourceURI, _ := routes.Routes.CreatePathForRoute(routes.GetResource, rata.Params{\n\t\t\t\t\"resource\": resource.Name,\n\t\t\t})\n\n\t\t\tfailing, checking := handler.radar.ResourceStatus(resource.Name)\n\n\t\t\tvar status string\n\t\t\tif failing {\n\t\t\t\tstatus = \"failing\"\n\t\t\t} else {\n\t\t\t\tstatus = \"ok\"\n\t\t\t}\n\n\t\t\tif checking {\n\t\t\t\tstatus += \" checking\"\n\t\t\t}\n\n\t\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\t\tID: resourceID,\n\t\t\t\tValue: map[string]string{\n\t\t\t\t\t\"label\": fmt.Sprintf(`<h1 class=\"resource\"><a href=\"%s\">%s<\/a><\/h1>`, resourceURI, resource.Name),\n\t\t\t\t\t\"type\": \"resource\",\n\t\t\t\t\t\"status\": status,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, job := range handler.jobs {\n\t\tjobID := jobNode(job.Name)\n\t\tcurrentBuild := currentBuilds[job.Name]\n\n\t\tvar buildURI string\n\t\tvar err error\n\n\t\tif currentBuild.Name != \"\" {\n\t\t\tbuildURI, err = routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{\n\t\t\t\t\"job\": job.Name,\n\t\t\t\t\"build\": currentBuild.Name,\n\t\t\t})\n\t\t} else {\n\t\t\tbuildURI, err = routes.Routes.CreatePathForRoute(routes.GetJob, rata.Params{\n\t\t\t\t\"job\": job.Name,\n\t\t\t})\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed-to-create-route\", err)\n\t\t}\n\n\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\tID: jobID,\n\t\t\tValue: map[string]string{\n\t\t\t\t\"label\": fmt.Sprintf(`<h1 class=\"job\"><a href=\"%s\">%s<\/a>`, buildURI, job.Name),\n\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t\"type\": \"job\",\n\t\t\t},\n\t\t})\n\n\t\tedges := map[string]DotEdge{}\n\n\t\tfor _, input := range job.Inputs {\n\t\t\tif len(input.Passed) > 0 {\n\t\t\t\tvar nodeID string\n\n\t\t\t\tif len(input.Passed) > 1 {\n\t\t\t\t\tnodeID = jobID + \"-input-\" + input.Resource\n\n\t\t\t\t\tdata.Nodes = append(data.Nodes, DotNode{\n\t\t\t\t\t\tID: nodeID,\n\t\t\t\t\t\tValue: map[string]string{\n\t\t\t\t\t\t\t\"useDef\": \"gateway\",\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\n\t\t\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\t\t\tSource: nodeID,\n\t\t\t\t\t\tDestination: jobID,\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tnodeID = jobID\n\t\t\t\t}\n\n\t\t\t\tfor _, passed := range input.Passed {\n\t\t\t\t\tcurrentBuild := currentBuilds[passed]\n\n\t\t\t\t\tpassedJob, found := handler.jobs.Lookup(passed)\n\t\t\t\t\tif !found {\n\t\t\t\t\t\tpanic(\"unknown job: \" + passed)\n\t\t\t\t\t}\n\n\t\t\t\t\texistingEdge, found := edges[passed]\n\t\t\t\t\tif found {\n\t\t\t\t\t\tif len(passedJob.Inputs) > 1 {\n\t\t\t\t\t\t\texistingEdge.Value[\"label\"] += \"\\n\" + input.Resource\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvalue := map[string]string{\n\t\t\t\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(passedJob.Inputs) > 1 {\n\t\t\t\t\t\t\tvalue[\"label\"] = input.Resource\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tedges[passed] = DotEdge{\n\t\t\t\t\t\t\tSource: jobNode(passed),\n\t\t\t\t\t\t\tDestination: nodeID,\n\t\t\t\t\t\t\tValue: value,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if showResources {\n\t\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\t\tSource: resourceNode(input.Resource),\n\t\t\t\t\tDestination: jobID,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tfor _, edge := range edges {\n\t\t\tdata.Edges = append(data.Edges, edge)\n\t\t}\n\n\t\tif showResources {\n\t\t\tfor _, output := range job.Outputs {\n\t\t\t\tdata.Edges = append(data.Edges, DotEdge{\n\t\t\t\t\tSource: jobID,\n\t\t\t\t\tDestination: resourceNode(output.Resource),\n\t\t\t\t\tValue: map[string]string{\n\t\t\t\t\t\t\"status\": string(currentBuild.Status),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\terr := handler.template.Execute(w, data)\n\tif err != nil {\n\t\tlog.Fatal(\"failed-to-execute-template\", err, lager.Data{\n\t\t\t\"template-data\": data,\n\t\t})\n\t}\n}\n\nfunc resourceNode(resource string) string {\n\treturn \"resource-\" + resource\n}\n\nfunc jobNode(job string) string {\n\treturn \"job-\" + job\n}\n<|endoftext|>"} {"text":"<commit_before>package influx\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/influxdata\/chronograf\"\n)\n\nvar _ chronograf.TimeSeries = &Client{}\nvar _ chronograf.TSDBStatus = &Client{}\nvar _ chronograf.Databases = &Client{}\n\n\/\/ Shared transports for all clients to prevent leaking connections\nvar (\n\tskipVerifyTransport = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tdefaultTransport = &http.Transport{}\n)\n\n\/\/ Client is a device for retrieving time series data from an InfluxDB instance\ntype Client struct {\n\tURL *url.URL\n\tAuthorizer Authorizer\n\tInsecureSkipVerify bool\n\tLogger chronograf.Logger\n}\n\n\/\/ Response is a partial JSON decoded InfluxQL response used\n\/\/ to check for some errors\ntype Response struct {\n\tResults json.RawMessage\n\tErr string `json:\"error,omitempty\"`\n}\n\n\/\/ MarshalJSON returns the raw results bytes from the response\nfunc (r Response) MarshalJSON() ([]byte, error) {\n\treturn r.Results, nil\n}\n\nfunc (c *Client) query(u *url.URL, q chronograf.Query) (chronograf.Response, error) {\n\tu.Path = \"query\"\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tcommand := q.Command\n\t\/\/ TODO(timraymond): move this upper Query() function\n\tif len(q.TemplateVars) > 0 {\n\t\tcommand = TemplateReplace(q.Command, q.TemplateVars)\n\t}\n\tlogs := c.Logger.\n\t\tWithField(\"component\", \"proxy\").\n\t\tWithField(\"host\", req.Host).\n\t\tWithField(\"command\", command).\n\t\tWithField(\"db\", q.DB).\n\t\tWithField(\"rp\", q.RP)\n\tlogs.Debug(\"query\")\n\n\tparams := req.URL.Query()\n\tparams.Set(\"q\", command)\n\tparams.Set(\"db\", q.DB)\n\tparams.Set(\"rp\", q.RP)\n\tparams.Set(\"epoch\", \"ms\") \/\/ TODO(timraymond): set this based on analysis\n\treq.URL.RawQuery = params.Encode()\n\n\tif c.Authorizer != nil {\n\t\tif err := c.Authorizer.Set(req); err != nil {\n\t\t\tlogs.Error(\"Error setting authorization header \", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thc := &http.Client{}\n\tif c.InsecureSkipVerify {\n\t\thc.Transport = skipVerifyTransport\n\t} else {\n\t\thc.Transport = defaultTransport\n\t}\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tdec := json.NewDecoder(resp.Body)\n\tdecErr := dec.Decode(&response)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"received status code %d from server: err: %s\", resp.StatusCode, response.Err)\n\t}\n\n\t\/\/ ignore this error if we got an invalid status code\n\tif decErr != nil && decErr.Error() == \"EOF\" && resp.StatusCode != http.StatusOK {\n\t\tdecErr = nil\n\t}\n\n\t\/\/ If we got a valid decode error, send that back\n\tif decErr != nil {\n\t\tlogs.WithField(\"influx_status\", resp.StatusCode).\n\t\t\tError(\"Error parsing results from influxdb: err:\", decErr)\n\t\treturn nil, decErr\n\t}\n\n\t\/\/ If we don't have an error in our json response, and didn't get statusOK\n\t\/\/ then send back an error\n\tif resp.StatusCode != http.StatusOK && response.Err != \"\" {\n\t\tlogs.\n\t\t\tWithField(\"influx_status\", resp.StatusCode).\n\t\t\tError(\"Received non-200 response from influxdb\")\n\n\t\treturn &response, fmt.Errorf(\"received status code %d from server\",\n\t\t\tresp.StatusCode)\n\t}\n\treturn &response, nil\n}\n\ntype result struct {\n\tResponse chronograf.Response\n\tErr error\n}\n\n\/\/ Query issues a request to a configured InfluxDB instance for time series\n\/\/ information specified by query. Queries must be \"fully-qualified,\" and\n\/\/ include both the database and retention policy. In-flight requests can be\n\/\/ cancelled using the provided context.\nfunc (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {\n\tresps := make(chan (result))\n\tgo func() {\n\t\tresp, err := c.query(c.URL, q)\n\t\tresps <- result{resp, err}\n\t}()\n\n\tselect {\n\tcase resp := <-resps:\n\t\treturn resp.Response, resp.Err\n\tcase <-ctx.Done():\n\t\treturn nil, chronograf.ErrUpstreamTimeout\n\t}\n}\n\n\/\/ Connect caches the URL and optional Bearer Authorization for the data source\nfunc (c *Client) Connect(ctx context.Context, src *chronograf.Source) error {\n\tu, err := url.Parse(src.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Authorizer = DefaultAuthorization(src)\n\t\/\/ Only allow acceptance of all certs if the scheme is https AND the user opted into to the setting.\n\tif u.Scheme == \"https\" && src.InsecureSkipVerify {\n\t\tc.InsecureSkipVerify = src.InsecureSkipVerify\n\t}\n\n\tc.URL = u\n\treturn nil\n}\n\n\/\/ Users transforms InfluxDB into a user store\nfunc (c *Client) Users(ctx context.Context) chronograf.UsersStore {\n\treturn c\n}\n\n\/\/ Roles aren't support in OSS\nfunc (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) {\n\treturn nil, fmt.Errorf(\"Roles not support in open-source InfluxDB. Roles are support in Influx Enterprise\")\n}\n\n\/\/ Ping hits the influxdb ping endpoint and returns the type of influx\nfunc (c *Client) Ping(ctx context.Context) error {\n\t_, _, err := c.pingTimeout(ctx)\n\treturn err\n}\n\n\/\/ Version hits the influxdb ping endpoint and returns the version of influx\nfunc (c *Client) Version(ctx context.Context) (string, error) {\n\tversion, _, err := c.pingTimeout(ctx)\n\treturn version, err\n}\n\n\/\/ Type hits the influxdb ping endpoint and returns the type of influx running\nfunc (c *Client) Type(ctx context.Context) (string, error) {\n\t_, tsdbType, err := c.pingTimeout(ctx)\n\treturn tsdbType, err\n}\n\nfunc (c *Client) pingTimeout(ctx context.Context) (string, string, error) {\n\tresps := make(chan (pingResult))\n\tgo func() {\n\t\tversion, tsdbType, err := c.ping(c.URL)\n\t\tresps <- pingResult{version, tsdbType, err}\n\t}()\n\n\tselect {\n\tcase resp := <-resps:\n\t\treturn resp.Version, resp.Type, resp.Err\n\tcase <-ctx.Done():\n\t\treturn \"\", \"\", chronograf.ErrUpstreamTimeout\n\t}\n}\n\ntype pingResult struct {\n\tVersion string\n\tType string\n\tErr error\n}\n\nfunc (c *Client) ping(u *url.URL) (string, string, error) {\n\tu.Path = \"ping\"\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\thc := &http.Client{}\n\tif c.InsecureSkipVerify {\n\t\thc.Transport = skipVerifyTransport\n\t} else {\n\t\thc.Transport = defaultTransport\n\t}\n\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent {\n\t\tvar err = fmt.Errorf(string(body))\n\t\treturn \"\", \"\", err\n\t}\n\n\tversion := resp.Header.Get(\"X-Influxdb-Version\")\n\tif strings.Contains(version, \"-c\") {\n\t\treturn version, chronograf.InfluxEnterprise, nil\n\t} else if strings.Contains(version, \"relay\") {\n\t\treturn version, chronograf.InfluxRelay, nil\n\t}\n\treturn version, chronograf.InfluxDB, nil\n}\n<commit_msg>Add check for enterprise build type header<commit_after>package influx\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/influxdata\/chronograf\"\n)\n\nvar _ chronograf.TimeSeries = &Client{}\nvar _ chronograf.TSDBStatus = &Client{}\nvar _ chronograf.Databases = &Client{}\n\n\/\/ Shared transports for all clients to prevent leaking connections\nvar (\n\tskipVerifyTransport = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tdefaultTransport = &http.Transport{}\n)\n\n\/\/ Client is a device for retrieving time series data from an InfluxDB instance\ntype Client struct {\n\tURL *url.URL\n\tAuthorizer Authorizer\n\tInsecureSkipVerify bool\n\tLogger chronograf.Logger\n}\n\n\/\/ Response is a partial JSON decoded InfluxQL response used\n\/\/ to check for some errors\ntype Response struct {\n\tResults json.RawMessage\n\tErr string `json:\"error,omitempty\"`\n}\n\n\/\/ MarshalJSON returns the raw results bytes from the response\nfunc (r Response) MarshalJSON() ([]byte, error) {\n\treturn r.Results, nil\n}\n\nfunc (c *Client) query(u *url.URL, q chronograf.Query) (chronograf.Response, error) {\n\tu.Path = \"query\"\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\tcommand := q.Command\n\t\/\/ TODO(timraymond): move this upper Query() function\n\tif len(q.TemplateVars) > 0 {\n\t\tcommand = TemplateReplace(q.Command, q.TemplateVars)\n\t}\n\tlogs := c.Logger.\n\t\tWithField(\"component\", \"proxy\").\n\t\tWithField(\"host\", req.Host).\n\t\tWithField(\"command\", command).\n\t\tWithField(\"db\", q.DB).\n\t\tWithField(\"rp\", q.RP)\n\tlogs.Debug(\"query\")\n\n\tparams := req.URL.Query()\n\tparams.Set(\"q\", command)\n\tparams.Set(\"db\", q.DB)\n\tparams.Set(\"rp\", q.RP)\n\tparams.Set(\"epoch\", \"ms\") \/\/ TODO(timraymond): set this based on analysis\n\treq.URL.RawQuery = params.Encode()\n\n\tif c.Authorizer != nil {\n\t\tif err := c.Authorizer.Set(req); err != nil {\n\t\t\tlogs.Error(\"Error setting authorization header \", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\thc := &http.Client{}\n\tif c.InsecureSkipVerify {\n\t\thc.Transport = skipVerifyTransport\n\t} else {\n\t\thc.Transport = defaultTransport\n\t}\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tdec := json.NewDecoder(resp.Body)\n\tdecErr := dec.Decode(&response)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"received status code %d from server: err: %s\", resp.StatusCode, response.Err)\n\t}\n\n\t\/\/ ignore this error if we got an invalid status code\n\tif decErr != nil && decErr.Error() == \"EOF\" && resp.StatusCode != http.StatusOK {\n\t\tdecErr = nil\n\t}\n\n\t\/\/ If we got a valid decode error, send that back\n\tif decErr != nil {\n\t\tlogs.WithField(\"influx_status\", resp.StatusCode).\n\t\t\tError(\"Error parsing results from influxdb: err:\", decErr)\n\t\treturn nil, decErr\n\t}\n\n\t\/\/ If we don't have an error in our json response, and didn't get statusOK\n\t\/\/ then send back an error\n\tif resp.StatusCode != http.StatusOK && response.Err != \"\" {\n\t\tlogs.\n\t\t\tWithField(\"influx_status\", resp.StatusCode).\n\t\t\tError(\"Received non-200 response from influxdb\")\n\n\t\treturn &response, fmt.Errorf(\"received status code %d from server\",\n\t\t\tresp.StatusCode)\n\t}\n\treturn &response, nil\n}\n\ntype result struct {\n\tResponse chronograf.Response\n\tErr error\n}\n\n\/\/ Query issues a request to a configured InfluxDB instance for time series\n\/\/ information specified by query. Queries must be \"fully-qualified,\" and\n\/\/ include both the database and retention policy. In-flight requests can be\n\/\/ cancelled using the provided context.\nfunc (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {\n\tresps := make(chan (result))\n\tgo func() {\n\t\tresp, err := c.query(c.URL, q)\n\t\tresps <- result{resp, err}\n\t}()\n\n\tselect {\n\tcase resp := <-resps:\n\t\treturn resp.Response, resp.Err\n\tcase <-ctx.Done():\n\t\treturn nil, chronograf.ErrUpstreamTimeout\n\t}\n}\n\n\/\/ Connect caches the URL and optional Bearer Authorization for the data source\nfunc (c *Client) Connect(ctx context.Context, src *chronograf.Source) error {\n\tu, err := url.Parse(src.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Authorizer = DefaultAuthorization(src)\n\t\/\/ Only allow acceptance of all certs if the scheme is https AND the user opted into to the setting.\n\tif u.Scheme == \"https\" && src.InsecureSkipVerify {\n\t\tc.InsecureSkipVerify = src.InsecureSkipVerify\n\t}\n\n\tc.URL = u\n\treturn nil\n}\n\n\/\/ Users transforms InfluxDB into a user store\nfunc (c *Client) Users(ctx context.Context) chronograf.UsersStore {\n\treturn c\n}\n\n\/\/ Roles aren't support in OSS\nfunc (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) {\n\treturn nil, fmt.Errorf(\"Roles not support in open-source InfluxDB. Roles are support in Influx Enterprise\")\n}\n\n\/\/ Ping hits the influxdb ping endpoint and returns the type of influx\nfunc (c *Client) Ping(ctx context.Context) error {\n\t_, _, err := c.pingTimeout(ctx)\n\treturn err\n}\n\n\/\/ Version hits the influxdb ping endpoint and returns the version of influx\nfunc (c *Client) Version(ctx context.Context) (string, error) {\n\tversion, _, err := c.pingTimeout(ctx)\n\treturn version, err\n}\n\n\/\/ Type hits the influxdb ping endpoint and returns the type of influx running\nfunc (c *Client) Type(ctx context.Context) (string, error) {\n\t_, tsdbType, err := c.pingTimeout(ctx)\n\treturn tsdbType, err\n}\n\nfunc (c *Client) pingTimeout(ctx context.Context) (string, string, error) {\n\tresps := make(chan (pingResult))\n\tgo func() {\n\t\tversion, tsdbType, err := c.ping(c.URL)\n\t\tresps <- pingResult{version, tsdbType, err}\n\t}()\n\n\tselect {\n\tcase resp := <-resps:\n\t\treturn resp.Version, resp.Type, resp.Err\n\tcase <-ctx.Done():\n\t\treturn \"\", \"\", chronograf.ErrUpstreamTimeout\n\t}\n}\n\ntype pingResult struct {\n\tVersion string\n\tType string\n\tErr error\n}\n\nfunc (c *Client) ping(u *url.URL) (string, string, error) {\n\tu.Path = \"ping\"\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\thc := &http.Client{}\n\tif c.InsecureSkipVerify {\n\t\thc.Transport = skipVerifyTransport\n\t} else {\n\t\thc.Transport = defaultTransport\n\t}\n\n\tresp, err := hc.Do(req)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent {\n\t\tvar err = fmt.Errorf(string(body))\n\t\treturn \"\", \"\", err\n\t}\n\n\tversion := resp.Header.Get(\"X-Influxdb-Build\")\n\tif version == \"ENT\" {\n\t\treturn version, chronograf.InfluxEnterprise, nil\n\t}\n\tversion = resp.Header.Get(\"X-Influxdb-Version\")\n\tif strings.Contains(version, \"-c\") {\n\t\treturn version, chronograf.InfluxEnterprise, nil\n\t} else if strings.Contains(version, \"relay\") {\n\t\treturn version, chronograf.InfluxRelay, nil\n\t}\n\n\treturn version, chronograf.InfluxDB, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage parallel_test\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\tstdtesting \"testing\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/utils\/parallel\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\ntype parallelSuite struct{}\n\nvar _ = gc.Suite(¶llelSuite{})\n\nfunc (*parallelSuite) TestParallelMaxPar(c *gc.C) {\n\tconst (\n\t\ttotalDo = 10\n\t\tmaxConcurrentRunnersPar = 3\n\t)\n\tvar mu sync.Mutex\n\tmaxConcurrentRunners := 0\n\tnbRunners := 0\n\tnbRuns := 0\n\tparallelRunner := parallel.NewRun(maxConcurrentRunnersPar)\n\tfor i := 0; i < totalDo; i++ {\n\t\tparallelRunner.Do(func() error {\n\t\t\tmu.Lock()\n\t\t\tnbRuns++\n\t\t\tnbRunners++\n\t\t\tif nbRunners > maxConcurrentRunners {\n\t\t\t\tmaxConcurrentRunners = nbRunners\n\t\t\t}\n\t\t\tmu.Unlock()\n\t\t\ttime.Sleep(time.Second\/10)\n\t\t\tmu.Lock()\n\t\t\tnbRunners--\n\t\t\tmu.Unlock()\n\t\t\treturn nil\n\t\t})\n\t}\n\terr := parallelRunner.Wait()\n\tif nbRunners != 0 {\n\t\tc.Errorf(\"%d functions still running\", nbRunners)\n\t}\n\tif nbRuns != totalDo {\n\t\tc.Errorf(\"all functions not executed; want %d got %d\", totalDo, nbRuns)\n\t}\n\tc.Check(err, gc.IsNil)\n\tif maxConcurrentRunners != maxConcurrentRunnersPar {\n\t\tc.Errorf(\"wrong number of do's ran at once; want %d got %d\", maxConcurrentRunnersPar, maxConcurrentRunners)\n\t}\n}\n\ntype intError int\n\nfunc (intError) Error() string {\n\treturn \"error\"\n}\n\nfunc (*parallelSuite) TestParallelError(c *gc.C) {\n\tconst (\n\t\ttotalDo = 10\n\t\terrDo = 5\n\t)\n\tparallelRun := parallel.NewRun(6)\n\tfor i := 0; i < totalDo; i++ {\n\t\ti := i\n\t\tif i >= errDo {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn intError(i)\n\t\t\t})\n\t\t} else {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t}\n\terr := parallelRun.Wait()\n\tc.Check(err, gc.NotNil)\n\terrs := err.(parallel.Errors)\n\tc.Check(len(errs), gc.Equals, totalDo-errDo)\n\tints := make([]int, len(errs))\n\tfor i, err := range errs {\n\t\tints[i] = int(err.(intError))\n\t}\n\tsort.Ints(ints)\n\tfor i, n := range ints {\n\t\tc.Check(n, gc.Equals, i+errDo)\n\t}\n}\n\nfunc (*parallelSuite) TestZeroWorkerPanics(c *gc.C) {\n\tdefer func() {\n\t\tr := recover()\n\t\tc.Check(r, gc.Matches, \"parameter maxPar must be >= 1\")\n\t}()\n\tparallel.NewRun(0)\n}\n<commit_msg>gofmt.<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage parallel_test\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\tstdtesting \"testing\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/utils\/parallel\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\ntype parallelSuite struct{}\n\nvar _ = gc.Suite(¶llelSuite{})\n\nfunc (*parallelSuite) TestParallelMaxPar(c *gc.C) {\n\tconst (\n\t\ttotalDo = 10\n\t\tmaxConcurrentRunnersPar = 3\n\t)\n\tvar mu sync.Mutex\n\tmaxConcurrentRunners := 0\n\tnbRunners := 0\n\tnbRuns := 0\n\tparallelRunner := parallel.NewRun(maxConcurrentRunnersPar)\n\tfor i := 0; i < totalDo; i++ {\n\t\tparallelRunner.Do(func() error {\n\t\t\tmu.Lock()\n\t\t\tnbRuns++\n\t\t\tnbRunners++\n\t\t\tif nbRunners > maxConcurrentRunners {\n\t\t\t\tmaxConcurrentRunners = nbRunners\n\t\t\t}\n\t\t\tmu.Unlock()\n\t\t\ttime.Sleep(time.Second \/ 10)\n\t\t\tmu.Lock()\n\t\t\tnbRunners--\n\t\t\tmu.Unlock()\n\t\t\treturn nil\n\t\t})\n\t}\n\terr := parallelRunner.Wait()\n\tif nbRunners != 0 {\n\t\tc.Errorf(\"%d functions still running\", nbRunners)\n\t}\n\tif nbRuns != totalDo {\n\t\tc.Errorf(\"all functions not executed; want %d got %d\", totalDo, nbRuns)\n\t}\n\tc.Check(err, gc.IsNil)\n\tif maxConcurrentRunners != maxConcurrentRunnersPar {\n\t\tc.Errorf(\"wrong number of do's ran at once; want %d got %d\", maxConcurrentRunnersPar, maxConcurrentRunners)\n\t}\n}\n\ntype intError int\n\nfunc (intError) Error() string {\n\treturn \"error\"\n}\n\nfunc (*parallelSuite) TestParallelError(c *gc.C) {\n\tconst (\n\t\ttotalDo = 10\n\t\terrDo = 5\n\t)\n\tparallelRun := parallel.NewRun(6)\n\tfor i := 0; i < totalDo; i++ {\n\t\ti := i\n\t\tif i >= errDo {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn intError(i)\n\t\t\t})\n\t\t} else {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t}\n\terr := parallelRun.Wait()\n\tc.Check(err, gc.NotNil)\n\terrs := err.(parallel.Errors)\n\tc.Check(len(errs), gc.Equals, totalDo-errDo)\n\tints := make([]int, len(errs))\n\tfor i, err := range errs {\n\t\tints[i] = int(err.(intError))\n\t}\n\tsort.Ints(ints)\n\tfor i, n := range ints {\n\t\tc.Check(n, gc.Equals, i+errDo)\n\t}\n}\n\nfunc (*parallelSuite) TestZeroWorkerPanics(c *gc.C) {\n\tdefer func() {\n\t\tr := recover()\n\t\tc.Check(r, gc.Matches, \"parameter maxPar must be >= 1\")\n\t}()\n\tparallel.NewRun(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/labstack\/echo\"\n\n\t\"labix.org\/v2\/mgo\"\n)\n\nfunc main() {\n\n\tsession, err := mgo.Dial(\"127.0.0.1\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer session.Close()\n\n\te := echo.New()\n\te.Static(\"\/\", \"public\")\n\te.Run(\":3050\")\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"github.com\/labstack\/echo\"\n\n\t\"labix.org\/v2\/mgo\"\n)\n\nfunc main() {\n\n\tsession, err := mgo.Dial(\"127.0.0.1\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer session.Close()\n\n\te := echo.New()\n\te.Static(\"\/\", \"public\")\n\te.Run(\":5025\")\n}\n<|endoftext|>"} {"text":"<commit_before>package helpers\n\nfunc ConvertTypeForJS(s string) string {\n\tconv := map[string]string{\n\t\t\"array\": \"array\",\n\t\t\"boolean\": \"boolean\",\n\t\t\"integer\": \"number\",\n\t\t\"number\": \"number\",\n\t\t\"object\": \"object\",\n\t\t\"string\": \"string\",\n\t}\n\treturn conv[s]\n}\n<commit_msg>Return given type when target is not found<commit_after>package helpers\n\nfunc ConvertTypeForJS(s string) string {\n\tv, ok := map[string]string{\n\t\t\"integer\": \"number\",\n\t}[s]\n\tif !ok {\n\t\treturn s\n\t}\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\nimport (\n\t\"github.com\/qiniu\/api\/conf\"\n\t\"github.com\/qiniu\/api\/rsf\"\n)\n\nfunc main() {\n\tak := flag.String(\"ak\", \"\", \"access key\")\n\tsk := flag.String(\"sk\", \"\", \"secret key\")\n\tbucket := flag.String(\"bucket\", \"\", \"bucket\")\n\tout := flag.String(\"o\", \"\", \"output file\")\n\tflag.Parse()\n\tif ak == nil || sk == nil || bucket == nil || out == nil {\n\t\tlog.Fatalln(\"invalid args\")\n\t\treturn\n\t}\n\tconf.ACCESS_KEY = *ak\n\tconf.SECRET_KEY = *sk\n\tlist := []rsf.ListItem{}\n\tclient := rsf.New(nil)\n\tmarker := \"\"\n\tvar err error\n\tfor {\n\t\tvar ret []rsf.ListItem\n\t\tret, marker, err = client.ListPrefix(nil, *bucket, \"\", marker, 1000)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatalln(\"error occured!\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlist = append(list, ret...)\n\t}\n\tdata, err := json.MarshalIndent(list, \"\", \"\")\n\tif err != nil {\n\t\tlog.Fatalln(\"list data error\", err)\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(*out, data, 0)\n\tif err != nil {\n\t\tlog.Fatalln(\"write file failed\")\n\t}\n}\n<commit_msg>usage<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n)\n\nimport (\n\t\"github.com\/qiniu\/api\/conf\"\n\t\"github.com\/qiniu\/api\/rsf\"\n)\n\nfunc main() {\n\tak := flag.String(\"ak\", \"\", \"access key\")\n\tsk := flag.String(\"sk\", \"\", \"secret key\")\n\tbucket := flag.String(\"bucket\", \"\", \"bucket\")\n\tout := flag.String(\"o\", \"\", \"output file\")\n\tflag.Parse()\n\tif *ak == \"\" || *sk == \"\" || *bucket == \"\" || *out == \"\" {\n\t\tflag.PrintDefaults()\n\t\tlog.Fatalln(\"invalid args\")\n\t\treturn\n\t}\n\tconf.ACCESS_KEY = *ak\n\tconf.SECRET_KEY = *sk\n\tlist := []rsf.ListItem{}\n\tclient := rsf.New(nil)\n\tmarker := \"\"\n\tvar err error\n\tfor {\n\t\tvar ret []rsf.ListItem\n\t\tret, marker, err = client.ListPrefix(nil, *bucket, \"\", marker, 1000)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Fatalln(\"error occured!\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlist = append(list, ret...)\n\t}\n\tdata, err := json.MarshalIndent(list, \"\", \"\")\n\tif err != nil {\n\t\tlog.Fatalln(\"list data error\", err)\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(*out, data, 0)\n\tif err != nil {\n\t\tlog.Fatalln(\"write file failed\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ templates.go\n\/\/ template model interfaces\n\/\/\npackage srnd\n\nimport (\n \"fmt\"\n \"github.com\/hoisie\/mustache\"\n \"io\"\n \"io\/ioutil\"\n \"log\"\n \"path\/filepath\"\n \"sort\"\n)\n\ntype templateEngine struct {\n \/\/ every newsgroup\n groups map[string]GroupModel\n \/\/ loaded templates\n templates map[string]string\n \/\/ root directory for templates\n template_dir string\n}\n\nfunc (self templateEngine) templateCached(name string) (ok bool) {\n _, ok = self.templates[name]\n return \n}\n\nfunc (self templateEngine) getTemplate(name string) (t string) {\n if self.templateCached(name) {\n t, _ = self.templates[name]\n } else {\n \/\/ ignores errors, this is probably bad\n b, _ := ioutil.ReadFile(filepath.Join(self.template_dir, name))\n t = string(b)\n self.templates[name] = t\n }\n return\n}\n\nfunc (self templateEngine) renderTemplate(name string, obj interface{}) string {\n t := self.getTemplate(name)\n return mustache.Render(t, obj)\n}\n\n\/\/ get a board model given a newsgroup\n\/\/ load un updated board model if we don't have it\nfunc (self templateEngine) obtainBoard(prefix, frontend, group string, db Database) (model GroupModel) {\n model, ok := self.groups[group]\n if ! ok {\n p := db.GetGroupPageCount(group)\n pages := int(p)\n \/\/ ignore error\n perpage, _ := db.GetThreadsPerPage(group)\n for page := 0 ; page < pages ; page ++ {\n model = append(model, db.GetGroupForPage(prefix, frontend, group, page, int(perpage)))\n }\n self.groups[group] = model\n }\n return\n\n}\n\/\/ generate a board page\nfunc (self templateEngine) genBoardPage(prefix, frontend, newsgroup string, page int, outfile string, db Database) {\n\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update it\n board = board.Update(page, db)\n if page >= len(board) {\n log.Println(\"board page should not exist\", newsgroup, \"page\", page)\n return\n }\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n board[page].RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"error generating board page\", page, \"for\", newsgroup, err)\n }\n \/\/ save it\n self.groups[newsgroup] = board\n}\n\n\/\/ generate every page for a board\nfunc (self templateEngine) genBoard(prefix, frontend, newsgroup, outdir string, db Database) {\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update it\n board = board.UpdateAll(db)\n \/\/ save it\n self.groups[newsgroup] = board\n\n pages := len(board)\n for page := 0 ; page < pages ; page ++ {\n outfile := filepath.Join(outdir, fmt.Sprintf(\"%s-%d.html\", newsgroup, page))\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n board[page].RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"error generating board page\", page, \"for\", newsgroup, err)\n }\n }\n}\n\nfunc (self templateEngine) genUkko(prefix, frontend, outfile string, database Database) {\n \/\/ get the last 15 bumped threads globally\n var threads []ThreadModel\n for _, article := range database.GetLastBumpedThreads(\"\", 15) {\n newsgroup, msgid := article[1], article[0]\n \/\/ obtain board\n board := self.obtainBoard(prefix, frontend, newsgroup, database)\n board = board.Update(0, database)\n for _, th := range(board[0].Threads()) {\n if th.OP().MessageID() == msgid {\n threads = append(threads, th.Update(database))\n break\n }\n }\n \/\/ save state of board\n self.groups[newsgroup] = board\n }\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n io.WriteString(wr, template.renderTemplate(\"ukko.mustache\", map[string]interface{} { \"prefix\" : prefix, \"threads\" : threads }))\n wr.Close()\n } else {\n log.Println(\"error generating ukko\", err)\n }\n}\n\nfunc (self templateEngine) genThread(messageID, prefix, frontend, outfile string, db Database) {\n\n newsgroup, page, err := db.GetPageForRootMessage(messageID)\n if err != nil {\n log.Println(\"did not get root post info when regenerating thread\", messageID, err)\n return\n }\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update our thread\n if int(page) >= len(board) {\n log.Println(\"we need to update the board model\", page, \"does not exist yet\")\n board = board.UpdateAll(db)\n self.groups[newsgroup] = board\n if int(page) >= len(board) {\n log.Println(\"we are going way too fast not regenerating thread\", messageID)\n return\n }\n }\n \/\/ if we lack this thread, reload the board page\n \/\/ otherwise just reload the thread\n if board[page].HasThread(messageID) {\n board[page] = board[page].UpdateThread(messageID, db)\n } else {\n board[page] = board[page].Update(db)\n }\n for _, th := range board[page].Threads() {\n if th.OP().MessageID() == messageID {\n th = th.Update(db)\n \/\/ we found it\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n th.RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"did not write\", outfile, err)\n }\n }\n }\n \/\/ save it\n self.groups[newsgroup] = board\n}\n\nfunc newTemplateEngine(dir string) *templateEngine {\n return &templateEngine{\n groups: make(map[string]GroupModel),\n templates: make(map[string]string),\n template_dir: dir,\n }\n}\n\nvar template = newTemplateEngine(defaultTemplateDir())\n\n\nfunc renderPostForm(prefix, board, op_msg_id string) string {\n url := prefix + \"post\/\" + board\n button := \"New Thread\"\n if op_msg_id != \"\" {\n button = \"Reply\"\n }\n return template.renderTemplate(\"postform.mustache\", map[string]string { \"post_url\" : url, \"reference\" : op_msg_id , \"button\" : button } )\n}\n\n\n\nfunc (self templateEngine) genFrontPage(top_count int, frontend_name, outfile string, db Database) {\n \/\/ the graph for the front page\n var frontpage_graph boardPageRows\n\n \/\/ for each group\n groups := db.GetAllNewsgroups()\n for idx, group := range groups {\n if idx >= top_count {\n break\n }\n \/\/ posts per hour\n hour := db.CountPostsInGroup(group, 3600)\n \/\/ posts per day\n day := db.CountPostsInGroup(group, 86400)\n \/\/ posts total\n all := db.CountPostsInGroup(group, 0)\n frontpage_graph = append(frontpage_graph, boardPageRow{\n All: all,\n Day: day,\n Hour: hour,\n Board: group,\n })\n }\n wr, err := OpenFileWriter(outfile)\n if err != nil {\n log.Println(\"cannot render front page\", err)\n return\n }\n\n param := make(map[string]interface{})\n sort.Sort(frontpage_graph)\n param[\"graph\"] = frontpage_graph\n param[\"frontend\"] = frontend_name\n param[\"totalposts\"] = db.ArticleCount()\n _, err = io.WriteString(wr, self.renderTemplate(\"frontpage.mustache\", param))\n if err != nil {\n log.Println(\"error writing front page\", err)\n }\n wr.Close()\n}\n<commit_msg>try fixing model caching<commit_after>\/\/\n\/\/ templates.go\n\/\/ template model interfaces\n\/\/\npackage srnd\n\nimport (\n \"fmt\"\n \"github.com\/hoisie\/mustache\"\n \"io\"\n \"io\/ioutil\"\n \"log\"\n \"path\/filepath\"\n \"sort\"\n)\n\ntype templateEngine struct {\n \/\/ every newsgroup\n groups map[string]GroupModel\n \/\/ loaded templates\n templates map[string]string\n \/\/ root directory for templates\n template_dir string\n}\n\nfunc (self templateEngine) templateCached(name string) (ok bool) {\n _, ok = self.templates[name]\n return \n}\n\nfunc (self templateEngine) getTemplate(name string) (t string) {\n if self.templateCached(name) {\n t, _ = self.templates[name]\n } else {\n \/\/ ignores errors, this is probably bad\n b, _ := ioutil.ReadFile(filepath.Join(self.template_dir, name))\n t = string(b)\n self.templates[name] = t\n }\n return\n}\n\nfunc (self templateEngine) renderTemplate(name string, obj interface{}) string {\n t := self.getTemplate(name)\n return mustache.Render(t, obj)\n}\n\n\/\/ get a board model given a newsgroup\n\/\/ load un updated board model if we don't have it\nfunc (self templateEngine) obtainBoard(prefix, frontend, group string, db Database) (model GroupModel) {\n model, ok := self.groups[group]\n if ! ok {\n p := db.GetGroupPageCount(group)\n pages := int(p)\n \/\/ ignore error\n perpage, _ := db.GetThreadsPerPage(group)\n for page := 0 ; page < pages ; page ++ {\n model = append(model, db.GetGroupForPage(prefix, frontend, group, page, int(perpage)))\n }\n self.groups[group] = model\n }\n return\n\n}\n\/\/ generate a board page\nfunc (self templateEngine) genBoardPage(prefix, frontend, newsgroup string, page int, outfile string, db Database) {\n\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update it\n board = board.Update(page, db)\n if page >= len(board) {\n log.Println(\"board page should not exist\", newsgroup, \"page\", page)\n return\n }\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n board[page].RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"error generating board page\", page, \"for\", newsgroup, err)\n }\n \/\/ save it\n self.groups[newsgroup] = board\n}\n\n\/\/ generate every page for a board\nfunc (self templateEngine) genBoard(prefix, frontend, newsgroup, outdir string, db Database) {\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update it\n board = board.UpdateAll(db)\n \/\/ save it\n self.groups[newsgroup] = board\n\n pages := len(board)\n for page := 0 ; page < pages ; page ++ {\n outfile := filepath.Join(outdir, fmt.Sprintf(\"%s-%d.html\", newsgroup, page))\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n board[page].RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"error generating board page\", page, \"for\", newsgroup, err)\n }\n }\n}\n\nfunc (self templateEngine) genUkko(prefix, frontend, outfile string, database Database) {\n \/\/ get the last 15 bumped threads globally\n var threads []ThreadModel\n for _, article := range database.GetLastBumpedThreads(\"\", 15) {\n newsgroup, msgid := article[1], article[0]\n \/\/ obtain board\n board := self.obtainBoard(prefix, frontend, newsgroup, database)\n board = board.Update(0, database)\n for _, th := range(board[0].Threads()) {\n if th.OP().MessageID() == msgid {\n threads = append(threads, th.Update(database))\n break\n }\n }\n \/\/ save state of board\n self.groups[newsgroup] = board\n }\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n io.WriteString(wr, template.renderTemplate(\"ukko.mustache\", map[string]interface{} { \"prefix\" : prefix, \"threads\" : threads }))\n wr.Close()\n } else {\n log.Println(\"error generating ukko\", err)\n }\n}\n\nfunc (self templateEngine) genThread(messageID, prefix, frontend, outfile string, db Database) {\n\n newsgroup, page, err := db.GetPageForRootMessage(messageID)\n if err != nil {\n log.Println(\"did not get root post info when regenerating thread\", messageID, err)\n return\n }\n var th ThreadModel\n \/\/ get it\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n \/\/ update our thread\n if int(page) < len(board) {\n \/\/ if we lack this thread, reload the board page\n \/\/ otherwise just reload the thread\n if board[page].HasThread(messageID) {\n board[page] = board[page].UpdateThread(messageID, db)\n } else {\n board[page] = board[page].Update(db)\n }\n for _, th = range board[page].Threads() {\n if th.OP().MessageID() == messageID {\n th = th.Update(db)\n }\n }\n } else {\n \/\/ something is wrong\n \/\/ find it manually\n board = board.UpdateAll(db)\n for idx, board_page := range board {\n if board_page.HasThread(messageID) {\n \/\/ we have it, obtain the thread\n for _, th = range board_page.Threads() {\n if th.OP().MessageID() == messageID {\n \/\/ update the thread\n th = th.Update(db)\n \/\/ break out of inner loop\n break\n }\n }\n \/\/ save the model\n board[idx] = board_page\n \/\/ break out of outer loop\n break\n }\n }\n }\n \/\/ by here if we don't have the thread model we'll panic\n \/\/ this is fine\n\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n th.RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"did not write\", outfile, err)\n }\n \/\/ save it\n self.groups[newsgroup] = board\n}\n\nfunc newTemplateEngine(dir string) *templateEngine {\n return &templateEngine{\n groups: make(map[string]GroupModel),\n templates: make(map[string]string),\n template_dir: dir,\n }\n}\n\nvar template = newTemplateEngine(defaultTemplateDir())\n\n\nfunc renderPostForm(prefix, board, op_msg_id string) string {\n url := prefix + \"post\/\" + board\n button := \"New Thread\"\n if op_msg_id != \"\" {\n button = \"Reply\"\n }\n return template.renderTemplate(\"postform.mustache\", map[string]string { \"post_url\" : url, \"reference\" : op_msg_id , \"button\" : button } )\n}\n\n\n\nfunc (self templateEngine) genFrontPage(top_count int, frontend_name, outfile string, db Database) {\n \/\/ the graph for the front page\n var frontpage_graph boardPageRows\n\n \/\/ for each group\n groups := db.GetAllNewsgroups()\n for idx, group := range groups {\n if idx >= top_count {\n break\n }\n \/\/ posts per hour\n hour := db.CountPostsInGroup(group, 3600)\n \/\/ posts per day\n day := db.CountPostsInGroup(group, 86400)\n \/\/ posts total\n all := db.CountPostsInGroup(group, 0)\n frontpage_graph = append(frontpage_graph, boardPageRow{\n All: all,\n Day: day,\n Hour: hour,\n Board: group,\n })\n }\n wr, err := OpenFileWriter(outfile)\n if err != nil {\n log.Println(\"cannot render front page\", err)\n return\n }\n\n param := make(map[string]interface{})\n sort.Sort(frontpage_graph)\n param[\"graph\"] = frontpage_graph\n param[\"frontend\"] = frontend_name\n param[\"totalposts\"] = db.ArticleCount()\n _, err = io.WriteString(wr, self.renderTemplate(\"frontpage.mustache\", param))\n if err != nil {\n log.Println(\"error writing front page\", err)\n }\n wr.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package headerfs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcwallet\/walletdb\"\n)\n\nvar (\n\t\/\/ indexBucket is the main top-level bucket for the header index.\n\t\/\/ Nothing is stored in this bucket other than the sub-buckets which\n\t\/\/ contains the indexes for the various header types.\n\tindexBucket = []byte(\"header-index\")\n\n\t\/\/ bitcoinTip is the key which tracks the \"tip\" of the block header\n\t\/\/ chain. The value of this key will be the current block hash of the\n\t\/\/ best known chain that we're synced to.\n\tbitcoinTip = []byte(\"bitcoin\")\n\n\t\/\/ regFilterTip is the key which tracks the \"tip\" of the regular\n\t\/\/ compact filter header chain. The value of this key will be the\n\t\/\/ current block hash of the best known chain that the headers for\n\t\/\/ regular filter are synced to.\n\tregFilterTip = []byte(\"regular\")\n\n\t\/\/ extFilterTip is the key which tracks the \"tip\" of the extended\n\t\/\/ compact filter header chain. The value of this key will be the\n\t\/\/ current block hash of the best known chain that the headers for\n\t\/\/ extended filter are synced to.\n\textFilterTip = []byte(\"ext\")\n)\n\nvar (\n\t\/\/ ErrHeightNotFound is returned when a specified height isn't found in\n\t\/\/ a target index.\n\tErrHeightNotFound = fmt.Errorf(\"target height not found in index\")\n\n\t\/\/ ErrHashNotFound is returned when a specified block hash isn't found\n\t\/\/ in a target index.\n\tErrHashNotFound = fmt.Errorf(\"target hash not found in index\")\n)\n\n\/\/ HeaderType is an enum-like type which defines the various header types that\n\/\/ are stored within the index.\ntype HeaderType uint8\n\nconst (\n\t\/\/ Block is the header type that represents regular Bitcoin block\n\t\/\/ headers.\n\tBlock HeaderType = iota\n\n\t\/\/ RegularFilter is a header type that represents the basic filter\n\t\/\/ header type for the filter header chain.\n\tRegularFilter\n)\n\nconst (\n\t\/\/ BlockHeaderSize is the size in bytes of the Block header type.\n\tBlockHeaderSize = 80\n\n\t\/\/ RegularFilterHeaderSize is the size in bytes of the RegularFilter\n\t\/\/ header type.\n\tRegularFilterHeaderSize = 32\n)\n\n\/\/ headerIndex is an index stored within the database that allows for random\n\/\/ access into the on-disk header file. This, in conjunction with a flat file\n\/\/ of headers consists of header database. The keys have been specifically\n\/\/ crafted in order to ensure maximum write performance during IBD, and also to\n\/\/ provide the necessary indexing properties required.\ntype headerIndex struct {\n\tdb walletdb.DB\n\n\tindexType HeaderType\n}\n\n\/\/ newHeaderIndex creates a new headerIndex given an already open database, and\n\/\/ a particular header type.\nfunc newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) {\n\t\/\/ As an initially step, we'll attempt to create all the buckets\n\t\/\/ necessary for functioning of the index. If these buckets has already\n\t\/\/ been created, then we can exit early.\n\terr := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {\n\t\t_, err := tx.CreateTopLevelBucket(indexBucket)\n\t\treturn err\n\n\t})\n\tif err != nil && err != walletdb.ErrBucketExists {\n\t\treturn nil, err\n\t}\n\n\treturn &headerIndex{\n\t\tdb: db,\n\t\tindexType: indexType,\n\t}, nil\n}\n\n\/\/ headerEntry is an internal type that's used to quickly map a (height, hash)\n\/\/ pair into the proper key that'll be stored within the database.\ntype headerEntry struct {\n\thash chainhash.Hash\n\theight uint32\n}\n\n\/\/ headerBatch is a batch of header entries to be written to disk.\n\/\/\n\/\/ NOTE: The entries within a batch SHOULD be properly sorted by hash in\n\/\/ order to ensure the batch is written in a sequential write.\ntype headerBatch []headerEntry\n\n\/\/ Len returns the number of routes in the collection.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Len() int {\n\treturn len(h)\n}\n\n\/\/ Less reports where the entry with index i should sort before the entry with\n\/\/ index j. As we want to ensure the items are written in sequential order,\n\/\/ items with the \"first\" hash.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Less(i, j int) bool {\n\treturn bytes.Compare(h[i].hash[:], h[j].hash[:]) < 0\n}\n\n\/\/ Swap swaps the elements with indexes i and j.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n}\n\n\/\/ addHeaders writes a batch of header entries in a single atomic batch\nfunc (h *headerIndex) addHeaders(batch headerBatch) error {\n\t\/\/ If we're writing a 0-length batch, make no changes and return.\n\tif len(batch) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ In order to ensure optimal write performance, we'll ensure that the\n\t\/\/ items are sorted by their hash before insertion into the database.\n\tsort.Sort(batch)\n\n\treturn walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {\n\t\trootBucket := tx.ReadWriteBucket(indexBucket)\n\n\t\tvar tipKey []byte\n\n\t\t\/\/ Based on the specified index type of this instance of the\n\t\t\/\/ index, we'll grab the key that tracks the tip of the chain\n\t\t\/\/ so we can update the index once all the header entries have\n\t\t\/\/ been updated.\n\t\t\/\/ TODO(roasbeef): only need block tip?\n\t\tswitch h.indexType {\n\t\tcase Block:\n\t\t\ttipKey = bitcoinTip\n\t\tcase RegularFilter:\n\t\t\ttipKey = regFilterTip\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown index type: %v\", h.indexType)\n\t\t}\n\n\t\tvar (\n\t\t\tchainTipHash chainhash.Hash\n\t\t\tchainTipHeight uint32\n\t\t)\n\n\t\tfor _, header := range batch {\n\t\t\tvar heightBytes [4]byte\n\t\t\tbinary.BigEndian.PutUint32(heightBytes[:], header.height)\n\t\t\terr := rootBucket.Put(header.hash[:], heightBytes[:])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ TODO(roasbeef): need to remedy if side-chain\n\t\t\t\/\/ tracking added\n\t\t\tif header.height >= chainTipHeight {\n\t\t\t\tchainTipHash = header.hash\n\t\t\t\tchainTipHeight = header.height\n\t\t\t}\n\t\t}\n\n\t\treturn rootBucket.Put(tipKey, chainTipHash[:])\n\t})\n}\n\n\/\/ heightFromHash returns the height of the entry that matches the specified\n\/\/ height. With this height, the caller is then able to seek to the appropriate\n\/\/ spot in the flat files in order to extract the true header.\nfunc (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) {\n\tvar height uint32\n\terr := walletdb.View(h.db, func(tx walletdb.ReadTx) error {\n\t\tvar err error\n\t\theight, err = h.heightFromHashWithTx(tx, hash)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn height, nil\n}\n\n\/\/ heightFromHashWithTx returns the height of the entry that matches the\n\/\/ specified hash by using the given DB transaction. With this height, the\n\/\/ caller is then able to seek to the appropriate spot in the flat files in\n\/\/ order to extract the true header.\nfunc (h *headerIndex) heightFromHashWithTx(tx walletdb.ReadTx,\n\thash *chainhash.Hash) (uint32, error) {\n\n\trootBucket := tx.ReadBucket(indexBucket)\n\n\theightBytes := rootBucket.Get(hash[:])\n\tif heightBytes == nil {\n\t\t\/\/ If the hash wasn't found, then we don't know of this hash\n\t\t\/\/ within the index.\n\t\treturn 0, ErrHashNotFound\n\t}\n\n\treturn binary.BigEndian.Uint32(heightBytes), nil\n}\n\n\/\/ chainTip returns the best hash and height that the index knows of.\nfunc (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) {\n\tvar (\n\t\ttipHeight uint32\n\t\ttipHash *chainhash.Hash\n\t)\n\n\terr := walletdb.View(h.db, func(tx walletdb.ReadTx) error {\n\t\trootBucket := tx.ReadBucket(indexBucket)\n\n\t\tvar tipKey []byte\n\n\t\t\/\/ Based on the specified index type of this instance of the\n\t\t\/\/ index, we'll grab the particular key that tracks the chain\n\t\t\/\/ tip.\n\t\tswitch h.indexType {\n\t\tcase Block:\n\t\t\ttipKey = bitcoinTip\n\t\tcase RegularFilter:\n\t\t\ttipKey = regFilterTip\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown chain tip index type: %v\", h.indexType)\n\t\t}\n\n\t\t\/\/ Now that we have the particular tip key for this header\n\t\t\/\/ type, we'll fetch the hash for this tip, then using that\n\t\t\/\/ we'll fetch the height that corresponds to that hash.\n\t\ttipHashBytes := rootBucket.Get(tipKey)\n\t\ttipHeightBytes := rootBucket.Get(tipHashBytes)\n\t\tif len(tipHeightBytes) != 4 {\n\t\t\treturn ErrHeightNotFound\n\t\t}\n\n\t\t\/\/ With the height fetched, we can now populate our return\n\t\t\/\/ parameters.\n\t\th, err := chainhash.NewHash(tipHashBytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttipHash = h\n\t\ttipHeight = binary.BigEndian.Uint32(tipHeightBytes)\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn tipHash, tipHeight, nil\n}\n\n\/\/ truncateIndex truncates the index for a particluar header type by a single\n\/\/ header entry. The passed newTip pointer should point to the hash of the new\n\/\/ chain tip. Optionally, if the entry is to be deleted as well, then the\n\/\/ delete flag should be set to true.\nfunc (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error {\n\treturn walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {\n\t\trootBucket := tx.ReadWriteBucket(indexBucket)\n\n\t\tvar tipKey []byte\n\n\t\t\/\/ Based on the specified index type of this instance of the\n\t\t\/\/ index, we'll grab the key that tracks the tip of the chain\n\t\t\/\/ we need to update.\n\t\tswitch h.indexType {\n\t\tcase Block:\n\t\t\ttipKey = bitcoinTip\n\t\tcase RegularFilter:\n\t\t\ttipKey = regFilterTip\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown index type: %v\", h.indexType)\n\t\t}\n\n\t\t\/\/ If the delete flag is set, then we'll also delete this entry\n\t\t\/\/ from the database as the primary index (block headers) is\n\t\t\/\/ being rolled back.\n\t\tif delete {\n\t\t\tprevTipHash := rootBucket.Get(tipKey)\n\t\t\tif err := rootBucket.Delete(prevTipHash); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ With the now stale entry deleted, we'll update the chain tip\n\t\t\/\/ to point to the new hash.\n\t\treturn rootBucket.Put(tipKey, newTip[:])\n\t})\n}\n<commit_msg>headerfs: extract chainTipWithTx<commit_after>package headerfs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcwallet\/walletdb\"\n)\n\nvar (\n\t\/\/ indexBucket is the main top-level bucket for the header index.\n\t\/\/ Nothing is stored in this bucket other than the sub-buckets which\n\t\/\/ contains the indexes for the various header types.\n\tindexBucket = []byte(\"header-index\")\n\n\t\/\/ bitcoinTip is the key which tracks the \"tip\" of the block header\n\t\/\/ chain. The value of this key will be the current block hash of the\n\t\/\/ best known chain that we're synced to.\n\tbitcoinTip = []byte(\"bitcoin\")\n\n\t\/\/ regFilterTip is the key which tracks the \"tip\" of the regular\n\t\/\/ compact filter header chain. The value of this key will be the\n\t\/\/ current block hash of the best known chain that the headers for\n\t\/\/ regular filter are synced to.\n\tregFilterTip = []byte(\"regular\")\n\n\t\/\/ extFilterTip is the key which tracks the \"tip\" of the extended\n\t\/\/ compact filter header chain. The value of this key will be the\n\t\/\/ current block hash of the best known chain that the headers for\n\t\/\/ extended filter are synced to.\n\textFilterTip = []byte(\"ext\")\n)\n\nvar (\n\t\/\/ ErrHeightNotFound is returned when a specified height isn't found in\n\t\/\/ a target index.\n\tErrHeightNotFound = fmt.Errorf(\"target height not found in index\")\n\n\t\/\/ ErrHashNotFound is returned when a specified block hash isn't found\n\t\/\/ in a target index.\n\tErrHashNotFound = fmt.Errorf(\"target hash not found in index\")\n)\n\n\/\/ HeaderType is an enum-like type which defines the various header types that\n\/\/ are stored within the index.\ntype HeaderType uint8\n\nconst (\n\t\/\/ Block is the header type that represents regular Bitcoin block\n\t\/\/ headers.\n\tBlock HeaderType = iota\n\n\t\/\/ RegularFilter is a header type that represents the basic filter\n\t\/\/ header type for the filter header chain.\n\tRegularFilter\n)\n\nconst (\n\t\/\/ BlockHeaderSize is the size in bytes of the Block header type.\n\tBlockHeaderSize = 80\n\n\t\/\/ RegularFilterHeaderSize is the size in bytes of the RegularFilter\n\t\/\/ header type.\n\tRegularFilterHeaderSize = 32\n)\n\n\/\/ headerIndex is an index stored within the database that allows for random\n\/\/ access into the on-disk header file. This, in conjunction with a flat file\n\/\/ of headers consists of header database. The keys have been specifically\n\/\/ crafted in order to ensure maximum write performance during IBD, and also to\n\/\/ provide the necessary indexing properties required.\ntype headerIndex struct {\n\tdb walletdb.DB\n\n\tindexType HeaderType\n}\n\n\/\/ newHeaderIndex creates a new headerIndex given an already open database, and\n\/\/ a particular header type.\nfunc newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) {\n\t\/\/ As an initially step, we'll attempt to create all the buckets\n\t\/\/ necessary for functioning of the index. If these buckets has already\n\t\/\/ been created, then we can exit early.\n\terr := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {\n\t\t_, err := tx.CreateTopLevelBucket(indexBucket)\n\t\treturn err\n\n\t})\n\tif err != nil && err != walletdb.ErrBucketExists {\n\t\treturn nil, err\n\t}\n\n\treturn &headerIndex{\n\t\tdb: db,\n\t\tindexType: indexType,\n\t}, nil\n}\n\n\/\/ headerEntry is an internal type that's used to quickly map a (height, hash)\n\/\/ pair into the proper key that'll be stored within the database.\ntype headerEntry struct {\n\thash chainhash.Hash\n\theight uint32\n}\n\n\/\/ headerBatch is a batch of header entries to be written to disk.\n\/\/\n\/\/ NOTE: The entries within a batch SHOULD be properly sorted by hash in\n\/\/ order to ensure the batch is written in a sequential write.\ntype headerBatch []headerEntry\n\n\/\/ Len returns the number of routes in the collection.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Len() int {\n\treturn len(h)\n}\n\n\/\/ Less reports where the entry with index i should sort before the entry with\n\/\/ index j. As we want to ensure the items are written in sequential order,\n\/\/ items with the \"first\" hash.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Less(i, j int) bool {\n\treturn bytes.Compare(h[i].hash[:], h[j].hash[:]) < 0\n}\n\n\/\/ Swap swaps the elements with indexes i and j.\n\/\/\n\/\/ NOTE: This is part of the sort.Interface implementation.\nfunc (h headerBatch) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n}\n\n\/\/ addHeaders writes a batch of header entries in a single atomic batch\nfunc (h *headerIndex) addHeaders(batch headerBatch) error {\n\t\/\/ If we're writing a 0-length batch, make no changes and return.\n\tif len(batch) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ In order to ensure optimal write performance, we'll ensure that the\n\t\/\/ items are sorted by their hash before insertion into the database.\n\tsort.Sort(batch)\n\n\treturn walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {\n\t\trootBucket := tx.ReadWriteBucket(indexBucket)\n\n\t\tvar tipKey []byte\n\n\t\t\/\/ Based on the specified index type of this instance of the\n\t\t\/\/ index, we'll grab the key that tracks the tip of the chain\n\t\t\/\/ so we can update the index once all the header entries have\n\t\t\/\/ been updated.\n\t\t\/\/ TODO(roasbeef): only need block tip?\n\t\tswitch h.indexType {\n\t\tcase Block:\n\t\t\ttipKey = bitcoinTip\n\t\tcase RegularFilter:\n\t\t\ttipKey = regFilterTip\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown index type: %v\", h.indexType)\n\t\t}\n\n\t\tvar (\n\t\t\tchainTipHash chainhash.Hash\n\t\t\tchainTipHeight uint32\n\t\t)\n\n\t\tfor _, header := range batch {\n\t\t\tvar heightBytes [4]byte\n\t\t\tbinary.BigEndian.PutUint32(heightBytes[:], header.height)\n\t\t\terr := rootBucket.Put(header.hash[:], heightBytes[:])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ TODO(roasbeef): need to remedy if side-chain\n\t\t\t\/\/ tracking added\n\t\t\tif header.height >= chainTipHeight {\n\t\t\t\tchainTipHash = header.hash\n\t\t\t\tchainTipHeight = header.height\n\t\t\t}\n\t\t}\n\n\t\treturn rootBucket.Put(tipKey, chainTipHash[:])\n\t})\n}\n\n\/\/ heightFromHash returns the height of the entry that matches the specified\n\/\/ height. With this height, the caller is then able to seek to the appropriate\n\/\/ spot in the flat files in order to extract the true header.\nfunc (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) {\n\tvar height uint32\n\terr := walletdb.View(h.db, func(tx walletdb.ReadTx) error {\n\t\tvar err error\n\t\theight, err = h.heightFromHashWithTx(tx, hash)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn height, nil\n}\n\n\/\/ heightFromHashWithTx returns the height of the entry that matches the\n\/\/ specified hash by using the given DB transaction. With this height, the\n\/\/ caller is then able to seek to the appropriate spot in the flat files in\n\/\/ order to extract the true header.\nfunc (h *headerIndex) heightFromHashWithTx(tx walletdb.ReadTx,\n\thash *chainhash.Hash) (uint32, error) {\n\n\trootBucket := tx.ReadBucket(indexBucket)\n\n\theightBytes := rootBucket.Get(hash[:])\n\tif heightBytes == nil {\n\t\t\/\/ If the hash wasn't found, then we don't know of this hash\n\t\t\/\/ within the index.\n\t\treturn 0, ErrHashNotFound\n\t}\n\n\treturn binary.BigEndian.Uint32(heightBytes), nil\n}\n\n\/\/ chainTip returns the best hash and height that the index knows of.\nfunc (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) {\n\tvar (\n\t\ttipHeight uint32\n\t\ttipHash *chainhash.Hash\n\t)\n\n\terr := walletdb.View(h.db, func(tx walletdb.ReadTx) error {\n\t\tvar err error\n\t\ttipHash, tipHeight, err = h.chainTipWithTx(tx)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn tipHash, tipHeight, nil\n}\n\n\/\/ chainTipWithTx returns the best hash and height that the index knows of by\n\/\/ using the given DB transaction.\nfunc (h *headerIndex) chainTipWithTx(tx walletdb.ReadTx) (*chainhash.Hash,\n\tuint32, error) {\n\n\trootBucket := tx.ReadBucket(indexBucket)\n\n\tvar tipKey []byte\n\n\t\/\/ Based on the specified index type of this instance of the index,\n\t\/\/ we'll grab the particular key that tracks the chain tip.\n\tswitch h.indexType {\n\tcase Block:\n\t\ttipKey = bitcoinTip\n\tcase RegularFilter:\n\t\ttipKey = regFilterTip\n\tdefault:\n\t\treturn nil, 0, fmt.Errorf(\"unknown chain tip index type: %v\",\n\t\t\th.indexType)\n\t}\n\n\t\/\/ Now that we have the particular tip key for this header type, we'll\n\t\/\/ fetch the hash for this tip, then using that we'll fetch the height\n\t\/\/ that corresponds to that hash.\n\ttipHashBytes := rootBucket.Get(tipKey)\n\ttipHeightBytes := rootBucket.Get(tipHashBytes)\n\tif len(tipHeightBytes) != 4 {\n\t\treturn nil, 0, ErrHeightNotFound\n\t}\n\n\t\/\/ With the height fetched, we can now populate our return\n\t\/\/ parameters.\n\ttipHash, err := chainhash.NewHash(tipHashBytes)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn tipHash, binary.BigEndian.Uint32(tipHeightBytes), nil\n}\n\n\/\/ truncateIndex truncates the index for a particluar header type by a single\n\/\/ header entry. The passed newTip pointer should point to the hash of the new\n\/\/ chain tip. Optionally, if the entry is to be deleted as well, then the\n\/\/ delete flag should be set to true.\nfunc (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error {\n\treturn walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error {\n\t\trootBucket := tx.ReadWriteBucket(indexBucket)\n\n\t\tvar tipKey []byte\n\n\t\t\/\/ Based on the specified index type of this instance of the\n\t\t\/\/ index, we'll grab the key that tracks the tip of the chain\n\t\t\/\/ we need to update.\n\t\tswitch h.indexType {\n\t\tcase Block:\n\t\t\ttipKey = bitcoinTip\n\t\tcase RegularFilter:\n\t\t\ttipKey = regFilterTip\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown index type: %v\", h.indexType)\n\t\t}\n\n\t\t\/\/ If the delete flag is set, then we'll also delete this entry\n\t\t\/\/ from the database as the primary index (block headers) is\n\t\t\/\/ being rolled back.\n\t\tif delete {\n\t\t\tprevTipHash := rootBucket.Get(tipKey)\n\t\t\tif err := rootBucket.Delete(prevTipHash); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ With the now stale entry deleted, we'll update the chain tip\n\t\t\/\/ to point to the new hash.\n\t\treturn rootBucket.Put(tipKey, newTip[:])\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package crypto\n\nimport (\n\t\"crypto\/cipher\"\n\t\"errors\"\n\t\"io\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/serial\"\n)\n\nvar (\n\tErrAuthenticationFailed = errors.New(\"Authentication failed.\")\n\n\terrInsufficientBuffer = errors.New(\"Insufficient buffer.\")\n\terrInvalidNonce = errors.New(\"Invalid nonce.\")\n)\n\ntype BytesGenerator interface {\n\tNext() []byte\n}\n\ntype NoOpBytesGenerator struct {\n\tbuffer [1]byte\n}\n\nfunc (v NoOpBytesGenerator) Next() []byte {\n\treturn v.buffer[:0]\n}\n\ntype StaticBytesGenerator struct {\n\tContent []byte\n}\n\nfunc (v StaticBytesGenerator) Next() []byte {\n\treturn v.Content\n}\n\ntype Authenticator interface {\n\tNonceSize() int\n\tOverhead() int\n\tOpen(dst, cipherText []byte) ([]byte, error)\n\tSeal(dst, plainText []byte) ([]byte, error)\n}\n\ntype AEADAuthenticator struct {\n\tcipher.AEAD\n\tNonceGenerator BytesGenerator\n\tAdditionalDataGenerator BytesGenerator\n}\n\nfunc (v *AEADAuthenticator) Open(dst, cipherText []byte) ([]byte, error) {\n\tiv := v.NonceGenerator.Next()\n\tif len(iv) != v.AEAD.NonceSize() {\n\t\treturn nil, errInvalidNonce\n\t}\n\n\tadditionalData := v.AdditionalDataGenerator.Next()\n\treturn v.AEAD.Open(dst, iv, cipherText, additionalData)\n}\n\nfunc (v *AEADAuthenticator) Seal(dst, plainText []byte) ([]byte, error) {\n\tiv := v.NonceGenerator.Next()\n\tif len(iv) != v.AEAD.NonceSize() {\n\t\treturn nil, errInvalidNonce\n\t}\n\n\tadditionalData := v.AdditionalDataGenerator.Next()\n\treturn v.AEAD.Seal(dst, iv, plainText, additionalData), nil\n}\n\ntype AuthenticationReader struct {\n\tauth Authenticator\n\tbuffer *buf.Buffer\n\treader io.Reader\n\n\tchunk []byte\n\taggressive bool\n}\n\nfunc NewAuthenticationReader(auth Authenticator, reader io.Reader, aggressive bool) *AuthenticationReader {\n\treturn &AuthenticationReader{\n\t\tauth: auth,\n\t\tbuffer: buf.NewLocal(32 * 1024),\n\t\treader: reader,\n\t\taggressive: aggressive,\n\t}\n}\n\nfunc (v *AuthenticationReader) NextChunk() error {\n\tif v.buffer.Len() < 2 {\n\t\treturn errInsufficientBuffer\n\t}\n\tsize := int(serial.BytesToUint16(v.buffer.BytesTo(2)))\n\tif size > v.buffer.Len()-2 {\n\t\treturn errInsufficientBuffer\n\t}\n\tif size == v.auth.Overhead() {\n\t\treturn io.EOF\n\t}\n\tif size < v.auth.Overhead() {\n\t\treturn errors.New(\"AuthenticationReader: invalid packet size.\")\n\t}\n\tcipherChunk := v.buffer.BytesRange(2, size+2)\n\tplainChunk, err := v.auth.Open(cipherChunk[:0], cipherChunk)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv.chunk = plainChunk\n\tv.buffer.SliceFrom(size + 2)\n\treturn nil\n}\n\nfunc (v *AuthenticationReader) CopyChunk(b []byte) int {\n\tif len(v.chunk) == 0 {\n\t\treturn 0\n\t}\n\tnBytes := copy(b, v.chunk)\n\tif nBytes == len(v.chunk) {\n\t\tv.chunk = nil\n\t} else {\n\t\tv.chunk = v.chunk[nBytes:]\n\t}\n\treturn nBytes\n}\n\nfunc (v *AuthenticationReader) EnsureChunk() error {\n\tfor {\n\t\terr := v.NextChunk()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err == errInsufficientBuffer {\n\t\t\tif v.buffer.IsEmpty() {\n\t\t\t\tv.buffer.Clear()\n\t\t\t} else {\n\t\t\t\tleftover := v.buffer.Bytes()\n\t\t\t\tcommon.Must(v.buffer.Reset(func(b []byte) (int, error) {\n\t\t\t\t\treturn copy(b, leftover), nil\n\t\t\t\t}))\n\t\t\t}\n\t\t\terr = v.buffer.AppendSupplier(buf.ReadFrom(v.reader))\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n}\n\nfunc (v *AuthenticationReader) Read(b []byte) (int, error) {\n\tif len(v.chunk) > 0 {\n\t\tnBytes := v.CopyChunk(b)\n\t\treturn nBytes, nil\n\t}\n\n\terr := v.EnsureChunk()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ttotalBytes := v.CopyChunk(b)\n\tfor v.aggressive && totalBytes < len(b) {\n\t\tif err := v.NextChunk(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\ttotalBytes += v.CopyChunk(b[totalBytes:])\n\t}\n\treturn totalBytes, nil\n}\n\ntype AuthenticationWriter struct {\n\tauth Authenticator\n\tbuffer []byte\n\twriter io.Writer\n}\n\nfunc NewAuthenticationWriter(auth Authenticator, writer io.Writer) *AuthenticationWriter {\n\treturn &AuthenticationWriter{\n\t\tauth: auth,\n\t\tbuffer: make([]byte, 32*1024),\n\t\twriter: writer,\n\t}\n}\n\nfunc (v *AuthenticationWriter) Write(b []byte) (int, error) {\n\tcipherChunk, err := v.auth.Seal(v.buffer[2:2], b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tserial.Uint16ToBytes(uint16(len(cipherChunk)), v.buffer[:0])\n\t_, err = v.writer.Write(v.buffer[:2+len(cipherChunk)])\n\treturn len(b), err\n}\n<commit_msg>fix auth reader buffer overrun<commit_after>package crypto\n\nimport (\n\t\"crypto\/cipher\"\n\t\"errors\"\n\t\"io\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/serial\"\n)\n\nvar (\n\tErrAuthenticationFailed = errors.New(\"Authentication failed.\")\n\n\terrInsufficientBuffer = errors.New(\"Insufficient buffer.\")\n\terrInvalidNonce = errors.New(\"Invalid nonce.\")\n\terrInvalidLength = errors.New(\"Invalid buffer size.\")\n)\n\ntype BytesGenerator interface {\n\tNext() []byte\n}\n\ntype NoOpBytesGenerator struct {\n\tbuffer [1]byte\n}\n\nfunc (v NoOpBytesGenerator) Next() []byte {\n\treturn v.buffer[:0]\n}\n\ntype StaticBytesGenerator struct {\n\tContent []byte\n}\n\nfunc (v StaticBytesGenerator) Next() []byte {\n\treturn v.Content\n}\n\ntype Authenticator interface {\n\tNonceSize() int\n\tOverhead() int\n\tOpen(dst, cipherText []byte) ([]byte, error)\n\tSeal(dst, plainText []byte) ([]byte, error)\n}\n\ntype AEADAuthenticator struct {\n\tcipher.AEAD\n\tNonceGenerator BytesGenerator\n\tAdditionalDataGenerator BytesGenerator\n}\n\nfunc (v *AEADAuthenticator) Open(dst, cipherText []byte) ([]byte, error) {\n\tiv := v.NonceGenerator.Next()\n\tif len(iv) != v.AEAD.NonceSize() {\n\t\treturn nil, errInvalidNonce\n\t}\n\n\tadditionalData := v.AdditionalDataGenerator.Next()\n\treturn v.AEAD.Open(dst, iv, cipherText, additionalData)\n}\n\nfunc (v *AEADAuthenticator) Seal(dst, plainText []byte) ([]byte, error) {\n\tiv := v.NonceGenerator.Next()\n\tif len(iv) != v.AEAD.NonceSize() {\n\t\treturn nil, errInvalidNonce\n\t}\n\n\tadditionalData := v.AdditionalDataGenerator.Next()\n\treturn v.AEAD.Seal(dst, iv, plainText, additionalData), nil\n}\n\ntype AuthenticationReader struct {\n\tauth Authenticator\n\tbuffer *buf.Buffer\n\treader io.Reader\n\n\tchunk []byte\n\taggressive bool\n}\n\nconst (\n\treaderBufferSize = 32 * 1024\n)\n\nfunc NewAuthenticationReader(auth Authenticator, reader io.Reader, aggressive bool) *AuthenticationReader {\n\treturn &AuthenticationReader{\n\t\tauth: auth,\n\t\tbuffer: buf.NewLocal(readerBufferSize),\n\t\treader: reader,\n\t\taggressive: aggressive,\n\t}\n}\n\nfunc (v *AuthenticationReader) NextChunk() error {\n\tif v.buffer.Len() < 2 {\n\t\treturn errInsufficientBuffer\n\t}\n\tsize := int(serial.BytesToUint16(v.buffer.BytesTo(2)))\n\tif size > v.buffer.Len()-2 {\n\t\treturn errInsufficientBuffer\n\t}\n\tif size > readerBufferSize-2 {\n\t\treturn errInvalidLength\n\t}\n\tif size == v.auth.Overhead() {\n\t\treturn io.EOF\n\t}\n\tif size < v.auth.Overhead() {\n\t\treturn errors.New(\"AuthenticationReader: invalid packet size.\")\n\t}\n\tcipherChunk := v.buffer.BytesRange(2, size+2)\n\tplainChunk, err := v.auth.Open(cipherChunk[:0], cipherChunk)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv.chunk = plainChunk\n\tv.buffer.SliceFrom(size + 2)\n\treturn nil\n}\n\nfunc (v *AuthenticationReader) CopyChunk(b []byte) int {\n\tif len(v.chunk) == 0 {\n\t\treturn 0\n\t}\n\tnBytes := copy(b, v.chunk)\n\tif nBytes == len(v.chunk) {\n\t\tv.chunk = nil\n\t} else {\n\t\tv.chunk = v.chunk[nBytes:]\n\t}\n\treturn nBytes\n}\n\nfunc (v *AuthenticationReader) EnsureChunk() error {\n\tfor {\n\t\terr := v.NextChunk()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err == errInsufficientBuffer {\n\t\t\tif v.buffer.IsEmpty() {\n\t\t\t\tv.buffer.Clear()\n\t\t\t} else {\n\t\t\t\tleftover := v.buffer.Bytes()\n\t\t\t\tcommon.Must(v.buffer.Reset(func(b []byte) (int, error) {\n\t\t\t\t\treturn copy(b, leftover), nil\n\t\t\t\t}))\n\t\t\t}\n\t\t\terr = v.buffer.AppendSupplier(buf.ReadFrom(v.reader))\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n}\n\nfunc (v *AuthenticationReader) Read(b []byte) (int, error) {\n\tif len(v.chunk) > 0 {\n\t\tnBytes := v.CopyChunk(b)\n\t\treturn nBytes, nil\n\t}\n\n\terr := v.EnsureChunk()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ttotalBytes := v.CopyChunk(b)\n\tfor v.aggressive && totalBytes < len(b) {\n\t\tif err := v.NextChunk(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\ttotalBytes += v.CopyChunk(b[totalBytes:])\n\t}\n\treturn totalBytes, nil\n}\n\ntype AuthenticationWriter struct {\n\tauth Authenticator\n\tbuffer []byte\n\twriter io.Writer\n}\n\nfunc NewAuthenticationWriter(auth Authenticator, writer io.Writer) *AuthenticationWriter {\n\treturn &AuthenticationWriter{\n\t\tauth: auth,\n\t\tbuffer: make([]byte, 32*1024),\n\t\twriter: writer,\n\t}\n}\n\nfunc (v *AuthenticationWriter) Write(b []byte) (int, error) {\n\tcipherChunk, err := v.auth.Seal(v.buffer[2:2], b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tserial.Uint16ToBytes(uint16(len(cipherChunk)), v.buffer[:0])\n\t_, err = v.writer.Write(v.buffer[:2+len(cipherChunk)])\n\treturn len(b), err\n}\n<|endoftext|>"} {"text":"<commit_before>package net\n\nimport (\n\t\"net\"\n\n\t\"v2ray.com\/core\/app\/log\"\n\t\"v2ray.com\/core\/common\/predicate\"\n)\n\nvar (\n\t\/\/ LocalHostIP is a constant value for localhost IP in IPv4.\n\tLocalHostIP = IPAddress([]byte{127, 0, 0, 1})\n\n\t\/\/ AnyIP is a constant value for any IP in IPv4.\n\tAnyIP = IPAddress([]byte{0, 0, 0, 0})\n\n\t\/\/ LocalHostDomain is a constant value for localhost domain.\n\tLocalHostDomain = DomainAddress(\"localhost\")\n\n\t\/\/ LocalHostIPv6 is a constant value for localhost IP in IPv6.\n\tLocalHostIPv6 = IPAddress([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})\n)\n\n\/\/ AddressFamily is the type of address.\ntype AddressFamily int\n\nconst (\n\t\/\/ AddressFamilyIPv4 represents address as IPv4\n\tAddressFamilyIPv4 = AddressFamily(0)\n\n\t\/\/ AddressFamilyIPv6 represents address as IPv6\n\tAddressFamilyIPv6 = AddressFamily(1)\n\n\t\/\/ AddressFamilyDomain represents address as Domain\n\tAddressFamilyDomain = AddressFamily(2)\n)\n\nfunc (v AddressFamily) Either(fs ...AddressFamily) bool {\n\tfor _, f := range fs {\n\t\tif v == f {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (v AddressFamily) IsIPv4() bool {\n\treturn v == AddressFamilyIPv4\n}\n\nfunc (v AddressFamily) IsIPv6() bool {\n\treturn v == AddressFamilyIPv6\n}\n\nfunc (v AddressFamily) IsDomain() bool {\n\treturn v == AddressFamilyDomain\n}\n\n\/\/ Address represents a network address to be communicated with. It may be an IP address or domain\n\/\/ address, not both. This interface doesn't resolve IP address for a given domain.\ntype Address interface {\n\tIP() net.IP \/\/ IP of this Address\n\tDomain() string \/\/ Domain of this Address\n\tFamily() AddressFamily\n\n\tString() string \/\/ String representation of this Address\n}\n\n\/\/ ParseAddress parses a string into an Address. The return value will be an IPAddress when\n\/\/ the string is in the form of IPv4 or IPv6 address, or a DomainAddress otherwise.\nfunc ParseAddress(addr string) Address {\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}\n\n\/\/ IPAddress creates an Address with given IP.\nfunc IPAddress(ip []byte) Address {\n\tswitch len(ip) {\n\tcase net.IPv4len:\n\t\tvar addr ipv4Address = [4]byte{ip[0], ip[1], ip[2], ip[3]}\n\t\treturn addr\n\tcase net.IPv6len:\n\t\tif predicate.BytesAll(ip[0:10], 0) && predicate.BytesAll(ip[10:12], 0xff) {\n\t\t\treturn IPAddress(ip[12:16])\n\t\t}\n\t\tvar addr ipv6Address = [16]byte{\n\t\t\tip[0], ip[1], ip[2], ip[3],\n\t\t\tip[4], ip[5], ip[6], ip[7],\n\t\t\tip[8], ip[9], ip[10], ip[11],\n\t\t\tip[12], ip[13], ip[14], ip[15],\n\t\t}\n\t\treturn addr\n\tdefault:\n\t\tlog.Trace(newError(\"invalid IP format: \", ip).AtError())\n\t\treturn nil\n\t}\n}\n\n\/\/ DomainAddress creates an Address with given domain.\nfunc DomainAddress(domain string) Address {\n\treturn domainAddress(domain)\n}\n\ntype ipv4Address [4]byte\n\nfunc (v ipv4Address) IP() net.IP {\n\treturn net.IP(v[:])\n}\n\nfunc (ipv4Address) Domain() string {\n\tpanic(\"Calling Domain() on an IPv4Address.\")\n}\n\nfunc (ipv4Address) Family() AddressFamily {\n\treturn AddressFamilyIPv4\n}\n\nfunc (v ipv4Address) String() string {\n\treturn v.IP().String()\n}\n\ntype ipv6Address [16]byte\n\nfunc (v ipv6Address) IP() net.IP {\n\treturn net.IP(v[:])\n}\n\nfunc (ipv6Address) Domain() string {\n\tpanic(\"Calling Domain() on an IPv6Address.\")\n}\n\nfunc (ipv6Address) Family() AddressFamily {\n\treturn AddressFamilyIPv6\n}\n\nfunc (v ipv6Address) String() string {\n\treturn \"[\" + v.IP().String() + \"]\"\n}\n\ntype domainAddress string\n\nfunc (domainAddress) IP() net.IP {\n\tpanic(\"Calling IP() on a DomainAddress.\")\n}\n\nfunc (v domainAddress) Domain() string {\n\treturn string(v)\n}\n\nfunc (domainAddress) Family() AddressFamily {\n\treturn AddressFamilyDomain\n}\n\nfunc (v domainAddress) String() string {\n\treturn v.Domain()\n}\n\nfunc (v *IPOrDomain) AsAddress() Address {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tswitch addr := v.Address.(type) {\n\tcase *IPOrDomain_Ip:\n\t\treturn IPAddress(addr.Ip)\n\tcase *IPOrDomain_Domain:\n\t\treturn DomainAddress(addr.Domain)\n\t}\n\tpanic(\"Common|Net: Invalid address.\")\n}\n\nfunc NewIPOrDomain(addr Address) *IPOrDomain {\n\tswitch addr.Family() {\n\tcase AddressFamilyDomain:\n\t\treturn &IPOrDomain{\n\t\t\tAddress: &IPOrDomain_Domain{\n\t\t\t\tDomain: addr.Domain(),\n\t\t\t},\n\t\t}\n\tcase AddressFamilyIPv4, AddressFamilyIPv6:\n\t\treturn &IPOrDomain{\n\t\t\tAddress: &IPOrDomain_Ip{\n\t\t\t\tIp: addr.IP(),\n\t\t\t},\n\t\t}\n\tdefault:\n\t\tpanic(\"Unknown Address type.\")\n\t}\n}\n<commit_msg>refactor<commit_after>package net\n\nimport (\n\t\"net\"\n\n\t\"v2ray.com\/core\/app\/log\"\n\t\"v2ray.com\/core\/common\/predicate\"\n)\n\nvar (\n\t\/\/ LocalHostIP is a constant value for localhost IP in IPv4.\n\tLocalHostIP = IPAddress([]byte{127, 0, 0, 1})\n\n\t\/\/ AnyIP is a constant value for any IP in IPv4.\n\tAnyIP = IPAddress([]byte{0, 0, 0, 0})\n\n\t\/\/ LocalHostDomain is a constant value for localhost domain.\n\tLocalHostDomain = DomainAddress(\"localhost\")\n\n\t\/\/ LocalHostIPv6 is a constant value for localhost IP in IPv6.\n\tLocalHostIPv6 = IPAddress([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})\n)\n\n\/\/ AddressFamily is the type of address.\ntype AddressFamily int\n\nconst (\n\t\/\/ AddressFamilyIPv4 represents address as IPv4\n\tAddressFamilyIPv4 = AddressFamily(0)\n\n\t\/\/ AddressFamilyIPv6 represents address as IPv6\n\tAddressFamilyIPv6 = AddressFamily(1)\n\n\t\/\/ AddressFamilyDomain represents address as Domain\n\tAddressFamilyDomain = AddressFamily(2)\n)\n\n\/\/ Either returns true if current AddressFamily matches any of the AddressFamilys provided.\nfunc (af AddressFamily) Either(fs ...AddressFamily) bool {\n\tfor _, f := range fs {\n\t\tif af == f {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsIPv4 returns true if current AddressFamily is IPv4.\nfunc (af AddressFamily) IsIPv4() bool {\n\treturn af == AddressFamilyIPv4\n}\n\n\/\/ IsIPv6 returns true if current AddressFamily is IPv6.\nfunc (af AddressFamily) IsIPv6() bool {\n\treturn af == AddressFamilyIPv6\n}\n\n\/\/ IsDomain returns true if current AddressFamily is Domain.\nfunc (af AddressFamily) IsDomain() bool {\n\treturn af == AddressFamilyDomain\n}\n\n\/\/ Address represents a network address to be communicated with. It may be an IP address or domain\n\/\/ address, not both. This interface doesn't resolve IP address for a given domain.\ntype Address interface {\n\tIP() net.IP \/\/ IP of this Address\n\tDomain() string \/\/ Domain of this Address\n\tFamily() AddressFamily\n\n\tString() string \/\/ String representation of this Address\n}\n\n\/\/ ParseAddress parses a string into an Address. The return value will be an IPAddress when\n\/\/ the string is in the form of IPv4 or IPv6 address, or a DomainAddress otherwise.\nfunc ParseAddress(addr string) Address {\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}\n\n\/\/ IPAddress creates an Address with given IP.\nfunc IPAddress(ip []byte) Address {\n\tswitch len(ip) {\n\tcase net.IPv4len:\n\t\tvar addr ipv4Address = [4]byte{ip[0], ip[1], ip[2], ip[3]}\n\t\treturn addr\n\tcase net.IPv6len:\n\t\tif predicate.BytesAll(ip[0:10], 0) && predicate.BytesAll(ip[10:12], 0xff) {\n\t\t\treturn IPAddress(ip[12:16])\n\t\t}\n\t\tvar addr ipv6Address = [16]byte{\n\t\t\tip[0], ip[1], ip[2], ip[3],\n\t\t\tip[4], ip[5], ip[6], ip[7],\n\t\t\tip[8], ip[9], ip[10], ip[11],\n\t\t\tip[12], ip[13], ip[14], ip[15],\n\t\t}\n\t\treturn addr\n\tdefault:\n\t\tlog.Trace(newError(\"invalid IP format: \", ip).AtError())\n\t\treturn nil\n\t}\n}\n\n\/\/ DomainAddress creates an Address with given domain.\nfunc DomainAddress(domain string) Address {\n\treturn domainAddress(domain)\n}\n\ntype ipv4Address [4]byte\n\nfunc (a ipv4Address) IP() net.IP {\n\treturn net.IP(a[:])\n}\n\nfunc (ipv4Address) Domain() string {\n\tpanic(\"Calling Domain() on an IPv4Address.\")\n}\n\nfunc (ipv4Address) Family() AddressFamily {\n\treturn AddressFamilyIPv4\n}\n\nfunc (a ipv4Address) String() string {\n\treturn a.IP().String()\n}\n\ntype ipv6Address [16]byte\n\nfunc (a ipv6Address) IP() net.IP {\n\treturn net.IP(a[:])\n}\n\nfunc (ipv6Address) Domain() string {\n\tpanic(\"Calling Domain() on an IPv6Address.\")\n}\n\nfunc (ipv6Address) Family() AddressFamily {\n\treturn AddressFamilyIPv6\n}\n\nfunc (a ipv6Address) String() string {\n\treturn \"[\" + a.IP().String() + \"]\"\n}\n\ntype domainAddress string\n\nfunc (domainAddress) IP() net.IP {\n\tpanic(\"Calling IP() on a DomainAddress.\")\n}\n\nfunc (a domainAddress) Domain() string {\n\treturn string(a)\n}\n\nfunc (domainAddress) Family() AddressFamily {\n\treturn AddressFamilyDomain\n}\n\nfunc (a domainAddress) String() string {\n\treturn a.Domain()\n}\n\n\/\/ AsAddress translates IPOrDomain to Address.\nfunc (d *IPOrDomain) AsAddress() Address {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tswitch addr := d.Address.(type) {\n\tcase *IPOrDomain_Ip:\n\t\treturn IPAddress(addr.Ip)\n\tcase *IPOrDomain_Domain:\n\t\treturn DomainAddress(addr.Domain)\n\t}\n\tpanic(\"Common|Net: Invalid address.\")\n}\n\n\/\/ NewIPOrDomain translates Address to IPOrDomain\nfunc NewIPOrDomain(addr Address) *IPOrDomain {\n\tswitch addr.Family() {\n\tcase AddressFamilyDomain:\n\t\treturn &IPOrDomain{\n\t\t\tAddress: &IPOrDomain_Domain{\n\t\t\t\tDomain: addr.Domain(),\n\t\t\t},\n\t\t}\n\tcase AddressFamilyIPv4, AddressFamilyIPv6:\n\t\treturn &IPOrDomain{\n\t\t\tAddress: &IPOrDomain_Ip{\n\t\t\t\tIp: addr.IP(),\n\t\t\t},\n\t\t}\n\tdefault:\n\t\tpanic(\"Unknown Address type.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package jira\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ FilterService handles fields for the Jira instance \/ API.\n\/\/\n\/\/ Jira API docs: https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-group-Filter\ntype FilterService struct {\n\tclient *Client\n}\n\n\/\/ Filter represents a Filter in Jira\ntype Filter struct {\n\tSelf string `json:\"self\"`\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tOwner User `json:\"owner\"`\n\tJql string `json:\"jql\"`\n\tViewURL string `json:\"viewUrl\"`\n\tSearchURL string `json:\"searchUrl\"`\n\tFavourite bool `json:\"favourite\"`\n\tFavouritedCount int `json:\"favouritedCount\"`\n\tSharePermissions []interface{} `json:\"sharePermissions\"`\n\tSubscriptions struct {\n\t\tSize int `json:\"size\"`\n\t\tItems []interface{} `json:\"items\"`\n\t\tMaxResults int `json:\"max-results\"`\n\t\tStartIndex int `json:\"start-index\"`\n\t\tEndIndex int `json:\"end-index\"`\n\t} `json:\"subscriptions\"`\n}\n\n\/\/ GetMyFiltersQueryOptions specifies the optional parameters for the Get My Filters method\ntype GetMyFiltersQueryOptions struct {\n\tIncludeFavourites bool `url:\"includeFavourites,omitempty\"`\n\tExpand string `url:\"expand,omitempty\"`\n}\n\n\/\/ FiltersList reflects a list of filters\ntype FiltersList struct {\n\tMaxResults int `json:\"maxResults\" structs:\"maxResults\"`\n\tStartAt int `json:\"startAt\" structs:\"startAt\"`\n\tTotal int `json:\"total\" structs:\"total\"`\n\tIsLast bool `json:\"isLast\" structs:\"isLast\"`\n\tValues []FiltersListItem `json:\"values\" structs:\"values\"`\n}\n\n\/\/ FiltersListItem represents a Filter of FiltersList in Jira\ntype FiltersListItem struct {\n\tSelf string `json:\"self\"`\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tOwner User `json:\"owner\"`\n\tJql string `json:\"jql\"`\n\tViewURL string `json:\"viewUrl\"`\n\tSearchURL string `json:\"searchUrl\"`\n\tFavourite bool `json:\"favourite\"`\n\tFavouritedCount int `json:\"favouritedCount\"`\n\tSharePermissions []interface{} `json:\"sharePermissions\"`\n\tSubscriptions []struct {\n\t\tID int `json:\"id\"`\n\t\tUser User `json:\"user\"`\n\t} `json:\"subscriptions\"`\n}\n\n\/\/ FilterSearchOptions specifies the optional parameters for the Search method\n\/\/ https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-search-get\ntype FilterSearchOptions struct {\n\t\/\/ String used to perform a case-insensitive partial match with name.\n\tFilterName string `url:\"filterName,omitempty\"`\n\n\t\/\/ User account ID used to return filters with the matching owner.accountId. This parameter cannot be used with owner.\n\tAccountID string `url:\"accountId,omitempty\"`\n\n\t\/\/ Group name used to returns filters that are shared with a group that matches sharePermissions.group.groupname.\n\tGroupName string `url:\"groupname,omitempty\"`\n\n\t\/\/ Project ID used to returns filters that are shared with a project that matches sharePermissions.project.id.\n\t\/\/ Format: int64\n\tProjectID int64 `url:\"projectId,omitempty\"`\n\n\t\/\/ Orders the results using one of these filter properties.\n\t\/\/ - `description` Orders by filter `description`. Note that this ordering works independently of whether the expand to display the description field is in use.\n\t\/\/ - `favourite_count` Orders by `favouritedCount`.\n\t\/\/ - `is_favourite` Orders by `favourite`.\n\t\/\/ - `id` Orders by filter `id`.\n\t\/\/ - `name` Orders by filter `name`.\n\t\/\/ - `owner` Orders by `owner.accountId`.\n\t\/\/\n\t\/\/ Default: `name`\n\t\/\/\n\t\/\/ Valid values: id, name, description, owner, favorite_count, is_favorite, -id, -name, -description, -owner, -favorite_count, -is_favorite\n\tOrderBy string `url:\"orderBy,omitempty\"`\n\n\t\/\/ The index of the first item to return in a page of results (page offset).\n\t\/\/ Default: 0, Format: int64\n\tStartAt int64 `url:\"startAt,omitempty\"`\n\n\t\/\/ The maximum number of items to return per page. The maximum is 100.\n\t\/\/ Default: 50, Format: int32\n\tMaxResults int32 `url:\"maxResults,omitempty\"`\n\n\t\/\/ Use expand to include additional information about filter in the response. This parameter accepts multiple values separated by a comma:\n\t\/\/ - description Returns the description of the filter.\n\t\/\/ - favourite Returns an indicator of whether the user has set the filter as a favorite.\n\t\/\/ - favouritedCount Returns a count of how many users have set this filter as a favorite.\n\t\/\/ - jql Returns the JQL query that the filter uses.\n\t\/\/ - owner Returns the owner of the filter.\n\t\/\/ - searchUrl Returns a URL to perform the filter's JQL query.\n\t\/\/ - sharePermissions Returns the share permissions defined for the filter.\n\t\/\/ - subscriptions Returns the users that are subscribed to the filter.\n\t\/\/ - viewUrl Returns a URL to view the filter.\n\tExpand string `url:\"expand,omitempty\"`\n}\n\n\/\/ GetListWithContext retrieves all filters from Jira\nfunc (fs *FilterService) GetListWithContext(ctx context.Context) ([]*Filter, *Response, error) {\n\n\toptions := &GetQueryOptions{}\n\tapiEndpoint := \"rest\/api\/2\/filter\"\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif options != nil {\n\t\tq, err := query.Values(options)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treq.URL.RawQuery = q.Encode()\n\t}\n\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, err\n}\n\n\/\/ GetList wraps GetListWithContext using the background context.\nfunc (fs *FilterService) GetList() ([]*Filter, *Response, error) {\n\treturn fs.GetListWithContext(context.Background())\n}\n\n\/\/ GetFavouriteListWithContext retrieves the user's favourited filters from Jira\nfunc (fs *FilterService) GetFavouriteListWithContext(ctx context.Context) ([]*Filter, *Response, error) {\n\tapiEndpoint := \"rest\/api\/2\/filter\/favourite\"\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, err\n}\n\n\/\/ GetFavouriteList wraps GetFavouriteListWithContext using the background context.\nfunc (fs *FilterService) GetFavouriteList() ([]*Filter, *Response, error) {\n\treturn fs.GetFavouriteListWithContext(context.Background())\n}\n\n\/\/ GetWithContext retrieves a single Filter from Jira\nfunc (fs *FilterService) GetWithContext(ctx context.Context, filterID int) (*Filter, *Response, error) {\n\tapiEndpoint := fmt.Sprintf(\"rest\/api\/2\/filter\/%d\", filterID)\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfilter := new(Filter)\n\tresp, err := fs.client.Do(req, filter)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\n\treturn filter, resp, err\n}\n\n\/\/ Get wraps GetWithContext using the background context.\nfunc (fs *FilterService) Get(filterID int) (*Filter, *Response, error) {\n\treturn fs.GetWithContext(context.Background(), filterID)\n}\n\n\/\/ GetMyFiltersWithContext retrieves the my Filters.\n\/\/\n\/\/ https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-my-get\nfunc (fs *FilterService) GetMyFiltersWithContext(ctx context.Context, opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) {\n\tapiEndpoint := \"rest\/api\/3\/filter\/my\"\n\turl, err := addOptions(apiEndpoint, opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, nil\n}\n\n\/\/ GetMyFilters wraps GetMyFiltersWithContext using the background context.\nfunc (fs *FilterService) GetMyFilters(opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) {\n\treturn fs.GetMyFiltersWithContext(context.Background(), opts)\n}\n\n\/\/ SearchWithContext will search for filter according to the search options\n\/\/\n\/\/ Jira API docs: https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-search-get\nfunc (fs *FilterService) SearchWithContext(ctx context.Context, opt *FilterSearchOptions) (*FiltersList, *Response, error) {\n\tapiEndpoint := \"rest\/api\/3\/filter\/search\"\n\turl, err := addOptions(apiEndpoint, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfilters := new(FiltersList)\n\tresp, err := fs.client.Do(req, filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\n\treturn filters, resp, err\n}\n\n\/\/ Search wraps SearchWithContext using the background context.\nfunc (fs *FilterService) Search(opt *FilterSearchOptions) (*FiltersList, *Response, error) {\n\treturn fs.SearchWithContext(context.Background(), opt)\n}\n<commit_msg>fix: resolve nogo tautological linting error (#364)<commit_after>package jira\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ FilterService handles fields for the Jira instance \/ API.\n\/\/\n\/\/ Jira API docs: https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-group-Filter\ntype FilterService struct {\n\tclient *Client\n}\n\n\/\/ Filter represents a Filter in Jira\ntype Filter struct {\n\tSelf string `json:\"self\"`\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tOwner User `json:\"owner\"`\n\tJql string `json:\"jql\"`\n\tViewURL string `json:\"viewUrl\"`\n\tSearchURL string `json:\"searchUrl\"`\n\tFavourite bool `json:\"favourite\"`\n\tFavouritedCount int `json:\"favouritedCount\"`\n\tSharePermissions []interface{} `json:\"sharePermissions\"`\n\tSubscriptions struct {\n\t\tSize int `json:\"size\"`\n\t\tItems []interface{} `json:\"items\"`\n\t\tMaxResults int `json:\"max-results\"`\n\t\tStartIndex int `json:\"start-index\"`\n\t\tEndIndex int `json:\"end-index\"`\n\t} `json:\"subscriptions\"`\n}\n\n\/\/ GetMyFiltersQueryOptions specifies the optional parameters for the Get My Filters method\ntype GetMyFiltersQueryOptions struct {\n\tIncludeFavourites bool `url:\"includeFavourites,omitempty\"`\n\tExpand string `url:\"expand,omitempty\"`\n}\n\n\/\/ FiltersList reflects a list of filters\ntype FiltersList struct {\n\tMaxResults int `json:\"maxResults\" structs:\"maxResults\"`\n\tStartAt int `json:\"startAt\" structs:\"startAt\"`\n\tTotal int `json:\"total\" structs:\"total\"`\n\tIsLast bool `json:\"isLast\" structs:\"isLast\"`\n\tValues []FiltersListItem `json:\"values\" structs:\"values\"`\n}\n\n\/\/ FiltersListItem represents a Filter of FiltersList in Jira\ntype FiltersListItem struct {\n\tSelf string `json:\"self\"`\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tOwner User `json:\"owner\"`\n\tJql string `json:\"jql\"`\n\tViewURL string `json:\"viewUrl\"`\n\tSearchURL string `json:\"searchUrl\"`\n\tFavourite bool `json:\"favourite\"`\n\tFavouritedCount int `json:\"favouritedCount\"`\n\tSharePermissions []interface{} `json:\"sharePermissions\"`\n\tSubscriptions []struct {\n\t\tID int `json:\"id\"`\n\t\tUser User `json:\"user\"`\n\t} `json:\"subscriptions\"`\n}\n\n\/\/ FilterSearchOptions specifies the optional parameters for the Search method\n\/\/ https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-search-get\ntype FilterSearchOptions struct {\n\t\/\/ String used to perform a case-insensitive partial match with name.\n\tFilterName string `url:\"filterName,omitempty\"`\n\n\t\/\/ User account ID used to return filters with the matching owner.accountId. This parameter cannot be used with owner.\n\tAccountID string `url:\"accountId,omitempty\"`\n\n\t\/\/ Group name used to returns filters that are shared with a group that matches sharePermissions.group.groupname.\n\tGroupName string `url:\"groupname,omitempty\"`\n\n\t\/\/ Project ID used to returns filters that are shared with a project that matches sharePermissions.project.id.\n\t\/\/ Format: int64\n\tProjectID int64 `url:\"projectId,omitempty\"`\n\n\t\/\/ Orders the results using one of these filter properties.\n\t\/\/ - `description` Orders by filter `description`. Note that this ordering works independently of whether the expand to display the description field is in use.\n\t\/\/ - `favourite_count` Orders by `favouritedCount`.\n\t\/\/ - `is_favourite` Orders by `favourite`.\n\t\/\/ - `id` Orders by filter `id`.\n\t\/\/ - `name` Orders by filter `name`.\n\t\/\/ - `owner` Orders by `owner.accountId`.\n\t\/\/\n\t\/\/ Default: `name`\n\t\/\/\n\t\/\/ Valid values: id, name, description, owner, favorite_count, is_favorite, -id, -name, -description, -owner, -favorite_count, -is_favorite\n\tOrderBy string `url:\"orderBy,omitempty\"`\n\n\t\/\/ The index of the first item to return in a page of results (page offset).\n\t\/\/ Default: 0, Format: int64\n\tStartAt int64 `url:\"startAt,omitempty\"`\n\n\t\/\/ The maximum number of items to return per page. The maximum is 100.\n\t\/\/ Default: 50, Format: int32\n\tMaxResults int32 `url:\"maxResults,omitempty\"`\n\n\t\/\/ Use expand to include additional information about filter in the response. This parameter accepts multiple values separated by a comma:\n\t\/\/ - description Returns the description of the filter.\n\t\/\/ - favourite Returns an indicator of whether the user has set the filter as a favorite.\n\t\/\/ - favouritedCount Returns a count of how many users have set this filter as a favorite.\n\t\/\/ - jql Returns the JQL query that the filter uses.\n\t\/\/ - owner Returns the owner of the filter.\n\t\/\/ - searchUrl Returns a URL to perform the filter's JQL query.\n\t\/\/ - sharePermissions Returns the share permissions defined for the filter.\n\t\/\/ - subscriptions Returns the users that are subscribed to the filter.\n\t\/\/ - viewUrl Returns a URL to view the filter.\n\tExpand string `url:\"expand,omitempty\"`\n}\n\n\/\/ GetListWithContext retrieves all filters from Jira\nfunc (fs *FilterService) GetListWithContext(ctx context.Context) ([]*Filter, *Response, error) {\n\n\toptions := &GetQueryOptions{}\n\tapiEndpoint := \"rest\/api\/2\/filter\"\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tq, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.URL.RawQuery = q.Encode()\n\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, err\n}\n\n\/\/ GetList wraps GetListWithContext using the background context.\nfunc (fs *FilterService) GetList() ([]*Filter, *Response, error) {\n\treturn fs.GetListWithContext(context.Background())\n}\n\n\/\/ GetFavouriteListWithContext retrieves the user's favourited filters from Jira\nfunc (fs *FilterService) GetFavouriteListWithContext(ctx context.Context) ([]*Filter, *Response, error) {\n\tapiEndpoint := \"rest\/api\/2\/filter\/favourite\"\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, err\n}\n\n\/\/ GetFavouriteList wraps GetFavouriteListWithContext using the background context.\nfunc (fs *FilterService) GetFavouriteList() ([]*Filter, *Response, error) {\n\treturn fs.GetFavouriteListWithContext(context.Background())\n}\n\n\/\/ GetWithContext retrieves a single Filter from Jira\nfunc (fs *FilterService) GetWithContext(ctx context.Context, filterID int) (*Filter, *Response, error) {\n\tapiEndpoint := fmt.Sprintf(\"rest\/api\/2\/filter\/%d\", filterID)\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", apiEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfilter := new(Filter)\n\tresp, err := fs.client.Do(req, filter)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\n\treturn filter, resp, err\n}\n\n\/\/ Get wraps GetWithContext using the background context.\nfunc (fs *FilterService) Get(filterID int) (*Filter, *Response, error) {\n\treturn fs.GetWithContext(context.Background(), filterID)\n}\n\n\/\/ GetMyFiltersWithContext retrieves the my Filters.\n\/\/\n\/\/ https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-my-get\nfunc (fs *FilterService) GetMyFiltersWithContext(ctx context.Context, opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) {\n\tapiEndpoint := \"rest\/api\/3\/filter\/my\"\n\turl, err := addOptions(apiEndpoint, opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfilters := []*Filter{}\n\tresp, err := fs.client.Do(req, &filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\treturn filters, resp, nil\n}\n\n\/\/ GetMyFilters wraps GetMyFiltersWithContext using the background context.\nfunc (fs *FilterService) GetMyFilters(opts *GetMyFiltersQueryOptions) ([]*Filter, *Response, error) {\n\treturn fs.GetMyFiltersWithContext(context.Background(), opts)\n}\n\n\/\/ SearchWithContext will search for filter according to the search options\n\/\/\n\/\/ Jira API docs: https:\/\/developer.atlassian.com\/cloud\/jira\/platform\/rest\/v3\/#api-rest-api-3-filter-search-get\nfunc (fs *FilterService) SearchWithContext(ctx context.Context, opt *FilterSearchOptions) (*FiltersList, *Response, error) {\n\tapiEndpoint := \"rest\/api\/3\/filter\/search\"\n\turl, err := addOptions(apiEndpoint, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := fs.client.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfilters := new(FiltersList)\n\tresp, err := fs.client.Do(req, filters)\n\tif err != nil {\n\t\tjerr := NewJiraError(resp, err)\n\t\treturn nil, resp, jerr\n\t}\n\n\treturn filters, resp, err\n}\n\n\/\/ Search wraps SearchWithContext using the background context.\nfunc (fs *FilterService) Search(opt *FilterSearchOptions) (*FiltersList, *Response, error) {\n\treturn fs.SearchWithContext(context.Background(), opt)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gdb\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"github.com\/gogf\/gf\/os\/gtime\"\n)\n\n\/\/ Unscoped enables\/disables the soft deleting feature.\nfunc (m *Model) Unscoped(unscoped ...bool) *Model {\n\tmodel := m.getModel()\n\tif len(unscoped) > 0 {\n\t\tmodel.unscoped = unscoped[0]\n\t} else {\n\t\tmodel.unscoped = true\n\t}\n\treturn model\n}\n\n\/\/ Delete does \"DELETE FROM ... \" statement for the model.\n\/\/ The optional parameter <where> is the same as the parameter of Model.Where function,\n\/\/ see Model.Where.\nfunc (m *Model) Delete(where ...interface{}) (result sql.Result, err error) {\n\tif len(where) > 0 {\n\t\treturn m.Where(where[0], where[1:]...).Delete()\n\t}\n\tdefer func() {\n\t\tif err == nil {\n\t\t\tm.checkAndRemoveCache()\n\t\t}\n\t}()\n\tvar (\n\t\tfieldNameDelete = m.getSoftFieldNameDelete()\n\t\tconditionWhere, conditionExtra, conditionArgs = m.formatCondition(false)\n\t)\n\t\/\/ Soft deleting.\n\tif !m.unscoped && fieldNameDelete != \"\" {\n\t\treturn m.db.DoUpdate(\n\t\t\tm.getLink(true),\n\t\t\tm.tables,\n\t\t\tfmt.Sprintf(`%s='%s'`, m.db.QuoteString(fieldNameDelete), gtime.Now().String()),\n\t\t\tconditionWhere+conditionExtra,\n\t\t\tconditionArgs...,\n\t\t)\n\t}\n\treturn m.db.DoDelete(m.getLink(true), m.tables, conditionWhere+conditionExtra, conditionArgs...)\n}\n<commit_msg>improve function Unscoped for package gdb<commit_after>\/\/ Copyright 2017 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gdb\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"github.com\/gogf\/gf\/os\/gtime\"\n)\n\n\/\/ Unscoped disables the soft deleting feature.\nfunc (m *Model) Unscoped() *Model {\n\tmodel := m.getModel()\n\tmodel.unscoped = true\n\treturn model\n}\n\n\/\/ Delete does \"DELETE FROM ... \" statement for the model.\n\/\/ The optional parameter <where> is the same as the parameter of Model.Where function,\n\/\/ see Model.Where.\nfunc (m *Model) Delete(where ...interface{}) (result sql.Result, err error) {\n\tif len(where) > 0 {\n\t\treturn m.Where(where[0], where[1:]...).Delete()\n\t}\n\tdefer func() {\n\t\tif err == nil {\n\t\t\tm.checkAndRemoveCache()\n\t\t}\n\t}()\n\tvar (\n\t\tfieldNameDelete = m.getSoftFieldNameDelete()\n\t\tconditionWhere, conditionExtra, conditionArgs = m.formatCondition(false)\n\t)\n\t\/\/ Soft deleting.\n\tif !m.unscoped && fieldNameDelete != \"\" {\n\t\treturn m.db.DoUpdate(\n\t\t\tm.getLink(true),\n\t\t\tm.tables,\n\t\t\tfmt.Sprintf(`%s='%s'`, m.db.QuoteString(fieldNameDelete), gtime.Now().String()),\n\t\t\tconditionWhere+conditionExtra,\n\t\t\tconditionArgs...,\n\t\t)\n\t}\n\treturn m.db.DoDelete(m.getLink(true), m.tables, conditionWhere+conditionExtra, conditionArgs...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage log provides provides a minimal interface for structured logging\n*\/\npackage log\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tgklog \"github.com\/go-kit\/kit\/log\"\n\tgklevels \"github.com\/go-kit\/kit\/log\/levels\"\n)\n\nconst (\n\tloggerFormatJSON = \"json\"\n\tloggerFormatLogFmt = \"logfmt\"\n)\n\n\/\/ Logger provides provides a minimal interface for structured logging\n\/\/ It supplies leveled logging functionw which create a log event from keyvals,\n\/\/ a variadic sequence of alternating keys and values.\ntype Logger interface {\n\tDebug(keyvals ...interface{})\n\tInfo(keyvals ...interface{})\n\tError(keyvals ...interface{})\n\tWarn(keyvals ...interface{})\n\tCrit(keyvals ...interface{})\n\n\tWith(keyvals ...interface{}) Logger\n}\n\n\/\/ newLogger takes the name of the file and format of the logger as an argument, creates the file and returns\n\/\/ a leveled logger that logs to the file.\n\/\/ @format can have values logfmt or json. Default value is logfmt.\nfunc newLogger(file string, format string) Logger {\n\tfw, err := GetFile(file)\n\tif err != nil {\n\t\tlog.Fatal(\"error opening log file\",err)\n\t}\n\n\tvar l log.Logger\n\tif format == loggerFormatJSON{\n\t\tl = gklog.NewJSONLogger(fw)\n\t} else {\n\t\tl = gklog.NewLogfmtLogger(fw)\n\t}\n\n\tkitlevels := gklevels.New(\n\t\tl,\n\n\t\t\/\/ Fudge values so that switching between debug\/info levels does not\n\t\t\/\/ mess with the log justification\n\t\tgklevels.DebugValue(\"dbug\"),\n\t\tgklevels.ErrorValue(\"errr\"),\n\t)\n\n\tkitlevels = kitlevels.With(\"ts\", gklog.DefaultTimestampUTC)\n\n\treturn levels{kitlevels}\n}\n\n\/\/Return a Json Logger\nfunc NewJsonLogger(fle string) Logger {\n\treturn newLogger(fle,loggerFormatJSON)\n}\n\n\/\/Return a Fmt Logger\nfunc NewLogfmtLogger(fle string) Logger {\n\treturn newLogger(fle,loggerFormatLogFmt)\n}\n\/\/GetFile opens a file in read\/write to append data to it\nfunc GetFile(name string) (*os.File, error) {\n\treturn os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n}\n\ntype levels struct {\n\tkit gklevels.Levels\n}\n\nfunc (l levels) Debug(keyvals ...interface{}) {\n\tif err := l.kit.Debug().Log(keyvals...); err != nil {\n\t\tlog.Fatal(\"Error while logging(debug):\",err)\n\t}\n}\n\nfunc (l levels) Info(keyvals ...interface{}) {\n\tif err := l.kit.Info().Log(keyvals...); err != nil {\n\t\tlog.Fatal(\"Error while logging(info):\",err)\n\t}\n}\n\nfunc (l levels) Error(keyvals ...interface{}) {\n\tif err := l.kit.Error().Log(keyvals...); err != nil {\n\t\tlog.Fatal(\"Error while logging(error):\",err)\n\t}\n}\nfunc (l levels) Warn(keyvals ...interface{}) {\n\tif err := l.kit.Warn().Log(keyvals...); err != nil {\n\t\tlog.Fatal(\"Error while logging(warn):\",err)\n\t}\n}\nfunc (l levels) Crit(keyvals ...interface{}) {\n\tif err := l.kit.Crit().Log(keyvals...); err != nil {\n\t\tlog.Fatal(\"Error while logging(crit):\",err)\n\t}\n}\n\nfunc (l levels) With(keyvals ...interface{}) Logger {\n\treturn levels{l.kit.With(keyvals...)}\n}\n<commit_msg>changed log.Fatal() to log.Println() while logging error<commit_after>\/*\nPackage log provides provides a minimal interface for structured logging\n*\/\npackage log\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tgklog \"github.com\/go-kit\/kit\/log\"\n\tgklevels \"github.com\/go-kit\/kit\/log\/levels\"\n)\n\nconst (\n\tloggerFormatJSON = \"json\"\n\tloggerFormatLogFmt = \"logfmt\"\n)\n\n\/\/ Logger provides provides a minimal interface for structured logging\n\/\/ It supplies leveled logging functionw which create a log event from keyvals,\n\/\/ a variadic sequence of alternating keys and values.\ntype Logger interface {\n\tDebug(keyvals ...interface{})\n\tInfo(keyvals ...interface{})\n\tError(keyvals ...interface{})\n\tWarn(keyvals ...interface{})\n\tCrit(keyvals ...interface{})\n\n\tWith(keyvals ...interface{}) Logger\n}\n\n\/\/ newLogger takes the name of the file and format of the logger as an argument, creates the file and returns\n\/\/ a leveled logger that logs to the file.\n\/\/ @format can have values logfmt or json. Default value is logfmt.\nfunc newLogger(file string, format string) Logger {\n\tfw, err := GetFile(file)\n\tif err != nil {\n\t\tlog.Fatal(\"error opening log file\",err)\n\t}\n\n\tvar l log.Logger\n\tif format == loggerFormatJSON{\n\t\tl = gklog.NewJSONLogger(fw)\n\t} else {\n\t\tl = gklog.NewLogfmtLogger(fw)\n\t}\n\n\tkitlevels := gklevels.New(\n\t\tl,\n\n\t\t\/\/ Fudge values so that switching between debug\/info levels does not\n\t\t\/\/ mess with the log justification\n\t\tgklevels.DebugValue(\"dbug\"),\n\t\tgklevels.ErrorValue(\"errr\"),\n\t)\n\n\tkitlevels = kitlevels.With(\"ts\", gklog.DefaultTimestampUTC)\n\n\treturn levels{kitlevels}\n}\n\n\/\/Return a Json Logger\nfunc NewJsonLogger(fle string) Logger {\n\treturn newLogger(fle,loggerFormatJSON)\n}\n\n\/\/Return a Fmt Logger\nfunc NewLogfmtLogger(fle string) Logger {\n\treturn newLogger(fle,loggerFormatLogFmt)\n}\n\/\/GetFile opens a file in read\/write to append data to it\nfunc GetFile(name string) (*os.File, error) {\n\treturn os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n}\n\ntype levels struct {\n\tkit gklevels.Levels\n}\n\nfunc (l levels) Debug(keyvals ...interface{}) {\n\tif err := l.kit.Debug().Log(keyvals...); err != nil {\n\t\tlog.Println(\"Error while logging(debug):\",err)\n\t}\n}\n\nfunc (l levels) Info(keyvals ...interface{}) {\n\tif err := l.kit.Info().Log(keyvals...); err != nil {\n\t\tlog.Println(\"Error while logging(info):\",err)\n\t}\n}\n\nfunc (l levels) Error(keyvals ...interface{}) {\n\tif err := l.kit.Error().Log(keyvals...); err != nil {\n\t\tlog.Println(\"Error while logging(error):\",err)\n\t}\n}\nfunc (l levels) Warn(keyvals ...interface{}) {\n\tif err := l.kit.Warn().Log(keyvals...); err != nil {\n\t\tlog.Println(\"Error while logging(warn):\",err)\n\t}\n}\nfunc (l levels) Crit(keyvals ...interface{}) {\n\tif err := l.kit.Crit().Log(keyvals...); err != nil {\n\t\tlog.Println(\"Error while logging(crit):\",err)\n\t}\n}\n\nfunc (l levels) With(keyvals ...interface{}) Logger {\n\treturn levels{l.kit.With(keyvals...)}\n}\n<|endoftext|>"} {"text":"<commit_before>package htmlfiller\n\nimport (\n\t\"html\"\n\t\"strings\"\n)\n\nfunc Fill(html_ string, vals map[string]string, errors ...map[string]string) string {\n\tfor name, val := range vals {\n\t\thtml_ = FillElement(html_, name, val)\n\t}\n\tif len(errors) > 0 {\n\t\tfor name, error := range errors[0] {\n\t\t\thtml_ = FillElement(html_, name + \"_error\", error)\n\t\t}\n\t}\n\treturn html_\n}\n\nfunc hasMatchingAttr(token html.Token, attrName, val string) bool {\n for _, attr := range token.Attr {\n if attr.Key == attrName {\n return attr.Val == val\n }\n }\n return false\n}\n\nfunc hasID(token html.Token, id string) bool {\n return hasMatchingAttr(token, \"id\", id)\n}\n\nfunc hasName(token html.Token, name string) bool {\n return hasMatchingAttr(token, \"name\", name)\n}\n\nfunc hasValue(token html.Token, val string) bool {\n return hasMatchingAttr(token, \"value\", val)\n}\n\nfunc hasType(token html.Token, type_ string) bool {\n return hasMatchingAttr(token, \"type\", type_)\n}\n\nfunc setAttr(token *html.Token, attrName, val string) {\n for i, attr := range token.Attr {\n if attr.Key == attrName {\n \/\/ the token (element)'s attr already exists,\n \/\/ so give it the value\n attr.Val = val\n \/\/ and add the modified attr back into the token\n token.Attr[i] = attr\n return\n }\n }\n \/\/ if we made it down here, the attribute does not exist \n \/\/ in the token, so we must create it and set the val\n token.Attr = append(token.Attr, html.Attribute{attrName, val})\n}\n\nfunc setValue(token *html.Token, val string) {\n setAttr(token, \"value\", val)\n}\n\nfunc setSelected(token *html.Token) {\n setAttr(token, \"selected\", \"selected\")\n}\n\nfunc setChecked(token *html.Token) {\n setAttr(token, \"checked\", \"checked\")\n}\n\nfunc removeAttr(token *html.Token, attrName string) {\n for i, attr := range token.Attr {\n if attr.Key == attrName {\n \/\/ remove the attribute by slicing it out of the list\n token.Attr = append(token.Attr[:i], token.Attr[i+1:]...)\n break\n }\n }\n}\n\nfunc removeSelected(token *html.Token) {\n removeAttr(token, \"selected\")\n}\n\nfunc removeChecked(token *html.Token) {\n removeAttr(token, \"checked\")\n}\n\nfunc FillElement(html_ string, name, val string) (newHtml string) {\n\treader := strings.NewReader(html_)\n\ttokenizer := html.NewTokenizer(reader)\n\tfillNextText := false\n\tinSelect := false\n\tfor {\n\t\ttokenizer.Next()\n\t\ttoken := tokenizer.Token()\n\t\telemName := token.Data\n if token.Type == html.ErrorToken {\n \/\/ finished parsing the html\n break\n }\n switch token.Type {\n case html.StartTagToken:\n switch elemName {\n case \"textarea\":\n if hasName(token, name) {\n\t\t\t\t \/\/ the next token that is a TextToken\n\t\t\t\t \/\/ should be filled with the value\n\t\t\t\t fillNextText = true\n }\n case \"select\":\n\t\t\t if hasName(token, name) {\n\t\t\t\t \/\/ we are in the select tag, so we must\n\t\t\t\t \/\/ search for the right <option> element now\n\t\t\t\t inSelect = true\n }\n case \"option\":\n\t\t\t if inSelect {\n\t\t\t\t removeSelected(&token)\n if hasValue(token, val) {\n\t\t\t\t\t \/\/ this option element we want to set\n\t\t\t\t\t \/\/ as the default, so make it selected and\n\t\t\t\t\t \/\/ end our search\n\t\t\t\t\t setSelected(&token)\n\t\t\t\t }\n }\n case \"input\":\n token = handleInputElement(token, name, val)\n case \"span\":\n if hasID(token, name) {\n \/\/ the next token that is a TextToken\n \/\/ should be filled with the value\n fillNextText = true\n }\n }\n case html.EndTagToken:\n switch elemName {\n case \"textarea\", \"span\":\n if fillNextText {\n\t\t\t\t \/\/ there was no text token, so manually\n\t\t\t\t \/\/ insert the value\n\t\t\t\t newHtml += val\n\t\t\t\t fillNextText = false\n }\n case \"select\":\n\t\t\t if inSelect {\n inSelect = false\n }\n }\n case html.SelfClosingTagToken:\n\t switch elemName {\n case \"input\":\n token = handleInputElement(token, name, val)\n }\n case html.TextToken:\n if fillNextText {\n \/\/ fill in the text token's contents with the val\n\t\t\t token.Data = val\n\t\t\t fillNextText = false\n\t\t }\n }\n\n\t\tnewHtml += token.String()\n\t}\n\n\treturn newHtml\n}\n\nfunc handleInputElement(token html.Token, name, val string) html.Token { \n if hasName(token, name) {\n if hasType(token, \"checkbox\") || hasType(token, \"radio\") {\n \/\/ checkboxes and radio buttons are special,\n \/\/ so we must add a checked attribute to one\n \/\/ that has the given val\n if hasValue(token, val) {\n setChecked(&token)\n } else {\n if hasType(token, \"radio\") {\n \/\/ since a radio group can only have\n \/\/ one selection,\n \/\/ remove the checked attribute\n \/\/ if it does not have the given val\n removeChecked(&token)\n }\n }\n } else {\n setValue(&token, val)\n }\n }\n return token\n}\n<commit_msg>update for latest weekly<commit_after>package htmlfiller\n\nimport (\n\t\"exp\/html\"\n\t\"strings\"\n)\n\nfunc Fill(html_ string, vals map[string]string, errors ...map[string]string) string {\n\tfor name, val := range vals {\n\t\thtml_ = FillElement(html_, name, val)\n\t}\n\tif len(errors) > 0 {\n\t\tfor name, error := range errors[0] {\n\t\t\thtml_ = FillElement(html_, name + \"_error\", error)\n\t\t}\n\t}\n\treturn html_\n}\n\nfunc hasMatchingAttr(token html.Token, attrName, val string) bool {\n for _, attr := range token.Attr {\n if attr.Key == attrName {\n return attr.Val == val\n }\n }\n return false\n}\n\nfunc hasID(token html.Token, id string) bool {\n return hasMatchingAttr(token, \"id\", id)\n}\n\nfunc hasName(token html.Token, name string) bool {\n return hasMatchingAttr(token, \"name\", name)\n}\n\nfunc hasValue(token html.Token, val string) bool {\n return hasMatchingAttr(token, \"value\", val)\n}\n\nfunc hasType(token html.Token, type_ string) bool {\n return hasMatchingAttr(token, \"type\", type_)\n}\n\nfunc setAttr(token *html.Token, attrName, val string) {\n for i, attr := range token.Attr {\n if attr.Key == attrName {\n \/\/ the token (element)'s attr already exists,\n \/\/ so give it the value\n attr.Val = val\n \/\/ and add the modified attr back into the token\n token.Attr[i] = attr\n return\n }\n }\n \/\/ if we made it down here, the attribute does not exist \n \/\/ in the token, so we must create it and set the val\n token.Attr = append(token.Attr, html.Attribute{\"\", attrName, val})\n}\n\nfunc setValue(token *html.Token, val string) {\n setAttr(token, \"value\", val)\n}\n\nfunc setSelected(token *html.Token) {\n setAttr(token, \"selected\", \"selected\")\n}\n\nfunc setChecked(token *html.Token) {\n setAttr(token, \"checked\", \"checked\")\n}\n\nfunc removeAttr(token *html.Token, attrName string) {\n for i, attr := range token.Attr {\n if attr.Key == attrName {\n \/\/ remove the attribute by slicing it out of the list\n token.Attr = append(token.Attr[:i], token.Attr[i+1:]...)\n break\n }\n }\n}\n\nfunc removeSelected(token *html.Token) {\n removeAttr(token, \"selected\")\n}\n\nfunc removeChecked(token *html.Token) {\n removeAttr(token, \"checked\")\n}\n\nfunc FillElement(html_ string, name, val string) (newHtml string) {\n\treader := strings.NewReader(html_)\n\ttokenizer := html.NewTokenizer(reader)\n\tfillNextText := false\n\tinSelect := false\n\tfor {\n\t\ttokenizer.Next()\n\t\ttoken := tokenizer.Token()\n\t\telemName := token.Data\n if token.Type == html.ErrorToken {\n \/\/ finished parsing the html\n break\n }\n switch token.Type {\n case html.StartTagToken:\n switch elemName {\n case \"textarea\":\n if hasName(token, name) {\n\t\t\t\t \/\/ the next token that is a TextToken\n\t\t\t\t \/\/ should be filled with the value\n\t\t\t\t fillNextText = true\n }\n case \"select\":\n\t\t\t if hasName(token, name) {\n\t\t\t\t \/\/ we are in the select tag, so we must\n\t\t\t\t \/\/ search for the right <option> element now\n\t\t\t\t inSelect = true\n }\n case \"option\":\n\t\t\t if inSelect {\n\t\t\t\t removeSelected(&token)\n if hasValue(token, val) {\n\t\t\t\t\t \/\/ this option element we want to set\n\t\t\t\t\t \/\/ as the default, so make it selected and\n\t\t\t\t\t \/\/ end our search\n\t\t\t\t\t setSelected(&token)\n\t\t\t\t }\n }\n case \"input\":\n token = handleInputElement(token, name, val)\n case \"span\":\n if hasID(token, name) {\n \/\/ the next token that is a TextToken\n \/\/ should be filled with the value\n fillNextText = true\n }\n }\n case html.EndTagToken:\n switch elemName {\n case \"textarea\", \"span\":\n if fillNextText {\n\t\t\t\t \/\/ there was no text token, so manually\n\t\t\t\t \/\/ insert the value\n\t\t\t\t newHtml += val\n\t\t\t\t fillNextText = false\n }\n case \"select\":\n\t\t\t if inSelect {\n inSelect = false\n }\n }\n case html.SelfClosingTagToken:\n\t switch elemName {\n case \"input\":\n token = handleInputElement(token, name, val)\n }\n case html.TextToken:\n if fillNextText {\n \/\/ fill in the text token's contents with the val\n\t\t\t token.Data = val\n\t\t\t fillNextText = false\n\t\t }\n }\n\n\t\tnewHtml += token.String()\n\t}\n\n\treturn newHtml\n}\n\nfunc handleInputElement(token html.Token, name, val string) html.Token { \n if hasName(token, name) {\n if hasType(token, \"checkbox\") || hasType(token, \"radio\") {\n \/\/ checkboxes and radio buttons are special,\n \/\/ so we must add a checked attribute to one\n \/\/ that has the given val\n if hasValue(token, val) {\n setChecked(&token)\n } else {\n if hasType(token, \"radio\") {\n \/\/ since a radio group can only have\n \/\/ one selection,\n \/\/ remove the checked attribute\n \/\/ if it does not have the given val\n removeChecked(&token)\n }\n }\n } else {\n setValue(&token, val)\n }\n }\n return token\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"log\"\n\t\"io\/ioutil\"\n\t\"time\"\n\t\"net\/http\"\n\t\"flapjack\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"encoding\/json\"\n)\n\nfunc handler(newState chan flapjack.Event, w http.ResponseWriter, r *http.Request) {\n\tvar state flapjack.Event\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tmessage := \"Error: Couldn't read request body: %s\\n\"\n\t\tlog.Printf(message, err)\n\t\tfmt.Fprintf(w, message, err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &state)\n\tif err != nil {\n\t\tmessage := \"Error: Couldn't read request body: %s\\n\"\n\t\tlog.Println(message, err)\n\t\tfmt.Fprintf(w, message, err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Caching event: %+v\\n\", state)\n\n\t\/\/ Populate a time if none has been set.\n\tif state.Time == 0 {\n\t\tstate.Time = time.Now().Unix()\n\t}\n\n\tnewState <- state\n\tfmt.Fprintf(w, \"Caching event\\n\")\n}\n\nfunc cacheState(newState chan flapjack.Event, state map[string]flapjack.Event) {\n\tfor ns := range newState {\n\t\tkey := ns.Entity + \":\" + ns.Check\n\t\tstate[key] = ns\n\t}\n}\n\nfunc submitCachedState(states map[string]flapjack.Event) {\n\taddress := \"localhost:6379\"\n\tdatabase := 13\n\n\tconn, err := redis.Dial(\"tcp\", address) \/\/ Connect to Redis\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tconn.Do(\"SELECT\", database) \/\/ Switch database\n\n\tinterval := 1 * time.Second\n\n\tfor {\n\t\tfmt.Printf(\"Number of cached states: %d\\n\", len(states))\n\n\t\tfor id, state := range (states) {\n\t\t\tfmt.Printf(\"%s : %+v\\n\", id, state)\n\n\t\t\tevent := &flapjack.Event{\n\t\t\t\tEntity: state.Entity,\n\t\t\t\tCheck: state.Check,\n\t\t\t\tType: \"service\",\n\t\t\t\tState: state.State,\n\t\t\t\tSummary: state.Summary,\n\t\t\t\tTime: state.Time,\n\t\t\t}\n\n\t\t\tevent.Submit(conn)\n\t\t}\n\n\t\ttime.Sleep(interval)\n\t}\n}\n\nfunc main() {\n\tnewState := make(chan flapjack.Event)\n\tstate := map[string]flapjack.Event{}\n\n\tgo cacheState(newState, state)\n\tgo submitCachedState(state)\n\n\thttp.HandleFunc(\"\/\", func (w http.ResponseWriter, r *http.Request) {\n\t\thandler(newState, w, r)\n\t})\n\thttp.ListenAndServe(\":8080\", nil)\n}\n<commit_msg>Move Redis handling into subpackage. Ensure current timestamp on events.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"io\/ioutil\"\n\t\"time\"\n\t\"net\/http\"\n\t\"flapjack\"\n\t\"encoding\/json\"\n)\n\nfunc handler(newState chan flapjack.Event, w http.ResponseWriter, r *http.Request) {\n\tvar state flapjack.Event\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tmessage := \"Error: Couldn't read request body: %s\\n\"\n\t\tlog.Printf(message, err)\n\t\tfmt.Fprintf(w, message, err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &state)\n\tif err != nil {\n\t\tmessage := \"Error: Couldn't read request body: %s\\n\"\n\t\tlog.Println(message, err)\n\t\tfmt.Fprintf(w, message, err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Caching event: %+v\\n\", state)\n\n\t\/\/ Populate a time if none has been set.\n\tif state.Time == 0 {\n\t\tstate.Time = time.Now().Unix()\n\t}\n\n\tnewState <- state\n\tfmt.Fprintf(w, \"Caching event\\n\")\n}\n\nfunc cacheState(newState chan flapjack.Event, state map[string]flapjack.Event) {\n\tfor ns := range newState {\n\t\tkey := ns.Entity + \":\" + ns.Check\n\t\tstate[key] = ns\n\t}\n}\n\nfunc submitCachedState(states map[string]flapjack.Event) {\n\ttransport, err := flapjack.Dial(\"localhost:6379\", 13)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfor {\n\t\tfmt.Printf(\"Number of cached states: %d\\n\", len(states))\n\t\tfor id, state := range(states) {\n\t\t\tevent := flapjack.Event{\n\t\t\t\tEntity: state.Entity,\n\t\t\t\tCheck: state.Check,\n\t\t\t\tType: \"service\",\n\t\t\t\tState: state.State,\n\t\t\t\tSummary: state.Summary,\n\t\t\t\tTime: time.Now().Unix(),\n\t\t\t}\n\n\t\t\tfmt.Printf(\"%s : %+v\\n\", id, event)\n\t\t\ttransport.Send(event)\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc main() {\n\tnewState := make(chan flapjack.Event)\n\tstate := map[string]flapjack.Event{}\n\n\tgo cacheState(newState, state)\n\tgo submitCachedState(state)\n\n\thttp.HandleFunc(\"\/\", func (w http.ResponseWriter, r *http.Request) {\n\t\thandler(newState, w, r)\n\t})\n\thttp.ListenAndServe(\":8080\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package httpclient\n\nimport (\n\t\"bufio\"\n\t\"container\/list\"\n\t\"errors\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ returns the current version\nfunc Version() string {\n\treturn \"0.2\"\n}\n\ntype connCache struct {\n\tdl *list.List\n\toutstanding int\n}\n\n\/\/ HttpClient wraps Go's built in HTTP client providing an API to:\n\/\/ * set connect timeout\n\/\/ * set read\/write timeout\n\/\/ * easy access to the connection object for a given request\n\/\/\n\/\/ TODO: https support\ntype HttpClient struct {\n\tsync.RWMutex\n\tclient *http.Client\n\tcachedConns map[string]*connCache\n\tconnMap map[*http.Request]net.Conn\n\tConnectTimeout time.Duration\n\tReadWriteTimeout time.Duration\n\tMaxRedirects int\n\tMaxConnsPerHost int\n}\n\nfunc New() *HttpClient {\n\tclient := &http.Client{}\n\th := &HttpClient{\n\t\tclient: client,\n\t\tcachedConns: make(map[string]*connCache),\n\t\tconnMap: make(map[*http.Request]net.Conn),\n\t\tConnectTimeout: 5 * time.Second,\n\t\tReadWriteTimeout: 5 * time.Second,\n\t\tMaxConnsPerHost: 5,\n\t}\n\n\tredirFunc := func(r *http.Request, v []*http.Request) error {\n\t\treturn h.redirectPolicy(r, v)\n\t}\n\n\ttransport := &http.Transport{}\n\ttransport.RegisterProtocol(\"hc_http\", h)\n\n\tclient.CheckRedirect = redirFunc\n\tclient.Transport = transport\n\n\treturn h\n}\n\nfunc (h *HttpClient) redirectPolicy(req *http.Request, via []*http.Request) error {\n\tif len(via) >= h.MaxRedirects {\n\t\treturn errors.New(\"stopped after 3 redirects\")\n\t}\n\treturn nil\n}\n\nfunc (h *HttpClient) RoundTrip(req *http.Request) (*http.Response, error) {\n\tvar c net.Conn\n\tvar err error\n\n\taddr := req.URL.Host\n\tif !hasPort(addr) {\n\t\taddr = addr + \":80\"\n\t}\n\n\tc, err = h.checkConnCache(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c == nil {\n\t\tlog.Printf(\" dialing...\")\n\t\tc, err = net.DialTimeout(\"tcp\", addr, h.ConnectTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\th.Lock()\n\th.connMap[req] = c\n\th.Unlock()\n\n\treturn h.exec(c, req)\n}\n\nfunc (h *HttpClient) checkConnCache(addr string) (net.Conn, error) {\n\tvar c net.Conn\n\n\th.Lock()\n\tdefer h.Unlock()\n\n\tlog.Printf(\"checking cache for %s\", addr)\n\n\tcc, ok := h.cachedConns[addr]\n\tif ok {\n\t\tlog.Printf(\" found entry in cache\")\n\t\t\/\/ address is in map, check the connection list\n\t\te := cc.dl.Front()\n\t\tif e != nil {\n\t\t\tlog.Printf(\" found cached connection\")\n\t\t\tcc.dl.Remove(e)\n\t\t\tc = e.Value.(net.Conn)\n\t\t}\n\t} else {\n\t\tlog.Printf(\" creating new entry in cache\")\n\t\t\/\/ this client hasnt seen this address before\n\t\tcc = &connCache{\n\t\t\tdl: list.New(),\n\t\t}\n\t\th.cachedConns[addr] = cc\n\t}\n\n\t\/\/ TODO: implement accounting for outstanding connections\n\tif cc.outstanding > h.MaxConnsPerHost {\n\t\treturn nil, errors.New(\"too many outstanding conns on this addr\")\n\t}\n\n\treturn c, nil\n}\n\nfunc (h *HttpClient) cacheConn(addr string, conn net.Conn) error {\n\th.Lock()\n\tdefer h.Unlock()\n\n\tlog.Printf(\"returning connection to cache for %s\", addr)\n\tcc, ok := h.cachedConns[addr]\n\tif !ok {\n\t\treturn errors.New(\"addr %s not in cache map\")\n\t}\n\tcc.dl.PushBack(conn)\n\n\treturn nil\n}\n\nfunc (h *HttpClient) exec(conn net.Conn, req *http.Request) (*http.Response, error) {\n\tdeadline := time.Now().Add(h.ReadWriteTimeout)\n\tconn.SetDeadline(deadline)\n\n\tbw := bufio.NewWriter(conn)\n\tbr := bufio.NewReader(conn)\n\n\terr := req.Write(bw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbw.Flush()\n\n\treturn http.ReadResponse(br, req)\n}\n\nfunc (h *HttpClient) GetConn(req *http.Request) (net.Conn, error) {\n\th.RLock()\n\tdefer h.RUnlock()\n\n\tconn, ok := h.connMap[req]\n\tif !ok {\n\t\treturn nil, errors.New(\"connection not in map\")\n\t}\n\n\treturn conn, nil\n}\n\nfunc (h *HttpClient) Do(req *http.Request) (*http.Response, error) {\n\t\/\/ h@x0r Go's http client to use our RoundTripper\n\treq.URL.Scheme = \"hc_http\"\n\n\tresp, err := h.client.Do(req)\n\tif err != nil {\n\t\tconn, _ := h.GetConn(req)\n\t\tif conn == nil {\n\t\t\tlog.Panicf(\"PANIC: could not find connection for failed request\")\n\t\t}\n\n\t\tconn.Close()\n\n\t\th.Lock()\n\t\tdelete(h.connMap, req)\n\t\th.Unlock()\n\t}\n\n\treturn resp, err\n}\n\nfunc (h *HttpClient) FinishRequest(req *http.Request) error {\n\tconn, err := h.GetConn(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.Lock()\n\tdelete(h.connMap, req)\n\th.Unlock()\n\n\treturn h.cacheConn(req.URL.Host, conn)\n}\n\n\/\/ Given a string of the form \"host\", \"host:port\", or \"[ipv6::address]:port\",\n\/\/ return true if the string includes a port.\nfunc hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }\n<commit_msg>canonicalize addr<commit_after>package httpclient\n\nimport (\n\t\"bufio\"\n\t\"container\/list\"\n\t\"errors\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ returns the current version\nfunc Version() string {\n\treturn \"0.3\"\n}\n\ntype connCache struct {\n\tdl *list.List\n\toutstanding int\n}\n\n\/\/ HttpClient wraps Go's built in HTTP client providing an API to:\n\/\/ * set connect timeout\n\/\/ * set read\/write timeout\n\/\/ * easy access to the connection object for a given request\n\/\/\n\/\/ TODO: https support\ntype HttpClient struct {\n\tsync.RWMutex\n\tclient *http.Client\n\tcachedConns map[string]*connCache\n\tconnMap map[*http.Request]net.Conn\n\tConnectTimeout time.Duration\n\tReadWriteTimeout time.Duration\n\tMaxRedirects int\n\tMaxConnsPerHost int\n}\n\nfunc New() *HttpClient {\n\tclient := &http.Client{}\n\th := &HttpClient{\n\t\tclient: client,\n\t\tcachedConns: make(map[string]*connCache),\n\t\tconnMap: make(map[*http.Request]net.Conn),\n\t\tConnectTimeout: 5 * time.Second,\n\t\tReadWriteTimeout: 5 * time.Second,\n\t\tMaxConnsPerHost: 5,\n\t}\n\n\tredirFunc := func(r *http.Request, v []*http.Request) error {\n\t\treturn h.redirectPolicy(r, v)\n\t}\n\n\ttransport := &http.Transport{}\n\ttransport.RegisterProtocol(\"hc_http\", h)\n\n\tclient.CheckRedirect = redirFunc\n\tclient.Transport = transport\n\n\treturn h\n}\n\nfunc (h *HttpClient) redirectPolicy(req *http.Request, via []*http.Request) error {\n\tif len(via) >= h.MaxRedirects {\n\t\treturn errors.New(\"stopped after 3 redirects\")\n\t}\n\treturn nil\n}\n\nfunc (h *HttpClient) RoundTrip(req *http.Request) (*http.Response, error) {\n\tvar c net.Conn\n\tvar err error\n\n\taddr := canonicalAddr(req.URL.Host)\n\tc, err = h.checkConnCache(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c == nil {\n\t\tlog.Printf(\" dialing...\")\n\t\tc, err = net.DialTimeout(\"tcp\", addr, h.ConnectTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\th.Lock()\n\th.connMap[req] = c\n\th.Unlock()\n\n\treturn h.exec(c, req)\n}\n\nfunc (h *HttpClient) checkConnCache(addr string) (net.Conn, error) {\n\tvar c net.Conn\n\n\th.Lock()\n\tdefer h.Unlock()\n\n\tlog.Printf(\"checking cache for %s\", addr)\n\n\tcc, ok := h.cachedConns[addr]\n\tif ok {\n\t\tlog.Printf(\" found entry in cache\")\n\t\t\/\/ address is in map, check the connection list\n\t\te := cc.dl.Front()\n\t\tif e != nil {\n\t\t\tlog.Printf(\" found cached connection\")\n\t\t\tcc.dl.Remove(e)\n\t\t\tc = e.Value.(net.Conn)\n\t\t}\n\t} else {\n\t\tlog.Printf(\" creating new entry in cache\")\n\t\t\/\/ this client hasnt seen this address before\n\t\tcc = &connCache{\n\t\t\tdl: list.New(),\n\t\t}\n\t\th.cachedConns[addr] = cc\n\t}\n\n\t\/\/ TODO: implement accounting for outstanding connections\n\tif cc.outstanding > h.MaxConnsPerHost {\n\t\treturn nil, errors.New(\"too many outstanding conns on this addr\")\n\t}\n\n\treturn c, nil\n}\n\nfunc (h *HttpClient) cacheConn(addr string, conn net.Conn) error {\n\th.Lock()\n\tdefer h.Unlock()\n\n\tlog.Printf(\"returning connection to cache for %s\", addr)\n\tcc, ok := h.cachedConns[addr]\n\tif !ok {\n\t\treturn errors.New(\"addr %s not in cache map\")\n\t}\n\tcc.dl.PushBack(conn)\n\n\treturn nil\n}\n\nfunc (h *HttpClient) exec(conn net.Conn, req *http.Request) (*http.Response, error) {\n\tdeadline := time.Now().Add(h.ReadWriteTimeout)\n\tconn.SetDeadline(deadline)\n\n\tbw := bufio.NewWriter(conn)\n\tbr := bufio.NewReader(conn)\n\n\terr := req.Write(bw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbw.Flush()\n\n\treturn http.ReadResponse(br, req)\n}\n\nfunc (h *HttpClient) GetConn(req *http.Request) (net.Conn, error) {\n\th.RLock()\n\tdefer h.RUnlock()\n\n\tconn, ok := h.connMap[req]\n\tif !ok {\n\t\treturn nil, errors.New(\"connection not in map\")\n\t}\n\n\treturn conn, nil\n}\n\nfunc (h *HttpClient) Do(req *http.Request) (*http.Response, error) {\n\t\/\/ h@x0r Go's http client to use our RoundTripper\n\treq.URL.Scheme = \"hc_http\"\n\n\tresp, err := h.client.Do(req)\n\tif err != nil {\n\t\tconn, _ := h.GetConn(req)\n\t\tif conn == nil {\n\t\t\tlog.Panicf(\"PANIC: could not find connection for failed request\")\n\t\t}\n\n\t\tconn.Close()\n\n\t\th.Lock()\n\t\tdelete(h.connMap, req)\n\t\th.Unlock()\n\t}\n\n\treturn resp, err\n}\n\nfunc (h *HttpClient) FinishRequest(req *http.Request) error {\n\tconn, err := h.GetConn(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.Lock()\n\tdelete(h.connMap, req)\n\th.Unlock()\n\n\treturn h.cacheConn(canonicalAddr(req.URL.Host), conn)\n}\n\nfunc canonicalAddr(s string) string {\n\tif !hasPort(s) {\n\t\ts = s + \":80\"\n\t}\n\treturn s\n}\n\n\/\/ Given a string of the form \"host\", \"host:port\", or \"[ipv6::address]:port\",\n\/\/ return true if the string includes a port.\nfunc hasPort(s string) bool { return strings.LastIndex(s, \":\") > strings.LastIndex(s, \"]\") }\n<|endoftext|>"} {"text":"<commit_before>package pipeline\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/colinrgodsey\/step-daemon\/gcode\"\n\t\"github.com\/colinrgodsey\/step-daemon\/io\"\n\t\"github.com\/colinrgodsey\/step-daemon\/physics\"\n\t\"github.com\/colinrgodsey\/step-daemon\/vec\"\n)\n\nconst syncTimeout = 10 * 60 \/\/ seconds\n\ntype deltaHandler struct {\n\thead, tail io.Conn\n\tsyncC chan vec.Vec4\n\n\tpos vec.Vec4\n\tfr, frScale float64\n\tabs bool\n}\n\nfunc (h *deltaHandler) headRead(msg io.Any) {\n\tswitch msg := msg.(type) {\n\tcase gcode.GCode:\n\t\tswitch {\n\t\tcase msg.IsG(0), msg.IsG(1): \/\/ move\n\t\t\th.procGMove(msg)\n\t\t\treturn\n\t\tcase msg.IsG(28): \/\/ home\n\t\t\tdefer h.headRead(gcode.New('M', 114)) \/\/ get pos after\n\t\tcase msg.IsG(29): \/\/ z probe\n\t\t\tdefer h.headRead(gcode.New('G', 28)) \/\/ home after\n\t\tcase msg.IsG(90): \/\/ set absolute\n\t\t\th.info(\"setting to absolute coords\")\n\t\t\th.abs = true\n\t\tcase msg.IsG(91): \/\/ set relative\n\t\t\th.info(\"setting to relative coords\")\n\t\t\th.abs = false\n\t\tcase msg.IsG(92): \/\/ set pos\n\t\t\th.pos = msg.Args.GetVec4(h.pos)\n\t\tcase msg.IsM(114): \/\/ get pos\n\t\t\th.syncC = make(chan vec.Vec4)\n\t\t\tdefer h.getPos()\n\t\tcase msg.IsM(220): \/\/ set feedrate\n\t\t\tif x, ok := msg.Args.GetFloat('S'); ok {\n\t\t\t\th.frScale = x \/ 100.0\n\t\t\t\th.info(\"setting feedrate scale to %v\", h.frScale)\n\t\t\t}\n\t\t}\n\t}\n\th.tail.Write(msg)\n}\n\nfunc (h *deltaHandler) tailRead(msg io.Any) {\n\tbailParse := func(err error) {\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to parse string: \" + err.Error())\n\t\t}\n\t}\n\n\tswitch msg := msg.(type) {\n\tcase string:\n\t\tswitch {\n\t\tcase strings.Index(msg, \"X:\") == 0 && strings.Index(msg, \" Count \") > 0:\n\t\t\t\/\/ X:0.00 Y:0.00 Z:10.00 E:0.00 Count X:0 Y:0 Z:16000\n\t\t\tspl := strings.Split(msg, \" \")\n\t\t\tx, err := strconv.ParseFloat(string(spl[0][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\ty, err := strconv.ParseFloat(string(spl[1][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\tz, err := strconv.ParseFloat(string(spl[2][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\te, err := strconv.ParseFloat(string(spl[3][2:]), 64)\n\t\t\tbailParse(err)\n\n\t\t\t\/\/TODO: still not happy about this pattern\n\t\t\tif h.syncC != nil {\n\t\t\t\th.info(\"syncd with device position\")\n\t\t\t\th.syncC <- vec.NewVec4(x, y, z, e)\n\t\t\t}\n\t\t}\n\t}\n\th.head.Write(msg)\n}\n\n\/\/TODO: i hate this, replace this later\nfunc (h *deltaHandler) getPos() {\n\th.info(\"syncing with device position\")\n\tselect {\n\tcase pos := <-h.syncC:\n\t\th.pos = pos\n\tcase <-time.After(syncTimeout * time.Second):\n\t\tpanic(\"timed out while syncing position\")\n\t}\n\th.syncC = nil\n\th.headRead(gcode.New('G', 92, gcode.ArgV(h.pos)...))\n}\n\nfunc (h *deltaHandler) procGMove(g gcode.GCode) {\n\tvar newPos vec.Vec4\n\tif h.abs {\n\t\tnewPos = g.Args.GetVec4(h.pos)\n\t} else {\n\t\tnewPos = g.Args.GetVec4(vec.Vec4{}).Add(h.pos)\n\t}\n\tif f, ok := g.Args.GetFloat('F'); ok {\n\t\th.fr = f * h.frScale \/ 60.0\n\t}\n\tif newPos.Eq(h.pos) {\n\t\treturn\n\t}\n\n\tm := physics.NewMove(h.pos, newPos, h.fr)\n\th.pos = newPos\n\tif h.fr != 0 {\n\t\th.tail.Write(m)\n\t} else {\n\t\th.info(\"skipped move with 0 feedrate\")\n\t}\n}\n\nfunc (h *deltaHandler) info(s string, args ...interface{}) {\n\th.head.Write(fmt.Sprintf(\"info:\"+s, args...))\n}\n\nfunc DeltaHandler(head, tail io.Conn) {\n\th := deltaHandler{\n\t\thead: head, tail: tail,\n\t\tfrScale: 1.0,\n\t}\n\n\tgo func() {\n\t\tfor msg := range tail.Rc() {\n\t\t\th.tailRead(msg)\n\t\t}\n\t}()\n\n\tfor msg := range head.Rc() {\n\t\th.headRead(msg)\n\t}\n}\n<commit_msg>sync pattern<commit_after>package pipeline\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/colinrgodsey\/step-daemon\/gcode\"\n\t\"github.com\/colinrgodsey\/step-daemon\/io\"\n\t\"github.com\/colinrgodsey\/step-daemon\/physics\"\n\t\"github.com\/colinrgodsey\/step-daemon\/vec\"\n)\n\nconst syncTimeout = 10 * 60 \/\/ seconds\n\ntype deltaHandler struct {\n\thead, tail io.Conn\n\tsyncC chan vec.Vec4\n\n\tpos vec.Vec4\n\tfr, frScale float64\n\tabs bool\n}\n\nfunc (h *deltaHandler) headRead(msg io.Any) {\n\tswitch msg := msg.(type) {\n\tcase gcode.GCode:\n\t\tswitch {\n\t\tcase msg.IsG(0), msg.IsG(1): \/\/ move\n\t\t\th.procGMove(msg)\n\t\t\treturn\n\t\tcase msg.IsG(28): \/\/ home\n\t\t\tdefer h.headRead(gcode.New('M', 114)) \/\/ get pos after\n\t\tcase msg.IsG(29): \/\/ z probe\n\t\t\tdefer h.headRead(gcode.New('G', 28)) \/\/ home after\n\t\tcase msg.IsG(90): \/\/ set absolute\n\t\t\th.info(\"setting to absolute coords\")\n\t\t\th.abs = true\n\t\tcase msg.IsG(91): \/\/ set relative\n\t\t\th.info(\"setting to relative coords\")\n\t\t\th.abs = false\n\t\tcase msg.IsG(92): \/\/ set pos\n\t\t\th.pos = msg.Args.GetVec4(h.pos)\n\t\tcase msg.IsM(114): \/\/ get pos\n\t\t\tc := make(chan vec.Vec4)\n\t\t\th.syncC = c\n\t\t\tdefer h.getPos(c)\n\t\tcase msg.IsM(220): \/\/ set feedrate\n\t\t\tif x, ok := msg.Args.GetFloat('S'); ok {\n\t\t\t\th.frScale = x \/ 100.0\n\t\t\t\th.info(\"setting feedrate scale to %v\", h.frScale)\n\t\t\t}\n\t\t}\n\t}\n\th.tail.Write(msg)\n}\n\nfunc (h *deltaHandler) tailRead(msg io.Any) {\n\tbailParse := func(err error) {\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to parse string: \" + err.Error())\n\t\t}\n\t}\n\n\tswitch msg := msg.(type) {\n\tcase string:\n\t\tswitch {\n\t\tcase strings.Index(msg, \"X:\") == 0 && strings.Index(msg, \" Count \") > 0:\n\t\t\t\/\/ X:0.00 Y:0.00 Z:10.00 E:0.00 Count X:0 Y:0 Z:16000\n\t\t\tspl := strings.Split(msg, \" \")\n\t\t\tx, err := strconv.ParseFloat(string(spl[0][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\ty, err := strconv.ParseFloat(string(spl[1][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\tz, err := strconv.ParseFloat(string(spl[2][2:]), 64)\n\t\t\tbailParse(err)\n\t\t\te, err := strconv.ParseFloat(string(spl[3][2:]), 64)\n\t\t\tbailParse(err)\n\n\t\t\t\/\/TODO: still not happy about this pattern\n\t\t\tif h.syncC != nil {\n\t\t\t\th.info(\"syncd with device position\")\n\t\t\t\th.syncC <- vec.NewVec4(x, y, z, e)\n\t\t\t\th.syncC = nil\n\t\t\t}\n\t\t}\n\t}\n\th.head.Write(msg)\n}\n\n\/\/TODO: i hate this, replace this later\nfunc (h *deltaHandler) getPos(c <-chan vec.Vec4) {\n\th.info(\"syncing with device position\")\n\tselect {\n\tcase pos := <-c:\n\t\th.pos = pos\n\tcase <-time.After(syncTimeout * time.Second):\n\t\tpanic(\"timed out while syncing position\")\n\t}\n\th.syncC = nil\n\th.headRead(gcode.New('G', 92, gcode.ArgV(h.pos)...))\n}\n\nfunc (h *deltaHandler) procGMove(g gcode.GCode) {\n\tvar newPos vec.Vec4\n\tif h.abs {\n\t\tnewPos = g.Args.GetVec4(h.pos)\n\t} else {\n\t\tnewPos = g.Args.GetVec4(vec.Vec4{}).Add(h.pos)\n\t}\n\tif f, ok := g.Args.GetFloat('F'); ok {\n\t\th.fr = f * h.frScale \/ 60.0\n\t}\n\tif newPos.Eq(h.pos) {\n\t\treturn\n\t}\n\n\tm := physics.NewMove(h.pos, newPos, h.fr)\n\th.pos = newPos\n\tif h.fr != 0 {\n\t\th.tail.Write(m)\n\t} else {\n\t\th.info(\"skipped move with 0 feedrate\")\n\t}\n}\n\nfunc (h *deltaHandler) info(s string, args ...interface{}) {\n\th.head.Write(fmt.Sprintf(\"info:\"+s, args...))\n}\n\nfunc DeltaHandler(head, tail io.Conn) {\n\th := deltaHandler{\n\t\thead: head, tail: tail,\n\t\tfrScale: 1.0,\n\t}\n\n\tgo func() {\n\t\tfor msg := range tail.Rc() {\n\t\t\th.tailRead(msg)\n\t\t}\n\t}()\n\n\tfor msg := range head.Rc() {\n\t\th.headRead(msg)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package metainfo\n\nimport (\n\t\"io\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n)\n\ntype MetaInfo struct {\n\tInfoBytes bencode.Bytes `bencode:\"info,omitempty\"` \/\/ BEP 3\n\tAnnounce string `bencode:\"announce,omitempty\"` \/\/ BEP 3\n\tAnnounceList AnnounceList `bencode:\"announce-list,omitempty\"` \/\/ BEP 12\n\tNodes []Node `bencode:\"nodes,omitempty\"` \/\/ BEP 5\n\t\/\/ Where's this specified? Mentioned at\n\t\/\/ https:\/\/wiki.theory.org\/index.php\/BitTorrentSpecification: (optional) the creation time of\n\t\/\/ the torrent, in standard UNIX epoch format (integer, seconds since 1-Jan-1970 00:00:00 UTC)\n\tCreationDate int64 `bencode:\"creation date,omitempty,ignore_unmarshal_type_error\"`\n\tComment string `bencode:\"comment,omitempty\"`\n\tCreatedBy string `bencode:\"created by,omitempty\"`\n\tEncoding string `bencode:\"encoding,omitempty\"`\n\tUrlList UrlList `bencode:\"url-list,omitempty\"` \/\/ BEP 19\n}\n\n\/\/ Load a MetaInfo from an io.Reader. Returns a non-nil error in case of\n\/\/ failure.\nfunc Load(r io.Reader) (*MetaInfo, error) {\n\tvar mi MetaInfo\n\td := bencode.NewDecoder(r)\n\terr := d.Decode(&mi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mi, nil\n}\n\n\/\/ Convenience function for loading a MetaInfo from a file.\nfunc LoadFromFile(filename string) (*MetaInfo, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn Load(f)\n}\n\nfunc (mi MetaInfo) UnmarshalInfo() (info Info, err error) {\n\terr = bencode.Unmarshal(mi.InfoBytes, &info)\n\treturn\n}\n\nfunc (mi MetaInfo) HashInfoBytes() (infoHash Hash) {\n\treturn HashBytes(mi.InfoBytes)\n}\n\n\/\/ Encode to bencoded form.\nfunc (mi MetaInfo) Write(w io.Writer) error {\n\treturn bencode.NewEncoder(w).Encode(mi)\n}\n\n\/\/ Set good default values in preparation for creating a new MetaInfo file.\nfunc (mi *MetaInfo) SetDefaults() {\n\tmi.Comment = \"\"\n\tmi.CreatedBy = \"github.com\/anacrolix\/torrent\"\n\tmi.CreationDate = time.Now().Unix()\n\t\/\/ mi.Info.PieceLength = 256 * 1024\n}\n\n\/\/ Creates a Magnet from a MetaInfo. Optional infohash and parsed info can be provided.\nfunc (mi *MetaInfo) Magnet(infoHash *Hash, info *Info) (m Magnet) {\n\tfor _, t := range mi.UpvertedAnnounceList().DistinctValues() {\n\t\tm.Trackers = append(m.Trackers, t)\n\t}\n\tif info != nil {\n\t\tm.DisplayName = info.Name\n\t}\n\tif infoHash != nil {\n\t\tm.InfoHash = *infoHash\n\t} else {\n\t\tm.InfoHash = mi.HashInfoBytes()\n\t}\n\tm.Params = make(url.Values)\n\tm.Params[\"ws\"] = mi.UrlList\n\treturn\n}\n\n\/\/ Returns the announce list converted from the old single announce field if\n\/\/ necessary.\nfunc (mi *MetaInfo) UpvertedAnnounceList() AnnounceList {\n\tif mi.AnnounceList.OverridesAnnounce(mi.Announce) {\n\t\treturn mi.AnnounceList\n\t}\n\tif mi.Announce != \"\" {\n\t\treturn [][]string{{mi.Announce}}\n\t}\n\treturn nil\n}\n<commit_msg>Fix DeepSource anti-pattern<commit_after>package metainfo\n\nimport (\n\t\"io\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n)\n\ntype MetaInfo struct {\n\tInfoBytes bencode.Bytes `bencode:\"info,omitempty\"` \/\/ BEP 3\n\tAnnounce string `bencode:\"announce,omitempty\"` \/\/ BEP 3\n\tAnnounceList AnnounceList `bencode:\"announce-list,omitempty\"` \/\/ BEP 12\n\tNodes []Node `bencode:\"nodes,omitempty\"` \/\/ BEP 5\n\t\/\/ Where's this specified? Mentioned at\n\t\/\/ https:\/\/wiki.theory.org\/index.php\/BitTorrentSpecification: (optional) the creation time of\n\t\/\/ the torrent, in standard UNIX epoch format (integer, seconds since 1-Jan-1970 00:00:00 UTC)\n\tCreationDate int64 `bencode:\"creation date,omitempty,ignore_unmarshal_type_error\"`\n\tComment string `bencode:\"comment,omitempty\"`\n\tCreatedBy string `bencode:\"created by,omitempty\"`\n\tEncoding string `bencode:\"encoding,omitempty\"`\n\tUrlList UrlList `bencode:\"url-list,omitempty\"` \/\/ BEP 19\n}\n\n\/\/ Load a MetaInfo from an io.Reader. Returns a non-nil error in case of\n\/\/ failure.\nfunc Load(r io.Reader) (*MetaInfo, error) {\n\tvar mi MetaInfo\n\td := bencode.NewDecoder(r)\n\terr := d.Decode(&mi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mi, nil\n}\n\n\/\/ Convenience function for loading a MetaInfo from a file.\nfunc LoadFromFile(filename string) (*MetaInfo, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn Load(f)\n}\n\nfunc (mi MetaInfo) UnmarshalInfo() (info Info, err error) {\n\terr = bencode.Unmarshal(mi.InfoBytes, &info)\n\treturn\n}\n\nfunc (mi MetaInfo) HashInfoBytes() (infoHash Hash) {\n\treturn HashBytes(mi.InfoBytes)\n}\n\n\/\/ Encode to bencoded form.\nfunc (mi MetaInfo) Write(w io.Writer) error {\n\treturn bencode.NewEncoder(w).Encode(mi)\n}\n\n\/\/ Set good default values in preparation for creating a new MetaInfo file.\nfunc (mi *MetaInfo) SetDefaults() {\n\tmi.Comment = \"\"\n\tmi.CreatedBy = \"github.com\/anacrolix\/torrent\"\n\tmi.CreationDate = time.Now().Unix()\n\t\/\/ mi.Info.PieceLength = 256 * 1024\n}\n\n\/\/ Creates a Magnet from a MetaInfo. Optional infohash and parsed info can be provided.\nfunc (mi *MetaInfo) Magnet(infoHash *Hash, info *Info) (m Magnet) {\n\tm.Trackers = append(m.Trackers, mi.UpvertedAnnounceList().DistinctValues()...)\n\tif info != nil {\n\t\tm.DisplayName = info.Name\n\t}\n\tif infoHash != nil {\n\t\tm.InfoHash = *infoHash\n\t} else {\n\t\tm.InfoHash = mi.HashInfoBytes()\n\t}\n\tm.Params = make(url.Values)\n\tm.Params[\"ws\"] = mi.UrlList\n\treturn\n}\n\n\/\/ Returns the announce list converted from the old single announce field if\n\/\/ necessary.\nfunc (mi *MetaInfo) UpvertedAnnounceList() AnnounceList {\n\tif mi.AnnounceList.OverridesAnnounce(mi.Announce) {\n\t\treturn mi.AnnounceList\n\t}\n\tif mi.Announce != \"\" {\n\t\treturn [][]string{{mi.Announce}}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sql\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDryRunSelect(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\ta.Nil(dryRunSelect(r, testDB))\n}\n\nfunc TestDescribeTables(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e := describeTables(r, testDB)\n\ta.NoError(e)\n\ta.Equal(21, len(fts))\n\n\tr, e = newParser().Parse(`SELECT Churn, churn.churn.Partner,TotalCharges FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e = describeTables(r, testDB)\n\ta.NoError(e)\n\ta.Equal(3, len(fts))\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, fts[\"Churn\"][\"churn.churn\"])\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, fts[\"Partner\"][\"churn.churn\"])\n\ta.Equal(\"FLOAT\", fts[\"TotalCharges\"][\"churn.churn\"])\n}\n\nfunc TestIndexSelectFields(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tf := indexSelectFields(r)\n\ta.Equal(0, len(f))\n\n\tr, e = newParser().Parse(`SELECT f FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tf = indexSelectFields(r)\n\ta.Equal(1, len(f))\n\ta.Equal(map[string]string{}, f[\"f\"])\n\n\tr, e = newParser().Parse(`SELECT t1.f, t2.f, g FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tf = indexSelectFields(r)\n\ta.Equal(2, len(f))\n\ta.Equal(map[string]string{}, f[\"g\"])\n\ta.Equal(\"\", f[\"f\"][\"t1\"])\n\ta.Equal(\"\", f[\"f\"][\"t2\"])\n}\n\nfunc TestVerify(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT Churn, churn.churn.Partner FROM churn.churn LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e := verify(r, testDB)\n\ta.NoError(e)\n\ta.Equal(2, len(fts))\n\ttyp, ok := fts.get(\"Churn\")\n\ta.Equal(true, ok)\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, typ)\n\n\ttyp, ok = fts.get(\"churn.churn.Partner\")\n\ta.Equal(true, ok)\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, typ)\n\n\t_, ok = fts.get(\"churn.churn.gender\")\n\ta.Equal(false, ok)\n\n\t_, ok = fts.get(\"gender\")\n\ta.Equal(false, ok)\n}\n\nfunc TestVerifyColumnNameAndType(t *testing.T) {\n\ta := assert.New(t)\n\ttrainParse, e := newParser().Parse(`SELECT gender, tenure, TotalCharges\nFROM churn.churn LIMIT 10\nTRAIN DNNClassifier\nWITH\n n_classes = 3,\n hidden_units = [10, 20]\nCOLUMN gender, tenure, TotalCharges\nLABEL class\nINTO my_dnn_model;`)\n\ta.NoError(e)\n\n\tpredParse, e := newParser().Parse(`SELECT gender, tenure, TotalCharges\nFROM churn.churn LIMIT 10\nPREDICT iris.predict.class\nUSING my_dnn_model;`)\n\ta.NoError(e)\n\ta.NoError(verifyColumnNameAndType(trainParse, predParse, testDB))\n\n\tpredParse, e = newParser().Parse(`SELECT gender, tenure\nFROM churn.churn LIMIT 10\nPREDICT iris.predict.class\nUSING my_dnn_model;`)\n\ta.NoError(e)\n\ta.EqualError(verifyColumnNameAndType(trainParse, predParse, testDB),\n\t\t\"predFields doesn't contain column TotalCharges\")\n}\n\nfunc TestDescribeEmptyTables(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM iris.iris_empty LIMIT 10;`)\n\ta.NoError(e)\n\t_, e = describeTables(r, testDB)\n\ta.EqualError(e, \"table is Empty. table name: iris.iris_empty\")\n}\n<commit_msg>update all churn.churn<commit_after>package sql\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDryRunSelect(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\ta.Nil(dryRunSelect(r, testDB))\n}\n\nfunc TestDescribeTables(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e := describeTables(r, testDB)\n\ta.NoError(e)\n\ta.Equal(21, len(fts))\n\n\tr, e = newParser().Parse(`SELECT Churn, churn.train.Partner,TotalCharges FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e = describeTables(r, testDB)\n\ta.NoError(e)\n\ta.Equal(3, len(fts))\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, fts[\"Churn\"][\"churn.train\"])\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, fts[\"Partner\"][\"churn.train\"])\n\ta.Equal(\"FLOAT\", fts[\"TotalCharges\"][\"churn.train\"])\n}\n\nfunc TestIndexSelectFields(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tf := indexSelectFields(r)\n\ta.Equal(0, len(f))\n\n\tr, e = newParser().Parse(`SELECT f FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tf = indexSelectFields(r)\n\ta.Equal(1, len(f))\n\ta.Equal(map[string]string{}, f[\"f\"])\n\n\tr, e = newParser().Parse(`SELECT t1.f, t2.f, g FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tf = indexSelectFields(r)\n\ta.Equal(2, len(f))\n\ta.Equal(map[string]string{}, f[\"g\"])\n\ta.Equal(\"\", f[\"f\"][\"t1\"])\n\ta.Equal(\"\", f[\"f\"][\"t2\"])\n}\n\nfunc TestVerify(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT Churn, churn.train.Partner FROM churn.train LIMIT 10;`)\n\ta.NoError(e)\n\tfts, e := verify(r, testDB)\n\ta.NoError(e)\n\ta.Equal(2, len(fts))\n\ttyp, ok := fts.get(\"Churn\")\n\ta.Equal(true, ok)\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, typ)\n\n\ttyp, ok = fts.get(\"churn.train.Partner\")\n\ta.Equal(true, ok)\n\ta.Contains([]string{\"VARCHAR(255)\", \"VARCHAR\"}, typ)\n\n\t_, ok = fts.get(\"churn.train.gender\")\n\ta.Equal(false, ok)\n\n\t_, ok = fts.get(\"gender\")\n\ta.Equal(false, ok)\n}\n\nfunc TestVerifyColumnNameAndType(t *testing.T) {\n\ta := assert.New(t)\n\ttrainParse, e := newParser().Parse(`SELECT gender, tenure, TotalCharges\nFROM churn.train LIMIT 10\nTRAIN DNNClassifier\nWITH\n n_classes = 3,\n hidden_units = [10, 20]\nCOLUMN gender, tenure, TotalCharges\nLABEL class\nINTO my_dnn_model;`)\n\ta.NoError(e)\n\n\tpredParse, e := newParser().Parse(`SELECT gender, tenure, TotalCharges\nFROM churn.train LIMIT 10\nPREDICT iris.predict.class\nUSING my_dnn_model;`)\n\ta.NoError(e)\n\ta.NoError(verifyColumnNameAndType(trainParse, predParse, testDB))\n\n\tpredParse, e = newParser().Parse(`SELECT gender, tenure\nFROM churn.train LIMIT 10\nPREDICT iris.predict.class\nUSING my_dnn_model;`)\n\ta.NoError(e)\n\ta.EqualError(verifyColumnNameAndType(trainParse, predParse, testDB),\n\t\t\"predFields doesn't contain column TotalCharges\")\n}\n\nfunc TestDescribeEmptyTables(t *testing.T) {\n\ta := assert.New(t)\n\tr, e := newParser().Parse(`SELECT * FROM iris.iris_empty LIMIT 10;`)\n\ta.NoError(e)\n\t_, e = describeTables(r, testDB)\n\ta.EqualError(e, \"table is Empty. table name: iris.iris_empty\")\n}\n<|endoftext|>"} {"text":"<commit_before>package backup\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/mhausenblas\/reshifter\/pkg\/types\"\n\t\"github.com\/mhausenblas\/reshifter\/pkg\/util\"\n)\n\nvar (\n\ttmpTestDir = \"test\/\"\n\tstoretests = []struct {\n\t\tpath string\n\t\tval string\n\t}{\n\t\t{\"\", \"\"},\n\t\t{\"non-valid-key\", \"\"},\n\t\t{\"\/\", \"root\"},\n\t\t{\"\/\" + tmpTestDir, \"some\"},\n\t\t{\"\/\" + tmpTestDir + \"\/first-level\", \"another\"},\n\t\t{\"\/\" + tmpTestDir + \"\/this:also\", \"escaped\"},\n\t}\n)\n\nfunc TestStore(t *testing.T) {\n\tfor _, tt := range storetests {\n\t\tp, err := store(\".\", tt.path, tt.val)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tc, _ := ioutil.ReadFile(p)\n\t\tgot := string(c)\n\t\tif tt.path == \"\/\" {\n\t\t\t_ = os.Remove(p)\n\t\t}\n\t\twant := tt.val\n\t\tif got != want {\n\t\t\tt.Errorf(\"backup.store(\\\".\\\", %q, %q) => %q, want %q\", tt.path, tt.val, got, want)\n\t\t}\n\t}\n\t\/\/ make sure to clean up remaining directories:\n\t_ = os.RemoveAll(tmpTestDir)\n}\n\nfunc TestBackupv2(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing insecure etcd 2:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd2Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd2Backup(t, port, tetcd, types.KubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd2Backup(t, port, tetcd, types.OpenShiftPrefix+\"\/builds\")\n}\n\nfunc TestBackupv3(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing insecure etcd3:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd3Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd3Backup(t, port, tetcd, types.KubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd3Backup(t, port, tetcd, types.OpenShiftPrefix+\"\/builds\")\n}\n\nfunc TestBackupv2inv3(t *testing.T) {\n\tdefer func() { _ = util.EtcdDown() }()\n\tport := \"4001\"\n\t\/\/ testing insecure etcd 3:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\n\t\/\/ adding key using v2 API in an etcd3\n\t_, err := util.LaunchEtcd3(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc2, err := util.NewClient2(tetcd, false)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd3 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tkapi := client.NewKeysAPI(c2)\n\tkey := types.LegacyKubernetesPrefix + \"\/namespaces\/kube-system\"\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tkey = types.LegacyKubernetesPrefix + \"\/ThirdPartyResourceData\/stable.example.com\/crontabs\/dohnto\/my-new-cron-object\"\n\tval = \"{\\\"kind\\\":\\\"ThirdPartyResource\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc TestBackupSecure(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing secure etcd 2:\n\ttetcd := \"https:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd2Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n}\n\nfunc etcd2Backup(t *testing.T, port, tetcd, key string) {\n\tdefer func() { _ = util.EtcdDown() }()\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_CERT\", filepath.Join(util.Certsdir(), \"client.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_KEY\", filepath.Join(util.Certsdir(), \"client-key.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CA_CERT\", filepath.Join(util.Certsdir(), \"ca.pem\"))\n\tsecure, err := util.LaunchEtcd2(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc2, err := util.NewClient2(tetcd, secure)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd2 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tkapi := client.NewKeysAPI(c2)\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tif strings.HasPrefix(key, types.OpenShiftPrefix) { \/\/ make sure to add vanilla key as well\n\t\t_, err = kapi.Set(context.Background(), types.KubernetesPrefix+\"\/namespaces\/kube-system\", val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\t\treturn\n\t\t}\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc etcd3Backup(t *testing.T, port, tetcd, key string) {\n\tdefer func() { _ = util.EtcdDown() }()\n\t_ = os.Setenv(\"ETCDCTL_API\", \"3\")\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_CERT\", filepath.Join(util.Certsdir(), \"client.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_KEY\", filepath.Join(util.Certsdir(), \"client-key.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CA_CERT\", filepath.Join(util.Certsdir(), \"ca.pem\"))\n\tsecure, err := util.LaunchEtcd3(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc3, err := util.NewClient3(tetcd, secure)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd3 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\t_, err = c3.Put(context.Background(), key, val)\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tif strings.HasPrefix(key, types.OpenShiftPrefix) { \/\/ make sure to add vanilla key as well\n\t\t_, err = c3.Put(context.Background(), types.KubernetesPrefix+\"\/namespaces\/kube-system\", val)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\t\treturn\n\t\t}\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc genentry(etcdversion string, distro types.KubernetesDistro) (string, string, error) {\n\tswitch distro {\n\tcase types.Vanilla:\n\t\tif etcdversion == \"2\" {\n\t\t\treturn types.LegacyKubernetesPrefix + \"\/namespaces\/kube-system\", \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\t\t}\n\t\treturn types.KubernetesPrefix + \"\/namespaces\/kube-system\", \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\tcase types.OpenShift:\n\t\treturn types.OpenShiftPrefix + \"\/builds\", \"{\\\"kind\\\":\\\"Build\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\tdefault:\n\t\treturn \"\", \"\", fmt.Errorf(\"That's not a Kubernetes distro\")\n\t}\n}\n<commit_msg>adds unit tests for filter backup strategy<commit_after>package backup\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/mhausenblas\/reshifter\/pkg\/types\"\n\t\"github.com\/mhausenblas\/reshifter\/pkg\/util\"\n)\n\nvar (\n\ttmpTestDir = \"test\/\"\n\tstoretests = []struct {\n\t\tpath string\n\t\tval string\n\t}{\n\t\t{\"\", \"\"},\n\t\t{\"non-valid-key\", \"\"},\n\t\t{\"\/\", \"root\"},\n\t\t{\"\/\" + tmpTestDir, \"some\"},\n\t\t{\"\/\" + tmpTestDir + \"\/first-level\", \"another\"},\n\t\t{\"\/\" + tmpTestDir + \"\/this:also\", \"escaped\"},\n\t}\n\tfiltertests = []struct {\n\t\tfilter string\n\t\tpath string\n\t\texists bool\n\t}{\n\t\t{\"filter:abc\", \"\/abc\", true},\n\t\t{\"filter:abc\", \"\/def\", false},\n\t\t{\"filter:name\", \"\/some\/name\", true},\n\t\t{\"filter:abc,def\", \"\/some\/other\/def\", true},\n\t}\n)\n\nfunc TestStore(t *testing.T) {\n\tfor _, tt := range storetests {\n\t\tp, err := store(\".\", tt.path, tt.val)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tc, _ := ioutil.ReadFile(p)\n\t\tgot := string(c)\n\t\tif tt.path == \"\/\" {\n\t\t\t_ = os.Remove(p)\n\t\t}\n\t\twant := tt.val\n\t\tif got != want {\n\t\t\tt.Errorf(\"backup.store(\\\".\\\", %q, %q) => %q, want %q\", tt.path, tt.val, got, want)\n\t\t}\n\t}\n\t\/\/ make sure to clean up remaining directories:\n\t_ = os.RemoveAll(tmpTestDir)\n}\n\nfunc TestBackupv2(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing insecure etcd 2:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd2Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd2Backup(t, port, tetcd, types.KubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd2Backup(t, port, tetcd, types.OpenShiftPrefix+\"\/builds\")\n}\n\nfunc TestBackupv3(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing insecure etcd3:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd3Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd3Backup(t, port, tetcd, types.KubernetesPrefix+\"\/namespaces\/kube-system\")\n\tetcd3Backup(t, port, tetcd, types.OpenShiftPrefix+\"\/builds\")\n}\n\nfunc TestBackupv2inv3(t *testing.T) {\n\tdefer func() { _ = util.EtcdDown() }()\n\tport := \"4001\"\n\t\/\/ testing insecure etcd 3:\n\ttetcd := \"http:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\n\t\/\/ adding key using v2 API in an etcd3\n\t_, err := util.LaunchEtcd3(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc2, err := util.NewClient2(tetcd, false)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd3 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tkapi := client.NewKeysAPI(c2)\n\tkey := types.LegacyKubernetesPrefix + \"\/namespaces\/kube-system\"\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tkey = types.LegacyKubernetesPrefix + \"\/ThirdPartyResourceData\/stable.example.com\/crontabs\/dohnto\/my-new-cron-object\"\n\tval = \"{\\\"kind\\\":\\\"ThirdPartyResource\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc TestBackupSecure(t *testing.T) {\n\tport := \"4001\"\n\t\/\/ testing secure etcd 2:\n\ttetcd := \"https:\/\/127.0.0.1:\" + port\n\t\/\/ backing up to remote https:\/\/play.minio.io:9000:\n\t_ = os.Setenv(\"ACCESS_KEY_ID\", \"Q3AM3UQ867SPQQA43P2F\")\n\t_ = os.Setenv(\"SECRET_ACCESS_KEY\", \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\")\n\tetcd2Backup(t, port, tetcd, types.LegacyKubernetesPrefix+\"\/namespaces\/kube-system\")\n}\n\nfunc TestFilters(t *testing.T) {\n\tfor _, ft := range filtertests {\n\t\t_ = os.Setenv(\"RS_BACKUP_STRATEGY\", ft.filter)\n\t\t_ = filter(ft.path, \".\", \"\/tmp\")\n\t\tgot := true\n\t\tif _, err := os.Stat(\"\/tmp\" + ft.path); os.IsNotExist(err) {\n\t\t\tgot = false\n\t\t}\n\t\twant := ft.exists\n\t\tif got != want {\n\t\t\tt.Errorf(\"backup.filter(\\\"%s\\\", \\\".\\\", \\\"\/tmp\\\") with %s => %t, want %t\", ft.path, ft.filter, got, want)\n\t\t}\n\t}\n\n}\n\nfunc etcd2Backup(t *testing.T, port, tetcd, key string) {\n\tdefer func() { _ = util.EtcdDown() }()\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_CERT\", filepath.Join(util.Certsdir(), \"client.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_KEY\", filepath.Join(util.Certsdir(), \"client-key.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CA_CERT\", filepath.Join(util.Certsdir(), \"ca.pem\"))\n\tsecure, err := util.LaunchEtcd2(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc2, err := util.NewClient2(tetcd, secure)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd2 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tkapi := client.NewKeysAPI(c2)\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\t_, err = kapi.Set(context.Background(), key, val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tif strings.HasPrefix(key, types.OpenShiftPrefix) { \/\/ make sure to add vanilla key as well\n\t\t_, err = kapi.Set(context.Background(), types.KubernetesPrefix+\"\/namespaces\/kube-system\", val, &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\t\treturn\n\t\t}\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc etcd3Backup(t *testing.T, port, tetcd, key string) {\n\tdefer func() { _ = util.EtcdDown() }()\n\t_ = os.Setenv(\"ETCDCTL_API\", \"3\")\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_CERT\", filepath.Join(util.Certsdir(), \"client.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CLIENT_KEY\", filepath.Join(util.Certsdir(), \"client-key.pem\"))\n\t_ = os.Setenv(\"RS_ETCD_CA_CERT\", filepath.Join(util.Certsdir(), \"ca.pem\"))\n\tsecure, err := util.LaunchEtcd3(tetcd, port)\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tc3, err := util.NewClient3(tetcd, secure)\n\tif err != nil {\n\t\tt.Errorf(\"Can't connect to local etcd3 at %s: %s\", tetcd, err)\n\t\treturn\n\t}\n\tval := \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\"\n\tif err != nil {\n\t\tt.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\t_, err = c3.Put(context.Background(), key, val)\n\tif err != nil {\n\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\treturn\n\t}\n\tif strings.HasPrefix(key, types.OpenShiftPrefix) { \/\/ make sure to add vanilla key as well\n\t\t_, err = c3.Put(context.Background(), types.KubernetesPrefix+\"\/namespaces\/kube-system\", val)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Can't create etcd entry %s=%s: %s\", key, val, err)\n\t\t\treturn\n\t\t}\n\t}\n\tbackupid, err := Backup(tetcd, types.DefaultWorkDir, \"play.minio.io:9000\", \"reshifter-test-cluster\")\n\tif err != nil {\n\t\tt.Errorf(\"Error during backup: %s\", err)\n\t\treturn\n\t}\n\topath, _ := filepath.Abs(filepath.Join(types.DefaultWorkDir, backupid))\n\t_, err = os.Stat(opath + \".zip\")\n\tif err != nil {\n\t\tt.Errorf(\"No archive found: %s\", err)\n\t}\n\t\/\/ make sure to clean up:\n\t_ = os.Remove(opath + \".zip\")\n}\n\nfunc genentry(etcdversion string, distro types.KubernetesDistro) (string, string, error) {\n\tswitch distro {\n\tcase types.Vanilla:\n\t\tif etcdversion == \"2\" {\n\t\t\treturn types.LegacyKubernetesPrefix + \"\/namespaces\/kube-system\", \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\t\t}\n\t\treturn types.KubernetesPrefix + \"\/namespaces\/kube-system\", \"{\\\"kind\\\":\\\"Namespace\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\tcase types.OpenShift:\n\t\treturn types.OpenShiftPrefix + \"\/builds\", \"{\\\"kind\\\":\\\"Build\\\",\\\"apiVersion\\\":\\\"v1\\\"}\", nil\n\tdefault:\n\t\treturn \"\", \"\", fmt.Errorf(\"That's not a Kubernetes distro\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package zk\n\nimport (\n\tlog \"github.com\/funkygao\/log4go\"\n\t\"github.com\/funkygao\/zkclient\"\n)\n\nvar (\n\t_ zkclient.ZkDataListener = &leaderElector{}\n)\n\ntype leaderElector struct {\n\tctx *controller\n\n\tleaderID string \/\/ participant id of the leader\n\n\tonResigningAsLeader func()\n\tonBecomingLeader func()\n}\n\nfunc newLeaderElector(ctx *controller, onBecomingLeader func(), onResigningAsLeader func()) *leaderElector {\n\treturn &leaderElector{\n\t\tctx: ctx,\n\t\tonBecomingLeader: onBecomingLeader,\n\t\tonResigningAsLeader: onResigningAsLeader,\n\t}\n}\n\nfunc (l *leaderElector) startup() {\n\t\/\/ watch for leader changes\n\t\/\/ leader also need watch leader changes because of network partition\n\tl.ctx.zc.SubscribeDataChanges(l.ctx.kb.leader(), l)\n\tl.elect()\n}\n\nfunc (l *leaderElector) fetchLeaderID() string {\n\tb, err := l.ctx.zc.Get(l.ctx.kb.leader())\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}\n\nfunc (l *leaderElector) elect() (win bool) {\n\t\/\/ we can get here during the initial startup and the HandleDataDeleted callback.\n\t\/\/ because of the potential race condition, it's possible that the leader has already\n\t\/\/ been elected when we get here.\n\tl.leaderID = l.fetchLeaderID()\n\tif l.leaderID != \"\" {\n\t\tlog.Trace(\"[%s] found leader: %s, quit elect\", l.ctx.participant, l.leaderID)\n\t\treturn\n\t}\n\n\tlog.Trace(\"[%s] elect...\", l.ctx.participant)\n\n\tif err := l.ctx.zc.CreateLiveNode(l.ctx.kb.leader(), l.ctx.participant.Marshal(), 2); err == nil {\n\t\tlog.Trace(\"[%s] elect win!\", l.ctx.participant)\n\n\t\twin = true\n\t\tl.leaderID = l.ctx.participant.Endpoint\n\t\tl.onBecomingLeader()\n\t} else {\n\t\tl.leaderID = l.fetchLeaderID() \/\/ refresh leader id\n\t\tif l.leaderID == \"\" {\n\t\t\tlog.Warn(\"[%s] a leader has been elected but just resigned, this will lead to another round of election\", l.ctx.participant)\n\t\t} else {\n\t\t\tlog.Trace(\"[%s] elect lose to %s :-)\", l.ctx.participant, l.leaderID)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (l *leaderElector) close() {\n\t\/\/ needn't delete \/controller znode because when\n\t\/\/ zkclient closes the ephemeral znode will disappear automatically\n\tl.leaderID = \"\"\n\tl.ctx.zc.UnsubscribeDataChanges(l.ctx.kb.leader(), l)\n}\n\nfunc (l *leaderElector) amLeader() bool {\n\treturn l.leaderID == l.ctx.participant.Endpoint\n}\n\nfunc (l *leaderElector) HandleDataChange(dataPath string, lastData []byte) error {\n\tl.leaderID = l.fetchLeaderID()\n\tlog.Trace(\"[%s] new leader is %s\", l.ctx.participant, l.leaderID)\n\treturn nil\n}\n\nfunc (l *leaderElector) HandleDataDeleted(dataPath string) error {\n\tlog.Trace(\"[%s] leader[%s] gone!\", l.ctx.participant, l.leaderID)\n\n\tif l.amLeader() {\n\t\tl.onResigningAsLeader()\n\t}\n\n\tl.elect()\n\treturn nil\n}\n<commit_msg>bug fix: call onResigningAsLeader if leader resign<commit_after>package zk\n\nimport (\n\tlog \"github.com\/funkygao\/log4go\"\n\t\"github.com\/funkygao\/zkclient\"\n)\n\nvar (\n\t_ zkclient.ZkDataListener = &leaderElector{}\n)\n\ntype leaderElector struct {\n\tctx *controller\n\n\tleaderID string \/\/ participant id of the leader\n\n\tonResigningAsLeader func()\n\tonBecomingLeader func()\n}\n\nfunc newLeaderElector(ctx *controller, onBecomingLeader func(), onResigningAsLeader func()) *leaderElector {\n\treturn &leaderElector{\n\t\tctx: ctx,\n\t\tonBecomingLeader: onBecomingLeader,\n\t\tonResigningAsLeader: onResigningAsLeader,\n\t}\n}\n\nfunc (l *leaderElector) startup() {\n\t\/\/ watch for leader changes\n\t\/\/ leader also need watch leader changes because of network partition\n\tl.ctx.zc.SubscribeDataChanges(l.ctx.kb.leader(), l)\n\tl.elect()\n}\n\nfunc (l *leaderElector) fetchLeaderID() string {\n\tb, err := l.ctx.zc.Get(l.ctx.kb.leader())\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}\n\nfunc (l *leaderElector) elect() (win bool) {\n\t\/\/ we can get here during the initial startup and the HandleDataDeleted callback.\n\t\/\/ because of the potential race condition, it's possible that the leader has already\n\t\/\/ been elected when we get here.\n\tl.leaderID = l.fetchLeaderID()\n\tif l.leaderID != \"\" {\n\t\tlog.Trace(\"[%s] found leader: %s, quit elect\", l.ctx.participant, l.leaderID)\n\t\treturn\n\t}\n\n\tlog.Trace(\"[%s] elect...\", l.ctx.participant)\n\n\tif err := l.ctx.zc.CreateLiveNode(l.ctx.kb.leader(), l.ctx.participant.Marshal(), 2); err == nil {\n\t\tlog.Trace(\"[%s] elect win!\", l.ctx.participant)\n\n\t\twin = true\n\t\tl.leaderID = l.ctx.participant.Endpoint\n\t\tl.onBecomingLeader()\n\t} else {\n\t\tl.leaderID = l.fetchLeaderID() \/\/ refresh leader id\n\t\tif l.leaderID == \"\" {\n\t\t\tlog.Warn(\"[%s] a leader has been elected but just resigned, this will lead to another round of election\", l.ctx.participant)\n\t\t} else {\n\t\t\tlog.Trace(\"[%s] elect lose to %s :-)\", l.ctx.participant, l.leaderID)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (l *leaderElector) close() {\n\t\/\/ needn't delete \/controller znode because when\n\t\/\/ zkclient closes the ephemeral znode will disappear automatically\n\tl.leaderID = \"\"\n\tl.ctx.zc.UnsubscribeDataChanges(l.ctx.kb.leader(), l)\n}\n\nfunc (l *leaderElector) amLeader() bool {\n\treturn l.leaderID == l.ctx.participant.Endpoint\n}\n\nfunc (l *leaderElector) HandleDataChange(dataPath string, lastData []byte) error {\n\twasLeader := l.amLeader()\n\tl.leaderID = l.fetchLeaderID()\n\tamLeader := l.amLeader()\n\tif wasLeader && !amLeader {\n\t\tl.onResigningAsLeader()\n\t}\n\n\tlog.Trace(\"[%s] new leader is %s\", l.ctx.participant, l.leaderID)\n\treturn nil\n}\n\nfunc (l *leaderElector) HandleDataDeleted(dataPath string) error {\n\tlog.Trace(\"[%s] leader[%s] gone!\", l.ctx.participant, l.leaderID)\n\n\tif l.amLeader() {\n\t\tl.onResigningAsLeader()\n\t}\n\n\tl.elect()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/logging\"\n)\n\ntype authzMiddleware struct {\n\tpolicyProvider authz.PolicyProvider\n\tlogger *logrus.Entry\n}\n\nfunc (m authzMiddleware) Handle(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\tpolicy := m.policyProvider.ProvideAuthzPolicy()\n\t\tif err := policy.IsAllowed(r); err != nil {\n\t\t\tm.logger.WithError(err).Debug(\"Failed to pass authz policy\")\n\t\t\tWriteResponse(rw, APIResponse{Error: err})\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(rw, r)\n\t})\n}\n\ntype RequireAuthz func(h http.Handler, p authz.PolicyProvider) http.Handler\n\nfunc NewRequireAuthzFactory(loggerFactory logging.Factory) RequireAuthz {\n\treturn func(h http.Handler, p authz.PolicyProvider) http.Handler {\n\t\tm := authzMiddleware{\n\t\t\tpolicyProvider: p,\n\t\t\tlogger: loggerFactory.NewLogger(\"authz\"),\n\t\t}\n\t\treturn m.Handle(h)\n\t}\n}\n<commit_msg>Restore setting try refresh token header<commit_after>package handler\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/authn\"\n\tcorehttp \"github.com\/skygeario\/skygear-server\/pkg\/core\/http\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/logging\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/skyerr\"\n)\n\ntype authzMiddleware struct {\n\tpolicyProvider authz.PolicyProvider\n\tlogger *logrus.Entry\n}\n\nfunc (m authzMiddleware) Handle(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\tpolicy := m.policyProvider.ProvideAuthzPolicy()\n\t\tif err := policy.IsAllowed(r); err != nil {\n\t\t\tm.logger.WithError(err).Debug(\"Failed to pass authz policy\")\n\t\t\t\/\/ NOTE(louis): In case the policy returns this error\n\t\t\t\/\/ write a header to hint the client SDK to try refresh.\n\t\t\tif !authn.IsValidAuthn(r.Context()) && skyerr.IsKind(err, authz.NotAuthenticated) {\n\t\t\t\trw.Header().Set(corehttp.HeaderTryRefreshToken, \"true\")\n\t\t\t}\n\t\t\tWriteResponse(rw, APIResponse{Error: err})\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(rw, r)\n\t})\n}\n\ntype RequireAuthz func(h http.Handler, p authz.PolicyProvider) http.Handler\n\nfunc NewRequireAuthzFactory(loggerFactory logging.Factory) RequireAuthz {\n\treturn func(h http.Handler, p authz.PolicyProvider) http.Handler {\n\t\tm := authzMiddleware{\n\t\t\tpolicyProvider: p,\n\t\t\tlogger: loggerFactory.NewLogger(\"authz\"),\n\t\t}\n\t\treturn m.Handle(h)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage labels\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype LabelsSuite struct{}\n\nvar _ = Suite(&LabelsSuite{})\n\nvar (\n\tlblsArray = []string{`unspec:%=%ed`, `unspec:\/\/=\/=`, `unspec:foo=bar`, `unspec:foo2==bar2`, `unspec:foo=====`, `unspec:foo\\\\==\\=`, `unspec:key=`}\n\tlbls = Labels{\n\t\t\"foo\": NewLabel(\"foo\", \"bar\", LabelSourceUnspec),\n\t\t\"foo2\": NewLabel(\"foo2\", \"=bar2\", LabelSourceUnspec),\n\t\t\"key\": NewLabel(\"key\", \"\", LabelSourceUnspec),\n\t\t\"foo==\": NewLabel(\"foo==\", \"==\", LabelSourceUnspec),\n\t\t`foo\\\\=`: NewLabel(`foo\\\\=`, `\\=`, LabelSourceUnspec),\n\t\t`\/\/=\/`: NewLabel(`\/\/=\/`, \"\", LabelSourceUnspec),\n\t\t`%`: NewLabel(`%`, `%ed`, LabelSourceUnspec),\n\t}\n\n\tDefaultLabelSourceKeyPrefix = LabelSourceAny + \".\"\n)\n\nfunc (s *LabelsSuite) TestSHA256Sum(c *C) {\n\tstr := lbls.SHA256Sum()\n\tc.Assert(str, Equals, \"756e737065633a253d2565643b756e737065633a2f2f3d2f3d3b756e737065633a666f6f3d6261723b756e737065633a666f6f323d3d626172323b756e737065633a666f6f3d3d3d3d3d3b756e737065633a666f6f5c5c3d3d5c3d3b756e737065633a6b65793d3bc672b8d1ef56ed28ab87c3622c5114069bdd3ad7b8f9737498d0c01ecef0967a\")\n}\n\nfunc (s *LabelsSuite) TestSortMap(c *C) {\n\tlblsString := strings.Join(lblsArray, \";\")\n\tlblsString += \";\"\n\tsortedMap := lbls.sortedList()\n\tc.Assert(sortedMap, DeepEquals, []byte(lblsString))\n}\n\ntype lblTest struct {\n\tlabel string\n\tresult *Label\n}\n\nfunc (s *LabelsSuite) TestMap2Labels(c *C) {\n\tm := Map2Labels(map[string]string{\n\t\t\"k8s:foo\": \"bar\",\n\t\t\"k8s:foo2\": \"=bar2\",\n\t\t\"key\": \"\",\n\t\t\"foo==\": \"==\",\n\t\t`foo\\\\=`: `\\=`,\n\t\t`\/\/=\/`: \"\",\n\t\t`%`: `%ed`,\n\t}, LabelSourceUnspec)\n\tc.Assert(m, DeepEquals, lbls)\n}\n\nfunc (s *LabelsSuite) TestMergeLabels(c *C) {\n\tto := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value1\", \"source1\"),\n\t\t\"key2\": NewLabel(\"key2\", \"value3\", \"source4\"),\n\t}\n\tfrom := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value3\", \"source4\"),\n\t}\n\twant := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value3\", \"source4\"),\n\t\t\"key2\": NewLabel(\"key2\", \"value3\", \"source4\"),\n\t}\n\tto.MergeLabels(from)\n\tfrom[\"key1\"].Value = \"changed\"\n\tc.Assert(to, DeepEquals, want)\n}\n\nfunc (s *LabelsSuite) TestParseLabel(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout *Label\n\t}{\n\t\t{\"source1:key1=value1\", NewLabel(\"key1\", \"value1\", \"source1\")},\n\t\t{\"key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceUnspec)},\n\t\t{\"value1\", NewLabel(\"value1\", \"\", LabelSourceUnspec)},\n\t\t{\"source1:key1\", NewLabel(\"key1\", \"\", \"source1\")},\n\t\t{\"source1:key1==value1\", NewLabel(\"key1\", \"=value1\", \"source1\")},\n\t\t{\"source::key1=value1\", NewLabel(\"::key1\", \"value1\", \"source\")},\n\t\t{\"$key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceReserved)},\n\t\t{\"1foo\", NewLabel(\"1foo\", \"\", LabelSourceUnspec)},\n\t\t{\":2foo\", NewLabel(\"2foo\", \"\", LabelSourceUnspec)},\n\t\t{\":3foo=\", NewLabel(\"3foo\", \"\", LabelSourceUnspec)},\n\t\t{\"4blah=:foo=\", NewLabel(\"foo\", \"\", \"4blah=\")},\n\t\t{\"5blah::foo=\", NewLabel(\"::foo\", \"\", \"5blah\")},\n\t\t{\"6foo==\", NewLabel(\"6foo\", \"=\", LabelSourceUnspec)},\n\t\t{\"7foo=bar\", NewLabel(\"7foo\", \"bar\", LabelSourceUnspec)},\n\t\t{\"k8s:foo=bar:\", NewLabel(\"foo\", \"bar:\", \"k8s\")},\n\t\t{LabelSourceReservedKeyPrefix + \"host\", NewLabel(\"host\", \"\", LabelSourceReserved)},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := ParseLabel(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n\nfunc (s *LabelsSuite) TestParseSelectLabel(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout *Label\n\t}{\n\t\t{\"source1:key1=value1\", NewLabel(\"key1\", \"value1\", \"source1\")},\n\t\t{\"key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceAny)},\n\t\t{\"value1\", NewLabel(\"value1\", \"\", LabelSourceAny)},\n\t\t{\"source1:key1\", NewLabel(\"key1\", \"\", \"source1\")},\n\t\t{\"source1:key1==value1\", NewLabel(\"key1\", \"=value1\", \"source1\")},\n\t\t{\"source::key1=value1\", NewLabel(\"::key1\", \"value1\", \"source\")},\n\t\t{\"$key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceReserved)},\n\t\t{\"1foo\", NewLabel(\"1foo\", \"\", LabelSourceAny)},\n\t\t{\":2foo\", NewLabel(\"2foo\", \"\", LabelSourceAny)},\n\t\t{\":3foo=\", NewLabel(\"3foo\", \"\", LabelSourceAny)},\n\t\t{\"4blah=:foo=\", NewLabel(\"foo\", \"\", \"4blah=\")},\n\t\t{\"5blah::foo=\", NewLabel(\"::foo\", \"\", \"5blah\")},\n\t\t{\"6foo==\", NewLabel(\"6foo\", \"=\", LabelSourceAny)},\n\t\t{\"7foo=bar\", NewLabel(\"7foo\", \"bar\", LabelSourceAny)},\n\t\t{\"k8s:foo=bar:\", NewLabel(\"foo\", \"bar:\", \"k8s\")},\n\t\t{LabelSourceReservedKeyPrefix + \"host\", NewLabel(\"host\", \"\", LabelSourceReserved)},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := ParseSelectLabel(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n\nfunc (s *LabelsSuite) TestLabel(c *C) {\n\tvar label Label\n\n\tlongLabel := `{\"source\": \"kubernetes\", \"key\": \"io.kubernetes.pod.name\", \"value\": \"foo\"}`\n\tinvLabel := `{\"source\": \"kubernetes\", \"value\": \"foo\"}`\n\tshortLabel := `\"web\"`\n\n\terr := json.Unmarshal([]byte(longLabel), &label)\n\tc.Assert(err, Equals, nil)\n\tc.Assert(label.Source, Equals, \"kubernetes\")\n\tc.Assert(label.Value, Equals, \"foo\")\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(invLabel), &label)\n\tc.Assert(err, Not(Equals), nil)\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(shortLabel), &label)\n\tc.Assert(err, Equals, nil)\n\tc.Assert(label.Source, Equals, LabelSourceUnspec)\n\tc.Assert(label.Value, Equals, \"\")\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(\"\"), &label)\n\tc.Assert(err, Not(Equals), nil)\n}\n\nfunc (s *LabelsSuite) TestLabelCompare(c *C) {\n\ta1 := NewLabel(\".\", \"\", \"\")\n\ta2 := NewLabel(\".\", \"\", \"\")\n\tb1 := NewLabel(\"bar\", \"\", LabelSourceUnspec)\n\tc1 := NewLabel(\"bar\", \"\", \"kubernetes\")\n\td1 := NewLabel(\"\", \"\", \"\")\n\n\tc.Assert(a1.Equals(a2), Equals, true)\n\tc.Assert(a2.Equals(a1), Equals, true)\n\tc.Assert(a1.Equals(b1), Equals, false)\n\tc.Assert(a1.Equals(c1), Equals, false)\n\tc.Assert(a1.Equals(d1), Equals, false)\n\tc.Assert(b1.Equals(c1), Equals, false)\n}\n\nfunc (s *LabelsSuite) TestLabelParseKey(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout string\n\t}{\n\t\t{\"source0:key0=value1\", \"source0.key0\"},\n\t\t{\"source3:key1\", \"source3.key1\"},\n\t\t{\"source4:key1==value1\", \"source4.key1\"},\n\t\t{\"source::key1=value1\", \"source.:key1\"},\n\t\t{\"4blah=:foo=\", \"4blah=.foo\"},\n\t\t{\"5blah::foo=\", \"5blah.:foo\"},\n\t\t{\"source2.key1=value1\", DefaultLabelSourceKeyPrefix + \"source2.key1\"},\n\t\t{\"1foo\", DefaultLabelSourceKeyPrefix + \"1foo\"},\n\t\t{\":2foo\", DefaultLabelSourceKeyPrefix + \"2foo\"},\n\t\t{\":3foo=\", DefaultLabelSourceKeyPrefix + \"3foo\"},\n\t\t{\"6foo==\", DefaultLabelSourceKeyPrefix + \"6foo\"},\n\t\t{\"7foo=bar\", DefaultLabelSourceKeyPrefix + \"7foo\"},\n\t\t{\"cilium.key1=value1\", DefaultLabelSourceKeyPrefix + \"cilium.key1\"},\n\t\t{\"key1=value1\", DefaultLabelSourceKeyPrefix + \"key1\"},\n\t\t{\"value1\", DefaultLabelSourceKeyPrefix + \"value1\"},\n\t\t{\"$world=value1\", LabelSourceReservedKeyPrefix + \"world\"},\n\t\t{\"k8s:foo=bar:\", LabelSourceK8sKeyPrefix + \"foo\"},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := GetExtendedKeyFrom(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n<commit_msg> Issue 1453 :Fix another test TestSHA256Sum to test only 32bytes SHA and not the big incorrect length SHA Signed-off by: Manali Bhutiyani manali@covalent.io<commit_after>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage labels\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype LabelsSuite struct{}\n\nvar _ = Suite(&LabelsSuite{})\n\nvar (\n\tlblsArray = []string{`unspec:%=%ed`, `unspec:\/\/=\/=`, `unspec:foo=bar`, `unspec:foo2==bar2`, `unspec:foo=====`, `unspec:foo\\\\==\\=`, `unspec:key=`}\n\tlbls = Labels{\n\t\t\"foo\": NewLabel(\"foo\", \"bar\", LabelSourceUnspec),\n\t\t\"foo2\": NewLabel(\"foo2\", \"=bar2\", LabelSourceUnspec),\n\t\t\"key\": NewLabel(\"key\", \"\", LabelSourceUnspec),\n\t\t\"foo==\": NewLabel(\"foo==\", \"==\", LabelSourceUnspec),\n\t\t`foo\\\\=`: NewLabel(`foo\\\\=`, `\\=`, LabelSourceUnspec),\n\t\t`\/\/=\/`: NewLabel(`\/\/=\/`, \"\", LabelSourceUnspec),\n\t\t`%`: NewLabel(`%`, `%ed`, LabelSourceUnspec),\n\t}\n\n\tDefaultLabelSourceKeyPrefix = LabelSourceAny + \".\"\n)\n\nfunc (s *LabelsSuite) TestSHA256Sum(c *C) {\n\tstr := lbls.SHA256Sum()\n\tc.Assert(str, Equals, \"cf51cc7e153a09e82b242f2f0fb2f0f3923d2742a9d84de8bb0de669e5e558e3\")\n}\n\nfunc (s *LabelsSuite) TestSortMap(c *C) {\n\tlblsString := strings.Join(lblsArray, \";\")\n\tlblsString += \";\"\n\tsortedMap := lbls.sortedList()\n\tc.Assert(sortedMap, DeepEquals, []byte(lblsString))\n}\n\ntype lblTest struct {\n\tlabel string\n\tresult *Label\n}\n\nfunc (s *LabelsSuite) TestMap2Labels(c *C) {\n\tm := Map2Labels(map[string]string{\n\t\t\"k8s:foo\": \"bar\",\n\t\t\"k8s:foo2\": \"=bar2\",\n\t\t\"key\": \"\",\n\t\t\"foo==\": \"==\",\n\t\t`foo\\\\=`: `\\=`,\n\t\t`\/\/=\/`: \"\",\n\t\t`%`: `%ed`,\n\t}, LabelSourceUnspec)\n\tc.Assert(m, DeepEquals, lbls)\n}\n\nfunc (s *LabelsSuite) TestMergeLabels(c *C) {\n\tto := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value1\", \"source1\"),\n\t\t\"key2\": NewLabel(\"key2\", \"value3\", \"source4\"),\n\t}\n\tfrom := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value3\", \"source4\"),\n\t}\n\twant := Labels{\n\t\t\"key1\": NewLabel(\"key1\", \"value3\", \"source4\"),\n\t\t\"key2\": NewLabel(\"key2\", \"value3\", \"source4\"),\n\t}\n\tto.MergeLabels(from)\n\tfrom[\"key1\"].Value = \"changed\"\n\tc.Assert(to, DeepEquals, want)\n}\n\nfunc (s *LabelsSuite) TestParseLabel(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout *Label\n\t}{\n\t\t{\"source1:key1=value1\", NewLabel(\"key1\", \"value1\", \"source1\")},\n\t\t{\"key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceUnspec)},\n\t\t{\"value1\", NewLabel(\"value1\", \"\", LabelSourceUnspec)},\n\t\t{\"source1:key1\", NewLabel(\"key1\", \"\", \"source1\")},\n\t\t{\"source1:key1==value1\", NewLabel(\"key1\", \"=value1\", \"source1\")},\n\t\t{\"source::key1=value1\", NewLabel(\"::key1\", \"value1\", \"source\")},\n\t\t{\"$key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceReserved)},\n\t\t{\"1foo\", NewLabel(\"1foo\", \"\", LabelSourceUnspec)},\n\t\t{\":2foo\", NewLabel(\"2foo\", \"\", LabelSourceUnspec)},\n\t\t{\":3foo=\", NewLabel(\"3foo\", \"\", LabelSourceUnspec)},\n\t\t{\"4blah=:foo=\", NewLabel(\"foo\", \"\", \"4blah=\")},\n\t\t{\"5blah::foo=\", NewLabel(\"::foo\", \"\", \"5blah\")},\n\t\t{\"6foo==\", NewLabel(\"6foo\", \"=\", LabelSourceUnspec)},\n\t\t{\"7foo=bar\", NewLabel(\"7foo\", \"bar\", LabelSourceUnspec)},\n\t\t{\"k8s:foo=bar:\", NewLabel(\"foo\", \"bar:\", \"k8s\")},\n\t\t{LabelSourceReservedKeyPrefix + \"host\", NewLabel(\"host\", \"\", LabelSourceReserved)},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := ParseLabel(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n\nfunc (s *LabelsSuite) TestParseSelectLabel(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout *Label\n\t}{\n\t\t{\"source1:key1=value1\", NewLabel(\"key1\", \"value1\", \"source1\")},\n\t\t{\"key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceAny)},\n\t\t{\"value1\", NewLabel(\"value1\", \"\", LabelSourceAny)},\n\t\t{\"source1:key1\", NewLabel(\"key1\", \"\", \"source1\")},\n\t\t{\"source1:key1==value1\", NewLabel(\"key1\", \"=value1\", \"source1\")},\n\t\t{\"source::key1=value1\", NewLabel(\"::key1\", \"value1\", \"source\")},\n\t\t{\"$key1=value1\", NewLabel(\"key1\", \"value1\", LabelSourceReserved)},\n\t\t{\"1foo\", NewLabel(\"1foo\", \"\", LabelSourceAny)},\n\t\t{\":2foo\", NewLabel(\"2foo\", \"\", LabelSourceAny)},\n\t\t{\":3foo=\", NewLabel(\"3foo\", \"\", LabelSourceAny)},\n\t\t{\"4blah=:foo=\", NewLabel(\"foo\", \"\", \"4blah=\")},\n\t\t{\"5blah::foo=\", NewLabel(\"::foo\", \"\", \"5blah\")},\n\t\t{\"6foo==\", NewLabel(\"6foo\", \"=\", LabelSourceAny)},\n\t\t{\"7foo=bar\", NewLabel(\"7foo\", \"bar\", LabelSourceAny)},\n\t\t{\"k8s:foo=bar:\", NewLabel(\"foo\", \"bar:\", \"k8s\")},\n\t\t{LabelSourceReservedKeyPrefix + \"host\", NewLabel(\"host\", \"\", LabelSourceReserved)},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := ParseSelectLabel(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n\nfunc (s *LabelsSuite) TestLabel(c *C) {\n\tvar label Label\n\n\tlongLabel := `{\"source\": \"kubernetes\", \"key\": \"io.kubernetes.pod.name\", \"value\": \"foo\"}`\n\tinvLabel := `{\"source\": \"kubernetes\", \"value\": \"foo\"}`\n\tshortLabel := `\"web\"`\n\n\terr := json.Unmarshal([]byte(longLabel), &label)\n\tc.Assert(err, Equals, nil)\n\tc.Assert(label.Source, Equals, \"kubernetes\")\n\tc.Assert(label.Value, Equals, \"foo\")\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(invLabel), &label)\n\tc.Assert(err, Not(Equals), nil)\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(shortLabel), &label)\n\tc.Assert(err, Equals, nil)\n\tc.Assert(label.Source, Equals, LabelSourceUnspec)\n\tc.Assert(label.Value, Equals, \"\")\n\n\tlabel = Label{}\n\terr = json.Unmarshal([]byte(\"\"), &label)\n\tc.Assert(err, Not(Equals), nil)\n}\n\nfunc (s *LabelsSuite) TestLabelCompare(c *C) {\n\ta1 := NewLabel(\".\", \"\", \"\")\n\ta2 := NewLabel(\".\", \"\", \"\")\n\tb1 := NewLabel(\"bar\", \"\", LabelSourceUnspec)\n\tc1 := NewLabel(\"bar\", \"\", \"kubernetes\")\n\td1 := NewLabel(\"\", \"\", \"\")\n\n\tc.Assert(a1.Equals(a2), Equals, true)\n\tc.Assert(a2.Equals(a1), Equals, true)\n\tc.Assert(a1.Equals(b1), Equals, false)\n\tc.Assert(a1.Equals(c1), Equals, false)\n\tc.Assert(a1.Equals(d1), Equals, false)\n\tc.Assert(b1.Equals(c1), Equals, false)\n}\n\nfunc (s *LabelsSuite) TestLabelParseKey(c *C) {\n\ttests := []struct {\n\t\tstr string\n\t\tout string\n\t}{\n\t\t{\"source0:key0=value1\", \"source0.key0\"},\n\t\t{\"source3:key1\", \"source3.key1\"},\n\t\t{\"source4:key1==value1\", \"source4.key1\"},\n\t\t{\"source::key1=value1\", \"source.:key1\"},\n\t\t{\"4blah=:foo=\", \"4blah=.foo\"},\n\t\t{\"5blah::foo=\", \"5blah.:foo\"},\n\t\t{\"source2.key1=value1\", DefaultLabelSourceKeyPrefix + \"source2.key1\"},\n\t\t{\"1foo\", DefaultLabelSourceKeyPrefix + \"1foo\"},\n\t\t{\":2foo\", DefaultLabelSourceKeyPrefix + \"2foo\"},\n\t\t{\":3foo=\", DefaultLabelSourceKeyPrefix + \"3foo\"},\n\t\t{\"6foo==\", DefaultLabelSourceKeyPrefix + \"6foo\"},\n\t\t{\"7foo=bar\", DefaultLabelSourceKeyPrefix + \"7foo\"},\n\t\t{\"cilium.key1=value1\", DefaultLabelSourceKeyPrefix + \"cilium.key1\"},\n\t\t{\"key1=value1\", DefaultLabelSourceKeyPrefix + \"key1\"},\n\t\t{\"value1\", DefaultLabelSourceKeyPrefix + \"value1\"},\n\t\t{\"$world=value1\", LabelSourceReservedKeyPrefix + \"world\"},\n\t\t{\"k8s:foo=bar:\", LabelSourceK8sKeyPrefix + \"foo\"},\n\t}\n\tfor _, test := range tests {\n\t\tlbl := GetExtendedKeyFrom(test.str)\n\t\tc.Assert(lbl, DeepEquals, test.out)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package logs are convenience methods for fetching logs from a minikube cluster\npackage logs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/audit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/bootstrapper\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/cruntime\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\n\/\/ rootCauses are regular expressions that match known failures\nvar rootCauses = []string{\n\t`^error: `,\n\t`eviction manager: pods.* evicted`,\n\t`unknown flag: --`,\n\t`forbidden.*no providers available`,\n\t`eviction manager:.*evicted`,\n\t`tls: bad certificate`,\n\t`kubelet.*no API client`,\n\t`kubelet.*No api server`,\n\t`STDIN.*127.0.0.1:8080`,\n\t`failed to create listener`,\n\t`address already in use`,\n\t`unable to evict any pods`,\n\t`eviction manager: unexpected error`,\n\t`Resetting AnonymousAuth to false`,\n\t`Unable to register node.*forbidden`,\n\t`Failed to initialize CSINodeInfo.*forbidden`,\n\t`Failed to admit pod`,\n\t`failed to \"StartContainer\"`,\n\t`Failed to start ContainerManager`,\n\t`kubelet.*forbidden.*cannot \\w+ resource`,\n\t`leases.*forbidden.*cannot \\w+ resource`,\n\t`failed to start daemon`,\n}\n\n\/\/ rootCauseRe combines rootCauses into a single regex\nvar rootCauseRe = regexp.MustCompile(strings.Join(rootCauses, \"|\"))\n\n\/\/ ignoreCauseRe is a regular expression that matches spurious errors to not surface\nvar ignoreCauseRe = regexp.MustCompile(\"error: no objects passed to apply\")\n\n\/\/ importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs.\nvar importantPods = []string{\n\t\"kube-apiserver\",\n\t\"etcd\",\n\t\"coredns\",\n\t\"kube-scheduler\",\n\t\"kube-proxy\",\n\t\"kubernetes-dashboard\",\n\t\"storage-provisioner\",\n\t\"kube-controller-manager\",\n}\n\n\/\/ logRunner is the subset of CommandRunner used for logging\ntype logRunner interface {\n\tRunCmd(*exec.Cmd) (*command.RunResult, error)\n}\n\n\/\/ lookbackwardsCount is how far back to look in a log for problems. This should be large enough to\n\/\/ include usage messages from a failed binary, but small enough to not include irrelevant problems.\nconst lookBackwardsCount = 400\n\n\/\/ Follow follows logs from multiple files in tail(1) format\nfunc Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner, logOutput io.Writer) error {\n\tcs := []string{}\n\tfor _, v := range logCommands(r, bs, cfg, 0, true) {\n\t\tcs = append(cs, v+\" &\")\n\t}\n\tcs = append(cs, \"wait\")\n\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", strings.Join(cs, \" \"))\n\tcmd.Stdout = logOutput\n\tcmd.Stderr = logOutput\n\tif _, err := cr.RunCmd(cmd); err != nil {\n\t\treturn errors.Wrapf(err, \"log follow\")\n\t}\n\treturn nil\n}\n\n\/\/ IsProblem returns whether this line matches a known problem\nfunc IsProblem(line string) bool {\n\treturn rootCauseRe.MatchString(line) && !ignoreCauseRe.MatchString(line)\n}\n\n\/\/ FindProblems finds possible root causes among the logs\nfunc FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string {\n\tpMap := map[string][]string{}\n\tcmds := logCommands(r, bs, cfg, lookBackwardsCount, false)\n\tfor name := range cmds {\n\t\tklog.Infof(\"Gathering logs for %s ...\", name)\n\t\tvar b bytes.Buffer\n\t\tc := exec.Command(\"\/bin\/bash\", \"-c\", cmds[name])\n\t\tc.Stderr = &b\n\t\tc.Stdout = &b\n\n\t\tif rr, err := cr.RunCmd(c); err != nil {\n\t\t\tklog.Warningf(\"failed %s: command: %s %v output: %s\", name, rr.Command(), err, rr.Output())\n\t\t\tcontinue\n\t\t}\n\t\tscanner := bufio.NewScanner(&b)\n\t\tproblems := []string{}\n\t\tfor scanner.Scan() {\n\t\t\tl := scanner.Text()\n\t\t\tif IsProblem(l) {\n\t\t\t\tklog.Warningf(\"Found %s problem: %s\", name, l)\n\t\t\t\tproblems = append(problems, l)\n\t\t\t}\n\t\t}\n\t\tif len(problems) > 0 {\n\t\t\tpMap[name] = problems\n\t\t}\n\t}\n\treturn pMap\n}\n\n\/\/ OutputProblems outputs discovered problems.\nfunc OutputProblems(problems map[string][]string, maxLines int, logOutput *os.File) {\n\tout.SetErrFile(logOutput)\n\tdefer out.SetErrFile(os.Stderr)\n\n\tfor name, lines := range problems {\n\t\tout.FailureT(\"Problems detected in {{.name}}:\", out.V{\"name\": name})\n\t\tif len(lines) > maxLines {\n\t\t\tlines = lines[len(lines)-maxLines:]\n\t\t}\n\t\tfor _, l := range lines {\n\t\t\tout.ErrT(style.LogEntry, l)\n\t\t}\n\t}\n}\n\n\/\/ Output displays logs from multiple sources in tail(1) format\nfunc Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int, logOutput *os.File) error {\n\tcmds := logCommands(r, bs, cfg, lines, false)\n\tcmds[\"kernel\"] = \"uptime && uname -a && grep PRETTY \/etc\/os-release\"\n\n\tnames := []string{}\n\tfor k := range cmds {\n\t\tnames = append(names, k)\n\t}\n\n\tout.SetOutFile(logOutput)\n\tdefer out.SetOutFile(os.Stdout)\n\n\tsort.Strings(names)\n\tfailed := []string{}\n\tfor i, name := range names {\n\t\tif i > 0 {\n\t\t\tout.Styled(style.Empty, \"\")\n\t\t}\n\t\tout.Styled(style.Empty, \"==> {{.name}} <==\", out.V{\"name\": name})\n\t\tvar b bytes.Buffer\n\t\tc := exec.Command(\"\/bin\/bash\", \"-c\", cmds[name])\n\t\tc.Stdout = &b\n\t\tc.Stderr = &b\n\t\tif rr, err := runner.RunCmd(c); err != nil {\n\t\t\tklog.Errorf(\"command %s failed with error: %v output: %q\", rr.Command(), err, rr.Output())\n\t\t\tfailed = append(failed, name)\n\t\t\tcontinue\n\t\t}\n\t\tl := \"\"\n\t\tscanner := bufio.NewScanner(&b)\n\t\tfor scanner.Scan() {\n\t\t\tl += scanner.Text() + \"\\n\"\n\t\t}\n\t\tout.Styled(style.Empty, l)\n\t}\n\n\tif len(failed) > 0 {\n\t\treturn fmt.Errorf(\"unable to fetch logs for: %s\", strings.Join(failed, \", \"))\n\t}\n\treturn nil\n}\n\n\/\/ outputAudit displays the audit logs.\nfunc outputAudit(lines int) error {\n\tout.Styled(style.Empty, \"\")\n\tout.Styled(style.Empty, \"==> Audit <==\")\n\tr, err := audit.Report(lines)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create audit report: %v\", err)\n\t}\n\tout.Styled(style.Empty, r.ASCIITable())\n\treturn nil\n}\n\n\/\/ outputLastStart outputs the last start logs.\nfunc outputLastStart() error {\n\tout.Styled(style.Empty, \"\")\n\tout.Styled(style.Empty, \"==> Last Start <==\")\n\tfp := localpath.LastStartLog()\n\tf, err := os.Open(fp)\n\tif os.IsNotExist(err) {\n\t\tmsg := fmt.Sprintf(\"Last start log file not found at %s\", fp)\n\t\tout.Styled(style.Empty, msg)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open file %s: %v\", fp, err)\n\t}\n\tdefer f.Close()\n\tl := \"\"\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tl += s.Text() + \"\\n\"\n\t}\n\tout.Styled(style.Empty, l)\n\tif err := s.Err(); err != nil {\n\t\treturn fmt.Errorf(\"failed to read file %s: %v\", fp, err)\n\t}\n\treturn nil\n}\n\n\/\/ OutputOffline outputs logs that don't need a running cluster.\nfunc OutputOffline(lines int, logOutput *os.File) {\n\tout.SetOutFile(logOutput)\n\tdefer out.SetOutFile(os.Stdout)\n\tif err := outputAudit(lines); err != nil {\n\t\tklog.Errorf(\"failed to output audit logs: %v\", err)\n\t}\n\tif err := outputLastStart(); err != nil {\n\t\tklog.Errorf(\"failed to output last start logs: %v\", err)\n\t}\n\n\tout.Styled(style.Empty, \"\")\n}\n\n\/\/ logCommands returns a list of commands that would be run to receive the anticipated logs\nfunc logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {\n\tcmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})\n\tfor _, pod := range importantPods {\n\t\tids, err := r.ListContainers(cruntime.ListContainersOptions{Name: pod})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to list containers for %q: %v\", pod, err)\n\t\t\tcontinue\n\t\t}\n\t\tklog.Infof(\"%d containers: %s\", len(ids), ids)\n\t\tif len(ids) == 0 {\n\t\t\tklog.Warningf(\"No container was found matching %q\", pod)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, i := range ids {\n\t\t\tkey := fmt.Sprintf(\"%s [%s]\", pod, i)\n\t\t\tcmds[key] = r.ContainerLogCmd(i, length, follow)\n\t\t}\n\t}\n\tcmds[r.Name()] = r.SystemLogCmd(length)\n\tcmds[\"container status\"] = cruntime.ContainerStatusCommand()\n\n\treturn cmds\n}\n<commit_msg>fix TestFunctional\/serial\/LogsFileCmd test failure<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package logs are convenience methods for fetching logs from a minikube cluster\npackage logs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/audit\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/bootstrapper\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/command\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/cruntime\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/localpath\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\n\/\/ rootCauses are regular expressions that match known failures\nvar rootCauses = []string{\n\t`^error: `,\n\t`eviction manager: pods.* evicted`,\n\t`unknown flag: --`,\n\t`forbidden.*no providers available`,\n\t`eviction manager:.*evicted`,\n\t`tls: bad certificate`,\n\t`kubelet.*no API client`,\n\t`kubelet.*No api server`,\n\t`STDIN.*127.0.0.1:8080`,\n\t`failed to create listener`,\n\t`address already in use`,\n\t`unable to evict any pods`,\n\t`eviction manager: unexpected error`,\n\t`Resetting AnonymousAuth to false`,\n\t`Unable to register node.*forbidden`,\n\t`Failed to initialize CSINodeInfo.*forbidden`,\n\t`Failed to admit pod`,\n\t`failed to \"StartContainer\"`,\n\t`Failed to start ContainerManager`,\n\t`kubelet.*forbidden.*cannot \\w+ resource`,\n\t`leases.*forbidden.*cannot \\w+ resource`,\n\t`failed to start daemon`,\n}\n\n\/\/ rootCauseRe combines rootCauses into a single regex\nvar rootCauseRe = regexp.MustCompile(strings.Join(rootCauses, \"|\"))\n\n\/\/ ignoreCauseRe is a regular expression that matches spurious errors to not surface\nvar ignoreCauseRe = regexp.MustCompile(\"error: no objects passed to apply\")\n\n\/\/ importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs.\nvar importantPods = []string{\n\t\"kube-apiserver\",\n\t\"etcd\",\n\t\"coredns\",\n\t\"kube-scheduler\",\n\t\"kube-proxy\",\n\t\"kubernetes-dashboard\",\n\t\"storage-provisioner\",\n\t\"kube-controller-manager\",\n}\n\n\/\/ logRunner is the subset of CommandRunner used for logging\ntype logRunner interface {\n\tRunCmd(*exec.Cmd) (*command.RunResult, error)\n}\n\n\/\/ lookbackwardsCount is how far back to look in a log for problems. This should be large enough to\n\/\/ include usage messages from a failed binary, but small enough to not include irrelevant problems.\nconst lookBackwardsCount = 400\n\n\/\/ Follow follows logs from multiple files in tail(1) format\nfunc Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner, logOutput io.Writer) error {\n\tcs := []string{}\n\tfor _, v := range logCommands(r, bs, cfg, 0, true) {\n\t\tcs = append(cs, v+\" &\")\n\t}\n\tcs = append(cs, \"wait\")\n\n\tcmd := exec.Command(\"\/bin\/bash\", \"-c\", strings.Join(cs, \" \"))\n\tcmd.Stdout = logOutput\n\tcmd.Stderr = logOutput\n\tif _, err := cr.RunCmd(cmd); err != nil {\n\t\treturn errors.Wrapf(err, \"log follow\")\n\t}\n\treturn nil\n}\n\n\/\/ IsProblem returns whether this line matches a known problem\nfunc IsProblem(line string) bool {\n\treturn rootCauseRe.MatchString(line) && !ignoreCauseRe.MatchString(line)\n}\n\n\/\/ FindProblems finds possible root causes among the logs\nfunc FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string {\n\tpMap := map[string][]string{}\n\tcmds := logCommands(r, bs, cfg, lookBackwardsCount, false)\n\tfor name := range cmds {\n\t\tklog.Infof(\"Gathering logs for %s ...\", name)\n\t\tvar b bytes.Buffer\n\t\tc := exec.Command(\"\/bin\/bash\", \"-c\", cmds[name])\n\t\tc.Stderr = &b\n\t\tc.Stdout = &b\n\n\t\tif rr, err := cr.RunCmd(c); err != nil {\n\t\t\tklog.Warningf(\"failed %s: command: %s %v output: %s\", name, rr.Command(), err, rr.Output())\n\t\t\tcontinue\n\t\t}\n\t\tscanner := bufio.NewScanner(&b)\n\t\tproblems := []string{}\n\t\tfor scanner.Scan() {\n\t\t\tl := scanner.Text()\n\t\t\tif IsProblem(l) {\n\t\t\t\tklog.Warningf(\"Found %s problem: %s\", name, l)\n\t\t\t\tproblems = append(problems, l)\n\t\t\t}\n\t\t}\n\t\tif len(problems) > 0 {\n\t\t\tpMap[name] = problems\n\t\t}\n\t}\n\treturn pMap\n}\n\n\/\/ OutputProblems outputs discovered problems.\nfunc OutputProblems(problems map[string][]string, maxLines int, logOutput *os.File) {\n\tout.SetErrFile(logOutput)\n\tdefer out.SetErrFile(os.Stderr)\n\n\tfor name, lines := range problems {\n\t\tout.FailureT(\"Problems detected in {{.name}}:\", out.V{\"name\": name})\n\t\tif len(lines) > maxLines {\n\t\t\tlines = lines[len(lines)-maxLines:]\n\t\t}\n\t\tfor _, l := range lines {\n\t\t\tout.ErrT(style.LogEntry, l)\n\t\t}\n\t}\n}\n\n\/\/ Output displays logs from multiple sources in tail(1) format\nfunc Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int, logOutput *os.File) error {\n\tcmds := logCommands(r, bs, cfg, lines, false)\n\tcmds[\"kernel\"] = \"uptime && uname -a && grep PRETTY \/etc\/os-release\"\n\n\tnames := []string{}\n\tfor k := range cmds {\n\t\tnames = append(names, k)\n\t}\n\n\tout.SetOutFile(logOutput)\n\tdefer out.SetOutFile(os.Stdout)\n\tout.SetErrFile(logOutput)\n\tdefer out.SetErrFile(os.Stderr)\n\n\tsort.Strings(names)\n\tfailed := []string{}\n\tfor i, name := range names {\n\t\tif i > 0 {\n\t\t\tout.Styled(style.Empty, \"\")\n\t\t}\n\t\tout.Styled(style.Empty, \"==> {{.name}} <==\", out.V{\"name\": name})\n\t\tvar b bytes.Buffer\n\t\tc := exec.Command(\"\/bin\/bash\", \"-c\", cmds[name])\n\t\tc.Stdout = &b\n\t\tc.Stderr = &b\n\t\tif rr, err := runner.RunCmd(c); err != nil {\n\t\t\tklog.Errorf(\"command %s failed with error: %v output: %q\", rr.Command(), err, rr.Output())\n\t\t\tfailed = append(failed, name)\n\t\t\tcontinue\n\t\t}\n\t\tl := \"\"\n\t\tscanner := bufio.NewScanner(&b)\n\t\tfor scanner.Scan() {\n\t\t\tl += scanner.Text() + \"\\n\"\n\t\t}\n\t\tout.Styled(style.Empty, l)\n\t}\n\n\tif len(failed) > 0 {\n\t\treturn fmt.Errorf(\"unable to fetch logs for: %s\", strings.Join(failed, \", \"))\n\t}\n\treturn nil\n}\n\n\/\/ outputAudit displays the audit logs.\nfunc outputAudit(lines int) error {\n\tout.Styled(style.Empty, \"\")\n\tout.Styled(style.Empty, \"==> Audit <==\")\n\tr, err := audit.Report(lines)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create audit report: %v\", err)\n\t}\n\tout.Styled(style.Empty, r.ASCIITable())\n\treturn nil\n}\n\n\/\/ outputLastStart outputs the last start logs.\nfunc outputLastStart() error {\n\tout.Styled(style.Empty, \"\")\n\tout.Styled(style.Empty, \"==> Last Start <==\")\n\tfp := localpath.LastStartLog()\n\tf, err := os.Open(fp)\n\tif os.IsNotExist(err) {\n\t\tmsg := fmt.Sprintf(\"Last start log file not found at %s\", fp)\n\t\tout.Styled(style.Empty, msg)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open file %s: %v\", fp, err)\n\t}\n\tdefer f.Close()\n\tl := \"\"\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tl += s.Text() + \"\\n\"\n\t}\n\tout.Styled(style.Empty, l)\n\tif err := s.Err(); err != nil {\n\t\treturn fmt.Errorf(\"failed to read file %s: %v\", fp, err)\n\t}\n\treturn nil\n}\n\n\/\/ OutputOffline outputs logs that don't need a running cluster.\nfunc OutputOffline(lines int, logOutput *os.File) {\n\tout.SetOutFile(logOutput)\n\tdefer out.SetOutFile(os.Stdout)\n\tout.SetErrFile(logOutput)\n\tdefer out.SetErrFile(os.Stderr)\n\tif err := outputAudit(lines); err != nil {\n\t\tklog.Errorf(\"failed to output audit logs: %v\", err)\n\t}\n\tif err := outputLastStart(); err != nil {\n\t\tklog.Errorf(\"failed to output last start logs: %v\", err)\n\t}\n\n\tout.Styled(style.Empty, \"\")\n}\n\n\/\/ logCommands returns a list of commands that would be run to receive the anticipated logs\nfunc logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {\n\tcmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})\n\tfor _, pod := range importantPods {\n\t\tids, err := r.ListContainers(cruntime.ListContainersOptions{Name: pod})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to list containers for %q: %v\", pod, err)\n\t\t\tcontinue\n\t\t}\n\t\tklog.Infof(\"%d containers: %s\", len(ids), ids)\n\t\tif len(ids) == 0 {\n\t\t\tklog.Warningf(\"No container was found matching %q\", pod)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, i := range ids {\n\t\t\tkey := fmt.Sprintf(\"%s [%s]\", pod, i)\n\t\t\tcmds[key] = r.ContainerLogCmd(i, length, follow)\n\t\t}\n\t}\n\tcmds[r.Name()] = r.SystemLogCmd(length)\n\tcmds[\"container status\"] = cruntime.ContainerStatusCommand()\n\n\treturn cmds\n}\n<|endoftext|>"} {"text":"<commit_before>package order\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/convert\"\n)\n\ntype Order struct {\n\tID int64\n\tGID int64\n\tCID int64\n\tSymbol string\n\tMTSCreated int64\n\tMTSUpdated int64\n\tAmount float64\n\tAmountOrig float64\n\tType string\n\tTypePrev string\n\tMTSTif int64\n\tFlags int64\n\tStatus string\n\tPrice float64\n\tPriceAvg float64\n\tPriceTrailing float64\n\tPriceAuxLimit float64\n\tNotify bool\n\tHidden bool\n\tPlacedID int64\n\tMeta map[string]interface{}\n}\n\n\/\/ Snapshot is a collection of Orders that would usually be sent on\n\/\/ inital connection.\ntype Snapshot struct {\n\tSnapshot []*Order\n}\n\n\/\/ Update is an Order that gets sent out after every change to an order.\ntype Update Order\n\n\/\/ New gets sent out after an Order was created successfully.\ntype New Order\n\n\/\/ Cancel gets sent out after an Order was cancelled successfully.\ntype Cancel Order\n\n\/\/ FromRaw takes the raw list of values as returned from the websocket\n\/\/ service and tries to convert it into an Order.\nfunc FromRaw(raw []interface{}) (o *Order, err error) {\n\tif len(raw) == 12 {\n\t\to = &Order{\n\t\t\tID: convert.I64ValOrZero(raw[0]),\n\t\t\tSymbol: convert.SValOrEmpty(raw[1]),\n\t\t\tAmount: convert.F64ValOrZero(raw[2]),\n\t\t\tAmountOrig: convert.F64ValOrZero(raw[3]),\n\t\t\tType: convert.SValOrEmpty(raw[4]),\n\t\t\tStatus: convert.SValOrEmpty(raw[5]),\n\t\t\tPrice: convert.F64ValOrZero(raw[6]),\n\t\t\tPriceAvg: convert.F64ValOrZero(raw[7]),\n\t\t\tMTSUpdated: convert.I64ValOrZero(raw[8]),\n\t\t}\n\t\treturn\n\t}\n\n\tif len(raw) < 26 {\n\t\treturn o, fmt.Errorf(\"data slice too short for order: %#v\", raw)\n\t}\n\n\to = &Order{\n\t\tID: convert.I64ValOrZero(raw[0]),\n\t\tGID: convert.I64ValOrZero(raw[1]),\n\t\tCID: convert.I64ValOrZero(raw[2]),\n\t\tSymbol: convert.SValOrEmpty(raw[3]),\n\t\tMTSCreated: convert.I64ValOrZero(raw[4]),\n\t\tMTSUpdated: convert.I64ValOrZero(raw[5]),\n\t\tAmount: convert.F64ValOrZero(raw[6]),\n\t\tAmountOrig: convert.F64ValOrZero(raw[7]),\n\t\tType: convert.SValOrEmpty(raw[8]),\n\t\tTypePrev: convert.SValOrEmpty(raw[9]),\n\t\tMTSTif: convert.I64ValOrZero(raw[10]),\n\t\tFlags: convert.I64ValOrZero(raw[12]),\n\t\tStatus: convert.SValOrEmpty(raw[13]),\n\t\tPrice: convert.F64ValOrZero(raw[16]),\n\t\tPriceAvg: convert.F64ValOrZero(raw[17]),\n\t\tPriceTrailing: convert.F64ValOrZero(raw[18]),\n\t\tPriceAuxLimit: convert.F64ValOrZero(raw[19]),\n\t\tNotify: convert.BValOrFalse(raw[23]),\n\t\tHidden: convert.BValOrFalse(raw[24]),\n\t\tPlacedID: convert.I64ValOrZero(raw[25]),\n\t}\n\n\tif len(raw) >= 31 {\n\t\to.Meta = convert.SiMapOrEmpty(raw[31])\n\t}\n\n\treturn\n}\n\n\/\/ SnapshotFromRaw takes a raw list of values as returned from the websocket\n\/\/ service and tries to convert it into an Snapshot.\nfunc SnapshotFromRaw(raw []interface{}) (s *Snapshot, err error) {\n\tif len(raw) == 0 {\n\t\treturn s, fmt.Errorf(\"data slice too short for order: %#v\", raw)\n\t}\n\n\tos := make([]*Order, 0)\n\tswitch raw[0].(type) {\n\tcase []interface{}:\n\t\tfor _, v := range raw {\n\t\t\tif l, ok := v.([]interface{}); ok {\n\t\t\t\to, err := FromRaw(l)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn s, err\n\t\t\t\t}\n\t\t\t\tos = append(os, o)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn s, fmt.Errorf(\"not an order snapshot\")\n\t}\n\ts = &Snapshot{Snapshot: os}\n\n\treturn\n}\n\nfunc FromWSRaw(raw []interface{}, op string) (interface{}, error) {\n\tif op == \"os\" {\n\t\treturn SnapshotFromRaw(raw)\n\t}\n\n\to, err := FromRaw(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch op {\n\tcase \"on\":\n\t\treturn New(*o), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"unrecognized order operation: %s\", op)\n}\n<commit_msg>enriching order model with functionality to explicitly get order new, cancel, snapshot and update types<commit_after>package order\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/convert\"\n)\n\ntype Order struct {\n\tID int64\n\tGID int64\n\tCID int64\n\tSymbol string\n\tMTSCreated int64\n\tMTSUpdated int64\n\tAmount float64\n\tAmountOrig float64\n\tType string\n\tTypePrev string\n\tMTSTif int64\n\tFlags int64\n\tStatus string\n\tPrice float64\n\tPriceAvg float64\n\tPriceTrailing float64\n\tPriceAuxLimit float64\n\tNotify bool\n\tHidden bool\n\tPlacedID int64\n\tMeta map[string]interface{}\n}\n\n\/\/ Snapshot is a collection of Orders that would usually be sent on\n\/\/ inital connection.\ntype Snapshot struct {\n\tSnapshot []*Order\n}\n\n\/\/ Update is an Order that gets sent out after every change to an order.\ntype Update Order\n\n\/\/ New gets sent out after an Order was created successfully.\ntype New Order\n\n\/\/ Cancel gets sent out after an Order was cancelled successfully.\ntype Cancel Order\n\n\/\/ FromRaw takes the raw list of values as returned from the websocket\n\/\/ service and tries to convert it into an Order.\nfunc FromRaw(raw []interface{}) (o *Order, err error) {\n\tif len(raw) == 12 {\n\t\to = &Order{\n\t\t\tID: convert.I64ValOrZero(raw[0]),\n\t\t\tSymbol: convert.SValOrEmpty(raw[1]),\n\t\t\tAmount: convert.F64ValOrZero(raw[2]),\n\t\t\tAmountOrig: convert.F64ValOrZero(raw[3]),\n\t\t\tType: convert.SValOrEmpty(raw[4]),\n\t\t\tStatus: convert.SValOrEmpty(raw[5]),\n\t\t\tPrice: convert.F64ValOrZero(raw[6]),\n\t\t\tPriceAvg: convert.F64ValOrZero(raw[7]),\n\t\t\tMTSUpdated: convert.I64ValOrZero(raw[8]),\n\t\t}\n\t\treturn\n\t}\n\n\tif len(raw) < 26 {\n\t\treturn o, fmt.Errorf(\"data slice too short for order: %#v\", raw)\n\t}\n\n\to = &Order{\n\t\tID: convert.I64ValOrZero(raw[0]),\n\t\tGID: convert.I64ValOrZero(raw[1]),\n\t\tCID: convert.I64ValOrZero(raw[2]),\n\t\tSymbol: convert.SValOrEmpty(raw[3]),\n\t\tMTSCreated: convert.I64ValOrZero(raw[4]),\n\t\tMTSUpdated: convert.I64ValOrZero(raw[5]),\n\t\tAmount: convert.F64ValOrZero(raw[6]),\n\t\tAmountOrig: convert.F64ValOrZero(raw[7]),\n\t\tType: convert.SValOrEmpty(raw[8]),\n\t\tTypePrev: convert.SValOrEmpty(raw[9]),\n\t\tMTSTif: convert.I64ValOrZero(raw[10]),\n\t\tFlags: convert.I64ValOrZero(raw[12]),\n\t\tStatus: convert.SValOrEmpty(raw[13]),\n\t\tPrice: convert.F64ValOrZero(raw[16]),\n\t\tPriceAvg: convert.F64ValOrZero(raw[17]),\n\t\tPriceTrailing: convert.F64ValOrZero(raw[18]),\n\t\tPriceAuxLimit: convert.F64ValOrZero(raw[19]),\n\t\tNotify: convert.BValOrFalse(raw[23]),\n\t\tHidden: convert.BValOrFalse(raw[24]),\n\t\tPlacedID: convert.I64ValOrZero(raw[25]),\n\t}\n\n\tif len(raw) >= 31 {\n\t\to.Meta = convert.SiMapOrEmpty(raw[31])\n\t}\n\n\treturn\n}\n\n\/\/ NewFromRaw reds \"on\" type message from data sream and\n\/\/ maps it to order.New data structure\nfunc NewFromRaw(raw []interface{}) (New, error) {\n\to, err := FromRaw(raw)\n\tif err != nil {\n\t\treturn New{}, err\n\t}\n\n\treturn New(*o), nil\n}\n\n\/\/ UpdateFromRaw reds \"ou\" type message from data sream and\n\/\/ maps it to order.Update data structure\nfunc UpdateFromRaw(raw []interface{}) (Update, error) {\n\to, err := FromRaw(raw)\n\tif err != nil {\n\t\treturn Update{}, err\n\t}\n\n\treturn Update(*o), nil\n}\n\n\/\/ CancelFromRaw reds \"oc\" type message from data sream and\n\/\/ maps it to order.Cancel data structure\nfunc CancelFromRaw(raw []interface{}) (Cancel, error) {\n\to, err := FromRaw(raw)\n\tif err != nil {\n\t\treturn Cancel{}, err\n\t}\n\n\treturn Cancel(*o), nil\n}\n\n\/\/ SnapshotFromRaw takes a raw list of values as returned from the websocket\n\/\/ service and tries to convert it into an Snapshot.\nfunc SnapshotFromRaw(raw []interface{}) (s *Snapshot, err error) {\n\tif len(raw) == 0 {\n\t\treturn s, fmt.Errorf(\"data slice too short for order: %#v\", raw)\n\t}\n\n\tos := make([]*Order, 0)\n\tswitch raw[0].(type) {\n\tcase []interface{}:\n\t\tfor _, v := range raw {\n\t\t\tif l, ok := v.([]interface{}); ok {\n\t\t\t\to, err := FromRaw(l)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn s, err\n\t\t\t\t}\n\t\t\t\tos = append(os, o)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn s, fmt.Errorf(\"not an order snapshot\")\n\t}\n\ts = &Snapshot{Snapshot: os}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\tkerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\nconst (\n\tdefaultTTL = 30 * time.Minute\n)\n\ntype dnsValue struct {\n\t\/\/ All IPv4 addresses for a given domain name\n\tips []net.IP\n\t\/\/ Time-to-live value from non-authoritative\/cached name server for the domain\n\tttl time.Duration\n\t\/\/ Holds (last dns lookup time + ttl), tells when to refresh IPs next time\n\tnextQueryTime time.Time\n}\n\ntype DNS struct {\n\t\/\/ Protects dnsMap operations\n\tlock sync.Mutex\n\t\/\/ Holds dns name and its corresponding information\n\tdnsMap map[string]dnsValue\n\n\t\/\/ DNS resolvers\n\tnameservers []string\n\t\/\/ DNS port\n\tport string\n}\n\nfunc NewDNS(resolverConfigFile string) (*DNS, error) {\n\tconfig, err := dns.ClientConfigFromFile(resolverConfigFile)\n\tif err != nil || config == nil {\n\t\treturn nil, fmt.Errorf(\"cannot initialize the resolver: %v\", err)\n\t}\n\n\treturn &DNS{\n\t\tdnsMap: map[string]dnsValue{},\n\t\tnameservers: filterIPv4Servers(config.Servers),\n\t\tport: config.Port,\n\t}, nil\n}\n\nfunc (d *DNS) Size() int {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\treturn len(d.dnsMap)\n}\n\nfunc (d *DNS) Get(dns string) dnsValue {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tdata := dnsValue{}\n\tif res, ok := d.dnsMap[dns]; ok {\n\t\tdata.ips = make([]net.IP, len(res.ips))\n\t\tcopy(data.ips, res.ips)\n\t\tdata.ttl = res.ttl\n\t\tdata.nextQueryTime = res.nextQueryTime\n\t}\n\treturn data\n}\n\nfunc (d *DNS) Add(dns string) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\td.dnsMap[dns] = dnsValue{}\n\terr, _ := d.updateOne(dns)\n\tif err != nil {\n\t\tdelete(d.dnsMap, dns)\n\t}\n\treturn err\n}\n\nfunc (d *DNS) Update() (error, bool) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\terrList := []error{}\n\tchanged := false\n\tfor dns := range d.dnsMap {\n\t\terr, updated := d.updateOne(dns)\n\t\tif err != nil {\n\t\t\terrList = append(errList, err)\n\t\t\tcontinue\n\t\t}\n\t\tif updated {\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn kerrors.NewAggregate(errList), changed\n}\n\nfunc (d *DNS) updateOne(dns string) (error, bool) {\n\tres, ok := d.dnsMap[dns]\n\tif !ok {\n\t\t\/\/ Should not happen, all operations on dnsMap are synchronized by d.lock\n\t\treturn fmt.Errorf(\"DNS value not found in dnsMap for domain: %q\", dns), false\n\t}\n\n\tips, minTTL, err := d.getIPsAndMinTTL(dns)\n\tif err != nil {\n\t\tres.nextQueryTime = time.Now().Add(defaultTTL)\n\t\td.dnsMap[dns] = res\n\t\treturn err, false\n\t}\n\n\tchanged := false\n\tif !ipsEqual(res.ips, ips) {\n\t\tchanged = true\n\t}\n\tres.ips = ips\n\tres.ttl = minTTL\n\tres.nextQueryTime = time.Now().Add(res.ttl)\n\td.dnsMap[dns] = res\n\treturn nil, changed\n}\n\nfunc (d *DNS) getIPsAndMinTTL(domain string) ([]net.IP, time.Duration, error) {\n\tips := []net.IP{}\n\tttlSet := false\n\tvar minTTL uint32\n\n\tfor _, server := range d.nameservers {\n\t\tmsg := new(dns.Msg)\n\t\tmsg.SetQuestion(dns.Fqdn(domain), dns.TypeA)\n\n\t\tdialServer := server\n\t\tif _, _, err := net.SplitHostPort(server); err != nil {\n\t\t\tdialServer = net.JoinHostPort(server, d.port)\n\t\t}\n\t\tc := new(dns.Client)\n\t\tc.Timeout = 2 * time.Second\n\t\tin, _, err := c.Exchange(msg, dialServer)\n\t\tif err != nil {\n\t\t\treturn nil, defaultTTL, err\n\t\t}\n\t\tif in != nil && in.Rcode != dns.RcodeSuccess {\n\t\t\treturn nil, defaultTTL, fmt.Errorf(\"failed to get a valid answer: %v\", in)\n\t\t}\n\n\t\tif in != nil && len(in.Answer) > 0 {\n\t\t\tfor _, a := range in.Answer {\n\t\t\t\tif !ttlSet || a.Header().Ttl < minTTL {\n\t\t\t\t\tminTTL = a.Header().Ttl\n\t\t\t\t\tttlSet = true\n\t\t\t\t}\n\n\t\t\t\tswitch t := a.(type) {\n\t\t\t\tcase *dns.A:\n\t\t\t\t\tips = append(ips, t.A)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !ttlSet || (len(ips) == 0) {\n\t\treturn nil, defaultTTL, fmt.Errorf(\"IPv4 addr not found for domain: %q, nameservers: %v\", domain, d.nameservers)\n\t}\n\n\tttl, err := time.ParseDuration(fmt.Sprintf(\"%ds\", minTTL))\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"Invalid TTL value for domain: %q, err: %v, defaulting ttl=%s\", domain, err, defaultTTL.String()))\n\t\tttl = defaultTTL\n\t}\n\n\treturn removeDuplicateIPs(ips), ttl, nil\n}\n\nfunc (d *DNS) GetMinQueryTime() (time.Time, bool) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\ttimeSet := false\n\tvar minTime time.Time\n\tfor _, res := range d.dnsMap {\n\t\tif (timeSet == false) || res.nextQueryTime.Before(minTime) {\n\t\t\ttimeSet = true\n\t\t\tminTime = res.nextQueryTime\n\t\t}\n\t}\n\n\treturn minTime, timeSet\n}\n\nfunc ipsEqual(oldips, newips []net.IP) bool {\n\tif len(oldips) != len(newips) {\n\t\treturn false\n\t}\n\n\tfor _, oldip := range oldips {\n\t\tfound := false\n\t\tfor _, newip := range newips {\n\t\t\tif oldip.Equal(newip) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc filterIPv4Servers(servers []string) []string {\n\tipv4Servers := []string{}\n\tfor _, server := range servers {\n\t\tipString := server\n\t\tif host, _, err := net.SplitHostPort(server); err == nil {\n\t\t\tipString = host\n\t\t}\n\n\t\tif ip := net.ParseIP(ipString); ip != nil {\n\t\t\tif ip.To4() != nil {\n\t\t\t\tipv4Servers = append(ipv4Servers, server)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ipv4Servers\n}\n\nfunc removeDuplicateIPs(ips []net.IP) []net.IP {\n\tipSet := sets.NewString()\n\tfor _, ip := range ips {\n\t\tipSet.Insert(ip.String())\n\t}\n\n\tuniqueIPs := []net.IP{}\n\tfor _, str := range ipSet.List() {\n\t\tip := net.ParseIP(str)\n\t\tif ip != nil {\n\t\t\tuniqueIPs = append(uniqueIPs, ip)\n\t\t}\n\t}\n\n\treturn uniqueIPs\n}\n<commit_msg>Increased DNS request timeout from 2 secs to 5 secs<commit_after>package common\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\tkerrors \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\nconst (\n\tdefaultTTL = 30 * time.Minute\n)\n\ntype dnsValue struct {\n\t\/\/ All IPv4 addresses for a given domain name\n\tips []net.IP\n\t\/\/ Time-to-live value from non-authoritative\/cached name server for the domain\n\tttl time.Duration\n\t\/\/ Holds (last dns lookup time + ttl), tells when to refresh IPs next time\n\tnextQueryTime time.Time\n}\n\ntype DNS struct {\n\t\/\/ Protects dnsMap operations\n\tlock sync.Mutex\n\t\/\/ Holds dns name and its corresponding information\n\tdnsMap map[string]dnsValue\n\n\t\/\/ DNS resolvers\n\tnameservers []string\n\t\/\/ DNS port\n\tport string\n}\n\nfunc NewDNS(resolverConfigFile string) (*DNS, error) {\n\tconfig, err := dns.ClientConfigFromFile(resolverConfigFile)\n\tif err != nil || config == nil {\n\t\treturn nil, fmt.Errorf(\"cannot initialize the resolver: %v\", err)\n\t}\n\n\treturn &DNS{\n\t\tdnsMap: map[string]dnsValue{},\n\t\tnameservers: filterIPv4Servers(config.Servers),\n\t\tport: config.Port,\n\t}, nil\n}\n\nfunc (d *DNS) Size() int {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\treturn len(d.dnsMap)\n}\n\nfunc (d *DNS) Get(dns string) dnsValue {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tdata := dnsValue{}\n\tif res, ok := d.dnsMap[dns]; ok {\n\t\tdata.ips = make([]net.IP, len(res.ips))\n\t\tcopy(data.ips, res.ips)\n\t\tdata.ttl = res.ttl\n\t\tdata.nextQueryTime = res.nextQueryTime\n\t}\n\treturn data\n}\n\nfunc (d *DNS) Add(dns string) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\td.dnsMap[dns] = dnsValue{}\n\terr, _ := d.updateOne(dns)\n\tif err != nil {\n\t\tdelete(d.dnsMap, dns)\n\t}\n\treturn err\n}\n\nfunc (d *DNS) Update() (error, bool) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\terrList := []error{}\n\tchanged := false\n\tfor dns := range d.dnsMap {\n\t\terr, updated := d.updateOne(dns)\n\t\tif err != nil {\n\t\t\terrList = append(errList, err)\n\t\t\tcontinue\n\t\t}\n\t\tif updated {\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn kerrors.NewAggregate(errList), changed\n}\n\nfunc (d *DNS) updateOne(dns string) (error, bool) {\n\tres, ok := d.dnsMap[dns]\n\tif !ok {\n\t\t\/\/ Should not happen, all operations on dnsMap are synchronized by d.lock\n\t\treturn fmt.Errorf(\"DNS value not found in dnsMap for domain: %q\", dns), false\n\t}\n\n\tips, minTTL, err := d.getIPsAndMinTTL(dns)\n\tif err != nil {\n\t\tres.nextQueryTime = time.Now().Add(defaultTTL)\n\t\td.dnsMap[dns] = res\n\t\treturn err, false\n\t}\n\n\tchanged := false\n\tif !ipsEqual(res.ips, ips) {\n\t\tchanged = true\n\t}\n\tres.ips = ips\n\tres.ttl = minTTL\n\tres.nextQueryTime = time.Now().Add(res.ttl)\n\td.dnsMap[dns] = res\n\treturn nil, changed\n}\n\nfunc (d *DNS) getIPsAndMinTTL(domain string) ([]net.IP, time.Duration, error) {\n\tips := []net.IP{}\n\tttlSet := false\n\tvar minTTL uint32\n\n\tfor _, server := range d.nameservers {\n\t\tmsg := new(dns.Msg)\n\t\tmsg.SetQuestion(dns.Fqdn(domain), dns.TypeA)\n\n\t\tdialServer := server\n\t\tif _, _, err := net.SplitHostPort(server); err != nil {\n\t\t\tdialServer = net.JoinHostPort(server, d.port)\n\t\t}\n\t\tc := new(dns.Client)\n\t\tc.Timeout = 5 * time.Second\n\t\tin, _, err := c.Exchange(msg, dialServer)\n\t\tif err != nil {\n\t\t\treturn nil, defaultTTL, err\n\t\t}\n\t\tif in != nil && in.Rcode != dns.RcodeSuccess {\n\t\t\treturn nil, defaultTTL, fmt.Errorf(\"failed to get a valid answer: %v\", in)\n\t\t}\n\n\t\tif in != nil && len(in.Answer) > 0 {\n\t\t\tfor _, a := range in.Answer {\n\t\t\t\tif !ttlSet || a.Header().Ttl < minTTL {\n\t\t\t\t\tminTTL = a.Header().Ttl\n\t\t\t\t\tttlSet = true\n\t\t\t\t}\n\n\t\t\t\tswitch t := a.(type) {\n\t\t\t\tcase *dns.A:\n\t\t\t\t\tips = append(ips, t.A)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !ttlSet || (len(ips) == 0) {\n\t\treturn nil, defaultTTL, fmt.Errorf(\"IPv4 addr not found for domain: %q, nameservers: %v\", domain, d.nameservers)\n\t}\n\n\tttl, err := time.ParseDuration(fmt.Sprintf(\"%ds\", minTTL))\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"Invalid TTL value for domain: %q, err: %v, defaulting ttl=%s\", domain, err, defaultTTL.String()))\n\t\tttl = defaultTTL\n\t}\n\n\treturn removeDuplicateIPs(ips), ttl, nil\n}\n\nfunc (d *DNS) GetMinQueryTime() (time.Time, bool) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\ttimeSet := false\n\tvar minTime time.Time\n\tfor _, res := range d.dnsMap {\n\t\tif (timeSet == false) || res.nextQueryTime.Before(minTime) {\n\t\t\ttimeSet = true\n\t\t\tminTime = res.nextQueryTime\n\t\t}\n\t}\n\n\treturn minTime, timeSet\n}\n\nfunc ipsEqual(oldips, newips []net.IP) bool {\n\tif len(oldips) != len(newips) {\n\t\treturn false\n\t}\n\n\tfor _, oldip := range oldips {\n\t\tfound := false\n\t\tfor _, newip := range newips {\n\t\t\tif oldip.Equal(newip) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc filterIPv4Servers(servers []string) []string {\n\tipv4Servers := []string{}\n\tfor _, server := range servers {\n\t\tipString := server\n\t\tif host, _, err := net.SplitHostPort(server); err == nil {\n\t\t\tipString = host\n\t\t}\n\n\t\tif ip := net.ParseIP(ipString); ip != nil {\n\t\t\tif ip.To4() != nil {\n\t\t\t\tipv4Servers = append(ipv4Servers, server)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ipv4Servers\n}\n\nfunc removeDuplicateIPs(ips []net.IP) []net.IP {\n\tipSet := sets.NewString()\n\tfor _, ip := range ips {\n\t\tipSet.Insert(ip.String())\n\t}\n\n\tuniqueIPs := []net.IP{}\n\tfor _, str := range ipSet.List() {\n\t\tip := net.ParseIP(str)\n\t\tif ip != nil {\n\t\t\tuniqueIPs = append(uniqueIPs, ip)\n\t\t}\n\t}\n\n\treturn uniqueIPs\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage node\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\tutilwait \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tnetworkapi \"github.com\/openshift\/origin\/pkg\/network\/apis\/network\"\n\t\"github.com\/openshift\/origin\/pkg\/network\/common\"\n\tnetworkclient \"github.com\/openshift\/origin\/pkg\/network\/generated\/internalclientset\"\n)\n\ntype nodeVNIDMap struct {\n\tpolicy osdnPolicy\n\tnetworkClient networkclient.Interface\n\n\t\/\/ Synchronizes add or remove ids\/namespaces\n\tlock sync.Mutex\n\tids map[string]uint32\n\tmcEnabled map[string]bool\n\tnamespaces map[uint32]sets.String\n}\n\nfunc newNodeVNIDMap(policy osdnPolicy, networkClient networkclient.Interface) *nodeVNIDMap {\n\treturn &nodeVNIDMap{\n\t\tpolicy: policy,\n\t\tnetworkClient: networkClient,\n\t\tids: make(map[string]uint32),\n\t\tmcEnabled: make(map[string]bool),\n\t\tnamespaces: make(map[uint32]sets.String),\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) addNamespaceToSet(name string, vnid uint32) {\n\tset, found := vmap.namespaces[vnid]\n\tif !found {\n\t\tset = sets.NewString()\n\t\tvmap.namespaces[vnid] = set\n\t}\n\tset.Insert(name)\n}\n\nfunc (vmap *nodeVNIDMap) removeNamespaceFromSet(name string, vnid uint32) {\n\tif set, found := vmap.namespaces[vnid]; found {\n\t\tset.Delete(name)\n\t\tif set.Len() == 0 {\n\t\t\tdelete(vmap.namespaces, vnid)\n\t\t}\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) GetNamespaces(id uint32) []string {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif set, ok := vmap.namespaces[id]; ok {\n\t\treturn set.List()\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) GetMulticastEnabled(id uint32) bool {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tset, exists := vmap.namespaces[id]\n\tif !exists || set.Len() == 0 {\n\t\treturn false\n\t}\n\tfor _, ns := range set.List() {\n\t\tif !vmap.mcEnabled[ns] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Nodes asynchronously watch for both NetNamespaces and services\n\/\/ NetNamespaces populates vnid map and services\/pod-setup depend on vnid map\n\/\/ If for some reason, vnid map propagation from master to node is slow\n\/\/ and if service\/pod-setup tries to lookup vnid map then it may fail.\n\/\/ So, use this method to alleviate this problem. This method will\n\/\/ retry vnid lookup before giving up.\nfunc (vmap *nodeVNIDMap) WaitAndGetVNID(name string) (uint32, error) {\n\tvar id uint32\n\tbackoff := utilwait.Backoff{\n\t\tDuration: 100 * time.Millisecond,\n\t\tFactor: 1.5,\n\t\tSteps: 5,\n\t}\n\terr := utilwait.ExponentialBackoff(backoff, func() (bool, error) {\n\t\tvar err error\n\t\tid, err = vmap.getVNID(name)\n\t\treturn err == nil, nil\n\t})\n\tif err == nil {\n\t\treturn id, nil\n\t} else {\n\t\tVnidNotFoundErrors.Inc()\n\t\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s in vnid map\", name)\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) getVNID(name string) (uint32, error) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif id, ok := vmap.ids[name]; ok {\n\t\treturn id, nil\n\t}\n\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s in vnid map\", name)\n}\n\nfunc (vmap *nodeVNIDMap) setVNID(name string, id uint32, mcEnabled bool) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif oldId, found := vmap.ids[name]; found {\n\t\tvmap.removeNamespaceFromSet(name, oldId)\n\t}\n\tvmap.ids[name] = id\n\tvmap.mcEnabled[name] = mcEnabled\n\tvmap.addNamespaceToSet(name, id)\n\n\tglog.Infof(\"Associate netid %d to namespace %q with mcEnabled %v\", id, name, mcEnabled)\n}\n\nfunc (vmap *nodeVNIDMap) unsetVNID(name string) (id uint32, err error) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tid, found := vmap.ids[name]\n\tif !found {\n\t\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s in vnid map\", name)\n\t}\n\tvmap.removeNamespaceFromSet(name, id)\n\tdelete(vmap.ids, name)\n\tdelete(vmap.mcEnabled, name)\n\tglog.Infof(\"Dissociate netid %d from namespace %q\", id, name)\n\treturn id, nil\n}\n\nfunc netnsIsMulticastEnabled(netns *networkapi.NetNamespace) bool {\n\tenabled, ok := netns.Annotations[networkapi.MulticastEnabledAnnotation]\n\treturn enabled == \"true\" && ok\n}\n\nfunc (vmap *nodeVNIDMap) populateVNIDs() error {\n\tnets, err := vmap.networkClient.Network().NetNamespaces().List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, net := range nets.Items {\n\t\tvmap.setVNID(net.Name, net.NetID, netnsIsMulticastEnabled(&net))\n\t}\n\treturn nil\n}\n\nfunc (vmap *nodeVNIDMap) Start() error {\n\t\/\/ Populate vnid map synchronously so that existing services can fetch vnid\n\terr := vmap.populateVNIDs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo utilwait.Forever(vmap.watchNetNamespaces, 0)\n\treturn nil\n}\n\nfunc (vmap *nodeVNIDMap) watchNetNamespaces() {\n\tcommon.RunEventQueue(vmap.networkClient.Network().RESTClient(), common.NetNamespaces, func(delta cache.Delta) error {\n\t\tnetns := delta.Object.(*networkapi.NetNamespace)\n\n\t\tglog.V(5).Infof(\"Watch %s event for NetNamespace %q\", delta.Type, netns.ObjectMeta.Name)\n\t\tswitch delta.Type {\n\t\tcase cache.Sync, cache.Added, cache.Updated:\n\t\t\t\/\/ Skip this event if nothing has changed\n\t\t\toldNetID, err := vmap.getVNID(netns.NetName)\n\t\t\toldMCEnabled := vmap.mcEnabled[netns.NetName]\n\t\t\tmcEnabled := netnsIsMulticastEnabled(netns)\n\t\t\tif err == nil && oldNetID == netns.NetID && oldMCEnabled == mcEnabled {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvmap.setVNID(netns.NetName, netns.NetID, mcEnabled)\n\n\t\t\tif delta.Type == cache.Added {\n\t\t\t\tvmap.policy.AddNetNamespace(netns)\n\t\t\t} else {\n\t\t\t\tvmap.policy.UpdateNetNamespace(netns, oldNetID)\n\t\t\t}\n\t\tcase cache.Deleted:\n\t\t\t\/\/ Unset VNID first so further operations don't see the deleted VNID\n\t\t\tvmap.unsetVNID(netns.NetName)\n\t\t\tvmap.policy.DeleteNetNamespace(netns)\n\t\t}\n\t\treturn nil\n\t})\n}\n<commit_msg>Bug 1509799 - Fix WaitAndGetVNID() in sdn node<commit_after>\/\/ +build linux\n\npackage node\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\tutilwait \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tnetworkapi \"github.com\/openshift\/origin\/pkg\/network\/apis\/network\"\n\t\"github.com\/openshift\/origin\/pkg\/network\/common\"\n\tnetworkclient \"github.com\/openshift\/origin\/pkg\/network\/generated\/internalclientset\"\n)\n\ntype nodeVNIDMap struct {\n\tpolicy osdnPolicy\n\tnetworkClient networkclient.Interface\n\n\t\/\/ Synchronizes add or remove ids\/namespaces\n\tlock sync.Mutex\n\tids map[string]uint32\n\tmcEnabled map[string]bool\n\tnamespaces map[uint32]sets.String\n}\n\nfunc newNodeVNIDMap(policy osdnPolicy, networkClient networkclient.Interface) *nodeVNIDMap {\n\treturn &nodeVNIDMap{\n\t\tpolicy: policy,\n\t\tnetworkClient: networkClient,\n\t\tids: make(map[string]uint32),\n\t\tmcEnabled: make(map[string]bool),\n\t\tnamespaces: make(map[uint32]sets.String),\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) addNamespaceToSet(name string, vnid uint32) {\n\tset, found := vmap.namespaces[vnid]\n\tif !found {\n\t\tset = sets.NewString()\n\t\tvmap.namespaces[vnid] = set\n\t}\n\tset.Insert(name)\n}\n\nfunc (vmap *nodeVNIDMap) removeNamespaceFromSet(name string, vnid uint32) {\n\tif set, found := vmap.namespaces[vnid]; found {\n\t\tset.Delete(name)\n\t\tif set.Len() == 0 {\n\t\t\tdelete(vmap.namespaces, vnid)\n\t\t}\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) GetNamespaces(id uint32) []string {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif set, ok := vmap.namespaces[id]; ok {\n\t\treturn set.List()\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) GetMulticastEnabled(id uint32) bool {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tset, exists := vmap.namespaces[id]\n\tif !exists || set.Len() == 0 {\n\t\treturn false\n\t}\n\tfor _, ns := range set.List() {\n\t\tif !vmap.mcEnabled[ns] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Nodes asynchronously watch for both NetNamespaces and services\n\/\/ NetNamespaces populates vnid map and services\/pod-setup depend on vnid map\n\/\/ If for some reason, vnid map propagation from master to node is slow\n\/\/ and if service\/pod-setup tries to lookup vnid map then it may fail.\n\/\/ So, use this method to alleviate this problem. This method will\n\/\/ retry vnid lookup before giving up.\nfunc (vmap *nodeVNIDMap) WaitAndGetVNID(name string) (uint32, error) {\n\tvar id uint32\n\t\/\/ ~5 sec timeout\n\tbackoff := utilwait.Backoff{\n\t\tDuration: 400 * time.Millisecond,\n\t\tFactor: 1.5,\n\t\tSteps: 6,\n\t}\n\terr := utilwait.ExponentialBackoff(backoff, func() (bool, error) {\n\t\tvar err error\n\t\tid, err = vmap.getVNID(name)\n\t\treturn err == nil, nil\n\t})\n\tif err == nil {\n\t\treturn id, nil\n\t} else {\n\t\t\/\/ We may find netid when we check with api server but we will\n\t\t\/\/ still treat this as an error if we don't find it in vnid map.\n\t\t\/\/ So that we can imply insufficient timeout if we see many VnidNotFoundErrors.\n\t\tVnidNotFoundErrors.Inc()\n\n\t\tnetns, err := vmap.networkClient.Network().NetNamespaces().Get(name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s, %v\", name, err)\n\t\t}\n\t\tglog.Warningf(\"Netid for namespace: %s exists but not found in vnid map\", name)\n\t\tvmap.setVNID(netns.Name, netns.NetID, netnsIsMulticastEnabled(netns))\n\t\treturn netns.NetID, nil\n\t}\n}\n\nfunc (vmap *nodeVNIDMap) getVNID(name string) (uint32, error) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif id, ok := vmap.ids[name]; ok {\n\t\treturn id, nil\n\t}\n\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s in vnid map\", name)\n}\n\nfunc (vmap *nodeVNIDMap) setVNID(name string, id uint32, mcEnabled bool) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tif oldId, found := vmap.ids[name]; found {\n\t\tvmap.removeNamespaceFromSet(name, oldId)\n\t}\n\tvmap.ids[name] = id\n\tvmap.mcEnabled[name] = mcEnabled\n\tvmap.addNamespaceToSet(name, id)\n\n\tglog.Infof(\"Associate netid %d to namespace %q with mcEnabled %v\", id, name, mcEnabled)\n}\n\nfunc (vmap *nodeVNIDMap) unsetVNID(name string) (id uint32, err error) {\n\tvmap.lock.Lock()\n\tdefer vmap.lock.Unlock()\n\n\tid, found := vmap.ids[name]\n\tif !found {\n\t\treturn 0, fmt.Errorf(\"failed to find netid for namespace: %s in vnid map\", name)\n\t}\n\tvmap.removeNamespaceFromSet(name, id)\n\tdelete(vmap.ids, name)\n\tdelete(vmap.mcEnabled, name)\n\tglog.Infof(\"Dissociate netid %d from namespace %q\", id, name)\n\treturn id, nil\n}\n\nfunc netnsIsMulticastEnabled(netns *networkapi.NetNamespace) bool {\n\tenabled, ok := netns.Annotations[networkapi.MulticastEnabledAnnotation]\n\treturn enabled == \"true\" && ok\n}\n\nfunc (vmap *nodeVNIDMap) populateVNIDs() error {\n\tnets, err := vmap.networkClient.Network().NetNamespaces().List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, net := range nets.Items {\n\t\tvmap.setVNID(net.Name, net.NetID, netnsIsMulticastEnabled(&net))\n\t}\n\treturn nil\n}\n\nfunc (vmap *nodeVNIDMap) Start() error {\n\t\/\/ Populate vnid map synchronously so that existing services can fetch vnid\n\terr := vmap.populateVNIDs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo utilwait.Forever(vmap.watchNetNamespaces, 0)\n\treturn nil\n}\n\nfunc (vmap *nodeVNIDMap) watchNetNamespaces() {\n\tcommon.RunEventQueue(vmap.networkClient.Network().RESTClient(), common.NetNamespaces, func(delta cache.Delta) error {\n\t\tnetns := delta.Object.(*networkapi.NetNamespace)\n\n\t\tglog.V(5).Infof(\"Watch %s event for NetNamespace %q\", delta.Type, netns.ObjectMeta.Name)\n\t\tswitch delta.Type {\n\t\tcase cache.Sync, cache.Added, cache.Updated:\n\t\t\t\/\/ Skip this event if nothing has changed\n\t\t\toldNetID, err := vmap.getVNID(netns.NetName)\n\t\t\toldMCEnabled := vmap.mcEnabled[netns.NetName]\n\t\t\tmcEnabled := netnsIsMulticastEnabled(netns)\n\t\t\tif err == nil && oldNetID == netns.NetID && oldMCEnabled == mcEnabled {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvmap.setVNID(netns.NetName, netns.NetID, mcEnabled)\n\n\t\t\tif delta.Type == cache.Added {\n\t\t\t\tvmap.policy.AddNetNamespace(netns)\n\t\t\t} else {\n\t\t\t\tvmap.policy.UpdateNetNamespace(netns, oldNetID)\n\t\t\t}\n\t\tcase cache.Deleted:\n\t\t\t\/\/ Unset VNID first so further operations don't see the deleted VNID\n\t\t\tvmap.unsetVNID(netns.NetName)\n\t\t\tvmap.policy.DeleteNetNamespace(netns)\n\t\t}\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package paxos\n\nimport (\n \"borg\/assert\"\n \"testing\"\n)\n\n\/\/ TODO: test wire format\n\/\/ \"x\" \/\/ too few separators\n\/\/ \"x:x\" \/\/ too few separators\n\/\/ \"x:x:x\" \/\/ too few separators\n\/\/ \"x:x:x:x:x\" \/\/ too many separators\n\/\/ \"1:x:INVITE:1\" \/\/ invalid to address\n\/\/ \"X:*:INVITE:1\" \/\/ invalid from address\n\n\/\/func TestIgnoresMalformedMessages(t *testing.T) {\n\/\/\tunknownCommand := newInviteFrom(1, 1)\n\/\/\tunknownCommand.(*Msg).cmd = \"x\"\n\/\/\tinvalidBody := newNominateFrom(1, 1, \"foo\")\n\/\/\tinvalidBody.(*Msg).body = \"x\"\n\/\/\n\/\/\ttotest := []Message{\n\/\/\t\tnewInviteFrom(1, 0), \/\/ invalid round number\n\/\/\t\tunknownCommand, \/\/ unknown command\n\/\/\n\/\/\t\tinvalidBody, \/\/ too few separators in nominate body\n\/\/\t\tnewNominateFrom(1, 0, \"foo\"), \/\/ invalid round number\n\/\/\t}\n\/\/\n\/\/\tfor _, test := range totest {\n\/\/\t\tins := make(chan Message)\n\/\/\t\touts := SyncPutter(make(chan Message))\n\/\/\n\/\/\t\tgo acceptor(ins, PutWrapper{1, 2, outs})\n\/\/\t\tins <- test\n\/\/\n\/\/\t\t\/\/ We want to check that it didn't try to send a response.\n\/\/\t\t\/\/ If it didn't, it will continue to read the next input message and\n\/\/\t\t\/\/ this will work fine. If it did, this will deadlock.\n\/\/\t\tins <- test\n\/\/\n\/\/\t\tclose(ins)\n\/\/\t}\n\/\/}\n\n\/\/func TestIgnoresMalformedMessageBadCommand(t *testing.T) {\n\/\/ msgs := make(chan Message)\n\/\/ taught := make(chan string)\n\/\/\n\/\/ go func() {\n\/\/ taught <- learner(1, msgs)\n\/\/ }()\n\/\/\n\/\/ m := newVoteFrom(1, 1, \"foo\")\n\/\/ m.(*Msg).cmd = \"foo\"\n\/\/ msgs <- m\n\/\/ msgs <- newVoteFrom(1, 1, \"foo\")\n\/\/\n\/\/ assert.Equal(t, \"foo\", <-taught, \"\")\n\/\/}\n\/\/\n\/\/func TestIgnoresMessageWithIncorrectArityInBody(t *testing.T) {\n\/\/ msgs := make(chan Message)\n\/\/ taught := make(chan string)\n\/\/\n\/\/ go func() {\n\/\/ taught <- learner(1, msgs)\n\/\/ }()\n\/\/\n\/\/ m := newVoteFrom(1, 1, \"foo\")\n\/\/ m.(*Msg).body = \"\"\n\/\/ msgs <- m\n\/\/ msgs <- newVoteFrom(1, 1, \"foo\")\n\/\/\n\/\/ assert.Equal(t, \"foo\", <-taught, \"\")\n\/\/}\n\n\/\/ For testing convenience\nfunc newVoteFrom(from byte, i uint64, vval string) Message {\n m := NewVote(i, vval)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newNominateFrom(from byte, crnd uint64, v string) Message {\n m := NewNominate(crnd, v)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newRsvpFrom(from byte, i, vrnd uint64, vval string) Message {\n m := NewRsvp(i, vrnd, vval)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newInviteFrom(from byte, rnd uint64) Message {\n m := NewInvite(rnd)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\nfunc TestMessageNewInvite(t *testing.T) {\n m := NewInvite(1)\n assert.Equal(t, Invite, m.Cmd(), \"\")\n assert.Equal(t, \"1\", m.Body(), \"\")\n}\n\nfunc TestMessageNewInviteAlt(t *testing.T) {\n m := NewInvite(2)\n assert.Equal(t, Invite, m.Cmd(), \"\")\n assert.Equal(t, \"2\", m.Body(), \"\")\n}\n\nfunc TestMessageNewNominate(t *testing.T) {\n m := NewNominate(1, \"foo\")\n assert.Equal(t, Nominate, m.Cmd(), \"\")\n assert.Equal(t, \"1:foo\", m.Body(), \"\")\n}\n\nfunc TestMessageNewNominateAlt(t *testing.T) {\n m := NewNominate(2, \"bar\")\n assert.Equal(t, Nominate, m.Cmd(), \"\")\n assert.Equal(t, \"2:bar\", m.Body(), \"\")\n}\n\nfunc TestMessageNewRsvp(t *testing.T) {\n m := NewRsvp(1, 0, \"\")\n assert.Equal(t, Rsvp, m.Cmd(), \"\")\n assert.Equal(t, \"1:0:\", m.Body(), \"\")\n}\n\nfunc TestMessageNewRsvpAlt(t *testing.T) {\n m := NewRsvp(2, 1, \"foo\")\n assert.Equal(t, Rsvp, m.Cmd(), \"\")\n assert.Equal(t, \"2:1:foo\", m.Body(), \"\")\n}\n\nfunc TestMessageNewVote(t *testing.T) {\n m := NewVote(1, \"foo\")\n assert.Equal(t, Vote, m.Cmd(), \"\")\n assert.Equal(t, \"1:foo\", m.Body(), \"\")\n}\n\nfunc TestMessageNewVoteAlt(t *testing.T) {\n m := NewVote(2, \"bar\")\n assert.Equal(t, Vote, m.Cmd(), \"\")\n assert.Equal(t, \"2:bar\", m.Body(), \"\")\n}\n\nfunc TestMessageSetFrom(t *testing.T) {\n m := NewInvite(1)\n m.SetFrom(1)\n assert.Equal(t, 1, m.From(), \"\")\n m.SetFrom(2)\n assert.Equal(t, 2, m.From(), \"\")\n}\n\nfunc TestMessageSetSeqn(t *testing.T) {\n m := NewInvite(1)\n m.SetSeqn(1)\n assert.Equal(t, uint64(1), m.Seqn(), \"\")\n m.SetSeqn(2)\n assert.Equal(t, uint64(2), m.Seqn(), \"\")\n}\n\nfunc TestMessageInviteParts(t *testing.T) {\n m := NewInvite(1)\n crnd := InviteParts(m)\n assert.Equal(t, uint64(1), crnd, \"\")\n}\n\nfunc TestMessageNominateParts(t *testing.T) {\n m := NewNominate(1, \"foo\")\n crnd, v := NominateParts(m)\n assert.Equal(t, uint64(1), crnd, \"\")\n assert.Equal(t, \"foo\", v, \"\")\n}\n\nfunc TestMessageRsvpParts(t *testing.T) {\n m := NewRsvp(1, 0, \"\")\n i, vrnd, vval := RsvpParts(m)\n assert.Equal(t, uint64(1), i, \"\")\n assert.Equal(t, uint64(0), vrnd, \"\")\n assert.Equal(t, \"\", vval, \"\")\n}\n\nfunc TestMessageVoteParts(t *testing.T) {\n m := NewVote(1, \"foo\")\n i, vval := VoteParts(m)\n assert.Equal(t, uint64(1), i, \"\")\n assert.Equal(t, \"foo\", vval, \"\")\n}\n\nfunc TestMessageInvitePartsAlt(t *testing.T) {\n m := NewInvite(2)\n crnd := InviteParts(m)\n assert.Equal(t, uint64(2), crnd, \"\")\n}\n\nfunc TestMessageNominatePartsAlt(t *testing.T) {\n m := NewNominate(2, \"bar\")\n crnd, v := NominateParts(m)\n assert.Equal(t, uint64(2), crnd, \"\")\n assert.Equal(t, \"bar\", v, \"\")\n}\n\nfunc TestMessageRsvpPartsAlt(t *testing.T) {\n m := NewRsvp(2, 1, \"foo\")\n i, vrnd, vval := RsvpParts(m)\n assert.Equal(t, uint64(2), i, \"\")\n assert.Equal(t, uint64(1), vrnd, \"\")\n assert.Equal(t, \"foo\", vval, \"\")\n}\n\nfunc TestMessageVotePartsAlt(t *testing.T) {\n m := NewVote(2, \"bar\")\n i, vval := VoteParts(m)\n assert.Equal(t, uint64(2), i, \"\")\n assert.Equal(t, \"bar\", vval, \"\")\n}\n<commit_msg>consolidate tests<commit_after>package paxos\n\nimport (\n \"borg\/assert\"\n \"testing\"\n)\n\n\/\/ TODO: test wire format\n\/\/ \"x\" \/\/ too few separators\n\/\/ \"x:x\" \/\/ too few separators\n\/\/ \"x:x:x\" \/\/ too few separators\n\/\/ \"x:x:x:x:x\" \/\/ too many separators\n\/\/ \"1:x:INVITE:1\" \/\/ invalid to address\n\/\/ \"X:*:INVITE:1\" \/\/ invalid from address\n\n\/\/func TestIgnoresMalformedMessages(t *testing.T) {\n\/\/\tunknownCommand := newInviteFrom(1, 1)\n\/\/\tunknownCommand.(*Msg).cmd = \"x\"\n\/\/\tinvalidBody := newNominateFrom(1, 1, \"foo\")\n\/\/\tinvalidBody.(*Msg).body = \"x\"\n\/\/\n\/\/\ttotest := []Message{\n\/\/\t\tnewInviteFrom(1, 0), \/\/ invalid round number\n\/\/\t\tunknownCommand, \/\/ unknown command\n\/\/\n\/\/\t\tinvalidBody, \/\/ too few separators in nominate body\n\/\/\t\tnewNominateFrom(1, 0, \"foo\"), \/\/ invalid round number\n\/\/\t}\n\/\/\n\/\/\tfor _, test := range totest {\n\/\/\t\tins := make(chan Message)\n\/\/\t\touts := SyncPutter(make(chan Message))\n\/\/\n\/\/\t\tgo acceptor(ins, PutWrapper{1, 2, outs})\n\/\/\t\tins <- test\n\/\/\n\/\/\t\t\/\/ We want to check that it didn't try to send a response.\n\/\/\t\t\/\/ If it didn't, it will continue to read the next input message and\n\/\/\t\t\/\/ this will work fine. If it did, this will deadlock.\n\/\/\t\tins <- test\n\/\/\n\/\/\t\tclose(ins)\n\/\/\t}\n\/\/}\n\n\/\/func TestIgnoresMalformedMessageBadCommand(t *testing.T) {\n\/\/ msgs := make(chan Message)\n\/\/ taught := make(chan string)\n\/\/\n\/\/ go func() {\n\/\/ taught <- learner(1, msgs)\n\/\/ }()\n\/\/\n\/\/ m := newVoteFrom(1, 1, \"foo\")\n\/\/ m.(*Msg).cmd = \"foo\"\n\/\/ msgs <- m\n\/\/ msgs <- newVoteFrom(1, 1, \"foo\")\n\/\/\n\/\/ assert.Equal(t, \"foo\", <-taught, \"\")\n\/\/}\n\/\/\n\/\/func TestIgnoresMessageWithIncorrectArityInBody(t *testing.T) {\n\/\/ msgs := make(chan Message)\n\/\/ taught := make(chan string)\n\/\/\n\/\/ go func() {\n\/\/ taught <- learner(1, msgs)\n\/\/ }()\n\/\/\n\/\/ m := newVoteFrom(1, 1, \"foo\")\n\/\/ m.(*Msg).body = \"\"\n\/\/ msgs <- m\n\/\/ msgs <- newVoteFrom(1, 1, \"foo\")\n\/\/\n\/\/ assert.Equal(t, \"foo\", <-taught, \"\")\n\/\/}\n\n\/\/ For testing convenience\nfunc newVoteFrom(from byte, i uint64, vval string) Message {\n m := NewVote(i, vval)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newNominateFrom(from byte, crnd uint64, v string) Message {\n m := NewNominate(crnd, v)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newRsvpFrom(from byte, i, vrnd uint64, vval string) Message {\n m := NewRsvp(i, vrnd, vval)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\n\/\/ For testing convenience\nfunc newInviteFrom(from byte, rnd uint64) Message {\n m := NewInvite(rnd)\n m.SetSeqn(1)\n m.SetFrom(from)\n return m\n}\n\nfunc TestMessageNewInvite(t *testing.T) {\n m := NewInvite(1)\n assert.Equal(t, Invite, m.Cmd(), \"\")\n crnd := InviteParts(m)\n assert.Equal(t, uint64(1), crnd, \"\")\n}\n\nfunc TestMessageNewInviteAlt(t *testing.T) {\n m := NewInvite(2)\n assert.Equal(t, Invite, m.Cmd(), \"\")\n crnd := InviteParts(m)\n assert.Equal(t, uint64(2), crnd, \"\")\n}\n\nfunc TestMessageNewNominate(t *testing.T) {\n m := NewNominate(1, \"foo\")\n assert.Equal(t, Nominate, m.Cmd(), \"\")\n crnd, v := NominateParts(m)\n assert.Equal(t, uint64(1), crnd, \"\")\n assert.Equal(t, \"foo\", v, \"\")\n}\n\nfunc TestMessageNewNominateAlt(t *testing.T) {\n m := NewNominate(2, \"bar\")\n assert.Equal(t, Nominate, m.Cmd(), \"\")\n crnd, v := NominateParts(m)\n assert.Equal(t, uint64(2), crnd, \"\")\n assert.Equal(t, \"bar\", v, \"\")\n}\n\nfunc TestMessageNewRsvp(t *testing.T) {\n m := NewRsvp(1, 0, \"\")\n assert.Equal(t, Rsvp, m.Cmd(), \"\")\n i, vrnd, vval := RsvpParts(m)\n assert.Equal(t, uint64(1), i, \"\")\n assert.Equal(t, uint64(0), vrnd, \"\")\n assert.Equal(t, \"\", vval, \"\")\n}\n\nfunc TestMessageNewRsvpAlt(t *testing.T) {\n m := NewRsvp(2, 1, \"foo\")\n assert.Equal(t, Rsvp, m.Cmd(), \"\")\n i, vrnd, vval := RsvpParts(m)\n assert.Equal(t, uint64(2), i, \"\")\n assert.Equal(t, uint64(1), vrnd, \"\")\n assert.Equal(t, \"foo\", vval, \"\")\n}\n\nfunc TestMessageNewVote(t *testing.T) {\n m := NewVote(1, \"foo\")\n assert.Equal(t, Vote, m.Cmd(), \"\")\n i, vval := VoteParts(m)\n assert.Equal(t, uint64(1), i, \"\")\n assert.Equal(t, \"foo\", vval, \"\")\n}\n\nfunc TestMessageNewVoteAlt(t *testing.T) {\n m := NewVote(2, \"bar\")\n assert.Equal(t, Vote, m.Cmd(), \"\")\n i, vval := VoteParts(m)\n assert.Equal(t, uint64(2), i, \"\")\n assert.Equal(t, \"bar\", vval, \"\")\n}\n\nfunc TestMessageSetFrom(t *testing.T) {\n m := NewInvite(1)\n m.SetFrom(1)\n assert.Equal(t, 1, m.From(), \"\")\n m.SetFrom(2)\n assert.Equal(t, 2, m.From(), \"\")\n}\n\nfunc TestMessageSetSeqn(t *testing.T) {\n m := NewInvite(1)\n m.SetSeqn(1)\n assert.Equal(t, uint64(1), m.Seqn(), \"\")\n m.SetSeqn(2)\n assert.Equal(t, uint64(2), m.Seqn(), \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage runner\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ Runner runs an e2e test\ntype Runner struct {\n\tpkgName string\n\ttestCase TestCase\n\tcmd string\n\tt *testing.T\n\tinitialCommit string\n\tkptBin string\n}\n\nfunc getKptBin() (string, error) {\n\tif p := os.Getenv(kptBinEnv); p != \"\" {\n\t\treturn p, nil\n\t}\n\treturn \"\", fmt.Errorf(\"must specify env '%s' for kpt binary path\", kptBinEnv)\n}\n\nconst (\n\t\/\/ If this env is set to \"true\", this e2e test framework will update the\n\t\/\/ expected diff and results if they already exist. If will not change\n\t\/\/ config.yaml.\n\tupdateExpectedEnv string = \"KPT_E2E_UPDATE_EXPECTED\"\n\tkptBinEnv string = \"KPT_E2E_BIN\"\n\n\texpectedDir string = \".expected\"\n\texpectedResultsFile string = \"results.yaml\"\n\texpectedDiffFile string = \"diff.patch\"\n\texpectedConfigFile string = \"config.yaml\"\n\tCommandFnEval string = \"eval\"\n\tCommandFnRender string = \"render\"\n)\n\n\/\/ NewRunner returns a new runner for pkg\nfunc NewRunner(t *testing.T, testCase TestCase, c string) (*Runner, error) {\n\tinfo, err := os.Stat(testCase.Path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot open path %s: %w\", testCase.Path, err)\n\t}\n\tif !info.IsDir() {\n\t\treturn nil, fmt.Errorf(\"path %s is not a directory\", testCase.Path)\n\t}\n\tkptBin, err := getKptBin()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to find kpt binary: %w\", err)\n\t}\n\tt.Logf(\"Using kpt binary: %s\", kptBin)\n\treturn &Runner{\n\t\tpkgName: filepath.Base(testCase.Path),\n\t\ttestCase: testCase,\n\t\tcmd: c,\n\t\tt: t,\n\t\tkptBin: kptBin,\n\t}, nil\n}\n\n\/\/ Run runs the test.\nfunc (r *Runner) Run() error {\n\tswitch r.cmd {\n\tcase CommandFnEval:\n\t\treturn r.runFnEval()\n\tcase CommandFnRender:\n\t\treturn r.runFnRender()\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid command %s\", r.cmd)\n\t}\n}\n\nfunc (r *Runner) runFnEval() error {\n\tr.t.Logf(\"Running test against package %s\\n\", r.pkgName)\n\ttmpDir, err := ioutil.TempDir(\"\", \"kpt-fn-e2e-*\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create temporary dir: %w\", err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\tpkgPath := filepath.Join(tmpDir, r.pkgName)\n\t\/\/ create result dir\n\tresultsPath := filepath.Join(tmpDir, \"results\")\n\terr = os.Mkdir(resultsPath, 0755)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create results dir %s: %w\", resultsPath, err)\n\t}\n\n\t\/\/ copy package to temp directory\n\terr = copyDir(r.testCase.Path, pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\n\t\/\/ init and commit package files\n\terr = r.preparePackage(pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare package: %w\", err)\n\t}\n\n\t\/\/ run function\n\tkptArgs := []string{\"fn\", \"eval\", pkgPath, \"--results-dir\", resultsPath}\n\tif r.testCase.Config.EvalConfig.Network {\n\t\tkptArgs = append(kptArgs, \"--network\")\n\t}\n\tif r.testCase.Config.EvalConfig.Image != \"\" {\n\t\tkptArgs = append(kptArgs, \"--image\", r.testCase.Config.EvalConfig.Image)\n\t} else if !r.testCase.Config.EvalConfig.execUniquePath.Empty() {\n\t\tkptArgs = append(kptArgs, \"--exec-path\", string(r.testCase.Config.EvalConfig.execUniquePath))\n\t}\n\tif !r.testCase.Config.EvalConfig.fnConfigUniquePath.Empty() {\n\t\tkptArgs = append(kptArgs, \"--fn-config\", string(r.testCase.Config.EvalConfig.fnConfigUniquePath))\n\t}\n\tif r.testCase.Config.EvalConfig.IncludeMetaResources {\n\t\tkptArgs = append(kptArgs, \"--include-meta-resources\")\n\t}\n\t\/\/ args must be appended last\n\tif len(r.testCase.Config.EvalConfig.Args) > 0 {\n\t\tkptArgs = append(kptArgs, \"--\")\n\t\tfor k, v := range r.testCase.Config.EvalConfig.Args {\n\t\t\tkptArgs = append(kptArgs, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t}\n\tfor i := 0; i < r.testCase.Config.RunCount(); i++ {\n\t\toutput, fnErr := runCommand(\"\", r.kptBin, kptArgs)\n\t\tif fnErr != nil {\n\t\t\tr.t.Logf(\"kpt error output: %s\", output)\n\t\t}\n\t\t\/\/ Update the diff file or results file if updateExpectedEnv is set.\n\t\tif strings.ToLower(os.Getenv(updateExpectedEnv)) == \"true\" {\n\t\t\treturn r.updateExpected(pkgPath, resultsPath, filepath.Join(r.testCase.Path, expectedDir))\n\t\t}\n\n\t\t\/\/ compare results\n\t\terr = r.compareResult(fnErr, pkgPath, resultsPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ we passed result check, now we should break if the command error\n\t\t\/\/ is expected\n\t\tif fnErr != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r *Runner) runFnRender() error {\n\tr.t.Logf(\"Running test against package %s\\n\", r.pkgName)\n\ttmpDir, err := ioutil.TempDir(\"\", \"kpt-pipeline-e2e-*\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create temporary dir: %w\", err)\n\t}\n\tif r.testCase.Config.Debug {\n\t\tfmt.Printf(\"Running test against package %s in dir %s \\n\", r.pkgName, tmpDir)\n\t}\n\tif !r.testCase.Config.Debug {\n\t\t\/\/ if debug is true, keep the test directory around for debugging\n\t\tdefer os.RemoveAll(tmpDir)\n\t}\n\tpkgPath := filepath.Join(tmpDir, r.pkgName)\n\t\/\/ create dir to store untouched pkg to compare against\n\torigPkgPath := filepath.Join(tmpDir, \"original\")\n\terr = os.Mkdir(origPkgPath, 0755)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create original dir %s: %w\", origPkgPath, err)\n\t}\n\n\t\/\/ copy package to temp directory\n\terr = copyDir(r.testCase.Path, pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\terr = copyDir(r.testCase.Path, origPkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\n\t\/\/ init and commit package files\n\terr = r.preparePackage(pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare package: %w\", err)\n\t}\n\n\t\/\/ run function\n\tkptArgs := []string{\"fn\", \"render\", pkgPath}\n\tfor i := 0; i < r.testCase.Config.RunCount(); i++ {\n\t\toutput, fnErr := runCommand(\"\", r.kptBin, kptArgs)\n\t\t\/\/ Update the diff file or results file if updateExpectedEnv is set.\n\t\tif strings.ToLower(os.Getenv(updateExpectedEnv)) == \"true\" {\n\t\t\t\/\/ TODO: `fn render` doesn't support result file now\n\t\t\t\/\/ use empty string to skip update results\n\t\t\treturn r.updateExpected(pkgPath, \"\", filepath.Join(r.testCase.Path, expectedDir))\n\t\t}\n\t\tif fnErr != nil {\n\t\t\tr.t.Logf(\"kpt error output: %s\", output)\n\t\t}\n\t\t\/\/ compare results\n\t\t\/\/ TODO: `fn render` doesn't support result file now\n\t\t\/\/ use empty string for results dir\n\t\terr = r.compareResult(fnErr, pkgPath, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ we passed result check, now we should break if the command error\n\t\t\/\/ is expected\n\t\tif fnErr != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Runner) preparePackage(pkgPath string) error {\n\terr := gitInit(pkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gitAddAll(pkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gitCommit(pkgPath, \"first\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.initialCommit, err = getCommitHash(pkgPath)\n\treturn err\n}\n\nfunc (r *Runner) compareResult(exitErr error, tmpPkgPath, resultsPath string) error {\n\texpected, err := newExpected(tmpPkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ get exit code\n\texitCode := 0\n\tif e, ok := exitErr.(*exec.ExitError); ok {\n\t\texitCode = e.ExitCode()\n\t} else if exitErr != nil {\n\t\treturn fmt.Errorf(\"cannot get exit code, received error '%w'\", exitErr)\n\t}\n\n\tif exitCode != r.testCase.Config.ExitCode {\n\t\treturn fmt.Errorf(\"actual exit code %d doesn't match expected %d\", exitCode, r.testCase.Config.ExitCode)\n\t}\n\n\t\/\/ compare results\n\tactual, err := readActualResults(resultsPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read actual results: %w\", err)\n\t}\n\tdiffOfResult, err := diffStrings(actual, expected.Results)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error when run diff of results: %w: %s\", err, diffOfResult)\n\t}\n\tif actual != expected.Results {\n\t\treturn fmt.Errorf(\"actual results doesn't match expected\\nActual\\n===\\n%s\\nDiff of Results\\n===\\n%s\",\n\t\t\tactual, diffOfResult)\n\t}\n\n\t\/\/ compare diff\n\tactual, err = readActualDiff(tmpPkgPath, r.initialCommit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read actual diff: %w\", err)\n\t}\n\tif actual != expected.Diff {\n\t\tdiffOfDiff, err := diffStrings(actual, expected.Diff)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error when run diff of diff: %w: %s\", err, diffOfDiff)\n\t\t}\n\t\treturn fmt.Errorf(\"actual diff doesn't match expected\\nActual\\n===\\n%s\\nDiff of Diff\\n===\\n%s\",\n\t\t\tactual, diffOfDiff)\n\t}\n\treturn nil\n}\n\nfunc (r *Runner) Skip() bool {\n\treturn r.testCase.Config.Skip\n}\n\nfunc readActualResults(resultsPath string) (string, error) {\n\t\/\/ no results\n\tif resultsPath == \"\" {\n\t\treturn \"\", nil\n\t}\n\tl, err := ioutil.ReadDir(resultsPath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get files in results dir: %w\", err)\n\t}\n\tif len(l) > 1 {\n\t\treturn \"\", fmt.Errorf(\"unexpected results files number %d, should be 0 or 1\", len(l))\n\t}\n\tif len(l) == 0 {\n\t\t\/\/ no result file\n\t\treturn \"\", nil\n\t}\n\tresultsFile := l[0].Name()\n\tactualResults, err := ioutil.ReadFile(filepath.Join(resultsPath, resultsFile))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read actual results: %w\", err)\n\t}\n\treturn strings.TrimSpace(string(actualResults)), nil\n}\n\nfunc readActualDiff(path, origHash string) (string, error) {\n\terr := gitAddAll(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = gitCommit(path, \"second\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ diff with first commit\n\tactualDiff, err := gitDiff(path, origHash, \"HEAD\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(actualDiff), nil\n}\n\n\/\/ expected contains the expected result for the function running\ntype expected struct {\n\tResults string\n\tDiff string\n}\n\nfunc newExpected(path string) (expected, error) {\n\te := expected{}\n\t\/\/ get expected results\n\texpectedResults, err := ioutil.ReadFile(filepath.Join(path, expectedDir, expectedResultsFile))\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\te.Results = \"\"\n\tcase err != nil:\n\t\treturn e, fmt.Errorf(\"failed to read expected results: %w\", err)\n\tdefault:\n\t\te.Results = strings.TrimSpace(string(expectedResults))\n\t}\n\n\t\/\/ get expected diff\n\texpectedDiff, err := ioutil.ReadFile(filepath.Join(path, expectedDir, expectedDiffFile))\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\te.Diff = \"\"\n\tcase err != nil:\n\t\treturn e, fmt.Errorf(\"failed to read expected diff: %w\", err)\n\tdefault:\n\t\te.Diff = strings.TrimSpace(string(expectedDiff))\n\t}\n\n\treturn e, nil\n}\n\nfunc (r *Runner) updateExpected(tmpPkgPath, resultsPath, sourceOfTruthPath string) error {\n\t\/\/ We update results directory only when a result file already exists.\n\tl, err := ioutil.ReadDir(resultsPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(l) == 0 {\n\t\tactualDiff, err := readActualDiff(tmpPkgPath, r.initialCommit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif actualDiff != \"\" {\n\t\t\tif err := ioutil.WriteFile(filepath.Join(sourceOfTruthPath, expectedDiffFile), []byte(actualDiff+\"\\n\"), 0666); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tactualResults, err := readActualResults(resultsPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif actualResults != \"\" {\n\t\t\tif err := ioutil.WriteFile(filepath.Join(sourceOfTruthPath, expectedResultsFile), []byte(actualResults+\"\\n\"), 0666); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>fix updating e2e results (#1757)<commit_after>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage runner\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ Runner runs an e2e test\ntype Runner struct {\n\tpkgName string\n\ttestCase TestCase\n\tcmd string\n\tt *testing.T\n\tinitialCommit string\n\tkptBin string\n}\n\nfunc getKptBin() (string, error) {\n\tif p := os.Getenv(kptBinEnv); p != \"\" {\n\t\treturn p, nil\n\t}\n\treturn \"\", fmt.Errorf(\"must specify env '%s' for kpt binary path\", kptBinEnv)\n}\n\nconst (\n\t\/\/ If this env is set to \"true\", this e2e test framework will update the\n\t\/\/ expected diff and results if they already exist. If will not change\n\t\/\/ config.yaml.\n\tupdateExpectedEnv string = \"KPT_E2E_UPDATE_EXPECTED\"\n\tkptBinEnv string = \"KPT_E2E_BIN\"\n\n\texpectedDir string = \".expected\"\n\texpectedResultsFile string = \"results.yaml\"\n\texpectedDiffFile string = \"diff.patch\"\n\texpectedConfigFile string = \"config.yaml\"\n\tCommandFnEval string = \"eval\"\n\tCommandFnRender string = \"render\"\n)\n\n\/\/ NewRunner returns a new runner for pkg\nfunc NewRunner(t *testing.T, testCase TestCase, c string) (*Runner, error) {\n\tinfo, err := os.Stat(testCase.Path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot open path %s: %w\", testCase.Path, err)\n\t}\n\tif !info.IsDir() {\n\t\treturn nil, fmt.Errorf(\"path %s is not a directory\", testCase.Path)\n\t}\n\tkptBin, err := getKptBin()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to find kpt binary: %w\", err)\n\t}\n\tt.Logf(\"Using kpt binary: %s\", kptBin)\n\treturn &Runner{\n\t\tpkgName: filepath.Base(testCase.Path),\n\t\ttestCase: testCase,\n\t\tcmd: c,\n\t\tt: t,\n\t\tkptBin: kptBin,\n\t}, nil\n}\n\n\/\/ Run runs the test.\nfunc (r *Runner) Run() error {\n\tswitch r.cmd {\n\tcase CommandFnEval:\n\t\treturn r.runFnEval()\n\tcase CommandFnRender:\n\t\treturn r.runFnRender()\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid command %s\", r.cmd)\n\t}\n}\n\nfunc (r *Runner) runFnEval() error {\n\tr.t.Logf(\"Running test against package %s\\n\", r.pkgName)\n\ttmpDir, err := ioutil.TempDir(\"\", \"kpt-fn-e2e-*\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create temporary dir: %w\", err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\tpkgPath := filepath.Join(tmpDir, r.pkgName)\n\t\/\/ create result dir\n\tresultsPath := filepath.Join(tmpDir, \"results\")\n\terr = os.Mkdir(resultsPath, 0755)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create results dir %s: %w\", resultsPath, err)\n\t}\n\n\t\/\/ copy package to temp directory\n\terr = copyDir(r.testCase.Path, pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\n\t\/\/ init and commit package files\n\terr = r.preparePackage(pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare package: %w\", err)\n\t}\n\n\t\/\/ run function\n\tkptArgs := []string{\"fn\", \"eval\", pkgPath, \"--results-dir\", resultsPath}\n\tif r.testCase.Config.EvalConfig.Network {\n\t\tkptArgs = append(kptArgs, \"--network\")\n\t}\n\tif r.testCase.Config.EvalConfig.Image != \"\" {\n\t\tkptArgs = append(kptArgs, \"--image\", r.testCase.Config.EvalConfig.Image)\n\t} else if !r.testCase.Config.EvalConfig.execUniquePath.Empty() {\n\t\tkptArgs = append(kptArgs, \"--exec-path\", string(r.testCase.Config.EvalConfig.execUniquePath))\n\t}\n\tif !r.testCase.Config.EvalConfig.fnConfigUniquePath.Empty() {\n\t\tkptArgs = append(kptArgs, \"--fn-config\", string(r.testCase.Config.EvalConfig.fnConfigUniquePath))\n\t}\n\tif r.testCase.Config.EvalConfig.IncludeMetaResources {\n\t\tkptArgs = append(kptArgs, \"--include-meta-resources\")\n\t}\n\t\/\/ args must be appended last\n\tif len(r.testCase.Config.EvalConfig.Args) > 0 {\n\t\tkptArgs = append(kptArgs, \"--\")\n\t\tfor k, v := range r.testCase.Config.EvalConfig.Args {\n\t\t\tkptArgs = append(kptArgs, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t}\n\tfor i := 0; i < r.testCase.Config.RunCount(); i++ {\n\t\toutput, fnErr := runCommand(\"\", r.kptBin, kptArgs)\n\t\tif fnErr != nil {\n\t\t\tr.t.Logf(\"kpt error output: %s\", output)\n\t\t}\n\t\t\/\/ Update the diff file or results file if updateExpectedEnv is set.\n\t\tif strings.ToLower(os.Getenv(updateExpectedEnv)) == \"true\" {\n\t\t\treturn r.updateExpected(pkgPath, resultsPath, filepath.Join(r.testCase.Path, expectedDir))\n\t\t}\n\n\t\t\/\/ compare results\n\t\terr = r.compareResult(fnErr, pkgPath, resultsPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ we passed result check, now we should break if the command error\n\t\t\/\/ is expected\n\t\tif fnErr != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r *Runner) runFnRender() error {\n\tr.t.Logf(\"Running test against package %s\\n\", r.pkgName)\n\ttmpDir, err := ioutil.TempDir(\"\", \"kpt-pipeline-e2e-*\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create temporary dir: %w\", err)\n\t}\n\tif r.testCase.Config.Debug {\n\t\tfmt.Printf(\"Running test against package %s in dir %s \\n\", r.pkgName, tmpDir)\n\t}\n\tif !r.testCase.Config.Debug {\n\t\t\/\/ if debug is true, keep the test directory around for debugging\n\t\tdefer os.RemoveAll(tmpDir)\n\t}\n\tpkgPath := filepath.Join(tmpDir, r.pkgName)\n\t\/\/ create dir to store untouched pkg to compare against\n\torigPkgPath := filepath.Join(tmpDir, \"original\")\n\terr = os.Mkdir(origPkgPath, 0755)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create original dir %s: %w\", origPkgPath, err)\n\t}\n\n\t\/\/ copy package to temp directory\n\terr = copyDir(r.testCase.Path, pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\terr = copyDir(r.testCase.Path, origPkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to copy package: %w\", err)\n\t}\n\n\t\/\/ init and commit package files\n\terr = r.preparePackage(pkgPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare package: %w\", err)\n\t}\n\n\t\/\/ run function\n\tkptArgs := []string{\"fn\", \"render\", pkgPath}\n\tfor i := 0; i < r.testCase.Config.RunCount(); i++ {\n\t\toutput, fnErr := runCommand(\"\", r.kptBin, kptArgs)\n\t\t\/\/ Update the diff file or results file if updateExpectedEnv is set.\n\t\tif strings.ToLower(os.Getenv(updateExpectedEnv)) == \"true\" {\n\t\t\t\/\/ TODO: `fn render` doesn't support result file now\n\t\t\t\/\/ use empty string to skip update results\n\t\t\treturn r.updateExpected(pkgPath, \"\", filepath.Join(r.testCase.Path, expectedDir))\n\t\t}\n\t\tif fnErr != nil {\n\t\t\tr.t.Logf(\"kpt error output: %s\", output)\n\t\t}\n\t\t\/\/ compare results\n\t\t\/\/ TODO: `fn render` doesn't support result file now\n\t\t\/\/ use empty string for results dir\n\t\terr = r.compareResult(fnErr, pkgPath, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ we passed result check, now we should break if the command error\n\t\t\/\/ is expected\n\t\tif fnErr != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Runner) preparePackage(pkgPath string) error {\n\terr := gitInit(pkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gitAddAll(pkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = gitCommit(pkgPath, \"first\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.initialCommit, err = getCommitHash(pkgPath)\n\treturn err\n}\n\nfunc (r *Runner) compareResult(exitErr error, tmpPkgPath, resultsPath string) error {\n\texpected, err := newExpected(tmpPkgPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ get exit code\n\texitCode := 0\n\tif e, ok := exitErr.(*exec.ExitError); ok {\n\t\texitCode = e.ExitCode()\n\t} else if exitErr != nil {\n\t\treturn fmt.Errorf(\"cannot get exit code, received error '%w'\", exitErr)\n\t}\n\n\tif exitCode != r.testCase.Config.ExitCode {\n\t\treturn fmt.Errorf(\"actual exit code %d doesn't match expected %d\", exitCode, r.testCase.Config.ExitCode)\n\t}\n\n\t\/\/ compare results\n\tactual, err := readActualResults(resultsPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read actual results: %w\", err)\n\t}\n\tdiffOfResult, err := diffStrings(actual, expected.Results)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error when run diff of results: %w: %s\", err, diffOfResult)\n\t}\n\tif actual != expected.Results {\n\t\treturn fmt.Errorf(\"actual results doesn't match expected\\nActual\\n===\\n%s\\nDiff of Results\\n===\\n%s\",\n\t\t\tactual, diffOfResult)\n\t}\n\n\t\/\/ compare diff\n\tactual, err = readActualDiff(tmpPkgPath, r.initialCommit)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read actual diff: %w\", err)\n\t}\n\tif actual != expected.Diff {\n\t\tdiffOfDiff, err := diffStrings(actual, expected.Diff)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error when run diff of diff: %w: %s\", err, diffOfDiff)\n\t\t}\n\t\treturn fmt.Errorf(\"actual diff doesn't match expected\\nActual\\n===\\n%s\\nDiff of Diff\\n===\\n%s\",\n\t\t\tactual, diffOfDiff)\n\t}\n\treturn nil\n}\n\nfunc (r *Runner) Skip() bool {\n\treturn r.testCase.Config.Skip\n}\n\nfunc readActualResults(resultsPath string) (string, error) {\n\t\/\/ no results\n\tif resultsPath == \"\" {\n\t\treturn \"\", nil\n\t}\n\tl, err := ioutil.ReadDir(resultsPath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get files in results dir: %w\", err)\n\t}\n\tif len(l) > 1 {\n\t\treturn \"\", fmt.Errorf(\"unexpected results files number %d, should be 0 or 1\", len(l))\n\t}\n\tif len(l) == 0 {\n\t\t\/\/ no result file\n\t\treturn \"\", nil\n\t}\n\tresultsFile := l[0].Name()\n\tactualResults, err := ioutil.ReadFile(filepath.Join(resultsPath, resultsFile))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read actual results: %w\", err)\n\t}\n\treturn strings.TrimSpace(string(actualResults)), nil\n}\n\nfunc readActualDiff(path, origHash string) (string, error) {\n\terr := gitAddAll(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = gitCommit(path, \"second\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ diff with first commit\n\tactualDiff, err := gitDiff(path, origHash, \"HEAD\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(actualDiff), nil\n}\n\n\/\/ expected contains the expected result for the function running\ntype expected struct {\n\tResults string\n\tDiff string\n}\n\nfunc newExpected(path string) (expected, error) {\n\te := expected{}\n\t\/\/ get expected results\n\texpectedResults, err := ioutil.ReadFile(filepath.Join(path, expectedDir, expectedResultsFile))\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\te.Results = \"\"\n\tcase err != nil:\n\t\treturn e, fmt.Errorf(\"failed to read expected results: %w\", err)\n\tdefault:\n\t\te.Results = strings.TrimSpace(string(expectedResults))\n\t}\n\n\t\/\/ get expected diff\n\texpectedDiff, err := ioutil.ReadFile(filepath.Join(path, expectedDir, expectedDiffFile))\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\te.Diff = \"\"\n\tcase err != nil:\n\t\treturn e, fmt.Errorf(\"failed to read expected diff: %w\", err)\n\tdefault:\n\t\te.Diff = strings.TrimSpace(string(expectedDiff))\n\t}\n\n\treturn e, nil\n}\n\nfunc (r *Runner) updateExpected(tmpPkgPath, resultsPath, sourceOfTruthPath string) error {\n\tif resultsPath != \"\" {\n\t\t\/\/ We update results directory only when a result file already exists.\n\t\tl, err := ioutil.ReadDir(resultsPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(l) > 0 {\n\t\t\tactualResults, err := readActualResults(resultsPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif actualResults != \"\" {\n\t\t\t\tif err := ioutil.WriteFile(filepath.Join(sourceOfTruthPath, expectedResultsFile), []byte(actualResults+\"\\n\"), 0666); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tactualDiff, err := readActualDiff(tmpPkgPath, r.initialCommit)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif actualDiff != \"\" {\n\t\tif err := ioutil.WriteFile(filepath.Join(sourceOfTruthPath, expectedDiffFile), []byte(actualDiff+\"\\n\"), 0666); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"testing\"\n\nfunc TestLookupValid(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"tcp\", \"imaps\"\n\n\tgot_addr, got_port, err := domain.lookup(service, proto)\n\twant_addr, want_port := \"hermes.marshland.ovh\", uint16(993)\n\n\tif err != nil {\n\t\tt.Errorf(\"Service %q:\/\/%q returned error: %v\", proto, service, err)\n\t}\n\tif got_addr != want_addr {\n\t\tt.Errorf(\"Service %q:\/\/%q returned address %q, expected %q\", proto, service, got_addr, want_addr)\n\t}\n\tif got_port != want_port {\n\t\tt.Errorf(\"Service %q:\/\/%q returned port %d, expected %d\", proto, service, got_port, want_port)\n\t}\n}\n\nfunc TestLookupEmpty(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"udp\", \"imaps\"\n\n\t_, _, err := domain.lookup(service, proto)\n\n\tif err == nil {\n\t\tt.Errorf(\"Service %q:\/\/%q should have errored\", proto, service)\n\t}\n}\n\nfunc TestLookupError(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"udp\", \"imaps\"\n\n\t_, _, err := domain.lookup(service, proto)\n\n\tif err == nil {\n\t\tt.Errorf(\"Service %q:\/\/%q should have errored\", proto, service)\n\t}\n}\n<commit_msg>Test config generation logic.<commit_after>package main\n\nimport \"testing\"\n\nfunc TestLookupValid(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"tcp\", \"imaps\"\n\n\tgot_addr, got_port, err := domain.lookup(service, proto)\n\twant_addr, want_port := \"hermes.marshland.ovh\", uint16(993)\n\n\tif err != nil {\n\t\tt.Errorf(\"Service %q:\/\/%q returned error: %v\", proto, service, err)\n\t}\n\tif got_addr != want_addr {\n\t\tt.Errorf(\"Service %q:\/\/%q returned address %q, expected %q\", proto, service, got_addr, want_addr)\n\t}\n\tif got_port != want_port {\n\t\tt.Errorf(\"Service %q:\/\/%q returned port %d, expected %d\", proto, service, got_port, want_port)\n\t}\n}\n\nfunc TestLookupEmpty(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"udp\", \"imaps\"\n\n\t_, _, err := domain.lookup(service, proto)\n\n\tif err == nil {\n\t\tt.Errorf(\"Service %q:\/\/%q should have errored\", proto, service)\n\t}\n}\n\nfunc TestLookupError(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tproto, service := \"udp\", \"imaps\"\n\n\t_, _, err := domain.lookup(service, proto)\n\n\tif err == nil {\n\t\tt.Errorf(\"Service %q:\/\/%q should have errored\", proto, service)\n\t}\n}\n\nfunc TestConfigIncoming(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tdomain.generate_xml()\n\n\tgot := domain.config.Providers[0].IncomingServers[0]\n\twant := IncomingServer{}\n\twant.Type = \"imap\"\n\twant.Hostname = \"hermes.marshland.ovh\"\n\twant.Port = 993\n\twant.SocketType = \"SSL\"\n\twant.Authentication = \"password-cleartext\"\n\twant.Username = \"%EMAILLOCALPART%\"\n\n\tif got != want {\n\t\tt.Errorf(\"Incoming server doesn't match expected value\")\n\t}\n}\n\nfunc TestConfigOutgoing(t *testing.T) {\n\tdomain := Domain{\"marshland.ovh\", ClientConfig{}}\n\tdomain.generate_xml()\n\n\tgot := domain.config.Providers[0].OutgoingServers[0]\n\twant := OutgoingServer{}\n\twant.Type = \"smtp\"\n\twant.Hostname = \"hermes.marshland.ovh\"\n\twant.Port = 465\n\twant.SocketType = \"SSL\"\n\twant.Authentication = \"password-cleartext\"\n\twant.Username = \"%EMAILLOCALPART%\"\n\n\tif got != want {\n\t\tt.Errorf(\"Incoming server doesn't match expected value\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package flickr\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"fmt\"\n\t\"net\"\n\t\"http\"\n\t\"sort\"\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"crypto\/md5\"\n)\n\nconst (\n\tendpoint = \"http:\/\/api.flickr.com\/services\/rest\/?\"\n\tuploadEndpoint = \"http:\/\/api.flickr.com\/services\/upload\/\"\n\treplaceEndpoint = \"http:\/\/api.flickr.com\/services\/replace\/\"\n\tapiHost = \"api.flickr.com\"\n)\n\ntype Request struct {\n\tApiKey string\n\tMethod string\n\tArgs map[string]string\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n\ntype Error string\n\nfunc (e Error) String() string {\n\treturn string(e)\n}\n\nfunc (request *Request) Sign(secret string) {\n\targs := request.Args\n\n\t\/\/ Remove api_sig\n\targs[\"api_sig\"] = \"\", false\n\n\tsorted_keys := make([]string, len(args)+2)\n\n\targs[\"api_key\"] = request.ApiKey\n\targs[\"method\"] = request.Method\n\n\t\/\/ Sort array keys\n\ti := 0\n\tfor k := range args {\n\t\tsorted_keys[i] = k\n\t\ti++\n\t}\n\tsort.SortStrings(sorted_keys)\n\n\t\/\/ Build out ordered key-value string prefixed by secret\n\ts := secret\n\tfor _, key := range sorted_keys {\n\t\tif args[key] != \"\" {\n\t\t\ts += fmt.Sprintf(\"%s%s\", key, args[key])\n\t\t}\n\t}\n\n\t\/\/ Since we're only adding two keys, it's easier \n\t\/\/ and more space-efficient to just delete them\n\t\/\/ them copy the whole map\n\targs[\"api_key\"] = \"\", false\n\targs[\"method\"] = \"\", false\n\n\t\/\/ Have the full string, now hash\n\thash := md5.New()\n\thash.Write([]byte(s))\n\n\t\/\/ Add api_sig as one of the args\n\targs[\"api_sig\"] = fmt.Sprintf(\"%x\", hash.Sum())\n}\n\nfunc (request *Request) URL() string {\n\targs := request.Args\n\n\targs[\"api_key\"] = request.ApiKey\n\targs[\"method\"] = request.Method\n\n\ts := endpoint + encodeQuery(args)\n\treturn s\n}\n\nfunc (request *Request) Execute() (response string, ret os.Error) {\n\tif request.ApiKey == \"\" || request.Method == \"\" {\n\t\treturn \"\", Error(\"Need both API key and method\")\n\t}\n\n\ts := request.URL()\n\n\tres, _, err := http.Get(s)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbody, _ := ioutil.ReadAll(res.Body)\n\treturn string(body), nil\n}\n\nfunc encodeQuery(args map[string]string) string {\n\ti := 0\n\ts := bytes.NewBuffer(nil)\n\tfor k, v := range args {\n\t\tif i != 0 {\n\t\t\ts.WriteString(\"&\")\n\t\t}\n\t\ti++\n\t\ts.WriteString(k + \"=\" + http.URLEscape(v))\n\t}\n\treturn s.String()\n}\n\nfunc (request *Request) buildPost(url string, filename string, filetype string) (*http.Request, os.Error) {\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf_size := stat.Size\n\n\trequest.Args[\"api_key\"] = request.ApiKey\n\n\tboundary, end := \"----###---###--flickr-go-rules\", \"\\r\\n\"\n\n\t\/\/ Build out all of POST body sans file\n\theader := bytes.NewBuffer(nil)\n\tfor k, v := range request.Args {\n\t\theader.WriteString(\"--\" + boundary + end)\n\t\theader.WriteString(\"Content-Disposition: form-data; name=\\\"\" + k + \"\\\"\" + end + end)\n\t\theader.WriteString(v + end)\n\t}\n\theader.WriteString(\"--\" + boundary + end)\n\theader.WriteString(\"Content-Disposition: form-data; name=\\\"photo\\\"; filename=\\\"photo.jpg\\\"\" + end)\n\theader.WriteString(\"Content-Type: \" + filetype + end + end)\n\n\tfooter := bytes.NewBuffer(nil)\n\tfooter.WriteString(end + \"--\" + boundary + \"--\" + end)\n\n\tbody_len := int64(header.Len()) + int64(footer.Len()) + f_size\n\n\tr, w := io.Pipe()\n\tgo func() {\n\t\tpieces := []io.Reader{header, f, footer}\n\n\t\tfor _, k := range pieces {\n\t\t\t_, err = io.Copy(w, k)\n\t\t\tif err != nil {\n\t\t\t\tw.CloseWithError(nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tf.Close()\n\t\tw.Close()\n\t}()\n\n\tpostRequest := new(http.Request)\n\tpostRequest.Method = \"POST\"\n\tpostRequest.RawURL = url\n\tpostRequest.Host = apiHost\n\tpostRequest.Header = map[string]string{\n\t\t\"Content-Type\": \"multipart\/form-data; boundary=\" + boundary + end,\n\t}\n\tpostRequest.Body = r\n\tpostRequest.ContentLength = body_len\n\treturn postRequest, nil\n}\n\n\/\/ Example: \n\/\/ r.Upload(\"thumb.jpg\", \"image\/jpeg\")\nfunc (request *Request) Upload(filename string, filetype string) (response string, err os.Error) {\n\tpostRequest, err := request.buildPost(uploadEndpoint, filename, filetype)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn sendPost(postRequest)\n}\n\nfunc (request *Request) Replace(filename string, filetype string) (response string, err os.Error) {\n\tpostRequest, err := request.buildPost(replaceEndpoint, filename, filetype)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn sendPost(postRequest)\n}\n\nfunc sendPost(postRequest *http.Request) (body string, err os.Error) {\n\t\/\/ Create and use TCP connection (lifted mostly wholesale from http.send)\n\tconn, err := net.Dial(\"tcp\", \"\", \"api.flickr.com:80\")\n\tdefer conn.Close()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpostRequest.Write(conn)\n\n\treader := bufio.NewReader(conn)\n\tresp, err := http.ReadResponse(reader, postRequest.Method)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trawBody, _ := ioutil.ReadAll(resp.Body)\n\n\treturn string(rawBody), nil\n}\n<commit_msg>More go-like<commit_after>package flickr\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"fmt\"\n\t\"net\"\n\t\"http\"\n\t\"sort\"\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"crypto\/md5\"\n)\n\nconst (\n\tendpoint = \"http:\/\/api.flickr.com\/services\/rest\/?\"\n\tuploadEndpoint = \"http:\/\/api.flickr.com\/services\/upload\/\"\n\treplaceEndpoint = \"http:\/\/api.flickr.com\/services\/replace\/\"\n\tapiHost = \"api.flickr.com\"\n)\n\ntype Request struct {\n\tApiKey string\n\tMethod string\n\tArgs map[string]string\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n\ntype Error string\n\nfunc (e Error) String() string {\n\treturn string(e)\n}\n\nfunc (request *Request) Sign(secret string) {\n\targs := request.Args\n\n\t\/\/ Remove api_sig\n\targs[\"api_sig\"] = \"\", false\n\n\tsorted_keys := make([]string, len(args)+2)\n\n\targs[\"api_key\"] = request.ApiKey\n\targs[\"method\"] = request.Method\n\n\t\/\/ Sort array keys\n\ti := 0\n\tfor k := range args {\n\t\tsorted_keys[i] = k\n\t\ti++\n\t}\n\tsort.SortStrings(sorted_keys)\n\n\t\/\/ Build out ordered key-value string prefixed by secret\n\ts := secret\n\tfor _, key := range sorted_keys {\n\t\tif args[key] != \"\" {\n\t\t\ts += fmt.Sprintf(\"%s%s\", key, args[key])\n\t\t}\n\t}\n\n\t\/\/ Since we're only adding two keys, it's easier \n\t\/\/ and more space-efficient to just delete them\n\t\/\/ them copy the whole map\n\targs[\"api_key\"] = \"\", false\n\targs[\"method\"] = \"\", false\n\n\t\/\/ Have the full string, now hash\n\thash := md5.New()\n\thash.Write([]byte(s))\n\n\t\/\/ Add api_sig as one of the args\n\targs[\"api_sig\"] = fmt.Sprintf(\"%x\", hash.Sum())\n}\n\nfunc (request *Request) URL() string {\n\targs := request.Args\n\n\targs[\"api_key\"] = request.ApiKey\n\targs[\"method\"] = request.Method\n\n\ts := endpoint + encodeQuery(args)\n\treturn s\n}\n\nfunc (request *Request) Execute() (response string, ret os.Error) {\n\tif request.ApiKey == \"\" || request.Method == \"\" {\n\t\treturn \"\", Error(\"Need both API key and method\")\n\t}\n\n\ts := request.URL()\n\n\tres, _, err := http.Get(s)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbody, _ := ioutil.ReadAll(res.Body)\n\treturn string(body), nil\n}\n\nfunc encodeQuery(args map[string]string) string {\n\ti := 0\n\ts := bytes.NewBuffer(nil)\n\tfor k, v := range args {\n\t\tif i != 0 {\n\t\t\ts.WriteString(\"&\")\n\t\t}\n\t\ti++\n\t\ts.WriteString(k + \"=\" + http.URLEscape(v))\n\t}\n\treturn s.String()\n}\n\nfunc (request *Request) buildPost(url string, filename string, filetype string) (*http.Request, os.Error) {\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf_size := stat.Size\n\n\trequest.Args[\"api_key\"] = request.ApiKey\n\n\tboundary, end := \"----###---###--flickr-go-rules\", \"\\r\\n\"\n\n\t\/\/ Build out all of POST body sans file\n\theader := bytes.NewBuffer(nil)\n\tfor k, v := range request.Args {\n\t\theader.WriteString(\"--\" + boundary + end)\n\t\theader.WriteString(\"Content-Disposition: form-data; name=\\\"\" + k + \"\\\"\" + end + end)\n\t\theader.WriteString(v + end)\n\t}\n\theader.WriteString(\"--\" + boundary + end)\n\theader.WriteString(\"Content-Disposition: form-data; name=\\\"photo\\\"; filename=\\\"photo.jpg\\\"\" + end)\n\theader.WriteString(\"Content-Type: \" + filetype + end + end)\n\n\tfooter := bytes.NewBuffer(nil)\n\tfooter.WriteString(end + \"--\" + boundary + \"--\" + end)\n\n\tbody_len := int64(header.Len()) + int64(footer.Len()) + f_size\n\n\tr, w := io.Pipe()\n\tgo func() {\n\t\tpieces := []io.Reader{header, f, footer}\n\n\t\tfor _, k := range pieces {\n\t\t\t_, err = io.Copy(w, k)\n\t\t\tif err != nil {\n\t\t\t\tw.CloseWithError(nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tf.Close()\n\t\tw.Close()\n\t}()\n\n\tpostRequest := &http.Request{\n\t\tMethod: \"POST\",\n\t\tRawURL: url,\n\t\tHost: apiHost,\n\t\tHeader: map[string]string{\n\t\t\t\"Content-Type\": \"multipart\/form-data; boundary=\" + boundary + end,\n\t\t},\n\t\tBody: r,\n\t\tContentLength: body_len,\n\t}\n\treturn postRequest, nil\n}\n\n\/\/ Example: \n\/\/ r.Upload(\"thumb.jpg\", \"image\/jpeg\")\nfunc (request *Request) Upload(filename string, filetype string) (response string, err os.Error) {\n\tpostRequest, err := request.buildPost(uploadEndpoint, filename, filetype)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn sendPost(postRequest)\n}\n\nfunc (request *Request) Replace(filename string, filetype string) (response string, err os.Error) {\n\tpostRequest, err := request.buildPost(replaceEndpoint, filename, filetype)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn sendPost(postRequest)\n}\n\nfunc sendPost(postRequest *http.Request) (body string, err os.Error) {\n\t\/\/ Create and use TCP connection (lifted mostly wholesale from http.send)\n\tconn, err := net.Dial(\"tcp\", \"\", \"api.flickr.com:80\")\n\tdefer conn.Close()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpostRequest.Write(conn)\n\n\treader := bufio.NewReader(conn)\n\tresp, err := http.ReadResponse(reader, postRequest.Method)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trawBody, _ := ioutil.ReadAll(resp.Body)\n\n\treturn string(rawBody), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package autopilot\n\nimport (\n\t\"bytes\"\n\t\"math\/big\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/coreos\/bbolt\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n)\n\nvar (\n\ttestSig = &btcec.Signature{\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n\t_, _ = testSig.R.SetString(\"63724406601629180062774974542967536251589935445068131219452686511677818569431\", 10)\n\t_, _ = testSig.S.SetString(\"18801056069249825825291287104931333862866033135609736119018462340006816851118\", 10)\n\n\tchanIDCounter uint64 \/\/ To be used atomically.\n)\n\n\/\/ databaseChannelGraph wraps a channeldb.ChannelGraph instance with the\n\/\/ necessary API to properly implement the autopilot.ChannelGraph interface.\n\/\/\n\/\/ TODO(roasbeef): move inmpl to main package?\ntype databaseChannelGraph struct {\n\tdb *channeldb.ChannelGraph\n}\n\n\/\/ A compile time assertion to ensure databaseChannelGraph meets the\n\/\/ autopilot.ChannelGraph interface.\nvar _ ChannelGraph = (*databaseChannelGraph)(nil)\n\n\/\/ ChannelGraphFromDatabase returns an instance of the autopilot.ChannelGraph\n\/\/ backed by a live, open channeldb instance.\nfunc ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph {\n\treturn &databaseChannelGraph{\n\t\tdb: db,\n\t}\n}\n\n\/\/ type dbNode is a wrapper struct around a database transaction an\n\/\/ channeldb.LightningNode. The wrapper method implement the autopilot.Node\n\/\/ interface.\ntype dbNode struct {\n\ttx *bolt.Tx\n\n\tnode *channeldb.LightningNode\n}\n\n\/\/ A compile time assertion to ensure dbNode meets the autopilot.Node\n\/\/ interface.\nvar _ Node = (*dbNode)(nil)\n\n\/\/ PubKey is the identity public key of the node. This will be used to attempt\n\/\/ to target a node for channel opening by the main autopilot agent. The key\n\/\/ will be returned in serialized compressed format.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) PubKey() [33]byte {\n\treturn d.node.PubKeyBytes\n}\n\n\/\/ Addrs returns a slice of publicly reachable public TCP addresses that the\n\/\/ peer is known to be listening on.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) Addrs() []net.Addr {\n\treturn d.node.Addresses\n}\n\n\/\/ ForEachChannel is a higher-order function that will be used to iterate\n\/\/ through all edges emanating from\/to the target node. For each active\n\/\/ channel, this function should be called with the populated ChannelEdge that\n\/\/ describes the active channel.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error {\n\treturn d.node.ForEachChannel(d.tx, func(tx *bolt.Tx,\n\t\tei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) error {\n\n\t\t\/\/ Skip channels for which no outgoing edge policy is available.\n\t\t\/\/\n\t\t\/\/ TODO(joostjager): Ideally the case where channels have a nil\n\t\t\/\/ policy should be supported, as auto pilot is not looking at\n\t\t\/\/ the policies. For now, it is not easily possible to get a\n\t\t\/\/ reference to the other end LightningNode object without\n\t\t\/\/ retrieving the policy.\n\t\tif ep == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tpubkey, _ := ep.Node.PubKey()\n\t\tedge := ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: lnwire.NewShortChanIDFromInt(ep.ChannelID),\n\t\t\t\tCapacity: ei.Capacity,\n\t\t\t\tFundedAmt: ei.Capacity,\n\t\t\t\tNode: NewNodeID(pubkey),\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\ttx: tx,\n\t\t\t\tnode: ep.Node,\n\t\t\t},\n\t\t}\n\n\t\treturn cb(edge)\n\t})\n}\n\n\/\/ ForEachNode is a higher-order function that should be called once for each\n\/\/ connected node within the channel graph. If the passed callback returns an\n\/\/ error, then execution should be terminated.\n\/\/\n\/\/ NOTE: Part of the autopilot.ChannelGraph interface.\nfunc (d *databaseChannelGraph) ForEachNode(cb func(Node) error) error {\n\treturn d.db.ForEachNode(nil, func(tx *bolt.Tx, n *channeldb.LightningNode) error {\n\n\t\t\/\/ We'll skip over any node that doesn't have any advertised\n\t\t\/\/ addresses. As we won't be able to reach them to actually\n\t\t\/\/ open any channels.\n\t\tif len(n.Addresses) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\tnode := dbNode{\n\t\t\ttx: tx,\n\t\t\tnode: n,\n\t\t}\n\t\treturn cb(node)\n\t})\n}\n\n\/\/ addRandChannel creates a new channel two target nodes. This function is\n\/\/ meant to aide in the generation of random graphs for use within test cases\n\/\/ the exercise the autopilot package.\nfunc (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,\n\tcapacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, error) {\n\n\tfetchNode := func(pub *btcec.PublicKey) (*channeldb.LightningNode, error) {\n\t\tif pub != nil {\n\t\t\tdbNode, err := d.db.FetchLightningNode(pub)\n\t\t\tswitch {\n\t\t\tcase err == channeldb.ErrGraphNodeNotFound:\n\t\t\t\tfallthrough\n\t\t\tcase err == channeldb.ErrGraphNotFound:\n\t\t\t\tgraphNode := &channeldb.LightningNode{\n\t\t\t\t\tHaveNodeAnnouncement: true,\n\t\t\t\t\tAddresses: []net.Addr{\n\t\t\t\t\t\t&net.TCPAddr{\n\t\t\t\t\t\t\tIP: bytes.Repeat([]byte(\"a\"), 16),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tFeatures: lnwire.NewFeatureVector(nil,\n\t\t\t\t\t\tlnwire.GlobalFeatures),\n\t\t\t\t\tAuthSigBytes: testSig.Serialize(),\n\t\t\t\t}\n\t\t\t\tgraphNode.AddPubKey(pub)\n\t\t\t\tif err := d.db.AddLightningNode(graphNode); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\tcase err != nil:\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn dbNode, nil\n\t\t}\n\n\t\tnodeKey, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdbNode := &channeldb.LightningNode{\n\t\t\tHaveNodeAnnouncement: true,\n\t\t\tAddresses: []net.Addr{\n\t\t\t\t&net.TCPAddr{\n\t\t\t\t\tIP: bytes.Repeat([]byte(\"a\"), 16),\n\t\t\t\t},\n\t\t\t},\n\t\t\tFeatures: lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures),\n\t\t\tAuthSigBytes: testSig.Serialize(),\n\t\t}\n\t\tdbNode.AddPubKey(nodeKey)\n\t\tif err := d.db.AddLightningNode(dbNode); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn dbNode, nil\n\t}\n\n\tvertex1, err := fetchNode(node1)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvertex2, err := fetchNode(node2)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar lnNode1, lnNode2 *btcec.PublicKey\n\tif bytes.Compare(vertex1.PubKeyBytes[:], vertex2.PubKeyBytes[:]) == -1 {\n\t\tlnNode1, _ = vertex1.PubKey()\n\t\tlnNode2, _ = vertex2.PubKey()\n\t} else {\n\t\tlnNode1, _ = vertex2.PubKey()\n\t\tlnNode2, _ = vertex1.PubKey()\n\t}\n\n\tchanID := randChanID()\n\tedge := &channeldb.ChannelEdgeInfo{\n\t\tChannelID: chanID.ToUint64(),\n\t\tCapacity: capacity,\n\t}\n\tedge.AddNodeKeys(lnNode1, lnNode2, lnNode1, lnNode2)\n\tif err := d.db.AddChannelEdge(edge); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tedgePolicy := &channeldb.ChannelEdgePolicy{\n\t\tSigBytes: testSig.Serialize(),\n\t\tChannelID: chanID.ToUint64(),\n\t\tLastUpdate: time.Now(),\n\t\tTimeLockDelta: 10,\n\t\tMinHTLC: 1,\n\t\tFeeBaseMSat: 10,\n\t\tFeeProportionalMillionths: 10000,\n\t\tFlags: 0,\n\t}\n\n\tif err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tedgePolicy = &channeldb.ChannelEdgePolicy{\n\t\tSigBytes: testSig.Serialize(),\n\t\tChannelID: chanID.ToUint64(),\n\t\tLastUpdate: time.Now(),\n\t\tTimeLockDelta: 10,\n\t\tMinHTLC: 1,\n\t\tFeeBaseMSat: 10,\n\t\tFeeProportionalMillionths: 10000,\n\t\tFlags: 1,\n\t}\n\tif err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: chanID,\n\t\t\t\tCapacity: capacity,\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\tnode: vertex1,\n\t\t\t},\n\t\t},\n\t\t&ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: chanID,\n\t\t\t\tCapacity: capacity,\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\tnode: vertex2,\n\t\t\t},\n\t\t},\n\t\tnil\n}\n\n\/\/ memChannelGraph is an implementation of the autopilot.ChannelGraph backed by\n\/\/ an in-memory graph.\ntype memChannelGraph struct {\n\tgraph map[NodeID]memNode\n}\n\n\/\/ A compile time assertion to ensure memChannelGraph meets the\n\/\/ autopilot.ChannelGraph interface.\nvar _ ChannelGraph = (*memChannelGraph)(nil)\n\n\/\/ newMemChannelGraph creates a new blank in-memory channel graph\n\/\/ implementation.\nfunc newMemChannelGraph() *memChannelGraph {\n\treturn &memChannelGraph{\n\t\tgraph: make(map[NodeID]memNode),\n\t}\n}\n\n\/\/ ForEachNode is a higher-order function that should be called once for each\n\/\/ connected node within the channel graph. If the passed callback returns an\n\/\/ error, then execution should be terminated.\n\/\/\n\/\/ NOTE: Part of the autopilot.ChannelGraph interface.\nfunc (m memChannelGraph) ForEachNode(cb func(Node) error) error {\n\tfor _, node := range m.graph {\n\t\tif err := cb(node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ randChanID generates a new random channel ID.\nfunc randChanID() lnwire.ShortChannelID {\n\tid := atomic.AddUint64(&chanIDCounter, 1)\n\treturn lnwire.NewShortChanIDFromInt(id)\n}\n\n\/\/ randKey returns a random public key.\nfunc randKey() (*btcec.PublicKey, error) {\n\tpriv, err := btcec.NewPrivateKey(btcec.S256())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn priv.PubKey(), nil\n}\n\n\/\/ addRandChannel creates a new channel two target nodes. This function is\n\/\/ meant to aide in the generation of random graphs for use within test cases\n\/\/ the exercise the autopilot package.\nfunc (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,\n\tcapacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, error) {\n\n\tvar (\n\t\tvertex1, vertex2 memNode\n\t\tok bool\n\t)\n\n\tif node1 != nil {\n\t\tvertex1, ok = m.graph[NewNodeID(node1)]\n\t\tif !ok {\n\t\t\tvertex1 = memNode{\n\t\t\t\tpub: node1,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnewPub, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tvertex1 = memNode{\n\t\t\tpub: newPub,\n\t\t}\n\t}\n\n\tif node2 != nil {\n\t\tvertex2, ok = m.graph[NewNodeID(node2)]\n\t\tif !ok {\n\t\t\tvertex2 = memNode{\n\t\t\t\tpub: node2,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnewPub, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tvertex2 = memNode{\n\t\t\tpub: newPub,\n\t\t}\n\t}\n\n\tchannel := Channel{\n\t\tChanID: randChanID(),\n\t\tCapacity: capacity,\n\t}\n\n\tedge1 := ChannelEdge{\n\t\tChannel: channel,\n\t\tPeer: vertex2,\n\t}\n\tvertex1.chans = append(vertex1.chans, edge1)\n\n\tedge2 := ChannelEdge{\n\t\tChannel: channel,\n\t\tPeer: vertex1,\n\t}\n\tvertex2.chans = append(vertex2.chans, edge2)\n\n\tm.graph[NewNodeID(vertex1.pub)] = vertex1\n\tm.graph[NewNodeID(vertex2.pub)] = vertex2\n\n\treturn &edge1, &edge2, nil\n}\n\n\/\/ memNode is a purely in-memory implementation of the autopilot.Node\n\/\/ interface.\ntype memNode struct {\n\tpub *btcec.PublicKey\n\n\tchans []ChannelEdge\n\n\taddrs []net.Addr\n}\n\n\/\/ A compile time assertion to ensure memNode meets the autopilot.Node\n\/\/ interface.\nvar _ Node = (*memNode)(nil)\n\n\/\/ PubKey is the identity public key of the node. This will be used to attempt\n\/\/ to target a node for channel opening by the main autopilot agent.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) PubKey() [33]byte {\n\tvar n [33]byte\n\tcopy(n[:], m.pub.SerializeCompressed())\n\n\treturn n\n}\n\n\/\/ Addrs returns a slice of publicly reachable public TCP addresses that the\n\/\/ peer is known to be listening on.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) Addrs() []net.Addr {\n\treturn m.addrs\n}\n\n\/\/ ForEachChannel is a higher-order function that will be used to iterate\n\/\/ through all edges emanating from\/to the target node. For each active\n\/\/ channel, this function should be called with the populated ChannelEdge that\n\/\/ describes the active channel.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) ForEachChannel(cb func(ChannelEdge) error) error {\n\tfor _, channel := range m.chans {\n\t\tif err := cb(channel); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>autopilot: optimize heavy loaded agent by fetching raw bytes for ChannelEdge<commit_after>package autopilot\n\nimport (\n\t\"bytes\"\n\t\"math\/big\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/coreos\/bbolt\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n)\n\nvar (\n\ttestSig = &btcec.Signature{\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n\t_, _ = testSig.R.SetString(\"63724406601629180062774974542967536251589935445068131219452686511677818569431\", 10)\n\t_, _ = testSig.S.SetString(\"18801056069249825825291287104931333862866033135609736119018462340006816851118\", 10)\n\n\tchanIDCounter uint64 \/\/ To be used atomically.\n)\n\n\/\/ databaseChannelGraph wraps a channeldb.ChannelGraph instance with the\n\/\/ necessary API to properly implement the autopilot.ChannelGraph interface.\n\/\/\n\/\/ TODO(roasbeef): move inmpl to main package?\ntype databaseChannelGraph struct {\n\tdb *channeldb.ChannelGraph\n}\n\n\/\/ A compile time assertion to ensure databaseChannelGraph meets the\n\/\/ autopilot.ChannelGraph interface.\nvar _ ChannelGraph = (*databaseChannelGraph)(nil)\n\n\/\/ ChannelGraphFromDatabase returns an instance of the autopilot.ChannelGraph\n\/\/ backed by a live, open channeldb instance.\nfunc ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph {\n\treturn &databaseChannelGraph{\n\t\tdb: db,\n\t}\n}\n\n\/\/ type dbNode is a wrapper struct around a database transaction an\n\/\/ channeldb.LightningNode. The wrapper method implement the autopilot.Node\n\/\/ interface.\ntype dbNode struct {\n\ttx *bolt.Tx\n\n\tnode *channeldb.LightningNode\n}\n\n\/\/ A compile time assertion to ensure dbNode meets the autopilot.Node\n\/\/ interface.\nvar _ Node = (*dbNode)(nil)\n\n\/\/ PubKey is the identity public key of the node. This will be used to attempt\n\/\/ to target a node for channel opening by the main autopilot agent. The key\n\/\/ will be returned in serialized compressed format.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) PubKey() [33]byte {\n\treturn d.node.PubKeyBytes\n}\n\n\/\/ Addrs returns a slice of publicly reachable public TCP addresses that the\n\/\/ peer is known to be listening on.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) Addrs() []net.Addr {\n\treturn d.node.Addresses\n}\n\n\/\/ ForEachChannel is a higher-order function that will be used to iterate\n\/\/ through all edges emanating from\/to the target node. For each active\n\/\/ channel, this function should be called with the populated ChannelEdge that\n\/\/ describes the active channel.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error {\n\treturn d.node.ForEachChannel(d.tx, func(tx *bolt.Tx,\n\t\tei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) error {\n\n\t\t\/\/ Skip channels for which no outgoing edge policy is available.\n\t\t\/\/\n\t\t\/\/ TODO(joostjager): Ideally the case where channels have a nil\n\t\t\/\/ policy should be supported, as auto pilot is not looking at\n\t\t\/\/ the policies. For now, it is not easily possible to get a\n\t\t\/\/ reference to the other end LightningNode object without\n\t\t\/\/ retrieving the policy.\n\t\tif ep == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tedge := ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: lnwire.NewShortChanIDFromInt(ep.ChannelID),\n\t\t\t\tCapacity: ei.Capacity,\n\t\t\t\tFundedAmt: ei.Capacity,\n\t\t\t\tNode: NodeID(ep.Node.PubKeyBytes),\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\ttx: tx,\n\t\t\t\tnode: ep.Node,\n\t\t\t},\n\t\t}\n\n\t\treturn cb(edge)\n\t})\n}\n\n\/\/ ForEachNode is a higher-order function that should be called once for each\n\/\/ connected node within the channel graph. If the passed callback returns an\n\/\/ error, then execution should be terminated.\n\/\/\n\/\/ NOTE: Part of the autopilot.ChannelGraph interface.\nfunc (d *databaseChannelGraph) ForEachNode(cb func(Node) error) error {\n\treturn d.db.ForEachNode(nil, func(tx *bolt.Tx, n *channeldb.LightningNode) error {\n\n\t\t\/\/ We'll skip over any node that doesn't have any advertised\n\t\t\/\/ addresses. As we won't be able to reach them to actually\n\t\t\/\/ open any channels.\n\t\tif len(n.Addresses) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\tnode := dbNode{\n\t\t\ttx: tx,\n\t\t\tnode: n,\n\t\t}\n\t\treturn cb(node)\n\t})\n}\n\n\/\/ addRandChannel creates a new channel two target nodes. This function is\n\/\/ meant to aide in the generation of random graphs for use within test cases\n\/\/ the exercise the autopilot package.\nfunc (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,\n\tcapacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, error) {\n\n\tfetchNode := func(pub *btcec.PublicKey) (*channeldb.LightningNode, error) {\n\t\tif pub != nil {\n\t\t\tdbNode, err := d.db.FetchLightningNode(pub)\n\t\t\tswitch {\n\t\t\tcase err == channeldb.ErrGraphNodeNotFound:\n\t\t\t\tfallthrough\n\t\t\tcase err == channeldb.ErrGraphNotFound:\n\t\t\t\tgraphNode := &channeldb.LightningNode{\n\t\t\t\t\tHaveNodeAnnouncement: true,\n\t\t\t\t\tAddresses: []net.Addr{\n\t\t\t\t\t\t&net.TCPAddr{\n\t\t\t\t\t\t\tIP: bytes.Repeat([]byte(\"a\"), 16),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tFeatures: lnwire.NewFeatureVector(nil,\n\t\t\t\t\t\tlnwire.GlobalFeatures),\n\t\t\t\t\tAuthSigBytes: testSig.Serialize(),\n\t\t\t\t}\n\t\t\t\tgraphNode.AddPubKey(pub)\n\t\t\t\tif err := d.db.AddLightningNode(graphNode); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\tcase err != nil:\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn dbNode, nil\n\t\t}\n\n\t\tnodeKey, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdbNode := &channeldb.LightningNode{\n\t\t\tHaveNodeAnnouncement: true,\n\t\t\tAddresses: []net.Addr{\n\t\t\t\t&net.TCPAddr{\n\t\t\t\t\tIP: bytes.Repeat([]byte(\"a\"), 16),\n\t\t\t\t},\n\t\t\t},\n\t\t\tFeatures: lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures),\n\t\t\tAuthSigBytes: testSig.Serialize(),\n\t\t}\n\t\tdbNode.AddPubKey(nodeKey)\n\t\tif err := d.db.AddLightningNode(dbNode); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn dbNode, nil\n\t}\n\n\tvertex1, err := fetchNode(node1)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvertex2, err := fetchNode(node2)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar lnNode1, lnNode2 *btcec.PublicKey\n\tif bytes.Compare(vertex1.PubKeyBytes[:], vertex2.PubKeyBytes[:]) == -1 {\n\t\tlnNode1, _ = vertex1.PubKey()\n\t\tlnNode2, _ = vertex2.PubKey()\n\t} else {\n\t\tlnNode1, _ = vertex2.PubKey()\n\t\tlnNode2, _ = vertex1.PubKey()\n\t}\n\n\tchanID := randChanID()\n\tedge := &channeldb.ChannelEdgeInfo{\n\t\tChannelID: chanID.ToUint64(),\n\t\tCapacity: capacity,\n\t}\n\tedge.AddNodeKeys(lnNode1, lnNode2, lnNode1, lnNode2)\n\tif err := d.db.AddChannelEdge(edge); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tedgePolicy := &channeldb.ChannelEdgePolicy{\n\t\tSigBytes: testSig.Serialize(),\n\t\tChannelID: chanID.ToUint64(),\n\t\tLastUpdate: time.Now(),\n\t\tTimeLockDelta: 10,\n\t\tMinHTLC: 1,\n\t\tFeeBaseMSat: 10,\n\t\tFeeProportionalMillionths: 10000,\n\t\tFlags: 0,\n\t}\n\n\tif err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tedgePolicy = &channeldb.ChannelEdgePolicy{\n\t\tSigBytes: testSig.Serialize(),\n\t\tChannelID: chanID.ToUint64(),\n\t\tLastUpdate: time.Now(),\n\t\tTimeLockDelta: 10,\n\t\tMinHTLC: 1,\n\t\tFeeBaseMSat: 10,\n\t\tFeeProportionalMillionths: 10000,\n\t\tFlags: 1,\n\t}\n\tif err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: chanID,\n\t\t\t\tCapacity: capacity,\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\tnode: vertex1,\n\t\t\t},\n\t\t},\n\t\t&ChannelEdge{\n\t\t\tChannel: Channel{\n\t\t\t\tChanID: chanID,\n\t\t\t\tCapacity: capacity,\n\t\t\t},\n\t\t\tPeer: dbNode{\n\t\t\t\tnode: vertex2,\n\t\t\t},\n\t\t},\n\t\tnil\n}\n\n\/\/ memChannelGraph is an implementation of the autopilot.ChannelGraph backed by\n\/\/ an in-memory graph.\ntype memChannelGraph struct {\n\tgraph map[NodeID]memNode\n}\n\n\/\/ A compile time assertion to ensure memChannelGraph meets the\n\/\/ autopilot.ChannelGraph interface.\nvar _ ChannelGraph = (*memChannelGraph)(nil)\n\n\/\/ newMemChannelGraph creates a new blank in-memory channel graph\n\/\/ implementation.\nfunc newMemChannelGraph() *memChannelGraph {\n\treturn &memChannelGraph{\n\t\tgraph: make(map[NodeID]memNode),\n\t}\n}\n\n\/\/ ForEachNode is a higher-order function that should be called once for each\n\/\/ connected node within the channel graph. If the passed callback returns an\n\/\/ error, then execution should be terminated.\n\/\/\n\/\/ NOTE: Part of the autopilot.ChannelGraph interface.\nfunc (m memChannelGraph) ForEachNode(cb func(Node) error) error {\n\tfor _, node := range m.graph {\n\t\tif err := cb(node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ randChanID generates a new random channel ID.\nfunc randChanID() lnwire.ShortChannelID {\n\tid := atomic.AddUint64(&chanIDCounter, 1)\n\treturn lnwire.NewShortChanIDFromInt(id)\n}\n\n\/\/ randKey returns a random public key.\nfunc randKey() (*btcec.PublicKey, error) {\n\tpriv, err := btcec.NewPrivateKey(btcec.S256())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn priv.PubKey(), nil\n}\n\n\/\/ addRandChannel creates a new channel two target nodes. This function is\n\/\/ meant to aide in the generation of random graphs for use within test cases\n\/\/ the exercise the autopilot package.\nfunc (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,\n\tcapacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, error) {\n\n\tvar (\n\t\tvertex1, vertex2 memNode\n\t\tok bool\n\t)\n\n\tif node1 != nil {\n\t\tvertex1, ok = m.graph[NewNodeID(node1)]\n\t\tif !ok {\n\t\t\tvertex1 = memNode{\n\t\t\t\tpub: node1,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnewPub, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tvertex1 = memNode{\n\t\t\tpub: newPub,\n\t\t}\n\t}\n\n\tif node2 != nil {\n\t\tvertex2, ok = m.graph[NewNodeID(node2)]\n\t\tif !ok {\n\t\t\tvertex2 = memNode{\n\t\t\t\tpub: node2,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnewPub, err := randKey()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tvertex2 = memNode{\n\t\t\tpub: newPub,\n\t\t}\n\t}\n\n\tchannel := Channel{\n\t\tChanID: randChanID(),\n\t\tCapacity: capacity,\n\t}\n\n\tedge1 := ChannelEdge{\n\t\tChannel: channel,\n\t\tPeer: vertex2,\n\t}\n\tvertex1.chans = append(vertex1.chans, edge1)\n\n\tedge2 := ChannelEdge{\n\t\tChannel: channel,\n\t\tPeer: vertex1,\n\t}\n\tvertex2.chans = append(vertex2.chans, edge2)\n\n\tm.graph[NewNodeID(vertex1.pub)] = vertex1\n\tm.graph[NewNodeID(vertex2.pub)] = vertex2\n\n\treturn &edge1, &edge2, nil\n}\n\n\/\/ memNode is a purely in-memory implementation of the autopilot.Node\n\/\/ interface.\ntype memNode struct {\n\tpub *btcec.PublicKey\n\n\tchans []ChannelEdge\n\n\taddrs []net.Addr\n}\n\n\/\/ A compile time assertion to ensure memNode meets the autopilot.Node\n\/\/ interface.\nvar _ Node = (*memNode)(nil)\n\n\/\/ PubKey is the identity public key of the node. This will be used to attempt\n\/\/ to target a node for channel opening by the main autopilot agent.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) PubKey() [33]byte {\n\tvar n [33]byte\n\tcopy(n[:], m.pub.SerializeCompressed())\n\n\treturn n\n}\n\n\/\/ Addrs returns a slice of publicly reachable public TCP addresses that the\n\/\/ peer is known to be listening on.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) Addrs() []net.Addr {\n\treturn m.addrs\n}\n\n\/\/ ForEachChannel is a higher-order function that will be used to iterate\n\/\/ through all edges emanating from\/to the target node. For each active\n\/\/ channel, this function should be called with the populated ChannelEdge that\n\/\/ describes the active channel.\n\/\/\n\/\/ NOTE: Part of the autopilot.Node interface.\nfunc (m memNode) ForEachChannel(cb func(ChannelEdge) error) error {\n\tfor _, channel := range m.chans {\n\t\tif err := cb(channel); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\n\tAutoreader is a simple program, designed to be run from go:generate, that\n\thelps generate the annoying boilerplate to implement\n\tboardgame.PropertyReader and boardgame.PropertyReadSetter.\n\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/MarcGrol\/golangAnnotations\/parser\"\n\t\"log\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar readerTemplate *template.Template\nvar readSetterTemplate *template.Template\n\nconst magicDocLinePrefix = \"autoreader\"\n\ntype templateConfig struct {\n\tFirstLetter string\n\tStructName string\n}\n\nfunc init() {\n\treaderTemplate = template.Must(template.New(\"reader\").Parse(readerTemplateText))\n\treadSetterTemplate = template.Must(template.New(\"readsetter\").Parse(readSetterTemplateText))\n}\n\nfunc main() {\n\toutput, err := processPackage(\"examplepkg\/\")\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR\", err)\n\t\treturn\n\t}\n\n\tfmt.Println(output)\n}\n\nfunc processPackage(location string) (output string, err error) {\n\tsources, err := parser.ParseSourceDir(location, \".*\")\n\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Couldn't parse sources: \" + err.Error())\n\t}\n\n\toutput = header\n\n\tfor _, theStruct := range sources.Structs {\n\n\t\tenableAutoReader := false\n\n\t\tfor _, docLine := range theStruct.DocLines {\n\t\t\tdocLine = strings.TrimPrefix(docLine, \"\/\/\")\n\t\t\tdocLine = strings.TrimSpace(docLine)\n\t\t\tif strings.HasPrefix(docLine, magicDocLinePrefix) {\n\t\t\t\tenableAutoReader = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !enableAutoReader {\n\t\t\tcontinue\n\t\t}\n\n\t\toutput += readerForStruct(theStruct.Name)\n\t\toutput += readSetterForStruct(theStruct.Name)\n\t}\n\n\treturn output, nil\n}\n\nfunc readerForStruct(structName string) string {\n\tbuf := new(bytes.Buffer)\n\n\terr := readerTemplate.Execute(buf, templateConfig{\n\t\tFirstLetter: structName[:1],\n\t\tStructName: structName,\n\t})\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn buf.String()\n}\n\nfunc readSetterForStruct(structName string) string {\n\tbuf := new(bytes.Buffer)\n\n\terr := readSetterTemplate.Execute(buf, templateConfig{\n\t\tFirstLetter: structName[:1],\n\t\tStructName: structName,\n\t})\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn buf.String()\n}\n\nconst header = `\/************************************\n *\n * This file was auto-generated by autoreader. DO NOT EDIT.\n *\n ************************************\/\n\nimport (\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\n`\n\nconst readerTemplateText = `func ({{.FirstLetter}} *{{.StructName}}) Reader() boardgame.PropertyReader {\n\treturn boardgame.DefaultReader({{.FirstLetter}})\n}\n\n`\n\nconst readSetterTemplateText = `func ({{.FirstLetter}} *{{.StructName}}) ReadSetter() boardgame.PropertyReadSetter {\n\treturn boardgame.DefaultReadSetter({{.FirstLetter}})\n}\n\n`\n<commit_msg>Export the package name in the top of the file, as necessary for correct behavior. Part of #301.<commit_after>\/*\n\n\tAutoreader is a simple program, designed to be run from go:generate, that\n\thelps generate the annoying boilerplate to implement\n\tboardgame.PropertyReader and boardgame.PropertyReadSetter.\n\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/MarcGrol\/golangAnnotations\/parser\"\n\t\"log\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar headerTemplate *template.Template\nvar readerTemplate *template.Template\nvar readSetterTemplate *template.Template\n\nconst magicDocLinePrefix = \"autoreader\"\n\ntype templateConfig struct {\n\tFirstLetter string\n\tStructName string\n}\n\nfunc init() {\n\theaderTemplate = template.Must(template.New(\"header\").Parse(headerTemplateText))\n\treaderTemplate = template.Must(template.New(\"reader\").Parse(readerTemplateText))\n\treadSetterTemplate = template.Must(template.New(\"readsetter\").Parse(readSetterTemplateText))\n}\n\nfunc main() {\n\toutput, err := processPackage(\"examplepkg\/\")\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR\", err)\n\t\treturn\n\t}\n\n\tfmt.Println(output)\n}\n\nfunc processPackage(location string) (output string, err error) {\n\tsources, err := parser.ParseSourceDir(location, \".*\")\n\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Couldn't parse sources: \" + err.Error())\n\t}\n\n\thaveOutputHeader := false\n\n\tfor _, theStruct := range sources.Structs {\n\n\t\tif !haveOutputHeader {\n\t\t\toutput += headerForPackage(theStruct.PackageName)\n\t\t\thaveOutputHeader = true\n\t\t}\n\n\t\tenableAutoReader := false\n\n\t\tfor _, docLine := range theStruct.DocLines {\n\t\t\tdocLine = strings.TrimPrefix(docLine, \"\/\/\")\n\t\t\tdocLine = strings.TrimSpace(docLine)\n\t\t\tif strings.HasPrefix(docLine, magicDocLinePrefix) {\n\t\t\t\tenableAutoReader = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !enableAutoReader {\n\t\t\tcontinue\n\t\t}\n\n\t\toutput += readerForStruct(theStruct.Name)\n\t\toutput += readSetterForStruct(theStruct.Name)\n\t}\n\n\treturn output, nil\n}\n\nfunc templateOutput(template *template.Template, values interface{}) string {\n\tbuf := new(bytes.Buffer)\n\n\terr := template.Execute(buf, values)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn buf.String()\n}\n\nfunc headerForPackage(packageName string) string {\n\treturn templateOutput(headerTemplate, map[string]string{\n\t\t\"packageName\": packageName,\n\t})\n}\n\nfunc readerForStruct(structName string) string {\n\n\treturn templateOutput(readerTemplate, templateConfig{\n\t\tFirstLetter: structName[:1],\n\t\tStructName: structName,\n\t})\n\n}\n\nfunc readSetterForStruct(structName string) string {\n\treturn templateOutput(readSetterTemplate, templateConfig{\n\t\tFirstLetter: structName[:1],\n\t\tStructName: structName,\n\t})\n}\n\nconst headerTemplateText = `\/************************************\n *\n * This file was auto-generated by autoreader. DO NOT EDIT.\n *\n ************************************\/\npackage {{.packageName}}\n\nimport (\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\n`\n\nconst readerTemplateText = `func ({{.FirstLetter}} *{{.StructName}}) Reader() boardgame.PropertyReader {\n\treturn boardgame.DefaultReader({{.FirstLetter}})\n}\n\n`\n\nconst readSetterTemplateText = `func ({{.FirstLetter}} *{{.StructName}}) ReadSetter() boardgame.PropertyReadSetter {\n\treturn boardgame.DefaultReadSetter({{.FirstLetter}})\n}\n\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ The `postgres` plugin for SHIELD is intended to be a generic\n\/\/ backup\/restore plugin for a postgres server. It can be used against\n\/\/ any postgres server compatible with the `psql` and `pg_dumpall` tools\n\/\/ installed on the system where this plugin is run.\n\/\/\n\/\/ PLUGIN FEATURES\n\/\/\n\/\/ This plugin implements functionality suitable for use with the following\n\/\/ SHIELD Job components:\n\/\/\n\/\/ Target: yes\n\/\/ Store: no\n\/\/\n\/\/ PLUGIN CONFIGURATION\n\/\/\n\/\/ The endpoint configuration passed to this plugin is used to identify\n\/\/ what postgres instance to back up, and how to connect to it. Your\n\/\/ endpoint JSON should look something like this:\n\/\/\n\/\/ {\n\/\/ \"pg_user\":\"username-for-postgres\",\n\/\/ \"pg_password\":\"password-for-above-user\",\n\/\/ \"pg_host\":\"hostname-or-ip-of-pg-server\",\n\/\/ \"pg_port\":\"port-above-pg-server-listens-on\", # optional, defaults to '5432'\n\/\/ \"pg_database\": \"name-of-db-to-backup\" # optional, defaults to 'all'\n\/\/ \"pg_bindir\": \"PostgreSQL binaries directory\" # optional, defaults to '\/var\/vcap\/packages\/postgres\/bin'\n\/\/ }\n\/\/\n\/\/ The `pg_port` field is optional. If specified, the plugin will connect to the\n\/\/ given port to perform backups. If not specified plugin will connect to\n\/\/ default postgres port 5432.\n\/\/\n\/\/ The `pg_database` field is optional. If specified, the plugin will only\n\/\/ perform backups of the named database. If not specified (the default), all\n\/\/ databases will be backed up.\n\/\/\n\/\/ The `pg_bindir` field is optional. It specifies where to find the PostgreSQL\n\/\/ binaries such as pg_dump \/ pg_dumpall \/ pg_restore. If specified, the plugin\n\/\/ will attempt to use binaries from within the given directory. If not specified\n\/\/ the plugin will default to trying to use binaries in\n\/\/ '\/var\/vcap\/packages\/postgres\/bin'.\n\/\/\n\/\/ BACKUP DETAILS\n\/\/\n\/\/ The `postgres` plugin makes use of `pg_dumpall -c` to back up all databases\n\/\/ on the postgres server it connects to. There is currently no filtering of\n\/\/ individual databases to back up, unless that is done via the postgres users\n\/\/ and roles. The dumps generated include SQL to clean up existing databses\/tables,\n\/\/ so that the restore will go smoothly.\n\/\/\n\/\/ Backing up with the `postgres` plugin will not drop any existing connections to the\n\/\/ database, or restart the service.\n\/\/\n\/\/ RESTORE DETAILS\n\/\/\n\/\/ To restore, the `postgres` plugin connects to the postgres server using the `psql`\n\/\/ command. It then feeds in the backup data (`pg_dumpall` output). To work around\n\/\/ cases where the databases being restored cannot be recreated due to existing connections,\n\/\/ the plugin disallows incoming connections for each database, and disconnects the existing\n\/\/ connections, prior to dropping the database. Once the database is recreated, connections\n\/\/ are once again allowed into the database.\n\/\/\n\/\/ Restoring with the `postgres` plugin will terminate existing connections to the database,\n\/\/ but does not need to restart the postgres service.\n\/\/\n\/\/ DEPENDENCIES\n\/\/\n\/\/ This plugin relies on the `pg_dumpall` and `psql` commands. Please ensure that they\n\/\/ are present on the system that will be running the backups + restores for postgres.\n\/\/ If you are using shield-boshrelease to deploy SHIELD, these tools are provided, if you\n\/\/ include the `agent-pgtools` job template along side your `shield-agent`.\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\n\t\"github.com\/starkandwayne\/goutils\/ansi\"\n\n\t. \"github.com\/starkandwayne\/shield\/plugin\"\n)\n\nvar (\n\tDefaultPort = \"5432\"\n)\n\nfunc main() {\n\tp := PostgresPlugin{\n\t\tName: \"PostgreSQL Backup Plugin\",\n\t\tAuthor: \"Stark & Wayne\",\n\t\tVersion: \"0.0.1\",\n\t\tFeatures: PluginFeatures{\n\t\t\tTarget: \"yes\",\n\t\t\tStore: \"no\",\n\t\t},\n\t}\n\n\tRun(p)\n}\n\ntype PostgresPlugin PluginInfo\n\ntype PostgresConnectionInfo struct {\n\tHost string\n\tPort string\n\tUser string\n\tPassword string\n\tBin string\n\tDatabase string\n}\n\nfunc (p PostgresPlugin) Meta() PluginInfo {\n\treturn PluginInfo(p)\n}\n\nfunc (p PostgresPlugin) Validate(endpoint ShieldEndpoint) error {\n\tvar (\n\t\ts string\n\t\terr error\n\t\tfail bool\n\t)\n\n\ts, err = endpoint.StringValue(\"pg_host\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_host %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_host} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValueDefault(\"pg_port\", \"\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_port %s}\\n\", err)\n\t} else if s == \"\" {\n\t\tansi.Printf(\"@G{\\u2713 pg_port} using default port @C{%s}\\n\", DefaultPort)\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_port} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValue(\"pg_user\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_user %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_user} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValue(\"pg_password\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_password %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_password} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValueDefault(\"pg_database\", \"\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_database %s}\\n\", err)\n\t} else if s == \"\" {\n\t\tansi.Printf(\"@G{\\u2713 pg_database} none (all databases will be backed up)\\n\")\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_database} @C{%s}\\n\", s)\n\t}\n\n\tif fail {\n\t\treturn fmt.Errorf(\"postgres: invalid configuration\")\n\t}\n\treturn nil\n}\n\nfunc (p PostgresPlugin) Backup(endpoint ShieldEndpoint) error {\n\tpg, err := pgConnectionInfo(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetupEnvironmentVariables(pg)\n\n\tcmd := \"\"\n\tif pg.Database != \"\" {\n\t\t\/\/ Run dump all on the specified db\n\t\tcmd = fmt.Sprintf(\"%s\/pg_dump %s -c --no-password\", pg.Bin, pg.Database)\n\t} else {\n\t\t\/\/ Else run dump on all\n\t\tcmd = fmt.Sprintf(\"%s\/pg_dumpall -c --no-password\", pg.Bin)\n\t}\n\tDEBUG(\"Executing: `%s`\", cmd)\n\treturn Exec(cmd, STDOUT)\n}\n\nfunc (p PostgresPlugin) Restore(endpoint ShieldEndpoint) error {\n\tpg, err := pgConnectionInfo(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetupEnvironmentVariables(pg)\n\n\tcmd := exec.Command(fmt.Sprintf(\"%s\/psql\", pg.Bin), \"-d\", \"postgres\")\n\tDEBUG(\"Exec: %s\/psql -d postgres\", pg.Bin)\n\tDEBUG(\"Redirecting stdout and stderr to stderr\")\n\tcmd.Stdout = os.Stderr\n\tcmd.Stderr = os.Stderr\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tscanErr := make(chan error)\n\tgo func(out io.WriteCloser, in io.Reader, errChan chan<- error) {\n\t\tDEBUG(\"Starting to read SQL statements from stdin...\")\n\t\tr := bufio.NewReader(in)\n\t\treg := regexp.MustCompile(\"^DROP DATABASE (.*);$\")\n\t\ti := 0\n\t\tfor {\n\t\t\tthisLine := []byte{}\n\t\t\tisPrefix := true\n\t\t\tvar err error\n\t\t\tfor isPrefix {\n\t\t\t\tvar tmpLine []byte\n\t\t\t\ttmpLine, isPrefix, err = r.ReadLine()\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\tgoto eof\n\t\t\t\t\t}\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tthisLine = append(thisLine, tmpLine...)\n\t\t\t}\n\t\t\tm := reg.FindStringSubmatch(string(thisLine))\n\t\t\tif len(m) > 0 {\n\t\t\t\tDEBUG(\"Found dropped database '%s' on line %d\", m[1], i)\n\t\t\t\tout.Write([]byte(fmt.Sprintf(\"UPDATE pg_database SET datallowconn = 'false' WHERE datname = '%s';\\n\", m[1])))\n\t\t\t\tout.Write([]byte(fmt.Sprintf(\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '%s';\\n\", m[1])))\n\t\t\t}\n\t\t\t_, err = out.Write([]byte(string(thisLine) + \"\\n\"))\n\t\t\tif err != nil {\n\t\t\t\tDEBUG(\"Error when writing to output: %s\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti++\n\t\t}\n\teof:\n\t\tDEBUG(\"Completed restore with %d lines of SQL\", i)\n\t\tout.Close()\n\t\terrChan <- nil\n\t}(stdin, os.Stdin, scanErr)\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn <-scanErr\n}\n\nfunc (p PostgresPlugin) Store(endpoint ShieldEndpoint) (string, error) {\n\treturn \"\", UNIMPLEMENTED\n}\n\nfunc (p PostgresPlugin) Retrieve(endpoint ShieldEndpoint, file string) error {\n\treturn UNIMPLEMENTED\n}\n\nfunc (p PostgresPlugin) Purge(endpoint ShieldEndpoint, file string) error {\n\treturn UNIMPLEMENTED\n}\n\nfunc setupEnvironmentVariables(pg *PostgresConnectionInfo) {\n\tDEBUG(\"Setting up env:\\n PGUSER=%s, PGPASSWORD=%s, PGHOST=%s, PGPORT=%s\", pg.User, pg.Password, pg.Host, pg.Port)\n\tos.Setenv(\"PGUSER\", pg.User)\n\tos.Setenv(\"PGPASSWORD\", pg.Password)\n\tos.Setenv(\"PGHOST\", pg.Host)\n\tos.Setenv(\"PGPORT\", pg.Port)\n}\n\nfunc pgConnectionInfo(endpoint ShieldEndpoint) (*PostgresConnectionInfo, error) {\n\tuser, err := endpoint.StringValue(\"pg_user\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGUSER: '%s'\", user)\n\n\tpassword, err := endpoint.StringValue(\"pg_password\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGPASSWORD: '%s'\", password)\n\n\thost, err := endpoint.StringValue(\"pg_host\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGHOST: '%s'\", host)\n\n\tport, err := endpoint.StringValueDefault(\"pg_port\", DefaultPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGPORT: '%s'\", port)\n\n\tdatabase, err := endpoint.StringValueDefault(\"pg_database\", \"\")\n\tDEBUG(\"PGDATABASE: '%s'\", database)\n\n\tbin, err := endpoint.StringValueDefault(\"pg_bindir\", \"\/var\/vcap\/packages\/postgres\/bin\")\n\tDEBUG(\"PGBINDIR: '%s'\", bin)\n\n\treturn &PostgresConnectionInfo{\n\t\tHost: host,\n\t\tPort: port,\n\t\tUser: user,\n\t\tPassword: password,\n\t\tBin: bin,\n\t\tDatabase: database,\n\t}, nil\n}\n<commit_msg>Pass errors out of postgres plugin connection info.<commit_after>\/\/ The `postgres` plugin for SHIELD is intended to be a generic\n\/\/ backup\/restore plugin for a postgres server. It can be used against\n\/\/ any postgres server compatible with the `psql` and `pg_dumpall` tools\n\/\/ installed on the system where this plugin is run.\n\/\/\n\/\/ PLUGIN FEATURES\n\/\/\n\/\/ This plugin implements functionality suitable for use with the following\n\/\/ SHIELD Job components:\n\/\/\n\/\/ Target: yes\n\/\/ Store: no\n\/\/\n\/\/ PLUGIN CONFIGURATION\n\/\/\n\/\/ The endpoint configuration passed to this plugin is used to identify\n\/\/ what postgres instance to back up, and how to connect to it. Your\n\/\/ endpoint JSON should look something like this:\n\/\/\n\/\/ {\n\/\/ \"pg_user\":\"username-for-postgres\",\n\/\/ \"pg_password\":\"password-for-above-user\",\n\/\/ \"pg_host\":\"hostname-or-ip-of-pg-server\",\n\/\/ \"pg_port\":\"port-above-pg-server-listens-on\", # optional, defaults to '5432'\n\/\/ \"pg_database\": \"name-of-db-to-backup\" # optional, defaults to 'all'\n\/\/ \"pg_bindir\": \"PostgreSQL binaries directory\" # optional, defaults to '\/var\/vcap\/packages\/postgres\/bin'\n\/\/ }\n\/\/\n\/\/ The `pg_port` field is optional. If specified, the plugin will connect to the\n\/\/ given port to perform backups. If not specified plugin will connect to\n\/\/ default postgres port 5432.\n\/\/\n\/\/ The `pg_database` field is optional. If specified, the plugin will only\n\/\/ perform backups of the named database. If not specified (the default), all\n\/\/ databases will be backed up.\n\/\/\n\/\/ The `pg_bindir` field is optional. It specifies where to find the PostgreSQL\n\/\/ binaries such as pg_dump \/ pg_dumpall \/ pg_restore. If specified, the plugin\n\/\/ will attempt to use binaries from within the given directory. If not specified\n\/\/ the plugin will default to trying to use binaries in\n\/\/ '\/var\/vcap\/packages\/postgres\/bin'.\n\/\/\n\/\/ BACKUP DETAILS\n\/\/\n\/\/ The `postgres` plugin makes use of `pg_dumpall -c` to back up all databases\n\/\/ on the postgres server it connects to. There is currently no filtering of\n\/\/ individual databases to back up, unless that is done via the postgres users\n\/\/ and roles. The dumps generated include SQL to clean up existing databses\/tables,\n\/\/ so that the restore will go smoothly.\n\/\/\n\/\/ Backing up with the `postgres` plugin will not drop any existing connections to the\n\/\/ database, or restart the service.\n\/\/\n\/\/ RESTORE DETAILS\n\/\/\n\/\/ To restore, the `postgres` plugin connects to the postgres server using the `psql`\n\/\/ command. It then feeds in the backup data (`pg_dumpall` output). To work around\n\/\/ cases where the databases being restored cannot be recreated due to existing connections,\n\/\/ the plugin disallows incoming connections for each database, and disconnects the existing\n\/\/ connections, prior to dropping the database. Once the database is recreated, connections\n\/\/ are once again allowed into the database.\n\/\/\n\/\/ Restoring with the `postgres` plugin will terminate existing connections to the database,\n\/\/ but does not need to restart the postgres service.\n\/\/\n\/\/ DEPENDENCIES\n\/\/\n\/\/ This plugin relies on the `pg_dumpall` and `psql` commands. Please ensure that they\n\/\/ are present on the system that will be running the backups + restores for postgres.\n\/\/ If you are using shield-boshrelease to deploy SHIELD, these tools are provided, if you\n\/\/ include the `agent-pgtools` job template along side your `shield-agent`.\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\n\t\"github.com\/starkandwayne\/goutils\/ansi\"\n\n\t. \"github.com\/starkandwayne\/shield\/plugin\"\n)\n\nvar (\n\tDefaultPort = \"5432\"\n)\n\nfunc main() {\n\tp := PostgresPlugin{\n\t\tName: \"PostgreSQL Backup Plugin\",\n\t\tAuthor: \"Stark & Wayne\",\n\t\tVersion: \"0.0.1\",\n\t\tFeatures: PluginFeatures{\n\t\t\tTarget: \"yes\",\n\t\t\tStore: \"no\",\n\t\t},\n\t}\n\n\tRun(p)\n}\n\ntype PostgresPlugin PluginInfo\n\ntype PostgresConnectionInfo struct {\n\tHost string\n\tPort string\n\tUser string\n\tPassword string\n\tBin string\n\tDatabase string\n}\n\nfunc (p PostgresPlugin) Meta() PluginInfo {\n\treturn PluginInfo(p)\n}\n\nfunc (p PostgresPlugin) Validate(endpoint ShieldEndpoint) error {\n\tvar (\n\t\ts string\n\t\terr error\n\t\tfail bool\n\t)\n\n\ts, err = endpoint.StringValue(\"pg_host\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_host %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_host} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValueDefault(\"pg_port\", \"\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_port %s}\\n\", err)\n\t} else if s == \"\" {\n\t\tansi.Printf(\"@G{\\u2713 pg_port} using default port @C{%s}\\n\", DefaultPort)\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_port} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValue(\"pg_user\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_user %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_user} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValue(\"pg_password\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_password %s}\\n\", err)\n\t\tfail = true\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_password} @C{%s}\\n\", s)\n\t}\n\n\ts, err = endpoint.StringValueDefault(\"pg_database\", \"\")\n\tif err != nil {\n\t\tansi.Printf(\"@R{\\u2717 pg_database %s}\\n\", err)\n\t} else if s == \"\" {\n\t\tansi.Printf(\"@G{\\u2713 pg_database} none (all databases will be backed up)\\n\")\n\t} else {\n\t\tansi.Printf(\"@G{\\u2713 pg_database} @C{%s}\\n\", s)\n\t}\n\n\tif fail {\n\t\treturn fmt.Errorf(\"postgres: invalid configuration\")\n\t}\n\treturn nil\n}\n\nfunc (p PostgresPlugin) Backup(endpoint ShieldEndpoint) error {\n\tpg, err := pgConnectionInfo(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetupEnvironmentVariables(pg)\n\n\tcmd := \"\"\n\tif pg.Database != \"\" {\n\t\t\/\/ Run dump all on the specified db\n\t\tcmd = fmt.Sprintf(\"%s\/pg_dump %s -c --no-password\", pg.Bin, pg.Database)\n\t} else {\n\t\t\/\/ Else run dump on all\n\t\tcmd = fmt.Sprintf(\"%s\/pg_dumpall -c --no-password\", pg.Bin)\n\t}\n\tDEBUG(\"Executing: `%s`\", cmd)\n\treturn Exec(cmd, STDOUT)\n}\n\nfunc (p PostgresPlugin) Restore(endpoint ShieldEndpoint) error {\n\tpg, err := pgConnectionInfo(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetupEnvironmentVariables(pg)\n\n\tcmd := exec.Command(fmt.Sprintf(\"%s\/psql\", pg.Bin), \"-d\", \"postgres\")\n\tDEBUG(\"Exec: %s\/psql -d postgres\", pg.Bin)\n\tDEBUG(\"Redirecting stdout and stderr to stderr\")\n\tcmd.Stdout = os.Stderr\n\tcmd.Stderr = os.Stderr\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tscanErr := make(chan error)\n\tgo func(out io.WriteCloser, in io.Reader, errChan chan<- error) {\n\t\tDEBUG(\"Starting to read SQL statements from stdin...\")\n\t\tr := bufio.NewReader(in)\n\t\treg := regexp.MustCompile(\"^DROP DATABASE (.*);$\")\n\t\ti := 0\n\t\tfor {\n\t\t\tthisLine := []byte{}\n\t\t\tisPrefix := true\n\t\t\tvar err error\n\t\t\tfor isPrefix {\n\t\t\t\tvar tmpLine []byte\n\t\t\t\ttmpLine, isPrefix, err = r.ReadLine()\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\tgoto eof\n\t\t\t\t\t}\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tthisLine = append(thisLine, tmpLine...)\n\t\t\t}\n\t\t\tm := reg.FindStringSubmatch(string(thisLine))\n\t\t\tif len(m) > 0 {\n\t\t\t\tDEBUG(\"Found dropped database '%s' on line %d\", m[1], i)\n\t\t\t\tout.Write([]byte(fmt.Sprintf(\"UPDATE pg_database SET datallowconn = 'false' WHERE datname = '%s';\\n\", m[1])))\n\t\t\t\tout.Write([]byte(fmt.Sprintf(\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '%s';\\n\", m[1])))\n\t\t\t}\n\t\t\t_, err = out.Write([]byte(string(thisLine) + \"\\n\"))\n\t\t\tif err != nil {\n\t\t\t\tDEBUG(\"Error when writing to output: %s\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti++\n\t\t}\n\teof:\n\t\tDEBUG(\"Completed restore with %d lines of SQL\", i)\n\t\tout.Close()\n\t\terrChan <- nil\n\t}(stdin, os.Stdin, scanErr)\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn <-scanErr\n}\n\nfunc (p PostgresPlugin) Store(endpoint ShieldEndpoint) (string, error) {\n\treturn \"\", UNIMPLEMENTED\n}\n\nfunc (p PostgresPlugin) Retrieve(endpoint ShieldEndpoint, file string) error {\n\treturn UNIMPLEMENTED\n}\n\nfunc (p PostgresPlugin) Purge(endpoint ShieldEndpoint, file string) error {\n\treturn UNIMPLEMENTED\n}\n\nfunc setupEnvironmentVariables(pg *PostgresConnectionInfo) {\n\tDEBUG(\"Setting up env:\\n PGUSER=%s, PGPASSWORD=%s, PGHOST=%s, PGPORT=%s\", pg.User, pg.Password, pg.Host, pg.Port)\n\tos.Setenv(\"PGUSER\", pg.User)\n\tos.Setenv(\"PGPASSWORD\", pg.Password)\n\tos.Setenv(\"PGHOST\", pg.Host)\n\tos.Setenv(\"PGPORT\", pg.Port)\n}\n\nfunc pgConnectionInfo(endpoint ShieldEndpoint) (*PostgresConnectionInfo, error) {\n\tuser, err := endpoint.StringValue(\"pg_user\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGUSER: '%s'\", user)\n\n\tpassword, err := endpoint.StringValue(\"pg_password\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGPASSWORD: '%s'\", password)\n\n\thost, err := endpoint.StringValue(\"pg_host\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGHOST: '%s'\", host)\n\n\tport, err := endpoint.StringValueDefault(\"pg_port\", DefaultPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGPORT: '%s'\", port)\n\n\tdatabase, err := endpoint.StringValueDefault(\"pg_database\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGDATABASE: '%s'\", database)\n\n\tbin, err := endpoint.StringValueDefault(\"pg_bindir\", \"\/var\/vcap\/packages\/postgres\/bin\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDEBUG(\"PGBINDIR: '%s'\", bin)\n\n\treturn &PostgresConnectionInfo{\n\t\tHost: host,\n\t\tPort: port,\n\t\tUser: user,\n\t\tPassword: password,\n\t\tBin: bin,\n\t\tDatabase: database,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/mcilloni\/go-openbaton\/util\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc (p *plug) spawnWorkers() {\n\ttag := util.FuncName()\n\n\tp.l.WithFields(log.Fields{\n\t\t\"tag\": tag,\n\t\t\"num-of-workers\": p.params.Workers,\n\t}).Debug(\"spawning workers\")\n\n\tp.wg.Add(p.params.Workers)\n\tfor i := 0; i < p.params.Workers; i++ {\n\t\tgo p.worker(i)\n\t}\n}\n\nfunc (p *plug) temporaryQueue() (string, error) {\n\tqueue, err := p.cnl.QueueDeclare(\n\t\t\"\", \/\/ name\n\t\tfalse, \/\/ durable\n\t\tfalse, \/\/ delete when usused\n\t\ttrue, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn queue.Name, nil\n}\n\nfunc (p *plug) worker(id int) {\n\ttag := util.FuncName()\n\n\tp.l.WithFields(log.Fields{\n\t\t\"tag\": tag,\n\t\t\"worker-id\": id,\n\t}).Debug(\"spawning workers\")\n\n\tfor req := range p.reqChan {\n\t\tresult, err := p.rh.Handle(req.MethodName, req.Parameters)\n\n\t\tvar resp response\n\t\tif err != nil {\n\t\t\t\/\/ The NFVO expects a Java Exception;\n\t\t\t\/\/ This type switch checks if the error is not one of the special\n\t\t\t\/\/ Java-compatible types already and wraps it.\n\t\t\tswitch err.(type) {\n\t\t\tcase plugError:\n\t\t\t\tresp.Exception = err\n\n\t\t\tcase DriverError:\n\t\t\t\tresp.Exception = err\n\n\t\t\t\/\/ if the error is not a special plugin error, than wrap it:\n\t\t\t\/\/ the nfvo expects a Java exception.\n\t\t\tdefault:\n\t\t\t\tresp.Exception = plugError{err.Error()}\n\t\t\t}\n\t\t} else {\n\t\t\tresp.Answer = result\n\t\t}\n\n\t\tbResp, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tp.l.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"tag\": tag,\n\t\t\t\t\"worker-id\": id,\n\t\t\t}).Error(\"failure while serialising response\")\n\t\t\tcontinue\n\t\t}\n\n\t\terr = p.cnl.Publish(\n\t\t\tpluginExchange,\n\t\t\treq.ReplyTo,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tContentType: \"text\/plain\",\n\t\t\t\tCorrelationId: req.CorrID,\n\t\t\t\tBody: bResp,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\tp.l.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"tag\": tag,\n\t\t\t\t\"worker-id\": id,\n\t\t\t\t\"reply-queue\": req.ReplyTo,\n\t\t\t\t\"reply-corrid\": req.CorrID,\n\t\t\t}).Error(\"failure while replying\")\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tp.wg.Done()\n}\n<commit_msg>Logging fixes<commit_after>package plugin\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/mcilloni\/go-openbaton\/util\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc (p *plug) spawnWorkers() {\n\ttag := util.FuncName()\n\n\tp.l.WithFields(log.Fields{\n\t\t\"tag\": tag,\n\t\t\"num-of-workers\": p.params.Workers,\n\t}).Debug(\"spawning workers\")\n\n\tp.wg.Add(p.params.Workers)\n\tfor i := 0; i < p.params.Workers; i++ {\n\t\tgo p.worker(i)\n\t}\n}\n\nfunc (p *plug) temporaryQueue() (string, error) {\n\tqueue, err := p.cnl.QueueDeclare(\n\t\t\"\", \/\/ name\n\t\tfalse, \/\/ durable\n\t\tfalse, \/\/ delete when usused\n\t\ttrue, \/\/ exclusive\n\t\tfalse, \/\/ noWait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn queue.Name, nil\n}\n\nfunc (p *plug) worker(id int) {\n\ttag := util.FuncName()\n\n\tp.l.WithFields(log.Fields{\n\t\t\"tag\": tag,\n\t\t\"worker-id\": id,\n\t}).Debug(\"worker is starting\")\n\n\tfor req := range p.reqChan {\n\t\tresult, err := p.rh.Handle(req.MethodName, req.Parameters)\n\n\t\tvar resp response\n\t\tif err != nil {\n\t\t\t\/\/ The NFVO expects a Java Exception;\n\t\t\t\/\/ This type switch checks if the error is not one of the special\n\t\t\t\/\/ Java-compatible types already and wraps it.\n\t\t\tswitch err.(type) {\n\t\t\tcase plugError:\n\t\t\t\tresp.Exception = err\n\n\t\t\tcase DriverError:\n\t\t\t\tresp.Exception = err\n\n\t\t\t\/\/ if the error is not a special plugin error, than wrap it:\n\t\t\t\/\/ the nfvo expects a Java exception.\n\t\t\tdefault:\n\t\t\t\tresp.Exception = plugError{err.Error()}\n\t\t\t}\n\t\t} else {\n\t\t\tresp.Answer = result\n\t\t}\n\n\t\tbResp, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tp.l.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"tag\": tag,\n\t\t\t\t\"worker-id\": id,\n\t\t\t}).Error(\"failure while serialising response\")\n\t\t\tcontinue\n\t\t}\n\n\t\terr = p.cnl.Publish(\n\t\t\tpluginExchange,\n\t\t\treq.ReplyTo,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tContentType: \"text\/plain\",\n\t\t\t\tCorrelationId: req.CorrID,\n\t\t\t\tBody: bResp,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\tp.l.WithError(err).WithFields(log.Fields{\n\t\t\t\t\"tag\": tag,\n\t\t\t\t\"worker-id\": id,\n\t\t\t\t\"reply-queue\": req.ReplyTo,\n\t\t\t\t\"reply-corrid\": req.CorrID,\n\t\t\t}).Error(\"failure while replying\")\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tp.l.WithFields(log.Fields{\n\t\t\"tag\": tag,\n\t\t\"worker-id\": id,\n\t}).Debug(\"worker is stopping\")\n\n\tp.wg.Done()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package dbmirror is a package to create a database which is almost exactly\n\/\/ the same as osu!'s beatmap database.\npackage dbmirror\n\nimport (\n\t\"database\/sql\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\traven \"github.com\/getsentry\/raven-go\"\n\t\"github.com\/osuripple\/cheesegull\/models\"\n\tosuapi \"github.com\/thehowl\/go-osuapi\"\n)\n\nconst (\n\t\/\/ NewBatchEvery is the amount of time that will elapse between one batch\n\t\/\/ of requests and another.\n\tNewBatchEvery = time.Minute\n\t\/\/ PerBatch is the amount of requests and updates every batch contains.\n\tPerBatch = 100\n\t\/\/ SetUpdaterWorkers is the number of goroutines which should take care of\n\t\/\/ new batches. Keep in mind that this will be the number of maximum\n\t\/\/ concurrent connections to the osu! API.\n\tSetUpdaterWorkers = PerBatch \/ 10\n)\n\n\/\/ hasVideo checks whether a beatmap set has a video.\nvar hasVideo func(set int) (bool, error)\n\n\/\/ SetHasVideo sets the hasVideo function to the one passed.\nfunc SetHasVideo(f func(int) (bool, error)) {\n\tif f == nil {\n\t\treturn\n\t}\n\thasVideo = f\n}\n\nfunc createChildrenBeatmaps(bms []osuapi.Beatmap) []models.Beatmap {\n\tcgBms := make([]models.Beatmap, len(bms))\n\tfor idx, bm := range bms {\n\t\tcgBms[idx] = models.Beatmap{\n\t\t\tID: bm.BeatmapID,\n\t\t\tParentSetID: bm.BeatmapSetID,\n\t\t\tDiffName: bm.DiffName,\n\t\t\tFileMD5: bm.FileMD5,\n\t\t\tMode: int(bm.Mode),\n\t\t\tBPM: bm.BPM,\n\t\t\tAR: float32(bm.ApproachRate),\n\t\t\tOD: float32(bm.OverallDifficulty),\n\t\t\tCS: float32(bm.CircleSize),\n\t\t\tHP: float32(bm.HPDrain),\n\t\t\tTotalLength: bm.TotalLength,\n\t\t\tHitLength: bm.HitLength,\n\t\t\tPlaycount: bm.Playcount,\n\t\t\tPasscount: bm.Passcount,\n\t\t\tMaxCombo: bm.MaxCombo,\n\t\t\tDifficultyRating: bm.DifficultyRating,\n\t\t}\n\t}\n\treturn cgBms\n}\n\nfunc setFromOsuAPIBeatmap(b osuapi.Beatmap) models.Set {\n\treturn models.Set{\n\t\tID: b.BeatmapSetID,\n\t\tRankedStatus: int(b.Approved),\n\t\tApprovedDate: time.Time(b.ApprovedDate),\n\t\tLastUpdate: time.Time(b.LastUpdate),\n\t\tLastChecked: time.Now(),\n\t\tArtist: b.Artist,\n\t\tTitle: b.Title,\n\t\tCreator: b.Creator,\n\t\tSource: b.Source,\n\t\tTags: b.Tags,\n\t\tGenre: int(b.Genre),\n\t\tLanguage: int(b.Language),\n\t\tFavourites: b.FavouriteCount,\n\t}\n}\n\nfunc updateSet(c *osuapi.Client, db *sql.DB, set models.Set) error {\n\tbms, err := c.GetBeatmaps(osuapi.GetBeatmapsOpts{\n\t\tBeatmapSetID: set.ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(bms) == 0 {\n\t\t\/\/ set has been deleted from osu!, so we do the same thing\n\t\treturn models.DeleteSet(db, set.ID)\n\t}\n\n\t\/\/ create the new set based on the information we can obtain from the\n\t\/\/ first beatmap's information\n\tvar x = bms[0]\n\tupdated := !time.Time(x.LastUpdate).Equal(set.LastUpdate)\n\tset = setFromOsuAPIBeatmap(x)\n\tset.ChildrenBeatmaps = createChildrenBeatmaps(bms)\n\tif updated {\n\t\t\/\/ if it has been updated, video might have been added or removed\n\t\t\/\/ so we need to check for it\n\t\tset.HasVideo, err = hasVideo(x.BeatmapSetID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn models.CreateSet(db, set)\n}\n\n\/\/ By making the buffer the same size of the batch, we can be sure that all\n\/\/ sets from the previous batch will have completed by the time we finish\n\/\/ pushing all the beatmaps to the queue.\nvar setQueue = make(chan models.Set, PerBatch)\n\n\/\/ setUpdater is a function to be run as a goroutine, that receives sets\n\/\/ from setQueue and brings the information in the database up-to-date for that\n\/\/ set.\nfunc setUpdater(c *osuapi.Client, db *sql.DB) {\n\tfor set := range setQueue {\n\t\terr := updateSet(c, db, set)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t}\n\t}\n}\n\n\/\/ StartSetUpdater does batch updates for the beatmaps in the database,\n\/\/ employing goroutines to fetch the data from the osu! API and then write it to\n\/\/ the database.\nfunc StartSetUpdater(c *osuapi.Client, db *sql.DB) {\n\tfor i := 0; i < SetUpdaterWorkers; i++ {\n\t\tgo setUpdater(c, db)\n\t}\n\tfor {\n\t\tsets, err := models.FetchSetsForBatchUpdate(db, PerBatch)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t\ttime.Sleep(NewBatchEvery)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, set := range sets {\n\t\t\tsetQueue <- set\n\t\t}\n\t\tif len(sets) > 0 {\n\t\t\tlog.Println(\"[U] Updating sets, the oldest LastChecked is\", sets[0].LastChecked)\n\t\t}\n\t\ttime.Sleep(NewBatchEvery)\n\t}\n}\n\nvar envSentryDSN = os.Getenv(\"SENTRY_DSN\")\n\n\/\/ logError attempts to log an error to Sentry, as well as stdout.\nfunc logError(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tif envSentryDSN != \"\" {\n\t\traven.CaptureError(err, nil)\n\t}\n\tlog.Println(err)\n}\n<commit_msg>update number of workers in dbmirror<commit_after>\/\/ Package dbmirror is a package to create a database which is almost exactly\n\/\/ the same as osu!'s beatmap database.\npackage dbmirror\n\nimport (\n\t\"database\/sql\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\traven \"github.com\/getsentry\/raven-go\"\n\t\"github.com\/osuripple\/cheesegull\/models\"\n\tosuapi \"github.com\/thehowl\/go-osuapi\"\n)\n\nconst (\n\t\/\/ NewBatchEvery is the amount of time that will elapse between one batch\n\t\/\/ of requests and another.\n\tNewBatchEvery = time.Minute\n\t\/\/ PerBatch is the amount of requests and updates every batch contains.\n\tPerBatch = 100\n\t\/\/ SetUpdaterWorkers is the number of goroutines which should take care of\n\t\/\/ new batches. Keep in mind that this will be the number of maximum\n\t\/\/ concurrent connections to the osu! API.\n\tSetUpdaterWorkers = PerBatch \/ 20\n)\n\n\/\/ hasVideo checks whether a beatmap set has a video.\nvar hasVideo func(set int) (bool, error)\n\n\/\/ SetHasVideo sets the hasVideo function to the one passed.\nfunc SetHasVideo(f func(int) (bool, error)) {\n\tif f == nil {\n\t\treturn\n\t}\n\thasVideo = f\n}\n\nfunc createChildrenBeatmaps(bms []osuapi.Beatmap) []models.Beatmap {\n\tcgBms := make([]models.Beatmap, len(bms))\n\tfor idx, bm := range bms {\n\t\tcgBms[idx] = models.Beatmap{\n\t\t\tID: bm.BeatmapID,\n\t\t\tParentSetID: bm.BeatmapSetID,\n\t\t\tDiffName: bm.DiffName,\n\t\t\tFileMD5: bm.FileMD5,\n\t\t\tMode: int(bm.Mode),\n\t\t\tBPM: bm.BPM,\n\t\t\tAR: float32(bm.ApproachRate),\n\t\t\tOD: float32(bm.OverallDifficulty),\n\t\t\tCS: float32(bm.CircleSize),\n\t\t\tHP: float32(bm.HPDrain),\n\t\t\tTotalLength: bm.TotalLength,\n\t\t\tHitLength: bm.HitLength,\n\t\t\tPlaycount: bm.Playcount,\n\t\t\tPasscount: bm.Passcount,\n\t\t\tMaxCombo: bm.MaxCombo,\n\t\t\tDifficultyRating: bm.DifficultyRating,\n\t\t}\n\t}\n\treturn cgBms\n}\n\nfunc setFromOsuAPIBeatmap(b osuapi.Beatmap) models.Set {\n\treturn models.Set{\n\t\tID: b.BeatmapSetID,\n\t\tRankedStatus: int(b.Approved),\n\t\tApprovedDate: time.Time(b.ApprovedDate),\n\t\tLastUpdate: time.Time(b.LastUpdate),\n\t\tLastChecked: time.Now(),\n\t\tArtist: b.Artist,\n\t\tTitle: b.Title,\n\t\tCreator: b.Creator,\n\t\tSource: b.Source,\n\t\tTags: b.Tags,\n\t\tGenre: int(b.Genre),\n\t\tLanguage: int(b.Language),\n\t\tFavourites: b.FavouriteCount,\n\t}\n}\n\nfunc updateSet(c *osuapi.Client, db *sql.DB, set models.Set) error {\n\tbms, err := c.GetBeatmaps(osuapi.GetBeatmapsOpts{\n\t\tBeatmapSetID: set.ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(bms) == 0 {\n\t\t\/\/ set has been deleted from osu!, so we do the same thing\n\t\treturn models.DeleteSet(db, set.ID)\n\t}\n\n\t\/\/ create the new set based on the information we can obtain from the\n\t\/\/ first beatmap's information\n\tvar x = bms[0]\n\tupdated := !time.Time(x.LastUpdate).Equal(set.LastUpdate)\n\tset = setFromOsuAPIBeatmap(x)\n\tset.ChildrenBeatmaps = createChildrenBeatmaps(bms)\n\tif updated {\n\t\t\/\/ if it has been updated, video might have been added or removed\n\t\t\/\/ so we need to check for it\n\t\tset.HasVideo, err = hasVideo(x.BeatmapSetID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn models.CreateSet(db, set)\n}\n\n\/\/ By making the buffer the same size of the batch, we can be sure that all\n\/\/ sets from the previous batch will have completed by the time we finish\n\/\/ pushing all the beatmaps to the queue.\nvar setQueue = make(chan models.Set, PerBatch)\n\n\/\/ setUpdater is a function to be run as a goroutine, that receives sets\n\/\/ from setQueue and brings the information in the database up-to-date for that\n\/\/ set.\nfunc setUpdater(c *osuapi.Client, db *sql.DB) {\n\tfor set := range setQueue {\n\t\terr := updateSet(c, db, set)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t}\n\t}\n}\n\n\/\/ StartSetUpdater does batch updates for the beatmaps in the database,\n\/\/ employing goroutines to fetch the data from the osu! API and then write it to\n\/\/ the database.\nfunc StartSetUpdater(c *osuapi.Client, db *sql.DB) {\n\tfor i := 0; i < SetUpdaterWorkers; i++ {\n\t\tgo setUpdater(c, db)\n\t}\n\tfor {\n\t\tsets, err := models.FetchSetsForBatchUpdate(db, PerBatch)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t\ttime.Sleep(NewBatchEvery)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, set := range sets {\n\t\t\tsetQueue <- set\n\t\t}\n\t\tif len(sets) > 0 {\n\t\t\tlog.Println(\"[U] Updating sets, the oldest LastChecked is\", sets[0].LastChecked)\n\t\t}\n\t\ttime.Sleep(NewBatchEvery)\n\t}\n}\n\nvar envSentryDSN = os.Getenv(\"SENTRY_DSN\")\n\n\/\/ logError attempts to log an error to Sentry, as well as stdout.\nfunc logError(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tif envSentryDSN != \"\" {\n\t\traven.CaptureError(err, nil)\n\t}\n\tlog.Println(err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ddl\n\nimport (\n\t\"math\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/clientv3\/concurrency\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/log\"\n\tgoctx \"golang.org\/x\/net\/context\"\n)\n\n\/\/ OwnerManager is used to campaign the owner and manage the owner information.\ntype OwnerManager interface {\n\t\/\/ ID returns the ID of DDL.\n\tID() string\n\t\/\/ IsOwner returns whether the ownerManager is the DDL owner.\n\tIsOwner() bool\n\t\/\/ SetOwner sets whether the ownerManager is the DDL owner.\n\tSetOwner(isOwner bool)\n\t\/\/ IsOwner returns whether the ownerManager is the background owner.\n\tIsBgOwner() bool\n\t\/\/ SetOwner sets whether the ownerManager is the background owner.\n\tSetBgOwner(isOwner bool)\n\t\/\/ CampaignOwners campaigns the DDL owner and the background owner.\n\tCampaignOwners(ctx goctx.Context, wg *sync.WaitGroup) error\n\t\/\/ Cancel cancels this etcd ownerManager campaign.\n\tCancel()\n}\n\nconst (\n\t\/\/ DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.\n\tDDLOwnerKey = \"\/tidb\/ddl\/fg\/owner\"\n\t\/\/ BgOwnerKey is the background owner path that is saved to etcd, and it's exported for testing.\n\tBgOwnerKey = \"\/tidb\/ddl\/bg\/owner\"\n\tnewSessionDefaultRetryCnt = 3\n\tnewSessionRetryUnlimited = math.MaxInt64\n)\n\n\/\/ ownerManager represents the structure which is used for electing owner.\ntype ownerManager struct {\n\tddlOwner int32\n\tbgOwner int32\n\tddlID string \/\/ id is the ID of DDL.\n\tetcdCli *clientv3.Client\n\tcancel goctx.CancelFunc\n}\n\n\/\/ NewOwnerManager creates a new OwnerManager.\nfunc NewOwnerManager(etcdCli *clientv3.Client, id string, cancel goctx.CancelFunc) OwnerManager {\n\treturn &ownerManager{\n\t\tetcdCli: etcdCli,\n\t\tddlID: id,\n\t\tcancel: cancel,\n\t}\n}\n\n\/\/ ID implements OwnerManager.ID interface.\nfunc (m *ownerManager) ID() string {\n\treturn m.ddlID\n}\n\n\/\/ IsOwner implements OwnerManager.IsOwner interface.\nfunc (m *ownerManager) IsOwner() bool {\n\treturn atomic.LoadInt32(&m.ddlOwner) == 1\n}\n\n\/\/ SetOwner implements OwnerManager.SetOwner interface.\nfunc (m *ownerManager) SetOwner(isOwner bool) {\n\tif isOwner {\n\t\tatomic.StoreInt32(&m.ddlOwner, 1)\n\t} else {\n\t\tatomic.StoreInt32(&m.ddlOwner, 0)\n\t}\n}\n\n\/\/ Cancel implements OwnerManager.Cancel interface.\nfunc (m *ownerManager) Cancel() {\n\tm.cancel()\n}\n\n\/\/ IsBgOwner implements OwnerManager.IsBgOwner interface.\nfunc (m *ownerManager) IsBgOwner() bool {\n\treturn atomic.LoadInt32(&m.bgOwner) == 1\n}\n\n\/\/ SetBgOwner implements OwnerManager.SetBgOwner interface.\nfunc (m *ownerManager) SetBgOwner(isOwner bool) {\n\tif isOwner {\n\t\tatomic.StoreInt32(&m.bgOwner, 1)\n\t} else {\n\t\tatomic.StoreInt32(&m.bgOwner, 0)\n\t}\n}\n\n\/\/ NewSessionTTL is the etcd session's TTL in seconds. It's exported for testing.\nvar NewSessionTTL = 10\n\nfunc (m *ownerManager) newSession(ctx goctx.Context, retryCnt int) (*concurrency.Session, error) {\n\tvar err error\n\tvar etcdSession *concurrency.Session\n\tfor i := 0; i < retryCnt; i++ {\n\t\tetcdSession, err = concurrency.NewSession(m.etcdCli,\n\t\t\tconcurrency.WithTTL(NewSessionTTL), concurrency.WithContext(ctx))\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlog.Warnf(\"[ddl] failed to new session, err %v\", err)\n\t\tif isContextFinished(err) {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t\tcontinue\n\t}\n\treturn etcdSession, errors.Trace(err)\n}\n\n\/\/ CampaignOwners implements OwnerManager.CampaignOwners interface.\nfunc (m *ownerManager) CampaignOwners(ctx goctx.Context, wg *sync.WaitGroup) error {\n\tddlSession, err := m.newSession(ctx, newSessionDefaultRetryCnt)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tbgSession, err := m.newSession(ctx, newSessionDefaultRetryCnt)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\twg.Add(2)\n\tddlCtx, _ := goctx.WithCancel(ctx)\n\tgo m.campaignLoop(ddlCtx, ddlSession, DDLOwnerKey, wg)\n\n\tbgCtx, _ := goctx.WithCancel(ctx)\n\tgo m.campaignLoop(bgCtx, bgSession, BgOwnerKey, wg)\n\treturn nil\n}\n\nfunc (m *ownerManager) campaignLoop(ctx goctx.Context, etcdSession *concurrency.Session, key string, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tvar err error\n\tfor {\n\t\tselect {\n\t\tcase <-etcdSession.Done():\n\t\t\tlog.Info(\"[ddl] %s etcd session is done, creates a new one\", key)\n\t\t\tetcdSession, err = m.newSession(ctx, newSessionRetryUnlimited)\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"[ddl] break %s campaign loop, err %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tlog.Infof(\"[ddl] break %s campaign loop\", key)\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\telec := concurrency.NewElection(etcdSession, key)\n\t\terr = elec.Campaign(ctx, m.ddlID)\n\t\tif err != nil {\n\t\t\tlog.Infof(\"[ddl] %s ownerManager %s failed to campaign, err %v\", key, m.ddlID, err)\n\t\t\tif isContextFinished(err) {\n\t\t\t\tlog.Warnf(\"[ddl] break %s campaign loop, err %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\townerKey, err := GetOwnerInfo(ctx, elec, key, m.ddlID)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tm.setOwnerVal(key, true)\n\n\t\tm.watchOwner(ctx, etcdSession, ownerKey)\n\t\tm.setOwnerVal(key, false)\n\t}\n}\n\n\/\/ GetOwnerInfo gets the owner information.\nfunc GetOwnerInfo(ctx goctx.Context, elec *concurrency.Election, key, id string) (string, error) {\n\tresp, err := elec.Leader(ctx)\n\tif err != nil {\n\t\t\/\/ If no leader elected currently, it returns ErrElectionNoLeader.\n\t\tlog.Infof(\"[ddl] failed to get leader, err %v\", err)\n\t\treturn \"\", errors.Trace(err)\n\t}\n\townerID := string(resp.Kvs[0].Value)\n\tlog.Infof(\"[ddl] %s ownerManager is %s, owner is %v\", key, id, ownerID)\n\tif ownerID != id {\n\t\tlog.Warnf(\"[ddl] ownerManager %s isn't the owner\", id)\n\t\treturn \"\", errors.New(\"ownerInfoNotMatch\")\n\t}\n\n\treturn string(resp.Kvs[0].Key), nil\n}\n\nfunc (m *ownerManager) setOwnerVal(key string, val bool) {\n\tif key == DDLOwnerKey {\n\t\tm.SetOwner(val)\n\t} else {\n\t\tm.SetBgOwner(val)\n\t}\n}\n\nfunc (m *ownerManager) watchOwner(ctx goctx.Context, etcdSession *concurrency.Session, key string) {\n\tlog.Debugf(\"[ddl] ownerManager %s watch owner key %v\", m.ddlID, key)\n\twatchCh := m.etcdCli.Watch(ctx, key)\n\tfor {\n\t\tselect {\n\t\tcase resp := <-watchCh:\n\t\t\tif resp.Canceled {\n\t\t\t\tlog.Infof(\"[ddl] ownerManager %s watch owner key %v failed, no owner\",\n\t\t\t\t\tm.ddlID, key)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, ev := range resp.Events {\n\t\t\t\tif ev.Type == mvccpb.DELETE {\n\t\t\t\t\tlog.Infof(\"[ddl] ownerManager %s watch owner key %v failed, owner is deleted\", m.ddlID, key)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-etcdSession.Done():\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>ddl: update owner_manager TTL (#3440)<commit_after>\/\/ Copyright 2017 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ddl\n\nimport (\n\t\"math\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\t\"github.com\/coreos\/etcd\/clientv3\/concurrency\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/log\"\n\tgoctx \"golang.org\/x\/net\/context\"\n)\n\n\/\/ OwnerManager is used to campaign the owner and manage the owner information.\ntype OwnerManager interface {\n\t\/\/ ID returns the ID of DDL.\n\tID() string\n\t\/\/ IsOwner returns whether the ownerManager is the DDL owner.\n\tIsOwner() bool\n\t\/\/ SetOwner sets whether the ownerManager is the DDL owner.\n\tSetOwner(isOwner bool)\n\t\/\/ IsOwner returns whether the ownerManager is the background owner.\n\tIsBgOwner() bool\n\t\/\/ SetOwner sets whether the ownerManager is the background owner.\n\tSetBgOwner(isOwner bool)\n\t\/\/ CampaignOwners campaigns the DDL owner and the background owner.\n\tCampaignOwners(ctx goctx.Context, wg *sync.WaitGroup) error\n\t\/\/ Cancel cancels this etcd ownerManager campaign.\n\tCancel()\n}\n\nconst (\n\t\/\/ DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.\n\tDDLOwnerKey = \"\/tidb\/ddl\/fg\/owner\"\n\t\/\/ BgOwnerKey is the background owner path that is saved to etcd, and it's exported for testing.\n\tBgOwnerKey = \"\/tidb\/ddl\/bg\/owner\"\n\tnewSessionDefaultRetryCnt = 3\n\tnewSessionRetryUnlimited = math.MaxInt64\n)\n\n\/\/ ownerManager represents the structure which is used for electing owner.\ntype ownerManager struct {\n\tddlOwner int32\n\tbgOwner int32\n\tddlID string \/\/ id is the ID of DDL.\n\tetcdCli *clientv3.Client\n\tcancel goctx.CancelFunc\n}\n\n\/\/ NewOwnerManager creates a new OwnerManager.\nfunc NewOwnerManager(etcdCli *clientv3.Client, id string, cancel goctx.CancelFunc) OwnerManager {\n\treturn &ownerManager{\n\t\tetcdCli: etcdCli,\n\t\tddlID: id,\n\t\tcancel: cancel,\n\t}\n}\n\n\/\/ ID implements OwnerManager.ID interface.\nfunc (m *ownerManager) ID() string {\n\treturn m.ddlID\n}\n\n\/\/ IsOwner implements OwnerManager.IsOwner interface.\nfunc (m *ownerManager) IsOwner() bool {\n\treturn atomic.LoadInt32(&m.ddlOwner) == 1\n}\n\n\/\/ SetOwner implements OwnerManager.SetOwner interface.\nfunc (m *ownerManager) SetOwner(isOwner bool) {\n\tif isOwner {\n\t\tatomic.StoreInt32(&m.ddlOwner, 1)\n\t} else {\n\t\tatomic.StoreInt32(&m.ddlOwner, 0)\n\t}\n}\n\n\/\/ Cancel implements OwnerManager.Cancel interface.\nfunc (m *ownerManager) Cancel() {\n\tm.cancel()\n}\n\n\/\/ IsBgOwner implements OwnerManager.IsBgOwner interface.\nfunc (m *ownerManager) IsBgOwner() bool {\n\treturn atomic.LoadInt32(&m.bgOwner) == 1\n}\n\n\/\/ SetBgOwner implements OwnerManager.SetBgOwner interface.\nfunc (m *ownerManager) SetBgOwner(isOwner bool) {\n\tif isOwner {\n\t\tatomic.StoreInt32(&m.bgOwner, 1)\n\t} else {\n\t\tatomic.StoreInt32(&m.bgOwner, 0)\n\t}\n}\n\n\/\/ NewSessionTTL is the etcd session's TTL in seconds. It's exported for testing.\nvar NewSessionTTL = 60\n\nfunc (m *ownerManager) newSession(ctx goctx.Context, retryCnt int) (*concurrency.Session, error) {\n\tvar err error\n\tvar etcdSession *concurrency.Session\n\tfor i := 0; i < retryCnt; i++ {\n\t\tetcdSession, err = concurrency.NewSession(m.etcdCli,\n\t\t\tconcurrency.WithTTL(NewSessionTTL), concurrency.WithContext(ctx))\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlog.Warnf(\"[ddl] failed to new session, err %v\", err)\n\t\tif isContextFinished(err) {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t\tcontinue\n\t}\n\treturn etcdSession, errors.Trace(err)\n}\n\n\/\/ CampaignOwners implements OwnerManager.CampaignOwners interface.\nfunc (m *ownerManager) CampaignOwners(ctx goctx.Context, wg *sync.WaitGroup) error {\n\tddlSession, err := m.newSession(ctx, newSessionDefaultRetryCnt)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tbgSession, err := m.newSession(ctx, newSessionDefaultRetryCnt)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\twg.Add(2)\n\tddlCtx, _ := goctx.WithCancel(ctx)\n\tgo m.campaignLoop(ddlCtx, ddlSession, DDLOwnerKey, wg)\n\n\tbgCtx, _ := goctx.WithCancel(ctx)\n\tgo m.campaignLoop(bgCtx, bgSession, BgOwnerKey, wg)\n\treturn nil\n}\n\nfunc (m *ownerManager) campaignLoop(ctx goctx.Context, etcdSession *concurrency.Session, key string, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tvar err error\n\tfor {\n\t\tselect {\n\t\tcase <-etcdSession.Done():\n\t\t\tlog.Info(\"[ddl] %s etcd session is done, creates a new one\", key)\n\t\t\tetcdSession, err = m.newSession(ctx, newSessionRetryUnlimited)\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"[ddl] break %s campaign loop, err %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tlog.Infof(\"[ddl] break %s campaign loop\", key)\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\telec := concurrency.NewElection(etcdSession, key)\n\t\terr = elec.Campaign(ctx, m.ddlID)\n\t\tif err != nil {\n\t\t\tlog.Infof(\"[ddl] %s ownerManager %s failed to campaign, err %v\", key, m.ddlID, err)\n\t\t\tif isContextFinished(err) {\n\t\t\t\tlog.Warnf(\"[ddl] break %s campaign loop, err %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\townerKey, err := GetOwnerInfo(ctx, elec, key, m.ddlID)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tm.setOwnerVal(key, true)\n\n\t\tm.watchOwner(ctx, etcdSession, ownerKey)\n\t\tm.setOwnerVal(key, false)\n\t}\n}\n\n\/\/ GetOwnerInfo gets the owner information.\nfunc GetOwnerInfo(ctx goctx.Context, elec *concurrency.Election, key, id string) (string, error) {\n\tresp, err := elec.Leader(ctx)\n\tif err != nil {\n\t\t\/\/ If no leader elected currently, it returns ErrElectionNoLeader.\n\t\tlog.Infof(\"[ddl] failed to get leader, err %v\", err)\n\t\treturn \"\", errors.Trace(err)\n\t}\n\townerID := string(resp.Kvs[0].Value)\n\tlog.Infof(\"[ddl] %s ownerManager is %s, owner is %v\", key, id, ownerID)\n\tif ownerID != id {\n\t\tlog.Warnf(\"[ddl] ownerManager %s isn't the owner\", id)\n\t\treturn \"\", errors.New(\"ownerInfoNotMatch\")\n\t}\n\n\treturn string(resp.Kvs[0].Key), nil\n}\n\nfunc (m *ownerManager) setOwnerVal(key string, val bool) {\n\tif key == DDLOwnerKey {\n\t\tm.SetOwner(val)\n\t} else {\n\t\tm.SetBgOwner(val)\n\t}\n}\n\nfunc (m *ownerManager) watchOwner(ctx goctx.Context, etcdSession *concurrency.Session, key string) {\n\tlog.Debugf(\"[ddl] ownerManager %s watch owner key %v\", m.ddlID, key)\n\twatchCh := m.etcdCli.Watch(ctx, key)\n\tfor {\n\t\tselect {\n\t\tcase resp := <-watchCh:\n\t\t\tif resp.Canceled {\n\t\t\t\tlog.Infof(\"[ddl] ownerManager %s watch owner key %v failed, no owner\",\n\t\t\t\t\tm.ddlID, key)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, ev := range resp.Events {\n\t\t\t\tif ev.Type == mvccpb.DELETE {\n\t\t\t\t\tlog.Infof(\"[ddl] ownerManager %s watch owner key %v failed, owner is deleted\", m.ddlID, key)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-etcdSession.Done():\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage system runs system-level testing of rqlite. This includes testing of single nodes, and multi-node clusters.\n*\/\npackage system\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_SingleNode(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO bar(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: bar\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT blah blah`,\n\t\t\texpected: `{\"results\":[{\"error\":\"near \\\"blah\\\": syntax error\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP TABLE bar`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: bar\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP TABLE foo`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\nfunc Test_SingleNodeMulti(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `CREATE TABLE bar (id integer not null primary key, sequence integer)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"declan\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":2,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO bar(sequence) VALUES(5)`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n\n\tr, err := node.QueryMulti([]string{\"SELECT * FROM foo\", \"SELECT * FROM bar\"})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to run multiple queries: %s\", err.Error())\n\t}\n\tif r != `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"],[2,\"declan\"]]},{\"columns\":[\"id\",\"sequence\"],\"types\":[\"integer\",\"integer\"],\"values\":[[1,5]]}]}` {\n\t\tt.Fatalf(\"test received wrong result got %s\", r)\n\t}\n}\n\nfunc Test_SingleParameterizedNode(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt []interface{}\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: []interface{}{\"CREATE TABLE foo (id integer not null primary key, name text, age integer)\"},\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{\"INSERT INTO foo(name, age) VALUES(?, ?)\", \"fiona\", 20},\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{\"SELECT * FROM foo WHERE NAME=?\", \"fiona\"},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\",\"age\"],\"types\":[\"integer\",\"text\",\"integer\"],\"values\":[[1,\"fiona\",20]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.ExecuteParameterized(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.QueryParameterized(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\n\/\/ Test_SingleNodeSQLInjection demonstrates that using the non-parameterized API is vulnerable to\n\/\/ SQL injection attacks.\nfunc Test_SingleNodeSQLInjection(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo WHERE name=\"baz\"`,\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: fmt.Sprintf(`SELECT * FROM foo WHERE name=%s`, `\"baz\";DROP TABLE FOO`),\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: foo\"}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\n\/\/ Test_SingleNodeNoSQLInjection demonstrates that using the parameterized API protects\n\/\/ against SQL injection attacks.\nfunc Test_SingleNodeNoSQLInjection(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt []interface{}\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: []interface{}{\"CREATE TABLE foo (id integer not null primary key, name text)\"},\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo WHERE name=\"baz\"`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo WHERE name=?`, `\"baz\";DROP TABLE FOO`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.ExecuteParameterized(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.QueryParameterized(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\nfunc Test_SingleNodeRestart(t *testing.T) {\n\t\/\/ Deprovision of a node deletes the node's dir, so make a copy first.\n\tsrcdir := filepath.Join(\"testdata\", \"v5.6.0-data\")\n\tdestdir := mustTempDir()\n\tif err := os.Remove(destdir); err != nil {\n\t\tt.Fatalf(\"failed to remove dest dir: %s\", err)\n\t}\n\tif err := copyDir(srcdir, destdir); err != nil {\n\t\tt.Fatalf(\"failed to copy node test directory: %s\", err)\n\t}\n\n\tnode := mustNodeEncrypted(destdir, true, false, false, \"node1\")\n\tdefer node.Deprovision()\n\tif _, err := node.WaitForLeader(); err != nil {\n\t\tt.Fatal(\"node never became leader\")\n\t}\n\n\t\/\/ Let's wait a few seconds to be sure logs are applied.\n\tn := 0\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tr, err := node.QueryNoneConsistency(`SELECT * FROM foo`)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"query failed: %s\", err)\n\t\t}\n\n\t\texpected := `{\"results\":[{\"columns\":[\"id\",\"name\",\"age\"],\"types\":[\"integer\",\"text\",\"integer\"],\"values\":[[1,\"fiona\",20]]}]}`\n\t\tif r != expected && n == 10 {\n\t\t\tt.Fatalf(`query received wrong result, got: %s exp: %s`, r, expected)\n\t\t} else {\n\t\t\tbreak \/\/ Test successful!\n\t\t}\n\n\t\tn++\n\t}\n}\n\nfunc Test_SingleNodeCoverage(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\t\/\/ Access endpoints to ensure the code is covered.\n\tvar err error\n\tvar str string\n\n\tstr, err = node.Status()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to access status endpoint: %s\", err.Error())\n\t}\n\tif !isJSON(str) {\n\t\tt.Fatalf(\"output from status endpoint is not valid JSON: %s\", str)\n\t}\n\n\tstr, err = node.Expvar()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to access expvar endpoint: %s\", err.Error())\n\t}\n\tif !isJSON(str) {\n\t\tt.Fatalf(\"output from expvar endpoint is not valid JSON: %s\", str)\n\t}\n}\n<commit_msg>Fix restart test logic<commit_after>\/*\nPackage system runs system-level testing of rqlite. This includes testing of single nodes, and multi-node clusters.\n*\/\npackage system\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc Test_SingleNode(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO bar(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: bar\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT blah blah`,\n\t\t\texpected: `{\"results\":[{\"error\":\"near \\\"blah\\\": syntax error\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP TABLE bar`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: bar\"}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP TABLE foo`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\nfunc Test_SingleNodeMulti(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `CREATE TABLE bar (id integer not null primary key, sequence integer)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO foo(name) VALUES(\"declan\")`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":2,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `INSERT INTO bar(sequence) VALUES(5)`,\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n\n\tr, err := node.QueryMulti([]string{\"SELECT * FROM foo\", \"SELECT * FROM bar\"})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to run multiple queries: %s\", err.Error())\n\t}\n\tif r != `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"],\"values\":[[1,\"fiona\"],[2,\"declan\"]]},{\"columns\":[\"id\",\"sequence\"],\"types\":[\"integer\",\"integer\"],\"values\":[[1,5]]}]}` {\n\t\tt.Fatalf(\"test received wrong result got %s\", r)\n\t}\n}\n\nfunc Test_SingleParameterizedNode(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt []interface{}\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: []interface{}{\"CREATE TABLE foo (id integer not null primary key, name text, age integer)\"},\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{\"INSERT INTO foo(name, age) VALUES(?, ?)\", \"fiona\", 20},\n\t\t\texpected: `{\"results\":[{\"last_insert_id\":1,\"rows_affected\":1}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{\"SELECT * FROM foo WHERE NAME=?\", \"fiona\"},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\",\"age\"],\"types\":[\"integer\",\"text\",\"integer\"],\"values\":[[1,\"fiona\",20]]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.ExecuteParameterized(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.QueryParameterized(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\n\/\/ Test_SingleNodeSQLInjection demonstrates that using the non-parameterized API is vulnerable to\n\/\/ SQL injection attacks.\nfunc Test_SingleNodeSQLInjection(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt string\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: `CREATE TABLE foo (id integer not null primary key, name text)`,\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo WHERE name=\"baz\"`,\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: fmt.Sprintf(`SELECT * FROM foo WHERE name=%s`, `\"baz\";DROP TABLE FOO`),\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM foo`,\n\t\t\texpected: `{\"results\":[{\"error\":\"no such table: foo\"}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.Execute(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.Query(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\n\/\/ Test_SingleNodeNoSQLInjection demonstrates that using the parameterized API protects\n\/\/ against SQL injection attacks.\nfunc Test_SingleNodeNoSQLInjection(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\ttests := []struct {\n\t\tstmt []interface{}\n\t\texpected string\n\t\texecute bool\n\t}{\n\t\t{\n\t\t\tstmt: []interface{}{\"CREATE TABLE foo (id integer not null primary key, name text)\"},\n\t\t\texpected: `{\"results\":[{}]}`,\n\t\t\texecute: true,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo WHERE name=\"baz\"`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo WHERE name=?`, `\"baz\";DROP TABLE FOO`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t\t{\n\t\t\tstmt: []interface{}{`SELECT * FROM foo`},\n\t\t\texpected: `{\"results\":[{\"columns\":[\"id\",\"name\"],\"types\":[\"integer\",\"text\"]}]}`,\n\t\t\texecute: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tvar r string\n\t\tvar err error\n\t\tif tt.execute {\n\t\t\tr, err = node.ExecuteParameterized(tt.stmt)\n\t\t} else {\n\t\t\tr, err = node.QueryParameterized(tt.stmt)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(`test %d failed \"%s\": %s`, i, tt.stmt, err.Error())\n\t\t}\n\t\tif r != tt.expected {\n\t\t\tt.Fatalf(`test %d received wrong result \"%s\" got: %s exp: %s`, i, tt.stmt, r, tt.expected)\n\t\t}\n\t}\n}\n\nfunc Test_SingleNodeRestart(t *testing.T) {\n\t\/\/ Deprovision of a node deletes the node's dir, so make a copy first.\n\tsrcdir := filepath.Join(\"testdata\", \"v5.6.0-data\")\n\tdestdir := mustTempDir()\n\tif err := os.Remove(destdir); err != nil {\n\t\tt.Fatalf(\"failed to remove dest dir: %s\", err)\n\t}\n\tif err := copyDir(srcdir, destdir); err != nil {\n\t\tt.Fatalf(\"failed to copy node test directory: %s\", err)\n\t}\n\n\tnode := mustNodeEncrypted(destdir, true, false, false, \"node1\")\n\tdefer node.Deprovision()\n\tif _, err := node.WaitForLeader(); err != nil {\n\t\tt.Fatal(\"node never became leader\")\n\t}\n\n\t\/\/ Let's wait a few seconds to be sure logs are applied.\n\tn := 0\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tr, err := node.QueryNoneConsistency(`SELECT * FROM foo`)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"query failed: %s\", err)\n\t\t}\n\n\t\texpected := `{\"results\":[{\"columns\":[\"id\",\"name\",\"age\"],\"types\":[\"integer\",\"text\",\"integer\"],\"values\":[[1,\"fiona\",20]]}]}`\n\t\tif r != expected {\n\t\t\tif n == 10 {\n\t\t\t\tt.Fatalf(`query received wrong result, got: %s exp: %s`, r, expected)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak \/\/ Test successful!\n\t\t}\n\n\t\tn++\n\t}\n}\n\nfunc Test_SingleNodeCoverage(t *testing.T) {\n\tnode := mustNewLeaderNode()\n\tdefer node.Deprovision()\n\n\t\/\/ Access endpoints to ensure the code is covered.\n\tvar err error\n\tvar str string\n\n\tstr, err = node.Status()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to access status endpoint: %s\", err.Error())\n\t}\n\tif !isJSON(str) {\n\t\tt.Fatalf(\"output from status endpoint is not valid JSON: %s\", str)\n\t}\n\n\tstr, err = node.Expvar()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to access expvar endpoint: %s\", err.Error())\n\t}\n\tif !isJSON(str) {\n\t\tt.Fatalf(\"output from expvar endpoint is not valid JSON: %s\", str)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n)\n\ntype Folder struct {\n\tId int64 `json:\"id\"`\n\tUid string `json:\"uid\"`\n\tTitle string `json:\"title\"`\n}\n\nfunc (c *Client) Folders() ([]Folder, error) {\n\tfolders := make([]Folder, 0)\n\n\treq, err := c.newRequest(\"GET\", \"\/api\/folders\/\", nil, nil)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn folders, errors.New(resp.Status)\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\terr = json.Unmarshal(data, &folders)\n\treturn folders, err\n}\n\nfunc (c *Client) Folder(id int64) (Folder, error) {\n\tfolder := Folder{}\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/folders\/%d\", id), nil, nil)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn folder, errors.New(resp.Status)\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\terr = json.Unmarshal(data, &folder)\n\treturn folder, err\n}\n\nfunc (c *Client) NewFolder(title string) (Folder, error) {\n\tfolder := Folder{}\n\tdataMap := map[string]string{\n\t\t\"title\": title,\n\t}\n\tdata, err := json.Marshal(dataMap)\n\treq, err := c.newRequest(\"POST\", \"\/api\/folders\", nil, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn folder, errors.New(resp.Status)\n\t}\n\tdata, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\terr = json.Unmarshal(data, &folder)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\treturn folder, err\n}\n\nfunc (c *Client) UpdateFolder(id string, name string) error {\n\tdataMap := map[string]string{\n\t\t\"name\": name,\n\t}\n\tdata, err := json.Marshal(dataMap)\n\treq, err := c.newRequest(\"PUT\", fmt.Sprintf(\"\/api\/folders\/%s\", id), nil, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(resp.Status)\n\t}\n\treturn err\n}\n\nfunc (c *Client) DeleteFolder(id string) error {\n\treq, err := c.newRequest(\"DELETE\", fmt.Sprintf(\"\/api\/folders\/%s\", id), nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(resp.Status)\n\t}\n\treturn err\n}\n<commit_msg>use folder id api endpoint for finding folder by id<commit_after>package gapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n)\n\ntype Folder struct {\n\tId int64 `json:\"id\"`\n\tUid string `json:\"uid\"`\n\tTitle string `json:\"title\"`\n}\n\nfunc (c *Client) Folders() ([]Folder, error) {\n\tfolders := make([]Folder, 0)\n\n\treq, err := c.newRequest(\"GET\", \"\/api\/folders\/\", nil, nil)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn folders, errors.New(resp.Status)\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folders, err\n\t}\n\terr = json.Unmarshal(data, &folders)\n\treturn folders, err\n}\n\nfunc (c *Client) Folder(id int64) (*Folder, error) {\n\tfolder := &Folder{}\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/folders\/id\/%d\", id), nil, nil)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn folder, errors.New(resp.Status)\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\terr = json.Unmarshal(data, &folder)\n\treturn folder, err\n}\n\nfunc (c *Client) NewFolder(title string) (Folder, error) {\n\tfolder := Folder{}\n\tdataMap := map[string]string{\n\t\t\"title\": title,\n\t}\n\tdata, err := json.Marshal(dataMap)\n\treq, err := c.newRequest(\"POST\", \"\/api\/folders\", nil, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\tdata, _ = ioutil.ReadAll(resp.Body)\n\t\treturn folder, fmt.Errorf(\"status: %s body: %s\", resp.Status, data)\n\t}\n\tdata, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\terr = json.Unmarshal(data, &folder)\n\tif err != nil {\n\t\treturn folder, err\n\t}\n\treturn folder, err\n}\n\nfunc (c *Client) UpdateFolder(id string, name string) error {\n\tdataMap := map[string]string{\n\t\t\"name\": name,\n\t}\n\tdata, err := json.Marshal(dataMap)\n\treq, err := c.newRequest(\"PUT\", fmt.Sprintf(\"\/api\/folders\/%s\", id), nil, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(resp.Status)\n\t}\n\treturn err\n}\n\nfunc (c *Client) DeleteFolder(id string) error {\n\treq, err := c.newRequest(\"DELETE\", fmt.Sprintf(\"\/api\/folders\/%s\", id), nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(resp.Status)\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package diagnose\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"go.opentelemetry.io\/otel\/attribute\"\n\t\"go.opentelemetry.io\/otel\/codes\"\n\tsdktrace \"go.opentelemetry.io\/otel\/sdk\/trace\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\nconst (\n\twarningEventName = \"warning\"\n\tskippedEventName = \"skipped\"\n\tactionKey = \"actionKey\"\n\tspotCheckOkEventName = \"spot-check-ok\"\n\tspotCheckWarnEventName = \"spot-check-warn\"\n\tspotCheckErrorEventName = \"spot-check-error\"\n\terrorMessageKey = attribute.Key(\"error.message\")\n\tnameKey = attribute.Key(\"name\")\n\tmessageKey = attribute.Key(\"message\")\n)\n\nvar (\n\tMainSection = trace.WithAttributes(attribute.Key(\"diagnose\").String(\"main-section\"))\n)\n\nvar diagnoseSession = struct{}{}\nvar noopTracer = trace.NewNoopTracerProvider().Tracer(\"vault-diagnose\")\n\ntype testFunction func(context.Context) error\n\ntype Session struct {\n\ttc *TelemetryCollector\n\ttracer trace.Tracer\n\ttp *sdktrace.TracerProvider\n\tskip map[string]bool\n}\n\n\/\/ New initializes a Diagnose tracing session. In particular this wires a TelemetryCollector, which\n\/\/ synchronously receives and tracks OpenTelemetry spans in order to provide a tree structure of results\n\/\/ when the outermost span ends.\nfunc New(w io.Writer) *Session {\n\ttc := NewTelemetryCollector(w)\n\t\/\/so, _ := stdout.NewExporter(stdout.WithPrettyPrint())\n\ttp := sdktrace.NewTracerProvider(\n\t\tsdktrace.WithSampler(sdktrace.AlwaysSample()),\n\t\t\/\/sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(so)),\n\t\tsdktrace.WithSpanProcessor(tc),\n\t)\n\ttracer := tp.Tracer(\"vault-diagnose\")\n\tsess := &Session{\n\t\ttp: tp,\n\t\ttc: tc,\n\t\ttracer: tracer,\n\t\tskip: make(map[string]bool),\n\t}\n\treturn sess\n}\n\nfunc (s *Session) SetSkipList(ls []string) {\n\tfor _, e := range ls {\n\t\ts.skip[e] = true\n\t}\n}\n\n\/\/ IsSkipped returns true if skipName is present in the skip list. Can be used in combination with Skip to mark a\n\/\/ span skipped and conditionally skip some logic.\nfunc (s *Session) IsSkipped(skipName string) bool {\n\treturn s.skip[skipName]\n}\n\n\/\/ Context returns a new context with a defined diagnose session\nfunc Context(ctx context.Context, sess *Session) context.Context {\n\treturn context.WithValue(ctx, diagnoseSession, sess)\n}\n\n\/\/ CurrentSession retrieves the active diagnose session from the context, or nil if none.\nfunc CurrentSession(ctx context.Context) *Session {\n\tsessionCtxVal := ctx.Value(diagnoseSession)\n\tif sessionCtxVal != nil {\n\n\t\treturn sessionCtxVal.(*Session)\n\n\t}\n\treturn nil\n}\n\n\/\/ Finalize ends the Diagnose session, returning the root of the result tree. This will be empty until\n\/\/ the outermost span ends.\nfunc (s *Session) Finalize(ctx context.Context) *Result {\n\ts.tp.ForceFlush(ctx)\n\treturn s.tc.RootResult\n}\n\n\/\/ StartSpan starts a \"diagnose\" span, which is really just an OpenTelemetry Tracing span.\nfunc StartSpan(ctx context.Context, spanName string, options ...trace.SpanOption) (context.Context, trace.Span) {\n\tsession := CurrentSession(ctx)\n\tif session != nil {\n\t\treturn session.tracer.Start(ctx, spanName, options...)\n\t} else {\n\t\treturn noopTracer.Start(ctx, spanName, options...)\n\t}\n}\n\n\/\/ Fail records a failure in the current span\nfunc Fail(ctx context.Context, message string) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.SetStatus(codes.Error, message)\n}\n\n\/\/ Error records an error in the current span (but unlike Fail, doesn't set the overall span status to Error)\nfunc Error(ctx context.Context, err error, options ...trace.EventOption) error {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.RecordError(err, options...)\n\treturn err\n}\n\n\/\/ Skipped marks the current span skipped\nfunc Skipped(ctx context.Context) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.AddEvent(skippedEventName)\n}\n\n\/\/ Warn records a warning on the current span\nfunc Warn(ctx context.Context, msg string) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.AddEvent(warningEventName, trace.WithAttributes(messageKey.String(msg)))\n}\n\n\/\/ SpotOk adds an Ok result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotOk(ctx context.Context, checkName, message string, options ...trace.EventOption) {\n\taddSpotCheckResult(ctx, spotCheckOkEventName, checkName, message, options...)\n}\n\n\/\/ SpotWarn adds a Warning result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotWarn(ctx context.Context, checkName, message string, options ...trace.EventOption) {\n\taddSpotCheckResult(ctx, spotCheckWarnEventName, checkName, message, options...)\n}\n\n\/\/ SpotError adds an Error result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotError(ctx context.Context, checkName string, err error, options ...trace.EventOption) error {\n\tvar message string\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\taddSpotCheckResult(ctx, spotCheckErrorEventName, checkName, message, options...)\n\treturn err\n}\n\nfunc addSpotCheckResult(ctx context.Context, eventName, checkName, message string, options ...trace.EventOption) {\n\tspan := trace.SpanFromContext(ctx)\n\tattrs := append(options, trace.WithAttributes(nameKey.String(checkName)))\n\tif message != \"\" {\n\t\tattrs = append(attrs, trace.WithAttributes(messageKey.String(message)))\n\t}\n\tspan.AddEvent(eventName, attrs...)\n}\n\nfunc SpotCheck(ctx context.Context, checkName string, f func() error) error {\n\terr := f()\n\tif err != nil {\n\t\tSpotError(ctx, checkName, err)\n\t\treturn err\n\t} else {\n\t\tSpotOk(ctx, checkName, \"\")\n\t}\n\treturn nil\n}\n\n\/\/ Test creates a new named span, and executes the provided function within it. If the function returns an error,\n\/\/ the span is considered to have failed.\nfunc Test(ctx context.Context, spanName string, function testFunction, options ...trace.SpanOption) error {\n\tctx, span := StartSpan(ctx, spanName, options...)\n\tdefer span.End()\n\n\terr := function(ctx)\n\tif err != nil {\n\t\tspan.SetStatus(codes.Error, err.Error())\n\t}\n\treturn err\n}\n\n\/\/ WithTimeout wraps a context consuming function, and when called, returns an error if the sub-function does not\n\/\/ complete within the timeout, e.g.\n\/\/\n\/\/ diagnose.Test(ctx, \"my-span\", diagnose.WithTimeout(5 * time.Second, myTestFunc))\nfunc WithTimeout(d time.Duration, f testFunction) testFunction {\n\treturn func(ctx context.Context) error {\n\t\trch := make(chan error)\n\t\tt := time.NewTimer(d)\n\t\tdefer t.Stop()\n\t\tgo f(ctx)\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\treturn fmt.Errorf(\"timed out after %s\", d.String())\n\t\tcase err := <-rch:\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ Skippable wraps a Test function with logic that will not run the test if the skipName\n\/\/ was in the session's skip list\nfunc Skippable(skipName string, f testFunction) testFunction {\n\treturn func(ctx context.Context) error {\n\t\tsession := CurrentSession(ctx)\n\t\tif session != nil {\n\t\t\tif !session.IsSkipped(skipName) {\n\t\t\t\treturn f(ctx)\n\t\t\t} else {\n\t\t\t\tSkipped(ctx)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n<commit_msg>Add a Success helper to set successful spans' messages (#11621)<commit_after>package diagnose\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"go.opentelemetry.io\/otel\/attribute\"\n\t\"go.opentelemetry.io\/otel\/codes\"\n\tsdktrace \"go.opentelemetry.io\/otel\/sdk\/trace\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\nconst (\n\twarningEventName = \"warning\"\n\tskippedEventName = \"skipped\"\n\tactionKey = \"actionKey\"\n\tspotCheckOkEventName = \"spot-check-ok\"\n\tspotCheckWarnEventName = \"spot-check-warn\"\n\tspotCheckErrorEventName = \"spot-check-error\"\n\terrorMessageKey = attribute.Key(\"error.message\")\n\tnameKey = attribute.Key(\"name\")\n\tmessageKey = attribute.Key(\"message\")\n)\n\nvar (\n\tMainSection = trace.WithAttributes(attribute.Key(\"diagnose\").String(\"main-section\"))\n)\n\nvar diagnoseSession = struct{}{}\nvar noopTracer = trace.NewNoopTracerProvider().Tracer(\"vault-diagnose\")\n\ntype testFunction func(context.Context) error\n\ntype Session struct {\n\ttc *TelemetryCollector\n\ttracer trace.Tracer\n\ttp *sdktrace.TracerProvider\n\tskip map[string]bool\n}\n\n\/\/ New initializes a Diagnose tracing session. In particular this wires a TelemetryCollector, which\n\/\/ synchronously receives and tracks OpenTelemetry spans in order to provide a tree structure of results\n\/\/ when the outermost span ends.\nfunc New(w io.Writer) *Session {\n\ttc := NewTelemetryCollector(w)\n\t\/\/so, _ := stdout.NewExporter(stdout.WithPrettyPrint())\n\ttp := sdktrace.NewTracerProvider(\n\t\tsdktrace.WithSampler(sdktrace.AlwaysSample()),\n\t\t\/\/sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(so)),\n\t\tsdktrace.WithSpanProcessor(tc),\n\t)\n\ttracer := tp.Tracer(\"vault-diagnose\")\n\tsess := &Session{\n\t\ttp: tp,\n\t\ttc: tc,\n\t\ttracer: tracer,\n\t\tskip: make(map[string]bool),\n\t}\n\treturn sess\n}\n\nfunc (s *Session) SetSkipList(ls []string) {\n\tfor _, e := range ls {\n\t\ts.skip[e] = true\n\t}\n}\n\n\/\/ IsSkipped returns true if skipName is present in the skip list. Can be used in combination with Skip to mark a\n\/\/ span skipped and conditionally skip some logic.\nfunc (s *Session) IsSkipped(skipName string) bool {\n\treturn s.skip[skipName]\n}\n\n\/\/ Context returns a new context with a defined diagnose session\nfunc Context(ctx context.Context, sess *Session) context.Context {\n\treturn context.WithValue(ctx, diagnoseSession, sess)\n}\n\n\/\/ CurrentSession retrieves the active diagnose session from the context, or nil if none.\nfunc CurrentSession(ctx context.Context) *Session {\n\tsessionCtxVal := ctx.Value(diagnoseSession)\n\tif sessionCtxVal != nil {\n\n\t\treturn sessionCtxVal.(*Session)\n\n\t}\n\treturn nil\n}\n\n\/\/ Finalize ends the Diagnose session, returning the root of the result tree. This will be empty until\n\/\/ the outermost span ends.\nfunc (s *Session) Finalize(ctx context.Context) *Result {\n\ts.tp.ForceFlush(ctx)\n\treturn s.tc.RootResult\n}\n\n\/\/ StartSpan starts a \"diagnose\" span, which is really just an OpenTelemetry Tracing span.\nfunc StartSpan(ctx context.Context, spanName string, options ...trace.SpanOption) (context.Context, trace.Span) {\n\tsession := CurrentSession(ctx)\n\tif session != nil {\n\t\treturn session.tracer.Start(ctx, spanName, options...)\n\t} else {\n\t\treturn noopTracer.Start(ctx, spanName, options...)\n\t}\n}\n\n\/\/ Success sets the span to Successful (overriding any previous status) and sets the message to the input.\nfunc Success(ctx context.Context, message string) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.SetStatus(codes.Ok, message)\n}\n\n\/\/ Fail records a failure in the current span\nfunc Fail(ctx context.Context, message string) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.SetStatus(codes.Error, message)\n}\n\n\/\/ Error records an error in the current span (but unlike Fail, doesn't set the overall span status to Error)\nfunc Error(ctx context.Context, err error, options ...trace.EventOption) error {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.RecordError(err, options...)\n\treturn err\n}\n\n\/\/ Skipped marks the current span skipped\nfunc Skipped(ctx context.Context) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.AddEvent(skippedEventName)\n}\n\n\/\/ Warn records a warning on the current span\nfunc Warn(ctx context.Context, msg string) {\n\tspan := trace.SpanFromContext(ctx)\n\tspan.AddEvent(warningEventName, trace.WithAttributes(messageKey.String(msg)))\n}\n\n\/\/ SpotOk adds an Ok result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotOk(ctx context.Context, checkName, message string, options ...trace.EventOption) {\n\taddSpotCheckResult(ctx, spotCheckOkEventName, checkName, message, options...)\n}\n\n\/\/ SpotWarn adds a Warning result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotWarn(ctx context.Context, checkName, message string, options ...trace.EventOption) {\n\taddSpotCheckResult(ctx, spotCheckWarnEventName, checkName, message, options...)\n}\n\n\/\/ SpotError adds an Error result without adding a new Span. This should be used for instantaneous checks with no\n\/\/ possible sub-spans\nfunc SpotError(ctx context.Context, checkName string, err error, options ...trace.EventOption) error {\n\tvar message string\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\taddSpotCheckResult(ctx, spotCheckErrorEventName, checkName, message, options...)\n\treturn err\n}\n\nfunc addSpotCheckResult(ctx context.Context, eventName, checkName, message string, options ...trace.EventOption) {\n\tspan := trace.SpanFromContext(ctx)\n\tattrs := append(options, trace.WithAttributes(nameKey.String(checkName)))\n\tif message != \"\" {\n\t\tattrs = append(attrs, trace.WithAttributes(messageKey.String(message)))\n\t}\n\tspan.AddEvent(eventName, attrs...)\n}\n\nfunc SpotCheck(ctx context.Context, checkName string, f func() error) error {\n\terr := f()\n\tif err != nil {\n\t\tSpotError(ctx, checkName, err)\n\t\treturn err\n\t} else {\n\t\tSpotOk(ctx, checkName, \"\")\n\t}\n\treturn nil\n}\n\n\/\/ Test creates a new named span, and executes the provided function within it. If the function returns an error,\n\/\/ the span is considered to have failed.\nfunc Test(ctx context.Context, spanName string, function testFunction, options ...trace.SpanOption) error {\n\tctx, span := StartSpan(ctx, spanName, options...)\n\tdefer span.End()\n\n\terr := function(ctx)\n\tif err != nil {\n\t\tspan.SetStatus(codes.Error, err.Error())\n\t}\n\treturn err\n}\n\n\/\/ WithTimeout wraps a context consuming function, and when called, returns an error if the sub-function does not\n\/\/ complete within the timeout, e.g.\n\/\/\n\/\/ diagnose.Test(ctx, \"my-span\", diagnose.WithTimeout(5 * time.Second, myTestFunc))\nfunc WithTimeout(d time.Duration, f testFunction) testFunction {\n\treturn func(ctx context.Context) error {\n\t\trch := make(chan error)\n\t\tt := time.NewTimer(d)\n\t\tdefer t.Stop()\n\t\tgo f(ctx)\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\treturn fmt.Errorf(\"timed out after %s\", d.String())\n\t\tcase err := <-rch:\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ Skippable wraps a Test function with logic that will not run the test if the skipName\n\/\/ was in the session's skip list\nfunc Skippable(skipName string, f testFunction) testFunction {\n\treturn func(ctx context.Context) error {\n\t\tsession := CurrentSession(ctx)\n\t\tif session != nil {\n\t\t\tif !session.IsSkipped(skipName) {\n\t\t\t\treturn f(ctx)\n\t\t\t} else {\n\t\t\t\tSkipped(ctx)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package class\n\nimport . \"github.com\/zxh0\/jvm.go\/jvmgo\/any\"\n\n\/\/ todo\n\/\/ cannot use package 'native' because of cycle import!\nvar registry = map[string]Any{}\nvar registerNatives Any\n\nfunc SetRegisterNatives(_registerNatives Any) {\n\tregisterNatives = _registerNatives\n}\n\nfunc RegisterNativeMethod(className, methodName, methodDescriptor string, method Any) {\n\tkey := className + \"~\" + methodName + \"~\" + methodDescriptor\n\tregistry[key] = method\n}\n\nfunc findNativeMethod(method *Method) Any {\n\tif method.IsRegisterNatives() {\n\t\treturn registerNatives\n\t}\n\n\tkey := method.class.name + \"~\" + method.name + \"~\" + method.descriptor\n\tif method, ok := registry[key]; ok {\n\t\treturn method\n\t} else {\n\t\tpanic(\"native method not found: \" + key)\n\t}\n}\n<commit_msg>detect duplicate keys<commit_after>package class\n\nimport . \"github.com\/zxh0\/jvm.go\/jvmgo\/any\"\n\n\/\/ todo\n\/\/ cannot use package 'native' because of cycle import!\nvar registry = map[string]Any{}\nvar registerNatives Any\n\nfunc SetRegisterNatives(_registerNatives Any) {\n\tregisterNatives = _registerNatives\n}\n\nfunc RegisterNativeMethod(className, methodName, methodDescriptor string, method Any) {\n\tkey := className + \"~\" + methodName + \"~\" + methodDescriptor\n\tregistry[key] = method\n\n\tif _, ok := registry[key]; !ok {\n\t\tregistry[key] = method\n\t} else {\n\t\tpanic(\"native method:\" + key + \" has been registered !\")\n\t}\n\n}\n\nfunc findNativeMethod(method *Method) Any {\n\tif method.IsRegisterNatives() {\n\t\treturn registerNatives\n\t}\n\n\tkey := method.class.name + \"~\" + method.name + \"~\" + method.descriptor\n\tif method, ok := registry[key]; ok {\n\t\treturn method\n\t} else {\n\t\tpanic(\"native method not found: \" + key)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"path\/filepath\"\n\n\t\"github.com\/kpango\/glg\"\n)\n\nvar (\n\tTARGET_PREFIX string = *flag.String(\"prefix\", \"\", \"Use to find service from dir\")\n\tTARGET_SUFFIX string = *flag.String(\"suffix\", \"\", \"Use to find service from dir\")\n\tTARGET_FILE string = *flag.String(\"filename\", \"Makefile\", \"This file name is used for `go run` command\")\n)\n\nfunc loadServicesConfiguration() (error, bool) {\n\tallServices = make(map[string]*service)\n\traw, err := ioutil.ReadFile(\"services.json\")\n\tif err != nil {\n\t\treturn err, false\n\t}\n\terr = json.Unmarshal(raw, &allServices)\n\tif err != nil {\n\t\treturn err, false\n\t}\n\tfor key, val := range allServices {\n\t\tif val.Target == \"\" {\n\t\t\tglg.Warnf(\"Field `target` is not provided for %v service\", key)\n\t\t\tdelete(allServices, key)\n\t\t} else {\n\t\t\tval.Name = key\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc init() {\n\terr, fatal := loadServicesConfiguration()\n\tif err != nil {\n\t\tglg.Errorf(\"Can't load services: %v.\", err)\n\t\tif fatal {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc RestartAllServices(args ...string) {\n\tStopAllServices()\n\terr, fatal := loadServicesConfiguration()\n\tif err != nil {\n\t\tglg.Errorf(\"Can't load services: %v.\", err)\n\t\tif fatal {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tStartAllServices(args...)\n}\n\nfunc RestartService(svcName string) {\n\tStopService(svcName)\n\tsvc := GetService(svcName)\n\tif svc != nil {\n\t\tsvc.Start()\n\t}\n}\n\nfunc isTargetExist(target string) bool {\n\tfileInfo, err := os.Stat(target)\n\tif os.IsNotExist(err) || fileInfo.IsDir() {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc svcByTargetName(target string) (*service, error) {\n\tif !isTargetExist(target) {\n\t\treturn nil, errors.New(\"service does not exist\")\n\t}\n\treturn &service{\n\t\tTarget: target,\n\t}, nil\n}\n\n\/\/ Ищет сервис в соседних директориях\n\/\/ Запускаемый файл должен называться `main.go`\nfunc svcByNameFromDir(svcName string) (*service, error) {\n\tprintln(filepath.Join(TARGET_PREFIX, svcName, TARGET_SUFFIX, TARGET_FILE))\n\tsvc, err := svcByTargetName(filepath.Join(TARGET_PREFIX, svcName, TARGET_SUFFIX, TARGET_FILE))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsvc.Name = svcName\n\treturn svc, nil\n}\n\nfunc findService(svcName string) (*service, error) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tsvc, err := svcByNameFromDir(svcName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't find service %v.\", svcName)\n\t\t}\n\t\tallServices[svcName] = svc\n\t\treturn svc, nil\n\t} else {\n\t\treturn svc, nil\n\t}\n}\n\nfunc GetService(svcName string, args ...string) *service {\n\tsvc, err := findService(svcName)\n\tif err != nil {\n\t\tglg.Warn(err)\n\t\treturn nil\n\t}\n\terr = svc.SetupService(args...)\n\tif err != nil {\n\t\tglg.Errorf(\"Can't create service: %v\", err)\n\t\treturn nil\n\t}\n\treturn svc\n}\n\nfunc StartAllServices(args ...string) {\n\tfor _, svc := range allServices {\n\t\tif svc.IsRunning {\n\t\t\tglg.Infof(\"%v already started.\", svc.Name)\n\t\t} else {\n\t\t\tsvc := GetService(svc.Name, args...)\n\t\t\tif svc != nil {\n\t\t\t\tsvc.Start()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc StopAllServices() {\n\tfor _, svc := range allServices {\n\t\tsvc.Stop()\n\t}\n}\n\nfunc StopService(svcName string) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tglg.Warnf(\"Can't find service %v.\", svcName)\n\t} else {\n\t\tsvc.Stop()\n\t}\n}\n\nfunc KillAllServices() {\n\tfor _, svc := range allServices {\n\t\tsvc.Kill()\n\t}\n}\n\nfunc KillService(svcName string) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tglg.Warnf(\"Can't find service %v.\", svcName)\n\t} else {\n\t\tsvc.Kill()\n\t}\n}\n<commit_msg>fix start all<commit_after>package backend\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"path\/filepath\"\n\n\t\"github.com\/kpango\/glg\"\n)\n\nvar (\n\tTARGET_PREFIX string = *flag.String(\"prefix\", \"\", \"Use to find service from dir\")\n\tTARGET_SUFFIX string = *flag.String(\"suffix\", \"\", \"Use to find service from dir\")\n\tTARGET_FILE string = *flag.String(\"filename\", \"Makefile\", \"This file name is used for `go run` command\")\n)\n\nfunc loadServicesConfiguration() (error, bool) {\n\tallServices = make(map[string]*service)\n\traw, err := ioutil.ReadFile(\"services.json\")\n\tif err != nil {\n\t\treturn err, false\n\t}\n\terr = json.Unmarshal(raw, &allServices)\n\tif err != nil {\n\t\treturn err, false\n\t}\n\tfor key, val := range allServices {\n\t\tif val.Target == \"\" {\n\t\t\tglg.Warnf(\"Field `target` is not provided for %v service\", key)\n\t\t\tdelete(allServices, key)\n\t\t} else {\n\t\t\tval.Name = key\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc init() {\n\terr, fatal := loadServicesConfiguration()\n\tif err != nil {\n\t\tglg.Errorf(\"Can't load services: %v.\", err)\n\t\tif fatal {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc RestartAllServices(args ...string) {\n\tStopAllServices()\n\terr, fatal := loadServicesConfiguration()\n\tif err != nil {\n\t\tglg.Errorf(\"Can't load services: %v.\", err)\n\t\tif fatal {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tStartAllServices(args...)\n}\n\nfunc RestartService(svcName string) {\n\tStopService(svcName)\n\tsvc := GetService(svcName)\n\tif svc != nil {\n\t\tsvc.Start()\n\t}\n}\n\nfunc isTargetExist(target string) bool {\n\tfileInfo, err := os.Stat(target)\n\tif os.IsNotExist(err) || fileInfo.IsDir() {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc svcByTargetName(target string) (*service, error) {\n\tif !isTargetExist(target) {\n\t\treturn nil, errors.New(\"service does not exist\")\n\t}\n\treturn &service{\n\t\tTarget: target,\n\t}, nil\n}\n\n\/\/ Ищет сервис в соседних директориях\n\/\/ Запускаемый файл должен называться `main.go`\nfunc svcByNameFromDir(svcName string) (*service, error) {\n\tprintln(filepath.Join(TARGET_PREFIX, svcName, TARGET_SUFFIX, TARGET_FILE))\n\tsvc, err := svcByTargetName(filepath.Join(TARGET_PREFIX, svcName, TARGET_SUFFIX, TARGET_FILE))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsvc.Name = svcName\n\treturn svc, nil\n}\n\nfunc findService(svcName string) (*service, error) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tsvc, err := svcByNameFromDir(svcName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't find service %v.\", svcName)\n\t\t}\n\t\tallServices[svcName] = svc\n\t\treturn svc, nil\n\t} else {\n\t\treturn svc, nil\n\t}\n}\n\nfunc GetService(svcName string, args ...string) *service {\n\tsvc, err := findService(svcName)\n\tif err != nil {\n\t\tglg.Warn(err)\n\t\treturn nil\n\t}\n\terr = svc.SetupService(args...)\n\tif err != nil {\n\t\tglg.Errorf(\"Can't create service: %v\", err)\n\t\treturn nil\n\t}\n\treturn svc\n}\n\nfunc StartAllServices(args ...string) {\n\tfor _, svc := range allServices {\n\t\tif svc.IsRunning {\n\t\t\tglg.Infof(\"%v already started.\", svc.Name)\n\t\t} else {\n\t\t\tsvc := GetService(svc.Name, args...)\n\t\t\tif svc != nil {\n\t\t\t\tgo svc.Start()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc StopAllServices() {\n\tfor _, svc := range allServices {\n\t\tsvc.Stop()\n\t}\n}\n\nfunc StopService(svcName string) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tglg.Warnf(\"Can't find service %v.\", svcName)\n\t} else {\n\t\tsvc.Stop()\n\t}\n}\n\nfunc KillAllServices() {\n\tfor _, svc := range allServices {\n\t\tsvc.Kill()\n\t}\n}\n\nfunc KillService(svcName string) {\n\tsvc, exist := allServices[svcName]\n\tif !exist {\n\t\tglg.Warnf(\"Can't find service %v.\", svcName)\n\t} else {\n\t\tsvc.Kill()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"github.com\/Zenika\/MARCEL\/backend\/medias\"\n\t\"github.com\/rs\/cors\"\n\t\"log\"\n\t\"os\"\n\t\/\/\"github.com\/Zenika\/MARCEL\/backend\/plugins\"\n\t\"github.com\/Zenika\/MARCEL\/backend\/apidoc\"\n)\n\n\/\/current version of the API\nconst MARCEL_API_VERSION = \"1\"\n\nvar logFileName string = os.Getenv(\"MARCEL_LOG_FILE\")\nvar logFile *os.File\n\ntype App struct {\n\tRouter http.Handler\n\n\tmediaService medias.MediaService\n}\n\nfunc (a *App) Initialize() {\n\n\ta.initializeLog()\n\n\ta.initializeData()\n\n\ta.initializeRoutes()\n}\n\nfunc (a *App) Run(addr string) {\n\tlog.Fatal(http.ListenAndServe(addr, a.Router))\n\tlog.Printf(\"Server is started and listening on port %v\", addr)\n\n\tdefer logFile.Close()\n\n\tselect {}\n}\n\nfunc (a *App) initializeRoutes() {\n\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\t\/\/ AllowedOrigins: []string{\"http:\/\/localhost:*\"},\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"DELETE\", \"OPTION\", \"PUT\"},\n\t\tAllowCredentials: true,\n\t})\n\n\tr := mux.NewRouter()\n\ts := r.PathPrefix(\"\/api\/v\" + MARCEL_API_VERSION).Subrouter()\n\ts.HandleFunc(\"\/medias\", a.mediaService.GetAllHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/config\", a.mediaService.GetConfigHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/create\", a.mediaService.CreateHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/{idMedia:[0-9]*}\", a.mediaService.GetHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/{idMedia:[0-9]*}\", a.mediaService.PostHandler).Methods(\"POST\")\n\tr.HandleFunc(\"\/swagger.json\", apidoc.GetConfigHandler).Methods(\"GET\")\n\n\ta.Router = c.Handler(r)\n}\n\nfunc (a *App) initializeLog() {\n\tif len(logFileName) == 0 {\n\t\tlogFileName = \"marcel.log\"\n\t}\n\tvar err error = nil\n\tlogFile, err = os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.SetOutput(logFile)\n}\n\nfunc (a *App) initializeData() {\n\n\t\/\/Load plugins list from DB\n\t\/\/plugins.LoadPluginsCatalog()\n\n\t\/\/Load Medias configuration from DB\n\ta.mediaService = medias.NewMediaService()\n\ta.mediaService.GetMediaManager().LoadMedias()\n}\n<commit_msg>[Fix] Remove attribute for POST media<commit_after>package app\n\nimport (\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"github.com\/Zenika\/MARCEL\/backend\/medias\"\n\t\"github.com\/rs\/cors\"\n\t\"log\"\n\t\"os\"\n\t\/\/\"github.com\/Zenika\/MARCEL\/backend\/plugins\"\n\t\"github.com\/Zenika\/MARCEL\/backend\/apidoc\"\n)\n\n\/\/current version of the API\nconst MARCEL_API_VERSION = \"1\"\n\nvar logFileName string = os.Getenv(\"MARCEL_LOG_FILE\")\nvar logFile *os.File\n\ntype App struct {\n\tRouter http.Handler\n\n\tmediaService medias.MediaService\n}\n\nfunc (a *App) Initialize() {\n\n\ta.initializeLog()\n\n\ta.initializeData()\n\n\ta.initializeRoutes()\n}\n\nfunc (a *App) Run(addr string) {\n\tlog.Fatal(http.ListenAndServe(addr, a.Router))\n\tlog.Printf(\"Server is started and listening on port %v\", addr)\n\n\tdefer logFile.Close()\n\n\tselect {}\n}\n\nfunc (a *App) initializeRoutes() {\n\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\t\/\/ AllowedOrigins: []string{\"http:\/\/localhost:*\"},\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"DELETE\", \"OPTION\", \"PUT\"},\n\t\tAllowCredentials: true,\n\t})\n\n\tr := mux.NewRouter()\n\ts := r.PathPrefix(\"\/api\/v\" + MARCEL_API_VERSION).Subrouter()\n\ts.HandleFunc(\"\/medias\", a.mediaService.GetAllHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/config\", a.mediaService.GetConfigHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/create\", a.mediaService.CreateHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\/{idMedia:[0-9]*}\", a.mediaService.GetHandler).Methods(\"GET\")\n\ts.HandleFunc(\"\/medias\", a.mediaService.PostHandler).Methods(\"POST\")\n\tr.HandleFunc(\"\/swagger.json\", apidoc.GetConfigHandler).Methods(\"GET\")\n\n\ta.Router = c.Handler(r)\n}\n\nfunc (a *App) initializeLog() {\n\tif len(logFileName) == 0 {\n\t\tlogFileName = \"marcel.log\"\n\t}\n\tvar err error = nil\n\tlogFile, err = os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.SetOutput(logFile)\n}\n\nfunc (a *App) initializeData() {\n\n\t\/\/Load plugins list from DB\n\t\/\/plugins.LoadPluginsCatalog()\n\n\t\/\/Load Medias configuration from DB\n\ta.mediaService = medias.NewMediaService()\n\ta.mediaService.GetMediaManager().LoadMedias()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n)\n\nfunc init() {\n\t\/\/ No Favicon this is an API Server. But god forbid you actually use this in a browser.\n\thttp.Handle(\"\/favicon.ico\", http.NotFoundHandler())\n\n\t\/\/ http.HandleFunc(\"\/\", handler)\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Hello!\")\n}\n<commit_msg>Base backend<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n)\n\nfunc init() {\n\t\/\/ No Favicon this is an API Server. But god forbid you actually use this in a browser.\n\thttp.Handle(\"\/favicon.ico\", http.NotFoundHandler())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage registry\n\nimport (\n\t\"time\"\n\n\t\"github.com\/jacobsa\/comeback\/blob\"\n)\n\ntype Registry interface {\n\t\/\/ Record that the named backup job has completed.\n\tRecordBackup(j CompletedJob) (err error)\n\n\t\/\/ Return a list of the most recent completed backups.\n\tListRecentBackups() (jobs []CompletedJob, err error)\n\n\t\/\/ Find a particular completed job by start time.\n\tFindBackup(startTime time.Time) (job CompletedJob, err error)\n}\n\n\/\/ A record in the backup registry describing a successful backup job.\ntype CompletedJob struct {\n\t\/\/ The time at which the backup was started.\n\tStartTime time.Time\n\n\t\/\/ The name of the backup job.\n\tName string\n\n\t\/\/ The score representing the contents of the backup.\n\tScore blob.Score\n}\n<commit_msg>ListRecentBackups -> ListBackups<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage registry\n\nimport (\n\t\"time\"\n\n\t\"github.com\/jacobsa\/comeback\/blob\"\n)\n\ntype Registry interface {\n\t\/\/ Record that the named backup job has completed.\n\tRecordBackup(j CompletedJob) (err error)\n\n\t\/\/ Return a list of all completed backups.\n\tListBackups() (jobs []CompletedJob, err error)\n\n\t\/\/ Find a particular completed job by start time.\n\tFindBackup(startTime time.Time) (job CompletedJob, err error)\n}\n\n\/\/ A record in the backup registry describing a successful backup job.\ntype CompletedJob struct {\n\t\/\/ The time at which the backup was started.\n\tStartTime time.Time\n\n\t\/\/ The name of the backup job.\n\tName string\n\n\t\/\/ The score representing the contents of the backup.\n\tScore blob.Score\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc testIconsFindHelper(terms []string) icons {\n\treturn newIcons().find(terms)\n}\n\nfunc TestIcons_iconsYamlPath_TestEnv(t *testing.T) {\n\tactual := iconsYamlPath()\n\texpected := \"workflow\/icons.yml\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_iconsYamlPath_ProductionEnv(t *testing.T) {\n\tresetEnv := setTestEnvHelper(\"FAW_ICONS_YAML_PATH\", \"\")\n\tdefer resetEnv()\n\n\tactual := iconsYamlPath()\n\texpected := \"icons.yml\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_iconsReadYaml(t *testing.T) {\n\tpath := \"workflow\/icons.yml\"\n\tactual, _ := iconsReadYaml(path)\n\n\texpected, _ := ioutil.ReadFile(path)\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Error(\"failed to read file\")\n\t}\n}\n\nfunc TestIcons_iconsReadYaml_Error(t *testing.T) {\n\tpath := \"\"\n\t_, err := iconsReadYaml(path)\n\n\tif err == nil {\n\t\tt.Error(\"expected error, but nil\")\n\t}\n}\n\nfunc TestIcons_iconsUnmarshalYaml(t *testing.T) {\n\tb := []byte(`\nicons:\n - name: Glass\n id: glass\n unicode: f000\n created: 1.0\n filter:\n - martini\n`)\n\tactual, _ := iconsUnmarshalYaml(b)\n\n\ticon := icon{\n\t\tName: \"Glass\",\n\t\tID: \"glass\",\n\t\tUnicode: \"f000\",\n\t\tCreated: \"1.0\",\n\t\tFilter: []string{\"martini\"},\n\t}\n\texpected := iconsYaml{icons{icon}}\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_AllIcons(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := len(fi)\n\texpected := 519\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_ZeroIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"foo-bar-baz\"})\n\n\tactual := len(fi)\n\texpected := 0\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_OneIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"github-square\"})\n\n\tactual := len(fi)\n\texpected := 1\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_TwoIcons(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"github-\"})\n\n\tactual := len(fi)\n\texpected := 2\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_FirstIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := fi[0].ID\n\texpected := \"adjust\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_LastIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := fi[len(fi)-1].ID\n\texpected := \"youtube-square\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_TaxiIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"taxi\"})\n\n\tactual := fi[0].Name\n\texpected := \"Taxi\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].ID\n\texpected = \"taxi\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Unicode\n\texpected = \"f1ba\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Created\n\texpected = \"4.1\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Aliases[0]\n\texpected = \"cab\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Filter[0]\n\texpected = \"vehicle\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Categories[0]\n\texpected = \"Web Application Icons\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_Aliases(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"navicon\"})\n\n\tactual := fi[0].ID\n\texpected := \"bars\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tif len(fi) != 1 {\n\t\tt.Errorf(\"expected %v to eq %v\", len(fi), 1)\n\t}\n}\n<commit_msg>Fix tests (Font Awesome 4.4.0)<commit_after>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc testIconsFindHelper(terms []string) icons {\n\treturn newIcons().find(terms)\n}\n\nfunc TestIcons_iconsYamlPath_TestEnv(t *testing.T) {\n\tactual := iconsYamlPath()\n\texpected := \"workflow\/icons.yml\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_iconsYamlPath_ProductionEnv(t *testing.T) {\n\tresetEnv := setTestEnvHelper(\"FAW_ICONS_YAML_PATH\", \"\")\n\tdefer resetEnv()\n\n\tactual := iconsYamlPath()\n\texpected := \"icons.yml\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_iconsReadYaml(t *testing.T) {\n\tpath := \"workflow\/icons.yml\"\n\tactual, _ := iconsReadYaml(path)\n\n\texpected, _ := ioutil.ReadFile(path)\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Error(\"failed to read file\")\n\t}\n}\n\nfunc TestIcons_iconsReadYaml_Error(t *testing.T) {\n\tpath := \"\"\n\t_, err := iconsReadYaml(path)\n\n\tif err == nil {\n\t\tt.Error(\"expected error, but nil\")\n\t}\n}\n\nfunc TestIcons_iconsUnmarshalYaml(t *testing.T) {\n\tb := []byte(`\nicons:\n - name: Glass\n id: glass\n unicode: f000\n created: 1.0\n filter:\n - martini\n`)\n\tactual, _ := iconsUnmarshalYaml(b)\n\n\ticon := icon{\n\t\tName: \"Glass\",\n\t\tID: \"glass\",\n\t\tUnicode: \"f000\",\n\t\tCreated: \"1.0\",\n\t\tFilter: []string{\"martini\"},\n\t}\n\texpected := iconsYaml{icons{icon}}\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_AllIcons(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := len(fi)\n\texpected := 585\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_ZeroIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"foo-bar-baz\"})\n\n\tactual := len(fi)\n\texpected := 0\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_OneIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"github-square\"})\n\n\tactual := len(fi)\n\texpected := 1\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_TwoIcons(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"github-\"})\n\n\tactual := len(fi)\n\texpected := 2\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_FirstIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := fi[0].ID\n\texpected := \"500px\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_LastIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"\"})\n\n\tactual := fi[len(fi)-1].ID\n\texpected := \"youtube-square\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_TaxiIcon(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"taxi\"})\n\n\tactual := fi[0].Name\n\texpected := \"Taxi\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].ID\n\texpected = \"taxi\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Unicode\n\texpected = \"f1ba\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Created\n\texpected = \"4.1\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Aliases[0]\n\texpected = \"cab\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Filter[0]\n\texpected = \"vehicle\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tactual = fi[0].Categories[0]\n\texpected = \"Web Application Icons\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n}\n\nfunc TestIcons_find_Aliases(t *testing.T) {\n\tfi := testIconsFindHelper([]string{\"navicon\"})\n\n\tactual := fi[0].ID\n\texpected := \"bars\"\n\tif actual != expected {\n\t\tt.Errorf(\"expected %v to eq %v\", actual, expected)\n\t}\n\n\tif len(fi) != 1 {\n\t\tt.Errorf(\"expected %v to eq %v\", len(fi), 1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package imageserver_test\n\nimport (\n\t. \"github.com\/pierrre\/imageserver\"\n\t\"github.com\/pierrre\/imageserver\/testdata\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestImage(t *testing.T) {\n\tfor _, image := range testdata.Images {\n\t\tdata, err := image.MarshalBinary()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tnewImage, err := NewImageUnmarshalBinary(data)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(newImage, image) {\n\t\t\tt.Fatal(\"image not equals\")\n\t\t}\n\t}\n}\n\nfunc TestImageUnmarshalBinaryError(t *testing.T) {\n\t_, err := NewImageUnmarshalBinary(nil)\n\tif err == nil {\n\t\tt.Fatal(\"no error\")\n\t}\n}\n<commit_msg>100% test coverage on Image<commit_after>package imageserver_test\n\nimport (\n\t. \"github.com\/pierrre\/imageserver\"\n\t\"github.com\/pierrre\/imageserver\/testdata\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestImage(t *testing.T) {\n\tfor _, image := range testdata.Images {\n\t\tdata, err := image.MarshalBinary()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tnewImage, err := NewImageUnmarshalBinary(data)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(newImage, image) {\n\t\t\tt.Fatal(\"image not equals\")\n\t\t}\n\t}\n}\n\nfunc TestImageUnmarshalBinaryError(t *testing.T) {\n\tfor _, image := range testdata.Images {\n\t\tdata, err := image.MarshalBinary()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tindex := -1 \/\/ Always truncate 1 byte\n\t\tfor _, offset := range []int{\n\t\t\t4,\n\t\t\tlen(image.Format),\n\t\t\t4,\n\t\t\tlen(image.Data),\n\t\t} {\n\t\t\tindex += offset\n\t\t\terrorData := data[0:index]\n\t\t\t_, err = NewImageUnmarshalBinary(errorData)\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"no error\")\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package postgres\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/gob\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"strings\"\n)\n\ntype sessionValues map[string]interface{}\n\nfunc init() {\n\tgob.Register(sessionValues{})\n}\n\n\/\/ Value() converts a product into a string suitable for the SQL database\n\/\/\n\/\/ It encodes the data using Gob, then converts to a hex string suitable for the\n\/\/ bytea type\nfunc (p sessionValues) Value() (driver.Value, error) {\n\tbuf := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&buf)\n\terr := encoder.Encode(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn `E'\\\\x` + hex.EncodeToString(buf.Bytes()) + `'`, nil\n}\n\n\/\/ Scan() pulls the product out of the SQL database\n\/\/\n\/\/ It expects the data to be in a bytea field, and encoded using GOB\nfunc (p sessionValues) Scan(src interface{}) error {\n\tif src == nil {\n\t\treturn nil\n\t}\n\n\tb, ok := src.([]uint8)\n\tif !ok {\n\t\treturn fmt.Errorf(\"expected session values to be a byte array\")\n\t}\n\n\tbs := string(b)\n\tif strings.HasPrefix(bs, `E'\\\\x`) && strings.HasSuffix(bs, `'`) {\n\t\tbs = bs[5 : len(bs)-1]\n\n\t\tg, err := hex.DecodeString(bs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuf := bytes.NewBuffer(g)\n\t\tdecoder := gob.NewDecoder(buf)\n\t\treturn decoder.Decode(&p)\n\t}\n\treturn fmt.Errorf(\"session value does not have correct form. received %s\", bs)\n}\n<commit_msg>Subtle tweak for postgres sessions<commit_after>package postgres\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\/driver\"\n\t\"encoding\/gob\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"strings\"\n)\n\ntype sessionValues map[string]interface{}\n\nfunc init() {\n\tgob.Register(sessionValues{})\n\t\/\/ Register this as well as is used by oAuth code\n\tgob.Register(map[string]interface{}{})\n}\n\n\/\/ Value() converts a product into a string suitable for the SQL database\n\/\/\n\/\/ It encodes the data using Gob, then converts to a hex string suitable for the\n\/\/ bytea type\nfunc (p sessionValues) Value() (driver.Value, error) {\n\tbuf := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&buf)\n\terr := encoder.Encode(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn `E'\\\\x` + hex.EncodeToString(buf.Bytes()) + `'`, nil\n}\n\n\/\/ Scan() pulls the product out of the SQL database\n\/\/\n\/\/ It expects the data to be in a bytea field, and encoded using GOB\nfunc (p sessionValues) Scan(src interface{}) error {\n\tif src == nil {\n\t\treturn nil\n\t}\n\n\tb, ok := src.([]uint8)\n\tif !ok {\n\t\treturn fmt.Errorf(\"expected session values to be a byte array\")\n\t}\n\n\tbs := string(b)\n\tif strings.HasPrefix(bs, `E'\\\\x`) && strings.HasSuffix(bs, `'`) {\n\t\tbs = bs[5 : len(bs)-1]\n\n\t\tg, err := hex.DecodeString(bs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuf := bytes.NewBuffer(g)\n\t\tdecoder := gob.NewDecoder(buf)\n\t\treturn decoder.Decode(&p)\n\t}\n\treturn fmt.Errorf(\"session value does not have correct form. received %s\", bs)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/diginatu\/nagome\/nicolive\"\n)\n\n\/\/ ProceedNicoliveEvent is struct for proceeding nico events from nicolive packeage.\ntype ProceedNicoliveEvent struct {\n\tcv *CommentViewer\n\tuserDB *nicolive.UserDB\n}\n\n\/\/ NewProceedNicoliveEvent makes new ProceedNicoliveEvent and returns it.\nfunc NewProceedNicoliveEvent(cv *CommentViewer) *ProceedNicoliveEvent {\n\tudb, err := nicolive.NewUserDB(filepath.Join(App.SavePath, userDBFileName))\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn &ProceedNicoliveEvent{\n\t\tcv: cv,\n\t\tuserDB: udb,\n\t}\n}\n\nfunc (p *ProceedNicoliveEvent) getUserName(id string, useAPI bool) (*nicolive.User, error) {\n\tu, err := p.userDB.Fetch(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif useAPI {\n\t\tif u == nil {\n\t\t\tu, nerr := nicolive.FetchUserInfo(id, p.cv.Ac)\n\t\t\tif nerr != nil {\n\t\t\t\treturn nil, nerr\n\t\t\t}\n\t\t\terr := p.userDB.Store(u)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn u, nil\n\t\t}\n\t}\n\treturn u, nil\n}\n\nfunc (p *ProceedNicoliveEvent) proceedComment(ev *nicolive.Event) {\n\tcm, _ := ev.Content.(nicolive.Comment)\n\tct := CtCommentGot{\n\t\tNo: cm.No,\n\t\tDate: cm.Date,\n\t\tUserID: cm.UserID,\n\t\tRaw: cm.Comment,\n\t\tIsPremium: cm.IsPremium,\n\t\tIsBroadcaster: cm.IsCommand,\n\t\tIsStaff: cm.IsStaff,\n\t\tIsAnonymity: cm.IsAnonymity,\n\t\tScore: cm.Score,\n\t}\n\n\t\/\/ user info\n\tuseAPI := p.cv.Settings.UserNameGet && cm.Date.After(p.cv.Cmm.ConnectedTm) && !cm.IsAnonymity && !cm.IsCommand\n\tu, err := p.getUserName(cm.UserID, useAPI)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif u != nil {\n\t\tct.UserName = u.Name\n\t\tct.UserThumbnailURL = u.ThumbnailURL\n\t}\n\tif cm.IsCommand {\n\t\tct.UserName = \"Broadcaster\"\n\t}\n\n\tct.Comment = strings.Replace(cm.Comment, \"\\n\", \"<br>\", -1)\n\tp.cv.Evch <- NewMessageMust(DomainComment, CommCommentGot, ct)\n}\n\n\/\/ ProceedNicoEvent will receive events and emits it.\nfunc (p *ProceedNicoliveEvent) ProceedNicoEvent(ev *nicolive.Event) {\n\tswitch ev.Type {\n\tcase nicolive.EventTypeCommentGot:\n\t\tp.proceedComment(ev)\n\n\tcase nicolive.EventTypeCommentOpen:\n\t\tp.cv.Evch <- NewMessageMust(DomainUI, CommUIClearComments, nil)\n\t\tlv := ev.Content.(*nicolive.LiveWaku)\n\t\tlog.Println(lv)\n\t\tct := CtNagomeBroadOpen{\n\t\t\tBroadID: lv.BroadID,\n\t\t\tTitle: lv.Stream.Title,\n\t\t\tDescription: lv.Stream.Description,\n\t\t\tCommunityID: lv.Stream.CommunityID,\n\t\t\tOwnerID: lv.Stream.OwnerID,\n\t\t\tOwnerName: lv.Stream.OwnerName,\n\t\t\tOwnerBroad: lv.OwnerBroad,\n\t\t\tOpenTime: lv.Stream.OpenTime,\n\t\t\tStartTime: lv.Stream.StartTime,\n\t\t\tEndTime: lv.Stream.EndTime,\n\t\t}\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadOpen, ct)\n\n\tcase nicolive.EventTypeCommentClose:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadClose, nil)\n\n\tcase nicolive.EventTypeHeartBeatGot:\n\t\thb := ev.Content.(*nicolive.HeartbeatValue)\n\t\tct := CtNagomeBroadInfo{hb.WatchCount, hb.CommentCount}\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadInfo, ct)\n\n\tcase nicolive.EventTypeCommentSend:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeCommentSend, nil)\n\n\tcase nicolive.EventTypeCommentErr:\n\t\tnerr := ev.Content.(nicolive.Error)\n\t\tp.cv.CreateEvNewDialog(CtUIDialogTypeWarn, nerr.TypeString(), nerr.Description())\n\n\tcase nicolive.EventTypeAntennaOpen:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeAntennaOpen, nil)\n\n\tcase nicolive.EventTypeAntennaClose:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeAntennaClose, nil)\n\n\tcase nicolive.EventTypeAntennaErr:\n\t\tlog.Println(ev)\n\n\tcase nicolive.EventTypeAntennaGot:\n\t\tai := ev.Content.(*nicolive.AntennaItem)\n\t\tct := CtAntennaGot{ai.BroadID, ai.CommunityID, ai.UserID}\n\t\tp.cv.Evch <- NewMessageMust(DomainAntenna, CommAntennaGot, ct)\n\n\tdefault:\n\t\tlog.Println(ev)\n\t}\n}\n<commit_msg>limit on getting user name using API<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/diginatu\/nagome\/nicolive\"\n)\n\nconst (\n\tuserNameAPITimesAMinute = 6\n)\n\n\/\/ ProceedNicoliveEvent is struct for proceeding nico events from nicolive packeage.\ntype ProceedNicoliveEvent struct {\n\tcv *CommentViewer\n\tuserDB *nicolive.UserDB\n\tuserNameAPITimes int\n\tuserNameAPIFastTime time.Time\n}\n\n\/\/ NewProceedNicoliveEvent makes new ProceedNicoliveEvent and returns it.\nfunc NewProceedNicoliveEvent(cv *CommentViewer) *ProceedNicoliveEvent {\n\tudb, err := nicolive.NewUserDB(filepath.Join(App.SavePath, userDBFileName))\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn &ProceedNicoliveEvent{\n\t\tcv: cv,\n\t\tuserDB: udb,\n\t}\n}\n\nfunc (p *ProceedNicoliveEvent) getUserName(id string, useAPI bool) (*nicolive.User, error) {\n\tu, err := p.userDB.Fetch(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u != nil {\n\t\treturn u, nil\n\t}\n\n\tif useAPI {\n\t\t\/\/ reset API limit\n\t\tif time.Now().After(p.userNameAPIFastTime.Add(time.Minute)) {\n\t\t\tp.userNameAPIFastTime = time.Now()\n\t\t\tp.userNameAPITimes = userNameAPITimesAMinute\n\t\t}\n\t\tlog.Println(p.userNameAPITimes)\n\t\tif p.userNameAPITimes > 0 {\n\t\t\tu, nerr := nicolive.FetchUserInfo(id, p.cv.Ac)\n\t\t\tif nerr != nil {\n\t\t\t\treturn nil, nerr\n\t\t\t}\n\t\t\tp.userNameAPITimes--\n\t\t\terr := p.userDB.Store(u)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn u, nil\n\t\t}\n\t}\n\treturn u, nil\n}\n\nfunc (p *ProceedNicoliveEvent) proceedComment(ev *nicolive.Event) {\n\tcm, _ := ev.Content.(nicolive.Comment)\n\tct := CtCommentGot{\n\t\tNo: cm.No,\n\t\tDate: cm.Date,\n\t\tUserID: cm.UserID,\n\t\tRaw: cm.Comment,\n\t\tIsPremium: cm.IsPremium,\n\t\tIsBroadcaster: cm.IsCommand,\n\t\tIsStaff: cm.IsStaff,\n\t\tIsAnonymity: cm.IsAnonymity,\n\t\tScore: cm.Score,\n\t}\n\n\t\/\/ user info\n\tuseAPI := p.cv.Settings.UserNameGet && cm.Date.After(p.cv.Cmm.ConnectedTm) && !cm.IsAnonymity && !cm.IsCommand\n\tu, err := p.getUserName(cm.UserID, useAPI)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif u != nil {\n\t\tct.UserName = u.Name\n\t\tct.UserThumbnailURL = u.ThumbnailURL\n\t}\n\tif cm.IsCommand {\n\t\tct.UserName = \"Broadcaster\"\n\t}\n\n\tct.Comment = strings.Replace(cm.Comment, \"\\n\", \"<br>\", -1)\n\tp.cv.Evch <- NewMessageMust(DomainComment, CommCommentGot, ct)\n}\n\n\/\/ ProceedNicoEvent will receive events and emits it.\nfunc (p *ProceedNicoliveEvent) ProceedNicoEvent(ev *nicolive.Event) {\n\tswitch ev.Type {\n\tcase nicolive.EventTypeCommentGot:\n\t\tp.proceedComment(ev)\n\n\tcase nicolive.EventTypeCommentOpen:\n\t\tp.cv.Evch <- NewMessageMust(DomainUI, CommUIClearComments, nil)\n\t\tlv := ev.Content.(*nicolive.LiveWaku)\n\t\tlog.Println(lv)\n\t\tct := CtNagomeBroadOpen{\n\t\t\tBroadID: lv.BroadID,\n\t\t\tTitle: lv.Stream.Title,\n\t\t\tDescription: lv.Stream.Description,\n\t\t\tCommunityID: lv.Stream.CommunityID,\n\t\t\tOwnerID: lv.Stream.OwnerID,\n\t\t\tOwnerName: lv.Stream.OwnerName,\n\t\t\tOwnerBroad: lv.OwnerBroad,\n\t\t\tOpenTime: lv.Stream.OpenTime,\n\t\t\tStartTime: lv.Stream.StartTime,\n\t\t\tEndTime: lv.Stream.EndTime,\n\t\t}\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadOpen, ct)\n\n\tcase nicolive.EventTypeCommentClose:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadClose, nil)\n\n\tcase nicolive.EventTypeHeartBeatGot:\n\t\thb := ev.Content.(*nicolive.HeartbeatValue)\n\t\tct := CtNagomeBroadInfo{hb.WatchCount, hb.CommentCount}\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeBroadInfo, ct)\n\n\tcase nicolive.EventTypeCommentSend:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeCommentSend, nil)\n\n\tcase nicolive.EventTypeCommentErr:\n\t\tnerr := ev.Content.(nicolive.Error)\n\t\tp.cv.CreateEvNewDialog(CtUIDialogTypeWarn, nerr.TypeString(), nerr.Description())\n\n\tcase nicolive.EventTypeAntennaOpen:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeAntennaOpen, nil)\n\n\tcase nicolive.EventTypeAntennaClose:\n\t\tp.cv.Evch <- NewMessageMust(DomainNagome, CommNagomeAntennaClose, nil)\n\n\tcase nicolive.EventTypeAntennaErr:\n\t\tlog.Println(ev)\n\n\tcase nicolive.EventTypeAntennaGot:\n\t\tai := ev.Content.(*nicolive.AntennaItem)\n\t\tct := CtAntennaGot{ai.BroadID, ai.CommunityID, ai.UserID}\n\t\tp.cv.Evch <- NewMessageMust(DomainAntenna, CommAntennaGot, ct)\n\n\tdefault:\n\t\tlog.Println(ev)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package html2text\n\nimport (\n\t\"regexp\"\n\t\"testing\"\n)\n\nfunc TestText(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput string\n\t\texpr string\n\t}{\n\t\t{\n\t\t\t`<li>\n <a href=\"\/new\" data-ga-click=\"Header, create new repository, icon:repo\"><span class=\"octicon octicon-repo\"><\/span> New repository<\/a>\n<\/li>`,\n\t\t\t`\/new`,\n\t\t},\n\t\t{\n\t\t\t`hi\n\n\t\t\t<br>\n\t\n\thello <a href=\"https:\/\/google.com\">google<\/a>\n\t<br><br>\n\ttest<p>List:<\/p>\n\n\t<ul>\n\t\t<li><a href=\"foo\">Foo<\/a><\/li>\n\t\t<li><a href=\"http:\/\/www.microshwhat.com\/bar\/soapy\">Barsoap<\/a><\/li>\n <li>Baz<\/li>\n\t<\/ul>\n`,\n\t\t\t`hi\n\nhello\nhttps:\/\/google.com\ntest\n\nList:\n\nfoo\nhttp:\/\/www.microshwhat.com\/bar\/soapy\n\nBaz`,\n\t\t},\n\t\t\/\/ Malformed input html.\n\t\t{\n\t\t\t`hi\n\n\t\t\thello <a href=\"https:\/\/google.com\">google<\/a>\n\n\t\t\ttest<p>List:<\/p>\n\n\t\t\t<ul>\n\t\t\t\t<li><a href=\"foo\">Foo<\/a>\n\t\t\t\t<li><a href=\"\/\n\t\t bar\/baz\">Bar<\/a>\n\t\t <li>Baz<\/li>\n\t\t\t<\/ul>\n\t\t`,\n\t\t\t`hi hello\nhttps:\/\/google.com\ntest\n\nList:\n\nfoo\n\/\\n[ \\t]+bar\/baz\n\nBaz`,\n\t\t},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\ttext, err := FromString(testCase.input)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif expr := regexp.MustCompile(testCase.expr); !expr.MatchString(text) {\n\t\t\tt.Errorf(\"Input did not match expression\\nInput:\\n>>>>\\n%s\\n<<<<\\n\\nOutput:\\n>>>>\\n%s\\n<<<<\\n\\nExpression: %s\\n\", testCase.input, text, expr.String())\n\t\t} else {\n\t\t\tt.Logf(\"input:\\n\\n%s\\n\\n\\n\\noutput:\\n\\n%s\\n\", testCase.input, text)\n\t\t}\n\t}\n}\n<commit_msg>Update tests<commit_after>package html2text\n\nimport (\n\t\"testing\"\n)\n\nfunc TestText(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput string\n\t\texpr string\n\t}{\n\t\t{\n\t\t\t`<li>\n <a href=\"\/new\" data-ga-click=\"Header, create new repository, icon:repo\"><span class=\"octicon octicon-repo\"><\/span> New repository<\/a>\n<\/li>`,\n\t\t\t\"* (http:\/\/www.microshwhat.com\/bar\/soapy\/new)[New repository]\",\n\t\t},\n\t\t{\n\t\t\t`hi\n\n\t\t\t<br>\n\n\thello <a href=\"https:\/\/google.com\">google<\/a>\n\t<br><br>\n\ttest<p>List:<\/p>\n\n\t<ul>\n\t\t<li><a href=\"foo\">Foo<\/a><\/li>\n\t\t<li><a href=\"http:\/\/www.microshwhat.com\/bar\/soapy\">Barsoap<\/a><\/li>\n <li>Baz<\/li>\n\t<\/ul>\n`,\n\t\t\t\"hi \\n\\n hello (https:\/\/google.com)[google] \\n\\n test\\n\\nList: \\n\\n * (foo)[Foo] \\n * (http:\/\/www.microshwhat.com\/bar\/soapy)[Barsoap] \\n * Baz\",\n\t\t},\n\t\t\/\/ Malformed input html.\n\t\t{\n\t\t\t`hi\n\n\t\t\thello <a href=\"https:\/\/google.com\">google<\/a>\n\n\t\t\ttest<p>List:<\/p>\n\n\t\t\t<ul>\n\t\t\t\t<li><a href=\"foo\">Foo<\/a>\n\t\t\t\t<li><a href=\"\/\n\t\t bar\/baz\">Bar<\/a>\n\t\t <li>Baz<\/li>\n\t\t\t<\/ul>\n\t\t`,\n\t\t\t\"hi hello (https:\/\/google.com)[google] test\\n\\nList: \\n\\n * (foo)[Foo] \\n * (http:\/\/www.microshwhat.com\/bar\/soapy\/bar\/baz)[Bar] \\n * Baz\",\n\t\t},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\ttext, err := FromString(testCase.input, \"http:\/\/www.microshwhat.com\/bar\/soapy\", OmitClasses|OmitIds|OmitRoles)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tif testCase.expr != text {\n\t\t\tt.Errorf(\"Input did not match expression\\nInput:\\n>>>>\\n@@%s@@\\n<<<<\\n\\nOutput:\\n>>>>\\n##%s##\\n<<<<\\n\\nExpression: ^^%s^^\\n\", testCase.input, text, testCase.expr)\n\t\t} else {\n\t\t\tt.Logf(\"input:\\n\\n%s\\n\\n\\n\\noutput:\\n\\n%s\\n\", testCase.input, text)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package framesweb\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/dustin\/frames\"\n)\n\n\/\/ A RoundTripper over frames.\ntype FramesRoundTripper struct {\n\tDialer frames.ChannelDialer\n}\n\nfunc (f *FramesRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\tc, err := f.Dialer.Dial()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\n\terr = req.Write(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := bufio.NewReader(c)\n\treturn http.ReadResponse(b, req)\n}\n\n\/\/ Get an HTTP client that maintains a persistent frames connection.\nfunc NewFramesClient(n, addr string) (*http.Client, error) {\n\tc, err := net.Dial(n, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfrt := &FramesRoundTripper{\n\t\tDialer: frames.NewClient(c),\n\t}\n\n\thc := &http.Client{\n\t\tTransport: frt,\n\t}\n\n\treturn hc, nil\n}\n\n\/\/ Close the frames client.\nfunc CloseFramesClient(hc *http.Client) error {\n\tif frt, ok := hc.Transport.(*FramesRoundTripper); ok {\n\t\treturn frt.Dialer.Close()\n\t}\n\treturn errors.New(\"Not a frames client\")\n}\n<commit_msg>Close the channel when the client closes the body.<commit_after>package framesweb\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/dustin\/frames\"\n)\n\n\/\/ A RoundTripper over frames.\ntype FramesRoundTripper struct {\n\tDialer frames.ChannelDialer\n\terr error\n}\n\ntype channelBodyCloser struct {\n\trc io.ReadCloser\n\tc io.Closer\n}\n\nfunc (c *channelBodyCloser) Read(b []byte) (int, error) {\n\treturn c.rc.Read(b)\n}\n\nfunc (c *channelBodyCloser) Close() error {\n\tc.rc.Close()\n\treturn c.c.Close()\n}\n\nfunc (f *FramesRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif f.err != nil {\n\t\treturn nil, f.err\n\t}\n\n\tc, err := f.Dialer.Dial()\n\tif err != nil {\n\t\tf.err = err\n\t\treturn nil, err\n\t}\n\n\terr = req.Write(c)\n\tif err != nil {\n\t\tf.err = err\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\n\tb := bufio.NewReader(c)\n\tres, err := http.ReadResponse(b, req)\n\tif err != nil {\n\t\tf.err = err\n\t}\n\tres.Body = &channelBodyCloser{res.Body, c}\n\treturn res, err\n}\n\n\/\/ Get an HTTP client that maintains a persistent frames connection.\nfunc NewFramesClient(n, addr string) (*http.Client, error) {\n\tc, err := net.Dial(n, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfrt := &FramesRoundTripper{\n\t\tDialer: frames.NewClient(c),\n\t}\n\n\thc := &http.Client{\n\t\tTransport: frt,\n\t}\n\n\treturn hc, nil\n}\n\n\/\/ Close the frames client.\nfunc CloseFramesClient(hc *http.Client) error {\n\tif frt, ok := hc.Transport.(*FramesRoundTripper); ok {\n\t\treturn frt.Dialer.Close()\n\t}\n\treturn errors.New(\"Not a frames client\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tnxcli \"github.com\/jaracil\/nxcli\"\n\tnexus \"github.com\/jaracil\/nxcli\/nxcore\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tapp = kingpin.New(\"cli\", \"Nexus command line interface\")\n\tserverIP = app.Flag(\"server\", \"Server address.\").Default(\"127.0.0.1:1717\").Short('s').String()\n\ttimeout = app.Flag(\"timeout\", \"Execution timeout\").Default(\"60\").Short('t').Int()\n\tuser = app.Flag(\"user\", \"Nexus username\").Short('u').Default(\"test\").String()\n\tpass = app.Flag(\"pass\", \"Nexus password\").Default(\"test\").Short('p').String()\n\n\t\/\/\/\n\n\tshell = app.Command(\"shell\", \"Interactive shell\")\n\n\t\/\/\/\n\n\tlogin = app.Command(\"login\", \"Tests to login with an username\/password and exits\")\n\tloginName = login.Arg(\"username\", \"username\").Required().String()\n\tloginPass = login.Arg(\"password\", \"password\").Required().String()\n\n\t\/\/\/\n\n\tpush = app.Command(\"push\", \"Execute a task.push rpc call on Nexus\")\n\tpushMethod = push.Arg(\"method\", \"Method to call\").Required().String()\n\tpushParams = push.Arg(\"params\", \"parameters\").StringMap()\n\n\tpull = app.Command(\"pull\", \"Execute a task.pull rpc call on Nexus\")\n\tpullMethod = pull.Arg(\"prefix\", \"Method to call\").Required().String()\n\n\t\/\/\/\n\n\tpipeCmd = app.Command(\"pipe\", \"Pipe tasks\")\n\n\tpipeRead = pipeCmd.Command(\"read\", \"Create and read from a pipe. It will be destroyed on exit\")\n\n\tpipeWrite = pipeCmd.Command(\"open\", \"Open a pipe and send data\")\n\tpipeWriteId = pipeWrite.Arg(\"pipeId\", \"ID of the pipe to write to\").Required().String()\n\tpipeWriteData = pipeWrite.Arg(\"data\", \"Data to write to the pipe\").Required().Strings()\n\n\t\/\/\/\n\n\tuserCmd = app.Command(\"user\", \"user management\")\n\n\tuserCreate = userCmd.Command(\"create\", \"Create a new user\")\n\tuserCreateName = userCreate.Arg(\"username\", \"username\").Required().String()\n\tuserCreatePass = userCreate.Arg(\"password\", \"password\").Required().String()\n\n\tuserDelete = userCmd.Command(\"delete\", \"Delete an user\")\n\tuserDeleteName = userDelete.Arg(\"username\", \"username\").Required().String()\n\n\tuserPass = userCmd.Command(\"passwd\", \"Change an user password\")\n\tuserPassName = userPass.Arg(\"username\", \"username\").Required().String()\n\tuserPassPass = userPass.Arg(\"password\", \"password\").Required().String()\n\n\t\/\/\/\n\n\ttagsCmd = app.Command(\"tags\", \"tags management\")\n\n\ttagsSet = tagsCmd.Command(\"set\", \"Set tags for an user on a prefix. Tags is a map[string]string in the form tag:value\")\n\ttagsSetUser = tagsSet.Arg(\"user\", \"user\").Required().String()\n\ttagsSetPrefix = tagsSet.Arg(\"prefix\", \"prefix\").Required().String()\n\ttagsSetTags = tagsSet.Arg(\"tags\", \"tag:value\").Required().StringMap()\n\n\ttagsDel = tagsCmd.Command(\"del\", \"delete tags for an user on a prefix. Tags is a list of space separated strings\")\n\ttagsDelUser = tagsDel.Arg(\"user\", \"user\").Required().String()\n\ttagsDelPrefix = tagsDel.Arg(\"prefix\", \"prefix\").Required().String()\n\ttagsDelTags = tagsDel.Arg(\"tags\", \"tag:value\").Required().Strings()\n\n\t\/\/\n\n\tchanCmd = app.Command(\"topic\", \"Topics management\")\n\n\tchanSub = chanCmd.Command(\"sub\", \"Subscribe a pipe to a topic\")\n\tchanSubPipe = chanSub.Arg(\"pipe\", \"pipe id to subscribe\").Required().String()\n\tchanSubChan = chanSub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\n\tchanUnsub = chanCmd.Command(\"unsub\", \"Unsubscribe a pipe from a topic\")\n\tchanUnsubPipe = chanUnsub.Arg(\"pipe\", \"pipe id to subscribe\").Required().String()\n\tchanUnsubChan = chanUnsub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\n\tchanPub = chanCmd.Command(\"pub\", \"Publish a message to a topic\")\n\tchanPubChan = chanPub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\tchanPubMsg = chanPub.Arg(\"data\", \"Data to send\").Required().Strings()\n)\n\nfunc main() {\n\n\t\/\/ Enable -h as HelpFlag\n\tapp.HelpFlag.Short('h')\n\tapp.UsageTemplate(kingpin.CompactUsageTemplate)\n\n\tparsed := kingpin.MustParse(app.Parse(os.Args[1:]))\n\n\tif nc, err := nxcli.Dial(*serverIP, nil); err == nil {\n\t\tif _, err := nc.Login(*user, *pass); err != nil {\n\t\t\tlog.Println(\"Couldn't login:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Logged as\", *user)\n\t\t}\n\n\t\texecCmd(nc, parsed)\n\t} else {\n\t\tlog.Println(\"Cannot connect to\", *serverIP)\n\t}\n}\n\nfunc execCmd(nc *nexus.NexusConn, parsed string) {\n\tswitch parsed {\n\n\tcase login.FullCommand():\n\t\tif _, err := nc.Login(*loginName, *loginPass); err != nil {\n\t\t\tlog.Println(\"Couldn't login:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Logged as\", *loginName)\n\t\t\tuser = loginName\n\t\t}\n\n\tcase push.FullCommand():\n\t\tif ret, err := nc.TaskPush(*pushMethod, *pushParams, time.Second*time.Duration(*timeout)); err != nil {\n\t\t\tlog.Println(\"Error:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tb, _ := json.MarshalIndent(ret, \"\", \" \")\n\t\t\tlog.Println(\"Result:\")\n\t\t\tif s, err := strconv.Unquote(string(b)); err == nil {\n\t\t\t\tfmt.Println(s)\n\t\t\t} else {\n\t\t\t\tfmt.Println(string(b))\n\t\t\t}\n\t\t}\n\n\tcase pull.FullCommand():\n\t\tlog.Println(\"Pulling\", *pullMethod)\n\t\tret, err := nc.TaskPull(*pullMethod, time.Second*time.Duration(*timeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tb, _ := json.MarshalIndent(ret, \"\", \" \")\n\t\t\tfmt.Println(string(b))\n\t\t}\n\n\t\tfmt.Printf(\"[R]esult or [E]rror? \")\n\n\t\tstdin := bufio.NewScanner(os.Stdin)\n\n\t\tif stdin.Scan() && strings.HasPrefix(strings.ToLower(stdin.Text()), \"e\") {\n\t\t\tfmt.Printf(\"Code: \")\n\t\t\tstdin.Scan()\n\t\t\tcode, _ := strconv.Atoi(stdin.Text())\n\n\t\t\tfmt.Printf(\"Message: \")\n\t\t\tstdin.Scan()\n\t\t\tmsg := stdin.Text()\n\n\t\t\tfmt.Printf(\"Data: \")\n\t\t\tstdin.Scan()\n\t\t\tdata := stdin.Text()\n\n\t\t\tret.SendError(code, msg, data)\n\n\t\t} else {\n\t\t\tfmt.Printf(\"Result: \")\n\t\t\tif stdin.Scan() {\n\t\t\t\tret.SendResult(stdin.Text())\n\t\t\t} else {\n\t\t\t\tret.SendResult(\"dummy response\")\n\t\t\t}\n\t\t}\n\n\tcase pipeWrite.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *pipeWriteData = []string{} }()\n\n\t\tif pipe, err := nc.PipeOpen(*pipeWriteId); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\n\t\t\tif _, err := pipe.Write(*pipeWriteData); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Sent!\")\n\t\t\t}\n\t\t}\n\n\tcase pipeRead.FullCommand():\n\t\tpopts := nexus.PipeOpts{Length: 100}\n\n\t\tif pipe, err := nc.PipeCreate(&popts); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Pipe created:\", pipe.Id())\n\t\t\tfor {\n\t\t\t\tif pdata, err := pipe.Read(10, time.Second*time.Duration(*timeout)); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t} else {\n\t\t\t\t\tfor _, msg := range pdata.Msgs {\n\t\t\t\t\t\tlog.Println(\"Got:\", msg.Msg, msg.Count)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"There are %d messages left in the pipe and %d drops\\n\", pdata.Waiting, pdata.Drops)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase userCreate.FullCommand():\n\t\tlog.Printf(\"Creating user \\\"%s\\\" with password \\\"%s\\\"\", *userCreateName, *userCreatePass)\n\t\tif _, err := nc.UserCreate(*userCreateName, *userCreatePass); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase userDelete.FullCommand():\n\t\tlog.Printf(\"Deleting user \\\"%s\\\"\", *userDeleteName)\n\n\t\tif _, err := nc.UserDelete(*userDeleteName); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase userPass.FullCommand():\n\t\tif _, err := nc.UserSetPass(*userPassName, *userPassPass); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase tagsSet.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *tagsSetTags = make(map[string]string) }()\n\n\t\tvar tags map[string]interface{}\n\t\tif b, err := json.Marshal(*tagsSetTags); err == nil {\n\t\t\tif json.Unmarshal(b, &tags) != nil {\n\t\t\t\tlog.Println(\"Error parsing tags\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Setting tags: %v on %s@%s\", tags, *tagsSetUser, *tagsSetPrefix)\n\t\tif _, err := nc.UserSetTags(*tagsSetUser, *tagsSetPrefix, tags); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase tagsDel.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *tagsDelTags = []string{} }()\n\n\t\tif _, err := nc.UserDelTags(*tagsDelUser, *tagsDelPrefix, *tagsDelTags); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase shell.FullCommand():\n\n\t\targs := os.Args[1:]\n\t\tfor k, v := range args {\n\t\t\tif v == shell.FullCommand() {\n\t\t\t\targs = append(args[:k], args[k+1:]...)\n\t\t\t}\n\t\t}\n\n\t\ts := bufio.NewScanner(os.Stdin)\n\t\tfmt.Printf(\"%s@%s >> \", *user, *serverIP)\n\t\tfor s.Scan() {\n\t\t\tcmd, err := app.Parse(append(args, strings.Split(s.Text(), \" \")...))\n\t\t\tif err == nil {\n\t\t\t\tif cmd != shell.FullCommand() {\n\t\t\t\t\tparsed := kingpin.MustParse(cmd, err)\n\t\t\t\t\texecCmd(nc, parsed)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s@%s >> \", *user, *serverIP)\n\t\t}\n\n\t\tif err := s.Err(); err != nil {\n\t\t\tlog.Fatalln(\"reading standard input:\", err)\n\t\t}\n\n\tcase chanSub.FullCommand():\n\t\tif pipe, err := nc.PipeOpen(*chanSubPipe); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tif _, err := nc.TopicSubscribe(pipe, *chanSubChan); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"OK\")\n\t\t\t}\n\t\t}\n\n\tcase chanUnsub.FullCommand():\n\t\tif pipe, err := nc.PipeOpen(*chanSubPipe); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tif _, err := nc.TopicUnsubscribe(pipe, *chanUnsubChan); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"OK\")\n\t\t\t}\n\t\t}\n\n\tcase chanPub.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *chanPubMsg = []string{} }()\n\n\t\tfmt.Println(*chanPubMsg)\n\t\tif res, err := nc.TopicPublish(*chanPubChan, *chanPubMsg); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(res)\n\n\t\t}\n\t}\n}\n<commit_msg>demos, cli: Fix adding tags with @<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tnxcli \"github.com\/jaracil\/nxcli\"\n\tnexus \"github.com\/jaracil\/nxcli\/nxcore\"\n\t\"github.com\/nayarsystems\/kingpin\"\n)\n\nvar (\n\tapp = kingpin.New(\"cli\", \"Nexus command line interface\")\n\tserverIP = app.Flag(\"server\", \"Server address.\").Default(\"127.0.0.1:1717\").Short('s').String()\n\ttimeout = app.Flag(\"timeout\", \"Execution timeout\").Default(\"60\").Short('t').Int()\n\tuser = app.Flag(\"user\", \"Nexus username\").Short('u').Default(\"test\").String()\n\tpass = app.Flag(\"pass\", \"Nexus password\").Default(\"test\").Short('p').String()\n\n\t\/\/\/\n\n\tshell = app.Command(\"shell\", \"Interactive shell\")\n\n\t\/\/\/\n\n\tlogin = app.Command(\"login\", \"Tests to login with an username\/password and exits\")\n\tloginName = login.Arg(\"username\", \"username\").Required().String()\n\tloginPass = login.Arg(\"password\", \"password\").Required().String()\n\n\t\/\/\/\n\n\tpush = app.Command(\"push\", \"Execute a task.push rpc call on Nexus\")\n\tpushMethod = push.Arg(\"method\", \"Method to call\").Required().String()\n\tpushParams = push.Arg(\"params\", \"parameters\").StringMap()\n\n\tpull = app.Command(\"pull\", \"Execute a task.pull rpc call on Nexus\")\n\tpullMethod = pull.Arg(\"prefix\", \"Method to call\").Required().String()\n\n\t\/\/\/\n\n\tpipeCmd = app.Command(\"pipe\", \"Pipe tasks\")\n\n\tpipeRead = pipeCmd.Command(\"read\", \"Create and read from a pipe. It will be destroyed on exit\")\n\n\tpipeWrite = pipeCmd.Command(\"open\", \"Open a pipe and send data\")\n\tpipeWriteId = pipeWrite.Arg(\"pipeId\", \"ID of the pipe to write to\").Required().String()\n\tpipeWriteData = pipeWrite.Arg(\"data\", \"Data to write to the pipe\").Required().Strings()\n\n\t\/\/\/\n\n\tuserCmd = app.Command(\"user\", \"user management\")\n\n\tuserCreate = userCmd.Command(\"create\", \"Create a new user\")\n\tuserCreateName = userCreate.Arg(\"username\", \"username\").Required().String()\n\tuserCreatePass = userCreate.Arg(\"password\", \"password\").Required().String()\n\n\tuserDelete = userCmd.Command(\"delete\", \"Delete an user\")\n\tuserDeleteName = userDelete.Arg(\"username\", \"username\").Required().String()\n\n\tuserPass = userCmd.Command(\"passwd\", \"Change an user password\")\n\tuserPassName = userPass.Arg(\"username\", \"username\").Required().String()\n\tuserPassPass = userPass.Arg(\"password\", \"password\").Required().String()\n\n\t\/\/\/\n\n\ttagsCmd = app.Command(\"tags\", \"tags management\")\n\n\ttagsSet = tagsCmd.Command(\"set\", \"Set tags for an user on a prefix. Tags is a map like 'tag:value tag2:value2'\")\n\ttagsSetUser = tagsSet.Arg(\"user\", \"user\").Required().String()\n\ttagsSetPrefix = tagsSet.Arg(\"prefix\", \"prefix\").Required().String()\n\ttagsSetTags = tagsSet.Arg(\"tags\", \"tag:value\").StringMapIface()\n\n\ttagsSetJ = tagsCmd.Command(\"setj\", \"Set tags for an user on a prefix. Tags is a json dict like: { 'tag': value }\")\n\ttagsSetJUser = tagsSetJ.Arg(\"user\", \"user\").Required().String()\n\ttagsSetJPrefix = tagsSetJ.Arg(\"prefix\", \"prefix\").Required().String()\n\ttagsSetJTagsJson = tagsSetJ.Arg(\"tags\", \"{'@task.push': true}\").Required().String()\n\n\ttagsDel = tagsCmd.Command(\"del\", \"delete tags for an user on a prefix. Tags is a list of space separated strings\")\n\ttagsDelUser = tagsDel.Arg(\"user\", \"user\").Required().String()\n\ttagsDelPrefix = tagsDel.Arg(\"prefix\", \"prefix\").Required().String()\n\ttagsDelTags = tagsDel.Arg(\"tags\", \"tag1 tag2 tag3\").Required().Strings()\n\n\t\/\/\n\n\tchanCmd = app.Command(\"topic\", \"Topics management\")\n\n\tchanSub = chanCmd.Command(\"sub\", \"Subscribe a pipe to a topic\")\n\tchanSubPipe = chanSub.Arg(\"pipe\", \"pipe id to subscribe\").Required().String()\n\tchanSubChan = chanSub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\n\tchanUnsub = chanCmd.Command(\"unsub\", \"Unsubscribe a pipe from a topic\")\n\tchanUnsubPipe = chanUnsub.Arg(\"pipe\", \"pipe id to subscribe\").Required().String()\n\tchanUnsubChan = chanUnsub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\n\tchanPub = chanCmd.Command(\"pub\", \"Publish a message to a topic\")\n\tchanPubChan = chanPub.Arg(\"topic\", \"Topic to subscribe to\").Required().String()\n\tchanPubMsg = chanPub.Arg(\"data\", \"Data to send\").Required().Strings()\n)\n\nfunc main() {\n\n\t\/\/ Enable -h as HelpFlag\n\tapp.HelpFlag.Short('h')\n\t\/\/\t\/app.UsageTemplate(kingpin.CompactUsageTemplate)\n\n\tparsed := kingpin.MustParse(app.Parse(os.Args[1:]))\n\n\tif nc, err := nxcli.Dial(*serverIP, nil); err == nil {\n\t\tif _, err := nc.Login(*user, *pass); err != nil {\n\t\t\tlog.Println(\"Couldn't login:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Logged as\", *user)\n\t\t}\n\n\t\texecCmd(nc, parsed)\n\t} else {\n\t\tlog.Println(\"Cannot connect to\", *serverIP)\n\t}\n}\n\nfunc execCmd(nc *nexus.NexusConn, parsed string) {\n\tswitch parsed {\n\n\tcase login.FullCommand():\n\t\tif _, err := nc.Login(*loginName, *loginPass); err != nil {\n\t\t\tlog.Println(\"Couldn't login:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Logged as\", *loginName)\n\t\t\tuser = loginName\n\t\t}\n\n\tcase push.FullCommand():\n\t\tif ret, err := nc.TaskPush(*pushMethod, *pushParams, time.Second*time.Duration(*timeout)); err != nil {\n\t\t\tlog.Println(\"Error:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tb, _ := json.MarshalIndent(ret, \"\", \" \")\n\t\t\tlog.Println(\"Result:\")\n\t\t\tif s, err := strconv.Unquote(string(b)); err == nil {\n\t\t\t\tfmt.Println(s)\n\t\t\t} else {\n\t\t\t\tfmt.Println(string(b))\n\t\t\t}\n\t\t}\n\n\tcase pull.FullCommand():\n\t\tlog.Println(\"Pulling\", *pullMethod)\n\t\tret, err := nc.TaskPull(*pullMethod, time.Second*time.Duration(*timeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error:\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tb, _ := json.MarshalIndent(ret, \"\", \" \")\n\t\t\tfmt.Println(string(b))\n\t\t}\n\n\t\tfmt.Printf(\"[R]esult or [E]rror? \")\n\n\t\tstdin := bufio.NewScanner(os.Stdin)\n\n\t\tif stdin.Scan() && strings.HasPrefix(strings.ToLower(stdin.Text()), \"e\") {\n\t\t\tfmt.Printf(\"Code: \")\n\t\t\tstdin.Scan()\n\t\t\tcode, _ := strconv.Atoi(stdin.Text())\n\n\t\t\tfmt.Printf(\"Message: \")\n\t\t\tstdin.Scan()\n\t\t\tmsg := stdin.Text()\n\n\t\t\tfmt.Printf(\"Data: \")\n\t\t\tstdin.Scan()\n\t\t\tdata := stdin.Text()\n\n\t\t\tret.SendError(code, msg, data)\n\n\t\t} else {\n\t\t\tfmt.Printf(\"Result: \")\n\t\t\tif stdin.Scan() {\n\t\t\t\tret.SendResult(stdin.Text())\n\t\t\t} else {\n\t\t\t\tret.SendResult(\"dummy response\")\n\t\t\t}\n\t\t}\n\n\tcase pipeWrite.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *pipeWriteData = []string{} }()\n\n\t\tif pipe, err := nc.PipeOpen(*pipeWriteId); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\n\t\t\tif _, err := pipe.Write(*pipeWriteData); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Sent!\")\n\t\t\t}\n\t\t}\n\n\tcase pipeRead.FullCommand():\n\t\tpopts := nexus.PipeOpts{Length: 100}\n\n\t\tif pipe, err := nc.PipeCreate(&popts); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Pipe created:\", pipe.Id())\n\t\t\tfor {\n\t\t\t\tif pdata, err := pipe.Read(10, time.Second*time.Duration(*timeout)); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t} else {\n\t\t\t\t\tfor _, msg := range pdata.Msgs {\n\t\t\t\t\t\tlog.Println(\"Got:\", msg.Msg, msg.Count)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"There are %d messages left in the pipe and %d drops\\n\", pdata.Waiting, pdata.Drops)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase userCreate.FullCommand():\n\t\tlog.Printf(\"Creating user \\\"%s\\\" with password \\\"%s\\\"\", *userCreateName, *userCreatePass)\n\t\tif _, err := nc.UserCreate(*userCreateName, *userCreatePass); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase userDelete.FullCommand():\n\t\tlog.Printf(\"Deleting user \\\"%s\\\"\", *userDeleteName)\n\n\t\tif _, err := nc.UserDelete(*userDeleteName); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase userPass.FullCommand():\n\t\tif _, err := nc.UserSetPass(*userPassName, *userPassPass); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase tagsSet.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *tagsSetTags = make(map[string]interface{}) }()\n\n\t\tvar tags map[string]interface{}\n\t\tif b, err := json.Marshal(*tagsSetTags); err == nil {\n\t\t\tif json.Unmarshal(b, &tags) != nil {\n\t\t\t\tlog.Println(\"Error parsing tags\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Setting tags: %v on %s@%s\", tags, *tagsSetUser, *tagsSetPrefix)\n\t\tif _, err := nc.UserSetTags(*tagsSetUser, *tagsSetPrefix, tags); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase tagsSetJ.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\n\t\tvar tags map[string]interface{}\n\t\tif json.Unmarshal([]byte(*tagsSetJTagsJson), &tags) != nil {\n\t\t\tlog.Println(\"Error parsing tags json:\", *tagsSetJTagsJson)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Setting tags: %v on %s@%s\", tags, *tagsSetJUser, *tagsSetJPrefix)\n\t\tif _, err := nc.UserSetTags(*tagsSetJUser, *tagsSetJPrefix, tags); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase tagsDel.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *tagsDelTags = []string{} }()\n\n\t\tif _, err := nc.UserDelTags(*tagsDelUser, *tagsDelPrefix, *tagsDelTags); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"OK\")\n\t\t}\n\n\tcase shell.FullCommand():\n\n\t\targs := os.Args[1:]\n\t\tfor k, v := range args {\n\t\t\tif v == shell.FullCommand() {\n\t\t\t\targs = append(args[:k], args[k+1:]...)\n\t\t\t}\n\t\t}\n\n\t\ts := bufio.NewScanner(os.Stdin)\n\t\tfmt.Printf(\"%s@%s >> \", *user, *serverIP)\n\t\tfor s.Scan() {\n\t\t\tcmd, err := app.Parse(append(args, strings.Split(s.Text(), \" \")...))\n\t\t\tif err == nil {\n\t\t\t\tif cmd != shell.FullCommand() {\n\t\t\t\t\tparsed := kingpin.MustParse(cmd, err)\n\t\t\t\t\texecCmd(nc, parsed)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s@%s >> \", *user, *serverIP)\n\t\t}\n\n\t\tif err := s.Err(); err != nil {\n\t\t\tlog.Fatalln(\"reading standard input:\", err)\n\t\t}\n\n\tcase chanSub.FullCommand():\n\t\tif pipe, err := nc.PipeOpen(*chanSubPipe); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tif _, err := nc.TopicSubscribe(pipe, *chanSubChan); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"OK\")\n\t\t\t}\n\t\t}\n\n\tcase chanUnsub.FullCommand():\n\t\tif pipe, err := nc.PipeOpen(*chanSubPipe); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tif _, err := nc.TopicUnsubscribe(pipe, *chanUnsubChan); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlog.Println(\"OK\")\n\t\t\t}\n\t\t}\n\n\tcase chanPub.FullCommand():\n\t\t\/\/ Clean afterwards in case we are looping on shell mode\n\t\tdefer func() { *chanPubMsg = []string{} }()\n\n\t\tif res, err := nc.TopicPublish(*chanPubChan, *chanPubMsg); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Println(\"Result:\", res)\n\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package dockertest\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/docker\/go-connections\/nat\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Protocol is a string representing a protocol such as TCP or UDP.\ntype Protocol string\n\nconst (\n\t\/\/ RandomPort may be passed to a function to indicate\n\t\/\/ that a random port should be chosen.\n\tRandomPort uint16 = 0\n\n\t\/\/ ProtocolTCP represents a tcp Protocol.\n\tProtocolTCP Protocol = \"tcp\"\n\n\t\/\/ ProtocolUDP represents a udp Protocol.\n\tProtocolUDP Protocol = \"udp\"\n)\n\n\/\/ Port represents a port spec that may be used by the *Ports struct\n\/\/ to expose a port on a container.\ntype Port struct {\n\t\/\/ Private is the port exposed on the inside of the container\n\t\/\/ that you wish to map.\n\tPrivate uint16\n\n\t\/\/ Public is the publicly facing port that you wish to expose\n\t\/\/ the Private port on. Note, this may be `RandomPort` if you\n\t\/\/ wish to expose a random port instead of a specific port.\n\tPublic uint16\n\n\t\/\/ Address is the IP address to expose the port mapping\n\t\/\/ on. By default, 0.0.0.0 will be used.\n\tAddress string\n\n\t\/\/ Protocol is the network protocol to expose.\n\tProtocol Protocol\n}\n\n\/\/ Port converts the struct into a nat.Port\nfunc (s *Port) Port() (nat.Port, error) {\n\tif s.Protocol == \"\" {\n\t\treturn nat.Port(0), errors.New(\"Protocol not specified\")\n\t}\n\treturn nat.NewPort(\n\t\tstring(s.Protocol), fmt.Sprintf(\"%d\", s.Private))\n}\n\n\/\/ Binding converts the struct in a a nat.PortBinding. If no address\n\/\/ has been given 0.0.0.0 will be used for the host ip.\nfunc (s *Port) Binding() nat.PortBinding {\n\taddress := s.Address\n\tif address == \"\" {\n\t\taddress = \"0.0.0.0\"\n\t}\n\treturn nat.PortBinding{\n\t\tHostIP: address,\n\t\tHostPort: fmt.Sprintf(\"%d\", s.Public),\n\t}\n}\n\n\/\/ Ports is when to convey port exposures to RunContainer()\ntype Ports struct {\n\t\/\/ Specs is a map of internal to external ports. The external\n\t\/\/ port may be the same as the internal port or it may be the\n\t\/\/ constant `RandomPort` if you wish for Docker to chose a port\n\t\/\/ for you.\n\tSpecs []*Port\n}\n\n\/\/ Bindings will take the port specs and return port bindings that can\n\/\/ be used in by the container.HostConfig.PortBindings field.\nfunc (p *Ports) Bindings() (nat.PortMap, error) {\n\tports := nat.PortMap{}\n\n\tfor _, spec := range p.Specs {\n\t\tport, err := spec.Port()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, exists := ports[port]\n\t\tif !exists {\n\t\t\tports[port] = []nat.PortBinding{}\n\t\t}\n\n\t\tports[port] = append(ports[port], spec.Binding())\n\t}\n\n\treturn ports, nil\n}\n\n\/\/ Add is a shortcut function to add a new port spec.\nfunc (p *Ports) Add(port *Port) {\n\tp.Specs = append(p.Specs, port)\n}\n\n\/\/ NewPorts will produces a new *Ports struct that's ready to be\n\/\/ modified.\nfunc NewPorts() *Ports {\n\treturn &Ports{Specs: []*Port{}}\n}\n<commit_msg>adding json fields to Port<commit_after>package dockertest\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/docker\/go-connections\/nat\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Protocol is a string representing a protocol such as TCP or UDP.\ntype Protocol string\n\nconst (\n\t\/\/ RandomPort may be passed to a function to indicate\n\t\/\/ that a random port should be chosen.\n\tRandomPort uint16 = 0\n\n\t\/\/ ProtocolTCP represents a tcp Protocol.\n\tProtocolTCP Protocol = \"tcp\"\n\n\t\/\/ ProtocolUDP represents a udp Protocol.\n\tProtocolUDP Protocol = \"udp\"\n)\n\n\/\/ Port represents a port spec that may be used by the *Ports struct\n\/\/ to expose a port on a container.\ntype Port struct {\n\t\/\/ Private is the port exposed on the inside of the container\n\t\/\/ that you wish to map.\n\tPrivate uint16\n\n\t\/\/ Public is the publicly facing port that you wish to expose\n\t\/\/ the Private port on. Note, this may be `RandomPort` if you\n\t\/\/ wish to expose a random port instead of a specific port.\n\tPublic uint16 `json:\"port\"`\n\n\t\/\/ Address is the IP address to expose the port mapping\n\t\/\/ on. By default, 0.0.0.0 will be used.\n\tAddress string `json:\"address\"`\n\n\t\/\/ Protocol is the network protocol to expose.\n\tProtocol Protocol `json:\"protocol\"`\n}\n\n\/\/ Port converts the struct into a nat.Port\nfunc (s *Port) Port() (nat.Port, error) {\n\tif s.Protocol == \"\" {\n\t\treturn nat.Port(0), errors.New(\"Protocol not specified\")\n\t}\n\treturn nat.NewPort(\n\t\tstring(s.Protocol), fmt.Sprintf(\"%d\", s.Private))\n}\n\n\/\/ Binding converts the struct in a a nat.PortBinding. If no address\n\/\/ has been given 0.0.0.0 will be used for the host ip.\nfunc (s *Port) Binding() nat.PortBinding {\n\taddress := s.Address\n\tif address == \"\" {\n\t\taddress = \"0.0.0.0\"\n\t}\n\treturn nat.PortBinding{\n\t\tHostIP: address,\n\t\tHostPort: fmt.Sprintf(\"%d\", s.Public),\n\t}\n}\n\n\/\/ Ports is when to convey port exposures to RunContainer()\ntype Ports struct {\n\t\/\/ Specs is a map of internal to external ports. The external\n\t\/\/ port may be the same as the internal port or it may be the\n\t\/\/ constant `RandomPort` if you wish for Docker to chose a port\n\t\/\/ for you.\n\tSpecs []*Port\n}\n\n\/\/ Bindings will take the port specs and return port bindings that can\n\/\/ be used in by the container.HostConfig.PortBindings field.\nfunc (p *Ports) Bindings() (nat.PortMap, error) {\n\tports := nat.PortMap{}\n\n\tfor _, spec := range p.Specs {\n\t\tport, err := spec.Port()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, exists := ports[port]\n\t\tif !exists {\n\t\t\tports[port] = []nat.PortBinding{}\n\t\t}\n\n\t\tports[port] = append(ports[port], spec.Binding())\n\t}\n\n\treturn ports, nil\n}\n\n\/\/ Add is a shortcut function to add a new port spec.\nfunc (p *Ports) Add(port *Port) {\n\tp.Specs = append(p.Specs, port)\n}\n\n\/\/ NewPorts will produces a new *Ports struct that's ready to be\n\/\/ modified.\nfunc NewPorts() *Ports {\n\treturn &Ports{Specs: []*Port{}}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/**\nEven Fibonacci numbers\nProblem 2\n\nEach new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nBy considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.\n**\/\n\nimport (\n\t\"fmt\"\n)\nconst (\n\tLIMIT = 4000000\n)\n\nfunc main() {\n\tsum := 0\n\tc := fib_generator()\n\tfor {\n\t\tfib := <- c\n\t\tif 0 == fib % 2 {\n\t\t\tsum += fib\n\t\t} else if fib >= LIMIT {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfmt.Printf(\"the sum of the even-valued terms is %d\\n\", sum)\n}\n\nfunc fib_generator() chan int {\n c := make(chan int)\n\n go func() { \n for i, j := 0, 1; j <= LIMIT; i, j = i+j,i {\n c <- i\n }\n\n close(c)\n }()\n\n return c\n}<commit_msg>deferring close<commit_after>package main\n\n\/**\nEven Fibonacci numbers\nProblem 2\n\nEach new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nBy considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.\n**\/\n\nimport (\n\t\"fmt\"\n)\nconst (\n\tLIMIT = 4000000\n)\n\nfunc main() {\n\tsum := 0\n\tc := fib_generator()\n\tfor {\n\t\tfib := <- c\n\t\tif 0 == fib % 2 {\n\t\t\tsum += fib\n\t\t} else if fib >= LIMIT {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfmt.Printf(\"the sum of the even-valued terms is %d\\n\", sum)\n}\n\nfunc fib_generator() chan int {\n c := make(chan int)\n\n go func() { \n for i, j := 0, 1; j <= LIMIT; i, j = i+j,i {\n c <- i\n }\n defer close(c)\n }()\n\n return c\n}<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Constants for the Guest image used by ciao-down\n\nconst (\n\tguestDownloadURL = \"https:\/\/cloud-images.ubuntu.com\/xenial\/current\/xenial-server-cloudimg-amd64-disk1.img\"\n\tguestImageFriendlyName = \"Ubuntu 16.04\"\n\tdefaultHostname = \"singlevm\"\n)\n\ntype portMapping struct {\n\tHost int `yaml:\"host\"`\n\tGuest int `yaml:\"guest\"`\n}\n\nfunc (p portMapping) String() string {\n\treturn fmt.Sprintf(\"%d-%d\", p.Host, p.Guest)\n}\n\ntype mount struct {\n\tTag string `yaml:\"tag\"`\n\tSecurityModel string `yaml:\"security_model\"`\n\tPath string `yaml:\"path\"`\n}\n\nfunc (m mount) String() string {\n\treturn fmt.Sprintf(\"%s,%s,%s\", m.Tag, m.SecurityModel, m.Path)\n}\n\ntype instance struct {\n\tMemGiB int `yaml:\"mem_gib\"`\n\tCPUs int `yaml:\"cpus\"`\n\tPortMappings []portMapping `yaml:\"ports\"`\n\tMounts []mount `yaml:\"mounts\"`\n}\n\ntype instanceSpec struct {\n\tBaseImageURL string `yaml:\"base_image_url\"`\n\tBaseImageName string `yaml:\"base_image_name\"`\n\tHostname string `yaml:\"hostname\"`\n}\n\nfunc newDefaultInstance(ws *workspace, vmType string) *instance {\n\tvar in instance\n\n\tin.Mounts = []mount{\n\t\t{\n\t\t\tTag: \"hostgo\",\n\t\t\tSecurityModel: \"passthrough\",\n\t\t\tPath: ws.GoPath,\n\t\t},\n\t}\n\n\tin.PortMappings = []portMapping{\n\t\t{\n\t\t\tHost: 10022,\n\t\t\tGuest: 22,\n\t\t},\n\t}\n\n\tif vmType == CIAO {\n\t\tin.PortMappings = append(in.PortMappings, portMapping{\n\t\t\tHost: 3000,\n\t\t\tGuest: 3000,\n\t\t})\n\t}\n\n\treturn &in\n}\n\nfunc newInstanceFromFile(ws *workspace) (*instance, error) {\n\tin := &instance{}\n\n\terr := in.load(ws)\n\tif err == nil {\n\t\treturn in, nil\n\t}\n\n\t\/\/ Check for legacy state files.\n\n\tvmType := CIAO\n\tdata, err := ioutil.ReadFile(path.Join(ws.instanceDir, \"vmtype.txt\"))\n\tif err == nil {\n\t\tvmType = string(data)\n\t\tif vmType != CIAO && vmType != CLEARCONTAINERS {\n\t\t\terr := fmt.Errorf(\"Unsupported vmType %s. Should be one of \"+CIAO+\"|\"+CLEARCONTAINERS, vmType)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tuiPath := \"\"\n\tdata, err = ioutil.ReadFile(path.Join(ws.instanceDir, \"ui_path.txt\"))\n\tif err == nil {\n\t\tuiPath = string(data)\n\t}\n\n\tin = newDefaultInstance(ws, vmType)\n\n\tif uiPath != \"\" {\n\t\tin.Mounts = append(in.Mounts, mount{\n\t\t\tTag: \"hostui\",\n\t\t\tSecurityModel: \"mapped\",\n\t\t\tPath: filepath.Clean(uiPath),\n\t\t})\n\t}\n\n\treturn in, nil\n}\n\nfunc (in *instance) unmarshall(data []byte) error {\n\terr := yaml.Unmarshal(data, in)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshall instance state : %v\", err)\n\t}\n\n\tfor i := range in.Mounts {\n\t\tif err := checkDirectory(in.Mounts[i].Path); err != nil {\n\t\t\treturn fmt.Errorf(\"Bad mount %s specified: %v\",\n\t\t\t\tin.Mounts[i].Path, err)\n\t\t}\n\t}\n\n\tvar memDef, cpuDef int\n\tif in.MemGiB == 0 || in.CPUs == 0 {\n\t\tmemDef, cpuDef = getMemAndCpus()\n\t\tif in.MemGiB == 0 {\n\t\t\tin.MemGiB = memDef\n\t\t}\n\t\tif in.CPUs == 0 {\n\t\t\tin.CPUs = cpuDef\n\t\t}\n\t}\n\n\tvar i int\n\tfor i = 0; i < len(in.PortMappings); i++ {\n\t\tif in.PortMappings[i].Guest == 22 {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i == len(in.PortMappings) {\n\t\tin.PortMappings = append(in.PortMappings,\n\t\t\tportMapping{\n\t\t\t\tHost: 10022,\n\t\t\t\tGuest: 22,\n\t\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc (in *instance) unmarshallWithTemplate(ws *workspace, data string) error {\n\ttmpl, err := template.New(\"instance-data\").Parse(string(data))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to parse instance data template: %v\", err)\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, ws)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to execute instance data template: %v\", err)\n\t}\n\treturn in.unmarshall(buf.Bytes())\n}\n\nfunc (in *instance) load(ws *workspace) error {\n\tdata, err := ioutil.ReadFile(path.Join(ws.instanceDir, \"state.yaml\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to read instance state : %v\", err)\n\t}\n\n\treturn in.unmarshall(data)\n}\n\nfunc (in *instance) save(ws *workspace) error {\n\tdata, err := yaml.Marshal(in)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to marshall instance state : %v\", err)\n\t}\n\terr = ioutil.WriteFile(path.Join(ws.instanceDir, \"state.yaml\"),\n\t\tdata, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to write instance state : %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (in *instance) mergeMounts(m mounts) {\n\tmountCount := len(in.Mounts)\n\tfor _, mount := range m {\n\t\tvar i int\n\t\tfor i = 0; i < mountCount; i++ {\n\t\t\tif mount.Tag == in.Mounts[i].Tag {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif i == mountCount {\n\t\t\tin.Mounts = append(in.Mounts, mount)\n\t\t} else {\n\t\t\tin.Mounts[i] = mount\n\t\t}\n\t}\n}\n\nfunc (in *instance) mergePorts(p ports) {\n\tportCount := len(in.PortMappings)\n\tfor _, port := range p {\n\t\tvar i int\n\t\tfor i = 0; i < portCount; i++ {\n\t\t\tif port.Guest == in.PortMappings[i].Guest {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif i == portCount {\n\t\t\tin.PortMappings = append(in.PortMappings, port)\n\t\t} else {\n\t\t\tin.PortMappings[i] = port\n\t\t}\n\t}\n}\n\nfunc (in *instance) sshPort() (int, error) {\n\tfor _, p := range in.PortMappings {\n\t\tif p.Guest == 22 {\n\t\t\treturn p.Host, nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"No SSH port configured\")\n}\n\nfunc (ins *instanceSpec) unmarshall(data []byte) error {\n\terr := yaml.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshall instance specification : %v\", err)\n\t}\n\n\tif ins.BaseImageURL == \"\" {\n\t\tins.BaseImageURL = guestDownloadURL\n\t\tins.BaseImageName = guestImageFriendlyName\n\t} else {\n\t\turl, err := url.Parse(ins.BaseImageURL)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to parse url %s : %v\",\n\t\t\t\tins.BaseImageURL, err)\n\t\t}\n\t\tif ins.BaseImageName == \"\" {\n\t\t\tlastSlash := strings.LastIndex(url.Path, \"\/\")\n\t\t\tif lastSlash == -1 {\n\t\t\t\tins.BaseImageName = url.Path\n\t\t\t} else {\n\t\t\t\tins.BaseImageName = url.Path[lastSlash+1:]\n\t\t\t}\n\t\t}\n\t}\n\n\tif ins.Hostname == \"\" {\n\t\tins.Hostname = defaultHostname\n\t}\n\treturn nil\n}\n\nfunc (ins *instanceSpec) unmarshallWithTemplate(ws *workspace, data string) error {\n\ttmpl, err := template.New(\"instance-spec\").Parse(string(data))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to parse instance data template: %v\", err)\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, ws)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to execute instance data template: %v\", err)\n\t}\n\treturn ins.unmarshall(buf.Bytes())\n}\n<commit_msg>ciao-down: rename newDefaultInstance to newLegacyInstance<commit_after>\/\/\n\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Constants for the Guest image used by ciao-down\n\nconst (\n\tguestDownloadURL = \"https:\/\/cloud-images.ubuntu.com\/xenial\/current\/xenial-server-cloudimg-amd64-disk1.img\"\n\tguestImageFriendlyName = \"Ubuntu 16.04\"\n\tdefaultHostname = \"singlevm\"\n)\n\ntype portMapping struct {\n\tHost int `yaml:\"host\"`\n\tGuest int `yaml:\"guest\"`\n}\n\nfunc (p portMapping) String() string {\n\treturn fmt.Sprintf(\"%d-%d\", p.Host, p.Guest)\n}\n\ntype mount struct {\n\tTag string `yaml:\"tag\"`\n\tSecurityModel string `yaml:\"security_model\"`\n\tPath string `yaml:\"path\"`\n}\n\nfunc (m mount) String() string {\n\treturn fmt.Sprintf(\"%s,%s,%s\", m.Tag, m.SecurityModel, m.Path)\n}\n\ntype instance struct {\n\tMemGiB int `yaml:\"mem_gib\"`\n\tCPUs int `yaml:\"cpus\"`\n\tPortMappings []portMapping `yaml:\"ports\"`\n\tMounts []mount `yaml:\"mounts\"`\n}\n\ntype instanceSpec struct {\n\tBaseImageURL string `yaml:\"base_image_url\"`\n\tBaseImageName string `yaml:\"base_image_name\"`\n\tHostname string `yaml:\"hostname\"`\n}\n\n\/\/ This function creates a default instanceData object for legacy ciao-down\n\/\/ ciao VMs. These old VMs did not store information about mounts and\n\/\/ mapped ports as this information was hard-coded into ciao-down itself.\n\/\/ Consequently, when migrating one of these old VMs we need to fill in\n\/\/ the missing information.\nfunc newLegacyInstance(ws *workspace, vmType string) *instance {\n\tvar in instance\n\n\tin.Mounts = []mount{\n\t\t{\n\t\t\tTag: \"hostgo\",\n\t\t\tSecurityModel: \"passthrough\",\n\t\t\tPath: ws.GoPath,\n\t\t},\n\t}\n\n\tin.PortMappings = []portMapping{\n\t\t{\n\t\t\tHost: 10022,\n\t\t\tGuest: 22,\n\t\t},\n\t}\n\n\tif vmType == CIAO {\n\t\tin.PortMappings = append(in.PortMappings, portMapping{\n\t\t\tHost: 3000,\n\t\t\tGuest: 3000,\n\t\t})\n\t}\n\n\treturn &in\n}\n\nfunc newInstanceFromFile(ws *workspace) (*instance, error) {\n\tin := &instance{}\n\n\terr := in.load(ws)\n\tif err == nil {\n\t\treturn in, nil\n\t}\n\n\t\/\/ Check for legacy state files.\n\n\tvmType := CIAO\n\tdata, err := ioutil.ReadFile(path.Join(ws.instanceDir, \"vmtype.txt\"))\n\tif err == nil {\n\t\tvmType = string(data)\n\t\tif vmType != CIAO && vmType != CLEARCONTAINERS {\n\t\t\terr := fmt.Errorf(\"Unsupported vmType %s. Should be one of \"+CIAO+\"|\"+CLEARCONTAINERS, vmType)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tuiPath := \"\"\n\tdata, err = ioutil.ReadFile(path.Join(ws.instanceDir, \"ui_path.txt\"))\n\tif err == nil {\n\t\tuiPath = string(data)\n\t}\n\n\tin = newLegacyInstance(ws, vmType)\n\n\tif uiPath != \"\" {\n\t\tin.Mounts = append(in.Mounts, mount{\n\t\t\tTag: \"hostui\",\n\t\t\tSecurityModel: \"mapped\",\n\t\t\tPath: filepath.Clean(uiPath),\n\t\t})\n\t}\n\n\treturn in, nil\n}\n\nfunc (in *instance) unmarshall(data []byte) error {\n\terr := yaml.Unmarshal(data, in)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshall instance state : %v\", err)\n\t}\n\n\tfor i := range in.Mounts {\n\t\tif err := checkDirectory(in.Mounts[i].Path); err != nil {\n\t\t\treturn fmt.Errorf(\"Bad mount %s specified: %v\",\n\t\t\t\tin.Mounts[i].Path, err)\n\t\t}\n\t}\n\n\tvar memDef, cpuDef int\n\tif in.MemGiB == 0 || in.CPUs == 0 {\n\t\tmemDef, cpuDef = getMemAndCpus()\n\t\tif in.MemGiB == 0 {\n\t\t\tin.MemGiB = memDef\n\t\t}\n\t\tif in.CPUs == 0 {\n\t\t\tin.CPUs = cpuDef\n\t\t}\n\t}\n\n\tvar i int\n\tfor i = 0; i < len(in.PortMappings); i++ {\n\t\tif in.PortMappings[i].Guest == 22 {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i == len(in.PortMappings) {\n\t\tin.PortMappings = append(in.PortMappings,\n\t\t\tportMapping{\n\t\t\t\tHost: 10022,\n\t\t\t\tGuest: 22,\n\t\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc (in *instance) unmarshallWithTemplate(ws *workspace, data string) error {\n\ttmpl, err := template.New(\"instance-data\").Parse(string(data))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to parse instance data template: %v\", err)\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, ws)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to execute instance data template: %v\", err)\n\t}\n\treturn in.unmarshall(buf.Bytes())\n}\n\nfunc (in *instance) load(ws *workspace) error {\n\tdata, err := ioutil.ReadFile(path.Join(ws.instanceDir, \"state.yaml\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to read instance state : %v\", err)\n\t}\n\n\treturn in.unmarshall(data)\n}\n\nfunc (in *instance) save(ws *workspace) error {\n\tdata, err := yaml.Marshal(in)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to marshall instance state : %v\", err)\n\t}\n\terr = ioutil.WriteFile(path.Join(ws.instanceDir, \"state.yaml\"),\n\t\tdata, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to write instance state : %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (in *instance) mergeMounts(m mounts) {\n\tmountCount := len(in.Mounts)\n\tfor _, mount := range m {\n\t\tvar i int\n\t\tfor i = 0; i < mountCount; i++ {\n\t\t\tif mount.Tag == in.Mounts[i].Tag {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif i == mountCount {\n\t\t\tin.Mounts = append(in.Mounts, mount)\n\t\t} else {\n\t\t\tin.Mounts[i] = mount\n\t\t}\n\t}\n}\n\nfunc (in *instance) mergePorts(p ports) {\n\tportCount := len(in.PortMappings)\n\tfor _, port := range p {\n\t\tvar i int\n\t\tfor i = 0; i < portCount; i++ {\n\t\t\tif port.Guest == in.PortMappings[i].Guest {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif i == portCount {\n\t\t\tin.PortMappings = append(in.PortMappings, port)\n\t\t} else {\n\t\t\tin.PortMappings[i] = port\n\t\t}\n\t}\n}\n\nfunc (in *instance) sshPort() (int, error) {\n\tfor _, p := range in.PortMappings {\n\t\tif p.Guest == 22 {\n\t\t\treturn p.Host, nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"No SSH port configured\")\n}\n\nfunc (ins *instanceSpec) unmarshall(data []byte) error {\n\terr := yaml.Unmarshal(data, ins)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshall instance specification : %v\", err)\n\t}\n\n\tif ins.BaseImageURL == \"\" {\n\t\tins.BaseImageURL = guestDownloadURL\n\t\tins.BaseImageName = guestImageFriendlyName\n\t} else {\n\t\turl, err := url.Parse(ins.BaseImageURL)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to parse url %s : %v\",\n\t\t\t\tins.BaseImageURL, err)\n\t\t}\n\t\tif ins.BaseImageName == \"\" {\n\t\t\tlastSlash := strings.LastIndex(url.Path, \"\/\")\n\t\t\tif lastSlash == -1 {\n\t\t\t\tins.BaseImageName = url.Path\n\t\t\t} else {\n\t\t\t\tins.BaseImageName = url.Path[lastSlash+1:]\n\t\t\t}\n\t\t}\n\t}\n\n\tif ins.Hostname == \"\" {\n\t\tins.Hostname = defaultHostname\n\t}\n\treturn nil\n}\n\nfunc (ins *instanceSpec) unmarshallWithTemplate(ws *workspace, data string) error {\n\ttmpl, err := template.New(\"instance-spec\").Parse(string(data))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to parse instance data template: %v\", err)\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, ws)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to execute instance data template: %v\", err)\n\t}\n\treturn ins.unmarshall(buf.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>package quictun\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype closeWriter interface {\n\tCloseWrite() error\n}\n\nvar count int64\n\nfunc Proxy(conn, targetConn net.Conn) {\n\tdefer conn.Close()\n\tdefer targetConn.Close()\n\tdefer atomic.AddInt64(&count, -1)\n\n\tatomic.AddInt64(&count, 1)\n\n\tlog.Printf(\"[%d] link start %v <-> %v\",\n\t\tatomic.LoadInt64(&count), conn.RemoteAddr(), targetConn.RemoteAddr())\n\n\tcopyAndWait := func(dst, src net.Conn, c chan int64) {\n\t\tbuf := make([]byte, 1024)\n\t\tn, err := io.CopyBuffer(dst, src, buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Copy: %s\\n\", err.Error())\n\t\t}\n\t\tif tcpConn, ok := dst.(closeWriter); ok {\n\t\t\ttcpConn.CloseWrite()\n\t\t} else {\n\t\t\tdst.SetReadDeadline(time.Now().Add(time.Second))\n\t\t}\n\t\tc <- n\n\t}\n\n\tstart := time.Now()\n\n\tstod := make(chan int64)\n\tgo copyAndWait(targetConn, conn, stod)\n\n\tdtos := make(chan int64)\n\tgo copyAndWait(conn, targetConn, dtos)\n\n\tvar nstod, ndtos int64\n\tfor i := 0; i < 2; {\n\t\tselect {\n\t\tcase nstod = <-stod:\n\t\t\ti++\n\t\tcase ndtos = <-dtos:\n\t\t\ti++\n\t\t}\n\t}\n\td := BeautifyDuration(time.Since(start))\n\tlog.Printf(\"CLOSE %s after %s ->%s <-%s\\n\",\n\t\ttargetConn.RemoteAddr(), d, BeautifySize(nstod), BeautifySize(ndtos))\n}\n<commit_msg>set read deadline seems not work...<commit_after>package quictun\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype closeWriter interface {\n\tCloseWrite() error\n}\n\nvar count int64\n\nfunc Proxy(conn, targetConn net.Conn) {\n\tdefer conn.Close()\n\tdefer targetConn.Close()\n\tdefer atomic.AddInt64(&count, -1)\n\n\tatomic.AddInt64(&count, 1)\n\n\tlog.Printf(\"[%d] link start %v <-> %v\",\n\t\tatomic.LoadInt64(&count), conn.RemoteAddr(), targetConn.RemoteAddr())\n\n\tcopyAndWait := func(dst, src net.Conn, c chan int64) {\n\t\tbuf := make([]byte, 1024)\n\t\tn, err := io.CopyBuffer(dst, src, buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Copy: %s\\n\", err.Error())\n\t\t}\n\t\tif tcpConn, ok := dst.(closeWriter); ok {\n\t\t\ttcpConn.CloseWrite()\n\t\t} else {\n\t\t\tdst.Close()\n\t\t}\n\t\tc <- n\n\t}\n\n\tstart := time.Now()\n\n\tstod := make(chan int64)\n\tgo copyAndWait(targetConn, conn, stod)\n\n\tdtos := make(chan int64)\n\tgo copyAndWait(conn, targetConn, dtos)\n\n\tvar nstod, ndtos int64\n\tfor i := 0; i < 2; {\n\t\tselect {\n\t\tcase nstod = <-stod:\n\t\t\ti++\n\t\tcase ndtos = <-dtos:\n\t\t\ti++\n\t\t}\n\t}\n\td := BeautifyDuration(time.Since(start))\n\tlog.Printf(\"CLOSE %s after %s ->%s <-%s\\n\",\n\t\ttargetConn.RemoteAddr(), d, BeautifySize(nstod), BeautifySize(ndtos))\n}\n<|endoftext|>"} {"text":"<commit_before>package http_api\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sync\"\n\n\t\"github.com\/uniqush\/uniqush-push\/push\"\n\t\"github.com\/uniqush\/uniqush-push\/srv\/apns\/common\"\n)\n\n\/\/ HTTPPushRequestProcessor sends push notification requests to APNS using HTTP API\ntype HTTPPushRequestProcessor struct {\n\tclient *http.Client\n}\n\n\/\/ NewRequestProcessor returns a new HTTPPushProcessor using net\/http DefaultClient connection pool\nfunc NewRequestProcessor() common.PushRequestProcessor {\n\treturn &HTTPPushRequestProcessor{\n\t\tclient: http.DefaultClient,\n\t}\n}\n\nfunc (processor *HTTPPushRequestProcessor) AddRequest(request *common.PushRequest) {\n\tgo processor.sendRequests(request)\n}\n\nfunc (processor *HTTPPushRequestProcessor) GetMaxPayloadSize() int {\n\treturn 4096\n}\n\nfunc (processor *HTTPPushRequestProcessor) Finalize() {\n\tswitch transport := processor.client.Transport.(type) {\n\tcase *http.Transport:\n\t\ttransport.CloseIdleConnections()\n\tdefault:\n\t}\n}\n\nfunc (processor *HTTPPushRequestProcessor) SetErrorReportChan(errChan chan<- push.PushError) {}\n\nfunc (processor *HTTPPushRequestProcessor) sendRequests(request *common.PushRequest) {\n\tdefer close(request.ErrChan)\n\n\tjwtManager, err := common.NewJWTManager(request.PSP.FixedData[\"p8\"], request.PSP.FixedData[\"keyid\"], request.PSP.FixedData[\"teamid\"])\n\tif err != nil {\n\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\tjwt, err := jwtManager.GenerateToken()\n\tif err != nil {\n\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\n\twg := new(sync.WaitGroup)\n\twg.Add(len(request.Devtokens))\n\n\theader := http.Header{\n\t\t\"authorization\": []string{\"bearer \" + jwt},\n\t\t\"apns-expiration\": []string{\"0\"}, \/\/ Only attempt to send the notification once\n\t\t\"apns-priority\": []string{\"10\"}, \/\/ Send notification immidiately\n\t\t\"apns-topic\": []string{request.PSP.FixedData[\"bundleid\"]},\n\t}\n\n\tfor i, token := range request.Devtokens {\n\t\tmsgID := request.GetId(i)\n\n\t\turl := fmt.Sprintf(\"%s\/3\/device\/%s\", request.PSP.VolatileData[\"addr\"], hex.EncodeToString(token))\n\t\thttpRequest, err := http.NewRequest(\"POST\", url, bytes.NewReader(request.Payload))\n\t\tif err != nil {\n\t\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\thttpRequest.Header = header\n\n\t\tgo processor.sendRequest(wg, httpRequest, msgID, request.ErrChan, request.ResChan)\n\t}\n\n\twg.Wait()\n}\n\nfunc (processor *HTTPPushRequestProcessor) sendRequest(wg *sync.WaitGroup, request *http.Request, messageID uint32, errChan chan<- push.PushError, resChan chan<- *common.APNSResult) {\n\tdefer wg.Done()\n\n\tresponse, err := processor.client.Do(request)\n\tif err != nil {\n\t\terrChan <- push.NewConnectionError(err)\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tresponseBody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\terrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\n\tif len(responseBody) > 0 {\n\t\t\/\/ Successful request should return empty response body\n\t\tapnsError := new(APNSErrorResponse)\n\t\terr := json.Unmarshal(responseBody, apnsError)\n\t\tif err != nil {\n\t\t\terrChan <- push.NewError(err.Error())\n\t\t} else {\n\t\t\terrChan <- push.NewBadNotificationWithDetails(apnsError.Reason)\n\t\t}\n\t} else {\n\t\tresChan <- &common.APNSResult{\n\t\t\tMsgId: messageID,\n\t\t}\n\t}\n}\n<commit_msg>Determine message expiration from TTL<commit_after>package http_api\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sync\"\n\n\t\"github.com\/uniqush\/uniqush-push\/push\"\n\t\"github.com\/uniqush\/uniqush-push\/srv\/apns\/common\"\n)\n\n\/\/ HTTPPushRequestProcessor sends push notification requests to APNS using HTTP API\ntype HTTPPushRequestProcessor struct {\n\tclient *http.Client\n}\n\n\/\/ NewRequestProcessor returns a new HTTPPushProcessor using net\/http DefaultClient connection pool\nfunc NewRequestProcessor() common.PushRequestProcessor {\n\treturn &HTTPPushRequestProcessor{\n\t\tclient: http.DefaultClient,\n\t}\n}\n\nfunc (processor *HTTPPushRequestProcessor) AddRequest(request *common.PushRequest) {\n\tgo processor.sendRequests(request)\n}\n\nfunc (processor *HTTPPushRequestProcessor) GetMaxPayloadSize() int {\n\treturn 4096\n}\n\nfunc (processor *HTTPPushRequestProcessor) Finalize() {\n\tswitch transport := processor.client.Transport.(type) {\n\tcase *http.Transport:\n\t\ttransport.CloseIdleConnections()\n\tdefault:\n\t}\n}\n\nfunc (processor *HTTPPushRequestProcessor) SetErrorReportChan(errChan chan<- push.PushError) {}\n\nfunc (processor *HTTPPushRequestProcessor) sendRequests(request *common.PushRequest) {\n\tdefer close(request.ErrChan)\n\n\tjwtManager, err := common.NewJWTManager(request.PSP.FixedData[\"p8\"], request.PSP.FixedData[\"keyid\"], request.PSP.FixedData[\"teamid\"])\n\tif err != nil {\n\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\tjwt, err := jwtManager.GenerateToken()\n\tif err != nil {\n\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\n\twg := new(sync.WaitGroup)\n\twg.Add(len(request.Devtokens))\n\n\theader := http.Header{\n\t\t\"authorization\": []string{\"bearer \" + jwt},\n\t\t\"apns-expiration\": []string{fmt.Sprint(request.Expiry)},\n\t\t\"apns-priority\": []string{\"10\"}, \/\/ Send notification immidiately\n\t\t\"apns-topic\": []string{request.PSP.FixedData[\"bundleid\"]},\n\t}\n\n\tfor i, token := range request.Devtokens {\n\t\tmsgID := request.GetId(i)\n\n\t\turl := fmt.Sprintf(\"%s\/3\/device\/%s\", request.PSP.VolatileData[\"addr\"], hex.EncodeToString(token))\n\t\thttpRequest, err := http.NewRequest(\"POST\", url, bytes.NewReader(request.Payload))\n\t\tif err != nil {\n\t\t\trequest.ErrChan <- push.NewError(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\thttpRequest.Header = header\n\n\t\tgo processor.sendRequest(wg, httpRequest, msgID, request.ErrChan, request.ResChan)\n\t}\n\n\twg.Wait()\n}\n\nfunc (processor *HTTPPushRequestProcessor) sendRequest(wg *sync.WaitGroup, request *http.Request, messageID uint32, errChan chan<- push.PushError, resChan chan<- *common.APNSResult) {\n\tdefer wg.Done()\n\n\tresponse, err := processor.client.Do(request)\n\tif err != nil {\n\t\terrChan <- push.NewConnectionError(err)\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tresponseBody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\terrChan <- push.NewError(err.Error())\n\t\treturn\n\t}\n\n\tif len(responseBody) > 0 {\n\t\t\/\/ Successful request should return empty response body\n\t\tapnsError := new(APNSErrorResponse)\n\t\terr := json.Unmarshal(responseBody, apnsError)\n\t\tif err != nil {\n\t\t\terrChan <- push.NewError(err.Error())\n\t\t} else {\n\t\t\terrChan <- push.NewBadNotificationWithDetails(apnsError.Reason)\n\t\t}\n\t} else {\n\t\tresChan <- &common.APNSResult{\n\t\t\tMsgId: messageID,\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tbrowserStarted int = iota\n\tbrowserFailed\n\tseleniumError\n)\n\nconst (\n\tpingPath string = \"\/ping\"\n\terrPath string = \"\/err\"\n\troutePath string = \"\/wd\/hub\/session\"\n\tproxyPath string = routePath + \"\/\"\n\thead int = len(proxyPath)\n\ttail int = head + 32\n\tsessPart int = 4 \/\/ \/wd\/hub\/session\/{various length session}\n)\n\nvar (\n\tport = flag.Int(\"port\", 8080, \"port to bind to\")\n\tconf = flag.String(\"conf\", \"browsers.xml\", \"browsers configuration file path\")\n\tlisten string\n\tconfig Browsers\n\troutes map[string]*Host = make(map[string]*Host)\n\tnum uint64\n\tlock sync.Mutex\n)\n\ntype caps map[string]interface{}\n\nfunc (c *caps) capability(k string) string {\n\tdc := (*c)[\"desiredCapabilities\"]\n\tswitch dc.(type) {\n\tcase map[string]interface{}:\n\t\tv := dc.(map[string]interface{})\n\t\tswitch v[k].(type) {\n\t\tcase string:\n\t\t\treturn v[k].(string)\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (c *caps) browser() string {\n\treturn c.capability(\"browserName\")\n}\n\nfunc (c *caps) version() string {\n\treturn c.capability(\"version\")\n}\n\nfunc (h *Host) url() string {\n\treturn fmt.Sprintf(\"http:\/\/%s%s\", h.net(), routePath)\n}\n\nfunc (h *Host) session(c caps) (map[string]interface{}, int) {\n\tb, _ := json.Marshal(c)\n\tresp, err := http.Post(h.url(), \"application\/json\", bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, seleniumError\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, browserFailed\n\t}\n\tvar reply map[string]interface{}\n\tjson.NewDecoder(resp.Body).Decode(&reply)\n\treturn reply, browserStarted\n}\n\nfunc reply(w http.ResponseWriter, msg map[string]interface{}) {\n\treply, _ := json.Marshal(msg)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(reply)\n}\n\nfunc serial() uint64 {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tid := num\n\tnum++\n\treturn id\n}\n\nfunc info(r *http.Request) (user, remote string) {\n\tuser = \"unknown\"\n\tif u, _, ok := r.BasicAuth(); ok {\n\t\tuser = u\n\t}\n\tremote = r.RemoteAddr\n\treturn\n}\n\nfunc fmtBrowser(browser, version string) string {\n\tif version != \"\" {\n\t\treturn fmt.Sprintf(\"%s-%s\", browser, version)\n\t}\n\treturn browser\n}\n\nfunc route(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tid := serial()\n\tuser, remote := info(r)\n\tvar c caps\n\terr := json.NewDecoder(r.Body).Decode(&c)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"bad json format: %s\", err.Error()), http.StatusBadRequest)\n\t\tlog.Printf(\"[%d] [BAD_JSON] [%s] [%s] [%v]\\n\", id, user, remote, err)\n\t\treturn\n\t}\n\tbrowser, version := c.browser(), c.version()\n\tif browser == \"\" {\n\t\thttp.Error(w, \"browser not set\", http.StatusBadRequest)\n\t\tlog.Printf(\"[%d] [BROWSER_NOT_SET] [%s] [%s]\\n\", id, user, remote)\n\t\treturn\n\t}\n\tcount := 0\nloop:\n\tfor {\n\t\thosts := config.find(browser, version)\n\t\tif len(hosts) == 0 {\n\t\t\thttp.Error(w, fmt.Sprintf(\"unsupported browser: %s %s\", browser, version), http.StatusNotFound)\n\t\t\tlog.Printf(\"[%d] [UNSUPPORTED_BROWSER] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version))\n\t\t\treturn\n\t\t}\n\t\tfor h, i := hosts.choose(); ; h, i = hosts.choose() {\n\t\t\tcount++\n\t\t\tif h == nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tlog.Printf(\"[%d] [SESSION_ATTEMPTED] [%s] [%s] [%s] [%s] [%d]\\n\", id, user, remote, fmtBrowser(browser, version), h.net(), count)\n\t\t\texcludes := make([]string, 0)\n\t\t\tswitch resp, status := h.session(c); status {\n\t\t\tcase browserStarted:\n\t\t\t\tsess := resp[\"sessionId\"].(string)\n\t\t\t\tresp[\"sessionId\"] = h.sum() + sess\n\t\t\t\treply(w, resp)\n\t\t\t\tlog.Printf(\"[%d] [%v] [SESSION_CREATED] [%s] [%s] [%s] [%s] [%s] [%d]\\n\", id, time.Now().Sub(start), user, remote, fmtBrowser(browser, version), h.net(), sess, count)\n\t\t\t\treturn\n\t\t\tcase browserFailed:\n\t\t\t\thosts = append(hosts[:i], hosts[i+1:]...)\n\t\t\tcase seleniumError:\n\t\t\t\texcludes = append(excludes, h.region)\n\t\t\t\thosts = config.find(browser, version, excludes...)\n\t\t\t}\n\t\t\tlog.Printf(\"[%d] [SESSION_FAILED] [%s] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version), h.net())\n\t\t\tif len(hosts) == 0 {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\thttp.Error(w, fmt.Sprintf(\"cannot create session %s on any hosts after %d attempt(s)\", fmtBrowser(browser, version), count), http.StatusInternalServerError)\n\tlog.Printf(\"[%d] [SESSION_NOT_CREATED] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version))\n}\n\nfunc proxy(r *http.Request) {\n\tuser, remote := info(r)\n\tr.URL.Scheme = \"http\"\n\tif len(r.URL.Path) > tail {\n\t\tsum := r.URL.Path[head:tail]\n\t\tpath := r.URL.Path[:head] + r.URL.Path[tail:]\n\t\tif h, ok := routes[sum]; ok {\n\t\t\tif body, err := ioutil.ReadAll(r.Body); err == nil {\n\t\t\t\tr.Body.Close()\n\t\t\t\tvar msg map[string]interface{}\n\t\t\t\tif err := json.Unmarshal(body, &msg); err == nil {\n\t\t\t\t\tdelete(msg, \"sessionId\")\n\t\t\t\t\tbody, _ = json.Marshal(msg)\n\t\t\t\t\tr.ContentLength = int64(len(body))\n\t\t\t\t}\n\t\t\t\tr.Body = ioutil.NopCloser(bytes.NewReader(body))\n\t\t\t}\n\t\t\tr.URL.Host = h.net()\n\t\t\tr.URL.Path = path\n\t\t\tif r.Method == \"DELETE\" {\n\t\t\t\tsess := strings.Split(path, \"\/\")[sessPart]\n\t\t\t\tlog.Printf(\"[SESSION_DELETED] [%s] [%s] [%s] [%s]\\n\", user, remote, h.net(), sess)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tr.URL.Host = listen\n\tr.URL.Path = errPath\n}\n\nfunc ping(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ok\\n\"))\n}\n\nfunc err(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"route not found\", http.StatusNotFound)\n}\n\nfunc postOnly(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t\thandler(w, r)\n\t}\n}\n\nfunc readConfig(fn string, browsers *Browsers) error {\n\tfile, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"error reading configuration file %s: %v\", fn, err))\n\t}\n\tif err := xml.Unmarshal(file, browsers); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"error parsing configuration file %s: %v\", fn, err))\n\t}\n\treturn nil\n}\n\nfunc linkRoutes(config *Browsers) {\n\tfor _, b := range config.Browsers {\n\t\tfor _, v := range b.Versions {\n\t\t\tfor _, r := range v.Regions {\n\t\t\t\tfor i, h := range r.Hosts {\n\t\t\t\t\tr.Hosts[i].region = r.Name\n\t\t\t\t\troutes[h.sum()] = &r.Hosts[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.Parse()\n\tlisten = fmt.Sprintf(\":%d\", *port)\n\n\terr := readConfig(*conf, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlinkRoutes(&config)\n}\n\nfunc mux() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(pingPath, ping)\n\tmux.HandleFunc(errPath, err)\n\tmux.HandleFunc(routePath, postOnly(route))\n\tmux.Handle(proxyPath, &httputil.ReverseProxy{Director: proxy})\n\treturn mux\n}\n\nfunc main() {\n\tlog.Println(\"listening on\", listen)\n\tlog.Print(http.ListenAndServe(listen, mux()))\n}\n<commit_msg>Removed remote port from logging.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tbrowserStarted int = iota\n\tbrowserFailed\n\tseleniumError\n)\n\nconst (\n\tpingPath string = \"\/ping\"\n\terrPath string = \"\/err\"\n\troutePath string = \"\/wd\/hub\/session\"\n\tproxyPath string = routePath + \"\/\"\n\thead int = len(proxyPath)\n\ttail int = head + 32\n\tsessPart int = 4 \/\/ \/wd\/hub\/session\/{various length session}\n)\n\nvar (\n\tport = flag.Int(\"port\", 8080, \"port to bind to\")\n\tconf = flag.String(\"conf\", \"browsers.xml\", \"browsers configuration file path\")\n\tlisten string\n\tconfig Browsers\n\troutes map[string]*Host = make(map[string]*Host)\n\tnum uint64\n\tlock sync.Mutex\n)\n\ntype caps map[string]interface{}\n\nfunc (c *caps) capability(k string) string {\n\tdc := (*c)[\"desiredCapabilities\"]\n\tswitch dc.(type) {\n\tcase map[string]interface{}:\n\t\tv := dc.(map[string]interface{})\n\t\tswitch v[k].(type) {\n\t\tcase string:\n\t\t\treturn v[k].(string)\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (c *caps) browser() string {\n\treturn c.capability(\"browserName\")\n}\n\nfunc (c *caps) version() string {\n\treturn c.capability(\"version\")\n}\n\nfunc (h *Host) url() string {\n\treturn fmt.Sprintf(\"http:\/\/%s%s\", h.net(), routePath)\n}\n\nfunc (h *Host) session(c caps) (map[string]interface{}, int) {\n\tb, _ := json.Marshal(c)\n\tresp, err := http.Post(h.url(), \"application\/json\", bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, seleniumError\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, browserFailed\n\t}\n\tvar reply map[string]interface{}\n\tjson.NewDecoder(resp.Body).Decode(&reply)\n\treturn reply, browserStarted\n}\n\nfunc reply(w http.ResponseWriter, msg map[string]interface{}) {\n\treply, _ := json.Marshal(msg)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(reply)\n}\n\nfunc serial() uint64 {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tid := num\n\tnum++\n\treturn id\n}\n\nfunc info(r *http.Request) (user, remote string) {\n\tuser = \"unknown\"\n\tif u, _, ok := r.BasicAuth(); ok {\n\t\tuser = u\n\t}\n\tremote, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\tremote = r.RemoteAddr\n\t}\n\treturn\n}\n\nfunc fmtBrowser(browser, version string) string {\n\tif version != \"\" {\n\t\treturn fmt.Sprintf(\"%s-%s\", browser, version)\n\t}\n\treturn browser\n}\n\nfunc route(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tid := serial()\n\tuser, remote := info(r)\n\tvar c caps\n\terr := json.NewDecoder(r.Body).Decode(&c)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"bad json format: %s\", err.Error()), http.StatusBadRequest)\n\t\tlog.Printf(\"[%d] [BAD_JSON] [%s] [%s] [%v]\\n\", id, user, remote, err)\n\t\treturn\n\t}\n\tbrowser, version := c.browser(), c.version()\n\tif browser == \"\" {\n\t\thttp.Error(w, \"browser not set\", http.StatusBadRequest)\n\t\tlog.Printf(\"[%d] [BROWSER_NOT_SET] [%s] [%s]\\n\", id, user, remote)\n\t\treturn\n\t}\n\tcount := 0\nloop:\n\tfor {\n\t\thosts := config.find(browser, version)\n\t\tif len(hosts) == 0 {\n\t\t\thttp.Error(w, fmt.Sprintf(\"unsupported browser: %s %s\", browser, version), http.StatusNotFound)\n\t\t\tlog.Printf(\"[%d] [UNSUPPORTED_BROWSER] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version))\n\t\t\treturn\n\t\t}\n\t\tfor h, i := hosts.choose(); ; h, i = hosts.choose() {\n\t\t\tcount++\n\t\t\tif h == nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tlog.Printf(\"[%d] [SESSION_ATTEMPTED] [%s] [%s] [%s] [%s] [%d]\\n\", id, user, remote, fmtBrowser(browser, version), h.net(), count)\n\t\t\texcludes := make([]string, 0)\n\t\t\tswitch resp, status := h.session(c); status {\n\t\t\tcase browserStarted:\n\t\t\t\tsess := resp[\"sessionId\"].(string)\n\t\t\t\tresp[\"sessionId\"] = h.sum() + sess\n\t\t\t\treply(w, resp)\n\t\t\t\tlog.Printf(\"[%d] [%v] [SESSION_CREATED] [%s] [%s] [%s] [%s] [%s] [%d]\\n\", id, time.Now().Sub(start), user, remote, fmtBrowser(browser, version), h.net(), sess, count)\n\t\t\t\treturn\n\t\t\tcase browserFailed:\n\t\t\t\thosts = append(hosts[:i], hosts[i+1:]...)\n\t\t\tcase seleniumError:\n\t\t\t\texcludes = append(excludes, h.region)\n\t\t\t\thosts = config.find(browser, version, excludes...)\n\t\t\t}\n\t\t\tlog.Printf(\"[%d] [SESSION_FAILED] [%s] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version), h.net())\n\t\t\tif len(hosts) == 0 {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\thttp.Error(w, fmt.Sprintf(\"cannot create session %s on any hosts after %d attempt(s)\", fmtBrowser(browser, version), count), http.StatusInternalServerError)\n\tlog.Printf(\"[%d] [SESSION_NOT_CREATED] [%s] [%s] [%s]\\n\", id, user, remote, fmtBrowser(browser, version))\n}\n\nfunc proxy(r *http.Request) {\n\tuser, remote := info(r)\n\tr.URL.Scheme = \"http\"\n\tif len(r.URL.Path) > tail {\n\t\tsum := r.URL.Path[head:tail]\n\t\tpath := r.URL.Path[:head] + r.URL.Path[tail:]\n\t\tif h, ok := routes[sum]; ok {\n\t\t\tif body, err := ioutil.ReadAll(r.Body); err == nil {\n\t\t\t\tr.Body.Close()\n\t\t\t\tvar msg map[string]interface{}\n\t\t\t\tif err := json.Unmarshal(body, &msg); err == nil {\n\t\t\t\t\tdelete(msg, \"sessionId\")\n\t\t\t\t\tbody, _ = json.Marshal(msg)\n\t\t\t\t\tr.ContentLength = int64(len(body))\n\t\t\t\t}\n\t\t\t\tr.Body = ioutil.NopCloser(bytes.NewReader(body))\n\t\t\t}\n\t\t\tr.URL.Host = h.net()\n\t\t\tr.URL.Path = path\n\t\t\tif r.Method == \"DELETE\" {\n\t\t\t\tsess := strings.Split(path, \"\/\")[sessPart]\n\t\t\t\tlog.Printf(\"[SESSION_DELETED] [%s] [%s] [%s] [%s]\\n\", user, remote, h.net(), sess)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tr.URL.Host = listen\n\tr.URL.Path = errPath\n}\n\nfunc ping(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ok\\n\"))\n}\n\nfunc err(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"route not found\", http.StatusNotFound)\n}\n\nfunc postOnly(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t\thandler(w, r)\n\t}\n}\n\nfunc readConfig(fn string, browsers *Browsers) error {\n\tfile, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"error reading configuration file %s: %v\", fn, err))\n\t}\n\tif err := xml.Unmarshal(file, browsers); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\"error parsing configuration file %s: %v\", fn, err))\n\t}\n\treturn nil\n}\n\nfunc linkRoutes(config *Browsers) {\n\tfor _, b := range config.Browsers {\n\t\tfor _, v := range b.Versions {\n\t\t\tfor _, r := range v.Regions {\n\t\t\t\tfor i, h := range r.Hosts {\n\t\t\t\t\tr.Hosts[i].region = r.Name\n\t\t\t\t\troutes[h.sum()] = &r.Hosts[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.Parse()\n\tlisten = fmt.Sprintf(\":%d\", *port)\n\n\terr := readConfig(*conf, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlinkRoutes(&config)\n}\n\nfunc mux() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(pingPath, ping)\n\tmux.HandleFunc(errPath, err)\n\tmux.HandleFunc(routePath, postOnly(route))\n\tmux.Handle(proxyPath, &httputil.ReverseProxy{Director: proxy})\n\treturn mux\n}\n\nfunc main() {\n\tlog.Println(\"listening on\", listen)\n\tlog.Print(http.ListenAndServe(listen, mux()))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\/charset\"\n\t\"compress\/gzip\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar disableGzip = flag.Bool(\"disable-gzip\", false, \"Don't compress HTTP responses with gzip.\")\n\ntype proxyHandler struct {\n\t\/\/ TLS is whether this is an HTTPS connection.\n\tTLS bool\n\n\t\/\/ connectPort is the server port that was specified in a CONNECT request.\n\tconnectPort string\n\n\t\/\/ user is a user that has already been authenticated.\n\tuser string\n\n\t\/\/ rt is the RoundTripper that will be used to fulfill the requests.\n\t\/\/ If it is nil, a default Transport will be used.\n\trt http.RoundTripper\n}\n\n\/\/ lanAddress returns whether addr is in one of the LAN address ranges.\nfunc lanAddress(addr string) bool {\n\tip := net.ParseIP(addr)\n\tif ip4 := ip.To4(); ip4 != nil {\n\t\tswitch ip4[0] {\n\t\tcase 10, 127:\n\t\t\treturn true\n\t\tcase 172:\n\t\t\treturn ip4[1]&0xf0 == 16\n\t\tcase 192:\n\t\t\treturn ip4[1] == 168\n\t\t}\n\t\treturn false\n\t}\n\n\tif ip[0]&0xfe == 0xfc {\n\t\treturn true\n\t}\n\tif ip[0] == 0xfe && (ip[1]&0xfc) == 0x80 {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (h proxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tactiveConnections.Add(1)\n\tdefer activeConnections.Done()\n\n\tif len(r.URL.String()) > 10000 {\n\t\thttp.Error(w, \"URL too long\", http.StatusRequestURITooLong)\n\t\treturn\n\t}\n\n\tclient := r.RemoteAddr\n\thost, _, err := net.SplitHostPort(client)\n\tif err == nil {\n\t\tclient = host\n\t}\n\tuser := client\n\n\tif h.user != \"\" {\n\t\tuser = h.user\n\t} else if !*authNever && !h.TLS && (*authAlways || !lanAddress(client)) {\n\t\tu := authenticate(w, r)\n\t\tif u == \"\" {\n\t\t\treturn\n\t\t}\n\t\tuser = u\n\t}\n\n\tif r.Host == localServer {\n\t\thttp.DefaultServeMux.ServeHTTP(w, r)\n\t\treturn\n\t}\n\n\tif r.Method == \"CONNECT\" {\n\t\tif !tlsReady {\n\t\t\tsc := scorecard{\n\t\t\t\ttally: URLRules.MatchingRules(r.URL),\n\t\t\t}\n\t\t\tsc.calculate(user)\n\t\t\tif sc.action == BLOCK {\n\t\t\t\tshowBlockPage(w, r, &sc, user)\n\t\t\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tconn, err := newHijackedConn(w)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(conn, \"HTTP\/1.1 500 Internal Server Error\")\n\t\t\tfmt.Fprintln(conn)\n\t\t\tfmt.Fprintln(conn, err)\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(conn, \"HTTP\/1.1 200 Connection Established\\r\\n\\r\\n\")\n\t\tif tlsReady {\n\t\t\tSSLBump(conn, r.URL.Host, user)\n\t\t} else {\n\t\t\tconnectDirect(conn, r.URL.Host, nil)\n\t\t}\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\th.makeWebsocketConnection(w, r)\n\t\treturn\n\t}\n\n\tr.Header.Add(\"Via\", r.Proto+\" Redwood\")\n\tr.Header.Add(\"X-Forwarded-For\", client)\n\n\tgzipOK := !*disableGzip && strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\")\n\tr.Header.Del(\"Accept-Encoding\")\n\n\t\/\/ Reconstruct the URL if it is incomplete (i.e. on a transparent proxy).\n\tif r.URL.Host == \"\" {\n\t\tr.URL.Host = r.Host\n\t}\n\tif r.URL.Scheme == \"\" {\n\t\tif h.TLS {\n\t\t\tr.URL.Scheme = \"https\"\n\t\t} else {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t}\n\t}\n\n\tsc := scorecard{\n\t\ttally: URLRules.MatchingRules(r.URL),\n\t}\n\tsc.calculate(user)\n\tif sc.action == BLOCK {\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\treturn\n\t}\n\n\tchangeQuery(r.URL)\n\n\tvar resp *http.Response\n\tif h.rt == nil {\n\t\tresp, err = transport.RoundTrip(r)\n\t} else {\n\t\tresp, err = h.rt.RoundTrip(r)\n\t}\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\tlog.Printf(\"error fetching %s: %s\", r.URL, err)\n\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tcontentType, action := checkContentType(resp)\n\n\tswitch action {\n\tcase BLOCK:\n\t\tsc.action = BLOCK\n\t\tsc.blocked = []string{\"blocked-mime\"}\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, resp, sc, contentType, 0, false, user)\n\t\treturn\n\n\tcase ALLOW:\n\t\tsc.action = IGNORE\n\t\tcopyResponseHeader(w, resp)\n\t\tio.Copy(w, resp.Body)\n\t\tlogAccess(r, resp, sc, contentType, 0, false, user)\n\t\treturn\n\t}\n\n\tlr := &io.LimitedReader{\n\t\tR: resp.Body,\n\t\tN: 1e7,\n\t}\n\tcontent, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\tlog.Printf(\"error while reading response body (URL: %s): %s\", r.URL, err)\n\t}\n\tif lr.N == 0 {\n\t\tlog.Println(\"response body too long to filter:\", r.URL)\n\t\tcopyResponseHeader(w, resp)\n\t\tw.Write(content)\n\t\tn, err := io.Copy(w, resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error while copying response (URL: %s): %s\", r.URL, err)\n\t\t}\n\t\tsc.action = IGNORE\n\t\tlogAccess(r, resp, sc, contentType, int(n)+len(content), false, user)\n\t\treturn\n\t}\n\n\tmodified := false\n\t_, cs, _ := charset.DetermineEncoding(content, resp.Header.Get(\"Content-Type\"))\n\tif strings.Contains(contentType, \"html\") {\n\t\tmodified = pruneContent(r.URL, &content, cs)\n\t\tif modified {\n\t\t\tresp.Header.Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t\t\tcs = \"utf-8\"\n\t\t}\n\t}\n\n\tscanContent(content, contentType, cs, sc.tally)\n\tsc.calculate(user)\n\n\tif sc.action == BLOCK {\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, resp, sc, contentType, len(content), modified, user)\n\t\treturn\n\t}\n\n\tif gzipOK && len(content) > 1000 {\n\t\tresp.Header.Set(\"Content-Encoding\", \"gzip\")\n\t\tcopyResponseHeader(w, resp)\n\t\tgzw := gzip.NewWriter(w)\n\t\tgzw.Write(content)\n\t\tgzw.Close()\n\t} else {\n\t\tcopyResponseHeader(w, resp)\n\t\tw.Write(content)\n\t}\n\n\tlogAccess(r, resp, sc, contentType, len(content), modified, user)\n}\n\n\/\/ copyResponseHeader writes resp's header and status code to w.\nfunc copyResponseHeader(w http.ResponseWriter, resp *http.Response) {\n\tnewHeader := w.Header()\n\tfor key, values := range resp.Header {\n\t\tfor _, v := range values {\n\t\t\tnewHeader.Add(key, v)\n\t\t}\n\t}\n\n\tw.WriteHeader(resp.StatusCode)\n}\n\n\/\/ A hijackedConn is a connection that has been hijacked (to fulfill a CONNECT\n\/\/ request).\ntype hijackedConn struct {\n\tnet.Conn\n\tio.Reader\n}\n\nfunc (hc *hijackedConn) Read(b []byte) (int, error) {\n\treturn hc.Reader.Read(b)\n}\n\nfunc newHijackedConn(w http.ResponseWriter) (*hijackedConn, error) {\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, errors.New(\"connection doesn't support hijacking\")\n\t}\n\tconn, bufrw, err := hj.Hijack()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = bufrw.Flush()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &hijackedConn{\n\t\tConn: conn,\n\t\tReader: bufrw.Reader,\n\t}, nil\n}\n\nvar transport = http.Transport{\n\tTLSClientConfig: unverifiedClientConfig,\n\tProxy: http.ProxyFromEnvironment,\n}\n\n\/\/ This is to deal with the problem of stale keepalive connections, which cause\n\/\/ transport.RoundTrip to return io.EOF.\nfunc init() {\n\tgo func() {\n\t\tfor _ = range time.Tick(10 * time.Second) {\n\t\t\ttransport.CloseIdleConnections()\n\t\t}\n\t}()\n\n\ttransport.RegisterProtocol(\"ftp\", FTPTransport{})\n}\n\nfunc (h proxyHandler) makeWebsocketConnection(w http.ResponseWriter, r *http.Request) {\n\taddr := r.Host\n\tif _, _, err := net.SplitHostPort(addr); err != nil {\n\t\t\/\/ There is no port specified; we need to add it.\n\t\tport := h.connectPort\n\t\tif port == \"\" {\n\t\t\tport = \"80\"\n\t\t}\n\t\taddr = net.JoinHostPort(addr, port)\n\t}\n\tvar err error\n\tvar serverConn net.Conn\n\tif h.TLS {\n\t\tserverConn, err = tls.Dial(\"tcp\", addr, unverifiedClientConfig)\n\t} else {\n\t\tserverConn, err = net.Dial(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = r.Write(serverConn)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\thttp.Error(w, \"Couldn't create a websocket connection\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tconn, bufrw, err := hj.Hijack()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tio.Copy(conn, serverConn)\n\t\tconn.Close()\n\t}()\n\tio.Copy(serverConn, bufrw)\n\tserverConn.Close()\n\treturn\n}\n<commit_msg>If the response from the server doesn't include a Content-Type, don't add one.<commit_after>package main\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\/charset\"\n\t\"compress\/gzip\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar disableGzip = flag.Bool(\"disable-gzip\", false, \"Don't compress HTTP responses with gzip.\")\n\ntype proxyHandler struct {\n\t\/\/ TLS is whether this is an HTTPS connection.\n\tTLS bool\n\n\t\/\/ connectPort is the server port that was specified in a CONNECT request.\n\tconnectPort string\n\n\t\/\/ user is a user that has already been authenticated.\n\tuser string\n\n\t\/\/ rt is the RoundTripper that will be used to fulfill the requests.\n\t\/\/ If it is nil, a default Transport will be used.\n\trt http.RoundTripper\n}\n\n\/\/ lanAddress returns whether addr is in one of the LAN address ranges.\nfunc lanAddress(addr string) bool {\n\tip := net.ParseIP(addr)\n\tif ip4 := ip.To4(); ip4 != nil {\n\t\tswitch ip4[0] {\n\t\tcase 10, 127:\n\t\t\treturn true\n\t\tcase 172:\n\t\t\treturn ip4[1]&0xf0 == 16\n\t\tcase 192:\n\t\t\treturn ip4[1] == 168\n\t\t}\n\t\treturn false\n\t}\n\n\tif ip[0]&0xfe == 0xfc {\n\t\treturn true\n\t}\n\tif ip[0] == 0xfe && (ip[1]&0xfc) == 0x80 {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (h proxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tactiveConnections.Add(1)\n\tdefer activeConnections.Done()\n\n\tif len(r.URL.String()) > 10000 {\n\t\thttp.Error(w, \"URL too long\", http.StatusRequestURITooLong)\n\t\treturn\n\t}\n\n\tclient := r.RemoteAddr\n\thost, _, err := net.SplitHostPort(client)\n\tif err == nil {\n\t\tclient = host\n\t}\n\tuser := client\n\n\tif h.user != \"\" {\n\t\tuser = h.user\n\t} else if !*authNever && !h.TLS && (*authAlways || !lanAddress(client)) {\n\t\tu := authenticate(w, r)\n\t\tif u == \"\" {\n\t\t\treturn\n\t\t}\n\t\tuser = u\n\t}\n\n\tif r.Host == localServer {\n\t\thttp.DefaultServeMux.ServeHTTP(w, r)\n\t\treturn\n\t}\n\n\tif r.Method == \"CONNECT\" {\n\t\tif !tlsReady {\n\t\t\tsc := scorecard{\n\t\t\t\ttally: URLRules.MatchingRules(r.URL),\n\t\t\t}\n\t\t\tsc.calculate(user)\n\t\t\tif sc.action == BLOCK {\n\t\t\t\tshowBlockPage(w, r, &sc, user)\n\t\t\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tconn, err := newHijackedConn(w)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(conn, \"HTTP\/1.1 500 Internal Server Error\")\n\t\t\tfmt.Fprintln(conn)\n\t\t\tfmt.Fprintln(conn, err)\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(conn, \"HTTP\/1.1 200 Connection Established\\r\\n\\r\\n\")\n\t\tif tlsReady {\n\t\t\tSSLBump(conn, r.URL.Host, user)\n\t\t} else {\n\t\t\tconnectDirect(conn, r.URL.Host, nil)\n\t\t}\n\t\treturn\n\t}\n\n\tif r.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\th.makeWebsocketConnection(w, r)\n\t\treturn\n\t}\n\n\tr.Header.Add(\"Via\", r.Proto+\" Redwood\")\n\tr.Header.Add(\"X-Forwarded-For\", client)\n\n\tgzipOK := !*disableGzip && strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\")\n\tr.Header.Del(\"Accept-Encoding\")\n\n\t\/\/ Reconstruct the URL if it is incomplete (i.e. on a transparent proxy).\n\tif r.URL.Host == \"\" {\n\t\tr.URL.Host = r.Host\n\t}\n\tif r.URL.Scheme == \"\" {\n\t\tif h.TLS {\n\t\t\tr.URL.Scheme = \"https\"\n\t\t} else {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t}\n\t}\n\n\tsc := scorecard{\n\t\ttally: URLRules.MatchingRules(r.URL),\n\t}\n\tsc.calculate(user)\n\tif sc.action == BLOCK {\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\treturn\n\t}\n\n\tchangeQuery(r.URL)\n\n\tvar resp *http.Response\n\tif h.rt == nil {\n\t\tresp, err = transport.RoundTrip(r)\n\t} else {\n\t\tresp, err = h.rt.RoundTrip(r)\n\t}\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusServiceUnavailable)\n\t\tlog.Printf(\"error fetching %s: %s\", r.URL, err)\n\t\tlogAccess(r, nil, sc, \"\", 0, false, user)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tcontentType, action := checkContentType(resp)\n\n\tswitch action {\n\tcase BLOCK:\n\t\tsc.action = BLOCK\n\t\tsc.blocked = []string{\"blocked-mime\"}\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, resp, sc, contentType, 0, false, user)\n\t\treturn\n\n\tcase ALLOW:\n\t\tsc.action = IGNORE\n\t\tcopyResponseHeader(w, resp)\n\t\tio.Copy(w, resp.Body)\n\t\tlogAccess(r, resp, sc, contentType, 0, false, user)\n\t\treturn\n\t}\n\n\tlr := &io.LimitedReader{\n\t\tR: resp.Body,\n\t\tN: 1e7,\n\t}\n\tcontent, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\tlog.Printf(\"error while reading response body (URL: %s): %s\", r.URL, err)\n\t}\n\tif lr.N == 0 {\n\t\tlog.Println(\"response body too long to filter:\", r.URL)\n\t\tcopyResponseHeader(w, resp)\n\t\tw.Write(content)\n\t\tn, err := io.Copy(w, resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error while copying response (URL: %s): %s\", r.URL, err)\n\t\t}\n\t\tsc.action = IGNORE\n\t\tlogAccess(r, resp, sc, contentType, int(n)+len(content), false, user)\n\t\treturn\n\t}\n\n\tmodified := false\n\t_, cs, _ := charset.DetermineEncoding(content, resp.Header.Get(\"Content-Type\"))\n\tif strings.Contains(contentType, \"html\") {\n\t\tmodified = pruneContent(r.URL, &content, cs)\n\t\tif modified {\n\t\t\tresp.Header.Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t\t\tcs = \"utf-8\"\n\t\t}\n\t}\n\n\tscanContent(content, contentType, cs, sc.tally)\n\tsc.calculate(user)\n\n\tif sc.action == BLOCK {\n\t\tshowBlockPage(w, r, &sc, user)\n\t\tlogAccess(r, resp, sc, contentType, len(content), modified, user)\n\t\treturn\n\t}\n\n\tif resp.Header.Get(\"Content-Type\") == \"\" {\n\t\t\/\/ If the server didn't specify a content type, don't let the http\n\t\t\/\/ package add one by doing MIME sniffing.\n\t\tresp.Header.Set(\"Content-Type\", \"\")\n\t}\n\n\tif gzipOK && len(content) > 1000 {\n\t\tresp.Header.Set(\"Content-Encoding\", \"gzip\")\n\t\tresp.Header.Del(\"Content-Length\")\n\t\tcopyResponseHeader(w, resp)\n\t\tgzw := gzip.NewWriter(w)\n\t\tgzw.Write(content)\n\t\tgzw.Close()\n\t} else {\n\t\tcopyResponseHeader(w, resp)\n\t\tw.Write(content)\n\t}\n\n\tlogAccess(r, resp, sc, contentType, len(content), modified, user)\n}\n\n\/\/ copyResponseHeader writes resp's header and status code to w.\nfunc copyResponseHeader(w http.ResponseWriter, resp *http.Response) {\n\tnewHeader := w.Header()\n\tfor key, values := range resp.Header {\n\t\tfor _, v := range values {\n\t\t\tnewHeader.Add(key, v)\n\t\t}\n\t}\n\n\tw.WriteHeader(resp.StatusCode)\n}\n\n\/\/ A hijackedConn is a connection that has been hijacked (to fulfill a CONNECT\n\/\/ request).\ntype hijackedConn struct {\n\tnet.Conn\n\tio.Reader\n}\n\nfunc (hc *hijackedConn) Read(b []byte) (int, error) {\n\treturn hc.Reader.Read(b)\n}\n\nfunc newHijackedConn(w http.ResponseWriter) (*hijackedConn, error) {\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, errors.New(\"connection doesn't support hijacking\")\n\t}\n\tconn, bufrw, err := hj.Hijack()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = bufrw.Flush()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &hijackedConn{\n\t\tConn: conn,\n\t\tReader: bufrw.Reader,\n\t}, nil\n}\n\nvar transport = http.Transport{\n\tTLSClientConfig: unverifiedClientConfig,\n\tProxy: http.ProxyFromEnvironment,\n}\n\n\/\/ This is to deal with the problem of stale keepalive connections, which cause\n\/\/ transport.RoundTrip to return io.EOF.\nfunc init() {\n\tgo func() {\n\t\tfor _ = range time.Tick(10 * time.Second) {\n\t\t\ttransport.CloseIdleConnections()\n\t\t}\n\t}()\n\n\ttransport.RegisterProtocol(\"ftp\", FTPTransport{})\n}\n\nfunc (h proxyHandler) makeWebsocketConnection(w http.ResponseWriter, r *http.Request) {\n\taddr := r.Host\n\tif _, _, err := net.SplitHostPort(addr); err != nil {\n\t\t\/\/ There is no port specified; we need to add it.\n\t\tport := h.connectPort\n\t\tif port == \"\" {\n\t\t\tport = \"80\"\n\t\t}\n\t\taddr = net.JoinHostPort(addr, port)\n\t}\n\tvar err error\n\tvar serverConn net.Conn\n\tif h.TLS {\n\t\tserverConn, err = tls.Dial(\"tcp\", addr, unverifiedClientConfig)\n\t} else {\n\t\tserverConn, err = net.Dial(\"tcp\", addr)\n\t}\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = r.Write(serverConn)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\thttp.Error(w, \"Couldn't create a websocket connection\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tconn, bufrw, err := hj.Hijack()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tio.Copy(conn, serverConn)\n\t\tconn.Close()\n\t}()\n\tio.Copy(serverConn, bufrw)\n\tserverConn.Close()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/cluster\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/grace\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nvar (\n\tline *liner.State\n\thistoryPath = path.Join(os.TempDir(), \"weed-shell\")\n)\n\nfunc RunShell(options ShellOptions) {\n\n\tsort.Slice(Commands, func(i, j int) bool {\n\t\treturn strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0\n\t})\n\n\tline = liner.NewLiner()\n\tdefer line.Close()\n\tgrace.OnInterrupt(func() {\n\t\tline.Close()\n\t})\n\n\tline.SetCtrlCAborts(true)\n\tline.SetTabCompletionStyle(liner.TabPrints)\n\n\tsetCompletionHandler()\n\tloadHistory()\n\n\tdefer saveHistory()\n\n\treg, _ := regexp.Compile(`'.*?'|\".*?\"|\\S+`)\n\n\tcommandEnv := NewCommandEnv(options)\n\n\tgo commandEnv.MasterClient.KeepConnectedToMaster()\n\tcommandEnv.MasterClient.WaitUntilConnected()\n\n\tif commandEnv.option.FilerAddress == \"\" {\n\t\tvar filers []pb.ServerAddress\n\t\tcommandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {\n\t\t\tresp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{\n\t\t\t\tClientType: cluster.FilerType,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, clusterNode := range resp.ClusterNodes {\n\t\t\t\tfilers = append(filers, pb.ServerAddress(clusterNode.Address))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tfmt.Printf(\"master: %s \", *options.Masters)\n\t\tif len(filers) > 0 {\n\t\t\tfmt.Printf(\"filers: %v\", filers)\n\t\t\tcommandEnv.option.FilerAddress = filers[rand.Intn(len(filers))]\n\t\t}\n\t\tfmt.Println()\n\t}\n\n\tif commandEnv.option.FilerAddress != \"\" {\n\t\tcommandEnv.WithFilerClient(false, func(filerClient filer_pb.SeaweedFilerClient) error {\n\t\t\tresp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.ClusterId != \"\" {\n\t\t\t\tfmt.Printf(`\n---\nFree Monitoring Data URL:\nhttps:\/\/cloud.seaweedfs.com\/ui\/%s\n---\n`, resp.ClusterId)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tfor {\n\t\tcmd, err := line.Prompt(\"> \")\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Printf(\"%v\\n\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tfor _, c := range strings.Split(cmd, \";\") {\n\t\t\tif processEachCmd(reg, c, commandEnv) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {\n\tcmds := reg.FindAllString(cmd, -1)\n\n\tline.AppendHistory(cmd)\n\n\tif len(cmds) == 0 {\n\t\treturn false\n\t} else {\n\n\t\targs := make([]string, len(cmds[1:]))\n\n\t\tfor i := range args {\n\t\t\targs[i] = strings.Trim(string(cmds[1+i]), \"\\\"'\")\n\t\t}\n\n\t\tcmd := cmds[0]\n\t\tif cmd == \"help\" || cmd == \"?\" {\n\t\t\tprintHelp(cmds)\n\t\t} else if cmd == \"exit\" || cmd == \"quit\" {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfoundCommand := false\n\t\t\tfor _, c := range Commands {\n\t\t\t\tif c.Name() == cmd || c.Name() == \"fs.\"+cmd {\n\t\t\t\t\tif err := c.Do(args, commandEnv, os.Stdout); err != nil {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"error: %v\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfoundCommand = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !foundCommand {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"unknown command: %v\\n\", cmd)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn false\n}\n\nfunc printGenericHelp() {\n\tmsg :=\n\t\t`Type:\t\"help <command>\" for help on <command>. Most commands support \"<command> -h\" also for options. \n`\n\tfmt.Print(msg)\n\n\tfor _, c := range Commands {\n\t\thelpTexts := strings.SplitN(c.Help(), \"\\n\", 2)\n\t\tfmt.Printf(\" %-30s\\t# %s \\n\", c.Name(), helpTexts[0])\n\t}\n}\n\nfunc printHelp(cmds []string) {\n\targs := cmds[1:]\n\tif len(args) == 0 {\n\t\tprintGenericHelp()\n\t} else if len(args) > 1 {\n\t\tfmt.Println()\n\t} else {\n\t\tcmd := strings.ToLower(args[0])\n\n\t\tfor _, c := range Commands {\n\t\t\tif c.Name() == cmd {\n\t\t\t\tfmt.Printf(\" %s\\t# %s\\n\", c.Name(), c.Help())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc setCompletionHandler() {\n\tline.SetCompleter(func(line string) (c []string) {\n\t\tfor _, i := range Commands {\n\t\t\tif strings.HasPrefix(i.Name(), strings.ToLower(line)) {\n\t\t\t\tc = append(c, i.Name())\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n}\n\nfunc loadHistory() {\n\tif f, err := os.Open(historyPath); err == nil {\n\t\tline.ReadHistory(f)\n\t\tf.Close()\n\t}\n}\n\nfunc saveHistory() {\n\tif f, err := os.Create(historyPath); err != nil {\n\t\tfmt.Printf(\"Error creating history file: %v\\n\", err)\n\t} else {\n\t\tif _, err = line.WriteHistory(f); err != nil {\n\t\t\tfmt.Printf(\"Error writing history file: %v\\n\", err)\n\t\t}\n\t\tf.Close()\n\t}\n}\n<commit_msg>fix compilation<commit_after>package shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/cluster\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\/grace\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nvar (\n\tline *liner.State\n\thistoryPath = path.Join(os.TempDir(), \"weed-shell\")\n)\n\nfunc RunShell(options ShellOptions) {\n\n\tsort.Slice(Commands, func(i, j int) bool {\n\t\treturn strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0\n\t})\n\n\tline = liner.NewLiner()\n\tdefer line.Close()\n\tgrace.OnInterrupt(func() {\n\t\tline.Close()\n\t})\n\n\tline.SetCtrlCAborts(true)\n\tline.SetTabCompletionStyle(liner.TabPrints)\n\n\tsetCompletionHandler()\n\tloadHistory()\n\n\tdefer saveHistory()\n\n\treg, _ := regexp.Compile(`'.*?'|\".*?\"|\\S+`)\n\n\tcommandEnv := NewCommandEnv(&options)\n\n\tgo commandEnv.MasterClient.KeepConnectedToMaster()\n\tcommandEnv.MasterClient.WaitUntilConnected()\n\n\tif commandEnv.option.FilerAddress == \"\" {\n\t\tvar filers []pb.ServerAddress\n\t\tcommandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {\n\t\t\tresp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{\n\t\t\t\tClientType: cluster.FilerType,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, clusterNode := range resp.ClusterNodes {\n\t\t\t\tfilers = append(filers, pb.ServerAddress(clusterNode.Address))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tfmt.Printf(\"master: %s \", *options.Masters)\n\t\tif len(filers) > 0 {\n\t\t\tfmt.Printf(\"filers: %v\", filers)\n\t\t\tcommandEnv.option.FilerAddress = filers[rand.Intn(len(filers))]\n\t\t}\n\t\tfmt.Println()\n\t}\n\n\tif commandEnv.option.FilerAddress != \"\" {\n\t\tcommandEnv.WithFilerClient(false, func(filerClient filer_pb.SeaweedFilerClient) error {\n\t\t\tresp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.ClusterId != \"\" {\n\t\t\t\tfmt.Printf(`\n---\nFree Monitoring Data URL:\nhttps:\/\/cloud.seaweedfs.com\/ui\/%s\n---\n`, resp.ClusterId)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tfor {\n\t\tcmd, err := line.Prompt(\"> \")\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Printf(\"%v\\n\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tfor _, c := range strings.Split(cmd, \";\") {\n\t\t\tif processEachCmd(reg, c, commandEnv) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {\n\tcmds := reg.FindAllString(cmd, -1)\n\n\tline.AppendHistory(cmd)\n\n\tif len(cmds) == 0 {\n\t\treturn false\n\t} else {\n\n\t\targs := make([]string, len(cmds[1:]))\n\n\t\tfor i := range args {\n\t\t\targs[i] = strings.Trim(string(cmds[1+i]), \"\\\"'\")\n\t\t}\n\n\t\tcmd := cmds[0]\n\t\tif cmd == \"help\" || cmd == \"?\" {\n\t\t\tprintHelp(cmds)\n\t\t} else if cmd == \"exit\" || cmd == \"quit\" {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfoundCommand := false\n\t\t\tfor _, c := range Commands {\n\t\t\t\tif c.Name() == cmd || c.Name() == \"fs.\"+cmd {\n\t\t\t\t\tif err := c.Do(args, commandEnv, os.Stdout); err != nil {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"error: %v\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfoundCommand = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !foundCommand {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"unknown command: %v\\n\", cmd)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn false\n}\n\nfunc printGenericHelp() {\n\tmsg :=\n\t\t`Type:\t\"help <command>\" for help on <command>. Most commands support \"<command> -h\" also for options. \n`\n\tfmt.Print(msg)\n\n\tfor _, c := range Commands {\n\t\thelpTexts := strings.SplitN(c.Help(), \"\\n\", 2)\n\t\tfmt.Printf(\" %-30s\\t# %s \\n\", c.Name(), helpTexts[0])\n\t}\n}\n\nfunc printHelp(cmds []string) {\n\targs := cmds[1:]\n\tif len(args) == 0 {\n\t\tprintGenericHelp()\n\t} else if len(args) > 1 {\n\t\tfmt.Println()\n\t} else {\n\t\tcmd := strings.ToLower(args[0])\n\n\t\tfor _, c := range Commands {\n\t\t\tif c.Name() == cmd {\n\t\t\t\tfmt.Printf(\" %s\\t# %s\\n\", c.Name(), c.Help())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc setCompletionHandler() {\n\tline.SetCompleter(func(line string) (c []string) {\n\t\tfor _, i := range Commands {\n\t\t\tif strings.HasPrefix(i.Name(), strings.ToLower(line)) {\n\t\t\t\tc = append(c, i.Name())\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n}\n\nfunc loadHistory() {\n\tif f, err := os.Open(historyPath); err == nil {\n\t\tline.ReadHistory(f)\n\t\tf.Close()\n\t}\n}\n\nfunc saveHistory() {\n\tif f, err := os.Create(historyPath); err != nil {\n\t\tfmt.Printf(\"Error creating history file: %v\\n\", err)\n\t} else {\n\t\tif _, err = line.WriteHistory(f); err != nil {\n\t\t\tfmt.Printf(\"Error writing history file: %v\\n\", err)\n\t\t}\n\t\tf.Close()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package xsdgen_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"aqwari.net\/xml\/xsdgen\"\n)\n\nfunc tmpfile() *os.File {\n\tf, err := ioutil.TempFile(\"\", \"xsdgen_test\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}\n\nfunc xsdfile(s string) (filename string) {\n\tfile := tmpfile()\n\tdefer file.Close()\n\tfmt.Fprintf(file, `\n\t\t<schema xmlns=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\t\t xmlns:tns=\"http:\/\/www.example.com\/\"\n\t\t xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\t\t xmlns:soapenc=\"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"\n\t\t xmlns:wsdl=\"http:\/\/schemas.xmlsoap.org\/wsdl\/\"\n\t\t targetNamespace=\"http:\/\/www.example.com\/\">\n\t\t %s\n\t\t<\/schema>\n\t`, s)\n\treturn file.Name()\n}\n\nfunc ExampleConfig_GenCLI() {\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.IgnoreAttributes(\"id\", \"href\", \"offset\"),\n\t\txsdgen.IgnoreElements(\"comment\"),\n\t\txsdgen.PackageName(\"webapi\"),\n\t\txsdgen.Replace(\"_\", \"\"),\n\t\txsdgen.HandleSOAPArrayType(),\n\t\txsdgen.SOAPArrayAsSlice(),\n\t)\n\tif err := cfg.GenCLI(\"webapi.xsd\", \"deps\/soap11.xsd\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleLogOutput() {\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.LogOutput(log.New(os.Stderr, \"\", 0)),\n\t\txsdgen.LogLevel(2))\n\tif err := cfg.GenCLI(\"file.wsdl\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleIgnoreAttributes() {\n\tdoc := xsdfile(`\n\t <complexType name=\"ArrayOfString\">\n\t <any maxOccurs=\"unbounded\" \/>\n\t <attribute name=\"soapenc:arrayType\" type=\"xs:string\" \/>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.IgnoreAttributes(\"arrayType\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type ArrayOfString struct {\n\t\/\/ \tItems []string `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleIgnoreElements() {\n\tdoc := xsdfile(`\n\t <complexType name=\"Person\">\n\t <sequence>\n\t <element name=\"name\" type=\"xs:string\" \/>\n\t <element name=\"deceased\" type=\"soapenc:boolean\" \/>\n\t <element name=\"private\" type=\"xs:int\" \/>\n\t <\/sequence>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.IgnoreElements(\"private\"),\n\t\txsdgen.IgnoreAttributes(\"id\", \"href\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type Person struct {\n\t\/\/ \tName string `xml:\"http:\/\/www.example.com\/ name\"`\n\t\/\/ \tDeceased bool `xml:\"http:\/\/www.example.com\/ deceased\"`\n\t\/\/ }\n}\n\nfunc ExamplePackageName() {\n\tdoc := xsdfile(`\n\t <simpleType name=\"zipcode\">\n\t <restriction base=\"xs:string\">\n\t <length value=\"10\" \/>\n\t <\/restriction>\n\t <\/simpleType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.PackageName(\"postal\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package postal\n\t\/\/\n\t\/\/ \/\/ May be no more than 10 items long\n\t\/\/ type Zipcode string\n}\n\nfunc ExampleReplace() {\n\tdoc := xsdfile(`\n\t <complexType name=\"ArrayOfString\">\n\t <any maxOccurs=\"unbounded\" \/>\n\t <attribute name=\"soapenc:arrayType\" type=\"xs:string\" \/>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.Replace(\"ArrayOf(.*)\", \"${1}Array\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type StringArray struct {\n\t\/\/ \tArrayType string `xml:\"arrayType,attr\"`\n\t\/\/ \tItems []string `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleHandleSOAPArrayType() {\n\tdoc := xsdfile(`\n\t <complexType name=\"BoolArray\">\n\t <complexContent>\n\t <restriction base=\"soapenc:Array\">\n\t <attribute ref=\"soapenc:arrayType\" wsdl:arrayType=\"xs:boolean[]\"\/>\n\t <\/restriction>\n\t <\/complexContent>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.HandleSOAPArrayType())\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type BoolArray struct {\n\t\/\/ \tOffset ArrayCoordinate `xml:\"offset,attr\"`\n\t\/\/ \tId string `xml:\"id,attr\"`\n\t\/\/ \tHref string `xml:\"href,attr\"`\n\t\/\/ \tItems []bool `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleSOAPArrayAsSlice() {\n\tdoc := xsdfile(`\n\t <complexType name=\"BoolArray\">\n\t <complexContent>\n\t <restriction base=\"soapenc:Array\">\n\t <attribute ref=\"soapenc:arrayType\" wsdl:arrayType=\"xs:boolean[]\"\/>\n\t <\/restriction>\n\t <\/complexContent>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.HandleSOAPArrayType(),\n\t\txsdgen.SOAPArrayAsSlice(),\n\t\txsdgen.LogOutput(log.New(os.Stderr, \"\", 0)),\n\t\txsdgen.LogLevel(3),\n\t\txsdgen.IgnoreAttributes(\"offset\", \"id\", \"href\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ import \"encoding\/xml\"\n\t\/\/\n\t\/\/ type BoolArray []bool\n\t\/\/\n\t\/\/ func (a *BoolArray) MarshalXML(e *xml.Encoder, start xml.StartElement) error {\n\t\/\/ \ttag := xml.StartElement{Name: xml.Name{\"\", \",any\"}}\n\t\/\/ \tfor _, elt := range *a {\n\t\/\/ \t\tif err := e.EncodeElement(elt, tag); err != nil {\n\t\/\/ \t\t\treturn err\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ \treturn nil\n\t\/\/ }\n\t\/\/ func (a *BoolArray) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {\n\t\/\/ \tvar tok xml.Token\n\t\/\/ \tfor tok, err = d.Token(); err == nil; tok, err = d.Token() {\n\t\/\/ \t\tif tok, ok := tok.(xml.StartElement); ok {\n\t\/\/ \t\t\tvar item bool\n\t\/\/ \t\t\tif err = d.DecodeElement(&item, &tok); err == nil {\n\t\/\/ \t\t\t\t*a = append(*a, item)\n\t\/\/ \t\t\t}\n\t\/\/ \t\t}\n\t\/\/ \t\tif _, ok := tok.(xml.EndElement); ok {\n\t\/\/ \t\t\tbreak\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ \treturn err\n\t\/\/ }\n}\n\nfunc ExampleUseFieldNames() {\n\tdoc := xsdfile(`\n\t <complexType name=\"library\">\n\t <sequence>\n\t <element name=\"book\" maxOccurs=\"unbounded\">\n\t <complexType>\n\t <all>\n\t <element name=\"title\" type=\"xs:string\" \/>\n\t <element name=\"published\" type=\"xs:date\" \/>\n\t <element name=\"author\" type=\"xs:string\" \/>\n\t <\/all>\n\t <\/complexType>\n\t <\/element>\n\t <\/sequence>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.UseFieldNames())\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ import (\n\t\/\/ \t\"bytes\"\n\t\/\/ \t\"time\"\n\t\/\/ )\n\t\/\/\n\t\/\/ type Book struct {\n\t\/\/ \tTitle string `xml:\"http:\/\/www.example.com\/ title\"`\n\t\/\/ \tPublished xsdDate `xml:\"http:\/\/www.example.com\/ published\"`\n\t\/\/ \tAuthor string `xml:\"http:\/\/www.example.com\/ author\"`\n\t\/\/ }\n\t\/\/ type Library struct {\n\t\/\/ \tBook []Book `xml:\"http:\/\/www.example.com\/ book\"`\n\t\/\/ \tTitle string `xml:\"http:\/\/www.example.com\/ title\"`\n\t\/\/ \tPublished xsdDate `xml:\"http:\/\/www.example.com\/ published\"`\n\t\/\/ \tAuthor string `xml:\"http:\/\/www.example.com\/ author\"`\n\t\/\/ }\n\t\/\/ type xsdDate time.Time\n\t\/\/\n\t\/\/ func (t *xsdDate) UnmarshalText(text []byte) error {\n\t\/\/ \treturn _unmarshalTime(text, (*time.Time)(t), \"2006-01-02\")\n\t\/\/ }\n\t\/\/ func (t *xsdDate) MarshalText() ([]byte, error) {\n\t\/\/ \treturn []byte((*time.Time)(t).Format(\"2006-01-02\")), nil\n\t\/\/ }\n\t\/\/ func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {\n\t\/\/ \ts := string(bytes.TrimSpace(text))\n\t\/\/ \t*t, err = time.Parse(format, s)\n\t\/\/ \tif _, ok := err.(*time.ParseError); ok {\n\t\/\/ \t\t*t, err = time.Parse(format+\"Z07:00\", s)\n\t\/\/ \t}\n\t\/\/ \treturn err\n\t\/\/ }\n}\n<commit_msg>Add missing whitespace to make tests happy<commit_after>package xsdgen_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"aqwari.net\/xml\/xsdgen\"\n)\n\nfunc tmpfile() *os.File {\n\tf, err := ioutil.TempFile(\"\", \"xsdgen_test\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}\n\nfunc xsdfile(s string) (filename string) {\n\tfile := tmpfile()\n\tdefer file.Close()\n\tfmt.Fprintf(file, `\n\t\t<schema xmlns=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\t\t xmlns:tns=\"http:\/\/www.example.com\/\"\n\t\t xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\t\t xmlns:soapenc=\"http:\/\/schemas.xmlsoap.org\/soap\/encoding\/\"\n\t\t xmlns:wsdl=\"http:\/\/schemas.xmlsoap.org\/wsdl\/\"\n\t\t targetNamespace=\"http:\/\/www.example.com\/\">\n\t\t %s\n\t\t<\/schema>\n\t`, s)\n\treturn file.Name()\n}\n\nfunc ExampleConfig_GenCLI() {\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.IgnoreAttributes(\"id\", \"href\", \"offset\"),\n\t\txsdgen.IgnoreElements(\"comment\"),\n\t\txsdgen.PackageName(\"webapi\"),\n\t\txsdgen.Replace(\"_\", \"\"),\n\t\txsdgen.HandleSOAPArrayType(),\n\t\txsdgen.SOAPArrayAsSlice(),\n\t)\n\tif err := cfg.GenCLI(\"webapi.xsd\", \"deps\/soap11.xsd\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleLogOutput() {\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.LogOutput(log.New(os.Stderr, \"\", 0)),\n\t\txsdgen.LogLevel(2))\n\tif err := cfg.GenCLI(\"file.wsdl\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleIgnoreAttributes() {\n\tdoc := xsdfile(`\n\t <complexType name=\"ArrayOfString\">\n\t <any maxOccurs=\"unbounded\" \/>\n\t <attribute name=\"soapenc:arrayType\" type=\"xs:string\" \/>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.IgnoreAttributes(\"arrayType\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type ArrayOfString struct {\n\t\/\/ \tItems []string `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleIgnoreElements() {\n\tdoc := xsdfile(`\n\t <complexType name=\"Person\">\n\t <sequence>\n\t <element name=\"name\" type=\"xs:string\" \/>\n\t <element name=\"deceased\" type=\"soapenc:boolean\" \/>\n\t <element name=\"private\" type=\"xs:int\" \/>\n\t <\/sequence>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.IgnoreElements(\"private\"),\n\t\txsdgen.IgnoreAttributes(\"id\", \"href\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type Person struct {\n\t\/\/ \tName string `xml:\"http:\/\/www.example.com\/ name\"`\n\t\/\/ \tDeceased bool `xml:\"http:\/\/www.example.com\/ deceased\"`\n\t\/\/ }\n}\n\nfunc ExamplePackageName() {\n\tdoc := xsdfile(`\n\t <simpleType name=\"zipcode\">\n\t <restriction base=\"xs:string\">\n\t <length value=\"10\" \/>\n\t <\/restriction>\n\t <\/simpleType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.PackageName(\"postal\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package postal\n\t\/\/\n\t\/\/ \/\/ May be no more than 10 items long\n\t\/\/ type Zipcode string\n}\n\nfunc ExampleReplace() {\n\tdoc := xsdfile(`\n\t <complexType name=\"ArrayOfString\">\n\t <any maxOccurs=\"unbounded\" \/>\n\t <attribute name=\"soapenc:arrayType\" type=\"xs:string\" \/>\n\t <\/complexType>\n\t`)\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.Replace(\"ArrayOf(.*)\", \"${1}Array\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type StringArray struct {\n\t\/\/ \tArrayType string `xml:\"arrayType,attr\"`\n\t\/\/ \tItems []string `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleHandleSOAPArrayType() {\n\tdoc := xsdfile(`\n\t <complexType name=\"BoolArray\">\n\t <complexContent>\n\t <restriction base=\"soapenc:Array\">\n\t <attribute ref=\"soapenc:arrayType\" wsdl:arrayType=\"xs:boolean[]\"\/>\n\t <\/restriction>\n\t <\/complexContent>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.HandleSOAPArrayType())\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ type BoolArray struct {\n\t\/\/ \tOffset ArrayCoordinate `xml:\"offset,attr\"`\n\t\/\/ \tId string `xml:\"id,attr\"`\n\t\/\/ \tHref string `xml:\"href,attr\"`\n\t\/\/ \tItems []bool `xml:\",any\"`\n\t\/\/ }\n}\n\nfunc ExampleSOAPArrayAsSlice() {\n\tdoc := xsdfile(`\n\t <complexType name=\"BoolArray\">\n\t <complexContent>\n\t <restriction base=\"soapenc:Array\">\n\t <attribute ref=\"soapenc:arrayType\" wsdl:arrayType=\"xs:boolean[]\"\/>\n\t <\/restriction>\n\t <\/complexContent>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(\n\t\txsdgen.HandleSOAPArrayType(),\n\t\txsdgen.SOAPArrayAsSlice(),\n\t\txsdgen.LogOutput(log.New(os.Stderr, \"\", 0)),\n\t\txsdgen.LogLevel(3),\n\t\txsdgen.IgnoreAttributes(\"offset\", \"id\", \"href\"))\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ import \"encoding\/xml\"\n\t\/\/\n\t\/\/ type BoolArray []bool\n\t\/\/\n\t\/\/ func (a *BoolArray) MarshalXML(e *xml.Encoder, start xml.StartElement) error {\n\t\/\/ \ttag := xml.StartElement{Name: xml.Name{\"\", \",any\"}}\n\t\/\/ \tfor _, elt := range *a {\n\t\/\/ \t\tif err := e.EncodeElement(elt, tag); err != nil {\n\t\/\/ \t\t\treturn err\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ \treturn nil\n\t\/\/ }\n\t\/\/ func (a *BoolArray) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {\n\t\/\/ \tvar tok xml.Token\n\t\/\/ \tfor tok, err = d.Token(); err == nil; tok, err = d.Token() {\n\t\/\/ \t\tif tok, ok := tok.(xml.StartElement); ok {\n\t\/\/ \t\t\tvar item bool\n\t\/\/ \t\t\tif err = d.DecodeElement(&item, &tok); err == nil {\n\t\/\/ \t\t\t\t*a = append(*a, item)\n\t\/\/ \t\t\t}\n\t\/\/ \t\t}\n\t\/\/ \t\tif _, ok := tok.(xml.EndElement); ok {\n\t\/\/ \t\t\tbreak\n\t\/\/ \t\t}\n\t\/\/ \t}\n\t\/\/ \treturn err\n\t\/\/ }\n}\n\nfunc ExampleUseFieldNames() {\n\tdoc := xsdfile(`\n\t <complexType name=\"library\">\n\t <sequence>\n\t <element name=\"book\" maxOccurs=\"unbounded\">\n\t <complexType>\n\t <all>\n\t <element name=\"title\" type=\"xs:string\" \/>\n\t <element name=\"published\" type=\"xs:date\" \/>\n\t <element name=\"author\" type=\"xs:string\" \/>\n\t <\/all>\n\t <\/complexType>\n\t <\/element>\n\t <\/sequence>\n\t <\/complexType>`)\n\n\tvar cfg xsdgen.Config\n\tcfg.Option(xsdgen.UseFieldNames())\n\n\tout, err := cfg.GenSource(doc)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", out)\n\n\t\/\/ Output: package ws\n\t\/\/\n\t\/\/ import (\n\t\/\/ \t\"bytes\"\n\t\/\/ \t\"time\"\n\t\/\/ )\n\t\/\/\n\t\/\/ type Book struct {\n\t\/\/ \tTitle string `xml:\"http:\/\/www.example.com\/ title\"`\n\t\/\/ \tPublished xsdDate `xml:\"http:\/\/www.example.com\/ published\"`\n\t\/\/ \tAuthor string `xml:\"http:\/\/www.example.com\/ author\"`\n\t\/\/ }\n\t\/\/\n\t\/\/ type Library struct {\n\t\/\/ \tBook []Book `xml:\"http:\/\/www.example.com\/ book\"`\n\t\/\/ \tTitle string `xml:\"http:\/\/www.example.com\/ title\"`\n\t\/\/ \tPublished xsdDate `xml:\"http:\/\/www.example.com\/ published\"`\n\t\/\/ \tAuthor string `xml:\"http:\/\/www.example.com\/ author\"`\n\t\/\/ }\n\t\/\/\n\t\/\/ type xsdDate time.Time\n\t\/\/\n\t\/\/ func (t *xsdDate) UnmarshalText(text []byte) error {\n\t\/\/ \treturn _unmarshalTime(text, (*time.Time)(t), \"2006-01-02\")\n\t\/\/ }\n\t\/\/ func (t *xsdDate) MarshalText() ([]byte, error) {\n\t\/\/ \treturn []byte((*time.Time)(t).Format(\"2006-01-02\")), nil\n\t\/\/ }\n\t\/\/ func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {\n\t\/\/ \ts := string(bytes.TrimSpace(text))\n\t\/\/ \t*t, err = time.Parse(format, s)\n\t\/\/ \tif _, ok := err.(*time.ParseError); ok {\n\t\/\/ \t\t*t, err = time.Parse(format+\"Z07:00\", s)\n\t\/\/ \t}\n\t\/\/ \treturn err\n\t\/\/ }\n}\n<|endoftext|>"} {"text":"<commit_before>package renderweb\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/rs\/xhandler\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"tower.pro\/renderer\/components\"\n\t\"tower.pro\/renderer\/middlewares\"\n\t\"tower.pro\/renderer\/template\"\n)\n\ntype webOptions struct {\n\talwaysHTML bool\n\treqTimeout time.Duration\n\tdefaultCtx template.Context\n\n\tmiddlewares []middlewares.Handler\n\tcomponentSetter middlewares.Handler\n\ttemplateCtxSetter middlewares.Handler\n}\n\n\/\/ Option - Sets web server handler options.\ntype Option func(*webOptions)\n\n\/\/ WithComponentSetter - Sets component reader HTTP request middleware.\nfunc WithComponentSetter(componentSetter middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.componentSetter = componentSetter\n\t}\n}\n\n\/\/ WithTemplateContextSetter - Sets component template context setter HTTP request middleware.\nfunc WithTemplateContextSetter(templateCtxSetter middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.templateCtxSetter = templateCtxSetter\n\t}\n}\n\n\/\/ WithTimeout - Sets API server request timeout.\nfunc WithTimeout(t time.Duration) Option {\n\treturn func(o *webOptions) {\n\t\to.reqTimeout = t\n\t}\n}\n\n\/\/ WithAlwaysHTML - Responds with html only when enabled.\nfunc WithAlwaysHTML(enable ...bool) Option {\n\treturn func(o *webOptions) {\n\t\tif len(enable) == 0 {\n\t\t\to.alwaysHTML = true\n\t\t} else {\n\t\t\to.alwaysHTML = enable[0]\n\t\t}\n\t}\n}\n\n\/\/ WithDefaultTemplateContext - Sets default template context.\nfunc WithDefaultTemplateContext(ctx template.Context) Option {\n\treturn func(o *webOptions) {\n\t\to.defaultCtx = ctx\n\t}\n}\n\n\/\/ WithMiddleware - Adds a middleware.\nfunc WithMiddleware(m middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.middlewares = append(o.middlewares, m)\n\t}\n}\n\nfunc defaultCtxSetter(o *webOptions) middlewares.Handler {\n\treturn ToMiddleware(func(ctx context.Context, w http.ResponseWriter, r *http.Request, next xhandler.HandlerC) {\n\t\tif _, ok := components.TemplateContext(ctx); !ok && o.defaultCtx != nil {\n\t\t\tctx = components.NewTemplateContext(ctx, o.defaultCtx.Clone())\n\t\t}\n\t\tnext.ServeHTTPC(ctx, w, r)\n\t})\n}\n<commit_msg>renderweb: tracing options<commit_after>package renderweb\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/rs\/xhandler\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"tower.pro\/renderer\/components\"\n\t\"tower.pro\/renderer\/middlewares\"\n\t\"tower.pro\/renderer\/template\"\n)\n\ntype webOptions struct {\n\ttracing bool\n\talwaysHTML bool\n\treqTimeout time.Duration\n\tdefaultCtx template.Context\n\n\tmiddlewares []middlewares.Handler\n\tcomponentSetter middlewares.Handler\n\ttemplateCtxSetter middlewares.Handler\n}\n\n\/\/ Option - Sets web server handler options.\ntype Option func(*webOptions)\n\nfunc constructOpts(opts ...Option) *webOptions {\n\to := &webOptions{\n\t\treqTimeout: time.Second * 15,\n\t\tcomponentSetter: UnmarshalFromRequest,\n\t}\n\to.templateCtxSetter = defaultCtxSetter(o)\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n\treturn o\n}\n\n\/\/ WithTracing - Enables tracing. Uses first parameter if any.\nfunc WithTracing(enable ...bool) Option {\n\treturn func(o *webOptions) {\n\t\tif len(enable) == 0 {\n\t\t\to.tracing = true\n\t\t} else {\n\t\t\to.tracing = enable[0]\n\t\t}\n\t}\n}\n\n\/\/ WithComponentSetter - Sets component reader HTTP request middleware.\nfunc WithComponentSetter(componentSetter middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.componentSetter = componentSetter\n\t}\n}\n\n\/\/ WithTemplateContextSetter - Sets component template context setter HTTP request middleware.\nfunc WithTemplateContextSetter(templateCtxSetter middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.templateCtxSetter = templateCtxSetter\n\t}\n}\n\n\/\/ WithTimeout - Sets API server request timeout.\nfunc WithTimeout(t time.Duration) Option {\n\treturn func(o *webOptions) {\n\t\to.reqTimeout = t\n\t}\n}\n\n\/\/ WithAlwaysHTML - Responds with html only when enabled. Uses first parameter if any.\nfunc WithAlwaysHTML(enable ...bool) Option {\n\treturn func(o *webOptions) {\n\t\tif len(enable) == 0 {\n\t\t\to.alwaysHTML = true\n\t\t} else {\n\t\t\to.alwaysHTML = enable[0]\n\t\t}\n\t}\n}\n\n\/\/ WithDefaultTemplateContext - Sets default template context.\nfunc WithDefaultTemplateContext(ctx template.Context) Option {\n\treturn func(o *webOptions) {\n\t\to.defaultCtx = ctx\n\t}\n}\n\n\/\/ WithMiddleware - Adds a middleware.\nfunc WithMiddleware(m middlewares.Handler) Option {\n\treturn func(o *webOptions) {\n\t\to.middlewares = append(o.middlewares, m)\n\t}\n}\n\nfunc defaultCtxSetter(o *webOptions) middlewares.Handler {\n\treturn ToMiddleware(func(ctx context.Context, w http.ResponseWriter, r *http.Request, next xhandler.HandlerC) {\n\t\tif _, ok := components.TemplateContext(ctx); !ok && o.defaultCtx != nil {\n\t\t\tctx = components.NewTemplateContext(ctx, o.defaultCtx.Clone())\n\t\t}\n\t\tnext.ServeHTTPC(ctx, w, r)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package repo\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kopia\/kopia\/blob\"\n)\n\nconst flushPackIndexTimeout = 10 * time.Minute\nconst packObjectPrefix = \"P\"\n\ntype packInfo struct {\n\tcurrentPackData bytes.Buffer\n\tcurrentPackIndex *packIndex\n\tcurrentPackID string\n}\n\ntype blockLocation struct {\n\tpackIndex int\n\tobjectIndex int\n}\n\ntype packManager struct {\n\tobjectManager *ObjectManager\n\tstorage blob.Storage\n\n\tmu sync.RWMutex\n\tblockToIndex map[string]*packIndex\n\n\tpendingPackIndexes packIndexes\n\tflushPackIndexesAfter time.Time\n\n\tpackGroups map[string]*packInfo\n}\n\nfunc (p *packManager) enabled() bool {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.pendingPackIndexes != nil\n}\n\nfunc (p *packManager) blockIDToPackSection(blockID string) (ObjectIDSection, bool, error) {\n\tif strings.HasPrefix(blockID, packObjectPrefix) {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tpi, err := p.ensurePackIndexesLoaded()\n\tif err != nil {\n\t\treturn ObjectIDSection{}, false, fmt.Errorf(\"can't load pack index: %v\", err)\n\t}\n\n\tndx := pi[blockID]\n\tif ndx == nil {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tblk := ndx.Items[blockID]\n\tif blk == \"\" {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tif plus := strings.IndexByte(blk, '+'); plus > 0 {\n\t\tif start, err := strconv.ParseInt(blk[0:plus], 10, 64); err == nil {\n\t\t\tif length, err := strconv.ParseInt(blk[plus+1:], 10, 64); err == nil {\n\t\t\t\tif base, err := ParseObjectID(ndx.PackObject); err == nil {\n\t\t\t\t\treturn ObjectIDSection{\n\t\t\t\t\t\tBase: base,\n\t\t\t\t\t\tStart: start,\n\t\t\t\t\t\tLength: length,\n\t\t\t\t\t}, true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ObjectIDSection{}, false, fmt.Errorf(\"invalid pack index for %q\", blockID)\n}\n\nfunc (p *packManager) begin() error {\n\tp.ensurePackIndexesLoaded()\n\tp.flushPackIndexesAfter = time.Now().Add(flushPackIndexTimeout)\n\tp.pendingPackIndexes = make(packIndexes)\n\treturn nil\n}\n\nfunc (p *packManager) AddToPack(packGroup string, blockID string, data []byte) (ObjectID, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t\/\/ See if we already have this block ID in some pack.\n\tif _, ok := p.blockToIndex[blockID]; ok {\n\t\treturn ObjectID{StorageBlock: blockID}, nil\n\t}\n\n\tg := p.packGroups[packGroup]\n\tif g == nil {\n\t\tg = &packInfo{}\n\t\tp.packGroups[packGroup] = g\n\t}\n\n\tif g.currentPackIndex == nil {\n\t\tg.currentPackIndex = &packIndex{\n\t\t\tItems: make(map[string]string),\n\t\t\tPackGroup: packGroup,\n\t\t\tCreateTime: time.Now().UTC(),\n\t\t}\n\t\tg.currentPackID = p.newPackID()\n\t\tp.pendingPackIndexes[g.currentPackID] = g.currentPackIndex\n\t\tg.currentPackData.Reset()\n\t}\n\n\toffset := g.currentPackData.Len()\n\tg.currentPackData.Write(data)\n\tg.currentPackIndex.Items[blockID] = fmt.Sprintf(\"%v+%v\", int64(offset), int64(len(data)))\n\n\tif g.currentPackData.Len() >= p.objectManager.format.MaxPackFileLength {\n\t\tif err := p.finishCurrentPackLocked(); err != nil {\n\t\t\treturn NullObjectID, err\n\t\t}\n\t}\n\n\tif time.Now().After(p.flushPackIndexesAfter) {\n\t\tp.flushPackIndexesLocked()\n\t}\n\n\tp.blockToIndex[blockID] = g.currentPackIndex\n\treturn ObjectID{StorageBlock: blockID}, nil\n}\n\nfunc (p *packManager) finishPacking() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tif err := p.finishCurrentPackLocked(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.flushPackIndexesLocked(); err != nil {\n\t\treturn err\n\t}\n\n\tp.pendingPackIndexes = nil\n\treturn nil\n}\n\nfunc (p *packManager) flushPackIndexesLocked() error {\n\tif len(p.pendingPackIndexes) > 0 {\n\t\tlog.Printf(\"saving %v pack indexes\", len(p.pendingPackIndexes))\n\t\tif err := p.writePackIndexes(p.pendingPackIndexes); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.flushPackIndexesAfter = time.Now().Add(flushPackIndexTimeout)\n\tp.pendingPackIndexes = make(packIndexes)\n\treturn nil\n}\n\nfunc (p *packManager) writePackIndexes(ndx packIndexes) error {\n\tw := p.objectManager.NewWriter(WriterOptions{\n\t\tdisablePacking: true,\n\t\tDescription: \"pack index\",\n\t\tBlockNamePrefix: packObjectPrefix,\n\t\tsplitter: newNeverSplitter(),\n\t})\n\tdefer w.Close()\n\n\tzw := gzip.NewWriter(w)\n\tif err := json.NewEncoder(zw).Encode(p.pendingPackIndexes); err != nil {\n\t\treturn fmt.Errorf(\"can't encode pack index: %v\", err)\n\t}\n\tzw.Close()\n\n\tif _, err := w.Result(); err != nil {\n\t\treturn fmt.Errorf(\"can't save pack index object: %v\", err)\n\t}\n\n\treturn nil\n}\nfunc (p *packManager) finishCurrentPackLocked() error {\n\tfor _, g := range p.packGroups {\n\t\tif err := p.finishPackLocked(g); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *packManager) finishPackLocked(g *packInfo) error {\n\tif g.currentPackIndex == nil {\n\t\treturn nil\n\t}\n\tw := p.objectManager.NewWriter(WriterOptions{\n\t\tDescription: fmt.Sprintf(\"pack:%v\", g.currentPackID),\n\t\tsplitter: newNeverSplitter(),\n\t\tdisablePacking: true,\n\t})\n\tdefer w.Close()\n\n\tif _, err := g.currentPackData.WriteTo(w); err != nil {\n\t\treturn fmt.Errorf(\"unable to write pack: %v\", err)\n\t}\n\tg.currentPackData.Reset()\n\toid, err := w.Result()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't save pack data: %v\", err)\n\t}\n\n\tg.currentPackIndex.PackObject = oid.String()\n\tg.currentPackIndex = nil\n\n\treturn nil\n}\n\nfunc (p *packManager) loadMergedPackIndex(olderThan *time.Time) (map[string]*packIndex, []string, error) {\n\tch, cancel := p.objectManager.storage.ListBlocks(packObjectPrefix)\n\tdefer cancel()\n\n\tt0 := time.Now()\n\n\tvar wg sync.WaitGroup\n\n\terrors := make(chan error, parallelFetches)\n\tvar mu sync.Mutex\n\n\tpackIndexData := map[string][]byte{}\n\ttotalSize := 0\n\tvar blockIDs []string\n\tfor i := 0; i < parallelFetches; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor b := range ch {\n\t\t\t\tif b.Error != nil {\n\t\t\t\t\terrors <- b.Error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif olderThan != nil && b.TimeStamp.After(*olderThan) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tr, err := p.objectManager.Open(ObjectID{StorageBlock: b.BlockID})\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdata, err := ioutil.ReadAll(r)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tmu.Lock()\n\t\t\t\tpackIndexData[b.BlockID] = data\n\t\t\t\tblockIDs = append(blockIDs, b.BlockID)\n\t\t\t\ttotalSize += len(data)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errors)\n\n\t\/\/ Propagate async errors, if any.\n\tfor err := range errors {\n\t\treturn nil, nil, err\n\t}\n\n\tif false {\n\t\tlog.Printf(\"loaded %v pack indexes (%v bytes) in %v\", len(packIndexData), totalSize, time.Since(t0))\n\t}\n\n\tmerged := make(packIndexes)\n\tfor blockID, content := range packIndexData {\n\t\tvar r io.Reader = bytes.NewReader(content)\n\t\tzr, err := gzip.NewReader(r)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"unable to read pack index from %q: %v\", blockID, err)\n\t\t}\n\n\t\tpi, err := loadPackIndexes(zr)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmerged.merge(pi)\n\t}\n\n\treturn merged, blockIDs, nil\n}\n\nfunc (p *packManager) ensurePackIndexesLoaded() (map[string]*packIndex, error) {\n\tp.mu.RLock()\n\tpi := p.blockToIndex\n\tp.mu.RUnlock()\n\tif pi != nil {\n\t\treturn pi, nil\n\t}\n\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tmerged, _, err := p.loadMergedPackIndex(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpi = make(map[string]*packIndex)\n\tfor _, pck := range merged {\n\t\tfor blockID := range pck.Items {\n\t\t\tpi[blockID] = pck\n\t\t}\n\t}\n\n\tp.blockToIndex = pi\n\t\/\/ log.Printf(\"loaded pack index with %v entries\", len(p.blockToIndex))\n\n\treturn pi, nil\n}\n\nfunc (p *packManager) Compact(cutoffTime time.Time) error {\n\tmerged, blockIDs, err := p.loadMergedPackIndex(&cutoffTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(blockIDs) < parallelFetches {\n\t\treturn nil\n\t}\n\n\tif err := p.writePackIndexes(merged); err != nil {\n\t\treturn err\n\t}\n\n\tch := makeStringChannel(blockIDs)\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < parallelDeletes; i++ {\n\t\twg.Add(1)\n\t\tgo func(workerID int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor blockID := range ch {\n\t\t\t\tif err := p.objectManager.storage.DeleteBlock(blockID); err != nil {\n\t\t\t\t\tlog.Printf(\"warning: unable to delete %q: %v\", blockID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\treturn nil\n}\n\nfunc makeStringChannel(s []string) <-chan string {\n\tch := make(chan string)\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tfor _, v := range s {\n\t\t\tch <- v\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc (p *packManager) newPackID() string {\n\tid := make([]byte, 8)\n\trand.Read(id)\n\treturn hex.EncodeToString(id)\n}\n\nfunc (p *packManager) Flush() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.finishCurrentPackLocked()\n}\n<commit_msg>also close current pack when time-based flushing happens<commit_after>package repo\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/kopia\/kopia\/blob\"\n)\n\nconst flushPackIndexTimeout = 10 * time.Second\nconst packObjectPrefix = \"P\"\n\ntype packInfo struct {\n\tcurrentPackData bytes.Buffer\n\tcurrentPackIndex *packIndex\n\tcurrentPackID string\n}\n\ntype blockLocation struct {\n\tpackIndex int\n\tobjectIndex int\n}\n\ntype packManager struct {\n\tobjectManager *ObjectManager\n\tstorage blob.Storage\n\n\tmu sync.RWMutex\n\tblockToIndex map[string]*packIndex\n\n\tpendingPackIndexes packIndexes\n\tflushPackIndexesAfter time.Time\n\n\tpackGroups map[string]*packInfo\n}\n\nfunc (p *packManager) enabled() bool {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.pendingPackIndexes != nil\n}\n\nfunc (p *packManager) blockIDToPackSection(blockID string) (ObjectIDSection, bool, error) {\n\tif strings.HasPrefix(blockID, packObjectPrefix) {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tpi, err := p.ensurePackIndexesLoaded()\n\tif err != nil {\n\t\treturn ObjectIDSection{}, false, fmt.Errorf(\"can't load pack index: %v\", err)\n\t}\n\n\tndx := pi[blockID]\n\tif ndx == nil {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tblk := ndx.Items[blockID]\n\tif blk == \"\" {\n\t\treturn ObjectIDSection{}, false, nil\n\t}\n\n\tif plus := strings.IndexByte(blk, '+'); plus > 0 {\n\t\tif start, err := strconv.ParseInt(blk[0:plus], 10, 64); err == nil {\n\t\t\tif length, err := strconv.ParseInt(blk[plus+1:], 10, 64); err == nil {\n\t\t\t\tif base, err := ParseObjectID(ndx.PackObject); err == nil {\n\t\t\t\t\treturn ObjectIDSection{\n\t\t\t\t\t\tBase: base,\n\t\t\t\t\t\tStart: start,\n\t\t\t\t\t\tLength: length,\n\t\t\t\t\t}, true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ObjectIDSection{}, false, fmt.Errorf(\"invalid pack index for %q\", blockID)\n}\n\nfunc (p *packManager) begin() error {\n\tp.ensurePackIndexesLoaded()\n\tp.flushPackIndexesAfter = time.Now().Add(flushPackIndexTimeout)\n\tp.pendingPackIndexes = make(packIndexes)\n\treturn nil\n}\n\nfunc (p *packManager) AddToPack(packGroup string, blockID string, data []byte) (ObjectID, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t\/\/ See if we already have this block ID in some pack.\n\tif _, ok := p.blockToIndex[blockID]; ok {\n\t\treturn ObjectID{StorageBlock: blockID}, nil\n\t}\n\n\tg := p.packGroups[packGroup]\n\tif g == nil {\n\t\tg = &packInfo{}\n\t\tp.packGroups[packGroup] = g\n\t}\n\n\tif g.currentPackIndex == nil {\n\t\tg.currentPackIndex = &packIndex{\n\t\t\tItems: make(map[string]string),\n\t\t\tPackGroup: packGroup,\n\t\t\tCreateTime: time.Now().UTC(),\n\t\t}\n\t\tg.currentPackID = p.newPackID()\n\t\tp.pendingPackIndexes[g.currentPackID] = g.currentPackIndex\n\t\tg.currentPackData.Reset()\n\t}\n\n\toffset := g.currentPackData.Len()\n\tg.currentPackData.Write(data)\n\tg.currentPackIndex.Items[blockID] = fmt.Sprintf(\"%v+%v\", int64(offset), int64(len(data)))\n\n\tif g.currentPackData.Len() >= p.objectManager.format.MaxPackFileLength {\n\t\tif err := p.finishCurrentPackLocked(); err != nil {\n\t\t\treturn NullObjectID, err\n\t\t}\n\t}\n\n\tif time.Now().After(p.flushPackIndexesAfter) {\n\t\tif err := p.finishCurrentPackLocked(); err != nil {\n\t\t\treturn NullObjectID, err\n\t\t}\n\t\tif err := p.flushPackIndexesLocked(); err != nil {\n\t\t\treturn NullObjectID, err\n\t\t}\n\t}\n\n\tp.blockToIndex[blockID] = g.currentPackIndex\n\treturn ObjectID{StorageBlock: blockID}, nil\n}\n\nfunc (p *packManager) finishPacking() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tif err := p.finishCurrentPackLocked(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.flushPackIndexesLocked(); err != nil {\n\t\treturn err\n\t}\n\n\tp.pendingPackIndexes = nil\n\treturn nil\n}\n\nfunc (p *packManager) flushPackIndexesLocked() error {\n\tif len(p.pendingPackIndexes) > 0 {\n\t\tlog.Printf(\"saving %v pack indexes\", len(p.pendingPackIndexes))\n\t\tif err := p.writePackIndexes(p.pendingPackIndexes); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.flushPackIndexesAfter = time.Now().Add(flushPackIndexTimeout)\n\tp.pendingPackIndexes = make(packIndexes)\n\treturn nil\n}\n\nfunc (p *packManager) writePackIndexes(ndx packIndexes) error {\n\tw := p.objectManager.NewWriter(WriterOptions{\n\t\tdisablePacking: true,\n\t\tDescription: \"pack index\",\n\t\tBlockNamePrefix: packObjectPrefix,\n\t\tsplitter: newNeverSplitter(),\n\t})\n\tdefer w.Close()\n\n\tzw := gzip.NewWriter(w)\n\tif err := json.NewEncoder(zw).Encode(p.pendingPackIndexes); err != nil {\n\t\treturn fmt.Errorf(\"can't encode pack index: %v\", err)\n\t}\n\tzw.Close()\n\n\tif _, err := w.Result(); err != nil {\n\t\treturn fmt.Errorf(\"can't save pack index object: %v\", err)\n\t}\n\n\treturn nil\n}\nfunc (p *packManager) finishCurrentPackLocked() error {\n\tfor _, g := range p.packGroups {\n\t\tif err := p.finishPackLocked(g); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *packManager) finishPackLocked(g *packInfo) error {\n\tif g.currentPackIndex == nil {\n\t\treturn nil\n\t}\n\tw := p.objectManager.NewWriter(WriterOptions{\n\t\tDescription: fmt.Sprintf(\"pack:%v\", g.currentPackID),\n\t\tsplitter: newNeverSplitter(),\n\t\tdisablePacking: true,\n\t})\n\tdefer w.Close()\n\n\tif _, err := g.currentPackData.WriteTo(w); err != nil {\n\t\treturn fmt.Errorf(\"unable to write pack: %v\", err)\n\t}\n\tg.currentPackData.Reset()\n\toid, err := w.Result()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't save pack data: %v\", err)\n\t}\n\n\tg.currentPackIndex.PackObject = oid.String()\n\tg.currentPackIndex = nil\n\n\treturn nil\n}\n\nfunc (p *packManager) loadMergedPackIndex(olderThan *time.Time) (map[string]*packIndex, []string, error) {\n\tch, cancel := p.objectManager.storage.ListBlocks(packObjectPrefix)\n\tdefer cancel()\n\n\tt0 := time.Now()\n\n\tvar wg sync.WaitGroup\n\n\terrors := make(chan error, parallelFetches)\n\tvar mu sync.Mutex\n\n\tpackIndexData := map[string][]byte{}\n\ttotalSize := 0\n\tvar blockIDs []string\n\tfor i := 0; i < parallelFetches; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor b := range ch {\n\t\t\t\tif b.Error != nil {\n\t\t\t\t\terrors <- b.Error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif olderThan != nil && b.TimeStamp.After(*olderThan) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tr, err := p.objectManager.Open(ObjectID{StorageBlock: b.BlockID})\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tdata, err := ioutil.ReadAll(r)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tmu.Lock()\n\t\t\t\tpackIndexData[b.BlockID] = data\n\t\t\t\tblockIDs = append(blockIDs, b.BlockID)\n\t\t\t\ttotalSize += len(data)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errors)\n\n\t\/\/ Propagate async errors, if any.\n\tfor err := range errors {\n\t\treturn nil, nil, err\n\t}\n\n\tif false {\n\t\tlog.Printf(\"loaded %v pack indexes (%v bytes) in %v\", len(packIndexData), totalSize, time.Since(t0))\n\t}\n\n\tmerged := make(packIndexes)\n\tfor blockID, content := range packIndexData {\n\t\tvar r io.Reader = bytes.NewReader(content)\n\t\tzr, err := gzip.NewReader(r)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"unable to read pack index from %q: %v\", blockID, err)\n\t\t}\n\n\t\tpi, err := loadPackIndexes(zr)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmerged.merge(pi)\n\t}\n\n\treturn merged, blockIDs, nil\n}\n\nfunc (p *packManager) ensurePackIndexesLoaded() (map[string]*packIndex, error) {\n\tp.mu.RLock()\n\tpi := p.blockToIndex\n\tp.mu.RUnlock()\n\tif pi != nil {\n\t\treturn pi, nil\n\t}\n\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tmerged, _, err := p.loadMergedPackIndex(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpi = make(map[string]*packIndex)\n\tfor _, pck := range merged {\n\t\tfor blockID := range pck.Items {\n\t\t\tpi[blockID] = pck\n\t\t}\n\t}\n\n\tp.blockToIndex = pi\n\t\/\/ log.Printf(\"loaded pack index with %v entries\", len(p.blockToIndex))\n\n\treturn pi, nil\n}\n\nfunc (p *packManager) Compact(cutoffTime time.Time) error {\n\tmerged, blockIDs, err := p.loadMergedPackIndex(&cutoffTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(blockIDs) < parallelFetches {\n\t\treturn nil\n\t}\n\n\tif err := p.writePackIndexes(merged); err != nil {\n\t\treturn err\n\t}\n\n\tch := makeStringChannel(blockIDs)\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < parallelDeletes; i++ {\n\t\twg.Add(1)\n\t\tgo func(workerID int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor blockID := range ch {\n\t\t\t\tif err := p.objectManager.storage.DeleteBlock(blockID); err != nil {\n\t\t\t\t\tlog.Printf(\"warning: unable to delete %q: %v\", blockID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\treturn nil\n}\n\nfunc makeStringChannel(s []string) <-chan string {\n\tch := make(chan string)\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tfor _, v := range s {\n\t\t\tch <- v\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc (p *packManager) newPackID() string {\n\tid := make([]byte, 8)\n\trand.Read(id)\n\treturn hex.EncodeToString(id)\n}\n\nfunc (p *packManager) Flush() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.finishCurrentPackLocked()\n}\n<|endoftext|>"} {"text":"<commit_before>package repo\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/spf13\/afero\"\n)\n\nfunc TestRemote(t *testing.T) {\n\tfs := afero.NewMemMapFs()\n\tfd, err := fs.OpenFile(\"remotes.yml\", os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tt.Errorf(\"Could not open in memory: %v\", err)\n\t\treturn\n\t}\n\n\tdefer fd.Close()\n\n\trms, err := NewYAMLRemotes(fd)\n\tif err != nil {\n\t\tt.Errorf(\"Creating yaml store failed: %v\", err)\n\t\treturn\n\t}\n\n\tremoteAlc := NewRemote(\"alice\", \"Qm123\")\n\tremoteBob := NewRemote(\"bob\", \"Qm321\")\n\tremoteChr := NewRemote(\"chris\", \"QmABC\")\n\tremoteMal := NewRemote(\"micrathene\", \"Qm123\")\n\n\tfor _, rm := range []Remote{remoteAlc, remoteBob, remoteChr} {\n\t\tif err := rms.Insert(rm); err != nil {\n\t\t\tt.Errorf(\"Insert(%v) into the remote store failed: %v\", rm.ID(), err)\n\t\t\treturn\n\t\t}\n\n\t\tretrievedRemote, err := rms.Get(rm.ID())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Retrieving remote failed: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !RemoteIsEqual(rm, retrievedRemote) {\n\t\t\tt.Errorf(\"Remotes are not equal\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := rms.Insert(remoteMal); err == nil {\n\t\tt.Errorf(\"Insert(malicious_micra) into the remote store worked\")\n\t\treturn\n\t}\n\n\tif err := rms.Remove(\"alice\"); err != nil {\n\t\tt.Errorf(\"Removing remote failed: %v\", err)\n\t\treturn\n\t}\n\n\tif r, err := rms.Get(\"alice\"); err == nil || r != nil {\n\t\tt.Errorf(\"removed remote still there: %v (%v)\", err, r)\n\t\treturn\n\t}\n\n\tlst := AsList(rms)\n\tif lst[0].ID() != \"bob\" {\n\t\tt.Errorf(\"Not bob\")\n\t}\n\n\tif lst[1].ID() != \"chris\" {\n\t\tt.Errorf(\"Not chris\")\n\t}\n\n\tif err := rms.Close(); err != nil {\n\t\tt.Errorf(\"Closing yaml store failed: %v\", err)\n\t\treturn\n\t}\n}\n<commit_msg>repo\/remote_test.go: Purge dep on afero (pulls in sftp, wtf?!)<commit_after>package repo\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/disorganizer\/brig\/util\/testutil\"\n)\n\nfunc TestRemote(t *testing.T) {\n\tpath := filepath.Join(os.TempDir(), \"brig-test-remote.yml\")\n\tfd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tt.Errorf(\"Could not open in memory: %v\", err)\n\t\treturn\n\t}\n\n\tdefer fd.Close()\n\tdefer testutil.Remover(t, path)\n\n\trms, err := NewYAMLRemotes(fd)\n\tif err != nil {\n\t\tt.Errorf(\"Creating yaml store failed: %v\", err)\n\t\treturn\n\t}\n\n\tremoteAlc := NewRemote(\"alice\", \"Qm123\")\n\tremoteBob := NewRemote(\"bob\", \"Qm321\")\n\tremoteChr := NewRemote(\"chris\", \"QmABC\")\n\tremoteMal := NewRemote(\"micrathene\", \"Qm123\")\n\n\tfor _, rm := range []Remote{remoteAlc, remoteBob, remoteChr} {\n\t\tif err := rms.Insert(rm); err != nil {\n\t\t\tt.Errorf(\"Insert(%v) into the remote store failed: %v\", rm.ID(), err)\n\t\t\treturn\n\t\t}\n\n\t\tretrievedRemote, err := rms.Get(rm.ID())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Retrieving remote failed: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !RemoteIsEqual(rm, retrievedRemote) {\n\t\t\tt.Errorf(\"Remotes are not equal\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := rms.Insert(remoteMal); err == nil {\n\t\tt.Errorf(\"Insert(malicious_micra) into the remote store worked\")\n\t\treturn\n\t}\n\n\tif err := rms.Remove(\"alice\"); err != nil {\n\t\tt.Errorf(\"Removing remote failed: %v\", err)\n\t\treturn\n\t}\n\n\tif r, err := rms.Get(\"alice\"); err == nil || r != nil {\n\t\tt.Errorf(\"removed remote still there: %v (%v)\", err, r)\n\t\treturn\n\t}\n\n\tlst := AsList(rms)\n\tif lst[0].ID() != \"bob\" {\n\t\tt.Errorf(\"Not bob\")\n\t}\n\n\tif lst[1].ID() != \"chris\" {\n\t\tt.Errorf(\"Not chris\")\n\t}\n\n\tif err := rms.Close(); err != nil {\n\t\tt.Errorf(\"Closing yaml store failed: %v\", err)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package reposerver\n\nimport (\n\t\"crypto\/tls\"\n\t\"os\"\n\n\t\"github.com\/argoproj\/argo-cd\/common\"\n\n\tversionpkg \"github.com\/argoproj\/argo-cd\/pkg\/apiclient\/version\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/apiclient\"\n\treposervercache \"github.com\/argoproj\/argo-cd\/reposerver\/cache\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/metrics\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/repository\"\n\t\"github.com\/argoproj\/argo-cd\/server\/version\"\n\tgrpc_util \"github.com\/argoproj\/argo-cd\/util\/grpc\"\n\ttlsutil \"github.com\/argoproj\/argo-cd\/util\/tls\"\n\n\tgrpc_middleware \"github.com\/grpc-ecosystem\/go-grpc-middleware\"\n\tgrpc_logrus \"github.com\/grpc-ecosystem\/go-grpc-middleware\/logging\/logrus\"\n\tgrpc_prometheus \"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/reflection\"\n)\n\n\/\/ ArgoCDRepoServer is the repo server implementation\ntype ArgoCDRepoServer struct {\n\tlog *log.Entry\n\tmetricsServer *metrics.MetricsServer\n\tcache *reposervercache.Cache\n\topts []grpc.ServerOption\n\tinitConstants repository.RepoServerInitConstants\n}\n\n\/\/ NewServer returns a new instance of the Argo CD Repo server\nfunc NewServer(metricsServer *metrics.MetricsServer, cache *reposervercache.Cache, tlsConfCustomizer tlsutil.ConfigCustomizer, initConstants repository.RepoServerInitConstants) (*ArgoCDRepoServer, error) {\n\t\/\/ generate TLS cert\n\thosts := []string{\n\t\t\"localhost\",\n\t\t\"argocd-repo-server\",\n\t}\n\tcert, err := tlsutil.GenerateX509KeyPair(tlsutil.CertOptions{\n\t\tHosts: hosts,\n\t\tOrganization: \"Argo CD\",\n\t\tIsCA: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := &tls.Config{Certificates: []tls.Certificate{*cert}}\n\ttlsConfCustomizer(tlsConfig)\n\n\tif os.Getenv(common.EnvEnableGRPCTimeHistogramEnv) == \"true\" {\n\t\tgrpc_prometheus.EnableHandlingTimeHistogram()\n\t}\n\n\tserverLog := log.NewEntry(log.StandardLogger())\n\tstreamInterceptors := []grpc.StreamServerInterceptor{grpc_logrus.StreamServerInterceptor(serverLog), grpc_prometheus.StreamServerInterceptor, grpc_util.PanicLoggerStreamServerInterceptor(serverLog)}\n\tunaryInterceptors := []grpc.UnaryServerInterceptor{grpc_logrus.UnaryServerInterceptor(serverLog), grpc_prometheus.UnaryServerInterceptor, grpc_util.PanicLoggerUnaryServerInterceptor(serverLog)}\n\n\treturn &ArgoCDRepoServer{\n\t\tlog: serverLog,\n\t\tmetricsServer: metricsServer,\n\t\tcache: cache,\n\t\tinitConstants: initConstants,\n\t\topts: []grpc.ServerOption{\n\t\t\tgrpc.Creds(credentials.NewTLS(tlsConfig)),\n\t\t\tgrpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)),\n\t\t\tgrpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)),\n\t\t\tgrpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize),\n\t\t\tgrpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),\n\t\t},\n\t}, nil\n}\n\n\/\/ CreateGRPC creates new configured grpc server\nfunc (a *ArgoCDRepoServer) CreateGRPC() *grpc.Server {\n\tserver := grpc.NewServer(a.opts...)\n\tversionpkg.RegisterVersionServiceServer(server, &version.Server{})\n\tmanifestService := repository.NewService(a.metricsServer, a.cache, a.initConstants)\n\tapiclient.RegisterRepoServerServiceServer(server, manifestService)\n\n\t\/\/ Register reflection service on gRPC server.\n\treflection.Register(server)\n\n\treturn server\n}\n<commit_msg>fix: add grpc health check (#5060)<commit_after>package reposerver\n\nimport (\n\t\"crypto\/tls\"\n\t\"os\"\n\n\tgrpc_middleware \"github.com\/grpc-ecosystem\/go-grpc-middleware\"\n\tgrpc_logrus \"github.com\/grpc-ecosystem\/go-grpc-middleware\/logging\/logrus\"\n\tgrpc_prometheus \"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/health\"\n\t\"google.golang.org\/grpc\/health\/grpc_health_v1\"\n\t\"google.golang.org\/grpc\/reflection\"\n\n\t\"github.com\/argoproj\/argo-cd\/common\"\n\tversionpkg \"github.com\/argoproj\/argo-cd\/pkg\/apiclient\/version\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/apiclient\"\n\treposervercache \"github.com\/argoproj\/argo-cd\/reposerver\/cache\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/metrics\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\/repository\"\n\t\"github.com\/argoproj\/argo-cd\/server\/version\"\n\tgrpc_util \"github.com\/argoproj\/argo-cd\/util\/grpc\"\n\ttlsutil \"github.com\/argoproj\/argo-cd\/util\/tls\"\n)\n\n\/\/ ArgoCDRepoServer is the repo server implementation\ntype ArgoCDRepoServer struct {\n\tlog *log.Entry\n\tmetricsServer *metrics.MetricsServer\n\tcache *reposervercache.Cache\n\topts []grpc.ServerOption\n\tinitConstants repository.RepoServerInitConstants\n}\n\n\/\/ NewServer returns a new instance of the Argo CD Repo server\nfunc NewServer(metricsServer *metrics.MetricsServer, cache *reposervercache.Cache, tlsConfCustomizer tlsutil.ConfigCustomizer, initConstants repository.RepoServerInitConstants) (*ArgoCDRepoServer, error) {\n\t\/\/ generate TLS cert\n\thosts := []string{\n\t\t\"localhost\",\n\t\t\"argocd-repo-server\",\n\t}\n\tcert, err := tlsutil.GenerateX509KeyPair(tlsutil.CertOptions{\n\t\tHosts: hosts,\n\t\tOrganization: \"Argo CD\",\n\t\tIsCA: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := &tls.Config{Certificates: []tls.Certificate{*cert}}\n\ttlsConfCustomizer(tlsConfig)\n\n\tif os.Getenv(common.EnvEnableGRPCTimeHistogramEnv) == \"true\" {\n\t\tgrpc_prometheus.EnableHandlingTimeHistogram()\n\t}\n\n\tserverLog := log.NewEntry(log.StandardLogger())\n\tstreamInterceptors := []grpc.StreamServerInterceptor{grpc_logrus.StreamServerInterceptor(serverLog), grpc_prometheus.StreamServerInterceptor, grpc_util.PanicLoggerStreamServerInterceptor(serverLog)}\n\tunaryInterceptors := []grpc.UnaryServerInterceptor{grpc_logrus.UnaryServerInterceptor(serverLog), grpc_prometheus.UnaryServerInterceptor, grpc_util.PanicLoggerUnaryServerInterceptor(serverLog)}\n\n\treturn &ArgoCDRepoServer{\n\t\tlog: serverLog,\n\t\tmetricsServer: metricsServer,\n\t\tcache: cache,\n\t\tinitConstants: initConstants,\n\t\topts: []grpc.ServerOption{\n\t\t\tgrpc.Creds(credentials.NewTLS(tlsConfig)),\n\t\t\tgrpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)),\n\t\t\tgrpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)),\n\t\t\tgrpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize),\n\t\t\tgrpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),\n\t\t},\n\t}, nil\n}\n\n\/\/ CreateGRPC creates new configured grpc server\nfunc (a *ArgoCDRepoServer) CreateGRPC() *grpc.Server {\n\tserver := grpc.NewServer(a.opts...)\n\tversionpkg.RegisterVersionServiceServer(server, &version.Server{})\n\tmanifestService := repository.NewService(a.metricsServer, a.cache, a.initConstants)\n\tapiclient.RegisterRepoServerServiceServer(server, manifestService)\n\n\thealthService := health.NewServer()\n\tgrpc_health_v1.RegisterHealthServer(server, healthService)\n\n\t\/\/ Register reflection service on gRPC server.\n\treflection.Register(server)\n\n\treturn server\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage datadog\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tdatadogV2 \"github.com\/DataDog\/datadog-api-client-go\/api\/v2\/datadog\"\n\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraformutils\"\n)\n\nvar (\n\t\/\/ UserAllowEmptyValues ...\n\tUserAllowEmptyValues = []string{}\n)\n\n\/\/ UserGenerator ...\ntype UserGenerator struct {\n\tDatadogService\n}\n\nfunc (g *UserGenerator) createResources(users []datadogV2.User) []terraformutils.Resource {\n\tresources := []terraformutils.Resource{}\n\tfor _, user := range users {\n\t\trelations := user.GetRelationships()\n\t\troles := relations.GetRoles()\n\t\t\/\/ If not roles are present, we can assume user was created via V1 API\n\t\tif len(roles.GetData()) == 0 {\n\t\t\tattr := user.GetAttributes()\n\t\t\tresources = append(resources, g.createResource(attr.GetHandle()))\n\t\t\tcontinue\n\t\t}\n\n\t\tresources = append(resources, g.createResource(user.GetId()))\n\t}\n\treturn resources\n}\n\nfunc (g *UserGenerator) createResource(userID string) terraformutils.Resource {\n\treturn terraformutils.NewSimpleResource(\n\t\tuserID,\n\t\tfmt.Sprintf(\"user_%s\", userID),\n\t\t\"datadog_user\",\n\t\t\"datadog\",\n\t\tUserAllowEmptyValues,\n\t)\n}\n\n\/\/ InitResources Generate TerraformResources from Datadog API,\n\/\/ from each user create 1 TerraformResource.\n\/\/ Need User ID as ID for terraform resource\nfunc (g *UserGenerator) InitResources() error {\n\tdatadogClientV2 := g.Args[\"datadogClientV2\"].(*datadogV2.APIClient)\n\tauthV2 := g.Args[\"authV2\"].(context.Context)\n\n\tresp, _, err := datadogClientV2.UsersApi.ListUsers(authV2).PageSize(int64(1000)).Execute()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.Resources = g.createResources(resp.GetData())\n\treturn nil\n}\n<commit_msg>support paginated responses<commit_after>\/\/ Copyright 2018 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage datadog\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\tdatadogV2 \"github.com\/DataDog\/datadog-api-client-go\/api\/v2\/datadog\"\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraformutils\"\n)\n\nvar (\n\t\/\/ UserAllowEmptyValues ...\n\tUserAllowEmptyValues = []string{}\n)\n\n\/\/ UserGenerator ...\ntype UserGenerator struct {\n\tDatadogService\n}\n\nfunc (g *UserGenerator) createResources(users []datadogV2.User) []terraformutils.Resource {\n\tresources := []terraformutils.Resource{}\n\tfor _, user := range users {\n\t\trelations := user.GetRelationships()\n\t\troles := relations.GetRoles()\n\t\t\/\/ If no roles are present, we can assume user was created via the V1 API\n\t\t\/\/ Hence, import the user via their handle\n\t\tif len(roles.GetData()) == 0 {\n\t\t\tattr := user.GetAttributes()\n\t\t\tresources = append(resources, g.createResource(attr.GetHandle()))\n\t\t\tcontinue\n\t\t}\n\n\t\tresources = append(resources, g.createResource(user.GetId()))\n\t}\n\treturn resources\n}\n\nfunc (g *UserGenerator) createResource(userID string) terraformutils.Resource {\n\treturn terraformutils.NewSimpleResource(\n\t\tuserID,\n\t\tfmt.Sprintf(\"user_%s\", userID),\n\t\t\"datadog_user\",\n\t\t\"datadog\",\n\t\tUserAllowEmptyValues,\n\t)\n}\n\n\/\/ InitResources Generate TerraformResources from Datadog API,\n\/\/ from each user create 1 TerraformResource.\n\/\/ Need User ID as ID for terraform resource\nfunc (g *UserGenerator) InitResources() error {\n\tvar users []datadogV2.User\n\n\tdatadogClientV2 := g.Args[\"datadogClientV2\"].(*datadogV2.APIClient)\n\tauthV2 := g.Args[\"authV2\"].(context.Context)\n\n\tpageSize := int64(1000)\n\tpageNumber := int64(1)\n\tremaining := int64(1)\n\n\tfor remaining > int64(0) {\n\t\tresp, _, err := datadogClientV2.UsersApi.ListUsers(authV2).PageSize(pageSize).PageNumber(pageNumber).Execute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tusers = append(users, resp.GetData()...)\n\n\t\tremaining = resp.Meta.Page.GetTotalCount() - pageSize*pageNumber\n\t\tpageNumber++\n\t}\n\n\tg.Resources = g.createResources(users)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package wemo\n\n\/*\n Need to set up a subscription service to Wemo events.\n\n 1. Take a discovered device and\n 2. Send a subscribe message to\n deviceIP:devicePort\/upnp\/event\/basicevent1\n 3. If the responce is 200, the subscription is successful and ...\n 4. ... thus it should be added to the subscribed device list\n 5. Subscriptions should be renewed around the timeout period\n 6. When state is emitted record state changes against the subscription id (SID)\n\n*\/\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n \"time\"\n \"strconv\"\n)\n\n\n\/\/ Device and Subscription Info\ntype SubscriptionInfo struct {\n DeviceInfo\n State bool\n Timeout int\n Sid string\n Host string\n}\n\n\/\/ Structure for XML to Parse to\ntype Deviceevent struct {\n\tXMLName xml.Name `xml:\"propertyset\"`\n\tBinaryState string `xml:\"property>BinaryState\"`\n}\n\n\/\/ Structure for sending subscribed event data with\ntype SubscriptionEvent struct {\n\tSid string\n\tState bool\n}\n\n\/\/ Listen for incomming subscribed state changes.\nfunc Listener(listenerAddress string, cs chan SubscriptionEvent) {\n\n\tlog.Println(\"Listening... \", listenerAddress)\n\n\thttp.HandleFunc(\"\/listener\", func(w http.ResponseWriter, r *http.Request) {\n\n\t\teventxml := Deviceevent{}\n\n\t\tif r.Method == \"NOTIFY\" {\n\n\t\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\t\tif err == nil {\n \n\t\t\t\terr := xml.Unmarshal([]byte(body), &eventxml)\n\t\t\t\tif err != nil {\n \n\t\t\t\t\tlog.Println(\"Unmarshal error: \", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n \n b, err := strconv.ParseBool(eventxml.BinaryState)\n if err == nil {\n cs <- SubscriptionEvent {r.Header.Get(\"Sid\"), b}\n }\n \n\t\t\t}\n\t\t}\n\t})\n\n\terr := http.ListenAndServe(listenerAddress, nil)\n if err != nil {\n log.Println(\"From Listen and Serve an Err! \", err)\n }\n}\n\n\/\/ Manage firstly the subscription and then the resubscription of this device.\nfunc (self *Device) ManageSubscription(listenerAddress string, timeout int, subscriptions map[string]*SubscriptionInfo) (string, int){\n \/* Subscribe to the device. Add device to subscriptions list\n \n Once the device has a SID, it should have resubscriptions requested before the timeout.\n \n Should a resubscription fail, an attempt should be made to unsubscribe and \n then subscribe to the device in question. Returning the new SID or an error\n \n The new SID should be updated in the subscription list and the old item removed.\n *\/\n \n \/\/ Initial Subscribe\n info, _ := self.FetchDeviceInfo()\n \n id, err := self.Subscribe(listenerAddress, timeout)\n if err != 200 {\n log.Println(\"Error with initial subscription: \", err)\n return \"\", err\n } else {\n subscriptions[id] = &SubscriptionInfo{*info, false, timeout, id, self.Host} \n }\n \n \/\/ Setup resubscription timer\n timer := time.NewTimer(time.Second * time.Duration(timeout))\n go func() (string, int){\n for _ = range timer.C {\n timer.Reset(time.Second * time.Duration(timeout))\n \n \/\/ Resubscribe\n _, err := self.ReSubscribe(id, timeout)\n if err != 200 {\n \n \/\/ Failed to resubscribe so try unsubscribe, it is likely to fail but don't care.\n self.UnSubscribe(id)\n \n \/\/ Setup a new subscription, if this fails, next attempt will be when timer triggers again\n newId, err := self.Subscribe(listenerAddress, timeout)\n if err != 200 {\n log.Println(\"Error with subscription attempt: \", err)\n } else { \n \/\/ If the subscription is successful. Check if the new SID exists and if not remove it. Then add the new SID\n _, ok := subscriptions[newId]\n if ok == false {\n delete(subscriptions, id)\n }\n subscriptions[newId] = &SubscriptionInfo{*info, false, timeout, newId, self.Host} \n id = newId\n }\n \n }\n }\n return \"\", err\n }()\n \n return id, err\n \n}\n\n\/\/ Subscribe to the device event emitter, return the Subscription ID (sid) and StatusCode\nfunc (self *Device) Subscribe(listenerAddress string, timeout int) (string, int) {\n\n\thost := self.Host\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", host)\n\n\tif timeout == 0 {\n\t\ttimeout = 300\n\t}\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"SUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", host))\n\treq.Header.Add(\"path\", \"\/upnp\/event\/basicevent1\")\n\treq.Header.Add(\"callback\", fmt.Sprintf(\"<http:\/\/%s\/listener>\", listenerAddress))\n\treq.Header.Add(\"nt\", \"upnp:event\")\n\treq.Header.Add(\"timeout\", fmt.Sprintf(\"Second-%d\", timeout))\n \n req.Close = true\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n log.Println(\"Subscription Successful: \", self.Host, resp.StatusCode)\n\t\treturn resp.Header.Get(\"Sid\"), resp.StatusCode\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Subscription Unsuccessful, Incompatible header fields: \", self.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Subscription Unsuccessful, Precondition Failed: \", self.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Subscription Unsuccessful, Unable to accept renewal: \", self.Host, resp.StatusCode)\n\t}\n\n\treturn \"\", resp.StatusCode\n\n}\n\n\/\/ According to the spec all subscribers must unsubscribe when the publisher is no longer required to provide state updates. Return the StatusCode\nfunc (self *Device) UnSubscribe(sid string) int {\n\n\thost := self.Host\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", host)\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"UNSUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", host))\n\treq.Header.Add(\"SID\", sid)\n \n req.Close = true\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n log.Println(\"Unsubscription Successful: \", self.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Incompatible header fields: \", self.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Precondition Failed: \", self.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Unable to accept renewal: \", self.Host, resp.StatusCode)\n\t}\n\n\treturn resp.StatusCode\n\n}\n\n\/\/ The subscription to the device must be renewed before the timeout. Return the Subscription ID (sid) and StatusCode\nfunc (self *Device) ReSubscribe(sid string, timeout int) (string, int) {\n \n\thost := self.Host\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", host)\n\n\tif timeout == 0 {\n\t\ttimeout = 300\n\t}\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"SUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", host))\n\treq.Header.Add(\"SID\", sid)\n\treq.Header.Add(\"timeout\", fmt.Sprintf(\"Second-%d\", timeout))\n\n req.Close = true\n \n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n \/\/log.Println(\"Resubscription Successful: \", resp.StatusCode)\n\t\treturn resp.Header.Get(\"Sid\"), resp.StatusCode\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Resubscription Unsuccessful, Incompatible header fields: \", self.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Resubscription Unsuccessful, Precondition Failed: \", self.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Resubscription Unsuccessful, Unable to accept renewal: \", self.Host, resp.StatusCode)\n\t}\n\n\treturn \"\", resp.StatusCode\n\n}\n<commit_msg>Fixed a couple of bits and tidied up<commit_after>package wemo\n\n\/*\n Need to set up a subscription service to Wemo events.\n\n 1. Take a discovered device and\n 2. Send a subscribe message to\n deviceIP:devicePort\/upnp\/event\/basicevent1\n 3. If the responce is 200, the subscription is successful and ...\n 4. ... thus it should be added to the subscribed device list\n 5. Subscriptions should be renewed around the timeout period\n 6. When state is emitted record state changes against the subscription id (SID)\n\n*\/\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/SubscriptionInfo struct\ntype SubscriptionInfo struct {\n\tDeviceInfo\n\tState bool\n\tTimeout int\n\tSid string\n\tHost string\n}\n\n\/\/Deviceevent Structure for XML to Parse to\ntype Deviceevent struct {\n\tXMLName xml.Name `xml:\"propertyset\"`\n\tBinaryState string `xml:\"property>BinaryState\"`\n}\n\n\/\/SubscriptionEvent Structure for sending subscribed event data with\ntype SubscriptionEvent struct {\n\tSid string\n\tState bool\n}\n\n\/\/Listener Listen for incomming subscribed state changes.\nfunc Listener(listenerAddress string, cs chan SubscriptionEvent) {\n\n\tlog.Println(\"Listening... \", listenerAddress)\n\n\thttp.HandleFunc(\"\/listener\", func(w http.ResponseWriter, r *http.Request) {\n\n\t\teventxml := Deviceevent{}\n\n\t\tif r.Method == \"NOTIFY\" {\n\n\t\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\t\tif err == nil {\n\n\t\t\t\terr := xml.Unmarshal([]byte(body), &eventxml)\n\t\t\t\tif err != nil {\n\n\t\t\t\t\tlog.Println(\"Unmarshal error: \", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb, err := strconv.ParseBool(eventxml.BinaryState)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcs <- SubscriptionEvent{r.Header.Get(\"Sid\"), b}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t})\n\n\terr := http.ListenAndServe(listenerAddress, nil)\n\tif err != nil {\n\t\tlog.Println(\"From Listen and Serve an Err! \", err)\n\t}\n}\n\n\/\/ManageSubscription Manage firstly the subscription and then the resubscription of this device.\nfunc (d *Device) ManageSubscription(listenerAddress string, timeout int, subscriptions map[string]*SubscriptionInfo) (string, int) {\n\t\/* Subscribe to the device. Add device to subscriptions list\n\n\t Once the device has a SID, it should have resubscriptions requested before the timeout.\n\n\t Should a resubscription fail, an attempt should be made to unsubscribe and\n\t then subscribe to the device in question. Returning the new SID or an error\n\n\t The new SID should be updated in the subscription list and the old item removed.\n\t*\/\n\n\t\/\/ Initial Subscribe\n\tinfo, _ := d.FetchDeviceInfo()\n\n\tid, err := d.Subscribe(listenerAddress, timeout)\n\tif err != 200 {\n\t\tlog.Println(\"Error with initial subscription: \", err)\n\t\treturn \"\", err\n\t}\n\tsubscriptions[id] = &SubscriptionInfo{*info, false, timeout, id, d.Host}\n\n\t\/\/ Setup resubscription timer\n\ttimer := time.NewTimer(time.Second * time.Duration(timeout))\n\tgo func() (string, int) {\n\t\tfor _ = range timer.C {\n\t\t\ttimer.Reset(time.Second * time.Duration(timeout))\n\n\t\t\t\/\/ Resubscribe\n\t\t\t_, err := d.ReSubscribe(id, timeout)\n\t\t\tif err != 200 {\n\n\t\t\t\t\/\/ Failed to resubscribe so try unsubscribe, it is likely to fail but don't care.\n\t\t\t\td.UnSubscribe(id)\n\n\t\t\t\t\/\/ Setup a new subscription, if this fails, next attempt will be when timer triggers again\n\t\t\t\tnewID, err := d.Subscribe(listenerAddress, timeout)\n\t\t\t\tif err != 200 {\n\t\t\t\t\tlog.Println(\"Error with subscription attempt: \", err)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ If the subscription is successful. Check if the new SID exists and if not remove it. Then add the new SID\n\t\t\t\t\t_, ok := subscriptions[newID]\n\t\t\t\t\tif ok == false {\n\t\t\t\t\t\tdelete(subscriptions, id)\n\t\t\t\t\t}\n\t\t\t\t\tsubscriptions[newID] = &SubscriptionInfo{*info, false, timeout, newID, d.Host}\n\t\t\t\t\tid = newID\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\treturn \"\", err\n\t}()\n\n\treturn id, err\n\n}\n\n\/\/Subscribe to the device event emitter, return the Subscription ID (sid) and StatusCode\nfunc (d *Device) Subscribe(listenerAddress string, timeout int) (string, int) {\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", d.Host)\n\n\tif timeout == 0 {\n\t\ttimeout = 300\n\t}\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"SUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", d.Host))\n\treq.Header.Add(\"path\", \"\/upnp\/event\/basicevent1\")\n\treq.Header.Add(\"callback\", fmt.Sprintf(\"<http:\/\/%s\/listener>\", listenerAddress))\n\treq.Header.Add(\"nt\", \"upnp:event\")\n\treq.Header.Add(\"timeout\", fmt.Sprintf(\"Second-%d\", timeout))\n\n\treq.Close = true\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n\t\tlog.Println(\"Subscription Successful: \", d.Host, resp.StatusCode)\n\t\treturn resp.Header.Get(\"Sid\"), resp.StatusCode\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Subscription Unsuccessful, Incompatible header fields: \", d.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Subscription Unsuccessful, Precondition Failed: \", d.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Subscription Unsuccessful, Unable to accept renewal: \", d.Host, resp.StatusCode)\n\t}\n\n\treturn \"\", resp.StatusCode\n\n}\n\n\/\/UnSubscribe According to the spec all subscribers must unsubscribe when the publisher is no longer required to provide state updates. Return the StatusCode\nfunc (d *Device) UnSubscribe(sid string) int {\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", d.Host)\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"UNSUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t\treturn 0\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", d.Host))\n\treq.Header.Add(\"SID\", sid)\n\n\treq.Close = true\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n\t\tlog.Println(\"Unsubscription Successful: \", d.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Incompatible header fields: \", d.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Precondition Failed: \", d.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Unsubscription Unsuccessful, Unable to accept renewal: \", d.Host, resp.StatusCode)\n\t}\n\n\treturn resp.StatusCode\n\n}\n\n\/\/ReSubscribe The subscription to the device must be renewed before the timeout. Return the Subscription ID (sid) and StatusCode\nfunc (d *Device) ReSubscribe(sid string, timeout int) (string, int) {\n\n\taddress := fmt.Sprintf(\"http:\/\/%s\/upnp\/event\/basicevent1\", d.Host)\n\n\tif timeout == 0 {\n\t\ttimeout = 300\n\t}\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"SUBSCRIBE\", address, nil)\n\tif err != nil {\n\t\tlog.Println(\"http NewRequest Err: \", err)\n\t\treturn \"\", 0\n\t}\n\n\treq.Header.Add(\"host\", fmt.Sprintf(\"http:\/\/%s\", d.Host))\n\treq.Header.Add(\"SID\", sid)\n\treq.Header.Add(\"timeout\", fmt.Sprintf(\"Second-%d\", timeout))\n\n\treq.Close = true\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Client Request Error: \", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 200 {\n\t\t\/\/log.Println(\"Resubscription Successful: \", resp.StatusCode)\n\t\treturn resp.Header.Get(\"Sid\"), resp.StatusCode\n\t} else if resp.StatusCode == 400 {\n\t\tlog.Println(\"Resubscription Unsuccessful, Incompatible header fields: \", d.Host, resp.StatusCode)\n\t} else if resp.StatusCode == 412 {\n\t\tlog.Println(\"Resubscription Unsuccessful, Precondition Failed: \", d.Host, resp.StatusCode)\n\t} else {\n\t\tlog.Println(\"Resubscription Unsuccessful, Unable to accept renewal: \", d.Host, resp.StatusCode)\n\t}\n\n\treturn \"\", resp.StatusCode\n\n}\n<|endoftext|>"} {"text":"<commit_before>package jet\n\nimport (\n\t\"database\/sql\"\n)\n\ntype query struct {\n\tdb *Db\n\tqo queryObject\n\tid string\n\tquery string\n\targs []interface{}\n\tlastErr error\n\tstmt *sql.Stmt\n}\n\n\/\/ newQuery initiates a new query for the provided query object (either *sql.Tx or *sql.DB)\nfunc newQuery(qo queryObject, db *Db) *query {\n\treturn &query{\n\t\tqo: qo,\n\t\tdb: db,\n\t\tid: newQueryId(),\n\t}\n}\n\nfunc (q *query) Run() error {\n\treturn q.Rows(nil)\n}\n\nfunc (q *query) Rows(v interface{}, maxRows ...int64) error {\n\t\/\/ Always clear the error and close the statement - if it's not handled\n\t\/\/ by the LRU - after we are done with Rows.\n\tdefer func() {\n\t\tq.lastErr = nil\n\t\tif q.db.LRUCache == nil {\n\t\t\tq.stmt.Close()\n\t\t}\n\t}()\n\n\t\/\/ Since Query doesn't return the error directly we do it here\n\tif q.lastErr != nil {\n\t\treturn q.lastErr\n\t}\n\n\t\/\/ Determine max rows\n\tvar max int64 = -1\n\tif len(maxRows) > 0 {\n\t\tmax = maxRows[0]\n\t}\n\tvar (\n\t\trows *sql.Rows\n\t\terr error\n\t)\n\tif q.db.LogFunc != nil {\n\t\tq.db.LogFunc(q.id, q.query, q.args...)\n\t}\n\tif v == nil {\n\t\t_, err = q.stmt.Exec(q.encodedArgs()...)\n\t\treturn q.onErr(err)\n\n\t} else {\n\t\trows, err = q.stmt.Query(q.encodedArgs()...)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\t\tdefer rows.Close()\n\t}\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn q.onErr(err)\n\t}\n\tvar i int64 = 0\n\tmppr := &mapper{\n\t\tconv: q.db.ColumnConverter,\n\t}\n\tfor {\n\t\t\/\/ Check if max rows has been reached\n\t\tif max >= 0 && i >= max {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Break if no more rows\n\t\tif !rows.Next() {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Scan values into containers\n\t\tcont := make([]interface{}, 0, len(cols))\n\t\tfor i := 0; i < cap(cont); i++ {\n\t\t\tcont = append(cont, new(interface{}))\n\t\t}\n\t\terr := rows.Scan(cont...)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\n\t\t\/\/ Map values\n\t\terr = mppr.unpack(cols, cont, v)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\t\ti++\n\t}\n\treturn nil\n}\n\nfunc (q *query) encodedArgs() []interface{} {\n\tenc := make([]interface{}, 0, len(q.args))\n\tfor _, a := range q.args {\n\t\tv, ok := a.(ComplexValue)\n\t\tif ok {\n\t\t\tenc = append(enc, v.Encode())\n\t\t} else {\n\t\t\tenc = append(enc, a)\n\t\t}\n\t}\n\treturn enc\n}\n\nfunc (q *query) prepare(query string, args ...interface{}) Runnable {\n\tq2, a2 := substituteMapAndArrayMarks(query, args...)\n\tq.query = q2\n\tq.args = a2\n\n\tvar stmt *sql.Stmt\n\tvar lkey string\n\tif q.db.LRUCache != nil {\n\t\tlkey = q.id + q.query\n\t\tstmt = q.db.LRUCache.get(lkey)\n\t}\n\tif stmt == nil {\n\t\tvar err error\n\t\tstmt, err = q.qo.Prepare(q.query)\n\t\tif err != nil {\n\t\t\tq.onErr(err)\n\t\t\treturn q\n\t\t}\n\t\tif q.db.LRUCache != nil {\n\t\t\tq.db.LRUCache.set(lkey, stmt)\n\t\t}\n\t}\n\tq.stmt = stmt\n\n\treturn q\n}\n\nfunc (q *query) onErr(err error) error {\n\tif err != nil {\n\t\tq.lastErr = err\n\t\tif q.db.LRUCache != nil {\n\t\t\tq.db.LRUCache.reset()\n\t\t}\n\t}\n\treturn err\n}\n<commit_msg>Prevent panic when aptempting closing statement if statement is nill<commit_after>package jet\n\nimport (\n\t\"database\/sql\"\n)\n\ntype query struct {\n\tdb *Db\n\tqo queryObject\n\tid string\n\tquery string\n\targs []interface{}\n\tlastErr error\n\tstmt *sql.Stmt\n}\n\n\/\/ newQuery initiates a new query for the provided query object (either *sql.Tx or *sql.DB)\nfunc newQuery(qo queryObject, db *Db) *query {\n\treturn &query{\n\t\tqo: qo,\n\t\tdb: db,\n\t\tid: newQueryId(),\n\t}\n}\n\nfunc (q *query) Run() error {\n\treturn q.Rows(nil)\n}\n\nfunc (q *query) Rows(v interface{}, maxRows ...int64) error {\n\t\/\/ Always clear the error and close the statement - if it's not handled\n\t\/\/ by the LRU - after we are done with Rows.\n\tdefer func() {\n\t\tq.lastErr = nil\n\t\tif q.db.LRUCache == nil && q.stmt != nil {\n\t\t\tq.stmt.Close()\n\t\t}\n\t}()\n\n\t\/\/ Since Query doesn't return the error directly we do it here\n\tif q.lastErr != nil {\n\t\treturn q.lastErr\n\t}\n\n\t\/\/ Determine max rows\n\tvar max int64 = -1\n\tif len(maxRows) > 0 {\n\t\tmax = maxRows[0]\n\t}\n\tvar (\n\t\trows *sql.Rows\n\t\terr error\n\t)\n\tif q.db.LogFunc != nil {\n\t\tq.db.LogFunc(q.id, q.query, q.args...)\n\t}\n\tif v == nil {\n\t\t_, err = q.stmt.Exec(q.encodedArgs()...)\n\t\treturn q.onErr(err)\n\n\t} else {\n\t\trows, err = q.stmt.Query(q.encodedArgs()...)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\t\tdefer rows.Close()\n\t}\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn q.onErr(err)\n\t}\n\tvar i int64 = 0\n\tmppr := &mapper{\n\t\tconv: q.db.ColumnConverter,\n\t}\n\tfor {\n\t\t\/\/ Check if max rows has been reached\n\t\tif max >= 0 && i >= max {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Break if no more rows\n\t\tif !rows.Next() {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ Scan values into containers\n\t\tcont := make([]interface{}, 0, len(cols))\n\t\tfor i := 0; i < cap(cont); i++ {\n\t\t\tcont = append(cont, new(interface{}))\n\t\t}\n\t\terr := rows.Scan(cont...)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\n\t\t\/\/ Map values\n\t\terr = mppr.unpack(cols, cont, v)\n\t\tif err != nil {\n\t\t\treturn q.onErr(err)\n\t\t}\n\t\ti++\n\t}\n\treturn nil\n}\n\nfunc (q *query) encodedArgs() []interface{} {\n\tenc := make([]interface{}, 0, len(q.args))\n\tfor _, a := range q.args {\n\t\tv, ok := a.(ComplexValue)\n\t\tif ok {\n\t\t\tenc = append(enc, v.Encode())\n\t\t} else {\n\t\t\tenc = append(enc, a)\n\t\t}\n\t}\n\treturn enc\n}\n\nfunc (q *query) prepare(query string, args ...interface{}) Runnable {\n\tq2, a2 := substituteMapAndArrayMarks(query, args...)\n\tq.query = q2\n\tq.args = a2\n\n\tvar stmt *sql.Stmt\n\tvar lkey string\n\tif q.db.LRUCache != nil {\n\t\tlkey = q.id + q.query\n\t\tstmt = q.db.LRUCache.get(lkey)\n\t}\n\tif stmt == nil {\n\t\tvar err error\n\t\tstmt, err = q.qo.Prepare(q.query)\n\t\tif err != nil {\n\t\t\tq.onErr(err)\n\t\t\treturn q\n\t\t}\n\t\tif q.db.LRUCache != nil {\n\t\t\tq.db.LRUCache.set(lkey, stmt)\n\t\t}\n\t}\n\tq.stmt = stmt\n\n\treturn q\n}\n\nfunc (q *query) onErr(err error) error {\n\tif err != nil {\n\t\tq.lastErr = err\n\t\tif q.db.LRUCache != nil {\n\t\t\tq.db.LRUCache.reset()\n\t\t}\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package gorethink\n\nimport (\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\t\"fmt\"\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype termsList []RqlTerm\ntype termsObj map[string]RqlTerm\ntype RqlTerm struct {\n\tname string\n\ttermType p.Term_TermType\n\tdata interface{}\n\targs []RqlTerm\n\toptArgs map[string]RqlTerm\n}\n\n\/\/ build takes the query tree and turns it into a protobuf term tree.\nfunc (t RqlTerm) build() *p.Term {\n\tswitch t.termType {\n\tcase p.Term_DATUM:\n\t\tdatum, err := constructDatum(t)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn datum\n\tdefault:\n\t\targs := []*p.Term{}\n\t\toptArgs := []*p.Term_AssocPair{}\n\t\tterm := &p.Term{\n\t\t\tType: t.termType.Enum(),\n\t\t}\n\n\t\tfor _, v := range t.args {\n\t\t\targs = append(args, v.build())\n\t\t}\n\n\t\tfor k, v := range t.optArgs {\n\t\t\toptArgs = append(optArgs, &p.Term_AssocPair{\n\t\t\t\tKey: proto.String(k),\n\t\t\t\tVal: v.build(),\n\t\t\t})\n\t\t}\n\n\t\tterm.Args = args\n\t\tterm.Optargs = optArgs\n\n\t\treturn term\n\t}\n}\n\n\/\/ compose returns a string representation of the query tree\nfunc (t RqlTerm) String() string {\n\tswitch t.termType {\n\tcase p.Term_MAKE_ARRAY:\n\t\treturn fmt.Sprintf(\"[%s]\", strings.Join(argsToStringSlice(t.args), \", \"))\n\tcase p.Term_MAKE_OBJ:\n\t\treturn fmt.Sprintf(\"{%s}\", strings.Join(optArgsToStringSlice(t.optArgs), \", \"))\n\tcase p.Term_FUNC:\n\t\t\/\/ Get string representation of each argument\n\t\targs := []string{}\n\t\tfor _, v := range t.args[0].args {\n\t\t\targs = append(args, fmt.Sprintf(\"var_%d\", v.data))\n\t\t}\n\n\t\treturn fmt.Sprintf(\"func(%s r.RqlTerm) r.RqlTerm { return %s }\",\n\t\t\tstrings.Join(args, \", \"),\n\t\t\tt.args[1].String(),\n\t\t)\n\tcase p.Term_VAR:\n\t\treturn fmt.Sprintf(\"var_%s\", t.args[0])\n\tcase p.Term_IMPLICIT_VAR:\n\t\treturn \"r.Row\"\n\tcase p.Term_DATUM:\n\t\tswitch v := t.data.(type) {\n\t\tcase string:\n\t\t\treturn strconv.Quote(v)\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"%v\", v)\n\t\t}\n\n\tdefault:\n\t\tif t.name != \"\" {\n\t\t\treturn fmt.Sprintf(\"r.%s(%s)\", t.name, strings.Join(allArgsToStringSlice(t.args, t.optArgs), \", \"))\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"(%s)\", strings.Join(allArgsToStringSlice(t.args, t.optArgs), \", \"))\n\t\t}\n\t}\n}\n\ntype WriteResponse struct {\n\tErrors int\n\tCreated int\n\tInserted int\n\tUpdated int\n\tUnchanged int\n\tReplaced int\n\tDeleted int\n\tCreated int\n\tGeneratedKeys []string `gorethink:\"generated_keys\"`\n\tFirstError string `gorethink:\"first_error\"` \/\/ populated if Errors > 0\n\tNewValue interface{} `gorethink:\"new_val\"`\n\tOldValue interface{} `gorethink:\"old_val\"`\n}\n\n\/\/ Run runs a query using the given connection.\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\trows, err := query.Run(sess)\n\/\/\tif err != nil {\n\/\/\t\t\/\/ error\n\/\/\t}\n\/\/\tfor rows.Next() {\n\/\/\t\tdoc := MyDocumentType{}\n\/\/\t\terr := r.Scan(&doc)\n\/\/\t\t \/\/ Do something with row\n\/\/\t}\nfunc (t RqlTerm) Run(s *Session, args ...interface{}) (*ResultRows, error) {\n\targm := optArgsToMap([]string{\"db\", \"use_outdated\", \"noreply\", \"time_format\"}, args)\n\treturn s.startQuery(t, argm)\n}\n\n\/\/ Run runs a query using the given connection but unlike Run returns ResultRow.\n\/\/ This function should be used if your query only returns a single row.\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\trow, err := query.RunRow(sess, \"use_outdated\", true)\n\/\/\tif err != nil {\n\/\/\t\t\/\/ error\n\/\/\t}\n\/\/\tif row.IsNil() {\n\/\/\t\t\/\/ nothing was found\n\/\/\t}\n\/\/\terr = row.Scan(&doc)\nfunc (t RqlTerm) RunRow(s *Session, args ...interface{}) (*ResultRow, error) {\n\trows, err := t.Run(s, args...)\n\tif err == nil {\n\t\tdefer rows.Close()\n\t\tif !rows.Next() {\n\t\t\terr = RqlDriverError{\"No rows in the result set\"}\n\t\t}\n\t}\n\treturn &ResultRow{rows: rows, err: err}, err\n}\n\n\/\/ RunWrite runs a query using the given connection but unlike Run automatically\n\/\/ scans the result into a variable of type WriteResponse. This function should be used\n\/\/ if you are running a write query (such as Insert, Update, TableCreate, etc...)\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\tres, err := r.Db(\"database\").Table(\"table\").Insert(doc).RunWrite(sess, \"noreply\", true)\nfunc (t RqlTerm) RunWrite(s *Session, args ...interface{}) (WriteResponse, error) {\n\tvar response WriteResponse\n\trow, err := t.RunRow(s, args...)\n\tif err == nil {\n\t\terr = row.Scan(&response)\n\t}\n\treturn response, err\n}\n\n\/\/ Exec runs the query but does not return the result (It also automatically sets\n\/\/ the noreply option).\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false) and \"time_format\".\nfunc (t RqlTerm) Exec(s *Session, args ...interface{}) error {\n\t\/\/ Ensure that noreply is set to true\n\targs = append(args, \"noreply\")\n\targs = append(args, true)\n\n\t_, err := t.Run(s, args...)\n\treturn err\n}\n<commit_msg>Removed duplicated field<commit_after>package gorethink\n\nimport (\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\t\"fmt\"\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype termsList []RqlTerm\ntype termsObj map[string]RqlTerm\ntype RqlTerm struct {\n\tname string\n\ttermType p.Term_TermType\n\tdata interface{}\n\targs []RqlTerm\n\toptArgs map[string]RqlTerm\n}\n\n\/\/ build takes the query tree and turns it into a protobuf term tree.\nfunc (t RqlTerm) build() *p.Term {\n\tswitch t.termType {\n\tcase p.Term_DATUM:\n\t\tdatum, err := constructDatum(t)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn datum\n\tdefault:\n\t\targs := []*p.Term{}\n\t\toptArgs := []*p.Term_AssocPair{}\n\t\tterm := &p.Term{\n\t\t\tType: t.termType.Enum(),\n\t\t}\n\n\t\tfor _, v := range t.args {\n\t\t\targs = append(args, v.build())\n\t\t}\n\n\t\tfor k, v := range t.optArgs {\n\t\t\toptArgs = append(optArgs, &p.Term_AssocPair{\n\t\t\t\tKey: proto.String(k),\n\t\t\t\tVal: v.build(),\n\t\t\t})\n\t\t}\n\n\t\tterm.Args = args\n\t\tterm.Optargs = optArgs\n\n\t\treturn term\n\t}\n}\n\n\/\/ compose returns a string representation of the query tree\nfunc (t RqlTerm) String() string {\n\tswitch t.termType {\n\tcase p.Term_MAKE_ARRAY:\n\t\treturn fmt.Sprintf(\"[%s]\", strings.Join(argsToStringSlice(t.args), \", \"))\n\tcase p.Term_MAKE_OBJ:\n\t\treturn fmt.Sprintf(\"{%s}\", strings.Join(optArgsToStringSlice(t.optArgs), \", \"))\n\tcase p.Term_FUNC:\n\t\t\/\/ Get string representation of each argument\n\t\targs := []string{}\n\t\tfor _, v := range t.args[0].args {\n\t\t\targs = append(args, fmt.Sprintf(\"var_%d\", v.data))\n\t\t}\n\n\t\treturn fmt.Sprintf(\"func(%s r.RqlTerm) r.RqlTerm { return %s }\",\n\t\t\tstrings.Join(args, \", \"),\n\t\t\tt.args[1].String(),\n\t\t)\n\tcase p.Term_VAR:\n\t\treturn fmt.Sprintf(\"var_%s\", t.args[0])\n\tcase p.Term_IMPLICIT_VAR:\n\t\treturn \"r.Row\"\n\tcase p.Term_DATUM:\n\t\tswitch v := t.data.(type) {\n\t\tcase string:\n\t\t\treturn strconv.Quote(v)\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"%v\", v)\n\t\t}\n\n\tdefault:\n\t\tif t.name != \"\" {\n\t\t\treturn fmt.Sprintf(\"r.%s(%s)\", t.name, strings.Join(allArgsToStringSlice(t.args, t.optArgs), \", \"))\n\t\t} else {\n\t\t\treturn fmt.Sprintf(\"(%s)\", strings.Join(allArgsToStringSlice(t.args, t.optArgs), \", \"))\n\t\t}\n\t}\n}\n\ntype WriteResponse struct {\n\tErrors int\n\tCreated int\n\tInserted int\n\tUpdated int\n\tUnchanged int\n\tReplaced int\n\tDeleted int\n\tGeneratedKeys []string `gorethink:\"generated_keys\"`\n\tFirstError string `gorethink:\"first_error\"` \/\/ populated if Errors > 0\n\tNewValue interface{} `gorethink:\"new_val\"`\n\tOldValue interface{} `gorethink:\"old_val\"`\n}\n\n\/\/ Run runs a query using the given connection.\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\trows, err := query.Run(sess)\n\/\/\tif err != nil {\n\/\/\t\t\/\/ error\n\/\/\t}\n\/\/\tfor rows.Next() {\n\/\/\t\tdoc := MyDocumentType{}\n\/\/\t\terr := r.Scan(&doc)\n\/\/\t\t \/\/ Do something with row\n\/\/\t}\nfunc (t RqlTerm) Run(s *Session, args ...interface{}) (*ResultRows, error) {\n\targm := optArgsToMap([]string{\"db\", \"use_outdated\", \"noreply\", \"time_format\"}, args)\n\treturn s.startQuery(t, argm)\n}\n\n\/\/ Run runs a query using the given connection but unlike Run returns ResultRow.\n\/\/ This function should be used if your query only returns a single row.\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\trow, err := query.RunRow(sess, \"use_outdated\", true)\n\/\/\tif err != nil {\n\/\/\t\t\/\/ error\n\/\/\t}\n\/\/\tif row.IsNil() {\n\/\/\t\t\/\/ nothing was found\n\/\/\t}\n\/\/\terr = row.Scan(&doc)\nfunc (t RqlTerm) RunRow(s *Session, args ...interface{}) (*ResultRow, error) {\n\trows, err := t.Run(s, args...)\n\tif err == nil {\n\t\tdefer rows.Close()\n\t\tif !rows.Next() {\n\t\t\terr = RqlDriverError{\"No rows in the result set\"}\n\t\t}\n\t}\n\treturn &ResultRow{rows: rows, err: err}, err\n}\n\n\/\/ RunWrite runs a query using the given connection but unlike Run automatically\n\/\/ scans the result into a variable of type WriteResponse. This function should be used\n\/\/ if you are running a write query (such as Insert, Update, TableCreate, etc...)\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false), \"noreply\" (defaults to false) and \"time_format\".\n\/\/\n\/\/\tres, err := r.Db(\"database\").Table(\"table\").Insert(doc).RunWrite(sess, \"noreply\", true)\nfunc (t RqlTerm) RunWrite(s *Session, args ...interface{}) (WriteResponse, error) {\n\tvar response WriteResponse\n\trow, err := t.RunRow(s, args...)\n\tif err == nil {\n\t\terr = row.Scan(&response)\n\t}\n\treturn response, err\n}\n\n\/\/ Exec runs the query but does not return the result (It also automatically sets\n\/\/ the noreply option).\n\/\/\n\/\/ Optional arguments :\n\/\/ \"db\", \"use_outdated\" (defaults to false) and \"time_format\".\nfunc (t RqlTerm) Exec(s *Session, args ...interface{}) error {\n\t\/\/ Ensure that noreply is set to true\n\targs = append(args, \"noreply\")\n\targs = append(args, true)\n\n\t_, err := t.Run(s, args...)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package gojx\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Query provides query engine for selection by condition.\ntype Query struct {\n\ts *Storage\n\n\teq map[string]interface{}\n\torder string\n\tlimit [2]int\n}\n\n\/\/ Eq add eq condition in index field.\nfunc (q *Query) Eq(k string, v interface{}) *Query {\n\tq.eq[k] = v\n\treturn q\n}\n\n\/\/ Limit set returning slice size.\nfunc (q *Query) Limit(i int) *Query {\n\tq.limit = [2]int{0, i}\n\treturn q\n}\n\n\/\/ Page set returning slice size as pagination.\nfunc (q *Query) Pager(page, size int) *Query {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\tif size < 1 {\n\t\tsize = 1\n\t}\n\tq.limit = [2]int{(page - 1) * size, page * size}\n\treturn q\n}\n\n\/\/ Order set result slice order by pk int.\nfunc (q *Query) Order(order string) *Query {\n\tq.order = strings.ToUpper(order)\n\treturn q\n}\n\n\/\/ ToSlice query storage by conditions and assign results to slice.\n\/\/ Slice need a *[]*Struct ( a reference of slice of struct pointer.\nfunc (q *Query) ToSlice(v interface{}) (e error) {\n\tsliceRt, sliceRv := reflect.TypeOf(v), reflect.ValueOf(v)\n\n\t\/\/ check type, need *[]*Value\n\tif sliceRt.Kind() != reflect.Ptr || sliceRt.Elem().Kind() != reflect.Slice || sliceRt.Elem().Elem().Kind() != reflect.Ptr || sliceRt.Elem().Elem().Elem().Kind() != reflect.Struct {\n\t\te = ErrorToSliceNeedSlice\n\t\treturn\n\t}\n\n\t\/\/ get table\n\tsliceElemRt := sliceRt.Elem().Elem().Elem()\n\ttable := q.s.table[getReflectTypeName(sliceElemRt)]\n\n\t\/\/ parse eq condition\n\tsliceResult := q.parseEq(table)\n\n\tif len(sliceResult) < 1 {\n\t\treturn\n\t}\n\n\t\/\/ build slice\n\tq.buildSlice(sliceResult, table, sliceElemRt, sliceRv)\n\treturn\n}\n\n\/\/ assign result data to slice.\nfunc (q *Query) buildSlice(sliceResult []int, t *Table, rt reflect.Type, sliceRv reflect.Value) (e error) {\n\tif len(sliceResult) < 1 {\n\t\treturn\n\t}\n\tfor _, pk := range sliceResult {\n\t\trv := reflect.New(rt)\n\t\tif pk < 1 {\n\t\t\tcontinue\n\t\t}\n\t\t_, iftValue, e := t.Get(pk)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tif iftValue == nil {\n\t\t\tcontinue\n\t\t}\n\t\te = q.s.saver.ToStruct(iftValue, rv.Interface())\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tsliceRv.Elem().Set(reflect.Append(sliceRv.Elem(), rv))\n\t}\n\treturn\n}\n\n\/\/ parse eq condition.\n\/\/ return pk slice []int.\nfunc (q *Query) parseEq(t *Table) []int {\n\tres := []interface{}{}\n\tfor k, v := range q.eq {\n\t\tidx := t.valueIndex[k]\n\t\tif idx == nil {\n\t\t\tcontinue\n\t\t}\n\t\ttmp := idx.Get(fmt.Sprintf(\"%v\", v))\n\t\tif len(tmp) > 0 {\n\t\t\tif len(res) < 1 {\n\t\t\t\tres = tmp\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttmp2 := []interface{}{}\n\t\t\tfor _, j := range tmp {\n\t\t\t\tif _, b := isInInterfaceSlice(res, j); b {\n\t\t\t\t\ttmp2 = append(tmp2, j)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(tmp2) < 1 {\n\t\t\t\treturn []int{}\n\t\t\t}\n\t\t\tres = tmp2\n\t\t} else {\n\t\t\treturn []int{}\n\t\t}\n\t}\n\tresult := itfSliceToIntSlice(res)\n\tresult = q.parseOrder(result)\n\tresult = q.parseLimit(result)\n\treturn result\n}\n\n\/\/ parse limit condition and pagination.\nfunc (q *Query) parseLimit(result []int) []int {\n\tif q.limit[1] > 0 {\n\t\tlength := len(result)\n\t\tif length <= q.limit[0] {\n\t\t\treturn nil\n\t\t}\n\t\tif length <= q.limit[1] {\n\t\t\tq.limit[1] = length\n\t\t}\n\t\treturn result[q.limit[0]:q.limit[1]]\n\t}\n\treturn result\n}\n\n\/\/ order result slice\nfunc (q *Query) parseOrder(result []int) []int {\n\tif q.order == \"ASC\" {\n\t\tsortIntSliceASC(result)\n\t}\n\tif q.order == \"DESC\" {\n\t\tsortIntSliceDESC(result)\n\t}\n\treturn result\n}\n\n\/\/ create new Query with *Storage.\nfunc NewQuery(s *Storage) *Query {\n\tq := new(Query)\n\tq.s = s\n\tq.order = \"ASC\"\n\tq.eq = make(map[string]interface{})\n\tq.limit = [2]int{-1, -1}\n\treturn q\n}\n<commit_msg>issue ui update<commit_after>package gojx\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Query provides query engine for selection by condition.\ntype Query struct {\n\ts *Storage\n\n\teq map[string]interface{}\n\torder string\n\tlimit [2]int\n}\n\n\/\/ Eq add eq condition in index field.\nfunc (q *Query) Eq(k string, v interface{}) *Query {\n\tq.eq[k] = v\n\treturn q\n}\n\n\/\/ Limit set returning slice size.\nfunc (q *Query) Limit(i int) *Query {\n\tq.limit = [2]int{0, i}\n\treturn q\n}\n\n\/\/ Page set returning slice size as pagination.\nfunc (q *Query) Pager(page, size int) *Query {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\tif size < 1 {\n\t\tsize = 1\n\t}\n\tq.limit = [2]int{(page-1)*size, page*size}\n\treturn q\n}\n\n\/\/ Order set result slice order by pk int.\nfunc (q *Query) Order(order string) *Query {\n\tq.order = strings.ToUpper(order)\n\treturn q\n}\n\n\/\/ ToSlice query storage by conditions and assign results to slice.\n\/\/ Slice need a *[]*Struct ( a reference of slice of struct pointer.\nfunc (q *Query) ToSlice(v interface{}) (e error) {\n\tsliceRt, sliceRv := reflect.TypeOf(v), reflect.ValueOf(v)\n\n\t\/\/ check type, need *[]*Value\n\tif sliceRt.Kind() != reflect.Ptr || sliceRt.Elem().Kind() != reflect.Slice || sliceRt.Elem().Elem().Kind() != reflect.Ptr || sliceRt.Elem().Elem().Elem().Kind() != reflect.Struct {\n\t\te = ErrorToSliceNeedSlice\n\t\treturn\n\t}\n\n\t\/\/ get table\n\tsliceElemRt := sliceRt.Elem().Elem().Elem()\n\ttable := q.s.table[getReflectTypeName(sliceElemRt)]\n\n\t\/\/ parse eq condition\n\tsliceResult := q.parseEq(table)\n\n\tif len(sliceResult) < 1 {\n\t\treturn\n\t}\n\n\t\/\/ build slice\n\tq.buildSlice(sliceResult, table, sliceElemRt, sliceRv)\n\treturn\n}\n\n\/\/ assign result data to slice.\nfunc (q *Query) buildSlice(sliceResult []int, t *Table, rt reflect.Type, sliceRv reflect.Value) (e error) {\n\tif len(sliceResult) < 1 {\n\t\treturn\n\t}\n\tfor _, pk := range sliceResult {\n\t\trv := reflect.New(rt)\n\t\tif pk < 1 {\n\t\t\tcontinue\n\t\t}\n\t\t_, iftValue, e := t.Get(pk)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tif iftValue == nil {\n\t\t\tcontinue\n\t\t}\n\t\te = q.s.saver.ToStruct(iftValue, rv.Interface())\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tsliceRv.Elem().Set(reflect.Append(sliceRv.Elem(), rv))\n\t}\n\treturn\n}\n\n\/\/ parse eq condition.\n\/\/ return pk slice []int.\nfunc (q *Query) parseEq(t *Table) []int {\n\tres := []interface{}{}\n\tfor k, v := range q.eq {\n\t\tidx := t.valueIndex[k]\n\t\tif idx == nil {\n\t\t\tcontinue\n\t\t}\n\t\ttmp := idx.Get(fmt.Sprintf(\"%v\", v))\n\t\tif len(tmp) > 0 {\n\t\t\tif len(res) < 1 {\n\t\t\t\tres = tmp\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttmp2 := []interface{}{}\n\t\t\tfor _, j := range tmp {\n\t\t\t\tif _, b := isInInterfaceSlice(res, j); b {\n\t\t\t\t\ttmp2 = append(tmp2, j)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(tmp2) < 1 {\n\t\t\t\treturn []int{}\n\t\t\t}\n\t\t\tres = tmp2\n\t\t} else {\n\t\t\treturn []int{}\n\t\t}\n\t}\n\tresult := itfSliceToIntSlice(res)\n\tresult = q.parseOrder(result)\n\tresult = q.parseLimit(result)\n\treturn result\n}\n\n\/\/ parse limit condition and pagination.\nfunc (q *Query) parseLimit(result []int) []int {\n\tif q.limit[1] > 0 {\n\t\tlength := len(result)\n\t\tif length <= q.limit[0] {\n\t\t\treturn nil\n\t\t}\n\t\tif length <= q.limit[1] {\n\t\t\tq.limit[1] = length\n\t\t}\n\t\treturn result[q.limit[0]:q.limit[1]]\n\t}\n\treturn result\n}\n\n\/\/ order result slice\nfunc (q *Query) parseOrder(result []int) []int {\n\tif q.order == \"ASC\" {\n\t\tsortIntSliceASC(result)\n\t}\n\tif q.order == \"DESC\" {\n\t\tsortIntSliceDESC(result)\n\t}\n\treturn result\n}\n\n\/\/ create new Query with *Storage.\nfunc NewQuery(s *Storage) *Query {\n\tq := new(Query)\n\tq.s = s\n\tq.order = \"ASC\"\n\tq.eq = make(map[string]interface{})\n\tq.limit = [2]int{-1, -1}\n\treturn q\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage telsh provides \"middle\" (for the telnet package) that can be used to implement a TELNET server\nthat provides a \"shell\" interface.\n\nShell interfaces you may be familiar with include: \"bash\", \"csh\", \"sh\", \"zsk\", etc.\n\nTELNET Server\n\nHere is an example usage:\n\n\tpackage main\n\t\n\timport (\n\t\t\"github.com\/reiver\/go-oi\"\n\t\t\"github.com\/reiver\/go-telnet\"\n\t\t\"github.com\/reiver\/go-telnet\/telsh\"\n\n\t\t\"io\"\n\t)\n\n\tfunc main() {\n\t\t\n\t\ttelnetHandler := telsh.NewShellHandler()\n\t\t\n\t\tif err := telnetHandler.RegisterElse(\n\t\t\ttelsh.ProducerFunc(\n\t\t\t\tfunc(ctx telsh.Context, name string, args ...string) telsh.Handler {\n\t\t\t\t\treturn telsh.PromoteHandlerFunc(\n\t\t\t\t\t\tfunc(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {\n\t\t\t\t\t\t\toi.LongWrite(stdout, []byte{'w','a','t','?', '\\r','\\n'})\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t),\n\t\t); nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n\t\tif err := telnetHandler.Register(\"help\",\n\t\t\ttelsh.ProducerFunc(\n\t\t\t\tfunc(ctx telsh.Context, name string, args ...string) telsh.Handler {\n\t\t\t\treturn telsh.PromoteHandlerFunc(\n\t\t\t\t\t\tfunc(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {\n\t\t\t\t\t\t\toi.LongWrite(stdout, []byte{'r','t','f','m','!', '\\r','\\n'})\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t),\n\t\t); nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n\t\terr := telnet.ListenAndServe(\":5555\", telnetHandler)\n\t\tif nil != err {\n\t\t\t\/\/@TODO: Handle this error better.\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n*\/\npackage telsh\n<commit_msg>added more docs<commit_after>\/*\nPackage telsh provides \"middle\" (for the telnet package) that can be used to implement a TELNET server\nthat provides a \"shell\" interface.\n\nShell interfaces you may be familiar with include: \"bash\", \"csh\", \"sh\", \"zsk\", etc.\n\nTELNET Server\n\nHere is an example usage:\n\n\tpackage main\n\t\n\timport (\n\t\t\"github.com\/reiver\/go-oi\"\n\t\t\"github.com\/reiver\/go-telnet\"\n\t\t\"github.com\/reiver\/go-telnet\/telsh\"\n\n\t\t\"io\"\n\t)\n\n\tfunc main() {\n\t\t\n\t\ttelnetHandler := telsh.NewShellHandler()\n\t\t\n\t\tif err := telnetHandler.RegisterElse(\n\t\t\ttelsh.ProducerFunc(\n\t\t\t\tfunc(ctx telsh.Context, name string, args ...string) telsh.Handler {\n\t\t\t\t\treturn telsh.PromoteHandlerFunc(\n\t\t\t\t\t\tfunc(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {\n\t\t\t\t\t\t\toi.LongWrite(stdout, []byte{'w','a','t','?', '\\r','\\n'})\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t),\n\t\t); nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n\t\tif err := telnetHandler.Register(\"help\",\n\t\t\ttelsh.ProducerFunc(\n\t\t\t\tfunc(ctx telsh.Context, name string, args ...string) telsh.Handler {\n\t\t\t\treturn telsh.PromoteHandlerFunc(\n\t\t\t\t\t\tfunc(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {\n\t\t\t\t\t\t\toi.LongWrite(stdout, []byte{'r','t','f','m','!', '\\r','\\n'})\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t),\n\t\t); nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n\t\terr := telnet.ListenAndServe(\":5555\", telnetHandler)\n\t\tif nil != err {\n\t\t\t\/\/@TODO: Handle this error better.\n\t\t\tpanic(err)\n\t\t}\n\t}\n\nHere is a more \"unpacked\" example:\n\n\tpackage main\n\t\n\t\n\timport (\n\t\t\"github.com\/reiver\/go-oi\"\n\t\t\"github.com\/reiver\/go-telnet\"\n\t\t\"github.com\/reiver\/go-telnet\/telsh\"\n\t\t\n\t\t\"fmt\"\n\t\t\"io\"\n\t\t\"time\"\n\t)\n\t\n\t\n\tvar (\n\t\tshellHandler := telsh.NewShellHandler()\n\t)\n\t\n\t\n\tfunc init() {\n\t\t\n\t\tshellHandler.Register(\"dance\", telsh.ProducerFunc(producer))\n\t\t\n\t\t\n\t\tshellHandler.WelcomeMessage = `\n\t __ __ ______ _ _____ ____ __ __ ______ \n\t \\ \\ \/ \/| ____|| | \/ ____| \/ __ \\ | \\\/ || ____|\n\t \\ \\ \/\\ \/ \/ | |__ | | | | | | | || \\ \/ || |__ \n\t \\ \\\/ \\\/ \/ | __| | | | | | | | || |\\\/| || __| \n\t \\ \/\\ \/ | |____ | |____ | |____ | |__| || | | || |____ \n\t \\\/ \\\/ |______||______| \\_____| \\____\/ |_| |_||______|\n\t\n\t`\n\t}\n\t\n\t\n\tfunc producer(ctx telsh.Context, name string, args ...string) telsh.Handler{\n\t\treturn telsh.PromoteHandlerFunc(handler)\n\t}\n\t\n\t\n\tfunc handler(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {\n\t\tfor i:=0; i<20; i++ {\n\t\t\toi.LongWriteString(stdout, \"\\r⠋\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠙\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠹\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠸\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠼\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠴\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠦\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠧\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠇\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t\n\t\t\toi.LongWriteString(stdout, \"\\r⠏\")\n\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t}\n\t\toi.LongWriteString(stdout, \"\\r \\r\\n\")\n\n\t\treturn nil\n\t}\n\t\n\t\n\tfunc main() {\n\t\t\n\t\taddr := \":5555\"\n\t\tif err := telnet.ListenAndServe(addr, shellHandler); nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n*\/\npackage telsh\n<|endoftext|>"} {"text":"<commit_before>package tessen\n\nvar all_templates = map[string]string{\n\t\"debug\": default_debug_template,\n\t\"event_list\": default_event_list_template,\n\t\"event_view\": default_event_view_template,\n\t\"help\": default_help_template,\n}\n\n\/\/ issued: [{{ dateFormat \"2006-01-02T15:04\" .check.issued }}](fg-blue)\nconst (\n\tdefault_debug_template = \"{{ . | toJson}}\\n\"\n\tdefault_event_list_template = `{{ colorizedSensuStatus .check.status | printf \"%-6s\"}} [{{ .check.name | printf \"%-40s\" }}](fg-green) {{ .client.name }}`\n\tdefault_event_view_template = `\nEvent:\n _id: [{{ ._id }}](fg-blue)\n acknowledged: [{{ .acknowledged }}](fg-blue)\n\nClient:\n name: [{{ .client.name }}](fg-blue)\n instance_id: [{{ .client.instance_id }}](fg-blue)\n{{ if .client.tags }}\n fqdn: [{{ .client.tags.FQDN }}](fg-blue)\n ecosystem: [{{ .client.tags.Ecosystem }}](fg-blue)\n region: [{{ .client.tags.Region }}](fg-blue)\n display_name: [{{ index . \"client\" \"tags\" \"Display Name\" }}](fg-blue)\n{{ end }}\n\nCheck:\n name: [{{ .check.name }}](fg-green)\n command: [{{ .check.command }}](fg-green)\n issued: [{{ .check.issued }}](fg-blue)\n team: [{{ .check.team }}](fg-blue)\n project: [{{ .check.project }}](fg-blue)\n status: {{ colorizedSensuStatus .check.status }}\n\n runbook: [{{ .check.runbook }}](fg-blue)\n\n page: [{{ .check.page }}](fg-blue)\n ticket: [{{ .check.ticket }}](fg-blue)\n{{ if .check.history }}\n history: {{range .check.history}} {{ colorizedSensuStatus . }}{{end}}\n{{ end }}\n\nOutput:\n\n {{ indent 2 .check.output }}\n\n`\n\tdefault_help_template = `\n[Quick reference for tessen](fg-white)\n\n[Actions:](fg-blue)\n\n <enter> - select item\n h - show help page\n\n[Commands (a'la vim\/tig):](fg-blue)\n\n :query {JQ boolean expression} - display filtered results\n :help - show help page\n :<up> - select previous command\n :quit or :q - quit\n\n[Navigation:](fg-blue)\n\n up\/k - previous line\n down\/j - next line\n C-f\/<space> - next page\n C-b - previous page\n } - next paragraph\/section\/fast-move\n { - previous paragraph\/section\/fast-move\n n - next search match\n g - go to top of page\n G - go to bottom of page\n q - go back \/ quit\n C-c\/Q - quit\n\n[Notes:](fg-blue)\n\n Learning JQ is highly recommended, particularly boolean expressions:\n\n https:\/\/stedolan.github.io\/jq\/manual\/\n\n`\n)\n<commit_msg>Improve the event show template<commit_after>package tessen\n\nvar all_templates = map[string]string{\n\t\"debug\": default_debug_template,\n\t\"event_list\": default_event_list_template,\n\t\"event_view\": default_event_view_template,\n\t\"help\": default_help_template,\n}\n\n\/\/ issued: [{{ dateFormat \"2006-01-02T15:04\" .check.issued }}](fg-blue)\nconst (\n\tdefault_debug_template = \"{{ . | toJson}}\\n\"\n\tdefault_event_list_template = `{{ colorizedSensuStatus .check.status | printf \"%-6s\"}} [{{ .check.name | printf \"%-40s\" }}](fg-green) {{ .client.name }}`\n\tdefault_event_view_template = `\nEvent:\n _id: [{{ ._id }}](fg-blue)\n acknowledged: [{{ .acknowledged }}](fg-blue)\n occurrences: [{{ .occurrences }}](fg-blue)\n\nClient:\n name: [{{ .client.name }}](fg-green)\n instance_id: [{{ .client.instance_id }}](fg-blue)\n{{ if .client.tags }}\n fqdn: [{{ .client.tags.FQDN }}](fg-blue)\n ecosystem: [{{ .client.tags.Ecosystem }}](fg-green)\n region: [{{ .client.tags.Region }}](fg-blue)\n display_name: [{{ index . \"client\" \"tags\" \"Display Name\" }}](fg-blue)\n{{ end }}\n\nCheck:\n name: [{{ .check.name }}](fg-green)\n command: [{{ .check.command }}](fg-green)\n interval: [{{ .check.interval }}](fg-blue)\n issued: [{{ .check.issued }}](fg-blue)\n team: [{{ .check.team }}](fg-blue)\n project: [{{ .check.project }}](fg-blue)\n status: {{ colorizedSensuStatus .check.status }}\n\n runbook: [{{ .check.runbook }}](fg-blue)\n\n page: [{{ .check.page }}](fg-blue)\n ticket: [{{ .check.ticket }}](fg-blue)\n{{ if .check.history }}\n history: {{range .check.history}} {{ colorizedSensuStatus . }}{{end}}\n{{ end }}\n\nOutput:\n\n {{ indent 2 .check.output }}\n\n`\n\tdefault_help_template = `\n[Quick reference for tessen](fg-white)\n\n[Actions:](fg-blue)\n\n <enter> - select item\n h - show help page\n\n[Commands (a'la vim\/tig):](fg-blue)\n\n :query {JQ boolean expression} - display filtered results\n :help - show help page\n :<up> - select previous command\n :quit or :q - quit\n\n[Navigation:](fg-blue)\n\n up\/k - previous line\n down\/j - next line\n C-f\/<space> - next page\n C-b - previous page\n } - next paragraph\/section\/fast-move\n { - previous paragraph\/section\/fast-move\n n - next search match\n g - go to top of page\n G - go to bottom of page\n q - go back \/ quit\n C-c\/Q - quit\n\n[Notes:](fg-blue)\n\n Learning JQ is highly recommended, particularly boolean expressions:\n\n https:\/\/stedolan.github.io\/jq\/manual\/\n\n`\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"io\/ioutil\"\n\t\"sync\"\n)\n\n\/\/ Templates used to be executed on hosts\n\ntype Template struct {\n\tId string\n\tTitle string \/\/ Short title\n\tDescription string \/\/ Full description that explains in layman's terms what this does, so everyone can help as part of the authorization process\n\tCommand string \/\/ Command to be executed\n\tEnabled bool \/\/ Is this available for running?\n\tTimeout int \/\/ Seconds of execution before the command is killed\n\tAcl *TemplateACL\n\tValidationRules []*ExecutionValidation \/\/ Validation rules\n}\n\ntype TemplateACL struct {\n\tMinAuth uint \/\/ Minimum amount of authorization before the template is actually executed (eg 3 = requester + 2 additional approvers)\n\tIncludedTags []string\n\tExcludedTags []string\n}\n\ntype TemplateStore struct {\n\tConfFile string\n\tTemplates map[string]*Template\n\ttemplateMux sync.RWMutex\n}\n\nfunc (s *Template) IsValid() (bool, error) {\n\tif len(s.Title) < 1 {\n\t\treturn false, errors.New(\"Fill in a title\")\n\t}\n\n\t\/\/ Title must be unique\n\tserver.templateStore.templateMux.RLock()\n\tdefer server.templateStore.templateMux.RUnlock()\n\tfor _, template := range server.templateStore.Templates {\n\t\tif template.Title == s.Title {\n\t\t\treturn false, errors.New(\"Title is not unique\")\n\t\t}\n\t}\n\n\tif len(s.Description) < 1 {\n\t\treturn false, errors.New(\"Fill in a description\")\n\t}\n\tif len(s.Command) < 1 {\n\t\treturn false, errors.New(\"Fill in a command\")\n\t}\n\treturn true, nil\n}\n\nfunc (s *TemplateStore) Remove(templateId string) {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\tdelete(s.Templates, templateId)\n}\n\nfunc (s *TemplateStore) Get(templateId string) *Template {\n\ts.templateMux.RLock()\n\tdefer s.templateMux.RLock()\n\treturn s.Templates[templateId]\n}\n\nfunc (s *TemplateStore) Add(template *Template) {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\ts.Templates[template.Id] = template\n}\n\nfunc (s *TemplateStore) save() {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\tbytes, je := json.Marshal(s.Templates)\n\tif je != nil {\n\t\tlog.Printf(\"Failed to write templates: %s\", je)\n\t\treturn\n\t}\n\terr := ioutil.WriteFile(s.ConfFile, bytes, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to write templates: %s\", err)\n\t\treturn\n\t}\n}\n\nfunc (s *TemplateStore) load() {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\t\/\/ Read file and load into user store\n\tbytes, err := ioutil.ReadFile(s.ConfFile)\n\tif err == nil {\n\t\tvar v map[string]*Template\n\t\tje := json.Unmarshal(bytes, &v)\n\t\tif je != nil {\n\t\t\tlog.Printf(\"Invalid templates.json: %s\", je)\n\t\t\treturn\n\t\t}\n\t\ts.Templates = v\n\t}\n}\n\nfunc newTemplateStore() *TemplateStore {\n\ts := &TemplateStore{\n\t\tConfFile: \"\/etc\/indispenso\/templates.conf\",\n\t\tTemplates: make(map[string]*Template),\n\t}\n\ts.load()\n\treturn s\n}\n\nfunc newTemplateAcl() *TemplateACL {\n\treturn &TemplateACL{\n\t\tIncludedTags: make([]string, 0),\n\t\tExcludedTags: make([]string, 0),\n\t}\n}\n\nfunc newTemplate(title string, description string, command string, enabled bool, includedTags []string, excludedTags []string, minAuth uint, timeout int) *Template {\n\tid, _ := uuid.NewV4()\n\tacl := newTemplateAcl()\n\tacl.IncludedTags = includedTags\n\tacl.ExcludedTags = excludedTags\n\tacl.MinAuth = minAuth\n\n\tif len(acl.IncludedTags) == 1 && acl.IncludedTags[0] == \"\" {\n\t\tacl.IncludedTags = make([]string, 0)\n\t}\n\tif len(acl.ExcludedTags) == 1 && acl.ExcludedTags[0] == \"\" {\n\t\tacl.ExcludedTags = make([]string, 0)\n\t}\n\n\treturn &Template{\n\t\tId: id.String(),\n\t\tTitle: title,\n\t\tDescription: description,\n\t\tCommand: command,\n\t\tEnabled: enabled,\n\t\tAcl: acl,\n\t\tTimeout: timeout,\n\t}\n}\n<commit_msg>Init array of validation rules<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"io\/ioutil\"\n\t\"sync\"\n)\n\n\/\/ Templates used to be executed on hosts\n\ntype Template struct {\n\tId string\n\tTitle string \/\/ Short title\n\tDescription string \/\/ Full description that explains in layman's terms what this does, so everyone can help as part of the authorization process\n\tCommand string \/\/ Command to be executed\n\tEnabled bool \/\/ Is this available for running?\n\tTimeout int \/\/ Seconds of execution before the command is killed\n\tAcl *TemplateACL\n\tValidationRules []*ExecutionValidation \/\/ Validation rules\n}\n\ntype TemplateACL struct {\n\tMinAuth uint \/\/ Minimum amount of authorization before the template is actually executed (eg 3 = requester + 2 additional approvers)\n\tIncludedTags []string\n\tExcludedTags []string\n}\n\ntype TemplateStore struct {\n\tConfFile string\n\tTemplates map[string]*Template\n\ttemplateMux sync.RWMutex\n}\n\nfunc (s *Template) IsValid() (bool, error) {\n\tif len(s.Title) < 1 {\n\t\treturn false, errors.New(\"Fill in a title\")\n\t}\n\n\t\/\/ Title must be unique\n\tserver.templateStore.templateMux.RLock()\n\tdefer server.templateStore.templateMux.RUnlock()\n\tfor _, template := range server.templateStore.Templates {\n\t\tif template.Title == s.Title {\n\t\t\treturn false, errors.New(\"Title is not unique\")\n\t\t}\n\t}\n\n\tif len(s.Description) < 1 {\n\t\treturn false, errors.New(\"Fill in a description\")\n\t}\n\tif len(s.Command) < 1 {\n\t\treturn false, errors.New(\"Fill in a command\")\n\t}\n\treturn true, nil\n}\n\nfunc (s *TemplateStore) Remove(templateId string) {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\tdelete(s.Templates, templateId)\n}\n\nfunc (s *TemplateStore) Get(templateId string) *Template {\n\ts.templateMux.RLock()\n\tdefer s.templateMux.RLock()\n\treturn s.Templates[templateId]\n}\n\nfunc (s *TemplateStore) Add(template *Template) {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\ts.Templates[template.Id] = template\n}\n\nfunc (s *TemplateStore) save() {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\tbytes, je := json.Marshal(s.Templates)\n\tif je != nil {\n\t\tlog.Printf(\"Failed to write templates: %s\", je)\n\t\treturn\n\t}\n\terr := ioutil.WriteFile(s.ConfFile, bytes, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to write templates: %s\", err)\n\t\treturn\n\t}\n}\n\nfunc (s *TemplateStore) load() {\n\ts.templateMux.Lock()\n\tdefer s.templateMux.Unlock()\n\t\/\/ Read file and load into user store\n\tbytes, err := ioutil.ReadFile(s.ConfFile)\n\tif err == nil {\n\t\tvar v map[string]*Template\n\t\tje := json.Unmarshal(bytes, &v)\n\t\tif je != nil {\n\t\t\tlog.Printf(\"Invalid templates.json: %s\", je)\n\t\t\treturn\n\t\t}\n\t\ts.Templates = v\n\t}\n}\n\nfunc newTemplateStore() *TemplateStore {\n\ts := &TemplateStore{\n\t\tConfFile: \"\/etc\/indispenso\/templates.conf\",\n\t\tTemplates: make(map[string]*Template),\n\t}\n\ts.load()\n\treturn s\n}\n\nfunc newTemplateAcl() *TemplateACL {\n\treturn &TemplateACL{\n\t\tIncludedTags: make([]string, 0),\n\t\tExcludedTags: make([]string, 0),\n\t}\n}\n\nfunc newTemplate(title string, description string, command string, enabled bool, includedTags []string, excludedTags []string, minAuth uint, timeout int) *Template {\n\tid, _ := uuid.NewV4()\n\tacl := newTemplateAcl()\n\tacl.IncludedTags = includedTags\n\tacl.ExcludedTags = excludedTags\n\tacl.MinAuth = minAuth\n\n\tif len(acl.IncludedTags) == 1 && acl.IncludedTags[0] == \"\" {\n\t\tacl.IncludedTags = make([]string, 0)\n\t}\n\tif len(acl.ExcludedTags) == 1 && acl.ExcludedTags[0] == \"\" {\n\t\tacl.ExcludedTags = make([]string, 0)\n\t}\n\n\treturn &Template{\n\t\tId: id.String(),\n\t\tTitle: title,\n\t\tDescription: description,\n\t\tCommand: command,\n\t\tEnabled: enabled,\n\t\tAcl: acl,\n\t\tTimeout: timeout,\n\t\tValidationRules: make([]*ExecutionValidation, 0),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport ( \n \"fmt\"\n \"math\/rand\"\n \"time\" \n \"github.com\/fjukstad\/rpcman\"\n)\n\n\nfunc genrands(num int) (ret [] float64) { \n\n ret = make([] float64, num)\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n for i := 0; i < num; i++ {\n ret[i] = r.Float64() * 100\n }\n return ret\n \n} \n\n\n\nfunc main() {\n rpc := rpcman.Init(\"tcp:\/\/localhost:5555\") \n\n for i := 0; i < 10; i++ {\n floats := genrands(100)\n sum,_ := rpc.Call(\"sum\", floats)\n std,_ := rpc.Call(\"std\", floats)\n variance,_ := rpc.Call(\"var\", floats) \n mean,_ := rpc.Call(\"mean\", floats) \n \n add, _ := rpc.Call(\"add\", 2,3)\n\n fmt.Println(sum,std,variance,mean,add) \n }\n\n rpc.Close()\n\n} \n<commit_msg>new tests for new implementation<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/fjukstad\/rpcman\"\n)\n\nfunc genrands(num int) (ret []float64) {\n\n\tret = make([]float64, num)\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tfor i := 0; i < num; i++ {\n\t\tret[i] = r.Float64() * 100\n\t}\n\treturn ret\n\n}\n\nfunc main() {\n\trpc, err := rpcman.Init(\"tcp:\/\/localhost:5555\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tfloats := genrands(100)\n\t\tsum, _ := rpc.Call(\"sum\", floats)\n\t\tstd, _ := rpc.Call(\"std\", floats)\n\t\tvariance, _ := rpc.Call(\"var\", floats)\n\t\tmean, _ := rpc.Call(\"mean\", floats)\n\n\t\tadd, _ := rpc.Call(\"add\", 2, 3)\n\n\t\tfmt.Println(sum, std, variance, mean, add)\n\t}\n\n\t\/\/ rpc.Close()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package cache\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/weaveworks\/flux\/image\"\n\t\"github.com\/weaveworks\/flux\/registry\"\n)\n\nconst refreshWhenExpiryWithin = time.Minute\nconst askForNewImagesInterval = time.Minute\n\n\/\/ Warmer refreshes the information kept in the cache from remote\n\/\/ registries.\ntype Warmer struct {\n\tclientFactory registry.ClientFactory\n\tcache Client\n\tburst int\n\tPriority chan image.Name\n\tNotify func()\n}\n\n\/\/ NewWarmer creates cache warmer that (when Loop is invoked) will\n\/\/ periodically refresh the values kept in the cache.\nfunc NewWarmer(cf registry.ClientFactory, cacheClient Client, burst int) (*Warmer, error) {\n\tif cf == nil || cacheClient == nil || burst <= 0 {\n\t\treturn nil, errors.New(\"arguments must be non-nil (or > 0 in the case of burst)\")\n\t}\n\treturn &Warmer{\n\t\tclientFactory: cf,\n\t\tcache: cacheClient,\n\t\tburst: burst,\n\t}, nil\n}\n\n\/\/ .. and this is what we keep in the backlog\ntype backlogItem struct {\n\timage.Name\n\tregistry.Credentials\n}\n\n\/\/ Loop continuously gets the images to populate the cache with,\n\/\/ and populate the cache with them.\nfunc (w *Warmer) Loop(logger log.Logger, stop <-chan struct{}, wg *sync.WaitGroup, imagesToFetchFunc func() registry.ImageCreds) {\n\tdefer wg.Done()\n\n\trefresh := time.Tick(askForNewImagesInterval)\n\timageCreds := imagesToFetchFunc()\n\tbacklog := imageCredsToBacklog(imageCreds)\n\n\t\/\/ We have some fine control over how long to spend on each fetch\n\t\/\/ operation, since they are given a `context`. For now though,\n\t\/\/ just rattle through them one by one, however long they take.\n\tctx := context.Background()\n\n\t\/\/ This loop acts keeps a kind of priority queue, whereby image\n\t\/\/ names coming in on the `Priority` channel are looked up first.\n\t\/\/ If there are none, images used in the cluster are refreshed;\n\t\/\/ but no more often than once every `askForNewImagesInterval`,\n\t\/\/ since there is no effective back-pressure on cache refreshes\n\t\/\/ and it would spin freely otherwise).\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlogger.Log(\"stopping\", \"true\")\n\t\t\treturn\n\t\tcase name := <-w.Priority:\n\t\t\tlogger.Log(\"priority\", name.String())\n\t\t\t\/\/ NB the implicit contract here is that the prioritised\n\t\t\t\/\/ image has to have been running the last time we\n\t\t\t\/\/ requested the credentials.\n\t\t\tif creds, ok := imageCreds[name]; ok {\n\t\t\t\tw.warm(ctx, logger, name, creds)\n\t\t\t} else {\n\t\t\t\tlogger.Log(\"priority\", name.String(), \"err\", \"no creds available\")\n\t\t\t}\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\n\t\tif len(backlog) > 0 {\n\t\t\tim := backlog[0]\n\t\t\tbacklog = backlog[1:]\n\t\t\tw.warm(ctx, logger, im.Name, im.Credentials)\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase <-refresh:\n\t\t\t\timageCreds = imagesToFetchFunc()\n\t\t\t\tbacklog = imageCredsToBacklog(imageCreds)\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc imageCredsToBacklog(imageCreds registry.ImageCreds) []backlogItem {\n\tbacklog := make([]backlogItem, len(imageCreds))\n\tvar i int\n\tfor name, cred := range imageCreds {\n\t\tbacklog[i] = backlogItem{name, cred}\n\t\ti++\n\t}\n\treturn backlog\n}\n\nfunc (w *Warmer) warm(ctx context.Context, logger log.Logger, id image.Name, creds registry.Credentials) {\n\terrorLogger := log.With(logger, \"canonical_name\", id.CanonicalName(), \"auth\", creds)\n\tclient, err := w.clientFactory.ClientFor(id.CanonicalName(), creds)\n\tif err != nil {\n\t\terrorLogger.Log(\"err\", err.Error())\n\t\treturn\n\t}\n\n\t\/\/ This is what we're going to write back to the cache\n\tvar repo ImageRepository\n\trepoKey := NewRepositoryKey(id.CanonicalName())\n\tbytes, _, err := w.cache.GetKey(repoKey)\n\tif err == nil {\n\t\terr = json.Unmarshal(bytes, &repo)\n\t} else if err == ErrNotCached {\n\t\terr = nil\n\t}\n\n\tif err != nil {\n\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"fetching previous result from cache\"))\n\t\treturn\n\t}\n\t\/\/ Save for comparison later\n\toldImages := repo.Images\n\n\t\/\/ Now we have the previous result; everything after will be\n\t\/\/ attempting to refresh that value. Whatever happens, at the end\n\t\/\/ we'll write something back.\n\tdefer func() {\n\t\tbytes, err := json.Marshal(repo)\n\t\tif err == nil {\n\t\t\terr = w.cache.SetKey(repoKey, bytes)\n\t\t}\n\t\tif err != nil {\n\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"writing result to cache\"))\n\t\t}\n\t}()\n\n\ttags, err := client.Tags(ctx)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) && !strings.Contains(err.Error(), \"net\/http: request canceled\") {\n\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"requesting tags\"))\n\t\t\trepo.LastError = err.Error()\n\t\t}\n\t\treturn\n\t}\n\n\tnewImages := map[string]image.Info{}\n\n\t\/\/ Create a list of manifests that need updating\n\tvar toUpdate []image.Ref\n\tvar missing, expired int\n\tfor _, tag := range tags {\n\t\t\/\/ See if we have the manifest already cached\n\t\tnewID := id.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, expiry, err := w.cache.GetKey(key)\n\t\t\/\/ If err, then we don't have it yet. Update.\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tmissing++\n\t\tcase time.Until(expiry) < refreshWhenExpiryWithin:\n\t\t\texpired++\n\t\tdefault:\n\t\t\tvar image image.Info\n\t\t\tif err := json.Unmarshal(bytes, &image); err == nil {\n\t\t\t\tnewImages[tag] = image\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmissing++\n\t\t}\n\t\ttoUpdate = append(toUpdate, newID)\n\t}\n\n\tvar successCount int\n\n\tif len(toUpdate) > 0 {\n\t\tlogger.Log(\"fetching\", id.String(), \"total\", len(toUpdate), \"expired\", expired, \"missing\", missing)\n\t\tvar successMx sync.Mutex\n\n\t\t\/\/ The upper bound for concurrent fetches against a single host is\n\t\t\/\/ w.Burst, so limit the number of fetching goroutines to that.\n\t\tfetchers := make(chan struct{}, w.burst)\n\t\tawaitFetchers := &sync.WaitGroup{}\n\tupdates:\n\t\tfor _, imID := range toUpdate {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tbreak updates\n\t\t\tcase fetchers <- struct{}{}:\n\t\t\t}\n\n\t\t\tawaitFetchers.Add(1)\n\t\t\tgo func(imageID image.Ref) {\n\t\t\t\tdefer func() { awaitFetchers.Done(); <-fetchers }()\n\t\t\t\t\/\/ Get the image from the remote\n\t\t\t\timg, err := client.Manifest(ctx, imageID.Tag)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err, ok := errors.Cause(err).(net.Error); ok && err.Timeout() {\n\t\t\t\t\t\t\/\/ This was due to a context timeout, don't bother logging\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"requesting manifests\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tkey := NewManifestKey(img.ID.CanonicalRef())\n\t\t\t\t\/\/ Write back to memcached\n\t\t\t\tval, err := json.Marshal(img)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"serializing tag to store in cache\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = w.cache.SetKey(key, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"storing manifests in cache\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsuccessMx.Lock()\n\t\t\t\tsuccessCount++\n\t\t\t\tnewImages[imageID.Tag] = img\n\t\t\t\tsuccessMx.Unlock()\n\t\t\t}(imID)\n\t\t}\n\t\tawaitFetchers.Wait()\n\t\tlogger.Log(\"updated\", id.String(), \"count\", successCount)\n\t}\n\n\t\/\/ We managed to fetch new metadata for everything we were missing\n\t\/\/ (if anything). Ratchet the result forward.\n\tif successCount == len(toUpdate) {\n\t\trepo = ImageRepository{\n\t\t\tLastUpdate: time.Now(),\n\t\t\tImages: newImages,\n\t\t}\n\t}\n\n\tif w.Notify != nil {\n\t\tcacheTags := StringSet{}\n\t\tfor t := range oldImages {\n\t\t\tcacheTags[t] = struct{}{}\n\t\t}\n\n\t\t\/\/ If there's more tags than there used to be, there must be\n\t\t\/\/ at least one new tag.\n\t\tif len(cacheTags) < len(tags) {\n\t\t\tw.Notify()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Otherwise, check whether there are any entries in the\n\t\t\/\/ fetched tags that aren't in the cached tags.\n\t\ttagSet := NewStringSet(tags)\n\t\tif !tagSet.Subset(cacheTags) {\n\t\t\tw.Notify()\n\t\t}\n\t}\n}\n\n\/\/ StringSet is a set of strings.\ntype StringSet map[string]struct{}\n\n\/\/ NewStringSet returns a StringSet containing exactly the strings\n\/\/ given as arguments.\nfunc NewStringSet(ss []string) StringSet {\n\tres := StringSet{}\n\tfor _, s := range ss {\n\t\tres[s] = struct{}{}\n\t}\n\treturn res\n}\n\n\/\/ Subset returns true if `s` is a subset of `t` (including the case\n\/\/ of having the same members).\nfunc (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>Avoid spin in cache refresh loop To implement the loop as described in the comment above it, when the backlog is empty we need to block until the refresh tick fires (or a priority image comes in). By having a default case, it's possible to spin.<commit_after>package cache\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/weaveworks\/flux\/image\"\n\t\"github.com\/weaveworks\/flux\/registry\"\n)\n\nconst refreshWhenExpiryWithin = time.Minute\nconst askForNewImagesInterval = time.Minute\n\n\/\/ Warmer refreshes the information kept in the cache from remote\n\/\/ registries.\ntype Warmer struct {\n\tclientFactory registry.ClientFactory\n\tcache Client\n\tburst int\n\tPriority chan image.Name\n\tNotify func()\n}\n\n\/\/ NewWarmer creates cache warmer that (when Loop is invoked) will\n\/\/ periodically refresh the values kept in the cache.\nfunc NewWarmer(cf registry.ClientFactory, cacheClient Client, burst int) (*Warmer, error) {\n\tif cf == nil || cacheClient == nil || burst <= 0 {\n\t\treturn nil, errors.New(\"arguments must be non-nil (or > 0 in the case of burst)\")\n\t}\n\treturn &Warmer{\n\t\tclientFactory: cf,\n\t\tcache: cacheClient,\n\t\tburst: burst,\n\t}, nil\n}\n\n\/\/ .. and this is what we keep in the backlog\ntype backlogItem struct {\n\timage.Name\n\tregistry.Credentials\n}\n\n\/\/ Loop continuously gets the images to populate the cache with,\n\/\/ and populate the cache with them.\nfunc (w *Warmer) Loop(logger log.Logger, stop <-chan struct{}, wg *sync.WaitGroup, imagesToFetchFunc func() registry.ImageCreds) {\n\tdefer wg.Done()\n\n\trefresh := time.Tick(askForNewImagesInterval)\n\timageCreds := imagesToFetchFunc()\n\tbacklog := imageCredsToBacklog(imageCreds)\n\n\t\/\/ We have some fine control over how long to spend on each fetch\n\t\/\/ operation, since they are given a `context`. For now though,\n\t\/\/ just rattle through them one by one, however long they take.\n\tctx := context.Background()\n\n\t\/\/ NB the implicit contract here is that the prioritised\n\t\/\/ image has to have been running the last time we\n\t\/\/ requested the credentials.\n\tpriorityWarm := func (name image.Name) {\n\t\tlogger.Log(\"priority\", name.String())\n\t\tif creds, ok := imageCreds[name]; ok {\n\t\t\tw.warm(ctx, logger, name, creds)\n\t\t} else {\n\t\t\tlogger.Log(\"priority\", name.String(), \"err\", \"no creds available\")\n\t\t}\n\t}\n\n\t\/\/ This loop acts keeps a kind of priority queue, whereby image\n\t\/\/ names coming in on the `Priority` channel are looked up first.\n\t\/\/ If there are none, images used in the cluster are refreshed;\n\t\/\/ but no more often than once every `askForNewImagesInterval`,\n\t\/\/ since there is no effective back-pressure on cache refreshes\n\t\/\/ and it would spin freely otherwise).\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tlogger.Log(\"stopping\", \"true\")\n\t\t\treturn\n\t\tcase name := <-w.Priority:\n\t\t\tpriorityWarm(name)\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\n\t\tif len(backlog) > 0 {\n\t\t\tim := backlog[0]\n\t\t\tbacklog = backlog[1:]\n\t\t\tw.warm(ctx, logger, im.Name, im.Credentials)\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\tlogger.Log(\"stopping\", \"true\")\n\t\t\t\treturn \n\t\t\tcase <-refresh:\n\t\t\t\timageCreds = imagesToFetchFunc()\n\t\t\t\tbacklog = imageCredsToBacklog(imageCreds)\n\t\t\tcase name := <-w.Priority:\n\t\t\t\tpriorityWarm(name)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\nfunc imageCredsToBacklog(imageCreds registry.ImageCreds) []backlogItem {\n\tbacklog := make([]backlogItem, len(imageCreds))\n\tvar i int\n\tfor name, cred := range imageCreds {\n\t\tbacklog[i] = backlogItem{name, cred}\n\t\ti++\n\t}\n\treturn backlog\n}\n\nfunc (w *Warmer) warm(ctx context.Context, logger log.Logger, id image.Name, creds registry.Credentials) {\n\terrorLogger := log.With(logger, \"canonical_name\", id.CanonicalName(), \"auth\", creds)\n\tclient, err := w.clientFactory.ClientFor(id.CanonicalName(), creds)\n\tif err != nil {\n\t\terrorLogger.Log(\"err\", err.Error())\n\t\treturn\n\t}\n\n\t\/\/ This is what we're going to write back to the cache\n\tvar repo ImageRepository\n\trepoKey := NewRepositoryKey(id.CanonicalName())\n\tbytes, _, err := w.cache.GetKey(repoKey)\n\tif err == nil {\n\t\terr = json.Unmarshal(bytes, &repo)\n\t} else if err == ErrNotCached {\n\t\terr = nil\n\t}\n\n\tif err != nil {\n\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"fetching previous result from cache\"))\n\t\treturn\n\t}\n\t\/\/ Save for comparison later\n\toldImages := repo.Images\n\n\t\/\/ Now we have the previous result; everything after will be\n\t\/\/ attempting to refresh that value. Whatever happens, at the end\n\t\/\/ we'll write something back.\n\tdefer func() {\n\t\tbytes, err := json.Marshal(repo)\n\t\tif err == nil {\n\t\t\terr = w.cache.SetKey(repoKey, bytes)\n\t\t}\n\t\tif err != nil {\n\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"writing result to cache\"))\n\t\t}\n\t}()\n\n\ttags, err := client.Tags(ctx)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) && !strings.Contains(err.Error(), \"net\/http: request canceled\") {\n\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"requesting tags\"))\n\t\t\trepo.LastError = err.Error()\n\t\t}\n\t\treturn\n\t}\n\n\tnewImages := map[string]image.Info{}\n\n\t\/\/ Create a list of manifests that need updating\n\tvar toUpdate []image.Ref\n\tvar missing, expired int\n\tfor _, tag := range tags {\n\t\t\/\/ See if we have the manifest already cached\n\t\tnewID := id.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, expiry, err := w.cache.GetKey(key)\n\t\t\/\/ If err, then we don't have it yet. Update.\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tmissing++\n\t\tcase time.Until(expiry) < refreshWhenExpiryWithin:\n\t\t\texpired++\n\t\tdefault:\n\t\t\tvar image image.Info\n\t\t\tif err := json.Unmarshal(bytes, &image); err == nil {\n\t\t\t\tnewImages[tag] = image\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmissing++\n\t\t}\n\t\ttoUpdate = append(toUpdate, newID)\n\t}\n\n\tvar successCount int\n\n\tif len(toUpdate) > 0 {\n\t\tlogger.Log(\"fetching\", id.String(), \"total\", len(toUpdate), \"expired\", expired, \"missing\", missing)\n\t\tvar successMx sync.Mutex\n\n\t\t\/\/ The upper bound for concurrent fetches against a single host is\n\t\t\/\/ w.Burst, so limit the number of fetching goroutines to that.\n\t\tfetchers := make(chan struct{}, w.burst)\n\t\tawaitFetchers := &sync.WaitGroup{}\n\tupdates:\n\t\tfor _, imID := range toUpdate {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tbreak updates\n\t\t\tcase fetchers <- struct{}{}:\n\t\t\t}\n\n\t\t\tawaitFetchers.Add(1)\n\t\t\tgo func(imageID image.Ref) {\n\t\t\t\tdefer func() { awaitFetchers.Done(); <-fetchers }()\n\t\t\t\t\/\/ Get the image from the remote\n\t\t\t\timg, err := client.Manifest(ctx, imageID.Tag)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err, ok := errors.Cause(err).(net.Error); ok && err.Timeout() {\n\t\t\t\t\t\t\/\/ This was due to a context timeout, don't bother logging\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"requesting manifests\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tkey := NewManifestKey(img.ID.CanonicalRef())\n\t\t\t\t\/\/ Write back to memcached\n\t\t\t\tval, err := json.Marshal(img)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"serializing tag to store in cache\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = w.cache.SetKey(key, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorLogger.Log(\"err\", errors.Wrap(err, \"storing manifests in cache\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsuccessMx.Lock()\n\t\t\t\tsuccessCount++\n\t\t\t\tnewImages[imageID.Tag] = img\n\t\t\t\tsuccessMx.Unlock()\n\t\t\t}(imID)\n\t\t}\n\t\tawaitFetchers.Wait()\n\t\tlogger.Log(\"updated\", id.String(), \"count\", successCount)\n\t}\n\n\t\/\/ We managed to fetch new metadata for everything we were missing\n\t\/\/ (if anything). Ratchet the result forward.\n\tif successCount == len(toUpdate) {\n\t\trepo = ImageRepository{\n\t\t\tLastUpdate: time.Now(),\n\t\t\tImages: newImages,\n\t\t}\n\t}\n\n\tif w.Notify != nil {\n\t\tcacheTags := StringSet{}\n\t\tfor t := range oldImages {\n\t\t\tcacheTags[t] = struct{}{}\n\t\t}\n\n\t\t\/\/ If there's more tags than there used to be, there must be\n\t\t\/\/ at least one new tag.\n\t\tif len(cacheTags) < len(tags) {\n\t\t\tw.Notify()\n\t\t\treturn\n\t\t}\n\t\t\/\/ Otherwise, check whether there are any entries in the\n\t\t\/\/ fetched tags that aren't in the cached tags.\n\t\ttagSet := NewStringSet(tags)\n\t\tif !tagSet.Subset(cacheTags) {\n\t\t\tw.Notify()\n\t\t}\n\t}\n}\n\n\/\/ StringSet is a set of strings.\ntype StringSet map[string]struct{}\n\n\/\/ NewStringSet returns a StringSet containing exactly the strings\n\/\/ given as arguments.\nfunc NewStringSet(ss []string) StringSet {\n\tres := StringSet{}\n\tfor _, s := range ss {\n\t\tres[s] = struct{}{}\n\t}\n\treturn res\n}\n\n\/\/ Subset returns true if `s` is a subset of `t` (including the case\n\/\/ of having the same members).\nfunc (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package repository\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/hoffie\/larasync\/repository\/odf\"\n)\n\n\/\/ Transaction represents a server side transaction for specific NIBs\n\/\/ which is used to synchronize the different clients.\ntype Transaction struct {\n\tID int64\n\tNIBIDs []string\n\tPreviousID int64\n}\n\n\/\/ newTransactionFromPb returns a new Transaction from the\n\/\/ protobuf Transaction.\nfunc newTransactionFromPb(pbTransaction *odf.Transaction) *Transaction {\n\treturn &Transaction{\n\t\tID: pbTransaction.GetID(),\n\t\tPreviousID: pbTransaction.GetPreviousID(),\n\t\tNIBIDs: pbTransaction.GetNIBIDs(),\n\t}\n}\n\n\/\/ toPb converts this Transaction to a protobuf Transaction.\n\/\/ This is used by the encoder.\nfunc (t *Transaction) toPb() (*odf.Transaction, error) {\n\tif t.ID == 0 {\n\t\treturn nil, errors.New(\"Transaction ID must not be empty\")\n\t}\n\tif len(t.NIBIDs) == 0 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"The transaction with ID %d has no NIB IDs\",\n\t\t\tt.ID,\n\t\t)\n\t}\n\tprotoTransaction := &odf.Transaction{\n\t\tID: &t.ID,\n\t\tPreviousID: nil,\n\t\tNIBIDs: t.NIBIDs}\n\tif t.PreviousID != 0 {\n\t\tprotoTransaction.PreviousID = &t.PreviousID\n\t}\n\treturn protoTransaction, nil\n}\n\n\/\/ nibUUIDsFromTransactions returns all uuids from a list of transactions.\nfunc nibUUIDsFromTransactions(transactions []*Transaction) <-chan string {\n\tnibUUIDChannel := make(chan string, 100)\n\tgo func() {\n\t\tfor _, transaction := range transactions {\n\t\t\tfor _, nibID := range transaction.NIBIDs {\n\t\t\t\tnibUUIDChannel <- nibID\n\t\t\t}\n\t\t}\n\t\tclose(nibUUIDChannel)\n\t}()\n\treturn nibUUIDChannel\n}\n<commit_msg>repository: Implemented the IDString() function in the Transaction object.<commit_after>package repository\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/hoffie\/larasync\/repository\/odf\"\n)\n\n\/\/ Transaction represents a server side transaction for specific NIBs\n\/\/ which is used to synchronize the different clients.\ntype Transaction struct {\n\tID int64\n\tNIBIDs []string\n\tPreviousID int64\n}\n\n\/\/ newTransactionFromPb returns a new Transaction from the\n\/\/ protobuf Transaction.\nfunc newTransactionFromPb(pbTransaction *odf.Transaction) *Transaction {\n\treturn &Transaction{\n\t\tID: pbTransaction.GetID(),\n\t\tPreviousID: pbTransaction.GetPreviousID(),\n\t\tNIBIDs: pbTransaction.GetNIBIDs(),\n\t}\n}\n\n\/\/ toPb converts this Transaction to a protobuf Transaction.\n\/\/ This is used by the encoder.\nfunc (t *Transaction) toPb() (*odf.Transaction, error) {\n\tif t.ID == 0 {\n\t\treturn nil, errors.New(\"Transaction ID must not be empty\")\n\t}\n\tif len(t.NIBIDs) == 0 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"The transaction with ID %d has no NIB IDs\",\n\t\t\tt.ID,\n\t\t)\n\t}\n\tprotoTransaction := &odf.Transaction{\n\t\tID: &t.ID,\n\t\tPreviousID: nil,\n\t\tNIBIDs: t.NIBIDs}\n\tif t.PreviousID != 0 {\n\t\tprotoTransaction.PreviousID = &t.PreviousID\n\t}\n\treturn protoTransaction, nil\n}\n\n\/\/ IDString returns the ID of this Transaction as a string.\nfunc (t *Transaction) IDString() string {\n\treturn strconv.FormatInt(t.ID, 10)\n}\n\n\/\/ nibUUIDsFromTransactions returns all uuids from a list of transactions.\nfunc nibUUIDsFromTransactions(transactions []*Transaction) <-chan string {\n\tnibUUIDChannel := make(chan string, 100)\n\tgo func() {\n\t\tfor _, transaction := range transactions {\n\t\t\tfor _, nibID := range transaction.NIBIDs {\n\t\t\t\tnibUUIDChannel <- nibID\n\t\t\t}\n\t\t}\n\t\tclose(nibUUIDChannel)\n\t}()\n\treturn nibUUIDChannel\n}\n<|endoftext|>"} {"text":"<commit_before>package admin\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/action_bar\"\n\t\"github.com\/qor\/activity\"\n\t\"github.com\/qor\/admin\"\n\t\"github.com\/qor\/i18n\/exchange_actions\"\n\t\"github.com\/qor\/media_library\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor-example\/app\/models\"\n\t\"github.com\/qor\/qor-example\/config\/admin\/bindatafs\"\n\t\"github.com\/qor\/qor-example\/config\/auth\"\n\t\"github.com\/qor\/qor-example\/config\/i18n\"\n\t\"github.com\/qor\/qor-example\/db\"\n\t\"github.com\/qor\/qor\/resource\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/transition\"\n\t\"github.com\/qor\/validations\"\n)\n\nvar Admin *admin.Admin\nvar ActionBar *action_bar.ActionBar\nvar Countries = []string{\"China\", \"Japan\", \"USA\"}\n\nfunc init() {\n\tAdmin = admin.New(&qor.Config{DB: db.Publish.DraftDB()})\n\tAdmin.SetSiteName(\"Qor DEMO\")\n\tAdmin.SetAuth(auth.AdminAuth{})\n\tAdmin.SetAssetFS(bindatafs.AssetFS)\n\n\t\/\/ Add Dashboard\n\tAdmin.AddMenu(&admin.Menu{Name: \"Dashboard\", Link: \"\/admin\"})\n\n\t\/\/ Add Asset Manager, for rich editor\n\tassetManager := Admin.AddResource(&media_library.AssetManager{}, &admin.Config{Invisible: true})\n\n\t\/\/ Add Product\n\tproduct := Admin.AddResource(&models.Product{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tproduct.Meta(&admin.Meta{Name: \"MadeCountry\", Type: \"select_one\", Collection: Countries})\n\tproduct.Meta(&admin.Meta{Name: \"Description\", Type: \"rich_editor\", Resource: assetManager})\n\n\tcolorVariationMeta := product.Meta(&admin.Meta{Name: \"ColorVariations\"})\n\tcolorVariation := colorVariationMeta.Resource\n\tcolorVariation.NewAttrs(\"-Product\")\n\tcolorVariation.EditAttrs(\"-Product\")\n\n\tsizeVariationMeta := colorVariation.Meta(&admin.Meta{Name: \"SizeVariations\"})\n\tsizeVariation := sizeVariationMeta.Resource\n\tsizeVariation.NewAttrs(\"-ColorVariation\")\n\tsizeVariation.EditAttrs(\n\t\t&admin.Section{\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Size\", \"AvailableQuantity\"},\n\t\t\t},\n\t\t},\n\t)\n\n\tproduct.SearchAttrs(\"Name\", \"Code\", \"Category.Name\", \"Brand.Name\")\n\tproduct.EditAttrs(\n\t\t&admin.Section{\n\t\t\tTitle: \"Basic Information\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Name\"},\n\t\t\t\t{\"Code\", \"Price\"},\n\t\t\t\t{\"Enabled\"},\n\t\t\t}},\n\t\t&admin.Section{\n\t\t\tTitle: \"Organization\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Category\", \"MadeCountry\"},\n\t\t\t\t{\"Collections\"},\n\t\t\t}},\n\t\t\"Description\",\n\t\t\"ColorVariations\",\n\t)\n\n\tfor _, country := range Countries {\n\t\tvar country = country\n\t\tproduct.Scope(&admin.Scope{Name: country, Group: \"Made Country\", Handle: func(db *gorm.DB, ctx *qor.Context) *gorm.DB {\n\t\t\treturn db.Where(\"made_country = ?\", country)\n\t\t}})\n\t}\n\n\tproduct.IndexAttrs(\"-ColorVariations\")\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"View On Site\",\n\t\tURL: func(record interface{}, context *admin.Context) string {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn fmt.Sprintf(\"\/products\/%v\", product.Code)\n\t\t\t}\n\t\t\treturn \"#\"\n\t\t},\n\t\tModes: []string{\"menu_item\", \"edit\"},\n\t})\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"Disable\",\n\t\tHandle: func(arg *admin.ActionArgument) error {\n\t\t\tfor _, record := range arg.FindSelectedRecords() {\n\t\t\t\targ.Context.DB.Model(record.(*models.Product)).Update(\"enabled\", false)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn product.Enabled == true\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tModes: []string{\"index\", \"edit\", \"menu_item\"},\n\t})\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"Enable\",\n\t\tHandle: func(arg *admin.ActionArgument) error {\n\t\t\tfor _, record := range arg.FindSelectedRecords() {\n\t\t\t\targ.Context.DB.Model(record.(*models.Product)).Update(\"enabled\", true)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn product.Enabled == false\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tModes: []string{\"index\", \"edit\", \"menu_item\"},\n\t})\n\n\tAdmin.AddResource(&models.Color{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Size{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Category{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Collection{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\n\t\/\/ Add Order\n\torder := Admin.AddResource(&models.Order{}, &admin.Config{Menu: []string{\"Order Management\"}})\n\torder.Meta(&admin.Meta{Name: \"ShippingAddress\", Type: \"single_edit\"})\n\torder.Meta(&admin.Meta{Name: \"BillingAddress\", Type: \"single_edit\"})\n\torder.Meta(&admin.Meta{Name: \"ShippedAt\", Type: \"date\"})\n\n\torderItemMeta := order.Meta(&admin.Meta{Name: \"OrderItems\"})\n\torderItemMeta.Resource.Meta(&admin.Meta{Name: \"SizeVariation\", Type: \"select_one\", Collection: sizeVariationCollection})\n\torderItemMeta.Resource.NewAttrs(\"-State\")\n\torderItemMeta.Resource.EditAttrs(\"-State\")\n\n\t\/\/ define scopes for Order\n\tfor _, state := range []string{\"checkout\", \"cancelled\", \"paid\", \"paid_cancelled\", \"processing\", \"shipped\", \"returned\"} {\n\t\tvar state = state\n\t\torder.Scope(&admin.Scope{\n\t\t\tName: state,\n\t\t\tLabel: strings.Title(strings.Replace(state, \"_\", \" \", -1)),\n\t\t\tGroup: \"Order Status\",\n\t\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(models.Order{Transition: transition.Transition{State: state}})\n\t\t\t},\n\t\t})\n\t}\n\n\t\/\/ define actions for Order\n\ttype trackingNumberArgument struct {\n\t\tTrackingNumber string\n\t}\n\n\torder.Action(&admin.Action{\n\t\tName: \"Processing\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tfor _, order := range argument.FindSelectedRecords() {\n\t\t\t\tdb := argument.Context.GetDB()\n\t\t\t\tif err := models.OrderState.Trigger(\"process\", order.(*models.Order), db); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdb.Select(\"state\").Save(order)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\treturn order.State == \"paid\"\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tModes: []string{\"show\", \"menu_item\"},\n\t})\n\torder.Action(&admin.Action{\n\t\tName: \"Ship\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tvar (\n\t\t\t\ttx = argument.Context.GetDB().Begin()\n\t\t\t\ttrackingNumberArgument = argument.Argument.(*trackingNumberArgument)\n\t\t\t)\n\n\t\t\tif trackingNumberArgument.TrackingNumber != \"\" {\n\t\t\t\tfor _, record := range argument.FindSelectedRecords() {\n\t\t\t\t\torder := record.(*models.Order)\n\t\t\t\t\torder.TrackingNumber = &trackingNumberArgument.TrackingNumber\n\t\t\t\t\tmodels.OrderState.Trigger(\"ship\", order, tx, \"tracking number \"+trackingNumberArgument.TrackingNumber)\n\t\t\t\t\tif err := tx.Save(order).Error; err != nil {\n\t\t\t\t\t\ttx.Rollback()\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn errors.New(\"invalid shipment number\")\n\t\t\t}\n\n\t\t\ttx.Commit()\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\treturn order.State == \"processing\"\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tResource: Admin.NewResource(&trackingNumberArgument{}),\n\t\tModes: []string{\"show\", \"menu_item\"},\n\t})\n\n\torder.Action(&admin.Action{\n\t\tName: \"Cancel\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tfor _, order := range argument.FindSelectedRecords() {\n\t\t\t\tdb := argument.Context.GetDB()\n\t\t\t\tif err := models.OrderState.Trigger(\"cancel\", order.(*models.Order), db); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdb.Select(\"state\").Save(order)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\tfor _, state := range []string{\"draft\", \"checkout\", \"paid\", \"processing\"} {\n\t\t\t\t\tif order.State == state {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tModes: []string{\"index\", \"show\", \"menu_item\"},\n\t})\n\n\torder.IndexAttrs(\"User\", \"PaymentAmount\", \"ShippedAt\", \"CancelledAt\", \"State\", \"ShippingAddress\")\n\torder.NewAttrs(\"-DiscountValue\", \"-AbandonedReason\", \"-CancelledAt\")\n\torder.EditAttrs(\"-DiscountValue\", \"-AbandonedReason\", \"-CancelledAt\", \"-State\")\n\torder.ShowAttrs(\"-DiscountValue\", \"-State\")\n\torder.SearchAttrs(\"User.Name\", \"User.Email\", \"ShippingAddress.ContactName\", \"ShippingAddress.Address1\", \"ShippingAddress.Address2\")\n\n\t\/\/ Add activity for order\n\tactivity.Register(order)\n\n\t\/\/ Define another resource for same model\n\tabandonedOrder := Admin.AddResource(&models.Order{}, &admin.Config{Name: \"Abandoned Order\", Menu: []string{\"Order Management\"}})\n\tabandonedOrder.Meta(&admin.Meta{Name: \"ShippingAddress\", Type: \"single_edit\"})\n\tabandonedOrder.Meta(&admin.Meta{Name: \"BillingAddress\", Type: \"single_edit\"})\n\n\t\/\/ Define default scope for abandoned orders\n\tabandonedOrder.Scope(&admin.Scope{\n\t\tDefault: true,\n\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\treturn db.Where(\"abandoned_reason IS NOT NULL AND abandoned_reason <> ?\", \"\")\n\t\t},\n\t})\n\n\t\/\/ Define scopes for abandoned orders\n\tfor _, amount := range []int{5000, 10000, 20000} {\n\t\tvar amount = amount\n\t\tabandonedOrder.Scope(&admin.Scope{\n\t\t\tName: fmt.Sprint(amount),\n\t\t\tGroup: \"Amount Greater Than\",\n\t\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(\"payment_amount > ?\", amount)\n\t\t\t},\n\t\t})\n\t}\n\n\tabandonedOrder.IndexAttrs(\"-ShippingAddress\", \"-BillingAddress\", \"-DiscountValue\", \"-OrderItems\")\n\tabandonedOrder.NewAttrs(\"-DiscountValue\")\n\tabandonedOrder.EditAttrs(\"-DiscountValue\")\n\tabandonedOrder.ShowAttrs(\"-DiscountValue\")\n\n\t\/\/ Add Store\n\tstore := Admin.AddResource(&models.Store{}, &admin.Config{Menu: []string{\"Store Management\"}})\n\tstore.AddValidator(func(record interface{}, metaValues *resource.MetaValues, context *qor.Context) error {\n\t\tif meta := metaValues.Get(\"Name\"); meta != nil {\n\t\t\tif name := utils.ToString(meta.Value); strings.TrimSpace(name) == \"\" {\n\t\t\t\treturn validations.NewError(record, \"Name\", \"Name can't be blank\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t\/\/ Add Translations\n\tAdmin.AddResource(i18n.I18n, &admin.Config{Menu: []string{\"Site Management\"}})\n\n\t\/\/ Add SEOSetting\n\tAdmin.AddResource(&models.SEOSetting{}, &admin.Config{Menu: []string{\"Site Management\"}, Singleton: true})\n\n\t\/\/ Add Setting\n\tAdmin.AddResource(&models.Setting{}, &admin.Config{Singleton: true})\n\n\t\/\/ Add User\n\tuser := Admin.AddResource(&models.User{})\n\tuser.Meta(&admin.Meta{Name: \"Gender\", Type: \"select_one\", Collection: []string{\"Male\", \"Female\", \"Unknown\"}})\n\n\tuser.IndexAttrs(\"ID\", \"Email\", \"Name\", \"Gender\", \"Role\")\n\tuser.ShowAttrs(\n\t\t&admin.Section{\n\t\t\tTitle: \"Basic Information\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Name\"},\n\t\t\t\t{\"Email\", \"Password\"},\n\t\t\t\t{\"Gender\", \"Role\"},\n\t\t\t}},\n\t\t\"Addresses\",\n\t)\n\tuser.EditAttrs(user.ShowAttrs())\n\n\t\/\/ Add Worker\n\tWorker := getWorker()\n\tAdmin.AddResource(Worker)\n\n\tdb.Publish.SetWorker(Worker)\n\texchange_actions.RegisterExchangeJobs(i18n.I18n, Worker)\n\n\t\/\/ Add Publish\n\tAdmin.AddResource(db.Publish, &admin.Config{Singleton: true})\n\n\t\/\/ Add Search Center Resources\n\tAdmin.AddSearchResource(product, user, order)\n\n\t\/\/ Add ActionBar\n\tActionBar = action_bar.New(Admin, auth.AdminAuth{})\n\tActionBar.RegisterAction(&action_bar.Action{Name: \"Admin Dashboard\", Link: \"\/admin\"})\n\n\tinitFuncMap()\n\tinitRouter()\n}\n\nfunc sizeVariationCollection(resource interface{}, context *qor.Context) (results [][]string) {\n\tfor _, sizeVariation := range models.SizeVariations() {\n\t\tresults = append(results, []string{strconv.Itoa(int(sizeVariation.ID)), sizeVariation.Stringify()})\n\t}\n\treturn\n}\n<commit_msg>Use publish draft mode for admin<commit_after>package admin\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/action_bar\"\n\t\"github.com\/qor\/activity\"\n\t\"github.com\/qor\/admin\"\n\t\"github.com\/qor\/i18n\/exchange_actions\"\n\t\"github.com\/qor\/media_library\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor-example\/app\/models\"\n\t\"github.com\/qor\/qor-example\/config\/admin\/bindatafs\"\n\t\"github.com\/qor\/qor-example\/config\/auth\"\n\t\"github.com\/qor\/qor-example\/config\/i18n\"\n\t\"github.com\/qor\/qor-example\/db\"\n\t\"github.com\/qor\/qor\/resource\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/transition\"\n\t\"github.com\/qor\/validations\"\n)\n\nvar Admin *admin.Admin\nvar ActionBar *action_bar.ActionBar\nvar Countries = []string{\"China\", \"Japan\", \"USA\"}\n\nfunc init() {\n\tAdmin = admin.New(&qor.Config{DB: db.DB.Set(\"publish:draft_mode\", true)})\n\tAdmin.SetSiteName(\"Qor DEMO\")\n\tAdmin.SetAuth(auth.AdminAuth{})\n\tAdmin.SetAssetFS(bindatafs.AssetFS)\n\n\t\/\/ Add Dashboard\n\tAdmin.AddMenu(&admin.Menu{Name: \"Dashboard\", Link: \"\/admin\"})\n\n\t\/\/ Add Asset Manager, for rich editor\n\tassetManager := Admin.AddResource(&media_library.AssetManager{}, &admin.Config{Invisible: true})\n\n\t\/\/ Add Product\n\tproduct := Admin.AddResource(&models.Product{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tproduct.Meta(&admin.Meta{Name: \"MadeCountry\", Type: \"select_one\", Collection: Countries})\n\tproduct.Meta(&admin.Meta{Name: \"Description\", Type: \"rich_editor\", Resource: assetManager})\n\n\tcolorVariationMeta := product.Meta(&admin.Meta{Name: \"ColorVariations\"})\n\tcolorVariation := colorVariationMeta.Resource\n\tcolorVariation.NewAttrs(\"-Product\")\n\tcolorVariation.EditAttrs(\"-Product\")\n\n\tsizeVariationMeta := colorVariation.Meta(&admin.Meta{Name: \"SizeVariations\"})\n\tsizeVariation := sizeVariationMeta.Resource\n\tsizeVariation.NewAttrs(\"-ColorVariation\")\n\tsizeVariation.EditAttrs(\n\t\t&admin.Section{\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Size\", \"AvailableQuantity\"},\n\t\t\t},\n\t\t},\n\t)\n\n\tproduct.SearchAttrs(\"Name\", \"Code\", \"Category.Name\", \"Brand.Name\")\n\tproduct.EditAttrs(\n\t\t&admin.Section{\n\t\t\tTitle: \"Basic Information\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Name\"},\n\t\t\t\t{\"Code\", \"Price\"},\n\t\t\t\t{\"Enabled\"},\n\t\t\t}},\n\t\t&admin.Section{\n\t\t\tTitle: \"Organization\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Category\", \"MadeCountry\"},\n\t\t\t\t{\"Collections\"},\n\t\t\t}},\n\t\t\"Description\",\n\t\t\"ColorVariations\",\n\t)\n\n\tfor _, country := range Countries {\n\t\tvar country = country\n\t\tproduct.Scope(&admin.Scope{Name: country, Group: \"Made Country\", Handle: func(db *gorm.DB, ctx *qor.Context) *gorm.DB {\n\t\t\treturn db.Where(\"made_country = ?\", country)\n\t\t}})\n\t}\n\n\tproduct.IndexAttrs(\"-ColorVariations\")\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"View On Site\",\n\t\tURL: func(record interface{}, context *admin.Context) string {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn fmt.Sprintf(\"\/products\/%v\", product.Code)\n\t\t\t}\n\t\t\treturn \"#\"\n\t\t},\n\t\tModes: []string{\"menu_item\", \"edit\"},\n\t})\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"Disable\",\n\t\tHandle: func(arg *admin.ActionArgument) error {\n\t\t\tfor _, record := range arg.FindSelectedRecords() {\n\t\t\t\targ.Context.DB.Model(record.(*models.Product)).Update(\"enabled\", false)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn product.Enabled == true\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tModes: []string{\"index\", \"edit\", \"menu_item\"},\n\t})\n\n\tproduct.Action(&admin.Action{\n\t\tName: \"Enable\",\n\t\tHandle: func(arg *admin.ActionArgument) error {\n\t\t\tfor _, record := range arg.FindSelectedRecords() {\n\t\t\t\targ.Context.DB.Model(record.(*models.Product)).Update(\"enabled\", true)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif product, ok := record.(*models.Product); ok {\n\t\t\t\treturn product.Enabled == false\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tModes: []string{\"index\", \"edit\", \"menu_item\"},\n\t})\n\n\tAdmin.AddResource(&models.Color{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Size{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Category{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\tAdmin.AddResource(&models.Collection{}, &admin.Config{Menu: []string{\"Product Management\"}})\n\n\t\/\/ Add Order\n\torder := Admin.AddResource(&models.Order{}, &admin.Config{Menu: []string{\"Order Management\"}})\n\torder.Meta(&admin.Meta{Name: \"ShippingAddress\", Type: \"single_edit\"})\n\torder.Meta(&admin.Meta{Name: \"BillingAddress\", Type: \"single_edit\"})\n\torder.Meta(&admin.Meta{Name: \"ShippedAt\", Type: \"date\"})\n\n\torderItemMeta := order.Meta(&admin.Meta{Name: \"OrderItems\"})\n\torderItemMeta.Resource.Meta(&admin.Meta{Name: \"SizeVariation\", Type: \"select_one\", Collection: sizeVariationCollection})\n\torderItemMeta.Resource.NewAttrs(\"-State\")\n\torderItemMeta.Resource.EditAttrs(\"-State\")\n\n\t\/\/ define scopes for Order\n\tfor _, state := range []string{\"checkout\", \"cancelled\", \"paid\", \"paid_cancelled\", \"processing\", \"shipped\", \"returned\"} {\n\t\tvar state = state\n\t\torder.Scope(&admin.Scope{\n\t\t\tName: state,\n\t\t\tLabel: strings.Title(strings.Replace(state, \"_\", \" \", -1)),\n\t\t\tGroup: \"Order Status\",\n\t\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(models.Order{Transition: transition.Transition{State: state}})\n\t\t\t},\n\t\t})\n\t}\n\n\t\/\/ define actions for Order\n\ttype trackingNumberArgument struct {\n\t\tTrackingNumber string\n\t}\n\n\torder.Action(&admin.Action{\n\t\tName: \"Processing\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tfor _, order := range argument.FindSelectedRecords() {\n\t\t\t\tdb := argument.Context.GetDB()\n\t\t\t\tif err := models.OrderState.Trigger(\"process\", order.(*models.Order), db); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdb.Select(\"state\").Save(order)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\treturn order.State == \"paid\"\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tModes: []string{\"show\", \"menu_item\"},\n\t})\n\torder.Action(&admin.Action{\n\t\tName: \"Ship\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tvar (\n\t\t\t\ttx = argument.Context.GetDB().Begin()\n\t\t\t\ttrackingNumberArgument = argument.Argument.(*trackingNumberArgument)\n\t\t\t)\n\n\t\t\tif trackingNumberArgument.TrackingNumber != \"\" {\n\t\t\t\tfor _, record := range argument.FindSelectedRecords() {\n\t\t\t\t\torder := record.(*models.Order)\n\t\t\t\t\torder.TrackingNumber = &trackingNumberArgument.TrackingNumber\n\t\t\t\t\tmodels.OrderState.Trigger(\"ship\", order, tx, \"tracking number \"+trackingNumberArgument.TrackingNumber)\n\t\t\t\t\tif err := tx.Save(order).Error; err != nil {\n\t\t\t\t\t\ttx.Rollback()\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn errors.New(\"invalid shipment number\")\n\t\t\t}\n\n\t\t\ttx.Commit()\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\treturn order.State == \"processing\"\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tResource: Admin.NewResource(&trackingNumberArgument{}),\n\t\tModes: []string{\"show\", \"menu_item\"},\n\t})\n\n\torder.Action(&admin.Action{\n\t\tName: \"Cancel\",\n\t\tHandle: func(argument *admin.ActionArgument) error {\n\t\t\tfor _, order := range argument.FindSelectedRecords() {\n\t\t\t\tdb := argument.Context.GetDB()\n\t\t\t\tif err := models.OrderState.Trigger(\"cancel\", order.(*models.Order), db); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdb.Select(\"state\").Save(order)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tVisible: func(record interface{}, context *admin.Context) bool {\n\t\t\tif order, ok := record.(*models.Order); ok {\n\t\t\t\tfor _, state := range []string{\"draft\", \"checkout\", \"paid\", \"processing\"} {\n\t\t\t\t\tif order.State == state {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tModes: []string{\"index\", \"show\", \"menu_item\"},\n\t})\n\n\torder.IndexAttrs(\"User\", \"PaymentAmount\", \"ShippedAt\", \"CancelledAt\", \"State\", \"ShippingAddress\")\n\torder.NewAttrs(\"-DiscountValue\", \"-AbandonedReason\", \"-CancelledAt\")\n\torder.EditAttrs(\"-DiscountValue\", \"-AbandonedReason\", \"-CancelledAt\", \"-State\")\n\torder.ShowAttrs(\"-DiscountValue\", \"-State\")\n\torder.SearchAttrs(\"User.Name\", \"User.Email\", \"ShippingAddress.ContactName\", \"ShippingAddress.Address1\", \"ShippingAddress.Address2\")\n\n\t\/\/ Add activity for order\n\tactivity.Register(order)\n\n\t\/\/ Define another resource for same model\n\tabandonedOrder := Admin.AddResource(&models.Order{}, &admin.Config{Name: \"Abandoned Order\", Menu: []string{\"Order Management\"}})\n\tabandonedOrder.Meta(&admin.Meta{Name: \"ShippingAddress\", Type: \"single_edit\"})\n\tabandonedOrder.Meta(&admin.Meta{Name: \"BillingAddress\", Type: \"single_edit\"})\n\n\t\/\/ Define default scope for abandoned orders\n\tabandonedOrder.Scope(&admin.Scope{\n\t\tDefault: true,\n\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\treturn db.Where(\"abandoned_reason IS NOT NULL AND abandoned_reason <> ?\", \"\")\n\t\t},\n\t})\n\n\t\/\/ Define scopes for abandoned orders\n\tfor _, amount := range []int{5000, 10000, 20000} {\n\t\tvar amount = amount\n\t\tabandonedOrder.Scope(&admin.Scope{\n\t\t\tName: fmt.Sprint(amount),\n\t\t\tGroup: \"Amount Greater Than\",\n\t\t\tHandle: func(db *gorm.DB, context *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(\"payment_amount > ?\", amount)\n\t\t\t},\n\t\t})\n\t}\n\n\tabandonedOrder.IndexAttrs(\"-ShippingAddress\", \"-BillingAddress\", \"-DiscountValue\", \"-OrderItems\")\n\tabandonedOrder.NewAttrs(\"-DiscountValue\")\n\tabandonedOrder.EditAttrs(\"-DiscountValue\")\n\tabandonedOrder.ShowAttrs(\"-DiscountValue\")\n\n\t\/\/ Add Store\n\tstore := Admin.AddResource(&models.Store{}, &admin.Config{Menu: []string{\"Store Management\"}})\n\tstore.AddValidator(func(record interface{}, metaValues *resource.MetaValues, context *qor.Context) error {\n\t\tif meta := metaValues.Get(\"Name\"); meta != nil {\n\t\t\tif name := utils.ToString(meta.Value); strings.TrimSpace(name) == \"\" {\n\t\t\t\treturn validations.NewError(record, \"Name\", \"Name can't be blank\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t\/\/ Add Translations\n\tAdmin.AddResource(i18n.I18n, &admin.Config{Menu: []string{\"Site Management\"}})\n\n\t\/\/ Add SEOSetting\n\tAdmin.AddResource(&models.SEOSetting{}, &admin.Config{Menu: []string{\"Site Management\"}, Singleton: true})\n\n\t\/\/ Add Setting\n\tAdmin.AddResource(&models.Setting{}, &admin.Config{Singleton: true})\n\n\t\/\/ Add User\n\tuser := Admin.AddResource(&models.User{})\n\tuser.Meta(&admin.Meta{Name: \"Gender\", Type: \"select_one\", Collection: []string{\"Male\", \"Female\", \"Unknown\"}})\n\n\tuser.IndexAttrs(\"ID\", \"Email\", \"Name\", \"Gender\", \"Role\")\n\tuser.ShowAttrs(\n\t\t&admin.Section{\n\t\t\tTitle: \"Basic Information\",\n\t\t\tRows: [][]string{\n\t\t\t\t{\"Name\"},\n\t\t\t\t{\"Email\", \"Password\"},\n\t\t\t\t{\"Gender\", \"Role\"},\n\t\t\t}},\n\t\t\"Addresses\",\n\t)\n\tuser.EditAttrs(user.ShowAttrs())\n\n\t\/\/ Add Worker\n\tWorker := getWorker()\n\tAdmin.AddResource(Worker)\n\n\tdb.Publish.SetWorker(Worker)\n\texchange_actions.RegisterExchangeJobs(i18n.I18n, Worker)\n\n\t\/\/ Add Publish\n\tAdmin.AddResource(db.Publish, &admin.Config{Singleton: true})\n\n\t\/\/ Add Search Center Resources\n\tAdmin.AddSearchResource(product, user, order)\n\n\t\/\/ Add ActionBar\n\tActionBar = action_bar.New(Admin, auth.AdminAuth{})\n\tActionBar.RegisterAction(&action_bar.Action{Name: \"Admin Dashboard\", Link: \"\/admin\"})\n\n\tinitFuncMap()\n\tinitRouter()\n}\n\nfunc sizeVariationCollection(resource interface{}, context *qor.Context) (results [][]string) {\n\tfor _, sizeVariation := range models.SizeVariations() {\n\t\tresults = append(results, []string{strconv.Itoa(int(sizeVariation.ID)), sizeVariation.Stringify()})\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package gannoy\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/gansidui\/priority_queue\"\n)\n\ntype GannoyIndex struct {\n\tmeta meta\n\ttree int\n\tdim int\n\tdistance Distance\n\trandom Random\n\tnodes Nodes\n\tK int\n\tbuildChan chan buildArgs\n}\n\nfunc NewGannoyIndex(metaFile string, distance Distance, random Random) (GannoyIndex, error) {\n\n\tmeta, err := loadMeta(metaFile)\n\tif err != nil {\n\t\treturn GannoyIndex{}, err\n\t}\n\ttree := meta.tree\n\tdim := meta.dim\n\n\tann := meta.treePath()\n\n\t\/\/ K := 3\n\tK := 50\n\tgannoy := GannoyIndex{\n\t\tmeta: meta,\n\t\ttree: tree,\n\t\tdim: dim,\n\t\tdistance: distance,\n\t\trandom: random,\n\t\tK: K,\n\t\tnodes: newNodes(ann, tree, dim, K),\n\t\tbuildChan: make(chan buildArgs, 1),\n\t}\n\tgo gannoy.builder()\n\treturn gannoy, nil\n}\n\nfunc (g GannoyIndex) Tree() {\n\tfor i, root := range g.meta.roots() {\n\t\tg.walk(i, g.nodes.getNode(root), root, 0)\n\t}\n}\n\nfunc (g *GannoyIndex) AddItem(key int, w []float64) error {\n\targs := buildArgs{action: ADD, key: key, w: w, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) RemoveItem(key int) error {\n\targs := buildArgs{action: DELETE, key: key, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) UpdateItem(key int, w []float64) error {\n\targs := buildArgs{action: UPDATE, key: key, w: w, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) GetNnsByKey(key, n, searchK int) []int {\n\tm := g.nodes.getNodeByKey(key)\n\tif !m.isLeaf() {\n\t\treturn []int{}\n\t}\n\tkeys := g.getAllNns(m.v, n, searchK)\n\treturn keys\n}\n\nfunc (g *GannoyIndex) getAllNns(v []float64, n, searchK int) []int {\n\tif searchK == -1 {\n\t\tsearchK = n * g.tree\n\t}\n\n\tq := priority_queue.New()\n\tfor _, root := range g.meta.roots() {\n\t\tq.Push(&Queue{priority: math.Inf(1), value: root})\n\t}\n\n\tnns := []int{}\n\tfor len(nns) < searchK && q.Len() > 0 {\n\t\ttop := q.Top().(*Queue)\n\t\td := top.priority\n\t\ti := top.value\n\n\t\tnd := g.nodes.getNode(i)\n\t\tq.Pop()\n\t\tif nd.isLeaf() {\n\t\t\tnns = append(nns, i)\n\t\t} else if nd.nDescendants <= g.K {\n\t\t\tdst := nd.children\n\t\t\tnns = append(nns, dst...)\n\t\t} else {\n\t\t\tmargin := g.distance.margin(nd, v, g.dim)\n\t\t\tq.Push(&Queue{priority: math.Min(d, +margin), value: nd.children[1]})\n\t\t\tq.Push(&Queue{priority: math.Min(d, -margin), value: nd.children[0]})\n\t\t}\n\t}\n\n\tsort.Ints(nns)\n\tnnsDist := []sorter{}\n\tlast := -1\n\tfor _, j := range nns {\n\t\tif j == last {\n\t\t\tcontinue\n\t\t}\n\t\tlast = j\n\t\tnode := g.nodes.getNode(j)\n\t\tnnsDist = append(nnsDist, sorter{value: g.distance.distance(v, node.v, g.dim), id: node.key})\n\t}\n\n\tm := len(nnsDist)\n\tp := m\n\tif n < m {\n\t\tp = n\n\t}\n\n\tHeapSort(nnsDist, DESC, p)\n\n\tresult := make([]int, p)\n\tfor i := 0; i < p; i++ {\n\t\tresult[i] = nnsDist[m-1-i].id\n\t}\n\n\treturn result\n}\n\nfunc (g *GannoyIndex) addItem(key int, w []float64) error {\n\tn := g.nodes.newNode()\n\tn.key = key\n\tn.v = w\n\tn.parents = make([]int, g.tree)\n\terr := n.save()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ fmt.Printf(\"id %d\\n\", n.id)\n\n\tvar wg sync.WaitGroup\n\twg.Add(g.tree)\n\tbuildChan := make(chan int, g.tree)\n\tworker := func(n Node) {\n\t\tfor index := range buildChan {\n\t\t\t\/\/ fmt.Printf(\"root: %d\\n\", g.meta.roots()[index])\n\t\t\tg.build(index, g.meta.roots()[index], n)\n\t\t\twg.Done()\n\t\t}\n\t}\n\n\tfor i := 0; i < 3; i++ {\n\t\tgo worker(n)\n\t}\n\n\tfor index, _ := range g.meta.roots() {\n\t\tbuildChan <- index\n\t}\n\n\twg.Wait()\n\tclose(buildChan)\n\tg.nodes.maps.add(n.id, key)\n\n\treturn nil\n}\n\nfunc (g *GannoyIndex) build(index, root int, n Node) {\n\tif root == -1 {\n\t\t\/\/ 最初のノード\n\t\tn.parents[index] = -1\n\t\tn.save()\n\t\tg.meta.updateRoot(index, n.id)\n\t\treturn\n\t}\n\tid := g.findBranchByVector(root, n.v)\n\tfound := g.nodes.getNode(id)\n\t\/\/ fmt.Printf(\"Found %d\\n\", item)\n\n\torg_parent := found.parents[index]\n\tif found.isBucket() && len(found.children) < g.K {\n\t\t\/\/ ノードに余裕があれば追加\n\t\t\/\/ fmt.Printf(\"pattern bucket\\n\")\n\t\tn.updateParents(index, id)\n\t\tfound.nDescendants++\n\t\tfound.children = append(found.children, n.id)\n\t\tfound.save()\n\t} else {\n\t\t\/\/ ノードが上限またはリーフノードであれば新しいノードを追加\n\t\twillDelete := false\n\t\tvar indices []int\n\t\tif found.isLeaf() {\n\t\t\t\/\/ fmt.Printf(\"pattern leaf node\\n\")\n\t\t\tindices = []int{id, n.id}\n\t\t} else {\n\t\t\t\/\/ fmt.Printf(\"pattern full backet\\n\")\n\t\t\tindices = append(found.children, n.id)\n\t\t\twillDelete = true\n\t\t}\n\n\t\tm := g.makeTree(index, org_parent, indices)\n\t\t\/\/ fmt.Printf(\"m: %d, org_parent: %d\\n\", m, org_parent)\n\t\tif org_parent == -1 {\n\t\t\t\/\/ rootノードの入れ替え\n\t\t\tg.meta.updateRoot(index, m)\n\t\t} else {\n\t\t\tparent := g.nodes.getNode(org_parent)\n\t\t\tparent.nDescendants++\n\t\t\tchildren := make([]int, len(parent.children))\n\t\t\tfor i, child := range parent.children {\n\t\t\t\tif child == id {\n\t\t\t\t\t\/\/ 新しいノードに変更\n\t\t\t\t\tchildren[i] = m\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ 既存のノードのまま\n\t\t\t\t\tchildren[i] = child\n\t\t\t\t}\n\t\t\t}\n\t\t\tparent.children = children\n\t\t\tparent.save()\n\n\t\t}\n\t\tif willDelete {\n\t\t\tfound.destroy()\n\t\t}\n\t}\n}\n\nfunc (g *GannoyIndex) removeItem(key int) error {\n\tn := g.nodes.getNodeByKey(key)\n\n\tvar wg sync.WaitGroup\n\twg.Add(g.tree)\n\tbuildChan := make(chan int, g.tree)\n\tworker := func(n Node) {\n\t\tfor root := range buildChan {\n\t\t\tg.remove(root, n)\n\t\t\twg.Done()\n\t\t}\n\t}\n\n\tfor i := 0; i < 3; i++ {\n\t\tgo worker(n)\n\t}\n\tfor index, _ := range g.meta.roots() {\n\t\tbuildChan <- index\n\t}\n\n\twg.Wait()\n\tclose(buildChan)\n\n\tg.nodes.maps.remove(key)\n\tn.free = true\n\tn.save()\n\tg.nodes.free.push(n.id)\n\n\treturn nil\n}\n\nfunc (g *GannoyIndex) remove(root int, node Node) {\n\tif node.isRoot(root) {\n\t\tg.meta.updateRoot(root, -1)\n\t\treturn\n\t}\n\tparent := g.nodes.getNode(node.parents[root])\n\tif parent.isBucket() && len(parent.children) > 2 {\n\t\t\/\/ fmt.Printf(\"pattern bucket\\n\")\n\t\ttarget := -1\n\t\tfor i, child := range parent.children {\n\t\t\tif child == node.id {\n\t\t\t\ttarget = i\n\t\t\t}\n\t\t}\n\t\tif target == -1 {\n\t\t\treturn\n\t\t}\n\t\tchildren := append(parent.children[:target], parent.children[(target+1):]...)\n\t\tparent.nDescendants--\n\t\tparent.children = children\n\t\tparent.save()\n\t} else {\n\t\t\/\/ fmt.Printf(\"pattern leaf node\\n\")\n\t\tvar other int\n\t\tfor _, child := range parent.children {\n\t\t\tif child != node.id {\n\t\t\t\tother = child\n\t\t\t}\n\t\t}\n\t\tif parent.isRoot(root) {\n\t\t\tg.meta.updateRoot(root, other)\n\t\t} else {\n\t\t\tgrandParent := g.nodes.getNode(parent.parents[root])\n\t\t\tchildren := []int{}\n\t\t\tfor _, child := range grandParent.children {\n\t\t\t\tif child == node.parents[root] {\n\t\t\t\t\tchildren = append(children, other)\n\t\t\t\t} else {\n\t\t\t\t\tchildren = append(children, child)\n\t\t\t\t}\n\t\t\t}\n\t\t\tgrandParent.nDescendants--\n\t\t\tgrandParent.children = children\n\t\t\tgrandParent.save()\n\t\t}\n\n\t\totherNode := g.nodes.getNode(other)\n\t\totherNode.updateParents(root, parent.parents[root])\n\n\t\tparent.free = true\n\t\tparent.save()\n\t\tg.nodes.free.push(parent.id)\n\t}\n}\n\nfunc (g GannoyIndex) findBranchByVector(id int, v []float64) int {\n\tnode := g.nodes.getNode(id)\n\tif node.isLeaf() || node.isBucket() {\n\t\treturn id\n\t}\n\tside := g.distance.side(node, v, g.dim, g.random)\n\treturn g.findBranchByVector(node.children[side], v)\n}\n\nfunc (g *GannoyIndex) makeTree(root, parent int, ids []int) int {\n\tif len(ids) == 1 {\n\t\tn := g.nodes.getNode(ids[0])\n\t\tif len(n.parents) == 0 {\n\t\t\tn.parents = make([]int, g.tree)\n\t\t}\n\t\tn.updateParents(root, parent)\n\t\treturn ids[0]\n\t}\n\n\tif len(ids) <= g.K {\n\t\tm := g.nodes.newNode()\n\t\tm.parents = make([]int, g.tree)\n\t\tm.nDescendants = len(ids)\n\t\tm.parents[root] = parent\n\t\tm.children = ids\n\t\tm.save()\n\t\tfor _, child := range ids {\n\t\t\tc := g.nodes.getNode(child)\n\t\t\tif len(c.parents) == 0 {\n\t\t\t\tc.parents = make([]int, g.tree)\n\t\t\t}\n\t\t\tc.updateParents(root, m.id)\n\t\t}\n\t\treturn m.id\n\t}\n\n\tchildren := make([]Node, len(ids))\n\tfor i, id := range ids {\n\t\tchildren[i] = g.nodes.getNode(id)\n\t}\n\n\tchildrenIds := [2][]int{[]int{}, []int{}}\n\n\tm := g.nodes.newNode()\n\tm.parents = make([]int, g.tree)\n\tm.nDescendants = len(ids)\n\tm.parents[root] = parent\n\n\tm = g.distance.createSplit(children, g.dim, g.random, m)\n\tfor _, id := range ids {\n\t\tn := g.nodes.getNode(id)\n\t\tside := g.distance.side(m, n.v, g.dim, g.random)\n\t\tchildrenIds[side] = append(childrenIds[side], id)\n\t}\n\n\tfor len(childrenIds[0]) == 0 || len(childrenIds[1]) == 0 {\n\t\tchildrenIds[0] = []int{}\n\t\tchildrenIds[1] = []int{}\n\t\tfor z := 0; z < g.dim; z++ {\n\t\t\tm.v[z] = 0.0\n\t\t}\n\t\tfor _, id := range ids {\n\t\t\tside := g.random.flip()\n\t\t\tchildrenIds[side] = append(childrenIds[side], id)\n\t\t}\n\t}\n\n\tvar flip int\n\tif len(childrenIds[0]) > len(childrenIds[1]) {\n\t\tflip = 1\n\t}\n\n\tm.save()\n\tfor side := 0; side < 2; side++ {\n\t\tm.children[side^flip] = g.makeTree(root, m.id, childrenIds[side^flip])\n\t}\n\tm.save()\n\n\treturn m.id\n}\n\ntype buildArgs struct {\n\taction int\n\tkey int\n\tw []float64\n\tresult chan error\n}\n\nfunc (g *GannoyIndex) builder() {\n\tfor args := range g.buildChan {\n\t\tswitch args.action {\n\t\tcase ADD:\n\t\t\targs.result <- g.addItem(args.key, args.w)\n\t\tcase DELETE:\n\t\t\targs.result <- g.removeItem(args.key)\n\t\tcase UPDATE:\n\t\t\terr := g.removeItem(args.key)\n\t\t\tif err != nil {\n\t\t\t\targs.result <- err\n\t\t\t} else {\n\t\t\t\targs.result <- g.addItem(args.key, args.w)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (g GannoyIndex) walk(root int, node Node, id, tab int) {\n\tfor i := 0; i < tab*2; i++ {\n\t\tfmt.Print(\" \")\n\t}\n\tfmt.Printf(\"%d [%d] (%d) [nDescendants: %d, v: %v]\\n\", id, node.key, node.parents[root], node.nDescendants, node.v)\n\tif !node.isLeaf() {\n\t\tfor _, child := range node.children {\n\t\t\tg.walk(root, g.nodes.getNode(child), child, tab+1)\n\t\t}\n\t}\n}\n<commit_msg>Use destroy and free.push.<commit_after>package gannoy\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/gansidui\/priority_queue\"\n)\n\ntype GannoyIndex struct {\n\tmeta meta\n\ttree int\n\tdim int\n\tdistance Distance\n\trandom Random\n\tnodes Nodes\n\tK int\n\tbuildChan chan buildArgs\n}\n\nfunc NewGannoyIndex(metaFile string, distance Distance, random Random) (GannoyIndex, error) {\n\n\tmeta, err := loadMeta(metaFile)\n\tif err != nil {\n\t\treturn GannoyIndex{}, err\n\t}\n\ttree := meta.tree\n\tdim := meta.dim\n\n\tann := meta.treePath()\n\n\t\/\/ K := 3\n\tK := 50\n\tgannoy := GannoyIndex{\n\t\tmeta: meta,\n\t\ttree: tree,\n\t\tdim: dim,\n\t\tdistance: distance,\n\t\trandom: random,\n\t\tK: K,\n\t\tnodes: newNodes(ann, tree, dim, K),\n\t\tbuildChan: make(chan buildArgs, 1),\n\t}\n\tgo gannoy.builder()\n\treturn gannoy, nil\n}\n\nfunc (g GannoyIndex) Tree() {\n\tfor i, root := range g.meta.roots() {\n\t\tg.walk(i, g.nodes.getNode(root), root, 0)\n\t}\n}\n\nfunc (g *GannoyIndex) AddItem(key int, w []float64) error {\n\targs := buildArgs{action: ADD, key: key, w: w, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) RemoveItem(key int) error {\n\targs := buildArgs{action: DELETE, key: key, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) UpdateItem(key int, w []float64) error {\n\targs := buildArgs{action: UPDATE, key: key, w: w, result: make(chan error)}\n\tg.buildChan <- args\n\treturn <-args.result\n}\n\nfunc (g *GannoyIndex) GetNnsByKey(key, n, searchK int) []int {\n\tm := g.nodes.getNodeByKey(key)\n\tif !m.isLeaf() {\n\t\treturn []int{}\n\t}\n\tkeys := g.getAllNns(m.v, n, searchK)\n\treturn keys\n}\n\nfunc (g *GannoyIndex) getAllNns(v []float64, n, searchK int) []int {\n\tif searchK == -1 {\n\t\tsearchK = n * g.tree\n\t}\n\n\tq := priority_queue.New()\n\tfor _, root := range g.meta.roots() {\n\t\tq.Push(&Queue{priority: math.Inf(1), value: root})\n\t}\n\n\tnns := []int{}\n\tfor len(nns) < searchK && q.Len() > 0 {\n\t\ttop := q.Top().(*Queue)\n\t\td := top.priority\n\t\ti := top.value\n\n\t\tnd := g.nodes.getNode(i)\n\t\tq.Pop()\n\t\tif nd.isLeaf() {\n\t\t\tnns = append(nns, i)\n\t\t} else if nd.nDescendants <= g.K {\n\t\t\tdst := nd.children\n\t\t\tnns = append(nns, dst...)\n\t\t} else {\n\t\t\tmargin := g.distance.margin(nd, v, g.dim)\n\t\t\tq.Push(&Queue{priority: math.Min(d, +margin), value: nd.children[1]})\n\t\t\tq.Push(&Queue{priority: math.Min(d, -margin), value: nd.children[0]})\n\t\t}\n\t}\n\n\tsort.Ints(nns)\n\tnnsDist := []sorter{}\n\tlast := -1\n\tfor _, j := range nns {\n\t\tif j == last {\n\t\t\tcontinue\n\t\t}\n\t\tlast = j\n\t\tnode := g.nodes.getNode(j)\n\t\tnnsDist = append(nnsDist, sorter{value: g.distance.distance(v, node.v, g.dim), id: node.key})\n\t}\n\n\tm := len(nnsDist)\n\tp := m\n\tif n < m {\n\t\tp = n\n\t}\n\n\tHeapSort(nnsDist, DESC, p)\n\n\tresult := make([]int, p)\n\tfor i := 0; i < p; i++ {\n\t\tresult[i] = nnsDist[m-1-i].id\n\t}\n\n\treturn result\n}\n\nfunc (g *GannoyIndex) addItem(key int, w []float64) error {\n\tn := g.nodes.newNode()\n\tn.key = key\n\tn.v = w\n\tn.parents = make([]int, g.tree)\n\terr := n.save()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ fmt.Printf(\"id %d\\n\", n.id)\n\n\tvar wg sync.WaitGroup\n\twg.Add(g.tree)\n\tbuildChan := make(chan int, g.tree)\n\tworker := func(n Node) {\n\t\tfor index := range buildChan {\n\t\t\t\/\/ fmt.Printf(\"root: %d\\n\", g.meta.roots()[index])\n\t\t\tg.build(index, g.meta.roots()[index], n)\n\t\t\twg.Done()\n\t\t}\n\t}\n\n\tfor i := 0; i < 3; i++ {\n\t\tgo worker(n)\n\t}\n\n\tfor index, _ := range g.meta.roots() {\n\t\tbuildChan <- index\n\t}\n\n\twg.Wait()\n\tclose(buildChan)\n\tg.nodes.maps.add(n.id, key)\n\n\treturn nil\n}\n\nfunc (g *GannoyIndex) build(index, root int, n Node) {\n\tif root == -1 {\n\t\t\/\/ 最初のノード\n\t\tn.parents[index] = -1\n\t\tn.save()\n\t\tg.meta.updateRoot(index, n.id)\n\t\treturn\n\t}\n\tid := g.findBranchByVector(root, n.v)\n\tfound := g.nodes.getNode(id)\n\t\/\/ fmt.Printf(\"Found %d\\n\", item)\n\n\torg_parent := found.parents[index]\n\tif found.isBucket() && len(found.children) < g.K {\n\t\t\/\/ ノードに余裕があれば追加\n\t\t\/\/ fmt.Printf(\"pattern bucket\\n\")\n\t\tn.updateParents(index, id)\n\t\tfound.nDescendants++\n\t\tfound.children = append(found.children, n.id)\n\t\tfound.save()\n\t} else {\n\t\t\/\/ ノードが上限またはリーフノードであれば新しいノードを追加\n\t\twillDelete := false\n\t\tvar indices []int\n\t\tif found.isLeaf() {\n\t\t\t\/\/ fmt.Printf(\"pattern leaf node\\n\")\n\t\t\tindices = []int{id, n.id}\n\t\t} else {\n\t\t\t\/\/ fmt.Printf(\"pattern full backet\\n\")\n\t\t\tindices = append(found.children, n.id)\n\t\t\twillDelete = true\n\t\t}\n\n\t\tm := g.makeTree(index, org_parent, indices)\n\t\t\/\/ fmt.Printf(\"m: %d, org_parent: %d\\n\", m, org_parent)\n\t\tif org_parent == -1 {\n\t\t\t\/\/ rootノードの入れ替え\n\t\t\tg.meta.updateRoot(index, m)\n\t\t} else {\n\t\t\tparent := g.nodes.getNode(org_parent)\n\t\t\tparent.nDescendants++\n\t\t\tchildren := make([]int, len(parent.children))\n\t\t\tfor i, child := range parent.children {\n\t\t\t\tif child == id {\n\t\t\t\t\t\/\/ 新しいノードに変更\n\t\t\t\t\tchildren[i] = m\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ 既存のノードのまま\n\t\t\t\t\tchildren[i] = child\n\t\t\t\t}\n\t\t\t}\n\t\t\tparent.children = children\n\t\t\tparent.save()\n\n\t\t}\n\t\tif willDelete {\n\t\t\tfound.destroy()\n\t\t\tg.nodes.free.push(found.id)\n\t\t}\n\t}\n}\n\nfunc (g *GannoyIndex) removeItem(key int) error {\n\tn := g.nodes.getNodeByKey(key)\n\n\tvar wg sync.WaitGroup\n\twg.Add(g.tree)\n\tbuildChan := make(chan int, g.tree)\n\tworker := func(n Node) {\n\t\tfor root := range buildChan {\n\t\t\tg.remove(root, n)\n\t\t\twg.Done()\n\t\t}\n\t}\n\n\tfor i := 0; i < 3; i++ {\n\t\tgo worker(n)\n\t}\n\tfor index, _ := range g.meta.roots() {\n\t\tbuildChan <- index\n\t}\n\n\twg.Wait()\n\tclose(buildChan)\n\n\tg.nodes.maps.remove(key)\n\tn.destroy()\n\tg.nodes.free.push(n.id)\n\n\treturn nil\n}\n\nfunc (g *GannoyIndex) remove(root int, node Node) {\n\tif node.isRoot(root) {\n\t\tg.meta.updateRoot(root, -1)\n\t\treturn\n\t}\n\tparent := g.nodes.getNode(node.parents[root])\n\tif parent.isBucket() && len(parent.children) > 2 {\n\t\t\/\/ fmt.Printf(\"pattern bucket\\n\")\n\t\ttarget := -1\n\t\tfor i, child := range parent.children {\n\t\t\tif child == node.id {\n\t\t\t\ttarget = i\n\t\t\t}\n\t\t}\n\t\tif target == -1 {\n\t\t\treturn\n\t\t}\n\t\tchildren := append(parent.children[:target], parent.children[(target+1):]...)\n\t\tparent.nDescendants--\n\t\tparent.children = children\n\t\tparent.save()\n\t} else {\n\t\t\/\/ fmt.Printf(\"pattern leaf node\\n\")\n\t\tvar other int\n\t\tfor _, child := range parent.children {\n\t\t\tif child != node.id {\n\t\t\t\tother = child\n\t\t\t}\n\t\t}\n\t\tif parent.isRoot(root) {\n\t\t\tg.meta.updateRoot(root, other)\n\t\t} else {\n\t\t\tgrandParent := g.nodes.getNode(parent.parents[root])\n\t\t\tchildren := []int{}\n\t\t\tfor _, child := range grandParent.children {\n\t\t\t\tif child == node.parents[root] {\n\t\t\t\t\tchildren = append(children, other)\n\t\t\t\t} else {\n\t\t\t\t\tchildren = append(children, child)\n\t\t\t\t}\n\t\t\t}\n\t\t\tgrandParent.nDescendants--\n\t\t\tgrandParent.children = children\n\t\t\tgrandParent.save()\n\t\t}\n\n\t\totherNode := g.nodes.getNode(other)\n\t\totherNode.updateParents(root, parent.parents[root])\n\n\t\tparent.destroy()\n\t\tg.nodes.free.push(parent.id)\n\t}\n}\n\nfunc (g GannoyIndex) findBranchByVector(id int, v []float64) int {\n\tnode := g.nodes.getNode(id)\n\tif node.isLeaf() || node.isBucket() {\n\t\treturn id\n\t}\n\tside := g.distance.side(node, v, g.dim, g.random)\n\treturn g.findBranchByVector(node.children[side], v)\n}\n\nfunc (g *GannoyIndex) makeTree(root, parent int, ids []int) int {\n\tif len(ids) == 1 {\n\t\tn := g.nodes.getNode(ids[0])\n\t\tif len(n.parents) == 0 {\n\t\t\tn.parents = make([]int, g.tree)\n\t\t}\n\t\tn.updateParents(root, parent)\n\t\treturn ids[0]\n\t}\n\n\tif len(ids) <= g.K {\n\t\tm := g.nodes.newNode()\n\t\tm.parents = make([]int, g.tree)\n\t\tm.nDescendants = len(ids)\n\t\tm.parents[root] = parent\n\t\tm.children = ids\n\t\tm.save()\n\t\tfor _, child := range ids {\n\t\t\tc := g.nodes.getNode(child)\n\t\t\tif len(c.parents) == 0 {\n\t\t\t\tc.parents = make([]int, g.tree)\n\t\t\t}\n\t\t\tc.updateParents(root, m.id)\n\t\t}\n\t\treturn m.id\n\t}\n\n\tchildren := make([]Node, len(ids))\n\tfor i, id := range ids {\n\t\tchildren[i] = g.nodes.getNode(id)\n\t}\n\n\tchildrenIds := [2][]int{[]int{}, []int{}}\n\n\tm := g.nodes.newNode()\n\tm.parents = make([]int, g.tree)\n\tm.nDescendants = len(ids)\n\tm.parents[root] = parent\n\n\tm = g.distance.createSplit(children, g.dim, g.random, m)\n\tfor _, id := range ids {\n\t\tn := g.nodes.getNode(id)\n\t\tside := g.distance.side(m, n.v, g.dim, g.random)\n\t\tchildrenIds[side] = append(childrenIds[side], id)\n\t}\n\n\tfor len(childrenIds[0]) == 0 || len(childrenIds[1]) == 0 {\n\t\tchildrenIds[0] = []int{}\n\t\tchildrenIds[1] = []int{}\n\t\tfor z := 0; z < g.dim; z++ {\n\t\t\tm.v[z] = 0.0\n\t\t}\n\t\tfor _, id := range ids {\n\t\t\tside := g.random.flip()\n\t\t\tchildrenIds[side] = append(childrenIds[side], id)\n\t\t}\n\t}\n\n\tvar flip int\n\tif len(childrenIds[0]) > len(childrenIds[1]) {\n\t\tflip = 1\n\t}\n\n\tm.save()\n\tfor side := 0; side < 2; side++ {\n\t\tm.children[side^flip] = g.makeTree(root, m.id, childrenIds[side^flip])\n\t}\n\tm.save()\n\n\treturn m.id\n}\n\ntype buildArgs struct {\n\taction int\n\tkey int\n\tw []float64\n\tresult chan error\n}\n\nfunc (g *GannoyIndex) builder() {\n\tfor args := range g.buildChan {\n\t\tswitch args.action {\n\t\tcase ADD:\n\t\t\targs.result <- g.addItem(args.key, args.w)\n\t\tcase DELETE:\n\t\t\targs.result <- g.removeItem(args.key)\n\t\tcase UPDATE:\n\t\t\terr := g.removeItem(args.key)\n\t\t\tif err != nil {\n\t\t\t\targs.result <- err\n\t\t\t} else {\n\t\t\t\targs.result <- g.addItem(args.key, args.w)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (g GannoyIndex) walk(root int, node Node, id, tab int) {\n\tfor i := 0; i < tab*2; i++ {\n\t\tfmt.Print(\" \")\n\t}\n\tfmt.Printf(\"%d [%d] (%d) [nDescendants: %d, v: %v]\\n\", id, node.key, node.parents[root], node.nDescendants, node.v)\n\tif !node.isLeaf() {\n\t\tfor _, child := range node.children {\n\t\t\tg.walk(root, g.nodes.getNode(child), child, tab+1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"path\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/giagiannis\/data-profiler\/core\"\n)\n\n\/\/ TaskEngine is deployed once for the server's lifetime and keeps the tasks\n\/\/ which are executed.\ntype TaskEngine struct {\n\tTasks []*Task\n\tlock sync.Mutex\n}\n\n\/\/ NewTaskEngine initializes a new TasEngine object. Called once per server deployment.\nfunc NewTaskEngine() *TaskEngine {\n\tte := new(TaskEngine)\n\tte.lock = *new(sync.Mutex)\n\treturn te\n}\n\n\/\/ Submit appends a new task to the task engine and initializes its execution.\nfunc (e *TaskEngine) Submit(t *Task) {\n\tif t == nil {\n\t\treturn\n\t}\n\te.lock.Lock()\n\tgo t.Run()\n\te.Tasks = append(e.Tasks, t)\n\te.lock.Unlock()\n}\n\n\/\/ Task is the primitive struct that represents a task of the server.\ntype Task struct {\n\tStatus string\n\tStarted time.Time\n\tDuration float64\n\tDescription string\n\tDataset *ModelDataset\n\tfnc func() error\n}\n\n\/\/ Run is responsible to execute to task's method and update the task status\n\/\/ accordingly.\nfunc (t *Task) Run() {\n\tt.Started = time.Now()\n\tt.Status = \"RUNNING\"\n\terr := t.fnc()\n\tif err != nil {\n\t\tt.Status = \"ERROR - \" + err.Error()\n\t} else {\n\t\tt.Status = \"DONE\"\n\t\tt.Duration = time.Since(t.Started).Seconds()\n\t}\n}\n\n\/\/ NewSMComputationTask initializes a new Similarity Matrix computation task.\nfunc NewSMComputationTask(datasetID string, conf map[string]string) *Task {\n\ttask := new(Task)\n\tdts := modelDatasetGetInfo(datasetID)\n\ttask.Dataset = dts\n\ttask.Description = fmt.Sprintf(\"SM Computation for %s, type %s\\n\",\n\t\tdts.Name, conf[\"estimatorType\"])\n\ttask.fnc = func() error {\n\t\tdatasets := core.DiscoverDatasets(dts.Path)\n\t\testType := *core.NewDatasetSimilarityEstimatorType(conf[\"estimatorType\"])\n\t\test := core.NewDatasetSimilarityEstimator(estType, datasets)\n\t\test.Configure(conf)\n\t\tif conf[\"popPolicy\"] == \"aprx\" {\n\t\t\tpop := new(core.DatasetSimilarityPopulationPolicy)\n\t\t\tpop.PolicyType = core.PopulationPolicyAprx\n\t\t\tval, err := strconv.ParseFloat(conf[\"popParameterValue\"], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tif conf[\"popParameter\"] == \"popCount\" {\n\t\t\t\tpop.Parameters = map[string]float64{\"count\": val}\n\t\t\t}\n\t\t\tif conf[\"popParameter\"] == \"popThreshold\" {\n\t\t\t\tpop.Parameters = map[string]float64{\"threshold\": val}\n\t\t\t}\n\t\t\test.SetPopulationPolicy(*pop)\n\t\t}\n\t\terr := est.Compute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsm := est.SimilarityMatrix()\n\t\t\/\/\t\tvar smID string\n\t\tif sm != nil {\n\t\t\t\/\/smID =\n\t\t\tmodelSimilarityMatrixInsert(datasetID, sm.Serialize(), est.Serialize(), conf)\n\t\t}\n\t\t\/\/modelEstimatorInsert(datasetID, smID, est.Serialize(), conf)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewMDSComputationTask initializes a new Multidimensional Scaling execution task.\nfunc NewMDSComputationTask(smID, datasetID string, conf map[string]string) *Task {\n\tsmModel := modelSimilarityMatrixGet(smID)\n\tif smModel == nil {\n\t\tlog.Println(\"SM not found\")\n\t\treturn nil\n\t}\n\n\tcnt, err := ioutil.ReadFile(smModel.Path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tsm := new(core.DatasetSimilarityMatrix)\n\tsm.Deserialize(cnt)\n\tk, err := strconv.ParseInt(conf[\"k\"], 10, 64)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tdat := modelDatasetGetInfo(datasetID)\n\ttask := new(Task)\n\ttask.Dataset = dat\n\ttask.Description = fmt.Sprintf(\"MDS Execution for %s with k=%d\\n\",\n\t\tdat.Name, k)\n\ttask.fnc = func() error {\n\t\tmds := core.NewMDScaling(sm, int(k), Conf.Scripts.MDS)\n\t\terr = mds.Compute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgof := fmt.Sprintf(\"%.5f\", mds.Gof())\n\t\tstress := fmt.Sprintf(\"%.5f\", mds.Stress())\n\t\tmodelCoordinatesInsert(mds.Coordinates(), dat.ID, conf[\"k\"], gof, stress, smID)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewOperatorRunTask initializes a new operator execution task.\nfunc NewOperatorRunTask(operatorID string) *Task {\n\tm := modelOperatorGet(operatorID)\n\tif m == nil {\n\t\tlog.Println(\"Operator was not found\")\n\t\treturn nil\n\t}\n\tdat := modelDatasetGetInfo(m.DatasetID)\n\tfor _, f := range modelDatasetGetFiles(dat.ID) {\n\t\tdat.Files = append(dat.Files, dat.Path+\"\/\"+f)\n\t}\n\ttask := new(Task)\n\ttask.Description = fmt.Sprintf(\"%s evaluation\", m.Name)\n\ttask.Dataset = dat\n\ttask.fnc = func() error {\n\t\teval, err := core.NewDatasetEvaluator(core.OnlineEval,\n\t\t\tmap[string]string{\n\t\t\t\t\"script\": m.Path,\n\t\t\t\t\"testset\": \"\",\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tscores := core.NewDatasetScores()\n\t\tfor _, f := range dat.Files {\n\t\t\ts, err := eval.Evaluate(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tscores.Scores[path.Base(f)] = s\n\t\t\t}\n\t\t}\n\t\tcnt, _ := scores.Serialize()\n\t\tmodelOperatorScoresInsert(operatorID, cnt)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewModelTrainTask generates a new ML for a given (dataset,operator) combination,\n\/\/ according to the user-specified parameters.\nfunc NewModelTrainTask(datasetID, operatorID string, sr float64,\n\tmodelType string,\n\tcoordinatesID, mlScript string,\n\tmatrixID, k string) *Task {\n\tm := modelDatasetGetInfo(datasetID)\n\ttask := new(Task)\n\ttask.Description = fmt.Sprintf(\"Model training (%s for %s)\", path.Base(mlScript), m.Name)\n\ttask.Dataset = m\n\ttask.fnc = func() error {\n\t\tdatasets := core.DiscoverDatasets(m.Path)\n\t\to := modelOperatorGet(operatorID)\n\t\tvar evaluator core.DatasetEvaluator\n\t\tvar err error\n\t\tif o.ScoresFile != \"\" {\n\t\t\tevaluator, err = core.NewDatasetEvaluator(core.FileBasedEval, map[string]string{\"scores\": o.ScoresFile})\n\t\t} else {\n\t\t\tevaluator, err = core.NewDatasetEvaluator(core.OnlineEval, map[string]string{\"script\": o.Path, \"testset\": \"\"})\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\t\tt := core.NewModelerType(modelType)\n\t\tmodeler := core.NewModeler(t, datasets, sr, evaluator)\n\t\tvar conf map[string]string\n\t\tif t == core.ScriptBasedModelerType {\n\t\t\tc := modelCoordinatesGet(coordinatesID)\n\t\t\tconf = map[string]string{\"script\": mlScript, \"coordinates\": path.Base(c.Path)}\n\t\t} else if t == core.KNNModelerType {\n\t\t\tm := modelSimilarityMatrixGet(matrixID)\n\t\t\tconf = map[string]string{\"k\": k, \"smatrix\": path.Base(m.Path)}\n\t\t}\n\t\tmodeler.Configure(conf)\n\t\terr = modeler.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\t\t\/\/ serialze appxValues\n\t\tvar cnt [][]float64\n\t\tcnt = append(cnt, modeler.AppxValues())\n\t\tappxBuffer := serializeCSVFile(cnt)\n\t\tsamplesBuffer, _ := json.Marshal(modeler.Samples())\n\t\terrors := make(map[string]string)\n\t\tfor k, v := range modeler.ErrorMetrics() {\n\t\t\terrors[k] = fmt.Sprintf(\"%.5f\", v)\n\t\t}\n\t\tmodelDatasetModelInsert(coordinatesID, operatorID, datasetID, samplesBuffer, appxBuffer, conf, errors, sr)\n\t\treturn nil\n\t}\n\treturn task\n}\n<commit_msg>reverting<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"path\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/giagiannis\/data-profiler\/core\"\n)\n\n\/\/ TaskEngine is deployed once for the server's lifetime and keeps the tasks\n\/\/ which are executed.\ntype TaskEngine struct {\n\tTasks []*Task\n\tlock sync.Mutex\n}\n\n\/\/ NewTaskEngine initializes a new TasEngine object. Called once per server deployment.\nfunc NewTaskEngine() *TaskEngine {\n\tte := new(TaskEngine)\n\tte.lock = *new(sync.Mutex)\n\treturn te\n}\n\n\/\/ Submit appends a new task to the task engine and initializes its execution.\nfunc (e *TaskEngine) Submit(t *Task) {\n\tif t == nil {\n\t\treturn\n\t}\n\te.lock.Lock()\n\tgo t.Run()\n\te.Tasks = append(e.Tasks, t)\n\te.lock.Unlock()\n}\n\n\/\/ Task is the primitive struct that represents a task of the server.\ntype Task struct {\n\tStatus string\n\tStarted time.Time\n\tDuration float64\n\tDescription string\n\tDataset *ModelDataset\n\tfnc func() error\n}\n\n\/\/ Run is responsible to execute to task's method and update the task status\n\/\/ accordingly.\nfunc (t *Task) Run() {\n\tt.Started = time.Now()\n\tt.Status = \"RUNNING\"\n\terr := t.fnc()\n\tif err != nil {\n\t\tt.Status = \"ERROR - \" + err.Error()\n\t} else {\n\t\tt.Status = \"DONE\"\n\t\tt.Duration = time.Since(t.Started).Seconds()\n\t}\n}\n\n\/\/ NewSMComputationTask initializes a new Similarity Matrix computation task.\nfunc NewSMComputationTask(datasetID string, conf map[string]string) *Task {\n\ttask := new(Task)\n\tdts := modelDatasetGetInfo(datasetID)\n\ttask.Dataset = dts\n\ttask.Description = fmt.Sprintf(\"SM Computation for %s, type %s\\n\",\n\t\tdts.Name, conf[\"estimatorType\"])\n\ttask.fnc = func() error {\n\t\tdatasets := core.DiscoverDatasets(dts.Path)\n\t\testType := *core.NewDatasetSimilarityEstimatorType(conf[\"estimatorType\"])\n\t\test := core.NewDatasetSimilarityEstimator(estType, datasets)\n\t\test.Configure(conf)\n\t\tif conf[\"popPolicy\"] == \"aprx\" {\n\t\t\tpop := new(core.DatasetSimilarityPopulationPolicy)\n\t\t\tpop.PolicyType = core.PopulationPolicyAprx\n\t\t\tval, err := strconv.ParseFloat(conf[\"popParameterValue\"], 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tif conf[\"popParameter\"] == \"popCount\" {\n\t\t\t\tpop.Parameters = map[string]float64{\"count\": val}\n\t\t\t}\n\t\t\tif conf[\"popParameter\"] == \"popThreshold\" {\n\t\t\t\tpop.Parameters = map[string]float64{\"threshold\": val}\n\t\t\t}\n\t\t\test.SetPopulationPolicy(*pop)\n\t\t}\n\t\terr := est.Compute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsm := est.SimilarityMatrix()\n\t\t\/\/\t\tvar smID string\n\t\tif sm != nil {\n\t\t\t\/\/smID =\n\t\t\tmodelSimilarityMatrixInsert(datasetID, sm.Serialize(), est.Serialize(), conf)\n\t\t}\n\t\t\/\/modelEstimatorInsert(datasetID, smID, est.Serialize(), conf)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewMDSComputationTask initializes a new Multidimensional Scaling execution task.\nfunc NewMDSComputationTask(smID, datasetID string, conf map[string]string) *Task {\n\tsmModel := modelSimilarityMatrixGet(smID)\n\tif smModel == nil {\n\t\tlog.Println(\"SM not found\")\n\t\treturn nil\n\t}\n\n\tcnt, err := ioutil.ReadFile(smModel.Path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tsm := new(core.DatasetSimilarityMatrix)\n\tsm.Deserialize(cnt)\n\tk, err := strconv.ParseInt(conf[\"k\"], 10, 64)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tdat := modelDatasetGetInfo(datasetID)\n\ttask := new(Task)\n\ttask.Dataset = dat\n\ttask.Description = fmt.Sprintf(\"MDS Execution for %s with k=%d\\n\",\n\t\tdat.Name, k)\n\ttask.fnc = func() error {\n\t\tmds := core.NewMDScaling(sm, int(k), Conf.Scripts.MDS)\n\t\terr = mds.Compute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgof := fmt.Sprintf(\"%.5f\", mds.Gof())\n\t\tstress := fmt.Sprintf(\"%.5f\", mds.Stress())\n\t\tmodelCoordinatesInsert(mds.Coordinates(), dat.ID, conf[\"k\"], gof, stress, smID)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewOperatorRunTask initializes a new operator execution task.\nfunc NewOperatorRunTask(operatorID string) *Task {\n\tm := modelOperatorGet(operatorID)\n\tif m == nil {\n\t\tlog.Println(\"Operator was not found\")\n\t\treturn nil\n\t}\n\tdat := modelDatasetGetInfo(m.DatasetID)\n\tfor _, f := range modelDatasetGetFiles(dat.ID) {\n\t\tdat.Files = append(dat.Files, dat.Path+\"\/\"+f)\n\t}\n\ttask := new(Task)\n\ttask.Description = fmt.Sprintf(\"%s evaluation\", m.Name)\n\ttask.Dataset = dat\n\ttask.fnc = func() error {\n\t\teval, err := core.NewDatasetEvaluator(core.OnlineEval,\n\t\t\tmap[string]string{\n\t\t\t\t\"script\": m.Path,\n\t\t\t\t\"testset\": \"\",\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tscores := core.NewDatasetScores()\n\t\tfor _, f := range dat.Files {\n\t\t\ts, err := eval.Evaluate(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tscores.Scores[path.Base(f)] = s\n\t\t\t}\n\t\t}\n\t\tcnt, _ := scores.Serialize()\n\t\tmodelOperatorScoresInsert(operatorID, cnt)\n\t\treturn nil\n\t}\n\treturn task\n}\n\n\/\/ NewModelTrainTask generates a new ML for a given (dataset,operator) combination,\n\/\/ according to the user-specified parameters.\nfunc NewModelTrainTask(datasetID, operatorID string, sr float64,\n\tmodelType string,\n\tcoordinatesID, mlScript string,\n\tmatrixID, k string) *Task {\n\tm := modelDatasetGetInfo(datasetID)\n\ttask := new(Task)\n\ttask.Description = fmt.Sprintf(\"Model training (%s for %s)\", path.Base(mlScript), m.Name)\n\ttask.Dataset = m\n\ttask.fnc = func() error {\n\t\tdatasets := core.DiscoverDatasets(m.Path)\n\t\to := modelOperatorGet(operatorID)\n\t\tvar evaluator core.DatasetEvaluator\n\t\tvar err error\n\t\tif o.ScoresFile != \"\" {\n\t\t\tevaluator, err = core.NewDatasetEvaluator(core.FileBasedEval, map[string]string{\"scores\": o.ScoresFile})\n\t\t} else {\n\t\t\tevaluator, err = core.NewDatasetEvaluator(core.OnlineEval, map[string]string{\"script\": o.Path, \"testset\": \"\"})\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\t\tt := core.NewModelerType(modelType)\n\t\tmodeler := core.NewModeler(t, datasets, sr, evaluator)\n\t\tvar conf map[string]string\n\t\tif t == core.ScriptBasedModelerType {\n\t\t\tc := modelCoordinatesGet(coordinatesID)\n\t\t\tconf = map[string]string{\"script\": mlScript, \"coordinates\": c.Path}\n\t\t} else if t == core.KNNModelerType {\n\t\t\tm := modelSimilarityMatrixGet(matrixID)\n\t\t\tconf = map[string]string{\"k\": k, \"smatrix\": m.Path}\n\t\t}\n\t\tmodeler.Configure(conf)\n\t\terr = modeler.Run()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\t\t\/\/ serialze appxValues\n\t\tvar cnt [][]float64\n\t\tcnt = append(cnt, modeler.AppxValues())\n\t\tappxBuffer := serializeCSVFile(cnt)\n\t\tsamplesBuffer, _ := json.Marshal(modeler.Samples())\n\t\terrors := make(map[string]string)\n\t\tfor k, v := range modeler.ErrorMetrics() {\n\t\t\terrors[k] = fmt.Sprintf(\"%.5f\", v)\n\t\t}\n\t\tmodelDatasetModelInsert(coordinatesID, operatorID, datasetID, samplesBuffer, appxBuffer, conf, errors, sr)\n\t\treturn nil\n\t}\n\treturn task\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage integration\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nvar (\n\tT = NewCommand(\"tsuru\").WithArgs\n\tallPlatforms = []string{\n\t\t\"tsuru\/python\",\n\t\t\"tsuru\/go\",\n\t\t\"tsuru\/buildpack\",\n\t\t\"tsuru\/cordova\",\n\t\t\"tsuru\/elixir\",\n\t\t\"tsuru\/java\",\n\t\t\"tsuru\/nodejs\",\n\t\t\"tsuru\/php\",\n\t\t\"tsuru\/play\",\n\t\t\"tsuru\/pypy\",\n\t\t\"tsuru\/python3\",\n\t\t\"tsuru\/ruby\",\n\t\t\"tsuru\/static\",\n\t}\n\tallProvisioners = []string{\n\t\t\"docker\",\n\t\t\"swarm\",\n\t}\n\tflows = []ExecFlow{\n\t\tplatformsToInstall(),\n\t\tinstallerConfigTest(),\n\t\tinstallerTest(),\n\t\ttargetTest(),\n\t\tloginTest(),\n\t\tremoveInstallNodes(),\n\t\tquotaTest(),\n\t\tteamTest(),\n\t\tpoolAdd(),\n\t\tplatformAdd(),\n\t\texampleApps(),\n\t}\n)\n\nvar installerConfig = `driver:\n name: virtualbox\n options:\n virtualbox-cpu-count: 2\n virtualbox-memory: 2048\nhosts:\n apps:\n size: 2\ncomponents:\n tsuru:\n version: latest\n install-dashboard: false\n`\n\nfunc platformsToInstall() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platformimages\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, platImg := range allPlatforms {\n\t\t\tenv.Add(\"platformimages\", platImg)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc installerConfigTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installerconfig\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tf.Write([]byte(installerConfig))\n\t\tenv.Set(\"installerconfig\", f.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installerconfig}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"targetaddr\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"install\", \"--config\", \"{{.installerconfig}}\").WithTimeout(30 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tregex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\\d.]+)\\s.*`)\n\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetHost := parts[1]\n\t\tregex = regexp.MustCompile(`(?si).*Tsuru API.*?\\|\\s(\\d+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetPort := parts[1]\n\t\tenv.Set(\"targetaddr\", fmt.Sprintf(\"http:\/\/%s:%s\", targetHost, targetPort))\n\t\tregex = regexp.MustCompile(`\\| (https?[^\\s]+?) \\|`)\n\t\tallParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)\n\t\tfor _, parts = range allParts {\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeopts\", fmt.Sprintf(\"--register address=%s --cacert ~\/.tsuru\/installs\/tsuru\/certs\/ca.pem --clientcert ~\/.tsuru\/installs\/tsuru\/certs\/cert.pem --clientkey ~\/.tsuru\/installs\/tsuru\/certs\/key.pem\", parts[1]))\n\t\t\tenv.Add(\"nodestoremove\", parts[1])\n\t\t}\n\t\tregex = regexp.MustCompile(`Username: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminuser\", parts[1])\n\t\tregex = regexp.MustCompile(`Password: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminpassword\", parts[1])\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"uninstall\", \"-y\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc targetTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\ttargetName := \"integration-target\"\n\t\tres := T(\"target-add\", targetName, \"{{.targetaddr}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"target-list\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `\\s+` + targetName + ` .*`})\n\t\tres = T(\"target-set\", targetName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc loginTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"login\", \"{{.adminuser}}\").WithInput(\"{{.adminpassword}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc removeInstallNodes() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"node\": \"nodestoremove\",\n\t\t},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", \"{{.node}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc quotaTest() ExecFlow {\n\tflow := ExecFlow{\n\t\trequires: []string{\"adminuser\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"user-quota-change\", \"{{.adminuser}}\", \"100\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"user-quota-view\", \"{{.adminuser}}\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `(?s)Apps usage.*\/100`})\n\t}\n\treturn flow\n}\n\nfunc teamTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"team\"},\n\t}\n\tteamName := \"integration-team\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-create\", teamName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Set(\"team\", teamName)\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-remove\", \"-y\", teamName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc poolAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"poolnames\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-add\", \"--provisioner\", prov, poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tenv.Add(\"poolnames\", poolName)\n\t\t\tres = T(\"pool-teams-add\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"node-add\", \"{{.nodeopts}}\", \"pool=\"+poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"event-list\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tnodeopts := env.All(\"nodeopts\")\n\t\t\tenv.Set(\"nodeopts\", append(nodeopts[1:], nodeopts[0])...)\n\t\t\tregex := regexp.MustCompile(`node.create.*?node:\\s+(.*?)\\s+`)\n\t\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeaddrs\", parts[1])\n\t\t\tregex = regexp.MustCompile(parts[1] + `.*?ready`)\n\t\t\tok := retry(time.Minute, func() bool {\n\t\t\t\tres = T(\"node-list\").Run(env)\n\t\t\t\treturn regex.MatchString(res.Stdout.String())\n\t\t\t})\n\t\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"node not ready after 1 minute: %v\", res))\n\t\t}\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tfor _, node := range env.All(\"nodeaddrs\") {\n\t\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", node).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-teams-remove\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tres = T(\"pool-remove\", \"-y\", poolName).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc platformAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platforms\"},\n\t\tmatrix: map[string]string{\n\t\t\t\"platimg\": \"platformimages\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-add\", platName, \"-i\", img).WithTimeout(15 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Add(\"platforms\", platName)\n\t\tres = T(\"platform-list\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: \"(?s).*- \" + platName + \".*\"})\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-remove\", \"-y\", platName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc exampleApps() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"pool\": \"poolnames\",\n\t\t\t\"plat\": \"platforms\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tappName := \"iapp-{{.plat}}-{{.pool}}\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-create\", appName, \"{{.plat}}\", \"-t\", \"{{.team}}\", \"-o\", \"{{.pool}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tplatRE := regexp.MustCompile(`(?s)Platform: (.*?)\\n`)\n\t\tparts := platRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tlang := strings.Replace(parts[1], \"iplat-\", \"\", -1)\n\t\tres = T(\"app-deploy\", \"-a\", appName, \"{{.examplesdir}}\/\"+lang+\"\/\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\taddrRE := regexp.MustCompile(`(?s)Address: (.*?)\\n`)\n\t\tparts = addrRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tcmd := NewCommand(\"curl\", \"-sSf\", \"http:\/\/\"+parts[1])\n\t\tok := retry(time.Minute, func() bool {\n\t\t\tres = cmd.Run(env)\n\t\t\treturn res.ExitCode == 0\n\t\t})\n\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"invalid result: %v\", res))\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-remove\", \"-y\", \"-a\", appName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc (s *S) TestBase(c *check.C) {\n\tenv := NewEnvironment()\n\tif !env.Has(\"enabled\") {\n\t\treturn\n\t}\n\tvar executedFlows []*ExecFlow\n\tdefer func() {\n\t\tfor i := len(executedFlows) - 1; i >= 0; i-- {\n\t\t\texecutedFlows[i].Rollback(c, env)\n\t\t}\n\t}()\n\tfor i := range flows {\n\t\tf := &flows[i]\n\t\tif len(f.provides) > 0 {\n\t\t\tprovidesAll := true\n\t\t\tfor _, envVar := range f.provides {\n\t\t\t\tif env.Get(envVar) == \"\" {\n\t\t\t\t\tprovidesAll = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif providesAll {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\texecutedFlows = append(executedFlows, f)\n\t\tf.Run(c, env)\n\t}\n}\n<commit_msg>integration: increase time waiting for application to be available<commit_after>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage integration\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nvar (\n\tT = NewCommand(\"tsuru\").WithArgs\n\tallPlatforms = []string{\n\t\t\"tsuru\/python\",\n\t\t\"tsuru\/go\",\n\t\t\"tsuru\/buildpack\",\n\t\t\"tsuru\/cordova\",\n\t\t\"tsuru\/elixir\",\n\t\t\"tsuru\/java\",\n\t\t\"tsuru\/nodejs\",\n\t\t\"tsuru\/php\",\n\t\t\"tsuru\/play\",\n\t\t\"tsuru\/pypy\",\n\t\t\"tsuru\/python3\",\n\t\t\"tsuru\/ruby\",\n\t\t\"tsuru\/static\",\n\t}\n\tallProvisioners = []string{\n\t\t\"docker\",\n\t\t\"swarm\",\n\t}\n\tflows = []ExecFlow{\n\t\tplatformsToInstall(),\n\t\tinstallerConfigTest(),\n\t\tinstallerTest(),\n\t\ttargetTest(),\n\t\tloginTest(),\n\t\tremoveInstallNodes(),\n\t\tquotaTest(),\n\t\tteamTest(),\n\t\tpoolAdd(),\n\t\tplatformAdd(),\n\t\texampleApps(),\n\t}\n)\n\nvar installerConfig = `driver:\n name: virtualbox\n options:\n virtualbox-cpu-count: 2\n virtualbox-memory: 2048\nhosts:\n apps:\n size: 2\ncomponents:\n tsuru:\n version: latest\n install-dashboard: false\n`\n\nfunc platformsToInstall() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platformimages\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, platImg := range allPlatforms {\n\t\t\tenv.Add(\"platformimages\", platImg)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc installerConfigTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installerconfig\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tf.Write([]byte(installerConfig))\n\t\tenv.Set(\"installerconfig\", f.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installerconfig}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"targetaddr\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"install\", \"--config\", \"{{.installerconfig}}\").WithTimeout(30 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tregex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\\d.]+)\\s.*`)\n\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetHost := parts[1]\n\t\tregex = regexp.MustCompile(`(?si).*Tsuru API.*?\\|\\s(\\d+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetPort := parts[1]\n\t\tenv.Set(\"targetaddr\", fmt.Sprintf(\"http:\/\/%s:%s\", targetHost, targetPort))\n\t\tregex = regexp.MustCompile(`\\| (https?[^\\s]+?) \\|`)\n\t\tallParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)\n\t\tfor _, parts = range allParts {\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeopts\", fmt.Sprintf(\"--register address=%s --cacert ~\/.tsuru\/installs\/tsuru\/certs\/ca.pem --clientcert ~\/.tsuru\/installs\/tsuru\/certs\/cert.pem --clientkey ~\/.tsuru\/installs\/tsuru\/certs\/key.pem\", parts[1]))\n\t\t\tenv.Add(\"nodestoremove\", parts[1])\n\t\t}\n\t\tregex = regexp.MustCompile(`Username: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminuser\", parts[1])\n\t\tregex = regexp.MustCompile(`Password: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminpassword\", parts[1])\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"uninstall\", \"-y\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc targetTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\ttargetName := \"integration-target\"\n\t\tres := T(\"target-add\", targetName, \"{{.targetaddr}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"target-list\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `\\s+` + targetName + ` .*`})\n\t\tres = T(\"target-set\", targetName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc loginTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"login\", \"{{.adminuser}}\").WithInput(\"{{.adminpassword}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc removeInstallNodes() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"node\": \"nodestoremove\",\n\t\t},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", \"{{.node}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc quotaTest() ExecFlow {\n\tflow := ExecFlow{\n\t\trequires: []string{\"adminuser\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"user-quota-change\", \"{{.adminuser}}\", \"100\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"user-quota-view\", \"{{.adminuser}}\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `(?s)Apps usage.*\/100`})\n\t}\n\treturn flow\n}\n\nfunc teamTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"team\"},\n\t}\n\tteamName := \"integration-team\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-create\", teamName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Set(\"team\", teamName)\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-remove\", \"-y\", teamName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc poolAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"poolnames\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-add\", \"--provisioner\", prov, poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tenv.Add(\"poolnames\", poolName)\n\t\t\tres = T(\"pool-teams-add\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"node-add\", \"{{.nodeopts}}\", \"pool=\"+poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"event-list\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tnodeopts := env.All(\"nodeopts\")\n\t\t\tenv.Set(\"nodeopts\", append(nodeopts[1:], nodeopts[0])...)\n\t\t\tregex := regexp.MustCompile(`node.create.*?node:\\s+(.*?)\\s+`)\n\t\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeaddrs\", parts[1])\n\t\t\tregex = regexp.MustCompile(parts[1] + `.*?ready`)\n\t\t\tok := retry(time.Minute, func() bool {\n\t\t\t\tres = T(\"node-list\").Run(env)\n\t\t\t\treturn regex.MatchString(res.Stdout.String())\n\t\t\t})\n\t\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"node not ready after 1 minute: %v\", res))\n\t\t}\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tfor _, node := range env.All(\"nodeaddrs\") {\n\t\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", node).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-teams-remove\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tres = T(\"pool-remove\", \"-y\", poolName).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc platformAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platforms\"},\n\t\tmatrix: map[string]string{\n\t\t\t\"platimg\": \"platformimages\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-add\", platName, \"-i\", img).WithTimeout(15 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Add(\"platforms\", platName)\n\t\tres = T(\"platform-list\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: \"(?s).*- \" + platName + \".*\"})\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-remove\", \"-y\", platName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc exampleApps() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"pool\": \"poolnames\",\n\t\t\t\"plat\": \"platforms\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tappName := \"iapp-{{.plat}}-{{.pool}}\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-create\", appName, \"{{.plat}}\", \"-t\", \"{{.team}}\", \"-o\", \"{{.pool}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tplatRE := regexp.MustCompile(`(?s)Platform: (.*?)\\n`)\n\t\tparts := platRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tlang := strings.Replace(parts[1], \"iplat-\", \"\", -1)\n\t\tres = T(\"app-deploy\", \"-a\", appName, \"{{.examplesdir}}\/\"+lang+\"\/\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\taddrRE := regexp.MustCompile(`(?s)Address: (.*?)\\n`)\n\t\tparts = addrRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tcmd := NewCommand(\"curl\", \"-sSf\", \"http:\/\/\"+parts[1])\n\t\tok := retry(5*time.Minute, func() bool {\n\t\t\tres = cmd.Run(env)\n\t\t\treturn res.ExitCode == 0\n\t\t})\n\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"invalid result: %v\", res))\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-remove\", \"-y\", \"-a\", appName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc (s *S) TestBase(c *check.C) {\n\tenv := NewEnvironment()\n\tif !env.Has(\"enabled\") {\n\t\treturn\n\t}\n\tvar executedFlows []*ExecFlow\n\tdefer func() {\n\t\tfor i := len(executedFlows) - 1; i >= 0; i-- {\n\t\t\texecutedFlows[i].Rollback(c, env)\n\t\t}\n\t}()\n\tfor i := range flows {\n\t\tf := &flows[i]\n\t\tif len(f.provides) > 0 {\n\t\t\tprovidesAll := true\n\t\t\tfor _, envVar := range f.provides {\n\t\t\t\tif env.Get(envVar) == \"\" {\n\t\t\t\t\tprovidesAll = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif providesAll {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\texecutedFlows = append(executedFlows, f)\n\t\tf.Run(c, env)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"testing\"\n)\n\nfunc TestLoad(t *testing.T) {\n\tos.Setenv(\"PREST_CONF\", \".\/testdata\/prest.toml\")\n\tdefer os.Unsetenv(\"PREST_CONF\")\n\n\tLoad()\n\tif len(PrestConf.AccessConf.Tables) < 2 {\n\t\tt.Errorf(\"expected > 2, got: %d\", len(PrestConf.AccessConf.Tables))\n\t}\n\n\tLoad()\n\tfor _, ignoretable := range PrestConf.AccessConf.IgnoreTable {\n\t\tif ignoretable != \"test_permission_does_not_exist\" {\n\t\t\tt.Error(\"expected ['test_permission_does_not_exist'], but got another result\")\n\t\t}\n\t}\n\n\tLoad()\n\tif !PrestConf.AccessConf.Restrict {\n\t\tt.Error(\"expected true, but got false\")\n\t}\n\n\tos.Setenv(\"PREST_CONF\", \"foo\/bar\/prest.toml\")\n\tLoad()\n\n}\n\nfunc TestParse(t *testing.T) {\n\tos.Setenv(\"PREST_CONF\", \".\/testdata\/prest.toml\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 6000 {\n\t\tt.Errorf(\"expected port: 6000, got: %d\", cfg.HTTPPort)\n\t}\n\tif cfg.PGDatabase != \"prest-test\" {\n\t\tt.Errorf(\"expected database: prest, got: %s\", cfg.PGDatabase)\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\n\tos.Setenv(\"PREST_CONF\", \"..\/prest.toml\")\n\tos.Setenv(\"PREST_HTTP_PORT\", \"4000\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\tif !cfg.EnableDefaultJWT {\n\t\tt.Error(\"EnableDefaultJWT: expected true but got false\")\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\n\tos.Setenv(\"PREST_CONF\", \"\")\n\tos.Setenv(\"PREST_JWT_DEFAULT\", \"false\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\tif cfg.EnableDefaultJWT {\n\t\tt.Error(\"EnableDefaultJWT: expected false but got true\")\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_DEFAULT\")\n\tos.Setenv(\"PREST_CONF\", \".\/testdata\/prest.toml\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\tos.Unsetenv(\"PREST_HTTP_PORT\")\n\tos.Setenv(\"PREST_JWT_KEY\", \"s3cr3t\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.JWTKey != \"s3cr3t\" {\n\t\tt.Errorf(\"expected jwt key: s3cr3t, got: %s\", cfg.JWTKey)\n\t}\n\tif cfg.JWTAlgo != \"HS256\" {\n\t\tt.Errorf(\"expected (default) jwt algo: HS256, got: %s\", cfg.JWTAlgo)\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_KEY\")\n\tos.Setenv(\"PREST_JWT_ALGO\", \"HS512\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.JWTAlgo != \"HS512\" {\n\t\tt.Errorf(\"expected jwt algo: HS512, got: %s\", cfg.JWTAlgo)\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_ALGO\")\n\n\t\/\/ test configs that will panic\n\tcmd := exec.Command(os.Args[0], \"-test.run=TestPanicAndFatalErrors\")\n\tcmd.Env = append(os.Environ(), \"BE_CRASHER=1\")\n\terr = cmd.Run()\n\tif e, ok := err.(*exec.ExitError); !ok && e.Success() {\n\t\tt.Fatal(\"process ran without error\")\n\t}\n}\n\nfunc TestPanicAndFatalErrors(t *testing.T) {\n\tif os.Getenv(\"BE_CRASHER\") == \"1\" {\n\t\tos.Setenv(\"PREST_CONF\", \"\/foo\/bar\/not_found.toml\")\n\t\tviperCfg()\n\t\tcfg := &Prest{}\n\t\t_ = Parse(cfg)\n\t\tos.Unsetenv(\"PREST_CONF\")\n\t}\n}\n\nfunc TestGetDefaultPrestConf(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tdefaultFile string\n\t\tprestConf string\n\t\tresult string\n\t}{\n\t\t{\"empty config\", \".\/prest.toml\", \"\", \"\"},\n\t\t{\"custom config\", \".\/prest.toml\", \"..\/prest.toml\", \"..\/prest.toml\"},\n\t\t{\"default config\", \".\/testdata\/prest.toml\", \"\", \".\/testdata\/prest.toml\"},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdefaultFile = tc.defaultFile\n\t\t\tcfg := getDefaultPrestConf(tc.prestConf)\n\t\t\tif cfg != tc.result {\n\t\t\t\tt.Errorf(\"expected %v, but got %v\", tc.result, cfg)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDatabaseURL(t *testing.T) {\n\tos.Setenv(\"PREST_PG_URL\", \"postgresql:\/\/user:pass@localhost:1234\/mydatabase\/?sslmode=disable\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.PGDatabase != \"mydatabase\" {\n\t\tt.Errorf(\"expected database name: mydatabase, got: %s\", cfg.PGDatabase)\n\t}\n\tif cfg.PGHost != \"localhost\" {\n\t\tt.Errorf(\"expected database host: localhost, got: %s\", cfg.PGHost)\n\t}\n\tif cfg.PGPort != 1234 {\n\t\tt.Errorf(\"expected database port: 1234, got: %d\", cfg.PGPort)\n\t}\n\tif cfg.PGUser != \"user\" {\n\t\tt.Errorf(\"expected database user: user, got: %s\", cfg.PGUser)\n\t}\n\tif cfg.PGPass != \"pass\" {\n\t\tt.Errorf(\"expected database password: pass, got: %s\", cfg.PGPass)\n\t}\n\tif cfg.SSLMode != \"disable\" {\n\t\tt.Errorf(\"expected database ssl mode: disable, got: %s\", cfg.SSLMode)\n\t}\n\n\tos.Unsetenv(\"PREST_PG_URL\")\n\tos.Setenv(\"DATABASE_URL\", \"postgresql:\/\/cloud:cloudPass@localhost:5432\/CloudDatabase\/?sslmode=disable\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.PGPort != 5432 {\n\t\tt.Errorf(\"expected database port: 5432, got: %d\", cfg.PGPort)\n\t}\n\tif cfg.PGUser != \"cloud\" {\n\t\tt.Errorf(\"expected database user: cloud, got: %s\", cfg.PGUser)\n\t}\n\tif cfg.PGPass != \"cloudPass\" {\n\t\tt.Errorf(\"expected database password: cloudPass, got: %s\", cfg.PGPass)\n\t}\n\tif cfg.SSLMode != \"disable\" {\n\t\tt.Errorf(\"expected database SSL mode: disable, got: %s\", cfg.SSLMode)\n\t}\n\n\tos.Unsetenv(\"DATABASE_URL\")\n}\n\nfunc TestHTTPPort(t *testing.T) {\n\tos.Setenv(\"PORT\", \"8080\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 8080 {\n\t\tt.Errorf(\"expected http port: 8080, got: %d\", cfg.HTTPPort)\n\t}\n\n\t\/\/ set env PREST_HTTP_PORT and PORT\n\tos.Setenv(\"PREST_HTTP_PORT\", \"3000\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 8080 {\n\t\tt.Errorf(\"expected http port: 8080, got: %d\", cfg.HTTPPort)\n\t}\n\n\t\/\/ unset env PORT and set PREST_HTTP_PORT\n\tos.Unsetenv(\"PORT\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 3000 {\n\t\tt.Errorf(\"expected http port: 3000, got: %d\", cfg.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PREST_HTTP_PORT\")\n}\n\nfunc Test_parseDatabaseURL(t *testing.T) {\n\tc := &Prest{PGURL: \"postgresql:\/\/user:pass@localhost:5432\/mydatabase\/?sslmode=require\"}\n\tif err := parseDatabaseURL(c); err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif c.PGDatabase != \"mydatabase\" {\n\t\tt.Errorf(\"expected database name: mydatabase, got: %s\", c.PGDatabase)\n\t}\n\tif c.PGPort != 5432 {\n\t\tt.Errorf(\"expected database port: 5432, got: %d\", c.PGPort)\n\t}\n\tif c.PGUser != \"user\" {\n\t\tt.Errorf(\"expected database user: user, got: %s\", c.PGUser)\n\t}\n\tif c.PGPass != \"pass\" {\n\t\tt.Errorf(\"expected database password: password, got: %s\", c.PGPass)\n\t}\n\tif c.SSLMode != \"require\" {\n\t\tt.Errorf(\"expected database SSL mode: require, got: %s\", c.SSLMode)\n\t}\n\n\t\/\/ errors\n\tc = &Prest{PGURL: \"postgresql:\/\/user:pass@localhost:port\/mydatabase\/?sslmode=require\"}\n\tif err := parseDatabaseURL(c); err == nil {\n\t\tt.Error(\"expected error, got nothing\")\n\t}\n}\n\nfunc Test_portFromEnv(t *testing.T) {\n\tc := &Prest{}\n\n\tos.Setenv(\"PORT\", \"PORT\")\n\n\terr := portFromEnv(c)\n\tif err == nil {\n\t\tt.Errorf(\"expect error, got: %d\", c.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PORT\")\n}\n\nfunc Test_Auth(t *testing.T) {\n\tos.Setenv(\"PREST_CONF\", \".\/testdata\/prest.toml\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\n\tif cfg.AuthEnabled != false {\n\t\tt.Errorf(\"expected auth.enabled to be: false, got: %v\", cfg.AuthEnabled)\n\t}\n\n\tif cfg.AuthTable != \"prest_users\" {\n\t\tt.Errorf(\"expected auth.table to be: prest_users, got: %s\", cfg.AuthTable)\n\t}\n\n\tif cfg.AuthUsername != \"username\" {\n\t\tt.Errorf(\"expected auth.username to be: username, got: %s\", cfg.AuthUsername)\n\t}\n\n\tif cfg.AuthPassword != \"password\" {\n\t\tt.Errorf(\"expected auth.password to be: password, got: %s\", cfg.AuthPassword)\n\t}\n\n\tif cfg.AuthEncrypt != \"MD5\" {\n\t\tt.Errorf(\"expected auth.encrypt to be: MD5, got: %s\", cfg.AuthEncrypt)\n\t}\n\n\tmetadata := []string{\"first_name\", \"last_name\", \"last_login\"}\n\tif len(cfg.AuthMetadata) != len(metadata) {\n\t\tt.Errorf(\"expected auth.metadata to be: %d, got: %d\", len(cfg.AuthMetadata), len(metadata))\n\t}\n\n\tfor i, v := range cfg.AuthMetadata {\n\t\tif v != metadata[i] {\n\t\t\tt.Errorf(\"expected auth.metadata field %d to be: %s, got: %s\", i, v, metadata[i])\n\t\t}\n\t}\n}\n<commit_msg>remove load prest<commit_after>package config\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"testing\"\n)\n\nfunc TestLoad(t *testing.T) {\n\tLoad()\n\tif len(PrestConf.AccessConf.Tables) < 2 {\n\t\tt.Errorf(\"expected > 2, got: %d\", len(PrestConf.AccessConf.Tables))\n\t}\n\n\tLoad()\n\tfor _, ignoretable := range PrestConf.AccessConf.IgnoreTable {\n\t\tif ignoretable != \"test_permission_does_not_exist\" {\n\t\t\tt.Error(\"expected ['test_permission_does_not_exist'], but got another result\")\n\t\t}\n\t}\n\n\tLoad()\n\tif !PrestConf.AccessConf.Restrict {\n\t\tt.Error(\"expected true, but got false\")\n\t}\n\n\tos.Setenv(\"PREST_CONF\", \"foo\/bar\/prest.toml\")\n\tLoad()\n}\n\nfunc TestParse(t *testing.T) {\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 6000 {\n\t\tt.Errorf(\"expected port: 6000, got: %d\", cfg.HTTPPort)\n\t}\n\tif cfg.PGDatabase != \"prest-test\" {\n\t\tt.Errorf(\"expected database: prest, got: %s\", cfg.PGDatabase)\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\tos.Setenv(\"PREST_HTTP_PORT\", \"4000\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\tif !cfg.EnableDefaultJWT {\n\t\tt.Error(\"EnableDefaultJWT: expected true but got false\")\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\n\tos.Setenv(\"PREST_CONF\", \"\")\n\tos.Setenv(\"PREST_JWT_DEFAULT\", \"false\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\tif cfg.EnableDefaultJWT {\n\t\tt.Error(\"EnableDefaultJWT: expected false but got true\")\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_DEFAULT\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 4000 {\n\t\tt.Errorf(\"expected port: 4000, got: %d\", cfg.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PREST_CONF\")\n\tos.Unsetenv(\"PREST_HTTP_PORT\")\n\tos.Setenv(\"PREST_JWT_KEY\", \"s3cr3t\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.JWTKey != \"s3cr3t\" {\n\t\tt.Errorf(\"expected jwt key: s3cr3t, got: %s\", cfg.JWTKey)\n\t}\n\tif cfg.JWTAlgo != \"HS256\" {\n\t\tt.Errorf(\"expected (default) jwt algo: HS256, got: %s\", cfg.JWTAlgo)\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_KEY\")\n\tos.Setenv(\"PREST_JWT_ALGO\", \"HS512\")\n\n\tviperCfg()\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.JWTAlgo != \"HS512\" {\n\t\tt.Errorf(\"expected jwt algo: HS512, got: %s\", cfg.JWTAlgo)\n\t}\n\n\tos.Unsetenv(\"PREST_JWT_ALGO\")\n\n\t\/\/ test configs that will panic\n\tcmd := exec.Command(os.Args[0], \"-test.run=TestPanicAndFatalErrors\")\n\tcmd.Env = append(os.Environ(), \"BE_CRASHER=1\")\n\terr = cmd.Run()\n\tif e, ok := err.(*exec.ExitError); !ok && e.Success() {\n\t\tt.Fatal(\"process ran without error\")\n\t}\n}\n\nfunc TestPanicAndFatalErrors(t *testing.T) {\n\tif os.Getenv(\"BE_CRASHER\") == \"1\" {\n\t\tos.Setenv(\"PREST_CONF\", \"\/foo\/bar\/not_found.toml\")\n\t\tviperCfg()\n\t\tcfg := &Prest{}\n\t\t_ = Parse(cfg)\n\t\tos.Unsetenv(\"PREST_CONF\")\n\t}\n}\n\nfunc TestGetDefaultPrestConf(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tdefaultFile string\n\t\tprestConf string\n\t\tresult string\n\t}{\n\t\t{\"empty config\", \".\/prest.toml\", \"\", \"\"},\n\t\t{\"custom config\", \".\/prest.toml\", \"..\/prest.toml\", \"..\/prest.toml\"},\n\t\t{\"default config\", \".\/testdata\/prest.toml\", \"\", \".\/testdata\/prest.toml\"},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdefaultFile = tc.defaultFile\n\t\t\tcfg := getDefaultPrestConf(tc.prestConf)\n\t\t\tif cfg != tc.result {\n\t\t\t\tt.Errorf(\"expected %v, but got %v\", tc.result, cfg)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDatabaseURL(t *testing.T) {\n\tos.Setenv(\"PREST_PG_URL\", \"postgresql:\/\/user:pass@localhost:1234\/mydatabase\/?sslmode=disable\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.PGDatabase != \"mydatabase\" {\n\t\tt.Errorf(\"expected database name: mydatabase, got: %s\", cfg.PGDatabase)\n\t}\n\tif cfg.PGHost != \"localhost\" {\n\t\tt.Errorf(\"expected database host: localhost, got: %s\", cfg.PGHost)\n\t}\n\tif cfg.PGPort != 1234 {\n\t\tt.Errorf(\"expected database port: 1234, got: %d\", cfg.PGPort)\n\t}\n\tif cfg.PGUser != \"user\" {\n\t\tt.Errorf(\"expected database user: user, got: %s\", cfg.PGUser)\n\t}\n\tif cfg.PGPass != \"pass\" {\n\t\tt.Errorf(\"expected database password: pass, got: %s\", cfg.PGPass)\n\t}\n\tif cfg.SSLMode != \"disable\" {\n\t\tt.Errorf(\"expected database ssl mode: disable, got: %s\", cfg.SSLMode)\n\t}\n\n\tos.Unsetenv(\"PREST_PG_URL\")\n\tos.Setenv(\"DATABASE_URL\", \"postgresql:\/\/cloud:cloudPass@localhost:5432\/CloudDatabase\/?sslmode=disable\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.PGPort != 5432 {\n\t\tt.Errorf(\"expected database port: 5432, got: %d\", cfg.PGPort)\n\t}\n\tif cfg.PGUser != \"cloud\" {\n\t\tt.Errorf(\"expected database user: cloud, got: %s\", cfg.PGUser)\n\t}\n\tif cfg.PGPass != \"cloudPass\" {\n\t\tt.Errorf(\"expected database password: cloudPass, got: %s\", cfg.PGPass)\n\t}\n\tif cfg.SSLMode != \"disable\" {\n\t\tt.Errorf(\"expected database SSL mode: disable, got: %s\", cfg.SSLMode)\n\t}\n\n\tos.Unsetenv(\"DATABASE_URL\")\n}\n\nfunc TestHTTPPort(t *testing.T) {\n\tos.Setenv(\"PORT\", \"8080\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 8080 {\n\t\tt.Errorf(\"expected http port: 8080, got: %d\", cfg.HTTPPort)\n\t}\n\n\t\/\/ set env PREST_HTTP_PORT and PORT\n\tos.Setenv(\"PREST_HTTP_PORT\", \"3000\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 8080 {\n\t\tt.Errorf(\"expected http port: 8080, got: %d\", cfg.HTTPPort)\n\t}\n\n\t\/\/ unset env PORT and set PREST_HTTP_PORT\n\tos.Unsetenv(\"PORT\")\n\n\tcfg = &Prest{}\n\terr = Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif cfg.HTTPPort != 3000 {\n\t\tt.Errorf(\"expected http port: 3000, got: %d\", cfg.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PREST_HTTP_PORT\")\n}\n\nfunc Test_parseDatabaseURL(t *testing.T) {\n\tc := &Prest{PGURL: \"postgresql:\/\/user:pass@localhost:5432\/mydatabase\/?sslmode=require\"}\n\tif err := parseDatabaseURL(c); err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\tif c.PGDatabase != \"mydatabase\" {\n\t\tt.Errorf(\"expected database name: mydatabase, got: %s\", c.PGDatabase)\n\t}\n\tif c.PGPort != 5432 {\n\t\tt.Errorf(\"expected database port: 5432, got: %d\", c.PGPort)\n\t}\n\tif c.PGUser != \"user\" {\n\t\tt.Errorf(\"expected database user: user, got: %s\", c.PGUser)\n\t}\n\tif c.PGPass != \"pass\" {\n\t\tt.Errorf(\"expected database password: password, got: %s\", c.PGPass)\n\t}\n\tif c.SSLMode != \"require\" {\n\t\tt.Errorf(\"expected database SSL mode: require, got: %s\", c.SSLMode)\n\t}\n\n\t\/\/ errors\n\tc = &Prest{PGURL: \"postgresql:\/\/user:pass@localhost:port\/mydatabase\/?sslmode=require\"}\n\tif err := parseDatabaseURL(c); err == nil {\n\t\tt.Error(\"expected error, got nothing\")\n\t}\n}\n\nfunc Test_portFromEnv(t *testing.T) {\n\tc := &Prest{}\n\n\tos.Setenv(\"PORT\", \"PORT\")\n\n\terr := portFromEnv(c)\n\tif err == nil {\n\t\tt.Errorf(\"expect error, got: %d\", c.HTTPPort)\n\t}\n\n\tos.Unsetenv(\"PORT\")\n}\n\nfunc Test_Auth(t *testing.T) {\n\tos.Setenv(\"PREST_CONF\", \".\/testdata\/prest.toml\")\n\n\tviperCfg()\n\tcfg := &Prest{}\n\terr := Parse(cfg)\n\tif err != nil {\n\t\tt.Errorf(\"expected no errors, but got %v\", err)\n\t}\n\n\tif cfg.AuthEnabled != false {\n\t\tt.Errorf(\"expected auth.enabled to be: false, got: %v\", cfg.AuthEnabled)\n\t}\n\n\tif cfg.AuthTable != \"prest_users\" {\n\t\tt.Errorf(\"expected auth.table to be: prest_users, got: %s\", cfg.AuthTable)\n\t}\n\n\tif cfg.AuthUsername != \"username\" {\n\t\tt.Errorf(\"expected auth.username to be: username, got: %s\", cfg.AuthUsername)\n\t}\n\n\tif cfg.AuthPassword != \"password\" {\n\t\tt.Errorf(\"expected auth.password to be: password, got: %s\", cfg.AuthPassword)\n\t}\n\n\tif cfg.AuthEncrypt != \"MD5\" {\n\t\tt.Errorf(\"expected auth.encrypt to be: MD5, got: %s\", cfg.AuthEncrypt)\n\t}\n\n\tmetadata := []string{\"first_name\", \"last_name\", \"last_login\"}\n\tif len(cfg.AuthMetadata) != len(metadata) {\n\t\tt.Errorf(\"expected auth.metadata to be: %d, got: %d\", len(cfg.AuthMetadata), len(metadata))\n\t}\n\n\tfor i, v := range cfg.AuthMetadata {\n\t\tif v != metadata[i] {\n\t\t\tt.Errorf(\"expected auth.metadata field %d to be: %s, got: %s\", i, v, metadata[i])\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"errors\"\n\t\"github.com\/ngerakines\/testutils\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\ntype tempFileManager struct {\n\tpath string\n\tfiles map[string]string\n}\n\nfunc (fm *tempFileManager) initFile(name, body string) {\n\tpath := filepath.Join(fm.path, name)\n\terr := ioutil.WriteFile(path, []byte(body), 00777)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfm.files[name] = path\n}\n\nfunc (fm *tempFileManager) get(name string) (string, error) {\n\tpath, hasPath := fm.files[name]\n\tif hasPath {\n\t\treturn path, nil\n\t}\n\treturn \"\", errors.New(\"No config file exists with that label.\")\n}\n\nfunc initTempFileManager(path string) *tempFileManager {\n\tfm := new(tempFileManager)\n\tfm.path = path\n\tfm.files = make(map[string]string)\n\tfm.initFile(\"basic\", `{\n\t\t\"listen\": \":7041\",\n\t\t\"lruSize\": 120000,\n\t\t\"index\": {\"engine\": \"local\", \"localBasePath\": \".\/index\"},\n\t\t\"storage\": {\"engine\": \"s3\", \"s3Buckets\": [\"localhost\"], \"s3Key\": \"foo\", \"s3Secret\": \"bar\", \"s3Host\": \"localhost\"}\n\t\t}`)\n\treturn fm\n}\n\nfunc TestDefaultConfig(t *testing.T) {\n\tdm := testutils.NewDirectoryManager()\n\tdefer dm.Close()\n\n\tappConfig := NewDefaultAppConfig()\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\tif appConfig.Listen() != \":7040\" {\n\t\tt.Error(\"Invalid default for appConfig.Listen()\", appConfig.Listen())\n\t\treturn\n\t}\n\tif appConfig.Storage().Engine() != \"local\" {\n\t\tt.Error(\"Invalid default for appConfig.Storage().Engine()\", appConfig.Storage().Engine())\n\t\treturn\n\t}\n}\n\nfunc TestBasicConfig(t *testing.T) {\n\tdm := testutils.NewDirectoryManager()\n\tdefer dm.Close()\n\tfm := initTempFileManager(dm.Path)\n\n\tpath, err := fm.get(\"basic\")\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\tappConfig, err := LoadAppConfig(path)\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\tif appConfig.Listen() != \":7041\" {\n\t\tt.Error(\"appConfig.Listen()\", appConfig.Listen())\n\t}\n\n\tif appConfig.Storage().Engine() != \"s3\" {\n\t\tt.Error(\"appConfig.Storage().Engine()\", appConfig.Storage().Engine())\n\t}\n}\n<commit_msg>Removing stale tests.<commit_after><|endoftext|>"} {"text":"<commit_before>package gerrit\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ TODO Try to reduce the code duplications of a std API req\n\/\/ Maybe with http:\/\/play.golang.org\/p\/j-667shCCB\n\/\/ and https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/D-gIr24k5uY\n\n\/\/ A Client manages communication with the Gerrit API.\ntype Client struct {\n\t\/\/ HTTP client used to communicate with the API.\n\tclient *http.Client\n\n\t\/\/ Base URL for API requests.\n\t\/\/ BaseURL should always be specified with a trailing slash.\n\tbaseURL *url.URL\n\n\t\/\/ Gerrit service for authentication\n\tAuthentication *AuthenticationService\n\n\t\/\/ Services used for talking to different parts of the standard\n\t\/\/ Gerrit API.\n\tAccess *AccessService\n\tAccounts *AccountsService\n\tChanges *ChangesService\n\tConfig *ConfigService\n\tGroups *GroupsService\n\tPlugins *PluginsService\n\tProjects *ProjectsService\n\n\t\/\/ Additional services used for talking to non-standard Gerrit\n\t\/\/ APIs.\n\tEventsLog *EventsLogService\n}\n\n\/\/ Response is a Gerrit API response.\n\/\/ This wraps the standard http.Response returned from Gerrit.\ntype Response struct {\n\t*http.Response\n}\n\n\/\/ NewClient returns a new Gerrit API client.\n\/\/ gerritInstance has to be the HTTP endpoint of the Gerrit instance.\n\/\/ If a nil httpClient is provided, http.DefaultClient will be used.\nfunc NewClient(gerritURL string, httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tif len(gerritURL) == 0 {\n\t\treturn nil, fmt.Errorf(\"No Gerrit instance given.\")\n\t}\n\tbaseURL, err := url.Parse(gerritURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tbaseURL: baseURL,\n\t}\n\tc.Authentication = &AuthenticationService{client: c}\n\tc.Access = &AccessService{client: c}\n\tc.Accounts = &AccountsService{client: c}\n\tc.Changes = &ChangesService{client: c}\n\tc.Config = &ConfigService{client: c}\n\tc.Groups = &GroupsService{client: c}\n\tc.Plugins = &PluginsService{client: c}\n\tc.Projects = &ProjectsService{client: c}\n\tc.EventsLog = &EventsLogService{client: c}\n\n\treturn c, nil\n}\n\n\/\/ NewRequest creates an API request.\n\/\/ A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.\n\/\/ Relative URLs should always be specified without a preceding slash.\n\/\/ If specified, the value pointed to by body is JSON encoded and included as the request body.\nfunc (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {\n\t\/\/ Build URL for request\n\tu, err := c.buildURLForRequest(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr := json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, u, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Apply Authentication\n\tc.addAuthentication(req)\n\n\t\/\/ Request compact JSON\n\t\/\/ See https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\t\/\/ TODO: Add gzip encoding\n\t\/\/ Accept-Encoding request header is set to gzip\n\t\/\/ See https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\n\n\treturn req, nil\n}\n\n\/\/ Call is a combine function for Client.NewRequest and Client.Do.\n\/\/\n\/\/ Most API methods are quite the same.\n\/\/ Get the URL, apply options, make a request, and get the response.\n\/\/ Without adding special headers or something.\n\/\/ To avoid a big amount of code duplication you can Client.Call.\n\/\/\n\/\/ method is the HTTP method you want to call.\n\/\/ u is the URL you want to call.\n\/\/ body is the HTTP body.\n\/\/ v is the HTTP response.\n\/\/\n\/\/ For more information read https:\/\/github.com\/google\/go-github\/issues\/234\nfunc (c *Client) Call(method, u string, body interface{}, v interface{}) (*Response, error) {\n\treq, err := c.NewRequest(method, u, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req, v, body)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, err\n}\n\n\/\/ buildURLForRequest will build the URL (as string) that will be called.\n\/\/ We need such a utility method, because the URL.Path needs to be escaped (partly).\n\/\/\n\/\/ E.g. if a project is called via \"projects\/%s\" and the project is named \"plugin\/delete-project\"\n\/\/ there has to be \"projects\/plugin%25Fdelete-project\" instead of \"projects\/plugin\/delete-project\".\n\/\/ The second url will return nothing.\nfunc (c *Client) buildURLForRequest(urlStr string) (string, error) {\n\tu := c.baseURL.String()\n\n\t\/\/ If there is no \/ at the end, add one\n\tif strings.HasSuffix(u, \"\/\") == false {\n\t\tu += \"\/\"\n\t}\n\n\t\/\/ If there is a \"\/\" at the start, remove it\n\tif strings.HasPrefix(urlStr, \"\/\") == true {\n\t\turlStr = urlStr[1:]\n\t}\n\n\t\/\/ If we are authenticated, lets apply the a\/ prefix\n\tif c.Authentication.HasAuth() == true {\n\t\turlStr = \"a\/\" + urlStr\n\t}\n\n\trel, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tu += rel.String()\n\n\treturn u, nil\n}\n\n\/\/ Do sends an API request and returns the API response.\n\/\/ The API response is JSON decoded and stored in the value pointed to by v,\n\/\/ or returned as an error if an API error has occurred.\n\/\/ If v implements the io.Writer interface, the raw response body will be written to v,\n\/\/ without attempting to first decode it.\n\/\/ The original body is also required in case case the request needs to be\n\/\/ retried.\nfunc (c *Client) Do(req *http.Request, v interface{}, body interface{}) (*Response, error) {\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the server responds with 401 Unauthorized and we're using digest\n\t\/\/ authentication then generate an Authorization header and retry\n\t\/\/ the request.\n\tif resp.StatusCode == http.StatusUnauthorized && c.Authentication.HasDigestAuth() {\n\t\tdigestAuthHeader, err := c.Authentication.digestAuthHeader(resp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Use the original url but strip \/a. This will be\n\t\t\/\/ automatically added in NewRequest.\n\t\turi := strings.TrimLeft(req.URL.RequestURI(), \"\/a\")\n\n\t\tauthRequest, err := c.NewRequest(req.Method, uri, body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Duplicate the original headers then establish the newly\n\t\t\/\/ created Authorization header.\n\t\t\/\/ TODO - Need to figure out the header. Still getting\n\t\t\/\/ 401 Unauthorized (which is better than before which was\n\t\t\/\/ a straight error).\n\t\tauthRequest.Header = req.Header\n\t\tauthRequest.Header.Del(\"WWW-Authenticate\")\n\t\tauthRequest.Header.Set(\"Authorization\", digestAuthHeader)\n\n\t\tresp, err = c.client.Do(authRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Wrap response\n\tresponse := &Response{Response: resp}\n\n\terr = CheckResponse(resp)\n\tif err != nil {\n\t\t\/\/ even though there was an error, we still return the response\n\t\t\/\/ in case the caller wants to inspect it further\n\t\treturn response, err\n\t}\n\n\tif v != nil {\n\t\tdefer resp.Body.Close()\n\t\tif w, ok := v.(io.Writer); ok {\n\t\t\tio.Copy(w, resp.Body)\n\t\t} else {\n\t\t\tvar body []byte\n\t\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ even though there was an error, we still return the response\n\t\t\t\t\/\/ in case the caller wants to inspect it further\n\t\t\t\treturn response, err\n\t\t\t}\n\n\t\t\tbody = RemoveMagicPrefixLine(body)\n\t\t\terr = json.Unmarshal(body, v)\n\t\t}\n\t}\n\treturn response, err\n}\n\nfunc (c *Client) addAuthentication(req *http.Request) {\n\t\/\/ Apply HTTP Basic Authentication\n\tif c.Authentication.HasBasicAuth() == true {\n\t\treq.SetBasicAuth(c.Authentication.name, c.Authentication.secret)\n\t}\n\n\t\/\/ Apply HTTP Cookie\n\tif c.Authentication.HasCookieAuth() == true {\n\t\treq.AddCookie(&http.Cookie{\n\t\t\tName: c.Authentication.name,\n\t\t\tValue: c.Authentication.secret,\n\t\t})\n\t}\n}\n\n\/\/ DeleteRequest sends an DELETE API Request to urlStr with optional body.\n\/\/ It is a shorthand combination for Client.NewRequest with Client.Do.\n\/\/\n\/\/ Relative URLs should always be specified without a preceding slash.\n\/\/ If specified, the value pointed to by body is JSON encoded and included as the request body.\nfunc (c *Client) DeleteRequest(urlStr string, body interface{}) (*Response, error) {\n\treq, err := c.NewRequest(\"DELETE\", urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.Do(req, nil, body)\n}\n\n\/\/ RemoveMagicPrefixLine removes the \"magic prefix line\" of Gerris JSON\n\/\/ response if present. The JSON response body starts with a magic prefix line\n\/\/ that must be stripped before feeding the rest of the response body to a JSON\n\/\/ parser. The reason for this is to prevent against Cross Site Script\n\/\/ Inclusion (XSSI) attacks. By default all standard Gerrit APIs include this\n\/\/ prefix line though some plugins may not.\n\/\/\n\/\/ Gerrit API docs: https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\nfunc RemoveMagicPrefixLine(body []byte) []byte {\n\tif bytes.HasPrefix(body, []byte(\")]}'\\n\")) {\n\t\tindex := bytes.IndexByte(body, '\\n')\n\t\tif index > -1 {\n\t\t\t\/\/ +1 to catch the \\n as well\n\t\t\tbody = body[(index + 1):]\n\t\t}\n\t}\n\treturn body\n}\n\n\/\/ CheckResponse checks the API response for errors, and returns them if present.\n\/\/ A response is considered an error if it has a status code outside the 200 range.\n\/\/ API error responses are expected to have no response body.\n\/\/\n\/\/ Gerrit API docs: https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#response-codes\nfunc CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\t\/\/ Some calls require an authentification\n\t\/\/ In such cases errors like:\n\t\/\/ \t\tAPI call to https:\/\/review.typo3.org\/accounts\/self failed: 403 Forbidden\n\t\/\/ will be thrown.\n\n\terr := fmt.Errorf(\"API call to %s failed: %s\", r.Request.URL.String(), r.Status)\n\treturn err\n}\n\n\/\/ addOptions adds the parameters in opt as URL query parameters to s.\n\/\/ opt must be a struct whose fields may contain \"url\" tags.\nfunc addOptions(s string, opt interface{}) (string, error) {\n\tv := reflect.ValueOf(opt)\n\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\treturn s, nil\n\t}\n\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tqs, err := query.Values(opt)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tu.RawQuery = qs.Encode()\n\treturn u.String(), nil\n}\n\n\/\/ getStringResponseWithoutOptions retrieved a single string Response for a GET request\nfunc getStringResponseWithoutOptions(client *Client, u string) (string, *Response, error) {\n\tv := new(string)\n\tresp, err := client.Call(\"GET\", u, nil, v)\n\treturn *v, resp, err\n}\n<commit_msg>only apply a\/ if it's not already<commit_after>package gerrit\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ TODO Try to reduce the code duplications of a std API req\n\/\/ Maybe with http:\/\/play.golang.org\/p\/j-667shCCB\n\/\/ and https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/D-gIr24k5uY\n\n\/\/ A Client manages communication with the Gerrit API.\ntype Client struct {\n\t\/\/ HTTP client used to communicate with the API.\n\tclient *http.Client\n\n\t\/\/ Base URL for API requests.\n\t\/\/ BaseURL should always be specified with a trailing slash.\n\tbaseURL *url.URL\n\n\t\/\/ Gerrit service for authentication\n\tAuthentication *AuthenticationService\n\n\t\/\/ Services used for talking to different parts of the standard\n\t\/\/ Gerrit API.\n\tAccess *AccessService\n\tAccounts *AccountsService\n\tChanges *ChangesService\n\tConfig *ConfigService\n\tGroups *GroupsService\n\tPlugins *PluginsService\n\tProjects *ProjectsService\n\n\t\/\/ Additional services used for talking to non-standard Gerrit\n\t\/\/ APIs.\n\tEventsLog *EventsLogService\n}\n\n\/\/ Response is a Gerrit API response.\n\/\/ This wraps the standard http.Response returned from Gerrit.\ntype Response struct {\n\t*http.Response\n}\n\n\/\/ NewClient returns a new Gerrit API client.\n\/\/ gerritInstance has to be the HTTP endpoint of the Gerrit instance.\n\/\/ If a nil httpClient is provided, http.DefaultClient will be used.\nfunc NewClient(gerritURL string, httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tif len(gerritURL) == 0 {\n\t\treturn nil, fmt.Errorf(\"No Gerrit instance given.\")\n\t}\n\tbaseURL, err := url.Parse(gerritURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tbaseURL: baseURL,\n\t}\n\tc.Authentication = &AuthenticationService{client: c}\n\tc.Access = &AccessService{client: c}\n\tc.Accounts = &AccountsService{client: c}\n\tc.Changes = &ChangesService{client: c}\n\tc.Config = &ConfigService{client: c}\n\tc.Groups = &GroupsService{client: c}\n\tc.Plugins = &PluginsService{client: c}\n\tc.Projects = &ProjectsService{client: c}\n\tc.EventsLog = &EventsLogService{client: c}\n\n\treturn c, nil\n}\n\n\/\/ NewRequest creates an API request.\n\/\/ A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.\n\/\/ Relative URLs should always be specified without a preceding slash.\n\/\/ If specified, the value pointed to by body is JSON encoded and included as the request body.\nfunc (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {\n\t\/\/ Build URL for request\n\tu, err := c.buildURLForRequest(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr := json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, u, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Apply Authentication\n\tc.addAuthentication(req)\n\n\t\/\/ Request compact JSON\n\t\/\/ See https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\t\/\/ TODO: Add gzip encoding\n\t\/\/ Accept-Encoding request header is set to gzip\n\t\/\/ See https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\n\n\treturn req, nil\n}\n\n\/\/ Call is a combine function for Client.NewRequest and Client.Do.\n\/\/\n\/\/ Most API methods are quite the same.\n\/\/ Get the URL, apply options, make a request, and get the response.\n\/\/ Without adding special headers or something.\n\/\/ To avoid a big amount of code duplication you can Client.Call.\n\/\/\n\/\/ method is the HTTP method you want to call.\n\/\/ u is the URL you want to call.\n\/\/ body is the HTTP body.\n\/\/ v is the HTTP response.\n\/\/\n\/\/ For more information read https:\/\/github.com\/google\/go-github\/issues\/234\nfunc (c *Client) Call(method, u string, body interface{}, v interface{}) (*Response, error) {\n\treq, err := c.NewRequest(method, u, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req, v, body)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, err\n}\n\n\/\/ buildURLForRequest will build the URL (as string) that will be called.\n\/\/ We need such a utility method, because the URL.Path needs to be escaped (partly).\n\/\/\n\/\/ E.g. if a project is called via \"projects\/%s\" and the project is named \"plugin\/delete-project\"\n\/\/ there has to be \"projects\/plugin%25Fdelete-project\" instead of \"projects\/plugin\/delete-project\".\n\/\/ The second url will return nothing.\nfunc (c *Client) buildURLForRequest(urlStr string) (string, error) {\n\tu := c.baseURL.String()\n\n\t\/\/ If there is no \/ at the end, add one\n\tif strings.HasSuffix(u, \"\/\") == false {\n\t\tu += \"\/\"\n\t}\n\n\t\/\/ If there is a \"\/\" at the start, remove it\n\tif strings.HasPrefix(urlStr, \"\/\") == true {\n\t\turlStr = urlStr[1:]\n\t}\n\n\t\/\/ If we are authenticated, lets apply the a\/ prefix but only if it's\n\t\/\/ not already applied.\n\tif c.Authentication.HasAuth() == true && !strings.HasPrefix(urlStr, \"a\/\") {\n\t\turlStr = \"a\/\" + urlStr\n\t}\n\n\trel, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tu += rel.String()\n\n\treturn u, nil\n}\n\n\/\/ Do sends an API request and returns the API response.\n\/\/ The API response is JSON decoded and stored in the value pointed to by v,\n\/\/ or returned as an error if an API error has occurred.\n\/\/ If v implements the io.Writer interface, the raw response body will be written to v,\n\/\/ without attempting to first decode it.\n\/\/ The original body is also required in case case the request needs to be\n\/\/ retried.\nfunc (c *Client) Do(req *http.Request, v interface{}, body interface{}) (*Response, error) {\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the server responds with 401 Unauthorized and we're using digest\n\t\/\/ authentication then generate an Authorization header and retry\n\t\/\/ the request.\n\tif resp.StatusCode == http.StatusUnauthorized && c.Authentication.HasDigestAuth() {\n\t\tdigestAuthHeader, err := c.Authentication.digestAuthHeader(resp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tauthRequest, err := c.NewRequest(req.Method, req.URL.RequestURI(), body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Duplicate the original headers then establish the newly\n\t\t\/\/ created Authorization header.\n\t\t\/\/ TODO - Need to figure out the header. Still getting\n\t\t\/\/ 401 Unauthorized (which is better than before which was\n\t\t\/\/ a straight error).\n\t\tauthRequest.Header = req.Header\n\t\tauthRequest.Header.Del(\"WWW-Authenticate\")\n\t\tauthRequest.Header.Set(\"Authorization\", digestAuthHeader)\n\n\t\tresp, err = c.client.Do(authRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Wrap response\n\tresponse := &Response{Response: resp}\n\n\terr = CheckResponse(resp)\n\tif err != nil {\n\t\t\/\/ even though there was an error, we still return the response\n\t\t\/\/ in case the caller wants to inspect it further\n\t\treturn response, err\n\t}\n\n\tif v != nil {\n\t\tdefer resp.Body.Close()\n\t\tif w, ok := v.(io.Writer); ok {\n\t\t\tio.Copy(w, resp.Body)\n\t\t} else {\n\t\t\tvar body []byte\n\t\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ even though there was an error, we still return the response\n\t\t\t\t\/\/ in case the caller wants to inspect it further\n\t\t\t\treturn response, err\n\t\t\t}\n\n\t\t\tbody = RemoveMagicPrefixLine(body)\n\t\t\terr = json.Unmarshal(body, v)\n\t\t}\n\t}\n\treturn response, err\n}\n\nfunc (c *Client) addAuthentication(req *http.Request) {\n\t\/\/ Apply HTTP Basic Authentication\n\tif c.Authentication.HasBasicAuth() == true {\n\t\treq.SetBasicAuth(c.Authentication.name, c.Authentication.secret)\n\t}\n\n\t\/\/ Apply HTTP Cookie\n\tif c.Authentication.HasCookieAuth() == true {\n\t\treq.AddCookie(&http.Cookie{\n\t\t\tName: c.Authentication.name,\n\t\t\tValue: c.Authentication.secret,\n\t\t})\n\t}\n}\n\n\/\/ DeleteRequest sends an DELETE API Request to urlStr with optional body.\n\/\/ It is a shorthand combination for Client.NewRequest with Client.Do.\n\/\/\n\/\/ Relative URLs should always be specified without a preceding slash.\n\/\/ If specified, the value pointed to by body is JSON encoded and included as the request body.\nfunc (c *Client) DeleteRequest(urlStr string, body interface{}) (*Response, error) {\n\treq, err := c.NewRequest(\"DELETE\", urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.Do(req, nil, body)\n}\n\n\/\/ RemoveMagicPrefixLine removes the \"magic prefix line\" of Gerris JSON\n\/\/ response if present. The JSON response body starts with a magic prefix line\n\/\/ that must be stripped before feeding the rest of the response body to a JSON\n\/\/ parser. The reason for this is to prevent against Cross Site Script\n\/\/ Inclusion (XSSI) attacks. By default all standard Gerrit APIs include this\n\/\/ prefix line though some plugins may not.\n\/\/\n\/\/ Gerrit API docs: https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#output\nfunc RemoveMagicPrefixLine(body []byte) []byte {\n\tif bytes.HasPrefix(body, []byte(\")]}'\\n\")) {\n\t\tindex := bytes.IndexByte(body, '\\n')\n\t\tif index > -1 {\n\t\t\t\/\/ +1 to catch the \\n as well\n\t\t\tbody = body[(index + 1):]\n\t\t}\n\t}\n\treturn body\n}\n\n\/\/ CheckResponse checks the API response for errors, and returns them if present.\n\/\/ A response is considered an error if it has a status code outside the 200 range.\n\/\/ API error responses are expected to have no response body.\n\/\/\n\/\/ Gerrit API docs: https:\/\/gerrit-review.googlesource.com\/Documentation\/rest-api.html#response-codes\nfunc CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\t\/\/ Some calls require an authentification\n\t\/\/ In such cases errors like:\n\t\/\/ \t\tAPI call to https:\/\/review.typo3.org\/accounts\/self failed: 403 Forbidden\n\t\/\/ will be thrown.\n\n\terr := fmt.Errorf(\"API call to %s failed: %s\", r.Request.URL.String(), r.Status)\n\treturn err\n}\n\n\/\/ addOptions adds the parameters in opt as URL query parameters to s.\n\/\/ opt must be a struct whose fields may contain \"url\" tags.\nfunc addOptions(s string, opt interface{}) (string, error) {\n\tv := reflect.ValueOf(opt)\n\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\treturn s, nil\n\t}\n\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tqs, err := query.Values(opt)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\tu.RawQuery = qs.Encode()\n\treturn u.String(), nil\n}\n\n\/\/ getStringResponseWithoutOptions retrieved a single string Response for a GET request\nfunc getStringResponseWithoutOptions(client *Client, u string) (string, *Response, error) {\n\tv := new(string)\n\tresp, err := client.Call(\"GET\", u, nil, v)\n\treturn *v, resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>package s3gof3r\n\nimport (\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tqWaitSz = 2\n)\n\ntype getter struct {\n\turl url.URL\n\tb *Bucket\n\tbufsz int64\n\terr error\n\twg sync.WaitGroup\n\n\tchunkID int\n\trChunk *chunk\n\tcontentLen int64\n\tbytesRead int64\n\tchunkTotal int\n\n\treadCh chan *chunk\n\tgetCh chan *chunk\n\tquit chan struct{}\n\tqWait map[int]*chunk\n\n\tsp *bp\n\n\tclosed bool\n\tc *Config\n\n\tmd5 hash.Hash\n\tcIdx int64\n}\n\ntype chunk struct {\n\tid int\n\theader http.Header\n\tstart int64\n\tsize int64\n\tb []byte\n}\n\nfunc newGetter(getURL url.URL, c *Config, b *Bucket) (io.ReadCloser, http.Header, error) {\n\tg := new(getter)\n\tg.url = getURL\n\tg.c = c\n\tg.bufsz = max64(c.PartSize, 1)\n\tg.c.NTry = max(c.NTry, 1)\n\tg.c.Concurrency = max(c.Concurrency, 1)\n\n\tg.getCh = make(chan *chunk)\n\tg.readCh = make(chan *chunk)\n\tg.quit = make(chan struct{})\n\tg.qWait = make(map[int]*chunk)\n\tg.b = b\n\tg.md5 = md5.New()\n\n\t\/\/ use get instead of head for error messaging\n\tresp, err := g.retryRequest(\"GET\", g.url.String(), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 200 {\n\t\treturn nil, nil, newRespError(resp)\n\t}\n\tg.contentLen = resp.ContentLength\n\tg.chunkTotal = int((g.contentLen + g.bufsz - 1) \/ g.bufsz) \/\/ round up, integer division\n\tlogger.debugPrintf(\"object size: %3.2g MB\", float64(g.contentLen)\/float64((1*mb)))\n\n\tg.sp = bufferPool(g.bufsz)\n\n\tfor i := 0; i < g.c.Concurrency; i++ {\n\t\tgo g.worker()\n\t}\n\tgo g.initChunks()\n\treturn g, resp.Header, nil\n}\n\nfunc (g *getter) retryRequest(method, urlStr string, body io.ReadSeeker) (resp *http.Response, err error) {\n\tfor i := 0; i < g.c.NTry; i++ {\n\t\tvar req *http.Request\n\t\treq, err = http.NewRequest(method, urlStr, body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tg.b.Sign(req)\n\t\tresp, err = g.c.Client.Do(req)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.debugPrintln(err)\n\t\tif body != nil {\n\t\t\tif _, err = body.Seek(0, 0); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (g *getter) initChunks() {\n\tid := 0\n\tfor i := int64(0); i < g.contentLen; {\n\t\tfor len(g.qWait) >= qWaitSz {\n\t\t\t\/\/ Limit growth of qWait\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t\tsize := min64(g.bufsz, g.contentLen-i)\n\t\tc := &chunk{\n\t\t\tid: id,\n\t\t\theader: http.Header{\n\t\t\t\t\"Range\": {fmt.Sprintf(\"bytes=%d-%d\",\n\t\t\t\t\ti, i+size-1)},\n\t\t\t},\n\t\t\tstart: i,\n\t\t\tsize: size,\n\t\t\tb: nil,\n\t\t}\n\t\ti += size\n\t\tid++\n\t\tg.wg.Add(1)\n\t\tg.getCh <- c\n\t}\n\tclose(g.getCh)\n}\n\nfunc (g *getter) worker() {\n\tfor c := range g.getCh {\n\t\tg.retryGetChunk(c)\n\t}\n\n}\n\nfunc (g *getter) retryGetChunk(c *chunk) {\n\tdefer g.wg.Done()\n\tvar err error\n\tc.b = <-g.sp.get\n\tfor i := 0; i < g.c.NTry; i++ {\n\t\ttime.Sleep(time.Duration(math.Exp2(float64(i))) * 100 * time.Millisecond) \/\/ exponential back-off\n\t\terr = g.getChunk(c)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.debugPrintf(\"error on attempt %d: retrying chunk: %v, error: %s\", i, c.id, err)\n\t}\n\tg.err = err\n\tclose(g.quit) \/\/ out of tries, ensure quit by closing channel\n}\n\nfunc (g *getter) getChunk(c *chunk) error {\n\t\/\/ ensure buffer is empty\n\n\tr, err := http.NewRequest(\"GET\", g.url.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header = c.header\n\tg.b.Sign(r)\n\tresp, err := g.c.Client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 206 {\n\t\treturn newRespError(resp)\n\t}\n\tn, err := io.ReadAtLeast(resp.Body, c.b, int(c.size))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif int64(n) != c.size {\n\t\treturn fmt.Errorf(\"chunk %d: Expected %d bytes, received %d\",\n\t\t\tc.id, c.size, n)\n\t}\n\tg.readCh <- c\n\treturn nil\n}\n\nfunc (g *getter) Read(p []byte) (int, error) {\n\tvar err error\n\tif g.closed {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif g.err != nil {\n\t\treturn 0, g.err\n\t}\n\tnw := 0\n\tfor nw < len(p) {\n\t\tif g.bytesRead == g.contentLen {\n\t\t\treturn nw, io.EOF\n\t\t}\n\t\tif g.rChunk == nil {\n\t\t\tg.rChunk, err = g.nextChunk()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tg.cIdx = 0\n\t\t}\n\n\t\tn := copy(p[nw:], g.rChunk.b[g.cIdx:g.rChunk.size])\n\t\tg.cIdx += int64(n)\n\t\tnw += n\n\t\tg.bytesRead += int64(n)\n\n\t\tif g.cIdx >= g.rChunk.size-1 { \/\/ chunk complete\n\t\t\tg.sp.give <- g.rChunk.b\n\t\t\tg.chunkID++\n\t\t\tg.rChunk = nil\n\t\t}\n\t}\n\treturn nw, nil\n\n}\n\nfunc (g *getter) nextChunk() (*chunk, error) {\n\tfor {\n\n\t\t\/\/ first check qWait\n\t\tc := g.qWait[g.chunkID]\n\t\tif c != nil {\n\t\t\tdelete(g.qWait, g.chunkID)\n\t\t\tif g.c.Md5Check {\n\t\t\t\tif _, err := g.md5.Write(c.b[:c.size]); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t\t\/\/ if next chunk not in qWait, read from channel\n\t\tselect {\n\t\tcase c := <-g.readCh:\n\t\t\tg.qWait[c.id] = c\n\t\tcase <-g.quit:\n\t\t\treturn nil, g.err \/\/ fatal error, quit.\n\t\t}\n\t}\n}\n\nfunc (g *getter) Close() error {\n\tif g.closed {\n\t\treturn syscall.EINVAL\n\t}\n\tif g.err != nil {\n\t\treturn g.err\n\t}\n\tg.wg.Wait()\n\tg.closed = true\n\tclose(g.sp.quit)\n\tif g.bytesRead != g.contentLen {\n\t\treturn fmt.Errorf(\"read error: %d bytes read. expected: %d\", g.bytesRead, g.contentLen)\n\t}\n\tif g.c.Md5Check {\n\t\tif err := g.checkMd5(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *getter) checkMd5() (err error) {\n\tcalcMd5 := fmt.Sprintf(\"%x\", g.md5.Sum(nil))\n\tmd5Path := fmt.Sprint(\".md5\", g.url.Path, \".md5\")\n\tmd5Url, err := g.b.url(md5Path, g.c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.debugPrintln(\"md5: \", calcMd5)\n\tlogger.debugPrintln(\"md5Path: \", md5Path)\n\tresp, err := g.retryRequest(\"GET\", md5Url.String(), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"MD5 check failed: %s not found: %s\", md5Url.String(), newRespError(resp))\n\t}\n\tgivenMd5, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\tif calcMd5 != string(givenMd5) {\n\t\treturn fmt.Errorf(\"MD5 mismatch. given:%s calculated:%s\", givenMd5, calcMd5)\n\t}\n\treturn\n}\n<commit_msg>Fix OBOE when comparing cIdx and rChunk.size Remove the -1 occurring when checking if the current index (cIdx) was equal or greater than size of chunk (rChunk.size) being processed. Both values are 1-based and there should not be the need to offset the size by 1; cIdx starts at 0 initially, but is replaced by number of bytes read which is 1-based as it is a count and not an index into the range of bytes being read.<commit_after>package s3gof3r\n\nimport (\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tqWaitSz = 2\n)\n\ntype getter struct {\n\turl url.URL\n\tb *Bucket\n\tbufsz int64\n\terr error\n\twg sync.WaitGroup\n\n\tchunkID int\n\trChunk *chunk\n\tcontentLen int64\n\tbytesRead int64\n\tchunkTotal int\n\n\treadCh chan *chunk\n\tgetCh chan *chunk\n\tquit chan struct{}\n\tqWait map[int]*chunk\n\n\tsp *bp\n\n\tclosed bool\n\tc *Config\n\n\tmd5 hash.Hash\n\tcIdx int64\n}\n\ntype chunk struct {\n\tid int\n\theader http.Header\n\tstart int64\n\tsize int64\n\tb []byte\n}\n\nfunc newGetter(getURL url.URL, c *Config, b *Bucket) (io.ReadCloser, http.Header, error) {\n\tg := new(getter)\n\tg.url = getURL\n\tg.c = c\n\tg.bufsz = max64(c.PartSize, 1)\n\tg.c.NTry = max(c.NTry, 1)\n\tg.c.Concurrency = max(c.Concurrency, 1)\n\n\tg.getCh = make(chan *chunk)\n\tg.readCh = make(chan *chunk)\n\tg.quit = make(chan struct{})\n\tg.qWait = make(map[int]*chunk)\n\tg.b = b\n\tg.md5 = md5.New()\n\n\t\/\/ use get instead of head for error messaging\n\tresp, err := g.retryRequest(\"GET\", g.url.String(), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 200 {\n\t\treturn nil, nil, newRespError(resp)\n\t}\n\tg.contentLen = resp.ContentLength\n\tg.chunkTotal = int((g.contentLen + g.bufsz - 1) \/ g.bufsz) \/\/ round up, integer division\n\tlogger.debugPrintf(\"object size: %3.2g MB\", float64(g.contentLen)\/float64((1*mb)))\n\n\tg.sp = bufferPool(g.bufsz)\n\n\tfor i := 0; i < g.c.Concurrency; i++ {\n\t\tgo g.worker()\n\t}\n\tgo g.initChunks()\n\treturn g, resp.Header, nil\n}\n\nfunc (g *getter) retryRequest(method, urlStr string, body io.ReadSeeker) (resp *http.Response, err error) {\n\tfor i := 0; i < g.c.NTry; i++ {\n\t\tvar req *http.Request\n\t\treq, err = http.NewRequest(method, urlStr, body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tg.b.Sign(req)\n\t\tresp, err = g.c.Client.Do(req)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.debugPrintln(err)\n\t\tif body != nil {\n\t\t\tif _, err = body.Seek(0, 0); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (g *getter) initChunks() {\n\tid := 0\n\tfor i := int64(0); i < g.contentLen; {\n\t\tfor len(g.qWait) >= qWaitSz {\n\t\t\t\/\/ Limit growth of qWait\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t\tsize := min64(g.bufsz, g.contentLen-i)\n\t\tc := &chunk{\n\t\t\tid: id,\n\t\t\theader: http.Header{\n\t\t\t\t\"Range\": {fmt.Sprintf(\"bytes=%d-%d\",\n\t\t\t\t\ti, i+size-1)},\n\t\t\t},\n\t\t\tstart: i,\n\t\t\tsize: size,\n\t\t\tb: nil,\n\t\t}\n\t\ti += size\n\t\tid++\n\t\tg.wg.Add(1)\n\t\tg.getCh <- c\n\t}\n\tclose(g.getCh)\n}\n\nfunc (g *getter) worker() {\n\tfor c := range g.getCh {\n\t\tg.retryGetChunk(c)\n\t}\n\n}\n\nfunc (g *getter) retryGetChunk(c *chunk) {\n\tdefer g.wg.Done()\n\tvar err error\n\tc.b = <-g.sp.get\n\tfor i := 0; i < g.c.NTry; i++ {\n\t\ttime.Sleep(time.Duration(math.Exp2(float64(i))) * 100 * time.Millisecond) \/\/ exponential back-off\n\t\terr = g.getChunk(c)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.debugPrintf(\"error on attempt %d: retrying chunk: %v, error: %s\", i, c.id, err)\n\t}\n\tg.err = err\n\tclose(g.quit) \/\/ out of tries, ensure quit by closing channel\n}\n\nfunc (g *getter) getChunk(c *chunk) error {\n\t\/\/ ensure buffer is empty\n\n\tr, err := http.NewRequest(\"GET\", g.url.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header = c.header\n\tg.b.Sign(r)\n\tresp, err := g.c.Client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 206 {\n\t\treturn newRespError(resp)\n\t}\n\tn, err := io.ReadAtLeast(resp.Body, c.b, int(c.size))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif int64(n) != c.size {\n\t\treturn fmt.Errorf(\"chunk %d: Expected %d bytes, received %d\",\n\t\t\tc.id, c.size, n)\n\t}\n\tg.readCh <- c\n\treturn nil\n}\n\nfunc (g *getter) Read(p []byte) (int, error) {\n\tvar err error\n\tif g.closed {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif g.err != nil {\n\t\treturn 0, g.err\n\t}\n\tnw := 0\n\tfor nw < len(p) {\n\t\tif g.bytesRead == g.contentLen {\n\t\t\treturn nw, io.EOF\n\t\t}\n\t\tif g.rChunk == nil {\n\t\t\tg.rChunk, err = g.nextChunk()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tg.cIdx = 0\n\t\t}\n\n\t\tn := copy(p[nw:], g.rChunk.b[g.cIdx:g.rChunk.size])\n\t\tg.cIdx += int64(n)\n\t\tnw += n\n\t\tg.bytesRead += int64(n)\n\n\t\tif g.cIdx >= g.rChunk.size { \/\/ chunk complete\n\t\t\tg.sp.give <- g.rChunk.b\n\t\t\tg.chunkID++\n\t\t\tg.rChunk = nil\n\t\t}\n\t}\n\treturn nw, nil\n\n}\n\nfunc (g *getter) nextChunk() (*chunk, error) {\n\tfor {\n\n\t\t\/\/ first check qWait\n\t\tc := g.qWait[g.chunkID]\n\t\tif c != nil {\n\t\t\tdelete(g.qWait, g.chunkID)\n\t\t\tif g.c.Md5Check {\n\t\t\t\tif _, err := g.md5.Write(c.b[:c.size]); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t\t\/\/ if next chunk not in qWait, read from channel\n\t\tselect {\n\t\tcase c := <-g.readCh:\n\t\t\tg.qWait[c.id] = c\n\t\tcase <-g.quit:\n\t\t\treturn nil, g.err \/\/ fatal error, quit.\n\t\t}\n\t}\n}\n\nfunc (g *getter) Close() error {\n\tif g.closed {\n\t\treturn syscall.EINVAL\n\t}\n\tif g.err != nil {\n\t\treturn g.err\n\t}\n\tg.wg.Wait()\n\tg.closed = true\n\tclose(g.sp.quit)\n\tif g.bytesRead != g.contentLen {\n\t\treturn fmt.Errorf(\"read error: %d bytes read. expected: %d\", g.bytesRead, g.contentLen)\n\t}\n\tif g.c.Md5Check {\n\t\tif err := g.checkMd5(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *getter) checkMd5() (err error) {\n\tcalcMd5 := fmt.Sprintf(\"%x\", g.md5.Sum(nil))\n\tmd5Path := fmt.Sprint(\".md5\", g.url.Path, \".md5\")\n\tmd5Url, err := g.b.url(md5Path, g.c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.debugPrintln(\"md5: \", calcMd5)\n\tlogger.debugPrintln(\"md5Path: \", md5Path)\n\tresp, err := g.retryRequest(\"GET\", md5Url.String(), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer checkClose(resp.Body, &err)\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"MD5 check failed: %s not found: %s\", md5Url.String(), newRespError(resp))\n\t}\n\tgivenMd5, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\tif calcMd5 != string(givenMd5) {\n\t\treturn fmt.Errorf(\"MD5 mismatch. given:%s calculated:%s\", givenMd5, calcMd5)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package QesyGo\n\nimport (\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\ntype CacheRedis struct {\n\tPool *redis.Pool \/\/ redis connection pool\n\tConninfo string\n\tKey string\n}\n\nfunc (cr *CacheRedis) newPool() {\n\tcr.Pool = &redis.Pool{\n\t\tMaxIdle: 30,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", cr.Conninfo)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n}\n\n\/\/var pool = newPool()\nfunc (cr *CacheRedis) Connect() error {\n\tcr.newPool()\n\tc := cr.Pool.Get()\n\tdefer c.Close()\n\treturn c.Err()\n}\n\nfunc (cr *CacheRedis) do(commandName string, args ...interface{}) (reply interface{}, err error) {\n\tc := cr.Pool.Get()\n\tdefer c.Close()\n\treturn c.Do(commandName, args...)\n}\n\nfunc (cr *CacheRedis) Get(key string) (string, error) {\n\tstr, err := redis.String(cr.do(\"GET\", key))\n\treturn str, err\n}\n\nfunc (cr *CacheRedis) Set(key string, value string) error {\n\t_, err := cr.do(\"SET\", key, value)\n\treturn err\n}\n\nfunc (cr *CacheRedis) Del(key string) error {\n\t_, err := cr.do(\"DEL\", key)\n\treturn err\n}\n\nfunc (cr *CacheRedis) Exists(key string) (bool, error) {\n\treturn redis.Bool(cr.do(\"EXISTS\", key))\n}\n\nfunc (cr *CacheRedis) Expire(key string, second int) (bool, error) {\n\treturn redis.Bool(cr.do(\"EXPIRE\", key, second))\n}\n\nfunc (cr *CacheRedis) Keys(key string) (interface{}, error) {\n\treturn cr.do(\"KEYS\", key)\n}\n\nfunc (cr *CacheRedis) Ttl(key string) (interface{}, error) {\n\treturn redis.Int(cr.do(\"TTL\", key))\n}\n\nfunc (cr *CacheRedis) HMset(key string, arr map[string]string) (interface{}, error) {\n\treturn cr.do(\"HMSET\", redis.Args{}.Add(key).AddFlat(arr)...)\n}\n\nfunc (cr *CacheRedis) HGet(key string, subKey string) (interface{}, error) {\n\treturn cr.do(\"HGET\", key, subKey)\n}\n\nfunc (cr *CacheRedis) HGetAll(key string) (map[string]string, error) {\n\trsByte, err := cr.do(\"HGETALL\", key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trs, err := redis.StringMap(rsByte, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rs, err\n}\n<commit_msg>getKeys []byte to strings<commit_after>package QesyGo\n\nimport (\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\ntype CacheRedis struct {\n\tPool *redis.Pool \/\/ redis connection pool\n\tConninfo string\n\tKey string\n}\n\nfunc (cr *CacheRedis) newPool() {\n\tcr.Pool = &redis.Pool{\n\t\tMaxIdle: 30,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", cr.Conninfo)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n}\n\n\/\/var pool = newPool()\nfunc (cr *CacheRedis) Connect() error {\n\tcr.newPool()\n\tc := cr.Pool.Get()\n\tdefer c.Close()\n\treturn c.Err()\n}\n\nfunc (cr *CacheRedis) do(commandName string, args ...interface{}) (reply interface{}, err error) {\n\tc := cr.Pool.Get()\n\tdefer c.Close()\n\treturn c.Do(commandName, args...)\n}\n\nfunc (cr *CacheRedis) Get(key string) (string, error) {\n\tstr, err := redis.String(cr.do(\"GET\", key))\n\treturn str, err\n}\n\nfunc (cr *CacheRedis) Set(key string, value string) error {\n\t_, err := cr.do(\"SET\", key, value)\n\treturn err\n}\n\nfunc (cr *CacheRedis) Del(key string) error {\n\t_, err := cr.do(\"DEL\", key)\n\treturn err\n}\n\nfunc (cr *CacheRedis) Exists(key string) (bool, error) {\n\treturn redis.Bool(cr.do(\"EXISTS\", key))\n}\n\nfunc (cr *CacheRedis) Expire(key string, second int) (bool, error) {\n\treturn redis.Bool(cr.do(\"EXPIRE\", key, second))\n}\n\nfunc (cr *CacheRedis) Keys(key string) (interface{}, error) {\n\treturn redis.Strings(cr.do(\"KEYS\", key))\n}\n\nfunc (cr *CacheRedis) Ttl(key string) (interface{}, error) {\n\treturn redis.Int(cr.do(\"TTL\", key))\n}\n\nfunc (cr *CacheRedis) HMset(key string, arr map[string]string) (interface{}, error) {\n\treturn cr.do(\"HMSET\", redis.Args{}.Add(key).AddFlat(arr)...)\n}\n\nfunc (cr *CacheRedis) HGet(key string, subKey string) (interface{}, error) {\n\treturn cr.do(\"HGET\", key, subKey)\n}\n\nfunc (cr *CacheRedis) HGetAll(key string) (map[string]string, error) {\n\trsByte, err := cr.do(\"HGETALL\", key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trs, err := redis.StringMap(rsByte, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rs, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Ludovic Fauvet\n\/\/ Licensed under the MIT license\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\nconst (\n\tredisConnectionTimeout = 200 * time.Millisecond\n\tredisReadWriteTimeout = 300 * time.Second\n)\n\nvar (\n\terrUnreachable = errors.New(\"endpoint unreachable\")\n)\n\ntype redisobj struct {\n\tpool *redis.Pool\n}\n\nfunc NewRedis() *redisobj {\n\tr := &redisobj{}\n\n\tr.pool = &redis.Pool{\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn r.connect()\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn r\n}\n\nfunc (r *redisobj) Close() {\n\tr.pool.Close()\n}\n\nfunc (r *redisobj) connect() (redis.Conn, error) {\n\tsentinels := GetConfig().RedisSentinels\n\n\tif len(sentinels) > 0 {\n\n\t\tif len(GetConfig().RedisSentinelMasterName) == 0 {\n\t\t\tlog.Error(\"Config: RedisSentinelMasterName cannot be empty!\")\n\t\t\tgoto single\n\t\t}\n\n\t\tfor _, s := range sentinels {\n\t\t\tlog.Debug(\"Connecting to redis sentinel %s\", s.Host)\n\t\t\tvar master []string\n\t\t\tvar masterhost string\n\t\t\tvar cm redis.Conn\n\n\t\t\tc, err := r.connectTo(s.Host)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/AUTH?\n\t\t\trole, err := r.askRole(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\t\t\tif role != \"sentinel\" {\n\t\t\t\tlog.Error(\"Sentinel: %s is not a sentinel but a %s\", s.Host, role)\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmaster, err = redis.Strings(c.Do(\"SENTINEL\", \"get-master-addr-by-name\", GetConfig().RedisSentinelMasterName))\n\t\t\tif err == redis.ErrNil {\n\t\t\t\tlog.Error(\"Sentinel: %s doesn't know the master-name %s\", s.Host, GetConfig().RedisSentinelMasterName)\n\t\t\t\tgoto closeSentinel\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmasterhost = fmt.Sprintf(\"%s:%s\", master[0], master[1])\n\n\t\t\tcm, err = r.connectTo(masterhost)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tif r.auth(cm) != nil {\n\t\t\t\tlog.Error(\"Redis master: auth failed\")\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\trole, err = r.askRole(cm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\t\t\tif role != \"master\" {\n\t\t\t\tlog.Error(\"Redis master: %s is not a master but a %s\", masterhost, role)\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\t\/\/ Close the connection to the sentinel\n\t\t\tc.Close()\n\n\t\t\tlog.Debug(\"Connected to redis master %s\", masterhost)\n\t\t\treturn cm, nil\n\n\t\tcloseMaster:\n\t\t\tcm.Close()\n\n\t\tcloseSentinel:\n\t\t\tc.Close()\n\t\t}\n\t}\n\nsingle:\n\n\tif len(GetConfig().RedisAddress) == 0 {\n\t\tif len(sentinels) == 0 {\n\t\t\tlog.Error(\"No redis master available\")\n\t\t}\n\t\treturn nil, errUnreachable\n\t}\n\n\tlog.Warning(\"No redis master available, trying using the configured RedisAddress as fallback\")\n\n\tc, err := r.connectTo(GetConfig().RedisAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.auth(c); err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\tlog.Debug(\"Connected to redis master %s\", GetConfig().RedisAddress)\n\treturn c, err\n\n}\n\nfunc (r *redisobj) connectTo(address string) (redis.Conn, error) {\n\treturn redis.DialTimeout(\"tcp\", address, redisConnectionTimeout, redisReadWriteTimeout, redisReadWriteTimeout)\n}\n\nfunc (r *redisobj) askRole(c redis.Conn) (string, error) {\n\troleReply, err := redis.Values(c.Do(\"ROLE\"))\n\trole, err := redis.String(roleReply[0], err)\n\treturn role, err\n}\n\nfunc (r *redisobj) auth(c redis.Conn) (err error) {\n\tif GetConfig().RedisPassword != \"\" {\n\t\t_, err = c.Do(\"AUTH\", GetConfig().RedisPassword)\n\t}\n\treturn\n}\n<commit_msg>redis: ensure the role of the node is of type master<commit_after>\/\/ Copyright (c) 2014 Ludovic Fauvet\n\/\/ Licensed under the MIT license\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\nconst (\n\tredisConnectionTimeout = 200 * time.Millisecond\n\tredisReadWriteTimeout = 300 * time.Second\n)\n\nvar (\n\terrUnreachable = errors.New(\"endpoint unreachable\")\n)\n\ntype redisobj struct {\n\tpool *redis.Pool\n}\n\nfunc NewRedis() *redisobj {\n\tr := &redisobj{}\n\n\tr.pool = &redis.Pool{\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn r.connect()\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn r\n}\n\nfunc (r *redisobj) Close() {\n\tr.pool.Close()\n}\n\nfunc (r *redisobj) connect() (redis.Conn, error) {\n\tsentinels := GetConfig().RedisSentinels\n\n\tif len(sentinels) > 0 {\n\n\t\tif len(GetConfig().RedisSentinelMasterName) == 0 {\n\t\t\tlog.Error(\"Config: RedisSentinelMasterName cannot be empty!\")\n\t\t\tgoto single\n\t\t}\n\n\t\tfor _, s := range sentinels {\n\t\t\tlog.Debug(\"Connecting to redis sentinel %s\", s.Host)\n\t\t\tvar master []string\n\t\t\tvar masterhost string\n\t\t\tvar cm redis.Conn\n\n\t\t\tc, err := r.connectTo(s.Host)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/AUTH?\n\t\t\trole, err := r.askRole(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\t\t\tif role != \"sentinel\" {\n\t\t\t\tlog.Error(\"Sentinel: %s is not a sentinel but a %s\", s.Host, role)\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmaster, err = redis.Strings(c.Do(\"SENTINEL\", \"get-master-addr-by-name\", GetConfig().RedisSentinelMasterName))\n\t\t\tif err == redis.ErrNil {\n\t\t\t\tlog.Error(\"Sentinel: %s doesn't know the master-name %s\", s.Host, GetConfig().RedisSentinelMasterName)\n\t\t\t\tgoto closeSentinel\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmasterhost = fmt.Sprintf(\"%s:%s\", master[0], master[1])\n\n\t\t\tcm, err = r.connectTo(masterhost)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tif r.auth(cm) != nil {\n\t\t\t\tlog.Error(\"Redis master: auth failed\")\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\trole, err = r.askRole(cm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\t\t\tif role != \"master\" {\n\t\t\t\tlog.Error(\"Redis master: %s is not a master but a %s\", masterhost, role)\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\t\/\/ Close the connection to the sentinel\n\t\t\tc.Close()\n\n\t\t\tlog.Debug(\"Connected to redis master %s\", masterhost)\n\t\t\treturn cm, nil\n\n\t\tcloseMaster:\n\t\t\tcm.Close()\n\n\t\tcloseSentinel:\n\t\t\tc.Close()\n\t\t}\n\t}\n\nsingle:\n\n\tif len(GetConfig().RedisAddress) == 0 {\n\t\tif len(sentinels) == 0 {\n\t\t\tlog.Error(\"No redis master available\")\n\t\t}\n\t\treturn nil, errUnreachable\n\t}\n\n\tlog.Warning(\"No redis master available, trying using the configured RedisAddress as fallback\")\n\n\tc, err := r.connectTo(GetConfig().RedisAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.auth(c); err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\trole, err := r.askRole(c)\n\tif err != nil {\n\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\treturn nil, errUnreachable\n\t}\n\tif role != \"master\" {\n\t\tlog.Error(\"Redis master: %s is not a master but a %s\", GetConfig().RedisAddress, role)\n\t\treturn nil, errUnreachable\n\t}\n\tlog.Debug(\"Connected to redis master %s\", GetConfig().RedisAddress)\n\treturn c, err\n\n}\n\nfunc (r *redisobj) connectTo(address string) (redis.Conn, error) {\n\treturn redis.DialTimeout(\"tcp\", address, redisConnectionTimeout, redisReadWriteTimeout, redisReadWriteTimeout)\n}\n\nfunc (r *redisobj) askRole(c redis.Conn) (string, error) {\n\troleReply, err := redis.Values(c.Do(\"ROLE\"))\n\trole, err := redis.String(roleReply[0], err)\n\treturn role, err\n}\n\nfunc (r *redisobj) auth(c redis.Conn) (err error) {\n\tif GetConfig().RedisPassword != \"\" {\n\t\t_, err = c.Do(\"AUTH\", GetConfig().RedisPassword)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The nats-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats-operator\/pkg\/client\"\n\tkubernetesutil \"github.com\/nats-io\/nats-operator\/pkg\/util\/kubernetes\"\n\t\"github.com\/nats-io\/nats-operator\/pkg\/util\/probe\"\n\t\"github.com\/nats-io\/nats-operator\/pkg\/util\/retryutil\"\n\t\"github.com\/nats-io\/nats-operator\/test\/e2e\/e2eutil\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\nvar Global *Framework\n\ntype Framework struct {\n\topImage string\n\tKubeClient corev1.CoreV1Interface\n\tCRClient client.NatsClusterCR\n\tNamespace string\n}\n\n\/\/ Setup setups a test framework and points \"Global\" to it.\nfunc Setup() error {\n\tkubeconfig := flag.String(\"kubeconfig\", \"\", \"kube config path, e.g. $HOME\/.kube\/config\")\n\topImage := flag.String(\"operator-image\", \"\", \"operator image, e.g. nats-io\/nats-operator\")\n\tns := flag.String(\"namespace\", \"default\", \"e2e test namespace\")\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcli, err := corev1.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcrClient, err := client.NewCRClient(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tGlobal = &Framework{\n\t\tKubeClient: cli,\n\t\tCRClient: crClient,\n\t\tNamespace: *ns,\n\t\topImage: *opImage,\n\t}\n\treturn Global.setup()\n}\n\nfunc Teardown() error {\n\tif err := Global.deleteNatsOperator(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: check all deleted and wait\n\tGlobal = nil\n\tlogrus.Info(\"e2e teardown successfully\")\n\treturn nil\n}\n\nfunc (f *Framework) setup() error {\n\tif err := f.SetupNatsOperator(); err != nil {\n\t\treturn fmt.Errorf(\"failed to setup NATS operator: %v\", err)\n\t}\n\tlogrus.Info(\"NATS operator created successfully\")\n\n\tlogrus.Info(\"e2e setup successfully\")\n\treturn nil\n}\n\nfunc (f *Framework) SetupNatsOperator() error {\n\t\/\/ TODO: unify this and the yaml file in example\/\n\tcmd := []string{\"\/usr\/local\/bin\/nats-operator\"}\n\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"nats-operator\",\n\t\t\tLabels: map[string]string{\"name\": \"nats-operator\"},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"nats-operator\",\n\t\t\t\t\tImage: f.opImage,\n\t\t\t\t\tImagePullPolicy: v1.PullAlways,\n\t\t\t\t\tCommand: cmd,\n\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\t\t\tValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: \"metadata.namespace\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"MY_POD_NAME\",\n\t\t\t\t\t\t\tValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: \"metadata.name\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReadinessProbe: &v1.Probe{\n\t\t\t\t\t\tHandler: v1.Handler{\n\t\t\t\t\t\t\tHTTPGet: &v1.HTTPGetAction{\n\t\t\t\t\t\t\t\tPath: probe.HTTPReadyzEndpoint,\n\t\t\t\t\t\t\t\tPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tInitialDelaySeconds: 3,\n\t\t\t\t\t\tPeriodSeconds: 3,\n\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t},\n\t}\n\n\tp, err := kubernetesutil.CreateAndWaitPod(f.KubeClient, f.Namespace, pod, 60*time.Second)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Infof(\"NATS operator pod is running on node (%s)\", p.Spec.NodeName)\n\n\treturn e2eutil.WaitUntilOperatorReady(f.KubeClient, f.Namespace, \"nats-operator\")\n}\n\nfunc (f *Framework) DeleteNatsOperatorCompletely() error {\n\terr := f.deleteNatsOperator()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ On k8s 1.6.1, grace period isn't accurate. It took ~10s for operator pod to completely disappear.\n\t\/\/ We work around by increasing the wait time. Revisit this later.\n\terr = retryutil.Retry(5*time.Second, 6, func() (bool, error) {\n\t\t_, err := f.KubeClient.Pods(f.Namespace).Get(\"nats-operator\", metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif kubernetesutil.IsKubernetesResourceNotFoundError(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fail to wait NATS operator pod gone from API: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (f *Framework) deleteNatsOperator() error {\n\treturn f.KubeClient.Pods(f.Namespace).Delete(\"nats-operator\", metav1.NewDeleteOptions(1))\n}\n<commit_msg>e2e: use nats-operator service account<commit_after>\/\/ Copyright 2017 The nats-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/intstr\"\n\tcorev1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\t\"github.com\/nats-io\/nats-operator\/pkg\/client\"\n\tkubernetesutil \"github.com\/nats-io\/nats-operator\/pkg\/util\/kubernetes\"\n\t\"github.com\/nats-io\/nats-operator\/pkg\/util\/probe\"\n\t\"github.com\/nats-io\/nats-operator\/pkg\/util\/retryutil\"\n\t\"github.com\/nats-io\/nats-operator\/test\/e2e\/e2eutil\"\n)\n\nvar Global *Framework\n\ntype Framework struct {\n\topImage string\n\tKubeClient corev1.CoreV1Interface\n\tCRClient client.NatsClusterCR\n\tNamespace string\n}\n\n\/\/ Setup setups a test framework and points \"Global\" to it.\nfunc Setup() error {\n\tkubeconfig := flag.String(\"kubeconfig\", \"\", \"kube config path, e.g. $HOME\/.kube\/config\")\n\topImage := flag.String(\"operator-image\", \"\", \"operator image, e.g. nats-io\/nats-operator\")\n\tns := flag.String(\"namespace\", \"default\", \"e2e test namespace\")\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcli, err := corev1.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcrClient, err := client.NewCRClient(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tGlobal = &Framework{\n\t\tKubeClient: cli,\n\t\tCRClient: crClient,\n\t\tNamespace: *ns,\n\t\topImage: *opImage,\n\t}\n\treturn Global.setup()\n}\n\nfunc Teardown() error {\n\tif err := Global.deleteNatsOperator(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: check all deleted and wait\n\tGlobal = nil\n\tlogrus.Info(\"e2e teardown successfully\")\n\treturn nil\n}\n\nfunc (f *Framework) setup() error {\n\tif err := f.SetupNatsOperator(); err != nil {\n\t\treturn fmt.Errorf(\"failed to setup NATS operator: %v\", err)\n\t}\n\tlogrus.Info(\"NATS operator created successfully\")\n\n\tlogrus.Info(\"e2e setup successfully\")\n\treturn nil\n}\n\nfunc (f *Framework) SetupNatsOperator() error {\n\t\/\/ TODO: unify this and the yaml file in example\/\n\tcmd := []string{\"\/usr\/local\/bin\/nats-operator\"}\n\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"nats-operator\",\n\t\t\tLabels: map[string]string{\"name\": \"nats-operator\"},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"nats-operator\",\n\t\t\t\t\tImage: f.opImage,\n\t\t\t\t\tImagePullPolicy: v1.PullAlways,\n\t\t\t\t\tCommand: cmd,\n\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\t\t\tValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: \"metadata.namespace\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"MY_POD_NAME\",\n\t\t\t\t\t\t\tValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: \"metadata.name\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReadinessProbe: &v1.Probe{\n\t\t\t\t\t\tHandler: v1.Handler{\n\t\t\t\t\t\t\tHTTPGet: &v1.HTTPGetAction{\n\t\t\t\t\t\t\t\tPath: probe.HTTPReadyzEndpoint,\n\t\t\t\t\t\t\t\tPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tInitialDelaySeconds: 3,\n\t\t\t\t\t\tPeriodSeconds: 3,\n\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tServiceAccountName: \"nats-operator\",\n\t\t},\n\t}\n\n\tp, err := kubernetesutil.CreateAndWaitPod(f.KubeClient, f.Namespace, pod, 60*time.Second)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Infof(\"NATS operator pod is running on node (%s)\", p.Spec.NodeName)\n\n\treturn e2eutil.WaitUntilOperatorReady(f.KubeClient, f.Namespace, \"nats-operator\")\n}\n\nfunc (f *Framework) DeleteNatsOperatorCompletely() error {\n\terr := f.deleteNatsOperator()\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ On k8s 1.6.1, grace period isn't accurate. It took ~10s for operator pod to completely disappear.\n\t\/\/ We work around by increasing the wait time. Revisit this later.\n\terr = retryutil.Retry(5*time.Second, 6, func() (bool, error) {\n\t\t_, err := f.KubeClient.Pods(f.Namespace).Get(\"nats-operator\", metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif kubernetesutil.IsKubernetesResourceNotFoundError(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fail to wait NATS operator pod gone from API: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (f *Framework) deleteNatsOperator() error {\n\treturn f.KubeClient.Pods(f.Namespace).Delete(\"nats-operator\", metav1.NewDeleteOptions(1))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tstorage \"k8s.io\/kubernetes\/pkg\/apis\/storage\/v1beta1\"\n\tstorageutil \"k8s.io\/kubernetes\/pkg\/apis\/storage\/v1beta1\/util\"\n\tclientset \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ Requested size of the volume\n\trequestedSize = \"1500Mi\"\n\t\/\/ Plugin name of the external provisioner\n\texternalPluginName = \"example.com\/nfs\"\n)\n\nfunc testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, expectedSize string) {\n\terr := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tBy(\"checking the claim\")\n\t\/\/ Get new copy of the claim\n\tclaim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t\/\/ Get the bound PV\n\tpv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t\/\/ Check sizes\n\texpectedCapacity := resource.MustParse(expectedSize)\n\tpvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]\n\tExpect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))\n\n\trequestedCapacity := resource.MustParse(requestedSize)\n\tclaimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]\n\tExpect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))\n\n\t\/\/ Check PV properties\n\tExpect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))\n\texpectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}\n\tExpect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))\n\tExpect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))\n\tExpect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))\n\n\t\/\/ We start two pods:\n\t\/\/ - The first writes 'hello word' to the \/mnt\/test (= the volume).\n\t\/\/ - The second one runs grep 'hello world' on \/mnt\/test.\n\t\/\/ If both succeed, Kubernetes actually allocated something that is\n\t\/\/ persistent across pods.\n\tBy(\"checking the created volume is writable\")\n\trunInPodWithVolume(client, claim.Namespace, claim.Name, \"echo 'hello world' > \/mnt\/test\/data\")\n\n\tBy(\"checking the created volume is readable and retains data\")\n\trunInPodWithVolume(client, claim.Namespace, claim.Name, \"grep 'hello world' \/mnt\/test\/data\")\n\n\t\/\/ Ugly hack: if we delete the AWS\/GCE\/OpenStack volume here, it will\n\t\/\/ probably collide with destruction of the pods above - the pods\n\t\/\/ still have the volume attached (kubelet is slow...) and deletion\n\t\/\/ of attached volume is not allowed by AWS\/GCE\/OpenStack.\n\t\/\/ Kubernetes *will* retry deletion several times in\n\t\/\/ pvclaimbinder-sync-period.\n\t\/\/ So, technically, this sleep is not needed. On the other hand,\n\t\/\/ the sync perion is 10 minutes and we really don't want to wait\n\t\/\/ 10 minutes here. There is no way how to see if kubelet is\n\t\/\/ finished with cleaning volumes. A small sleep here actually\n\t\/\/ speeds up the test!\n\t\/\/ Three minutes should be enough to clean up the pods properly.\n\t\/\/ We've seen GCE PD detach to take more than 1 minute.\n\tBy(\"Sleeping to let kubelet destroy all pods\")\n\ttime.Sleep(3 * time.Minute)\n\n\tBy(\"deleting the claim\")\n\tframework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))\n\n\t\/\/ Wait for the PV to get deleted too.\n\tframework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))\n}\n\nvar _ = framework.KubeDescribe(\"Dynamic provisioning\", func() {\n\tf := framework.NewDefaultFramework(\"volume-provisioning\")\n\n\t\/\/ filled in BeforeEach\n\tvar c clientset.Interface\n\tvar ns string\n\n\tBeforeEach(func() {\n\t\tc = f.ClientSet\n\t\tns = f.Namespace.Name\n\t})\n\n\tframework.KubeDescribe(\"DynamicProvisioner\", func() {\n\t\tIt(\"should create and delete persistent volumes [Slow] [Volume]\", func() {\n\t\t\tframework.SkipUnlessProviderIs(\"openstack\", \"gce\", \"aws\", \"gke\")\n\n\t\t\tBy(\"creating a StorageClass\")\n\t\t\tclass := newStorageClass(\"\")\n\t\t\t_, err := c.Storage().StorageClasses().Create(class)\n\t\t\tdefer c.Storage().StorageClasses().Delete(class.Name, nil)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"creating a claim with a dynamic provisioning annotation\")\n\t\t\tclaim := newClaim(ns, false)\n\t\t\tdefer func() {\n\t\t\t\tc.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)\n\t\t\t}()\n\t\t\tclaim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\/\/ Expected size of the volume is 2GiB, because all three supported cloud\n\t\t\t\/\/ providers allocate volumes in 1GiB chunks.\n\t\t\ttestDynamicProvisioning(c, claim, \"2Gi\")\n\t\t})\n\t})\n\n\tframework.KubeDescribe(\"DynamicProvisioner Alpha\", func() {\n\t\tIt(\"should create and delete alpha persistent volumes [Slow] [Volume]\", func() {\n\t\t\tframework.SkipUnlessProviderIs(\"openstack\", \"gce\", \"aws\", \"gke\")\n\n\t\t\tBy(\"creating a claim with an alpha dynamic provisioning annotation\")\n\t\t\tclaim := newClaim(ns, true)\n\t\t\tdefer func() {\n\t\t\t\tc.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)\n\t\t\t}()\n\t\t\tclaim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttestDynamicProvisioning(c, claim, \"2Gi\")\n\t\t})\n\t})\n\n\tframework.KubeDescribe(\"DynamicProvisioner External\", func() {\n\t\tIt(\"should let an external dynamic provisioner create and delete persistent volumes [Slow]\", func() {\n\t\t\tBy(\"creating an external dynamic provisioner pod\")\n\t\t\tpod := startExternalProvisioner(c, ns)\n\t\t\tdefer c.Core().Pods(ns).Delete(pod.Name, nil)\n\n\t\t\tBy(\"creating a StorageClass\")\n\t\t\tclass := newStorageClass(externalPluginName)\n\t\t\t_, err := c.Storage().StorageClasses().Create(class)\n\t\t\tdefer c.Storage().StorageClasses().Delete(class.Name, nil)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"creating a claim with a dynamic provisioning annotation\")\n\t\t\tclaim := newClaim(ns, false)\n\t\t\tdefer func() {\n\t\t\t\tc.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)\n\t\t\t}()\n\t\t\tclaim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\/\/ Expected size of the externally provisioned volume depends on the external\n\t\t\t\/\/ provisioner: for nfs-provisioner used here, it's equal to requested\n\t\t\ttestDynamicProvisioning(c, claim, requestedSize)\n\t\t})\n\t})\n})\n\nfunc newClaim(ns string, alpha bool) *v1.PersistentVolumeClaim {\n\tclaim := v1.PersistentVolumeClaim{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tGenerateName: \"pvc-\",\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{\n\t\t\t\tv1.ReadWriteOnce,\n\t\t\t},\n\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceName(v1.ResourceStorage): resource.MustParse(requestedSize),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif alpha {\n\t\tclaim.Annotations = map[string]string{\n\t\t\tstorageutil.AlphaStorageClassAnnotation: \"\",\n\t\t}\n\t} else {\n\t\tclaim.Annotations = map[string]string{\n\t\t\tstorageutil.StorageClassAnnotation: \"fast\",\n\t\t}\n\n\t}\n\n\treturn &claim\n}\n\n\/\/ runInPodWithVolume runs a command in a pod with given claim mounted to \/mnt directory.\nfunc runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {\n\tpod := &v1.Pod{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Pod\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tGenerateName: \"pvc-volume-tester-\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"volume-tester\",\n\t\t\t\t\tImage: \"gcr.io\/google_containers\/busybox:1.24\",\n\t\t\t\t\tCommand: []string{\"\/bin\/sh\"},\n\t\t\t\t\tArgs: []string{\"-c\", command},\n\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"my-volume\",\n\t\t\t\t\t\t\tMountPath: \"\/mnt\/test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"my-volume\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: claimName,\n\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tpod, err := c.Core().Pods(ns).Create(pod)\n\tdefer func() {\n\t\tframework.ExpectNoError(c.Core().Pods(ns).Delete(pod.Name, nil))\n\t}()\n\tframework.ExpectNoError(err, \"Failed to create pod: %v\", err)\n\tframework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))\n}\n\nfunc newStorageClass(pluginName string) *storage.StorageClass {\n\tif pluginName == \"\" {\n\t\tswitch {\n\t\tcase framework.ProviderIs(\"gke\"), framework.ProviderIs(\"gce\"):\n\t\t\tpluginName = \"kubernetes.io\/gce-pd\"\n\t\tcase framework.ProviderIs(\"aws\"):\n\t\t\tpluginName = \"kubernetes.io\/aws-ebs\"\n\t\tcase framework.ProviderIs(\"openstack\"):\n\t\t\tpluginName = \"kubernetes.io\/cinder\"\n\t\t}\n\t}\n\n\treturn &storage.StorageClass{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"StorageClass\",\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: \"fast\",\n\t\t},\n\t\tProvisioner: pluginName,\n\t}\n}\n\nfunc startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {\n\tpodClient := c.Core().Pods(ns)\n\n\tprovisionerPod := &v1.Pod{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Pod\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tGenerateName: \"external-provisioner-\",\n\t\t},\n\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"nfs-provisioner\",\n\t\t\t\t\tImage: \"quay.io\/kubernetes_incubator\/nfs-provisioner:v1.0.1\",\n\t\t\t\t\tSecurityContext: &v1.SecurityContext{\n\t\t\t\t\t\tCapabilities: &v1.Capabilities{\n\t\t\t\t\t\t\tAdd: []v1.Capability{\"DAC_READ_SEARCH\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\"-provisioner=\" + externalPluginName,\n\t\t\t\t\t\t\"-grace-period=0\",\n\t\t\t\t\t},\n\t\t\t\t\tPorts: []v1.ContainerPort{\n\t\t\t\t\t\t{Name: \"nfs\", ContainerPort: 2049},\n\t\t\t\t\t\t{Name: \"mountd\", ContainerPort: 20048},\n\t\t\t\t\t\t{Name: \"rpcbind\", ContainerPort: 111},\n\t\t\t\t\t\t{Name: \"rpcbind-udp\", ContainerPort: 111, Protocol: v1.ProtocolUDP},\n\t\t\t\t\t},\n\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"POD_IP\",\n\t\t\t\t\t\t\tValueFrom: &v1.EnvVarSource{\n\t\t\t\t\t\t\t\tFieldRef: &v1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\t\tFieldPath: \"status.podIP\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"export-volume\",\n\t\t\t\t\t\t\tMountPath: \"\/export\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"export-volume\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tEmptyDir: &v1.EmptyDirVolumeSource{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tprovisionerPod, err := podClient.Create(provisionerPod)\n\tframework.ExpectNoError(err, \"Failed to create %s pod: %v\", provisionerPod.Name, err)\n\n\tframework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))\n\n\tBy(\"locating the provisioner pod\")\n\tpod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})\n\tframework.ExpectNoError(err, \"Cannot locate the provisioner pod %v: %v\", provisionerPod.Name, err)\n\n\treturn pod\n}\n<commit_msg>Revert \"Add e2e test for external pv provisioning\"<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tstorage \"k8s.io\/kubernetes\/pkg\/apis\/storage\/v1beta1\"\n\tstorageutil \"k8s.io\/kubernetes\/pkg\/apis\/storage\/v1beta1\/util\"\n\tclientset \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\t\/\/ Requested size of the volume\n\trequestedSize = \"1500Mi\"\n\t\/\/ Expected size of the volume is 2GiB, because all three supported cloud\n\t\/\/ providers allocate volumes in 1GiB chunks.\n\texpectedSize = \"2Gi\"\n)\n\nfunc testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {\n\terr := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tBy(\"checking the claim\")\n\t\/\/ Get new copy of the claim\n\tclaim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t\/\/ Get the bound PV\n\tpv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t\/\/ Check sizes\n\texpectedCapacity := resource.MustParse(expectedSize)\n\tpvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]\n\tExpect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))\n\n\trequestedCapacity := resource.MustParse(requestedSize)\n\tclaimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]\n\tExpect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))\n\n\t\/\/ Check PV properties\n\tExpect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))\n\texpectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}\n\tExpect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))\n\tExpect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))\n\tExpect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))\n\n\t\/\/ We start two pods:\n\t\/\/ - The first writes 'hello word' to the \/mnt\/test (= the volume).\n\t\/\/ - The second one runs grep 'hello world' on \/mnt\/test.\n\t\/\/ If both succeed, Kubernetes actually allocated something that is\n\t\/\/ persistent across pods.\n\tBy(\"checking the created volume is writable\")\n\trunInPodWithVolume(client, claim.Namespace, claim.Name, \"echo 'hello world' > \/mnt\/test\/data\")\n\n\tBy(\"checking the created volume is readable and retains data\")\n\trunInPodWithVolume(client, claim.Namespace, claim.Name, \"grep 'hello world' \/mnt\/test\/data\")\n\n\t\/\/ Ugly hack: if we delete the AWS\/GCE\/OpenStack volume here, it will\n\t\/\/ probably collide with destruction of the pods above - the pods\n\t\/\/ still have the volume attached (kubelet is slow...) and deletion\n\t\/\/ of attached volume is not allowed by AWS\/GCE\/OpenStack.\n\t\/\/ Kubernetes *will* retry deletion several times in\n\t\/\/ pvclaimbinder-sync-period.\n\t\/\/ So, technically, this sleep is not needed. On the other hand,\n\t\/\/ the sync perion is 10 minutes and we really don't want to wait\n\t\/\/ 10 minutes here. There is no way how to see if kubelet is\n\t\/\/ finished with cleaning volumes. A small sleep here actually\n\t\/\/ speeds up the test!\n\t\/\/ Three minutes should be enough to clean up the pods properly.\n\t\/\/ We've seen GCE PD detach to take more than 1 minute.\n\tBy(\"Sleeping to let kubelet destroy all pods\")\n\ttime.Sleep(3 * time.Minute)\n\n\tBy(\"deleting the claim\")\n\tframework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))\n\n\t\/\/ Wait for the PV to get deleted too.\n\tframework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))\n}\n\nvar _ = framework.KubeDescribe(\"Dynamic provisioning\", func() {\n\tf := framework.NewDefaultFramework(\"volume-provisioning\")\n\n\t\/\/ filled in BeforeEach\n\tvar c clientset.Interface\n\tvar ns string\n\n\tBeforeEach(func() {\n\t\tc = f.ClientSet\n\t\tns = f.Namespace.Name\n\t})\n\n\tframework.KubeDescribe(\"DynamicProvisioner\", func() {\n\t\tIt(\"should create and delete persistent volumes [Slow] [Volume]\", func() {\n\t\t\tframework.SkipUnlessProviderIs(\"openstack\", \"gce\", \"aws\", \"gke\")\n\n\t\t\tBy(\"creating a StorageClass\")\n\t\t\tclass := newStorageClass()\n\t\t\t_, err := c.Storage().StorageClasses().Create(class)\n\t\t\tdefer c.Storage().StorageClasses().Delete(class.Name, nil)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"creating a claim with a dynamic provisioning annotation\")\n\t\t\tclaim := newClaim(ns, false)\n\t\t\tdefer func() {\n\t\t\t\tc.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)\n\t\t\t}()\n\t\t\tclaim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttestDynamicProvisioning(c, claim)\n\t\t})\n\t})\n\n\tframework.KubeDescribe(\"DynamicProvisioner Alpha\", func() {\n\t\tIt(\"should create and delete alpha persistent volumes [Slow] [Volume]\", func() {\n\t\t\tframework.SkipUnlessProviderIs(\"openstack\", \"gce\", \"aws\", \"gke\")\n\n\t\t\tBy(\"creating a claim with an alpha dynamic provisioning annotation\")\n\t\t\tclaim := newClaim(ns, true)\n\t\t\tdefer func() {\n\t\t\t\tc.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)\n\t\t\t}()\n\t\t\tclaim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttestDynamicProvisioning(c, claim)\n\t\t})\n\t})\n})\n\nfunc newClaim(ns string, alpha bool) *v1.PersistentVolumeClaim {\n\tclaim := v1.PersistentVolumeClaim{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tGenerateName: \"pvc-\",\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{\n\t\t\t\tv1.ReadWriteOnce,\n\t\t\t},\n\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceName(v1.ResourceStorage): resource.MustParse(requestedSize),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif alpha {\n\t\tclaim.Annotations = map[string]string{\n\t\t\tstorageutil.AlphaStorageClassAnnotation: \"\",\n\t\t}\n\t} else {\n\t\tclaim.Annotations = map[string]string{\n\t\t\tstorageutil.StorageClassAnnotation: \"fast\",\n\t\t}\n\n\t}\n\n\treturn &claim\n}\n\n\/\/ runInPodWithVolume runs a command in a pod with given claim mounted to \/mnt directory.\nfunc runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {\n\tpod := &v1.Pod{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Pod\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tGenerateName: \"pvc-volume-tester-\",\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"volume-tester\",\n\t\t\t\t\tImage: \"gcr.io\/google_containers\/busybox:1.24\",\n\t\t\t\t\tCommand: []string{\"\/bin\/sh\"},\n\t\t\t\t\tArgs: []string{\"-c\", command},\n\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"my-volume\",\n\t\t\t\t\t\t\tMountPath: \"\/mnt\/test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"my-volume\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: claimName,\n\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tpod, err := c.Core().Pods(ns).Create(pod)\n\tdefer func() {\n\t\tframework.ExpectNoError(c.Core().Pods(ns).Delete(pod.Name, nil))\n\t}()\n\tframework.ExpectNoError(err, \"Failed to create pod: %v\", err)\n\tframework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))\n}\n\nfunc newStorageClass() *storage.StorageClass {\n\tvar pluginName string\n\n\tswitch {\n\tcase framework.ProviderIs(\"gke\"), framework.ProviderIs(\"gce\"):\n\t\tpluginName = \"kubernetes.io\/gce-pd\"\n\tcase framework.ProviderIs(\"aws\"):\n\t\tpluginName = \"kubernetes.io\/aws-ebs\"\n\tcase framework.ProviderIs(\"openstack\"):\n\t\tpluginName = \"kubernetes.io\/cinder\"\n\t}\n\n\treturn &storage.StorageClass{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"StorageClass\",\n\t\t},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: \"fast\",\n\t\t},\n\t\tProvisioner: pluginName,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/glendc\/cgreader\"\n)\n\nvar mountains [8]int\n\ntype ship struct {\n\tx, y, target int\n}\n\nfunc (s *ship) GetTarget() {\n\ty := 0\n\ts.target = -1\n\tfor i, mountain := range mountains {\n\t\tif mountain >= y {\n\t\t\ty = mountain\n\t\t\ts.target = i\n\t\t}\n\t}\n}\n\nfunc (s *ship) FireOrHold() string {\n\tif s.target == -1 {\n\t\ts.GetTarget()\n\t}\n\n\tif s.x == s.target {\n\t\ts.target = -1\n\t\treturn \"FIRE\"\n\t}\n\n\treturn \"HOLD\"\n}\n\nvar hero ship\n\nfunc Initialize(input <-chan string) {\n\thero = ship{0, 0, -1}\n}\n\nfunc Update(input <-chan string, output chan string) {\n\tfmt.Sscanf(<-input, \"%d %d\", &hero.x, &hero.y)\n\tfor i := range mountains {\n\t\tfmt.Sscanf(<-input, \"%d\", &mountains[i])\n\t}\n\n\toutput <- hero.FireOrHold()\n}\n\nfunc main() {\n\tcgreader.SetFrameRate(5)\n\tcgreader.RunKirkProgram(\"..\/..\/input\/kirk_6.txt\", true, Initialize, Update)\n\t\/*cgreader.RunKirkPrograms(\n\tcgreader.GetFileList(\"..\/..\/input\/kirk_%d.txt\", 6),\n\tfalse,\n\tInitialize,\n\tUpdate)*\/\n}\n<commit_msg>kirk solution works<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/glendc\/cgreader\"\n)\n\nvar mountains [8]int\n\ntype ship struct {\n\tx, y, target int\n}\n\nfunc (s *ship) GetTarget() {\n\ty := 0\n\ts.target = -1\n\tfor i, mountain := range mountains {\n\t\tif mountain >= y {\n\t\t\ty = mountain\n\t\t\ts.target = i\n\t\t}\n\t}\n}\n\nfunc (s *ship) FireOrHold() string {\n\tif s.target == -1 {\n\t\ts.GetTarget()\n\t}\n\n\tif s.x == s.target {\n\t\ts.target = -1\n\t\treturn \"FIRE\"\n\t}\n\n\treturn \"HOLD\"\n}\n\nvar hero ship\n\nfunc Initialize(input <-chan string) {\n\thero = ship{0, 0, -1}\n}\n\nfunc Update(input <-chan string, output chan string) {\n\tfmt.Sscanf(<-input, \"%d %d\", &hero.x, &hero.y)\n\tfor i := range mountains {\n\t\tfmt.Sscanf(<-input, \"%d\", &mountains[i])\n\t}\n\n\toutput <- hero.FireOrHold()\n}\n\nfunc main() {\n\t\/\/cgreader.SetFrameRate(10)\n\t\/\/cgreader.RunKirkProgram(\"..\/..\/input\/kirk_6.txt\", true, Initialize, Update)\n\tcgreader.RunKirkPrograms(\n\t\tcgreader.GetFileList(\"..\/..\/input\/kirk_%d.txt\", 6),\n\t\tfalse,\n\t\tInitialize,\n\t\tUpdate)\n}\n<|endoftext|>"} {"text":"<commit_before>package pulls\n\nimport (\n\tgh \"github.com\/crosbymichael\/octokat\"\n\t\"strconv\"\n)\n\n\/\/ Top level type that manages a repository\ntype Maintainer struct {\n\trepo gh.Repo\n\tclient *gh.Client\n}\n\nfunc NewMaintainer(client *gh.Client, org, repo string) (*Maintainer, error) {\n\treturn &Maintainer{\n\t\trepo: gh.Repo{Name: repo, UserName: org},\n\t\tclient: gh.NewClient(),\n\t}, nil\n}\n\nfunc (m *Maintainer) Repository() (*gh.Repository, error) {\n\treturn m.client.Repository(m.repo, nil)\n}\n\n\/\/ Return all pull requests\nfunc (m *Maintainer) GetPullRequests(state string) ([]*gh.PullRequest, error) {\n\to := &gh.Options{}\n\to.QueryParams = map[string]string{\n\t\t\"state\": state,\n\t}\n\treturn m.client.PullRequests(m.repo, o)\n}\n\n\/\/ Return a single pull request\nfunc (m *Maintainer) GetPullRequest(number string) (*gh.PullRequest, error) {\n\treturn m.client.PullRequest(m.repo, number, nil)\n}\n\n\/\/ Return all comments for an issue or pull request\nfunc (m *Maintainer) GetComments(pr *gh.PullRequest) ([]gh.Comment, error) {\n\tnumber := strconv.Itoa(pr.Number)\n\treturn m.client.Comments(m.repo, number, nil)\n}\n\nfunc (m *Maintainer) GetNoMergePullRequests() ([]*gh.PullRequest, error) {\n\tprs, err := m.GetPullRequests(\"open\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := []*gh.PullRequest{}\n\tfor _, pr := range prs {\n\t\tfullPr, err := m.GetPullRequest(strconv.Itoa(pr.Number))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !fullPr.Mergeable {\n\t\t\tout = append(out, fullPr)\n\t\t}\n\t}\n\treturn out, nil\n}\n<commit_msg>Use client that is passed to NewMaintainer<commit_after>package pulls\n\nimport (\n\tgh \"github.com\/crosbymichael\/octokat\"\n\t\"strconv\"\n)\n\n\/\/ Top level type that manages a repository\ntype Maintainer struct {\n\trepo gh.Repo\n\tclient *gh.Client\n}\n\nfunc NewMaintainer(client *gh.Client, org, repo string) (*Maintainer, error) {\n\treturn &Maintainer{\n\t\trepo: gh.Repo{Name: repo, UserName: org},\n\t\tclient: client,\n\t}, nil\n}\n\nfunc (m *Maintainer) Repository() (*gh.Repository, error) {\n\treturn m.client.Repository(m.repo, nil)\n}\n\n\/\/ Return all pull requests\nfunc (m *Maintainer) GetPullRequests(state string) ([]*gh.PullRequest, error) {\n\to := &gh.Options{}\n\to.QueryParams = map[string]string{\n\t\t\"state\": state,\n\t}\n\treturn m.client.PullRequests(m.repo, o)\n}\n\n\/\/ Return a single pull request\nfunc (m *Maintainer) GetPullRequest(number string) (*gh.PullRequest, error) {\n\treturn m.client.PullRequest(m.repo, number, nil)\n}\n\n\/\/ Return all comments for an issue or pull request\nfunc (m *Maintainer) GetComments(pr *gh.PullRequest) ([]gh.Comment, error) {\n\tnumber := strconv.Itoa(pr.Number)\n\treturn m.client.Comments(m.repo, number, nil)\n}\n\n\/\/ Return all pull requests that cannot be merged cleanly\nfunc (m *Maintainer) GetNoMergePullRequests() ([]*gh.PullRequest, error) {\n\tprs, err := m.GetPullRequests(\"open\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := []*gh.PullRequest{}\n\tfor _, pr := range prs {\n\t\tfullPr, err := m.GetPullRequest(strconv.Itoa(pr.Number))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !fullPr.Mergeable {\n\t\t\tout = append(out, fullPr)\n\t\t}\n\t}\n\treturn out, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gremgo\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\ntype dialer interface {\n\tconnect() error\n\twrite([]byte) error\n\tread() ([]byte, error)\n}\n\n\/\/\/\/\/\n\/*\nWebSocket Connection\n*\/\n\/\/\/\/\/\n\n\/\/ Ws is the dialer for a WebSocket connection\ntype Ws struct {\n\thost string\n\tconn *websocket.Conn\n}\n\nfunc (ws *Ws) connect() (err error) {\n\td := websocket.Dialer{}\n\tws.conn, _, err = d.Dial(ws.host, http.Header{})\n\treturn\n}\n\nfunc (ws *Ws) write(msg []byte) (err error) {\n\terr = ws.conn.WriteMessage(2, msg)\n\treturn\n}\n\nfunc (ws *Ws) read() (msg []byte, err error) {\n\t_, msg, err = ws.conn.ReadMessage()\n\treturn\n}\n\n\/\/\/\/\/\n\nfunc (c *Client) writeWorker() { \/\/ writeWorker works on a loop and dispatches messages as soon as it recieves them\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.requests: \/\/ Wait for message send request\n\t\t\terr := c.conn.write(msg) \/\/ Write message\n\t\t\tif err != nil { \/\/ TODO: Fix error handling here\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (c *Client) readWorker() { \/\/ readWorker works on a loop and sorts messages as soon as it recieves them\n\tfor {\n\t\tmsg, err := c.conn.read()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif msg != nil {\n\t\t\t\/\/ TODO: Make this multithreaded\n\t\t\tc.handleResponse(msg) \/\/ Send message for sorting and retrieval on a separate thread\n\t\t}\n\t}\n}\n<commit_msg>Fixed read and write buffers<commit_after>package gremgo\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\ntype dialer interface {\n\tconnect() error\n\twrite([]byte) error\n\tread() ([]byte, error)\n}\n\n\/\/\/\/\/\n\/*\nWebSocket Connection\n*\/\n\/\/\/\/\/\n\n\/\/ Ws is the dialer for a WebSocket connection\ntype Ws struct {\n\thost string\n\tconn *websocket.Conn\n}\n\nfunc (ws *Ws) connect() (err error) {\n\td := websocket.Dialer{\n\t\tWriteBufferSize: 8192,\n\t\tReadBufferSize: 8192,\n\t}\n\tws.conn, _, err = d.Dial(ws.host, http.Header{})\n\treturn\n}\n\nfunc (ws *Ws) write(msg []byte) (err error) {\n\terr = ws.conn.WriteMessage(2, msg)\n\treturn\n}\n\nfunc (ws *Ws) read() (msg []byte, err error) {\n\t_, msg, err = ws.conn.ReadMessage()\n\treturn\n}\n\n\/\/\/\/\/\n\nfunc (c *Client) writeWorker() { \/\/ writeWorker works on a loop and dispatches messages as soon as it recieves them\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.requests: \/\/ Wait for message send request\n\t\t\terr := c.conn.write(msg) \/\/ Write message\n\t\t\tif err != nil { \/\/ TODO: Fix error handling here\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (c *Client) readWorker() { \/\/ readWorker works on a loop and sorts messages as soon as it recieves them\n\tfor {\n\t\tmsg, err := c.conn.read()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif msg != nil {\n\t\t\t\/\/ TODO: Make this multithreaded\n\t\t\tc.handleResponse(msg) \/\/ Send message for sorting and retrieval on a separate thread\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package pop\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/markbates\/going\/defaults\"\n\t\"github.com\/markbates\/going\/randx\"\n)\n\n\/\/ Connections contains all of the available connections\nvar Connections = map[string]*Connection{}\n\n\/\/ Connection represents all of the necessary details for\n\/\/ talking with a datastore\ntype Connection struct {\n\tID string\n\tStore Store\n\tDialect Dialect\n\tTimings []time.Duration\n}\n\nfunc (c *Connection) String() string {\n\treturn c.Dialect.URL()\n}\n\nfunc (c *Connection) URL() string {\n\treturn c.String()\n}\n\n\/\/ NewConnection creates a new connection, and sets it's `Dialect`\n\/\/ appropriately based on the `ConnectionDetails` passed into it.\nfunc NewConnection(deets *ConnectionDetails) *Connection {\n\tc := &Connection{\n\t\tID: randx.String(30),\n\t\tTimings: []time.Duration{},\n\t}\n\tswitch deets.Dialect {\n\tcase \"postgres\":\n\t\tc.Dialect = NewPostgreSQL(deets)\n\tcase \"mysql\":\n\t\tc.Dialect = NewMySQL(deets)\n\t\t\/\/ case \"sqlite3\":\n\t\t\/\/ \tc.Dialect = NewSQLite(deets)\n\t}\n\treturn c\n}\n\n\/\/ Connect takes the name of a connection, default is \"development\", and will\n\/\/ return that connection from the available `Connections`. If a connection with\n\/\/ that name can not be found an error will be returned. If a connection is\n\/\/ found, and it has yet to open a connection with its underlying datastore,\n\/\/ a connection to that store will be opened.\nfunc Connect(e string) (*Connection, error) {\n\te = defaults.String(e, \"development\")\n\tc := Connections[e]\n\tif c == nil {\n\t\treturn c, fmt.Errorf(\"Could not find connection named %s!\", e)\n\t}\n\tif c.Store != nil {\n\t\treturn c, nil\n\t}\n\tdb, err := sqlx.Open(c.Dialect.Details().Dialect, c.Dialect.URL())\n\tif err == nil {\n\t\tc.Store = &dB{db}\n\t}\n\treturn c, nil\n}\n\n\/\/ Transaction will start a new transaction on the connection. If the inner function\n\/\/ returns an error then the transaction will be rolled back, otherwise the transaction\n\/\/ will automatically commit at the end.\nfunc (c *Connection) Transaction(fn func(tx *Connection) error) error {\n\ttx, err := c.Store.Transaction()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcn := &Connection{\n\t\tID: randx.String(30),\n\t\tStore: tx,\n\t\tDialect: c.Dialect,\n\t\tTimings: []time.Duration{},\n\t}\n\terr = fn(cn)\n\tif err != nil {\n\t\terr = tx.Rollback()\n\t} else {\n\t\terr = tx.Commit()\n\t}\n\treturn err\n}\n\n\/\/ Rollback will open a new transaction and automatically rollback that transaction\n\/\/ when the inner function returns, regardless. This can be useful for tests, etc...\nfunc (c *Connection) Rollback(fn func(tx *Connection)) error {\n\ttx, err := c.Store.Transaction()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcn := &Connection{\n\t\tID: randx.String(30),\n\t\tStore: tx,\n\t\tDialect: c.Dialect,\n\t\tTimings: []time.Duration{},\n\t}\n\tfn(cn)\n\treturn tx.Rollback()\n}\n\n\/\/ Q creates a new \"empty\" query for the current connection.\nfunc (c *Connection) Q() *Query {\n\treturn Q(c)\n}\n\nfunc (c *Connection) timeFunc(name string, fn func() error) error {\n\tnow := time.Now()\n\terr := fn()\n\tc.Timings = append(c.Timings, time.Now().Sub(now))\n\treturn err\n}\n<commit_msg>if the connection is already a transaction, just use that<commit_after>package pop\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/markbates\/going\/defaults\"\n\t\"github.com\/markbates\/going\/randx\"\n)\n\n\/\/ Connections contains all of the available connections\nvar Connections = map[string]*Connection{}\n\n\/\/ Connection represents all of the necessary details for\n\/\/ talking with a datastore\ntype Connection struct {\n\tID string\n\tStore Store\n\tDialect Dialect\n\tTimings []time.Duration\n\ttX *tX\n}\n\nfunc (c *Connection) String() string {\n\treturn c.Dialect.URL()\n}\n\nfunc (c *Connection) URL() string {\n\treturn c.String()\n}\n\n\/\/ NewConnection creates a new connection, and sets it's `Dialect`\n\/\/ appropriately based on the `ConnectionDetails` passed into it.\nfunc NewConnection(deets *ConnectionDetails) *Connection {\n\tc := &Connection{\n\t\tID: randx.String(30),\n\t\tTimings: []time.Duration{},\n\t}\n\tswitch deets.Dialect {\n\tcase \"postgres\":\n\t\tc.Dialect = NewPostgreSQL(deets)\n\tcase \"mysql\":\n\t\tc.Dialect = NewMySQL(deets)\n\t\t\/\/ case \"sqlite3\":\n\t\t\/\/ \tc.Dialect = NewSQLite(deets)\n\t}\n\treturn c\n}\n\n\/\/ Connect takes the name of a connection, default is \"development\", and will\n\/\/ return that connection from the available `Connections`. If a connection with\n\/\/ that name can not be found an error will be returned. If a connection is\n\/\/ found, and it has yet to open a connection with its underlying datastore,\n\/\/ a connection to that store will be opened.\nfunc Connect(e string) (*Connection, error) {\n\te = defaults.String(e, \"development\")\n\tc := Connections[e]\n\tif c == nil {\n\t\treturn c, fmt.Errorf(\"Could not find connection named %s!\", e)\n\t}\n\tif c.Store != nil {\n\t\treturn c, nil\n\t}\n\tdb, err := sqlx.Open(c.Dialect.Details().Dialect, c.Dialect.URL())\n\tif err == nil {\n\t\tc.Store = &dB{db}\n\t}\n\treturn c, nil\n}\n\n\/\/ Transaction will start a new transaction on the connection. If the inner function\n\/\/ returns an error then the transaction will be rolled back, otherwise the transaction\n\/\/ will automatically commit at the end.\nfunc (c *Connection) Transaction(fn func(tx *Connection) error) error {\n\tvar cn *Connection\n\tif c.tX == nil {\n\t\ttx, err := c.Store.Transaction()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcn = &Connection{\n\t\t\tID: randx.String(30),\n\t\t\tStore: tx,\n\t\t\tDialect: c.Dialect,\n\t\t\tTimings: []time.Duration{},\n\t\t\ttX: tx,\n\t\t}\n\t} else {\n\t\tcn = c\n\t}\n\terr := fn(cn)\n\tif err != nil {\n\t\terr = cn.tX.Rollback()\n\t} else {\n\t\terr = cn.tX.Commit()\n\t}\n\treturn err\n}\n\n\/\/ Rollback will open a new transaction and automatically rollback that transaction\n\/\/ when the inner function returns, regardless. This can be useful for tests, etc...\nfunc (c *Connection) Rollback(fn func(tx *Connection)) error {\n\tvar cn *Connection\n\tif c.tX == nil {\n\t\ttx, err := c.Store.Transaction()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcn = &Connection{\n\t\t\tID: randx.String(30),\n\t\t\tStore: tx,\n\t\t\tDialect: c.Dialect,\n\t\t\tTimings: []time.Duration{},\n\t\t\ttX: tx,\n\t\t}\n\t} else {\n\t\tcn = c\n\t}\n\tfn(cn)\n\treturn cn.tX.Rollback()\n}\n\n\/\/ Q creates a new \"empty\" query for the current connection.\nfunc (c *Connection) Q() *Query {\n\treturn Q(c)\n}\n\nfunc (c *Connection) timeFunc(name string, fn func() error) error {\n\tnow := time.Now()\n\terr := fn()\n\tc.Timings = append(c.Timings, time.Now().Sub(now))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package tarantool\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net\/url\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar (\n\tErrEmptyDefaultSpace = errors.New(\"zero-length default space or unnecessary slash in dsn.path\")\n\tErrSyncFailed = errors.New(\"SYNC failed\")\n)\n\ntype Options struct {\n\tConnectTimeout time.Duration\n\tQueryTimeout time.Duration\n\tDefaultSpace string\n\tUser string\n\tPassword string\n\tUUID string\n\tReplicaSetUUID string\n\tPerf PerfCount\n}\n\ntype Greeting struct {\n\tVersion []byte\n\tAuth []byte\n}\n\ntype Connection struct {\n\trequestID uint64\n\trequests *requestMap\n\twriteChan chan *BinaryPacket \/\/ packed messages with header\n\tcloseOnce sync.Once\n\texit chan bool\n\tclosed chan bool\n\ttcpConn net.Conn\n\n\tccr io.Reader\n\tccw io.Writer\n\n\t\/\/ options\n\tqueryTimeout time.Duration\n\tgreeting *Greeting\n\tpackData *packData\n\tremoteAddr string\n\tfirstError error\n\tfirstErrorLock *sync.Mutex\n\tperf PerfCount\n}\n\n\/\/ Connect to tarantool instance with options.\n\/\/ Returned Connection could be used to execute queries.\nfunc Connect(dsnString string, options *Options) (conn *Connection, err error) {\n\tdsn, opts, err := parseOptions(dsnString, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err = newConn(dsn.Scheme, dsn.Host, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ set schema pulling deadline\n\tdeadline := time.Now().Add(opts.ConnectTimeout)\n\tconn.tcpConn.SetDeadline(deadline)\n\n\terr = conn.pullSchema()\n\tif err != nil {\n\t\tconn.tcpConn.Close()\n\t\tconn = nil\n\t\treturn\n\t}\n\n\t\/\/ remove deadline\n\tconn.tcpConn.SetDeadline(time.Time{})\n\n\tgo conn.worker()\n\n\treturn\n}\n\nfunc newConn(scheme, addr string, opts Options) (conn *Connection, err error) {\n\n\tdefer func() { \/\/ close opened connection if error\n\t\tif err != nil && conn != nil {\n\t\t\tif conn.tcpConn != nil {\n\t\t\t\tconn.tcpConn.Close()\n\t\t\t}\n\t\t\tconn = nil\n\t\t}\n\t}()\n\n\tconn = &Connection{\n\t\tremoteAddr: addr,\n\t\trequests: newRequestMap(),\n\t\twriteChan: make(chan *BinaryPacket, 256),\n\t\texit: make(chan bool),\n\t\tclosed: make(chan bool),\n\t\tfirstErrorLock: &sync.Mutex{},\n\t\tpackData: newPackData(opts.DefaultSpace),\n\t\tqueryTimeout: opts.QueryTimeout,\n\t\tperf: opts.Perf,\n\t}\n\n\tconn.tcpConn, err = net.DialTimeout(scheme, conn.remoteAddr, opts.ConnectTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conn.perf.NetRead != nil {\n\t\tconn.ccr = NewCountedReader(conn.tcpConn, conn.perf.NetRead)\n\t} else {\n\t\tconn.ccr = conn.tcpConn\n\t}\n\n\tif conn.perf.NetWrite != nil {\n\t\tconn.ccw = NewCountedWriter(conn.tcpConn, conn.perf.NetWrite)\n\t} else {\n\t\tconn.ccw = conn.tcpConn\n\t}\n\n\tgreeting := make([]byte, 128)\n\n\tconnectDeadline := time.Now().Add(opts.ConnectTimeout)\n\tconn.tcpConn.SetDeadline(connectDeadline)\n\t\/\/ removing deadline deferred\n\tdefer conn.tcpConn.SetDeadline(time.Time{})\n\n\t_, err = io.ReadFull(conn.ccr, greeting)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.greeting = &Greeting{\n\t\tVersion: greeting[:64],\n\t\tAuth: greeting[64:108],\n\t}\n\n\t\/\/ try to authenticate if user have been provided\n\tif len(opts.User) > 0 {\n\t\trequestID := conn.nextID()\n\n\t\tpp := packetPool.GetWithID(requestID)\n\n\t\terr = pp.packMsg(&Auth{\n\t\t\tUser: opts.User,\n\t\t\tPassword: opts.Password,\n\t\t\tGreetingAuth: conn.greeting.Auth,\n\t\t}, conn.packData)\n\t\tif err != nil {\n\t\t\tpp.Release()\n\t\t\treturn\n\t\t}\n\n\t\t_, err = pp.WriteTo(conn.ccw)\n\t\tpp.Release()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tpp = packetPool.Get()\n\t\tdefer pp.Release()\n\n\t\tif err = pp.readPacket(conn.ccr); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tauthResponse := &pp.packet\n\t\tif authResponse.requestID != requestID {\n\t\t\terr = ErrSyncFailed\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.Result != nil && authResponse.Result.Error != nil {\n\t\t\terr = authResponse.Result.Error\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc parseOptions(dsnString string, options *Options) (*url.URL, Options, error) {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\topts := *options \/\/ copy to new object\n\n\t\/\/ remove schema, if present\n\n\t\/\/ === for backward compatibility (only tcp despite of user wishes :)\n\tdsnString = strings.TrimPrefix(dsnString, \"unix:\")\n\t\/\/ ===\n\n\t\/\/ tcp is the default scheme\n\tswitch {\n\tcase strings.HasPrefix(dsnString, \"tcp:\/\/\"):\n\tcase strings.HasPrefix(dsnString, \"\/\/\"):\n\t\tdsnString = \"tcp:\" + dsnString\n\tdefault:\n\t\tdsnString = \"tcp:\/\/\" + dsnString\n\t}\n\tdsn, err := url.Parse(dsnString)\n\tif err != nil {\n\t\treturn dsn, opts, err\n\t}\n\n\tif opts.ConnectTimeout.Nanoseconds() == 0 {\n\t\topts.ConnectTimeout = DefaultConnectTimeout\n\t}\n\tif opts.QueryTimeout.Nanoseconds() == 0 {\n\t\topts.QueryTimeout = DefaultQueryTimeout\n\t}\n\n\tif len(opts.User) == 0 {\n\t\tif user := dsn.User; user != nil {\n\t\t\topts.User = user.Username()\n\t\t\topts.Password, _ = user.Password()\n\t\t}\n\t}\n\n\tif len(opts.DefaultSpace) == 0 && len(dsn.Path) > 0 {\n\t\tpath := strings.TrimPrefix(dsn.Path, \"\/\")\n\t\t\/\/ check it if it is necessary\n\t\tswitch {\n\t\tcase len(path) == 0:\n\t\t\treturn nil, opts, ErrEmptyDefaultSpace\n\t\t\/\/case strings.IndexAny(path, \"\/ ,\") != -1:\n\t\t\/\/\treturn nil, opts, ErrBadDSNPath\n\t\tdefault:\n\t\t\topts.DefaultSpace = path\n\t\t}\n\t}\n\n\treturn dsn, opts, nil\n}\n\nfunc (conn *Connection) pullSchema() (err error) {\n\t\/\/ select space and index schema\n\trequest := func(q Query) (*Result, error) {\n\t\tvar err error\n\n\t\trequestID := conn.nextID()\n\n\t\tpp := packetPool.GetWithID(requestID)\n\t\tif err = pp.packMsg(q, conn.packData); err != nil {\n\t\t\tpp.Release()\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, err = pp.WriteTo(conn.ccw)\n\t\tpp.Release()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpp = packetPool.Get()\n\t\tdefer pp.Release()\n\n\t\tif err = pp.readPacket(conn.ccr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresponse := &pp.packet\n\t\tif response.requestID != requestID {\n\t\t\treturn nil, errors.New(\"Bad response requestID\")\n\t\t}\n\n\t\tif response.Result == nil {\n\t\t\treturn nil, errors.New(\"Nil response result\")\n\t\t}\n\n\t\tif response.Result.Error != nil {\n\t\t\treturn nil, response.Result.Error\n\t\t}\n\n\t\treturn response.Result, nil\n\t}\n\n\tres, err := request(&Select{\n\t\tSpace: ViewSpace,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, space := range res.Data {\n\t\tspaceID, _ := conn.packData.spaceNo(space[0])\n\t\tconn.packData.spaceMap[space[2].(string)] = spaceID\n\t}\n\n\tres, err = request(&Select{\n\t\tSpace: ViewIndex,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, index := range res.Data {\n\t\tspaceID, _ := conn.packData.fieldNo(index[0])\n\t\tindexID, _ := conn.packData.fieldNo(index[1])\n\t\tindexName := index[2].(string)\n\t\tindexAttr := index[4].(map[string]interface{}) \/\/ e.g: {\"unique\": true}\n\t\tindexFields := index[5].([]interface{}) \/\/ e.g: [[0 num] [1 str]]\n\n\t\tindexSpaceMap, exists := conn.packData.indexMap[spaceID]\n\t\tif !exists {\n\t\t\tindexSpaceMap = make(map[string]uint64)\n\t\t\tconn.packData.indexMap[spaceID] = indexSpaceMap\n\t\t}\n\t\tindexSpaceMap[indexName] = indexID\n\n\t\t\/\/ build list of primary key field numbers for this space, if the PK is detected\n\t\tif indexAttr != nil && indexID == 0 {\n\t\t\tif unique, ok := indexAttr[\"unique\"]; ok && unique.(bool) {\n\t\t\t\tpk := make([]int, len(indexFields))\n\t\t\t\tfor i := range indexFields {\n\t\t\t\t\tdescr := indexFields[i].([]interface{})\n\t\t\t\t\tf, _ := conn.packData.fieldNo(descr[0])\n\t\t\t\t\tpk[i] = int(f)\n\t\t\t\t}\n\t\t\t\tconn.packData.primaryKeyMap[spaceID] = pk\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (conn *Connection) nextID() uint64 {\n\treturn atomic.AddUint64(&conn.requestID, 1)\n}\n\nfunc (conn *Connection) stop() {\n\tconn.closeOnce.Do(func() {\n\t\t\/\/ debug.PrintStack()\n\t\tclose(conn.exit)\n\t\tconn.tcpConn.Close()\n\t\truntime.GC()\n\t})\n}\n\nfunc (conn *Connection) GetPrimaryKeyFields(space interface{}) ([]int, bool) {\n\tvar spaceID uint64\n\tvar err error\n\n\tif conn.packData == nil {\n\t\treturn nil, false\n\t}\n\tif spaceID, err = conn.packData.spaceNo(space); err != nil {\n\t\treturn nil, false\n\t}\n\n\tf, ok := conn.packData.primaryKeyMap[spaceID]\n\treturn f, ok\n}\n\nfunc (conn *Connection) Close() {\n\tconn.stop()\n\t<-conn.closed\n}\n\nfunc (conn *Connection) String() string {\n\treturn conn.remoteAddr\n}\n\nfunc (conn *Connection) IsClosed() bool {\n\tselect {\n\tcase <-conn.exit:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (conn *Connection) getError() error {\n\tconn.firstErrorLock.Lock()\n\tdefer conn.firstErrorLock.Unlock()\n\treturn conn.firstError\n}\n\nfunc (conn *Connection) setError(err error) {\n\tif err != nil && err != io.EOF {\n\t\tconn.firstErrorLock.Lock()\n\t\tif conn.firstError == nil {\n\t\t\tconn.firstError = err\n\t\t}\n\t\tconn.firstErrorLock.Unlock()\n\t}\n}\n\nfunc (conn *Connection) worker() {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\terr := conn.writer()\n\t\tconn.setError(err)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\terr := conn.reader()\n\t\tconn.setError(err)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ release all pending packets\n\twriteChan := conn.writeChan\n\tconn.writeChan = nil\n\nCLEANUP_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase pp := <-writeChan:\n\t\t\tpp.Release()\n\t\tdefault:\n\t\t\tbreak CLEANUP_LOOP\n\t\t}\n\t}\n\n\t\/\/ send error reply to all pending requests\n\tconn.requests.CleanUp(func(req *request) {\n\t\tselect {\n\t\tcase req.replyChan <- &AsyncResult{\n\t\t\tError: ConnectionClosedError(conn),\n\t\t\tErrorCode: ErrNoConnection,\n\t\t\tOpaque: req.opaque,\n\t\t}:\n\t\tdefault:\n\t\t}\n\t})\n\n\tclose(conn.closed)\n}\n\nfunc (conn *Connection) writer() (err error) {\n\twriteChan := conn.writeChan\n\tstopChan := conn.exit\n\tw := bufio.NewWriterSize(conn.ccw, DefaultWriterBufSize)\n\n\twp := func(w io.Writer, packet *BinaryPacket) error {\n\t\tif conn.perf.NetPacketsOut != nil {\n\t\t\tconn.perf.NetPacketsOut.Add(1)\n\t\t}\n\t\t_, err := packet.WriteTo(w)\n\t\tpacket.Release()\n\t\treturn err\n\t}\n\nWRITER_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase packet, ok := <-writeChan:\n\t\t\tif !ok {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t\tif err = wp(w, packet); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\tbreak WRITER_LOOP\n\t\tdefault:\n\t\t\tif err = w.Flush(); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t\t\/\/ same without flush\n\t\t\tselect {\n\t\t\tcase packet, ok := <-writeChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\t\tif err = wp(w, packet); err != nil {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\tcase <-stopChan:\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (conn *Connection) reader() (err error) {\n\tvar pp *BinaryPacket\n\tvar requestID uint64\n\n\tr := bufio.NewReaderSize(conn.ccr, DefaultReaderBufSize)\n\nREADER_LOOP:\n\tfor {\n\t\tpp := packetPool.Get()\n\t\tif requestID, err = pp.readRawPacket(r); err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\tif conn.perf.NetPacketsIn != nil {\n\t\t\tconn.perf.NetPacketsIn.Add(1)\n\t\t}\n\n\t\treq := conn.requests.Pop(requestID)\n\t\tif req == nil {\n\t\t\tpp.Release()\n\t\t\tpp = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase req.replyChan <- &AsyncResult{0, nil, pp, conn, req.opaque}:\n\t\t\tpp = nil\n\t\tdefault:\n\t\t}\n\t}\n\n\tif pp != nil {\n\t\tpp.Release()\n\t}\n\treturn\n}\n<commit_msg>Add GetPerf method to Connection<commit_after>package tarantool\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net\/url\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar (\n\tErrEmptyDefaultSpace = errors.New(\"zero-length default space or unnecessary slash in dsn.path\")\n\tErrSyncFailed = errors.New(\"SYNC failed\")\n)\n\ntype Options struct {\n\tConnectTimeout time.Duration\n\tQueryTimeout time.Duration\n\tDefaultSpace string\n\tUser string\n\tPassword string\n\tUUID string\n\tReplicaSetUUID string\n\tPerf PerfCount\n}\n\ntype Greeting struct {\n\tVersion []byte\n\tAuth []byte\n}\n\ntype Connection struct {\n\trequestID uint64\n\trequests *requestMap\n\twriteChan chan *BinaryPacket \/\/ packed messages with header\n\tcloseOnce sync.Once\n\texit chan bool\n\tclosed chan bool\n\ttcpConn net.Conn\n\n\tccr io.Reader\n\tccw io.Writer\n\n\t\/\/ options\n\tqueryTimeout time.Duration\n\tgreeting *Greeting\n\tpackData *packData\n\tremoteAddr string\n\tfirstError error\n\tfirstErrorLock *sync.Mutex\n\tperf PerfCount\n}\n\n\/\/ Connect to tarantool instance with options.\n\/\/ Returned Connection could be used to execute queries.\nfunc Connect(dsnString string, options *Options) (conn *Connection, err error) {\n\tdsn, opts, err := parseOptions(dsnString, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err = newConn(dsn.Scheme, dsn.Host, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ set schema pulling deadline\n\tdeadline := time.Now().Add(opts.ConnectTimeout)\n\tconn.tcpConn.SetDeadline(deadline)\n\n\terr = conn.pullSchema()\n\tif err != nil {\n\t\tconn.tcpConn.Close()\n\t\tconn = nil\n\t\treturn\n\t}\n\n\t\/\/ remove deadline\n\tconn.tcpConn.SetDeadline(time.Time{})\n\n\tgo conn.worker()\n\n\treturn\n}\n\nfunc newConn(scheme, addr string, opts Options) (conn *Connection, err error) {\n\n\tdefer func() { \/\/ close opened connection if error\n\t\tif err != nil && conn != nil {\n\t\t\tif conn.tcpConn != nil {\n\t\t\t\tconn.tcpConn.Close()\n\t\t\t}\n\t\t\tconn = nil\n\t\t}\n\t}()\n\n\tconn = &Connection{\n\t\tremoteAddr: addr,\n\t\trequests: newRequestMap(),\n\t\twriteChan: make(chan *BinaryPacket, 256),\n\t\texit: make(chan bool),\n\t\tclosed: make(chan bool),\n\t\tfirstErrorLock: &sync.Mutex{},\n\t\tpackData: newPackData(opts.DefaultSpace),\n\t\tqueryTimeout: opts.QueryTimeout,\n\t\tperf: opts.Perf,\n\t}\n\n\tconn.tcpConn, err = net.DialTimeout(scheme, conn.remoteAddr, opts.ConnectTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conn.perf.NetRead != nil {\n\t\tconn.ccr = NewCountedReader(conn.tcpConn, conn.perf.NetRead)\n\t} else {\n\t\tconn.ccr = conn.tcpConn\n\t}\n\n\tif conn.perf.NetWrite != nil {\n\t\tconn.ccw = NewCountedWriter(conn.tcpConn, conn.perf.NetWrite)\n\t} else {\n\t\tconn.ccw = conn.tcpConn\n\t}\n\n\tgreeting := make([]byte, 128)\n\n\tconnectDeadline := time.Now().Add(opts.ConnectTimeout)\n\tconn.tcpConn.SetDeadline(connectDeadline)\n\t\/\/ removing deadline deferred\n\tdefer conn.tcpConn.SetDeadline(time.Time{})\n\n\t_, err = io.ReadFull(conn.ccr, greeting)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.greeting = &Greeting{\n\t\tVersion: greeting[:64],\n\t\tAuth: greeting[64:108],\n\t}\n\n\t\/\/ try to authenticate if user have been provided\n\tif len(opts.User) > 0 {\n\t\trequestID := conn.nextID()\n\n\t\tpp := packetPool.GetWithID(requestID)\n\n\t\terr = pp.packMsg(&Auth{\n\t\t\tUser: opts.User,\n\t\t\tPassword: opts.Password,\n\t\t\tGreetingAuth: conn.greeting.Auth,\n\t\t}, conn.packData)\n\t\tif err != nil {\n\t\t\tpp.Release()\n\t\t\treturn\n\t\t}\n\n\t\t_, err = pp.WriteTo(conn.ccw)\n\t\tpp.Release()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tpp = packetPool.Get()\n\t\tdefer pp.Release()\n\n\t\tif err = pp.readPacket(conn.ccr); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tauthResponse := &pp.packet\n\t\tif authResponse.requestID != requestID {\n\t\t\terr = ErrSyncFailed\n\t\t\treturn\n\t\t}\n\n\t\tif authResponse.Result != nil && authResponse.Result.Error != nil {\n\t\t\terr = authResponse.Result.Error\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc parseOptions(dsnString string, options *Options) (*url.URL, Options, error) {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\topts := *options \/\/ copy to new object\n\n\t\/\/ remove schema, if present\n\n\t\/\/ === for backward compatibility (only tcp despite of user wishes :)\n\tdsnString = strings.TrimPrefix(dsnString, \"unix:\")\n\t\/\/ ===\n\n\t\/\/ tcp is the default scheme\n\tswitch {\n\tcase strings.HasPrefix(dsnString, \"tcp:\/\/\"):\n\tcase strings.HasPrefix(dsnString, \"\/\/\"):\n\t\tdsnString = \"tcp:\" + dsnString\n\tdefault:\n\t\tdsnString = \"tcp:\/\/\" + dsnString\n\t}\n\tdsn, err := url.Parse(dsnString)\n\tif err != nil {\n\t\treturn dsn, opts, err\n\t}\n\n\tif opts.ConnectTimeout.Nanoseconds() == 0 {\n\t\topts.ConnectTimeout = DefaultConnectTimeout\n\t}\n\tif opts.QueryTimeout.Nanoseconds() == 0 {\n\t\topts.QueryTimeout = DefaultQueryTimeout\n\t}\n\n\tif len(opts.User) == 0 {\n\t\tif user := dsn.User; user != nil {\n\t\t\topts.User = user.Username()\n\t\t\topts.Password, _ = user.Password()\n\t\t}\n\t}\n\n\tif len(opts.DefaultSpace) == 0 && len(dsn.Path) > 0 {\n\t\tpath := strings.TrimPrefix(dsn.Path, \"\/\")\n\t\t\/\/ check it if it is necessary\n\t\tswitch {\n\t\tcase len(path) == 0:\n\t\t\treturn nil, opts, ErrEmptyDefaultSpace\n\t\t\/\/case strings.IndexAny(path, \"\/ ,\") != -1:\n\t\t\/\/\treturn nil, opts, ErrBadDSNPath\n\t\tdefault:\n\t\t\topts.DefaultSpace = path\n\t\t}\n\t}\n\n\treturn dsn, opts, nil\n}\n\nfunc (conn *Connection) pullSchema() (err error) {\n\t\/\/ select space and index schema\n\trequest := func(q Query) (*Result, error) {\n\t\tvar err error\n\n\t\trequestID := conn.nextID()\n\n\t\tpp := packetPool.GetWithID(requestID)\n\t\tif err = pp.packMsg(q, conn.packData); err != nil {\n\t\t\tpp.Release()\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, err = pp.WriteTo(conn.ccw)\n\t\tpp.Release()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpp = packetPool.Get()\n\t\tdefer pp.Release()\n\n\t\tif err = pp.readPacket(conn.ccr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresponse := &pp.packet\n\t\tif response.requestID != requestID {\n\t\t\treturn nil, errors.New(\"Bad response requestID\")\n\t\t}\n\n\t\tif response.Result == nil {\n\t\t\treturn nil, errors.New(\"Nil response result\")\n\t\t}\n\n\t\tif response.Result.Error != nil {\n\t\t\treturn nil, response.Result.Error\n\t\t}\n\n\t\treturn response.Result, nil\n\t}\n\n\tres, err := request(&Select{\n\t\tSpace: ViewSpace,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, space := range res.Data {\n\t\tspaceID, _ := conn.packData.spaceNo(space[0])\n\t\tconn.packData.spaceMap[space[2].(string)] = spaceID\n\t}\n\n\tres, err = request(&Select{\n\t\tSpace: ViewIndex,\n\t\tKey: 0,\n\t\tIterator: IterAll,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, index := range res.Data {\n\t\tspaceID, _ := conn.packData.fieldNo(index[0])\n\t\tindexID, _ := conn.packData.fieldNo(index[1])\n\t\tindexName := index[2].(string)\n\t\tindexAttr := index[4].(map[string]interface{}) \/\/ e.g: {\"unique\": true}\n\t\tindexFields := index[5].([]interface{}) \/\/ e.g: [[0 num] [1 str]]\n\n\t\tindexSpaceMap, exists := conn.packData.indexMap[spaceID]\n\t\tif !exists {\n\t\t\tindexSpaceMap = make(map[string]uint64)\n\t\t\tconn.packData.indexMap[spaceID] = indexSpaceMap\n\t\t}\n\t\tindexSpaceMap[indexName] = indexID\n\n\t\t\/\/ build list of primary key field numbers for this space, if the PK is detected\n\t\tif indexAttr != nil && indexID == 0 {\n\t\t\tif unique, ok := indexAttr[\"unique\"]; ok && unique.(bool) {\n\t\t\t\tpk := make([]int, len(indexFields))\n\t\t\t\tfor i := range indexFields {\n\t\t\t\t\tdescr := indexFields[i].([]interface{})\n\t\t\t\t\tf, _ := conn.packData.fieldNo(descr[0])\n\t\t\t\t\tpk[i] = int(f)\n\t\t\t\t}\n\t\t\t\tconn.packData.primaryKeyMap[spaceID] = pk\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (conn *Connection) nextID() uint64 {\n\treturn atomic.AddUint64(&conn.requestID, 1)\n}\n\nfunc (conn *Connection) stop() {\n\tconn.closeOnce.Do(func() {\n\t\t\/\/ debug.PrintStack()\n\t\tclose(conn.exit)\n\t\tconn.tcpConn.Close()\n\t\truntime.GC()\n\t})\n}\n\nfunc (conn *Connection) GetPerf() PerfCount {\n\treturn conn.perf\n}\n\nfunc (conn *Connection) GetPrimaryKeyFields(space interface{}) ([]int, bool) {\n\tvar spaceID uint64\n\tvar err error\n\n\tif conn.packData == nil {\n\t\treturn nil, false\n\t}\n\tif spaceID, err = conn.packData.spaceNo(space); err != nil {\n\t\treturn nil, false\n\t}\n\n\tf, ok := conn.packData.primaryKeyMap[spaceID]\n\treturn f, ok\n}\n\nfunc (conn *Connection) Close() {\n\tconn.stop()\n\t<-conn.closed\n}\n\nfunc (conn *Connection) String() string {\n\treturn conn.remoteAddr\n}\n\nfunc (conn *Connection) IsClosed() bool {\n\tselect {\n\tcase <-conn.exit:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (conn *Connection) getError() error {\n\tconn.firstErrorLock.Lock()\n\tdefer conn.firstErrorLock.Unlock()\n\treturn conn.firstError\n}\n\nfunc (conn *Connection) setError(err error) {\n\tif err != nil && err != io.EOF {\n\t\tconn.firstErrorLock.Lock()\n\t\tif conn.firstError == nil {\n\t\t\tconn.firstError = err\n\t\t}\n\t\tconn.firstErrorLock.Unlock()\n\t}\n}\n\nfunc (conn *Connection) worker() {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\terr := conn.writer()\n\t\tconn.setError(err)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\terr := conn.reader()\n\t\tconn.setError(err)\n\t\tconn.stop()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\t\/\/ release all pending packets\n\twriteChan := conn.writeChan\n\tconn.writeChan = nil\n\nCLEANUP_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase pp := <-writeChan:\n\t\t\tpp.Release()\n\t\tdefault:\n\t\t\tbreak CLEANUP_LOOP\n\t\t}\n\t}\n\n\t\/\/ send error reply to all pending requests\n\tconn.requests.CleanUp(func(req *request) {\n\t\tselect {\n\t\tcase req.replyChan <- &AsyncResult{\n\t\t\tError: ConnectionClosedError(conn),\n\t\t\tErrorCode: ErrNoConnection,\n\t\t\tOpaque: req.opaque,\n\t\t}:\n\t\tdefault:\n\t\t}\n\t})\n\n\tclose(conn.closed)\n}\n\nfunc (conn *Connection) writer() (err error) {\n\twriteChan := conn.writeChan\n\tstopChan := conn.exit\n\tw := bufio.NewWriterSize(conn.ccw, DefaultWriterBufSize)\n\n\twp := func(w io.Writer, packet *BinaryPacket) error {\n\t\tif conn.perf.NetPacketsOut != nil {\n\t\t\tconn.perf.NetPacketsOut.Add(1)\n\t\t}\n\t\t_, err := packet.WriteTo(w)\n\t\tpacket.Release()\n\t\treturn err\n\t}\n\nWRITER_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase packet, ok := <-writeChan:\n\t\t\tif !ok {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t\tif err = wp(w, packet); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\tbreak WRITER_LOOP\n\t\tdefault:\n\t\t\tif err = w.Flush(); err != nil {\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\n\t\t\t\/\/ same without flush\n\t\t\tselect {\n\t\t\tcase packet, ok := <-writeChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\t\tif err = wp(w, packet); err != nil {\n\t\t\t\t\tbreak WRITER_LOOP\n\t\t\t\t}\n\t\t\tcase <-stopChan:\n\t\t\t\tbreak WRITER_LOOP\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (conn *Connection) reader() (err error) {\n\tvar pp *BinaryPacket\n\tvar requestID uint64\n\n\tr := bufio.NewReaderSize(conn.ccr, DefaultReaderBufSize)\n\nREADER_LOOP:\n\tfor {\n\t\tpp := packetPool.Get()\n\t\tif requestID, err = pp.readRawPacket(r); err != nil {\n\t\t\tbreak READER_LOOP\n\t\t}\n\n\t\tif conn.perf.NetPacketsIn != nil {\n\t\t\tconn.perf.NetPacketsIn.Add(1)\n\t\t}\n\n\t\treq := conn.requests.Pop(requestID)\n\t\tif req == nil {\n\t\t\tpp.Release()\n\t\t\tpp = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase req.replyChan <- &AsyncResult{0, nil, pp, conn, req.opaque}:\n\t\t\tpp = nil\n\t\tdefault:\n\t\t}\n\t}\n\n\tif pp != nil {\n\t\tpp.Release()\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package reminderplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/iopred\/bruxism\"\n)\n\n\/\/ A Reminder holds data about a specific reminder.\ntype Reminder struct {\n\tStartTime time.Time\n\tTime time.Time\n\tRequester string\n\tTarget string\n\tMessage string\n\tIsPrivate bool\n}\n\n\/\/ ReminderPlugin is a plugin that reminds users.\ntype ReminderPlugin struct {\n\tsync.RWMutex\n\tbot *bruxism.Bot\n\tReminders []*Reminder\n\tTotalReminders int\n}\n\nvar randomTimes = []string{\n\t\"1 minute\",\n\t\"10 minutes\",\n\t\"1 hour\",\n\t\"4 hours\",\n\t\"tomorrow\",\n\t\"next week\",\n}\n\nvar randomMessages = []string{\n\t\"walk the dog\",\n\t\"take pizza out of the oven\",\n\t\"check my email\",\n\t\"feed the baby\",\n\t\"play some quake\",\n}\n\nfunc (p *ReminderPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *ReminderPlugin) randomReminder(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%sreminder %s %s%s\", ticks, service.CommandPrefix(), p.random(randomTimes), p.random(randomMessages), ticks)\n}\n\n\/\/ Help returns a list of help strings that are printed when the user requests them.\nfunc (p *ReminderPlugin) Help(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := []string{\n\t\tbruxism.CommandHelp(service, \"reminder\", \"<time> <reminder>\", \"Sets a reminder that is sent after the provided time.\")[0],\n\t}\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples: \",\n\t\t\tp.randomReminder(service),\n\t\t\tp.randomReminder(service),\n\t\t}...)\n\t}\n\treturn help\n}\n\nfunc (p *ReminderPlugin) parseReminder(parts []string) (time.Time, string, error) {\n\tif parts[0] == \"tomorrow\" {\n\t\treturn time.Now().Add(1 * time.Hour * 24), strings.Join(parts[1:], \" \"), nil\n\t}\n\n\tif parts[0] == \"next\" {\n\t\tswitch parts[1] {\n\t\tcase \"week\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"month\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"year\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t\tdefault:\n\t\t\treturn time.Time{}, \"\", errors.New(\"Invalid next.\")\n\t\t}\n\t}\n\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn time.Time{}, \"\", err\n\t}\n\n\tswitch {\n\tcase strings.HasPrefix(parts[1], \"sec\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Second), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"min\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Minute), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"hour\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"day\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"week\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"month\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"year\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t}\n\n\treturn time.Time{}, \"\", errors.New(\"Invalid string.\")\n}\n\n\/\/ AddReminder adds a reminder.\nfunc (p *ReminderPlugin) AddReminder(reminder *Reminder) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\ti := 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Requester == reminder.Requester {\n\t\t\ti++\n\t\t\tif i > 5 {\n\t\t\t\treturn errors.New(\"You have too many reminders already.\")\n\t\t\t}\n\t\t}\n\t}\n\n\ti = 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Time.After(reminder.Time) {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\tp.Reminders = append(p.Reminders, reminder)\n\tcopy(p.Reminders[i+1:], p.Reminders[i:])\n\tp.Reminders[i] = reminder\n\tp.TotalReminders++\n\n\treturn nil\n}\n\nfunc (p *ReminderPlugin) Message(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tdefer bruxism.MessageRecover()\n\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"remind\", message) && !bruxism.MatchesCommand(service, \"reminder\", message) {\n\t\treturn\n\t}\n\n\t_, parts := bruxism.ParseCommand(service, message)\n\n\tif len(parts) < 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no time or message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tt, r, err := p.parseReminder(parts)\n\n\tnow := time.Now()\n\n\tif err != nil || t.Before(now) || t.After(now.Add(time.Hour*24*365+time.Hour)) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid time. eg: %s\", strings.Join(randomTimes, \", \")))\n\t\treturn\n\t}\n\n\tif r == \"\" {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\trequester := message.UserName()\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\trequester = fmt.Sprintf(\"<@%s>\", message.UserID())\n\t}\n\n\terr = p.AddReminder(&Reminder{\n\t\tStartTime: now,\n\t\tTime: t,\n\t\tRequester: requester,\n\t\tTarget: message.Channel(),\n\t\tMessage: r,\n\t\tIsPrivate: service.IsPrivate(message),\n\t})\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), err.Error())\n\t\treturn\n\t}\n\n\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Reminder set for %s.\", humanize.Time(t)))\n}\n\n\/\/ SendReminder sends a reminder.\nfunc (p *ReminderPlugin) SendReminder(service bruxism.Service, reminder *Reminder) {\n\tif reminder.IsPrivate {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s you set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Message))\n\t} else {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s %s set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Requester, reminder.Message))\n\t}\n}\n\n\/\/ Run will block until a reminder needs to be fired and then fire it.\nfunc (p *ReminderPlugin) Run(bot *bruxism.Bot, service bruxism.Service) {\n\tfor {\n\t\tp.RLock()\n\n\t\tif len(p.Reminders) > 0 {\n\t\t\treminder := p.Reminders[0]\n\t\t\tif time.Now().After(reminder.Time) {\n\t\t\t\tp.RUnlock()\n\n\t\t\t\tp.SendReminder(service, reminder)\n\n\t\t\t\tp.Lock()\n\t\t\t\tp.Reminders = p.Reminders[1:]\n\t\t\t\tp.Unlock()\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tp.RUnlock()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\n\/\/ Load will load plugin state from a byte array.\nfunc (p *ReminderPlugin) Load(bot *bruxism.Bot, service bruxism.Service, data []byte) error {\n\tif data != nil {\n\t\tif err := json.Unmarshal(data, p); err != nil {\n\t\t\tlog.Println(\"Error loading data\", err)\n\t\t}\n\t}\n\tif len(p.Reminders) > p.TotalReminders {\n\t\tp.TotalReminders = len(p.Reminders)\n\t}\n\n\tgo p.Run(bot, service)\n\treturn nil\n}\n\n\/\/ Save will save plugin state to a byte array.\nfunc (p *ReminderPlugin) Save() ([]byte, error) {\n\treturn json.Marshal(p)\n}\n\n\/\/ Stats will return the stats for a plugin.\nfunc (p *ReminderPlugin) Stats(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) []string {\n\treturn []string{fmt.Sprintf(\"Reminders: \\t%d\\n\", p.TotalReminders)}\n}\n\n\/\/ Name returns the name of the plugin.\nfunc (p *ReminderPlugin) Name() string {\n\treturn \"Reminder\"\n}\n\n\/\/ New will create a new Reminder plugin.\nfunc New() bruxism.Plugin {\n\treturn &ReminderPlugin{\n\t\tReminders: []*Reminder{},\n\t}\n}\n<commit_msg>Prevent Sep from @everyone.<commit_after>package reminderplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/iopred\/bruxism\"\n)\n\n\/\/ A Reminder holds data about a specific reminder.\ntype Reminder struct {\n\tStartTime time.Time\n\tTime time.Time\n\tRequester string\n\tTarget string\n\tMessage string\n\tIsPrivate bool\n}\n\n\/\/ ReminderPlugin is a plugin that reminds users.\ntype ReminderPlugin struct {\n\tsync.RWMutex\n\tbot *bruxism.Bot\n\tReminders []*Reminder\n\tTotalReminders int\n}\n\nvar randomTimes = []string{\n\t\"1 minute\",\n\t\"10 minutes\",\n\t\"1 hour\",\n\t\"4 hours\",\n\t\"tomorrow\",\n\t\"next week\",\n}\n\nvar randomMessages = []string{\n\t\"walk the dog\",\n\t\"take pizza out of the oven\",\n\t\"check my email\",\n\t\"feed the baby\",\n\t\"play some quake\",\n}\n\nfunc (p *ReminderPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *ReminderPlugin) randomReminder(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%sreminder %s %s%s\", ticks, service.CommandPrefix(), p.random(randomTimes), p.random(randomMessages), ticks)\n}\n\n\/\/ Help returns a list of help strings that are printed when the user requests them.\nfunc (p *ReminderPlugin) Help(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := []string{\n\t\tbruxism.CommandHelp(service, \"reminder\", \"<time> <reminder>\", \"Sets a reminder that is sent after the provided time.\")[0],\n\t}\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples: \",\n\t\t\tp.randomReminder(service),\n\t\t\tp.randomReminder(service),\n\t\t}...)\n\t}\n\treturn help\n}\n\nfunc (p *ReminderPlugin) parseReminder(parts []string) (time.Time, string, error) {\n\tif parts[0] == \"tomorrow\" {\n\t\treturn time.Now().Add(1 * time.Hour * 24), strings.Join(parts[1:], \" \"), nil\n\t}\n\n\tif parts[0] == \"next\" {\n\t\tswitch parts[1] {\n\t\tcase \"week\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"month\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"year\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t\tdefault:\n\t\t\treturn time.Time{}, \"\", errors.New(\"Invalid next.\")\n\t\t}\n\t}\n\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn time.Time{}, \"\", err\n\t}\n\n\tswitch {\n\tcase strings.HasPrefix(parts[1], \"sec\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Second), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"min\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Minute), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"hour\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"day\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"week\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"month\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"year\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t}\n\n\treturn time.Time{}, \"\", errors.New(\"Invalid string.\")\n}\n\n\/\/ AddReminder adds a reminder.\nfunc (p *ReminderPlugin) AddReminder(reminder *Reminder) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\ti := 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Requester == reminder.Requester {\n\t\t\ti++\n\t\t\tif i > 5 {\n\t\t\t\treturn errors.New(\"You have too many reminders already.\")\n\t\t\t}\n\t\t}\n\t}\n\n\ti = 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Time.After(reminder.Time) {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\tp.Reminders = append(p.Reminders, reminder)\n\tcopy(p.Reminders[i+1:], p.Reminders[i:])\n\tp.Reminders[i] = reminder\n\tp.TotalReminders++\n\n\treturn nil\n}\n\nfunc (p *ReminderPlugin) Message(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tdefer bruxism.MessageRecover()\n\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"remind\", message) && !bruxism.MatchesCommand(service, \"reminder\", message) {\n\t\treturn\n\t}\n\n\t_, parts := bruxism.ParseCommand(service, message)\n\n\tif len(parts) < 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no time or message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tt, r, err := p.parseReminder(parts)\n\n\tnow := time.Now()\n\n\tif err != nil || t.Before(now) || t.After(now.Add(time.Hour*24*365+time.Hour)) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid time. eg: %s\", strings.Join(randomTimes, \", \")))\n\t\treturn\n\t}\n\n\tif r == \"\" {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\trequester := message.UserName()\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\trequester = fmt.Sprintf(\"<@%s>\", message.UserID())\n\n\t\tif strings.Index(r, \"<@\") != -1 {\n\t\t\tservice.SendMessage(message.Channel(), \"Invalid reminder, no mentions, sorry.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = p.AddReminder(&Reminder{\n\t\tStartTime: now,\n\t\tTime: t,\n\t\tRequester: requester,\n\t\tTarget: message.Channel(),\n\t\tMessage: r,\n\t\tIsPrivate: service.IsPrivate(message),\n\t})\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), err.Error())\n\t\treturn\n\t}\n\n\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Reminder set for %s.\", humanize.Time(t)))\n}\n\n\/\/ SendReminder sends a reminder.\nfunc (p *ReminderPlugin) SendReminder(service bruxism.Service, reminder *Reminder) {\n\tif strings.Index(reminder.Message, \"<@\") != -1 {\n\t\treturn\n\t}\n\n\tif reminder.IsPrivate {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s you set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Message))\n\t} else {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s %s set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Requester, reminder.Message))\n\t}\n}\n\n\/\/ Run will block until a reminder needs to be fired and then fire it.\nfunc (p *ReminderPlugin) Run(bot *bruxism.Bot, service bruxism.Service) {\n\tfor {\n\t\tp.RLock()\n\n\t\tif len(p.Reminders) > 0 {\n\t\t\treminder := p.Reminders[0]\n\t\t\tif time.Now().After(reminder.Time) {\n\t\t\t\tp.RUnlock()\n\n\t\t\t\tp.SendReminder(service, reminder)\n\n\t\t\t\tp.Lock()\n\t\t\t\tp.Reminders = p.Reminders[1:]\n\t\t\t\tp.Unlock()\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tp.RUnlock()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\n\/\/ Load will load plugin state from a byte array.\nfunc (p *ReminderPlugin) Load(bot *bruxism.Bot, service bruxism.Service, data []byte) error {\n\tif data != nil {\n\t\tif err := json.Unmarshal(data, p); err != nil {\n\t\t\tlog.Println(\"Error loading data\", err)\n\t\t}\n\t}\n\tif len(p.Reminders) > p.TotalReminders {\n\t\tp.TotalReminders = len(p.Reminders)\n\t}\n\n\tgo p.Run(bot, service)\n\treturn nil\n}\n\n\/\/ Save will save plugin state to a byte array.\nfunc (p *ReminderPlugin) Save() ([]byte, error) {\n\treturn json.Marshal(p)\n}\n\n\/\/ Stats will return the stats for a plugin.\nfunc (p *ReminderPlugin) Stats(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) []string {\n\treturn []string{fmt.Sprintf(\"Reminders: \\t%d\\n\", p.TotalReminders)}\n}\n\n\/\/ Name returns the name of the plugin.\nfunc (p *ReminderPlugin) Name() string {\n\treturn \"Reminder\"\n}\n\n\/\/ New will create a new Reminder plugin.\nfunc New() bruxism.Plugin {\n\treturn &ReminderPlugin{\n\t\tReminders: []*Reminder{},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package reminderplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/iopred\/bruxism\"\n)\n\n\/\/ A Reminder holds data about a specific reminder.\ntype Reminder struct {\n\tStartTime time.Time\n\tTime time.Time\n\tRequester string\n\tTarget string\n\tMessage string\n\tIsPrivate bool\n}\n\n\/\/ ReminderPlugin is a plugin that reminds users.\ntype ReminderPlugin struct {\n\tsync.RWMutex\n\tbot *bruxism.Bot\n\tReminders []*Reminder\n\tTotalReminders int\n}\n\nvar randomTimes = []string{\n\t\"1 minute\",\n\t\"10 minutes\",\n\t\"1 hour\",\n\t\"4 hours\",\n\t\"tomorrow\",\n\t\"next week\",\n}\n\nvar randomMessages = []string{\n\t\"walk the dog\",\n\t\"take pizza out of the oven\",\n\t\"check my email\",\n\t\"feed the baby\",\n\t\"play some quake\",\n}\n\nfunc (p *ReminderPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *ReminderPlugin) randomReminder(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%sreminder %s %s%s\", ticks, service.CommandPrefix(), p.random(randomTimes), p.random(randomMessages), ticks)\n}\n\n\/\/ Help returns a list of help strings that are printed when the user requests them.\nfunc (p *ReminderPlugin) Help(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := []string{\n\t\tbruxism.CommandHelp(service, \"reminder\", \"<time> <reminder>\", \"Sets a reminder that is sent after the provided time.\")[0],\n\t\tbruxism.CommandHelp(service, \"reminderlist\", \"\", \"List all active reminders.\")[0],\n\t\tbruxism.CommandHelp(service, \"reminderdelete\", \"<index>\", \"Deletes a reminder by index. eg: reminderdelete 0\")[0],\n\t}\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples: \",\n\t\t\tp.randomReminder(service),\n\t\t\tp.randomReminder(service),\n\t\t}...)\n\t}\n\treturn help\n}\n\nfunc (p *ReminderPlugin) parseReminder(parts []string) (time.Time, string, error) {\n\tif parts[0] == \"tomorrow\" {\n\t\treturn time.Now().Add(1 * time.Hour * 24), strings.Join(parts[1:], \" \"), nil\n\t}\n\n\tif parts[0] == \"next\" {\n\t\tswitch parts[1] {\n\t\tcase \"week\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"month\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"year\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t\tdefault:\n\t\t\treturn time.Time{}, \"\", errors.New(\"Invalid next.\")\n\t\t}\n\t}\n\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn time.Time{}, \"\", err\n\t}\n\n\tswitch {\n\tcase strings.HasPrefix(parts[1], \"sec\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Second), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"min\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Minute), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"hour\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"day\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"week\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"month\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"year\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t}\n\n\treturn time.Time{}, \"\", errors.New(\"Invalid string.\")\n}\n\n\/\/ AddReminder adds a reminder.\nfunc (p *ReminderPlugin) AddReminder(reminder *Reminder) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\ti := 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Requester == reminder.Requester {\n\t\t\ti++\n\t\t\tif i > 5 {\n\t\t\t\treturn errors.New(\"You have too many reminders already.\")\n\t\t\t}\n\t\t}\n\t}\n\n\ti = 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Time.After(reminder.Time) {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\tp.Reminders = append(p.Reminders, reminder)\n\tcopy(p.Reminders[i+1:], p.Reminders[i:])\n\tp.Reminders[i] = reminder\n\tp.TotalReminders++\n\n\treturn nil\n}\n\nfunc (p *ReminderPlugin) Message(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tdefer bruxism.MessageRecover()\n\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\trequester := message.UserName()\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\trequester = fmt.Sprintf(\"<@%s>\", message.UserID())\n\t}\n\n\tif bruxism.MatchesCommand(service, \"remindlist\", message) || bruxism.MatchesCommand(service, \"reminderlist\", message) {\n\t\treminders := []string{}\n\t\ti := 0\n\t\tfor _, r := range p.Reminders {\n\t\t\tif r.Requester == requester {\n\t\t\t\treminders = append(reminders, fmt.Sprintf(\"%d - %s: %s\", i, humanize.Time(r.Time), r.Message))\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t\tif len(reminders) > 0 {\n\t\t\tif service.SupportsMultiline() {\n\t\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Your reminders:\\n%s\", strings.Join(reminders, \"\\n\")))\n\t\t\t} else {\n\t\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Your reminders: %s\", strings.Join(reminders, \". \")))\n\t\t\t}\n\t\t} else {\n\t\t\tservice.SendMessage(message.Channel(), \"You have no reminders.\")\n\t\t}\n\t\treturn\n\t}\n\n\tif bruxism.MatchesCommand(service, \"reminddelete\", message) || bruxism.MatchesCommand(service, \"reminderdelete\", message) {\n\t\tindex := 0\n\t\tindexString, parts := bruxism.ParseCommand(service, message)\n\t\tvar err error\n\t\tif len(parts) > 0 {\n\t\t\tindex, err = strconv.Atoi(indexString)\n\t\t\tif err != nil {\n\t\t\t\tservice.SendMessage(message.Channel(), \"Invalid reminder.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tj := 0\n\t\tfor i, r := range p.Reminders {\n\t\t\tif r.Requester == requester {\n\t\t\t\tif j == index {\n\t\t\t\t\tp.Lock()\n\t\t\t\t\tp.Reminders = append(p.Reminders[:j], p.Reminders[i+j:]...)\n\t\t\t\t\tp.Unlock()\n\t\t\t\t\tservice.SendMessage(message.Channel(), \"Reminder deleted.\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"remind\", message) && !bruxism.MatchesCommand(service, \"reminder\", message) {\n\t\treturn\n\t}\n\n\t_, parts := bruxism.ParseCommand(service, message)\n\n\tif len(parts) < 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no time or message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tt, r, err := p.parseReminder(parts)\n\n\tnow := time.Now()\n\n\tif err != nil || t.Before(now) || t.After(now.Add(time.Hour*24*365+time.Hour)) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid time. eg: %s\", strings.Join(randomTimes, \", \")))\n\t\treturn\n\t}\n\n\tif r == \"\" {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tif strings.Index(r, \"<@\") != -1 || strings.Index(strings.ToLower(r), \"@everyone\") != -1 || strings.Index(strings.ToLower(r), \"@here\") != -1 {\n\t\t\tservice.SendMessage(message.Channel(), \"Invalid reminder, no mentions, sorry.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = p.AddReminder(&Reminder{\n\t\tStartTime: now,\n\t\tTime: t,\n\t\tRequester: requester,\n\t\tTarget: message.Channel(),\n\t\tMessage: r,\n\t\tIsPrivate: service.IsPrivate(message),\n\t})\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), err.Error())\n\t\treturn\n\t}\n\n\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Reminder set for %s.\", humanize.Time(t)))\n}\n\n\/\/ SendReminder sends a reminder.\nfunc (p *ReminderPlugin) SendReminder(service bruxism.Service, reminder *Reminder) {\n\tif strings.Index(reminder.Message, \"<@\") != -1 || strings.Index(strings.ToLower(reminder.Message), \"@everyone\") != -1 || strings.Index(strings.ToLower(reminder.Message), \"@here\") != -1 {\n\t\treturn\n\t}\n\n\tif reminder.IsPrivate {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s you set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Message))\n\t} else {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s %s set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Requester, reminder.Message))\n\t}\n}\n\n\/\/ Run will block until a reminder needs to be fired and then fire it.\nfunc (p *ReminderPlugin) Run(bot *bruxism.Bot, service bruxism.Service) {\n\tfor {\n\t\tp.RLock()\n\n\t\tif len(p.Reminders) > 0 {\n\t\t\treminder := p.Reminders[0]\n\t\t\tif time.Now().After(reminder.Time) {\n\t\t\t\tp.RUnlock()\n\n\t\t\t\tp.SendReminder(service, reminder)\n\n\t\t\t\tp.Lock()\n\t\t\t\tp.Reminders = p.Reminders[1:]\n\t\t\t\tp.Unlock()\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tp.RUnlock()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\n\/\/ Load will load plugin state from a byte array.\nfunc (p *ReminderPlugin) Load(bot *bruxism.Bot, service bruxism.Service, data []byte) error {\n\tif data != nil {\n\t\tif err := json.Unmarshal(data, p); err != nil {\n\t\t\tlog.Println(\"Error loading data\", err)\n\t\t}\n\t}\n\tif len(p.Reminders) > p.TotalReminders {\n\t\tp.TotalReminders = len(p.Reminders)\n\t}\n\n\tgo p.Run(bot, service)\n\treturn nil\n}\n\n\/\/ Save will save plugin state to a byte array.\nfunc (p *ReminderPlugin) Save() ([]byte, error) {\n\treturn json.Marshal(p)\n}\n\n\/\/ Stats will return the stats for a plugin.\nfunc (p *ReminderPlugin) Stats(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) []string {\n\treturn []string{fmt.Sprintf(\"Reminders: \\t%d\\n\", p.TotalReminders)}\n}\n\n\/\/ Name returns the name of the plugin.\nfunc (p *ReminderPlugin) Name() string {\n\treturn \"Reminder\"\n}\n\n\/\/ New will create a new Reminder plugin.\nfunc New() bruxism.Plugin {\n\treturn &ReminderPlugin{\n\t\tReminders: []*Reminder{},\n\t}\n}\n<commit_msg>I am really bad at development.<commit_after>package reminderplugin\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/iopred\/bruxism\"\n)\n\n\/\/ A Reminder holds data about a specific reminder.\ntype Reminder struct {\n\tStartTime time.Time\n\tTime time.Time\n\tRequester string\n\tTarget string\n\tMessage string\n\tIsPrivate bool\n}\n\n\/\/ ReminderPlugin is a plugin that reminds users.\ntype ReminderPlugin struct {\n\tsync.RWMutex\n\tbot *bruxism.Bot\n\tReminders []*Reminder\n\tTotalReminders int\n}\n\nvar randomTimes = []string{\n\t\"1 minute\",\n\t\"10 minutes\",\n\t\"1 hour\",\n\t\"4 hours\",\n\t\"tomorrow\",\n\t\"next week\",\n}\n\nvar randomMessages = []string{\n\t\"walk the dog\",\n\t\"take pizza out of the oven\",\n\t\"check my email\",\n\t\"feed the baby\",\n\t\"play some quake\",\n}\n\nfunc (p *ReminderPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *ReminderPlugin) randomReminder(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%sreminder %s %s%s\", ticks, service.CommandPrefix(), p.random(randomTimes), p.random(randomMessages), ticks)\n}\n\n\/\/ Help returns a list of help strings that are printed when the user requests them.\nfunc (p *ReminderPlugin) Help(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := []string{\n\t\tbruxism.CommandHelp(service, \"reminder\", \"<time> <reminder>\", \"Sets a reminder that is sent after the provided time.\")[0],\n\t\tbruxism.CommandHelp(service, \"reminderlist\", \"\", \"List all active reminders.\")[0],\n\t\tbruxism.CommandHelp(service, \"reminderdelete\", \"<index>\", \"Deletes a reminder by index. eg: reminderdelete 0\")[0],\n\t}\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples: \",\n\t\t\tp.randomReminder(service),\n\t\t\tp.randomReminder(service),\n\t\t}...)\n\t}\n\treturn help\n}\n\nfunc (p *ReminderPlugin) parseReminder(parts []string) (time.Time, string, error) {\n\tif parts[0] == \"tomorrow\" {\n\t\treturn time.Now().Add(1 * time.Hour * 24), strings.Join(parts[1:], \" \"), nil\n\t}\n\n\tif parts[0] == \"next\" {\n\t\tswitch parts[1] {\n\t\tcase \"week\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"month\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\t\tcase \"year\":\n\t\t\treturn time.Now().Add(1 * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t\tdefault:\n\t\t\treturn time.Time{}, \"\", errors.New(\"Invalid next.\")\n\t\t}\n\t}\n\n\ti, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn time.Time{}, \"\", err\n\t}\n\n\tswitch {\n\tcase strings.HasPrefix(parts[1], \"sec\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Second), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"min\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Minute), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"hour\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"day\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"week\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"month\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 7 * 4), strings.Join(parts[2:], \" \"), nil\n\tcase strings.HasPrefix(parts[1], \"year\"):\n\t\treturn time.Now().Add(time.Duration(i) * time.Hour * 24 * 365), strings.Join(parts[2:], \" \"), nil\n\t}\n\n\treturn time.Time{}, \"\", errors.New(\"Invalid string.\")\n}\n\n\/\/ AddReminder adds a reminder.\nfunc (p *ReminderPlugin) AddReminder(reminder *Reminder) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\ti := 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Requester == reminder.Requester {\n\t\t\ti++\n\t\t\tif i > 5 {\n\t\t\t\treturn errors.New(\"You have too many reminders already.\")\n\t\t\t}\n\t\t}\n\t}\n\n\ti = 0\n\tfor _, r := range p.Reminders {\n\t\tif r.Time.After(reminder.Time) {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\tp.Reminders = append(p.Reminders, reminder)\n\tcopy(p.Reminders[i+1:], p.Reminders[i:])\n\tp.Reminders[i] = reminder\n\tp.TotalReminders++\n\n\treturn nil\n}\n\nfunc (p *ReminderPlugin) Message(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tdefer bruxism.MessageRecover()\n\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\trequester := message.UserName()\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\trequester = fmt.Sprintf(\"<@%s>\", message.UserID())\n\t}\n\n\tif bruxism.MatchesCommand(service, \"remindlist\", message) || bruxism.MatchesCommand(service, \"reminderlist\", message) {\n\t\treminders := []string{}\n\t\ti := 0\n\t\tfor _, r := range p.Reminders {\n\t\t\tif r.Requester == requester {\n\t\t\t\treminders = append(reminders, fmt.Sprintf(\"%d - %s: %s\", i, humanize.Time(r.Time), r.Message))\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t\tif len(reminders) > 0 {\n\t\t\tif service.SupportsMultiline() {\n\t\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Your reminders:\\n%s\", strings.Join(reminders, \"\\n\")))\n\t\t\t} else {\n\t\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Your reminders: %s\", strings.Join(reminders, \". \")))\n\t\t\t}\n\t\t} else {\n\t\t\tservice.SendMessage(message.Channel(), \"You have no reminders.\")\n\t\t}\n\t\treturn\n\t}\n\n\tif bruxism.MatchesCommand(service, \"reminddelete\", message) || bruxism.MatchesCommand(service, \"reminderdelete\", message) {\n\t\tindex := 0\n\t\tindexString, parts := bruxism.ParseCommand(service, message)\n\t\tvar err error\n\t\tif len(parts) > 0 {\n\t\t\tindex, err = strconv.Atoi(indexString)\n\t\t\tif err != nil {\n\t\t\t\tservice.SendMessage(message.Channel(), \"Invalid reminder.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tj := 0\n\t\tfor i, r := range p.Reminders {\n\t\t\tif r.Requester == requester {\n\t\t\t\tif j == index {\n\t\t\t\t\tp.Lock()\n\t\t\t\t\tp.Reminders = append(p.Reminders[:i], p.Reminders[i+1:]...)\n\t\t\t\t\tp.Unlock()\n\t\t\t\t\tservice.SendMessage(message.Channel(), \"Reminder deleted.\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"remind\", message) && !bruxism.MatchesCommand(service, \"reminder\", message) {\n\t\treturn\n\t}\n\n\t_, parts := bruxism.ParseCommand(service, message)\n\n\tif len(parts) < 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no time or message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tt, r, err := p.parseReminder(parts)\n\n\tnow := time.Now()\n\n\tif err != nil || t.Before(now) || t.After(now.Add(time.Hour*24*365+time.Hour)) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid time. eg: %s\", strings.Join(randomTimes, \", \")))\n\t\treturn\n\t}\n\n\tif r == \"\" {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid reminder, no message. eg: %s\", p.randomReminder(service)))\n\t\treturn\n\t}\n\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tif strings.Index(r, \"<@\") != -1 || strings.Index(strings.ToLower(r), \"@everyone\") != -1 || strings.Index(strings.ToLower(r), \"@here\") != -1 {\n\t\t\tservice.SendMessage(message.Channel(), \"Invalid reminder, no mentions, sorry.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = p.AddReminder(&Reminder{\n\t\tStartTime: now,\n\t\tTime: t,\n\t\tRequester: requester,\n\t\tTarget: message.Channel(),\n\t\tMessage: r,\n\t\tIsPrivate: service.IsPrivate(message),\n\t})\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), err.Error())\n\t\treturn\n\t}\n\n\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Reminder set for %s.\", humanize.Time(t)))\n}\n\n\/\/ SendReminder sends a reminder.\nfunc (p *ReminderPlugin) SendReminder(service bruxism.Service, reminder *Reminder) {\n\tif strings.Index(reminder.Message, \"<@\") != -1 || strings.Index(strings.ToLower(reminder.Message), \"@everyone\") != -1 || strings.Index(strings.ToLower(reminder.Message), \"@here\") != -1 {\n\t\treturn\n\t}\n\n\tif reminder.IsPrivate {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s you set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Message))\n\t} else {\n\t\tservice.SendMessage(reminder.Target, fmt.Sprintf(\"%s %s set a reminder: %s\", humanize.Time(reminder.StartTime), reminder.Requester, reminder.Message))\n\t}\n}\n\n\/\/ Run will block until a reminder needs to be fired and then fire it.\nfunc (p *ReminderPlugin) Run(bot *bruxism.Bot, service bruxism.Service) {\n\tfor {\n\t\tp.RLock()\n\n\t\tif len(p.Reminders) > 0 {\n\t\t\treminder := p.Reminders[0]\n\t\t\tif time.Now().After(reminder.Time) {\n\t\t\t\tp.RUnlock()\n\n\t\t\t\tp.SendReminder(service, reminder)\n\n\t\t\t\tp.Lock()\n\t\t\t\tp.Reminders = p.Reminders[1:]\n\t\t\t\tp.Unlock()\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tp.RUnlock()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\n\/\/ Load will load plugin state from a byte array.\nfunc (p *ReminderPlugin) Load(bot *bruxism.Bot, service bruxism.Service, data []byte) error {\n\tif data != nil {\n\t\tif err := json.Unmarshal(data, p); err != nil {\n\t\t\tlog.Println(\"Error loading data\", err)\n\t\t}\n\t}\n\tif len(p.Reminders) > p.TotalReminders {\n\t\tp.TotalReminders = len(p.Reminders)\n\t}\n\n\tgo p.Run(bot, service)\n\treturn nil\n}\n\n\/\/ Save will save plugin state to a byte array.\nfunc (p *ReminderPlugin) Save() ([]byte, error) {\n\treturn json.Marshal(p)\n}\n\n\/\/ Stats will return the stats for a plugin.\nfunc (p *ReminderPlugin) Stats(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) []string {\n\treturn []string{fmt.Sprintf(\"Reminders: \\t%d\\n\", p.TotalReminders)}\n}\n\n\/\/ Name returns the name of the plugin.\nfunc (p *ReminderPlugin) Name() string {\n\treturn \"Reminder\"\n}\n\n\/\/ New will create a new Reminder plugin.\nfunc New() bruxism.Plugin {\n\treturn &ReminderPlugin{\n\t\tReminders: []*Reminder{},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package samples_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloud-foundation-toolkit\/infra\/blueprint-test\/pkg\/tft\"\n\t\"github.com\/GoogleCloudPlatform\/cloud-foundation-toolkit\/infra\/blueprint-test\/pkg\/utils\"\n)\n\nconst (\n\tsetupPath = \"..\/setup\"\n\tsampleDir = \"..\/..\/\"\n)\n\n\/\/ testGroups represents a collection of samples matched to a projectID.\ntype testGroups struct {\n\tprojectID string\n\tgroup int\n\tsamples []string\n}\n\n\/\/ Retry if these errors are encountered.\nvar retryErrors = map[string]string{\n\t\/\/ IAM for Eventarc service agent is eventually consistent\n\t\".*Permission denied while using the Eventarc Service Agent.*\": \"Eventarc Service Agent IAM is eventually consistent\",\n}\n\nfunc TestSamples(t *testing.T) {\n\t\/\/ common test env\n\ttestEnv := map[string]string{\n\t\t\"GOOGLE_REGION\": \"us-central1\",\n\t\t\"GOOGLE_ZONE\": \"us-central1-a\",\n\t}\n\tfor k, v := range testEnv {\n\t\tutils.SetEnv(t, k, v)\n\t}\n\n\t\/\/ This initial blueprint test is to extract output info\n\t\/\/ so we only have to set the env vars once.\n\tsetup := tft.NewTFBlueprintTest(t,\n\t\ttft.WithTFDir(sampleDir),\n\t\ttft.WithSetupPath(setupPath),\n\t)\n\ttestProjectIDs := setup.GetTFSetupOutputListVal(\"project_ids\")\n\t\/\/ number of groups is determined by length of testProjectIDs slice\n\ttestGroups := discoverTestCaseGroups(t, testProjectIDs)\n\n\tfor _, tg := range testGroups {\n\t\tfor _, sample := range tg.samples {\n\t\t\ttestName := fmt.Sprintf(\"%d\/%s\", tg.group, sample)\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\tutils.SetEnv(t, \"GOOGLE_PROJECT\", tg.projectID)\n\t\t\t\tsampleTest := tft.NewTFBlueprintTest(t,\n\t\t\t\t\ttft.WithTFDir(path.Join(sampleDir, sample)),\n\t\t\t\t\ttft.WithSetupPath(setupPath),\n\t\t\t\t\ttft.WithRetryableTerraformErrors(retryErrors, 10, time.Minute),\n\t\t\t\t)\n\t\t\t\tsampleTest.Test()\n\t\t\t})\n\t\t}\n\t}\n}\n\n\/\/ skipDiscoverDirs are directories that are skipped when discovering test cases.\nvar skipDiscoverDirs = map[string]bool{\n\t\"test\": true,\n\t\"build\": true,\n\t\".git\": true,\n}\n\n\/\/ discoverTestCaseGroups discovers individual sample directories in the parent directory\n\/\/ and assigns them to a test group based on projects provided.\n\/\/ It also skips known directories that are not samples.\nfunc discoverTestCaseGroups(t *testing.T, projects []string) []*testGroups {\n\tt.Helper()\n\tsamples := []string{}\n\tdirs, err := ioutil.ReadDir(sampleDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, f := range dirs {\n\t\tif !f.IsDir() || skipDiscoverDirs[f.Name()] {\n\t\t\tcontinue\n\t\t}\n\t\tsamples = append(samples, f.Name())\n\t}\n\tsort.Strings(samples)\n\n\t\/\/ One test group is associated to one project.\n\tgroups := []*testGroups{}\n\tfor i, project := range projects {\n\t\tgroups = append(groups, &testGroups{projectID: project, samples: []string{}, group: i})\n\t}\n\t\/\/ Rather than chunking we assign them in a round robin fashion as some samples like sql takes more time.\n\t\/\/ Chunking would result in all sql* assigned to a single project.\n\t\/\/ We sort the sample slice beforehand so assignments should be stable for a given run.\n\tfor i, sample := range samples {\n\t\tgroupIndex := i % len(projects)\n\t\tgroups[groupIndex].samples = append(groups[groupIndex].samples, sample)\n\t}\n\treturn groups\n}\n<commit_msg>chore: retry service activation, IAM (#181)<commit_after>package samples_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloud-foundation-toolkit\/infra\/blueprint-test\/pkg\/tft\"\n\t\"github.com\/GoogleCloudPlatform\/cloud-foundation-toolkit\/infra\/blueprint-test\/pkg\/utils\"\n)\n\nconst (\n\tsetupPath = \"..\/setup\"\n\tsampleDir = \"..\/..\/\"\n)\n\n\/\/ testGroups represents a collection of samples matched to a projectID.\ntype testGroups struct {\n\tprojectID string\n\tgroup int\n\tsamples []string\n}\n\n\/\/ Retry if these errors are encountered.\nvar retryErrors = map[string]string{\n\t\/\/ IAM for Eventarc service agent is eventually consistent\n\t\".*Permission denied while using the Eventarc Service Agent.*\": \"Eventarc Service Agent IAM is eventually consistent\",\n\t\/\/ API activation is eventually consistent.\n\t\".*SERVICE_DISABLED.*\": \"Service enablement is eventually consistent\",\n\t\/\/ Retry function GCS access\n \".*does not have storage.objects.get access.*\": \"GCS IAM is eventually consistent\",\n}\n\nfunc TestSamples(t *testing.T) {\n\t\/\/ common test env\n\ttestEnv := map[string]string{\n\t\t\"GOOGLE_REGION\": \"us-central1\",\n\t\t\"GOOGLE_ZONE\": \"us-central1-a\",\n\t}\n\tfor k, v := range testEnv {\n\t\tutils.SetEnv(t, k, v)\n\t}\n\n\t\/\/ This initial blueprint test is to extract output info\n\t\/\/ so we only have to set the env vars once.\n\tsetup := tft.NewTFBlueprintTest(t,\n\t\ttft.WithTFDir(sampleDir),\n\t\ttft.WithSetupPath(setupPath),\n\t)\n\ttestProjectIDs := setup.GetTFSetupOutputListVal(\"project_ids\")\n\t\/\/ number of groups is determined by length of testProjectIDs slice\n\ttestGroups := discoverTestCaseGroups(t, testProjectIDs)\n\n\tfor _, tg := range testGroups {\n\t\tfor _, sample := range tg.samples {\n\t\t\ttestName := fmt.Sprintf(\"%d\/%s\", tg.group, sample)\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\tutils.SetEnv(t, \"GOOGLE_PROJECT\", tg.projectID)\n\t\t\t\tsampleTest := tft.NewTFBlueprintTest(t,\n\t\t\t\t\ttft.WithTFDir(path.Join(sampleDir, sample)),\n\t\t\t\t\ttft.WithSetupPath(setupPath),\n\t\t\t\t\ttft.WithRetryableTerraformErrors(retryErrors, 10, time.Minute),\n\t\t\t\t)\n\t\t\t\tsampleTest.Test()\n\t\t\t})\n\t\t}\n\t}\n}\n\n\/\/ skipDiscoverDirs are directories that are skipped when discovering test cases.\nvar skipDiscoverDirs = map[string]bool{\n\t\"test\": true,\n\t\"build\": true,\n\t\".git\": true,\n}\n\n\/\/ discoverTestCaseGroups discovers individual sample directories in the parent directory\n\/\/ and assigns them to a test group based on projects provided.\n\/\/ It also skips known directories that are not samples.\nfunc discoverTestCaseGroups(t *testing.T, projects []string) []*testGroups {\n\tt.Helper()\n\tsamples := []string{}\n\tdirs, err := ioutil.ReadDir(sampleDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, f := range dirs {\n\t\tif !f.IsDir() || skipDiscoverDirs[f.Name()] {\n\t\t\tcontinue\n\t\t}\n\t\tsamples = append(samples, f.Name())\n\t}\n\tsort.Strings(samples)\n\n\t\/\/ One test group is associated to one project.\n\tgroups := []*testGroups{}\n\tfor i, project := range projects {\n\t\tgroups = append(groups, &testGroups{projectID: project, samples: []string{}, group: i})\n\t}\n\t\/\/ Rather than chunking we assign them in a round robin fashion as some samples like sql takes more time.\n\t\/\/ Chunking would result in all sql* assigned to a single project.\n\t\/\/ We sort the sample slice beforehand so assignments should be stable for a given run.\n\tfor i, sample := range samples {\n\t\tgroupIndex := i % len(projects)\n\t\tgroups[groupIndex].samples = append(groups[groupIndex].samples, sample)\n\t}\n\treturn groups\n}\n<|endoftext|>"} {"text":"<commit_before>package consul\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/consul\/acl\"\n\t\"github.com\/hashicorp\/consul\/consul\/structs\"\n)\n\nconst (\n\t\/\/ aclNotFound indicates there is no matching ACL\n\taclNotFound = \"ACL not found\"\n\n\t\/\/ anonymousToken is the token ID we re-write to if there\n\t\/\/ is no token ID provided\n\tanonymousToken = \"anonymous\"\n)\n\n\/\/ aclCacheEntry is used to cache non-authoritative ACL's\n\/\/ If non-authoritative, then we must respect a TTL\ntype aclCacheEntry struct {\n\tACL acl.ACL\n\tExpires time.Time\n\tETag string\n}\n\n\/\/ aclFault is used to fault in the rules for an ACL if we take a miss\nfunc (s *Server) aclFault(id string) (string, error) {\n\tstate := s.fsm.State()\n\t_, acl, err := state.ACLGet(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif acl == nil {\n\t\treturn \"\", errors.New(aclNotFound)\n\t}\n\treturn acl.Rules, nil\n}\n\n\/\/ resolveToken is used to resolve an ACL is any is appropriate\nfunc (s *Server) resolveToken(id string) (acl.ACL, error) {\n\t\/\/ Check if there is no ACL datacenter (ACL's disabled)\n\tauthDC := s.config.ACLDatacenter\n\tif len(authDC) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Handle the anonymous token\n\tif len(id) == 0 {\n\t\tid = anonymousToken\n\t}\n\n\t\/\/ Check if we are the ACL datacenter and the leader, use the\n\t\/\/ authoritative cache\n\tif s.config.Datacenter == authDC && s.IsLeader() {\n\t\treturn s.aclAuthCache.GetACL(id)\n\t}\n\n\t\/\/ Use our non-authoritative cache\n\treturn s.lookupACL(id, authDC)\n}\n\n\/\/ lookupACL is used when we are non-authoritative, and need\n\/\/ to resolve an ACL\nfunc (s *Server) lookupACL(id, authDC string) (acl.ACL, error) {\n\t\/\/ Check the cache for the ACL\n\tvar cached *aclCacheEntry\n\traw, ok := s.aclCache.Get(id)\n\tif ok {\n\t\tcached = raw.(*aclCacheEntry)\n\t}\n\n\t\/\/ Check for live cache\n\tif cached != nil && time.Now().Before(cached.Expires) {\n\t\treturn cached.ACL, nil\n\t}\n\n\t\/\/ Attempt to refresh the policy\n\targs := structs.ACLPolicyRequest{\n\t\tDatacenter: authDC,\n\t\tACL: id,\n\t}\n\tvar out structs.ACLPolicy\n\terr := s.RPC(\"ACL.GetPolicy\", &args, &out)\n\n\t\/\/ Handle the happy path\n\tif err == nil {\n\t\treturn s.useACLPolicy(id, cached, &out)\n\t}\n\n\t\/\/ Check for not-found\n\tif strings.Contains(err.Error(), aclNotFound) {\n\t\treturn nil, errors.New(aclNotFound)\n\t} else {\n\t\ts.logger.Printf(\"[ERR] consul.acl: Failed to get policy for '%s': %v\", id, err)\n\t}\n\n\t\/\/ Unable to refresh, apply the down policy\n\tswitch s.config.ACLDownPolicy {\n\tcase \"allow\":\n\t\treturn acl.AllowAll(), nil\n\tcase \"extend-cache\":\n\t\tif cached != nil {\n\t\t\treturn cached.ACL, nil\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\treturn acl.DenyAll(), nil\n\t}\n}\n\n\/\/ useACLPolicy handles an ACLPolicy response\nfunc (s *Server) useACLPolicy(id string, cached *aclCacheEntry, p *structs.ACLPolicy) (acl.ACL, error) {\n\t\/\/ Check if we can used the cached policy\n\tif cached != nil && cached.ETag == p.ETag {\n\t\tif p.TTL > 0 {\n\t\t\tcached.Expires = time.Now().Add(p.TTL)\n\t\t}\n\t\treturn cached.ACL, nil\n\t}\n\n\t\/\/ Check for a cached compiled policy\n\tvar compiled acl.ACL\n\traw, ok := s.aclPolicyCache.Get(p.ETag)\n\tif ok {\n\t\tcompiled = raw.(acl.ACL)\n\t} else {\n\t\t\/\/ Determine the root policy\n\t\tvar root acl.ACL\n\t\tswitch p.Root {\n\t\tcase \"allow\":\n\t\t\troot = acl.AllowAll()\n\t\tdefault:\n\t\t\troot = acl.DenyAll()\n\t\t}\n\n\t\t\/\/ Compile the ACL\n\t\tacl, err := acl.New(root, p.Policy)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Cache the policy\n\t\ts.aclPolicyCache.Add(p.ETag, acl)\n\t\tcompiled = acl\n\t}\n\n\t\/\/ Cache the ACL\n\tcached = &aclCacheEntry{\n\t\tACL: compiled,\n\t\tETag: p.ETag,\n\t}\n\tif p.TTL > 0 {\n\t\tcached.Expires = time.Now().Add(p.TTL)\n\t}\n\ts.aclCache.Add(id, cached)\n\treturn compiled, nil\n}\n<commit_msg>consul: Adding some metrics for ACL usage<commit_after>package consul\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/armon\/go-metrics\"\n\t\"github.com\/hashicorp\/consul\/acl\"\n\t\"github.com\/hashicorp\/consul\/consul\/structs\"\n)\n\nconst (\n\t\/\/ aclNotFound indicates there is no matching ACL\n\taclNotFound = \"ACL not found\"\n\n\t\/\/ anonymousToken is the token ID we re-write to if there\n\t\/\/ is no token ID provided\n\tanonymousToken = \"anonymous\"\n)\n\n\/\/ aclCacheEntry is used to cache non-authoritative ACL's\n\/\/ If non-authoritative, then we must respect a TTL\ntype aclCacheEntry struct {\n\tACL acl.ACL\n\tExpires time.Time\n\tETag string\n}\n\n\/\/ aclFault is used to fault in the rules for an ACL if we take a miss\nfunc (s *Server) aclFault(id string) (string, error) {\n\tdefer metrics.MeasureSince([]string{\"consul\", \"acl\", \"fault\"}, time.Now())\n\tstate := s.fsm.State()\n\t_, acl, err := state.ACLGet(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif acl == nil {\n\t\treturn \"\", errors.New(aclNotFound)\n\t}\n\treturn acl.Rules, nil\n}\n\n\/\/ resolveToken is used to resolve an ACL is any is appropriate\nfunc (s *Server) resolveToken(id string) (acl.ACL, error) {\n\t\/\/ Check if there is no ACL datacenter (ACL's disabled)\n\tauthDC := s.config.ACLDatacenter\n\tif len(authDC) == 0 {\n\t\treturn nil, nil\n\t}\n\tdefer metrics.MeasureSince([]string{\"consul\", \"acl\", \"resolveToken\"}, time.Now())\n\n\t\/\/ Handle the anonymous token\n\tif len(id) == 0 {\n\t\tid = anonymousToken\n\t}\n\n\t\/\/ Check if we are the ACL datacenter and the leader, use the\n\t\/\/ authoritative cache\n\tif s.config.Datacenter == authDC && s.IsLeader() {\n\t\treturn s.aclAuthCache.GetACL(id)\n\t}\n\n\t\/\/ Use our non-authoritative cache\n\treturn s.lookupACL(id, authDC)\n}\n\n\/\/ lookupACL is used when we are non-authoritative, and need\n\/\/ to resolve an ACL\nfunc (s *Server) lookupACL(id, authDC string) (acl.ACL, error) {\n\t\/\/ Check the cache for the ACL\n\tvar cached *aclCacheEntry\n\traw, ok := s.aclCache.Get(id)\n\tif ok {\n\t\tcached = raw.(*aclCacheEntry)\n\t}\n\n\t\/\/ Check for live cache\n\tif cached != nil && time.Now().Before(cached.Expires) {\n\t\tmetrics.IncrCounter([]string{\"consul\", \"acl\", \"cache_hit\"}, 1)\n\t\treturn cached.ACL, nil\n\t} else {\n\t\tmetrics.IncrCounter([]string{\"consul\", \"acl\", \"cache_miss\"}, 1)\n\t}\n\n\t\/\/ Attempt to refresh the policy\n\targs := structs.ACLPolicyRequest{\n\t\tDatacenter: authDC,\n\t\tACL: id,\n\t}\n\tvar out structs.ACLPolicy\n\terr := s.RPC(\"ACL.GetPolicy\", &args, &out)\n\n\t\/\/ Handle the happy path\n\tif err == nil {\n\t\treturn s.useACLPolicy(id, cached, &out)\n\t}\n\n\t\/\/ Check for not-found\n\tif strings.Contains(err.Error(), aclNotFound) {\n\t\treturn nil, errors.New(aclNotFound)\n\t} else {\n\t\ts.logger.Printf(\"[ERR] consul.acl: Failed to get policy for '%s': %v\", id, err)\n\t}\n\n\t\/\/ Unable to refresh, apply the down policy\n\tswitch s.config.ACLDownPolicy {\n\tcase \"allow\":\n\t\treturn acl.AllowAll(), nil\n\tcase \"extend-cache\":\n\t\tif cached != nil {\n\t\t\treturn cached.ACL, nil\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\treturn acl.DenyAll(), nil\n\t}\n}\n\n\/\/ useACLPolicy handles an ACLPolicy response\nfunc (s *Server) useACLPolicy(id string, cached *aclCacheEntry, p *structs.ACLPolicy) (acl.ACL, error) {\n\t\/\/ Check if we can used the cached policy\n\tif cached != nil && cached.ETag == p.ETag {\n\t\tif p.TTL > 0 {\n\t\t\tcached.Expires = time.Now().Add(p.TTL)\n\t\t}\n\t\treturn cached.ACL, nil\n\t}\n\n\t\/\/ Check for a cached compiled policy\n\tvar compiled acl.ACL\n\traw, ok := s.aclPolicyCache.Get(p.ETag)\n\tif ok {\n\t\tcompiled = raw.(acl.ACL)\n\t} else {\n\t\t\/\/ Determine the root policy\n\t\tvar root acl.ACL\n\t\tswitch p.Root {\n\t\tcase \"allow\":\n\t\t\troot = acl.AllowAll()\n\t\tdefault:\n\t\t\troot = acl.DenyAll()\n\t\t}\n\n\t\t\/\/ Compile the ACL\n\t\tacl, err := acl.New(root, p.Policy)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Cache the policy\n\t\ts.aclPolicyCache.Add(p.ETag, acl)\n\t\tcompiled = acl\n\t}\n\n\t\/\/ Cache the ACL\n\tcached = &aclCacheEntry{\n\t\tACL: compiled,\n\t\tETag: p.ETag,\n\t}\n\tif p.TTL > 0 {\n\t\tcached.Expires = time.Now().Add(p.TTL)\n\t}\n\ts.aclCache.Add(id, cached)\n\treturn compiled, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package xlog\n\nimport \"io\"\n\n\/\/ globalInstance stores the global logger.\nvar globalInstance *Logger\n\n\/\/ Close releases any resources held by the global logger. The logger should\n\/\/ not be used again after calling this method without re-configuring it, as\n\/\/ this method sets the global instance to nil.\nfunc Close() {\n\tinstance().Close()\n\tglobalInstance = nil\n}\n\n\/\/ SetName sets the name of the global logger.\nfunc SetName(name string) {\n\tinstance().SetName(name)\n}\n\n\/\/ Append adds a file to the global logger.\nfunc Append(file string, level Level) {\n\tinstance().Append(file, level)\n}\n\n\/\/ MultiAppend adds one or more files to the global logger.\nfunc MultiAppend(files []string, level Level) {\n\tinstance().MultiAppend(files, level)\n}\n\n\/\/ AppendWriter adds a writer to the global logger.\nfunc AppendWriter(writer io.Writer, level Level) {\n\tinstance().AppendWriter(writer, level)\n}\n\n\/\/ MultiAppendWriters adds one or more io.Writer instances to the global logger.\nfunc MultiAppendWriters(writers []io.Writer, level Level) {\n\tinstance().MultiAppendWriters(writers, level)\n}\n\n\/\/ Writable returns true when global logging is enabled, and the global logger\n\/\/ hasn't been closed.\nfunc Writable() bool {\n\treturn instance().Writable()\n}\n\n\/\/ Log writes the message to each logger appended to the global logger at given level\n\/\/ or higher.\nfunc Log(level Level, v ...interface{}) {\n\tinstance().Log(level, v...)\n}\n\n\/\/ Logf writes the message to each logger appended to the global logger given\n\/\/ level or higher.\nfunc Logf(level Level, format string, v ...interface{}) {\n\tinstance().Logf(level, format, v...)\n}\n\n\/\/ Debug writes to the global logger at DebugLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Debug(v ...interface{}) {\n\tinstance().Debug(v...)\n}\n\n\/\/ Debugf writes to the global logger at DebugLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Debugf(format string, v ...interface{}) {\n\tinstance().Debugf(format, v...)\n}\n\n\/\/ Info writes to the global logger at InfoLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Info(v ...interface{}) {\n\tinstance().Info(v...)\n}\n\n\/\/ Infof writes to the global logger at InfoLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Infof(format string, v ...interface{}) {\n\tinstance().Infof(format, v...)\n}\n\n\/\/ Notice writes to the global logger at NoticeLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Notice(v ...interface{}) {\n\tinstance().Notice(v...)\n}\n\n\/\/ Noticef writes to the global logger at NoticeLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Noticef(format string, v ...interface{}) {\n\tinstance().Noticef(format, v...)\n}\n\n\/\/ Warning writes to the global logger at WarningLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Warning(v ...interface{}) {\n\tinstance().Warning(v...)\n}\n\n\/\/ Warningf writes to the global logger at WarningLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Warningf(format string, v ...interface{}) {\n\tinstance().Warningf(format, v...)\n}\n\n\/\/ Error writes to the global logger at ErrorLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Error(v ...interface{}) {\n\tinstance().Error(v...)\n}\n\n\/\/ Errorf writes to the global logger at ErrorLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Errorf(format string, v ...interface{}) {\n\tinstance().Errorf(format, v...)\n}\n\n\/\/ Critical writes to the global logger at CriticalLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Critical(v ...interface{}) {\n\tinstance().Critical(v...)\n}\n\n\/\/ Criticalf writes to the global logger at CriticalLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Criticalf(format string, v ...interface{}) {\n\tinstance().Criticalf(format, v...)\n}\n\n\/\/ Alert writes to the global logger at AlertLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Alert(v ...interface{}) {\n\tinstance().Alert(v...)\n}\n\n\/\/ Alertf writes to the global logger at AlertLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Alertf(format string, v ...interface{}) {\n\tinstance().Alertf(format, v...)\n}\n\n\/\/ Emergency writes to the global logger at EmergencyLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Emergency(v ...interface{}) {\n\tinstance().Emergency(v...)\n}\n\n\/\/ Emergencyf writes to the global logger at EmergencyLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Emergencyf(format string, v ...interface{}) {\n\tinstance().Emergencyf(format, v...)\n}\n\n\/\/ instance calls panic() when the global logger has not been configured.\nfunc instance() *Logger {\n\tif globalInstance == nil {\n\t\tglobalInstance = NewLogger(\"xlog\")\n\t}\n\treturn globalInstance\n}\n<commit_msg>Added some global functions<commit_after>package xlog\n\nimport \"io\"\n\n\/\/ globalInstance stores the global logger.\nvar globalInstance *Logger\n\n\/\/ Close releases any resources held by the global logger. The logger should\n\/\/ not be used again after calling this method without re-configuring it, as\n\/\/ this method sets the global instance to nil.\nfunc Close() {\n\tinstance().Close()\n\tglobalInstance = nil\n}\n\n\/\/ SetName sets the name of the global logger.\nfunc SetName(name string) {\n\tinstance().SetName(name)\n}\n\n\/\/ SetFormatter sets the formatter used by the global logger.\nfunc SetFormatter(formatter Formatter) {\n\tinstance().Formatter = formatter\n}\n\n\/\/ SetLoggerMap sets the logger map used by the global logger.\nfunc SetLoggerMap(lm LoggerMap) {\n\tinstance().Loggers = lm\n}\n\n\/\/ Enabled returns whether the global logger is enabled.\nfunc Enabled() bool {\n\treturn instance().Enabled\n}\n\n\/\/ SetEnabled sets whether the global logger is enabled.\nfunc SetEnabled(enabled bool) {\n\tinstance().Enabled = enabled\n}\n\n\/\/ Append adds a file to the global logger.\nfunc Append(file string, level Level) {\n\tinstance().Append(file, level)\n}\n\n\/\/ MultiAppend adds one or more files to the global logger.\nfunc MultiAppend(files []string, level Level) {\n\tinstance().MultiAppend(files, level)\n}\n\n\/\/ AppendWriter adds a writer to the global logger.\nfunc AppendWriter(writer io.Writer, level Level) {\n\tinstance().AppendWriter(writer, level)\n}\n\n\/\/ MultiAppendWriters adds one or more io.Writer instances to the global logger.\nfunc MultiAppendWriters(writers []io.Writer, level Level) {\n\tinstance().MultiAppendWriters(writers, level)\n}\n\n\/\/ Writable returns true when global logging is enabled, and the global logger\n\/\/ hasn't been closed.\nfunc Writable() bool {\n\treturn instance().Writable()\n}\n\n\/\/ Log writes the message to each logger appended to the global logger at given level\n\/\/ or higher.\nfunc Log(level Level, v ...interface{}) {\n\tinstance().Log(level, v...)\n}\n\n\/\/ Logf writes the message to each logger appended to the global logger given\n\/\/ level or higher.\nfunc Logf(level Level, format string, v ...interface{}) {\n\tinstance().Logf(level, format, v...)\n}\n\n\/\/ Debug writes to the global logger at DebugLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Debug(v ...interface{}) {\n\tinstance().Debug(v...)\n}\n\n\/\/ Debugf writes to the global logger at DebugLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Debugf(format string, v ...interface{}) {\n\tinstance().Debugf(format, v...)\n}\n\n\/\/ Info writes to the global logger at InfoLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Info(v ...interface{}) {\n\tinstance().Info(v...)\n}\n\n\/\/ Infof writes to the global logger at InfoLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Infof(format string, v ...interface{}) {\n\tinstance().Infof(format, v...)\n}\n\n\/\/ Notice writes to the global logger at NoticeLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Notice(v ...interface{}) {\n\tinstance().Notice(v...)\n}\n\n\/\/ Noticef writes to the global logger at NoticeLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Noticef(format string, v ...interface{}) {\n\tinstance().Noticef(format, v...)\n}\n\n\/\/ Warning writes to the global logger at WarningLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Warning(v ...interface{}) {\n\tinstance().Warning(v...)\n}\n\n\/\/ Warningf writes to the global logger at WarningLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Warningf(format string, v ...interface{}) {\n\tinstance().Warningf(format, v...)\n}\n\n\/\/ Error writes to the global logger at ErrorLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Error(v ...interface{}) {\n\tinstance().Error(v...)\n}\n\n\/\/ Errorf writes to the global logger at ErrorLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Errorf(format string, v ...interface{}) {\n\tinstance().Errorf(format, v...)\n}\n\n\/\/ Critical writes to the global logger at CriticalLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Critical(v ...interface{}) {\n\tinstance().Critical(v...)\n}\n\n\/\/ Criticalf writes to the global logger at CriticalLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Criticalf(format string, v ...interface{}) {\n\tinstance().Criticalf(format, v...)\n}\n\n\/\/ Alert writes to the global logger at AlertLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Alert(v ...interface{}) {\n\tinstance().Alert(v...)\n}\n\n\/\/ Alertf writes to the global logger at AlertLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Alertf(format string, v ...interface{}) {\n\tinstance().Alertf(format, v...)\n}\n\n\/\/ Emergency writes to the global logger at EmergencyLevel.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc Emergency(v ...interface{}) {\n\tinstance().Emergency(v...)\n}\n\n\/\/ Emergencyf writes to the global logger at EmergencyLevel.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc Emergencyf(format string, v ...interface{}) {\n\tinstance().Emergencyf(format, v...)\n}\n\n\/\/ instance calls panic() when the global logger has not been configured.\nfunc instance() *Logger {\n\tif globalInstance == nil {\n\t\tglobalInstance = NewLogger(\"xlog\")\n\t}\n\treturn globalInstance\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Tomas Machalek <tomas.machalek@gmail.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/tomachalek\/gloomy\/index\/builder\"\n\t\"github.com\/tomachalek\/gloomy\/index\/extras\"\n\t\"github.com\/tomachalek\/gloomy\/index\/gconf\"\n\t\"github.com\/tomachalek\/gloomy\/service\"\n)\n\nconst (\n\tcreateIndexAction = \"create-index\"\n\textractNgramsAction = \"extract-ngrams\"\n\tsearchServiceAction = \"search-service\"\n\tsearchAction = \"search\"\n)\n\nfunc help() {\n\tfmt.Println(\"HELP:\")\n}\n\nfunc createIndex(conf *gconf.IndexBuilderConf, ngramSize int) {\n\tif conf.VerticalFilePath == \"\" {\n\t\tfmt.Println(\"Vertical file not specified\")\n\t\tos.Exit(1)\n\t}\n\tlog.Println(\"Importing vertical file \", conf.VerticalFilePath)\n\tif conf.OutDirectory == \"\" {\n\t\tconf.OutDirectory = filepath.Dir(conf.VerticalFilePath)\n\t}\n\tfmt.Println(\"Output directory: \", conf.OutDirectory)\n\tt0 := time.Now()\n\tbuilder.CreateGloomyIndex(conf, ngramSize)\n\tfmt.Printf(\"DONE in %s\\n\", time.Since(t0))\n}\n\nfunc extractNgrams(conf *gconf.IndexBuilderConf, ngramSize int) {\n\tif conf.VerticalFilePath == \"\" {\n\t\tfmt.Println(\"Vertical file not specified\")\n\t\tos.Exit(1)\n\t}\n\tlog.Println(\"Processing vertical file \", conf.VerticalFilePath)\n\tif conf.OutDirectory == \"\" {\n\t\tconf.OutDirectory = filepath.Dir(conf.VerticalFilePath)\n\t}\n\tfmt.Println(\"Output directory: \", conf.OutDirectory)\n\tt0 := time.Now()\n\textras.ExtractUniqueNgrams(conf, ngramSize)\n\tfmt.Printf(\"DONE in %s\\n\", time.Since(t0))\n}\n\nfunc loadSearchConf(confBasePath string) *gconf.SearchConf {\n\tif confBasePath == \"\" {\n\t\tvar err error\n\t\tconfBasePath, err = os.Getwd()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tconfBasePath = filepath.Join(confBasePath, \"gloomy.json\")\n\t}\n\treturn gconf.LoadSearchConf(confBasePath)\n}\n\nfunc searchCLI(confBasePath string, query string) {\n\tconf := loadSearchConf(confBasePath)\n\tans, err := service.Search(conf.DataPath, \"susanne\", query)\n\tif err != nil {\n\t\tlog.Printf(\"Srch error: %s\", err)\n\t}\n\tfor i := 0; ans.HasNext(); i++ {\n\t\tv := ans.Next()\n\t\tlog.Printf(\"res[%d]: %s\", i, v)\n\t}\n}\n\nfunc startSearchService(confBasePath string) {\n\tconf := loadSearchConf(confBasePath)\n\tservice.Serve(conf)\n}\n\nfunc main() {\n\tngramSize := flag.Int(\"ngram-size\", 2, \"N-gram size, 2: bigram (default), ...\")\n\tsrchConfPath := flag.String(\"conf-path\", \"\", \"Path to the gloomy.conf (by default, working dir is used\")\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tfmt.Println(\"Missing action, try -h for help\")\n\t\tos.Exit(1)\n\n\t} else {\n\t\tswitch flag.Arg(0) {\n\t\tcase \"help\":\n\t\t\thelp()\n\t\tcase createIndexAction:\n\t\t\tconf := gconf.LoadIndexBuilderConf(flag.Arg(1))\n\t\t\tcreateIndex(conf, *ngramSize)\n\t\tcase extractNgramsAction:\n\t\t\tconf := gconf.LoadIndexBuilderConf(flag.Arg(1))\n\t\t\textractNgrams(conf, *ngramSize)\n\t\tcase searchServiceAction:\n\t\t\tstartSearchService(*srchConfPath)\n\t\tcase searchAction:\n\t\t\tsearchCLI(*srchConfPath, flag.Arg(1))\n\t\tdefault:\n\t\t\tpanic(\"Unknown action\")\n\t\t}\n\t}\n}\n<commit_msg>Remove hardcoded 'susanne' corpus<commit_after>\/\/ Copyright 2017 Tomas Machalek <tomas.machalek@gmail.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/tomachalek\/gloomy\/index\/builder\"\n\t\"github.com\/tomachalek\/gloomy\/index\/extras\"\n\t\"github.com\/tomachalek\/gloomy\/index\/gconf\"\n\t\"github.com\/tomachalek\/gloomy\/service\"\n)\n\nconst (\n\tcreateIndexAction = \"create-index\"\n\textractNgramsAction = \"extract-ngrams\"\n\tsearchServiceAction = \"search-service\"\n\tsearchAction = \"search\"\n)\n\nfunc help() {\n\tfmt.Println(\"HELP:\")\n}\n\nfunc createIndex(conf *gconf.IndexBuilderConf, ngramSize int) {\n\tif conf.VerticalFilePath == \"\" {\n\t\tfmt.Println(\"Vertical file not specified\")\n\t\tos.Exit(1)\n\t}\n\tlog.Println(\"Importing vertical file \", conf.VerticalFilePath)\n\tif conf.OutDirectory == \"\" {\n\t\tconf.OutDirectory = filepath.Dir(conf.VerticalFilePath)\n\t}\n\tfmt.Println(\"Output directory: \", conf.OutDirectory)\n\tt0 := time.Now()\n\tbuilder.CreateGloomyIndex(conf, ngramSize)\n\tfmt.Printf(\"DONE in %s\\n\", time.Since(t0))\n}\n\nfunc extractNgrams(conf *gconf.IndexBuilderConf, ngramSize int) {\n\tif conf.VerticalFilePath == \"\" {\n\t\tfmt.Println(\"Vertical file not specified\")\n\t\tos.Exit(1)\n\t}\n\tlog.Println(\"Processing vertical file \", conf.VerticalFilePath)\n\tif conf.OutDirectory == \"\" {\n\t\tconf.OutDirectory = filepath.Dir(conf.VerticalFilePath)\n\t}\n\tfmt.Println(\"Output directory: \", conf.OutDirectory)\n\tt0 := time.Now()\n\textras.ExtractUniqueNgrams(conf, ngramSize)\n\tfmt.Printf(\"DONE in %s\\n\", time.Since(t0))\n}\n\nfunc loadSearchConf(confBasePath string) *gconf.SearchConf {\n\tif confBasePath == \"\" {\n\t\tvar err error\n\t\tconfBasePath, err = os.Getwd()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tconfBasePath = filepath.Join(confBasePath, \"gloomy.json\")\n\t}\n\treturn gconf.LoadSearchConf(confBasePath)\n}\n\nfunc searchCLI(confBasePath string, corpus string, query string) {\n\tconf := loadSearchConf(confBasePath)\n\tans, err := service.Search(conf.DataPath, corpus, query)\n\tif err != nil {\n\t\tlog.Printf(\"Srch error: %s\", err)\n\t}\n\tfor i := 0; ans.HasNext(); i++ {\n\t\tv := ans.Next()\n\t\tlog.Printf(\"res[%d]: %s\", i, v)\n\t}\n}\n\nfunc startSearchService(confBasePath string) {\n\tconf := loadSearchConf(confBasePath)\n\tservice.Serve(conf)\n}\n\nfunc main() {\n\tngramSize := flag.Int(\"ngram-size\", 2, \"N-gram size, 2: bigram (default), ...\")\n\tsrchConfPath := flag.String(\"conf-path\", \"\", \"Path to the gloomy.conf (by default, working dir is used\")\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tfmt.Println(\"Missing action, try -h for help\")\n\t\tos.Exit(1)\n\n\t} else {\n\t\tswitch flag.Arg(0) {\n\t\tcase \"help\":\n\t\t\thelp()\n\t\tcase createIndexAction:\n\t\t\tconf := gconf.LoadIndexBuilderConf(flag.Arg(1))\n\t\t\tcreateIndex(conf, *ngramSize)\n\t\tcase extractNgramsAction:\n\t\t\tconf := gconf.LoadIndexBuilderConf(flag.Arg(1))\n\t\t\textractNgrams(conf, *ngramSize)\n\t\tcase searchServiceAction:\n\t\t\tstartSearchService(*srchConfPath)\n\t\tcase searchAction:\n\t\t\tif flag.Arg(1) == \"\" || flag.Arg(2) == \"\" {\n\t\t\t\tlog.Fatal(\"Missing argument (both corpus and query must be specified)\")\n\t\t\t}\n\t\t\tsearchCLI(*srchConfPath, flag.Arg(1), flag.Arg(2))\n\t\tdefault:\n\t\t\tpanic(\"Unknown action\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-2013 Apcera Inc. All rights reserved.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/apcera\/gnatsd\/auth\"\n\t\"github.com\/apcera\/gnatsd\/logger\"\n\t\"github.com\/apcera\/gnatsd\/server\"\n)\n\nfunc main() {\n\t\/\/ Server Options\n\topts := server.Options{}\n\n\tvar showVersion bool\n\tvar debugAndTrace bool\n\tvar configFile string\n\n\t\/\/ Parse flags\n\tflag.IntVar(&opts.Port, \"port\", 0, \"Port to listen on.\")\n\tflag.IntVar(&opts.Port, \"p\", 0, \"Port to listen on.\")\n\tflag.StringVar(&opts.Host, \"addr\", \"\", \"Network host to listen on.\")\n\tflag.StringVar(&opts.Host, \"a\", \"\", \"Network host to listen on.\")\n\tflag.StringVar(&opts.Host, \"net\", \"\", \"Network host to listen on.\")\n\tflag.BoolVar(&opts.Debug, \"D\", false, \"Enable Debug logging.\")\n\tflag.BoolVar(&opts.Debug, \"debug\", false, \"Enable Debug logging.\")\n\tflag.BoolVar(&opts.Trace, \"V\", false, \"Enable Trace logging.\")\n\tflag.BoolVar(&opts.Trace, \"trace\", false, \"Enable Trace logging.\")\n\tflag.BoolVar(&debugAndTrace, \"DV\", false, \"Enable Debug and Trace logging.\")\n\tflag.BoolVar(&opts.Logtime, \"T\", true, \"Timestamp log entries.\")\n\tflag.BoolVar(&opts.Logtime, \"logtime\", true, \"Timestamp log entries.\")\n\tflag.StringVar(&opts.Username, \"user\", \"\", \"Username required for connection.\")\n\tflag.StringVar(&opts.Password, \"pass\", \"\", \"Password required for connection.\")\n\tflag.StringVar(&opts.Authorization, \"auth\", \"\", \"Authorization token required for connection.\")\n\tflag.IntVar(&opts.HTTPPort, \"m\", 0, \"HTTP Port for \/varz, \/connz endpoints.\")\n\tflag.IntVar(&opts.HTTPPort, \"http_port\", 0, \"HTTP Port for \/varz, \/connz endpoints.\")\n\tflag.StringVar(&configFile, \"c\", \"\", \"Configuration file.\")\n\tflag.StringVar(&configFile, \"config\", \"\", \"Configuration file.\")\n\tflag.StringVar(&opts.PidFile, \"P\", \"\", \"File to store process pid.\")\n\tflag.StringVar(&opts.PidFile, \"pid\", \"\", \"File to store process pid.\")\n\tflag.StringVar(&opts.LogFile, \"l\", \"\", \"File to store logging output.\")\n\tflag.StringVar(&opts.LogFile, \"log\", \"\", \"File to store logging output.\")\n\tflag.BoolVar(&opts.Syslog, \"s\", false, \"Enable syslog as log method.\")\n\tflag.BoolVar(&opts.Syslog, \"syslog\", false, \"Enable syslog as log method..\")\n\tflag.StringVar(&opts.RemoteSyslog, \"r\", \"\", \"Syslog server addr (udp:\/\/localhost:514).\")\n\tflag.StringVar(&opts.RemoteSyslog, \"remote_syslog\", \"\", \"Syslog server addr (udp:\/\/localhost:514).\")\n\tflag.BoolVar(&showVersion, \"version\", false, \"Print version information.\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"Print version information.\")\n\tflag.IntVar(&opts.ProfPort, \"profile\", 0, \"Profiling HTTP port\")\n\n\tflag.Usage = server.Usage\n\n\tflag.Parse()\n\n\t\/\/ Show version and exit\n\tif showVersion {\n\t\tserver.PrintServerAndExit()\n\t}\n\n\t\/\/ One flag can set multiple options.\n\tif debugAndTrace {\n\t\topts.Trace, opts.Debug = true, true\n\t}\n\n\t\/\/ Process args looking for non-flag options,\n\t\/\/ 'version' and 'help' only for now\n\tfor _, arg := range flag.Args() {\n\t\tswitch strings.ToLower(arg) {\n\t\tcase \"version\":\n\t\t\tserver.PrintServerAndExit()\n\t\tcase \"help\":\n\t\t\tserver.Usage()\n\t\t}\n\t}\n\n\t\/\/ Parse config if given\n\tif configFile != \"\" {\n\t\tfileOpts, err := server.ProcessConfigFile(configFile)\n\t\tif err != nil {\n\t\t\tserver.PrintAndDie(err.Error())\n\t\t}\n\t\topts = *server.MergeOptions(fileOpts, &opts)\n\t}\n\n\t\/\/ Remove any host\/ip that points to itself in Route\n\tnewroutes, err := server.RemoveSelfReference(opts.ClusterPort, opts.Routes)\n\tif err != nil {\n\t\tserver.PrintAndDie(err.Error())\n\t}\n\topts.Routes = newroutes\n\n\t\/\/ Create the server with appropriate options.\n\ts := server.New(&opts)\n\n\t\/\/ Configure the authentication mechanism\n\tconfigureAuth(s, &opts)\n\n\t\/\/ Configure the logger based on the flags\n\tconfigureLogger(s, &opts)\n\n\t\/\/ Start things up. Block here until done.\n\ts.Start()\n}\n\nfunc configureAuth(s *server.Server, opts *server.Options) {\n\tif opts.Username != \"\" {\n\t\tauth := &auth.Plain{\n\t\t\tUsername: opts.Username,\n\t\t\tPassword: opts.Password,\n\t\t}\n\n\t\ts.SetAuthMethod(auth)\n\t} else if opts.Authorization != \"\" {\n\t\tauth := &auth.Token{\n\t\t\tToken: opts.Authorization,\n\t\t}\n\n\t\ts.SetAuthMethod(auth)\n\t}\n}\n\nfunc configureLogger(s *server.Server, opts *server.Options) {\n\tvar log server.Logger\n\n\tif opts.LogFile != \"\" {\n\t\tlog = logger.NewFileLogger(opts.LogFile, opts.Logtime, opts.Debug, opts.Trace, true)\n\t} else if opts.RemoteSyslog != \"\" {\n\t\tlog = logger.NewRemoteSysLogger(opts.RemoteSyslog, opts.Debug, opts.Trace)\n\t} else if opts.Syslog {\n\t\tlog = logger.NewSysLogger(opts.Debug, opts.Trace)\n\t} else {\n\t\tcolors := true\n\t\t\/\/ Check to see if stderr is being redirected and if so turn off color\n\t\tstat, _ := os.Stderr.Stat()\n\t\tif (stat.Mode() & os.ModeCharDevice) == 0 {\n\t\t\tcolors = false\n\t\t}\n\t\tlog = logger.NewStdLogger(opts.Logtime, opts.Debug, opts.Trace, colors, true)\n\t}\n\n\ts.SetLogger(log, opts.Debug, opts.Trace)\n}\n<commit_msg>handle stderr stat() throwing an error on Windows<commit_after>\/\/ Copyright 2012-2013 Apcera Inc. All rights reserved.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/apcera\/gnatsd\/auth\"\n\t\"github.com\/apcera\/gnatsd\/logger\"\n\t\"github.com\/apcera\/gnatsd\/server\"\n)\n\nfunc main() {\n\t\/\/ Server Options\n\topts := server.Options{}\n\n\tvar showVersion bool\n\tvar debugAndTrace bool\n\tvar configFile string\n\n\t\/\/ Parse flags\n\tflag.IntVar(&opts.Port, \"port\", 0, \"Port to listen on.\")\n\tflag.IntVar(&opts.Port, \"p\", 0, \"Port to listen on.\")\n\tflag.StringVar(&opts.Host, \"addr\", \"\", \"Network host to listen on.\")\n\tflag.StringVar(&opts.Host, \"a\", \"\", \"Network host to listen on.\")\n\tflag.StringVar(&opts.Host, \"net\", \"\", \"Network host to listen on.\")\n\tflag.BoolVar(&opts.Debug, \"D\", false, \"Enable Debug logging.\")\n\tflag.BoolVar(&opts.Debug, \"debug\", false, \"Enable Debug logging.\")\n\tflag.BoolVar(&opts.Trace, \"V\", false, \"Enable Trace logging.\")\n\tflag.BoolVar(&opts.Trace, \"trace\", false, \"Enable Trace logging.\")\n\tflag.BoolVar(&debugAndTrace, \"DV\", false, \"Enable Debug and Trace logging.\")\n\tflag.BoolVar(&opts.Logtime, \"T\", true, \"Timestamp log entries.\")\n\tflag.BoolVar(&opts.Logtime, \"logtime\", true, \"Timestamp log entries.\")\n\tflag.StringVar(&opts.Username, \"user\", \"\", \"Username required for connection.\")\n\tflag.StringVar(&opts.Password, \"pass\", \"\", \"Password required for connection.\")\n\tflag.StringVar(&opts.Authorization, \"auth\", \"\", \"Authorization token required for connection.\")\n\tflag.IntVar(&opts.HTTPPort, \"m\", 0, \"HTTP Port for \/varz, \/connz endpoints.\")\n\tflag.IntVar(&opts.HTTPPort, \"http_port\", 0, \"HTTP Port for \/varz, \/connz endpoints.\")\n\tflag.StringVar(&configFile, \"c\", \"\", \"Configuration file.\")\n\tflag.StringVar(&configFile, \"config\", \"\", \"Configuration file.\")\n\tflag.StringVar(&opts.PidFile, \"P\", \"\", \"File to store process pid.\")\n\tflag.StringVar(&opts.PidFile, \"pid\", \"\", \"File to store process pid.\")\n\tflag.StringVar(&opts.LogFile, \"l\", \"\", \"File to store logging output.\")\n\tflag.StringVar(&opts.LogFile, \"log\", \"\", \"File to store logging output.\")\n\tflag.BoolVar(&opts.Syslog, \"s\", false, \"Enable syslog as log method.\")\n\tflag.BoolVar(&opts.Syslog, \"syslog\", false, \"Enable syslog as log method..\")\n\tflag.StringVar(&opts.RemoteSyslog, \"r\", \"\", \"Syslog server addr (udp:\/\/localhost:514).\")\n\tflag.StringVar(&opts.RemoteSyslog, \"remote_syslog\", \"\", \"Syslog server addr (udp:\/\/localhost:514).\")\n\tflag.BoolVar(&showVersion, \"version\", false, \"Print version information.\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"Print version information.\")\n\tflag.IntVar(&opts.ProfPort, \"profile\", 0, \"Profiling HTTP port\")\n\n\tflag.Usage = server.Usage\n\n\tflag.Parse()\n\n\t\/\/ Show version and exit\n\tif showVersion {\n\t\tserver.PrintServerAndExit()\n\t}\n\n\t\/\/ One flag can set multiple options.\n\tif debugAndTrace {\n\t\topts.Trace, opts.Debug = true, true\n\t}\n\n\t\/\/ Process args looking for non-flag options,\n\t\/\/ 'version' and 'help' only for now\n\tfor _, arg := range flag.Args() {\n\t\tswitch strings.ToLower(arg) {\n\t\tcase \"version\":\n\t\t\tserver.PrintServerAndExit()\n\t\tcase \"help\":\n\t\t\tserver.Usage()\n\t\t}\n\t}\n\n\t\/\/ Parse config if given\n\tif configFile != \"\" {\n\t\tfileOpts, err := server.ProcessConfigFile(configFile)\n\t\tif err != nil {\n\t\t\tserver.PrintAndDie(err.Error())\n\t\t}\n\t\topts = *server.MergeOptions(fileOpts, &opts)\n\t}\n\n\t\/\/ Remove any host\/ip that points to itself in Route\n\tnewroutes, err := server.RemoveSelfReference(opts.ClusterPort, opts.Routes)\n\tif err != nil {\n\t\tserver.PrintAndDie(err.Error())\n\t}\n\topts.Routes = newroutes\n\n\t\/\/ Create the server with appropriate options.\n\ts := server.New(&opts)\n\n\t\/\/ Configure the authentication mechanism\n\tconfigureAuth(s, &opts)\n\n\t\/\/ Configure the logger based on the flags\n\tconfigureLogger(s, &opts)\n\n\t\/\/ Start things up. Block here until done.\n\ts.Start()\n}\n\nfunc configureAuth(s *server.Server, opts *server.Options) {\n\tif opts.Username != \"\" {\n\t\tauth := &auth.Plain{\n\t\t\tUsername: opts.Username,\n\t\t\tPassword: opts.Password,\n\t\t}\n\n\t\ts.SetAuthMethod(auth)\n\t} else if opts.Authorization != \"\" {\n\t\tauth := &auth.Token{\n\t\t\tToken: opts.Authorization,\n\t\t}\n\n\t\ts.SetAuthMethod(auth)\n\t}\n}\n\nfunc configureLogger(s *server.Server, opts *server.Options) {\n\tvar log server.Logger\n\n\tif opts.LogFile != \"\" {\n\t\tlog = logger.NewFileLogger(opts.LogFile, opts.Logtime, opts.Debug, opts.Trace, true)\n\t} else if opts.RemoteSyslog != \"\" {\n\t\tlog = logger.NewRemoteSysLogger(opts.RemoteSyslog, opts.Debug, opts.Trace)\n\t} else if opts.Syslog {\n\t\tlog = logger.NewSysLogger(opts.Debug, opts.Trace)\n\t} else {\n\t\tcolors := true\n\t\t\/\/ Check to see if stderr is being redirected and if so turn off color\n\t\t\/\/ Also turn off colors if we're running on Windows where os.Stderr.Stat() returns an invalid handle-error\n\t\tstat, err := os.Stderr.Stat()\n\t\tif err != nil || (stat.Mode()&os.ModeCharDevice) == 0 {\n\t\t\tcolors = false\n\t\t}\n\t\tlog = logger.NewStdLogger(opts.Logtime, opts.Debug, opts.Trace, colors, true)\n\t}\n\n\ts.SetLogger(log, opts.Debug, opts.Trace)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype CommitCommit struct {\n\tMessage string\n}\n\ntype CommitPayload struct {\n\tSize int\n\tCommits []CommitCommit\n}\n\ntype Commit struct {\n\tId string\n\tType string\n\tPayload CommitPayload\n}\n\nfunc main() {\n\n\tfullDate := time.Now().AddDate(0, 0, -1).Format(\"2006-01-02\")\n\tgetData(fullDate)\n}\n\nfunc isDirty(message string) bool {\n\n\tresult := false\n\n\tcussWords := []string{\n\t\t\"fuck\",\n\t\t\"bitch\",\n\t\t\"stupid\",\n\t\t\" tits\",\n\t\t\"asshole\",\n\t\t\"cocksucker\",\n\t\t\"cunt\",\n\t\t\" hell \",\n\t\t\"douche\",\n\t\t\"testicle\",\n\t\t\"twat\",\n\t\t\"bastard\",\n\t\t\"sperm\",\n\t\t\"shit\",\n\t\t\"dildo\",\n\t\t\"wanker\",\n\t\t\"prick\",\n\t\t\"penis\",\n\t\t\"vagina\",\n\t\t\"whore\"}\n\n\tmessageWords := strings.Split(message, \" \")\n\n\tfor _, cussWord := range cussWords {\n\t\tfor _, word := range messageWords {\n\t\t\tif word == cussWord {\n\t\t\t\tresult = true\n\t\t\t}\n\t\t}\n\n\t}\n\treturn result\n}\n\nfunc parseCommit(line string) {\n\tvar commit Commit\n\n\tjsonErr := json.Unmarshal([]byte(line), &commit)\n\tif jsonErr != nil {\n\t\tfmt.Println(\"Could not parse json.\")\n\t\tfmt.Println(jsonErr)\n\t}\n\n\tif commit.Type == \"PushEvent\" && commit.Payload.Size > 0 {\n\t\t\/\/fmt.Println(commit.Payload.Commits[0].Message)\n\t\tif isDirty(commit.Payload.Commits[0].Message) {\n\t\t\tfmt.Println(commit.Payload.Commits[0].Message)\n\t\t}\n\t}\n\n}\n\nfunc parseFile(fName string) {\n\t\/\/ https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/GjIkryuCyAY\n\tfileOS, err := os.Open(fName)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\tos.Exit(1)\n\t}\n\n\tfileGzip, err := gzip.NewReader(fileOS)\n\tif err != nil {\n\t\tfmt.Printf(\"The file %v is not in gzip format.\\n\", fName)\n\t\tos.Exit(1)\n\t}\n\n\tfileRead := bufio.NewReader(fileGzip)\n\ti := 0\n\tfor {\n\t\tline, err := fileRead.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Did not get a new line.\")\n\t\t\tbreak\n\t\t}\n\n\t\tparseCommit(line)\n\n\t\ti++\n\t}\n}\n\nfunc getData(fullDate string) {\n\n\turls := makeUrlArray(fullDate)\n\n\tfor i, value := range urls {\n\n\t\tfmt.Println(\"fetching url\", value)\n\n\t\tresp, archiveErr := http.Get(value)\n\n\t\tif resp != nil {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\n\t\tif archiveErr != nil {\n\t\t\thandleError(\"Error getting github archive\", archiveErr)\n\t\t}\n\n\t\tcontents, readErr := ioutil.ReadAll(resp.Body)\n\n\t\tif readErr != nil {\n\t\t\thandleError(\"Error converting response\", readErr)\n\t\t}\n\n\t\tfname := makeFileName(fullDate, i)\n\n\t\tfileErr := ioutil.WriteFile(fname, contents, 0644)\n\n\t\tif fileErr != nil {\n\t\t\thandleError(\"Error writing response to file\", fileErr)\n\t\t}\n\n\t\tparseFile(fname)\n\n\t}\n}\n\nfunc makeUrlArray(fullDate string) [24]string {\n\n\tbaseUrl := makeUrlBase(fullDate)\n\turlEnd := \".json.gz\"\n\n\tvar urls [24]string\n\n\tfor i := 0; i < 24; i++ {\n\n\t\tvar buffer bytes.Buffer\n\t\tbuffer.WriteString(baseUrl)\n\t\tbuffer.WriteString(\"-\")\n\t\tbuffer.WriteString(strconv.Itoa(i))\n\t\tbuffer.WriteString(urlEnd)\n\t\turl := buffer.String()\n\n\t\turls[i] = url\n\t}\n\n\treturn urls\n}\n\nfunc makeUrlBase(fullDate string) string {\n\tsplit := strings.Split(fullDate, \"-\")\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"http:\/\/data.githubarchive.org\/\")\n\tbuffer.WriteString(split[0]) \/\/year\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[1]) \/\/month\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[2]) \/\/day\n\n\treturn buffer.String()\n}\n\nfunc makeFileName(fullDate string, i int) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"data-\")\n\tbuffer.WriteString(fullDate)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(strconv.Itoa(i))\n\tbuffer.WriteString(\".gz\")\n\n\treturn buffer.String()\n\n}\n\nfunc handleError(message string, err error) {\n\tfmt.Println(message, err)\n\tos.Exit(1)\n}\n<commit_msg>message<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype CommitCommit struct {\n\tMessage string\n}\n\ntype CommitPayload struct {\n\tSize int\n\tCommits []CommitCommit\n}\n\ntype Commit struct {\n\tId string\n\tType string\n\tPayload CommitPayload\n}\n\nfunc main() {\n\n\tfullDate := time.Now().AddDate(0, 0, -1).Format(\"2006-01-02\")\n\tgetData(fullDate)\n}\n\n\/\/ There must be a better way to do this\nfunc isDirty(message string) bool {\n\n\tresult := false\n\n\tcussWords := []string{\n\t\t\"fuck\",\n\t\t\"bitch\",\n\t\t\"stupid\",\n\t\t\" tits\",\n\t\t\"asshole\",\n\t\t\"cocksucker\",\n\t\t\"cunt\",\n\t\t\" hell \",\n\t\t\"douche\",\n\t\t\"testicle\",\n\t\t\"twat\",\n\t\t\"bastard\",\n\t\t\"sperm\",\n\t\t\"shit\",\n\t\t\"dildo\",\n\t\t\"wanker\",\n\t\t\"prick\",\n\t\t\"penis\",\n\t\t\"vagina\",\n\t\t\"whore\"}\n\n\tmessageWords := strings.Split(message, \" \")\n\n\tfor _, cussWord := range cussWords {\n\t\tfor _, word := range messageWords {\n\t\t\tif word == cussWord {\n\t\t\t\tresult = true\n\t\t\t}\n\t\t}\n\n\t}\n\treturn result\n}\n\nfunc parseCommit(line string) {\n\tvar commit Commit\n\n\tjsonErr := json.Unmarshal([]byte(line), &commit)\n\tif jsonErr != nil {\n\t\tfmt.Println(\"Could not parse json.\")\n\t\tfmt.Println(jsonErr)\n\t}\n\n\tif commit.Type == \"PushEvent\" && commit.Payload.Size > 0 {\n\t\t\/\/fmt.Println(commit.Payload.Commits[0].Message)\n\t\tif isDirty(commit.Payload.Commits[0].Message) {\n\t\t\tfmt.Println(commit.Payload.Commits[0].Message)\n\t\t}\n\t}\n\n}\n\nfunc parseFile(fName string) {\n\t\/\/ https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/GjIkryuCyAY\n\tfileOS, err := os.Open(fName)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\tos.Exit(1)\n\t}\n\n\tfileGzip, err := gzip.NewReader(fileOS)\n\tif err != nil {\n\t\tfmt.Printf(\"The file %v is not in gzip format.\\n\", fName)\n\t\tos.Exit(1)\n\t}\n\n\tfileRead := bufio.NewReader(fileGzip)\n\ti := 0\n\tfor {\n\t\tline, err := fileRead.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Did not get a new line.\")\n\t\t\tbreak\n\t\t}\n\n\t\tparseCommit(line)\n\n\t\ti++\n\t}\n}\n\nfunc getData(fullDate string) {\n\n\turls := makeUrlArray(fullDate)\n\n\tfor i, value := range urls {\n\n\t\tfmt.Println(\"fetching url\", value)\n\n\t\tresp, archiveErr := http.Get(value)\n\n\t\tif resp != nil {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\n\t\tif archiveErr != nil {\n\t\t\thandleError(\"Error getting github archive\", archiveErr)\n\t\t}\n\n\t\tcontents, readErr := ioutil.ReadAll(resp.Body)\n\n\t\tif readErr != nil {\n\t\t\thandleError(\"Error converting response\", readErr)\n\t\t}\n\n\t\tfname := makeFileName(fullDate, i)\n\n\t\tfileErr := ioutil.WriteFile(fname, contents, 0644)\n\n\t\tif fileErr != nil {\n\t\t\thandleError(\"Error writing response to file\", fileErr)\n\t\t}\n\n\t\tparseFile(fname)\n\n\t}\n}\n\nfunc makeUrlArray(fullDate string) [24]string {\n\n\tbaseUrl := makeUrlBase(fullDate)\n\turlEnd := \".json.gz\"\n\n\tvar urls [24]string\n\n\tfor i := 0; i < 24; i++ {\n\n\t\tvar buffer bytes.Buffer\n\t\tbuffer.WriteString(baseUrl)\n\t\tbuffer.WriteString(\"-\")\n\t\tbuffer.WriteString(strconv.Itoa(i))\n\t\tbuffer.WriteString(urlEnd)\n\t\turl := buffer.String()\n\n\t\turls[i] = url\n\t}\n\n\treturn urls\n}\n\nfunc makeUrlBase(fullDate string) string {\n\tsplit := strings.Split(fullDate, \"-\")\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"http:\/\/data.githubarchive.org\/\")\n\tbuffer.WriteString(split[0]) \/\/year\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[1]) \/\/month\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[2]) \/\/day\n\n\treturn buffer.String()\n}\n\nfunc makeFileName(fullDate string, i int) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"data-\")\n\tbuffer.WriteString(fullDate)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(strconv.Itoa(i))\n\tbuffer.WriteString(\".gz\")\n\n\treturn buffer.String()\n\n}\n\nfunc handleError(message string, err error) {\n\tfmt.Println(message, err)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"net\/http\"\n \"log\"\n \"io\/ioutil\"\n \"encoding\/json\"\n \"os\/exec\"\n \"flag\"\n \"strconv\"\n \"crypto\/md5\"\n \"text\/template\"\n \"bytes\"\n \"gopkg.in\/fsnotify.v1\"\n)\n\n\/\/ A map of repos => (a map of branches => (a list of actions))\n\/\/ Todo: Idiomatic way of doing this?\ntype Config map[string]map[string][]string\n\ntype HookMsg struct {\n CanonicalUrl string `json:\"canon_url\"`\n\n Commits []struct {\n Branch string\n }\n\n Repository struct {\n AbsoluteUrl string `json:\"absolute_url\"`\n }\n}\n\ntype Commit struct {\n Repository string\n Branch string\n}\n\ntype Job struct {\n Commit Commit\n Action string\n}\n\nvar listenPort = flag.Int(\"port\", 8080, \"portnumber to listen on\")\nvar configFile = flag.String(\"config\", \"golive.json\", \"the configfile to read\")\nvar verbose = flag.Bool(\"v\", false, \"print more output\")\n\nvar jobTemplates = make(map[[16]byte]template.Template)\nvar config Config\n\nfunc main() {\n\tflag.Parse()\n\n parseConfig(*configFile)\n watchConfig(*configFile)\n\n msgs := make(chan HookMsg, 100)\n commits := make(chan Commit, 100)\n jobs := make(chan Job, 100)\n actions := make(chan string, 100)\n\n go hookWrangler(msgs, commits)\n go commitWrangler(commits, jobs, config)\n go jobWrangler(jobs, actions)\n go actionRunner(actions)\n\n http.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n payload := r.FormValue(\"payload\")\n\n var hookmsg HookMsg\n if err := json.Unmarshal([]byte(payload), &hookmsg); err != nil {\n http.Error(w, \"Could not decode json\", 500)\n log.Fatal(err)\n }\n\n if *verbose {\n log.Print(\"Received commit: \", hookmsg)\n }\n\n msgs <- hookmsg\n })\n\n log.Fatal(http.ListenAndServe(\":\" + strconv.Itoa(*listenPort), nil))\n}\n\nfunc watchConfig(configFile string) {\n watcher, err := fsnotify.NewWatcher()\n if err != nil {\n log.Fatal(err)\n }\n\n log.Print(\"Watching config file: \", configFile)\n\n defer watcher.Close()\n\n\tdone := make(chan bool)\n\n go func() {\n for {\n select {\n case event := <-watcher.Events:\n if event.Name == \"\" {\n continue\n }\n\n if *verbose {\n log.Println(event.String())\n }\n\n if event.Op & fsnotify.Write == fsnotify.Write ||\n event.Op & fsnotify.Chmod == fsnotify.Chmod\n {\n log.Print(\"Relading config file \", configFile)\n\n parseConfig(configFile)\n }\n\n case err := <-watcher.Errors:\n if err == nil {\n continue\n }\n\n done <- true\n log.Fatal(\"Error when watching config file: \", err)\n }\n }\n }()\n\n err = watcher.Add(configFile)\n if err != nil {\n log.Fatal(err)\n }\n\n <- done\n}\n\nfunc parseConfig(configFile string) {\n config_raw, err := ioutil.ReadFile(configFile)\n if err != nil {\n log.Fatal(err)\n }\n\n var newConfig Config\n json.Unmarshal(config_raw, &newConfig)\n\n config = newConfig\n\n if *verbose {\n log.Print(\"Loaded config: \", config)\n }\n}\n\nfunc hookWrangler(msgs <-chan HookMsg, results chan<- Commit) {\n for msg := range msgs {\n repository := msg.CanonicalUrl + msg.Repository.AbsoluteUrl\n\n for _, commit := range msg.Commits {\n results <- Commit{ repository, commit.Branch}\n }\n }\n}\n\nfunc commitWrangler(commits <-chan Commit, results chan<- Job, config Config) {\n for commit := range commits {\n if commit.Branch == \"\" || commit.Repository == \"\" {\n continue\n }\n\n if branches, ok := config[commit.Repository]; ok {\n if actions, ok := branches[commit.Branch]; ok {\n for _, action := range actions {\n results <- Job{commit, string(action)}\n }\n }\n }\n }\n}\n\nfunc jobWrangler(jobs <-chan Job, actions chan<- string) {\n for job := range jobs {\n if *verbose {\n log.Print(\"Running job: \", job)\n }\n\n hash := md5.Sum([]byte(job.Action))\n\n if _, ok := jobTemplates[hash]; !ok {\n t, err := template.New(string(hash[:])).Parse(job.Action)\n if err != nil {\n log.Fatal(\"Could not compile template: \", job.Action, \" - \", err)\n }\n\n jobTemplates[hash] = *t\n }\n\n t := jobTemplates[hash]\n\n var buff bytes.Buffer\n (&t).Execute(&buff, job.Commit)\n s := buff.String()\n\n actions <- s\n }\n}\n\nfunc actionRunner(actions <-chan string) {\n for action := range actions {\n command := exec.Command(\"bash\", \"-c\", action)\n\n command.Run()\n }\n}\n<commit_msg>Fix if<commit_after>package main\n\nimport (\n \"net\/http\"\n \"log\"\n \"io\/ioutil\"\n \"encoding\/json\"\n \"os\/exec\"\n \"flag\"\n \"strconv\"\n \"crypto\/md5\"\n \"text\/template\"\n \"bytes\"\n \"gopkg.in\/fsnotify.v1\"\n)\n\n\/\/ A map of repos => (a map of branches => (a list of actions))\n\/\/ Todo: Idiomatic way of doing this?\ntype Config map[string]map[string][]string\n\ntype HookMsg struct {\n CanonicalUrl string `json:\"canon_url\"`\n\n Commits []struct {\n Branch string\n }\n\n Repository struct {\n AbsoluteUrl string `json:\"absolute_url\"`\n }\n}\n\ntype Commit struct {\n Repository string\n Branch string\n}\n\ntype Job struct {\n Commit Commit\n Action string\n}\n\nvar listenPort = flag.Int(\"port\", 8080, \"portnumber to listen on\")\nvar configFile = flag.String(\"config\", \"golive.json\", \"the configfile to read\")\nvar verbose = flag.Bool(\"v\", false, \"print more output\")\n\nvar jobTemplates = make(map[[16]byte]template.Template)\nvar config Config\n\nfunc main() {\n\tflag.Parse()\n\n parseConfig(*configFile)\n watchConfig(*configFile)\n\n msgs := make(chan HookMsg, 100)\n commits := make(chan Commit, 100)\n jobs := make(chan Job, 100)\n actions := make(chan string, 100)\n\n go hookWrangler(msgs, commits)\n go commitWrangler(commits, jobs, config)\n go jobWrangler(jobs, actions)\n go actionRunner(actions)\n\n http.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n payload := r.FormValue(\"payload\")\n\n var hookmsg HookMsg\n if err := json.Unmarshal([]byte(payload), &hookmsg); err != nil {\n http.Error(w, \"Could not decode json\", 500)\n log.Fatal(err)\n }\n\n if *verbose {\n log.Print(\"Received commit: \", hookmsg)\n }\n\n msgs <- hookmsg\n })\n\n log.Fatal(http.ListenAndServe(\":\" + strconv.Itoa(*listenPort), nil))\n}\n\nfunc watchConfig(configFile string) {\n watcher, err := fsnotify.NewWatcher()\n if err != nil {\n log.Fatal(err)\n }\n\n log.Print(\"Watching config file: \", configFile)\n\n defer watcher.Close()\n\n\tdone := make(chan bool)\n\n go func() {\n for {\n select {\n case event := <-watcher.Events:\n if event.Name == \"\" {\n continue\n }\n\n if *verbose {\n log.Println(event.String())\n }\n\n if event.Op & fsnotify.Write == fsnotify.Write ||\n event.Op & fsnotify.Chmod == fsnotify.Chmod {\n log.Print(\"Relading config file \", configFile)\n\n parseConfig(configFile)\n }\n\n case err := <-watcher.Errors:\n if err == nil {\n continue\n }\n\n done <- true\n log.Fatal(\"Error when watching config file: \", err)\n }\n }\n }()\n\n err = watcher.Add(configFile)\n if err != nil {\n log.Fatal(err)\n }\n\n <- done\n}\n\nfunc parseConfig(configFile string) {\n config_raw, err := ioutil.ReadFile(configFile)\n if err != nil {\n log.Fatal(err)\n }\n\n var newConfig Config\n json.Unmarshal(config_raw, &newConfig)\n\n config = newConfig\n\n if *verbose {\n log.Print(\"Loaded config: \", config)\n }\n}\n\nfunc hookWrangler(msgs <-chan HookMsg, results chan<- Commit) {\n for msg := range msgs {\n repository := msg.CanonicalUrl + msg.Repository.AbsoluteUrl\n\n for _, commit := range msg.Commits {\n results <- Commit{ repository, commit.Branch}\n }\n }\n}\n\nfunc commitWrangler(commits <-chan Commit, results chan<- Job, config Config) {\n for commit := range commits {\n if commit.Branch == \"\" || commit.Repository == \"\" {\n continue\n }\n\n if branches, ok := config[commit.Repository]; ok {\n if actions, ok := branches[commit.Branch]; ok {\n for _, action := range actions {\n results <- Job{commit, string(action)}\n }\n }\n }\n }\n}\n\nfunc jobWrangler(jobs <-chan Job, actions chan<- string) {\n for job := range jobs {\n if *verbose {\n log.Print(\"Running job: \", job)\n }\n\n hash := md5.Sum([]byte(job.Action))\n\n if _, ok := jobTemplates[hash]; !ok {\n t, err := template.New(string(hash[:])).Parse(job.Action)\n if err != nil {\n log.Fatal(\"Could not compile template: \", job.Action, \" - \", err)\n }\n\n jobTemplates[hash] = *t\n }\n\n t := jobTemplates[hash]\n\n var buff bytes.Buffer\n (&t).Execute(&buff, job.Commit)\n s := buff.String()\n\n actions <- s\n }\n}\n\nfunc actionRunner(actions <-chan string) {\n for action := range actions {\n command := exec.Command(\"bash\", \"-c\", action)\n\n command.Run()\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package goweek\n\nimport (\n\t\"time\"\n\t\"errors\"\n\t\"reflect\"\n)\n\ntype Week struct {\n\tDays []time.Time\n\tYear int\n\tNumber int\n\tFirstDay int\n}\n\nfunc NewWeek(params ...int) (*Week, error) {\n\tif len(params) < 2 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): too few arguments, specify year and number of week\")\n\t} else if params[0] < 0 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): year can't be less than zero\")\n\t} else if params[1] < 1 || params[1] > 53 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): number of week can't be less than 1 or greater than 53\")\n\t} else {\n\t\tvar (\n\t\t\tweek = initWeek(params...)\n\t\t\tapproximateDay = (week.Number-1) * 7 \/\/ converting from human-readable to machine notation\n\t\t\tapproximateFirstDay = 0\n\t\t\tcommonNumberOfDays = 0\n\t\t\tmonthNumber = 0\n\t\t)\n\n\t\tfor index, numberOfDaysInMonth := range numberOfDays(week.Year) {\n\t\t\tif approximateDay >= commonNumberOfDays && approximateDay <= commonNumberOfDays+numberOfDaysInMonth {\n\t\t\t\tmonthNumber = index\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcommonNumberOfDays += numberOfDaysInMonth\n\t\t\t}\n\t\t}\n\n\t\tapproximateFirstDay = approximateDay - commonNumberOfDays\n\n\t\t\/\/ workaround for calculation of day number in first week of the year\n\t\tif approximateFirstDay == 0 {\n\t\t\tapproximateFirstDay = 1\n\t\t}\n\n\t\t\/\/ workaround for calculation of day number in last week of the year\n\t\tif approximateFirstDay == -1 {\n\t\t\tmonthNumber = 11\n\t\t\tapproximateFirstDay = numberOfDays(week.Year)[monthNumber] - 2\n\t\t}\n\n\t\t\/\/ composing week listing\n\t\tvar estimatedDate = time.Date(week.Year, time.Month(monthNumber+1), approximateFirstDay, 0, 0, 0, 0, time.UTC)\n\t\tvar estimatedFirstDayOfTheWeek = estimatedDate.AddDate(0, 0, -1*int(estimatedDate.Weekday()))\n\n\t\tfor i := week.FirstDay; i <= week.FirstDay+6; i++ {\n\t\t\tweek.Days = append(week.Days, estimatedFirstDayOfTheWeek.AddDate(0, 0, i))\n\t\t}\n\n\t\treturn &week, nil\n\t}\n}\n\nfunc (week *Week) Next() (*Week, error) {\n\tvar newYear, newWeek int\n\tif week.Number+1 > 53 {\n\t\tnewYear = week.Year + 1\n\t\tnewWeek = 1\n\t} else {\n\t\tnewYear = week.Year\n\t\tnewWeek = week.Number + 1\n\t}\n\tw, e := NewWeek(newYear, newWeek, week.FirstDay)\n\n\t\/\/ fixing special cases on the verge of years\n\tif week.Number == 53 && reflect.DeepEqual(week.Days, w.Days) {\n\t\tw, e = NewWeek(newYear, newWeek + 1, week.FirstDay)\n\t}\n\n\treturn w, e\n}\n\nfunc (week *Week) Previous() (*Week, error) {\n\tvar newYear, newWeek int\n\tif week.Number-1 < 1 {\n\t\tnewYear = week.Year - 1\n\t\tnewWeek = 53\n\t} else {\n\t\tnewYear = week.Year\n\t\tnewWeek = week.Number - 1\n\t}\n\tw, e := NewWeek(newYear, newWeek, week.FirstDay)\n\n\t\/\/ fixing special cases on the verge of years\n\tif week.Number == 1 && reflect.DeepEqual(week.Days, w.Days) {\n\t\tw, e = NewWeek(newYear, newWeek - 1, week.FirstDay)\n\t}\n\n\treturn w, e\n}\n\nfunc initWeek(params ...int) Week {\n\tvar week = Week{\n\t\tYear: params[0],\n\t\tNumber: params[1],\n\t}\n\n\tif len(params) < 3 {\n\t\tweek.FirstDay = 0\n\t} else {\n\t\tweek.FirstDay = params[2]\n\t}\n\treturn week\n}\n\nfunc isLeapYear(year int) bool {\n\tif year%4 > 0 {\n\t\treturn false\n\t} else if year%100 > 0 {\n\t\treturn true\n\t} else if year%400 > 0 {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}\n\nfunc numberOfDays(year int) (numbers []int) {\n\tnumbers = []int{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}\n\tif isLeapYear(year) {\n\t\tnumbers[1] = 29\n\t}\n\treturn\n}<commit_msg>#4 go fmt<commit_after>package goweek\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"time\"\n)\n\ntype Week struct {\n\tDays []time.Time\n\tYear int\n\tNumber int\n\tFirstDay int\n}\n\nfunc NewWeek(params ...int) (*Week, error) {\n\tif len(params) < 2 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): too few arguments, specify year and number of week\")\n\t} else if params[0] < 0 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): year can't be less than zero\")\n\t} else if params[1] < 1 || params[1] > 53 {\n\t\treturn &Week{}, errors.New(\"NewWeek(): number of week can't be less than 1 or greater than 53\")\n\t} else {\n\t\tvar (\n\t\t\tweek = initWeek(params...)\n\t\t\tapproximateDay = (week.Number - 1) * 7 \/\/ converting from human-readable to machine notation\n\t\t\tapproximateFirstDay = 0\n\t\t\tcommonNumberOfDays = 0\n\t\t\tmonthNumber = 0\n\t\t)\n\n\t\tfor index, numberOfDaysInMonth := range numberOfDays(week.Year) {\n\t\t\tif approximateDay >= commonNumberOfDays && approximateDay <= commonNumberOfDays+numberOfDaysInMonth {\n\t\t\t\tmonthNumber = index\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcommonNumberOfDays += numberOfDaysInMonth\n\t\t\t}\n\t\t}\n\n\t\tapproximateFirstDay = approximateDay - commonNumberOfDays\n\n\t\t\/\/ workaround for calculation of day number in first week of the year\n\t\tif approximateFirstDay == 0 {\n\t\t\tapproximateFirstDay = 1\n\t\t}\n\n\t\t\/\/ workaround for calculation of day number in last week of the year\n\t\tif approximateFirstDay == -1 {\n\t\t\tmonthNumber = 11\n\t\t\tapproximateFirstDay = numberOfDays(week.Year)[monthNumber] - 2\n\t\t}\n\n\t\t\/\/ composing week listing\n\t\tvar estimatedDate = time.Date(week.Year, time.Month(monthNumber+1), approximateFirstDay, 0, 0, 0, 0, time.UTC)\n\t\tvar estimatedFirstDayOfTheWeek = estimatedDate.AddDate(0, 0, -1*int(estimatedDate.Weekday()))\n\n\t\tfor i := week.FirstDay; i <= week.FirstDay+6; i++ {\n\t\t\tweek.Days = append(week.Days, estimatedFirstDayOfTheWeek.AddDate(0, 0, i))\n\t\t}\n\n\t\treturn &week, nil\n\t}\n}\n\nfunc (week *Week) Next() (*Week, error) {\n\tvar newYear, newWeek int\n\tif week.Number+1 > 53 {\n\t\tnewYear = week.Year + 1\n\t\tnewWeek = 1\n\t} else {\n\t\tnewYear = week.Year\n\t\tnewWeek = week.Number + 1\n\t}\n\tw, e := NewWeek(newYear, newWeek, week.FirstDay)\n\n\t\/\/ fixing special cases on the verge of years\n\tif week.Number == 53 && reflect.DeepEqual(week.Days, w.Days) {\n\t\tw, e = NewWeek(newYear, newWeek+1, week.FirstDay)\n\t}\n\n\treturn w, e\n}\n\nfunc (week *Week) Previous() (*Week, error) {\n\tvar newYear, newWeek int\n\tif week.Number-1 < 1 {\n\t\tnewYear = week.Year - 1\n\t\tnewWeek = 53\n\t} else {\n\t\tnewYear = week.Year\n\t\tnewWeek = week.Number - 1\n\t}\n\tw, e := NewWeek(newYear, newWeek, week.FirstDay)\n\n\t\/\/ fixing special cases on the verge of years\n\tif week.Number == 1 && reflect.DeepEqual(week.Days, w.Days) {\n\t\tw, e = NewWeek(newYear, newWeek-1, week.FirstDay)\n\t}\n\n\treturn w, e\n}\n\nfunc initWeek(params ...int) Week {\n\tvar week = Week{\n\t\tYear: params[0],\n\t\tNumber: params[1],\n\t}\n\n\tif len(params) < 3 {\n\t\tweek.FirstDay = 0\n\t} else {\n\t\tweek.FirstDay = params[2]\n\t}\n\treturn week\n}\n\nfunc isLeapYear(year int) bool {\n\tif year%4 > 0 {\n\t\treturn false\n\t} else if year%100 > 0 {\n\t\treturn true\n\t} else if year%400 > 0 {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}\n\nfunc numberOfDays(year int) (numbers []int) {\n\tnumbers = []int{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}\n\tif isLeapYear(year) {\n\t\tnumbers[1] = 29\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage modules\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\nfunc init() {\n\tModules[\"geekbench\"] = &Geekbench{}\n}\n\ntype Geekbench struct {\n\tSingleCore int\n\tMultiCore int\n\tURL string\n\tID int\n}\n\nfunc (stat *Geekbench) Run() error {\n\tres, err := http.Get(\"http:\/\/cdn.primatelabs.com\/Geekbench-4.1.0-Linux.tar.gz\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := stat.extract(res.Body); err != nil {\n\t\treturn err\n\t}\n\n\tgb := exec.Command(\"build.pulse\/dist\/Geekbench-4.1.0-Linux\/geekbench4\")\n\n\tout, err := gb.Output()\n\tif err != nil {\n\t\texit, ok := err.(*exec.ExitError)\n\t\tif ok {\n\t\t\tfmt.Printf(\"%s\", exit.Stderr)\n\t\t}\n\n\t\treturn err\n\t}\n\n\tif err := os.RemoveAll(\"build.pulse\"); err != nil {\n\t\treturn err\n\t}\n\n\tr, err := regexp.Compile(\"https:\/\/browser.geekbench.com\/v4\/cpu\/([0-9]*)\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatch := r.FindStringSubmatch(string(out))\n\tif len(match) < 2 {\n\t\treturn errors.New(\"geekbench did not return result url\")\n\t}\n\n\tstat.URL = match[0]\n\tid, _ := strconv.Atoi(match[1])\n\tstat.ID = id\n\n\tstat.SingleCore, stat.MultiCore, err = stat.scrape(stat.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (stat *Geekbench) Print() {\n\tfmt.Printf(\"Single-Core Score : %d\\n\", stat.SingleCore)\n\tfmt.Printf(\"Multi-Core Score : %d\\n\", stat.MultiCore)\n\tfmt.Printf(\"Result URL : %s\\n\", stat.URL)\n}\n\nfunc (stat *Geekbench) scrape(url string) (int, int, error) {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tsingleReg, err := regexp.Compile(\"<th class='name'>Single-Core Score<\/th>\\n<th class='score'>([0-9]*)<\/th>\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tsingle := singleReg.FindStringSubmatch(string(body))\n\tif len(single) < 2 {\n\t\treturn 0, 0, errors.New(\"failed to scrape score\")\n\t}\n\n\tmultiReg, err := regexp.Compile(\"<th class='name'>Multi-Core Score<\/th>\\n<th class='score'>([0-9]*)<\/th>\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tmulti := multiReg.FindStringSubmatch(string(body))\n\tif len(multi) < 2 {\n\t\treturn 0, 0, errors.New(\"failed to scrape score\")\n\t}\n\n\tsingleScore, _ := strconv.Atoi(single[1])\n\tmultiScore, _ := strconv.Atoi(multi[1])\n\n\treturn singleScore, multiScore, nil\n}\n\nfunc (stat *Geekbench) extract(r io.Reader) error {\n\n\tgzf, err := gzip.NewReader(r)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\ttarReader := tar.NewReader(gzf)\n\n\tfor {\n\t\theader, err := tarReader.Next()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch header.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\tif err := os.MkdirAll(header.Name, os.ModePerm); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase tar.TypeReg:\n\t\t\tf, err := os.Create(header.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tif _, err := io.Copy(f, tarReader); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tos.Chmod(header.Name, os.ModePerm)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%s %c %s %s\", \"unexpected file type\", header.Typeflag, \"in file\", header.Name)\n\t\t}\n\t}\n\n}\n<commit_msg>geekbench: fix command on 32 bit<commit_after>\/\/ +build linux\n\npackage modules\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n)\n\nfunc init() {\n\tModules[\"geekbench\"] = &Geekbench{}\n}\n\ntype Geekbench struct {\n\tSingleCore int\n\tMultiCore int\n\tURL string\n\tID int\n}\n\nfunc (stat *Geekbench) Run() error {\n\tres, err := http.Get(\"http:\/\/cdn.primatelabs.com\/Geekbench-4.1.0-Linux.tar.gz\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := stat.extract(res.Body); err != nil {\n\t\treturn err\n\t}\n\n\tcmd := \"build.pulse\/dist\/Geekbench-4.1.0-Linux\/geekbench_x86_\"\n\n\tif runtime.GOARCH == \"386\" {\n\t\tcmd = fmt.Sprintf(\"%s%s\", cmd, \"32\")\n\t} else {\n\t\tcmd = fmt.Sprintf(\"%s%s\", cmd, \"64\")\n\t}\n\n\tgb := exec.Command(cmd)\n\n\tout, err := gb.Output()\n\tif err != nil {\n\t\texit, ok := err.(*exec.ExitError)\n\t\tif ok {\n\t\t\tfmt.Printf(\"%s\", exit.Stderr)\n\t\t}\n\n\t\treturn err\n\t}\n\n\tif err := os.RemoveAll(\"build.pulse\"); err != nil {\n\t\treturn err\n\t}\n\n\tr, err := regexp.Compile(\"https:\/\/browser.geekbench.com\/v4\/cpu\/([0-9]*)\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatch := r.FindStringSubmatch(string(out))\n\tif len(match) < 2 {\n\t\treturn errors.New(\"geekbench did not return result url\")\n\t}\n\n\tstat.URL = match[0]\n\tid, _ := strconv.Atoi(match[1])\n\tstat.ID = id\n\n\tstat.SingleCore, stat.MultiCore, err = stat.scrape(stat.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (stat *Geekbench) Print() {\n\tfmt.Printf(\"Single-Core Score : %d\\n\", stat.SingleCore)\n\tfmt.Printf(\"Multi-Core Score : %d\\n\", stat.MultiCore)\n\tfmt.Printf(\"Result URL : %s\\n\", stat.URL)\n}\n\nfunc (stat *Geekbench) scrape(url string) (int, int, error) {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tsingleReg, err := regexp.Compile(\"<th class='name'>Single-Core Score<\/th>\\n<th class='score'>([0-9]*)<\/th>\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tsingle := singleReg.FindStringSubmatch(string(body))\n\tif len(single) < 2 {\n\t\treturn 0, 0, errors.New(\"failed to scrape score\")\n\t}\n\n\tmultiReg, err := regexp.Compile(\"<th class='name'>Multi-Core Score<\/th>\\n<th class='score'>([0-9]*)<\/th>\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tmulti := multiReg.FindStringSubmatch(string(body))\n\tif len(multi) < 2 {\n\t\treturn 0, 0, errors.New(\"failed to scrape score\")\n\t}\n\n\tsingleScore, _ := strconv.Atoi(single[1])\n\tmultiScore, _ := strconv.Atoi(multi[1])\n\n\treturn singleScore, multiScore, nil\n}\n\nfunc (stat *Geekbench) extract(r io.Reader) error {\n\n\tgzf, err := gzip.NewReader(r)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\ttarReader := tar.NewReader(gzf)\n\n\tfor {\n\t\theader, err := tarReader.Next()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch header.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\tif err := os.MkdirAll(header.Name, os.ModePerm); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase tar.TypeReg:\n\t\t\tf, err := os.Create(header.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tif _, err := io.Copy(f, tarReader); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tos.Chmod(header.Name, os.ModePerm)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%s %c %s %s\", \"unexpected file type\", header.Typeflag, \"in file\", header.Name)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"github.com\/labstack\/echo\"\nimport \"net\/http\"\n\n\/\/ health returns an HTTP handler that provides health information\nfunc health(c echo.Context) error {\n\th := &Health{\"Fluffy Radio Api\", \"1.0.0\", \"Just Keep Fluffing!\"}\n\treturn c.JSON(http.StatusOK, h)\n}\n\ntype (\n\t\/\/ Health provides basic information about the API used for health monitoring\n\tHealth struct {\n\t\tName string `json:\"name\"`\n\t\tVersion string `json:\"version\"`\n\t\tMessage string `json:\"message\"`\n\t}\n)\n<commit_msg>Health stuff<commit_after>package main\n\nimport \"github.com\/labstack\/echo\"\nimport \"net\/http\"\n\n\/\/ health returns an HTTP handler that provides health information\nfunc health(c echo.Context) error {\n\treturn c.JSON(http.StatusOK, []string{})\n}\n<|endoftext|>"} {"text":"<commit_before>package fusefs\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/asjoyner\/shade\"\n\t\"github.com\/asjoyner\/shade\/drive\"\n\t\"github.com\/asjoyner\/shade\/drive\/memory\"\n\n\t\"bazil.org\/fuse\"\n)\n\nfunc init() {\n\t\/\/ This may be helpful if the tests fail.\n\t\/\/fstestutil.DebugByDefault()\n}\n\n\/\/ TestFuseRead initializes a series of random chunks of data, calculates a\n\/\/ shade.Sum and shade.File for each, placed in a series of subdirectories\n\/\/ based on the Sum. It uses PutChunk to put them into a memory client, and\n\/\/ mounts a fusefs on that memory client. It then uses filepath.Walk to\n\/\/ iterate and validate the exposed filesystem.\nfunc TestFuseRead(t *testing.T) {\n\tmountPoint, err := ioutil.TempDir(\"\", \"fusefsTest\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not acquire TempDir: %s\", err)\n\t}\n\tdefer tearDownDir(mountPoint)\n\n\tt.Logf(\"Mounting fuse filesystem at: %s\", mountPoint)\n\tclient, ffs, err := setupFuse(t, mountPoint)\n\tif err != nil {\n\t\tt.Fatalf(\"could not mount fuse: %s\", err)\n\t}\n\tdefer tearDownFuse(t, mountPoint)\n\n\tchunkSize := 100 * 256 \/\/ in bytes\n\tnc := 50 \/\/ number of chunks\n\n\t\/\/ Generate some random file contents\n\ttestChunks := make(map[string][]byte, nc)\n\tfor i := 0; i < nc; i++ {\n\t\tn := make([]byte, chunkSize)\n\t\trand.Read(n)\n\t\ts := sha256.Sum256(n)\n\t\tchunkSum := string(s[:])\n\t\ttestChunks[chunkSum] = n\n\t}\n\n\t\/\/ Populate test chunks into the client\n\tfor stringSum, chunk := range testChunks {\n\t\terr := client.PutChunk([]byte(stringSum), chunk)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to put chunk \\\"%x\\\": %s\", stringSum, err)\n\t\t}\n\t}\n\n\t\/\/ Populate files, referencing those chunks, into the client\n\t\/\/ the directory scheme is to break apart the shasum into dirs:\n\t\/\/ deadbeef == de\/ad\/be\/ff\/deadbeef\n\t\/\/ .. unless it starts with 0, so we exercise files at \/ too.\n\ti := 0\n\tfor chunkStringSum := range testChunks {\n\t\tfilename := pathFromStringSum(chunkStringSum)\n\t\tfile := shade.File{\n\t\t\tFilename: filename,\n\t\t\tFilesize: int64(len(chunkStringSum)),\n\t\t\tChunks: []shade.Chunk{{\n\t\t\t\tIndex: 0,\n\t\t\t\tSha256: []byte(chunkStringSum),\n\t\t\t}},\n\t\t\tChunksize: chunkSize,\n\t\t}\n\t\tfj, err := file.ToJSON()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"test data is broken, could not marshal File: %s\", err)\n\t\t}\n\t\tfileSum := sha256.Sum256(fj)\n\t\tif err := client.PutFile(fileSum[:], fj); err != nil {\n\t\t\tt.Errorf(\"failed to PutFile \\\"%x\\\": %s\", fileSum[:], err)\n\t\t}\n\t\ti++\n\t\tt.Logf(\"Added to drive.Client %d: %s\\n\", i, filename)\n\t}\n\n\t\/\/ double check that the client is sane\n\tfiles, err := client.ListFiles()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif nf := len(files); nf != nc {\n\t\tt.Fatalf(\"incomplete file set in client, want: %d, got %d\", nc, nf)\n\t}\n\tt.Logf(\"There are %d files known to the drive.Client.\", nc)\n\n\tif err := ffs.Refresh(); err != nil {\n\t\tt.Fatalf(\"failed to refresh fuse fileserver: %s\", err)\n\t}\n\tt.Logf(\"Drive client refreshed successfully.\")\n\n\tseen := make(map[string]bool)\n\tvisit := func(path string, f os.FileInfo, err error) error {\n\t\tif err1 := checkPath(t, testChunks, seen, mountPoint, path, f, err); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\treturn nil\n\t}\n\n\tt.Logf(\"Attempting to walk the filesystem.\")\n\tif err := filepath.Walk(mountPoint, visit); err != nil {\n\t\tt.Fatalf(\"filepath.Walk() returned %v\", err)\n\t}\n\n\tif ns := len(seen); ns != nc {\n\t\tt.Errorf(\"incomplete file set in fuse, want: %d, got %d\", nc, ns)\n\t\tfor chunk := range testChunks {\n\t\t\tif !seen[chunk] {\n\t\t\t\tt.Errorf(\"missing file: %x\", chunk)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ TestFuseRoundTrip creates an empty memory client, mounts a fusefs filesystem\n\/\/ against it, generates random test data, writes it to the filesystem, reads\n\/\/ it back out and validates it is as expected.\nfunc TestFuseRoundtrip(t *testing.T) {\n\tmountPoint, err := ioutil.TempDir(\"\", \"fusefsTest\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not acquire TempDir: %s\", err)\n\t}\n\tdefer tearDownDir(mountPoint)\n\n\tt.Logf(\"Mounting fuse filesystem at: %s\", mountPoint)\n\tif _, _, err := setupFuse(t, mountPoint); err != nil {\n\t\tt.Fatalf(\"could not mount fuse: %s\", err)\n\t}\n\tdefer tearDownFuse(t, mountPoint)\n\n\tchunkSize := 100 * 256 \/\/ in bytes\n\tnc := 50 \/\/ number of chunks\n\n\t\/\/ Generate some random file contents\n\ttestChunks := make(map[string][]byte, nc)\n\tfor i := 0; i < nc; i++ {\n\t\tn := make([]byte, chunkSize)\n\t\trand.Read(n)\n\t\ts := sha256.Sum256(n)\n\t\tchunkSum := string(s[:])\n\t\ttestChunks[chunkSum] = n\n\t}\n\n\t\/\/ Write those testChunks to the fusefs\n\tfor stringSum, chunk := range testChunks {\n\t\tfilename := pathFromStringSum(stringSum)\n\t\tif err := ioutil.WriteFile(filename, chunk, 0400); err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ Validate all the files have the right contents\n\tseen := make(map[string]bool)\n\tvisit := func(path string, f os.FileInfo, err error) error {\n\t\tif err1 := checkPath(t, testChunks, seen, mountPoint, path, f, err); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\treturn nil\n\t}\n\n\tt.Logf(\"Attempting to walk the filesystem.\")\n\tif err := filepath.Walk(mountPoint, visit); err != nil {\n\t\tt.Fatalf(\"filepath.Walk() returned %v\", err)\n\t}\n\n\tif ns := len(seen); ns != nc {\n\t\tt.Errorf(\"incomplete file set in fuse, want: %d, got %d\", nc, ns)\n\t\tfor chunk := range testChunks {\n\t\t\tif !seen[chunk] {\n\t\t\t\tt.Errorf(\"missing file: %x\", chunk)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ setup returns the absolute path to a mountpoint for a fuse FS, and the\n\/\/ memory-backed drive.Client which it is following. The FS is configured not\n\/\/ to refresh the drive.Client, you must call Refresh() if you update the\n\/\/ client outside fuse.\nfunc setupFuse(t *testing.T, mountPoint string) (drive.Client, *Server, error) {\n\toptions := []fuse.MountOption{\n\t\tfuse.FSName(\"Shade\"),\n\t\tfuse.NoAppleDouble(),\n\t}\n\tconn, err := fuse.Mount(mountPoint, options...)\n\tif err != nil {\n\t\t\/\/ often means the mountpoint is busy... but it's a TempDir...\n\t\treturn nil, nil, fmt.Errorf(\"could not setup Fuse mount: %s\", err)\n\t}\n\n\tmc, err := memory.NewClient(drive.Config{Provider: \"memory\"})\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"NewClient() for test config failed: %s\", err)\n\t}\n\n\tffs, err := New(mc, conn, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"fuse server initialization failed: %s\", err)\n\t}\n\tgo func() {\n\t\terr = ffs.Serve()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"serving fuse connection failed: %s\", err)\n\t\t}\n\t}()\n\n\t<-conn.Ready \/\/ returns when the FS is usable\n\tif conn.MountError != nil {\n\t\treturn nil, nil, fmt.Errorf(\"fuse exited with an error: %s\", err)\n\t}\n\treturn mc, ffs, nil\n}\n\nfunc tearDownFuse(t *testing.T, dir string) {\n\tif err := fuse.Unmount(dir); err != nil {\n\t\tt.Fatalf(\"could not Unmount test dir: %s\", err)\n\t}\n}\n\nfunc tearDownDir(dir string) {\n\tif err := os.RemoveAll(dir); err != nil {\n\t\tfmt.Printf(\"Could not clean up: %s\", err)\n\t}\n}\n\nfunc pathFromStringSum(sum string) string {\n\tchs := hex.EncodeToString([]byte(sum))\n\tvar filename string\n\tif strings.HasPrefix(chs, \"0\") {\n\t\tfilename = chs\n\t} else {\n\t\t\/\/ TODO: don't call Sprintf so often. :)\n\t\tfor i := 0; i <= len(chs)-8; i += 8 {\n\t\t\tfilename = fmt.Sprintf(\"%s\/%s\", filename, chs[i:i+8])\n\t\t}\n\t}\n\treturn strings.TrimPrefix(filename, \"\/\")\n}\n\nfunc checkPath(t *testing.T, testChunks map[string][]byte, seen map[string]bool, mountPoint, path string, f os.FileInfo, err error) error {\n\tif err != nil {\n\t\tt.Errorf(\"failed to read path: %s\", err)\n\t\treturn err\n\t}\n\tif !f.IsDir() {\n\t\tt.Logf(\"Seen in FS %d: %s\\n\", len(seen)+1, path)\n\t\tcontents, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn nil\n\t\t}\n\t\tfilename := strings.TrimPrefix(path, mountPoint)\n\t\tfilename = strings.Replace(filename, \"\/\", \"\", -1)\n\t\tchs, err := hex.DecodeString(filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"could not hex decode filename in Fuse FS: %s: %s\", filename, err)\n\t\t\treturn nil\n\t\t}\n\t\tif bytes.Equal(contents, testChunks[string(chs)]) {\n\t\t\tt.Errorf(\"contents of %s did not match, want: %s, got %s\", filename, testChunks[filename], contents)\n\t\t\treturn nil\n\t\t}\n\t\tif seen[string(chs)] {\n\t\t\tt.Errorf(\"saw chunk twice: %x\", chs)\n\t\t}\n\t\tseen[string(chs)] = true\n\t}\n\treturn nil\n}\n\n\/\/ TestChunksForRead tests the function which calculates which chunks are\n\/\/ necessary to service a fuse read request. It initializes a set of chunks,\n\/\/ then calls chunksForRead to ensure the correct series of chunks are returned\n\/\/ for each offset.\nfunc TestChunksForRead(t *testing.T) {\n\tf := &shade.File{\n\t\tFilename: \"test\",\n\t\tFilesize: 32,\n\t\tChunks: []shade.Chunk{\n\t\t\t\/\/ chunks have artificial SHA sums to make identification easier\n\t\t\t{Index: 0, Sha256: []byte(\"0000\")},\n\t\t\t{Index: 1, Sha256: []byte(\"1111\")},\n\t\t\t{Index: 2, Sha256: []byte(\"2222\")},\n\t\t\t{Index: 3, Sha256: []byte(\"3333\")},\n\t\t},\n\t\tChunksize: 8,\n\t}\n\ttestSet := []struct {\n\t\toffset int64\n\t\tsize int64\n\t\twant [][]byte\n\t}{\n\t\t{\n\t\t\toffset: 0,\n\t\t\tsize: 1,\n\t\t\twant: [][]byte{[]byte(\"0000\")},\n\t\t},\n\t\t{\n\t\t\toffset: 0,\n\t\t\tsize: 32,\n\t\t\twant: [][]byte{\n\t\t\t\t[]byte(\"0000\"),\n\t\t\t\t[]byte(\"1111\"),\n\t\t\t\t[]byte(\"2222\"),\n\t\t\t\t[]byte(\"3333\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\toffset: 31,\n\t\t\tsize: 1,\n\t\t\twant: [][]byte{[]byte(\"3333\")},\n\t\t},\n\t\t{\n\t\t\toffset: 12,\n\t\t\tsize: 8,\n\t\t\twant: [][]byte{\n\t\t\t\t[]byte(\"1111\"),\n\t\t\t\t[]byte(\"2222\"),\n\t\t\t},\n\t\t},\n\t}\n\tfor _, ts := range testSet {\n\t\tcs, err := chunksForRead(f, ts.offset, ts.size)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: chunksForRead(%+v, %d, %d): %s\", f, ts.offset, ts.size, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(cs) != len(ts.want) {\n\t\t\tt.Errorf(\"chunksForRead(%+v, %d, %d), want: %s, got: %s\", f, ts.offset, ts.size, ts.want, cs)\n\t\t\tcontinue\n\t\t}\n\t\tfor i := 0; i < len(cs); i++ {\n\t\t\tif !bytes.Equal(cs[i], ts.want[i]) {\n\t\t\t\tt.Errorf(\"chunksForRead(%+v, %d, %d), want: %s, got: %s\", f, ts.offset, ts.size, ts.want, cs)\n\t\t\t}\n\t\t}\n\t}\n\t_, err := chunksForRead(f, 33, 1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, 33, 1)\n\t}\n\t_, err = chunksForRead(f, -1024, 1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, -1024, 1)\n\t}\n\t_, err = chunksForRead(f, 0, -1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, -1024, 1)\n\t}\n}\n<commit_msg>actually write test files into the fusefs :)<commit_after>package fusefs\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/asjoyner\/shade\"\n\t\"github.com\/asjoyner\/shade\/drive\"\n\t\"github.com\/asjoyner\/shade\/drive\/memory\"\n\n\t\"bazil.org\/fuse\"\n)\n\nfunc init() {\n\t\/\/ This may be helpful if the tests fail.\n\t\/\/fstestutil.DebugByDefault()\n}\n\n\/\/ TestFuseRead initializes a series of random chunks of data, calculates a\n\/\/ shade.Sum and shade.File for each, placed in a series of subdirectories\n\/\/ based on the Sum. It uses PutChunk to put them into a memory client, and\n\/\/ mounts a fusefs on that memory client. It then uses filepath.Walk to\n\/\/ iterate and validate the exposed filesystem.\nfunc TestFuseRead(t *testing.T) {\n\tmountPoint, err := ioutil.TempDir(\"\", \"fusefsTest\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not acquire TempDir: %s\", err)\n\t}\n\tdefer tearDownDir(mountPoint)\n\n\tt.Logf(\"Mounting fuse filesystem at: %s\", mountPoint)\n\tclient, ffs, err := setupFuse(t, mountPoint)\n\tif err != nil {\n\t\tt.Fatalf(\"could not mount fuse: %s\", err)\n\t}\n\tdefer tearDownFuse(t, mountPoint)\n\n\tchunkSize := 100 * 256 \/\/ in bytes\n\tnc := 50 \/\/ number of chunks\n\n\t\/\/ Generate some random file contents\n\ttestChunks := make(map[string][]byte, nc)\n\tfor i := 0; i < nc; i++ {\n\t\tn := make([]byte, chunkSize)\n\t\trand.Read(n)\n\t\ts := sha256.Sum256(n)\n\t\tchunkSum := string(s[:])\n\t\ttestChunks[chunkSum] = n\n\t}\n\n\t\/\/ Populate test chunks into the client\n\tfor stringSum, chunk := range testChunks {\n\t\terr := client.PutChunk([]byte(stringSum), chunk)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to put chunk \\\"%x\\\": %s\", stringSum, err)\n\t\t}\n\t}\n\n\t\/\/ Populate files, referencing those chunks, into the client\n\t\/\/ the directory scheme is to break apart the shasum into dirs:\n\t\/\/ deadbeef == de\/ad\/be\/ff\/deadbeef\n\t\/\/ .. unless it starts with 0, so we exercise files at \/ too.\n\ti := 0\n\tfor chunkStringSum := range testChunks {\n\t\tfilename := pathFromStringSum(chunkStringSum)\n\t\tfile := shade.File{\n\t\t\tFilename: filename,\n\t\t\tFilesize: int64(len(chunkStringSum)),\n\t\t\tChunks: []shade.Chunk{{\n\t\t\t\tIndex: 0,\n\t\t\t\tSha256: []byte(chunkStringSum),\n\t\t\t}},\n\t\t\tChunksize: chunkSize,\n\t\t}\n\t\tfj, err := file.ToJSON()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"test data is broken, could not marshal File: %s\", err)\n\t\t}\n\t\tfileSum := sha256.Sum256(fj)\n\t\tif err := client.PutFile(fileSum[:], fj); err != nil {\n\t\t\tt.Errorf(\"failed to PutFile \\\"%x\\\": %s\", fileSum[:], err)\n\t\t}\n\t\ti++\n\t\tt.Logf(\"Added to drive.Client %d: %s\\n\", i, filename)\n\t}\n\n\t\/\/ double check that the client is sane\n\tfiles, err := client.ListFiles()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif nf := len(files); nf != nc {\n\t\tt.Fatalf(\"incomplete file set in client, want: %d, got %d\", nc, nf)\n\t}\n\tt.Logf(\"There are %d files known to the drive.Client.\", nc)\n\n\tif err := ffs.Refresh(); err != nil {\n\t\tt.Fatalf(\"failed to refresh fuse fileserver: %s\", err)\n\t}\n\tt.Logf(\"Drive client refreshed successfully.\")\n\n\tseen := make(map[string]bool)\n\tvisit := func(path string, f os.FileInfo, err error) error {\n\t\tif err1 := checkPath(t, testChunks, seen, mountPoint, path, f, err); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\treturn nil\n\t}\n\n\tt.Logf(\"Attempting to walk the filesystem.\")\n\tif err := filepath.Walk(mountPoint, visit); err != nil {\n\t\tt.Fatalf(\"filepath.Walk() returned %v\", err)\n\t}\n\n\tif ns := len(seen); ns != nc {\n\t\tt.Errorf(\"incomplete file set in fuse, want: %d, got %d\", nc, ns)\n\t\tfor chunk := range testChunks {\n\t\t\tif !seen[chunk] {\n\t\t\t\tt.Errorf(\"missing file: %x\", chunk)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ TestFuseRoundTrip creates an empty memory client, mounts a fusefs filesystem\n\/\/ against it, generates random test data, writes it to the filesystem, reads\n\/\/ it back out and validates it is as expected.\nfunc TestFuseRoundtrip(t *testing.T) {\n\tmountPoint, err := ioutil.TempDir(\"\", \"fusefsTest\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not acquire TempDir: %s\", err)\n\t}\n\tdefer tearDownDir(mountPoint)\n\n\tt.Logf(\"Mounting fuse filesystem at: %s\", mountPoint)\n\tif _, _, err := setupFuse(t, mountPoint); err != nil {\n\t\tt.Fatalf(\"could not mount fuse: %s\", err)\n\t}\n\tdefer tearDownFuse(t, mountPoint)\n\n\tchunkSize := 100 * 256 \/\/ in bytes\n\tnc := 50 \/\/ number of chunks\n\n\t\/\/ Generate some random file contents\n\ttestChunks := make(map[string][]byte, nc)\n\tfor i := 0; i < nc; i++ {\n\t\tn := make([]byte, chunkSize)\n\t\trand.Read(n)\n\t\ts := sha256.Sum256(n)\n\t\tchunkSum := string(s[:])\n\t\ttestChunks[chunkSum] = n\n\t}\n\n\t\/\/ Write those testChunks to the fusefs\n\tfor stringSum, chunk := range testChunks {\n\t\tfilename := path.Join(mountPoint, pathFromStringSum(stringSum))\n\t\tif err := os.MkdirAll(path.Dir(filename), 0700); err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t\tif err := ioutil.WriteFile(filename, chunk, 0400); err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\t}\n\n\t\/\/ Validate all the files have the right contents\n\tseen := make(map[string]bool)\n\tvisit := func(path string, f os.FileInfo, err error) error {\n\t\tif err1 := checkPath(t, testChunks, seen, mountPoint, path, f, err); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\treturn nil\n\t}\n\n\tt.Logf(\"Attempting to walk the filesystem.\")\n\tif err := filepath.Walk(mountPoint, visit); err != nil {\n\t\tt.Fatalf(\"filepath.Walk() returned %v\", err)\n\t}\n\n\tif ns := len(seen); ns != nc {\n\t\tt.Errorf(\"incomplete file set in fuse, want: %d, got %d\", nc, ns)\n\t\tfor chunk := range testChunks {\n\t\t\tif !seen[chunk] {\n\t\t\t\tt.Errorf(\"missing file: %x\", chunk)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ setup returns the absolute path to a mountpoint for a fuse FS, and the\n\/\/ memory-backed drive.Client which it is following. The FS is configured not\n\/\/ to refresh the drive.Client, you must call Refresh() if you update the\n\/\/ client outside fuse.\nfunc setupFuse(t *testing.T, mountPoint string) (drive.Client, *Server, error) {\n\toptions := []fuse.MountOption{\n\t\tfuse.FSName(\"Shade\"),\n\t\tfuse.NoAppleDouble(),\n\t}\n\tconn, err := fuse.Mount(mountPoint, options...)\n\tif err != nil {\n\t\t\/\/ often means the mountpoint is busy... but it's a TempDir...\n\t\treturn nil, nil, fmt.Errorf(\"could not setup Fuse mount: %s\", err)\n\t}\n\n\tmc, err := memory.NewClient(drive.Config{Provider: \"memory\"})\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"NewClient() for test config failed: %s\", err)\n\t}\n\n\tffs, err := New(mc, conn, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"fuse server initialization failed: %s\", err)\n\t}\n\tgo func() {\n\t\terr = ffs.Serve()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"serving fuse connection failed: %s\", err)\n\t\t}\n\t}()\n\n\t<-conn.Ready \/\/ returns when the FS is usable\n\tif conn.MountError != nil {\n\t\treturn nil, nil, fmt.Errorf(\"fuse exited with an error: %s\", err)\n\t}\n\treturn mc, ffs, nil\n}\n\nfunc tearDownFuse(t *testing.T, dir string) {\n\tif err := fuse.Unmount(dir); err != nil {\n\t\tt.Fatalf(\"could not Unmount test dir: %s\", err)\n\t}\n}\n\nfunc tearDownDir(dir string) {\n\tif err := os.RemoveAll(dir); err != nil {\n\t\tfmt.Printf(\"Could not clean up: %s\", err)\n\t}\n}\n\nfunc pathFromStringSum(sum string) string {\n\tchs := hex.EncodeToString([]byte(sum))\n\tvar filename string\n\tif strings.HasPrefix(chs, \"0\") {\n\t\tfilename = chs\n\t} else {\n\t\t\/\/ TODO: don't call Sprintf so often. :)\n\t\tfor i := 0; i <= len(chs)-8; i += 8 {\n\t\t\tfilename = fmt.Sprintf(\"%s\/%s\", filename, chs[i:i+8])\n\t\t}\n\t}\n\treturn strings.TrimPrefix(filename, \"\/\")\n}\n\nfunc checkPath(t *testing.T, testChunks map[string][]byte, seen map[string]bool, mountPoint, path string, f os.FileInfo, err error) error {\n\tif err != nil {\n\t\tt.Errorf(\"failed to read path: %s\", err)\n\t\treturn err\n\t}\n\tif !f.IsDir() {\n\t\tt.Logf(\"Seen in FS %d: %s\\n\", len(seen)+1, path)\n\t\tcontents, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn nil\n\t\t}\n\t\tfilename := strings.TrimPrefix(path, mountPoint)\n\t\tfilename = strings.Replace(filename, \"\/\", \"\", -1)\n\t\tchs, err := hex.DecodeString(filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"could not hex decode filename in Fuse FS: %s: %s\", filename, err)\n\t\t\treturn nil\n\t\t}\n\t\tif bytes.Equal(contents, testChunks[string(chs)]) {\n\t\t\tt.Errorf(\"contents of %s did not match, want: %s, got %s\", filename, testChunks[filename], contents)\n\t\t\treturn nil\n\t\t}\n\t\tif seen[string(chs)] {\n\t\t\tt.Errorf(\"saw chunk twice: %x\", chs)\n\t\t}\n\t\tseen[string(chs)] = true\n\t}\n\treturn nil\n}\n\n\/\/ TestChunksForRead tests the function which calculates which chunks are\n\/\/ necessary to service a fuse read request. It initializes a set of chunks,\n\/\/ then calls chunksForRead to ensure the correct series of chunks are returned\n\/\/ for each offset.\nfunc TestChunksForRead(t *testing.T) {\n\tf := &shade.File{\n\t\tFilename: \"test\",\n\t\tFilesize: 32,\n\t\tChunks: []shade.Chunk{\n\t\t\t\/\/ chunks have artificial SHA sums to make identification easier\n\t\t\t{Index: 0, Sha256: []byte(\"0000\")},\n\t\t\t{Index: 1, Sha256: []byte(\"1111\")},\n\t\t\t{Index: 2, Sha256: []byte(\"2222\")},\n\t\t\t{Index: 3, Sha256: []byte(\"3333\")},\n\t\t},\n\t\tChunksize: 8,\n\t}\n\ttestSet := []struct {\n\t\toffset int64\n\t\tsize int64\n\t\twant [][]byte\n\t}{\n\t\t{\n\t\t\toffset: 0,\n\t\t\tsize: 1,\n\t\t\twant: [][]byte{[]byte(\"0000\")},\n\t\t},\n\t\t{\n\t\t\toffset: 0,\n\t\t\tsize: 32,\n\t\t\twant: [][]byte{\n\t\t\t\t[]byte(\"0000\"),\n\t\t\t\t[]byte(\"1111\"),\n\t\t\t\t[]byte(\"2222\"),\n\t\t\t\t[]byte(\"3333\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\toffset: 31,\n\t\t\tsize: 1,\n\t\t\twant: [][]byte{[]byte(\"3333\")},\n\t\t},\n\t\t{\n\t\t\toffset: 12,\n\t\t\tsize: 8,\n\t\t\twant: [][]byte{\n\t\t\t\t[]byte(\"1111\"),\n\t\t\t\t[]byte(\"2222\"),\n\t\t\t},\n\t\t},\n\t}\n\tfor _, ts := range testSet {\n\t\tcs, err := chunksForRead(f, ts.offset, ts.size)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: chunksForRead(%+v, %d, %d): %s\", f, ts.offset, ts.size, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(cs) != len(ts.want) {\n\t\t\tt.Errorf(\"chunksForRead(%+v, %d, %d), want: %s, got: %s\", f, ts.offset, ts.size, ts.want, cs)\n\t\t\tcontinue\n\t\t}\n\t\tfor i := 0; i < len(cs); i++ {\n\t\t\tif !bytes.Equal(cs[i], ts.want[i]) {\n\t\t\t\tt.Errorf(\"chunksForRead(%+v, %d, %d), want: %s, got: %s\", f, ts.offset, ts.size, ts.want, cs)\n\t\t\t}\n\t\t}\n\t}\n\t_, err := chunksForRead(f, 33, 1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, 33, 1)\n\t}\n\t_, err = chunksForRead(f, -1024, 1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, -1024, 1)\n\t}\n\t_, err = chunksForRead(f, 0, -1)\n\tif err == nil {\n\t\tt.Errorf(\"expected error from chunksForRead(%+v, %d, %d), got nil\", f, -1024, 1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package view\nimport (\n \"github.com\/ant0ine\/go-json-rest\/rest\"\n \"github.com\/rugo\/sacapi\/modules\/data\"\n \"strconv\"\n \"golang.org\/x\/net\/context\"\n \"github.com\/rugo\/sacapi\/modules\/calcom\"\n)\n\nfunc GetJSONMessage(w rest.ResponseWriter, r *rest.Request) {\n time, err := int64(strconv.Atoi(r.PathParam(\"time\")))\n if err != nil {\n rest.Error(w, \"Time has to be a number\", 400)\n return\n }\n t := data.ClockInfo{\n Appointment: data.Appointment{\n Time: time,\n Name: \"Meeting\",\n Description: \"Nöpe! Chuck Testa!\",\n },\n Timezone: \"UTC+01:00\",\n Apivers: 0,\n }\n w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n w.WriteJson(t)\n}\n\nfunc GetNextCalendarEntry(w rest.ResponseWriter, r *rest.Request) {\n ctx := context.Background()\n deviceId := r.PathParam(\"id\")\n nextEntry, err := calcom.GetNextGoogleCalendarEntry(ctx, deviceId)\n\n if err != nil {\n rest.Error(w, \"Could not read calendar entries\", 500)\n return\n }\n\n w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n w.WriteJson(nextEntry)\n}<commit_msg>did quick and dirty stuff<commit_after>package view\nimport (\n \"github.com\/ant0ine\/go-json-rest\/rest\"\n \"github.com\/rugo\/sacapi\/modules\/data\"\n \"strconv\"\n \"golang.org\/x\/net\/context\"\n \"github.com\/rugo\/sacapi\/modules\/calcom\"\n)\n\nfunc GetJSONMessage(w rest.ResponseWriter, r *rest.Request) {\n time, err := strconv.Atoi(r.PathParam(\"time\"))\n if err != nil {\n rest.Error(w, \"Time has to be a number\", 400)\n return\n }\n t := data.ClockInfo{\n Appointment: data.Appointment{\n Time: int64(time),\n Name: \"Meeting\",\n Description: \"Nöpe! Chuck Testa!\",\n },\n Timezone: \"UTC+01:00\",\n Apivers: 0,\n }\n w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n w.WriteJson(t)\n}\n\nfunc GetNextCalendarEntry(w rest.ResponseWriter, r *rest.Request) {\n ctx := context.Background()\n deviceId := r.PathParam(\"id\")\n nextEntry, err := calcom.GetNextGoogleCalendarEntry(ctx, deviceId)\n\n if err != nil {\n rest.Error(w, \"Could not read calendar entries\", 500)\n return\n }\n\n w.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n w.WriteJson(nextEntry)\n}<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * Copyright 2016, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and\/or other materials provided with the\n * distribution.\n * * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *\/\n\n\/\/ client starts an interop client to do stress test and a metrics server to report qps.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/interop\"\n\ttestpb \"google.golang.org\/grpc\/interop\/grpc_testing\"\n\tmetricspb \"google.golang.org\/grpc\/stress\/grpc_testing\"\n)\n\nvar (\n\tserverAddresses = flag.String(\"server_addresses\", \"localhost:8080\", \"a list of server addresses\")\n\ttestCases = flag.String(\"test_cases\", \"\", \"a list of test cases along with the relative weights\")\n\ttestDurationSecs = flag.Int(\"test_duration_secs\", -1, \"test duration in seconds\")\n\tnumChannelsPerServer = flag.Int(\"num_channels_per_server\", 1, \"Number of channels (i.e connections) to each server\")\n\tnumStubsPerChannel = flag.Int(\"num_stubs_per_channel\", 1, \"Number of client stubs per each connection to server\")\n\tmetricsPort = flag.Int(\"metrics_port\", 8081, \"The port at which the stress client exposes QPS metrics\")\n\tuseTLS = flag.Bool(\"use_tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\ttestCA = flag.Bool(\"use_test_ca\", false, \"Whether to replace platform root CAs with test CA as the CA root\")\n\ttlsServerName = flag.String(\"server_host_override\", \"foo.test.google.fr\", \"The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.\")\n\n\t\/\/ The test CA root cert file\n\ttestCAFile = \"testdata\/ca.pem\"\n)\n\n\/\/ testCaseWithWeight contains the test case type and its weight.\ntype testCaseWithWeight struct {\n\tname string\n\tweight int\n}\n\n\/\/ parseTestCases converts test case string to a list of struct testCaseWithWeight.\nfunc parseTestCases(testCaseString string) []testCaseWithWeight {\n\ttestCaseStrings := strings.Split(testCaseString, \",\")\n\ttestCases := make([]testCaseWithWeight, len(testCaseStrings))\n\tfor i, str := range testCaseStrings {\n\t\ttestCase := strings.Split(str, \":\")\n\t\tif len(testCase) != 2 {\n\t\t\tpanic(fmt.Sprintf(\"invalid test case with weight: %s\", str))\n\t\t}\n\t\t\/\/ Check if test case is supported.\n\t\tswitch testCase[0] {\n\t\tcase\n\t\t\t\"empty_unary\",\n\t\t\t\"large_unary\",\n\t\t\t\"client_streaming\",\n\t\t\t\"server_streaming\",\n\t\t\t\"empty_stream\":\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown test type: %s\", testCase[0]))\n\t\t}\n\t\ttestCases[i].name = testCase[0]\n\t\tw, err := strconv.Atoi(testCase[1])\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"%v\", err))\n\t\t}\n\t\ttestCases[i].weight = w\n\t}\n\treturn testCases\n}\n\n\/\/ weightedRandomTestSelector defines a weighted random selector for test case types.\ntype weightedRandomTestSelector struct {\n\ttests []testCaseWithWeight\n\ttotalWeight int\n}\n\n\/\/ newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight.\nfunc newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector {\n\tvar totalWeight int\n\tfor _, t := range tests {\n\t\ttotalWeight += t.weight\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn &weightedRandomTestSelector{tests, totalWeight}\n}\n\nfunc (selector weightedRandomTestSelector) getNextTest() string {\n\trandom := rand.Intn(selector.totalWeight)\n\tvar weightSofar int\n\tfor _, test := range selector.tests {\n\t\tweightSofar += test.weight\n\t\tif random < weightSofar {\n\t\t\treturn test.name\n\t\t}\n\t}\n\tpanic(\"no test case selected by weightedRandomTestSelector\")\n}\n\n\/\/ gauge stores the qps of one interop client (one stub).\ntype gauge struct {\n\tmutex sync.RWMutex\n\tval int64\n}\n\nfunc (g *gauge) set(v int64) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\tg.val = v\n}\n\nfunc (g *gauge) get() int64 {\n\tg.mutex.RLock()\n\tdefer g.mutex.RUnlock()\n\treturn g.val\n}\n\n\/\/ server implements metrics server functions.\ntype server struct {\n\tmutex sync.RWMutex\n\t\/\/ gauges is a map from \/stress_test\/server_<n>\/channel_<n>\/stub_<n>\/qps to its qps gauge.\n\tgauges map[string]*gauge\n}\n\n\/\/ newMetricsServer returns a new metrics server.\nfunc newMetricsServer() *server {\n\treturn &server{gauges: make(map[string]*gauge)}\n}\n\n\/\/ GetAllGauges returns all gauges.\nfunc (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\n\tfor name, gauge := range s.gauges {\n\t\tif err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetGauge returns the gauge for the given name.\nfunc (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\n\tif g, ok := s.gauges[in.Name]; ok {\n\t\treturn &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil\n\t}\n\treturn nil, grpc.Errorf(codes.InvalidArgument, \"gauge with name %s not found\", in.Name)\n}\n\n\/\/ createGauge creates a gauge using the given name in metrics server.\nfunc (s *server) createGauge(name string) *gauge {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif _, ok := s.gauges[name]; ok {\n\t\t\/\/ gauge already exists.\n\t\tpanic(fmt.Sprintf(\"gauge %s already exists\", name))\n\t}\n\tvar g gauge\n\ts.gauges[name] = &g\n\treturn &g\n}\n\nfunc startServer(server *server, port int) {\n\tlis, err := net.Listen(\"tcp\", \":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := grpc.NewServer()\n\tmetricspb.RegisterMetricsServiceServer(s, server)\n\ts.Serve(lis)\n\n}\n\n\/\/ performRPCs uses weightedRandomTestSelector to select test case and runs the tests.\nfunc performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) {\n\tclient := testpb.NewTestServiceClient(conn)\n\tvar numCalls int64\n\tstartTime := time.Now()\n\tfor {\n\t\tdone := make(chan bool, 1)\n\t\tgo func() {\n\t\t\ttest := selector.getNextTest()\n\t\t\tswitch test {\n\t\t\tcase \"empty_unary\":\n\t\t\t\tinterop.DoEmptyUnaryCall(client)\n\t\t\tcase \"large_unary\":\n\t\t\t\tinterop.DoLargeUnaryCall(client)\n\t\t\tcase \"client_streaming\":\n\t\t\t\tinterop.DoClientStreaming(client)\n\t\t\tcase \"server_streaming\":\n\t\t\t\tinterop.DoServerStreaming(client)\n\t\t\tcase \"empty_stream\":\n\t\t\t\tinterop.DoEmptyStream(client)\n\t\t\t}\n\t\t\tdone <- true\n\t\t}()\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-done:\n\t\t\tnumCalls++\n\t\t\tgauge.set(int64(float64(numCalls) \/ time.Since(startTime).Seconds()))\n\t\t}\n\t}\n}\n\nfunc logParameterInfo(addresses []string, tests []testCaseWithWeight) {\n\tgrpclog.Printf(\"server_addresses: %s\", *serverAddresses)\n\tgrpclog.Printf(\"test_cases: %s\", *testCases)\n\tgrpclog.Printf(\"test_duration_secs: %d\", *testDurationSecs)\n\tgrpclog.Printf(\"num_channels_per_server: %d\", *numChannelsPerServer)\n\tgrpclog.Printf(\"num_stubs_per_channel: %d\", *numStubsPerChannel)\n\tgrpclog.Printf(\"metrics_port: %d\", *metricsPort)\n\n\tgrpclog.Println(\"addresses:\")\n\tfor i, addr := range addresses {\n\t\tgrpclog.Printf(\"%d. %s\\n\", i+1, addr)\n\t}\n\tgrpclog.Println(\"tests:\")\n\tfor i, test := range tests {\n\t\tgrpclog.Printf(\"%d. %v\\n\", i+1, test)\n\t}\n}\n\nfunc getChannel(address string, useTLS, testCA bool, tlsServerName string) (*grpc.ClientConn, error) {\n\tvar opts []grpc.DialOption\n\tif useTLS {\n\t\tvar sn string\n\t\tif tlsServerName != \"\" {\n\t\t\tsn = tlsServerName\n\t\t}\n\t\tvar creds credentials.TransportCredentials\n\t\tif testCA {\n\t\t\tvar err error\n\t\t\tcreds, err = credentials.NewClientTLSFromFile(testCAFile, sn)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to create TLS credentials %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tcreds = credentials.NewClientTLSFromCert(nil, sn)\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(creds))\n\t} else {\n\t\topts = append(opts, grpc.WithInsecure())\n\t}\n\treturn grpc.Dial(address, opts...)\n}\n\nfunc main() {\n\tflag.Parse()\n\taddresses := strings.Split(*serverAddresses, \",\")\n\ttests := parseTestCases(*testCases)\n\tlogParameterInfo(addresses, tests)\n\ttestSelector := newWeightedRandomTestSelector(tests)\n\tmetricsServer := newMetricsServer()\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel)\n\tstop := make(chan bool)\n\n\tfor serverIndex, address := range addresses {\n\t\tfor connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ {\n\t\t\tconn, err := getChannel(address, *useTLS, *testCA, *tlsServerName)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Fail to dial: %v\", err)\n\t\t\t}\n\t\t\tdefer conn.Close()\n\t\t\tfor clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ {\n\t\t\t\tname := fmt.Sprintf(\"\/stress_test\/server_%d\/channel_%d\/stub_%d\/qps\", serverIndex+1, connIndex+1, clientIndex+1)\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tg := metricsServer.createGauge(name)\n\t\t\t\t\tperformRPCs(g, conn, testSelector, stop)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t}\n\t}\n\tgo startServer(metricsServer, *metricsPort)\n\tif *testDurationSecs > 0 {\n\t\ttime.Sleep(time.Duration(*testDurationSecs) * time.Second)\n\t\tclose(stop)\n\t}\n\twg.Wait()\n\tgrpclog.Printf(\" ===== ALL DONE ===== \")\n\n}\n<commit_msg>Add logging of new flags<commit_after>\/*\n *\n * Copyright 2016, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and\/or other materials provided with the\n * distribution.\n * * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *\/\n\n\/\/ client starts an interop client to do stress test and a metrics server to report qps.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/interop\"\n\ttestpb \"google.golang.org\/grpc\/interop\/grpc_testing\"\n\tmetricspb \"google.golang.org\/grpc\/stress\/grpc_testing\"\n)\n\nvar (\n\tserverAddresses = flag.String(\"server_addresses\", \"localhost:8080\", \"a list of server addresses\")\n\ttestCases = flag.String(\"test_cases\", \"\", \"a list of test cases along with the relative weights\")\n\ttestDurationSecs = flag.Int(\"test_duration_secs\", -1, \"test duration in seconds\")\n\tnumChannelsPerServer = flag.Int(\"num_channels_per_server\", 1, \"Number of channels (i.e connections) to each server\")\n\tnumStubsPerChannel = flag.Int(\"num_stubs_per_channel\", 1, \"Number of client stubs per each connection to server\")\n\tmetricsPort = flag.Int(\"metrics_port\", 8081, \"The port at which the stress client exposes QPS metrics\")\n\tuseTLS = flag.Bool(\"use_tls\", false, \"Connection uses TLS if true, else plain TCP\")\n\ttestCA = flag.Bool(\"use_test_ca\", false, \"Whether to replace platform root CAs with test CA as the CA root\")\n\ttlsServerName = flag.String(\"server_host_override\", \"foo.test.google.fr\", \"The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.\")\n\n\t\/\/ The test CA root cert file\n\ttestCAFile = \"testdata\/ca.pem\"\n)\n\n\/\/ testCaseWithWeight contains the test case type and its weight.\ntype testCaseWithWeight struct {\n\tname string\n\tweight int\n}\n\n\/\/ parseTestCases converts test case string to a list of struct testCaseWithWeight.\nfunc parseTestCases(testCaseString string) []testCaseWithWeight {\n\ttestCaseStrings := strings.Split(testCaseString, \",\")\n\ttestCases := make([]testCaseWithWeight, len(testCaseStrings))\n\tfor i, str := range testCaseStrings {\n\t\ttestCase := strings.Split(str, \":\")\n\t\tif len(testCase) != 2 {\n\t\t\tpanic(fmt.Sprintf(\"invalid test case with weight: %s\", str))\n\t\t}\n\t\t\/\/ Check if test case is supported.\n\t\tswitch testCase[0] {\n\t\tcase\n\t\t\t\"empty_unary\",\n\t\t\t\"large_unary\",\n\t\t\t\"client_streaming\",\n\t\t\t\"server_streaming\",\n\t\t\t\"empty_stream\":\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown test type: %s\", testCase[0]))\n\t\t}\n\t\ttestCases[i].name = testCase[0]\n\t\tw, err := strconv.Atoi(testCase[1])\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"%v\", err))\n\t\t}\n\t\ttestCases[i].weight = w\n\t}\n\treturn testCases\n}\n\n\/\/ weightedRandomTestSelector defines a weighted random selector for test case types.\ntype weightedRandomTestSelector struct {\n\ttests []testCaseWithWeight\n\ttotalWeight int\n}\n\n\/\/ newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight.\nfunc newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector {\n\tvar totalWeight int\n\tfor _, t := range tests {\n\t\ttotalWeight += t.weight\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn &weightedRandomTestSelector{tests, totalWeight}\n}\n\nfunc (selector weightedRandomTestSelector) getNextTest() string {\n\trandom := rand.Intn(selector.totalWeight)\n\tvar weightSofar int\n\tfor _, test := range selector.tests {\n\t\tweightSofar += test.weight\n\t\tif random < weightSofar {\n\t\t\treturn test.name\n\t\t}\n\t}\n\tpanic(\"no test case selected by weightedRandomTestSelector\")\n}\n\n\/\/ gauge stores the qps of one interop client (one stub).\ntype gauge struct {\n\tmutex sync.RWMutex\n\tval int64\n}\n\nfunc (g *gauge) set(v int64) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\tg.val = v\n}\n\nfunc (g *gauge) get() int64 {\n\tg.mutex.RLock()\n\tdefer g.mutex.RUnlock()\n\treturn g.val\n}\n\n\/\/ server implements metrics server functions.\ntype server struct {\n\tmutex sync.RWMutex\n\t\/\/ gauges is a map from \/stress_test\/server_<n>\/channel_<n>\/stub_<n>\/qps to its qps gauge.\n\tgauges map[string]*gauge\n}\n\n\/\/ newMetricsServer returns a new metrics server.\nfunc newMetricsServer() *server {\n\treturn &server{gauges: make(map[string]*gauge)}\n}\n\n\/\/ GetAllGauges returns all gauges.\nfunc (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\n\tfor name, gauge := range s.gauges {\n\t\tif err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetGauge returns the gauge for the given name.\nfunc (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) {\n\ts.mutex.RLock()\n\tdefer s.mutex.RUnlock()\n\n\tif g, ok := s.gauges[in.Name]; ok {\n\t\treturn &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil\n\t}\n\treturn nil, grpc.Errorf(codes.InvalidArgument, \"gauge with name %s not found\", in.Name)\n}\n\n\/\/ createGauge creates a gauge using the given name in metrics server.\nfunc (s *server) createGauge(name string) *gauge {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif _, ok := s.gauges[name]; ok {\n\t\t\/\/ gauge already exists.\n\t\tpanic(fmt.Sprintf(\"gauge %s already exists\", name))\n\t}\n\tvar g gauge\n\ts.gauges[name] = &g\n\treturn &g\n}\n\nfunc startServer(server *server, port int) {\n\tlis, err := net.Listen(\"tcp\", \":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\tgrpclog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := grpc.NewServer()\n\tmetricspb.RegisterMetricsServiceServer(s, server)\n\ts.Serve(lis)\n\n}\n\n\/\/ performRPCs uses weightedRandomTestSelector to select test case and runs the tests.\nfunc performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) {\n\tclient := testpb.NewTestServiceClient(conn)\n\tvar numCalls int64\n\tstartTime := time.Now()\n\tfor {\n\t\tdone := make(chan bool, 1)\n\t\tgo func() {\n\t\t\ttest := selector.getNextTest()\n\t\t\tswitch test {\n\t\t\tcase \"empty_unary\":\n\t\t\t\tinterop.DoEmptyUnaryCall(client)\n\t\t\tcase \"large_unary\":\n\t\t\t\tinterop.DoLargeUnaryCall(client)\n\t\t\tcase \"client_streaming\":\n\t\t\t\tinterop.DoClientStreaming(client)\n\t\t\tcase \"server_streaming\":\n\t\t\t\tinterop.DoServerStreaming(client)\n\t\t\tcase \"empty_stream\":\n\t\t\t\tinterop.DoEmptyStream(client)\n\t\t\t}\n\t\t\tdone <- true\n\t\t}()\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-done:\n\t\t\tnumCalls++\n\t\t\tgauge.set(int64(float64(numCalls) \/ time.Since(startTime).Seconds()))\n\t\t}\n\t}\n}\n\nfunc logParameterInfo(addresses []string, tests []testCaseWithWeight) {\n\tgrpclog.Printf(\"server_addresses: %s\", *serverAddresses)\n\tgrpclog.Printf(\"test_cases: %s\", *testCases)\n\tgrpclog.Printf(\"test_duration_secs: %d\", *testDurationSecs)\n\tgrpclog.Printf(\"num_channels_per_server: %d\", *numChannelsPerServer)\n\tgrpclog.Printf(\"num_stubs_per_channel: %d\", *numStubsPerChannel)\n\tgrpclog.Printf(\"metrics_port: %d\", *metricsPort)\n\tgrpclog.Printf(\"use_tls: %t\", *useTLS)\n\tgrpclog.Printf(\"use_test_ca: %t\", *testCA)\n\tgrpclog.Printf(\"server_host_override: %s\", *tlsServerName)\n\n\tgrpclog.Println(\"addresses:\")\n\tfor i, addr := range addresses {\n\t\tgrpclog.Printf(\"%d. %s\\n\", i+1, addr)\n\t}\n\tgrpclog.Println(\"tests:\")\n\tfor i, test := range tests {\n\t\tgrpclog.Printf(\"%d. %v\\n\", i+1, test)\n\t}\n}\n\nfunc newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.ClientConn, error) {\n\tvar opts []grpc.DialOption\n\tif useTLS {\n\t\tvar sn string\n\t\tif tlsServerName != \"\" {\n\t\t\tsn = tlsServerName\n\t\t}\n\t\tvar creds credentials.TransportCredentials\n\t\tif testCA {\n\t\t\tvar err error\n\t\t\tcreds, err = credentials.NewClientTLSFromFile(testCAFile, sn)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Failed to create TLS credentials %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tcreds = credentials.NewClientTLSFromCert(nil, sn)\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(creds))\n\t} else {\n\t\topts = append(opts, grpc.WithInsecure())\n\t}\n\treturn grpc.Dial(address, opts...)\n}\n\nfunc main() {\n\tflag.Parse()\n\taddresses := strings.Split(*serverAddresses, \",\")\n\ttests := parseTestCases(*testCases)\n\tlogParameterInfo(addresses, tests)\n\ttestSelector := newWeightedRandomTestSelector(tests)\n\tmetricsServer := newMetricsServer()\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel)\n\tstop := make(chan bool)\n\n\tfor serverIndex, address := range addresses {\n\t\tfor connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ {\n\t\t\tconn, err := newConn(address, *useTLS, *testCA, *tlsServerName)\n\t\t\tif err != nil {\n\t\t\t\tgrpclog.Fatalf(\"Fail to dial: %v\", err)\n\t\t\t}\n\t\t\tdefer conn.Close()\n\t\t\tfor clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ {\n\t\t\t\tname := fmt.Sprintf(\"\/stress_test\/server_%d\/channel_%d\/stub_%d\/qps\", serverIndex+1, connIndex+1, clientIndex+1)\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tg := metricsServer.createGauge(name)\n\t\t\t\t\tperformRPCs(g, conn, testSelector, stop)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t}\n\t}\n\tgo startServer(metricsServer, *metricsPort)\n\tif *testDurationSecs > 0 {\n\t\ttime.Sleep(time.Duration(*testDurationSecs) * time.Second)\n\t\tclose(stop)\n\t}\n\twg.Wait()\n\tgrpclog.Printf(\" ===== ALL DONE ===== \")\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The MIT License (MIT)\n\n\/\/ Copyright (c) 2016 Maciej Borzecki\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\npackage match\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bboozzoo\/q3stats\/loader\"\n\t\"github.com\/bboozzoo\/q3stats\/models\"\n\t\"github.com\/bboozzoo\/q3stats\/store\"\n\t\"github.com\/pkg\/errors\"\n\t\"log\"\n)\n\ntype MatchController struct {\n\tdb store.DB\n}\n\nfunc NewController(db store.DB) *MatchController {\n\n\t\/\/ move this somewhere else?\n\tmodels.CreateSchema(db)\n\n\treturn &MatchController{db}\n}\n\nfunc makePlayerMatchStat(stats []loader.RawStat) models.PlayerMatchStat {\n\tvar pms models.PlayerMatchStat\n\tmapping := map[string]*int{\n\t\t\"Score\": &pms.Score,\n\t\t\"Kills\": &pms.Kills,\n\t\t\"Deaths\": &pms.Deaths,\n\t\t\"Suicides\": &pms.Suicides,\n\t\t\"Net\": &pms.Net,\n\t\t\"DamageGiven\": &pms.DamageGiven,\n\t\t\"DamageTaken\": &pms.DamageTaken,\n\t\t\"HealthTotal\": &pms.HealthTotal,\n\t\t\"ArmorTotal\": &pms.ArmorTotal,\n\t}\n\n\tfor _, stat := range stats {\n\t\tival, ok := mapping[stat.Name]\n\t\tif ok == false {\n\t\t\tlog.Printf(\"skipping unknown stat: %s: %d\",\n\t\t\t\tstat.Name, stat.Value)\n\t\t\tcontinue\n\t\t}\n\n\t\t*ival = stat.Value\n\t}\n\treturn pms\n}\n\n\/\/ Add match from raw XML data in `data`. Returns match hash or error\nfunc (m *MatchController) AddFromData(data []byte) (string, error) {\n\trmatch, err := loader.LoadMatchData(data)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to load match data\")\n\t}\n\n\tif m.isMatchIndexed(rmatch) == true {\n\t\treturn \"\", fmt.Errorf(\"already present\")\n\t}\n\n\tif err := m.storeMatch(rmatch); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to store match data\")\n\t}\n\n\treturn rmatch.DataHash, nil\n}\n\nfunc (m *MatchController) isMatchIndexed(rmatch *loader.RawMatch) bool {\n\tdb := m.db.Conn()\n\n\tvar mfound models.Match\n\tnotfound := db.Where(\"data_hash = ?\", rmatch.DataHash).\n\t\tFind(&mfound).\n\t\tRecordNotFound()\n\tif notfound == false {\n\t\tlog.Printf(\"record found: %+v\", mfound)\n\t\treturn true\n\t}\n\n\treturn false\n}\n<commit_msg>controllers\/match: use match model helpers<commit_after>\/\/ The MIT License (MIT)\n\n\/\/ Copyright (c) 2016 Maciej Borzecki\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\npackage match\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bboozzoo\/q3stats\/loader\"\n\t\"github.com\/bboozzoo\/q3stats\/models\"\n\t\"github.com\/bboozzoo\/q3stats\/store\"\n\t\"github.com\/pkg\/errors\"\n\t\"log\"\n)\n\ntype MatchController struct {\n\tdb store.DB\n}\n\nfunc NewController(db store.DB) *MatchController {\n\n\t\/\/ move this somewhere else?\n\tmodels.CreateSchema(db)\n\n\treturn &MatchController{db}\n}\n\nfunc makePlayerMatchStat(stats []loader.RawStat) models.PlayerMatchStat {\n\tvar pms models.PlayerMatchStat\n\tmapping := map[string]*int{\n\t\t\"Score\": &pms.Score,\n\t\t\"Kills\": &pms.Kills,\n\t\t\"Deaths\": &pms.Deaths,\n\t\t\"Suicides\": &pms.Suicides,\n\t\t\"Net\": &pms.Net,\n\t\t\"DamageGiven\": &pms.DamageGiven,\n\t\t\"DamageTaken\": &pms.DamageTaken,\n\t\t\"HealthTotal\": &pms.HealthTotal,\n\t\t\"ArmorTotal\": &pms.ArmorTotal,\n\t}\n\n\tfor _, stat := range stats {\n\t\tival, ok := mapping[stat.Name]\n\t\tif ok == false {\n\t\t\tlog.Printf(\"skipping unknown stat: %s: %d\",\n\t\t\t\tstat.Name, stat.Value)\n\t\t\tcontinue\n\t\t}\n\n\t\t*ival = stat.Value\n\t}\n\treturn pms\n}\n\n\/\/ Add match from raw XML data in `data`. Returns match hash or error\nfunc (m *MatchController) AddFromData(data []byte) (string, error) {\n\trmatch, err := loader.LoadMatchData(data)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to load match data\")\n\t}\n\n\tif m.isMatchIndexed(rmatch) == true {\n\t\treturn \"\", fmt.Errorf(\"already present\")\n\t}\n\n\tif err := m.storeMatch(rmatch); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to store match data\")\n\t}\n\n\treturn rmatch.DataHash, nil\n}\n\nfunc (m *MatchController) isMatchIndexed(rmatch *loader.RawMatch) bool {\n\n\tmatch := models.FindMatchByHash(m.db, rmatch.DataHash)\n\tif match == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"container\/heap\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/big\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\tdbPath = \".db.json\"\n\tstreamDirPath = \".stream\"\n)\n\nconst (\n\tidLeastByte = 'a'\n\tidGreatestByte = 'z'\n\tidLength = 8\n)\n\n\/\/ afmt represents an audio format supported by ffmpeg\/avconv.\ntype afmt struct {\n\t\/\/ fmt is the format's name in ffmpeg\/avconv.\n\tfmt string\n\t\/\/ mime is the MIME type of the format.\n\tmime string\n\t\/\/ args are the codec-specific ffmpeg\/avconv arguments to use.\n\targs []string\n}\n\nvar (\n\tafmts = map[string]afmt{\n\t\t\".opus\": {\n\t\t\tfmt: \"ogg\",\n\t\t\tmime: \"audio\/ogg\",\n\t\t\targs: []string{\n\t\t\t\t\"-b:a\", \"128000\",\n\t\t\t\t\"-compression_level\", \"0\",\n\t\t\t},\n\t\t},\n\t\t\".mp3\": {\n\t\t\tfmt: \"mp3\",\n\t\t\tmime: \"audio\/mpeg\",\n\t\t\targs: []string{\n\t\t\t\t\"-q\", \"4\",\n\t\t\t},\n\t\t},\n\t}\n)\n\n\/\/ song represents a song in a library.\ntype song struct {\n\t\/\/ ID is the unique ID of the song.\n\tID string `json:\"id\"`\n\t\/\/ Path is the path to the song's source file.\n\tPath string `json:\"path\"`\n\t\/\/ Artist is the song's artist.\n\tArtist string `json:\"artist\"`\n\t\/\/ Album is the album the song comes from.\n\tAlbum string `json:\"album\"`\n\t\/\/ Disc is the album disc the song comes from.\n\tDisc int `json:\"disc\"`\n\t\/\/ Track is the song's track number on the disc it comes from.\n\tTrack int `json:\"track\"`\n\t\/\/ Title is the song's title.\n\tTitle string `json:\"title\"`\n}\n\ntype songHeap []*song\n\nfunc (h songHeap) Len() int {\n\treturn len(h)\n}\n\nfunc compareFold(s, t string) (eq bool, less bool) {\n\tsLower := strings.ToLower(s)\n\ttLower := strings.ToLower(t)\n\treturn sLower == tLower, sLower < tLower\n}\n\nfunc (h songHeap) Less(i, j int) bool {\n\tif eq, less := compareFold(h[i].Artist, h[j].Artist); !eq {\n\t\treturn less\n\t}\n\tif eq, less := compareFold(h[i].Album, h[j].Album); !eq {\n\t\treturn less\n\t}\n\tif h[i].Disc != h[j].Disc {\n\t\treturn h[i].Disc < h[j].Disc\n\t}\n\tif h[i].Track != h[j].Track {\n\t\treturn h[i].Track < h[j].Track\n\t}\n\tif eq, less := compareFold(h[i].Title, h[j].Title); !eq {\n\t\treturn less\n\t}\n\treturn h[i].Path < h[j].Path\n}\n\nfunc (h songHeap) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n}\n\nfunc (h *songHeap) Push(s interface{}) {\n\t*h = append(*h, s.(*song))\n}\n\nfunc (h *songHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\ts := old[n-1]\n\t*h = old[0 : n-1]\n\treturn s\n}\n\n\/\/ library represents a music library and server.\ntype library struct {\n\t\/\/ Songs is the primary record of songs in the library. Keys are\n\t\/\/ song.Paths, and values are songs.\n\tSongs map[string]*song `json:\"songs\"`\n\t\/\/ SongsByID is like songs, but indexed by song.ID instead of song.Path.\n\tSongsByID map[string]*song `json:\"songsByID\"`\n\t\/\/ SongsSorted is a list of songs in sorted order. Songs are sorted by\n\t\/\/ artist, then album, then track number.\n\tSongsSorted []*song `json:\"songsSorted\"`\n\t\/\/ path is the path to the library directory.\n\tpath string\n\t\/\/ convCmd is the command used to transcode source files.\n\tconvCmd string\n\t\/\/ probeCmd is the command used to read metadata tags from source files.\n\tprobeCmd string\n\t\/\/ reading is used to delay database write operations while read\n\t\/\/ operations are occuring.\n\treading sync.WaitGroup\n\t\/\/ writing is used to delay database read operations while a write\n\t\/\/ operation is occuring.\n\twriting sync.WaitGroup\n\t\/\/ encoding is used to delay stream operations while a streaming file\n\t\/\/ at the path represented by the key is encoding.\n\tencoding map[string]*sync.WaitGroup\n\t\/\/ streamRE is a regular expression used to match stream URLs.\n\tstreamRE *regexp.Regexp\n}\n\ntype tags struct {\n\tFormat map[string]interface{}\n\tStreams []map[string]interface{}\n}\n\nfunc valRaw(key string, cont map[string]interface{}) (val string, ok bool) {\n\ttags, ok := cont[\"tags\"].(map[string]interface{})\n\tif !ok {\n\t\treturn\n\t}\n\tif val, ok = tags[strings.ToLower(key)].(string); ok {\n\t\treturn val, ok\n\t}\n\tval, ok = tags[strings.ToUpper(key)].(string)\n\treturn\n}\n\nfunc (t tags) val(key string) (val string, ok bool) {\n\tif val, ok := valRaw(key, t.Format); ok {\n\t\treturn val, ok\n\t}\n\tfor _, stream := range t.Streams {\n\t\tif val, ok := valRaw(key, stream); ok {\n\t\t\treturn val, ok\n\t\t}\n\t}\n\treturn\n}\n\nfunc valInt(valString string) (val int) {\n\tval, _ = strconv.Atoi(strings.Split(valString, \"\/\")[0])\n\treturn\n}\n\nfunc (l library) newSong(path string) (s *song, err error) {\n\tcmd := exec.Command(l.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_format\",\n\t\t\"-show_streams\",\n\t\tpath)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar t tags\n\tif err = json.NewDecoder(stdout).Decode(&t); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\tif score := t.Format[\"probe_score\"].(float64); score < 25 {\n\t\treturn nil, errors.New(\"undeterminable file type\")\n\t}\n\taudio := false\n\tfor _, stream := range t.Streams {\n\t\tif stream[\"codec_type\"] == \"audio\" {\n\t\t\taudio = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !audio {\n\t\treturn nil, errors.New(\"no audio stream\")\n\t}\n\ts = &song{\n\t\tPath: path,\n\t}\n\tif sOld, ok := l.Songs[s.Path]; ok {\n\t\ts.ID = sOld.ID\n\t} else {\n\t\tidBytes := make([]byte, 0, idLength)\n\t\tfor i := 0; i < cap(idBytes); i++ {\n\t\t\tn, err := rand.Int(rand.Reader,\n\t\t\t\tbig.NewInt(int64(idGreatestByte-idLeastByte)))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tidBytes = append(idBytes, byte(n.Int64()+idLeastByte))\n\t\t}\n\t\ts.ID = string(idBytes)\n\t}\n\ts.Artist, _ = t.val(\"artist\")\n\ts.Album, _ = t.val(\"album\")\n\tdisc, ok := t.val(\"disc\")\n\tif !ok {\n\t\tdisc, _ = t.val(\"discnumber\")\n\t}\n\ts.Disc = valInt(disc)\n\ttrack, ok := t.val(\"track\")\n\tif !ok {\n\t\ttrack, _ = t.val(\"tracknumber\")\n\t}\n\ts.Track = valInt(track)\n\ts.Title, _ = t.val(\"title\")\n\treturn\n}\n\nfunc (l *library) reload() (err error) {\n\tnewSongs := make(map[string]*song)\n\tnewSongsByID := make(map[string]*song)\n\th := &songHeap{}\n\theap.Init(h)\n\tfilepath.Walk(l.path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\ts, err := l.newSong(path)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tnewSongs[path] = s\n\t\tnewSongsByID[s.ID] = s\n\t\theap.Push(h, s)\n\t\treturn nil\n\t})\n\tl.Songs = newSongs\n\tl.SongsByID = newSongsByID\n\tl.SongsSorted = make([]*song, 0, len(l.SongsByID))\n\tfor h.Len() > 0 {\n\t\tl.SongsSorted = append(l.SongsSorted, heap.Pop(h).(*song))\n\t}\n\tdb, err := os.OpenFile(dbPath, os.O_CREATE|os.O_TRUNC, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\tif err = json.NewEncoder(db).Encode(l); err != nil {\n\t\treturn\n\t}\n\tfilepath.Walk(streamDirPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tname := info.Name()\n\t\tif _, ok := l.SongsByID[strings.TrimSuffix(info.Name(),\n\t\t\tfilepath.Ext(name))]; !ok {\n\t\t\tos.Remove(path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}\n\nfunc chooseCmd(s string, t string) (string, error) {\n\t_, errs := exec.LookPath(s)\n\t_, errt := exec.LookPath(t)\n\tif errs == nil {\n\t\treturn s, nil\n\t} else if errt == nil {\n\t\treturn t, nil\n\t}\n\treturn \"\", fmt.Errorf(\"could not find '%s' or '%s' executable\", s, t)\n}\n\nfunc newLibrary(path string) (l *library, err error) {\n\tl = &library{\n\t\tpath: path,\n\t\tencoding: make(map[string]*sync.WaitGroup),\n\t}\n\tconvCmd, err := chooseCmd(\"ffmpeg\", \"avconv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprobeCmd, err := chooseCmd(\"ffprobe\", \"avprobe\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl.convCmd = convCmd\n\tl.probeCmd = probeCmd\n\tstreamRE, err := regexp.Compile(fmt.Sprintf(\"^\\\\\/songs\\\\\/[%c-%c]{%d}\\\\..+$\",\n\t\tidLeastByte,\n\t\tidGreatestByte,\n\t\tidLength))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl.streamRE = streamRE\n\tos.Mkdir(streamDirPath, os.ModeDir)\n\tif db, err := os.Open(dbPath); err == nil {\n\t\tdefer db.Close()\n\t\tif err = json.NewDecoder(db).Decode(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tl.Songs = make(map[string]*song)\n\t\tl.reload()\n\t}\n\treturn\n}\n\nfunc (l *library) putSongs() {\n\tl.writing.Wait()\n\tl.reading.Wait()\n\tl.writing.Add(1)\n\tdefer l.writing.Done()\n\tl.reload()\n}\n\nfunc (l library) getSongs(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tl.writing.Wait()\n\tl.reading.Add(1)\n\tdefer l.reading.Done()\n\tjson.NewEncoder(w).Encode(l.SongsSorted)\n}\n\nfunc httpError(w http.ResponseWriter, status int) {\n\thttp.Error(w, http.StatusText(status), status)\n}\n\nfunc (l library) encode(dest string, src *song, af afmt) (err error) {\n\tif _, ok := l.encoding[dest]; !ok {\n\t\tl.encoding[dest] = &sync.WaitGroup{}\n\t}\n\tencoding := l.encoding[dest]\n\tencoding.Add(1)\n\tdefer encoding.Done()\n\targs := []string{\n\t\t\"-i\", src.Path,\n\t\t\"-f\", af.fmt,\n\t}\n\targs = append(args, af.args...)\n\targs = append(args, dest)\n\tif err = exec.Command(l.convCmd, args...).Run(); err != nil {\n\t\tif _, err = os.Stat(dest); err == nil {\n\t\t\tos.Remove(dest)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l library) getStream(w http.ResponseWriter, r *http.Request) {\n\tbase := path.Base(r.URL.Path)\n\text := path.Ext(base)\n\ts, ok := l.SongsByID[strings.TrimSuffix(base, ext)]\n\tif !ok {\n\t\thttpError(w, http.StatusNotFound)\n\t\treturn\n\t}\n\taf, ok := afmts[ext]\n\tif !ok {\n\t\thttpError(w, http.StatusNotFound)\n\t\treturn\n\t}\n\tstreamPath := path.Join(streamDirPath, base)\n\tif _, ok := l.encoding[streamPath]; ok {\n\t\tl.encoding[streamPath].Wait()\n\t}\n\t_, err := os.Stat(streamPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\thttpError(w, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif os.IsNotExist(err) && l.encode(streamPath, s, af) != nil {\n\t\thttpError(w, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", af.mime)\n\thttp.ServeFile(w, r, streamPath)\n}\n\nfunc (l *library) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.URL.Path == \"\/songs\":\n\t\tswitch r.Method {\n\t\tcase \"PUT\":\n\t\t\tl.putSongs()\n\t\t\tfallthrough\n\t\tcase \"GET\":\n\t\t\tl.getSongs(w)\n\t\tdefault:\n\t\t\thttpError(w, http.StatusMethodNotAllowed)\n\t\t}\n\tcase l.streamRE.MatchString(r.URL.Path):\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\tl.getStream(w, r)\n\t\tdefault:\n\t\t\thttpError(w, http.StatusMethodNotAllowed)\n\t\t}\n\tdefault:\n\t\thttpError(w, http.StatusNotFound)\n\t}\n}\n\nfunc main() {\n\tflibrary := flag.String(\"library\", \"\", \"the library directory\")\n\tfport := flag.Uint(\"port\", 21313, \"the port to listen on\")\n\tflag.Parse()\n\tif *flibrary == \"\" {\n\t\tlog.Fatal(\"missing library flag\")\n\t}\n\tl, err := newLibrary(*flibrary)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\thttp.Handle(\"\/\", l)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", *fport), nil))\n}\n<commit_msg>Make song paths relative to library directory<commit_after>package main\n\nimport (\n\t\"container\/heap\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/big\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\tdbPath = \".db.json\"\n\tstreamDirPath = \".stream\"\n)\n\nconst (\n\tidLeastByte = 'a'\n\tidGreatestByte = 'z'\n\tidLength = 8\n)\n\n\/\/ afmt represents an audio format supported by ffmpeg\/avconv.\ntype afmt struct {\n\t\/\/ fmt is the format's name in ffmpeg\/avconv.\n\tfmt string\n\t\/\/ mime is the MIME type of the format.\n\tmime string\n\t\/\/ args are the codec-specific ffmpeg\/avconv arguments to use.\n\targs []string\n}\n\nvar (\n\tafmts = map[string]afmt{\n\t\t\".opus\": {\n\t\t\tfmt: \"ogg\",\n\t\t\tmime: \"audio\/ogg\",\n\t\t\targs: []string{\n\t\t\t\t\"-b:a\", \"128000\",\n\t\t\t\t\"-compression_level\", \"0\",\n\t\t\t},\n\t\t},\n\t\t\".mp3\": {\n\t\t\tfmt: \"mp3\",\n\t\t\tmime: \"audio\/mpeg\",\n\t\t\targs: []string{\n\t\t\t\t\"-q\", \"4\",\n\t\t\t},\n\t\t},\n\t}\n)\n\n\/\/ song represents a song in a library.\ntype song struct {\n\t\/\/ ID is the unique ID of the song.\n\tID string `json:\"id\"`\n\t\/\/ Path is the path to the song's source file.\n\tPath string `json:\"path\"`\n\t\/\/ Artist is the song's artist.\n\tArtist string `json:\"artist\"`\n\t\/\/ Album is the album the song comes from.\n\tAlbum string `json:\"album\"`\n\t\/\/ Disc is the album disc the song comes from.\n\tDisc int `json:\"disc\"`\n\t\/\/ Track is the song's track number on the disc it comes from.\n\tTrack int `json:\"track\"`\n\t\/\/ Title is the song's title.\n\tTitle string `json:\"title\"`\n}\n\ntype songHeap []*song\n\nfunc (h songHeap) Len() int {\n\treturn len(h)\n}\n\nfunc compareFold(s, t string) (eq bool, less bool) {\n\tsLower := strings.ToLower(s)\n\ttLower := strings.ToLower(t)\n\treturn sLower == tLower, sLower < tLower\n}\n\nfunc (h songHeap) Less(i, j int) bool {\n\tif eq, less := compareFold(h[i].Artist, h[j].Artist); !eq {\n\t\treturn less\n\t}\n\tif eq, less := compareFold(h[i].Album, h[j].Album); !eq {\n\t\treturn less\n\t}\n\tif h[i].Disc != h[j].Disc {\n\t\treturn h[i].Disc < h[j].Disc\n\t}\n\tif h[i].Track != h[j].Track {\n\t\treturn h[i].Track < h[j].Track\n\t}\n\tif eq, less := compareFold(h[i].Title, h[j].Title); !eq {\n\t\treturn less\n\t}\n\treturn h[i].Path < h[j].Path\n}\n\nfunc (h songHeap) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n}\n\nfunc (h *songHeap) Push(s interface{}) {\n\t*h = append(*h, s.(*song))\n}\n\nfunc (h *songHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\ts := old[n-1]\n\t*h = old[0 : n-1]\n\treturn s\n}\n\n\/\/ library represents a music library and server.\ntype library struct {\n\t\/\/ Songs is the primary record of songs in the library. Keys are\n\t\/\/ song.Paths, and values are songs.\n\tSongs map[string]*song `json:\"songs\"`\n\t\/\/ SongsByID is like songs, but indexed by song.ID instead of song.Path.\n\tSongsByID map[string]*song `json:\"songsByID\"`\n\t\/\/ SongsSorted is a list of songs in sorted order. Songs are sorted by\n\t\/\/ artist, then album, then track number.\n\tSongsSorted []*song `json:\"songsSorted\"`\n\t\/\/ path is the path to the library directory.\n\tpath string\n\t\/\/ convCmd is the command used to transcode source files.\n\tconvCmd string\n\t\/\/ probeCmd is the command used to read metadata tags from source files.\n\tprobeCmd string\n\t\/\/ reading is used to delay database write operations while read\n\t\/\/ operations are occuring.\n\treading sync.WaitGroup\n\t\/\/ writing is used to delay database read operations while a write\n\t\/\/ operation is occuring.\n\twriting sync.WaitGroup\n\t\/\/ encoding is used to delay stream operations while a streaming file\n\t\/\/ at the path represented by the key is encoding.\n\tencoding map[string]*sync.WaitGroup\n\t\/\/ streamRE is a regular expression used to match stream URLs.\n\tstreamRE *regexp.Regexp\n}\n\ntype tags struct {\n\tFormat map[string]interface{}\n\tStreams []map[string]interface{}\n}\n\nfunc valRaw(key string, cont map[string]interface{}) (val string, ok bool) {\n\ttags, ok := cont[\"tags\"].(map[string]interface{})\n\tif !ok {\n\t\treturn\n\t}\n\tif val, ok = tags[strings.ToLower(key)].(string); ok {\n\t\treturn val, ok\n\t}\n\tval, ok = tags[strings.ToUpper(key)].(string)\n\treturn\n}\n\nfunc (t tags) val(key string) (val string, ok bool) {\n\tif val, ok := valRaw(key, t.Format); ok {\n\t\treturn val, ok\n\t}\n\tfor _, stream := range t.Streams {\n\t\tif val, ok := valRaw(key, stream); ok {\n\t\t\treturn val, ok\n\t\t}\n\t}\n\treturn\n}\n\nfunc valInt(valString string) (val int) {\n\tval, _ = strconv.Atoi(strings.Split(valString, \"\/\")[0])\n\treturn\n}\n\nfunc (l library) absPath(path string) string {\n\treturn filepath.Join(l.path, path)\n}\n\nfunc (l library) relPath(path string) (rel string, err error) {\n\treturn filepath.Rel(l.path, path)\n}\n\nfunc (l library) newSong(path string) (s *song, err error) {\n\tabs := l.absPath(path)\n\tcmd := exec.Command(l.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_format\",\n\t\t\"-show_streams\",\n\t\tabs)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar t tags\n\tif err = json.NewDecoder(stdout).Decode(&t); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\tif score := t.Format[\"probe_score\"].(float64); score < 25 {\n\t\treturn nil, errors.New(\"undeterminable file type\")\n\t}\n\taudio := false\n\tfor _, stream := range t.Streams {\n\t\tif stream[\"codec_type\"] == \"audio\" {\n\t\t\taudio = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !audio {\n\t\treturn nil, errors.New(\"no audio stream\")\n\t}\n\ts = &song{\n\t\tPath: path,\n\t}\n\tif sOld, ok := l.Songs[s.Path]; ok {\n\t\ts.ID = sOld.ID\n\t} else {\n\t\tidBytes := make([]byte, 0, idLength)\n\t\tfor i := 0; i < cap(idBytes); i++ {\n\t\t\tn, err := rand.Int(rand.Reader,\n\t\t\t\tbig.NewInt(int64(idGreatestByte-idLeastByte)))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tidBytes = append(idBytes, byte(n.Int64()+idLeastByte))\n\t\t}\n\t\ts.ID = string(idBytes)\n\t}\n\ts.Artist, _ = t.val(\"artist\")\n\ts.Album, _ = t.val(\"album\")\n\tdisc, ok := t.val(\"disc\")\n\tif !ok {\n\t\tdisc, _ = t.val(\"discnumber\")\n\t}\n\ts.Disc = valInt(disc)\n\ttrack, ok := t.val(\"track\")\n\tif !ok {\n\t\ttrack, _ = t.val(\"tracknumber\")\n\t}\n\ts.Track = valInt(track)\n\ts.Title, _ = t.val(\"title\")\n\treturn\n}\n\nfunc (l *library) reload() (err error) {\n\tnewSongs := make(map[string]*song)\n\tnewSongsByID := make(map[string]*song)\n\th := &songHeap{}\n\theap.Init(h)\n\tfilepath.Walk(l.path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\trel, err := l.relPath(path)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\ts, err := l.newSong(rel)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tnewSongs[rel] = s\n\t\tnewSongsByID[s.ID] = s\n\t\theap.Push(h, s)\n\t\treturn nil\n\t})\n\tl.Songs = newSongs\n\tl.SongsByID = newSongsByID\n\tl.SongsSorted = make([]*song, 0, len(l.SongsByID))\n\tfor h.Len() > 0 {\n\t\tl.SongsSorted = append(l.SongsSorted, heap.Pop(h).(*song))\n\t}\n\tdb, err := os.OpenFile(dbPath, os.O_CREATE|os.O_TRUNC, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\tif err = json.NewEncoder(db).Encode(l); err != nil {\n\t\treturn\n\t}\n\tfilepath.Walk(streamDirPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tname := info.Name()\n\t\tif _, ok := l.SongsByID[strings.TrimSuffix(info.Name(),\n\t\t\tfilepath.Ext(name))]; !ok {\n\t\t\tos.Remove(path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}\n\nfunc chooseCmd(s string, t string) (string, error) {\n\t_, errs := exec.LookPath(s)\n\t_, errt := exec.LookPath(t)\n\tif errs == nil {\n\t\treturn s, nil\n\t} else if errt == nil {\n\t\treturn t, nil\n\t}\n\treturn \"\", fmt.Errorf(\"could not find '%s' or '%s' executable\", s, t)\n}\n\nfunc newLibrary(path string) (l *library, err error) {\n\tl = &library{\n\t\tpath: path,\n\t\tencoding: make(map[string]*sync.WaitGroup),\n\t}\n\tconvCmd, err := chooseCmd(\"ffmpeg\", \"avconv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprobeCmd, err := chooseCmd(\"ffprobe\", \"avprobe\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl.convCmd = convCmd\n\tl.probeCmd = probeCmd\n\tl.streamRE, err = regexp.Compile(fmt.Sprintf(\"^\\\\\/songs\\\\\/[%c-%c]{%d}\\\\..+$\",\n\t\tidLeastByte,\n\t\tidGreatestByte,\n\t\tidLength))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tos.Mkdir(streamDirPath, os.ModeDir)\n\tif db, err := os.Open(dbPath); err == nil {\n\t\tdefer db.Close()\n\t\tif err = json.NewDecoder(db).Decode(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tl.Songs = make(map[string]*song)\n\t\tl.reload()\n\t}\n\treturn\n}\n\nfunc (l *library) putSongs() {\n\tl.writing.Wait()\n\tl.reading.Wait()\n\tl.writing.Add(1)\n\tdefer l.writing.Done()\n\tl.reload()\n}\n\nfunc (l library) getSongs(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tl.writing.Wait()\n\tl.reading.Add(1)\n\tdefer l.reading.Done()\n\tjson.NewEncoder(w).Encode(l.SongsSorted)\n}\n\nfunc httpError(w http.ResponseWriter, status int) {\n\thttp.Error(w, http.StatusText(status), status)\n}\n\nfunc (l library) encode(dest string, src *song, af afmt) (err error) {\n\tif _, ok := l.encoding[dest]; !ok {\n\t\tl.encoding[dest] = &sync.WaitGroup{}\n\t}\n\tencoding := l.encoding[dest]\n\tencoding.Add(1)\n\tdefer encoding.Done()\n\targs := []string{\n\t\t\"-i\", l.absPath(src.Path),\n\t\t\"-f\", af.fmt,\n\t}\n\targs = append(args, af.args...)\n\targs = append(args, dest)\n\tif err = exec.Command(l.convCmd, args...).Run(); err != nil {\n\t\tif _, err = os.Stat(dest); err == nil {\n\t\t\tos.Remove(dest)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l library) getStream(w http.ResponseWriter, r *http.Request) {\n\tbase := path.Base(r.URL.Path)\n\text := path.Ext(base)\n\ts, ok := l.SongsByID[strings.TrimSuffix(base, ext)]\n\tif !ok {\n\t\thttpError(w, http.StatusNotFound)\n\t\treturn\n\t}\n\taf, ok := afmts[ext]\n\tif !ok {\n\t\thttpError(w, http.StatusNotFound)\n\t\treturn\n\t}\n\tstreamPath := filepath.Join(streamDirPath, base)\n\tif _, ok := l.encoding[streamPath]; ok {\n\t\tl.encoding[streamPath].Wait()\n\t}\n\t_, err := os.Stat(streamPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\thttpError(w, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif os.IsNotExist(err) && l.encode(streamPath, s, af) != nil {\n\t\thttpError(w, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", af.mime)\n\thttp.ServeFile(w, r, streamPath)\n}\n\nfunc (l *library) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.URL.Path == \"\/songs\":\n\t\tswitch r.Method {\n\t\tcase \"PUT\":\n\t\t\tl.putSongs()\n\t\t\tfallthrough\n\t\tcase \"GET\":\n\t\t\tl.getSongs(w)\n\t\tdefault:\n\t\t\thttpError(w, http.StatusMethodNotAllowed)\n\t\t}\n\tcase l.streamRE.MatchString(r.URL.Path):\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\tl.getStream(w, r)\n\t\tdefault:\n\t\t\thttpError(w, http.StatusMethodNotAllowed)\n\t\t}\n\tdefault:\n\t\thttpError(w, http.StatusNotFound)\n\t}\n}\n\nfunc main() {\n\tflibrary := flag.String(\"library\", \"\", \"the library directory\")\n\tfport := flag.Uint(\"port\", 21313, \"the port to listen on\")\n\tflag.Parse()\n\tif *flibrary == \"\" {\n\t\tlog.Fatal(\"missing library flag\")\n\t}\n\tl, err := newLibrary(*flibrary)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\thttp.Handle(\"\/\", l)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", *fport), nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cf-platform-eng\/mongodb-on-demand-release\/src\/mongodb-service-adapter\/adapter\"\n)\n\n\/\/ TODO: pass json instead of flags\nvar (\n\tid string\n\turl string\n\tusername string\n\tapiKey string\n\tgroupID string\n\tplanID string\n\tnodeAddresses string\n\tadminPassword string\n\tengineVersion string\n\treplicas int\n)\n\nfunc main() {\n\tflag.StringVar(&id, \"id\", \"\", \"ID\")\n\tflag.StringVar(&url, \"url\", \"\", \"MOM URL\")\n\tflag.StringVar(&username, \"username\", \"\", \"MOM Username\")\n\tflag.StringVar(&apiKey, \"api-key\", \"\", \"MOM API Key\")\n\tflag.StringVar(&groupID, \"group\", \"\", \"MOM Group ID\")\n\tflag.StringVar(&planID, \"plan\", \"\", \"The name of the service plan\")\n\tflag.StringVar(&nodeAddresses, \"nodes\", \"\", \"Comma separated list of addresses\")\n\tflag.StringVar(&adminPassword, \"admin-password\", \"\", \"Admin password for the mongo instance\")\n\tflag.StringVar(&engineVersion, \"engine-version\", \"\", \"Engine version\")\n\tflag.IntVar(&replicas, \"replicas\", 0, \"replicas per shard\")\n\tflag.Parse()\n\n\tlogger := log.New(os.Stderr, \"[mongodb-config-agent] \", log.LstdFlags)\n\tomClient := adapter.OMClient{Url: url, Username: username, ApiKey: apiKey}\n\n\tnodes := strings.Split(nodeAddresses, \",\")\n\tctx := &adapter.DocContext{\n\t\tID: id,\n\t\tKey: \"GrSLAAsHGXmJOrvElJ2AHTGauvH4O0EFT1r8byvb0G9sTU0viVX21PwUMqBjyXB9WrZP9QvEmCQIF1wOqJofyWmx7wWZqpO69dnc9GUWcpGQLr7eVyKTs99WAPXR3kXpF4MVrHdBMEDfRfhytgomgAso96urN6eC8RaUpjX4Bf9HcAEJwfddZshin97XKJDmqCaqAfORNnf1e8hkfTIwYg1tvIpwemmEF4TkmOgK09N5dINyejyWMU8iWG8FqW5MfQ8A2DrtIdyGSKLH05s7H1dXyADjDECaC77QqLXTx7gWyHca3I0K92PuVFoOs5385vzqTYN3kVgFotSdXgoM8Zt5QIoj2lX4PYqm2TWsVp0s15JELikH8bNVIIMGiSSWJEWGU1PVEXD7V7cYepDb88korMjr3wbh6kZ76Q7F2RtfJqkd4hKw7B5OCX04b5eppkjL598iCpSUUx3X9C6fFavWj2DrHsv9DY86iCWBlcG08DRPKs9EPizCW4jNZtJcm3T7WlcI0MZMKOtsKOCWBZA0C9YnttNrp4eTsQ1U43StiIRPqp2K8rrQAu6etURH0RHedazHeeukTWI7iTG1dZpYk9EyittZ72qKXLNLhi5vJ9TlYw8O91vihB1nJwwA3B1WbiYhkqqRzoL0cQpXJMUsUlsoSP6Q70IMU92vEHbUmna5krESPLeJfQBKGQPNVVE63XYBh2TnvFTdi6koitu209wMFUnHZrzWj3UWGqsyTqqHbPl4RhRLFe24seRwV2SbUuLygBIdptKHnA3kutAbHzsWTT8UxOaiQzFV4auxounrgXj7MoMWEVKKS8AHkELPILGqFVFC8BZsfPC0WacSN5Rg5SaCvfs74hcsCQ3ghq9PyxEb2fbHUiaCjnsBcXqzQw9AjZJG4yX0ubEwicP0bKB6y3w4PUQqdouxH5y16OgkUjrZgodJfRLgP9vqGbHNDpj4yBuswluvCFBh38gBoSIQu11qtQmk43n4G8Dskn0DrJ32l2Gz35q5LaKT\",\n\t\tAdminPassword: adminPassword,\n\t\tNodes: nodes,\n\t\tVersion: engineVersion,\n\t}\n\n\tif planID == adapter.PlanShardedSet {\n\t\tvar err error\n\t\tctx.Shards, err = nodesToShards(nodes, replicas)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t}\n\n\tlogger.Printf(\"%+v\", nodes)\n\tdoc, err := omClient.LoadDoc(planID, ctx)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\tlogger.Println(doc)\n\n\tfor {\n\t\tlogger.Printf(\"Checking group %s\", groupID)\n\n\t\tgroupHosts, err := omClient.GetGroupHosts(groupID)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\n\t\t\/\/\tlogger.Printf(\"total number of hosts *** %v\", groupHosts.TotalCount)\n\t\tif groupHosts.TotalCount == 0 {\n\t\t\tlogger.Printf(\"Host count for %s is 0, configuring...\", groupID)\n\n\t\t\terr = omClient.ConfigureGroup(doc, groupID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Fatal(err)\n\t\t\t}\n\n\t\t\tlogger.Printf(\"Configured group %s\", groupID)\n\t\t}\n\n\t\ttime.Sleep(30 * time.Second)\n\t}\n}\n\n\/\/ nodesToShards transforms a list of nodes into shards list\n\/\/ depending on replicas per shard number.\n\/\/ Number of nodes modulo number of replicas has to be zero.\nfunc nodesToShards(nodes []string, replicas int) ([][]string, error) {\n\tif len(nodes) == 0 {\n\t\treturn nil, errors.New(\"len(nodes) == 0\")\n\t} else if replicas == 0 {\n\t\treturn nil, errors.New(\"replicas == 0\")\n\t} else if len(nodes)%replicas != 0 {\n\t\treturn nil, errors.New(\"len(nodes) % replicas != 0\")\n\t}\n\n\tb := [][]string{}\n\tfor i := 0; i < replicas; i++ {\n\t\tb = append(b, []string{})\n\t\tb[i] = nodes[i*replicas : (i*replicas)+replicas]\n\t}\n\treturn b, nil\n}\n<commit_msg>Fix planID type<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cf-platform-eng\/mongodb-on-demand-release\/src\/mongodb-service-adapter\/adapter\"\n)\n\n\/\/ TODO: pass json instead of flags\nvar (\n\tid string\n\turl string\n\tusername string\n\tapiKey string\n\tgroupID string\n\tplanID string\n\tnodeAddresses string\n\tadminPassword string\n\tengineVersion string\n\treplicas int\n)\n\nfunc main() {\n\tflag.StringVar(&id, \"id\", \"\", \"ID\")\n\tflag.StringVar(&url, \"url\", \"\", \"MOM URL\")\n\tflag.StringVar(&username, \"username\", \"\", \"MOM Username\")\n\tflag.StringVar(&apiKey, \"api-key\", \"\", \"MOM API Key\")\n\tflag.StringVar(&groupID, \"group\", \"\", \"MOM Group ID\")\n\tflag.StringVar(&planID, \"plan\", \"\", \"The name of the service plan\")\n\tflag.StringVar(&nodeAddresses, \"nodes\", \"\", \"Comma separated list of addresses\")\n\tflag.StringVar(&adminPassword, \"admin-password\", \"\", \"Admin password for the mongo instance\")\n\tflag.StringVar(&engineVersion, \"engine-version\", \"\", \"Engine version\")\n\tflag.IntVar(&replicas, \"replicas\", 0, \"replicas per shard\")\n\tflag.Parse()\n\n\tlogger := log.New(os.Stderr, \"[mongodb-config-agent] \", log.LstdFlags)\n\tomClient := adapter.OMClient{Url: url, Username: username, ApiKey: apiKey}\n\n\tnodes := strings.Split(nodeAddresses, \",\")\n\tctx := &adapter.DocContext{\n\t\tID: id,\n\t\tKey: \"GrSLAAsHGXmJOrvElJ2AHTGauvH4O0EFT1r8byvb0G9sTU0viVX21PwUMqBjyXB9WrZP9QvEmCQIF1wOqJofyWmx7wWZqpO69dnc9GUWcpGQLr7eVyKTs99WAPXR3kXpF4MVrHdBMEDfRfhytgomgAso96urN6eC8RaUpjX4Bf9HcAEJwfddZshin97XKJDmqCaqAfORNnf1e8hkfTIwYg1tvIpwemmEF4TkmOgK09N5dINyejyWMU8iWG8FqW5MfQ8A2DrtIdyGSKLH05s7H1dXyADjDECaC77QqLXTx7gWyHca3I0K92PuVFoOs5385vzqTYN3kVgFotSdXgoM8Zt5QIoj2lX4PYqm2TWsVp0s15JELikH8bNVIIMGiSSWJEWGU1PVEXD7V7cYepDb88korMjr3wbh6kZ76Q7F2RtfJqkd4hKw7B5OCX04b5eppkjL598iCpSUUx3X9C6fFavWj2DrHsv9DY86iCWBlcG08DRPKs9EPizCW4jNZtJcm3T7WlcI0MZMKOtsKOCWBZA0C9YnttNrp4eTsQ1U43StiIRPqp2K8rrQAu6etURH0RHedazHeeukTWI7iTG1dZpYk9EyittZ72qKXLNLhi5vJ9TlYw8O91vihB1nJwwA3B1WbiYhkqqRzoL0cQpXJMUsUlsoSP6Q70IMU92vEHbUmna5krESPLeJfQBKGQPNVVE63XYBh2TnvFTdi6koitu209wMFUnHZrzWj3UWGqsyTqqHbPl4RhRLFe24seRwV2SbUuLygBIdptKHnA3kutAbHzsWTT8UxOaiQzFV4auxounrgXj7MoMWEVKKS8AHkELPILGqFVFC8BZsfPC0WacSN5Rg5SaCvfs74hcsCQ3ghq9PyxEb2fbHUiaCjnsBcXqzQw9AjZJG4yX0ubEwicP0bKB6y3w4PUQqdouxH5y16OgkUjrZgodJfRLgP9vqGbHNDpj4yBuswluvCFBh38gBoSIQu11qtQmk43n4G8Dskn0DrJ32l2Gz35q5LaKT\",\n\t\tAdminPassword: adminPassword,\n\t\tNodes: nodes,\n\t\tVersion: engineVersion,\n\t}\n\n\tif planID == adapter.PlanShardedSet {\n\t\tvar err error\n\t\tctx.Shards, err = nodesToShards(nodes, replicas)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t}\n\n\tlogger.Printf(\"%+v\", nodes)\n\tdoc, err := omClient.LoadDoc(adapter.Plan(planID), ctx)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\tlogger.Println(doc)\n\n\tfor {\n\t\tlogger.Printf(\"Checking group %s\", groupID)\n\n\t\tgroupHosts, err := omClient.GetGroupHosts(groupID)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\n\t\t\/\/\tlogger.Printf(\"total number of hosts *** %v\", groupHosts.TotalCount)\n\t\tif groupHosts.TotalCount == 0 {\n\t\t\tlogger.Printf(\"Host count for %s is 0, configuring...\", groupID)\n\n\t\t\terr = omClient.ConfigureGroup(doc, groupID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Fatal(err)\n\t\t\t}\n\n\t\t\tlogger.Printf(\"Configured group %s\", groupID)\n\t\t}\n\n\t\ttime.Sleep(30 * time.Second)\n\t}\n}\n\n\/\/ nodesToShards transforms a list of nodes into shards list\n\/\/ depending on replicas per shard number.\n\/\/ Number of nodes modulo number of replicas has to be zero.\nfunc nodesToShards(nodes []string, replicas int) ([][]string, error) {\n\tif len(nodes) == 0 {\n\t\treturn nil, errors.New(\"len(nodes) == 0\")\n\t} else if replicas == 0 {\n\t\treturn nil, errors.New(\"replicas == 0\")\n\t} else if len(nodes)%replicas != 0 {\n\t\treturn nil, errors.New(\"len(nodes) % replicas != 0\")\n\t}\n\n\tb := [][]string{}\n\tfor i := 0; i < replicas; i++ {\n\t\tb = append(b, []string{})\n\t\tb[i] = nodes[i*replicas : (i*replicas)+replicas]\n\t}\n\treturn b, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package evaluator\n\nimport (\n\t\"monkey\/object\"\n)\n\nvar builtins = map[string]*object.Builtin{\n\t\"len\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tswitch arg := args[0].(type) {\n\t\t\tcase *object.String:\n\t\t\t\treturn &object.Integer{Value: int64(len(arg.Value))}\n\t\t\tcase *object.Array:\n\t\t\t\treturn &object.Integer{Value: int64(len(arg.Elements))}\n\t\t\tdefault:\n\t\t\t\treturn newError(\"argument to 'len' not supported, got %s\", args[0].Type())\n\t\t\t}\n\t\t},\n\t},\n\t\"first\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'first' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tif len(arr.Elements) > 0 {\n\t\t\t\treturn arr.Elements[0]\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"last\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'last' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tif length > 0 {\n\t\t\t\treturn arr.Elements[length-1]\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"tail\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'tail' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tif length > 0 {\n\t\t\t\tnewElements := make([]object.Object, length-1, length-1)\n\t\t\t\tcopy(newElements, arr.Elements[1:length])\n\t\t\t\treturn &object.Array{Elements: newElements}\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"push\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=2\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'push' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tnewElements := make([]object.Object, length+1, length+1)\n\t\t\tcopy(newElements, arr.Elements)\n\n\t\t\tnewElements[length] = args[1]\n\t\t\treturn &object.Array{Elements: newElements}\n\t\t},\n\t},\n}\n<commit_msg>add puts builtin - book content complete<commit_after>package evaluator\n\nimport (\n\t\"fmt\"\n\t\"monkey\/object\"\n)\n\nvar builtins = map[string]*object.Builtin{\n\t\"len\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tswitch arg := args[0].(type) {\n\t\t\tcase *object.String:\n\t\t\t\treturn &object.Integer{Value: int64(len(arg.Value))}\n\t\t\tcase *object.Array:\n\t\t\t\treturn &object.Integer{Value: int64(len(arg.Elements))}\n\t\t\tdefault:\n\t\t\t\treturn newError(\"argument to 'len' not supported, got %s\", args[0].Type())\n\t\t\t}\n\t\t},\n\t},\n\t\"first\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'first' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tif len(arr.Elements) > 0 {\n\t\t\t\treturn arr.Elements[0]\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"last\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'last' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tif length > 0 {\n\t\t\t\treturn arr.Elements[length-1]\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"tail\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=1\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'tail' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tif length > 0 {\n\t\t\t\tnewElements := make([]object.Object, length-1, length-1)\n\t\t\t\tcopy(newElements, arr.Elements[1:length])\n\t\t\t\treturn &object.Array{Elements: newElements}\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n\t\"push\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn newError(\"wrong number of arguments. got=%d, want=2\", len(args))\n\t\t\t}\n\n\t\t\tif args[0].Type() != object.ARRAY_OBJ {\n\t\t\t\treturn newError(\"argument to 'push' must be ARRAY, got %s\", args[0].Type())\n\t\t\t}\n\n\t\t\tarr := args[0].(*object.Array)\n\t\t\tlength := len(arr.Elements)\n\t\t\tnewElements := make([]object.Object, length+1, length+1)\n\t\t\tcopy(newElements, arr.Elements)\n\n\t\t\tnewElements[length] = args[1]\n\t\t\treturn &object.Array{Elements: newElements}\n\t\t},\n\t},\n\t\"puts\": &object.Builtin{\n\t\tFn: func(args ...object.Object) object.Object {\n\t\t\tfor _, arg := range args {\n\t\t\t\tfmt.Println(arg.Inspect())\n\t\t\t}\n\n\t\t\treturn NULL\n\t\t},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package mmbot\n\nimport (\n\t\"log\"\n\t\"mmbot\/mmhook\"\n\t\"net\/http\"\n)\n\ntype Robot struct {\n\t*mmhook.Client\n\tConfig *Config\n\tHandlers []Handler\n\tquit chan bool\n}\n\nfunc NewRobot(config *Config) *Robot {\n\tbot := &Robot{\n\t\tClient: mmhook.NewClient(config.Config),\n\t\tConfig: config,\n\t\tquit: make(chan bool),\n\t}\n\n\treturn bot\n}\n\nfunc (r *Robot) Run() {\n\tif !r.Config.DisableServer {\n\t\tr.StartServer()\n\t\tr.receive()\n\t} else {\n\t\t<-r.quit\n\t}\n}\n\nfunc (r *Robot) Stop() {\n\tr.quit <- true\n}\n\nfunc (r *Robot) receive() {\n\tfor {\n\t\tselect {\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\tcase msg := <-r.In:\n\t\t\tr.handle(&msg)\n\t\t}\n\t}\n}\n\nfunc (r *Robot) handle(inMsg *mmhook.InMessage) {\n\tmsg := &InMessage{\n\t\tInMessage: inMsg,\n\t\tRobot: r,\n\t}\n\n\tfor _, handler := range r.Handlers {\n\t\tif handler.CanHandle(msg) {\n\t\t\terr := handler.Handle(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (r *Robot) startServer() {\n\tmux := http.NewServeMux()\n\tr.mountClient(mux)\n\n\t\/\/ TODO: mount http handler\n\n\tlog.Printf(\"Listening on %s\\n\", r.Address())\n\tif err := http.ListenAndServe(r.Address(), mux); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc (r *Robot) mountClient(mux *http.ServeMux) {\n\tpath := r.Client.IncomingPath\n\tif path == \"\" {\n\t\tpath = \"\/\"\n\t}\n\tmux.Handle(path, r.Client)\n}\n<commit_msg>Use gorilla.mux instead of http.ServeMux<commit_after>package mmbot\n\nimport (\n\t\"log\"\n\t\"mmbot\/mmhook\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype Robot struct {\n\t*mmhook.Client\n\tConfig *Config\n\tHandlers []Handler\n\tquit chan bool\n}\n\nfunc NewRobot(config *Config) *Robot {\n\tbot := &Robot{\n\t\tClient: mmhook.NewClient(config.Config),\n\t\tConfig: config,\n\t\tquit: make(chan bool),\n\t}\n\n\treturn bot\n}\n\nfunc (r *Robot) Run() {\n\tif !r.Config.DisableServer {\n\t\tr.StartServer()\n\t\tr.receive()\n\t} else {\n\t\t<-r.quit\n\t}\n}\n\nfunc (r *Robot) Stop() {\n\tr.quit <- true\n}\n\nfunc (r *Robot) receive() {\n\tfor {\n\t\tselect {\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\tcase msg := <-r.In:\n\t\t\tr.handle(&msg)\n\t\t}\n\t}\n}\n\nfunc (r *Robot) handle(inMsg *mmhook.InMessage) {\n\tmsg := &InMessage{\n\t\tInMessage: inMsg,\n\t\tRobot: r,\n\t}\n\n\tfor _, handler := range r.Handlers {\n\t\tif handler.CanHandle(msg) {\n\t\t\terr := handler.Handle(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (r *Robot) startServer() {\n\tmux := mux.NewRouter()\n\tr.mountClient(mux)\n\n\t\/\/ TODO: mount http handler\n\n\tlog.Printf(\"Listening on %s\\n\", r.Address())\n\tif err := http.ListenAndServe(r.Address(), mux); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc (r *Robot) mountClient(mux *mux.Router) {\n\tpath := r.Client.IncomingPath\n\tif path == \"\" {\n\t\tpath = \"\/\"\n\t}\n\tmux.Handle(path, r.Client)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package text contains utility functions for manipulating raw text strings\npackage text\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Indent indents a block of text with an indent string. It does this by\n\/\/ placing the given indent string at the front of every line, except on the\n\/\/ last line, if the last line has no characters. This special treatment\n\/\/ simplifies the generation of nested text structures.\nfunc Indent(text, indent string) string {\n\tif text == \"\" {\n\t\treturn text\n\t}\n\tif text[len(text)-1:] == \"\\n\" {\n\t\tresult := \"\"\n\t\tfor _, j := range strings.Split(text[:len(text)-1], \"\\n\") {\n\t\t\tresult += indent + j + \"\\n\"\n\t\t}\n\t\treturn result\n\t}\n\tresult := \"\"\n\tfor _, j := range strings.Split(strings.TrimRight(text, \"\\n\"), \"\\n\") {\n\t\tresult += indent + j + \"\\n\"\n\t}\n\treturn result[:len(result)-1]\n}\n\n\/\/ Underline returns the provided text together with a new line character and a\n\/\/ line of \"=\" characters whose length is equal to the maximum line length in\n\/\/ the provided text, followed by a final newline character.\nfunc Underline(text string) string {\n\tvar maxlen int\n\tfor _, j := range strings.Split(text, \"\\n\") {\n\t\tif len(j) > maxlen {\n\t\t\tmaxlen = len(j)\n\t\t}\n\t}\n\treturn text + \"\\n\" + strings.Repeat(\"=\", maxlen) + \"\\n\"\n}\n\n\/\/ Returns a string of the same length, filled with \"*\"s.\nfunc StarOut(test string) string {\n\treturn strings.Repeat(\"*\", len(test))\n}\n\n\/\/ GoTypeNameFrom provides a mechanism to mutate an arbitrary descriptive\n\/\/ string (name) into an exported Go type name that can be used in generated\n\/\/ code, taking into account a blacklist of names that have already been used,\n\/\/ in order to guarantee that a new name is created which will not conflict\n\/\/ with an existing type. The blacklist is updated to include the newly\n\/\/ generated name. Note, the map[string]bool construction is simply a mechanism\n\/\/ to implement set semantics; a value of `true` signifies inclusion in the\n\/\/ set. Non-existence is equivalent to existence with a value of `false`;\n\/\/ therefore it is recommended to only store `true` values.\n\/\/\n\/\/ The mutation is performed by capatilising all words (see\n\/\/ https:\/\/golang.org\/pkg\/strings\/#Title), removing all spaces and hyphens, and\n\/\/ then optionally appending an integer if the generated name conflicts with an\n\/\/ entry in the blacklist. The appended integer will be the lowest integer\n\/\/ possible, >= 1, that results in no blacklist conflict.\nfunc GoTypeNameFrom(name string, blacklist map[string]bool) string {\n\t\/\/ Capitalise words, and remove spaces and dashes, to acheive struct names in CamelCase,\n\t\/\/ but starting with an upper case letter so that the structs are exported...\n\tnormalisedName := strings.NewReplacer(\" \", \"\", \"-\", \"\").Replace(strings.Title(name))\n\t\/\/ If name already exists, add an integer suffix to name. Start with \"1\" and increment\n\t\/\/ by 1 until an unused name is found. Example: if name FooBar was generated four times\n\t\/\/ , the first instance would be called FooBar, then the next would be FooBar1, the next\n\t\/\/ FooBar2 and the last would be assigned a name of FooBar3. We do this to guarantee we\n\t\/\/ don't use duplicate names for different logical entities.\n\tfor k, baseName := 1, normalisedName; blacklist[normalisedName]; {\n\t\tnormalisedName = fmt.Sprintf(\"%v%v\", baseName, k)\n\t\tk++\n\t}\n\tblacklist[normalisedName] = true\n\treturn normalisedName\n}\n\n\/\/ Returns the indefinite article (in English) for a the given noun, which is\n\/\/ 'an' for nouns beginning with a vowel, otherwise 'a'.\nfunc IndefiniteArticle(noun string) string {\n\tif strings.ContainsRune(\"AEIOUaeiou\", rune(noun[0])) {\n\t\treturn \"an\"\n\t} else {\n\t\treturn \"a\"\n\t}\n}\n<commit_msg>Spelling correction<commit_after>\/\/ Package text contains utility functions for manipulating raw text strings\npackage text\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Indent indents a block of text with an indent string. It does this by\n\/\/ placing the given indent string at the front of every line, except on the\n\/\/ last line, if the last line has no characters. This special treatment\n\/\/ simplifies the generation of nested text structures.\nfunc Indent(text, indent string) string {\n\tif text == \"\" {\n\t\treturn text\n\t}\n\tif text[len(text)-1:] == \"\\n\" {\n\t\tresult := \"\"\n\t\tfor _, j := range strings.Split(text[:len(text)-1], \"\\n\") {\n\t\t\tresult += indent + j + \"\\n\"\n\t\t}\n\t\treturn result\n\t}\n\tresult := \"\"\n\tfor _, j := range strings.Split(strings.TrimRight(text, \"\\n\"), \"\\n\") {\n\t\tresult += indent + j + \"\\n\"\n\t}\n\treturn result[:len(result)-1]\n}\n\n\/\/ Underline returns the provided text together with a new line character and a\n\/\/ line of \"=\" characters whose length is equal to the maximum line length in\n\/\/ the provided text, followed by a final newline character.\nfunc Underline(text string) string {\n\tvar maxlen int\n\tfor _, j := range strings.Split(text, \"\\n\") {\n\t\tif len(j) > maxlen {\n\t\t\tmaxlen = len(j)\n\t\t}\n\t}\n\treturn text + \"\\n\" + strings.Repeat(\"=\", maxlen) + \"\\n\"\n}\n\n\/\/ Returns a string of the same length, filled with \"*\"s.\nfunc StarOut(text string) string {\n\treturn strings.Repeat(\"*\", len(text))\n}\n\n\/\/ GoTypeNameFrom provides a mechanism to mutate an arbitrary descriptive\n\/\/ string (name) into an exported Go type name that can be used in generated\n\/\/ code, taking into account a blacklist of names that have already been used,\n\/\/ in order to guarantee that a new name is created which will not conflict\n\/\/ with an existing type. The blacklist is updated to include the newly\n\/\/ generated name. Note, the map[string]bool construction is simply a mechanism\n\/\/ to implement set semantics; a value of `true` signifies inclusion in the\n\/\/ set. Non-existence is equivalent to existence with a value of `false`;\n\/\/ therefore it is recommended to only store `true` values.\n\/\/\n\/\/ The mutation is performed by capatilising all words (see\n\/\/ https:\/\/golang.org\/pkg\/strings\/#Title), removing all spaces and hyphens, and\n\/\/ then optionally appending an integer if the generated name conflicts with an\n\/\/ entry in the blacklist. The appended integer will be the lowest integer\n\/\/ possible, >= 1, that results in no blacklist conflict.\nfunc GoTypeNameFrom(name string, blacklist map[string]bool) string {\n\t\/\/ Capitalise words, and remove spaces and dashes, to acheive struct names in CamelCase,\n\t\/\/ but starting with an upper case letter so that the structs are exported...\n\tnormalisedName := strings.NewReplacer(\" \", \"\", \"-\", \"\").Replace(strings.Title(name))\n\t\/\/ If name already exists, add an integer suffix to name. Start with \"1\" and increment\n\t\/\/ by 1 until an unused name is found. Example: if name FooBar was generated four times\n\t\/\/ , the first instance would be called FooBar, then the next would be FooBar1, the next\n\t\/\/ FooBar2 and the last would be assigned a name of FooBar3. We do this to guarantee we\n\t\/\/ don't use duplicate names for different logical entities.\n\tfor k, baseName := 1, normalisedName; blacklist[normalisedName]; {\n\t\tnormalisedName = fmt.Sprintf(\"%v%v\", baseName, k)\n\t\tk++\n\t}\n\tblacklist[normalisedName] = true\n\treturn normalisedName\n}\n\n\/\/ Returns the indefinite article (in English) for a the given noun, which is\n\/\/ 'an' for nouns beginning with a vowel, otherwise 'a'.\nfunc IndefiniteArticle(noun string) string {\n\tif strings.ContainsRune(\"AEIOUaeiou\", rune(noun[0])) {\n\t\treturn \"an\"\n\t} else {\n\t\treturn \"a\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/MJKWoolnough\/minewebgen\/internal\/data\"\n)\n\ntype runner struct {\n\ts *data.Server\n\tshutdown chan struct{}\n\t*io.PipeWriter\n}\n\ntype Controller struct {\n\tc *Config\n\n\tmu sync.RWMutex\n\trunning map[int]*runner\n}\n\nfunc (c Controller) Run(id int) error {\n\ts := c.c.Server(id)\n\tif s == nil {\n\t\treturn ErrUnknownServer\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tif s.State != data.StateStopped {\n\t\treturn ErrServerRunning\n\t}\n\tm := c.c.Map(s.Map)\n\tif m == nil {\n\t\treturn ErrUnknownServer\n\t}\n\tm.RLock()\n\tdefer m.RUnlock()\n\tmapPath := m.Path\n\tif !path.IsAbs(mapPath) {\n\t\tpwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmapPath = path.Join(pwd, mapPath)\n\t}\n\tserverMapPath := path.Join(s.Path, \"world\")\n\tif err := os.Remove(serverMapPath); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(mapPath, serverMapPath); err != nil {\n\t\treturn err\n\t}\n\tsp := make(ServerProperties)\n\tf, err := os.Open(path.Join(s.Path, \"properties.server\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp.ReadFrom(f)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err = os.Open(path.Join(m.Path, \"properties.map\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = sp.ReadFrom(f)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp[\"level-name\"] = \"world\"\n\tf, err = os.Open(path.Join(s.Path, \"server.properties\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp.WriteTo(f)\n\tf.Close()\n\ts.State = data.StateStarting\n\tr := &runner{\n\t\ts: s,\n\t\tshutdown: make(chan struct{}, 1),\n\t}\n\tgo c.run(r)\n\treturn nil\n}\n\nfunc (c *Controller) Stop(id int) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tr, ok := c.running[id]\n\tif !ok {\n\t\treturn errors.New(\"server not running\")\n\t}\n\tclose(r.shutdown)\n\tdelete(c.running, id)\n\treturn nil\n}\n\nvar stopCmd = []byte{'\\r', '\\n', 's', 't', 'o', 'p', '\\r', '\\n'}\n\n\/\/ runs in its own goroutine\nfunc (c *Controller) run(r *runner) {\n\tcmd := exec.Command(\"java\", append(r.s.Args, \"-jar\", \"server.jar\", \"nogui\")...)\n\tcmd.Dir = r.s.Path\n\tpr, pw := io.Pipe()\n\tcmd.Stdin = pr\n\tr.PipeWriter = pw\n\terr := cmd.Start()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t} else {\n\t\tc.mu.Lock()\n\t\tc.running[r.s.ID] = r\n\t\tc.mu.Unlock()\n\t\tr.s.Lock()\n\t\tr.s.State = data.StateRunning\n\t\tr.s.Unlock()\n\t\tdied := make(chan struct{})\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-r.shutdown:\n\t\t\t\tr.s.Lock()\n\t\t\t\tr.s.State = data.StateStopping\n\t\t\t\tr.s.Unlock()\n\t\t\t\tt := time.NewTimer(time.Second * 10)\n\t\t\t\tdefer t.Stop()\n\t\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\t\tpw.Write(stopCmd)\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-died:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-t.C:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcmd.Process.Kill()\n\t\t\tcase <-died:\n\t\t\t\tc.mu.Lock()\n\t\t\t\tdelete(c.running, r.s.ID)\n\t\t\t\tc.mu.Unlock()\n\t\t\t}\n\t\t}()\n\t\tcmd.Wait()\n\t\tr.shutdown = nil\n\t\tclose(died)\n\t}\n\tr.s.Lock()\n\tr.s.State = data.StateStopped\n\tr.s.Unlock()\n}\n\nfunc (c *Controller) WriteCmd(id int, cmd string) error {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tr, ok := c.running[id]\n\tif !ok {\n\t\treturn ErrUnknownServer\n\t}\n\ttoWrite := make([]byte, 0, len(cmd)+4)\n\ttoWrite = append(toWrite, '\\r', '\\n')\n\ttoWrite = append(toWrite, cmd...)\n\ttoWrite = append(toWrite, '\\r', '\\n')\n\t_, err := r.Write(toWrite)\n\treturn err\n}\n<commit_msg>Reworked cmd writer<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/MJKWoolnough\/minewebgen\/internal\/data\"\n)\n\ntype runner struct {\n\ts *data.Server\n\tshutdown chan struct{}\n\tio.Writer\n}\n\ntype Controller struct {\n\tc *Config\n\n\tmu sync.RWMutex\n\trunning map[int]*runner\n}\n\nfunc (c Controller) Run(id int) error {\n\ts := c.c.Server(id)\n\tif s == nil {\n\t\treturn ErrUnknownServer\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tif s.State != data.StateStopped {\n\t\treturn ErrServerRunning\n\t}\n\tm := c.c.Map(s.Map)\n\tif m == nil {\n\t\treturn ErrUnknownServer\n\t}\n\tm.RLock()\n\tdefer m.RUnlock()\n\tmapPath := m.Path\n\tif !path.IsAbs(mapPath) {\n\t\tpwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmapPath = path.Join(pwd, mapPath)\n\t}\n\tserverMapPath := path.Join(s.Path, \"world\")\n\tif err := os.Remove(serverMapPath); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif err := os.Symlink(mapPath, serverMapPath); err != nil {\n\t\treturn err\n\t}\n\tsp := make(ServerProperties)\n\tf, err := os.Open(path.Join(s.Path, \"properties.server\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp.ReadFrom(f)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err = os.Open(path.Join(m.Path, \"properties.map\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = sp.ReadFrom(f)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp[\"level-name\"] = \"world\"\n\tf, err = os.Open(path.Join(s.Path, \"server.properties\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsp.WriteTo(f)\n\tf.Close()\n\ts.State = data.StateStarting\n\tr := &runner{\n\t\ts: s,\n\t\tshutdown: make(chan struct{}, 1),\n\t}\n\tgo c.run(r)\n\treturn nil\n}\n\nfunc (c *Controller) Stop(id int) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tr, ok := c.running[id]\n\tif !ok {\n\t\treturn errors.New(\"server not running\")\n\t}\n\tclose(r.shutdown)\n\tdelete(c.running, id)\n\treturn nil\n}\n\nvar stopCmd = []byte{'\\r', '\\n', 's', 't', 'o', 'p', '\\r', '\\n'}\n\n\/\/ runs in its own goroutine\nfunc (c *Controller) run(r *runner) {\n\tcmd := exec.Command(\"java\", append(r.s.Args, \"-jar\", \"server.jar\", \"nogui\")...)\n\tcmd.Dir = r.s.Path\n\tr.Writer, _ = cmd.StdinPipe()\n\terr := cmd.Start()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t} else {\n\t\tc.mu.Lock()\n\t\tc.running[r.s.ID] = r\n\t\tc.mu.Unlock()\n\t\tr.s.Lock()\n\t\tr.s.State = data.StateRunning\n\t\tr.s.Unlock()\n\t\tdied := make(chan struct{})\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-r.shutdown:\n\t\t\t\tr.s.Lock()\n\t\t\t\tr.s.State = data.StateStopping\n\t\t\t\tr.s.Unlock()\n\t\t\t\tt := time.NewTimer(time.Second * 10)\n\t\t\t\tdefer t.Stop()\n\t\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\t\tr.Write(stopCmd)\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-died:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-t.C:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcmd.Process.Kill()\n\t\t\tcase <-died:\n\t\t\t\tc.mu.Lock()\n\t\t\t\tdelete(c.running, r.s.ID)\n\t\t\t\tc.mu.Unlock()\n\t\t\t}\n\t\t}()\n\t\tcmd.Wait()\n\t\tr.shutdown = nil\n\t\tclose(died)\n\t}\n\tr.s.Lock()\n\tr.s.State = data.StateStopped\n\tr.s.Unlock()\n}\n\nfunc (c *Controller) WriteCmd(id int, cmd string) error {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tr, ok := c.running[id]\n\tif !ok {\n\t\treturn ErrUnknownServer\n\t}\n\ttoWrite := make([]byte, 0, len(cmd)+4)\n\ttoWrite = append(toWrite, '\\r', '\\n')\n\ttoWrite = append(toWrite, cmd...)\n\ttoWrite = append(toWrite, '\\r', '\\n')\n\t_, err := r.Write(toWrite)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Jari Takkala and Brian Dignan. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"sort\"\n\t\"log\"\n)\n\ntype RarityMap struct {\n\tdata map[int][]int\n}\n\nfunc NewRarityMap() *RarityMap {\n\tr := new(RarityMap)\n\tr.data = make(map[int][]int)\n\treturn r\n}\n\n\/\/ Add a new rarity -> pieceNum mapping. \nfunc (r *RarityMap) put(rarity int, pieceNum int) {\n\tif _, ok := r.data[rarity]; !ok {\n\t\tr.data[rarity] = make([]int)\n\t}\n\n\tr.data[rarity] = append(r.data[rarity], pieceNum)\n}\n\n\/\/ Flatten out the map into a slice that's sorted by rarity. \nfunc (r *RarityMap) getPiecesByRarity() []int {\n\tpieces := make([]int, 0)\n\n\t\/\/ put all rarity map keys into a temporary unsorted slice\n\tkeys := make([]int, 0)\n\tfor rarity, _ := range r.data {\n\t\tkeys = append(keys, rarity)\n\t}\n\n\t\/\/ sort the slice of keys (rarity) in ascending order\n\tSort.Ints(keys)\n\n\t\/\/ Get the map value for each key (starting with the lowest) and \n\t\/\/ concatenate that slice of pieces (for that rarity) to the result\n\tfor _, rarity := range keys {\n\t\tpieceNums := r.data[rarity]\n\t\tpieces = append(pieces, pieceNums...)\n\t}\n\n\treturn pieces\n}\n\ntype Controller struct {\n\tfinishedPieces []bool\n\tpieceHashes []string\n\tactiveRequestsTotals []int \n\tpeerPieceTotals []int\n\tpeers map[string]PeerInfo\n\tchannels ControllerRxChannels \n\tt tomb.Tomb\n}\n\n\/\/ Sent from the controller to the peer to request a particular piece\ntype RequestPiece struct {\n\tindex int\n\texpectedHash string\n}\n\n\/\/ Sent by the peer to the controller when it receives a HAVE message\ntype HavePiece struct {\n\tindex int\n\tpeer string \n\thaveMore bool \/\/ set to 1 when the peer is initially breaking a bitfield into individual HAVE messages\n}\n\n\/\/ Sent from the controller to the peer to cancel an outstanding request\n\/\/ Also sent from the peer to the controller when it's been choked or \n\/\/ when it loses its network connection \ntype CancelPiece struct {\n\tindex int\n\tpeer string \/\/ needed for when the peer sends a cancel to the controller\n}\n\n\/\/ Sent from IO to the controller indicating that a piece has been \n\/\/ received and written to disk\ntype ReceivedPiece struct {\n\tindex int\n\tpeer string\n}\n\ntype ControllerRxChannels struct {\n\treceivedPieceCh <-chan ReceivedPiece \/\/ Other end is IO \n\tnewPeerCh <-chan PeerInfo \/\/ Other end is the PeerManager\n\tcancelPieceCh <-chan CancelPiece \/\/ Other end is Peer. Used when the peer is unable to retrieve a piece\n\thavePieceCh <-chan HavePiece \/\/ Other end is Peer. used When the peer receives a HAVE message\n}\n\nfunc NewController(finishedPieces []bool, \n\t\t\t\t\tpieceHashes []string, \n\t\t\t\t\treceivedPieceCh chan ReceivedPiece) *Controller {\n\tcont := new(Controller)\n\tcont.finishedPieces = finishedPieces\n\tcont.pieceHashes = pieceHashes\n\tcont.receivedPieceCh = receivedPieceCh\n\tcont.newPeerCh = make(chan string)\n\tcont.cancelPieceCh = make(chan CancelPiece)\n\tcont.havePieceCh = make(chan HavePiece)\n\tcont.peers = make(map[string]PeerInfo)\n\tcont.activeRequestsTotals = make([]int, len(finishedPieces))\n\treturn cont\n}\n\nfunc (cont *Controller) Stop() error {\n\tlog.Println(\"Controller : Stop : Stopping\")\n\tcont.t.Kill(nil)\n\treturn cont.t.Wait()\n}\n\nfunc (cont *Controller) createRaritySlice() []int {\n\trarityMap := NewRarityMap()\n\n\tfor pieceNum, total := range cont.peerPieceTotals {\n\t\tif cont.finishedPieces[pieceNum] {\n\t\t\tcontinue\n\t\t} \n\n\t\trarityMap.put(total, pieceNum)\n\t}\n\treturn rarityMap.getPiecesByRarity()\n}\n\nfunc (cont *Controller) updateQuantityNeededForAllPeers() {\n\tfor \n}\n\nfunc (cont *Controller) recreateDownloadPriorities(raritySlice []int) {\n\tfor _, peerInfo := range cont.peers {\n\t\tdownloadPriority := make([]int, 0)\n\t\tfor _, pieceNum := range raritySlice {\n\t\t\tif peerInfo.availablePieces[pieceNum] == 1 {\n\t\t\t\tdownloadPriority = append(downloadPriority, pieceNum)\n\t\t\t}\n\t\t}\n\t\tpeerInfo.downloadPriority = downloadPriority\n\t}\n}\n\nfunc (cont *Controller) sendRequests(peersSortedByDownloadLen []string) {\n\tfor _, peerId := range peersSortedByDownloadLen {\n\t\tpeer := cont.peers[peerId]\n\n\t\t\/\/ Confirm that this peer is still connected and is available to take requests\n\t\tif peer.isActive {\n\n\t\t\/\/ Need to keep track of which pieces were already requested to be downloaded by\n\t\t\/\/ this peer\n\n\t\t\/\/ Need to loop through pieces that haven't been asked of anyone else first, \n\t\t\/\/ then loop through pieces that have been asked of 1 person, etc. \n\n\t\t\/\/ While the number if active requests is less than the max simultaneous for a single peer,\n\t\t\/\/ tell the peer to send more requests\n\t\tfor peer.activeRequests < maxSimultaneousDownloadsPerPeer\n\t\t\t\/\/ Track the number of pieces that are requested in this iteration of the loop. If none are\n\t\t\t\/\/ requestd,\n\t\t\tpiecesRequestCount := 0\n\n\t\t}\n\t}\n}\n\nconst (\n\tmaxSimultaneousDownloadsPerPeer = 3\n)\n\nfunc (cont *Controller) Run() {\n\tlog.Println(\"Controller : Run : Started\")\n\tdefer cont.t.Done()\n\tdefer log.Println(\"Controller : Run : Completed\")\n\n\tfor {\n\t\tselect {\n\t\tcase piece := <- cont.receivedPieceCh:\n\t\t\t\/\/ Update our bitfield to show that we now have that piece\n\t\t\tcont.finishedPieces[piece.index] = true\n\n\t\t\t\/\/ Create a slice of pieces sorted by rarity\n\t\t\traritySlice := cont.createRaritySlice()\n\n\t\t\t\/\/ Given the updated finishedPieces slice, update the quantity of pieces\n\t\t\t\/\/ that are needed from each peer. This step is required to later sort \n\t\t\t\/\/ peerInfo slices by the quantity of needed pieces. \n\t\t\tcont.updateQuantityNeededForAllPeers()\n\n\t\t\t\/\/ Create a PeerId slice sorted by qtyPiecesNeeded\n\t\t\tsortedPeers := sortedPeerIds(cont.peers)\n\n\t\t\t\/\/ Iterate through the peerIds sorted by qtyPiecesNeeded,\n\t\t\t\/\/ for each Peer that isn't currently requesting the max amount \n\t\t\t\/\/ of pieces, send more piece requests. \n\t\t\tcont.sendRequests(sortedPeers)\n\n\n\t\tcase piece := <- cont.cancelPieceCh:\n\n\t\tcase peerInfo := <- cont.newPeerCh:\n\t\t\t\/\/ Throw an error if the peer is duplicate (same IP\/Port. should never happen)\n\n\t\t\t\/\/ Create a new PeerInfo struct \n\n\t\t\t\/\/ Add PeerInfo to the peers map using IP:Port as the key\n\n\t\t\t\/\/ \n\n\t\tcase piece := <- cont.havePieceCh:\n\n\t\tcase <- cont.t.Dying():\n\t\t\treturn\n\t\t}\n\t}\n\n}<commit_msg>Wrote updateQuantityNeededForAllPeers method in controller file<commit_after>\/\/ Copyright 2013 Jari Takkala and Brian Dignan. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"sort\"\n\t\"log\"\n)\n\ntype RarityMap struct {\n\tdata map[int][]int\n}\n\nfunc NewRarityMap() *RarityMap {\n\tr := new(RarityMap)\n\tr.data = make(map[int][]int)\n\treturn r\n}\n\n\/\/ Add a new rarity -> pieceNum mapping. \nfunc (r *RarityMap) put(rarity int, pieceNum int) {\n\tif _, ok := r.data[rarity]; !ok {\n\t\tr.data[rarity] = make([]int)\n\t}\n\n\tr.data[rarity] = append(r.data[rarity], pieceNum)\n}\n\n\/\/ Flatten out the map into a slice that's sorted by rarity. \nfunc (r *RarityMap) getPiecesByRarity() []int {\n\tpieces := make([]int, 0)\n\n\t\/\/ put all rarity map keys into a temporary unsorted slice\n\tkeys := make([]int, 0)\n\tfor rarity, _ := range r.data {\n\t\tkeys = append(keys, rarity)\n\t}\n\n\t\/\/ sort the slice of keys (rarity) in ascending order\n\tSort.Ints(keys)\n\n\t\/\/ Get the map value for each key (starting with the lowest) and \n\t\/\/ concatenate that slice of pieces (for that rarity) to the result\n\tfor _, rarity := range keys {\n\t\tpieceNums := r.data[rarity]\n\t\tpieces = append(pieces, pieceNums...)\n\t}\n\n\treturn pieces\n}\n\ntype Controller struct {\n\tfinishedPieces []bool\n\tpieceHashes []string\n\tactiveRequestsTotals []int \n\tpeerPieceTotals []int\n\tpeers map[string]PeerInfo\n\tchannels ControllerRxChannels \n\tt tomb.Tomb\n}\n\n\/\/ Sent from the controller to the peer to request a particular piece\ntype RequestPiece struct {\n\tindex int\n\texpectedHash string\n}\n\n\/\/ Sent by the peer to the controller when it receives a HAVE message\ntype HavePiece struct {\n\tindex int\n\tpeer string \n\thaveMore bool \/\/ set to 1 when the peer is initially breaking a bitfield into individual HAVE messages\n}\n\n\/\/ Sent from the controller to the peer to cancel an outstanding request\n\/\/ Also sent from the peer to the controller when it's been choked or \n\/\/ when it loses its network connection \ntype CancelPiece struct {\n\tindex int\n\tpeer string \/\/ needed for when the peer sends a cancel to the controller\n}\n\n\/\/ Sent from IO to the controller indicating that a piece has been \n\/\/ received and written to disk\ntype ReceivedPiece struct {\n\tindex int\n\tpeer string\n}\n\ntype ControllerRxChannels struct {\n\treceivedPieceCh <-chan ReceivedPiece \/\/ Other end is IO \n\tnewPeerCh <-chan PeerInfo \/\/ Other end is the PeerManager\n\tcancelPieceCh <-chan CancelPiece \/\/ Other end is Peer. Used when the peer is unable to retrieve a piece\n\thavePieceCh <-chan HavePiece \/\/ Other end is Peer. used When the peer receives a HAVE message\n}\n\nfunc NewController(finishedPieces []bool, \n\t\t\t\t\tpieceHashes []string, \n\t\t\t\t\treceivedPieceCh chan ReceivedPiece) *Controller {\n\tcont := new(Controller)\n\tcont.finishedPieces = finishedPieces\n\tcont.pieceHashes = pieceHashes\n\tcont.receivedPieceCh = receivedPieceCh\n\tcont.newPeerCh = make(chan string)\n\tcont.cancelPieceCh = make(chan CancelPiece)\n\tcont.havePieceCh = make(chan HavePiece)\n\tcont.peers = make(map[string]PeerInfo)\n\tcont.activeRequestsTotals = make([]int, len(finishedPieces))\n\treturn cont\n}\n\nfunc (cont *Controller) Stop() error {\n\tlog.Println(\"Controller : Stop : Stopping\")\n\tcont.t.Kill(nil)\n\treturn cont.t.Wait()\n}\n\nfunc (cont *Controller) createRaritySlice() []int {\n\trarityMap := NewRarityMap()\n\n\tfor pieceNum, total := range cont.peerPieceTotals {\n\t\tif cont.finishedPieces[pieceNum] {\n\t\t\tcontinue\n\t\t} \n\n\t\trarityMap.put(total, pieceNum)\n\t}\n\treturn rarityMap.getPiecesByRarity()\n}\n\nfunc (cont *Controller) updateQuantityNeededForAllPeers() {\n\tfor _, peerInfo := range cont.peers {\n\t\tqtyPiecesNeeded := 0\n\t\tfor pieceNum, pieceFinished := range cont.finishedPieces {\n\t\t\tif !pieceFinished && peerInfo.availablePieces[pieceNum] {\n\t\t\t\tqtyPiecesNeeded++\n\t\t\t}\n\t\t}\n\t\t\/\/ Overwrite the old value with the new value just computed\n\t\tpeerInfo.qtyPiecesNeeded = qtyPiecesNeeded\n\t}\n}\n\nfunc (cont *Controller) recreateDownloadPriorities(raritySlice []int) {\n\tfor _, peerInfo := range cont.peers {\n\t\tdownloadPriority := make([]int, 0)\n\t\tfor _, pieceNum := range raritySlice {\n\t\t\tif peerInfo.availablePieces[pieceNum] == 1 {\n\t\t\t\tdownloadPriority = append(downloadPriority, pieceNum)\n\t\t\t}\n\t\t}\n\t\tpeerInfo.downloadPriority = downloadPriority\n\t}\n}\n\nfunc (cont *Controller) sendRequests(peersSortedByDownloadLen []string) {\n\tfor _, peerId := range peersSortedByDownloadLen {\n\t\tpeer := cont.peers[peerId]\n\n\t\t\/\/ Confirm that this peer is still connected and is available to take requests\n\t\tif peer.isActive {\n\n\t\t\/\/ Need to keep track of which pieces were already requested to be downloaded by\n\t\t\/\/ this peer\n\n\t\t\/\/ Need to loop through pieces that haven't been asked of anyone else first, \n\t\t\/\/ then loop through pieces that have been asked of 1 person, etc. \n\n\t\t\/\/ While the number if active requests is less than the max simultaneous for a single peer,\n\t\t\/\/ tell the peer to send more requests\n\t\tfor peer.activeRequests < maxSimultaneousDownloadsPerPeer\n\t\t\t\/\/ Track the number of pieces that are requested in this iteration of the loop. If none are\n\t\t\t\/\/ requestd,\n\t\t\tpiecesRequestCount := 0\n\n\t\t}\n\t}\n}\n\nconst (\n\tmaxSimultaneousDownloadsPerPeer = 3\n)\n\nfunc (cont *Controller) Run() {\n\tlog.Println(\"Controller : Run : Started\")\n\tdefer cont.t.Done()\n\tdefer log.Println(\"Controller : Run : Completed\")\n\n\tfor {\n\t\tselect {\n\t\tcase piece := <- cont.receivedPieceCh:\n\t\t\t\/\/ Update our bitfield to show that we now have that piece\n\t\t\tcont.finishedPieces[piece.index] = true\n\n\t\t\t\/\/ Create a slice of pieces sorted by rarity\n\t\t\traritySlice := cont.createRaritySlice()\n\n\t\t\t\/\/ Given the updated finishedPieces slice, update the quantity of pieces\n\t\t\t\/\/ that are needed from each peer. This step is required to later sort \n\t\t\t\/\/ peerInfo slices by the quantity of needed pieces. \n\t\t\tcont.updateQuantityNeededForAllPeers()\n\n\t\t\t\/\/ Create a PeerId slice sorted by qtyPiecesNeeded\n\t\t\tsortedPeers := sortedPeerIds(cont.peers)\n\n\t\t\t\/\/ Iterate through the peerIds sorted by qtyPiecesNeeded,\n\t\t\t\/\/ for each Peer that isn't currently requesting the max amount \n\t\t\t\/\/ of pieces, send more piece requests. \n\t\t\tcont.sendRequests(sortedPeers)\n\n\n\t\tcase piece := <- cont.cancelPieceCh:\n\n\t\tcase peerInfo := <- cont.newPeerCh:\n\t\t\t\/\/ Throw an error if the peer is duplicate (same IP\/Port. should never happen)\n\n\t\t\t\/\/ Create a new PeerInfo struct \n\n\t\t\t\/\/ Add PeerInfo to the peers map using IP:Port as the key\n\n\t\t\t\/\/ \n\n\t\tcase piece := <- cont.havePieceCh:\n\n\t\tcase <- cont.t.Dying():\n\t\t\treturn\n\t\t}\n\t}\n\n}<|endoftext|>"} {"text":"<commit_before>package text\n\nimport (\n\t\"strings\"\n\t\"unicode\"\n)\n\nconst (\n\tleftbrackets = \"{[(<\"\n\trightbrackets = \"}])>\"\n\tquotes = \"'`\\\"\"\n)\n\ntype line struct {\n\ts []rune \/\/ TODO make this string\n\tpx []int \/\/ x-coord of the rightmost pixels of each rune in s\n\tdirty bool \/\/ true if the line needs to be redrawn (px needs to be repopulated)\n}\n\ntype Address struct {\n\tRow, Col int\n}\n\nfunc (a1 Address) lessThan(a2 Address) bool {\n\treturn a1.Row < a2.Row || (a1.Row == a2.Row && a1.Col < a2.Col)\n}\n\nfunc (b *Buffer) nextAddress(a Address) Address {\n\tif a.Col < len(b.lines[a.Row].s) {\n\t\ta.Col++\n\t} else if a.Row < len(b.lines)-1 {\n\t\ta.Col = 0\n\t\ta.Row++\n\t}\n\treturn a\n}\n\nfunc (b *Buffer) prevAddress(a Address) Address {\n\tif a.Col > 0 {\n\t\ta.Col--\n\t} else if a.Row > 0 {\n\t\ta.Row--\n\t\ta.Col = len(b.lines[a.Row].s)\n\t}\n\treturn a\n}\n\ntype Selection struct {\n\tHead, Tail Address \/\/ the beginning and end points of the selection\n}\n\n\/\/ load replaces the current selection with s.\nfunc (b *Buffer) load(s string, recordAction bool) {\n\tb.deleteSel(true)\n\tinput := strings.Split(s, \"\\n\")\n\tif len(input) == 1 {\n\t\tb.load1(s)\n\t} else {\n\t\trow, col := b.dot.Head.Row, b.dot.Head.Col\n\n\t\t\/\/ unchanged lines\n\t\tlPreceding := b.lines[:row]\n\t\tlFollowing := b.lines[row+1:]\n\n\t\tlNew := make([]*line, len(input))\n\n\t\t\/\/ the beginning and end of the current line are attached to the first and last of the\n\t\t\/\/ lines that are being loaded\n\t\tlNew[0] = &line{s: []rune(string(b.lines[row].s[:col]) + input[0])}\n\t\tlNew[0].px = b.font.getPx(b.margin.X, string(lNew[0].s))\n\t\tlast := len(lNew) - 1\n\t\tlNew[last] = &line{s: []rune(input[len(input)-1] + string(b.lines[row].s[col:]))}\n\t\tlNew[last].px = b.font.getPx(b.margin.X, string(lNew[last].s))\n\n\t\t\/\/ entirely new lines\n\t\tfor i := 1; i < len(lNew)-1; i++ {\n\t\t\tlNew[i] = &line{s: []rune(input[i])}\n\t\t}\n\n\t\t\/\/ put everything together\n\t\tb.lines = append(lPreceding, append(lNew, lFollowing...)...)\n\n\t\t\/\/ fix selection; b.dot.Head is already fine\n\t\tb.dot.Tail.Row = row + len(lNew) - 1\n\t\tb.dot.Tail.Col = len(input[len(input)-1])\n\t\tb.dirtyLines(row, len(b.lines))\n\t}\n\tif recordAction {\n\t\tb.initCurrentAction()\n\t\tb.currentAction.insertionBounds = b.dot\n\t\tb.currentAction.insertionText = b.contents(b.dot)\n\t}\n}\n\n\/\/ load1 inserts a string with no line breaks at b.dot, assuming an empty selection.\nfunc (b *Buffer) load1(s string) {\n\trow, col := b.dot.Head.Row, b.dot.Head.Col\n\tbefore := string(b.lines[row].s[:col])\n\tafter := string(b.lines[row].s[col:])\n\tb.lines[row].s = []rune(before + s + after)\n\tb.lines[row].px = b.font.getPx(b.margin.X, string(b.lines[row].s))\n\tb.dot.Tail.Col += len([]rune(s))\n\tb.dirtyLine(row)\n}\n\nfunc (b *Buffer) contents(sel Selection) string {\n\ta1, a2 := sel.Head, sel.Tail\n\tif a1.Row == a2.Row {\n\t\treturn string(b.lines[a1.Row].s[a1.Col:a2.Col])\n\t} else {\n\t\tsel := string(b.lines[a1.Row].s[a1.Col:]) + \"\\n\"\n\t\tfor i := a1.Row + 1; i < a2.Row; i++ {\n\t\t\tsel += string(b.lines[i].s) + \"\\n\"\n\t\t}\n\t\tsel += string(b.lines[a2.Row].s[:a2.Col])\n\t\treturn sel\n\t}\n}\n\nfunc (b *Buffer) deleteSel(recordAction bool) {\n\tif b.dot.Head == b.dot.Tail {\n\t\treturn\n\t}\n\n\tif recordAction {\n\t\tb.initCurrentAction()\n\t\tb.currentAction.deletionBounds = b.dot\n\t\tb.currentAction.deletionText = b.contents(b.dot)\n\t}\n\n\tcol1, row1, col2, row2 := b.dot.Head.Col, b.dot.Head.Row, b.dot.Tail.Col, b.dot.Tail.Row\n\tline := b.lines[row1].s[:col1]\n\tb.lines[row1].s = append(line, b.lines[row2].s[col2:]...)\n\tb.dirtyLine(row1)\n\tif row2 > row1 {\n\t\tb.lines = append(b.lines[:row1+1], b.lines[row2+1:]...)\n\t\tb.dirtyLines(row1+1, len(b.lines))\n\n\t\t\/\/ make sure we clean up the garbage left after the (new) final line\n\t\tb.clear = b.img.Bounds()\n\t\tb.clear.Min.Y = b.font.height * (len(b.lines) - 1)\n\t}\n\tb.dot.Tail = b.dot.Head\n}\n\nfunc isAlnum(c rune) bool {\n\treturn unicode.IsLetter(c) || unicode.IsNumber(c)\n}\n\n\/\/ expandSel selects some text around a. Based on acme's double click selection rules.\nfunc (b *Buffer) expandSel(a Address) {\n\tb.dot.Head, b.dot.Tail = a, a\n\tline := b.lines[a.Row].s\n\n\t\/\/ select bracketed text\n\tif b.selDelimited(leftbrackets, rightbrackets) {\n\t\tb.dirtyLines(b.dot.Head.Row, b.dot.Tail.Row+1)\n\t\treturn\n\t}\n\n\t\/\/ select line\n\tif a.Col == len(line) || a.Col == 0 {\n\t\tb.dot.Head.Col = 0\n\t\tif a.Row+1 < len(b.lines) {\n\t\t\tb.dot.Tail.Row++\n\t\t\tb.dot.Tail.Col = 0\n\t\t} else {\n\t\t\tb.dot.Tail.Col = len(line)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ select quoted text\n\tif b.selDelimited(quotes, quotes) {\n\t\tb.dirtyLines(b.dot.Head.Row, b.dot.Tail.Row+1)\n\t\treturn\n\t}\n\n\t\/\/ Select a word. If we're on a non-alphanumeric, attempt to select a word to\n\t\/\/ the left of the click; otherwise expand across alphanumerics in both directions.\n\tfor col := a.Col; col > 0 && isAlnum(line[col-1]); col-- {\n\t\tb.dot.Head.Col--\n\t}\n\tif isAlnum(line[a.Col]) {\n\t\tfor col := a.Col; col < len(line) && isAlnum(line[col]); col++ {\n\t\t\tb.dot.Tail.Col++\n\t\t}\n\t}\n}\n\n\/\/ returns true if a selection was attempted, successfully or not\nfunc (b *Buffer) selDelimited(delims1, delims2 string) bool {\n\tleft, right := b.dot.Head, b.dot.Tail\n\tline := b.lines[left.Row].s\n\tvar delim int\n\tvar next func() Address\n\n\t\/\/ First see if we can scan to the right.\n\tif left.Col > 0 {\n\t\tif delim = strings.IndexRune(delims1, line[left.Col-1]); delim != -1 {\n\t\t\t\/\/ scan from left delimiter\n\t\t\tnext = func() Address {\n\t\t\t\tif right.Col+1 > len(line) {\n\t\t\t\t\tright.Row++\n\t\t\t\t\tif right.Row < len(b.lines) {\n\t\t\t\t\t\tline = b.lines[right.Row].s\n\t\t\t\t\t}\n\t\t\t\t\tright.Col = 0\n\t\t\t\t} else {\n\t\t\t\t\tright.Col++\n\t\t\t\t}\n\t\t\t\treturn right\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Otherwise, see if we can scan to the left.\n\tvar leftwards bool\n\tif next == nil && left.Col < len(line) {\n\t\tif delim = strings.IndexRune(delims2, line[left.Col]); delim != -1 {\n\t\t\t\/\/ scan from right delimiter\n\t\t\tleftwards = true\n\t\t\t\/\/ swap delimiters so that delim1 refers to the first one we encountered\n\t\t\tdelims1, delims2 = delims2, delims1\n\t\t\tnext = func() Address {\n\t\t\t\tif left.Col-1 < 0 {\n\t\t\t\t\tleft.Row--\n\t\t\t\t\tif left.Row >= 0 {\n\t\t\t\t\t\tleft.Col = len(b.lines[left.Row].s)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tleft.Col--\n\t\t\t\t}\n\t\t\t\treturn left\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Either we're not on a delimiter or there's nowhere to scan. Bail.\n\tif next == nil {\n\t\treturn false\n\t}\n\n\t\/\/ We're on a valid delimiter and have a next function. Scan for the matching delimiter.\n\tstack := 0\n\tfor {\n\t\tp := next()\n\t\tif p.Row < 0 || p.Row >= len(b.lines) {\n\t\t\treturn true\n\t\t} else if p.Col >= len(b.lines[p.Row].s) {\n\t\t\tcontinue\n\t\t}\n\t\tc := b.lines[p.Row].s[p.Col]\n\t\tif c == rune(delims2[delim]) && stack == 0 {\n\t\t\tb.dot.Head, b.dot.Tail = left, right\n\t\t\tif leftwards {\n\t\t\t\tb.dot.Head.Col++\n\t\t\t}\n\t\t\treturn true\n\t\t} else if c == 0 {\n\t\t\treturn true\n\t\t}\n\t\tif delims1 != delims2 && c == rune(delims1[delim]) {\n\t\t\tstack++\n\t\t}\n\t\tif delims1 != delims2 && c == rune(delims2[delim]) {\n\t\t\tstack--\n\t\t}\n\t}\n}\n<commit_msg>text: simplify selDelimited<commit_after>package text\n\nimport (\n\t\"strings\"\n\t\"unicode\"\n)\n\nconst (\n\tleftbrackets = \"{[(<\"\n\trightbrackets = \"}])>\"\n\tquotes = \"'`\\\"\"\n)\n\ntype line struct {\n\ts []rune \/\/ TODO make this string\n\tpx []int \/\/ x-coord of the rightmost pixels of each rune in s\n\tdirty bool \/\/ true if the line needs to be redrawn (px needs to be repopulated)\n}\n\ntype Address struct {\n\tRow, Col int\n}\n\nfunc (a1 Address) lessThan(a2 Address) bool {\n\treturn a1.Row < a2.Row || (a1.Row == a2.Row && a1.Col < a2.Col)\n}\n\nfunc (b *Buffer) nextAddress(a Address) Address {\n\tif a.Col < len(b.lines[a.Row].s) {\n\t\ta.Col++\n\t} else if a.Row < len(b.lines)-1 {\n\t\ta.Col = 0\n\t\ta.Row++\n\t}\n\treturn a\n}\n\nfunc (b *Buffer) prevAddress(a Address) Address {\n\tif a.Col > 0 {\n\t\ta.Col--\n\t} else if a.Row > 0 {\n\t\ta.Row--\n\t\ta.Col = len(b.lines[a.Row].s)\n\t}\n\treturn a\n}\n\ntype Selection struct {\n\tHead, Tail Address \/\/ the beginning and end points of the selection\n}\n\n\/\/ load replaces the current selection with s.\nfunc (b *Buffer) load(s string, recordAction bool) {\n\tb.deleteSel(true)\n\tinput := strings.Split(s, \"\\n\")\n\tif len(input) == 1 {\n\t\tb.load1(s)\n\t} else {\n\t\trow, col := b.dot.Head.Row, b.dot.Head.Col\n\n\t\t\/\/ unchanged lines\n\t\tlPreceding := b.lines[:row]\n\t\tlFollowing := b.lines[row+1:]\n\n\t\tlNew := make([]*line, len(input))\n\n\t\t\/\/ the beginning and end of the current line are attached to the first and last of the\n\t\t\/\/ lines that are being loaded\n\t\tlNew[0] = &line{s: []rune(string(b.lines[row].s[:col]) + input[0])}\n\t\tlNew[0].px = b.font.getPx(b.margin.X, string(lNew[0].s))\n\t\tlast := len(lNew) - 1\n\t\tlNew[last] = &line{s: []rune(input[len(input)-1] + string(b.lines[row].s[col:]))}\n\t\tlNew[last].px = b.font.getPx(b.margin.X, string(lNew[last].s))\n\n\t\t\/\/ entirely new lines\n\t\tfor i := 1; i < len(lNew)-1; i++ {\n\t\t\tlNew[i] = &line{s: []rune(input[i])}\n\t\t}\n\n\t\t\/\/ put everything together\n\t\tb.lines = append(lPreceding, append(lNew, lFollowing...)...)\n\n\t\t\/\/ fix selection; b.dot.Head is already fine\n\t\tb.dot.Tail.Row = row + len(lNew) - 1\n\t\tb.dot.Tail.Col = len(input[len(input)-1])\n\t\tb.dirtyLines(row, len(b.lines))\n\t}\n\tif recordAction {\n\t\tb.initCurrentAction()\n\t\tb.currentAction.insertionBounds = b.dot\n\t\tb.currentAction.insertionText = b.contents(b.dot)\n\t}\n}\n\n\/\/ load1 inserts a string with no line breaks at b.dot, assuming an empty selection.\nfunc (b *Buffer) load1(s string) {\n\trow, col := b.dot.Head.Row, b.dot.Head.Col\n\tbefore := string(b.lines[row].s[:col])\n\tafter := string(b.lines[row].s[col:])\n\tb.lines[row].s = []rune(before + s + after)\n\tb.lines[row].px = b.font.getPx(b.margin.X, string(b.lines[row].s))\n\tb.dot.Tail.Col += len([]rune(s))\n\tb.dirtyLine(row)\n}\n\nfunc (b *Buffer) contents(sel Selection) string {\n\ta1, a2 := sel.Head, sel.Tail\n\tif a1.Row == a2.Row {\n\t\treturn string(b.lines[a1.Row].s[a1.Col:a2.Col])\n\t} else {\n\t\tsel := string(b.lines[a1.Row].s[a1.Col:]) + \"\\n\"\n\t\tfor i := a1.Row + 1; i < a2.Row; i++ {\n\t\t\tsel += string(b.lines[i].s) + \"\\n\"\n\t\t}\n\t\tsel += string(b.lines[a2.Row].s[:a2.Col])\n\t\treturn sel\n\t}\n}\n\nfunc (b *Buffer) deleteSel(recordAction bool) {\n\tif b.dot.Head == b.dot.Tail {\n\t\treturn\n\t}\n\n\tif recordAction {\n\t\tb.initCurrentAction()\n\t\tb.currentAction.deletionBounds = b.dot\n\t\tb.currentAction.deletionText = b.contents(b.dot)\n\t}\n\n\tcol1, row1, col2, row2 := b.dot.Head.Col, b.dot.Head.Row, b.dot.Tail.Col, b.dot.Tail.Row\n\tline := b.lines[row1].s[:col1]\n\tb.lines[row1].s = append(line, b.lines[row2].s[col2:]...)\n\tb.dirtyLine(row1)\n\tif row2 > row1 {\n\t\tb.lines = append(b.lines[:row1+1], b.lines[row2+1:]...)\n\t\tb.dirtyLines(row1+1, len(b.lines))\n\n\t\t\/\/ make sure we clean up the garbage left after the (new) final line\n\t\tb.clear = b.img.Bounds()\n\t\tb.clear.Min.Y = b.font.height * (len(b.lines) - 1)\n\t}\n\tb.dot.Tail = b.dot.Head\n}\n\nfunc isAlnum(c rune) bool {\n\treturn unicode.IsLetter(c) || unicode.IsNumber(c)\n}\n\n\/\/ expandSel selects some text around a. Based on acme's double click selection rules.\nfunc (b *Buffer) expandSel(a Address) {\n\tb.dot.Head, b.dot.Tail = a, a\n\tline := b.lines[a.Row].s\n\n\t\/\/ select bracketed text\n\tif b.selDelimited(leftbrackets, rightbrackets) {\n\t\tb.dirtyLines(b.dot.Head.Row, b.dot.Tail.Row+1)\n\t\treturn\n\t}\n\n\t\/\/ select line\n\tif a.Col == len(line) || a.Col == 0 {\n\t\tb.dot.Head.Col = 0\n\t\tif a.Row+1 < len(b.lines) {\n\t\t\tb.dot.Tail.Row++\n\t\t\tb.dot.Tail.Col = 0\n\t\t} else {\n\t\t\tb.dot.Tail.Col = len(line)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ select quoted text\n\tif b.selDelimited(quotes, quotes) {\n\t\tb.dirtyLines(b.dot.Head.Row, b.dot.Tail.Row+1)\n\t\treturn\n\t}\n\n\t\/\/ Select a word. If we're on a non-alphanumeric, attempt to select a word to\n\t\/\/ the left of the click; otherwise expand across alphanumerics in both directions.\n\tfor col := a.Col; col > 0 && isAlnum(line[col-1]); col-- {\n\t\tb.dot.Head.Col--\n\t}\n\tif isAlnum(line[a.Col]) {\n\t\tfor col := a.Col; col < len(line) && isAlnum(line[col]); col++ {\n\t\t\tb.dot.Tail.Col++\n\t\t}\n\t}\n}\n\n\/\/ returns true if a selection was attempted, successfully or not\nfunc (b *Buffer) selDelimited(delims1, delims2 string) bool {\n\taddr := b.dot.Head\n\tvar delim int\n\tvar line = b.lines[addr.Row].s\n\tvar next func(Address) Address\n\tvar rightwards bool\n\tif addr.Col > 0 {\n\t\tif delim = strings.IndexRune(delims1, line[addr.Col-1]); delim != -1 {\n\t\t\t\/\/ scan to the right, from a left delimiter\n\t\t\tnext = b.nextAddress\n\t\t\trightwards = true\n\n\t\t\t\/\/ the user double-clicked to the right of a left delimiter; move addr\n\t\t\t\/\/ to the delimiter itself\n\t\t\taddr.Col--\n\t\t}\n\t}\n\tif next == nil && addr.Col < len(line) {\n\t\tif delim = strings.IndexRune(delims2, line[addr.Col]); delim != -1 {\n\t\t\t\/\/ scan to the left, from a right delimiter\n\t\t\t\/\/ swap delimiters so that delim1 refers to the first one we encountered\n\t\t\tdelims1, delims2 = delims2, delims1\n\t\t\tnext = b.prevAddress\n\t\t}\n\t}\n\tif next == nil {\n\t\treturn false\n\t}\n\n\tstack := 0\n\tmatch := addr\n\tvar prev Address\n\tfor {\n\t\tprev = match\n\t\tmatch = next(match)\n\t\tif match == prev {\n\t\t\treturn true \/\/ we're at the end or beginning of the file; give up\n\t\t}\n\t\tline := b.lines[match.Row].s\n\t\tif match.Col > len(line)-1 {\n\t\t\tcontinue\n\t\t}\n\t\tc := line[match.Col]\n\t\tif c == rune(delims2[delim]) && stack == 0 {\n\t\t\tif rightwards {\n\t\t\t\tb.dot.Head, b.dot.Tail = addr, match\n\t\t\t} else {\n\t\t\t\tb.dot.Head, b.dot.Tail = match, addr\n\t\t\t}\n\t\t\tb.dot.Head.Col++ \/\/ move the head of the selection past the left delimiter\n\t\t\treturn true\n\t\t} else if c == 0 {\n\t\t\treturn true\n\t\t}\n\t\tif delims1 != delims2 && c == rune(delims1[delim]) {\n\t\t\tstack++\n\t\t}\n\t\tif delims1 != delims2 && c == rune(delims2[delim]) {\n\t\t\tstack--\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package dircache provides a simple cache for caching directory to path lookups\npackage dircache\n\n\/\/ _methods are called without the lock\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ DirCache caches paths to directory IDs and vice versa\ntype DirCache struct {\n\tmu sync.RWMutex\n\tcache map[string]string\n\tinvCache map[string]string\n\tfs DirCacher \/\/ Interface to find and make stuff\n\ttrueRootID string \/\/ ID of the absolute root\n\troot string \/\/ the path we are working on\n\trootID string \/\/ ID of the root directory\n\trootParentID string \/\/ ID of the root's parent directory\n\tfoundRoot bool \/\/ Whether we have found the root or not\n}\n\n\/\/ DirCacher describes an interface for doing the low level directory work\ntype DirCacher interface {\n\tFindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)\n\tCreateDir(pathID, leaf string) (newID string, err error)\n}\n\n\/\/ New makes a DirCache\n\/\/\n\/\/ The cache is safe for concurrent use\nfunc New(root string, trueRootID string, fs DirCacher) *DirCache {\n\td := &DirCache{\n\t\ttrueRootID: trueRootID,\n\t\troot: root,\n\t\tfs: fs,\n\t}\n\td.Flush()\n\td.ResetRoot()\n\treturn d\n}\n\n\/\/ _get an ID given a path - without lock\nfunc (dc *DirCache) _get(path string) (id string, ok bool) {\n\tid, ok = dc.cache[path]\n\treturn\n}\n\n\/\/ Get an ID given a path\nfunc (dc *DirCache) Get(path string) (id string, ok bool) {\n\tdc.mu.RLock()\n\tid, ok = dc._get(path)\n\tdc.mu.RUnlock()\n\treturn\n}\n\n\/\/ GetInv gets a path given an ID\nfunc (dc *DirCache) GetInv(path string) (id string, ok bool) {\n\tdc.mu.RLock()\n\tid, ok = dc.invCache[path]\n\tdc.mu.RUnlock()\n\treturn\n}\n\n\/\/ _put a path, id into the map without lock\nfunc (dc *DirCache) _put(path, id string) {\n\tdc.cache[path] = id\n\tdc.invCache[id] = path\n}\n\n\/\/ Put a path, id into the map\nfunc (dc *DirCache) Put(path, id string) {\n\tdc.mu.Lock()\n\tdc._put(path, id)\n\tdc.mu.Unlock()\n}\n\n\/\/ _flush the map of all data without lock\nfunc (dc *DirCache) _flush() {\n\tdc.cache = make(map[string]string)\n\tdc.invCache = make(map[string]string)\n}\n\n\/\/ Flush the map of all data\nfunc (dc *DirCache) Flush() {\n\tdc.mu.Lock()\n\tdc._flush()\n\tdc.mu.Unlock()\n}\n\n\/\/ SplitPath splits a path into directory, leaf\n\/\/\n\/\/ Path shouldn't start or end with a \/\n\/\/\n\/\/ If there are no slashes then directory will be \"\" and leaf = path\nfunc SplitPath(path string) (directory, leaf string) {\n\tlastSlash := strings.LastIndex(path, \"\/\")\n\tif lastSlash >= 0 {\n\t\tdirectory = path[:lastSlash]\n\t\tleaf = path[lastSlash+1:]\n\t} else {\n\t\tdirectory = \"\"\n\t\tleaf = path\n\t}\n\treturn\n}\n\n\/\/ FindDir finds the directory passed in returning the directory ID\n\/\/ starting from pathID\n\/\/\n\/\/ Path shouldn't start or end with a \/\n\/\/\n\/\/ If create is set it will make the directory if not found\n\/\/\n\/\/ Algorithm:\n\/\/ Look in the cache for the path, if found return the pathID\n\/\/ If not found strip the last path off the path and recurse\n\/\/ Now have a parent directory id, so look in the parent for self and return it\nfunc (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {\n\tdc.mu.RLock()\n\tdefer dc.mu.RUnlock()\n\treturn dc._findDir(path, create)\n}\n\n\/\/ Look for the root and in the cache - safe to call without the mu\nfunc (dc *DirCache) _findDirInCache(path string) string {\n\t\/\/ fmt.Println(\"Finding\",path,\"create\",create,\"cache\",cache)\n\t\/\/ If it is the root, then return it\n\tif path == \"\" {\n\t\t\/\/ fmt.Println(\"Root\")\n\t\treturn dc.rootID\n\t}\n\n\t\/\/ If it is in the cache then return it\n\tpathID, ok := dc._get(path)\n\tif ok {\n\t\t\/\/ fmt.Println(\"Cache hit on\", path)\n\t\treturn pathID\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Unlocked findDir - must have mu\nfunc (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {\n\t\/\/ if !dc.foundRoot {\n\t\/\/ \treturn \"\", fmt.Errorf(\"FindDir called before FindRoot\")\n\t\/\/ }\n\n\tpathID = dc._findDirInCache(path)\n\tif pathID != \"\" {\n\t\treturn pathID, nil\n\t}\n\n\t\/\/ Split the path into directory, leaf\n\tdirectory, leaf := SplitPath(path)\n\n\t\/\/ Recurse and find pathID for parent directory\n\tparentPathID, err := dc._findDir(directory, create)\n\tif err != nil {\n\t\treturn \"\", err\n\n\t}\n\n\t\/\/ Find the leaf in parentPathID\n\tpathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ If not found create the directory if required or return an error\n\tif !found {\n\t\tif create {\n\t\t\tpathID, err = dc.fs.CreateDir(parentPathID, leaf)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"Failed to make directory: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", fmt.Errorf(\"Couldn't find directory: %q\", path)\n\t\t}\n\t}\n\n\t\/\/ Store the leaf directory in the cache\n\tdc._put(path, pathID)\n\n\t\/\/ fmt.Println(\"Dir\", path, \"is\", pathID)\n\treturn pathID, nil\n}\n\n\/\/ FindPath finds the leaf and directoryID from a path\n\/\/\n\/\/ If create is set parent directories will be created if they don't exist\nfunc (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tdirectory, leaf := SplitPath(path)\n\tdirectoryID, err = dc._findDir(directory, create)\n\tif err != nil {\n\t\tif create {\n\t\t\terr = fmt.Errorf(\"Couldn't find or make directory %q: %s\", directory, err)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Couldn't find directory %q: %s\", directory, err)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FindRoot finds the root directory if not already found\n\/\/\n\/\/ Resets the root directory\n\/\/\n\/\/ If create is set it will make the directory if not found\nfunc (dc *DirCache) FindRoot(create bool) error {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif dc.foundRoot {\n\t\treturn nil\n\t}\n\tdc.foundRoot = true\n\trootID, err := dc._findDir(dc.root, create)\n\tif err != nil {\n\t\tdc.foundRoot = false\n\t\treturn err\n\t}\n\tdc.rootID = rootID\n\n\t\/\/ Find the parent of the root while we still have the root\n\t\/\/ directory tree cached\n\trootParentPath, _ := SplitPath(dc.root)\n\tdc.rootParentID, _ = dc._get(rootParentPath)\n\n\t\/\/ Reset the tree based on dc.root\n\tdc._flush()\n\t\/\/ Put the root directory in\n\tdc._put(\"\", dc.rootID)\n\treturn nil\n}\n\n\/\/ RootID returns the ID of the root directory\n\/\/\n\/\/ This should be called after FindRoot\nfunc (dc *DirCache) RootID() string {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif !dc.foundRoot {\n\t\tlog.Fatalf(\"Internal Error: RootID() called before FindRoot\")\n\t}\n\treturn dc.rootID\n}\n\n\/\/ RootParentID returns the ID of the parent of the root directory\n\/\/\n\/\/ This should be called after FindRoot\nfunc (dc *DirCache) RootParentID() (string, error) {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif !dc.foundRoot {\n\t\treturn \"\", fmt.Errorf(\"Internal Error: RootID() called before FindRoot\")\n\t}\n\tif dc.rootParentID == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Internal Error: Didn't find rootParentID\")\n\t}\n\tif dc.rootID == dc.trueRootID {\n\t\treturn \"\", fmt.Errorf(\"Is root directory\")\n\t}\n\treturn dc.rootParentID, nil\n}\n\n\/\/ ResetRoot resets the root directory to the absolute root and clears\n\/\/ the DirCache\nfunc (dc *DirCache) ResetRoot() {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tdc.foundRoot = false\n\tdc._flush()\n\n\t\/\/ Put the true root in\n\tdc.rootID = dc.trueRootID\n\n\t\/\/ Put the root directory in\n\tdc._put(\"\", dc.rootID)\n}\n<commit_msg>dircache: make separate mutex for the cache<commit_after>\/\/ Package dircache provides a simple cache for caching directory to path lookups\npackage dircache\n\n\/\/ _methods are called without the lock\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ DirCache caches paths to directory IDs and vice versa\ntype DirCache struct {\n\tcacheMu sync.RWMutex\n\tcache map[string]string\n\tinvCache map[string]string\n\tmu sync.Mutex\n\tfs DirCacher \/\/ Interface to find and make stuff\n\ttrueRootID string \/\/ ID of the absolute root\n\troot string \/\/ the path we are working on\n\trootID string \/\/ ID of the root directory\n\trootParentID string \/\/ ID of the root's parent directory\n\tfoundRoot bool \/\/ Whether we have found the root or not\n}\n\n\/\/ DirCacher describes an interface for doing the low level directory work\ntype DirCacher interface {\n\tFindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)\n\tCreateDir(pathID, leaf string) (newID string, err error)\n}\n\n\/\/ New makes a DirCache\n\/\/\n\/\/ The cache is safe for concurrent use\nfunc New(root string, trueRootID string, fs DirCacher) *DirCache {\n\td := &DirCache{\n\t\ttrueRootID: trueRootID,\n\t\troot: root,\n\t\tfs: fs,\n\t}\n\td.Flush()\n\td.ResetRoot()\n\treturn d\n}\n\n\/\/ Get an ID given a path\nfunc (dc *DirCache) Get(path string) (id string, ok bool) {\n\tdc.cacheMu.RLock()\n\tid, ok = dc.cache[path]\n\tdc.cacheMu.RUnlock()\n\treturn\n}\n\n\/\/ GetInv gets a path given an ID\nfunc (dc *DirCache) GetInv(id string) (path string, ok bool) {\n\tdc.cacheMu.RLock()\n\tpath, ok = dc.invCache[id]\n\tdc.cacheMu.RUnlock()\n\treturn\n}\n\n\/\/ Put a path, id into the map\nfunc (dc *DirCache) Put(path, id string) {\n\tdc.cacheMu.Lock()\n\tdc.cache[path] = id\n\tdc.invCache[id] = path\n\tdc.cacheMu.Unlock()\n}\n\n\/\/ Flush the map of all data\nfunc (dc *DirCache) Flush() {\n\tdc.cacheMu.Lock()\n\tdc.cache = make(map[string]string)\n\tdc.invCache = make(map[string]string)\n\tdc.cacheMu.Unlock()\n}\n\n\/\/ SplitPath splits a path into directory, leaf\n\/\/\n\/\/ Path shouldn't start or end with a \/\n\/\/\n\/\/ If there are no slashes then directory will be \"\" and leaf = path\nfunc SplitPath(path string) (directory, leaf string) {\n\tlastSlash := strings.LastIndex(path, \"\/\")\n\tif lastSlash >= 0 {\n\t\tdirectory = path[:lastSlash]\n\t\tleaf = path[lastSlash+1:]\n\t} else {\n\t\tdirectory = \"\"\n\t\tleaf = path\n\t}\n\treturn\n}\n\n\/\/ FindDir finds the directory passed in returning the directory ID\n\/\/ starting from pathID\n\/\/\n\/\/ Path shouldn't start or end with a \/\n\/\/\n\/\/ If create is set it will make the directory if not found\n\/\/\n\/\/ Algorithm:\n\/\/ Look in the cache for the path, if found return the pathID\n\/\/ If not found strip the last path off the path and recurse\n\/\/ Now have a parent directory id, so look in the parent for self and return it\nfunc (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\treturn dc._findDir(path, create)\n}\n\n\/\/ Look for the root and in the cache - safe to call without the mu\nfunc (dc *DirCache) _findDirInCache(path string) string {\n\t\/\/ fmt.Println(\"Finding\",path,\"create\",create,\"cache\",cache)\n\t\/\/ If it is the root, then return it\n\tif path == \"\" {\n\t\t\/\/ fmt.Println(\"Root\")\n\t\treturn dc.rootID\n\t}\n\n\t\/\/ If it is in the cache then return it\n\tpathID, ok := dc.Get(path)\n\tif ok {\n\t\t\/\/ fmt.Println(\"Cache hit on\", path)\n\t\treturn pathID\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Unlocked findDir - must have mu\nfunc (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {\n\t\/\/ if !dc.foundRoot {\n\t\/\/ \treturn \"\", fmt.Errorf(\"FindDir called before FindRoot\")\n\t\/\/ }\n\n\tpathID = dc._findDirInCache(path)\n\tif pathID != \"\" {\n\t\treturn pathID, nil\n\t}\n\n\t\/\/ Split the path into directory, leaf\n\tdirectory, leaf := SplitPath(path)\n\n\t\/\/ Recurse and find pathID for parent directory\n\tparentPathID, err := dc._findDir(directory, create)\n\tif err != nil {\n\t\treturn \"\", err\n\n\t}\n\n\t\/\/ Find the leaf in parentPathID\n\tpathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ If not found create the directory if required or return an error\n\tif !found {\n\t\tif create {\n\t\t\tpathID, err = dc.fs.CreateDir(parentPathID, leaf)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"Failed to make directory: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", fmt.Errorf(\"Couldn't find directory: %q\", path)\n\t\t}\n\t}\n\n\t\/\/ Store the leaf directory in the cache\n\tdc.Put(path, pathID)\n\n\t\/\/ fmt.Println(\"Dir\", path, \"is\", pathID)\n\treturn pathID, nil\n}\n\n\/\/ FindPath finds the leaf and directoryID from a path\n\/\/\n\/\/ If create is set parent directories will be created if they don't exist\nfunc (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tdirectory, leaf := SplitPath(path)\n\tdirectoryID, err = dc._findDir(directory, create)\n\tif err != nil {\n\t\tif create {\n\t\t\terr = fmt.Errorf(\"Couldn't find or make directory %q: %s\", directory, err)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Couldn't find directory %q: %s\", directory, err)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FindRoot finds the root directory if not already found\n\/\/\n\/\/ Resets the root directory\n\/\/\n\/\/ If create is set it will make the directory if not found\nfunc (dc *DirCache) FindRoot(create bool) error {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif dc.foundRoot {\n\t\treturn nil\n\t}\n\tdc.foundRoot = true\n\trootID, err := dc._findDir(dc.root, create)\n\tif err != nil {\n\t\tdc.foundRoot = false\n\t\treturn err\n\t}\n\tdc.rootID = rootID\n\n\t\/\/ Find the parent of the root while we still have the root\n\t\/\/ directory tree cached\n\trootParentPath, _ := SplitPath(dc.root)\n\tdc.rootParentID, _ = dc.Get(rootParentPath)\n\n\t\/\/ Reset the tree based on dc.root\n\tdc.Flush()\n\t\/\/ Put the root directory in\n\tdc.Put(\"\", dc.rootID)\n\treturn nil\n}\n\n\/\/ RootID returns the ID of the root directory\n\/\/\n\/\/ This should be called after FindRoot\nfunc (dc *DirCache) RootID() string {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif !dc.foundRoot {\n\t\tlog.Fatalf(\"Internal Error: RootID() called before FindRoot\")\n\t}\n\treturn dc.rootID\n}\n\n\/\/ RootParentID returns the ID of the parent of the root directory\n\/\/\n\/\/ This should be called after FindRoot\nfunc (dc *DirCache) RootParentID() (string, error) {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tif !dc.foundRoot {\n\t\treturn \"\", fmt.Errorf(\"Internal Error: RootID() called before FindRoot\")\n\t}\n\tif dc.rootParentID == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Internal Error: Didn't find rootParentID\")\n\t}\n\tif dc.rootID == dc.trueRootID {\n\t\treturn \"\", fmt.Errorf(\"Is root directory\")\n\t}\n\treturn dc.rootParentID, nil\n}\n\n\/\/ ResetRoot resets the root directory to the absolute root and clears\n\/\/ the DirCache\nfunc (dc *DirCache) ResetRoot() {\n\tdc.mu.Lock()\n\tdefer dc.mu.Unlock()\n\tdc.foundRoot = false\n\tdc.Flush()\n\n\t\/\/ Put the true root in\n\tdc.rootID = dc.trueRootID\n\n\t\/\/ Put the root directory in\n\tdc.Put(\"\", dc.rootID)\n}\n<|endoftext|>"} {"text":"<commit_before>package rpiws\n\n\/*\n\n#include \"ws2811.h\"\n\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nconst (\n\tWS2811_TARGET_FREQ = C.WS2811_TARGET_FREQ\n\tRPI_PWM_CHANNELS = C.RPI_PWM_CHANNELS\n)\n\nvar (\n\tErrHardware = errors.New(\"Hardware error\")\n)\n\ntype Led uint32\n\ntype Driver struct {\n\tdevice unsafe.Pointer \/\/ Private\n\tFreq uint32\n\tDmanum int32\n\tChannel [RPI_PWM_CHANNELS]Channel\n}\n\ntype Channel struct {\n\tGpionum int32\n\tInvert int32\n\tCount int32\n\tBrightness int32\n\tleds unsafe.Pointer \/\/ use Leds() for access\n}\n\nfunc (driver *Driver) cptr() *C.ws2811_t {\n\treturn (*C.ws2811_t)(unsafe.Pointer(driver))\n}\n\nfunc (driver *Driver) Init() error {\n\tif ret, err := C.ws2811_init(driver.cptr()); ret < 0 {\n\t\tif err != nil {\n\t\t\t\/\/ errno is set\n\t\t\treturn err\n\t\t}\n\n\t\treturn errors.New(\"Initialization error\")\n\t}\n\n\treturn nil\n}\n\nfunc (driver *Driver) Fini() error {\n\tC.ws2811_fini(driver.cptr())\n\treturn nil\n}\n\nfunc (driver *Driver) render() error {\n\tif C.ws2811_render(driver.cptr()) < 0 {\n\t\treturn ErrHardware\n\t}\n\n\treturn nil\n}\n\nfunc (driver *Driver) Render() error {\n\tif err := driver.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn driver.render()\n}\n\nfunc (driver *Driver) ready() (bool, error) {\n\tret := C.ws2811_dma_ready(driver.cptr())\n\n\tif ret < 0 {\n\t\treturn false, ErrHardware\n\t}\n\n\treturn ret != 0, nil\n}\n\nfunc (driver *Driver) Wait() error {\n\tvar err error\n\n\tfor {\n\t\tvar ready bool\n\t\tready, err = driver.ready()\n\t\tif ready || err != nil {\n\t\t\tbreak\n\t\t}\n\t\truntime.Gosched()\n\t}\n\n\treturn err\n}\n\nfunc (channel *Channel) Leds() []Led {\n\tif channel.Count == 0 {\n\t\treturn nil\n\t}\n\n\tsh := reflect.SliceHeader{\n\t\tData: uintptr(channel.leds),\n\t\tLen: int(channel.Count),\n\t\tCap: int(channel.Count),\n\t}\n\n\treturn *(*[]Led)(unsafe.Pointer(&sh))\n}\n\n\/\/ Helper functions\n\nfunc (led Led) R() uint8 {\n\treturn uint8((uint32(led) >> 16) & 0xff)\n}\n\nfunc (led Led) G() uint8 {\n\treturn uint8((uint32(led) >> 8) & 0xff)\n}\n\nfunc (led Led) B() uint8 {\n\treturn uint8(uint32(led) & 0xff)\n}\n\nfunc RGB(r, g, b uint8) Led {\n\treturn Led((uint32(r) << 16) | (uint32(g) << 8) | uint32(b))\n}\n<commit_msg>GCC Optimization<commit_after>package rpiws\n\n\/*\n#cgo CFLAGS: -O2 -Wall\n\n#include \"ws2811.h\"\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nconst (\n\tWS2811_TARGET_FREQ = C.WS2811_TARGET_FREQ\n\tRPI_PWM_CHANNELS = C.RPI_PWM_CHANNELS\n)\n\nvar (\n\tErrHardware = errors.New(\"Hardware error\")\n)\n\ntype Led uint32\n\ntype Driver struct {\n\tdevice unsafe.Pointer \/\/ Private\n\tFreq uint32\n\tDmanum int32\n\tChannel [RPI_PWM_CHANNELS]Channel\n}\n\ntype Channel struct {\n\tGpionum int32\n\tInvert int32\n\tCount int32\n\tBrightness int32\n\tleds unsafe.Pointer \/\/ use Leds() for access\n}\n\nfunc (driver *Driver) cptr() *C.ws2811_t {\n\treturn (*C.ws2811_t)(unsafe.Pointer(driver))\n}\n\nfunc (driver *Driver) Init() error {\n\tif ret, err := C.ws2811_init(driver.cptr()); ret < 0 {\n\t\tif err != nil {\n\t\t\t\/\/ errno is set\n\t\t\treturn err\n\t\t}\n\n\t\treturn errors.New(\"Initialization error\")\n\t}\n\n\treturn nil\n}\n\nfunc (driver *Driver) Fini() error {\n\tC.ws2811_fini(driver.cptr())\n\treturn nil\n}\n\nfunc (driver *Driver) render() error {\n\tif C.ws2811_render(driver.cptr()) < 0 {\n\t\treturn ErrHardware\n\t}\n\n\treturn nil\n}\n\nfunc (driver *Driver) Render() error {\n\tif err := driver.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn driver.render()\n}\n\nfunc (driver *Driver) ready() (bool, error) {\n\tret := C.ws2811_dma_ready(driver.cptr())\n\n\tif ret < 0 {\n\t\treturn false, ErrHardware\n\t}\n\n\treturn ret != 0, nil\n}\n\nfunc (driver *Driver) Wait() error {\n\tvar err error\n\n\tfor {\n\t\tvar ready bool\n\t\tready, err = driver.ready()\n\t\tif ready || err != nil {\n\t\t\tbreak\n\t\t}\n\t\truntime.Gosched()\n\t}\n\n\treturn err\n}\n\nfunc (channel *Channel) Leds() []Led {\n\tif channel.Count == 0 {\n\t\treturn nil\n\t}\n\n\tsh := reflect.SliceHeader{\n\t\tData: uintptr(channel.leds),\n\t\tLen: int(channel.Count),\n\t\tCap: int(channel.Count),\n\t}\n\n\treturn *(*[]Led)(unsafe.Pointer(&sh))\n}\n\n\/\/ Helper functions\n\nfunc (led Led) R() uint8 {\n\treturn uint8((uint32(led) >> 16) & 0xff)\n}\n\nfunc (led Led) G() uint8 {\n\treturn uint8((uint32(led) >> 8) & 0xff)\n}\n\nfunc (led Led) B() uint8 {\n\treturn uint8(uint32(led) & 0xff)\n}\n\nfunc RGB(r, g, b uint8) Led {\n\treturn Led((uint32(r) << 16) | (uint32(g) << 8) | uint32(b))\n}\n<|endoftext|>"} {"text":"<commit_before>package discover\n\n\/\/\n\/\/\n\/\/ TODO:\n\/\/ should be able to guess article link format statistically\n\/\/ handle\/allow subdomains (eg: www1.politicalbetting.com)\n\/\/ filter unwanted navlinks (eg \"mirror.co.uk\/all-about\/fred bloggs\")\n\/\/ HTTP error handling\n\/\/ multiple url formats (eg spectator has multiple cms's)\n\/\/ logging\n\nimport (\n\t\"code.google.com\/p\/cascadia\"\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype Logger interface {\n\tPrintf(format string, v ...interface{})\n}\n\ntype NullLogger struct{}\n\nfunc (l NullLogger) Printf(format string, v ...interface{}) {\n}\n\ntype DiscovererDef struct {\n\tName string\n\tURL string\n\tArtPat []string\n\tNavSel string\n\tBaseErrorThreshold int\n}\n\ntype Discoverer struct {\n\tName string\n\tStartURL url.URL\n\tArtPats []*regexp.Regexp\n\tNavLinkSel cascadia.Selector\n\tBaseErrorThreshold int\n\tStripFragments bool\n\tStripQuery bool\n\n\tErrorLog Logger\n\tInfoLog Logger\n\tStats struct {\n\t\tErrorCount int\n\t\tFetchCount int\n\t}\n}\n\nfunc NewDiscoverer(cfg DiscovererDef) (*Discoverer, error) {\n\tdisc := &Discoverer{}\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdisc.Name = cfg.Name\n\tdisc.StartURL = *u\n\tdisc.ArtPats = make([]*regexp.Regexp, 0, len(cfg.ArtPat))\n\tfor _, pat := range cfg.ArtPat {\n\t\tre, err := regexp.Compile(pat)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisc.ArtPats = append(disc.ArtPats, re)\n\t}\n\n\tif cfg.NavSel == \"\" {\n\t\tdisc.NavLinkSel = nil\n\t} else {\n\t\tsel, err := cascadia.Compile(cfg.NavSel)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisc.NavLinkSel = sel\n\t}\n\tdisc.BaseErrorThreshold = cfg.BaseErrorThreshold\n\n\t\/\/ defaults\n\tdisc.StripFragments = true\n\tdisc.StripQuery = true\n\tdisc.ErrorLog = NullLogger{}\n\tdisc.InfoLog = NullLogger{}\n\treturn disc, nil\n}\n\nfunc (disc *Discoverer) Run(client *http.Client) (LinkSet, error) {\n\n\tqueued := make(LinkSet) \/\/ nav pages to scan for article links\n\tseen := make(LinkSet) \/\/ nav pages we've scanned\n\tarts := make(LinkSet) \/\/ article links we've found so far\n\n\tqueued.Add(disc.StartURL)\n\n\tfor len(queued) > 0 {\n\t\tu := queued.Pop()\n\t\tseen.Add(u)\n\t\t\/\/\n\n\t\troot, err := disc.fetchAndParse(client, &u)\n\t\tif err != nil {\n\t\t\tdisc.ErrorLog.Printf(\"%s\\n\", err.Error())\n\t\t\tdisc.Stats.ErrorCount++\n\t\t\tif disc.Stats.ErrorCount > disc.BaseErrorThreshold+(disc.Stats.FetchCount\/10) {\n\t\t\t\treturn nil, errors.New(\"Error threshold exceeded\")\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tdisc.Stats.FetchCount++\n\n\t\tnavLinks, err := disc.findNavLinks(root)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor navLink, _ := range navLinks {\n\t\t\tif _, got := seen[navLink]; !got {\n\t\t\t\tqueued.Add(navLink)\n\t\t\t}\n\t\t}\n\n\t\tfoo, err := disc.findArticles(root)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tarts.Merge(foo)\n\n\t\tdisc.InfoLog.Printf(\"Visited %s, found %d articles\\n\", u.String(), len(foo))\n\t}\n\n\treturn arts, nil\n}\n\nfunc (disc *Discoverer) fetchAndParse(client *http.Client, pageURL *url.URL) (*html.Node, error) {\n\tresp, err := client.Get(pageURL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\terr = errors.New(fmt.Sprintf(\"HTTP code %d (%s)\", resp.StatusCode, pageURL.String()))\n\n\t\treturn nil, err\n\n\t}\n\n\troot, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn root, nil\n}\n\nvar aSel cascadia.Selector = cascadia.MustCompile(\"a\")\n\nfunc (disc *Discoverer) findArticles(root *html.Node) (LinkSet, error) {\n\tarts := make(LinkSet)\n\tfor _, a := range aSel.MatchAll(root) {\n\t\t\/\/ fetch url and extend to absolute\n\t\tlink, err := disc.StartURL.Parse(GetAttr(a, \"href\"))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif link.Host != disc.StartURL.Host {\n\t\t\tcontinue\n\t\t}\n\n\t\tfoo := link.RequestURI()\n\t\taccept := false\n\t\tfor _, pat := range disc.ArtPats {\n\t\t\tif pat.MatchString(foo) {\n\t\t\t\taccept = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !accept {\n\t\t\tcontinue\n\t\t}\n\n\t\tif disc.StripFragments {\n\t\t\tlink.Fragment = \"\"\n\t\t}\n\t\tif disc.StripQuery {\n\t\t\tlink.RawQuery = \"\"\n\t\t}\n\n\t\tarts[*link] = true\n\t}\n\treturn arts, nil\n}\n\nfunc (disc *Discoverer) findNavLinks(root *html.Node) (LinkSet, error) {\n\tnavLinks := make(LinkSet)\n\tif disc.NavLinkSel == nil {\n\t\treturn navLinks, nil\n\t}\n\tfor _, a := range disc.NavLinkSel.MatchAll(root) {\n\t\tlink, err := disc.StartURL.Parse(GetAttr(a, \"href\"))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif link.Host != disc.StartURL.Host {\n\t\t\tcontinue\n\t\t}\n\n\t\tlink.Fragment = \"\"\n\n\t\tnavLinks[*link] = true\n\t}\n\treturn navLinks, nil\n}\n\n\/\/ GetAttr retrieved the value of an attribute on a node.\n\/\/ Returns empty string if attribute doesn't exist.\nfunc GetAttr(n *html.Node, attr string) string {\n\tfor _, a := range n.Attr {\n\t\tif a.Key == attr {\n\t\t\treturn a.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ GetTextContent recursively fetches the text for a node\nfunc GetTextContent(n *html.Node) string {\n\tif n.Type == html.TextNode {\n\t\treturn n.Data\n\t}\n\ttxt := \"\"\n\tfor child := n.FirstChild; child != nil; child = child.NextSibling {\n\t\ttxt += GetTextContent(child)\n\t}\n\n\treturn txt\n}\n\n\/\/ CompressSpace reduces all whitespace sequences (space, tabs, newlines etc) in a string to a single space.\n\/\/ Leading\/trailing space is trimmed.\n\/\/ Has the effect of converting multiline strings to one line.\nfunc CompressSpace(s string) string {\n\tmultispacePat := regexp.MustCompile(`[\\s]+`)\n\ts = strings.TrimSpace(multispacePat.ReplaceAllLiteralString(s, \" \"))\n\treturn s\n}\n<commit_msg>add HostPat option to allow multiple domains<commit_after>package discover\n\n\/\/\n\/\/\n\/\/ TODO:\n\/\/ should be able to guess article link format statistically\n\/\/ handle\/allow subdomains (eg: www1.politicalbetting.com)\n\/\/ filter unwanted navlinks (eg \"mirror.co.uk\/all-about\/fred bloggs\")\n\/\/ HTTP error handling\n\/\/ multiple url formats (eg spectator has multiple cms's)\n\/\/ logging\n\nimport (\n\t\"code.google.com\/p\/cascadia\"\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype Logger interface {\n\tPrintf(format string, v ...interface{})\n}\n\ntype NullLogger struct{}\n\nfunc (l NullLogger) Printf(format string, v ...interface{}) {\n}\n\ntype DiscovererDef struct {\n\tName string\n\tURL string\n\tArtPat []string\n\tNavSel string\n\t\/\/ BaseErrorThreshold is starting number of http errors to accept before\n\t\/\/ bailing out.\n\t\/\/ error threshold formula: base + 10% of successful request count\n\tBaseErrorThreshold int\n\n\t\/\/ Hostpat is a regex matching accepted domains\n\t\/\/ if empty, reject everything on a different domain\n\tHostPat string\n}\n\ntype Discoverer struct {\n\tName string\n\tStartURL url.URL\n\tArtPats []*regexp.Regexp\n\tNavLinkSel cascadia.Selector\n\tBaseErrorThreshold int\n\tStripFragments bool\n\tStripQuery bool\n\tHostPat *regexp.Regexp\n\n\tErrorLog Logger\n\tInfoLog Logger\n\tStats struct {\n\t\tErrorCount int\n\t\tFetchCount int\n\t}\n}\n\nfunc NewDiscoverer(cfg DiscovererDef) (*Discoverer, error) {\n\tdisc := &Discoverer{}\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdisc.Name = cfg.Name\n\tdisc.StartURL = *u\n\tdisc.ArtPats = make([]*regexp.Regexp, 0, len(cfg.ArtPat))\n\tfor _, pat := range cfg.ArtPat {\n\t\tre, err := regexp.Compile(pat)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisc.ArtPats = append(disc.ArtPats, re)\n\t}\n\n\tif cfg.NavSel == \"\" {\n\t\tdisc.NavLinkSel = nil\n\t} else {\n\t\tsel, err := cascadia.Compile(cfg.NavSel)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisc.NavLinkSel = sel\n\t}\n\tdisc.BaseErrorThreshold = cfg.BaseErrorThreshold\n\n\tif cfg.HostPat != \"\" {\n\t\tre, err := regexp.Compile(cfg.HostPat)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisc.HostPat = re\n\t}\n\n\t\/\/ defaults\n\tdisc.StripFragments = true\n\tdisc.StripQuery = true\n\tdisc.ErrorLog = NullLogger{}\n\tdisc.InfoLog = NullLogger{}\n\treturn disc, nil\n}\n\nfunc (disc *Discoverer) Run(client *http.Client) (LinkSet, error) {\n\n\tqueued := make(LinkSet) \/\/ nav pages to scan for article links\n\tseen := make(LinkSet) \/\/ nav pages we've scanned\n\tarts := make(LinkSet) \/\/ article links we've found so far\n\n\tqueued.Add(disc.StartURL)\n\n\tfor len(queued) > 0 {\n\t\tu := queued.Pop()\n\t\tseen.Add(u)\n\t\t\/\/\n\n\t\troot, err := disc.fetchAndParse(client, &u)\n\t\tif err != nil {\n\t\t\tdisc.ErrorLog.Printf(\"%s\\n\", err.Error())\n\t\t\tdisc.Stats.ErrorCount++\n\t\t\tif disc.Stats.ErrorCount > disc.BaseErrorThreshold+(disc.Stats.FetchCount\/10) {\n\t\t\t\treturn nil, errors.New(\"Error threshold exceeded\")\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tdisc.Stats.FetchCount++\n\n\t\tnavLinks, err := disc.findNavLinks(root)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor navLink, _ := range navLinks {\n\t\t\tif _, got := seen[navLink]; !got {\n\t\t\t\tqueued.Add(navLink)\n\t\t\t}\n\t\t}\n\n\t\tfoo, err := disc.findArticles(root)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tarts.Merge(foo)\n\n\t\tdisc.InfoLog.Printf(\"Visited %s, found %d articles\\n\", u.String(), len(foo))\n\t}\n\n\treturn arts, nil\n}\n\nfunc (disc *Discoverer) fetchAndParse(client *http.Client, pageURL *url.URL) (*html.Node, error) {\n\tresp, err := client.Get(pageURL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\terr = errors.New(fmt.Sprintf(\"HTTP code %d (%s)\", resp.StatusCode, pageURL.String()))\n\n\t\treturn nil, err\n\n\t}\n\n\troot, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn root, nil\n}\n\nvar aSel cascadia.Selector = cascadia.MustCompile(\"a\")\n\nfunc (disc *Discoverer) findArticles(root *html.Node) (LinkSet, error) {\n\tarts := make(LinkSet)\n\tfor _, a := range aSel.MatchAll(root) {\n\t\t\/\/ fetch url and extend to absolute\n\t\tlink, err := disc.StartURL.Parse(GetAttr(a, \"href\"))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !disc.isHostGood(link.Host) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfoo := link.RequestURI()\n\t\taccept := false\n\t\tfor _, pat := range disc.ArtPats {\n\t\t\tif pat.MatchString(foo) {\n\t\t\t\taccept = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !accept {\n\t\t\tcontinue\n\t\t}\n\n\t\tif disc.StripFragments {\n\t\t\tlink.Fragment = \"\"\n\t\t}\n\t\tif disc.StripQuery {\n\t\t\tlink.RawQuery = \"\"\n\t\t}\n\n\t\tarts[*link] = true\n\t}\n\treturn arts, nil\n}\n\nfunc (disc *Discoverer) findNavLinks(root *html.Node) (LinkSet, error) {\n\tnavLinks := make(LinkSet)\n\tif disc.NavLinkSel == nil {\n\t\treturn navLinks, nil\n\t}\n\tfor _, a := range disc.NavLinkSel.MatchAll(root) {\n\t\tlink, err := disc.StartURL.Parse(GetAttr(a, \"href\"))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !disc.isHostGood(link.Host) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlink.Fragment = \"\"\n\n\t\tnavLinks[*link] = true\n\t}\n\treturn navLinks, nil\n}\n\n\/\/ is host domain one we'll accept?\nfunc (disc *Discoverer) isHostGood(host string) bool {\n\tif disc.HostPat != nil {\n\t\treturn disc.HostPat.MatchString(host)\n\t}\n\treturn host == disc.StartURL.Host\n}\n\n\/\/ GetAttr retrieved the value of an attribute on a node.\n\/\/ Returns empty string if attribute doesn't exist.\nfunc GetAttr(n *html.Node, attr string) string {\n\tfor _, a := range n.Attr {\n\t\tif a.Key == attr {\n\t\t\treturn a.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ GetTextContent recursively fetches the text for a node\nfunc GetTextContent(n *html.Node) string {\n\tif n.Type == html.TextNode {\n\t\treturn n.Data\n\t}\n\ttxt := \"\"\n\tfor child := n.FirstChild; child != nil; child = child.NextSibling {\n\t\ttxt += GetTextContent(child)\n\t}\n\n\treturn txt\n}\n\n\/\/ CompressSpace reduces all whitespace sequences (space, tabs, newlines etc) in a string to a single space.\n\/\/ Leading\/trailing space is trimmed.\n\/\/ Has the effect of converting multiline strings to one line.\nfunc CompressSpace(s string) string {\n\tmultispacePat := regexp.MustCompile(`[\\s]+`)\n\ts = strings.TrimSpace(multispacePat.ReplaceAllLiteralString(s, \" \"))\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The RS232 package lets you access old serial junk from go\npackage rs232\n\n\/*\n#include <stdlib.h>\n#include <fcntl.h>\n#include <termios.h>\n*\/\nimport \"C\"\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ This is your serial port handle.\ntype SerialPort struct {\n\tport *os.File\n}\n\nfunc baudConversion(rate int) (flag _Ctype_speed_t) {\n\tif C.B9600 != 9600 {\n\t\tpanic(\"Baud rates may not map directly.\")\n\t}\n\treturn _Ctype_speed_t(rate)\n}\n\n\/\/ SerConf represents the basic serial configuration to provide to OpenPort.\ntype SerConf int\n\nconst (\n\tS_8N1 = iota\n\tS_7E1\n\tS_7O1\n)\n\n\/\/ Opens and returns a non-blocking serial port.\n\/\/ The device, baud rate, and SerConf is specified.\n\/\/\n\/\/ Example: rs232.OpenPort(\"\/dev\/ttyS0\", 115200, rs232.S_8N1)\nfunc OpenPort(port string, baudRate int, serconf SerConf) (rv SerialPort, err error) {\n\tf, open_err := os.OpenFile(port,\n\t\tos.O_RDWR|os.O_NOCTTY|os.O_NDELAY,\n\t\t0666)\n\tif open_err != nil {\n\t\terr = open_err\n\t\treturn\n\t}\n\trv.port = f\n\n\tfd := rv.port.Fd()\n\n\tvar options C.struct_termios\n\tif C.tcgetattr(C.int(fd), &options) < 0 {\n\t\tpanic(\"tcgetattr failed\")\n\t}\n\n\tif C.cfsetispeed(&options, baudConversion(baudRate)) < 0 {\n\t\tpanic(\"cfsetispeed failed\")\n\t}\n\tif C.cfsetospeed(&options, baudConversion(baudRate)) < 0 {\n\t\tpanic(\"cfsetospeed failed\")\n\t}\n\tswitch serconf {\n\tcase S_8N1:\n\t\t{\n\t\t\toptions.c_cflag &^= C.PARENB\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS8\n\t\t}\n\tcase S_7E1:\n\t\t{\n\t\t\toptions.c_cflag |= C.PARENB\n\t\t\toptions.c_cflag &^= C.PARODD\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS7\n\t\t}\n\tcase S_7O1:\n\t\t{\n\t\t\toptions.c_cflag |= C.PARENB\n\t\t\toptions.c_cflag |= C.PARODD\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS7\n\t\t}\n\t}\n\t\/\/ Local\n\toptions.c_cflag |= (C.CLOCAL | C.CREAD)\n\t\/\/ no hardware flow control\n\toptions.c_cflag &^= C.CRTSCTS\n\n\tif C.tcsetattr(C.int(fd), C.TCSANOW, &options) < 0 {\n\t\tpanic(\"tcsetattr failed\")\n\t}\n\n\tif syscall.SetNonblock(fd, false) != nil {\n\t\tpanic(\"Error disabling blocking\")\n\t}\n\n\treturn\n}\n\n\/\/ Read from the port.\nfunc (port *SerialPort) Read(p []byte) (n int, err error) {\n\treturn port.port.Read(p)\n}\n\n\/\/ Write to the port.\nfunc (port *SerialPort) Write(p []byte) (n int, err error) {\n\treturn port.port.Write(p)\n}\n\n\/\/ Close the port.\nfunc (port *SerialPort) Close() error {\n\treturn port.port.Close()\n}\n<commit_msg>Updates for the next weekly.<commit_after>\/\/ The RS232 package lets you access old serial junk from go\npackage rs232\n\n\/*\n#include <stdlib.h>\n#include <fcntl.h>\n#include <termios.h>\n*\/\nimport \"C\"\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ This is your serial port handle.\ntype SerialPort struct {\n\tport *os.File\n}\n\nfunc baudConversion(rate int) (flag _Ctype_speed_t) {\n\tif C.B9600 != 9600 {\n\t\tpanic(\"Baud rates may not map directly.\")\n\t}\n\treturn _Ctype_speed_t(rate)\n}\n\n\/\/ SerConf represents the basic serial configuration to provide to OpenPort.\ntype SerConf int\n\nconst (\n\tS_8N1 = iota\n\tS_7E1\n\tS_7O1\n)\n\n\/\/ Opens and returns a non-blocking serial port.\n\/\/ The device, baud rate, and SerConf is specified.\n\/\/\n\/\/ Example: rs232.OpenPort(\"\/dev\/ttyS0\", 115200, rs232.S_8N1)\nfunc OpenPort(port string, baudRate int, serconf SerConf) (rv SerialPort, err error) {\n\tf, open_err := os.OpenFile(port,\n\t\tsyscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NDELAY,\n\t\t0666)\n\tif open_err != nil {\n\t\terr = open_err\n\t\treturn\n\t}\n\trv.port = f\n\n\tfd := rv.port.Fd()\n\n\tvar options C.struct_termios\n\tif C.tcgetattr(C.int(fd), &options) < 0 {\n\t\tpanic(\"tcgetattr failed\")\n\t}\n\n\tif C.cfsetispeed(&options, baudConversion(baudRate)) < 0 {\n\t\tpanic(\"cfsetispeed failed\")\n\t}\n\tif C.cfsetospeed(&options, baudConversion(baudRate)) < 0 {\n\t\tpanic(\"cfsetospeed failed\")\n\t}\n\tswitch serconf {\n\tcase S_8N1:\n\t\t{\n\t\t\toptions.c_cflag &^= C.PARENB\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS8\n\t\t}\n\tcase S_7E1:\n\t\t{\n\t\t\toptions.c_cflag |= C.PARENB\n\t\t\toptions.c_cflag &^= C.PARODD\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS7\n\t\t}\n\tcase S_7O1:\n\t\t{\n\t\t\toptions.c_cflag |= C.PARENB\n\t\t\toptions.c_cflag |= C.PARODD\n\t\t\toptions.c_cflag &^= C.CSTOPB\n\t\t\toptions.c_cflag &^= C.CSIZE\n\t\t\toptions.c_cflag |= C.CS7\n\t\t}\n\t}\n\t\/\/ Local\n\toptions.c_cflag |= (C.CLOCAL | C.CREAD)\n\t\/\/ no hardware flow control\n\toptions.c_cflag &^= C.CRTSCTS\n\n\tif C.tcsetattr(C.int(fd), C.TCSANOW, &options) < 0 {\n\t\tpanic(\"tcsetattr failed\")\n\t}\n\n\tif syscall.SetNonblock(int(fd), false) != nil {\n\t\tpanic(\"Error disabling blocking\")\n\t}\n\n\treturn\n}\n\n\/\/ Read from the port.\nfunc (port *SerialPort) Read(p []byte) (n int, err error) {\n\treturn port.port.Read(p)\n}\n\n\/\/ Write to the port.\nfunc (port *SerialPort) Write(p []byte) (n int, err error) {\n\treturn port.port.Write(p)\n}\n\n\/\/ Close the port.\nfunc (port *SerialPort) Close() error {\n\treturn port.port.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"github.com\/justone\/pmb\/api\"\n\ntype IntroducerCommand struct {\n\tName string `short:\"n\" long:\"name\" description:\"Name of this introducer.\" default:\"autogenerated\"`\n}\n\nvar introducerCommand IntroducerCommand\n\nfunc (x *IntroducerCommand) Execute(args []string) error {\n\tbus := pmb.GetPMB(urisFromOpts(globalOptions))\n\n\tconn, err := bus.GetConnection(\"introducer\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tintroConn, err := bus.GetIntroConnection(\"introducer\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runIntroducer(bus, conn, introConn)\n}\n\nfunc init() {\n\tparser.AddCommand(\"introducer\",\n\t\t\"Run an introducer.\",\n\t\t\"\",\n\t\t&introducerCommand)\n}\n\nfunc runIntroducer(bus *pmb.PMB, conn *pmb.Connection, introConn *pmb.Connection) error {\n\tfor {\n\t\tselect {\n\t\tcase message := <-conn.In:\n\t\t\tif message.Contents[\"type\"].(string) == \"CopyData\" {\n\t\t\t\tcopyToClipboard(message.Contents[\"data\"].(string))\n\n\t\t\t\tdata := map[string]interface{}{\n\t\t\t\t\t\"type\": \"DataCopied\",\n\t\t\t\t\t\"origin\": message.Contents[\"id\"].(string),\n\t\t\t\t}\n\t\t\t\tconn.Out <- pmb.Message{Contents: data}\n\t\t\t}\n\n\t\tcase message := <-introConn.In:\n\t\t\tif message.Contents[\"type\"].(string) == \"RequestAuth\" {\n\t\t\t\t\/\/ copy primary uri to clipboard\n\t\t\t\tcopyToClipboard(bus.PrimaryURI())\n\t\t\t}\n\n\t\t\t\/\/ any other message type is an error and ignored\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>use specified name for introducer<commit_after>package main\n\nimport \"github.com\/justone\/pmb\/api\"\n\ntype IntroducerCommand struct {\n\tName string `short:\"n\" long:\"name\" description:\"Name of this introducer.\" default:\"introducer\"`\n}\n\nvar introducerCommand IntroducerCommand\n\nfunc (x *IntroducerCommand) Execute(args []string) error {\n\tbus := pmb.GetPMB(urisFromOpts(globalOptions))\n\n\tconn, err := bus.GetConnection(introducerCommand.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tintroConn, err := bus.GetIntroConnection(\"introducer\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runIntroducer(bus, conn, introConn)\n}\n\nfunc init() {\n\tparser.AddCommand(\"introducer\",\n\t\t\"Run an introducer.\",\n\t\t\"\",\n\t\t&introducerCommand)\n}\n\nfunc runIntroducer(bus *pmb.PMB, conn *pmb.Connection, introConn *pmb.Connection) error {\n\tfor {\n\t\tselect {\n\t\tcase message := <-conn.In:\n\t\t\tif message.Contents[\"type\"].(string) == \"CopyData\" {\n\t\t\t\tcopyToClipboard(message.Contents[\"data\"].(string))\n\n\t\t\t\tdata := map[string]interface{}{\n\t\t\t\t\t\"type\": \"DataCopied\",\n\t\t\t\t\t\"origin\": message.Contents[\"id\"].(string),\n\t\t\t\t}\n\t\t\t\tconn.Out <- pmb.Message{Contents: data}\n\t\t\t}\n\n\t\tcase message := <-introConn.In:\n\t\t\tif message.Contents[\"type\"].(string) == \"RequestAuth\" {\n\t\t\t\t\/\/ copy primary uri to clipboard\n\t\t\t\tcopyToClipboard(bus.PrimaryURI())\n\t\t\t}\n\n\t\t\t\/\/ any other message type is an error and ignored\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package iocap\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestReader(t *testing.T) {\n\t\/\/ Create some random data for our reader.\n\tdata := make([]byte, 512)\n\tif _, err := rand.Read(data); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tbuf := bytes.NewBuffer(data)\n\n\t\/\/ Create the Reader with a rate limit applied.\n\tr := NewReader(buf, RateOpts{Interval: 100 * time.Millisecond, Size: 128})\n\tout := make([]byte, 512)\n\n\t\/\/ Record the start time and execute the read.\n\tstart := time.Now()\n\tn, err := r.Read(out)\n\n\t\/\/ Check that we actually rate limited the read. 300ms because\n\t\/\/ initially we can read 128 bytes, then 3 bucket drains block\n\t\/\/ at 100ms a pop.\n\tif d := time.Since(start); d < 300*time.Millisecond {\n\t\tt.Fatalf(\"read returned to quickly in %s\", d)\n\t}\n\n\t\/\/ Check the return values and data.\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif n != 512 {\n\t\tt.Fatalf(\"expect 512, got: %d\", n)\n\t}\n\tif !bytes.Equal(data, out) {\n\t\tt.Fatal(\"unexpected data read\")\n\t}\n}\n\nfunc TestWriter(t *testing.T) {\n\t\/\/ Create some random data to write.\n\tdata := make([]byte, 512)\n\tif _, err := rand.Read(data); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\t\/\/ Create the writer with an applied rate limit.\n\tbuf := new(bytes.Buffer)\n\tw := NewWriter(buf, RateOpts{Interval: 100 * time.Millisecond, Size: 128})\n\n\t\/\/ Record the start time and perform the write.\n\tstart := time.Now()\n\tn, err := w.Write(data)\n\n\t\/\/ Check that we rate limited the write.\n\tif d := time.Since(start); d < 300*time.Millisecond {\n\t\tt.Fatalf(\"write returned too quickly in %s\", d)\n\t}\n\n\t\/\/ Check errors and data values.\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif n != 512 {\n\t\tt.Fatalf(\"expect 512, got: %d\", n)\n\t}\n\tif !bytes.Equal(buf.Bytes(), data) {\n\t\tt.Fatal(\"unexpected data written\")\n\t}\n}\n\nfunc TestGroup(t *testing.T) {\n\t\/\/ Create the rate limiting group.\n\tg := NewGroup(RateOpts{Interval: 100 * time.Millisecond, Size: 8})\n\n\t\/\/ Create two buffers; one for the reader, one for the writer. The\n\t\/\/ idea is that these are completely separate buffers, maybe completely\n\t\/\/ unrelated in terms of the operation being performed on them.\n\t\/\/ Regardless, we want to ensure that operations across both share the\n\t\/\/ rate limit of the group.\n\tbufW := new(bytes.Buffer)\n\tbufR := new(bytes.Buffer)\n\n\t\/\/ Set up the data for the reader\/writer\n\tin := []byte(\"hello world!\")\n\tbufR.Write(in)\n\tout := make([]byte, len(in))\n\n\t\/\/ Create a reader and writer in the group against the buffers.\n\tw := g.NewWriter(bufW)\n\tr := g.NewReader(bufR)\n\n\t\/\/ Mark the start time and perform the read and write in parallel.\n\tstart := time.Now()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif _, err := w.Write(in); err != nil {\n\t\t\tt.Fatalf(\"err: %v\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif _, err := r.Read(out); err != nil {\n\t\t\tt.Fatalf(\"err: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Wait for both operations to finish.\n\twg.Wait()\n\n\t\/\/ Make sure we blocked for at least 2 intervals. If each operation\n\t\/\/ had its own 8B\/s limit, we would finish in ~100ms, but because we\n\t\/\/ are sharing the limit, pushing 24 bytes through is going to require\n\t\/\/ two bucket drains before it completes.\n\tif d := time.Since(start); d < 200*time.Millisecond {\n\t\tt.Fatalf(\"finished too quickly in %s\", d)\n\t}\n}\n\nfunc TestPerSecond(t *testing.T) {\n\tro := PerSecond(128)\n\tif ro.Interval != time.Second {\n\t\tt.Fatalf(\"expect 1s, got: %s\", ro.Interval)\n\t}\n\tif ro.Size != 128 {\n\t\tt.Fatalf(\"expect 128, got: %d\", ro.Size)\n\t}\n}\n\nfunc ExampleReader() {\n\t\/\/ Create a buffer to read from.\n\tbuf := bytes.NewBufferString(\"hello world!\")\n\n\t\/\/ Create the rate limited reader.\n\trate := PerSecond(8) \/\/ 8B\/s\n\tr := NewReader(buf, rate)\n\n\t\/\/ Read from the reader.\n\tout := make([]byte, buf.Len())\n\tn, err := r.Read(out)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Println(n, string(out))\n\t\/\/ Output: 12 hello world!\n}\n\nfunc ExampleWriter() {\n\t\/\/ Create the buffer to write to.\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Create the rate limited writer.\n\trate := PerSecond(8) \/\/ 8B\/s\n\tr := NewWriter(buf, rate)\n\n\t\/\/ Write data into the writer.\n\tn, err := r.Write([]byte(\"hello world!\"))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Println(n, buf.String())\n\t\/\/ Output: 12 hello world!\n}\n<commit_msg>Update examples<commit_after>package iocap\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestReader(t *testing.T) {\n\t\/\/ Create some random data for our reader.\n\tdata := make([]byte, 512)\n\tif _, err := rand.Read(data); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tbuf := bytes.NewBuffer(data)\n\n\t\/\/ Create the Reader with a rate limit applied.\n\tr := NewReader(buf, RateOpts{Interval: 100 * time.Millisecond, Size: 128})\n\tout := make([]byte, 512)\n\n\t\/\/ Record the start time and execute the read.\n\tstart := time.Now()\n\tn, err := r.Read(out)\n\n\t\/\/ Check that we actually rate limited the read. 300ms because\n\t\/\/ initially we can read 128 bytes, then 3 bucket drains block\n\t\/\/ at 100ms a pop.\n\tif d := time.Since(start); d < 300*time.Millisecond {\n\t\tt.Fatalf(\"read returned to quickly in %s\", d)\n\t}\n\n\t\/\/ Check the return values and data.\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif n != 512 {\n\t\tt.Fatalf(\"expect 512, got: %d\", n)\n\t}\n\tif !bytes.Equal(data, out) {\n\t\tt.Fatal(\"unexpected data read\")\n\t}\n}\n\nfunc TestWriter(t *testing.T) {\n\t\/\/ Create some random data to write.\n\tdata := make([]byte, 512)\n\tif _, err := rand.Read(data); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\t\/\/ Create the writer with an applied rate limit.\n\tbuf := new(bytes.Buffer)\n\tw := NewWriter(buf, RateOpts{Interval: 100 * time.Millisecond, Size: 128})\n\n\t\/\/ Record the start time and perform the write.\n\tstart := time.Now()\n\tn, err := w.Write(data)\n\n\t\/\/ Check that we rate limited the write.\n\tif d := time.Since(start); d < 300*time.Millisecond {\n\t\tt.Fatalf(\"write returned too quickly in %s\", d)\n\t}\n\n\t\/\/ Check errors and data values.\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif n != 512 {\n\t\tt.Fatalf(\"expect 512, got: %d\", n)\n\t}\n\tif !bytes.Equal(buf.Bytes(), data) {\n\t\tt.Fatal(\"unexpected data written\")\n\t}\n}\n\nfunc TestGroup(t *testing.T) {\n\t\/\/ Create the rate limiting group.\n\tg := NewGroup(RateOpts{Interval: 100 * time.Millisecond, Size: 8})\n\n\t\/\/ Create two buffers; one for the reader, one for the writer. The\n\t\/\/ idea is that these are completely separate buffers, maybe completely\n\t\/\/ unrelated in terms of the operation being performed on them.\n\t\/\/ Regardless, we want to ensure that operations across both share the\n\t\/\/ rate limit of the group.\n\tbufW := new(bytes.Buffer)\n\tbufR := new(bytes.Buffer)\n\n\t\/\/ Set up the data for the reader\/writer\n\tin := []byte(\"hello world!\")\n\tbufR.Write(in)\n\tout := make([]byte, len(in))\n\n\t\/\/ Create a reader and writer in the group against the buffers.\n\tw := g.NewWriter(bufW)\n\tr := g.NewReader(bufR)\n\n\t\/\/ Mark the start time and perform the read and write in parallel.\n\tstart := time.Now()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif _, err := w.Write(in); err != nil {\n\t\t\tt.Fatalf(\"err: %v\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif _, err := r.Read(out); err != nil {\n\t\t\tt.Fatalf(\"err: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Wait for both operations to finish.\n\twg.Wait()\n\n\t\/\/ Make sure we blocked for at least 2 intervals. If each operation\n\t\/\/ had its own 8B\/s limit, we would finish in ~100ms, but because we\n\t\/\/ are sharing the limit, pushing 24 bytes through is going to require\n\t\/\/ two bucket drains before it completes.\n\tif d := time.Since(start); d < 200*time.Millisecond {\n\t\tt.Fatalf(\"finished too quickly in %s\", d)\n\t}\n}\n\nfunc TestPerSecond(t *testing.T) {\n\tro := PerSecond(128)\n\tif ro.Interval != time.Second {\n\t\tt.Fatalf(\"expect 1s, got: %s\", ro.Interval)\n\t}\n\tif ro.Size != 128 {\n\t\tt.Fatalf(\"expect 128, got: %d\", ro.Size)\n\t}\n}\n\nfunc ExampleReader() {\n\t\/\/ Create a buffer to read from.\n\tbuf := bytes.NewBufferString(\"hello world!\")\n\n\t\/\/ Create the rate limited reader.\n\trate := PerSecond(128 * 1024) \/\/ 128K\/s\n\tr := NewReader(buf, rate)\n\n\t\/\/ Read from the reader.\n\tout := make([]byte, buf.Len())\n\tn, err := r.Read(out)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Println(n, string(out))\n\t\/\/ Output: 12 hello world!\n}\n\nfunc ExampleWriter() {\n\t\/\/ Create the buffer to write to.\n\tbuf := new(bytes.Buffer)\n\n\t\/\/ Create the rate limited writer.\n\trate := PerSecond(128 * 1024) \/\/ 128K\/s\n\tr := NewWriter(buf, rate)\n\n\t\/\/ Write data into the writer.\n\tn, err := r.Write([]byte(\"hello world!\"))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Println(n, buf.String())\n\t\/\/ Output: 12 hello world!\n}\n\nfunc ExampleGroup() {\n\t\/\/ Create a rate limiting group.\n\trate := PerSecond(128 * 1024) \/\/ 128K\/s\n\tg := NewGroup(rate)\n\n\t\/\/ Create a new reader and writer on the group.\n\tbuf := new(bytes.Buffer)\n\tr := g.NewReader(buf)\n\tw := g.NewWriter(buf)\n\n\t\/\/ Reader and writer are rate limited together\n\tn, err := w.Write([]byte(\"hello world!\"))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tout := make([]byte, n)\n\t_, err = r.Read(out)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Println(string(out))\n\t\/\/ Output: hello world!\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/hanwen\/termite\/termite\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\thome := os.Getenv(\"HOME\")\n\tcachedir := flag.String(\"cachedir\",\n\t\tfilepath.Join(home, \".cache\", \"termite-master\"), \"content cache\")\n\tworkers := flag.String(\"workers\", \"\", \"comma separated list of worker addresses\")\n\tcoordinator := flag.String(\"coordinator\", \"localhost:1233\",\n\t\t\"address of coordinator. Overrides -workers\")\n\tsocket := flag.String(\"socket\", \".termite-socket\", \"socket to listen for commands\")\n\texclude := flag.String(\"exclude\", \"sys,proc,dev,selinux,cgroup\", \"prefixes to not export.\")\n\tsecretFile := flag.String(\"secret\", \"secret.txt\", \"file containing password.\")\n\tsrcRoot := flag.String(\"sourcedir\", \"\", \"root of corresponding source directory\")\n\tjobs := flag.Int(\"jobs\", 1, \"number of jobs to run\")\n\tport := flag.Int(\"port\", 1237, \"http status port\")\n\thouseHoldPeriod := flag.Float64(\"time.household\", 60.0, \"how often to do house hold tasks.\")\n\tkeepAlive := flag.Float64(\"time.keepalive\", 60.0, \"for how long to keep workers reserved.\")\n\n\tflag.Parse()\n\n\tlog.SetPrefix(\"M\")\n\n\tsecret, err := ioutil.ReadFile(*secretFile)\n\tif err != nil {\n\t\tlog.Fatal(\"ReadFile\", err)\n\t}\n\n\tworkerList := strings.Split(*workers, \",\")\n\texcludeList := strings.Split(*exclude, \",\")\n\tc := termite.NewContentCache(*cachedir)\n\tmaster := termite.NewMaster(\n\t\tc, *coordinator, workerList, secret, excludeList, *jobs)\n\tmaster.SetSrcRoot(*srcRoot)\n\tmaster.SetKeepAlive(*keepAlive, *houseHoldPeriod)\n\n\tlog.Println(termite.Version())\n\t\n\tgo master.ServeHTTP(*port)\n\tmaster.Start(*socket)\n}\n<commit_msg>Add usr\/lib\/locale-archive to the exclusion list.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/hanwen\/termite\/termite\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\thome := os.Getenv(\"HOME\")\n\tcachedir := flag.String(\"cachedir\",\n\t\tfilepath.Join(home, \".cache\", \"termite-master\"), \"content cache\")\n\tworkers := flag.String(\"workers\", \"\", \"comma separated list of worker addresses\")\n\tcoordinator := flag.String(\"coordinator\", \"localhost:1233\",\n\t\t\"address of coordinator. Overrides -workers\")\n\tsocket := flag.String(\"socket\", \".termite-socket\", \"socket to listen for commands\")\n\texclude := flag.String(\"exclude\",\n\t\t\"usr\/lib\/locale\/locale-archive,sys,proc,dev,selinux,cgroup\", \"prefixes to not export.\")\n\tsecretFile := flag.String(\"secret\", \"secret.txt\", \"file containing password.\")\n\tsrcRoot := flag.String(\"sourcedir\", \"\", \"root of corresponding source directory\")\n\tjobs := flag.Int(\"jobs\", 1, \"number of jobs to run\")\n\tport := flag.Int(\"port\", 1237, \"http status port\")\n\thouseHoldPeriod := flag.Float64(\"time.household\", 60.0, \"how often to do house hold tasks.\")\n\tkeepAlive := flag.Float64(\"time.keepalive\", 60.0, \"for how long to keep workers reserved.\")\n\n\tflag.Parse()\n\n\tlog.SetPrefix(\"M\")\n\n\tsecret, err := ioutil.ReadFile(*secretFile)\n\tif err != nil {\n\t\tlog.Fatal(\"ReadFile\", err)\n\t}\n\n\tworkerList := strings.Split(*workers, \",\")\n\texcludeList := strings.Split(*exclude, \",\")\n\tc := termite.NewContentCache(*cachedir)\n\tmaster := termite.NewMaster(\n\t\tc, *coordinator, workerList, secret, excludeList, *jobs)\n\tmaster.SetSrcRoot(*srcRoot)\n\tmaster.SetKeepAlive(*keepAlive, *houseHoldPeriod)\n\n\tlog.Println(termite.Version())\n\t\n\tgo master.ServeHTTP(*port)\n\tmaster.Start(*socket)\n}\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n)\n\n\/\/ Registry contains all known resources\nvar Registry = make(map[string]RegistryItem)\n\n\/\/ provider is used to create new resources from an HCL AST object item\ntype provider func(title string, item *ast.ObjectItem) (Resource, error)\n\n\/\/ RegistryItem type represents an item from the registry\ntype RegistryItem struct {\n\t\/\/ Name of the resource type\n\tName string\n\n\t\/\/ Short desription of the resource\n\tDescription string\n\n\t\/\/ Resource provider\n\tProvider provider\n}\n\n\/\/ Register adds a resource type to the registry\nfunc Register(item RegistryItem) error {\n\t_, ok := Registry[item.Name]\n\tif ok {\n\t\treturn fmt.Errorf(\"Resource type '%s' is already registered\", item.Name)\n\t}\n\n\tRegistry[item.Name] = item\n\n\treturn nil\n}\n\n\/\/ Resource is the interface type for resources\ntype Resource interface {\n\t\/\/ ResourceID returns the unique identifier of a resource\n\tResourceID() string\n\n\t\/\/ ResourceType returns the type of the resource\n\tResourceType() string\n\n\t\/\/ ResourceTitle returns the title of the resource\n\tResourceTitle() string\n\n\t\/\/ Returns the resources before which this resource shoud be processed\n\tWantBefore() []string\n\n\t\/\/ Returns the resources after which this resource should be processed\n\tWantAfter() []string\n\n\t\/\/ Evaluates the resource\n\tEvaluate() (State, error)\n\n\t\/\/ Creates the resource\n\tCreate(w io.Writer) error\n\n\t\/\/ Deletes the resource\n\tDelete(w io.Writer) error\n\n\t\/\/ Updates the resource\n\tUpdate(w io.Writer) error\n}\n\n\/\/ BaseResource is the base resource type for all resources\n\/\/ The purpose of this type is to be embedded into other resources\n\/\/ Partially implements the Resource interface\ntype BaseResource struct {\n\t\/\/ Type of the resource\n\tType string `json:\"-\"`\n\n\t\/\/ Title of the resource\n\tTitle string `json:\"-\"`\n\n\t\/\/ Desired state of the resource\n\tState string `hcl:\"state\" json:\"state\"`\n\n\t\/\/ Resources before which this resource should be processed\n\tBefore []string `hcl:\"before\" json:\"before,omitempty\"`\n\n\t\/\/ Resources after which this resource should be processed\n\tAfter []string `hcl:\"after\" json:\"after,omitempty\"`\n}\n\n\/\/ ResourceID returns the unique resource id\nfunc (b *BaseResource) ResourceID() string {\n\treturn fmt.Sprintf(\"%s[%s]\", b.Type, b.Title)\n}\n\n\/\/ ResourceType returns the resource type\nfunc (b *BaseResource) ResourceType() string {\n\treturn b.Type\n}\n\n\/\/ ResourceTitle returns the resource title\nfunc (b *BaseResource) ResourceTitle() string {\n\treturn b.Title\n}\n\n\/\/ WantBefore returns the resources before which this resource\n\/\/ should be processed\nfunc (b *BaseResource) WantBefore() []string {\n\treturn b.Before\n}\n\n\/\/ WantAfter returns the resources after which this resource\n\/\/ should be processed\nfunc (b *BaseResource) WantAfter() []string {\n\treturn b.After\n}\n\n\/\/ Printf works just like fmt.Printf except that it writes to the\n\/\/ given resource writer object and prepends the\n\/\/ resource id to the output\nfunc (b *BaseResource) Printf(w io.Writer, format string, a ...interface{}) (int, error) {\n\tfmt.Fprintf(w, \"%s \", b.ResourceID())\n\n\treturn fmt.Fprintf(w, format, a...)\n}\n<commit_msg>resource: resources accept options that can be used during processing<commit_after>package resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n)\n\n\/\/ Registry contains all known resources\nvar Registry = make(map[string]RegistryItem)\n\n\/\/ provider is used to create new resources from an HCL AST object item\ntype provider func(title string, item *ast.ObjectItem) (Resource, error)\n\n\/\/ RegistryItem type represents an item from the registry\ntype RegistryItem struct {\n\t\/\/ Name of the resource type\n\tName string\n\n\t\/\/ Short desription of the resource\n\tDescription string\n\n\t\/\/ Resource provider\n\tProvider provider\n}\n\n\/\/ Register adds a resource type to the registry\nfunc Register(item RegistryItem) error {\n\t_, ok := Registry[item.Name]\n\tif ok {\n\t\treturn fmt.Errorf(\"Resource type '%s' is already registered\", item.Name)\n\t}\n\n\tRegistry[item.Name] = item\n\n\treturn nil\n}\n\n\/\/ Resource is the interface type for resources\ntype Resource interface {\n\t\/\/ ResourceID returns the unique identifier of a resource\n\tResourceID() string\n\n\t\/\/ ResourceType returns the type of the resource\n\tResourceType() string\n\n\t\/\/ ResourceTitle returns the title of the resource\n\tResourceTitle() string\n\n\t\/\/ Returns the resources before which this resource shoud be processed\n\tWantBefore() []string\n\n\t\/\/ Returns the resources after which this resource should be processed\n\tWantAfter() []string\n\n\t\/\/ Evaluates the resource\n\tEvaluate(w io.Writer, opts *Options) (State, error)\n\n\t\/\/ Creates the resource\n\tCreate(w io.Writer, opts *Options) error\n\n\t\/\/ Deletes the resource\n\tDelete(w io.Writer, opts *Options) error\n\n\t\/\/ Updates the resource\n\tUpdate(w io.Writer, opts *Options) error\n}\n\n\/\/ Options sent to resources during processing\ntype Options struct {\n\t\/\/ The site directory which contains module and data files\n\tSiteDir string\n\n\t\/\/ Do not take any action, just report what would be done\n\tDryRun bool\n}\n\n\/\/ BaseResource is the base resource type for all resources\n\/\/ The purpose of this type is to be embedded into other resources\n\/\/ Partially implements the Resource interface\ntype BaseResource struct {\n\t\/\/ Type of the resource\n\tType string `json:\"-\"`\n\n\t\/\/ Title of the resource\n\tTitle string `json:\"-\"`\n\n\t\/\/ Desired state of the resource\n\tState string `hcl:\"state\" json:\"state\"`\n\n\t\/\/ Resources before which this resource should be processed\n\tBefore []string `hcl:\"before\" json:\"before,omitempty\"`\n\n\t\/\/ Resources after which this resource should be processed\n\tAfter []string `hcl:\"after\" json:\"after,omitempty\"`\n}\n\n\/\/ ResourceID returns the unique resource id\nfunc (b *BaseResource) ResourceID() string {\n\treturn fmt.Sprintf(\"%s[%s]\", b.Type, b.Title)\n}\n\n\/\/ ResourceType returns the resource type\nfunc (b *BaseResource) ResourceType() string {\n\treturn b.Type\n}\n\n\/\/ ResourceTitle returns the resource title\nfunc (b *BaseResource) ResourceTitle() string {\n\treturn b.Title\n}\n\n\/\/ WantBefore returns the resources before which this resource\n\/\/ should be processed\nfunc (b *BaseResource) WantBefore() []string {\n\treturn b.Before\n}\n\n\/\/ WantAfter returns the resources after which this resource\n\/\/ should be processed\nfunc (b *BaseResource) WantAfter() []string {\n\treturn b.After\n}\n\n\/\/ Printf works just like fmt.Printf except that it writes to the\n\/\/ given resource writer object and prepends the\n\/\/ resource id to the output\nfunc (b *BaseResource) Printf(w io.Writer, format string, a ...interface{}) (int, error) {\n\tfmt.Fprintf(w, \"%s \", b.ResourceID())\n\n\treturn fmt.Fprintf(w, format, a...)\n}\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ State type represents the current and wanted states of a resource\ntype State struct {\n\t\/\/ Current state of the resource\n\tCurrent string\n\n\t\/\/ Wanted state of the resource\n\tWant string\n\n\t\/\/ Indicates that a resource is in the desired state, but is\n\t\/\/ out of date and needs to be updated, e.g. a file resource is\n\t\/\/ present, but its permissions need to be corrected.\n\tUpdate bool\n}\n\n\/\/ Resource is the interface type for resources\ntype Resource interface {\n\t\/\/ ID returns the unique identifier of the resource\n\tID() string\n\n\t\/\/ Validate validates the resource\n\tValidate() error\n\n\t\/\/ GetBefore returns the list of resources before which this\n\t\/\/ resource shoud be processed\n\tGetBefore() []string\n\n\t\/\/ GetAfter returns the list of resources after which this\n\t\/\/ resource should be processed\n\tGetAfter() []string\n\n\t\/\/ GetPresentStates returns the list of states, for which the\n\t\/\/ resource is considered to be present\n\tGetPresentStates() []string\n\n\t\/\/ GetAbsentStates returns the list of states, for which the\n\t\/\/ resource is considered to be absent\n\tGetAbsentStates() []string\n\n\t\/\/ Evaluates the resource\n\tEvaluate() (State, error)\n\n\t\/\/ Creates the resource\n\tCreate() error\n\n\t\/\/ Deletes the resource\n\tDelete() error\n\n\t\/\/ Updates the resource\n\tUpdate() error\n\n\t\/\/ Log logs events\n\tLog(format string, a ...interface{})\n}\n\n\/\/ Config type contains various settings used by the resources\ntype Config struct {\n\t\/\/ The site repo which contains module and data files\n\tSiteRepo string\n\n\t\/\/ Logger used by the resources to log events\n\tLogger *log.Logger\n}\n\n\/\/ DefaultLogger is the default logger instance used for\n\/\/ logging events from the resources\nvar DefaultLogger = log.New(os.Stdout, \"\", log.LstdFlags)\n\n\/\/ DefaultConfig is the default configuration used by the resources\nvar DefaultConfig = &Config{\n\tLogger: DefaultLogger,\n}\n\n\/\/ Log logs an event using the default resource logger\nfunc Log(format string, a ...interface{}) {\n\tDefaultConfig.Logger.Printf(format, a...)\n}\n\n\/\/ Base is the base resource type for all resources\n\/\/ The purpose of this type is to be embedded into other resources\n\/\/ Partially implements the Resource interface\ntype Base struct {\n\t\/\/ Type of the resource\n\tType string `luar:\"-\"`\n\n\t\/\/ Name of the resource\n\tName string `luar:\"-\"`\n\n\t\/\/ Desired state of the resource\n\tState string `luar:\"state\"`\n\n\t\/\/ Resources before which this resource should be processed\n\tBefore []string `luar:\"before\"`\n\n\t\/\/ PresentStates contains the list of states, for which the\n\t\/\/ resource is considered to be present\n\tPresentStates []string `luar:\"-\"`\n\n\t\/\/ AbsentStates contains the list of states, for which the\n\t\/\/ resource is considered to be absent\n\tAbsentStates []string `luar:\"-\"`\n\n\t\/\/ Resources after which this resource should be processed\n\tAfter []string `luar:\"after\"`\n}\n\n\/\/ ID returns the unique resource id\nfunc (b *Base) ID() string {\n\treturn fmt.Sprintf(\"%s[%s]\", b.Type, b.Name)\n}\n\n\/\/ Validate validates the resource\nfunc (b *Base) Validate() error {\n\tif b.Type == \"\" {\n\t\treturn errors.New(\"Invalid resource type\")\n\t}\n\n\tif b.Name == \"\" {\n\t\treturn errors.New(\"Invalid resource name\")\n\t}\n\n\treturn nil\n}\n\n\/\/ GetBefore returns the list of resources before which this resource\n\/\/ should be processed\nfunc (b *Base) GetBefore() []string {\n\treturn b.Before\n}\n\n\/\/ GetAfter returns the list of resources after which this resource\n\/\/ should be processed\nfunc (b *Base) GetAfter() []string {\n\treturn b.After\n}\n\n\/\/ GetPresentStates returns the list of states, for which the\n\/\/ resource is considered to be present\nfunc (b *Base) GetPresentStates() []string {\n\treturn b.PresentStates\n}\n\n\/\/ GetAbsentStates returns the list of states, for which the\n\/\/ resource is considered to be absent\nfunc (b *Base) GetAbsentStates() []string {\n\treturn b.AbsentStates\n}\n\n\/\/ Log writes to the default config writer object and\n\/\/ prepends the resource id to the output\nfunc (b *Base) Log(format string, a ...interface{}) {\n\tf := fmt.Sprintf(\"%s %s\", b.ID(), format)\n\tLog(f, a...)\n}\n<commit_msg>resource: validate the resource state<commit_after>package resource\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/dnaeon\/gru\/utils\"\n)\n\n\/\/ State type represents the current and wanted states of a resource\ntype State struct {\n\t\/\/ Current state of the resource\n\tCurrent string\n\n\t\/\/ Wanted state of the resource\n\tWant string\n\n\t\/\/ Indicates that a resource is in the desired state, but is\n\t\/\/ out of date and needs to be updated, e.g. a file resource is\n\t\/\/ present, but its permissions need to be corrected.\n\tUpdate bool\n}\n\n\/\/ Resource is the interface type for resources\ntype Resource interface {\n\t\/\/ ID returns the unique identifier of the resource\n\tID() string\n\n\t\/\/ Validate validates the resource\n\tValidate() error\n\n\t\/\/ GetBefore returns the list of resources before which this\n\t\/\/ resource shoud be processed\n\tGetBefore() []string\n\n\t\/\/ GetAfter returns the list of resources after which this\n\t\/\/ resource should be processed\n\tGetAfter() []string\n\n\t\/\/ GetPresentStates returns the list of states, for which the\n\t\/\/ resource is considered to be present\n\tGetPresentStates() []string\n\n\t\/\/ GetAbsentStates returns the list of states, for which the\n\t\/\/ resource is considered to be absent\n\tGetAbsentStates() []string\n\n\t\/\/ Evaluates the resource\n\tEvaluate() (State, error)\n\n\t\/\/ Creates the resource\n\tCreate() error\n\n\t\/\/ Deletes the resource\n\tDelete() error\n\n\t\/\/ Updates the resource\n\tUpdate() error\n\n\t\/\/ Log logs events\n\tLog(format string, a ...interface{})\n}\n\n\/\/ Config type contains various settings used by the resources\ntype Config struct {\n\t\/\/ The site repo which contains module and data files\n\tSiteRepo string\n\n\t\/\/ Logger used by the resources to log events\n\tLogger *log.Logger\n}\n\n\/\/ DefaultLogger is the default logger instance used for\n\/\/ logging events from the resources\nvar DefaultLogger = log.New(os.Stdout, \"\", log.LstdFlags)\n\n\/\/ DefaultConfig is the default configuration used by the resources\nvar DefaultConfig = &Config{\n\tLogger: DefaultLogger,\n}\n\n\/\/ Log logs an event using the default resource logger\nfunc Log(format string, a ...interface{}) {\n\tDefaultConfig.Logger.Printf(format, a...)\n}\n\n\/\/ Base is the base resource type for all resources\n\/\/ The purpose of this type is to be embedded into other resources\n\/\/ Partially implements the Resource interface\ntype Base struct {\n\t\/\/ Type of the resource\n\tType string `luar:\"-\"`\n\n\t\/\/ Name of the resource\n\tName string `luar:\"-\"`\n\n\t\/\/ Desired state of the resource\n\tState string `luar:\"state\"`\n\n\t\/\/ Resources before which this resource should be processed\n\tBefore []string `luar:\"before\"`\n\n\t\/\/ PresentStates contains the list of states, for which the\n\t\/\/ resource is considered to be present\n\tPresentStates []string `luar:\"-\"`\n\n\t\/\/ AbsentStates contains the list of states, for which the\n\t\/\/ resource is considered to be absent\n\tAbsentStates []string `luar:\"-\"`\n\n\t\/\/ Resources after which this resource should be processed\n\tAfter []string `luar:\"after\"`\n}\n\n\/\/ ID returns the unique resource id\nfunc (b *Base) ID() string {\n\treturn fmt.Sprintf(\"%s[%s]\", b.Type, b.Name)\n}\n\n\/\/ Validate validates the resource\nfunc (b *Base) Validate() error {\n\tif b.Type == \"\" {\n\t\treturn errors.New(\"Invalid resource type\")\n\t}\n\n\tif b.Name == \"\" {\n\t\treturn errors.New(\"Invalid resource name\")\n\t}\n\n\tstates := append(b.PresentStates, b.AbsentStates...)\n\tif !utils.NewList(states).Contains(b.State) {\n\t\treturn fmt.Errorf(\"Invalid state '%s' for resource %s\", b.State, b.ID())\n\t}\n\n\treturn nil\n}\n\n\/\/ GetBefore returns the list of resources before which this resource\n\/\/ should be processed\nfunc (b *Base) GetBefore() []string {\n\treturn b.Before\n}\n\n\/\/ GetAfter returns the list of resources after which this resource\n\/\/ should be processed\nfunc (b *Base) GetAfter() []string {\n\treturn b.After\n}\n\n\/\/ GetPresentStates returns the list of states, for which the\n\/\/ resource is considered to be present\nfunc (b *Base) GetPresentStates() []string {\n\treturn b.PresentStates\n}\n\n\/\/ GetAbsentStates returns the list of states, for which the\n\/\/ resource is considered to be absent\nfunc (b *Base) GetAbsentStates() []string {\n\treturn b.AbsentStates\n}\n\n\/\/ Log writes to the default config writer object and\n\/\/ prepends the resource id to the output\nfunc (b *Base) Log(format string, a ...interface{}) {\n\tf := fmt.Sprintf(\"%s %s\", b.ID(), format)\n\tLog(f, a...)\n}\n<|endoftext|>"} {"text":"<commit_before>package goresponse\n\ntype HTTPResponse struct {\n\tTag string `json:\"tag\"` \/\/ tag\n\tResult bool `json:\"success\"` \/\/ 请求是否成功处理\n\tState *State `json:\"state\"` \/\/ 状态元\n\tMeta *Meta `json:\"meta\"` \/\/ 扩展元\n\tData interface{} `json:\"data\"` \/\/ 核心数据\n\tError string `json:\"error\"` \/\/ 错误描述\n}\n\nfunc NewHTTPResponse() *HTTPResponse {\n\treturn &HTTPResponse{\n\t\tResult: false,\n\t\tState: NewState(),\n\t\tMeta: nil,\n\t\tData: \"\",\n\t\tError: \"\",\n\t}\n}\n\n\/\/ 设置状态元\nfunc (r *HTTPResponse) S(s *State) *HTTPResponse {\n\tr.State = s\n\tr.Result = r.IsSuccess()\n\treturn r\n}\n\n\/\/ 设置扩展元\nfunc (r *HTTPResponse) ME(m *Meta) *HTTPResponse {\n\tif m != nil {\n\t\tr.Meta = m\n\t}\n\treturn r\n}\n\n\/\/ 设置核心数据\nfunc (r *HTTPResponse) D(d interface{}) *HTTPResponse {\n\tif d != nil {\n\t\tr.Data = d\n\t}\n\treturn r\n}\n\n\/\/ 设置错误描述\nfunc (r *HTTPResponse) E(err error) *HTTPResponse {\n\tif err != nil {\n\t\tr.Error = err.Error()\n\t} else {\n\t\tr.Error = \"\"\n\t}\n\treturn r\n}\n\n\/\/ 设置执行成功\nfunc (r *HTTPResponse) Success() *HTTPResponse {\n\tr.State.Success()\n\tr.E(nil).Result = true\n\treturn r\n}\n\nfunc (r *HTTPResponse) IsSuccess() bool {\n\treturn (r.State.Status == Status_success || r.State.Status == Status_ignore)\n}\n\nfunc (r *HTTPResponse) IsInvalidToken() bool {\n\treturn (r.State.Status == Status_invalid_token)\n}\n\nfunc (r *HTTPResponse) IsTimeoutToken() bool {\n\treturn (r.State.Status == Status_token_timeout)\n}\n<commit_msg>feat: modify http response<commit_after>package goresponse\n\ntype HTTPResponse struct {\n\tTag string `json:\"tag\"` \/\/ tag\n\tResult bool `json:\"success\"` \/\/ 请求是否成功处理\n\tState *State `json:\"state\"` \/\/ 状态元\n\tMeta *Meta `json:\"meta\"` \/\/ 扩展元\n\tData interface{} `json:\"data\"` \/\/ 核心数据\n\tToken string `json:\"token\"` \/\/ 身份令牌\n\tError string `json:\"error\"` \/\/ 错误描述\n}\n\nfunc NewHTTPResponse() *HTTPResponse {\n\treturn &HTTPResponse{\n\t\tResult: false,\n\t\tState: NewState(),\n\t\tMeta: nil,\n\t\tData: \"\",\n\t\tError: \"\",\n\t}\n}\n\n\/\/ 设置状态元\nfunc (r *HTTPResponse) S(s *State) *HTTPResponse {\n\tr.State = s\n\tr.Result = r.IsSuccess()\n\treturn r\n}\n\n\/\/ 设置扩展元\nfunc (r *HTTPResponse) ME(m *Meta) *HTTPResponse {\n\tif m != nil {\n\t\tr.Meta = m\n\t}\n\treturn r\n}\n\n\/\/ 设置核心数据\nfunc (r *HTTPResponse) D(d interface{}) *HTTPResponse {\n\tif d != nil {\n\t\tr.Data = d\n\t}\n\treturn r\n}\n\n\/\/ 设置新身份令牌\nfunc (r *HTTPResponse) T(t string) *HTTPResponse {\n\tr.Token = t\n\treturn r\n}\n\n\/\/ 设置错误描述\nfunc (r *HTTPResponse) E(err error) *HTTPResponse {\n\tif err != nil {\n\t\tr.Error = err.Error()\n\t} else {\n\t\tr.Error = \"\"\n\t}\n\treturn r\n}\n\n\/\/ 设置执行成功\nfunc (r *HTTPResponse) Success() *HTTPResponse {\n\tr.State.Success()\n\tr.E(nil).Result = true\n\treturn r\n}\n\nfunc (r *HTTPResponse) IsSuccess() bool {\n\treturn (r.State.Status == Status_success || r.State.Status == Status_ignore)\n}\n\nfunc (r *HTTPResponse) IsInvalidToken() bool {\n\treturn (r.State.Status == Status_invalid_token)\n}\n\nfunc (r *HTTPResponse) IsTimeoutToken() bool {\n\treturn (r.State.Status == Status_token_timeout)\n}\n<|endoftext|>"} {"text":"<commit_before>package ipnsfs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\tdag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\tft \"github.com\/jbenet\/go-ipfs\/unixfs\"\n\tufspb \"github.com\/jbenet\/go-ipfs\/unixfs\/pb\"\n)\n\nvar ErrNotYetImplemented = errors.New(\"not yet implemented\")\nvar ErrInvalidChild = errors.New(\"invalid child node\")\n\ntype Directory struct {\n\tfs *Filesystem\n\tparent childCloser\n\n\tchildDirs map[string]*Directory\n\tfiles map[string]*File\n\n\tlock sync.Mutex\n\tnode *dag.Node\n\n\tname string\n}\n\nfunc NewDirectory(name string, node *dag.Node, parent childCloser, fs *Filesystem) *Directory {\n\treturn &Directory{\n\t\tfs: fs,\n\t\tname: name,\n\t\tnode: node,\n\t\tparent: parent,\n\t\tchildDirs: make(map[string]*Directory),\n\t\tfiles: make(map[string]*File),\n\t}\n}\n\n\/\/ Open opens a file at the given path 'tpath'\nfunc (d *Directory) Open(tpath []string, mode int) (*File, error) {\n\tif len(tpath) == 0 {\n\t\treturn nil, ErrIsDirectory\n\t}\n\tif len(tpath) == 1 {\n\t\tfi, err := d.childFile(tpath[0])\n\t\tif err == nil {\n\t\t\treturn fi, nil\n\t\t}\n\n\t\tif mode|os.O_CREATE != 0 {\n\t\t\tfnode := new(dag.Node)\n\t\t\tfnode.Data = ft.FilePBData(nil, 0)\n\t\t\tnfi, err := NewFile(tpath[0], fnode, d, d.fs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\td.files[tpath[0]] = nfi\n\t\t\treturn nfi, nil\n\t\t}\n\n\t\treturn nil, ErrNoSuch\n\t}\n\n\tdir, err := d.childDir(tpath[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dir.Open(tpath[1:], mode)\n}\n\n\/\/ consider combining into a single method...\ntype childCloser interface {\n\tcloseChild(string, *dag.Node) error\n}\n\nfunc (d *Directory) closeChild(name string, nd *dag.Node) error {\n\t_, err := d.fs.dserv.Add(nd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.lock.Lock()\n\terr = d.node.RemoveNodeLink(name)\n\tif err != nil && err != dag.ErrNotFound {\n\t\td.lock.Unlock()\n\t\treturn err\n\t}\n\n\terr = d.node.AddNodeLinkClean(name, nd)\n\tif err != nil {\n\t\td.lock.Unlock()\n\t\treturn err\n\t}\n\td.lock.Unlock()\n\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\nfunc (d *Directory) Type() NodeType {\n\treturn TDir\n}\n\nfunc (d *Directory) childFile(name string) (*File, error) {\n\tfi, ok := d.files[name]\n\tif ok {\n\t\treturn fi, nil\n\t}\n\n\t\/\/ search dag\n\tfor _, lnk := range d.node.Links {\n\t\tif lnk.Name == name {\n\t\t\tnd, err := lnk.GetNode(d.fs.dserv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ti, err := ft.FromBytes(nd.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tswitch i.GetType() {\n\t\t\tcase ufspb.Data_Directory:\n\t\t\t\treturn nil, ErrIsDirectory\n\t\t\tcase ufspb.Data_File:\n\t\t\t\tnfi, err := NewFile(name, nd, d, d.fs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\td.files[name] = nfi\n\t\t\t\treturn nfi, nil\n\t\t\tcase ufspb.Data_Metadata:\n\t\t\t\treturn nil, ErrNotYetImplemented\n\t\t\tdefault:\n\t\t\t\treturn nil, ErrInvalidChild\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) childDir(name string) (*Directory, error) {\n\tdir, ok := d.childDirs[name]\n\tif ok {\n\t\treturn dir, nil\n\t}\n\n\tfor _, lnk := range d.node.Links {\n\t\tif lnk.Name == name {\n\t\t\tnd, err := lnk.GetNode(d.fs.dserv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ti, err := ft.FromBytes(nd.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tswitch i.GetType() {\n\t\t\tcase ufspb.Data_Directory:\n\t\t\t\tndir := NewDirectory(name, nd, d, d.fs)\n\t\t\t\td.childDirs[name] = ndir\n\t\t\t\treturn ndir, nil\n\t\t\tcase ufspb.Data_File:\n\t\t\t\treturn nil, fmt.Errorf(\"%s is not a directory\", name)\n\t\t\tcase ufspb.Data_Metadata:\n\t\t\t\treturn nil, ErrNotYetImplemented\n\t\t\tdefault:\n\t\t\t\treturn nil, ErrInvalidChild\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) Child(name string) (FSNode, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\tdir, err := d.childDir(name)\n\tif err == nil {\n\t\treturn dir, nil\n\t}\n\tfi, err := d.childFile(name)\n\tif err == nil {\n\t\treturn fi, nil\n\t}\n\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) List() []string {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tvar out []string\n\tfor _, lnk := range d.node.Links {\n\t\tout = append(out, lnk.Name)\n\t}\n\treturn out\n}\n\nfunc (d *Directory) Mkdir(name string) (*Directory, error) {\n\td.lock.Lock()\n\n\t_, err := d.childDir(name)\n\tif err == nil {\n\t\td.lock.Unlock()\n\t\treturn nil, errors.New(\"directory by that name already exists\")\n\t}\n\t_, err = d.childFile(name)\n\tif err == nil {\n\t\td.lock.Unlock()\n\t\treturn nil, errors.New(\"file by that name already exists\")\n\t}\n\n\tndir := &dag.Node{Data: ft.FolderPBData()}\n\terr = d.node.AddNodeLinkClean(name, ndir)\n\tif err != nil {\n\t\td.lock.Unlock()\n\t\treturn nil, err\n\t}\n\td.lock.Unlock()\n\n\terr = d.parent.closeChild(d.name, d.node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\treturn d.childDir(name)\n}\n\nfunc (d *Directory) Unlink(name string) error {\n\td.lock.Lock()\n\tdelete(d.childDirs, name)\n\tdelete(d.files, name)\n\n\terr := d.node.RemoveNodeLink(name)\n\tif err != nil {\n\t\td.lock.Unlock()\n\t\treturn err\n\t}\n\td.lock.Unlock()\n\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\n\/\/ RenameEntry renames the child by 'oldname' of this directory to 'newname'\nfunc (d *Directory) RenameEntry(oldname, newname string) error {\n\t\/\/ Is the child a directory?\n\tdir, err := d.childDir(oldname)\n\tif err == nil {\n\t\tdir.name = newname\n\n\t\terr := d.node.RemoveNodeLink(oldname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = d.node.AddNodeLinkClean(newname, dir.node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(d.childDirs, oldname)\n\t\td.childDirs[newname] = dir\n\t\treturn d.parent.closeChild(d.name, d.node)\n\t}\n\n\t\/\/ Is the child a file?\n\tfi, err := d.childFile(oldname)\n\tif err == nil {\n\t\tfi.name = newname\n\n\t\terr := d.node.RemoveNodeLink(oldname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnd, err := fi.GetNode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = d.node.AddNodeLinkClean(newname, nd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(d.childDirs, oldname)\n\t\td.files[newname] = fi\n\t\treturn d.parent.closeChild(d.name, d.node)\n\t}\n\treturn ErrNoSuch\n}\n\n\/\/ AddChild adds the node 'nd' under this directory giving it the name 'name'\nfunc (d *Directory) AddChild(name string, nd *dag.Node) error {\n\tpbn, err := ft.FromBytes(nd.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = d.Child(name)\n\tif err == nil {\n\t\treturn errors.New(\"directory already has entry by that name\")\n\t}\n\n\terr = d.node.AddNodeLinkClean(name, nd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch pbn.GetType() {\n\tcase ft.TDirectory:\n\t\td.childDirs[name] = NewDirectory(name, nd, d, d.fs)\n\tcase ft.TFile, ft.TMetadata, ft.TRaw:\n\t\tnfi, err := NewFile(name, nd, d, d.fs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.files[name] = nfi\n\tdefault:\n\t\treturn ErrInvalidChild\n\t}\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\nfunc (d *Directory) GetNode() (*dag.Node, error) {\n\treturn d.node, nil\n}\n\nfunc (d *Directory) Lock() {\n\td.lock.Lock()\n}\n\nfunc (d *Directory) Unlock() {\n\td.lock.Unlock()\n}\n<commit_msg>fix locking<commit_after>package ipnsfs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\tdag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\tft \"github.com\/jbenet\/go-ipfs\/unixfs\"\n\tufspb \"github.com\/jbenet\/go-ipfs\/unixfs\/pb\"\n)\n\nvar ErrNotYetImplemented = errors.New(\"not yet implemented\")\nvar ErrInvalidChild = errors.New(\"invalid child node\")\n\ntype Directory struct {\n\tfs *Filesystem\n\tparent childCloser\n\n\tchildDirs map[string]*Directory\n\tfiles map[string]*File\n\n\tlock sync.Mutex\n\tnode *dag.Node\n\n\tname string\n}\n\nfunc NewDirectory(name string, node *dag.Node, parent childCloser, fs *Filesystem) *Directory {\n\treturn &Directory{\n\t\tfs: fs,\n\t\tname: name,\n\t\tnode: node,\n\t\tparent: parent,\n\t\tchildDirs: make(map[string]*Directory),\n\t\tfiles: make(map[string]*File),\n\t}\n}\n\n\/\/ Open opens a file at the given path 'tpath'\nfunc (d *Directory) Open(tpath []string, mode int) (*File, error) {\n\tif len(tpath) == 0 {\n\t\treturn nil, ErrIsDirectory\n\t}\n\tif len(tpath) == 1 {\n\t\tfi, err := d.childFile(tpath[0])\n\t\tif err == nil {\n\t\t\treturn fi, nil\n\t\t}\n\n\t\tif mode|os.O_CREATE != 0 {\n\t\t\tfnode := new(dag.Node)\n\t\t\tfnode.Data = ft.FilePBData(nil, 0)\n\t\t\tnfi, err := NewFile(tpath[0], fnode, d, d.fs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\td.files[tpath[0]] = nfi\n\t\t\treturn nfi, nil\n\t\t}\n\n\t\treturn nil, ErrNoSuch\n\t}\n\n\tdir, err := d.childDir(tpath[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dir.Open(tpath[1:], mode)\n}\n\ntype childCloser interface {\n\tcloseChild(string, *dag.Node) error\n}\n\nfunc (d *Directory) closeChild(name string, nd *dag.Node) error {\n\t_, err := d.fs.dserv.Add(nd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\terr = d.node.RemoveNodeLink(name)\n\tif err != nil && err != dag.ErrNotFound {\n\t\treturn err\n\t}\n\n\terr = d.node.AddNodeLinkClean(name, nd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\nfunc (d *Directory) Type() NodeType {\n\treturn TDir\n}\n\nfunc (d *Directory) childFile(name string) (*File, error) {\n\tfi, ok := d.files[name]\n\tif ok {\n\t\treturn fi, nil\n\t}\n\n\t\/\/ search dag\n\tfor _, lnk := range d.node.Links {\n\t\tif lnk.Name == name {\n\t\t\tnd, err := lnk.GetNode(d.fs.dserv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ti, err := ft.FromBytes(nd.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tswitch i.GetType() {\n\t\t\tcase ufspb.Data_Directory:\n\t\t\t\treturn nil, ErrIsDirectory\n\t\t\tcase ufspb.Data_File:\n\t\t\t\tnfi, err := NewFile(name, nd, d, d.fs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\td.files[name] = nfi\n\t\t\t\treturn nfi, nil\n\t\t\tcase ufspb.Data_Metadata:\n\t\t\t\treturn nil, ErrNotYetImplemented\n\t\t\tdefault:\n\t\t\t\treturn nil, ErrInvalidChild\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) childDir(name string) (*Directory, error) {\n\tdir, ok := d.childDirs[name]\n\tif ok {\n\t\treturn dir, nil\n\t}\n\n\tfor _, lnk := range d.node.Links {\n\t\tif lnk.Name == name {\n\t\t\tnd, err := lnk.GetNode(d.fs.dserv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ti, err := ft.FromBytes(nd.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tswitch i.GetType() {\n\t\t\tcase ufspb.Data_Directory:\n\t\t\t\tndir := NewDirectory(name, nd, d, d.fs)\n\t\t\t\td.childDirs[name] = ndir\n\t\t\t\treturn ndir, nil\n\t\t\tcase ufspb.Data_File:\n\t\t\t\treturn nil, fmt.Errorf(\"%s is not a directory\", name)\n\t\t\tcase ufspb.Data_Metadata:\n\t\t\t\treturn nil, ErrNotYetImplemented\n\t\t\tdefault:\n\t\t\t\treturn nil, ErrInvalidChild\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) Child(name string) (FSNode, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\treturn d.childUnsync(name)\n}\n\nfunc (d *Directory) childUnsync(name string) (FSNode, error) {\n\tdir, err := d.childDir(name)\n\tif err == nil {\n\t\treturn dir, nil\n\t}\n\tfi, err := d.childFile(name)\n\tif err == nil {\n\t\treturn fi, nil\n\t}\n\n\treturn nil, ErrNoSuch\n}\n\nfunc (d *Directory) List() []string {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tvar out []string\n\tfor _, lnk := range d.node.Links {\n\t\tout = append(out, lnk.Name)\n\t}\n\treturn out\n}\n\nfunc (d *Directory) Mkdir(name string) (*Directory, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\t_, err := d.childDir(name)\n\tif err == nil {\n\t\treturn nil, os.ErrExist\n\t}\n\t_, err = d.childFile(name)\n\tif err == nil {\n\t\treturn nil, os.ErrExist\n\t}\n\n\tndir := &dag.Node{Data: ft.FolderPBData()}\n\terr = d.node.AddNodeLinkClean(name, ndir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.parent.closeChild(d.name, d.node)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d.childDir(name)\n}\n\nfunc (d *Directory) Unlink(name string) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tdelete(d.childDirs, name)\n\tdelete(d.files, name)\n\n\terr := d.node.RemoveNodeLink(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\n\/\/ RenameEntry renames the child by 'oldname' of this directory to 'newname'\nfunc (d *Directory) RenameEntry(oldname, newname string) error {\n\td.Lock()\n\tdefer d.Unlock()\n\t\/\/ Is the child a directory?\n\tdir, err := d.childDir(oldname)\n\tif err == nil {\n\t\tdir.name = newname\n\n\t\terr := d.node.RemoveNodeLink(oldname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = d.node.AddNodeLinkClean(newname, dir.node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(d.childDirs, oldname)\n\t\td.childDirs[newname] = dir\n\t\treturn d.parent.closeChild(d.name, d.node)\n\t}\n\n\t\/\/ Is the child a file?\n\tfi, err := d.childFile(oldname)\n\tif err == nil {\n\t\tfi.name = newname\n\n\t\terr := d.node.RemoveNodeLink(oldname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnd, err := fi.GetNode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = d.node.AddNodeLinkClean(newname, nd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(d.childDirs, oldname)\n\t\td.files[newname] = fi\n\t\treturn d.parent.closeChild(d.name, d.node)\n\t}\n\treturn ErrNoSuch\n}\n\n\/\/ AddChild adds the node 'nd' under this directory giving it the name 'name'\nfunc (d *Directory) AddChild(name string, nd *dag.Node) error {\n\td.Lock()\n\tdefer d.Unlock()\n\tpbn, err := ft.FromBytes(nd.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = d.childUnsync(name)\n\tif err == nil {\n\t\treturn errors.New(\"directory already has entry by that name\")\n\t}\n\n\terr = d.node.AddNodeLinkClean(name, nd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch pbn.GetType() {\n\tcase ft.TDirectory:\n\t\td.childDirs[name] = NewDirectory(name, nd, d, d.fs)\n\tcase ft.TFile, ft.TMetadata, ft.TRaw:\n\t\tnfi, err := NewFile(name, nd, d, d.fs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.files[name] = nfi\n\tdefault:\n\t\treturn ErrInvalidChild\n\t}\n\treturn d.parent.closeChild(d.name, d.node)\n}\n\nfunc (d *Directory) GetNode() (*dag.Node, error) {\n\treturn d.node, nil\n}\n\nfunc (d *Directory) Lock() {\n\td.lock.Lock()\n}\n\nfunc (d *Directory) Unlock() {\n\td.lock.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"github.com\/neovim\/go-client\/nvim\"\n)\n\n\/\/ Config represents a config variable for nvim-go.\n\/\/ Each type must be exported for plugin.HandleAutocmd Eval option.\n\/\/ Also it does not support embeded type.\ntype Config struct {\n\tGlobal *Global\n\n\tBuild *build\n\tCover *cover\n\tFmt *fmt\n\tGenerate *generate\n\tGuru *guru\n\tIferr *iferr\n\tLint *lint\n\tRename *rename\n\tTerminal *terminal\n\tTest *test\n\n\tDebug *debug\n}\n\n\/\/ Global represents a global config variable.\ntype Global struct {\n\tChannelID int\n\tServerName string `eval:\"v:servername\"`\n\tErrorListType string `eval:\"g:go#global#errorlisttype\"`\n}\n\n\/\/ build GoBuild command config variable.\ntype build struct {\n\tAppengine int64 `eval:\"g:go#build#appengine\"`\n\tAutosave int64 `eval:\"g:go#build#autosave\"`\n\tForce int64 `eval:\"g:go#build#force\"`\n\tFlags []string `eval:\"g:go#build#flags\"`\n}\n\ntype cover struct {\n\tFlags []string `eval:\"g:go#cover#flags\"`\n\tMode string `eval:\"g:go#cover#mode\"`\n}\n\n\/\/ fmt represents a GoFmt command config variable.\ntype fmt struct {\n\tAutosave int64 `eval:\"g:go#fmt#autosave\"`\n\tMode string `eval:\"g:go#fmt#mode\"`\n}\n\n\/\/ generate represents a GoGenerate command config variables.\ntype generate struct {\n\tTestAllFuncs int64 `eval:\"g:go#generate#test#allfuncs\"`\n\tTestExclFuncs string `eval:\"g:go#generate#test#exclude\"`\n\tTestExportedFuncs int64 `eval:\"g:go#generate#test#exportedfuncs\"`\n\tTestSubTest int64 `eval:\"g:go#generate#test#subtest\"`\n}\n\n\/\/ guru represents a GoGuru command config variable.\ntype guru struct {\n\tReflection int64 `eval:\"g:go#guru#reflection\"`\n\tKeepCursor map[string]int64 `eval:\"g:go#guru#keep_cursor\"`\n\tJumpFirst int64 `eval:\"g:go#guru#jump_first\"`\n}\n\n\/\/ iferr represents a GoIferr command config variable.\ntype iferr struct {\n\tAutosave int64 `eval:\"g:go#iferr#autosave\"`\n}\n\n\/\/ lint represents a code lint commands config variable.\ntype lint struct {\n\tGolintAutosave bool `eval:\"g:go#lint#golint#autosave\"`\n\tGolintIgnore []string `eval:\"g:go#lint#golint#ignore\"`\n\tGolintMinConfidence float64 `eval:\"g:go#lint#golint#min_confidence\"`\n\tGolintMode string `eval:\"g:go#lint#golint#mode\"`\n\tGoVetAutosave int64 `eval:\"g:go#lint#govet#autosave\"`\n\tGoVetFlags []string `eval:\"g:go#lint#govet#flags\"`\n\tGoVetIgnore []string `eval:\"g:go#lint#govet#ignore\"`\n\tMetalinterAutosave int64 `eval:\"g:go#lint#metalinter#autosave\"`\n\tMetalinterAutosaveTools []string `eval:\"g:go#lint#metalinter#autosave#tools\"`\n\tMetalinterTools []string `eval:\"g:go#lint#metalinter#tools\"`\n\tMetalinterDeadline string `eval:\"g:go#lint#metalinter#deadline\"`\n\tMetalinterSkipDir []string `eval:\"g:go#lint#metalinter#skip_dir\"`\n}\n\n\/\/ rename represents a GoRename command config variable.\ntype rename struct {\n\tPrefill int64 `eval:\"g:go#rename#prefill\"`\n}\n\n\/\/ terminal represents a configure of Neovim terminal buffer.\ntype terminal struct {\n\tMode string `eval:\"g:go#terminal#mode\"`\n\tPosition string `eval:\"g:go#terminal#position\"`\n\tHeight int64 `eval:\"g:go#terminal#height\"`\n\tWidth int64 `eval:\"g:go#terminal#width\"`\n\tStopInsert int64 `eval:\"g:go#terminal#stop_insert\"`\n}\n\n\/\/ Test represents a GoTest command config variables.\ntype test struct {\n\tAllPackage int64 `eval:\"g:go#test#all_package\"`\n\tAutosave int64 `eval:\"g:go#test#autosave\"`\n\tFlags []string `eval:\"g:go#test#flags\"`\n}\n\n\/\/ Debug represents a debug of nvim-go config variable.\ntype debug struct {\n\tEnable int64 `eval:\"g:go#debug\"`\n\tPprof int64 `eval:\"g:go#debug#pprof\"`\n}\n\nvar (\n\t\/\/ ChannelID remote plugins channel id.\n\tChannelID int\n\t\/\/ ServerName Neovim socket listen location.\n\tServerName string\n\t\/\/ ErrorListType type of error list window.\n\tErrorListType string\n\n\t\/\/ BuildAppengine enable appengine bulid.\n\tBuildAppengine bool\n\t\/\/ BuildAutosave call the GoBuild command automatically at during the BufWritePost.\n\tBuildAutosave bool\n\t\/\/ BuildForce builds the binary instead of fake(use ioutil.TempFiile) build.\n\tBuildForce bool\n\t\/\/ BuildFlags flag of compile tools build command.\n\tBuildFlags []string\n\n\t\/\/ CoverFlags flags for cover command.\n\tCoverFlags []string\n\t\/\/ CoverMode mode of cover command.\n\tCoverMode string\n\n\t\/\/ FmtAutosave call the GoFmt command automatically at during the BufWritePre.\n\tFmtAutosave bool\n\t\/\/ FmtMode formatting mode of Fmt command.\n\tFmtMode string\n\n\t\/\/ GenerateTestAllFuncs accept all functions to the GenerateTest.\n\tGenerateTestAllFuncs bool\n\t\/\/ GenerateTestExclFuncs exclude function of GenerateTest.\n\tGenerateTestExclFuncs string\n\t\/\/ GenerateTestExportedFuncs accept exported functions to the GenerateTest.\n\tGenerateTestExportedFuncs bool\n\t\/\/ GenerateTestSubTest whether the use Go subtest idiom or not.\n\tGenerateTestSubTest bool\n\n\t\/\/ GuruReflection use the type reflection on GoGuru commmands.\n\tGuruReflection bool\n\t\/\/ GuruKeepCursor keep the cursor focus to source buffer instead of quickfix or locationlist.\n\tGuruKeepCursor map[string]int64\n\t\/\/ GuruJumpFirst jump the first error position on GoGuru commands.\n\tGuruJumpFirst bool\n\n\t\/\/ IferrAutosave call the GoIferr command automatically at during the BufWritePre.\n\tIferrAutosave bool\n\n\t\/\/ GolintAutosave call the GoLint command automatically at during the BufWritePost.\n\tGolintAutosave bool\n\t\/\/ GolintIgnore ignore file for lint command.\n\tGolintIgnore []string\n\t\/\/ GolintMinConfidence minimum confidence of a problem to print it\n\tGolintMinConfidence float64\n\t\/\/ GolintMode mode of golint. available value are \"root\", \"current\" and \"recursive\".\n\tGolintMode string\n\t\/\/ GoVetAutosave call the GoVet command automatically at during the BufWritePost.\n\tGoVetAutosave bool\n\t\/\/ GoVetFlags default flags for GoVet commands\n\tGoVetFlags []string\n\t\/\/ GoVetIgnore ignore directories for go vet command.\n\tGoVetIgnore []string\n\t\/\/ MetalinterAutosave call the GoMetaLinter command automatically at during the BufWritePre.\n\tMetalinterAutosave bool\n\t\/\/ MetalinterAutosaveTools lint tool list for MetalinterAutosave.\n\tMetalinterAutosaveTools []string\n\t\/\/ MetalinterTools lint tool list for GoMetaLinter command.\n\tMetalinterTools []string\n\t\/\/ MetalinterDeadline deadline of GoMetaLinter command timeout.\n\tMetalinterDeadline string\n\t\/\/ MetalinterSkipDir skips of lint of the directory.\n\tMetalinterSkipDir []string\n\n\t\/\/ RenamePrefill Enable naming prefill.\n\tRenamePrefill bool\n\n\t\/\/ TerminalMode open the terminal window mode.\n\tTerminalMode string\n\t\/\/ TerminalPosition open the terminal window position.\n\tTerminalPosition string\n\t\/\/ TerminalHeight open the terminal window height.\n\tTerminalHeight int64\n\t\/\/ TerminalWidth open the terminal window width.\n\tTerminalWidth int64\n\t\/\/ TerminalStopInsert workaround if users set \"autocmd BufEnter term:\/\/* startinsert\".\n\tTerminalStopInsert bool\n\n\t\/\/ TestAutosave call the GoBuild command automatically at during the BufWritePost.\n\tTestAutosave bool\n\t\/\/ TestAll enable all package test on GoTest. similar \"go test .\/...\", but ignored vendor and testdata.\n\tTestAll bool\n\t\/\/ TestFlags test command default flags.\n\tTestFlags []string\n\n\t\/\/ DebugEnable Enable debugging.\n\tDebugEnable bool\n\t\/\/ DebugPprof Enable net\/http\/pprof debugging.\n\tDebugPprof bool\n)\n\n\/\/ Get gets the user config variables and convert to global varialble.\nfunc Get(v *nvim.Nvim, cfg *Config) {\n\t\/\/ Client\n\tChannelID = cfg.Global.ChannelID\n\tServerName = cfg.Global.ServerName\n\tErrorListType = cfg.Global.ErrorListType\n\n\t\/\/ Build\n\tBuildAppengine = itob(cfg.Build.Appengine)\n\tBuildAutosave = itob(cfg.Build.Autosave)\n\tBuildForce = itob(cfg.Build.Force)\n\tBuildFlags = cfg.Build.Flags\n\n\t\/\/ Cover\n\tCoverFlags = cfg.Cover.Flags\n\tCoverMode = cfg.Cover.Mode\n\n\t\/\/ Fmt\n\tFmtAutosave = itob(cfg.Fmt.Autosave)\n\tFmtMode = cfg.Fmt.Mode\n\n\t\/\/ Generate\n\tGenerateTestAllFuncs = itob(cfg.Generate.TestAllFuncs)\n\tGenerateTestExclFuncs = cfg.Generate.TestExclFuncs\n\tGenerateTestExportedFuncs = itob(cfg.Generate.TestExportedFuncs)\n\tGenerateTestSubTest = itob(cfg.Generate.TestSubTest)\n\n\t\/\/ Guru\n\tGuruReflection = itob(cfg.Guru.Reflection)\n\tGuruKeepCursor = cfg.Guru.KeepCursor\n\tGuruJumpFirst = itob(cfg.Guru.JumpFirst)\n\n\t\/\/ Iferr\n\tIferrAutosave = itob(cfg.Iferr.Autosave)\n\n\t\/\/ Lint\n\tGolintAutosave = cfg.Lint.GolintAutosave\n\tGolintIgnore = cfg.Lint.GolintIgnore\n\tGolintMinConfidence = cfg.Lint.GolintMinConfidence\n\tGolintMode = cfg.Lint.GolintMode\n\tGoVetAutosave = itob(cfg.Lint.GoVetAutosave)\n\tGoVetFlags = cfg.Lint.GoVetFlags\n\tGoVetIgnore = cfg.Lint.GoVetIgnore\n\tMetalinterAutosave = itob(cfg.Lint.MetalinterAutosave)\n\tMetalinterAutosaveTools = cfg.Lint.MetalinterAutosaveTools\n\tMetalinterTools = cfg.Lint.MetalinterTools\n\tMetalinterDeadline = cfg.Lint.MetalinterDeadline\n\tMetalinterSkipDir = cfg.Lint.MetalinterSkipDir\n\n\t\/\/ Rename\n\tRenamePrefill = itob(cfg.Rename.Prefill)\n\n\t\/\/ Terminal\n\tTerminalMode = cfg.Terminal.Mode\n\tTerminalPosition = cfg.Terminal.Position\n\tTerminalHeight = cfg.Terminal.Height\n\tTerminalWidth = cfg.Terminal.Width\n\tTerminalStopInsert = itob(cfg.Terminal.StopInsert)\n\n\t\/\/ Test\n\tTestAutosave = itob(cfg.Test.Autosave)\n\tTestAll = itob(cfg.Test.AllPackage)\n\tTestFlags = cfg.Test.Flags\n\n\t\/\/ Debug\n\tDebugEnable = itob(cfg.Debug.Enable)\n\tDebugPprof = itob(cfg.Debug.Pprof)\n}\n\nfunc itob(i int64) bool { return i != int64(0) }\n<commit_msg>config: fix GolintAutosave type to int64<commit_after>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage config\n\nimport (\n\t\"github.com\/neovim\/go-client\/nvim\"\n)\n\n\/\/ Config represents a config variable for nvim-go.\n\/\/ Each type must be exported for plugin.HandleAutocmd Eval option.\n\/\/ Also it does not support embeded type.\ntype Config struct {\n\tGlobal *Global\n\n\tBuild *build\n\tCover *cover\n\tFmt *fmt\n\tGenerate *generate\n\tGuru *guru\n\tIferr *iferr\n\tLint *lint\n\tRename *rename\n\tTerminal *terminal\n\tTest *test\n\n\tDebug *debug\n}\n\n\/\/ Global represents a global config variable.\ntype Global struct {\n\tChannelID int\n\tServerName string `eval:\"v:servername\"`\n\tErrorListType string `eval:\"g:go#global#errorlisttype\"`\n}\n\n\/\/ build GoBuild command config variable.\ntype build struct {\n\tAppengine int64 `eval:\"g:go#build#appengine\"`\n\tAutosave int64 `eval:\"g:go#build#autosave\"`\n\tForce int64 `eval:\"g:go#build#force\"`\n\tFlags []string `eval:\"g:go#build#flags\"`\n}\n\ntype cover struct {\n\tFlags []string `eval:\"g:go#cover#flags\"`\n\tMode string `eval:\"g:go#cover#mode\"`\n}\n\n\/\/ fmt represents a GoFmt command config variable.\ntype fmt struct {\n\tAutosave int64 `eval:\"g:go#fmt#autosave\"`\n\tMode string `eval:\"g:go#fmt#mode\"`\n}\n\n\/\/ generate represents a GoGenerate command config variables.\ntype generate struct {\n\tTestAllFuncs int64 `eval:\"g:go#generate#test#allfuncs\"`\n\tTestExclFuncs string `eval:\"g:go#generate#test#exclude\"`\n\tTestExportedFuncs int64 `eval:\"g:go#generate#test#exportedfuncs\"`\n\tTestSubTest int64 `eval:\"g:go#generate#test#subtest\"`\n}\n\n\/\/ guru represents a GoGuru command config variable.\ntype guru struct {\n\tReflection int64 `eval:\"g:go#guru#reflection\"`\n\tKeepCursor map[string]int64 `eval:\"g:go#guru#keep_cursor\"`\n\tJumpFirst int64 `eval:\"g:go#guru#jump_first\"`\n}\n\n\/\/ iferr represents a GoIferr command config variable.\ntype iferr struct {\n\tAutosave int64 `eval:\"g:go#iferr#autosave\"`\n}\n\n\/\/ lint represents a code lint commands config variable.\ntype lint struct {\n\tGolintAutosave int64 `eval:\"g:go#lint#golint#autosave\"`\n\tGolintIgnore []string `eval:\"g:go#lint#golint#ignore\"`\n\tGolintMinConfidence float64 `eval:\"g:go#lint#golint#min_confidence\"`\n\tGolintMode string `eval:\"g:go#lint#golint#mode\"`\n\tGoVetAutosave int64 `eval:\"g:go#lint#govet#autosave\"`\n\tGoVetFlags []string `eval:\"g:go#lint#govet#flags\"`\n\tGoVetIgnore []string `eval:\"g:go#lint#govet#ignore\"`\n\tMetalinterAutosave int64 `eval:\"g:go#lint#metalinter#autosave\"`\n\tMetalinterAutosaveTools []string `eval:\"g:go#lint#metalinter#autosave#tools\"`\n\tMetalinterTools []string `eval:\"g:go#lint#metalinter#tools\"`\n\tMetalinterDeadline string `eval:\"g:go#lint#metalinter#deadline\"`\n\tMetalinterSkipDir []string `eval:\"g:go#lint#metalinter#skip_dir\"`\n}\n\n\/\/ rename represents a GoRename command config variable.\ntype rename struct {\n\tPrefill int64 `eval:\"g:go#rename#prefill\"`\n}\n\n\/\/ terminal represents a configure of Neovim terminal buffer.\ntype terminal struct {\n\tMode string `eval:\"g:go#terminal#mode\"`\n\tPosition string `eval:\"g:go#terminal#position\"`\n\tHeight int64 `eval:\"g:go#terminal#height\"`\n\tWidth int64 `eval:\"g:go#terminal#width\"`\n\tStopInsert int64 `eval:\"g:go#terminal#stop_insert\"`\n}\n\n\/\/ Test represents a GoTest command config variables.\ntype test struct {\n\tAllPackage int64 `eval:\"g:go#test#all_package\"`\n\tAutosave int64 `eval:\"g:go#test#autosave\"`\n\tFlags []string `eval:\"g:go#test#flags\"`\n}\n\n\/\/ Debug represents a debug of nvim-go config variable.\ntype debug struct {\n\tEnable int64 `eval:\"g:go#debug\"`\n\tPprof int64 `eval:\"g:go#debug#pprof\"`\n}\n\nvar (\n\t\/\/ ChannelID remote plugins channel id.\n\tChannelID int\n\t\/\/ ServerName Neovim socket listen location.\n\tServerName string\n\t\/\/ ErrorListType type of error list window.\n\tErrorListType string\n\n\t\/\/ BuildAppengine enable appengine bulid.\n\tBuildAppengine bool\n\t\/\/ BuildAutosave call the GoBuild command automatically at during the BufWritePost.\n\tBuildAutosave bool\n\t\/\/ BuildForce builds the binary instead of fake(use ioutil.TempFiile) build.\n\tBuildForce bool\n\t\/\/ BuildFlags flag of compile tools build command.\n\tBuildFlags []string\n\n\t\/\/ CoverFlags flags for cover command.\n\tCoverFlags []string\n\t\/\/ CoverMode mode of cover command.\n\tCoverMode string\n\n\t\/\/ FmtAutosave call the GoFmt command automatically at during the BufWritePre.\n\tFmtAutosave bool\n\t\/\/ FmtMode formatting mode of Fmt command.\n\tFmtMode string\n\n\t\/\/ GenerateTestAllFuncs accept all functions to the GenerateTest.\n\tGenerateTestAllFuncs bool\n\t\/\/ GenerateTestExclFuncs exclude function of GenerateTest.\n\tGenerateTestExclFuncs string\n\t\/\/ GenerateTestExportedFuncs accept exported functions to the GenerateTest.\n\tGenerateTestExportedFuncs bool\n\t\/\/ GenerateTestSubTest whether the use Go subtest idiom or not.\n\tGenerateTestSubTest bool\n\n\t\/\/ GuruReflection use the type reflection on GoGuru commmands.\n\tGuruReflection bool\n\t\/\/ GuruKeepCursor keep the cursor focus to source buffer instead of quickfix or locationlist.\n\tGuruKeepCursor map[string]int64\n\t\/\/ GuruJumpFirst jump the first error position on GoGuru commands.\n\tGuruJumpFirst bool\n\n\t\/\/ IferrAutosave call the GoIferr command automatically at during the BufWritePre.\n\tIferrAutosave bool\n\n\t\/\/ GolintAutosave call the GoLint command automatically at during the BufWritePost.\n\tGolintAutosave bool\n\t\/\/ GolintIgnore ignore file for lint command.\n\tGolintIgnore []string\n\t\/\/ GolintMinConfidence minimum confidence of a problem to print it\n\tGolintMinConfidence float64\n\t\/\/ GolintMode mode of golint. available value are \"root\", \"current\" and \"recursive\".\n\tGolintMode string\n\t\/\/ GoVetAutosave call the GoVet command automatically at during the BufWritePost.\n\tGoVetAutosave bool\n\t\/\/ GoVetFlags default flags for GoVet commands\n\tGoVetFlags []string\n\t\/\/ GoVetIgnore ignore directories for go vet command.\n\tGoVetIgnore []string\n\t\/\/ MetalinterAutosave call the GoMetaLinter command automatically at during the BufWritePre.\n\tMetalinterAutosave bool\n\t\/\/ MetalinterAutosaveTools lint tool list for MetalinterAutosave.\n\tMetalinterAutosaveTools []string\n\t\/\/ MetalinterTools lint tool list for GoMetaLinter command.\n\tMetalinterTools []string\n\t\/\/ MetalinterDeadline deadline of GoMetaLinter command timeout.\n\tMetalinterDeadline string\n\t\/\/ MetalinterSkipDir skips of lint of the directory.\n\tMetalinterSkipDir []string\n\n\t\/\/ RenamePrefill Enable naming prefill.\n\tRenamePrefill bool\n\n\t\/\/ TerminalMode open the terminal window mode.\n\tTerminalMode string\n\t\/\/ TerminalPosition open the terminal window position.\n\tTerminalPosition string\n\t\/\/ TerminalHeight open the terminal window height.\n\tTerminalHeight int64\n\t\/\/ TerminalWidth open the terminal window width.\n\tTerminalWidth int64\n\t\/\/ TerminalStopInsert workaround if users set \"autocmd BufEnter term:\/\/* startinsert\".\n\tTerminalStopInsert bool\n\n\t\/\/ TestAutosave call the GoBuild command automatically at during the BufWritePost.\n\tTestAutosave bool\n\t\/\/ TestAll enable all package test on GoTest. similar \"go test .\/...\", but ignored vendor and testdata.\n\tTestAll bool\n\t\/\/ TestFlags test command default flags.\n\tTestFlags []string\n\n\t\/\/ DebugEnable Enable debugging.\n\tDebugEnable bool\n\t\/\/ DebugPprof Enable net\/http\/pprof debugging.\n\tDebugPprof bool\n)\n\n\/\/ Get gets the user config variables and convert to global varialble.\nfunc Get(v *nvim.Nvim, cfg *Config) {\n\t\/\/ Client\n\tChannelID = cfg.Global.ChannelID\n\tServerName = cfg.Global.ServerName\n\tErrorListType = cfg.Global.ErrorListType\n\n\t\/\/ Build\n\tBuildAppengine = itob(cfg.Build.Appengine)\n\tBuildAutosave = itob(cfg.Build.Autosave)\n\tBuildForce = itob(cfg.Build.Force)\n\tBuildFlags = cfg.Build.Flags\n\n\t\/\/ Cover\n\tCoverFlags = cfg.Cover.Flags\n\tCoverMode = cfg.Cover.Mode\n\n\t\/\/ Fmt\n\tFmtAutosave = itob(cfg.Fmt.Autosave)\n\tFmtMode = cfg.Fmt.Mode\n\n\t\/\/ Generate\n\tGenerateTestAllFuncs = itob(cfg.Generate.TestAllFuncs)\n\tGenerateTestExclFuncs = cfg.Generate.TestExclFuncs\n\tGenerateTestExportedFuncs = itob(cfg.Generate.TestExportedFuncs)\n\tGenerateTestSubTest = itob(cfg.Generate.TestSubTest)\n\n\t\/\/ Guru\n\tGuruReflection = itob(cfg.Guru.Reflection)\n\tGuruKeepCursor = cfg.Guru.KeepCursor\n\tGuruJumpFirst = itob(cfg.Guru.JumpFirst)\n\n\t\/\/ Iferr\n\tIferrAutosave = itob(cfg.Iferr.Autosave)\n\n\t\/\/ Lint\n\tGolintAutosave = itob(cfg.Lint.GolintAutosave)\n\tGolintIgnore = cfg.Lint.GolintIgnore\n\tGolintMinConfidence = cfg.Lint.GolintMinConfidence\n\tGolintMode = cfg.Lint.GolintMode\n\tGoVetAutosave = itob(cfg.Lint.GoVetAutosave)\n\tGoVetFlags = cfg.Lint.GoVetFlags\n\tGoVetIgnore = cfg.Lint.GoVetIgnore\n\tMetalinterAutosave = itob(cfg.Lint.MetalinterAutosave)\n\tMetalinterAutosaveTools = cfg.Lint.MetalinterAutosaveTools\n\tMetalinterTools = cfg.Lint.MetalinterTools\n\tMetalinterDeadline = cfg.Lint.MetalinterDeadline\n\tMetalinterSkipDir = cfg.Lint.MetalinterSkipDir\n\n\t\/\/ Rename\n\tRenamePrefill = itob(cfg.Rename.Prefill)\n\n\t\/\/ Terminal\n\tTerminalMode = cfg.Terminal.Mode\n\tTerminalPosition = cfg.Terminal.Position\n\tTerminalHeight = cfg.Terminal.Height\n\tTerminalWidth = cfg.Terminal.Width\n\tTerminalStopInsert = itob(cfg.Terminal.StopInsert)\n\n\t\/\/ Test\n\tTestAutosave = itob(cfg.Test.Autosave)\n\tTestAll = itob(cfg.Test.AllPackage)\n\tTestFlags = cfg.Test.Flags\n\n\t\/\/ Debug\n\tDebugEnable = itob(cfg.Debug.Enable)\n\tDebugPprof = itob(cfg.Debug.Pprof)\n}\n\nfunc itob(i int64) bool { return i != int64(0) }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package utf8string provides an efficient way to index strings by rune rather than by byte.\npackage utf8string\n\nimport (\n\t\"errors\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ String wraps a regular string with a small structure that provides more\n\/\/ efficient indexing by code point index, as opposed to byte index.\n\/\/ Scanning incrementally forwards or backwards is O(1) per index operation\n\/\/ (although not as fast a range clause going forwards). Random access is\n\/\/ O(N) in the length of the string, but the overhead is less than always\n\/\/ scanning from the beginning.\n\/\/ If the string is ASCII, random access is O(1).\n\/\/ Unlike the built-in string type, String has internal mutable state and\n\/\/ is not thread-safe.\ntype String struct {\n\tstr string\n\tnumRunes int\n\t\/\/ If width > 0, the rune at runePos starts at bytePos and has the specified width.\n\twidth int\n\tbytePos int\n\trunePos int\n\tnonASCII int \/\/ byte index of the first non-ASCII rune.\n}\n\n\/\/ NewString returns a new UTF-8 string with the provided contents.\nfunc NewString(contents string) *String {\n\treturn new(String).Init(contents)\n}\n\n\/\/ Init initializes an existing String to hold the provided contents.\n\/\/ It returns a pointer to the initialized String.\nfunc (s *String) Init(contents string) *String {\n\ts.str = contents\n\ts.bytePos = 0\n\ts.runePos = 0\n\tfor i := 0; i < len(contents); i++ {\n\t\tif contents[i] >= utf8.RuneSelf {\n\t\t\t\/\/ Not ASCII.\n\t\t\ts.numRunes = utf8.RuneCountInString(contents)\n\t\t\t_, s.width = utf8.DecodeRuneInString(contents)\n\t\t\ts.nonASCII = i\n\t\t\treturn s\n\t\t}\n\t}\n\t\/\/ ASCII is simple. Also, the empty string is ASCII.\n\ts.numRunes = len(contents)\n\ts.width = 0\n\ts.nonASCII = len(contents)\n\treturn s\n}\n\n\/\/ String returns the contents of the String. This method also means the\n\/\/ String is directly printable by fmt.Print.\nfunc (s *String) String() string {\n\treturn s.str\n}\n\n\/\/ RuneCount returns the number of runes (Unicode code points) in the String.\nfunc (s *String) RuneCount() int {\n\treturn s.numRunes\n}\n\n\/\/ IsASCII returns a boolean indicating whether the String contains only ASCII bytes.\nfunc (s *String) IsASCII() bool {\n\treturn s.width == 0\n}\n\n\/\/ Slice returns the string sliced at rune positions [i:j].\nfunc (s *String) Slice(i, j int) string {\n\t\/\/ ASCII is easy. Let the compiler catch the indexing error if there is one.\n\tif j < s.nonASCII {\n\t\treturn s.str[i:j]\n\t}\n\tif i < 0 || j > s.numRunes || i > j {\n\t\tpanic(sliceOutOfRange)\n\t}\n\tif i == j {\n\t\treturn \"\"\n\t}\n\t\/\/ For non-ASCII, after At(i), bytePos is always the position of the indexed character.\n\tvar low, high int\n\tswitch {\n\tcase i < s.nonASCII:\n\t\tlow = i\n\tcase i == s.numRunes:\n\t\tlow = len(s.str)\n\tdefault:\n\t\ts.At(i)\n\t\tlow = s.bytePos\n\t}\n\tswitch {\n\tcase j == s.numRunes:\n\t\thigh = len(s.str)\n\tdefault:\n\t\ts.At(j)\n\t\thigh = s.bytePos\n\t}\n\treturn s.str[low:high]\n}\n\n\/\/ At returns the rune with index i in the String. The sequence of runes is the same\n\/\/ as iterating over the contents with a \"for range\" clause.\nfunc (s *String) At(i int) rune {\n\t\/\/ ASCII is easy. Let the compiler catch the indexing error if there is one.\n\tif i < s.nonASCII {\n\t\treturn rune(s.str[i])\n\t}\n\n\t\/\/ Now we do need to know the index is valid.\n\tif i < 0 || i >= s.numRunes {\n\t\tpanic(outOfRange)\n\t}\n\n\tvar r rune\n\n\t\/\/ Five easy common cases: within 1 spot of bytePos\/runePos, or the beginning, or the end.\n\t\/\/ With these cases, all scans from beginning or end work in O(1) time per rune.\n\tswitch {\n\n\tcase i == s.runePos-1: \/\/ backing up one rune\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\ts.runePos = i\n\t\ts.bytePos -= s.width\n\t\treturn r\n\tcase i == s.runePos+1: \/\/ moving ahead one rune\n\t\ts.runePos = i\n\t\ts.bytePos += s.width\n\t\tfallthrough\n\tcase i == s.runePos:\n\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\treturn r\n\tcase i == 0: \/\/ start of string\n\t\tr, s.width = utf8.DecodeRuneInString(s.str)\n\t\ts.runePos = 0\n\t\ts.bytePos = 0\n\t\treturn r\n\n\tcase i == s.numRunes-1: \/\/ last rune in string\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str)\n\t\ts.runePos = i\n\t\ts.bytePos = len(s.str) - s.width\n\t\treturn r\n\t}\n\n\t\/\/ We need to do a linear scan. There are three places to start from:\n\t\/\/ 1) The beginning\n\t\/\/ 2) bytePos\/runePos.\n\t\/\/ 3) The end\n\t\/\/ Choose the closest in rune count, scanning backwards if necessary.\n\tforward := true\n\tif i < s.runePos {\n\t\t\/\/ Between beginning and pos. Which is closer?\n\t\t\/\/ Since both i and runePos are guaranteed >= nonASCII, that's the\n\t\t\/\/ lowest location we need to start from.\n\t\tif i < (s.runePos-s.nonASCII)\/2 {\n\t\t\t\/\/ Scan forward from beginning\n\t\t\ts.bytePos, s.runePos = s.nonASCII, s.nonASCII\n\t\t} else {\n\t\t\t\/\/ Scan backwards from where we are\n\t\t\tforward = false\n\t\t}\n\t} else {\n\t\t\/\/ Between pos and end. Which is closer?\n\t\tif i-s.runePos < (s.numRunes-s.runePos)\/2 {\n\t\t\t\/\/ Scan forward from pos\n\t\t} else {\n\t\t\t\/\/ Scan backwards from end\n\t\t\ts.bytePos, s.runePos = len(s.str), s.numRunes\n\t\t\tforward = false\n\t\t}\n\t}\n\tif forward {\n\t\t\/\/ TODO: Is it much faster to use a range loop for this scan?\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts.runePos++\n\t\t\ts.bytePos += s.width\n\t\t}\n\t} else {\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\t\ts.runePos--\n\t\t\ts.bytePos -= s.width\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn r\n}\n\nvar outOfRange = errors.New(\"utf8.String: index out of range\")\nvar sliceOutOfRange = errors.New(\"utf8.String: slice index out of range\")\n<commit_msg>exp\/utf8string: Correct package name in error messages<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package utf8string provides an efficient way to index strings by rune rather than by byte.\npackage utf8string\n\nimport (\n\t\"errors\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ String wraps a regular string with a small structure that provides more\n\/\/ efficient indexing by code point index, as opposed to byte index.\n\/\/ Scanning incrementally forwards or backwards is O(1) per index operation\n\/\/ (although not as fast a range clause going forwards). Random access is\n\/\/ O(N) in the length of the string, but the overhead is less than always\n\/\/ scanning from the beginning.\n\/\/ If the string is ASCII, random access is O(1).\n\/\/ Unlike the built-in string type, String has internal mutable state and\n\/\/ is not thread-safe.\ntype String struct {\n\tstr string\n\tnumRunes int\n\t\/\/ If width > 0, the rune at runePos starts at bytePos and has the specified width.\n\twidth int\n\tbytePos int\n\trunePos int\n\tnonASCII int \/\/ byte index of the first non-ASCII rune.\n}\n\n\/\/ NewString returns a new UTF-8 string with the provided contents.\nfunc NewString(contents string) *String {\n\treturn new(String).Init(contents)\n}\n\n\/\/ Init initializes an existing String to hold the provided contents.\n\/\/ It returns a pointer to the initialized String.\nfunc (s *String) Init(contents string) *String {\n\ts.str = contents\n\ts.bytePos = 0\n\ts.runePos = 0\n\tfor i := 0; i < len(contents); i++ {\n\t\tif contents[i] >= utf8.RuneSelf {\n\t\t\t\/\/ Not ASCII.\n\t\t\ts.numRunes = utf8.RuneCountInString(contents)\n\t\t\t_, s.width = utf8.DecodeRuneInString(contents)\n\t\t\ts.nonASCII = i\n\t\t\treturn s\n\t\t}\n\t}\n\t\/\/ ASCII is simple. Also, the empty string is ASCII.\n\ts.numRunes = len(contents)\n\ts.width = 0\n\ts.nonASCII = len(contents)\n\treturn s\n}\n\n\/\/ String returns the contents of the String. This method also means the\n\/\/ String is directly printable by fmt.Print.\nfunc (s *String) String() string {\n\treturn s.str\n}\n\n\/\/ RuneCount returns the number of runes (Unicode code points) in the String.\nfunc (s *String) RuneCount() int {\n\treturn s.numRunes\n}\n\n\/\/ IsASCII returns a boolean indicating whether the String contains only ASCII bytes.\nfunc (s *String) IsASCII() bool {\n\treturn s.width == 0\n}\n\n\/\/ Slice returns the string sliced at rune positions [i:j].\nfunc (s *String) Slice(i, j int) string {\n\t\/\/ ASCII is easy. Let the compiler catch the indexing error if there is one.\n\tif j < s.nonASCII {\n\t\treturn s.str[i:j]\n\t}\n\tif i < 0 || j > s.numRunes || i > j {\n\t\tpanic(sliceOutOfRange)\n\t}\n\tif i == j {\n\t\treturn \"\"\n\t}\n\t\/\/ For non-ASCII, after At(i), bytePos is always the position of the indexed character.\n\tvar low, high int\n\tswitch {\n\tcase i < s.nonASCII:\n\t\tlow = i\n\tcase i == s.numRunes:\n\t\tlow = len(s.str)\n\tdefault:\n\t\ts.At(i)\n\t\tlow = s.bytePos\n\t}\n\tswitch {\n\tcase j == s.numRunes:\n\t\thigh = len(s.str)\n\tdefault:\n\t\ts.At(j)\n\t\thigh = s.bytePos\n\t}\n\treturn s.str[low:high]\n}\n\n\/\/ At returns the rune with index i in the String. The sequence of runes is the same\n\/\/ as iterating over the contents with a \"for range\" clause.\nfunc (s *String) At(i int) rune {\n\t\/\/ ASCII is easy. Let the compiler catch the indexing error if there is one.\n\tif i < s.nonASCII {\n\t\treturn rune(s.str[i])\n\t}\n\n\t\/\/ Now we do need to know the index is valid.\n\tif i < 0 || i >= s.numRunes {\n\t\tpanic(outOfRange)\n\t}\n\n\tvar r rune\n\n\t\/\/ Five easy common cases: within 1 spot of bytePos\/runePos, or the beginning, or the end.\n\t\/\/ With these cases, all scans from beginning or end work in O(1) time per rune.\n\tswitch {\n\n\tcase i == s.runePos-1: \/\/ backing up one rune\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\ts.runePos = i\n\t\ts.bytePos -= s.width\n\t\treturn r\n\tcase i == s.runePos+1: \/\/ moving ahead one rune\n\t\ts.runePos = i\n\t\ts.bytePos += s.width\n\t\tfallthrough\n\tcase i == s.runePos:\n\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\treturn r\n\tcase i == 0: \/\/ start of string\n\t\tr, s.width = utf8.DecodeRuneInString(s.str)\n\t\ts.runePos = 0\n\t\ts.bytePos = 0\n\t\treturn r\n\n\tcase i == s.numRunes-1: \/\/ last rune in string\n\t\tr, s.width = utf8.DecodeLastRuneInString(s.str)\n\t\ts.runePos = i\n\t\ts.bytePos = len(s.str) - s.width\n\t\treturn r\n\t}\n\n\t\/\/ We need to do a linear scan. There are three places to start from:\n\t\/\/ 1) The beginning\n\t\/\/ 2) bytePos\/runePos.\n\t\/\/ 3) The end\n\t\/\/ Choose the closest in rune count, scanning backwards if necessary.\n\tforward := true\n\tif i < s.runePos {\n\t\t\/\/ Between beginning and pos. Which is closer?\n\t\t\/\/ Since both i and runePos are guaranteed >= nonASCII, that's the\n\t\t\/\/ lowest location we need to start from.\n\t\tif i < (s.runePos-s.nonASCII)\/2 {\n\t\t\t\/\/ Scan forward from beginning\n\t\t\ts.bytePos, s.runePos = s.nonASCII, s.nonASCII\n\t\t} else {\n\t\t\t\/\/ Scan backwards from where we are\n\t\t\tforward = false\n\t\t}\n\t} else {\n\t\t\/\/ Between pos and end. Which is closer?\n\t\tif i-s.runePos < (s.numRunes-s.runePos)\/2 {\n\t\t\t\/\/ Scan forward from pos\n\t\t} else {\n\t\t\t\/\/ Scan backwards from end\n\t\t\ts.bytePos, s.runePos = len(s.str), s.numRunes\n\t\t\tforward = false\n\t\t}\n\t}\n\tif forward {\n\t\t\/\/ TODO: Is it much faster to use a range loop for this scan?\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts.runePos++\n\t\t\ts.bytePos += s.width\n\t\t}\n\t} else {\n\t\tfor {\n\t\t\tr, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])\n\t\t\ts.runePos--\n\t\t\ts.bytePos -= s.width\n\t\t\tif s.runePos == i {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn r\n}\n\nvar outOfRange = errors.New(\"utf8string: index out of range\")\nvar sliceOutOfRange = errors.New(\"utf8string: slice index out of range\")\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage template\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Strings of content from a trusted source.\ntype (\n\t\/\/ CSS encapsulates known safe content that matches any of:\n\t\/\/ 1. The CSS3 stylesheet production, such as `p { color: purple }`.\n\t\/\/ 2. The CSS3 rule production, such as `a[href=~\"https:\"].foo#bar`.\n\t\/\/ 3. CSS3 declaration productions, such as `color: red; margin: 2px`.\n\t\/\/ 4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.\n\t\/\/ See http:\/\/www.w3.org\/TR\/css3-syntax\/#style\n\tCSS string\n\n\t\/\/ HTML encapsulates a known safe HTML document fragment.\n\t\/\/ It should not be used for HTML from a third-party, or HTML with\n\t\/\/ unclosed tags or comments. The outputs of a sound HTML sanitizer\n\t\/\/ and a template escaped by this package are fine for use with HTML.\n\tHTML string\n\n\t\/\/ HTMLAttr encapsulates an HTML attribute from a trusted source,\n\t\/\/ for example, ` dir=\"ltr\"`.\n\tHTMLAttr string\n\n\t\/\/ JS encapsulates a known safe EcmaScript5 Expression, for example,\n\t\/\/ `(x + y * z())`. \n\t\/\/ Template authors are responsible for ensuring that typed expressions\n\t\/\/ do not break the intended precedence and that there is no\n\t\/\/ statement\/expression ambiguity as when passing an expression like\n\t\/\/ \"{ foo: bar() }\\n['foo']()\", which is both a valid Expression and a\n\t\/\/ valid Program with a very different meaning.\n\tJS string\n\n\t\/\/ JSStr encapsulates a sequence of characters meant to be embedded\n\t\/\/ between quotes in a JavaScript expression.\n\t\/\/ The string must match a series of StringCharacters:\n\t\/\/ StringCharacter :: SourceCharacter but not `\\` or LineTerminator\n\t\/\/ | EscapeSequence\n\t\/\/ Note that LineContinuations are not allowed.\n\t\/\/ JSStr(\"foo\\\\nbar\") is fine, but JSStr(\"foo\\\\\\nbar\") is not.\n\tJSStr string\n\n\t\/\/ URL encapsulates a known safe URL as defined in RFC 3896.\n\t\/\/ A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`\n\t\/\/ from a trusted source should go in the page, but by default dynamic\n\t\/\/ `javascript:` URLs are filtered out since they are a frequently\n\t\/\/ exploited injection vector.\n\tURL string\n)\n\ntype contentType uint8\n\nconst (\n\tcontentTypePlain contentType = iota\n\tcontentTypeCSS\n\tcontentTypeHTML\n\tcontentTypeHTMLAttr\n\tcontentTypeJS\n\tcontentTypeJSStr\n\tcontentTypeURL\n\t\/\/ contentTypeUnsafe is used in attr.go for values that affect how\n\t\/\/ embedded content and network messages are formed, vetted,\n\t\/\/ or interpreted; or which credentials network messages carry.\n\tcontentTypeUnsafe\n)\n\n\/\/ indirect returns the value, after dereferencing as many times\n\/\/ as necessary to reach the base type (or nil).\nfunc indirect(a interface{}) interface{} {\n\tif t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {\n\t\t\/\/ Avoid creating a reflect.Value if it's not a pointer.\n\t\treturn a\n\t}\n\tv := reflect.ValueOf(a)\n\tfor v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v.Interface()\n}\n\nvar (\n\terrorType = reflect.TypeOf((*error)(nil)).Elem()\n\tfmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()\n)\n\n\/\/ indirectToStringerOrError returns the value, after dereferencing as many times\n\/\/ as necessary to reach the base type (or nil) or an implementation of fmt.Stringer\n\/\/ or error,\nfunc indirectToStringerOrError(a interface{}) interface{} {\n\tv := reflect.ValueOf(a)\n\tfor !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v.Interface()\n}\n\n\/\/ stringify converts its arguments to a string and the type of the content.\n\/\/ All pointers are dereferenced, as in the text\/template package.\nfunc stringify(args ...interface{}) (string, contentType) {\n\tif len(args) == 1 {\n\t\tswitch s := indirect(args[0]).(type) {\n\t\tcase string:\n\t\t\treturn s, contentTypePlain\n\t\tcase CSS:\n\t\t\treturn string(s), contentTypeCSS\n\t\tcase HTML:\n\t\t\treturn string(s), contentTypeHTML\n\t\tcase HTMLAttr:\n\t\t\treturn string(s), contentTypeHTMLAttr\n\t\tcase JS:\n\t\t\treturn string(s), contentTypeJS\n\t\tcase JSStr:\n\t\t\treturn string(s), contentTypeJSStr\n\t\tcase URL:\n\t\t\treturn string(s), contentTypeURL\n\t\t}\n\t}\n\tfor i, arg := range args {\n\t\targs[i] = indirectToStringerOrError(arg)\n\t}\n\treturn fmt.Sprint(args...), contentTypePlain\n}\n<commit_msg>html\/template: fix URL doc<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage template\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Strings of content from a trusted source.\ntype (\n\t\/\/ CSS encapsulates known safe content that matches any of:\n\t\/\/ 1. The CSS3 stylesheet production, such as `p { color: purple }`.\n\t\/\/ 2. The CSS3 rule production, such as `a[href=~\"https:\"].foo#bar`.\n\t\/\/ 3. CSS3 declaration productions, such as `color: red; margin: 2px`.\n\t\/\/ 4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.\n\t\/\/ See http:\/\/www.w3.org\/TR\/css3-syntax\/#style\n\tCSS string\n\n\t\/\/ HTML encapsulates a known safe HTML document fragment.\n\t\/\/ It should not be used for HTML from a third-party, or HTML with\n\t\/\/ unclosed tags or comments. The outputs of a sound HTML sanitizer\n\t\/\/ and a template escaped by this package are fine for use with HTML.\n\tHTML string\n\n\t\/\/ HTMLAttr encapsulates an HTML attribute from a trusted source,\n\t\/\/ for example, ` dir=\"ltr\"`.\n\tHTMLAttr string\n\n\t\/\/ JS encapsulates a known safe EcmaScript5 Expression, for example,\n\t\/\/ `(x + y * z())`. \n\t\/\/ Template authors are responsible for ensuring that typed expressions\n\t\/\/ do not break the intended precedence and that there is no\n\t\/\/ statement\/expression ambiguity as when passing an expression like\n\t\/\/ \"{ foo: bar() }\\n['foo']()\", which is both a valid Expression and a\n\t\/\/ valid Program with a very different meaning.\n\tJS string\n\n\t\/\/ JSStr encapsulates a sequence of characters meant to be embedded\n\t\/\/ between quotes in a JavaScript expression.\n\t\/\/ The string must match a series of StringCharacters:\n\t\/\/ StringCharacter :: SourceCharacter but not `\\` or LineTerminator\n\t\/\/ | EscapeSequence\n\t\/\/ Note that LineContinuations are not allowed.\n\t\/\/ JSStr(\"foo\\\\nbar\") is fine, but JSStr(\"foo\\\\\\nbar\") is not.\n\tJSStr string\n\n\t\/\/ URL encapsulates a known safe URL or URL substring (see RFC 3986).\n\t\/\/ A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`\n\t\/\/ from a trusted source should go in the page, but by default dynamic\n\t\/\/ `javascript:` URLs are filtered out since they are a frequently\n\t\/\/ exploited injection vector.\n\tURL string\n)\n\ntype contentType uint8\n\nconst (\n\tcontentTypePlain contentType = iota\n\tcontentTypeCSS\n\tcontentTypeHTML\n\tcontentTypeHTMLAttr\n\tcontentTypeJS\n\tcontentTypeJSStr\n\tcontentTypeURL\n\t\/\/ contentTypeUnsafe is used in attr.go for values that affect how\n\t\/\/ embedded content and network messages are formed, vetted,\n\t\/\/ or interpreted; or which credentials network messages carry.\n\tcontentTypeUnsafe\n)\n\n\/\/ indirect returns the value, after dereferencing as many times\n\/\/ as necessary to reach the base type (or nil).\nfunc indirect(a interface{}) interface{} {\n\tif t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {\n\t\t\/\/ Avoid creating a reflect.Value if it's not a pointer.\n\t\treturn a\n\t}\n\tv := reflect.ValueOf(a)\n\tfor v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v.Interface()\n}\n\nvar (\n\terrorType = reflect.TypeOf((*error)(nil)).Elem()\n\tfmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()\n)\n\n\/\/ indirectToStringerOrError returns the value, after dereferencing as many times\n\/\/ as necessary to reach the base type (or nil) or an implementation of fmt.Stringer\n\/\/ or error,\nfunc indirectToStringerOrError(a interface{}) interface{} {\n\tv := reflect.ValueOf(a)\n\tfor !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v.Interface()\n}\n\n\/\/ stringify converts its arguments to a string and the type of the content.\n\/\/ All pointers are dereferenced, as in the text\/template package.\nfunc stringify(args ...interface{}) (string, contentType) {\n\tif len(args) == 1 {\n\t\tswitch s := indirect(args[0]).(type) {\n\t\tcase string:\n\t\t\treturn s, contentTypePlain\n\t\tcase CSS:\n\t\t\treturn string(s), contentTypeCSS\n\t\tcase HTML:\n\t\t\treturn string(s), contentTypeHTML\n\t\tcase HTMLAttr:\n\t\t\treturn string(s), contentTypeHTMLAttr\n\t\tcase JS:\n\t\t\treturn string(s), contentTypeJS\n\t\tcase JSStr:\n\t\t\treturn string(s), contentTypeJSStr\n\t\tcase URL:\n\t\t\treturn string(s), contentTypeURL\n\t\t}\n\t}\n\tfor i, arg := range args {\n\t\targs[i] = indirectToStringerOrError(arg)\n\t}\n\treturn fmt.Sprint(args...), contentTypePlain\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc addTeam(handler http.Handler, teamID string, t *testing.T) {\n\tteam := Team{ID: teamID, DisplayName: teamID}\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\terr := enc.Encode(team)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not serialize team: %v\", err)\n\t}\n\treq := httptest.NewRequest(http.MethodPost, \"\/api\/team\/\", b)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Expected response %v, got %v\", http.StatusOK, resp.StatusCode)\n\t}\n}\n\nfunc addPeriod(handler http.Handler, teamID, periodJSON string, t *testing.T) {\n\treq := httptest.NewRequest(http.MethodPost, \"\/api\/period\/\"+teamID+\"\/\", strings.NewReader(periodJSON))\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\t\tbody := string(bodyBytes)\n\t\tt.Fatalf(\"Expected response %v, got %v: %v\", http.StatusOK, resp.StatusCode, body)\n\t}\n}\n\nfunc TestPostPeriod(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\tteamID := \"myteam\"\n\taddTeam(handler, teamID, t)\n\tperiodJSON := `{\"id\":\"2019q1\",\"displayName\":\"2019Q1\",\"unit\":\"person weeks\",\"notesURL\":\"http:\/\/test\",\"buckets\":[{\"displayName\":\"Bucket one\",\"allocationPercentage\":80,\"objectives\":[{\"name\":\"Objective 1\",\"resourceEstimate\":0,\"commitmentType\":\"Committed\",\"assignments\":[]}]}],\"people\":[]}`\n\taddPeriod(handler, teamID, periodJSON, t)\n\n\treq := httptest.NewRequest(http.MethodGet, \"\/api\/period\/\"+teamID+\"\/2019q1\", nil)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Expected response %v, got %v\", http.StatusOK, resp.StatusCode)\n\t}\n\tp := Period{}\n\tdec := json.NewDecoder(resp.Body)\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode response body: %v\", err)\n\t}\n\tif p.Buckets[0].AllocationPercentage != 80 {\n\t\tt.Fatalf(\"Expected allocation percentage 80, found %v\", p.Buckets[0].AllocationPercentage)\n\t}\n}\n\nfunc TestInvalidCommitmentType(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\tteamID := \"myteam\"\n\taddTeam(handler, teamID, t)\n\tperiodJSON := `{\"id\":\"2019q1\",\"displayName\":\"2019Q1\",\"unit\":\"person weeks\",\"notesURL\":\"http:\/\/test\",\"buckets\":[{\"displayName\":\"Bucket one\",\"allocationPercentage\":80,\"objectives\":[{\"name\":\"Objective 1\",\"resourceEstimate\":0,\"commitmentType\":\"wibble\",\"assignments\":[]}]}],\"people\":[]}`\n\n\treq := httptest.NewRequest(http.MethodPost, \"\/api\/period\/\"+teamID+\"\/\", strings.NewReader(periodJSON))\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbody := string(bodyBytes)\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"Expected status code %v, found %v: %v\", http.StatusBadRequest, resp.StatusCode, body)\n\t}\n\n\tif !strings.Contains(body, \"wibble\") {\n\t\tt.Fatalf(\"Expected bad commitment type to appear in body, found: %v\", body)\n\t}\n}\n\nfunc TestImprove(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\treq := httptest.NewRequest(http.MethodGet, \"\/improve\", nil)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusFound {\n\t\tt.Fatalf(\"Expected response %c, got %v\", http.StatusFound, resp.StatusCode)\n\t}\n\texpectedImproveURL := \"https:\/\/github.com\/google\/peoplemath\"\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedImproveURL {\n\t\tt.Errorf(\"Expected redirect to %v, found %v\", expectedImproveURL, location)\n\t}\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbody := string(bodyBytes)\n\tif !strings.Contains(body, expectedImproveURL) {\n\t\tt.Errorf(\"Expected redirect URL %v in body, found %v\", expectedImproveURL, body)\n\t}\n}\n<commit_msg>Add backend test for missing commitment type Also refactor a little.<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc addTeam(handler http.Handler, teamID string, t *testing.T) {\n\tteam := Team{ID: teamID, DisplayName: teamID}\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\terr := enc.Encode(team)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not serialize team: %v\", err)\n\t}\n\treq := httptest.NewRequest(http.MethodPost, \"\/api\/team\/\", b)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Expected response %v, got %v\", http.StatusOK, resp.StatusCode)\n\t}\n}\n\nfunc attemptWritePeriod(handler http.Handler, teamID, periodJSON, httpMethod string, t *testing.T) *http.Response {\n\treq := httptest.NewRequest(httpMethod, \"\/api\/period\/\"+teamID+\"\/\", strings.NewReader(periodJSON))\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\treturn w.Result()\n}\n\nfunc addPeriod(handler http.Handler, teamID, periodJSON string, t *testing.T) {\n\tresp := attemptWritePeriod(handler, teamID, periodJSON, http.MethodPost, t)\n\tif resp.StatusCode != http.StatusOK {\n\t\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\t\tbody := string(bodyBytes)\n\t\tt.Fatalf(\"Expected response %v, got %v: %v\", http.StatusOK, resp.StatusCode, body)\n\t}\n}\n\nfunc getPeriod(handler http.Handler, teamID, periodID string, t *testing.T) *Period {\n\treq := httptest.NewRequest(http.MethodGet, \"\/api\/period\/\"+teamID+\"\/\"+periodID, nil)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Expected response %v, got %v\", http.StatusOK, resp.StatusCode)\n\t}\n\tp := Period{}\n\tdec := json.NewDecoder(resp.Body)\n\terr := dec.Decode(&p)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not decode response body: %v\", err)\n\t}\n\treturn &p\n}\n\nfunc TestPostPeriod(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\tteamID := \"myteam\"\n\taddTeam(handler, teamID, t)\n\tperiodJSON := `{\"id\":\"2019q1\",\"displayName\":\"2019Q1\",\"unit\":\"person weeks\",\"notesURL\":\"http:\/\/test\",\"buckets\":[{\"displayName\":\"Bucket one\",\"allocationPercentage\":80,\"objectives\":[{\"name\":\"Objective 1\",\"resourceEstimate\":0,\"commitmentType\":\"Committed\",\"assignments\":[]}]}],\"people\":[]}`\n\taddPeriod(handler, teamID, periodJSON, t)\n\n\tp := getPeriod(handler, teamID, \"2019q1\", t)\n\n\tif p.Buckets[0].AllocationPercentage != 80 {\n\t\tt.Fatalf(\"Expected allocation percentage 80, found %v\", p.Buckets[0].AllocationPercentage)\n\t}\n}\n\nfunc TestInvalidCommitmentType(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\tteamID := \"myteam\"\n\taddTeam(handler, teamID, t)\n\tperiodJSON := `{\"id\":\"2019q1\",\"displayName\":\"2019Q1\",\"unit\":\"person weeks\",\"notesURL\":\"http:\/\/test\",\"buckets\":[{\"displayName\":\"Bucket one\",\"allocationPercentage\":80,\"objectives\":[{\"name\":\"Objective 1\",\"resourceEstimate\":0,\"commitmentType\":\"wibble\",\"assignments\":[]}]}],\"people\":[]}`\n\n\tresp := attemptWritePeriod(handler, teamID, periodJSON, http.MethodPost, t)\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbody := string(bodyBytes)\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"Expected status code %v, found %v: %v\", http.StatusBadRequest, resp.StatusCode, body)\n\t}\n\n\tif !strings.Contains(body, \"wibble\") {\n\t\tt.Fatalf(\"Expected bad commitment type to appear in body, found: %v\", body)\n\t}\n}\n\nfunc TestMissingCommitmentType(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\tteamID := \"myteam\"\n\taddTeam(handler, teamID, t)\n\tperiodJSON := `{\"id\":\"2019q1\",\"displayName\":\"2019Q1\",\"unit\":\"person weeks\",\"notesURL\":\"http:\/\/test\",\"buckets\":[{\"displayName\":\"Bucket one\",\"allocationPercentage\":80,\"objectives\":[{\"name\":\"Objective 1\",\"resourceEstimate\":0,\"assignments\":[]}]}],\"people\":[]}`\n\taddPeriod(handler, teamID, periodJSON, t)\n\n\tp := getPeriod(handler, teamID, \"2019q1\", t)\n\n\treadCommitType := p.Buckets[0].Objectives[0].CommitmentType\n\tif readCommitType != \"\" {\n\t\tt.Fatalf(\"Expected read objective to have empty commitment type, found %q\", readCommitType)\n\t}\n}\n\nfunc TestImprove(t *testing.T) {\n\tserver := Server{store: makeInMemStore()}\n\thandler := server.makeHandler()\n\n\treq := httptest.NewRequest(http.MethodGet, \"\/improve\", nil)\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, req)\n\tresp := w.Result()\n\tif resp.StatusCode != http.StatusFound {\n\t\tt.Fatalf(\"Expected response %c, got %v\", http.StatusFound, resp.StatusCode)\n\t}\n\texpectedImproveURL := \"https:\/\/github.com\/google\/peoplemath\"\n\tlocation := resp.Header.Get(\"Location\")\n\tif location != expectedImproveURL {\n\t\tt.Errorf(\"Expected redirect to %v, found %v\", expectedImproveURL, location)\n\t}\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbody := string(bodyBytes)\n\tif !strings.Contains(body, expectedImproveURL) {\n\t\tt.Errorf(\"Expected redirect URL %v in body, found %v\", expectedImproveURL, body)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * conversion.go, part of gochem.\n *\n *\n * Copyright 2012 Raul Mera <rmera{at}chemDOThelsinkiDOTfi>\n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU Lesser General Public License as\n * published by the Free Software Foundation; either version 2.1 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General\n * Public License along with this program. If not, see\n * <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\n * Gochem is developed at the laboratory for instruction in Swedish, Department of Chemistry,\n * University of Helsinki, Finland.\n *\n *\n *\/\n\/***Dedicated to the long life of the Ven. Khenpo Phuntzok Tenzin Rinpoche***\/\n\npackage chem\n\n\/\/This provides useful conversion factors and other constants\n\n\/\/Conversions\nconst (\n\tDeg2Rad = 0.0174533\n\tRad2Deg = 1 \/ 0.0174533\n\tH2Kcal = 627.509 \/\/HArtree 2 Kcal\/mol\n\tKcal2H = 1 \/ 627.509\n\tKJ2Kcal = 1 \/ 4.184\n\tKcal2KJ = 4.184\n\tA2Bohr = 1.889725989\n\tBohr2A = 1 \/ 1.889725989\n\tEV2Kcal = 23.061\n\tKcal2EV = 1 \/ 23.061\n)\n\n\/\/Others\nconst (\n\tCHDist = 1.098 \/\/C(sp3)--H distance in A\n)\n<commit_msg>Added a couple of constants to the conversions.go file<commit_after>\/*\n * conversion.go, part of gochem.\n *\n *\n * Copyright 2012 Raul Mera <rmera{at}chemDOThelsinkiDOTfi>\n *\n * This program is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU Lesser General Public License as\n * published by the Free Software Foundation; either version 2.1 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General\n * Public License along with this program. If not, see\n * <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\n * Gochem is developed at the laboratory for instruction in Swedish, Department of Chemistry,\n * University of Helsinki, Finland.\n *\n *\n *\/\n\/***Dedicated to the long life of the Ven. Khenpo Phuntzok Tenzin Rinpoche***\/\n\npackage chem\n\n\/\/This provides useful conversion factors and other constants\n\n\/\/Conversions\nconst (\n\tDeg2Rad = 0.0174533\n\tRad2Deg = 1 \/ 0.0174533\n\tH2Kcal = 627.509 \/\/HArtree 2 Kcal\/mol\n\tKcal2H = 1 \/ 627.509\n\tKJ2Kcal = 1 \/ 4.184\n\tKcal2KJ = 4.184\n\tA2Bohr = 1.889725989\n\tBohr2A = 1 \/ 1.889725989\n\tEV2Kcal = 23.061\n\tKcal2EV = 1 \/ 23.061\n)\n\n\/\/Others\nconst (\n\tCHDist = 1.098 \/\/C(sp3)--H distance in A\n\tKB = 1.380649e-26 \/\/ Boltzmann constant kJ\/K\n\tR = 8.31446261815324e-3 \/\/ kJ\/(K*mol)\n\tNA = 6.02214076e+23\n)\n<|endoftext|>"} {"text":"<commit_before>package gorm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"reflect\"\n)\n\ntype Scope struct {\n\tSearch *search\n\tValue interface{}\n\tSql string\n\tSqlVars []interface{}\n\tdb *DB\n\tindirectValue *reflect.Value\n\tinstanceId string\n\tprimaryKeyField *Field\n\tskipLeft bool\n\tfields map[string]*Field\n\tselectAttrs *[]string\n}\n\nfunc (scope *Scope) IndirectValue() reflect.Value {\n\tif scope.indirectValue == nil {\n\t\tvalue := reflect.Indirect(reflect.ValueOf(scope.Value))\n\t\tif value.Kind() == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t}\n\t\tscope.indirectValue = &value\n\t}\n\treturn *scope.indirectValue\n}\n\nfunc (scope *Scope) NeedPtr() *Scope {\n\treflectKind := reflect.ValueOf(scope.Value).Kind()\n\tif !((reflectKind == reflect.Invalid) || (reflectKind == reflect.Ptr)) {\n\t\terr := fmt.Errorf(\"%v %v\\n\", fileWithLineNum(), \"using unaddressable value\")\n\t\tscope.Err(err)\n\t\tfmt.Printf(err.Error())\n\t}\n\treturn scope\n}\n\n\/\/ New create a new Scope without search information\nfunc (scope *Scope) New(value interface{}) *Scope {\n\treturn &Scope{db: scope.NewDB(), Search: &search{}, Value: value}\n}\n\n\/\/ NewDB create a new DB without search information\nfunc (scope *Scope) NewDB() *DB {\n\tif scope.db != nil {\n\t\tdb := scope.db.clone()\n\t\tdb.search = nil\n\t\tdb.Value = nil\n\t\treturn db\n\t}\n\treturn nil\n}\n\nfunc (scope *Scope) DB() *DB {\n\treturn scope.db\n}\n\n\/\/ SqlDB return *sql.DB\nfunc (scope *Scope) SqlDB() sqlCommon {\n\treturn scope.db.db\n}\n\n\/\/ SkipLeft skip remaining callbacks\nfunc (scope *Scope) SkipLeft() {\n\tscope.skipLeft = true\n}\n\n\/\/ Quote used to quote database column name according to database dialect\nfunc (scope *Scope) Quote(str string) string {\n\tif strings.Index(str, \".\") != -1 {\n\t\tnewStrs := []string{}\n\t\tfor _, str := range strings.Split(str, \".\") {\n\t\t\tnewStrs = append(newStrs, scope.Dialect().Quote(str))\n\t\t}\n\t\treturn strings.Join(newStrs, \".\")\n\t} else {\n\t\treturn scope.Dialect().Quote(str)\n\t}\n}\n\n\/\/ Dialect get dialect\nfunc (scope *Scope) Dialect() Dialect {\n\treturn scope.db.parent.dialect\n}\n\n\/\/ Err write error\nfunc (scope *Scope) Err(err error) error {\n\tif err != nil {\n\t\tscope.db.err(err)\n\t}\n\treturn err\n}\n\n\/\/ Log print log message\nfunc (scope *Scope) Log(v ...interface{}) {\n\tscope.db.log(v...)\n}\n\n\/\/ HasError check if there are any error\nfunc (scope *Scope) HasError() bool {\n\treturn scope.db.Error != nil\n}\n\nfunc (scope *Scope) PrimaryField() *Field {\n\tif primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 {\n\t\tif len(primaryFields) > 1 {\n\t\t\tif field, ok := scope.Fields()[\"id\"]; ok {\n\t\t\t\treturn field\n\t\t\t}\n\t\t}\n\t\treturn scope.Fields()[primaryFields[0].DBName]\n\t}\n\treturn nil\n}\n\n\/\/ PrimaryKey get the primary key's column name\nfunc (scope *Scope) PrimaryKey() string {\n\tif field := scope.PrimaryField(); field != nil {\n\t\treturn field.DBName\n\t}\n\treturn \"\"\n}\n\n\/\/ PrimaryKeyZero check the primary key is blank or not\nfunc (scope *Scope) PrimaryKeyZero() bool {\n\tfield := scope.PrimaryField()\n\treturn field == nil || field.IsBlank\n}\n\n\/\/ PrimaryKeyValue get the primary key's value\nfunc (scope *Scope) PrimaryKeyValue() interface{} {\n\tif field := scope.PrimaryField(); field != nil && field.Field.IsValid() {\n\t\treturn field.Field.Interface()\n\t}\n\treturn 0\n}\n\n\/\/ HasColumn to check if has column\nfunc (scope *Scope) HasColumn(column string) bool {\n\tfor _, field := range scope.GetStructFields() {\n\t\tif field.IsNormal && (field.Name == column || field.DBName == column) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ SetColumn to set the column's value\nfunc (scope *Scope) SetColumn(column interface{}, value interface{}) error {\n\tif field, ok := column.(*Field); ok {\n\t\treturn field.Set(value)\n\t} else if name, ok := column.(string); ok {\n\n\t\tif field, ok := scope.Fields()[name]; ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\n\t\tdbName := ToDBName(name)\n\t\tif field, ok := scope.Fields()[dbName]; ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\n\t\tif field, ok := scope.FieldByName(name); ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\t}\n\treturn errors.New(\"could not convert column to field\")\n}\n\nfunc (scope *Scope) CallMethod(name string, checkError bool) {\n\tif scope.Value == nil || (checkError && scope.HasError()) {\n\t\treturn\n\t}\n\n\tcall := func(value interface{}) {\n\t\tif fm := reflect.ValueOf(value).MethodByName(name); fm.IsValid() {\n\t\t\tswitch f := fm.Interface().(type) {\n\t\t\tcase func():\n\t\t\t\tf()\n\t\t\tcase func(s *Scope):\n\t\t\t\tf(scope)\n\t\t\tcase func(s *DB):\n\t\t\t\tf(scope.NewDB())\n\t\t\tcase func() error:\n\t\t\t\tscope.Err(f())\n\t\t\tcase func(s *Scope) error:\n\t\t\t\tscope.Err(f(scope))\n\t\t\tcase func(s *DB) error:\n\t\t\t\tscope.Err(f(scope.NewDB()))\n\t\t\tdefault:\n\t\t\t\tscope.Err(fmt.Errorf(\"unsupported function %v\", name))\n\t\t\t}\n\t\t}\n\t}\n\n\tif values := scope.IndirectValue(); values.Kind() == reflect.Slice {\n\t\tfor i := 0; i < values.Len(); i++ {\n\t\t\tcall(values.Index(i).Addr().Interface())\n\t\t}\n\t} else {\n\t\tcall(scope.Value)\n\t}\n}\n\nfunc (scope *Scope) CallMethodWithErrorCheck(name string) {\n\tscope.CallMethod(name, true)\n}\n\n\/\/ AddToVars add value as sql's vars, gorm will escape them\nfunc (scope *Scope) AddToVars(value interface{}) string {\n\tif expr, ok := value.(*expr); ok {\n\t\texp := expr.expr\n\t\tfor _, arg := range expr.args {\n\t\t\texp = strings.Replace(exp, \"?\", scope.AddToVars(arg), 1)\n\t\t}\n\t\treturn exp\n\t} else {\n\t\tscope.SqlVars = append(scope.SqlVars, value)\n\t\treturn scope.Dialect().BinVar(len(scope.SqlVars))\n\t}\n}\n\ntype tabler interface {\n\tTableName() string\n}\n\ntype dbTabler interface {\n\tTableName(*DB) string\n}\n\n\/\/ TableName get table name\nfunc (scope *Scope) TableName() string {\n\tif scope.Search != nil && len(scope.Search.tableName) > 0 {\n\t\treturn scope.Search.tableName\n\t}\n\n\tif tabler, ok := scope.Value.(tabler); ok {\n\t\treturn tabler.TableName()\n\t}\n\n\tif tabler, ok := scope.Value.(dbTabler); ok {\n\t\treturn tabler.TableName(scope.db)\n\t}\n\n\tif scope.GetModelStruct().TableName != nil {\n\t\treturn scope.GetModelStruct().TableName(scope.db)\n\t}\n\n\tscope.Err(errors.New(\"wrong table name\"))\n\treturn \"\"\n}\n\nfunc (scope *Scope) QuotedTableName() (name string) {\n\tif scope.Search != nil && len(scope.Search.tableName) > 0 {\n\t\treturn scope.Quote(scope.Search.tableName)\n\t} else {\n\t\treturn scope.Quote(scope.TableName())\n\t}\n}\n\n\/\/ CombinedConditionSql get combined condition sql\nfunc (scope *Scope) CombinedConditionSql() string {\n\treturn scope.joinsSql() + scope.whereSql() + scope.groupSql() +\n\t\tscope.havingSql() + scope.orderSql() + scope.limitSql() + scope.offsetSql()\n}\n\nfunc (scope *Scope) FieldByName(name string) (field *Field, ok bool) {\n\tfor _, field := range scope.Fields() {\n\t\tif field.Name == name {\n\t\t\treturn field, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n\/\/ Raw set sql\nfunc (scope *Scope) Raw(sql string) *Scope {\n\tscope.Sql = strings.Replace(sql, \"$$\", \"?\", -1)\n\treturn scope\n}\n\n\/\/ Exec invoke sql\nfunc (scope *Scope) Exec() *Scope {\n\tdefer scope.Trace(NowFunc())\n\n\tif !scope.HasError() {\n\t\tif result, err := scope.SqlDB().Exec(scope.Sql, scope.SqlVars...); scope.Err(err) == nil {\n\t\t\tif count, err := result.RowsAffected(); err == nil {\n\t\t\t\tscope.db.RowsAffected = count\n\t\t\t}\n\t\t}\n\t}\n\treturn scope\n}\n\n\/\/ Set set value by name\nfunc (scope *Scope) Set(name string, value interface{}) *Scope {\n\tscope.db.InstantSet(name, value)\n\treturn scope\n}\n\n\/\/ Get get value by name\nfunc (scope *Scope) Get(name string) (interface{}, bool) {\n\treturn scope.db.Get(name)\n}\n\n\/\/ InstanceId get InstanceId for scope\nfunc (scope *Scope) InstanceId() string {\n\tif scope.instanceId == \"\" {\n\t\tscope.instanceId = fmt.Sprintf(\"%v%v\", &scope, &scope.db)\n\t}\n\treturn scope.instanceId\n}\n\nfunc (scope *Scope) InstanceSet(name string, value interface{}) *Scope {\n\treturn scope.Set(name+scope.InstanceId(), value)\n}\n\nfunc (scope *Scope) InstanceGet(name string) (interface{}, bool) {\n\treturn scope.Get(name + scope.InstanceId())\n}\n\n\/\/ Trace print sql log\nfunc (scope *Scope) Trace(t time.Time) {\n\tif len(scope.Sql) > 0 {\n\t\tscope.db.slog(scope.Sql, t, scope.SqlVars...)\n\t}\n}\n\n\/\/ Begin start a transaction\nfunc (scope *Scope) Begin() *Scope {\n\tif db, ok := scope.SqlDB().(sqlDb); ok {\n\t\tif tx, err := db.Begin(); err == nil {\n\t\t\tscope.db.db = interface{}(tx).(sqlCommon)\n\t\t\tscope.InstanceSet(\"gorm:started_transaction\", true)\n\t\t}\n\t}\n\treturn scope\n}\n\n\/\/ CommitOrRollback commit current transaction if there is no error, otherwise rollback it\nfunc (scope *Scope) CommitOrRollback() *Scope {\n\tif _, ok := scope.InstanceGet(\"gorm:started_transaction\"); ok {\n\t\tif db, ok := scope.db.db.(sqlTx); ok {\n\t\t\tif scope.HasError() {\n\t\t\t\tdb.Rollback()\n\t\t\t} else {\n\t\t\t\tdb.Commit()\n\t\t\t}\n\t\t\tscope.db.db = scope.db.parent.db\n\t\t}\n\t}\n\treturn scope\n}\n\nfunc (scope *Scope) SelectAttrs() []string {\n\tif scope.selectAttrs == nil {\n\t\tattrs := []string{}\n\t\tfor _, value := range scope.Search.selects {\n\t\t\tif str, ok := value.(string); ok {\n\t\t\t\tattrs = append(attrs, str)\n\t\t\t} else if strs, ok := value.([]string); ok {\n\t\t\t\tattrs = append(attrs, strs...)\n\t\t\t} else if strs, ok := value.([]interface{}); ok {\n\t\t\t\tfor _, str := range strs {\n\t\t\t\t\tattrs = append(attrs, fmt.Sprintf(\"%v\", str))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tscope.selectAttrs = &attrs\n\t}\n\treturn *scope.selectAttrs\n}\n\nfunc (scope *Scope) OmitAttrs() []string {\n\treturn scope.Search.omits\n}\n\nfunc (scope *Scope) changeableDBColumn(column string) bool {\n\tselectAttrs := scope.SelectAttrs()\n\tomitAttrs := scope.OmitAttrs()\n\n\tif len(selectAttrs) > 0 {\n\t\tfor _, attr := range selectAttrs {\n\t\t\tif column == ToDBName(attr) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, attr := range omitAttrs {\n\t\tif column == ToDBName(attr) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (scope *Scope) changeableField(field *Field) bool {\n\tselectAttrs := scope.SelectAttrs()\n\tomitAttrs := scope.OmitAttrs()\n\n\tif len(selectAttrs) > 0 {\n\t\tfor _, attr := range selectAttrs {\n\t\t\tif field.Name == attr || field.DBName == attr {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, attr := range omitAttrs {\n\t\tif field.Name == attr || field.DBName == attr {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn !field.IsIgnored\n}\n\nfunc (scope *Scope) shouldSaveAssociations() bool {\n\tsaveAssociations, ok := scope.Get(\"gorm:save_associations\")\n\tif ok && !saveAssociations.(bool) {\n\t\treturn false\n\t}\n\treturn true\n}\n<commit_msg>Fix use SQL as table name<commit_after>package gorm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"reflect\"\n)\n\ntype Scope struct {\n\tSearch *search\n\tValue interface{}\n\tSql string\n\tSqlVars []interface{}\n\tdb *DB\n\tindirectValue *reflect.Value\n\tinstanceId string\n\tprimaryKeyField *Field\n\tskipLeft bool\n\tfields map[string]*Field\n\tselectAttrs *[]string\n}\n\nfunc (scope *Scope) IndirectValue() reflect.Value {\n\tif scope.indirectValue == nil {\n\t\tvalue := reflect.Indirect(reflect.ValueOf(scope.Value))\n\t\tif value.Kind() == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t}\n\t\tscope.indirectValue = &value\n\t}\n\treturn *scope.indirectValue\n}\n\nfunc (scope *Scope) NeedPtr() *Scope {\n\treflectKind := reflect.ValueOf(scope.Value).Kind()\n\tif !((reflectKind == reflect.Invalid) || (reflectKind == reflect.Ptr)) {\n\t\terr := fmt.Errorf(\"%v %v\\n\", fileWithLineNum(), \"using unaddressable value\")\n\t\tscope.Err(err)\n\t\tfmt.Printf(err.Error())\n\t}\n\treturn scope\n}\n\n\/\/ New create a new Scope without search information\nfunc (scope *Scope) New(value interface{}) *Scope {\n\treturn &Scope{db: scope.NewDB(), Search: &search{}, Value: value}\n}\n\n\/\/ NewDB create a new DB without search information\nfunc (scope *Scope) NewDB() *DB {\n\tif scope.db != nil {\n\t\tdb := scope.db.clone()\n\t\tdb.search = nil\n\t\tdb.Value = nil\n\t\treturn db\n\t}\n\treturn nil\n}\n\nfunc (scope *Scope) DB() *DB {\n\treturn scope.db\n}\n\n\/\/ SqlDB return *sql.DB\nfunc (scope *Scope) SqlDB() sqlCommon {\n\treturn scope.db.db\n}\n\n\/\/ SkipLeft skip remaining callbacks\nfunc (scope *Scope) SkipLeft() {\n\tscope.skipLeft = true\n}\n\n\/\/ Quote used to quote database column name according to database dialect\nfunc (scope *Scope) Quote(str string) string {\n\tif strings.Index(str, \".\") != -1 {\n\t\tnewStrs := []string{}\n\t\tfor _, str := range strings.Split(str, \".\") {\n\t\t\tnewStrs = append(newStrs, scope.Dialect().Quote(str))\n\t\t}\n\t\treturn strings.Join(newStrs, \".\")\n\t} else {\n\t\treturn scope.Dialect().Quote(str)\n\t}\n}\n\n\/\/ Dialect get dialect\nfunc (scope *Scope) Dialect() Dialect {\n\treturn scope.db.parent.dialect\n}\n\n\/\/ Err write error\nfunc (scope *Scope) Err(err error) error {\n\tif err != nil {\n\t\tscope.db.err(err)\n\t}\n\treturn err\n}\n\n\/\/ Log print log message\nfunc (scope *Scope) Log(v ...interface{}) {\n\tscope.db.log(v...)\n}\n\n\/\/ HasError check if there are any error\nfunc (scope *Scope) HasError() bool {\n\treturn scope.db.Error != nil\n}\n\nfunc (scope *Scope) PrimaryField() *Field {\n\tif primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 {\n\t\tif len(primaryFields) > 1 {\n\t\t\tif field, ok := scope.Fields()[\"id\"]; ok {\n\t\t\t\treturn field\n\t\t\t}\n\t\t}\n\t\treturn scope.Fields()[primaryFields[0].DBName]\n\t}\n\treturn nil\n}\n\n\/\/ PrimaryKey get the primary key's column name\nfunc (scope *Scope) PrimaryKey() string {\n\tif field := scope.PrimaryField(); field != nil {\n\t\treturn field.DBName\n\t}\n\treturn \"\"\n}\n\n\/\/ PrimaryKeyZero check the primary key is blank or not\nfunc (scope *Scope) PrimaryKeyZero() bool {\n\tfield := scope.PrimaryField()\n\treturn field == nil || field.IsBlank\n}\n\n\/\/ PrimaryKeyValue get the primary key's value\nfunc (scope *Scope) PrimaryKeyValue() interface{} {\n\tif field := scope.PrimaryField(); field != nil && field.Field.IsValid() {\n\t\treturn field.Field.Interface()\n\t}\n\treturn 0\n}\n\n\/\/ HasColumn to check if has column\nfunc (scope *Scope) HasColumn(column string) bool {\n\tfor _, field := range scope.GetStructFields() {\n\t\tif field.IsNormal && (field.Name == column || field.DBName == column) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ SetColumn to set the column's value\nfunc (scope *Scope) SetColumn(column interface{}, value interface{}) error {\n\tif field, ok := column.(*Field); ok {\n\t\treturn field.Set(value)\n\t} else if name, ok := column.(string); ok {\n\n\t\tif field, ok := scope.Fields()[name]; ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\n\t\tdbName := ToDBName(name)\n\t\tif field, ok := scope.Fields()[dbName]; ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\n\t\tif field, ok := scope.FieldByName(name); ok {\n\t\t\treturn field.Set(value)\n\t\t}\n\t}\n\treturn errors.New(\"could not convert column to field\")\n}\n\nfunc (scope *Scope) CallMethod(name string, checkError bool) {\n\tif scope.Value == nil || (checkError && scope.HasError()) {\n\t\treturn\n\t}\n\n\tcall := func(value interface{}) {\n\t\tif fm := reflect.ValueOf(value).MethodByName(name); fm.IsValid() {\n\t\t\tswitch f := fm.Interface().(type) {\n\t\t\tcase func():\n\t\t\t\tf()\n\t\t\tcase func(s *Scope):\n\t\t\t\tf(scope)\n\t\t\tcase func(s *DB):\n\t\t\t\tf(scope.NewDB())\n\t\t\tcase func() error:\n\t\t\t\tscope.Err(f())\n\t\t\tcase func(s *Scope) error:\n\t\t\t\tscope.Err(f(scope))\n\t\t\tcase func(s *DB) error:\n\t\t\t\tscope.Err(f(scope.NewDB()))\n\t\t\tdefault:\n\t\t\t\tscope.Err(fmt.Errorf(\"unsupported function %v\", name))\n\t\t\t}\n\t\t}\n\t}\n\n\tif values := scope.IndirectValue(); values.Kind() == reflect.Slice {\n\t\tfor i := 0; i < values.Len(); i++ {\n\t\t\tcall(values.Index(i).Addr().Interface())\n\t\t}\n\t} else {\n\t\tcall(scope.Value)\n\t}\n}\n\nfunc (scope *Scope) CallMethodWithErrorCheck(name string) {\n\tscope.CallMethod(name, true)\n}\n\n\/\/ AddToVars add value as sql's vars, gorm will escape them\nfunc (scope *Scope) AddToVars(value interface{}) string {\n\tif expr, ok := value.(*expr); ok {\n\t\texp := expr.expr\n\t\tfor _, arg := range expr.args {\n\t\t\texp = strings.Replace(exp, \"?\", scope.AddToVars(arg), 1)\n\t\t}\n\t\treturn exp\n\t} else {\n\t\tscope.SqlVars = append(scope.SqlVars, value)\n\t\treturn scope.Dialect().BinVar(len(scope.SqlVars))\n\t}\n}\n\ntype tabler interface {\n\tTableName() string\n}\n\ntype dbTabler interface {\n\tTableName(*DB) string\n}\n\n\/\/ TableName get table name\nfunc (scope *Scope) TableName() string {\n\tif scope.Search != nil && len(scope.Search.tableName) > 0 {\n\t\treturn scope.Search.tableName\n\t}\n\n\tif tabler, ok := scope.Value.(tabler); ok {\n\t\treturn tabler.TableName()\n\t}\n\n\tif tabler, ok := scope.Value.(dbTabler); ok {\n\t\treturn tabler.TableName(scope.db)\n\t}\n\n\tif scope.GetModelStruct().TableName != nil {\n\t\treturn scope.GetModelStruct().TableName(scope.db)\n\t}\n\n\tscope.Err(errors.New(\"wrong table name\"))\n\treturn \"\"\n}\n\nfunc (scope *Scope) QuotedTableName() (name string) {\n\tif scope.Search != nil && len(scope.Search.tableName) > 0 {\n\t\tif strings.Index(scope.Search.tableName, \" \") != -1 {\n\t\t\treturn scope.Search.tableName\n\t\t}\n\t\treturn scope.Quote(scope.Search.tableName)\n\t} else {\n\t\treturn scope.Quote(scope.TableName())\n\t}\n}\n\n\/\/ CombinedConditionSql get combined condition sql\nfunc (scope *Scope) CombinedConditionSql() string {\n\treturn scope.joinsSql() + scope.whereSql() + scope.groupSql() +\n\t\tscope.havingSql() + scope.orderSql() + scope.limitSql() + scope.offsetSql()\n}\n\nfunc (scope *Scope) FieldByName(name string) (field *Field, ok bool) {\n\tfor _, field := range scope.Fields() {\n\t\tif field.Name == name {\n\t\t\treturn field, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n\/\/ Raw set sql\nfunc (scope *Scope) Raw(sql string) *Scope {\n\tscope.Sql = strings.Replace(sql, \"$$\", \"?\", -1)\n\treturn scope\n}\n\n\/\/ Exec invoke sql\nfunc (scope *Scope) Exec() *Scope {\n\tdefer scope.Trace(NowFunc())\n\n\tif !scope.HasError() {\n\t\tif result, err := scope.SqlDB().Exec(scope.Sql, scope.SqlVars...); scope.Err(err) == nil {\n\t\t\tif count, err := result.RowsAffected(); err == nil {\n\t\t\t\tscope.db.RowsAffected = count\n\t\t\t}\n\t\t}\n\t}\n\treturn scope\n}\n\n\/\/ Set set value by name\nfunc (scope *Scope) Set(name string, value interface{}) *Scope {\n\tscope.db.InstantSet(name, value)\n\treturn scope\n}\n\n\/\/ Get get value by name\nfunc (scope *Scope) Get(name string) (interface{}, bool) {\n\treturn scope.db.Get(name)\n}\n\n\/\/ InstanceId get InstanceId for scope\nfunc (scope *Scope) InstanceId() string {\n\tif scope.instanceId == \"\" {\n\t\tscope.instanceId = fmt.Sprintf(\"%v%v\", &scope, &scope.db)\n\t}\n\treturn scope.instanceId\n}\n\nfunc (scope *Scope) InstanceSet(name string, value interface{}) *Scope {\n\treturn scope.Set(name+scope.InstanceId(), value)\n}\n\nfunc (scope *Scope) InstanceGet(name string) (interface{}, bool) {\n\treturn scope.Get(name + scope.InstanceId())\n}\n\n\/\/ Trace print sql log\nfunc (scope *Scope) Trace(t time.Time) {\n\tif len(scope.Sql) > 0 {\n\t\tscope.db.slog(scope.Sql, t, scope.SqlVars...)\n\t}\n}\n\n\/\/ Begin start a transaction\nfunc (scope *Scope) Begin() *Scope {\n\tif db, ok := scope.SqlDB().(sqlDb); ok {\n\t\tif tx, err := db.Begin(); err == nil {\n\t\t\tscope.db.db = interface{}(tx).(sqlCommon)\n\t\t\tscope.InstanceSet(\"gorm:started_transaction\", true)\n\t\t}\n\t}\n\treturn scope\n}\n\n\/\/ CommitOrRollback commit current transaction if there is no error, otherwise rollback it\nfunc (scope *Scope) CommitOrRollback() *Scope {\n\tif _, ok := scope.InstanceGet(\"gorm:started_transaction\"); ok {\n\t\tif db, ok := scope.db.db.(sqlTx); ok {\n\t\t\tif scope.HasError() {\n\t\t\t\tdb.Rollback()\n\t\t\t} else {\n\t\t\t\tdb.Commit()\n\t\t\t}\n\t\t\tscope.db.db = scope.db.parent.db\n\t\t}\n\t}\n\treturn scope\n}\n\nfunc (scope *Scope) SelectAttrs() []string {\n\tif scope.selectAttrs == nil {\n\t\tattrs := []string{}\n\t\tfor _, value := range scope.Search.selects {\n\t\t\tif str, ok := value.(string); ok {\n\t\t\t\tattrs = append(attrs, str)\n\t\t\t} else if strs, ok := value.([]string); ok {\n\t\t\t\tattrs = append(attrs, strs...)\n\t\t\t} else if strs, ok := value.([]interface{}); ok {\n\t\t\t\tfor _, str := range strs {\n\t\t\t\t\tattrs = append(attrs, fmt.Sprintf(\"%v\", str))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tscope.selectAttrs = &attrs\n\t}\n\treturn *scope.selectAttrs\n}\n\nfunc (scope *Scope) OmitAttrs() []string {\n\treturn scope.Search.omits\n}\n\nfunc (scope *Scope) changeableDBColumn(column string) bool {\n\tselectAttrs := scope.SelectAttrs()\n\tomitAttrs := scope.OmitAttrs()\n\n\tif len(selectAttrs) > 0 {\n\t\tfor _, attr := range selectAttrs {\n\t\t\tif column == ToDBName(attr) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, attr := range omitAttrs {\n\t\tif column == ToDBName(attr) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (scope *Scope) changeableField(field *Field) bool {\n\tselectAttrs := scope.SelectAttrs()\n\tomitAttrs := scope.OmitAttrs()\n\n\tif len(selectAttrs) > 0 {\n\t\tfor _, attr := range selectAttrs {\n\t\t\tif field.Name == attr || field.DBName == attr {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, attr := range omitAttrs {\n\t\tif field.Name == attr || field.DBName == attr {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn !field.IsIgnored\n}\n\nfunc (scope *Scope) shouldSaveAssociations() bool {\n\tsaveAssociations, ok := scope.Get(\"gorm:save_associations\")\n\tif ok && !saveAssociations.(bool) {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package jcapi\n\nimport (\n\t\"testing\"\n)\n\nconst (\n\ttestAPIKey string = \"<your-API-key-here>\"\n\ttestUrlBase string = \"https:\/\/console.jumpcloud.com\/api\"\n\tauthUrlBase string = \"https:\/\/auth.jumpcloud.com\/authenticate\"\n)\n\nfunc MakeTestUser() (user JCUser) {\n\tuser = JCUser{\n\t\tUserName: \"testuser\",\n\t\tFirstName: \"Test\",\n\t\tLastName: \"User\",\n\t\tEmail: \"testuser@jumpcloud.com\",\n\t\tPassword: \"test!@#$ADSF\",\n\t\tActivated: true,\n\t\tSudo: true,\n\t\tUid: \"2244\",\n\t\tGid: \"2244\",\n\t\tEnableManagedUid: true,\n\t\tTagList: make([]string, 0),\n\t\tExternallyManaged: false,\n\t}\n\n\treturn\n}\n\nfunc TestSystemUsersByOne(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tretrievedUser, err := jcapi.GetSystemUserById(userId, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get the system user I just added, err='%s'\", err)\n\t}\n\n\tif userId != retrievedUser.Id {\n\t\tt.Fatalf(\"Got back a different user ID than expected, this shouldn't happen! Initial userId='%s' - returned object: '%s'\",\n\t\t\tuserId, retrievedUser.ToString())\n\t}\n\n\tretrievedUser.Email = \"newtestemail@jumpcloud.com\"\n\n\t\/\/ We have to do the following because of bug: https:\/\/www.pivotaltracker.com\/story\/show\/84876992\n\tretrievedUser.Uid = \"2244\"\n\tretrievedUser.Gid = \"2244\"\n\n\tnewUserId, err := jcapi.AddUpdateUser(Update, retrievedUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not modify email on the just-added user ('%s'), err='%s'\", retrievedUser.ToString(), err)\n\t}\n\n\tif userId != newUserId {\n\t\tt.Fatalf(\"The user ID of the updated user changed across updates, this should never happen!\")\n\t}\n\n\terr = jcapi.DeleteUser(retrievedUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete user ('%s'), err='%s'\", retrievedUser.ToString(), err)\n\t}\n\n\treturn\n}\n\nfunc TestSystemUsers(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping potentially long test in short mode\")\n\t}\n\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tallUsers, err := jcapi.GetSystemUsers(true)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get all system users, err='%s'\", err)\n\t}\n\n\tt.Logf(\"GetSystemUsers() returned %d users\", len(allUsers))\n\n\tvar foundUser int = -1\n\n\tfor i, user := range allUsers {\n\t\tif user.Id == userId {\n\t\t\tfoundUser = i\n\t\t\tt.Logf(\"Matched user[%d]='%s'\", i, user.ToString())\n\t\t}\n\t}\n\n\tif foundUser == -1 {\n\t\tt.Fatalf(\"Could not find the user ID just added '%s', foundUser=%d\", userId, foundUser)\n\t}\n\n\tallUsers[foundUser].Email = \"newtestemail@jumpcloud.com\"\n\n\t\/\/ We have to do the following because of bug: https:\/\/www.pivotaltracker.com\/story\/show\/84876992\n\tallUsers[foundUser].Uid = \"2244\"\n\tallUsers[foundUser].Gid = \"2244\"\n\n\tnewUserId, err := jcapi.AddUpdateUser(Update, allUsers[foundUser])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not modify email on the just-added user ('%s'), err='%s'\", allUsers[foundUser].ToString(), err)\n\t}\n\n\tif userId != newUserId {\n\t\tt.Fatalf(\"The user ID of the updated user changed across updates, this should never happen!\")\n\t}\n\n\terr = jcapi.DeleteUser(allUsers[foundUser])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete user ('%s'), err='%s'\", allUsers[foundUser].ToString(), err)\n\t}\n\n\treturn\n}\n\nfunc MakeTestTag() (tag JCTag) {\n\ttag = JCTag{\n\t\tName: \"Test tag #1\",\n\t\tGroupName: \"testtag1\",\n\t\tSystems: make([]string, 0),\n\t\tSystemUsers: make([]string, 0),\n\t\tExpired: false,\n\t\tSelected: false,\n\t\tExternallyManaged: false,\n\t}\n\n\treturn\n}\n\nfunc TestTags(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewTag := MakeTestTag()\n\n\ttagId, err := jcapi.AddUpdateTag(Insert, newTag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new tag ('%s'), err='%s'\", newTag.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned tagId=%d\", tagId)\n\n\tallTags, err := jcapi.GetAllTags()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not GetAllTags, err='%s'\", err)\n\t}\n\n\tvar foundTag int\n\n\tfor i, tag := range allTags {\n\t\tt.Logf(\"Tag[%d]='%s'\", i, tag)\n\t\tif tag.Id == tagId {\n\t\t\tfoundTag = i\n\t\t}\n\t}\n\n\tallTags[foundTag].Name = \"Test tag #1 with a name change\"\n\n\tnewTagId, err := jcapi.AddUpdateTag(Update, allTags[foundTag])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not change the test tag's name, err='%s'\", err)\n\t}\n\n\tif tagId != newTagId {\n\t\tt.Fatalf(\"The ID of the tag changed during an update, this shouldn't happen.\")\n\t}\n\n\terr = jcapi.DeleteTag(allTags[foundTag])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete the tag I just added ('%s'), err='%s'\", allTags[foundTag].ToString(), err)\n\t}\n}\n\nfunc MakeIDSource() JCIDSource {\n\n\treturn JCIDSource{\n\t\tName: \"Test Name\",\n\t\tType: \"Active Directory\",\n\t\tVersion: \"1.0.0\",\n\t\tIpAddress: \"127.0.0.1\",\n\t\tLastUpdateTime: \"2014-10-14 23:34:33\",\n\t\tDN: \"CN=JumpCloud;CN=Users;DC=jumpcloud;DC=com\",\n\t\tActive: true,\n\t}\n}\n\nfunc TestIDSources(t *testing.T) {\n\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\te := MakeIDSource()\n\n\tresult, err := jcapi.AddUpdateIDSource(Insert, e)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not post a new ID Source object, err='%s'\", err)\n\t}\n\n\tt.Logf(\"Post to idsources API successful, result=%U\", result)\n\n\textSourceList, err := jcapi.GetAllIDSources()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not list all external sources, err='%s'\", err)\n\t}\n\n\tfor idx, source := range extSourceList {\n\t\tt.Logf(\"Result %d: '%s'\", idx, source.ToString())\n\t}\n\n\teGet, exists, err := jcapi.GetIDSourceByName(e.Name)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get an external source by name '%s', err='%s'\", e.Name, err)\n\t} else if exists && eGet.Name != e.Name {\n\t\tt.Fatalf(\"Received name is different ('%s') than what was sent ('%s')\", eGet.Name, e.Name)\n\t} else if !exists {\n\t\tt.Fatalf(\"Could not find the record we just put in '%c'\")\n\t}\n\n\t\/\/\n\t\/\/ If there's more than one test object with this name, let's just\n\t\/\/ loop over and delete them until we find no more of them...\n\t\/\/\n\tfor exists, err = true, nil; exists; eGet, exists, err = jcapi.GetIDSourceByName(e.Name) {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ERROR: getIDSourceByName() on '%s' failed, err='%s'\", eGet.ToString(), err)\n\t\t}\n\n\t\terr = jcapi.DeleteIDSource(eGet)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ERROR: Delete on '%s' failed, err='%s'\", eGet.ToString(), err)\n\t\t}\n\t}\n}\n\nfunc checkAuth(t *testing.T, expectedResult bool, username, password, tag string) {\n\tauthjc := NewJCAPI(testAPIKey, authUrlBase)\n\n\tuserAuth, err := authjc.AuthUser(username, password, tag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not authenticate the user '%s' with password '%s' and tag '%s' err='%s'\", username, password, tag, err)\n\t}\n\n\tif userAuth != expectedResult {\n\t\tt.Fatalf(\"userAuth=%t, we expected %s for user='%s', pass='%s', tag='%s'\", userAuth, expectedResult, username, password, tag)\n\t}\n}\n\nfunc TestRestAuth(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\tnewUser.Id = userId\n\tdefer jcapi.DeleteUser(newUser)\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, \"\")\n\tcheckAuth(t, false, newUser.UserName, newUser.Password, \"mytesttag\")\n\tcheckAuth(t, false, newUser.UserName, \"a0938mbo\", \"\")\n\tcheckAuth(t, false, \"2309vnotauser\", newUser.Password, \"\")\n\tcheckAuth(t, false, \"\", \"\", \"\")\n\n\t\/\/\n\t\/\/ Now add a tag and put the user in it, and let's try all the tag checking stuff\n\t\/\/\n\tnewTag := MakeTestTag()\n\n\tnewTag.SystemUsers = append(newTag.SystemUsers, userId)\n\n\ttagId, err := jcapi.AddUpdateTag(Insert, newTag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new tag ('%s'), err='%s'\", newTag.ToString(), err)\n\t}\n\tnewTag.Id = tagId\n\tdefer jcapi.DeleteTag(newTag)\n\n\tt.Logf(\"Returned tagId=%d\", tagId)\n\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, newTag.Name)\n\tcheckAuth(t, false, newUser.UserName, newUser.Password, \"not a real tag\")\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, \"\")\n}\n<commit_msg>Fixed authUrlBase<commit_after>package jcapi\n\nimport (\n\t\"testing\"\n)\n\nconst (\n\ttestAPIKey string = \"<your-API-key-here>\"\n\ttestUrlBase string = \"https:\/\/console.jumpcloud.com\/api\"\n\tauthUrlBase string = \"https:\/\/auth.jumpcloud.com\"\n)\n\nfunc MakeTestUser() (user JCUser) {\n\tuser = JCUser{\n\t\tUserName: \"testuser\",\n\t\tFirstName: \"Test\",\n\t\tLastName: \"User\",\n\t\tEmail: \"testuser@jumpcloud.com\",\n\t\tPassword: \"test!@#$ADSF\",\n\t\tActivated: true,\n\t\tSudo: true,\n\t\tUid: \"2244\",\n\t\tGid: \"2244\",\n\t\tEnableManagedUid: true,\n\t\tTagList: make([]string, 0),\n\t\tExternallyManaged: false,\n\t}\n\n\treturn\n}\n\nfunc TestSystemUsersByOne(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tretrievedUser, err := jcapi.GetSystemUserById(userId, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get the system user I just added, err='%s'\", err)\n\t}\n\n\tif userId != retrievedUser.Id {\n\t\tt.Fatalf(\"Got back a different user ID than expected, this shouldn't happen! Initial userId='%s' - returned object: '%s'\",\n\t\t\tuserId, retrievedUser.ToString())\n\t}\n\n\tretrievedUser.Email = \"newtestemail@jumpcloud.com\"\n\n\t\/\/ We have to do the following because of bug: https:\/\/www.pivotaltracker.com\/story\/show\/84876992\n\tretrievedUser.Uid = \"2244\"\n\tretrievedUser.Gid = \"2244\"\n\n\tnewUserId, err := jcapi.AddUpdateUser(Update, retrievedUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not modify email on the just-added user ('%s'), err='%s'\", retrievedUser.ToString(), err)\n\t}\n\n\tif userId != newUserId {\n\t\tt.Fatalf(\"The user ID of the updated user changed across updates, this should never happen!\")\n\t}\n\n\terr = jcapi.DeleteUser(retrievedUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete user ('%s'), err='%s'\", retrievedUser.ToString(), err)\n\t}\n\n\treturn\n}\n\nfunc TestSystemUsers(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping potentially long test in short mode\")\n\t}\n\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tallUsers, err := jcapi.GetSystemUsers(true)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get all system users, err='%s'\", err)\n\t}\n\n\tt.Logf(\"GetSystemUsers() returned %d users\", len(allUsers))\n\n\tvar foundUser int = -1\n\n\tfor i, user := range allUsers {\n\t\tif user.Id == userId {\n\t\t\tfoundUser = i\n\t\t\tt.Logf(\"Matched user[%d]='%s'\", i, user.ToString())\n\t\t}\n\t}\n\n\tif foundUser == -1 {\n\t\tt.Fatalf(\"Could not find the user ID just added '%s', foundUser=%d\", userId, foundUser)\n\t}\n\n\tallUsers[foundUser].Email = \"newtestemail@jumpcloud.com\"\n\n\t\/\/ We have to do the following because of bug: https:\/\/www.pivotaltracker.com\/story\/show\/84876992\n\tallUsers[foundUser].Uid = \"2244\"\n\tallUsers[foundUser].Gid = \"2244\"\n\n\tnewUserId, err := jcapi.AddUpdateUser(Update, allUsers[foundUser])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not modify email on the just-added user ('%s'), err='%s'\", allUsers[foundUser].ToString(), err)\n\t}\n\n\tif userId != newUserId {\n\t\tt.Fatalf(\"The user ID of the updated user changed across updates, this should never happen!\")\n\t}\n\n\terr = jcapi.DeleteUser(allUsers[foundUser])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete user ('%s'), err='%s'\", allUsers[foundUser].ToString(), err)\n\t}\n\n\treturn\n}\n\nfunc MakeTestTag() (tag JCTag) {\n\ttag = JCTag{\n\t\tName: \"Test tag #1\",\n\t\tGroupName: \"testtag1\",\n\t\tSystems: make([]string, 0),\n\t\tSystemUsers: make([]string, 0),\n\t\tExpired: false,\n\t\tSelected: false,\n\t\tExternallyManaged: false,\n\t}\n\n\treturn\n}\n\nfunc TestTags(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewTag := MakeTestTag()\n\n\ttagId, err := jcapi.AddUpdateTag(Insert, newTag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new tag ('%s'), err='%s'\", newTag.ToString(), err)\n\t}\n\n\tt.Logf(\"Returned tagId=%d\", tagId)\n\n\tallTags, err := jcapi.GetAllTags()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not GetAllTags, err='%s'\", err)\n\t}\n\n\tvar foundTag int\n\n\tfor i, tag := range allTags {\n\t\tt.Logf(\"Tag[%d]='%s'\", i, tag)\n\t\tif tag.Id == tagId {\n\t\t\tfoundTag = i\n\t\t}\n\t}\n\n\tallTags[foundTag].Name = \"Test tag #1 with a name change\"\n\n\tnewTagId, err := jcapi.AddUpdateTag(Update, allTags[foundTag])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not change the test tag's name, err='%s'\", err)\n\t}\n\n\tif tagId != newTagId {\n\t\tt.Fatalf(\"The ID of the tag changed during an update, this shouldn't happen.\")\n\t}\n\n\terr = jcapi.DeleteTag(allTags[foundTag])\n\tif err != nil {\n\t\tt.Fatalf(\"Could not delete the tag I just added ('%s'), err='%s'\", allTags[foundTag].ToString(), err)\n\t}\n}\n\nfunc MakeIDSource() JCIDSource {\n\n\treturn JCIDSource{\n\t\tName: \"Test Name\",\n\t\tType: \"Active Directory\",\n\t\tVersion: \"1.0.0\",\n\t\tIpAddress: \"127.0.0.1\",\n\t\tLastUpdateTime: \"2014-10-14 23:34:33\",\n\t\tDN: \"CN=JumpCloud;CN=Users;DC=jumpcloud;DC=com\",\n\t\tActive: true,\n\t}\n}\n\nfunc TestIDSources(t *testing.T) {\n\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\te := MakeIDSource()\n\n\tresult, err := jcapi.AddUpdateIDSource(Insert, e)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not post a new ID Source object, err='%s'\", err)\n\t}\n\n\tt.Logf(\"Post to idsources API successful, result=%U\", result)\n\n\textSourceList, err := jcapi.GetAllIDSources()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not list all external sources, err='%s'\", err)\n\t}\n\n\tfor idx, source := range extSourceList {\n\t\tt.Logf(\"Result %d: '%s'\", idx, source.ToString())\n\t}\n\n\teGet, exists, err := jcapi.GetIDSourceByName(e.Name)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not get an external source by name '%s', err='%s'\", e.Name, err)\n\t} else if exists && eGet.Name != e.Name {\n\t\tt.Fatalf(\"Received name is different ('%s') than what was sent ('%s')\", eGet.Name, e.Name)\n\t} else if !exists {\n\t\tt.Fatalf(\"Could not find the record we just put in '%c'\")\n\t}\n\n\t\/\/\n\t\/\/ If there's more than one test object with this name, let's just\n\t\/\/ loop over and delete them until we find no more of them...\n\t\/\/\n\tfor exists, err = true, nil; exists; eGet, exists, err = jcapi.GetIDSourceByName(e.Name) {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ERROR: getIDSourceByName() on '%s' failed, err='%s'\", eGet.ToString(), err)\n\t\t}\n\n\t\terr = jcapi.DeleteIDSource(eGet)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ERROR: Delete on '%s' failed, err='%s'\", eGet.ToString(), err)\n\t\t}\n\t}\n}\n\nfunc checkAuth(t *testing.T, expectedResult bool, username, password, tag string) {\n\tauthjc := NewJCAPI(testAPIKey, authUrlBase)\n\n\tuserAuth, err := authjc.AuthUser(username, password, tag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not authenticate the user '%s' with password '%s' and tag '%s' err='%s'\", username, password, tag, err)\n\t}\n\n\tif userAuth != expectedResult {\n\t\tt.Fatalf(\"userAuth=%t, we expected %s for user='%s', pass='%s', tag='%s'\", userAuth, expectedResult, username, password, tag)\n\t}\n}\n\nfunc TestRestAuth(t *testing.T) {\n\tjcapi := NewJCAPI(testAPIKey, testUrlBase)\n\n\tnewUser := MakeTestUser()\n\n\tuserId, err := jcapi.AddUpdateUser(Insert, newUser)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new user ('%s'), err='%s'\", newUser.ToString(), err)\n\t}\n\tnewUser.Id = userId\n\tdefer jcapi.DeleteUser(newUser)\n\n\tt.Logf(\"Returned userId=%s\", userId)\n\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, \"\")\n\tcheckAuth(t, false, newUser.UserName, newUser.Password, \"mytesttag\")\n\tcheckAuth(t, false, newUser.UserName, \"a0938mbo\", \"\")\n\tcheckAuth(t, false, \"2309vnotauser\", newUser.Password, \"\")\n\tcheckAuth(t, false, \"\", \"\", \"\")\n\n\t\/\/\n\t\/\/ Now add a tag and put the user in it, and let's try all the tag checking stuff\n\t\/\/\n\tnewTag := MakeTestTag()\n\n\tnewTag.SystemUsers = append(newTag.SystemUsers, userId)\n\n\ttagId, err := jcapi.AddUpdateTag(Insert, newTag)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not add new tag ('%s'), err='%s'\", newTag.ToString(), err)\n\t}\n\tnewTag.Id = tagId\n\tdefer jcapi.DeleteTag(newTag)\n\n\tt.Logf(\"Returned tagId=%d\", tagId)\n\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, newTag.Name)\n\tcheckAuth(t, false, newUser.UserName, newUser.Password, \"not a real tag\")\n\tcheckAuth(t, true, newUser.UserName, newUser.Password, \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\n\tbuild is comprised of two sub-packages, api and static, that are two\n\thalves of building a functioning webapp given a config.json.\n\n\tapi builds the server binary that handles all of the game logic. It is a\n\theadless binary that doesn't render any UI but serves as a REST endpoint\n\tfor the webapp.\n\n\tstatic accumulates all of the various client-side resources (i.e. html,\n\tcss, js) necessary to render the server and the specific games it uses.\n\tThe static server output is all static; that is that the files don't\n\tchange and it's designed to be able to upload to static file hosting\n\tsurfaces like firebase hosting. Actually organizing all of the files on\n\tdisk in the right configuration is a pain, so the static.Build() is a huge\n\thelp.\n\n\tBoth api and static Build() methods take a directory parameter, an\n\toptional directory name to store ther results of their builds in. They\n\twill create sub directories within that folder to store their results in a\n\tknown location. directory is optional; \"\" just defaults to the current\n\tdirectory.\n\n\tTypically you don't have to use these libraries directly, but instead use\n\t`boardgame-util build api`, `boardgame-util build static`, `boardgame-util\n\tclean`, and `boardgame-util serve`.\\\n\n\tThis build package contains only this documentation and no code. All code\n\tis in the api and static sub-packages. See their package doc for more\n\tabout each one.\n\n*\/\npackage build\n<commit_msg>Fixed lint error in boardgame-util\/lib\/build. Part of #552.<commit_after>\/*\n\nPackage build is comprised of two sub-packages, api and static, that are two\nhalves of building a functioning webapp given a config.json.\n\napi builds the server binary that handles all of the game logic. It is a\nheadless binary that doesn't render any UI but serves as a REST endpoint for the\nwebapp.\n\nstatic accumulates all of the various client-side resources (i.e. html, css, js)\nnecessary to render the server and the specific games it uses. The static server\noutput is all static; that is that the files don't change and it's designed to\nbe able to upload to static file hosting surfaces like firebase hosting.\nActually organizing all of the files on disk in the right configuration is a\npain, so the static.Build() is a huge help.\n\nBoth api and static Build() methods take a directory parameter, an optional\ndirectory name to store ther results of their builds in. They will create sub\ndirectories within that folder to store their results in a known location.\ndirectory is optional; \"\" just defaults to the current directory.\n\nTypically you don't have to use these libraries directly, but instead use\n`boardgame-util build api`, `boardgame-util build static`, `boardgame-util\nclean`, and `boardgame-util serve`.\\\n\nThis build package contains only this documentation and no code. All code is in\nthe api and static sub-packages. See their package doc for more about each one.\n\n*\/\npackage build\n<|endoftext|>"} {"text":"<commit_before>package jsgo_test\n\nimport (\n\t\"runtime\"\n\n\t. \"github.com\/go-zero\/jsgo\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype CheckFunction func(JsValue) bool\n\nvar _ = Describe(\"JsValue\", func() {\n\tstate := NewJsState()\n\n\tIt(\"Should return a float\", func() {\n\t\tvalue, _ := state.DoString(\"182.0\")\n\t\tExpect(value.Float()).To(Equal(182.0))\n\t})\n\n\tIt(\"Should return an integer\", func() {\n\t\tvalue, _ := state.DoString(\"42\")\n\t\tExpect(value.Integer()).To(Equal(42))\n\t})\n\n\tIt(\"Should return a string\", func() {\n\t\tvalue, _ := state.DoString(\"'Hello World!'\")\n\t\tExpect(value.String()).To(Equal(\"Hello World!\"))\n\t})\n\n\tIt(\"Should return a boolean\", func() {\n\t\tvalue, _ := state.DoString(\"true\")\n\t\tExpect(value.Bool()).To(BeTrue())\n\t})\n\n\tIt(\"Should return a callable\", func() {\n\t\tvalue, _ := state.DoString(\"new Function()\")\n\t\t_, ok := value.(*JsCallable)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return an object\", func() {\n\t\tvalue, _ := state.DoString(\"new Object()\")\n\t\t_, ok := value.(*JsObject)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return an array\", func() {\n\t\tvalue, _ := state.DoString(\"new Array()\")\n\t\t_, ok := value.(*JsArray)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return a regexp\", func() {\n\t\tvalue, _ := state.DoString(\"new RegExp()\")\n\t\t_, ok := value.(*JsRegExp)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tDescribeTable(\"Should identify the value type\",\n\t\tfunc(code string, check CheckFunction) {\n\t\t\tvalue, _ := state.DoString(code)\n\t\t\tExpect(check(value)).To(BeTrue())\n\t\t\truntime.GC() \/\/ force the unreferencing of the value\n\t\t},\n\t\tEntry(\"defined\", \"1\", JsValue.IsDefined),\n\t\tEntry(\"undefined\", \"{}\", JsValue.IsUndefined),\n\t\tEntry(\"number\", \"1\", JsValue.IsNumber),\n\t\tEntry(\"string\", \"'hola'\", JsValue.IsString),\n\t\tEntry(\"boolean\", \"true\", JsValue.IsBool),\n\t\tEntry(\"null\", \"null\", JsValue.IsNull),\n\t\tEntry(\"primitive\", \"false\", JsValue.IsPrimitive),\n\t\tEntry(\"callable\", \"new Function()\", JsValue.IsCallable),\n\t\tEntry(\"array\", \"new Array()\", JsValue.IsArray),\n\t\tEntry(\"regexp\", \"new RegExp()\", JsValue.IsRegExp),\n\t\tEntry(\"object\", \"new Object()\", JsValue.IsObject),\n\t)\n\n})\n<commit_msg>better JsValue tests<commit_after>package jsgo_test\n\nimport (\n\t\"runtime\"\n\n\t. \"github.com\/go-zero\/jsgo\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype CheckFunction func(JsValue) bool\n\nvar _ = Describe(\"JsValue\", func() {\n\tstate := NewJsState()\n\n\tIt(\"Should return a float\", func() {\n\t\tvalue, err := state.DoString(\"182.0\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(value.Float()).To(Equal(182.0))\n\t})\n\n\tIt(\"Should return an integer\", func() {\n\t\tvalue, err := state.DoString(\"42\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(value.Integer()).To(Equal(42))\n\t})\n\n\tIt(\"Should return a string\", func() {\n\t\tvalue, err := state.DoString(\"'Hello World!'\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(value.String()).To(Equal(\"Hello World!\"))\n\t})\n\n\tIt(\"Should return a boolean\", func() {\n\t\tvalue, err := state.DoString(\"true\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tExpect(value.Bool()).To(BeTrue())\n\t})\n\n\tIt(\"Should return a callable\", func() {\n\t\tvalue, err := state.DoString(\"new Function()\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\t_, ok := value.(*JsCallable)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return an object\", func() {\n\t\tvalue, err := state.DoString(\"new Object()\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\t_, ok := value.(*JsObject)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return an array\", func() {\n\t\tvalue, err := state.DoString(\"new Array()\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\t_, ok := value.(*JsArray)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tIt(\"Should return a regexp\", func() {\n\t\tvalue, err := state.DoString(\"new RegExp()\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\t_, ok := value.(*JsRegExp)\n\t\tExpect(ok).To(BeTrue())\n\t})\n\n\tDescribeTable(\"Should identify the value type\",\n\t\tfunc(code string, check CheckFunction) {\n\t\t\tvalue, err := state.DoString(code)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(check(value)).To(BeTrue())\n\t\t\truntime.GC() \/\/ force the unreferencing of the value\n\t\t},\n\t\tEntry(\"defined\", \"1\", JsValue.IsDefined),\n\t\tEntry(\"undefined\", \"{}\", JsValue.IsUndefined),\n\t\tEntry(\"number\", \"1\", JsValue.IsNumber),\n\t\tEntry(\"string\", \"'hola'\", JsValue.IsString),\n\t\tEntry(\"boolean\", \"true\", JsValue.IsBool),\n\t\tEntry(\"null\", \"null\", JsValue.IsNull),\n\t\tEntry(\"primitive\", \"false\", JsValue.IsPrimitive),\n\t\tEntry(\"callable\", \"new Function()\", JsValue.IsCallable),\n\t\tEntry(\"array\", \"new Array()\", JsValue.IsArray),\n\t\tEntry(\"regexp\", \"new RegExp()\", JsValue.IsRegExp),\n\t\tEntry(\"object\", \"new Object()\", JsValue.IsObject),\n\t)\n\n})\n<|endoftext|>"} {"text":"<commit_before>package dependency\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestLoad(t *testing.T) {\n\n\tpwd, _ := os.Getwd()\n\ttarget := NewDependency(pwd, false)\n\n\tactual, err := target.Load()\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, len(actual.Dependencies))\n\n\twithBranch := actual.Dependencies[0]\n\twithRevision := actual.Dependencies[1]\n\twithIgnore := actual.Dependencies[2]\n\n\trequire.Equal(t, \"github.com\/google\/protobuf\/examples\", withBranch.Target)\n\trequire.Equal(t, \"master\", withBranch.Branch)\n\trequire.Equal(t, \"\", withBranch.Revision)\n\n\trequire.Equal(t, \"github.com\/grpc-ecosystem\/grpc-gateway\/examples\/examplepb\", withRevision.Target)\n\trequire.Equal(t, \"\", withRevision.Branch)\n\trequire.Equal(t, \"v1.2.2\", withRevision.Revision)\n\trequire.Equal(t, \"grpc-gateway\/examplepb\", withRevision.Path)\n\n\trequire.Equal(t, \"github.com\/kubernetes\/helm\/_proto\/hapi\", withIgnore.Target)\n\trequire.Equal(t, \"\", withIgnore.Branch)\n\trequire.Equal(t, \"v2.8.1\", withIgnore.Revision)\n\trequire.Equal(t, []string{\".\/release\", \".\/rudder\", \".\/services\", \".\/version\"}, withIgnore.Ignores)\n}\n<commit_msg>Fix dependency test (#33)<commit_after>package dependency\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestLoad(t *testing.T) {\n\n\tpwd, _ := os.Getwd()\n\ttarget := NewDependency(pwd, false)\n\n\tactual, err := target.Load()\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, len(actual.Dependencies))\n\n\twithBranch := actual.Dependencies[0]\n\twithRevision := actual.Dependencies[1]\n\twithIgnore := actual.Dependencies[2]\n\n\trequire.Equal(t, \"github.com\/protocolbuffers\/protobuf\/src\", withBranch.Target)\n\trequire.Equal(t, \"master\", withBranch.Branch)\n\trequire.Equal(t, \"\", withBranch.Revision)\n\n\trequire.Equal(t, \"github.com\/grpc-ecosystem\/grpc-gateway\/examples\/examplepb\", withRevision.Target)\n\trequire.Equal(t, \"\", withRevision.Branch)\n\trequire.Equal(t, \"v1.2.2\", withRevision.Revision)\n\trequire.Equal(t, \"grpc-gateway\/examplepb\", withRevision.Path)\n\n\trequire.Equal(t, \"github.com\/kubernetes\/helm\/_proto\/hapi\", withIgnore.Target)\n\trequire.Equal(t, \"\", withIgnore.Branch)\n\trequire.Equal(t, \"v2.8.1\", withIgnore.Revision)\n\trequire.Equal(t, []string{\".\/release\", \".\/rudder\", \".\/services\", \".\/version\"}, withIgnore.Ignores)\n}\n<|endoftext|>"} {"text":"<commit_before>package deepcopier\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/guregu\/null\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCopyTo(t *testing.T) {\n\tnow := time.Now()\n\tuser := NewUser(now)\n\tuserCopy := &UserCopy{}\n\texpectedCopy := NewUserCopy(now)\n\n\terr := Copy(user).WithContext(map[string]interface{}{\"version\": \"1\"}).To(userCopy)\n\tassert.Nil(t, err)\n\n\tassert.Equal(t, expectedCopy.Title, userCopy.Title)\n\tassert.Equal(t, expectedCopy.Date, userCopy.Date)\n\tassert.Equal(t, expectedCopy.Float32, userCopy.Float32)\n\tassert.Equal(t, expectedCopy.Float64, userCopy.Float64)\n\tassert.Equal(t, expectedCopy.Int, userCopy.Int)\n\tassert.Equal(t, expectedCopy.Int8, userCopy.Int8)\n\tassert.Equal(t, expectedCopy.Int16, userCopy.Int16)\n\tassert.Equal(t, expectedCopy.Int32, userCopy.Int32)\n\tassert.Equal(t, expectedCopy.Int64, userCopy.Int64)\n\tassert.Equal(t, expectedCopy.UInt, userCopy.UInt)\n\tassert.Equal(t, expectedCopy.UInt8, userCopy.UInt8)\n\tassert.Equal(t, expectedCopy.UInt16, userCopy.UInt16)\n\tassert.Equal(t, expectedCopy.UInt32, userCopy.UInt32)\n\tassert.Equal(t, expectedCopy.UInt64, userCopy.UInt64)\n\tassert.Equal(t, expectedCopy.StringSlice, userCopy.StringSlice)\n\tassert.Equal(t, expectedCopy.IntSlice, userCopy.IntSlice)\n\tassert.Equal(t, expectedCopy.IntMethod, userCopy.IntMethod)\n\tassert.Equal(t, expectedCopy.Int8Method, userCopy.Int8Method)\n\tassert.Equal(t, expectedCopy.Int16Method, userCopy.Int16Method)\n\tassert.Equal(t, expectedCopy.Int32Method, userCopy.Int32Method)\n\tassert.Equal(t, expectedCopy.Int64Method, userCopy.Int64Method)\n\tassert.Equal(t, expectedCopy.UIntMethod, userCopy.UIntMethod)\n\tassert.Equal(t, expectedCopy.UInt8Method, userCopy.UInt8Method)\n\tassert.Equal(t, expectedCopy.UInt16Method, userCopy.UInt16Method)\n\tassert.Equal(t, expectedCopy.UInt32Method, userCopy.UInt32Method)\n\tassert.Equal(t, expectedCopy.UInt64Method, userCopy.UInt64Method)\n\tassert.Equal(t, expectedCopy.MethodWithContext, userCopy.MethodWithContext)\n\tassert.Equal(t, expectedCopy.SuperMethod, userCopy.SuperMethod)\n\tassert.Equal(t, expectedCopy.StringSlice, userCopy.StringSlice)\n\tassert.Equal(t, expectedCopy.IntSlice, userCopy.IntSlice)\n}\n\nfunc TestCopyFrom(t *testing.T) {\n\tnow := time.Now()\n\tuser := &User{}\n\tuserCopy := NewUserCopy(now)\n\texpected := NewUser(now)\n\n\terr := Copy(user).From(userCopy)\n\tassert.Nil(t, err)\n\n\tassert.Equal(t, expected.Name, user.Name)\n\tassert.Equal(t, expected.Date, user.Date)\n\tassert.Equal(t, expected.AFloat32, user.AFloat32)\n\tassert.Equal(t, expected.AFloat64, user.AFloat64)\n\tassert.Equal(t, expected.AnInt, user.AnInt)\n\tassert.Equal(t, expected.AnInt8, user.AnInt8)\n\tassert.Equal(t, expected.AnInt16, user.AnInt16)\n\tassert.Equal(t, expected.AnInt32, user.AnInt32)\n\tassert.Equal(t, expected.AnInt64, user.AnInt64)\n\tassert.Equal(t, expected.AnUInt, user.AnUInt)\n\tassert.Equal(t, expected.AnUInt8, user.AnUInt8)\n\tassert.Equal(t, expected.AnUInt16, user.AnUInt16)\n\tassert.Equal(t, expected.AnUInt32, user.AnUInt32)\n\tassert.Equal(t, expected.AnUInt64, user.AnUInt64)\n\tassert.Equal(t, expected.AStringSlice, user.AStringSlice)\n\tassert.Equal(t, expected.AnIntSlice, user.AnIntSlice)\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Fixtures\n\/\/ -----------------------------------------------------------------------------\n\ntype User struct {\n\tName string\n\tDate time.Time\n\tAFloat32 float32\n\tAFloat64 float64\n\tAnInt int\n\tAnInt8 int8\n\tAnInt16 int16\n\tAnInt32 int32\n\tAnInt64 int64\n\tAnUInt uint\n\tAnUInt8 uint8\n\tAnUInt16 uint16\n\tAnUInt32 uint32\n\tAnUInt64 uint64\n\tAStringSlice []string\n\tAnIntSlice []int\n\tANullString null.String\n}\n\nfunc NewUser(now time.Time) *User {\n\treturn &User{\n\t\tName: \"Chuck Norris\",\n\t\tDate: now,\n\t\tAFloat32: float32(10.0),\n\t\tAFloat64: float64(10.0),\n\t\tAnInt: int(10),\n\t\tAnInt8: int8(10),\n\t\tAnInt16: int16(10),\n\t\tAnInt32: int32(10),\n\t\tAnInt64: int64(10),\n\t\tAnUInt: uint(10),\n\t\tAnUInt8: uint8(10),\n\t\tAnUInt16: uint16(10),\n\t\tAnUInt32: uint32(10),\n\t\tAnUInt64: uint64(10),\n\t\tAStringSlice: []string{\"Chuck\", \"Norris\"},\n\t\tAnIntSlice: []int{0, 8, 15},\n\t\tANullString: null.StringFrom(\"I'm null\"),\n\t}\n}\n\nfunc (u *User) Float32Method() float32 {\n\treturn float32(10.0)\n}\n\nfunc (u *User) Float64Method() float64 {\n\treturn float64(10.0)\n}\n\nfunc (u *User) IntMethod() int {\n\treturn int(10)\n}\n\nfunc (u *User) Int8Method() int8 {\n\treturn int8(10)\n}\n\nfunc (u *User) Int16Method() int16 {\n\treturn int16(10)\n}\n\nfunc (u *User) Int32Method() int32 {\n\treturn int32(10)\n}\n\nfunc (u *User) Int64Method() int64 {\n\treturn int64(10)\n}\n\nfunc (u *User) UIntMethod() uint {\n\treturn uint(10)\n}\n\nfunc (u *User) UInt8Method() uint8 {\n\treturn uint8(10)\n}\n\nfunc (u *User) UInt16Method() uint16 {\n\treturn uint16(10)\n}\n\nfunc (u *User) UInt32Method() uint32 {\n\treturn uint32(10)\n}\n\nfunc (u *User) UInt64Method() uint64 {\n\treturn uint64(10)\n}\n\nfunc (u *User) MethodWithDifferentName() string {\n\treturn \"hello\"\n}\n\nfunc (u *User) MethodWithContext(context map[string]interface{}) string {\n\treturn context[\"version\"].(string)\n}\n\ntype UserCopy struct {\n\tDate time.Time `json:\"date\"`\n\tTitle string `json:\"name\" deepcopier:\"field:Name\"`\n\tFloat32 float32 `json:\"a_float32\" deepcopier:\"field:AFloat32\"`\n\tFloat64 float64 `json:\"a_float64\" deepcopier:\"field:AFloat64\"`\n\tInt int `json:\"an_int\" deepcopier:\"field:AnInt\"`\n\tInt8 int8 `json:\"an_int8\" deepcopier:\"field:AnInt8\"`\n\tInt16 int16 `json:\"an_int16\" deepcopier:\"field:AnInt16\"`\n\tInt32 int32 `json:\"an_int32\" deepcopier:\"field:AnInt32\"`\n\tInt64 int64 `json:\"an_int64\" deepcopier:\"field:AnInt64\"`\n\tUInt uint `json:\"an_uint\" deepcopier:\"field:AnUInt\"`\n\tUInt8 uint8 `json:\"an_uint8\" deepcopier:\"field:AnUInt8\"`\n\tUInt16 uint16 `json:\"an_uint16\" deepcopier:\"field:AnUInt16\"`\n\tUInt32 uint32 `json:\"an_uint32\" deepcopier:\"field:AnUInt32\"`\n\tUInt64 uint64 `json:\"an_uint64\" deepcopier:\"field:AnUInt64\"`\n\tNullString null.String `json:\"a_null_string\" deepcopier:\"field:ANullString\"`\n\tStringSlice []string `json:\"a_string_slice\" deepcopier:\"field:AStringSlice\"`\n\tIntSlice []int `json:\"an_int_slice\" deepcopier:\"field:AnIntSlice\"`\n\tIntMethod int `json:\"int_method\"`\n\tInt8Method int8 `json:\"int8_method\"`\n\tInt16Method int16 `json:\"int16_method\"`\n\tInt32Method int32 `json:\"int32_method\"`\n\tInt64Method int64 `json:\"int64_method\"`\n\tUIntMethod uint `json:\"uint_method\"`\n\tUInt8Method uint8 `json:\"uint8_method\"`\n\tUInt16Method uint16 `json:\"uint16_method\"`\n\tUInt32Method uint32 `json:\"uint32_method\"`\n\tUInt64Method uint64 `json:\"uint64_method\"`\n\tMethodWithContext string `json:\"method_with_context\" deepcopier:\"context\"`\n\tSuperMethod string `json:\"super_method\" deepcopier:\"field:MethodWithDifferentName\"`\n}\n\nfunc NewUserCopy(now time.Time) *UserCopy {\n\treturn &UserCopy{\n\t\tTitle: \"Chuck Norris\",\n\t\tDate: now,\n\t\tFloat32: float32(10.0),\n\t\tFloat64: float64(10.0),\n\t\tInt: int(10),\n\t\tInt8: int8(10),\n\t\tInt16: int16(10),\n\t\tInt32: int32(10),\n\t\tInt64: int64(10),\n\t\tUInt: uint(10),\n\t\tUInt8: uint8(10),\n\t\tUInt16: uint16(10),\n\t\tUInt32: uint32(10),\n\t\tUInt64: uint64(10),\n\t\tStringSlice: []string{\"Chuck\", \"Norris\"},\n\t\tIntSlice: []int{0, 8, 15},\n\t\tNullString: null.StringFrom(\"I'm null\"),\n\t\tIntMethod: int(10),\n\t\tInt8Method: int8(10),\n\t\tInt16Method: int16(10),\n\t\tInt32Method: int32(10),\n\t\tInt64Method: int64(10),\n\t\tUIntMethod: uint(10),\n\t\tUInt8Method: uint8(10),\n\t\tUInt16Method: uint16(10),\n\t\tUInt32Method: uint32(10),\n\t\tUInt64Method: uint64(10),\n\t\tMethodWithContext: \"1\",\n\t\tSuperMethod: \"hello\",\n\t}\n}\n<commit_msg>Use assert.New() for tests.<commit_after>package deepcopier\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/guregu\/null\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCopyTo(t *testing.T) {\n\tis := assert.New(t)\n\tnow := time.Now()\n\n\tuser := NewUser(now)\n\tuserCopy := &UserCopy{}\n\texpectedCopy := NewUserCopy(now)\n\n\tis.Nil(Copy(user).WithContext(map[string]interface{}{\"version\": \"1\"}).To(userCopy))\n\n\tis.Equal(expectedCopy.Title, userCopy.Title)\n\tis.Equal(expectedCopy.Date, userCopy.Date)\n\tis.Equal(expectedCopy.Float32, userCopy.Float32)\n\tis.Equal(expectedCopy.Float64, userCopy.Float64)\n\tis.Equal(expectedCopy.Int, userCopy.Int)\n\tis.Equal(expectedCopy.Int8, userCopy.Int8)\n\tis.Equal(expectedCopy.Int16, userCopy.Int16)\n\tis.Equal(expectedCopy.Int32, userCopy.Int32)\n\tis.Equal(expectedCopy.Int64, userCopy.Int64)\n\tis.Equal(expectedCopy.UInt, userCopy.UInt)\n\tis.Equal(expectedCopy.UInt8, userCopy.UInt8)\n\tis.Equal(expectedCopy.UInt16, userCopy.UInt16)\n\tis.Equal(expectedCopy.UInt32, userCopy.UInt32)\n\tis.Equal(expectedCopy.UInt64, userCopy.UInt64)\n\tis.Equal(expectedCopy.StringSlice, userCopy.StringSlice)\n\tis.Equal(expectedCopy.IntSlice, userCopy.IntSlice)\n\tis.Equal(expectedCopy.IntMethod, userCopy.IntMethod)\n\tis.Equal(expectedCopy.Int8Method, userCopy.Int8Method)\n\tis.Equal(expectedCopy.Int16Method, userCopy.Int16Method)\n\tis.Equal(expectedCopy.Int32Method, userCopy.Int32Method)\n\tis.Equal(expectedCopy.Int64Method, userCopy.Int64Method)\n\tis.Equal(expectedCopy.UIntMethod, userCopy.UIntMethod)\n\tis.Equal(expectedCopy.UInt8Method, userCopy.UInt8Method)\n\tis.Equal(expectedCopy.UInt16Method, userCopy.UInt16Method)\n\tis.Equal(expectedCopy.UInt32Method, userCopy.UInt32Method)\n\tis.Equal(expectedCopy.UInt64Method, userCopy.UInt64Method)\n\tis.Equal(expectedCopy.MethodWithContext, userCopy.MethodWithContext)\n\tis.Equal(expectedCopy.SuperMethod, userCopy.SuperMethod)\n\tis.Equal(expectedCopy.StringSlice, userCopy.StringSlice)\n\tis.Equal(expectedCopy.IntSlice, userCopy.IntSlice)\n}\n\nfunc TestCopyFrom(t *testing.T) {\n\tis := assert.New(t)\n\tnow := time.Now()\n\n\tuser := &User{}\n\tuserCopy := NewUserCopy(now)\n\texpected := NewUser(now)\n\n\tis.Nil(Copy(user).From(userCopy))\n\n\tis.Equal(expected.Name, user.Name)\n\tis.Equal(expected.Date, user.Date)\n\tis.Equal(expected.AFloat32, user.AFloat32)\n\tis.Equal(expected.AFloat64, user.AFloat64)\n\tis.Equal(expected.AnInt, user.AnInt)\n\tis.Equal(expected.AnInt8, user.AnInt8)\n\tis.Equal(expected.AnInt16, user.AnInt16)\n\tis.Equal(expected.AnInt32, user.AnInt32)\n\tis.Equal(expected.AnInt64, user.AnInt64)\n\tis.Equal(expected.AnUInt, user.AnUInt)\n\tis.Equal(expected.AnUInt8, user.AnUInt8)\n\tis.Equal(expected.AnUInt16, user.AnUInt16)\n\tis.Equal(expected.AnUInt32, user.AnUInt32)\n\tis.Equal(expected.AnUInt64, user.AnUInt64)\n\tis.Equal(expected.AStringSlice, user.AStringSlice)\n\tis.Equal(expected.AnIntSlice, user.AnIntSlice)\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Fixtures\n\/\/ -----------------------------------------------------------------------------\n\ntype User struct {\n\tName string\n\tDate time.Time\n\tAFloat32 float32\n\tAFloat64 float64\n\tAnInt int\n\tAnInt8 int8\n\tAnInt16 int16\n\tAnInt32 int32\n\tAnInt64 int64\n\tAnUInt uint\n\tAnUInt8 uint8\n\tAnUInt16 uint16\n\tAnUInt32 uint32\n\tAnUInt64 uint64\n\tAStringSlice []string\n\tAnIntSlice []int\n\tANullString null.String\n}\n\nfunc NewUser(now time.Time) *User {\n\treturn &User{\n\t\tName: \"Chuck Norris\",\n\t\tDate: now,\n\t\tAFloat32: float32(10.0),\n\t\tAFloat64: float64(10.0),\n\t\tAnInt: int(10),\n\t\tAnInt8: int8(10),\n\t\tAnInt16: int16(10),\n\t\tAnInt32: int32(10),\n\t\tAnInt64: int64(10),\n\t\tAnUInt: uint(10),\n\t\tAnUInt8: uint8(10),\n\t\tAnUInt16: uint16(10),\n\t\tAnUInt32: uint32(10),\n\t\tAnUInt64: uint64(10),\n\t\tAStringSlice: []string{\"Chuck\", \"Norris\"},\n\t\tAnIntSlice: []int{0, 8, 15},\n\t\tANullString: null.StringFrom(\"I'm null\"),\n\t}\n}\n\nfunc (u *User) Float32Method() float32 {\n\treturn float32(10.0)\n}\n\nfunc (u *User) Float64Method() float64 {\n\treturn float64(10.0)\n}\n\nfunc (u *User) IntMethod() int {\n\treturn int(10)\n}\n\nfunc (u *User) Int8Method() int8 {\n\treturn int8(10)\n}\n\nfunc (u *User) Int16Method() int16 {\n\treturn int16(10)\n}\n\nfunc (u *User) Int32Method() int32 {\n\treturn int32(10)\n}\n\nfunc (u *User) Int64Method() int64 {\n\treturn int64(10)\n}\n\nfunc (u *User) UIntMethod() uint {\n\treturn uint(10)\n}\n\nfunc (u *User) UInt8Method() uint8 {\n\treturn uint8(10)\n}\n\nfunc (u *User) UInt16Method() uint16 {\n\treturn uint16(10)\n}\n\nfunc (u *User) UInt32Method() uint32 {\n\treturn uint32(10)\n}\n\nfunc (u *User) UInt64Method() uint64 {\n\treturn uint64(10)\n}\n\nfunc (u *User) MethodWithDifferentName() string {\n\treturn \"hello\"\n}\n\nfunc (u *User) MethodWithContext(context map[string]interface{}) string {\n\treturn context[\"version\"].(string)\n}\n\ntype UserCopy struct {\n\tDate time.Time `json:\"date\"`\n\tTitle string `json:\"name\" deepcopier:\"field:Name\"`\n\tFloat32 float32 `json:\"a_float32\" deepcopier:\"field:AFloat32\"`\n\tFloat64 float64 `json:\"a_float64\" deepcopier:\"field:AFloat64\"`\n\tInt int `json:\"an_int\" deepcopier:\"field:AnInt\"`\n\tInt8 int8 `json:\"an_int8\" deepcopier:\"field:AnInt8\"`\n\tInt16 int16 `json:\"an_int16\" deepcopier:\"field:AnInt16\"`\n\tInt32 int32 `json:\"an_int32\" deepcopier:\"field:AnInt32\"`\n\tInt64 int64 `json:\"an_int64\" deepcopier:\"field:AnInt64\"`\n\tUInt uint `json:\"an_uint\" deepcopier:\"field:AnUInt\"`\n\tUInt8 uint8 `json:\"an_uint8\" deepcopier:\"field:AnUInt8\"`\n\tUInt16 uint16 `json:\"an_uint16\" deepcopier:\"field:AnUInt16\"`\n\tUInt32 uint32 `json:\"an_uint32\" deepcopier:\"field:AnUInt32\"`\n\tUInt64 uint64 `json:\"an_uint64\" deepcopier:\"field:AnUInt64\"`\n\tNullString null.String `json:\"a_null_string\" deepcopier:\"field:ANullString\"`\n\tStringSlice []string `json:\"a_string_slice\" deepcopier:\"field:AStringSlice\"`\n\tIntSlice []int `json:\"an_int_slice\" deepcopier:\"field:AnIntSlice\"`\n\tIntMethod int `json:\"int_method\"`\n\tInt8Method int8 `json:\"int8_method\"`\n\tInt16Method int16 `json:\"int16_method\"`\n\tInt32Method int32 `json:\"int32_method\"`\n\tInt64Method int64 `json:\"int64_method\"`\n\tUIntMethod uint `json:\"uint_method\"`\n\tUInt8Method uint8 `json:\"uint8_method\"`\n\tUInt16Method uint16 `json:\"uint16_method\"`\n\tUInt32Method uint32 `json:\"uint32_method\"`\n\tUInt64Method uint64 `json:\"uint64_method\"`\n\tMethodWithContext string `json:\"method_with_context\" deepcopier:\"context\"`\n\tSuperMethod string `json:\"super_method\" deepcopier:\"field:MethodWithDifferentName\"`\n}\n\nfunc NewUserCopy(now time.Time) *UserCopy {\n\treturn &UserCopy{\n\t\tTitle: \"Chuck Norris\",\n\t\tDate: now,\n\t\tFloat32: float32(10.0),\n\t\tFloat64: float64(10.0),\n\t\tInt: int(10),\n\t\tInt8: int8(10),\n\t\tInt16: int16(10),\n\t\tInt32: int32(10),\n\t\tInt64: int64(10),\n\t\tUInt: uint(10),\n\t\tUInt8: uint8(10),\n\t\tUInt16: uint16(10),\n\t\tUInt32: uint32(10),\n\t\tUInt64: uint64(10),\n\t\tStringSlice: []string{\"Chuck\", \"Norris\"},\n\t\tIntSlice: []int{0, 8, 15},\n\t\tNullString: null.StringFrom(\"I'm null\"),\n\t\tIntMethod: int(10),\n\t\tInt8Method: int8(10),\n\t\tInt16Method: int16(10),\n\t\tInt32Method: int32(10),\n\t\tInt64Method: int64(10),\n\t\tUIntMethod: uint(10),\n\t\tUInt8Method: uint8(10),\n\t\tUInt16Method: uint16(10),\n\t\tUInt32Method: uint32(10),\n\t\tUInt64Method: uint64(10),\n\t\tMethodWithContext: \"1\",\n\t\tSuperMethod: \"hello\",\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package nbfx\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestEncodeExample1(t *testing.T) {\n\tencoder := NewEncoder()\n\tpath := \"..\/examples\/1\"\n\txmlBin, err := ioutil.ReadFile(path + \".xml\")\n\tif failOn(err, \"unable to open \"+path+\".xml\", t) {\n\t\treturn\n\t}\n\texpected, err := ioutil.ReadFile(path + \".bin\")\n\tif failOn(err, \"unable to open \"+path+\".bin\", t) {\n\t\treturn\n\t}\n\tactual, err := encoder.Encode(string(xmlBin))\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, expected)\n}\n\nfunc TestEncodePrefixDictionaryElementB(t *testing.T) {\n\txml := \"<b:Foo>\"\n\n\tencoder := NewEncoderWithStrings(map[uint32]string{0x02: \"Foo\"})\n\tactual, err := encoder.Encode(xml)\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error() + \" Got: \" + fmt.Sprintf(\"%x\", actual))\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, []byte{0x45, 0x02})\n}\n\nfunc TestEncodePrefixDictionaryElementS(t *testing.T) {\n\txml := \"<s:Foo>\"\n\n\tencoder := NewEncoderWithStrings(map[uint32]string{0x02: \"Foo\"})\n\tactual, err := encoder.Encode(xml)\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error() + \" Got: \" + fmt.Sprintf(\"%x\", actual))\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, []byte{0x56, 0x02})\n}\n\nfunc TestWriteMultiByteInt31_17(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 17, []byte{0x11})\n}\n\nfunc TestWriteMultiByteInt31_145(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 145, []byte{0x91, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_5521(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 5521, []byte{0x91, 0x2B})\n}\n\nfunc TestWriteMultiByteInt31_16384(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 16384, []byte{0x80, 0x80, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_2097152(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 2097152, []byte{0x80, 0x80, 0x80, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_268435456(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 268435456, []byte{0x80, 0x80, 0x80, 0x80, 0x01})\n}\n\nfunc TestWriteString_abc(t *testing.T) {\n\tbuffer := bytes.Buffer{}\n\tstr := \"abc\"\n\texpected := []byte{0x03, 0x61, 0x62, 0x63}\n\texpectedLen := len(expected)\n\te := &encoder{bin:&buffer}\n\ti, err := writeString(e, str)\n\tif err != nil {\n\t\tt.Error(\"Error: \" + err.Error())\n\t\treturn\n\t}\n\tif i != expectedLen {\n\t\tt.Errorf(\"Expected to write %d bytes but wrote %d\", expected, i)\n\t}\n\tassertBinEqual(t, buffer.Bytes(), expected)\n}\n\nfunc testWriteMultiByteInt31(t *testing.T, num uint32, expected []byte) {\n\tbuffer := bytes.Buffer{}\n\te := &encoder{bin:&bytes.Buffer{}}\n\ti, err := writeMultiByteInt31(e, num)\n\tif err != nil {\n\t\tt.Error(\"Error: \" + err.Error())\n\t\treturn\n\t}\n\tif i != len(expected) {\n\t\tt.Errorf(\"Expected to write %d byte\/s but wrote %d\", len(expected), i)\n\t\treturn\n\t}\n\tassertBinEqual(t, buffer.Bytes()[0:i], expected)\n}\n\nfunc assertBinEqual(t *testing.T, actual, expected []byte) {\n\tif len(actual) != len(expected) {\n\t\tt.Error(\"length of actual \" + fmt.Sprint(len(actual)) + \" not equal to length of expected \" + fmt.Sprint(len(expected)))\n\t}\n\tfor i, b := range actual {\n\t\tif b != expected[i] {\n\t\t\tfmt.Println(\"actual\", actual, \"expected\", expected)\n\t\t\tt.Error(fmt.Sprintf(\"%x differs from expected %x at index %d\", actual, expected, i))\n\t\t}\n\t}\n}\n<commit_msg>Fix testWriteMultiByteInt31<commit_after>package nbfx\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestEncodeExample1(t *testing.T) {\n\tencoder := NewEncoder()\n\tpath := \"..\/examples\/1\"\n\txmlBin, err := ioutil.ReadFile(path + \".xml\")\n\tif failOn(err, \"unable to open \"+path+\".xml\", t) {\n\t\treturn\n\t}\n\texpected, err := ioutil.ReadFile(path + \".bin\")\n\tif failOn(err, \"unable to open \"+path+\".bin\", t) {\n\t\treturn\n\t}\n\tactual, err := encoder.Encode(string(xmlBin))\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, expected)\n}\n\nfunc TestEncodePrefixDictionaryElementB(t *testing.T) {\n\txml := \"<b:Foo>\"\n\n\tencoder := NewEncoderWithStrings(map[uint32]string{0x02: \"Foo\"})\n\tactual, err := encoder.Encode(xml)\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error() + \" Got: \" + fmt.Sprintf(\"%x\", actual))\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, []byte{0x45, 0x02})\n}\n\nfunc TestEncodePrefixDictionaryElementS(t *testing.T) {\n\txml := \"<s:Foo>\"\n\n\tencoder := NewEncoderWithStrings(map[uint32]string{0x02: \"Foo\"})\n\tactual, err := encoder.Encode(xml)\n\tif err != nil {\n\t\tt.Error(\"Unexpected error: \" + err.Error() + \" Got: \" + fmt.Sprintf(\"%x\", actual))\n\t\treturn\n\t}\n\tassertBinEqual(t, actual, []byte{0x56, 0x02})\n}\n\nfunc TestWriteMultiByteInt31_17(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 17, []byte{0x11})\n}\n\nfunc TestWriteMultiByteInt31_145(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 145, []byte{0x91, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_5521(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 5521, []byte{0x91, 0x2B})\n}\n\nfunc TestWriteMultiByteInt31_16384(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 16384, []byte{0x80, 0x80, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_2097152(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 2097152, []byte{0x80, 0x80, 0x80, 0x01})\n}\n\nfunc TestWriteMultiByteInt31_268435456(t *testing.T) {\n\ttestWriteMultiByteInt31(t, 268435456, []byte{0x80, 0x80, 0x80, 0x80, 0x01})\n}\n\nfunc TestWriteString_abc(t *testing.T) {\n\tbuffer := bytes.Buffer{}\n\tstr := \"abc\"\n\texpected := []byte{0x03, 0x61, 0x62, 0x63}\n\texpectedLen := len(expected)\n\te := &encoder{bin:&buffer}\n\ti, err := writeString(e, str)\n\tif err != nil {\n\t\tt.Error(\"Error: \" + err.Error())\n\t\treturn\n\t}\n\tif i != expectedLen {\n\t\tt.Errorf(\"Expected to write %d bytes but wrote %d\", expected, i)\n\t}\n\tassertBinEqual(t, buffer.Bytes(), expected)\n}\n\nfunc testWriteMultiByteInt31(t *testing.T, num uint32, expected []byte) {\n\tbuffer := bytes.Buffer{}\n\te := &encoder{bin:&buffer}\n\ti, err := writeMultiByteInt31(e, num)\n\tif err != nil {\n\t\tt.Error(\"Error: \" + err.Error())\n\t\treturn\n\t}\n\tif i != len(expected) {\n\t\tt.Errorf(\"Expected to write %d byte\/s but wrote %d\", len(expected), i)\n\t\treturn\n\t}\n\tassertBinEqual(t, buffer.Bytes()[0:i], expected)\n}\n\nfunc assertBinEqual(t *testing.T, actual, expected []byte) {\n\tif len(actual) != len(expected) {\n\t\tt.Error(\"length of actual \" + fmt.Sprint(len(actual)) + \" not equal to length of expected \" + fmt.Sprint(len(expected)))\n\t}\n\tfor i, b := range actual {\n\t\tif b != expected[i] {\n\t\t\tfmt.Println(\"actual\", actual, \"expected\", expected)\n\t\t\tt.Error(fmt.Sprintf(\"%x differs from expected %x at index %d\", actual, expected, i))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bytes_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\nfunc ExampleBuffer() {\n\tvar b bytes.Buffer \/\/ A Buffer needs no initialization.\n\tb.Write([]byte(\"Hello \"))\n\tfmt.Fprintf(&b, \"world!\")\n\tb.WriteTo(os.Stdout)\n\t\/\/ Output: Hello world!\n}\n\nfunc ExampleBuffer_reader() {\n\t\/\/ A Buffer can turn a string or a []byte into an io.Reader.\n\tbuf := bytes.NewBufferString(\"R29waGVycyBydWxlIQ==\")\n\tdec := base64.NewDecoder(base64.StdEncoding, buf)\n\tio.Copy(os.Stdout, dec)\n\t\/\/ Output: Gophers rule!\n}\n\nfunc ExampleCompare() {\n\t\/\/ Interpret Compare's result by comparing it to zero.\n\tvar a, b []byte\n\tif bytes.Compare(a, b) < 0 {\n\t\t\/\/ a less b\n\t}\n\tif bytes.Compare(a, b) <= 0 {\n\t\t\/\/ a less or equal b\n\t}\n\tif bytes.Compare(a, b) > 0 {\n\t\t\/\/ a greater b\n\t}\n\tif bytes.Compare(a, b) >= 0 {\n\t\t\/\/ a greater or equal b\n\t}\n\n\t\/\/ Prefer Equal to Compare for equality comparisons.\n\tif bytes.Equal(a, b) {\n\t\t\/\/ a equal b\n\t}\n\tif !bytes.Equal(a, b) {\n\t\t\/\/ a not equal b\n\t}\n}\n\nfunc ExampleCompare_search() {\n\t\/\/ Binary search to find a matching byte slice.\n\tvar needle []byte\n\tvar haystack [][]byte \/\/ Assume sorted\n\ti := sort.Search(len(haystack), func(i int) bool {\n\t\t\/\/ Return needle <= haystack[i].\n\t\treturn bytes.Compare(needle, haystack[i]) <= 0\n\t})\n\tif i < len(haystack) && bytes.Equal(needle, haystack[i]) {\n\t\t\/\/ Found it!\n\t}\n}\n<commit_msg>bytes: Change Compare example to be consistent with sort.Search's.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bytes_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\nfunc ExampleBuffer() {\n\tvar b bytes.Buffer \/\/ A Buffer needs no initialization.\n\tb.Write([]byte(\"Hello \"))\n\tfmt.Fprintf(&b, \"world!\")\n\tb.WriteTo(os.Stdout)\n\t\/\/ Output: Hello world!\n}\n\nfunc ExampleBuffer_reader() {\n\t\/\/ A Buffer can turn a string or a []byte into an io.Reader.\n\tbuf := bytes.NewBufferString(\"R29waGVycyBydWxlIQ==\")\n\tdec := base64.NewDecoder(base64.StdEncoding, buf)\n\tio.Copy(os.Stdout, dec)\n\t\/\/ Output: Gophers rule!\n}\n\nfunc ExampleCompare() {\n\t\/\/ Interpret Compare's result by comparing it to zero.\n\tvar a, b []byte\n\tif bytes.Compare(a, b) < 0 {\n\t\t\/\/ a less b\n\t}\n\tif bytes.Compare(a, b) <= 0 {\n\t\t\/\/ a less or equal b\n\t}\n\tif bytes.Compare(a, b) > 0 {\n\t\t\/\/ a greater b\n\t}\n\tif bytes.Compare(a, b) >= 0 {\n\t\t\/\/ a greater or equal b\n\t}\n\n\t\/\/ Prefer Equal to Compare for equality comparisons.\n\tif bytes.Equal(a, b) {\n\t\t\/\/ a equal b\n\t}\n\tif !bytes.Equal(a, b) {\n\t\t\/\/ a not equal b\n\t}\n}\n\nfunc ExampleCompare_search() {\n\t\/\/ Binary search to find a matching byte slice.\n\tvar needle []byte\n\tvar haystack [][]byte \/\/ Assume sorted\n\ti := sort.Search(len(haystack), func(i int) bool {\n\t\t\/\/ Return haystack[i] >= needle.\n\t\treturn bytes.Compare(haystack[i], needle) >= 0\n\t})\n\tif i < len(haystack) && bytes.Equal(haystack[i], needle) {\n\t\t\/\/ Found it!\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as\n\/\/ defined in FIPS 186-3.\npackage ecdsa\n\n\/\/ References:\n\/\/ [NSA]: Suite B implementer's guide to FIPS 186-3,\n\/\/ http:\/\/www.nsa.gov\/ia\/_files\/ecdsa.pdf\n\/\/ [SECG]: SECG, SEC1\n\/\/ http:\/\/www.secg.org\/download\/aid-780\/sec1-v2.pdf\n\nimport (\n\t\"crypto\/elliptic\"\n\t\"io\"\n\t\"math\/big\"\n)\n\n\/\/ PublicKey represents an ECDSA public key.\ntype PublicKey struct {\n\telliptic.Curve\n\tX, Y *big.Int\n}\n\n\/\/ PrivateKey represents a ECDSA private key.\ntype PrivateKey struct {\n\tPublicKey\n\tD *big.Int\n}\n\nvar one = new(big.Int).SetInt64(1)\n\n\/\/ randFieldElement returns a random element of the field underlying the given\n\/\/ curve using the procedure given in [NSA] A.2.1.\nfunc randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {\n\tparams := c.Params()\n\tb := make([]byte, params.BitSize\/8+8)\n\t_, err = io.ReadFull(rand, b)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tk = new(big.Int).SetBytes(b)\n\tn := new(big.Int).Sub(params.N, one)\n\tk.Mod(k, n)\n\tk.Add(k, one)\n\treturn\n}\n\n\/\/ GenerateKey generates a public&private key pair.\nfunc GenerateKey(c elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {\n\tk, err := randFieldElement(c, rand)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpriv = new(PrivateKey)\n\tpriv.PublicKey.Curve = c\n\tpriv.D = k\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn\n}\n\n\/\/ hashToInt converts a hash value to an integer. There is some disagreement\n\/\/ about how this is done. [NSA] suggests that this is done in the obvious\n\/\/ manner, but [SECG] truncates the hash to the bit-length of the curve order\n\/\/ first. We follow [SECG] because that's what OpenSSL does.\nfunc hashToInt(hash []byte, c elliptic.Curve) *big.Int {\n\torderBits := c.Params().N.BitLen()\n\torderBytes := (orderBits + 7) \/ 8\n\tif len(hash) > orderBytes {\n\t\thash = hash[:orderBytes]\n\t}\n\n\tret := new(big.Int).SetBytes(hash)\n\texcess := orderBytes*8 - orderBits\n\tif excess > 0 {\n\t\tret.Rsh(ret, uint(excess))\n\t}\n\treturn ret\n}\n\n\/\/ Sign signs an arbitrary length hash (which should be the result of hashing a\n\/\/ larger message) using the private key, priv. It returns the signature as a\n\/\/ pair of integers. The security of the private key depends on the entropy of\n\/\/ rand.\nfunc Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {\n\t\/\/ See [NSA] 3.4.1\n\tc := priv.PublicKey.Curve\n\tN := c.Params().N\n\n\tvar k, kInv *big.Int\n\tfor {\n\t\tfor {\n\t\t\tk, err = randFieldElement(c, rand)\n\t\t\tif err != nil {\n\t\t\t\tr = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkInv = new(big.Int).ModInverse(k, N)\n\t\t\tr, _ = priv.Curve.ScalarBaseMult(k.Bytes())\n\t\t\tr.Mod(r, N)\n\t\t\tif r.Sign() != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\te := hashToInt(hash, c)\n\t\ts = new(big.Int).Mul(priv.D, r)\n\t\ts.Add(s, e)\n\t\ts.Mul(s, kInv)\n\t\ts.Mod(s, N)\n\t\tif s.Sign() != 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Verify verifies the signature in r, s of hash using the public key, pub. It\n\/\/ returns true iff the signature is valid.\nfunc Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {\n\t\/\/ See [NSA] 3.4.2\n\tc := pub.Curve\n\tN := c.Params().N\n\n\tif r.Sign() == 0 || s.Sign() == 0 {\n\t\treturn false\n\t}\n\tif r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {\n\t\treturn false\n\t}\n\te := hashToInt(hash, c)\n\tw := new(big.Int).ModInverse(s, N)\n\n\tu1 := e.Mul(e, w)\n\tu2 := w.Mul(r, w)\n\n\tx1, y1 := c.ScalarBaseMult(u1.Bytes())\n\tx2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())\n\tif x1.Cmp(x2) == 0 {\n\t\treturn false\n\t}\n\tx, _ := c.Add(x1, y1, x2, y2)\n\tx.Mod(x, N)\n\treturn x.Cmp(r) == 0\n}\n<commit_msg>crypto\/ecdsa: fix case where p != 0 mod 8 and the hash length < p.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as\n\/\/ defined in FIPS 186-3.\npackage ecdsa\n\n\/\/ References:\n\/\/ [NSA]: Suite B implementer's guide to FIPS 186-3,\n\/\/ http:\/\/www.nsa.gov\/ia\/_files\/ecdsa.pdf\n\/\/ [SECG]: SECG, SEC1\n\/\/ http:\/\/www.secg.org\/download\/aid-780\/sec1-v2.pdf\n\nimport (\n\t\"crypto\/elliptic\"\n\t\"io\"\n\t\"math\/big\"\n)\n\n\/\/ PublicKey represents an ECDSA public key.\ntype PublicKey struct {\n\telliptic.Curve\n\tX, Y *big.Int\n}\n\n\/\/ PrivateKey represents a ECDSA private key.\ntype PrivateKey struct {\n\tPublicKey\n\tD *big.Int\n}\n\nvar one = new(big.Int).SetInt64(1)\n\n\/\/ randFieldElement returns a random element of the field underlying the given\n\/\/ curve using the procedure given in [NSA] A.2.1.\nfunc randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {\n\tparams := c.Params()\n\tb := make([]byte, params.BitSize\/8+8)\n\t_, err = io.ReadFull(rand, b)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tk = new(big.Int).SetBytes(b)\n\tn := new(big.Int).Sub(params.N, one)\n\tk.Mod(k, n)\n\tk.Add(k, one)\n\treturn\n}\n\n\/\/ GenerateKey generates a public&private key pair.\nfunc GenerateKey(c elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {\n\tk, err := randFieldElement(c, rand)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpriv = new(PrivateKey)\n\tpriv.PublicKey.Curve = c\n\tpriv.D = k\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn\n}\n\n\/\/ hashToInt converts a hash value to an integer. There is some disagreement\n\/\/ about how this is done. [NSA] suggests that this is done in the obvious\n\/\/ manner, but [SECG] truncates the hash to the bit-length of the curve order\n\/\/ first. We follow [SECG] because that's what OpenSSL does. Additionally,\n\/\/ OpenSSL right shifts excess bits from the number if the hash is too large\n\/\/ and we mirror that too.\nfunc hashToInt(hash []byte, c elliptic.Curve) *big.Int {\n\torderBits := c.Params().N.BitLen()\n\torderBytes := (orderBits + 7) \/ 8\n\tif len(hash) > orderBytes {\n\t\thash = hash[:orderBytes]\n\t}\n\n\tret := new(big.Int).SetBytes(hash)\n\texcess := len(hash)*8 - orderBits\n\tif excess > 0 {\n\t\tret.Rsh(ret, uint(excess))\n\t}\n\treturn ret\n}\n\n\/\/ Sign signs an arbitrary length hash (which should be the result of hashing a\n\/\/ larger message) using the private key, priv. It returns the signature as a\n\/\/ pair of integers. The security of the private key depends on the entropy of\n\/\/ rand.\nfunc Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {\n\t\/\/ See [NSA] 3.4.1\n\tc := priv.PublicKey.Curve\n\tN := c.Params().N\n\n\tvar k, kInv *big.Int\n\tfor {\n\t\tfor {\n\t\t\tk, err = randFieldElement(c, rand)\n\t\t\tif err != nil {\n\t\t\t\tr = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkInv = new(big.Int).ModInverse(k, N)\n\t\t\tr, _ = priv.Curve.ScalarBaseMult(k.Bytes())\n\t\t\tr.Mod(r, N)\n\t\t\tif r.Sign() != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\te := hashToInt(hash, c)\n\t\ts = new(big.Int).Mul(priv.D, r)\n\t\ts.Add(s, e)\n\t\ts.Mul(s, kInv)\n\t\ts.Mod(s, N)\n\t\tif s.Sign() != 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Verify verifies the signature in r, s of hash using the public key, pub. It\n\/\/ returns true iff the signature is valid.\nfunc Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {\n\t\/\/ See [NSA] 3.4.2\n\tc := pub.Curve\n\tN := c.Params().N\n\n\tif r.Sign() == 0 || s.Sign() == 0 {\n\t\treturn false\n\t}\n\tif r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {\n\t\treturn false\n\t}\n\te := hashToInt(hash, c)\n\tw := new(big.Int).ModInverse(s, N)\n\n\tu1 := e.Mul(e, w)\n\tu2 := w.Mul(r, w)\n\n\tx1, y1 := c.ScalarBaseMult(u1.Bytes())\n\tx2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())\n\tif x1.Cmp(x2) == 0 {\n\t\treturn false\n\t}\n\tx, _ := c.Add(x1, y1, x2, y2)\n\tx.Mod(x, N)\n\treturn x.Cmp(r) == 0\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ioutil\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Random number state, accessed without lock; racy but harmless.\n\/\/ We generate random temporary file names so that there's a good\n\/\/ chance the file doesn't exist yet - keeps the number of tries in\n\/\/ TempFile to a minimum.\nvar rand uint32\nvar randmu sync.Mutex\n\nfunc reseed() uint32 {\n\treturn uint32(time.Now().UnixNano() + int64(os.Getpid()))\n}\n\nfunc nextSuffix() string {\n\trandmu.Lock()\n\tr := rand\n\tif r == 0 {\n\t\tr = reseed()\n\t}\n\tr = r*1664525 + 1013904223 \/\/ constants from Numerical Recipes\n\trand = r\n\trandmu.Unlock()\n\treturn strconv.Itoa(int(1e9 + r%1e9))[1:]\n}\n\n\/\/ TempFile creates a new temporary file in the directory dir\n\/\/ with a name beginning with prefix, opens the file for reading\n\/\/ and writing, and returns the resulting *os.File.\n\/\/ If dir is the empty string, TempFile uses the default directory\n\/\/ for temporary files (see os.TempDir).\n\/\/ Multiple programs calling TempFile simultaneously\n\/\/ will not choose the same file. The caller can use f.Name()\n\/\/ to find the name of the file. It is the caller's responsibility to\n\/\/ remove the file when no longer needed.\nfunc TempFile(dir, prefix string) (f *os.File, err error) {\n\tif dir == \"\" {\n\t\tdir = os.TempDir()\n\t}\n\n\tnconflict := 0\n\tfor i := 0; i < 10000; i++ {\n\t\tname := filepath.Join(dir, prefix+nextSuffix())\n\t\tf, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n\t\tif os.IsExist(err) {\n\t\t\tif nconflict++; nconflict > 10 {\n\t\t\t\trand = reseed()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/ TempDir creates a new temporary directory in the directory dir\n\/\/ with a name beginning with prefix and returns the path of the\n\/\/ new directory. If dir is the empty string, TempDir uses the\n\/\/ default directory for temporary files (see os.TempDir).\n\/\/ Multiple programs calling TempDir simultaneously\n\/\/ will not choose the same directory. It is the caller's responsibility\n\/\/ to remove the directory when no longer needed.\nfunc TempDir(dir, prefix string) (name string, err error) {\n\tif dir == \"\" {\n\t\tdir = os.TempDir()\n\t}\n\n\tnconflict := 0\n\tfor i := 0; i < 10000; i++ {\n\t\ttry := filepath.Join(dir, prefix+nextSuffix())\n\t\terr = os.Mkdir(try, 0700)\n\t\tif os.IsExist(err) {\n\t\t\tif nconflict++; nconflict > 10 {\n\t\t\t\trand = reseed()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil {\n\t\t\tname = try\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n<commit_msg>io\/ioutil: use pathname instead of name in docs to avoid confusion caller of ioutil.TempFile() can use f.Name() to get \"pathname\" of the temporary file, instead of just the \"name\" of the file.<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ioutil\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Random number state.\n\/\/ We generate random temporary file names so that there's a good\n\/\/ chance the file doesn't exist yet - keeps the number of tries in\n\/\/ TempFile to a minimum.\nvar rand uint32\nvar randmu sync.Mutex\n\nfunc reseed() uint32 {\n\treturn uint32(time.Now().UnixNano() + int64(os.Getpid()))\n}\n\nfunc nextSuffix() string {\n\trandmu.Lock()\n\tr := rand\n\tif r == 0 {\n\t\tr = reseed()\n\t}\n\tr = r*1664525 + 1013904223 \/\/ constants from Numerical Recipes\n\trand = r\n\trandmu.Unlock()\n\treturn strconv.Itoa(int(1e9 + r%1e9))[1:]\n}\n\n\/\/ TempFile creates a new temporary file in the directory dir\n\/\/ with a name beginning with prefix, opens the file for reading\n\/\/ and writing, and returns the resulting *os.File.\n\/\/ If dir is the empty string, TempFile uses the default directory\n\/\/ for temporary files (see os.TempDir).\n\/\/ Multiple programs calling TempFile simultaneously\n\/\/ will not choose the same file. The caller can use f.Name()\n\/\/ to find the pathname of the file. It is the caller's responsibility\n\/\/ to remove the file when no longer needed.\nfunc TempFile(dir, prefix string) (f *os.File, err error) {\n\tif dir == \"\" {\n\t\tdir = os.TempDir()\n\t}\n\n\tnconflict := 0\n\tfor i := 0; i < 10000; i++ {\n\t\tname := filepath.Join(dir, prefix+nextSuffix())\n\t\tf, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n\t\tif os.IsExist(err) {\n\t\t\tif nconflict++; nconflict > 10 {\n\t\t\t\trand = reseed()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/ TempDir creates a new temporary directory in the directory dir\n\/\/ with a name beginning with prefix and returns the path of the\n\/\/ new directory. If dir is the empty string, TempDir uses the\n\/\/ default directory for temporary files (see os.TempDir).\n\/\/ Multiple programs calling TempDir simultaneously\n\/\/ will not choose the same directory. It is the caller's responsibility\n\/\/ to remove the directory when no longer needed.\nfunc TempDir(dir, prefix string) (name string, err error) {\n\tif dir == \"\" {\n\t\tdir = os.TempDir()\n\t}\n\n\tnconflict := 0\n\tfor i := 0; i < 10000; i++ {\n\t\ttry := filepath.Join(dir, prefix+nextSuffix())\n\t\terr = os.Mkdir(try, 0700)\n\t\tif os.IsExist(err) {\n\t\t\tif nconflict++; nconflict > 10 {\n\t\t\t\trand = reseed()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil {\n\t\t\tname = try\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage exec\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ErrNotFound is the error resulting if a path search failed to find an executable file.\nvar ErrNotFound = errors.New(\"executable file not found in %PATH%\")\n\nfunc chkStat(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif d.IsDir() {\n\t\treturn os.EPERM\n\t}\n\treturn nil\n}\n\nfunc findExecutable(file string, exts []string) (string, error) {\n\tif len(exts) == 0 {\n\t\treturn file, chkStat(file)\n\t}\n\tf := strings.ToLower(file)\n\tfor _, e := range exts {\n\t\tif strings.HasSuffix(f, e) {\n\t\t\treturn file, chkStat(file)\n\t\t}\n\t}\n\tfor _, e := range exts {\n\t\tif f := file + e; chkStat(f) == nil {\n\t\t\treturn f, nil\n\t\t}\n\t}\n\treturn ``, os.ENOENT\n}\n\nfunc LookPath(file string) (f string, err error) {\n\tx := os.Getenv(`PATHEXT`)\n\tif x == `` {\n\t\tx = `.COM;.EXE;.BAT;.CMD`\n\t}\n\texts := []string{}\n\tfor _, e := range strings.Split(strings.ToLower(x), `;`) {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif e[0] != '.' {\n\t\t\te = \".\" + e\n\t\t}\n\t\texts = append(exts, e)\n\t}\n\tif strings.IndexAny(file, `:\\\/`) != -1 {\n\t\tif f, err = findExecutable(file, exts); err == nil {\n\t\t\treturn\n\t\t}\n\t\treturn ``, &Error{file, err}\n\t}\n\tif pathenv := os.Getenv(`PATH`); pathenv == `` {\n\t\tif f, err = findExecutable(`.\\`+file, exts); err == nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tfor _, dir := range strings.Split(pathenv, `;`) {\n\t\t\tif f, err = findExecutable(dir+`\\`+file, exts); err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn ``, &Error{file, ErrNotFound}\n}\n<commit_msg>os\/exec: make LookPath always search the current directory under Windows.<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage exec\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"strings\"\n)\n\n\/\/ ErrNotFound is the error resulting if a path search failed to find an executable file.\nvar ErrNotFound = errors.New(\"executable file not found in %PATH%\")\n\nfunc chkStat(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif d.IsDir() {\n\t\treturn os.EPERM\n\t}\n\treturn nil\n}\n\nfunc findExecutable(file string, exts []string) (string, error) {\n\tif len(exts) == 0 {\n\t\treturn file, chkStat(file)\n\t}\n\tf := strings.ToLower(file)\n\tfor _, e := range exts {\n\t\tif strings.HasSuffix(f, e) {\n\t\t\treturn file, chkStat(file)\n\t\t}\n\t}\n\tfor _, e := range exts {\n\t\tif f := file + e; chkStat(f) == nil {\n\t\t\treturn f, nil\n\t\t}\n\t}\n\treturn ``, os.ENOENT\n}\n\nfunc LookPath(file string) (f string, err error) {\n\tx := os.Getenv(`PATHEXT`)\n\tif x == `` {\n\t\tx = `.COM;.EXE;.BAT;.CMD`\n\t}\n\texts := []string{}\n\tfor _, e := range strings.Split(strings.ToLower(x), `;`) {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif e[0] != '.' {\n\t\t\te = \".\" + e\n\t\t}\n\t\texts = append(exts, e)\n\t}\n\tif strings.IndexAny(file, `:\\\/`) != -1 {\n\t\tif f, err = findExecutable(file, exts); err == nil {\n\t\t\treturn\n\t\t}\n\t\treturn ``, &Error{file, err}\n\t}\n\tif f, err = findExecutable(`.\\`+file, exts); err == nil {\n\t\treturn\n\t}\n\tif pathenv := os.Getenv(`PATH`); pathenv != `` {\n\t\tfor _, dir := range strings.Split(pathenv, `;`) {\n\t\t\tif f, err = findExecutable(dir+`\\`+file, exts); err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn ``, &Error{file, ErrNotFound}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package filepath implements utility routines for manipulating filename paths\n\/\/ in a way compatible with the target operating system-defined file paths.\npackage filepath\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n)\n\nconst (\n\tSeparator = os.PathSeparator\n\tListSeparator = os.PathListSeparator\n)\n\n\/\/ Clean returns the shortest path name equivalent to path\n\/\/ by purely lexical processing. It applies the following rules\n\/\/ iteratively until no further processing can be done:\n\/\/\n\/\/\t1. Replace multiple Separator elements with a single one.\n\/\/\t2. Eliminate each . path name element (the current directory).\n\/\/\t3. Eliminate each inner .. path name element (the parent directory)\n\/\/\t along with the non-.. element that precedes it.\n\/\/\t4. Eliminate .. elements that begin a rooted path:\n\/\/\t that is, replace \"\/..\" by \"\/\" at the beginning of a path,\n\/\/ assuming Separator is '\/'.\n\/\/\n\/\/ If the result of this process is an empty string, Clean\n\/\/ returns the string \".\".\n\/\/\n\/\/ See also Rob Pike, ``Lexical File Names in Plan 9 or\n\/\/ Getting Dot-Dot right,''\n\/\/ http:\/\/plan9.bell-labs.com\/sys\/doc\/lexnames.html\nfunc Clean(path string) string {\n\tvol := VolumeName(path)\n\tpath = path[len(vol):]\n\tif path == \"\" {\n\t\tif len(vol) > 1 && vol[1] != ':' {\n\t\t\t\/\/ should be UNC\n\t\t\treturn FromSlash(vol)\n\t\t}\n\t\treturn vol + \".\"\n\t}\n\trooted := os.IsPathSeparator(path[0])\n\n\t\/\/ Invariants:\n\t\/\/\treading from path; r is index of next byte to process.\n\t\/\/\twriting to buf; w is index of next byte to write.\n\t\/\/\tdotdot is index in buf where .. must stop, either because\n\t\/\/\t\tit is the leading slash or it is a leading ..\/..\/.. prefix.\n\tn := len(path)\n\tbuf := []byte(path)\n\tr, w, dotdot := 0, 0, 0\n\tif rooted {\n\t\tbuf[0] = Separator\n\t\tr, w, dotdot = 1, 1, 1\n\t}\n\n\tfor r < n {\n\t\tswitch {\n\t\tcase os.IsPathSeparator(path[r]):\n\t\t\t\/\/ empty path element\n\t\t\tr++\n\t\tcase path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):\n\t\t\t\/\/ . element\n\t\t\tr++\n\t\tcase path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):\n\t\t\t\/\/ .. element: remove to last separator\n\t\t\tr += 2\n\t\t\tswitch {\n\t\t\tcase w > dotdot:\n\t\t\t\t\/\/ can backtrack\n\t\t\t\tw--\n\t\t\t\tfor w > dotdot && !os.IsPathSeparator(buf[w]) {\n\t\t\t\t\tw--\n\t\t\t\t}\n\t\t\tcase !rooted:\n\t\t\t\t\/\/ cannot backtrack, but not rooted, so append .. element.\n\t\t\t\tif w > 0 {\n\t\t\t\t\tbuf[w] = Separator\n\t\t\t\t\tw++\n\t\t\t\t}\n\t\t\t\tbuf[w] = '.'\n\t\t\t\tw++\n\t\t\t\tbuf[w] = '.'\n\t\t\t\tw++\n\t\t\t\tdotdot = w\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ real path element.\n\t\t\t\/\/ add slash if needed\n\t\t\tif rooted && w != 1 || !rooted && w != 0 {\n\t\t\t\tbuf[w] = Separator\n\t\t\t\tw++\n\t\t\t}\n\t\t\t\/\/ copy element\n\t\t\tfor ; r < n && !os.IsPathSeparator(path[r]); r++ {\n\t\t\t\tbuf[w] = path[r]\n\t\t\t\tw++\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Turn empty string into \".\"\n\tif w == 0 {\n\t\tbuf[w] = '.'\n\t\tw++\n\t}\n\n\treturn FromSlash(vol + string(buf[0:w]))\n}\n\n\/\/ ToSlash returns the result of replacing each separator character\n\/\/ in path with a slash ('\/') character.\nfunc ToSlash(path string) string {\n\tif Separator == '\/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, string(Separator), \"\/\", -1)\n}\n\n\/\/ FromSlash returns the result of replacing each slash ('\/') character\n\/\/ in path with a separator character.\nfunc FromSlash(path string) string {\n\tif Separator == '\/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, \"\/\", string(Separator), -1)\n}\n\n\/\/ SplitList splits a list of paths joined by the OS-specific ListSeparator.\nfunc SplitList(path string) []string {\n\tif path == \"\" {\n\t\treturn []string{}\n\t}\n\treturn strings.Split(path, string(ListSeparator))\n}\n\n\/\/ Split splits path immediately following the final Separator,\n\/\/ separating it into a directory and file name component.\n\/\/ If there is no Separator in path, Split returns an empty dir\n\/\/ and file set to path.\nfunc Split(path string) (dir, file string) {\n\tvol := VolumeName(path)\n\ti := len(path) - 1\n\tfor i >= len(vol) && !os.IsPathSeparator(path[i]) {\n\t\ti--\n\t}\n\treturn path[:i+1], path[i+1:]\n}\n\n\/\/ Join joins any number of path elements into a single path, adding\n\/\/ a Separator if necessary. All empty strings are ignored.\nfunc Join(elem ...string) string {\n\tfor i, e := range elem {\n\t\tif e != \"\" {\n\t\t\treturn Clean(strings.Join(elem[i:], string(Separator)))\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Ext returns the file name extension used by path.\n\/\/ The extension is the suffix beginning at the final dot\n\/\/ in the final element of path; it is empty if there is\n\/\/ no dot.\nfunc Ext(path string) string {\n\tfor i := len(path) - 1; i >= 0 && !os.IsPathSeparator(path[i]); i-- {\n\t\tif path[i] == '.' {\n\t\t\treturn path[i:]\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ EvalSymlinks returns the path name after the evaluation of any symbolic\n\/\/ links.\n\/\/ If path is relative it will be evaluated relative to the current directory.\nfunc EvalSymlinks(path string) (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Symlinks are not supported under windows.\n\t\t_, err := os.Lstat(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn Clean(path), nil\n\t}\n\tconst maxIter = 255\n\toriginalPath := path\n\t\/\/ consume path by taking each frontmost path element,\n\t\/\/ expanding it if it's a symlink, and appending it to b\n\tvar b bytes.Buffer\n\tfor n := 0; path != \"\"; n++ {\n\t\tif n > maxIter {\n\t\t\treturn \"\", errors.New(\"EvalSymlinks: too many links in \" + originalPath)\n\t\t}\n\n\t\t\/\/ find next path component, p\n\t\ti := strings.IndexRune(path, Separator)\n\t\tvar p string\n\t\tif i == -1 {\n\t\t\tp, path = path, \"\"\n\t\t} else {\n\t\t\tp, path = path[:i], path[i+1:]\n\t\t}\n\n\t\tif p == \"\" {\n\t\t\tif b.Len() == 0 {\n\t\t\t\t\/\/ must be absolute path\n\t\t\t\tb.WriteRune(Separator)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tfi, err := os.Lstat(b.String() + p)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif fi.Mode()&os.ModeSymlink == 0 {\n\t\t\tb.WriteString(p)\n\t\t\tif path != \"\" {\n\t\t\t\tb.WriteRune(Separator)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ it's a symlink, put it at the front of path\n\t\tdest, err := os.Readlink(b.String() + p)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif IsAbs(dest) {\n\t\t\tb.Reset()\n\t\t}\n\t\tpath = dest + string(Separator) + path\n\t}\n\treturn Clean(b.String()), nil\n}\n\n\/\/ Abs returns an absolute representation of path.\n\/\/ If the path is not absolute it will be joined with the current\n\/\/ working directory to turn it into an absolute path. The absolute\n\/\/ path name for a given file is not guaranteed to be unique.\nfunc Abs(path string) (string, error) {\n\tif IsAbs(path) {\n\t\treturn Clean(path), nil\n\t}\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Join(wd, path), nil\n}\n\n\/\/ Rel returns a relative path that is lexically equivalent to targpath when\n\/\/ joined to basepath with an intervening separator. That is,\n\/\/ Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.\n\/\/ An error is returned if targpath can't be made relative to basepath or if\n\/\/ knowing the current working directory would be necessary to compute it.\nfunc Rel(basepath, targpath string) (string, error) {\n\tbaseVol := VolumeName(basepath)\n\ttargVol := VolumeName(targpath)\n\tbase := Clean(basepath)\n\ttarg := Clean(targpath)\n\tif targ == base {\n\t\treturn \".\", nil\n\t}\n\tbase = base[len(baseVol):]\n\ttarg = targ[len(targVol):]\n\tif base == \".\" {\n\t\tbase = \"\"\n\t}\n\t\/\/ Can't use IsAbs - `\\a` and `a` are both relative in Windows.\n\tbaseSlashed := len(base) > 0 && base[0] == Separator\n\ttargSlashed := len(targ) > 0 && targ[0] == Separator\n\tif baseSlashed != targSlashed || baseVol != targVol {\n\t\treturn \"\", errors.New(\"Rel: can't make \" + targ + \" relative to \" + base)\n\t}\n\t\/\/ Position base[b0:bi] and targ[t0:ti] at the first differing elements.\n\tbl := len(base)\n\ttl := len(targ)\n\tvar b0, bi, t0, ti int\n\tfor {\n\t\tfor bi < bl && base[bi] != Separator {\n\t\t\tbi++\n\t\t}\n\t\tfor ti < tl && targ[ti] != Separator {\n\t\t\tti++\n\t\t}\n\t\tif targ[t0:ti] != base[b0:bi] {\n\t\t\tbreak\n\t\t}\n\t\tif bi < bl {\n\t\t\tbi++\n\t\t}\n\t\tif ti < tl {\n\t\t\tti++\n\t\t}\n\t\tb0 = bi\n\t\tt0 = ti\n\t}\n\tif base[b0:bi] == \"..\" {\n\t\treturn \"\", errors.New(\"Rel: can't make \" + targ + \" relative to \" + base)\n\t}\n\tif b0 != bl {\n\t\t\/\/ Base elements left. Must go up before going down.\n\t\tseps := strings.Count(base[b0:bl], string(Separator))\n\t\tsize := 2 + seps*3\n\t\tif tl != t0 {\n\t\t\tsize += 1 + tl - t0\n\t\t}\n\t\tbuf := make([]byte, size)\n\t\tn := copy(buf, \"..\")\n\t\tfor i := 0; i < seps; i++ {\n\t\t\tbuf[n] = Separator\n\t\t\tcopy(buf[n+1:], \"..\")\n\t\t\tn += 3\n\t\t}\n\t\tif t0 != tl {\n\t\t\tbuf[n] = Separator\n\t\t\tcopy(buf[n+1:], targ[t0:])\n\t\t}\n\t\treturn string(buf), nil\n\t}\n\treturn targ[t0:], nil\n}\n\n\/\/ SkipDir is used as a return value from WalkFuncs to indicate that\n\/\/ the directory named in the call is to be skipped. It is not returned\n\/\/ as an error by any function.\nvar SkipDir = errors.New(\"skip this directory\")\n\n\/\/ WalkFunc is the type of the function called for each file or directory\n\/\/ visited by Walk. If there was a problem walking to the file or directory\n\/\/ named by path, the incoming error will describe the problem and the\n\/\/ function can decide how to handle that error (and Walk will not descend\n\/\/ into that directory). If an error is returned, processing stops. The\n\/\/ sole exception is that if path is a directory and the function returns the\n\/\/ special value SkipDir, the contents of the directory are skipped\n\/\/ and processing continues as usual on the next file.\ntype WalkFunc func(path string, info os.FileInfo, err error) error\n\n\/\/ walk recursively descends path, calling w.\nfunc walk(path string, info os.FileInfo, walkFn WalkFunc) error {\n\terr := walkFn(path, info, nil)\n\tif err != nil {\n\t\tif info.IsDir() && err == SkipDir {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif !info.IsDir() {\n\t\treturn nil\n\t}\n\n\tlist, err := readDir(path)\n\tif err != nil {\n\t\treturn walkFn(path, info, err)\n\t}\n\n\tfor _, fileInfo := range list {\n\t\tif err = walk(Join(path, fileInfo.Name()), fileInfo, walkFn); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Walk walks the file tree rooted at root, calling walkFn for each file or\n\/\/ directory in the tree, including root. All errors that arise visiting files\n\/\/ and directories are filtered by walkFn. The files are walked in lexical\n\/\/ order, which makes the output deterministic but means that for very\n\/\/ large directories Walk can be inefficient.\nfunc Walk(root string, walkFn WalkFunc) error {\n\tinfo, err := os.Lstat(root)\n\tif err != nil {\n\t\treturn walkFn(root, nil, err)\n\t}\n\treturn walk(root, info, walkFn)\n}\n\n\/\/ readDir reads the directory named by dirname and returns\n\/\/ a sorted list of directory entries.\n\/\/ Copied from io\/ioutil to avoid the circular import.\nfunc readDir(dirname string) ([]os.FileInfo, error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Sort(byName(list))\n\treturn list, nil\n}\n\n\/\/ byName implements sort.Interface.\ntype byName []os.FileInfo\n\nfunc (f byName) Len() int { return len(f) }\nfunc (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }\nfunc (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }\n\n\/\/ Base returns the last element of path.\n\/\/ Trailing path separators are removed before extracting the last element.\n\/\/ If the path is empty, Base returns \".\".\n\/\/ If the path consists entirely of separators, Base returns a single separator.\nfunc Base(path string) string {\n\tif path == \"\" {\n\t\treturn \".\"\n\t}\n\t\/\/ Strip trailing slashes.\n\tfor len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) {\n\t\tpath = path[0 : len(path)-1]\n\t}\n\t\/\/ Find the last element\n\ti := len(path) - 1\n\tfor i >= 0 && !os.IsPathSeparator(path[i]) {\n\t\ti--\n\t}\n\tif i >= 0 {\n\t\tpath = path[i+1:]\n\t}\n\t\/\/ If empty now, it had only slashes.\n\tif path == \"\" {\n\t\treturn string(Separator)\n\t}\n\treturn path\n}\n<commit_msg>path\/filepath.Rel: document that the returned path is always relative<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package filepath implements utility routines for manipulating filename paths\n\/\/ in a way compatible with the target operating system-defined file paths.\npackage filepath\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n)\n\nconst (\n\tSeparator = os.PathSeparator\n\tListSeparator = os.PathListSeparator\n)\n\n\/\/ Clean returns the shortest path name equivalent to path\n\/\/ by purely lexical processing. It applies the following rules\n\/\/ iteratively until no further processing can be done:\n\/\/\n\/\/\t1. Replace multiple Separator elements with a single one.\n\/\/\t2. Eliminate each . path name element (the current directory).\n\/\/\t3. Eliminate each inner .. path name element (the parent directory)\n\/\/\t along with the non-.. element that precedes it.\n\/\/\t4. Eliminate .. elements that begin a rooted path:\n\/\/\t that is, replace \"\/..\" by \"\/\" at the beginning of a path,\n\/\/ assuming Separator is '\/'.\n\/\/\n\/\/ If the result of this process is an empty string, Clean\n\/\/ returns the string \".\".\n\/\/\n\/\/ See also Rob Pike, ``Lexical File Names in Plan 9 or\n\/\/ Getting Dot-Dot right,''\n\/\/ http:\/\/plan9.bell-labs.com\/sys\/doc\/lexnames.html\nfunc Clean(path string) string {\n\tvol := VolumeName(path)\n\tpath = path[len(vol):]\n\tif path == \"\" {\n\t\tif len(vol) > 1 && vol[1] != ':' {\n\t\t\t\/\/ should be UNC\n\t\t\treturn FromSlash(vol)\n\t\t}\n\t\treturn vol + \".\"\n\t}\n\trooted := os.IsPathSeparator(path[0])\n\n\t\/\/ Invariants:\n\t\/\/\treading from path; r is index of next byte to process.\n\t\/\/\twriting to buf; w is index of next byte to write.\n\t\/\/\tdotdot is index in buf where .. must stop, either because\n\t\/\/\t\tit is the leading slash or it is a leading ..\/..\/.. prefix.\n\tn := len(path)\n\tbuf := []byte(path)\n\tr, w, dotdot := 0, 0, 0\n\tif rooted {\n\t\tbuf[0] = Separator\n\t\tr, w, dotdot = 1, 1, 1\n\t}\n\n\tfor r < n {\n\t\tswitch {\n\t\tcase os.IsPathSeparator(path[r]):\n\t\t\t\/\/ empty path element\n\t\t\tr++\n\t\tcase path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):\n\t\t\t\/\/ . element\n\t\t\tr++\n\t\tcase path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):\n\t\t\t\/\/ .. element: remove to last separator\n\t\t\tr += 2\n\t\t\tswitch {\n\t\t\tcase w > dotdot:\n\t\t\t\t\/\/ can backtrack\n\t\t\t\tw--\n\t\t\t\tfor w > dotdot && !os.IsPathSeparator(buf[w]) {\n\t\t\t\t\tw--\n\t\t\t\t}\n\t\t\tcase !rooted:\n\t\t\t\t\/\/ cannot backtrack, but not rooted, so append .. element.\n\t\t\t\tif w > 0 {\n\t\t\t\t\tbuf[w] = Separator\n\t\t\t\t\tw++\n\t\t\t\t}\n\t\t\t\tbuf[w] = '.'\n\t\t\t\tw++\n\t\t\t\tbuf[w] = '.'\n\t\t\t\tw++\n\t\t\t\tdotdot = w\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ real path element.\n\t\t\t\/\/ add slash if needed\n\t\t\tif rooted && w != 1 || !rooted && w != 0 {\n\t\t\t\tbuf[w] = Separator\n\t\t\t\tw++\n\t\t\t}\n\t\t\t\/\/ copy element\n\t\t\tfor ; r < n && !os.IsPathSeparator(path[r]); r++ {\n\t\t\t\tbuf[w] = path[r]\n\t\t\t\tw++\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Turn empty string into \".\"\n\tif w == 0 {\n\t\tbuf[w] = '.'\n\t\tw++\n\t}\n\n\treturn FromSlash(vol + string(buf[0:w]))\n}\n\n\/\/ ToSlash returns the result of replacing each separator character\n\/\/ in path with a slash ('\/') character.\nfunc ToSlash(path string) string {\n\tif Separator == '\/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, string(Separator), \"\/\", -1)\n}\n\n\/\/ FromSlash returns the result of replacing each slash ('\/') character\n\/\/ in path with a separator character.\nfunc FromSlash(path string) string {\n\tif Separator == '\/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, \"\/\", string(Separator), -1)\n}\n\n\/\/ SplitList splits a list of paths joined by the OS-specific ListSeparator.\nfunc SplitList(path string) []string {\n\tif path == \"\" {\n\t\treturn []string{}\n\t}\n\treturn strings.Split(path, string(ListSeparator))\n}\n\n\/\/ Split splits path immediately following the final Separator,\n\/\/ separating it into a directory and file name component.\n\/\/ If there is no Separator in path, Split returns an empty dir\n\/\/ and file set to path.\nfunc Split(path string) (dir, file string) {\n\tvol := VolumeName(path)\n\ti := len(path) - 1\n\tfor i >= len(vol) && !os.IsPathSeparator(path[i]) {\n\t\ti--\n\t}\n\treturn path[:i+1], path[i+1:]\n}\n\n\/\/ Join joins any number of path elements into a single path, adding\n\/\/ a Separator if necessary. All empty strings are ignored.\nfunc Join(elem ...string) string {\n\tfor i, e := range elem {\n\t\tif e != \"\" {\n\t\t\treturn Clean(strings.Join(elem[i:], string(Separator)))\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Ext returns the file name extension used by path.\n\/\/ The extension is the suffix beginning at the final dot\n\/\/ in the final element of path; it is empty if there is\n\/\/ no dot.\nfunc Ext(path string) string {\n\tfor i := len(path) - 1; i >= 0 && !os.IsPathSeparator(path[i]); i-- {\n\t\tif path[i] == '.' {\n\t\t\treturn path[i:]\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ EvalSymlinks returns the path name after the evaluation of any symbolic\n\/\/ links.\n\/\/ If path is relative it will be evaluated relative to the current directory.\nfunc EvalSymlinks(path string) (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Symlinks are not supported under windows.\n\t\t_, err := os.Lstat(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn Clean(path), nil\n\t}\n\tconst maxIter = 255\n\toriginalPath := path\n\t\/\/ consume path by taking each frontmost path element,\n\t\/\/ expanding it if it's a symlink, and appending it to b\n\tvar b bytes.Buffer\n\tfor n := 0; path != \"\"; n++ {\n\t\tif n > maxIter {\n\t\t\treturn \"\", errors.New(\"EvalSymlinks: too many links in \" + originalPath)\n\t\t}\n\n\t\t\/\/ find next path component, p\n\t\ti := strings.IndexRune(path, Separator)\n\t\tvar p string\n\t\tif i == -1 {\n\t\t\tp, path = path, \"\"\n\t\t} else {\n\t\t\tp, path = path[:i], path[i+1:]\n\t\t}\n\n\t\tif p == \"\" {\n\t\t\tif b.Len() == 0 {\n\t\t\t\t\/\/ must be absolute path\n\t\t\t\tb.WriteRune(Separator)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tfi, err := os.Lstat(b.String() + p)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif fi.Mode()&os.ModeSymlink == 0 {\n\t\t\tb.WriteString(p)\n\t\t\tif path != \"\" {\n\t\t\t\tb.WriteRune(Separator)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ it's a symlink, put it at the front of path\n\t\tdest, err := os.Readlink(b.String() + p)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif IsAbs(dest) {\n\t\t\tb.Reset()\n\t\t}\n\t\tpath = dest + string(Separator) + path\n\t}\n\treturn Clean(b.String()), nil\n}\n\n\/\/ Abs returns an absolute representation of path.\n\/\/ If the path is not absolute it will be joined with the current\n\/\/ working directory to turn it into an absolute path. The absolute\n\/\/ path name for a given file is not guaranteed to be unique.\nfunc Abs(path string) (string, error) {\n\tif IsAbs(path) {\n\t\treturn Clean(path), nil\n\t}\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Join(wd, path), nil\n}\n\n\/\/ Rel returns a relative path that is lexically equivalent to targpath when\n\/\/ joined to basepath with an intervening separator. That is,\n\/\/ Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.\n\/\/ On success, the returned path will always be relative to basepath,\n\/\/ even if basepath and targpath share no elements.\n\/\/ An error is returned if targpath can't be made relative to basepath or if\n\/\/ knowing the current working directory would be necessary to compute it.\nfunc Rel(basepath, targpath string) (string, error) {\n\tbaseVol := VolumeName(basepath)\n\ttargVol := VolumeName(targpath)\n\tbase := Clean(basepath)\n\ttarg := Clean(targpath)\n\tif targ == base {\n\t\treturn \".\", nil\n\t}\n\tbase = base[len(baseVol):]\n\ttarg = targ[len(targVol):]\n\tif base == \".\" {\n\t\tbase = \"\"\n\t}\n\t\/\/ Can't use IsAbs - `\\a` and `a` are both relative in Windows.\n\tbaseSlashed := len(base) > 0 && base[0] == Separator\n\ttargSlashed := len(targ) > 0 && targ[0] == Separator\n\tif baseSlashed != targSlashed || baseVol != targVol {\n\t\treturn \"\", errors.New(\"Rel: can't make \" + targ + \" relative to \" + base)\n\t}\n\t\/\/ Position base[b0:bi] and targ[t0:ti] at the first differing elements.\n\tbl := len(base)\n\ttl := len(targ)\n\tvar b0, bi, t0, ti int\n\tfor {\n\t\tfor bi < bl && base[bi] != Separator {\n\t\t\tbi++\n\t\t}\n\t\tfor ti < tl && targ[ti] != Separator {\n\t\t\tti++\n\t\t}\n\t\tif targ[t0:ti] != base[b0:bi] {\n\t\t\tbreak\n\t\t}\n\t\tif bi < bl {\n\t\t\tbi++\n\t\t}\n\t\tif ti < tl {\n\t\t\tti++\n\t\t}\n\t\tb0 = bi\n\t\tt0 = ti\n\t}\n\tif base[b0:bi] == \"..\" {\n\t\treturn \"\", errors.New(\"Rel: can't make \" + targ + \" relative to \" + base)\n\t}\n\tif b0 != bl {\n\t\t\/\/ Base elements left. Must go up before going down.\n\t\tseps := strings.Count(base[b0:bl], string(Separator))\n\t\tsize := 2 + seps*3\n\t\tif tl != t0 {\n\t\t\tsize += 1 + tl - t0\n\t\t}\n\t\tbuf := make([]byte, size)\n\t\tn := copy(buf, \"..\")\n\t\tfor i := 0; i < seps; i++ {\n\t\t\tbuf[n] = Separator\n\t\t\tcopy(buf[n+1:], \"..\")\n\t\t\tn += 3\n\t\t}\n\t\tif t0 != tl {\n\t\t\tbuf[n] = Separator\n\t\t\tcopy(buf[n+1:], targ[t0:])\n\t\t}\n\t\treturn string(buf), nil\n\t}\n\treturn targ[t0:], nil\n}\n\n\/\/ SkipDir is used as a return value from WalkFuncs to indicate that\n\/\/ the directory named in the call is to be skipped. It is not returned\n\/\/ as an error by any function.\nvar SkipDir = errors.New(\"skip this directory\")\n\n\/\/ WalkFunc is the type of the function called for each file or directory\n\/\/ visited by Walk. If there was a problem walking to the file or directory\n\/\/ named by path, the incoming error will describe the problem and the\n\/\/ function can decide how to handle that error (and Walk will not descend\n\/\/ into that directory). If an error is returned, processing stops. The\n\/\/ sole exception is that if path is a directory and the function returns the\n\/\/ special value SkipDir, the contents of the directory are skipped\n\/\/ and processing continues as usual on the next file.\ntype WalkFunc func(path string, info os.FileInfo, err error) error\n\n\/\/ walk recursively descends path, calling w.\nfunc walk(path string, info os.FileInfo, walkFn WalkFunc) error {\n\terr := walkFn(path, info, nil)\n\tif err != nil {\n\t\tif info.IsDir() && err == SkipDir {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif !info.IsDir() {\n\t\treturn nil\n\t}\n\n\tlist, err := readDir(path)\n\tif err != nil {\n\t\treturn walkFn(path, info, err)\n\t}\n\n\tfor _, fileInfo := range list {\n\t\tif err = walk(Join(path, fileInfo.Name()), fileInfo, walkFn); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Walk walks the file tree rooted at root, calling walkFn for each file or\n\/\/ directory in the tree, including root. All errors that arise visiting files\n\/\/ and directories are filtered by walkFn. The files are walked in lexical\n\/\/ order, which makes the output deterministic but means that for very\n\/\/ large directories Walk can be inefficient.\nfunc Walk(root string, walkFn WalkFunc) error {\n\tinfo, err := os.Lstat(root)\n\tif err != nil {\n\t\treturn walkFn(root, nil, err)\n\t}\n\treturn walk(root, info, walkFn)\n}\n\n\/\/ readDir reads the directory named by dirname and returns\n\/\/ a sorted list of directory entries.\n\/\/ Copied from io\/ioutil to avoid the circular import.\nfunc readDir(dirname string) ([]os.FileInfo, error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Sort(byName(list))\n\treturn list, nil\n}\n\n\/\/ byName implements sort.Interface.\ntype byName []os.FileInfo\n\nfunc (f byName) Len() int { return len(f) }\nfunc (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }\nfunc (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }\n\n\/\/ Base returns the last element of path.\n\/\/ Trailing path separators are removed before extracting the last element.\n\/\/ If the path is empty, Base returns \".\".\n\/\/ If the path consists entirely of separators, Base returns a single separator.\nfunc Base(path string) string {\n\tif path == \"\" {\n\t\treturn \".\"\n\t}\n\t\/\/ Strip trailing slashes.\n\tfor len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) {\n\t\tpath = path[0 : len(path)-1]\n\t}\n\t\/\/ Find the last element\n\ti := len(path) - 1\n\tfor i >= 0 && !os.IsPathSeparator(path[i]) {\n\t\ti--\n\t}\n\tif i >= 0 {\n\t\tpath = path[i+1:]\n\t}\n\t\/\/ If empty now, it had only slashes.\n\tif path == \"\" {\n\t\treturn string(Separator)\n\t}\n\treturn path\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ TODO: Refactor common part of functions in this file for generic object kinds.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tbatch \"k8s.io\/api\/batch\/v1\"\n\t\"k8s.io\/api\/core\/v1\"\n\textensions \"k8s.io\/api\/extensions\/v1beta1\"\n\tapierrs \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n)\n\nconst (\n\t\/\/ Parameters for retrying with exponential backoff.\n\tretryBackoffInitialDuration = 100 * time.Millisecond\n\tretryBackoffFactor = 3\n\tretryBackoffJitter = 0\n\tretryBackoffSteps = 6\n)\n\n\/\/ Utility for retrying the given function with exponential backoff.\nfunc RetryWithExponentialBackOff(fn wait.ConditionFunc) error {\n\tbackoff := wait.Backoff{\n\t\tDuration: retryBackoffInitialDuration,\n\t\tFactor: retryBackoffFactor,\n\t\tJitter: retryBackoffJitter,\n\t\tSteps: retryBackoffSteps,\n\t}\n\treturn wait.ExponentialBackoff(backoff, fn)\n}\n\nfunc IsRetryableAPIError(err error) bool {\n\treturn apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err)\n}\n\nfunc CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Pods(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.ReplicationController) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ReplicationControllers(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *extensions.ReplicaSet) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().ReplicaSets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *extensions.Deployment) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().Deployments(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *extensions.DaemonSet) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().DaemonSets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Job) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.BatchV1().Jobs(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Secret) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Secrets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ConfigMaps(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.Service) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Services(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj *v1.ResourceQuota) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ResourceQuotas(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n<commit_msg>Add ConnectionReset, InternalError, etc also as retryable API errors<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ TODO: Refactor common part of functions in this file for generic object kinds.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tbatch \"k8s.io\/api\/batch\/v1\"\n\t\"k8s.io\/api\/core\/v1\"\n\textensions \"k8s.io\/api\/extensions\/v1beta1\"\n\tapierrs \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n)\n\nconst (\n\t\/\/ Parameters for retrying with exponential backoff.\n\tretryBackoffInitialDuration = 100 * time.Millisecond\n\tretryBackoffFactor = 3\n\tretryBackoffJitter = 0\n\tretryBackoffSteps = 6\n)\n\n\/\/ Utility for retrying the given function with exponential backoff.\nfunc RetryWithExponentialBackOff(fn wait.ConditionFunc) error {\n\tbackoff := wait.Backoff{\n\t\tDuration: retryBackoffInitialDuration,\n\t\tFactor: retryBackoffFactor,\n\t\tJitter: retryBackoffJitter,\n\t\tSteps: retryBackoffSteps,\n\t}\n\treturn wait.ExponentialBackoff(backoff, fn)\n}\n\nfunc IsRetryableAPIError(err error) bool {\n\t\/\/ These errors may indicate a transient error that we can retry in tests.\n\tif apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) ||\n\t\tapierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) {\n\t\treturn true\n\t}\n\t\/\/ If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.\n\tif _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Pods(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.ReplicationController) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ReplicationControllers(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *extensions.ReplicaSet) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().ReplicaSets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *extensions.Deployment) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().Deployments(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *extensions.DaemonSet) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.ExtensionsV1beta1().DaemonSets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Job) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.BatchV1().Jobs(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Secret) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Secrets(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ConfigMaps(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.Service) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().Services(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n\nfunc CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj *v1.ResourceQuota) error {\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"Object provided to create is empty\")\n\t}\n\tcreateFunc := func() (bool, error) {\n\t\t_, err := c.CoreV1().ResourceQuotas(namespace).Create(obj)\n\t\tif err == nil || apierrs.IsAlreadyExists(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\tif IsRetryableAPIError(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Failed to create object with non-retriable error: %v\", err)\n\t}\n\treturn RetryWithExponentialBackOff(createFunc)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 by Richard A. Wilkes. All rights reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, version 2.0. If a copy of the MPL was not distributed with\n\/\/ this file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ This Source Code Form is \"Incompatible With Secondary Licenses\", as\n\/\/ defined by the Mozilla Public License, version 2.0.\n\npackage menu\n\nimport (\n\t\"github.com\/richardwilkes\/go-ui\/event\"\n\t\"unsafe\"\n)\n\n\/\/ #cgo darwin LDFLAGS: -framework Cocoa\n\/\/ #include <stdlib.h>\n\/\/ #include \"menu.h\"\nimport \"C\"\n\nvar (\n\tmenuMap = make(map[C.uiMenu]*Menu)\n\titemMap = make(map[C.uiMenuItem]*Item)\n)\n\n\/\/ Action that an Item can take.\ntype Action func(item *Item)\n\n\/\/ Validator determines whether the specified Item should be enabled or not.\ntype Validator func(item *Item) bool\n\n\/\/ Menu represents a set of menu items.\ntype Menu struct {\n\tmenu C.uiMenu\n}\n\n\/\/ Item represents individual actions that can be issued from a Menu.\ntype Item struct {\n\titem C.uiMenuItem\n\taction Action\n\tvalidator Validator\n}\n\n\/\/ Bar returns the application menu bar.\nfunc Bar() *Menu {\n\tif menu, ok := menuMap[C.getMainMenu()]; ok {\n\t\treturn menu\n\t}\n\tmenu := New(\"\")\n\tC.setMainMenu(menu.menu)\n\treturn menu\n}\n\n\/\/ New creates a new Menu.\nfunc New(title string) *Menu {\n\tcTitle := C.CString(title)\n\tmenu := &Menu{menu: C.uiNewMenu(cTitle)}\n\tC.free(unsafe.Pointer(cTitle))\n\tmenuMap[menu.menu] = menu\n\treturn menu\n}\n\n\/\/ SetServicesMenu marks the specified menu as the services menu.\nfunc SetServicesMenu(menu *Menu) {\n\tC.uiSetServicesMenu(menu.menu)\n}\n\n\/\/ SetWindowMenu marks the specified menu as the window menu.\nfunc SetWindowMenu(menu *Menu) {\n\tC.uiSetWindowMenu(menu.menu)\n}\n\n\/\/ SetHelpMenu marks the specified menu as the help menu.\nfunc SetHelpMenu(menu *Menu) {\n\tC.uiSetHelpMenu(menu.menu)\n}\n\n\/\/ Count of Items in this Menu.\nfunc (menu *Menu) Count() int {\n\treturn int(C.uiMenuItemCount(menu.menu))\n}\n\n\/\/ Item at the specified index, or nil.\nfunc (menu *Menu) Item(index int) *Item {\n\tif item, ok := itemMap[C.uiGetMenuItem(menu.menu, C.int(index))]; ok {\n\t\treturn item\n\t}\n\treturn nil\n}\n\n\/\/ AddItem creates a new Item and appends it to the end of the Menu.\nfunc (menu *Menu) AddItem(title string, key string, action Action, validator Validator) *Item {\n\tcTitle := C.CString(title)\n\tcKey := C.CString(key)\n\titem := &Item{item: C.uiAddMenuItem(menu.menu, cTitle, cKey), action: action, validator: validator}\n\tC.free(unsafe.Pointer(cTitle))\n\tC.free(unsafe.Pointer(cKey))\n\titemMap[item.item] = item\n\treturn item\n}\n\n\/\/ AddMenu creates a new sub-Menu and appends it to the end of the Menu.\nfunc (menu *Menu) AddMenu(title string) *Menu {\n\titem := menu.AddItem(title, \"\", nil, nil)\n\tsubMenu := New(title)\n\tC.uiSetSubMenu(item.item, subMenu.menu)\n\treturn subMenu\n}\n\n\/\/ AddSeparator creates a new separator and appends it to the end of the Menu.\nfunc (menu *Menu) AddSeparator() {\n\titem := &Item{item: C.uiAddSeparator(menu.menu)}\n\titemMap[item.item] = item\n}\n\n\/\/ SetKeyModifiers sets the Item's key equivalent modifiers. By default, a Item's modifier is set\n\/\/ to event.CommandKeyMask.\nfunc (item *Item) SetKeyModifiers(modifierMask event.KeyMask) {\n\tC.uiSetKeyModifierMask(item.item, C.int(modifierMask))\n}\n\n\/\/ SubMenu of this Item or nil.\nfunc (item *Item) SubMenu() *Menu {\n\tif menu, ok := menuMap[C.uiGetSubMenu(item.item)]; ok {\n\t\treturn menu\n\t}\n\treturn nil\n}\n\n\/\/ Dispose of the Menu, releasing any operating system resources it consumed.\nfunc (menu *Menu) Dispose() {\n\tif menu.menu != nil {\n\t\tcount := C.uiMenuItemCount(menu.menu)\n\t\tvar i C.int\n\t\tfor i = 0; i < count; i++ {\n\t\t\titem := C.uiGetMenuItem(menu.menu, i)\n\t\t\tsubMenu := menuMap[C.uiGetSubMenu(item)]\n\t\t\tif subMenu != nil {\n\t\t\t\tsubMenu.Dispose()\n\t\t\t}\n\t\t\tdelete(itemMap, item)\n\t\t}\n\t\tdelete(menuMap, menu.menu)\n\t\tC.uiDisposeMenu(menu.menu)\n\t\tmenu.menu = nil\n\t}\n}\n\n\/\/export validateMenuItem\nfunc validateMenuItem(cMenuItem C.uiMenuItem) bool {\n\tif item, ok := itemMap[cMenuItem]; ok {\n\t\tif item.validator != nil {\n\t\t\treturn item.validator(item)\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/export handleMenuItem\nfunc handleMenuItem(cMenuItem C.uiMenuItem) {\n\tif item, ok := itemMap[cMenuItem]; ok {\n\t\tif item.action != nil {\n\t\t\titem.action(item)\n\t\t}\n\t}\n}\n<commit_msg>Track the titles of menus and menu items<commit_after>\/\/ Copyright (c) 2016 by Richard A. Wilkes. All rights reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, version 2.0. If a copy of the MPL was not distributed with\n\/\/ this file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ This Source Code Form is \"Incompatible With Secondary Licenses\", as\n\/\/ defined by the Mozilla Public License, version 2.0.\n\npackage menu\n\nimport (\n\t\"github.com\/richardwilkes\/go-ui\/event\"\n\t\"unsafe\"\n)\n\n\/\/ #cgo darwin LDFLAGS: -framework Cocoa\n\/\/ #include <stdlib.h>\n\/\/ #include \"menu.h\"\nimport \"C\"\n\nvar (\n\tmenuMap = make(map[C.uiMenu]*Menu)\n\titemMap = make(map[C.uiMenuItem]*Item)\n)\n\n\/\/ Action that an Item can take.\ntype Action func(item *Item)\n\n\/\/ Validator determines whether the specified Item should be enabled or not.\ntype Validator func(item *Item) bool\n\n\/\/ Menu represents a set of menu items.\ntype Menu struct {\n\tmenu C.uiMenu\n\ttitle string\n}\n\n\/\/ Item represents individual actions that can be issued from a Menu.\ntype Item struct {\n\titem C.uiMenuItem\n\ttitle string\n\taction Action\n\tvalidator Validator\n}\n\n\/\/ Bar returns the application menu bar.\nfunc Bar() *Menu {\n\tif menu, ok := menuMap[C.getMainMenu()]; ok {\n\t\treturn menu\n\t}\n\tmenu := New(\"\")\n\tC.setMainMenu(menu.menu)\n\treturn menu\n}\n\n\/\/ New creates a new Menu.\nfunc New(title string) *Menu {\n\tcTitle := C.CString(title)\n\tmenu := &Menu{menu: C.uiNewMenu(cTitle), title: title}\n\tC.free(unsafe.Pointer(cTitle))\n\tmenuMap[menu.menu] = menu\n\treturn menu\n}\n\n\/\/ SetServicesMenu marks the specified menu as the services menu.\nfunc SetServicesMenu(menu *Menu) {\n\tC.uiSetServicesMenu(menu.menu)\n}\n\n\/\/ SetWindowMenu marks the specified menu as the window menu.\nfunc SetWindowMenu(menu *Menu) {\n\tC.uiSetWindowMenu(menu.menu)\n}\n\n\/\/ SetHelpMenu marks the specified menu as the help menu.\nfunc SetHelpMenu(menu *Menu) {\n\tC.uiSetHelpMenu(menu.menu)\n}\n\n\/\/ Title returns the title of this Menu.\nfunc (menu *Menu) Title() string {\n\treturn menu.title\n}\n\n\/\/ Count of Items in this Menu.\nfunc (menu *Menu) Count() int {\n\treturn int(C.uiMenuItemCount(menu.menu))\n}\n\n\/\/ Item at the specified index, or nil.\nfunc (menu *Menu) Item(index int) *Item {\n\tif item, ok := itemMap[C.uiGetMenuItem(menu.menu, C.int(index))]; ok {\n\t\treturn item\n\t}\n\treturn nil\n}\n\n\/\/ AddItem creates a new Item and appends it to the end of the Menu.\nfunc (menu *Menu) AddItem(title string, key string, action Action, validator Validator) *Item {\n\tcTitle := C.CString(title)\n\tcKey := C.CString(key)\n\titem := &Item{item: C.uiAddMenuItem(menu.menu, cTitle, cKey), title: title, action: action, validator: validator}\n\tC.free(unsafe.Pointer(cTitle))\n\tC.free(unsafe.Pointer(cKey))\n\titemMap[item.item] = item\n\treturn item\n}\n\n\/\/ AddMenu creates a new sub-Menu and appends it to the end of the Menu.\nfunc (menu *Menu) AddMenu(title string) *Menu {\n\titem := menu.AddItem(title, \"\", nil, nil)\n\tsubMenu := New(title)\n\tC.uiSetSubMenu(item.item, subMenu.menu)\n\treturn subMenu\n}\n\n\/\/ AddSeparator creates a new separator and appends it to the end of the Menu.\nfunc (menu *Menu) AddSeparator() {\n\titem := &Item{item: C.uiAddSeparator(menu.menu)}\n\titemMap[item.item] = item\n}\n\n\/\/ Title returns this item's title.\nfunc (item *Item) Title() string {\n\treturn item.title\n}\n\n\/\/ SetKeyModifiers sets the Item's key equivalent modifiers. By default, a Item's modifier is set\n\/\/ to event.CommandKeyMask.\nfunc (item *Item) SetKeyModifiers(modifierMask event.KeyMask) {\n\tC.uiSetKeyModifierMask(item.item, C.int(modifierMask))\n}\n\n\/\/ SubMenu of this Item or nil.\nfunc (item *Item) SubMenu() *Menu {\n\tif menu, ok := menuMap[C.uiGetSubMenu(item.item)]; ok {\n\t\treturn menu\n\t}\n\treturn nil\n}\n\n\/\/ Dispose of the Menu, releasing any operating system resources it consumed.\nfunc (menu *Menu) Dispose() {\n\tif menu.menu != nil {\n\t\tcount := C.uiMenuItemCount(menu.menu)\n\t\tvar i C.int\n\t\tfor i = 0; i < count; i++ {\n\t\t\titem := C.uiGetMenuItem(menu.menu, i)\n\t\t\tsubMenu := menuMap[C.uiGetSubMenu(item)]\n\t\t\tif subMenu != nil {\n\t\t\t\tsubMenu.Dispose()\n\t\t\t}\n\t\t\tdelete(itemMap, item)\n\t\t}\n\t\tdelete(menuMap, menu.menu)\n\t\tC.uiDisposeMenu(menu.menu)\n\t\tmenu.menu = nil\n\t}\n}\n\n\/\/export validateMenuItem\nfunc validateMenuItem(cMenuItem C.uiMenuItem) bool {\n\tif item, ok := itemMap[cMenuItem]; ok {\n\t\tif item.validator != nil {\n\t\t\treturn item.validator(item)\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/export handleMenuItem\nfunc handleMenuItem(cMenuItem C.uiMenuItem) {\n\tif item, ok := itemMap[cMenuItem]; ok {\n\t\tif item.action != nil {\n\t\t\titem.action(item)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package widgets\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/ambientsound\/pms\/console\"\n\t\"github.com\/ambientsound\/pms\/options\"\n\t\"github.com\/ambientsound\/pms\/song\"\n\t\"github.com\/ambientsound\/pms\/songlist\"\n\n\t\"github.com\/gdamore\/tcell\"\n\t\"github.com\/gdamore\/tcell\/views\"\n)\n\ntype list struct {\n\tsonglist songlist.Songlist\n\tcursor int\n\tcolumns columns\n\tymin int\n\tymax int\n}\n\ntype SonglistWidget struct {\n\tcurrentListIndex int\n\tcurrentList *list\n\tlists []*list\n\tfallbackSonglist songlist.Songlist\n\tcurrentSong song.Song\n\tview views.View\n\tviewport views.ViewPort\n\toptions *options.Options\n\n\twidget\n\tviews.WidgetWatchers\n}\n\nfunc newList(s songlist.Songlist) *list {\n\tl := &list{}\n\tl.songlist = s\n\tl.columns = make(columns, 0)\n\treturn l\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc NewSonglistWidget(o *options.Options) (w *SonglistWidget) {\n\tw = &SonglistWidget{}\n\tw.options = o\n\tw.currentList = newList(songlist.New())\n\treturn\n}\n\nfunc (w *SonglistWidget) Songlist() songlist.Songlist {\n\treturn w.currentList.songlist\n}\n\nfunc (w *SonglistWidget) AutoSetColumnWidths() {\n\tfor i := range w.currentList.columns {\n\t\tw.currentList.columns[i].SetWidth(w.currentList.columns[i].Mid())\n\t}\n\tw.expandColumns()\n}\n\nfunc (w *SonglistWidget) SetColumns(cols []string) {\n\tch := make(chan int, len(w.currentList.columns))\n\tw.currentList.columns = make(columns, len(cols))\n\n\tfor i := range w.currentList.columns {\n\t\tgo func(i int) {\n\t\t\tw.currentList.columns[i].Tag = cols[i]\n\t\t\tw.currentList.columns[i].Set(w.currentList.songlist)\n\t\t\tch <- 0\n\t\t}(i)\n\t}\n\tfor i := 0; i < len(w.currentList.columns); i++ {\n\t\t<-ch\n\t}\n\tw.AutoSetColumnWidths()\n\tPostEventListChanged(w)\n}\n\nfunc (w *SonglistWidget) expandColumns() {\n\tif len(w.currentList.columns) == 0 {\n\t\treturn\n\t}\n\t_, _, xmax, _ := w.viewport.GetVisible()\n\ttotalWidth := 0\n\tpoolSize := len(w.currentList.columns)\n\tsaturation := make([]bool, poolSize)\n\tfor i := range w.currentList.columns {\n\t\ttotalWidth += w.currentList.columns[i].Width()\n\t}\n\tfor {\n\t\tfor i := range w.currentList.columns {\n\t\t\tif totalWidth > xmax {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif poolSize > 0 && saturation[i] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcol := &w.currentList.columns[i]\n\t\t\tif poolSize > 0 && col.Width() > col.MaxWidth() {\n\t\t\t\tsaturation[i] = true\n\t\t\t\tpoolSize--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcol.SetWidth(col.Width() + 1)\n\t\t\ttotalWidth++\n\t\t}\n\t}\n}\n\nfunc (w *SonglistWidget) Draw() {\n\tlist := w.Songlist()\n\tif w.view == nil || list == nil || list.Songs == nil {\n\t\treturn\n\t}\n\n\tymin, ymax := w.getVisibleBoundaries()\n\tstyle := w.Style(\"default\")\n\tlineStyled := false\n\tcursor := false\n\n\tfor y := ymin; y <= ymax; y++ {\n\n\t\ts := list.Song(y)\n\n\t\t\/\/ Style based on song's role\n\t\tcursor = y == w.currentList.cursor\n\t\tswitch {\n\t\tcase cursor:\n\t\t\tstyle = w.Style(\"cursor\")\n\t\t\tlineStyled = true\n\t\tcase w.IndexAtCurrentSong(y):\n\t\t\tstyle = w.Style(\"currentSong\")\n\t\t\tlineStyled = true\n\t\tdefault:\n\t\t\tstyle = w.Style(\"default\")\n\t\t\tlineStyled = false\n\t\t}\n\n\t\tx := 0\n\t\trightPadding := 1\n\n\t\t\/\/ Draw each column separately\n\t\tfor col := 0; col < len(w.currentList.columns); col++ {\n\n\t\t\t\/\/ Convert tag to runes\n\t\t\tkey := w.currentList.columns[col].Tag\n\t\t\tstr := s.Tags[key]\n\t\t\tif !lineStyled {\n\t\t\t\tstyle = w.Style(key)\n\t\t\t}\n\t\t\trunes := []rune(str)\n\n\t\t\tif col+1 == len(w.currentList.columns) {\n\t\t\t\trightPadding = 0\n\t\t\t}\n\t\t\tstrmin := min(len(runes), w.currentList.columns[col].Width()-rightPadding)\n\t\t\tstrmax := w.currentList.columns[col].Width()\n\t\t\tn := 0\n\t\t\tfor n < strmin {\n\t\t\t\tw.viewport.SetContent(x, y, runes[n], nil, style)\n\t\t\t\tn++\n\t\t\t\tx++\n\t\t\t}\n\t\t\tfor n < strmax {\n\t\t\t\tw.viewport.SetContent(x, y, ' ', nil, style)\n\t\t\t\tn++\n\t\t\t\tx++\n\t\t\t}\n\t\t}\n\t}\n\tw.PostEventWidgetContent(w)\n\tPostEventScroll(w)\n}\n\nfunc (w *SonglistWidget) getVisibleBoundaries() (ymin, ymax int) {\n\t_, ymin, _, ymax = w.viewport.GetVisible()\n\treturn\n}\n\nfunc (w *SonglistWidget) getBoundaries() (ymin, ymax int) {\n\treturn 0, w.Songlist().Len() - 1\n}\n\nfunc (w *SonglistWidget) setViewportSize() {\n\tx, y := w.Size()\n\tw.viewport.Resize(0, 0, -1, -1)\n\tw.viewport.SetContentSize(x, w.Songlist().Len(), true)\n\tw.viewport.SetSize(x, min(y, w.Songlist().Len()))\n}\n\nfunc (w *SonglistWidget) validateViewport() {\n\tw.validateCursorVisible()\n\tw.AutoSetColumnWidths()\n\tw.PostEventWidgetContent(w)\n}\n\nfunc (w *SonglistWidget) MoveCursorUp(i int) {\n\tw.MoveCursor(-i)\n}\n\nfunc (w *SonglistWidget) MoveCursorDown(i int) {\n\tw.MoveCursor(i)\n}\n\nfunc (w *SonglistWidget) MoveCursor(i int) {\n\tw.SetCursor(w.currentList.cursor + i)\n}\n\nfunc (w *SonglistWidget) SetCursor(i int) {\n\tw.currentList.cursor = i\n\tw.validateCursorList()\n\tw.viewport.MakeVisible(0, w.currentList.cursor)\n\tw.validateCursorVisible()\n\tw.PostEventWidgetContent(w)\n}\n\nfunc (w *SonglistWidget) Cursor() int {\n\treturn w.currentList.cursor\n}\n\nfunc (w *SonglistWidget) CursorSong() *song.Song {\n\treturn w.Songlist().Song(w.currentList.cursor)\n}\n\nfunc (w *SonglistWidget) SetCurrentSong(s *song.Song) {\n\tif s != nil {\n\t\tw.currentSong = *s\n\t} else {\n\t\tw.currentSong = song.Song{}\n\t}\n}\n\nfunc (w *SonglistWidget) IndexAtCurrentSong(i int) bool {\n\ts := w.Songlist().Song(i)\n\tif s == nil {\n\t\treturn false\n\t}\n\tif songlist.IsQueue(w.Songlist()) {\n\t\treturn s.ID == w.currentSong.ID\n\t} else {\n\t\treturn s.TagString(\"file\") == w.currentSong.TagString(\"file\")\n\t}\n}\n\n\/\/ validateCursorVisible makes sure the cursor stays within the visible area of the viewport.\nfunc (w *SonglistWidget) validateCursorVisible() {\n\tw.currentList.ymin, w.currentList.ymax = w.getVisibleBoundaries()\n\tw.validateCursor(w.currentList.ymin, w.currentList.ymax)\n}\n\n\/\/ validateCursorList makes sure the cursor stays within songlist boundaries.\nfunc (w *SonglistWidget) validateCursorList() {\n\tymin, ymax := w.getBoundaries()\n\tw.validateCursor(ymin, ymax)\n}\n\n\/\/ validateCursor adjusts the cursor based on minimum and maximum boundaries.\nfunc (w *SonglistWidget) validateCursor(ymin, ymax int) {\n\tif w.currentList.cursor < ymin {\n\t\tw.currentList.cursor = ymin\n\t}\n\tif w.currentList.cursor > ymax {\n\t\tw.currentList.cursor = ymax\n\t}\n}\n\nfunc (w *SonglistWidget) Resize() {\n\tw.setViewportSize()\n\tw.validateViewport()\n\tw.PostEventWidgetResize(w)\n}\n\nfunc (m *SonglistWidget) HandleEvent(ev tcell.Event) bool {\n\treturn false\n}\n\nfunc (w *SonglistWidget) SetView(v views.View) {\n\tw.view = v\n\tw.viewport.SetView(w.view)\n}\n\nfunc (w *SonglistWidget) Size() (int, int) {\n\treturn w.view.Size()\n}\n\nfunc (w *SonglistWidget) Name() string {\n\treturn w.Songlist().Name()\n}\n\nfunc (w *SonglistWidget) Columns() []column {\n\treturn w.currentList.columns\n}\n\nfunc (w *SonglistWidget) Len() int {\n\treturn w.Songlist().Len()\n}\n\nfunc (w *SonglistWidget) SonglistsLen() int {\n\treturn len(w.lists)\n}\n\n\/\/ PositionReadout returns a combination of PositionLongReadout() and PositionShortReadout().\nfunc (w *SonglistWidget) PositionReadout() string {\n\treturn fmt.Sprintf(\"%s %s\", w.PositionLongReadout(), w.PositionShortReadout())\n}\n\n\/\/ PositionLongReadout returns a formatted string containing the visible song\n\/\/ range as well as the total number of songs.\nfunc (w *SonglistWidget) PositionLongReadout() string {\n\tymin, ymax := w.getVisibleBoundaries()\n\treturn fmt.Sprintf(\"%d,%d-%d\/%d\", w.Cursor()+1, ymin+1, ymax+1, w.Songlist().Len())\n}\n\n\/\/ PositionShortReadout returns a percentage indicator on how far the songlist is scrolled.\nfunc (w *SonglistWidget) PositionShortReadout() string {\n\tymin, ymax := w.getVisibleBoundaries()\n\tif ymin == 0 && ymax+1 == w.Songlist().Len() {\n\t\treturn `All`\n\t}\n\tif ymin == 0 {\n\t\treturn `Top`\n\t}\n\tif ymax+1 == w.Songlist().Len() {\n\t\treturn `Bot`\n\t}\n\tfraction := float64(float64(ymin) \/ float64(w.Songlist().Len()))\n\tpercent := int(math.Floor(fraction * 100))\n\treturn fmt.Sprintf(\"%2d%%\", percent)\n}\n\n\/\/\n\nfunc (w *SonglistWidget) AddSonglist(s songlist.Songlist) {\n\tlist := newList(s)\n\tw.lists = append(w.lists, list)\n\tconsole.Log(\"Songlist UI: added songlist index %d of type %T at address %p\", len(w.lists)-1, s, s)\n}\n\n\/\/ ReplaceSonglist replaces an existing songlist with its new version. Checking\n\/\/ is done on a type-level, so only the queue and library will be replaced.\nfunc (w *SonglistWidget) ReplaceSonglist(s songlist.Songlist) {\n\tfor i := range w.lists {\n\t\tif reflect.TypeOf(w.lists[i].songlist) != reflect.TypeOf(s) {\n\t\t\tcontinue\n\t\t}\n\t\tconsole.Log(\"Songlist UI: replacing songlist of type %T at %p with new list at %p\", s, w.lists[i].songlist, s)\n\t\tconsole.Log(\"Songlist UI: comparing %p %p\", w.lists[i], w.currentList)\n\n\t\tactive := w.lists[i] == w.currentList\n\t\tw.lists[i] = newList(s)\n\n\t\tif active {\n\t\t\tconsole.Log(\"Songlist UI: replaced songlist is currently active, switching to new songlist.\")\n\t\t\tw.SetSonglist(s)\n\t\t}\n\t\treturn\n\t}\n\n\tconsole.Log(\"Songlist UI: adding songlist of type %T at address %p since no similar exists\", s, s)\n\tw.AddSonglist(s)\n}\n\nfunc (w *SonglistWidget) SetSonglist(s songlist.Songlist) {\n\tconsole.Log(\"SetSonglist(%T %p)\", s, s)\n\tw.currentListIndex = -1\n\tfor i, stored := range w.lists {\n\t\tif stored.songlist == s {\n\t\t\tw.SetSonglistIndex(i)\n\t\t\treturn\n\t\t}\n\t}\n\tw.activateList(newList(s))\n}\n\n\/\/ SetFallbackSonglist sets a songlist that should be reverted to in case a search result returns zero results.\nfunc (w *SonglistWidget) SetFallbackSonglist(s songlist.Songlist) {\n\tconsole.Log(\"SetFallbackSonglist(%T %p)\", s, s)\n\tw.fallbackSonglist = s\n}\n\nfunc (w *SonglistWidget) FallbackSonglist() songlist.Songlist {\n\treturn w.fallbackSonglist\n}\n\nfunc (w *SonglistWidget) activateList(s *list) {\n\tconsole.Log(\"activateList(%T %p)\", s.songlist, s.songlist)\n\tw.currentList = s\n\tif len(w.currentList.columns) == 0 {\n\t\tw.SetColumns(strings.Split(w.options.StringValue(\"columns\"), \",\")) \/\/ FIXME\n\t}\n\tw.setViewportSize()\n\t\/\/console.Log(\"Calling MakeVisible(%d), MakeVisible(%d)\", w.currentList.ymax, w.currentList.ymin)\n\tw.viewport.MakeVisible(0, w.currentList.ymax)\n\tw.viewport.MakeVisible(0, w.currentList.ymin)\n\tw.validateViewport()\n\tPostEventListChanged(w)\n}\n\nfunc (w *SonglistWidget) SonglistIndex() int {\n\treturn w.currentListIndex\n}\n\nfunc (w *SonglistWidget) ValidSonglistIndex(i int) bool {\n\treturn i >= 0 && i < w.SonglistsLen()\n}\n\nfunc (w *SonglistWidget) SetSonglistIndex(i int) error {\n\tconsole.Log(\"SetSonglistIndex(%d)\", i)\n\tif !w.ValidSonglistIndex(i) {\n\t\treturn fmt.Errorf(\"Index %d is out of bounds (try between 1 and %d)\", i+1, w.SonglistsLen())\n\t}\n\tw.currentListIndex = i\n\tw.activateList(w.lists[w.SonglistIndex()])\n\tw.SetFallbackSonglist(w.currentList.songlist)\n\treturn nil\n}\n<commit_msg>Retain cursor position when replacing existing songlists<commit_after>package widgets\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/ambientsound\/pms\/console\"\n\t\"github.com\/ambientsound\/pms\/options\"\n\t\"github.com\/ambientsound\/pms\/song\"\n\t\"github.com\/ambientsound\/pms\/songlist\"\n\n\t\"github.com\/gdamore\/tcell\"\n\t\"github.com\/gdamore\/tcell\/views\"\n)\n\ntype list struct {\n\tsonglist songlist.Songlist\n\tcursor int\n\tcolumns columns\n\tymin int\n\tymax int\n}\n\ntype SonglistWidget struct {\n\tcurrentListIndex int\n\tcurrentList *list\n\tlists []*list\n\tfallbackSonglist songlist.Songlist\n\tcurrentSong song.Song\n\tview views.View\n\tviewport views.ViewPort\n\toptions *options.Options\n\n\twidget\n\tviews.WidgetWatchers\n}\n\nfunc newList(s songlist.Songlist) *list {\n\tl := &list{}\n\tl.songlist = s\n\tl.columns = make(columns, 0)\n\treturn l\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc NewSonglistWidget(o *options.Options) (w *SonglistWidget) {\n\tw = &SonglistWidget{}\n\tw.options = o\n\tw.currentList = newList(songlist.New())\n\treturn\n}\n\nfunc (w *SonglistWidget) Songlist() songlist.Songlist {\n\treturn w.currentList.songlist\n}\n\nfunc (w *SonglistWidget) AutoSetColumnWidths() {\n\tfor i := range w.currentList.columns {\n\t\tw.currentList.columns[i].SetWidth(w.currentList.columns[i].Mid())\n\t}\n\tw.expandColumns()\n}\n\nfunc (w *SonglistWidget) SetColumns(cols []string) {\n\tch := make(chan int, len(w.currentList.columns))\n\tw.currentList.columns = make(columns, len(cols))\n\n\tfor i := range w.currentList.columns {\n\t\tgo func(i int) {\n\t\t\tw.currentList.columns[i].Tag = cols[i]\n\t\t\tw.currentList.columns[i].Set(w.currentList.songlist)\n\t\t\tch <- 0\n\t\t}(i)\n\t}\n\tfor i := 0; i < len(w.currentList.columns); i++ {\n\t\t<-ch\n\t}\n\tw.AutoSetColumnWidths()\n\tPostEventListChanged(w)\n}\n\nfunc (w *SonglistWidget) expandColumns() {\n\tif len(w.currentList.columns) == 0 {\n\t\treturn\n\t}\n\t_, _, xmax, _ := w.viewport.GetVisible()\n\ttotalWidth := 0\n\tpoolSize := len(w.currentList.columns)\n\tsaturation := make([]bool, poolSize)\n\tfor i := range w.currentList.columns {\n\t\ttotalWidth += w.currentList.columns[i].Width()\n\t}\n\tfor {\n\t\tfor i := range w.currentList.columns {\n\t\t\tif totalWidth > xmax {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif poolSize > 0 && saturation[i] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcol := &w.currentList.columns[i]\n\t\t\tif poolSize > 0 && col.Width() > col.MaxWidth() {\n\t\t\t\tsaturation[i] = true\n\t\t\t\tpoolSize--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcol.SetWidth(col.Width() + 1)\n\t\t\ttotalWidth++\n\t\t}\n\t}\n}\n\nfunc (w *SonglistWidget) Draw() {\n\tlist := w.Songlist()\n\tif w.view == nil || list == nil || list.Songs == nil {\n\t\treturn\n\t}\n\n\tymin, ymax := w.getVisibleBoundaries()\n\tstyle := w.Style(\"default\")\n\tlineStyled := false\n\tcursor := false\n\n\tfor y := ymin; y <= ymax; y++ {\n\n\t\ts := list.Song(y)\n\n\t\t\/\/ Style based on song's role\n\t\tcursor = y == w.currentList.cursor\n\t\tswitch {\n\t\tcase cursor:\n\t\t\tstyle = w.Style(\"cursor\")\n\t\t\tlineStyled = true\n\t\tcase w.IndexAtCurrentSong(y):\n\t\t\tstyle = w.Style(\"currentSong\")\n\t\t\tlineStyled = true\n\t\tdefault:\n\t\t\tstyle = w.Style(\"default\")\n\t\t\tlineStyled = false\n\t\t}\n\n\t\tx := 0\n\t\trightPadding := 1\n\n\t\t\/\/ Draw each column separately\n\t\tfor col := 0; col < len(w.currentList.columns); col++ {\n\n\t\t\t\/\/ Convert tag to runes\n\t\t\tkey := w.currentList.columns[col].Tag\n\t\t\tstr := s.Tags[key]\n\t\t\tif !lineStyled {\n\t\t\t\tstyle = w.Style(key)\n\t\t\t}\n\t\t\trunes := []rune(str)\n\n\t\t\tif col+1 == len(w.currentList.columns) {\n\t\t\t\trightPadding = 0\n\t\t\t}\n\t\t\tstrmin := min(len(runes), w.currentList.columns[col].Width()-rightPadding)\n\t\t\tstrmax := w.currentList.columns[col].Width()\n\t\t\tn := 0\n\t\t\tfor n < strmin {\n\t\t\t\tw.viewport.SetContent(x, y, runes[n], nil, style)\n\t\t\t\tn++\n\t\t\t\tx++\n\t\t\t}\n\t\t\tfor n < strmax {\n\t\t\t\tw.viewport.SetContent(x, y, ' ', nil, style)\n\t\t\t\tn++\n\t\t\t\tx++\n\t\t\t}\n\t\t}\n\t}\n\tw.PostEventWidgetContent(w)\n\tPostEventScroll(w)\n}\n\nfunc (w *SonglistWidget) getVisibleBoundaries() (ymin, ymax int) {\n\t_, ymin, _, ymax = w.viewport.GetVisible()\n\treturn\n}\n\nfunc (w *SonglistWidget) getBoundaries() (ymin, ymax int) {\n\treturn 0, w.Songlist().Len() - 1\n}\n\nfunc (w *SonglistWidget) setViewportSize() {\n\tx, y := w.Size()\n\tw.viewport.Resize(0, 0, -1, -1)\n\tw.viewport.SetContentSize(x, w.Songlist().Len(), true)\n\tw.viewport.SetSize(x, min(y, w.Songlist().Len()))\n}\n\nfunc (w *SonglistWidget) validateViewport() {\n\tw.validateCursorVisible()\n\tw.AutoSetColumnWidths()\n\tw.PostEventWidgetContent(w)\n}\n\nfunc (w *SonglistWidget) MoveCursorUp(i int) {\n\tw.MoveCursor(-i)\n}\n\nfunc (w *SonglistWidget) MoveCursorDown(i int) {\n\tw.MoveCursor(i)\n}\n\nfunc (w *SonglistWidget) MoveCursor(i int) {\n\tw.SetCursor(w.currentList.cursor + i)\n}\n\nfunc (w *SonglistWidget) SetCursor(i int) {\n\tw.currentList.cursor = i\n\tw.validateCursorList()\n\tw.viewport.MakeVisible(0, w.currentList.cursor)\n\tw.validateCursorVisible()\n\tw.PostEventWidgetContent(w)\n}\n\nfunc (w *SonglistWidget) Cursor() int {\n\treturn w.currentList.cursor\n}\n\nfunc (w *SonglistWidget) CursorSong() *song.Song {\n\treturn w.Songlist().Song(w.currentList.cursor)\n}\n\nfunc (w *SonglistWidget) SetCurrentSong(s *song.Song) {\n\tif s != nil {\n\t\tw.currentSong = *s\n\t} else {\n\t\tw.currentSong = song.Song{}\n\t}\n}\n\nfunc (w *SonglistWidget) IndexAtCurrentSong(i int) bool {\n\ts := w.Songlist().Song(i)\n\tif s == nil {\n\t\treturn false\n\t}\n\tif songlist.IsQueue(w.Songlist()) {\n\t\treturn s.ID == w.currentSong.ID\n\t} else {\n\t\treturn s.TagString(\"file\") == w.currentSong.TagString(\"file\")\n\t}\n}\n\n\/\/ validateCursorVisible makes sure the cursor stays within the visible area of the viewport.\nfunc (w *SonglistWidget) validateCursorVisible() {\n\tw.currentList.ymin, w.currentList.ymax = w.getVisibleBoundaries()\n\tw.validateCursor(w.currentList.ymin, w.currentList.ymax)\n}\n\n\/\/ validateCursorList makes sure the cursor stays within songlist boundaries.\nfunc (w *SonglistWidget) validateCursorList() {\n\tymin, ymax := w.getBoundaries()\n\tw.validateCursor(ymin, ymax)\n}\n\n\/\/ validateCursor adjusts the cursor based on minimum and maximum boundaries.\nfunc (w *SonglistWidget) validateCursor(ymin, ymax int) {\n\tif w.currentList.cursor < ymin {\n\t\tw.currentList.cursor = ymin\n\t}\n\tif w.currentList.cursor > ymax {\n\t\tw.currentList.cursor = ymax\n\t}\n}\n\nfunc (w *SonglistWidget) Resize() {\n\tw.setViewportSize()\n\tw.validateViewport()\n\tw.PostEventWidgetResize(w)\n}\n\nfunc (m *SonglistWidget) HandleEvent(ev tcell.Event) bool {\n\treturn false\n}\n\nfunc (w *SonglistWidget) SetView(v views.View) {\n\tw.view = v\n\tw.viewport.SetView(w.view)\n}\n\nfunc (w *SonglistWidget) Size() (int, int) {\n\treturn w.view.Size()\n}\n\nfunc (w *SonglistWidget) Name() string {\n\treturn w.Songlist().Name()\n}\n\nfunc (w *SonglistWidget) Columns() []column {\n\treturn w.currentList.columns\n}\n\nfunc (w *SonglistWidget) Len() int {\n\treturn w.Songlist().Len()\n}\n\nfunc (w *SonglistWidget) SonglistsLen() int {\n\treturn len(w.lists)\n}\n\n\/\/ PositionReadout returns a combination of PositionLongReadout() and PositionShortReadout().\nfunc (w *SonglistWidget) PositionReadout() string {\n\treturn fmt.Sprintf(\"%s %s\", w.PositionLongReadout(), w.PositionShortReadout())\n}\n\n\/\/ PositionLongReadout returns a formatted string containing the visible song\n\/\/ range as well as the total number of songs.\nfunc (w *SonglistWidget) PositionLongReadout() string {\n\tymin, ymax := w.getVisibleBoundaries()\n\treturn fmt.Sprintf(\"%d,%d-%d\/%d\", w.Cursor()+1, ymin+1, ymax+1, w.Songlist().Len())\n}\n\n\/\/ PositionShortReadout returns a percentage indicator on how far the songlist is scrolled.\nfunc (w *SonglistWidget) PositionShortReadout() string {\n\tymin, ymax := w.getVisibleBoundaries()\n\tif ymin == 0 && ymax+1 == w.Songlist().Len() {\n\t\treturn `All`\n\t}\n\tif ymin == 0 {\n\t\treturn `Top`\n\t}\n\tif ymax+1 == w.Songlist().Len() {\n\t\treturn `Bot`\n\t}\n\tfraction := float64(float64(ymin) \/ float64(w.Songlist().Len()))\n\tpercent := int(math.Floor(fraction * 100))\n\treturn fmt.Sprintf(\"%2d%%\", percent)\n}\n\n\/\/\n\nfunc (w *SonglistWidget) AddSonglist(s songlist.Songlist) {\n\tlist := newList(s)\n\tw.lists = append(w.lists, list)\n\tconsole.Log(\"Songlist UI: added songlist index %d of type %T at address %p\", len(w.lists)-1, s, s)\n}\n\n\/\/ ReplaceSonglist replaces an existing songlist with its new version. Checking\n\/\/ is done on a type-level, so only the queue and library will be replaced.\nfunc (w *SonglistWidget) ReplaceSonglist(s songlist.Songlist) {\n\tfor i := range w.lists {\n\t\tif reflect.TypeOf(w.lists[i].songlist) != reflect.TypeOf(s) {\n\t\t\tcontinue\n\t\t}\n\t\tconsole.Log(\"Songlist UI: replacing songlist of type %T at %p with new list at %p\", s, w.lists[i].songlist, s)\n\t\tconsole.Log(\"Songlist UI: comparing %p %p\", w.lists[i], w.currentList)\n\n\t\tactive := w.lists[i] == w.currentList\n\t\tw.lists[i].songlist = s\n\n\t\tif active {\n\t\t\tconsole.Log(\"Songlist UI: replaced songlist is currently active, switching to new songlist.\")\n\t\t\tw.SetSonglist(s)\n\t\t}\n\t\treturn\n\t}\n\n\tconsole.Log(\"Songlist UI: adding songlist of type %T at address %p since no similar exists\", s, s)\n\tw.AddSonglist(s)\n}\n\nfunc (w *SonglistWidget) SetSonglist(s songlist.Songlist) {\n\tconsole.Log(\"SetSonglist(%T %p)\", s, s)\n\tw.currentListIndex = -1\n\tfor i, stored := range w.lists {\n\t\tif stored.songlist == s {\n\t\t\tw.SetSonglistIndex(i)\n\t\t\treturn\n\t\t}\n\t}\n\tw.activateList(newList(s))\n}\n\n\/\/ SetFallbackSonglist sets a songlist that should be reverted to in case a search result returns zero results.\nfunc (w *SonglistWidget) SetFallbackSonglist(s songlist.Songlist) {\n\tconsole.Log(\"SetFallbackSonglist(%T %p)\", s, s)\n\tw.fallbackSonglist = s\n}\n\nfunc (w *SonglistWidget) FallbackSonglist() songlist.Songlist {\n\treturn w.fallbackSonglist\n}\n\nfunc (w *SonglistWidget) activateList(s *list) {\n\tconsole.Log(\"activateList(%T %p)\", s.songlist, s.songlist)\n\tw.currentList = s\n\tif len(w.currentList.columns) == 0 {\n\t\tw.SetColumns(strings.Split(w.options.StringValue(\"columns\"), \",\")) \/\/ FIXME\n\t}\n\tw.setViewportSize()\n\t\/\/console.Log(\"Calling MakeVisible(%d), MakeVisible(%d)\", w.currentList.ymax, w.currentList.ymin)\n\tw.viewport.MakeVisible(0, w.currentList.ymax)\n\tw.viewport.MakeVisible(0, w.currentList.ymin)\n\tw.validateViewport()\n\tPostEventListChanged(w)\n}\n\nfunc (w *SonglistWidget) SonglistIndex() int {\n\treturn w.currentListIndex\n}\n\nfunc (w *SonglistWidget) ValidSonglistIndex(i int) bool {\n\treturn i >= 0 && i < w.SonglistsLen()\n}\n\nfunc (w *SonglistWidget) SetSonglistIndex(i int) error {\n\tconsole.Log(\"SetSonglistIndex(%d)\", i)\n\tif !w.ValidSonglistIndex(i) {\n\t\treturn fmt.Errorf(\"Index %d is out of bounds (try between 1 and %d)\", i+1, w.SonglistsLen())\n\t}\n\tw.currentListIndex = i\n\tw.activateList(w.lists[w.SonglistIndex()])\n\tw.SetFallbackSonglist(w.currentList.songlist)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/jackc\/pgx\"\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/flynn\/flynn\/router\/types\"\n)\n\nvar ErrNotFound = errors.New(\"router: route not found\")\n\ntype DataStore interface {\n\tAdd(route *router.Route) error\n\tUpdate(route *router.Route) error\n\tGet(id string) (*router.Route, error)\n\tList() ([]*router.Route, error)\n\tRemove(id string) error\n\tSync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error\n}\n\ntype DataStoreReader interface {\n\tGet(id string) (*router.Route, error)\n\tList() ([]*router.Route, error)\n}\n\ntype SyncHandler interface {\n\tSet(route *router.Route) error\n\tRemove(id string) error\n}\n\ntype pgDataStore struct {\n\tpgx *pgx.ConnPool\n\n\trouteType string\n\ttableName string\n}\n\nconst (\n\trouteTypeHTTP = \"http\"\n\trouteTypeTCP = \"tcp\"\n\ttableNameHTTP = \"http_routes\"\n\ttableNameTCP = \"tcp_routes\"\n)\n\n\/\/ NewPostgresDataStore returns a DataStore that stores route information in a\n\/\/ Postgres database. It uses pg_notify and a listener connection to watch for\n\/\/ route changes.\nfunc NewPostgresDataStore(routeType string, pgx *pgx.ConnPool) *pgDataStore {\n\ttableName := \"\"\n\tswitch routeType {\n\tcase routeTypeHTTP:\n\t\ttableName = tableNameHTTP\n\tcase routeTypeTCP:\n\t\ttableName = tableNameTCP\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown routeType: %q\", routeType))\n\t}\n\treturn &pgDataStore{\n\t\tpgx: pgx,\n\t\trouteType: routeType,\n\t\ttableName: tableName,\n\t}\n}\n\nconst sqlAddRouteHTTP = `\nINSERT INTO ` + tableNameHTTP + ` (parent_ref, service, domain, tls_cert, tls_key, sticky)\n\tVALUES ($1, $2, $3, $4, $5, $6)\n\tRETURNING id, created_at, updated_at`\n\nconst sqlAddRouteTCP = `\nINSERT INTO ` + tableNameTCP + ` (parent_ref, service, port)\n\tVALUES ($1, $2, $3)\n\tRETURNING id, created_at, updated_at`\n\nfunc (d *pgDataStore) Add(r *router.Route) (err error) {\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\terr = d.pgx.QueryRow(\n\t\t\tsqlAddRouteHTTP,\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.Domain,\n\t\t\tr.TLSCert,\n\t\t\tr.TLSKey,\n\t\t\tr.Sticky,\n\t\t).Scan(&r.ID, &r.CreatedAt, &r.UpdatedAt)\n\tcase tableNameTCP:\n\t\terr = d.pgx.QueryRow(\n\t\t\tsqlAddRouteTCP,\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.Port,\n\t\t).Scan(&r.ID, &r.CreatedAt, &r.UpdatedAt)\n\t}\n\tr.Type = d.routeType\n\treturn err\n}\n\nconst sqlUpdateRouteHTTP = `\nUPDATE ` + tableNameHTTP + ` SET parent_ref = $1, service = $2, tls_cert = $3, tls_key = $4, sticky = $5\n\tWHERE id = $6 AND domain = $7 AND deleted_at IS NULL\n\tRETURNING %s`\n\nconst sqlUpdateRouteTCP = `\nUPDATE ` + tableNameTCP + ` SET parent_ref = $1, service = $2\n\tWHERE id = $3 AND port = $4 AND deleted_at IS NULL\n\tRETURNING %s`\n\nfunc (d *pgDataStore) Update(r *router.Route) error {\n\tvar row *pgx.Row\n\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\trow = d.pgx.QueryRow(\n\t\t\tfmt.Sprintf(sqlUpdateRouteHTTP, d.columnNames()),\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.TLSCert,\n\t\t\tr.TLSKey,\n\t\t\tr.Sticky,\n\t\t\tr.ID,\n\t\t\tr.Domain,\n\t\t)\n\tcase tableNameTCP:\n\t\trow = d.pgx.QueryRow(\n\t\t\tfmt.Sprintf(sqlUpdateRouteTCP, d.columnNames()),\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.ID,\n\t\t\tr.Port,\n\t\t)\n\t}\n\terr := d.scanRoute(r, row)\n\tif err == pgx.ErrNoRows {\n\t\treturn ErrNotFound\n\t}\n\treturn err\n}\n\nconst sqlRemoveRoute = `UPDATE %s SET deleted_at = now() WHERE id = $1`\n\nfunc (d *pgDataStore) Remove(id string) error {\n\t_, err := d.pgx.Exec(fmt.Sprintf(sqlRemoveRoute, d.tableName), id)\n\treturn err\n}\n\nconst sqlGetRoute = `SELECT %s FROM %s WHERE id = $1 AND deleted_at IS NULL`\n\nfunc (d *pgDataStore) Get(id string) (*router.Route, error) {\n\tif id == \"\" {\n\t\treturn nil, ErrNotFound\n\t}\n\n\tquery := fmt.Sprintf(sqlGetRoute, d.columnNames(), d.tableName)\n\trow := d.pgx.QueryRow(query, id)\n\n\tr := &router.Route{}\n\terr := d.scanRoute(r, row)\n\tif err == pgx.ErrNoRows {\n\t\treturn nil, ErrNotFound\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}\n\nconst sqlListRoutes = `SELECT %s FROM %s WHERE deleted_at IS NULL`\n\nfunc (d *pgDataStore) List() ([]*router.Route, error) {\n\tquery := fmt.Sprintf(sqlListRoutes, d.columnNames(), d.tableName)\n\trows, err := d.pgx.Query(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\troutes := []*router.Route{}\n\tfor rows.Next() {\n\t\tr := &router.Route{}\n\t\terr := d.scanRoute(r, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Type = d.routeType\n\t\troutes = append(routes, r)\n\t}\n\treturn routes, rows.Err()\n}\n\nfunc (d *pgDataStore) Sync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tidc, errc, err := d.startListener(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinitialRoutes, err := d.List()\n\tif err != nil {\n\t\tcancel()\n\t\treturn err\n\t}\n\n\tfor _, route := range initialRoutes {\n\t\tif err := h.Set(route); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tclose(startc)\n\n\tfor {\n\t\tselect {\n\t\tcase id := <-idc:\n\t\t\tif err := d.handleUpdate(h, id); err != nil {\n\t\t\t\tcancel()\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase err = <-errc:\n\t\t\treturn err\n\t\tcase <-ctx.Done():\n\t\t\t<-idc\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (d *pgDataStore) handleUpdate(h SyncHandler, id string) error {\n\troute, err := d.Get(id)\n\tif err == ErrNotFound {\n\t\tif err = h.Remove(id); err != nil && err != ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn h.Set(route)\n}\n\nfunc (d *pgDataStore) startListener(ctx context.Context) (<-chan string, <-chan error, error) {\n\tidc := make(chan string)\n\terrc := make(chan error)\n\n\tconn, err := d.pgx.Acquire()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif err = conn.Listen(d.tableName); err != nil {\n\t\td.pgx.Release(conn)\n\t\treturn nil, nil, err\n\t}\n\n\tgo func() {\n\t\tdefer unlistenAndRelease(d.pgx, conn, d.tableName)\n\t\tdefer close(idc)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tnotification, err := conn.WaitForNotification(time.Second)\n\t\t\tif err == pgx.ErrNotificationTimeout {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tidc <- notification.Payload\n\t\t}\n\t}()\n\n\treturn idc, errc, nil\n}\n\nconst (\n\tselectColumnsHTTP = \"id, parent_ref, service, domain, sticky, tls_cert, tls_key, created_at, updated_at\"\n\tselectColumnsTCP = \"id, parent_ref, service, port, created_at, updated_at\"\n)\n\nfunc (d *pgDataStore) columnNames() string {\n\tswitch d.routeType {\n\tcase routeTypeHTTP:\n\t\treturn selectColumnsHTTP\n\tcase routeTypeTCP:\n\t\treturn selectColumnsTCP\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown routeType: %q\", d.routeType))\n\t}\n}\n\ntype scannable interface {\n\tScan(dest ...interface{}) (err error)\n}\n\nfunc (d *pgDataStore) scanRoute(route *router.Route, s scannable) error {\n\troute.Type = d.routeType\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\treturn s.Scan(\n\t\t\t&route.ID,\n\t\t\t&route.ParentRef,\n\t\t\t&route.Service,\n\t\t\t&route.Domain,\n\t\t\t&route.Sticky,\n\t\t\t&route.TLSCert,\n\t\t\t&route.TLSKey,\n\t\t\t&route.CreatedAt,\n\t\t\t&route.UpdatedAt,\n\t\t)\n\tcase tableNameTCP:\n\t\treturn s.Scan(\n\t\t\t&route.ID,\n\t\t\t&route.ParentRef,\n\t\t\t&route.Service,\n\t\t\t&route.Port,\n\t\t\t&route.CreatedAt,\n\t\t\t&route.UpdatedAt,\n\t\t)\n\t}\n\tpanic(\"unknown tableName: \" + d.tableName)\n}\n\nconst sqlUnlisten = `UNLISTEN %s`\n\nfunc unlistenAndRelease(pool *pgx.ConnPool, conn *pgx.Conn, channel string) {\n\tif _, err := conn.Exec(fmt.Sprintf(sqlUnlisten, channel)); err != nil {\n\t\tconn.Close()\n\t}\n\tpool.Release(conn)\n}\n<commit_msg>router: Fix waiting for pg startListener to stop<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/github.com\/jackc\/pgx\"\n\t\"github.com\/flynn\/flynn\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/flynn\/flynn\/router\/types\"\n)\n\nvar ErrNotFound = errors.New(\"router: route not found\")\n\ntype DataStore interface {\n\tAdd(route *router.Route) error\n\tUpdate(route *router.Route) error\n\tGet(id string) (*router.Route, error)\n\tList() ([]*router.Route, error)\n\tRemove(id string) error\n\tSync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error\n}\n\ntype DataStoreReader interface {\n\tGet(id string) (*router.Route, error)\n\tList() ([]*router.Route, error)\n}\n\ntype SyncHandler interface {\n\tSet(route *router.Route) error\n\tRemove(id string) error\n}\n\ntype pgDataStore struct {\n\tpgx *pgx.ConnPool\n\n\trouteType string\n\ttableName string\n}\n\nconst (\n\trouteTypeHTTP = \"http\"\n\trouteTypeTCP = \"tcp\"\n\ttableNameHTTP = \"http_routes\"\n\ttableNameTCP = \"tcp_routes\"\n)\n\n\/\/ NewPostgresDataStore returns a DataStore that stores route information in a\n\/\/ Postgres database. It uses pg_notify and a listener connection to watch for\n\/\/ route changes.\nfunc NewPostgresDataStore(routeType string, pgx *pgx.ConnPool) *pgDataStore {\n\ttableName := \"\"\n\tswitch routeType {\n\tcase routeTypeHTTP:\n\t\ttableName = tableNameHTTP\n\tcase routeTypeTCP:\n\t\ttableName = tableNameTCP\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown routeType: %q\", routeType))\n\t}\n\treturn &pgDataStore{\n\t\tpgx: pgx,\n\t\trouteType: routeType,\n\t\ttableName: tableName,\n\t}\n}\n\nconst sqlAddRouteHTTP = `\nINSERT INTO ` + tableNameHTTP + ` (parent_ref, service, domain, tls_cert, tls_key, sticky)\n\tVALUES ($1, $2, $3, $4, $5, $6)\n\tRETURNING id, created_at, updated_at`\n\nconst sqlAddRouteTCP = `\nINSERT INTO ` + tableNameTCP + ` (parent_ref, service, port)\n\tVALUES ($1, $2, $3)\n\tRETURNING id, created_at, updated_at`\n\nfunc (d *pgDataStore) Add(r *router.Route) (err error) {\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\terr = d.pgx.QueryRow(\n\t\t\tsqlAddRouteHTTP,\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.Domain,\n\t\t\tr.TLSCert,\n\t\t\tr.TLSKey,\n\t\t\tr.Sticky,\n\t\t).Scan(&r.ID, &r.CreatedAt, &r.UpdatedAt)\n\tcase tableNameTCP:\n\t\terr = d.pgx.QueryRow(\n\t\t\tsqlAddRouteTCP,\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.Port,\n\t\t).Scan(&r.ID, &r.CreatedAt, &r.UpdatedAt)\n\t}\n\tr.Type = d.routeType\n\treturn err\n}\n\nconst sqlUpdateRouteHTTP = `\nUPDATE ` + tableNameHTTP + ` SET parent_ref = $1, service = $2, tls_cert = $3, tls_key = $4, sticky = $5\n\tWHERE id = $6 AND domain = $7 AND deleted_at IS NULL\n\tRETURNING %s`\n\nconst sqlUpdateRouteTCP = `\nUPDATE ` + tableNameTCP + ` SET parent_ref = $1, service = $2\n\tWHERE id = $3 AND port = $4 AND deleted_at IS NULL\n\tRETURNING %s`\n\nfunc (d *pgDataStore) Update(r *router.Route) error {\n\tvar row *pgx.Row\n\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\trow = d.pgx.QueryRow(\n\t\t\tfmt.Sprintf(sqlUpdateRouteHTTP, d.columnNames()),\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.TLSCert,\n\t\t\tr.TLSKey,\n\t\t\tr.Sticky,\n\t\t\tr.ID,\n\t\t\tr.Domain,\n\t\t)\n\tcase tableNameTCP:\n\t\trow = d.pgx.QueryRow(\n\t\t\tfmt.Sprintf(sqlUpdateRouteTCP, d.columnNames()),\n\t\t\tr.ParentRef,\n\t\t\tr.Service,\n\t\t\tr.ID,\n\t\t\tr.Port,\n\t\t)\n\t}\n\terr := d.scanRoute(r, row)\n\tif err == pgx.ErrNoRows {\n\t\treturn ErrNotFound\n\t}\n\treturn err\n}\n\nconst sqlRemoveRoute = `UPDATE %s SET deleted_at = now() WHERE id = $1`\n\nfunc (d *pgDataStore) Remove(id string) error {\n\t_, err := d.pgx.Exec(fmt.Sprintf(sqlRemoveRoute, d.tableName), id)\n\treturn err\n}\n\nconst sqlGetRoute = `SELECT %s FROM %s WHERE id = $1 AND deleted_at IS NULL`\n\nfunc (d *pgDataStore) Get(id string) (*router.Route, error) {\n\tif id == \"\" {\n\t\treturn nil, ErrNotFound\n\t}\n\n\tquery := fmt.Sprintf(sqlGetRoute, d.columnNames(), d.tableName)\n\trow := d.pgx.QueryRow(query, id)\n\n\tr := &router.Route{}\n\terr := d.scanRoute(r, row)\n\tif err == pgx.ErrNoRows {\n\t\treturn nil, ErrNotFound\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}\n\nconst sqlListRoutes = `SELECT %s FROM %s WHERE deleted_at IS NULL`\n\nfunc (d *pgDataStore) List() ([]*router.Route, error) {\n\tquery := fmt.Sprintf(sqlListRoutes, d.columnNames(), d.tableName)\n\trows, err := d.pgx.Query(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\troutes := []*router.Route{}\n\tfor rows.Next() {\n\t\tr := &router.Route{}\n\t\terr := d.scanRoute(r, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Type = d.routeType\n\t\troutes = append(routes, r)\n\t}\n\treturn routes, rows.Err()\n}\n\nfunc (d *pgDataStore) Sync(ctx context.Context, h SyncHandler, startc chan<- struct{}) error {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tidc, errc, err := d.startListener(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinitialRoutes, err := d.List()\n\tif err != nil {\n\t\tcancel()\n\t\treturn err\n\t}\n\n\tfor _, route := range initialRoutes {\n\t\tif err := h.Set(route); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tclose(startc)\n\n\tfor {\n\t\tselect {\n\t\tcase id := <-idc:\n\t\t\tif err := d.handleUpdate(h, id); err != nil {\n\t\t\t\tcancel()\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase err = <-errc:\n\t\t\treturn err\n\t\tcase <-ctx.Done():\n\t\t\t\/\/ wait for startListener to finish (it will either\n\t\t\t\/\/ close idc or send an error on errc)\n\t\t\tselect {\n\t\t\tcase <-idc:\n\t\t\tcase <-errc:\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (d *pgDataStore) handleUpdate(h SyncHandler, id string) error {\n\troute, err := d.Get(id)\n\tif err == ErrNotFound {\n\t\tif err = h.Remove(id); err != nil && err != ErrNotFound {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn h.Set(route)\n}\n\nfunc (d *pgDataStore) startListener(ctx context.Context) (<-chan string, <-chan error, error) {\n\tidc := make(chan string)\n\terrc := make(chan error)\n\n\tconn, err := d.pgx.Acquire()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif err = conn.Listen(d.tableName); err != nil {\n\t\td.pgx.Release(conn)\n\t\treturn nil, nil, err\n\t}\n\n\tgo func() {\n\t\tdefer unlistenAndRelease(d.pgx, conn, d.tableName)\n\t\tdefer close(idc)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tnotification, err := conn.WaitForNotification(time.Second)\n\t\t\tif err == pgx.ErrNotificationTimeout {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tidc <- notification.Payload\n\t\t}\n\t}()\n\n\treturn idc, errc, nil\n}\n\nconst (\n\tselectColumnsHTTP = \"id, parent_ref, service, domain, sticky, tls_cert, tls_key, created_at, updated_at\"\n\tselectColumnsTCP = \"id, parent_ref, service, port, created_at, updated_at\"\n)\n\nfunc (d *pgDataStore) columnNames() string {\n\tswitch d.routeType {\n\tcase routeTypeHTTP:\n\t\treturn selectColumnsHTTP\n\tcase routeTypeTCP:\n\t\treturn selectColumnsTCP\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown routeType: %q\", d.routeType))\n\t}\n}\n\ntype scannable interface {\n\tScan(dest ...interface{}) (err error)\n}\n\nfunc (d *pgDataStore) scanRoute(route *router.Route, s scannable) error {\n\troute.Type = d.routeType\n\tswitch d.tableName {\n\tcase tableNameHTTP:\n\t\treturn s.Scan(\n\t\t\t&route.ID,\n\t\t\t&route.ParentRef,\n\t\t\t&route.Service,\n\t\t\t&route.Domain,\n\t\t\t&route.Sticky,\n\t\t\t&route.TLSCert,\n\t\t\t&route.TLSKey,\n\t\t\t&route.CreatedAt,\n\t\t\t&route.UpdatedAt,\n\t\t)\n\tcase tableNameTCP:\n\t\treturn s.Scan(\n\t\t\t&route.ID,\n\t\t\t&route.ParentRef,\n\t\t\t&route.Service,\n\t\t\t&route.Port,\n\t\t\t&route.CreatedAt,\n\t\t\t&route.UpdatedAt,\n\t\t)\n\t}\n\tpanic(\"unknown tableName: \" + d.tableName)\n}\n\nconst sqlUnlisten = `UNLISTEN %s`\n\nfunc unlistenAndRelease(pool *pgx.ConnPool, conn *pgx.Conn, channel string) {\n\tif _, err := conn.Exec(fmt.Sprintf(sqlUnlisten, channel)); err != nil {\n\t\tconn.Close()\n\t}\n\tpool.Release(conn)\n}\n<|endoftext|>"} {"text":"<commit_before>package routers\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCreateTrie(t *testing.T) {\n\ttrie := CreateTrie()\n\tif trie.value != nil {\n\t\tt.Errorf(\"New trie's value is not nil: %v\", trie.value)\n\t}\n\tif len(trie.children) != 0 {\n\t\tt.Errorf(\"New trie's children is not empty: %v\", trie.children)\n\t}\n}\n\nfunc TestTrieInsert(t *testing.T) {\n\ttrie := CreateTrie()\n\ttrie.Insert(\"\/\", nil)\n\ttrie.Insert(\"\/foo\", 42)\n}<commit_msg>Some more trie testing<commit_after>package routers\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCreateTrie(t *testing.T) {\n\ttrie := CreateTrie()\n\tif trie.value != nil {\n\t\tt.Errorf(\"New trie's value is not nil: %v\", trie.value)\n\t}\n\tif len(trie.children) != 0 {\n\t\tt.Errorf(\"New trie's children is not empty: %v\", trie.children)\n\t}\n}\n\nfunc TestTrieInsert(t *testing.T) {\n\ttrie := CreateTrie()\n\n\ttrie.Insert(\"\/\", \"pie\")\n\n\tif len(trie.children) != 1 {\n\t\tt.Errorf(\"Inserted more than 1 child into trie\")\n\t}\n\n\tif trie.children['\/'].value != \"pie\" {\n\t\tt.Errorf(\"Value inserted at root %v != %v\", trie.children[0].value, \"pie\")\n\t}\n\n\ttrie.Insert(\"\/abc\", 42)\n\n\tif len(trie.children) != 1 {\n\t\tt.Errorf(\"Inserted a new children with the same rune\")\n\t}\n\n\tnode := trie\n\n\tfor idx, r := range \"\/abc\" {\n\t\tif child, ok := node.children[r]; !ok {\n\t\t\tt.Errorf(\"Didn't insert rune %v (%v) into trie\", r, idx)\n\t\t} else {\n\t\t\tnode = child\n\t\t}\n\t}\n\n\tif node.value != 42 {\n\t\tt.Errorf(\"Inserted wrong value at tail of trie: %v != 42\", node.value)\n\t}\n\n\ttrie.Insert(\"\/bcd\", 3.14)\n\tnode = trie.children['\/']\n\n\tif len(node.children) != 2 {\n\t\tt.Errorf(\"Expected 2 children, got %v\", len(node.children))\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package totp\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tlog \"github.com\/mgutz\/logxi\/v1\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nfunc Factory(conf *logical.BackendConfig) (logical.Backend, error) {\n\treturn Backend(conf).Setup(conf)\n}\n\nfunc Backend(conf *logical.BackendConfig) *backend {\n\tvar b backend\n\tb.Backend = &framework.Backend{\n\t\tHelp: strings.TrimSpace(backendHelp),\n\n\t\tPaths: []*framework.Path{\n\t\t\tpathListRoles(&b),\n\t\t\tpathRoles(&b),\n\t\t\tpathRoleCreate(&b),\n\t\t},\n\n\t\tSecrets: []*framework.Secret{},\n\t}\n\n\tb.logger = conf.Logger\n\treturn &b\n}\n\ntype backend struct {\n\t*framework.Backend\n\n\tlogger log.Logger\n}\n\n\/\/ This needs to be updated\nconst backendHelp = `\nThe PostgreSQL backend dynamically generates database users.\n\nAfter mounting this backend, configure it using the endpoints within\nthe \"config\/\" path.\n`\n<commit_msg>Updated TOTP backend.go's structure and help string<commit_after>package totp\n\nimport (\n\t\"strings\"\n\n\tlog \"github.com\/mgutz\/logxi\/v1\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nfunc Factory(conf *logical.BackendConfig) (logical.Backend, error) {\n\treturn Backend(conf).Setup(conf)\n}\n\nfunc Backend(conf *logical.BackendConfig) *backend {\n\tvar b backend\n\tb.Backend = &framework.Backend{\n\t\tHelp: strings.TrimSpace(backendHelp),\n\n\t\tPaths: []*framework.Path{\n\t\t\tpathListRoles(&b),\n\t\t\tpathRoles(&b),\n\t\t\tpathRoleCreate(&b),\n\t\t},\n\n\t\tSecrets: []*framework.Secret{},\n\t}\n\n\tb.logger = conf.Logger\n\treturn &b\n}\n\ntype backend struct {\n\t*framework.Backend\n\tlogger log.Logger\n}\n\nconst backendHelp = `\nThe TOTP backend dynamically generates time-based one-time use passwords.\n`\n<|endoftext|>"} {"text":"<commit_before>package http\n\nimport (\n\t\"bytes\"\n\t\"github.com\/viant\/endly\"\n\t\"github.com\/viant\/endly\/model\"\n\t\"github.com\/viant\/endly\/udf\"\n\t\"github.com\/viant\/endly\/util\"\n\t\"github.com\/viant\/toolbox\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ServiceRequest represents an http request\ntype Request struct {\n\t*model.Repeater\n\tWhen string `description:\"criteria to send this request\"`\n\tMethod string `required:\"true\" description:\"HTTP Method\"`\n\tURL string\n\tHeader http.Header\n\tCookies Cookies\n\tBody string\n\tJSONBody interface{} `description:\"body JSON representation\"`\n\tReplace map[string]string `description:\"response body key value pair replacement\"`\n\tRequestUdf string `description:\"user defined function in context.state key, i,e, json to protobuf\"`\n\tResponseUdf string `description:\"user defined function in context.state key, i,e, protobuf to json\"`\n}\n\n\/\/Expand substitute request data with matching context map state.\nfunc (r *Request) Expand(context *endly.Context) *Request {\n\theader := make(map[string][]string)\n\tcopyExpandedHeaders(r.Header, header, context)\n\treturn &Request{\n\t\tWhen: context.Expand(r.When),\n\t\tMethod: r.Method,\n\t\tURL: context.Expand(r.URL),\n\t\tBody: context.Expand(r.Body),\n\t\tJSONBody: r.JSONBody,\n\t\tHeader: header,\n\t\tRepeater: r.Repeater,\n\t\tReplace: r.Replace,\n\t\tRequestUdf: r.RequestUdf,\n\t\tResponseUdf: r.ResponseUdf,\n\t}\n}\n\n\/\/Build builds an http.Request\nfunc (r *Request) Build(context *endly.Context, sessionCookies Cookies) (*http.Request, bool, error) {\n\n\tif r.Body == \"\" && r.JSONBody != nil {\n\t\tvar err error\n\t\tr.Body, err = toolbox.AsJSONText(r.JSONBody)\n\t\tif err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t}\n\trequest := r.Expand(context)\n\tvar reader io.Reader\n\tvar expectBinary = false\n\tvar err error\n\tvar ok bool\n\tif len(r.Body) > 0 {\n\t\tbody := []byte(request.Body)\n\t\tif request.RequestUdf != \"\" {\n\t\t\ttransformed, err := udf.TransformWithUDF(context, request.RequestUdf, request.URL, string(body))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, err\n\t\t\t}\n\t\t\tif body, ok = transformed.([]byte); !ok {\n\t\t\t\tbody = []byte(toolbox.AsString(transformed))\n\t\t\t}\n\t\t}\n\t\texpectBinary = strings.HasPrefix(string(body), \"base64:\")\n\t\tbody, err = util.FromPayload(string(body))\n\t\tif err != nil {\n\t\t\treturn nil, expectBinary, err\n\t\t}\n\t\treader = bytes.NewReader(body)\n\t}\n\thttpRequest, err := http.NewRequest(strings.ToUpper(request.Method), request.URL, reader)\n\tif err != nil {\n\t\treturn nil, expectBinary, err\n\t}\n\n\tcopyHeaders(request.Header, httpRequest.Header)\n\t\/\/Set cookies from active session\n\tSetCookies(sessionCookies, request.Header)\n\t\/\/Set cookies from user http request\n\tSetCookies(request.Cookies, httpRequest.Header)\n\treturn httpRequest, expectBinary, nil\n}\n<commit_msg>added clone, and refactored expand<commit_after>package http\n\nimport (\n\t\"bytes\"\n\t\"github.com\/viant\/endly\"\n\t\"github.com\/viant\/endly\/model\"\n\t\"github.com\/viant\/endly\/udf\"\n\t\"github.com\/viant\/endly\/util\"\n\t\"github.com\/viant\/toolbox\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"github.com\/viant\/toolbox\/data\"\n)\n\n\/\/ServiceRequest represents an http request\ntype Request struct {\n\t*model.Repeater\n\tWhen string `description:\"criteria to send this request\"`\n\tMethod string `required:\"true\" description:\"HTTP Method\"`\n\tURL string\n\tHeader http.Header\n\tCookies Cookies\n\tBody string\n\tJSONBody interface{} `description:\"body JSON representation\"`\n\tReplace map[string]string `description:\"response body key value pair replacement\"`\n\tRequestUdf string `description:\"user defined function in context.state key, i,e, json to protobuf\"`\n\tResponseUdf string `description:\"user defined function in context.state key, i,e, protobuf to json\"`\n}\n\n\/\/Clone substitute request data with matching context map state.\nfunc (r *Request) Clone(context *endly.Context) *Request {\n\theader := make(map[string][]string)\n\tcopyExpandedHeaders(r.Header, header, context)\n\treturn &Request{\n\t\tWhen: context.Expand(r.When),\n\t\tMethod: r.Method,\n\t\tURL: context.Expand(r.URL),\n\t\tBody: context.Expand(r.Body),\n\t\tJSONBody: r.JSONBody,\n\t\tHeader: header,\n\t\tRepeater: r.Repeater,\n\t\tReplace: r.Replace,\n\t\tRequestUdf: r.RequestUdf,\n\t\tResponseUdf: r.ResponseUdf,\n\t}\n}\n\n\/\/Clone substitute request data with matching context map state.\nfunc (r *Request) Expand(state data.Map) {\n\tr.URL = state.ExpandAsText(r.URL)\n\tr.Body = state.ExpandAsText(r.Body)\n}\n\n\n\/\/Build builds an http.Request\nfunc (r *Request) Build(context *endly.Context, sessionCookies Cookies) (*http.Request, bool, error) {\n\n\tif r.Body == \"\" && r.JSONBody != nil {\n\t\tvar err error\n\t\tr.Body, err = toolbox.AsJSONText(r.JSONBody)\n\t\tif err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t}\n\trequest := r.Clone(context)\n\n\tvar reader io.Reader\n\tvar expectBinary = false\n\tvar err error\n\tvar ok bool\n\tif len(r.Body) > 0 {\n\t\tbody := []byte(request.Body)\n\t\tif request.RequestUdf != \"\" {\n\t\t\ttransformed, err := udf.TransformWithUDF(context, request.RequestUdf, request.URL, string(body))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, err\n\t\t\t}\n\t\t\tif body, ok = transformed.([]byte); !ok {\n\t\t\t\tbody = []byte(toolbox.AsString(transformed))\n\t\t\t}\n\t\t}\n\t\texpectBinary = strings.HasPrefix(string(body), \"base64:\")\n\t\tbody, err = util.FromPayload(string(body))\n\t\tif err != nil {\n\t\t\treturn nil, expectBinary, err\n\t\t}\n\t\treader = bytes.NewReader(body)\n\t}\n\n\n\thttpRequest, err := http.NewRequest(strings.ToUpper(request.Method), request.URL, reader)\n\tif err != nil {\n\t\treturn nil, expectBinary, err\n\t}\n\n\tcopyHeaders(request.Header, httpRequest.Header)\n\t\/\/Set cookies from active session\n\tSetCookies(sessionCookies, request.Header)\n\t\/\/Set cookies from user http request\n\tSetCookies(request.Cookies, httpRequest.Header)\n\treturn httpRequest, expectBinary, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"go.pedge.io\/proto\/test\"\n\t\"go.pedge.io\/protolog\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pfs\"\n\tpfstesting \"go.pachyderm.com\/pachyderm\/src\/pfs\/testing\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pkg\/require\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pkg\/timing\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\/container\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\/store\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\ttestNumServers = 1\n)\n\nfunc TestBasic(t *testing.T) {\n\tt.Parallel()\n\trunTest(t, testBasic)\n}\n\nfunc testBasic(t *testing.T, apiClient pps.ApiClient) {\n\t_ = os.RemoveAll(\"\/tmp\/pachyderm-test\")\n\tpipelineSource, err := apiClient.CreatePipelineSource(\n\t\tcontext.Background(),\n\t\t&pps.CreatePipelineSourceRequest{\n\t\t\tPipelineSource: &pps.PipelineSource{\n\t\t\t\tTypedPipelineSource: &pps.PipelineSource_GithubPipelineSource{\n\t\t\t\t\tGithubPipelineSource: &pps.GithubPipelineSource{\n\t\t\t\t\t\tContextDir: \"src\/pps\/server\/testdata\/basic\",\n\t\t\t\t\t\tUser: \"pachyderm\",\n\t\t\t\t\t\tRepository: \"pachyderm\",\n\t\t\t\t\t\tBranch: \"master\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipeline, err := apiClient.CreateAndGetPipeline(\n\t\tcontext.Background(),\n\t\t&pps.CreateAndGetPipelineRequest{\n\t\t\tPipelineSourceId: pipelineSource.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipelineRun, err := apiClient.CreatePipelineRun(\n\t\tcontext.Background(),\n\t\t&pps.CreatePipelineRunRequest{\n\t\t\tPipelineId: pipeline.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\t_, err = apiClient.StartPipelineRun(\n\t\tcontext.Background(),\n\t\t&pps.StartPipelineRunRequest{\n\t\t\tPipelineRunId: pipelineRun.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipelineRunID := pipelineRun.Id\n\tpipelineRunStatus, err := getFinalPipelineRunStatus(apiClient, pipelineRunID)\n\trequire.NoError(t, err)\n\trequire.Equal(t, pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS, pipelineRunStatus.PipelineRunStatusType)\n\tmatches, err := filepath.Glob(\"\/tmp\/pachyderm-test\/1-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/1.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/10.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/2.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/20.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/3.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/30.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/4.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/40.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/5.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/50.txt\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/2-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/1.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/10.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/2.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/20.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/3.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/30.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/4.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/40.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/5.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/50.txt.copy\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/3-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/1.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/10.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/2.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/20.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/3.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/30.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/4.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/40.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/5.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/50.txt.copy3\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/4-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/1.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/10.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/2.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/20.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/3.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/30.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/4.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/40.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/5.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/50.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/build-file.txt4\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/5-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/1.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/1.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/10.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/10.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/2.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/2.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/20.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/20.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/3.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/3.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/30.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/30.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/4.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/4.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/40.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/40.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/5.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/5.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/50.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/50.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/build-file.txt4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/build-file2.txt\",\n\t\t},\n\t\tmatches,\n\t)\n}\n\nfunc getFinalPipelineRunStatus(apiClient pps.ApiClient, pipelineRunID string) (*pps.PipelineRunStatus, error) {\n\t\/\/ TODO(pedge): not good\n\tticker := time.NewTicker(time.Second)\n\tfor i := 0; i < 20; i++ {\n\t\t<-ticker.C\n\t\tpipelineRunStatuses, err := apiClient.GetPipelineRunStatus(\n\t\t\tcontext.Background(),\n\t\t\t&pps.GetPipelineRunStatusRequest{\n\t\t\t\tPipelineRunId: pipelineRunID,\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpipelineRunStatus := pipelineRunStatuses.PipelineRunStatus[0]\n\t\tprotolog.Printf(\"status at tick %d: %v\\n\", i, pipelineRunStatus)\n\t\tswitch pipelineRunStatus.PipelineRunStatusType {\n\t\tcase pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_ERROR:\n\t\t\treturn pipelineRunStatus, nil\n\t\tcase pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS:\n\t\t\treturn pipelineRunStatus, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"did not get final pipeline status for %s\", pipelineRunID)\n}\n\nfunc runTest(\n\tt *testing.T,\n\tf func(t *testing.T, apiClient pps.ApiClient),\n) {\n\tcontainerClient, err := getContainerClient()\n\trequire.NoError(t, err)\n\tstoreClient, err := getRethinkClient()\n\trequire.NoError(t, err)\n\tpfstesting.RunTest(\n\t\tt,\n\t\tfunc(t *testing.T, apiClient pfs.ApiClient, internalApiClient pfs.InternalApiClient, cluster pfstesting.Cluster) {\n\t\t\tprototest.RunT(\n\t\t\t\tt,\n\t\t\t\ttestNumServers,\n\t\t\t\tfunc(servers map[string]*grpc.Server) {\n\t\t\t\t\tfor _, server := range servers {\n\t\t\t\t\t\tpps.RegisterApiServer(server, newAPIServer(apiClient, containerClient, storeClient, timing.NewSystemTimer()))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\tfunc(t *testing.T, clientConns map[string]*grpc.ClientConn) {\n\t\t\t\t\tvar clientConn *grpc.ClientConn\n\t\t\t\t\tfor _, c := range clientConns {\n\t\t\t\t\t\tclientConn = c\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tf(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tpps.NewApiClient(\n\t\t\t\t\t\t\tclientConn,\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t)\n\t\t},\n\t)\n}\n\nfunc getContainerClient() (container.Client, error) {\n\tdockerHost := os.Getenv(\"DOCKER_HOST\")\n\tif dockerHost == \"\" {\n\t\tdockerHost = \"unix:\/\/\/var\/run\/docker.sock\"\n\t}\n\treturn container.NewDockerClient(\n\t\tcontainer.DockerClientOptions{\n\t\t\tHost: dockerHost,\n\t\t},\n\t)\n}\n\nfunc getRethinkClient() (store.Client, error) {\n\taddress, err := getRethinkAddress()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdatabaseName := strings.Replace(uuid.NewV4().String(), \"-\", \"\", -1)\n\tif err := store.InitDBs(address, databaseName); err != nil {\n\t\treturn nil, err\n\t}\n\treturn store.NewRethinkClient(address, databaseName)\n}\n\nfunc getRethinkAddress() (string, error) {\n\trethinkAddr := os.Getenv(\"RETHINK_PORT_28015_TCP_ADDR\")\n\tif rethinkAddr == \"\" {\n\t\treturn \"\", errors.New(\"RETHINK_PORT_28015_TCP_ADDR not set\")\n\t}\n\treturn fmt.Sprintf(\"%s:28015\", rethinkAddr), nil\n}\n<commit_msg>skip srs\/pps\/server testing<commit_after>package server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"go.pedge.io\/proto\/test\"\n\t\"go.pedge.io\/protolog\"\n\n\t\"github.com\/satori\/go.uuid\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pfs\"\n\tpfstesting \"go.pachyderm.com\/pachyderm\/src\/pfs\/testing\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pkg\/require\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pkg\/timing\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\/container\"\n\t\"go.pachyderm.com\/pachyderm\/src\/pps\/store\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\ttestNumServers = 1\n)\n\nfunc TestBasic(t *testing.T) {\n\tt.Skip()\n\tt.Parallel()\n\trunTest(t, testBasic)\n}\n\nfunc testBasic(t *testing.T, apiClient pps.ApiClient) {\n\t_ = os.RemoveAll(\"\/tmp\/pachyderm-test\")\n\tpipelineSource, err := apiClient.CreatePipelineSource(\n\t\tcontext.Background(),\n\t\t&pps.CreatePipelineSourceRequest{\n\t\t\tPipelineSource: &pps.PipelineSource{\n\t\t\t\tTypedPipelineSource: &pps.PipelineSource_GithubPipelineSource{\n\t\t\t\t\tGithubPipelineSource: &pps.GithubPipelineSource{\n\t\t\t\t\t\tContextDir: \"src\/pps\/server\/testdata\/basic\",\n\t\t\t\t\t\tUser: \"pachyderm\",\n\t\t\t\t\t\tRepository: \"pachyderm\",\n\t\t\t\t\t\tBranch: \"master\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipeline, err := apiClient.CreateAndGetPipeline(\n\t\tcontext.Background(),\n\t\t&pps.CreateAndGetPipelineRequest{\n\t\t\tPipelineSourceId: pipelineSource.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipelineRun, err := apiClient.CreatePipelineRun(\n\t\tcontext.Background(),\n\t\t&pps.CreatePipelineRunRequest{\n\t\t\tPipelineId: pipeline.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\t_, err = apiClient.StartPipelineRun(\n\t\tcontext.Background(),\n\t\t&pps.StartPipelineRunRequest{\n\t\t\tPipelineRunId: pipelineRun.Id,\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\tpipelineRunID := pipelineRun.Id\n\tpipelineRunStatus, err := getFinalPipelineRunStatus(apiClient, pipelineRunID)\n\trequire.NoError(t, err)\n\trequire.Equal(t, pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS, pipelineRunStatus.PipelineRunStatusType)\n\tmatches, err := filepath.Glob(\"\/tmp\/pachyderm-test\/1-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/1.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/10.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/2.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/20.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/3.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/30.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/4.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/40.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/5.txt\",\n\t\t\t\"\/tmp\/pachyderm-test\/1-out\/50.txt\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/2-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/1.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/10.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/2.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/20.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/3.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/30.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/4.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/40.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/5.txt.copy\",\n\t\t\t\"\/tmp\/pachyderm-test\/2-out\/50.txt.copy\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/3-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/1.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/10.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/2.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/20.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/3.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/30.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/4.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/40.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/5.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/3-out\/50.txt.copy3\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/4-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/1.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/10.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/2.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/20.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/3.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/30.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/4.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/40.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/5.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/50.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/4-out\/build-file.txt4\",\n\t\t},\n\t\tmatches,\n\t)\n\tmatches, err = filepath.Glob(\"\/tmp\/pachyderm-test\/5-out\/*\")\n\trequire.NoError(t, err)\n\trequire.Equal(\n\t\tt,\n\t\t[]string{\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/1.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/1.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/10.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/10.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/2.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/2.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/20.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/20.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/3.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/3.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/30.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/30.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/4.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/4.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/40.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/40.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/5.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/5.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/50.txt.copy3\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/50.txt.copy4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/build-file.txt4\",\n\t\t\t\"\/tmp\/pachyderm-test\/5-out\/build-file2.txt\",\n\t\t},\n\t\tmatches,\n\t)\n}\n\nfunc getFinalPipelineRunStatus(apiClient pps.ApiClient, pipelineRunID string) (*pps.PipelineRunStatus, error) {\n\t\/\/ TODO(pedge): not good\n\tticker := time.NewTicker(time.Second)\n\tfor i := 0; i < 20; i++ {\n\t\t<-ticker.C\n\t\tpipelineRunStatuses, err := apiClient.GetPipelineRunStatus(\n\t\t\tcontext.Background(),\n\t\t\t&pps.GetPipelineRunStatusRequest{\n\t\t\t\tPipelineRunId: pipelineRunID,\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpipelineRunStatus := pipelineRunStatuses.PipelineRunStatus[0]\n\t\tprotolog.Printf(\"status at tick %d: %v\\n\", i, pipelineRunStatus)\n\t\tswitch pipelineRunStatus.PipelineRunStatusType {\n\t\tcase pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_ERROR:\n\t\t\treturn pipelineRunStatus, nil\n\t\tcase pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS:\n\t\t\treturn pipelineRunStatus, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"did not get final pipeline status for %s\", pipelineRunID)\n}\n\nfunc runTest(\n\tt *testing.T,\n\tf func(t *testing.T, apiClient pps.ApiClient),\n) {\n\tcontainerClient, err := getContainerClient()\n\trequire.NoError(t, err)\n\tstoreClient, err := getRethinkClient()\n\trequire.NoError(t, err)\n\tpfstesting.RunTest(\n\t\tt,\n\t\tfunc(t *testing.T, apiClient pfs.ApiClient, internalApiClient pfs.InternalApiClient, cluster pfstesting.Cluster) {\n\t\t\tprototest.RunT(\n\t\t\t\tt,\n\t\t\t\ttestNumServers,\n\t\t\t\tfunc(servers map[string]*grpc.Server) {\n\t\t\t\t\tfor _, server := range servers {\n\t\t\t\t\t\tpps.RegisterApiServer(server, newAPIServer(apiClient, containerClient, storeClient, timing.NewSystemTimer()))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\tfunc(t *testing.T, clientConns map[string]*grpc.ClientConn) {\n\t\t\t\t\tvar clientConn *grpc.ClientConn\n\t\t\t\t\tfor _, c := range clientConns {\n\t\t\t\t\t\tclientConn = c\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tf(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tpps.NewApiClient(\n\t\t\t\t\t\t\tclientConn,\n\t\t\t\t\t\t),\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t)\n\t\t},\n\t)\n}\n\nfunc getContainerClient() (container.Client, error) {\n\tdockerHost := os.Getenv(\"DOCKER_HOST\")\n\tif dockerHost == \"\" {\n\t\tdockerHost = \"unix:\/\/\/var\/run\/docker.sock\"\n\t}\n\treturn container.NewDockerClient(\n\t\tcontainer.DockerClientOptions{\n\t\t\tHost: dockerHost,\n\t\t},\n\t)\n}\n\nfunc getRethinkClient() (store.Client, error) {\n\taddress, err := getRethinkAddress()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdatabaseName := strings.Replace(uuid.NewV4().String(), \"-\", \"\", -1)\n\tif err := store.InitDBs(address, databaseName); err != nil {\n\t\treturn nil, err\n\t}\n\treturn store.NewRethinkClient(address, databaseName)\n}\n\nfunc getRethinkAddress() (string, error) {\n\trethinkAddr := os.Getenv(\"RETHINK_PORT_28015_TCP_ADDR\")\n\tif rethinkAddr == \"\" {\n\t\treturn \"\", errors.New(\"RETHINK_PORT_28015_TCP_ADDR not set\")\n\t}\n\treturn fmt.Sprintf(\"%s:28015\", rethinkAddr), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package discover\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/flynn\/rpcplus\"\n)\n\nvar WaitTimeoutSecs = 10\n\ntype Service struct {\n\tCreated int\n\tName string\n\tHost string\n\tPort string\n\tAddr string\n\tAttrs map[string]string\n}\n\ntype ServiceSet struct {\n\tservices map[string]*Service\n\tfilters map[string]string\n\twatches map[chan *ServiceUpdate]struct{}\n\tserMutex sync.Mutex\n\tfilMutex sync.Mutex\n\twatchMutex sync.Mutex\n\tcall *rpcplus.Call\n}\n\nfunc copyService(service *Service) *Service {\n\ts := *service\n\ts.Attrs = make(map[string]string, len(service.Attrs))\n\tfor k, v := range service.Attrs {\n\t\ts.Attrs[k] = v\n\t}\n\treturn &s\n}\n\nfunc makeServiceSet(call *rpcplus.Call) *ServiceSet {\n\treturn &ServiceSet{\n\t\tservices: make(map[string]*Service),\n\t\tfilters: make(map[string]string),\n\t\twatches: make(map[chan *ServiceUpdate]struct{}),\n\t\tcall: call,\n\t}\n}\n\nfunc (s *ServiceSet) bind(updates chan *ServiceUpdate) chan struct{} {\n\t\/\/ current is an event when enough service updates have been\n\t\/\/ received to bring us to \"current\" state (when subscribed)\n\tcurrent := make(chan struct{})\n\tgo func() {\n\t\tisCurrent := false\n\t\tfor update := range updates {\n\t\t\tif update.Addr == \"\" && update.Name == \"\" && !isCurrent {\n\t\t\t\tclose(current)\n\t\t\t\tisCurrent = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif s.filters != nil && !s.matchFilters(update.Attrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts.serMutex.Lock()\n\t\t\tif update.Online {\n\t\t\t\tif _, exists := s.services[update.Addr]; !exists {\n\t\t\t\t\thost, port, _ := net.SplitHostPort(update.Addr)\n\t\t\t\t\ts.services[update.Addr] = &Service{\n\t\t\t\t\t\tName: update.Name,\n\t\t\t\t\t\tAddr: update.Addr,\n\t\t\t\t\t\tHost: host,\n\t\t\t\t\t\tPort: port,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ts.services[update.Addr].Attrs = update.Attrs\n\t\t\t} else {\n\t\t\t\tif _, exists := s.services[update.Addr]; exists {\n\t\t\t\t\tdelete(s.services, update.Addr)\n\t\t\t\t} else {\n\t\t\t\t\ts.serMutex.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\ts.serMutex.Unlock()\n\t\t\ts.updateWatches(update)\n\t\t}\n\t\ts.closeWatches()\n\t}()\n\treturn current\n}\n\nfunc (s *ServiceSet) updateWatches(update *ServiceUpdate) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tfor ch := range s.watches {\n\t\tch <- update\n\t}\n}\n\nfunc (s *ServiceSet) closeWatches() {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tfor ch := range s.watches {\n\t\tclose(ch)\n\t}\n}\n\nfunc (s *ServiceSet) matchFilters(attrs map[string]string) bool {\n\ts.filMutex.Lock()\n\tdefer s.filMutex.Unlock()\n\tfor key, value := range s.filters {\n\t\tif attrs[key] != value {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ deprecated\nfunc (s *ServiceSet) Online() []*Service {\n\treturn s.Services()\n}\n\nfunc (s *ServiceSet) Services() []*Service {\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tlist := make([]*Service, 0, len(s.services))\n\tfor _, service := range s.services {\n\t\tlist = append(list, copyService(service))\n\t}\n\treturn list\n}\n\n\/\/ deprecated\nfunc (s *ServiceSet) OnlineAddrs() []string {\n\treturn s.Addrs()\n}\n\nfunc (s *ServiceSet) Addrs() []string {\n\tlist := make([]string, 0, len(s.services))\n\tfor _, service := range s.Services() {\n\t\tlist = append(list, service.Addr)\n\t}\n\treturn list\n}\n\nfunc (s *ServiceSet) Select(attrs map[string]string) []*Service {\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tlist := make([]*Service, 0, len(s.services))\nouter:\n\tfor _, service := range s.services {\n\t\tfor key, value := range attrs {\n\t\t\tif service.Attrs[key] != value {\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tlist = append(list, service)\n\t}\n\treturn list\n}\n\nfunc (s *ServiceSet) Filter(attrs map[string]string) {\n\ts.filMutex.Lock()\n\ts.filters = attrs\n\ts.filMutex.Unlock()\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tfor key, service := range s.services {\n\t\tif !s.matchFilters(service.Attrs) {\n\t\t\tdelete(s.services, key)\n\t\t}\n\t}\n}\n\nfunc (s *ServiceSet) Watch(ch chan *ServiceUpdate, bringCurrent bool) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\ts.watches[ch] = struct{}{}\n\tif bringCurrent {\n\t\tgo func() {\n\t\t\ts.serMutex.Lock()\n\t\t\tdefer s.serMutex.Unlock()\n\t\t\tfor _, service := range s.services {\n\t\t\t\tch <- &ServiceUpdate{\n\t\t\t\t\tName: service.Name,\n\t\t\t\t\tAddr: service.Addr,\n\t\t\t\t\tOnline: true,\n\t\t\t\t\tAttrs: service.Attrs,\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (s *ServiceSet) Unwatch(ch chan *ServiceUpdate) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tdelete(s.watches, ch)\n}\n\nfunc (s *ServiceSet) Wait() (*ServiceUpdate, error) {\n\tupdateCh := make(chan *ServiceUpdate, 1)\n\ts.Watch(updateCh, true)\n\tdefer s.Unwatch(updateCh)\n\tselect {\n\tcase update := <-updateCh:\n\t\treturn update, nil\n\tcase <-time.After(time.Duration(WaitTimeoutSecs) * time.Second):\n\t\treturn nil, errors.New(\"discover: wait timeout exceeded\")\n\t}\n}\n\nfunc (s *ServiceSet) Close() error {\n\treturn s.call.CloseStream()\n}\n\ntype Client struct {\n\tclient *rpcplus.Client\n\theartbeats map[string]chan struct{}\n\thbMutex sync.Mutex\n}\n\nfunc NewClient() (*Client, error) {\n\taddr := os.Getenv(\"DISCOVERD\")\n\tif addr == \"\" {\n\t\taddr = \"127.0.0.1:1111\"\n\t}\n\treturn NewClientUsingAddress(addr)\n}\n\nfunc NewClientUsingAddress(addr string) (*Client, error) {\n\tclient, err := rpcplus.DialHTTP(\"tcp\", addr)\n\treturn &Client{\n\t\tclient: client,\n\t\theartbeats: make(map[string]chan struct{}),\n\t}, err\n}\n\nfunc pickMostPublicIp() string {\n\t\/\/ TODO: prefer non 10.0.0.0, 172.16.0.0, and 192.168.0.0\n\taddrs, _ := net.InterfaceAddrs()\n\tvar ip string\n\tfor _, addr := range addrs {\n\t\tip = strings.SplitN(addr.String(), \"\/\", 2)[0]\n\t\tif !strings.Contains(ip, \"::\") && ip != \"127.0.0.1\" {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn ip\n}\n\nfunc (c *Client) ServiceSet(name string) (*ServiceSet, error) {\n\tupdates := make(chan *ServiceUpdate)\n\tcall := c.client.StreamGo(\"Agent.Subscribe\", &Args{\n\t\tName: name,\n\t}, updates)\n\tset := makeServiceSet(call)\n\t<-set.bind(updates)\n\treturn set, nil\n}\n\nfunc (c *Client) Services(name string) ([]*Service, error) {\n\tset, err := c.ServiceSet(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(set.Services()) == 0 {\n\t\t_, err = set.Wait()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tset.Close()\n\treturn set.Services(), nil\n\n}\n\nfunc (c *Client) Register(name, port string, attributes map[string]string) error {\n\treturn c.RegisterWithHost(name, pickMostPublicIp(), port, attributes)\n}\n\nfunc (c *Client) RegisterWithHost(name, host, port string, attributes map[string]string) error {\n\targs := &Args{\n\t\tName: name,\n\t\tAddr: net.JoinHostPort(host, port),\n\t\tAttrs: attributes,\n\t}\n\tvar ret struct{}\n\terr := c.client.Call(\"Agent.Register\", args, &ret)\n\tif err != nil {\n\t\treturn errors.New(\"discover: register failed: \" + err.Error())\n\t}\n\tdone := make(chan struct{})\n\tc.hbMutex.Lock()\n\tc.heartbeats[args.Addr] = done\n\tc.hbMutex.Unlock()\n\tgo func() {\n\t\tticker := time.NewTicker(HeartbeatIntervalSecs * time.Second) \/\/ TODO: add jitter\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\t\/\/ TODO: log error here\n\t\t\t\tc.client.Call(\"Agent.Heartbeat\", &Args{\n\t\t\t\t\tName: name,\n\t\t\t\t\tAddr: args.Addr,\n\t\t\t\t}, &struct{}{})\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (c *Client) Unregister(name, port string) error {\n\treturn c.UnregisterWithHost(name, pickMostPublicIp(), port)\n}\n\nfunc (c *Client) UnregisterWithHost(name, host, port string) error {\n\targs := &Args{\n\t\tName: name,\n\t\tAddr: net.JoinHostPort(host, port),\n\t}\n\tc.hbMutex.Lock()\n\tclose(c.heartbeats[args.Addr])\n\tdelete(c.heartbeats, args.Addr)\n\tc.hbMutex.Unlock()\n\terr := c.client.Call(\"Agent.Unregister\", args, &struct{}{})\n\tif err != nil {\n\t\treturn errors.New(\"discover: unregister failed: \" + err.Error())\n\t}\n\treturn nil\n}\n<commit_msg>discoverd: just wait. blocks if no services until first update or timeout, otherwise noop.<commit_after>package discover\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/flynn\/rpcplus\"\n)\n\nvar WaitTimeoutSecs = 10\n\ntype Service struct {\n\tCreated int\n\tName string\n\tHost string\n\tPort string\n\tAddr string\n\tAttrs map[string]string\n}\n\ntype ServiceSet struct {\n\tservices map[string]*Service\n\tfilters map[string]string\n\twatches map[chan *ServiceUpdate]struct{}\n\tserMutex sync.Mutex\n\tfilMutex sync.Mutex\n\twatchMutex sync.Mutex\n\tcall *rpcplus.Call\n}\n\nfunc copyService(service *Service) *Service {\n\ts := *service\n\ts.Attrs = make(map[string]string, len(service.Attrs))\n\tfor k, v := range service.Attrs {\n\t\ts.Attrs[k] = v\n\t}\n\treturn &s\n}\n\nfunc makeServiceSet(call *rpcplus.Call) *ServiceSet {\n\treturn &ServiceSet{\n\t\tservices: make(map[string]*Service),\n\t\tfilters: make(map[string]string),\n\t\twatches: make(map[chan *ServiceUpdate]struct{}),\n\t\tcall: call,\n\t}\n}\n\nfunc (s *ServiceSet) bind(updates chan *ServiceUpdate) chan struct{} {\n\t\/\/ current is an event when enough service updates have been\n\t\/\/ received to bring us to \"current\" state (when subscribed)\n\tcurrent := make(chan struct{})\n\tgo func() {\n\t\tisCurrent := false\n\t\tfor update := range updates {\n\t\t\tif update.Addr == \"\" && update.Name == \"\" && !isCurrent {\n\t\t\t\tclose(current)\n\t\t\t\tisCurrent = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif s.filters != nil && !s.matchFilters(update.Attrs) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts.serMutex.Lock()\n\t\t\tif update.Online {\n\t\t\t\tif _, exists := s.services[update.Addr]; !exists {\n\t\t\t\t\thost, port, _ := net.SplitHostPort(update.Addr)\n\t\t\t\t\ts.services[update.Addr] = &Service{\n\t\t\t\t\t\tName: update.Name,\n\t\t\t\t\t\tAddr: update.Addr,\n\t\t\t\t\t\tHost: host,\n\t\t\t\t\t\tPort: port,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ts.services[update.Addr].Attrs = update.Attrs\n\t\t\t} else {\n\t\t\t\tif _, exists := s.services[update.Addr]; exists {\n\t\t\t\t\tdelete(s.services, update.Addr)\n\t\t\t\t} else {\n\t\t\t\t\ts.serMutex.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\ts.serMutex.Unlock()\n\t\t\ts.updateWatches(update)\n\t\t}\n\t\ts.closeWatches()\n\t}()\n\treturn current\n}\n\nfunc (s *ServiceSet) updateWatches(update *ServiceUpdate) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tfor ch := range s.watches {\n\t\tch <- update\n\t}\n}\n\nfunc (s *ServiceSet) closeWatches() {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tfor ch := range s.watches {\n\t\tclose(ch)\n\t}\n}\n\nfunc (s *ServiceSet) matchFilters(attrs map[string]string) bool {\n\ts.filMutex.Lock()\n\tdefer s.filMutex.Unlock()\n\tfor key, value := range s.filters {\n\t\tif attrs[key] != value {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ deprecated\nfunc (s *ServiceSet) Online() []*Service {\n\treturn s.Services()\n}\n\nfunc (s *ServiceSet) Services() []*Service {\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tlist := make([]*Service, 0, len(s.services))\n\tfor _, service := range s.services {\n\t\tlist = append(list, copyService(service))\n\t}\n\treturn list\n}\n\n\/\/ deprecated\nfunc (s *ServiceSet) OnlineAddrs() []string {\n\treturn s.Addrs()\n}\n\nfunc (s *ServiceSet) Addrs() []string {\n\tlist := make([]string, 0, len(s.services))\n\tfor _, service := range s.Services() {\n\t\tlist = append(list, service.Addr)\n\t}\n\treturn list\n}\n\nfunc (s *ServiceSet) Select(attrs map[string]string) []*Service {\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tlist := make([]*Service, 0, len(s.services))\nouter:\n\tfor _, service := range s.services {\n\t\tfor key, value := range attrs {\n\t\t\tif service.Attrs[key] != value {\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tlist = append(list, service)\n\t}\n\treturn list\n}\n\nfunc (s *ServiceSet) Filter(attrs map[string]string) {\n\ts.filMutex.Lock()\n\ts.filters = attrs\n\ts.filMutex.Unlock()\n\ts.serMutex.Lock()\n\tdefer s.serMutex.Unlock()\n\tfor key, service := range s.services {\n\t\tif !s.matchFilters(service.Attrs) {\n\t\t\tdelete(s.services, key)\n\t\t}\n\t}\n}\n\nfunc (s *ServiceSet) Watch(ch chan *ServiceUpdate, bringCurrent bool) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\ts.watches[ch] = struct{}{}\n\tif bringCurrent {\n\t\tgo func() {\n\t\t\ts.serMutex.Lock()\n\t\t\tdefer s.serMutex.Unlock()\n\t\t\tfor _, service := range s.services {\n\t\t\t\tch <- &ServiceUpdate{\n\t\t\t\t\tName: service.Name,\n\t\t\t\t\tAddr: service.Addr,\n\t\t\t\t\tOnline: true,\n\t\t\t\t\tAttrs: service.Attrs,\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (s *ServiceSet) Unwatch(ch chan *ServiceUpdate) {\n\ts.watchMutex.Lock()\n\tdefer s.watchMutex.Unlock()\n\tdelete(s.watches, ch)\n}\n\nfunc (s *ServiceSet) Wait() (*ServiceUpdate, error) {\n\tupdateCh := make(chan *ServiceUpdate, 1)\n\ts.Watch(updateCh, true)\n\tdefer s.Unwatch(updateCh)\n\tselect {\n\tcase update := <-updateCh:\n\t\treturn update, nil\n\tcase <-time.After(time.Duration(WaitTimeoutSecs) * time.Second):\n\t\treturn nil, errors.New(\"discover: wait timeout exceeded\")\n\t}\n}\n\nfunc (s *ServiceSet) Close() error {\n\treturn s.call.CloseStream()\n}\n\ntype Client struct {\n\tclient *rpcplus.Client\n\theartbeats map[string]chan struct{}\n\thbMutex sync.Mutex\n}\n\nfunc NewClient() (*Client, error) {\n\taddr := os.Getenv(\"DISCOVERD\")\n\tif addr == \"\" {\n\t\taddr = \"127.0.0.1:1111\"\n\t}\n\treturn NewClientUsingAddress(addr)\n}\n\nfunc NewClientUsingAddress(addr string) (*Client, error) {\n\tclient, err := rpcplus.DialHTTP(\"tcp\", addr)\n\treturn &Client{\n\t\tclient: client,\n\t\theartbeats: make(map[string]chan struct{}),\n\t}, err\n}\n\nfunc pickMostPublicIp() string {\n\t\/\/ TODO: prefer non 10.0.0.0, 172.16.0.0, and 192.168.0.0\n\taddrs, _ := net.InterfaceAddrs()\n\tvar ip string\n\tfor _, addr := range addrs {\n\t\tip = strings.SplitN(addr.String(), \"\/\", 2)[0]\n\t\tif !strings.Contains(ip, \"::\") && ip != \"127.0.0.1\" {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn ip\n}\n\nfunc (c *Client) ServiceSet(name string) (*ServiceSet, error) {\n\tupdates := make(chan *ServiceUpdate)\n\tcall := c.client.StreamGo(\"Agent.Subscribe\", &Args{\n\t\tName: name,\n\t}, updates)\n\tset := makeServiceSet(call)\n\t<-set.bind(updates)\n\treturn set, nil\n}\n\nfunc (c *Client) Services(name string) ([]*Service, error) {\n\tset, err := c.ServiceSet(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = set.Wait()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tset.Close()\n\treturn set.Services(), nil\n\n}\n\nfunc (c *Client) Register(name, port string, attributes map[string]string) error {\n\treturn c.RegisterWithHost(name, pickMostPublicIp(), port, attributes)\n}\n\nfunc (c *Client) RegisterWithHost(name, host, port string, attributes map[string]string) error {\n\targs := &Args{\n\t\tName: name,\n\t\tAddr: net.JoinHostPort(host, port),\n\t\tAttrs: attributes,\n\t}\n\tvar ret struct{}\n\terr := c.client.Call(\"Agent.Register\", args, &ret)\n\tif err != nil {\n\t\treturn errors.New(\"discover: register failed: \" + err.Error())\n\t}\n\tdone := make(chan struct{})\n\tc.hbMutex.Lock()\n\tc.heartbeats[args.Addr] = done\n\tc.hbMutex.Unlock()\n\tgo func() {\n\t\tticker := time.NewTicker(HeartbeatIntervalSecs * time.Second) \/\/ TODO: add jitter\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\t\/\/ TODO: log error here\n\t\t\t\tc.client.Call(\"Agent.Heartbeat\", &Args{\n\t\t\t\t\tName: name,\n\t\t\t\t\tAddr: args.Addr,\n\t\t\t\t}, &struct{}{})\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (c *Client) Unregister(name, port string) error {\n\treturn c.UnregisterWithHost(name, pickMostPublicIp(), port)\n}\n\nfunc (c *Client) UnregisterWithHost(name, host, port string) error {\n\targs := &Args{\n\t\tName: name,\n\t\tAddr: net.JoinHostPort(host, port),\n\t}\n\tc.hbMutex.Lock()\n\tclose(c.heartbeats[args.Addr])\n\tdelete(c.heartbeats, args.Addr)\n\tc.hbMutex.Unlock()\n\terr := c.client.Call(\"Agent.Unregister\", args, &struct{}{})\n\tif err != nil {\n\t\treturn errors.New(\"discover: unregister failed: \" + err.Error())\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package libkb\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ NOTE: This file is the source of truth for our version number, but we have a\n\/\/ script that reads it at packaging\/version.sh. If you refactor this\n\/\/ file, update that script.\n\n\/\/ Version as MAJOR.MINOR.PATCH\nconst Version = \"1.0.0\"\n\n\/\/ Build number\nconst Build = \"\"\n\n\/\/ VersionString returns semantic version string.\nfunc VersionString() string {\n\treturn fmt.Sprintf(\"%s-%s\", Version, Build)\n}\n<commit_msg>New version v1.0.0-28<commit_after>package libkb\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ NOTE: This file is the source of truth for our version number, but we have a\n\/\/ script that reads it at packaging\/version.sh. If you refactor this\n\/\/ file, update that script.\n\n\/\/ Version as MAJOR.MINOR.PATCH\nconst Version = \"1.0.0\"\n\n\/\/ Build number\nconst Build = \"28\"\n\n\/\/ VersionString returns semantic version string.\nfunc VersionString() string {\n\treturn fmt.Sprintf(\"%s-%s\", Version, Build)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\n\/\/ Version is the current version (should be MAJOR.MINOR.PATCH)\nconst Version = \"1.0.28\"\n<commit_msg>Version bump<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\n\/\/ Version is the current version (should be MAJOR.MINOR.PATCH)\nconst Version = \"1.0.29\"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage vcs\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/\/ charsetReader returns a reader for the given charset. Currently\n\/\/ it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful\n\/\/ error which is printed by go get, so the user can find why the package\n\/\/ wasn't downloaded if the encoding is not supported. Note that, in\n\/\/ order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters\n\/\/ greater than 0x7f are not rejected).\nfunc charsetReader(charset string, input io.Reader) (io.Reader, error) {\n\tswitch strings.ToLower(charset) {\n\tcase \"ascii\":\n\t\treturn input, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"can't decode XML document using charset %q\", charset)\n\t}\n}\n\n\/\/ parseMetaGoImports returns meta imports from the HTML in r.\n\/\/ Parsing ends at the end of the <head> section or the beginning of the <body>.\nfunc parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {\n\td := xml.NewDecoder(r)\n\td.CharsetReader = charsetReader\n\td.Strict = false\n\tvar t xml.Token\n\tfor {\n\t\tt, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, \"body\") {\n\t\t\treturn\n\t\t}\n\t\tif e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, \"head\") {\n\t\t\treturn\n\t\t}\n\t\te, ok := t.(xml.StartElement)\n\t\tif !ok || !strings.EqualFold(e.Name.Local, \"meta\") {\n\t\t\tcontinue\n\t\t}\n\t\tif attrValue(e.Attr, \"name\") != \"go-import\" {\n\t\t\tcontinue\n\t\t}\n\t\tif f := strings.Fields(attrValue(e.Attr, \"content\")); len(f) == 3 {\n\t\t\timports = append(imports, metaImport{\n\t\t\t\tPrefix: f[0],\n\t\t\t\tVCS: f[1],\n\t\t\t\tRepoRoot: f[2],\n\t\t\t})\n\t\t}\n\t}\n}\n\n\/\/ attrValue returns the attribute value for the case-insensitive key\n\/\/ `name', or the empty string if nothing is found.\nfunc attrValue(attrs []xml.Attr, name string) string {\n\tfor _, a := range attrs {\n\t\tif strings.EqualFold(a.Name.Local, name) {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>go\/vcs: workaround EOF bug in token-based XML decoder<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage vcs\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/\/ charsetReader returns a reader for the given charset. Currently\n\/\/ it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful\n\/\/ error which is printed by go get, so the user can find why the package\n\/\/ wasn't downloaded if the encoding is not supported. Note that, in\n\/\/ order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters\n\/\/ greater than 0x7f are not rejected).\nfunc charsetReader(charset string, input io.Reader) (io.Reader, error) {\n\tswitch strings.ToLower(charset) {\n\tcase \"ascii\":\n\t\treturn input, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"can't decode XML document using charset %q\", charset)\n\t}\n}\n\n\/\/ parseMetaGoImports returns meta imports from the HTML in r.\n\/\/ Parsing ends at the end of the <head> section or the beginning of the <body>.\nfunc parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {\n\td := xml.NewDecoder(r)\n\td.CharsetReader = charsetReader\n\td.Strict = false\n\tvar t xml.Token\n\tfor {\n\t\tt, err = d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF || len(imports) > 0 {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, \"body\") {\n\t\t\treturn\n\t\t}\n\t\tif e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, \"head\") {\n\t\t\treturn\n\t\t}\n\t\te, ok := t.(xml.StartElement)\n\t\tif !ok || !strings.EqualFold(e.Name.Local, \"meta\") {\n\t\t\tcontinue\n\t\t}\n\t\tif attrValue(e.Attr, \"name\") != \"go-import\" {\n\t\t\tcontinue\n\t\t}\n\t\tif f := strings.Fields(attrValue(e.Attr, \"content\")); len(f) == 3 {\n\t\t\timports = append(imports, metaImport{\n\t\t\t\tPrefix: f[0],\n\t\t\t\tVCS: f[1],\n\t\t\t\tRepoRoot: f[2],\n\t\t\t})\n\t\t}\n\t}\n}\n\n\/\/ attrValue returns the attribute value for the case-insensitive key\n\/\/ `name', or the empty string if nothing is found.\nfunc attrValue(attrs []xml.Attr, name string) string {\n\tfor _, a := range attrs {\n\t\tif strings.EqualFold(a.Name.Local, name) {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package types\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\tcrypto \"github.com\/tendermint\/go-crypto\"\n\twire \"github.com\/tendermint\/go-wire\"\n\t\"github.com\/tendermint\/go-wire\/data\"\n\tcmn \"github.com\/tendermint\/tmlibs\/common\"\n\t\"github.com\/tendermint\/tmlibs\/log\"\n\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\n\/\/-----------------------------------------------------------------\n\nvar _ types.PrivValidator = (*PrivValidatorSocketClient)(nil)\n\n\/\/ PrivValidatorSocketClient implements PrivValidator.\n\/\/ It uses a socket to request signatures.\ntype PrivValidatorSocketClient struct {\n\tcmn.BaseService\n\n\tconn net.Conn\n\n\tID types.ValidatorID\n\tSocketAddress string\n}\n\nconst (\n\tdialRetryIntervalSeconds = 1\n)\n\n\/\/ NewPrivValidatorSocketClient returns an instance of\n\/\/ PrivValidatorSocketClient.\nfunc NewPrivValidatorSocketClient(logger log.Logger, socketAddr string) *PrivValidatorSocketClient {\n\tpvsc := &PrivValidatorSocketClient{\n\t\tSocketAddress: socketAddr,\n\t}\n\tpvsc.BaseService = *cmn.NewBaseService(logger, \"privValidatorSocketClient\", pvsc)\n\treturn pvsc\n}\n\n\/\/ OnStart implements cmn.Service.\nfunc (pvsc *PrivValidatorSocketClient) OnStart() error {\n\tif err := pvsc.BaseService.OnStart(); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\tvar conn net.Conn\nRETRY_LOOP:\n\tfor {\n\t\tconn, err = cmn.Connect(pvsc.SocketAddress)\n\t\tif err != nil {\n\t\t\tpvsc.Logger.Error(fmt.Sprintf(\"PrivValidatorSocket failed to connect to %v. Retrying...\", pvsc.SocketAddress))\n\t\t\ttime.Sleep(time.Second * dialRetryIntervalSeconds)\n\t\t\tcontinue RETRY_LOOP\n\t\t}\n\t\tpvsc.conn = conn\n\t\treturn nil\n\t}\n}\n\n\/\/ OnStop implements cmn.Service.\nfunc (pvsc *PrivValidatorSocketClient) OnStop() {\n\tpvsc.BaseService.OnStop()\n\n\tif pvsc.conn != nil {\n\t\tpvsc.conn.Close()\n\t}\n}\n\n\/\/ Address is an alias for PubKey().Address().\nfunc (pvsc *PrivValidatorSocketClient) Address() data.Bytes {\n\tpubKey := pvsc.PubKey()\n\treturn pubKey.Address()\n}\n\n\/\/ PubKey implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) PubKey() crypto.PubKey {\n\tres, err := readWrite(pvsc.conn, PubKeyMsg{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res.(PubKeyMsg).PubKey\n}\n\n\/\/ SignVote implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignVote(chainID string, vote *types.Vote) error {\n\tres, err := readWrite(pvsc.conn, SignVoteMsg{Vote: vote})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*vote = *res.(SignVoteMsg).Vote\n\treturn nil\n}\n\n\/\/ SignProposal implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignProposal(chainID string, proposal *types.Proposal) error {\n\tres, err := readWrite(pvsc.conn, SignProposalMsg{Proposal: proposal})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*proposal = *res.(SignProposalMsg).Proposal\n\treturn nil\n}\n\n\/\/ SignHeartbeat implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {\n\tres, err := readWrite(pvsc.conn, SignHeartbeatMsg{Heartbeat: heartbeat})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*heartbeat = *res.(SignHeartbeatMsg).Heartbeat\n\treturn nil\n}\n\n\/\/---------------------------------------------------------\n\n\/\/ PrivValidatorSocketServer implements PrivValidator.\n\/\/ It responds to requests over a socket\ntype PrivValidatorSocketServer struct {\n\tcmn.BaseService\n\n\tproto, addr string\n\tlistener net.Listener\n\n\tprivVal PrivValidator\n\tchainID string\n}\n\n\/\/ NewPrivValidatorSocketServer returns an instance of\n\/\/ PrivValidatorSocketServer.\nfunc NewPrivValidatorSocketServer(logger log.Logger, socketAddr, chainID string, privVal PrivValidator) *PrivValidatorSocketServer {\n\tproto, addr := cmn.ProtocolAndAddress(socketAddr)\n\tpvss := &PrivValidatorSocketServer{\n\t\tproto: proto,\n\t\taddr: addr,\n\t\tprivVal: privVal,\n\t\tchainID: chainID,\n\t}\n\tpvss.BaseService = *cmn.NewBaseService(logger, \"privValidatorSocketServer\", pvss)\n\treturn pvss\n}\n\n\/\/ OnStart implements cmn.Service.\nfunc (pvss *PrivValidatorSocketServer) OnStart() error {\n\tif err := pvss.BaseService.OnStart(); err != nil {\n\t\treturn err\n\t}\n\tln, err := net.Listen(pvss.proto, pvss.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpvss.listener = ln\n\tgo pvss.acceptConnectionsRoutine()\n\treturn nil\n}\n\n\/\/ OnStop implements cmn.Service.\nfunc (pvss *PrivValidatorSocketServer) OnStop() {\n\tpvss.BaseService.OnStop()\n\n\tif pvss.listener == nil {\n\t\treturn\n\t}\n\n\tif err := pvss.listener.Close(); err != nil {\n\t\tpvss.Logger.Error(\"Error closing listener\", \"err\", err)\n\t}\n}\n\nfunc (pvss *PrivValidatorSocketServer) acceptConnectionsRoutine() {\n\tfor {\n\t\t\/\/ Accept a connection\n\t\tpvss.Logger.Info(\"Waiting for new connection...\")\n\n\t\tconn, err := pvss.listener.Accept()\n\t\tif err != nil {\n\t\t\tif !pvss.IsRunning() {\n\t\t\t\treturn \/\/ Ignore error from listener closing.\n\t\t\t}\n\t\t\tpvss.Logger.Error(\"Failed to accept connection: \" + err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tpvss.Logger.Info(\"Accepted a new connection\")\n\n\t\t\/\/ read\/write\n\t\tfor {\n\t\t\tif !pvss.IsRunning() {\n\t\t\t\treturn \/\/ Ignore error from listener closing.\n\t\t\t}\n\n\t\t\tvar n int\n\t\t\tvar err error\n\t\t\tb := wire.ReadByteSlice(conn, 0, &n, &err) \/\/XXX: no max\n\t\t\treq, err := decodeMsg(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tvar res PrivValidatorSocketMsg\n\t\t\tswitch r := req.(type) {\n\t\t\tcase PubKeyMsg:\n\t\t\t\tres = PubKeyMsg{pvss.privVal.PubKey()}\n\t\t\tcase SignVoteMsg:\n\t\t\t\tpvss.privVal.SignVote(pvss.chainID, r.Vote)\n\t\t\t\tres = SignVoteMsg{r.Vote}\n\t\t\tcase SignProposalMsg:\n\t\t\t\tpvss.privVal.SignProposal(pvss.chainID, r.Proposal)\n\t\t\t\tres = SignProposalMsg{r.Proposal}\n\t\t\tcase SignHeartbeatMsg:\n\t\t\t\tpvss.privVal.SignHeartbeat(pvss.chainID, r.Heartbeat)\n\t\t\t\tres = SignHeartbeatMsg{r.Heartbeat}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unknown msg: %v\", r))\n\t\t\t}\n\n\t\t\tb = wire.BinaryBytes(res)\n\t\t\t_, err = conn.Write(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/---------------------------------------------------------\n\nconst (\n\tmsgTypePubKey = byte(0x01)\n\tmsgTypeSignVote = byte(0x10)\n\tmsgTypeSignProposal = byte(0x11)\n\tmsgTypeSignHeartbeat = byte(0x12)\n)\n\n\/\/ PrivValidatorSocketMsg is a message sent between PrivValidatorSocket client\n\/\/ and server.\ntype PrivValidatorSocketMsg interface{}\n\nvar _ = wire.RegisterInterface(\n\tstruct{ PrivValidatorSocketMsg }{},\n\twire.ConcreteType{&PubKeyMsg{}, msgTypePubKey},\n\twire.ConcreteType{&SignVoteMsg{}, msgTypeSignVote},\n\twire.ConcreteType{&SignProposalMsg{}, msgTypeSignProposal},\n\twire.ConcreteType{&SignHeartbeatMsg{}, msgTypeSignHeartbeat},\n)\n\nfunc readWrite(conn net.Conn, req PrivValidatorSocketMsg) (res PrivValidatorSocketMsg, err error) {\n\tb := wire.BinaryBytes(req)\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar n int\n\tb = wire.ReadByteSlice(conn, 0, &n, &err) \/\/XXX: no max\n\treturn decodeMsg(b)\n}\n\nfunc decodeMsg(bz []byte) (msg PrivValidatorSocketMsg, err error) {\n\tn := new(int)\n\tr := bytes.NewReader(bz)\n\tmsgI := wire.ReadBinary(struct{ PrivValidatorSocketMsg }{}, r, 0, n, &err)\n\tmsg = msgI.(struct{ PrivValidatorSocketMsg }).PrivValidatorSocketMsg\n\treturn msg, err\n}\n\n\/\/ PubKeyMsg is a PrivValidatorSocket message containing the public key.\ntype PubKeyMsg struct {\n\tPubKey crypto.PubKey\n}\n\n\/\/ SignVoteMsg is a PrivValidatorSocket message containing a vote.\ntype SignVoteMsg struct {\n\tVote *types.Vote\n}\n\n\/\/ SignProposalMsg is a PrivValidatorSocket message containing a Proposal.\ntype SignProposalMsg struct {\n\tProposal *types.Proposal\n}\n\n\/\/ SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat.\ntype SignHeartbeatMsg struct {\n\tHeartbeat *types.Heartbeat\n}\n<commit_msg>wip: check error of wire read<commit_after>package types\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\tcrypto \"github.com\/tendermint\/go-crypto\"\n\twire \"github.com\/tendermint\/go-wire\"\n\t\"github.com\/tendermint\/go-wire\/data\"\n\tcmn \"github.com\/tendermint\/tmlibs\/common\"\n\t\"github.com\/tendermint\/tmlibs\/log\"\n\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\n\/\/-----------------------------------------------------------------\n\nvar _ types.PrivValidator = (*PrivValidatorSocketClient)(nil)\n\n\/\/ PrivValidatorSocketClient implements PrivValidator.\n\/\/ It uses a socket to request signatures.\ntype PrivValidatorSocketClient struct {\n\tcmn.BaseService\n\n\tconn net.Conn\n\n\tID types.ValidatorID\n\tSocketAddress string\n}\n\nconst (\n\tdialRetryIntervalSeconds = 1\n)\n\n\/\/ NewPrivValidatorSocketClient returns an instance of\n\/\/ PrivValidatorSocketClient.\nfunc NewPrivValidatorSocketClient(logger log.Logger, socketAddr string) *PrivValidatorSocketClient {\n\tpvsc := &PrivValidatorSocketClient{\n\t\tSocketAddress: socketAddr,\n\t}\n\tpvsc.BaseService = *cmn.NewBaseService(logger, \"privValidatorSocketClient\", pvsc)\n\treturn pvsc\n}\n\n\/\/ OnStart implements cmn.Service.\nfunc (pvsc *PrivValidatorSocketClient) OnStart() error {\n\tif err := pvsc.BaseService.OnStart(); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\tvar conn net.Conn\nRETRY_LOOP:\n\tfor {\n\t\tconn, err = cmn.Connect(pvsc.SocketAddress)\n\t\tif err != nil {\n\t\t\tpvsc.Logger.Error(fmt.Sprintf(\"PrivValidatorSocket failed to connect to %v. Retrying...\", pvsc.SocketAddress))\n\t\t\ttime.Sleep(time.Second * dialRetryIntervalSeconds)\n\t\t\tcontinue RETRY_LOOP\n\t\t}\n\t\tpvsc.conn = conn\n\t\treturn nil\n\t}\n}\n\n\/\/ OnStop implements cmn.Service.\nfunc (pvsc *PrivValidatorSocketClient) OnStop() {\n\tpvsc.BaseService.OnStop()\n\n\tif pvsc.conn != nil {\n\t\tpvsc.conn.Close()\n\t}\n}\n\n\/\/ Address is an alias for PubKey().Address().\nfunc (pvsc *PrivValidatorSocketClient) Address() data.Bytes {\n\tpubKey := pvsc.PubKey()\n\treturn pubKey.Address()\n}\n\n\/\/ PubKey implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) PubKey() crypto.PubKey {\n\tres, err := readWrite(pvsc.conn, PubKeyMsg{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res.(PubKeyMsg).PubKey\n}\n\n\/\/ SignVote implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignVote(chainID string, vote *types.Vote) error {\n\tres, err := readWrite(pvsc.conn, SignVoteMsg{Vote: vote})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*vote = *res.(SignVoteMsg).Vote\n\treturn nil\n}\n\n\/\/ SignProposal implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignProposal(chainID string, proposal *types.Proposal) error {\n\tres, err := readWrite(pvsc.conn, SignProposalMsg{Proposal: proposal})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*proposal = *res.(SignProposalMsg).Proposal\n\treturn nil\n}\n\n\/\/ SignHeartbeat implements PrivValidator.\nfunc (pvsc *PrivValidatorSocketClient) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {\n\tres, err := readWrite(pvsc.conn, SignHeartbeatMsg{Heartbeat: heartbeat})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*heartbeat = *res.(SignHeartbeatMsg).Heartbeat\n\treturn nil\n}\n\n\/\/---------------------------------------------------------\n\n\/\/ PrivValidatorSocketServer implements PrivValidator.\n\/\/ It responds to requests over a socket\ntype PrivValidatorSocketServer struct {\n\tcmn.BaseService\n\n\tproto, addr string\n\tlistener net.Listener\n\n\tprivVal PrivValidator\n\tchainID string\n}\n\n\/\/ NewPrivValidatorSocketServer returns an instance of\n\/\/ PrivValidatorSocketServer.\nfunc NewPrivValidatorSocketServer(logger log.Logger, socketAddr, chainID string, privVal PrivValidator) *PrivValidatorSocketServer {\n\tproto, addr := cmn.ProtocolAndAddress(socketAddr)\n\tpvss := &PrivValidatorSocketServer{\n\t\tproto: proto,\n\t\taddr: addr,\n\t\tprivVal: privVal,\n\t\tchainID: chainID,\n\t}\n\tpvss.BaseService = *cmn.NewBaseService(logger, \"privValidatorSocketServer\", pvss)\n\treturn pvss\n}\n\n\/\/ OnStart implements cmn.Service.\nfunc (pvss *PrivValidatorSocketServer) OnStart() error {\n\tif err := pvss.BaseService.OnStart(); err != nil {\n\t\treturn err\n\t}\n\tln, err := net.Listen(pvss.proto, pvss.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpvss.listener = ln\n\tgo pvss.acceptConnectionsRoutine()\n\treturn nil\n}\n\n\/\/ OnStop implements cmn.Service.\nfunc (pvss *PrivValidatorSocketServer) OnStop() {\n\tpvss.BaseService.OnStop()\n\n\tif pvss.listener == nil {\n\t\treturn\n\t}\n\n\tif err := pvss.listener.Close(); err != nil {\n\t\tpvss.Logger.Error(\"Error closing listener\", \"err\", err)\n\t}\n}\n\nfunc (pvss *PrivValidatorSocketServer) acceptConnectionsRoutine() {\n\tfor {\n\t\t\/\/ Accept a connection\n\t\tpvss.Logger.Info(\"Waiting for new connection...\")\n\n\t\tconn, err := pvss.listener.Accept()\n\t\tif err != nil {\n\t\t\tif !pvss.IsRunning() {\n\t\t\t\treturn \/\/ Ignore error from listener closing.\n\t\t\t}\n\t\t\tpvss.Logger.Error(\"Failed to accept connection: \" + err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tpvss.Logger.Info(\"Accepted a new connection\")\n\n\t\t\/\/ read\/write\n\t\tfor {\n\t\t\tif !pvss.IsRunning() {\n\t\t\t\treturn \/\/ Ignore error from listener closing.\n\t\t\t}\n\n\t\t\tvar n int\n\t\t\tvar err error\n\t\t\tb := wire.ReadByteSlice(conn, 0, &n, &err) \/\/XXX: no max\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\treq, err := decodeMsg(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tvar res PrivValidatorSocketMsg\n\t\t\tswitch r := req.(type) {\n\t\t\tcase PubKeyMsg:\n\t\t\t\tres = PubKeyMsg{pvss.privVal.PubKey()}\n\t\t\tcase SignVoteMsg:\n\t\t\t\tpvss.privVal.SignVote(pvss.chainID, r.Vote)\n\t\t\t\tres = SignVoteMsg{r.Vote}\n\t\t\tcase SignProposalMsg:\n\t\t\t\tpvss.privVal.SignProposal(pvss.chainID, r.Proposal)\n\t\t\t\tres = SignProposalMsg{r.Proposal}\n\t\t\tcase SignHeartbeatMsg:\n\t\t\t\tpvss.privVal.SignHeartbeat(pvss.chainID, r.Heartbeat)\n\t\t\t\tres = SignHeartbeatMsg{r.Heartbeat}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unknown msg: %v\", r))\n\t\t\t}\n\n\t\t\tb = wire.BinaryBytes(res)\n\t\t\t_, err = conn.Write(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/---------------------------------------------------------\n\nconst (\n\tmsgTypePubKey = byte(0x01)\n\tmsgTypeSignVote = byte(0x10)\n\tmsgTypeSignProposal = byte(0x11)\n\tmsgTypeSignHeartbeat = byte(0x12)\n)\n\n\/\/ PrivValidatorSocketMsg is a message sent between PrivValidatorSocket client\n\/\/ and server.\ntype PrivValidatorSocketMsg interface{}\n\nvar _ = wire.RegisterInterface(\n\tstruct{ PrivValidatorSocketMsg }{},\n\twire.ConcreteType{&PubKeyMsg{}, msgTypePubKey},\n\twire.ConcreteType{&SignVoteMsg{}, msgTypeSignVote},\n\twire.ConcreteType{&SignProposalMsg{}, msgTypeSignProposal},\n\twire.ConcreteType{&SignHeartbeatMsg{}, msgTypeSignHeartbeat},\n)\n\nfunc readWrite(conn net.Conn, req PrivValidatorSocketMsg) (res PrivValidatorSocketMsg, err error) {\n\tb := wire.BinaryBytes(req)\n\t_, err = conn.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar n int\n\tb = wire.ReadByteSlice(conn, 0, &n, &err) \/\/XXX: no max\n\treturn decodeMsg(b)\n}\n\nfunc decodeMsg(bz []byte) (msg PrivValidatorSocketMsg, err error) {\n\tn := new(int)\n\tr := bytes.NewReader(bz)\n\tmsgI := wire.ReadBinary(struct{ PrivValidatorSocketMsg }{}, r, 0, n, &err)\n\tmsg = msgI.(struct{ PrivValidatorSocketMsg }).PrivValidatorSocketMsg\n\treturn msg, err\n}\n\n\/\/ PubKeyMsg is a PrivValidatorSocket message containing the public key.\ntype PubKeyMsg struct {\n\tPubKey crypto.PubKey\n}\n\n\/\/ SignVoteMsg is a PrivValidatorSocket message containing a vote.\ntype SignVoteMsg struct {\n\tVote *types.Vote\n}\n\n\/\/ SignProposalMsg is a PrivValidatorSocket message containing a Proposal.\ntype SignProposalMsg struct {\n\tProposal *types.Proposal\n}\n\n\/\/ SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat.\ntype SignHeartbeatMsg struct {\n\tHeartbeat *types.Heartbeat\n}\n<|endoftext|>"} {"text":"<commit_before>package dex\n\n\/\/ Very nearly all testing for dex is integration testing, sadly; this is inevitable since we're relying on exec to use git.\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"github.com\/coocood\/assrt\"\n)\n\nfunc do(fn func()) {\n\tretreat, err := os.Getwd()\n\tif err != nil { panic(err); }\n\n\tdefer os.Chdir(retreat)\n\n\tbasedir := os.Getenv(\"BASEDIR\")\n\tif len(basedir) != 0 {\n\t\terr = os.Chdir(basedir)\n\t\tif err != nil { panic(err); }\n\t}\n\n\terr = os.MkdirAll(\"target\/test\", 0755)\n\tif err != nil { panic(err); }\n\ttmpdir, err := ioutil.TempDir(\"target\/test\",\"\")\n\tif err != nil { panic(err); }\n\terr = os.Chdir(tmpdir)\n\tif err != nil { panic(err); }\n\n\tfn()\n}\n\nfunc TestNoGraphLoading(t *testing.T) {\n\tdo(func() {\n\t\tassert := assrt.NewAssert(t)\n\n\t\tassert.Equal(\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t})\n}\n<commit_msg>tests for basic graph initialization<commit_after>package dex\n\n\/\/ Very nearly all testing for dex is integration testing, sadly; this is inevitable since we're relying on exec to use git.\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"github.com\/coocood\/assrt\"\n)\n\nfunc do(fn func()) {\n\tretreat, err := os.Getwd()\n\tif err != nil { panic(err); }\n\n\tdefer os.Chdir(retreat)\n\n\tbasedir := os.Getenv(\"BASEDIR\")\n\tif len(basedir) != 0 {\n\t\terr = os.Chdir(basedir)\n\t\tif err != nil { panic(err); }\n\t}\n\n\terr = os.MkdirAll(\"target\/test\", 0755)\n\tif err != nil { panic(err); }\n\ttmpdir, err := ioutil.TempDir(\"target\/test\",\"\")\n\tif err != nil { panic(err); }\n\terr = os.Chdir(tmpdir)\n\tif err != nil { panic(err); }\n\n\tfn()\n}\n\nfunc TestLoadGraphAbsentIsNil(t *testing.T) {\n\tdo(func() {\n\t\tassert := assrt.NewAssert(t)\n\n\t\tassert.Nil(LoadGraph(\".\"))\n\n\t\tassert.Nil(LoadGraph(\"notadir\"))\n\t})\n}\n\nfunc TestNewGraphInit(t *testing.T) {\n\tdo(func() {\n\t\tassert := assrt.NewAssert(t)\n\n\t\tg := NewGraph(\".\")\n\n\t\tassert.NotNil(g)\n\n\t\tgstat, _ := os.Stat(\".git\")\n\t\tassert.True(gstat.IsDir())\n\n\t\tassert.True(g.HasBranch(\"docket\/init\"))\n\n\t\tassert.Equal(\n\t\t\t\"\",\n\t\t\tg.cmd(\"ls-tree\")(\"HEAD\").Output(),\n\t\t)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package gocd\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ EnvironmentsService exposes calls for interacting with Environment objects in the GoCD API.\ntype EnvironmentsService service\n\n\/\/ EnvironmentsResponseLinks describes the HAL _link resource for the api response object for a collection of environment\n\/\/ objects\n\/\/go:generate gocd-response-links-generator -type=EnvironmentsResponseLinks,EnvironmentLinks\ntype EnvironmentsResponseLinks struct {\n\tSelf *url.URL `json:\"self\"`\n\tDoc *url.URL `json:\"doc\"`\n}\n\n\/\/ EnvironmentLinks describes the HAL _link resource for the api response object for a collection of environment objects.\ntype EnvironmentLinks struct {\n\tSelf *url.URL `json:\"self\"`\n\tDoc *url.URL `json:\"doc\"`\n\tFind *url.URL `json:\"find\"`\n}\n\n\/\/ EnvironmentsResponse describes the response obejct for a plugin API call.\ntype EnvironmentsResponse struct {\n\tLinks *EnvironmentsResponseLinks `json:\"_links\"`\n\tEmbedded struct {\n\t\tEnvironments []*Environment `json:\"environments\"`\n\t} `json:\"_embedded\"`\n}\n\n\/\/ Environment describes a group of pipelines and agents\ntype Environment struct {\n\tLinks *EnvironmentLinks `json:\"_links,omitempty\"`\n\tName string `json:\"name\"`\n\tPipelines []*Pipeline `json:\"pipelines,omitempty\"`\n\tAgents []*Agent `json:\"agents,omitempty\"`\n\tEnvironmentVariables []*EnvironmentVariable `json:\"environment_variables,omitempty\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ EnvironmentPatchRequest describes the actions to perform on an environment\ntype EnvironmentPatchRequest struct {\n\tPipelines *PatchStringAction `json:\"pipelines\"`\n\tAgents *PatchStringAction `json:\"agents\"`\n\tEnvironmentVariables *EnvironmentVariablesAction `json:\"environment_variables\"`\n}\n\n\/\/ EnvironmentVariablesAction describes a collection of Environment Variables to add or remove.\ntype EnvironmentVariablesAction struct {\n\tAdd []*EnvironmentVariable `json:\"add\"`\n\tRemove []*EnvironmentVariable `json:\"remove\"`\n}\n\n\/\/ PatchStringAction describes a collection of resources to add or remove.\ntype PatchStringAction struct {\n\tAdd []string `json:\"add\"`\n\tRemove []string `json:\"remove\"`\n}\n\n\/\/ List all environments\nfunc (es *EnvironmentsService) List(ctx context.Context) (*EnvironmentsResponse, *APIResponse, error) {\n\te := EnvironmentsResponse{}\n\t_, resp, err := es.client.getAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\",\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\n\treturn &e, resp, err\n}\n\n\/\/ Get a single environment by name\nfunc (es *EnvironmentsService) Get(ctx context.Context, name string) (*Environment, *APIResponse, error) {\n\te := Environment{}\n\t_, resp, err := es.client.getAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\/\" + name,\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\te.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\n\treturn &e, resp, err\n}\n\n\/\/ Delete an environment\nfunc (es *EnvironmentsService) Delete(ctx context.Context, name string) (string, *APIResponse, error) {\n\treturn es.client.deleteAction(ctx, name, apiV2)\n}\n\n\/\/ Create an environment\nfunc (es *EnvironmentsService) Create(ctx context.Context, name string) (*Environment, *APIResponse, error) {\n\te := Environment{}\n\t_, resp, err := es.client.postAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\/\" + name,\n\t\tRequestBody: Environment{\n\t\t\tName: name,\n\t\t},\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\te.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\treturn &e, resp, err\n}\n\n\/\/ Patch an environments configuration by adding or removing pipelines, agents, environment variables\nfunc (es *EnvironmentsService) Patch(ctx context.Context, name string, patch *EnvironmentPatchRequest) (*Environment, *APIResponse, error) {\n\tenv := Environment{}\n\t_, resp, err := es.client.patchAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\" + name,\n\t\tRequestBody: patch,\n\t\tResponseBody: &env,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\tenv.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\n\treturn &env, resp, err\n}\n<commit_msg>Fixed formatting<commit_after>package gocd\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\n\/\/ EnvironmentsService exposes calls for interacting with Environment objects in the GoCD API.\ntype EnvironmentsService service\n\n\/\/ EnvironmentsResponseLinks describes the HAL _link resource for the api response object for a collection of environment\n\/\/ objects\n\/\/go:generate gocd-response-links-generator -type=EnvironmentsResponseLinks,EnvironmentLinks\ntype EnvironmentsResponseLinks struct {\n\tSelf *url.URL `json:\"self\"`\n\tDoc *url.URL `json:\"doc\"`\n}\n\n\/\/ EnvironmentLinks describes the HAL _link resource for the api response object for a collection of environment objects.\ntype EnvironmentLinks struct {\n\tSelf *url.URL `json:\"self\"`\n\tDoc *url.URL `json:\"doc\"`\n\tFind *url.URL `json:\"find\"`\n}\n\n\/\/ EnvironmentsResponse describes the response obejct for a plugin API call.\ntype EnvironmentsResponse struct {\n\tLinks *EnvironmentsResponseLinks `json:\"_links\"`\n\tEmbedded struct {\n\t\tEnvironments []*Environment `json:\"environments\"`\n\t} `json:\"_embedded\"`\n}\n\n\/\/ Environment describes a group of pipelines and agents\ntype Environment struct {\n\tLinks *EnvironmentLinks `json:\"_links,omitempty\"`\n\tName string `json:\"name\"`\n\tPipelines []*Pipeline `json:\"pipelines,omitempty\"`\n\tAgents []*Agent `json:\"agents,omitempty\"`\n\tEnvironmentVariables []*EnvironmentVariable `json:\"environment_variables,omitempty\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ EnvironmentPatchRequest describes the actions to perform on an environment\ntype EnvironmentPatchRequest struct {\n\tPipelines *PatchStringAction `json:\"pipelines\"`\n\tAgents *PatchStringAction `json:\"agents\"`\n\tEnvironmentVariables *EnvironmentVariablesAction `json:\"environment_variables\"`\n}\n\n\/\/ EnvironmentVariablesAction describes a collection of Environment Variables to add or remove.\ntype EnvironmentVariablesAction struct {\n\tAdd []*EnvironmentVariable `json:\"add\"`\n\tRemove []*EnvironmentVariable `json:\"remove\"`\n}\n\n\/\/ PatchStringAction describes a collection of resources to add or remove.\ntype PatchStringAction struct {\n\tAdd []string `json:\"add\"`\n\tRemove []string `json:\"remove\"`\n}\n\n\/\/ List all environments\nfunc (es *EnvironmentsService) List(ctx context.Context) (*EnvironmentsResponse, *APIResponse, error) {\n\te := EnvironmentsResponse{}\n\t_, resp, err := es.client.getAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\",\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\n\treturn &e, resp, err\n}\n\n\/\/ Get a single environment by name\nfunc (es *EnvironmentsService) Get(ctx context.Context, name string) (*Environment, *APIResponse, error) {\n\te := Environment{}\n\t_, resp, err := es.client.getAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\/\" + name,\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\te.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\n\treturn &e, resp, err\n}\n\n\/\/ Delete an environment\nfunc (es *EnvironmentsService) Delete(ctx context.Context, name string) (string, *APIResponse, error) {\n\treturn es.client.deleteAction(ctx, name, apiV2)\n}\n\n\/\/ Create an environment\nfunc (es *EnvironmentsService) Create(ctx context.Context, name string) (*Environment, *APIResponse, error) {\n\te := Environment{}\n\t_, resp, err := es.client.postAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\/\" + name,\n\t\tRequestBody: Environment{\n\t\t\tName: name,\n\t\t},\n\t\tResponseBody: &e,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\te.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\treturn &e, resp, err\n}\n\n\/\/ Patch an environments configuration by adding or removing pipelines, agents, environment variables\nfunc (es *EnvironmentsService) Patch(ctx context.Context, name string, patch *EnvironmentPatchRequest) (*Environment, *APIResponse, error) {\n\tenv := Environment{}\n\t_, resp, err := es.client.patchAction(ctx, &APIClientRequest{\n\t\tPath: \"admin\/environments\" + name,\n\t\tRequestBody: patch,\n\t\tResponseBody: &env,\n\t\tAPIVersion: apiV2,\n\t})\n\tif err == nil {\n\t\tenv.Version = strings.Replace(resp.HTTP.Header.Get(\"Etag\"), \"\\\"\", \"\", -1)\n\t}\n\n\treturn &env, resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>package gonzo_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\tgc \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\/\/\"gopkg.in\/mgo.v2\/bson\"\n\n\t\"github.com\/cmars\/gonzodb\/gonzo\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n\ntype gonzoSuite struct {\n\tserver *gonzo.Server\n\tsession *mgo.Session\n}\n\nvar _ = gc.Suite(&gonzoSuite{})\n\nfunc (s *gonzoSuite) SetUpTest(c *gc.C) {\n\taddr := &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\"), Port: 0}\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tc.Assert(err, gc.IsNil)\n\taddr = l.Addr().(*net.TCPAddr)\n\n\ts.server = gonzo.NewServer(l)\n\ts.server.Start()\n\n\ts.session, err = mgo.Dial(addr.String())\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *gonzoSuite) TearDownTest(c *gc.C) {\n\ts.session.Close()\n\ts.server.Stop()\n}\n\nfunc (s *gonzoSuite) TestInsertQuerySingle(c *gc.C) {\n\terr := s.session.DB(\"db1\").C(\"c1\").Insert(bson.D{{\"foo\", 1}, {\"bar\", 2}})\n\tc.Assert(err, gc.IsNil)\n\n\tvar result []bson.M\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(nil).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 1)\n\tc.Assert(result[0][\"foo\"], gc.DeepEquals, 1)\n\tc.Assert(result[0][\"bar\"], gc.DeepEquals, 2)\n\t_, ok := result[0][\"_id\"].(bson.ObjectId)\n\tc.Assert(ok, gc.Equals, true)\n}\n\nvar queryMatchTestCases = []bson.D{\n\tbson.D{{\"artist\", \"ed hall\"}, {\"label\", \"trance syndicate\"}, {\"venue\", \"liberty lunch\"}},\n\tbson.D{{\"artist\", \"cherubs\"}, {\"label\", \"trance syndicate\"}, {\"venue\", \"cavity club\"}},\n\tbson.D{{\"artist\", \"the jesus lizard\"}, {\"label\", \"touch & go\"}, {\"venue\", \"emo's\"}},\n}\n\nfunc (s *gonzoSuite) TestInsertQueryMatch(c *gc.C) {\n\tfor _, testCase := range queryMatchTestCases {\n\t\terr := s.session.DB(\"db1\").C(\"c1\").Insert(testCase)\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\n\tvar err error\n\tvar result []bson.M\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(bson.M{\"artist\": \"ed hall\"}).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 1)\n\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(bson.M{\"label\": \"trance syndicate\"}).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 2)\n\tfor i, m := range result {\n\t\tc.Assert(m[\"label\"], gc.Equals, \"trance syndicate\")\n\t\tif i > 0 {\n\t\t\tc.Assert(m[\"artist\"], gc.Not(gc.DeepEquals), result[i-1][\"artist\"])\n\t\t\tc.Assert(m[\"venue\"], gc.Not(gc.DeepEquals), result[i-1][\"venue\"])\n\t\t\tc.Assert(m[\"_id\"], gc.Not(gc.DeepEquals), result[i-1][\"_id\"])\n\t\t}\n\t}\n\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(nil).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 3)\n}\n\nfunc (s *gonzoSuite) TestQueryIter(c *gc.C) {\n\tcount := 1000\n\tfor i := 0; i < count; i++ {\n\t\terr := s.session.DB(\"db1\").C(\"c1\").Insert(bson.M{\"i\": i})\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\n\ti := s.session.DB(\"db1\").C(\"c1\").Find(nil).Iter()\n\tvar m bson.M\n\tn := 0\n\tfor i.Next(&m) {\n\t\tc.Assert(m, gc.HasLen, 2)\n\t\t_, ok := m[\"_id\"]\n\t\tc.Assert(ok, gc.Equals, true)\n\t\tn++\n\t}\n\tc.Assert(n, gc.Equals, count)\n\tc.Assert(i.Err(), gc.IsNil)\n\tc.Assert(i.Close(), gc.IsNil)\n}\n\nfunc (s *gonzoSuite) TestGridFSrt(c *gc.C) {\n\tgfs := s.session.DB(\"whatfs\").GridFS(\"whatroot\")\n\n\tfooFile, err := gfs.Create(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\t_, err = fooFile.Write([]byte(\"this file contains foo\"))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(fooFile.Close(), gc.IsNil)\n\n\tfooFile, err = gfs.Open(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\tbuf := make([]byte, 200)\n\tn, err := fooFile.Read(buf)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(buf[:n]), gc.Equals, \"this file contains foo\")\n\n\t_, err = gfs.Open(\"bar\")\n\tc.Assert(err, gc.ErrorMatches, \"not found\")\n}\n<commit_msg>Remove commented out import.<commit_after>package gonzo_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\tgc \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\n\t\"github.com\/cmars\/gonzodb\/gonzo\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n\ntype gonzoSuite struct {\n\tserver *gonzo.Server\n\tsession *mgo.Session\n}\n\nvar _ = gc.Suite(&gonzoSuite{})\n\nfunc (s *gonzoSuite) SetUpTest(c *gc.C) {\n\taddr := &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\"), Port: 0}\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tc.Assert(err, gc.IsNil)\n\taddr = l.Addr().(*net.TCPAddr)\n\n\ts.server = gonzo.NewServer(l)\n\ts.server.Start()\n\n\ts.session, err = mgo.Dial(addr.String())\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *gonzoSuite) TearDownTest(c *gc.C) {\n\ts.session.Close()\n\ts.server.Stop()\n}\n\nfunc (s *gonzoSuite) TestInsertQuerySingle(c *gc.C) {\n\terr := s.session.DB(\"db1\").C(\"c1\").Insert(bson.D{{\"foo\", 1}, {\"bar\", 2}})\n\tc.Assert(err, gc.IsNil)\n\n\tvar result []bson.M\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(nil).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 1)\n\tc.Assert(result[0][\"foo\"], gc.DeepEquals, 1)\n\tc.Assert(result[0][\"bar\"], gc.DeepEquals, 2)\n\t_, ok := result[0][\"_id\"].(bson.ObjectId)\n\tc.Assert(ok, gc.Equals, true)\n}\n\nvar queryMatchTestCases = []bson.D{\n\tbson.D{{\"artist\", \"ed hall\"}, {\"label\", \"trance syndicate\"}, {\"venue\", \"liberty lunch\"}},\n\tbson.D{{\"artist\", \"cherubs\"}, {\"label\", \"trance syndicate\"}, {\"venue\", \"cavity club\"}},\n\tbson.D{{\"artist\", \"the jesus lizard\"}, {\"label\", \"touch & go\"}, {\"venue\", \"emo's\"}},\n}\n\nfunc (s *gonzoSuite) TestInsertQueryMatch(c *gc.C) {\n\tfor _, testCase := range queryMatchTestCases {\n\t\terr := s.session.DB(\"db1\").C(\"c1\").Insert(testCase)\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\n\tvar err error\n\tvar result []bson.M\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(bson.M{\"artist\": \"ed hall\"}).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 1)\n\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(bson.M{\"label\": \"trance syndicate\"}).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 2)\n\tfor i, m := range result {\n\t\tc.Assert(m[\"label\"], gc.Equals, \"trance syndicate\")\n\t\tif i > 0 {\n\t\t\tc.Assert(m[\"artist\"], gc.Not(gc.DeepEquals), result[i-1][\"artist\"])\n\t\t\tc.Assert(m[\"venue\"], gc.Not(gc.DeepEquals), result[i-1][\"venue\"])\n\t\t\tc.Assert(m[\"_id\"], gc.Not(gc.DeepEquals), result[i-1][\"_id\"])\n\t\t}\n\t}\n\n\terr = s.session.DB(\"db1\").C(\"c1\").Find(nil).All(&result)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result, gc.HasLen, 3)\n}\n\nfunc (s *gonzoSuite) TestQueryIter(c *gc.C) {\n\tcount := 1000\n\tfor i := 0; i < count; i++ {\n\t\terr := s.session.DB(\"db1\").C(\"c1\").Insert(bson.M{\"i\": i})\n\t\tc.Assert(err, gc.IsNil)\n\t}\n\n\ti := s.session.DB(\"db1\").C(\"c1\").Find(nil).Iter()\n\tvar m bson.M\n\tn := 0\n\tfor i.Next(&m) {\n\t\tc.Assert(m, gc.HasLen, 2)\n\t\t_, ok := m[\"_id\"]\n\t\tc.Assert(ok, gc.Equals, true)\n\t\tn++\n\t}\n\tc.Assert(n, gc.Equals, count)\n\tc.Assert(i.Err(), gc.IsNil)\n\tc.Assert(i.Close(), gc.IsNil)\n}\n\nfunc (s *gonzoSuite) TestGridFSrt(c *gc.C) {\n\tgfs := s.session.DB(\"whatfs\").GridFS(\"whatroot\")\n\n\tfooFile, err := gfs.Create(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\t_, err = fooFile.Write([]byte(\"this file contains foo\"))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(fooFile.Close(), gc.IsNil)\n\n\tfooFile, err = gfs.Open(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\tbuf := make([]byte, 200)\n\tn, err := fooFile.Read(buf)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(buf[:n]), gc.Equals, \"this file contains foo\")\n\n\t_, err = gfs.Open(\"bar\")\n\tc.Assert(err, gc.ErrorMatches, \"not found\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage topo\n\nimport (\n\t\"github.com\/gonum\/graph\"\n\t\"github.com\/gonum\/graph\/traverse\"\n)\n\n\/\/ IsPath returns true for a connected path within a graph.\n\/\/\n\/\/ IsPath returns true if, starting at path[0] and ending at path[len(path)-1], all nodes between\n\/\/ are valid neighbors. That is, for each element path[i], path[i+1] is a valid successor.\n\/\/\n\/\/ As special cases, IsPath returns true for a nil or zero length path, and for a path of length 1\n\/\/ (only one node) but only if the node listed in path exists within the graph.\n\/\/\n\/\/ Graph must be non-nil.\nfunc IsPathIn(g graph.Graph, path []graph.Node) bool {\n\tvar canReach func(u, v graph.Node) bool\n\tswitch g := g.(type) {\n\tcase graph.Directed:\n\t\tcanReach = g.HasEdgeFromTo\n\tdefault:\n\t\tcanReach = g.HasEdge\n\t}\n\n\tif path == nil || len(path) == 0 {\n\t\treturn true\n\t} else if len(path) == 1 {\n\t\treturn g.Has(path[0])\n\t}\n\n\tfor i := 0; i < len(path)-1; i++ {\n\t\tif !canReach(path[i], path[i+1]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ ConnectedComponents returns the connected components of the graph g. All\n\/\/ edges are treated as undirected.\nfunc ConnectedComponents(g graph.Undirected) [][]graph.Node {\n\tvar (\n\t\tw traverse.DepthFirst\n\t\tc []graph.Node\n\t\tcc [][]graph.Node\n\t)\n\tduring := func(n graph.Node) {\n\t\tc = append(c, n)\n\t}\n\tafter := func() {\n\t\tcc = append(cc, []graph.Node(nil))\n\t\tcc[len(cc)-1] = append(cc[len(cc)-1], c...)\n\t\tc = c[:0]\n\t}\n\tw.WalkAll(g, nil, after, during)\n\n\treturn cc\n}\n<commit_msg>topo: fix IsPathIn doc comments<commit_after>\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage topo\n\nimport (\n\t\"github.com\/gonum\/graph\"\n\t\"github.com\/gonum\/graph\/traverse\"\n)\n\n\/\/ IsPathIn returns whether path is a path in g.\n\/\/\n\/\/ As special cases, IsPathIn returns true for a zero length path or for\n\/\/ a path of length 1 when the node in path exists in the graph.\nfunc IsPathIn(g graph.Graph, path []graph.Node) bool {\n\tvar canReach func(u, v graph.Node) bool\n\tswitch g := g.(type) {\n\tcase graph.Directed:\n\t\tcanReach = g.HasEdgeFromTo\n\tdefault:\n\t\tcanReach = g.HasEdge\n\t}\n\n\tif path == nil || len(path) == 0 {\n\t\treturn true\n\t} else if len(path) == 1 {\n\t\treturn g.Has(path[0])\n\t}\n\n\tfor i := 0; i < len(path)-1; i++ {\n\t\tif !canReach(path[i], path[i+1]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ ConnectedComponents returns the connected components of the graph g. All\n\/\/ edges are treated as undirected.\nfunc ConnectedComponents(g graph.Undirected) [][]graph.Node {\n\tvar (\n\t\tw traverse.DepthFirst\n\t\tc []graph.Node\n\t\tcc [][]graph.Node\n\t)\n\tduring := func(n graph.Node) {\n\t\tc = append(c, n)\n\t}\n\tafter := func() {\n\t\tcc = append(cc, []graph.Node(nil))\n\t\tcc[len(cc)-1] = append(cc[len(cc)-1], c...)\n\t\tc = c[:0]\n\t}\n\tw.WalkAll(g, nil, after, during)\n\n\treturn cc\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"bytes\"\n\t\"compress\/flate\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/paulmach\/go.geojson\"\n\t\"gospatial\/utils\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ DB application Database\nvar (\n\tDB Database\n\tCOMMIT_LOG_FILE string = \"commit.log\"\n)\n\n\/\/ LayerCache keeps track of Database's loaded geojson layers\ntype LayerCache struct {\n\tGeojson *geojson.FeatureCollection\n\tTime time.Time\n}\n\n\/\/ Database strust for application.\ntype Database struct {\n\tFile string\n\tCache map[string]*LayerCache\n\tApikeys map[string]Customer\n\tguard sync.RWMutex\n\tcommit_log_queue chan string\n}\n\n\/\/ Connect to bolt database. Returns open database connection.\n\/\/ @returns *bolt.DB\nfunc (self *Database) connect() *bolt.DB {\n\tconn, err := bolt.Open(self.File, 0644, nil)\n\tif err != nil {\n\t\tconn.Close()\n\t\t\/\/log.Fatal(err)\n\t\tpanic(err)\n\t}\n\treturn conn\n}\n\n\/\/ Init creates bolt database if existing one not found.\n\/\/ Creates layers and apikey tables. Starts database caching for layers\n\/\/ @returns Error\nfunc (self *Database) Init() error {\n\t\/\/ Start db caching\n\tm := make(map[string]*LayerCache)\n\tself.Cache = m\n\tgo self.cacheManager()\n\tgo self.startCommitLog()\n\t\/\/ connect to db\n\tconn := self.connect()\n\tdefer conn.Close()\n\t\/\/ datasources\n\terr := self.CreateTable(conn, \"layers\")\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn err\n\t}\n\t\/\/ Add table for datasource owner\n\t\/\/ permissions\n\terr = self.CreateTable(conn, \"apikeys\")\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn err\n\t}\n\t\/\/ create apikey\/customer cache\n\tself.Apikeys = make(map[string]Customer)\n\t\/\/ close and return err\n\treturn err\n}\n\n\/\/ Starts Database commit log\nfunc (self *Database) startCommitLog() {\n\tself.commit_log_queue = make(chan string, 10000)\n\t\/\/ open files r and w\n\tCOMMIT_LOG, err := os.OpenFile(COMMIT_LOG_FILE, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tdefer COMMIT_LOG.Close()\n\tfor {\n\t\tif len(self.commit_log_queue) > 0 {\n\t\t\tline := <-self.commit_log_queue\n\t\t\tif _, err := COMMIT_LOG.WriteString(line + \"\\n\"); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t}\n\t}\n}\n\nfunc (self *Database) CreateTable(conn *bolt.DB, table string) error {\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(table))\n\t\treturn err\n\t})\n\treturn err\n}\n\n\/\/ InsertCustomer inserts customer into apikeys table\n\/\/ @param customer {Customer}\n\/\/ @returns Error\nfunc (self *Database) InsertCustomer(customer Customer) error {\n\tself.Apikeys[customer.Apikey] = customer\n\tvalue, err := json.Marshal(customer)\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.commit_log_queue <- `{\"method\": \"insert_apikey\", \"data\":` + string(value) + `}`\n\t\/\/ Insert customer into database\n\terr = self.Insert(\"apikeys\", customer.Apikey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ GetCustomer returns customer from database\n\/\/ @param apikey {string}\n\/\/ @returns Customer\n\/\/ @returns Error\nfunc (self *Database) GetCustomer(apikey string) (Customer, error) {\n\t\/\/ Check apikey cache\n\tif _, ok := self.Apikeys[apikey]; ok {\n\t\treturn self.Apikeys[apikey], nil\n\t}\n\t\/\/ If customer not found get from database\n\tval, err := self.Select(\"apikeys\", apikey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ datasource not found\n\tif val == nil {\n\t\treturn Customer{}, fmt.Errorf(\"Apikey not found\")\n\t}\n\t\/\/ Read to struct\n\tcustomer := Customer{}\n\terr = json.Unmarshal(val, &customer)\n\tif err != nil {\n\t\treturn Customer{}, err\n\t}\n\t\/\/ Put apikey into cache\n\tself.Apikeys[apikey] = customer\n\t\/\/ Close database connection\n\treturn customer, nil\n}\n\n\/\/ NewLayer creates new datasource layer\n\/\/ @returns string - datasource id\n\/\/ @returns Error\nfunc (self *Database) NewLayer() (string, error) {\n\t\/\/ create geojson\n\tdatasource, _ := utils.NewUUID()\n\tgeojs := geojson.NewFeatureCollection()\n\t\/\/ convert to bytes\n\tvalue, err := geojs.MarshalJSON()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\tself.commit_log_queue <- `{\"method\": \"new_layer\", \"data\": { \"datasource\": \"` + datasource + `\", \"layer\": ` + string(value) + `}}`\n\t\/\/ Insert layer into database\n\terr = self.Insert(\"layers\", datasource, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn datasource, err\n}\n\n\/\/ InsertLayer inserts layer into database\n\/\/ @param datasource {string}\n\/\/ @param geojs {Geojson}\n\/\/ @returns Error\nfunc (self *Database) InsertLayer(datasource string, geojs *geojson.FeatureCollection) error {\n\t\/\/ Caching layer\n\tif v, ok := self.Cache[datasource]; ok {\n\t\tself.guard.Lock()\n\t\tv.Geojson = geojs\n\t\tv.Time = time.Now()\n\t\tself.guard.Unlock()\n\t} else {\n\t\tpgc := &LayerCache{Geojson: geojs, Time: time.Now()}\n\t\tself.Cache[datasource] = pgc\n\t}\n\t\/\/ convert to bytes\n\tvalue, err := geojs.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = self.Insert(\"layers\", datasource, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ GetLayer returns layer from database\n\/\/ @param datasource {string}\n\/\/ @returns Geojson\n\/\/ @returns Error\nfunc (self *Database) GetLayer(datasource string) (*geojson.FeatureCollection, error) {\n\t\/\/ Caching layer\n\tif v, ok := self.Cache[datasource]; ok {\n\t\tself.guard.RLock()\n\t\tv.Time = time.Now()\n\t\tself.guard.RUnlock()\n\t\treturn v.Geojson, nil\n\t}\n\t\/\/ If page not found get from database\n\tval, err := self.Select(\"layers\", datasource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Read to struct\n\tgeojs, err := geojson.UnmarshalFeatureCollection(val)\n\tif err != nil {\n\t\treturn geojs, err\n\t}\n\t\/\/ Store page in memory cache\n\tpgc := &LayerCache{Geojson: geojs, Time: time.Now()}\n\tself.Cache[datasource] = pgc\n\treturn geojs, nil\n}\n\n\/\/ DeleteLayer deletes layer from database\n\/\/ @param datasource {string}\n\/\/ @returns Error\nfunc (self *Database) DeleteLayer(datasource string) error {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tkey := []byte(datasource)\n\tself.commit_log_queue <- `{\"method\": \"delete_layer\", \"data\": { \"datasource\": \"` + datasource + `\"}}`\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t\/\/bucket, err := tx.CreateBucketIfNotExists([]byte(\"layers\"))\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn err\n\t\t\/\/}\n\t\t\/\/err = bucket.Delete(key)\n\t\tbucket := tx.Bucket([]byte(\"layers\"))\n\t\terr := bucket.Delete(key)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tself.guard.Lock()\n\tdelete(self.Cache, datasource)\n\tself.guard.Unlock()\n\treturn err\n}\n\nfunc (self *Database) Insert(table string, key string, value []byte) error {\n\tconn := self.connect()\n\tdefer conn.Close()\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t\/\/bucket, err := tx.CreateBucketIfNotExists([]byte(table))\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn err\n\t\t\/\/}\n\t\t\/\/err = bucket.Put([]byte(key), self.compressByte(value))\n\t\tbucket := tx.Bucket([]byte(table))\n\t\terr := bucket.Put([]byte(key), self.compressByte(value))\n\t\treturn err\n\t})\n\treturn err\n}\n\nfunc (self *Database) Select(table string, key string) ([]byte, error) {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tval := []byte{}\n\terr := conn.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(table))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket %q not found!\", bucket)\n\t\t}\n\t\tval = self.decompressByte(bucket.Get([]byte(key)))\n\t\treturn nil\n\t})\n\treturn val, err\n}\n\nfunc (self *Database) SelectAll(table string) ([]string, error) {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tdata := []string{}\n\terr := conn.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(table))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket %q not found!\", bucket)\n\t\t}\n\t\tbucket.ForEach(func(key, _ []byte) error {\n\t\t\tdata = append(data, string(key))\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\treturn data, err\n}\n\n\/\/ InsertFeature adds feature to layer. Updates layer in Database\n\/\/ @param datasource {string}\n\/\/ @param feat {Geojson Feature}\n\/\/ @returns Error\nfunc (self *Database) InsertFeature(datasource string, feat *geojson.Feature) error {\n\t\/\/ Get layer from database\n\tfeatCollection, err := self.GetLayer(datasource)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Add new feature to layer\n\tvalue, err := feat.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.commit_log_queue <- `{\"method\": \"insert_feature\", \"data\": { \"datasource\": \"` + datasource + `\", \"feature\": ` + string(value) + `}}`\n\tfeatCollection.AddFeature(feat)\n\terr = self.InsertLayer(datasource, featCollection)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ cacheManager for Database. Stores layers in memory.\n\/\/\t\tUnloads layers older than 90 sec\n\/\/\t\tWhen empty --> 60 sec timer\n\/\/\t\tWhen items in cache --> 15 sec timer\nfunc (self *Database) cacheManager() {\n\tfor {\n\t\tn := float64(len(self.Cache))\n\t\tif n != 0 {\n\t\t\tfor key := range self.Cache {\n\t\t\t\t\/\/ CHECK AVAILABLE SYSTEM MEMORY\n\t\t\t\tf := float64(len(self.Cache[key].Geojson.Features))\n\t\t\t\tlimit := (300.0 - (f * (f * 0.25))) - (n * 2.0)\n\t\t\t\tif limit < 0.0 {\n\t\t\t\t\tlimit = 10.0\n\t\t\t\t}\n\t\t\t\tif time.Since(self.Cache[key].Time).Seconds() > limit {\n\t\t\t\t\tself.guard.Lock()\n\t\t\t\t\tdelete(self.Cache, key)\n\t\t\t\t\tself.guard.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(10000 * time.Millisecond)\n\t}\n}\n\n\/\/ Methods: Compression\n\/\/ Source: https:\/\/github.com\/schollz\/gofind\/blob\/master\/utils.go#L146-L169\n\/\/ https:\/\/github.com\/schollz\/gofind\/blob\/master\/fingerprint.go#L43-L54\n\/\/ Description:\n\/\/\t\tCompress and Decompress bytes\nfunc (self *Database) compressByte(src []byte) []byte {\n\tcompressedData := new(bytes.Buffer)\n\tself.compress(src, compressedData, 9)\n\treturn compressedData.Bytes()\n}\n\nfunc (self *Database) decompressByte(src []byte) []byte {\n\tcompressedData := bytes.NewBuffer(src)\n\tdeCompressedData := new(bytes.Buffer)\n\tself.decompress(compressedData, deCompressedData)\n\treturn deCompressedData.Bytes()\n}\n\nfunc (self *Database) compress(src []byte, dest io.Writer, level int) {\n\tcompressor, _ := flate.NewWriter(dest, level)\n\tcompressor.Write(src)\n\tcompressor.Close()\n}\n\nfunc (self *Database) decompress(src io.Reader, dest io.Writer) {\n\tdecompressor := flate.NewReader(src)\n\tio.Copy(dest, decompressor)\n\tdecompressor.Close()\n}\n<commit_msg>database crash errors<commit_after>package app\n\nimport (\n\t\"bytes\"\n\t\"compress\/flate\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/paulmach\/go.geojson\"\n\t\"gospatial\/utils\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ DB application Database\nvar (\n\tDB Database\n\tCOMMIT_LOG_FILE string = \"commit.log\"\n)\n\n\/\/ LayerCache keeps track of Database's loaded geojson layers\ntype LayerCache struct {\n\tGeojson *geojson.FeatureCollection\n\tTime time.Time\n}\n\n\/\/ Database strust for application.\ntype Database struct {\n\tFile string\n\tCache map[string]*LayerCache\n\tApikeys map[string]Customer\n\tguard sync.RWMutex\n\tcommit_log_queue chan string\n}\n\n\/\/ Create to bolt database. Returns open database connection.\n\/\/ @returns *bolt.DB\nfunc (self *Database) createDb() *bolt.DB {\n\tconn, err := bolt.Open(self.File, 0644, nil)\n\tif err != nil {\n\t\tconn.Close()\n\t\tpanic(err)\n\t}\n\treturn conn\n}\n\n\/\/ Connect to bolt database. Returns open database connection.\n\/\/ @returns *bolt.DB\nfunc (self *Database) connect() *bolt.DB {\n\t\/\/ Check if file exists\n\t_, err := os.Stat(self.File)\n\tif err != nil {\n\t\tpanic(\"Database not found!\")\n\t}\n\t\/\/ Open database connection\n\tconn, err := bolt.Open(self.File, 0644, nil)\n\tif err != nil {\n\t\tconn.Close()\n\t\t\/\/log.Fatal(err)\n\t\tpanic(err)\n\t}\n\treturn conn\n}\n\n\/\/ Init creates bolt database if existing one not found.\n\/\/ Creates layers and apikey tables. Starts database caching for layers\n\/\/ @returns Error\nfunc (self *Database) Init() error {\n\t\/\/ Start db caching\n\tm := make(map[string]*LayerCache)\n\tself.Cache = m\n\tgo self.cacheManager()\n\tgo self.startCommitLog()\n\t\/\/ connect to db\n\t\/\/conn := self.connect()\n\tconn := self.createDb()\n\tdefer conn.Close()\n\t\/\/ datasources\n\terr := self.CreateTable(conn, \"layers\")\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn err\n\t}\n\t\/\/ Add table for datasource owner\n\t\/\/ permissions\n\terr = self.CreateTable(conn, \"apikeys\")\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn err\n\t}\n\t\/\/ create apikey\/customer cache\n\tself.Apikeys = make(map[string]Customer)\n\t\/\/ close and return err\n\treturn err\n}\n\n\/\/ Starts Database commit log\nfunc (self *Database) startCommitLog() {\n\tself.commit_log_queue = make(chan string, 10000)\n\t\/\/ open files r and w\n\tCOMMIT_LOG, err := os.OpenFile(COMMIT_LOG_FILE, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tdefer COMMIT_LOG.Close()\n\tfor {\n\t\tif len(self.commit_log_queue) > 0 {\n\t\t\tline := <-self.commit_log_queue\n\t\t\tif _, err := COMMIT_LOG.WriteString(line + \"\\n\"); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t}\n\t}\n}\n\nfunc (self *Database) CreateTable(conn *bolt.DB, table string) error {\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(table))\n\t\treturn err\n\t})\n\treturn err\n}\n\n\/\/ InsertCustomer inserts customer into apikeys table\n\/\/ @param customer {Customer}\n\/\/ @returns Error\nfunc (self *Database) InsertCustomer(customer Customer) error {\n\tself.Apikeys[customer.Apikey] = customer\n\tvalue, err := json.Marshal(customer)\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.commit_log_queue <- `{\"method\": \"insert_apikey\", \"data\":` + string(value) + `}`\n\t\/\/ Insert customer into database\n\terr = self.Insert(\"apikeys\", customer.Apikey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ GetCustomer returns customer from database\n\/\/ @param apikey {string}\n\/\/ @returns Customer\n\/\/ @returns Error\nfunc (self *Database) GetCustomer(apikey string) (Customer, error) {\n\t\/\/ Check apikey cache\n\tif _, ok := self.Apikeys[apikey]; ok {\n\t\treturn self.Apikeys[apikey], nil\n\t}\n\t\/\/ If customer not found get from database\n\tval, err := self.Select(\"apikeys\", apikey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ datasource not found\n\tif val == nil {\n\t\treturn Customer{}, fmt.Errorf(\"Apikey not found\")\n\t}\n\t\/\/ Read to struct\n\tcustomer := Customer{}\n\terr = json.Unmarshal(val, &customer)\n\tif err != nil {\n\t\treturn Customer{}, err\n\t}\n\t\/\/ Put apikey into cache\n\tself.Apikeys[apikey] = customer\n\t\/\/ Close database connection\n\treturn customer, nil\n}\n\n\/\/ NewLayer creates new datasource layer\n\/\/ @returns string - datasource id\n\/\/ @returns Error\nfunc (self *Database) NewLayer() (string, error) {\n\t\/\/ create geojson\n\tdatasource, _ := utils.NewUUID()\n\tgeojs := geojson.NewFeatureCollection()\n\t\/\/ convert to bytes\n\tvalue, err := geojs.MarshalJSON()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\tself.commit_log_queue <- `{\"method\": \"new_layer\", \"data\": { \"datasource\": \"` + datasource + `\", \"layer\": ` + string(value) + `}}`\n\t\/\/ Insert layer into database\n\terr = self.Insert(\"layers\", datasource, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn datasource, err\n}\n\n\/\/ InsertLayer inserts layer into database\n\/\/ @param datasource {string}\n\/\/ @param geojs {Geojson}\n\/\/ @returns Error\nfunc (self *Database) InsertLayer(datasource string, geojs *geojson.FeatureCollection) error {\n\t\/\/ Update caching layer\n\tif v, ok := self.Cache[datasource]; ok {\n\t\tself.guard.Lock()\n\t\tv.Geojson = geojs\n\t\tv.Time = time.Now()\n\t\tself.guard.Unlock()\n\t} else {\n\t\tpgc := &LayerCache{Geojson: geojs, Time: time.Now()}\n\t\tself.Cache[datasource] = pgc\n\t}\n\t\/\/ convert to bytes\n\tvalue, err := geojs.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = self.Insert(\"layers\", datasource, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ GetLayer returns layer from database\n\/\/ @param datasource {string}\n\/\/ @returns Geojson\n\/\/ @returns Error\nfunc (self *Database) GetLayer(datasource string) (*geojson.FeatureCollection, error) {\n\t\/\/ Caching layer\n\tif v, ok := self.Cache[datasource]; ok {\n\t\tself.guard.RLock()\n\t\tv.Time = time.Now()\n\t\tself.guard.RUnlock()\n\t\treturn v.Geojson, nil\n\t}\n\t\/\/ If page not found get from database\n\tval, err := self.Select(\"layers\", datasource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Read to struct\n\tgeojs, err := geojson.UnmarshalFeatureCollection(val)\n\tif err != nil {\n\t\treturn geojs, err\n\t}\n\t\/\/ Store page in memory cache\n\tpgc := &LayerCache{Geojson: geojs, Time: time.Now()}\n\tself.Cache[datasource] = pgc\n\treturn geojs, nil\n}\n\n\/\/ DeleteLayer deletes layer from database\n\/\/ @param datasource {string}\n\/\/ @returns Error\nfunc (self *Database) DeleteLayer(datasource string) error {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tkey := []byte(datasource)\n\tself.commit_log_queue <- `{\"method\": \"delete_layer\", \"data\": { \"datasource\": \"` + datasource + `\"}}`\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t\/\/bucket, err := tx.CreateBucketIfNotExists([]byte(\"layers\"))\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn err\n\t\t\/\/}\n\t\t\/\/err = bucket.Delete(key)\n\t\tbucket := tx.Bucket([]byte(\"layers\"))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket layers not found!\")\n\t\t}\n\t\terr := bucket.Delete(key)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tself.guard.Lock()\n\tdelete(self.Cache, datasource)\n\tself.guard.Unlock()\n\treturn err\n}\n\nfunc (self *Database) Insert(table string, key string, value []byte) error {\n\tconn := self.connect()\n\tdefer conn.Close()\n\terr := conn.Update(func(tx *bolt.Tx) error {\n\t\t\/\/bucket, err := tx.CreateBucketIfNotExists([]byte(table))\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn err\n\t\t\/\/}\n\t\t\/\/err = bucket.Put([]byte(key), self.compressByte(value))\n\t\tbucket := tx.Bucket([]byte(table))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket %q not found!\", table)\n\t\t}\n\t\terr := bucket.Put([]byte(key), self.compressByte(value))\n\t\treturn err\n\t})\n\treturn err\n}\n\nfunc (self *Database) Select(table string, key string) ([]byte, error) {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tval := []byte{}\n\terr := conn.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(table))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket %q not found!\", table)\n\t\t}\n\t\tval = self.decompressByte(bucket.Get([]byte(key)))\n\t\treturn nil\n\t})\n\treturn val, err\n}\n\nfunc (self *Database) SelectAll(table string) ([]string, error) {\n\tconn := self.connect()\n\tdefer conn.Close()\n\tdata := []string{}\n\terr := conn.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(table))\n\t\tif bucket == nil {\n\t\t\treturn fmt.Errorf(\"Bucket %q not found!\", table)\n\t\t}\n\t\tbucket.ForEach(func(key, _ []byte) error {\n\t\t\tdata = append(data, string(key))\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\treturn data, err\n}\n\n\/\/ InsertFeature adds feature to layer. Updates layer in Database\n\/\/ @param datasource {string}\n\/\/ @param feat {Geojson Feature}\n\/\/ @returns Error\nfunc (self *Database) InsertFeature(datasource string, feat *geojson.Feature) error {\n\t\/\/ Get layer from database\n\tfeatCollection, err := self.GetLayer(datasource)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Add new feature to layer\n\tvalue, err := feat.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\tself.commit_log_queue <- `{\"method\": \"insert_feature\", \"data\": { \"datasource\": \"` + datasource + `\", \"feature\": ` + string(value) + `}}`\n\tfeatCollection.AddFeature(feat)\n\t\/\/ insert layer\n\terr = self.InsertLayer(datasource, featCollection)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn err\n}\n\n\/\/ cacheManager for Database. Stores layers in memory.\n\/\/\t\tUnloads layers older than 90 sec\n\/\/\t\tWhen empty --> 60 sec timer\n\/\/\t\tWhen items in cache --> 15 sec timer\nfunc (self *Database) cacheManager() {\n\tfor {\n\t\tn := float64(len(self.Cache))\n\t\tif n != 0 {\n\t\t\tfor key := range self.Cache {\n\t\t\t\t\/\/ CHECK AVAILABLE SYSTEM MEMORY\n\t\t\t\tf := float64(len(self.Cache[key].Geojson.Features))\n\t\t\t\tlimit := (300.0 - (f * (f * 0.25))) - (n * 2.0)\n\t\t\t\tif limit < 0.0 {\n\t\t\t\t\tlimit = 10.0\n\t\t\t\t}\n\t\t\t\tif time.Since(self.Cache[key].Time).Seconds() > limit {\n\t\t\t\t\tself.guard.Lock()\n\t\t\t\t\tdelete(self.Cache, key)\n\t\t\t\t\tself.guard.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(10000 * time.Millisecond)\n\t}\n}\n\n\/\/ Methods: Compression\n\/\/ Source: https:\/\/github.com\/schollz\/gofind\/blob\/master\/utils.go#L146-L169\n\/\/ https:\/\/github.com\/schollz\/gofind\/blob\/master\/fingerprint.go#L43-L54\n\/\/ Description:\n\/\/\t\tCompress and Decompress bytes\nfunc (self *Database) compressByte(src []byte) []byte {\n\tcompressedData := new(bytes.Buffer)\n\tself.compress(src, compressedData, 9)\n\treturn compressedData.Bytes()\n}\n\nfunc (self *Database) decompressByte(src []byte) []byte {\n\tcompressedData := bytes.NewBuffer(src)\n\tdeCompressedData := new(bytes.Buffer)\n\tself.decompress(compressedData, deCompressedData)\n\treturn deCompressedData.Bytes()\n}\n\nfunc (self *Database) compress(src []byte, dest io.Writer, level int) {\n\tcompressor, _ := flate.NewWriter(dest, level)\n\tcompressor.Write(src)\n\tcompressor.Close()\n}\n\nfunc (self *Database) decompress(src io.Reader, dest io.Writer) {\n\tdecompressor := flate.NewReader(src)\n\tio.Copy(dest, decompressor)\n\tdecompressor.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package gqt_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Surviving Restarts\", func() {\n\tvar (\n\t\targs []string\n\t\tclient *runner.RunningGarden\n\t)\n\n\tBeforeEach(func() {\n\t\targs = []string{}\n\t})\n\n\tJustBeforeEach(func() {\n\t\tclient = startGarden(args...)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tconst (\n\t\tsubnetName string = \"177-100-10-0\"\n\t)\n\n\tContext(\"when a container is created and then garden is restarted\", func() {\n\t\tvar (\n\t\t\tcontainer garden.Container\n\t\t\thostNetInPort uint32\n\t\t\texternalIP string\n\t\t\tinterfacePrefix string\n\t\t\tpropertiesDir string\n\t\t\texistingProc garden.Process\n\t\t\tcontainerSpec garden.ContainerSpec\n\t\t\trestartArgs []string\n\t\t\tgracefulShutdown bool\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tpropertiesDir, err = ioutil.TempDir(\"\", \"props\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\targs = append(args, \"--properties-path\", path.Join(propertiesDir, \"props.json\"))\n\n\t\t\tcontainerSpec = garden.ContainerSpec{\n\t\t\t\tNetwork: \"177.100.10.30\/30\",\n\t\t\t}\n\n\t\t\trestartArgs = []string{}\n\t\t\tgracefulShutdown = true\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tcontainer, err = client.Create(containerSpec)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\thostNetInPort, _, err = container.NetIn(hostNetInPort, 8080)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tcontainer.NetOut(garden.NetOutRule{\n\t\t\t\tNetworks: []garden.IPRange{\n\t\t\t\t\tgarden.IPRangeFromIP(net.ParseIP(\"8.8.8.8\")),\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tinfo, err := container.Info()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\texternalIP = info.ExternalIP\n\t\t\tinterfacePrefix = info.Properties[\"kawasaki.iptable-prefix\"]\n\n\t\t\tout := gbytes.NewBuffer()\n\t\t\texistingProc, err = container.Run(\n\t\t\t\tgarden.ProcessSpec{\n\t\t\t\t\tPath: \"\/bin\/sh\",\n\t\t\t\t\tArgs: []string{\"-c\", \"while true; do echo hello; sleep 1; done;\"},\n\t\t\t\t},\n\t\t\t\tgarden.ProcessIO{\n\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tif gracefulShutdown {\n\t\t\t\tExpect(client.Stop()).To(Succeed())\n\t\t\t} else {\n\t\t\t\tExpect(client.Kill()).To(MatchError(\"exit status 137\"))\n\t\t\t}\n\n\t\t\tif len(restartArgs) == 0 {\n\t\t\t\trestartArgs = args\n\t\t\t}\n\t\t\tclient = startGarden(restartArgs...)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(os.RemoveAll(propertiesDir)).To(Succeed())\n\t\t})\n\n\t\tContext(\"when the destroy-containers-on-startup flag is passed\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\targs = append(args, \"--destroy-containers-on-startup\")\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers in the depotDir\", func() {\n\t\t\t\tExpect(ioutil.ReadDir(client.DepotDir)).To(BeEmpty())\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers' iptables\", func() {\n\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"filter\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"w-%d-instance.* 177.100.10.0\/30\", GinkgoParallelNode())))\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers' bridges\", func() {\n\t\t\t\tout, err := exec.Command(\"ifconfig\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tpattern := fmt.Sprintf(\".*w%d%s.*\", GinkgoParallelNode(), subnetName)\n\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(pattern))\n\t\t\t})\n\n\t\t\tIt(\"kills the container processes\", func() {\n\t\t\t\tprocesses, err := exec.Command(\"ps\", \"aux\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tExpect(string(processes)).NotTo(ContainSubstring(fmt.Sprintf(\"run runc \/tmp\/test-garden-%d\/containers\/%s\", GinkgoParallelNode(), container.Handle())))\n\t\t\t})\n\n\t\t\tContext(\"when the garden server does not shut down gracefully\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tgracefulShutdown = false\n\t\t\t\t})\n\n\t\t\t\tIt(\"destroys orphaned containers' iptables filter rules\", func() {\n\t\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"filter\").CombinedOutput()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"%sinstance.*\", interfacePrefix)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"destroys orphaned containers' iptables nat rules\", func() {\n\t\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"nat\").CombinedOutput()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"%sinstance.*\", interfacePrefix)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when a container is created after restart\", func() {\n\t\t\t\tIt(\"can be created with the same network reservation\", func() {\n\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the destroy-containers-on-startup flag is not passed\", func() {\n\t\t\tDescribe(\"on th pre-existing VM\", func() {\n\t\t\t\tIt(\"does not destroy the depot\", func() {\n\t\t\t\t\tExpect(filepath.Join(client.DepotDir, container.Handle())).To(BeADirectory())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still run processes\", func() {\n\t\t\t\t\tout := gbytes.NewBuffer()\n\t\t\t\t\tproc, err := container.Run(\n\t\t\t\t\t\tgarden.ProcessSpec{\n\t\t\t\t\t\t\tPath: \"\/bin\/sh\",\n\t\t\t\t\t\t\tArgs: []string{\"-c\", \"echo hello; exit 12\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tgarden.ProcessIO{\n\t\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\t})\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\texitCode, err := proc.Wait()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(exitCode).To(Equal(12))\n\t\t\t\t\tExpect(out).To(gbytes.Say(\"hello\"))\n\t\t\t\t})\n\n\t\t\t\tIt(\"can reattach to processes that are still running\", func() {\n\t\t\t\t\tout := gbytes.NewBuffer()\n\t\t\t\t\tprocId := existingProc.ID()\n\t\t\t\t\tprocess, err := container.Attach(procId, garden.ProcessIO{\n\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t})\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(out).Should(gbytes.Say(\"hello\"))\n\n\t\t\t\t\tExpect(process.Signal(garden.SignalKill)).To(Succeed())\n\n\t\t\t\t\texited := make(chan struct{})\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tprocess.Wait()\n\t\t\t\t\t\tclose(exited)\n\t\t\t\t\t}()\n\n\t\t\t\t\tEventually(exited).Should(BeClosed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still destroy the container\", func() {\n\t\t\t\t\tExpect(client.Destroy(container.Handle())).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still be able to access the internet\", func() {\n\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still be accessible from the outside\", func() {\n\t\t\t\t\tExpect(listenInContainer(container, 8080)).To(Succeed())\n\n\t\t\t\t\tinfo, err := container.Info()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\texternalIP := info.ExternalIP\n\n\t\t\t\t\t\/\/ retry because listener process inside other container\n\t\t\t\t\t\/\/ may not start immediately\n\t\t\t\t\tEventually(func() int {\n\t\t\t\t\t\tsession := sendRequest(externalIP, hostNetInPort)\n\t\t\t\t\t\treturn session.Wait().ExitCode()\n\t\t\t\t\t}).Should(Equal(0))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the server denies all the networks\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\targs = append(args, \"--deny-network\", \"0.0.0.0\/0\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"still can't access disallowed IPs\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.4.4\", 53)).NotTo(Succeed())\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"can still be able to access the allowed IPs\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the server is restarted without deny networks applied\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\trestartArgs = args[:]\n\t\t\t\t\t\targs = append(args, \"--deny-network\", \"0.0.0.0\/0\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is able to access the internet\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.4.4\", 53)).To(Succeed())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when creating a container after restart\", func() {\n\t\t\t\tIt(\"should not allocate ports used before restart\", func() {\n\t\t\t\t\tsecondContainer, err := client.Create(garden.ContainerSpec{})\n\t\t\t\t\tsecondContainerHostPort, _, err := secondContainer.NetIn(0, 8080)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(hostNetInPort).NotTo(Equal(secondContainerHostPort))\n\t\t\t\t})\n\n\t\t\t\tContext(\"with a subnet used before restart\", func() {\n\t\t\t\t\tIt(\"will not allocate an IP\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).To(MatchError(\"the requested IP is already allocated\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with an IP used before restart\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tcontainerSpec = garden.ContainerSpec{\n\t\t\t\t\t\t\t\/\/ Specifying a CIDR of < 30 will make garden give us exactly 177.100.10.5\n\t\t\t\t\t\t\tNetwork: \"177.100.10.5\/29\",\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"should not allocate the IP\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).To(MatchError(\"the requested IP is already allocated\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with no network specified\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tcontainerSpec = garden.ContainerSpec{}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"successfully creates another container with no network specified\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Make test less fragile<commit_after>package gqt_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Surviving Restarts\", func() {\n\tvar (\n\t\targs []string\n\t\tclient *runner.RunningGarden\n\t)\n\n\tBeforeEach(func() {\n\t\targs = []string{}\n\t})\n\n\tJustBeforeEach(func() {\n\t\tclient = startGarden(args...)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tconst (\n\t\tsubnetName string = \"177-100-10-0\"\n\t)\n\n\tContext(\"when a container is created and then garden is restarted\", func() {\n\t\tvar (\n\t\t\tcontainer garden.Container\n\t\t\thostNetInPort uint32\n\t\t\texternalIP string\n\t\t\tinterfacePrefix string\n\t\t\tpropertiesDir string\n\t\t\texistingProc garden.Process\n\t\t\tcontainerSpec garden.ContainerSpec\n\t\t\trestartArgs []string\n\t\t\tgracefulShutdown bool\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tpropertiesDir, err = ioutil.TempDir(\"\", \"props\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\targs = append(args, \"--properties-path\", path.Join(propertiesDir, \"props.json\"))\n\n\t\t\tcontainerSpec = garden.ContainerSpec{\n\t\t\t\tNetwork: \"177.100.10.30\/30\",\n\t\t\t}\n\n\t\t\trestartArgs = []string{}\n\t\t\tgracefulShutdown = true\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tcontainer, err = client.Create(containerSpec)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\thostNetInPort, _, err = container.NetIn(hostNetInPort, 8080)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tcontainer.NetOut(garden.NetOutRule{\n\t\t\t\tNetworks: []garden.IPRange{\n\t\t\t\t\tgarden.IPRangeFromIP(net.ParseIP(\"8.8.8.8\")),\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tinfo, err := container.Info()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\texternalIP = info.ExternalIP\n\t\t\tinterfacePrefix = info.Properties[\"kawasaki.iptable-prefix\"]\n\n\t\t\tout := gbytes.NewBuffer()\n\t\t\texistingProc, err = container.Run(\n\t\t\t\tgarden.ProcessSpec{\n\t\t\t\t\tPath: \"\/bin\/sh\",\n\t\t\t\t\tArgs: []string{\"-c\", \"while true; do echo hello; sleep 1; done;\"},\n\t\t\t\t},\n\t\t\t\tgarden.ProcessIO{\n\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t})\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tif gracefulShutdown {\n\t\t\t\tExpect(client.Stop()).To(Succeed())\n\t\t\t} else {\n\t\t\t\tExpect(client.Kill()).To(MatchError(\"exit status 137\"))\n\t\t\t}\n\n\t\t\tif len(restartArgs) == 0 {\n\t\t\t\trestartArgs = args\n\t\t\t}\n\t\t\tclient = startGarden(restartArgs...)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(os.RemoveAll(propertiesDir)).To(Succeed())\n\t\t})\n\n\t\tContext(\"when the destroy-containers-on-startup flag is passed\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\targs = append(args, \"--destroy-containers-on-startup\")\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers in the depotDir\", func() {\n\t\t\t\tExpect(ioutil.ReadDir(client.DepotDir)).To(BeEmpty())\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers' iptables\", func() {\n\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"filter\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"%sinstance.*\", interfacePrefix)))\n\t\t\t})\n\n\t\t\tIt(\"destroys the remaining containers' bridges\", func() {\n\t\t\t\tout, err := exec.Command(\"ifconfig\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tpattern := fmt.Sprintf(\".*w%d%s.*\", GinkgoParallelNode(), subnetName)\n\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(pattern))\n\t\t\t})\n\n\t\t\tIt(\"kills the container processes\", func() {\n\t\t\t\tprocesses, err := exec.Command(\"ps\", \"aux\").CombinedOutput()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tExpect(string(processes)).NotTo(ContainSubstring(fmt.Sprintf(\"run runc \/tmp\/test-garden-%d\/containers\/%s\", GinkgoParallelNode(), container.Handle())))\n\t\t\t})\n\n\t\t\tContext(\"when the garden server does not shut down gracefully\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tgracefulShutdown = false\n\t\t\t\t})\n\n\t\t\t\tIt(\"destroys orphaned containers' iptables filter rules\", func() {\n\t\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"filter\").CombinedOutput()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"%sinstance.*\", interfacePrefix)))\n\t\t\t\t})\n\n\t\t\t\tIt(\"destroys orphaned containers' iptables nat rules\", func() {\n\t\t\t\t\tout, err := exec.Command(\"iptables\", \"-w\", \"-S\", \"-t\", \"nat\").CombinedOutput()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(string(out)).NotTo(MatchRegexp(fmt.Sprintf(\"%sinstance.*\", interfacePrefix)))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when a container is created after restart\", func() {\n\t\t\t\tIt(\"can be created with the same network reservation\", func() {\n\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the destroy-containers-on-startup flag is not passed\", func() {\n\t\t\tDescribe(\"on th pre-existing VM\", func() {\n\t\t\t\tIt(\"does not destroy the depot\", func() {\n\t\t\t\t\tExpect(filepath.Join(client.DepotDir, container.Handle())).To(BeADirectory())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still run processes\", func() {\n\t\t\t\t\tout := gbytes.NewBuffer()\n\t\t\t\t\tproc, err := container.Run(\n\t\t\t\t\t\tgarden.ProcessSpec{\n\t\t\t\t\t\t\tPath: \"\/bin\/sh\",\n\t\t\t\t\t\t\tArgs: []string{\"-c\", \"echo hello; exit 12\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tgarden.ProcessIO{\n\t\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\t})\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\texitCode, err := proc.Wait()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(exitCode).To(Equal(12))\n\t\t\t\t\tExpect(out).To(gbytes.Say(\"hello\"))\n\t\t\t\t})\n\n\t\t\t\tIt(\"can reattach to processes that are still running\", func() {\n\t\t\t\t\tout := gbytes.NewBuffer()\n\t\t\t\t\tprocId := existingProc.ID()\n\t\t\t\t\tprocess, err := container.Attach(procId, garden.ProcessIO{\n\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, out),\n\t\t\t\t\t})\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tEventually(out).Should(gbytes.Say(\"hello\"))\n\n\t\t\t\t\tExpect(process.Signal(garden.SignalKill)).To(Succeed())\n\n\t\t\t\t\texited := make(chan struct{})\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tprocess.Wait()\n\t\t\t\t\t\tclose(exited)\n\t\t\t\t\t}()\n\n\t\t\t\t\tEventually(exited).Should(BeClosed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still destroy the container\", func() {\n\t\t\t\t\tExpect(client.Destroy(container.Handle())).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still be able to access the internet\", func() {\n\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"can still be accessible from the outside\", func() {\n\t\t\t\t\tExpect(listenInContainer(container, 8080)).To(Succeed())\n\n\t\t\t\t\tinfo, err := container.Info()\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\texternalIP := info.ExternalIP\n\n\t\t\t\t\t\/\/ retry because listener process inside other container\n\t\t\t\t\t\/\/ may not start immediately\n\t\t\t\t\tEventually(func() int {\n\t\t\t\t\t\tsession := sendRequest(externalIP, hostNetInPort)\n\t\t\t\t\t\treturn session.Wait().ExitCode()\n\t\t\t\t\t}).Should(Equal(0))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the server denies all the networks\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\targs = append(args, \"--deny-network\", \"0.0.0.0\/0\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"still can't access disallowed IPs\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.4.4\", 53)).NotTo(Succeed())\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"can still be able to access the allowed IPs\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the server is restarted without deny networks applied\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\trestartArgs = args[:]\n\t\t\t\t\t\targs = append(args, \"--deny-network\", \"0.0.0.0\/0\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is able to access the internet\", func() {\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.8.8\", 53)).To(Succeed())\n\t\t\t\t\t\tExpect(checkConnection(container, \"8.8.4.4\", 53)).To(Succeed())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when creating a container after restart\", func() {\n\t\t\t\tIt(\"should not allocate ports used before restart\", func() {\n\t\t\t\t\tsecondContainer, err := client.Create(garden.ContainerSpec{})\n\t\t\t\t\tsecondContainerHostPort, _, err := secondContainer.NetIn(0, 8080)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(hostNetInPort).NotTo(Equal(secondContainerHostPort))\n\t\t\t\t})\n\n\t\t\t\tContext(\"with a subnet used before restart\", func() {\n\t\t\t\t\tIt(\"will not allocate an IP\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).To(MatchError(\"the requested IP is already allocated\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with an IP used before restart\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tcontainerSpec = garden.ContainerSpec{\n\t\t\t\t\t\t\t\/\/ Specifying a CIDR of < 30 will make garden give us exactly 177.100.10.5\n\t\t\t\t\t\t\tNetwork: \"177.100.10.5\/29\",\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"should not allocate the IP\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).To(MatchError(\"the requested IP is already allocated\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"with no network specified\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tcontainerSpec = garden.ContainerSpec{}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"successfully creates another container with no network specified\", func() {\n\t\t\t\t\t\t_, err := client.Create(containerSpec)\n\t\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package dockerbuild\n\nimport (\n\t\"archive\/tar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dynport\/dgtk\/dockerclient\"\n\t\"github.com\/dynport\/dgtk\/git\"\n)\n\ntype Build struct {\n\tProxy string \/\/ http proxy to use for faster builds in local environment\n\tRoot string \/\/ root directory used for archive (should contain a dockerfile at least)\n\n\tGitRepository string \/\/ repository to check out into the docker build archive\n\tGitRevision string \/\/ revision of the repository to use\n\n\t\/\/ If this is a ruby project then add the Gemfiles to the archive separately. That way bundler's inefficiency can be\n\t\/\/ mitigated using docker's caching strategy. Just call copy the Gemfile's somewhere (using the 'ADD' command) and\n\t\/\/ run bundler on them. Then extract the sources and use the app. This way only changes to the Gemfiles will result\n\t\/\/ in a rerun of bundler.\n\tRubyProject bool\n\n\tDockerHost string \/\/ IP of the host running docker.\n\tDockerPort int \/\/ Port docker is listening on.\n\tDockerHostUser string \/\/ If set an SSH tunnel will be setup and used for communication.\n\tDockerHostPassword string \/\/ Password of the user, if required for SSH (public key authentication should be preferred).\n\tDockerImageTag string \/\/ tag used for the resulting image\n\n\tForceBuild bool \/\/ Build even if image already exists.\n\tVerbose bool\n\n\tmsgList []message\n\n\tclient *dockerclient.DockerHost\n}\n\ntype message struct {\n\tStep int\n\tCached bool\n\tCommand string\n\tImage string\n\tStarted time.Time\n\tFinished time.Time\n}\n\ntype progress struct {\n\ttotal int64\n\tcurrent int64\n\tstarted time.Time\n}\n\nfunc newProgress(total int64) *progress {\n\treturn &progress{started: time.Now(), total: total}\n}\n\nfunc (p *progress) Write(b []byte) (int, error) {\n\ti := len(b)\n\tp.current += int64(i)\n\tfmt.Printf(\"\\rupload progress %.1f%%\", 100.0*float64(p.current)\/float64(p.total))\n\tif p.current == p.total {\n\t\tfmt.Printf(\"\\nuploaded total_size=%.3fMB in total_time%.3fs\\n\", float64(p.total)\/(1024.0*1024.0), time.Since(p.started).Seconds())\n\t}\n\treturn i, nil\n}\n\nfunc (b *Build) connectToDockerHost() (e error) {\n\tif b.client != nil {\n\t\treturn nil\n\t}\n\n\tif b.DockerHostUser == \"\" {\n\t\tport := b.DockerPort\n\t\tif port == 0 {\n\t\t\tport = 4243\n\t\t}\n\t\tb.client, e = dockerclient.New(b.DockerHost, port)\n\t\treturn e\n\t}\n\tb.client, e = dockerclient.NewViaTunnel(b.DockerHost, b.DockerHostUser, b.DockerHostPassword)\n\treturn e\n}\n\nfunc (b *Build) BuildAndPush() (string, error) {\n\timageId, e := b.Build()\n\tif e != nil {\n\t\treturn imageId, e\n\t}\n\t\/\/ build has connected so we can assume b.client is set\n\treturn imageId, b.client.PushImage(b.DockerImageTag, &dockerclient.PushImageOptions{Callback: b.callbackForPush})\n}\n\nfunc (b *Build) Build() (string, error) {\n\tif e := b.connectToDockerHost(); e != nil {\n\t\treturn \"\", e\n\t}\n\n\tif !b.ForceBuild {\n\t\tdetails, e := b.client.ImageDetails(b.DockerImageTag)\n\t\tswitch e {\n\t\tcase nil:\n\t\t\tlog.Printf(\"image for %q already exists\", b.DockerImageTag)\n\t\t\treturn details.Id, nil\n\t\tcase dockerclient.ErrorNotFound:\n\t\t\t\/\/ ignore\n\t\tdefault:\n\t\t\treturn \"\", e\n\t\t}\n\t}\n\n\tf, e := b.buildArchive()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tdefer func() { os.Remove(f.Name()) }()\n\n\tf, e = os.Open(f.Name())\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tstat, e := f.Stat()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tprogress := newProgress(stat.Size())\n\tr := io.TeeReader(f, progress)\n\n\treturn b.client.Build(r, &dockerclient.BuildImageOptions{\n\t\tTag: b.DockerImageTag,\n\t\tCallback: b.callbackForBuild,\n\t})\n}\n\nfunc (b *Build) buildArchive() (*os.File, error) {\n\tf, e := ioutil.TempFile(\"\/tmp\", \"docker_build\")\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tdefer f.Close()\n\n\tt := tar.NewWriter(f)\n\tdefer t.Flush()\n\tdefer t.Close()\n\n\tif b.GitRepository != \"\" {\n\t\trepo := &git.Repository{Origin: b.GitRepository}\n\t\te := repo.Init()\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\te = repo.Fetch()\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tif e := repo.WriteArchiveToTar(b.GitRevision, t); e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tif b.RubyProject {\n\t\t\tif e := repo.WriteFilesToTar(b.GitRevision, t, \"Gemfile\", \"Gemfile.lock\"); e != nil {\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t}\n\t}\n\tif withDockerFile, e := b.addFilesToArchive(b.Root, t); e != nil {\n\t\treturn nil, e\n\t} else if !withDockerFile {\n\t\treturn nil, fmt.Errorf(\"archive must contain a Dockerfile\")\n\t}\n\treturn f, nil\n}\n\nfunc (build *Build) addFilesToArchive(root string, t *tar.Writer) (withDockerFile bool, e error) {\n\treturn withDockerFile, filepath.Walk(root, func(p string, info os.FileInfo, e error) error {\n\t\tif e == nil && p != root {\n\t\t\tvar e error\n\t\t\tname := strings.TrimPrefix(p, root+\"\/\")\n\t\t\theader := &tar.Header{Name: name, ModTime: info.ModTime().UTC()}\n\t\t\tswitch {\n\t\t\tcase info.IsDir():\n\t\t\t\tif name == \".git\" {\n\t\t\t\t\treturn filepath.SkipDir\n\t\t\t\t}\n\t\t\t\theader.Typeflag = tar.TypeDir\n\t\t\t\theader.Mode = 0755\n\t\t\t\te = t.WriteHeader(header)\n\t\t\tdefault:\n\t\t\t\tb, e := ioutil.ReadFile(p)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\n\t\t\t\tif name == \"Dockerfile\" {\n\t\t\t\t\twithDockerFile = true\n\t\t\t\t\tif build.Proxy != \"\" {\n\t\t\t\t\t\tb = NewDockerfile(b).MixinProxy(build.Proxy)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\theader.Mode = 0644\n\t\t\t\theader.Size = int64(len(b))\n\t\t\t\te = t.WriteHeader(header)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t\t_, e = t.Write(b)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t}\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (b *Build) callbackForBuild(msg *dockerclient.JSONMessage) {\n\tif e := msg.Err(); e != nil {\n\t\tlog.Printf(\"%s\", e)\n\t\treturn\n\t}\n\tmsg.Stream = strings.TrimSpace(msg.Stream)\n\tswitch {\n\tcase msg.Stream == \"\":\n\t\t\/\/ ignore\n\tcase b.Verbose:\n\t\tfmt.Println(msg.Stream)\n\tcase strings.HasPrefix(msg.Stream, \"Step \"):\n\t\tparts := strings.SplitN(msg.Stream[5:], \" : \", 2)\n\t\tif len(parts) != 2 { \/\/ ignore message\n\t\t\treturn\n\t\t}\n\t\tstep, e := strconv.Atoi(parts[0])\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t\tnewMessage := message{\n\t\t\tStep: step,\n\t\t\tCommand: parts[1],\n\t\t\tStarted: time.Now(),\n\t\t}\n\t\tb.msgList = append(b.msgList, newMessage)\n\t\tfmt.Printf(\"%.3d %q\", newMessage.Step, newMessage.Command)\n\tcase msg.Stream == \"---> Using cache\":\n\t\tif len(b.msgList) > 0 {\n\t\t\tb.msgList[len(b.msgList)-1].Cached = true\n\t\t}\n\tcase strings.HasPrefix(msg.Stream, \"---> \"):\n\t\tif len(b.msgList) > 0 && len(msg.Stream) == 17 {\n\t\t\ti := len(b.msgList) - 1\n\t\t\tb.msgList[i].Finished = time.Now()\n\t\t\tb.msgList[i].Image = msg.Stream[5:]\n\t\t\tstatus := \"BUILT \"\n\t\t\tif b.msgList[i].Cached {\n\t\t\t\tstatus = \"CACHED\"\n\t\t\t}\n\t\t\tduration := b.msgList[i].Finished.Sub(b.msgList[i].Started)\n\t\t\tfmt.Printf(\"\\r%.3d [%s] [%s] %q took %5.2fs\\n\", b.msgList[i].Step, status, b.msgList[i].Image, b.msgList[i].Command, duration.Seconds())\n\t\t}\n\t}\n}\n\nfunc (b *Build) callbackForPush(msg *dockerclient.JSONMessage) {\n\tif e := msg.Err(); e != nil {\n\t\tlog.Printf(\"%s\", e)\n\t\treturn\n\t}\n\tmsg.Status = strings.TrimSpace(msg.Status)\n\tswitch {\n\tcase msg.Status == \"\":\n\t\t\/\/ ignore\n\tcase b.Verbose:\n\t\tfmt.Printf(\"%s\\n\", msg.Status)\n\tcase msg.Status == \"Buffering to disk\" && msg.Progress.Total > 0:\n\t\tfmt.Printf(\"\\r[%s] Buffering: %4.2f %%\", msg.Id, 100.0*float32(msg.Progress.Current)\/float32(msg.Progress.Total))\n\tcase msg.Status == \"Pushing\" && msg.Progress.Total > 0:\n\t\tfmt.Printf(\"\\r[%s] Pushing: %4.2f %%\", msg.Id, 100.0*float32(msg.Progress.Current)\/float32(msg.Progress.Total))\n\tcase msg.Status == \"Image successfully pushed\":\n\t\tfmt.Printf(\"\\r[%s] Pushing: ... done\\n\", msg.Id)\n\t}\n}\n<commit_msg>added missing \"=\" in output<commit_after>package dockerbuild\n\nimport (\n\t\"archive\/tar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dynport\/dgtk\/dockerclient\"\n\t\"github.com\/dynport\/dgtk\/git\"\n)\n\ntype Build struct {\n\tProxy string \/\/ http proxy to use for faster builds in local environment\n\tRoot string \/\/ root directory used for archive (should contain a dockerfile at least)\n\n\tGitRepository string \/\/ repository to check out into the docker build archive\n\tGitRevision string \/\/ revision of the repository to use\n\n\t\/\/ If this is a ruby project then add the Gemfiles to the archive separately. That way bundler's inefficiency can be\n\t\/\/ mitigated using docker's caching strategy. Just call copy the Gemfile's somewhere (using the 'ADD' command) and\n\t\/\/ run bundler on them. Then extract the sources and use the app. This way only changes to the Gemfiles will result\n\t\/\/ in a rerun of bundler.\n\tRubyProject bool\n\n\tDockerHost string \/\/ IP of the host running docker.\n\tDockerPort int \/\/ Port docker is listening on.\n\tDockerHostUser string \/\/ If set an SSH tunnel will be setup and used for communication.\n\tDockerHostPassword string \/\/ Password of the user, if required for SSH (public key authentication should be preferred).\n\tDockerImageTag string \/\/ tag used for the resulting image\n\n\tForceBuild bool \/\/ Build even if image already exists.\n\tVerbose bool\n\n\tmsgList []message\n\n\tclient *dockerclient.DockerHost\n}\n\ntype message struct {\n\tStep int\n\tCached bool\n\tCommand string\n\tImage string\n\tStarted time.Time\n\tFinished time.Time\n}\n\ntype progress struct {\n\ttotal int64\n\tcurrent int64\n\tstarted time.Time\n}\n\nfunc newProgress(total int64) *progress {\n\treturn &progress{started: time.Now(), total: total}\n}\n\nfunc (p *progress) Write(b []byte) (int, error) {\n\ti := len(b)\n\tp.current += int64(i)\n\tfmt.Printf(\"\\rupload progress %.1f%%\", 100.0*float64(p.current)\/float64(p.total))\n\tif p.current == p.total {\n\t\tfmt.Printf(\"\\nuploaded total_size=%.3fMB in total_time=%.3fs\\n\", float64(p.total)\/(1024.0*1024.0), time.Since(p.started).Seconds())\n\t}\n\treturn i, nil\n}\n\nfunc (b *Build) connectToDockerHost() (e error) {\n\tif b.client != nil {\n\t\treturn nil\n\t}\n\n\tif b.DockerHostUser == \"\" {\n\t\tport := b.DockerPort\n\t\tif port == 0 {\n\t\t\tport = 4243\n\t\t}\n\t\tb.client, e = dockerclient.New(b.DockerHost, port)\n\t\treturn e\n\t}\n\tb.client, e = dockerclient.NewViaTunnel(b.DockerHost, b.DockerHostUser, b.DockerHostPassword)\n\treturn e\n}\n\nfunc (b *Build) BuildAndPush() (string, error) {\n\timageId, e := b.Build()\n\tif e != nil {\n\t\treturn imageId, e\n\t}\n\t\/\/ build has connected so we can assume b.client is set\n\treturn imageId, b.client.PushImage(b.DockerImageTag, &dockerclient.PushImageOptions{Callback: b.callbackForPush})\n}\n\nfunc (b *Build) Build() (string, error) {\n\tif e := b.connectToDockerHost(); e != nil {\n\t\treturn \"\", e\n\t}\n\n\tif !b.ForceBuild {\n\t\tdetails, e := b.client.ImageDetails(b.DockerImageTag)\n\t\tswitch e {\n\t\tcase nil:\n\t\t\tlog.Printf(\"image for %q already exists\", b.DockerImageTag)\n\t\t\treturn details.Id, nil\n\t\tcase dockerclient.ErrorNotFound:\n\t\t\t\/\/ ignore\n\t\tdefault:\n\t\t\treturn \"\", e\n\t\t}\n\t}\n\n\tf, e := b.buildArchive()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tdefer func() { os.Remove(f.Name()) }()\n\n\tf, e = os.Open(f.Name())\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tstat, e := f.Stat()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tprogress := newProgress(stat.Size())\n\tr := io.TeeReader(f, progress)\n\n\treturn b.client.Build(r, &dockerclient.BuildImageOptions{\n\t\tTag: b.DockerImageTag,\n\t\tCallback: b.callbackForBuild,\n\t})\n}\n\nfunc (b *Build) buildArchive() (*os.File, error) {\n\tf, e := ioutil.TempFile(\"\/tmp\", \"docker_build\")\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tdefer f.Close()\n\n\tt := tar.NewWriter(f)\n\tdefer t.Flush()\n\tdefer t.Close()\n\n\tif b.GitRepository != \"\" {\n\t\trepo := &git.Repository{Origin: b.GitRepository}\n\t\te := repo.Init()\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\te = repo.Fetch()\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tif e := repo.WriteArchiveToTar(b.GitRevision, t); e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tif b.RubyProject {\n\t\t\tif e := repo.WriteFilesToTar(b.GitRevision, t, \"Gemfile\", \"Gemfile.lock\"); e != nil {\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t}\n\t}\n\tif withDockerFile, e := b.addFilesToArchive(b.Root, t); e != nil {\n\t\treturn nil, e\n\t} else if !withDockerFile {\n\t\treturn nil, fmt.Errorf(\"archive must contain a Dockerfile\")\n\t}\n\treturn f, nil\n}\n\nfunc (build *Build) addFilesToArchive(root string, t *tar.Writer) (withDockerFile bool, e error) {\n\treturn withDockerFile, filepath.Walk(root, func(p string, info os.FileInfo, e error) error {\n\t\tif e == nil && p != root {\n\t\t\tvar e error\n\t\t\tname := strings.TrimPrefix(p, root+\"\/\")\n\t\t\theader := &tar.Header{Name: name, ModTime: info.ModTime().UTC()}\n\t\t\tswitch {\n\t\t\tcase info.IsDir():\n\t\t\t\tif name == \".git\" {\n\t\t\t\t\treturn filepath.SkipDir\n\t\t\t\t}\n\t\t\t\theader.Typeflag = tar.TypeDir\n\t\t\t\theader.Mode = 0755\n\t\t\t\te = t.WriteHeader(header)\n\t\t\tdefault:\n\t\t\t\tb, e := ioutil.ReadFile(p)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\n\t\t\t\tif name == \"Dockerfile\" {\n\t\t\t\t\twithDockerFile = true\n\t\t\t\t\tif build.Proxy != \"\" {\n\t\t\t\t\t\tb = NewDockerfile(b).MixinProxy(build.Proxy)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\theader.Mode = 0644\n\t\t\t\theader.Size = int64(len(b))\n\t\t\t\te = t.WriteHeader(header)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t\t_, e = t.Write(b)\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t}\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (b *Build) callbackForBuild(msg *dockerclient.JSONMessage) {\n\tif e := msg.Err(); e != nil {\n\t\tlog.Printf(\"%s\", e)\n\t\treturn\n\t}\n\tmsg.Stream = strings.TrimSpace(msg.Stream)\n\tswitch {\n\tcase msg.Stream == \"\":\n\t\t\/\/ ignore\n\tcase b.Verbose:\n\t\tfmt.Println(msg.Stream)\n\tcase strings.HasPrefix(msg.Stream, \"Step \"):\n\t\tparts := strings.SplitN(msg.Stream[5:], \" : \", 2)\n\t\tif len(parts) != 2 { \/\/ ignore message\n\t\t\treturn\n\t\t}\n\t\tstep, e := strconv.Atoi(parts[0])\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t\tnewMessage := message{\n\t\t\tStep: step,\n\t\t\tCommand: parts[1],\n\t\t\tStarted: time.Now(),\n\t\t}\n\t\tb.msgList = append(b.msgList, newMessage)\n\t\tfmt.Printf(\"%.3d %q\", newMessage.Step, newMessage.Command)\n\tcase msg.Stream == \"---> Using cache\":\n\t\tif len(b.msgList) > 0 {\n\t\t\tb.msgList[len(b.msgList)-1].Cached = true\n\t\t}\n\tcase strings.HasPrefix(msg.Stream, \"---> \"):\n\t\tif len(b.msgList) > 0 && len(msg.Stream) == 17 {\n\t\t\ti := len(b.msgList) - 1\n\t\t\tb.msgList[i].Finished = time.Now()\n\t\t\tb.msgList[i].Image = msg.Stream[5:]\n\t\t\tstatus := \"BUILT \"\n\t\t\tif b.msgList[i].Cached {\n\t\t\t\tstatus = \"CACHED\"\n\t\t\t}\n\t\t\tduration := b.msgList[i].Finished.Sub(b.msgList[i].Started)\n\t\t\tfmt.Printf(\"\\r%.3d [%s] [%s] %q took %5.2fs\\n\", b.msgList[i].Step, status, b.msgList[i].Image, b.msgList[i].Command, duration.Seconds())\n\t\t}\n\t}\n}\n\nfunc (b *Build) callbackForPush(msg *dockerclient.JSONMessage) {\n\tif e := msg.Err(); e != nil {\n\t\tlog.Printf(\"%s\", e)\n\t\treturn\n\t}\n\tmsg.Status = strings.TrimSpace(msg.Status)\n\tswitch {\n\tcase msg.Status == \"\":\n\t\t\/\/ ignore\n\tcase b.Verbose:\n\t\tfmt.Printf(\"%s\\n\", msg.Status)\n\tcase msg.Status == \"Buffering to disk\" && msg.Progress.Total > 0:\n\t\tfmt.Printf(\"\\r[%s] Buffering: %4.2f %%\", msg.Id, 100.0*float32(msg.Progress.Current)\/float32(msg.Progress.Total))\n\tcase msg.Status == \"Pushing\" && msg.Progress.Total > 0:\n\t\tfmt.Printf(\"\\r[%s] Pushing: %4.2f %%\", msg.Id, 100.0*float32(msg.Progress.Current)\/float32(msg.Progress.Total))\n\tcase msg.Status == \"Image successfully pushed\":\n\t\tfmt.Printf(\"\\r[%s] Pushing: ... done\\n\", msg.Id)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package trackello\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/VojtechVitek\/go-trello\"\n\t\"github.com\/klauern\/trackello\/rest\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Trackello represents the connection to Trello for a specific user.\ntype Trackello struct {\n\ttoken string\n\tappKey string\n\tclient *trello.Client\n}\n\n\/\/ Card is both the Trello Card + other stats on the actions in it.\ntype Card struct {\n\tcard *trello.Card\n\tstats *statistics\n}\n\n\/\/ List is both the Trello List + other stats on the actions in it.\ntype List struct {\n\tname string\n\tcards map[string]Card\n\tstats *statistics\n}\n\n\/\/ NewTrackello will create a `Trackello` type using your preferences application token and appkey.\nfunc NewTrackello() (*Trackello, error) {\n\ttoken := viper.GetString(\"token\")\n\tappKey := viper.GetString(\"appkey\")\n\n\t\/\/ New Trello Client\n\ttr, err := trello.NewAuthClient(appKey, &token)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\treturn &Trackello{\n\t\ttoken: token,\n\t\tappKey: appKey,\n\t\tclient: tr,\n\t}, nil\n}\n\n\/\/ PrimaryBoard will return a board based on your settings for a primary board.\nfunc (t *Trackello) PrimaryBoard() (trello.Board, error) {\n\tboard, err := t.client.Board(viper.GetString(\"board\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn *board, err\n\t}\n\treturn *board, nil\n}\n\n\/\/ BoardWithId will return the Trello Board given it's ID string.\nfunc (t *Trackello) BoardWithId(id string) (trello.Board, error) {\n\tboard, err := t.client.Board(id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn *board, err\n\t}\n\treturn *board, nil\n}\n\n\/\/ Boards will list all of the boards for the authenticated user (i.e. 'me').\nfunc (t *Trackello) Boards() ([]trello.Board, error) {\n\tmember, err := t.client.Member(\"me\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting 'me' Member: %v\", err)\n\t\treturn make([]trello.Board, 0), err\n\t}\n\tboards, err := member.Boards()\n\treturn boards, err\n}\n\nfunc (t *Trackello) getCardForAction(a trello.Action) (*trello.Card, error) {\n\treturn t.client.Card(a.Data.Card.Id)\n}\n\n\/\/ MapBoardActions takes the slice of []trello.Action and maps it to a Card and it's associated List.\nfunc (t *Trackello) MapBoardActions(actions []trello.Action) ([]List, error) {\n\tlistCards := make(map[string]List)\n\tfor _, action := range actions {\n\t\tif len(action.Data.Card.Id) > 0 {\n\t\t\tcard, err := t.getCardForAction(action)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist, err := t.client.List(card.IdList)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlc, ok := listCards[list.Name]\n\t\t\tif !ok {\n\t\t\t\tcards := make(map[string]Card)\n\t\t\t\tcards[card.Name] = Card{card: card}\n\t\t\t\tlistCards[list.Name] = List{\n\t\t\t\t\tname: list.Name,\n\t\t\t\t\tcards: cards,\n\t\t\t\t}\n\t\t\t\tlc = listCards[list.Name]\n\t\t\t}\n\t\t\tif _, cok := lc.cards[card.Name]; !cok {\n\t\t\t\tnewCard := Card{\n\t\t\t\t\tcard: card,\n\t\t\t\t}\n\t\t\t\tlc.cards[card.Name] = newCard\n\t\t\t}\n\t\t\tc, _ := lc.cards[card.Name]\n\t\t\tc.AddCalculation(action)\n\t\t\tlc.cards[card.Name] = c\n\t\t\tlistCards[list.Name] = lc\n\t\t}\n\t}\n\tlist := make([]List, len(listCards))\n\tfor _, v := range listCards {\n\t\tlist = append(list, v)\n\t}\n\treturn list, nil\n}\n\n\/\/ Print will print out a list and all of the cards to the command-line.\nfunc (l *List) Print() {\n\tif len(l.name) > 0 {\n\t\tfmt.Printf(\"%s\\n\", l.name)\n\t\tfor _, card := range l.cards {\n\t\t\tfmt.Printf(\" * %s\\n\", card.String())\n\t\t}\n\t}\n}\n\nfunc (c *Card) String() string {\n\treturn fmt.Sprintf(\"%s %s\", c.card.Name, c.stats.PrintStatistics())\n}\n\n\/\/ Board will pull the Trello Board with an ID. If id is \"\", it will pull it from the PrimaryBoard configuration setting.\nfunc (t *Trackello) Board(id string) (trello.Board, error) {\n\tif id == \"\" {\n\t\treturn t.PrimaryBoard()\n\t}\n\treturn t.BoardWithId(id)\n}\n\n\/\/ BoardActions will retrieve a slice of trello.Action based on the Board ID.\nfunc BoardActions(id string) ([]trello.Action, error) {\n\tt, err := NewTrackello()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tboard, err := t.Board(id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\targs := rest.CreateArgsForBoardActions()\n\tactions, err := board.Actions(args...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\treturn actions, err\n}\n<commit_msg>move some things around<commit_after>package trackello\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/VojtechVitek\/go-trello\"\n\t\"github.com\/klauern\/trackello\/rest\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Trackello represents the connection to Trello for a specific user.\ntype Trackello struct {\n\ttoken string\n\tappKey string\n\tclient *trello.Client\n}\n\n\/\/ Card is both the Trello Card + other stats on the actions in it.\ntype Card struct {\n\tcard *trello.Card\n\tstats *statistics\n}\n\n\/\/ List is both the Trello List + other stats on the actions in it.\ntype List struct {\n\tname string\n\tcards map[string]Card\n\tstats *statistics\n}\n\n\/\/ NewTrackello will create a `Trackello` type using your preferences application token and appkey.\nfunc NewTrackello() (*Trackello, error) {\n\ttoken := viper.GetString(\"token\")\n\tappKey := viper.GetString(\"appkey\")\n\n\t\/\/ New Trello Client\n\ttr, err := trello.NewAuthClient(appKey, &token)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\treturn &Trackello{\n\t\ttoken: token,\n\t\tappKey: appKey,\n\t\tclient: tr,\n\t}, nil\n}\n\n\/\/ PrimaryBoard will return a board based on your settings for a primary board.\nfunc (t *Trackello) PrimaryBoard() (trello.Board, error) {\n\tboard, err := t.client.Board(viper.GetString(\"board\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn *board, err\n\t}\n\treturn *board, nil\n}\n\n\/\/ BoardWithId will return the Trello Board given it's ID string.\nfunc (t *Trackello) BoardWithId(id string) (trello.Board, error) {\n\tboard, err := t.client.Board(id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn *board, err\n\t}\n\treturn *board, nil\n}\n\n\/\/ Boards will list all of the boards for the authenticated user (i.e. 'me').\nfunc (t *Trackello) Boards() ([]trello.Board, error) {\n\tmember, err := t.client.Member(\"me\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting 'me' Member: %v\", err)\n\t\treturn make([]trello.Board, 0), err\n\t}\n\tboards, err := member.Boards()\n\treturn boards, err\n}\n\nfunc (t *Trackello) getCardForAction(a trello.Action) (*trello.Card, error) {\n\treturn t.client.Card(a.Data.Card.Id)\n}\n\n\/\/ MapBoardActions takes the slice of []trello.Action and maps it to a Card and it's associated List.\nfunc (t *Trackello) MapBoardActions(actions []trello.Action) ([]List, error) {\n\tlistCards := make(map[string]List)\n\tfor _, action := range actions {\n\t\tif len(action.Data.Card.Id) > 0 {\n\t\t\tcard, err := t.getCardForAction(action)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist, err := t.client.List(card.IdList)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlc, ok := listCards[list.Name]\n\t\t\tif !ok {\n\t\t\t\tlc = newCardMap(card, listCards, list.Name)\n\t\t\t\tlc = listCards[list.Name]\n\t\t\t}\n\t\t\tif _, cok := lc.cards[card.Name]; !cok {\n\t\t\t\tnewCard := Card{\n\t\t\t\t\tcard: card,\n\t\t\t\t}\n\t\t\t\tlc.cards[card.Name] = newCard\n\t\t\t}\n\t\t\tc, _ := lc.cards[card.Name]\n\t\t\tc.AddCalculation(action)\n\t\t\tlc.cards[card.Name] = c\n\t\t\tlistCards[list.Name] = lc\n\t\t}\n\t}\n\treturn makeList(listCards), nil\n}\n\nfunc newCardMap(card trello.Card, listCards map[string]List, listName string) map[string]Card {\n\tcards := make(map[string]Card)\n\tcards[card.Name] = Card{card: card}\n\tlistCards[listName] = List{\n\t\tname: listName,\n\t\tcards: cards,\n\t}\n\treturn cards\n}\n\nfunc makeList(listMap map[string]List) []List {\n\tlist := make([]List, len(listMap))\n\tfor _, v := range listMap {\n\t\tlist = append(list, v)\n\t}\n\treturn list\n}\n\n\/\/ Print will print out a list and all of the cards to the command-line.\nfunc (l *List) Print() {\n\tif len(l.name) > 0 {\n\t\tfmt.Printf(\"%s\\n\", l.name)\n\t\tfor _, card := range l.cards {\n\t\t\tfmt.Printf(\" * %s\\n\", card.String())\n\t\t}\n\t}\n}\n\nfunc (c *Card) String() string {\n\treturn fmt.Sprintf(\"%s %s\", c.card.Name, c.stats.PrintStatistics())\n}\n\n\/\/ Board will pull the Trello Board with an ID. If id is \"\", it will pull it from the PrimaryBoard configuration setting.\nfunc (t *Trackello) Board(id string) (trello.Board, error) {\n\tif id == \"\" {\n\t\treturn t.PrimaryBoard()\n\t}\n\treturn t.BoardWithId(id)\n}\n\n\/\/ BoardActions will retrieve a slice of trello.Action based on the Board ID.\nfunc BoardActions(id string) ([]trello.Action, error) {\n\tt, err := NewTrackello()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tboard, err := t.Board(id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\targs := rest.CreateArgsForBoardActions()\n\tactions, err := board.Actions(args...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\treturn actions, err\n}\n<|endoftext|>"} {"text":"<commit_before>package crypto\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"github.com\/Supernomad\/quantum\/common\"\n\t\"github.com\/Supernomad\/quantum\/logger\"\n)\n\ntype GCM struct {\n\tlog *logger.Logger\n\tecdh *ECDH\n}\n\nfunc RandomBytes(b []byte) ([]byte, error) {\n\t_, err := rand.Read(b)\n\treturn b, err\n}\n\nfunc (gcm *GCM) Seal(payload *common.Payload) (*common.Payload, bool) {\n\t\/\/ Generate the shared key\n\tkey := gcm.ecdh.GenerateSharedSecret(payload.PublicKey)\n\n\t\/\/ Grab a new random nonce\n\t_, err := RandomBytes(payload.Nonce)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating random nonce:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the block ciper\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating ciper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the GCM block wrapper\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating gcm wrapper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Seal the packet and associated meta data\n\taesgcm.Seal(payload.Packet[:0], payload.Nonce, payload.Packet, nil)\n\n\tcopy(payload.Key, gcm.ecdh.PublicKey[:])\n\treturn payload, true\n}\n\nfunc (gcm *GCM) Unseal(payload *common.Payload) (*common.Payload, bool) {\n\t\/\/ Generate the shared key\n\tkey := gcm.ecdh.GenerateSharedSecret(payload.Key)\n\n\t\/\/ Get the block ciper\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating ciper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the GCM block wrapper\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating gcm wrapper:\", err)\n\t\treturn payload, false\n\t}\n\n\t_, err = aesgcm.Open(payload.Packet[:0], payload.Nonce, payload.Packet, nil)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error decrypting\/authenticating packet:\", err)\n\t\treturn payload, false\n\t}\n\n\treturn payload, true\n}\n\nfunc NewGCM(log *logger.Logger, ecdh *ECDH) *GCM {\n\treturn &GCM{log: log, ecdh: ecdh}\n}\n<commit_msg>Missed minor change to have gcm include the senders public key in the verification process<commit_after>package crypto\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"github.com\/Supernomad\/quantum\/common\"\n\t\"github.com\/Supernomad\/quantum\/logger\"\n)\n\ntype GCM struct {\n\tlog *logger.Logger\n\tecdh *ECDH\n}\n\nfunc RandomBytes(b []byte) ([]byte, error) {\n\t_, err := rand.Read(b)\n\treturn b, err\n}\n\nfunc (gcm *GCM) Seal(payload *common.Payload) (*common.Payload, bool) {\n\t\/\/ Generate the shared key\n\tkey := gcm.ecdh.GenerateSharedSecret(payload.PublicKey)\n\n\t\/\/ Grab a new random nonce\n\t_, err := RandomBytes(payload.Nonce)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating random nonce:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the block ciper\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating ciper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the GCM block wrapper\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating gcm wrapper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Seal the packet and associated meta data\n\taesgcm.Seal(payload.Packet[:0], payload.Nonce, payload.Packet, gcm.ecdh.PublicKey[:])\n\n\tcopy(payload.Key, gcm.ecdh.PublicKey[:])\n\treturn payload, true\n}\n\nfunc (gcm *GCM) Unseal(payload *common.Payload) (*common.Payload, bool) {\n\t\/\/ Generate the shared key\n\tkey := gcm.ecdh.GenerateSharedSecret(payload.Key)\n\n\t\/\/ Get the block ciper\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating ciper:\", err)\n\t\treturn payload, false\n\t}\n\n\t\/\/ Get the GCM block wrapper\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error generating gcm wrapper:\", err)\n\t\treturn payload, false\n\t}\n\n\t_, err = aesgcm.Open(payload.Packet[:0], payload.Nonce, payload.Packet, payload.Key)\n\tif err != nil {\n\t\tgcm.log.Error(\"[GCM]\", \"Error decrypting\/authenticating packet:\", err)\n\t\treturn payload, false\n\t}\n\n\treturn payload, true\n}\n\nfunc NewGCM(log *logger.Logger, ecdh *ECDH) *GCM {\n\treturn &GCM{log: log, ecdh: ecdh}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage jsonschema\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/katydid\/katydid\/funcs\"\n\t\"github.com\/katydid\/katydid\/relapse\/ast\"\n\t\"github.com\/katydid\/katydid\/relapse\/combinator\"\n\t\"sort\"\n)\n\n\/\/TODO\nfunc TranslateDraft4(jsonSchema []byte) (*relapse.Grammar, error) {\n\tschema := &Schema{}\n\tif err := json.Unmarshal(jsonSchema, schema); err != nil {\n\t\tpanic(err)\n\t}\n\tp, err := translate(schema)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn relapse.NewGrammar(relapse.RefLookup(map[string]*relapse.Pattern{\"main\": p})), nil\n}\n\nfunc translate(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.Id != nil {\n\t\treturn nil, fmt.Errorf(\"id not supported\")\n\t}\n\tif schema.Default != nil {\n\t\treturn nil, fmt.Errorf(\"default not supported\")\n\t}\n\tif schema.HasNumericConstraints() {\n\t\tp, err := translateNumeric(schema)\n\t\treturn p, err\n\t}\n\tif schema.HasStringConstraints() {\n\t\tp, err := translateString(schema)\n\t\treturn p, err\n\t}\n\tif schema.HasArrayConstraints() {\n\t\treturn nil, fmt.Errorf(\"array not supported\")\n\t}\n\tif schema.HasObjectConstraints() {\n\t\treturn nil, fmt.Errorf(\"object not supported\")\n\t}\n\tif schema.HasInstanceConstraints() {\n\t\treturn nil, fmt.Errorf(\"instance not supported\")\n\t}\n\n\tif schema.Ref != nil {\n\t\treturn nil, fmt.Errorf(\"ref not supported\")\n\t}\n\tif schema.Format != nil {\n\t\treturn nil, fmt.Errorf(\"format not supported\")\n\t}\n\treturn relapse.NewEmptySet(), nil\n}\n\nfunc translateObject(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.MaxProperties != nil {\n\t\treturn nil, fmt.Errorf(\"maxProperties not supported\")\n\t}\n\tif schema.MinProperties > 0 {\n\t\treturn nil, fmt.Errorf(\"minProperties not supported\")\n\t}\n\trequired := make(map[string]struct{})\n\tfor _, req := range schema.Required {\n\t\trequired[req] = struct{}{}\n\t}\n\trequiredIf := make(map[string][]string)\n\tmoreProperties := make(map[string]*Schema)\n\tif schema.Dependencies != nil {\n\t\tdeps := *schema.Dependencies\n\t\tfor name, dep := range deps {\n\t\t\tif len(dep.RequiredProperty) > 0 {\n\t\t\t\trequiredIf[name] = deps[name].RequiredProperty\n\t\t\t} else {\n\t\t\t\tmoreProperties[name] = deps[name].Schema\n\t\t\t}\n\t\t}\n\t}\n\tadditional := relapse.NewZAny()\n\tif schema.AdditionalProperties != nil {\n\t\tif schema.AdditionalProperties.Bool != nil && !(*schema.AdditionalProperties.Bool) {\n\t\t\tadditional = relapse.NewEmpty()\n\t\t} else if schema.AdditionalProperties.Type != TypeUnknown {\n\t\t\tvar typ *relapse.Pattern\n\t\t\tswitch schema.AdditionalProperties.Type {\n\t\t\tcase TypeArray:\n\t\t\t\treturn nil, fmt.Errorf(\"type array in additionalProperties not supported\")\n\t\t\tcase TypeBoolean:\n\t\t\t\ttyp = combinator.Value(funcs.TypeBool(funcs.BoolVar()))\n\t\t\tcase TypeInteger:\n\t\t\t\ttyp = combinator.Value(funcs.TypeDouble(Integer()))\n\t\t\tcase TypeNull:\n\t\t\t\ttyp = relapse.NewEmpty()\n\t\t\tcase TypeNumber:\n\t\t\t\ttyp = combinator.Value(funcs.TypeDouble(Number()))\n\t\t\tcase TypeObject:\n\t\t\t\treturn nil, fmt.Errorf(\"type object in additionalProperties not supported\")\n\t\t\tcase TypeString:\n\t\t\t\ttyp = combinator.Value(funcs.TypeString(funcs.StringVar()))\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unknown simpletype in additional properties: %s\",\n\t\t\t\t\tschema.AdditionalProperties.Type))\n\t\t\t}\n\t\t\tadditional = relapse.NewZeroOrMore(\n\t\t\t\trelapse.NewTreeNode(relapse.NewAnyName(), typ),\n\t\t\t)\n\t\t}\n\t}\n\tnames := []string{}\n\tfor name, _ := range schema.Properties {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tpatterns := make(map[string]*relapse.Pattern)\n\tfor _, name := range names {\n\t\tchild, err := translate(schema.Properties[name])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpatterns[name] = relapse.NewTreeNode(relapse.NewName(name), child)\n\t}\n\t_ = additional\n\tfor _, name := range names {\n\t\tif requires, ok := requiredIf[name]; ok {\n\t\t\t_ = requires\n\t\t}\n\t\tif s, ok := moreProperties[name]; ok {\n\t\t\t_ = s\n\t\t}\n\t\tif _, ok := required[name]; !ok {\n\t\t\t\/\/prop = optional(prop)\n\t\t}\n\t}\n\tif len(schema.PatternProperties) > 0 {\n\t\treturn nil, fmt.Errorf(\"patternProperties not supported\")\n\t}\n\tpanic(\"todo\")\n}\n\nfunc optional(p *relapse.Pattern) *relapse.Pattern {\n\treturn relapse.NewOr(relapse.NewEmpty(), p)\n}\n\nfunc translateNumeric(schema *Schema) (*relapse.Pattern, error) {\n\tv := Number()\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with numeric constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] == TypeInteger {\n\t\t\tv = Integer()\n\t\t} else if schema.GetType()[0] != TypeNumber {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with numeric constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tlist := []funcs.Bool{}\n\tif schema.MultipleOf != nil {\n\t\tmult := MultipleOf(v, funcs.DoubleConst(*schema.MultipleOf))\n\t\tlist = append(list, mult)\n\t}\n\tif schema.Maximum != nil {\n\t\tlt := funcs.DoubleLE(v, funcs.DoubleConst(*schema.Maximum))\n\t\tif schema.ExclusiveMaximum {\n\t\t\tlt = funcs.DoubleLt(v, funcs.DoubleConst(*schema.Maximum))\n\t\t}\n\t\tlist = append(list, lt)\n\t}\n\tif schema.Minimum != nil {\n\t\tlt := funcs.DoubleGE(v, funcs.DoubleConst(*schema.Minimum))\n\t\tif schema.ExclusiveMinimum {\n\t\t\tlt = funcs.DoubleGt(v, funcs.DoubleConst(*schema.Minimum))\n\t\t}\n\t\tlist = append(list, lt)\n\t}\n\treturn combinator.Value(and(list)), nil\n}\n\nfunc and(list []funcs.Bool) funcs.Bool {\n\tif len(list) == 0 {\n\t\tpanic(\"unreachable\")\n\t}\n\tif len(list) == 1 {\n\t\treturn list[0]\n\t}\n\treturn funcs.And(list[0], and(list[1:]))\n}\n\nfunc translateString(schema *Schema) (*relapse.Pattern, error) {\n\tv := funcs.StringVar()\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with string constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] != TypeString {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with string constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tlist := []funcs.Bool{}\n\tif schema.MaxLength != nil {\n\t\tlist = append(list, MaxLength(v, *schema.MaxLength))\n\t}\n\tif schema.MinLength > 0 {\n\t\tlist = append(list, MinLength(v, schema.MinLength))\n\t}\n\tif schema.Pattern != nil {\n\t\tlist = append(list, funcs.Regex(funcs.StringConst(*schema.Pattern), v))\n\t}\n\treturn combinator.Value(and(list)), nil\n}\n\nfunc translateArray(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with array constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] != TypeArray {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with array constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tif schema.UniqueItems {\n\t\treturn nil, fmt.Errorf(\"uniqueItems are not supported\")\n\t}\n\tif schema.MaxItems != nil {\n\t\treturn nil, fmt.Errorf(\"maxItems are not supported\")\n\t}\n\tif schema.MinItems > 0 {\n\t\treturn nil, fmt.Errorf(\"minItems are not supported\")\n\t}\n\tadditionalItems := true\n\tif schema.AdditionalItems != nil {\n\t\tif schema.Items == nil {\n\t\t\t\/\/any\n\t\t}\n\t\tif schema.AdditionalItems.Bool != nil {\n\t\t\tadditionalItems = *schema.AdditionalItems.Bool\n\t\t}\n\t\tif !additionalItems && (schema.MaxLength != nil || schema.MinLength > 0) {\n\t\t\treturn nil, fmt.Errorf(\"additionalItems: false and (maxItems|minItems) are not supported together\")\n\t\t}\n\t\treturn nil, fmt.Errorf(\"additionalItems are not supported\")\n\t}\n\tif schema.Items != nil {\n\t\tif schema.Items.Object != nil {\n\t\t\tif schema.Items.Object.Type == nil {\n\t\t\t\t\/\/any\n\t\t\t} else {\n\t\t\t\ttyp := schema.Items.Object.GetType()[0]\n\t\t\t\t_ = typ\n\t\t\t}\n\t\t\t\/\/TODO this specifies the type of every item in the list\n\t\t} else if schema.Items.Array != nil {\n\t\t\tif !additionalItems {\n\t\t\t\t\/\/TODO this specifies the length of the list as well as each ordered element's type\n\t\t\t\t\/\/ if no type is set then any type is accepted\n\t\t\t\tmaxLength := len(schema.Items.Array)\n\t\t\t\t_ = maxLength\n\t\t\t} else {\n\t\t\t\t\/\/TODO this specifies the types of the first few ordered items in the list\n\t\t\t\t\/\/ if no type is set then any type is accepted\n\t\t\t}\n\n\t\t}\n\t\treturn nil, fmt.Errorf(\"items are not supported\")\n\t}\n\treturn nil, nil\n}\n<commit_msg>some instance support<commit_after>\/\/ Copyright 2015 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage jsonschema\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/katydid\/katydid\/funcs\"\n\t\"github.com\/katydid\/katydid\/relapse\/ast\"\n\t\"github.com\/katydid\/katydid\/relapse\/combinator\"\n\t\"sort\"\n)\n\n\/\/TODO\nfunc TranslateDraft4(jsonSchema []byte) (*relapse.Grammar, error) {\n\tschema := &Schema{}\n\tif err := json.Unmarshal(jsonSchema, schema); err != nil {\n\t\tpanic(err)\n\t}\n\tp, err := translate(schema)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn relapse.NewGrammar(relapse.RefLookup(map[string]*relapse.Pattern{\"main\": p})), nil\n}\n\nfunc translate(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.Id != nil {\n\t\treturn nil, fmt.Errorf(\"id not supported\")\n\t}\n\tif schema.Default != nil {\n\t\treturn nil, fmt.Errorf(\"default not supported\")\n\t}\n\tif schema.HasNumericConstraints() {\n\t\tp, err := translateNumeric(schema)\n\t\treturn p, err\n\t}\n\tif schema.HasStringConstraints() {\n\t\tp, err := translateString(schema)\n\t\treturn p, err\n\t}\n\tif schema.HasArrayConstraints() {\n\t\treturn nil, fmt.Errorf(\"array not supported\")\n\t}\n\tif schema.HasObjectConstraints() {\n\t\tp, err := translateObject(schema)\n\t\treturn p, err\n\t}\n\tif schema.HasInstanceConstraints() {\n\t\tp, err := translateInstance(schema)\n\t\treturn p, err\n\t}\n\n\tif schema.Ref != nil {\n\t\treturn nil, fmt.Errorf(\"ref not supported\")\n\t}\n\tif schema.Format != nil {\n\t\treturn nil, fmt.Errorf(\"format not supported\")\n\t}\n\treturn relapse.NewEmptySet(), nil\n}\n\nfunc translates(schemas []*Schema) ([]*relapse.Pattern, error) {\n\tps := make([]*relapse.Pattern, len(schemas))\n\tfor i := range schemas {\n\t\tvar err error\n\t\tps[i], err = translate(schemas[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn ps, nil\n}\n\nfunc rest(patterns []*relapse.Pattern, index int) []*relapse.Pattern {\n\treturn append(patterns[:index], patterns[index+1:]...)\n}\n\nfunc translateInstance(schema *Schema) (*relapse.Pattern, error) {\n\tif len(schema.Definitions) > 0 {\n\t\treturn nil, fmt.Errorf(\"definitions not supported\")\n\t}\n\tif len(schema.Enum) > 0 {\n\t\treturn nil, fmt.Errorf(\"enum not supported\")\n\t}\n\tif schema.Type != nil {\n\t\ttypes := *schema.Type\n\t\tif len(types) == 1 {\n\t\t\treturn translateType(types[0])\n\t\t}\n\t\tps := make([]*relapse.Pattern, len(types))\n\t\tfor i := range types {\n\t\t\tvar err error\n\t\t\tps[i], err = translateType(types[i])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn relapse.NewOr(ps...), nil\n\t}\n\tif len(schema.AllOf) > 0 {\n\t\tps, err := translates(schema.AllOf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn relapse.NewAnd(ps...), nil\n\t}\n\tif len(schema.AnyOf) > 0 {\n\t\tps, err := translates(schema.AnyOf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn relapse.NewOr(ps...), nil\n\t}\n\tif len(schema.OneOf) > 0 {\n\t\tps, err := translates(schema.OneOf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(ps) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"oneof of zero schemas not supported\")\n\t\t}\n\t\tif len(ps) == 1 {\n\t\t\treturn ps[0], nil\n\t\t}\n\t\torps := make([]*relapse.Pattern, len(ps))\n\t\tfor i := range ps {\n\t\t\tother := rest(ps, i)\n\t\t\torps[i] = relapse.NewAnd(\n\t\t\t\tps[i],\n\t\t\t\trelapse.NewNot(\n\t\t\t\t\trelapse.NewOr(other...),\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t\treturn relapse.NewOr(orps...), nil\n\t}\n\tif schema.Not != nil {\n\t\tp, err := translate(schema.Not)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn relapse.NewNot(p), nil\n\t}\n\tpanic(\"unreachable object\")\n}\n\nfunc translateType(typ SimpleType) (*relapse.Pattern, error) {\n\tswitch typ {\n\tcase TypeArray:\n\t\treturn nil, fmt.Errorf(\"type array not supported\")\n\tcase TypeBoolean:\n\t\treturn combinator.Value(funcs.TypeBool(funcs.BoolVar())), nil\n\tcase TypeInteger:\n\t\treturn combinator.Value(funcs.TypeDouble(Integer())), nil\n\tcase TypeNull:\n\t\treturn relapse.NewEmpty(), nil\n\tcase TypeNumber:\n\t\treturn combinator.Value(funcs.TypeDouble(Number())), nil\n\tcase TypeObject:\n\t\treturn nil, fmt.Errorf(\"type object not supported\")\n\tcase TypeString:\n\t\treturn combinator.Value(funcs.TypeString(funcs.StringVar())), nil\n\t}\n\tpanic(fmt.Sprintf(\"unknown simpletype: %s\", typ))\n}\n\nfunc translateObject(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.MaxProperties != nil {\n\t\treturn nil, fmt.Errorf(\"maxProperties not supported\")\n\t}\n\tif schema.MinProperties > 0 {\n\t\treturn nil, fmt.Errorf(\"minProperties not supported\")\n\t}\n\trequired := make(map[string]struct{})\n\tfor _, req := range schema.Required {\n\t\trequired[req] = struct{}{}\n\t}\n\trequiredIf := make(map[string][]string)\n\tmoreProperties := make(map[string]*Schema)\n\tif schema.Dependencies != nil {\n\t\tdeps := *schema.Dependencies\n\t\tfor name, dep := range deps {\n\t\t\tif len(dep.RequiredProperty) > 0 {\n\t\t\t\trequiredIf[name] = deps[name].RequiredProperty\n\t\t\t} else {\n\t\t\t\tmoreProperties[name] = deps[name].Schema\n\t\t\t}\n\t\t}\n\t}\n\tadditional := relapse.NewZAny()\n\tif schema.AdditionalProperties != nil {\n\t\tif schema.AdditionalProperties.Bool != nil && !(*schema.AdditionalProperties.Bool) {\n\t\t\tadditional = relapse.NewEmpty()\n\t\t} else if schema.AdditionalProperties.Type != TypeUnknown {\n\t\t\ttyp, err := translateType(schema.AdditionalProperties.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tadditional = relapse.NewZeroOrMore(\n\t\t\t\trelapse.NewTreeNode(relapse.NewAnyName(), typ),\n\t\t\t)\n\t\t}\n\t}\n\tnames := []string{}\n\tfor name, _ := range schema.Properties {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tpatterns := make(map[string]*relapse.Pattern)\n\tfor _, name := range names {\n\t\tchild, err := translate(schema.Properties[name])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpatterns[name] = relapse.NewTreeNode(relapse.NewName(name), child)\n\t}\n\t_ = additional\n\tfor _, name := range names {\n\t\tif requires, ok := requiredIf[name]; ok {\n\t\t\t_ = requires\n\t\t}\n\t\tif s, ok := moreProperties[name]; ok {\n\t\t\t_ = s\n\t\t}\n\t\tif _, ok := required[name]; !ok {\n\t\t\t\/\/prop = optional(prop)\n\t\t}\n\t}\n\tif len(schema.PatternProperties) > 0 {\n\t\treturn nil, fmt.Errorf(\"patternProperties not supported\")\n\t}\n\treturn nil, fmt.Errorf(\"object not fully supported\")\n}\n\nfunc optional(p *relapse.Pattern) *relapse.Pattern {\n\treturn relapse.NewOr(relapse.NewEmpty(), p)\n}\n\nfunc translateNumeric(schema *Schema) (*relapse.Pattern, error) {\n\tv := Number()\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with numeric constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] == TypeInteger {\n\t\t\tv = Integer()\n\t\t} else if schema.GetType()[0] != TypeNumber {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with numeric constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tlist := []funcs.Bool{}\n\tif schema.MultipleOf != nil {\n\t\tmult := MultipleOf(v, funcs.DoubleConst(*schema.MultipleOf))\n\t\tlist = append(list, mult)\n\t}\n\tif schema.Maximum != nil {\n\t\tlt := funcs.DoubleLE(v, funcs.DoubleConst(*schema.Maximum))\n\t\tif schema.ExclusiveMaximum {\n\t\t\tlt = funcs.DoubleLt(v, funcs.DoubleConst(*schema.Maximum))\n\t\t}\n\t\tlist = append(list, lt)\n\t}\n\tif schema.Minimum != nil {\n\t\tlt := funcs.DoubleGE(v, funcs.DoubleConst(*schema.Minimum))\n\t\tif schema.ExclusiveMinimum {\n\t\t\tlt = funcs.DoubleGt(v, funcs.DoubleConst(*schema.Minimum))\n\t\t}\n\t\tlist = append(list, lt)\n\t}\n\treturn combinator.Value(and(list)), nil\n}\n\nfunc and(list []funcs.Bool) funcs.Bool {\n\tif len(list) == 0 {\n\t\tpanic(\"unreachable\")\n\t}\n\tif len(list) == 1 {\n\t\treturn list[0]\n\t}\n\treturn funcs.And(list[0], and(list[1:]))\n}\n\nfunc translateString(schema *Schema) (*relapse.Pattern, error) {\n\tv := funcs.StringVar()\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with string constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] != TypeString {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with string constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tlist := []funcs.Bool{}\n\tif schema.MaxLength != nil {\n\t\tlist = append(list, MaxLength(v, *schema.MaxLength))\n\t}\n\tif schema.MinLength > 0 {\n\t\tlist = append(list, MinLength(v, schema.MinLength))\n\t}\n\tif schema.Pattern != nil {\n\t\tlist = append(list, funcs.Regex(funcs.StringConst(*schema.Pattern), v))\n\t}\n\treturn combinator.Value(and(list)), nil\n}\n\nfunc translateArray(schema *Schema) (*relapse.Pattern, error) {\n\tif schema.Type != nil {\n\t\tif len(*schema.Type) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"list of types not supported with array constraints %#v\", schema)\n\t\t}\n\t\tif schema.GetType()[0] != TypeArray {\n\t\t\treturn nil, fmt.Errorf(\"%v not supported with array constraints\", schema.GetType()[0])\n\t\t}\n\t}\n\tif schema.UniqueItems {\n\t\treturn nil, fmt.Errorf(\"uniqueItems are not supported\")\n\t}\n\tif schema.MaxItems != nil {\n\t\treturn nil, fmt.Errorf(\"maxItems are not supported\")\n\t}\n\tif schema.MinItems > 0 {\n\t\treturn nil, fmt.Errorf(\"minItems are not supported\")\n\t}\n\tadditionalItems := true\n\tif schema.AdditionalItems != nil {\n\t\tif schema.Items == nil {\n\t\t\t\/\/any\n\t\t}\n\t\tif schema.AdditionalItems.Bool != nil {\n\t\t\tadditionalItems = *schema.AdditionalItems.Bool\n\t\t}\n\t\tif !additionalItems && (schema.MaxLength != nil || schema.MinLength > 0) {\n\t\t\treturn nil, fmt.Errorf(\"additionalItems: false and (maxItems|minItems) are not supported together\")\n\t\t}\n\t\treturn nil, fmt.Errorf(\"additionalItems are not supported\")\n\t}\n\tif schema.Items != nil {\n\t\tif schema.Items.Object != nil {\n\t\t\tif schema.Items.Object.Type == nil {\n\t\t\t\t\/\/any\n\t\t\t} else {\n\t\t\t\ttyp := schema.Items.Object.GetType()[0]\n\t\t\t\t_ = typ\n\t\t\t}\n\t\t\t\/\/TODO this specifies the type of every item in the list\n\t\t} else if schema.Items.Array != nil {\n\t\t\tif !additionalItems {\n\t\t\t\t\/\/TODO this specifies the length of the list as well as each ordered element's type\n\t\t\t\t\/\/ if no type is set then any type is accepted\n\t\t\t\tmaxLength := len(schema.Items.Array)\n\t\t\t\t_ = maxLength\n\t\t\t} else {\n\t\t\t\t\/\/TODO this specifies the types of the first few ordered items in the list\n\t\t\t\t\/\/ if no type is set then any type is accepted\n\t\t\t}\n\n\t\t}\n\t\treturn nil, fmt.Errorf(\"items are not supported\")\n\t}\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\r\n\r\n\/*\r\nBinary vnc_to_vm connects VNC to a remote VM via its socket file over SSH.\r\n- connect to the local SSH agent in cygwin\r\n- connect to the remote server with SSH\r\n- relay the remote VNC socket file over SSH into a local listener\r\n- start VNC viewer with it\r\n\r\nRequirements:\r\n- cygwin, with ssh and socat installed, ssh-agent running\r\n- socat on the destination, TightVNC viewer locally\r\n\r\nTo avoid opening a command window:\r\n go build -ldflags -H=windowsgui vnc_to_vm.go\r\n*\/\r\n\r\npackage main\r\n\r\nimport (\r\n\t\"flag\"\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"log\"\r\n\t\"net\"\r\n\t\"os\/exec\"\r\n\t\"regexp\"\r\n\t\"strings\"\r\n\r\n\t\"github.com\/StalkR\/misc\/windows\/cygwin\"\r\n\t\"golang.org\/x\/crypto\/ssh\"\r\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\r\n)\r\n\r\nvar (\r\n\tflagHost = flag.String(\"host\", \"\", \"Host to SSH to (user@host[:port]).\")\r\n\tflagSocket = flag.String(\"socket\", \"\", \"Path to remote VNC socket.\")\r\n\tflagViewer = flag.String(\"viewer\", `C:\\Program Files\\TightVNC\\tvnviewer.exe`, \"Path to local TightVNC viewer.\")\r\n)\r\n\r\nfunc main() {\r\n\tflag.Parse()\r\n\tif err := launch(); err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n}\r\n\r\nfunc launch() error {\r\n\tif *flagHost == \"\" || *flagSocket == \"\" {\r\n\t\tflag.PrintDefaults()\r\n\t\treturn nil\r\n\t}\r\n\tuserHost := regexp.MustCompile(`^([^@]*)@(.*)$`).FindStringSubmatch(*flagHost)\r\n\tif len(userHost) == 0 {\r\n\t\treturn fmt.Errorf(\"invalid host\")\r\n\t}\r\n\tuser, host := userHost[1], userHost[2]\r\n\tif strings.Contains(*flagSocket, `\"`) || strings.Contains(*flagSocket, `'`) {\r\n\t\treturn fmt.Errorf(\"invalid socket path\")\r\n\t}\r\n\tsocket := *flagSocket\r\n\r\n\tvnc, err := vncViewer()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer vnc.Close()\r\n\treturn sshToVNC(user, host, socket, vnc)\r\n}\r\n\r\nfunc vncViewer() (io.ReadWriteCloser, error) {\r\n\tln, err := net.Listen(\"tcp\", \"localhost:0\")\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tport := ln.Addr().(*net.TCPAddr).Port\r\n\tcmd := exec.Command(*flagViewer, fmt.Sprintf(\"localhost::%d\", port))\r\n\tif err := cmd.Start(); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tconn, err := ln.Accept()\r\n\tif err != nil {\r\n\t\tcmd.Wait()\r\n\t\treturn nil, err\r\n\t}\r\n\tif err := ln.Close(); err != nil {\r\n\t\tcmd.Wait()\r\n\t\treturn nil, err\r\n\t}\r\n\tlog.Printf(\"VNC viewer connected (localhost:%v)\", port)\r\n\treturn &vncConn{conn, cmd}, nil\r\n}\r\n\r\n\/\/ vncConn implements io.ReadWriteCloser.\r\ntype vncConn struct {\r\n\tconn io.ReadWriteCloser\r\n\tcmd *exec.Cmd\r\n}\r\n\r\nfunc (s *vncConn) Read(p []byte) (int, error) { return s.conn.Read(p) }\r\nfunc (s *vncConn) Write(p []byte) (int, error) { return s.conn.Write(p) }\r\nfunc (s *vncConn) Close() error {\r\n\tvar errors []error\r\n\tif err := s.conn.Close(); err != nil {\r\n\t\terrors = append(errors, err)\r\n\t}\r\n\tif err := s.cmd.Wait(); err != nil {\r\n\t\terrors = append(errors, err)\r\n\t}\r\n\tif len(errors) > 0 {\r\n\t\tif len(errors) == 1 {\r\n\t\t\treturn errors[0]\r\n\t\t}\r\n\t\treturn fmt.Errorf(\"close: %v errors, first: %v\", len(errors), errors[0])\r\n\t}\r\n\treturn nil\r\n}\r\n\r\nfunc sshToVNC(user, host, socket string, vnc io.ReadWriter) error {\r\n\tclient, err := connect(user, host)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer client.Close()\r\n\tlog.Print(\"Connected to SSH server\")\r\n\tsession, err := client.NewSession()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer session.Close()\r\n\tsession.Stdin = vnc\r\n\tsession.Stdout = vnc\r\n\tif err := session.Start(fmt.Sprintf(`socat 'UNIX:\"%s\"' -`, socket)); err != nil {\r\n\t\treturn err\r\n\t}\r\n\tlog.Print(\"Connected to VNC via SSH\")\r\n\treturn session.Wait()\r\n}\r\n\r\nfunc connect(user, host string) (*ssh.Client, error) {\r\n\tag, err := cygwin.SSHAgent()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tdefer ag.Close()\r\n\tconfig := &ssh.ClientConfig{\r\n\t\tUser: user,\r\n\t\tAuth: []ssh.AuthMethod{\r\n\t\t\tssh.PublicKeysCallback(agent.NewClient(ag).Signers),\r\n\t\t},\r\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\r\n\t}\r\n\treturn ssh.Dial(\"tcp\", host, config)\r\n}\r\n<commit_msg>kvm\/vnc_to_vm: update cygwin library, moved to github.com\/StalkR\/winpulse<commit_after>\/\/ +build windows\r\n\r\n\/*\r\nBinary vnc_to_vm connects VNC to a remote VM via its socket file over SSH.\r\n- connect to the local SSH agent in cygwin\r\n- connect to the remote server with SSH\r\n- relay the remote VNC socket file over SSH into a local listener\r\n- start VNC viewer with it\r\n\r\nRequirements:\r\n- cygwin, with ssh and socat installed, ssh-agent running\r\n- socat on the destination, TightVNC viewer locally\r\n\r\nTo avoid opening a command window:\r\n go build -ldflags -H=windowsgui vnc_to_vm.go\r\n*\/\r\n\r\npackage main\r\n\r\nimport (\r\n\t\"flag\"\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"log\"\r\n\t\"net\"\r\n\t\"os\/exec\"\r\n\t\"regexp\"\r\n\t\"strings\"\r\n\r\n\t\"github.com\/StalkR\/winpulse\/cygwin\"\r\n\t\"golang.org\/x\/crypto\/ssh\"\r\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\r\n)\r\n\r\nvar (\r\n\tflagHost = flag.String(\"host\", \"\", \"Host to SSH to (user@host[:port]).\")\r\n\tflagSocket = flag.String(\"socket\", \"\", \"Path to remote VNC socket.\")\r\n\tflagViewer = flag.String(\"viewer\", `C:\\Program Files\\TightVNC\\tvnviewer.exe`, \"Path to local TightVNC viewer.\")\r\n)\r\n\r\nfunc main() {\r\n\tflag.Parse()\r\n\tif err := launch(); err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n}\r\n\r\nfunc launch() error {\r\n\tif *flagHost == \"\" || *flagSocket == \"\" {\r\n\t\tflag.PrintDefaults()\r\n\t\treturn nil\r\n\t}\r\n\tuserHost := regexp.MustCompile(`^([^@]*)@(.*)$`).FindStringSubmatch(*flagHost)\r\n\tif len(userHost) == 0 {\r\n\t\treturn fmt.Errorf(\"invalid host\")\r\n\t}\r\n\tuser, host := userHost[1], userHost[2]\r\n\tif strings.Contains(*flagSocket, `\"`) || strings.Contains(*flagSocket, `'`) {\r\n\t\treturn fmt.Errorf(\"invalid socket path\")\r\n\t}\r\n\tsocket := *flagSocket\r\n\r\n\tvnc, err := vncViewer()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer vnc.Close()\r\n\treturn sshToVNC(user, host, socket, vnc)\r\n}\r\n\r\nfunc vncViewer() (io.ReadWriteCloser, error) {\r\n\tln, err := net.Listen(\"tcp\", \"localhost:0\")\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tport := ln.Addr().(*net.TCPAddr).Port\r\n\tcmd := exec.Command(*flagViewer, fmt.Sprintf(\"localhost::%d\", port))\r\n\tif err := cmd.Start(); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tconn, err := ln.Accept()\r\n\tif err != nil {\r\n\t\tcmd.Wait()\r\n\t\treturn nil, err\r\n\t}\r\n\tif err := ln.Close(); err != nil {\r\n\t\tcmd.Wait()\r\n\t\treturn nil, err\r\n\t}\r\n\tlog.Printf(\"VNC viewer connected (localhost:%v)\", port)\r\n\treturn &vncConn{conn, cmd}, nil\r\n}\r\n\r\n\/\/ vncConn implements io.ReadWriteCloser.\r\ntype vncConn struct {\r\n\tconn io.ReadWriteCloser\r\n\tcmd *exec.Cmd\r\n}\r\n\r\nfunc (s *vncConn) Read(p []byte) (int, error) { return s.conn.Read(p) }\r\nfunc (s *vncConn) Write(p []byte) (int, error) { return s.conn.Write(p) }\r\nfunc (s *vncConn) Close() error {\r\n\tvar errors []error\r\n\tif err := s.conn.Close(); err != nil {\r\n\t\terrors = append(errors, err)\r\n\t}\r\n\tif err := s.cmd.Wait(); err != nil {\r\n\t\terrors = append(errors, err)\r\n\t}\r\n\tif len(errors) > 0 {\r\n\t\tif len(errors) == 1 {\r\n\t\t\treturn errors[0]\r\n\t\t}\r\n\t\treturn fmt.Errorf(\"close: %v errors, first: %v\", len(errors), errors[0])\r\n\t}\r\n\treturn nil\r\n}\r\n\r\nfunc sshToVNC(user, host, socket string, vnc io.ReadWriter) error {\r\n\tclient, err := connect(user, host)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer client.Close()\r\n\tlog.Print(\"Connected to SSH server\")\r\n\tsession, err := client.NewSession()\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer session.Close()\r\n\tsession.Stdin = vnc\r\n\tsession.Stdout = vnc\r\n\tif err := session.Start(fmt.Sprintf(`socat 'UNIX:\"%s\"' -`, socket)); err != nil {\r\n\t\treturn err\r\n\t}\r\n\tlog.Print(\"Connected to VNC via SSH\")\r\n\treturn session.Wait()\r\n}\r\n\r\nfunc connect(user, host string) (*ssh.Client, error) {\r\n\tag, err := cygwin.SSHAgent()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tdefer ag.Close()\r\n\tconfig := &ssh.ClientConfig{\r\n\t\tUser: user,\r\n\t\tAuth: []ssh.AuthMethod{\r\n\t\t\tssh.PublicKeysCallback(agent.NewClient(ag).Signers),\r\n\t\t},\r\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\r\n\t}\r\n\treturn ssh.Dial(\"tcp\", host, config)\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/gophergala2016\/goad\/queue\"\n)\n\nfunc main() {\n\taddress := os.Args[1]\n\tconcurrencycount, err := strconv.Atoi(os.Args[2])\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR %s\\n\", err)\n\t\treturn\n\t}\n\tmaxRequestCount, err := strconv.Atoi(os.Args[3])\n\tsqsurl := os.Args[4]\n\tawsregion := os.Args[5]\n\tclientTimeout, _ := time.ParseDuration(\"1s\")\n\tif len(os.Args) > 6 {\n\t\tnewClientTimeout, err := time.ParseDuration(os.Args[6])\n\t\tif err == nil {\n\t\t\tclientTimeout = newClientTimeout\n\t\t} else {\n\t\t\tfmt.Printf(\"Error parsing timeout: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Printf(\"Using a timeout of %d nanoseconds\\n\", clientTimeout.Nanoseconds())\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR %s\\n\", err)\n\t\treturn\n\t}\n\tclient := &http.Client{}\n\tclient.Timeout = clientTimeout\n\tfmt.Printf(\"Will spawn %d workers making %d requests to %s\\n\", concurrencycount, maxRequestCount, address)\n\trunLoadTest(client, sqsurl, address, maxRequestCount, concurrencycount, awsregion)\n}\n\ntype Job struct{}\n\ntype RequestResult struct {\n\tTime int64 `json:\"time\"`\n\tHost string `json:\"host\"`\n\tType string `json:\"type\"`\n\tStatus int `json:\"status\"`\n\tElapsedFirstByte int64 `json:\"elapsed-first-byte\"`\n\tElapsedLastByte int64 `json:\"elapsed-last-byte\"`\n\tElapsed int64 `json:\"elapsed\"`\n\tBytes int `json:\"bytes\"`\n\tTimeout bool `json:\"timeout\"`\n\tConnectionError bool `json:\"connection-error\"`\n\tState string `json:\"state\"`\n}\n\nfunc runLoadTest(client *http.Client, sqsurl string, url string, totalRequests int, concurrencycount int, awsregion string) {\n\tawsConfig := aws.NewConfig().WithRegion(awsregion)\n\tsqsAdaptor := queue.NewSQSAdaptor(awsConfig, sqsurl)\n\t\/\/sqsAdaptor := queue.NewDummyAdaptor(sqsurl)\n\tjobs := make(chan Job, totalRequests)\n\tch := make(chan RequestResult, totalRequests)\n\tvar wg sync.WaitGroup\n\tloadTestStartTime := time.Now()\n\tvar requestsSoFar int\n\tfor i := 0; i < totalRequests; i++ {\n\t\tjobs <- Job{}\n\t}\n\tclose(jobs)\n\tfor i := 0; i < concurrencycount; i++ {\n\t\twg.Add(1)\n\t\tgo fetch(loadTestStartTime, client, url, totalRequests, jobs, ch, &wg, awsregion)\n\t}\n\tfmt.Println(\"Waiting for results…\")\n\n\tfor requestsSoFar < totalRequests {\n\t\tvar agg [100]RequestResult\n\t\taggRequestCount := len(agg)\n\t\ti := 0\n\n\t\tvar timeToFirstTotal int64\n\t\tvar requestTimeTotal int64\n\t\ttotBytesRead := 0\n\t\tstatuses := make(map[string]int)\n\t\tvar firstRequestTime int64\n\t\tvar lastRequestTime int64\n\t\tvar slowest int64\n\t\tvar fastest int64\n\t\tvar totalTimedOut int\n\t\tvar totalConnectionError int\n\t\tfor ; i < aggRequestCount && requestsSoFar < totalRequests; i++ {\n\t\t\tr := <-ch\n\t\t\tagg[i] = r\n\t\t\trequestsSoFar++\n\t\t\tif requestsSoFar%10 == 0 {\n\t\t\t\tfmt.Printf(\"\\r%.2f%% done (%d requests out of %d)\", (float64(requestsSoFar)\/float64(totalRequests))*100.0, requestsSoFar, totalRequests)\n\t\t\t}\n\t\t\tif firstRequestTime == 0 {\n\t\t\t\tfirstRequestTime = r.Time\n\t\t\t}\n\n\t\t\tlastRequestTime = r.Time\n\n\t\t\tif r.Timeout {\n\t\t\t\ttotalTimedOut++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif r.ConnectionError {\n\t\t\t\ttotalConnectionError++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif r.ElapsedLastByte > slowest {\n\t\t\t\tslowest = r.ElapsedLastByte\n\t\t\t}\n\t\t\tif fastest == 0 {\n\t\t\t\tfastest = r.ElapsedLastByte\n\t\t\t} else {\n\t\t\t\tif r.ElapsedLastByte < fastest {\n\t\t\t\t\tfastest = r.ElapsedLastByte\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttimeToFirstTotal += r.ElapsedFirstByte\n\t\t\ttotBytesRead += r.Bytes\n\t\t\tstatusStr := strconv.Itoa(r.Status)\n\t\t\t_, ok := statuses[statusStr]\n\t\t\tif !ok {\n\t\t\t\tstatuses[statusStr] = 1\n\t\t\t} else {\n\t\t\t\tstatuses[statusStr]++\n\t\t\t}\n\t\t\trequestTimeTotal += r.Elapsed\n\t\t}\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdurationNanoSeconds := lastRequestTime - firstRequestTime\n\t\tdurationSeconds := float32(durationNanoSeconds) \/ float32(1000000000)\n\t\taggData := queue.AggData{\n\t\t\ti,\n\t\t\ttotalTimedOut,\n\t\t\ttotalConnectionError,\n\t\t\ttimeToFirstTotal \/ int64(i),\n\t\t\ttotBytesRead,\n\t\t\tstatuses,\n\t\t\trequestTimeTotal \/ int64(i),\n\t\t\tfloat32(i) \/ durationSeconds,\n\t\t\tslowest,\n\t\t\tfastest,\n\t\t\tawsregion,\n\t\t}\n\t\tsqsAdaptor.SendResult(aggData)\n\t}\n\tfmt.Printf(\"\\nWaiting for workers…\")\n\twg.Wait()\n\tfmt.Printf(\"\\nYay🎈 - %d requests completed\\n\", requestsSoFar)\n\n}\n\nfunc fetch(loadTestStartTime time.Time, client *http.Client, address string, requestcount int, jobs <-chan Job, ch chan RequestResult, wg *sync.WaitGroup, awsregion string) {\n\tdefer wg.Done()\n\tfmt.Printf(\"Fetching %s\\n\", address)\n\tfor _ = range jobs {\n\t\tstart := time.Now()\n\t\treq, err := http.NewRequest(\"GET\", address, nil)\n\t\treq.Header.Add(\"User-Agent\", \"GOAD\/0.1\")\n\t\tresponse, err := client.Do(req)\n\t\tvar status string\n\t\tvar elapsedFirstByte time.Duration\n\t\tvar elapsedLastByte time.Duration\n\t\tvar elapsed time.Duration\n\t\tvar statusCode int\n\t\tvar bytesRead int\n\t\tbuf := []byte(\" \")\n\t\ttimedOut := false\n\t\tconnectionError := false\n\t\tif err != nil {\n\t\t\tstatus = fmt.Sprintf(\"ERROR: %s\\n\", err)\n\t\t\t\/\/fmt.Println(status)\n\t\t\tswitch err := err.(type) {\n\t\t\tcase *url.Error:\n\t\t\t\tif err, ok := err.Err.(net.Error); ok && err.Timeout() {\n\t\t\t\t\ttimedOut = true\n\t\t\t\t}\n\t\t\tcase net.Error:\n\t\t\t\tif err.Timeout() {\n\t\t\t\t\ttimedOut = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !timedOut {\n\t\t\t\tconnectionError = true\n\t\t\t}\n\t\t} else {\n\t\t\tstatusCode = response.StatusCode\n\t\t\t_, err = response.Body.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tstatus = fmt.Sprintf(\"reading first byte failed: %s\\n\", err)\n\t\t\t}\n\t\t\telapsedFirstByte = time.Since(start)\n\t\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\t\tbytesRead = len(body) + 1\n\t\t\telapsedLastByte = time.Since(start)\n\t\t\tif err != nil {\n\t\t\t\tstatus = fmt.Sprintf(\"reading response body failed: %s\\n\", err)\n\t\t\t} else {\n\t\t\t\tstatus = \"Success\"\n\t\t\t}\n\t\t\telapsed = time.Since(start)\n\t\t}\n\t\tresult := RequestResult{\n\t\t\tstart.Sub(loadTestStartTime).Nanoseconds(),\n\t\t\treq.URL.Host,\n\t\t\treq.Method,\n\t\t\tstatusCode,\n\t\t\telapsed.Nanoseconds(),\n\t\t\telapsedFirstByte.Nanoseconds(),\n\t\t\telapsedLastByte.Nanoseconds(),\n\t\t\tbytesRead,\n\t\t\ttimedOut,\n\t\t\tconnectionError,\n\t\t\tstatus,\n\t\t}\n\t\tch <- result\n\t}\n}\n<commit_msg>Don't follow redirects, don't treat them as errors<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/gophergala2016\/goad\/queue\"\n)\n\nfunc main() {\n\taddress := os.Args[1]\n\tconcurrencycount, err := strconv.Atoi(os.Args[2])\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR %s\\n\", err)\n\t\treturn\n\t}\n\tmaxRequestCount, err := strconv.Atoi(os.Args[3])\n\tsqsurl := os.Args[4]\n\tawsregion := os.Args[5]\n\tclientTimeout, _ := time.ParseDuration(\"1s\")\n\tif len(os.Args) > 6 {\n\t\tnewClientTimeout, err := time.ParseDuration(os.Args[6])\n\t\tif err == nil {\n\t\t\tclientTimeout = newClientTimeout\n\t\t} else {\n\t\t\tfmt.Printf(\"Error parsing timeout: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Printf(\"Using a timeout of %d nanoseconds\\n\", clientTimeout.Nanoseconds())\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR %s\\n\", err)\n\t\treturn\n\t}\n\tclient := &http.Client{}\n\tclient.Timeout = clientTimeout\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\treturn errors.New(\"redirect\")\n\t}\n\tfmt.Printf(\"Will spawn %d workers making %d requests to %s\\n\", concurrencycount, maxRequestCount, address)\n\trunLoadTest(client, sqsurl, address, maxRequestCount, concurrencycount, awsregion)\n}\n\ntype Job struct{}\n\ntype RequestResult struct {\n\tTime int64 `json:\"time\"`\n\tHost string `json:\"host\"`\n\tType string `json:\"type\"`\n\tStatus int `json:\"status\"`\n\tElapsedFirstByte int64 `json:\"elapsed-first-byte\"`\n\tElapsedLastByte int64 `json:\"elapsed-last-byte\"`\n\tElapsed int64 `json:\"elapsed\"`\n\tBytes int `json:\"bytes\"`\n\tTimeout bool `json:\"timeout\"`\n\tConnectionError bool `json:\"connection-error\"`\n\tState string `json:\"state\"`\n}\n\nfunc runLoadTest(client *http.Client, sqsurl string, url string, totalRequests int, concurrencycount int, awsregion string) {\n\tawsConfig := aws.NewConfig().WithRegion(awsregion)\n\tsqsAdaptor := queue.NewSQSAdaptor(awsConfig, sqsurl)\n\t\/\/sqsAdaptor := queue.NewDummyAdaptor(sqsurl)\n\tjobs := make(chan Job, totalRequests)\n\tch := make(chan RequestResult, totalRequests)\n\tvar wg sync.WaitGroup\n\tloadTestStartTime := time.Now()\n\tvar requestsSoFar int\n\tfor i := 0; i < totalRequests; i++ {\n\t\tjobs <- Job{}\n\t}\n\tclose(jobs)\n\tfor i := 0; i < concurrencycount; i++ {\n\t\twg.Add(1)\n\t\tgo fetch(loadTestStartTime, client, url, totalRequests, jobs, ch, &wg, awsregion)\n\t}\n\tfmt.Println(\"Waiting for results…\")\n\n\tfor requestsSoFar < totalRequests {\n\t\tvar agg [100]RequestResult\n\t\taggRequestCount := len(agg)\n\t\ti := 0\n\n\t\tvar timeToFirstTotal int64\n\t\tvar requestTimeTotal int64\n\t\ttotBytesRead := 0\n\t\tstatuses := make(map[string]int)\n\t\tvar firstRequestTime int64\n\t\tvar lastRequestTime int64\n\t\tvar slowest int64\n\t\tvar fastest int64\n\t\tvar totalTimedOut int\n\t\tvar totalConnectionError int\n\t\tfor ; i < aggRequestCount && requestsSoFar < totalRequests; i++ {\n\t\t\tr := <-ch\n\t\t\tagg[i] = r\n\t\t\trequestsSoFar++\n\t\t\tif requestsSoFar%10 == 0 {\n\t\t\t\tfmt.Printf(\"\\r%.2f%% done (%d requests out of %d)\", (float64(requestsSoFar)\/float64(totalRequests))*100.0, requestsSoFar, totalRequests)\n\t\t\t}\n\t\t\tif firstRequestTime == 0 {\n\t\t\t\tfirstRequestTime = r.Time\n\t\t\t}\n\n\t\t\tlastRequestTime = r.Time\n\n\t\t\tif r.Timeout {\n\t\t\t\ttotalTimedOut++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif r.ConnectionError {\n\t\t\t\ttotalConnectionError++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif r.ElapsedLastByte > slowest {\n\t\t\t\tslowest = r.ElapsedLastByte\n\t\t\t}\n\t\t\tif fastest == 0 {\n\t\t\t\tfastest = r.ElapsedLastByte\n\t\t\t} else {\n\t\t\t\tif r.ElapsedLastByte < fastest {\n\t\t\t\t\tfastest = r.ElapsedLastByte\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttimeToFirstTotal += r.ElapsedFirstByte\n\t\t\ttotBytesRead += r.Bytes\n\t\t\tstatusStr := strconv.Itoa(r.Status)\n\t\t\t_, ok := statuses[statusStr]\n\t\t\tif !ok {\n\t\t\t\tstatuses[statusStr] = 1\n\t\t\t} else {\n\t\t\t\tstatuses[statusStr]++\n\t\t\t}\n\t\t\trequestTimeTotal += r.Elapsed\n\t\t}\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdurationNanoSeconds := lastRequestTime - firstRequestTime\n\t\tdurationSeconds := float32(durationNanoSeconds) \/ float32(1000000000)\n\t\taggData := queue.AggData{\n\t\t\ti,\n\t\t\ttotalTimedOut,\n\t\t\ttotalConnectionError,\n\t\t\ttimeToFirstTotal \/ int64(i),\n\t\t\ttotBytesRead,\n\t\t\tstatuses,\n\t\t\trequestTimeTotal \/ int64(i),\n\t\t\tfloat32(i) \/ durationSeconds,\n\t\t\tslowest,\n\t\t\tfastest,\n\t\t\tawsregion,\n\t\t}\n\t\tsqsAdaptor.SendResult(aggData)\n\t}\n\tfmt.Printf(\"\\nWaiting for workers…\")\n\twg.Wait()\n\tfmt.Printf(\"\\nYay🎈 - %d requests completed\\n\", requestsSoFar)\n\n}\n\nfunc fetch(loadTestStartTime time.Time, client *http.Client, address string, requestcount int, jobs <-chan Job, ch chan RequestResult, wg *sync.WaitGroup, awsregion string) {\n\tdefer wg.Done()\n\tfmt.Printf(\"Fetching %s\\n\", address)\n\tfor _ = range jobs {\n\t\tstart := time.Now()\n\t\treq, err := http.NewRequest(\"GET\", address, nil)\n\t\treq.Header.Add(\"User-Agent\", \"GOAD\/0.1\")\n\t\tresponse, err := client.Do(req)\n\t\tvar status string\n\t\tvar elapsedFirstByte time.Duration\n\t\tvar elapsedLastByte time.Duration\n\t\tvar elapsed time.Duration\n\t\tvar statusCode int\n\t\tvar bytesRead int\n\t\tbuf := []byte(\" \")\n\t\ttimedOut := false\n\t\tconnectionError := false\n\t\tif err != nil && !strings.Contains(err.Error(), \"redirect\") {\n\t\t\tstatus = fmt.Sprintf(\"ERROR: %s\\n\", err)\n\t\t\tswitch err := err.(type) {\n\t\t\tcase *url.Error:\n\t\t\t\tif err, ok := err.Err.(net.Error); ok && err.Timeout() {\n\t\t\t\t\ttimedOut = true\n\t\t\t\t}\n\t\t\tcase net.Error:\n\t\t\t\tif err.Timeout() {\n\t\t\t\t\ttimedOut = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !timedOut {\n\t\t\t\tconnectionError = true\n\t\t\t}\n\t\t} else {\n\t\t\tstatusCode = response.StatusCode\n\t\t\t_, err = response.Body.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tstatus = fmt.Sprintf(\"reading first byte failed: %s\\n\", err)\n\t\t\t}\n\t\t\telapsedFirstByte = time.Since(start)\n\t\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\t\tbytesRead = len(body) + 1\n\t\t\telapsedLastByte = time.Since(start)\n\t\t\tif err != nil {\n\t\t\t\tstatus = fmt.Sprintf(\"reading response body failed: %s\\n\", err)\n\t\t\t} else {\n\t\t\t\tstatus = \"Success\"\n\t\t\t}\n\t\t\telapsed = time.Since(start)\n\t\t}\n\t\tresult := RequestResult{\n\t\t\tstart.Sub(loadTestStartTime).Nanoseconds(),\n\t\t\treq.URL.Host,\n\t\t\treq.Method,\n\t\t\tstatusCode,\n\t\t\telapsed.Nanoseconds(),\n\t\t\telapsedFirstByte.Nanoseconds(),\n\t\t\telapsedLastByte.Nanoseconds(),\n\t\t\tbytesRead,\n\t\t\ttimedOut,\n\t\t\tconnectionError,\n\t\t\tstatus,\n\t\t}\n\t\tch <- result\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package push\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"crypto\/md5\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/model\/account\"\n\t\"github.com\/cozy\/cozy-stack\/model\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/model\/job\"\n\t\"github.com\/cozy\/cozy-stack\/model\/notification\/center\"\n\t\"github.com\/cozy\/cozy-stack\/model\/oauth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/mail\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\tfcm \"github.com\/appleboy\/go-fcm\"\n\n\tapns \"github.com\/sideshow\/apns2\"\n\tapns_cert \"github.com\/sideshow\/apns2\/certificate\"\n\tapns_payload \"github.com\/sideshow\/apns2\/payload\"\n\tapns_token \"github.com\/sideshow\/apns2\/token\"\n)\n\nvar (\n\tfcmClient *fcm.Client\n\tiosClient *apns.Client\n)\n\nfunc init() {\n\tjob.AddWorker(&job.WorkerConfig{\n\t\tWorkerType: \"push\",\n\t\tConcurrency: runtime.NumCPU(),\n\t\tMaxExecCount: 1,\n\t\tTimeout: 10 * time.Second,\n\t\tWorkerInit: Init,\n\t\tWorkerFunc: Worker,\n\t})\n}\n\n\/\/ Init initializes the necessary global clients\nfunc Init() (err error) {\n\tconf := config.GetConfig().Notifications\n\n\tif conf.AndroidAPIKey != \"\" {\n\t\tif conf.FCMServer != \"\" {\n\t\t\tfcmClient, err = fcm.NewClient(conf.AndroidAPIKey, fcm.WithEndpoint(conf.FCMServer))\n\t\t} else {\n\t\t\tfcmClient, err = fcm.NewClient(conf.AndroidAPIKey)\n\t\t}\n\t\tlogger.WithNamespace(\"push\").Infof(\"Initialized FCM client with Android API Key\")\n\t\tif err != nil {\n\t\t\tlogger.WithNamespace(\"push\").Warnf(\"%s\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif conf.IOSCertificateKeyPath != \"\" {\n\t\tvar authKey *ecdsa.PrivateKey\n\t\tvar certificateKey tls.Certificate\n\n\t\tswitch filepath.Ext(conf.IOSCertificateKeyPath) {\n\t\tcase \".p12\":\n\t\t\tcertificateKey, err = apns_cert.FromP12File(\n\t\t\t\tconf.IOSCertificateKeyPath, conf.IOSCertificatePassword)\n\t\tcase \".pem\":\n\t\t\tcertificateKey, err = apns_cert.FromPemFile(\n\t\t\t\tconf.IOSCertificateKeyPath, conf.IOSCertificatePassword)\n\t\tcase \".p8\":\n\t\t\tauthKey, err = apns_token.AuthKeyFromFile(conf.IOSCertificateKeyPath)\n\t\tdefault:\n\t\t\terr = errors.New(\"wrong certificate key extension\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif authKey != nil {\n\t\t\tt := &apns_token.Token{\n\t\t\t\tAuthKey: authKey,\n\t\t\t\tKeyID: conf.IOSKeyID,\n\t\t\t\tTeamID: conf.IOSTeamID,\n\t\t\t}\n\t\t\tiosClient = apns.NewTokenClient(t)\n\t\t} else {\n\t\t\tiosClient = apns.NewClient(certificateKey)\n\t\t}\n\t\tif conf.Development {\n\t\t\tiosClient = iosClient.Development()\n\t\t} else {\n\t\t\tiosClient = iosClient.Production()\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Worker is the worker that send push messages.\nfunc Worker(ctx *job.WorkerContext) error {\n\tvar msg center.PushMessage\n\tif err := ctx.UnmarshalMessage(&msg); err != nil {\n\t\treturn err\n\t}\n\tcs, err := oauth.GetNotifiables(ctx.Instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(cs) > 10 {\n\t\tctx.Logger().Warnf(\"too many notifiable devices: %d\", len(cs))\n\t\tcs = cs[:10]\n\t}\n\tsent := false\n\tfor _, c := range cs {\n\t\tif err := push(ctx, c, &msg); err == nil {\n\t\t\tsent = true\n\t\t} else {\n\t\t\tctx.Logger().\n\t\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\t\"device_id\": c.ID(),\n\t\t\t\t\t\"device_platform\": c.NotificationPlatform,\n\t\t\t\t}).\n\t\t\t\tWarnf(\"could not send notification on device: %s\", err)\n\t\t}\n\t}\n\tif !sent {\n\t\tsendFallbackMail(ctx.Instance, msg.MailFallback)\n\t}\n\treturn nil\n}\n\nfunc push(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tswitch c.NotificationPlatform {\n\tcase oauth.PlatformFirebase, \"android\", \"ios\":\n\t\treturn pushToFirebase(ctx, c, msg)\n\tcase oauth.PlatformAPNS:\n\t\treturn pushToAPNS(ctx, c, msg)\n\tdefault:\n\t\treturn fmt.Errorf(\"notifications: unknown platform %q\", c.NotificationPlatform)\n\t}\n}\n\n\/\/ Firebase Cloud Messaging HTTP Protocol\n\/\/ https:\/\/firebase.google.com\/docs\/cloud-messaging\/http-server-ref\nfunc pushToFirebase(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tclient := getFirebaseClient(msg.Slug(), ctx.Instance.ContextName)\n\n\tif client == nil {\n\t\tctx.Logger().Warn(\"Could not send android notification: not configured\")\n\t\treturn nil\n\t}\n\n\tvar priority string\n\tif msg.Priority == \"high\" {\n\t\tpriority = \"high\"\n\t}\n\n\tvar hashedSource []byte\n\tif msg.Collapsible {\n\t\thashedSource = hashSource(msg.Source)\n\t} else {\n\t\thashedSource = hashSource(msg.Source + msg.NotificationID)\n\t}\n\n\t\/\/ notID should be an integer, we take the first 32bits of the hashed source\n\t\/\/ value.\n\tnotID := int32(binary.BigEndian.Uint32(hashedSource[:4]))\n\tif notID < 0 {\n\t\tnotID = -notID\n\t}\n\n\tnotification := &fcm.Message{\n\t\tTo: c.NotificationDeviceToken,\n\t\tPriority: priority,\n\t\tContentAvailable: true,\n\t\tNotification: &fcm.Notification{\n\t\t\tSound: msg.Sound,\n\t\t\tTitle: msg.Title,\n\t\t\tBody: msg.Message,\n\t\t},\n\t\tData: map[string]interface{}{\n\t\t\t\/\/ Fields required by phonegap-plugin-push\n\t\t\t\/\/ see: https:\/\/github.com\/phonegap\/phonegap-plugin-push\/blob\/master\/docs\/PAYLOAD.md#android-behaviour\n\t\t\t\"notId\": notID,\n\t\t\t\"title\": msg.Title,\n\t\t\t\"body\": msg.Message,\n\t\t},\n\t}\n\n\tctx.Logger().Infof(\"Built notification for FCM: %#v\", notification)\n\tif msg.Collapsible {\n\t\tnotification.CollapseKey = hex.EncodeToString(hashedSource)\n\t}\n\tfor k, v := range msg.Data {\n\t\tnotification.Data[k] = v\n\t}\n\n\tctx.Logger().Infof(\"FCM send: %#v\", notification)\n\tres, err := client.Send(notification)\n\tif err != nil {\n\t\tctx.Logger().Warnf(\"Error during fcm send: %s\", err)\n\t\treturn err\n\t}\n\tif res.Failure == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, result := range res.Results {\n\t\tif err = result.Error; err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFirebaseClient(slug, contextName string) *fcm.Client {\n\tif slug == \"\" {\n\t\treturn fcmClient\n\t}\n\ttyp, err := account.TypeInfo(slug, contextName)\n\tif err == nil && typ.AndroidAPIKey != \"\" {\n\t\tclient, err := fcm.NewClient(typ.AndroidAPIKey)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn client\n\t}\n\treturn fcmClient\n}\n\nfunc pushToAPNS(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tif iosClient == nil {\n\t\tctx.Logger().Warn(\"Could not send iOS notification: not configured\")\n\t\treturn nil\n\t}\n\n\tvar priority int\n\tif msg.Priority == \"normal\" {\n\t\tpriority = apns.PriorityLow\n\t} else {\n\t\tpriority = apns.PriorityHigh\n\t}\n\n\tpayload := apns_payload.NewPayload().\n\t\tAlertTitle(msg.Title).\n\t\tAlert(msg.Message).\n\t\tSound(msg.Sound)\n\n\tfor k, v := range msg.Data {\n\t\tpayload.Custom(k, v)\n\t}\n\n\tnotification := &apns.Notification{\n\t\tDeviceToken: c.NotificationDeviceToken,\n\t\tPayload: payload,\n\t\tPriority: priority,\n\t\tCollapseID: hex.EncodeToString(hashSource(msg.Source)), \/\/ CollapseID should not exceed 64 bytes\n\t}\n\n\tres, err := iosClient.PushWithContext(ctx, notification)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"failed to push apns notification: %d %s\", res.StatusCode, res.Reason)\n\t}\n\treturn nil\n}\n\nfunc hashSource(source string) []byte {\n\th := md5.New()\n\t_, _ = h.Write([]byte(source))\n\treturn h.Sum(nil)\n}\n\nfunc sendFallbackMail(inst *instance.Instance, email *mail.Options) {\n\tif inst == nil || email == nil {\n\t\treturn\n\t}\n\tmsg, err := job.NewMessage(&email)\n\tif err != nil {\n\t\treturn\n\t}\n\t_, _ = job.System().PushJob(inst, &job.JobRequest{\n\t\tWorkerType: \"sendmail\",\n\t\tMessage: msg,\n\t})\n}\n<commit_msg>Avoid duplicate notification (#2929)<commit_after>package push\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"crypto\/md5\"\n\t\"crypto\/tls\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/model\/account\"\n\t\"github.com\/cozy\/cozy-stack\/model\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/model\/job\"\n\t\"github.com\/cozy\/cozy-stack\/model\/notification\/center\"\n\t\"github.com\/cozy\/cozy-stack\/model\/oauth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/mail\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\tfcm \"github.com\/appleboy\/go-fcm\"\n\n\tapns \"github.com\/sideshow\/apns2\"\n\tapns_cert \"github.com\/sideshow\/apns2\/certificate\"\n\tapns_payload \"github.com\/sideshow\/apns2\/payload\"\n\tapns_token \"github.com\/sideshow\/apns2\/token\"\n)\n\nvar (\n\tfcmClient *fcm.Client\n\tiosClient *apns.Client\n)\n\nfunc init() {\n\tjob.AddWorker(&job.WorkerConfig{\n\t\tWorkerType: \"push\",\n\t\tConcurrency: runtime.NumCPU(),\n\t\tMaxExecCount: 1,\n\t\tTimeout: 10 * time.Second,\n\t\tWorkerInit: Init,\n\t\tWorkerFunc: Worker,\n\t})\n}\n\n\/\/ Init initializes the necessary global clients\nfunc Init() (err error) {\n\tconf := config.GetConfig().Notifications\n\n\tif conf.AndroidAPIKey != \"\" {\n\t\tif conf.FCMServer != \"\" {\n\t\t\tfcmClient, err = fcm.NewClient(conf.AndroidAPIKey, fcm.WithEndpoint(conf.FCMServer))\n\t\t} else {\n\t\t\tfcmClient, err = fcm.NewClient(conf.AndroidAPIKey)\n\t\t}\n\t\tlogger.WithNamespace(\"push\").Infof(\"Initialized FCM client with Android API Key\")\n\t\tif err != nil {\n\t\t\tlogger.WithNamespace(\"push\").Warnf(\"%s\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif conf.IOSCertificateKeyPath != \"\" {\n\t\tvar authKey *ecdsa.PrivateKey\n\t\tvar certificateKey tls.Certificate\n\n\t\tswitch filepath.Ext(conf.IOSCertificateKeyPath) {\n\t\tcase \".p12\":\n\t\t\tcertificateKey, err = apns_cert.FromP12File(\n\t\t\t\tconf.IOSCertificateKeyPath, conf.IOSCertificatePassword)\n\t\tcase \".pem\":\n\t\t\tcertificateKey, err = apns_cert.FromPemFile(\n\t\t\t\tconf.IOSCertificateKeyPath, conf.IOSCertificatePassword)\n\t\tcase \".p8\":\n\t\t\tauthKey, err = apns_token.AuthKeyFromFile(conf.IOSCertificateKeyPath)\n\t\tdefault:\n\t\t\terr = errors.New(\"wrong certificate key extension\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif authKey != nil {\n\t\t\tt := &apns_token.Token{\n\t\t\t\tAuthKey: authKey,\n\t\t\t\tKeyID: conf.IOSKeyID,\n\t\t\t\tTeamID: conf.IOSTeamID,\n\t\t\t}\n\t\t\tiosClient = apns.NewTokenClient(t)\n\t\t} else {\n\t\t\tiosClient = apns.NewClient(certificateKey)\n\t\t}\n\t\tif conf.Development {\n\t\t\tiosClient = iosClient.Development()\n\t\t} else {\n\t\t\tiosClient = iosClient.Production()\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Worker is the worker that send push messages.\nfunc Worker(ctx *job.WorkerContext) error {\n\tvar msg center.PushMessage\n\tif err := ctx.UnmarshalMessage(&msg); err != nil {\n\t\treturn err\n\t}\n\tcs, err := oauth.GetNotifiables(ctx.Instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(cs) > 10 {\n\t\tctx.Logger().Warnf(\"too many notifiable devices: %d\", len(cs))\n\t\tcs = cs[:10]\n\t}\n\tsent := false\n\tseen := make(map[string]struct{})\n\tfor _, c := range cs {\n\t\tif _, ok := seen[c.NotificationDeviceToken]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tseen[c.NotificationDeviceToken] = struct{}{}\n\t\tif err := push(ctx, c, &msg); err == nil {\n\t\t\tsent = true\n\t\t} else {\n\t\t\tctx.Logger().\n\t\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\t\"device_id\": c.ID(),\n\t\t\t\t\t\"device_platform\": c.NotificationPlatform,\n\t\t\t\t}).\n\t\t\t\tWarnf(\"could not send notification on device: %s\", err)\n\t\t}\n\t}\n\tif !sent {\n\t\tsendFallbackMail(ctx.Instance, msg.MailFallback)\n\t}\n\treturn nil\n}\n\nfunc push(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tswitch c.NotificationPlatform {\n\tcase oauth.PlatformFirebase, \"android\", \"ios\":\n\t\treturn pushToFirebase(ctx, c, msg)\n\tcase oauth.PlatformAPNS:\n\t\treturn pushToAPNS(ctx, c, msg)\n\tdefault:\n\t\treturn fmt.Errorf(\"notifications: unknown platform %q\", c.NotificationPlatform)\n\t}\n}\n\n\/\/ Firebase Cloud Messaging HTTP Protocol\n\/\/ https:\/\/firebase.google.com\/docs\/cloud-messaging\/http-server-ref\nfunc pushToFirebase(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tclient := getFirebaseClient(msg.Slug(), ctx.Instance.ContextName)\n\n\tif client == nil {\n\t\tctx.Logger().Warn(\"Could not send android notification: not configured\")\n\t\treturn nil\n\t}\n\n\tvar priority string\n\tif msg.Priority == \"high\" {\n\t\tpriority = \"high\"\n\t}\n\n\tvar hashedSource []byte\n\tif msg.Collapsible {\n\t\thashedSource = hashSource(msg.Source)\n\t} else {\n\t\thashedSource = hashSource(msg.Source + msg.NotificationID)\n\t}\n\n\t\/\/ notID should be an integer, we take the first 32bits of the hashed source\n\t\/\/ value.\n\tnotID := int32(binary.BigEndian.Uint32(hashedSource[:4]))\n\tif notID < 0 {\n\t\tnotID = -notID\n\t}\n\n\tnotification := &fcm.Message{\n\t\tTo: c.NotificationDeviceToken,\n\t\tPriority: priority,\n\t\tContentAvailable: true,\n\t\tNotification: &fcm.Notification{\n\t\t\tSound: msg.Sound,\n\t\t\tTitle: msg.Title,\n\t\t\tBody: msg.Message,\n\t\t},\n\t\tData: map[string]interface{}{\n\t\t\t\/\/ Fields required by phonegap-plugin-push\n\t\t\t\/\/ see: https:\/\/github.com\/phonegap\/phonegap-plugin-push\/blob\/master\/docs\/PAYLOAD.md#android-behaviour\n\t\t\t\"notId\": notID,\n\t\t\t\"title\": msg.Title,\n\t\t\t\"body\": msg.Message,\n\t\t},\n\t}\n\n\tctx.Logger().Infof(\"Built notification for FCM: %#v\", notification)\n\tif msg.Collapsible {\n\t\tnotification.CollapseKey = hex.EncodeToString(hashedSource)\n\t}\n\tfor k, v := range msg.Data {\n\t\tnotification.Data[k] = v\n\t}\n\n\tctx.Logger().Infof(\"FCM send: %#v\", notification)\n\tres, err := client.Send(notification)\n\tif err != nil {\n\t\tctx.Logger().Warnf(\"Error during fcm send: %s\", err)\n\t\treturn err\n\t}\n\tif res.Failure == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, result := range res.Results {\n\t\tif err = result.Error; err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFirebaseClient(slug, contextName string) *fcm.Client {\n\tif slug == \"\" {\n\t\treturn fcmClient\n\t}\n\ttyp, err := account.TypeInfo(slug, contextName)\n\tif err == nil && typ.AndroidAPIKey != \"\" {\n\t\tclient, err := fcm.NewClient(typ.AndroidAPIKey)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn client\n\t}\n\treturn fcmClient\n}\n\nfunc pushToAPNS(ctx *job.WorkerContext, c *oauth.Client, msg *center.PushMessage) error {\n\tif iosClient == nil {\n\t\tctx.Logger().Warn(\"Could not send iOS notification: not configured\")\n\t\treturn nil\n\t}\n\n\tvar priority int\n\tif msg.Priority == \"normal\" {\n\t\tpriority = apns.PriorityLow\n\t} else {\n\t\tpriority = apns.PriorityHigh\n\t}\n\n\tpayload := apns_payload.NewPayload().\n\t\tAlertTitle(msg.Title).\n\t\tAlert(msg.Message).\n\t\tSound(msg.Sound)\n\n\tfor k, v := range msg.Data {\n\t\tpayload.Custom(k, v)\n\t}\n\n\tnotification := &apns.Notification{\n\t\tDeviceToken: c.NotificationDeviceToken,\n\t\tPayload: payload,\n\t\tPriority: priority,\n\t\tCollapseID: hex.EncodeToString(hashSource(msg.Source)), \/\/ CollapseID should not exceed 64 bytes\n\t}\n\n\tres, err := iosClient.PushWithContext(ctx, notification)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"failed to push apns notification: %d %s\", res.StatusCode, res.Reason)\n\t}\n\treturn nil\n}\n\nfunc hashSource(source string) []byte {\n\th := md5.New()\n\t_, _ = h.Write([]byte(source))\n\treturn h.Sum(nil)\n}\n\nfunc sendFallbackMail(inst *instance.Instance, email *mail.Options) {\n\tif inst == nil || email == nil {\n\t\treturn\n\t}\n\tmsg, err := job.NewMessage(&email)\n\tif err != nil {\n\t\treturn\n\t}\n\t_, _ = job.System().PushJob(inst, &job.JobRequest{\n\t\tWorkerType: \"sendmail\",\n\t\tMessage: msg,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fitasks\n\nimport (\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kops\/pkg\/pki\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n)\n\nvar wellKnownCertificateTypes = map[string]string{\n\t\"ca\": \"CA,KeyUsageCRLSign,KeyUsageCertSign\",\n\t\"client\": \"ExtKeyUsageClientAuth,KeyUsageDigitalSignature\",\n\t\"clientServer\": \"ExtKeyUsageServerAuth,KeyUsageDigitalSignature,ExtKeyUsageClientAuth,KeyUsageKeyEncipherment\",\n\t\"server\": \"ExtKeyUsageServerAuth,KeyUsageDigitalSignature,KeyUsageKeyEncipherment\",\n}\n\n\/\/go:generate fitask -type=Keypair\ntype Keypair struct {\n\t\/\/ Name is the name of the keypair\n\tName *string\n\t\/\/ AlternateNames a list of alternative names for this certificate\n\tAlternateNames []string `json:\"alternateNames\"`\n\t\/\/ AlternateNameTasks is a collection of subtask\n\tAlternateNameTasks []fi.Task `json:\"alternateNameTasks\"`\n\t\/\/ Lifecycle is context for a task\n\tLifecycle *fi.Lifecycle\n\t\/\/ Signer is the keypair to use to sign, for when we want to use an alternative CA\n\tSigner *Keypair\n\t\/\/ Subject is the cerificate subject\n\tSubject string `json:\"subject\"`\n\t\/\/ Type the type of certificate i.e. CA, server, client etc\n\tType string `json:\"type\"`\n\t\/\/ Format stores the api version of kops.Keyset. We are using this info in order to determine if kops\n\t\/\/ is accessing legacy secrets that do not use keyset.yaml.\n\tFormat string `json:\"format\"`\n}\n\nvar _ fi.HasCheckExisting = &Keypair{}\nvar _ fi.HasName = &Keypair{}\n\n\/\/ It's important always to check for the existing key, so we don't regenerate keys e.g. on terraform\nfunc (e *Keypair) CheckExisting(c *fi.Context) bool {\n\treturn true\n}\n\nvar _ fi.CompareWithID = &Keypair{}\n\nfunc (e *Keypair) CompareWithID() *string {\n\treturn &e.Subject\n}\n\nfunc (e *Keypair) Find(c *fi.Context) (*Keypair, error) {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tcert, key, format, err := c.Keystore.FindKeypair(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cert == nil {\n\t\treturn nil, nil\n\t}\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"found cert in store, but did not find private key: %q\", name)\n\t}\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, cert.Certificate.DNSNames...)\n\talternateNames = append(alternateNames, cert.Certificate.EmailAddresses...)\n\tfor _, ip := range cert.Certificate.IPAddresses {\n\t\talternateNames = append(alternateNames, ip.String())\n\t}\n\tsort.Strings(alternateNames)\n\n\tactual := &Keypair{\n\t\tName: &name,\n\t\tAlternateNames: alternateNames,\n\t\tSubject: pkixNameToString(&cert.Subject),\n\t\tType: buildTypeDescription(cert.Certificate),\n\t\tFormat: string(format),\n\t}\n\n\tactual.Signer = &Keypair{Subject: pkixNameToString(&cert.Certificate.Issuer)}\n\n\t\/\/ Avoid spurious changes\n\tactual.Lifecycle = e.Lifecycle\n\n\treturn actual, nil\n}\n\nfunc (e *Keypair) Run(c *fi.Context) error {\n\terr := e.normalize(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *Keypair) normalize(c *fi.Context) error {\n\tvar alternateNames []string\n\n\tfor _, s := range e.AlternateNames {\n\t\ts = strings.TrimSpace(s)\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\talternateNames = append(alternateNames, s)\n\t}\n\n\tfor _, task := range e.AlternateNameTasks {\n\t\tif hasAddress, ok := task.(fi.HasAddress); ok {\n\t\t\taddress, err := hasAddress.FindIPAddress(c)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error finding address for %v: %v\", task, err)\n\t\t\t}\n\t\t\tif address == nil {\n\t\t\t\tglog.Warningf(\"Task did not have an address: %v\", task)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tglog.V(8).Infof(\"Resolved alternateName %q for %q\", *address, task)\n\t\t\talternateNames = append(alternateNames, *address)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unsupported type for AlternateNameDependencies: %v\", task)\n\t\t}\n\t}\n\n\tsort.Strings(alternateNames)\n\te.AlternateNames = alternateNames\n\te.AlternateNameTasks = nil\n\n\treturn nil\n}\n\nfunc (_ *Keypair) CheckChanges(a, e, changes *Keypair) error {\n\tif a != nil {\n\t\tif changes.Name != nil {\n\t\t\treturn fi.CannotChangeField(\"Name\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn fi.RequiredField(\"Name\")\n\t}\n\n\ttemplate, err := e.BuildCertificateTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchangeStoredFormat := false\n\tcreateCertificate := false\n\tif a == nil {\n\t\tcreateCertificate = true\n\t\tglog.V(8).Infof(\"creating brand new certificate\")\n\t} else if changes != nil {\n\t\tglog.V(8).Infof(\"creating certificate as changes are not nil\")\n\t\tif changes.AlternateNames != nil {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new AlternateNames\")\n\t\t} else if changes.Subject != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new Subject\")\n\t\t} else if changes.Type != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new Type\")\n\t\t} else if changes.Format != \"\" {\n\t\t\tchangeStoredFormat = true\n\t\t} else {\n\t\t\tglog.Warningf(\"Ignoring changes in key: %v\", fi.DebugAsJsonString(changes))\n\t\t}\n\t}\n\n\tif createCertificate {\n\t\tglog.V(2).Infof(\"Creating PKI keypair %q\", name)\n\n\t\tcert, privateKey, _, err := c.Keystore.FindKeypair(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We always reuse the private key if it exists,\n\t\t\/\/ if we change keys we often have to regenerate e.g. the service accounts\n\t\t\/\/ TODO: Eventually rotate keys \/ don't always reuse?\n\t\tif privateKey == nil {\n\t\t\tglog.V(2).Infof(\"Creating privateKey %q\", name)\n\n\t\t\tprivateKey, err = pki.GeneratePrivateKey()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tsigner := fi.CertificateId_CA\n\t\tif e.Signer != nil {\n\t\t\tsigner = fi.StringValue(e.Signer.Name)\n\t\t}\n\n\t\tcert, err = c.Keystore.CreateKeypair(signer, name, template, privateKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.V(8).Infof(\"created certificate with cn=%s\", cert.Subject.CommonName)\n\t}\n\n\t\/\/ TODO: Check correct subject \/ flags\n\n\tif changeStoredFormat {\n\t\t\/\/ We fetch and reinsert the same keypair, forcing an update to our preferred format\n\t\t\/\/ TODO: We're assuming that we want to save in the preferred format\n\t\tcert, privateKey, _, err := c.Keystore.FindKeypair(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.Keystore.StoreKeypair(name, cert, privateKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.Infof(\"updated Keypair %q to API format %q\", name, e.Format)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildCertificateTemplate is responsible for constructing a certificate template\nfunc (e *Keypair) BuildCertificateTemplate() (*x509.Certificate, error) {\n\ttemplate, err := buildCertificateTemplateForType(e.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsubjectPkix, err := parsePkixName(e.Subject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing Subject: %v\", err)\n\t}\n\n\tif len(subjectPkix.ToRDNSequence()) == 0 {\n\t\treturn nil, fmt.Errorf(\"Subject name was empty for SSL keypair %q\", e.Name)\n\t}\n\n\ttemplate.Subject = *subjectPkix\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, e.AlternateNames...)\n\n\tfor _, san := range alternateNames {\n\t\tsan = strings.TrimSpace(san)\n\t\tif san == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif ip := net.ParseIP(san); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, san)\n\t\t}\n\t}\n\n\treturn template, nil\n}\n\nfunc buildCertificateTemplateForType(certificateType string) (*x509.Certificate, error) {\n\tif expanded, found := wellKnownCertificateTypes[certificateType]; found {\n\t\tcertificateType = expanded\n\t}\n\n\ttemplate := &x509.Certificate{\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: false,\n\t}\n\n\ttokens := strings.Split(certificateType, \",\")\n\tfor _, t := range tokens {\n\t\tif strings.HasPrefix(t, \"KeyUsage\") {\n\t\t\tku, found := parseKeyUsage(t)\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %v\", t)\n\t\t\t}\n\t\t\ttemplate.KeyUsage |= ku\n\t\t} else if strings.HasPrefix(t, \"ExtKeyUsage\") {\n\t\t\tku, found := parseExtKeyUsage(t)\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %v\", t)\n\t\t\t}\n\t\t\ttemplate.ExtKeyUsage = append(template.ExtKeyUsage, ku)\n\t\t} else if t == \"CA\" {\n\t\t\ttemplate.IsCA = true\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %q\", t)\n\t\t}\n\t}\n\n\treturn template, nil\n}\n\n\/\/ buildTypeDescription extracts the type based on the certificate extensions\nfunc buildTypeDescription(cert *x509.Certificate) string {\n\tvar options []string\n\n\tif cert.IsCA {\n\t\toptions = append(options, \"CA\")\n\t}\n\n\toptions = append(options, keyUsageToString(cert.KeyUsage)...)\n\n\tfor _, extKeyUsage := range cert.ExtKeyUsage {\n\t\toptions = append(options, extKeyUsageToString(extKeyUsage))\n\t}\n\n\tsort.Strings(options)\n\ts := strings.Join(options, \",\")\n\n\tfor k, v := range wellKnownCertificateTypes {\n\t\tif v == s {\n\t\t\ts = k\n\t\t}\n\t}\n\n\treturn s\n}\n<commit_msg>Fix etcd Keypair change showing on every kops update when TLS enabled<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fitasks\n\nimport (\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kops\/pkg\/pki\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n)\n\nvar wellKnownCertificateTypes = map[string]string{\n\t\"ca\": \"CA,KeyUsageCRLSign,KeyUsageCertSign\",\n\t\"client\": \"ExtKeyUsageClientAuth,KeyUsageDigitalSignature\",\n\t\"clientServer\": \"ExtKeyUsageClientAuth,ExtKeyUsageServerAuth,KeyUsageDigitalSignature,KeyUsageKeyEncipherment\",\n\t\"server\": \"ExtKeyUsageServerAuth,KeyUsageDigitalSignature,KeyUsageKeyEncipherment\",\n}\n\n\/\/go:generate fitask -type=Keypair\ntype Keypair struct {\n\t\/\/ Name is the name of the keypair\n\tName *string\n\t\/\/ AlternateNames a list of alternative names for this certificate\n\tAlternateNames []string `json:\"alternateNames\"`\n\t\/\/ AlternateNameTasks is a collection of subtask\n\tAlternateNameTasks []fi.Task `json:\"alternateNameTasks\"`\n\t\/\/ Lifecycle is context for a task\n\tLifecycle *fi.Lifecycle\n\t\/\/ Signer is the keypair to use to sign, for when we want to use an alternative CA\n\tSigner *Keypair\n\t\/\/ Subject is the cerificate subject\n\tSubject string `json:\"subject\"`\n\t\/\/ Type the type of certificate i.e. CA, server, client etc\n\tType string `json:\"type\"`\n\t\/\/ Format stores the api version of kops.Keyset. We are using this info in order to determine if kops\n\t\/\/ is accessing legacy secrets that do not use keyset.yaml.\n\tFormat string `json:\"format\"`\n}\n\nvar _ fi.HasCheckExisting = &Keypair{}\nvar _ fi.HasName = &Keypair{}\n\n\/\/ It's important always to check for the existing key, so we don't regenerate keys e.g. on terraform\nfunc (e *Keypair) CheckExisting(c *fi.Context) bool {\n\treturn true\n}\n\nvar _ fi.CompareWithID = &Keypair{}\n\nfunc (e *Keypair) CompareWithID() *string {\n\treturn &e.Subject\n}\n\nfunc (e *Keypair) Find(c *fi.Context) (*Keypair, error) {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tcert, key, format, err := c.Keystore.FindKeypair(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cert == nil {\n\t\treturn nil, nil\n\t}\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"found cert in store, but did not find private key: %q\", name)\n\t}\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, cert.Certificate.DNSNames...)\n\talternateNames = append(alternateNames, cert.Certificate.EmailAddresses...)\n\tfor _, ip := range cert.Certificate.IPAddresses {\n\t\talternateNames = append(alternateNames, ip.String())\n\t}\n\tsort.Strings(alternateNames)\n\n\tactual := &Keypair{\n\t\tName: &name,\n\t\tAlternateNames: alternateNames,\n\t\tSubject: pkixNameToString(&cert.Subject),\n\t\tType: buildTypeDescription(cert.Certificate),\n\t\tFormat: string(format),\n\t}\n\n\tactual.Signer = &Keypair{Subject: pkixNameToString(&cert.Certificate.Issuer)}\n\n\t\/\/ Avoid spurious changes\n\tactual.Lifecycle = e.Lifecycle\n\n\treturn actual, nil\n}\n\nfunc (e *Keypair) Run(c *fi.Context) error {\n\terr := e.normalize(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *Keypair) normalize(c *fi.Context) error {\n\tvar alternateNames []string\n\n\tfor _, s := range e.AlternateNames {\n\t\ts = strings.TrimSpace(s)\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\talternateNames = append(alternateNames, s)\n\t}\n\n\tfor _, task := range e.AlternateNameTasks {\n\t\tif hasAddress, ok := task.(fi.HasAddress); ok {\n\t\t\taddress, err := hasAddress.FindIPAddress(c)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error finding address for %v: %v\", task, err)\n\t\t\t}\n\t\t\tif address == nil {\n\t\t\t\tglog.Warningf(\"Task did not have an address: %v\", task)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tglog.V(8).Infof(\"Resolved alternateName %q for %q\", *address, task)\n\t\t\talternateNames = append(alternateNames, *address)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unsupported type for AlternateNameDependencies: %v\", task)\n\t\t}\n\t}\n\n\tsort.Strings(alternateNames)\n\te.AlternateNames = alternateNames\n\te.AlternateNameTasks = nil\n\n\treturn nil\n}\n\nfunc (_ *Keypair) CheckChanges(a, e, changes *Keypair) error {\n\tif a != nil {\n\t\tif changes.Name != nil {\n\t\t\treturn fi.CannotChangeField(\"Name\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn fi.RequiredField(\"Name\")\n\t}\n\n\ttemplate, err := e.BuildCertificateTemplate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchangeStoredFormat := false\n\tcreateCertificate := false\n\tif a == nil {\n\t\tcreateCertificate = true\n\t\tglog.V(8).Infof(\"creating brand new certificate\")\n\t} else if changes != nil {\n\t\tglog.V(8).Infof(\"creating certificate as changes are not nil\")\n\t\tif changes.AlternateNames != nil {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new AlternateNames\")\n\t\t} else if changes.Subject != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new Subject\")\n\t\t} else if changes.Type != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tglog.V(8).Infof(\"creating certificate new Type\")\n\t\t} else if changes.Format != \"\" {\n\t\t\tchangeStoredFormat = true\n\t\t} else {\n\t\t\tglog.Warningf(\"Ignoring changes in key: %v\", fi.DebugAsJsonString(changes))\n\t\t}\n\t}\n\n\tif createCertificate {\n\t\tglog.V(2).Infof(\"Creating PKI keypair %q\", name)\n\n\t\tcert, privateKey, _, err := c.Keystore.FindKeypair(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We always reuse the private key if it exists,\n\t\t\/\/ if we change keys we often have to regenerate e.g. the service accounts\n\t\t\/\/ TODO: Eventually rotate keys \/ don't always reuse?\n\t\tif privateKey == nil {\n\t\t\tglog.V(2).Infof(\"Creating privateKey %q\", name)\n\n\t\t\tprivateKey, err = pki.GeneratePrivateKey()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tsigner := fi.CertificateId_CA\n\t\tif e.Signer != nil {\n\t\t\tsigner = fi.StringValue(e.Signer.Name)\n\t\t}\n\n\t\tcert, err = c.Keystore.CreateKeypair(signer, name, template, privateKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.V(8).Infof(\"created certificate with cn=%s\", cert.Subject.CommonName)\n\t}\n\n\t\/\/ TODO: Check correct subject \/ flags\n\n\tif changeStoredFormat {\n\t\t\/\/ We fetch and reinsert the same keypair, forcing an update to our preferred format\n\t\t\/\/ TODO: We're assuming that we want to save in the preferred format\n\t\tcert, privateKey, _, err := c.Keystore.FindKeypair(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.Keystore.StoreKeypair(name, cert, privateKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.Infof(\"updated Keypair %q to API format %q\", name, e.Format)\n\t}\n\n\treturn nil\n}\n\n\/\/ BuildCertificateTemplate is responsible for constructing a certificate template\nfunc (e *Keypair) BuildCertificateTemplate() (*x509.Certificate, error) {\n\ttemplate, err := buildCertificateTemplateForType(e.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsubjectPkix, err := parsePkixName(e.Subject)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing Subject: %v\", err)\n\t}\n\n\tif len(subjectPkix.ToRDNSequence()) == 0 {\n\t\treturn nil, fmt.Errorf(\"Subject name was empty for SSL keypair %q\", e.Name)\n\t}\n\n\ttemplate.Subject = *subjectPkix\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, e.AlternateNames...)\n\n\tfor _, san := range alternateNames {\n\t\tsan = strings.TrimSpace(san)\n\t\tif san == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif ip := net.ParseIP(san); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, san)\n\t\t}\n\t}\n\n\treturn template, nil\n}\n\nfunc buildCertificateTemplateForType(certificateType string) (*x509.Certificate, error) {\n\tif expanded, found := wellKnownCertificateTypes[certificateType]; found {\n\t\tcertificateType = expanded\n\t}\n\n\ttemplate := &x509.Certificate{\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: false,\n\t}\n\n\ttokens := strings.Split(certificateType, \",\")\n\tfor _, t := range tokens {\n\t\tif strings.HasPrefix(t, \"KeyUsage\") {\n\t\t\tku, found := parseKeyUsage(t)\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %v\", t)\n\t\t\t}\n\t\t\ttemplate.KeyUsage |= ku\n\t\t} else if strings.HasPrefix(t, \"ExtKeyUsage\") {\n\t\t\tku, found := parseExtKeyUsage(t)\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %v\", t)\n\t\t\t}\n\t\t\ttemplate.ExtKeyUsage = append(template.ExtKeyUsage, ku)\n\t\t} else if t == \"CA\" {\n\t\t\ttemplate.IsCA = true\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"unrecognized certificate option: %q\", t)\n\t\t}\n\t}\n\n\treturn template, nil\n}\n\n\/\/ buildTypeDescription extracts the type based on the certificate extensions\nfunc buildTypeDescription(cert *x509.Certificate) string {\n\tvar options []string\n\n\tif cert.IsCA {\n\t\toptions = append(options, \"CA\")\n\t}\n\n\toptions = append(options, keyUsageToString(cert.KeyUsage)...)\n\n\tfor _, extKeyUsage := range cert.ExtKeyUsage {\n\t\toptions = append(options, extKeyUsageToString(extKeyUsage))\n\t}\n\n\tsort.Strings(options)\n\ts := strings.Join(options, \",\")\n\n\tfor k, v := range wellKnownCertificateTypes {\n\t\tif v == s {\n\t\t\ts = k\n\t\t}\n\t}\n\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\n\tpb \"github.com\/shijuvar\/go-recipes\/grpc\/customer\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\tport = \":50051\"\n)\n\n\/\/ server is used to implement customer.CustomerServer.\ntype server struct {\n\tsavedCustomers []*pb.CustomerRequest\n}\n\n\/\/ CreateCustomer creates a new Customer\nfunc (s *server) CreateCustomer(ctx context.Context, in *pb.CustomerRequest) (*pb.CustomerResponse, error) {\n\ts.savedCustomers = append(s.savedCustomers, in)\n\treturn &pb.CustomerResponse{Id: in.Id, Success: true}, nil\n}\n\n\/\/ GetCustomers returns all customers by given filter\nfunc (s *server) GetCustomers(filter *pb.CustomerFilter, stream pb.Customer_GetCustomersServer) error {\n\tfor _, customer := range s.savedCustomers {\n\t\tif filter.Keyword != \"\" {\n\t\t\tif !strings.Contains(customer.Name, filter.Keyword) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := stream.Send(customer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\t\/\/ Creates a new gRPC server\n\ts := grpc.NewServer()\n\tpb.RegisterCustomerServer(s, &server{})\n\ts.Serve(lis)\n}\n<commit_msg>Minor modification<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/shijuvar\/go-recipes\/grpc\/customer\"\n)\n\nconst (\n\tport = \":50051\"\n)\n\n\/\/ server is used to implement customer.CustomerServer.\ntype server struct {\n\tsavedCustomers []*pb.CustomerRequest\n}\n\n\/\/ CreateCustomer creates a new Customer\nfunc (s *server) CreateCustomer(ctx context.Context, in *pb.CustomerRequest) (*pb.CustomerResponse, error) {\n\ts.savedCustomers = append(s.savedCustomers, in)\n\treturn &pb.CustomerResponse{Id: in.Id, Success: true}, nil\n}\n\n\/\/ GetCustomers returns all customers by given filter\nfunc (s *server) GetCustomers(filter *pb.CustomerFilter, stream pb.Customer_GetCustomersServer) error {\n\tfor _, customer := range s.savedCustomers {\n\t\tif filter.Keyword != \"\" {\n\t\t\tif !strings.Contains(customer.Name, filter.Keyword) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := stream.Send(customer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\t\/\/ Creates a new gRPC server\n\ts := grpc.NewServer()\n\tpb.RegisterCustomerServer(s, &server{})\n\ts.Serve(lis)\n}\n<|endoftext|>"} {"text":"<commit_before>package workers\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\togoutils \"github.com\/Odinman\/ogo\/utils\"\n\t\"github.com\/Odinman\/omq\/utils\"\n\tzmq \"github.com\/pebbe\/zmq4\"\n)\n\n\/* {{{ func connectQueue() (*zmq.Socket, *zmq.Poller)\n * Helper function that returns a new configured socket\n * connected to the Paranoid Pirate queue\n *\/\nfunc (w *OmqWorker) connectQueue() (*zmq.Socket, *zmq.Poller) {\n\tsoc, _ := zmq.NewSocket(zmq.DEALER)\n\tsoc.Connect(\"inproc:\/\/backend\")\n\n\t\/\/ Tell queue we're ready for work\n\tsoc.Send(PPP_READY, 0)\n\n\tpoller := zmq.NewPoller()\n\tpoller.Add(soc, zmq.POLLIN)\n\n\treturn soc, poller\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) newResponser(i int)\n * 回复节点\n *\/\nfunc (w *OmqWorker) newResponser(i int) {\n\tnode, poller := w.connectQueue()\n\tw.Trace(\"%d node ready\", i)\n\n\t\/\/ If liveness hits zero, queue is considered disconnected\n\tliveness := HEARTBEAT_LIVENESS\n\tinterval := INTERVAL_INIT\n\n\t\/\/ Send out heartbeats at regular intervals\n\theartbeat_at := time.Tick(HEARTBEAT_INTERVAL)\n\n\tlastCycles := 0\n\tfor cycles := 0; true; {\n\t\tsockets, err := poller.Poll(HEARTBEAT_INTERVAL)\n\t\tif err != nil {\n\t\t\tw.Error(\"polling error: %s\", err)\n\t\t\tbreak \/\/ Interrupted\n\t\t}\n\n\t\tif len(sockets) == 1 {\n\t\t\t\/\/ Get message\n\t\t\t\/\/ - n-part envelope + content -> request\n\t\t\t\/\/ - 1-part HEARTBEAT -> heartbeat\n\t\t\tmsg, err := node.RecvMessage(0)\n\t\t\tif err != nil {\n\t\t\t\tw.Error(\"recv error: %s\", err)\n\t\t\t\tbreak \/\/ Interrupted\n\t\t\t}\n\n\t\t\tclient, cmd := utils.Unwrap(msg)\n\t\t\tif len(msg) >= 4 { \/\/命令应该大于5帧(包含信封以及空帧)\n\t\t\t\tcycles++\n\n\t\t\t\tw.Trace(\"recv cmd: %s, from client: %q\", cmd, client)\n\n\t\t\t\tact := strings.ToUpper(cmd[0])\n\t\t\t\tkey := cmd[1]\n\t\t\t\tswitch act {\n\t\t\t\tcase COMMAND_GET, COMMAND_TIMING: \/\/获取key内容\n\t\t\t\t\tif r, err := w.localGet(cmd); err != nil {\n\t\t\t\t\t\tw.Debug(\"error: %s\", err)\n\t\t\t\t\t\tif err == ErrNil {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_NIL) \/\/没有内容,返回空\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Trace(\"response: %s, len: %d\", r, len(r))\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, r) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_SET, COMMAND_DEL, COMMAND_SCHEDULE: \/\/key-value命令\n\t\t\t\t\t\/\/ 存到本地存储(同步)\n\t\t\t\t\t\/\/回复结果(带信封, 否则找不到发送者), 因为是异步的, 可以先回复, 再做事\n\t\t\t\t\tif err := w.localStorage(cmd); err != nil {\n\t\t\t\t\t\tw.Debug(\"error: %s\", err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ 发布(目标是跨IDC多点发布)\n\t\t\t\t\tpublisher.SendMessage(cmd)\n\n\t\t\t\tcase COMMAND_PUSH, COMMAND_TASK: \/\/任务队列命令\n\t\t\t\t\tvalue := cmd[2:]\n\t\t\t\t\tif err := mqpool.Push(key, value); err == nil {\n\t\t\t\t\t\tw.Debug(\"push %s successful\", key)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Debug(\"push %s failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error())\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_BTASK: \/\/阻塞任务队列命令\n\t\t\t\t\tvalue := cmd[2:]\n\t\t\t\t\ttaskId := ogoutils.NewShortUUID()\n\t\t\t\t\tvalue = append([]string{taskId}, value...) \/\/放前面\n\t\t\t\t\tif err := mqpool.Push(key, value); err == nil {\n\t\t\t\t\t\tw.Debug(\"push block task %s successful, task id: %s\", key, taskId)\n\t\t\t\t\t\tblockTasks[taskId] = make(chan string, 1)\n\t\t\t\t\t\tbto := time.Tick(BTASK_TIMEOUT)\n\t\t\t\t\t\t\/\/go w.newBlocker(client)\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-bto: \/\/超时\n\t\t\t\t\t\t\tw.Info(\"waiting time out\")\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\tcase result := <-blockTasks[taskId]:\n\t\t\t\t\t\t\tw.Debug(\"block task result: %s\", result)\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, result)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete(blockTasks, taskId)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Debug(\"push %s failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error())\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_COMPLETE: \/\/ 完成阻塞任务\n\t\t\t\t\tif len(cmd) > 3 {\n\t\t\t\t\t\tif taskId := cmd[2]; taskId != \"\" {\n\t\t\t\t\t\t\tif _, ok := blockTasks[taskId]; ok {\n\t\t\t\t\t\t\t\tblockTasks[taskId] <- cmd[3]\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_POP:\n\t\t\t\t\t\/\/cmd, _ := mqueuer.RecvMessage(0)\n\t\t\t\t\tif value, err := mqpool.Pop(key); err == nil {\n\t\t\t\t\t\tw.Debug(\"pop value from mqueue: %s\", value)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, value) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t} else if err.Error() == RESPONSE_NIL {\n\t\t\t\t\t\tw.Trace(\"pop %s nil: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_NIL) \/\/没有内容,返回空\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Trace(\"pop %s from mqueue failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ unknown action\n\t\t\t\t\tw.Info(\"unkown action: %s\", act)\n\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t\t}\n\n\t\t\t\t\/\/ 回满血, 结束\n\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t} else if len(msg) == 1 {\n\t\t\t\t\/\/ When we get a heartbeat message from the queue, it means the\n\t\t\t\t\/\/ queue was (recently) alive, so reset our liveness indicator:\n\t\t\t\tif msg[0] == PPP_HEARTBEAT {\n\t\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t\t} else {\n\t\t\t\t\tw.Info(\"invalid message: %q\", msg)\n\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.Info(\"invalid message: %q\", msg)\n\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t}\n\t\t\tinterval = INTERVAL_INIT\n\t\t} else {\n\t\t\t\/\/ If the queue hasn't sent us heartbeats in a while, destroy the\n\t\t\t\/\/ socket and reconnect. This is the simplest most brutal way of\n\t\t\t\/\/ discarding any messages we might have sent in the meantime:\/\/\n\t\t\tliveness--\n\t\t\tif liveness == 0 {\n\t\t\t\tw.Debug(\"W: heartbeat failure, can't reach queue, reconnecting in %s\", interval)\n\n\t\t\t\ttime.Sleep(interval)\n\n\t\t\t\tif interval < INTERVAL_MAX { \/\/每次重试都加大重试间隔\n\t\t\t\t\tinterval = 2 * interval\n\t\t\t\t}\n\t\t\t\t\/\/ reconnect\n\t\t\t\tnode, poller = w.connectQueue()\n\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Send heartbeat to queue if it's time\n\t\tselect {\n\t\tcase <-heartbeat_at:\n\t\t\tif cycles > lastCycles {\n\t\t\t\tw.Trace(\"node%d worked cycles: %d\", i, cycles)\n\t\t\t\tlastCycles = cycles\n\t\t\t}\n\t\t\tnode.Send(PPP_HEARTBEAT, 0)\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) newBlocker(client string)\n * 异步阻塞节点\n *\/\nfunc (w *OmqWorker) newBlocker(client string) {\n\t\/\/node, _ := w.connectQueue()\n\t\/\/w.Trace(\"block node for client: %q\", client)\n\n\t\/\/ Send out heartbeats at regular intervals\n\theartbeat_at := time.Tick(5 * time.Second)\n\n\t\/\/ Send heartbeat to queue if it's time\n\tselect {\n\tcase <-heartbeat_at:\n\t\tw.Debug(\"recv taskman response\")\n\t\t\/\/node.SendMessage(client, \"\", \"BTASK\")\n\t}\n}\n\n\/* }}} *\/\n<commit_msg>update<commit_after>package workers\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\togoutils \"github.com\/Odinman\/ogo\/utils\"\n\t\"github.com\/Odinman\/omq\/utils\"\n\tzmq \"github.com\/pebbe\/zmq4\"\n)\n\n\/* {{{ func connectQueue() (*zmq.Socket, *zmq.Poller)\n * Helper function that returns a new configured socket\n * connected to the Paranoid Pirate queue\n *\/\nfunc (w *OmqWorker) connectQueue() (*zmq.Socket, *zmq.Poller) {\n\tsoc, _ := zmq.NewSocket(zmq.DEALER)\n\tsoc.Connect(\"inproc:\/\/backend\")\n\n\t\/\/ Tell queue we're ready for work\n\tsoc.Send(PPP_READY, 0)\n\n\tpoller := zmq.NewPoller()\n\tpoller.Add(soc, zmq.POLLIN)\n\n\treturn soc, poller\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) newResponser(i int)\n * 回复节点\n *\/\nfunc (w *OmqWorker) newResponser(i int) {\n\tnode, poller := w.connectQueue()\n\tw.Trace(\"%d node ready\", i)\n\n\t\/\/ If liveness hits zero, queue is considered disconnected\n\tliveness := HEARTBEAT_LIVENESS\n\tinterval := INTERVAL_INIT\n\n\t\/\/ Send out heartbeats at regular intervals\n\theartbeat_at := time.Tick(HEARTBEAT_INTERVAL)\n\n\tlastCycles := 0\n\tfor cycles := 0; true; {\n\t\tsockets, err := poller.Poll(HEARTBEAT_INTERVAL)\n\t\tif err != nil {\n\t\t\tw.Error(\"polling error: %s\", err)\n\t\t\tbreak \/\/ Interrupted\n\t\t}\n\n\t\tif len(sockets) == 1 {\n\t\t\t\/\/ Get message\n\t\t\t\/\/ - n-part envelope + content -> request\n\t\t\t\/\/ - 1-part HEARTBEAT -> heartbeat\n\t\t\tmsg, err := node.RecvMessage(0)\n\t\t\tif err != nil {\n\t\t\t\tw.Error(\"recv error: %s\", err)\n\t\t\t\tbreak \/\/ Interrupted\n\t\t\t}\n\n\t\t\tclient, cmd := utils.Unwrap(msg)\n\t\t\tif len(msg) >= 4 { \/\/命令应该大于5帧(包含信封以及空帧)\n\t\t\t\tcycles++\n\n\t\t\t\tw.Trace(\"recv cmd: %s, from client: %q\", cmd, client)\n\n\t\t\t\tact := strings.ToUpper(cmd[0])\n\t\t\t\tkey := cmd[1]\n\t\t\t\tswitch act {\n\t\t\t\tcase COMMAND_GET, COMMAND_TIMING: \/\/获取key内容\n\t\t\t\t\tif r, err := w.localGet(cmd); err != nil {\n\t\t\t\t\t\tw.Debug(\"error: %s\", err)\n\t\t\t\t\t\tif err == ErrNil {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_NIL) \/\/没有内容,返回空\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Trace(\"response: %s, len: %d\", r, len(r))\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, r) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_SET, COMMAND_DEL, COMMAND_SCHEDULE: \/\/key-value命令\n\t\t\t\t\t\/\/ 存到本地存储(同步)\n\t\t\t\t\t\/\/回复结果(带信封, 否则找不到发送者), 因为是异步的, 可以先回复, 再做事\n\t\t\t\t\tif err := w.localStorage(cmd); err != nil {\n\t\t\t\t\t\tw.Debug(\"error: %s\", err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ 发布(目标是跨IDC多点发布)\n\t\t\t\t\tpublisher.SendMessage(cmd)\n\n\t\t\t\tcase COMMAND_PUSH, COMMAND_TASK: \/\/任务队列命令\n\t\t\t\t\tvalue := cmd[2:]\n\t\t\t\t\tif err := mqpool.Push(key, value); err == nil {\n\t\t\t\t\t\tw.Debug(\"push %s successful\", key)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Debug(\"push %s failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error())\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_BTASK: \/\/阻塞任务队列命令\n\t\t\t\t\tvalue := cmd[2:]\n\t\t\t\t\ttaskId := ogoutils.NewShortUUID()\n\t\t\t\t\tvalue = append([]string{taskId}, value...) \/\/放前面\n\t\t\t\t\tif err := mqpool.Push(key, value); err == nil {\n\t\t\t\t\t\tw.Debug(\"push block task %s successful, task id: %s\", key, taskId)\n\t\t\t\t\t\tblockTasks[taskId] = make(chan string, 1)\n\t\t\t\t\t\tbto := time.Tick(BTASK_TIMEOUT)\n\t\t\t\t\t\t\/\/go w.newBlocker(client)\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-bto: \/\/超时\n\t\t\t\t\t\t\tw.Info(\"waiting time out\")\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\tcase result := <-blockTasks[taskId]:\n\t\t\t\t\t\t\tw.Debug(\"block task result: %s\", result)\n\t\t\t\t\t\t\tif result == \"0\" {\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, result)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete(blockTasks, taskId)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Debug(\"push %s failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error())\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_COMPLETE: \/\/ 完成阻塞任务\n\t\t\t\t\tif len(cmd) > 3 {\n\t\t\t\t\t\tif taskId := cmd[2]; taskId != \"\" {\n\t\t\t\t\t\t\tif _, ok := blockTasks[taskId]; ok {\n\t\t\t\t\t\t\t\tblockTasks[taskId] <- cmd[3]\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR)\n\t\t\t\t\t}\n\t\t\t\tcase COMMAND_POP:\n\t\t\t\t\t\/\/cmd, _ := mqueuer.RecvMessage(0)\n\t\t\t\t\tif value, err := mqpool.Pop(key); err == nil {\n\t\t\t\t\t\tw.Debug(\"pop value from mqueue: %s\", value)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_OK, value) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t} else if err.Error() == RESPONSE_NIL {\n\t\t\t\t\t\tw.Trace(\"pop %s nil: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_NIL) \/\/没有内容,返回空\n\t\t\t\t\t} else {\n\t\t\t\t\t\tw.Trace(\"pop %s from mqueue failed: %s\", key, err)\n\t\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_ERROR, err.Error()) \/\/回复REQ,因此要加上一个空帧\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ unknown action\n\t\t\t\t\tw.Info(\"unkown action: %s\", act)\n\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t\t}\n\n\t\t\t\t\/\/ 回满血, 结束\n\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t} else if len(msg) == 1 {\n\t\t\t\t\/\/ When we get a heartbeat message from the queue, it means the\n\t\t\t\t\/\/ queue was (recently) alive, so reset our liveness indicator:\n\t\t\t\tif msg[0] == PPP_HEARTBEAT {\n\t\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t\t} else {\n\t\t\t\t\tw.Info(\"invalid message: %q\", msg)\n\t\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.Info(\"invalid message: %q\", msg)\n\t\t\t\tnode.SendMessage(client, \"\", RESPONSE_UNKNOWN)\n\t\t\t}\n\t\t\tinterval = INTERVAL_INIT\n\t\t} else {\n\t\t\t\/\/ If the queue hasn't sent us heartbeats in a while, destroy the\n\t\t\t\/\/ socket and reconnect. This is the simplest most brutal way of\n\t\t\t\/\/ discarding any messages we might have sent in the meantime:\/\/\n\t\t\tliveness--\n\t\t\tif liveness == 0 {\n\t\t\t\tw.Debug(\"W: heartbeat failure, can't reach queue, reconnecting in %s\", interval)\n\n\t\t\t\ttime.Sleep(interval)\n\n\t\t\t\tif interval < INTERVAL_MAX { \/\/每次重试都加大重试间隔\n\t\t\t\t\tinterval = 2 * interval\n\t\t\t\t}\n\t\t\t\t\/\/ reconnect\n\t\t\t\tnode, poller = w.connectQueue()\n\t\t\t\tliveness = HEARTBEAT_LIVENESS\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Send heartbeat to queue if it's time\n\t\tselect {\n\t\tcase <-heartbeat_at:\n\t\t\tif cycles > lastCycles {\n\t\t\t\tw.Trace(\"node%d worked cycles: %d\", i, cycles)\n\t\t\t\tlastCycles = cycles\n\t\t\t}\n\t\t\tnode.Send(PPP_HEARTBEAT, 0)\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/* }}} *\/\n\n\/* {{{ func (w *OmqWorker) newBlocker(client string)\n * 异步阻塞节点\n *\/\nfunc (w *OmqWorker) newBlocker(client string) {\n\t\/\/node, _ := w.connectQueue()\n\t\/\/w.Trace(\"block node for client: %q\", client)\n\n\t\/\/ Send out heartbeats at regular intervals\n\theartbeat_at := time.Tick(5 * time.Second)\n\n\t\/\/ Send heartbeat to queue if it's time\n\tselect {\n\tcase <-heartbeat_at:\n\t\tw.Debug(\"recv taskman response\")\n\t\t\/\/node.SendMessage(client, \"\", \"BTASK\")\n\t}\n}\n\n\/* }}} *\/\n<|endoftext|>"} {"text":"<commit_before>package eve\n\n\/\/ Writer allows to write data to the EVE memory.\ntype Writer struct {\n\td *Driver\n}\n\nfunc (w Writer) align32() {\n\td := w.d\n\tm := (len(d.buf) + d.n) & 3\n\tif m == 0 {\n\t\treturn\n\t}\n\tm = len(d.buf) + 4 - m\n\tif m > cap(d.buf) {\n\t\tm -= len(d.buf)\n\t\td.flush()\n\t}\n\td.buf = d.buf[:m]\n}\n\nfunc (w Writer) wr32(u uint32) {\n\td := w.d\n\tif len(d.buf)+4 > cap(d.buf) {\n\t\td.flush()\n\t}\n\tn := len(d.buf)\n\td.buf = d.buf[:n+4]\n\td.buf[n] = byte(u)\n\td.buf[n+1] = byte(u >> 8)\n\td.buf[n+2] = byte(u >> 16)\n\td.buf[n+3] = byte(u >> 24)\n}\n\nfunc (w Writer) aw32(u uint32) {\n\tw.align32()\n\tw.wr32(u)\n}\n\nfunc (w Writer) Write32(s ...uint32) {\n\tw.align32()\n\tfor _, u := range s {\n\t\tw.wr32(u)\n\t}\n}\n\nfunc (w Writer) Write(s []byte) (int, error) {\n\td := w.d\n\tif len(s) >= cap(d.buf) {\n\t\tif len(d.buf) > 0 {\n\t\t\td.flush()\n\t\t}\n\t\td.dci.Write(s)\n\t\td.n += len(s)\n\t} else {\n\t\tn := copy(d.buf[:cap(d.buf)], s)\n\t\td.buf = d.buf[:len(d.buf)+n]\n\t\tif n < len(s) {\n\t\t\td.flush()\n\t\t\td.buf = d.buf[:len(s)-n]\n\t\t\tcopy(d.buf, s[n:])\n\t\t}\n\t}\n\t\/\/ Write always succeeds. There is no case for infinite write transaction\n\t\/\/ so Driver.Err can be called after all writes.\n\treturn len(s), nil\n}\n\nfunc (w Writer) WriteString(s string) (int, error) {\n\td := w.d\n\tn := len(s)\n\tfor len(s) != 0 {\n\t\tif len(d.buf) == cap(d.buf) {\n\t\t\td.flush()\n\t\t}\n\t\tm := len(d.buf)\n\t\tc := copy(d.buf[m:cap(d.buf)], s)\n\t\td.buf = d.buf[:m+c]\n\t\ts = s[c:]\n\t}\n\treturn n, nil\n}\n\n\/\/ Close closes the write transaction and returns number of bytes written.\nfunc (w Writer) Close() int {\n\treturn w.d.end()\n}\n<commit_msg>display\/eve: Fix bug in Writer.Write.<commit_after>package eve\n\n\/\/ Writer allows to write data to the EVE memory.\ntype Writer struct {\n\td *Driver\n}\n\nfunc (w Writer) align32() {\n\td := w.d\n\tm := (len(d.buf) + d.n) & 3\n\tif m == 0 {\n\t\treturn\n\t}\n\tm = len(d.buf) + 4 - m\n\tif m > cap(d.buf) {\n\t\tm -= len(d.buf)\n\t\td.flush()\n\t}\n\td.buf = d.buf[:m]\n}\n\nfunc (w Writer) wr8(b byte) {\n\td := w.d\n\tif len(d.buf) == cap(d.buf) {\n\t\td.flush()\n\t}\n\tn := len(d.buf)\n\td.buf = d.buf[:n+1]\n\td.buf[n] = b\n}\n\nfunc (w Writer) wr32(u uint32) {\n\td := w.d\n\tif len(d.buf)+4 > cap(d.buf) {\n\t\td.flush()\n\t}\n\tn := len(d.buf)\n\td.buf = d.buf[:n+4]\n\td.buf[n] = byte(u)\n\td.buf[n+1] = byte(u >> 8)\n\td.buf[n+2] = byte(u >> 16)\n\td.buf[n+3] = byte(u >> 24)\n}\n\nfunc (w Writer) aw32(u uint32) {\n\tw.align32()\n\tw.wr32(u)\n}\n\nfunc (w Writer) Write8(s ...byte) {\n\tif len(s) == 1 {\n\t\tw.wr8(s[0])\n\t\treturn\n\t}\n\tif len(s) == 0 {\n\t\treturn\n\t}\n\td := w.d\n\tif len(s) >= cap(d.buf) {\n\t\tif len(d.buf) > 0 {\n\t\t\td.flush()\n\t\t}\n\t\td.dci.Write(s)\n\t\td.n += len(s)\n\t\treturn\n\t}\n\tn := len(d.buf)\n\tm := copy(d.buf[n:cap(d.buf)], s)\n\td.buf = d.buf[:n+m]\n\tif m < len(s) {\n\t\ts = s[m:]\n\t\td.flush()\n\t\td.buf = d.buf[:len(s)]\n\t\tcopy(d.buf, s)\n\t}\n}\n\nfunc (w Writer) Write32(s ...uint32) {\n\tw.align32()\n\tfor _, u := range s {\n\t\tw.wr32(u)\n\t}\n}\n\nfunc (w Writer) WriteInt(s ...int) {\n\tw.align32()\n\tfor _, i := range s {\n\t\tw.wr32(uint32(i))\n\t}\n}\n\nfunc (w Writer) Write(s []byte) (int, error) {\n\tw.Write8(s...)\n\treturn len(s), nil\n\t\/\/ BUG?: Write always succeeds. Rationale: there is no case for infinite\n\t\/\/ write transaction so Driver.Err can be called after all writes.\n}\n\nfunc (w Writer) wrs(s string) {\n\td := w.d\n\tfor len(s) != 0 {\n\t\tif len(d.buf) == cap(d.buf) {\n\t\t\td.flush()\n\t\t}\n\t\tn := len(d.buf)\n\t\tc := copy(d.buf[n:cap(d.buf)], s)\n\t\td.buf = d.buf[:n+c]\n\t\ts = s[c:]\n\t}\n}\n\nfunc (w Writer) WriteString(s string) (int, error) {\n\tw.wrs(s)\n\treturn len(s), nil\n\t\/\/ BUG?: WriteString always succeeds. Rationale: there is no case for\n\t\/\/ infinite write transaction so Driver.Err can be called after all writes.\n}\n\n\/\/ Close closes the write transaction and returns number of bytes written.\nfunc (w Writer) Close() int {\n\treturn w.d.end()\n}\n<|endoftext|>"} {"text":"<commit_before>package gpio\n\ntype Mode byte\n\nconst (\n\tIn Mode = 0\n\tInOut Mode = 1\n\tDiscon Mode = 2\n\tOut Mode = 3\n)\n\ntype Pull byte\n\nconst (\n\tNoPull Pull = 0\n\tPullDown Pull = 1\n\tPullUp Pull = 3\n)\n\ntype Config struct {\n\tMode Mode\n\tPull Pull\n}\n\nfunc (p *Port) SetupPin(n int, cfg *Config) {\n\tp.pincnf[n].Store(uint32(cfg.Pull)<<2 | uint32(cfg.Mode))\n}\n\n\/\/ Setup configures pins.\nfunc (p *Port) Setup(pins Pins, cfg *Config) {\n\tfor n := 0; n < 32; n++ {\n\t\tif pins&(1<<uint(n)) != 0 {\n\t\t\tp.SetupPin(n, cfg)\n\t\t}\n\t}\n}\n<commit_msg>nrf5\/hal\/gpio: More configuration options.<commit_after>package gpio\n\ntype Mode byte\n\nconst (\n\tIn Mode = 0\n\tInOut Mode = 1\n\tDiscon Mode = 2\n\tOut Mode = 3\n)\n\ntype Pull byte\n\nconst (\n\tNoPull Pull = 0\n\tPullDown Pull = 1\n\tPullUp Pull = 3\n)\n\ntype Drive byte\n\nconst (\n\tS0S1 Drive = 0 \/\/ Standard 0, standard 1.\n\tH0S1 Drive = 1 \/\/ High drive 0, standard 1.\n\tS0H1 Drive = 2 \/\/ Standard 0, high drive 1.\n\tH0H1 Drive = 3 \/\/ High drive 0, high drive 1.\n\tD0S1 Drive = 4 \/\/ Disconnect 0, standard 1.\n\tD0H1 Drive = 5 \/\/ Disconnect 0, high drive 1.\n\tS0D1 Drive = 6 \/\/ Standard 0, disconnect 1.\n\tH0D1 Drive = 7 \/\/ High drive 0, disconnect 1.\n)\n\ntype Sense byte\n\nconst (\n\tNoSense Sense = 0\n\tSenseHigh Sense = 2\n\tSenseLow Sense = 3\n)\n\ntype Config struct {\n\tMode Mode\n\tPull Pull\n\tDrive Drive\n\tSense Sense\n}\n\nfunc (p *Port) SetupPin(n int, cfg *Config) {\n\tp.pincnf[n].Store(\n\t\tuint32(cfg.Sense)<<16 | uint32(cfg.Drive)<<8 | uint32(cfg.Pull)<<2 |\n\t\t\tuint32(cfg.Mode),\n\t)\n}\n\n\/\/ Setup configures pins.\nfunc (p *Port) Setup(pins Pins, cfg *Config) {\n\tfor n := 0; n < 32; n++ {\n\t\tif pins&(1<<uint(n)) != 0 {\n\t\t\tp.SetupPin(n, cfg)\n\t\t}\n\t}\n}\n\n\/\/ SetDirIn allows a fast change of direction to input for specified pins.\nfunc (p *Port) SetDirIn(pins Pins) {\n\tp.dirset.Store(uint32(pins))\n}\n\n\/\/ SetDirOut allows afast change of direction to output for specified pins.\nfunc (p *Port) SetDirOut(pins Pins) {\n\tp.dirclr.Store(uint32(pins))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ppipwm allows to produce PWM signal using PPI, TIMER and GPIOTE\n\/\/ peripherals.\npackage ppipwm\n\nimport (\n\t\"nrf5\/hal\/gpio\"\n\t\"nrf5\/hal\/gpiote\"\n\t\"nrf5\/hal\/ppi\"\n\t\"nrf5\/hal\/timer\"\n)\n\n\/\/ Toggle implementats three channel PWM usind GPIOTE OUT task configured in\n\/\/ toggle mode. It is designed specifically for nRF51 chips because of\n\/\/ limitations of their GPIOTE peripheral. Better solutions exists for nRF52.\n\/\/\n\/\/ To produce desired PWM waveform Toggle configures a timer compare register,\n\/\/ using simple algorithm, that usually generates glitches when used on working\n\/\/ PWM channel. After that, PWM works without any CPU intervention and produces\n\/\/ proper waveform until next duty cycle change. This is fine to drive LEDs but\n\/\/ can cause trubles in case of receivers that rely on stable PWM frequency or\n\/\/ phase (eg. some servos). Correct implementation is difficult and quite\n\/\/ expensive in case of nRF51.\n\/\/\n\/\/ Toggle cannot be used concurently by multiple gorutines without proper\n\/\/ synchronisation.\ntype Toggle struct {\n\tt *timer.Periph\n\tgc [3]gpiote.Chan\n}\n\n\/\/ MakeToggle returns configured Toggle implementation of PPI based PWM using\n\/\/ timer t.\nfunc MakeToggle(t *timer.Periph) Toggle {\n\tt.Task(timer.STOP).Trigger()\n\tt.StoreSHORTS(timer.COMPARE3_CLEAR)\n\treturn Toggle{t: t}\n}\n\n\/\/ NewToggle provides convenient way to create heap allocated Toggle struct. See\n\/\/ MakeToggle for more information.\nfunc NewToggle(t *timer.Periph) *Toggle {\n\tpwm := new(Toggle)\n\t*pwm = MakeToggle(t)\n\treturn pwm\n}\n\n\/\/ SetFreq sets prescaler to 2^log2pre and period to periodus microseconds. It\n\/\/ allows to configure a duty cycle with resolution = 16 * periodus \/ 2^log2pre.\n\/\/ SetFreq returns (resolution-1), which is a value that should be passed to\n\/\/ SetDuty\/SetInvDuty to produce PWM with 100% duty cycle.\nfunc (pwm *Toggle) SetFreq(log2pre, periodus int) int {\n\tif uint(log2pre) > 9 {\n\t\tpanic(\"pwm: bad prescaler\")\n\t}\n\tif periodus < 1 {\n\t\tpanic(\"pwm: bad period\")\n\t}\n\tt := pwm.t\n\tt.StorePRESCALER(log2pre)\n\tdiv := uint32(1) << uint(log2pre)\n\tmax := 16*uint32(periodus)\/div - 1\n\tif max > 0xFFFF {\n\t\tpanic(\"pwm: max>0xFFFF\")\n\t}\n\tt.StoreCC(3, max)\n\treturn int(max)\n}\n\n\/\/ Max returns a value that corresponds to 100% PWM duty cycle. See SetFreq for\n\/\/ more information.\nfunc (pwm *Toggle) Max() int {\n\treturn int(pwm.t.LoadCC(3))\n}\n\nfunc checkOutput(n int) {\n\tif uint(n) > 2 {\n\t\tpanic(\"pwm: bad output\")\n\t}\n}\n\n\/\/ Setup setups n-th of three PWM channels. Each PWM channel uses one GPIOTE\n\/\/ channel and two PPI channels.\nfunc (pwm *Toggle) Setup(n int, pin gpio.Pin, gc gpiote.Chan, pc0, pc1 ppi.Chan) {\n\tcheckOutput(n)\n\tpin.Clear()\n\tpin.Setup(gpio.ModeOut)\n\tgc.Setup(pin, 0)\n\tpwm.gc[n] = gc\n\tt := pwm.t\n\tpc0.SetEEP(t.Event(timer.COMPARE(n)))\n\tpc0.SetTEP(gc.OUT().Task())\n\tpc0.Enable()\n\tpc1.SetEEP(t.Event(timer.COMPARE(3)))\n\tpc1.SetTEP(gc.OUT().Task())\n\tpc1.Enable()\n}\n\n\/\/ SetVal sets a duty cycle for n-th PWM channel.\nfunc (pwm *Toggle) SetVal(n, val int) {\n\tcheckOutput(n)\n\tgc := pwm.gc[n]\n\tt := pwm.t\n\tpin, _ := gc.Config()\n\tt.Task(timer.STOP).Trigger()\n\tswitch {\n\tcase val <= 0:\n\t\tpin.Clear()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\tcase val >= pwm.Max():\n\t\tpin.Set()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\t}\n\tcfg := gpiote.ModeTask | gpiote.PolarityToggle\n\tt.Task(timer.CAPTURE(n)).Trigger()\n\tcnt := int(t.LoadCC(n))\n\tif cnt < val {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitHigh)\n\t} else {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitLow)\n\t}\n\tt.StoreCC(n, uint32(val))\n\tt.Task(timer.START).Trigger()\n}\n\n\/\/ SetInvDuty sets a duty cycle for n-th PWM channel. It produces inverted\n\/\/ waveform.\nfunc (pwm *Toggle) SetInvVal(n, val int) {\n\tcheckOutput(n)\n\tgc := pwm.gc[n]\n\tt := pwm.t\n\tpin, _ := gc.Config()\n\tt.Task(timer.STOP).Trigger()\n\tswitch {\n\tcase val <= 0:\n\t\tpin.Set()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\tcase val >= pwm.Max():\n\t\tpin.Clear()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\t}\n\tcfg := gpiote.ModeTask | gpiote.PolarityToggle\n\tt.Task(timer.CAPTURE(n)).Trigger()\n\tcnt := int(t.LoadCC(n))\n\tif cnt < val {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitLow)\n\t} else {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitHigh)\n\t}\n\tt.StoreCC(n, uint32(val))\n\tt.Task(timer.START).Trigger()\n}\n<commit_msg>nrf5\/ppipwm: Better description of SetFreq method.<commit_after>\/\/ Package ppipwm allows to produce PWM signal using PPI, TIMER and GPIOTE\n\/\/ peripherals.\npackage ppipwm\n\nimport (\n\t\"nrf5\/hal\/gpio\"\n\t\"nrf5\/hal\/gpiote\"\n\t\"nrf5\/hal\/ppi\"\n\t\"nrf5\/hal\/timer\"\n)\n\n\/\/ Toggle implementats three channel PWM usind GPIOTE OUT task configured in\n\/\/ toggle mode. It is designed specifically for nRF51 chips because of\n\/\/ limitations of their GPIOTE peripheral. Better solutions exists for nRF52.\n\/\/\n\/\/ To produce desired PWM waveform Toggle configures a timer compare register,\n\/\/ using simple algorithm, that usually generates glitches when used on working\n\/\/ PWM channel. After that, PWM works without any CPU intervention and produces\n\/\/ proper waveform until next duty cycle change. This is fine to drive LEDs but\n\/\/ can cause trubles in case of receivers that rely on stable PWM frequency or\n\/\/ phase (eg. some servos). Correct implementation is difficult and quite\n\/\/ expensive in case of nRF51.\n\/\/\n\/\/ Toggle cannot be used concurently by multiple gorutines without proper\n\/\/ synchronisation.\ntype Toggle struct {\n\tt *timer.Periph\n\tgc [3]gpiote.Chan\n}\n\n\/\/ MakeToggle returns configured Toggle implementation of PPI based PWM using\n\/\/ timer t.\nfunc MakeToggle(t *timer.Periph) Toggle {\n\tt.Task(timer.STOP).Trigger()\n\tt.StoreSHORTS(timer.COMPARE3_CLEAR)\n\treturn Toggle{t: t}\n}\n\n\/\/ NewToggle provides convenient way to create heap allocated Toggle struct. See\n\/\/ MakeToggle for more information.\nfunc NewToggle(t *timer.Periph) *Toggle {\n\tpwm := new(Toggle)\n\t*pwm = MakeToggle(t)\n\treturn pwm\n}\n\n\/\/ SetFreq sets timer prescaler to 2^log2pre and period to periodus microseconds\n\/\/ (log2pre must be in range [0..9]). It allows to configure a duty cycle with\n\/\/ a resolution = 16 * periodus \/ 2^log2pre. Toggle uses timer in 16-bit mode so\n\/\/ the resolution must be <= 65536. SetFreq returns (resolution-1), which is a\n\/\/ value that should be passed to SetDuty\/SetInvDuty to produce PWM with 100%\n\/\/ duty cycle.\nfunc (pwm *Toggle) SetFreq(log2pre, periodus int) int {\n\tif uint(log2pre) > 9 {\n\t\tpanic(\"pwm: bad prescaler\")\n\t}\n\tif periodus < 1 {\n\t\tpanic(\"pwm: bad period\")\n\t}\n\tt := pwm.t\n\tt.StorePRESCALER(log2pre)\n\tdiv := uint32(1) << uint(log2pre)\n\tmax := 16*uint32(periodus)\/div - 1 \/\/ 16 MHz * period \/ div - 1\n\tif max > 0xFFFF {\n\t\tpanic(\"pwm: max>0xFFFF\")\n\t}\n\tt.StoreCC(3, max)\n\treturn int(max)\n}\n\n\/\/ Max returns a value that corresponds to 100% PWM duty cycle. See SetFreq for\n\/\/ more information.\nfunc (pwm *Toggle) Max() int {\n\treturn int(pwm.t.LoadCC(3))\n}\n\nfunc checkOutput(n int) {\n\tif uint(n) > 2 {\n\t\tpanic(\"pwm: bad output\")\n\t}\n}\n\n\/\/ Setup setups n-th of three PWM channels. Each PWM channel uses one GPIOTE\n\/\/ channel and two PPI channels.\nfunc (pwm *Toggle) Setup(n int, pin gpio.Pin, gc gpiote.Chan, pc0, pc1 ppi.Chan) {\n\tcheckOutput(n)\n\tpin.Clear()\n\tpin.Setup(gpio.ModeOut)\n\tgc.Setup(pin, 0)\n\tpwm.gc[n] = gc\n\tt := pwm.t\n\tpc0.SetEEP(t.Event(timer.COMPARE(n)))\n\tpc0.SetTEP(gc.OUT().Task())\n\tpc0.Enable()\n\tpc1.SetEEP(t.Event(timer.COMPARE(3)))\n\tpc1.SetTEP(gc.OUT().Task())\n\tpc1.Enable()\n}\n\n\/\/ SetVal sets a duty cycle for n-th PWM channel.\nfunc (pwm *Toggle) SetVal(n, val int) {\n\tcheckOutput(n)\n\tgc := pwm.gc[n]\n\tt := pwm.t\n\tpin, _ := gc.Config()\n\tt.Task(timer.STOP).Trigger()\n\tswitch {\n\tcase val <= 0:\n\t\tpin.Clear()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\tcase val >= pwm.Max():\n\t\tpin.Set()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\t}\n\tcfg := gpiote.ModeTask | gpiote.PolarityToggle\n\tt.Task(timer.CAPTURE(n)).Trigger()\n\tcnt := int(t.LoadCC(n))\n\tif cnt < val {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitHigh)\n\t} else {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitLow)\n\t}\n\tt.StoreCC(n, uint32(val))\n\tt.Task(timer.START).Trigger()\n}\n\n\/\/ SetInvDuty sets a duty cycle for n-th PWM channel. It produces inverted\n\/\/ waveform.\nfunc (pwm *Toggle) SetInvVal(n, val int) {\n\tcheckOutput(n)\n\tgc := pwm.gc[n]\n\tt := pwm.t\n\tpin, _ := gc.Config()\n\tt.Task(timer.STOP).Trigger()\n\tswitch {\n\tcase val <= 0:\n\t\tpin.Set()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\tcase val >= pwm.Max():\n\t\tpin.Clear()\n\t\tgc.Setup(pin, 0)\n\t\treturn\n\t}\n\tcfg := gpiote.ModeTask | gpiote.PolarityToggle\n\tt.Task(timer.CAPTURE(n)).Trigger()\n\tcnt := int(t.LoadCC(n))\n\tif cnt < val {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitLow)\n\t} else {\n\t\tgc.Setup(pin, cfg|gpiote.OutInitHigh)\n\t}\n\tt.StoreCC(n, uint32(val))\n\tt.Task(timer.START).Trigger()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package besticon includes functions\n\/\/ finding icons for a given web site.\npackage besticon\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"image\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\n\t\/\/ Load support for common image formats.\n\t_ \"code.google.com\/p\/go.image\/bmp\"\n\t_ \"code.google.com\/p\/go.image\/tiff\"\n\t_ \"code.google.com\/p\/go.image\/webp\"\n\t_ \"github.com\/mat\/besticon\/ico\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\n\t\"code.google.com\/p\/go.net\/html\/charset\"\n\t\"code.google.com\/p\/go.net\/publicsuffix\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\n\/\/ Icon holds icon information.\ntype Icon struct {\n\tURL string `json:\"url\"`\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tFormat string `json:\"format\"`\n\tBytes int `json:\"bytes\"`\n\tError error `json:\"error\"`\n\tSha1sum string `json:\"sha1sum\"`\n}\n\n\/\/ FetchBestIcon takes a siteURL and returns the icon with\n\/\/ the largest dimensions for this site or an error.\nfunc FetchBestIcon(siteURL string) (*Icon, error) {\n\treturn fetchBestIconWithClient(siteURL, &http.Client{})\n}\n\nfunc fetchBestIconWithClient(siteURL string, c *http.Client) (*Icon, error) {\n\ticons, e := fetchIconsWithClient(siteURL, c)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tif len(icons) < 1 {\n\t\treturn nil, errors.New(\"besticon: no icons found for site\")\n\t}\n\n\tbest := icons[0]\n\treturn &best, nil\n}\n\n\/\/ FetchIcons takes a siteURL and returns all icons for this site\n\/\/ or an error.\nfunc FetchIcons(siteURL string) ([]Icon, error) {\n\treturn fetchIconsWithClient(siteURL, &http.Client{})\n}\n\n\/\/ fetchIconsWithClient modifies c's checkRedirect and Jar!\nfunc fetchIconsWithClient(siteURL string, c *http.Client) ([]Icon, error) {\n\tconfigureClient(c)\n\n\tsiteURL = strings.TrimSpace(siteURL)\n\tif !strings.HasPrefix(siteURL, \"http\") {\n\t\tsiteURL = \"http:\/\/\" + siteURL\n\t}\n\n\thtml, url, e := fetchHTML(siteURL, c)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tlinks, e := assembleIconLinks(url, html)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\ticons := fetchAllIcons(links, c)\n\ticons = rejectBrokenIcons(icons)\n\n\t\/\/ Order when finished: (width\/height, bytes, url)\n\tsort.Stable(byURL(icons))\n\tsort.Stable(byBytes(icons))\n\tsort.Stable(sort.Reverse(byWidthHeight(icons)))\n\n\treturn icons, nil\n}\n\nfunc fetchHTML(url string, c *http.Client) ([]byte, *url.URL, error) {\n\tr, e := get(c, url)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\n\tif !(r.StatusCode >= 200 && r.StatusCode < 300) {\n\t\treturn nil, nil, errors.New(\"besticon: not found\")\n\t}\n\n\tb, e := ioutil.ReadAll(r.Body)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tdefer r.Body.Close()\n\tif len(b) == 0 {\n\t\treturn nil, nil, errors.New(\"besticon: empty response\")\n\t}\n\n\treader := bytes.NewReader(b)\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tutf8reader, e := charset.NewReader(reader, contentType)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tutf8bytes, e := ioutil.ReadAll(utf8reader)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\n\treturn utf8bytes, r.Request.URL, nil\n}\n\nvar iconPaths = []string{\n\t\"\/favicon.ico\",\n\t\"\/apple-touch-icon.png\",\n\t\"\/apple-touch-icon-precomposed.png\",\n}\n\ntype empty struct{}\n\nfunc assembleIconLinks(siteURL *url.URL, html []byte) ([]string, error) {\n\tlinks := make(map[string]empty)\n\n\t\/\/ Add common, hard coded icon paths\n\tfor _, path := range iconPaths {\n\t\tlinks[urlFromBase(siteURL, path)] = empty{}\n\t}\n\n\t\/\/ Add icons found in page\n\turls, e := findIcons(html)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tfor _, url := range urls {\n\t\turl, e := absoluteURL(siteURL, url)\n\t\tif e == nil {\n\t\t\tlinks[url] = empty{}\n\t\t}\n\t}\n\n\t\/\/ Turn unique keys into array\n\tresult := []string{}\n\tfor u := range links {\n\t\tresult = append(result, u)\n\t}\n\tsort.Strings(result)\n\n\treturn result, nil\n}\n\nvar csspaths = strings.Join([]string{\n\t\"link[rel='icon']\",\n\t\"link[rel='shortcut icon']\",\n\t\"link[rel='apple-touch-icon']\",\n\t\"link[rel='apple-touch-icon-precomposed']\",\n}, \", \")\n\nvar errParseHTML = errors.New(\"besticon: could not parse html\")\n\nfunc findIcons(html []byte) ([]string, error) {\n\tdoc, e := goquery.NewDocumentFromReader(bytes.NewReader(html))\n\tif e != nil || doc == nil {\n\t\treturn nil, errParseHTML\n\t}\n\n\thits := []string{}\n\tdoc.Find(csspaths).Each(func(i int, s *goquery.Selection) {\n\t\thref, ok := s.Attr(\"href\")\n\t\tif ok && href != \"\" {\n\t\t\thits = append(hits, href)\n\t\t}\n\t})\n\n\treturn hits, nil\n}\n\nfunc fetchAllIcons(urls []string, c *http.Client) []Icon {\n\tch := make(chan Icon)\n\n\tfor _, u := range urls {\n\t\tgo func(u string) { ch <- fetchIconDetails(u, c) }(u)\n\t}\n\n\ticons := []Icon{}\n\tfor range urls {\n\t\ticon := <-ch\n\t\ticons = append(icons, icon)\n\t}\n\treturn icons\n}\n\nfunc fetchIconDetails(url string, c *http.Client) Icon {\n\ti := Icon{URL: url}\n\n\tresponse, e := get(c, url)\n\tif e != nil {\n\t\ti.Error = e\n\t\treturn i\n\t}\n\n\tb, e := ioutil.ReadAll(response.Body)\n\tif e != nil {\n\t\ti.Error = e\n\t\treturn i\n\t}\n\tdefer response.Body.Close()\n\n\tcfg, format, e := image.DecodeConfig(bytes.NewReader(b))\n\tif e != nil {\n\t\ti.Error = fmt.Errorf(\"besticon: unknown image format: %s\", e)\n\t\treturn i\n\t}\n\n\ti.Width = cfg.Width\n\ti.Height = cfg.Height\n\ti.Format = format\n\ti.Bytes = len(b)\n\ti.Sha1sum = sha1Sum(b)\n\n\treturn i\n}\n\nfunc get(client *http.Client, url string) (*http.Response, error) {\n\treq, e := http.NewRequest(\"GET\", url, nil)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tsetDefaultHeaders(req)\n\treturn client.Do(req)\n}\n\nfunc setDefaultHeaders(req *http.Request) {\n\treq.Header.Set(\"Accept\", \"*\/*\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit\/534.55.3 (KHTML, like Gecko) Version\/5.1.3 Safari\/534.53.10\")\n}\n\nfunc configureClient(c *http.Client) {\n\tc.Jar = mustInitCookieJar()\n\tc.CheckRedirect = checkRedirect\n}\n\nfunc mustInitCookieJar() *cookiejar.Jar {\n\toptions := cookiejar.Options{\n\t\tPublicSuffixList: publicsuffix.List,\n\t}\n\tjar, e := cookiejar.New(&options)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\n\treturn jar\n}\n\nfunc checkRedirect(req *http.Request, via []*http.Request) error {\n\tsetDefaultHeaders(req)\n\n\tif len(via) >= 10 {\n\t\treturn errors.New(\"stopped after 10 redirects\")\n\t}\n\treturn nil\n}\n\nfunc absoluteURL(baseURL *url.URL, path string) (string, error) {\n\turl, e := url.Parse(path)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\turl.Scheme = baseURL.Scheme\n\tif url.Host == \"\" {\n\t\turl.Host = baseURL.Host\n\t}\n\treturn url.String(), nil\n}\n\nfunc urlFromBase(baseURL *url.URL, path string) string {\n\turl := *baseURL\n\turl.Path = path\n\treturn url.String()\n}\n\nfunc rejectBrokenIcons(icons []Icon) []Icon {\n\tresult := []Icon{}\n\tfor _, img := range icons {\n\t\tif img.Error == nil && (img.Width > 1 && img.Height > 1) {\n\t\t\tresult = append(result, img)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc sha1Sum(b []byte) string {\n\thash := sha1.New()\n\thash.Write(b)\n\tbs := hash.Sum(nil)\n\treturn fmt.Sprintf(\"%x\", bs)\n}\n\nfunc (i *Icon) ImgWidth() int {\n\treturn i.Width \/ 2.0\n}\n\ntype byWidthHeight []Icon\n\nfunc (a byWidthHeight) Len() int { return len(a) }\nfunc (a byWidthHeight) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byWidthHeight) Less(i, j int) bool {\n\treturn (a[i].Width < a[j].Width) || (a[i].Height < a[j].Height)\n}\n\ntype byBytes []Icon\n\nfunc (a byBytes) Len() int { return len(a) }\nfunc (a byBytes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byBytes) Less(i, j int) bool { return (a[i].Bytes < a[j].Bytes) }\n\ntype byURL []Icon\n\nfunc (a byURL) Len() int { return len(a) }\nfunc (a byURL) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byURL) Less(i, j int) bool { return (a[i].URL < a[j].URL) }\n<commit_msg>Set 60s timeout on outgoing HTTP requests.<commit_after>\/\/ Package besticon includes functions\n\/\/ finding icons for a given web site.\npackage besticon\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"image\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ Load support for common image formats.\n\t_ \"code.google.com\/p\/go.image\/bmp\"\n\t_ \"code.google.com\/p\/go.image\/tiff\"\n\t_ \"code.google.com\/p\/go.image\/webp\"\n\t_ \"github.com\/mat\/besticon\/ico\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\n\t\"code.google.com\/p\/go.net\/html\/charset\"\n\t\"code.google.com\/p\/go.net\/publicsuffix\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\n\/\/ Icon holds icon information.\ntype Icon struct {\n\tURL string `json:\"url\"`\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tFormat string `json:\"format\"`\n\tBytes int `json:\"bytes\"`\n\tError error `json:\"error\"`\n\tSha1sum string `json:\"sha1sum\"`\n}\n\n\/\/ FetchBestIcon takes a siteURL and returns the icon with\n\/\/ the largest dimensions for this site or an error.\nfunc FetchBestIcon(siteURL string) (*Icon, error) {\n\treturn fetchBestIconWithClient(siteURL, &http.Client{})\n}\n\nfunc fetchBestIconWithClient(siteURL string, c *http.Client) (*Icon, error) {\n\ticons, e := fetchIconsWithClient(siteURL, c)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tif len(icons) < 1 {\n\t\treturn nil, errors.New(\"besticon: no icons found for site\")\n\t}\n\n\tbest := icons[0]\n\treturn &best, nil\n}\n\n\/\/ FetchIcons takes a siteURL and returns all icons for this site\n\/\/ or an error.\nfunc FetchIcons(siteURL string) ([]Icon, error) {\n\tc := &http.Client{Timeout: 60 * time.Second}\n\treturn fetchIconsWithClient(siteURL, c)\n}\n\n\/\/ fetchIconsWithClient modifies c's checkRedirect and Jar!\nfunc fetchIconsWithClient(siteURL string, c *http.Client) ([]Icon, error) {\n\tconfigureClient(c)\n\n\tsiteURL = strings.TrimSpace(siteURL)\n\tif !strings.HasPrefix(siteURL, \"http\") {\n\t\tsiteURL = \"http:\/\/\" + siteURL\n\t}\n\n\thtml, url, e := fetchHTML(siteURL, c)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tlinks, e := assembleIconLinks(url, html)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\ticons := fetchAllIcons(links, c)\n\ticons = rejectBrokenIcons(icons)\n\n\t\/\/ Order when finished: (width\/height, bytes, url)\n\tsort.Stable(byURL(icons))\n\tsort.Stable(byBytes(icons))\n\tsort.Stable(sort.Reverse(byWidthHeight(icons)))\n\n\treturn icons, nil\n}\n\nfunc fetchHTML(url string, c *http.Client) ([]byte, *url.URL, error) {\n\tr, e := get(c, url)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\n\tif !(r.StatusCode >= 200 && r.StatusCode < 300) {\n\t\treturn nil, nil, errors.New(\"besticon: not found\")\n\t}\n\n\tb, e := ioutil.ReadAll(r.Body)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tdefer r.Body.Close()\n\tif len(b) == 0 {\n\t\treturn nil, nil, errors.New(\"besticon: empty response\")\n\t}\n\n\treader := bytes.NewReader(b)\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tutf8reader, e := charset.NewReader(reader, contentType)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tutf8bytes, e := ioutil.ReadAll(utf8reader)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\n\treturn utf8bytes, r.Request.URL, nil\n}\n\nvar iconPaths = []string{\n\t\"\/favicon.ico\",\n\t\"\/apple-touch-icon.png\",\n\t\"\/apple-touch-icon-precomposed.png\",\n}\n\ntype empty struct{}\n\nfunc assembleIconLinks(siteURL *url.URL, html []byte) ([]string, error) {\n\tlinks := make(map[string]empty)\n\n\t\/\/ Add common, hard coded icon paths\n\tfor _, path := range iconPaths {\n\t\tlinks[urlFromBase(siteURL, path)] = empty{}\n\t}\n\n\t\/\/ Add icons found in page\n\turls, e := findIcons(html)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tfor _, url := range urls {\n\t\turl, e := absoluteURL(siteURL, url)\n\t\tif e == nil {\n\t\t\tlinks[url] = empty{}\n\t\t}\n\t}\n\n\t\/\/ Turn unique keys into array\n\tresult := []string{}\n\tfor u := range links {\n\t\tresult = append(result, u)\n\t}\n\tsort.Strings(result)\n\n\treturn result, nil\n}\n\nvar csspaths = strings.Join([]string{\n\t\"link[rel='icon']\",\n\t\"link[rel='shortcut icon']\",\n\t\"link[rel='apple-touch-icon']\",\n\t\"link[rel='apple-touch-icon-precomposed']\",\n}, \", \")\n\nvar errParseHTML = errors.New(\"besticon: could not parse html\")\n\nfunc findIcons(html []byte) ([]string, error) {\n\tdoc, e := goquery.NewDocumentFromReader(bytes.NewReader(html))\n\tif e != nil || doc == nil {\n\t\treturn nil, errParseHTML\n\t}\n\n\thits := []string{}\n\tdoc.Find(csspaths).Each(func(i int, s *goquery.Selection) {\n\t\thref, ok := s.Attr(\"href\")\n\t\tif ok && href != \"\" {\n\t\t\thits = append(hits, href)\n\t\t}\n\t})\n\n\treturn hits, nil\n}\n\nfunc fetchAllIcons(urls []string, c *http.Client) []Icon {\n\tch := make(chan Icon)\n\n\tfor _, u := range urls {\n\t\tgo func(u string) { ch <- fetchIconDetails(u, c) }(u)\n\t}\n\n\ticons := []Icon{}\n\tfor range urls {\n\t\ticon := <-ch\n\t\ticons = append(icons, icon)\n\t}\n\treturn icons\n}\n\nfunc fetchIconDetails(url string, c *http.Client) Icon {\n\ti := Icon{URL: url}\n\n\tresponse, e := get(c, url)\n\tif e != nil {\n\t\ti.Error = e\n\t\treturn i\n\t}\n\n\tb, e := ioutil.ReadAll(response.Body)\n\tif e != nil {\n\t\ti.Error = e\n\t\treturn i\n\t}\n\tdefer response.Body.Close()\n\n\tcfg, format, e := image.DecodeConfig(bytes.NewReader(b))\n\tif e != nil {\n\t\ti.Error = fmt.Errorf(\"besticon: unknown image format: %s\", e)\n\t\treturn i\n\t}\n\n\ti.Width = cfg.Width\n\ti.Height = cfg.Height\n\ti.Format = format\n\ti.Bytes = len(b)\n\ti.Sha1sum = sha1Sum(b)\n\n\treturn i\n}\n\nfunc get(client *http.Client, url string) (*http.Response, error) {\n\treq, e := http.NewRequest(\"GET\", url, nil)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tsetDefaultHeaders(req)\n\treturn client.Do(req)\n}\n\nfunc setDefaultHeaders(req *http.Request) {\n\treq.Header.Set(\"Accept\", \"*\/*\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit\/534.55.3 (KHTML, like Gecko) Version\/5.1.3 Safari\/534.53.10\")\n}\n\nfunc configureClient(c *http.Client) {\n\tc.Jar = mustInitCookieJar()\n\tc.CheckRedirect = checkRedirect\n}\n\nfunc mustInitCookieJar() *cookiejar.Jar {\n\toptions := cookiejar.Options{\n\t\tPublicSuffixList: publicsuffix.List,\n\t}\n\tjar, e := cookiejar.New(&options)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\n\treturn jar\n}\n\nfunc checkRedirect(req *http.Request, via []*http.Request) error {\n\tsetDefaultHeaders(req)\n\n\tif len(via) >= 10 {\n\t\treturn errors.New(\"stopped after 10 redirects\")\n\t}\n\treturn nil\n}\n\nfunc absoluteURL(baseURL *url.URL, path string) (string, error) {\n\turl, e := url.Parse(path)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\turl.Scheme = baseURL.Scheme\n\tif url.Host == \"\" {\n\t\turl.Host = baseURL.Host\n\t}\n\treturn url.String(), nil\n}\n\nfunc urlFromBase(baseURL *url.URL, path string) string {\n\turl := *baseURL\n\turl.Path = path\n\treturn url.String()\n}\n\nfunc rejectBrokenIcons(icons []Icon) []Icon {\n\tresult := []Icon{}\n\tfor _, img := range icons {\n\t\tif img.Error == nil && (img.Width > 1 && img.Height > 1) {\n\t\t\tresult = append(result, img)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc sha1Sum(b []byte) string {\n\thash := sha1.New()\n\thash.Write(b)\n\tbs := hash.Sum(nil)\n\treturn fmt.Sprintf(\"%x\", bs)\n}\n\nfunc (i *Icon) ImgWidth() int {\n\treturn i.Width \/ 2.0\n}\n\ntype byWidthHeight []Icon\n\nfunc (a byWidthHeight) Len() int { return len(a) }\nfunc (a byWidthHeight) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byWidthHeight) Less(i, j int) bool {\n\treturn (a[i].Width < a[j].Width) || (a[i].Height < a[j].Height)\n}\n\ntype byBytes []Icon\n\nfunc (a byBytes) Len() int { return len(a) }\nfunc (a byBytes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byBytes) Less(i, j int) bool { return (a[i].Bytes < a[j].Bytes) }\n\ntype byURL []Icon\n\nfunc (a byURL) Len() int { return len(a) }\nfunc (a byURL) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byURL) Less(i, j int) bool { return (a[i].URL < a[j].URL) }\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\t\"bytes\"\n\t\"io\"\n)\n\n\/\/ change traq env to use our fixtures\nfunc WithFakeEnv(block func()) {\n oldEnv := os.Getenv(\"TRAQ_DATA_DIR\")\n path, _ := os.Getwd()\n os.Setenv(\"TRAQ_DATA_DIR\", path + \"\/fixtures\")\n\n block()\n\n os.Setenv(\"TRAQ_DATA_DIR\", oldEnv)\n}\n\n\/\/ capture output written to os.Stdout and return it\nfunc CaptureStdout(block func()) string {\n old := os.Stdout \/\/ keep backup of the real stdout\n r, w, _ := os.Pipe()\n os.Stdout = w\n\n block()\n\n outC := make(chan string)\n \/\/ copy the output in a separate goroutine so printing can't block indefinitely\n go func() {\n var buf bytes.Buffer\n io.Copy(&buf, r)\n outC <- buf.String()\n }()\n\n \/\/ back to normal state\n w.Close()\n os.Stdout = old\n\n return <-outC\n}\n\nfunc TestDatesInMonth(t *testing.T) {\n dates := DatesInMonth(1986, 9)\n\n if len(dates) != 30 {\n t.Errorf(\"expected 30 days in Sep 1986, got %v\", len(dates))\n }\n\n if dates[0].Weekday() != time.Monday {\n t.Errorf(\"Started on a Monday, got %v\", dates[0].Weekday())\n }\n\n if dates[len(dates) - 1].Weekday() != time.Tuesday {\n t.Errorf(\"Ended on a Tuesday, got %v\", dates[len(dates) - 1].Weekday())\n }\n}\n\nfunc TestPrintDate(t *testing.T) {\n out := CaptureStdout(func() {\n WithFakeEnv(func() {\n PrintDate(\"example\", time.Date(1986, 9, 3, 0, 0, 0, 0, time.UTC))\n })\n })\n\n expected :=\n`Wed Sep 03 20:00:00 +0100 1986;#birth;comment\nWed Sep 03 21:45:33 +0100 1986;#chillout;\nWed Sep 03 23:24:49 +0100 1986;stop;\n%%\n`\n if out != expected {\n t.Errorf(\"unexpected PrintDate output. Expected '%v' got '%v'\", expected, out)\n }\n}\n\nfunc TestFilePath(t *testing.T) {\n\tvar path string = FilePath(\"example\", time.Date(1986, 9, 3, 0, 0, 0, 0, time.UTC))\n\n\tif path != os.Getenv(\"TRAQ_DATA_DIR\")+\"\/example\/1986\/1986-09-03\" {\n\t\tt.Errorf(\"FilePath = %v, want %v\", path, os.Getenv(\"TRAQ_DATA_DIR\")+\"\/example\/1986\/1986-09-03\")\n\t}\n}\n\nfunc TestEmptySumFile(t *testing.T) {\n\tcontent := []string{\"\"}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#work\"]\n\t\tif ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, should not exist\", total)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n\nfunc TestSimpleSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n\nfunc TestNoStopSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 20:00:00 +0100 2013;#play;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#play\"]\n\t\tif total != 6333 || !ok {\n\t\t\tt.Errorf(\"summed['#play'] = %v, want %v\", total, 6333)\n\t\t}\n\t\ttotal, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\nfunc TestWithStopSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 20:00:00 +0100 2013;#play;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;stop;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#play\"]\n\t\tif total != 6333 || !ok {\n\t\t\tt.Errorf(\"summed['#play'] = %v, want %v\", total, 6333)\n\t\t}\n\t\ttotal, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n<commit_msg>update tests<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\t\"bytes\"\n\t\"io\"\n)\n\n\/\/ change traq env to use our fixtures\nfunc WithFakeEnv(block func()) {\n oldEnv := os.Getenv(\"TRAQ_DATA_DIR\")\n path, _ := os.Getwd()\n os.Setenv(\"TRAQ_DATA_DIR\", path + \"\/fixtures\")\n\n block()\n\n os.Setenv(\"TRAQ_DATA_DIR\", oldEnv)\n}\n\n\/\/ capture output written to os.Stdout and return it\nfunc CaptureStdout(block func()) string {\n old := os.Stdout \/\/ keep backup of the real stdout\n r, w, _ := os.Pipe()\n os.Stdout = w\n\n block()\n\n outC := make(chan string)\n \/\/ copy the output in a separate goroutine so printing can't block indefinitely\n go func() {\n var buf bytes.Buffer\n io.Copy(&buf, r)\n outC <- buf.String()\n }()\n\n \/\/ back to normal state\n w.Close()\n os.Stdout = old\n\n return <-outC\n}\n\nfunc TestDatesInMonth(t *testing.T) {\n dates := DatesInMonth(1986, 9)\n\n if len(dates) != 30 {\n t.Errorf(\"expected 30 days in Sep 1986, got %v\", len(dates))\n }\n\n if dates[0].Weekday() != time.Monday {\n t.Errorf(\"Started on a Monday, got %v\", dates[0].Weekday())\n }\n\n if dates[len(dates) - 1].Weekday() != time.Tuesday {\n t.Errorf(\"Ended on a Tuesday, got %v\", dates[len(dates) - 1].Weekday())\n }\n}\n\nfunc TestPrintDate(t *testing.T) {\n out := CaptureStdout(func() {\n WithFakeEnv(func() {\n PrintDate(\"example\", time.Date(1986, 9, 3, 0, 0, 0, 0, time.UTC))\n })\n })\n\n expected :=\n`Wed Sep 03 20:00:00 +0100 1986;#birth;comment\nWed Sep 03 21:45:33 +0100 1986;#chillout;\nWed Sep 03 23:24:49 +0100 1986;stop;\n%%\n`\n if out != expected {\n t.Errorf(\"unexpected PrintDate output. Expected '%v' got '%v'\", expected, out)\n }\n}\n\nfunc TestEvaluateDate(t *testing.T) {\n out := CaptureStdout(func() {\n WithFakeEnv(func() {\n EvaluateDate(ContentLoader, \"example\", time.Date(1986, 9, 3, 0, 0, 0, 0, time.UTC))\n })\n })\n\n expected :=\n`1986-09-03\n#birth:1.7592\n#chillout:1.6544\n%%\n`\n\n if out != expected {\n t.Errorf(\"unexpected EvaluateDate output. Expected '%v' got '%v'\", expected, out)\n }\n}\n\nfunc TestEntry(t *testing.T) {\n expected := `Wed Sep 3 12:00:00 +0000 1986;#test;\n`\n\n if entry := Entry(time.Date(1986, 9, 3, 12, 0, 0, 0, time.UTC), \"#test\"); entry != expected {\n t.Errorf(\"got wrong entry. Expected '%v' got '%v'\", expected, entry)\n }\n}\n\nfunc TestFilePath(t *testing.T) {\n\tvar path string = FilePath(\"example\", time.Date(1986, 9, 3, 0, 0, 0, 0, time.UTC))\n\n\tif path != os.Getenv(\"TRAQ_DATA_DIR\")+\"\/example\/1986\/1986-09-03\" {\n\t\tt.Errorf(\"FilePath = %v, want %v\", path, os.Getenv(\"TRAQ_DATA_DIR\")+\"\/example\/1986\/1986-09-03\")\n\t}\n}\n\nfunc TestEmptySumFile(t *testing.T) {\n\tcontent := []string{\"\"}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#work\"]\n\t\tif ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, should not exist\", total)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n\nfunc TestSimpleSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n\nfunc TestNoStopSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 20:00:00 +0100 2013;#play;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#play\"]\n\t\tif total != 6333 || !ok {\n\t\t\tt.Errorf(\"summed['#play'] = %v, want %v\", total, 6333)\n\t\t}\n\t\ttotal, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\nfunc TestWithStopSumFile(t *testing.T) {\n\tcontent := []string{\n\t\t\"Mon Oct 28 20:00:00 +0100 2013;#play;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;stop;\",\n\t\t\"Mon Oct 28 21:45:33 +0100 2013;#work;\",\n\t\t\"Mon Oct 28 23:24:49 +0100 2013;stop;\",\n\t}\n\tvar summed, error = SumFile(content)\n\n\tif error == nil {\n\t\tvar total, ok = summed[\"#play\"]\n\t\tif total != 6333 || !ok {\n\t\t\tt.Errorf(\"summed['#play'] = %v, want %v\", total, 6333)\n\t\t}\n\t\ttotal, ok = summed[\"#work\"]\n\t\tif total != 5956 || !ok {\n\t\t\tt.Errorf(\"summed['#work'] = %v, want %v\", total, 5956)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"parsing error %v\", error)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package tree implements a basic balanced binary tree\npackage tree\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Node is a fudemental part of what makes a tree a tree. Many Nodes creates a tree\ntype Node struct {\n\tLeft *Node\n\tRight *Node\n\tData int\n}\n\n\/\/ Tree basic tree structure.. Root is of type Node,\ntype Tree struct {\n\tRoot *Node\n\tTotal int\n\tNodeCount int\n}\n\nvar (\n\t\/\/ ErrPositiveIntegers reports that only positive intergers may be added to the tree\n\tErrPositiveIntegers = fmt.Errorf(\"only postive integers may be added\")\n\t\/\/ ErrNodeNotFound reports that a Node wasn't found\n\tErrNodeNotFound = fmt.Errorf(\"Node not found\")\n)\n\n\/\/ NewTree ...\nfunc NewTree() *Tree {\n\treturn new(Tree)\n}\n\n\/\/ FindNode ...\nfunc (t *Tree) FindNode(data int) (err error) {\n\tnewNode := Node{\n\t\tData: data,\n\t}\n\tif t.Root != nil {\n\t\tif t.findNode(t.Root, newNode) != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn ErrNodeNotFound\n}\n\nfunc (t *Tree) findNode(search *Node, target Node) *Node {\n\tvar returnNode *Node\n\tif search == nil {\n\t\treturn returnNode\n\t}\n\tif search.Data == target.Data {\n\t\treturn search\n\t}\n\treturnNode = t.findNode(search.Left, target)\n\tif returnNode == nil {\n\t\treturnNode = t.findNode(search.Right, target)\n\t}\n\treturn returnNode\n}\n\n\/\/ Add appends a new node to a branch in a balanced manner\nfunc (t *Tree) Add(data int) (err error) {\n\tt.Total += data\n\tt.NodeCount++\n\tif data < 0 {\n\t\treturn ErrPositiveIntegers\n\t}\n\tNodeToAdd := Node{\n\t\tData: data,\n\t}\n\tif t.Root == nil {\n\t\tt.Root = new(Node)\n\t}\n\tif t.Root.Data == 0 {\n\t\tt.Root = &NodeToAdd\n\t\treturn\n\t}\n\tt.add(t.Root, NodeToAdd)\n\treturn\n}\n\nfunc (t *Tree) add(oldNode *Node, newNode Node) {\n\tif newNode.Data < oldNode.Data {\n\t\tif oldNode.Left == nil {\n\t\t\toldNode.Left = &newNode\n\t\t} else {\n\t\t\tt.add(oldNode.Left, newNode)\n\t\t}\n\t} else if newNode.Data > oldNode.Data {\n\t\tif oldNode.Right == nil {\n\t\t\toldNode.Right = &newNode\n\t\t} else {\n\t\t\tt.add(oldNode.Right, newNode)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ InOrderTraversal prints out the values in order\nfunc (t *Tree) InOrderTraversal() {\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\tfmt.Println(currentNode.Data)\n\t\t} else {\n\t\t\tt.inOrderTraversal(currentNode)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *Tree) inOrderTraversal(n *Node) {\n\tif n.Left != nil {\n\t\tt.inOrderTraversal(n.Left)\n\t}\n\tfmt.Println(n.Data)\n\tif n.Right != nil {\n\t\tt.inOrderTraversal(n.Right)\n\t}\n\treturn\n}\n\n\/\/ Traversal prints out the values by branch side, left, right, ect...\nfunc (t *Tree) Traversal() {\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\tfmt.Println(currentNode.Data)\n\t\t} else {\n\t\t\tt.traversal(currentNode)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *Tree) traversal(n *Node) {\n\tfmt.Println(n.Data)\n\tif n.Left != nil {\n\t\tt.traversal(n.Left)\n\t}\n\tif n.Right != nil {\n\t\tt.traversal(n.Right)\n\t}\n\treturn\n}\n\n\/\/ Sum added up all the values stored in the Nodes.. It is a redundant function because total value is kept as a Tree\n\/\/ value\nfunc (t *Tree) Sum() (total int) {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn 1\n\t\t}\n\t\twg.Add(1)\n\t\tt.sum(currentNode, c, &wg)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\ttotal += n\n\t}\n\treturn total\n}\n\nfunc (t *Tree) sum(n *Node, counter chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\twg.Add(1)\n\t\tgo t.sum(n.Left, counter, wg)\n\t}\n\tcounter <- n.Data\n\tif n.Right != nil {\n\t\twg.Add(1)\n\t\tgo t.sum(n.Right, counter, wg)\n\t}\n\treturn\n}\n\n\/\/ CountEdges returns the number of edges the tree contains\nfunc (t *Tree) CountEdges() (edges int) {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn 1\n\t\t}\n\t\twg.Add(1)\n\t\tt.countEdges(currentNode, c, &wg)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\tedges += n\n\t}\n\treturn edges\n}\n\nfunc (t *Tree) countEdges(n *Node, counter chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\twg.Add(1)\n\t\tgo t.countEdges(n.Left, counter, wg)\n\t}\n\tcounter <- 1\n\tif n.Right != nil {\n\t\twg.Add(1)\n\t\tgo t.countEdges(n.Right, counter, wg)\n\t}\n\treturn\n}\n\n\/\/ GenerateRandomTree uses time (time.Now().Unix()) to create enthorpy for a source of random numbers to append to the Tree\nfunc (t *Tree) GenerateRandomTree(numberOfNodesToCreate int) (err error) {\n\tif numberOfNodesToCreate < 0 {\n\t\treturn ErrPositiveIntegers\n\t}\n\tu := time.Now()\n\tsource := rand.NewSource(u.Unix())\n\tr := rand.New(source)\n\tarr := r.Perm(numberOfNodesToCreate)\n\tfor _, a := range arr {\n\t\tt.Add(a)\n\t}\n\treturn\n}\n\n\/\/ GetRootData returns the data stored at the root, however this does not return the root Node\nfunc (t *Tree) GetRootData() int {\n\treturn t.Root.Data\n}\n\n\/\/ GetTreeTotal returns the sum of the collecitve nodes on the Tree\nfunc (t *Tree) GetTreeTotal() int {\n\treturn t.Total\n}\n\n\/\/ TreeToArray converts to the into an int slice\nfunc (t *Tree) TreeToArray() []int {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tarr := make([]int, 0, t.NodeCount)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn []int{currentNode.Data}\n\t\t}\n\t\twg.Add(1)\n\t\tt.traversalGetVals(currentNode, c, &wg)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\tarr = append(arr, n)\n\t}\n\treturn arr\n}\n\nfunc (t *Tree) traversalGetVals(n *Node, c chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\tc <- n.Left.Data\n\t\twg.Add(1)\n\t\tgo t.traversalGetVals(n.Left, c, wg)\n\t}\n\tif n.Right != nil {\n\t\tc <- n.Right.Data\n\t\twg.Add(1)\n\t\tgo t.traversalGetVals(n.Right, c, wg)\n\t}\n\treturn\n}\n\n\/\/ ShiftRoot rebuilds the tree with a new root\nfunc (t *Tree) ShiftRoot(newRoot int) {\n\tarr := t.TreeToArray()\n\tn := Tree{}\n\tn.Add(newRoot)\n\tfor _, i := range arr {\n\t\tn.Add(i)\n\t}\n\t*t = n\n}\n\n\/\/ PrintTree uses json.MarshalIndent() to print the Tree in an organized fashion, which can then be analysized as a JSON\n\/\/ object\nfunc (t *Tree) PrintTree() {\n\tb, err := json.MarshalIndent(t, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(string(b))\n}\n<commit_msg>godocs<commit_after>\/\/ Package tree implements a basic balanced binary tree\npackage tree\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Node is a fundemental part of what makes a tree a tree. Many Nodes creates a tree\ntype Node struct {\n\tLeft *Node\n\tRight *Node\n\tData int\n}\n\n\/\/ Tree basic tree structure\ntype Tree struct {\n\tRoot *Node\n\tTotal int\n\tNodeCount int\n}\n\nvar (\n\t\/\/ ErrPositiveIntegers reports that only positive intergers may be added to the tree\n\tErrPositiveIntegers = fmt.Errorf(\"only postive integers may be added\")\n\t\/\/ ErrNodeNotFound reports that a Node wasn't found\n\tErrNodeNotFound = fmt.Errorf(\"Node not found\")\n)\n\n\/\/ NewTree creates a pointer to the Tree struct\nfunc NewTree() *Tree {\n\treturn new(Tree)\n}\n\n\/\/ FindNode recursively looks for the node with the specified value\nfunc (t *Tree) FindNode(data int) (err error) {\n\tnewNode := Node{\n\t\tData: data,\n\t}\n\tif t.Root != nil {\n\t\tif t.findNode(t.Root, newNode) != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn ErrNodeNotFound\n}\n\nfunc (t *Tree) findNode(search *Node, target Node) *Node {\n\tvar returnNode *Node\n\tif search == nil {\n\t\treturn returnNode\n\t}\n\tif search.Data == target.Data {\n\t\treturn search\n\t}\n\treturnNode = t.findNode(search.Left, target)\n\tif returnNode == nil {\n\t\treturnNode = t.findNode(search.Right, target)\n\t}\n\treturn returnNode\n}\n\n\/\/ Add appends a new node to a branch in a balanced manner\nfunc (t *Tree) Add(data int) (err error) {\n\tt.Total += data\n\tt.NodeCount++\n\tif data < 0 {\n\t\treturn ErrPositiveIntegers\n\t}\n\tNodeToAdd := Node{\n\t\tData: data,\n\t}\n\tif t.Root == nil {\n\t\tt.Root = new(Node)\n\t}\n\tif t.Root.Data == 0 {\n\t\tt.Root = &NodeToAdd\n\t\treturn\n\t}\n\tt.add(t.Root, NodeToAdd)\n\treturn\n}\n\nfunc (t *Tree) add(oldNode *Node, newNode Node) {\n\tif newNode.Data < oldNode.Data {\n\t\tif oldNode.Left == nil {\n\t\t\toldNode.Left = &newNode\n\t\t} else {\n\t\t\tt.add(oldNode.Left, newNode)\n\t\t}\n\t} else if newNode.Data > oldNode.Data {\n\t\tif oldNode.Right == nil {\n\t\t\toldNode.Right = &newNode\n\t\t} else {\n\t\t\tt.add(oldNode.Right, newNode)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ InOrderTraversal prints out the values in order\nfunc (t *Tree) InOrderTraversal() {\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\tfmt.Println(currentNode.Data)\n\t\t} else {\n\t\t\tt.inOrderTraversal(currentNode)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *Tree) inOrderTraversal(n *Node) {\n\tif n.Left != nil {\n\t\tt.inOrderTraversal(n.Left)\n\t}\n\tfmt.Println(n.Data)\n\tif n.Right != nil {\n\t\tt.inOrderTraversal(n.Right)\n\t}\n\treturn\n}\n\n\/\/ Traversal prints out the values by branch side, left, right, ect...\nfunc (t *Tree) Traversal() {\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\tfmt.Println(currentNode.Data)\n\t\t} else {\n\t\t\tt.traversal(currentNode)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *Tree) traversal(n *Node) {\n\tfmt.Println(n.Data)\n\tif n.Left != nil {\n\t\tt.traversal(n.Left)\n\t}\n\tif n.Right != nil {\n\t\tt.traversal(n.Right)\n\t}\n\treturn\n}\n\n\/\/ Sum added up all the values stored in the Nodes.. It is a redundant function because total value is kept as a Tree\n\/\/ value\nfunc (t *Tree) Sum() (total int) {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn 1\n\t\t}\n\t\twg.Add(1)\n\t\tt.sum(currentNode, c, &wg)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\ttotal += n\n\t}\n\treturn total\n}\n\nfunc (t *Tree) sum(n *Node, counter chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\twg.Add(1)\n\t\tgo t.sum(n.Left, counter, wg)\n\t}\n\tcounter <- n.Data\n\tif n.Right != nil {\n\t\twg.Add(1)\n\t\tgo t.sum(n.Right, counter, wg)\n\t}\n\treturn\n}\n\n\/\/ CountEdges returns the number of edges the tree contains\nfunc (t *Tree) CountEdges() (edges int) {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn 1\n\t\t}\n\t\twg.Add(1)\n\t\tt.countEdges(currentNode, c, &wg)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\tedges += n\n\t}\n\treturn edges\n}\n\nfunc (t *Tree) countEdges(n *Node, counter chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\twg.Add(1)\n\t\tgo t.countEdges(n.Left, counter, wg)\n\t}\n\tcounter <- 1\n\tif n.Right != nil {\n\t\twg.Add(1)\n\t\tgo t.countEdges(n.Right, counter, wg)\n\t}\n\treturn\n}\n\n\/\/ GenerateRandomTree uses time (time.Now().Unix()) to create enthorpy for a source of random numbers to append to the Tree\nfunc (t *Tree) GenerateRandomTree(numberOfNodesToCreate int) (err error) {\n\tif numberOfNodesToCreate < 0 {\n\t\treturn ErrPositiveIntegers\n\t}\n\tu := time.Now()\n\tsource := rand.NewSource(u.Unix())\n\tr := rand.New(source)\n\tarr := r.Perm(numberOfNodesToCreate)\n\tfor _, a := range arr {\n\t\tt.Add(a)\n\t}\n\treturn\n}\n\n\/\/ GetRootData returns the data stored at the root, however this does not return the root Node\nfunc (t *Tree) GetRootData() int {\n\treturn t.Root.Data\n}\n\n\/\/ GetTreeTotal returns the sum of the collective nodes on the Tree\nfunc (t *Tree) GetTreeTotal() int {\n\treturn t.Total\n}\n\n\/\/ TreeToArray converts to the into an int slice\nfunc (t *Tree) TreeToArray() []int {\n\tvar wg sync.WaitGroup\n\tc := make(chan int, 100)\n\tarr := make([]int, 0, t.NodeCount)\n\tif t.Root != nil {\n\t\tcurrentNode := t.Root\n\t\tif currentNode.Left == nil && currentNode.Right == nil {\n\t\t\treturn []int{currentNode.Data}\n\t\t}\n\t\twg.Add(1)\n\t\tt.traversalGetVals(currentNode, c, &wg)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(c)\n\t}()\n\tfor n := range c {\n\t\tarr = append(arr, n)\n\t}\n\treturn arr\n}\n\nfunc (t *Tree) traversalGetVals(n *Node, c chan int, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tif n.Left != nil {\n\t\tc <- n.Left.Data\n\t\twg.Add(1)\n\t\tgo t.traversalGetVals(n.Left, c, wg)\n\t}\n\tif n.Right != nil {\n\t\tc <- n.Right.Data\n\t\twg.Add(1)\n\t\tgo t.traversalGetVals(n.Right, c, wg)\n\t}\n\treturn\n}\n\n\/\/ ShiftRoot rebuilds the tree with a new root\nfunc (t *Tree) ShiftRoot(newRoot int) {\n\tarr := t.TreeToArray()\n\tn := Tree{}\n\tn.Add(newRoot)\n\tfor _, i := range arr {\n\t\tn.Add(i)\n\t}\n\t*t = n\n}\n\n\/\/ PrintTree uses json.MarshalIndent() to print the Tree in an organized fashion, which can then be analysized as a JSON\n\/\/ object\nfunc (t *Tree) PrintTree() {\n\tb, err := json.MarshalIndent(t, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(string(b))\n}\n<|endoftext|>"} {"text":"<commit_before>package routing\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ vertexDecay is the decay period of colored vertexes added to\n\t\/\/ missionControl. Once vertexDecay passes after an entry has been\n\t\/\/ added to the prune view, it is garbage collected. This value is\n\t\/\/ larger than edgeDecay as an edge failure typical indicates an\n\t\/\/ unbalanced channel, while a vertex failure indicates a node is not\n\t\/\/ online and active.\n\tvertexDecay = time.Duration(time.Minute * 5)\n\n\t\/\/ edgeDecay is the decay period of colored edges added to\n\t\/\/ missionControl. Once edgeDecay passed after an entry has been added,\n\t\/\/ it is garbage collected. This value is smaller than vertexDecay as\n\t\/\/ an edge related failure during payment sending typically indicates\n\t\/\/ that a channel was unbalanced, a condition which may quickly change.\n\t\/\/\n\t\/\/ TODO(roasbeef): instead use random delay on each?\n\tedgeDecay = time.Duration(time.Second * 5)\n)\n\n\/\/ missionControl contains state which summarizes the past attempts of HTLC\n\/\/ routing by external callers when sending payments throughout the network.\n\/\/ missionControl remembers the outcome of these past routing attempts (success\n\/\/ and failure), and is able to provide hints\/guidance to future HTLC routing\n\/\/ attempts. missionControl maintains a decaying network view of the\n\/\/ edges\/vertexes that should be marked as \"pruned\" during path finding. This\n\/\/ graph view acts as a shared memory during HTLC payment routing attempts.\n\/\/ With each execution, if an error is encountered, based on the type of error\n\/\/ and the location of the error within the route, an edge or vertex is added\n\/\/ to the view. Later sending attempts will then query the view for all the\n\/\/ vertexes\/edges that should be ignored. Items in the view decay after a set\n\/\/ period of time, allowing the view to be dynamic w.r.t network changes.\ntype missionControl struct {\n\t\/\/ failedEdges maps a short channel ID to be pruned, to the time that\n\t\/\/ it was added to the prune view. Edges are added to this map if a\n\t\/\/ caller reports to missionControl a failure localized to that edge\n\t\/\/ when sending a payment.\n\tfailedEdges map[uint64]time.Time\n\n\t\/\/ failedVertexes maps a node's public key that should be pruned, to\n\t\/\/ the time that it was added to the prune view. Vertexes are added to\n\t\/\/ this map if a caller reports to missionControl a failure localized\n\t\/\/ to that particular vertex.\n\tfailedVertexes map[vertex]time.Time\n\n\tsync.Mutex\n\n\t\/\/ TODO(roasbeef): further counters, if vertex continually unavailable,\n\t\/\/ add to another generation\n\n\t\/\/ TODO(roasbeef): also add favorable metrics for nodes\n}\n\n\/\/ newMissionControl returns a new instance of missionControl.\n\/\/\n\/\/ TODO(roasbeef): persist memory\nfunc newMissionControl() *missionControl {\n\treturn &missionControl{\n\t\tfailedEdges: make(map[uint64]time.Time),\n\t\tfailedVertexes: make(map[vertex]time.Time),\n\t}\n}\n\n\/\/ ReportVertexFailure adds a vertex to the graph prune view after a client\n\/\/ reports a routing failure localized to the vertex. The time the vertex was\n\/\/ added is noted, as it'll be pruned from the view after a period of\n\/\/ vertexDecay.\nfunc (m *missionControl) ReportVertexFailure(v vertex) {\n\tlog.Debugf(\"Reporting vertex %v failure to Mission Control\", v)\n\n\tm.Lock()\n\tm.failedVertexes[v] = time.Now()\n\tm.Unlock()\n}\n\n\/\/ ReportChannelFailure adds a channel to the graph prune view. The time the\n\/\/ channel was added is noted, as it'll be pruned from the view after a period\n\/\/ of edgeDecay.\n\/\/\n\/\/ TODO(roasbeef): also add value attempted to send and capacity of channel\nfunc (m *missionControl) ReportChannelFailure(e uint64) {\n\tlog.Debugf(\"Reporting edge %v failure to Mission Control\", e)\n\n\tm.Lock()\n\tm.failedEdges[e] = time.Now()\n\tm.Unlock()\n}\n\n\/\/ GraphPruneView returns a new graphPruneView instance which is to be\n\/\/ consulted during path finding. If a vertex\/edge is found within the returned\n\/\/ prune view, it is to be ignored as a goroutine has had issues routing\n\/\/ through it successfully. Within this method the main view of the\n\/\/ missionControl is garbage collected as entires are detected to be \"stale\".\nfunc (m *missionControl) GraphPruneView() *graphPruneView {\n\t\/\/ First, we'll grab the current time, this value will be used to\n\t\/\/ determine if an entry is stale or not.\n\tnow := time.Now()\n\n\tm.Lock()\n\n\t\/\/ For each of the vertexes that have been added to the prune view, if\n\t\/\/ it is now \"stale\", then we'll ignore it and avoid adding it to the\n\t\/\/ view we'll return.\n\tvertexes := make(map[vertex]struct{})\n\tfor vertex, pruneTime := range m.failedVertexes {\n\t\tif now.Sub(pruneTime) >= edgeDecay {\n\t\t\tlog.Tracef(\"Pruning decayed failure report for vertex %v \"+\n\t\t\t\t\"from Mission Control\", vertex)\n\n\t\t\tdelete(m.failedVertexes, vertex)\n\t\t\tcontinue\n\t\t}\n\n\t\tvertexes[vertex] = struct{}{}\n\t}\n\n\t\/\/ We'll also do the same for edges, but use the edgeDecay this time\n\t\/\/ rather than the decay for vertexes.\n\tedges := make(map[uint64]struct{})\n\tfor edge, pruneTime := range m.failedEdges {\n\t\tif now.Sub(pruneTime) >= edgeDecay {\n\t\t\tlog.Tracef(\"Pruning decayed failure report for edge %v \"+\n\t\t\t\t\"from Mission Control\", edge)\n\n\t\t\tdelete(m.failedEdges, edge)\n\t\t\tcontinue\n\t\t}\n\n\t\tedges[edge] = struct{}{}\n\t}\n\n\tm.Unlock()\n\n\tlog.Debugf(\"Mission Control returning prune view of %v edges, %v \"+\n\t\t\"vertexes\", len(edges), len(vertexes))\n\n\treturn &graphPruneView{\n\t\tedges: edges,\n\t\tvertexes: vertexes,\n\t}\n}\n\n\/\/ graphPruneView is a filter of sorts that path finding routines should\n\/\/ consult during the execution. Any edges or vertexes within the view should\n\/\/ be ignored during path finding. The contents of the view reflect the current\n\/\/ state of the wider network from the PoV of mission control compiled via HTLC\n\/\/ routing attempts in the past.\ntype graphPruneView struct {\n\tedges map[uint64]struct{}\n\n\tvertexes map[vertex]struct{}\n}\n\n\/\/ ResetHistory resets the history of missionControl returning it to a state as\n\/\/ if no payment attempts have been made.\nfunc (m *missionControl) ResetHistory() {\n\tm.Lock()\n\tm.failedEdges = make(map[uint64]time.Time)\n\tm.failedVertexes = make(map[vertex]time.Time)\n\tm.Unlock()\n}\n<commit_msg>routing: properly use vertexDecay for vertexes in missionControl<commit_after>package routing\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ vertexDecay is the decay period of colored vertexes added to\n\t\/\/ missionControl. Once vertexDecay passes after an entry has been\n\t\/\/ added to the prune view, it is garbage collected. This value is\n\t\/\/ larger than edgeDecay as an edge failure typical indicates an\n\t\/\/ unbalanced channel, while a vertex failure indicates a node is not\n\t\/\/ online and active.\n\tvertexDecay = time.Duration(time.Minute * 5)\n\n\t\/\/ edgeDecay is the decay period of colored edges added to\n\t\/\/ missionControl. Once edgeDecay passed after an entry has been added,\n\t\/\/ it is garbage collected. This value is smaller than vertexDecay as\n\t\/\/ an edge related failure during payment sending typically indicates\n\t\/\/ that a channel was unbalanced, a condition which may quickly change.\n\t\/\/\n\t\/\/ TODO(roasbeef): instead use random delay on each?\n\tedgeDecay = time.Duration(time.Second * 5)\n)\n\n\/\/ missionControl contains state which summarizes the past attempts of HTLC\n\/\/ routing by external callers when sending payments throughout the network.\n\/\/ missionControl remembers the outcome of these past routing attempts (success\n\/\/ and failure), and is able to provide hints\/guidance to future HTLC routing\n\/\/ attempts. missionControl maintains a decaying network view of the\n\/\/ edges\/vertexes that should be marked as \"pruned\" during path finding. This\n\/\/ graph view acts as a shared memory during HTLC payment routing attempts.\n\/\/ With each execution, if an error is encountered, based on the type of error\n\/\/ and the location of the error within the route, an edge or vertex is added\n\/\/ to the view. Later sending attempts will then query the view for all the\n\/\/ vertexes\/edges that should be ignored. Items in the view decay after a set\n\/\/ period of time, allowing the view to be dynamic w.r.t network changes.\ntype missionControl struct {\n\t\/\/ failedEdges maps a short channel ID to be pruned, to the time that\n\t\/\/ it was added to the prune view. Edges are added to this map if a\n\t\/\/ caller reports to missionControl a failure localized to that edge\n\t\/\/ when sending a payment.\n\tfailedEdges map[uint64]time.Time\n\n\t\/\/ failedVertexes maps a node's public key that should be pruned, to\n\t\/\/ the time that it was added to the prune view. Vertexes are added to\n\t\/\/ this map if a caller reports to missionControl a failure localized\n\t\/\/ to that particular vertex.\n\tfailedVertexes map[vertex]time.Time\n\n\tsync.Mutex\n\n\t\/\/ TODO(roasbeef): further counters, if vertex continually unavailable,\n\t\/\/ add to another generation\n\n\t\/\/ TODO(roasbeef): also add favorable metrics for nodes\n}\n\n\/\/ newMissionControl returns a new instance of missionControl.\n\/\/\n\/\/ TODO(roasbeef): persist memory\nfunc newMissionControl() *missionControl {\n\treturn &missionControl{\n\t\tfailedEdges: make(map[uint64]time.Time),\n\t\tfailedVertexes: make(map[vertex]time.Time),\n\t}\n}\n\n\/\/ ReportVertexFailure adds a vertex to the graph prune view after a client\n\/\/ reports a routing failure localized to the vertex. The time the vertex was\n\/\/ added is noted, as it'll be pruned from the view after a period of\n\/\/ vertexDecay.\nfunc (m *missionControl) ReportVertexFailure(v vertex) {\n\tlog.Debugf(\"Reporting vertex %v failure to Mission Control\", v)\n\n\tm.Lock()\n\tm.failedVertexes[v] = time.Now()\n\tm.Unlock()\n}\n\n\/\/ ReportChannelFailure adds a channel to the graph prune view. The time the\n\/\/ channel was added is noted, as it'll be pruned from the view after a period\n\/\/ of edgeDecay.\n\/\/\n\/\/ TODO(roasbeef): also add value attempted to send and capacity of channel\nfunc (m *missionControl) ReportChannelFailure(e uint64) {\n\tlog.Debugf(\"Reporting edge %v failure to Mission Control\", e)\n\n\tm.Lock()\n\tm.failedEdges[e] = time.Now()\n\tm.Unlock()\n}\n\n\/\/ GraphPruneView returns a new graphPruneView instance which is to be\n\/\/ consulted during path finding. If a vertex\/edge is found within the returned\n\/\/ prune view, it is to be ignored as a goroutine has had issues routing\n\/\/ through it successfully. Within this method the main view of the\n\/\/ missionControl is garbage collected as entires are detected to be \"stale\".\nfunc (m *missionControl) GraphPruneView() *graphPruneView {\n\t\/\/ First, we'll grab the current time, this value will be used to\n\t\/\/ determine if an entry is stale or not.\n\tnow := time.Now()\n\n\tm.Lock()\n\n\t\/\/ For each of the vertexes that have been added to the prune view, if\n\t\/\/ it is now \"stale\", then we'll ignore it and avoid adding it to the\n\t\/\/ view we'll return.\n\tvertexes := make(map[vertex]struct{})\n\tfor vertex, pruneTime := range m.failedVertexes {\n\t\tif now.Sub(pruneTime) >= vertexDecay {\n\t\t\tlog.Tracef(\"Pruning decayed failure report for vertex %v \"+\n\t\t\t\t\"from Mission Control\", vertex)\n\n\t\t\tdelete(m.failedVertexes, vertex)\n\t\t\tcontinue\n\t\t}\n\n\t\tvertexes[vertex] = struct{}{}\n\t}\n\n\t\/\/ We'll also do the same for edges, but use the edgeDecay this time\n\t\/\/ rather than the decay for vertexes.\n\tedges := make(map[uint64]struct{})\n\tfor edge, pruneTime := range m.failedEdges {\n\t\tif now.Sub(pruneTime) >= edgeDecay {\n\t\t\tlog.Tracef(\"Pruning decayed failure report for edge %v \"+\n\t\t\t\t\"from Mission Control\", edge)\n\n\t\t\tdelete(m.failedEdges, edge)\n\t\t\tcontinue\n\t\t}\n\n\t\tedges[edge] = struct{}{}\n\t}\n\n\tm.Unlock()\n\n\tlog.Debugf(\"Mission Control returning prune view of %v edges, %v \"+\n\t\t\"vertexes\", len(edges), len(vertexes))\n\n\treturn &graphPruneView{\n\t\tedges: edges,\n\t\tvertexes: vertexes,\n\t}\n}\n\n\/\/ graphPruneView is a filter of sorts that path finding routines should\n\/\/ consult during the execution. Any edges or vertexes within the view should\n\/\/ be ignored during path finding. The contents of the view reflect the current\n\/\/ state of the wider network from the PoV of mission control compiled via HTLC\n\/\/ routing attempts in the past.\ntype graphPruneView struct {\n\tedges map[uint64]struct{}\n\n\tvertexes map[vertex]struct{}\n}\n\n\/\/ ResetHistory resets the history of missionControl returning it to a state as\n\/\/ if no payment attempts have been made.\nfunc (m *missionControl) ResetHistory() {\n\tm.Lock()\n\tm.failedEdges = make(map[uint64]time.Time)\n\tm.failedVertexes = make(map[vertex]time.Time)\n\tm.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>\/* https:\/\/leetcode.com\/problems\/perfect-squares\/description\/\nGiven a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.\n\nFor example, given n = 12, return 3 because 12 = 4 + 4 + 4; given n = 13, return 2 because 13 = 4 + 9.\n*\/\n\npackage leetcode\n\nfunc numSquares(n int) int {\n\tdp := make([]int, n+1)\n\tfor i := 1; i*i <= n; i++ {\n\t\tdp[i*i] = 1\n\t}\n\n\tfor i := 1; i <= n; i++ {\n\t\tfor j := 1; i+j*j <= n; j++ {\n\t\t\tif dp[i+j*j] == 0 || dp[i+j*j] > dp[i]+1 {\n\t\t\t\tdp[i+j*j] = dp[i] + 1\n\t\t\t}\n\t\t}\n\t}\n\treturn dp[n]\n}\n<commit_msg>add math solution<commit_after>\/* https:\/\/leetcode.com\/problems\/perfect-squares\/description\/\nGiven a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.\n\nFor example, given n = 12, return 3 because 12 = 4 + 4 + 4; given n = 13, return 2 because 13 = 4 + 9.\n*\/\n\npackage leetcode\n\n\/*\nfunc numSquares(n int) int {\n\tdp := make([]int, n+1)\n\tfor i := 1; i*i <= n; i++ {\n\t\tdp[i*i] = 1\n\t}\n\n\tfor i := 1; i <= n; i++ {\n\t\tfor j := 1; i+j*j <= n; j++ {\n\t\t\tif dp[i+j*j] == 0 || dp[i+j*j] > dp[i]+1 {\n\t\t\t\tdp[i+j*j] = dp[i] + 1\n\t\t\t}\n\t\t}\n\t}\n\treturn dp[n]\n}\n*\/\n\nimport \"math\"\n\nfunc numSquares(n int) int {\n\tfor n%4 == 0 {\n\t\tn \/= 4\n\t}\n\n\tif n%8 == 7 {\n\t\treturn 4\n\t}\n\n\tfor i := 0; i*i <= n; i++ {\n\t\tj := int(math.Sqrt(float64(n - i*i)))\n\t\tif i*i+j*j == n {\n\t\t\tres := 0\n\t\t\tif i > 0 {\n\t\t\t\tres++\n\t\t\t}\n\t\t\tif j > 0 {\n\t\t\t\tres++\n\t\t\t}\n\t\t\treturn res\n\t\t}\n\t}\n\treturn 3\n}\n<|endoftext|>"} {"text":"<commit_before>package vault\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-uuid\"\n\tcredUserpass \"github.com\/hashicorp\/vault\/builtin\/credential\/userpass\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nfunc TestRequestHandling_Wrapping(t *testing.T) {\n\tcore, _, root := TestCoreUnsealed(t)\n\n\tcore.logicalBackends[\"generic\"] = PassthroughBackendFactory\n\n\tmeUUID, _ := uuid.GenerateUUID()\n\terr := core.mount(&MountEntry{\n\t\tTable: mountTableType,\n\t\tUUID: meUUID,\n\t\tPath: \"wraptest\",\n\t\tType: \"generic\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\t\/\/ No duration specified\n\treq := &logical.Request{\n\t\tPath: \"wraptest\/foo\",\n\t\tClientToken: root,\n\t\tOperation: logical.UpdateOperation,\n\t\tData: map[string]interface{}{\n\t\t\t\"zip\": \"zap\",\n\t\t},\n\t}\n\tresp, err := core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = &logical.Request{\n\t\tPath: \"wraptest\/foo\",\n\t\tClientToken: root,\n\t\tOperation: logical.ReadOperation,\n\t\tWrapTTL: time.Duration(15 * time.Second),\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Fatalf(\"bad: %v\", resp)\n\t}\n\tif resp.WrapInfo == nil || resp.WrapInfo.TTL != time.Duration(15*time.Second) {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n}\n\nfunc TestRequestHandling_LoginWrapping(t *testing.T) {\n\tcore, _, root := TestCoreUnsealed(t)\n\n\tif err := core.loadMounts(); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\tcore.credentialBackends[\"userpass\"] = credUserpass.Factory\n\n\t\/\/ No duration specified\n\treq := &logical.Request{\n\t\tPath: \"sys\/auth\/userpass\",\n\t\tClientToken: root,\n\t\tOperation: logical.UpdateOperation,\n\t\tData: map[string]interface{}{\n\t\t\t\"type\": \"userpass\",\n\t\t},\n\t}\n\tresp, err := core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq.Path = \"auth\/userpass\/users\/test\"\n\treq.Data = map[string]interface{}{\n\t\t\"password\": \"foo\",\n\t\t\"policies\": \"default\",\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = &logical.Request{\n\t\tPath: \"auth\/userpass\/login\/test\",\n\t\tOperation: logical.UpdateOperation,\n\t\tWrapTTL: time.Duration(15 * time.Second),\n\t\tData: map[string]interface{}{\n\t\t\t\"password\": \"foo\",\n\t\t},\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Fatalf(\"bad: %v\", resp)\n\t}\n\tif resp.WrapInfo == nil || resp.WrapInfo.TTL != time.Duration(15*time.Second) {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n}\n<commit_msg>Add non-wrapped step<commit_after>package vault\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-uuid\"\n\tcredUserpass \"github.com\/hashicorp\/vault\/builtin\/credential\/userpass\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n)\n\nfunc TestRequestHandling_Wrapping(t *testing.T) {\n\tcore, _, root := TestCoreUnsealed(t)\n\n\tcore.logicalBackends[\"generic\"] = PassthroughBackendFactory\n\n\tmeUUID, _ := uuid.GenerateUUID()\n\terr := core.mount(&MountEntry{\n\t\tTable: mountTableType,\n\t\tUUID: meUUID,\n\t\tPath: \"wraptest\",\n\t\tType: \"generic\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\t\/\/ No duration specified\n\treq := &logical.Request{\n\t\tPath: \"wraptest\/foo\",\n\t\tClientToken: root,\n\t\tOperation: logical.UpdateOperation,\n\t\tData: map[string]interface{}{\n\t\t\t\"zip\": \"zap\",\n\t\t},\n\t}\n\tresp, err := core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = &logical.Request{\n\t\tPath: \"wraptest\/foo\",\n\t\tClientToken: root,\n\t\tOperation: logical.ReadOperation,\n\t\tWrapTTL: time.Duration(15 * time.Second),\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Fatalf(\"bad: %v\", resp)\n\t}\n\tif resp.WrapInfo == nil || resp.WrapInfo.TTL != time.Duration(15*time.Second) {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n}\n\nfunc TestRequestHandling_LoginWrapping(t *testing.T) {\n\tcore, _, root := TestCoreUnsealed(t)\n\n\tif err := core.loadMounts(); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\tcore.credentialBackends[\"userpass\"] = credUserpass.Factory\n\n\t\/\/ No duration specified\n\treq := &logical.Request{\n\t\tPath: \"sys\/auth\/userpass\",\n\t\tClientToken: root,\n\t\tOperation: logical.UpdateOperation,\n\t\tData: map[string]interface{}{\n\t\t\t\"type\": \"userpass\",\n\t\t},\n\t}\n\tresp, err := core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq.Path = \"auth\/userpass\/users\/test\"\n\treq.Data = map[string]interface{}{\n\t\t\"password\": \"foo\",\n\t\t\"policies\": \"default\",\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = &logical.Request{\n\t\tPath: \"auth\/userpass\/login\/test\",\n\t\tWrapTTL: time.Duration(15 * time.Second),\n\t\tData: map[string]interface{}{\n\t\t\t\"password\": \"foo\",\n\t\t},\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Fatalf(\"bad: %v\", resp)\n\t}\n\tif resp.WrapInfo != nil {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = &logical.Request{\n\t\tPath: \"auth\/userpass\/login\/test\",\n\t\tOperation: logical.UpdateOperation,\n\t\tWrapTTL: time.Duration(15 * time.Second),\n\t\tData: map[string]interface{}{\n\t\t\t\"password\": \"foo\",\n\t\t},\n\t}\n\tresp, err = core.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Fatalf(\"bad: %v\", resp)\n\t}\n\tif resp.WrapInfo == nil || resp.WrapInfo.TTL != time.Duration(15*time.Second) {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package telebot\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"strconv\"\n\n\t\"github.com\/mitchellh\/hashstructure\"\n)\n\n\/\/ inlineQueryHashOptions sets the HashOptions to be used when hashing\n\/\/ an inline query result (used to generate IDs).\nvar inlineQueryHashOptions = &hashstructure.HashOptions{\n\tHasher: fnv.New64(),\n}\n\n\/\/ Query is an incoming inline query. When the user sends\n\/\/ an empty query, your bot could return some default or\n\/\/ trending results.\ntype Query struct {\n\t\/\/ Unique identifier for this query.\n\tID string `json:\"id\"`\n\n\t\/\/ Sender.\n\tFrom User `json:\"from\"`\n\n\t\/\/ (Optional) Sender location, only for bots that request user location.\n\tLocation Location `json:\"location\"`\n\n\t\/\/ Text of the query (up to 512 characters).\n\tText string `json:\"query\"`\n\n\t\/\/ Offset of the results to be returned, can be controlled by the bot.\n\tOffset string `json:\"offset\"`\n}\n\n\/\/ QueryResponse builds a response to an inline Query.\n\/\/ See also: https:\/\/core.telegram.org\/bots\/api#answerinlinequery\ntype QueryResponse struct {\n\t\/\/ The ID of the query to which this is a response.\n\t\/\/ It is not necessary to specify this field manually.\n\tQueryID string `json:\"inline_query_id\"`\n\n\t\/\/ The results for the inline query.\n\tResults InlineQueryResults `json:\"results\"`\n\n\t\/\/ (Optional) The maximum amount of time in seconds that the result\n\t\/\/ of the inline query may be cached on the server.\n\tCacheTime int `json:\"cache_time,omitempty\"`\n\n\t\/\/ (Optional) Pass True, if results may be cached on the server side\n\t\/\/ only for the user that sent the query. By default, results may\n\t\/\/ be returned to any user who sends the same query.\n\tIsPersonal bool `json:\"is_personal\"`\n\n\t\/\/ (Optional) Pass the offset that a client should send in the next\n\t\/\/ query with the same text to receive more results. Pass an empty\n\t\/\/ string if there are no more results or if you don‘t support\n\t\/\/ pagination. Offset length can’t exceed 64 bytes.\n\tNextOffset string `json:\"next_offset\"`\n\n\t\/\/ (Optional) If passed, clients will display a button with specified\n\t\/\/ text that switches the user to a private chat with the bot and sends\n\t\/\/ the bot a start message with the parameter switch_pm_parameter.\n\tSwitchPMText string `json:\"switch_pm_text,omitempty\"`\n\n\t\/\/ (Optional) Parameter for the start message sent to the bot when user\n\t\/\/ presses the switch button.\n\tSwitchPMParameter string `json:\"switch_pm_parameter,omitempty\"`\n}\n\n\/\/ InlineQueryResult represents one result of an inline query.\ntype InlineQueryResult interface {\n\tGetID() string\n\tSetID(string)\n}\n\n\/\/ InlineQueryResults is a slice wrapper for convenient marshalling.\ntype InlineQueryResults []InlineQueryResult\n\n\/\/ MarshalJSON makes sure IQRs have proper IDs and Type variables set.\n\/\/\n\/\/ If ID of some result appears empty, it gets set to a new hash.\n\/\/ JSON-specific Type gets infered from the actual (specific) IQR type.\nfunc (results *InlineQueryResults) MarshalJSON() ([]byte, error) {\n\tfor i, result := range *results {\n\t\tif result.GetID() == \"\" {\n\t\t\thash, err := hashstructure.Hash(result, inlineQueryHashOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"telebot: can't hash IQR #%d: %s\",\n\t\t\t\t\ti, err)\n\t\t\t}\n\n\t\t\tresult.SetID(strconv.FormatUint(hash, 16))\n\t\t}\n\n\t\tif err := infereIQR(result); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"telebot: can't infere type of IQR #%d: %s\",\n\t\t\t\ti, err)\n\t\t}\n\t}\n\n\treturn json.Marshal([]InlineQueryResult(*results))\n}\n\nfunc infereIQR(result InlineQueryResult) error {\n\tswitch r := result.(type) {\n\tcase *InlineQueryResultArticle:\n\t\tr.Type = \"article\"\n\tcase *InlineQueryResultAudio:\n\t\tr.Type = \"audio\"\n\tcase *InlineQueryResultContact:\n\t\tr.Type = \"contact\"\n\tcase *InlineQueryResultDocument:\n\t\tr.Type = \"document\"\n\tcase *InlineQueryResultGif:\n\t\tr.Type = \"gif\"\n\tcase *InlineQueryResultLocation:\n\t\tr.Type = \"location\"\n\tcase *InlineQueryResultMpeg4Gif:\n\t\tr.Type = \"mpeg4_gif\"\n\tcase *InlineQueryResultPhoto:\n\t\tr.Type = \"photo\"\n\tcase *InlineQueryResultVenue:\n\t\tr.Type = \"venue\"\n\tcase *InlineQueryResultVideo:\n\t\tr.Type = \"video\"\n\tcase *InlineQueryResultVoice:\n\t\tr.Type = \"voice\"\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not an IQR\", result)\n\t}\n\n\treturn nil\n}\n\n\/\/ Result is a deprecated type, superseded by InlineQueryResult.\ntype Result interface {\n\tMarshalJSON() ([]byte, error)\n}\n<commit_msg>Typo infereIQR -> inferIQR.<commit_after>package telebot\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"strconv\"\n\n\t\"github.com\/mitchellh\/hashstructure\"\n)\n\n\/\/ inlineQueryHashOptions sets the HashOptions to be used when hashing\n\/\/ an inline query result (used to generate IDs).\nvar inlineQueryHashOptions = &hashstructure.HashOptions{\n\tHasher: fnv.New64(),\n}\n\n\/\/ Query is an incoming inline query. When the user sends\n\/\/ an empty query, your bot could return some default or\n\/\/ trending results.\ntype Query struct {\n\t\/\/ Unique identifier for this query.\n\tID string `json:\"id\"`\n\n\t\/\/ Sender.\n\tFrom User `json:\"from\"`\n\n\t\/\/ (Optional) Sender location, only for bots that request user location.\n\tLocation Location `json:\"location\"`\n\n\t\/\/ Text of the query (up to 512 characters).\n\tText string `json:\"query\"`\n\n\t\/\/ Offset of the results to be returned, can be controlled by the bot.\n\tOffset string `json:\"offset\"`\n}\n\n\/\/ QueryResponse builds a response to an inline Query.\n\/\/ See also: https:\/\/core.telegram.org\/bots\/api#answerinlinequery\ntype QueryResponse struct {\n\t\/\/ The ID of the query to which this is a response.\n\t\/\/ It is not necessary to specify this field manually.\n\tQueryID string `json:\"inline_query_id\"`\n\n\t\/\/ The results for the inline query.\n\tResults InlineQueryResults `json:\"results\"`\n\n\t\/\/ (Optional) The maximum amount of time in seconds that the result\n\t\/\/ of the inline query may be cached on the server.\n\tCacheTime int `json:\"cache_time,omitempty\"`\n\n\t\/\/ (Optional) Pass True, if results may be cached on the server side\n\t\/\/ only for the user that sent the query. By default, results may\n\t\/\/ be returned to any user who sends the same query.\n\tIsPersonal bool `json:\"is_personal\"`\n\n\t\/\/ (Optional) Pass the offset that a client should send in the next\n\t\/\/ query with the same text to receive more results. Pass an empty\n\t\/\/ string if there are no more results or if you don‘t support\n\t\/\/ pagination. Offset length can’t exceed 64 bytes.\n\tNextOffset string `json:\"next_offset\"`\n\n\t\/\/ (Optional) If passed, clients will display a button with specified\n\t\/\/ text that switches the user to a private chat with the bot and sends\n\t\/\/ the bot a start message with the parameter switch_pm_parameter.\n\tSwitchPMText string `json:\"switch_pm_text,omitempty\"`\n\n\t\/\/ (Optional) Parameter for the start message sent to the bot when user\n\t\/\/ presses the switch button.\n\tSwitchPMParameter string `json:\"switch_pm_parameter,omitempty\"`\n}\n\n\/\/ InlineQueryResult represents one result of an inline query.\ntype InlineQueryResult interface {\n\tGetID() string\n\tSetID(string)\n}\n\n\/\/ InlineQueryResults is a slice wrapper for convenient marshalling.\ntype InlineQueryResults []InlineQueryResult\n\n\/\/ MarshalJSON makes sure IQRs have proper IDs and Type variables set.\n\/\/\n\/\/ If ID of some result appears empty, it gets set to a new hash.\n\/\/ JSON-specific Type gets infered from the actual (specific) IQR type.\nfunc (results *InlineQueryResults) MarshalJSON() ([]byte, error) {\n\tfor i, result := range *results {\n\t\tif result.GetID() == \"\" {\n\t\t\thash, err := hashstructure.Hash(result, inlineQueryHashOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"telebot: can't hash IQR #%d: %s\",\n\t\t\t\t\ti, err)\n\t\t\t}\n\n\t\t\tresult.SetID(strconv.FormatUint(hash, 16))\n\t\t}\n\n\t\tif err := inferIQR(result); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"telebot: can't infer type of IQR #%d: %s\",\n\t\t\t\ti, err)\n\t\t}\n\t}\n\n\treturn json.Marshal([]InlineQueryResult(*results))\n}\n\nfunc inferIQR(result InlineQueryResult) error {\n\tswitch r := result.(type) {\n\tcase *InlineQueryResultArticle:\n\t\tr.Type = \"article\"\n\tcase *InlineQueryResultAudio:\n\t\tr.Type = \"audio\"\n\tcase *InlineQueryResultContact:\n\t\tr.Type = \"contact\"\n\tcase *InlineQueryResultDocument:\n\t\tr.Type = \"document\"\n\tcase *InlineQueryResultGif:\n\t\tr.Type = \"gif\"\n\tcase *InlineQueryResultLocation:\n\t\tr.Type = \"location\"\n\tcase *InlineQueryResultMpeg4Gif:\n\t\tr.Type = \"mpeg4_gif\"\n\tcase *InlineQueryResultPhoto:\n\t\tr.Type = \"photo\"\n\tcase *InlineQueryResultVenue:\n\t\tr.Type = \"venue\"\n\tcase *InlineQueryResultVideo:\n\t\tr.Type = \"video\"\n\tcase *InlineQueryResultVoice:\n\t\tr.Type = \"voice\"\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not an IQR\", result)\n\t}\n\n\treturn nil\n}\n\n\/\/ Result is a deprecated type, superseded by InlineQueryResult.\ntype Result interface {\n\tMarshalJSON() ([]byte, error)\n}\n<|endoftext|>"} {"text":"<commit_before>package sqlkungfu\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"database\/sql\"\n)\n\ntype (\n\t\/\/ InsertNull bool\n\tTableName string\n)\n\nfunc Insert(db *sql.DB, v interface{}, cfgs ...interface{}) (string, sql.Result, error) {\n\treturn DefaultMaster.Insert(db, v, cfgs...)\n}\n\n\/\/ INSERT INTO table_name (column1,column2,column3,...)\n\/\/ VALUES (value1,value2,value3,...);\nfunc (m Master) Insert(db *sql.DB, v interface{}, cfgs ...interface{}) (insert string, r sql.Result, err error) {\n\trv := indirect(reflect.ValueOf(v))\n\n\tidField, fields, holders, values, table, err := m.retrieveValues(db, rv, cfgs...)\n\tif err != nil {\n\t\treturn\n\t}\n\tinsert = fmt.Sprintf(\"INSERT INTO %s (%s) VALUES (%s)\", table, strings.Join(fields, \",\"), strings.Join(holders, \",\"))\n\n\tif r, err = db.Exec(insert, values...); err != nil {\n\t\treturn\n\t}\n\tif idField.IsValid() && idField.CanSet() || rv.Kind() == reflect.Map {\n\t\tvar id int64\n\t\tif id, err = r.LastInsertId(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif rv.Kind() == reflect.Map {\n\t\t\trv.SetMapIndex(reflect.ValueOf(\"id\"), reflect.ValueOf(id))\n\t\t} else {\n\t\t\tidField.Set(reflect.ValueOf(id).Convert(idField.Type()))\n\t\t}\n\t}\n\treturn\n}\n\nfunc (m Master) retrieveValues(db *sql.DB, rv reflect.Value, cfgs ...interface{}) (idField reflect.Value, fields []string, holders []string, values []interface{}, table string, err error) {\n\ttable = strings.ToLower(rv.Type().Name()) + \"s\"\n\t\/\/ var insertNull bool\n\tfor _, c := range cfgs {\n\t\tswitch c.(type) {\n\t\t\/\/ case InsertNull:\n\t\t\/\/ \tinsertNull = true\n\t\tcase TableName:\n\t\t\ttable = string(c.(TableName))\n\t\t}\n\t}\n\n\t\/\/ _ = insertNull\n\tswitch rv.Kind() {\n\tcase reflect.Struct:\n\t\tidField, fields, holders, values, err = m.walkStruct(rv)\n\tcase reflect.Map:\n\t\tif rv.Type().Key().Kind() != reflect.String {\n\t\t\terr = fmt.Errorf(\"sqlkungfu: Insert(map key is %s, want string)\", rv.Type().Key().Kind())\n\t\t\treturn\n\t\t}\n\t\tfor _, k := range rv.MapKeys() {\n\t\t\tfields = append(fields, m.quoteColumn(k.String()))\n\t\t\tholders = append(holders, \"?\")\n\t\t\tvalues = append(values, rv.MapIndex(k).Interface())\n\t\t}\n\t}\n\treturn\n}\n\nfunc (m Master) walkStruct(rv reflect.Value) (idField reflect.Value, fields []string, holders []string, values []interface{}, err error) {\n\trvt := rv.Type()\n\tnum := rv.NumField()\n\tfor i := 0; i < num; i++ {\n\t\tst := rvt.FieldByIndex([]int{i})\n\t\toptions := strings.Split(st.Tag.Get(m.TagKey), \",\")\n\t\tfor i, o := range options {\n\t\t\toptions[i] = strings.TrimSpace(o)\n\t\t}\n\t\tname := options[0]\n\t\tif name == \"-\" {\n\t\t\tcontinue\n\t\t} else if name == \"\" {\n\t\t\tname = strings.ToLower(st.Name)\n\t\t}\n\n\t\toptions = options[1:]\n\t\tfield := rv.FieldByIndex([]int{i})\n\t\tftype := indirectT(st.Type).Kind()\n\t\tif ftype == reflect.Struct && (st.Anonymous || optionsContain(options, \"inline\")) {\n\t\t\tid, f, h, v, e := m.walkStruct(indirect(field))\n\t\t\tif e != nil {\n\t\t\t\terr = e\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif id.IsValid() && !idField.IsValid() {\n\t\t\t\tidField = id\n\t\t\t}\n\n\t\t\tfields = append(fields, f...)\n\t\t\tholders = append(holders, h...)\n\t\t\tvalues = append(values, v...)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch ftype {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\tif optionsContain(options, \"id\") || strings.ToLower(st.Name) == \"id\" {\n\t\t\t\tif f := indirect(field); f.Convert(reflect.TypeOf(0)).Int() == 0 {\n\t\t\t\t\tidField = f\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Struct, reflect.Array, reflect.Map, reflect.Slice:\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues = append(values, field.Interface())\n\t\tfields = append(fields, m.quoteColumn(name))\n\t\tholders = append(holders, \"?\")\n\t}\n\n\treturn\n}\n\nfunc optionsContain(options []string, tag string) bool {\n\tfor _, o := range options {\n\t\tif o == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc Update(db *sql.DB, v interface{}, cfgs ...interface{}) (string, sql.Result, error) {\n\treturn DefaultMaster.Update(db, v, cfgs...)\n}\n\n\/\/ UPDATE table_name\n\/\/ SET column1=value1,column2=value2,...\nfunc (m Master) Update(db *sql.DB, v interface{}, cfgs ...interface{}) (update string, r sql.Result, err error) {\n\trv := indirect(reflect.ValueOf(v))\n\t_, fields, _, values, table, err := m.retrieveValues(db, rv, cfgs...)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar changer []string\n\tfor _, f := range fields {\n\t\tchanger = append(changer, fmt.Sprintf(\"%s=?\", f))\n\t}\n\tupdate = fmt.Sprintf(\"UPDATE %s SET %s\", table, strings.Join(changer, \",\"))\n\n\tif r, err = db.Exec(update, values...); err != nil {\n\t\treturn\n\t}\n\treturn\n}\n<commit_msg>insert: handle nil pointer values and time.Time<commit_after>package sqlkungfu\n\nimport (\n\t\"time\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"database\/sql\"\n)\n\ntype (\n\t\/\/ InsertNull bool\n\tTableName string\n)\n\nfunc Insert(db *sql.DB, v interface{}, cfgs ...interface{}) (string, sql.Result, error) {\n\treturn DefaultMaster.Insert(db, v, cfgs...)\n}\n\n\/\/ INSERT INTO table_name (column1,column2,column3,...)\n\/\/ VALUES (value1,value2,value3,...);\nfunc (m Master) Insert(db *sql.DB, v interface{}, cfgs ...interface{}) (insert string, r sql.Result, err error) {\n\trv := indirect(reflect.ValueOf(v))\n\n\tidField, fields, holders, values, table, err := m.retrieveValues(db, rv, cfgs...)\n\tif err != nil {\n\t\treturn\n\t}\n\tinsert = fmt.Sprintf(\"INSERT INTO %s (%s) VALUES (%s)\", table, strings.Join(fields, \",\"), strings.Join(holders, \",\"))\n\n\tif r, err = db.Exec(insert, values...); err != nil {\n\t\treturn\n\t}\n\tif idField.IsValid() && idField.CanSet() || rv.Kind() == reflect.Map {\n\t\tvar id int64\n\t\tif id, err = r.LastInsertId(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif rv.Kind() == reflect.Map {\n\t\t\trv.SetMapIndex(reflect.ValueOf(\"id\"), reflect.ValueOf(id))\n\t\t} else {\n\t\t\tidField.Set(reflect.ValueOf(id).Convert(idField.Type()))\n\t\t}\n\t}\n\treturn\n}\n\nfunc (m Master) retrieveValues(db *sql.DB, rv reflect.Value, cfgs ...interface{}) (idField reflect.Value, fields []string, holders []string, values []interface{}, table string, err error) {\n\ttable = strings.ToLower(rv.Type().Name()) + \"s\"\n\t\/\/ var insertNull bool\n\tfor _, c := range cfgs {\n\t\tswitch c.(type) {\n\t\t\/\/ case InsertNull:\n\t\t\/\/ \tinsertNull = true\n\t\tcase TableName:\n\t\t\ttable = string(c.(TableName))\n\t\t}\n\t}\n\n\t\/\/ _ = insertNull\n\tswitch rv.Kind() {\n\tcase reflect.Struct:\n\t\tidField, fields, holders, values, err = m.walkStruct(rv)\n\tcase reflect.Map:\n\t\tif rv.Type().Key().Kind() != reflect.String {\n\t\t\terr = fmt.Errorf(\"sqlkungfu: Insert(map key is %s, want string)\", rv.Type().Key().Kind())\n\t\t\treturn\n\t\t}\n\t\tfor _, k := range rv.MapKeys() {\n\t\t\tfields = append(fields, m.quoteColumn(k.String()))\n\t\t\tholders = append(holders, \"?\")\n\t\t\tvalues = append(values, rv.MapIndex(k).Interface())\n\t\t}\n\t}\n\treturn\n}\n\nfunc (m Master) walkStruct(rv reflect.Value) (idField reflect.Value, fields []string, holders []string, values []interface{}, err error) {\n\trvt := rv.Type()\n\tnum := rv.NumField()\n\tfor i := 0; i < num; i++ {\n\t\tst := rvt.FieldByIndex([]int{i})\n\t\toptions := strings.Split(st.Tag.Get(m.TagKey), \",\")\n\t\tfor i, o := range options {\n\t\t\toptions[i] = strings.TrimSpace(o)\n\t\t}\n\t\tname := options[0]\n\t\tif name == \"-\" {\n\t\t\tcontinue\n\t\t} else if name == \"\" {\n\t\t\tname = strings.ToLower(st.Name)\n\t\t}\n\n\t\toptions = options[1:]\n\t\tfield := rv.FieldByIndex([]int{i})\n\t\tif field.Kind() == reflect.Ptr && field.IsNil() {\n\t\t\tcontinue\n\t\t}\n\t\tftype := indirectT(st.Type)\n\t\tkind := ftype.Kind()\n\t\tif kind == reflect.Struct && (st.Anonymous || optionsContain(options, \"inline\")) {\n\t\t\tid, f, h, v, e := m.walkStruct(indirect(field))\n\t\t\tif e != nil {\n\t\t\t\terr = e\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif id.IsValid() && !idField.IsValid() {\n\t\t\t\tidField = id\n\t\t\t}\n\n\t\t\tfields = append(fields, f...)\n\t\t\tholders = append(holders, h...)\n\t\t\tvalues = append(values, v...)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch kind {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\tif optionsContain(options, \"id\") || strings.ToLower(st.Name) == \"id\" {\n\t\t\t\tif f := indirect(field); f.Convert(reflect.TypeOf(0)).Int() == 0 {\n\t\t\t\t\tidField = f\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tif !ftype.ConvertibleTo(reflect.TypeOf(time.Time{})) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase reflect.Array, reflect.Map, reflect.Slice:\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues = append(values, field.Interface())\n\t\tfields = append(fields, m.quoteColumn(name))\n\t\tholders = append(holders, \"?\")\n\t}\n\n\treturn\n}\n\nfunc optionsContain(options []string, tag string) bool {\n\tfor _, o := range options {\n\t\tif o == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc Update(db *sql.DB, v interface{}, cfgs ...interface{}) (string, sql.Result, error) {\n\treturn DefaultMaster.Update(db, v, cfgs...)\n}\n\n\/\/ UPDATE table_name\n\/\/ SET column1=value1,column2=value2,...\nfunc (m Master) Update(db *sql.DB, v interface{}, cfgs ...interface{}) (update string, r sql.Result, err error) {\n\trv := indirect(reflect.ValueOf(v))\n\t_, fields, _, values, table, err := m.retrieveValues(db, rv, cfgs...)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar changer []string\n\tfor _, f := range fields {\n\t\tchanger = append(changer, fmt.Sprintf(\"%s=?\", f))\n\t}\n\tupdate = fmt.Sprintf(\"UPDATE %s SET %s\", table, strings.Join(changer, \",\"))\n\n\tif r, err = db.Exec(update, values...); err != nil {\n\t\treturn\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2013 Steve Francia <spf@spf13.com>.\n\/\/\n\/\/ Licensed under the Simple Public License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/opensource.org\/licenses\/Simple-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/spf13\/hugo\/helpers\"\n)\n\n\/*\n * An taxonomy list is a list of all taxonomies and their values\n * EG. List['tags'] => TagTaxonomy (from above)\n *\/\ntype TaxonomyList map[string]Taxonomy\n\n\/*\n * An taxonomy is a map of keywords to a list of pages.\n * For example\n * TagTaxonomy['technology'] = WeightedPages\n * TagTaxonomy['go'] = WeightedPages2\n *\/\ntype Taxonomy map[string]WeightedPages\n\n\/*\n * A list of Pages with their corresponding (and relative) weight\n * [{Weight: 30, Page: *1}, {Weight: 40, Page: *2}]\n *\/\ntype WeightedPages []WeightedPage\ntype WeightedPage struct {\n\tWeight int\n\tPage *Page\n}\n\n\/*\n * This is another representation of an Taxonomy using an array rather than a map.\n * Important because you can't order a map.\n *\/\ntype OrderedTaxonomy []OrderedTaxonomyEntry\n\n\/*\n * Similar to an element of an Taxonomy, but with the key embedded (as name)\n * Eg: {Name: Technology, WeightedPages: Taxonomyedpages}\n *\/\ntype OrderedTaxonomyEntry struct {\n\tName string\n\tWeightedPages WeightedPages\n}\n\n\/\/ KeyPrep... Taxonomies should be case insensitive. Can make it easily conditional later.\nfunc kp(in string) string {\n\treturn helpers.MakePathToLower(in)\n}\n\nfunc (i Taxonomy) Get(key string) WeightedPages { return i[kp(key)] }\nfunc (i Taxonomy) Count(key string) int { return len(i[kp(key)]) }\nfunc (i Taxonomy) Add(key string, w WeightedPage) {\n\tkey = kp(key)\n\ti[key] = append(i[key], w)\n}\n\n\/\/ Returns an ordered taxonomy with a non defined order\nfunc (i Taxonomy) TaxonomyArray() OrderedTaxonomy {\n\ties := make([]OrderedTaxonomyEntry, len(i))\n\tcount := 0\n\tfor k, v := range i {\n\t\ties[count] = OrderedTaxonomyEntry{Name: k, WeightedPages: v}\n\t\tcount++\n\t}\n\treturn ies\n}\n\n\/\/ Returns an ordered taxonomy sorted by key name\nfunc (i Taxonomy) Alphabetical() OrderedTaxonomy {\n\tname := func(i1, i2 *OrderedTaxonomyEntry) bool {\n\t\treturn i1.Name < i2.Name\n\t}\n\n\tia := i.TaxonomyArray()\n\tOIby(name).Sort(ia)\n\treturn ia\n}\n\n\/\/ Returns an ordered taxonomy sorted by # of pages per key\nfunc (i Taxonomy) ByCount() OrderedTaxonomy {\n\tcount := func(i1, i2 *OrderedTaxonomyEntry) bool {\n\t\treturn len(i1.WeightedPages) > len(i2.WeightedPages)\n\t}\n\n\tia := i.TaxonomyArray()\n\tOIby(count).Sort(ia)\n\treturn ia\n}\n\n\/\/ Helper to move the page access up a level\nfunc (ie OrderedTaxonomyEntry) Pages() Pages {\n\treturn ie.WeightedPages.Pages()\n}\n\nfunc (ie OrderedTaxonomyEntry) Count() int {\n\treturn len(ie.WeightedPages)\n}\n\n\/*\n * Implementation of a custom sorter for OrderedTaxonomies\n *\/\n\n\/\/ A type to implement the sort interface for TaxonomyEntries.\ntype orderedTaxonomySorter struct {\n\ttaxonomy OrderedTaxonomy\n\tby OIby\n}\n\n\/\/ Closure used in the Sort.Less method.\ntype OIby func(i1, i2 *OrderedTaxonomyEntry) bool\n\nfunc (by OIby) Sort(taxonomy OrderedTaxonomy) {\n\tps := &orderedTaxonomySorter{\n\t\ttaxonomy: taxonomy,\n\t\tby: by, \/\/ The Sort method's receiver is the function (closure) that defines the sort order.\n\t}\n\tsort.Sort(ps)\n}\n\n\/\/ Len is part of sort.Interface.\nfunc (s *orderedTaxonomySorter) Len() int {\n\treturn len(s.taxonomy)\n}\n\n\/\/ Swap is part of sort.Interface.\nfunc (s *orderedTaxonomySorter) Swap(i, j int) {\n\ts.taxonomy[i], s.taxonomy[j] = s.taxonomy[j], s.taxonomy[i]\n}\n\n\/\/ Less is part of sort.Interface. It is implemented by calling the \"by\" closure in the sorter.\nfunc (s *orderedTaxonomySorter) Less(i, j int) bool {\n\treturn s.by(&s.taxonomy[i], &s.taxonomy[j])\n}\n\nfunc (wp WeightedPages) Pages() Pages {\n\tpages := make(Pages, len(wp))\n\tfor i := range wp {\n\t\tpages[i] = wp[i].Page\n\t}\n\treturn pages\n}\n\nfunc (p WeightedPages) Len() int { return len(p) }\nfunc (p WeightedPages) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p WeightedPages) Sort() { sort.Sort(p) }\nfunc (p WeightedPages) Count() int { return len(p) }\nfunc (p WeightedPages) Less(i, j int) bool {\n\tif p[i].Weight == p[j].Weight {\n\t\treturn p[i].Page.Date.Unix() > p[j].Page.Date.Unix()\n\t} else {\n\t\treturn p[i].Weight < p[j].Weight\n\t}\n}\n\n\/\/ TODO mimic PagesSorter for WeightedPages\n<commit_msg>Making the term \"Term\" more consistent with Taxonomy usage.<commit_after>\/\/ Copyright © 2013 Steve Francia <spf@spf13.com>.\n\/\/\n\/\/ Licensed under the Simple Public License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/opensource.org\/licenses\/Simple-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/spf13\/hugo\/helpers\"\n)\n\n\/*\n * An taxonomy list is a list of all taxonomies and their values\n * EG. List['tags'] => TagTaxonomy (from above)\n *\/\ntype TaxonomyList map[string]Taxonomy\n\n\/*\n * An taxonomy is a map of keywords to a list of pages.\n * For example\n * TagTaxonomy['technology'] = WeightedPages\n * TagTaxonomy['go'] = WeightedPages2\n *\/\ntype Taxonomy map[string]WeightedPages\n\n\/*\n * A list of Pages with their corresponding (and relative) weight\n * [{Weight: 30, Page: *1}, {Weight: 40, Page: *2}]\n *\/\ntype WeightedPages []WeightedPage\ntype WeightedPage struct {\n\tWeight int\n\tPage *Page\n}\n\n\/*\n * This is another representation of an Taxonomy using an array rather than a map.\n * Important because you can't order a map.\n *\/\ntype OrderedTaxonomy []OrderedTaxonomyEntry\n\n\/*\n * Similar to an element of an Taxonomy, but with the key embedded (as name)\n * Eg: {Name: Technology, WeightedPages: Taxonomyedpages}\n *\/\ntype OrderedTaxonomyEntry struct {\n\tName string\n\tWeightedPages WeightedPages\n}\n\n\/\/ KeyPrep... Taxonomies should be case insensitive. Can make it easily conditional later.\nfunc kp(in string) string {\n\treturn helpers.MakePathToLower(in)\n}\n\nfunc (i Taxonomy) Get(key string) WeightedPages { return i[kp(key)] }\nfunc (i Taxonomy) Count(key string) int { return len(i[kp(key)]) }\nfunc (i Taxonomy) Add(key string, w WeightedPage) {\n\tkey = kp(key)\n\ti[key] = append(i[key], w)\n}\n\n\/\/ Returns an ordered taxonomy with a non defined order\nfunc (i Taxonomy) TaxonomyArray() OrderedTaxonomy {\n\ties := make([]OrderedTaxonomyEntry, len(i))\n\tcount := 0\n\tfor k, v := range i {\n\t\ties[count] = OrderedTaxonomyEntry{Name: k, WeightedPages: v}\n\t\tcount++\n\t}\n\treturn ies\n}\n\n\/\/ Returns an ordered taxonomy sorted by key name\nfunc (i Taxonomy) Alphabetical() OrderedTaxonomy {\n\tname := func(i1, i2 *OrderedTaxonomyEntry) bool {\n\t\treturn i1.Name < i2.Name\n\t}\n\n\tia := i.TaxonomyArray()\n\tOIby(name).Sort(ia)\n\treturn ia\n}\n\n\/\/ Returns an ordered taxonomy sorted by # of pages per key\nfunc (i Taxonomy) ByCount() OrderedTaxonomy {\n\tcount := func(i1, i2 *OrderedTaxonomyEntry) bool {\n\t\treturn len(i1.WeightedPages) > len(i2.WeightedPages)\n\t}\n\n\tia := i.TaxonomyArray()\n\tOIby(count).Sort(ia)\n\treturn ia\n}\n\n\/\/ Helper to move the page access up a level\nfunc (ie OrderedTaxonomyEntry) Pages() Pages {\n\treturn ie.WeightedPages.Pages()\n}\n\nfunc (ie OrderedTaxonomyEntry) Count() int {\n\treturn len(ie.WeightedPages)\n}\n\nfunc (ie OrderedTaxonomyEntry) Term() string {\n\treturn ie.Name\n}\n\n\/*\n * Implementation of a custom sorter for OrderedTaxonomies\n *\/\n\n\/\/ A type to implement the sort interface for TaxonomyEntries.\ntype orderedTaxonomySorter struct {\n\ttaxonomy OrderedTaxonomy\n\tby OIby\n}\n\n\/\/ Closure used in the Sort.Less method.\ntype OIby func(i1, i2 *OrderedTaxonomyEntry) bool\n\nfunc (by OIby) Sort(taxonomy OrderedTaxonomy) {\n\tps := &orderedTaxonomySorter{\n\t\ttaxonomy: taxonomy,\n\t\tby: by, \/\/ The Sort method's receiver is the function (closure) that defines the sort order.\n\t}\n\tsort.Sort(ps)\n}\n\n\/\/ Len is part of sort.Interface.\nfunc (s *orderedTaxonomySorter) Len() int {\n\treturn len(s.taxonomy)\n}\n\n\/\/ Swap is part of sort.Interface.\nfunc (s *orderedTaxonomySorter) Swap(i, j int) {\n\ts.taxonomy[i], s.taxonomy[j] = s.taxonomy[j], s.taxonomy[i]\n}\n\n\/\/ Less is part of sort.Interface. It is implemented by calling the \"by\" closure in the sorter.\nfunc (s *orderedTaxonomySorter) Less(i, j int) bool {\n\treturn s.by(&s.taxonomy[i], &s.taxonomy[j])\n}\n\nfunc (wp WeightedPages) Pages() Pages {\n\tpages := make(Pages, len(wp))\n\tfor i := range wp {\n\t\tpages[i] = wp[i].Page\n\t}\n\treturn pages\n}\n\nfunc (p WeightedPages) Len() int { return len(p) }\nfunc (p WeightedPages) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p WeightedPages) Sort() { sort.Sort(p) }\nfunc (p WeightedPages) Count() int { return len(p) }\nfunc (p WeightedPages) Less(i, j int) bool {\n\tif p[i].Weight == p[j].Weight {\n\t\treturn p[i].Page.Date.Unix() > p[j].Page.Date.Unix()\n\t} else {\n\t\treturn p[i].Weight < p[j].Weight\n\t}\n}\n\n\/\/ TODO mimic PagesSorter for WeightedPages\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/01org\/ciao\/payloads\"\n\t\"github.com\/01org\/ciao\/ssntp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc testHTTPRequest(t *testing.T, method string, URL string, expectedResponse int, data []byte) []byte {\n\treq, err := http.NewRequest(method, URL, bytes.NewBuffer(data))\n\treq.Header.Set(\"X-Auth-Token\", \"imavalidtoken\")\n\tif data != nil {\n\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != expectedResponse {\n\t\tvar msg string\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\tmsg = string(body)\n\t\t}\n\n\t\tt.Fatalf(\"expected: %d, got: %d, msg: %s\", expectedResponse, resp.StatusCode, msg)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn body\n}\n\nfunc testCreateServer(t *testing.T, n int) payloads.ComputeServers {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ get a valid workload ID\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(wls) == 0 {\n\t\tt.Fatal(\"No valid workloads\")\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\"\n\n\tvar server payloads.ComputeCreateServer\n\tserver.Server.MaxInstances = n\n\tserver.Server.Workload = wls[0].ID\n\n\tb, err := json.Marshal(server)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbody := testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n\n\tvar servers payloads.ComputeServers\n\n\terr = json.Unmarshal(body, &servers)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif servers.TotalServers != n {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\treturn servers\n}\n\nfunc testListServerDetailsTenant(t *testing.T, tenantID string) payloads.ComputeServers {\n\turl := computeURL + \"\/v2.1\/\" + tenantID + \"\/servers\/detail\"\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar s payloads.ComputeServers\n\terr := json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn s\n}\n\nfunc TestCreateSingleServer(t *testing.T) {\n\t_ = testCreateServer(t, 1)\n}\n\nfunc TestListServerDetailsTenant(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n}\n\nfunc TestListServerDetailsWorkload(t *testing.T) {\n\t\/\/ get a valid workload ID\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(wls) == 0 {\n\t\tt.Fatal(\"No valid workloads\")\n\t}\n\n\tservers := testCreateServer(t, 10)\n\tif servers.TotalServers != 10 {\n\t\tt.Fatal(\"failed to create enough servers\")\n\t}\n\n\turl := computeURL + \"\/v2.1\/flavors\/\" + wls[0].ID + \"\/servers\/detail\"\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar s payloads.ComputeServers\n\terr = json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif s.TotalServers < 10 {\n\t\tt.Fatal(\"Did not return correct number of servers\")\n\t}\n}\n\nfunc TestShowServerDetails(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/\"\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\tfor _, s1 := range s.Servers {\n\t\turl := tURL + s1.ID\n\n\t\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\t\tvar s2 payloads.ComputeServer\n\t\terr = json.Unmarshal(body, &s2)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif reflect.DeepEqual(s1, s2.Server) == false {\n\t\t\tt.Fatal(\"Server details not correct\")\n\t\t}\n\t}\n}\n\nfunc TestDeleteServer(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ instances have to be assigned to a node to be deleted\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/\"\n\n\tservers := testCreateServer(t, 10)\n\tif servers.TotalServers != 10 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\tfor _, s1 := range s.Servers {\n\t\turl := tURL + s1.ID\n\t\tif s1.HostID != \"\" {\n\t\t\t_ = testHTTPRequest(t, \"DELETE\", url, http.StatusAccepted, nil)\n\t\t} else {\n\t\t\t_ = testHTTPRequest(t, \"DELETE\", url, http.StatusInternalServerError, nil)\n\t\t}\n\n\t}\n}\n\nfunc TestServersActionStart(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/action\"\n\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ttime.Sleep(1 * time.Second)\n\n\terr = context.stopInstance(servers.Servers[0].ID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\tclient.sendStats()\n\n\tvar ids []string\n\tids = append(ids, servers.Servers[0].ID)\n\n\tcmd := payloads.CiaoServersAction{\n\t\tAction: \"os-start\",\n\t\tServerIDs: ids,\n\t}\n\n\tb, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n}\n\nfunc TestServersActionStop(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/action\"\n\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ttime.Sleep(1 * time.Second)\n\n\tvar ids []string\n\tids = append(ids, servers.Servers[0].ID)\n\n\tcmd := payloads.CiaoServersAction{\n\t\tAction: \"os-stop\",\n\t\tServerIDs: ids,\n\t}\n\n\tb, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n}\n\nfunc TestListFlavors(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/flavors\"\n\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar flavors payloads.ComputeFlavors\n\terr = json.Unmarshal(body, &flavors)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(flavors.Flavors) != len(wls) {\n\t\tt.Fatal(\"Incorrect number of flavors returned\")\n\t}\n\n\tvar matched int\n\n\tfor _, f := range flavors.Flavors {\n\t\tfor _, w := range wls {\n\t\t\tif w.ID == f.ID && w.Description == f.Name {\n\t\t\t\tmatched++\n\t\t\t}\n\t\t}\n\t}\n\n\tif matched != len(wls) {\n\t\tt.Fatal(\"Flavor information didn't match workload information\")\n\t}\n}\n\nfunc TestShowFlavorDetails(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/flavors\/\"\n\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, w := range wls {\n\t\tdetails := payloads.FlavorDetails{\n\t\t\tOsFlavorAccessIsPublic: true,\n\t\t\tID: w.ID,\n\t\t\tDisk: w.ImageID,\n\t\t\tName: w.Description,\n\t\t}\n\n\t\tdefaults := w.Defaults\n\t\tfor r := range defaults {\n\t\t\tswitch defaults[r].Type {\n\t\t\tcase payloads.VCPUs:\n\t\t\t\tdetails.Vcpus = defaults[r].Value\n\t\t\tcase payloads.MemMB:\n\t\t\t\tdetails.RAM = defaults[r].Value\n\t\t\t}\n\t\t}\n\n\t\turl := tURL + w.ID\n\t\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\t\tvar f payloads.ComputeFlavorDetails\n\n\t\terr = json.Unmarshal(body, &f)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif reflect.DeepEqual(details, f.Flavor) == false {\n\t\t\tt.Fatal(\"Flavor details not correct\")\n\t\t}\n\t}\n}\n\nfunc TestListTenantResources(t *testing.T) {\n\tvar usage payloads.CiaoUsageHistory\n\n\tendTime := time.Now()\n\tstartTime := endTime.Add(-15 * time.Minute)\n\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/resources?\"\n\n\tusage.Usages, err = context.ds.GetTenantUsage(tenant.ID, startTime, endTime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv := url.Values{}\n\tv.Add(\"start_date\", startTime.Format(time.RFC3339))\n\tv.Add(\"end_date\", endTime.Format(time.RFC3339))\n\n\ttURL += v.Encode()\n\n\tbody := testHTTPRequest(t, \"GET\", tURL, http.StatusOK, nil)\n\n\tvar result payloads.CiaoUsageHistory\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif reflect.DeepEqual(usage, result) == false {\n\t\tt.Fatal(\"Tenant usage not correct\")\n\t}\n}\n\nfunc TestListTenantQuotas(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/quotas\"\n\n\tvar expected payloads.CiaoTenantResources\n\n\tfor _, resource := range tenant.Resources {\n\t\tswitch resource.Rtype {\n\t\tcase instances:\n\t\t\texpected.InstanceLimit = resource.Limit\n\t\t\texpected.InstanceUsage = resource.Usage\n\n\t\tcase vcpu:\n\t\t\texpected.VCPULimit = resource.Limit\n\t\t\texpected.VCPUUsage = resource.Usage\n\n\t\tcase memory:\n\t\t\texpected.MemLimit = resource.Limit\n\t\t\texpected.MemUsage = resource.Usage\n\n\t\tcase disk:\n\t\t\texpected.DiskLimit = resource.Limit\n\t\t\texpected.DiskUsage = resource.Usage\n\t\t}\n\t}\n\n\texpected.ID = tenant.ID\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar result payloads.CiaoTenantResources\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpected.Timestamp = result.Timestamp\n\n\tif reflect.DeepEqual(expected, result) == false {\n\t\tt.Fatal(\"Tenant quotas not correct\")\n\t}\n}\n<commit_msg>ciao-controller: compute_test: add unit test for listEvents by tenant<commit_after>\/\/ Copyright (c) 2016 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/01org\/ciao\/payloads\"\n\t\"github.com\/01org\/ciao\/ssntp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc testHTTPRequest(t *testing.T, method string, URL string, expectedResponse int, data []byte) []byte {\n\treq, err := http.NewRequest(method, URL, bytes.NewBuffer(data))\n\treq.Header.Set(\"X-Auth-Token\", \"imavalidtoken\")\n\tif data != nil {\n\t\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != expectedResponse {\n\t\tvar msg string\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\tmsg = string(body)\n\t\t}\n\n\t\tt.Fatalf(\"expected: %d, got: %d, msg: %s\", expectedResponse, resp.StatusCode, msg)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn body\n}\n\nfunc testCreateServer(t *testing.T, n int) payloads.ComputeServers {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ get a valid workload ID\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(wls) == 0 {\n\t\tt.Fatal(\"No valid workloads\")\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\"\n\n\tvar server payloads.ComputeCreateServer\n\tserver.Server.MaxInstances = n\n\tserver.Server.Workload = wls[0].ID\n\n\tb, err := json.Marshal(server)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbody := testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n\n\tvar servers payloads.ComputeServers\n\n\terr = json.Unmarshal(body, &servers)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif servers.TotalServers != n {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\treturn servers\n}\n\nfunc testListServerDetailsTenant(t *testing.T, tenantID string) payloads.ComputeServers {\n\turl := computeURL + \"\/v2.1\/\" + tenantID + \"\/servers\/detail\"\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar s payloads.ComputeServers\n\terr := json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn s\n}\n\nfunc TestCreateSingleServer(t *testing.T) {\n\t_ = testCreateServer(t, 1)\n}\n\nfunc TestListServerDetailsTenant(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n}\n\nfunc TestListServerDetailsWorkload(t *testing.T) {\n\t\/\/ get a valid workload ID\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(wls) == 0 {\n\t\tt.Fatal(\"No valid workloads\")\n\t}\n\n\tservers := testCreateServer(t, 10)\n\tif servers.TotalServers != 10 {\n\t\tt.Fatal(\"failed to create enough servers\")\n\t}\n\n\turl := computeURL + \"\/v2.1\/flavors\/\" + wls[0].ID + \"\/servers\/detail\"\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar s payloads.ComputeServers\n\terr = json.Unmarshal(body, &s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif s.TotalServers < 10 {\n\t\tt.Fatal(\"Did not return correct number of servers\")\n\t}\n}\n\nfunc TestShowServerDetails(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/\"\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\tfor _, s1 := range s.Servers {\n\t\turl := tURL + s1.ID\n\n\t\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\t\tvar s2 payloads.ComputeServer\n\t\terr = json.Unmarshal(body, &s2)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif reflect.DeepEqual(s1, s2.Server) == false {\n\t\t\tt.Fatal(\"Server details not correct\")\n\t\t}\n\t}\n}\n\nfunc TestDeleteServer(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ instances have to be assigned to a node to be deleted\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/\"\n\n\tservers := testCreateServer(t, 10)\n\tif servers.TotalServers != 10 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ts := testListServerDetailsTenant(t, tenant.ID)\n\n\tif s.TotalServers < 1 {\n\t\tt.Fatal(\"Not enough servers returned\")\n\t}\n\n\tfor _, s1 := range s.Servers {\n\t\turl := tURL + s1.ID\n\t\tif s1.HostID != \"\" {\n\t\t\t_ = testHTTPRequest(t, \"DELETE\", url, http.StatusAccepted, nil)\n\t\t} else {\n\t\t\t_ = testHTTPRequest(t, \"DELETE\", url, http.StatusInternalServerError, nil)\n\t\t}\n\n\t}\n}\n\nfunc TestServersActionStart(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/action\"\n\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ttime.Sleep(1 * time.Second)\n\n\terr = context.stopInstance(servers.Servers[0].ID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\tclient.sendStats()\n\n\tvar ids []string\n\tids = append(ids, servers.Servers[0].ID)\n\n\tcmd := payloads.CiaoServersAction{\n\t\tAction: \"os-start\",\n\t\tServerIDs: ids,\n\t}\n\n\tb, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n}\n\nfunc TestServersActionStop(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/servers\/action\"\n\n\tclient := newTestClient(0, ssntp.AGENT)\n\tdefer client.ssntp.Close()\n\n\tservers := testCreateServer(t, 1)\n\tif servers.TotalServers != 1 {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient.sendStats()\n\n\ttime.Sleep(1 * time.Second)\n\n\tvar ids []string\n\tids = append(ids, servers.Servers[0].ID)\n\n\tcmd := payloads.CiaoServersAction{\n\t\tAction: \"os-stop\",\n\t\tServerIDs: ids,\n\t}\n\n\tb, err := json.Marshal(cmd)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_ = testHTTPRequest(t, \"POST\", url, http.StatusAccepted, b)\n}\n\nfunc TestListFlavors(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/flavors\"\n\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar flavors payloads.ComputeFlavors\n\terr = json.Unmarshal(body, &flavors)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(flavors.Flavors) != len(wls) {\n\t\tt.Fatal(\"Incorrect number of flavors returned\")\n\t}\n\n\tvar matched int\n\n\tfor _, f := range flavors.Flavors {\n\t\tfor _, w := range wls {\n\t\t\tif w.ID == f.ID && w.Description == f.Name {\n\t\t\t\tmatched++\n\t\t\t}\n\t\t}\n\t}\n\n\tif matched != len(wls) {\n\t\tt.Fatal(\"Flavor information didn't match workload information\")\n\t}\n}\n\nfunc TestShowFlavorDetails(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/flavors\/\"\n\n\twls, err := context.ds.GetWorkloads()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, w := range wls {\n\t\tdetails := payloads.FlavorDetails{\n\t\t\tOsFlavorAccessIsPublic: true,\n\t\t\tID: w.ID,\n\t\t\tDisk: w.ImageID,\n\t\t\tName: w.Description,\n\t\t}\n\n\t\tdefaults := w.Defaults\n\t\tfor r := range defaults {\n\t\t\tswitch defaults[r].Type {\n\t\t\tcase payloads.VCPUs:\n\t\t\t\tdetails.Vcpus = defaults[r].Value\n\t\t\tcase payloads.MemMB:\n\t\t\t\tdetails.RAM = defaults[r].Value\n\t\t\t}\n\t\t}\n\n\t\turl := tURL + w.ID\n\t\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\t\tvar f payloads.ComputeFlavorDetails\n\n\t\terr = json.Unmarshal(body, &f)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif reflect.DeepEqual(details, f.Flavor) == false {\n\t\t\tt.Fatal(\"Flavor details not correct\")\n\t\t}\n\t}\n}\n\nfunc TestListTenantResources(t *testing.T) {\n\tvar usage payloads.CiaoUsageHistory\n\n\tendTime := time.Now()\n\tstartTime := endTime.Add(-15 * time.Minute)\n\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttURL := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/resources?\"\n\n\tusage.Usages, err = context.ds.GetTenantUsage(tenant.ID, startTime, endTime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv := url.Values{}\n\tv.Add(\"start_date\", startTime.Format(time.RFC3339))\n\tv.Add(\"end_date\", endTime.Format(time.RFC3339))\n\n\ttURL += v.Encode()\n\n\tbody := testHTTPRequest(t, \"GET\", tURL, http.StatusOK, nil)\n\n\tvar result payloads.CiaoUsageHistory\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif reflect.DeepEqual(usage, result) == false {\n\t\tt.Fatal(\"Tenant usage not correct\")\n\t}\n}\n\nfunc TestListTenantQuotas(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/quotas\"\n\n\tvar expected payloads.CiaoTenantResources\n\n\tfor _, resource := range tenant.Resources {\n\t\tswitch resource.Rtype {\n\t\tcase instances:\n\t\t\texpected.InstanceLimit = resource.Limit\n\t\t\texpected.InstanceUsage = resource.Usage\n\n\t\tcase vcpu:\n\t\t\texpected.VCPULimit = resource.Limit\n\t\t\texpected.VCPUUsage = resource.Usage\n\n\t\tcase memory:\n\t\t\texpected.MemLimit = resource.Limit\n\t\t\texpected.MemUsage = resource.Usage\n\n\t\tcase disk:\n\t\t\texpected.DiskLimit = resource.Limit\n\t\t\texpected.DiskUsage = resource.Usage\n\t\t}\n\t}\n\n\texpected.ID = tenant.ID\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar result payloads.CiaoTenantResources\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpected.Timestamp = result.Timestamp\n\n\tif reflect.DeepEqual(expected, result) == false {\n\t\tt.Fatal(\"Tenant quotas not correct\")\n\t}\n}\n\nfunc TestListEventsTenant(t *testing.T) {\n\ttenant, err := context.ds.GetTenant(computeTestUser)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\turl := computeURL + \"\/v2.1\/\" + tenant.ID + \"\/events\"\n\n\tvar expected payloads.CiaoEvents\n\n\tlogs, err := context.ds.GetEventLog()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, l := range logs {\n\t\tif tenant.ID != l.TenantID {\n\t\t\tcontinue\n\t\t}\n\n\t\tevent := payloads.CiaoEvent{\n\t\t\tTimestamp: l.Timestamp,\n\t\t\tTenantId: l.TenantID,\n\t\t\tEventType: l.EventType,\n\t\t\tMessage: l.Message,\n\t\t}\n\t\texpected.Events = append(expected.Events, event)\n\t}\n\n\tbody := testHTTPRequest(t, \"GET\", url, http.StatusOK, nil)\n\n\tvar result payloads.CiaoEvents\n\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif reflect.DeepEqual(expected, result) == false {\n\t\tt.Fatal(\"Tenant events not correct\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2015 The Gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lapack\n\nimport \"gonum.org\/v1\/gonum\/blas\"\n\nconst None = 'N'\n\ntype Comp byte\n\n\/\/ Complex128 defines the public complex128 LAPACK API supported by gonum\/lapack.\ntype Complex128 interface{}\n\n\/\/ Float64 defines the public float64 LAPACK API supported by gonum\/lapack.\ntype Float64 interface {\n\tDgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64\n\tDgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int)\n\tDgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool\n\tDgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int)\n\tDgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int)\n\tDgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool)\n\tDgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool)\n\tDgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool)\n\tDgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int)\n\tDggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool)\n\tDlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64\n\tDlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64\n\tDlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64\n\tDlapmt(forward bool, m, n int, x []float64, ldx int, k []int)\n\tDormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)\n\tDormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)\n\tDpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64\n\tDpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool)\n\tDpotrs(ul blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int)\n\tDsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool)\n\tDtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64\n\tDtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool)\n\tDtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool)\n}\n\n\/\/ Direct specifies the direction of the multiplication for the Householder matrix.\ntype Direct byte\n\nconst (\n\tForward Direct = 'F' \/\/ Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}.\n\tBackward Direct = 'B' \/\/ Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0.\n)\n\n\/\/ Sort is the sorting order.\ntype Sort byte\n\nconst (\n\tSortIncreasing Sort = 'I'\n\tSortDecreasing Sort = 'D'\n)\n\n\/\/ StoreV indicates the storage direction of elementary reflectors.\ntype StoreV byte\n\nconst (\n\tColumnWise StoreV = 'C' \/\/ Reflector stored in a column of the matrix.\n\tRowWise StoreV = 'R' \/\/ Reflector stored in a row of the matrix.\n)\n\n\/\/ MatrixNorm represents the kind of matrix norm to compute.\ntype MatrixNorm byte\n\nconst (\n\tMaxAbs MatrixNorm = 'M' \/\/ max(abs(A(i,j))) ('M')\n\tMaxColumnSum MatrixNorm = 'O' \/\/ Maximum column sum (one norm) ('1', 'O')\n\tMaxRowSum MatrixNorm = 'I' \/\/ Maximum row sum (infinity norm) ('I', 'i')\n\tNormFrob MatrixNorm = 'F' \/\/ Frobenius norm (sqrt of sum of squares) ('F', 'f', E, 'e')\n)\n\n\/\/ MatrixType represents the kind of matrix represented in the data.\ntype MatrixType byte\n\nconst (\n\tGeneral MatrixType = 'G' \/\/ A dense matrix (like blas64.General).\n\tUpperTri MatrixType = 'U' \/\/ An upper triangular matrix.\n\tLowerTri MatrixType = 'L' \/\/ A lower triangular matrix.\n)\n\n\/\/ Pivot specifies the pivot type for plane rotations\ntype Pivot byte\n\nconst (\n\tVariable Pivot = 'V'\n\tTop Pivot = 'T'\n\tBottom Pivot = 'B'\n)\n\ntype DecompUpdate byte\n\nconst (\n\tApplyP DecompUpdate = 'P'\n\tApplyQ DecompUpdate = 'Q'\n)\n\n\/\/ SVDJob specifies the singular vector computation type for SVD.\ntype SVDJob byte\n\nconst (\n\tSVDAll SVDJob = 'A' \/\/ Compute all singular vectors\n\tSVDInPlace SVDJob = 'S' \/\/ Compute the first singular vectors and store them in provided storage.\n\tSVDOverwrite SVDJob = 'O' \/\/ Compute the singular vectors and store them in input matrix\n\tSVDNone SVDJob = 'N' \/\/ Do not compute singular vectors\n)\n\n\/\/ GSVDJob specifies the singular vector computation type for Generalized SVD.\ntype GSVDJob byte\n\nconst (\n\tGSVDU GSVDJob = 'U' \/\/ Compute orthogonal matrix U\n\tGSVDV GSVDJob = 'V' \/\/ Compute orthogonal matrix V\n\tGSVDQ GSVDJob = 'Q' \/\/ Compute orthogonal matrix Q\n\tGSVDUnit GSVDJob = 'I' \/\/ Use unit-initialized matrix\n\tGSVDNone GSVDJob = 'N' \/\/ Do not compute orthogonal matrix\n)\n\n\/\/ EVComp specifies how eigenvectors are computed.\ntype EVComp byte\n\nconst (\n\t\/\/ OriginalEV specifies to compute the eigenvectors of the original\n\t\/\/ matrix.\n\tOriginalEV EVComp = 'V'\n\t\/\/ TridiagEV specifies to compute both the eigenvectors of the input\n\t\/\/ tridiagonal matrix.\n\tTridiagEV EVComp = 'I'\n\t\/\/ HessEV specifies to compute both the eigenvectors of the input upper\n\t\/\/ Hessenberg matrix.\n\tHessEV EVComp = 'I'\n\n\t\/\/ UpdateSchur specifies that the matrix of Schur vectors will be\n\t\/\/ updated by Dtrexc.\n\tUpdateSchur EVComp = 'V'\n)\n\n\/\/ Job types for computation of eigenvectors.\ntype (\n\tEVJob byte\n\tLeftEVJob byte\n\tRightEVJob byte\n)\n\n\/\/ Job constants for computation of eigenvectors.\nconst (\n\tComputeEV EVJob = 'V' \/\/ Compute eigenvectors in Dsyev.\n\tComputeLeftEV LeftEVJob = 'V' \/\/ Compute left eigenvectors.\n\tComputeRightEV RightEVJob = 'V' \/\/ Compute right eigenvectors.\n)\n\n\/\/ BalanceJob specifies matrix balancing operation.\ntype BalanceJob byte\n\nconst (\n\tPermute BalanceJob = 'P'\n\tScale BalanceJob = 'S'\n\tPermuteScale BalanceJob = 'B'\n)\n\n\/\/ Job constants for Dhseqr.\nconst (\n\tEigenvaluesOnly EVJob = 'E'\n\tEigenvaluesAndSchur EVJob = 'S'\n)\n\n\/\/ EVSide specifies what eigenvectors will be computed.\ntype EVSide byte\n\n\/\/ EVSide constants for Dtrevc3.\nconst (\n\tRightEV EVSide = 'R' \/\/ Compute right eigenvectors only.\n\tLeftEV EVSide = 'L' \/\/ Compute left eigenvectors only.\n\tRightLeftEV EVSide = 'B' \/\/ Compute both right and left eigenvectors.\n)\n\n\/\/ HowMany specifies which eigenvectors will be computed.\ntype HowMany byte\n\n\/\/ HowMany constants for Dhseqr.\nconst (\n\tAllEV HowMany = 'A' \/\/ Compute all right and\/or left eigenvectors.\n\tAllEVMulQ HowMany = 'B' \/\/ Compute all right and\/or left eigenvectors multiplied by an input matrix.\n\tSelectedEV HowMany = 'S' \/\/ Compute selected right and\/or left eigenvectors.\n)\n<commit_msg>lapack: remove Comp type<commit_after>\/\/ Copyright ©2015 The Gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage lapack\n\nimport \"gonum.org\/v1\/gonum\/blas\"\n\nconst None = 'N'\n\n\/\/ Complex128 defines the public complex128 LAPACK API supported by gonum\/lapack.\ntype Complex128 interface{}\n\n\/\/ Float64 defines the public float64 LAPACK API supported by gonum\/lapack.\ntype Float64 interface {\n\tDgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64\n\tDgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int)\n\tDgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool\n\tDgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int)\n\tDgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int)\n\tDgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool)\n\tDgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool)\n\tDgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool)\n\tDgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int)\n\tDggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool)\n\tDlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64\n\tDlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64\n\tDlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64\n\tDlapmt(forward bool, m, n int, x []float64, ldx int, k []int)\n\tDormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)\n\tDormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)\n\tDpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64\n\tDpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool)\n\tDpotrs(ul blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int)\n\tDsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool)\n\tDtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64\n\tDtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool)\n\tDtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool)\n}\n\n\/\/ Direct specifies the direction of the multiplication for the Householder matrix.\ntype Direct byte\n\nconst (\n\tForward Direct = 'F' \/\/ Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}.\n\tBackward Direct = 'B' \/\/ Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0.\n)\n\n\/\/ Sort is the sorting order.\ntype Sort byte\n\nconst (\n\tSortIncreasing Sort = 'I'\n\tSortDecreasing Sort = 'D'\n)\n\n\/\/ StoreV indicates the storage direction of elementary reflectors.\ntype StoreV byte\n\nconst (\n\tColumnWise StoreV = 'C' \/\/ Reflector stored in a column of the matrix.\n\tRowWise StoreV = 'R' \/\/ Reflector stored in a row of the matrix.\n)\n\n\/\/ MatrixNorm represents the kind of matrix norm to compute.\ntype MatrixNorm byte\n\nconst (\n\tMaxAbs MatrixNorm = 'M' \/\/ max(abs(A(i,j))) ('M')\n\tMaxColumnSum MatrixNorm = 'O' \/\/ Maximum column sum (one norm) ('1', 'O')\n\tMaxRowSum MatrixNorm = 'I' \/\/ Maximum row sum (infinity norm) ('I', 'i')\n\tNormFrob MatrixNorm = 'F' \/\/ Frobenius norm (sqrt of sum of squares) ('F', 'f', E, 'e')\n)\n\n\/\/ MatrixType represents the kind of matrix represented in the data.\ntype MatrixType byte\n\nconst (\n\tGeneral MatrixType = 'G' \/\/ A dense matrix (like blas64.General).\n\tUpperTri MatrixType = 'U' \/\/ An upper triangular matrix.\n\tLowerTri MatrixType = 'L' \/\/ A lower triangular matrix.\n)\n\n\/\/ Pivot specifies the pivot type for plane rotations\ntype Pivot byte\n\nconst (\n\tVariable Pivot = 'V'\n\tTop Pivot = 'T'\n\tBottom Pivot = 'B'\n)\n\ntype DecompUpdate byte\n\nconst (\n\tApplyP DecompUpdate = 'P'\n\tApplyQ DecompUpdate = 'Q'\n)\n\n\/\/ SVDJob specifies the singular vector computation type for SVD.\ntype SVDJob byte\n\nconst (\n\tSVDAll SVDJob = 'A' \/\/ Compute all singular vectors\n\tSVDInPlace SVDJob = 'S' \/\/ Compute the first singular vectors and store them in provided storage.\n\tSVDOverwrite SVDJob = 'O' \/\/ Compute the singular vectors and store them in input matrix\n\tSVDNone SVDJob = 'N' \/\/ Do not compute singular vectors\n)\n\n\/\/ GSVDJob specifies the singular vector computation type for Generalized SVD.\ntype GSVDJob byte\n\nconst (\n\tGSVDU GSVDJob = 'U' \/\/ Compute orthogonal matrix U\n\tGSVDV GSVDJob = 'V' \/\/ Compute orthogonal matrix V\n\tGSVDQ GSVDJob = 'Q' \/\/ Compute orthogonal matrix Q\n\tGSVDUnit GSVDJob = 'I' \/\/ Use unit-initialized matrix\n\tGSVDNone GSVDJob = 'N' \/\/ Do not compute orthogonal matrix\n)\n\n\/\/ EVComp specifies how eigenvectors are computed.\ntype EVComp byte\n\nconst (\n\t\/\/ OriginalEV specifies to compute the eigenvectors of the original\n\t\/\/ matrix.\n\tOriginalEV EVComp = 'V'\n\t\/\/ TridiagEV specifies to compute both the eigenvectors of the input\n\t\/\/ tridiagonal matrix.\n\tTridiagEV EVComp = 'I'\n\t\/\/ HessEV specifies to compute both the eigenvectors of the input upper\n\t\/\/ Hessenberg matrix.\n\tHessEV EVComp = 'I'\n\n\t\/\/ UpdateSchur specifies that the matrix of Schur vectors will be\n\t\/\/ updated by Dtrexc.\n\tUpdateSchur EVComp = 'V'\n)\n\n\/\/ Job types for computation of eigenvectors.\ntype (\n\tEVJob byte\n\tLeftEVJob byte\n\tRightEVJob byte\n)\n\n\/\/ Job constants for computation of eigenvectors.\nconst (\n\tComputeEV EVJob = 'V' \/\/ Compute eigenvectors in Dsyev.\n\tComputeLeftEV LeftEVJob = 'V' \/\/ Compute left eigenvectors.\n\tComputeRightEV RightEVJob = 'V' \/\/ Compute right eigenvectors.\n)\n\n\/\/ BalanceJob specifies matrix balancing operation.\ntype BalanceJob byte\n\nconst (\n\tPermute BalanceJob = 'P'\n\tScale BalanceJob = 'S'\n\tPermuteScale BalanceJob = 'B'\n)\n\n\/\/ Job constants for Dhseqr.\nconst (\n\tEigenvaluesOnly EVJob = 'E'\n\tEigenvaluesAndSchur EVJob = 'S'\n)\n\n\/\/ EVSide specifies what eigenvectors will be computed.\ntype EVSide byte\n\n\/\/ EVSide constants for Dtrevc3.\nconst (\n\tRightEV EVSide = 'R' \/\/ Compute right eigenvectors only.\n\tLeftEV EVSide = 'L' \/\/ Compute left eigenvectors only.\n\tRightLeftEV EVSide = 'B' \/\/ Compute both right and left eigenvectors.\n)\n\n\/\/ HowMany specifies which eigenvectors will be computed.\ntype HowMany byte\n\n\/\/ HowMany constants for Dhseqr.\nconst (\n\tAllEV HowMany = 'A' \/\/ Compute all right and\/or left eigenvectors.\n\tAllEVMulQ HowMany = 'B' \/\/ Compute all right and\/or left eigenvectors multiplied by an input matrix.\n\tSelectedEV HowMany = 'S' \/\/ Compute selected right and\/or left eigenvectors.\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 - The TXTdirect Authors\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tbasezone = \"_redirect\"\n\tdefaultSub = \"www\"\n\tdefaultProtocol = \"https\"\n)\n\ntype record struct {\n\tVersion string\n\tTo string\n\tCode int\n\tType string\n\tVcs string\n\tFrom string\n}\n\n\/\/ Config contains the middleware's configuration\ntype Config struct {\n\tEnable []string\n\tRedirect string\n}\n\nfunc (r *record) Parse(str string) error {\n\ts := strings.Split(str, \";\")\n\tfor _, l := range s {\n\t\tswitch {\n\t\tcase strings.HasPrefix(l, \"v=\"):\n\t\t\tl = strings.TrimPrefix(l, \"v=\")\n\t\t\tr.Version = l\n\t\t\tif r.Version != \"txtv0\" {\n\t\t\t\treturn fmt.Errorf(\"unhandled version '%s'\", r.Version)\n\t\t\t}\n\t\t\tlog.Print(\"WARN: txtv0 is not suitable for production\")\n\n\t\tcase strings.HasPrefix(l, \"to=\"):\n\t\t\tl = strings.TrimPrefix(l, \"to=\")\n\t\t\tr.To = l\n\n\t\tcase strings.HasPrefix(l, \"code=\"):\n\t\t\tl = strings.TrimPrefix(l, \"code=\")\n\t\t\ti, err := strconv.Atoi(l)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not parse status code: %s\", err)\n\t\t\t}\n\t\t\tr.Code = i\n\n\t\tcase strings.HasPrefix(l, \"type=\"):\n\t\t\tl = strings.TrimPrefix(l, \"type=\")\n\t\t\tr.Type = l\n\n\t\tcase strings.HasPrefix(l, \"vcs=\"):\n\t\t\tl = strings.TrimPrefix(l, \"vcs=\")\n\t\t\tr.Vcs = l\n\n\t\tdefault:\n\t\t\tif r.To != \"\" {\n\t\t\t\treturn fmt.Errorf(\"multiple values without keys\")\n\t\t\t}\n\t\t\tr.To = l\n\t\t}\n\t}\n\n\tif r.Code == 0 {\n\t\tr.Code = 301\n\t}\n\n\tif r.Vcs == \"\" {\n\t\tr.Vcs = \"git\"\n\t}\n\n\tif r.Type == \"\" {\n\t\tr.Type = \"host\"\n\t}\n\n\treturn nil\n}\n\nfunc getBaseTarget(rec record) (string, int) {\n\treturn rec.To, rec.Code\n}\n\nfunc getRecord(host, path string) (record, error) {\n\tzone := strings.Join([]string{basezone, host}, \".\")\n\n\ts, err := net.LookupTXT(zone)\n\tif err != nil {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\trec := record{}\n\tif err = rec.Parse(s[0]); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\tif rec.Type == \"path\" && path != \"\" {\n\t\tzone, from, _ := zoneFromPath(host, path)\n\t\trec, err = getFinalRecord(zone, from)\n\t\tif err != nil {\n\t\t\treturn record{}, err\n\t\t}\n\t}\n\n\tif rec.To == \"\" {\n\t\ts := []string{defaultProtocol, \":\/\/\", defaultSub, \".\", host}\n\t\trec.To = strings.Join(s, \"\")\n\t}\n\n\treturn rec, nil\n}\n\nfunc getFinalRecord(zone string, from int) (record, error) {\n\tvar txts []string\n\tvar err error\n\n\ttxts, err = net.LookupTXT(zone)\n\n\t\/\/ if nothing found, jump into wildcards\n\tfor i := 1; i <= from && len(txts) == 0; i++ {\n\t\tzoneSlice := strings.Split(zone, \".\")\n\t\tzoneSlice[i] = \"_\"\n\t\tzone = strings.Join(zoneSlice, \".\")\n\t\ttxts, err = net.LookupTXT(zone)\n\t}\n\tif err != nil || len(txts) == 0 {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\trec := record{}\n\tif err = rec.Parse(txts[0]); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\treturn rec, nil\n}\n\nfunc contains(array []string, word string) bool {\n\tfor _, w := range array {\n\t\tif w == word {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Redirect the request depending on the redirect record found\nfunc Redirect(w http.ResponseWriter, r *http.Request, c Config) error {\n\thost := r.Host\n\tpath := r.URL.Path\n\n\trec, err := getRecord(host, path)\n\tif err != nil {\n\t\tif strings.HasSuffix(err.Error(), \"no such host\") {\n\t\t\tif c.Redirect != \"\" {\n\t\t\t\thttp.Redirect(w, r, c.Redirect, http.StatusMovedPermanently)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif contains(c.Enable, \"www\") {\n\t\t\t\ts := []string{defaultProtocol, \":\/\/\", defaultSub, \".\", host}\n\t\t\t\thttp.Redirect(w, r, strings.Join(s, \"\"), 301)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif !contains(c.Enable, rec.Type) {\n\t\treturn fmt.Errorf(\"option disabled\")\n\t}\n\n\tif rec.Type == \"host\" {\n\t\tto, code := getBaseTarget(rec)\n\t\thttp.Redirect(w, r, to, code)\n\t\treturn nil\n\t}\n\n\tif rec.Type == \"gometa\" {\n\t\treturn gometa(w, rec, host, path)\n\t}\n\n\tif rec.Type == \"path\" {\n\t\thttp.Redirect(w, r, rec.To, 302)\n\t\treturn nil\n\t}\n\n\t\/\/ if rec.Type == \"path\" {\n\t\/\/ \treturn RedirectPath(w, rec, host, path)\n\t\/\/ }\n\n\treturn fmt.Errorf(\"record type %s unsupported\", rec.Type)\n}\n\nfunc reverse(input []string) {\n\tlast := len(input) - 1\n\tfor i := 0; i < len(input)\/2; i++ {\n\t\tinput[i], input[last-i] = input[last-i], input[i]\n\t}\n}\n\nfunc zoneFromPath(host string, path string) (string, int, error) {\n\tmatch, err := regexp.Compile(\"([a-zA-Z0-9])\\\\w+\")\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tpathSlice := match.FindAllString(path, -1)\n\tfrom := len(pathSlice)\n\treverse(pathSlice)\n\turl := append(pathSlice, host)\n\turl = append([]string{basezone}, url...)\n\treturn strings.Join(url, \".\"), from, nil\n}\n<commit_msg>Fix path regex<commit_after>\/*\nCopyright 2017 - The TXTdirect Authors\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage txtdirect\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tbasezone = \"_redirect\"\n\tdefaultSub = \"www\"\n\tdefaultProtocol = \"https\"\n)\n\ntype record struct {\n\tVersion string\n\tTo string\n\tCode int\n\tType string\n\tVcs string\n\tFrom string\n}\n\n\/\/ Config contains the middleware's configuration\ntype Config struct {\n\tEnable []string\n\tRedirect string\n}\n\nfunc (r *record) Parse(str string) error {\n\ts := strings.Split(str, \";\")\n\tfor _, l := range s {\n\t\tswitch {\n\t\tcase strings.HasPrefix(l, \"v=\"):\n\t\t\tl = strings.TrimPrefix(l, \"v=\")\n\t\t\tr.Version = l\n\t\t\tif r.Version != \"txtv0\" {\n\t\t\t\treturn fmt.Errorf(\"unhandled version '%s'\", r.Version)\n\t\t\t}\n\t\t\tlog.Print(\"WARN: txtv0 is not suitable for production\")\n\n\t\tcase strings.HasPrefix(l, \"to=\"):\n\t\t\tl = strings.TrimPrefix(l, \"to=\")\n\t\t\tr.To = l\n\n\t\tcase strings.HasPrefix(l, \"code=\"):\n\t\t\tl = strings.TrimPrefix(l, \"code=\")\n\t\t\ti, err := strconv.Atoi(l)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not parse status code: %s\", err)\n\t\t\t}\n\t\t\tr.Code = i\n\n\t\tcase strings.HasPrefix(l, \"type=\"):\n\t\t\tl = strings.TrimPrefix(l, \"type=\")\n\t\t\tr.Type = l\n\n\t\tcase strings.HasPrefix(l, \"vcs=\"):\n\t\t\tl = strings.TrimPrefix(l, \"vcs=\")\n\t\t\tr.Vcs = l\n\n\t\tdefault:\n\t\t\tif r.To != \"\" {\n\t\t\t\treturn fmt.Errorf(\"multiple values without keys\")\n\t\t\t}\n\t\t\tr.To = l\n\t\t}\n\t}\n\n\tif r.Code == 0 {\n\t\tr.Code = 301\n\t}\n\n\tif r.Vcs == \"\" {\n\t\tr.Vcs = \"git\"\n\t}\n\n\tif r.Type == \"\" {\n\t\tr.Type = \"host\"\n\t}\n\n\treturn nil\n}\n\nfunc getBaseTarget(rec record) (string, int) {\n\treturn rec.To, rec.Code\n}\n\nfunc getRecord(host, path string) (record, error) {\n\tzone := strings.Join([]string{basezone, host}, \".\")\n\n\ts, err := net.LookupTXT(zone)\n\tif err != nil {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\trec := record{}\n\tif err = rec.Parse(s[0]); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\tif rec.Type == \"path\" && path != \"\" {\n\t\tzone, from, _ := zoneFromPath(host, path)\n\t\trec, err = getFinalRecord(zone, from)\n\t\tif err != nil {\n\t\t\treturn record{}, err\n\t\t}\n\t}\n\n\tif rec.To == \"\" {\n\t\ts := []string{defaultProtocol, \":\/\/\", defaultSub, \".\", host}\n\t\trec.To = strings.Join(s, \"\")\n\t}\n\n\treturn rec, nil\n}\n\nfunc getFinalRecord(zone string, from int) (record, error) {\n\tvar txts []string\n\tvar err error\n\n\ttxts, err = net.LookupTXT(zone)\n\n\t\/\/ if nothing found, jump into wildcards\n\tfor i := 1; i <= from && len(txts) == 0; i++ {\n\t\tzoneSlice := strings.Split(zone, \".\")\n\t\tzoneSlice[i] = \"_\"\n\t\tzone = strings.Join(zoneSlice, \".\")\n\t\ttxts, err = net.LookupTXT(zone)\n\t}\n\tif err != nil || len(txts) == 0 {\n\t\treturn record{}, fmt.Errorf(\"could not get TXT record: %s\", err)\n\t}\n\n\trec := record{}\n\tif err = rec.Parse(txts[0]); err != nil {\n\t\treturn rec, fmt.Errorf(\"could not parse record: %s\", err)\n\t}\n\n\treturn rec, nil\n}\n\nfunc contains(array []string, word string) bool {\n\tfor _, w := range array {\n\t\tif w == word {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Redirect the request depending on the redirect record found\nfunc Redirect(w http.ResponseWriter, r *http.Request, c Config) error {\n\thost := r.Host\n\tpath := r.URL.Path\n\n\trec, err := getRecord(host, path)\n\tif err != nil {\n\t\tif strings.HasSuffix(err.Error(), \"no such host\") {\n\t\t\tif c.Redirect != \"\" {\n\t\t\t\thttp.Redirect(w, r, c.Redirect, http.StatusMovedPermanently)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif contains(c.Enable, \"www\") {\n\t\t\t\ts := []string{defaultProtocol, \":\/\/\", defaultSub, \".\", host}\n\t\t\t\thttp.Redirect(w, r, strings.Join(s, \"\"), 301)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif !contains(c.Enable, rec.Type) {\n\t\treturn fmt.Errorf(\"option disabled\")\n\t}\n\n\tif rec.Type == \"host\" {\n\t\tto, code := getBaseTarget(rec)\n\t\thttp.Redirect(w, r, to, code)\n\t\treturn nil\n\t}\n\n\tif rec.Type == \"gometa\" {\n\t\treturn gometa(w, rec, host, path)\n\t}\n\n\tif rec.Type == \"path\" {\n\t\thttp.Redirect(w, r, rec.To, 302)\n\t\treturn nil\n\t}\n\n\t\/\/ if rec.Type == \"path\" {\n\t\/\/ \treturn RedirectPath(w, rec, host, path)\n\t\/\/ }\n\n\treturn fmt.Errorf(\"record type %s unsupported\", rec.Type)\n}\n\nfunc reverse(input []string) {\n\tlast := len(input) - 1\n\tfor i := 0; i < len(input)\/2; i++ {\n\t\tinput[i], input[last-i] = input[last-i], input[i]\n\t}\n}\n\nfunc zoneFromPath(host string, path string) (string, int, error) {\n\tmatch, err := regexp.Compile(\"([a-zA-Z0-9]+)\")\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tpathSlice := match.FindAllString(path, -1)\n\tfrom := len(pathSlice)\n\treverse(pathSlice)\n\turl := append(pathSlice, host)\n\turl = append([]string{basezone}, url...)\n\treturn strings.Join(url, \".\"), from, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package notifier\n\nimport (\"github.com\/golang\/appengine\/xmpp\")\n\nfunc Send(from string, to []string, text string, messageType string) {\n\tm := &xmpp.Message{\n\t\tFrom: from,\n\t\tTo: to,\n\t\tBody: text,\n\t\tType: messageType,\n\t}\n\terr := m.Send(c)\n}\n<commit_msg>New xmpp library test<commit_after>package notifier\n\nimport (\"code.google.com\/p\/goexmpp\/xmpp\")\n\nfunc Send(from string, to []string, text string, messageType string) {\n\tm := &xmpp.Message{\n\t\tFrom: from,\n\t\tTo: to,\n\t\tBody: text,\n\t\tType: messageType,\n\t}\n\terr := m.Send(c)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nvar ModIRCKit = PISCModule {\n Author: \"Andrew Owen\",\n Name: \"IRCKit\",\n License: \"MIT\",\n DocString: \"A wrapper around IRCX, built to make it easy to write IRC bots\",\n Load: nil,\n}\n\n\/\/ TODO: Load IRCKit here, using IRCX libraryr\n\n\n\n<commit_msg>Formatting<commit_after>package main\n\nvar ModIRCKit = PISCModule{\n\tAuthor: \"Andrew Owen\",\n\tName: \"IRCKit\",\n\tLicense: \"MIT\",\n\tDocString: \"A wrapper around IRCX, built to make it easy to write IRC bots\",\n\tLoad: nil,\n}\n\n\/\/ TODO: Load IRCKit here, using IRCX library\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"github.com\/go-martini\/martini\"\nimport \"os\/exec\"\nimport \"fmt\"\nimport \"strconv\"\nimport \"redis\"\nimport \"bytes\"\nimport \"flag\"\nimport \"math\/rand\"\nimport \"time\"\nimport \"database\/sql\"\nimport _ \"github.com\/lib\/pq\"\n\n\n\nfunc Check(prog string) int {\n\tacceptedList := []string {\"boltactiongame\", \"test\"}\n\tfor i:=0; i<len(acceptedList); i++ {\n\t\tif acceptedList[i] == prog {\n\t\t\treturn 1\n\t\t}\n\t}\n\treturn 0\n\n}\n\n\/\/returns cmd struct of program\nfunc Execute(prog string) *exec.Cmd {\n\tcmd := exec.Command(\".\/\"+prog)\n\te := cmd.Start()\n\tif e!=nil {\n\t\tfmt.Println(\"Error runninng program \", e)\n\t}\n\treturn cmd\n\n}\n\nfunc RedisPush(cmdI *exec.Cmd) int {\n\tspec := redis.DefaultSpec().Password(\"go-redis\")\n\tclient, e := redis.NewSynchClientWithSpec(spec)\n\tif e!= nil {\n\t\tfmt.Println(\"error creating client for: \", e)\n\t}\n\tdefer client.Quit()\n\tpidString := strconv.Itoa(cmdI.Process.Pid)\n\tvar buf bytes.Buffer\n\tbuf.Write([]byte(pidString))\n\te = client.Hset(\"server:pids\", \"pid\", buf.Bytes())\n\tif e != nil {\n\t\tfmt.Println(\"error writing to list\")\n\t\treturn 0\n\t}\n\treturn 1\n}\n\nfunc PostGresQueryIDS() []int {\n\tdb, err := sql.Open(\"postgres\", \"user=thoriumnet password=thoriumtest dbname=thoriumnet host=localhost\")\n\tif err != nil {\n\t\tfmt.Println(\"err: \", err)\n\t}\n\tvar game_id int\n\tgame_ids := make([]int, 100)\n\trows, err := db.Query(\"SELECT * FROM games;\")\n\tif err != nil {\n\t\tfmt.Println(\"err2: \", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&game_id)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"err3: \", err)\n\t\t}\n\t\tfor index,_ := range game_ids {\n\t\t\tif game_ids[index]==0 {\n\t\t\t\tgame_ids[index]=game_id;\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn game_ids\n}\n\nfunc main() {\n\trand.Seed(time.Now().UnixNano())\n\tvar portArg = flag.Int(\"p\",rand.Intn(65000-10000)+10000, \"specifies port, default is random int between 10000-65000\")\n\tvar mapArg = flag.String(\"m\", \"default map value\", \"description of map\")\n\tflag.Parse()\n\tfmt.Println(strconv.Itoa(*portArg))\n\tfmt.Println(*mapArg)\n\tprocessL := make([]*exec.Cmd, 100)\n\tcurrentGames := PostGresQueryIDS()\n\tfor _,value := range currentGames {\n\t\tif value != 0 {\n\t\t\tfmt.Println(value)\n\t\t}\n\t}\n\tm := martini.Classic()\n\tm.Post(\"\/launch\/:name\", func(params martini.Params) string {\n\t\te := Check(params[\"name\"])\n\t\tif e==1 {\n\t\t\tcmdInfo := Execute(params[\"name\"])\n\t\t\tfor i:=0; i<len(processL); i++ {\n\t\t\t\tif processL[i]==nil {\n\t\t\t\t\tprocessL[i]=cmdInfo\n\t\t\t\t\t\/\/suc := RedisPush(cmdInfo)\n\t\t\t\t\tRedisPush(cmdInfo)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(processL)\n\t\t\treturn \"launching \" + params[\"name\"] + \"with pid \" + strconv.Itoa(cmdInfo.Process.Pid) \t\n\t\t} else {\n\t\t\treturn \"not accepted\"\n\t\t}\t\n\t})\n\t\n\tm.Run()\n\/\/\terr := cmd.Wait()\n\/\/\tfmt.Println(err)\n\/\/\tfmt.Println(cmd.Path)\n\/\/\tfmt.Println(cmd.Process.Pid)\n}\n<commit_msg>JWT token<commit_after>package main\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/rsa\"\n\t\"encoding\/json\"\n\t\"hussain\/thorium-go\/requests\"\n\t\"net\/http\"\n\n\t\"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/go-martini\/martini\"\n)\nimport \"os\/exec\"\nimport \"fmt\"\nimport \"strconv\"\nimport \"redis\"\nimport \"bytes\"\n\nimport \"crypto\/rand\"\n\nimport \"database\/sql\"\nimport _ \"github.com\/lib\/pq\"\n\n\/\/For now these field are from the sql table of games,\n\/\/proper struct should be\n\/*\ntype ContractInformation struct {\n\tOfferedBy string\n\tTimeRemaining int\n\tBid int\n\tAsk int\n}\n*\/\ntype ContractInformation struct {\n\tgame_id int\n\tmap_name string\n\tmax_players int\n\tis_verified bool\n}\n\nfunc Check(prog string) int {\n\tacceptedList := []string{\"boltactiongame\", \"test\"}\n\tfor i := 0; i < len(acceptedList); i++ {\n\t\tif acceptedList[i] == prog {\n\t\t\treturn 1\n\t\t}\n\t}\n\treturn 0\n\n}\n\n\/\/returns cmd struct of program\nfunc Execute(prog string) *exec.Cmd {\n\tcmd := exec.Command(\".\/\" + prog)\n\te := cmd.Start()\n\tif e != nil {\n\t\tfmt.Println(\"Error runninng program \", e)\n\t}\n\treturn cmd\n\n}\n\nfunc RedisPush(cmdI *exec.Cmd) int {\n\tspec := redis.DefaultSpec().Password(\"go-redis\")\n\tclient, e := redis.NewSynchClientWithSpec(spec)\n\tif e != nil {\n\t\tfmt.Println(\"error creating client for: \", e)\n\t}\n\tdefer client.Quit()\n\tpidString := strconv.Itoa(cmdI.Process.Pid)\n\tvar buf bytes.Buffer\n\tbuf.Write([]byte(pidString))\n\te = client.Hset(\"server:pids\", \"pid\", buf.Bytes())\n\tif e != nil {\n\t\tfmt.Println(\"error writing to list\")\n\t\treturn 0\n\t}\n\treturn 1\n}\n\nfunc PostGresQueryIDS() []int {\n\tdb, err := sql.Open(\"postgres\", \"user=thoriumnet password=thoriumtest dbname=thoriumnet host=localhost\")\n\tif err != nil {\n\t\tfmt.Println(\"err: \", err)\n\t}\n\tvar game_id int\n\tgame_ids := make([]int, 100)\n\trows, err := db.Query(\"SELECT * FROM games;\")\n\tif err != nil {\n\t\tfmt.Println(\"err2: \", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&game_id)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"err3: \", err)\n\t\t}\n\t\tfor index, _ := range game_ids {\n\t\t\tif game_ids[index] == 0 {\n\t\t\t\tgame_ids[index] = game_id\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn game_ids\n}\n\nfunc main() {\n\t\/\/rand.Seed(time.Now().UnixNano())\n\t\/\/var portArg = flag.Int(\"p\", rand.Intn(65000-10000)+10000, \"specifies port, default is random int between 10000-65000\")\n\t\/\/var mapArg = flag.String(\"m\", \"default map value\", \"description of map\")\n\t\/\/flag.Parse()\n\t\/\/fmt.Println(strconv.Itoa(*portArg))\n\t\/\/fmt.Println(*mapArg)\n\t\/\/rand.Seed = 1\n\t\/\/processL := make([]*exec.Cmd, 100)\n\t\/\/currentGames := PostGresQueryIDS()\n\t\/\/for _, value := range currentGames {\n\t\/\/if value != 0 {\n\t\/\/fmt.Println(value)\n\t\/\/}\n\t\/\/}\n\tm := martini.Classic()\n\t\/*m.Post(\"\/launch\/:name\", func(params martini.Params) string {\n\t\te := Check(params[\"name\"])\n\t\tif e==1 {\n\t\t\tcmdInfo := Execute(params[\"name\"])\n\t\t\tfor i:=0; i<len(processL); i++ {\n\t\t\t\tif processL[i]==nil {\n\t\t\t\t\tprocessL[i]=cmdInfo\n\t\t\t\t\t\/\/suc := RedisPush(cmdInfo)\n\t\t\t\t\tRedisPush(cmdInfo)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/fmt.Println(processL)\n\t\t\treturn \"launching \" + params[\"name\"] + \"with pid \" + strconv.Itoa(cmdInfo.Process.Pid)\n\t\t} else {\n\t\t\treturn \"not accepted\"\n\t\t}\n\t})\n\t*\/\n\t\/\/m.Get(\"\/games\", gameServerInfo)\n\tm.Post(\"\/client\/login\", handleClientLogin)\n\tm.Run()\n\t\/\/\terr := cmd.Wait()\n\t\/\/\tfmt.Println(err)\n\t\/\/\tfmt.Println(cmd.Path)\n\t\/\/\tfmt.Println(cmd.Process.Pid)\n}\n\nfunc handleClientLogin(httpReq *http.Request) (int, string) {\n\tdecoder := json.NewDecoder(httpReq.Body)\n\tvar req request.Authentication\n\terr := decoder.Decode(&req)\n\tif err != nil {\n\t\t\/\/logerr(\"Error decoding authentication request\")\n\t\tfmt.Println(\"error with json: \", err)\n\t\treturn 500, \"Internal Server Error\"\n\t}\n\t\/\/need to check if username && password are correct.\n\n\t\/*\n\t\tif req.Username == database username && req.Password == database password\n\t\tthen we can start to generate the token\n\t*\/\n\n\t\/\/create new token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t\/\/secret key is used for signing and verifying token\n\tsecretKey := \"superdupersecretkey\"\n\n\t\/\/generate private\/public key for encrypting\/decrypting token claims (if we need to)\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tfmt.Println(\"Could not generate key: \", err)\n\t\treturn 500, \"Internal Server Error\"\n\t}\n\t\/\/get the public key from the private key\n\tpublicKey := &privateKey.PublicKey\n\n\t\/\/need these vars for encryption\/decryption\n\tmd5hash := md5.New()\n\tlabel := []byte(\"\")\n\n\t\/\/actual encryption\n\tencryptedUsername, err := rsa.EncryptOAEP(md5hash, rand.Reader, publicKey, []byte(req.Username), label)\n\tif err != nil {\n\t\tfmt.Println(\"error encrypting: \", err)\n\t}\n\t\/\/set the UserID value to the encrypted username, not sure if needed.\n\ttoken.Claims[\"UserID\"] = req.Username\n\n\t\/\/decrypt to check if encryption worked properly\n\tdecryptedUsername, err := rsa.DecryptOAEP(md5hash, rand.Reader, privateKey, encryptedUsername, label)\n\tif err != nil {\n\t\tfmt.Println(\"error decrypting: \", err)\n\t}\n\n\tfmt.Printf(\"decrypted [%x] to \\n[%s]\\n\", token.Claims[\"UserID\"], decryptedUsername)\n\t\/*\n\t\tencrypter, err := NewEncrypter(RSA_OAEP,A128GCM, publicKey)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Algorithm not supported\")\n\t\t\treturn 500, \"Internal Server Error\"\n\t\t}\n\t*\/\n\t\/\/fmt.Println(*publicKey)\n\n\t\/\/need to sign the token with something, for now its a random string\n\ttokenString, err := token.SignedString([]byte(secretKey))\n\tif err != nil {\n\t\tfmt.Println(\"error getting signed key: \", err)\n\t\treturn 500, \"Internal Server Error\"\n\t}\n\t\/\/return 200, tokenString + \"\\n\"\n\tfmt.Println(\"Token String: \", tokenString)\n\t\/\/need to return the secret key to the parse to verify the token\n\tdecryptedToken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(secretKey), nil\n\t})\n\tfmt.Printf(\"token strings\\nRaw: [%s]\\nHeader: [%s]\\nSignature: [%s]\\n\", decryptedToken.Raw, decryptedToken.Header, decryptedToken.Signature)\n\n\t\/\/check if no error and valid token\n\tif err == nil && decryptedToken.Valid {\n\t\tfmt.Println(\"valid token, printing claims\")\n\t\tfmt.Println(decryptedToken.Claims)\n\n\t\t\/*for _, value := range decryptedToken.Claims {\n\t\t\tprintableValue := value.(string)\n\t\t\tfmt.Println(\"encryptd username: \", printableValue)\n\t\t\treturn 200, printableValue\n\t\t\tdecryptedUsername2, err := rsa.DecryptOAEP(md5hash, rand.Reader, privateKey, []byte(printableValue), label)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error decrypting username in decrypted token: \", err)\n\t\t\t}\n\t\t\tfmt.Printf(\"decrypted username: [%s]\", decryptedUsername2)\n\t\t}\n\t\t*\/\n\t\t\/\/\tdecryptedUsername2, err := rsa.DecryptOAEP(md5hash, rand.Reader, privateKey, []byte(printableValue), label)\n\n\t} else {\n\t\tfmt.Println(\"Not valid: \", err)\n\t\treturn 500, \"Internal Server Error\"\n\t}\n\treturn 200, tokenString\n}\n\n\/*\nfunc gameServerInfo() string {\n\tdb, err := sql.Open(\"postgres\", \"user=thoriumnet password=thoriumtest dbname=thoriumnet host=localhost\")\n\tif err != nil {\n\t\tfmt.Println(\"database conn err: \", err)\n\t\t\/\/return 500, err\n\t}\n\t\/\/var tx *sql.Tx\n\t\/\/tx, e = db.Begin()\n\n\t\/\/store contract information\n\tvar info ContractInformation\n\t\/\/get game id\n\trows, err := db.Query(\"SELECT * FROM games\")\n\tif err != nil {\n\t\tfmt.Println(\"error: \", err)\n\t}\n\tdefer rows.Close()\n\t\/\/scan row by row\n\tfor rows.Next() {\n\t\t\/\/must scan all variables\n\t\terr := rows.Scan(&info.game_id, &info.map_name, &info.max_players, &info.is_verified)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error scanning row: \", err)\n\t\t}\n\t\tfmt.Println(\"id: \", info.game_id, \"map: \", info.map_name, \"max_players: \", info.max_players, \"verified: \", info.is_verified)\n\n\t}\n\n\treturn \"finished\"\n}*\/\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\n\t\"github.com\/google\/gxui\"\n\t\"github.com\/google\/gxui\/drivers\/gl\"\n\t\"github.com\/google\/gxui\/math\"\n\t\"github.com\/google\/gxui\/themes\/dark\"\n)\n\nvar data = flag.String(\"data\", \"\", \"path to data\")\n\ntype treeAdapterNode struct {\n\tchildren []treeAdapterNode\n\tdata string\n\tid gxui.AdapterItemId\n}\n\nfunc (n treeAdapterNode) Count() int {\n\treturn len(n.children)\n}\n\nfunc (n treeAdapterNode) ItemId(index int) gxui.AdapterItemId {\n\treturn n.children[index].id\n}\n\nfunc (n treeAdapterNode) ItemIndex(id gxui.AdapterItemId) int {\n\tfor i, c := range n.children {\n\t\tif c.id == id {\n\t\t\treturn i\n\t\t}\n\t\tif c.ItemIndex(id) >= 0 {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (n treeAdapterNode) Create(theme gxui.Theme, index int) gxui.Control {\n\tl := theme.CreateLabel()\n\tl.SetText(n.children[index].data)\n\treturn l\n}\n\nfunc (n treeAdapterNode) CreateNode(index int) gxui.TreeAdapterNode {\n\tif len(n.children[index].children) > 0 {\n\t\treturn n.children[index]\n\t} else {\n\t\treturn nil \/\/ This child is a leaf.\n\t}\n}\n\ntype treeAdapter struct {\n\ttreeAdapterNode\n\tonDataChanged gxui.Event\n\tonDataReplaced gxui.Event\n}\n\nfunc (a treeAdapter) ItemSize(theme gxui.Theme) math.Size {\n\treturn math.Size{W: math.MaxSize.W, H: 20}\n}\n\nfunc (a treeAdapter) OnDataChanged(f func()) gxui.EventSubscription {\n\tif a.onDataChanged == nil {\n\t\ta.onDataChanged = gxui.CreateEvent(f)\n\t}\n\treturn a.onDataChanged.Listen(f)\n}\n\nfunc (a treeAdapter) OnDataReplaced(f func()) gxui.EventSubscription {\n\tif a.onDataReplaced == nil {\n\t\ta.onDataReplaced = gxui.CreateEvent(f)\n\t}\n\treturn a.onDataReplaced.Listen(f)\n}\n\nfunc appMain(driver gxui.Driver) {\n\ttheme := dark.CreateTheme(driver)\n\n\tnode := func(id gxui.AdapterItemId, data string, children ...treeAdapterNode) treeAdapterNode {\n\t\treturn treeAdapterNode{\n\t\t\tchildren: children,\n\t\t\tdata: data,\n\t\t\tid: id,\n\t\t}\n\t}\n\n\tlayout := theme.CreateLinearLayout()\n\tlayout.SetOrientation(gxui.Vertical)\n\n\tadapter := treeAdapter{}\n\tadapter.children = []treeAdapterNode{\n\t\tnode(0x000, \"Animals\",\n\t\t\tnode(0x100, \"Mammals\",\n\t\t\t\tnode(0x110, \"Cats\"),\n\t\t\t\tnode(0x120, \"Dogs\"),\n\t\t\t\tnode(0x130, \"Horses\"),\n\t\t\t\tnode(0x140, \"Duck-billed platypuses\"),\n\t\t\t),\n\t\t\tnode(0x200, \"Birds\",\n\t\t\t\tnode(0x210, \"Peacocks\"),\n\t\t\t\tnode(0x220, \"Doves\"),\n\t\t\t),\n\t\t\tnode(0x300, \"Reptiles\",\n\t\t\t\tnode(0x310, \"Lizards\"),\n\t\t\t\tnode(0x320, \"Turtles\"),\n\t\t\t\tnode(0x330, \"Crocodiles\"),\n\t\t\t\tnode(0x340, \"Snakes\"),\n\t\t\t),\n\t\t\tnode(0x400, \"Amphibians\",\n\t\t\t\tnode(0x410, \"Frogs\"),\n\t\t\t\tnode(0x420, \"Toads\"),\n\t\t\t),\n\t\t\tnode(0x500, \"Arthropods\",\n\t\t\t\tnode(0x510, \"Crustaceans\",\n\t\t\t\t\tnode(0x511, \"Crabs\"),\n\t\t\t\t\tnode(0x512, \"Lobsters\"),\n\t\t\t\t),\n\t\t\t\tnode(0x520, \"Insects\",\n\t\t\t\t\tnode(0x521, \"Ants\"),\n\t\t\t\t\tnode(0x522, \"Bees\"),\n\t\t\t\t),\n\t\t\t\tnode(0x530, \"Arachnids\",\n\t\t\t\t\tnode(0x531, \"Spiders\"),\n\t\t\t\t\tnode(0x532, \"Scorpions\"),\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t}\n\n\ttree := theme.CreateTree()\n\ttree.SetAdapter(adapter)\n\ttree.Select(0x140) \/\/ Duck-billed platypuses\n\ttree.Show(tree.Selected())\n\n\tlayout.AddChild(tree)\n\n\trow := theme.CreateLinearLayout()\n\trow.SetOrientation(gxui.Horizontal)\n\tlayout.AddChild(row)\n\n\texpandAll := theme.CreateButton()\n\texpandAll.SetText(\"Expand All\")\n\texpandAll.OnClick(func(gxui.MouseEvent) { tree.ExpandAll() })\n\trow.AddChild(expandAll)\n\n\tcollapseAll := theme.CreateButton()\n\tcollapseAll.SetText(\"Collapse All\")\n\tcollapseAll.OnClick(func(gxui.MouseEvent) { tree.CollapseAll() })\n\trow.AddChild(collapseAll)\n\n\twindow := theme.CreateWindow(800, 600, \"Tree view\")\n\twindow.AddChild(layout)\n\twindow.OnClose(driver.Terminate)\n\twindow.SetPadding(math.Spacing{L: 10, T: 10, R: 10, B: 10})\n\tgxui.EventLoop(driver)\n}\n\nfunc main() {\n\tflag.Parse()\n\tgl.StartDriver(*data, appMain)\n}\n<commit_msg>Update tree sample to use the new StartDriver signature<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/google\/gxui\"\n\t\"github.com\/google\/gxui\/drivers\/gl\"\n\t\"github.com\/google\/gxui\/math\"\n\t\"github.com\/google\/gxui\/themes\/dark\"\n)\n\ntype treeAdapterNode struct {\n\tchildren []treeAdapterNode\n\tdata string\n\tid gxui.AdapterItemId\n}\n\nfunc (n treeAdapterNode) Count() int {\n\treturn len(n.children)\n}\n\nfunc (n treeAdapterNode) ItemId(index int) gxui.AdapterItemId {\n\treturn n.children[index].id\n}\n\nfunc (n treeAdapterNode) ItemIndex(id gxui.AdapterItemId) int {\n\tfor i, c := range n.children {\n\t\tif c.id == id {\n\t\t\treturn i\n\t\t}\n\t\tif c.ItemIndex(id) >= 0 {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (n treeAdapterNode) Create(theme gxui.Theme, index int) gxui.Control {\n\tl := theme.CreateLabel()\n\tl.SetText(n.children[index].data)\n\treturn l\n}\n\nfunc (n treeAdapterNode) CreateNode(index int) gxui.TreeAdapterNode {\n\tif len(n.children[index].children) > 0 {\n\t\treturn n.children[index]\n\t} else {\n\t\treturn nil \/\/ This child is a leaf.\n\t}\n}\n\ntype treeAdapter struct {\n\ttreeAdapterNode\n\tonDataChanged gxui.Event\n\tonDataReplaced gxui.Event\n}\n\nfunc (a treeAdapter) ItemSize(theme gxui.Theme) math.Size {\n\treturn math.Size{W: math.MaxSize.W, H: 20}\n}\n\nfunc (a treeAdapter) OnDataChanged(f func()) gxui.EventSubscription {\n\tif a.onDataChanged == nil {\n\t\ta.onDataChanged = gxui.CreateEvent(f)\n\t}\n\treturn a.onDataChanged.Listen(f)\n}\n\nfunc (a treeAdapter) OnDataReplaced(f func()) gxui.EventSubscription {\n\tif a.onDataReplaced == nil {\n\t\ta.onDataReplaced = gxui.CreateEvent(f)\n\t}\n\treturn a.onDataReplaced.Listen(f)\n}\n\nfunc appMain(driver gxui.Driver) {\n\ttheme := dark.CreateTheme(driver)\n\n\tnode := func(id gxui.AdapterItemId, data string, children ...treeAdapterNode) treeAdapterNode {\n\t\treturn treeAdapterNode{\n\t\t\tchildren: children,\n\t\t\tdata: data,\n\t\t\tid: id,\n\t\t}\n\t}\n\n\tlayout := theme.CreateLinearLayout()\n\tlayout.SetOrientation(gxui.Vertical)\n\n\tadapter := treeAdapter{}\n\tadapter.children = []treeAdapterNode{\n\t\tnode(0x000, \"Animals\",\n\t\t\tnode(0x100, \"Mammals\",\n\t\t\t\tnode(0x110, \"Cats\"),\n\t\t\t\tnode(0x120, \"Dogs\"),\n\t\t\t\tnode(0x130, \"Horses\"),\n\t\t\t\tnode(0x140, \"Duck-billed platypuses\"),\n\t\t\t),\n\t\t\tnode(0x200, \"Birds\",\n\t\t\t\tnode(0x210, \"Peacocks\"),\n\t\t\t\tnode(0x220, \"Doves\"),\n\t\t\t),\n\t\t\tnode(0x300, \"Reptiles\",\n\t\t\t\tnode(0x310, \"Lizards\"),\n\t\t\t\tnode(0x320, \"Turtles\"),\n\t\t\t\tnode(0x330, \"Crocodiles\"),\n\t\t\t\tnode(0x340, \"Snakes\"),\n\t\t\t),\n\t\t\tnode(0x400, \"Amphibians\",\n\t\t\t\tnode(0x410, \"Frogs\"),\n\t\t\t\tnode(0x420, \"Toads\"),\n\t\t\t),\n\t\t\tnode(0x500, \"Arthropods\",\n\t\t\t\tnode(0x510, \"Crustaceans\",\n\t\t\t\t\tnode(0x511, \"Crabs\"),\n\t\t\t\t\tnode(0x512, \"Lobsters\"),\n\t\t\t\t),\n\t\t\t\tnode(0x520, \"Insects\",\n\t\t\t\t\tnode(0x521, \"Ants\"),\n\t\t\t\t\tnode(0x522, \"Bees\"),\n\t\t\t\t),\n\t\t\t\tnode(0x530, \"Arachnids\",\n\t\t\t\t\tnode(0x531, \"Spiders\"),\n\t\t\t\t\tnode(0x532, \"Scorpions\"),\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t}\n\n\ttree := theme.CreateTree()\n\ttree.SetAdapter(adapter)\n\ttree.Select(0x140) \/\/ Duck-billed platypuses\n\ttree.Show(tree.Selected())\n\n\tlayout.AddChild(tree)\n\n\trow := theme.CreateLinearLayout()\n\trow.SetOrientation(gxui.Horizontal)\n\tlayout.AddChild(row)\n\n\texpandAll := theme.CreateButton()\n\texpandAll.SetText(\"Expand All\")\n\texpandAll.OnClick(func(gxui.MouseEvent) { tree.ExpandAll() })\n\trow.AddChild(expandAll)\n\n\tcollapseAll := theme.CreateButton()\n\tcollapseAll.SetText(\"Collapse All\")\n\tcollapseAll.OnClick(func(gxui.MouseEvent) { tree.CollapseAll() })\n\trow.AddChild(collapseAll)\n\n\twindow := theme.CreateWindow(800, 600, \"Tree view\")\n\twindow.AddChild(layout)\n\twindow.OnClose(driver.Terminate)\n\twindow.SetPadding(math.Spacing{L: 10, T: 10, R: 10, B: 10})\n\tgxui.EventLoop(driver)\n}\n\nfunc main() {\n\tgl.StartDriver(appMain)\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/kopia\/kopia\/internal\/units\"\n\t\"github.com\/kopia\/kopia\/repo\/content\"\n\t\"github.com\/kopia\/kopia\/repo\/encryption\"\n\t\"github.com\/kopia\/kopia\/repo\/hashing\"\n\n\tkingpin \"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tbenchmarkCryptoCommand = benchmarkCommands.Command(\"crypto\", \"Run hash and encryption benchmarks\")\n\tbenchmarkCryptoBlockSize = benchmarkCryptoCommand.Flag(\"block-size\", \"Size of a block to encrypt\").Default(\"1MB\").Bytes()\n\tbenchmarkCryptoEncryption = benchmarkCryptoCommand.Flag(\"encryption\", \"Test encrypted formats\").Default(\"true\").Bool()\n\tbenchmarkCryptoRepeat = benchmarkCryptoCommand.Flag(\"repeat\", \"Number of repetitions\").Default(\"100\").Int()\n\tbenchmarkCryptoDeprecatedAlgorithms = benchmarkCryptoCommand.Flag(\"deprecated\", \"Include deprecated algorithms\").Bool()\n)\n\nfunc runBenchmarkCryptoAction(ctx *kingpin.ParseContext) error {\n\ttype benchResult struct {\n\t\thash string\n\t\tencryption string\n\t\tthroughput float64\n\t}\n\n\tvar results []benchResult\n\n\tdata := make([]byte, *benchmarkCryptoBlockSize)\n\n\tfor _, ha := range hashing.SupportedAlgorithms() {\n\t\tfor _, ea := range encryption.SupportedAlgorithms(*benchmarkCryptoDeprecatedAlgorithms) {\n\t\t\tisEncrypted := ea != encryption.NoneAlgorithm\n\t\t\tif *benchmarkCryptoEncryption != isEncrypted {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\th, e, err := content.CreateHashAndEncryptor(&content.FormattingOptions{\n\t\t\t\tEncryption: ea,\n\t\t\t\tHash: ha,\n\t\t\t\tMasterKey: make([]byte, 32),\n\t\t\t\tHMACSecret: make([]byte, 32),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprintStderr(\"Benchmarking hash '%v' and encryption '%v'... (%v x %v bytes)\\n\", ha, ea, *benchmarkCryptoRepeat, len(data))\n\n\t\t\tt0 := time.Now()\n\n\t\t\thashCount := *benchmarkCryptoRepeat\n\t\t\thashOutput := make([]byte, 0, 64)\n\n\t\t\tfor i := 0; i < hashCount; i++ {\n\t\t\t\tcontentID := h(hashOutput[:0], data)\n\t\t\t\tif _, encerr := e.Encrypt(nil, data, contentID); encerr != nil {\n\t\t\t\t\tprintStderr(\"encryption failed: %v\\n\", encerr)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thashTime := time.Since(t0)\n\t\t\tbytesPerSecond := float64(len(data)) * float64(hashCount) \/ hashTime.Seconds()\n\n\t\t\tresults = append(results, benchResult{hash: ha, encryption: ea, throughput: bytesPerSecond})\n\t\t}\n\t}\n\n\tsort.Slice(results, func(i, j int) bool {\n\t\treturn results[i].throughput > results[j].throughput\n\t})\n\tprintStdout(\" %-20v %-20v %v\\n\", \"Hash\", \"Encryption\", \"Throughput\")\n\tprintStdout(\"-----------------------------------------------------------------\\n\")\n\n\tfor ndx, r := range results {\n\t\tprintStdout(\"%3d. %-20v %-20v %v \/ second\\n\", ndx, r.hash, r.encryption, units.BytesStringBase2(int64(r.throughput)))\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tbenchmarkCryptoCommand.Action(runBenchmarkCryptoAction)\n}\n<commit_msg>CLI: pre-allocated buffers for crypto benchmark (#343)<commit_after>package cli\n\nimport (\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/kopia\/kopia\/internal\/units\"\n\t\"github.com\/kopia\/kopia\/repo\/content\"\n\t\"github.com\/kopia\/kopia\/repo\/encryption\"\n\t\"github.com\/kopia\/kopia\/repo\/hashing\"\n\n\tkingpin \"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tbenchmarkCryptoCommand = benchmarkCommands.Command(\"crypto\", \"Run hash and encryption benchmarks\")\n\tbenchmarkCryptoBlockSize = benchmarkCryptoCommand.Flag(\"block-size\", \"Size of a block to encrypt\").Default(\"1MB\").Bytes()\n\tbenchmarkCryptoEncryption = benchmarkCryptoCommand.Flag(\"encryption\", \"Test encrypted formats\").Default(\"true\").Bool()\n\tbenchmarkCryptoRepeat = benchmarkCryptoCommand.Flag(\"repeat\", \"Number of repetitions\").Default(\"100\").Int()\n\tbenchmarkCryptoDeprecatedAlgorithms = benchmarkCryptoCommand.Flag(\"deprecated\", \"Include deprecated algorithms\").Bool()\n)\n\nfunc runBenchmarkCryptoAction(ctx *kingpin.ParseContext) error {\n\ttype benchResult struct {\n\t\thash string\n\t\tencryption string\n\t\tthroughput float64\n\t}\n\n\tvar results []benchResult\n\n\tdata := make([]byte, *benchmarkCryptoBlockSize)\n\n\tconst (\n\t\tmaxEncryptionOverhead = 1024\n\t\tmaxHashSize = 64\n\t)\n\n\tvar hashOutput [maxHashSize]byte\n\n\tencryptOutput := make([]byte, len(data)+maxEncryptionOverhead)\n\n\tfor _, ha := range hashing.SupportedAlgorithms() {\n\t\tfor _, ea := range encryption.SupportedAlgorithms(*benchmarkCryptoDeprecatedAlgorithms) {\n\t\t\tisEncrypted := ea != encryption.NoneAlgorithm\n\t\t\tif *benchmarkCryptoEncryption != isEncrypted {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\th, e, err := content.CreateHashAndEncryptor(&content.FormattingOptions{\n\t\t\t\tEncryption: ea,\n\t\t\t\tHash: ha,\n\t\t\t\tMasterKey: make([]byte, 32),\n\t\t\t\tHMACSecret: make([]byte, 32),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprintStderr(\"Benchmarking hash '%v' and encryption '%v'... (%v x %v bytes)\\n\", ha, ea, *benchmarkCryptoRepeat, len(data))\n\n\t\t\tt0 := time.Now()\n\n\t\t\thashCount := *benchmarkCryptoRepeat\n\n\t\t\tfor i := 0; i < hashCount; i++ {\n\t\t\t\tcontentID := h(hashOutput[:0], data)\n\t\t\t\tif _, encerr := e.Encrypt(encryptOutput[:0], data, contentID); encerr != nil {\n\t\t\t\t\tprintStderr(\"encryption failed: %v\\n\", encerr)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thashTime := time.Since(t0)\n\t\t\tbytesPerSecond := float64(len(data)) * float64(hashCount) \/ hashTime.Seconds()\n\n\t\t\tresults = append(results, benchResult{hash: ha, encryption: ea, throughput: bytesPerSecond})\n\t\t}\n\t}\n\n\tsort.Slice(results, func(i, j int) bool {\n\t\treturn results[i].throughput > results[j].throughput\n\t})\n\tprintStdout(\" %-20v %-20v %v\\n\", \"Hash\", \"Encryption\", \"Throughput\")\n\tprintStdout(\"-----------------------------------------------------------------\\n\")\n\n\tfor ndx, r := range results {\n\t\tprintStdout(\"%3d. %-20v %-20v %v \/ second\\n\", ndx, r.hash, r.encryption, units.BytesStringBase2(int64(r.throughput)))\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tbenchmarkCryptoCommand.Action(runBenchmarkCryptoAction)\n}\n<|endoftext|>"} {"text":"<commit_before>package scan\n\nimport (\n\t\"github.com\/go-openapi\/spec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ ParamDescriptionKey indicates the tag used to define a parameter description in swagger:route\n\tParamDescriptionKey = \"description\"\n\t\/\/ ParamNameKey indicates the tag used to define a parameter name in swagger:route\n\tParamNameKey = \"name\"\n\t\/\/ ParamInKey indicates the tag used to define a parameter location in swagger:route\n\tParamInKey = \"in\"\n\t\/\/ ParamRequiredKey indicates the tag used to declare whether a parameter is required in swagger:route\n\tParamRequiredKey = \"required\"\n\t\/\/ ParamTypeKey indicates the tag used to define the parameter type in swagger:route\n\tParamTypeKey = \"type\"\n\t\/\/ ParamAllowEmptyKey indicates the tag used to indicate whether a parameter allows empty values in swagger:route\n\tParamAllowEmptyKey = \"allowempty\"\n\n\t\/\/ SchemaMinKey indicates the tag used to indicate the minimum value allowed for this type in swagger:route\n\tSchemaMinKey = \"min\"\n\t\/\/ SchemaMaxKey indicates the tag used to indicate the maximum value allowed for this type in swagger:route\n\tSchemaMaxKey = \"max\"\n\t\/\/ SchemaEnumKey indicates the tag used to specify the allowed values for this type in swagger:route\n\tSchemaEnumKey = \"enum\"\n\t\/\/ SchemaFormatKey indicates the expected format for this field in swagger:route\n\tSchemaFormatKey = \"format\"\n\t\/\/ SchemaDefaultKey indicates the default value for this field in swagger:route\n\tSchemaDefaultKey = \"default\"\n\t\/\/ SchemaMinLenKey indicates the minimum length this field in swagger:route\n\tSchemaMinLenKey = \"minlength\"\n\t\/\/ SchemaMaxLenKey indicates the minimum length this field in swagger:route\n\tSchemaMaxLenKey = \"maxlength\"\n\n\t\/\/ TypeArray is the identifier for an array type in swagger:route\n\tTypeArray = \"array\"\n\t\/\/ TypeNumber is the identifier for a number type in swagger:route\n\tTypeNumber = \"number\"\n\t\/\/ TypeInteger is the identifier for an integer type in swagger:route\n\tTypeInteger = \"integer\"\n\t\/\/ TypeBoolean is the identifier for a boolean type in swagger:route\n\tTypeBoolean = \"boolean\"\n\t\/\/ TypeBool is the identifier for a boolean type in swagger:route\n\tTypeBool = \"bool\"\n\t\/\/ TypeObject is the identifier for an object type in swagger:route\n\tTypeObject = \"object\"\n\t\/\/ TypeString is the identifier for a string type in swagger:route\n\tTypeString = \"string\"\n)\n\nvar (\n\tvalidIn = []string{\"path\", \"query\", \"header\", \"body\", \"form\"}\n\tbasicTypes = []string{TypeInteger, TypeNumber, TypeString, TypeBoolean, TypeBool, TypeArray}\n)\n\nfunc newSetParams(params []*spec.Parameter, setter func([]*spec.Parameter)) *setOpParams {\n\treturn &setOpParams{\n\t\tset: setter,\n\t\tparameters: params,\n\t}\n}\n\ntype setOpParams struct {\n\tset func([]*spec.Parameter)\n\tparameters []*spec.Parameter\n}\n\nfunc (s *setOpParams) Matches(line string) bool {\n\treturn rxParameters.MatchString(line)\n}\n\nfunc (s *setOpParams) Parse(lines []string) error {\n\tif len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {\n\t\treturn nil\n\t}\n\n\tvar current *spec.Parameter\n\tvar extraData map[string]string\n\n\tfor _, line := range lines {\n\t\tl := strings.TrimSpace(line)\n\n\t\tif strings.HasPrefix(l, \"+\") {\n\t\t\ts.finalizeParam(current, extraData)\n\t\t\tcurrent = new(spec.Parameter)\n\t\t\textraData = make(map[string]string)\n\t\t\tl = strings.TrimPrefix(l, \"+\")\n\t\t}\n\n\t\tkv := strings.SplitN(l, \":\", 2)\n\n\t\tif len(kv) <= 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := strings.ToLower(strings.TrimSpace(kv[0]))\n\t\tvalue := strings.TrimSpace(kv[1])\n\n\t\tswitch key {\n\t\tcase ParamDescriptionKey:\n\t\t\tcurrent.Description = value\n\t\tcase ParamNameKey:\n\t\t\tcurrent.Name = value\n\t\tcase ParamInKey:\n\t\t\tv := strings.ToLower(value)\n\t\t\tif contains(validIn, v) {\n\t\t\t\tcurrent.In = v\n\t\t\t}\n\t\tcase ParamRequiredKey:\n\t\t\tif v, err := strconv.ParseBool(value); err == nil {\n\t\t\t\tcurrent.Required = v\n\t\t\t}\n\t\tcase ParamTypeKey:\n\t\t\tif current.Schema == nil {\n\t\t\t\tcurrent.Schema = new(spec.Schema)\n\t\t\t}\n\t\t\tif contains(basicTypes, value) {\n\t\t\t\tcurrent.Type = strings.ToLower(value)\n\t\t\t\tif current.Type == TypeBool {\n\t\t\t\t\tcurrent.Type = TypeBoolean\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif ref, err := spec.NewRef(\"#\/definitions\/\" + value); err == nil {\n\t\t\t\t\tcurrent.Type = TypeObject\n\t\t\t\t\tcurrent.Schema.Ref = ref\n\t\t\t\t}\n\t\t\t}\n\t\t\tcurrent.Schema.Type = spec.StringOrArray{current.Type}\n\t\tcase ParamAllowEmptyKey:\n\t\t\tif v, err := strconv.ParseBool(value); err == nil {\n\t\t\t\tcurrent.AllowEmptyValue = v\n\t\t\t}\n\t\tdefault:\n\t\t\textraData[key] = value\n\t\t}\n\t}\n\n\ts.finalizeParam(current, extraData)\n\ts.set(s.parameters)\n\treturn nil\n}\n\nfunc (s *setOpParams) finalizeParam(param *spec.Parameter, data map[string]string) {\n\tif param == nil {\n\t\treturn\n\t}\n\n\tprocessSchema(data, param)\n\ts.parameters = append(s.parameters, param)\n}\n\nfunc processSchema(data map[string]string, param *spec.Parameter) {\n\tif param.Schema == nil {\n\t\treturn\n\t}\n\n\tvar enumValues []string\n\n\tfor key, value := range data {\n\t\tswitch key {\n\t\tcase SchemaMinKey:\n\t\t\tif t := getType(param.Schema); t == TypeNumber || t == TypeInteger {\n\t\t\t\tv, _ := strconv.ParseFloat(value, 64)\n\t\t\t\tparam.Schema.Minimum = &v\n\t\t\t}\n\t\tcase SchemaMaxKey:\n\t\t\tif t := getType(param.Schema); t == TypeNumber || t == TypeInteger {\n\t\t\t\tv, _ := strconv.ParseFloat(value, 64)\n\t\t\t\tparam.Schema.Maximum = &v\n\t\t\t}\n\t\tcase SchemaMinLenKey:\n\t\t\tif getType(param.Schema) == TypeArray {\n\t\t\t\tv, _ := strconv.ParseInt(value, 10, 64)\n\t\t\t\tparam.Schema.MinLength = &v\n\t\t\t}\n\t\tcase SchemaMaxLenKey:\n\t\t\tif getType(param.Schema) == TypeArray {\n\t\t\t\tv, _ := strconv.ParseInt(value, 10, 64)\n\t\t\t\tparam.Schema.MaxLength = &v\n\t\t\t}\n\t\tcase SchemaEnumKey:\n\t\t\tenumValues = strings.Split(value, \",\")\n\t\tcase SchemaFormatKey:\n\t\t\tparam.Schema.Format = value\n\t\tcase SchemaDefaultKey:\n\t\t\tparam.Schema.Default = convert(param.Type, value)\n\t\t}\n\t}\n\n\tif param.Description != \"\" {\n\t\tparam.Schema.Description = param.Description\n\t}\n\n\tconvertEnum(param.Schema, enumValues)\n}\n\nfunc convertEnum(schema *spec.Schema, enumValues []string) {\n\tif len(enumValues) == 0 {\n\t\treturn\n\t}\n\n\tvar finalEnum []interface{}\n\tfor _, v := range enumValues {\n\t\tfinalEnum = append(finalEnum, convert(schema.Type[0], strings.TrimSpace(v)))\n\t}\n\tschema.Enum = finalEnum\n}\n\nfunc convert(typeStr, valueStr string) interface{} {\n\tswitch typeStr {\n\tcase TypeInteger:\n\t\tfallthrough\n\tcase TypeNumber:\n\t\tif num, err := strconv.ParseFloat(valueStr, 64); err == nil {\n\t\t\treturn num\n\t\t}\n\tcase TypeBoolean:\n\t\tfallthrough\n\tcase TypeBool:\n\t\tif b, err := strconv.ParseBool(valueStr); err == nil {\n\t\t\treturn b\n\t\t}\n\t}\n\treturn valueStr\n}\n\nfunc getType(schema *spec.Schema) string {\n\tif len(schema.Type) == 0 {\n\t\treturn \"\"\n\t}\n\treturn schema.Type[0]\n}\n\nfunc contains(arr []string, obj string) bool {\n\tfor _, v := range arr {\n\t\tif v == obj {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>fixed \"runtime error: invalid memory address or nil pointer dereference\" when invalid signature for route provided<commit_after>package scan\n\nimport (\n\t\"github.com\/go-openapi\/spec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"errors\"\n)\n\nconst (\n\t\/\/ ParamDescriptionKey indicates the tag used to define a parameter description in swagger:route\n\tParamDescriptionKey = \"description\"\n\t\/\/ ParamNameKey indicates the tag used to define a parameter name in swagger:route\n\tParamNameKey = \"name\"\n\t\/\/ ParamInKey indicates the tag used to define a parameter location in swagger:route\n\tParamInKey = \"in\"\n\t\/\/ ParamRequiredKey indicates the tag used to declare whether a parameter is required in swagger:route\n\tParamRequiredKey = \"required\"\n\t\/\/ ParamTypeKey indicates the tag used to define the parameter type in swagger:route\n\tParamTypeKey = \"type\"\n\t\/\/ ParamAllowEmptyKey indicates the tag used to indicate whether a parameter allows empty values in swagger:route\n\tParamAllowEmptyKey = \"allowempty\"\n\n\t\/\/ SchemaMinKey indicates the tag used to indicate the minimum value allowed for this type in swagger:route\n\tSchemaMinKey = \"min\"\n\t\/\/ SchemaMaxKey indicates the tag used to indicate the maximum value allowed for this type in swagger:route\n\tSchemaMaxKey = \"max\"\n\t\/\/ SchemaEnumKey indicates the tag used to specify the allowed values for this type in swagger:route\n\tSchemaEnumKey = \"enum\"\n\t\/\/ SchemaFormatKey indicates the expected format for this field in swagger:route\n\tSchemaFormatKey = \"format\"\n\t\/\/ SchemaDefaultKey indicates the default value for this field in swagger:route\n\tSchemaDefaultKey = \"default\"\n\t\/\/ SchemaMinLenKey indicates the minimum length this field in swagger:route\n\tSchemaMinLenKey = \"minlength\"\n\t\/\/ SchemaMaxLenKey indicates the minimum length this field in swagger:route\n\tSchemaMaxLenKey = \"maxlength\"\n\n\t\/\/ TypeArray is the identifier for an array type in swagger:route\n\tTypeArray = \"array\"\n\t\/\/ TypeNumber is the identifier for a number type in swagger:route\n\tTypeNumber = \"number\"\n\t\/\/ TypeInteger is the identifier for an integer type in swagger:route\n\tTypeInteger = \"integer\"\n\t\/\/ TypeBoolean is the identifier for a boolean type in swagger:route\n\tTypeBoolean = \"boolean\"\n\t\/\/ TypeBool is the identifier for a boolean type in swagger:route\n\tTypeBool = \"bool\"\n\t\/\/ TypeObject is the identifier for an object type in swagger:route\n\tTypeObject = \"object\"\n\t\/\/ TypeString is the identifier for a string type in swagger:route\n\tTypeString = \"string\"\n)\n\nvar (\n\tvalidIn = []string{\"path\", \"query\", \"header\", \"body\", \"form\"}\n\tbasicTypes = []string{TypeInteger, TypeNumber, TypeString, TypeBoolean, TypeBool, TypeArray}\n)\n\nfunc newSetParams(params []*spec.Parameter, setter func([]*spec.Parameter)) *setOpParams {\n\treturn &setOpParams{\n\t\tset: setter,\n\t\tparameters: params,\n\t}\n}\n\ntype setOpParams struct {\n\tset func([]*spec.Parameter)\n\tparameters []*spec.Parameter\n}\n\nfunc (s *setOpParams) Matches(line string) bool {\n\treturn rxParameters.MatchString(line)\n}\n\nfunc (s *setOpParams) Parse(lines []string) error {\n\tif len(lines) == 0 || (len(lines) == 1 && len(lines[0]) == 0) {\n\t\treturn nil\n\t}\n\n\tvar current *spec.Parameter\n\tvar extraData map[string]string\n\n\tfor _, line := range lines {\n\t\tl := strings.TrimSpace(line)\n\n\t\tif strings.HasPrefix(l, \"+\") {\n\t\t\ts.finalizeParam(current, extraData)\n\t\t\tcurrent = new(spec.Parameter)\n\t\t\textraData = make(map[string]string)\n\t\t\tl = strings.TrimPrefix(l, \"+\")\n\t\t}\n\n\t\tkv := strings.SplitN(l, \":\", 2)\n\n\t\tif len(kv) <= 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := strings.ToLower(strings.TrimSpace(kv[0]))\n\t\tvalue := strings.TrimSpace(kv[1])\n\n\t\tif current == nil {\n\t\t\treturn errors.New(\"invalid route\/operation schema provided\")\n\t\t}\n\n\t\tswitch key {\n\t\tcase ParamDescriptionKey:\n\t\t\tcurrent.Description = value\n\t\tcase ParamNameKey:\n\t\t\tcurrent.Name = value\n\t\tcase ParamInKey:\n\t\t\tv := strings.ToLower(value)\n\t\t\tif contains(validIn, v) {\n\t\t\t\tcurrent.In = v\n\t\t\t}\n\t\tcase ParamRequiredKey:\n\t\t\tif v, err := strconv.ParseBool(value); err == nil {\n\t\t\t\tcurrent.Required = v\n\t\t\t}\n\t\tcase ParamTypeKey:\n\t\t\tif current.Schema == nil {\n\t\t\t\tcurrent.Schema = new(spec.Schema)\n\t\t\t}\n\t\t\tif contains(basicTypes, value) {\n\t\t\t\tcurrent.Type = strings.ToLower(value)\n\t\t\t\tif current.Type == TypeBool {\n\t\t\t\t\tcurrent.Type = TypeBoolean\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif ref, err := spec.NewRef(\"#\/definitions\/\" + value); err == nil {\n\t\t\t\t\tcurrent.Type = TypeObject\n\t\t\t\t\tcurrent.Schema.Ref = ref\n\t\t\t\t}\n\t\t\t}\n\t\t\tcurrent.Schema.Type = spec.StringOrArray{current.Type}\n\t\tcase ParamAllowEmptyKey:\n\t\t\tif v, err := strconv.ParseBool(value); err == nil {\n\t\t\t\tcurrent.AllowEmptyValue = v\n\t\t\t}\n\t\tdefault:\n\t\t\textraData[key] = value\n\t\t}\n\t}\n\n\ts.finalizeParam(current, extraData)\n\ts.set(s.parameters)\n\treturn nil\n}\n\nfunc (s *setOpParams) finalizeParam(param *spec.Parameter, data map[string]string) {\n\tif param == nil {\n\t\treturn\n\t}\n\n\tprocessSchema(data, param)\n\ts.parameters = append(s.parameters, param)\n}\n\nfunc processSchema(data map[string]string, param *spec.Parameter) {\n\tif param.Schema == nil {\n\t\treturn\n\t}\n\n\tvar enumValues []string\n\n\tfor key, value := range data {\n\t\tswitch key {\n\t\tcase SchemaMinKey:\n\t\t\tif t := getType(param.Schema); t == TypeNumber || t == TypeInteger {\n\t\t\t\tv, _ := strconv.ParseFloat(value, 64)\n\t\t\t\tparam.Schema.Minimum = &v\n\t\t\t}\n\t\tcase SchemaMaxKey:\n\t\t\tif t := getType(param.Schema); t == TypeNumber || t == TypeInteger {\n\t\t\t\tv, _ := strconv.ParseFloat(value, 64)\n\t\t\t\tparam.Schema.Maximum = &v\n\t\t\t}\n\t\tcase SchemaMinLenKey:\n\t\t\tif getType(param.Schema) == TypeArray {\n\t\t\t\tv, _ := strconv.ParseInt(value, 10, 64)\n\t\t\t\tparam.Schema.MinLength = &v\n\t\t\t}\n\t\tcase SchemaMaxLenKey:\n\t\t\tif getType(param.Schema) == TypeArray {\n\t\t\t\tv, _ := strconv.ParseInt(value, 10, 64)\n\t\t\t\tparam.Schema.MaxLength = &v\n\t\t\t}\n\t\tcase SchemaEnumKey:\n\t\t\tenumValues = strings.Split(value, \",\")\n\t\tcase SchemaFormatKey:\n\t\t\tparam.Schema.Format = value\n\t\tcase SchemaDefaultKey:\n\t\t\tparam.Schema.Default = convert(param.Type, value)\n\t\t}\n\t}\n\n\tif param.Description != \"\" {\n\t\tparam.Schema.Description = param.Description\n\t}\n\n\tconvertEnum(param.Schema, enumValues)\n}\n\nfunc convertEnum(schema *spec.Schema, enumValues []string) {\n\tif len(enumValues) == 0 {\n\t\treturn\n\t}\n\n\tvar finalEnum []interface{}\n\tfor _, v := range enumValues {\n\t\tfinalEnum = append(finalEnum, convert(schema.Type[0], strings.TrimSpace(v)))\n\t}\n\tschema.Enum = finalEnum\n}\n\nfunc convert(typeStr, valueStr string) interface{} {\n\tswitch typeStr {\n\tcase TypeInteger:\n\t\tfallthrough\n\tcase TypeNumber:\n\t\tif num, err := strconv.ParseFloat(valueStr, 64); err == nil {\n\t\t\treturn num\n\t\t}\n\tcase TypeBoolean:\n\t\tfallthrough\n\tcase TypeBool:\n\t\tif b, err := strconv.ParseBool(valueStr); err == nil {\n\t\t\treturn b\n\t\t}\n\t}\n\treturn valueStr\n}\n\nfunc getType(schema *spec.Schema) string {\n\tif len(schema.Type) == 0 {\n\t\treturn \"\"\n\t}\n\treturn schema.Type[0]\n}\n\nfunc contains(arr []string, obj string) bool {\n\tfor _, v := range arr {\n\t\tif v == obj {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package scene\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\t\"github.com\/pankona\/phantomize\/scene\/config\"\n)\n\ntype instruction struct {\n\tsprite *simra.Sprite\n\tgame *game\n}\n\nfunc (in *instruction) initialize() {\n\tin.sprite = simra.NewSprite()\n\tsimra.GetInstance().AddSprite2(in.sprite)\n}\n\nfunc (in *instruction) OnEvent(i interface{}) {\n\tc := i.(*command)\n\tswitch c.commandtype {\n\tcase commandGameStarted:\n\t\ttex := simra.NewTextTexture(\n\t\t\t\"choose first unit from here ↓\",\n\t\t\t40, \/\/ fontsize\n\t\t\tcolor.RGBA{255, 255, 255, 255},\n\t\t\timage.Rect(0, 0, config.ScreenWidth, 80),\n\t\t)\n\t\tin.sprite.ReplaceTexture2(tex)\n\t\tin.sprite.X, in.sprite.Y = config.ScreenWidth\/2+240, 260\n\t\tin.sprite.W, in.sprite.H = config.ScreenWidth, 80\n\n\tcase commandUpdateSelection:\n\t\ttex := simra.NewTextTexture(\n\t\t\t\"↑ tap field to summon the unit\",\n\t\t\t40, \/\/ fontsize\n\t\t\tcolor.RGBA{255, 255, 255, 255},\n\t\t\timage.Rect(0, 0, config.ScreenWidth, 80),\n\t\t)\n\t\tin.sprite.ReplaceTexture2(tex)\n\t\tin.sprite.X, in.sprite.Y = config.ScreenWidth\/2+275, 260\n\t\tin.sprite.W, in.sprite.H = config.ScreenWidth, 80\n\n\tcase commandSpawn:\n\t\tsimra.GetInstance().RemoveSprite(in.sprite)\n\n\t}\n}\n<commit_msg>don't create new text texture every time in instruction.go<commit_after>package scene\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\t\"github.com\/pankona\/phantomize\/scene\/config\"\n)\n\ntype instruction struct {\n\tsprite *simra.Sprite\n\tgame *game\n\ttexs [2]*simra.Texture\n}\n\nfunc (in *instruction) initialize() {\n\tin.sprite = simra.NewSprite()\n\tsimra.GetInstance().AddSprite2(in.sprite)\n\tin.texs[0] = simra.NewTextTexture(\n\t\t\"choose first unit from here ↓\",\n\t\t40, \/\/ fontsize\n\t\tcolor.RGBA{255, 255, 255, 255},\n\t\timage.Rect(0, 0, config.ScreenWidth, 80),\n\t)\n\n\tin.texs[1] = simra.NewTextTexture(\n\t\t\"↑ tap field to summon the unit\",\n\t\t40, \/\/ fontsize\n\t\tcolor.RGBA{255, 255, 255, 255},\n\t\timage.Rect(0, 0, config.ScreenWidth, 80),\n\t)\n}\n\nfunc (in *instruction) OnEvent(i interface{}) {\n\tc := i.(*command)\n\tswitch c.commandtype {\n\tcase commandGameStarted:\n\t\tin.sprite.ReplaceTexture2(in.texs[0])\n\t\tin.sprite.X, in.sprite.Y = config.ScreenWidth\/2+240, 260\n\t\tin.sprite.W, in.sprite.H = config.ScreenWidth, 80\n\n\tcase commandUpdateSelection:\n\t\tin.sprite.ReplaceTexture2(in.texs[1])\n\t\tin.sprite.X, in.sprite.Y = config.ScreenWidth\/2+275, 260\n\t\tin.sprite.W, in.sprite.H = config.ScreenWidth, 80\n\n\tcase commandSpawn:\n\t\tsimra.GetInstance().RemoveSprite(in.sprite)\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package schedule defines the interface for scheduling of slackscot actions\npackage schedule\n\nimport (\n\t\"fmt\"\n\t\"github.com\/marcsantiago\/gocron\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ ScheduleDefinition repesents\ntype ScheduleDefinition struct {\n\t\/\/ Internal value (every 1 minute would be expressed with an interval of 1). Must be set explicitly or implicitly (a weekday value implicitly sets the interval to 1)\n\tInterval uint64\n\n\t\/\/ Must be set explicitly or implicitly (\"weeks\" is implicitly set when \"Weekday\" is set). Valid time units are: \"weeks\", \"hours\", \"days\", \"minutes\", \"seconds\"\n\tUnit string\n\n\t\/\/ Optional day of the week. If set, unit and interval are ignored and implicitly considered to be \"every 1 week\"\n\tWeekday string\n\n\t\/\/ Optional \"at time\" value (i.e. \"10:30\")\n\tAtTime string\n}\n\n\/\/ Unit values\nconst (\n\tWeeks = \"weeks\"\n\tHours = \"hours\"\n\tDays = \"days\"\n\tMinutes = \"minutes\"\n\tSeconds = \"seconds\"\n)\n\nvar weekdayToNumeral = map[string]time.Weekday{\n\ttime.Monday.String(): time.Monday,\n\ttime.Tuesday.String(): time.Tuesday,\n\ttime.Wednesday.String(): time.Wednesday,\n\ttime.Thursday.String(): time.Thursday,\n\ttime.Friday.String(): time.Friday,\n\ttime.Saturday.String(): time.Saturday,\n\ttime.Sunday.String(): time.Sunday,\n}\n\n\/\/ Returns a human-friendly string for the ScheduledDefinition\nfunc (s ScheduleDefinition) String() string {\n\tvar b strings.Builder\n\n\tfmt.Fprintf(&b, \"Every \")\n\n\tif s.Weekday != \"\" {\n\t\tfmt.Fprintf(&b, \"%s\", s.Weekday)\n\t} else if s.Interval == 1 {\n\t\tfmt.Fprintf(&b, \"%s\", strings.TrimSuffix(s.Unit, \"s\"))\n\t} else {\n\t\tfmt.Fprintf(&b, \"%d %s\", s.Interval, s.Unit)\n\t}\n\n\tif s.AtTime != \"\" {\n\t\tfmt.Fprintf(&b, \" at %s\", s.AtTime)\n\t}\n\n\treturn b.String()\n}\n\n\/\/ NewJob sets up the gocron.Job with the schedule and leaves the task undefined for the caller to set up\nfunc NewJob(s *gocron.Scheduler, sd ScheduleDefinition) (j *gocron.Job, err error) {\n\tj = s.Every(sd.Interval, false)\n\n\tswitch sd.Weekday {\n\tcase time.Monday.String():\n\t\tj = j.Monday()\n\tcase time.Tuesday.String():\n\t\tj = j.Tuesday()\n\tcase time.Wednesday.String():\n\t\tj = j.Wednesday()\n\tcase time.Thursday.String():\n\t\tj = j.Thursday()\n\tcase time.Friday.String():\n\t\tj = j.Friday()\n\tcase time.Saturday.String():\n\t\tj = j.Saturday()\n\tcase time.Sunday.String():\n\t\tj = j.Sunday()\n\t\/\/ Weekday isn't set so we use units\n\tdefault:\n\t\tswitch sd.Unit {\n\t\tcase Weeks:\n\t\t\tj = j.Weeks()\n\t\tcase Hours:\n\t\t\tj = j.Hours()\n\t\tcase Days:\n\t\t\tj = j.Days()\n\t\tcase Minutes:\n\t\t\tj = j.Minutes()\n\t\tcase Seconds:\n\t\t\tj = j.Seconds()\n\t\t}\n\t}\n\n\tif sd.AtTime != \"\" {\n\t\tj = j.At(sd.AtTime)\n\t}\n\n\tif j.Err() != nil {\n\t\treturn nil, j.Err()\n\t}\n\n\treturn j, nil\n}\n<commit_msg>Reduce Schedule package Cyclomatic Complexity<commit_after>\/\/ Package schedule defines the interface for scheduling of slackscot actions\npackage schedule\n\nimport (\n\t\"fmt\"\n\t\"github.com\/marcsantiago\/gocron\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ ScheduleDefinition repesents\ntype ScheduleDefinition struct {\n\t\/\/ Internal value (every 1 minute would be expressed with an interval of 1). Must be set explicitly or implicitly (a weekday value implicitly sets the interval to 1)\n\tInterval uint64\n\n\t\/\/ Must be set explicitly or implicitly (\"weeks\" is implicitly set when \"Weekday\" is set). Valid time units are: \"weeks\", \"hours\", \"days\", \"minutes\", \"seconds\"\n\tUnit string\n\n\t\/\/ Optional day of the week. If set, unit and interval are ignored and implicitly considered to be \"every 1 week\"\n\tWeekday string\n\n\t\/\/ Optional \"at time\" value (i.e. \"10:30\")\n\tAtTime string\n}\n\n\/\/ Unit values\nconst (\n\tWeeks = \"weeks\"\n\tHours = \"hours\"\n\tDays = \"days\"\n\tMinutes = \"minutes\"\n\tSeconds = \"seconds\"\n)\n\nvar weekdayToNumeral = map[string]time.Weekday{\n\ttime.Monday.String(): time.Monday,\n\ttime.Tuesday.String(): time.Tuesday,\n\ttime.Wednesday.String(): time.Wednesday,\n\ttime.Thursday.String(): time.Thursday,\n\ttime.Friday.String(): time.Friday,\n\ttime.Saturday.String(): time.Saturday,\n\ttime.Sunday.String(): time.Sunday,\n}\n\n\/\/ Returns a human-friendly string for the ScheduledDefinition\nfunc (s ScheduleDefinition) String() string {\n\tvar b strings.Builder\n\n\tfmt.Fprintf(&b, \"Every \")\n\n\tif s.Weekday != \"\" {\n\t\tfmt.Fprintf(&b, \"%s\", s.Weekday)\n\t} else if s.Interval == 1 {\n\t\tfmt.Fprintf(&b, \"%s\", strings.TrimSuffix(s.Unit, \"s\"))\n\t} else {\n\t\tfmt.Fprintf(&b, \"%d %s\", s.Interval, s.Unit)\n\t}\n\n\tif s.AtTime != \"\" {\n\t\tfmt.Fprintf(&b, \" at %s\", s.AtTime)\n\t}\n\n\treturn b.String()\n}\n\n\/\/ Option defines an option for a Slackscot\ntype scheduleOption func(j *gocron.Job)\n\n\/\/ optionWeekday sets the weekday of a recurring job\nfunc optionWeekday(weekday string) func(j *gocron.Job) {\n\treturn func(j *gocron.Job) {\n\t\tswitch weekday {\n\t\tcase time.Monday.String():\n\t\t\tj = j.Monday()\n\t\tcase time.Tuesday.String():\n\t\t\tj = j.Tuesday()\n\t\tcase time.Wednesday.String():\n\t\t\tj = j.Wednesday()\n\t\tcase time.Thursday.String():\n\t\t\tj = j.Thursday()\n\t\tcase time.Friday.String():\n\t\t\tj = j.Friday()\n\t\tcase time.Saturday.String():\n\t\t\tj = j.Saturday()\n\t\tcase time.Sunday.String():\n\t\t\tj = j.Sunday()\n\t\t}\n\t}\n}\n\n\/\/ optionUnit sets the unit of a recurring job\nfunc optionUnit(unit string) func(j *gocron.Job) {\n\treturn func(j *gocron.Job) {\n\t\tswitch unit {\n\t\tcase Weeks:\n\t\t\tj = j.Weeks()\n\t\tcase Hours:\n\t\t\tj = j.Hours()\n\t\tcase Days:\n\t\t\tj = j.Days()\n\t\tcase Minutes:\n\t\t\tj = j.Minutes()\n\t\tcase Seconds:\n\t\t\tj = j.Seconds()\n\t\t}\n\t}\n}\n\n\/\/ optionAtTime sets the AtTime of a recurring job\nfunc optionAtTime(atTime string) func(j *gocron.Job) {\n\treturn func(j *gocron.Job) {\n\t\tj = j.At(atTime)\n\t}\n}\n\n\/\/ NewJob sets up the gocron.Job with the schedule and leaves the task undefined for the caller to set up\nfunc NewJob(s *gocron.Scheduler, sd ScheduleDefinition) (j *gocron.Job, err error) {\n\tj = s.Every(sd.Interval, false)\n\n\tscheduleOptions := make([]scheduleOption, 0)\n\n\tif sd.Weekday != \"\" {\n\t\tscheduleOptions = append(scheduleOptions, optionWeekday(sd.Weekday))\n\t} else if sd.Unit != \"\" {\n\t\tscheduleOptions = append(scheduleOptions, optionUnit(sd.Unit))\n\t}\n\n\tif sd.AtTime != \"\" {\n\t\tscheduleOptions = append(scheduleOptions, optionAtTime(sd.AtTime))\n\t}\n\n\tfor _, option := range scheduleOptions {\n\t\toption(j)\n\t}\n\n\tif j.Err() != nil {\n\t\treturn nil, j.Err()\n\t}\n\n\treturn j, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package schedule\n\nimport (\n\t\"image\/color\"\n\t\"time\"\n)\n\n\/\/ PatternSlot represents a light state in the overall pattern.\ntype PatternSlot struct {\n\tColor color.Color `json:\"color\"`\n\tTransition time.Duration `json:\"transition\"`\n\tHold time.Duration `json:\"hold\"`\n}\n\n\/\/ NewPatternSlot creates a new PatternSlot for the provided parameters.\nfunc NewPatternSlot(col color.Color, hold, transition time.Duration) *PatternSlot {\n\treturn &PatternSlot{Color: col, Hold: hold, Transition: transition}\n}\n\n\/\/ Pattern represents a light display pattern composed of one or more\n\/\/ PatternSlots.\ntype Pattern struct {\n\tID string `json:\"_id\"`\n\tName string `json:\"name\"`\n\tSlots []*PatternSlot `json:\"slots\"`\n}\n\n\/\/ NewPattern creates a new empty.\nfunc NewPattern() *Pattern {\n\treturn &Pattern{Slots: make([]*PatternSlot, 0, 20)}\n}\n\n\/\/ TODO(stephen): convert the Mon, Tue, etc to a crontab pattern.\n\/\/ There are several cron libraries for go that should make parsing cron patterns easy.\n\n\/\/ Schedule determines the pattern of schedules to display.\ntype Schedule struct {\n\tID string `json:\"_id\"`\n\tName string `json:\"name\"`\n\tPattern *Pattern `json:\"pattern\"`\n\tMon bool `json:\"mon\"`\n\tTue bool `json:\"tue\"`\n\tWed bool `json:\"wed\"`\n\tThu bool `json:\"thu\"`\n\tFri bool `json:\"fri\"`\n\tSat bool `json:\"sat\"`\n\tSun bool `json:\"sun\"`\n}\n\n\/\/ Config represents a complete configuration of a light controller node\n\/\/ including patterns available for display and schedules that are active\n\/\/ on the node.\ntype Config struct {\n\tPatterns map[string]*Pattern `json:\"patterns\"` \/\/ Patterns by ID\n\tSchedules map[string]*Schedule `json:\"schedules\"` \/\/ Schedules by ID\n}\n\n\/\/ NewConfig creates a new configuration with no patterns or schedules.\nfunc NewConfig() *Config {\n\treturn &Config{map[string]*Pattern{}, map[string]*Schedule{}}\n}\n\n\/\/ PatternByName locates a pattern by name and returns a 'ok' flag\n\/\/ if a pattern with the given name can't be found.\nfunc (c *Config) PatternByName(name string) (*Pattern, bool) {\n\tfor _, p := range c.Patterns {\n\t\tif p.Name == name {\n\t\t\treturn p, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n\/\/ AddPattern adds a pattern to the configuration.\nfunc (c *Config) AddPattern(p *Pattern) {\n\tc.Patterns[p.ID] = p\n}\n<commit_msg>Add cron-based schedule support<commit_after>package schedule\n\nimport (\n\t\"image\/color\"\n\t\"time\"\n)\n\n\/\/ NewCrontab creates a new crontab ready to add specs\nfunc NewCrontab() *Crontab {\n\treturn &Crontab{}\n}\n\n\/\/ Crontab describes a complete set of cron entries\ntype Crontab struct {\n\tSpecs []*Spec\n}\n\n\/\/ Add a new spec to the crontab\nfunc (c *Crontab) Add(expr, action string) {\n\tc.Specs = append(c.Specs, &Spec{Expression: expr, Action: action})\n}\n\n\/\/ Spec describes a cron spec entry\ntype Spec struct {\n\tExpression string\n\tAction string\n}\n\n\/\/ --------------------- Legacy code below ------------------------\n\n\/\/ PatternSlot represents a light state in the overall pattern.\ntype PatternSlot struct {\n\tColor color.Color `json:\"color\"`\n\tTransition time.Duration `json:\"transition\"`\n\tHold time.Duration `json:\"hold\"`\n}\n\n\/\/ NewPatternSlot creates a new PatternSlot for the provided parameters.\nfunc NewPatternSlot(col color.Color, hold, transition time.Duration) *PatternSlot {\n\treturn &PatternSlot{Color: col, Hold: hold, Transition: transition}\n}\n\n\/\/ Pattern represents a light display pattern composed of one or more\n\/\/ PatternSlots.\ntype Pattern struct {\n\tID string `json:\"_id\"`\n\tName string `json:\"name\"`\n\tSlots []*PatternSlot `json:\"slots\"`\n}\n\n\/\/ NewPattern creates a new empty.\nfunc NewPattern() *Pattern {\n\treturn &Pattern{Slots: make([]*PatternSlot, 0, 20)}\n}\n\n\/\/ TODO(stephen): convert the Mon, Tue, etc to a crontab pattern.\n\/\/ There are several cron libraries for go that should make parsing cron patterns easy.\n\n\/\/ Schedule determines the pattern of schedules to display.\ntype Schedule struct {\n\tID string `json:\"_id\"`\n\tName string `json:\"name\"`\n\tPattern *Pattern `json:\"pattern\"`\n\tMon bool `json:\"mon\"`\n\tTue bool `json:\"tue\"`\n\tWed bool `json:\"wed\"`\n\tThu bool `json:\"thu\"`\n\tFri bool `json:\"fri\"`\n\tSat bool `json:\"sat\"`\n\tSun bool `json:\"sun\"`\n}\n\n\/\/ Config represents a complete configuration of a light controller node\n\/\/ including patterns available for display and schedules that are active\n\/\/ on the node.\ntype Config struct {\n\tPatterns map[string]*Pattern `json:\"patterns\"` \/\/ Patterns by ID\n\tSchedules map[string]*Schedule `json:\"schedules\"` \/\/ Schedules by ID\n}\n\n\/\/ NewConfig creates a new configuration with no patterns or schedules.\nfunc NewConfig() *Config {\n\treturn &Config{map[string]*Pattern{}, map[string]*Schedule{}}\n}\n\n\/\/ PatternByName locates a pattern by name and returns a 'ok' flag\n\/\/ if a pattern with the given name can't be found.\nfunc (c *Config) PatternByName(name string) (*Pattern, bool) {\n\tfor _, p := range c.Patterns {\n\t\tif p.Name == name {\n\t\t\treturn p, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n\/\/ AddPattern adds a pattern to the configuration.\nfunc (c *Config) AddPattern(p *Pattern) {\n\tc.Patterns[p.ID] = p\n}\n<|endoftext|>"} {"text":"<commit_before>package ogdatv21\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/the42\/ogdat\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst Version = \"OGD Austria Metadata 2.1\" \/\/ Version 2.1: 15.10.2012\nconst specfile = \"ogdat_spec-2.1.csv\"\nconst CustomTimeSpecifier1 = \"2006-01-02T15:04:05\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\nconst CustomTimeSpecifier2 = \"2006-12-02\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\n\n\/\/\/ BEGIN:check wheater this code may be factored out\nvar TimeFormat = []string{\n\ttime.RFC3339Nano,\n\ttime.RFC3339,\n\tCustomTimeSpecifier1,\n\tCustomTimeSpecifier2,\n}\n\ntype Kategorie struct {\n\tNumID int `json:\"-\"`\n\tID string\n\tPrettyName string `json:\"-\"`\n\tRDFProperty string `json:\"-\"`\n}\n\nfunc (kat *Kategorie) String() string {\n\treturn kat.PrettyName\n}\n\nvar (\n\tArbeit = Kategorie{NumID: 1, ID: \"arbeit\", PrettyName: \"Arbeit\", RDFProperty: \"\"}\n\tBevoelkerung = Kategorie{NumID: 2, ID: \"bevölkerung\", PrettyName: \"Bevölkerung\", RDFProperty: \"\"}\n\tBildungForschung = Kategorie{NumID: 3, ID: \"bildung-und-forschung\", PrettyName: \"Bildung und Forschung\", RDFProperty: \"\"}\n\tFinanzRW = Kategorie{NumID: 4, ID: \"finanzen-und-rechnungswesen\", PrettyName: \"Finanzen und Rechnungswesen\", RDFProperty: \"\"}\n\tGeographPlanung = Kategorie{NumID: 5, ID: \"geographie-und-planung\", PrettyName: \"Geographie und Planung\", RDFProperty: \"\"}\n\tGesellSoziales = Kategorie{NumID: 6, ID: \"gesellschaft-und-soziales\", PrettyName: \"Gesellschaft und Soziales\", RDFProperty: \"\"}\n\tGesundheit = Kategorie{NumID: 7, ID: \"gesundheit\", PrettyName: \"Gesundheit\", RDFProperty: \"\"}\n\tKunstKultur = Kategorie{NumID: 8, ID: \"kunst-und-kultur\", PrettyName: \"Kunst und Kultur\", RDFProperty: \"\"}\n\tLandFW = Kategorie{NumID: 9, ID: \"land-und-forstwirtschaft\", PrettyName: \"Land und Forstwirtschaft\", RDFProperty: \"\"}\n\tSportFZ = Kategorie{NumID: 10, ID: \"sport-und-freizeit\", PrettyName: \"Sport und Freizeit\", RDFProperty: \"\"}\n\tUmwelt = Kategorie{NumID: 11, ID: \"umwelt\", PrettyName: \"Umwelt\", RDFProperty: \"\"}\n\tVerkehrTechnik = Kategorie{NumID: 12, ID: \"verkehr-und-technik\", PrettyName: \"Verkehr und Technik\", RDFProperty: \"\"}\n\tVerwaltPol = Kategorie{NumID: 13, ID: \"verwaltung-und-politik\", PrettyName: \"Verwaltung und Politik\", RDFProperty: \"\"}\n\tWirtTourism = Kategorie{NumID: 14, ID: \"wirtschaft-und-tourismus\", PrettyName: \"Wirtschaft und Tourismus\", RDFProperty: \"\"}\n)\n\nvar categories = []Kategorie{\n\tArbeit,\n\tBevoelkerung,\n\tBildungForschung,\n\tFinanzRW,\n\tGeographPlanung,\n\tGesellSoziales,\n\tGesundheit,\n\tKunstKultur,\n\tLandFW,\n\tSportFZ,\n\tUmwelt,\n\tVerkehrTechnik,\n\tVerwaltPol,\n\tWirtTourism,\n}\n\nvar categorymap = make(map[string]Kategorie)\n\ntype Tags string\ntype ResourceSpecifier string\n\ntype Cycle struct {\n\tNumID int\n\tDomainCode string\n\tMD_MaintenanceFrequencyCode string\n\tName_DE string\n}\n\nvar (\n\tCycCont = Cycle{1, \"001\", \"continual\", \"kontinuierlich\"}\n\tCycDaily = Cycle{2, \"002\", \"daily\", \"täglich\"}\n\tCycWeekly = Cycle{3, \"003\", \"weekly\", \"wöchentlich\"}\n\tCycFortNly = Cycle{4, \"004\", \"fortnightly\", \"14-tägig\"}\n\tCycMonthly = Cycle{5, \"005\", \"monthly\", \"monatlich\"}\n\tCycQuart = Cycle{6, \"006\", \"quarterly\", \"quartalsweise\"}\n\tCycBiAnn = Cycle{7, \"007\", \"biannually\", \"halbjährlich\"}\n\tCycAnnually = Cycle{8, \"008\", \"annually\", \"jährlich\"}\n\tCycNeeded = Cycle{9, \"009\", \"asNeeded\", \"nach Bedarf\"}\n\tCycIrreg = Cycle{10, \"010\", \"irregular\", \"unregelmäßig\"}\n\tCycNP = Cycle{11, \"011\", \"notPlanned\", \"nicht geplant\"}\n\tCycUnknown = Cycle{12, \"012\", \"unknown\", \"unbekannt\"}\n)\n\nvar cycles = []Cycle{\n\tCycCont,\n\tCycDaily,\n\tCycWeekly,\n\tCycFortNly,\n\tCycMonthly,\n\tCycQuart,\n\tCycBiAnn,\n\tCycAnnually,\n\tCycNeeded,\n\tCycIrreg,\n\tCycNP,\n\tCycUnknown,\n}\n\ntype Url struct {\n\t*url.URL\n\tRaw string\n}\n\ntype Identifier struct {\n\t*uuid.UUID\n\tRaw string\n}\n\nfunc (id *Identifier) String() string {\n\treturn id.Raw\n}\n\ntype Time struct {\n\ttime.Time\n\tRaw string\n\tFormat string\n}\n\nfunc (time *Time) String() string {\n\treturn time.Raw\n}\n\nfunc (cyc *Cycle) String() string {\n\treturn cyc.Name_DE\n}\n\nfunc cmpstrtocycle(raw string, cyc Cycle) bool {\n\tif raw == cyc.Name_DE || raw == cyc.DomainCode || raw == cyc.MD_MaintenanceFrequencyCode {\n\t\treturn true\n\t}\n\tif len(raw) > 0 {\n\t\tif i, err := strconv.Atoi(raw); err == nil && i == cyc.NumID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cyc *Cycle) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tvar found bool\n\tvar idx int\n\tvar matchcyc Cycle\n\n\tfor idx, matchcyc = range cycles {\n\t\tif found := cmpstrtocycle(raw, matchcyc); found == true {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif found {\n\t\t*cyc = cycles[idx]\n\t} else {\n\t\tcyc.NumID = -1\n\t\tcyc.Name_DE = \"**** NON cycle spec **** - \" + raw\n\t\tcyc.MD_MaintenanceFrequencyCode = cyc.Name_DE\n\t}\n\treturn nil\n}\n\nfunc (ogdtime *Time) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\togdtime.Raw = raw\n\n\tfor idx, val := range TimeFormat {\n\t\tt, err := time.Parse(val, raw)\n\t\tif err == nil {\n\t\t\togdtime.Format = TimeFormat[idx]\n\t\t\togdtime.Time = t\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (u *Url) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\tu.Raw = raw\n\turl, _ := url.Parse(raw) \/\/ an actuall error is not important. If url can not be parsed, result will be nil, which is fine here\n\tu.URL = url\n\treturn nil\n}\n\nfunc (id *Identifier) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tid.Raw = string(raw)\n\tif uuid := uuid.Parse(raw); uuid != nil {\n\t\tid.UUID = &uuid\n\t}\n\treturn nil\n}\n\nfunc (kat *Kategorie) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tcorecat, found := categorymap[raw]\n\tif !found {\n\t\tkat.NumID = -1\n\t\tkat.ID = raw\n\t\tkat.PrettyName = \"**** NON core category **** - \" + kat.ID\n\t} else {\n\t\t*kat = corecat\n\t}\n\treturn nil\n}\n\n\/\/\/ END:check wheater this code may be factored out\n\ntype Extras struct {\n\t\/\/ Core\n\tMetadata_Identifier *Identifier `json:\"metadata_identifier\" ogdat:\"ID:1\"` \/\/ CKAN uses since API Version 2 a UUID V4, cf. https:\/\/github.com\/okfn\/ckan\/blob\/master\/ckan\/model\/types.py\n\tMetadata_Modified *Time `json:\"metadata_modified\" ogdat:\"ID:5\"`\n\tCategorization []Kategorie `json:\"categorization\" ogdat:\"ID:10\"`\n\tBegin_DateTime *Time `json:\"begin_datetime\" ogdat:\"ID:24\"`\n\n\t\/\/ Optional\n\tSchema_Name *string `json:\"schema_name\" ogdat:\"ID:2\"`\n\tSchema_Language *string `json:\"schema_language\" ogdat:\"ID:3\"` \/\/ always \"ger\"\n\tSchema_Characterset *string `json:\"schema_characterset\" ogdat:\"ID:4\"` \/\/ always \"utf8\", cf. https:\/\/www.ghrsst.org\/files\/download.php?m=documents&f=ISO%2019115%20.pdf\n\tMetadata_Linkage []Url `json:\"metadata_linkage\" ogdat:\"ID:6\"`\n\tAttribute_Description *string `json:\"attribute_description\" ogdat:\"ID:12\"`\n\tMaintainer_Link *Url `json:\"maintainer_link\" ogdat:\"ID:13\"`\n\tPublisher *string `json:\"publisher\" ogdat:\"ID:20\"`\n\tGeographich_Toponym *string `json:\"geographic_toponym\" ogdat:\"ID:22\"`\n\n\t\/* ON\/EN\/ISO 19115:2003: westBL (344) & eastBL (345) & southBL (346) & northBL (347)\n\t * Specifiaction says a WKT of POLYGON should be used, which would make a\n\t * POLYGON ((-180.00 -90.00, 180.00 90.00)) but Example states\n\t * POLYGON (-180.00 -90.00, 180.00 90.00)\n\t * The situation is currently erroneous but unambigous, so we support both formats\n\t *\/\n\tGeographic_BBox *string `json:\"geographic_bbox\" ogdat:\"ID:23\"`\n\tEnd_DateTime *Time `json:\"end_datetime\" ogdat:\"ID:25\"`\n\tUpdate_Frequency *Cycle `json:\"update_frequency\" ogdat:\"ID:26\"`\n\tLineage_Quality *string `json:\"lineage_quality\" ogdat:\"ID:27\"`\n\tEnTitleDesc *string `json:\"en_title_and_desc\" ogdat:\"ID:28\"`\n\tLicense_Citation *string `json:\"license_citation\" ogdat:\"ID:30\"`\n}\n\ntype Resource struct {\n\t\/\/ Core\n\tURL *Url `json:\"url\" ogdat:\"ID:14\"`\n\tFormat *ResourceSpecifier `json:\"format\" ogdat:\"ID:15\"`\n\n\t\/\/ Optional\n\tName *string `json:\"name\" ogdat:\"ID:16\"`\n\tCreated *Time `json:\"created\" ogdat:\"ID:17\"`\n\tLastModified *Time `json:\"last_modified\" ogdat:\"ID:18\"`\n\n\t\/*\n\t * dcat:bytes a rdf:Property, owl:DatatypeProperty;\n\t * rdfs:isDefinedBy <http:\/\/www.w3.org\/ns\/dcat>;\n\t * rdfs:label \"size in bytes\";\n\t * rdfs:comment \"describe size of resource in bytes\";\n\t * rdfs:domain dcat:Distribution;\n\t * rdfs:range xsd:integer .\n\t *\/\n\tSize *string `json:\"size\" ogdat:\"ID:29\"`\n\tLanguage *string `json:\"language\" ogdat:\"ID:31\"`\n\t\/* Here we have a problem in spec 2.1. which says \"nach ISO\\IEC 10646-1\", which means utf-8, utf-16 and utf-32.\n\t * We would certainly support more encodings, as eg.\n\t * ISO 19115 \/ B.5.10 MD_CharacterSetCode<> or\n\t * http:\/\/www.iana.org\/assignments\/character-sets\/character-sets.xml\n\t *\/\n\tEncoding *string `json:\"characterset\" ogdat:\"ID:32\"`\n}\n\ntype MetaData struct {\n\t\/\/ Core\n\tTitle *string `json:\"title\" ogdat:\"ID:8\"`\n\tDescription *string `json:\"notes\" ogdat:\"ID:9\"`\n\tSchlagworte []Tags `json:\"tags\" ogdat:\"ID:11\"`\n\tMaintainer *string `json:\"maintainer\" ogdat:\"ID:19\"`\n\tLicense *string `json:\"license\" ogdat:\"ID:21\"` \/\/ Sollte URI des Lizenzdokuments sein\n\n\t\/\/ nested structs\n\tExtras `json:\"extras\"`\n\tResource []Resource `json:\"resources\"`\n}\n\nfunc (md *MetaData) GetBeschreibungForFieldName(name string) *ogdat.Beschreibung {\n\tif f, ok := reflect.TypeOf(md).Elem().FieldByName(name); ok {\n\t\tif id := ogdat.GetIDFromMetaDataStructField(f); id > -1 {\n\t\t\tbeschreibung, _ := ogdat.GetOGDSetForVersion(Version).GetBeschreibungForID(id)\n\t\t\treturn beschreibung\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfor idx, val := range categories {\n\t\tcategorymap[val.ID] = categories[idx]\n\t}\n\togdat.RegisterFromCSVFile(Version, specfile)\n}\n<commit_msg>add a Raw field to cycle for unknown \"Intervall\" specifiers<commit_after>package ogdatv21\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/the42\/ogdat\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst Version = \"OGD Austria Metadata 2.1\" \/\/ Version 2.1: 15.10.2012\nconst specfile = \"ogdat_spec-2.1.csv\"\nconst CustomTimeSpecifier1 = \"2006-01-02T15:04:05\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\nconst CustomTimeSpecifier2 = \"2006-12-02\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\n\n\/\/\/ BEGIN:check wheater this code may be factored out\nvar TimeFormat = []string{\n\ttime.RFC3339Nano,\n\ttime.RFC3339,\n\tCustomTimeSpecifier1,\n\tCustomTimeSpecifier2,\n}\n\ntype Kategorie struct {\n\tNumID int `json:\"-\"`\n\tID string\n\tPrettyName string `json:\"-\"`\n\tRDFProperty string `json:\"-\"`\n}\n\nfunc (kat *Kategorie) String() string {\n\treturn kat.PrettyName\n}\n\nvar (\n\tArbeit = Kategorie{NumID: 1, ID: \"arbeit\", PrettyName: \"Arbeit\", RDFProperty: \"\"}\n\tBevoelkerung = Kategorie{NumID: 2, ID: \"bevölkerung\", PrettyName: \"Bevölkerung\", RDFProperty: \"\"}\n\tBildungForschung = Kategorie{NumID: 3, ID: \"bildung-und-forschung\", PrettyName: \"Bildung und Forschung\", RDFProperty: \"\"}\n\tFinanzRW = Kategorie{NumID: 4, ID: \"finanzen-und-rechnungswesen\", PrettyName: \"Finanzen und Rechnungswesen\", RDFProperty: \"\"}\n\tGeographPlanung = Kategorie{NumID: 5, ID: \"geographie-und-planung\", PrettyName: \"Geographie und Planung\", RDFProperty: \"\"}\n\tGesellSoziales = Kategorie{NumID: 6, ID: \"gesellschaft-und-soziales\", PrettyName: \"Gesellschaft und Soziales\", RDFProperty: \"\"}\n\tGesundheit = Kategorie{NumID: 7, ID: \"gesundheit\", PrettyName: \"Gesundheit\", RDFProperty: \"\"}\n\tKunstKultur = Kategorie{NumID: 8, ID: \"kunst-und-kultur\", PrettyName: \"Kunst und Kultur\", RDFProperty: \"\"}\n\tLandFW = Kategorie{NumID: 9, ID: \"land-und-forstwirtschaft\", PrettyName: \"Land und Forstwirtschaft\", RDFProperty: \"\"}\n\tSportFZ = Kategorie{NumID: 10, ID: \"sport-und-freizeit\", PrettyName: \"Sport und Freizeit\", RDFProperty: \"\"}\n\tUmwelt = Kategorie{NumID: 11, ID: \"umwelt\", PrettyName: \"Umwelt\", RDFProperty: \"\"}\n\tVerkehrTechnik = Kategorie{NumID: 12, ID: \"verkehr-und-technik\", PrettyName: \"Verkehr und Technik\", RDFProperty: \"\"}\n\tVerwaltPol = Kategorie{NumID: 13, ID: \"verwaltung-und-politik\", PrettyName: \"Verwaltung und Politik\", RDFProperty: \"\"}\n\tWirtTourism = Kategorie{NumID: 14, ID: \"wirtschaft-und-tourismus\", PrettyName: \"Wirtschaft und Tourismus\", RDFProperty: \"\"}\n)\n\nvar categories = []Kategorie{\n\tArbeit,\n\tBevoelkerung,\n\tBildungForschung,\n\tFinanzRW,\n\tGeographPlanung,\n\tGesellSoziales,\n\tGesundheit,\n\tKunstKultur,\n\tLandFW,\n\tSportFZ,\n\tUmwelt,\n\tVerkehrTechnik,\n\tVerwaltPol,\n\tWirtTourism,\n}\n\nvar categorymap = make(map[string]Kategorie)\n\ntype Tags string\ntype ResourceSpecifier string\n\ntype Cycle struct {\n\tNumID int\n\tDomainCode string\n\tMD_MaintenanceFrequencyCode string\n\tName_DE string\n\tRaw string\n}\n\nvar (\n\tCycCont = Cycle{NumID: 1, DomainCode: \"001\", MD_MaintenanceFrequencyCode: \"continual\", Name_DE: \"kontinuierlich\"}\n\tCycDaily = Cycle{NumID: 2, DomainCode: \"002\", MD_MaintenanceFrequencyCode: \"daily\", Name_DE: \"täglich\"}\n\tCycWeekly = Cycle{NumID: 3, DomainCode: \"003\", MD_MaintenanceFrequencyCode: \"weekly\", Name_DE: \"wöchentlich\"}\n\tCycFortNly = Cycle{NumID: 4, DomainCode: \"004\", MD_MaintenanceFrequencyCode: \"fortnightly\", Name_DE: \"14-tägig\"}\n\tCycMonthly = Cycle{NumID: 5, DomainCode: \"005\", MD_MaintenanceFrequencyCode: \"monthly\", Name_DE: \"monatlich\"}\n\tCycQuart = Cycle{NumID: 6, DomainCode: \"006\", MD_MaintenanceFrequencyCode: \"quarterly\", Name_DE: \"quartalsweise\"}\n\tCycBiAnn = Cycle{NumID: 7, DomainCode: \"007\", MD_MaintenanceFrequencyCode: \"biannually\", Name_DE: \"halbjährlich\"}\n\tCycAnnually = Cycle{NumID: 8, DomainCode: \"008\", MD_MaintenanceFrequencyCode: \"annually\", Name_DE: \"jährlich\"}\n\tCycNeeded = Cycle{NumID: 9, DomainCode: \"009\", MD_MaintenanceFrequencyCode: \"asNeeded\", Name_DE: \"nach Bedarf\"}\n\tCycIrreg = Cycle{NumID: 10, DomainCode: \"010\", MD_MaintenanceFrequencyCode: \"irregular\", Name_DE: \"unregelmäßig\"}\n\tCycNP = Cycle{NumID: 11, DomainCode: \"011\", MD_MaintenanceFrequencyCode: \"notPlanned\", Name_DE: \"nicht geplant\"}\n\tCycUnknown = Cycle{NumID: 12, DomainCode: \"012\", MD_MaintenanceFrequencyCode: \"unknown\", Name_DE: \"unbekannt\"}\n)\n\nvar cycles = []Cycle{\n\tCycCont,\n\tCycDaily,\n\tCycWeekly,\n\tCycFortNly,\n\tCycMonthly,\n\tCycQuart,\n\tCycBiAnn,\n\tCycAnnually,\n\tCycNeeded,\n\tCycIrreg,\n\tCycNP,\n\tCycUnknown,\n}\n\ntype Url struct {\n\t*url.URL\n\tRaw string\n}\n\ntype Identifier struct {\n\t*uuid.UUID\n\tRaw string\n}\n\nfunc (id *Identifier) String() string {\n\treturn id.Raw\n}\n\ntype Time struct {\n\ttime.Time\n\tRaw string\n\tFormat string\n}\n\nfunc (time *Time) String() string {\n\treturn time.Raw\n}\n\nfunc (cyc *Cycle) String() string {\n\treturn cyc.Name_DE\n}\n\nfunc cmpstrtocycle(raw string, cyc Cycle) bool {\n\tif raw == cyc.Name_DE || raw == cyc.DomainCode || raw == cyc.MD_MaintenanceFrequencyCode {\n\t\treturn true\n\t}\n\tif len(raw) > 0 {\n\t\tif i, err := strconv.Atoi(raw); err == nil && i == cyc.NumID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cyc *Cycle) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tvar found bool\n\tvar idx int\n\tvar matchcyc Cycle\n\n\tfor idx, matchcyc = range cycles {\n\t\tif found := cmpstrtocycle(raw, matchcyc); found == true {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif found {\n\t\t*cyc = cycles[idx]\n\t} else {\n\t\tcyc.NumID = -1\n\t\tcyc.Raw = raw\n\t\tcyc.Name_DE = \"**** NON cycle spec **** - \" + raw\n\t\tcyc.MD_MaintenanceFrequencyCode = cyc.Name_DE\n\t}\n\treturn nil\n}\n\nfunc (ogdtime *Time) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\togdtime.Raw = raw\n\n\tfor idx, val := range TimeFormat {\n\t\tt, err := time.Parse(val, raw)\n\t\tif err == nil {\n\t\t\togdtime.Format = TimeFormat[idx]\n\t\t\togdtime.Time = t\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (u *Url) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\tu.Raw = raw\n\turl, _ := url.Parse(raw) \/\/ an actuall error is not important. If url can not be parsed, result will be nil, which is fine here\n\tu.URL = url\n\treturn nil\n}\n\nfunc (id *Identifier) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tid.Raw = string(raw)\n\tif uuid := uuid.Parse(raw); uuid != nil {\n\t\tid.UUID = &uuid\n\t}\n\treturn nil\n}\n\nfunc (kat *Kategorie) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tcorecat, found := categorymap[raw]\n\tif !found {\n\t\tkat.NumID = -1\n\t\tkat.ID = raw\n\t\tkat.PrettyName = \"**** NON core category **** - \" + kat.ID\n\t} else {\n\t\t*kat = corecat\n\t}\n\treturn nil\n}\n\n\/\/\/ END:check wheater this code may be factored out\n\ntype Extras struct {\n\t\/\/ Core\n\tMetadata_Identifier *Identifier `json:\"metadata_identifier\" ogdat:\"ID:1\"` \/\/ CKAN uses since API Version 2 a UUID V4, cf. https:\/\/github.com\/okfn\/ckan\/blob\/master\/ckan\/model\/types.py\n\tMetadata_Modified *Time `json:\"metadata_modified\" ogdat:\"ID:5\"`\n\tCategorization []Kategorie `json:\"categorization\" ogdat:\"ID:10\"`\n\tBegin_DateTime *Time `json:\"begin_datetime\" ogdat:\"ID:24\"`\n\n\t\/\/ Optional\n\tSchema_Name *string `json:\"schema_name\" ogdat:\"ID:2\"`\n\tSchema_Language *string `json:\"schema_language\" ogdat:\"ID:3\"` \/\/ always \"ger\"\n\tSchema_Characterset *string `json:\"schema_characterset\" ogdat:\"ID:4\"` \/\/ always \"utf8\", cf. https:\/\/www.ghrsst.org\/files\/download.php?m=documents&f=ISO%2019115%20.pdf\n\tMetadata_Linkage []Url `json:\"metadata_linkage\" ogdat:\"ID:6\"`\n\tAttribute_Description *string `json:\"attribute_description\" ogdat:\"ID:12\"`\n\tMaintainer_Link *Url `json:\"maintainer_link\" ogdat:\"ID:13\"`\n\tPublisher *string `json:\"publisher\" ogdat:\"ID:20\"`\n\tGeographich_Toponym *string `json:\"geographic_toponym\" ogdat:\"ID:22\"`\n\n\t\/* ON\/EN\/ISO 19115:2003: westBL (344) & eastBL (345) & southBL (346) & northBL (347)\n\t * Specifiaction says a WKT of POLYGON should be used, which would make a\n\t * POLYGON ((-180.00 -90.00, 180.00 90.00)) but Example states\n\t * POLYGON (-180.00 -90.00, 180.00 90.00)\n\t * The situation is currently erroneous but unambigous, so we support both formats\n\t *\/\n\tGeographic_BBox *string `json:\"geographic_bbox\" ogdat:\"ID:23\"`\n\tEnd_DateTime *Time `json:\"end_datetime\" ogdat:\"ID:25\"`\n\tUpdate_Frequency *Cycle `json:\"update_frequency\" ogdat:\"ID:26\"`\n\tLineage_Quality *string `json:\"lineage_quality\" ogdat:\"ID:27\"`\n\tEnTitleDesc *string `json:\"en_title_and_desc\" ogdat:\"ID:28\"`\n\tLicense_Citation *string `json:\"license_citation\" ogdat:\"ID:30\"`\n}\n\ntype Resource struct {\n\t\/\/ Core\n\tURL *Url `json:\"url\" ogdat:\"ID:14\"`\n\tFormat *ResourceSpecifier `json:\"format\" ogdat:\"ID:15\"`\n\n\t\/\/ Optional\n\tName *string `json:\"name\" ogdat:\"ID:16\"`\n\tCreated *Time `json:\"created\" ogdat:\"ID:17\"`\n\tLastModified *Time `json:\"last_modified\" ogdat:\"ID:18\"`\n\n\t\/*\n\t * dcat:bytes a rdf:Property, owl:DatatypeProperty;\n\t * rdfs:isDefinedBy <http:\/\/www.w3.org\/ns\/dcat>;\n\t * rdfs:label \"size in bytes\";\n\t * rdfs:comment \"describe size of resource in bytes\";\n\t * rdfs:domain dcat:Distribution;\n\t * rdfs:range xsd:integer .\n\t *\/\n\tSize *string `json:\"size\" ogdat:\"ID:29\"`\n\tLanguage *string `json:\"language\" ogdat:\"ID:31\"`\n\t\/* Here we have a problem in spec 2.1. which says \"nach ISO\\IEC 10646-1\", which means utf-8, utf-16 and utf-32.\n\t * We would certainly support more encodings, as eg.\n\t * ISO 19115 \/ B.5.10 MD_CharacterSetCode<> or\n\t * http:\/\/www.iana.org\/assignments\/character-sets\/character-sets.xml\n\t *\/\n\tEncoding *string `json:\"characterset\" ogdat:\"ID:32\"`\n}\n\ntype MetaData struct {\n\t\/\/ Core\n\tTitle *string `json:\"title\" ogdat:\"ID:8\"`\n\tDescription *string `json:\"notes\" ogdat:\"ID:9\"`\n\tSchlagworte []Tags `json:\"tags\" ogdat:\"ID:11\"`\n\tMaintainer *string `json:\"maintainer\" ogdat:\"ID:19\"`\n\tLicense *string `json:\"license\" ogdat:\"ID:21\"` \/\/ Sollte URI des Lizenzdokuments sein\n\n\t\/\/ nested structs\n\tExtras `json:\"extras\"`\n\tResource []Resource `json:\"resources\"`\n}\n\nfunc (md *MetaData) GetBeschreibungForFieldName(name string) *ogdat.Beschreibung {\n\tif f, ok := reflect.TypeOf(md).Elem().FieldByName(name); ok {\n\t\tif id := ogdat.GetIDFromMetaDataStructField(f); id > -1 {\n\t\t\tbeschreibung, _ := ogdat.GetOGDSetForVersion(Version).GetBeschreibungForID(id)\n\t\t\treturn beschreibung\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfor idx, val := range categories {\n\t\tcategorymap[val.ID] = categories[idx]\n\t}\n\togdat.RegisterFromCSVFile(Version, specfile)\n}\n<|endoftext|>"} {"text":"<commit_before>package amqp\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdb\/influxdb\/client\/v2\"\n\t\"github.com\/influxdb\/telegraf\/outputs\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype AMQP struct {\n\t\/\/ AMQP brokers to send metrics to\n\tURL string\n\t\/\/ AMQP exchange\n\tExchange string\n\t\/\/ Routing Key Tag\n\tRoutingTag string `toml:\"routing_tag\"`\n\n\tchannel *amqp.Channel\n\tsync.Mutex\n}\n\nvar sampleConfig = `\n # AMQP url\n url = \"amqp:\/\/localhost:5672\/influxdb\"\n # AMQP exchange\n exchange = \"telegraf\"\n # Telegraf tag to use as a routing key\n # ie, if this tag exists, it's value will be used as the routing key\n routing_tag = \"host\"\n`\n\nfunc (q *AMQP) Connect() error {\n\tq.Lock()\n\tdefer q.Unlock()\n\tconnection, err := amqp.Dial(q.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open a channel: %s\", err)\n\t}\n\n\terr = channel.ExchangeDeclare(\n\t\tq.Exchange, \/\/ name\n\t\t\"topic\", \/\/ type\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when unused\n\t\tfalse, \/\/ internal\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to declare an exchange: %s\", err)\n\t}\n\tq.channel = channel\n\tgo func() {\n\t\tlog.Printf(\"Closing: %s\", <-connection.NotifyClose(make(chan *amqp.Error)))\n\t\tlog.Printf(\"Trying to reconnect\")\n\t\tfor err := q.Connect(); err != nil; err = q.Connect() {\n\t\t\tlog.Println(err)\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\n\t}()\n\treturn nil\n}\n\nfunc (q *AMQP) Close() error {\n\treturn q.channel.Close()\n}\n\nfunc (q *AMQP) SampleConfig() string {\n\treturn sampleConfig\n}\n\nfunc (q *AMQP) Description() string {\n\treturn \"Configuration for the AMQP server to send metrics to\"\n}\n\nfunc (q *AMQP) Write(points []*client.Point) error {\n\tq.Lock()\n\tdefer q.Unlock()\n\tif len(points) == 0 {\n\t\treturn nil\n\t}\n\tvar outbuf = make(map[string][][]byte)\n\n\tfor _, p := range points {\n\t\t\/\/ Combine tags from Point and BatchPoints and grab the resulting\n\t\t\/\/ line-protocol output string to write to AMQP\n\t\tvar value, key string\n\t\tvalue = p.String()\n\n\t\tif q.RoutingTag != \"\" {\n\t\t\tif h, ok := p.Tags()[q.RoutingTag]; ok {\n\t\t\t\tkey = h\n\t\t\t}\n\t\t}\n\t\toutbuf[key] = append(outbuf[key], []byte(value))\n\n\t}\n\tfor key, buf := range outbuf {\n\t\terr := q.channel.Publish(\n\t\t\tq.Exchange, \/\/ exchange\n\t\t\tkey, \/\/ routing key\n\t\t\tfalse, \/\/ mandatory\n\t\t\tfalse, \/\/ immediate\n\t\t\tamqp.Publishing{\n\t\t\t\tContentType: \"text\/plain\",\n\t\t\t\tBody: bytes.Join(buf, []byte(\"\\n\")),\n\t\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"FAILED to send amqp message: %s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\toutputs.Add(\"amqp\", func() outputs.Output {\n\t\treturn &AMQP{}\n\t})\n}\n<commit_msg>[amqp output] Add ability to specify influxdb database<commit_after>package amqp\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdb\/influxdb\/client\/v2\"\n\t\"github.com\/influxdb\/telegraf\/outputs\"\n\t\"github.com\/streadway\/amqp\"\n)\n\ntype AMQP struct {\n\t\/\/ AMQP brokers to send metrics to\n\tURL string\n\t\/\/ AMQP exchange\n\tExchange string\n\t\/\/ Routing Key Tag\n\tRoutingTag string `toml:\"routing_tag\"`\n\t\/\/ InfluxDB database\n\tDatabase string\n\t\/\/ InfluxDB retention policy\n\tRetentionPolicy string\n\t\/\/ InfluxDB precision\n\tPrecision string\n\n\tchannel *amqp.Channel\n\tsync.Mutex\n\theaders amqp.Table\n}\n\nconst (\n\tDefaultRetentionPolicy = \"default\"\n\tDefaultDatabase = \"telegraf\"\n\tDefaultPrecision = \"s\"\n)\n\nvar sampleConfig = `\n # AMQP url\n url = \"amqp:\/\/localhost:5672\/influxdb\"\n # AMQP exchange\n exchange = \"telegraf\"\n # Telegraf tag to use as a routing key\n # ie, if this tag exists, it's value will be used as the routing key\n routing_tag = \"host\"\n\n # InfluxDB retention policy\n #retention_policy = \"default\"\n # InfluxDB database\n #database = \"telegraf\"\n # InfluxDB precision\n #precision = \"s\"\n`\n\nfunc (q *AMQP) Connect() error {\n\tq.Lock()\n\tdefer q.Unlock()\n\n\tq.headers = amqp.Table{\n\t\t\"precision\": q.Precision,\n\t\t\"database\": q.Database,\n\t\t\"retention_policy\": q.RetentionPolicy,\n\t}\n\n\tconnection, err := amqp.Dial(q.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open a channel: %s\", err)\n\t}\n\n\terr = channel.ExchangeDeclare(\n\t\tq.Exchange, \/\/ name\n\t\t\"topic\", \/\/ type\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when unused\n\t\tfalse, \/\/ internal\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to declare an exchange: %s\", err)\n\t}\n\tq.channel = channel\n\tgo func() {\n\t\tlog.Printf(\"Closing: %s\", <-connection.NotifyClose(make(chan *amqp.Error)))\n\t\tlog.Printf(\"Trying to reconnect\")\n\t\tfor err := q.Connect(); err != nil; err = q.Connect() {\n\t\t\tlog.Println(err)\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\n\t}()\n\treturn nil\n}\n\nfunc (q *AMQP) Close() error {\n\treturn q.channel.Close()\n}\n\nfunc (q *AMQP) SampleConfig() string {\n\treturn sampleConfig\n}\n\nfunc (q *AMQP) Description() string {\n\treturn \"Configuration for the AMQP server to send metrics to\"\n}\n\nfunc (q *AMQP) Write(points []*client.Point) error {\n\tq.Lock()\n\tdefer q.Unlock()\n\tif len(points) == 0 {\n\t\treturn nil\n\t}\n\tvar outbuf = make(map[string][][]byte)\n\n\tfor _, p := range points {\n\t\t\/\/ Combine tags from Point and BatchPoints and grab the resulting\n\t\t\/\/ line-protocol output string to write to AMQP\n\t\tvar value, key string\n\t\tvalue = p.String()\n\n\t\tif q.RoutingTag != \"\" {\n\t\t\tif h, ok := p.Tags()[q.RoutingTag]; ok {\n\t\t\t\tkey = h\n\t\t\t}\n\t\t}\n\t\toutbuf[key] = append(outbuf[key], []byte(value))\n\n\t}\n\tfor key, buf := range outbuf {\n\t\terr := q.channel.Publish(\n\t\t\tq.Exchange, \/\/ exchange\n\t\t\tkey, \/\/ routing key\n\t\t\tfalse, \/\/ mandatory\n\t\t\tfalse, \/\/ immediate\n\t\t\tamqp.Publishing{\n\t\t\t\tHeaders: q.headers,\n\t\t\t\tContentType: \"text\/plain\",\n\t\t\t\tBody: bytes.Join(buf, []byte(\"\\n\")),\n\t\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"FAILED to send amqp message: %s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\toutputs.Add(\"amqp\", func() outputs.Output {\n\t\treturn &AMQP{\n\t\t\tDatabase: DefaultDatabase,\n\t\t\tPrecision: DefaultPrecision,\n\t\t\tRetentionPolicy: DefaultRetentionPolicy,\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Fission Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage router\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"errors\"\n\t\"net\/url\"\n)\n\ntype functionHandler struct {\n\tfmap *functionServiceMap\n\tfunction\n}\n\nfunc getService(f *function) (*url.URL, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc (fh *functionHandler) handler(responseWriter http.ResponseWriter, request *http.Request) {\n\tserviceUrl, err := fh.fmap.lookup(&fh.function)\n\tif (err != nil) {\n\t\t\/\/ Cache miss: request the Pool Manager to make a new service.\n\t\tserviceUrl, poolErr := getService(&fh.function)\n\t\tif (poolErr != nil) {\n\t\t\t\/\/ now we're really screwed\n\t\t\tlog.Printf(\"Failed to get service for function (%v,%v): %v\",\n\t\t\t\tfh.function.name, fh.function.uid, poolErr);\n\t\t\tresponseWriter.WriteHeader(500) \/\/ TODO: make this smarter based on the actual error\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ add it to the map\n\t\tfh.fmap.assign(&fh.function, serviceUrl)\n\t}\n\n\t\/\/ Proxy off our request to the serviceUrl, and send the response back.\n\t\/\/ TODO: As an optimization we may want to cache proxies too -- this would get us\n\t\/\/ connection reuse and possibly better performance\n\tdirector := func(req *http.Request) {\t\n\t\t\/\/ send this request to serviceurl\n\t\treq.URL.Scheme = serviceUrl.Scheme\n\t\treq.URL.Host = serviceUrl.Host\n\t\treq.URL.Path = serviceUrl.Path\n\t\t\/\/ leave the query string intact (req.URL.RawQuery)\n\n\t\tif _, ok := req.Header[\"User-Agent\"]; !ok {\n\t\t\t\/\/ explicitly disable User-Agent so it's not set to default value\n\t\t\treq.Header.Set(\"User-Agent\", \"\")\n\t\t}\n\t}\n\tproxy := &httputil.ReverseProxy{Director: director}\n\tproxy.ServeHTTP(responseWriter, request)\n}\n<commit_msg>Include PoolManager URL in function handler state and initializer<commit_after>\/*\nCopyright 2016 The Fission Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage router\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"errors\"\n\t\"net\/url\"\n)\n\ntype functionHandler struct {\n\tfmap *functionServiceMap\n\tpoolManagerUrl string\n\tfunction\n}\n\nfunc (*functionHandler) getServiceForFunction() (*url.URL, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc (fh *functionHandler) handler(responseWriter http.ResponseWriter, request *http.Request) {\n\tserviceUrl, err := fh.fmap.lookup(&fh.function)\n\tif (err != nil) {\n\t\t\/\/ Cache miss: request the Pool Manager to make a new service.\n\t\tserviceUrl, poolErr := fh.getServiceForFunction()\n\t\tif (poolErr != nil) {\n\t\t\t\/\/ now we're really screwed\n\t\t\tlog.Printf(\"Failed to get service for function (%v,%v): %v\",\n\t\t\t\tfh.function.name, fh.function.uid, poolErr);\n\t\t\tresponseWriter.WriteHeader(500) \/\/ TODO: make this smarter based on the actual error\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ add it to the map\n\t\tfh.fmap.assign(&fh.function, serviceUrl)\n\t}\n\n\t\/\/ Proxy off our request to the serviceUrl, and send the response back.\n\t\/\/ TODO: As an optimization we may want to cache proxies too -- this would get us\n\t\/\/ connection reuse and possibly better performance\n\tdirector := func(req *http.Request) {\t\n\t\t\/\/ send this request to serviceurl\n\t\treq.URL.Scheme = serviceUrl.Scheme\n\t\treq.URL.Host = serviceUrl.Host\n\t\treq.URL.Path = serviceUrl.Path\n\t\t\/\/ leave the query string intact (req.URL.RawQuery)\n\n\t\tif _, ok := req.Header[\"User-Agent\"]; !ok {\n\t\t\t\/\/ explicitly disable User-Agent so it's not set to default value\n\t\t\treq.Header.Set(\"User-Agent\", \"\")\n\t\t}\n\t}\n\tproxy := &httputil.ReverseProxy{Director: director}\n\tproxy.ServeHTTP(responseWriter, request)\n\n\t\/\/ TODO: handle failures and possibly retry here.\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Project Harbor Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage handler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/go-openapi\/runtime\/middleware\"\n\t\"github.com\/go-openapi\/strfmt\"\n\n\t\"github.com\/goharbor\/harbor\/src\/common\/rbac\"\n\t\"github.com\/goharbor\/harbor\/src\/common\/utils\"\n\t\"github.com\/goharbor\/harbor\/src\/controller\/robot\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/config\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/errors\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/log\"\n\tpkg \"github.com\/goharbor\/harbor\/src\/pkg\/robot\/model\"\n\t\"github.com\/goharbor\/harbor\/src\/server\/v2.0\/handler\/model\"\n\t\"github.com\/goharbor\/harbor\/src\/server\/v2.0\/models\"\n\toperation \"github.com\/goharbor\/harbor\/src\/server\/v2.0\/restapi\/operations\/robot\"\n)\n\nfunc newRobotAPI() *robotAPI {\n\treturn &robotAPI{\n\t\trobotCtl: robot.Ctl,\n\t}\n}\n\ntype robotAPI struct {\n\tBaseAPI\n\trobotCtl robot.Controller\n}\n\nfunc (rAPI *robotAPI) CreateRobot(ctx context.Context, params operation.CreateRobotParams) middleware.Responder {\n\tif err := validateName(params.Robot.Name); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.validate(params.Robot.Duration, params.Robot.Level, params.Robot.Permissions); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, params.Robot.Level, params.Robot.Permissions[0].Namespace, rbac.ActionCreate); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr := &robot.Robot{\n\t\tRobot: pkg.Robot{\n\t\t\tName: params.Robot.Name,\n\t\t\tDescription: params.Robot.Description,\n\t\t\tDuration: params.Robot.Duration,\n\t\t\tVisible: true,\n\t\t},\n\t\tLevel: params.Robot.Level,\n\t}\n\n\tif err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {\n\t\tlog.Warningf(\"failed to call JSONCopy on robot permission when CreateRobot, error: %v\", err)\n\t}\n\n\trid, pwd, err := rAPI.robotCtl.Create(ctx, r)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tcreated, err := rAPI.robotCtl.Get(ctx, rid, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tlocation := fmt.Sprintf(\"%s\/%d\", strings.TrimSuffix(params.HTTPRequest.URL.Path, \"\/\"), created.ID)\n\treturn operation.NewCreateRobotCreated().WithLocation(location).WithPayload(&models.RobotCreated{\n\t\tID: created.ID,\n\t\tName: created.Name,\n\t\tSecret: pwd,\n\t\tCreationTime: strfmt.DateTime(created.CreationTime),\n\t\tExpiresAt: created.ExpiresAt,\n\t})\n}\n\nfunc (rAPI *robotAPI) DeleteRobot(ctx context.Context, params operation.DeleteRobotParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionDelete); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.robotCtl.Delete(ctx, params.RobotID); err != nil {\n\t\t\/\/ for the version 1 robot account, has to ignore the no permission error.\n\t\tif !r.Editable && errors.IsNotFoundErr(err) {\n\t\t\treturn operation.NewDeleteRobotOK()\n\t\t}\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\treturn operation.NewDeleteRobotOK()\n}\n\nfunc (rAPI *robotAPI) ListRobot(ctx context.Context, params operation.ListRobotParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tquery, err := rAPI.BuildQuery(ctx, params.Q, params.Sort, params.Page, params.PageSize)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar projectID int64\n\tvar level string\n\t\/\/ GET \/api\/v2.0\/robots or GET \/api\/v2.0\/robots?level=system to get all of system level robots.\n\t\/\/ GET \/api\/v2.0\/robots?level=project&project_id=1\n\tif _, ok := query.Keywords[\"Level\"]; ok {\n\t\tif !isValidLevel(query.Keywords[\"Level\"].(string)) {\n\t\t\treturn rAPI.SendError(ctx, errors.New(nil).WithMessage(\"bad request error level input\").WithCode(errors.BadRequestCode))\n\t\t}\n\t\tlevel = query.Keywords[\"Level\"].(string)\n\t\tif level == robot.LEVELPROJECT {\n\t\t\tif _, ok := query.Keywords[\"ProjectID\"]; !ok {\n\t\t\t\treturn rAPI.SendError(ctx, errors.BadRequestError(nil).WithMessage(\"must with project ID when to query project robots\"))\n\t\t\t}\n\t\t\tpid, err := strconv.ParseInt(query.Keywords[\"ProjectID\"].(string), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn rAPI.SendError(ctx, errors.BadRequestError(nil).WithMessage(\"Project ID must be int type.\"))\n\t\t\t}\n\t\t\tprojectID = pid\n\t\t}\n\t} else {\n\t\tlevel = robot.LEVELSYSTEM\n\t\tquery.Keywords[\"ProjectID\"] = 0\n\t}\n\tquery.Keywords[\"Visible\"] = true\n\n\tif err := rAPI.requireAccess(ctx, level, projectID, rbac.ActionList); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\ttotal, err := rAPI.robotCtl.Count(ctx, query)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\trobots, err := rAPI.robotCtl.List(ctx, query, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar results []*models.Robot\n\tfor _, r := range robots {\n\t\tresults = append(results, model.NewRobot(r).ToSwagger())\n\t}\n\n\treturn operation.NewListRobotOK().\n\t\tWithXTotalCount(total).\n\t\tWithLink(rAPI.Links(ctx, params.HTTPRequest.URL, total, query.PageNumber, query.PageSize).String()).\n\t\tWithPayload(results)\n}\n\nfunc (rAPI *robotAPI) GetRobotByID(ctx context.Context, params operation.GetRobotByIDParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionRead); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewGetRobotByIDOK().WithPayload(model.NewRobot(r).ToSwagger())\n}\n\nfunc (rAPI *robotAPI) UpdateRobot(ctx context.Context, params operation.UpdateRobotParams) middleware.Responder {\n\tvar err error\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif !r.Editable {\n\t\terr = errors.DeniedError(nil).WithMessage(\"editing of legacy robot is not allowed\")\n\t} else {\n\t\terr = rAPI.updateV2Robot(ctx, params, r)\n\t}\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewUpdateRobotOK()\n}\n\nfunc (rAPI *robotAPI) RefreshSec(ctx context.Context, params operation.RefreshSecParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionUpdate); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar secret string\n\trobotSec := &models.RobotSec{}\n\tif params.RobotSec.Secret != \"\" {\n\t\tif !robot.IsValidSec(params.RobotSec.Secret) {\n\t\t\treturn rAPI.SendError(ctx, errors.New(\"the secret must longer than 8 chars with at least 1 uppercase letter, 1 lowercase letter and 1 number\").WithCode(errors.BadRequestCode))\n\t\t}\n\t\tsecret = utils.Encrypt(params.RobotSec.Secret, r.Salt, utils.SHA256)\n\t\trobotSec.Secret = \"\"\n\t} else {\n\t\tsec, pwd, _, err := robot.CreateSec(r.Salt)\n\t\tif err != nil {\n\t\t\treturn rAPI.SendError(ctx, err)\n\t\t}\n\t\tsecret = sec\n\t\trobotSec.Secret = pwd\n\t}\n\n\tr.Secret = secret\n\tif err := rAPI.robotCtl.Update(ctx, r, nil); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewRefreshSecOK().WithPayload(robotSec)\n}\n\nfunc (rAPI *robotAPI) requireAccess(ctx context.Context, level string, projectIDOrName interface{}, action rbac.Action) error {\n\tif level == robot.LEVELSYSTEM {\n\t\treturn rAPI.RequireSystemAccess(ctx, action, rbac.ResourceRobot)\n\t} else if level == robot.LEVELPROJECT {\n\t\treturn rAPI.RequireProjectAccess(ctx, projectIDOrName, action, rbac.ResourceRobot)\n\t}\n\treturn errors.ForbiddenError(nil)\n}\n\n\/\/ more validation\nfunc (rAPI *robotAPI) validate(d int64, level string, permissions []*models.RobotPermission) error {\n\tif !isValidDuration(d) {\n\t\treturn errors.New(nil).WithMessage(\"bad request error duration input: %d\", d).WithCode(errors.BadRequestCode)\n\t}\n\n\tif !isValidLevel(level) {\n\t\treturn errors.New(nil).WithMessage(\"bad request error level input: %s\", level).WithCode(errors.BadRequestCode)\n\t}\n\n\tif len(permissions) == 0 {\n\t\treturn errors.New(nil).WithMessage(\"bad request empty permission\").WithCode(errors.BadRequestCode)\n\t}\n\n\tfor _, perm := range permissions {\n\t\tif len(perm.Access) == 0 {\n\t\t\treturn errors.New(nil).WithMessage(\"bad request empty access\").WithCode(errors.BadRequestCode)\n\t\t}\n\t}\n\n\t\/\/ to create a project robot, the permission must be only one project scope.\n\tif level == robot.LEVELPROJECT && len(permissions) > 1 {\n\t\treturn errors.New(nil).WithMessage(\"bad request permission\").WithCode(errors.BadRequestCode)\n\t}\n\treturn nil\n}\n\nfunc (rAPI *robotAPI) updateV2Robot(ctx context.Context, params operation.UpdateRobotParams, r *robot.Robot) error {\n\tif err := rAPI.validate(params.Robot.Duration, params.Robot.Level, params.Robot.Permissions); err != nil {\n\t\treturn err\n\t}\n\tprojectID, err := getProjectID(ctx, params.Robot.Permissions[0].Namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif r.Level != robot.LEVELSYSTEM && r.ProjectID != projectID {\n\t\treturn errors.BadRequestError(nil).WithMessage(\"cannot update the project id of robot\")\n\t}\n\tif err := rAPI.requireAccess(ctx, params.Robot.Level, projectID, rbac.ActionUpdate); err != nil {\n\t\treturn err\n\t}\n\tif params.Robot.Level != r.Level || params.Robot.Name != r.Name {\n\t\treturn errors.BadRequestError(nil).WithMessage(\"cannot update the level or name of robot\")\n\t}\n\n\tif r.Duration != params.Robot.Duration {\n\t\tr.Duration = params.Robot.Duration\n\t\tif params.Robot.Duration == -1 {\n\t\t\tr.ExpiresAt = -1\n\t\t} else if params.Robot.Duration == 0 {\n\t\t\tr.Duration = int64(config.RobotTokenDuration(ctx))\n\t\t\tr.ExpiresAt = r.CreationTime.AddDate(0, 0, config.RobotTokenDuration(ctx)).Unix()\n\t\t} else {\n\t\t\tr.ExpiresAt = r.CreationTime.AddDate(0, 0, int(params.Robot.Duration)).Unix()\n\t\t}\n\t}\n\n\tr.Description = params.Robot.Description\n\tr.Disabled = params.Robot.Disable\n\tif len(params.Robot.Permissions) != 0 {\n\t\tif err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {\n\t\t\tlog.Warningf(\"failed to call JSONCopy on robot permission when updateV2Robot, error: %v\", err)\n\t\t}\n\t}\n\n\tif err := rAPI.robotCtl.Update(ctx, r, &robot.Option{\n\t\tWithPermission: true,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc isValidLevel(l string) bool {\n\treturn l == robot.LEVELSYSTEM || l == robot.LEVELPROJECT\n}\n\nfunc isValidDuration(d int64) bool {\n\treturn d >= int64(-1) && d < math.MaxInt32\n}\n\n\/\/ validateName validates the robot name, especially '+' cannot be a valid character\nfunc validateName(name string) error {\n\trobotNameReg := `^[a-z0-9]+(?:[._-][a-z0-9]+)*$`\n\tlegal := regexp.MustCompile(robotNameReg).MatchString(name)\n\tif !legal {\n\t\treturn errors.BadRequestError(nil).WithMessage(\"robot name is not in lower case or contains illegal characters\")\n\t}\n\treturn nil\n}\n<commit_msg>fix update robot regression (#17248)<commit_after>\/\/ Copyright Project Harbor Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage handler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/go-openapi\/runtime\/middleware\"\n\t\"github.com\/go-openapi\/strfmt\"\n\n\t\"github.com\/goharbor\/harbor\/src\/common\/rbac\"\n\t\"github.com\/goharbor\/harbor\/src\/common\/utils\"\n\t\"github.com\/goharbor\/harbor\/src\/controller\/robot\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/config\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/errors\"\n\t\"github.com\/goharbor\/harbor\/src\/lib\/log\"\n\tpkg \"github.com\/goharbor\/harbor\/src\/pkg\/robot\/model\"\n\t\"github.com\/goharbor\/harbor\/src\/server\/v2.0\/handler\/model\"\n\t\"github.com\/goharbor\/harbor\/src\/server\/v2.0\/models\"\n\toperation \"github.com\/goharbor\/harbor\/src\/server\/v2.0\/restapi\/operations\/robot\"\n)\n\nfunc newRobotAPI() *robotAPI {\n\treturn &robotAPI{\n\t\trobotCtl: robot.Ctl,\n\t}\n}\n\ntype robotAPI struct {\n\tBaseAPI\n\trobotCtl robot.Controller\n}\n\nfunc (rAPI *robotAPI) CreateRobot(ctx context.Context, params operation.CreateRobotParams) middleware.Responder {\n\tif err := validateName(params.Robot.Name); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.validate(params.Robot.Duration, params.Robot.Level, params.Robot.Permissions); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, params.Robot.Level, params.Robot.Permissions[0].Namespace, rbac.ActionCreate); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr := &robot.Robot{\n\t\tRobot: pkg.Robot{\n\t\t\tName: params.Robot.Name,\n\t\t\tDescription: params.Robot.Description,\n\t\t\tDuration: params.Robot.Duration,\n\t\t\tVisible: true,\n\t\t},\n\t\tLevel: params.Robot.Level,\n\t}\n\n\tif err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {\n\t\tlog.Warningf(\"failed to call JSONCopy on robot permission when CreateRobot, error: %v\", err)\n\t}\n\n\trid, pwd, err := rAPI.robotCtl.Create(ctx, r)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tcreated, err := rAPI.robotCtl.Get(ctx, rid, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tlocation := fmt.Sprintf(\"%s\/%d\", strings.TrimSuffix(params.HTTPRequest.URL.Path, \"\/\"), created.ID)\n\treturn operation.NewCreateRobotCreated().WithLocation(location).WithPayload(&models.RobotCreated{\n\t\tID: created.ID,\n\t\tName: created.Name,\n\t\tSecret: pwd,\n\t\tCreationTime: strfmt.DateTime(created.CreationTime),\n\t\tExpiresAt: created.ExpiresAt,\n\t})\n}\n\nfunc (rAPI *robotAPI) DeleteRobot(ctx context.Context, params operation.DeleteRobotParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionDelete); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.robotCtl.Delete(ctx, params.RobotID); err != nil {\n\t\t\/\/ for the version 1 robot account, has to ignore the no permission error.\n\t\tif !r.Editable && errors.IsNotFoundErr(err) {\n\t\t\treturn operation.NewDeleteRobotOK()\n\t\t}\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\treturn operation.NewDeleteRobotOK()\n}\n\nfunc (rAPI *robotAPI) ListRobot(ctx context.Context, params operation.ListRobotParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tquery, err := rAPI.BuildQuery(ctx, params.Q, params.Sort, params.Page, params.PageSize)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar projectID int64\n\tvar level string\n\t\/\/ GET \/api\/v2.0\/robots or GET \/api\/v2.0\/robots?level=system to get all of system level robots.\n\t\/\/ GET \/api\/v2.0\/robots?level=project&project_id=1\n\tif _, ok := query.Keywords[\"Level\"]; ok {\n\t\tif !isValidLevel(query.Keywords[\"Level\"].(string)) {\n\t\t\treturn rAPI.SendError(ctx, errors.New(nil).WithMessage(\"bad request error level input\").WithCode(errors.BadRequestCode))\n\t\t}\n\t\tlevel = query.Keywords[\"Level\"].(string)\n\t\tif level == robot.LEVELPROJECT {\n\t\t\tif _, ok := query.Keywords[\"ProjectID\"]; !ok {\n\t\t\t\treturn rAPI.SendError(ctx, errors.BadRequestError(nil).WithMessage(\"must with project ID when to query project robots\"))\n\t\t\t}\n\t\t\tpid, err := strconv.ParseInt(query.Keywords[\"ProjectID\"].(string), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn rAPI.SendError(ctx, errors.BadRequestError(nil).WithMessage(\"Project ID must be int type.\"))\n\t\t\t}\n\t\t\tprojectID = pid\n\t\t}\n\t} else {\n\t\tlevel = robot.LEVELSYSTEM\n\t\tquery.Keywords[\"ProjectID\"] = 0\n\t}\n\tquery.Keywords[\"Visible\"] = true\n\n\tif err := rAPI.requireAccess(ctx, level, projectID, rbac.ActionList); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\ttotal, err := rAPI.robotCtl.Count(ctx, query)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\trobots, err := rAPI.robotCtl.List(ctx, query, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar results []*models.Robot\n\tfor _, r := range robots {\n\t\tresults = append(results, model.NewRobot(r).ToSwagger())\n\t}\n\n\treturn operation.NewListRobotOK().\n\t\tWithXTotalCount(total).\n\t\tWithLink(rAPI.Links(ctx, params.HTTPRequest.URL, total, query.PageNumber, query.PageSize).String()).\n\t\tWithPayload(results)\n}\n\nfunc (rAPI *robotAPI) GetRobotByID(ctx context.Context, params operation.GetRobotByIDParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionRead); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewGetRobotByIDOK().WithPayload(model.NewRobot(r).ToSwagger())\n}\n\nfunc (rAPI *robotAPI) UpdateRobot(ctx context.Context, params operation.UpdateRobotParams) middleware.Responder {\n\tvar err error\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, &robot.Option{\n\t\tWithPermission: true,\n\t})\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif !r.Editable {\n\t\terr = errors.DeniedError(nil).WithMessage(\"editing of legacy robot is not allowed\")\n\t} else {\n\t\terr = rAPI.updateV2Robot(ctx, params, r)\n\t}\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewUpdateRobotOK()\n}\n\nfunc (rAPI *robotAPI) RefreshSec(ctx context.Context, params operation.RefreshSecParams) middleware.Responder {\n\tif err := rAPI.RequireAuthenticated(ctx); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tr, err := rAPI.robotCtl.Get(ctx, params.RobotID, nil)\n\tif err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tif err := rAPI.requireAccess(ctx, r.Level, r.ProjectID, rbac.ActionUpdate); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\tvar secret string\n\trobotSec := &models.RobotSec{}\n\tif params.RobotSec.Secret != \"\" {\n\t\tif !robot.IsValidSec(params.RobotSec.Secret) {\n\t\t\treturn rAPI.SendError(ctx, errors.New(\"the secret must longer than 8 chars with at least 1 uppercase letter, 1 lowercase letter and 1 number\").WithCode(errors.BadRequestCode))\n\t\t}\n\t\tsecret = utils.Encrypt(params.RobotSec.Secret, r.Salt, utils.SHA256)\n\t\trobotSec.Secret = \"\"\n\t} else {\n\t\tsec, pwd, _, err := robot.CreateSec(r.Salt)\n\t\tif err != nil {\n\t\t\treturn rAPI.SendError(ctx, err)\n\t\t}\n\t\tsecret = sec\n\t\trobotSec.Secret = pwd\n\t}\n\n\tr.Secret = secret\n\tif err := rAPI.robotCtl.Update(ctx, r, nil); err != nil {\n\t\treturn rAPI.SendError(ctx, err)\n\t}\n\n\treturn operation.NewRefreshSecOK().WithPayload(robotSec)\n}\n\nfunc (rAPI *robotAPI) requireAccess(ctx context.Context, level string, projectIDOrName interface{}, action rbac.Action) error {\n\tif level == robot.LEVELSYSTEM {\n\t\treturn rAPI.RequireSystemAccess(ctx, action, rbac.ResourceRobot)\n\t} else if level == robot.LEVELPROJECT {\n\t\treturn rAPI.RequireProjectAccess(ctx, projectIDOrName, action, rbac.ResourceRobot)\n\t}\n\treturn errors.ForbiddenError(nil)\n}\n\n\/\/ more validation\nfunc (rAPI *robotAPI) validate(d int64, level string, permissions []*models.RobotPermission) error {\n\tif !isValidDuration(d) {\n\t\treturn errors.New(nil).WithMessage(\"bad request error duration input: %d\", d).WithCode(errors.BadRequestCode)\n\t}\n\n\tif !isValidLevel(level) {\n\t\treturn errors.New(nil).WithMessage(\"bad request error level input: %s\", level).WithCode(errors.BadRequestCode)\n\t}\n\n\tif len(permissions) == 0 {\n\t\treturn errors.New(nil).WithMessage(\"bad request empty permission\").WithCode(errors.BadRequestCode)\n\t}\n\n\tfor _, perm := range permissions {\n\t\tif len(perm.Access) == 0 {\n\t\t\treturn errors.New(nil).WithMessage(\"bad request empty access\").WithCode(errors.BadRequestCode)\n\t\t}\n\t}\n\n\t\/\/ to create a project robot, the permission must be only one project scope.\n\tif level == robot.LEVELPROJECT && len(permissions) > 1 {\n\t\treturn errors.New(nil).WithMessage(\"bad request permission\").WithCode(errors.BadRequestCode)\n\t}\n\treturn nil\n}\n\nfunc (rAPI *robotAPI) updateV2Robot(ctx context.Context, params operation.UpdateRobotParams, r *robot.Robot) error {\n\tif err := rAPI.validate(params.Robot.Duration, params.Robot.Level, params.Robot.Permissions); err != nil {\n\t\treturn err\n\t}\n\tif r.Level != robot.LEVELSYSTEM {\n\t\tprojectID, err := getProjectID(ctx, params.Robot.Permissions[0].Namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif r.ProjectID != projectID {\n\t\t\treturn errors.BadRequestError(nil).WithMessage(\"cannot update the project id of robot\")\n\t\t}\n\t}\n\tif err := rAPI.requireAccess(ctx, params.Robot.Level, params.Robot.Permissions[0].Namespace, rbac.ActionUpdate); err != nil {\n\t\treturn err\n\t}\n\tif params.Robot.Level != r.Level || params.Robot.Name != r.Name {\n\t\treturn errors.BadRequestError(nil).WithMessage(\"cannot update the level or name of robot\")\n\t}\n\n\tif r.Duration != params.Robot.Duration {\n\t\tr.Duration = params.Robot.Duration\n\t\tif params.Robot.Duration == -1 {\n\t\t\tr.ExpiresAt = -1\n\t\t} else if params.Robot.Duration == 0 {\n\t\t\tr.Duration = int64(config.RobotTokenDuration(ctx))\n\t\t\tr.ExpiresAt = r.CreationTime.AddDate(0, 0, config.RobotTokenDuration(ctx)).Unix()\n\t\t} else {\n\t\t\tr.ExpiresAt = r.CreationTime.AddDate(0, 0, int(params.Robot.Duration)).Unix()\n\t\t}\n\t}\n\n\tr.Description = params.Robot.Description\n\tr.Disabled = params.Robot.Disable\n\tif len(params.Robot.Permissions) != 0 {\n\t\tif err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {\n\t\t\tlog.Warningf(\"failed to call JSONCopy on robot permission when updateV2Robot, error: %v\", err)\n\t\t}\n\t}\n\n\tif err := rAPI.robotCtl.Update(ctx, r, &robot.Option{\n\t\tWithPermission: true,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc isValidLevel(l string) bool {\n\treturn l == robot.LEVELSYSTEM || l == robot.LEVELPROJECT\n}\n\nfunc isValidDuration(d int64) bool {\n\treturn d >= int64(-1) && d < math.MaxInt32\n}\n\n\/\/ validateName validates the robot name, especially '+' cannot be a valid character\nfunc validateName(name string) error {\n\trobotNameReg := `^[a-z0-9]+(?:[._-][a-z0-9]+)*$`\n\tlegal := regexp.MustCompile(robotNameReg).MatchString(name)\n\tif !legal {\n\t\treturn errors.BadRequestError(nil).WithMessage(\"robot name is not in lower case or contains illegal characters\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rt_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/v23\/context\"\n\t\"v.io\/v23\/rpc\"\n\t\"v.io\/v23\/services\/appcycle\"\n\t\"v.io\/x\/ref\/lib\/mgmt\"\n\t\"v.io\/x\/ref\/lib\/security\/securityflag\"\n\t_ \"v.io\/x\/ref\/runtime\/factories\/generic\"\n\t\"v.io\/x\/ref\/services\/device\"\n\t\"v.io\/x\/ref\/test\"\n\t\"v.io\/x\/ref\/test\/expect\"\n\t\"v.io\/x\/ref\/test\/modules\"\n)\n\n\/\/ TestBasic verifies that the basic plumbing works: LocalStop calls result in\n\/\/ stop messages being sent on the channel passed to WaitForStop.\nfunc TestBasic(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t\tif want, got := v23.LocalStop, <-ch; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t\tselect {\n\t\tcase s := <-ch:\n\t\t\tt.Errorf(\"channel expected to be empty, got %q instead\", s)\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/\/ TestMultipleWaiters verifies that the plumbing works with more than one\n\/\/ registered wait channel.\nfunc TestMultipleWaiters(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch1 := make(chan string, 1)\n\tm.WaitForStop(ctx, ch1)\n\tch2 := make(chan string, 1)\n\tm.WaitForStop(ctx, ch2)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t\tif want, got := v23.LocalStop, <-ch1; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t\tif want, got := v23.LocalStop, <-ch2; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t}\n}\n\n\/\/ TestMultipleStops verifies that LocalStop does not block even if the wait\n\/\/ channel is not being drained: once the channel's buffer fills up, future\n\/\/ Stops become no-ops.\nfunc TestMultipleStops(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t}\n\tif want, got := v23.LocalStop, <-ch; want != got {\n\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t}\n\tselect {\n\tcase s := <-ch:\n\t\tt.Errorf(\"channel expected to be empty, got %q instead\", s)\n\tdefault:\n\t}\n}\n\nvar noWaiters = modules.Register(func(env *modules.Env, args ...string) error {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tfmt.Fprintf(env.Stdout, \"ready\\n\")\n\tmodules.WaitForEOF(env.Stdin)\n\tm.Stop(ctx)\n\tos.Exit(42) \/\/ This should not be reached.\n\treturn nil\n}, \"noWaiters\")\n\n\/\/ TestNoWaiters verifies that the child process exits in the absence of any\n\/\/ wait channel being registered with its runtime.\nfunc TestNoWaiters(t *testing.T) {\n\tsh, err := modules.NewShell(nil, nil, testing.Verbose(), t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\tdefer sh.Cleanup(os.Stderr, os.Stderr)\n\th, err := sh.Start(nil, noWaiters)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\th.Expect(\"ready\")\n\twant := fmt.Sprintf(\"exit status %d\", testUnhandledStopExitCode)\n\tif err = h.Shutdown(os.Stderr, os.Stderr); err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\nvar forceStop = modules.Register(func(env *modules.Env, args ...string) error {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tfmt.Fprintf(env.Stdout, \"ready\\n\")\n\tmodules.WaitForEOF(env.Stdin)\n\tm.WaitForStop(ctx, make(chan string, 1))\n\tm.ForceStop(ctx)\n\tos.Exit(42) \/\/ This should not be reached.\n\treturn nil\n}, \"forceStop\")\n\n\/\/ TestForceStop verifies that ForceStop causes the child process to exit\n\/\/ immediately.\nfunc TestForceStop(t *testing.T) {\n\tsh, err := modules.NewShell(nil, nil, testing.Verbose(), t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\tdefer sh.Cleanup(os.Stderr, os.Stderr)\n\th, err := sh.Start(nil, forceStop)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\th.Expect(\"ready\")\n\terr = h.Shutdown(os.Stderr, os.Stderr)\n\twant := fmt.Sprintf(\"exit status %d\", testForceStopExitCode)\n\tif err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\nfunc checkProgress(t *testing.T, ch <-chan v23.Task, progress, goal int32) {\n\tif want, got := (v23.Task{Progress: progress, Goal: goal}), <-ch; !reflect.DeepEqual(want, got) {\n\t\tt.Errorf(\"Unexpected progress: want %+v, got %+v\", want, got)\n\t}\n}\n\nfunc checkNoProgress(t *testing.T, ch <-chan v23.Task) {\n\tselect {\n\tcase p := <-ch:\n\t\tt.Errorf(\"channel expected to be empty, got %+v instead\", p)\n\tdefault:\n\t}\n}\n\n\/\/ TestProgress verifies that the ticker update\/track logic works for a single\n\/\/ tracker.\nfunc TestProgress(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\n\tm := v23.GetAppCycle(ctx)\n\tm.AdvanceGoal(50)\n\tch := make(chan v23.Task, 1)\n\tm.TrackTask(ch)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceProgress(10)\n\tcheckProgress(t, ch, 10, 50)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceProgress(5)\n\tcheckProgress(t, ch, 15, 50)\n\tm.AdvanceGoal(50)\n\tcheckProgress(t, ch, 15, 100)\n\tm.AdvanceProgress(1)\n\tcheckProgress(t, ch, 16, 100)\n\tm.AdvanceGoal(10)\n\tcheckProgress(t, ch, 16, 110)\n\tm.AdvanceProgress(-13)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceGoal(0)\n\tcheckNoProgress(t, ch)\n\tshutdown()\n\tif _, ok := <-ch; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\n\/\/ TestProgressMultipleTrackers verifies that the ticker update\/track logic\n\/\/ works for more than one tracker. It also ensures that the runtime doesn't\n\/\/ block when the tracker channels are full.\nfunc TestProgressMultipleTrackers(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\n\tm := v23.GetAppCycle(ctx)\n\t\/\/ ch1 is 1-buffered, ch2 is 2-buffered.\n\tch1, ch2 := make(chan v23.Task, 1), make(chan v23.Task, 2)\n\tm.TrackTask(ch1)\n\tm.TrackTask(ch2)\n\tcheckNoProgress(t, ch1)\n\tcheckNoProgress(t, ch2)\n\tm.AdvanceProgress(1)\n\tcheckProgress(t, ch1, 1, 0)\n\tcheckNoProgress(t, ch1)\n\tcheckProgress(t, ch2, 1, 0)\n\tcheckNoProgress(t, ch2)\n\tfor i := 0; i < 10; i++ {\n\t\tm.AdvanceProgress(1)\n\t}\n\tcheckProgress(t, ch1, 2, 0)\n\tcheckNoProgress(t, ch1)\n\tcheckProgress(t, ch2, 2, 0)\n\tcheckProgress(t, ch2, 3, 0)\n\tcheckNoProgress(t, ch2)\n\tm.AdvanceGoal(4)\n\tcheckProgress(t, ch1, 11, 4)\n\tcheckProgress(t, ch2, 11, 4)\n\tshutdown()\n\tif _, ok := <-ch1; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n\tif _, ok := <-ch2; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\nvar app = modules.Register(func(env *modules.Env, args ...string) error {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfmt.Fprintln(env.Stdout, \"ready\")\n\tfmt.Fprintf(env.Stdout, \"Got %s\\n\", <-ch)\n\tm.AdvanceGoal(10)\n\tfmt.Fprintf(env.Stdout, \"Doing some work\\n\")\n\tm.AdvanceProgress(2)\n\tfmt.Fprintf(env.Stdout, \"Doing some more work\\n\")\n\tm.AdvanceProgress(5)\n\treturn nil\n}, \"app\")\n\ntype configServer struct {\n\tch chan<- string\n}\n\nfunc (c *configServer) Set(_ *context.T, _ rpc.ServerCall, key, value string) error {\n\tif key != mgmt.AppCycleManagerConfigKey {\n\t\treturn fmt.Errorf(\"Unexpected key: %v\", key)\n\t}\n\tc.ch <- value\n\treturn nil\n\n}\n\n\/\/ TODO(caprita): Rewrite this test to not use the modules package. It can't use\n\/\/ v23test because v23test.Shell doesn't support the exec protocol.\nfunc setupRemoteAppCycleMgr(t *testing.T) (*context.T, modules.Handle, appcycle.AppCycleClientMethods, func()) {\n\tctx, shutdown := test.V23Init()\n\n\tch := make(chan string)\n\tservice := device.ConfigServer(&configServer{ch})\n\tauthorizer := securityflag.NewAuthorizerOrDie()\n\t_, configServer, err := v23.WithNewServer(ctx, \"\", service, authorizer)\n\tif err != nil {\n\t\tt.Fatalf(\"Got error: %v\", err)\n\t}\n\tconfigServiceName := configServer.Status().Endpoints[0].Name()\n\n\tsh, err := modules.NewShell(ctx, v23.GetPrincipal(ctx), testing.Verbose(), t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\tsh.SetConfigKey(mgmt.ParentNameConfigKey, configServiceName)\n\tsh.SetConfigKey(mgmt.ProtocolConfigKey, \"tcp\")\n\tsh.SetConfigKey(mgmt.AddressConfigKey, \"127.0.0.1:0\")\n\th, err := sh.Start(nil, app)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\tappCycleName := \"\"\n\tselect {\n\tcase appCycleName = <-ch:\n\tcase <-time.After(time.Minute):\n\t\tt.Errorf(\"timeout\")\n\t}\n\tappCycle := appcycle.AppCycleClient(appCycleName)\n\treturn ctx, h, appCycle, func() {\n\t\tconfigServer.Stop()\n\t\tsh.Cleanup(os.Stderr, os.Stderr)\n\t\tshutdown()\n\t}\n}\n\n\/\/ TestRemoteForceStop verifies that the child process exits when sending it\n\/\/ a remote ForceStop rpc.\nfunc TestRemoteForceStop(t *testing.T) {\n\tctx, h, appCycle, cleanup := setupRemoteAppCycleMgr(t)\n\tdefer cleanup()\n\ts := expect.NewSession(t, h.Stdout(), time.Minute)\n\ts.Expect(\"ready\")\n\tif err := appCycle.ForceStop(ctx); err == nil || !strings.Contains(err.Error(), \"EOF\") {\n\t\tt.Fatalf(\"Expected EOF error, got %v instead\", err)\n\t}\n\ts.ExpectEOF()\n\terr := h.Shutdown(os.Stderr, os.Stderr)\n\twant := fmt.Sprintf(\"exit status %d\", testForceStopExitCode)\n\tif err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\n\/\/ TestRemoteStop verifies that the child shuts down cleanly when sending it\n\/\/ a remote Stop rpc.\nfunc TestRemoteStop(t *testing.T) {\n\tctx, h, appCycle, cleanup := setupRemoteAppCycleMgr(t)\n\tdefer cleanup()\n\ts := expect.NewSession(t, h.Stdout(), time.Minute)\n\ts.Expect(\"ready\")\n\tstream, err := appCycle.Stop(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Got error: %v\", err)\n\t}\n\trStream := stream.RecvStream()\n\texpectTask := func(progress, goal int32) {\n\t\tif !rStream.Advance() {\n\t\t\tt.Fatalf(\"unexpected streaming error: %v\", rStream.Err())\n\t\t}\n\t\ttask := rStream.Value()\n\t\tif task.Progress != progress || task.Goal != goal {\n\t\t\tt.Errorf(\"Got (%d, %d), want (%d, %d)\", task.Progress, task.Goal, progress, goal)\n\t\t}\n\t}\n\texpectTask(0, 10)\n\texpectTask(2, 10)\n\texpectTask(7, 10)\n\tif rStream.Advance() || rStream.Err() != nil {\n\t\tt.Errorf(\"Expected EOF, got (%v, %v) instead\", rStream.Value(), rStream.Err())\n\t}\n\tif err := stream.Finish(); err != nil {\n\t\tt.Errorf(\"Got error %v\", err)\n\t}\n\ts.Expect(fmt.Sprintf(\"Got %s\", v23.RemoteStop))\n\ts.Expect(\"Doing some work\")\n\ts.Expect(\"Doing some more work\")\n\ts.ExpectEOF()\n\tif err := h.Shutdown(os.Stderr, os.Stderr); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n<commit_msg>runtime\/internal\/rt: remove modules from mgmt_test<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rt_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/v23\/context\"\n\t\"v.io\/v23\/rpc\"\n\t\"v.io\/v23\/services\/appcycle\"\n\t\"v.io\/x\/lib\/envvar\"\n\t\"v.io\/x\/lib\/gosh\"\n\t\"v.io\/x\/ref\"\n\tvexec \"v.io\/x\/ref\/lib\/exec\"\n\t\"v.io\/x\/ref\/lib\/mgmt\"\n\t\"v.io\/x\/ref\/lib\/security\/securityflag\"\n\t\"v.io\/x\/ref\/lib\/v23test\"\n\t_ \"v.io\/x\/ref\/runtime\/factories\/generic\"\n\t\"v.io\/x\/ref\/services\/device\"\n\t\"v.io\/x\/ref\/test\"\n\t\"v.io\/x\/ref\/test\/expect\"\n)\n\n\/\/ TestBasic verifies that the basic plumbing works: LocalStop calls result in\n\/\/ stop messages being sent on the channel passed to WaitForStop.\nfunc TestBasic(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t\tif want, got := v23.LocalStop, <-ch; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t\tselect {\n\t\tcase s := <-ch:\n\t\t\tt.Errorf(\"channel expected to be empty, got %q instead\", s)\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/\/ TestMultipleWaiters verifies that the plumbing works with more than one\n\/\/ registered wait channel.\nfunc TestMultipleWaiters(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch1 := make(chan string, 1)\n\tm.WaitForStop(ctx, ch1)\n\tch2 := make(chan string, 1)\n\tm.WaitForStop(ctx, ch2)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t\tif want, got := v23.LocalStop, <-ch1; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t\tif want, got := v23.LocalStop, <-ch2; want != got {\n\t\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t\t}\n\t}\n}\n\n\/\/ TestMultipleStops verifies that LocalStop does not block even if the wait\n\/\/ channel is not being drained: once the channel's buffer fills up, future\n\/\/ Stops become no-ops.\nfunc TestMultipleStops(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tm.Stop(ctx)\n\t}\n\tif want, got := v23.LocalStop, <-ch; want != got {\n\t\tt.Errorf(\"WaitForStop want %q got %q\", want, got)\n\t}\n\tselect {\n\tcase s := <-ch:\n\t\tt.Errorf(\"channel expected to be empty, got %q instead\", s)\n\tdefault:\n\t}\n}\n\nfunc waitForEOF(r io.Reader) {\n\tio.Copy(ioutil.Discard, r)\n}\n\nvar noWaiters = gosh.Register(\"noWaiters\", func() {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\tv23.GetAppCycle(ctx).Stop(ctx)\n\tos.Exit(42) \/\/ This should not be reached.\n})\n\n\/\/ TestNoWaiters verifies that the child process exits in the absence of any\n\/\/ wait channel being registered with its runtime.\nfunc TestNoWaiters(t *testing.T) {\n\tsh := v23test.NewShell(t, v23test.Opts{})\n\tdefer sh.Cleanup()\n\n\tcmd := sh.Fn(noWaiters)\n\tcmd.ExitErrorIsOk = true\n\n\tcmd.Run()\n\twant := fmt.Sprintf(\"exit status %d\", testUnhandledStopExitCode)\n\tif err := cmd.Err; err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\nvar forceStop = gosh.Register(\"forceStop\", func() {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\tm := v23.GetAppCycle(ctx)\n\tm.WaitForStop(ctx, make(chan string, 1))\n\tm.ForceStop(ctx)\n\tos.Exit(42) \/\/ This should not be reached.\n})\n\n\/\/ TestForceStop verifies that ForceStop causes the child process to exit\n\/\/ immediately.\nfunc TestForceStop(t *testing.T) {\n\tsh := v23test.NewShell(t, v23test.Opts{})\n\tdefer sh.Cleanup()\n\n\tcmd := sh.Fn(forceStop)\n\tcmd.ExitErrorIsOk = true\n\n\tcmd.Run()\n\twant := fmt.Sprintf(\"exit status %d\", testForceStopExitCode)\n\tif err := cmd.Err; err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\nfunc checkProgress(t *testing.T, ch <-chan v23.Task, progress, goal int32) {\n\tif want, got := (v23.Task{Progress: progress, Goal: goal}), <-ch; !reflect.DeepEqual(want, got) {\n\t\tt.Errorf(\"Unexpected progress: want %+v, got %+v\", want, got)\n\t}\n}\n\nfunc checkNoProgress(t *testing.T, ch <-chan v23.Task) {\n\tselect {\n\tcase p := <-ch:\n\t\tt.Errorf(\"channel expected to be empty, got %+v instead\", p)\n\tdefault:\n\t}\n}\n\n\/\/ TestProgress verifies that the ticker update\/track logic works for a single\n\/\/ tracker.\nfunc TestProgress(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\n\tm := v23.GetAppCycle(ctx)\n\tm.AdvanceGoal(50)\n\tch := make(chan v23.Task, 1)\n\tm.TrackTask(ch)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceProgress(10)\n\tcheckProgress(t, ch, 10, 50)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceProgress(5)\n\tcheckProgress(t, ch, 15, 50)\n\tm.AdvanceGoal(50)\n\tcheckProgress(t, ch, 15, 100)\n\tm.AdvanceProgress(1)\n\tcheckProgress(t, ch, 16, 100)\n\tm.AdvanceGoal(10)\n\tcheckProgress(t, ch, 16, 110)\n\tm.AdvanceProgress(-13)\n\tcheckNoProgress(t, ch)\n\tm.AdvanceGoal(0)\n\tcheckNoProgress(t, ch)\n\tshutdown()\n\tif _, ok := <-ch; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\n\/\/ TestProgressMultipleTrackers verifies that the ticker update\/track logic\n\/\/ works for more than one tracker. It also ensures that the runtime doesn't\n\/\/ block when the tracker channels are full.\nfunc TestProgressMultipleTrackers(t *testing.T) {\n\tctx, shutdown := test.V23Init()\n\n\tm := v23.GetAppCycle(ctx)\n\t\/\/ ch1 is 1-buffered, ch2 is 2-buffered.\n\tch1, ch2 := make(chan v23.Task, 1), make(chan v23.Task, 2)\n\tm.TrackTask(ch1)\n\tm.TrackTask(ch2)\n\tcheckNoProgress(t, ch1)\n\tcheckNoProgress(t, ch2)\n\tm.AdvanceProgress(1)\n\tcheckProgress(t, ch1, 1, 0)\n\tcheckNoProgress(t, ch1)\n\tcheckProgress(t, ch2, 1, 0)\n\tcheckNoProgress(t, ch2)\n\tfor i := 0; i < 10; i++ {\n\t\tm.AdvanceProgress(1)\n\t}\n\tcheckProgress(t, ch1, 2, 0)\n\tcheckNoProgress(t, ch1)\n\tcheckProgress(t, ch2, 2, 0)\n\tcheckProgress(t, ch2, 3, 0)\n\tcheckNoProgress(t, ch2)\n\tm.AdvanceGoal(4)\n\tcheckProgress(t, ch1, 11, 4)\n\tcheckProgress(t, ch2, 11, 4)\n\tshutdown()\n\tif _, ok := <-ch1; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n\tif _, ok := <-ch2; ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\nvar app = gosh.Register(\"app\", func() {\n\tctx, shutdown := test.V23Init()\n\tdefer shutdown()\n\n\tm := v23.GetAppCycle(ctx)\n\tch := make(chan string, 1)\n\tm.WaitForStop(ctx, ch)\n\tfmt.Println(\"ready\")\n\tfmt.Printf(\"Got %s\\n\", <-ch)\n\tm.AdvanceGoal(10)\n\tfmt.Println(\"Doing some work\")\n\tm.AdvanceProgress(2)\n\tfmt.Println(\"Doing some more work\")\n\tm.AdvanceProgress(5)\n})\n\ntype configServer struct {\n\tch chan<- string\n}\n\nfunc (c *configServer) Set(_ *context.T, _ rpc.ServerCall, key, value string) error {\n\tif key != mgmt.AppCycleManagerConfigKey {\n\t\treturn fmt.Errorf(\"Unexpected key: %v\", key)\n\t}\n\tc.ch <- value\n\treturn nil\n\n}\n\nfunc setupRemoteAppCycleMgr(t *testing.T) (*context.T, *vexec.ParentHandle, *expect.Session, appcycle.AppCycleClientMethods, func()) {\n\tsh := v23test.NewShell(t, v23test.Opts{})\n\tctx := sh.Ctx\n\tfailed := true\n\tdefer func() {\n\t\tif failed {\n\t\t\tsh.Cleanup()\n\t\t}\n\t}()\n\tgoshCmd := sh.Fn(app)\n\n\tch := make(chan string, 1)\n\tservice := device.ConfigServer(&configServer{ch})\n\tauthorizer := securityflag.NewAuthorizerOrDie()\n\t_, configServer, err := v23.WithNewServer(ctx, \"\", service, authorizer)\n\tif err != nil {\n\t\tt.Fatalf(\"WithNewServer failed: %v\", err)\n\t}\n\tconfigServiceName := configServer.Status().Endpoints[0].Name()\n\n\tconfig := vexec.NewConfig()\n\tconfig.Set(mgmt.ParentNameConfigKey, configServiceName)\n\tconfig.Set(mgmt.ProtocolConfigKey, \"tcp\")\n\tconfig.Set(mgmt.AddressConfigKey, \"127.0.0.1:0\")\n\tconfig.Set(mgmt.SecurityAgentPathConfigKey, goshCmd.Vars[ref.EnvAgentPath])\n\n\tcmd := exec.Command(goshCmd.Args[0], goshCmd.Args[1:]...)\n\tcmd.Env = envvar.MapToSlice(goshCmd.Vars)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"StdoutPipe failed: %v\", err)\n\t}\n\thandle := vexec.NewParentHandle(cmd, vexec.ConfigOpt{Config: config})\n\tif err := handle.Start(); err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tif err := handle.WaitForReady(20 * time.Second); err != nil {\n\t\tt.Fatalf(\"WaitForReady failed: %v\", err)\n\t}\n\n\tappCycleName := \"\"\n\tselect {\n\tcase appCycleName = <-ch:\n\tcase <-time.After(time.Minute):\n\t\tt.Errorf(\"timeout\")\n\t}\n\ts := expect.NewSession(t, stdout, time.Minute)\n\ts.Expect(\"ready\")\n\tappCycle := appcycle.AppCycleClient(appCycleName)\n\tfailed = false\n\treturn ctx, handle, s, appCycle, sh.Cleanup\n}\n\n\/\/ TestRemoteForceStop verifies that the child process exits when sending it\n\/\/ a remote ForceStop rpc.\nfunc TestRemoteForceStop(t *testing.T) {\n\tctx, h, s, appCycle, cleanup := setupRemoteAppCycleMgr(t)\n\tdefer cleanup()\n\tif err := appCycle.ForceStop(ctx); err == nil || !strings.Contains(err.Error(), \"EOF\") {\n\t\tt.Fatalf(\"Expected EOF error, got %v instead\", err)\n\t}\n\ts.ExpectEOF()\n\twant := fmt.Sprintf(\"exit status %d\", testForceStopExitCode)\n\tif err := h.Wait(0); err == nil || err.Error() != want {\n\t\tt.Errorf(\"got %v, want %s\", err, want)\n\t}\n}\n\n\/\/ TestRemoteStop verifies that the child shuts down cleanly when sending it\n\/\/ a remote Stop rpc.\nfunc TestRemoteStop(t *testing.T) {\n\tctx, h, s, appCycle, cleanup := setupRemoteAppCycleMgr(t)\n\tdefer cleanup()\n\tstream, err := appCycle.Stop(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Got error: %v\", err)\n\t}\n\trStream := stream.RecvStream()\n\texpectTask := func(progress, goal int32) {\n\t\tif !rStream.Advance() {\n\t\t\tt.Fatalf(\"unexpected streaming error: %v\", rStream.Err())\n\t\t}\n\t\ttask := rStream.Value()\n\t\tif task.Progress != progress || task.Goal != goal {\n\t\t\tt.Errorf(\"Got (%d, %d), want (%d, %d)\", task.Progress, task.Goal, progress, goal)\n\t\t}\n\t}\n\texpectTask(0, 10)\n\texpectTask(2, 10)\n\texpectTask(7, 10)\n\tif rStream.Advance() || rStream.Err() != nil {\n\t\tt.Errorf(\"Expected EOF, got (%v, %v) instead\", rStream.Value(), rStream.Err())\n\t}\n\tif err := stream.Finish(); err != nil {\n\t\tt.Errorf(\"Got error %v\", err)\n\t}\n\ts.Expect(fmt.Sprintf(\"Got %s\", v23.RemoteStop))\n\ts.Expect(\"Doing some work\")\n\ts.Expect(\"Doing some more work\")\n\ts.ExpectEOF()\n\tif err := h.Wait(0); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package maptiles\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"errors\"\n\t\"time\"\n\t\"math\/rand\"\n\n\t\"tileserver\/mapnik\"\n\t\"tileserver\/ligneous\"\n\t\n\tlog \"github.com\/cihub\/seelog\"\n)\n\n\nfunc init() {\n\tlogger, _ := ligneous.InitLogger()\n\tlog.UseLogger(logger)\n}\n\n\nfunc randomNum(min, max int) int {\n rand.Seed(time.Now().Unix())\n return rand.Intn(max - min) + min\n}\n\n\ntype TileCoord struct {\n\tX, Y, Zoom uint64\n\tTms bool\n\tLayer string\n}\n\nfunc (c TileCoord) OSMFilename() string {\n\treturn fmt.Sprintf(\"%d\/%d\/%d.png\", c.Zoom, c.X, c.Y)\n}\n\ntype TileFetchResult struct {\n\tCoord TileCoord\n\tBlobPNG []byte\n}\n\ntype TileFetchRequest struct {\n\tCoord TileCoord\n\tOutChan chan<- TileFetchResult\n}\n\nfunc (c *TileCoord) setTMS(tms bool) {\n\tif c.Tms != tms {\n\t\tc.Y = (1 << c.Zoom) - c.Y - 1\n\t\tc.Tms = tms\n\t}\n}\n\nfunc NewTileRendererChan(stylesheet string) chan<- TileFetchRequest {\n\tc := make(chan TileFetchRequest)\n\n\tgo func(requestChan <-chan TileFetchRequest) {\n\t\tvar err error\n\t\tt := NewTileRenderer(stylesheet)\n\t\tfor request := range requestChan {\n\t\t\tresult := TileFetchResult{request.Coord, nil}\n\t\t\tresult.BlobPNG, err = t.RenderTile(request.Coord)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Error while rendering\", request.Coord, \":\", err.Error())\n\t\t\t\tresult.BlobPNG = nil\n\t\t\t}\n\t\t\trequest.OutChan <- result\n\t\t}\n\t}(c)\n\n\treturn c\n}\n\n\/\/ Renders images as Web Mercator tiles\ntype TileRenderer struct {\n\tm *mapnik.Map\n\tmp mapnik.Projection\n\tproxy bool\n\ts string\n}\n\nfunc NewTileRenderer(stylesheet string) *TileRenderer {\n\tt := new(TileRenderer)\n\tvar err error\n\tif err != nil {\n\t\tlog.Critical(err)\n\t}\n\tt.m = mapnik.NewMap(256, 256)\n\tt.m.Load(stylesheet)\n\tt.mp = t.m.Projection()\n\n\tif strings.Contains(stylesheet, \".xml\") {\n\t\tt.proxy = false\n\t\tt.s = stylesheet\n\t} else if strings.Contains(stylesheet, \"http\") && strings.Contains(stylesheet, \"{z}\/{x}\/{y}.png\") {\n\t\tt.proxy = true\n\t\tt.s = stylesheet\n\t}\n\n\treturn t\n}\n\nfunc (t *TileRenderer) RenderTile(c TileCoord) ([]byte, error) {\n\tc.setTMS(false)\n\tif t.proxy {\n\t\treturn t.HttpGetTileZXY(c.Zoom, c.X, c.Y)\n\t} else {\n\t\treturn t.RenderTileZXY(c.Zoom, c.X, c.Y)\n\t}\n}\n\n\/\/ Render a tile with coordinates in Google tile format.\n\/\/ Most upper left tile is always 0,0. Method is not thread-safe,\n\/\/ so wrap with a mutex when accessing the same renderer by multiple\n\/\/ threads or setup multiple goroutinesand communicate with channels,\n\/\/ see NewTileRendererChan.\nfunc (t *TileRenderer) RenderTileZXY(zoom, x, y uint64) ([]byte, error) {\n\t\/\/ Calculate pixel positions of bottom left & top right\n\tp0 := [2]float64{float64(x) * 256, (float64(y) + 1) * 256}\n\tp1 := [2]float64{(float64(x) + 1) * 256, float64(y) * 256}\n\n\t\/\/ Convert to LatLong(EPSG:4326)\n\tl0 := fromPixelToLL(p0, zoom)\n\tl1 := fromPixelToLL(p1, zoom)\n\n\t\/\/ Convert to map projection (e.g. mercartor co-ords EPSG:3857)\n\tc0 := t.mp.Forward(mapnik.Coord{l0[0], l0[1]})\n\tc1 := t.mp.Forward(mapnik.Coord{l1[0], l1[1]})\n\n\t\/\/ Bounding box for the Tile \n\tt.m.Resize(256, 256)\n\tt.m.ZoomToMinMax(c0.X, c0.Y, c1.X, c1.Y)\n\tt.m.SetBufferSize(128) \n\n\tblob, err := t.m.RenderToMemoryPng()\n\t\n\tlog.Debug( fmt.Sprintf(\"RENDER BLOB %v %v %v %v\", t.s zoom, x, y) )\n\t\n\treturn blob, err\n}\n\nfunc (t *TileRenderer) subDomain() string {\n\tsubs := []string{\"a\",\"b\",\"c\"}\n\tn := randomNum(0,3);\n\treturn subs[n]\n} \n\nfunc (t *TileRenderer) HttpGetTileZXY(zoom, x, y uint64) ([]byte, error) {\n\ttileUrl := strings.Replace(t.s, \"{z}\", fmt.Sprintf(\"%v\", zoom), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{x}\", fmt.Sprintf(\"%v\", x), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{y}\", fmt.Sprintf(\"%v\", y), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{s}\", t.subDomain(), -1);\n\t\n\tresp, err := http.Get(tileUrl)\n\tblob, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\t\n\tlog.Debug( fmt.Sprintf(\"PROXY GET %v %v\", tileUrl, resp.StatusCode) )\n\t\n\tif 200 != resp.StatusCode {\n\t\terr := errors.New(\"Request error: \"+ string(blob))\n\t\treturn []byte{}, err\n\t}\n\n\treturn blob, err\n}\n<commit_msg>logging fix<commit_after>package maptiles\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"errors\"\n\t\"time\"\n\t\"math\/rand\"\n\n\t\"tileserver\/mapnik\"\n\t\"tileserver\/ligneous\"\n\t\n\tlog \"github.com\/cihub\/seelog\"\n)\n\n\nfunc init() {\n\tlogger, _ := ligneous.InitLogger()\n\tlog.UseLogger(logger)\n}\n\n\nfunc randomNum(min, max int) int {\n rand.Seed(time.Now().Unix())\n return rand.Intn(max - min) + min\n}\n\n\ntype TileCoord struct {\n\tX, Y, Zoom uint64\n\tTms bool\n\tLayer string\n}\n\nfunc (c TileCoord) OSMFilename() string {\n\treturn fmt.Sprintf(\"%d\/%d\/%d.png\", c.Zoom, c.X, c.Y)\n}\n\ntype TileFetchResult struct {\n\tCoord TileCoord\n\tBlobPNG []byte\n}\n\ntype TileFetchRequest struct {\n\tCoord TileCoord\n\tOutChan chan<- TileFetchResult\n}\n\nfunc (c *TileCoord) setTMS(tms bool) {\n\tif c.Tms != tms {\n\t\tc.Y = (1 << c.Zoom) - c.Y - 1\n\t\tc.Tms = tms\n\t}\n}\n\nfunc NewTileRendererChan(stylesheet string) chan<- TileFetchRequest {\n\tc := make(chan TileFetchRequest)\n\n\tgo func(requestChan <-chan TileFetchRequest) {\n\t\tvar err error\n\t\tt := NewTileRenderer(stylesheet)\n\t\tfor request := range requestChan {\n\t\t\tresult := TileFetchResult{request.Coord, nil}\n\t\t\tresult.BlobPNG, err = t.RenderTile(request.Coord)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Error while rendering\", request.Coord, \":\", err.Error())\n\t\t\t\tresult.BlobPNG = nil\n\t\t\t}\n\t\t\trequest.OutChan <- result\n\t\t}\n\t}(c)\n\n\treturn c\n}\n\n\/\/ Renders images as Web Mercator tiles\ntype TileRenderer struct {\n\tm *mapnik.Map\n\tmp mapnik.Projection\n\tproxy bool\n\ts string\n}\n\nfunc NewTileRenderer(stylesheet string) *TileRenderer {\n\tt := new(TileRenderer)\n\tvar err error\n\tif err != nil {\n\t\tlog.Critical(err)\n\t}\n\tt.m = mapnik.NewMap(256, 256)\n\tt.m.Load(stylesheet)\n\tt.mp = t.m.Projection()\n\n\tif strings.Contains(stylesheet, \".xml\") {\n\t\tt.proxy = false\n\t\tt.s = stylesheet\n\t} else if strings.Contains(stylesheet, \"http\") && strings.Contains(stylesheet, \"{z}\/{x}\/{y}.png\") {\n\t\tt.proxy = true\n\t\tt.s = stylesheet\n\t}\n\n\treturn t\n}\n\nfunc (t *TileRenderer) RenderTile(c TileCoord) ([]byte, error) {\n\tc.setTMS(false)\n\tif t.proxy {\n\t\treturn t.HttpGetTileZXY(c.Zoom, c.X, c.Y)\n\t} else {\n\t\treturn t.RenderTileZXY(c.Zoom, c.X, c.Y)\n\t}\n}\n\n\/\/ Render a tile with coordinates in Google tile format.\n\/\/ Most upper left tile is always 0,0. Method is not thread-safe,\n\/\/ so wrap with a mutex when accessing the same renderer by multiple\n\/\/ threads or setup multiple goroutinesand communicate with channels,\n\/\/ see NewTileRendererChan.\nfunc (t *TileRenderer) RenderTileZXY(zoom, x, y uint64) ([]byte, error) {\n\t\/\/ Calculate pixel positions of bottom left & top right\n\tp0 := [2]float64{float64(x) * 256, (float64(y) + 1) * 256}\n\tp1 := [2]float64{(float64(x) + 1) * 256, float64(y) * 256}\n\n\t\/\/ Convert to LatLong(EPSG:4326)\n\tl0 := fromPixelToLL(p0, zoom)\n\tl1 := fromPixelToLL(p1, zoom)\n\n\t\/\/ Convert to map projection (e.g. mercartor co-ords EPSG:3857)\n\tc0 := t.mp.Forward(mapnik.Coord{l0[0], l0[1]})\n\tc1 := t.mp.Forward(mapnik.Coord{l1[0], l1[1]})\n\n\t\/\/ Bounding box for the Tile \n\tt.m.Resize(256, 256)\n\tt.m.ZoomToMinMax(c0.X, c0.Y, c1.X, c1.Y)\n\tt.m.SetBufferSize(128) \n\n\tblob, err := t.m.RenderToMemoryPng()\n\t\n\tlog.Debug( fmt.Sprintf(\"RENDER BLOB %v %v %v %v\", t.s, zoom, x, y) )\n\t\n\treturn blob, err\n}\n\nfunc (t *TileRenderer) subDomain() string {\n\tsubs := []string{\"a\",\"b\",\"c\"}\n\tn := randomNum(0,3);\n\treturn subs[n]\n} \n\nfunc (t *TileRenderer) HttpGetTileZXY(zoom, x, y uint64) ([]byte, error) {\n\ttileUrl := strings.Replace(t.s, \"{z}\", fmt.Sprintf(\"%v\", zoom), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{x}\", fmt.Sprintf(\"%v\", x), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{y}\", fmt.Sprintf(\"%v\", y), -1);\n\ttileUrl = strings.Replace(tileUrl, \"{s}\", t.subDomain(), -1);\n\t\n\tresp, err := http.Get(tileUrl)\n\tblob, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\t\n\tlog.Debug( fmt.Sprintf(\"PROXY GET %v %v\", tileUrl, resp.StatusCode) )\n\t\n\tif 200 != resp.StatusCode {\n\t\terr := errors.New(\"Request error: \"+ string(blob))\n\t\treturn []byte{}, err\n\t}\n\n\treturn blob, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ostype is define OS type of SakuraCloud public archive\npackage ostype\n\n\/\/go:generate stringer -type=ArchiveOSTypes\n\n\/\/ ArchiveOSTypes パブリックアーカイブOS種別\ntype ArchiveOSTypes int\n\nconst (\n\t\/\/ CentOS OS種別:CentOS\n\tCentOS ArchiveOSTypes = iota\n\t\/\/ Ubuntu OS種別:Ubuntu\n\tUbuntu\n\t\/\/ Debian OS種別:Debian\n\tDebian\n\t\/\/ VyOS OS種別:VyOS\n\tVyOS\n\t\/\/ CoreOS OS種別:CoreOS\n\tCoreOS\n\t\/\/ RancherOS OS種別:RancherOS\n\tRancherOS\n\t\/\/ Kusanagi OS種別:Kusanagi(CentOS)\n\tKusanagi\n\t\/\/ SiteGuard OS種別:SiteGuard(CentOS)\n\tSiteGuard\n\t\/\/ Plesk OS種別:Plesk(CentOS)\n\tPlesk\n\t\/\/ FreeBSD OS種別:FreeBSD\n\tFreeBSD\n\t\/\/ Windows2012 OS種別:Windows Server 2012 R2 Datacenter Edition\n\tWindows2012\n\t\/\/ Windows2012RDS OS種別:Windows Server 2012 R2 for RDS\n\tWindows2012RDS\n\t\/\/ Windows2012RDSOffice OS種別:Windows Server 2012 R2 for RDS(Office)\n\tWindows2012RDSOffice\n\t\/\/ Windows2016 OS種別:Windows Server 2016 Datacenter Edition\n\tWindows2016\n\t\/\/ Windows2016RDS OS種別:Windows Server 2016 RDS\n\tWindows2016RDS\n\t\/\/ Windows2016RDSOffice OS種別:Windows Server 2016 RDS(Office)\n\tWindows2016RDSOffice\n\t\/\/ Windows2016SQLServerWeb OS種別:Windows Server 2016 SQLServer(Web)\n\tWindows2016SQLServerWeb\n\t\/\/ Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer(Standard)\n\tWindows2016SQLServerStandard\n\t\/\/ Custom OS種別:カスタム\n\tCustom\n)\n\n\/\/ IsWindows Windowsか\nfunc (o ArchiveOSTypes) IsWindows() bool {\n\tswitch o {\n\tcase Windows2012, Windows2012RDS, Windows2012RDSOffice,\n\t\tWindows2016, Windows2016RDS, Windows2016RDSOffice,\n\t\tWindows2016SQLServerWeb, Windows2016SQLServerStandard:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ IsSupportDiskEdit ディスクの修正機能をフルサポートしているか(Windowsは一部サポートのためfalseを返す)\nfunc (o ArchiveOSTypes) IsSupportDiskEdit() bool {\n\tswitch o {\n\tcase CentOS, Ubuntu, Debian, VyOS, CoreOS, RancherOS, Kusanagi, SiteGuard, Plesk, FreeBSD:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>Add StrToOSType func<commit_after>\/\/ Package ostype is define OS type of SakuraCloud public archive\npackage ostype\n\n\/\/go:generate stringer -type=ArchiveOSTypes\n\n\/\/ ArchiveOSTypes パブリックアーカイブOS種別\ntype ArchiveOSTypes int\n\nconst (\n\t\/\/ CentOS OS種別:CentOS\n\tCentOS ArchiveOSTypes = iota\n\t\/\/ Ubuntu OS種別:Ubuntu\n\tUbuntu\n\t\/\/ Debian OS種別:Debian\n\tDebian\n\t\/\/ VyOS OS種別:VyOS\n\tVyOS\n\t\/\/ CoreOS OS種別:CoreOS\n\tCoreOS\n\t\/\/ RancherOS OS種別:RancherOS\n\tRancherOS\n\t\/\/ Kusanagi OS種別:Kusanagi(CentOS)\n\tKusanagi\n\t\/\/ SiteGuard OS種別:SiteGuard(CentOS)\n\tSiteGuard\n\t\/\/ Plesk OS種別:Plesk(CentOS)\n\tPlesk\n\t\/\/ FreeBSD OS種別:FreeBSD\n\tFreeBSD\n\t\/\/ Windows2012 OS種別:Windows Server 2012 R2 Datacenter Edition\n\tWindows2012\n\t\/\/ Windows2012RDS OS種別:Windows Server 2012 R2 for RDS\n\tWindows2012RDS\n\t\/\/ Windows2012RDSOffice OS種別:Windows Server 2012 R2 for RDS(Office)\n\tWindows2012RDSOffice\n\t\/\/ Windows2016 OS種別:Windows Server 2016 Datacenter Edition\n\tWindows2016\n\t\/\/ Windows2016RDS OS種別:Windows Server 2016 RDS\n\tWindows2016RDS\n\t\/\/ Windows2016RDSOffice OS種別:Windows Server 2016 RDS(Office)\n\tWindows2016RDSOffice\n\t\/\/ Windows2016SQLServerWeb OS種別:Windows Server 2016 SQLServer(Web)\n\tWindows2016SQLServerWeb\n\t\/\/ Windows2016SQLServerStandard OS種別:Windows Server 2016 SQLServer(Standard)\n\tWindows2016SQLServerStandard\n\t\/\/ Custom OS種別:カスタム\n\tCustom\n)\n\n\/\/ IsWindows Windowsか\nfunc (o ArchiveOSTypes) IsWindows() bool {\n\tswitch o {\n\tcase Windows2012, Windows2012RDS, Windows2012RDSOffice,\n\t\tWindows2016, Windows2016RDS, Windows2016RDSOffice,\n\t\tWindows2016SQLServerWeb, Windows2016SQLServerStandard:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ IsSupportDiskEdit ディスクの修正機能をフルサポートしているか(Windowsは一部サポートのためfalseを返す)\nfunc (o ArchiveOSTypes) IsSupportDiskEdit() bool {\n\tswitch o {\n\tcase CentOS, Ubuntu, Debian, VyOS, CoreOS, RancherOS, Kusanagi, SiteGuard, Plesk, FreeBSD:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc StrToOSType(osType string) ArchiveOSTypes {\n\tswitch osType {\n\tcase \"centos\":\n\t\treturn CentOS\n\tcase \"ubuntu\":\n\t\treturn Ubuntu\n\tcase \"debian\":\n\t\treturn Debian\n\tcase \"vyos\":\n\t\treturn VyOS\n\tcase \"coreos\":\n\t\treturn CoreOS\n\tcase \"rancheros\":\n\t\treturn RancherOS\n\tcase \"kusanagi\":\n\t\treturn Kusanagi\n\tcase \"site-guard\":\n\t\treturn SiteGuard\n\tcase \"plesk\":\n\t\treturn Plesk\n\tcase \"freebsd\":\n\t\treturn FreeBSD\n\tcase \"windows2012\":\n\t\treturn Windows2012\n\tcase \"windows2012-rds\":\n\t\treturn Windows2012RDS\n\tcase \"windows2012-rds-office\":\n\t\treturn Windows2012RDSOffice\n\tcase \"windows2016\":\n\t\treturn Windows2016\n\tcase \"windows2016-rds\":\n\t\treturn Windows2016RDS\n\tcase \"windows2016-rds-office\":\n\t\treturn Windows2016RDSOffice\n\tcase \"windows2016-sql-web\":\n\t\treturn Windows2016SQLServerWeb\n\tcase \"windows2016-sql-standard\":\n\t\treturn Windows2016SQLServerStandard\n\tdefault:\n\t\treturn Custom\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/jacobsa\/fuse\/samples\"\n\t\"github.com\/jacobsa\/fuse\/samples\/flushfs\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFlushFS(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype FlushFSTest struct {\n\tsamples.SampleTest\n\n\tmu sync.Mutex\n\n\t\/\/ GUARDED_BY(mu)\n\tflushes []string\n\tflushErr error\n\n\t\/\/ GUARDED_BY(mu)\n\tfsyncs []string\n\tfsyncErr error\n}\n\nfunc init() { RegisterTestSuite(&FlushFSTest{}) }\n\nfunc (t *FlushFSTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\t\/\/ Set up a file system.\n\treportTo := func(slice *[]string, err *error) func(string) error {\n\t\treturn func(s string) error {\n\t\t\tt.mu.Lock()\n\t\t\tdefer t.mu.Unlock()\n\n\t\t\t*slice = append(*slice, s)\n\t\t\treturn *err\n\t\t}\n\t}\n\n\tt.FileSystem, err = flushfs.NewFileSystem(\n\t\treportTo(&t.flushes, &t.flushErr),\n\t\treportTo(&t.fsyncs, &t.fsyncErr))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Mount it.\n\tt.SampleTest.SetUp(ti)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Match byte slices equal to the supplied string.\nfunc byteSliceEq(expected string) Matcher {\n\tpred := func(c interface{}) error {\n\t\tslice, ok := c.([]byte)\n\t\tif !ok {\n\t\t\treturn errors.New(\"which is not []byte\")\n\t\t}\n\n\t\tif string(slice) != expected {\n\t\t\treturn fmt.Errorf(\"which is string \\\"%s\\\"\", string(slice))\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn NewMatcher(\n\t\tpred,\n\t\tfmt.Sprintf(\"byte slice equal to string \\\"%s\\\"\", expected))\n}\n\n\/\/ Return a copy of the current contents of t.flushes.\n\/\/\n\/\/ LOCKS_EXCLUDED(t.mu)\nfunc (t *FlushFSTest) getFlushes() (p []string) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tp = make([]string, len(t.flushes))\n\tcopy(p, t.flushes)\n\treturn\n}\n\n\/\/ Return a copy of the current contents of t.fsyncs.\n\/\/\n\/\/ LOCKS_EXCLUDED(t.mu)\nfunc (t *FlushFSTest) getFsyncs() (p []string) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tp = make([]string, len(t.fsyncs))\n\tcopy(p, t.fsyncs)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *FlushFSTest) CloseReports_ReadWrite() {\n\tvar n int\n\tvar off int64\n\tvar err error\n\tbuf := make([]byte, 1024)\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_RDWR, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Write some contents to the file.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Seek and read them back.\n\toff, err = f.Seek(0, 0)\n\tAssertEq(nil, err)\n\tAssertEq(0, off)\n\n\tn, err = f.Read(buf)\n\tAssertThat(err, AnyOf(nil, io.EOF))\n\tAssertEq(\"taco\", string(buf[:n]))\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_ReadOnly() {\n\tvar err error\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_RDONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_WriteOnly() {\n\tvar n int\n\tvar err error\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Write some contents to the file.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_MultipleTimes_NonOverlappingFileHandles() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) CloseReports_MultipleTimes_OverlappingFileHandles() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) CloseError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) FsyncReports() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) FsyncError() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>FlushFSTest.CloseReports_MultipleTimes_NonOverlappingFileHandles<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/jacobsa\/fuse\/samples\"\n\t\"github.com\/jacobsa\/fuse\/samples\/flushfs\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFlushFS(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype FlushFSTest struct {\n\tsamples.SampleTest\n\n\tmu sync.Mutex\n\n\t\/\/ GUARDED_BY(mu)\n\tflushes []string\n\tflushErr error\n\n\t\/\/ GUARDED_BY(mu)\n\tfsyncs []string\n\tfsyncErr error\n}\n\nfunc init() { RegisterTestSuite(&FlushFSTest{}) }\n\nfunc (t *FlushFSTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\t\/\/ Set up a file system.\n\treportTo := func(slice *[]string, err *error) func(string) error {\n\t\treturn func(s string) error {\n\t\t\tt.mu.Lock()\n\t\t\tdefer t.mu.Unlock()\n\n\t\t\t*slice = append(*slice, s)\n\t\t\treturn *err\n\t\t}\n\t}\n\n\tt.FileSystem, err = flushfs.NewFileSystem(\n\t\treportTo(&t.flushes, &t.flushErr),\n\t\treportTo(&t.fsyncs, &t.fsyncErr))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Mount it.\n\tt.SampleTest.SetUp(ti)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Match byte slices equal to the supplied string.\nfunc byteSliceEq(expected string) Matcher {\n\tpred := func(c interface{}) error {\n\t\tslice, ok := c.([]byte)\n\t\tif !ok {\n\t\t\treturn errors.New(\"which is not []byte\")\n\t\t}\n\n\t\tif string(slice) != expected {\n\t\t\treturn fmt.Errorf(\"which is string \\\"%s\\\"\", string(slice))\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn NewMatcher(\n\t\tpred,\n\t\tfmt.Sprintf(\"byte slice equal to string \\\"%s\\\"\", expected))\n}\n\n\/\/ Return a copy of the current contents of t.flushes.\n\/\/\n\/\/ LOCKS_EXCLUDED(t.mu)\nfunc (t *FlushFSTest) getFlushes() (p []string) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tp = make([]string, len(t.flushes))\n\tcopy(p, t.flushes)\n\treturn\n}\n\n\/\/ Return a copy of the current contents of t.fsyncs.\n\/\/\n\/\/ LOCKS_EXCLUDED(t.mu)\nfunc (t *FlushFSTest) getFsyncs() (p []string) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tp = make([]string, len(t.fsyncs))\n\tcopy(p, t.fsyncs)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *FlushFSTest) CloseReports_ReadWrite() {\n\tvar n int\n\tvar off int64\n\tvar err error\n\tbuf := make([]byte, 1024)\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_RDWR, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Write some contents to the file.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Seek and read them back.\n\toff, err = f.Seek(0, 0)\n\tAssertEq(nil, err)\n\tAssertEq(0, off)\n\n\tn, err = f.Read(buf)\n\tAssertThat(err, AnyOf(nil, io.EOF))\n\tAssertEq(\"taco\", string(buf[:n]))\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_ReadOnly() {\n\tvar err error\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_RDONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_WriteOnly() {\n\tvar n int\n\tvar err error\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Write some contents to the file.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tExpectThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tExpectThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_MultipleTimes_NonOverlappingFileHandles() {\n\tvar n int\n\tvar err error\n\n\t\/\/ Open the file.\n\tf, err := os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Write some contents to the file.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ At this point, no flushes or fsyncs should have happened.\n\tAssertThat(t.getFlushes(), ElementsAre())\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\t\/\/ Now we should have received the flush operation (but still no fsync).\n\tAssertThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Open the file again.\n\tf, err = os.OpenFile(path.Join(t.Dir, \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\t\/\/ Write again; expect no further flushes.\n\tn, err = f.Write([]byte(\"p\"))\n\tAssertEq(nil, err)\n\tAssertEq(1, n)\n\n\tAssertThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\")))\n\tAssertThat(t.getFsyncs(), ElementsAre())\n\n\t\/\/ Close the file. Now the new contents should be flushed.\n\terr = f.Close()\n\tf = nil\n\tAssertEq(nil, err)\n\n\tAssertThat(t.getFlushes(), ElementsAre(byteSliceEq(\"taco\"), byteSliceEq(\"paco\")))\n\tAssertThat(t.getFsyncs(), ElementsAre())\n}\n\nfunc (t *FlushFSTest) CloseReports_MultipleTimes_OverlappingFileHandles() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) CloseError() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) FsyncReports() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *FlushFSTest) FsyncError() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>package lechan\n\nimport (\n\t\"time\"\n\n\t\"sync\/atomic\"\n)\n\ntype Lechan struct {\n\tsize int32\n\titems int32\n\tqueue chan interface{}\n\tinput chan interface{}\n\toutput chan interface{}\n\tclose chan struct{}\n}\n\nfunc New(size int) *Lechan {\n\tch := &Lechan{\n\t\tsize: int32(size),\n\t\tqueue: make(chan interface{}, size),\n\t\tclose: make(chan struct{}, 1),\n\t}\n\tgo ch.run()\n\treturn ch\n}\n\nfunc (ch *Lechan) Put(value interface{}) {\n\tch.queue <- value\n\tatomic.AddInt32(&ch.items, 1)\n}\n\nfunc (ch *Lechan) TryPut(value interface{}) bool {\n\tif atomic.LoadInt32(&ch.items) == ch.size {\n\t\treturn false\n\t}\n\tch.queue <- value\n\tatomic.AddInt32(&ch.items, 1)\n\treturn true\n}\n\nfunc (ch *Lechan) TryCountedPut(value interface{}, count int) bool {\n\tfor i := 0; i < count; i++ {\n\t\tif ch.TryPut(value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (ch *Lechan) TryTimedPut(value interface{}, duration time.Duration) bool {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(duration):\n\t\t\treturn false\n\t\tdefault:\n\t\t\tif ch.TryPut(value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (ch *Lechan) TryLimitedPut(value interface{}, count int, delay time.Duration) bool {\n\tfor i := 0; i < count; i++ {\n\t\tif ch.TryTimedPut(value, delay) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (ch *Lechan) Get() (value interface{}, ok bool) {\n\tif atomic.LoadInt32(&ch.items) == 0 {\n\t\treturn nil, false\n\t}\n\tvalue = <-ch.queue\n\tatomic.AddInt32(&ch.items, -1)\n\treturn value, true\n}\n\nfunc (ch *Lechan) In() chan<- interface{} {\n\treturn ch.input\n}\n\nfunc (ch *Lechan) Out() <-chan interface{} {\n\treturn ch.output\n}\n\nfunc (ch *Lechan) Close() {\n\tch.close <- struct{}{}\n}\n\nfunc (ch *Lechan) run() {\n\tvar next interface{}\n\n\tfor {\n\t\tselect {\n\t\tcase ch.output <- next:\n\n\t\tdefault:\n\t\t\tselect {\n\t\t\tdefault:\n\t\t\t\ttmp, ok := ch.Get()\n\t\t\t\tif ok {\n\t\t\t\t\tnext = tmp\n\t\t\t\t}\n\n\t\t\tcase value := <-ch.input:\n\t\t\t\tch.TryPut(value)\n\n\t\t\tcase <-ch.close:\n\t\t\t\tclose(ch.input)\n\t\t\t\tclose(ch.output)\n\t\t\t\tclose(ch.queue)\n\t\t\t\tclose(ch.close)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>lechan: fix<commit_after>package lechan\n\nimport (\n\t\"time\"\n\n\t\"sync\/atomic\"\n)\n\ntype Lechan struct {\n\tsize int32\n\titems int32\n\tqueue chan interface{}\n\tinput chan interface{}\n\toutput chan interface{}\n\tclose chan struct{}\n}\n\nfunc New(size int) *Lechan {\n\tch := &Lechan{\n\t\tsize: int32(size),\n\t\tqueue: make(chan interface{}, size),\n\t\tclose: make(chan struct{}, 1),\n\t}\n\tgo ch.run()\n\treturn ch\n}\n\nfunc (ch *Lechan) Put(value interface{}) {\n\tch.queue <- value\n\tatomic.AddInt32(&ch.items, 1)\n}\n\nfunc (ch *Lechan) TryPut(value interface{}) bool {\n\tif atomic.LoadInt32(&ch.items) == ch.size {\n\t\treturn false\n\t}\n\tch.queue <- value\n\tatomic.AddInt32(&ch.items, 1)\n\treturn true\n}\n\nfunc (ch *Lechan) TryCountedPut(value interface{}, count int) bool {\n\tfor i := 0; i < count; i++ {\n\t\tif ch.TryPut(value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (ch *Lechan) TryTimedPut(value interface{}, duration time.Duration) bool {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(duration):\n\t\t\treturn false\n\t\tdefault:\n\t\t\tif ch.TryPut(value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (ch *Lechan) TryLimitedPut(value interface{}, count int, delay time.Duration) bool {\n\tfor i := 0; i < count; i++ {\n\t\tif ch.TryTimedPut(value, delay) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (ch *Lechan) Get() (value interface{}, ok bool) {\n\tif atomic.LoadInt32(&ch.items) == 0 {\n\t\treturn nil, false\n\t}\n\tvalue = <-ch.queue\n\tatomic.AddInt32(&ch.items, -1)\n\treturn value, true\n}\n\nfunc (ch *Lechan) In() chan<- interface{} {\n\treturn ch.input\n}\n\nfunc (ch *Lechan) Out() <-chan interface{} {\n\treturn ch.output\n}\n\nfunc (ch *Lechan) Close() {\n\tch.close <- struct{}{}\n}\n\nfunc (ch *Lechan) run() {\n\tvar next interface{}\n\n\tfor {\n\t\tselect {\n\t\tcase ch.output <- next:\n\n\t\tdefault:\n\t\t\tselect {\n\t\t\tcase next = <-ch.queue:\n\t\t\t\tatomic.AddInt32(&ch.items, -1)\n\n\t\t\tdefault:\n\t\t\t\tselect {\n\t\t\t\tcase value := <-ch.input:\n\t\t\t\t\tch.TryPut(value)\n\n\t\t\t\tcase <-ch.close:\n\t\t\t\t\tclose(ch.input)\n\t\t\t\t\tclose(ch.output)\n\t\t\t\t\tclose(ch.queue)\n\t\t\t\t\tclose(ch.close)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package blackjack\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\ntype Suit string\n\nconst (\n\tSuitSpades Suit = \"Spades\"\n\tSuitHearts = \"Hearts\"\n\tSuitClubs = \"Clubs\"\n\tSuitDiamonds = \"Diamonds\"\n\tSuitJokers = \"Jokers\"\n)\n\ntype Rank int\n\nconst (\n\tRankJoker Rank = iota\n\tRankAce\n\tRank2\n\tRank3\n\tRank4\n\tRank5\n\tRank6\n\tRank7\n\tRank8\n\tRank9\n\tRank10\n\tRankJack\n\tRankQueen\n\tRankKing\n)\n\ntype Card struct {\n\tSuit Suit\n\tRank Rank\n}\n\nfunc (c *Card) Props() []string {\n\treturn boardgame.PropertyReaderPropsImpl(c)\n}\n\nfunc (c *Card) Prop(name string) interface{} {\n\treturn boardgame.PropertyReaderPropImpl(c, name)\n}\n\nfunc (c *Card) String() string {\n\treturn fmt.Sprintf(\"%s %d\", c.Suit, c.Rank)\n}\n\n\/\/Designed to be used with stack.ComponentValues()\nfunc cards(in []boardgame.PropertyReader) []*Card {\n\tresult := make([]*Card, len(in))\n\tfor i := 0; i < len(in); i++ {\n\t\tc := in[i]\n\t\tif c == nil {\n\t\t\tresult[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tresult[i] = c.(*Card)\n\t}\n\treturn result\n}\n<commit_msg>Switch the card String constants to be the unicode suit characters. Part of #16.<commit_after>package blackjack\n\nimport (\n\t\"fmt\"\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\ntype Suit string\n\nconst (\n\tSuitSpades Suit = \"\\u2660\"\n\tSuitHearts = \"\\u2665\"\n\tSuitClubs = \"\\u2663\"\n\tSuitDiamonds = \"\\u2666\"\n\tSuitJokers = \"Jokers\"\n)\n\ntype Rank int\n\nconst (\n\tRankJoker Rank = iota\n\tRankAce\n\tRank2\n\tRank3\n\tRank4\n\tRank5\n\tRank6\n\tRank7\n\tRank8\n\tRank9\n\tRank10\n\tRankJack\n\tRankQueen\n\tRankKing\n)\n\ntype Card struct {\n\tSuit Suit\n\tRank Rank\n}\n\nfunc (c *Card) Props() []string {\n\treturn boardgame.PropertyReaderPropsImpl(c)\n}\n\nfunc (c *Card) Prop(name string) interface{} {\n\treturn boardgame.PropertyReaderPropImpl(c, name)\n}\n\nfunc (c *Card) String() string {\n\treturn fmt.Sprintf(\"%s %d\", c.Suit, c.Rank)\n}\n\n\/\/Designed to be used with stack.ComponentValues()\nfunc cards(in []boardgame.PropertyReader) []*Card {\n\tresult := make([]*Card, len(in))\n\tfor i := 0; i < len(in); i++ {\n\t\tc := in[i]\n\t\tif c == nil {\n\t\t\tresult[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tresult[i] = c.(*Card)\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright (c) 2016 VMware, Inc. All Rights Reserved.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/vmware\/harbor\/api\"\n\t\"github.com\/vmware\/harbor\/controllers\"\n\t\"github.com\/vmware\/harbor\/service\"\n\t\"github.com\/vmware\/harbor\/service\/token\"\n\n\t\"github.com\/astaxie\/beego\"\n)\n\nfunc initRouters() {\n\n\tbeego.SetStaticPath(\"registry\/static\/i18n\", \"static\/i18n\")\n\tbeego.SetStaticPath(\"registry\/static\/resources\", \"static\/resources\")\n\tbeego.SetStaticPath(\"registry\/static\/vendors\", \"static\/vendors\")\n\n\tbeego.Router(\"\/login\", &controllers.CommonController{}, \"post:Login\")\n\tbeego.Router(\"\/logout\", &controllers.CommonController{}, \"get:Logout\")\n\tbeego.Router(\"\/language\", &controllers.CommonController{}, \"get:SwitchLanguage\")\n\tbeego.Router(\"\/userExists\", &controllers.CommonController{}, \"post:UserExists\")\n\tbeego.Router(\"\/reset\", &controllers.CommonController{}, \"post:ResetPassword\")\n\tbeego.Router(\"\/sendEmail\", &controllers.CommonController{}, \"get:SendEmail\")\n\n\tbeego.Router(\"\/\", &controllers.IndexController{})\n\tbeego.Router(\"\/signIn\", &controllers.SignInController{})\n\tbeego.Router(\"\/register\", &controllers.RegisterController{})\n\tbeego.Router(\"\/addUser\", &controllers.AddUserController{})\n\tbeego.Router(\"\/forgotPassword\", &controllers.ForgotPasswordController{})\n\tbeego.Router(\"\/resetPassword\", &controllers.ResetPasswordController{})\n\tbeego.Router(\"\/changePassword\", &controllers.ChangePasswordController{})\n\n\tbeego.Router(\"\/registry\/project\", &controllers.ProjectController{})\n\tbeego.Router(\"\/registry\/detail\", &controllers.ItemDetailController{})\n\n\tbeego.Router(\"\/search\", &controllers.SearchController{})\n\n\t\/\/API:\n\tbeego.Router(\"\/api\/search\", &api.SearchAPI{})\n\tbeego.Router(\"\/api\/projects\/:pid\/members\/?:mid\", &api.ProjectMemberAPI{})\n\tbeego.Router(\"\/api\/projects\/\", &api.ProjectAPI{}, \"get:List\")\n\tbeego.Router(\"\/api\/projects\/?:id\", &api.ProjectAPI{})\n\tbeego.Router(\"\/api\/projects\/:id\/toggle_project_public\", &api.ProjectAPI{}, \"post:ToggleProjectPublic\")\n\tbeego.Router(\"\/api\/statistics\", &api.StatisticAPI{})\n\tbeego.Router(\"\/api\/projects\/:id\/logs\/filter\", &api.ProjectAPI{}, \"post:FilterAccessLog\")\n\tbeego.Router(\"\/api\/users\", &api.UserAPI{})\n\tbeego.Router(\"\/api\/users\/?:id\", &api.UserAPI{})\n\tbeego.Router(\"\/api\/users\/:id\/password\", &api.UserAPI{}, \"put:ChangePassword\")\n\tbeego.Router(\"\/api\/users\/:id\/toggle_user_admin\", &api.UserAPI{}, \"post:ToggleUserAdminRole\")\n\tbeego.Router(\"\/api\/repositories\", &api.RepositoryAPI{})\n\tbeego.Router(\"\/api\/repositories\/tags\", &api.RepositoryAPI{}, \"get:GetTags\")\n\tbeego.Router(\"\/api\/repositories\/manifests\", &api.RepositoryAPI{}, \"get:GetManifests\")\n\tbeego.Router(\"api\/logs\", &api.LogAPI{})\n\n\t\/\/external service that hosted on harbor process:\n\tbeego.Router(\"\/service\/notifications\", &service.NotificationHandler{})\n\tbeego.Router(\"\/service\/token\", &token.Handler{})\n}\n<commit_msg>modify router<commit_after>\/*\n Copyright (c) 2016 VMware, Inc. All Rights Reserved.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"github.com\/vmware\/harbor\/api\"\n\t\"github.com\/vmware\/harbor\/controllers\"\n\t\"github.com\/vmware\/harbor\/service\"\n\t\"github.com\/vmware\/harbor\/service\/token\"\n\n\t\"github.com\/astaxie\/beego\"\n)\n\nfunc initRouters() {\n\n\tbeego.SetStaticPath(\"registry\/static\/i18n\", \"static\/i18n\")\n\tbeego.SetStaticPath(\"registry\/static\/resources\", \"static\/resources\")\n\tbeego.SetStaticPath(\"registry\/static\/vendors\", \"static\/vendors\")\n\n\tbeego.Router(\"\/login\", &controllers.CommonController{}, \"post:Login\")\n\tbeego.Router(\"\/logout\", &controllers.CommonController{}, \"get:Logout\")\n\tbeego.Router(\"\/language\", &controllers.CommonController{}, \"get:SwitchLanguage\")\n\tbeego.Router(\"\/userExists\", &controllers.CommonController{}, \"post:UserExists\")\n\tbeego.Router(\"\/reset\", &controllers.CommonController{}, \"post:ResetPassword\")\n\tbeego.Router(\"\/sendEmail\", &controllers.CommonController{}, \"get:SendEmail\")\n\n\tbeego.Router(\"\/\", &controllers.IndexController{})\n\tbeego.Router(\"\/signIn\", &controllers.SignInController{})\n\tbeego.Router(\"\/register\", &controllers.RegisterController{})\n\tbeego.Router(\"\/addUser\", &controllers.AddUserController{})\n\tbeego.Router(\"\/forgotPassword\", &controllers.ForgotPasswordController{})\n\tbeego.Router(\"\/resetPassword\", &controllers.ResetPasswordController{})\n\tbeego.Router(\"\/changePassword\", &controllers.ChangePasswordController{})\n\n\tbeego.Router(\"\/registry\/project\", &controllers.ProjectController{})\n\tbeego.Router(\"\/registry\/detail\", &controllers.ItemDetailController{})\n\n\tbeego.Router(\"\/search\", &controllers.SearchController{})\n\n\t\/\/API:\n\tbeego.Router(\"\/api\/search\", &api.SearchAPI{})\n\tbeego.Router(\"\/api\/projects\/:pid\/members\/?:mid\", &api.ProjectMemberAPI{})\n\tbeego.Router(\"\/api\/projects\/\", &api.ProjectAPI{}, \"get:List\")\n\tbeego.Router(\"\/api\/projects\/?:id\", &api.ProjectAPI{})\n\tbeego.Router(\"\/api\/projects\/:id\/toggle_project_public\", &api.ProjectAPI{}, \"post:ToggleProjectPublic\")\n\tbeego.Router(\"\/api\/statistics\", &api.StatisticAPI{})\n\tbeego.Router(\"\/api\/projects\/:id\/logs\/filter\", &api.ProjectAPI{}, \"post:FilterAccessLog\")\n\tbeego.Router(\"\/api\/users\/?:id\", &api.UserAPI{})\n\tbeego.Router(\"\/api\/users\/:id\/password\", &api.UserAPI{}, \"put:ChangePassword\")\n\tbeego.Router(\"\/api\/users\/:id\/toggle_user_admin\", &api.UserAPI{}, \"post:ToggleUserAdminRole\")\n\tbeego.Router(\"\/api\/repositories\", &api.RepositoryAPI{})\n\tbeego.Router(\"\/api\/repositories\/tags\", &api.RepositoryAPI{}, \"get:GetTags\")\n\tbeego.Router(\"\/api\/repositories\/manifests\", &api.RepositoryAPI{}, \"get:GetManifests\")\n\tbeego.Router(\"api\/logs\", &api.LogAPI{})\n\n\t\/\/external service that hosted on harbor process:\n\tbeego.Router(\"\/service\/notifications\", &service.NotificationHandler{})\n\tbeego.Router(\"\/service\/token\", &token.Handler{})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.crypto\/pbkdf2\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"..\/proto\"\n)\n\nconst (\n\tbnotifyPackageName = \"cc.bran.bnotify\"\n\tgcmSendAddress = \"https:\/\/android.googleapis.com\/gcm\/send\"\n\taesKeySize = 16\n\tpbkdfIterCount = 400000\n\tserverIDSize = 16\n)\n\nvar (\n\tport = flag.Int(\"port\", 50051, \"port to listen to RPCs on\")\n\tsettingsFilename = flag.String(\"settings\", \"bnotify.conf\", \"filename of settings file\")\n\tstateFilename = flag.String(\"state\", \"bnotify.state\", \"filename of state file\")\n\n\tuint64Size = binary.Size(uint64(0))\n\n\tstateMu sync.Mutex\n)\n\ntype notificationService struct {\n\thttpClient *http.Client\n\tapiKey string\n\tregistrationID string\n\tgcmCipher cipher.AEAD\n}\n\nfunc (ns *notificationService) SendNotification(ctx context.Context, req *pb.SendNotificationRequest) (*pb.SendNotificationResponse, error) {\n\tlog.Printf(\"Got notification request\")\n\n\t\/\/ Verify request.\n\tif req.Notification.Title == \"\" {\n\t\treturn nil, errors.New(\"notification missing title\")\n\t}\n\tif req.Notification.Text == \"\" {\n\t\treturn nil, errors.New(\"notification missing text\")\n\t}\n\n\t\/\/ Read state.\n\tserverID, seq, err := getServerIDAndSeq(true)\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\n\t\/\/ Marshal request notification & encrypt.\n\tplaintextMessage, err := proto.Marshal(&pb.Message{\n\t\tServerId: serverID,\n\t\tSeq: seq,\n\t\tNotification: req.Notification,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tnonce := make([]byte, ns.gcmCipher.NonceSize())\n\tbinary.BigEndian.PutUint64(nonce, seq) \/\/ Ensure we do not reuse nonce.\n\tif _, err := rand.Read(nonce[uint64Size:]); err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tmessage := ns.gcmCipher.Seal(nil, nonce, plaintextMessage, nil)\n\n\t\/\/ Fill out final envelope proto & base-64 encode into a payload.\n\tenvelopeData, err := proto.Marshal(&pb.Envelope{\n\t\tMessage: message,\n\t\tNonce: nonce,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tpayload := base64.StdEncoding.EncodeToString(envelopeData)\n\n\t\/\/ Post the notification.\n\tif err := ns.postNotification(payload); err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\treturn &pb.SendNotificationResponse{}, nil\n}\n\nfunc (ns *notificationService) postNotification(payload string) error {\n\t\/\/ Set up request.\n\tvalues := url.Values{}\n\tvalues.Set(\"restricted_package_name\", bnotifyPackageName)\n\tvalues.Set(\"registration_id\", ns.registrationID)\n\tvalues.Set(\"data.payload\", payload)\n\n\treq, err := http.NewRequest(\"POST\", gcmSendAddress, strings.NewReader(values.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded;charset=UTF-8\")\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"key=%s\", ns.apiKey))\n\n\t\/\/ Make request to GCM server.\n\tresp, err := ns.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ Check for HTTP error code.\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"GCM HTTP error: %v\", resp.Status)\n\t}\n\n\t\/\/ Read the first line of the response and figure out if it indicates a GCM-level error.\n\tbodyReader := bufio.NewReader(resp.Body)\n\tlineBytes, _, err := bodyReader.ReadLine()\n\tif err != nil {\n\t\treturn err\n\t}\n\tline := string(lineBytes)\n\tif strings.HasPrefix(line, \"Error=\") {\n\t\treturn fmt.Errorf(\"GCM error: %v\", strings.TrimPrefix(line, \"Error=\"))\n\t}\n\treturn nil\n}\n\nfunc getServerIDAndSeq(increment bool) (serverID string, seq uint64, _ error) {\n\t\/\/ TODO(bran): consider using a library (e.g. SQLite) to handle intricacies of handling files on disk\n\t\/\/ (this is probably subtly buggy under certain failure modes, & nonce reuse is disastrous...)\n\tstateMu.Lock()\n\tdefer stateMu.Unlock()\n\n\t\/\/ Get current state.\n\tvar state *pb.BNotifyServerState\n\tinFile, err := os.Open(*stateFilename)\n\tif err == nil {\n\t\tif err := func() error {\n\t\t\tdefer inFile.Close()\n\t\t\tdata, err := ioutil.ReadAll(inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not read state file: %v\", err)\n\t\t\t}\n\t\t\tstate = &pb.BNotifyServerState{}\n\t\t\tif err := proto.Unmarshal(data, state); err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not deserialize state: %v\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn \"\", 0, err\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Could not open state file (%v); continuing with fresh state\", err)\n\t\tserverIDBytes := make([]byte, serverIDSize)\n\t\tif _, err := rand.Read(serverIDBytes); err != nil {\n\t\t\treturn \"\", 0, fmt.Errorf(\"could not generate server ID: %v\", err)\n\t\t}\n\t\tstate = &pb.BNotifyServerState{\n\t\t\tServerId: base64.RawStdEncoding.EncodeToString(serverIDBytes),\n\t\t\tNextSeq: 1,\n\t\t}\n\t}\n\n\t\/\/ Get result & update state if requested.\n\tserverID, nextSeq := state.ServerId, state.NextSeq\n\tif nextSeq == 0 {\n\t\treturn \"\", 0, errors.New(\"out of message sequence numbers\")\n\t}\n\tif increment {\n\t\tstate.NextSeq++\n\t}\n\n\t\/\/ Write state back.\n\t\/\/ (do this even if we aren't changing the state to make sure that we\n\t\/\/ can write state--allows us to fail early if state flag is improperly\n\t\/\/ set)\n\tstateBytes, err := proto.Marshal(state)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not serialize state: %v\", err)\n\t}\n\toutFile, err := ioutil.TempFile(\"\", \"bnotifyd_\")\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not open temporary file: %v\", err)\n\t}\n\ttempFilename := outFile.Name()\n\tif err := func() error {\n\t\tdefer outFile.Close()\n\t\tif err := outFile.Chmod(0640); err != nil {\n\t\t\treturn fmt.Errorf(\"could not chmod tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Truncate(0); err != nil {\n\t\t\treturn fmt.Errorf(\"could not truncate tempfile: %v\", err)\n\t\t}\n\t\tif _, err := io.Copy(outFile, bytes.NewReader(stateBytes)); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write to tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Sync(); err != nil {\n\t\t\treturn fmt.Errorf(\"could not sync tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Close(); err != nil {\n\t\t\treturn fmt.Errorf(\"could not close tempfile: %v\", err)\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif err := os.Rename(tempFilename, *stateFilename); err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not rename tempfile: %v\", err)\n\t}\n\n\treturn serverID, nextSeq, nil\n}\n\nfunc main() {\n\t\/\/ Parse flags.\n\tflag.Parse()\n\n\t\/\/ Read settings.\n\tsettingsBytes, err := ioutil.ReadFile(*settingsFilename)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading settings file: %v\", err)\n\t}\n\tsettings := &pb.BNotifySettings{}\n\terr = proto.UnmarshalText(string(settingsBytes), settings)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading settings file: %v\", err)\n\t}\n\n\t\/\/ Read state file to make sure we can (fail fast for bad config).\n\tif _, _, err := getServerIDAndSeq(false); err != nil {\n\t\tlog.Fatalf(\"Error checking state file: %v\", err)\n\t}\n\n\t\/\/ Derive key from password & salt (registration ID).\n\tkey := pbkdf2.Key([]byte(settings.Password), []byte(settings.RegistrationId), pbkdfIterCount, aesKeySize, sha1.New)\n\n\t\/\/ Initialize cipher based on key.\n\tblockCipher, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error initializing block cipher: %v\", err)\n\t}\n\tgcmCipher, err := cipher.NewGCM(blockCipher)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error initializing GCM cipher: %v\", err)\n\t}\n\n\t\/\/ Sanity check nonce size.\n\tif gcmCipher.NonceSize() < uint64Size {\n\t\t\/\/ This should be impossible, but may as well panic with a useful message.\n\t\tlog.Fatalf(fmt.Sprintf(\"cipher nonce size too small (%d < %d)\", gcmCipher.NonceSize(), uint64Size))\n\t}\n\n\t\/\/ Create service, socket, and gRPC server objects.\n\tservice := ¬ificationService{\n\t\thttpClient: new(http.Client),\n\t\tapiKey: settings.ApiKey,\n\t\tregistrationID: settings.RegistrationId,\n\t\tgcmCipher: gcmCipher,\n\t}\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", *port))\n\tif err != nil {\n\t\tlog.Fatalf(\"Error listening on port %d: %v\", *port, err)\n\t}\n\tdefer listener.Close()\n\tserver := grpc.NewServer()\n\tpb.RegisterNotificationServiceServer(server, service)\n\n\t\/\/ Begin serving.\n\tlog.Printf(\"Listening for requests on port %d...\", *port)\n\tserver.Serve(listener)\n}\n<commit_msg>Create temp file in same directory as state file.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.crypto\/pbkdf2\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"..\/proto\"\n)\n\nconst (\n\tbnotifyPackageName = \"cc.bran.bnotify\"\n\tgcmSendAddress = \"https:\/\/android.googleapis.com\/gcm\/send\"\n\taesKeySize = 16\n\tpbkdfIterCount = 400000\n\tserverIDSize = 16\n)\n\nvar (\n\tport = flag.Int(\"port\", 50051, \"port to listen to RPCs on\")\n\tsettingsFilename = flag.String(\"settings\", \"bnotify.conf\", \"filename of settings file\")\n\tstateFilename = flag.String(\"state\", \"bnotify.state\", \"filename of state file\")\n\n\tuint64Size = binary.Size(uint64(0))\n\n\tstateMu sync.Mutex\n)\n\ntype notificationService struct {\n\thttpClient *http.Client\n\tapiKey string\n\tregistrationID string\n\tgcmCipher cipher.AEAD\n}\n\nfunc (ns *notificationService) SendNotification(ctx context.Context, req *pb.SendNotificationRequest) (*pb.SendNotificationResponse, error) {\n\tlog.Printf(\"Got notification request\")\n\n\t\/\/ Verify request.\n\tif req.Notification.Title == \"\" {\n\t\treturn nil, errors.New(\"notification missing title\")\n\t}\n\tif req.Notification.Text == \"\" {\n\t\treturn nil, errors.New(\"notification missing text\")\n\t}\n\n\t\/\/ Read state.\n\tserverID, seq, err := getServerIDAndSeq(true)\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\n\t\/\/ Marshal request notification & encrypt.\n\tplaintextMessage, err := proto.Marshal(&pb.Message{\n\t\tServerId: serverID,\n\t\tSeq: seq,\n\t\tNotification: req.Notification,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tnonce := make([]byte, ns.gcmCipher.NonceSize())\n\tbinary.BigEndian.PutUint64(nonce, seq) \/\/ Ensure we do not reuse nonce.\n\tif _, err := rand.Read(nonce[uint64Size:]); err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tmessage := ns.gcmCipher.Seal(nil, nonce, plaintextMessage, nil)\n\n\t\/\/ Fill out final envelope proto & base-64 encode into a payload.\n\tenvelopeData, err := proto.Marshal(&pb.Envelope{\n\t\tMessage: message,\n\t\tNonce: nonce,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\tpayload := base64.StdEncoding.EncodeToString(envelopeData)\n\n\t\/\/ Post the notification.\n\tif err := ns.postNotification(payload); err != nil {\n\t\tlog.Printf(\"Error while posting notification: %v\", err)\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\treturn &pb.SendNotificationResponse{}, nil\n}\n\nfunc (ns *notificationService) postNotification(payload string) error {\n\t\/\/ Set up request.\n\tvalues := url.Values{}\n\tvalues.Set(\"restricted_package_name\", bnotifyPackageName)\n\tvalues.Set(\"registration_id\", ns.registrationID)\n\tvalues.Set(\"data.payload\", payload)\n\n\treq, err := http.NewRequest(\"POST\", gcmSendAddress, strings.NewReader(values.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded;charset=UTF-8\")\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"key=%s\", ns.apiKey))\n\n\t\/\/ Make request to GCM server.\n\tresp, err := ns.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ Check for HTTP error code.\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"GCM HTTP error: %v\", resp.Status)\n\t}\n\n\t\/\/ Read the first line of the response and figure out if it indicates a GCM-level error.\n\tbodyReader := bufio.NewReader(resp.Body)\n\tlineBytes, _, err := bodyReader.ReadLine()\n\tif err != nil {\n\t\treturn err\n\t}\n\tline := string(lineBytes)\n\tif strings.HasPrefix(line, \"Error=\") {\n\t\treturn fmt.Errorf(\"GCM error: %v\", strings.TrimPrefix(line, \"Error=\"))\n\t}\n\treturn nil\n}\n\nfunc getServerIDAndSeq(increment bool) (serverID string, seq uint64, _ error) {\n\t\/\/ TODO(bran): consider using a library (e.g. SQLite) to handle files on disk\n\t\/\/ (sadly, I can't find a non-cgo SQLite package. oh well.)\n\tstateMu.Lock()\n\tdefer stateMu.Unlock()\n\n\t\/\/ Get current state.\n\tvar state *pb.BNotifyServerState\n\tinFile, err := os.Open(*stateFilename)\n\tif err == nil {\n\t\tif err := func() error {\n\t\t\tdefer inFile.Close()\n\t\t\tdata, err := ioutil.ReadAll(inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not read state file: %v\", err)\n\t\t\t}\n\t\t\tstate = &pb.BNotifyServerState{}\n\t\t\tif err := proto.Unmarshal(data, state); err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not deserialize state: %v\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn \"\", 0, err\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Could not open state file (%v); continuing with fresh state\", err)\n\t\tserverIDBytes := make([]byte, serverIDSize)\n\t\tif _, err := rand.Read(serverIDBytes); err != nil {\n\t\t\treturn \"\", 0, fmt.Errorf(\"could not generate server ID: %v\", err)\n\t\t}\n\t\tstate = &pb.BNotifyServerState{\n\t\t\tServerId: base64.RawStdEncoding.EncodeToString(serverIDBytes),\n\t\t\tNextSeq: 1,\n\t\t}\n\t}\n\n\t\/\/ Get result & update state if requested.\n\tserverID, nextSeq := state.ServerId, state.NextSeq\n\tif nextSeq == 0 {\n\t\treturn \"\", 0, errors.New(\"out of message sequence numbers\")\n\t}\n\tif increment {\n\t\tstate.NextSeq++\n\t}\n\n\t\/\/ Write state back.\n\t\/\/ (do this even if we aren't changing the state to make sure that we\n\t\/\/ can write state--allows us to fail early if state flag is improperly\n\t\/\/ set)\n\tstateBytes, err := proto.Marshal(state)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not serialize state: %v\", err)\n\t}\n\toutFile, err := ioutil.TempFile(path.Dir(*stateFilename), \"bnotifyd_\")\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not open temporary file: %v\", err)\n\t}\n\ttempFilename := outFile.Name()\n\tif err := func() error {\n\t\tdefer outFile.Close()\n\t\tif err := outFile.Chmod(0640); err != nil {\n\t\t\treturn fmt.Errorf(\"could not chmod tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Truncate(0); err != nil {\n\t\t\treturn fmt.Errorf(\"could not truncate tempfile: %v\", err)\n\t\t}\n\t\tif _, err := io.Copy(outFile, bytes.NewReader(stateBytes)); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write to tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Sync(); err != nil {\n\t\t\treturn fmt.Errorf(\"could not sync tempfile: %v\", err)\n\t\t}\n\t\tif err := outFile.Close(); err != nil {\n\t\t\treturn fmt.Errorf(\"could not close tempfile: %v\", err)\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif err := os.Rename(tempFilename, *stateFilename); err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"could not rename tempfile: %v\", err)\n\t}\n\n\treturn serverID, nextSeq, nil\n}\n\nfunc main() {\n\t\/\/ Parse flags.\n\tflag.Parse()\n\n\t\/\/ Read settings.\n\tsettingsBytes, err := ioutil.ReadFile(*settingsFilename)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading settings file: %v\", err)\n\t}\n\tsettings := &pb.BNotifySettings{}\n\terr = proto.UnmarshalText(string(settingsBytes), settings)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading settings file: %v\", err)\n\t}\n\n\t\/\/ Read state file to make sure we can (fail fast for bad config).\n\tif _, _, err := getServerIDAndSeq(false); err != nil {\n\t\tlog.Fatalf(\"Error checking state file: %v\", err)\n\t}\n\n\t\/\/ Derive key from password & salt (registration ID).\n\tkey := pbkdf2.Key([]byte(settings.Password), []byte(settings.RegistrationId), pbkdfIterCount, aesKeySize, sha1.New)\n\n\t\/\/ Initialize cipher based on key.\n\tblockCipher, err := aes.NewCipher(key)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error initializing block cipher: %v\", err)\n\t}\n\tgcmCipher, err := cipher.NewGCM(blockCipher)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error initializing GCM cipher: %v\", err)\n\t}\n\n\t\/\/ Sanity check nonce size.\n\tif gcmCipher.NonceSize() < uint64Size {\n\t\t\/\/ This should be impossible, but may as well panic with a useful message.\n\t\tlog.Fatalf(fmt.Sprintf(\"cipher nonce size too small (%d < %d)\", gcmCipher.NonceSize(), uint64Size))\n\t}\n\n\t\/\/ Create service, socket, and gRPC server objects.\n\tservice := ¬ificationService{\n\t\thttpClient: new(http.Client),\n\t\tapiKey: settings.ApiKey,\n\t\tregistrationID: settings.RegistrationId,\n\t\tgcmCipher: gcmCipher,\n\t}\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", *port))\n\tif err != nil {\n\t\tlog.Fatalf(\"Error listening on port %d: %v\", *port, err)\n\t}\n\tdefer listener.Close()\n\tserver := grpc.NewServer()\n\tpb.RegisterNotificationServiceServer(server, service)\n\n\t\/\/ Begin serving.\n\tlog.Printf(\"Listening for requests on port %d...\", *port)\n\tserver.Serve(listener)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n \"strings\"\n)\n\nvar Db *sql.DB\n\nfunc main() {\n\tvar err error\n\tDb, err = sql.Open(\"postgres\", \"connect_timeout=5 user=hackmann password='hackmann' dbname=hackmann sslmode=disable\")\n\tdefer Db.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttp.HandleFunc(\"\/\", rootHandler)\n\thttp.HandleFunc(\"\/register\", registerHandler)\n\tif err := http.ListenAndServe(\"0.0.0.0:80\", nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n const rootDirectory = \".\/dist\"\n switch (r.URL.Path) {\n case \"\/\":\n w.Header().Set(\"Content-Type\", \"application\/xhtml+xml\")\n http.ServeFile(w, r, rootDirectory + \"\/index.xhtml\")\n default:\n if strings.HasSuffix(r.URL.Path, \".svg\") {\n w.Header().Set(\"Content-Type\", \"image\/svg+xml\")\n }\n http.ServeFile(w, r, rootDirectory + r.URL.Path)\n }\n}\n\nfunc registerHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tvar firstname, lastname, school, email = r.PostForm.Get(\"firstname\"), r.PostForm.Get(\"lastname\"), r.PostForm.Get(\"school\"), r.PostForm.Get(\"email\")\n\tif !(len(firstname) > 0 || len(lastname) > 0 || len(school) > 0 || isValidEmail(email)) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar id int\n\terr := Db.QueryRow(`SELECT id FROM registrations WHERE email = $1`, email).Scan(&id)\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\terr = Db.QueryRow(`INSERT INTO registrations (firstname, lastname, school, email) VALUES ($1, $2, $3, $4) RETURNING id`, firstname, lastname, school, email).Scan(&id)\n\tcase err != nil:\n\t\tbreak\n\tdefault:\n\t\terr = Db.QueryRow(`UPDATE registrations SET firstname = $1, lastname = $2, school = $3 WHERE id = $4 RETURNING id`, firstname, lastname, school, id).Scan(&id)\n\t}\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ regexp source: https:\/\/github.com\/asaskevich\/govalidator\nfunc isValidEmail(email string) bool {\n\treturn regexp.MustCompile(\"(?i)^(((([a-zA-Z]|\\\\d|[!#\\\\$%&'\\\\*\\\\+\\\\-\\\\\/=\\\\?\\\\^_`{\\\\|}~]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])+(\\\\.([a-zA-Z]|\\\\d|[!#\\\\$%&'\\\\*\\\\+\\\\-\\\\\/=\\\\?\\\\^_`{\\\\|}~]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])+)*)|((\\\\x22)((((\\\\x20|\\\\x09)*(\\\\x0d\\\\x0a))?(\\\\x20|\\\\x09)+)?(([\\\\x01-\\\\x08\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f]|\\\\x21|[\\\\x23-\\\\x5b]|[\\\\x5d-\\\\x7e]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(\\\\([\\\\x01-\\\\x09\\\\x0b\\\\x0c\\\\x0d-\\\\x7f]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}]))))*(((\\\\x20|\\\\x09)*(\\\\x0d\\\\x0a))?(\\\\x20|\\\\x09)+)?(\\\\x22)))@((([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])([a-zA-Z]|\\\\d|-|\\\\.|_|~|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])*([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])))\\\\.)+(([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])([a-zA-Z]|\\\\d|-|\\\\.|_|~|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])*([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])))\\\\.?$\").MatchString(email)\n}\n<commit_msg>use gzip<commit_after>package main\n\nimport (\n \"bytes\"\n \"compress\/gzip\"\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n \"io\"\n\t\"log\"\n\t\"net\/http\"\n \"os\"\n \"path\/filepath\"\n\t\"regexp\"\n)\n\ntype fileCache struct {\n\tbuf bytes.Buffer\n gzipped bytes.Buffer\n}\n\nfunc (f *fileCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n acceptsGzip := false\n for _, v := range r.Header[\"Accept-Encoding\"] {\n if v == \"gzip\" {\n acceptsGzip = true\n break\n }\n }\n \n if acceptsGzip {\n w.Header().Set(\"Content-Encoding\", \"gzip\")\n w.Write(f.gzipped.Bytes())\n } else {\n w.Write(f.buf.Bytes())\n }\n}\n\nfunc NewCache(fileName string) *fileCache {\n\tf, err := os.Open(fileName)\n\tif err != nil {\n\t\tlog.Fatalf(\"couldn't open file: %v\", err)\n\t}\n\n\tret := &fileCache{}\n\t_, err = io.Copy(io.MultiWriter(&ret.buf, gzip.NewWriter(&ret.gzipped)), f)\n if err != nil {\n\t\tlog.Fatalf(\"couldn't read file: %v\", err)\n\t}\n\n\treturn ret\n}\n\nconst staticFileDirectory = \"dist\"\n\nvar db *sql.DB\nvar staticFiles = map[string]*fileCache {}\n\nfunc main() {\n\tvar err error\n\tdb, err = sql.Open(\"postgres\", \"connect_timeout=5 user=hackmann password='hackmann' dbname=hackmann sslmode=disable\")\n\tdefer db.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n \n staticFilePaths := []string {}\n err = filepath.Walk(staticFileDirectory, func(path string, info os.FileInfo, err error) error {\n if !info.IsDir() {\n \/\/ deal with Windows path separator\n staticFilePaths = append(staticFilePaths, path)\n }\n return err\n })\n if err != nil {\n log.Fatal(err)\n }\n \n for _, path := range staticFilePaths {\n staticFiles[path] = NewCache(path)\n }\n\n\thttp.HandleFunc(\"\/\", rootHandler)\n\thttp.HandleFunc(\"\/register\", registerHandler)\n\tif err := http.ListenAndServe(\"0.0.0.0:80\", nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc rootHandler(w http.ResponseWriter, r *http.Request) {\n var path string\n switch (r.URL.Path) {\n case \"\/\":\n path = filepath.FromSlash(staticFileDirectory + \"\/index.xhtml\")\n default:\n path = filepath.FromSlash(staticFileDirectory + r.URL.Path)\n }\n \n file, ok := staticFiles[path]\n if !ok {\n w.WriteHeader(http.StatusNotFound)\n return\n }\n \n var contentType string\n switch (filepath.Ext(path)) {\n case \".xhtml\":\n contentType = \"application\/xhtml+xml\"\n case \".css\":\n contentType = \"text\/css\"\n case \".js\":\n contentType = \"application\/javascript\"\n case \".svg\":\n contentType = \"image\/svg+xml\"\n default:\n contentType = \"text\/plain\"\n }\n w.Header().Set(\"Content-Type\", contentType)\n \n file.ServeHTTP(w, r)\n}\n\nfunc registerHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tvar firstname, lastname, school, email = r.PostForm.Get(\"firstname\"), r.PostForm.Get(\"lastname\"), r.PostForm.Get(\"school\"), r.PostForm.Get(\"email\")\n\tif !(len(firstname) > 0 || len(lastname) > 0 || len(school) > 0 || isValidEmail(email)) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar id int\n\terr := db.QueryRow(`SELECT id FROM registrations WHERE email = $1`, email).Scan(&id)\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\terr = db.QueryRow(`INSERT INTO registrations (firstname, lastname, school, email) VALUES ($1, $2, $3, $4) RETURNING id`, firstname, lastname, school, email).Scan(&id)\n\tcase err != nil:\n\t\tbreak\n\tdefault:\n\t\terr = db.QueryRow(`UPDATE registrations SET firstname = $1, lastname = $2, school = $3 WHERE id = $4 RETURNING id`, firstname, lastname, school, id).Scan(&id)\n\t}\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ regexp source: https:\/\/github.com\/asaskevich\/govalidator\nfunc isValidEmail(email string) bool {\n\treturn regexp.MustCompile(\"(?i)^(((([a-zA-Z]|\\\\d|[!#\\\\$%&'\\\\*\\\\+\\\\-\\\\\/=\\\\?\\\\^_`{\\\\|}~]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])+(\\\\.([a-zA-Z]|\\\\d|[!#\\\\$%&'\\\\*\\\\+\\\\-\\\\\/=\\\\?\\\\^_`{\\\\|}~]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])+)*)|((\\\\x22)((((\\\\x20|\\\\x09)*(\\\\x0d\\\\x0a))?(\\\\x20|\\\\x09)+)?(([\\\\x01-\\\\x08\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f]|\\\\x21|[\\\\x23-\\\\x5b]|[\\\\x5d-\\\\x7e]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(\\\\([\\\\x01-\\\\x09\\\\x0b\\\\x0c\\\\x0d-\\\\x7f]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}]))))*(((\\\\x20|\\\\x09)*(\\\\x0d\\\\x0a))?(\\\\x20|\\\\x09)+)?(\\\\x22)))@((([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])([a-zA-Z]|\\\\d|-|\\\\.|_|~|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])*([a-zA-Z]|\\\\d|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])))\\\\.)+(([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])|(([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])([a-zA-Z]|\\\\d|-|\\\\.|_|~|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])*([a-zA-Z]|[\\\\x{00A0}-\\\\x{D7FF}\\\\x{F900}-\\\\x{FDCF}\\\\x{FDF0}-\\\\x{FFEF}])))\\\\.?$\").MatchString(email)\n}\n<|endoftext|>"} {"text":"<commit_before>package levant\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tnomad \"github.com\/hashicorp\/nomad\/api\"\n\tnomadStructs \"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/jrasell\/levant\/logging\"\n)\n\ntype nomadClient struct {\n\tnomad *nomad.Client\n}\n\n\/\/ NomadClient is an interface to the Nomad API and deployment functions.\ntype NomadClient interface {\n\t\/\/ Deploy triggers a register of the job resulting in a Nomad deployment which\n\t\/\/ is monitored to determine the eventual state.\n\tDeploy(*nomad.Job, int, bool) bool\n}\n\n\/\/ NewNomadClient is used to create a new client to interact with Nomad.\nfunc NewNomadClient(addr string) (NomadClient, error) {\n\tconfig := nomad.DefaultConfig()\n\n\tif addr != \"\" {\n\t\tconfig.Address = addr\n\t}\n\n\tc, err := nomad.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &nomadClient{nomad: c}, nil\n}\n\n\/\/ Deploy triggers a register of the job resulting in a Nomad deployment which\n\/\/ is monitored to determine the eventual state.\nfunc (c *nomadClient) Deploy(job *nomad.Job, autoPromote int, forceCount bool) (success bool) {\n\n\t\/\/ Validate the job to check it is syntactically correct.\n\tif _, _, err := c.nomad.Jobs().Validate(job, nil); err != nil {\n\t\tlogging.Error(\"levant\/deploy: job validation failed: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ If job.Type isn't set we can't continue\n\tif job.Type == nil {\n\t\tlogging.Error(\"levant\/deploy: Nomad job `type` is not set, should be set to `service`\")\n\t\treturn\n\t}\n\n\tif !forceCount {\n\t\tlogging.Debug(\"levant\/deploy: running dynamic job count updater for job %s\", *job.Name)\n\t\tif err := c.dynamicGroupCountUpdater(job); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Check that the job has at least 1 TaskGroup with count > 0 (GH-16)\n\tTGCount := 0\n\tfor _, group := range job.TaskGroups {\n\t\tTGCount += *group.Count\n\t}\n\tif TGCount == 0 {\n\t\tlogging.Error(\"levant\/deploy: all TaskGroups have a count of 0, nothing to do\")\n\t\treturn\n\t}\n\n\tlogging.Info(\"levant\/deploy: triggering a deployment of job %s\", *job.Name)\n\n\teval, _, err := c.nomad.Jobs().Register(job, nil)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to register job %s with Nomad: %v\", *job.Name, err)\n\t\treturn\n\t}\n\n\t\/\/ Trigger the evaluationInspector to identify any potential errors in the\n\t\/\/ Nomad evaluation run. As far as I can tell from testing; a single alloc\n\t\/\/ failure in an evaluation means no allocs will be placed so we exit here.\n\terr = c.evaluationInspector(&eval.EvalID)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: %v\", err)\n\t\treturn\n\t}\n\n\tswitch *job.Type {\n\tcase nomadStructs.JobTypeService:\n\t\tlogging.Debug(\"levant\/deploy: beginning deployment watcher for job %s\", *job.Name)\n\t\tsuccess = c.deploymentWatcher(eval.EvalID, autoPromote)\n\tdefault:\n\t\tlogging.Debug(\"levant\/deploy: job type %s does not support Nomad deployment model\", *job.Type)\n\t\tsuccess = true\n\t}\n\n\treturn\n}\n\nfunc (c *nomadClient) evaluationInspector(evalID *string) error {\n\n\tfor {\n\t\tevalInfo, _, err := c.nomad.Evaluations().Info(*evalID, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch evalInfo.Status {\n\t\tcase nomadStructs.EvalStatusComplete, nomadStructs.EvalStatusFailed, nomadStructs.EvalStatusCancelled:\n\t\t\tif len(evalInfo.FailedTGAllocs) == 0 {\n\t\t\t\tlogging.Info(\"levant\/deploy: evaluation %s finished successfully\", *evalID)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar class, dimension []string\n\n\t\t\tfor group, metrics := range evalInfo.FailedTGAllocs {\n\n\t\t\t\t\/\/ Iterate the classes and dimensions to generate lists of each failure.\n\t\t\t\tfor c := range metrics.ClassExhausted {\n\t\t\t\t\tclass = append(class, c)\n\t\t\t\t}\n\t\t\t\tfor d := range metrics.DimensionExhausted {\n\t\t\t\t\tdimension = append(dimension, d)\n\t\t\t\t}\n\n\t\t\t\tlogging.Error(\"levant\/deploy: task group %s failed to place %v allocs, failed on %v and exhausted %v\",\n\t\t\t\t\tgroup, metrics.CoalescedFailures+1, class, dimension)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"evaluation %v finished with status %s but failed to place allocations\",\n\t\t\t\t*evalID, evalInfo.Status)\n\n\t\tdefault:\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (c *nomadClient) deploymentWatcher(evalID string, autoPromote int) (success bool) {\n\n\tvar canaryChan chan interface{}\n\tdeploymentChan := make(chan interface{})\n\n\tt := time.Now()\n\twt := time.Duration(5 * time.Second)\n\n\t\/\/ Get the deploymentID from the evaluationID so that we can watch the\n\t\/\/ deployment for end status.\n\tdepID, err := c.getDeploymentID(evalID)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to get info of evaluation %s: %v\", evalID, err)\n\t}\n\n\t\/\/ Setup the canaryChan and launch the autoPromote go routine if autoPromote\n\t\/\/ has been enabled.\n\tif autoPromote > 0 {\n\t\tcanaryChan = make(chan interface{})\n\t\tgo c.canaryAutoPromote(depID, autoPromote, canaryChan, deploymentChan)\n\t}\n\n\tq := &nomad.QueryOptions{WaitIndex: 1, AllowStale: true, WaitTime: wt}\n\n\tfor {\n\n\t\tdep, meta, err := c.nomad.Deployments().Info(depID, q)\n\t\tlogging.Debug(\"levant\/deploy: deployment %v running for %.2fs\", depID, time.Since(t).Seconds())\n\n\t\t\/\/ Listen for the deploymentChan closing which indicates Levant should exit\n\t\t\/\/ the deployment watcher.\n\t\tselect {\n\t\tcase <-deploymentChan:\n\t\t\treturn false\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogging.Error(\"levant\/deploy: unable to get info of deployment %s: %v\", depID, err)\n\t\t\treturn\n\t\t}\n\n\t\tif meta.LastIndex <= q.WaitIndex {\n\t\t\tcontinue\n\t\t}\n\n\t\tq.WaitIndex = meta.LastIndex\n\n\t\tcont, err := c.checkDeploymentStatus(dep.Status, depID, canaryChan)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif cont {\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn true\n\t\t}\n\t}\n}\n\nfunc (c *nomadClient) checkDeploymentStatus(status, depID string, shutdownChan chan interface{}) (bool, error) {\n\n\tswitch status {\n\tcase nomadStructs.DeploymentStatusSuccessful:\n\t\tlogging.Info(\"levant\/deploy: deployment %v has completed successfully\", depID)\n\t\treturn false, nil\n\tcase nomadStructs.DeploymentStatusRunning:\n\t\treturn true, nil\n\tdefault:\n\t\tif shutdownChan != nil {\n\t\t\tlogging.Debug(\"levant\/deploy: deployment %v meaning canary auto promote will shutdown\", status)\n\t\t\tclose(shutdownChan)\n\t\t}\n\n\t\tlogging.Error(\"levant\/deploy: deployment %v has status %s, Levant will now exit\", depID, status)\n\t\tc.checkFailedDeployment(&depID)\n\t\treturn false, fmt.Errorf(\"deployment failed\")\n\t}\n}\n\n\/\/ canaryAutoPromote handles Levant's canary-auto-promote functionality.\nfunc (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) {\n\n\t\/\/ Setup the AutoPromote timer.\n\tautoPromote := time.After(time.Duration(waitTime) * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-autoPromote:\n\t\t\tlogging.Info(\"levant\/deploy: auto-promote period %vs has been reached for deployment %s\",\n\t\t\t\twaitTime, depID)\n\n\t\t\t\/\/ Check the deployment is healthy before promoting.\n\t\t\tif healthy := c.checkCanaryDeploymentHealth(depID); !healthy {\n\t\t\t\tlogging.Error(\"levant\/deploy: the canary deployment %s has unhealthy allocations, unable to promote\", depID)\n\t\t\t\tclose(deploymentChan)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogging.Info(\"levant\/deploy: triggering auto promote of deployment %s\", depID)\n\n\t\t\t\/\/ Promote the deployment.\n\t\t\t_, _, err := c.nomad.Deployments().PromoteAll(depID, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogging.Error(\"levant\/deploy: unable to promote deployment %s: %v\", depID, err)\n\t\t\t\tclose(deploymentChan)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-shutdownChan:\n\t\t\tlogging.Info(\"levant\/deploy: canary auto promote has been shutdown\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ checkCanaryDeploymentHealth is used to check the health status of each\n\/\/ task-group within a canary deployment.\nfunc (c *nomadClient) checkCanaryDeploymentHealth(depID string) (healthy bool) {\n\n\tvar unhealthy int\n\n\tdep, _, err := c.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: true})\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to query deployment %s for health: %v\", depID, err)\n\t\treturn\n\t}\n\n\t\/\/ Itertate each task in the deployment to determine is health status. If an\n\t\/\/ unhealthy task is found, incrament the unhealthy counter.\n\tfor taskName, taskInfo := range dep.TaskGroups {\n\t\tif taskInfo.DesiredCanaries != taskInfo.HealthyAllocs {\n\t\t\tlogging.Error(\"levant\/deploy: task %s has unhealthy allocations in deployment %s\", taskName, depID)\n\t\t\tunhealthy++\n\t\t}\n\t}\n\n\t\/\/ If zero unhealthy tasks were found, continue with the auto promotion.\n\tif unhealthy == 0 {\n\t\tlogging.Debug(\"levant\/deploy: deployment %s has 0 unhealthy allocations\", depID)\n\t\thealthy = true\n\t}\n\n\treturn\n}\n\n\/\/ getDeploymentID finds the Nomad deploymentID associated to a Nomad\n\/\/ evaluationID. This is only needed as sometimes Nomad initially returns eval\n\/\/ info with an empty deploymentID; and a retry is required in order to get the\n\/\/ updated response from Nomad.\nfunc (c *nomadClient) getDeploymentID(evalID string) (depID string, err error) {\n\n\tvar evalInfo *nomad.Evaluation\n\n\tfor {\n\t\tif evalInfo, _, err = c.nomad.Evaluations().Info(evalID, nil); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif evalInfo.DeploymentID == \"\" {\n\t\t\tlogging.Debug(\"levant\/deploy: Nomad returned an empty deployment for evaluation %v; retrying\", evalID)\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn evalInfo.DeploymentID, nil\n}\n\n\/\/ dynamicGroupCountUpdater takes the templated and rendered job and updates the\n\/\/ group counts based on the currently deployed job; if its running.\nfunc (c *nomadClient) dynamicGroupCountUpdater(job *nomad.Job) error {\n\n\t\/\/ Gather information about the current state, if any, of the job on the\n\t\/\/ Nomad cluster.\n\trJob, _, err := c.nomad.Jobs().Info(*job.Name, &nomad.QueryOptions{})\n\n\t\/\/ This is a hack due to GH-1849; we check the error string for 404 which\n\t\/\/ indicates the job is not running, not that there was an error in the API\n\t\/\/ call.\n\tif err != nil && strings.Contains(err.Error(), \"404\") {\n\t\tlogging.Info(\"levant\/deploy: job %s not running, using template file group counts\", *job.Name)\n\t\treturn nil\n\t} else if err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to perform job evaluation: %v\", err)\n\t\treturn err\n\t}\n\n\t\/\/ Iterate the templated job and the Nomad returned job and update group count\n\t\/\/ based on matches.\n\tfor _, rGroup := range rJob.TaskGroups {\n\t\tfor _, group := range job.TaskGroups {\n\t\t\tif *rGroup.Name == *group.Name {\n\t\t\t\tlogging.Info(\"levant\/deploy: using dynamic count %v for job %s and group %s\",\n\t\t\t\t\t*rGroup.Count, *job.Name, *group.Name)\n\t\t\t\tgroup.Count = rGroup.Count\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Nomad jobs of type batch do not produce evaluations.<commit_after>package levant\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tnomad \"github.com\/hashicorp\/nomad\/api\"\n\tnomadStructs \"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/jrasell\/levant\/logging\"\n)\n\ntype nomadClient struct {\n\tnomad *nomad.Client\n}\n\n\/\/ NomadClient is an interface to the Nomad API and deployment functions.\ntype NomadClient interface {\n\t\/\/ Deploy triggers a register of the job resulting in a Nomad deployment which\n\t\/\/ is monitored to determine the eventual state.\n\tDeploy(*nomad.Job, int, bool) bool\n}\n\n\/\/ NewNomadClient is used to create a new client to interact with Nomad.\nfunc NewNomadClient(addr string) (NomadClient, error) {\n\tconfig := nomad.DefaultConfig()\n\n\tif addr != \"\" {\n\t\tconfig.Address = addr\n\t}\n\n\tc, err := nomad.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &nomadClient{nomad: c}, nil\n}\n\n\/\/ Deploy triggers a register of the job resulting in a Nomad deployment which\n\/\/ is monitored to determine the eventual state.\nfunc (c *nomadClient) Deploy(job *nomad.Job, autoPromote int, forceCount bool) (success bool) {\n\n\t\/\/ Validate the job to check it is syntactically correct.\n\tif _, _, err := c.nomad.Jobs().Validate(job, nil); err != nil {\n\t\tlogging.Error(\"levant\/deploy: job validation failed: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ If job.Type isn't set we can't continue\n\tif job.Type == nil {\n\t\tlogging.Error(\"levant\/deploy: Nomad job `type` is not set, should be set to `service`\")\n\t\treturn\n\t}\n\n\tif !forceCount {\n\t\tlogging.Debug(\"levant\/deploy: running dynamic job count updater for job %s\", *job.Name)\n\t\tif err := c.dynamicGroupCountUpdater(job); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Check that the job has at least 1 TaskGroup with count > 0 (GH-16)\n\tTGCount := 0\n\tfor _, group := range job.TaskGroups {\n\t\tTGCount += *group.Count\n\t}\n\tif TGCount == 0 {\n\t\tlogging.Error(\"levant\/deploy: all TaskGroups have a count of 0, nothing to do\")\n\t\treturn\n\t}\n\n\tlogging.Info(\"levant\/deploy: triggering a deployment of job %s\", *job.Name)\n\n\teval, _, err := c.nomad.Jobs().Register(job, nil)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to register job %s with Nomad: %v\", *job.Name, err)\n\t\treturn\n\t}\n\n\t\/\/ GH-50: batch job types do not return an evaluation upon registration.\n\tif eval == nil && *job.Type == nomadStructs.JobTypeBatch {\n\t\tlogging.Debug(\"levant\/deploy: job type %s does not create evaluations\", nomadStructs.JobTypeBatch)\n\t\treturn true\n\t}\n\n\t\/\/ Trigger the evaluationInspector to identify any potential errors in the\n\t\/\/ Nomad evaluation run. As far as I can tell from testing; a single alloc\n\t\/\/ failure in an evaluation means no allocs will be placed so we exit here.\n\terr = c.evaluationInspector(&eval.EvalID)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: %v\", err)\n\t\treturn\n\t}\n\n\tswitch *job.Type {\n\tcase nomadStructs.JobTypeService:\n\t\tlogging.Debug(\"levant\/deploy: beginning deployment watcher for job %s\", *job.Name)\n\t\tsuccess = c.deploymentWatcher(eval.EvalID, autoPromote)\n\tdefault:\n\t\tlogging.Debug(\"levant\/deploy: job type %s does not support Nomad deployment model\", *job.Type)\n\t\tsuccess = true\n\t}\n\n\treturn\n}\n\nfunc (c *nomadClient) evaluationInspector(evalID *string) error {\n\n\tfor {\n\t\tevalInfo, _, err := c.nomad.Evaluations().Info(*evalID, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch evalInfo.Status {\n\t\tcase nomadStructs.EvalStatusComplete, nomadStructs.EvalStatusFailed, nomadStructs.EvalStatusCancelled:\n\t\t\tif len(evalInfo.FailedTGAllocs) == 0 {\n\t\t\t\tlogging.Info(\"levant\/deploy: evaluation %s finished successfully\", *evalID)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar class, dimension []string\n\n\t\t\tfor group, metrics := range evalInfo.FailedTGAllocs {\n\n\t\t\t\t\/\/ Iterate the classes and dimensions to generate lists of each failure.\n\t\t\t\tfor c := range metrics.ClassExhausted {\n\t\t\t\t\tclass = append(class, c)\n\t\t\t\t}\n\t\t\t\tfor d := range metrics.DimensionExhausted {\n\t\t\t\t\tdimension = append(dimension, d)\n\t\t\t\t}\n\n\t\t\t\tlogging.Error(\"levant\/deploy: task group %s failed to place %v allocs, failed on %v and exhausted %v\",\n\t\t\t\t\tgroup, metrics.CoalescedFailures+1, class, dimension)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"evaluation %v finished with status %s but failed to place allocations\",\n\t\t\t\t*evalID, evalInfo.Status)\n\n\t\tdefault:\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (c *nomadClient) deploymentWatcher(evalID string, autoPromote int) (success bool) {\n\n\tvar canaryChan chan interface{}\n\tdeploymentChan := make(chan interface{})\n\n\tt := time.Now()\n\twt := time.Duration(5 * time.Second)\n\n\t\/\/ Get the deploymentID from the evaluationID so that we can watch the\n\t\/\/ deployment for end status.\n\tdepID, err := c.getDeploymentID(evalID)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to get info of evaluation %s: %v\", evalID, err)\n\t}\n\n\t\/\/ Setup the canaryChan and launch the autoPromote go routine if autoPromote\n\t\/\/ has been enabled.\n\tif autoPromote > 0 {\n\t\tcanaryChan = make(chan interface{})\n\t\tgo c.canaryAutoPromote(depID, autoPromote, canaryChan, deploymentChan)\n\t}\n\n\tq := &nomad.QueryOptions{WaitIndex: 1, AllowStale: true, WaitTime: wt}\n\n\tfor {\n\n\t\tdep, meta, err := c.nomad.Deployments().Info(depID, q)\n\t\tlogging.Debug(\"levant\/deploy: deployment %v running for %.2fs\", depID, time.Since(t).Seconds())\n\n\t\t\/\/ Listen for the deploymentChan closing which indicates Levant should exit\n\t\t\/\/ the deployment watcher.\n\t\tselect {\n\t\tcase <-deploymentChan:\n\t\t\treturn false\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogging.Error(\"levant\/deploy: unable to get info of deployment %s: %v\", depID, err)\n\t\t\treturn\n\t\t}\n\n\t\tif meta.LastIndex <= q.WaitIndex {\n\t\t\tcontinue\n\t\t}\n\n\t\tq.WaitIndex = meta.LastIndex\n\n\t\tcont, err := c.checkDeploymentStatus(dep.Status, depID, canaryChan)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif cont {\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn true\n\t\t}\n\t}\n}\n\nfunc (c *nomadClient) checkDeploymentStatus(status, depID string, shutdownChan chan interface{}) (bool, error) {\n\n\tswitch status {\n\tcase nomadStructs.DeploymentStatusSuccessful:\n\t\tlogging.Info(\"levant\/deploy: deployment %v has completed successfully\", depID)\n\t\treturn false, nil\n\tcase nomadStructs.DeploymentStatusRunning:\n\t\treturn true, nil\n\tdefault:\n\t\tif shutdownChan != nil {\n\t\t\tlogging.Debug(\"levant\/deploy: deployment %v meaning canary auto promote will shutdown\", status)\n\t\t\tclose(shutdownChan)\n\t\t}\n\n\t\tlogging.Error(\"levant\/deploy: deployment %v has status %s, Levant will now exit\", depID, status)\n\t\tc.checkFailedDeployment(&depID)\n\t\treturn false, fmt.Errorf(\"deployment failed\")\n\t}\n}\n\n\/\/ canaryAutoPromote handles Levant's canary-auto-promote functionality.\nfunc (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) {\n\n\t\/\/ Setup the AutoPromote timer.\n\tautoPromote := time.After(time.Duration(waitTime) * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-autoPromote:\n\t\t\tlogging.Info(\"levant\/deploy: auto-promote period %vs has been reached for deployment %s\",\n\t\t\t\twaitTime, depID)\n\n\t\t\t\/\/ Check the deployment is healthy before promoting.\n\t\t\tif healthy := c.checkCanaryDeploymentHealth(depID); !healthy {\n\t\t\t\tlogging.Error(\"levant\/deploy: the canary deployment %s has unhealthy allocations, unable to promote\", depID)\n\t\t\t\tclose(deploymentChan)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogging.Info(\"levant\/deploy: triggering auto promote of deployment %s\", depID)\n\n\t\t\t\/\/ Promote the deployment.\n\t\t\t_, _, err := c.nomad.Deployments().PromoteAll(depID, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogging.Error(\"levant\/deploy: unable to promote deployment %s: %v\", depID, err)\n\t\t\t\tclose(deploymentChan)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-shutdownChan:\n\t\t\tlogging.Info(\"levant\/deploy: canary auto promote has been shutdown\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ checkCanaryDeploymentHealth is used to check the health status of each\n\/\/ task-group within a canary deployment.\nfunc (c *nomadClient) checkCanaryDeploymentHealth(depID string) (healthy bool) {\n\n\tvar unhealthy int\n\n\tdep, _, err := c.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: true})\n\tif err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to query deployment %s for health: %v\", depID, err)\n\t\treturn\n\t}\n\n\t\/\/ Itertate each task in the deployment to determine is health status. If an\n\t\/\/ unhealthy task is found, incrament the unhealthy counter.\n\tfor taskName, taskInfo := range dep.TaskGroups {\n\t\tif taskInfo.DesiredCanaries != taskInfo.HealthyAllocs {\n\t\t\tlogging.Error(\"levant\/deploy: task %s has unhealthy allocations in deployment %s\", taskName, depID)\n\t\t\tunhealthy++\n\t\t}\n\t}\n\n\t\/\/ If zero unhealthy tasks were found, continue with the auto promotion.\n\tif unhealthy == 0 {\n\t\tlogging.Debug(\"levant\/deploy: deployment %s has 0 unhealthy allocations\", depID)\n\t\thealthy = true\n\t}\n\n\treturn\n}\n\n\/\/ getDeploymentID finds the Nomad deploymentID associated to a Nomad\n\/\/ evaluationID. This is only needed as sometimes Nomad initially returns eval\n\/\/ info with an empty deploymentID; and a retry is required in order to get the\n\/\/ updated response from Nomad.\nfunc (c *nomadClient) getDeploymentID(evalID string) (depID string, err error) {\n\n\tvar evalInfo *nomad.Evaluation\n\n\tfor {\n\t\tif evalInfo, _, err = c.nomad.Evaluations().Info(evalID, nil); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif evalInfo.DeploymentID == \"\" {\n\t\t\tlogging.Debug(\"levant\/deploy: Nomad returned an empty deployment for evaluation %v; retrying\", evalID)\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn evalInfo.DeploymentID, nil\n}\n\n\/\/ dynamicGroupCountUpdater takes the templated and rendered job and updates the\n\/\/ group counts based on the currently deployed job; if its running.\nfunc (c *nomadClient) dynamicGroupCountUpdater(job *nomad.Job) error {\n\n\t\/\/ Gather information about the current state, if any, of the job on the\n\t\/\/ Nomad cluster.\n\trJob, _, err := c.nomad.Jobs().Info(*job.Name, &nomad.QueryOptions{})\n\n\t\/\/ This is a hack due to GH-1849; we check the error string for 404 which\n\t\/\/ indicates the job is not running, not that there was an error in the API\n\t\/\/ call.\n\tif err != nil && strings.Contains(err.Error(), \"404\") {\n\t\tlogging.Info(\"levant\/deploy: job %s not running, using template file group counts\", *job.Name)\n\t\treturn nil\n\t} else if err != nil {\n\t\tlogging.Error(\"levant\/deploy: unable to perform job evaluation: %v\", err)\n\t\treturn err\n\t}\n\n\t\/\/ Iterate the templated job and the Nomad returned job and update group count\n\t\/\/ based on matches.\n\tfor _, rGroup := range rJob.TaskGroups {\n\t\tfor _, group := range job.TaskGroups {\n\t\t\tif *rGroup.Name == *group.Name {\n\t\t\t\tlogging.Info(\"levant\/deploy: using dynamic count %v for job %s and group %s\",\n\t\t\t\t\t*rGroup.Count, *job.Name, *group.Name)\n\t\t\t\tgroup.Count = rGroup.Count\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*-\n * Copyright 2014 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/square\/go-jose\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"jose-util\"\n\tapp.Usage = \"command-line utility to deal with JOSE objects\"\n\tapp.Version = \"0.0.2\"\n\tapp.Author = \"\"\n\tapp.Email = \"\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"encrypt\",\n\t\t\tUsage: \"encrypt a plaintext\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"key, k\",\n\t\t\t\t\tUsage: \"Path to key file (PEM\/DER)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"algorithm, alg\",\n\t\t\t\t\tUsage: \"Key management algorithm (e.g. RSA-OAEP)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"encryption, enc\",\n\t\t\t\t\tUsage: \"Content encryption algorithm (e.g. A128GCM)\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"full, f\",\n\t\t\t\t\tUsage: \"Use full serialization format (instead of compact)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tkeyBytes, err := ioutil.ReadFile(requiredFlag(c, \"key\"))\n\t\t\t\texitOnError(err, \"unable to read key file\")\n\n\t\t\t\tpub, err := jose.LoadPublicKey(keyBytes)\n\t\t\t\texitOnError(err, \"unable to read public key\")\n\n\t\t\t\talg := jose.KeyAlgorithm(requiredFlag(c, \"alg\"))\n\t\t\t\tenc := jose.ContentEncryption(requiredFlag(c, \"enc\"))\n\n\t\t\t\tcrypter, err := jose.NewEncrypter(alg, enc, pub)\n\t\t\t\texitOnError(err, \"unable to instantiate encrypter\")\n\n\t\t\t\tobj, err := crypter.Encrypt(readInput(c.String(\"input\")))\n\t\t\t\texitOnError(err, \"unable to encrypt\")\n\n\t\t\t\tvar msg string\n\t\t\t\tif c.Bool(\"full\") {\n\t\t\t\t\tmsg = obj.FullSerialize()\n\t\t\t\t} else {\n\t\t\t\t\tmsg, err = obj.CompactSerialize()\n\t\t\t\t\texitOnError(err, \"unable to serialize message\")\n\t\t\t\t}\n\n\t\t\t\twriteOutput(c.String(\"output\"), []byte(msg))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"decrypt\",\n\t\t\tUsage: \"decrypt a plaintext\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"key, k\",\n\t\t\t\t\tUsage: \"Path to key file (PEM\/DER)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tkeyBytes, err := ioutil.ReadFile(requiredFlag(c, \"key\"))\n\t\t\t\texitOnError(err, \"unable to read private key\")\n\n\t\t\t\tpriv, err := jose.LoadPrivateKey(keyBytes)\n\t\t\t\texitOnError(err, \"unable to read private key\")\n\n\t\t\t\tobj, err := jose.ParseEncrypted(string(readInput(c.String(\"input\"))))\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\tplaintext, err := obj.Decrypt(priv)\n\t\t\t\texitOnError(err, \"unable to decrypt message\")\n\n\t\t\t\twriteOutput(c.String(\"output\"), plaintext)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dump\",\n\t\t\tUsage: \"parse & dump message in full serialization format\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"Message format (JWE\/JWS, defaults to JWE)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinput := string(readInput(c.String(\"input\")))\n\n\t\t\t\tvar serialized string\n\t\t\t\tvar err error\n\t\t\t\tswitch c.String(\"format\") {\n\t\t\t\tcase \"\", \"JWE\":\n\t\t\t\t\tvar jwe *jose.JsonWebEncryption\n\t\t\t\t\tjwe, err = jose.ParseEncrypted(input)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tserialized = jwe.FullSerialize()\n\t\t\t\t\t}\n\t\t\t\tcase \"JWS\":\n\t\t\t\t\tvar jws *jose.JsonWebSignature\n\t\t\t\t\tjws, err = jose.ParseSigned(input)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tserialized = jws.FullSerialize()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\tvar raw map[string]interface{}\n\t\t\t\terr = json.Unmarshal([]byte(serialized), &raw)\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\toutput, err := json.MarshalIndent(&raw, \"\", \"\\t\")\n\t\t\t\texitOnError(err, \"unable to serialize message\")\n\n\t\t\t\twriteOutput(c.String(\"output\"), output)\n\t\t\t},\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\texitOnError(err, \"unable to run application\")\n}\n\n\/\/ Retrieve value of a required flag\nfunc requiredFlag(c *cli.Context, flag string) string {\n\tvalue := c.String(flag)\n\tif value == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"missing required flag --%s\\n\", flag)\n\t\tos.Exit(1)\n\t}\n\treturn value\n}\n\n\/\/ Exit and print error message if we encountered a problem\nfunc exitOnError(err error, msg string) {\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", msg, err)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Read input from file or stdin\nfunc readInput(path string) []byte {\n\tvar bytes []byte\n\tvar err error\n\n\tif path != \"\" {\n\t\tbytes, err = ioutil.ReadFile(path)\n\t} else {\n\t\tbytes, err = ioutil.ReadAll(os.Stdin)\n\t}\n\n\texitOnError(err, \"unable to read input\")\n\treturn bytes\n}\n\n\/\/ Write output to file or stdin\nfunc writeOutput(path string, data []byte) {\n\tvar err error\n\n\tif path != \"\" {\n\t\terr = ioutil.WriteFile(path, data, 0644)\n\t} else {\n\t\t_, err = os.Stdout.Write(data)\n\t}\n\n\texitOnError(err, \"unable to write output\")\n}\n<commit_msg>Add sign command to jose-util.<commit_after>\/*-\n * Copyright 2014 Square Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/square\/go-jose\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"jose-util\"\n\tapp.Usage = \"command-line utility to deal with JOSE objects\"\n\tapp.Version = \"0.0.2\"\n\tapp.Author = \"\"\n\tapp.Email = \"\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"encrypt\",\n\t\t\tUsage: \"encrypt a plaintext\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"key, k\",\n\t\t\t\t\tUsage: \"Path to key file (PEM\/DER)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"algorithm, alg\",\n\t\t\t\t\tUsage: \"Key management algorithm (e.g. RSA-OAEP)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"encryption, enc\",\n\t\t\t\t\tUsage: \"Content encryption algorithm (e.g. A128GCM)\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"full, f\",\n\t\t\t\t\tUsage: \"Use full serialization format (instead of compact)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tkeyBytes, err := ioutil.ReadFile(requiredFlag(c, \"key\"))\n\t\t\t\texitOnError(err, \"unable to read key file\")\n\n\t\t\t\tpub, err := jose.LoadPublicKey(keyBytes)\n\t\t\t\texitOnError(err, \"unable to read public key\")\n\n\t\t\t\talg := jose.KeyAlgorithm(requiredFlag(c, \"alg\"))\n\t\t\t\tenc := jose.ContentEncryption(requiredFlag(c, \"enc\"))\n\n\t\t\t\tcrypter, err := jose.NewEncrypter(alg, enc, pub)\n\t\t\t\texitOnError(err, \"unable to instantiate encrypter\")\n\n\t\t\t\tobj, err := crypter.Encrypt(readInput(c.String(\"input\")))\n\t\t\t\texitOnError(err, \"unable to encrypt\")\n\n\t\t\t\tvar msg string\n\t\t\t\tif c.Bool(\"full\") {\n\t\t\t\t\tmsg = obj.FullSerialize()\n\t\t\t\t} else {\n\t\t\t\t\tmsg, err = obj.CompactSerialize()\n\t\t\t\t\texitOnError(err, \"unable to serialize message\")\n\t\t\t\t}\n\n\t\t\t\twriteOutput(c.String(\"output\"), []byte(msg))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"decrypt\",\n\t\t\tUsage: \"decrypt a plaintext\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"key, k\",\n\t\t\t\t\tUsage: \"Path to key file (PEM\/DER)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tkeyBytes, err := ioutil.ReadFile(requiredFlag(c, \"key\"))\n\t\t\t\texitOnError(err, \"unable to read private key\")\n\n\t\t\t\tpriv, err := jose.LoadPrivateKey(keyBytes)\n\t\t\t\texitOnError(err, \"unable to read private key\")\n\n\t\t\t\tobj, err := jose.ParseEncrypted(string(readInput(c.String(\"input\"))))\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\tplaintext, err := obj.Decrypt(priv)\n\t\t\t\texitOnError(err, \"unable to decrypt message\")\n\n\t\t\t\twriteOutput(c.String(\"output\"), plaintext)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dump\",\n\t\t\tUsage: \"parse & dump message in full serialization format\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"format, f\",\n\t\t\t\t\tUsage: \"Message format (JWE\/JWS, defaults to JWE)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tinput := string(readInput(c.String(\"input\")))\n\n\t\t\t\tvar serialized string\n\t\t\t\tvar err error\n\t\t\t\tswitch c.String(\"format\") {\n\t\t\t\tcase \"\", \"JWE\":\n\t\t\t\t\tvar jwe *jose.JsonWebEncryption\n\t\t\t\t\tjwe, err = jose.ParseEncrypted(input)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tserialized = jwe.FullSerialize()\n\t\t\t\t\t}\n\t\t\t\tcase \"JWS\":\n\t\t\t\t\tvar jws *jose.JsonWebSignature\n\t\t\t\t\tjws, err = jose.ParseSigned(input)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tserialized = jws.FullSerialize()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\tvar raw map[string]interface{}\n\t\t\t\terr = json.Unmarshal([]byte(serialized), &raw)\n\t\t\t\texitOnError(err, \"unable to parse message\")\n\n\t\t\t\toutput, err := json.MarshalIndent(&raw, \"\", \"\\t\")\n\t\t\t\texitOnError(err, \"unable to serialize message\")\n\n\t\t\t\twriteOutput(c.String(\"output\"), output)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"sign\",\n\t\t\tUsage: \"sign a text\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"key, k\",\n\t\t\t\t\tUsage: \"Path to key file (PEM\/DER)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"input, in\",\n\t\t\t\t\tUsage: \"Path to input file (stdin if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, out\",\n\t\t\t\t\tUsage: \"Path to output file (stdout if missing)\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"full, f\",\n\t\t\t\t\tUsage: \"Use full serialization format (instead of compact)\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tkeyBytes, err := ioutil.ReadFile(requiredFlag(c, \"key\"))\n\t\t\t\texitOnError(err, \"unable to read key file\")\n\n\t\t\t\tsigningKey, err := jose.LoadPrivateKey(keyBytes)\n\t\t\t\texitOnError(err, \"unable to read private key\")\n\n\t\t\t\tsigner, err := jose.NewSigner(jose.SignatureAlgorithm(\"RS256\"), signingKey)\n\t\t\t\texitOnError(err, \"unable to make signer\")\n\t\t\t\tobj, err := signer.Sign(readInput(c.String(\"input\")))\n\t\t\t\texitOnError(err, \"unable to sign\")\n\n\t\t\t\tvar msg string\n\t\t\t\tif c.Bool(\"full\") {\n\t\t\t\t\tmsg = obj.FullSerialize()\n\t\t\t\t} else {\n\t\t\t\t\tmsg, err = obj.CompactSerialize()\n\t\t\t\t\texitOnError(err, \"unable to serialize message\")\n\t\t\t\t}\n\n\t\t\t\twriteOutput(c.String(\"output\"), []byte(msg))\n\t\t\t},\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\texitOnError(err, \"unable to run application\")\n}\n\n\/\/ Retrieve value of a required flag\nfunc requiredFlag(c *cli.Context, flag string) string {\n\tvalue := c.String(flag)\n\tif value == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"missing required flag --%s\\n\", flag)\n\t\tos.Exit(1)\n\t}\n\treturn value\n}\n\n\/\/ Exit and print error message if we encountered a problem\nfunc exitOnError(err error, msg string) {\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", msg, err)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Read input from file or stdin\nfunc readInput(path string) []byte {\n\tvar bytes []byte\n\tvar err error\n\n\tif path != \"\" {\n\t\tbytes, err = ioutil.ReadFile(path)\n\t} else {\n\t\tbytes, err = ioutil.ReadAll(os.Stdin)\n\t}\n\n\texitOnError(err, \"unable to read input\")\n\treturn bytes\n}\n\n\/\/ Write output to file or stdin\nfunc writeOutput(path string, data []byte) {\n\tvar err error\n\n\tif path != \"\" {\n\t\terr = ioutil.WriteFile(path, data, 0644)\n\t} else {\n\t\t_, err = os.Stdout.Write(data)\n\t}\n\n\texitOnError(err, \"unable to write output\")\n}\n<|endoftext|>"} {"text":"<commit_before>package permissions\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/crypto\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n\t\"github.com\/cozy\/cozy-stack\/web\/middlewares\"\n\t\"github.com\/labstack\/echo\"\n\tjwt \"gopkg.in\/dgrijalva\/jwt-go.v3\"\n)\n\n\/\/ exports all constants from pkg\/permissions to avoid double imports\nvar (\n\tALL = permissions.ALL\n\tGET = permissions.GET\n\tPUT = permissions.PUT\n\tPOST = permissions.POST\n\tPATCH = permissions.PATCH\n\tDELETE = permissions.DELETE\n)\n\n\/\/ keyPicker choose the proper instance key depending on token audience\nfunc keyPicker(i *instance.Instance) jwt.Keyfunc {\n\treturn func(token *jwt.Token) (interface{}, error) {\n\t\tswitch token.Claims.(*permissions.Claims).Audience {\n\t\tcase permissions.AppAudience:\n\t\t\treturn i.SessionSecret, nil\n\t\tcase permissions.RefreshTokenAudience, permissions.AccessTokenAudience:\n\t\t\treturn i.OAuthSecret, nil\n\t\t}\n\t\treturn nil, permissions.ErrInvalidAudience\n\t}\n}\n\nconst bearerAuthScheme = \"Bearer \"\n\n\/\/ ErrNoToken is returned whe the request has no token\nvar ErrNoToken = errors.New(\"No token in request\")\n\nvar registerTokenPermissions = permissions.Set{\n\tpermissions.Rule{\n\t\tVerbs: permissions.Verbs(GET),\n\t\tType: consts.Settings,\n\t\tValues: []string{\"instance\"},\n\t},\n}\n\nfunc getBearerToken(c echo.Context) string {\n\theader := c.Request().Header.Get(echo.HeaderAuthorization)\n\tif strings.HasPrefix(header, bearerAuthScheme) {\n\t\treturn header[len(bearerAuthScheme):]\n\t}\n\treturn \"\"\n}\n\nfunc getQueryToken(c echo.Context) string {\n\treturn c.QueryParam(\"bearer_token\")\n}\n\n\/\/ ContextPermissionSet is the key used in echo context to store permissions set\nconst ContextPermissionSet = \"permissions_set\"\n\n\/\/ ContextClaims is the key used in echo context to store claims\n\/\/ #nosec\nconst ContextClaims = \"token_claims\"\n\n\/\/ Extractor extracts the permission set from the context\nfunc Extractor(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\n\t\t_, _, err := extract(c)\n\t\t\/\/ if no token is provided, we call next anyway,\n\t\t\/\/ hopefully the next handler does not need permissions\n\t\tif err != nil && err != ErrNoToken {\n\t\t\treturn err\n\t\t}\n\n\t\treturn next(c)\n\t}\n}\n\nfunc extract(c echo.Context) (*permissions.Claims, *permissions.Set, error) {\n\tinstance := middlewares.GetInstance(c)\n\tvar claims permissions.Claims\n\tvar err error\n\n\tif token := getBearerToken(c); token != \"\" {\n\t\terr = crypto.ParseJWT(token, keyPicker(instance), &claims)\n\t} else if token := getQueryToken(c); token != \"\" {\n\t\terr = crypto.ParseJWT(token, keyPicker(instance), &claims)\n\t} else {\n\t\treturn nil, nil, ErrNoToken\n\t}\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif claims.Issuer != instance.Domain {\n\t\t\/\/ invalid issuer in token\n\t\treturn nil, nil, permissions.ErrInvalidToken\n\t}\n\n\tvar pdoc *permissions.Permission\n\tvar pset *permissions.Set\n\tif claims.Audience == permissions.AppAudience {\n\t\t\/\/ app token, fetch permissions from couchdb\n\t\tpdoc, err = permissions.GetForApp(instance, claims.Subject)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tpset = &pdoc.Permissions\n\t} else if claims.Audience == permissions.AccessTokenAudience {\n\t\t\/\/ Oauth token, extract permissions from JWT-encoded scope\n\t\tpset, err = permissions.UnmarshalScopeString(claims.Scope)\n\t} else if claims.Audience == permissions.RegistrationTokenAudience {\n\t\tpset = ®isterTokenPermissions\n\t} else {\n\t\terr = fmt.Errorf(\"Unrecognized token audience %v\", claims.Audience)\n\t}\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc.Set(ContextClaims, claims)\n\tc.Set(ContextPermissionSet, pset)\n\n\treturn &claims, pset, err\n}\n\nfunc getPermission(c echo.Context) (*permissions.Set, error) {\n\n\ts, ok := c.Get(ContextPermissionSet).(permissions.Set)\n\tif ok {\n\t\treturn &s, nil\n\t}\n\n\t_, set, err := extract(c)\n\tif err != nil {\n\t\treturn nil, echo.NewHTTPError(http.StatusUnauthorized)\n\t}\n\n\treturn set, nil\n}\n\n\/\/ Allow validate the validable object against the context permission set\nfunc Allow(c echo.Context, v permissions.Verb, o permissions.Validable) error {\n\tpset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !pset.Allow(v, o) {\n\t\treturn echo.NewHTTPError(http.StatusForbidden)\n\t}\n\treturn nil\n}\n\n\/\/ AllowTypeAndID validate a type & ID against the context permission set\nfunc AllowTypeAndID(c echo.Context, v permissions.Verb, doctype, id string) error {\n\tpset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !pset.AllowID(v, doctype, id) {\n\t\treturn echo.NewHTTPError(http.StatusForbidden)\n\t}\n\treturn nil\n}\n\nfunc displayPermissions(c echo.Context) error {\n\tset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.JSON(200, set)\n}\n\n\/\/ Routes sets the routing for the status service\nfunc Routes(router *echo.Group) {\n\t\/\/ API Routes\n\trouter.GET(\"\/self\", displayPermissions)\n}\n<commit_msg>fix registrationToken -> registerToken<commit_after>package permissions\n\nimport (\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/crypto\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n\t\"github.com\/cozy\/cozy-stack\/web\/middlewares\"\n\t\"github.com\/labstack\/echo\"\n\tjwt \"gopkg.in\/dgrijalva\/jwt-go.v3\"\n)\n\n\/\/ exports all constants from pkg\/permissions to avoid double imports\nvar (\n\tALL = permissions.ALL\n\tGET = permissions.GET\n\tPUT = permissions.PUT\n\tPOST = permissions.POST\n\tPATCH = permissions.PATCH\n\tDELETE = permissions.DELETE\n)\n\n\/\/ keyPicker choose the proper instance key depending on token audience\nfunc keyPicker(i *instance.Instance) jwt.Keyfunc {\n\treturn func(token *jwt.Token) (interface{}, error) {\n\t\tswitch token.Claims.(*permissions.Claims).Audience {\n\t\tcase permissions.AppAudience:\n\t\t\treturn i.SessionSecret, nil\n\t\tcase permissions.RefreshTokenAudience, permissions.AccessTokenAudience:\n\t\t\treturn i.OAuthSecret, nil\n\t\t}\n\t\treturn nil, permissions.ErrInvalidAudience\n\t}\n}\n\nconst bearerAuthScheme = \"Bearer \"\n\n\/\/ ErrNoToken is returned whe the request has no token\nvar ErrNoToken = errors.New(\"No token in request\")\n\nvar registerTokenPermissions = permissions.Set{\n\tpermissions.Rule{\n\t\tVerbs: permissions.Verbs(GET),\n\t\tType: consts.Settings,\n\t\tValues: []string{\"instance\"},\n\t},\n}\n\nfunc getBearerToken(c echo.Context) string {\n\theader := c.Request().Header.Get(echo.HeaderAuthorization)\n\tif strings.HasPrefix(header, bearerAuthScheme) {\n\t\treturn header[len(bearerAuthScheme):]\n\t}\n\treturn \"\"\n}\n\nfunc getQueryToken(c echo.Context) string {\n\treturn c.QueryParam(\"bearer_token\")\n}\n\n\/\/ ContextPermissionSet is the key used in echo context to store permissions set\nconst ContextPermissionSet = \"permissions_set\"\n\n\/\/ ContextClaims is the key used in echo context to store claims\n\/\/ #nosec\nconst ContextClaims = \"token_claims\"\n\n\/\/ Extractor extracts the permission set from the context\nfunc Extractor(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\n\t\t_, _, err := extract(c)\n\t\t\/\/ if no token is provided, we call next anyway,\n\t\t\/\/ hopefully the next handler does not need permissions\n\t\tif err != nil && err != ErrNoToken {\n\t\t\treturn err\n\t\t}\n\n\t\treturn next(c)\n\t}\n}\n\nfunc extract(c echo.Context) (*permissions.Claims, *permissions.Set, error) {\n\tinstance := middlewares.GetInstance(c)\n\tvar claims permissions.Claims\n\tvar err error\n\n\tif token := getBearerToken(c); token != \"\" {\n\t\terr = crypto.ParseJWT(token, keyPicker(instance), &claims)\n\t} else if token := getQueryToken(c); token != \"\" {\n\t\terr = crypto.ParseJWT(token, keyPicker(instance), &claims)\n\t} else {\n\t\treturn nil, nil, ErrNoToken\n\t}\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif claims.Issuer != instance.Domain {\n\t\t\/\/ invalid issuer in token\n\t\treturn nil, nil, permissions.ErrInvalidToken\n\t}\n\n\tvar pdoc *permissions.Permission\n\tvar pset *permissions.Set\n\tif claims.Audience == permissions.AppAudience {\n\t\t\/\/ app token, fetch permissions from couchdb\n\t\tpdoc, err = permissions.GetForApp(instance, claims.Subject)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tpset = &pdoc.Permissions\n\t} else if claims.Audience == permissions.AccessTokenAudience {\n\t\t\/\/ Oauth token, extract permissions from JWT-encoded scope\n\t\tpset, err = permissions.UnmarshalScopeString(claims.Scope)\n\t} else if registerToken := []byte(c.QueryParam(\"registerToken\"));\n\t\t\tsubtle.ConstantTimeCompare(registerToken, instance.RegisterToken) == 1 {\n\t\tpset = ®isterTokenPermissions\n\t} else {\n\t\terr = fmt.Errorf(\"Unrecognized token audience %v\", claims.Audience)\n\t}\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc.Set(ContextClaims, claims)\n\tc.Set(ContextPermissionSet, pset)\n\n\treturn &claims, pset, err\n}\n\nfunc getPermission(c echo.Context) (*permissions.Set, error) {\n\n\ts, ok := c.Get(ContextPermissionSet).(permissions.Set)\n\tif ok {\n\t\treturn &s, nil\n\t}\n\n\t_, set, err := extract(c)\n\tif err != nil {\n\t\treturn nil, echo.NewHTTPError(http.StatusUnauthorized)\n\t}\n\n\treturn set, nil\n}\n\n\/\/ Allow validate the validable object against the context permission set\nfunc Allow(c echo.Context, v permissions.Verb, o permissions.Validable) error {\n\tpset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !pset.Allow(v, o) {\n\t\treturn echo.NewHTTPError(http.StatusForbidden)\n\t}\n\treturn nil\n}\n\n\/\/ AllowTypeAndID validate a type & ID against the context permission set\nfunc AllowTypeAndID(c echo.Context, v permissions.Verb, doctype, id string) error {\n\tpset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !pset.AllowID(v, doctype, id) {\n\t\treturn echo.NewHTTPError(http.StatusForbidden)\n\t}\n\treturn nil\n}\n\nfunc displayPermissions(c echo.Context) error {\n\tset, err := getPermission(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.JSON(200, set)\n}\n\nfunc constantTimeEqual(a, b string) bool {\n\treturn 1 ==\n}\n\n\/\/ Routes sets the routing for the status service\nfunc Routes(router *echo.Group) {\n\t\/\/ API Routes\n\trouter.GET(\"\/self\", displayPermissions)\n}\n<|endoftext|>"} {"text":"<commit_before>package app_test\n\nimport (\n\t\"github.com\/timeredbull\/tsuru\/api\/app\"\n\t\"github.com\/timeredbull\/tsuru\/api\/bind\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestAppIsABinderApp(t *testing.T) {\n\tvar a bind.App\n\tiface := reflect.ValueOf(&a)\n\tobj := reflect.ValueOf(&app.App{})\n\tif !obj.Type().Implements(iface.Elem().Type()) {\n\t\tt.Errorf(\"app.App should implement bind.App\")\n\t}\n}\n\nfunc TestDestroyShouldUnbindAppFromInstance(t *testing.T) {\n\tdb.Session, _ = db.Open(\"127.0.0.1:27017\", \"tsuru_app_bind_test\")\n\tdefer func() {\n\t\tdb.Session.Apps().Database.DropDatabase()\n\t\tdb.Session.Close()\n\t}()\n\tinstance := service.ServiceInstance{\n\t\tName: \"MyInstance\",\n\t\tApps: []string{\"myApp\"},\n\t}\n\terr := instance.Create()\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tdefer db.Session.ServiceInstances().Remove(bson.M{\"_id\": instance.Name})\n\ta := app.App{\n\t\tName: \"myApp\",\n\t}\n\terr = a.Create()\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tdefer db.Session.Apps().Remove(bson.M{\"name\": a.Name})\n\terr = a.Destroy()\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tn, _ := db.Session.ServiceInstances().Find(bson.M{\"apps\": bson.M{\"$in\": []string{a.Name}}}).Count()\n\tif n != 0 {\n\t\tt.Errorf(\"Should unbind apps when destroying them, but did not.\\nExpected 0 apps to be binded to the instance %s, got %d.\", instance.Name, n)\n\t}\n}\n<commit_msg>api\/app: using app's suite in bind_test.go<commit_after>package app\n\nimport (\n\t\"github.com\/timeredbull\/tsuru\/api\/bind\"\n\t\"github.com\/timeredbull\/tsuru\/api\/service\"\n\t\"github.com\/timeredbull\/tsuru\/db\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t. \"launchpad.net\/gocheck\"\n)\n\nfunc (s *S) TestAppIsABinderApp(c *C) {\n\tvar app bind.App\n\tc.Assert(&App{}, Implements, &app)\n}\n\nfunc (s *S) TestDestroyShouldUnbindAppFromInstance(c *C) {\n\tinstance := service.ServiceInstance{Name: \"MyInstance\", Apps: []string{\"myApp\"}}\n\terr := instance.Create()\n\tc.Assert(err, IsNil)\n\tdefer db.Session.ServiceInstances().Remove(bson.M{\"_id\": instance.Name})\n\ta := App{Name: \"myApp\"}\n\terr = a.Create()\n\tc.Assert(err, IsNil)\n\tdefer db.Session.Apps().Remove(bson.M{\"name\": a.Name})\n\terr = a.Destroy()\n\tc.Assert(err, IsNil)\n\tn, _ := db.Session.ServiceInstances().Find(bson.M{\"apps\": bson.M{\"$in\": []string{a.Name}}}).Count()\n\tc.Assert(n, Equals, 0)\n}\n<|endoftext|>"} {"text":"<commit_before>package scummatlas\n\nimport \"testing\"\nimport \"io\/ioutil\"\n\nfunc TestImage(t *testing.T) {\n\tdata, err := ioutil.ReadFile(\".\/testdata\/objects\/scummbardoor.dump\")\n\tif err != nil {\n\t\tt.Errorf(\"Error reading the file\")\n\t}\n\tobject := NewObjectFromOBCD(data)\n\tif len(object.Verbs) != 2 {\n\t\tt.Errorf(\"Incorrect number of verbs\")\n\t}\n\tif object.Id != 428 {\n\t\tt.Errorf(\"Incorrect id \")\n\t}\n\tif object.Name != \"door\" {\n\t\tt.Errorf(\"Incorrect name\")\n\t}\n\tif object.Width != 40 {\n\t\tt.Errorf(\"Incorrect width\")\n\t}\n\tif object.Height != 56 {\n\t\tt.Errorf(\"Incorrect height\")\n\t}\n\tif object.X != 696 {\n\t\tt.Errorf(\"Incorrect X\")\n\t}\n\tif object.Y != 80 {\n\t\tt.Errorf(\"Incorrect Y\")\n\t}\n\tif object.Parent != 0 {\n\t\tt.Errorf(\"Incorrect Parent %x\", object.Parent)\n\t}\n\tif object.Flags != 0 {\n\t\tt.Errorf(\"Incorrect Flags %x\", object.Flags)\n\t}\n\n}\n<commit_msg>Test to fix<commit_after>package scummatlas\n\nimport \"testing\"\nimport \"io\/ioutil\"\n\nfunc TestImage(t *testing.T) {\n\tdata, err := ioutil.ReadFile(\".\/testdata\/objects\/scummbardoor.dump\")\n\tif err != nil {\n\t\tt.Errorf(\"Error reading the file\")\n\t}\n\tobject := NewObjectFromOBCD(data)\n\t\/*\n\t\tif len(object.Verbs) != 2 {\n\t\t\tt.Errorf(\"Incorrect number of verbs\")\n\t\t}\n\t*\/\n\tif object.Id != 428 {\n\t\tt.Errorf(\"Incorrect id \")\n\t}\n\tif object.Name != \"door\" {\n\t\tt.Errorf(\"Incorrect name\")\n\t}\n\tif object.Width != 40 {\n\t\tt.Errorf(\"Incorrect width\")\n\t}\n\tif object.Height != 56 {\n\t\tt.Errorf(\"Incorrect height\")\n\t}\n\tif object.X != 696 {\n\t\tt.Errorf(\"Incorrect X\")\n\t}\n\tif object.Y != 80 {\n\t\tt.Errorf(\"Incorrect Y\")\n\t}\n\tif object.Parent != 0 {\n\t\tt.Errorf(\"Incorrect Parent %x\", object.Parent)\n\t}\n\tif object.Flags != 0 {\n\t\tt.Errorf(\"Incorrect Flags %x\", object.Flags)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ebiten\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/buffered\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/clock\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/debug\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/driver\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphics\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/hooks\"\n)\n\ntype uiContext struct {\n\tgame Game\n\toffscreen *Image\n\tscreen *Image\n\n\tupdateCalled bool\n\n\toutsideWidth float64\n\toutsideHeight float64\n\n\terr atomic.Value\n\n\tm sync.Mutex\n}\n\nvar theUIContext = &uiContext{}\n\nfunc (c *uiContext) set(game Game) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tc.game = game\n}\n\nfunc (c *uiContext) setError(err error) {\n\tc.err.Store(err)\n}\n\nfunc (c *uiContext) Layout(outsideWidth, outsideHeight float64) {\n\t\/\/ The given outside size can be 0 e.g. just after restoring from the fullscreen mode on Windows (#1589)\n\t\/\/ Just ignore such cases. Otherwise, creating a zero-sized framebuffer causes a panic.\n\tif outsideWidth == 0 || outsideHeight == 0 {\n\t\treturn\n\t}\n\tc.outsideWidth = outsideWidth\n\tc.outsideHeight = outsideHeight\n}\n\nfunc (c *uiContext) updateOffscreen() {\n\tow, oh := c.game.Layout(int(c.outsideWidth), int(c.outsideHeight))\n\tif ow <= 0 || oh <= 0 {\n\t\tpanic(\"ebiten: Layout must return positive numbers\")\n\t}\n\n\t\/\/ TODO: This is duplicated with mobile\/ebitenmobileview\/funcs.go. Refactor this.\n\td := uiDriver().DeviceScaleFactor()\n\tsw, sh := int(c.outsideWidth*d), int(c.outsideHeight*d)\n\n\tif c.screen != nil {\n\t\tif w, h := c.screen.Size(); w != sw || h != sh {\n\t\t\tc.screen.Dispose()\n\t\t\tc.screen = nil\n\t\t}\n\t}\n\tif c.screen == nil {\n\t\tc.screen = newScreenFramebufferImage(int(c.outsideWidth*d), int(c.outsideHeight*d))\n\t}\n\n\tif c.offscreen != nil {\n\t\tif w, h := c.offscreen.Size(); w != ow || h != oh {\n\t\t\tc.offscreen.Dispose()\n\t\t\tc.offscreen = nil\n\t\t}\n\t}\n\tif c.offscreen == nil {\n\t\tc.offscreen = NewImage(ow, oh)\n\t\tc.offscreen.mipmap.SetVolatile(IsScreenClearedEveryFrame())\n\t}\n}\n\nfunc (c *uiContext) setScreenClearedEveryFrame(cleared bool) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tif c.offscreen != nil {\n\t\tc.offscreen.mipmap.SetVolatile(cleared)\n\t}\n}\n\nfunc (c *uiContext) screenScale(deviceScaleFactor float64) float64 {\n\tif c.offscreen == nil {\n\t\treturn 0\n\t}\n\tsw, sh := c.offscreen.Size()\n\tscaleX := c.outsideWidth \/ float64(sw) * deviceScaleFactor\n\tscaleY := c.outsideHeight \/ float64(sh) * deviceScaleFactor\n\treturn math.Min(scaleX, scaleY)\n}\n\nfunc (c *uiContext) offsets(deviceScaleFactor float64) (float64, float64) {\n\tif c.offscreen == nil {\n\t\treturn 0, 0\n\t}\n\tsw, sh := c.offscreen.Size()\n\ts := c.screenScale(deviceScaleFactor)\n\twidth := float64(sw) * s\n\theight := float64(sh) * s\n\tx := (c.outsideWidth*deviceScaleFactor - width) \/ 2\n\ty := (c.outsideHeight*deviceScaleFactor - height) \/ 2\n\treturn x, y\n}\n\nfunc (c *uiContext) UpdateFrame() error {\n\t\/\/ TODO: If updateCount is 0 and vsync is disabled, swapping buffers can be skipped.\n\treturn c.updateFrame(clock.Update(MaxTPS()))\n}\n\nfunc (c *uiContext) ForceUpdateFrame() error {\n\t\/\/ ForceUpdate can be invoked even if uiContext it not initialized yet (#1591).\n\tif c.outsideWidth == 0 || c.outsideHeight == 0 {\n\t\treturn nil\n\t}\n\treturn c.updateFrame(1)\n}\n\nfunc (c *uiContext) updateFrame(updateCount int) error {\n\tif err, ok := c.err.Load().(error); ok && err != nil {\n\t\treturn err\n\t}\n\n\tdebug.Logf(\"----\\n\")\n\n\tif err := buffered.BeginFrame(); err != nil {\n\t\treturn err\n\t}\n\tif err := c.updateFrameImpl(updateCount); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ All the vertices data are consumed at the end of the frame, and the data backend can be\n\t\/\/ available after that. Until then, lock the vertices backend.\n\treturn graphics.LockAndResetVertices(func() error {\n\t\tif err := buffered.EndFrame(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (c *uiContext) updateFrameImpl(updateCount int) error {\n\tc.updateOffscreen()\n\n\t\/\/ Ensure that Update is called once before Draw so that Update can be used for initialization.\n\tif !c.updateCalled && updateCount == 0 {\n\t\tupdateCount = 1\n\t\tc.updateCalled = true\n\t}\n\tdebug.Logf(\"Update count per frame: %d\\n\", updateCount)\n\n\tfor i := 0; i < updateCount; i++ {\n\t\tif err := hooks.RunBeforeUpdateHooks(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.game.Update(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuiDriver().ResetForFrame()\n\t}\n\n\t\/\/ Even though updateCount == 0, the offscreen is cleared and Draw is called.\n\t\/\/ Draw should not update the game state and then the screen should not be updated without Update, but\n\t\/\/ users might want to process something at Draw with the time intervals of FPS.\n\tif IsScreenClearedEveryFrame() {\n\t\tc.offscreen.Clear()\n\t}\n\tc.game.Draw(c.offscreen)\n\n\tif uiDriver().Graphics().NeedsClearingScreen() {\n\t\t\/\/ This clear is needed for fullscreen mode or some mobile platforms (#622).\n\t\tc.screen.Clear()\n\t}\n\n\top := &DrawImageOptions{}\n\n\ts := c.screenScale(uiDriver().DeviceScaleFactor())\n\tswitch vd := uiDriver().Graphics().FramebufferYDirection(); vd {\n\tcase driver.Upward:\n\t\top.GeoM.Scale(s, -s)\n\t\t_, h := c.offscreen.Size()\n\t\top.GeoM.Translate(0, float64(h)*s)\n\tcase driver.Downward:\n\t\top.GeoM.Scale(s, s)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"ebiten: invalid v-direction: %d\", vd))\n\t}\n\n\top.GeoM.Translate(c.offsets(uiDriver().DeviceScaleFactor()))\n\top.CompositeMode = CompositeModeCopy\n\n\t\/\/ filterScreen works with >=1 scale, but does not well with <1 scale.\n\t\/\/ Use regular FilterLinear instead so far (#669).\n\tif s >= 1 {\n\t\top.Filter = filterScreen\n\t} else {\n\t\top.Filter = FilterLinear\n\t}\n\tc.screen.DrawImage(c.offscreen, op)\n\treturn nil\n}\n\nfunc (c *uiContext) AdjustPosition(x, y float64, deviceScaleFactor float64) (float64, float64) {\n\tox, oy := c.offsets(deviceScaleFactor)\n\ts := c.screenScale(deviceScaleFactor)\n\t\/\/ The scale 0 indicates that the offscreen is not initialized yet.\n\t\/\/ As any cursor values don't make sense, just return NaN.\n\tif s == 0 {\n\t\treturn math.NaN(), math.NaN()\n\t}\n\treturn (x*deviceScaleFactor - ox) \/ s, (y*deviceScaleFactor - oy) \/ s\n}\n<commit_msg>ebiten: Remove old comments<commit_after>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ebiten\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/buffered\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/clock\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/debug\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/driver\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphics\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/hooks\"\n)\n\ntype uiContext struct {\n\tgame Game\n\toffscreen *Image\n\tscreen *Image\n\n\tupdateCalled bool\n\n\toutsideWidth float64\n\toutsideHeight float64\n\n\terr atomic.Value\n\n\tm sync.Mutex\n}\n\nvar theUIContext = &uiContext{}\n\nfunc (c *uiContext) set(game Game) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tc.game = game\n}\n\nfunc (c *uiContext) setError(err error) {\n\tc.err.Store(err)\n}\n\nfunc (c *uiContext) Layout(outsideWidth, outsideHeight float64) {\n\t\/\/ The given outside size can be 0 e.g. just after restoring from the fullscreen mode on Windows (#1589)\n\t\/\/ Just ignore such cases. Otherwise, creating a zero-sized framebuffer causes a panic.\n\tif outsideWidth == 0 || outsideHeight == 0 {\n\t\treturn\n\t}\n\tc.outsideWidth = outsideWidth\n\tc.outsideHeight = outsideHeight\n}\n\nfunc (c *uiContext) updateOffscreen() {\n\td := uiDriver().DeviceScaleFactor()\n\tsw, sh := int(c.outsideWidth*d), int(c.outsideHeight*d)\n\n\tow, oh := c.game.Layout(int(c.outsideWidth), int(c.outsideHeight))\n\tif ow <= 0 || oh <= 0 {\n\t\tpanic(\"ebiten: Layout must return positive numbers\")\n\t}\n\n\tif c.screen != nil {\n\t\tif w, h := c.screen.Size(); w != sw || h != sh {\n\t\t\tc.screen.Dispose()\n\t\t\tc.screen = nil\n\t\t}\n\t}\n\tif c.screen == nil {\n\t\tc.screen = newScreenFramebufferImage(int(c.outsideWidth*d), int(c.outsideHeight*d))\n\t}\n\n\tif c.offscreen != nil {\n\t\tif w, h := c.offscreen.Size(); w != ow || h != oh {\n\t\t\tc.offscreen.Dispose()\n\t\t\tc.offscreen = nil\n\t\t}\n\t}\n\tif c.offscreen == nil {\n\t\tc.offscreen = NewImage(ow, oh)\n\t\tc.offscreen.mipmap.SetVolatile(IsScreenClearedEveryFrame())\n\t}\n}\n\nfunc (c *uiContext) setScreenClearedEveryFrame(cleared bool) {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tif c.offscreen != nil {\n\t\tc.offscreen.mipmap.SetVolatile(cleared)\n\t}\n}\n\nfunc (c *uiContext) screenScale(deviceScaleFactor float64) float64 {\n\tif c.offscreen == nil {\n\t\treturn 0\n\t}\n\tsw, sh := c.offscreen.Size()\n\tscaleX := c.outsideWidth \/ float64(sw) * deviceScaleFactor\n\tscaleY := c.outsideHeight \/ float64(sh) * deviceScaleFactor\n\treturn math.Min(scaleX, scaleY)\n}\n\nfunc (c *uiContext) offsets(deviceScaleFactor float64) (float64, float64) {\n\tif c.offscreen == nil {\n\t\treturn 0, 0\n\t}\n\tsw, sh := c.offscreen.Size()\n\ts := c.screenScale(deviceScaleFactor)\n\twidth := float64(sw) * s\n\theight := float64(sh) * s\n\tx := (c.outsideWidth*deviceScaleFactor - width) \/ 2\n\ty := (c.outsideHeight*deviceScaleFactor - height) \/ 2\n\treturn x, y\n}\n\nfunc (c *uiContext) UpdateFrame() error {\n\t\/\/ TODO: If updateCount is 0 and vsync is disabled, swapping buffers can be skipped.\n\treturn c.updateFrame(clock.Update(MaxTPS()))\n}\n\nfunc (c *uiContext) ForceUpdateFrame() error {\n\t\/\/ ForceUpdate can be invoked even if uiContext it not initialized yet (#1591).\n\tif c.outsideWidth == 0 || c.outsideHeight == 0 {\n\t\treturn nil\n\t}\n\treturn c.updateFrame(1)\n}\n\nfunc (c *uiContext) updateFrame(updateCount int) error {\n\tif err, ok := c.err.Load().(error); ok && err != nil {\n\t\treturn err\n\t}\n\n\tdebug.Logf(\"----\\n\")\n\n\tif err := buffered.BeginFrame(); err != nil {\n\t\treturn err\n\t}\n\tif err := c.updateFrameImpl(updateCount); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ All the vertices data are consumed at the end of the frame, and the data backend can be\n\t\/\/ available after that. Until then, lock the vertices backend.\n\treturn graphics.LockAndResetVertices(func() error {\n\t\tif err := buffered.EndFrame(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (c *uiContext) updateFrameImpl(updateCount int) error {\n\tc.updateOffscreen()\n\n\t\/\/ Ensure that Update is called once before Draw so that Update can be used for initialization.\n\tif !c.updateCalled && updateCount == 0 {\n\t\tupdateCount = 1\n\t\tc.updateCalled = true\n\t}\n\tdebug.Logf(\"Update count per frame: %d\\n\", updateCount)\n\n\tfor i := 0; i < updateCount; i++ {\n\t\tif err := hooks.RunBeforeUpdateHooks(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.game.Update(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuiDriver().ResetForFrame()\n\t}\n\n\t\/\/ Even though updateCount == 0, the offscreen is cleared and Draw is called.\n\t\/\/ Draw should not update the game state and then the screen should not be updated without Update, but\n\t\/\/ users might want to process something at Draw with the time intervals of FPS.\n\tif IsScreenClearedEveryFrame() {\n\t\tc.offscreen.Clear()\n\t}\n\tc.game.Draw(c.offscreen)\n\n\tif uiDriver().Graphics().NeedsClearingScreen() {\n\t\t\/\/ This clear is needed for fullscreen mode or some mobile platforms (#622).\n\t\tc.screen.Clear()\n\t}\n\n\top := &DrawImageOptions{}\n\n\ts := c.screenScale(uiDriver().DeviceScaleFactor())\n\tswitch vd := uiDriver().Graphics().FramebufferYDirection(); vd {\n\tcase driver.Upward:\n\t\top.GeoM.Scale(s, -s)\n\t\t_, h := c.offscreen.Size()\n\t\top.GeoM.Translate(0, float64(h)*s)\n\tcase driver.Downward:\n\t\top.GeoM.Scale(s, s)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"ebiten: invalid v-direction: %d\", vd))\n\t}\n\n\top.GeoM.Translate(c.offsets(uiDriver().DeviceScaleFactor()))\n\top.CompositeMode = CompositeModeCopy\n\n\t\/\/ filterScreen works with >=1 scale, but does not well with <1 scale.\n\t\/\/ Use regular FilterLinear instead so far (#669).\n\tif s >= 1 {\n\t\top.Filter = filterScreen\n\t} else {\n\t\top.Filter = FilterLinear\n\t}\n\tc.screen.DrawImage(c.offscreen, op)\n\treturn nil\n}\n\nfunc (c *uiContext) AdjustPosition(x, y float64, deviceScaleFactor float64) (float64, float64) {\n\tox, oy := c.offsets(deviceScaleFactor)\n\ts := c.screenScale(deviceScaleFactor)\n\t\/\/ The scale 0 indicates that the offscreen is not initialized yet.\n\t\/\/ As any cursor values don't make sense, just return NaN.\n\tif s == 0 {\n\t\treturn math.NaN(), math.NaN()\n\t}\n\treturn (x*deviceScaleFactor - ox) \/ s, (y*deviceScaleFactor - oy) \/ s\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\/svc\"\n\t\"golang.org\/x\/sys\/windows\/svc\/eventlog\"\n)\n\nconst name = \"mackerel-agent\"\n\nconst defaultEid = 1\nconst startEid = 2\nconst stopEid = 3\nconst loggerEid = 4\n\nvar (\n\tkernel32 = syscall.NewLazyDLL(\"kernel32\")\n\tprocAllocConsole = kernel32.NewProc(\"AllocConsole\")\n\tprocGenerateConsoleCtrlEvent = kernel32.NewProc(\"GenerateConsoleCtrlEvent\")\n\tprocGetModuleFileName = kernel32.NewProc(\"GetModuleFileNameW\")\n)\n\nfunc main() {\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tswitch os.Args[1] {\n\t\tcase \"install\":\n\t\t\terr = installService(\"mackerel-agent\", \"mackerel agent\")\n\t\tcase \"remove\":\n\t\t\terr = removeService(\"mackerel-agent\")\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\telog, err := eventlog.Open(name)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tdefer elog.Close()\n\n\t\/\/ `svc.Run` blocks until windows service will stopped.\n\t\/\/ ref. https:\/\/msdn.microsoft.com\/library\/cc429362.aspx\n\terr = svc.Run(name, &handler{elog: elog})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\ntype Logger interface {\n\tInfo(eid uint32, msg string) error\n\tWarning(eid uint32, msg string) error\n\tError(eid uint32, msg string) error\n}\n\ntype handler struct {\n\telog Logger\n\tcmd *exec.Cmd\n}\n\n\/\/ ex.\n\/\/ verbose log: 2017\/01\/21 22:21:08 command.go:434: DEBUG <command> received 'immediate' chan\n\/\/ normal log: 2017\/01\/24 14:14:27 INFO <main> Starting mackerel-agent version:0.36.0\nvar logRe = regexp.MustCompile(`^\\d{4}\/\\d{2}\/\\d{2} \\d{2}:\\d{2}:\\d{2} (?:.+\\.go:\\d+: )?([A-Z]+) `)\n\nfunc (h *handler) start() error {\n\tprocAllocConsole.Call()\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"))\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tcmd.Dir = dir\n\n\th.cmd = cmd\n\tr, w := io.Pipe()\n\tcmd.Stderr = w\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbr := bufio.NewReader(r)\n\tlc := make(chan string, 10)\n\tdone := make(chan struct{})\n\n\t\/\/ It need to read data from pipe continuously. And it need to close handle\n\t\/\/ when process finished.\n\t\/\/ read data from pipe. When data arrived at EOL, send line-string to the\n\t\/\/ channel. Also remaining line to EOF.\n\tgo func() {\n\t\tdefer w.Close()\n\n\t\t\/\/ pipe stderr to windows event log\n\t\tvar body bytes.Buffer\n\t\tfor {\n\t\t\tb, err := br.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\th.elog.Error(loggerEid, err.Error())\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif b == '\\n' {\n\t\t\t\tif body.Len() > 0 {\n\t\t\t\t\tlc <- body.String()\n\t\t\t\t\tbody.Reset()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody.WriteByte(b)\n\t\t}\n\t\tif body.Len() > 0 {\n\t\t\tlc <- body.String()\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\tgo func() {\n\t\tlinebuf := []string{}\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase line := <-lc:\n\t\t\t\tlinebuf = append(linebuf, line)\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t\/\/ When it take 10ms, it is located at end of paragraph. Then\n\t\t\t\t\/\/ slice appended at above should be the paragraph.\n\t\t\t\tif len(linebuf) > 0 {\n\t\t\t\t\tif match := logRe.FindStringSubmatch(linebuf[0]); match != nil {\n\t\t\t\t\t\tline := strings.Join(linebuf, \"\\n\")\n\t\t\t\t\t\tlevel := match[1]\n\t\t\t\t\t\tswitch level {\n\t\t\t\t\t\tcase \"TRACE\", \"DEBUG\", \"INFO\":\n\t\t\t\t\t\t\th.elog.Info(defaultEid, line)\n\t\t\t\t\t\tcase \"WARNING\":\n\t\t\t\t\t\t\th.elog.Warning(defaultEid, line)\n\t\t\t\t\t\tcase \"ERROR\", \"CRITICAL\":\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tlinebuf = nil\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\tbreak loop\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(lc)\n\t\tclose(done)\n\t}()\n\treturn nil\n}\n\nfunc interrupt(p *os.Process) error {\n\tr1, _, err := procGenerateConsoleCtrlEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(p.Pid))\n\tif r1 == 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (h *handler) stop() error {\n\tif h.cmd != nil && h.cmd.Process != nil {\n\t\terr := interrupt(h.cmd.Process)\n\t\tif err == nil {\n\t\t\tend := time.Now().Add(10 * time.Second)\n\t\t\tfor time.Now().Before(end) {\n\t\t\t\tif h.cmd.ProcessState != nil && h.cmd.ProcessState.Exited() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t\treturn h.cmd.Process.Kill()\n\t}\n\treturn nil\n}\n\n\/\/ implement https:\/\/godoc.org\/golang.org\/x\/sys\/windows\/svc#Handler\nfunc (h *handler) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {\n\ts <- svc.Status{State: svc.StartPending}\n\tdefer func() {\n\t\ts <- svc.Status{State: svc.Stopped}\n\t}()\n\n\tif err := h.start(); err != nil {\n\t\th.elog.Error(startEid, err.Error())\n\t\t\/\/ https:\/\/msdn.microsoft.com\/library\/windows\/desktop\/ms681383(v=vs.85).aspx\n\t\t\/\/ use ERROR_SERVICE_SPECIFIC_ERROR\n\t\treturn true, 1\n\t}\n\n\texit := make(chan struct{})\n\tgo func() {\n\t\terr := h.cmd.Wait()\n\t\t\/\/ enter when the child process exited\n\t\tif err != nil {\n\t\t\th.elog.Error(stopEid, err.Error())\n\t\t}\n\t\texit <- struct{}{}\n\t}()\n\n\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\nL:\n\tfor {\n\t\tselect {\n\t\tcase req := <-r:\n\t\t\tswitch req.Cmd {\n\t\t\tcase svc.Interrogate:\n\t\t\t\ts <- req.CurrentStatus\n\t\t\tcase svc.Stop, svc.Shutdown:\n\t\t\t\ts <- svc.Status{State: svc.StopPending, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\tif err := h.stop(); err != nil {\n\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tbreak L\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc execdir() string {\n\tvar wpath [syscall.MAX_PATH]uint16\n\tr1, _, err := procGetModuleFileName.Call(0, uintptr(unsafe.Pointer(&wpath[0])), uintptr(len(wpath)))\n\tif r1 == 0 {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Dir(syscall.UTF16ToString(wpath[:]))\n}\n<commit_msg>wait goroutines<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\/svc\"\n\t\"golang.org\/x\/sys\/windows\/svc\/eventlog\"\n)\n\nconst name = \"mackerel-agent\"\n\nconst defaultEid = 1\nconst startEid = 2\nconst stopEid = 3\nconst loggerEid = 4\n\nvar (\n\tkernel32 = syscall.NewLazyDLL(\"kernel32\")\n\tprocAllocConsole = kernel32.NewProc(\"AllocConsole\")\n\tprocGenerateConsoleCtrlEvent = kernel32.NewProc(\"GenerateConsoleCtrlEvent\")\n\tprocGetModuleFileName = kernel32.NewProc(\"GetModuleFileNameW\")\n)\n\nfunc main() {\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tswitch os.Args[1] {\n\t\tcase \"install\":\n\t\t\terr = installService(\"mackerel-agent\", \"mackerel agent\")\n\t\tcase \"remove\":\n\t\t\terr = removeService(\"mackerel-agent\")\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\telog, err := eventlog.Open(name)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tdefer elog.Close()\n\n\t\/\/ `svc.Run` blocks until windows service will stopped.\n\t\/\/ ref. https:\/\/msdn.microsoft.com\/library\/cc429362.aspx\n\terr = svc.Run(name, &handler{elog: elog})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\ntype Logger interface {\n\tInfo(eid uint32, msg string) error\n\tWarning(eid uint32, msg string) error\n\tError(eid uint32, msg string) error\n}\n\ntype handler struct {\n\telog Logger\n\tcmd *exec.Cmd\n\tr io.Reader\n\tw io.WriteCloser\n\twg sync.WaitGroup\n}\n\n\/\/ ex.\n\/\/ verbose log: 2017\/01\/21 22:21:08 command.go:434: DEBUG <command> received 'immediate' chan\n\/\/ normal log: 2017\/01\/24 14:14:27 INFO <main> Starting mackerel-agent version:0.36.0\nvar logRe = regexp.MustCompile(`^\\d{4}\/\\d{2}\/\\d{2} \\d{2}:\\d{2}:\\d{2} (?:.+\\.go:\\d+: )?([A-Z]+) `)\n\nfunc (h *handler) start() error {\n\tprocAllocConsole.Call()\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"))\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tcmd.Dir = dir\n\n\th.cmd = cmd\n\th.r, h.w = io.Pipe()\n\tcmd.Stderr = h.w\n\n\terr := h.cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn h.aggregate()\n}\n\nfunc (h *handler) aggregate() error {\n\tbr := bufio.NewReader(h.r)\n\tlc := make(chan string, 10)\n\tdone := make(chan struct{})\n\n\t\/\/ It need to read data from pipe continuously. And it need to close handle\n\t\/\/ when process finished.\n\t\/\/ read data from pipe. When data arrived at EOL, send line-string to the\n\t\/\/ channel. Also remaining line to EOF.\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\tdefer h.w.Close()\n\n\t\t\/\/ pipe stderr to windows event log\n\t\tvar body bytes.Buffer\n\t\tfor {\n\t\t\tb, err := br.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\th.elog.Error(loggerEid, err.Error())\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif b == '\\n' {\n\t\t\t\tif body.Len() > 0 {\n\t\t\t\t\tlc <- body.String()\n\t\t\t\t\tbody.Reset()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody.WriteByte(b)\n\t\t}\n\t\tif body.Len() > 0 {\n\t\t\tlc <- body.String()\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\n\t\tlinebuf := []string{}\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase line := <-lc:\n\t\t\t\tlinebuf = append(linebuf, line)\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t\/\/ When it take 10ms, it is located at end of paragraph. Then\n\t\t\t\t\/\/ slice appended at above should be the paragraph.\n\t\t\t\tif len(linebuf) > 0 {\n\t\t\t\t\tif match := logRe.FindStringSubmatch(linebuf[0]); match != nil {\n\t\t\t\t\t\tline := strings.Join(linebuf, \"\\n\")\n\t\t\t\t\t\tlevel := match[1]\n\t\t\t\t\t\tswitch level {\n\t\t\t\t\t\tcase \"TRACE\", \"DEBUG\", \"INFO\":\n\t\t\t\t\t\t\th.elog.Info(defaultEid, line)\n\t\t\t\t\t\tcase \"WARNING\":\n\t\t\t\t\t\t\th.elog.Warning(defaultEid, line)\n\t\t\t\t\t\tcase \"ERROR\", \"CRITICAL\":\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tlinebuf = nil\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\tbreak loop\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(lc)\n\t\tclose(done)\n\t}()\n\n\treturn nil\n}\n\nfunc interrupt(p *os.Process) error {\n\tr1, _, err := procGenerateConsoleCtrlEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(p.Pid))\n\tif r1 == 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (h *handler) stop() error {\n\tif h.cmd != nil && h.cmd.Process != nil {\n\t\terr := interrupt(h.cmd.Process)\n\t\tif err == nil {\n\t\t\tend := time.Now().Add(10 * time.Second)\n\t\t\tfor time.Now().Before(end) {\n\t\t\t\tif h.cmd.ProcessState != nil && h.cmd.ProcessState.Exited() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t\treturn h.cmd.Process.Kill()\n\t}\n\n\th.wg.Wait()\n\treturn nil\n}\n\n\/\/ implement https:\/\/godoc.org\/golang.org\/x\/sys\/windows\/svc#Handler\nfunc (h *handler) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {\n\ts <- svc.Status{State: svc.StartPending}\n\tdefer func() {\n\t\ts <- svc.Status{State: svc.Stopped}\n\t}()\n\n\tif err := h.start(); err != nil {\n\t\th.elog.Error(startEid, err.Error())\n\t\t\/\/ https:\/\/msdn.microsoft.com\/library\/windows\/desktop\/ms681383(v=vs.85).aspx\n\t\t\/\/ use ERROR_SERVICE_SPECIFIC_ERROR\n\t\treturn true, 1\n\t}\n\n\texit := make(chan struct{})\n\tgo func() {\n\t\terr := h.cmd.Wait()\n\t\t\/\/ enter when the child process exited\n\t\tif err != nil {\n\t\t\th.elog.Error(stopEid, err.Error())\n\t\t}\n\t\texit <- struct{}{}\n\t}()\n\n\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\nL:\n\tfor {\n\t\tselect {\n\t\tcase req := <-r:\n\t\t\tswitch req.Cmd {\n\t\t\tcase svc.Interrogate:\n\t\t\t\ts <- req.CurrentStatus\n\t\t\tcase svc.Stop, svc.Shutdown:\n\t\t\t\ts <- svc.Status{State: svc.StopPending, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\tif err := h.stop(); err != nil {\n\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tbreak L\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc execdir() string {\n\tvar wpath [syscall.MAX_PATH]uint16\n\tr1, _, err := procGetModuleFileName.Call(0, uintptr(unsafe.Pointer(&wpath[0])), uintptr(len(wpath)))\n\tif r1 == 0 {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Dir(syscall.UTF16ToString(wpath[:]))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ XMl Structs\ntype Tag struct {\n\tEnumID int `xml:\"enumID,attr\"`\n\tType string `xml:\"type,attr\"`\n\tValue int `xml:\"value,attr\"`\n\tStringValue string `xml:\",chardata\"` \/\/populated when Type==\"string\"\n}\n\ntype Entity struct {\n\tVersion string `xml:\"version,attr\"`\n\tCardID string `xml:\"CardID,attr\"`\n\tTags []Tag `xml:\"Tag\"`\n}\n\ntype CardDefs struct {\n\tEntities []Entity `xml:\"Entity\"`\n}\n\n\/\/ Conversion tables, credit to https:\/\/github.com\/Sembiance\/hearthstonejson\nvar EnumIDToString = map[int]string{\n\t185: \"CardName\",\n\t183: \"CardSet\",\n\t202: \"CardType\",\n\t201: \"Faction\",\n\t199: \"Class\",\n\t203: \"Rarity\",\n\t48: \"Cost\",\n\t251: \"AttackVisualType\",\n\t184: \"CardTextInHand\",\n\t47: \"Atk\",\n\t45: \"Health\",\n\t321: \"Collectible\",\n\t342: \"ArtistName\",\n\t351: \"FlavorText\",\n\t32: \"TriggerVisual\",\n\t330: \"EnchantmentBirthVisual\",\n\t331: \"EnchantmentIdleVisual\",\n\t268: \"DevState\",\n\t365: \"HowToGetThisGoldCard\",\n\t190: \"Taunt\",\n\t364: \"HowToGetThisCard\",\n\t338: \"OneTurnEffect\",\n\t293: \"Morph\",\n\t208: \"Freeze\",\n\t252: \"CardTextInPlay\",\n\t325: \"TargetingArrowText\",\n\t189: \"Windfury\",\n\t218: \"Battlecry\",\n\t200: \"Race\",\n\t192: \"Spellpower\",\n\t187: \"Durability\",\n\t197: \"Charge\",\n\t362: \"Aura\",\n\t361: \"HealTarget\",\n\t349: \"ImmuneToSpellpower\",\n\t194: \"Divine Shield\",\n\t350: \"AdjacentBuff\",\n\t217: \"Deathrattle\",\n\t191: \"Stealth\",\n\t220: \"Combo\",\n\t339: \"Silence\",\n\t212: \"Enrage\",\n\t370: \"AffectedBySpellPower\",\n\t240: \"Cant Be Damaged\",\n\t114: \"Elite\",\n\t219: \"Secret\",\n\t363: \"Poisonous\",\n\t215: \"Recall\",\n\t340: \"Counter\",\n\t205: \"Summoned\",\n\t367: \"AIMustPlay\",\n\t335: \"InvisibleDeathrattle\",\n\t377: \"UKNOWN_HasOnDrawEffect\",\n\t388: \"SparePart\",\n\t389: \"UNKNOWN_DuneMaulShaman\",\n\t380: \"UNKNOWN_Blackrock_Heroes\",\n\t396: \"UNKNOWN_Grand_Tournement_Fallen_Hero\",\n\t401: \"UNKNOWN_BroodAffliction\",\n\t402: \"UNKNOWN_Intense_Gaze\",\n\t403: \"Inspire\",\n\t404: \"UNKNOWN_Grand_Tournament_Arcane_Blast\",\n}\n\nvar CardSetIDToString = map[int]string{\n\t2: \"Basic\",\n\t3: \"Classic\",\n\t4: \"Reward\",\n\t5: \"Missions\",\n\t7: \"System\",\n\t8: \"Debug\",\n\t11: \"Promotion\",\n\t12: \"Curse of Naxxramas\",\n\t13: \"Goblins vs Gnomes\",\n\t14: \"Blackrock Mountain\",\n\t15: \"The Grand Tournament\",\n\t16: \"Credits\",\n\t17: \"Hero Skins\",\n\t18: \"Tavern Brawl\",\n}\n\nvar CardTypeIDToString = map[int]string{\n\t3: \"Hero\",\n\t4: \"Minion\",\n\t5: \"Spell\",\n\t6: \"Enchantment\",\n\t7: \"Weapon\",\n\t10: \"Hero Power\",\n}\n\nvar RarityIDToString = map[int]string{\n\t0: \"undefined\",\n\t1: \"Common\",\n\t2: \"Free\",\n\t3: \"Rare\",\n\t4: \"Epic\",\n\t5: \"Legendary\",\n}\n\nvar CardRaceIDToString = map[int]string{\n\t14: \"Murloc\",\n\t15: \"Demon\",\n\t20: \"Beast\",\n\t21: \"Totem\",\n\t23: \"Pirate\",\n\t24: \"Dragon\",\n\t17: \"Mech\",\n}\n\nvar ClassIDToString = map[int]string{\n\t0: \"undefined\",\n\t2: \"Druid\",\n\t3: \"Hunter\",\n\t4: \"Mage\",\n\t5: \"Paladin\",\n\t6: \"Priest\",\n\t7: \"Rogue\",\n\t8: \"Shaman\",\n\t9: \"Warlock\",\n\t10: \"Warrior\",\n\t11: \"Dream\",\n}\n\nvar FactionIDToString = map[int]string{\n\t1: \"Horde\",\n\t2: \"Alliance\",\n\t3: \"Neutral\",\n}\n\nvar Mechanics = []string{\n\t\"Windfury\", \"Combo\", \"Secret\", \"Battlecry\", \"Deathrattle\",\n\t\"Taunt\", \"Stealth\", \"Spellpower\", \"Enrage\", \"Freeze\",\n\t\"Charge\", \"Overload\", \"Divine Shield\", \"Silence\", \"Morph\",\n\t\"OneTurnEffect\", \"Poisonous\", \"Aura\", \"AdjacentBuff\",\n\t\"HealTarget\", \"GrantCharge\", \"ImmuneToSpellpower\",\n\t\"AffectedBySpellPower\", \"Summoned\", \"Inspire\",\n}\n\nfunc IsMechanic(s string) bool {\n\tfor _, mechanic := range Mechanics {\n\t\tif strings.ToLower(mechanic) == strings.ToLower(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ JSON Card struct\ntype JsonCard struct {\n\tName string `json:\"name\"`\n\tID string `json:\"id\"`\n\tAttack int `json:\"attack,omitempty\"`\n\tHealth int `json:\"health,omitempty\"`\n\tCost int `json:\"cost,omitempty\"`\n\tDurability int `json:\"durability,omitempty\"`\n\tType string `json:\"type\"`\n\tRarity string `json:\"rarity\"`\n\tText string `json:\"text,omitempty\"`\n\tInPlayText string `json:\"inPlayText,omitempty\"`\n\tFlavor string `json:\"flavor,omitempty\"`\n\tSet string `json:\"set\"`\n\tClass string `json:\"playerClass,omitempty\"`\n\tRace string `json:\"race,omitempty\"`\n\tMechanics []string `json:\"mechanics,omitempty\"`\n\tFaction string `json:\"faction,omitempty\"`\n\tArtist string `json:\"artist,omitempty\"`\n\tCollectible bool `json:\"collectible\"`\n\tElite bool `json:\"elite,omitempty\"`\n\tHowToGet string `json:\"howToGet,omitempty\"`\n\tHowToGetGold string `json:\"howToGetGold,omitempty\"`\n}\n\nfunc EntityToJson(e Entity) JsonCard {\n\tvar card JsonCard\n\tcard.ID = e.CardID\n\tfor _, tag := range e.Tags {\n\t\tenum := EnumIDToString[tag.EnumID]\n\t\tswitch {\n\t\tcase enum == \"CardName\":\n\t\t\tcard.Name = tag.StringValue\n\t\tcase enum == \"CardSet\":\n\t\t\tcard.Set = CardSetIDToString[tag.Value]\n\t\tcase enum == \"CardType\":\n\t\t\tcard.Type = CardTypeIDToString[tag.Value]\n\t\tcase enum == \"CardRace\":\n\t\t\tcard.Race = CardRaceIDToString[tag.Value]\n\t\tcase enum == \"Class\":\n\t\t\tcard.Class = ClassIDToString[tag.Value]\n\t\tcase enum == \"Rarity\":\n\t\t\tcard.Rarity = RarityIDToString[tag.Value]\n\t\tcase enum == \"Faction\":\n\t\t\tcard.Faction = FactionIDToString[tag.Value]\n\t\tcase enum == \"Atk\":\n\t\t\tcard.Attack = tag.Value\n\t\tcase enum == \"Health\":\n\t\t\tcard.Health = tag.Value\n\t\tcase enum == \"Durability\":\n\t\t\tcard.Durability = tag.Value\n\t\tcase enum == \"Cost\":\n\t\t\tcard.Cost = tag.Value\n\t\tcase enum == \"CardTextInHand\":\n\t\t\tcard.Text = tag.StringValue\n\t\tcase enum == \"CardTextInPlay\":\n\t\t\tcard.InPlayText = tag.StringValue\n\t\tcase enum == \"FlavorText\":\n\t\t\tcard.Flavor = tag.StringValue\n\t\tcase enum == \"Collectible\":\n\t\t\tcard.Collectible = tag.Value == 1\n\t\tcase enum == \"ArtistName\":\n\t\t\tcard.Artist = tag.StringValue\n\t\tcase enum == \"Elite\":\n\t\t\tcard.Elite = tag.Value == 1\n\t\tcase enum == \"HowToGetThisCard\":\n\t\t\tcard.HowToGet = tag.StringValue\n\t\tcase enum == \"HowToGetThisGoldCard\":\n\t\t\tcard.HowToGetGold = tag.StringValue\n\t\tcase IsMechanic(enum):\n\t\t\tcard.Mechanics = append(card.Mechanics, enum)\n\t\t}\n\n\t}\n\tif len(card.Class) == 0 {\n\t\tcard.Class = \"Neutral\"\n\t}\n\treturn card\n}\n\nfunc PrintUsageAndExit() {\n\tfmt.Println(\"Transform a Hearthstone xml file extracted from cardxml0.unity3d to JSON\")\n\tfmt.Println(\"Usage: hearthstone-json path\/to\/enUS.txt\")\n\tos.Exit(1)\n}\n\nfunc main() {\n\t\/\/ Get input from stdin or from an xml file supplied as the first\n\t\/\/ positional argument\n\tinput := os.Stdin\n\tif len(os.Args) > 1 {\n\t\targ := os.Args[1]\n\t\t\/\/ Handle help -- just in case!\n\t\tif arg == \"-h\" || arg == \"--help\" {\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t\t\/\/ Open the xml file\n\t\txmlPath, err := filepath.Abs(arg)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t\tinput, err = os.Open(xmlPath)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t}\n\tdefer input.Close()\n\t\/\/ Decode the XML\n\tvar cardDefs CardDefs\n\tif err := xml.NewDecoder(input).Decode(&cardDefs); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Transform the data\n\tvar jsonCards []JsonCard\n\tfor _, entity := range cardDefs.Entities {\n\t\tjsonCards = append(jsonCards, EntityToJson(entity))\n\t}\n\t\/\/ Output JSON!\n\tjsonData, err := json.MarshalIndent(jsonCards, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\toutput := os.Stdout\n\tdefer output.Close()\n\toutput.Write(jsonData)\n}\n<commit_msg>Add The League of Explorers card set<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ XMl Structs\ntype Tag struct {\n\tEnumID int `xml:\"enumID,attr\"`\n\tType string `xml:\"type,attr\"`\n\tValue int `xml:\"value,attr\"`\n\tStringValue string `xml:\",chardata\"` \/\/populated when Type==\"string\"\n}\n\ntype Entity struct {\n\tVersion string `xml:\"version,attr\"`\n\tCardID string `xml:\"CardID,attr\"`\n\tTags []Tag `xml:\"Tag\"`\n}\n\ntype CardDefs struct {\n\tEntities []Entity `xml:\"Entity\"`\n}\n\n\/\/ Conversion tables, credit to https:\/\/github.com\/Sembiance\/hearthstonejson\nvar EnumIDToString = map[int]string{\n\t185: \"CardName\",\n\t183: \"CardSet\",\n\t202: \"CardType\",\n\t201: \"Faction\",\n\t199: \"Class\",\n\t203: \"Rarity\",\n\t48: \"Cost\",\n\t251: \"AttackVisualType\",\n\t184: \"CardTextInHand\",\n\t47: \"Atk\",\n\t45: \"Health\",\n\t321: \"Collectible\",\n\t342: \"ArtistName\",\n\t351: \"FlavorText\",\n\t32: \"TriggerVisual\",\n\t330: \"EnchantmentBirthVisual\",\n\t331: \"EnchantmentIdleVisual\",\n\t268: \"DevState\",\n\t365: \"HowToGetThisGoldCard\",\n\t190: \"Taunt\",\n\t364: \"HowToGetThisCard\",\n\t338: \"OneTurnEffect\",\n\t293: \"Morph\",\n\t208: \"Freeze\",\n\t252: \"CardTextInPlay\",\n\t325: \"TargetingArrowText\",\n\t189: \"Windfury\",\n\t218: \"Battlecry\",\n\t200: \"Race\",\n\t192: \"Spellpower\",\n\t187: \"Durability\",\n\t197: \"Charge\",\n\t362: \"Aura\",\n\t361: \"HealTarget\",\n\t349: \"ImmuneToSpellpower\",\n\t194: \"Divine Shield\",\n\t350: \"AdjacentBuff\",\n\t217: \"Deathrattle\",\n\t191: \"Stealth\",\n\t220: \"Combo\",\n\t339: \"Silence\",\n\t212: \"Enrage\",\n\t370: \"AffectedBySpellPower\",\n\t240: \"Cant Be Damaged\",\n\t114: \"Elite\",\n\t219: \"Secret\",\n\t363: \"Poisonous\",\n\t215: \"Recall\",\n\t340: \"Counter\",\n\t205: \"Summoned\",\n\t367: \"AIMustPlay\",\n\t335: \"InvisibleDeathrattle\",\n\t377: \"UKNOWN_HasOnDrawEffect\",\n\t388: \"SparePart\",\n\t389: \"UNKNOWN_DuneMaulShaman\",\n\t380: \"UNKNOWN_Blackrock_Heroes\",\n\t396: \"UNKNOWN_Grand_Tournement_Fallen_Hero\",\n\t401: \"UNKNOWN_BroodAffliction\",\n\t402: \"UNKNOWN_Intense_Gaze\",\n\t403: \"Inspire\",\n\t404: \"UNKNOWN_Grand_Tournament_Arcane_Blast\",\n}\n\nvar CardSetIDToString = map[int]string{\n\t2: \"Basic\",\n\t3: \"Classic\",\n\t4: \"Reward\",\n\t5: \"Missions\",\n\t7: \"System\",\n\t8: \"Debug\",\n\t11: \"Promotion\",\n\t12: \"Curse of Naxxramas\",\n\t13: \"Goblins vs Gnomes\",\n\t14: \"Blackrock Mountain\",\n\t15: \"The Grand Tournament\",\n\t16: \"Credits\",\n\t17: \"Hero Skins\",\n\t18: \"Tavern Brawl\",\n\t20: \"The League of Explorers\",\n}\n\nvar CardTypeIDToString = map[int]string{\n\t3: \"Hero\",\n\t4: \"Minion\",\n\t5: \"Spell\",\n\t6: \"Enchantment\",\n\t7: \"Weapon\",\n\t10: \"Hero Power\",\n}\n\nvar RarityIDToString = map[int]string{\n\t0: \"undefined\",\n\t1: \"Common\",\n\t2: \"Free\",\n\t3: \"Rare\",\n\t4: \"Epic\",\n\t5: \"Legendary\",\n}\n\nvar CardRaceIDToString = map[int]string{\n\t14: \"Murloc\",\n\t15: \"Demon\",\n\t20: \"Beast\",\n\t21: \"Totem\",\n\t23: \"Pirate\",\n\t24: \"Dragon\",\n\t17: \"Mech\",\n}\n\nvar ClassIDToString = map[int]string{\n\t0: \"undefined\",\n\t2: \"Druid\",\n\t3: \"Hunter\",\n\t4: \"Mage\",\n\t5: \"Paladin\",\n\t6: \"Priest\",\n\t7: \"Rogue\",\n\t8: \"Shaman\",\n\t9: \"Warlock\",\n\t10: \"Warrior\",\n\t11: \"Dream\",\n}\n\nvar FactionIDToString = map[int]string{\n\t1: \"Horde\",\n\t2: \"Alliance\",\n\t3: \"Neutral\",\n}\n\nvar Mechanics = []string{\n\t\"Windfury\", \"Combo\", \"Secret\", \"Battlecry\", \"Deathrattle\",\n\t\"Taunt\", \"Stealth\", \"Spellpower\", \"Enrage\", \"Freeze\",\n\t\"Charge\", \"Overload\", \"Divine Shield\", \"Silence\", \"Morph\",\n\t\"OneTurnEffect\", \"Poisonous\", \"Aura\", \"AdjacentBuff\",\n\t\"HealTarget\", \"GrantCharge\", \"ImmuneToSpellpower\",\n\t\"AffectedBySpellPower\", \"Summoned\", \"Inspire\",\n}\n\nfunc IsMechanic(s string) bool {\n\tfor _, mechanic := range Mechanics {\n\t\tif strings.ToLower(mechanic) == strings.ToLower(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ JSON Card struct\ntype JsonCard struct {\n\tName string `json:\"name\"`\n\tID string `json:\"id\"`\n\tAttack int `json:\"attack,omitempty\"`\n\tHealth int `json:\"health,omitempty\"`\n\tCost int `json:\"cost,omitempty\"`\n\tDurability int `json:\"durability,omitempty\"`\n\tType string `json:\"type\"`\n\tRarity string `json:\"rarity\"`\n\tText string `json:\"text,omitempty\"`\n\tInPlayText string `json:\"inPlayText,omitempty\"`\n\tFlavor string `json:\"flavor,omitempty\"`\n\tSet string `json:\"set\"`\n\tClass string `json:\"playerClass,omitempty\"`\n\tRace string `json:\"race,omitempty\"`\n\tMechanics []string `json:\"mechanics,omitempty\"`\n\tFaction string `json:\"faction,omitempty\"`\n\tArtist string `json:\"artist,omitempty\"`\n\tCollectible bool `json:\"collectible\"`\n\tElite bool `json:\"elite,omitempty\"`\n\tHowToGet string `json:\"howToGet,omitempty\"`\n\tHowToGetGold string `json:\"howToGetGold,omitempty\"`\n}\n\nfunc EntityToJson(e Entity) JsonCard {\n\tvar card JsonCard\n\tcard.ID = e.CardID\n\tfor _, tag := range e.Tags {\n\t\tenum := EnumIDToString[tag.EnumID]\n\t\tswitch {\n\t\tcase enum == \"CardName\":\n\t\t\tcard.Name = tag.StringValue\n\t\tcase enum == \"CardSet\":\n\t\t\tcard.Set = CardSetIDToString[tag.Value]\n\t\tcase enum == \"CardType\":\n\t\t\tcard.Type = CardTypeIDToString[tag.Value]\n\t\tcase enum == \"CardRace\":\n\t\t\tcard.Race = CardRaceIDToString[tag.Value]\n\t\tcase enum == \"Class\":\n\t\t\tcard.Class = ClassIDToString[tag.Value]\n\t\tcase enum == \"Rarity\":\n\t\t\tcard.Rarity = RarityIDToString[tag.Value]\n\t\tcase enum == \"Faction\":\n\t\t\tcard.Faction = FactionIDToString[tag.Value]\n\t\tcase enum == \"Atk\":\n\t\t\tcard.Attack = tag.Value\n\t\tcase enum == \"Health\":\n\t\t\tcard.Health = tag.Value\n\t\tcase enum == \"Durability\":\n\t\t\tcard.Durability = tag.Value\n\t\tcase enum == \"Cost\":\n\t\t\tcard.Cost = tag.Value\n\t\tcase enum == \"CardTextInHand\":\n\t\t\tcard.Text = tag.StringValue\n\t\tcase enum == \"CardTextInPlay\":\n\t\t\tcard.InPlayText = tag.StringValue\n\t\tcase enum == \"FlavorText\":\n\t\t\tcard.Flavor = tag.StringValue\n\t\tcase enum == \"Collectible\":\n\t\t\tcard.Collectible = tag.Value == 1\n\t\tcase enum == \"ArtistName\":\n\t\t\tcard.Artist = tag.StringValue\n\t\tcase enum == \"Elite\":\n\t\t\tcard.Elite = tag.Value == 1\n\t\tcase enum == \"HowToGetThisCard\":\n\t\t\tcard.HowToGet = tag.StringValue\n\t\tcase enum == \"HowToGetThisGoldCard\":\n\t\t\tcard.HowToGetGold = tag.StringValue\n\t\tcase IsMechanic(enum):\n\t\t\tcard.Mechanics = append(card.Mechanics, enum)\n\t\t}\n\n\t}\n\tif len(card.Class) == 0 {\n\t\tcard.Class = \"Neutral\"\n\t}\n\treturn card\n}\n\nfunc PrintUsageAndExit() {\n\tfmt.Println(\"Transform a Hearthstone xml file extracted from cardxml0.unity3d to JSON\")\n\tfmt.Println(\"Usage: hearthstone-json path\/to\/enUS.txt\")\n\tos.Exit(1)\n}\n\nfunc main() {\n\t\/\/ Get input from stdin or from an xml file supplied as the first\n\t\/\/ positional argument\n\tinput := os.Stdin\n\tif len(os.Args) > 1 {\n\t\targ := os.Args[1]\n\t\t\/\/ Handle help -- just in case!\n\t\tif arg == \"-h\" || arg == \"--help\" {\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t\t\/\/ Open the xml file\n\t\txmlPath, err := filepath.Abs(arg)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t\tinput, err = os.Open(xmlPath)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tPrintUsageAndExit()\n\t\t}\n\t}\n\tdefer input.Close()\n\t\/\/ Decode the XML\n\tvar cardDefs CardDefs\n\tif err := xml.NewDecoder(input).Decode(&cardDefs); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Transform the data\n\tvar jsonCards []JsonCard\n\tfor _, entity := range cardDefs.Entities {\n\t\tjsonCards = append(jsonCards, EntityToJson(entity))\n\t}\n\t\/\/ Output JSON!\n\tjsonData, err := json.MarshalIndent(jsonCards, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\toutput := os.Stdout\n\tdefer output.Close()\n\toutput.Write(jsonData)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\tjwtmiddleware \"github.com\/auth0\/go-jwt-middleware\"\n\t\"github.com\/form3tech-oss\/jwt-go\"\n\t\"github.com\/go-martini\/martini\"\n)\n\nfunc main() {\n\n\tStartServer()\n\n}\n\nfunc StartServer() {\n\tm := martini.Classic()\n\n\tjwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{\n\t\tValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(\"My Secret\"), nil\n\t\t},\n\t\tSigningMethod: jwt.SigningMethodHS256,\n\t})\n\n\tm.Get(\"\/ping\", PingHandler)\n\tm.Get(\"\/secured\/ping\", jwtMiddleware.CheckJWT, SecuredPingHandler)\n\n\tm.Run()\n}\n\ntype Response struct {\n\tText string `json:\"text\"`\n}\n\nfunc respondJSON(text string, w http.ResponseWriter) {\n\tresponse := Response{text}\n\n\tjsonResponse, err := json.Marshal(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(jsonResponse)\n}\n\nfunc PingHandler(w http.ResponseWriter, r *http.Request) {\n\trespondJSON(\"All good. You don't need to be authenticated to call this\", w)\n}\n\nfunc SecuredPingHandler(w http.ResponseWriter, r *http.Request) {\n\trespondJSON(\"All good. You only get this message if you're authenticated\", w)\n}\n<commit_msg>Update how to handle jwtMiddleware in Martini (#78)<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\tjwtmiddleware \"github.com\/auth0\/go-jwt-middleware\"\n\t\"github.com\/form3tech-oss\/jwt-go\"\n\t\"github.com\/go-martini\/martini\"\n)\n\nfunc main() {\n\n\tStartServer()\n\n}\n\nfunc StartServer() {\n\tm := martini.Classic()\n\n\tjwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{\n\t\tValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(\"My Secret\"), nil\n\t\t},\n\t\tSigningMethod: jwt.SigningMethodHS256,\n\t})\n\n\tm.Get(\"\/ping\", PingHandler)\n\tm.Get(\"\/secured\/ping\", func(w http.ResponseWriter, r *http.Request) {\n\t\tjwtMiddleware.CheckJWT(w, r)\n\t}, SecuredPingHandler)\n\n\tm.Run()\n}\n\ntype Response struct {\n\tText string `json:\"text\"`\n}\n\nfunc respondJSON(text string, w http.ResponseWriter) {\n\tresponse := Response{text}\n\n\tjsonResponse, err := json.Marshal(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(jsonResponse)\n}\n\nfunc PingHandler(w http.ResponseWriter, r *http.Request) {\n\trespondJSON(\"All good. You don't need to be authenticated to call this\", w)\n}\n\nfunc SecuredPingHandler(w http.ResponseWriter, r *http.Request) {\n\trespondJSON(\"All good. You only get this message if you're authenticated\", w)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cache\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n)\n\n\/\/ Create a cache that holds the given number of items, evicting the least\n\/\/ recently used item when more space is needed.\nfunc NewLruCache(capacity uint) Cache {\n\treturn nil\n}\n\ntype lruCache struct {\n\tmutex sync.RWMutex\n\tcapacity uint\n\n\t\/\/ List of elements, with least recently used at the tail.\n\telems list.List\n\n\t\/\/ Index int `elems` for lookup by key.\n\tindex map[string]*list.Element\n}\n\nfunc (c *lruCache) Insert(key string, value interface{}) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\t\/\/ Make sure the key isn't already present.\n\tc.erase_Locked(key)\n\n\t\/\/ Add a list element and index it.\n\telem := c.elems.PushFront(value)\n\tc.index[key] = elem\n}\n\nfunc (c *lruCache) erase_Locked(key string) {\n\telem, ok := c.index[key]\n\tif !ok {\n\t\treturn\n\t}\n\n\tdelete(c.index, key)\n\tc.elems.Remove(elem)\n}\n\nfunc (c *lruCache) LookUp(key string) interface{} {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif elem, ok := c.index[key]; ok {\n\t\treturn elem.Value\n\t}\n\n\treturn nil\n}\n<commit_msg>Implemented NewLruCache.<commit_after>\/\/ Copyright 2013 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cache\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n)\n\n\/\/ Create a cache that holds the given number of items, evicting the least\n\/\/ recently used item when more space is needed.\nfunc NewLruCache(capacity uint) Cache {\n\treturn &lruCache{capacity: capacity, index: make(map[string]*list.Element)}\n}\n\ntype lruCache struct {\n\tmutex sync.RWMutex\n\tcapacity uint\n\n\t\/\/ List of elements, with least recently used at the tail.\n\telems list.List\n\n\t\/\/ Index int `elems` for lookup by key.\n\tindex map[string]*list.Element\n}\n\nfunc (c *lruCache) Insert(key string, value interface{}) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\t\/\/ Make sure the key isn't already present.\n\tc.erase_Locked(key)\n\n\t\/\/ Add a list element and index it.\n\telem := c.elems.PushFront(value)\n\tc.index[key] = elem\n}\n\nfunc (c *lruCache) erase_Locked(key string) {\n\telem, ok := c.index[key]\n\tif !ok {\n\t\treturn\n\t}\n\n\tdelete(c.index, key)\n\tc.elems.Remove(elem)\n}\n\nfunc (c *lruCache) LookUp(key string) interface{} {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif elem, ok := c.index[key]; ok {\n\t\treturn elem.Value\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/go-martini\/martini\"\n)\n\nvar debugMode bool\n\nfunc debug(v ...interface{}) {\n\tif debugMode {\n\t\tlog.Println(v...)\n\t}\n}\n\nfunc assert(err error, context string) {\n\tif err != nil {\n\t\tlog.Fatal(context+\": \", err)\n\t}\n}\n\nfunc getopt(name, dfault string) string {\n\tvalue := os.Getenv(name)\n\tif value == \"\" {\n\t\tvalue = dfault\n\t}\n\treturn value\n}\n\ntype Colorizer map[string]int\n\n\/\/ returns up to 14 color escape codes (then repeats) for each unique key\nfunc (c Colorizer) Get(key string) string {\n\ti, exists := c[key]\n\tif !exists {\n\t\tc[key] = len(c)\n\t\ti = c[key]\n\t}\n\tbright := \"1;\"\n\tif i%14 > 6 {\n\t\tbright = \"\"\n\t}\n\treturn \"\\x1b[\" + bright + \"3\" + strconv.Itoa(7-(i%7)) + \"m\"\n}\n\nfunc syslogStreamer(target Target, types []string, logstream chan *Log) {\n\ttypestr := \",\" + strings.Join(types, \",\") + \",\"\n\tfor logline := range logstream {\n\t\tif typestr != \",,\" && !strings.Contains(typestr, logline.Type) {\n\t\t\tcontinue\n\t\t}\n\t\ttag, pid := getLogName(logline.Name)\n\t\tconn, err := net.Dial(\"udp\", target.Addr)\n\t\tassert(err, \"syslog\")\n\t\t\/\/ HACK: Go's syslog package hardcodes the log format, so let's send our own message\n\t\t_, err = fmt.Fprintf(conn,\n\t\t\t\"%s %s[%s]: %s\",\n\t\t\ttime.Now().Format(\"2006-01-02 15:04:05\"),\n\t\t\ttag,\n\t\t\tpid,\n\t\t\tlogline.Data)\n\t\tassert(err, \"syslog\")\n\t}\n}\n\n\/\/ getLogName returns a custom tag and PID for containers that\n\/\/ match Deis' specific application name format. Otherwise,\n\/\/ it returns the original name and 1 as the PID.\nfunc getLogName(name string) (string, string) {\n\t\/\/ example regex that should match: go_v2.web.1\n\tr := regexp.MustCompile(`(^[a-z0-9-]+)_(v[0-9]+)\\.([a-z-_]+\\.[0-9]+)$`)\n\tmatch := r.FindStringSubmatch(name)\n\tif match == nil {\n\t\treturn name, \"1\"\n\t} else {\n\t\treturn match[1], match[3]\n\t}\n}\n\nfunc websocketStreamer(w http.ResponseWriter, req *http.Request, logstream chan *Log, closer chan bool) {\n\twebsocket.Handler(func(conn *websocket.Conn) {\n\t\tfor logline := range logstream {\n\t\t\tif req.URL.Query().Get(\"type\") != \"\" && logline.Type != req.URL.Query().Get(\"type\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, err := conn.Write(append(marshal(logline), '\\n'))\n\t\t\tif err != nil {\n\t\t\t\tcloser <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}).ServeHTTP(w, req)\n}\n\nfunc httpStreamer(w http.ResponseWriter, req *http.Request, logstream chan *Log, multi bool) {\n\tvar colors Colorizer\n\tvar usecolor, usejson bool\n\tnameWidth := 16\n\tif req.URL.Query().Get(\"colors\") != \"off\" {\n\t\tcolors = make(Colorizer)\n\t\tusecolor = true\n\t}\n\tif req.Header.Get(\"Accept\") == \"application\/json\" {\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\tusejson = true\n\t} else {\n\t\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\t}\n\tfor logline := range logstream {\n\t\tif req.URL.Query().Get(\"types\") != \"\" && logline.Type != req.URL.Query().Get(\"types\") {\n\t\t\tcontinue\n\t\t}\n\t\tif usejson {\n\t\t\tw.Write(append(marshal(logline), '\\n'))\n\t\t} else {\n\t\t\tif multi {\n\t\t\t\tif len(logline.Name) > nameWidth {\n\t\t\t\t\tnameWidth = len(logline.Name)\n\t\t\t\t}\n\t\t\t\tif usecolor {\n\t\t\t\t\tw.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\t\"%s%\"+strconv.Itoa(nameWidth)+\"s|%s\\x1b[0m\\n\",\n\t\t\t\t\t\tcolors.Get(logline.Name), logline.Name, logline.Data,\n\t\t\t\t\t)))\n\t\t\t\t} else {\n\t\t\t\t\tw.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\t\"%\"+strconv.Itoa(nameWidth)+\"s|%s\\n\", logline.Name, logline.Data,\n\t\t\t\t\t)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.Write(append([]byte(logline.Data), '\\n'))\n\t\t\t}\n\t\t}\n\t\tw.(http.Flusher).Flush()\n\t}\n}\n\nfunc main() {\n\tdebugMode = getopt(\"DEBUG\", \"\") != \"\"\n\tport := getopt(\"PORT\", \"8000\")\n\tendpoint := getopt(\"DOCKER_HOST\", \"unix:\/\/\/var\/run\/docker.sock\")\n\troutespath := getopt(\"ROUTESPATH\", \"\/var\/lib\/logspout\")\n\n\tclient, err := docker.NewClient(endpoint)\n\tassert(err, \"docker\")\n\tattacher := NewAttachManager(client)\n\trouter := NewRouteManager(attacher)\n\n\t\/\/ HACK: if we are connecting to etcd, get the logger's connection\n\t\/\/ details from there\n\tif etcdHost := os.Getenv(\"ETCD_HOST\"); etcdHost != \"\" {\n\t\tconnectionString := []string{\"http:\/\/\" + etcdHost + \":4001\"}\n\t\tdebug(\"etcd:\", connectionString[0])\n\t\tetcd := etcd.NewClient(connectionString)\n\t\tetcd.SetDialTimeout(3 * time.Second)\n\t\thostResp, err := etcd.Get(\"\/deis\/logs\/host\", false, false)\n\t\tassert(err, \"url\")\n\t\tportResp, err := etcd.Get(\"\/deis\/logs\/port\", false, false)\n\t\tassert(err, \"url\")\n\t\thost := fmt.Sprintf(\"%s:%s\", hostResp.Node.Value, portResp.Node.Value)\n\t\tlog.Println(\"routing all to \" + host)\n\t\trouter.Add(&Route{Target: Target{Type: \"syslog\", Addr: host}})\n\t}\n\n\tif len(os.Args) > 1 {\n\t\tu, err := url.Parse(os.Args[1])\n\t\tassert(err, \"url\")\n\t\tlog.Println(\"routing all to \" + os.Args[1])\n\t\trouter.Add(&Route{Target: Target{Type: u.Scheme, Addr: u.Host}})\n\t}\n\n\tif _, err := os.Stat(routespath); err == nil {\n\t\tlog.Println(\"loading and persisting routes in \" + routespath)\n\t\tassert(router.Load(RouteFileStore(routespath)), \"persistor\")\n\t}\n\n\tm := martini.Classic()\n\n\tm.Get(\"\/logs(?:\/(?P<predicate>[a-zA-Z]+):(?P<value>.+))?\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\tsource := new(Source)\n\t\tswitch {\n\t\tcase params[\"predicate\"] == \"id\" && params[\"value\"] != \"\":\n\t\t\tsource.ID = params[\"value\"][:12]\n\t\tcase params[\"predicate\"] == \"name\" && params[\"value\"] != \"\":\n\t\t\tsource.Name = params[\"value\"]\n\t\tcase params[\"predicate\"] == \"filter\" && params[\"value\"] != \"\":\n\t\t\tsource.Filter = params[\"value\"]\n\t\t}\n\n\t\tif source.ID != \"\" && attacher.Get(source.ID) == nil {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\n\t\tlogstream := make(chan *Log)\n\t\tdefer close(logstream)\n\n\t\tvar closer <-chan bool\n\t\tif req.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\t\tcloserBi := make(chan bool)\n\t\t\tgo websocketStreamer(w, req, logstream, closerBi)\n\t\t\tcloser = closerBi\n\t\t} else {\n\t\t\tgo httpStreamer(w, req, logstream, source.All() || source.Filter != \"\")\n\t\t\tcloser = w.(http.CloseNotifier).CloseNotify()\n\t\t}\n\n\t\tattacher.Listen(source, logstream, closer)\n\t})\n\n\tm.Get(\"\/routes\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\troutes, _ := router.GetAll()\n\t\tw.Write(append(marshal(routes), '\\n'))\n\t})\n\n\tm.Post(\"\/routes\", func(w http.ResponseWriter, req *http.Request) (int, string) {\n\t\troute := new(Route)\n\t\tif err := unmarshal(req.Body, route); err != nil {\n\t\t\treturn http.StatusBadRequest, \"Bad request: \" + err.Error()\n\t\t}\n\n\t\t\/\/ TODO: validate?\n\t\trouter.Add(route)\n\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\treturn http.StatusCreated, string(append(marshal(route), '\\n'))\n\t})\n\n\tm.Get(\"\/routes\/:id\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\troute, _ := router.Get(params[\"id\"])\n\t\tif route == nil {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\t\tw.Write(append(marshal(route), '\\n'))\n\t})\n\n\tm.Delete(\"\/routes\/:id\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\tif ok := router.Remove(params[\"id\"]); !ok {\n\t\t\thttp.NotFound(w, req)\n\t\t}\n\t})\n\n\tlog.Println(\"logspout serving http on :\" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, m))\n}\n<commit_msg>fix(logspout): add timezone to logs<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/go-martini\/martini\"\n)\n\nvar debugMode bool\n\nfunc debug(v ...interface{}) {\n\tif debugMode {\n\t\tlog.Println(v...)\n\t}\n}\n\nfunc assert(err error, context string) {\n\tif err != nil {\n\t\tlog.Fatal(context+\": \", err)\n\t}\n}\n\nfunc getopt(name, dfault string) string {\n\tvalue := os.Getenv(name)\n\tif value == \"\" {\n\t\tvalue = dfault\n\t}\n\treturn value\n}\n\ntype Colorizer map[string]int\n\n\/\/ returns up to 14 color escape codes (then repeats) for each unique key\nfunc (c Colorizer) Get(key string) string {\n\ti, exists := c[key]\n\tif !exists {\n\t\tc[key] = len(c)\n\t\ti = c[key]\n\t}\n\tbright := \"1;\"\n\tif i%14 > 6 {\n\t\tbright = \"\"\n\t}\n\treturn \"\\x1b[\" + bright + \"3\" + strconv.Itoa(7-(i%7)) + \"m\"\n}\n\nfunc syslogStreamer(target Target, types []string, logstream chan *Log) {\n\ttypestr := \",\" + strings.Join(types, \",\") + \",\"\n\tfor logline := range logstream {\n\t\tif typestr != \",,\" && !strings.Contains(typestr, logline.Type) {\n\t\t\tcontinue\n\t\t}\n\t\ttag, pid := getLogName(logline.Name)\n\t\tconn, err := net.Dial(\"udp\", target.Addr)\n\t\tassert(err, \"syslog\")\n\t\t\/\/ HACK: Go's syslog package hardcodes the log format, so let's send our own message\n\t\t_, err = fmt.Fprintf(conn,\n\t\t\t\"%s %s[%s]: %s\",\n\t\t\ttime.Now().Format(\"2006-01-02 15:04:05 MST\"),\n\t\t\ttag,\n\t\t\tpid,\n\t\t\tlogline.Data)\n\t\tassert(err, \"syslog\")\n\t}\n}\n\n\/\/ getLogName returns a custom tag and PID for containers that\n\/\/ match Deis' specific application name format. Otherwise,\n\/\/ it returns the original name and 1 as the PID.\nfunc getLogName(name string) (string, string) {\n\t\/\/ example regex that should match: go_v2.web.1\n\tr := regexp.MustCompile(`(^[a-z0-9-]+)_(v[0-9]+)\\.([a-z-_]+\\.[0-9]+)$`)\n\tmatch := r.FindStringSubmatch(name)\n\tif match == nil {\n\t\treturn name, \"1\"\n\t} else {\n\t\treturn match[1], match[3]\n\t}\n}\n\nfunc websocketStreamer(w http.ResponseWriter, req *http.Request, logstream chan *Log, closer chan bool) {\n\twebsocket.Handler(func(conn *websocket.Conn) {\n\t\tfor logline := range logstream {\n\t\t\tif req.URL.Query().Get(\"type\") != \"\" && logline.Type != req.URL.Query().Get(\"type\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, err := conn.Write(append(marshal(logline), '\\n'))\n\t\t\tif err != nil {\n\t\t\t\tcloser <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}).ServeHTTP(w, req)\n}\n\nfunc httpStreamer(w http.ResponseWriter, req *http.Request, logstream chan *Log, multi bool) {\n\tvar colors Colorizer\n\tvar usecolor, usejson bool\n\tnameWidth := 16\n\tif req.URL.Query().Get(\"colors\") != \"off\" {\n\t\tcolors = make(Colorizer)\n\t\tusecolor = true\n\t}\n\tif req.Header.Get(\"Accept\") == \"application\/json\" {\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\tusejson = true\n\t} else {\n\t\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\t}\n\tfor logline := range logstream {\n\t\tif req.URL.Query().Get(\"types\") != \"\" && logline.Type != req.URL.Query().Get(\"types\") {\n\t\t\tcontinue\n\t\t}\n\t\tif usejson {\n\t\t\tw.Write(append(marshal(logline), '\\n'))\n\t\t} else {\n\t\t\tif multi {\n\t\t\t\tif len(logline.Name) > nameWidth {\n\t\t\t\t\tnameWidth = len(logline.Name)\n\t\t\t\t}\n\t\t\t\tif usecolor {\n\t\t\t\t\tw.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\t\"%s%\"+strconv.Itoa(nameWidth)+\"s|%s\\x1b[0m\\n\",\n\t\t\t\t\t\tcolors.Get(logline.Name), logline.Name, logline.Data,\n\t\t\t\t\t)))\n\t\t\t\t} else {\n\t\t\t\t\tw.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\t\"%\"+strconv.Itoa(nameWidth)+\"s|%s\\n\", logline.Name, logline.Data,\n\t\t\t\t\t)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.Write(append([]byte(logline.Data), '\\n'))\n\t\t\t}\n\t\t}\n\t\tw.(http.Flusher).Flush()\n\t}\n}\n\nfunc main() {\n\tdebugMode = getopt(\"DEBUG\", \"\") != \"\"\n\tport := getopt(\"PORT\", \"8000\")\n\tendpoint := getopt(\"DOCKER_HOST\", \"unix:\/\/\/var\/run\/docker.sock\")\n\troutespath := getopt(\"ROUTESPATH\", \"\/var\/lib\/logspout\")\n\n\tclient, err := docker.NewClient(endpoint)\n\tassert(err, \"docker\")\n\tattacher := NewAttachManager(client)\n\trouter := NewRouteManager(attacher)\n\n\t\/\/ HACK: if we are connecting to etcd, get the logger's connection\n\t\/\/ details from there\n\tif etcdHost := os.Getenv(\"ETCD_HOST\"); etcdHost != \"\" {\n\t\tconnectionString := []string{\"http:\/\/\" + etcdHost + \":4001\"}\n\t\tdebug(\"etcd:\", connectionString[0])\n\t\tetcd := etcd.NewClient(connectionString)\n\t\tetcd.SetDialTimeout(3 * time.Second)\n\t\thostResp, err := etcd.Get(\"\/deis\/logs\/host\", false, false)\n\t\tassert(err, \"url\")\n\t\tportResp, err := etcd.Get(\"\/deis\/logs\/port\", false, false)\n\t\tassert(err, \"url\")\n\t\thost := fmt.Sprintf(\"%s:%s\", hostResp.Node.Value, portResp.Node.Value)\n\t\tlog.Println(\"routing all to \" + host)\n\t\trouter.Add(&Route{Target: Target{Type: \"syslog\", Addr: host}})\n\t}\n\n\tif len(os.Args) > 1 {\n\t\tu, err := url.Parse(os.Args[1])\n\t\tassert(err, \"url\")\n\t\tlog.Println(\"routing all to \" + os.Args[1])\n\t\trouter.Add(&Route{Target: Target{Type: u.Scheme, Addr: u.Host}})\n\t}\n\n\tif _, err := os.Stat(routespath); err == nil {\n\t\tlog.Println(\"loading and persisting routes in \" + routespath)\n\t\tassert(router.Load(RouteFileStore(routespath)), \"persistor\")\n\t}\n\n\tm := martini.Classic()\n\n\tm.Get(\"\/logs(?:\/(?P<predicate>[a-zA-Z]+):(?P<value>.+))?\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\tsource := new(Source)\n\t\tswitch {\n\t\tcase params[\"predicate\"] == \"id\" && params[\"value\"] != \"\":\n\t\t\tsource.ID = params[\"value\"][:12]\n\t\tcase params[\"predicate\"] == \"name\" && params[\"value\"] != \"\":\n\t\t\tsource.Name = params[\"value\"]\n\t\tcase params[\"predicate\"] == \"filter\" && params[\"value\"] != \"\":\n\t\t\tsource.Filter = params[\"value\"]\n\t\t}\n\n\t\tif source.ID != \"\" && attacher.Get(source.ID) == nil {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\n\t\tlogstream := make(chan *Log)\n\t\tdefer close(logstream)\n\n\t\tvar closer <-chan bool\n\t\tif req.Header.Get(\"Upgrade\") == \"websocket\" {\n\t\t\tcloserBi := make(chan bool)\n\t\t\tgo websocketStreamer(w, req, logstream, closerBi)\n\t\t\tcloser = closerBi\n\t\t} else {\n\t\t\tgo httpStreamer(w, req, logstream, source.All() || source.Filter != \"\")\n\t\t\tcloser = w.(http.CloseNotifier).CloseNotify()\n\t\t}\n\n\t\tattacher.Listen(source, logstream, closer)\n\t})\n\n\tm.Get(\"\/routes\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\troutes, _ := router.GetAll()\n\t\tw.Write(append(marshal(routes), '\\n'))\n\t})\n\n\tm.Post(\"\/routes\", func(w http.ResponseWriter, req *http.Request) (int, string) {\n\t\troute := new(Route)\n\t\tif err := unmarshal(req.Body, route); err != nil {\n\t\t\treturn http.StatusBadRequest, \"Bad request: \" + err.Error()\n\t\t}\n\n\t\t\/\/ TODO: validate?\n\t\trouter.Add(route)\n\n\t\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\t\treturn http.StatusCreated, string(append(marshal(route), '\\n'))\n\t})\n\n\tm.Get(\"\/routes\/:id\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\troute, _ := router.Get(params[\"id\"])\n\t\tif route == nil {\n\t\t\thttp.NotFound(w, req)\n\t\t\treturn\n\t\t}\n\t\tw.Write(append(marshal(route), '\\n'))\n\t})\n\n\tm.Delete(\"\/routes\/:id\", func(w http.ResponseWriter, req *http.Request, params martini.Params) {\n\t\tif ok := router.Remove(params[\"id\"]); !ok {\n\t\t\thttp.NotFound(w, req)\n\t\t}\n\t})\n\n\tlog.Println(\"logspout serving http on :\" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, m))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Matthew Baird\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gochimp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ see https:\/\/mandrillapp.com\/api\/docs\/messages.html\nconst messages_send_endpoint string = \"\/messages\/send.json\" \/\/ Send a new transactional message through Mandrill\nconst messages_send_template_endpoint string = \"\/messages\/send-template.json\" \/\/ Send a new transactional message through Mandrill using a template\nconst messages_search_endpoint string = \"\/messages\/search.json\" \/\/ Search the content of recently sent messages and optionally narrow by date range, tags and senders\nconst messages_parse_endpoint string = \"\/messages\/parse.json\" \/\/ Parse the full MIME document for an email message, returning the content of the message broken into its constituent pieces\nconst messages_send_raw_endpoint string = \"\/messages\/send-raw.json\" \/\/ Take a raw MIME document for a message, and send it exactly as if it were sent over the SMTP protocol\n\nfunc (a *MandrillAPI) MessageSend(message Message, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"message\"] = message\n\tparams[\"async\"] = async\n\terr := parseMandrillJson(a, messages_send_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageSendTemplate(templateName string, templateContent []Var, message Message, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tif templateName == \"\" {\n\t\treturn response, errors.New(\"templateName cannot be blank\")\n\t}\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"message\"] = message\n\tparams[\"template_name\"] = templateName\n\tparams[\"async\"] = async\n\tparams[\"template_content\"] = templateContent\n\terr := parseMandrillJson(a, messages_send_template_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageSearch(searchRequest SearchRequest) ([]SearchResponse, error) {\n\tvar response []SearchResponse\n\tvar params map[string]interface{} = make(map[string]interface{})\n\t\/\/todo remove this hack\n\tparams[\"query\"] = searchRequest.Query\n\tparams[\"date_from\"] = searchRequest.DateFrom\n\tparams[\"date_to\"] = searchRequest.DateTo\n\tparams[\"tags\"] = searchRequest.Tags\n\tparams[\"senders\"] = searchRequest.Senders\n\tparams[\"limit\"] = searchRequest.Limit\n\terr := parseMandrillJson(a, messages_search_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageParse(rawMessage string, async bool) (Message, error) {\n\tvar response Message\n\tif rawMessage == \"\" {\n\t\treturn response, errors.New(\"rawMessage cannot be blank\")\n\t}\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"raw_message\"] = rawMessage\n\terr := parseMandrillJson(a, messages_parse_endpoint, params, &response)\n\treturn response, err\n}\n\n\/\/ Can return oneof Invalid_Key, ValidationError or GeneralError\nfunc (a *MandrillAPI) MessageSendRaw(rawMessage string, to []string, from Recipient, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tif rawMessage == \"\" {\n\t\treturn response, errors.New(\"rawMessage cannot be blank\")\n\t}\n\tif len(to) <= 0 {\n\t\treturn response, errors.New(\"You need at least one recipient in the To array\")\n\t}\n\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"raw_message\"] = rawMessage\n\tparams[\"from_email\"] = from.Email\n\tparams[\"from_name\"] = from.Name\n\tparams[\"to\"] = to\n\tparams[\"async\"] = async\n\terr := parseMandrillJson(a, messages_send_raw_endpoint, params, &response)\n\treturn response, err\n}\n\ntype SearchResponse struct {\n\tTimestamp time.Duration `json:\"ts\"`\n\tId string `json:\"_id\"`\n\tSender string `json:\"sender\"`\n\tSubject string `json:\"subject\"`\n\tEmail string `json:\"email\"`\n\tTags []string `json:\"tags\"`\n\tOpens int `json:\"opens\"`\n\tClicks int `json:\"clicks\"`\n\tState string `json:\"state\"`\n\tMetadata []map[string]string `json:\"metadata\"`\n}\n\ntype SearchRequest struct {\n\tQuery string `json:\"query\"`\n\tDateFrom time.Time `json:\"date_from\"`\n\tDateTo time.Time `json:\"date_to\"`\n\tTags []string `json:\"tags\"`\n\tSenders []string `json:\"senders\"`\n\tLimit int `json:\"limit\"`\n}\n\ntype Message struct {\n\tHtml string `json:\"html,omitempty\"`\n\tText string `json:\"text,omitempty\"`\n\tSubject string `json:\"subject\"`\n\tFromEmail string `json:\"from_email\"`\n\tFromName string `json:\"from_name\"`\n\tTo []Recipient `json:\"to\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tTrackOpens bool `json:\"track_opens,omitempty\"`\n\tTrackClicks bool `json:\"track_clicks,omitempty\"`\n\tViewContentLink bool `json:\"view_content_link,omitempty\"`\n\tAutoText bool `json:\"auto_text,omitempty\"`\n\tAutoHtml bool `json:\"auto_html,omitempty\"`\n\tUrlStripQS bool `json:\"url_strip_qs,omitempty\"`\n\tInlineCss bool `json:\"inline_css,omitempty\"`\n\tPreserveRecipients bool `json:\"preserve_recipients,omitempty\"`\n\tImportant bool `json:\"important,omitempty\"`\n\tBCCAddress string `json:\"bcc_address,omitempty\"`\n\tTrackingDomain string `json:\"tracking_domain,omitempty\"`\n\tSigningDomain string `json:\"signing_domain,omitempty\"`\n\tReturnPathDomain string `json:\"return_path_domain,omitempty\"`\n\tMerge bool `json:\"merge,omitempty\"`\n\tGlobalMergeVars []Var `json:\"global_merge_vars,omitempty\"`\n\tMergeVars []MergeVars `json:\"merge_vars,omitempty\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tGoogleAnalyticsDomains []string `json:\"google_analytics_domains,omitempty\"`\n\tGoogleAnalyticsCampaign []string `json:\"google_analytics_campaign,omitempty\"`\n\tMetadata []map[string]string `json:\"metadata,omitempty\"`\n\tRecipientMetadata []RecipientMetaData `json:\"recipient_metadata,omitempty\"`\n\tAttachments []Attachment `json:\"attachments,omitempty\"`\n}\n\nfunc (m *Message) String() string {\n\tb, _ := json.Marshal(m)\n\treturn string(b)\n}\n\nfunc (m *Message) AddHeader(key, value string) {\n\tif m.Headers == nil {\n\t\tm.Headers = make(map[string]string)\n\t}\n\tm.Headers[key] = value\n}\n\nfunc (m *Message) AddRecipients(r ...Recipient) {\n\tm.To = append(m.To, r...)\n}\n\nfunc (m *Message) AddGlobalMergeVar(globalvars ...Var) {\n\tm.GlobalMergeVars = append(m.GlobalMergeVars, globalvars...)\n}\n\nfunc (m *Message) AddMergeVar(vars ...MergeVars) {\n\tm.MergeVars = append(m.MergeVars, vars...)\n}\n\nfunc (m *Message) AddTag(tags ...string) {\n\tm.Tags = append(m.Tags, tags...)\n}\n\nfunc (m *Message) AddGoogleAnalyticsDomains(domains ...string) {\n\tm.GoogleAnalyticsDomains = append(m.GoogleAnalyticsDomains, domains...)\n}\n\nfunc (m *Message) AddGoogleAnalyticsCampaign(campaigns ...string) {\n\tm.GoogleAnalyticsCampaign = append(m.GoogleAnalyticsCampaign, campaigns...)\n}\n\nfunc (m *Message) AddMetadata(metadata ...map[string]string) {\n\tm.Metadata = append(m.Metadata, metadata...)\n}\n\nfunc (m *Message) AddRecipientMetadata(metadata ...RecipientMetaData) {\n\tm.RecipientMetadata = append(m.RecipientMetadata, metadata...)\n}\n\nfunc (m *Message) AddAttachments(attachement ...Attachment) {\n\tm.Attachments = append(m.Attachments, attachement...)\n}\n\ntype Attachment struct {\n\tType string `json:\"type\"`\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\ntype RecipientMetaData struct {\n\tRecipient string `json:\"rcpt\"`\n\tVars map[string]string `json:\"values\"`\n}\n\ntype MergeVars struct {\n\tRecipient string `json:\"rcpt\"`\n\tVars []Var `json:\"vars\"`\n}\n\ntype Var struct {\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\n\nfunc NewVar(name string, content string) *Var {\n\treturn &Var{Name: name, Content: content}\n}\n\ntype Recipient struct {\n\tEmail string `json:\"email\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n}\n\ntype SendResponse struct {\n\tEmail string `json:\"email\"`\n\tStatus string `json:\"status\"`\n}\n<commit_msg>Removing omitempty for TrackOpens and TrackClicks to make sure the values are transferred to mandrill as false and not omitted. This is because mandrill seems to default to true for these values, and if they are omitted there is no way to turn off click tracking.<commit_after>\/\/ Copyright 2013 Matthew Baird\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gochimp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ see https:\/\/mandrillapp.com\/api\/docs\/messages.html\nconst messages_send_endpoint string = \"\/messages\/send.json\" \/\/ Send a new transactional message through Mandrill\nconst messages_send_template_endpoint string = \"\/messages\/send-template.json\" \/\/ Send a new transactional message through Mandrill using a template\nconst messages_search_endpoint string = \"\/messages\/search.json\" \/\/ Search the content of recently sent messages and optionally narrow by date range, tags and senders\nconst messages_parse_endpoint string = \"\/messages\/parse.json\" \/\/ Parse the full MIME document for an email message, returning the content of the message broken into its constituent pieces\nconst messages_send_raw_endpoint string = \"\/messages\/send-raw.json\" \/\/ Take a raw MIME document for a message, and send it exactly as if it were sent over the SMTP protocol\n\nfunc (a *MandrillAPI) MessageSend(message Message, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"message\"] = message\n\tparams[\"async\"] = async\n\terr := parseMandrillJson(a, messages_send_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageSendTemplate(templateName string, templateContent []Var, message Message, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tif templateName == \"\" {\n\t\treturn response, errors.New(\"templateName cannot be blank\")\n\t}\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"message\"] = message\n\tparams[\"template_name\"] = templateName\n\tparams[\"async\"] = async\n\tparams[\"template_content\"] = templateContent\n\terr := parseMandrillJson(a, messages_send_template_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageSearch(searchRequest SearchRequest) ([]SearchResponse, error) {\n\tvar response []SearchResponse\n\tvar params map[string]interface{} = make(map[string]interface{})\n\t\/\/todo remove this hack\n\tparams[\"query\"] = searchRequest.Query\n\tparams[\"date_from\"] = searchRequest.DateFrom\n\tparams[\"date_to\"] = searchRequest.DateTo\n\tparams[\"tags\"] = searchRequest.Tags\n\tparams[\"senders\"] = searchRequest.Senders\n\tparams[\"limit\"] = searchRequest.Limit\n\terr := parseMandrillJson(a, messages_search_endpoint, params, &response)\n\treturn response, err\n}\n\nfunc (a *MandrillAPI) MessageParse(rawMessage string, async bool) (Message, error) {\n\tvar response Message\n\tif rawMessage == \"\" {\n\t\treturn response, errors.New(\"rawMessage cannot be blank\")\n\t}\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"raw_message\"] = rawMessage\n\terr := parseMandrillJson(a, messages_parse_endpoint, params, &response)\n\treturn response, err\n}\n\n\/\/ Can return oneof Invalid_Key, ValidationError or GeneralError\nfunc (a *MandrillAPI) MessageSendRaw(rawMessage string, to []string, from Recipient, async bool) ([]SendResponse, error) {\n\tvar response []SendResponse\n\tif rawMessage == \"\" {\n\t\treturn response, errors.New(\"rawMessage cannot be blank\")\n\t}\n\tif len(to) <= 0 {\n\t\treturn response, errors.New(\"You need at least one recipient in the To array\")\n\t}\n\n\tvar params map[string]interface{} = make(map[string]interface{})\n\tparams[\"raw_message\"] = rawMessage\n\tparams[\"from_email\"] = from.Email\n\tparams[\"from_name\"] = from.Name\n\tparams[\"to\"] = to\n\tparams[\"async\"] = async\n\terr := parseMandrillJson(a, messages_send_raw_endpoint, params, &response)\n\treturn response, err\n}\n\ntype SearchResponse struct {\n\tTimestamp time.Duration `json:\"ts\"`\n\tId string `json:\"_id\"`\n\tSender string `json:\"sender\"`\n\tSubject string `json:\"subject\"`\n\tEmail string `json:\"email\"`\n\tTags []string `json:\"tags\"`\n\tOpens int `json:\"opens\"`\n\tClicks int `json:\"clicks\"`\n\tState string `json:\"state\"`\n\tMetadata []map[string]string `json:\"metadata\"`\n}\n\ntype SearchRequest struct {\n\tQuery string `json:\"query\"`\n\tDateFrom time.Time `json:\"date_from\"`\n\tDateTo time.Time `json:\"date_to\"`\n\tTags []string `json:\"tags\"`\n\tSenders []string `json:\"senders\"`\n\tLimit int `json:\"limit\"`\n}\n\ntype Message struct {\n\tHtml string `json:\"html,omitempty\"`\n\tText string `json:\"text,omitempty\"`\n\tSubject string `json:\"subject\"`\n\tFromEmail string `json:\"from_email\"`\n\tFromName string `json:\"from_name\"`\n\tTo []Recipient `json:\"to\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tTrackOpens bool `json:\"track_opens\"`\n\tTrackClicks bool `json:\"track_clicks\"`\n\tViewContentLink bool `json:\"view_content_link,omitempty\"`\n\tAutoText bool `json:\"auto_text,omitempty\"`\n\tAutoHtml bool `json:\"auto_html,omitempty\"`\n\tUrlStripQS bool `json:\"url_strip_qs,omitempty\"`\n\tInlineCss bool `json:\"inline_css,omitempty\"`\n\tPreserveRecipients bool `json:\"preserve_recipients,omitempty\"`\n\tImportant bool `json:\"important,omitempty\"`\n\tBCCAddress string `json:\"bcc_address,omitempty\"`\n\tTrackingDomain string `json:\"tracking_domain,omitempty\"`\n\tSigningDomain string `json:\"signing_domain,omitempty\"`\n\tReturnPathDomain string `json:\"return_path_domain,omitempty\"`\n\tMerge bool `json:\"merge,omitempty\"`\n\tGlobalMergeVars []Var `json:\"global_merge_vars,omitempty\"`\n\tMergeVars []MergeVars `json:\"merge_vars,omitempty\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tGoogleAnalyticsDomains []string `json:\"google_analytics_domains,omitempty\"`\n\tGoogleAnalyticsCampaign []string `json:\"google_analytics_campaign,omitempty\"`\n\tMetadata []map[string]string `json:\"metadata,omitempty\"`\n\tRecipientMetadata []RecipientMetaData `json:\"recipient_metadata,omitempty\"`\n\tAttachments []Attachment `json:\"attachments,omitempty\"`\n}\n\nfunc (m *Message) String() string {\n\tb, _ := json.Marshal(m)\n\treturn string(b)\n}\n\nfunc (m *Message) AddHeader(key, value string) {\n\tif m.Headers == nil {\n\t\tm.Headers = make(map[string]string)\n\t}\n\tm.Headers[key] = value\n}\n\nfunc (m *Message) AddRecipients(r ...Recipient) {\n\tm.To = append(m.To, r...)\n}\n\nfunc (m *Message) AddGlobalMergeVar(globalvars ...Var) {\n\tm.GlobalMergeVars = append(m.GlobalMergeVars, globalvars...)\n}\n\nfunc (m *Message) AddMergeVar(vars ...MergeVars) {\n\tm.MergeVars = append(m.MergeVars, vars...)\n}\n\nfunc (m *Message) AddTag(tags ...string) {\n\tm.Tags = append(m.Tags, tags...)\n}\n\nfunc (m *Message) AddGoogleAnalyticsDomains(domains ...string) {\n\tm.GoogleAnalyticsDomains = append(m.GoogleAnalyticsDomains, domains...)\n}\n\nfunc (m *Message) AddGoogleAnalyticsCampaign(campaigns ...string) {\n\tm.GoogleAnalyticsCampaign = append(m.GoogleAnalyticsCampaign, campaigns...)\n}\n\nfunc (m *Message) AddMetadata(metadata ...map[string]string) {\n\tm.Metadata = append(m.Metadata, metadata...)\n}\n\nfunc (m *Message) AddRecipientMetadata(metadata ...RecipientMetaData) {\n\tm.RecipientMetadata = append(m.RecipientMetadata, metadata...)\n}\n\nfunc (m *Message) AddAttachments(attachement ...Attachment) {\n\tm.Attachments = append(m.Attachments, attachement...)\n}\n\ntype Attachment struct {\n\tType string `json:\"type\"`\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\ntype RecipientMetaData struct {\n\tRecipient string `json:\"rcpt\"`\n\tVars map[string]string `json:\"values\"`\n}\n\ntype MergeVars struct {\n\tRecipient string `json:\"rcpt\"`\n\tVars []Var `json:\"vars\"`\n}\n\ntype Var struct {\n\tName string `json:\"name\"`\n\tContent string `json:\"content\"`\n}\n\nfunc NewVar(name string, content string) *Var {\n\treturn &Var{Name: name, Content: content}\n}\n\ntype Recipient struct {\n\tEmail string `json:\"email\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n}\n\ntype SendResponse struct {\n\tEmail string `json:\"email\"`\n\tStatus string `json:\"status\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc FileExist(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}\n\nfunc BaseName(s, suffix string) string {\n\tbase := path.Base(s)\n\tif suffix != \"\" {\n\t\tbase = strings.TrimSuffix(base, suffix)\n\t}\n\treturn base\n}\n\nfunc TempDir() (string, error) {\n\treturn ioutil.TempDir(\"\", \"chef-runner-\")\n}\n<commit_msg>Document util package<commit_after>\/\/ Package util provides various utility functions.\npackage util\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ FileExist reports whether a file or directory exists.\nfunc FileExist(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}\n\n\/\/ BaseName, as the basename Unix tool, deletes any prefix ending with the last\n\/\/ slash character present in a string, and a suffix, if given.\nfunc BaseName(s, suffix string) string {\n\tbase := path.Base(s)\n\tif suffix != \"\" {\n\t\tbase = strings.TrimSuffix(base, suffix)\n\t}\n\treturn base\n}\n\n\/\/ TempDir creates a new temporary directory to be used by chef-runner.\nfunc TempDir() (string, error) {\n\treturn ioutil.TempDir(\"\", \"chef-runner-\")\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc QuickSort(a []int) {\n\tsort(a, 0, len(a))\n}\n\nfunc sort(a []int, from, to int) {\n\tif to-from <= 1 {\n\t\treturn\n\t}\n\tp := partition(a, from, to)\n\tif p > from {\n\t\tsort(a, from, p)\n\t}\n\tif p < to {\n\t\tsort(a, p+1, to)\n\t}\n}\n\nfunc partition(a []int, from, to int) int {\n\tm := (from + to) >> 1\n\tif a[from] > a[m] {\n\t\ta[from], a[m] = a[m], a[from]\n\t}\n\ti := from + 1\n\tfor j := from + 1; j < to; j++ {\n\t\tif a[j] < a[from] {\n\t\t\tif i != j {\n\t\t\t\ta[i], a[j] = a[j], a[i]\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\tif from != i-1 {\n\t\ta[from], a[i-1] = a[i-1], a[from]\n\t}\n\treturn i - 1\n}\n\nfunc Shuffle(a []int64) {\n\trand.Seed(time.Now().UnixNano())\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\nfunc GoNonBlocking(f func() interface{}) (ch chan interface{}) {\n\tgo func() {\n\t\tselect {\n\t\tcase ch <- f():\n\t\tdefault:\n\t\t}\n\t}()\n\treturn\n}\n\nvar Lt *log.Logger\nvar Li *log.Logger\nvar Lw *log.Logger\nvar Le *log.Logger\nvar Lf *log.Logger\nvar Lc *log.Logger\n\nfunc init() {\n\tLt = log.New(os.Stderr, \"T \", log.LstdFlags|log.Lshortfile)\n\tLi = log.New(os.Stderr, \"I \", log.LstdFlags|log.Lshortfile)\n\tLw = log.New(os.Stderr, \"W \", log.LstdFlags|log.Lshortfile)\n\tLe = log.New(os.Stderr, \"E \", log.LstdFlags|log.Lshortfile)\n\tLf = log.New(os.Stderr, \"F \", log.LstdFlags|log.Lshortfile)\n\tLc = log.New(os.Stderr, \"C \", log.LstdFlags|log.Lshortfile)\n}\n\nfunc InitLoggers(w io.Writer) {\n\tLt = log.New(w, \"T \", log.LstdFlags|log.Lshortfile)\n\tLi = log.New(w, \"I \", log.LstdFlags|log.Lshortfile)\n\tLw = log.New(w, \"W \", log.LstdFlags|log.Lshortfile)\n\tLe = log.New(w, \"E \", log.LstdFlags|log.Lshortfile)\n\tLf = log.New(w, \"F \", log.LstdFlags|log.Lshortfile)\n\tLc = log.New(w, \"C \", log.LstdFlags|log.Lshortfile)\n}\n\ntype Cue struct{}\n\nfunc LeadingZeros(d interface{}) int {\n\tswitch t := d.(type) {\n\tcase uint8:\n\t\treturn lzs(uint64(t), 8)\n\tcase int8:\n\t\treturn lzs(uint64(t), 8)\n\tcase int16:\n\t\treturn lzs(uint64(t), 16)\n\tcase uint16:\n\t\treturn lzs(uint64(t), 16)\n\tcase int32:\n\t\treturn lzs(uint64(t), 32)\n\tcase uint32:\n\t\treturn lzs(uint64(t), 32)\n\tcase int64:\n\t\treturn lzs(uint64(t), 64)\n\tcase uint64:\n\t\treturn lzs(uint64(t), 64)\n\t}\n\treturn -1 \/\/ unreachable code\n}\n\nfunc lzs(x uint64, s uint) int {\n\tu := uint64(1 << (s - 1))\n\tfor i := uint(0); i < s; i++ {\n\t\tif (u>>i)&x > 0 {\n\t\t\treturn int(i)\n\t\t}\n\t}\n\treturn int(s)\n}\n\nfunc Size(d interface{}) int {\n\tswitch d.(type) {\n\tcase uint8, int8: \/\/ also applied to byte\n\t\treturn 8\n\tcase uint16, int16:\n\t\treturn 16\n\tcase int32, uint32: \/\/ also applied to rune\n\t\treturn 32\n\tcase int64, uint64:\n\t\treturn 64\n\tdefault:\n\t\treturn -1\n\t}\n}\n<commit_msg>add randomness to quick sort<commit_after>package util\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc QuickSort(a []int) {\n\tsort(a, 0, len(a))\n}\n\nfunc sort(a []int, from, to int) {\n\tif to-from <= 1 {\n\t\treturn\n\t}\n\tp := partition(a, from, to)\n\tif p > from {\n\t\tsort(a, from, p)\n\t}\n\tif p < to {\n\t\tsort(a, p+1, to)\n\t}\n}\n\nfunc partition(a []int, from, to int) int {\n\tm := from + rand.Intn(to-from)\n\ta[from], a[m] = a[m], a[from]\n\ti := from + 1\n\tfor j := from + 1; j < to; j++ {\n\t\tif a[j] < a[from] {\n\t\t\tif i != j {\n\t\t\t\ta[i], a[j] = a[j], a[i]\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\tif from != i-1 {\n\t\ta[from], a[i-1] = a[i-1], a[from]\n\t}\n\treturn i - 1\n}\n\nfunc Shuffle(a []int64) {\n\trand.Seed(time.Now().UnixNano())\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\nfunc GoNonBlocking(f func() interface{}) (ch chan interface{}) {\n\tgo func() {\n\t\tselect {\n\t\tcase ch <- f():\n\t\tdefault:\n\t\t}\n\t}()\n\treturn\n}\n\nvar Lt *log.Logger\nvar Li *log.Logger\nvar Lw *log.Logger\nvar Le *log.Logger\nvar Lf *log.Logger\nvar Lc *log.Logger\n\nfunc init() {\n\tLt = log.New(os.Stderr, \"T \", log.LstdFlags|log.Lshortfile)\n\tLi = log.New(os.Stderr, \"I \", log.LstdFlags|log.Lshortfile)\n\tLw = log.New(os.Stderr, \"W \", log.LstdFlags|log.Lshortfile)\n\tLe = log.New(os.Stderr, \"E \", log.LstdFlags|log.Lshortfile)\n\tLf = log.New(os.Stderr, \"F \", log.LstdFlags|log.Lshortfile)\n\tLc = log.New(os.Stderr, \"C \", log.LstdFlags|log.Lshortfile)\n}\n\nfunc InitLoggers(w io.Writer) {\n\tLt = log.New(w, \"T \", log.LstdFlags|log.Lshortfile)\n\tLi = log.New(w, \"I \", log.LstdFlags|log.Lshortfile)\n\tLw = log.New(w, \"W \", log.LstdFlags|log.Lshortfile)\n\tLe = log.New(w, \"E \", log.LstdFlags|log.Lshortfile)\n\tLf = log.New(w, \"F \", log.LstdFlags|log.Lshortfile)\n\tLc = log.New(w, \"C \", log.LstdFlags|log.Lshortfile)\n}\n\ntype Cue struct{}\n\nfunc LeadingZeros(d interface{}) int {\n\tswitch t := d.(type) {\n\tcase uint8:\n\t\treturn lzs(uint64(t), 8)\n\tcase int8:\n\t\treturn lzs(uint64(t), 8)\n\tcase int16:\n\t\treturn lzs(uint64(t), 16)\n\tcase uint16:\n\t\treturn lzs(uint64(t), 16)\n\tcase int32:\n\t\treturn lzs(uint64(t), 32)\n\tcase uint32:\n\t\treturn lzs(uint64(t), 32)\n\tcase int64:\n\t\treturn lzs(uint64(t), 64)\n\tcase uint64:\n\t\treturn lzs(uint64(t), 64)\n\t}\n\treturn -1 \/\/ unreachable code\n}\n\nfunc lzs(x uint64, s uint) int {\n\tu := uint64(1 << (s - 1))\n\tfor i := uint(0); i < s; i++ {\n\t\tif (u>>i)&x > 0 {\n\t\t\treturn int(i)\n\t\t}\n\t}\n\treturn int(s)\n}\n\nfunc Size(d interface{}) int {\n\tswitch d.(type) {\n\tcase uint8, int8: \/\/ also applied to byte\n\t\treturn 8\n\tcase uint16, int16:\n\t\treturn 16\n\tcase int32, uint32: \/\/ also applied to rune\n\t\treturn 32\n\tcase int64, uint64:\n\t\treturn 64\n\tdefault:\n\t\treturn -1\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage util\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tmillisecondsInSecond = 1000\n\tsecondsInMinute = 60\n\tminutesInHour = 60\n)\n\nfunc ParseDuration(duration int) (seconds, minutes, hours int) {\n\tseconds = duration \/ millisecondsInSecond\n\tif seconds >= secondsInMinute {\n\t\tminutes = seconds \/ secondsInMinute\n\t\tseconds -= minutes * secondsInMinute\n\t}\n\tif minutes >= minutesInHour {\n\t\thours = minutes \/ minutesInHour\n\t\tminutes -= hours * minutesInHour\n\t}\n\treturn\n}\n\nfunc DurationString(seconds, minutes, hours int) (duration string) {\n\tduration = formatNumber(minutes) + \":\" + formatNumber(seconds)\n\tif hours > 0 {\n\t\tduration = formatNumber(hours) + \":\" + duration\n\t}\n\treturn\n}\n\nfunc formatNumber(num int) (formatted string) {\n\tif num < 10 {\n\t\tformatted += \"0\"\n\t}\n\tformatted += strconv.Itoa(num)\n\treturn\n}\n\nfunc SanitizePath(path string) string {\n\tif strings.HasPrefix(path, \"~\") {\n\t\tpath = strings.Replace(path, \"~\", os.Getenv(\"HOME\"), 1)\n\t}\n\treturn filepath.Clean(path)\n}\n<commit_msg>Rename duration arg to milliseconds in ParseDuration<commit_after>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage util\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tmillisecondsInSecond = 1000\n\tsecondsInMinute = 60\n\tminutesInHour = 60\n)\n\nfunc ParseDuration(milliseconds int) (seconds, minutes, hours int) {\n\tseconds = milliseconds \/ millisecondsInSecond\n\tif seconds >= secondsInMinute {\n\t\tminutes = seconds \/ secondsInMinute\n\t\tseconds -= minutes * secondsInMinute\n\t}\n\tif minutes >= minutesInHour {\n\t\thours = minutes \/ minutesInHour\n\t\tminutes -= hours * minutesInHour\n\t}\n\treturn\n}\n\nfunc DurationString(seconds, minutes, hours int) (duration string) {\n\tduration = formatNumber(minutes) + \":\" + formatNumber(seconds)\n\tif hours > 0 {\n\t\tduration = formatNumber(hours) + \":\" + duration\n\t}\n\treturn\n}\n\nfunc formatNumber(num int) (formatted string) {\n\tif num < 10 {\n\t\tformatted += \"0\"\n\t}\n\tformatted += strconv.Itoa(num)\n\treturn\n}\n\nfunc SanitizePath(path string) string {\n\tif strings.HasPrefix(path, \"~\") {\n\t\tpath = strings.Replace(path, \"~\", os.Getenv(\"HOME\"), 1)\n\t}\n\treturn filepath.Clean(path)\n}\n<|endoftext|>"} {"text":"<commit_before>package overlay\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libnetwork\/datastore\"\n\t\"github.com\/docker\/libnetwork\/ipallocator\"\n\t\"github.com\/docker\/libnetwork\/osl\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"github.com\/vishvananda\/netlink\/nl\"\n)\n\ntype networkTable map[string]*network\n\ntype network struct {\n\tid string\n\tvni uint32\n\tdbIndex uint64\n\tdbExists bool\n\tsbox osl.Sandbox\n\tendpoints endpointTable\n\tipAllocator *ipallocator.IPAllocator\n\tgw net.IP\n\tvxlanName string\n\tdriver *driver\n\tjoinCnt int\n\tonce *sync.Once\n\tinitEpoch int\n\tinitErr error\n\tsync.Mutex\n}\n\nfunc (d *driver) CreateNetwork(id string, option map[string]interface{}) error {\n\tif id == \"\" {\n\t\treturn fmt.Errorf(\"invalid network id\")\n\t}\n\n\tn := &network{\n\t\tid: id,\n\t\tdriver: d,\n\t\tendpoints: endpointTable{},\n\t\tonce: &sync.Once{},\n\t}\n\n\tn.gw = bridgeIP.IP\n\n\td.addNetwork(n)\n\n\tif err := n.obtainVxlanID(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) DeleteNetwork(nid string) error {\n\tif nid == \"\" {\n\t\treturn fmt.Errorf(\"invalid network id\")\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn fmt.Errorf(\"could not find network with id %s\", nid)\n\t}\n\n\td.deleteNetwork(nid)\n\n\treturn n.releaseVxlanID()\n}\n\nfunc (n *network) joinSandbox() error {\n\tn.Lock()\n\tif n.joinCnt != 0 {\n\t\tn.joinCnt++\n\t\tn.Unlock()\n\t\treturn nil\n\t}\n\tn.Unlock()\n\n\t\/\/ If there is a race between two go routines here only one will win\n\t\/\/ the other will wait.\n\tn.once.Do(func() {\n\t\t\/\/ save the error status of initSandbox in n.initErr so that\n\t\t\/\/ all the racing go routines are able to know the status.\n\t\tn.initErr = n.initSandbox()\n\t})\n\n\t\/\/ Increment joinCnt in all the goroutines only when the one time initSandbox\n\t\/\/ was a success.\n\tn.Lock()\n\tif n.initErr == nil {\n\t\tn.joinCnt++\n\t}\n\terr := n.initErr\n\tn.Unlock()\n\n\treturn err\n}\n\nfunc (n *network) leaveSandbox() {\n\tn.Lock()\n\tn.joinCnt--\n\tif n.joinCnt != 0 {\n\t\tn.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ We are about to destroy sandbox since the container is leaving the network\n\t\/\/ Reinitialize the once variable so that we will be able to trigger one time\n\t\/\/ sandbox initialization(again) when another container joins subsequently.\n\tn.once = &sync.Once{}\n\tn.Unlock()\n\n\tn.destroySandbox()\n}\n\nfunc (n *network) destroySandbox() {\n\tsbox := n.sandbox()\n\tif sbox != nil {\n\t\tfor _, iface := range sbox.Info().Interfaces() {\n\t\t\tiface.Remove()\n\t\t}\n\n\t\tif err := deleteVxlan(n.vxlanName); err != nil {\n\t\t\tlogrus.Warnf(\"could not cleanup sandbox properly: %v\", err)\n\t\t}\n\n\t\tsbox.Destroy()\n\t}\n}\n\nfunc (n *network) initSandbox() error {\n\tn.Lock()\n\tn.initEpoch++\n\tn.Unlock()\n\n\tsbox, err := osl.NewSandbox(\n\t\tosl.GenerateKey(fmt.Sprintf(\"%d-\", n.initEpoch)+n.id), true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not create network sandbox: %v\", err)\n\t}\n\n\t\/\/ Add a bridge inside the namespace\n\tif err := sbox.AddInterface(\"bridge1\", \"br\",\n\t\tsbox.InterfaceOptions().Address(bridgeIP),\n\t\tsbox.InterfaceOptions().Bridge(true)); err != nil {\n\t\treturn fmt.Errorf(\"could not create bridge inside the network sandbox: %v\", err)\n\t}\n\n\tvxlanName, err := createVxlan(n.vxlanID())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := sbox.AddInterface(vxlanName, \"vxlan\",\n\t\tsbox.InterfaceOptions().Master(\"bridge1\")); err != nil {\n\t\treturn fmt.Errorf(\"could not add vxlan interface inside the network sandbox: %v\",\n\t\t\terr)\n\t}\n\n\tn.vxlanName = vxlanName\n\n\tn.setSandbox(sbox)\n\n\tn.driver.peerDbUpdateSandbox(n.id)\n\n\tvar nlSock *nl.NetlinkSocket\n\tsbox.InvokeFunc(func() {\n\t\tnlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to subscribe to neighbor group netlink messages\")\n\t\t}\n\t})\n\n\tgo n.watchMiss(nlSock)\n\n\treturn nil\n}\n\nfunc (n *network) watchMiss(nlSock *nl.NetlinkSocket) {\n\tfor {\n\t\tmsgs, err := nlSock.Receive()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to receive from netlink: %v \", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, msg := range msgs {\n\t\t\tif msg.Header.Type != syscall.RTM_GETNEIGH && msg.Header.Type != syscall.RTM_NEWNEIGH {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tneigh, err := netlink.NeighDeserialize(msg.Data)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Failed to deserialize netlink ndmsg: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif neigh.IP.To16() != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmac, vtep, err := n.driver.resolvePeer(n.id, neigh.IP)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"could not resolve peer %q: %v\", neigh.IP, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := n.driver.peerAdd(n.id, \"dummy\", neigh.IP, mac, vtep, true); err != nil {\n\t\t\t\tlogrus.Errorf(\"could not add neighbor entry for missed peer: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *driver) addNetwork(n *network) {\n\td.Lock()\n\td.networks[n.id] = n\n\td.Unlock()\n}\n\nfunc (d *driver) deleteNetwork(nid string) {\n\td.Lock()\n\tdelete(d.networks, nid)\n\td.Unlock()\n}\n\nfunc (d *driver) network(nid string) *network {\n\td.Lock()\n\tdefer d.Unlock()\n\n\treturn d.networks[nid]\n}\n\nfunc (n *network) sandbox() osl.Sandbox {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\treturn n.sbox\n}\n\nfunc (n *network) setSandbox(sbox osl.Sandbox) {\n\tn.Lock()\n\tn.sbox = sbox\n\tn.Unlock()\n}\n\nfunc (n *network) vxlanID() uint32 {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\treturn n.vni\n}\n\nfunc (n *network) setVxlanID(vni uint32) {\n\tn.Lock()\n\tn.vni = vni\n\tn.Unlock()\n}\n\nfunc (n *network) Key() []string {\n\treturn []string{\"overlay\", \"network\", n.id}\n}\n\nfunc (n *network) KeyPrefix() []string {\n\treturn []string{\"overlay\", \"network\"}\n}\n\nfunc (n *network) Value() []byte {\n\tb, err := json.Marshal(n.vxlanID())\n\tif err != nil {\n\t\treturn []byte{}\n\t}\n\n\treturn b\n}\n\nfunc (n *network) Index() uint64 {\n\treturn n.dbIndex\n}\n\nfunc (n *network) SetIndex(index uint64) {\n\tn.dbIndex = index\n\tn.dbExists = true\n}\n\nfunc (n *network) Exists() bool {\n\treturn n.dbExists\n}\n\nfunc (n *network) SetValue(value []byte) error {\n\tvar vni uint32\n\terr := json.Unmarshal(value, &vni)\n\tif err == nil {\n\t\tn.setVxlanID(vni)\n\t}\n\treturn err\n}\n\nfunc (n *network) writeToStore() error {\n\treturn n.driver.store.PutObjectAtomic(n)\n}\n\nfunc (n *network) releaseVxlanID() error {\n\tif n.driver.store == nil {\n\t\treturn fmt.Errorf(\"no datastore configured. cannot release vxlan id\")\n\t}\n\n\tif n.vxlanID() == 0 {\n\t\treturn nil\n\t}\n\n\tif err := n.driver.store.DeleteObjectAtomic(n); err != nil {\n\t\tif err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {\n\t\t\t\/\/ In both the above cases we can safely assume that the key has been removed by some other\n\t\t\t\/\/ instance and so simply get out of here\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"failed to delete network to vxlan id map: %v\", err)\n\t}\n\n\tn.driver.vxlanIdm.Release(n.vxlanID())\n\tn.setVxlanID(0)\n\treturn nil\n}\n\nfunc (n *network) obtainVxlanID() error {\n\tif n.driver.store == nil {\n\t\treturn fmt.Errorf(\"no datastore configured. cannot obtain vxlan id\")\n\t}\n\n\tfor {\n\t\tvar vxlanID uint32\n\t\tif err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {\n\t\t\tif err == datastore.ErrKeyNotFound {\n\t\t\t\tvxlanID, err = n.driver.vxlanIdm.GetID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to allocate vxlan id: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tn.setVxlanID(vxlanID)\n\t\t\t\tif err := n.writeToStore(); err != nil {\n\t\t\t\t\tn.driver.vxlanIdm.Release(n.vxlanID())\n\t\t\t\t\tn.setVxlanID(0)\n\t\t\t\t\tif err == datastore.ErrKeyModified {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn fmt.Errorf(\"failed to update data store with vxlan id: %v\", err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"failed to obtain vxlan id from data store: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n}\n<commit_msg>updated for testing purpose 13:20<commit_after>package overlay\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libnetwork\/datastore\"\n\t\"github.com\/docker\/libnetwork\/ipallocator\"\n\t\"github.com\/docker\/libnetwork\/osl\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"github.com\/vishvananda\/netlink\/nl\"\n)\n\ntype networkTable map[string]*network\n\ntype network struct {\n\tid string\n\tvni uint32\n\tdbIndex uint64\n\tdbExists bool\n\tsbox osl.Sandbox\n\tendpoints endpointTable\n\tipAllocator *ipallocator.IPAllocator\n\tgw net.IP\n\tvxlanName string\n\tdriver *driver\n\tjoinCnt int\n\tonce *sync.Once\n\tinitEpoch int\n\tinitErr error\n\tsync.Mutex\n}\n\nfunc (d *driver) CreateNetwork(id string, option map[string]interface{}) error {\n\tlogrus.Debugf(\"create network call recieved at overlay driver\")\n\tlogrus.WarnF(\"create network call recieved at overlay driver\")\n\tif id == \"\" {\n\t\treturn fmt.Errorf(\"invalid network id\")\n\t}\n\n\tn := &network{\n\t\tid: id,\n\t\tdriver: d,\n\t\tendpoints: endpointTable{},\n\t\tonce: &sync.Once{},\n\t}\n\n\tn.gw = bridgeIP.IP\n\n\td.addNetwork(n)\n\n\tif err := n.obtainVxlanID(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *driver) DeleteNetwork(nid string) error {\n\tif nid == \"\" {\n\t\treturn fmt.Errorf(\"invalid network id\")\n\t}\n\n\tn := d.network(nid)\n\tif n == nil {\n\t\treturn fmt.Errorf(\"could not find network with id %s\", nid)\n\t}\n\n\td.deleteNetwork(nid)\n\n\treturn n.releaseVxlanID()\n}\n\nfunc (n *network) joinSandbox() error {\n\tn.Lock()\n\tif n.joinCnt != 0 {\n\t\tn.joinCnt++\n\t\tn.Unlock()\n\t\treturn nil\n\t}\n\tn.Unlock()\n\n\t\/\/ If there is a race between two go routines here only one will win\n\t\/\/ the other will wait.\n\tn.once.Do(func() {\n\t\t\/\/ save the error status of initSandbox in n.initErr so that\n\t\t\/\/ all the racing go routines are able to know the status.\n\t\tn.initErr = n.initSandbox()\n\t})\n\n\t\/\/ Increment joinCnt in all the goroutines only when the one time initSandbox\n\t\/\/ was a success.\n\tn.Lock()\n\tif n.initErr == nil {\n\t\tn.joinCnt++\n\t}\n\terr := n.initErr\n\tn.Unlock()\n\n\treturn err\n}\n\nfunc (n *network) leaveSandbox() {\n\tn.Lock()\n\tn.joinCnt--\n\tif n.joinCnt != 0 {\n\t\tn.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ We are about to destroy sandbox since the container is leaving the network\n\t\/\/ Reinitialize the once variable so that we will be able to trigger one time\n\t\/\/ sandbox initialization(again) when another container joins subsequently.\n\tn.once = &sync.Once{}\n\tn.Unlock()\n\n\tn.destroySandbox()\n}\n\nfunc (n *network) destroySandbox() {\n\tsbox := n.sandbox()\n\tif sbox != nil {\n\t\tfor _, iface := range sbox.Info().Interfaces() {\n\t\t\tiface.Remove()\n\t\t}\n\n\t\tif err := deleteVxlan(n.vxlanName); err != nil {\n\t\t\tlogrus.Warnf(\"could not cleanup sandbox properly: %v\", err)\n\t\t}\n\n\t\tsbox.Destroy()\n\t}\n}\n\nfunc (n *network) initSandbox() error {\n\tn.Lock()\n\tn.initEpoch++\n\tn.Unlock()\n\n\tsbox, err := osl.NewSandbox(\n\t\tosl.GenerateKey(fmt.Sprintf(\"%d-\", n.initEpoch)+n.id), true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not create network sandbox: %v\", err)\n\t}\n\n\t\/\/ Add a bridge inside the namespace\n\tif err := sbox.AddInterface(\"bridge1\", \"br\",\n\t\tsbox.InterfaceOptions().Address(bridgeIP),\n\t\tsbox.InterfaceOptions().Bridge(true)); err != nil {\n\t\treturn fmt.Errorf(\"could not create bridge inside the network sandbox: %v\", err)\n\t}\n\n\tvxlanName, err := createVxlan(n.vxlanID())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := sbox.AddInterface(vxlanName, \"vxlan\",\n\t\tsbox.InterfaceOptions().Master(\"bridge1\")); err != nil {\n\t\treturn fmt.Errorf(\"could not add vxlan interface inside the network sandbox: %v\",\n\t\t\terr)\n\t}\n\n\tn.vxlanName = vxlanName\n\n\tn.setSandbox(sbox)\n\n\tn.driver.peerDbUpdateSandbox(n.id)\n\n\tvar nlSock *nl.NetlinkSocket\n\tsbox.InvokeFunc(func() {\n\t\tnlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to subscribe to neighbor group netlink messages\")\n\t\t}\n\t})\n\n\tgo n.watchMiss(nlSock)\n\n\treturn nil\n}\n\nfunc (n *network) watchMiss(nlSock *nl.NetlinkSocket) {\n\tfor {\n\t\tmsgs, err := nlSock.Receive()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to receive from netlink: %v \", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, msg := range msgs {\n\t\t\tif msg.Header.Type != syscall.RTM_GETNEIGH && msg.Header.Type != syscall.RTM_NEWNEIGH {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tneigh, err := netlink.NeighDeserialize(msg.Data)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Failed to deserialize netlink ndmsg: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif neigh.IP.To16() != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmac, vtep, err := n.driver.resolvePeer(n.id, neigh.IP)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"could not resolve peer %q: %v\", neigh.IP, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := n.driver.peerAdd(n.id, \"dummy\", neigh.IP, mac, vtep, true); err != nil {\n\t\t\t\tlogrus.Errorf(\"could not add neighbor entry for missed peer: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *driver) addNetwork(n *network) {\n\td.Lock()\n\td.networks[n.id] = n\n\td.Unlock()\n}\n\nfunc (d *driver) deleteNetwork(nid string) {\n\td.Lock()\n\tdelete(d.networks, nid)\n\td.Unlock()\n}\n\nfunc (d *driver) network(nid string) *network {\n\td.Lock()\n\tdefer d.Unlock()\n\n\treturn d.networks[nid]\n}\n\nfunc (n *network) sandbox() osl.Sandbox {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\treturn n.sbox\n}\n\nfunc (n *network) setSandbox(sbox osl.Sandbox) {\n\tn.Lock()\n\tn.sbox = sbox\n\tn.Unlock()\n}\n\nfunc (n *network) vxlanID() uint32 {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\treturn n.vni\n}\n\nfunc (n *network) setVxlanID(vni uint32) {\n\tn.Lock()\n\tn.vni = vni\n\tn.Unlock()\n}\n\nfunc (n *network) Key() []string {\n\treturn []string{\"overlay\", \"network\", n.id}\n}\n\nfunc (n *network) KeyPrefix() []string {\n\treturn []string{\"overlay\", \"network\"}\n}\n\nfunc (n *network) Value() []byte {\n\tb, err := json.Marshal(n.vxlanID())\n\tif err != nil {\n\t\treturn []byte{}\n\t}\n\n\treturn b\n}\n\nfunc (n *network) Index() uint64 {\n\treturn n.dbIndex\n}\n\nfunc (n *network) SetIndex(index uint64) {\n\tn.dbIndex = index\n\tn.dbExists = true\n}\n\nfunc (n *network) Exists() bool {\n\treturn n.dbExists\n}\n\nfunc (n *network) SetValue(value []byte) error {\n\tvar vni uint32\n\terr := json.Unmarshal(value, &vni)\n\tif err == nil {\n\t\tn.setVxlanID(vni)\n\t}\n\treturn err\n}\n\nfunc (n *network) writeToStore() error {\n\treturn n.driver.store.PutObjectAtomic(n)\n}\n\nfunc (n *network) releaseVxlanID() error {\n\tif n.driver.store == nil {\n\t\treturn fmt.Errorf(\"no datastore configured. cannot release vxlan id\")\n\t}\n\n\tif n.vxlanID() == 0 {\n\t\treturn nil\n\t}\n\n\tif err := n.driver.store.DeleteObjectAtomic(n); err != nil {\n\t\tif err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {\n\t\t\t\/\/ In both the above cases we can safely assume that the key has been removed by some other\n\t\t\t\/\/ instance and so simply get out of here\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"failed to delete network to vxlan id map: %v\", err)\n\t}\n\n\tn.driver.vxlanIdm.Release(n.vxlanID())\n\tn.setVxlanID(0)\n\treturn nil\n}\n\nfunc (n *network) obtainVxlanID() error {\n\tif n.driver.store == nil {\n\t\treturn fmt.Errorf(\"no datastore configured. cannot obtain vxlan id\")\n\t}\n\n\tfor {\n\t\tvar vxlanID uint32\n\t\tif err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {\n\t\t\tif err == datastore.ErrKeyNotFound {\n\t\t\t\tvxlanID, err = n.driver.vxlanIdm.GetID()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to allocate vxlan id: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tn.setVxlanID(vxlanID)\n\t\t\t\tif err := n.writeToStore(); err != nil {\n\t\t\t\t\tn.driver.vxlanIdm.Release(n.vxlanID())\n\t\t\t\t\tn.setVxlanID(0)\n\t\t\t\t\tif err == datastore.ErrKeyModified {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn fmt.Errorf(\"failed to update data store with vxlan id: %v\", err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"failed to obtain vxlan id from data store: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package memory\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Forked from https:\/\/github.com\/patrickmn\/go-cache\n\n\/\/ CacheWrapper is used to ensure that the underlying cleaner goroutine used to clean expired keys will not prevent\n\/\/ Cache from being garbage collected.\ntype CacheWrapper struct {\n\t*Cache\n}\n\n\/\/ A cleaner will periodically delete expired keys from cache.\ntype cleaner struct {\n\tinterval time.Duration\n\tstop chan bool\n}\n\n\/\/ Run will periodically delete expired keys from given cache until GC notify that it should stop.\nfunc (cleaner *cleaner) Run(cache *Cache) {\n\tticker := time.NewTicker(cleaner.interval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcache.Clean()\n\t\tcase <-cleaner.stop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ stopCleaner is a callback from GC used to stop cleaner goroutine.\nfunc stopCleaner(wrapper *CacheWrapper) {\n\twrapper.cleaner.stop <- true\n}\n\n\/\/ startCleaner will start a cleaner goroutine for given cache.\nfunc startCleaner(cache *Cache, interval time.Duration) {\n\tcleaner := &cleaner{\n\t\tinterval: interval,\n\t\tstop: make(chan bool),\n\t}\n\n\tcache.cleaner = cleaner\n\tgo cleaner.Run(cache)\n}\n\n\/\/ Counter is a simple counter with an optional expiration.\ntype Counter struct {\n\tValue int64\n\tExpiration int64\n}\n\n\/\/ Expired returns true if the counter has expired.\nfunc (counter Counter) Expired() bool {\n\tif counter.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > counter.Expiration\n}\n\n\/\/ Cache contains a collection of counters.\ntype Cache struct {\n\tmutex sync.RWMutex\n\tcounters map[string]Counter\n\tcleaner *cleaner\n}\n\n\/\/ NewCache returns a new cache.\nfunc NewCache(cleanInterval time.Duration) *CacheWrapper {\n\n\tcache := &Cache{\n\t\tcounters: map[string]Counter{},\n\t}\n\n\twrapper := &CacheWrapper{Cache: cache}\n\n\tif cleanInterval > 0 {\n\t\tstartCleaner(cache, cleanInterval)\n\t\truntime.SetFinalizer(wrapper, stopCleaner)\n\t}\n\n\treturn wrapper\n}\n\n\/\/ Increment increments given value on key.\n\/\/ If key is undefined or expired, it will create it.\nfunc (cache *Cache) Increment(key string, value int64, duration time.Duration) (int64, time.Time) {\n\tcache.mutex.Lock()\n\n\tcounter, ok := cache.counters[key]\n\tif !ok || counter.Expired() {\n\t\texpiration := time.Now().Add(duration).UnixNano()\n\t\tcounter = Counter{\n\t\t\tValue: value,\n\t\t\tExpiration: expiration,\n\t\t}\n\n\t\tcache.counters[key] = counter\n\t\tcache.mutex.Unlock()\n\n\t\treturn value, time.Unix(0, expiration)\n\t}\n\n\tvalue = counter.Value + value\n\tcounter.Value = value\n\texpiration := counter.Expiration\n\n\tcache.counters[key] = counter\n\tcache.mutex.Unlock()\n\n\treturn value, time.Unix(0, expiration)\n}\n\n\/\/ Get returns key's value and expiration.\nfunc (cache *Cache) Get(key string, duration time.Duration) (int64, time.Time) {\n\tcache.mutex.RLock()\n\n\tcounter, ok := cache.counters[key]\n\tif !ok || counter.Expired() {\n\t\texpiration := time.Now().Add(duration).UnixNano()\n\t\tcache.mutex.RUnlock()\n\t\treturn 0, time.Unix(0, expiration)\n\t}\n\n\tvalue := counter.Value\n\texpiration := counter.Expiration\n\tcache.mutex.RUnlock()\n\n\treturn value, time.Unix(0, expiration)\n}\n\n\/\/ Clean will deleted any expired keys.\nfunc (cache *Cache) Clean() {\n\tnow := time.Now().UnixNano()\n\n\tcache.mutex.Lock()\n\tfor key, counter := range cache.counters {\n\t\tif now > counter.Expiration {\n\t\t\tdelete(cache.counters, key)\n\t\t}\n\t}\n\tcache.mutex.Unlock()\n}\n\n\/\/ Reset changes the key's value and resets the expiration.\nfunc (cache *Cache) Reset(key string, duration time.Duration) (int64, time.Time) {\n\tcache.mutex.Lock()\n\tdelete(cache.counters, key)\n\tcache.mutex.Unlock()\n\n\texpiration := time.Now().Add(duration).UnixNano()\n\treturn 0, time.Unix(0, expiration)\n}\n<commit_msg>fix(store\/memory): Try an experiment to uses sync.Map to store counters<commit_after>package memory\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Forked from https:\/\/github.com\/patrickmn\/go-cache\n\n\/\/ CacheWrapper is used to ensure that the underlying cleaner goroutine used to clean expired keys will not prevent\n\/\/ Cache from being garbage collected.\ntype CacheWrapper struct {\n\t*Cache\n}\n\n\/\/ A cleaner will periodically delete expired keys from cache.\ntype cleaner struct {\n\tinterval time.Duration\n\tstop chan bool\n}\n\n\/\/ Run will periodically delete expired keys from given cache until GC notify that it should stop.\nfunc (cleaner *cleaner) Run(cache *Cache) {\n\tticker := time.NewTicker(cleaner.interval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcache.Clean()\n\t\tcase <-cleaner.stop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ stopCleaner is a callback from GC used to stop cleaner goroutine.\nfunc stopCleaner(wrapper *CacheWrapper) {\n\twrapper.cleaner.stop <- true\n\twrapper.cleaner = nil\n}\n\n\/\/ startCleaner will start a cleaner goroutine for given cache.\nfunc startCleaner(cache *Cache, interval time.Duration) {\n\tcleaner := &cleaner{\n\t\tinterval: interval,\n\t\tstop: make(chan bool),\n\t}\n\n\tcache.cleaner = cleaner\n\tgo cleaner.Run(cache)\n}\n\n\/\/ Counter is a simple counter with an expiration.\ntype Counter struct {\n\tmutex sync.RWMutex\n\tvalue int64\n\texpiration int64\n}\n\n\/\/ Expired returns true if the counter has expired.\nfunc (counter *Counter) Expired() bool {\n\tcounter.mutex.RLock()\n\tdefer counter.mutex.RUnlock()\n\n\tif counter.expiration == 0 {\n\t\treturn true\n\t}\n\treturn time.Now().UnixNano() > counter.expiration\n}\n\n\/\/ Load returns the value and the expiration of this counter.\n\/\/ If the counter is expired, it will use the given expiration.\nfunc (counter *Counter) Load(expiration int64) (int64, int64) {\n\tcounter.mutex.RLock()\n\tdefer counter.mutex.RUnlock()\n\n\tif counter.expiration == 0 || time.Now().UnixNano() > counter.expiration {\n\t\treturn 0, expiration\n\t}\n\n\treturn counter.value, counter.expiration\n}\n\n\/\/ Increment increments given value on this counter.\n\/\/ If the counter is expired, it will use the given expiration.\n\/\/ It returns its current value and expiration.\nfunc (counter *Counter) Increment(value int64, expiration int64) (int64, int64) {\n\tcounter.mutex.Lock()\n\tdefer counter.mutex.Unlock()\n\n\tif counter.expiration == 0 || time.Now().UnixNano() > counter.expiration {\n\t\tcounter.value = value\n\t\tcounter.expiration = expiration\n\t\treturn counter.value, counter.expiration\n\t}\n\n\tcounter.value += value\n\treturn counter.value, counter.expiration\n}\n\n\/\/ Cache contains a collection of counters.\ntype Cache struct {\n\tcounters sync.Map\n\tcleaner *cleaner\n}\n\n\/\/ NewCache returns a new cache.\nfunc NewCache(cleanInterval time.Duration) *CacheWrapper {\n\n\tcache := &Cache{}\n\twrapper := &CacheWrapper{Cache: cache}\n\n\tif cleanInterval > 0 {\n\t\tstartCleaner(cache, cleanInterval)\n\t\truntime.SetFinalizer(wrapper, stopCleaner)\n\t}\n\n\treturn wrapper\n}\n\n\/\/ LoadOrStore returns the existing counter for the key if present.\n\/\/ Otherwise, it stores and returns the given counter.\n\/\/ The loaded result is true if the counter was loaded, false if stored.\nfunc (cache *Cache) LoadOrStore(key string, counter *Counter) (*Counter, bool) {\n\tval, loaded := cache.counters.LoadOrStore(key, counter)\n\tif val == nil {\n\t\treturn counter, false\n\t}\n\n\tactual := val.(*Counter)\n\treturn actual, loaded\n}\n\n\/\/ Load returns the counter stored in the map for a key, or nil if no counter is present.\n\/\/ The ok result indicates whether counter was found in the map.\nfunc (cache *Cache) Load(key string) (*Counter, bool) {\n\tval, ok := cache.counters.Load(key)\n\tif val == nil || !ok {\n\t\treturn nil, false\n\t}\n\tactual := val.(*Counter)\n\treturn actual, true\n}\n\n\/\/ Store sets the counter for a key.\nfunc (cache *Cache) Store(key string, counter *Counter) {\n\tcache.counters.Store(key, counter)\n}\n\n\/\/ Delete deletes the value for a key.\nfunc (cache *Cache) Delete(key string) {\n\tcache.counters.Delete(key)\n}\n\n\/\/ Range calls handler sequentially for each key and value present in the cache.\n\/\/ If handler returns false, range stops the iteration.\nfunc (cache *Cache) Range(handler func(key string, counter *Counter)) {\n\tcache.counters.Range(func(k interface{}, v interface{}) bool {\n\t\tif v == nil {\n\t\t\treturn true\n\t\t}\n\n\t\tkey := k.(string)\n\t\tcounter := v.(*Counter)\n\n\t\thandler(key, counter)\n\n\t\treturn true\n\t})\n}\n\n\/\/ Increment increments given value on key.\n\/\/ If key is undefined or expired, it will create it.\nfunc (cache *Cache) Increment(key string, value int64, duration time.Duration) (int64, time.Time) {\n\texpiration := time.Now().Add(duration).UnixNano()\n\n\tcounter, loaded := cache.LoadOrStore(key, &Counter{\n\t\tmutex: sync.RWMutex{},\n\t\tvalue: value,\n\t\texpiration: expiration,\n\t})\n\n\tif !loaded {\n\t\treturn value, time.Unix(0, expiration)\n\t}\n\n\tvalue, expiration = counter.Increment(value, expiration)\n\tcache.Store(key, counter)\n\treturn value, time.Unix(0, expiration)\n}\n\n\/\/ Get returns key's value and expiration.\nfunc (cache *Cache) Get(key string, duration time.Duration) (int64, time.Time) {\n\texpiration := time.Now().Add(duration).UnixNano()\n\n\tcounter, ok := cache.Load(key)\n\tif !ok {\n\t\treturn 0, time.Unix(0, expiration)\n\t}\n\n\tvalue, expiration := counter.Load(expiration)\n\n\treturn value, time.Unix(0, expiration)\n}\n\n\/\/ Clean will deleted any expired keys.\nfunc (cache *Cache) Clean() {\n\tcache.Range(func(key string, counter *Counter) {\n\t\tif counter.Expired() {\n\t\t\tcache.counters.Delete(key)\n\t\t}\n\t})\n}\n\n\/\/ Reset changes the key's value and resets the expiration.\nfunc (cache *Cache) Reset(key string, duration time.Duration) (int64, time.Time) {\n\tcache.counters.Delete(key)\n\n\texpiration := time.Now().Add(duration).UnixNano()\n\treturn 0, time.Unix(0, expiration)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"os\"\n\t\"text\/template\"\n\n\t\"github.com\/urfave\/cli\"\n\n\t\"github.com\/drone\/drone-cli\/drone\/internal\"\n)\n\nvar serverCreateCmd = cli.Command{\n\tName: \"create\",\n\tUsage: \"crate a new server\",\n\tAction: serverCreate,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"format output\",\n\t\t\tValue: tmplServerCreate,\n\t\t\tHidden: true,\n\t\t},\n\t},\n}\n\nfunc serverCreate(c *cli.Context) error {\n\tclient, err := internal.NewAutoscaleClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserver, err := client.ServerCreate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"_\").Parse(c.String(\"format\") + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn tmpl.Execute(os.Stdout, server)\n}\n\nvar tmplServerCreate = `Name: {{ .Name }}\nState: {{ .State }}\n`\n<commit_msg>fix create usage spelling<commit_after>package server\n\nimport (\n\t\"os\"\n\t\"text\/template\"\n\n\t\"github.com\/urfave\/cli\"\n\n\t\"github.com\/drone\/drone-cli\/drone\/internal\"\n)\n\nvar serverCreateCmd = cli.Command{\n\tName: \"create\",\n\tUsage: \"create a new server\",\n\tAction: serverCreate,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"format output\",\n\t\t\tValue: tmplServerCreate,\n\t\t\tHidden: true,\n\t\t},\n\t},\n}\n\nfunc serverCreate(c *cli.Context) error {\n\tclient, err := internal.NewAutoscaleClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserver, err := client.ServerCreate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"_\").Parse(c.String(\"format\") + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn tmpl.Execute(os.Stdout, server)\n}\n\nvar tmplServerCreate = `Name: {{ .Name }}\nState: {{ .State }}\n`\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport \"github.com\/couchbase\/indexing\/secondary\/dcp\"\nimport \"github.com\/couchbase\/indexing\/secondary\/logging\"\nimport \"errors\"\nimport \"fmt\"\nimport \"time\"\nimport \"net\"\nimport \"net\/url\"\nimport \"sync\"\n\nvar (\n\tErrInvalidNodeId = errors.New(\"Invalid NodeId\")\n\tErrInvalidService = errors.New(\"Invalid service\")\n\tErrNodeNotBucketMember = errors.New(\"Node is not a member of bucket\")\n)\n\nconst (\n\tINDEX_ADMIN_SERVICE = \"indexAdmin\"\n\tINDEX_SCAN_SERVICE = \"indexScan\"\n\tINDEX_HTTP_SERVICE = \"indexHttp\"\n)\n\n\/\/ Helper object for fetching cluster information\n\/\/ Can be used by services running on a cluster node to connect with\n\/\/ local management service for obtaining cluster information.\n\/\/ Info cache can be updated by using Refresh() method.\ntype ClusterInfoCache struct {\n\tsync.Mutex\n\turl string\n\tpoolName string\n\tlogPrefix string\n\tretries int\n\n\tclient couchbase.Client\n\tpool couchbase.Pool\n\tnodes []couchbase.Node\n\tnodesvs []couchbase.NodeServices\n}\n\ntype ServiceAddressProvider interface {\n\tGetLocalServiceAddress(srvc string) (string, error)\n\tGetLocalServicePort(srvc string) (string, error)\n\tGetLocalServiceHost(srvc string) (string, error)\n\tGetLocalHostAddress() (string, error)\n}\n\ntype NodeId int\n\nfunc NewClusterInfoCache(clusterUrl string, pool string) (*ClusterInfoCache, error) {\n\tc := &ClusterInfoCache{\n\t\turl: clusterUrl,\n\t\tpoolName: pool,\n\t\tretries: 0,\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *ClusterInfoCache) SetLogPrefix(p string) {\n\tc.logPrefix = p\n}\n\nfunc (c *ClusterInfoCache) SetMaxRetries(r int) {\n\tc.retries = r\n}\n\nfunc (c *ClusterInfoCache) Fetch() error {\n\n\tfn := func(r int, err error) error {\n\t\tif r > 0 {\n\t\t\tlogging.Infof(\"%vError occured during cluster info update (%v) .. Retrying(%d)\",\n\t\t\t\tc.logPrefix, err, r)\n\t\t}\n\n\t\tc.client, err = couchbase.Connect(c.url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.pool, err = c.client.GetPool(c.poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar nodes []couchbase.Node\n\t\tfor _, n := range c.pool.Nodes {\n\t\t\tif n.ClusterMembership == \"active\" {\n\t\t\t\tnodes = append(nodes, n)\n\t\t\t}\n\t\t}\n\t\tc.nodes = nodes\n\n\t\tfound := false\n\t\tfor _, node := range c.nodes {\n\t\t\tif node.ThisNode {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn errors.New(\"Current node's cluster membership is not active\")\n\t\t}\n\n\t\tvar poolServs couchbase.PoolServices\n\t\tpoolServs, err = c.client.GetPoolServices(c.poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.nodesvs = poolServs.NodesExt\n\n\t\treturn nil\n\t}\n\n\trh := NewRetryHelper(c.retries, time.Second, 1, fn)\n\treturn rh.Run()\n}\n\nfunc (c ClusterInfoCache) GetNodesByServiceType(srvc string) (nids []NodeId) {\n\tfor i, svs := range c.nodesvs {\n\t\tif _, ok := svs.Services[srvc]; ok {\n\t\t\tnids = append(nids, NodeId(i))\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetNodesByBucket(bucket string) (nids []NodeId, err error) {\n\tb, berr := c.pool.GetBucket(bucket)\n\tif berr != nil {\n\t\terr = berr\n\t\treturn\n\t}\n\tdefer b.Close()\n\n\tfor i, _ := range c.nodes {\n\t\tnid := NodeId(i)\n\t\tif _, ok := c.findVBServerIndex(b, nid); ok {\n\t\t\tnids = append(nids, nid)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetCurrentNode() NodeId {\n\tfor i, node := range c.nodes {\n\t\tif node.ThisNode {\n\t\t\treturn NodeId(i)\n\t\t}\n\t}\n\t\/\/ TODO: can we avoid this panic ?\n\tpanic(\"Current node is not in active membership\")\n}\n\nfunc (c ClusterInfoCache) GetServiceAddress(nid NodeId, srvc string) (addr string, err error) {\n\tvar port int\n\tvar ok bool\n\n\tif int(nid) >= len(c.nodesvs) {\n\t\terr = ErrInvalidNodeId\n\t\treturn\n\t}\n\n\tnode := c.nodesvs[nid]\n\tif port, ok = node.Services[srvc]; !ok {\n\t\terr = ErrInvalidService\n\t\treturn\n\t}\n\n\t\/\/ For current node, hostname might be empty\n\t\/\/ Insert hostname used to connect to the cluster\n\tcUrl, err := url.Parse(c.url)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Unable to parse cluster url - \" + err.Error())\n\t}\n\th, _, _ := net.SplitHostPort(cUrl.Host)\n\tif node.Hostname == \"\" {\n\t\tnode.Hostname = h\n\t}\n\n\taddr = net.JoinHostPort(node.Hostname, fmt.Sprint(port))\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetVBuckets(nid NodeId, bucket string) (vbs []uint32, err error) {\n\tb, berr := c.pool.GetBucket(bucket)\n\tif berr != nil {\n\t\terr = berr\n\t\treturn\n\t}\n\tdefer b.Close()\n\n\tidx, ok := c.findVBServerIndex(b, nid)\n\tif !ok {\n\t\terr = ErrNodeNotBucketMember\n\t\treturn\n\t}\n\n\tvbmap := b.VBServerMap()\n\n\tfor vb, idxs := range vbmap.VBucketMap {\n\t\tif idxs[0] == idx {\n\t\t\tvbs = append(vbs, uint32(vb))\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) findVBServerIndex(b *couchbase.Bucket, nid NodeId) (int, bool) {\n\tbnodes := b.Nodes()\n\n\tfor idx, n := range bnodes {\n\t\tif c.sameNode(n, c.nodes[nid]) {\n\t\t\treturn idx, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\nfunc (c ClusterInfoCache) sameNode(n1 couchbase.Node, n2 couchbase.Node) bool {\n\treturn n1.Hostname == n2.Hostname\n}\n\nfunc (c ClusterInfoCache) GetLocalServiceAddress(srvc string) (string, error) {\n\tnode := c.GetCurrentNode()\n\treturn c.GetServiceAddress(node, srvc)\n}\n\nfunc (c ClusterInfoCache) GetLocalServicePort(srvc string) (string, error) {\n\taddr, err := c.GetLocalServiceAddress(srvc)\n\tif err != nil {\n\t\treturn addr, err\n\t}\n\n\t_, p, e := net.SplitHostPort(addr)\n\tif e != nil {\n\t\treturn p, e\n\t}\n\n\treturn net.JoinHostPort(\"\", p), nil\n}\n\nfunc (c ClusterInfoCache) GetLocalServiceHost(srvc string) (string, error) {\n\n\taddr, err := c.GetLocalServiceAddress(srvc)\n\tif err != nil {\n\t\treturn addr, err\n\t}\n\n\th, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn h, nil\n}\n\nfunc (c ClusterInfoCache) GetLocalHostAddress() (string, error) {\n\n\tcUrl, err := url.Parse(c.url)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Unable to parse cluster url - \" + err.Error())\n\t}\n\t_, p, _ := net.SplitHostPort(cUrl.Host)\n\n\th, err := c.GetLocalServiceHost(INDEX_ADMIN_SERVICE)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn net.JoinHostPort(h, p), nil\n}\n<commit_msg>cluster_info: Add IsNodeHealthy() API<commit_after>package common\n\nimport \"github.com\/couchbase\/indexing\/secondary\/dcp\"\nimport \"github.com\/couchbase\/indexing\/secondary\/logging\"\nimport \"errors\"\nimport \"fmt\"\nimport \"time\"\nimport \"net\"\nimport \"net\/url\"\nimport \"sync\"\n\nvar (\n\tErrInvalidNodeId = errors.New(\"Invalid NodeId\")\n\tErrInvalidService = errors.New(\"Invalid service\")\n\tErrNodeNotBucketMember = errors.New(\"Node is not a member of bucket\")\n)\n\nconst (\n\tINDEX_ADMIN_SERVICE = \"indexAdmin\"\n\tINDEX_SCAN_SERVICE = \"indexScan\"\n\tINDEX_HTTP_SERVICE = \"indexHttp\"\n)\n\n\/\/ Helper object for fetching cluster information\n\/\/ Can be used by services running on a cluster node to connect with\n\/\/ local management service for obtaining cluster information.\n\/\/ Info cache can be updated by using Refresh() method.\ntype ClusterInfoCache struct {\n\tsync.Mutex\n\turl string\n\tpoolName string\n\tlogPrefix string\n\tretries int\n\n\tclient couchbase.Client\n\tpool couchbase.Pool\n\tnodes []couchbase.Node\n\tnodesvs []couchbase.NodeServices\n}\n\ntype ServiceAddressProvider interface {\n\tGetLocalServiceAddress(srvc string) (string, error)\n\tGetLocalServicePort(srvc string) (string, error)\n\tGetLocalServiceHost(srvc string) (string, error)\n\tGetLocalHostAddress() (string, error)\n}\n\ntype NodeId int\n\nfunc NewClusterInfoCache(clusterUrl string, pool string) (*ClusterInfoCache, error) {\n\tc := &ClusterInfoCache{\n\t\turl: clusterUrl,\n\t\tpoolName: pool,\n\t\tretries: 0,\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *ClusterInfoCache) SetLogPrefix(p string) {\n\tc.logPrefix = p\n}\n\nfunc (c *ClusterInfoCache) SetMaxRetries(r int) {\n\tc.retries = r\n}\n\nfunc (c *ClusterInfoCache) Fetch() error {\n\n\tfn := func(r int, err error) error {\n\t\tif r > 0 {\n\t\t\tlogging.Infof(\"%vError occured during cluster info update (%v) .. Retrying(%d)\",\n\t\t\t\tc.logPrefix, err, r)\n\t\t}\n\n\t\tc.client, err = couchbase.Connect(c.url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.pool, err = c.client.GetPool(c.poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar nodes []couchbase.Node\n\t\tfor _, n := range c.pool.Nodes {\n\t\t\tif n.ClusterMembership == \"active\" {\n\t\t\t\tnodes = append(nodes, n)\n\t\t\t}\n\t\t}\n\t\tc.nodes = nodes\n\n\t\tfound := false\n\t\tfor _, node := range c.nodes {\n\t\t\tif node.ThisNode {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn errors.New(\"Current node's cluster membership is not active\")\n\t\t}\n\n\t\tvar poolServs couchbase.PoolServices\n\t\tpoolServs, err = c.client.GetPoolServices(c.poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.nodesvs = poolServs.NodesExt\n\n\t\treturn nil\n\t}\n\n\trh := NewRetryHelper(c.retries, time.Second, 1, fn)\n\treturn rh.Run()\n}\n\nfunc (c ClusterInfoCache) GetNodesByServiceType(srvc string) (nids []NodeId) {\n\tfor i, svs := range c.nodesvs {\n\t\tif _, ok := svs.Services[srvc]; ok {\n\t\t\tnids = append(nids, NodeId(i))\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetNodesByBucket(bucket string) (nids []NodeId, err error) {\n\tb, berr := c.pool.GetBucket(bucket)\n\tif berr != nil {\n\t\terr = berr\n\t\treturn\n\t}\n\tdefer b.Close()\n\n\tfor i, _ := range c.nodes {\n\t\tnid := NodeId(i)\n\t\tif _, ok := c.findVBServerIndex(b, nid); ok {\n\t\t\tnids = append(nids, nid)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetCurrentNode() NodeId {\n\tfor i, node := range c.nodes {\n\t\tif node.ThisNode {\n\t\t\treturn NodeId(i)\n\t\t}\n\t}\n\t\/\/ TODO: can we avoid this panic ?\n\tpanic(\"Current node is not in active membership\")\n}\n\nfunc (c ClusterInfoCache) IsNodeHealthy(nid NodeId) (bool, error) {\n\tif int(nid) >= len(c.nodes) {\n\t\treturn false, ErrInvalidNodeId\n\t}\n\n\treturn c.nodes[nid].Status == \"healthy\", nil\n}\n\nfunc (c ClusterInfoCache) GetServiceAddress(nid NodeId, srvc string) (addr string, err error) {\n\tvar port int\n\tvar ok bool\n\n\tif int(nid) >= len(c.nodesvs) {\n\t\terr = ErrInvalidNodeId\n\t\treturn\n\t}\n\n\tnode := c.nodesvs[nid]\n\tif port, ok = node.Services[srvc]; !ok {\n\t\terr = ErrInvalidService\n\t\treturn\n\t}\n\n\t\/\/ For current node, hostname might be empty\n\t\/\/ Insert hostname used to connect to the cluster\n\tcUrl, err := url.Parse(c.url)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Unable to parse cluster url - \" + err.Error())\n\t}\n\th, _, _ := net.SplitHostPort(cUrl.Host)\n\tif node.Hostname == \"\" {\n\t\tnode.Hostname = h\n\t}\n\n\taddr = net.JoinHostPort(node.Hostname, fmt.Sprint(port))\n\treturn\n}\n\nfunc (c ClusterInfoCache) GetVBuckets(nid NodeId, bucket string) (vbs []uint32, err error) {\n\tb, berr := c.pool.GetBucket(bucket)\n\tif berr != nil {\n\t\terr = berr\n\t\treturn\n\t}\n\tdefer b.Close()\n\n\tidx, ok := c.findVBServerIndex(b, nid)\n\tif !ok {\n\t\terr = ErrNodeNotBucketMember\n\t\treturn\n\t}\n\n\tvbmap := b.VBServerMap()\n\n\tfor vb, idxs := range vbmap.VBucketMap {\n\t\tif idxs[0] == idx {\n\t\t\tvbs = append(vbs, uint32(vb))\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c ClusterInfoCache) findVBServerIndex(b *couchbase.Bucket, nid NodeId) (int, bool) {\n\tbnodes := b.Nodes()\n\n\tfor idx, n := range bnodes {\n\t\tif c.sameNode(n, c.nodes[nid]) {\n\t\t\treturn idx, true\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\nfunc (c ClusterInfoCache) sameNode(n1 couchbase.Node, n2 couchbase.Node) bool {\n\treturn n1.Hostname == n2.Hostname\n}\n\nfunc (c ClusterInfoCache) GetLocalServiceAddress(srvc string) (string, error) {\n\tnode := c.GetCurrentNode()\n\treturn c.GetServiceAddress(node, srvc)\n}\n\nfunc (c ClusterInfoCache) GetLocalServicePort(srvc string) (string, error) {\n\taddr, err := c.GetLocalServiceAddress(srvc)\n\tif err != nil {\n\t\treturn addr, err\n\t}\n\n\t_, p, e := net.SplitHostPort(addr)\n\tif e != nil {\n\t\treturn p, e\n\t}\n\n\treturn net.JoinHostPort(\"\", p), nil\n}\n\nfunc (c ClusterInfoCache) GetLocalServiceHost(srvc string) (string, error) {\n\n\taddr, err := c.GetLocalServiceAddress(srvc)\n\tif err != nil {\n\t\treturn addr, err\n\t}\n\n\th, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn h, nil\n}\n\nfunc (c ClusterInfoCache) GetLocalHostAddress() (string, error) {\n\n\tcUrl, err := url.Parse(c.url)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Unable to parse cluster url - \" + err.Error())\n\t}\n\t_, p, _ := net.SplitHostPort(cUrl.Host)\n\n\th, err := c.GetLocalServiceHost(INDEX_ADMIN_SERVICE)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn net.JoinHostPort(h, p), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package dynamodb\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/dynamodb\"\n\t\"github.com\/gruntwork-io\/terragrunt\/aws_helper\"\n\t\"github.com\/gruntwork-io\/terragrunt\/errors\"\n\t\"github.com\/gruntwork-io\/terragrunt\/options\"\n\t\"github.com\/gruntwork-io\/terragrunt\/util\"\n)\n\n\/\/ DynamoDB only allows 10 table creates\/deletes simultaneously. To ensure we don't hit this error, especially when\n\/\/ running many automated tests in parallel, we use a counting semaphore\nvar tableCreateDeleteSemaphore = NewCountingSemaphore(10)\n\n\/\/ Terraform requires the DynamoDB table to have a primary key with this name\nconst ATTR_LOCK_ID = \"LockID\"\n\n\/\/ Default is to retry for up to 5 minutes\nconst MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE = 30\nconst SLEEP_BETWEEN_TABLE_STATUS_CHECKS = 10 * time.Second\n\nconst DYNAMODB_PAY_PER_REQUEST_BILLING_MODE = \"PAY_PER_REQUEST\"\n\n\/\/ Create an authenticated client for DynamoDB\nfunc CreateDynamoDbClient(config *aws_helper.AwsSessionConfig, terragruntOptions *options.TerragruntOptions) (*dynamodb.DynamoDB, error) {\n\tsession, err := aws_helper.CreateAwsSession(config, terragruntOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dynamodb.New(session), nil\n}\n\n\/\/ Create the lock table in DynamoDB if it doesn't already exist\nfunc CreateLockTableIfNecessary(tableName string, tags map[string]string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableExists, err := LockTableExistsAndIsActive(tableName, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !tableExists {\n\t\tterragruntOptions.Logger.Printf(\"Lock table %s does not exist in DynamoDB. Will need to create it just this first time.\", tableName)\n\t\treturn CreateLockTable(tableName, tags, client, terragruntOptions)\n\t}\n\n\treturn nil\n}\n\n\/\/ Return true if the lock table exists in DynamoDB and is in \"active\" state\nfunc LockTableExistsAndIsActive(tableName string, client *dynamodb.DynamoDB) (bool, error) {\n\toutput, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})\n\tif err != nil {\n\t\tif awsErr, isAwsErr := err.(awserr.Error); isAwsErr && awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\treturn *output.Table.TableStatus == dynamodb.TableStatusActive, nil\n}\n\n\/\/ Return true if the lock table's SSEncryption is turned on\nfunc LockTableCheckSSEncryptionIsOn(tableName string, client *dynamodb.DynamoDB) (bool, error) {\n\toutput, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})\n\tif err != nil {\n\t\treturn false, errors.WithStackTrace(err)\n\t}\n\n\treturn output.Table.SSEDescription != nil && aws.StringValue(output.Table.SSEDescription.Status) == dynamodb.SSEStatusEnabled, nil\n}\n\n\/\/ Create a lock table in DynamoDB and wait until it is in \"active\" state. If the table already exists, merely wait\n\/\/ until it is in \"active\" state.\nfunc CreateLockTable(tableName string, tags map[string]string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\tterragruntOptions.Logger.Printf(\"Creating table %s in DynamoDB\", tableName)\n\n\tcreateTableOutput, err := client.CreateTable(&dynamodb.CreateTableInput{\n\t\tTableName: aws.String(tableName),\n\t\tBillingMode: aws.String(DYNAMODB_PAY_PER_REQUEST_BILLING_MODE),\n\t\tAttributeDefinitions: []*dynamodb.AttributeDefinition{\n\t\t\t{\n\t\t\t\tAttributeName: aws.String(ATTR_LOCK_ID),\n\t\t\t\tAttributeType: aws.String(dynamodb.ScalarAttributeTypeS),\n\t\t\t},\n\t\t},\n\t\tKeySchema: []*dynamodb.KeySchemaElement{\n\t\t\t{\n\t\t\t\tAttributeName: aws.String(ATTR_LOCK_ID),\n\t\t\t\tKeyType: aws.String(dynamodb.KeyTypeHash),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\tif isTableAlreadyBeingCreatedOrUpdatedError(err) {\n\t\t\tterragruntOptions.Logger.Printf(\"Looks like someone created table %s at the same time. Will wait for it to be in active state.\", tableName)\n\t\t} else {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\terr = waitForTableToBeActive(tableName, client, MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE, SLEEP_BETWEEN_TABLE_STATUS_CHECKS, terragruntOptions)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif createTableOutput != nil && createTableOutput.TableDescription != nil && createTableOutput.TableDescription.TableArn != nil {\n\t\t\/\/ Do not tag in case somebody else had created the table\n\n\t\terr = tagTableIfTagsGiven(tags, createTableOutput.TableDescription.TableArn, client, terragruntOptions)\n\n\t\tif err != nil {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc tagTableIfTagsGiven(tags map[string]string, tableArn *string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\n\tif tags == nil || len(tags) == 0 {\n\t\tterragruntOptions.Logger.Printf(\"No tags for lock table given.\")\n\t\treturn nil\n\t}\n\n\t\/\/ we were able to create the table successfully, now add tags\n\tterragruntOptions.Logger.Printf(\"Adding tags to lock table: %s\", tags)\n\n\tvar tagsConverted []*dynamodb.Tag\n\n\tfor k, v := range tags {\n\t\ttagsConverted = append(tagsConverted, &dynamodb.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\n\tvar input = dynamodb.TagResourceInput{\n\t\tResourceArn: tableArn,\n\t\tTags: tagsConverted}\n\n\t_, err := client.TagResource(&input)\n\n\treturn err\n}\n\n\/\/ Delete the given table in DynamoDB\nfunc DeleteTable(tableName string, client *dynamodb.DynamoDB) error {\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\t_, err := client.DeleteTable(&dynamodb.DeleteTableInput{TableName: aws.String(tableName)})\n\treturn err\n}\n\n\/\/ Return true if the given error is the error message returned by AWS when the resource already exists and is being\n\/\/ updated by someone else\nfunc isTableAlreadyBeingCreatedOrUpdatedError(err error) bool {\n\tawsErr, isAwsErr := err.(awserr.Error)\n\treturn isAwsErr && awsErr.Code() == \"ResourceInUseException\"\n}\n\n\/\/ Wait for the given DynamoDB table to be in the \"active\" state. If it's not in \"active\" state, sleep for the\n\/\/ specified amount of time, and try again, up to a maximum of maxRetries retries.\nfunc waitForTableToBeActive(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetries time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\treturn waitForTableToBeActiveWithRandomSleep(tableName, client, maxRetries, sleepBetweenRetries, sleepBetweenRetries, terragruntOptions)\n}\n\n\/\/ Waits for the given table as described above, but sleeps a random amount of time greater than sleepBetweenRetriesMin\n\/\/ and less than sleepBetweenRetriesMax between tries. This is to avoid an AWS issue where all waiting requests fire at\n\/\/ the same time, which continually triggered AWS's \"subscriber limit exceeded\" API error.\nfunc waitForTableToBeActiveWithRandomSleep(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetriesMin time.Duration, sleepBetweenRetriesMax time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableReady, err := LockTableExistsAndIsActive(tableName, client)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tableReady {\n\t\t\tterragruntOptions.Logger.Printf(\"Success! Table %s is now in active state.\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tsleepBetweenRetries := util.GetRandomTime(sleepBetweenRetriesMin, sleepBetweenRetriesMax)\n\t\tterragruntOptions.Logger.Printf(\"Table %s is not yet in active state. Will check again after %s.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableActiveRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}\n\n\/\/ Encrypt the TFState Lock table - If Necessary\nfunc UpdateLockTableSetSSEncryptionOnIfNecessary(tableName string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableSSEncrypted, err := LockTableCheckSSEncryptionIsOn(tableName, client)\n\tif err != nil {\n\t\treturn errors.WithStackTrace(err)\n\t}\n\n\tif tableSSEncrypted {\n\t\tterragruntOptions.Logger.Printf(\"Table %s already has encryption enabled\", tableName)\n\t\treturn nil\n\t}\n\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\tterragruntOptions.Logger.Printf(\"Enabling server-side encryption on table %s in AWS DynamoDB\", tableName)\n\n\tinput := &dynamodb.UpdateTableInput{\n\t\tSSESpecification: &dynamodb.SSESpecification{\n\t\t\tEnabled: aws.Bool(true),\n\t\t\tSSEType: aws.String(\"KMS\"),\n\t\t},\n\t\tTableName: aws.String(tableName),\n\t}\n\n\tif _, err := client.UpdateTable(input); err != nil {\n\t\tif isTableAlreadyBeingCreatedOrUpdatedError(err) {\n\t\t\tterragruntOptions.Logger.Printf(\"Looks like someone is already updating table %s at the same time. Will wait for that update to complete.\", tableName)\n\t\t} else {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\tif err := waitForEncryptionToBeEnabled(tableName, client, terragruntOptions); err != nil {\n\t\treturn errors.WithStackTrace(err)\n\t}\n\n\treturn waitForTableToBeActive(tableName, client, MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE, SLEEP_BETWEEN_TABLE_STATUS_CHECKS, terragruntOptions)\n}\n\n\/\/ Wait until encryption is enabled for the given table\nfunc waitForEncryptionToBeEnabled(tableName string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\tmaxRetries := 15\n\tsleepBetweenRetries := 20 * time.Second\n\n\tterragruntOptions.Logger.Printf(\"Waiting for encryption to be enabled on table %s\", tableName)\n\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableSSEncrypted, err := LockTableCheckSSEncryptionIsOn(tableName, client)\n\t\tif err != nil {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\n\t\tif tableSSEncrypted {\n\t\t\tterragruntOptions.Logger.Printf(\"Encryption is now enabled for table %s!\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tterragruntOptions.Logger.Printf(\"Encryption is still not enabled for table %s. Will sleep for %v and try again.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableEncryptedRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}\n\n\/\/ Custom error types\n\ntype TableActiveRetriesExceeded struct {\n\tTableName string\n\tRetries int\n}\n\nfunc (err TableActiveRetriesExceeded) Error() string {\n\treturn fmt.Sprintf(\"Table %s is still not in active state after %d retries.\", err.TableName, err.Retries)\n}\n\ntype TableDoesNotExist struct {\n\tTableName string\n\tUnderlying error\n}\n\nfunc (err TableDoesNotExist) Error() string {\n\treturn fmt.Sprintf(\"Table %s does not exist in DynamoDB! Original error from AWS: %v\", err.TableName, err.Underlying)\n}\n\ntype TableEncryptedRetriesExceeded struct {\n\tTableName string\n\tRetries int\n}\n\nfunc (err TableEncryptedRetriesExceeded) Error() string {\n\treturn fmt.Sprintf(\"Table %s still does not have encryption enabled after %d retries.\", err.TableName, err.Retries)\n}\n<commit_msg>Updates CreateTable to instantiate `CreateTableInput` vars separately<commit_after>package dynamodb\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/dynamodb\"\n\t\"github.com\/gruntwork-io\/terragrunt\/aws_helper\"\n\t\"github.com\/gruntwork-io\/terragrunt\/errors\"\n\t\"github.com\/gruntwork-io\/terragrunt\/options\"\n\t\"github.com\/gruntwork-io\/terragrunt\/util\"\n)\n\n\/\/ DynamoDB only allows 10 table creates\/deletes simultaneously. To ensure we don't hit this error, especially when\n\/\/ running many automated tests in parallel, we use a counting semaphore\nvar tableCreateDeleteSemaphore = NewCountingSemaphore(10)\n\n\/\/ Terraform requires the DynamoDB table to have a primary key with this name\nconst ATTR_LOCK_ID = \"LockID\"\n\n\/\/ Default is to retry for up to 5 minutes\nconst MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE = 30\nconst SLEEP_BETWEEN_TABLE_STATUS_CHECKS = 10 * time.Second\n\nconst DYNAMODB_PAY_PER_REQUEST_BILLING_MODE = \"PAY_PER_REQUEST\"\n\n\/\/ Create an authenticated client for DynamoDB\nfunc CreateDynamoDbClient(config *aws_helper.AwsSessionConfig, terragruntOptions *options.TerragruntOptions) (*dynamodb.DynamoDB, error) {\n\tsession, err := aws_helper.CreateAwsSession(config, terragruntOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dynamodb.New(session), nil\n}\n\n\/\/ Create the lock table in DynamoDB if it doesn't already exist\nfunc CreateLockTableIfNecessary(tableName string, tags map[string]string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableExists, err := LockTableExistsAndIsActive(tableName, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !tableExists {\n\t\tterragruntOptions.Logger.Printf(\"Lock table %s does not exist in DynamoDB. Will need to create it just this first time.\", tableName)\n\t\treturn CreateLockTable(tableName, tags, client, terragruntOptions)\n\t}\n\n\treturn nil\n}\n\n\/\/ Return true if the lock table exists in DynamoDB and is in \"active\" state\nfunc LockTableExistsAndIsActive(tableName string, client *dynamodb.DynamoDB) (bool, error) {\n\toutput, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})\n\tif err != nil {\n\t\tif awsErr, isAwsErr := err.(awserr.Error); isAwsErr && awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\treturn *output.Table.TableStatus == dynamodb.TableStatusActive, nil\n}\n\n\/\/ Return true if the lock table's SSEncryption is turned on\nfunc LockTableCheckSSEncryptionIsOn(tableName string, client *dynamodb.DynamoDB) (bool, error) {\n\toutput, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})\n\tif err != nil {\n\t\treturn false, errors.WithStackTrace(err)\n\t}\n\n\treturn output.Table.SSEDescription != nil && aws.StringValue(output.Table.SSEDescription.Status) == dynamodb.SSEStatusEnabled, nil\n}\n\n\/\/ Create a lock table in DynamoDB and wait until it is in \"active\" state. If the table already exists, merely wait\n\/\/ until it is in \"active\" state.\nfunc CreateLockTable(tableName string, tags map[string]string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\tterragruntOptions.Logger.Printf(\"Creating table %s in DynamoDB\", tableName)\n\n\tattributeDefinitions := []*dynamodb.AttributeDefinition{\n\t\t{AttributeName: aws.String(ATTR_LOCK_ID), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},\n\t}\n\n\tkeySchema := []*dynamodb.KeySchemaElement{\n\t\t{AttributeName: aws.String(ATTR_LOCK_ID), KeyType: aws.String(dynamodb.KeyTypeHash)},\n\t}\n\n\tcreateTableOutput, err := client.CreateTable(&dynamodb.CreateTableInput{\n\t\tTableName: aws.String(tableName),\n\t\tBillingMode: aws.String(DYNAMODB_PAY_PER_REQUEST_BILLING_MODE),\n\t\tAttributeDefinitions: attributeDefinitions,\n\t\tKeySchema: keySchema,\n\t})\n\n\tif err != nil {\n\t\tif isTableAlreadyBeingCreatedOrUpdatedError(err) {\n\t\t\tterragruntOptions.Logger.Printf(\"Looks like someone created table %s at the same time. Will wait for it to be in active state.\", tableName)\n\t\t} else {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\terr = waitForTableToBeActive(tableName, client, MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE, SLEEP_BETWEEN_TABLE_STATUS_CHECKS, terragruntOptions)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif createTableOutput != nil && createTableOutput.TableDescription != nil && createTableOutput.TableDescription.TableArn != nil {\n\t\t\/\/ Do not tag in case somebody else had created the table\n\n\t\terr = tagTableIfTagsGiven(tags, createTableOutput.TableDescription.TableArn, client, terragruntOptions)\n\n\t\tif err != nil {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc tagTableIfTagsGiven(tags map[string]string, tableArn *string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\n\tif tags == nil || len(tags) == 0 {\n\t\tterragruntOptions.Logger.Printf(\"No tags for lock table given.\")\n\t\treturn nil\n\t}\n\n\t\/\/ we were able to create the table successfully, now add tags\n\tterragruntOptions.Logger.Printf(\"Adding tags to lock table: %s\", tags)\n\n\tvar tagsConverted []*dynamodb.Tag\n\n\tfor k, v := range tags {\n\t\ttagsConverted = append(tagsConverted, &dynamodb.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\n\tvar input = dynamodb.TagResourceInput{\n\t\tResourceArn: tableArn,\n\t\tTags: tagsConverted}\n\n\t_, err := client.TagResource(&input)\n\n\treturn err\n}\n\n\/\/ Delete the given table in DynamoDB\nfunc DeleteTable(tableName string, client *dynamodb.DynamoDB) error {\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\t_, err := client.DeleteTable(&dynamodb.DeleteTableInput{TableName: aws.String(tableName)})\n\treturn err\n}\n\n\/\/ Return true if the given error is the error message returned by AWS when the resource already exists and is being\n\/\/ updated by someone else\nfunc isTableAlreadyBeingCreatedOrUpdatedError(err error) bool {\n\tawsErr, isAwsErr := err.(awserr.Error)\n\treturn isAwsErr && awsErr.Code() == \"ResourceInUseException\"\n}\n\n\/\/ Wait for the given DynamoDB table to be in the \"active\" state. If it's not in \"active\" state, sleep for the\n\/\/ specified amount of time, and try again, up to a maximum of maxRetries retries.\nfunc waitForTableToBeActive(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetries time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\treturn waitForTableToBeActiveWithRandomSleep(tableName, client, maxRetries, sleepBetweenRetries, sleepBetweenRetries, terragruntOptions)\n}\n\n\/\/ Waits for the given table as described above, but sleeps a random amount of time greater than sleepBetweenRetriesMin\n\/\/ and less than sleepBetweenRetriesMax between tries. This is to avoid an AWS issue where all waiting requests fire at\n\/\/ the same time, which continually triggered AWS's \"subscriber limit exceeded\" API error.\nfunc waitForTableToBeActiveWithRandomSleep(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetriesMin time.Duration, sleepBetweenRetriesMax time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableReady, err := LockTableExistsAndIsActive(tableName, client)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tableReady {\n\t\t\tterragruntOptions.Logger.Printf(\"Success! Table %s is now in active state.\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tsleepBetweenRetries := util.GetRandomTime(sleepBetweenRetriesMin, sleepBetweenRetriesMax)\n\t\tterragruntOptions.Logger.Printf(\"Table %s is not yet in active state. Will check again after %s.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableActiveRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}\n\n\/\/ Encrypt the TFState Lock table - If Necessary\nfunc UpdateLockTableSetSSEncryptionOnIfNecessary(tableName string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\ttableSSEncrypted, err := LockTableCheckSSEncryptionIsOn(tableName, client)\n\tif err != nil {\n\t\treturn errors.WithStackTrace(err)\n\t}\n\n\tif tableSSEncrypted {\n\t\tterragruntOptions.Logger.Printf(\"Table %s already has encryption enabled\", tableName)\n\t\treturn nil\n\t}\n\n\ttableCreateDeleteSemaphore.Acquire()\n\tdefer tableCreateDeleteSemaphore.Release()\n\n\tterragruntOptions.Logger.Printf(\"Enabling server-side encryption on table %s in AWS DynamoDB\", tableName)\n\n\tinput := &dynamodb.UpdateTableInput{\n\t\tSSESpecification: &dynamodb.SSESpecification{\n\t\t\tEnabled: aws.Bool(true),\n\t\t\tSSEType: aws.String(\"KMS\"),\n\t\t},\n\t\tTableName: aws.String(tableName),\n\t}\n\n\tif _, err := client.UpdateTable(input); err != nil {\n\t\tif isTableAlreadyBeingCreatedOrUpdatedError(err) {\n\t\t\tterragruntOptions.Logger.Printf(\"Looks like someone is already updating table %s at the same time. Will wait for that update to complete.\", tableName)\n\t\t} else {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\t}\n\n\tif err := waitForEncryptionToBeEnabled(tableName, client, terragruntOptions); err != nil {\n\t\treturn errors.WithStackTrace(err)\n\t}\n\n\treturn waitForTableToBeActive(tableName, client, MAX_RETRIES_WAITING_FOR_TABLE_TO_BE_ACTIVE, SLEEP_BETWEEN_TABLE_STATUS_CHECKS, terragruntOptions)\n}\n\n\/\/ Wait until encryption is enabled for the given table\nfunc waitForEncryptionToBeEnabled(tableName string, client *dynamodb.DynamoDB, terragruntOptions *options.TerragruntOptions) error {\n\tmaxRetries := 15\n\tsleepBetweenRetries := 20 * time.Second\n\n\tterragruntOptions.Logger.Printf(\"Waiting for encryption to be enabled on table %s\", tableName)\n\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableSSEncrypted, err := LockTableCheckSSEncryptionIsOn(tableName, client)\n\t\tif err != nil {\n\t\t\treturn errors.WithStackTrace(err)\n\t\t}\n\n\t\tif tableSSEncrypted {\n\t\t\tterragruntOptions.Logger.Printf(\"Encryption is now enabled for table %s!\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tterragruntOptions.Logger.Printf(\"Encryption is still not enabled for table %s. Will sleep for %v and try again.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableEncryptedRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}\n\n\/\/ Custom error types\n\ntype TableActiveRetriesExceeded struct {\n\tTableName string\n\tRetries int\n}\n\nfunc (err TableActiveRetriesExceeded) Error() string {\n\treturn fmt.Sprintf(\"Table %s is still not in active state after %d retries.\", err.TableName, err.Retries)\n}\n\ntype TableDoesNotExist struct {\n\tTableName string\n\tUnderlying error\n}\n\nfunc (err TableDoesNotExist) Error() string {\n\treturn fmt.Sprintf(\"Table %s does not exist in DynamoDB! Original error from AWS: %v\", err.TableName, err.Underlying)\n}\n\ntype TableEncryptedRetriesExceeded struct {\n\tTableName string\n\tRetries int\n}\n\nfunc (err TableEncryptedRetriesExceeded) Error() string {\n\treturn fmt.Sprintf(\"Table %s still does not have encryption enabled after %d retries.\", err.TableName, err.Retries)\n}\n<|endoftext|>"} {"text":"<commit_before>package indexer\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/couchbase\/indexing\/secondary\/collatejson\"\n\t\"github.com\/couchbase\/indexing\/secondary\/common\"\n)\n\nvar (\n\tErrSecKeyNil = errors.New(\"Secondary key array is empty\")\n)\n\n\/\/ Special index keys\nvar (\n\tMinIndexKey = &NilIndexKey{cmp: -1, pcmp: -1}\n\tMaxIndexKey = &NilIndexKey{cmp: 1, pcmp: 1}\n\tNilJsonKey = []byte(\"[]\")\n)\n\nvar (\n\tjsonEncoder *collatejson.Codec\n\tencBufPool *common.BytesBufPool\n)\n\nconst (\n\tmaxIndexEntrySize = MAX_SEC_KEY_BUFFER_LEN + MAX_DOCID_LEN + 2\n)\n\nfunc init() {\n\tjsonEncoder = collatejson.NewCodec(16)\n\tencBufPool = common.NewByteBufferPool(maxIndexEntrySize)\n}\n\n\/\/ Generic index entry abstraction (primary or secondary)\n\/\/ Represents a row in the index\ntype IndexEntry interface {\n\tReadDocId([]byte) ([]byte, error)\n\tReadSecKey([]byte) ([]byte, error)\n\n\tBytes() []byte\n\tString() string\n}\n\n\/\/ Generic index key abstraction (primary or secondary)\n\/\/ Represents a key supplied by the user for scan operation\ntype IndexKey interface {\n\tCompare(IndexEntry) int\n\tComparePrefixFields(IndexEntry) int\n\tBytes() []byte\n\tString() string\n}\n\n\/\/ Storage encoding for primary index entry\n\/\/ Raw docid bytes are stored as the key\ntype primaryIndexEntry []byte\n\nfunc NewPrimaryIndexEntry(docid []byte) (*primaryIndexEntry, error) {\n\tbuf := append([]byte(nil), docid...)\n\te := primaryIndexEntry(buf)\n\treturn &e, nil\n}\n\nfunc BytesToPrimaryIndexEntry(b []byte) (*primaryIndexEntry, error) {\n\te := primaryIndexEntry(b)\n\treturn &e, nil\n}\n\nfunc (e *primaryIndexEntry) ReadDocId(buf []byte) ([]byte, error) {\n\tbuf = append(buf, []byte(*e)...)\n\treturn buf, nil\n}\n\nfunc (e *primaryIndexEntry) ReadSecKey(buf []byte) ([]byte, error) {\n\treturn buf, nil\n}\n\nfunc (e *primaryIndexEntry) Bytes() []byte {\n\treturn []byte(*e)\n}\n\nfunc (e *primaryIndexEntry) String() string {\n\treturn string(*e)\n}\n\n\/\/ Storage encoding for secondary index entry\n\/\/ Format:\n\/\/ [collate_json_encoded_sec_key][raw_docid_bytes][len_of_docid_2_bytes]\ntype secondaryIndexEntry []byte\n\nfunc NewSecondaryIndexEntry(key []byte, docid []byte) (*secondaryIndexEntry, error) {\n\tvar err error\n\tvar buf []byte\n\n\tif isNilJsonKey(key) {\n\t\treturn nil, ErrSecKeyNil\n\t}\n\n\tpoolBuf := encBufPool.Get()\n\tdefer encBufPool.Put(poolBuf)\n\tif buf, err = jsonEncoder.Encode(key, (*poolBuf)[:0]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf = append(buf, docid...)\n\tbuf = buf[:len(buf)+2]\n\toffset := len(buf) - 2\n\tbinary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(len(docid)))\n\n\tbuf = append([]byte(nil), buf[:len(buf)]...)\n\te := secondaryIndexEntry(buf)\n\treturn &e, nil\n}\n\nfunc BytesToSecondaryIndexEntry(b []byte) (*secondaryIndexEntry, error) {\n\te := secondaryIndexEntry(b)\n\treturn &e, nil\n}\n\nfunc (e *secondaryIndexEntry) lenDocId() int {\n\trbuf := []byte(*e)\n\toffset := len(rbuf) - 2\n\tl := binary.LittleEndian.Uint16(rbuf[offset : offset+2])\n\treturn int(l)\n}\n\nfunc (e secondaryIndexEntry) ReadDocId(buf []byte) ([]byte, error) {\n\tdoclen := e.lenDocId()\n\toffset := len(e) - doclen - 2\n\tbuf = append(buf, e[offset:offset+doclen]...)\n\n\treturn buf, nil\n}\n\nfunc (e secondaryIndexEntry) ReadSecKey(buf []byte) ([]byte, error) {\n\tvar err error\n\tdoclen := e.lenDocId()\n\n\tencoded := e[0 : len(e)-doclen-2]\n\tif buf, err = jsonEncoder.Decode(encoded, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\nfunc (e *secondaryIndexEntry) Bytes() []byte {\n\treturn []byte(*e)\n}\n\nfunc (e *secondaryIndexEntry) String() string {\n\tbuf := make([]byte, MAX_SEC_KEY_LEN*4)\n\tbuf, _ = e.ReadSecKey(buf)\n\tbuf = append(buf, ':')\n\tbuf, _ = e.ReadDocId(buf)\n\n\treturn string(buf)\n}\n\ntype NilIndexKey struct {\n\tcmp int\n\tpcmp int\n}\n\nfunc (k *NilIndexKey) Compare(entry IndexEntry) int {\n\treturn k.cmp\n}\n\nfunc (k *NilIndexKey) ComparePrefixFields(entry IndexEntry) int {\n\treturn k.pcmp\n}\n\nfunc (k *NilIndexKey) Bytes() []byte {\n\treturn nil\n}\n\nfunc (k *NilIndexKey) String() string {\n\treturn \"nil\"\n}\n\ntype primaryKey []byte\n\nfunc NewPrimaryKey(docid []byte) (IndexKey, error) {\n\tif len(docid) == 0 {\n\t\treturn &NilIndexKey{}, nil\n\t}\n\tk := primaryKey(docid)\n\treturn &k, nil\n}\n\nfunc (k *primaryKey) Compare(entry IndexEntry) int {\n\treturn bytes.Compare(*k, entry.Bytes())\n}\n\nfunc (k *primaryKey) ComparePrefixFields(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\n\treturn bytes.Compare(*k, entry.Bytes()[:klen])\n}\n\nfunc (k *primaryKey) Bytes() []byte {\n\treturn *k\n}\n\nfunc (k *primaryKey) String() string {\n\treturn string(*k)\n}\n\ntype secondaryKey []byte\n\nfunc NewSecondaryKey(key []byte) (IndexKey, error) {\n\tif isNilJsonKey(key) {\n\t\treturn &NilIndexKey{}, nil\n\t}\n\n\tvar err error\n\tbuf := make([]byte, 0, MAX_SEC_KEY_LEN)\n\tif buf, err = jsonEncoder.Encode(key, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf = append([]byte(nil), buf[:len(buf)]...)\n\n\tk := secondaryKey(buf)\n\treturn &k, nil\n}\n\nfunc (k *secondaryKey) Compare(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\n\treturn bytes.Compare(kbytes[:klen], entry.Bytes()[:klen])\n}\n\n\/\/ A compound secondary index entry would be an array\n\/\/ of indexed fields. A compound index on N fields\n\/\/ also should be equivalent to other indexes with 1 to N\n\/\/ prefix fields.\n\/\/ When a user supplies a low or high key constraints,\n\/\/ indexing systems should check the number of fields\n\/\/ in the user supplied key and treat target index as\n\/\/ an index created on that n prefix fields.\n\/\/\n\/\/ Collatejson encoding puts a terminator character at the\n\/\/ end of encoded bytes to represent termination of json array.\n\/\/ Since we want to match partial secondary keys against fullset\n\/\/ compound index entries, we would remove the last byte from the\n\/\/ encoded user supplied secondary key to match prefixes.\nfunc (k *secondaryKey) ComparePrefixFields(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\treturn bytes.Compare(kbytes[:klen-1], entry.Bytes()[:klen-1])\n}\n\nfunc (k *secondaryKey) Bytes() []byte {\n\treturn *k\n}\n\nfunc (k *secondaryKey) String() string {\n\tbuf := make([]byte, 0, MAX_SEC_KEY_LEN)\n\tbuf, _ = jsonEncoder.Decode(*k, buf)\n\treturn string(buf)\n}\n\nfunc isNilJsonKey(k []byte) bool {\n\treturn bytes.Equal(NilJsonKey, k) || len(k) == 0\n}\n<commit_msg>MB-15090 index_entry: Fix secondary key encode buf len<commit_after>package indexer\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/couchbase\/indexing\/secondary\/collatejson\"\n\t\"github.com\/couchbase\/indexing\/secondary\/common\"\n)\n\nvar (\n\tErrSecKeyNil = errors.New(\"Secondary key array is empty\")\n\tErrSecKeyTooLong = errors.New(fmt.Sprintf(\"Secondary key is too long (> %d)\", MAX_SEC_KEY_LEN))\n)\n\n\/\/ Special index keys\nvar (\n\tMinIndexKey = &NilIndexKey{cmp: -1, pcmp: -1}\n\tMaxIndexKey = &NilIndexKey{cmp: 1, pcmp: 1}\n\tNilJsonKey = []byte(\"[]\")\n)\n\nvar (\n\tjsonEncoder *collatejson.Codec\n\tencBufPool *common.BytesBufPool\n)\n\nconst (\n\tmaxIndexEntrySize = MAX_SEC_KEY_BUFFER_LEN + MAX_DOCID_LEN + 2\n)\n\nfunc init() {\n\tjsonEncoder = collatejson.NewCodec(16)\n\tencBufPool = common.NewByteBufferPool(maxIndexEntrySize)\n}\n\n\/\/ Generic index entry abstraction (primary or secondary)\n\/\/ Represents a row in the index\ntype IndexEntry interface {\n\tReadDocId([]byte) ([]byte, error)\n\tReadSecKey([]byte) ([]byte, error)\n\n\tBytes() []byte\n\tString() string\n}\n\n\/\/ Generic index key abstraction (primary or secondary)\n\/\/ Represents a key supplied by the user for scan operation\ntype IndexKey interface {\n\tCompare(IndexEntry) int\n\tComparePrefixFields(IndexEntry) int\n\tBytes() []byte\n\tString() string\n}\n\n\/\/ Storage encoding for primary index entry\n\/\/ Raw docid bytes are stored as the key\ntype primaryIndexEntry []byte\n\nfunc NewPrimaryIndexEntry(docid []byte) (*primaryIndexEntry, error) {\n\tbuf := append([]byte(nil), docid...)\n\te := primaryIndexEntry(buf)\n\treturn &e, nil\n}\n\nfunc BytesToPrimaryIndexEntry(b []byte) (*primaryIndexEntry, error) {\n\te := primaryIndexEntry(b)\n\treturn &e, nil\n}\n\nfunc (e *primaryIndexEntry) ReadDocId(buf []byte) ([]byte, error) {\n\tbuf = append(buf, []byte(*e)...)\n\treturn buf, nil\n}\n\nfunc (e *primaryIndexEntry) ReadSecKey(buf []byte) ([]byte, error) {\n\treturn buf, nil\n}\n\nfunc (e *primaryIndexEntry) Bytes() []byte {\n\treturn []byte(*e)\n}\n\nfunc (e *primaryIndexEntry) String() string {\n\treturn string(*e)\n}\n\n\/\/ Storage encoding for secondary index entry\n\/\/ Format:\n\/\/ [collate_json_encoded_sec_key][raw_docid_bytes][len_of_docid_2_bytes]\ntype secondaryIndexEntry []byte\n\nfunc NewSecondaryIndexEntry(key []byte, docid []byte) (*secondaryIndexEntry, error) {\n\tvar err error\n\tvar buf []byte\n\n\tif isNilJsonKey(key) {\n\t\treturn nil, ErrSecKeyNil\n\t}\n\n\tpoolBuf := encBufPool.Get()\n\tdefer encBufPool.Put(poolBuf)\n\tif buf, err = jsonEncoder.Encode(key, (*poolBuf)[:0]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf = append(buf, docid...)\n\tbuf = buf[:len(buf)+2]\n\toffset := len(buf) - 2\n\tbinary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(len(docid)))\n\n\tbuf = append([]byte(nil), buf[:len(buf)]...)\n\te := secondaryIndexEntry(buf)\n\treturn &e, nil\n}\n\nfunc BytesToSecondaryIndexEntry(b []byte) (*secondaryIndexEntry, error) {\n\te := secondaryIndexEntry(b)\n\treturn &e, nil\n}\n\nfunc (e *secondaryIndexEntry) lenDocId() int {\n\trbuf := []byte(*e)\n\toffset := len(rbuf) - 2\n\tl := binary.LittleEndian.Uint16(rbuf[offset : offset+2])\n\treturn int(l)\n}\n\nfunc (e secondaryIndexEntry) ReadDocId(buf []byte) ([]byte, error) {\n\tdoclen := e.lenDocId()\n\toffset := len(e) - doclen - 2\n\tbuf = append(buf, e[offset:offset+doclen]...)\n\n\treturn buf, nil\n}\n\nfunc (e secondaryIndexEntry) ReadSecKey(buf []byte) ([]byte, error) {\n\tvar err error\n\tdoclen := e.lenDocId()\n\n\tencoded := e[0 : len(e)-doclen-2]\n\tif buf, err = jsonEncoder.Decode(encoded, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}\n\nfunc (e *secondaryIndexEntry) Bytes() []byte {\n\treturn []byte(*e)\n}\n\nfunc (e *secondaryIndexEntry) String() string {\n\tbuf := make([]byte, MAX_SEC_KEY_LEN*4)\n\tbuf, _ = e.ReadSecKey(buf)\n\tbuf = append(buf, ':')\n\tbuf, _ = e.ReadDocId(buf)\n\n\treturn string(buf)\n}\n\ntype NilIndexKey struct {\n\tcmp int\n\tpcmp int\n}\n\nfunc (k *NilIndexKey) Compare(entry IndexEntry) int {\n\treturn k.cmp\n}\n\nfunc (k *NilIndexKey) ComparePrefixFields(entry IndexEntry) int {\n\treturn k.pcmp\n}\n\nfunc (k *NilIndexKey) Bytes() []byte {\n\treturn nil\n}\n\nfunc (k *NilIndexKey) String() string {\n\treturn \"nil\"\n}\n\ntype primaryKey []byte\n\nfunc NewPrimaryKey(docid []byte) (IndexKey, error) {\n\tif len(docid) == 0 {\n\t\treturn &NilIndexKey{}, nil\n\t}\n\tk := primaryKey(docid)\n\treturn &k, nil\n}\n\nfunc (k *primaryKey) Compare(entry IndexEntry) int {\n\treturn bytes.Compare(*k, entry.Bytes())\n}\n\nfunc (k *primaryKey) ComparePrefixFields(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\n\treturn bytes.Compare(*k, entry.Bytes()[:klen])\n}\n\nfunc (k *primaryKey) Bytes() []byte {\n\treturn *k\n}\n\nfunc (k *primaryKey) String() string {\n\treturn string(*k)\n}\n\ntype secondaryKey []byte\n\nfunc NewSecondaryKey(key []byte) (IndexKey, error) {\n\tif len(key) > MAX_SEC_KEY_LEN {\n\t\treturn nil, ErrSecKeyTooLong\n\t}\n\n\tif isNilJsonKey(key) {\n\t\treturn &NilIndexKey{}, nil\n\t}\n\n\tvar err error\n\tbuf := make([]byte, 0, MAX_SEC_KEY_BUFFER_LEN)\n\tif buf, err = jsonEncoder.Encode(key, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf = append([]byte(nil), buf[:len(buf)]...)\n\n\tk := secondaryKey(buf)\n\treturn &k, nil\n}\n\nfunc (k *secondaryKey) Compare(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\n\treturn bytes.Compare(kbytes[:klen], entry.Bytes()[:klen])\n}\n\n\/\/ A compound secondary index entry would be an array\n\/\/ of indexed fields. A compound index on N fields\n\/\/ also should be equivalent to other indexes with 1 to N\n\/\/ prefix fields.\n\/\/ When a user supplies a low or high key constraints,\n\/\/ indexing systems should check the number of fields\n\/\/ in the user supplied key and treat target index as\n\/\/ an index created on that n prefix fields.\n\/\/\n\/\/ Collatejson encoding puts a terminator character at the\n\/\/ end of encoded bytes to represent termination of json array.\n\/\/ Since we want to match partial secondary keys against fullset\n\/\/ compound index entries, we would remove the last byte from the\n\/\/ encoded user supplied secondary key to match prefixes.\nfunc (k *secondaryKey) ComparePrefixFields(entry IndexEntry) int {\n\tkbytes := []byte(*k)\n\tklen := len(kbytes)\n\tif klen > len(entry.Bytes()) {\n\t\treturn 1\n\t}\n\treturn bytes.Compare(kbytes[:klen-1], entry.Bytes()[:klen-1])\n}\n\nfunc (k *secondaryKey) Bytes() []byte {\n\treturn *k\n}\n\nfunc (k *secondaryKey) String() string {\n\tbuf := make([]byte, 0, MAX_SEC_KEY_LEN)\n\tbuf, _ = jsonEncoder.Decode(*k, buf)\n\treturn string(buf)\n}\n\nfunc isNilJsonKey(k []byte) bool {\n\treturn bytes.Equal(NilJsonKey, k) || len(k) == 0\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 guylewin, guy@lewin.co.il\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage auth\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/fatedier\/frp\/models\/msg\"\n\t\"github.com\/fatedier\/frp\/utils\/util\"\n\n\t\"github.com\/vaughan0\/go-ini\"\n)\n\ntype tokenConfig struct {\n\t\/\/ Token specifies the authorization token used to create keys to be sent\n\t\/\/ to the server. The server must have a matching token for authorization\n\t\/\/ to succeed. By default, this value is \"\".\n\tToken string `json:\"token\"`\n}\n\nfunc getDefaultTokenConf() tokenConfig {\n\treturn tokenConfig{\n\t\tToken: \"\",\n\t}\n}\n\nfunc unmarshalTokenConfFromIni(conf ini.File) tokenConfig {\n\tvar (\n\t\ttmpStr string\n\t\tok bool\n\t)\n\n\tcfg := getDefaultTokenConf()\n\n\tif tmpStr, ok = conf.Get(\"common\", \"token\"); ok {\n\t\tcfg.Token = tmpStr\n\t}\n\n\treturn cfg\n}\n\ntype TokenAuthSetterVerifier struct {\n\tbaseConfig\n\n\ttoken string\n}\n\nfunc NewTokenAuth(baseCfg baseConfig, cfg tokenConfig) *TokenAuthSetterVerifier {\n\treturn &TokenAuthSetterVerifier{\n\t\tbaseConfig: baseCfg,\n\t\ttoken: cfg.Token,\n\t}\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetLogin(loginMsg *msg.Login) (err error) {\n\tloginMsg.PrivilegeKey = util.GetAuthKey(auth.token, loginMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetPing(pingMsg *msg.Ping) error {\n\tif !auth.AuthenticateHeartBeats {\n\t\treturn nil\n\t}\n\n\tpingMsg.Timestamp = time.Now().Unix()\n\tpingMsg.PrivilegeKey = util.GetAuthKey(auth.token, pingMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {\n\tif !auth.AuthenticateHeartBeats {\n\t\treturn nil\n\t}\n\n\tnewWorkConnMsg.Timestamp = time.Now().Unix()\n\tnewWorkConnMsg.PrivilegeKey = util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyLogin(loginMsg *msg.Login) error {\n\tif util.GetAuthKey(auth.token, loginMsg.Timestamp) != loginMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in login doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyPing(pingMsg *msg.Ping) error {\n\tif !auth.AuthenticateHeartBeats {\n\t\treturn nil\n\t}\n\n\tif util.GetAuthKey(auth.token, pingMsg.Timestamp) != pingMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in heartbeat doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {\n\tif !auth.AuthenticateNewWorkConns {\n\t\treturn nil\n\t}\n\n\tif util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp) != newWorkConnMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in NewWorkConn doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n<commit_msg>fix: auth token bug (#1762)<commit_after>\/\/ Copyright 2020 guylewin, guy@lewin.co.il\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage auth\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/fatedier\/frp\/models\/msg\"\n\t\"github.com\/fatedier\/frp\/utils\/util\"\n\n\t\"github.com\/vaughan0\/go-ini\"\n)\n\ntype tokenConfig struct {\n\t\/\/ Token specifies the authorization token used to create keys to be sent\n\t\/\/ to the server. The server must have a matching token for authorization\n\t\/\/ to succeed. By default, this value is \"\".\n\tToken string `json:\"token\"`\n}\n\nfunc getDefaultTokenConf() tokenConfig {\n\treturn tokenConfig{\n\t\tToken: \"\",\n\t}\n}\n\nfunc unmarshalTokenConfFromIni(conf ini.File) tokenConfig {\n\tvar (\n\t\ttmpStr string\n\t\tok bool\n\t)\n\n\tcfg := getDefaultTokenConf()\n\n\tif tmpStr, ok = conf.Get(\"common\", \"token\"); ok {\n\t\tcfg.Token = tmpStr\n\t}\n\n\treturn cfg\n}\n\ntype TokenAuthSetterVerifier struct {\n\tbaseConfig\n\n\ttoken string\n}\n\nfunc NewTokenAuth(baseCfg baseConfig, cfg tokenConfig) *TokenAuthSetterVerifier {\n\treturn &TokenAuthSetterVerifier{\n\t\tbaseConfig: baseCfg,\n\t\ttoken: cfg.Token,\n\t}\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetLogin(loginMsg *msg.Login) (err error) {\n\tloginMsg.PrivilegeKey = util.GetAuthKey(auth.token, loginMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetPing(pingMsg *msg.Ping) error {\n\tif !auth.AuthenticateHeartBeats {\n\t\treturn nil\n\t}\n\n\tpingMsg.Timestamp = time.Now().Unix()\n\tpingMsg.PrivilegeKey = util.GetAuthKey(auth.token, pingMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) SetNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {\n\tif !auth.AuthenticateNewWorkConns {\n\t\treturn nil\n\t}\n\n\tnewWorkConnMsg.Timestamp = time.Now().Unix()\n\tnewWorkConnMsg.PrivilegeKey = util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp)\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyLogin(loginMsg *msg.Login) error {\n\tif util.GetAuthKey(auth.token, loginMsg.Timestamp) != loginMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in login doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyPing(pingMsg *msg.Ping) error {\n\tif !auth.AuthenticateHeartBeats {\n\t\treturn nil\n\t}\n\n\tif util.GetAuthKey(auth.token, pingMsg.Timestamp) != pingMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in heartbeat doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n\nfunc (auth *TokenAuthSetterVerifier) VerifyNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {\n\tif !auth.AuthenticateNewWorkConns {\n\t\treturn nil\n\t}\n\n\tif util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp) != newWorkConnMsg.PrivilegeKey {\n\t\treturn fmt.Errorf(\"token in NewWorkConn doesn't match token from configuration\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package input\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/ambientsound\/pms\/input\"\n)\n\n\/\/ TestLexer tests the input.NextToken() function, checking that it correctly\n\/\/ splits up input lines into Token structs.\nfunc TestLexer(t *testing.T) {\n\ti := 0\n\tpos := 0\n\tstr := \"the quick <brown>\\t \\tfoxee # adds a comment\"\n\ttoken := input.Token{}\n\n\tchecks := []input.Token{\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"the\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"quick\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"<brown>\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"foxee\")},\n\t\tinput.Token{Class: input.TokenComment, Runes: []rune(\"# adds a comment\")},\n\t\tinput.Token{Class: input.TokenEnd, Runes: nil},\n\t}\n\n\tfor {\n\n\t\tif i == len(checks) {\n\t\t\tif token.Class == input.TokenEnd {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.Fatalf(\"Tokenizer generated too many tokens!\")\n\t\t}\n\n\t\tcheck := checks[i]\n\t\ttoken, npos := input.NextToken(str[pos:])\n\t\tpos += npos\n\n\t\tt.Logf(\"Token %d: pos=%d, runes='%s', input='%s'\", i, pos, string(token.Runes), str)\n\n\t\tif token.Class != check.Class {\n\t\t\tt.Fatalf(\"Token class for token %d is wrong; expected %d but got %d\", i+1, check.Class, token.Class)\n\t\t}\n\n\t\tif string(check.Runes) != string(token.Runes) {\n\t\t\tt.Fatalf(\"String check against token %d failed; expected '%s' but got '%s'\", i+1,\n\t\t\t\tstring(check.Runes),\n\t\t\t\tstring(token.Runes),\n\t\t\t)\n\t\t}\n\n\t\ti++\n\t}\n}\n<commit_msg>Rename input tests to package input_test<commit_after>package input_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/ambientsound\/pms\/input\"\n)\n\n\/\/ TestLexer tests the input.NextToken() function, checking that it correctly\n\/\/ splits up input lines into Token structs.\nfunc TestLexer(t *testing.T) {\n\ti := 0\n\tpos := 0\n\tstr := \"the quick <brown>\\t \\tfoxee # adds a comment\"\n\ttoken := input.Token{}\n\n\tchecks := []input.Token{\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"the\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"quick\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"<brown>\")},\n\t\tinput.Token{Class: input.TokenIdentifier, Runes: []rune(\"foxee\")},\n\t\tinput.Token{Class: input.TokenComment, Runes: []rune(\"# adds a comment\")},\n\t\tinput.Token{Class: input.TokenEnd, Runes: nil},\n\t}\n\n\tfor {\n\n\t\tif i == len(checks) {\n\t\t\tif token.Class == input.TokenEnd {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.Fatalf(\"Tokenizer generated too many tokens!\")\n\t\t}\n\n\t\tcheck := checks[i]\n\t\ttoken, npos := input.NextToken(str[pos:])\n\t\tpos += npos\n\n\t\tt.Logf(\"Token %d: pos=%d, runes='%s', input='%s'\", i, pos, string(token.Runes), str)\n\n\t\tif token.Class != check.Class {\n\t\t\tt.Fatalf(\"Token class for token %d is wrong; expected %d but got %d\", i+1, check.Class, token.Class)\n\t\t}\n\n\t\tif string(check.Runes) != string(token.Runes) {\n\t\t\tt.Fatalf(\"String check against token %d failed; expected '%s' but got '%s'\", i+1,\n\t\t\t\tstring(check.Runes),\n\t\t\t\tstring(token.Runes),\n\t\t\t)\n\t\t}\n\n\t\ti++\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package engine\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"World\", func() {\n\tContext(\"when created\", func() {\n\t\tIt(\"it able to provide lore\", func() {\n\t\t\tseed := NewSeed(\"shroom\")\n\t\t\tworld := NewWorld([]Entity{})\n\t\t})\n\t})\n})\n<commit_msg>working on tests<commit_after>package engine\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"World\", func() {\n\tContext(\"when created\", func() {\n\t\tIt(\"it able to provide lore\", func() {\n\t\t\tseed := NewSeed(\"shroom\", 0, 0, 0, nil)\n\t\t\tworld := NewWorld([]Entity{seed})\n\t\t\tExpect(world.GetEntities()).NotTo(BeEmpty())\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package ogdatv21\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/the42\/ogdat\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst Version = \"OGD Austria Metadata 2.1\" \/\/ Version 2.1: 15.10.2012\nconst specfile = \"ogdat_spec-2.1.csv\"\nconst TimeSpecifier = \"2006-01-02T15:04:05\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\n\n\/\/\/ BEGIN:check wheater this code may be factored out\nconst (\n\tTime2 = time.RFC3339Nano\n\tTime3 = time.RFC3339\n\tTime1 = TimeSpecifier\n\tTimeFormatUnknow\n)\n\ntype Kategorie struct {\n\tNumID int `json:\"-\"`\n\tID string\n\tPrettyName string `json:\"-\"`\n\tRDFProperty string `json:\"-\"`\n}\n\nfunc (kat *Kategorie) String() string {\n\treturn kat.PrettyName\n}\n\nvar (\n\tArbeit = Kategorie{NumID: 1, ID: \"arbeit\", PrettyName: \"Arbeit\", RDFProperty: \"\"}\n\tBevoelkerung = Kategorie{NumID: 2, ID: \"bevölkerung\", PrettyName: \"Bevölkerung\", RDFProperty: \"\"}\n\tBildungForschung = Kategorie{NumID: 3, ID: \"bildung-und-forschung\", PrettyName: \"Bildung und Forschung\", RDFProperty: \"\"}\n\tFinanzRW = Kategorie{NumID: 4, ID: \"finanzen-und-rechnungswesen\", PrettyName: \"Finanzen und Rechnungswesen\", RDFProperty: \"\"}\n\tGeographPlanung = Kategorie{NumID: 5, ID: \"geographie-und-planung\", PrettyName: \"Geographie und Planung\", RDFProperty: \"\"}\n\tGesellSoziales = Kategorie{NumID: 6, ID: \"gesellschaft-und-soziales\", PrettyName: \"Gesellschaft und Soziales\", RDFProperty: \"\"}\n\tGesundheit = Kategorie{NumID: 7, ID: \"gesundheit\", PrettyName: \"Gesundheit\", RDFProperty: \"\"}\n\tKunstKultur = Kategorie{NumID: 8, ID: \"kunst-und-kultur\", PrettyName: \"Kunst und Kultur\", RDFProperty: \"\"}\n\tLandFW = Kategorie{NumID: 9, ID: \"land-und-forstwirtschaft\", PrettyName: \"Land und Forstwirtschaft\", RDFProperty: \"\"}\n\tSportFZ = Kategorie{NumID: 10, ID: \"sport-und-freizeit\", PrettyName: \"Sport und Freizeit\", RDFProperty: \"\"}\n\tUmwelt = Kategorie{NumID: 11, ID: \"umwelt\", PrettyName: \"Umwelt\", RDFProperty: \"\"}\n\tVerkehrTechnik = Kategorie{NumID: 12, ID: \"verkehr-und-technik\", PrettyName: \"Verkehr und Technik\", RDFProperty: \"\"}\n\tVerwaltPol = Kategorie{NumID: 13, ID: \"verwaltung-und-politik\", PrettyName: \"Verwaltung und Politik\", RDFProperty: \"\"}\n\tWirtTourism = Kategorie{NumID: 14, ID: \"wirtschaft-und-tourismus\", PrettyName: \"Wirtschaft und Tourismus\", RDFProperty: \"\"}\n)\n\nvar categories = []Kategorie{\n\tArbeit,\n\tBevoelkerung,\n\tBildungForschung,\n\tFinanzRW,\n\tGeographPlanung,\n\tGesellSoziales,\n\tGesundheit,\n\tKunstKultur,\n\tLandFW,\n\tSportFZ,\n\tUmwelt,\n\tVerkehrTechnik,\n\tVerwaltPol,\n\tWirtTourism,\n}\n\nvar categorymap = make(map[string]Kategorie)\n\ntype Tags string\ntype ResourceSpecifier string\n\ntype Cycle struct {\n\tNumID int\n\tDomainCode string\n\tMD_MaintenanceFrequencyCode string\n\tName_DE string\n}\n\nvar (\n\tCycCont = Cycle{1, \"001\", \"continual\", \"kontinuierlich\"}\n\tCycDaily = Cycle{2, \"002\", \"daily\", \"täglich\"}\n\tCycWeekly = Cycle{3, \"003\", \"weekly\", \"wöchentlich\"}\n\tCycFortNly = Cycle{4, \"004\", \"fortnightly\", \"14-tägig\"}\n\tCycMonthly = Cycle{5, \"005\", \"monthly\", \"monatlich\"}\n\tCycQuart = Cycle{6, \"006\", \"quarterly\", \"quartalsweise\"}\n\tCycBiAnn = Cycle{7, \"007\", \"biannually\", \"halbjährlich\"}\n\tCycAnnually = Cycle{8, \"008\", \"annually\", \"jährlich\"}\n\tCycNeeded = Cycle{9, \"009\", \"asNeeded\", \"nach Bedarf\"}\n\tCycIrreg = Cycle{10, \"010\", \"irregular\", \"unregelmäßig\"}\n\tCycNP = Cycle{11, \"011\", \"notPlanned\", \"nicht geplant\"}\n\tCycUnknown = Cycle{12, \"012\", \"unknown\", \"unbekannt\"}\n)\n\nvar cycles = []Cycle{\n\tCycCont,\n\tCycDaily,\n\tCycWeekly,\n\tCycFortNly,\n\tCycMonthly,\n\tCycQuart,\n\tCycBiAnn,\n\tCycAnnually,\n\tCycNeeded,\n\tCycIrreg,\n\tCycNP,\n\tCycUnknown,\n}\n\ntype Url struct {\n\t*url.URL\n\tRaw string\n}\n\ntype Identifier struct {\n\t*uuid.UUID\n\tRaw string\n}\n\nfunc (id *Identifier) String() string {\n\treturn id.Raw\n}\n\ntype Time struct {\n\ttime.Time\n\tRaw string\n\tFormat string\n}\n\nfunc (time *Time) String() string {\n\treturn time.Raw\n}\n\nfunc (cyc *Cycle) String() string {\n\treturn cyc.Name_DE\n}\n\nfunc cmpstrtocycle(raw string, cyc Cycle) bool {\n\tif raw == cyc.Name_DE || raw == cyc.DomainCode || raw == cyc.MD_MaintenanceFrequencyCode {\n\t\treturn true\n\t}\n\tif len(raw) > 0 {\n\t\tif i, err := strconv.Atoi(raw); err == nil && i == cyc.NumID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cyc *Cycle) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tvar found bool\n\tvar idx int\n\tvar matchcyc Cycle\n\n\tfor idx, matchcyc = range cycles {\n\t\tif found := cmpstrtocycle(raw, matchcyc); found == true {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif found {\n\t\t*cyc = cycles[idx]\n\t} else {\n\t\tcyc.NumID = -1\n\t\tcyc.Name_DE = \"**** NON cycle spec **** - \" + raw\n\t\tcyc.MD_MaintenanceFrequencyCode = cyc.Name_DE\n\t}\n\treturn nil\n}\n\nfunc (ogdtime *Time) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\togdtime.Raw = raw\n\n\togdtime.Format = Time1\n\tt, err := time.Parse(ogdtime.Format, raw)\n\tif err != nil {\n\t\togdtime.Format = Time2\n\t\tt, err = time.Parse(ogdtime.Format, raw)\n\t\tif err != nil {\n\t\t\togdtime.Format = Time3\n\t\t\tt, err = time.Parse(ogdtime.Format, raw)\n\t\t\tif err != nil {\n\t\t\t\togdtime.Format = TimeFormatUnknow\n\t\t\t}\n\t\t}\n\t}\n\togdtime.Time = t\n\treturn nil\n}\n\nfunc (u *Url) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\tu.Raw = raw\n\turl, _ := url.Parse(raw) \/\/ an actuall error is not important. If url can not be parsed, result will be nil, which is fine here\n\tu.URL = url\n\treturn nil\n}\n\nfunc (id *Identifier) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tid.Raw = string(raw)\n\tif uuid := uuid.Parse(raw); uuid != nil {\n\t\tid.UUID = &uuid\n\t}\n\treturn nil\n}\n\nfunc (kat *Kategorie) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tcorecat, found := categorymap[raw]\n\tif !found {\n\t\tkat.NumID = -1\n\t\tkat.ID = raw\n\t\tkat.PrettyName = \"**** NON core category **** - \" + kat.ID\n\t} else {\n\t\t*kat = corecat\n\t}\n\treturn nil\n}\n\n\/\/\/ END:check wheater this code may be factored out\n\ntype Extras struct {\n\t\/\/ Core\n\tMetadata_Identifier *Identifier `json:\"metadata_identifier\" ogdat:\"ID:1\"` \/\/ CKAN uses since API Version 2 a UUID V4, cf. https:\/\/github.com\/okfn\/ckan\/blob\/master\/ckan\/model\/types.py\n\tMetadata_Modified *Time `json:\"metadata_modified ogdat:\"ID:5\"`\n\tCategorization []Kategorie `json:\"categorization ogdat:\"ID:10\"`\n\tBegin_DateTime *Time `json:\"begin_datetime\" ogdat:\"ID:24\"`\n\n\t\/\/ Optional\n\tSchema_Name *string `json:\"schema_name\" ogdat:\"ID:2\"`\n\tSchema_Language *string `json:\"schema_language\" ogdat:\"ID:3\"` \/\/ always \"ger\"\n\tSchema_Characterset *string `json:\"schema_characterset\" ogdat:\"ID:4\"` \/\/ always \"utf8\", cf. https:\/\/www.ghrsst.org\/files\/download.php?m=documents&f=ISO%2019115%20.pdf\n\tMetaData_Linkage []Url `json:\"metadata_linkage\" ogdat:\"ID:6\"`\n\tAttribute_Description *string `json:\"attribute_description\" ogdat:\"ID:12\"`\n\tMaintainer_Link *Url `json:\"maintainer_link\" ogdat:\"ID:13\"`\n\tPublisher *string `json:\"publisher\" ogdat:\"ID:20\"`\n\tGeographich_Toponym *string `json:\"geographic_toponym\" ogdat:\"ID:22\"`\n\n\t\/* ON\/EN\/ISO 19115:2003: westBL (344) & eastBL (345) & southBL (346) & northBL (347)\n\t * TODO: Specifiaction says a WKT of POLYGON should be used, which would make a\n\t * POLYGON ((-180.00 -90.00, 180.00 90.00)) but Example states\n\t * POLYGON (-180.00 -90.00, 180.00 90.00)\n\t * UNDER CLARIFICATION\n\t *\/\n\tGeographic_BBox *string `json:\"geographic_bbox\" ogdat:\"ID:23\"`\n\tEnd_DateTime *Time `json:\"end_datetime\" ogdat:\"ID:25\"`\n\tUpdate_Frequency *Cycle `json:\"update_frequency\" ogdat:\"ID:26\"`\n\tLineage_Quality *string `json:\"lineage_quality\" ogdat:\"ID:27\"`\n\tEnTitleDesc *string `json:\"en_title_and_desc\" ogdat:\"ID:28\"`\n}\n\ntype Resource struct {\n\t\/\/ Core\n\tURL *Url `json:\"url\" ogdat:\"ID:14\"`\n\tFormat ResourceSpecifier `json:\"format\" ogdat:\"ID:15\"`\n\n\t\/\/ Optional\n\tName *string `json:\"name\" ogdat:\"ID:16\"`\n\tCreated *Time `json:\"created\" ogdat:\"ID:17\"`\n\tLastModified *Time `json:\"last_modified\" ogdat:\"ID:18\"`\n\n\t\/*\n\t * dcat:bytes a rdf:Property, owl:DatatypeProperty;\n\t * rdfs:isDefinedBy <http:\/\/www.w3.org\/ns\/dcat>;\n\t * rdfs:label \"size in bytes\";\n\t * rdfs:comment \"describe size of resource in bytes\";\n\t * rdfs:domain dcat:Distribution;\n\t * rdfs:range xsd:integer .\n\t *\/\n\tSize *string `json:\"size\" ogdat:\"ID:29\"`\n\tLicense_Citation *string `json:\"license_citation\" ogdat:\"ID:30\"`\n\tLanguage *string `json:\"language\" ogdat:\"ID:31\"`\n\t\/* Here we have a problem in spec 2.1. which says \"nach ISO\\IEC 10646-1\", which means utf-8, utf-16 and utf-32.\n\t * We would certainly support more encodings, as eg.\n\t * ISO 19115 \/ B.5.10 MD_CharacterSetCode<> or\n\t * http:\/\/www.iana.org\/assignments\/character-sets\/character-sets.xml\n\t *\/\n\tEncoding *string `json:\"characterset\" ogdat:\"ID:32\"`\n}\n\ntype MetaData struct {\n\t\/\/ Core\n\tTitle string `json:\"title\" ogdat:\"ID:8\"`\n\tDescription string `json:\"notes\" ogdat:\"ID:9\"`\n\tSchlagworte []Tags `json:\"tags\" ogdat:\"ID:11\"`\n\tMaintainer string `json:\"maintainer\" ogdat:\"ID:19\"`\n\tLicense string `json:\"license\" ogdat:\"ID:21\"` \/\/ Sollte URI des Lizenzdokuments sein\n\n\t\/\/ nested structs\n\tExtras `json:\"extras\"`\n\tResource []Resource `json:\"resources\"`\n}\n\nfunc (md *MetaData) GetBeschreibungForFieldName(name string) *ogdat.Beschreibung {\n\tif f, ok := reflect.TypeOf(md).Elem().FieldByName(name); ok {\n\t\tif id := ogdat.GetIDFromMetaDataStructField(f); id > -1 {\n\t\t\tbeschreibung, _ := ogdat.GetOGDSetForVersion(Version).GetBeschreibungForID(id)\n\t\t\treturn beschreibung\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfor idx, val := range categories {\n\t\tcategorymap[val.ID] = categories[idx]\n\t}\n\togdat.RegisterFromCSVFile(Version, specfile)\n}\n<commit_msg>make all struct members of OGDAT pointers<commit_after>package ogdatv21\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/the42\/ogdat\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst Version = \"OGD Austria Metadata 2.1\" \/\/ Version 2.1: 15.10.2012\nconst specfile = \"ogdat_spec-2.1.csv\"\nconst TimeSpecifier = \"2006-01-02T15:04:05\" \/\/ RFC 3339 = ISO 8601 ohne Zeitzone\n\n\/\/\/ BEGIN:check wheater this code may be factored out\nconst (\n\tTime2 = time.RFC3339Nano\n\tTime3 = time.RFC3339\n\tTime1 = TimeSpecifier\n\tTimeFormatUnknow\n)\n\ntype Kategorie struct {\n\tNumID int `json:\"-\"`\n\tID string\n\tPrettyName string `json:\"-\"`\n\tRDFProperty string `json:\"-\"`\n}\n\nfunc (kat *Kategorie) String() string {\n\treturn kat.PrettyName\n}\n\nvar (\n\tArbeit = Kategorie{NumID: 1, ID: \"arbeit\", PrettyName: \"Arbeit\", RDFProperty: \"\"}\n\tBevoelkerung = Kategorie{NumID: 2, ID: \"bevölkerung\", PrettyName: \"Bevölkerung\", RDFProperty: \"\"}\n\tBildungForschung = Kategorie{NumID: 3, ID: \"bildung-und-forschung\", PrettyName: \"Bildung und Forschung\", RDFProperty: \"\"}\n\tFinanzRW = Kategorie{NumID: 4, ID: \"finanzen-und-rechnungswesen\", PrettyName: \"Finanzen und Rechnungswesen\", RDFProperty: \"\"}\n\tGeographPlanung = Kategorie{NumID: 5, ID: \"geographie-und-planung\", PrettyName: \"Geographie und Planung\", RDFProperty: \"\"}\n\tGesellSoziales = Kategorie{NumID: 6, ID: \"gesellschaft-und-soziales\", PrettyName: \"Gesellschaft und Soziales\", RDFProperty: \"\"}\n\tGesundheit = Kategorie{NumID: 7, ID: \"gesundheit\", PrettyName: \"Gesundheit\", RDFProperty: \"\"}\n\tKunstKultur = Kategorie{NumID: 8, ID: \"kunst-und-kultur\", PrettyName: \"Kunst und Kultur\", RDFProperty: \"\"}\n\tLandFW = Kategorie{NumID: 9, ID: \"land-und-forstwirtschaft\", PrettyName: \"Land und Forstwirtschaft\", RDFProperty: \"\"}\n\tSportFZ = Kategorie{NumID: 10, ID: \"sport-und-freizeit\", PrettyName: \"Sport und Freizeit\", RDFProperty: \"\"}\n\tUmwelt = Kategorie{NumID: 11, ID: \"umwelt\", PrettyName: \"Umwelt\", RDFProperty: \"\"}\n\tVerkehrTechnik = Kategorie{NumID: 12, ID: \"verkehr-und-technik\", PrettyName: \"Verkehr und Technik\", RDFProperty: \"\"}\n\tVerwaltPol = Kategorie{NumID: 13, ID: \"verwaltung-und-politik\", PrettyName: \"Verwaltung und Politik\", RDFProperty: \"\"}\n\tWirtTourism = Kategorie{NumID: 14, ID: \"wirtschaft-und-tourismus\", PrettyName: \"Wirtschaft und Tourismus\", RDFProperty: \"\"}\n)\n\nvar categories = []Kategorie{\n\tArbeit,\n\tBevoelkerung,\n\tBildungForschung,\n\tFinanzRW,\n\tGeographPlanung,\n\tGesellSoziales,\n\tGesundheit,\n\tKunstKultur,\n\tLandFW,\n\tSportFZ,\n\tUmwelt,\n\tVerkehrTechnik,\n\tVerwaltPol,\n\tWirtTourism,\n}\n\nvar categorymap = make(map[string]Kategorie)\n\ntype Tags string\ntype ResourceSpecifier string\n\ntype Cycle struct {\n\tNumID int\n\tDomainCode string\n\tMD_MaintenanceFrequencyCode string\n\tName_DE string\n}\n\nvar (\n\tCycCont = Cycle{1, \"001\", \"continual\", \"kontinuierlich\"}\n\tCycDaily = Cycle{2, \"002\", \"daily\", \"täglich\"}\n\tCycWeekly = Cycle{3, \"003\", \"weekly\", \"wöchentlich\"}\n\tCycFortNly = Cycle{4, \"004\", \"fortnightly\", \"14-tägig\"}\n\tCycMonthly = Cycle{5, \"005\", \"monthly\", \"monatlich\"}\n\tCycQuart = Cycle{6, \"006\", \"quarterly\", \"quartalsweise\"}\n\tCycBiAnn = Cycle{7, \"007\", \"biannually\", \"halbjährlich\"}\n\tCycAnnually = Cycle{8, \"008\", \"annually\", \"jährlich\"}\n\tCycNeeded = Cycle{9, \"009\", \"asNeeded\", \"nach Bedarf\"}\n\tCycIrreg = Cycle{10, \"010\", \"irregular\", \"unregelmäßig\"}\n\tCycNP = Cycle{11, \"011\", \"notPlanned\", \"nicht geplant\"}\n\tCycUnknown = Cycle{12, \"012\", \"unknown\", \"unbekannt\"}\n)\n\nvar cycles = []Cycle{\n\tCycCont,\n\tCycDaily,\n\tCycWeekly,\n\tCycFortNly,\n\tCycMonthly,\n\tCycQuart,\n\tCycBiAnn,\n\tCycAnnually,\n\tCycNeeded,\n\tCycIrreg,\n\tCycNP,\n\tCycUnknown,\n}\n\ntype Url struct {\n\t*url.URL\n\tRaw string\n}\n\ntype Identifier struct {\n\t*uuid.UUID\n\tRaw string\n}\n\nfunc (id *Identifier) String() string {\n\treturn id.Raw\n}\n\ntype Time struct {\n\ttime.Time\n\tRaw string\n\tFormat string\n}\n\nfunc (time *Time) String() string {\n\treturn time.Raw\n}\n\nfunc (cyc *Cycle) String() string {\n\treturn cyc.Name_DE\n}\n\nfunc cmpstrtocycle(raw string, cyc Cycle) bool {\n\tif raw == cyc.Name_DE || raw == cyc.DomainCode || raw == cyc.MD_MaintenanceFrequencyCode {\n\t\treturn true\n\t}\n\tif len(raw) > 0 {\n\t\tif i, err := strconv.Atoi(raw); err == nil && i == cyc.NumID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cyc *Cycle) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tvar found bool\n\tvar idx int\n\tvar matchcyc Cycle\n\n\tfor idx, matchcyc = range cycles {\n\t\tif found := cmpstrtocycle(raw, matchcyc); found == true {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif found {\n\t\t*cyc = cycles[idx]\n\t} else {\n\t\tcyc.NumID = -1\n\t\tcyc.Name_DE = \"**** NON cycle spec **** - \" + raw\n\t\tcyc.MD_MaintenanceFrequencyCode = cyc.Name_DE\n\t}\n\treturn nil\n}\n\nfunc (ogdtime *Time) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\togdtime.Raw = raw\n\n\togdtime.Format = Time1\n\tt, err := time.Parse(ogdtime.Format, raw)\n\tif err != nil {\n\t\togdtime.Format = Time2\n\t\tt, err = time.Parse(ogdtime.Format, raw)\n\t\tif err != nil {\n\t\t\togdtime.Format = Time3\n\t\t\tt, err = time.Parse(ogdtime.Format, raw)\n\t\t\tif err != nil {\n\t\t\t\togdtime.Format = TimeFormatUnknow\n\t\t\t}\n\t\t}\n\t}\n\togdtime.Time = t\n\treturn nil\n}\n\nfunc (u *Url) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\tu.Raw = raw\n\turl, _ := url.Parse(raw) \/\/ an actuall error is not important. If url can not be parsed, result will be nil, which is fine here\n\tu.URL = url\n\treturn nil\n}\n\nfunc (id *Identifier) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tid.Raw = string(raw)\n\tif uuid := uuid.Parse(raw); uuid != nil {\n\t\tid.UUID = &uuid\n\t}\n\treturn nil\n}\n\nfunc (kat *Kategorie) UnmarshalJSON(data []byte) error {\n\tvar raw string\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tcorecat, found := categorymap[raw]\n\tif !found {\n\t\tkat.NumID = -1\n\t\tkat.ID = raw\n\t\tkat.PrettyName = \"**** NON core category **** - \" + kat.ID\n\t} else {\n\t\t*kat = corecat\n\t}\n\treturn nil\n}\n\n\/\/\/ END:check wheater this code may be factored out\n\ntype Extras struct {\n\t\/\/ Core\n\tMetadata_Identifier *Identifier `json:\"metadata_identifier\" ogdat:\"ID:1\"` \/\/ CKAN uses since API Version 2 a UUID V4, cf. https:\/\/github.com\/okfn\/ckan\/blob\/master\/ckan\/model\/types.py\n\tMetadata_Modified *Time `json:\"metadata_modified ogdat:\"ID:5\"`\n\tCategorization []Kategorie `json:\"categorization ogdat:\"ID:10\"`\n\tBegin_DateTime *Time `json:\"begin_datetime\" ogdat:\"ID:24\"`\n\n\t\/\/ Optional\n\tSchema_Name *string `json:\"schema_name\" ogdat:\"ID:2\"`\n\tSchema_Language *string `json:\"schema_language\" ogdat:\"ID:3\"` \/\/ always \"ger\"\n\tSchema_Characterset *string `json:\"schema_characterset\" ogdat:\"ID:4\"` \/\/ always \"utf8\", cf. https:\/\/www.ghrsst.org\/files\/download.php?m=documents&f=ISO%2019115%20.pdf\n\tMetaData_Linkage []Url `json:\"metadata_linkage\" ogdat:\"ID:6\"`\n\tAttribute_Description *string `json:\"attribute_description\" ogdat:\"ID:12\"`\n\tMaintainer_Link *Url `json:\"maintainer_link\" ogdat:\"ID:13\"`\n\tPublisher *string `json:\"publisher\" ogdat:\"ID:20\"`\n\tGeographich_Toponym *string `json:\"geographic_toponym\" ogdat:\"ID:22\"`\n\n\t\/* ON\/EN\/ISO 19115:2003: westBL (344) & eastBL (345) & southBL (346) & northBL (347)\n\t * TODO: Specifiaction says a WKT of POLYGON should be used, which would make a\n\t * POLYGON ((-180.00 -90.00, 180.00 90.00)) but Example states\n\t * POLYGON (-180.00 -90.00, 180.00 90.00)\n\t * UNDER CLARIFICATION\n\t *\/\n\tGeographic_BBox *string `json:\"geographic_bbox\" ogdat:\"ID:23\"`\n\tEnd_DateTime *Time `json:\"end_datetime\" ogdat:\"ID:25\"`\n\tUpdate_Frequency *Cycle `json:\"update_frequency\" ogdat:\"ID:26\"`\n\tLineage_Quality *string `json:\"lineage_quality\" ogdat:\"ID:27\"`\n\tEnTitleDesc *string `json:\"en_title_and_desc\" ogdat:\"ID:28\"`\n}\n\ntype Resource struct {\n\t\/\/ Core\n\tURL *Url `json:\"url\" ogdat:\"ID:14\"`\n\tFormat *ResourceSpecifier `json:\"format\" ogdat:\"ID:15\"`\n\n\t\/\/ Optional\n\tName *string `json:\"name\" ogdat:\"ID:16\"`\n\tCreated *Time `json:\"created\" ogdat:\"ID:17\"`\n\tLastModified *Time `json:\"last_modified\" ogdat:\"ID:18\"`\n\n\t\/*\n\t * dcat:bytes a rdf:Property, owl:DatatypeProperty;\n\t * rdfs:isDefinedBy <http:\/\/www.w3.org\/ns\/dcat>;\n\t * rdfs:label \"size in bytes\";\n\t * rdfs:comment \"describe size of resource in bytes\";\n\t * rdfs:domain dcat:Distribution;\n\t * rdfs:range xsd:integer .\n\t *\/\n\tSize *string `json:\"size\" ogdat:\"ID:29\"`\n\tLicense_Citation *string `json:\"license_citation\" ogdat:\"ID:30\"`\n\tLanguage *string `json:\"language\" ogdat:\"ID:31\"`\n\t\/* Here we have a problem in spec 2.1. which says \"nach ISO\\IEC 10646-1\", which means utf-8, utf-16 and utf-32.\n\t * We would certainly support more encodings, as eg.\n\t * ISO 19115 \/ B.5.10 MD_CharacterSetCode<> or\n\t * http:\/\/www.iana.org\/assignments\/character-sets\/character-sets.xml\n\t *\/\n\tEncoding *string `json:\"characterset\" ogdat:\"ID:32\"`\n}\n\ntype MetaData struct {\n\t\/\/ Core\n\tTitle *string `json:\"title\" ogdat:\"ID:8\"`\n\tDescription *string `json:\"notes\" ogdat:\"ID:9\"`\n\tSchlagworte []Tags `json:\"tags\" ogdat:\"ID:11\"`\n\tMaintainer *string `json:\"maintainer\" ogdat:\"ID:19\"`\n\tLicense *string `json:\"license\" ogdat:\"ID:21\"` \/\/ Sollte URI des Lizenzdokuments sein\n\n\t\/\/ nested structs\n\tExtras `json:\"extras\"`\n\tResource []Resource `json:\"resources\"`\n}\n\nfunc (md *MetaData) GetBeschreibungForFieldName(name string) *ogdat.Beschreibung {\n\tif f, ok := reflect.TypeOf(md).Elem().FieldByName(name); ok {\n\t\tif id := ogdat.GetIDFromMetaDataStructField(f); id > -1 {\n\t\t\tbeschreibung, _ := ogdat.GetOGDSetForVersion(Version).GetBeschreibungForID(id)\n\t\t\treturn beschreibung\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tfor idx, val := range categories {\n\t\tcategorymap[val.ID] = categories[idx]\n\t}\n\togdat.RegisterFromCSVFile(Version, specfile)\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"golang.org\/x\/sync\/errgroup\"\n\n\t\"github.com\/gobuffalo\/buffalo\/meta\"\n\t\"github.com\/gobuffalo\/envy\"\n\t\"github.com\/markbates\/deplist\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar setupOptions = struct {\n\tverbose bool\n\tupdateGoDeps bool\n\tdropDatabases bool\n}{}\n\ntype setupCheck func(meta.App) error\n\nvar setupCmd = &cobra.Command{\n\tUse: \"setup\",\n\tShort: \"Setups a newly created, or recently checked out application.\",\n\tLong: `Setup runs through checklist to make sure dependencies are setup correcly.\n\nDependencies (if used):\n* Runs \"dep ensure\" to install required Go dependencies.\n\nAsset Pipeline (if used):\n* Runs \"npm install\" or \"yarn install\" to install asset dependencies.\n\nDatabase (if used):\n* Runs \"buffalo db create -a\" to create databases.\n* Runs \"buffalo db migrate\" to run database migrations.\n* Runs \"buffalo task db:seed\" to seed the database (if the task exists).\n\nTests:\n* Runs \"buffalo test\" to confirm the application's tests are running properly.\n`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tapp := meta.New(\".\")\n\t\tfor _, check := range []setupCheck{assetCheck, updateGoDepsCheck, databaseCheck, testCheck} {\n\t\t\terr := check(app)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n}\n\nfunc updateGoDepsCheck(app meta.App) error {\n\tif app.WithModules {\n\t\tc := exec.Command(envy.Get(\"GO_BIN\", \"go\"), \"get\")\n\t\treturn run(c)\n\t}\n\tif app.WithDep {\n\t\t\/\/ use github.com\/golang\/dep\n\t\targs := []string{\"ensure\"}\n\t\tif setupOptions.verbose {\n\t\t\targs = append(args, \"-v\")\n\t\t}\n\t\tif setupOptions.updateGoDeps {\n\t\t\targs = append(args, \"--update\")\n\t\t}\n\t\terr := run(exec.Command(\"dep\", args...))\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ go old school with the installation\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\twg, _ := errgroup.WithContext(ctx)\n\tdeps, err := deplist.List()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tdeps[\"github.com\/gobuffalo\/suite\"] = \"github.com\/gobuffalo\/suite\"\n\n\tfor dep := range deps {\n\t\targs := []string{\"get\"}\n\t\tif setupOptions.verbose {\n\t\t\targs = append(args, \"-v\")\n\t\t}\n\t\tif setupOptions.updateGoDeps {\n\t\t\targs = append(args, \"-u\")\n\t\t}\n\t\targs = append(args, dep)\n\t\tc := exec.Command(envy.Get(\"GO_BIN\", \"go\"), args...)\n\t\tf := func() error {\n\t\t\treturn run(c)\n\t\t}\n\t\twg.Go(f)\n\t}\n\terr = wg.Wait()\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error trying to install and update the dependencies for this application:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc testCheck(meta.App) error {\n\terr := run(exec.Command(\"buffalo\", \"test\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to run your applications tests:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc databaseCheck(app meta.App) error {\n\tif !app.WithPop {\n\t\treturn nil\n\t}\n\tfor _, check := range []setupCheck{dbCreateCheck, dbMigrateCheck, dbSeedCheck} {\n\t\terr := check(app)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc dbCreateCheck(meta.App) error {\n\tif setupOptions.dropDatabases {\n\t\terr := run(exec.Command(\"buffalo\", \"pop\", \"drop\", \"-a\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"We encountered an error when trying to drop your application's databases. Please check to make sure that your database server is running and that the username and passwords found in the database.yml are properly configured and set up on your database server.\\n %s\", err)\n\t\t}\n\t}\n\terr := run(exec.Command(\"buffalo\", \"pop\", \"create\", \"-a\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered an error when trying to create your application's databases. Please check to make sure that your database server is running and that the username and passwords found in the database.yml are properly configured and set up on your database server.\\n %s\", err)\n\t}\n\treturn nil\n}\n\nfunc dbMigrateCheck(meta.App) error {\n\terr := run(exec.Command(\"buffalo\", \"pop\", \"migrate\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to migrate your database:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc dbSeedCheck(meta.App) error {\n\tcmd := exec.Command(\"buffalo\", \"t\", \"list\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\t\/\/ no tasks configured, so return\n\t\treturn nil\n\t}\n\tif bytes.Contains(out, []byte(\"db:seed\")) {\n\t\terr := run(exec.Command(\"buffalo\", \"task\", \"db:seed\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"We encountered the following error when trying to seed your database:\\n%s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assetCheck(app meta.App) error {\n\tif !app.WithWebpack {\n\t\treturn nil\n\t}\n\tif app.WithYarn {\n\t\treturn yarnCheck(app)\n\t}\n\treturn npmCheck(app)\n}\n\nfunc npmCheck(app meta.App) error {\n\terr := nodeCheck(app)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\terr = run(exec.Command(\"npm\", \"install\", \"--no-progress\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to install your asset dependencies using npm:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc yarnCheck(app meta.App) error {\n\tif err := nodeCheck(app); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif _, err := exec.LookPath(\"yarnpkg\"); err != nil {\n\t\terr := run(exec.Command(\"npm\", \"install\", \"-g\", \"yarn\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"This application require yarn, and we could not find it installed on your system. We tried to install it for you, but ran into the following error:\\n%s\", err)\n\t\t}\n\t}\n\tif err := run(exec.Command(\"yarnpkg\", \"install\", \"--no-progress\")); err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to install your asset dependencies using yarn:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc nodeCheck(meta.App) error {\n\tif _, err := exec.LookPath(\"node\"); err != nil {\n\t\treturn errors.New(\"this application requires node, and we could not find it installed on your system please install node and try again\")\n\t}\n\tif _, err := exec.LookPath(\"npm\"); err != nil {\n\t\treturn errors.New(\"this application requires npm, and we could not find it installed on your system please install npm and try again\")\n\t}\n\treturn nil\n}\n\nfunc run(cmd *exec.Cmd) error {\n\tlogrus.Infof(\"--> %s\\n\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}\n\nfunc init() {\n\tsetupCmd.Flags().BoolVarP(&setupOptions.verbose, \"verbose\", \"v\", false, \"run with verbose output\")\n\tsetupCmd.Flags().BoolVarP(&setupOptions.updateGoDeps, \"update\", \"u\", false, \"run go get -u against the application's Go dependencies\")\n\tsetupCmd.Flags().BoolVarP(&setupOptions.dropDatabases, \"drop\", \"d\", false, \"drop existing databases\")\n\n\tdecorate(\"setup\", setupCmd)\n\tRootCmd.AddCommand(setupCmd)\n}\n<commit_msg>install dep if missing during buffalo setup for dep app (#1371)<commit_after>package cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"golang.org\/x\/sync\/errgroup\"\n\n\t\"github.com\/gobuffalo\/buffalo\/meta\"\n\t\"github.com\/gobuffalo\/envy\"\n\t\"github.com\/markbates\/deplist\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar setupOptions = struct {\n\tverbose bool\n\tupdateGoDeps bool\n\tdropDatabases bool\n}{}\n\ntype setupCheck func(meta.App) error\n\nvar setupCmd = &cobra.Command{\n\tUse: \"setup\",\n\tShort: \"Setups a newly created, or recently checked out application.\",\n\tLong: `Setup runs through checklist to make sure dependencies are setup correcly.\n\nDependencies (if used):\n* Runs \"dep ensure\" to install required Go dependencies.\n\nAsset Pipeline (if used):\n* Runs \"npm install\" or \"yarn install\" to install asset dependencies.\n\nDatabase (if used):\n* Runs \"buffalo db create -a\" to create databases.\n* Runs \"buffalo db migrate\" to run database migrations.\n* Runs \"buffalo task db:seed\" to seed the database (if the task exists).\n\nTests:\n* Runs \"buffalo test\" to confirm the application's tests are running properly.\n`,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tapp := meta.New(\".\")\n\t\tfor _, check := range []setupCheck{assetCheck, updateGoDepsCheck, databaseCheck, testCheck} {\n\t\t\terr := check(app)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n}\n\nfunc updateGoDepsCheck(app meta.App) error {\n\tif app.WithModules {\n\t\tc := exec.Command(envy.Get(\"GO_BIN\", \"go\"), \"get\")\n\t\treturn run(c)\n\t}\n\tif app.WithDep {\n\t\tif _, err := exec.LookPath(\"dep\"); err != nil {\n\t\t\tif err := run(exec.Command(envy.Get(\"GO_BIN\", \"go\"), \"get\", \"github.com\/golang\/dep\/cmd\/dep\")); err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t\targs := []string{\"ensure\"}\n\t\tif setupOptions.verbose {\n\t\t\targs = append(args, \"-v\")\n\t\t}\n\t\tif setupOptions.updateGoDeps {\n\t\t\targs = append(args, \"--update\")\n\t\t}\n\t\terr := run(exec.Command(\"dep\", args...))\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ go old school with the installation\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\twg, _ := errgroup.WithContext(ctx)\n\tdeps, err := deplist.List()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tdeps[\"github.com\/gobuffalo\/suite\"] = \"github.com\/gobuffalo\/suite\"\n\n\tfor dep := range deps {\n\t\targs := []string{\"get\"}\n\t\tif setupOptions.verbose {\n\t\t\targs = append(args, \"-v\")\n\t\t}\n\t\tif setupOptions.updateGoDeps {\n\t\t\targs = append(args, \"-u\")\n\t\t}\n\t\targs = append(args, dep)\n\t\tc := exec.Command(envy.Get(\"GO_BIN\", \"go\"), args...)\n\t\tf := func() error {\n\t\t\treturn run(c)\n\t\t}\n\t\twg.Go(f)\n\t}\n\terr = wg.Wait()\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error trying to install and update the dependencies for this application:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc testCheck(meta.App) error {\n\terr := run(exec.Command(\"buffalo\", \"test\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to run your applications tests:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc databaseCheck(app meta.App) error {\n\tif !app.WithPop {\n\t\treturn nil\n\t}\n\tfor _, check := range []setupCheck{dbCreateCheck, dbMigrateCheck, dbSeedCheck} {\n\t\terr := check(app)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc dbCreateCheck(meta.App) error {\n\tif setupOptions.dropDatabases {\n\t\terr := run(exec.Command(\"buffalo\", \"pop\", \"drop\", \"-a\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"We encountered an error when trying to drop your application's databases. Please check to make sure that your database server is running and that the username and passwords found in the database.yml are properly configured and set up on your database server.\\n %s\", err)\n\t\t}\n\t}\n\terr := run(exec.Command(\"buffalo\", \"pop\", \"create\", \"-a\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered an error when trying to create your application's databases. Please check to make sure that your database server is running and that the username and passwords found in the database.yml are properly configured and set up on your database server.\\n %s\", err)\n\t}\n\treturn nil\n}\n\nfunc dbMigrateCheck(meta.App) error {\n\terr := run(exec.Command(\"buffalo\", \"pop\", \"migrate\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to migrate your database:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc dbSeedCheck(meta.App) error {\n\tcmd := exec.Command(\"buffalo\", \"t\", \"list\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\t\/\/ no tasks configured, so return\n\t\treturn nil\n\t}\n\tif bytes.Contains(out, []byte(\"db:seed\")) {\n\t\terr := run(exec.Command(\"buffalo\", \"task\", \"db:seed\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"We encountered the following error when trying to seed your database:\\n%s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assetCheck(app meta.App) error {\n\tif !app.WithWebpack {\n\t\treturn nil\n\t}\n\tif app.WithYarn {\n\t\treturn yarnCheck(app)\n\t}\n\treturn npmCheck(app)\n}\n\nfunc npmCheck(app meta.App) error {\n\terr := nodeCheck(app)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\terr = run(exec.Command(\"npm\", \"install\", \"--no-progress\"))\n\tif err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to install your asset dependencies using npm:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc yarnCheck(app meta.App) error {\n\tif err := nodeCheck(app); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif _, err := exec.LookPath(\"yarnpkg\"); err != nil {\n\t\terr := run(exec.Command(\"npm\", \"install\", \"-g\", \"yarn\"))\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"This application require yarn, and we could not find it installed on your system. We tried to install it for you, but ran into the following error:\\n%s\", err)\n\t\t}\n\t}\n\tif err := run(exec.Command(\"yarnpkg\", \"install\", \"--no-progress\")); err != nil {\n\t\treturn errors.Errorf(\"We encountered the following error when trying to install your asset dependencies using yarn:\\n%s\", err)\n\t}\n\treturn nil\n}\n\nfunc nodeCheck(meta.App) error {\n\tif _, err := exec.LookPath(\"node\"); err != nil {\n\t\treturn errors.New(\"this application requires node, and we could not find it installed on your system please install node and try again\")\n\t}\n\tif _, err := exec.LookPath(\"npm\"); err != nil {\n\t\treturn errors.New(\"this application requires npm, and we could not find it installed on your system please install npm and try again\")\n\t}\n\treturn nil\n}\n\nfunc run(cmd *exec.Cmd) error {\n\tlogrus.Infof(\"--> %s\\n\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}\n\nfunc init() {\n\tsetupCmd.Flags().BoolVarP(&setupOptions.verbose, \"verbose\", \"v\", false, \"run with verbose output\")\n\tsetupCmd.Flags().BoolVarP(&setupOptions.updateGoDeps, \"update\", \"u\", false, \"run go get -u against the application's Go dependencies\")\n\tsetupCmd.Flags().BoolVarP(&setupOptions.dropDatabases, \"drop\", \"d\", false, \"drop existing databases\")\n\n\tdecorate(\"setup\", setupCmd)\n\tRootCmd.AddCommand(setupCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package apidAnalytics\n\nimport (\n\t\"bufio\"\n\t\"compress\/gzip\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst fileExtension = \".txt.gz\"\n\n\/\/ Channel where analytics records are buffered before being dumped to a\n\/\/ file as write to file should not performed in the Http Thread\nvar internalBuffer chan axRecords\n\/\/ channel to indicate that internalBuffer channel is closed\nvar doneInternalBufferChan chan bool\n\n\/\/ Channel where close bucket event is published i.e. when a bucket\n\/\/ is ready to be closed based on collection interval\nvar closeBucketEvent chan bucket\n\/\/ channel to indicate that closeBucketEvent channel is closed\nvar doneClosebucketChan chan bool\n\n\/\/ Map from timestampt to bucket\nvar bucketMap map[int64]bucket\n\n\/\/ RW lock for bucketMap since the cache can be\n\/\/ read while its being written to and vice versa\nvar bucketMaplock = sync.RWMutex{}\n\ntype bucket struct {\n\tkeyTS int64\n\tDirName string\n\t\/\/ We need file handle and writer to close the file\n\tFileWriter fileWriter\n}\n\n\/\/ This struct will store open file handle and writer to close the file\ntype fileWriter struct {\n\tfile *os.File\n\tgw *gzip.Writer\n\tbw *bufio.Writer\n}\n\nfunc initBufferingManager() {\n\tinternalBuffer = make(chan axRecords,\n\t\tconfig.GetInt(analyticsBufferChannelSize))\n\tcloseBucketEvent = make(chan bucket)\n\tdoneInternalBufferChan = make(chan bool)\n\tdoneClosebucketChan = make(chan bool)\n\n\tbucketMaplock.Lock()\n\tbucketMap = make(map[int64]bucket)\n\tbucketMaplock.Unlock()\n\n\t\/\/ Keep polling the internal buffer for new messages\n\tgo func() {\n\t\tfor {\n\t\t\trecords, more := <-internalBuffer\n\t\t\tif more {\n\t\t\t\terr := save(records)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"Could not save %d messages to file\"+\n\t\t\t\t\t\t\" due to: %v\", len(records.Records), err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ indicates a close signal was sent on the channel\n\t\t\t\tlog.Debugf(\"Closing channel internal buffer\")\n\t\t\t\tdoneInternalBufferChan <- true\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t}()\n\n\t\/\/ Keep polling the closeEvent channel to see if bucket is ready to be closed\n\tgo func() {\n\t\tfor {\n\t\t\tbucket, more := <-closeBucketEvent\n\t\t\tif more {\n\t\t\t\tlog.Debugf(\"Close Event received for bucket: %s\",\n\t\t\t\t\tbucket.DirName)\n\n\t\t\t\t\/\/ close open file\n\t\t\t\tcloseGzipFile(bucket.FileWriter)\n\n\t\t\t\tdirToBeClosed := filepath.Join(localAnalyticsTempDir, bucket.DirName)\n\t\t\t\tstagingPath := filepath.Join(localAnalyticsStagingDir, bucket.DirName)\n\t\t\t\t\/\/ close files in tmp folder and move directory to\n\t\t\t\t\/\/ staging to indicate its ready for upload\n\t\t\t\terr := os.Rename(dirToBeClosed, stagingPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"Cannot move directory '%s' from\"+\n\t\t\t\t\t\t\" tmp to staging folder due to '%s\", bucket.DirName, err)\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ Remove bucket from bucket map once its closed successfully\n\t\t\t\t\tbucketMaplock.Lock()\n\t\t\t\t\tdelete(bucketMap, bucket.keyTS)\n\t\t\t\t\tbucketMaplock.Unlock()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ indicates a close signal was sent on the channel\n\t\t\t\tlog.Debugf(\"Closing channel close bucketevent\")\n\t\t\t\tdoneClosebucketChan <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Save records to correct file based on what timestamp data is being collected for\nfunc save(records axRecords) error {\n\tbucket, err := getBucketForTimestamp(time.Now(), records.Tenant)\n\tif err != nil {\n\t\treturn err\n\t}\n\twriteGzipFile(bucket.FileWriter, records.Records)\n\treturn nil\n}\n\nfunc getBucketForTimestamp(now time.Time, tenant tenant) (bucket, error) {\n\t\/\/ first based on current timestamp and collection interval,\n\t\/\/ determine the timestamp of the bucket\n\tts := now.Unix() \/ int64(config.GetInt(analyticsCollectionInterval)) * int64(config.GetInt(analyticsCollectionInterval))\n\n\tbucketMaplock.RLock()\n\tb, exists := bucketMap[ts]\n\tbucketMaplock.RUnlock()\n\n\tif exists {\n\t\treturn b, nil\n\t} else {\n\t\ttimestamp := time.Unix(ts, 0).Format(timestampLayout)\n\n\t\t\/\/ endtimestamp of bucket = starttimestamp + collectionInterval\n\t\tendTime := time.Unix(ts+int64(config.GetInt(analyticsCollectionInterval)), 0)\n\t\tendtimestamp := endTime.Format(timestampLayout)\n\n\t\tdirName := tenant.Org + \"~\" + tenant.Env + \"~\" + timestamp\n\t\tnewPath := filepath.Join(localAnalyticsTempDir, dirName)\n\t\t\/\/ create dir\n\t\terr := os.Mkdir(newPath, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn bucket{}, fmt.Errorf(\"Cannot create directory \"+\n\t\t\t\t\"'%s' to buffer messages '%v'\", dirName, err)\n\t\t}\n\n\t\t\/\/ create file for writing\n\t\t\/\/ Format: <4DigitRandomHex>_<TSStart>.<TSEnd>_<APIDINSTANCEUUID>_writer_0.txt.gz\n\t\tfileName := getRandomHex() + \"_\" + timestamp + \".\" +\n\t\t\tendtimestamp + \"_\" +\n\t\t\tconfig.GetString(\"apigeesync_apid_instance_id\") +\n\t\t\t\"_writer_0\" + fileExtension\n\t\tcompleteFilePath := filepath.Join(newPath, fileName)\n\t\tfw, err := createGzipFile(completeFilePath)\n\t\tif err != nil {\n\t\t\treturn bucket{}, err\n\t\t}\n\n\t\tnewBucket := bucket{keyTS: ts, DirName: dirName, FileWriter: fw}\n\n\t\tbucketMaplock.Lock()\n\t\tbucketMap[ts] = newBucket\n\t\tbucketMaplock.Unlock()\n\n\t\t\/\/Send event to close directory after endTime + 5\n\t\t\/\/ seconds to make sure all buffers are flushed to file\n\t\ttimer := time.After(endTime.Sub(time.Now()) + time.Second*5)\n\t\tgo func() {\n\t\t\t<-timer\n\t\t\tcloseBucketEvent <- newBucket\n\t\t}()\n\t\treturn newBucket, nil\n\t}\n}\n\n\/\/ 4 digit Hex is prefixed to each filename to improve\n\/\/ how s3 partitions the files being uploaded\nfunc getRandomHex() string {\n\tbuff := make([]byte, 2)\n\trand.Read(buff)\n\treturn fmt.Sprintf(\"%x\", buff)\n}\n\nfunc createGzipFile(s string) (fileWriter, error) {\n\tfile, err := os.OpenFile(s, os.O_WRONLY|os.O_CREATE, os.ModePerm)\n\tif err != nil {\n\t\treturn fileWriter{},\n\t\t\tfmt.Errorf(\"Cannot create file '%s' \"+\n\t\t\t\t\"to buffer messages '%v'\", s, err)\n\t}\n\tgw := gzip.NewWriter(file)\n\tbw := bufio.NewWriter(gw)\n\treturn fileWriter{file, gw, bw}, nil\n}\n\nfunc writeGzipFile(fw fileWriter, records []interface{}) {\n\t\/\/ write each record as a new line to the bufferedWriter\n\tfor _, eachRecord := range records {\n\t\ts, _ := json.Marshal(eachRecord)\n\t\t_, err := (fw.bw).WriteString(string(s))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Write to file failed '%v'\", err)\n\t\t}\n\t\t(fw.bw).WriteString(\"\\n\")\n\t}\n\t\/\/ Flush entire batch of records to file vs each message\n\tfw.bw.Flush()\n\tfw.gw.Flush()\n}\n\nfunc closeGzipFile(fw fileWriter) {\n\tfw.bw.Flush()\n\tfw.gw.Close()\n\tfw.file.Close()\n}\n<commit_msg>[UAP-854] Cleaned up syntax for reading on a channel until close is received<commit_after>package apidAnalytics\n\nimport (\n\t\"bufio\"\n\t\"compress\/gzip\"\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst fileExtension = \".txt.gz\"\n\n\/\/ Channel where analytics records are buffered before being dumped to a\n\/\/ file as write to file should not performed in the Http Thread\nvar internalBuffer chan axRecords\n\/\/ channel to indicate that internalBuffer channel is closed\nvar doneInternalBufferChan chan bool\n\n\/\/ Channel where close bucket event is published i.e. when a bucket\n\/\/ is ready to be closed based on collection interval\nvar closeBucketEvent chan bucket\n\/\/ channel to indicate that closeBucketEvent channel is closed\nvar doneClosebucketChan chan bool\n\n\/\/ Map from timestampt to bucket\nvar bucketMap map[int64]bucket\n\n\/\/ RW lock for bucketMap since the cache can be\n\/\/ read while its being written to and vice versa\nvar bucketMaplock = sync.RWMutex{}\n\ntype bucket struct {\n\tkeyTS int64\n\tDirName string\n\t\/\/ We need file handle and writer to close the file\n\tFileWriter fileWriter\n}\n\n\/\/ This struct will store open file handle and writer to close the file\ntype fileWriter struct {\n\tfile *os.File\n\tgw *gzip.Writer\n\tbw *bufio.Writer\n}\n\nfunc initBufferingManager() {\n\tinternalBuffer = make(chan axRecords,\n\t\tconfig.GetInt(analyticsBufferChannelSize))\n\tcloseBucketEvent = make(chan bucket)\n\tdoneInternalBufferChan = make(chan bool)\n\tdoneClosebucketChan = make(chan bool)\n\n\tbucketMaplock.Lock()\n\tbucketMap = make(map[int64]bucket)\n\tbucketMaplock.Unlock()\n\n\t\/\/ Keep polling the internal buffer for new messages\n\tgo func() {\n\t\tfor records := range internalBuffer {\n\t\t\terr := save(records)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Could not save %d messages to file\"+\n\t\t\t\t\t\" due to: %v\", len(records.Records), err)\n\t\t\t}\n\t\t}\n\t\t\/\/ indicates a close signal was sent on the channel\n\t\tlog.Debugf(\"Closing channel internal buffer\")\n\t\tdoneInternalBufferChan <- true\n\t}()\n\n\t\/\/ Keep polling the closeEvent channel to see if bucket is ready to be closed\n\tgo func() {\n\t\tfor bucket := range closeBucketEvent {\n\t\t\tlog.Debugf(\"Close Event received for bucket: %s\",\n\t\t\t\tbucket.DirName)\n\n\t\t\t\/\/ close open file\n\t\t\tcloseGzipFile(bucket.FileWriter)\n\n\t\t\tdirToBeClosed := filepath.Join(localAnalyticsTempDir, bucket.DirName)\n\t\t\tstagingPath := filepath.Join(localAnalyticsStagingDir, bucket.DirName)\n\t\t\t\/\/ close files in tmp folder and move directory to\n\t\t\t\/\/ staging to indicate its ready for upload\n\t\t\terr := os.Rename(dirToBeClosed, stagingPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Cannot move directory '%s' from\" +\n\t\t\t\t\t\" tmp to staging folder due to '%s\", bucket.DirName, err)\n\t\t\t} else {\n\t\t\t\t\/\/ Remove bucket from bucket map once its closed successfully\n\t\t\t\tbucketMaplock.Lock()\n\t\t\t\tdelete(bucketMap, bucket.keyTS)\n\t\t\t\tbucketMaplock.Unlock()\n\t\t\t}\n\t\t}\n\t\t\/\/ indicates a close signal was sent on the channel\n\t\tlog.Debugf(\"Closing channel close bucketevent\")\n\t\tdoneClosebucketChan <- true\n\t}()\n}\n\n\/\/ Save records to correct file based on what timestamp data is being collected for\nfunc save(records axRecords) error {\n\tbucket, err := getBucketForTimestamp(time.Now(), records.Tenant)\n\tif err != nil {\n\t\treturn err\n\t}\n\twriteGzipFile(bucket.FileWriter, records.Records)\n\treturn nil\n}\n\nfunc getBucketForTimestamp(now time.Time, tenant tenant) (bucket, error) {\n\t\/\/ first based on current timestamp and collection interval,\n\t\/\/ determine the timestamp of the bucket\n\tts := now.Unix() \/ int64(config.GetInt(analyticsCollectionInterval)) * int64(config.GetInt(analyticsCollectionInterval))\n\n\tbucketMaplock.RLock()\n\tb, exists := bucketMap[ts]\n\tbucketMaplock.RUnlock()\n\n\tif exists {\n\t\treturn b, nil\n\t} else {\n\t\ttimestamp := time.Unix(ts, 0).Format(timestampLayout)\n\n\t\t\/\/ endtimestamp of bucket = starttimestamp + collectionInterval\n\t\tendTime := time.Unix(ts+int64(config.GetInt(analyticsCollectionInterval)), 0)\n\t\tendtimestamp := endTime.Format(timestampLayout)\n\n\t\tdirName := tenant.Org + \"~\" + tenant.Env + \"~\" + timestamp\n\t\tnewPath := filepath.Join(localAnalyticsTempDir, dirName)\n\t\t\/\/ create dir\n\t\terr := os.Mkdir(newPath, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn bucket{}, fmt.Errorf(\"Cannot create directory \"+\n\t\t\t\t\"'%s' to buffer messages '%v'\", dirName, err)\n\t\t}\n\n\t\t\/\/ create file for writing\n\t\t\/\/ Format: <4DigitRandomHex>_<TSStart>.<TSEnd>_<APIDINSTANCEUUID>_writer_0.txt.gz\n\t\tfileName := getRandomHex() + \"_\" + timestamp + \".\" +\n\t\t\tendtimestamp + \"_\" +\n\t\t\tconfig.GetString(\"apigeesync_apid_instance_id\") +\n\t\t\t\"_writer_0\" + fileExtension\n\t\tcompleteFilePath := filepath.Join(newPath, fileName)\n\t\tfw, err := createGzipFile(completeFilePath)\n\t\tif err != nil {\n\t\t\treturn bucket{}, err\n\t\t}\n\n\t\tnewBucket := bucket{keyTS: ts, DirName: dirName, FileWriter: fw}\n\n\t\tbucketMaplock.Lock()\n\t\tbucketMap[ts] = newBucket\n\t\tbucketMaplock.Unlock()\n\n\t\t\/\/Send event to close directory after endTime + 5\n\t\t\/\/ seconds to make sure all buffers are flushed to file\n\t\ttimer := time.After(endTime.Sub(time.Now()) + time.Second*5)\n\t\tgo func() {\n\t\t\t<-timer\n\t\t\tcloseBucketEvent <- newBucket\n\t\t}()\n\t\treturn newBucket, nil\n\t}\n}\n\n\/\/ 4 digit Hex is prefixed to each filename to improve\n\/\/ how s3 partitions the files being uploaded\nfunc getRandomHex() string {\n\tbuff := make([]byte, 2)\n\trand.Read(buff)\n\treturn fmt.Sprintf(\"%x\", buff)\n}\n\nfunc createGzipFile(s string) (fileWriter, error) {\n\tfile, err := os.OpenFile(s, os.O_WRONLY|os.O_CREATE, os.ModePerm)\n\tif err != nil {\n\t\treturn fileWriter{},\n\t\t\tfmt.Errorf(\"Cannot create file '%s' \"+\n\t\t\t\t\"to buffer messages '%v'\", s, err)\n\t}\n\tgw := gzip.NewWriter(file)\n\tbw := bufio.NewWriter(gw)\n\treturn fileWriter{file, gw, bw}, nil\n}\n\nfunc writeGzipFile(fw fileWriter, records []interface{}) {\n\t\/\/ write each record as a new line to the bufferedWriter\n\tfor _, eachRecord := range records {\n\t\ts, _ := json.Marshal(eachRecord)\n\t\t_, err := (fw.bw).WriteString(string(s))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Write to file failed '%v'\", err)\n\t\t}\n\t\t(fw.bw).WriteString(\"\\n\")\n\t}\n\t\/\/ Flush entire batch of records to file vs each message\n\tfw.bw.Flush()\n\tfw.gw.Flush()\n}\n\nfunc closeGzipFile(fw fileWriter) {\n\tfw.bw.Flush()\n\tfw.gw.Close()\n\tfw.file.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Signer interface describes facility implementing signing of files\ntype Signer interface {\n\tInit() error\n\tSetKey(keyRef string)\n\tSetKeyRing(keyring, secretKeyring string)\n\tSetPassphrase(passphrase, passphraseFile string)\n\tSetBatch(batch bool)\n\tDetachedSign(source string, destination string) error\n\tClearSign(source string, destination string) error\n}\n\n\/\/ Verifier interface describes signature verification factility\ntype Verifier interface {\n\tInitKeyring() error\n\tAddKeyring(keyring string)\n\tVerifyDetachedSignature(signature, cleartext io.Reader) error\n\tVerifyClearsigned(clearsigned io.Reader) error\n\tExtractClearsigned(clearsigned io.Reader) (text *os.File, err error)\n}\n\n\/\/ Test interface\nvar (\n\t_ Signer = &GpgSigner{}\n\t_ Verifier = &GpgVerifier{}\n)\n\n\/\/ GpgSigner is implementation of Signer interface using gpg\ntype GpgSigner struct {\n\tkeyRef string\n\tkeyring, secretKeyring string\n\tpassphrase, passphraseFile string\n\tbatch bool\n}\n\n\/\/ SetBatch control --no-tty flag to gpg\nfunc (g *GpgSigner) SetBatch(batch bool) {\n\tg.batch = batch\n}\n\n\/\/ SetKey sets key ID to use when signing files\nfunc (g *GpgSigner) SetKey(keyRef string) {\n\tg.keyRef = keyRef\n}\n\n\/\/ SetKeyRing allows to set custom keyring and secretkeyring\nfunc (g *GpgSigner) SetKeyRing(keyring, secretKeyring string) {\n\tg.keyring, g.secretKeyring = keyring, secretKeyring\n}\n\n\/\/ SetPassphrase sets passhprase params\nfunc (g *GpgSigner) SetPassphrase(passphrase, passphraseFile string) {\n\tg.passphrase, g.passphraseFile = passphrase, passphraseFile\n}\n\nfunc (g *GpgSigner) gpgArgs() []string {\n\targs := []string{}\n\tif g.keyring != \"\" {\n\t\targs = append(args, \"--no-auto-check-trustdb\", \"--no-default-keyring\", \"--keyring\", g.keyring)\n\t}\n\tif g.secretKeyring != \"\" {\n\t\targs = append(args, \"--secret-keyring\", g.secretKeyring)\n\t}\n\n\tif g.keyRef != \"\" {\n\t\targs = append(args, \"-u\", g.keyRef)\n\t}\n\n\tif g.passphrase != \"\" || g.passphraseFile != \"\" {\n\t\targs = append(args, \"--no-use-agent\")\n\t}\n\n\tif g.passphrase != \"\" {\n\t\targs = append(args, \"--passphrase\", g.passphrase)\n\t}\n\n\tif g.passphraseFile != \"\" {\n\t\targs = append(args, \"--passphrase-file\", g.passphraseFile)\n\t}\n\n\tif g.batch {\n\t\targs = append(args, \"--no-tty\")\n\t}\n\n\treturn args\n}\n\n\/\/ Init verifies availability of gpg & presence of keys\nfunc (g *GpgSigner) Init() error {\n\toutput, err := exec.Command(\"gpg\", \"--list-keys\", \"--dry-run\", \"--no-auto-check-trustdb\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to execute gpg: %s (is gpg installed?): %s\", err, string(output))\n\t}\n\n\tif g.keyring == \"\" && g.secretKeyring == \"\" && len(output) == 0 {\n\t\treturn fmt.Errorf(\"looks like there are no keys in gpg, please create one (official manual: http:\/\/www.gnupg.org\/gph\/en\/manual.html)\")\n\t}\n\n\treturn err\n}\n\n\/\/ DetachedSign signs file with detached signature in ASCII format\nfunc (g *GpgSigner) DetachedSign(source string, destination string) error {\n\tfmt.Printf(\"Signing file '%s' with gpg, please enter your passphrase when prompted:\\n\", filepath.Base(source))\n\n\targs := []string{\"-o\", destination, \"--armor\", \"--yes\"}\n\targs = append(args, g.gpgArgs()...)\n\targs = append(args, \"--detach-sign\", source)\n\tcmd := exec.Command(\"gpg\", args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ ClearSign clear-signs the file\nfunc (g *GpgSigner) ClearSign(source string, destination string) error {\n\tfmt.Printf(\"Clearsigning file '%s' with gpg, please enter your passphrase when prompted:\\n\", filepath.Base(source))\n\targs := []string{\"-o\", destination, \"--yes\"}\n\targs = append(args, g.gpgArgs()...)\n\targs = append(args, \"--clearsign\", source)\n\tcmd := exec.Command(\"gpg\", args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ GpgVerifier is implementation of Verifier interface using gpgv\ntype GpgVerifier struct {\n\tkeyRings []string\n}\n\n\/\/ InitKeyring verifies that gpg is installed and some keys are trusted\nfunc (g *GpgVerifier) InitKeyring() error {\n\terr := exec.Command(\"gpgv\", \"--version\").Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to execute gpgv: %s (is gpg installed?)\", err)\n\t}\n\n\tif len(g.keyRings) == 0 {\n\t\t\/\/ using default keyring\n\t\toutput, err := exec.Command(\"gpg\", \"--no-default-keyring\", \"--no-auto-check-trustdb\", \"--keyring\", \"trustedkeys.gpg\", \"--list-keys\").Output()\n\t\tif err == nil && len(output) == 0 {\n\t\t\tfmt.Printf(\"\\nLooks like your keyring with trusted keys is empty. You might consider importing some keys.\\n\")\n\t\t\tfmt.Printf(\"If you're running Debian or Ubuntu, it's a good idea to import current archive keys by running:\\n\\n\")\n\t\t\tfmt.Printf(\" gpg --keyring \/usr\/share\/keyrings\/debian-archive-keyring.gpg --export | gpg --no-default-keyring --keyring trustedkeys.gpg --import\\n\")\n\t\t\tfmt.Printf(\"\\n(for Ubuntu, use \/usr\/share\/keyrings\/ubuntu-archive-keyring.gpg)\\n\\n\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ AddKeyring adds custom keyring to GPG parameters\nfunc (g *GpgVerifier) AddKeyring(keyring string) {\n\tg.keyRings = append(g.keyRings, keyring)\n}\n\nfunc (g *GpgVerifier) argsKeyrings() (args []string) {\n\tif len(g.keyRings) > 0 {\n\t\targs = make([]string, 0, 2*len(g.keyRings))\n\t\tfor _, keyring := range g.keyRings {\n\t\t\targs = append(args, \"--keyring\", keyring)\n\t\t}\n\t} else {\n\t\targs = []string{\"--keyring\", \"trustedkeys.gpg\"}\n\t}\n\treturn\n}\n\nfunc (g *GpgVerifier) runGpgv(args []string, context string) error {\n\tcmd := exec.Command(\"gpgv\", args...)\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stderr.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuffer := &bytes.Buffer{}\n\n\t_, err = io.Copy(io.MultiWriter(os.Stderr, buffer), stderr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatches := regexp.MustCompile(\"ID ([0-9A-F]{8})\").FindAllStringSubmatch(buffer.String(), -1)\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tif len(g.keyRings) == 0 && len(matches) > 0 {\n\t\t\tfmt.Printf(\"\\nLooks like some keys are missing in your trusted keyring, you may consider importing them from keyserver:\\n\\n\")\n\n\t\t\tkeyIDs := []string{}\n\t\t\tfor _, match := range matches {\n\t\t\t\tkeyIDs = append(keyIDs, match[1])\n\t\t\t}\n\t\t\tfmt.Printf(\"gpg --no-default-keyring --keyring trustedkeys.gpg --keyserver keys.gnupg.net --recv-keys %s\\n\\n\",\n\t\t\t\tstrings.Join(keyIDs, \" \"))\n\n\t\t\tfmt.Printf(\"Sometimes keys are stored in repository root in file named Release.key, to import such key:\\n\\n\")\n\t\t\tfmt.Printf(\"wget -O - https:\/\/some.repo\/repository\/Release.key | gpg --no-default-keyring --keyring trustedkeys.gpg --import\\n\\n\")\n\t\t}\n\t\treturn fmt.Errorf(\"verification of %s failed: %s\", context, err)\n\t}\n\treturn nil\n}\n\n\/\/ VerifyDetachedSignature verifies combination of signature and cleartext using gpgv\nfunc (g *GpgVerifier) VerifyDetachedSignature(signature, cleartext io.Reader) error {\n\targs := g.argsKeyrings()\n\n\tsigf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(sigf.Name())\n\tdefer sigf.Close()\n\n\t_, err = io.Copy(sigf, signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, cleartext)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = append(args, sigf.Name(), clearf.Name())\n\treturn g.runGpgv(args, \"detached signature\")\n}\n\n\/\/ VerifyClearsigned verifies clearsigned file using gpgv\nfunc (g *GpgVerifier) VerifyClearsigned(clearsigned io.Reader) error {\n\targs := g.argsKeyrings()\n\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, clearsigned)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = append(args, clearf.Name())\n\treturn g.runGpgv(args, \"clearsigned file\")\n}\n\n\/\/ ExtractClearsigned extracts cleartext from clearsigned file WITHOUT signature verification\nfunc (g *GpgVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File, err error) {\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, clearsigned)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttext, err = ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer os.Remove(text.Name())\n\n\targs := []string{\"--no-auto-check-trustdb\", \"--decrypt\", \"--batch\", \"--skip-verify\", \"--output\", \"-\", clearf.Name()}\n\n\tcmd := exec.Command(\"gpg\", args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stdout.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = io.Copy(text, stdout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"extraction of clearsigned file failed: %s\", err)\n\t}\n\n\t_, err = text.Seek(0, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn\n}\n<commit_msg>Add --no-default-keyring to example command. #182<commit_after>package utils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Signer interface describes facility implementing signing of files\ntype Signer interface {\n\tInit() error\n\tSetKey(keyRef string)\n\tSetKeyRing(keyring, secretKeyring string)\n\tSetPassphrase(passphrase, passphraseFile string)\n\tSetBatch(batch bool)\n\tDetachedSign(source string, destination string) error\n\tClearSign(source string, destination string) error\n}\n\n\/\/ Verifier interface describes signature verification factility\ntype Verifier interface {\n\tInitKeyring() error\n\tAddKeyring(keyring string)\n\tVerifyDetachedSignature(signature, cleartext io.Reader) error\n\tVerifyClearsigned(clearsigned io.Reader) error\n\tExtractClearsigned(clearsigned io.Reader) (text *os.File, err error)\n}\n\n\/\/ Test interface\nvar (\n\t_ Signer = &GpgSigner{}\n\t_ Verifier = &GpgVerifier{}\n)\n\n\/\/ GpgSigner is implementation of Signer interface using gpg\ntype GpgSigner struct {\n\tkeyRef string\n\tkeyring, secretKeyring string\n\tpassphrase, passphraseFile string\n\tbatch bool\n}\n\n\/\/ SetBatch control --no-tty flag to gpg\nfunc (g *GpgSigner) SetBatch(batch bool) {\n\tg.batch = batch\n}\n\n\/\/ SetKey sets key ID to use when signing files\nfunc (g *GpgSigner) SetKey(keyRef string) {\n\tg.keyRef = keyRef\n}\n\n\/\/ SetKeyRing allows to set custom keyring and secretkeyring\nfunc (g *GpgSigner) SetKeyRing(keyring, secretKeyring string) {\n\tg.keyring, g.secretKeyring = keyring, secretKeyring\n}\n\n\/\/ SetPassphrase sets passhprase params\nfunc (g *GpgSigner) SetPassphrase(passphrase, passphraseFile string) {\n\tg.passphrase, g.passphraseFile = passphrase, passphraseFile\n}\n\nfunc (g *GpgSigner) gpgArgs() []string {\n\targs := []string{}\n\tif g.keyring != \"\" {\n\t\targs = append(args, \"--no-auto-check-trustdb\", \"--no-default-keyring\", \"--keyring\", g.keyring)\n\t}\n\tif g.secretKeyring != \"\" {\n\t\targs = append(args, \"--secret-keyring\", g.secretKeyring)\n\t}\n\n\tif g.keyRef != \"\" {\n\t\targs = append(args, \"-u\", g.keyRef)\n\t}\n\n\tif g.passphrase != \"\" || g.passphraseFile != \"\" {\n\t\targs = append(args, \"--no-use-agent\")\n\t}\n\n\tif g.passphrase != \"\" {\n\t\targs = append(args, \"--passphrase\", g.passphrase)\n\t}\n\n\tif g.passphraseFile != \"\" {\n\t\targs = append(args, \"--passphrase-file\", g.passphraseFile)\n\t}\n\n\tif g.batch {\n\t\targs = append(args, \"--no-tty\")\n\t}\n\n\treturn args\n}\n\n\/\/ Init verifies availability of gpg & presence of keys\nfunc (g *GpgSigner) Init() error {\n\toutput, err := exec.Command(\"gpg\", \"--list-keys\", \"--dry-run\", \"--no-auto-check-trustdb\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to execute gpg: %s (is gpg installed?): %s\", err, string(output))\n\t}\n\n\tif g.keyring == \"\" && g.secretKeyring == \"\" && len(output) == 0 {\n\t\treturn fmt.Errorf(\"looks like there are no keys in gpg, please create one (official manual: http:\/\/www.gnupg.org\/gph\/en\/manual.html)\")\n\t}\n\n\treturn err\n}\n\n\/\/ DetachedSign signs file with detached signature in ASCII format\nfunc (g *GpgSigner) DetachedSign(source string, destination string) error {\n\tfmt.Printf(\"Signing file '%s' with gpg, please enter your passphrase when prompted:\\n\", filepath.Base(source))\n\n\targs := []string{\"-o\", destination, \"--armor\", \"--yes\"}\n\targs = append(args, g.gpgArgs()...)\n\targs = append(args, \"--detach-sign\", source)\n\tcmd := exec.Command(\"gpg\", args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ ClearSign clear-signs the file\nfunc (g *GpgSigner) ClearSign(source string, destination string) error {\n\tfmt.Printf(\"Clearsigning file '%s' with gpg, please enter your passphrase when prompted:\\n\", filepath.Base(source))\n\targs := []string{\"-o\", destination, \"--yes\"}\n\targs = append(args, g.gpgArgs()...)\n\targs = append(args, \"--clearsign\", source)\n\tcmd := exec.Command(\"gpg\", args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ GpgVerifier is implementation of Verifier interface using gpgv\ntype GpgVerifier struct {\n\tkeyRings []string\n}\n\n\/\/ InitKeyring verifies that gpg is installed and some keys are trusted\nfunc (g *GpgVerifier) InitKeyring() error {\n\terr := exec.Command(\"gpgv\", \"--version\").Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to execute gpgv: %s (is gpg installed?)\", err)\n\t}\n\n\tif len(g.keyRings) == 0 {\n\t\t\/\/ using default keyring\n\t\toutput, err := exec.Command(\"gpg\", \"--no-default-keyring\", \"--no-auto-check-trustdb\", \"--keyring\", \"trustedkeys.gpg\", \"--list-keys\").Output()\n\t\tif err == nil && len(output) == 0 {\n\t\t\tfmt.Printf(\"\\nLooks like your keyring with trusted keys is empty. You might consider importing some keys.\\n\")\n\t\t\tfmt.Printf(\"If you're running Debian or Ubuntu, it's a good idea to import current archive keys by running:\\n\\n\")\n\t\t\tfmt.Printf(\" gpg --no-default-keyring --keyring \/usr\/share\/keyrings\/debian-archive-keyring.gpg --export | gpg --no-default-keyring --keyring trustedkeys.gpg --import\\n\")\n\t\t\tfmt.Printf(\"\\n(for Ubuntu, use \/usr\/share\/keyrings\/ubuntu-archive-keyring.gpg)\\n\\n\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ AddKeyring adds custom keyring to GPG parameters\nfunc (g *GpgVerifier) AddKeyring(keyring string) {\n\tg.keyRings = append(g.keyRings, keyring)\n}\n\nfunc (g *GpgVerifier) argsKeyrings() (args []string) {\n\tif len(g.keyRings) > 0 {\n\t\targs = make([]string, 0, 2*len(g.keyRings))\n\t\tfor _, keyring := range g.keyRings {\n\t\t\targs = append(args, \"--keyring\", keyring)\n\t\t}\n\t} else {\n\t\targs = []string{\"--keyring\", \"trustedkeys.gpg\"}\n\t}\n\treturn\n}\n\nfunc (g *GpgVerifier) runGpgv(args []string, context string) error {\n\tcmd := exec.Command(\"gpgv\", args...)\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stderr.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuffer := &bytes.Buffer{}\n\n\t_, err = io.Copy(io.MultiWriter(os.Stderr, buffer), stderr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmatches := regexp.MustCompile(\"ID ([0-9A-F]{8})\").FindAllStringSubmatch(buffer.String(), -1)\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tif len(g.keyRings) == 0 && len(matches) > 0 {\n\t\t\tfmt.Printf(\"\\nLooks like some keys are missing in your trusted keyring, you may consider importing them from keyserver:\\n\\n\")\n\n\t\t\tkeyIDs := []string{}\n\t\t\tfor _, match := range matches {\n\t\t\t\tkeyIDs = append(keyIDs, match[1])\n\t\t\t}\n\t\t\tfmt.Printf(\"gpg --no-default-keyring --keyring trustedkeys.gpg --keyserver keys.gnupg.net --recv-keys %s\\n\\n\",\n\t\t\t\tstrings.Join(keyIDs, \" \"))\n\n\t\t\tfmt.Printf(\"Sometimes keys are stored in repository root in file named Release.key, to import such key:\\n\\n\")\n\t\t\tfmt.Printf(\"wget -O - https:\/\/some.repo\/repository\/Release.key | gpg --no-default-keyring --keyring trustedkeys.gpg --import\\n\\n\")\n\t\t}\n\t\treturn fmt.Errorf(\"verification of %s failed: %s\", context, err)\n\t}\n\treturn nil\n}\n\n\/\/ VerifyDetachedSignature verifies combination of signature and cleartext using gpgv\nfunc (g *GpgVerifier) VerifyDetachedSignature(signature, cleartext io.Reader) error {\n\targs := g.argsKeyrings()\n\n\tsigf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(sigf.Name())\n\tdefer sigf.Close()\n\n\t_, err = io.Copy(sigf, signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, cleartext)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = append(args, sigf.Name(), clearf.Name())\n\treturn g.runGpgv(args, \"detached signature\")\n}\n\n\/\/ VerifyClearsigned verifies clearsigned file using gpgv\nfunc (g *GpgVerifier) VerifyClearsigned(clearsigned io.Reader) error {\n\targs := g.argsKeyrings()\n\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, clearsigned)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = append(args, clearf.Name())\n\treturn g.runGpgv(args, \"clearsigned file\")\n}\n\n\/\/ ExtractClearsigned extracts cleartext from clearsigned file WITHOUT signature verification\nfunc (g *GpgVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File, err error) {\n\tclearf, err := ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer os.Remove(clearf.Name())\n\tdefer clearf.Close()\n\n\t_, err = io.Copy(clearf, clearsigned)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttext, err = ioutil.TempFile(\"\", \"aptly-gpg\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer os.Remove(text.Name())\n\n\targs := []string{\"--no-auto-check-trustdb\", \"--decrypt\", \"--batch\", \"--skip-verify\", \"--output\", \"-\", clearf.Name()}\n\n\tcmd := exec.Command(\"gpg\", args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stdout.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = io.Copy(text, stdout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"extraction of clearsigned file failed: %s\", err)\n\t}\n\n\t_, err = text.Seek(0, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 the LinuxBoot Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The utk2 command is a replacement for utk. utk will be replaced by utk2 at\n\/\/ some point.\n\/\/\n\/\/ Synopsis:\n\/\/ utk BIOS OPERATIONS...\n\/\/\n\/\/ Examples:\n\/\/ # Dump everything to JSON:\n\/\/ utk winterfell.rom json\n\/\/\n\/\/ # Dump a single file to JSON (using regex):\n\/\/ utk winterfell.rom find Shell\n\/\/\n\/\/ # Dump GUIDs and sizes to a compact table:\n\/\/ utk winterfell.rom table\n\/\/\n\/\/ # Extract everything into a directory:\n\/\/ utk winterfell.rom extract winterfell\/\n\/\/\n\/\/ # Re-assemble the directory into an image:\n\/\/ utk winterfell\/ save winterfell2.rom\n\/\/\n\/\/ # Remove two files by their GUID and replace shell with Linux:\n\/\/ utk winterfell.rom \\\n\/\/ remove 12345678-9abc-def0-1234-567890abcdef \\\n\/\/ remove 23830293-3029-3823-0922-328328330939 \\\n\/\/ replace_pe32 Shell linux.efi \\\n\/\/ save winterfell2.rom\n\/\/\n\/\/ Operations:\n\/\/ - `json`: Dump the entire parsed image (excluding binary data) as JSON to\n\/\/ stdout.\n\/\/ - `table`: Dump GUIDs and sizes to a compact table. This is only for human\n\/\/ consumption and the format may change without notice.\n\/\/ - `find (GUID|NAME)`: Dump the JSON of one or more files. The file is found\n\/\/ by a regex match to its GUID or name in the UI section.\n\/\/ - `remove (GUID|NAME)`: Remove the first file which matches the given GUID or\n\/\/ NAME. The same matching rules and exit status are used as `find`.\n\/\/ - `replace (GUID|NAME) FILE`: Replace the first file which matches the given\n\/\/ GUID or NAME with the contents of FILE. The same matching rules and exit\n\/\/ status are used as `find`.\n\/\/ - `save FILE`: Save the current state of the image to the give file. Remember\n\/\/ that operations are applied left-to-right, so only the operations to the left\n\/\/ are included in the new image.\n\/\/ - `extract DIR`: Extract the BIOS to the given directory. Remember that\n\/\/ operations are applied left-to-right, so only the operations to the left are\n\/\/ included in the new image.\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/linuxboot\/fiano\/uefi\"\n\t\"github.com\/linuxboot\/fiano\/visitors\"\n)\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"at least one argument is required\")\n\t}\n\n\tv, err := visitors.ParseCLI(os.Args[2:])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Load and parse the image.\n\t\/\/ TODO: if os.Args[1] is a directory, re-assemble it\n\tpath := os.Args[1]\n\tf, err := os.Stat(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar parsedRoot uefi.Firmware\n\tif m := f.Mode(); m.IsDir() {\n\t\t\/\/ Call ParseDir\n\t\tpd := visitors.ParseDir{DirPath: path}\n\t\tif parsedRoot, err = pd.Parse(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\t\/\/ Regular file\n\t\timage, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tparsedRoot, err = uefi.Parse(image)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Execute the instructions from the command line.\n\tif err := visitors.ExecuteCLI(parsedRoot, v); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>This is more readable at godoc.org<commit_after>\/\/ Copyright 2018 the LinuxBoot Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The utk2 command is a replacement for utk. utk will be replaced by utk2 at\n\/\/ some point.\n\/\/\n\/\/ Synopsis:\n\/\/ utk BIOS OPERATIONS...\n\/\/\n\/\/ Examples:\n\/\/ # Dump everything to JSON:\n\/\/ utk winterfell.rom json\n\/\/\n\/\/ # Dump a single file to JSON (using regex):\n\/\/ utk winterfell.rom find Shell\n\/\/\n\/\/ # Dump GUIDs and sizes to a compact table:\n\/\/ utk winterfell.rom table\n\/\/\n\/\/ # Extract everything into a directory:\n\/\/ utk winterfell.rom extract winterfell\/\n\/\/\n\/\/ # Re-assemble the directory into an image:\n\/\/ utk winterfell\/ save winterfell2.rom\n\/\/\n\/\/ # Remove two files by their GUID and replace shell with Linux:\n\/\/ utk winterfell.rom \\\n\/\/ remove 12345678-9abc-def0-1234-567890abcdef \\\n\/\/ remove 23830293-3029-3823-0922-328328330939 \\\n\/\/ replace_pe32 Shell linux.efi \\\n\/\/ save winterfell2.rom\n\/\/\n\/\/ Operations:\n\/\/ `json`: Dump the entire parsed image (excluding binary data) as JSON to\n\/\/ stdout.\n\/\/ `table`: Dump GUIDs and sizes to a compact table. This is only for human\n\/\/ consumption and the format may change without notice.\n\/\/ `find (GUID|NAME)`: Dump the JSON of one or more files. The file is\n\/\/ found by a regex match to its GUID or name in the UI\n\/\/ section.\n\/\/ `remove (GUID|NAME)`: Remove the first file which matches the given GUID\n\/\/ or NAME. The same matching rules and exit status\n\/\/ are used as `find`.\n\/\/ `replace (GUID|NAME) FILE`: Replace the first file which matches the\n\/\/ given GUID or NAME with the contents of\n\/\/ FILE. The same matching rules and exit\n\/\/ status are used as `find`.\n\/\/ `save FILE`: Save the current state of the image to the give file.\n\/\/ Remember that operations are applied left-to-right, so only\n\/\/ the operations to the left are included in the new image.\n\/\/ `extract DIR`: Extract the BIOS to the given directory. Remember that\n\/\/ operations are applied left-to-right, so only the\n\/\/ operations to the left are included in the new image.\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/linuxboot\/fiano\/uefi\"\n\t\"github.com\/linuxboot\/fiano\/visitors\"\n)\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"at least one argument is required\")\n\t}\n\n\tv, err := visitors.ParseCLI(os.Args[2:])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Load and parse the image.\n\t\/\/ TODO: if os.Args[1] is a directory, re-assemble it\n\tpath := os.Args[1]\n\tf, err := os.Stat(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar parsedRoot uefi.Firmware\n\tif m := f.Mode(); m.IsDir() {\n\t\t\/\/ Call ParseDir\n\t\tpd := visitors.ParseDir{DirPath: path}\n\t\tif parsedRoot, err = pd.Parse(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\t\/\/ Regular file\n\t\timage, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tparsedRoot, err = uefi.Parse(image)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Execute the instructions from the command line.\n\tif err := visitors.ExecuteCLI(parsedRoot, v); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage logger\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\tcolor \"github.com\/zchee\/color\/v2\"\n\tprettyjson \"github.com\/hokaccha\/go-prettyjson\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/buffer\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nfunc init() {\n\tif err := zap.RegisterEncoder(\"debug\", func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {\n\t\treturn NewConsoleEncoder(encoderConfig), nil\n\t}); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc NewZapLogger(l zapcore.Level, opts ...zap.Option) *zap.Logger {\n\tdebug := os.Getenv(\"NVIM_GO_DEBUG\") != \"\"\n\tvar cfg zap.Config\n\tif !debug {\n\t\treturn zap.NewNop()\n\t} else {\n\t\tcfg = zap.NewDevelopmentConfig()\n\t\tcfg.Encoding = \"debug\" \/\/ already registered init function\n\t\tcfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder\n\t\topts = append(opts, zap.AddCaller())\n\t}\n\n\tcfg.DisableStacktrace = true\n\tcfg.EncoderConfig.EncodeTime = nil\n\tcfg.Level.SetLevel(zapcore.DPanicLevel) \/\/ not show logs normally\n\n\tif level := os.Getenv(\"NVIM_GO_LOG_LEVEL\"); level != \"\" {\n\t\tvar lv zapcore.Level\n\t\tif err := lv.UnmarshalText([]byte(level)); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"unknown zap log level: %v\", level))\n\t\t}\n\t\tcfg.Level.SetLevel(lv)\n\t}\n\n\tzapLogger, err := cfg.Build(opts...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn zapLogger\n}\n\nfunc NewRedirectZapLogger(lv zapcore.Level, opts ...zap.Option) (*zap.Logger, func()) {\n\tlog := NewZapLogger(lv)\n\tundo := zap.RedirectStdLog(log)\n\n\treturn log, undo\n}\n\ntype consoleEncoder struct {\n\tzapcore.Encoder\n\tconsoleEncoder zapcore.Encoder\n}\n\nfunc NewConsoleEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder {\n\tcolor.NoColor = false \/\/ Force enabled\n\n\tcfg.StacktraceKey = \"\"\n\tcfg2 := cfg\n\tcfg2.NameKey = \"\"\n\tcfg2.MessageKey = \"\"\n\tcfg2.LevelKey = \"\"\n\tcfg2.CallerKey = \"\"\n\tcfg2.StacktraceKey = \"\"\n\tcfg2.TimeKey = \"\"\n\treturn consoleEncoder{\n\t\tconsoleEncoder: zapcore.NewConsoleEncoder(cfg),\n\t\tEncoder: zapcore.NewJSONEncoder(cfg2),\n\t}\n}\n\nfunc (c consoleEncoder) Clone() zapcore.Encoder {\n\treturn consoleEncoder{\n\t\tconsoleEncoder: c.consoleEncoder.Clone(),\n\t\tEncoder: c.Encoder.Clone(),\n\t}\n}\n\nfunc (c consoleEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {\n\tline, err := c.consoleEncoder.EncodeEntry(ent, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tline2, err := c.Encoder.EncodeEntry(ent, fields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := prettyjson.Format(line2.Bytes())\n\tif err != nil {\n\t\tline.AppendString(\"errrr\")\n\t}\n\n\tline2.Reset()\n\tline2.AppendString(string(s))\n\n\tif ent.Stack != \"\" {\n\t\tline2.AppendByte('\\n')\n\t\tline2.AppendString(\"Caller StackTrace\\n\")\n\t\tline2.AppendString(ent.Stack)\n\t}\n\n\tfor _, field := range fields {\n\t\tswitch field.Key {\n\t\tcase \"stacktrace\":\n\t\t\tline2.AppendByte('\\n')\n\t\t\tline2.AppendString(\"Error StackTrace\\n\")\n\t\t\tline2.AppendString(fmt.Sprintf(\"%v\\n\", field.String))\n\t\t}\n\t}\n\n\tparts := strings.Split(line2.String(), \"\\n\")\n\tfor i := range parts {\n\t\tline.AppendString(\"| \")\n\t\tline.AppendString(parts[i])\n\t\tline.AppendByte('\\n')\n\t}\n\n\treturn line, nil\n}\n<commit_msg>logger: remove cfg.EncoderConfig.EncodeTime = nil<commit_after>\/\/ Copyright 2017 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage logger\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\tprettyjson \"github.com\/hokaccha\/go-prettyjson\"\n\tcolor \"github.com\/zchee\/color\/v2\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/buffer\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nfunc init() {\n\tif err := zap.RegisterEncoder(\"debug\", func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {\n\t\treturn NewConsoleEncoder(encoderConfig), nil\n\t}); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc NewZapLogger(l zapcore.Level, opts ...zap.Option) *zap.Logger {\n\tdebug := os.Getenv(\"NVIM_GO_DEBUG\") != \"\"\n\tvar cfg zap.Config\n\tif !debug {\n\t\treturn zap.NewNop()\n\t} else {\n\t\tcfg = zap.NewDevelopmentConfig()\n\t\tcfg.Encoding = \"debug\" \/\/ already registered init function\n\t\tcfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder\n\t\topts = append(opts, zap.AddCaller())\n\t}\n\n\tcfg.DisableStacktrace = true\n\tcfg.Level.SetLevel(zapcore.DPanicLevel) \/\/ not show logs normally\n\n\tif level := os.Getenv(\"NVIM_GO_LOG_LEVEL\"); level != \"\" {\n\t\tvar lv zapcore.Level\n\t\tif err := lv.UnmarshalText([]byte(level)); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"unknown zap log level: %v\", level))\n\t\t}\n\t\tcfg.Level.SetLevel(lv)\n\t}\n\n\tzapLogger, err := cfg.Build(opts...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn zapLogger\n}\n\nfunc NewRedirectZapLogger(lv zapcore.Level, opts ...zap.Option) (*zap.Logger, func()) {\n\tlog := NewZapLogger(lv)\n\tundo := zap.RedirectStdLog(log)\n\n\treturn log, undo\n}\n\ntype consoleEncoder struct {\n\tzapcore.Encoder\n\tconsoleEncoder zapcore.Encoder\n}\n\nfunc NewConsoleEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder {\n\tcolor.NoColor = false \/\/ Force enabled\n\n\tcfg.StacktraceKey = \"\"\n\tcfg2 := cfg\n\tcfg2.NameKey = \"\"\n\tcfg2.MessageKey = \"\"\n\tcfg2.LevelKey = \"\"\n\tcfg2.CallerKey = \"\"\n\tcfg2.StacktraceKey = \"\"\n\tcfg2.TimeKey = \"\"\n\treturn consoleEncoder{\n\t\tconsoleEncoder: zapcore.NewConsoleEncoder(cfg),\n\t\tEncoder: zapcore.NewJSONEncoder(cfg2),\n\t}\n}\n\nfunc (c consoleEncoder) Clone() zapcore.Encoder {\n\treturn consoleEncoder{\n\t\tconsoleEncoder: c.consoleEncoder.Clone(),\n\t\tEncoder: c.Encoder.Clone(),\n\t}\n}\n\nfunc (c consoleEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {\n\tline, err := c.consoleEncoder.EncodeEntry(ent, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tline2, err := c.Encoder.EncodeEntry(ent, fields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := prettyjson.Format(line2.Bytes())\n\tif err != nil {\n\t\tline.AppendString(\"errrr\")\n\t}\n\n\tline2.Reset()\n\tline2.AppendString(string(s))\n\n\tif ent.Stack != \"\" {\n\t\tline2.AppendByte('\\n')\n\t\tline2.AppendString(\"Caller StackTrace\\n\")\n\t\tline2.AppendString(ent.Stack)\n\t}\n\n\tfor _, field := range fields {\n\t\tswitch field.Key {\n\t\tcase \"stacktrace\":\n\t\t\tline2.AppendByte('\\n')\n\t\t\tline2.AppendString(\"Error StackTrace\\n\")\n\t\t\tline2.AppendString(fmt.Sprintf(\"%v\\n\", field.String))\n\t\t}\n\t}\n\n\tparts := strings.Split(line2.String(), \"\\n\")\n\tfor i := range parts {\n\t\tline.AppendString(\"| \")\n\t\tline.AppendString(parts[i])\n\t\tline.AppendByte('\\n')\n\t}\n\n\treturn line, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package auth\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.crypto\/bcrypt\"\n)\n\nvar ErrUnparsableHeader = errors.New(\"cannot parse 'Authorization' header\")\n\ntype Validator interface {\n\tIsAuthenticated(*http.Request) bool\n}\n\ntype NoopValidator struct{}\n\nfunc (NoopValidator) IsAuthenticated(*http.Request) bool { return true }\n\ntype BasicAuthHashedValidator struct {\n\tUsername string\n\tHashedPassword string\n}\n\nfunc (validator BasicAuthHashedValidator) IsAuthenticated(r *http.Request) bool {\n\tauth := r.Header.Get(\"Authorization\")\n\n\tusername, password, err := ExtractUsernameAndPassword(auth)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn validator.correctCredentials(username, password)\n}\n\nfunc (validator BasicAuthHashedValidator) correctCredentials(username string, password string) bool {\n\terr := bcrypt.CompareHashAndPassword([]byte(validator.HashedPassword), []byte(password))\n\treturn validator.Username == username && err == nil\n}\n\ntype BasicAuthValidator struct {\n\tUsername string\n\tPassword string\n}\n\nfunc (validator BasicAuthValidator) IsAuthenticated(r *http.Request) bool {\n\tauth := r.Header.Get(\"Authorization\")\n\n\tusername, password, err := ExtractUsernameAndPassword(auth)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn validator.correctCredentials(username, password)\n}\n\nfunc (validator BasicAuthValidator) correctCredentials(username string, password string) bool {\n\treturn validator.Username == username && validator.Password == password\n}\n\nfunc ExtractUsernameAndPassword(authorizationHeader string) (string, string, error) {\n\tif !strings.HasPrefix(authorizationHeader, \"Basic \") {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\tencodedCredentials := authorizationHeader[6:]\n\tcredentials, err := base64.StdEncoding.DecodeString(encodedCredentials)\n\tif err != nil {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\tparts := strings.Split(string(credentials), \":\")\n\tif len(parts) != 2 {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\treturn parts[0], parts[1], nil\n}\n<commit_msg>regenerate fakes; add missing generate comments<commit_after>package auth\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.crypto\/bcrypt\"\n)\n\nvar ErrUnparsableHeader = errors.New(\"cannot parse 'Authorization' header\")\n\n\/\/go:generate counterfeiter . Validator\ntype Validator interface {\n\tIsAuthenticated(*http.Request) bool\n}\n\ntype NoopValidator struct{}\n\nfunc (NoopValidator) IsAuthenticated(*http.Request) bool { return true }\n\ntype BasicAuthHashedValidator struct {\n\tUsername string\n\tHashedPassword string\n}\n\nfunc (validator BasicAuthHashedValidator) IsAuthenticated(r *http.Request) bool {\n\tauth := r.Header.Get(\"Authorization\")\n\n\tusername, password, err := ExtractUsernameAndPassword(auth)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn validator.correctCredentials(username, password)\n}\n\nfunc (validator BasicAuthHashedValidator) correctCredentials(username string, password string) bool {\n\terr := bcrypt.CompareHashAndPassword([]byte(validator.HashedPassword), []byte(password))\n\treturn validator.Username == username && err == nil\n}\n\ntype BasicAuthValidator struct {\n\tUsername string\n\tPassword string\n}\n\nfunc (validator BasicAuthValidator) IsAuthenticated(r *http.Request) bool {\n\tauth := r.Header.Get(\"Authorization\")\n\n\tusername, password, err := ExtractUsernameAndPassword(auth)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn validator.correctCredentials(username, password)\n}\n\nfunc (validator BasicAuthValidator) correctCredentials(username string, password string) bool {\n\treturn validator.Username == username && validator.Password == password\n}\n\nfunc ExtractUsernameAndPassword(authorizationHeader string) (string, string, error) {\n\tif !strings.HasPrefix(authorizationHeader, \"Basic \") {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\tencodedCredentials := authorizationHeader[6:]\n\tcredentials, err := base64.StdEncoding.DecodeString(encodedCredentials)\n\tif err != nil {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\tparts := strings.Split(string(credentials), \":\")\n\tif len(parts) != 2 {\n\t\treturn \"\", \"\", ErrUnparsableHeader\n\t}\n\n\treturn parts[0], parts[1], nil\n}\n<|endoftext|>"} {"text":"<commit_before>package validator\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/ per validate contruct\ntype validate struct {\n\tv *Validate\n\ttop reflect.Value\n\tns []byte\n\tactualNs []byte\n\terrs ValidationErrors\n\tisPartial bool\n\thasExcludes bool\n\tincludeExclude map[string]struct{} \/\/ reset only if StructPartial or StructExcept are called, no need otherwise\n\tmisc []byte\n\n\t\/\/ StructLevel & FieldLevel fields\n\tslflParent reflect.Value\n\tslCurrent reflect.Value\n\tflField reflect.Value\n\tflParam string\n}\n\n\/\/ parent and current will be the same the first run of validateStruct\nfunc (v *validate) validateStruct(parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {\n\n\tcs, ok := v.v.structCache.Get(typ)\n\tif !ok {\n\t\tcs = v.v.extractStructCache(current, typ.Name())\n\t}\n\n\tif len(ns) == 0 {\n\n\t\tns = append(ns, cs.name...)\n\t\tns = append(ns, '.')\n\n\t\tstructNs = append(structNs, cs.name...)\n\t\tstructNs = append(structNs, '.')\n\t}\n\n\t\/\/ ct is nil on top level struct, and structs as fields that have no tag info\n\t\/\/ so if nil or if not nil and the structonly tag isn't present\n\tif ct == nil || ct.typeof != typeStructOnly {\n\n\t\tfor _, f := range cs.fields {\n\n\t\t\tif v.isPartial {\n\n\t\t\t\t_, ok = v.includeExclude[string(append(structNs, f.name...))]\n\n\t\t\t\tif (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tv.traverseField(parent, current.Field(f.idx), ns, structNs, f, f.cTags)\n\t\t}\n\t}\n\n\t\/\/ check if any struct level validations, after all field validations already checked.\n\t\/\/ first iteration will have no info about nostructlevel tag, and is checked prior to\n\t\/\/ calling the next iteration of validateStruct called from traverseField.\n\tif cs.fn != nil {\n\n\t\tv.slflParent = parent\n\t\tv.slCurrent = current\n\t\tv.ns = ns\n\t\tv.actualNs = structNs\n\n\t\tcs.fn(v)\n\t}\n}\n\n\/\/ traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options\nfunc (v *validate) traverseField(parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {\n\n\tvar typ reflect.Type\n\tvar kind reflect.Kind\n\tvar nullable bool\n\n\tcurrent, kind, nullable = v.extractTypeInternal(current, nullable)\n\n\tswitch kind {\n\tcase reflect.Ptr, reflect.Interface, reflect.Invalid:\n\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.typeof == typeOmitEmpty {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.hasTag {\n\n\t\t\tif kind == reflect.Invalid {\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv.errs = append(v.errs,\n\t\t\t\t&fieldError{\n\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\tparam: ct.param,\n\t\t\t\t\tkind: kind,\n\t\t\t\t\ttyp: current.Type(),\n\t\t\t\t},\n\t\t\t)\n\n\t\t\treturn\n\t\t}\n\n\tcase reflect.Struct:\n\n\t\ttyp = current.Type()\n\n\t\tif typ != timeType {\n\n\t\t\tif ct != nil {\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\t\tif ct != nil && ct.typeof == typeNoStructLevel {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ if len == 0 then validating using 'Var' or 'VarWithValue'\n\t\t\t\/\/ Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...\n\t\t\t\/\/ VarWithField - this allows for validating against each field withing the struct against a specific value\n\t\t\t\/\/ pretty handly in certain situations\n\t\t\tif len(ns) > 0 {\n\t\t\t\tns = append(append(ns, cf.altName...), '.')\n\t\t\t\tstructNs = append(append(structNs, cf.name...), '.')\n\t\t\t}\n\n\t\t\tv.validateStruct(current, current, typ, ns, structNs, ct)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !ct.hasTag {\n\t\treturn\n\t}\n\n\ttyp = current.Type()\n\nOUTER:\n\tfor {\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch ct.typeof {\n\n\t\tcase typeOmitEmpty:\n\n\t\t\t\/\/ set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.flParam = \"\"\n\n\t\t\tif !nullable && !hasValue(v) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t\tcontinue\n\n\t\tcase typeDive:\n\n\t\t\tct = ct.next\n\n\t\t\t\/\/ traverse slice or map here\n\t\t\t\/\/ or panic ;)\n\t\t\tswitch kind {\n\t\t\tcase reflect.Slice, reflect.Array:\n\n\t\t\t\tvar i64 int64\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor i := 0; i < current.Len(); i++ {\n\n\t\t\t\t\ti64 = int64(i)\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tv.traverseField(parent, current.Index(i), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tcase reflect.Map:\n\n\t\t\t\tvar pv string\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor _, key := range current.MapKeys() {\n\n\t\t\t\t\tpv = fmt.Sprintf(\"%v\", key.Interface())\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tv.traverseField(parent, current.MapIndex(key), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\t\/\/ throw error, if not a slice or map then should not have gotten here\n\t\t\t\t\/\/ bad dive tag\n\t\t\t\tpanic(\"dive error! can't dive on a non slice or map\")\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase typeOr:\n\n\t\t\tv.misc = v.misc[0:0]\n\n\t\t\tfor {\n\n\t\t\t\t\/\/ set Field Level fields\n\t\t\t\tv.slflParent = parent\n\t\t\t\tv.flField = current\n\t\t\t\tv.flParam = ct.param\n\n\t\t\t\tif ct.fn(v) {\n\n\t\t\t\t\t\/\/ drain rest of the 'or' values, then continue or leave\n\t\t\t\t\tfor {\n\n\t\t\t\t\t\tct = ct.next\n\n\t\t\t\t\t\tif ct == nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.typeof != typeOr {\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tv.misc = append(v.misc, '|')\n\t\t\t\tv.misc = append(v.misc, ct.tag...)\n\n\t\t\t\tif ct.next == nil {\n\t\t\t\t\t\/\/ if we get here, no valid 'or' value and no more tags\n\n\t\t\t\t\tif ct.hasAlias {\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.actualAliasTag,\n\t\t\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\ttag: string(v.misc)[1:],\n\t\t\t\t\t\t\t\tactualTag: string(v.misc)[1:],\n\t\t\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\tdefault:\n\n\t\t\t\/\/ set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.flParam = ct.param\n\n\t\t\tif !ct.fn(v) {\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t}\n\t}\n\n}\n<commit_msg>avoid a couple of dive allocations<commit_after>package validator\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/ per validate contruct\ntype validate struct {\n\tv *Validate\n\ttop reflect.Value\n\tns []byte\n\tactualNs []byte\n\terrs ValidationErrors\n\tisPartial bool\n\thasExcludes bool\n\tincludeExclude map[string]struct{} \/\/ reset only if StructPartial or StructExcept are called, no need otherwise\n\tmisc []byte\n\n\t\/\/ StructLevel & FieldLevel fields\n\tslflParent reflect.Value\n\tslCurrent reflect.Value\n\tflField reflect.Value\n\tflParam string\n}\n\n\/\/ parent and current will be the same the first run of validateStruct\nfunc (v *validate) validateStruct(parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {\n\n\tcs, ok := v.v.structCache.Get(typ)\n\tif !ok {\n\t\tcs = v.v.extractStructCache(current, typ.Name())\n\t}\n\n\tif len(ns) == 0 {\n\n\t\tns = append(ns, cs.name...)\n\t\tns = append(ns, '.')\n\n\t\tstructNs = append(structNs, cs.name...)\n\t\tstructNs = append(structNs, '.')\n\t}\n\n\t\/\/ ct is nil on top level struct, and structs as fields that have no tag info\n\t\/\/ so if nil or if not nil and the structonly tag isn't present\n\tif ct == nil || ct.typeof != typeStructOnly {\n\n\t\tfor _, f := range cs.fields {\n\n\t\t\tif v.isPartial {\n\n\t\t\t\t_, ok = v.includeExclude[string(append(structNs, f.name...))]\n\n\t\t\t\tif (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tv.traverseField(parent, current.Field(f.idx), ns, structNs, f, f.cTags)\n\t\t}\n\t}\n\n\t\/\/ check if any struct level validations, after all field validations already checked.\n\t\/\/ first iteration will have no info about nostructlevel tag, and is checked prior to\n\t\/\/ calling the next iteration of validateStruct called from traverseField.\n\tif cs.fn != nil {\n\n\t\tv.slflParent = parent\n\t\tv.slCurrent = current\n\t\tv.ns = ns\n\t\tv.actualNs = structNs\n\n\t\tcs.fn(v)\n\t}\n}\n\n\/\/ traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options\nfunc (v *validate) traverseField(parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {\n\n\tvar typ reflect.Type\n\tvar kind reflect.Kind\n\tvar nullable bool\n\n\tcurrent, kind, nullable = v.extractTypeInternal(current, nullable)\n\n\tswitch kind {\n\tcase reflect.Ptr, reflect.Interface, reflect.Invalid:\n\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.typeof == typeOmitEmpty {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.hasTag {\n\n\t\t\tif kind == reflect.Invalid {\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv.errs = append(v.errs,\n\t\t\t\t&fieldError{\n\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\tparam: ct.param,\n\t\t\t\t\tkind: kind,\n\t\t\t\t\ttyp: current.Type(),\n\t\t\t\t},\n\t\t\t)\n\n\t\t\treturn\n\t\t}\n\n\tcase reflect.Struct:\n\n\t\ttyp = current.Type()\n\n\t\tif typ != timeType {\n\n\t\t\tif ct != nil {\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\t\tif ct != nil && ct.typeof == typeNoStructLevel {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ if len == 0 then validating using 'Var' or 'VarWithValue'\n\t\t\t\/\/ Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...\n\t\t\t\/\/ VarWithField - this allows for validating against each field withing the struct against a specific value\n\t\t\t\/\/ pretty handly in certain situations\n\t\t\tif len(ns) > 0 {\n\t\t\t\tns = append(append(ns, cf.altName...), '.')\n\t\t\t\tstructNs = append(append(structNs, cf.name...), '.')\n\t\t\t}\n\n\t\t\tv.validateStruct(current, current, typ, ns, structNs, ct)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !ct.hasTag {\n\t\treturn\n\t}\n\n\ttyp = current.Type()\n\nOUTER:\n\tfor {\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch ct.typeof {\n\n\t\tcase typeOmitEmpty:\n\n\t\t\t\/\/ set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.flParam = \"\"\n\n\t\t\tif !nullable && !hasValue(v) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t\tcontinue\n\n\t\tcase typeDive:\n\n\t\t\tct = ct.next\n\n\t\t\t\/\/ traverse slice or map here\n\t\t\t\/\/ or panic ;)\n\t\t\tswitch kind {\n\t\t\tcase reflect.Slice, reflect.Array:\n\n\t\t\t\tvar i64 int64\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor i := 0; i < current.Len(); i++ {\n\n\t\t\t\t\ti64 = int64(i)\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tv.traverseField(parent, current.Index(i), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tcase reflect.Map:\n\n\t\t\t\tvar pv string\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor _, key := range current.MapKeys() {\n\n\t\t\t\t\tpv = fmt.Sprintf(\"%v\", key.Interface())\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tv.traverseField(parent, current.MapIndex(key), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\t\/\/ throw error, if not a slice or map then should not have gotten here\n\t\t\t\t\/\/ bad dive tag\n\t\t\t\tpanic(\"dive error! can't dive on a non slice or map\")\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase typeOr:\n\n\t\t\tv.misc = v.misc[0:0]\n\n\t\t\tfor {\n\n\t\t\t\t\/\/ set Field Level fields\n\t\t\t\tv.slflParent = parent\n\t\t\t\tv.flField = current\n\t\t\t\tv.flParam = ct.param\n\n\t\t\t\tif ct.fn(v) {\n\n\t\t\t\t\t\/\/ drain rest of the 'or' values, then continue or leave\n\t\t\t\t\tfor {\n\n\t\t\t\t\t\tct = ct.next\n\n\t\t\t\t\t\tif ct == nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.typeof != typeOr {\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tv.misc = append(v.misc, '|')\n\t\t\t\tv.misc = append(v.misc, ct.tag...)\n\n\t\t\t\tif ct.next == nil {\n\t\t\t\t\t\/\/ if we get here, no valid 'or' value and no more tags\n\n\t\t\t\t\tif ct.hasAlias {\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.actualAliasTag,\n\t\t\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\ttVal := string(v.misc)[1:]\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\ttag: tVal,\n\t\t\t\t\t\t\t\tactualTag: tVal,\n\t\t\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\tdefault:\n\n\t\t\t\/\/ set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.flParam = ct.param\n\n\t\t\tif !ct.fn(v) {\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: string(append(ns, cf.altName...)),\n\t\t\t\t\t\tstructNs: string(append(structNs, cf.name...)),\n\t\t\t\t\t\tfield: cf.altName,\n\t\t\t\t\t\tstructField: cf.name,\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package inventory\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/lflux\/eve-sdeloader\/utils\"\n)\n\nvar (\n\tinvCols = []string{\n\t\t\"typeid\",\n\t\t\"groupid\",\n\t\t\"typename\",\n\t\t\"description\",\n\t\t\"mass\",\n\t\t\"volume\",\n\t\t\"capacity\",\n\t\t\"portionsize\",\n\t\t\"raceid\",\n\t\t\"published\",\n\t\t\"marketgroupid\",\n\t\t\"graphicid\",\n\t\t\"iconid\",\n\t\t\"soundid\",\n\t}\n)\n\nvar TypeIDs map[string]*Type\n\nfunc InsertBonuses(stmt, insertTranslations *sql.Stmt, typeID string, skillID int64, bonuses []Bonus) error {\n\tfor _, bonus := range bonuses {\n\t\tvar traitID int\n\t\terr := stmt.QueryRow(typeID,\n\t\t\tskillID,\n\t\t\tbonus.Amount,\n\t\t\tbonus.UnitID,\n\t\t\tbonus.BonusText[\"en\"]).Scan(&traitID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor lang, val := range bonus.BonusText {\n\t\t\t_, err = insertTranslations.Exec(1002, traitID, lang, val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc InsertInvTypeStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\t\/\/ TODO investigate if we can perform multiple CopyIn in the same transaction\n\t\/\/ return txn.Prepare(pq.CopyIn(\"invtypes\", invCols...))\n\n\treturn tx.Prepare(fmt.Sprintf(`INSERT INTO invtypes (%s) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,\n\t\tstrings.Join(invCols, \",\")))\n}\n\nfunc InsertCertMasteryStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\treturn tx.Prepare(`INSERT INTO certmasteries (typeid, masterylevel, certid) VALUES ($1, $2, $3)`)\n}\n\nfunc InsertTraitStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\treturn tx.Prepare(`INSERT INTO invtraits (typeid, skillid, bonus, unitid, bonustext) VALUES ($1, $2, $3, $4, $5) RETURNING traitid`)\n}\n\n\/\/ Import imports from a reader containing typeID YAML to the table `invtypes`\nfunc Import(db *sql.DB, r io.Reader) error {\n\tentries := make(map[string]*Type)\n\n\terr := utils.LoadFromReader(r, entries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tTypeIDs = entries\n\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinvStmt, err := InsertInvTypeStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmastStmt, err := InsertCertMasteryStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttraitStmt, err := InsertTraitStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsertTranslations, err := utils.InsertTrnTranslations(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor typeID, entry := range entries {\n\t\tvals := []interface{}{\n\t\t\ttypeID,\n\t\t\tentry.GroupID,\n\t\t\tentry.Name[\"en\"],\n\t\t\tentry.Description[\"en\"],\n\t\t\tentry.Mass,\n\t\t\tentry.Volume,\n\t\t\tentry.Capacity,\n\t\t\tentry.PortionSize,\n\t\t\tentry.RaceID,\n\t\t\tentry.Published,\n\t\t\tentry.MarketGroupID,\n\t\t\tentry.GraphicID,\n\t\t\tentry.IconID,\n\t\t\tentry.SoundID,\n\t\t}\n\t\t_, err = invStmt.Exec(vals...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor level, masteries := range entry.Masteries {\n\t\t\tfor _, certID := range masteries {\n\t\t\t\t_, err = mastStmt.Exec(typeID, level, certID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif entry.Traits != nil {\n\t\t\tfor skill, typeBonus := range entry.Traits.Types {\n\t\t\t\terr = InsertBonuses(traitStmt, insertTranslations, typeID, skill, typeBonus)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = InsertBonuses(traitStmt, insertTranslations, typeID, -1, entry.Traits.RoleBonuses)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\tif len(entry.Name) > 0 {\n\t\t\tfor lang, val := range entry.Name {\n\t\t\t\t_, err = insertTranslations.Exec(8, typeID, lang, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(entry.Description) > 0 {\n\t\t\tfor lang, val := range entry.Description {\n\t\t\t\t_, err = insertTranslations.Exec(33, typeID, lang, val)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\terr = invStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mastStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = traitStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn txn.Commit()\n}\n<commit_msg>Import baseprice for inventory.<commit_after>package inventory\n\nimport (\n\t\"database\/sql\"\n\t\"io\"\n\n\t\"github.com\/lflux\/eve-sdeloader\/utils\"\n)\n\nvar TypeIDs map[string]*Type\n\nfunc InsertBonuses(stmt, insertTranslations *sql.Stmt, typeID string, skillID int64, bonuses []Bonus) error {\n\tfor _, bonus := range bonuses {\n\t\tvar traitID int\n\t\terr := stmt.QueryRow(typeID,\n\t\t\tskillID,\n\t\t\tbonus.Amount,\n\t\t\tbonus.UnitID,\n\t\t\tbonus.BonusText[\"en\"]).Scan(&traitID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor lang, val := range bonus.BonusText {\n\t\t\t_, err = insertTranslations.Exec(1002, traitID, lang, val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc InsertInvTypeStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\tstmt := `INSERT INTO invtypes (\n\ttypeid,\n\tgroupid,\n\ttypename,\n\tdescription,\n\tmass,\n\tvolume,\n\tcapacity,\n\tportionsize,\n\traceid,\n\tbaseprice,\n\tpublished,\n\tmarketgroupid,\n\tgraphicid,\n\ticonid,\n\tsoundid\n) VALUES (\n\t$1, $2, $3, $4, $5,\n\t$6, $7, $8, $9, $10,\n\t$11, $12, $13, $14, $15)`\n\n\treturn tx.Prepare(stmt)\n}\n\nfunc InsertCertMasteryStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\treturn tx.Prepare(`INSERT INTO certmasteries (typeid, masterylevel, certid) VALUES ($1, $2, $3)`)\n}\n\nfunc InsertTraitStatement(tx *sql.Tx) (*sql.Stmt, error) {\n\treturn tx.Prepare(`INSERT INTO invtraits (typeid, skillid, bonus, unitid, bonustext) VALUES ($1, $2, $3, $4, $5) RETURNING traitid`)\n}\n\n\/\/ Import imports from a reader containing typeID YAML to the table `invtypes`\nfunc Import(db *sql.DB, r io.Reader) error {\n\tentries := make(map[string]*Type)\n\n\terr := utils.LoadFromReader(r, entries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tTypeIDs = entries\n\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinvStmt, err := InsertInvTypeStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmastStmt, err := InsertCertMasteryStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttraitStmt, err := InsertTraitStatement(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsertTranslations, err := utils.InsertTrnTranslations(txn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor typeID, entry := range entries {\n\t\tvals := []interface{}{\n\t\t\ttypeID,\n\t\t\tentry.GroupID,\n\t\t\tentry.Name[\"en\"],\n\t\t\tentry.Description[\"en\"],\n\t\t\tentry.Mass,\n\t\t\tentry.Volume,\n\t\t\tentry.Capacity,\n\t\t\tentry.PortionSize,\n\t\t\tentry.RaceID,\n\t\t\tentry.BasePrice,\n\t\t\tentry.Published,\n\t\t\tentry.MarketGroupID,\n\t\t\tentry.GraphicID,\n\t\t\tentry.IconID,\n\t\t\tentry.SoundID,\n\t\t}\n\t\t_, err = invStmt.Exec(vals...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor level, masteries := range entry.Masteries {\n\t\t\tfor _, certID := range masteries {\n\t\t\t\t_, err = mastStmt.Exec(typeID, level, certID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif entry.Traits != nil {\n\t\t\tfor skill, typeBonus := range entry.Traits.Types {\n\t\t\t\terr = InsertBonuses(traitStmt, insertTranslations, typeID, skill, typeBonus)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = InsertBonuses(traitStmt, insertTranslations, typeID, -1, entry.Traits.RoleBonuses)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(entry.Name) > 0 {\n\t\t\tfor lang, val := range entry.Name {\n\t\t\t\t_, err = insertTranslations.Exec(8, typeID, lang, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(entry.Description) > 0 {\n\t\t\tfor lang, val := range entry.Description {\n\t\t\t\t_, err = insertTranslations.Exec(33, typeID, lang, val)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\terr = invStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mastStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = traitStmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn txn.Commit()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage app\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/mount-utils\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/sysctl\"\n)\n\n\/\/ Conntracker is an interface to the global sysctl. Descriptions of the various\n\/\/ sysctl fields can be found here:\n\/\/\n\/\/ https:\/\/www.kernel.org\/doc\/Documentation\/networking\/nf_conntrack-sysctl.txt\ntype Conntracker interface {\n\t\/\/ SetMax adjusts nf_conntrack_max.\n\tSetMax(max int) error\n\t\/\/ SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established.\n\tSetTCPEstablishedTimeout(seconds int) error\n\t\/\/ SetTCPCloseWaitTimeout nf_conntrack_tcp_timeout_close_wait.\n\tSetTCPCloseWaitTimeout(seconds int) error\n}\n\ntype realConntracker struct{}\n\nvar errReadOnlySysFS = errors.New(\"readOnlySysFS\")\n\nfunc (rct realConntracker) SetMax(max int) error {\n\tif err := rct.setIntSysCtl(\"nf_conntrack_max\", max); err != nil {\n\t\treturn err\n\t}\n\tklog.InfoS(\"Setting nf_conntrack_max\", \"nf_conntrack_max\", max)\n\n\t\/\/ Linux does not support writing to \/sys\/module\/nf_conntrack\/parameters\/hashsize\n\t\/\/ when the writer process is not in the initial network namespace\n\t\/\/ (https:\/\/github.com\/torvalds\/linux\/blob\/v4.10\/net\/netfilter\/nf_conntrack_core.c#L1795-L1796).\n\t\/\/ Usually that's fine. But in some configurations such as with github.com\/kinvolk\/kubeadm-nspawn,\n\t\/\/ kube-proxy is in another netns.\n\t\/\/ Therefore, check if writing in hashsize is necessary and skip the writing if not.\n\thashsize, err := readIntStringFile(\"\/sys\/module\/nf_conntrack\/parameters\/hashsize\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif hashsize >= (max \/ 4) {\n\t\treturn nil\n\t}\n\n\t\/\/ sysfs is expected to be mounted as 'rw'. However, it may be\n\t\/\/ unexpectedly mounted as 'ro' by docker because of a known docker\n\t\/\/ issue (https:\/\/github.com\/docker\/docker\/issues\/24000). Setting\n\t\/\/ conntrack will fail when sysfs is readonly. When that happens, we\n\t\/\/ don't set conntrack hashsize and return a special error\n\t\/\/ errReadOnlySysFS here. The caller should deal with\n\t\/\/ errReadOnlySysFS differently.\n\twritable, err := isSysFSWritable()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !writable {\n\t\treturn errReadOnlySysFS\n\t}\n\t\/\/ TODO: generify this and sysctl to a new sysfs.WriteInt()\n\tklog.InfoS(\"Setting conntrack hashsize\", \"conntrack hashsize\", max\/4)\n\treturn writeIntStringFile(\"\/sys\/module\/nf_conntrack\/parameters\/hashsize\", max\/4)\n}\n\nfunc (rct realConntracker) SetTCPEstablishedTimeout(seconds int) error {\n\treturn rct.setIntSysCtl(\"nf_conntrack_tcp_timeout_established\", seconds)\n}\n\nfunc (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error {\n\treturn rct.setIntSysCtl(\"nf_conntrack_tcp_timeout_close_wait\", seconds)\n}\n\nfunc (realConntracker) setIntSysCtl(name string, value int) error {\n\tentry := \"net\/netfilter\/\" + name\n\n\tsys := sysctl.New()\n\tif val, _ := sys.GetSysctl(entry); val != value {\n\t\tklog.InfoS(\"Set sysctl\", \"entry\", entry, \"value\", value)\n\t\tif err := sys.SetSysctl(entry, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ isSysFSWritable checks \/proc\/mounts to see whether sysfs is 'rw' or not.\nfunc isSysFSWritable() (bool, error) {\n\tconst permWritable = \"rw\"\n\tconst sysfsDevice = \"sysfs\"\n\tm := mount.New(\"\" \/* default mount path *\/)\n\tmountPoints, err := m.List()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"Failed to list mount points\")\n\t\treturn false, err\n\t}\n\n\tfor _, mountPoint := range mountPoints {\n\t\tif mountPoint.Type != sysfsDevice {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check whether sysfs is 'rw'\n\t\tif len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable {\n\t\t\treturn true, nil\n\t\t}\n\t\tklog.ErrorS(nil, \"Sysfs is not writable\", \"mountPoint\", mountPoint, \"mountOptions\", mountPoint.Opts)\n\t\treturn false, errReadOnlySysFS\n\t}\n\n\treturn false, errors.New(\"no sysfs mounted\")\n}\n\nfunc readIntStringFile(filename string) (int, error) {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn strconv.Atoi(strings.TrimSpace(string(b)))\n}\n\nfunc writeIntStringFile(filename string, value int) error {\n\treturn ioutil.WriteFile(filename, []byte(strconv.Itoa(value)), 0640)\n}\n<commit_msg>Do not attempt to overwrite higher system (sysctl) values<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage app\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/mount-utils\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/sysctl\"\n)\n\n\/\/ Conntracker is an interface to the global sysctl. Descriptions of the various\n\/\/ sysctl fields can be found here:\n\/\/\n\/\/ https:\/\/www.kernel.org\/doc\/Documentation\/networking\/nf_conntrack-sysctl.txt\ntype Conntracker interface {\n\t\/\/ SetMax adjusts nf_conntrack_max.\n\tSetMax(max int) error\n\t\/\/ SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established.\n\tSetTCPEstablishedTimeout(seconds int) error\n\t\/\/ SetTCPCloseWaitTimeout nf_conntrack_tcp_timeout_close_wait.\n\tSetTCPCloseWaitTimeout(seconds int) error\n}\n\ntype realConntracker struct{}\n\nvar errReadOnlySysFS = errors.New(\"readOnlySysFS\")\n\nfunc (rct realConntracker) SetMax(max int) error {\n\tif err := rct.setIntSysCtl(\"nf_conntrack_max\", max); err != nil {\n\t\treturn err\n\t}\n\tklog.InfoS(\"Setting nf_conntrack_max\", \"nf_conntrack_max\", max)\n\n\t\/\/ Linux does not support writing to \/sys\/module\/nf_conntrack\/parameters\/hashsize\n\t\/\/ when the writer process is not in the initial network namespace\n\t\/\/ (https:\/\/github.com\/torvalds\/linux\/blob\/v4.10\/net\/netfilter\/nf_conntrack_core.c#L1795-L1796).\n\t\/\/ Usually that's fine. But in some configurations such as with github.com\/kinvolk\/kubeadm-nspawn,\n\t\/\/ kube-proxy is in another netns.\n\t\/\/ Therefore, check if writing in hashsize is necessary and skip the writing if not.\n\thashsize, err := readIntStringFile(\"\/sys\/module\/nf_conntrack\/parameters\/hashsize\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif hashsize >= (max \/ 4) {\n\t\treturn nil\n\t}\n\n\t\/\/ sysfs is expected to be mounted as 'rw'. However, it may be\n\t\/\/ unexpectedly mounted as 'ro' by docker because of a known docker\n\t\/\/ issue (https:\/\/github.com\/docker\/docker\/issues\/24000). Setting\n\t\/\/ conntrack will fail when sysfs is readonly. When that happens, we\n\t\/\/ don't set conntrack hashsize and return a special error\n\t\/\/ errReadOnlySysFS here. The caller should deal with\n\t\/\/ errReadOnlySysFS differently.\n\twritable, err := isSysFSWritable()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !writable {\n\t\treturn errReadOnlySysFS\n\t}\n\t\/\/ TODO: generify this and sysctl to a new sysfs.WriteInt()\n\tklog.InfoS(\"Setting conntrack hashsize\", \"conntrack hashsize\", max\/4)\n\treturn writeIntStringFile(\"\/sys\/module\/nf_conntrack\/parameters\/hashsize\", max\/4)\n}\n\nfunc (rct realConntracker) SetTCPEstablishedTimeout(seconds int) error {\n\treturn rct.setIntSysCtl(\"nf_conntrack_tcp_timeout_established\", seconds)\n}\n\nfunc (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error {\n\treturn rct.setIntSysCtl(\"nf_conntrack_tcp_timeout_close_wait\", seconds)\n}\n\nfunc (realConntracker) setIntSysCtl(name string, value int) error {\n\tentry := \"net\/netfilter\/\" + name\n\n\tsys := sysctl.New()\n\tif val, _ := sys.GetSysctl(entry); val != value && val < value {\n\t\tklog.InfoS(\"Set sysctl\", \"entry\", entry, \"value\", value)\n\t\tif err := sys.SetSysctl(entry, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ isSysFSWritable checks \/proc\/mounts to see whether sysfs is 'rw' or not.\nfunc isSysFSWritable() (bool, error) {\n\tconst permWritable = \"rw\"\n\tconst sysfsDevice = \"sysfs\"\n\tm := mount.New(\"\" \/* default mount path *\/)\n\tmountPoints, err := m.List()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"Failed to list mount points\")\n\t\treturn false, err\n\t}\n\n\tfor _, mountPoint := range mountPoints {\n\t\tif mountPoint.Type != sysfsDevice {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check whether sysfs is 'rw'\n\t\tif len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable {\n\t\t\treturn true, nil\n\t\t}\n\t\tklog.ErrorS(nil, \"Sysfs is not writable\", \"mountPoint\", mountPoint, \"mountOptions\", mountPoint.Opts)\n\t\treturn false, errReadOnlySysFS\n\t}\n\n\treturn false, errors.New(\"no sysfs mounted\")\n}\n\nfunc readIntStringFile(filename string) (int, error) {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn strconv.Atoi(strings.TrimSpace(string(b)))\n}\n\nfunc writeIntStringFile(filename string, value int) error {\n\treturn ioutil.WriteFile(filename, []byte(strconv.Itoa(value)), 0640)\n}\n<|endoftext|>"} {"text":"<commit_before>package wfe\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tmrand \"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/letsencrypt\/boulder\/Godeps\/_workspace\/src\/github.com\/letsencrypt\/go-jose\"\n\t\"github.com\/letsencrypt\/boulder\/cmd\/load-generator\/latency\"\n)\n\ntype RatePeriod struct {\n\tFor time.Duration\n\tRate int64\n}\n\ntype registration struct {\n\tkey *rsa.PrivateKey\n\tsigner jose.Signer\n\tiMu *sync.RWMutex\n\tauths []string\n\tcerts []string\n}\n\ntype State struct {\n\trMu *sync.RWMutex\n\tregs []*registration\n\tmaxRegs int\n\tclient *http.Client\n\tapiBase string\n\ttermsURL string\n\n\twarmupRegs int\n\twarmupRate int\n\n\trealIP string\n\n\tnMu *sync.RWMutex\n\tnoncePool []string\n\n\tthroughput int64\n\n\tchallRPCAddr string\n\n\tcertKey *rsa.PrivateKey\n\tdomainBase string\n\n\tcallLatency *latency.Map\n\n\truntime time.Duration\n\n\tchallSrvProc *os.Process\n\n\twg *sync.WaitGroup\n\n\trunPlan []RatePeriod\n}\n\ntype rawRegistration struct {\n\tCerts []string `json:\"certs\"`\n\tAuths []string `json:\"auths\"`\n\tRawKey []byte `json:\"rawKey\"`\n}\n\ntype snapshot struct {\n\tRegistrations []rawRegistration\n}\n\nfunc (s *State) Snapshot() ([]byte, error) {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\tsnap := snapshot{}\n\trawRegs := []rawRegistration{}\n\tfor _, r := range s.regs {\n\t\trawRegs = append(rawRegs, rawRegistration{\n\t\t\tCerts: r.certs,\n\t\t\tAuths: r.auths,\n\t\t\tRawKey: x509.MarshalPKCS1PrivateKey(r.key),\n\t\t})\n\t}\n\treturn json.Marshal(snap)\n}\n\nfunc (s *State) Restore(content []byte) error {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\tsnap := snapshot{}\n\terr := json.Unmarshal(content, &snap)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, r := range snap.Registrations {\n\t\tkey, err := x509.ParsePKCS1PrivateKey(r.RawKey)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tsigner, err := jose.NewSigner(jose.RS256, key)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.regs = append(s.regs, ®istration{\n\t\t\tkey: key,\n\t\t\tsigner: signer,\n\t\t\tcerts: r.Certs,\n\t\t\tauths: r.Auths,\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc New(rpcAddr string, apiBase string, rate int, keySize int, domainBase string, runtime time.Duration, termsURL string, realIP string, runPlan []RatePeriod, maxRegs, warmupRegs, warmupRate int) (*State, error) {\n\tcertKey, err := rsa.GenerateKey(rand.Reader, keySize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 0,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t\tDisableKeepAlives: true,\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t},\n\t}\n\treturn &State{\n\t\trMu: new(sync.RWMutex),\n\t\tnMu: new(sync.RWMutex),\n\t\tchallRPCAddr: rpcAddr,\n\t\tclient: client,\n\t\tapiBase: apiBase,\n\t\tthroughput: int64(rate),\n\t\tcertKey: certKey,\n\t\tdomainBase: domainBase,\n\t\tcallLatency: latency.New(fmt.Sprintf(\"WFE -- %s test at %d base actions \/ second\", runtime, rate)),\n\t\truntime: runtime,\n\t\ttermsURL: termsURL,\n\t\twg: new(sync.WaitGroup),\n\t\trealIP: realIP,\n\t\trunPlan: runPlan,\n\t\tmaxRegs: maxRegs,\n\t\twarmupRate: warmupRate,\n\t\twarmupRegs: warmupRegs,\n\t}, nil\n}\n\nfunc (s *State) executePlan() {\n\tfor _, p := range s.runPlan {\n\t\tatomic.StoreInt64(&s.throughput, p.Rate)\n\t\tfmt.Printf(\"Set base action rate to %d\/s for %s\\n\", p.Rate, p.For)\n\t\ttime.Sleep(p.For)\n\t}\n}\n\nfunc (s *State) warmup() {\n\tfmt.Printf(\"Beginning warmup, generating %d registrations at %d\/s\\n\", s.warmupRegs, s.warmupRate)\n\twg := new(sync.WaitGroup)\n\tfor {\n\t\ts.rMu.RLock()\n\t\tif len(s.regs) >= s.warmupRegs {\n\t\t\tbreak\n\t\t}\n\t\ts.rMu.RUnlock()\n\n\t\tgo func() {\n\t\t\twg.Add(1)\n\t\t\ts.newRegistration(nil)\n\t\t\twg.Done()\n\t\t}()\n\t\ttime.Sleep(time.Second \/ time.Duration(s.warmupRate))\n\t}\n\twg.Wait()\n\tfmt.Println(\"Finished warming up\")\n}\n\nfunc (s *State) Run(binName string, dontRunChallSrv bool, httpOneAddr string) error {\n\ts.callLatency.Started = time.Now()\n\t\/\/ If warmup, warmup\n\tif s.warmupRegs > 0 {\n\t\ts.warmup()\n\t}\n\n\t\/\/ Start chall server process\n\tif !dontRunChallSrv {\n\t\tcmd := exec.Command(binName, \"chall-srv\", \"--rpcAddr=\"+s.challRPCAddr, \"--httpOneAddr=\"+httpOneAddr)\n\t\terr := cmd.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.challSrvProc = cmd.Process\n\t}\n\n\t\/\/ If their is a run plan execute it\n\tif len(s.runPlan) > 0 {\n\t\tgo s.executePlan()\n\t}\n\n\t\/\/ Run sending loop\n\tstop := make(chan bool, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ts.wg.Add(1)\n\t\t\t\tgo s.sendCall()\n\t\t\t\ttime.Sleep(time.Duration(time.Second.Nanoseconds() \/ atomic.LoadInt64(&s.throughput)))\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(s.runtime)\n\tfmt.Println(\"READ END\")\n\tstop <- true\n\tfmt.Println(\"SENT STOP\")\n\ts.wg.Wait()\n\tfmt.Println(\"KILLING CHALL SERVER\")\n\terr := s.challSrvProc.Kill()\n\tif err != nil {\n\t\tfmt.Printf(\"Error killing challenge server: %s\\n\", err)\n\t}\n\tfmt.Println(\"ALL DONE\")\n\ts.callLatency.Stopped = time.Now()\n\treturn nil\n}\n\nfunc (s *State) Dump(jsonPath string) error {\n\tif jsonPath != \"\" {\n\t\tdata, err := json.Marshal(s.callLatency)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = ioutil.WriteFile(jsonPath, data, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ HTTP utils\n\nfunc (s *State) post(endpoint string, payload []byte) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(payload))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Real-IP\", s.realIP)\n\tresp, err := s.client.Do(req)\n\tif resp != nil {\n\t\tif newNonce := resp.Header.Get(\"Replay-Nonce\"); newNonce != \"\" {\n\t\t\ts.addNonce(newNonce)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/ Nonce utils, these methods are used to generate\/store\/retrieve the nonces\n\/\/ required for the required form of JWS\n\nfunc (s *State) signWithNonce(endpoint string, alwaysNew bool, payload []byte, signer jose.Signer) ([]byte, error) {\n\tnonce, err := s.getNonce(endpoint, alwaysNew)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjws, err := signer.Sign(payload, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(jws)\n}\n\nfunc (s *State) getNonce(from string, alwaysNew bool) (string, error) {\n\ts.nMu.RLock()\n\tif len(s.noncePool) == 0 || alwaysNew {\n\t\ts.nMu.RUnlock()\n\t\tstarted := time.Now()\n\t\tresp, err := s.client.Head(fmt.Sprintf(\"%s%s\", s.apiBase, from))\n\t\tfinished := time.Now()\n\t\tstate := \"good\"\n\t\tdefer func() { s.callLatency.Add(fmt.Sprintf(\"HEAD %s\", from), started, finished, state) }()\n\t\tif err != nil {\n\t\t\tstate = \"error\"\n\t\t\treturn \"\", err\n\t\t}\n\t\tif nonce := resp.Header.Get(\"Replay-Nonce\"); nonce != \"\" {\n\t\t\treturn nonce, nil\n\t\t}\n\t\tstate = \"error\"\n\t\treturn \"\", fmt.Errorf(\"Nonce header not supplied!\")\n\t}\n\ts.nMu.RUnlock()\n\ts.nMu.Lock()\n\tdefer s.nMu.Unlock()\n\tnonce := s.noncePool[0]\n\ts.noncePool = s.noncePool[1:]\n\treturn nonce, nil\n}\n\nfunc (s *State) addNonce(nonce string) {\n\ts.nMu.Lock()\n\tdefer s.nMu.Unlock()\n\ts.noncePool = append(s.noncePool, nonce)\n}\n\n\/\/ Reg object utils, used to add and randomly retrieve registration objects\n\nfunc (s *State) addReg(reg *registration) {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\ts.regs = append(s.regs, reg)\n}\n\nfunc (s *State) getRandReg() (*registration, bool) {\n\tregsLength := len(s.regs)\n\tif regsLength == 0 {\n\t\treturn nil, false\n\t}\n\treturn s.regs[mrand.Intn(regsLength)], true\n}\n\nfunc (s *State) getReg() (*registration, bool) {\n\ts.rMu.RLock()\n\tdefer s.rMu.RUnlock()\n\treturn s.getRandReg()\n}\n\n\/\/ Call sender, it sends the calls!\n\ntype probabilityProfile struct {\n\tprob int\n\taction func(*registration)\n}\n\nfunc weightedCall(setup []probabilityProfile) func(*registration) {\n\tchoices := make(map[int]func(*registration))\n\tn := 0\n\tfor _, pp := range setup {\n\t\tfor i := 0; i < pp.prob; i++ {\n\t\t\tchoices[i+n] = pp.action\n\t\t}\n\t\tn += pp.prob\n\t}\n\tif len(choices) == 0 {\n\t\treturn nil\n\t}\n\n\treturn choices[mrand.Intn(n)]\n}\n\nfunc (s *State) sendCall() {\n\tactionList := []probabilityProfile{}\n\ts.rMu.RLock()\n\tif len(s.regs) < s.maxRegs {\n\t\tactionList = append(actionList, probabilityProfile{1, s.newRegistration})\n\t}\n\ts.rMu.RUnlock()\n\n\treg, found := s.getReg()\n\tif found {\n\t\tactionList = append(actionList, probabilityProfile{3, s.newAuthorization})\n\t\treg.iMu.RLock()\n\t\tif len(reg.auths) > 0 {\n\t\t\tactionList = append(actionList, probabilityProfile{4, s.newCertificate})\n\t\t}\n\t\tif len(reg.certs) > 0 {\n\t\t\tactionList = append(actionList, probabilityProfile{2, s.revokeCertificate})\n\t\t}\n\t\treg.iMu.RUnlock()\n\t}\n\n\tweightedCall(actionList)(reg)\n\ts.wg.Done()\n}\n<commit_msg>Move Add out of go func<commit_after>package wfe\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tmrand \"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/letsencrypt\/boulder\/Godeps\/_workspace\/src\/github.com\/letsencrypt\/go-jose\"\n\t\"github.com\/letsencrypt\/boulder\/cmd\/load-generator\/latency\"\n)\n\ntype RatePeriod struct {\n\tFor time.Duration\n\tRate int64\n}\n\ntype registration struct {\n\tkey *rsa.PrivateKey\n\tsigner jose.Signer\n\tiMu *sync.RWMutex\n\tauths []string\n\tcerts []string\n}\n\ntype State struct {\n\trMu *sync.RWMutex\n\tregs []*registration\n\tmaxRegs int\n\tclient *http.Client\n\tapiBase string\n\ttermsURL string\n\n\twarmupRegs int\n\twarmupRate int\n\n\trealIP string\n\n\tnMu *sync.RWMutex\n\tnoncePool []string\n\n\tthroughput int64\n\n\tchallRPCAddr string\n\n\tcertKey *rsa.PrivateKey\n\tdomainBase string\n\n\tcallLatency *latency.Map\n\n\truntime time.Duration\n\n\tchallSrvProc *os.Process\n\n\twg *sync.WaitGroup\n\n\trunPlan []RatePeriod\n}\n\ntype rawRegistration struct {\n\tCerts []string `json:\"certs\"`\n\tAuths []string `json:\"auths\"`\n\tRawKey []byte `json:\"rawKey\"`\n}\n\ntype snapshot struct {\n\tRegistrations []rawRegistration\n}\n\nfunc (s *State) Snapshot() ([]byte, error) {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\tsnap := snapshot{}\n\trawRegs := []rawRegistration{}\n\tfor _, r := range s.regs {\n\t\trawRegs = append(rawRegs, rawRegistration{\n\t\t\tCerts: r.certs,\n\t\t\tAuths: r.auths,\n\t\t\tRawKey: x509.MarshalPKCS1PrivateKey(r.key),\n\t\t})\n\t}\n\treturn json.Marshal(snap)\n}\n\nfunc (s *State) Restore(content []byte) error {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\tsnap := snapshot{}\n\terr := json.Unmarshal(content, &snap)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, r := range snap.Registrations {\n\t\tkey, err := x509.ParsePKCS1PrivateKey(r.RawKey)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tsigner, err := jose.NewSigner(jose.RS256, key)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.regs = append(s.regs, ®istration{\n\t\t\tkey: key,\n\t\t\tsigner: signer,\n\t\t\tcerts: r.Certs,\n\t\t\tauths: r.Auths,\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc New(rpcAddr string, apiBase string, rate int, keySize int, domainBase string, runtime time.Duration, termsURL string, realIP string, runPlan []RatePeriod, maxRegs, warmupRegs, warmupRate int) (*State, error) {\n\tcertKey, err := rsa.GenerateKey(rand.Reader, keySize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 0,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t\tDisableKeepAlives: true,\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t},\n\t}\n\treturn &State{\n\t\trMu: new(sync.RWMutex),\n\t\tnMu: new(sync.RWMutex),\n\t\tchallRPCAddr: rpcAddr,\n\t\tclient: client,\n\t\tapiBase: apiBase,\n\t\tthroughput: int64(rate),\n\t\tcertKey: certKey,\n\t\tdomainBase: domainBase,\n\t\tcallLatency: latency.New(fmt.Sprintf(\"WFE -- %s test at %d base actions \/ second\", runtime, rate)),\n\t\truntime: runtime,\n\t\ttermsURL: termsURL,\n\t\twg: new(sync.WaitGroup),\n\t\trealIP: realIP,\n\t\trunPlan: runPlan,\n\t\tmaxRegs: maxRegs,\n\t\twarmupRate: warmupRate,\n\t\twarmupRegs: warmupRegs,\n\t}, nil\n}\n\nfunc (s *State) executePlan() {\n\tfor _, p := range s.runPlan {\n\t\tatomic.StoreInt64(&s.throughput, p.Rate)\n\t\tfmt.Printf(\"Set base action rate to %d\/s for %s\\n\", p.Rate, p.For)\n\t\ttime.Sleep(p.For)\n\t}\n}\n\nfunc (s *State) warmup() {\n\tfmt.Printf(\"Beginning warmup, generating %d registrations at %d\/s\\n\", s.warmupRegs, s.warmupRate)\n\twg := new(sync.WaitGroup)\n\tfor {\n\t\ts.rMu.RLock()\n\t\tif len(s.regs) >= s.warmupRegs {\n\t\t\tbreak\n\t\t}\n\t\ts.rMu.RUnlock()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\ts.newRegistration(nil)\n\t\t\twg.Done()\n\t\t}()\n\t\ttime.Sleep(time.Second \/ time.Duration(s.warmupRate))\n\t}\n\twg.Wait()\n\tfmt.Println(\"Finished warming up\")\n}\n\nfunc (s *State) Run(binName string, dontRunChallSrv bool, httpOneAddr string) error {\n\ts.callLatency.Started = time.Now()\n\t\/\/ If warmup, warmup\n\tif s.warmupRegs > 0 {\n\t\ts.warmup()\n\t}\n\n\t\/\/ Start chall server process\n\tif !dontRunChallSrv {\n\t\tcmd := exec.Command(binName, \"chall-srv\", \"--rpcAddr=\"+s.challRPCAddr, \"--httpOneAddr=\"+httpOneAddr)\n\t\terr := cmd.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.challSrvProc = cmd.Process\n\t}\n\n\t\/\/ If their is a run plan execute it\n\tif len(s.runPlan) > 0 {\n\t\tgo s.executePlan()\n\t}\n\n\t\/\/ Run sending loop\n\tstop := make(chan bool, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ts.wg.Add(1)\n\t\t\t\tgo s.sendCall()\n\t\t\t\ttime.Sleep(time.Duration(time.Second.Nanoseconds() \/ atomic.LoadInt64(&s.throughput)))\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(s.runtime)\n\tfmt.Println(\"READ END\")\n\tstop <- true\n\tfmt.Println(\"SENT STOP\")\n\ts.wg.Wait()\n\tfmt.Println(\"KILLING CHALL SERVER\")\n\terr := s.challSrvProc.Kill()\n\tif err != nil {\n\t\tfmt.Printf(\"Error killing challenge server: %s\\n\", err)\n\t}\n\tfmt.Println(\"ALL DONE\")\n\ts.callLatency.Stopped = time.Now()\n\treturn nil\n}\n\nfunc (s *State) Dump(jsonPath string) error {\n\tif jsonPath != \"\" {\n\t\tdata, err := json.Marshal(s.callLatency)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = ioutil.WriteFile(jsonPath, data, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ HTTP utils\n\nfunc (s *State) post(endpoint string, payload []byte) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", endpoint, bytes.NewBuffer(payload))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Real-IP\", s.realIP)\n\tresp, err := s.client.Do(req)\n\tif resp != nil {\n\t\tif newNonce := resp.Header.Get(\"Replay-Nonce\"); newNonce != \"\" {\n\t\t\ts.addNonce(newNonce)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\n\/\/ Nonce utils, these methods are used to generate\/store\/retrieve the nonces\n\/\/ required for the required form of JWS\n\nfunc (s *State) signWithNonce(endpoint string, alwaysNew bool, payload []byte, signer jose.Signer) ([]byte, error) {\n\tnonce, err := s.getNonce(endpoint, alwaysNew)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjws, err := signer.Sign(payload, nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(jws)\n}\n\nfunc (s *State) getNonce(from string, alwaysNew bool) (string, error) {\n\ts.nMu.RLock()\n\tif len(s.noncePool) == 0 || alwaysNew {\n\t\ts.nMu.RUnlock()\n\t\tstarted := time.Now()\n\t\tresp, err := s.client.Head(fmt.Sprintf(\"%s%s\", s.apiBase, from))\n\t\tfinished := time.Now()\n\t\tstate := \"good\"\n\t\tdefer func() { s.callLatency.Add(fmt.Sprintf(\"HEAD %s\", from), started, finished, state) }()\n\t\tif err != nil {\n\t\t\tstate = \"error\"\n\t\t\treturn \"\", err\n\t\t}\n\t\tif nonce := resp.Header.Get(\"Replay-Nonce\"); nonce != \"\" {\n\t\t\treturn nonce, nil\n\t\t}\n\t\tstate = \"error\"\n\t\treturn \"\", fmt.Errorf(\"Nonce header not supplied!\")\n\t}\n\ts.nMu.RUnlock()\n\ts.nMu.Lock()\n\tdefer s.nMu.Unlock()\n\tnonce := s.noncePool[0]\n\ts.noncePool = s.noncePool[1:]\n\treturn nonce, nil\n}\n\nfunc (s *State) addNonce(nonce string) {\n\ts.nMu.Lock()\n\tdefer s.nMu.Unlock()\n\ts.noncePool = append(s.noncePool, nonce)\n}\n\n\/\/ Reg object utils, used to add and randomly retrieve registration objects\n\nfunc (s *State) addReg(reg *registration) {\n\ts.rMu.Lock()\n\tdefer s.rMu.Unlock()\n\ts.regs = append(s.regs, reg)\n}\n\nfunc (s *State) getRandReg() (*registration, bool) {\n\tregsLength := len(s.regs)\n\tif regsLength == 0 {\n\t\treturn nil, false\n\t}\n\treturn s.regs[mrand.Intn(regsLength)], true\n}\n\nfunc (s *State) getReg() (*registration, bool) {\n\ts.rMu.RLock()\n\tdefer s.rMu.RUnlock()\n\treturn s.getRandReg()\n}\n\n\/\/ Call sender, it sends the calls!\n\ntype probabilityProfile struct {\n\tprob int\n\taction func(*registration)\n}\n\nfunc weightedCall(setup []probabilityProfile) func(*registration) {\n\tchoices := make(map[int]func(*registration))\n\tn := 0\n\tfor _, pp := range setup {\n\t\tfor i := 0; i < pp.prob; i++ {\n\t\t\tchoices[i+n] = pp.action\n\t\t}\n\t\tn += pp.prob\n\t}\n\tif len(choices) == 0 {\n\t\treturn nil\n\t}\n\n\treturn choices[mrand.Intn(n)]\n}\n\nfunc (s *State) sendCall() {\n\tactionList := []probabilityProfile{}\n\ts.rMu.RLock()\n\tif len(s.regs) < s.maxRegs {\n\t\tactionList = append(actionList, probabilityProfile{1, s.newRegistration})\n\t}\n\ts.rMu.RUnlock()\n\n\treg, found := s.getReg()\n\tif found {\n\t\tactionList = append(actionList, probabilityProfile{3, s.newAuthorization})\n\t\treg.iMu.RLock()\n\t\tif len(reg.auths) > 0 {\n\t\t\tactionList = append(actionList, probabilityProfile{4, s.newCertificate})\n\t\t}\n\t\tif len(reg.certs) > 0 {\n\t\t\tactionList = append(actionList, probabilityProfile{2, s.revokeCertificate})\n\t\t}\n\t\treg.iMu.RUnlock()\n\t}\n\n\tweightedCall(actionList)(reg)\n\ts.wg.Done()\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\treceptorrunner \"github.com\/cloudfoundry-incubator\/receptor\/cmd\/receptor\/testrunner\"\n\tBbs \"github.com\/cloudfoundry-incubator\/runtime-schema\/bbs\"\n\t\"github.com\/cloudfoundry-incubator\/runtime-schema\/models\"\n\t\"github.com\/cloudfoundry\/gunk\/diegonats\"\n\t\"github.com\/cloudfoundry\/gunk\/timeprovider\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nvar _ = Describe(\"Syncing desired state with CC\", func() {\n\tvar (\n\t\tgnatsdProcess ifrit.Process\n\t\tnatsClient diegonats.NATSClient\n\t\tbbs *Bbs.BBS\n\n\t\treceptorProcess ifrit.Process\n\n\t\trunner ifrit.Runner\n\t\tprocess ifrit.Process\n\t)\n\n\tstartNATS := func() {\n\t\tgnatsdProcess, natsClient = diegonats.StartGnatsd(natsPort)\n\t}\n\n\tstopNATS := func() {\n\t\tginkgomon.Kill(gnatsdProcess)\n\t}\n\n\tstartReceptor := func() ifrit.Process {\n\t\treturn ginkgomon.Invoke(receptorrunner.New(receptorPath, receptorrunner.Args{\n\t\t\tAddress: fmt.Sprintf(\"127.0.0.1:%d\", receptorPort),\n\t\t\tEtcdCluster: strings.Join(etcdRunner.NodeURLS(), \",\"),\n\t\t\tInitialHeartbeatInterval: 1 * time.Second,\n\t\t}))\n\t}\n\n\tnewNSyncRunner := func() *ginkgomon.Runner {\n\t\treturn ginkgomon.New(ginkgomon.Config{\n\t\t\tName: \"nsync\",\n\t\t\tAnsiColorCode: \"97m\",\n\t\t\tStartCheck: \"nsync.listener.started\",\n\t\t\tCommand: exec.Command(\n\t\t\t\tlistenerPath,\n\t\t\t\t\"-etcdCluster\", strings.Join(etcdRunner.NodeURLS(), \",\"),\n\t\t\t\t\"-diegoAPIURL\", fmt.Sprintf(\"http:\/\/127.0.0.1:%d\", receptorPort),\n\t\t\t\t\"-natsAddresses\", fmt.Sprintf(\"127.0.0.1:%d\", natsPort),\n\t\t\t\t\"-circuses\", `{\"some-stack\": \"some-health-check.tar.gz\"}`,\n\t\t\t\t\"-dockerCircusPath\", \"the\/docker\/circus\/path.tgz\",\n\t\t\t\t\"-fileServerURL\", \"http:\/\/file-server.com\",\n\t\t\t\t\"-heartbeatInterval\", \"1s\",\n\t\t\t\t\"-logLevel\", \"debug\",\n\t\t\t),\n\t\t})\n\t}\n\n\tBeforeEach(func() {\n\t\tbbs = Bbs.NewBBS(etcdRunner.Adapter(), timeprovider.NewTimeProvider(), lagertest.NewTestLogger(\"test\"))\n\t\treceptorProcess = startReceptor()\n\t\trunner = newNSyncRunner()\n\t})\n\n\tAfterEach(func() {\n\t\tginkgomon.Interrupt(receptorProcess)\n\t})\n\n\tvar publishDesireWithInstances = func(nInstances int) {\n\t\terr := natsClient.Publish(\"diego.desire.app\", []byte(fmt.Sprintf(`\n {\n \"process_guid\": \"the-guid\",\n \"droplet_uri\": \"http:\/\/the-droplet.uri.com\",\n \"start_command\": \"the-start-command\",\n \"memory_mb\": 128,\n \"disk_mb\": 512,\n \"file_descriptors\": 32,\n \"num_instances\": %d,\n \"stack\": \"some-stack\",\n \"log_guid\": \"the-log-guid\"\n }\n `, nInstances)))\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\t}\n\n\tContext(\"when NATS is up\", func() {\n\t\tBeforeEach(func() {\n\t\t\tstartNATS()\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tstopNATS()\n\t\t})\n\n\t\tContext(\"and the nsync listener is started\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tprocess = ginkgomon.Invoke(runner)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tginkgomon.Interrupt(process)\n\t\t\t})\n\n\t\t\tDescribe(\"and a 'diego.desire.app' message is recieved\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tpublishDesireWithInstances(3)\n\t\t\t\t})\n\n\t\t\t\tIt(\"registers an app desire in etcd\", func() {\n\t\t\t\t\tEventually(bbs.DesiredLRPs, 10).Should(HaveLen(1))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when an app is no longer desired\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tEventually(bbs.DesiredLRPs).Should(HaveLen(1))\n\n\t\t\t\t\t\tpublishDesireWithInstances(0)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"should remove the desired state from etcd\", func() {\n\t\t\t\t\t\tEventually(bbs.DesiredLRPs).Should(HaveLen(0))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and a second nsync listener is started\", func() {\n\t\t\t\tvar (\n\t\t\t\t\tdesiredLRPChanges <-chan models.DesiredLRPChange\n\t\t\t\t\tstopWatching chan<- bool\n\n\t\t\t\t\tsecondRunner *ginkgomon.Runner\n\t\t\t\t\tsecondProcess ifrit.Process\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tsecondRunner = newNSyncRunner()\n\t\t\t\t\tsecondRunner.StartCheck = \"\"\n\n\t\t\t\t\tsecondProcess = ginkgomon.Invoke(secondRunner)\n\n\t\t\t\t\tchanges, stop, _ := bbs.WatchForDesiredLRPChanges()\n\n\t\t\t\t\tdesiredLRPChanges = changes\n\t\t\t\t\tstopWatching = stop\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tclose(stopWatching)\n\t\t\t\t\tginkgomon.Interrupt(secondProcess)\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"the second listener\", func() {\n\t\t\t\t\tIt(\"does not become active\", func() {\n\t\t\t\t\t\tConsistently(secondRunner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"nsync.listener.started\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the first listener goes away\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tginkgomon.Interrupt(process)\n\t\t\t\t\t})\n\n\t\t\t\t\tDescribe(\"the second listener\", func() {\n\t\t\t\t\t\tIt(\"eventually becomes active\", func() {\n\t\t\t\t\t\t\tEventually(secondRunner.Buffer, 5*time.Second).Should(gbytes.Say(\"nsync.listener.started\"))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"and a 'diego.desire.app' message is received\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tpublishDesireWithInstances(3)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"does not emit duplicate events\", func() {\n\t\t\t\t\t\tEventually(desiredLRPChanges).Should(Receive())\n\t\t\t\t\t\tConsistently(desiredLRPChanges).ShouldNot(Receive())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"when NATS is not up\", func() {\n\t\tContext(\"and the nsync listener is started\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tprocess = ifrit.Background(runner)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tdefer stopNATS()\n\t\t\t\tdefer ginkgomon.Interrupt(process)\n\t\t\t})\n\n\t\t\tIt(\"starts only after nats comes up\", func() {\n\t\t\t\tConsistently(process.Ready()).ShouldNot(BeClosed())\n\n\t\t\t\tstartNATS()\n\t\t\t\tEventually(process.Ready(), 5*time.Second).Should(BeClosed())\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Don't leak etcdAdapter in test<commit_after>package main_test\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\treceptorrunner \"github.com\/cloudfoundry-incubator\/receptor\/cmd\/receptor\/testrunner\"\n\tBbs \"github.com\/cloudfoundry-incubator\/runtime-schema\/bbs\"\n\t\"github.com\/cloudfoundry-incubator\/runtime-schema\/models\"\n\t\"github.com\/cloudfoundry\/gunk\/diegonats\"\n\t\"github.com\/cloudfoundry\/gunk\/timeprovider\"\n\t\"github.com\/cloudfoundry\/storeadapter\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nvar _ = Describe(\"Syncing desired state with CC\", func() {\n\tvar (\n\t\tgnatsdProcess ifrit.Process\n\t\tnatsClient diegonats.NATSClient\n\t\tbbs *Bbs.BBS\n\n\t\treceptorProcess ifrit.Process\n\n\t\trunner ifrit.Runner\n\t\tprocess ifrit.Process\n\n\t\tetcdAdapter storeadapter.StoreAdapter\n\t)\n\n\tstartNATS := func() {\n\t\tgnatsdProcess, natsClient = diegonats.StartGnatsd(natsPort)\n\t}\n\n\tstopNATS := func() {\n\t\tginkgomon.Kill(gnatsdProcess)\n\t}\n\n\tstartReceptor := func() ifrit.Process {\n\t\treturn ginkgomon.Invoke(receptorrunner.New(receptorPath, receptorrunner.Args{\n\t\t\tAddress: fmt.Sprintf(\"127.0.0.1:%d\", receptorPort),\n\t\t\tEtcdCluster: strings.Join(etcdRunner.NodeURLS(), \",\"),\n\t\t\tInitialHeartbeatInterval: 1 * time.Second,\n\t\t}))\n\t}\n\n\tnewNSyncRunner := func() *ginkgomon.Runner {\n\t\treturn ginkgomon.New(ginkgomon.Config{\n\t\t\tName: \"nsync\",\n\t\t\tAnsiColorCode: \"97m\",\n\t\t\tStartCheck: \"nsync.listener.started\",\n\t\t\tCommand: exec.Command(\n\t\t\t\tlistenerPath,\n\t\t\t\t\"-etcdCluster\", strings.Join(etcdRunner.NodeURLS(), \",\"),\n\t\t\t\t\"-diegoAPIURL\", fmt.Sprintf(\"http:\/\/127.0.0.1:%d\", receptorPort),\n\t\t\t\t\"-natsAddresses\", fmt.Sprintf(\"127.0.0.1:%d\", natsPort),\n\t\t\t\t\"-circuses\", `{\"some-stack\": \"some-health-check.tar.gz\"}`,\n\t\t\t\t\"-dockerCircusPath\", \"the\/docker\/circus\/path.tgz\",\n\t\t\t\t\"-fileServerURL\", \"http:\/\/file-server.com\",\n\t\t\t\t\"-heartbeatInterval\", \"1s\",\n\t\t\t\t\"-logLevel\", \"debug\",\n\t\t\t),\n\t\t})\n\t}\n\n\tBeforeEach(func() {\n\t\tetcdAdapter = etcdRunner.Adapter()\n\t\tbbs = Bbs.NewBBS(etcdAdapter, timeprovider.NewTimeProvider(), lagertest.NewTestLogger(\"test\"))\n\t\treceptorProcess = startReceptor()\n\t\trunner = newNSyncRunner()\n\t})\n\n\tAfterEach(func() {\n\t\tetcdAdapter.Disconnect()\n\t\tginkgomon.Interrupt(receptorProcess)\n\t})\n\n\tvar publishDesireWithInstances = func(nInstances int) {\n\t\terr := natsClient.Publish(\"diego.desire.app\", []byte(fmt.Sprintf(`\n {\n \"process_guid\": \"the-guid\",\n \"droplet_uri\": \"http:\/\/the-droplet.uri.com\",\n \"start_command\": \"the-start-command\",\n \"memory_mb\": 128,\n \"disk_mb\": 512,\n \"file_descriptors\": 32,\n \"num_instances\": %d,\n \"stack\": \"some-stack\",\n \"log_guid\": \"the-log-guid\"\n }\n `, nInstances)))\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\t}\n\n\tContext(\"when NATS is up\", func() {\n\t\tBeforeEach(func() {\n\t\t\tstartNATS()\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tstopNATS()\n\t\t})\n\n\t\tContext(\"and the nsync listener is started\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tprocess = ginkgomon.Invoke(runner)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tginkgomon.Interrupt(process)\n\t\t\t})\n\n\t\t\tDescribe(\"and a 'diego.desire.app' message is recieved\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tpublishDesireWithInstances(3)\n\t\t\t\t})\n\n\t\t\t\tIt(\"registers an app desire in etcd\", func() {\n\t\t\t\t\tEventually(bbs.DesiredLRPs, 10).Should(HaveLen(1))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when an app is no longer desired\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tEventually(bbs.DesiredLRPs).Should(HaveLen(1))\n\n\t\t\t\t\t\tpublishDesireWithInstances(0)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"should remove the desired state from etcd\", func() {\n\t\t\t\t\t\tEventually(bbs.DesiredLRPs).Should(HaveLen(0))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and a second nsync listener is started\", func() {\n\t\t\t\tvar (\n\t\t\t\t\tdesiredLRPChanges <-chan models.DesiredLRPChange\n\t\t\t\t\tstopWatching chan<- bool\n\n\t\t\t\t\tsecondRunner *ginkgomon.Runner\n\t\t\t\t\tsecondProcess ifrit.Process\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tsecondRunner = newNSyncRunner()\n\t\t\t\t\tsecondRunner.StartCheck = \"\"\n\n\t\t\t\t\tsecondProcess = ginkgomon.Invoke(secondRunner)\n\n\t\t\t\t\tchanges, stop, _ := bbs.WatchForDesiredLRPChanges()\n\n\t\t\t\t\tdesiredLRPChanges = changes\n\t\t\t\t\tstopWatching = stop\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tclose(stopWatching)\n\t\t\t\t\tginkgomon.Interrupt(secondProcess)\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"the second listener\", func() {\n\t\t\t\t\tIt(\"does not become active\", func() {\n\t\t\t\t\t\tConsistently(secondRunner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"nsync.listener.started\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the first listener goes away\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tginkgomon.Interrupt(process)\n\t\t\t\t\t})\n\n\t\t\t\t\tDescribe(\"the second listener\", func() {\n\t\t\t\t\t\tIt(\"eventually becomes active\", func() {\n\t\t\t\t\t\t\tEventually(secondRunner.Buffer, 5*time.Second).Should(gbytes.Say(\"nsync.listener.started\"))\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"and a 'diego.desire.app' message is received\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tpublishDesireWithInstances(3)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"does not emit duplicate events\", func() {\n\t\t\t\t\t\tEventually(desiredLRPChanges).Should(Receive())\n\t\t\t\t\t\tConsistently(desiredLRPChanges).ShouldNot(Receive())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"when NATS is not up\", func() {\n\t\tContext(\"and the nsync listener is started\", func() {\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tprocess = ifrit.Background(runner)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tdefer stopNATS()\n\t\t\t\tdefer ginkgomon.Interrupt(process)\n\t\t\t})\n\n\t\t\tIt(\"starts only after nats comes up\", func() {\n\t\t\t\tConsistently(process.Ready()).ShouldNot(BeClosed())\n\n\t\t\t\tstartNATS()\n\t\t\t\tEventually(process.Ready(), 5*time.Second).Should(BeClosed())\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/nfnt\/resize\"\n\t_ \"golang.org\/x\/image\/tiff\"\n\t\"image\"\n\t\"image\/draw\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"os\"\n)\n\n\/\/ SimpleImage implements IIIFImage for reading non-JP2 image types. These can\n\/\/ only handle a basic \"read it all, then crop\/resize\" flow, and thus should\n\/\/ have very careful load testing.\ntype SimpleImage struct {\n\tfile *os.File\n\tconf image.Config\n\tdecodeWidth int\n\tdecodeHeight int\n\tdecodeArea image.Rectangle\n\tresize bool\n\tcrop bool\n}\n\nfunc NewSimpleImage(filename string) (*SimpleImage, error) {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := &SimpleImage{file: file}\n\ti.conf, _, err = image.DecodeConfig(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti.file.Seek(0, 0)\n\n\t\/\/ Default to full size, no crop\n\ti.decodeWidth = i.conf.Width\n\ti.decodeHeight = i.conf.Height\n\ti.decodeArea = image.Rect(0, 0, i.decodeWidth, i.decodeHeight)\n\ti.resize = false\n\ti.crop = false\n\n\treturn i, nil\n}\n\n\/\/ SetScale sets the image to scale by the given multiplier, typically a\n\/\/ percentage from 0 to 1. This is mutually exclusive with resizing by a set\n\/\/ width\/height value.\nfunc (i *SimpleImage) SetScale(m float64) {\n\ti.resize = true\n\ti.decodeWidth = int(float64(i.conf.Width) * m)\n\ti.decodeHeight = int(float64(i.conf.Height) * m)\n}\n\n\/\/ SetResizeWH sets the image to scale to the given width and height. If one\n\/\/ dimension is 0, the decoded image will preserve the aspect ratio while\n\/\/ scaling to the non-zero dimension.\nfunc (i *SimpleImage) SetResizeWH(width, height int) {\n\ti.resize = true\n\ti.decodeWidth = width\n\ti.decodeHeight = height\n}\n\nfunc (i *SimpleImage) SetCrop(r image.Rectangle) {\n\ti.crop = true\n\ti.decodeArea = r\n}\n\n\/\/ DecodeImage returns an image.Image that holds the decoded image data,\n\/\/ resized and cropped if resizing or cropping was requested. Both cropping\n\/\/ and resizing happen here due to the nature of openjpeg, so SetScale,\n\/\/ SetResizeWH, and SetCrop must be called before this function.\nfunc (i *SimpleImage) DecodeImage() (image.Image, error) {\n\timg, _, err := image.Decode(i.file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif i.crop {\n\t\tsrcB := img.Bounds()\n\t\tdstB := i.decodeArea\n\t\tdst := image.NewRGBA(image.Rect(0, 0, dstB.Dx(), dstB.Dy()))\n\t\tdraw.Draw(dst, srcB, img, dstB.Min, draw.Src)\n\t\timg = dst\n\t}\n\n\tif i.resize {\n\t\timg = resize.Resize(uint(i.decodeWidth), uint(i.decodeHeight), img, resize.Bilinear)\n\t}\n\n\treturn img, nil\n}\n\n\/\/ GetDimensions returns the config data as a rectangle\nfunc (i *SimpleImage) GetDimensions() (image.Rectangle, error) {\n\treturn image.Rect(0, 0, i.conf.Width, i.conf.Height), nil\n}\n<commit_msg>Fix SimpleImage resize to be *post*-crop<commit_after>package main\n\nimport (\n\t\"github.com\/nfnt\/resize\"\n\t_ \"golang.org\/x\/image\/tiff\"\n\t\"image\"\n\t\"image\/draw\"\n\t_ \"image\/gif\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"os\"\n)\n\n\/\/ SimpleImage implements IIIFImage for reading non-JP2 image types. These can\n\/\/ only handle a basic \"read it all, then crop\/resize\" flow, and thus should\n\/\/ have very careful load testing.\ntype SimpleImage struct {\n\tfile *os.File\n\tconf image.Config\n\tdecodeWidth int\n\tdecodeHeight int\n\tscaleFactor float64\n\tdecodeArea image.Rectangle\n\tresizeByPercent bool\n\tresizeByPixels bool\n\tcrop bool\n}\n\nfunc NewSimpleImage(filename string) (*SimpleImage, error) {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := &SimpleImage{file: file}\n\ti.conf, _, err = image.DecodeConfig(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti.file.Seek(0, 0)\n\n\t\/\/ Default to full size, no crop\n\ti.decodeWidth = i.conf.Width\n\ti.decodeHeight = i.conf.Height\n\ti.decodeArea = image.Rect(0, 0, i.decodeWidth, i.decodeHeight)\n\n\treturn i, nil\n}\n\n\/\/ SetScale sets the image to scale by the given multiplier, typically a\n\/\/ percentage from 0 to 1. This is mutually exclusive with resizing by a set\n\/\/ width\/height value.\nfunc (i *SimpleImage) SetScale(m float64) {\n\ti.resizeByPixels = false\n\ti.resizeByPercent = true\n\ti.scaleFactor = m\n}\n\n\/\/ SetResizeWH sets the image to scale to the given width and height. If one\n\/\/ dimension is 0, the decoded image will preserve the aspect ratio while\n\/\/ scaling to the non-zero dimension.\nfunc (i *SimpleImage) SetResizeWH(width, height int) {\n\ti.resizeByPercent = false\n\ti.resizeByPixels = true\n\ti.decodeWidth = width\n\ti.decodeHeight = height\n}\n\nfunc (i *SimpleImage) SetCrop(r image.Rectangle) {\n\ti.crop = true\n\ti.decodeArea = r\n}\n\n\/\/ DecodeImage returns an image.Image that holds the decoded image data,\n\/\/ resized and cropped if resizing or cropping was requested. Both cropping\n\/\/ and resizing happen here due to the nature of openjpeg, so SetScale,\n\/\/ SetResizeWH, and SetCrop must be called before this function.\nfunc (i *SimpleImage) DecodeImage() (image.Image, error) {\n\timg, _, err := image.Decode(i.file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif i.crop {\n\t\tsrcB := img.Bounds()\n\t\tdstB := i.decodeArea\n\t\tdst := image.NewRGBA(image.Rect(0, 0, dstB.Dx(), dstB.Dy()))\n\t\tdraw.Draw(dst, srcB, img, dstB.Min, draw.Src)\n\t\timg = dst\n\t}\n\n\tif i.resizeByPercent {\n\t\ti.decodeWidth = int(float64(i.decodeArea.Dx()) * i.scaleFactor)\n\t\ti.decodeHeight = int(float64(i.decodeArea.Dy()) * i.scaleFactor)\n\t\ti.resizeByPixels = true\n\t}\n\n\tif i.resizeByPixels {\n\t\timg = resize.Resize(uint(i.decodeWidth), uint(i.decodeHeight), img, resize.Bilinear)\n\t}\n\n\treturn img, nil\n}\n\n\/\/ GetDimensions returns the config data as a rectangle\nfunc (i *SimpleImage) GetDimensions() (image.Rectangle, error) {\n\treturn image.Rect(0, 0, i.conf.Width, i.conf.Height), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package txindex\n\nimport (\n\t\"context\"\n\n\t\"github.com\/tendermint\/tendermint\/libs\/service\"\n\t\"github.com\/tendermint\/tendermint\/state\/indexer\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\n\/\/ XXX\/TODO: These types should be moved to the indexer package.\n\nconst (\n\tsubscriber = \"IndexerService\"\n)\n\n\/\/ IndexerService connects event bus, transaction and block indexers together in\n\/\/ order to index transactions and blocks coming from the event bus.\ntype IndexerService struct {\n\tservice.BaseService\n\n\ttxIdxr TxIndexer\n\tblockIdxr indexer.BlockIndexer\n\teventBus *types.EventBus\n}\n\n\/\/ NewIndexerService returns a new service instance.\nfunc NewIndexerService(\n\ttxIdxr TxIndexer,\n\tblockIdxr indexer.BlockIndexer,\n\teventBus *types.EventBus,\n) *IndexerService {\n\n\tis := &IndexerService{txIdxr: txIdxr, blockIdxr: blockIdxr, eventBus: eventBus}\n\tis.BaseService = *service.NewBaseService(nil, \"IndexerService\", is)\n\treturn is\n}\n\n\/\/ OnStart implements service.Service by subscribing for all transactions\n\/\/ and indexing them by events.\nfunc (is *IndexerService) OnStart() error {\n\t\/\/ Use SubscribeUnbuffered here to ensure both subscriptions does not get\n\t\/\/ cancelled due to not pulling messages fast enough. Cause this might\n\t\/\/ sometimes happen when there are no other subscribers.\n\tblockHeadersSub, err := is.eventBus.SubscribeUnbuffered(\n\t\tcontext.Background(),\n\t\tsubscriber,\n\t\ttypes.EventQueryNewBlockHeader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-blockHeadersSub.Out()\n\t\t\teventDataHeader := msg.Data().(types.EventDataNewBlockHeader)\n\t\t\theight := eventDataHeader.Header.Height\n\t\t\tbatch := NewBatch(eventDataHeader.NumTxs)\n\n\t\t\tfor i := int64(0); i < eventDataHeader.NumTxs; i++ {\n\t\t\t\tmsg2 := <-txsSub.Out()\n\t\t\t\ttxResult := msg2.Data().(types.EventDataTx).TxResult\n\n\t\t\t\tif err = batch.Add(&txResult); err != nil {\n\t\t\t\t\tis.Logger.Error(\n\t\t\t\t\t\t\"failed to add tx to batch\",\n\t\t\t\t\t\t\"height\", height,\n\t\t\t\t\t\t\"index\", txResult.Index,\n\t\t\t\t\t\t\"err\", err,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err := is.blockIdxr.Index(eventDataHeader); err != nil {\n\t\t\t\tis.Logger.Error(\"failed to index block\", \"height\", height, \"err\", err)\n\t\t\t} else {\n\t\t\t\tis.Logger.Error(\"indexed block\", \"height\", height)\n\t\t\t}\n\n\t\t\tif err = is.txIdxr.AddBatch(batch); err != nil {\n\t\t\t\tis.Logger.Error(\"failed to index block txs\", \"height\", height, \"err\", err)\n\t\t\t} else {\n\t\t\t\tis.Logger.Debug(\"indexed block txs\", \"height\", height, \"num_txs\", eventDataHeader.NumTxs)\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ OnStop implements service.Service by unsubscribing from all transactions.\nfunc (is *IndexerService) OnStop() {\n\tif is.eventBus.IsRunning() {\n\t\t_ = is.eventBus.UnsubscribeAll(context.Background(), subscriber)\n\t}\n}\n<commit_msg>change index block log to info (#6290) (#6294)<commit_after>package txindex\n\nimport (\n\t\"context\"\n\n\t\"github.com\/tendermint\/tendermint\/libs\/service\"\n\t\"github.com\/tendermint\/tendermint\/state\/indexer\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\n\/\/ XXX\/TODO: These types should be moved to the indexer package.\n\nconst (\n\tsubscriber = \"IndexerService\"\n)\n\n\/\/ IndexerService connects event bus, transaction and block indexers together in\n\/\/ order to index transactions and blocks coming from the event bus.\ntype IndexerService struct {\n\tservice.BaseService\n\n\ttxIdxr TxIndexer\n\tblockIdxr indexer.BlockIndexer\n\teventBus *types.EventBus\n}\n\n\/\/ NewIndexerService returns a new service instance.\nfunc NewIndexerService(\n\ttxIdxr TxIndexer,\n\tblockIdxr indexer.BlockIndexer,\n\teventBus *types.EventBus,\n) *IndexerService {\n\n\tis := &IndexerService{txIdxr: txIdxr, blockIdxr: blockIdxr, eventBus: eventBus}\n\tis.BaseService = *service.NewBaseService(nil, \"IndexerService\", is)\n\treturn is\n}\n\n\/\/ OnStart implements service.Service by subscribing for all transactions\n\/\/ and indexing them by events.\nfunc (is *IndexerService) OnStart() error {\n\t\/\/ Use SubscribeUnbuffered here to ensure both subscriptions does not get\n\t\/\/ cancelled due to not pulling messages fast enough. Cause this might\n\t\/\/ sometimes happen when there are no other subscribers.\n\tblockHeadersSub, err := is.eventBus.SubscribeUnbuffered(\n\t\tcontext.Background(),\n\t\tsubscriber,\n\t\ttypes.EventQueryNewBlockHeader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-blockHeadersSub.Out()\n\t\t\teventDataHeader := msg.Data().(types.EventDataNewBlockHeader)\n\t\t\theight := eventDataHeader.Header.Height\n\t\t\tbatch := NewBatch(eventDataHeader.NumTxs)\n\n\t\t\tfor i := int64(0); i < eventDataHeader.NumTxs; i++ {\n\t\t\t\tmsg2 := <-txsSub.Out()\n\t\t\t\ttxResult := msg2.Data().(types.EventDataTx).TxResult\n\n\t\t\t\tif err = batch.Add(&txResult); err != nil {\n\t\t\t\t\tis.Logger.Error(\n\t\t\t\t\t\t\"failed to add tx to batch\",\n\t\t\t\t\t\t\"height\", height,\n\t\t\t\t\t\t\"index\", txResult.Index,\n\t\t\t\t\t\t\"err\", err,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err := is.blockIdxr.Index(eventDataHeader); err != nil {\n\t\t\t\tis.Logger.Error(\"failed to index block\", \"height\", height, \"err\", err)\n\t\t\t} else {\n\t\t\t\tis.Logger.Info(\"indexed block\", \"height\", height)\n\t\t\t}\n\n\t\t\tif err = is.txIdxr.AddBatch(batch); err != nil {\n\t\t\t\tis.Logger.Error(\"failed to index block txs\", \"height\", height, \"err\", err)\n\t\t\t} else {\n\t\t\t\tis.Logger.Debug(\"indexed block txs\", \"height\", height, \"num_txs\", eventDataHeader.NumTxs)\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ OnStop implements service.Service by unsubscribing from all transactions.\nfunc (is *IndexerService) OnStop() {\n\tif is.eventBus.IsRunning() {\n\t\t_ = is.eventBus.UnsubscribeAll(context.Background(), subscriber)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/ Copyright (c) 2016 Andriy Pylypenko. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage sippy\n\nimport (\n \"bufio\"\n \"net\"\n \"strconv\"\n \"strings\"\n\n \"sippy\/net\"\n \"sippy\/types\"\n)\n\nfunc NewRtpProxyClient(opts *rtpProxyClientOpts) sippy_types.RtpProxyClient {\n return NewRtp_proxy_client_base(nil, opts)\n}\n\ntype Rtp_proxy_client_base struct {\n heir sippy_types.RtpProxyClient\n opts *rtpProxyClientOpts\n transport rtp_proxy_transport\n online bool\n sbind_supported bool\n tnot_supported bool\n copy_supported bool\n stat_supported bool\n wdnt_supported bool\n caps_done bool\n shut_down bool\n active_sessions int64\n sessions_created int64\n active_streams int64\n preceived int64\n ptransmitted int64\n}\n\ntype rtp_proxy_transport interface {\n address() net.Addr\n get_rtpc_delay() float64\n is_local() bool\n send_command(string, func(string))\n shutdown()\n reconnect(net.Addr, *sippy_net.HostPort)\n}\n\nfunc (self *Rtp_proxy_client_base) IsLocal() bool {\n return self.transport.is_local()\n}\n\nfunc (self *Rtp_proxy_client_base) IsOnline() bool {\n return self.online\n}\n\nfunc (self *Rtp_proxy_client_base) WdntSupported() bool {\n return self.wdnt_supported\n}\n\nfunc (self *Rtp_proxy_client_base) SBindSupported() bool {\n return self.sbind_supported\n}\n\nfunc (self *Rtp_proxy_client_base) TNotSupported() bool {\n return self.tnot_supported\n}\n\nfunc (self *Rtp_proxy_client_base) GetProxyAddress() string {\n return self.opts.proxy_address\n}\n\nfunc (self *Rtp_proxy_client_base) me() sippy_types.RtpProxyClient {\n if self.heir != nil {\n return self.heir\n }\n return self\n}\n\nfunc (self *Rtp_proxy_client_base) Address() net.Addr {\n return self.transport.address()\n}\n\nfunc NewRtp_proxy_client_base(heir sippy_types.RtpProxyClient, opts *rtpProxyClientOpts) *Rtp_proxy_client_base {\n return &Rtp_proxy_client_base{\n heir : heir,\n caps_done : false,\n shut_down : false,\n opts : opts,\n }\n}\n\nfunc (self *Rtp_proxy_client_base) Start() error {\n var err error\n\n self.transport, err = self.opts.rtpp_class(self.me(), self.opts.config, self.opts.rtppaddr, self.opts.bind_address)\n if err != nil {\n return err\n }\n if ! self.opts.no_version_check {\n self.version_check()\n } else {\n self.caps_done = true\n self.online = true\n }\n return nil\n}\n\nfunc (self *Rtp_proxy_client_base) SendCommand(cmd string, cb func(string)) {\n self.transport.send_command(cmd, cb)\n}\n\nfunc (self *Rtp_proxy_client_base) Reconnect(addr net.Addr, bind_addr *sippy_net.HostPort) {\n self.transport.reconnect(addr, bind_addr)\n}\n\nfunc (self *Rtp_proxy_client_base) version_check() {\n if self.shut_down {\n return\n }\n self.transport.send_command(\"V\", self.version_check_reply)\n}\n\nfunc (self *Rtp_proxy_client_base) version_check_reply(version string) {\n if self.shut_down {\n return\n }\n if version == \"20040107\" {\n self.me().GoOnline()\n } else if self.online {\n self.me().GoOffline()\n } else {\n StartTimeoutWithSpread(self.version_check, nil, self.opts.hrtb_retr_ival, 1, self.opts.logger, 0.1)\n }\n}\n\nfunc (self *Rtp_proxy_client_base) heartbeat() {\n \/\/print \"heartbeat\", self, self.address\n if self.shut_down {\n return\n }\n self.transport.send_command(\"Ib\", self.heartbeat_reply)\n}\n\nfunc (self *Rtp_proxy_client_base) heartbeat_reply(stats string) {\n \/\/print \"heartbeat_reply\", self.address, stats, self.online\n if self.shut_down || ! self.online {\n return\n }\n if stats == \"\" {\n self.active_sessions = 0\n self.me().GoOffline()\n } else {\n sessions_created := int64(0)\n active_sessions := int64(0)\n active_streams := int64(0)\n preceived := int64(0)\n ptransmitted := int64(0)\n scanner := bufio.NewScanner(strings.NewReader(stats))\n for scanner.Scan() {\n line_parts := strings.SplitN(scanner.Text(), \":\", 2)\n if len(line_parts) != 2 { continue }\n switch line_parts[0] {\n case \"sessions created\":\n sessions_created, _ = strconv.ParseInt(line_parts[1], 10, 64)\n case \"active sessions\":\n active_sessions, _ = strconv.ParseInt(line_parts[1], 10, 64)\n case \"active streams\":\n active_streams, _ = strconv.ParseInt(line_parts[1], 10, 64)\n case \"packets received\":\n preceived, _ = strconv.ParseInt(line_parts[1], 10, 64)\n case \"packets transmitted\":\n ptransmitted, _ = strconv.ParseInt(line_parts[1], 10, 64)\n }\n }\n self.UpdateActive(active_sessions, sessions_created, active_streams, preceived, ptransmitted)\n }\n StartTimeoutWithSpread(self.heartbeat, nil, self.opts.hrtb_ival, 1, self.opts.logger, 0.1)\n}\n\nfunc (self *Rtp_proxy_client_base) GoOnline() {\n if self.shut_down {\n return\n }\n if ! self.online {\n if ! self.caps_done {\n newRtppCapsChecker(self)\n return\n }\n self.online = true\n self.heartbeat()\n }\n}\n\nfunc (self *Rtp_proxy_client_base) GoOffline() {\n if self.shut_down {\n return\n }\n \/\/print \"go_offline\", self.address, self.online\n if self.online {\n self.online = false\n StartTimeoutWithSpread(self.version_check, nil, self.opts.hrtb_retr_ival, 1, self.opts.logger, 0.1)\n }\n}\n\nfunc (self *Rtp_proxy_client_base) UpdateActive(active_sessions, sessions_created, active_streams, preceived, ptransmitted int64) {\n self.sessions_created = sessions_created\n self.active_sessions = active_sessions\n self.active_streams = active_streams\n self.preceived = preceived\n self.ptransmitted = ptransmitted\n}\n\nfunc (self *Rtp_proxy_client_base) GetActiveSessions() int64 {\n return self.active_sessions\n}\n\nfunc (self *Rtp_proxy_client_base) GetActiveStreams() int64 {\n return self.active_streams\n}\n\nfunc (self *Rtp_proxy_client_base) GetPReceived() int64 {\n return self.preceived\n}\n\nfunc (self *Rtp_proxy_client_base) GetSessionsCreated() int64 {\n return self.sessions_created\n}\n\nfunc (self *Rtp_proxy_client_base) GetPTransmitted() int64 {\n return self.ptransmitted\n}\n\nfunc (self *Rtp_proxy_client_base) Shutdown() {\n if self.shut_down { \/\/ do not crash when shutdown() called twice\n return\n }\n self.shut_down = true\n self.transport.shutdown()\n self.transport = nil\n}\n\nfunc (self *Rtp_proxy_client_base) IsShutDown() bool {\n return self.shut_down\n}\n\nfunc (self *Rtp_proxy_client_base) GetOpts() sippy_types.RtpProxyClientOpts {\n return self.opts\n}\n\nfunc (self *Rtp_proxy_client_base) GetRtpcDelay() float64 {\n return self.transport.get_rtpc_delay()\n}\n\ntype rtppCapsChecker struct {\n caps_requested int\n caps_received int\n rtpc *Rtp_proxy_client_base\n}\n\nfunc newRtppCapsChecker(rtpc *Rtp_proxy_client_base) *rtppCapsChecker {\n self := &rtppCapsChecker{\n rtpc : rtpc,\n }\n rtpc.caps_done = false\n CAPSTABLE := []struct{ vers string; attr *bool }{\n { \"20071218\", &self.rtpc.copy_supported },\n { \"20080403\", &self.rtpc.stat_supported },\n { \"20081224\", &self.rtpc.tnot_supported },\n { \"20090810\", &self.rtpc.sbind_supported },\n { \"20150617\", &self.rtpc.wdnt_supported },\n }\n self.caps_requested = len(CAPSTABLE)\n for _, it := range CAPSTABLE {\n attr := it.attr \/\/ For some reason the it.attr cannot be passed into the following\n \/\/ function directly - the resulting value is always that of the\n \/\/ last 'it.attr' value.\n rtpc.transport.send_command(\"VF \" + it.vers, func(res string) { self.caps_query_done(res, attr) })\n }\n return self\n}\n\nfunc (self *rtppCapsChecker) caps_query_done(result string, attr *bool) {\n self.caps_received += 1\n if result == \"1\" {\n *attr = true\n } else {\n *attr = false\n }\n if self.caps_received == self.caps_requested {\n self.rtpc.caps_done = true\n self.rtpc.me().GoOnline()\n self.rtpc = nil\n }\n}\n<commit_msg>Trim spaces.<commit_after>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/ Copyright (c) 2016 Andriy Pylypenko. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage sippy\n\nimport (\n \"bufio\"\n \"net\"\n \"strconv\"\n \"strings\"\n\n \"sippy\/net\"\n \"sippy\/types\"\n)\n\nfunc NewRtpProxyClient(opts *rtpProxyClientOpts) sippy_types.RtpProxyClient {\n return NewRtp_proxy_client_base(nil, opts)\n}\n\ntype Rtp_proxy_client_base struct {\n heir sippy_types.RtpProxyClient\n opts *rtpProxyClientOpts\n transport rtp_proxy_transport\n online bool\n sbind_supported bool\n tnot_supported bool\n copy_supported bool\n stat_supported bool\n wdnt_supported bool\n caps_done bool\n shut_down bool\n active_sessions int64\n sessions_created int64\n active_streams int64\n preceived int64\n ptransmitted int64\n}\n\ntype rtp_proxy_transport interface {\n address() net.Addr\n get_rtpc_delay() float64\n is_local() bool\n send_command(string, func(string))\n shutdown()\n reconnect(net.Addr, *sippy_net.HostPort)\n}\n\nfunc (self *Rtp_proxy_client_base) IsLocal() bool {\n return self.transport.is_local()\n}\n\nfunc (self *Rtp_proxy_client_base) IsOnline() bool {\n return self.online\n}\n\nfunc (self *Rtp_proxy_client_base) WdntSupported() bool {\n return self.wdnt_supported\n}\n\nfunc (self *Rtp_proxy_client_base) SBindSupported() bool {\n return self.sbind_supported\n}\n\nfunc (self *Rtp_proxy_client_base) TNotSupported() bool {\n return self.tnot_supported\n}\n\nfunc (self *Rtp_proxy_client_base) GetProxyAddress() string {\n return self.opts.proxy_address\n}\n\nfunc (self *Rtp_proxy_client_base) me() sippy_types.RtpProxyClient {\n if self.heir != nil {\n return self.heir\n }\n return self\n}\n\nfunc (self *Rtp_proxy_client_base) Address() net.Addr {\n return self.transport.address()\n}\n\nfunc NewRtp_proxy_client_base(heir sippy_types.RtpProxyClient, opts *rtpProxyClientOpts) *Rtp_proxy_client_base {\n return &Rtp_proxy_client_base{\n heir : heir,\n caps_done : false,\n shut_down : false,\n opts : opts,\n }\n}\n\nfunc (self *Rtp_proxy_client_base) Start() error {\n var err error\n\n self.transport, err = self.opts.rtpp_class(self.me(), self.opts.config, self.opts.rtppaddr, self.opts.bind_address)\n if err != nil {\n return err\n }\n if ! self.opts.no_version_check {\n self.version_check()\n } else {\n self.caps_done = true\n self.online = true\n }\n return nil\n}\n\nfunc (self *Rtp_proxy_client_base) SendCommand(cmd string, cb func(string)) {\n self.transport.send_command(cmd, cb)\n}\n\nfunc (self *Rtp_proxy_client_base) Reconnect(addr net.Addr, bind_addr *sippy_net.HostPort) {\n self.transport.reconnect(addr, bind_addr)\n}\n\nfunc (self *Rtp_proxy_client_base) version_check() {\n if self.shut_down {\n return\n }\n self.transport.send_command(\"V\", self.version_check_reply)\n}\n\nfunc (self *Rtp_proxy_client_base) version_check_reply(version string) {\n if self.shut_down {\n return\n }\n if version == \"20040107\" {\n self.me().GoOnline()\n } else if self.online {\n self.me().GoOffline()\n } else {\n StartTimeoutWithSpread(self.version_check, nil, self.opts.hrtb_retr_ival, 1, self.opts.logger, 0.1)\n }\n}\n\nfunc (self *Rtp_proxy_client_base) heartbeat() {\n \/\/print \"heartbeat\", self, self.address\n if self.shut_down {\n return\n }\n self.transport.send_command(\"Ib\", self.heartbeat_reply)\n}\n\nfunc (self *Rtp_proxy_client_base) heartbeat_reply(stats string) {\n \/\/print \"heartbeat_reply\", self.address, stats, self.online\n if self.shut_down || ! self.online {\n return\n }\n if stats == \"\" {\n self.active_sessions = 0\n self.me().GoOffline()\n } else {\n sessions_created := int64(0)\n active_sessions := int64(0)\n active_streams := int64(0)\n preceived := int64(0)\n ptransmitted := int64(0)\n scanner := bufio.NewScanner(strings.NewReader(stats))\n for scanner.Scan() {\n line_parts := strings.SplitN(scanner.Text(), \":\", 2)\n if len(line_parts) != 2 { continue }\n switch line_parts[0] {\n case \"sessions created\":\n sessions_created, _ = strconv.ParseInt(strings.TrimSpace(line_parts[1]), 10, 64)\n case \"active sessions\":\n active_sessions, _ = strconv.ParseInt(strings.TrimSpace(line_parts[1]), 10, 64)\n case \"active streams\":\n active_streams, _ = strconv.ParseInt(strings.TrimSpace(line_parts[1]), 10, 64)\n case \"packets received\":\n preceived, _ = strconv.ParseInt(strings.TrimSpace(line_parts[1]), 10, 64)\n case \"packets transmitted\":\n ptransmitted, _ = strconv.ParseInt(strings.TrimSpace(line_parts[1]), 10, 64)\n }\n }\n self.UpdateActive(active_sessions, sessions_created, active_streams, preceived, ptransmitted)\n }\n StartTimeoutWithSpread(self.heartbeat, nil, self.opts.hrtb_ival, 1, self.opts.logger, 0.1)\n}\n\nfunc (self *Rtp_proxy_client_base) GoOnline() {\n if self.shut_down {\n return\n }\n if ! self.online {\n if ! self.caps_done {\n newRtppCapsChecker(self)\n return\n }\n self.online = true\n self.heartbeat()\n }\n}\n\nfunc (self *Rtp_proxy_client_base) GoOffline() {\n if self.shut_down {\n return\n }\n \/\/print \"go_offline\", self.address, self.online\n if self.online {\n self.online = false\n StartTimeoutWithSpread(self.version_check, nil, self.opts.hrtb_retr_ival, 1, self.opts.logger, 0.1)\n }\n}\n\nfunc (self *Rtp_proxy_client_base) UpdateActive(active_sessions, sessions_created, active_streams, preceived, ptransmitted int64) {\n self.sessions_created = sessions_created\n self.active_sessions = active_sessions\n self.active_streams = active_streams\n self.preceived = preceived\n self.ptransmitted = ptransmitted\n}\n\nfunc (self *Rtp_proxy_client_base) GetActiveSessions() int64 {\n return self.active_sessions\n}\n\nfunc (self *Rtp_proxy_client_base) GetActiveStreams() int64 {\n return self.active_streams\n}\n\nfunc (self *Rtp_proxy_client_base) GetPReceived() int64 {\n return self.preceived\n}\n\nfunc (self *Rtp_proxy_client_base) GetSessionsCreated() int64 {\n return self.sessions_created\n}\n\nfunc (self *Rtp_proxy_client_base) GetPTransmitted() int64 {\n return self.ptransmitted\n}\n\nfunc (self *Rtp_proxy_client_base) Shutdown() {\n if self.shut_down { \/\/ do not crash when shutdown() called twice\n return\n }\n self.shut_down = true\n self.transport.shutdown()\n self.transport = nil\n}\n\nfunc (self *Rtp_proxy_client_base) IsShutDown() bool {\n return self.shut_down\n}\n\nfunc (self *Rtp_proxy_client_base) GetOpts() sippy_types.RtpProxyClientOpts {\n return self.opts\n}\n\nfunc (self *Rtp_proxy_client_base) GetRtpcDelay() float64 {\n return self.transport.get_rtpc_delay()\n}\n\ntype rtppCapsChecker struct {\n caps_requested int\n caps_received int\n rtpc *Rtp_proxy_client_base\n}\n\nfunc newRtppCapsChecker(rtpc *Rtp_proxy_client_base) *rtppCapsChecker {\n self := &rtppCapsChecker{\n rtpc : rtpc,\n }\n rtpc.caps_done = false\n CAPSTABLE := []struct{ vers string; attr *bool }{\n { \"20071218\", &self.rtpc.copy_supported },\n { \"20080403\", &self.rtpc.stat_supported },\n { \"20081224\", &self.rtpc.tnot_supported },\n { \"20090810\", &self.rtpc.sbind_supported },\n { \"20150617\", &self.rtpc.wdnt_supported },\n }\n self.caps_requested = len(CAPSTABLE)\n for _, it := range CAPSTABLE {\n attr := it.attr \/\/ For some reason the it.attr cannot be passed into the following\n \/\/ function directly - the resulting value is always that of the\n \/\/ last 'it.attr' value.\n rtpc.transport.send_command(\"VF \" + it.vers, func(res string) { self.caps_query_done(res, attr) })\n }\n return self\n}\n\nfunc (self *rtppCapsChecker) caps_query_done(result string, attr *bool) {\n self.caps_received += 1\n if result == \"1\" {\n *attr = true\n } else {\n *attr = false\n }\n if self.caps_received == self.caps_requested {\n self.rtpc.caps_done = true\n self.rtpc.me().GoOnline()\n self.rtpc = nil\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/go-errors\/errors\"\n\tirma \"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n\t\"github.com\/privacybydesign\/irmago\/server\/irmaserver\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\tprefixed \"github.com\/x-cray\/logrus-prefixed-formatter\"\n)\n\nvar (\n\thttpServer *http.Server\n\tirmaServer *irmaserver.Server\n\tdefaulturl string\n\n\tlogger = logrus.New()\n)\n\n\/\/ sessionCmd represents the session command\nvar sessionCmd = &cobra.Command{\n\tUse: \"session\",\n\tShort: \"Perform an IRMA disclosure, issuance or signature session\",\n\tLong: `Perform an IRMA disclosure, issuance or signature session on the command line\n\nUsing either the builtin IRMA server library, or an external IRMA server (specify its URL\nwith --server), an IRMA session is started; the QR is printed in the terminal; and the session\nresult is printed when the session completes or fails.\n\nA session request can either be constructed using the --disclose, --issue, and --sign together\nwith --message flags, or it can be specified as JSON to the --request flag.`,\n\tExample: `irma session --disclose irma-demo.MijnOverheid.root.BSN\nirma session --sign irma-demo.MijnOverheid.root.BSN --message message\nirma session --issue irma-demo.MijnOverheid.ageLower=yes,yes,yes,no --disclose irma-demo.MijnOverheid.root.BSN\nirma session --request '{\"type\":\"disclosing\",\"content\":[{\"label\":\"BSN\",\"attributes\":[\"irma-demo.MijnOverheid.root.BSN\"]}]}'\nirma session --server http:\/\/localhost:8088 --authmethod token --key mytoken --disclose irma-demo.MijnOverheid.root.BSN\nirma session --server http:\/\/localhost:8088 --static mystaticsession\nirma session --from-package '{\"sessionPtr\": ... , \"frontendRequest\": ...}'`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar (\n\t\t\trequest irma.RequestorRequest\n\t\t\tirmaconfig *irma.Configuration\n\t\t\tpkg *server.SessionPackage\n\t\t\tresult *server.SessionResult\n\t\t\terr error\n\n\t\t\tflags = cmd.Flags()\n\t\t\turl, _ = flags.GetString(\"url\")\n\t\t\tserverURL, _ = flags.GetString(\"server\")\n\t\t\tnoqr, _ = flags.GetBool(\"noqr\")\n\t\t\tpairing, _ = flags.GetBool(\"pairing\")\n\t\t\tauthMethod, _ = flags.GetString(\"authmethod\")\n\t\t\tkey, _ = flags.GetString(\"key\")\n\t\t\tjsonPkg, _ = flags.GetString(\"from-package\")\n\t\t\tstatic, _ = flags.GetString(\"static\")\n\t\t)\n\t\tif url != defaulturl && serverURL != \"\" {\n\t\t\tdie(\"Failed to read configuration\", errors.New(\"--url can't be combined with --server\"))\n\t\t}\n\n\t\tif static != \"\" {\n\t\t\tif err = staticRequest(serverURL, static, noqr); err != nil {\n\t\t\t\tdie(\"Failed to handle static session\", err)\n\t\t\t}\n\t\t\t\/\/ Static sessions are fully handled on the phone.\n\t\t\treturn\n\t\t}\n\n\t\tif jsonPkg == \"\" {\n\t\t\trequest, irmaconfig, err = configureSession(cmd)\n\t\t\tif err != nil {\n\t\t\t\tdie(\"\", err)\n\t\t\t}\n\t\t\tif serverURL != \"\" {\n\t\t\t\tname, _ := flags.GetString(\"name\")\n\t\t\t\tpkg, err = postRequest(serverURL, \"session\", request, name, authMethod, key)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdie(\"Session could not be started\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tpkg = &server.SessionPackage{}\n\t\t\terr = json.Unmarshal([]byte(jsonPkg), pkg)\n\t\t\tif err != nil {\n\t\t\t\tdie(\"Failed to parse session package\", err)\n\t\t\t}\n\t\t}\n\n\t\tif pkg == nil {\n\t\t\tport, _ := flags.GetInt(\"port\")\n\t\t\tprivatekeysPath, _ := flags.GetString(\"privkeys\")\n\t\t\tverbosity, _ := cmd.Flags().GetCount(\"verbose\")\n\t\t\tresult, err = libraryRequest(request, irmaconfig, url, port, privatekeysPath, noqr, verbosity, pairing)\n\t\t} else {\n\t\t\tresult, err = serverRequest(pkg, noqr, pairing)\n\t\t}\n\t\tif err != nil {\n\t\t\tdie(\"Session failed\", err)\n\t\t}\n\n\t\tprintSessionResult(result)\n\n\t\t\/\/ Done!\n\t\tif httpServer != nil {\n\t\t\t_ = httpServer.Close()\n\t\t}\n\t},\n}\n\nfunc libraryRequest(\n\trequest irma.RequestorRequest,\n\tirmaconfig *irma.Configuration,\n\turl string,\n\tport int,\n\tprivatekeysPath string,\n\tnoqr bool,\n\tverbosity int,\n\tpairing bool,\n) (*server.SessionResult, error) {\n\tif err := configureSessionServer(url, port, privatekeysPath, irmaconfig, verbosity); err != nil {\n\t\treturn nil, err\n\t}\n\tstartServer(port)\n\n\t\/\/ Start the session\n\tresultchan := make(chan *server.SessionResult)\n\tqr, requestorToken, _, err := irmaServer.StartSession(request, func(r *server.SessionResult) {\n\t\tresultchan <- r\n\t})\n\tif err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"IRMA session failed\", 0)\n\t}\n\n\t\/\/ Enable pairing if necessary\n\tvar sessionOptions *irma.SessionOptions\n\tif pairing {\n\t\toptionsRequest := irma.NewFrontendOptionsRequest()\n\t\toptionsRequest.PairingMethod = irma.PairingMethodPin\n\t\tif sessionOptions, err = irmaServer.SetFrontendOptions(requestorToken, &optionsRequest); err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to enable pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Print QR code\n\tif err := printQr(qr, noqr); err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"Failed to print QR\", 0)\n\t}\n\n\tif pairing {\n\t\t\/\/ Listen for session status\n\t\tstatuschan, err := irmaServer.SessionStatus(requestorToken)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to start listening for session statuses\", 0)\n\t\t}\n\n\t\terr = handlePairing(sessionOptions, statuschan, func() error {\n\t\t\treturn irmaServer.PairingCompleted(requestorToken)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to handle pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Wait for session to finish and then return session result\n\treturn <-resultchan, nil\n}\n\nfunc serverRequest(\n\tpkg *server.SessionPackage,\n\tnoqr bool,\n\tpairing bool,\n) (*server.SessionResult, error) {\n\t\/\/ Enable pairing if necessary\n\tvar (\n\t\tqr = pkg.SessionPtr\n\t\tfrontendRequest = pkg.FrontendRequest\n\t\ttransport = irma.NewHTTPTransport(qr.URL, false)\n\t\tsessionOptions = &irma.SessionOptions{}\n\t\terr error\n\t)\n\tif pairing {\n\t\ttransport.SetHeader(irma.AuthorizationHeader, string(frontendRequest.Authorization))\n\t\toptionsRequest := irma.NewFrontendOptionsRequest()\n\t\toptionsRequest.PairingMethod = irma.PairingMethodPin\n\t\terr = transport.Post(\"frontend\/options\", sessionOptions, optionsRequest)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to enable pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Print session QR\n\tlogger.Debug(\"QR: \", prettyprint(qr))\n\tif err := printQr(qr, noqr); err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"Failed to print QR\", 0)\n\t}\n\n\tstatuschan := make(chan irma.ServerStatus)\n\terrorchan := make(chan error)\n\tvar wg sync.WaitGroup\n\n\tgo irma.WaitStatus(transport, irma.ServerStatusInitialized, statuschan, errorchan)\n\tgo func() {\n\t\terr := <-errorchan\n\t\tif err != nil {\n\t\t\t_ = server.LogFatal(err)\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif pairing {\n\t\t\terr = handlePairing(sessionOptions, statuschan, func() error {\n\t\t\t\terr = transport.Post(\"frontend\/pairingcompleted\", nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.WrapPrefix(err, \"Failed to complete pairing\", 0)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terr = errors.WrapPrefix(err, \"Failed to handle pairing\", 0)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Wait until client connects if pairing is disabled\n\t\t\tstatus := <-statuschan\n\t\t\tif status != irma.ServerStatusConnected {\n\t\t\t\terr = errors.Errorf(\"Unexpected status: %s\", status)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Wait until client finishes\n\t\tstatus := <-statuschan\n\t\tif status != irma.ServerStatusCancelled && status != irma.ServerStatusDone {\n\t\t\terr = errors.Errorf(\"Unexpected status: %s\", status)\n\t\t\treturn\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\tif err == nil && pkg.Token != \"\" {\n\t\tresult := &server.SessionResult{}\n\t\turlParts := strings.Split(pkg.SessionPtr.URL, \"\/irma\/session\/\")\n\t\tif len(urlParts) <= 1 {\n\t\t\t\/\/ Custom URL structure is used, so we cannot determine result endpoint location.\n\t\t\treturn nil, nil\n\t\t}\n\t\tpath := fmt.Sprintf(\"session\/%s\/result\", pkg.Token)\n\t\tif err = irma.NewHTTPTransport(urlParts[0], false).Get(path, result); err == nil {\n\t\t\treturn result, err\n\t\t}\n\t}\n\treturn nil, err\n}\n\nfunc staticRequest(serverURL, name string, noqr bool) error {\n\tif serverURL == \"\" {\n\t\treturn errors.New(\"--static must be combined with --server\")\n\t}\n\tqr := &irma.Qr{\n\t\tType: irma.ActionRedirect,\n\t\tURL: fmt.Sprintf(\"%s\/irma\/session\/%s\", serverURL, name),\n\t}\n\treturn printQr(qr, noqr)\n}\n\nfunc postRequest(serverURL, path string, request irma.RequestorRequest, name, authMethod, key string) (\n\t*server.SessionPackage, error) {\n\tvar (\n\t\terr error\n\t\tpkg = &server.SessionPackage{}\n\t\ttransport = irma.NewHTTPTransport(serverURL, false)\n\t)\n\n\tswitch authMethod {\n\tcase \"token\":\n\t\ttransport.SetHeader(\"Authorization\", key)\n\t\tfallthrough\n\tcase \"none\":\n\t\terr = transport.Post(path, pkg, request)\n\tcase \"hmac\", \"rsa\":\n\t\tvar jwtstr string\n\t\tjwtstr, err = signRequest(request, name, authMethod, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(\"Session request JWT: \", jwtstr)\n\t\terr = transport.Post(path, pkg, jwtstr)\n\tdefault:\n\t\treturn nil, errors.New(\"Invalid authentication method (must be none, token, hmac or rsa)\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, err\n}\n\nfunc handlePairing(options *irma.SessionOptions, statusChan chan irma.ServerStatus, completePairing func() error) error {\n\terrorChan := make(chan error)\n\tpairingStarted := false\n\tfor {\n\t\tselect {\n\t\tcase status := <-statusChan:\n\t\t\tif status == irma.ServerStatusInitialized {\n\t\t\t\tcontinue\n\t\t\t} else if status == irma.ServerStatusPairing {\n\t\t\t\tpairingStarted = true\n\t\t\t\tgo requestPairingPermission(options, completePairing, errorChan)\n\t\t\t\tcontinue\n\t\t\t} else if status == irma.ServerStatusConnected && !pairingStarted {\n\t\t\t\tfmt.Println(\"Pairing is not supported by the connected device.\")\n\t\t\t}\n\t\t\treturn nil\n\t\tcase err := <-errorChan:\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc requestPairingPermission(options *irma.SessionOptions, completePairing func() error, errorChan chan error) {\n\tif options.PairingMethod == irma.PairingMethodPin {\n\t\tfmt.Println(\"\\nPairing code:\", options.PairingCode)\n\t\tfmt.Println(\"Press Enter to confirm your device shows the same pairing code; otherwise press Ctrl-C.\")\n\t\t_, err := bufio.NewReader(os.Stdin).ReadString('\\n')\n\t\tif err != nil {\n\t\t\terrorChan <- err\n\t\t\treturn\n\t\t}\n\t\tif err = completePairing(); err != nil {\n\t\t\terrorChan <- err\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"Pairing completed.\")\n\t\terrorChan <- nil\n\t\treturn\n\t}\n\terrorChan <- errors.Errorf(\"Pairing method %s is not supported\", options.PairingMethod)\n}\n\n\/\/ Configuration functions\n\nfunc configureSessionServer(url string, port int, privatekeysPath string, irmaconfig *irma.Configuration, verbosity int) error {\n\t\/\/ Replace \"port\" in url with actual port\n\treplace := \"$1:\" + strconv.Itoa(port)\n\turl = string(regexp.MustCompile(\"(https?:\/\/[^\/]*):port\").ReplaceAll([]byte(url), []byte(replace)))\n\n\tconfig := &server.Configuration{\n\t\tIrmaConfiguration: irmaconfig,\n\t\tLogger: logger,\n\t\tURL: url,\n\t\tDisableSchemesUpdate: true,\n\t\tVerbose: verbosity,\n\t}\n\tif privatekeysPath != \"\" {\n\t\tconfig.IssuerPrivateKeysPath = privatekeysPath\n\t}\n\n\tvar err error\n\tirmaServer, err = irmaserver.New(config)\n\treturn err\n}\n\nfunc configureSession(cmd *cobra.Command) (irma.RequestorRequest, *irma.Configuration, error) {\n\tverbosity, _ := cmd.Flags().GetCount(\"verbose\")\n\tlogger.Level = server.Verbosity(verbosity)\n\tirma.SetLogger(logger)\n\n\tif localIPErr != nil {\n\t\tlogger.Warn(\"Could not determine local IP address: \", localIPErr.Error())\n\t}\n\n\trequest, irmaconfig, err := configureRequest(cmd)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Make sure we always run with latest configuration\n\tdisableUpdate, _ := cmd.Flags().GetBool(\"disable-schemes-update\")\n\tif !disableUpdate {\n\t\tif err = irmaconfig.UpdateSchemes(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn request, irmaconfig, nil\n}\n\nfunc init() {\n\tRootCmd.AddCommand(sessionCmd)\n\n\tlogger.Formatter = &prefixed.TextFormatter{FullTimestamp: true}\n\n\tif localIP != \"\" {\n\t\tdefaulturl = \"http:\/\/\" + localIP + \":port\"\n\t}\n\n\tflags := sessionCmd.Flags()\n\tflags.SortFlags = false\n\tflags.String(\"server\", \"\", \"External IRMA server to post request to (leave blank to use builtin library)\")\n\tflags.StringP(\"url\", \"u\", defaulturl, \"external URL to which IRMA app connects (when not using --server), \\\":port\\\" being replaced by --port value\")\n\tflags.IntP(\"port\", \"p\", 48680, \"port to listen at (when not using --server)\")\n\tflags.Bool(\"noqr\", false, \"Print JSON instead of draw QR\")\n\tflags.Bool(\"pairing\", false, \"Let IRMA app first pair, by entering the pairing code, before it can access the session\")\n\tflags.StringP(\"request\", \"r\", \"\", \"JSON session request\")\n\tflags.StringP(\"privkeys\", \"k\", \"\", \"path to private keys\")\n\tflags.Bool(\"disable-schemes-update\", false, \"disable scheme updates\")\n\n\taddRequestFlags(flags)\n\n\tflags.String(\"static\", \"\", \"Start a static IRMA session with the given name\")\n\tflags.String(\"from-package\", \"\", \"Start the IRMA session from the given session package\")\n\n\tflags.CountP(\"verbose\", \"v\", \"verbose (repeatable)\")\n}\n<commit_msg>Use --url option for static sessions instead --serverurl to properly handle localhost sessions<commit_after>package cmd\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/go-errors\/errors\"\n\tirma \"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n\t\"github.com\/privacybydesign\/irmago\/server\/irmaserver\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\tprefixed \"github.com\/x-cray\/logrus-prefixed-formatter\"\n)\n\nvar (\n\thttpServer *http.Server\n\tirmaServer *irmaserver.Server\n\tdefaulturl string\n\n\tlogger = logrus.New()\n)\n\n\/\/ sessionCmd represents the session command\nvar sessionCmd = &cobra.Command{\n\tUse: \"session\",\n\tShort: \"Perform an IRMA disclosure, issuance or signature session\",\n\tLong: `Perform an IRMA disclosure, issuance or signature session on the command line\n\nUsing either the builtin IRMA server library, or an external IRMA server (specify its URL\nwith --server), an IRMA session is started; the QR is printed in the terminal; and the session\nresult is printed when the session completes or fails.\n\nA session request can either be constructed using the --disclose, --issue, and --sign together\nwith --message flags, or it can be specified as JSON to the --request flag.`,\n\tExample: `irma session --disclose irma-demo.MijnOverheid.root.BSN\nirma session --sign irma-demo.MijnOverheid.root.BSN --message message\nirma session --issue irma-demo.MijnOverheid.ageLower=yes,yes,yes,no --disclose irma-demo.MijnOverheid.root.BSN\nirma session --request '{\"type\":\"disclosing\",\"content\":[{\"label\":\"BSN\",\"attributes\":[\"irma-demo.MijnOverheid.root.BSN\"]}]}'\nirma session --server http:\/\/localhost:8088 --authmethod token --key mytoken --disclose irma-demo.MijnOverheid.root.BSN\nirma session --server http:\/\/localhost:8088 --static mystaticsession\nirma session --from-package '{\"sessionPtr\": ... , \"frontendRequest\": ...}'`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar (\n\t\t\trequest irma.RequestorRequest\n\t\t\tirmaconfig *irma.Configuration\n\t\t\tpkg *server.SessionPackage\n\t\t\tresult *server.SessionResult\n\t\t\terr error\n\n\t\t\tflags = cmd.Flags()\n\t\t\turl, _ = flags.GetString(\"url\")\n\t\t\tport, _ = flags.GetInt(\"port\")\n\t\t\tserverURL, _ = flags.GetString(\"server\")\n\t\t\tnoqr, _ = flags.GetBool(\"noqr\")\n\t\t\tpairing, _ = flags.GetBool(\"pairing\")\n\t\t\tjsonPkg, _ = flags.GetString(\"from-package\")\n\t\t\tstatic, _ = flags.GetString(\"static\")\n\t\t)\n\t\tif url != defaulturl && serverURL != \"\" {\n\t\t\tdie(\"Failed to read configuration\", errors.New(\"--url can't be combined with --server\"))\n\t\t}\n\n\t\tif static != \"\" {\n\t\t\tif pairing {\n\t\t\t\tdie(\"Failed to read configuration\", errors.New(\"--static can't be combined with --pairing\"))\n\t\t\t}\n\t\t\tif err = staticRequest(url, port, static, noqr); err != nil {\n\t\t\t\tdie(\"Failed to handle static session\", err)\n\t\t\t}\n\t\t\t\/\/ Static sessions are fully handled on the phone.\n\t\t\treturn\n\t\t}\n\n\t\tif jsonPkg == \"\" {\n\t\t\trequest, irmaconfig, err = configureSession(cmd)\n\t\t\tif err != nil {\n\t\t\t\tdie(\"\", err)\n\t\t\t}\n\t\t\tif serverURL != \"\" {\n\t\t\t\tauthMethod, _ := flags.GetString(\"authmethod\")\n\t\t\t\tkey, _ := flags.GetString(\"key\")\n\t\t\t\tname, _ := flags.GetString(\"name\")\n\t\t\t\tpkg, err = postRequest(serverURL, \"session\", request, name, authMethod, key)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdie(\"Session could not be started\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tpkg = &server.SessionPackage{}\n\t\t\terr = json.Unmarshal([]byte(jsonPkg), pkg)\n\t\t\tif err != nil {\n\t\t\t\tdie(\"Failed to parse session package\", err)\n\t\t\t}\n\t\t}\n\n\t\tif pkg == nil {\n\t\t\tprivatekeysPath, _ := flags.GetString(\"privkeys\")\n\t\t\tverbosity, _ := cmd.Flags().GetCount(\"verbose\")\n\t\t\tresult, err = libraryRequest(request, irmaconfig, url, port, privatekeysPath, noqr, verbosity, pairing)\n\t\t} else {\n\t\t\tresult, err = serverRequest(pkg, noqr, pairing)\n\t\t}\n\t\tif err != nil {\n\t\t\tdie(\"Session failed\", err)\n\t\t}\n\n\t\tprintSessionResult(result)\n\n\t\t\/\/ Done!\n\t\tif httpServer != nil {\n\t\t\t_ = httpServer.Close()\n\t\t}\n\t},\n}\n\nfunc libraryRequest(\n\trequest irma.RequestorRequest,\n\tirmaconfig *irma.Configuration,\n\turl string,\n\tport int,\n\tprivatekeysPath string,\n\tnoqr bool,\n\tverbosity int,\n\tpairing bool,\n) (*server.SessionResult, error) {\n\tif err := configureSessionServer(url, port, privatekeysPath, irmaconfig, verbosity); err != nil {\n\t\treturn nil, err\n\t}\n\tstartServer(port)\n\n\t\/\/ Start the session\n\tresultchan := make(chan *server.SessionResult)\n\tqr, requestorToken, _, err := irmaServer.StartSession(request, func(r *server.SessionResult) {\n\t\tresultchan <- r\n\t})\n\tif err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"IRMA session failed\", 0)\n\t}\n\n\t\/\/ Enable pairing if necessary\n\tvar sessionOptions *irma.SessionOptions\n\tif pairing {\n\t\toptionsRequest := irma.NewFrontendOptionsRequest()\n\t\toptionsRequest.PairingMethod = irma.PairingMethodPin\n\t\tif sessionOptions, err = irmaServer.SetFrontendOptions(requestorToken, &optionsRequest); err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to enable pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Print QR code\n\tif err := printQr(qr, noqr); err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"Failed to print QR\", 0)\n\t}\n\n\tif pairing {\n\t\t\/\/ Listen for session status\n\t\tstatuschan, err := irmaServer.SessionStatus(requestorToken)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to start listening for session statuses\", 0)\n\t\t}\n\n\t\terr = handlePairing(sessionOptions, statuschan, func() error {\n\t\t\treturn irmaServer.PairingCompleted(requestorToken)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to handle pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Wait for session to finish and then return session result\n\treturn <-resultchan, nil\n}\n\nfunc serverRequest(\n\tpkg *server.SessionPackage,\n\tnoqr bool,\n\tpairing bool,\n) (*server.SessionResult, error) {\n\t\/\/ Enable pairing if necessary\n\tvar (\n\t\tqr = pkg.SessionPtr\n\t\tfrontendRequest = pkg.FrontendRequest\n\t\ttransport = irma.NewHTTPTransport(qr.URL, false)\n\t\tsessionOptions = &irma.SessionOptions{}\n\t\terr error\n\t)\n\tif pairing {\n\t\ttransport.SetHeader(irma.AuthorizationHeader, string(frontendRequest.Authorization))\n\t\toptionsRequest := irma.NewFrontendOptionsRequest()\n\t\toptionsRequest.PairingMethod = irma.PairingMethodPin\n\t\terr = transport.Post(\"frontend\/options\", sessionOptions, optionsRequest)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WrapPrefix(err, \"Failed to enable pairing\", 0)\n\t\t}\n\t}\n\n\t\/\/ Print session QR\n\tlogger.Debug(\"QR: \", prettyprint(qr))\n\tif err := printQr(qr, noqr); err != nil {\n\t\treturn nil, errors.WrapPrefix(err, \"Failed to print QR\", 0)\n\t}\n\n\tstatuschan := make(chan irma.ServerStatus)\n\terrorchan := make(chan error)\n\tvar wg sync.WaitGroup\n\n\tgo irma.WaitStatus(transport, irma.ServerStatusInitialized, statuschan, errorchan)\n\tgo func() {\n\t\terr := <-errorchan\n\t\tif err != nil {\n\t\t\t_ = server.LogFatal(err)\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif pairing {\n\t\t\terr = handlePairing(sessionOptions, statuschan, func() error {\n\t\t\t\terr = transport.Post(\"frontend\/pairingcompleted\", nil, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.WrapPrefix(err, \"Failed to complete pairing\", 0)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terr = errors.WrapPrefix(err, \"Failed to handle pairing\", 0)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Wait until client connects if pairing is disabled\n\t\t\tstatus := <-statuschan\n\t\t\tif status != irma.ServerStatusConnected {\n\t\t\t\terr = errors.Errorf(\"Unexpected status: %s\", status)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Wait until client finishes\n\t\tstatus := <-statuschan\n\t\tif status != irma.ServerStatusCancelled && status != irma.ServerStatusDone {\n\t\t\terr = errors.Errorf(\"Unexpected status: %s\", status)\n\t\t\treturn\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\tif err == nil && pkg.Token != \"\" {\n\t\tresult := &server.SessionResult{}\n\t\turlParts := strings.Split(pkg.SessionPtr.URL, \"\/irma\/session\/\")\n\t\tif len(urlParts) <= 1 {\n\t\t\t\/\/ Custom URL structure is used, so we cannot determine result endpoint location.\n\t\t\treturn nil, nil\n\t\t}\n\t\tpath := fmt.Sprintf(\"session\/%s\/result\", pkg.Token)\n\t\tif err = irma.NewHTTPTransport(urlParts[0], false).Get(path, result); err == nil {\n\t\t\treturn result, err\n\t\t}\n\t}\n\treturn nil, err\n}\n\nfunc staticRequest(url string, port int, name string, noqr bool) error {\n\turl = configureURL(url, port)\n\tfmt.Println(\"Server URL:\", url)\n\tqr := &irma.Qr{\n\t\tType: irma.ActionRedirect,\n\t\tURL: fmt.Sprintf(\"%s\/irma\/session\/%s\", url, name),\n\t}\n\treturn printQr(qr, noqr)\n}\n\nfunc postRequest(serverURL, path string, request irma.RequestorRequest, name, authMethod, key string) (\n\t*server.SessionPackage, error) {\n\tvar (\n\t\terr error\n\t\tpkg = &server.SessionPackage{}\n\t\ttransport = irma.NewHTTPTransport(serverURL, false)\n\t)\n\n\tswitch authMethod {\n\tcase \"token\":\n\t\ttransport.SetHeader(\"Authorization\", key)\n\t\tfallthrough\n\tcase \"none\":\n\t\terr = transport.Post(path, pkg, request)\n\tcase \"hmac\", \"rsa\":\n\t\tvar jwtstr string\n\t\tjwtstr, err = signRequest(request, name, authMethod, key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(\"Session request JWT: \", jwtstr)\n\t\terr = transport.Post(path, pkg, jwtstr)\n\tdefault:\n\t\treturn nil, errors.New(\"Invalid authentication method (must be none, token, hmac or rsa)\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, err\n}\n\nfunc handlePairing(options *irma.SessionOptions, statusChan chan irma.ServerStatus, completePairing func() error) error {\n\terrorChan := make(chan error)\n\tpairingStarted := false\n\tfor {\n\t\tselect {\n\t\tcase status := <-statusChan:\n\t\t\tif status == irma.ServerStatusInitialized {\n\t\t\t\tcontinue\n\t\t\t} else if status == irma.ServerStatusPairing {\n\t\t\t\tpairingStarted = true\n\t\t\t\tgo requestPairingPermission(options, completePairing, errorChan)\n\t\t\t\tcontinue\n\t\t\t} else if status == irma.ServerStatusConnected && !pairingStarted {\n\t\t\t\tfmt.Println(\"Pairing is not supported by the connected device.\")\n\t\t\t}\n\t\t\treturn nil\n\t\tcase err := <-errorChan:\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc requestPairingPermission(options *irma.SessionOptions, completePairing func() error, errorChan chan error) {\n\tif options.PairingMethod == irma.PairingMethodPin {\n\t\tfmt.Println(\"\\nPairing code:\", options.PairingCode)\n\t\tfmt.Println(\"Press Enter to confirm your device shows the same pairing code; otherwise press Ctrl-C.\")\n\t\t_, err := bufio.NewReader(os.Stdin).ReadString('\\n')\n\t\tif err != nil {\n\t\t\terrorChan <- err\n\t\t\treturn\n\t\t}\n\t\tif err = completePairing(); err != nil {\n\t\t\terrorChan <- err\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"Pairing completed.\")\n\t\terrorChan <- nil\n\t\treturn\n\t}\n\terrorChan <- errors.Errorf(\"Pairing method %s is not supported\", options.PairingMethod)\n}\n\n\/\/ Configuration functions\n\nfunc configureURL(url string, port int) string {\n\t\/\/ Replace \"port\" in url with actual port\n\treplace := \"$1:\" + strconv.Itoa(port)\n\treturn string(regexp.MustCompile(\"(https?:\/\/[^\/]*):port\").ReplaceAll([]byte(url), []byte(replace)))\n}\n\nfunc configureSessionServer(url string, port int, privatekeysPath string, irmaconfig *irma.Configuration, verbosity int) error {\n\turl = configureURL(url, port)\n\tconfig := &server.Configuration{\n\t\tIrmaConfiguration: irmaconfig,\n\t\tLogger: logger,\n\t\tURL: url,\n\t\tDisableSchemesUpdate: true,\n\t\tVerbose: verbosity,\n\t}\n\tif privatekeysPath != \"\" {\n\t\tconfig.IssuerPrivateKeysPath = privatekeysPath\n\t}\n\n\tvar err error\n\tirmaServer, err = irmaserver.New(config)\n\treturn err\n}\n\nfunc configureSession(cmd *cobra.Command) (irma.RequestorRequest, *irma.Configuration, error) {\n\tverbosity, _ := cmd.Flags().GetCount(\"verbose\")\n\tlogger.Level = server.Verbosity(verbosity)\n\tirma.SetLogger(logger)\n\n\tif localIPErr != nil {\n\t\tlogger.Warn(\"Could not determine local IP address: \", localIPErr.Error())\n\t}\n\n\trequest, irmaconfig, err := configureRequest(cmd)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Make sure we always run with latest configuration\n\tdisableUpdate, _ := cmd.Flags().GetBool(\"disable-schemes-update\")\n\tif !disableUpdate {\n\t\tif err = irmaconfig.UpdateSchemes(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn request, irmaconfig, nil\n}\n\nfunc init() {\n\tRootCmd.AddCommand(sessionCmd)\n\n\tlogger.Formatter = &prefixed.TextFormatter{FullTimestamp: true}\n\n\tif localIP != \"\" {\n\t\tdefaulturl = \"http:\/\/\" + localIP + \":port\"\n\t}\n\n\tflags := sessionCmd.Flags()\n\tflags.SortFlags = false\n\tflags.String(\"server\", \"\", \"External IRMA server to post request to (leave blank to use builtin library)\")\n\tflags.StringP(\"url\", \"u\", defaulturl, \"external URL to which IRMA app connects (when not using --server), \\\":port\\\" being replaced by --port value\")\n\tflags.IntP(\"port\", \"p\", 48680, \"port to listen at (when not using --server)\")\n\tflags.Bool(\"noqr\", false, \"Print JSON instead of draw QR\")\n\tflags.Bool(\"pairing\", false, \"Let IRMA app first pair, by entering the pairing code, before it can access the session\")\n\tflags.StringP(\"request\", \"r\", \"\", \"JSON session request\")\n\tflags.StringP(\"privkeys\", \"k\", \"\", \"path to private keys\")\n\tflags.Bool(\"disable-schemes-update\", false, \"disable scheme updates\")\n\n\taddRequestFlags(flags)\n\n\tflags.String(\"static\", \"\", \"Start a static IRMA session with the given name\")\n\tflags.String(\"from-package\", \"\", \"Start the IRMA session from the given session package\")\n\n\tflags.CountP(\"verbose\", \"v\", \"verbose (repeatable)\")\n}\n<|endoftext|>"} {"text":"<commit_before>package data\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tcjson \"github.com\/tent\/canonical-json-go\"\n)\n\nconst (\n\tKeyIDLength = sha256.Size * 2\n\tKeyTypeEd25519 = \"ed25519\"\n\tKeyTypeECDSA_SHA2_P256 = \"ecdsa-sha2-nistp256\"\n\tKeySchemeEd25519 = \"ed25519\"\n\tKeySchemeECDSA_SHA2_P256 = \"ecdsa-sha2-nistp256\"\n\tKeyTypeRSASSA_PSS_SHA256 = \"rsa\"\n\tKeySchemeRSASSA_PSS_SHA256 = \"rsassa-pss-sha256\"\n)\n\nvar (\n\tHashAlgorithms = []string{\"sha256\", \"sha512\"}\n\tErrPathsAndPathHashesSet = errors.New(\"tuf: failed validation of delegated target: paths and path_hash_prefixes are both set\")\n)\n\ntype Signed struct {\n\tSigned json.RawMessage `json:\"signed\"`\n\tSignatures []Signature `json:\"signatures\"`\n}\n\ntype Signature struct {\n\tKeyID string `json:\"keyid\"`\n\tSignature HexBytes `json:\"sig\"`\n}\n\ntype PublicKey struct {\n\tType string `json:\"keytype\"`\n\tScheme string `json:\"scheme\"`\n\tAlgorithms []string `json:\"keyid_hash_algorithms,omitempty\"`\n\tValue json.RawMessage `json:\"keyval\"`\n\n\tids []string\n\tidOnce sync.Once\n}\n\ntype PrivateKey struct {\n\tType string `json:\"keytype\"`\n\tScheme string `json:\"scheme,omitempty\"`\n\tAlgorithms []string `json:\"keyid_hash_algorithms,omitempty\"`\n\tValue json.RawMessage `json:\"keyval\"`\n}\n\nfunc (k *PublicKey) IDs() []string {\n\tk.idOnce.Do(func() {\n\t\tdata, err := cjson.Marshal(k)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"tuf: error creating key ID: %w\", err))\n\t\t}\n\t\tdigest := sha256.Sum256(data)\n\t\tk.ids = []string{hex.EncodeToString(digest[:])}\n\t})\n\treturn k.ids\n}\n\nfunc (k *PublicKey) ContainsID(id string) bool {\n\tfor _, keyid := range k.IDs() {\n\t\tif id == keyid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc DefaultExpires(role string) time.Time {\n\tvar t time.Time\n\tswitch role {\n\tcase \"root\":\n\t\tt = time.Now().AddDate(1, 0, 0)\n\tcase \"targets\":\n\t\tt = time.Now().AddDate(0, 3, 0)\n\tcase \"snapshot\":\n\t\tt = time.Now().AddDate(0, 0, 7)\n\tcase \"timestamp\":\n\t\tt = time.Now().AddDate(0, 0, 1)\n\t}\n\treturn t.UTC().Round(time.Second)\n}\n\ntype Root struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tKeys map[string]*PublicKey `json:\"keys\"`\n\tRoles map[string]*Role `json:\"roles\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n\n\tConsistentSnapshot bool `json:\"consistent_snapshot\"`\n}\n\nfunc NewRoot() *Root {\n\treturn &Root{\n\t\tType: \"root\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"root\"),\n\t\tKeys: make(map[string]*PublicKey),\n\t\tRoles: make(map[string]*Role),\n\t\tConsistentSnapshot: true,\n\t}\n}\n\nfunc (r *Root) AddKey(key *PublicKey) bool {\n\tchanged := false\n\tfor _, id := range key.IDs() {\n\t\tif _, ok := r.Keys[id]; !ok {\n\t\t\tchanged = true\n\t\t\tr.Keys[id] = key\n\t\t}\n\t}\n\treturn changed\n}\n\ntype Role struct {\n\tKeyIDs []string `json:\"keyids\"`\n\tThreshold int `json:\"threshold\"`\n}\n\nfunc (r *Role) AddKeyIDs(ids []string) bool {\n\troleIDs := make(map[string]struct{})\n\tfor _, id := range r.KeyIDs {\n\t\troleIDs[id] = struct{}{}\n\t}\n\tchanged := false\n\tfor _, id := range ids {\n\t\tif _, ok := roleIDs[id]; !ok {\n\t\t\tchanged = true\n\t\t\tr.KeyIDs = append(r.KeyIDs, id)\n\t\t}\n\t}\n\treturn changed\n}\n\ntype Files map[string]FileMeta\n\ntype FileMeta struct {\n\tLength int64 `json:\"length\",omitempty`\n\tHashes Hashes `json:\"hashes\",omitempty`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\ntype Hashes map[string]HexBytes\n\nfunc (f FileMeta) HashAlgorithms() []string {\n\tfuncs := make([]string, 0, len(f.Hashes))\n\tfor name := range f.Hashes {\n\t\tfuncs = append(funcs, name)\n\t}\n\treturn funcs\n}\n\ntype SnapshotFileMeta struct {\n\tFileMeta\n\tVersion int `json:\"version\"`\n}\n\ntype SnapshotFiles map[string]SnapshotFileMeta\n\ntype Snapshot struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tMeta SnapshotFiles `json:\"meta\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\nfunc NewSnapshot() *Snapshot {\n\treturn &Snapshot{\n\t\tType: \"snapshot\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"snapshot\"),\n\t\tMeta: make(SnapshotFiles),\n\t}\n}\n\ntype TargetFiles map[string]TargetFileMeta\n\ntype TargetFileMeta struct {\n\tFileMeta\n}\n\nfunc (f TargetFileMeta) HashAlgorithms() []string {\n\treturn f.FileMeta.HashAlgorithms()\n}\n\ntype Targets struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tTargets TargetFiles `json:\"targets\"`\n\tDelegations *Delegations `json:\"delegations,omitempty\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\n\/\/ Delegations represents the edges from a parent Targets role to one or more\n\/\/ delegated target roles. See spec v1.0.19 section 4.5.\ntype Delegations struct {\n\tKeys map[string]*PublicKey `json:\"keys\"`\n\tRoles []DelegatedRole `json:\"roles\"`\n}\n\n\/\/ DelegatedRole describes a delegated role, including what paths it is\n\/\/ reponsible for. See spec v1.0.19 section 4.5.\ntype DelegatedRole struct {\n\tName string `json:\"name\"`\n\tKeyIDs []string `json:\"keyids\"`\n\tThreshold int `json:\"threshold\"`\n\tTerminating bool `json:\"terminating\"`\n\tPathHashPrefixes []string `json:\"path_hash_prefixes,omitempty\"`\n\tPaths []string `json:\"paths\"`\n}\n\n\/\/ MatchesPath evaluates whether the path patterns or path hash prefixes match\n\/\/ a given file. This determines whether a delegated role is responsible for\n\/\/ signing and verifying the file.\nfunc (d *DelegatedRole) MatchesPath(file string) (bool, error) {\n\tif err := d.validatePaths(); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, pattern := range d.Paths {\n\t\tif matched, _ := filepath.Match(pattern, file); matched {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tpathHash := PathHexDigest(file)\n\tfor _, hashPrefix := range d.PathHashPrefixes {\n\t\tif strings.HasPrefix(pathHash, hashPrefix) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\n\/\/ validatePaths enforces the spec\n\/\/ https:\/\/theupdateframework.github.io\/specification\/v1.0.19\/index.html#file-formats-targets\n\/\/ 'role MUST specify only one of the \"path_hash_prefixes\" or \"paths\"'\n\/\/ Marshalling and unmarshalling JSON will fail and return\n\/\/ ErrPathsAndPathHashesSet if both fields are set and not empty.\nfunc (d *DelegatedRole) validatePaths() error {\n\tif len(d.PathHashPrefixes) > 0 && len(d.Paths) > 0 {\n\t\treturn ErrPathsAndPathHashesSet\n\t}\n\n\treturn nil\n}\n\n\/\/ MarshalJSON is called when writing the struct to JSON. We validate prior to\n\/\/ marshalling to ensure that an invalid delegated role can not be serialized\n\/\/ to JSON.\nfunc (d *DelegatedRole) MarshalJSON() ([]byte, error) {\n\ttype delegatedRoleAlias DelegatedRole\n\n\tif err := d.validatePaths(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal((*delegatedRoleAlias)(d))\n}\n\n\/\/ UnmarshalJSON is called when reading the struct from JSON. We validate once\n\/\/ unmarshalled to ensure that an error is thrown if an invalid delegated role\n\/\/ is read.\nfunc (d *DelegatedRole) UnmarshalJSON(b []byte) error {\n\ttype delegatedRoleAlias DelegatedRole\n\n\tif err := json.Unmarshal(b, (*delegatedRoleAlias)(d)); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.validatePaths()\n}\n\nfunc NewTargets() *Targets {\n\treturn &Targets{\n\t\tType: \"targets\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"targets\"),\n\t\tTargets: make(TargetFiles),\n\t}\n}\n\ntype TimestampFileMeta struct {\n\tFileMeta\n\tVersion int `json:\"version\"`\n}\n\ntype TimestampFiles map[string]TimestampFileMeta\n\ntype Timestamp struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tMeta TimestampFiles `json:\"meta\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\nfunc NewTimestamp() *Timestamp {\n\treturn &Timestamp{\n\t\tType: \"timestamp\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"timestamp\"),\n\t\tMeta: make(TimestampFiles),\n\t}\n}\n<commit_msg>Fix JSON struct tag for FileMeta (#172)<commit_after>package data\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tcjson \"github.com\/tent\/canonical-json-go\"\n)\n\nconst (\n\tKeyIDLength = sha256.Size * 2\n\tKeyTypeEd25519 = \"ed25519\"\n\tKeyTypeECDSA_SHA2_P256 = \"ecdsa-sha2-nistp256\"\n\tKeySchemeEd25519 = \"ed25519\"\n\tKeySchemeECDSA_SHA2_P256 = \"ecdsa-sha2-nistp256\"\n\tKeyTypeRSASSA_PSS_SHA256 = \"rsa\"\n\tKeySchemeRSASSA_PSS_SHA256 = \"rsassa-pss-sha256\"\n)\n\nvar (\n\tHashAlgorithms = []string{\"sha256\", \"sha512\"}\n\tErrPathsAndPathHashesSet = errors.New(\"tuf: failed validation of delegated target: paths and path_hash_prefixes are both set\")\n)\n\ntype Signed struct {\n\tSigned json.RawMessage `json:\"signed\"`\n\tSignatures []Signature `json:\"signatures\"`\n}\n\ntype Signature struct {\n\tKeyID string `json:\"keyid\"`\n\tSignature HexBytes `json:\"sig\"`\n}\n\ntype PublicKey struct {\n\tType string `json:\"keytype\"`\n\tScheme string `json:\"scheme\"`\n\tAlgorithms []string `json:\"keyid_hash_algorithms,omitempty\"`\n\tValue json.RawMessage `json:\"keyval\"`\n\n\tids []string\n\tidOnce sync.Once\n}\n\ntype PrivateKey struct {\n\tType string `json:\"keytype\"`\n\tScheme string `json:\"scheme,omitempty\"`\n\tAlgorithms []string `json:\"keyid_hash_algorithms,omitempty\"`\n\tValue json.RawMessage `json:\"keyval\"`\n}\n\nfunc (k *PublicKey) IDs() []string {\n\tk.idOnce.Do(func() {\n\t\tdata, err := cjson.Marshal(k)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"tuf: error creating key ID: %w\", err))\n\t\t}\n\t\tdigest := sha256.Sum256(data)\n\t\tk.ids = []string{hex.EncodeToString(digest[:])}\n\t})\n\treturn k.ids\n}\n\nfunc (k *PublicKey) ContainsID(id string) bool {\n\tfor _, keyid := range k.IDs() {\n\t\tif id == keyid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc DefaultExpires(role string) time.Time {\n\tvar t time.Time\n\tswitch role {\n\tcase \"root\":\n\t\tt = time.Now().AddDate(1, 0, 0)\n\tcase \"targets\":\n\t\tt = time.Now().AddDate(0, 3, 0)\n\tcase \"snapshot\":\n\t\tt = time.Now().AddDate(0, 0, 7)\n\tcase \"timestamp\":\n\t\tt = time.Now().AddDate(0, 0, 1)\n\t}\n\treturn t.UTC().Round(time.Second)\n}\n\ntype Root struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tKeys map[string]*PublicKey `json:\"keys\"`\n\tRoles map[string]*Role `json:\"roles\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n\n\tConsistentSnapshot bool `json:\"consistent_snapshot\"`\n}\n\nfunc NewRoot() *Root {\n\treturn &Root{\n\t\tType: \"root\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"root\"),\n\t\tKeys: make(map[string]*PublicKey),\n\t\tRoles: make(map[string]*Role),\n\t\tConsistentSnapshot: true,\n\t}\n}\n\nfunc (r *Root) AddKey(key *PublicKey) bool {\n\tchanged := false\n\tfor _, id := range key.IDs() {\n\t\tif _, ok := r.Keys[id]; !ok {\n\t\t\tchanged = true\n\t\t\tr.Keys[id] = key\n\t\t}\n\t}\n\treturn changed\n}\n\ntype Role struct {\n\tKeyIDs []string `json:\"keyids\"`\n\tThreshold int `json:\"threshold\"`\n}\n\nfunc (r *Role) AddKeyIDs(ids []string) bool {\n\troleIDs := make(map[string]struct{})\n\tfor _, id := range r.KeyIDs {\n\t\troleIDs[id] = struct{}{}\n\t}\n\tchanged := false\n\tfor _, id := range ids {\n\t\tif _, ok := roleIDs[id]; !ok {\n\t\t\tchanged = true\n\t\t\tr.KeyIDs = append(r.KeyIDs, id)\n\t\t}\n\t}\n\treturn changed\n}\n\ntype Files map[string]FileMeta\n\ntype FileMeta struct {\n\tLength int64 `json:\"length,omitempty\"`\n\tHashes Hashes `json:\"hashes,omitempty\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\ntype Hashes map[string]HexBytes\n\nfunc (f FileMeta) HashAlgorithms() []string {\n\tfuncs := make([]string, 0, len(f.Hashes))\n\tfor name := range f.Hashes {\n\t\tfuncs = append(funcs, name)\n\t}\n\treturn funcs\n}\n\ntype SnapshotFileMeta struct {\n\tFileMeta\n\tVersion int `json:\"version\"`\n}\n\ntype SnapshotFiles map[string]SnapshotFileMeta\n\ntype Snapshot struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tMeta SnapshotFiles `json:\"meta\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\nfunc NewSnapshot() *Snapshot {\n\treturn &Snapshot{\n\t\tType: \"snapshot\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"snapshot\"),\n\t\tMeta: make(SnapshotFiles),\n\t}\n}\n\ntype TargetFiles map[string]TargetFileMeta\n\ntype TargetFileMeta struct {\n\tFileMeta\n}\n\nfunc (f TargetFileMeta) HashAlgorithms() []string {\n\treturn f.FileMeta.HashAlgorithms()\n}\n\ntype Targets struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tTargets TargetFiles `json:\"targets\"`\n\tDelegations *Delegations `json:\"delegations,omitempty\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\n\/\/ Delegations represents the edges from a parent Targets role to one or more\n\/\/ delegated target roles. See spec v1.0.19 section 4.5.\ntype Delegations struct {\n\tKeys map[string]*PublicKey `json:\"keys\"`\n\tRoles []DelegatedRole `json:\"roles\"`\n}\n\n\/\/ DelegatedRole describes a delegated role, including what paths it is\n\/\/ reponsible for. See spec v1.0.19 section 4.5.\ntype DelegatedRole struct {\n\tName string `json:\"name\"`\n\tKeyIDs []string `json:\"keyids\"`\n\tThreshold int `json:\"threshold\"`\n\tTerminating bool `json:\"terminating\"`\n\tPathHashPrefixes []string `json:\"path_hash_prefixes,omitempty\"`\n\tPaths []string `json:\"paths\"`\n}\n\n\/\/ MatchesPath evaluates whether the path patterns or path hash prefixes match\n\/\/ a given file. This determines whether a delegated role is responsible for\n\/\/ signing and verifying the file.\nfunc (d *DelegatedRole) MatchesPath(file string) (bool, error) {\n\tif err := d.validatePaths(); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, pattern := range d.Paths {\n\t\tif matched, _ := filepath.Match(pattern, file); matched {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tpathHash := PathHexDigest(file)\n\tfor _, hashPrefix := range d.PathHashPrefixes {\n\t\tif strings.HasPrefix(pathHash, hashPrefix) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\n\/\/ validatePaths enforces the spec\n\/\/ https:\/\/theupdateframework.github.io\/specification\/v1.0.19\/index.html#file-formats-targets\n\/\/ 'role MUST specify only one of the \"path_hash_prefixes\" or \"paths\"'\n\/\/ Marshalling and unmarshalling JSON will fail and return\n\/\/ ErrPathsAndPathHashesSet if both fields are set and not empty.\nfunc (d *DelegatedRole) validatePaths() error {\n\tif len(d.PathHashPrefixes) > 0 && len(d.Paths) > 0 {\n\t\treturn ErrPathsAndPathHashesSet\n\t}\n\n\treturn nil\n}\n\n\/\/ MarshalJSON is called when writing the struct to JSON. We validate prior to\n\/\/ marshalling to ensure that an invalid delegated role can not be serialized\n\/\/ to JSON.\nfunc (d *DelegatedRole) MarshalJSON() ([]byte, error) {\n\ttype delegatedRoleAlias DelegatedRole\n\n\tif err := d.validatePaths(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal((*delegatedRoleAlias)(d))\n}\n\n\/\/ UnmarshalJSON is called when reading the struct from JSON. We validate once\n\/\/ unmarshalled to ensure that an error is thrown if an invalid delegated role\n\/\/ is read.\nfunc (d *DelegatedRole) UnmarshalJSON(b []byte) error {\n\ttype delegatedRoleAlias DelegatedRole\n\n\tif err := json.Unmarshal(b, (*delegatedRoleAlias)(d)); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.validatePaths()\n}\n\nfunc NewTargets() *Targets {\n\treturn &Targets{\n\t\tType: \"targets\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"targets\"),\n\t\tTargets: make(TargetFiles),\n\t}\n}\n\ntype TimestampFileMeta struct {\n\tFileMeta\n\tVersion int `json:\"version\"`\n}\n\ntype TimestampFiles map[string]TimestampFileMeta\n\ntype Timestamp struct {\n\tType string `json:\"_type\"`\n\tSpecVersion string `json:\"spec_version\"`\n\tVersion int `json:\"version\"`\n\tExpires time.Time `json:\"expires\"`\n\tMeta TimestampFiles `json:\"meta\"`\n\tCustom *json.RawMessage `json:\"custom,omitempty\"`\n}\n\nfunc NewTimestamp() *Timestamp {\n\treturn &Timestamp{\n\t\tType: \"timestamp\",\n\t\tSpecVersion: \"1.0\",\n\t\tExpires: DefaultExpires(\"timestamp\"),\n\t\tMeta: make(TimestampFiles),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2014 CoreOS, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage etcdserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/etcdserver\/stats\"\n\t\"github.com\/coreos\/etcd\/pkg\/strutil\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n)\n\nconst raftPrefix = \"\/raft\"\n\n\/\/ Sender creates the default production sender used to transport raft messages\n\/\/ in the cluster. The returned sender will update the given ServerStats and\n\/\/ LeaderStats appropriately.\nfunc Sender(t *http.Transport, cl *Cluster, ss *stats.ServerStats, ls *stats.LeaderStats) func(msgs []raftpb.Message) {\n\tc := &http.Client{Transport: t}\n\n\treturn func(msgs []raftpb.Message) {\n\t\tfor _, m := range msgs {\n\t\t\t\/\/ TODO: reuse go routines\n\t\t\t\/\/ limit the number of outgoing connections for the same receiver\n\t\t\tgo send(c, cl, m, ss, ls)\n\t\t}\n\t}\n}\n\n\/\/ send uses the given client to send a message to a member in the given\n\/\/ ClusterStore, retrying up to 3 times for each message. The given\n\/\/ ServerStats and LeaderStats are updated appropriately\nfunc send(c *http.Client, cl *Cluster, m raftpb.Message, ss *stats.ServerStats, ls *stats.LeaderStats) {\n\tcid := cl.ID()\n\t\/\/ TODO (xiangli): reasonable retry logic\n\tfor i := 0; i < 3; i++ {\n\t\tmemb := cl.Member(m.To)\n\t\tif memb == nil {\n\t\t\t\/\/ TODO: unknown peer id.. what do we do? I\n\t\t\t\/\/ don't think his should ever happen, need to\n\t\t\t\/\/ look into this further.\n\t\t\tlog.Printf(\"etcdhttp: no member for %d\", m.To)\n\t\t\treturn\n\t\t}\n\t\tu := fmt.Sprintf(\"%s%s\", memb.PickPeerURL(), raftPrefix)\n\n\t\t\/\/ TODO: don't block. we should be able to have 1000s\n\t\t\/\/ of messages out at a time.\n\t\tdata, err := m.Marshal()\n\t\tif err != nil {\n\t\t\tlog.Println(\"etcdhttp: dropping message:\", err)\n\t\t\treturn \/\/ drop bad message\n\t\t}\n\t\tif m.Type == raftpb.MsgApp {\n\t\t\tss.SendAppendReq(len(data))\n\t\t}\n\t\tto := strutil.IDAsHex(m.To)\n\t\tfs := ls.Follower(to)\n\n\t\tstart := time.Now()\n\t\tsent := httpPost(c, u, cid, data)\n\t\tend := time.Now()\n\t\tif sent {\n\t\t\tfs.Succ(end.Sub(start))\n\t\t\treturn\n\t\t}\n\t\tfs.Fail()\n\t\t\/\/ TODO: backoff\n\t}\n}\n\n\/\/ httpPost POSTs a data payload to a url using the given client. Returns true\n\/\/ if the POST succeeds, false on any failure.\nfunc httpPost(c *http.Client, url string, cid uint64, data []byte) bool {\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\t\/\/ TODO: log the error?\n\t\treturn false\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/protobuf\")\n\treq.Header.Set(\"X-Etcd-Cluster-ID\", strconv.FormatUint(cid, 16))\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\t\/\/ TODO: log the error?\n\t\treturn false\n\t}\n\tresp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase http.StatusPreconditionFailed:\n\t\t\/\/ TODO: shutdown the etcdserver gracefully?\n\t\tlog.Panicf(\"clusterID mismatch\")\n\t\treturn false\n\tcase http.StatusForbidden:\n\t\t\/\/ TODO: stop the server\n\t\tlog.Panicf(\"the member has been removed\")\n\t\treturn false\n\tcase http.StatusNoContent:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>etcdserver: log member ID as hex string<commit_after>\/*\n Copyright 2014 CoreOS, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage etcdserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/etcdserver\/stats\"\n\t\"github.com\/coreos\/etcd\/pkg\/strutil\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n)\n\nconst raftPrefix = \"\/raft\"\n\n\/\/ Sender creates the default production sender used to transport raft messages\n\/\/ in the cluster. The returned sender will update the given ServerStats and\n\/\/ LeaderStats appropriately.\nfunc Sender(t *http.Transport, cl *Cluster, ss *stats.ServerStats, ls *stats.LeaderStats) func(msgs []raftpb.Message) {\n\tc := &http.Client{Transport: t}\n\n\treturn func(msgs []raftpb.Message) {\n\t\tfor _, m := range msgs {\n\t\t\t\/\/ TODO: reuse go routines\n\t\t\t\/\/ limit the number of outgoing connections for the same receiver\n\t\t\tgo send(c, cl, m, ss, ls)\n\t\t}\n\t}\n}\n\n\/\/ send uses the given client to send a message to a member in the given\n\/\/ ClusterStore, retrying up to 3 times for each message. The given\n\/\/ ServerStats and LeaderStats are updated appropriately\nfunc send(c *http.Client, cl *Cluster, m raftpb.Message, ss *stats.ServerStats, ls *stats.LeaderStats) {\n\tcid := cl.ID()\n\t\/\/ TODO (xiangli): reasonable retry logic\n\tfor i := 0; i < 3; i++ {\n\t\tmemb := cl.Member(m.To)\n\t\tif memb == nil {\n\t\t\t\/\/ TODO: unknown peer id.. what do we do? I\n\t\t\t\/\/ don't think his should ever happen, need to\n\t\t\t\/\/ look into this further.\n\t\t\tlog.Printf(\"etcdhttp: no member for %s\", strutil.IDAsHex(m.To))\n\t\t\treturn\n\t\t}\n\t\tu := fmt.Sprintf(\"%s%s\", memb.PickPeerURL(), raftPrefix)\n\n\t\t\/\/ TODO: don't block. we should be able to have 1000s\n\t\t\/\/ of messages out at a time.\n\t\tdata, err := m.Marshal()\n\t\tif err != nil {\n\t\t\tlog.Println(\"etcdhttp: dropping message:\", err)\n\t\t\treturn \/\/ drop bad message\n\t\t}\n\t\tif m.Type == raftpb.MsgApp {\n\t\t\tss.SendAppendReq(len(data))\n\t\t}\n\t\tto := strutil.IDAsHex(m.To)\n\t\tfs := ls.Follower(to)\n\n\t\tstart := time.Now()\n\t\tsent := httpPost(c, u, cid, data)\n\t\tend := time.Now()\n\t\tif sent {\n\t\t\tfs.Succ(end.Sub(start))\n\t\t\treturn\n\t\t}\n\t\tfs.Fail()\n\t\t\/\/ TODO: backoff\n\t}\n}\n\n\/\/ httpPost POSTs a data payload to a url using the given client. Returns true\n\/\/ if the POST succeeds, false on any failure.\nfunc httpPost(c *http.Client, url string, cid uint64, data []byte) bool {\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\t\/\/ TODO: log the error?\n\t\treturn false\n\t}\n\treq.Header.Set(\"Content-Type\", \"application\/protobuf\")\n\treq.Header.Set(\"X-Etcd-Cluster-ID\", strconv.FormatUint(cid, 16))\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\t\/\/ TODO: log the error?\n\t\treturn false\n\t}\n\tresp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase http.StatusPreconditionFailed:\n\t\t\/\/ TODO: shutdown the etcdserver gracefully?\n\t\tlog.Panicf(\"clusterID mismatch\")\n\t\treturn false\n\tcase http.StatusForbidden:\n\t\t\/\/ TODO: stop the server\n\t\tlog.Panicf(\"the member has been removed\")\n\t\treturn false\n\tcase http.StatusNoContent:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"tour\/pic\"\n)\n\ntype Image [][]uint8\n\nfunc (m Image) ColorModel() color.Model {\n\treturn color.RGBAModel\n}\n\nfunc (m Image) Bounds() image.Rectangle {\n\tdx, dy := len(m), 0\n\tif dx > 0 {\n\t\tdy = len(m[0])\n\t}\n\n\treturn image.Rect(0, 0, dx, dy)\n}\n\nfunc (m Image) At(x, y int) color.Color {\n\tv := m[x][y]\n\treturn color.RGBA{v, v, 255, 255}\n}\n\nfunc NewImage(x, y int, f func(int, int) uint8) Image {\n\tm := make([][]uint8, x)\n\tfor i := range m {\n\t\tm[i] = make([]uint8, y)\n\t\tfor j := range m[i] {\n\t\t\tm[i][j] = f(i, j)\n\t\t}\n\t}\n\treturn m\n}\n\nfunc main() {\n\tm := NewImage(255, 255, func(a, b int) uint8 { return uint8(a * b) })\n\tpic.ShowImage(m)\n}\n<commit_msg>[x\/tour] go-tour\/gotour\/solution: Adding alternative solution to the image exercise.<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"tour\/pic\"\n)\n\ntype Image struct {\n\tHeight, Width int\n}\n\nfunc (m Image) ColorModel() color.Model {\n\treturn color.RGBAModel\n}\n\nfunc (m Image) Bounds() image.Rectangle {\n\treturn image.Rect(0, 0, m.Height, m.Width)\n}\n\nfunc (m Image) At(x, y int) color.Color {\n\tc := uint8(x ^ y)\n\treturn color.RGBA{c, c, 255, 255}\n}\n\nfunc main() {\n\tm := Image{256, 256}\n\tpic.ShowImage(m)\n}\n<|endoftext|>"} {"text":"<commit_before>package e2etest\n\nimport (\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/hashicorp\/terraform\/e2e\"\n)\n\n\/\/ The tests in this file are for the \"primary workflow\", which includes\n\/\/ variants of the following sequence, with different details:\n\/\/ terraform init\n\/\/ terraform plan\n\/\/ terraform apply\n\/\/ terraform destroy\n\nfunc TestPrimarySeparatePlan(t *testing.T) {\n\tt.Parallel()\n\n\t\/\/ This test reaches out to releases.hashicorp.com to download the\n\t\/\/ template and null providers, so it can only run if network access is\n\t\/\/ allowed.\n\tskipIfCannotAccessNetwork(t)\n\n\tfixturePath := filepath.Join(\"test-fixtures\", \"full-workflow-null\")\n\ttf := e2e.NewBinary(terraformBin, fixturePath)\n\tdefer tf.Close()\n\n\t\/\/\/\/ INIT\n\tstdout, stderr, err := tf.Run(\"init\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected init error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\t\/\/ Make sure we actually downloaded the plugins, rather than picking up\n\t\/\/ copies that might be already installed globally on the system.\n\tif !strings.Contains(stdout, \"- Downloading plugin for provider \\\"template\\\"\") {\n\t\tt.Errorf(\"template provider download message is missing from init output:\\n%s\", stdout)\n\t\tt.Logf(\"(this can happen if you have a copy of the plugin in one of the global plugin search dirs)\")\n\t}\n\tif !strings.Contains(stdout, \"- Downloading plugin for provider \\\"null\\\"\") {\n\t\tt.Errorf(\"null provider download message is missing from init output:\\n%s\", stdout)\n\t\tt.Logf(\"(this can happen if you have a copy of the plugin in one of the global plugin search dirs)\")\n\t}\n\n\t\/\/\/\/ PLAN\n\tstdout, stderr, err = tf.Run(\"plan\", \"-out=tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected plan error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"1 to add, 0 to change, 0 to destroy\") {\n\t\tt.Errorf(\"incorrect plan tally; want 1 to add:\\n%s\", stdout)\n\t}\n\n\tplan, err := tf.Plan(\"tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read plan file: %s\", err)\n\t}\n\n\tstateResources := plan.State.RootModule().Resources\n\tdiffResources := plan.Diff.RootModule().Resources\n\n\tif len(stateResources) != 1 || stateResources[\"data.template_file.test\"] == nil {\n\t\tt.Errorf(\"incorrect state in plan; want just data.template_file.test to have been rendered, but have:\\n%s\", spew.Sdump(stateResources))\n\t}\n\tif len(diffResources) != 1 || diffResources[\"null_resource.test\"] == nil {\n\t\tt.Errorf(\"incorrect diff in plan; want just null_resource.test to have been rendered, but have:\\n%s\", spew.Sdump(diffResources))\n\t}\n\n\t\/\/\/\/ APPLY\n\tstdout, stderr, err = tf.Run(\"apply\", \"tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected apply error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"Resources: 1 added, 0 changed, 0 destroyed\") {\n\t\tt.Errorf(\"incorrect apply tally; want 1 added:\\n%s\", stdout)\n\t}\n\n\tstate, err := tf.LocalState()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read state file: %s\", err)\n\t}\n\n\tstateResources = state.RootModule().Resources\n\tvar gotResources []string\n\tfor n := range stateResources {\n\t\tgotResources = append(gotResources, n)\n\t}\n\tsort.Strings(gotResources)\n\n\twantResources := []string{\n\t\t\"data.template_file.test\",\n\t\t\"null_resource.test\",\n\t}\n\n\tif !reflect.DeepEqual(gotResources, wantResources) {\n\t\tt.Errorf(\"wrong resources in state\\ngot: %#v\\nwant: %#v\", gotResources, wantResources)\n\t}\n\n\t\/\/\/\/ DESTROY\n\tstdout, stderr, err = tf.Run(\"destroy\", \"-force\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected destroy error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"Resources: 2 destroyed\") {\n\t\tt.Errorf(\"incorrect destroy tally; want 2 destroyed:\\n%s\", stdout)\n\t}\n\n\tstate, err = tf.LocalState()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read state file after destroy: %s\", err)\n\t}\n\n\tstateResources = state.RootModule().Resources\n\tif len(stateResources) != 0 {\n\t\tt.Errorf(\"wrong resources in state after destroy; want none, but still have:%s\", spew.Sdump(stateResources))\n\t}\n\n}\n<commit_msg>command\/e2etest: fix TestPrimarySeparatePlan test<commit_after>package e2etest\n\nimport (\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/hashicorp\/terraform\/e2e\"\n)\n\n\/\/ The tests in this file are for the \"primary workflow\", which includes\n\/\/ variants of the following sequence, with different details:\n\/\/ terraform init\n\/\/ terraform plan\n\/\/ terraform apply\n\/\/ terraform destroy\n\nfunc TestPrimarySeparatePlan(t *testing.T) {\n\tt.Parallel()\n\n\t\/\/ This test reaches out to releases.hashicorp.com to download the\n\t\/\/ template and null providers, so it can only run if network access is\n\t\/\/ allowed.\n\tskipIfCannotAccessNetwork(t)\n\n\tfixturePath := filepath.Join(\"test-fixtures\", \"full-workflow-null\")\n\ttf := e2e.NewBinary(terraformBin, fixturePath)\n\tdefer tf.Close()\n\n\t\/\/\/\/ INIT\n\tstdout, stderr, err := tf.Run(\"init\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected init error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\t\/\/ Make sure we actually downloaded the plugins, rather than picking up\n\t\/\/ copies that might be already installed globally on the system.\n\tif !strings.Contains(stdout, \"- Downloading plugin for provider \\\"template\\\"\") {\n\t\tt.Errorf(\"template provider download message is missing from init output:\\n%s\", stdout)\n\t\tt.Logf(\"(this can happen if you have a copy of the plugin in one of the global plugin search dirs)\")\n\t}\n\tif !strings.Contains(stdout, \"- Downloading plugin for provider \\\"null\\\"\") {\n\t\tt.Errorf(\"null provider download message is missing from init output:\\n%s\", stdout)\n\t\tt.Logf(\"(this can happen if you have a copy of the plugin in one of the global plugin search dirs)\")\n\t}\n\n\t\/\/\/\/ PLAN\n\tstdout, stderr, err = tf.Run(\"plan\", \"-out=tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected plan error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"1 to add, 0 to change, 0 to destroy\") {\n\t\tt.Errorf(\"incorrect plan tally; want 1 to add:\\n%s\", stdout)\n\t}\n\n\tplan, err := tf.Plan(\"tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read plan file: %s\", err)\n\t}\n\n\tstateResources := plan.State.RootModule().Resources\n\tdiffResources := plan.Diff.RootModule().Resources\n\n\tif len(stateResources) != 1 || stateResources[\"data.template_file.test\"] == nil {\n\t\tt.Errorf(\"incorrect state in plan; want just data.template_file.test to have been rendered, but have:\\n%s\", spew.Sdump(stateResources))\n\t}\n\tif len(diffResources) != 1 || diffResources[\"null_resource.test\"] == nil {\n\t\tt.Errorf(\"incorrect diff in plan; want just null_resource.test to have been rendered, but have:\\n%s\", spew.Sdump(diffResources))\n\t}\n\n\t\/\/\/\/ APPLY\n\tstdout, stderr, err = tf.Run(\"apply\", \"tfplan\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected apply error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"Resources: 1 added, 0 changed, 0 destroyed\") {\n\t\tt.Errorf(\"incorrect apply tally; want 1 added:\\n%s\", stdout)\n\t}\n\n\tstate, err := tf.LocalState()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read state file: %s\", err)\n\t}\n\n\tstateResources = state.RootModule().Resources\n\tvar gotResources []string\n\tfor n := range stateResources {\n\t\tgotResources = append(gotResources, n)\n\t}\n\tsort.Strings(gotResources)\n\n\twantResources := []string{\n\t\t\"data.template_file.test\",\n\t\t\"null_resource.test\",\n\t}\n\n\tif !reflect.DeepEqual(gotResources, wantResources) {\n\t\tt.Errorf(\"wrong resources in state\\ngot: %#v\\nwant: %#v\", gotResources, wantResources)\n\t}\n\n\t\/\/\/\/ DESTROY\n\tstdout, stderr, err = tf.Run(\"destroy\", \"-force\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected destroy error: %s\\nstderr:\\n%s\", err, stderr)\n\t}\n\n\tif !strings.Contains(stdout, \"Resources: 1 destroyed\") {\n\t\tt.Errorf(\"incorrect destroy tally; want 1 destroyed:\\n%s\", stdout)\n\t}\n\n\tstate, err = tf.LocalState()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read state file after destroy: %s\", err)\n\t}\n\n\tstateResources = state.RootModule().Resources\n\tif len(stateResources) != 0 {\n\t\tt.Errorf(\"wrong resources in state after destroy; want none, but still have:%s\", spew.Sdump(stateResources))\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\/\/ \"io\"\n\t\"io\/ioutil\"\n\t\"flag\"\n\t\"time\"\n\t\"strings\"\n)\n\nfunc get(hostname string, port int, path string, auth string, urls bool, verbose bool, timeout int) (rv bool, err error) {\n\n\t\/\/ defer func() {\n\t\/\/ \tif err := recover(); err != nil {\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ }()\n\n\trv = true\n\n\tif verbose {\n\t\tfmt.Fprintf(os.Stderr, \"fetching:hostname:%s:\\n\", hostname)\n\t}\n\n\tres := &http.Response{}\n\n\tif urls {\n\n\t\turl := hostname\n\t\tres, err = http.Head(url)\n\t\tdefer res.Body.Close()\n\n\t\t\/\/ req, err = http.NewRequest(\"HEAD\", url, nil)\n\t\t\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\trv = false\n\t\t\treturn\n\t\t}\n\n\t} else {\n\n\t\tclient := &http.Client{Timeout: time.Duration(timeout) * time.Second}\n\n\t\t\/\/ had to allocate this or the SetBasicAuth will panic\n\t headers := make(map[string][]string)\n\t hostPort := fmt.Sprintf(\"%s:%d\", hostname, port)\n\n\t if verbose {\n\n\t\t fmt.Fprintf(os.Stderr, \"adding hostPort:%s:%d:path:%s:\\n\", hostname, port, path)\n\n\t }\n\t\treq := &http.Request{\n\t\t\tMethod: \"HEAD\",\n\t\t\t\/\/ Host: hostPort,\n\t\t\tURL: &url.URL{\n\t\t\t\tHost: hostPort,\n\t\t\t\tScheme: \"http\",\n\t\t\t\tOpaque: path,\n\t\t\t},\n\t\t\tHeader: headers,\n\t\t}\n\n\t if auth != \"\" {\n\n\t \tup := strings.SplitN(auth, \":\", 2)\n\n\t \tif verbose {\n\n\t\t\t fmt.Fprintf(os.Stderr, \"Doing auth with:username:%s:password:%s:\", up[0], up[1])\n\n\t \t}\n\t\t\treq.SetBasicAuth(up[0], up[1])\n\n\t }\n\n\t if verbose {\n\n\t\t dump, _ := httputil.DumpRequestOut(req, true)\n\t\t fmt.Fprintf(os.Stderr, \"%s\", dump)\n\t \t\n\t }\n\n\t\tres, err = client.Do(req)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\trv = false\n\t\t\treturn\n\t\t}\n\n\t\tdefer res.Body.Close()\n\t\t_, err = ioutil.ReadAll(res.Body)\n\n\t}\n\n\t\/\/ res, err := http.Head(url)\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Println(err.Error())\n\t\/\/ \trv = false\n\t\/\/ \treturn\n\t\/\/ }\n\n\tif verbose {\n\n\t\tfmt.Println(res.Status)\n\t\tfor k, v := range res.Header {\n\t\t\tfmt.Println(k+\":\", v)\n\t\t}\n\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\trv = false\n\t}\n\n\treturn\n}\n\n\nfunc main() {\n\n\tstatus := \"OK\"\n\trv := 0\n\tname := \"Bulk HTTP\"\n\tbad := 0\n\ttotal := 0\n\n\t\/\/ this needs improvement. the number of spaces here has to equal the number of chars in the badHosts append line suffix\n\tbadHosts := []byte(\" \")\n\n\tverbose := flag.Bool(\"v\", false, \"verbose output\")\n\twarn := flag.Int(\"w\", 10, \"warning level - number of non-200s or percentage of non-200s (default is numeric not percentage)\")\n\tcrit := flag.Int(\"c\", 20, \"critical level - number of non-200s or percentage of non-200s (default is numeric not percentage)\")\n\ttimeout := flag.Int(\"t\", 2, \"timeout in seconds - don't wait. Do Head requests and don't wait.\")\n\t\/\/ pct := flag.Bool(\"pct\", false, \"interpret warming and critical levels are percentages\")\n\tpath := flag.String(\"path\", \"\", \"optional path to append to the stdin lines - these will not be urlencoded. This is ignored is the urls option is given (not implemented yet).\")\n\tfile := flag.String(\"file\", \"\", \"optional path to read data from a file instead of stdin. If its a dash then read from stdin - these will not be urlencoded\")\n\tport := flag.Int(\"port\", 80, \"optional port for the http request - ignored if urls is specified\")\n\turls := flag.Bool(\"urls\", false, \"Assume the input data is full urls - its normally a list of hostnames\")\n\tauth := flag.String(\"auth\", \"\", \"Do basic auth with this username:passwd - ignored if urls is specified - make this use .netrc instead\")\n\n\tflag.Usage = func() {\n\n fmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n fmt.Fprintf(os.Stderr, `\n\tRead hostnames from a file or STDIN and do a single nagios check over\n\tthem all. Just check for 200s. Warning and Critical are either\n\tpercentages of the total, or a regular numeric thresholds.\n\n\tThe output contains the hostname of any non-200 reporting hosts.\n\n\tSkip input lines that are commented out with shell style comments\n\tlike \/^#\/.\n\n\tDo Head requests since we don't care about the content. Make this\n\toptional some day.\n\n\tAlso make this read from a file of urls with a -f option.\n\n\tMake the auth configurable from the cli\n\n \t`)\n\n flag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\n\tif len(flag.Args()) > 0 {\n\n\t\tflag.Usage()\n\t\tos.Exit(3)\n\n\t}\n\n\t\/\/ it urls is specified, the input is full urls to be used enmasse and to be url encoded\n\tif *urls {\n\t\t*path = \"\"\n\t}\n\n\t\/\/ defer func() {\n\t\/\/ \tif err := recover(); err != nil {\n\t\/\/ \t\tfmt.Println(name+\" Unknown: \", err)\n\t\/\/ \t\tos.Exit(3)\n\t\/\/ \t}\n\t\/\/ }()\n\n\tif file == nil || *file == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(3)\n\t}\n\n\t\/\/ inputSource := &io.Reader\n\tinputSource := os.Stdin\n\n\tif (*file)[0] != \"-\"[0] {\n\n\t\tvar err error\n\n\t\tinputSource, err = os.Open(*file)\n\n\t\tif err != nil {\n\n\t\t\tfmt.Printf(\"Couldn't open the specified input file:%s:error:%v:\\n\\n\", name, err)\n\t\t\tflag.Usage()\n\t\t\tos.Exit(3)\n\n\t\t}\n\n\t}\n\n\tscanner := bufio.NewScanner(inputSource)\n\tfor scanner.Scan() {\n\n\t\ttotal++\n\n\t\thostname := scanner.Text()\n\n\t\tif hostname[0] == \"#\"[0] {\n\n\t\t\tif *verbose {\n\n\t\t\t\tfmt.Printf(\"skipping:%s:\\n\", hostname)\n\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif *verbose {\n\n\t\t\tfmt.Printf(\"working on:%s:\\n\", hostname)\n\n\t\t}\n\n\t\tgoodCheck, err := get(hostname, *port, *path, *auth, *urls, *verbose, *timeout)\n\t\tif err != nil {\n\n\t\t\tfmt.Printf(\"%s get error: %T %s %#v\\n\", name, err, err, err)\n\n\t\t\tcontinue\n\n\t\t}\n\n\t\tif !goodCheck {\n\t\t\tbadHosts = append(badHosts, hostname...)\n\t\t\tbadHosts = append(badHosts, \", \"...)\n\t\t\tbad++\n\t\t}\n\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading standard input:\", err)\n\t}\n\n\n\tif bad >= *crit {\n\t\tstatus = \"Critical\"\n\t\trv = 1\n\t} else if bad >= *warn {\n\t\tstatus = \"Warning\"\n\t\trv = 2\n\t}\n\n\tfmt.Printf(\"%s %s: %d |%s\\n\", name, status, bad, badHosts[:len(badHosts) - 2] )\n\tos.Exit(rv)\n}\n<commit_msg>go fmt<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\/\/ \"io\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc get(hostname string, port int, path string, auth string, urls bool, verbose bool, timeout int) (rv bool, err error) {\n\n\t\/\/ defer func() {\n\t\/\/ \tif err := recover(); err != nil {\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ }()\n\n\trv = true\n\n\tif verbose {\n\t\tfmt.Fprintf(os.Stderr, \"fetching:hostname:%s:\\n\", hostname)\n\t}\n\n\tres := &http.Response{}\n\n\tif urls {\n\n\t\turl := hostname\n\t\tres, err = http.Head(url)\n\t\tdefer res.Body.Close()\n\n\t\t\/\/ req, err = http.NewRequest(\"HEAD\", url, nil)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\trv = false\n\t\t\treturn\n\t\t}\n\n\t} else {\n\n\t\tclient := &http.Client{Timeout: time.Duration(timeout) * time.Second}\n\n\t\t\/\/ had to allocate this or the SetBasicAuth will panic\n\t\theaders := make(map[string][]string)\n\t\thostPort := fmt.Sprintf(\"%s:%d\", hostname, port)\n\n\t\tif verbose {\n\n\t\t\tfmt.Fprintf(os.Stderr, \"adding hostPort:%s:%d:path:%s:\\n\", hostname, port, path)\n\n\t\t}\n\t\treq := &http.Request{\n\t\t\tMethod: \"HEAD\",\n\t\t\t\/\/ Host: hostPort,\n\t\t\tURL: &url.URL{\n\t\t\t\tHost: hostPort,\n\t\t\t\tScheme: \"http\",\n\t\t\t\tOpaque: path,\n\t\t\t},\n\t\t\tHeader: headers,\n\t\t}\n\n\t\tif auth != \"\" {\n\n\t\t\tup := strings.SplitN(auth, \":\", 2)\n\n\t\t\tif verbose {\n\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Doing auth with:username:%s:password:%s:\", up[0], up[1])\n\n\t\t\t}\n\t\t\treq.SetBasicAuth(up[0], up[1])\n\n\t\t}\n\n\t\tif verbose {\n\n\t\t\tdump, _ := httputil.DumpRequestOut(req, true)\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\", dump)\n\n\t\t}\n\n\t\tres, err = client.Do(req)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\trv = false\n\t\t\treturn\n\t\t}\n\n\t\tdefer res.Body.Close()\n\t\t_, err = ioutil.ReadAll(res.Body)\n\n\t}\n\n\t\/\/ res, err := http.Head(url)\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Println(err.Error())\n\t\/\/ \trv = false\n\t\/\/ \treturn\n\t\/\/ }\n\n\tif verbose {\n\n\t\tfmt.Println(res.Status)\n\t\tfor k, v := range res.Header {\n\t\t\tfmt.Println(k+\":\", v)\n\t\t}\n\n\t}\n\n\tif res.StatusCode != http.StatusOK {\n\t\trv = false\n\t}\n\n\treturn\n}\n\nfunc main() {\n\n\tstatus := \"OK\"\n\trv := 0\n\tname := \"Bulk HTTP\"\n\tbad := 0\n\ttotal := 0\n\n\t\/\/ this needs improvement. the number of spaces here has to equal the number of chars in the badHosts append line suffix\n\tbadHosts := []byte(\" \")\n\n\tverbose := flag.Bool(\"v\", false, \"verbose output\")\n\twarn := flag.Int(\"w\", 10, \"warning level - number of non-200s or percentage of non-200s (default is numeric not percentage)\")\n\tcrit := flag.Int(\"c\", 20, \"critical level - number of non-200s or percentage of non-200s (default is numeric not percentage)\")\n\ttimeout := flag.Int(\"t\", 2, \"timeout in seconds - don't wait. Do Head requests and don't wait.\")\n\t\/\/ pct := flag.Bool(\"pct\", false, \"interpret warming and critical levels are percentages\")\n\tpath := flag.String(\"path\", \"\", \"optional path to append to the stdin lines - these will not be urlencoded. This is ignored is the urls option is given (not implemented yet).\")\n\tfile := flag.String(\"file\", \"\", \"optional path to read data from a file instead of stdin. If its a dash then read from stdin - these will not be urlencoded\")\n\tport := flag.Int(\"port\", 80, \"optional port for the http request - ignored if urls is specified\")\n\turls := flag.Bool(\"urls\", false, \"Assume the input data is full urls - its normally a list of hostnames\")\n\tauth := flag.String(\"auth\", \"\", \"Do basic auth with this username:passwd - ignored if urls is specified - make this use .netrc instead\")\n\n\tflag.Usage = func() {\n\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, `\n\tRead hostnames from a file or STDIN and do a single nagios check over\n\tthem all. Just check for 200s. Warning and Critical are either\n\tpercentages of the total, or a regular numeric thresholds.\n\n\tThe output contains the hostname of any non-200 reporting hosts.\n\n\tSkip input lines that are commented out with shell style comments\n\tlike \/^#\/.\n\n\tDo Head requests since we don't care about the content. Make this\n\toptional some day.\n\n\tAlso make this read from a file of urls with a -f option.\n\n\tMake the auth configurable from the cli\n\n \t`)\n\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\n\tif len(flag.Args()) > 0 {\n\n\t\tflag.Usage()\n\t\tos.Exit(3)\n\n\t}\n\n\t\/\/ it urls is specified, the input is full urls to be used enmasse and to be url encoded\n\tif *urls {\n\t\t*path = \"\"\n\t}\n\n\t\/\/ defer func() {\n\t\/\/ \tif err := recover(); err != nil {\n\t\/\/ \t\tfmt.Println(name+\" Unknown: \", err)\n\t\/\/ \t\tos.Exit(3)\n\t\/\/ \t}\n\t\/\/ }()\n\n\tif file == nil || *file == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(3)\n\t}\n\n\t\/\/ inputSource := &io.Reader\n\tinputSource := os.Stdin\n\n\tif (*file)[0] != \"-\"[0] {\n\n\t\tvar err error\n\n\t\tinputSource, err = os.Open(*file)\n\n\t\tif err != nil {\n\n\t\t\tfmt.Printf(\"Couldn't open the specified input file:%s:error:%v:\\n\\n\", name, err)\n\t\t\tflag.Usage()\n\t\t\tos.Exit(3)\n\n\t\t}\n\n\t}\n\n\tscanner := bufio.NewScanner(inputSource)\n\tfor scanner.Scan() {\n\n\t\ttotal++\n\n\t\thostname := scanner.Text()\n\n\t\tif hostname[0] == \"#\"[0] {\n\n\t\t\tif *verbose {\n\n\t\t\t\tfmt.Printf(\"skipping:%s:\\n\", hostname)\n\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif *verbose {\n\n\t\t\tfmt.Printf(\"working on:%s:\\n\", hostname)\n\n\t\t}\n\n\t\tgoodCheck, err := get(hostname, *port, *path, *auth, *urls, *verbose, *timeout)\n\t\tif err != nil {\n\n\t\t\tfmt.Printf(\"%s get error: %T %s %#v\\n\", name, err, err, err)\n\n\t\t\tcontinue\n\n\t\t}\n\n\t\tif !goodCheck {\n\t\t\tbadHosts = append(badHosts, hostname...)\n\t\t\tbadHosts = append(badHosts, \", \"...)\n\t\t\tbad++\n\t\t}\n\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading standard input:\", err)\n\t}\n\n\tif bad >= *crit {\n\t\tstatus = \"Critical\"\n\t\trv = 1\n\t} else if bad >= *warn {\n\t\tstatus = \"Warning\"\n\t\trv = 2\n\t}\n\n\tfmt.Printf(\"%s %s: %d |%s\\n\", name, status, bad, badHosts[:len(badHosts)-2])\n\tos.Exit(rv)\n}\n<|endoftext|>"} {"text":"<commit_before>package view\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"log\"\n\n\t\"github.com\/sjmudd\/ps-top\/logger\"\n\t\"github.com\/sjmudd\/ps-top\/table\"\n)\n\n\/\/ Code represents the type of information to view (as an int)\ntype Code int\n\n\/\/ View* constants represent different views we can see\nconst (\n\tViewNone Code = iota\n\tViewLatency Code = iota \/\/ view the table latency information\n\tViewOps Code = iota \/\/ view the table information by number of operations\n\tViewIO Code = iota \/\/ view the file I\/O information\n\tViewLocks Code = iota\n\tViewUsers Code = iota\n\tViewMutex Code = iota\n\tViewStages Code = iota\n\tViewMemory Code = iota \/\/ view memory usage (5.7 only)\n)\n\n\/\/ View holds the integer type of view (maybe need to fix this setup)\ntype View struct {\n\tcode Code\n}\n\nvar (\n\tnames map[Code]string \/\/ map View* to a string name\n\ttables map[Code]table.Access \/\/ map a view to a table name and whether it's selectable or not\n\n\tnextView map[Code]Code \/\/ map from one view to the next taking into account invalid views\n\tprevView map[Code]Code \/\/ map from one view to the next taking into account invalid views\n)\n\nfunc init() {\n\tnames = make(map[Code]string)\n\n\tnames[ViewLatency] = \"table_io_latency\"\n\tnames[ViewOps] = \"table_io_ops\"\n\tnames[ViewIO] = \"file_io_latency\"\n\tnames[ViewLocks] = \"table_lock_latency\"\n\tnames[ViewUsers] = \"user_latency\"\n\tnames[ViewMutex] = \"mutex_latency\"\n\tnames[ViewStages] = \"stages_latency\"\n\tnames[ViewMemory] = \"memory_usage\"\n\n\ttables = make(map[Code]table.Access)\n\n\ttables[ViewLatency] = table.NewAccess(\"performance_schema\", \"table_io_waits_summary_by_table\")\n\ttables[ViewOps] = table.NewAccess(\"performance_schema\", \"table_io_waits_summary_by_table\")\n\ttables[ViewIO] = table.NewAccess(\"performance_schema\", \"file_summary_by_instance\")\n\ttables[ViewLocks] = table.NewAccess(\"performance_schema\", \"table_lock_waits_summary_by_table\")\n\ttables[ViewUsers] = table.NewAccess(\"information_schema\", \"processlist\")\n\ttables[ViewMutex] = table.NewAccess(\"performance_schema\", \"events_waits_summary_global_by_event_name\")\n\ttables[ViewStages] = table.NewAccess(\"performance_schema\", \"events_stages_summary_global_by_event_name\")\n\ttables[ViewMemory] = table.NewAccess(\"performance_schema\", \"memory_summary_global_by_event_name\")\n\n}\n\n\/\/ ValidateViews check which views are readable. If none are we give a fatal error\nfunc ValidateViews(dbh *sql.DB) error {\n\tvar count int\n\tvar status string\n\tlogger.Println(\"Validating access to views...\")\n\n\tfor v := range names {\n\t\tta := tables[v]\n\t\te := ta.CheckSelectError(dbh)\n\t\tsuffix := \"\"\n\t\tif e == nil {\n\t\t\tstatus = \"is\"\n\t\t\tcount++\n\t\t} else {\n\t\t\tstatus = \"IS NOT\"\n\t\t\tsuffix = \" \" + e.Error()\n\t\t}\n\t\ttables[v] = ta\n\t\tlogger.Println(v.String() + \": \" + ta.Name() + \" \" + status + \" SELECTable\" + suffix)\n\t}\n\n\tif count == 0 {\n\t\treturn errors.New(\"None of the required tables are SELECTable. Giving up\")\n\t}\n\tlogger.Println(count, \"of\", len(names), \"view(s) are SELECTable, continuing\")\n\n\tsetPrevAndNextViews()\n\n\treturn nil\n}\n\n\/* set the previous and next views taking into account any invalid views\n\nname selectable? prev next\n---- ----------- ---- ----\nv1 false v4 v2\nv2 true v4 v4\nv3 false v2 v4\nv4 true v2 v2\nv5 false v4 v2\n\n*\/\n\nfunc setPrevAndNextViews() {\n\tlogger.Println(\"view.setPrevAndNextViews()...\")\n\tnextView = make(map[Code]Code)\n\tprevView = make(map[Code]Code)\n\n\t\/\/ reset values\n\tfor v := range names {\n\t\tnextView[v] = ViewNone\n\t\tprevView[v] = ViewNone\n\t}\n\n\t\/\/ Cleaner way to do this? Probably. Fix later.\n\tprevCodeOrder := []Code{ViewMemory, ViewStages, ViewMutex, ViewUsers, ViewLocks, ViewIO, ViewOps, ViewLatency}\n\tnextCodeOrder := []Code{ViewLatency, ViewOps, ViewIO, ViewLocks, ViewUsers, ViewMutex, ViewStages, ViewMemory}\n\tprevView = setValidByValues(prevCodeOrder)\n\tnextView = setValidByValues(nextCodeOrder)\n\n\t\/\/ print out the results\n\tlogger.Println(\"Final mapping of view order:\")\n\tfor i := range nextCodeOrder {\n\t\tlogger.Println(\"view:\", nextCodeOrder[i], \", prev:\", prevView[nextCodeOrder[i]], \", next:\", nextView[nextCodeOrder[i]])\n\t}\n}\n\n\/\/ setValidNextByValues returns a map of Code -> Code where the mapping points to the \"next\"\n\/\/ Code. The order is determined by the input Code slice. Only Selectable Views are considered\n\/\/ for the mapping with the other views pointing to the first Code provided.\nfunc setValidByValues(orderedCodes []Code) map[Code]Code {\n\tlogger.Println(\"view.setValidByValues()\")\n\torderedMap := make(map[Code]Code)\n\n\t\/\/ reset orderedCodes\n\tfor i := range orderedCodes {\n\t\torderedMap[orderedCodes[i]] = ViewNone\n\t}\n\n\tfirst, last := ViewNone, ViewNone\n\n\t\/\/ first pass, try to find values and point forward to next position if known.\n\t\/\/ we must find at least one value view in the first pass.\n\tfor i := range []int{1, 2} {\n\t\tfor i := range orderedCodes {\n\t\t\tcurrentPos := orderedCodes[i]\n\t\t\tif tables[currentPos].SelectError() == nil {\n\t\t\t\tif first == ViewNone {\n\t\t\t\t\tfirst = currentPos\n\t\t\t\t}\n\t\t\t\tif last != ViewNone {\n\t\t\t\t\torderedMap[last] = currentPos\n\t\t\t\t}\n\t\t\t\tlast = currentPos\n\t\t\t}\n\t\t}\n\t\tif i == 1 {\n\t\t\t\/\/ not found a valid view so something is up. Give up!\n\t\t\tif first == ViewNone {\n\t\t\t\tlog.Panic(\"setValidByValues() can't find a Selectable view! (shouldn't be here)\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ final pass viewNone entries should point to first\n\tfor i := range orderedCodes {\n\t\tcurrentPos := orderedCodes[i]\n\t\tif tables[currentPos].SelectError() != nil {\n\t\t\torderedMap[currentPos] = first\n\t\t}\n\t}\n\n\treturn orderedMap\n}\n\n\/\/ SetNext changes the current view to the next one\nfunc (v *View) SetNext() Code {\n\tv.code = nextView[v.code]\n\n\treturn v.code\n}\n\n\/\/ SetPrev changes the current view to the previous one\nfunc (v *View) SetPrev() Code {\n\tv.code = prevView[v.code]\n\n\treturn v.code\n}\n\n\/\/ Set sets the view to the given view (by Code)\nfunc (v *View) Set(viewCode Code) {\n\tv.code = viewCode\n\n\tif tables[v.code].SelectError() != nil {\n\t\tv.code = nextView[v.code]\n\t}\n}\n\n\/\/ SetByName sets the view based on its name.\n\/\/ - If we provide an empty name then use the default.\n\/\/ - If we don't provide a valid name then give an error\nfunc (v *View) SetByName(name string) {\n\tlogger.Println(\"View.SetByName(\" + name + \")\")\n\tif name == \"\" {\n\t\tlogger.Println(\"View.SetByName(): name is empty so setting to:\", ViewLatency.String())\n\t\tv.Set(ViewLatency)\n\t\treturn\n\t}\n\n\tfor i := range names {\n\t\tif name == names[i] {\n\t\t\tv.code = Code(i)\n\t\t\tlogger.Println(\"View.SetByName(\", name, \")\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ suggest what should be used\n\tallViews := \"\"\n\tfor i := range names {\n\t\tallViews = allViews + \" \" + names[i]\n\t}\n\n\t\/\/ no need for now to strip off leading space from allViews.\n\tlog.Fatal(\"Asked for a view name, '\", name, \"' which doesn't exist. Try one of:\", allViews)\n}\n\n\/\/ Get returns the Code version of the current view\nfunc (v View) Get() Code {\n\treturn v.code\n}\n\n\/\/ Name returns the string version of the current view\nfunc (v View) Name() string {\n\treturn v.code.String()\n}\n\nfunc (s Code) String() string {\n\treturn names[s]\n}\n<commit_msg>github.com\/sjmudd\/ps-top\/view: add more comments<commit_after>package view\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"log\"\n\n\t\"github.com\/sjmudd\/ps-top\/logger\"\n\t\"github.com\/sjmudd\/ps-top\/table\"\n)\n\n\/\/ Code represents the type of information to view (as an int)\ntype Code int\n\n\/\/ View* constants represent different views we can see\nconst (\n\tViewNone Code = iota \/\/ view nothing (should never be set)\n\tViewLatency Code = iota \/\/ view the table latency information\n\tViewOps Code = iota \/\/ view the table information by number of operations\n\tViewIO Code = iota \/\/ view the file I\/O information\n\tViewLocks Code = iota \/\/ view lock information\n\tViewUsers Code = iota \/\/ view user information\n\tViewMutex Code = iota \/\/ view mutex information\n\tViewStages Code = iota \/\/ view SQL stages information\n\tViewMemory Code = iota \/\/ view memory usage (5.7 only)\n)\n\n\/\/ View holds the integer type of view (maybe need to fix this setup)\ntype View struct {\n\tcode Code\n}\n\nvar (\n\tnames map[Code]string \/\/ map View* to a string name\n\ttables map[Code]table.Access \/\/ map a view to a table name and whether it's selectable or not\n\n\tnextView map[Code]Code \/\/ map from one view to the next taking into account invalid views\n\tprevView map[Code]Code \/\/ map from one view to the next taking into account invalid views\n)\n\nfunc init() {\n\tnames = make(map[Code]string)\n\n\tnames[ViewLatency] = \"table_io_latency\"\n\tnames[ViewOps] = \"table_io_ops\"\n\tnames[ViewIO] = \"file_io_latency\"\n\tnames[ViewLocks] = \"table_lock_latency\"\n\tnames[ViewUsers] = \"user_latency\"\n\tnames[ViewMutex] = \"mutex_latency\"\n\tnames[ViewStages] = \"stages_latency\"\n\tnames[ViewMemory] = \"memory_usage\"\n\n\ttables = make(map[Code]table.Access)\n\n\ttables[ViewLatency] = table.NewAccess(\"performance_schema\", \"table_io_waits_summary_by_table\")\n\ttables[ViewOps] = table.NewAccess(\"performance_schema\", \"table_io_waits_summary_by_table\")\n\ttables[ViewIO] = table.NewAccess(\"performance_schema\", \"file_summary_by_instance\")\n\ttables[ViewLocks] = table.NewAccess(\"performance_schema\", \"table_lock_waits_summary_by_table\")\n\ttables[ViewUsers] = table.NewAccess(\"information_schema\", \"processlist\")\n\ttables[ViewMutex] = table.NewAccess(\"performance_schema\", \"events_waits_summary_global_by_event_name\")\n\ttables[ViewStages] = table.NewAccess(\"performance_schema\", \"events_stages_summary_global_by_event_name\")\n\ttables[ViewMemory] = table.NewAccess(\"performance_schema\", \"memory_summary_global_by_event_name\")\n}\n\n\/\/ ValidateViews check which views are readable. If none are we give a fatal error\nfunc ValidateViews(dbh *sql.DB) error {\n\tvar count int\n\tvar status string\n\tlogger.Println(\"Validating access to views...\")\n\n\t\/\/ determine which of the defined views is valid because the underlying table access works\n\tfor v := range names {\n\t\tta := tables[v]\n\t\te := ta.CheckSelectError(dbh)\n\t\tsuffix := \"\"\n\t\tif e == nil {\n\t\t\tstatus = \"is\"\n\t\t\tcount++\n\t\t} else {\n\t\t\tstatus = \"IS NOT\"\n\t\t\tsuffix = \" \" + e.Error()\n\t\t}\n\t\ttables[v] = ta\n\t\tlogger.Println(v.String() + \": \" + ta.Name() + \" \" + status + \" SELECTable\" + suffix)\n\t}\n\n\tif count == 0 {\n\t\treturn errors.New(\"None of the required tables are SELECTable. Giving up\")\n\t}\n\tlogger.Println(count, \"of\", len(names), \"view(s) are SELECTable, continuing\")\n\n\tsetPrevAndNextViews()\n\n\treturn nil\n}\n\n\/* set the previous and next views taking into account any invalid views\n\nname selectable? prev next\n---- ----------- ---- ----\nv1 false v4 v2\nv2 true v4 v4\nv3 false v2 v4\nv4 true v2 v2\nv5 false v4 v2\n\n*\/\n\nfunc setPrevAndNextViews() {\n\tlogger.Println(\"view.setPrevAndNextViews()...\")\n\tnextView = make(map[Code]Code)\n\tprevView = make(map[Code]Code)\n\n\t\/\/ reset values\n\tfor v := range names {\n\t\tnextView[v] = ViewNone\n\t\tprevView[v] = ViewNone\n\t}\n\n\t\/\/ Cleaner way to do this? Probably. Fix later.\n\tprevCodeOrder := []Code{ViewMemory, ViewStages, ViewMutex, ViewUsers, ViewLocks, ViewIO, ViewOps, ViewLatency}\n\tnextCodeOrder := []Code{ViewLatency, ViewOps, ViewIO, ViewLocks, ViewUsers, ViewMutex, ViewStages, ViewMemory}\n\tprevView = setValidByValues(prevCodeOrder)\n\tnextView = setValidByValues(nextCodeOrder)\n\n\t\/\/ print out the results\n\tlogger.Println(\"Final mapping of view order:\")\n\tfor i := range nextCodeOrder {\n\t\tlogger.Println(\"view:\", nextCodeOrder[i], \", prev:\", prevView[nextCodeOrder[i]], \", next:\", nextView[nextCodeOrder[i]])\n\t}\n}\n\n\/\/ setValidNextByValues returns a map of Code -> Code where the mapping points to the \"next\"\n\/\/ Code. The order is determined by the input Code slice. Only Selectable Views are considered\n\/\/ for the mapping with the other views pointing to the first Code provided.\nfunc setValidByValues(orderedCodes []Code) map[Code]Code {\n\tlogger.Println(\"view.setValidByValues()\")\n\torderedMap := make(map[Code]Code)\n\n\t\/\/ reset orderedCodes\n\tfor i := range orderedCodes {\n\t\torderedMap[orderedCodes[i]] = ViewNone\n\t}\n\n\tfirst, last := ViewNone, ViewNone\n\n\t\/\/ first pass, try to find values and point forward to next position if known.\n\t\/\/ we must find at least one value view in the first pass.\n\tfor i := range []int{1, 2} {\n\t\tfor i := range orderedCodes {\n\t\t\tcurrentPos := orderedCodes[i]\n\t\t\tif tables[currentPos].SelectError() == nil {\n\t\t\t\tif first == ViewNone {\n\t\t\t\t\tfirst = currentPos\n\t\t\t\t}\n\t\t\t\tif last != ViewNone {\n\t\t\t\t\torderedMap[last] = currentPos\n\t\t\t\t}\n\t\t\t\tlast = currentPos\n\t\t\t}\n\t\t}\n\t\tif i == 1 {\n\t\t\t\/\/ not found a valid view so something is up. Give up!\n\t\t\tif first == ViewNone {\n\t\t\t\tlog.Panic(\"setValidByValues() can't find a Selectable view! (shouldn't be here)\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ final pass viewNone entries should point to first\n\tfor i := range orderedCodes {\n\t\tcurrentPos := orderedCodes[i]\n\t\tif tables[currentPos].SelectError() != nil {\n\t\t\torderedMap[currentPos] = first\n\t\t}\n\t}\n\n\treturn orderedMap\n}\n\n\/\/ SetNext changes the current view to the next one\nfunc (v *View) SetNext() Code {\n\tv.code = nextView[v.code]\n\n\treturn v.code\n}\n\n\/\/ SetPrev changes the current view to the previous one\nfunc (v *View) SetPrev() Code {\n\tv.code = prevView[v.code]\n\n\treturn v.code\n}\n\n\/\/ Set sets the view to the given view (by Code)\nfunc (v *View) Set(viewCode Code) {\n\tv.code = viewCode\n\n\tif tables[v.code].SelectError() != nil {\n\t\tv.code = nextView[v.code]\n\t}\n}\n\n\/\/ SetByName sets the view based on its name.\n\/\/ - If we provide an empty name then use the default.\n\/\/ - If we don't provide a valid name then give an error\nfunc (v *View) SetByName(name string) {\n\tlogger.Println(\"View.SetByName(\" + name + \")\")\n\tif name == \"\" {\n\t\tlogger.Println(\"View.SetByName(): name is empty so setting to:\", ViewLatency.String())\n\t\tv.Set(ViewLatency)\n\t\treturn\n\t}\n\n\tfor i := range names {\n\t\tif name == names[i] {\n\t\t\tv.code = Code(i)\n\t\t\tlogger.Println(\"View.SetByName(\", name, \")\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ suggest what should be used\n\tallViews := \"\"\n\tfor i := range names {\n\t\tallViews = allViews + \" \" + names[i]\n\t}\n\n\t\/\/ no need for now to strip off leading space from allViews.\n\tlog.Fatal(\"Asked for a view name, '\", name, \"' which doesn't exist. Try one of:\", allViews)\n}\n\n\/\/ Get returns the Code version of the current view\nfunc (v View) Get() Code {\n\treturn v.code\n}\n\n\/\/ Name returns the string version of the current view\nfunc (v View) Name() string {\n\treturn v.code.String()\n}\n\nfunc (s Code) String() string {\n\treturn names[s]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ToQoz\/dou\"\n\t_ \"github.com\/ToQoz\/dou\/jsonapi\"\n\t\"github.com\/ToQoz\/rome\"\n\t\"github.com\/lestrrat\/go-apache-logformat\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n)\n\nvar (\n\tAPIStatusOk = 1\n\tAPIStatusValidationError = 100\n\tAPIStatusUnexpectedError = 900\n\tlogger = apachelog.CombinedLog\n)\n\n\/\/ --- API Error type ---\n\ntype apiError struct {\n\tMessage string `json:\"message\"`\n}\n\nfunc newAPIError(err error) *apiError {\n\treturn &apiError{Message: err.Error()}\n}\n\ntype apiErrors struct {\n\tErrors []*apiError `json:\"errors\"`\n}\n\nfunc newAPIErrors(errs []error) *apiErrors {\n\taErrs := &apiErrors{}\n\n\tfor _, err := range errs {\n\t\taErrs.Errors = append(aErrs.Errors, newAPIError(err))\n\t}\n\n\treturn aErrs\n}\n\n\/\/ --- Example struct ---\n\nvar users = []*User{}\n\ntype User struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n\nfunc (u *User) Validate() []error {\n\tvar errs []error\n\n\tif u.Name == \"\" {\n\t\terrs = append(errs, errors.New(\"User: name is required\"))\n\t}\n\n\tif u.Email == \"\" {\n\t\terrs = append(errs, errors.New(\"User: email is required\"))\n\t}\n\n\treturn errs\n}\n\nfunc (u *User) Save() error {\n\tusers = append(users, u)\n\treturn nil\n}\n\nfunc main() {\n\tdefer teardown()\n\n\t\/\/ --- Setup Router ---\n\t\/\/ ! You can use router keeping interface `api.Router` instead of github.com\/ToQoz\/rome\n\trouter := rome.NewRouter()\n\trouter.NotFoundFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tj, err := json.Marshal(map[string]string{\n\t\t\t\"message\": http.StatusText(http.StatusNotFound),\n\t\t\t\"documentation_url\": \"http:\/\/toqoz.net\",\n\t\t})\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\/\/ skip wrote bytesyze\n\t\t_, err = fmt.Fprintln(w, string(j))\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"dou: fail to fmt.Fpintln(http.ResponseWriter, string)\\n%v\", err)\n\t\t}\n\t})\n\n\t\/\/ --- Setup API ---\n\tapi, err := dou.NewAPI(\"jsonapi\")\n\tapi.Handler = router\n\t\/\/api, err := dou.NewAPI(\"jsonapi\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapi.BeforeDispatch = func(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {\n\t\t\/\/ Call default\n\t\tw, r = api.Plugin.BeforeDispatch(w, r)\n\n\t\tlw := apachelog.NewLoggingWriter(w, r, logger)\n\t\treturn lw, r\n\t}\n\n\tapi.AfterDispatch = func(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {\n\t\t\/\/ Call default\n\t\tw, r = api.Plugin.AfterDispatch(w, r)\n\n\t\tif lw, ok := w.(*apachelog.LoggingWriter); ok {\n\t\t\tlw.EmitLog()\n\t\t}\n\n\t\treturn w, r\n\t}\n\n\tapi.ReadTimeout = 10 * time.Second\n\tapi.WriteTimeout = 10 * time.Second\n\tapi.MaxHeaderBytes = 1 << 20\n\n\t\/\/ --- Map routes ---\n\trouter.GetFunc(\"\/users\", func(w http.ResponseWriter, r *http.Request) {\n\t\tapi.APIStatus(w, APIStatusOk)\n\t\tapi.Ok(w, users, http.StatusOK)\n\t})\n\n\trouter.GetFunc(\"\/error\", func(w http.ResponseWriter, r *http.Request) {\n\t\tapi.APIStatus(w, APIStatusUnexpectedError)\n\t\tapi.Error(w, map[string]string{\"message\": \"Internal server error\"}, http.StatusInternalServerError)\n\t})\n\n\t\/\/ Try Ok $ curl -X POST -d 'name=ToQoz&email=toqoz403@gmail.com' -D - :8099\/users\n\t\/\/ Try Error $ curl -X POST -D - :8099\/users\n\trouter.PostFunc(\"\/users\", func(w http.ResponseWriter, r *http.Request) {\n\t\tu := &User{\n\t\t\tName: r.FormValue(\"name\"),\n\t\t\tEmail: r.FormValue(\"email\"),\n\t\t}\n\n\t\terrs := u.Validate()\n\n\t\tif len(errs) > 0 {\n\t\t\tapi.APIStatus(w, APIStatusValidationError)\n\t\t\tapi.Error(w, newAPIErrors(errs), 422)\n\t\t\treturn\n\t\t}\n\n\t\terr := u.Save()\n\n\t\tif err != nil {\n\t\t\tapi.APIStatus(w, APIStatusUnexpectedError)\n\t\t\tapi.Error(w, newAPIErrors(errs), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tapi.APIStatus(w, APIStatusOk)\n\t\tapi.Ok(w, u, http.StatusCreated)\n\t})\n\n\t\/\/ --- Create listener ---\n\t\/\/ You can use utility, for example github.com\/lestrrat\/go-server-starter-listener etc.\n\tl, err := net.Listen(\"tcp\", \":8099\")\n\n\tif err != nil {\n\t\tlog.Printf(\"Could not listen: %s\", \":8099\")\n\t\tteardown()\n\t\tos.Exit(1)\n\t}\n\n\tlog.Printf(\"Listen: %s\", \":8099\")\n\n\t\/\/ --- Handle C-c ---\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\tfor sig := range c {\n\t\t\tlog.Print(\"Stopping the server...\")\n\n\t\t\tswitch sig {\n\t\t\tcase os.Interrupt:\n\t\t\t\t\/\/ --- Stop Server ---\n\t\t\t\tapi.Stop()\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tlog.Print(\"Receive unknown signal...\")\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ --- Run Server ---\n\tapi.Run(l)\n}\n\nfunc teardown() {\n\tlog.Print(\"Tearing down...\")\n\tlog.Print(\"Finished - bye bye. ;-)\")\n}\n<commit_msg>Don't capitalize error strings. Follow golint.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ToQoz\/dou\"\n\t_ \"github.com\/ToQoz\/dou\/jsonapi\"\n\t\"github.com\/ToQoz\/rome\"\n\t\"github.com\/lestrrat\/go-apache-logformat\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n)\n\nvar (\n\tAPIStatusOk = 1\n\tAPIStatusValidationError = 100\n\tAPIStatusUnexpectedError = 900\n\tlogger = apachelog.CombinedLog\n)\n\n\/\/ --- API Error type ---\n\ntype apiError struct {\n\tMessage string `json:\"message\"`\n}\n\nfunc newAPIError(err error) *apiError {\n\treturn &apiError{Message: err.Error()}\n}\n\ntype apiErrors struct {\n\tErrors []*apiError `json:\"errors\"`\n}\n\nfunc newAPIErrors(errs []error) *apiErrors {\n\taErrs := &apiErrors{}\n\n\tfor _, err := range errs {\n\t\taErrs.Errors = append(aErrs.Errors, newAPIError(err))\n\t}\n\n\treturn aErrs\n}\n\n\/\/ --- Example struct ---\n\nvar users = []*User{}\n\ntype User struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n\nfunc (u *User) Validate() []error {\n\tvar errs []error\n\n\tif u.Name == \"\" {\n\t\terrs = append(errs, errors.New(\"user: name is required\"))\n\t}\n\n\tif u.Email == \"\" {\n\t\terrs = append(errs, errors.New(\"user: email is required\"))\n\t}\n\n\treturn errs\n}\n\nfunc (u *User) Save() error {\n\tusers = append(users, u)\n\treturn nil\n}\n\nfunc main() {\n\tdefer teardown()\n\n\t\/\/ --- Setup Router ---\n\t\/\/ ! You can use router keeping interface `api.Router` instead of github.com\/ToQoz\/rome\n\trouter := rome.NewRouter()\n\trouter.NotFoundFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tj, err := json.Marshal(map[string]string{\n\t\t\t\"message\": http.StatusText(http.StatusNotFound),\n\t\t\t\"documentation_url\": \"http:\/\/toqoz.net\",\n\t\t})\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\/\/ skip wrote bytesyze\n\t\t_, err = fmt.Fprintln(w, string(j))\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"dou: fail to fmt.Fpintln(http.ResponseWriter, string)\\n%v\", err)\n\t\t}\n\t})\n\n\t\/\/ --- Setup API ---\n\tapi, err := dou.NewAPI(\"jsonapi\")\n\tapi.Handler = router\n\t\/\/api, err := dou.NewAPI(\"jsonapi\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapi.BeforeDispatch = func(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {\n\t\t\/\/ Call default\n\t\tw, r = api.Plugin.BeforeDispatch(w, r)\n\n\t\tlw := apachelog.NewLoggingWriter(w, r, logger)\n\t\treturn lw, r\n\t}\n\n\tapi.AfterDispatch = func(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {\n\t\t\/\/ Call default\n\t\tw, r = api.Plugin.AfterDispatch(w, r)\n\n\t\tif lw, ok := w.(*apachelog.LoggingWriter); ok {\n\t\t\tlw.EmitLog()\n\t\t}\n\n\t\treturn w, r\n\t}\n\n\tapi.ReadTimeout = 10 * time.Second\n\tapi.WriteTimeout = 10 * time.Second\n\tapi.MaxHeaderBytes = 1 << 20\n\n\t\/\/ --- Map routes ---\n\trouter.GetFunc(\"\/users\", func(w http.ResponseWriter, r *http.Request) {\n\t\tapi.APIStatus(w, APIStatusOk)\n\t\tapi.Ok(w, users, http.StatusOK)\n\t})\n\n\trouter.GetFunc(\"\/error\", func(w http.ResponseWriter, r *http.Request) {\n\t\tapi.APIStatus(w, APIStatusUnexpectedError)\n\t\tapi.Error(w, map[string]string{\"message\": \"Internal server error\"}, http.StatusInternalServerError)\n\t})\n\n\t\/\/ Try Ok $ curl -X POST -d 'name=ToQoz&email=toqoz403@gmail.com' -D - :8099\/users\n\t\/\/ Try Error $ curl -X POST -D - :8099\/users\n\trouter.PostFunc(\"\/users\", func(w http.ResponseWriter, r *http.Request) {\n\t\tu := &User{\n\t\t\tName: r.FormValue(\"name\"),\n\t\t\tEmail: r.FormValue(\"email\"),\n\t\t}\n\n\t\terrs := u.Validate()\n\n\t\tif len(errs) > 0 {\n\t\t\tapi.APIStatus(w, APIStatusValidationError)\n\t\t\tapi.Error(w, newAPIErrors(errs), 422)\n\t\t\treturn\n\t\t}\n\n\t\terr := u.Save()\n\n\t\tif err != nil {\n\t\t\tapi.APIStatus(w, APIStatusUnexpectedError)\n\t\t\tapi.Error(w, newAPIErrors(errs), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tapi.APIStatus(w, APIStatusOk)\n\t\tapi.Ok(w, u, http.StatusCreated)\n\t})\n\n\t\/\/ --- Create listener ---\n\t\/\/ You can use utility, for example github.com\/lestrrat\/go-server-starter-listener etc.\n\tl, err := net.Listen(\"tcp\", \":8099\")\n\n\tif err != nil {\n\t\tlog.Printf(\"Could not listen: %s\", \":8099\")\n\t\tteardown()\n\t\tos.Exit(1)\n\t}\n\n\tlog.Printf(\"Listen: %s\", \":8099\")\n\n\t\/\/ --- Handle C-c ---\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\n\tgo func() {\n\t\tfor sig := range c {\n\t\t\tlog.Print(\"Stopping the server...\")\n\n\t\t\tswitch sig {\n\t\t\tcase os.Interrupt:\n\t\t\t\t\/\/ --- Stop Server ---\n\t\t\t\tapi.Stop()\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tlog.Print(\"Receive unknown signal...\")\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ --- Run Server ---\n\tapi.Run(l)\n}\n\nfunc teardown() {\n\tlog.Print(\"Tearing down...\")\n\tlog.Print(\"Finished - bye bye. ;-)\")\n}\n<|endoftext|>"} {"text":"<commit_before>package couchdb\n\nimport (\n\t\"testing\"\n)\n\ntype ViewDocument struct {\n\tDocument\n\tViews map[string]interface{} `json:\"views\"`\n}\n\nfunc (doc *ViewDocument) GetDocument() *Document {\n\treturn &doc.Document\n}\n\ntype DataDocument struct {\n\tDocument\n\tType string `json:\"type\"`\n\tFoo string `json:\"foo\"`\n\tBeep string `json:\"beep\"`\n}\n\nfunc (doc *DataDocument) GetDocument() *Document {\n\treturn &doc.Document\n}\n\nvar c_view = Client{\"http:\/\/127.0.0.1:5984\/\"}\nvar db_view = c_view.Use(\"gotest\")\n\nfunc TestViewBefore(t *testing.T) {\n\n\t\/\/ create database\n\tt.Log(\"creating database...\")\n\t_, err := c_view.Create(\"gotest\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ create design document\n\tt.Log(\"creating design document...\")\n\tview := make(map[string]string)\n\tview[\"map\"] =\n\t\t`\n function(doc){\n if (doc.type == 'data') emit(doc.foo)\n }\n `\n\tviews := make(map[string]interface{})\n\tviews[\"foo\"] = view\n\n\tdesign := &ViewDocument{\n\t\tDocument: Document{\n\t\t\tId: \"_design\/test\",\n\t\t},\n\t\tViews: views,\n\t}\n\n\t_, err = db_view.Post(design)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ create dummy data\n\tt.Log(\"creating dummy data...\")\n\tdoc1 := &DataDocument{\n\t\tType: \"data\",\n\t\tFoo: \"foo1\",\n\t\tBeep: \"beep1\",\n\t}\n\n\t_, err = db_view.Post(doc1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdoc2 := &DataDocument{\n\t\tType: \"data\",\n\t\tFoo: \"foo2\",\n\t\tBeep: \"beep2\",\n\t}\n\n\t_, err = db_view.Post(doc2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestViewGet(t *testing.T) {\n\tview := db_view.View(\"test\")\n\tparams := QueryParameters{}\n\tres, err := view.Get(\"foo\", params)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.TotalRows != 2 || res.Offset != 0 {\n\t\tt.Error(\"view get error\")\n\t}\n}\n\nfunc TestViewGetWithQueryParameters(t *testing.T) {\n view := db_view.View(\"test\")\n params := QueryParameters{\n Key: \"\\\"foo1\\\"\",\n }\n res, err := view.Get(\"foo\", params)\n if err != nil {\n t.Fatal(err)\n }\n\tif len(res.Rows) != 1 {\n\t\tt.Error(\"view get error\")\n\t}\n}\n\nfunc TestViewAfter(t *testing.T) {\n\tt.Log(\"deleting test data for view tests...\")\n\t_, err := c_view.Delete(\"gotest\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n<commit_msg>go fmt<commit_after>package couchdb\n\nimport (\n\t\"testing\"\n)\n\ntype ViewDocument struct {\n\tDocument\n\tViews map[string]interface{} `json:\"views\"`\n}\n\nfunc (doc *ViewDocument) GetDocument() *Document {\n\treturn &doc.Document\n}\n\ntype DataDocument struct {\n\tDocument\n\tType string `json:\"type\"`\n\tFoo string `json:\"foo\"`\n\tBeep string `json:\"beep\"`\n}\n\nfunc (doc *DataDocument) GetDocument() *Document {\n\treturn &doc.Document\n}\n\nvar c_view = Client{\"http:\/\/127.0.0.1:5984\/\"}\nvar db_view = c_view.Use(\"gotest\")\n\nfunc TestViewBefore(t *testing.T) {\n\n\t\/\/ create database\n\tt.Log(\"creating database...\")\n\t_, err := c_view.Create(\"gotest\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ create design document\n\tt.Log(\"creating design document...\")\n\tview := make(map[string]string)\n\tview[\"map\"] =\n\t\t`\n function(doc){\n if (doc.type == 'data') emit(doc.foo)\n }\n `\n\tviews := make(map[string]interface{})\n\tviews[\"foo\"] = view\n\n\tdesign := &ViewDocument{\n\t\tDocument: Document{\n\t\t\tId: \"_design\/test\",\n\t\t},\n\t\tViews: views,\n\t}\n\n\t_, err = db_view.Post(design)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ create dummy data\n\tt.Log(\"creating dummy data...\")\n\tdoc1 := &DataDocument{\n\t\tType: \"data\",\n\t\tFoo: \"foo1\",\n\t\tBeep: \"beep1\",\n\t}\n\n\t_, err = db_view.Post(doc1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdoc2 := &DataDocument{\n\t\tType: \"data\",\n\t\tFoo: \"foo2\",\n\t\tBeep: \"beep2\",\n\t}\n\n\t_, err = db_view.Post(doc2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestViewGet(t *testing.T) {\n\tview := db_view.View(\"test\")\n\tparams := QueryParameters{}\n\tres, err := view.Get(\"foo\", params)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.TotalRows != 2 || res.Offset != 0 {\n\t\tt.Error(\"view get error\")\n\t}\n}\n\nfunc TestViewGetWithQueryParameters(t *testing.T) {\n\tview := db_view.View(\"test\")\n\tparams := QueryParameters{\n\t\tKey: \"\\\"foo1\\\"\",\n\t}\n\tres, err := view.Get(\"foo\", params)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(res.Rows) != 1 {\n\t\tt.Error(\"view get error\")\n\t}\n}\n\nfunc TestViewAfter(t *testing.T) {\n\tt.Log(\"deleting test data for view tests...\")\n\t_, err := c_view.Delete(\"gotest\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Banrai LLC. All rights reserved. Use of this source code is\n\/\/ governed by the license that can be found in the LICENSE file.\n\npackage emailer\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"os\"\n\t\"text\/template\"\n)\n\nconst (\n\tMAIL_SERVER = \"localhost\"\n\tMAIL_PORT = 25\n\n\tLINE_MAX_LEN = 500 \/\/ for splitting encoded attachment data\n\n\t\/\/ templates for generating the message components\n\tADDRESS = \"\\\"{{.DisplayName}}\\\" <{{.Address}}>\"\n\tHEADERS = \"From: {{.Sender}}\\r\\nTo: {{.Recipient}}\\r\\nSubject: {{.Subject}}\\r\\nMIME-Version: 1.0\\r\\nContent-Type: multipart\/alternative; boundary=\\\"{{.Boundary}}\\\"\\r\\n\"\n\tBODY = \"\\r\\n--{{.Boundary}}\\r\\nContent-Type: {{.ContentType}}\\r\\n\\r\\n{{.MessageBody}}\"\n\tATTACHMENT = \"\\r\\n--{{.Boundary}}\\r\\nContent-Type: {{.ContentType}}; name=\\\"{{.FileLocation}}\\\"\\r\\nContent-Transfer-Encoding:base64\\r\\nContent-Disposition: attachment; filename=\\\"{{.FileName}}\\\"\\r\\n\\r\\n{{.EncodedFileData}}\"\n\n\t\/\/ message body mime types\n\tTEXT_MIME = \"text\/plain\"\n\tHTML_MIME = \"text\/html\"\n)\n\ntype EmailAddress struct {\n\tDisplayName string\n\tAddress string\n}\n\ntype EmailHeaders struct {\n\tSender string\n\tRecipient string\n\tSubject string\n\tBoundary string\n}\n\ntype EmailBody struct {\n\tContentType string\n\tMessageBody string\n\tBoundary string\n}\n\ntype EmailAttachment struct {\n\tContentType string\n\tEncodedFileData string\n\tBoundary string\n\t\/\/ read content from a file\n\tFileLocation string\n\tFileName string\n\t\/\/ or provide it here\n\tContents string\n}\n\n\/\/ GenerateBoundary produces a random string that can be used for the email\n\/\/ multipart boundary marker\nfunc GenerateBoundary() string {\n\tf, e := os.OpenFile(\"\/dev\/urandom\", os.O_RDONLY, 0)\n\tdefer f.Close()\n\n\tif e != nil {\n\t\treturn \"\"\n\t} else {\n\t\tb := make([]byte, 16)\n\t\tf.Read(b)\n\t\treturn fmt.Sprintf(\"%x\", b)\n\t}\n}\n\nfunc GenerateAddress(context *EmailAddress) (string, error) {\n\tvar doc bytes.Buffer\n\tt := template.Must(template.New(\"ADDRESS\").Parse(ADDRESS))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateHeaders(sender, recipient, subject, boundary string) (string, error) {\n\tvar doc bytes.Buffer\n\tcontext := &EmailHeaders{sender, recipient, subject, boundary}\n\tt := template.Must(template.New(\"HEADERS\").Parse(HEADERS))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateBody(message, contentType, boundary string) (string, error) {\n\tvar doc bytes.Buffer\n\tcontext := &EmailBody{contentType, message, boundary}\n\tt := template.Must(template.New(\"BODY\").Parse(BODY))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateAttachment(attachment *EmailAttachment) (string, error) {\n\tvar doc, buf bytes.Buffer\n\tvar encoded string\n\n\tif attachment.Contents != \"\" {\n\t\t\/\/ content to encode is already included\n\t\tencoded = base64.StdEncoding.EncodeToString([]byte(attachment.Contents))\n\t} else {\n\t\t\/\/ read the content from the file attachment\n\t\tcontent, contentErr := ioutil.ReadFile(attachment.FileLocation)\n\t\tif contentErr != nil {\n\t\t\treturn \"\", contentErr\n\t\t}\n\t\tencoded = base64.StdEncoding.EncodeToString(content)\n\t}\n\n\t\/\/ split the encoded data into individual lines\n\t\/\/ and append them to the byte buffer\n\tlines := len(encoded) \/ LINE_MAX_LEN\n\tfor i := 0; i < lines; i++ {\n\t\tbuf.WriteString(encoded[i*LINE_MAX_LEN:(i+1)*LINE_MAX_LEN] + \"\\n\")\n\t}\n\t\/\/ don't forget the last line in the buffer\n\tbuf.WriteString(encoded[lines*LINE_MAX_LEN:])\n\tattachment.EncodedFileData = buf.String()\n\n\t\/\/ can now process the template\n\tt := template.Must(template.New(\"ATTACHMENT\").Parse(ATTACHMENT))\n\terr := t.Execute(&doc, attachment)\n\treturn doc.String(), err\n}\n\n\/\/ SendFromServer transmits the given message, with optional attachments,\n\/\/ via the defined mail server and port\nfunc SendFromServer(subject, messageText, messageHtml, server string, sender, recipient *EmailAddress, attachments []*EmailAttachment, port int) error {\n\tvar buf bytes.Buffer\n\tboundary := GenerateBoundary()\n\n\tfrom, fromErr := GenerateAddress(sender)\n\tif fromErr != nil {\n\t\treturn fromErr\n\t}\n\n\tto, toErr := GenerateAddress(recipient)\n\tif toErr != nil {\n\t\treturn toErr\n\t}\n\n\thdr, hdrErr := GenerateHeaders(from, to, subject, boundary)\n\tif hdrErr != nil {\n\t\treturn hdrErr\n\t}\n\tbuf.WriteString(hdr)\n\n\tbody, bodyErr := GenerateBody(messageText, TEXT_MIME, boundary)\n\tif bodyErr != nil {\n\t\treturn bodyErr\n\t}\n\tbuf.WriteString(body)\n\n\t\/\/ message body in html format is optional\n\tif len(messageHtml) > 0 {\n\t\thtmlBody, htmlBodyErr := GenerateBody(messageHtml, HTML_MIME, boundary)\n\t\tif htmlBodyErr != nil {\n\t\t\treturn htmlBodyErr\n\t\t}\n\t\tbuf.WriteString(htmlBody)\n\t}\n\n\tfor _, a := range attachments {\n\t\ta.Boundary = boundary\n\t\tattach, attachErr := GenerateAttachment(a)\n\t\tif attachErr != nil {\n\t\t\treturn attachErr\n\t\t}\n\t\tbuf.WriteString(attach)\n\t}\n\n\t\/\/ add the closing boundary marker\n\tbuf.WriteString(\"\\r\\n--\")\n\tbuf.WriteString(boundary)\n\tbuf.WriteString(\"--\")\n\n\t\/\/ connect to the mail server + port\n\tc, cErr := smtp.Dial(fmt.Sprintf(\"%s:%d\", server, port))\n\tif cErr != nil {\n\t\treturn cErr\n\t}\n\n\t\/\/ set the sender and recipient (raw email address strings)\n\tc.Mail(sender.Address)\n\tc.Rcpt(recipient.Address)\n\n\t\/\/ stream the full email data\n\twc, err := c.Data()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer wc.Close()\n\n\t_, err = buf.WriteTo(wc)\n\treturn err\n}\n\n\/\/ Send transmits the given message, with optional attachments, via the\n\/\/ default mail server (localhost) and port (25)\nfunc Send(subject, messageText, messageHtml string, sender, recipient *EmailAddress, attachments []*EmailAttachment) error {\n\treturn SendFromServer(subject, messageText, messageHtml, MAIL_SERVER, sender, recipient, attachments, MAIL_PORT)\n}\n<commit_msg>Seeing if I can stick with a relatively simple internal email header structure, w\/o having to do this: stackoverflow.com\/a\/40420648\/633961<commit_after>\/\/ Copyright Banrai LLC. All rights reserved. Use of this source code is\n\/\/ governed by the license that can be found in the LICENSE file.\n\npackage emailer\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/smtp\"\n\t\"os\"\n\t\"text\/template\"\n)\n\nconst (\n\tMAIL_SERVER = \"localhost\"\n\tMAIL_PORT = 25\n\n\tLINE_MAX_LEN = 500 \/\/ for splitting encoded attachment data\n\n\t\/\/ templates for generating the message components\n\tADDRESS = \"\\\"{{.DisplayName}}\\\" <{{.Address}}>\"\n\tHEADERS = \"From: {{.Sender}}\\r\\nTo: {{.Recipient}}\\r\\nSubject: {{.Subject}}\\r\\nMIME-Version: 1.0\\r\\nContent-Type: multipart\/mixed; boundary=\\\"{{.Boundary}}\\\"\\r\\n\"\n\tBODY = \"\\r\\n--{{.Boundary}}\\r\\nContent-Type: {{.ContentType}}\\r\\n\\r\\n{{.MessageBody}}\"\n\tATTACHMENT = \"\\r\\n--{{.Boundary}}\\r\\nContent-Type: {{.ContentType}}; name=\\\"{{.FileLocation}}\\\"\\r\\nContent-Transfer-Encoding:base64\\r\\nContent-Disposition: attachment; filename=\\\"{{.FileName}}\\\"\\r\\n\\r\\n{{.EncodedFileData}}\"\n\n\t\/\/ message body mime types\n\tTEXT_MIME = \"text\/plain\"\n\tHTML_MIME = \"text\/html\"\n)\n\ntype EmailAddress struct {\n\tDisplayName string\n\tAddress string\n}\n\ntype EmailHeaders struct {\n\tSender string\n\tRecipient string\n\tSubject string\n\tBoundary string\n}\n\ntype EmailBody struct {\n\tContentType string\n\tMessageBody string\n\tBoundary string\n}\n\ntype EmailAttachment struct {\n\tContentType string\n\tEncodedFileData string\n\tBoundary string\n\t\/\/ read content from a file\n\tFileLocation string\n\tFileName string\n\t\/\/ or provide it here\n\tContents string\n}\n\n\/\/ GenerateBoundary produces a random string that can be used for the email\n\/\/ multipart boundary marker\nfunc GenerateBoundary() string {\n\tf, e := os.OpenFile(\"\/dev\/urandom\", os.O_RDONLY, 0)\n\tdefer f.Close()\n\n\tif e != nil {\n\t\treturn \"\"\n\t} else {\n\t\tb := make([]byte, 16)\n\t\tf.Read(b)\n\t\treturn fmt.Sprintf(\"%x\", b)\n\t}\n}\n\nfunc GenerateAddress(context *EmailAddress) (string, error) {\n\tvar doc bytes.Buffer\n\tt := template.Must(template.New(\"ADDRESS\").Parse(ADDRESS))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateHeaders(sender, recipient, subject, boundary string) (string, error) {\n\tvar doc bytes.Buffer\n\tcontext := &EmailHeaders{sender, recipient, subject, boundary}\n\tt := template.Must(template.New(\"HEADERS\").Parse(HEADERS))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateBody(message, contentType, boundary string) (string, error) {\n\tvar doc bytes.Buffer\n\tcontext := &EmailBody{contentType, message, boundary}\n\tt := template.Must(template.New(\"BODY\").Parse(BODY))\n\terr := t.Execute(&doc, context)\n\treturn doc.String(), err\n}\n\nfunc GenerateAttachment(attachment *EmailAttachment) (string, error) {\n\tvar doc, buf bytes.Buffer\n\tvar encoded string\n\n\tif attachment.Contents != \"\" {\n\t\t\/\/ content to encode is already included\n\t\tencoded = base64.StdEncoding.EncodeToString([]byte(attachment.Contents))\n\t} else {\n\t\t\/\/ read the content from the file attachment\n\t\tcontent, contentErr := ioutil.ReadFile(attachment.FileLocation)\n\t\tif contentErr != nil {\n\t\t\treturn \"\", contentErr\n\t\t}\n\t\tencoded = base64.StdEncoding.EncodeToString(content)\n\t}\n\n\t\/\/ split the encoded data into individual lines\n\t\/\/ and append them to the byte buffer\n\tlines := len(encoded) \/ LINE_MAX_LEN\n\tfor i := 0; i < lines; i++ {\n\t\tbuf.WriteString(encoded[i*LINE_MAX_LEN:(i+1)*LINE_MAX_LEN] + \"\\n\")\n\t}\n\t\/\/ don't forget the last line in the buffer\n\tbuf.WriteString(encoded[lines*LINE_MAX_LEN:])\n\tattachment.EncodedFileData = buf.String()\n\n\t\/\/ can now process the template\n\tt := template.Must(template.New(\"ATTACHMENT\").Parse(ATTACHMENT))\n\terr := t.Execute(&doc, attachment)\n\treturn doc.String(), err\n}\n\n\/\/ SendFromServer transmits the given message, with optional attachments,\n\/\/ via the defined mail server and port\nfunc SendFromServer(subject, messageText, messageHtml, server string, sender, recipient *EmailAddress, attachments []*EmailAttachment, port int) error {\n\tvar buf bytes.Buffer\n\tboundary := GenerateBoundary()\n\n\tfrom, fromErr := GenerateAddress(sender)\n\tif fromErr != nil {\n\t\treturn fromErr\n\t}\n\n\tto, toErr := GenerateAddress(recipient)\n\tif toErr != nil {\n\t\treturn toErr\n\t}\n\n\thdr, hdrErr := GenerateHeaders(from, to, subject, boundary)\n\tif hdrErr != nil {\n\t\treturn hdrErr\n\t}\n\tbuf.WriteString(hdr)\n\n\tbody, bodyErr := GenerateBody(messageText, TEXT_MIME, boundary)\n\tif bodyErr != nil {\n\t\treturn bodyErr\n\t}\n\tbuf.WriteString(body)\n\n\t\/\/ message body in html format is optional\n\tif len(messageHtml) > 0 {\n\t\thtmlBody, htmlBodyErr := GenerateBody(messageHtml, HTML_MIME, boundary)\n\t\tif htmlBodyErr != nil {\n\t\t\treturn htmlBodyErr\n\t\t}\n\t\tbuf.WriteString(htmlBody)\n\t}\n\n\tfor _, a := range attachments {\n\t\ta.Boundary = boundary\n\t\tattach, attachErr := GenerateAttachment(a)\n\t\tif attachErr != nil {\n\t\t\treturn attachErr\n\t\t}\n\t\tbuf.WriteString(attach)\n\t}\n\n\t\/\/ add the closing boundary marker\n\tbuf.WriteString(\"\\r\\n--\")\n\tbuf.WriteString(boundary)\n\tbuf.WriteString(\"--\")\n\n\t\/\/ connect to the mail server + port\n\tc, cErr := smtp.Dial(fmt.Sprintf(\"%s:%d\", server, port))\n\tif cErr != nil {\n\t\treturn cErr\n\t}\n\n\t\/\/ set the sender and recipient (raw email address strings)\n\tc.Mail(sender.Address)\n\tc.Rcpt(recipient.Address)\n\n\t\/\/ stream the full email data\n\twc, err := c.Data()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer wc.Close()\n\n\t_, err = buf.WriteTo(wc)\n\treturn err\n}\n\n\/\/ Send transmits the given message, with optional attachments, via the\n\/\/ default mail server (localhost) and port (25)\nfunc Send(subject, messageText, messageHtml string, sender, recipient *EmailAddress, attachments []*EmailAttachment) error {\n\treturn SendFromServer(subject, messageText, messageHtml, MAIL_SERVER, sender, recipient, attachments, MAIL_PORT)\n}\n<|endoftext|>"} {"text":"<commit_before>package neurgo\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ajstarks\/svgo\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\ntype Point struct {\n\tx int\n\ty int\n}\n\ntype NodeCircleSVG struct {\n\tPoint\n\tradius int\n}\n\ntype NodeUUIDToCircleSVG map[string]NodeCircleSVG\n\nfunc (cortex *Cortex) RenderSVGFile(filename string) {\n\toutfile, err := os.Create(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := outfile.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tcortex.RenderSVG(outfile)\n}\n\nfunc (cortex *Cortex) RenderSVG(writer io.Writer) {\n\n\twidth := 1000\n\theight := 1000\n\tx := 100\n\txDelta := 100\n\tyDelta := 100\n\tradius := 25\n\tneuronFill := \"fill:blue\"\n\tactuatorFill := \"fill:magenta\"\n\tsensorFill := \"fill:green\"\n\n\tcanvas := svg.New(writer)\n\tcanvas.Start(width, height)\n\tcanvas.Rect(0, 0, width, height, canvas.RGB(255, 255, 255))\n\n\tnodeUUIDToCircleSVG := make(NodeUUIDToCircleSVG)\n\tlayerToNodeIdMap := cortex.NodeIdLayerMap()\n\tlayerIndexes := layerToNodeIdMap.Keys()\n\n\tfor _, layerIndex := range layerIndexes {\n\n\t\ty := 100\n\t\tnodeIds := layerToNodeIdMap[layerIndex]\n\t\tlayerIndexStr := fmt.Sprintf(\"%v\", layerIndex)\n\n\t\tcanvas.Text(x, y, layerIndexStr, \"font-size:12;fill:black\")\n\t\ty += yDelta\n\n\t\tfor _, nodeId := range nodeIds {\n\n\t\t\tswitch nodeId.NodeType {\n\t\t\tcase NEURON:\n\t\t\t\tcanvas.Circle(x, y, radius, neuronFill)\n\t\t\tcase ACTUATOR:\n\t\t\t\tcanvas.Circle(x, y, radius, actuatorFill)\n\t\t\tcase SENSOR:\n\t\t\t\tcanvas.Circle(x, y, radius, sensorFill)\n\t\t\t}\n\n\t\t\tcircleSVG := NodeCircleSVG{Point{x: x, y: y}, radius}\n\t\t\tnodeUUIDToCircleSVG[nodeId.UUID] = circleSVG\n\n\t\t\ty += yDelta\n\t\t}\n\n\t\tx += xDelta\n\t}\n\n\taddConnectionsToSVG(cortex, canvas, nodeUUIDToCircleSVG)\n\n\tcanvas.End()\n\n}\n\nfunc addConnectionsToSVG(cortex *Cortex, canvas *svg.SVG, nodeUUIDToCircleSVG NodeUUIDToCircleSVG) {\n\n\tlayerToNodeIdMap := cortex.NodeIdLayerMap()\n\tlayerIndexes := layerToNodeIdMap.Keys()\n\n\t\/\/ loop over all layers\n\tfor _, layerIndex := range layerIndexes {\n\n\t\tnodeIds := layerToNodeIdMap[layerIndex]\n\n\t\t\/\/ loop over all nodes\n\t\tfor _, nodeId := range nodeIds {\n\t\t\tlog.Printf(\"nodeId: %v\", nodeId)\n\n\t\t\t\/\/ lookup node (assuming it is an OutboundConnector)\n\t\t\tnode := cortex.FindConnector(nodeId)\n\t\t\tif node == nil {\n\t\t\t\t\/\/ if not, ignore it (eg, actuator)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ loop over all outbound connections\n\t\t\tfor _, outbound := range node.outbound() {\n\t\t\t\ttgtNodeId := outbound.NodeId\n\t\t\t\tsrcCircle := nodeUUIDToCircleSVG[nodeId.UUID]\n\t\t\t\ttgtCircle := nodeUUIDToCircleSVG[tgtNodeId.UUID]\n\n\t\t\t\tlayerDelta := tgtNodeId.LayerIndex - nodeId.LayerIndex\n\t\t\t\tif layerDelta > 0 {\n\t\t\t\t\tadjacent := layerToNodeIdMap.LayersAdjacent(nodeId.LayerIndex, tgtNodeId.LayerIndex)\n\t\t\t\t\tif adjacent {\n\t\t\t\t\t\tforwardConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tforwardConnectDistantNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t\t}\n\n\t\t\t\t} else if layerDelta == 0 {\n\t\t\t\t\tselfRecurrentConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t} else if layerDelta < 0 {\n\t\t\t\t\trecurrentConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}\n\nfunc recurrentConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"turquoise\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`}\n\tmidpoint := midpoint(Point{x: src.x, y: src.y}, Point{x: tgt.x, y: tgt.y})\n\tcontrolX := midpoint.x\n\tcontrolY := midpoint.y - 50\n\tcanvas.Qbez(src.x, src.y, controlX, controlY, tgt.x, tgt.y, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3])\n\n}\n\nfunc selfRecurrentConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"turquoise\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`}\n\n\tsrcX := src.x - 10\n\tsrcY := src.y\n\ttgtX := src.x + 10\n\ttgtY := src.y\n\n\tcontrolX := src.x\n\tcontrolY := src.y - 100\n\tcanvas.Qbez(srcX, srcY, controlX, controlY, tgtX, tgtY, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3])\n\n}\n\nfunc midpoint(p1 Point, p2 Point) Point {\n\tpResult := Point{}\n\tpResult.x = (p1.x + p2.x) \/ 2\n\tpResult.y = (p1.y + p2.y) \/ 2\n\treturn pResult\n}\n\nfunc forwardConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\tlinestyle := []string{`stroke=\"black\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`}\n\n\tlog.Printf(\"line: src.x %v, src.y: %v tgt.x: %v tgt.y: %v\", src.x, src.y, tgt.x, tgt.y)\n\n\tcanvas.Line(src.x, src.y, tgt.x, tgt.y, linestyle[0], linestyle[1], linestyle[2])\n\n}\n\nfunc forwardConnectDistantNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"black\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`, `stroke-dasharray=\"10,10\"`}\n\tmidpoint := midpoint(Point{x: src.x, y: src.y}, Point{x: tgt.x, y: tgt.y})\n\tcontrolX := midpoint.x\n\tcontrolY := midpoint.y + 50\n\tcanvas.Qbez(src.x, src.y, controlX, controlY, tgt.x, tgt.y, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3], linestyle2[4])\n\n}\n<commit_msg>remove logging<commit_after>package neurgo\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ajstarks\/svgo\"\n\t\"io\"\n\t\"os\"\n)\n\ntype Point struct {\n\tx int\n\ty int\n}\n\ntype NodeCircleSVG struct {\n\tPoint\n\tradius int\n}\n\ntype NodeUUIDToCircleSVG map[string]NodeCircleSVG\n\nfunc (cortex *Cortex) RenderSVGFile(filename string) {\n\toutfile, err := os.Create(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := outfile.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tcortex.RenderSVG(outfile)\n}\n\nfunc (cortex *Cortex) RenderSVG(writer io.Writer) {\n\n\twidth := 1000\n\theight := 1000\n\tx := 100\n\txDelta := 100\n\tyDelta := 100\n\tradius := 25\n\tneuronFill := \"fill:blue\"\n\tactuatorFill := \"fill:magenta\"\n\tsensorFill := \"fill:green\"\n\n\tcanvas := svg.New(writer)\n\tcanvas.Start(width, height)\n\tcanvas.Rect(0, 0, width, height, canvas.RGB(255, 255, 255))\n\n\tnodeUUIDToCircleSVG := make(NodeUUIDToCircleSVG)\n\tlayerToNodeIdMap := cortex.NodeIdLayerMap()\n\tlayerIndexes := layerToNodeIdMap.Keys()\n\n\tfor _, layerIndex := range layerIndexes {\n\n\t\ty := 100\n\t\tnodeIds := layerToNodeIdMap[layerIndex]\n\t\tlayerIndexStr := fmt.Sprintf(\"%v\", layerIndex)\n\n\t\tcanvas.Text(x, y, layerIndexStr, \"font-size:12;fill:black\")\n\t\ty += yDelta\n\n\t\tfor _, nodeId := range nodeIds {\n\n\t\t\tswitch nodeId.NodeType {\n\t\t\tcase NEURON:\n\t\t\t\tcanvas.Circle(x, y, radius, neuronFill)\n\t\t\tcase ACTUATOR:\n\t\t\t\tcanvas.Circle(x, y, radius, actuatorFill)\n\t\t\tcase SENSOR:\n\t\t\t\tcanvas.Circle(x, y, radius, sensorFill)\n\t\t\t}\n\n\t\t\tcircleSVG := NodeCircleSVG{Point{x: x, y: y}, radius}\n\t\t\tnodeUUIDToCircleSVG[nodeId.UUID] = circleSVG\n\n\t\t\ty += yDelta\n\t\t}\n\n\t\tx += xDelta\n\t}\n\n\taddConnectionsToSVG(cortex, canvas, nodeUUIDToCircleSVG)\n\n\tcanvas.End()\n\n}\n\nfunc addConnectionsToSVG(cortex *Cortex, canvas *svg.SVG, nodeUUIDToCircleSVG NodeUUIDToCircleSVG) {\n\n\tlayerToNodeIdMap := cortex.NodeIdLayerMap()\n\tlayerIndexes := layerToNodeIdMap.Keys()\n\n\t\/\/ loop over all layers\n\tfor _, layerIndex := range layerIndexes {\n\n\t\tnodeIds := layerToNodeIdMap[layerIndex]\n\n\t\t\/\/ loop over all nodes\n\t\tfor _, nodeId := range nodeIds {\n\n\t\t\t\/\/ lookup node (assuming it is an OutboundConnector)\n\t\t\tnode := cortex.FindConnector(nodeId)\n\t\t\tif node == nil {\n\t\t\t\t\/\/ if not, ignore it (eg, actuator)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ loop over all outbound connections\n\t\t\tfor _, outbound := range node.outbound() {\n\t\t\t\ttgtNodeId := outbound.NodeId\n\t\t\t\tsrcCircle := nodeUUIDToCircleSVG[nodeId.UUID]\n\t\t\t\ttgtCircle := nodeUUIDToCircleSVG[tgtNodeId.UUID]\n\n\t\t\t\tlayerDelta := tgtNodeId.LayerIndex - nodeId.LayerIndex\n\t\t\t\tif layerDelta > 0 {\n\t\t\t\t\tadjacent := layerToNodeIdMap.LayersAdjacent(nodeId.LayerIndex, tgtNodeId.LayerIndex)\n\t\t\t\t\tif adjacent {\n\t\t\t\t\t\tforwardConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tforwardConnectDistantNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t\t}\n\n\t\t\t\t} else if layerDelta == 0 {\n\t\t\t\t\tselfRecurrentConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t} else if layerDelta < 0 {\n\t\t\t\t\trecurrentConnectNodesSVG(canvas, srcCircle, tgtCircle)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}\n\nfunc recurrentConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"turquoise\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`}\n\tmidpoint := midpoint(Point{x: src.x, y: src.y}, Point{x: tgt.x, y: tgt.y})\n\tcontrolX := midpoint.x\n\tcontrolY := midpoint.y - 50\n\tcanvas.Qbez(src.x, src.y, controlX, controlY, tgt.x, tgt.y, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3])\n\n}\n\nfunc selfRecurrentConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"turquoise\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`}\n\n\tsrcX := src.x - 10\n\tsrcY := src.y\n\ttgtX := src.x + 10\n\ttgtY := src.y\n\n\tcontrolX := src.x\n\tcontrolY := src.y - 100\n\tcanvas.Qbez(srcX, srcY, controlX, controlY, tgtX, tgtY, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3])\n\n}\n\nfunc midpoint(p1 Point, p2 Point) Point {\n\tpResult := Point{}\n\tpResult.x = (p1.x + p2.x) \/ 2\n\tpResult.y = (p1.y + p2.y) \/ 2\n\treturn pResult\n}\n\nfunc forwardConnectNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\tlinestyle := []string{`stroke=\"black\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`}\n\n\tcanvas.Line(src.x, src.y, tgt.x, tgt.y, linestyle[0], linestyle[1], linestyle[2])\n\n}\n\nfunc forwardConnectDistantNodesSVG(canvas *svg.SVG, src NodeCircleSVG, tgt NodeCircleSVG) {\n\n\tlinestyle2 := []string{`stroke=\"black\"`, `stroke-linecap=\"round\"`, `stroke-width=\"5\"`, `fill=\"none\"`, `stroke-dasharray=\"10,10\"`}\n\tmidpoint := midpoint(Point{x: src.x, y: src.y}, Point{x: tgt.x, y: tgt.y})\n\tcontrolX := midpoint.x\n\tcontrolY := midpoint.y + 50\n\tcanvas.Qbez(src.x, src.y, controlX, controlY, tgt.x, tgt.y, linestyle2[0], linestyle2[1], linestyle2[2], linestyle2[3], linestyle2[4])\n\n}\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nbusy\/devastator\"\n\t\"github.com\/nbusy\/neptulon\/jsonrpc\"\n)\n\n\/\/ ClientHelper is a neptulon\/jsonrpc.Client wrapper.\n\/\/ All the functions are wrapped with proper test runner error logging.\ntype ClientHelper struct {\n\tclient *jsonrpc.Client\n\tserver *ServerHelper\n\ttesting *testing.T\n\tcert, key []byte\n}\n\n\/\/ NewClientHelper creates a new client helper object.\nfunc NewClientHelper(t *testing.T, s *ServerHelper) *ClientHelper {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short testing mode\")\n\t}\n\n\treturn &ClientHelper{testing: t, server: s}\n}\n\n\/\/ AsUser attaches given user's client certificate and private key to the connection.\nfunc (c *ClientHelper) AsUser(u *devastator.User) *ClientHelper {\n\tc.cert = u.Cert\n\tc.key = u.Key\n\treturn c\n}\n\n\/\/ WithCert attaches given PEM encoded client certificate and private key to the connection.\nfunc (c *ClientHelper) WithCert(cert, key []byte) *ClientHelper {\n\tc.cert = cert\n\tc.key = key\n\treturn c\n}\n\n\/\/ Dial initiates a connection.\nfunc (c *ClientHelper) Dial() *ClientHelper {\n\taddr := \"127.0.0.1:\" + devastator.Conf.App.Port\n\n\t\/\/ retry connect in case we're operating on a very slow machine\n\tfor i := 0; i <= 5; i++ {\n\t\tclient, err := jsonrpc.Dial(addr, c.server.IntCACert, c.cert, c.key, false) \/\/ no need for debug mode on client conn as we have it on server conn already\n\t\tif err != nil {\n\t\t\tif operr, ok := err.(*net.OpError); ok && operr.Op == \"dial\" && operr.Err.Error() == \"connection refused\" {\n\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\tcontinue\n\t\t\t} else if i == 5 {\n\t\t\t\tc.testing.Fatalf(\"Cannot connect to server address %v after 5 retries, with error: %v\", addr, err)\n\t\t\t}\n\t\t\tc.testing.Fatalf(\"Cannot connect to server address %v with error: %v\", addr, err)\n\t\t}\n\n\t\tif i != 0 {\n\t\t\tc.testing.Logf(\"WARNING: it took %v retries to connect to the server, which might indicate code issues or slow machine.\", i)\n\t\t}\n\n\t\tclient.SetReadDeadline(10)\n\t\tc.client = client\n\t\treturn c\n\t}\n\n\treturn nil\n}\n\n\/\/ WriteRequest sends a request message through the client connection.\nfunc (c *ClientHelper) WriteRequest(method string, params interface{}) (reqID string) {\n\tid, err := c.client.WriteRequest(method, params)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to write request to client connection:\", err)\n\t}\n\treturn id\n}\n\n\/\/ WriteRequestArr sends a request message through the client connection. Params object is variadic.\nfunc (c *ClientHelper) WriteRequestArr(method string, params ...interface{}) (reqID string) {\n\treturn c.WriteRequest(method, params)\n}\n\n\/\/ WriteNotification sends a notification message through the client connection.\nfunc (c *ClientHelper) WriteNotification(method string, params interface{}) {\n\tif err := c.client.WriteNotification(method, params); err != nil {\n\t\tc.testing.Fatal(\"Failed to write notification to client connection:\", err)\n\t}\n}\n\n\/\/ WriteNotificationArr sends a notification message through the client connection. Params object is variadic.\nfunc (c *ClientHelper) WriteNotificationArr(method string, params ...interface{}) {\n\tc.WriteNotification(method, params)\n}\n\n\/\/ WriteResponse sends a response message through the client connection.\nfunc (c *ClientHelper) WriteResponse(id string, result interface{}, err *jsonrpc.ResError) {\n\tif err := c.client.WriteResponse(id, result, err); err != nil {\n\t\tc.testing.Fatal(\"Failed to write response to client connection:\", err)\n\t}\n}\n\n\/\/ ReadReq reads a request object from a client connection.\n\/\/ If incoming message is not a request, a fatal error is logged.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC request params data will be serialized into.\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadReq(paramsData interface{}) *jsonrpc.Request {\n\treq, _, _, err := c.client.ReadMsg(nil, paramsData)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read request from client connection:\", err)\n\t}\n\n\tif req == nil {\n\t\tc.testing.Fatal(\"Read message was not a request message.\")\n\t}\n\n\treturn req\n}\n\n\/\/ ReadRes reads a response object from a client connection.\n\/\/ If incoming message is not a response, a fatal error is logged.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC response result data will be serialized into.\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadRes(resultData interface{}) *jsonrpc.Response {\n\t_, res, _, err := c.client.ReadMsg(resultData, nil)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read response from client connection:\", err)\n\t}\n\n\tif res == nil {\n\t\tc.testing.Fatal(\"Read message was not a response message.\")\n\t}\n\n\treturn res\n}\n\n\/\/ ReadMsg reads a JSON-RPC message from a client connection. If possible, use more specific ReadReq\/ReadRes\/ReadNot methods instead.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC response result data will be serialized into (same for request params).\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadMsg(resultData interface{}, paramsData interface{}) (req *jsonrpc.Request, res *jsonrpc.Response, not *jsonrpc.Notification) {\n\treq, res, not, err := c.client.ReadMsg(resultData, paramsData)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read message from client connection:\", err)\n\t}\n\n\treturn\n}\n\n\/\/ VerifyConnClosed verifies that the connection is in closed state.\n\/\/ Verification is done via reading from the channel and checking that returned error is io.EOF.\nfunc (c *ClientHelper) VerifyConnClosed() bool {\n\t_, _, _, err := c.client.ReadMsg(nil, nil)\n\tif err != io.EOF {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ Close closes a client connection.\nfunc (c *ClientHelper) Close() {\n\tif err := c.client.Close(); err != nil {\n\t\tc.testing.Fatal(\"Failed to close client connection:\", err)\n\t}\n}\n<commit_msg>add doc<commit_after>package test\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nbusy\/devastator\"\n\t\"github.com\/nbusy\/neptulon\/jsonrpc\"\n)\n\n\/\/ ClientHelper is a neptulon\/jsonrpc.Client wrapper.\n\/\/ All the functions are wrapped with proper test runner error logging.\ntype ClientHelper struct {\n\tclient *jsonrpc.Client\n\tserver *ServerHelper \/\/ server that this client will be connecting to\n\ttesting *testing.T\n\tcert, key []byte\n}\n\n\/\/ NewClientHelper creates a new client helper object.\nfunc NewClientHelper(t *testing.T, s *ServerHelper) *ClientHelper {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short testing mode\")\n\t}\n\n\treturn &ClientHelper{testing: t, server: s}\n}\n\n\/\/ AsUser attaches given user's client certificate and private key to the connection.\nfunc (c *ClientHelper) AsUser(u *devastator.User) *ClientHelper {\n\tc.cert = u.Cert\n\tc.key = u.Key\n\treturn c\n}\n\n\/\/ WithCert attaches given PEM encoded client certificate and private key to the connection.\nfunc (c *ClientHelper) WithCert(cert, key []byte) *ClientHelper {\n\tc.cert = cert\n\tc.key = key\n\treturn c\n}\n\n\/\/ Dial initiates a connection.\nfunc (c *ClientHelper) Dial() *ClientHelper {\n\taddr := \"127.0.0.1:\" + devastator.Conf.App.Port\n\n\t\/\/ retry connect in case we're operating on a very slow machine\n\tfor i := 0; i <= 5; i++ {\n\t\tclient, err := jsonrpc.Dial(addr, c.server.IntCACert, c.cert, c.key, false) \/\/ no need for debug mode on client conn as we have it on server conn already\n\t\tif err != nil {\n\t\t\tif operr, ok := err.(*net.OpError); ok && operr.Op == \"dial\" && operr.Err.Error() == \"connection refused\" {\n\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\tcontinue\n\t\t\t} else if i == 5 {\n\t\t\t\tc.testing.Fatalf(\"Cannot connect to server address %v after 5 retries, with error: %v\", addr, err)\n\t\t\t}\n\t\t\tc.testing.Fatalf(\"Cannot connect to server address %v with error: %v\", addr, err)\n\t\t}\n\n\t\tif i != 0 {\n\t\t\tc.testing.Logf(\"WARNING: it took %v retries to connect to the server, which might indicate code issues or slow machine.\", i)\n\t\t}\n\n\t\tclient.SetReadDeadline(10)\n\t\tc.client = client\n\t\treturn c\n\t}\n\n\treturn nil\n}\n\n\/\/ WriteRequest sends a request message through the client connection.\nfunc (c *ClientHelper) WriteRequest(method string, params interface{}) (reqID string) {\n\tid, err := c.client.WriteRequest(method, params)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to write request to client connection:\", err)\n\t}\n\treturn id\n}\n\n\/\/ WriteRequestArr sends a request message through the client connection. Params object is variadic.\nfunc (c *ClientHelper) WriteRequestArr(method string, params ...interface{}) (reqID string) {\n\treturn c.WriteRequest(method, params)\n}\n\n\/\/ WriteNotification sends a notification message through the client connection.\nfunc (c *ClientHelper) WriteNotification(method string, params interface{}) {\n\tif err := c.client.WriteNotification(method, params); err != nil {\n\t\tc.testing.Fatal(\"Failed to write notification to client connection:\", err)\n\t}\n}\n\n\/\/ WriteNotificationArr sends a notification message through the client connection. Params object is variadic.\nfunc (c *ClientHelper) WriteNotificationArr(method string, params ...interface{}) {\n\tc.WriteNotification(method, params)\n}\n\n\/\/ WriteResponse sends a response message through the client connection.\nfunc (c *ClientHelper) WriteResponse(id string, result interface{}, err *jsonrpc.ResError) {\n\tif err := c.client.WriteResponse(id, result, err); err != nil {\n\t\tc.testing.Fatal(\"Failed to write response to client connection:\", err)\n\t}\n}\n\n\/\/ ReadReq reads a request object from a client connection.\n\/\/ If incoming message is not a request, a fatal error is logged.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC request params data will be serialized into.\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadReq(paramsData interface{}) *jsonrpc.Request {\n\treq, _, _, err := c.client.ReadMsg(nil, paramsData)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read request from client connection:\", err)\n\t}\n\n\tif req == nil {\n\t\tc.testing.Fatal(\"Read message was not a request message.\")\n\t}\n\n\treturn req\n}\n\n\/\/ ReadRes reads a response object from a client connection.\n\/\/ If incoming message is not a response, a fatal error is logged.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC response result data will be serialized into.\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadRes(resultData interface{}) *jsonrpc.Response {\n\t_, res, _, err := c.client.ReadMsg(resultData, nil)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read response from client connection:\", err)\n\t}\n\n\tif res == nil {\n\t\tc.testing.Fatal(\"Read message was not a response message.\")\n\t}\n\n\treturn res\n}\n\n\/\/ ReadMsg reads a JSON-RPC message from a client connection. If possible, use more specific ReadReq\/ReadRes\/ReadNot methods instead.\n\/\/ Optionally, you can pass in a data structure that the returned JSON-RPC response result data will be serialized into (same for request params).\n\/\/ Otherwise json.Unmarshal defaults apply.\nfunc (c *ClientHelper) ReadMsg(resultData interface{}, paramsData interface{}) (req *jsonrpc.Request, res *jsonrpc.Response, not *jsonrpc.Notification) {\n\treq, res, not, err := c.client.ReadMsg(resultData, paramsData)\n\tif err != nil {\n\t\tc.testing.Fatal(\"Failed to read message from client connection:\", err)\n\t}\n\n\treturn\n}\n\n\/\/ VerifyConnClosed verifies that the connection is in closed state.\n\/\/ Verification is done via reading from the channel and checking that returned error is io.EOF.\nfunc (c *ClientHelper) VerifyConnClosed() bool {\n\t_, _, _, err := c.client.ReadMsg(nil, nil)\n\tif err != io.EOF {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ Close closes a client connection.\nfunc (c *ClientHelper) Close() {\n\tif err := c.client.Close(); err != nil {\n\t\tc.testing.Fatal(\"Failed to close client connection:\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package monitor\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/rancher\/norman\/parse\"\n\t\"github.com\/rancher\/norman\/types\"\n\t\"github.com\/rancher\/norman\/types\/convert\"\n\t\"github.com\/rancher\/rancher\/pkg\/clustermanager\"\n\tmonitorutil \"github.com\/rancher\/rancher\/pkg\/monitoring\"\n\t\"github.com\/rancher\/rancher\/pkg\/ref\"\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\/dialer\"\n)\n\nfunc NewMetricHandler(dialerFactory dialer.Factory, clustermanager *clustermanager.Manager) *MetricHandler {\n\treturn &MetricHandler{\n\t\tdialerFactory: dialerFactory,\n\t\tclustermanager: clustermanager,\n\t}\n}\n\ntype MetricHandler struct {\n\tdialerFactory dialer.Factory\n\tclustermanager *clustermanager.Manager\n}\n\nfunc (h *MetricHandler) Action(actionName string, action *types.Action, apiContext *types.APIContext) error {\n\tswitch actionName {\n\tcase querycluster, queryproject:\n\t\tvar clusterName, projectName, appName, saNamespace string\n\t\tvar comm v3.CommonQueryMetricInput\n\t\tvar err error\n\t\tvar svcNamespace, svcName string\n\n\t\tif actionName == querycluster {\n\t\t\tvar queryMetricInput v3.QueryClusterMetricInput\n\t\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = convert.ToObj(actionInput, &queryMetricInput); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tclusterName = queryMetricInput.ClusterName\n\t\t\tif clusterName == \"\" {\n\t\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t\t}\n\n\t\t\tcomm = queryMetricInput.CommonQueryMetricInput\n\t\t\tappName, saNamespace = monitorutil.ClusterMonitoringInfo()\n\t\t\tsvcName, svcNamespace, _ = monitorutil.ClusterPrometheusEndpoint()\n\t\t} else {\n\t\t\tvar queryMetricInput v3.QueryProjectMetricInput\n\t\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = convert.ToObj(actionInput, &queryMetricInput); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tprojectID := queryMetricInput.ProjectName\n\t\t\tclusterName, projectName = ref.Parse(projectID)\n\n\t\t\tif clusterName == \"\" {\n\t\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t\t}\n\n\t\t\tcomm = queryMetricInput.CommonQueryMetricInput\n\t\t\tappName, saNamespace = monitorutil.ProjectMonitoringInfo(projectName)\n\t\t\tsvcName, svcNamespace = monitorutil.ProjectPrometheusServiceInfo(projectName)\n\t\t}\n\n\t\tstart, end, step, err := parseTimeParams(comm.From, comm.To, comm.Interval)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\tprometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, svcNamespace, svcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tquery := InitPromQuery(\"\", start, end, step, comm.Expr, \"\", false)\n\t\tseriesSlice, err := prometheusQuery.QueryRange(query)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif seriesSlice == nil {\n\t\t\tapiContext.WriteResponse(http.StatusNoContent, nil)\n\t\t\treturn nil\n\t\t}\n\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"queryMetricOutput\",\n\t\t\t\"series\": seriesSlice,\n\t\t}\n\n\t\tres, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"marshal query stats result failed, %v\", err)\n\t\t}\n\t\tapiContext.Response.Write(res)\n\n\tcase listclustermetricname:\n\t\tvar input v3.ClusterMetricNamesInput\n\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = convert.ToObj(actionInput, &input); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclusterName := input.ClusterName\n\t\tif clusterName == \"\" {\n\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\tappName, saNamespace := monitorutil.ClusterMonitoringInfo()\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\tsvcName, svcNamespace, _ := monitorutil.ClusterPrometheusEndpoint()\n\t\tprometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, svcNamespace, svcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnames, err := prometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"metricNamesOutput\",\n\t\t\t\"names\": names,\n\t\t}\n\n\t\tapiContext.WriteResponse(http.StatusOK, data)\n\tcase listprojectmetricname:\n\t\t\/\/ project metric names need to merge cluster level and project level prometheus labels name list\n\t\tvar input v3.ProjectMetricNamesInput\n\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = convert.ToObj(actionInput, &input); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tprojectID := input.ProjectName\n\t\tclusterName, projectName := ref.Parse(projectID)\n\n\t\tif clusterName == \"\" {\n\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\tappName, saNamespace := monitorutil.ProjectMonitoringInfo(projectName)\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\t\/\/ get name list for user definded metric from project prometheus\n\t\tprojectSvcName, projectSvcNamespace := monitorutil.ProjectPrometheusServiceInfo(projectName)\n\t\tprojectPrometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, projectSvcNamespace, projectSvcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprojectNames, err := projectPrometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get project metric list failed, %v\", err)\n\t\t}\n\n\t\t\/\/ get name list for pod\/container metric from cluster prometheus\n\t\tclusterSvcName, clusterSvcNamespace, _ := monitorutil.ClusterPrometheusEndpoint()\n\t\tclusterPrometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, clusterSvcNamespace, clusterSvcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclusterNames, err := clusterPrometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get cluster metric list failed, %v\", err)\n\t\t}\n\n\t\tnames := append(projectNames, clusterNames...)\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"metricNamesOutput\",\n\t\t\t\"names\": names,\n\t\t}\n\t\tapiContext.WriteResponse(http.StatusOK, data)\n\t}\n\treturn nil\n\n}\n<commit_msg>Fix duplicate prompt expressions when operating alert rule<commit_after>package monitor\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/rancher\/norman\/parse\"\n\t\"github.com\/rancher\/norman\/types\"\n\t\"github.com\/rancher\/norman\/types\/convert\"\n\t\"github.com\/rancher\/rancher\/pkg\/clustermanager\"\n\tmonitorutil \"github.com\/rancher\/rancher\/pkg\/monitoring\"\n\t\"github.com\/rancher\/rancher\/pkg\/ref\"\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\/dialer\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\nfunc NewMetricHandler(dialerFactory dialer.Factory, clustermanager *clustermanager.Manager) *MetricHandler {\n\treturn &MetricHandler{\n\t\tdialerFactory: dialerFactory,\n\t\tclustermanager: clustermanager,\n\t}\n}\n\ntype MetricHandler struct {\n\tdialerFactory dialer.Factory\n\tclustermanager *clustermanager.Manager\n}\n\nfunc (h *MetricHandler) Action(actionName string, action *types.Action, apiContext *types.APIContext) error {\n\tswitch actionName {\n\tcase querycluster, queryproject:\n\t\tvar clusterName, projectName, appName, saNamespace string\n\t\tvar comm v3.CommonQueryMetricInput\n\t\tvar err error\n\t\tvar svcNamespace, svcName string\n\n\t\tif actionName == querycluster {\n\t\t\tvar queryMetricInput v3.QueryClusterMetricInput\n\t\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = convert.ToObj(actionInput, &queryMetricInput); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tclusterName = queryMetricInput.ClusterName\n\t\t\tif clusterName == \"\" {\n\t\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t\t}\n\n\t\t\tcomm = queryMetricInput.CommonQueryMetricInput\n\t\t\tappName, saNamespace = monitorutil.ClusterMonitoringInfo()\n\t\t\tsvcName, svcNamespace, _ = monitorutil.ClusterPrometheusEndpoint()\n\t\t} else {\n\t\t\tvar queryMetricInput v3.QueryProjectMetricInput\n\t\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = convert.ToObj(actionInput, &queryMetricInput); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tprojectID := queryMetricInput.ProjectName\n\t\t\tclusterName, projectName = ref.Parse(projectID)\n\n\t\t\tif clusterName == \"\" {\n\t\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t\t}\n\n\t\t\tcomm = queryMetricInput.CommonQueryMetricInput\n\t\t\tappName, saNamespace = monitorutil.ProjectMonitoringInfo(projectName)\n\t\t\tsvcName, svcNamespace = monitorutil.ProjectPrometheusServiceInfo(projectName)\n\t\t}\n\n\t\tstart, end, step, err := parseTimeParams(comm.From, comm.To, comm.Interval)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\tprometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, svcNamespace, svcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tquery := InitPromQuery(\"\", start, end, step, comm.Expr, \"\", false)\n\t\tseriesSlice, err := prometheusQuery.QueryRange(query)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif seriesSlice == nil {\n\t\t\tapiContext.WriteResponse(http.StatusNoContent, nil)\n\t\t\treturn nil\n\t\t}\n\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"queryMetricOutput\",\n\t\t\t\"series\": seriesSlice,\n\t\t}\n\n\t\tres, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"marshal query stats result failed, %v\", err)\n\t\t}\n\t\tapiContext.Response.Write(res)\n\n\tcase listclustermetricname:\n\t\tvar input v3.ClusterMetricNamesInput\n\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = convert.ToObj(actionInput, &input); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclusterName := input.ClusterName\n\t\tif clusterName == \"\" {\n\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\tappName, saNamespace := monitorutil.ClusterMonitoringInfo()\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\tsvcName, svcNamespace, _ := monitorutil.ClusterPrometheusEndpoint()\n\t\tprometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, svcNamespace, svcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnames, err := prometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"metricNamesOutput\",\n\t\t\t\"names\": names,\n\t\t}\n\n\t\tapiContext.WriteResponse(http.StatusOK, data)\n\tcase listprojectmetricname:\n\t\t\/\/ project metric names need to merge cluster level and project level prometheus labels name list\n\t\tvar input v3.ProjectMetricNamesInput\n\t\tactionInput, err := parse.ReadBody(apiContext.Request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = convert.ToObj(actionInput, &input); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tprojectID := input.ProjectName\n\t\tclusterName, projectName := ref.Parse(projectID)\n\n\t\tif clusterName == \"\" {\n\t\t\treturn fmt.Errorf(\"clusterName is empty\")\n\t\t}\n\n\t\tuserContext, err := h.clustermanager.UserContext(clusterName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get usercontext failed, %v\", err)\n\t\t}\n\n\t\tappName, saNamespace := monitorutil.ProjectMonitoringInfo(projectName)\n\t\ttoken, err := getAuthToken(userContext, appName, saNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treqContext, cancel := context.WithTimeout(context.Background(), prometheusReqTimeout)\n\t\tdefer cancel()\n\n\t\t\/\/ get name list for user definded metric from project prometheus\n\t\tprojectSvcName, projectSvcNamespace := monitorutil.ProjectPrometheusServiceInfo(projectName)\n\t\tprojectPrometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, projectSvcNamespace, projectSvcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprojectNames, err := projectPrometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get project metric list failed, %v\", err)\n\t\t}\n\n\t\t\/\/ get name list for pod\/container metric from cluster prometheus\n\t\tclusterSvcName, clusterSvcNamespace, _ := monitorutil.ClusterPrometheusEndpoint()\n\t\tclusterPrometheusQuery, err := NewPrometheusQuery(reqContext, userContext, clusterName, token, clusterSvcNamespace, clusterSvcName, h.clustermanager, h.dialerFactory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tclusterNames, err := clusterPrometheusQuery.GetLabelValues(\"__name__\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get cluster metric list failed, %v\", err)\n\t\t}\n\n\t\tnames := sets.String{}\n\t\tnames.Insert(projectNames...)\n\t\tnames.Insert(clusterNames...)\n\n\t\tdata := map[string]interface{}{\n\t\t\t\"type\": \"metricNamesOutput\",\n\t\t\t\"names\": names.List(),\n\t\t}\n\t\tapiContext.WriteResponse(http.StatusOK, data)\n\t}\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage liveexec\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nfunc doExecCommand(dir, binPath string, opts ExecCommandOptions, arg ...string) error {\n\tcmd := exec.Command(binPath, arg...)\n\tcmd.Dir = dir\n\n\tcmd.Env = os.Environ()\n\tfor k, v := range opts.Env {\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tabsDir, _ := filepath.Abs(dir)\n\t_, _ = fmt.Fprintf(ginkgo.GinkgoWriter, \"\\r\\n[DEBUG] COMMAND in %s: %s %s\\r\\n\\r\\n\", absDir, binPath, strings.Join(arg, \" \"))\n\n\tres, err := cmd.CombinedOutput()\n\tlineBuf := make([]byte, 0, 4096)\n\tfor _, b := range res {\n\t\tif b == '\\n' {\n\t\t\tline := string(lineBuf)\n\t\t\tlineBuf = lineBuf[:0]\n\n\t\t\t_, _ = fmt.Fprintf(ginkgo.GinkgoWriter, \"[DEBUG] OUTPUT LINE: %s\\r\\n\", line)\n\n\t\t\tif opts.OutputLineHandler != nil {\n\t\t\t\topts.OutputLineHandler(line)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tlineBuf = append(lineBuf, b)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"command failed: %s\\r\\n%s\", err, res)\n\t}\n\treturn nil\n}\n<commit_msg>[tests] Fix liveexec error handling on windows<commit_after>\/\/ +build windows\n\npackage liveexec\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/onsi\/ginkgo\"\n)\n\nfunc doExecCommand(dir, binPath string, opts ExecCommandOptions, arg ...string) error {\n\tcmd := exec.Command(binPath, arg...)\n\tcmd.Dir = dir\n\n\tcmd.Env = os.Environ()\n\tfor k, v := range opts.Env {\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tabsDir, _ := filepath.Abs(dir)\n\t_, _ = fmt.Fprintf(ginkgo.GinkgoWriter, \"\\r\\n[DEBUG] COMMAND in %s: %s %s\\r\\n\\r\\n\", absDir, binPath, strings.Join(arg, \" \"))\n\n\tres, err := cmd.CombinedOutput()\n\tlineBuf := make([]byte, 0, 4096)\n\tfor _, b := range res {\n\t\tif b == '\\n' {\n\t\t\tline := string(lineBuf)\n\t\t\tlineBuf = lineBuf[:0]\n\n\t\t\t_, _ = fmt.Fprintf(ginkgo.GinkgoWriter, \"[DEBUG] OUTPUT LINE: %s\\r\\n\", line)\n\n\t\t\tif opts.OutputLineHandler != nil {\n\t\t\t\topts.OutputLineHandler(line)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tlineBuf = append(lineBuf, b)\n\t}\n\n\tif err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"exit code %d\", exitError.ExitCode())\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build darwin\n\npackage xdg_test\n\nimport (\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/adrg\/xdg\"\n)\n\nfunc TestDefaultBaseDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_HOME\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Application Support\"),\n\t\t\tactual: &xdg.DataHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_DIRS\",\n\t\t\texpected: []string{\"\/Library\/Application Support\"},\n\t\t\tactual: &xdg.DataDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_HOME\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Preferences\"),\n\t\t\tactual: &xdg.ConfigHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_DIRS\",\n\t\t\texpected: []string{\"\/Library\/Preferences\"},\n\t\t\tactual: &xdg.ConfigDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CACHE_HOME\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Caches\"),\n\t\t\tactual: &xdg.CacheHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_RUNTIME_DIR\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Application Support\"),\n\t\t\tactual: &xdg.RuntimeDir,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_STATE_HOME\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Application Support\"),\n\t\t\tactual: &xdg.StateHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_APPLICATION_DIRS\",\n\t\t\texpected: []string{\n\t\t\t\t\"\/Applications\",\n\t\t\t},\n\t\t\tactual: &xdg.ApplicationDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_FONT_DIRS\",\n\t\t\texpected: []string{\n\t\t\t\tfilepath.Join(home, \"Library\/Fonts\"),\n\t\t\t\t\"\/Library\/Fonts\",\n\t\t\t\t\"\/System\/Library\/Fonts\",\n\t\t\t\t\"\/Network\/Library\/Fonts\",\n\t\t\t},\n\t\t\tactual: &xdg.FontDirs,\n\t\t},\n\t)\n}\n\nfunc TestCustomBaseDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_HOME\",\n\t\t\tvalue: \"~\/Library\/data\",\n\t\t\texpected: filepath.Join(home, \"Library\/data\"),\n\t\t\tactual: &xdg.DataHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_DIRS\",\n\t\t\tvalue: \"~\/Library\/data:\/Library\/Application Support\",\n\t\t\texpected: []string{filepath.Join(home, \"Library\/data\"), \"\/Library\/Application Support\"},\n\t\t\tactual: &xdg.DataDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_HOME\",\n\t\t\tvalue: \"~\/Library\/config\",\n\t\t\texpected: filepath.Join(home, \"Library\/config\"),\n\t\t\tactual: &xdg.ConfigHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_DIRS\",\n\t\t\tvalue: \"~\/Library\/config:\/Library\/Preferences\",\n\t\t\texpected: []string{filepath.Join(home, \"Library\/config\"), \"\/Library\/Preferences\"},\n\t\t\tactual: &xdg.ConfigDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CACHE_HOME\",\n\t\t\tvalue: \"~\/Library\/cache\",\n\t\t\texpected: filepath.Join(home, \"Library\/cache\"),\n\t\t\tactual: &xdg.CacheHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_RUNTIME_DIR\",\n\t\t\tvalue: \"~\/Library\/runtime\",\n\t\t\texpected: filepath.Join(home, \"Library\/runtime\"),\n\t\t\tactual: &xdg.RuntimeDir,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_STATE_HOME\",\n\t\t\tvalue: \"~\/Library\/state\",\n\t\t\texpected: filepath.Join(home, \"Library\/state\"),\n\t\t\tactual: &xdg.StateHome,\n\t\t},\n\t)\n}\n\nfunc TestDefaultUserDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DESKTOP_DIR\",\n\t\t\texpected: filepath.Join(home, \"Desktop\"),\n\t\t\tactual: &xdg.UserDirs.Desktop,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOWNLOAD_DIR\",\n\t\t\texpected: filepath.Join(home, \"Downloads\"),\n\t\t\tactual: &xdg.UserDirs.Download,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOCUMENTS_DIR\",\n\t\t\texpected: filepath.Join(home, \"Documents\"),\n\t\t\tactual: &xdg.UserDirs.Documents,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_MUSIC_DIR\",\n\t\t\texpected: filepath.Join(home, \"Music\"),\n\t\t\tactual: &xdg.UserDirs.Music,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PICTURES_DIR\",\n\t\t\texpected: filepath.Join(home, \"Pictures\"),\n\t\t\tactual: &xdg.UserDirs.Pictures,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_VIDEOS_DIR\",\n\t\t\texpected: filepath.Join(home, \"Movies\"),\n\t\t\tactual: &xdg.UserDirs.Videos,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_TEMPLATES_DIR\",\n\t\t\texpected: filepath.Join(home, \"Templates\"),\n\t\t\tactual: &xdg.UserDirs.Templates,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PUBLICSHARE_DIR\",\n\t\t\texpected: filepath.Join(home, \"Public\"),\n\t\t\tactual: &xdg.UserDirs.PublicShare,\n\t\t},\n\t)\n}\n\nfunc TestCustomUserDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DESKTOP_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Desktop\",\n\t\t\texpected: filepath.Join(home, \"Library\/Desktop\"),\n\t\t\tactual: &xdg.UserDirs.Desktop,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOWNLOAD_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Downloads\",\n\t\t\texpected: filepath.Join(home, \"Library\/Downloads\"),\n\t\t\tactual: &xdg.UserDirs.Download,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOCUMENTS_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Documents\",\n\t\t\texpected: filepath.Join(home, \"Library\/Documents\"),\n\t\t\tactual: &xdg.UserDirs.Documents,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_MUSIC_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Music\",\n\t\t\texpected: filepath.Join(home, \"Library\/Music\"),\n\t\t\tactual: &xdg.UserDirs.Music,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PICTURES_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Pictures\",\n\t\t\texpected: filepath.Join(home, \"Library\/Pictures\"),\n\t\t\tactual: &xdg.UserDirs.Pictures,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_VIDEOS_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Movies\",\n\t\t\texpected: filepath.Join(home, \"Library\/Movies\"),\n\t\t\tactual: &xdg.UserDirs.Videos,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_TEMPLATES_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Templates\",\n\t\t\texpected: filepath.Join(home, \"Library\/Templates\"),\n\t\t\tactual: &xdg.UserDirs.Templates,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PUBLICSHARE_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Public\",\n\t\t\texpected: filepath.Join(home, \"Library\/Public\"),\n\t\t\tactual: &xdg.UserDirs.PublicShare,\n\t\t},\n\t)\n}\n<commit_msg>Update default base directories test case for macOS<commit_after>\/\/ +build darwin\n\npackage xdg_test\n\nimport (\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/adrg\/xdg\"\n)\n\nfunc TestDefaultBaseDirs(t *testing.T) {\n\thome := xdg.Home\n\thomeAppSupport := filepath.Join(home, \"Library\", \"Application Support\")\n\trootAppSupport := \"\/Library\/Application Support\"\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_HOME\",\n\t\t\texpected: homeAppSupport,\n\t\t\tactual: &xdg.DataHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_DIRS\",\n\t\t\texpected: []string{rootAppSupport},\n\t\t\tactual: &xdg.DataDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_HOME\",\n\t\t\texpected: homeAppSupport,\n\t\t\tactual: &xdg.ConfigHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_DIRS\",\n\t\t\texpected: []string{\n\t\t\t\tfilepath.Join(home, \"Library\", \"Preferences\"),\n\t\t\t\trootAppSupport,\n\t\t\t\t\"\/Library\/Preferences\",\n\t\t\t},\n\t\t\tactual: &xdg.ConfigDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CACHE_HOME\",\n\t\t\texpected: filepath.Join(home, \"Library\", \"Caches\"),\n\t\t\tactual: &xdg.CacheHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_RUNTIME_DIR\",\n\t\t\texpected: homeAppSupport,\n\t\t\tactual: &xdg.RuntimeDir,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_STATE_HOME\",\n\t\t\texpected: homeAppSupport,\n\t\t\tactual: &xdg.StateHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_APPLICATION_DIRS\",\n\t\t\texpected: []string{\n\t\t\t\t\"\/Applications\",\n\t\t\t},\n\t\t\tactual: &xdg.ApplicationDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_FONT_DIRS\",\n\t\t\texpected: []string{\n\t\t\t\tfilepath.Join(home, \"Library\/Fonts\"),\n\t\t\t\t\"\/Library\/Fonts\",\n\t\t\t\t\"\/System\/Library\/Fonts\",\n\t\t\t\t\"\/Network\/Library\/Fonts\",\n\t\t\t},\n\t\t\tactual: &xdg.FontDirs,\n\t\t},\n\t)\n}\n\nfunc TestCustomBaseDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_HOME\",\n\t\t\tvalue: \"~\/Library\/data\",\n\t\t\texpected: filepath.Join(home, \"Library\/data\"),\n\t\t\tactual: &xdg.DataHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DATA_DIRS\",\n\t\t\tvalue: \"~\/Library\/data:\/Library\/Application Support\",\n\t\t\texpected: []string{filepath.Join(home, \"Library\/data\"), \"\/Library\/Application Support\"},\n\t\t\tactual: &xdg.DataDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_HOME\",\n\t\t\tvalue: \"~\/Library\/config\",\n\t\t\texpected: filepath.Join(home, \"Library\/config\"),\n\t\t\tactual: &xdg.ConfigHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CONFIG_DIRS\",\n\t\t\tvalue: \"~\/Library\/config:\/Library\/Preferences\",\n\t\t\texpected: []string{filepath.Join(home, \"Library\/config\"), \"\/Library\/Preferences\"},\n\t\t\tactual: &xdg.ConfigDirs,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_CACHE_HOME\",\n\t\t\tvalue: \"~\/Library\/cache\",\n\t\t\texpected: filepath.Join(home, \"Library\/cache\"),\n\t\t\tactual: &xdg.CacheHome,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_RUNTIME_DIR\",\n\t\t\tvalue: \"~\/Library\/runtime\",\n\t\t\texpected: filepath.Join(home, \"Library\/runtime\"),\n\t\t\tactual: &xdg.RuntimeDir,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_STATE_HOME\",\n\t\t\tvalue: \"~\/Library\/state\",\n\t\t\texpected: filepath.Join(home, \"Library\/state\"),\n\t\t\tactual: &xdg.StateHome,\n\t\t},\n\t)\n}\n\nfunc TestDefaultUserDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DESKTOP_DIR\",\n\t\t\texpected: filepath.Join(home, \"Desktop\"),\n\t\t\tactual: &xdg.UserDirs.Desktop,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOWNLOAD_DIR\",\n\t\t\texpected: filepath.Join(home, \"Downloads\"),\n\t\t\tactual: &xdg.UserDirs.Download,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOCUMENTS_DIR\",\n\t\t\texpected: filepath.Join(home, \"Documents\"),\n\t\t\tactual: &xdg.UserDirs.Documents,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_MUSIC_DIR\",\n\t\t\texpected: filepath.Join(home, \"Music\"),\n\t\t\tactual: &xdg.UserDirs.Music,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PICTURES_DIR\",\n\t\t\texpected: filepath.Join(home, \"Pictures\"),\n\t\t\tactual: &xdg.UserDirs.Pictures,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_VIDEOS_DIR\",\n\t\t\texpected: filepath.Join(home, \"Movies\"),\n\t\t\tactual: &xdg.UserDirs.Videos,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_TEMPLATES_DIR\",\n\t\t\texpected: filepath.Join(home, \"Templates\"),\n\t\t\tactual: &xdg.UserDirs.Templates,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PUBLICSHARE_DIR\",\n\t\t\texpected: filepath.Join(home, \"Public\"),\n\t\t\tactual: &xdg.UserDirs.PublicShare,\n\t\t},\n\t)\n}\n\nfunc TestCustomUserDirs(t *testing.T) {\n\thome := xdg.Home\n\n\ttestDirs(t,\n\t\t&envSample{\n\t\t\tname: \"XDG_DESKTOP_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Desktop\",\n\t\t\texpected: filepath.Join(home, \"Library\/Desktop\"),\n\t\t\tactual: &xdg.UserDirs.Desktop,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOWNLOAD_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Downloads\",\n\t\t\texpected: filepath.Join(home, \"Library\/Downloads\"),\n\t\t\tactual: &xdg.UserDirs.Download,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_DOCUMENTS_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Documents\",\n\t\t\texpected: filepath.Join(home, \"Library\/Documents\"),\n\t\t\tactual: &xdg.UserDirs.Documents,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_MUSIC_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Music\",\n\t\t\texpected: filepath.Join(home, \"Library\/Music\"),\n\t\t\tactual: &xdg.UserDirs.Music,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PICTURES_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Pictures\",\n\t\t\texpected: filepath.Join(home, \"Library\/Pictures\"),\n\t\t\tactual: &xdg.UserDirs.Pictures,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_VIDEOS_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Movies\",\n\t\t\texpected: filepath.Join(home, \"Library\/Movies\"),\n\t\t\tactual: &xdg.UserDirs.Videos,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_TEMPLATES_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Templates\",\n\t\t\texpected: filepath.Join(home, \"Library\/Templates\"),\n\t\t\tactual: &xdg.UserDirs.Templates,\n\t\t},\n\t\t&envSample{\n\t\t\tname: \"XDG_PUBLICSHARE_DIR\",\n\t\t\tvalue: \"$HOME\/Library\/Public\",\n\t\t\texpected: filepath.Join(home, \"Library\/Public\"),\n\t\t\tactual: &xdg.UserDirs.PublicShare,\n\t\t},\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package engineio\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\tws \"github.com\/gorilla\/websocket\"\n)\n\nfunc init() {\n\tt := &websocket{}\n\tRegisterTransport(t.Name(), t.HandlesUpgrades(), t.SupportsFraming(), NewWebsocketTransport)\n}\n\ntype websocket struct {\n\tsocket Socket\n\tconn *ws.Conn\n\tquitChan chan struct{}\n\tpingInterval time.Duration\n\tpingTimeout time.Duration\n\tlastPing time.Time\n\tisClosed bool\n}\n\nfunc NewWebsocketTransport(req *http.Request, pingInterval, pingTimeout time.Duration) (Transport, error) {\n\tret := &websocket{\n\t\tquitChan: make(chan struct{}, 1),\n\t\tpingInterval: pingInterval,\n\t\tpingTimeout: pingTimeout,\n\t\tlastPing: time.Now(),\n\t\tisClosed: false,\n\t}\n\treturn ret, nil\n}\n\nfunc (*websocket) Name() string {\n\treturn \"websocket\"\n}\n\nfunc (p *websocket) SetSocket(s Socket) {\n\tp.socket = s\n}\n\nfunc (*websocket) HandlesUpgrades() bool {\n\treturn true\n}\n\nfunc (*websocket) SupportsFraming() bool {\n\treturn true\n}\n\nfunc (p *websocket) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tname := r.URL.Query().Get(\"transport\")\n\tif name != p.Name() {\n\t\tencoder := NewBinaryPayloadEncoder()\n\t\twriter, err := encoder.NextString(NOOP)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriter.Close()\n\t\tencoder.EncodeTo(w)\n\t\treturn\n\t}\n\n\tconn, err := ws.Upgrade(w, r, nil, 10240, 10240)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.conn = conn\n\tdefer func() {\n\t\tp.conn.Close()\n\t\tp.socket.onClose()\n\t}()\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif !p.isClosed {\n\t\t\t\tp.isClosed = true\n\t\t\t\tclose(p.quitChan)\n\t\t\t}\n\t\t}()\n\t\tfor {\n\t\t\tt, r, err := conn.NextReader()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif t == ws.TextMessage || t == ws.BinaryMessage {\n\t\t\t\tdecoder, _ := NewDecoder(r)\n\t\t\t\tswitch decoder.Type() {\n\t\t\t\tcase PING:\n\t\t\t\t\tw, _ := p.NextWriter(MessageText, PONG)\n\t\t\t\t\tio.Copy(w, decoder)\n\t\t\t\t\tw.Close()\n\t\t\t\t\tfallthrough\n\t\t\t\tcase PONG:\n\t\t\t\t\tp.lastPing = time.Now()\n\t\t\t\tcase CLOSE:\n\t\t\t\t\tp.Close()\n\t\t\t\t\treturn\n\t\t\t\tcase UPGRADE:\n\t\t\t\tcase NOOP:\n\t\t\t\tdefault:\n\t\t\t\t\tp.socket.onMessage(decoder)\n\t\t\t\t}\n\t\t\t\tdecoder.Close()\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tdiff := time.Now().Sub(p.lastPing)\n\t\tselect {\n\t\tcase <-time.After(p.pingInterval - diff):\n\t\t\tw, _ := p.NextWriter(MessageText, PING)\n\t\t\tw.Close()\n\t\tcase <-time.After(p.pingTimeout - diff):\n\t\t\treturn\n\t\tcase <-p.quitChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (p *websocket) NextWriter(msgType MessageType, packetType PacketType) (io.WriteCloser, error) {\n\tif p.isClosed {\n\t\treturn nil, io.EOF\n\t}\n\tif packetType == CLOSE {\n\t\tret, err := p.conn.NextWriter(ws.CloseMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ret, nil\n\t}\n\tvar ret io.WriteCloser\n\tvar err error\n\tswitch msgType {\n\tcase MessageText:\n\t\tret, err = p.conn.NextWriter(ws.TextMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret, err = NewStringEncoder(ret, packetType)\n\tcase MessageBinary:\n\t\tret, err = p.conn.NextWriter(ws.BinaryMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret, err = NewBinaryEncoder(ret, packetType)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nfunc (p *websocket) Close() error {\n\tw, err := p.NextWriter(MessageText, CLOSE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Close()\n\tp.isClosed = true\n\tclose(p.quitChan)\n\treturn nil\n}\n\nfunc (p websocket) Upgraded() error {\n\treturn nil\n}\n<commit_msg>clear race condition of websocket writer<commit_after>package engineio\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\tws \"github.com\/gorilla\/websocket\"\n)\n\nfunc init() {\n\tt := &websocket{}\n\tRegisterTransport(t.Name(), t.HandlesUpgrades(), t.SupportsFraming(), NewWebsocketTransport)\n}\n\ntype websocket struct {\n\tsocket Socket\n\tconn *ws.Conn\n\tquitChan chan struct{}\n\tpingChan chan time.Time\n\tpingInterval time.Duration\n\tpingTimeout time.Duration\n\tlastPing time.Time\n\tisClosed bool\n\twriteLocker sync.Mutex\n}\n\nfunc NewWebsocketTransport(req *http.Request, pingInterval, pingTimeout time.Duration) (Transport, error) {\n\tret := &websocket{\n\t\tquitChan: make(chan struct{}, 1),\n\t\tpingChan: make(chan time.Time),\n\t\tpingInterval: pingInterval,\n\t\tpingTimeout: pingTimeout,\n\t\tlastPing: time.Now(),\n\t\tisClosed: false,\n\t}\n\treturn ret, nil\n}\n\nfunc (*websocket) Name() string {\n\treturn \"websocket\"\n}\n\nfunc (p *websocket) SetSocket(s Socket) {\n\tp.socket = s\n}\n\nfunc (*websocket) HandlesUpgrades() bool {\n\treturn true\n}\n\nfunc (*websocket) SupportsFraming() bool {\n\treturn true\n}\n\nfunc (p *websocket) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tname := r.URL.Query().Get(\"transport\")\n\tif name != p.Name() {\n\t\tencoder := NewBinaryPayloadEncoder()\n\t\twriter, err := encoder.NextString(NOOP)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\twriter.Close()\n\t\tencoder.EncodeTo(w)\n\t\treturn\n\t}\n\n\tconn, err := ws.Upgrade(w, r, nil, 10240, 10240)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.conn = conn\n\tdefer func() {\n\t\tclose(p.quitChan)\n\t\tp.conn.Close()\n\t\tp.socket.onClose()\n\t}()\n\n\tgo func(lastPing time.Time) {\n\t\tfor {\n\t\t\tdiff := time.Now().Sub(lastPing)\n\t\t\tselect {\n\t\t\tcase lastPing = <-p.pingChan:\n\t\t\tcase <-time.After(p.pingInterval - diff):\n\t\t\t\tw, _ := p.NextWriter(MessageText, PING)\n\t\t\t\tw.Close()\n\t\t\tcase <-time.After(p.pingTimeout - diff):\n\t\t\t\treturn\n\t\t\tcase <-p.quitChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(p.lastPing)\n\n\tfor {\n\t\tt, r, err := conn.NextReader()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif t == ws.TextMessage || t == ws.BinaryMessage {\n\t\t\tdecoder, _ := NewDecoder(r)\n\t\t\tswitch decoder.Type() {\n\t\t\tcase PING:\n\t\t\t\tw, _ := p.NextWriter(MessageText, PONG)\n\t\t\t\tio.Copy(w, decoder)\n\t\t\t\tw.Close()\n\t\t\t\tfallthrough\n\t\t\tcase PONG:\n\t\t\t\tp.lastPing = time.Now()\n\t\t\t\tp.pingChan <- p.lastPing\n\t\t\tcase CLOSE:\n\t\t\t\tp.Close()\n\t\t\t\treturn\n\t\t\tcase UPGRADE:\n\t\t\tcase NOOP:\n\t\t\tdefault:\n\t\t\t\tp.socket.onMessage(decoder)\n\t\t\t}\n\t\t\tdecoder.Close()\n\t\t}\n\t}\n}\n\ntype websocketWriter struct {\n\tio.WriteCloser\n\tlocker *sync.Mutex\n}\n\nfunc newWebsocketWriter(w io.WriteCloser, locker *sync.Mutex) websocketWriter {\n\treturn websocketWriter{\n\t\tWriteCloser: w,\n\t\tlocker: locker,\n\t}\n}\n\nfunc (p *websocket) NextWriter(msgType MessageType, packetType PacketType) (io.WriteCloser, error) {\n\tif p.isClosed {\n\t\treturn nil, io.EOF\n\t}\n\tif packetType == CLOSE {\n\t\tret, err := p.conn.NextWriter(ws.CloseMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newWebsocketWriter(ret, &p.writeLocker), nil\n\t}\n\tvar ret io.WriteCloser\n\tvar err error\n\tswitch msgType {\n\tcase MessageText:\n\t\tret, err = p.conn.NextWriter(ws.TextMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret, err = NewStringEncoder(ret, packetType)\n\tcase MessageBinary:\n\t\tret, err = p.conn.NextWriter(ws.BinaryMessage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret, err = NewBinaryEncoder(ret, packetType)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newWebsocketWriter(ret, &p.writeLocker), nil\n}\n\nfunc (p *websocket) Close() error {\n\tw, err := p.NextWriter(MessageText, CLOSE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Close()\n\tp.isClosed = true\n\tp.conn.Close()\n\treturn nil\n}\n\nfunc (p websocket) Upgraded() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage model\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/thejerf\/suture\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/config\"\n\t\"github.com\/syncthing\/syncthing\/lib\/events\"\n\t\"github.com\/syncthing\/syncthing\/lib\/protocol\"\n\t\"github.com\/syncthing\/syncthing\/lib\/sync\"\n\t\"github.com\/syncthing\/syncthing\/lib\/util\"\n)\n\nconst minSummaryInterval = time.Minute\n\ntype FolderSummaryService interface {\n\tsuture.Service\n\tSummary(folder string) (map[string]interface{}, error)\n\tOnEventRequest()\n}\n\n\/\/ The folderSummaryService adds summary information events (FolderSummary and\n\/\/ FolderCompletion) into the event stream at certain intervals.\ntype folderSummaryService struct {\n\t*suture.Supervisor\n\n\tcfg config.Wrapper\n\tmodel Model\n\tid protocol.DeviceID\n\tevLogger events.Logger\n\timmediate chan string\n\n\t\/\/ For keeping track of folders to recalculate for\n\tfoldersMut sync.Mutex\n\tfolders map[string]struct{}\n\n\t\/\/ For keeping track of when the last event request on the API was\n\tlastEventReq time.Time\n\tlastEventReqMut sync.Mutex\n}\n\nfunc NewFolderSummaryService(cfg config.Wrapper, m Model, id protocol.DeviceID, evLogger events.Logger) FolderSummaryService {\n\tservice := &folderSummaryService{\n\t\tSupervisor: suture.New(\"folderSummaryService\", suture.Spec{\n\t\t\tPassThroughPanics: true,\n\t\t}),\n\t\tcfg: cfg,\n\t\tmodel: m,\n\t\tid: id,\n\t\tevLogger: evLogger,\n\t\timmediate: make(chan string),\n\t\tfolders: make(map[string]struct{}),\n\t\tfoldersMut: sync.NewMutex(),\n\t\tlastEventReqMut: sync.NewMutex(),\n\t}\n\n\tservice.Add(util.AsService(service.listenForUpdates, fmt.Sprintf(\"%s\/listenForUpdates\", service)))\n\tservice.Add(util.AsService(service.calculateSummaries, fmt.Sprintf(\"%s\/calculateSummaries\", service)))\n\n\treturn service\n}\n\nfunc (c *folderSummaryService) String() string {\n\treturn fmt.Sprintf(\"FolderSummaryService@%p\", c)\n}\n\nfunc (c *folderSummaryService) Summary(folder string) (map[string]interface{}, error) {\n\tvar res = make(map[string]interface{})\n\n\terrors, err := c.model.FolderErrors(folder)\n\tif err != nil && err != ErrFolderPaused {\n\t\t\/\/ Stats from the db can still be obtained if the folder is just paused\n\t\treturn nil, err\n\t}\n\tres[\"errors\"] = len(errors)\n\tres[\"pullErrors\"] = len(errors) \/\/ deprecated\n\n\tres[\"invalid\"] = \"\" \/\/ Deprecated, retains external API for now\n\n\tglobal := c.model.GlobalSize(folder)\n\tres[\"globalFiles\"], res[\"globalDirectories\"], res[\"globalSymlinks\"], res[\"globalDeleted\"], res[\"globalBytes\"], res[\"globalTotalItems\"] = global.Files, global.Directories, global.Symlinks, global.Deleted, global.Bytes, global.TotalItems()\n\n\tlocal := c.model.LocalSize(folder)\n\tres[\"localFiles\"], res[\"localDirectories\"], res[\"localSymlinks\"], res[\"localDeleted\"], res[\"localBytes\"], res[\"localTotalItems\"] = local.Files, local.Directories, local.Symlinks, local.Deleted, local.Bytes, local.TotalItems()\n\n\tneed := c.model.NeedSize(folder)\n\tres[\"needFiles\"], res[\"needDirectories\"], res[\"needSymlinks\"], res[\"needDeletes\"], res[\"needBytes\"], res[\"needTotalItems\"] = need.Files, need.Directories, need.Symlinks, need.Deleted, need.Bytes, need.TotalItems()\n\n\tif c.cfg.Folders()[folder].Type == config.FolderTypeReceiveOnly {\n\t\t\/\/ Add statistics for things that have changed locally in a receive\n\t\t\/\/ only folder.\n\t\tro := c.model.ReceiveOnlyChangedSize(folder)\n\t\tres[\"receiveOnlyChangedFiles\"] = ro.Files\n\t\tres[\"receiveOnlyChangedDirectories\"] = ro.Directories\n\t\tres[\"receiveOnlyChangedSymlinks\"] = ro.Symlinks\n\t\tres[\"receiveOnlyChangedDeletes\"] = ro.Deleted\n\t\tres[\"receiveOnlyChangedBytes\"] = ro.Bytes\n\t\tres[\"receiveOnlyTotalItems\"] = ro.TotalItems()\n\t}\n\n\tres[\"inSyncFiles\"], res[\"inSyncBytes\"] = global.Files-need.Files, global.Bytes-need.Bytes\n\n\tres[\"state\"], res[\"stateChanged\"], err = c.model.State(folder)\n\tif err != nil {\n\t\tres[\"error\"] = err.Error()\n\t}\n\n\tourSeq, _ := c.model.CurrentSequence(folder)\n\tremoteSeq, _ := c.model.RemoteSequence(folder)\n\n\tres[\"version\"] = ourSeq + remoteSeq \/\/ legacy\n\tres[\"sequence\"] = ourSeq + remoteSeq \/\/ new name\n\n\tignorePatterns, _, _ := c.model.GetIgnores(folder)\n\tres[\"ignorePatterns\"] = false\n\tfor _, line := range ignorePatterns {\n\t\tif len(line) > 0 && !strings.HasPrefix(line, \"\/\/\") {\n\t\t\tres[\"ignorePatterns\"] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\terr = c.model.WatchError(folder)\n\tif err != nil {\n\t\tres[\"watchError\"] = err.Error()\n\t}\n\n\treturn res, nil\n}\n\nfunc (c *folderSummaryService) OnEventRequest() {\n\tc.lastEventReqMut.Lock()\n\tc.lastEventReq = time.Now()\n\tc.lastEventReqMut.Unlock()\n}\n\n\/\/ listenForUpdates subscribes to the event bus and makes note of folders that\n\/\/ need their data recalculated.\nfunc (c *folderSummaryService) listenForUpdates(ctx context.Context) {\n\tsub := c.evLogger.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged | events.RemoteDownloadProgress | events.DeviceConnected | events.FolderWatchStateChanged | events.DownloadProgress)\n\tdefer sub.Unsubscribe()\n\n\tfor {\n\t\t\/\/ This loop needs to be fast so we don't miss too many events.\n\n\t\tselect {\n\t\tcase ev := <-sub.C():\n\t\t\tc.processUpdate(ev)\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *folderSummaryService) processUpdate(ev events.Event) {\n\tvar folder string\n\n\tswitch ev.Type {\n\tcase events.DeviceConnected:\n\t\t\/\/ When a device connects we schedule a refresh of all\n\t\t\/\/ folders shared with that device.\n\n\t\tdata := ev.Data.(map[string]string)\n\t\tdeviceID, _ := protocol.DeviceIDFromString(data[\"id\"])\n\n\t\tc.foldersMut.Lock()\n\tnextFolder:\n\t\tfor _, folder := range c.cfg.Folders() {\n\t\t\tfor _, dev := range folder.Devices {\n\t\t\t\tif dev.DeviceID == deviceID {\n\t\t\t\t\tc.folders[folder.ID] = struct{}{}\n\t\t\t\t\tcontinue nextFolder\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tc.foldersMut.Unlock()\n\n\t\treturn\n\n\tcase events.DownloadProgress:\n\t\tdata := ev.Data.(map[string]map[string]*pullerProgress)\n\t\tc.foldersMut.Lock()\n\t\tfor folder := range data {\n\t\t\tc.folders[folder] = struct{}{}\n\t\t}\n\t\tc.foldersMut.Unlock()\n\t\treturn\n\n\tcase events.StateChanged:\n\t\tdata := ev.Data.(map[string]interface{})\n\t\tif !(data[\"to\"].(string) == \"idle\" && data[\"from\"].(string) == \"syncing\") {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ The folder changed to idle from syncing. We should do an\n\t\t\/\/ immediate refresh to update the GUI. The send to\n\t\t\/\/ c.immediate must be nonblocking so that we can continue\n\t\t\/\/ handling events.\n\n\t\tfolder = data[\"folder\"].(string)\n\t\tselect {\n\t\tcase c.immediate <- folder:\n\t\t\tc.foldersMut.Lock()\n\t\t\tdelete(c.folders, folder)\n\t\t\tc.foldersMut.Unlock()\n\t\t\treturn\n\t\tdefault:\n\t\t\t\/\/ Refresh whenever we do the next summary.\n\t\t}\n\n\tdefault:\n\t\t\/\/ The other events all have a \"folder\" attribute that they\n\t\t\/\/ affect. Whenever the local or remote index is updated for a\n\t\t\/\/ given folder we make a note of it.\n\t\t\/\/ This folder needs to be refreshed whenever we do the next\n\t\t\/\/ refresh.\n\n\t\tfolder = ev.Data.(map[string]interface{})[\"folder\"].(string)\n\t}\n\n\tc.foldersMut.Lock()\n\tc.folders[folder] = struct{}{}\n\tc.foldersMut.Unlock()\n}\n\n\/\/ calculateSummaries periodically recalculates folder summaries and\n\/\/ completion percentage, and sends the results on the event bus.\nfunc (c *folderSummaryService) calculateSummaries(ctx context.Context) {\n\tconst pumpInterval = 2 * time.Second\n\tpump := time.NewTimer(pumpInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-pump.C:\n\t\t\tt0 := time.Now()\n\t\t\tfor _, folder := range c.foldersToHandle() {\n\t\t\t\tc.sendSummary(folder)\n\t\t\t}\n\n\t\t\t\/\/ We don't want to spend all our time calculating summaries. Lets\n\t\t\t\/\/ set an arbitrary limit at not spending more than about 30% of\n\t\t\t\/\/ our time here...\n\t\t\twait := 2*time.Since(t0) + pumpInterval\n\t\t\tpump.Reset(wait)\n\n\t\tcase folder := <-c.immediate:\n\t\t\tc.sendSummary(folder)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ foldersToHandle returns the list of folders needing a summary update, and\n\/\/ clears the list.\nfunc (c *folderSummaryService) foldersToHandle() []string {\n\t\/\/ We only recalculate summaries if someone is listening to events\n\t\/\/ (a request to \/rest\/events has been made within the last\n\t\/\/ pingEventInterval).\n\n\tc.lastEventReqMut.Lock()\n\tlast := c.lastEventReq\n\tc.lastEventReqMut.Unlock()\n\tif time.Since(last) > minSummaryInterval {\n\t\treturn nil\n\t}\n\n\tc.foldersMut.Lock()\n\tres := make([]string, 0, len(c.folders))\n\tfor folder := range c.folders {\n\t\tres = append(res, folder)\n\t\tdelete(c.folders, folder)\n\t}\n\tc.foldersMut.Unlock()\n\treturn res\n}\n\n\/\/ sendSummary send the summary events for a single folder\nfunc (c *folderSummaryService) sendSummary(folder string) {\n\t\/\/ The folder summary contains how many bytes, files etc\n\t\/\/ are in the folder and how in sync we are.\n\tdata, err := c.Summary(folder)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.evLogger.Log(events.FolderSummary, map[string]interface{}{\n\t\t\"folder\": folder,\n\t\t\"summary\": data,\n\t})\n\n\tfor _, devCfg := range c.cfg.Folders()[folder].Devices {\n\t\tif devCfg.DeviceID.Equals(c.id) {\n\t\t\t\/\/ We already know about ourselves.\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := c.model.Connection(devCfg.DeviceID); !ok {\n\t\t\t\/\/ We're not interested in disconnected devices.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get completion percentage of this folder for the\n\t\t\/\/ remote device.\n\t\tcomp := c.model.Completion(devCfg.DeviceID, folder).Map()\n\t\tcomp[\"folder\"] = folder\n\t\tcomp[\"device\"] = devCfg.DeviceID.String()\n\t\tc.evLogger.Log(events.FolderCompletion, comp)\n\t}\n}\n<commit_msg>lib\/model: Also send folder summary from sync-preparing (ref #6028) (#6202)<commit_after>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage model\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/thejerf\/suture\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/config\"\n\t\"github.com\/syncthing\/syncthing\/lib\/events\"\n\t\"github.com\/syncthing\/syncthing\/lib\/protocol\"\n\t\"github.com\/syncthing\/syncthing\/lib\/sync\"\n\t\"github.com\/syncthing\/syncthing\/lib\/util\"\n)\n\nconst minSummaryInterval = time.Minute\n\ntype FolderSummaryService interface {\n\tsuture.Service\n\tSummary(folder string) (map[string]interface{}, error)\n\tOnEventRequest()\n}\n\n\/\/ The folderSummaryService adds summary information events (FolderSummary and\n\/\/ FolderCompletion) into the event stream at certain intervals.\ntype folderSummaryService struct {\n\t*suture.Supervisor\n\n\tcfg config.Wrapper\n\tmodel Model\n\tid protocol.DeviceID\n\tevLogger events.Logger\n\timmediate chan string\n\n\t\/\/ For keeping track of folders to recalculate for\n\tfoldersMut sync.Mutex\n\tfolders map[string]struct{}\n\n\t\/\/ For keeping track of when the last event request on the API was\n\tlastEventReq time.Time\n\tlastEventReqMut sync.Mutex\n}\n\nfunc NewFolderSummaryService(cfg config.Wrapper, m Model, id protocol.DeviceID, evLogger events.Logger) FolderSummaryService {\n\tservice := &folderSummaryService{\n\t\tSupervisor: suture.New(\"folderSummaryService\", suture.Spec{\n\t\t\tPassThroughPanics: true,\n\t\t}),\n\t\tcfg: cfg,\n\t\tmodel: m,\n\t\tid: id,\n\t\tevLogger: evLogger,\n\t\timmediate: make(chan string),\n\t\tfolders: make(map[string]struct{}),\n\t\tfoldersMut: sync.NewMutex(),\n\t\tlastEventReqMut: sync.NewMutex(),\n\t}\n\n\tservice.Add(util.AsService(service.listenForUpdates, fmt.Sprintf(\"%s\/listenForUpdates\", service)))\n\tservice.Add(util.AsService(service.calculateSummaries, fmt.Sprintf(\"%s\/calculateSummaries\", service)))\n\n\treturn service\n}\n\nfunc (c *folderSummaryService) String() string {\n\treturn fmt.Sprintf(\"FolderSummaryService@%p\", c)\n}\n\nfunc (c *folderSummaryService) Summary(folder string) (map[string]interface{}, error) {\n\tvar res = make(map[string]interface{})\n\n\terrors, err := c.model.FolderErrors(folder)\n\tif err != nil && err != ErrFolderPaused {\n\t\t\/\/ Stats from the db can still be obtained if the folder is just paused\n\t\treturn nil, err\n\t}\n\tres[\"errors\"] = len(errors)\n\tres[\"pullErrors\"] = len(errors) \/\/ deprecated\n\n\tres[\"invalid\"] = \"\" \/\/ Deprecated, retains external API for now\n\n\tglobal := c.model.GlobalSize(folder)\n\tres[\"globalFiles\"], res[\"globalDirectories\"], res[\"globalSymlinks\"], res[\"globalDeleted\"], res[\"globalBytes\"], res[\"globalTotalItems\"] = global.Files, global.Directories, global.Symlinks, global.Deleted, global.Bytes, global.TotalItems()\n\n\tlocal := c.model.LocalSize(folder)\n\tres[\"localFiles\"], res[\"localDirectories\"], res[\"localSymlinks\"], res[\"localDeleted\"], res[\"localBytes\"], res[\"localTotalItems\"] = local.Files, local.Directories, local.Symlinks, local.Deleted, local.Bytes, local.TotalItems()\n\n\tneed := c.model.NeedSize(folder)\n\tres[\"needFiles\"], res[\"needDirectories\"], res[\"needSymlinks\"], res[\"needDeletes\"], res[\"needBytes\"], res[\"needTotalItems\"] = need.Files, need.Directories, need.Symlinks, need.Deleted, need.Bytes, need.TotalItems()\n\n\tif c.cfg.Folders()[folder].Type == config.FolderTypeReceiveOnly {\n\t\t\/\/ Add statistics for things that have changed locally in a receive\n\t\t\/\/ only folder.\n\t\tro := c.model.ReceiveOnlyChangedSize(folder)\n\t\tres[\"receiveOnlyChangedFiles\"] = ro.Files\n\t\tres[\"receiveOnlyChangedDirectories\"] = ro.Directories\n\t\tres[\"receiveOnlyChangedSymlinks\"] = ro.Symlinks\n\t\tres[\"receiveOnlyChangedDeletes\"] = ro.Deleted\n\t\tres[\"receiveOnlyChangedBytes\"] = ro.Bytes\n\t\tres[\"receiveOnlyTotalItems\"] = ro.TotalItems()\n\t}\n\n\tres[\"inSyncFiles\"], res[\"inSyncBytes\"] = global.Files-need.Files, global.Bytes-need.Bytes\n\n\tres[\"state\"], res[\"stateChanged\"], err = c.model.State(folder)\n\tif err != nil {\n\t\tres[\"error\"] = err.Error()\n\t}\n\n\tourSeq, _ := c.model.CurrentSequence(folder)\n\tremoteSeq, _ := c.model.RemoteSequence(folder)\n\n\tres[\"version\"] = ourSeq + remoteSeq \/\/ legacy\n\tres[\"sequence\"] = ourSeq + remoteSeq \/\/ new name\n\n\tignorePatterns, _, _ := c.model.GetIgnores(folder)\n\tres[\"ignorePatterns\"] = false\n\tfor _, line := range ignorePatterns {\n\t\tif len(line) > 0 && !strings.HasPrefix(line, \"\/\/\") {\n\t\t\tres[\"ignorePatterns\"] = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\terr = c.model.WatchError(folder)\n\tif err != nil {\n\t\tres[\"watchError\"] = err.Error()\n\t}\n\n\treturn res, nil\n}\n\nfunc (c *folderSummaryService) OnEventRequest() {\n\tc.lastEventReqMut.Lock()\n\tc.lastEventReq = time.Now()\n\tc.lastEventReqMut.Unlock()\n}\n\n\/\/ listenForUpdates subscribes to the event bus and makes note of folders that\n\/\/ need their data recalculated.\nfunc (c *folderSummaryService) listenForUpdates(ctx context.Context) {\n\tsub := c.evLogger.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged | events.RemoteDownloadProgress | events.DeviceConnected | events.FolderWatchStateChanged | events.DownloadProgress)\n\tdefer sub.Unsubscribe()\n\n\tfor {\n\t\t\/\/ This loop needs to be fast so we don't miss too many events.\n\n\t\tselect {\n\t\tcase ev := <-sub.C():\n\t\t\tc.processUpdate(ev)\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *folderSummaryService) processUpdate(ev events.Event) {\n\tvar folder string\n\n\tswitch ev.Type {\n\tcase events.DeviceConnected:\n\t\t\/\/ When a device connects we schedule a refresh of all\n\t\t\/\/ folders shared with that device.\n\n\t\tdata := ev.Data.(map[string]string)\n\t\tdeviceID, _ := protocol.DeviceIDFromString(data[\"id\"])\n\n\t\tc.foldersMut.Lock()\n\tnextFolder:\n\t\tfor _, folder := range c.cfg.Folders() {\n\t\t\tfor _, dev := range folder.Devices {\n\t\t\t\tif dev.DeviceID == deviceID {\n\t\t\t\t\tc.folders[folder.ID] = struct{}{}\n\t\t\t\t\tcontinue nextFolder\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tc.foldersMut.Unlock()\n\n\t\treturn\n\n\tcase events.DownloadProgress:\n\t\tdata := ev.Data.(map[string]map[string]*pullerProgress)\n\t\tc.foldersMut.Lock()\n\t\tfor folder := range data {\n\t\t\tc.folders[folder] = struct{}{}\n\t\t}\n\t\tc.foldersMut.Unlock()\n\t\treturn\n\n\tcase events.StateChanged:\n\t\tdata := ev.Data.(map[string]interface{})\n\t\tif data[\"to\"].(string) != \"idle\" {\n\t\t\treturn\n\t\t}\n\t\tif from := data[\"from\"].(string); from != \"syncing\" && from != \"sync-preparing\" {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ The folder changed to idle from syncing. We should do an\n\t\t\/\/ immediate refresh to update the GUI. The send to\n\t\t\/\/ c.immediate must be nonblocking so that we can continue\n\t\t\/\/ handling events.\n\n\t\tfolder = data[\"folder\"].(string)\n\t\tselect {\n\t\tcase c.immediate <- folder:\n\t\t\tc.foldersMut.Lock()\n\t\t\tdelete(c.folders, folder)\n\t\t\tc.foldersMut.Unlock()\n\t\t\treturn\n\t\tdefault:\n\t\t\t\/\/ Refresh whenever we do the next summary.\n\t\t}\n\n\tdefault:\n\t\t\/\/ The other events all have a \"folder\" attribute that they\n\t\t\/\/ affect. Whenever the local or remote index is updated for a\n\t\t\/\/ given folder we make a note of it.\n\t\t\/\/ This folder needs to be refreshed whenever we do the next\n\t\t\/\/ refresh.\n\n\t\tfolder = ev.Data.(map[string]interface{})[\"folder\"].(string)\n\t}\n\n\tc.foldersMut.Lock()\n\tc.folders[folder] = struct{}{}\n\tc.foldersMut.Unlock()\n}\n\n\/\/ calculateSummaries periodically recalculates folder summaries and\n\/\/ completion percentage, and sends the results on the event bus.\nfunc (c *folderSummaryService) calculateSummaries(ctx context.Context) {\n\tconst pumpInterval = 2 * time.Second\n\tpump := time.NewTimer(pumpInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-pump.C:\n\t\t\tt0 := time.Now()\n\t\t\tfor _, folder := range c.foldersToHandle() {\n\t\t\t\tc.sendSummary(folder)\n\t\t\t}\n\n\t\t\t\/\/ We don't want to spend all our time calculating summaries. Lets\n\t\t\t\/\/ set an arbitrary limit at not spending more than about 30% of\n\t\t\t\/\/ our time here...\n\t\t\twait := 2*time.Since(t0) + pumpInterval\n\t\t\tpump.Reset(wait)\n\n\t\tcase folder := <-c.immediate:\n\t\t\tc.sendSummary(folder)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ foldersToHandle returns the list of folders needing a summary update, and\n\/\/ clears the list.\nfunc (c *folderSummaryService) foldersToHandle() []string {\n\t\/\/ We only recalculate summaries if someone is listening to events\n\t\/\/ (a request to \/rest\/events has been made within the last\n\t\/\/ pingEventInterval).\n\n\tc.lastEventReqMut.Lock()\n\tlast := c.lastEventReq\n\tc.lastEventReqMut.Unlock()\n\tif time.Since(last) > minSummaryInterval {\n\t\treturn nil\n\t}\n\n\tc.foldersMut.Lock()\n\tres := make([]string, 0, len(c.folders))\n\tfor folder := range c.folders {\n\t\tres = append(res, folder)\n\t\tdelete(c.folders, folder)\n\t}\n\tc.foldersMut.Unlock()\n\treturn res\n}\n\n\/\/ sendSummary send the summary events for a single folder\nfunc (c *folderSummaryService) sendSummary(folder string) {\n\t\/\/ The folder summary contains how many bytes, files etc\n\t\/\/ are in the folder and how in sync we are.\n\tdata, err := c.Summary(folder)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.evLogger.Log(events.FolderSummary, map[string]interface{}{\n\t\t\"folder\": folder,\n\t\t\"summary\": data,\n\t})\n\n\tfor _, devCfg := range c.cfg.Folders()[folder].Devices {\n\t\tif devCfg.DeviceID.Equals(c.id) {\n\t\t\t\/\/ We already know about ourselves.\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := c.model.Connection(devCfg.DeviceID); !ok {\n\t\t\t\/\/ We're not interested in disconnected devices.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get completion percentage of this folder for the\n\t\t\/\/ remote device.\n\t\tcomp := c.model.Completion(devCfg.DeviceID, folder).Map()\n\t\tcomp[\"folder\"] = folder\n\t\tcomp[\"device\"] = devCfg.DeviceID.String()\n\t\tc.evLogger.Log(events.FolderCompletion, comp)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2014 Google Inc. All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage service\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"third_party\/kythe\/go\/rpc\/server\"\n\ttestutil \"shipshape\/test\"\n\tstrset \"shipshape\/util\/strings\"\n\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\n\tnotepb \"shipshape\/proto\/note_proto\"\n\tctxpb \"shipshape\/proto\/shipshape_context_proto\"\n\trpcpb \"shipshape\/proto\/shipshape_rpc_proto\"\n)\n\ntype fakeDispatcher struct {\n\tcategories []string\n\tfiles []string\n}\n\nfunc (f fakeDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\treturn &rpcpb.GetCategoryResponse{\n\t\tCategory: f.categories,\n\t}, nil\n}\n\nfunc isSubset(a, b strset.Set) bool {\n\treturn len(a.Intersect(b)) == len(a)\n}\n\nfunc (f fakeDispatcher) Analyze(ctx server.Context, in *rpcpb.AnalyzeRequest) (*rpcpb.AnalyzeResponse, error) {\n\tvar nts []*notepb.Note\n\n\t\/\/ Assert that the analyzer was called with the right categories.\n\tif !isSubset(strset.New(in.Category...), strset.New(f.categories...)) {\n\t\treturn nil, fmt.Errorf(\"Category mismatch: got %v, supports %v\", in.Category, f.categories)\n\t}\n\n\tfor _, file := range f.files {\n\t\tnts = append(nts, ¬epb.Note{\n\t\t\tCategory: proto.String(f.categories[0]),\n\t\t\tDescription: proto.String(\"Hello world\"),\n\t\t\tLocation: testutil.CreateLocation(file),\n\t\t})\n\t}\n\n\treturn &rpcpb.AnalyzeResponse{\n\t\tNote: nts,\n\t}, nil\n}\n\ntype errDispatcher struct{}\n\nfunc (errDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\treturn nil, fmt.Errorf(\"An error\")\n}\n\ntype panicDispatcher struct{}\n\nfunc (panicDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\tpanic(\"panic\")\n}\n\ntype fullFakeDispatcher struct {\n\tresponse *rpcpb.AnalyzeResponse\n}\n\nfunc (f fullFakeDispatcher) Analyze(ctx server.Context, in *rpcpb.AnalyzeRequest) (*rpcpb.AnalyzeResponse, error) {\n\treturn f.response, nil\n}\n\nfunc TestGetCategories(t *testing.T) {\n\taddr2, cleanup, err := testutil.CreatekRPCTestServer(&fakeDispatcher{[]string{\"Foo\", \"Bar\"}, nil}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddr0, cleanup, err := testutil.CreatekRPCTestServer(&fakeDispatcher{nil, nil}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddre, cleanup, err := testutil.CreatekRPCTestServer(&errDispatcher{}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddrp, cleanup, err := testutil.CreatekRPCTestServer(&panicDispatcher{}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\ttests := []struct {\n\t\taddrs []string\n\t\tresult map[string]strset.Set\n\t}{\n\t\t{[]string{addr0}, map[string]strset.Set{addr0: nil}},\n\t\t{[]string{addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\")}},\n\t\t{[]string{addr0, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addr0: nil}},\n\t\t{[]string{addrp, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addrp: nil}},\n\t\t{[]string{addre, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addre: nil}},\n\t}\n\n\tfor _, test := range tests {\n\t\tdriver := NewDriver(test.addrs)\n\t\tcategories := driver.getAllCategories()\n\n\t\tif len(test.result) != len(categories) {\n\t\t\tt.Errorf(\"Incorrect number of results: got %v, want %v\", categories, test.result)\n\t\t}\n\n\t\tfor addr, cats := range test.result {\n\t\t\tif !strset.Equal(cats.ToSlice(), test.result[addr].ToSlice()) {\n\t\t\t\tt.Errorf(\"Incorrect categories for %s: got %v, want %v\", addr, cats, test.result[addr])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCallAllAnalyzers(t *testing.T) {\n\tdispatcher := &fakeDispatcher{categories: []string{\"Foo\", \"Bar\"}, files: []string{\"dir1\/A.h\", \"dir1\/A.cc\"}}\n\taddr, cleanup, err := testutil.CreatekRPCTestServer(dispatcher, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\tdriver := NewTestDriver(map[string]strset.Set{addr: strset.New(\"Foo\", \"Bar\")})\n\n\ttests := []struct {\n\t\tfiles []string\n\t\tcategories []string\n\t\texpect []*notepb.Note\n\t}{\n\t\t{\n\t\t\t\/\/ - SomeOtherCategory should be dropped; if it weren't fakeDispatcher would return an error.\n\t\t\t\/\/ - fakeDispatcher produces notes for files we didn't ask about; they should be dropped.\n\t\t\t[]string{\"dir1\/A.cc\"},\n\t\t\t[]string{\"Foo\", \"SomeOtherCategory\"},\n\t\t\t[]*notepb.Note{\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A.cc\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tcfg := &config{categories: test.categories}\n\t\tctx := &ctxpb.ShipshapeContext{FilePath: test.files}\n\n\t\tars := driver.callAllAnalyzers(cfg, ctx)\n\t\tvar notes []*notepb.Note\n\n\t\tfor _, ar := range ars {\n\t\t\tnotes = append(notes, ar.Note...)\n\t\t\tif len(ar.Failure) > 0 {\n\t\t\t\tt.Errorf(\"Received failures from analyze call: %v\", ar.Failure)\n\t\t\t}\n\t\t}\n\n\t\tok, results := testutil.CheckNoteContainsContent(test.expect, notes)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect notes for config %v: %s\\n got %v, want %v\", cfg, results, notes, test.expect)\n\t\t}\n\t}\n}\n\nfunc TestCallAllAnalyzersErrorCases(t *testing.T) {\n\tctx := &ctxpb.ShipshapeContext{FilePath: []string{\"dir1\/A\", \"dir2\/B\"}}\n\tcfg := &config{categories: []string{\"Foo\"}}\n\n\ttests := []struct {\n\t\tresponse *rpcpb.AnalyzeResponse\n\t\texpectNotes []*notepb.Note\n\t\texpectFailure []*rpcpb.AnalysisFailure\n\t}{\n\t\t{ \/\/analysis had a failure\n\t\t\t&rpcpb.AnalyzeResponse{\n\t\t\t\tFailure: []*rpcpb.AnalysisFailure{\n\t\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil,\n\t\t\t[]*rpcpb.AnalysisFailure{\n\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{ \/\/analysis had both failure and notes\n\t\t\t&rpcpb.AnalyzeResponse{\n\t\t\t\tNote: []*notepb.Note{\n\t\t\t\t\t¬epb.Note{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t\t},\n\t\t\t\t\t¬epb.Note{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFailure: []*rpcpb.AnalysisFailure{\n\t\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]*notepb.Note{\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t},\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]*rpcpb.AnalysisFailure{\n\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\taddr, cleanup, err := testutil.CreatekRPCTestServer(&fullFakeDispatcher{test.response}, \"AnalyzerService\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\tdriver := NewTestDriver(map[string]strset.Set{addr: strset.New(\"Foo\")})\n\n\t\tars := driver.callAllAnalyzers(cfg, ctx)\n\t\tvar notes []*notepb.Note\n\t\tvar failures []*rpcpb.AnalysisFailure\n\n\t\tfor _, ar := range ars {\n\t\t\tnotes = append(notes, ar.Note...)\n\t\t\tfailures = append(failures, ar.Failure...)\n\t\t}\n\n\t\tok, results := testutil.CheckNoteContainsContent(test.expectNotes, notes)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect notes for original response %v: %s\\n got %v, want %v\", test.response, results, notes, test.expectNotes)\n\t\t}\n\t\tok, results = testutil.CheckFailureContainsContent(test.expectFailure, failures)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect failures for original response %v: %s\\n got %v, want %v\", test.response, results, failures, test.expectFailure)\n\t\t}\n\n\t}\n}\n\nfunc TestFilterPaths(t *testing.T) {\n\ttests := []struct {\n\t\tlabel string\n\t\tignoreDirs []string\n\t\tinputFiles []string\n\t\texpectedFiles []string\n\t}{\n\t\t{\n\t\t\t\"Empty ignore list\",\n\t\t\t[]string{},\n\t\t\t[]string{\"a\", \"b\", \"c\", \"d\"},\n\t\t\t[]string{\"a\", \"b\", \"c\", \"d\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list matches some items\",\n\t\t\t[]string{\"dir1\/\", \"dir2\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t\t[]string{\"dir3\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list does not match anything\",\n\t\t\t[]string{\"dir77\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list contains substring match\",\n\t\t\t[]string{\"dir1\/\"},\n\t\t\t[]string{\"dir4\/a\", \"dir4\/dir1\/a\"},\n\t\t\t[]string{\"dir4\/a\", \"dir4\/dir1\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list matches everything\",\n\t\t\t[]string{\"dir1\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir1\/b\", \"dir1\/c\"},\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tout := filterPaths(test.ignoreDirs, test.inputFiles)\n\t\tif !reflect.DeepEqual(out, test.expectedFiles) {\n\t\t\tt.Fatalf(\"Error on %q: got %v, expected %v\", test.label, out, test.expectedFiles)\n\t\t}\n\t}\n}\n<commit_msg>Existing driver tests pass now. We should add more tests for the stages, but will do that in a separate CL<commit_after>\/*\n * Copyright 2014 Google Inc. All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage service\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\tstrset \"shipshape\/util\/strings\"\n\ttestutil \"shipshape\/util\/test\"\n\t\"third_party\/kythe\/go\/rpc\/server\"\n\n\t\"code.google.com\/p\/goprotobuf\/proto\"\n\n\tnotepb \"shipshape\/proto\/note_proto\"\n\tctxpb \"shipshape\/proto\/shipshape_context_proto\"\n\trpcpb \"shipshape\/proto\/shipshape_rpc_proto\"\n)\n\ntype fakeDispatcher struct {\n\tcategories []string\n\tfiles []string\n}\n\nfunc (f fakeDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\treturn &rpcpb.GetCategoryResponse{\n\t\tCategory: f.categories,\n\t}, nil\n}\n\nfunc (f fakeDispatcher) GetStage(ctx server.Context, in *rpcpb.GetStageRequest) (*rpcpb.GetStageResponse, error) {\n\treturn &rpcpb.GetStageResponse{\n\t\tStage: ctxpb.Stage_PRE_BUILD.Enum(),\n\t}, nil\n}\nfunc isSubset(a, b strset.Set) bool {\n\treturn len(a.Intersect(b)) == len(a)\n}\n\nfunc (f fakeDispatcher) Analyze(ctx server.Context, in *rpcpb.AnalyzeRequest) (*rpcpb.AnalyzeResponse, error) {\n\tvar nts []*notepb.Note\n\n\t\/\/ Assert that the analyzer was called with the right categories.\n\tif !isSubset(strset.New(in.Category...), strset.New(f.categories...)) {\n\t\treturn nil, fmt.Errorf(\"Category mismatch: got %v, supports %v\", in.Category, f.categories)\n\t}\n\n\tfor _, file := range f.files {\n\t\tnts = append(nts, ¬epb.Note{\n\t\t\tCategory: proto.String(f.categories[0]),\n\t\t\tDescription: proto.String(\"Hello world\"),\n\t\t\tLocation: testutil.CreateLocation(file),\n\t\t})\n\t}\n\n\treturn &rpcpb.AnalyzeResponse{\n\t\tNote: nts,\n\t}, nil\n}\n\ntype errDispatcher struct{}\n\nfunc (errDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\treturn nil, fmt.Errorf(\"An error\")\n}\n\nfunc (errDispatcher) GetStage(ctx server.Context, in *rpcpb.GetStageRequest) (*rpcpb.GetStageResponse, error) {\n\treturn nil, fmt.Errorf(\"An error\")\n}\n\ntype panicDispatcher struct{}\n\nfunc (panicDispatcher) GetCategory(ctx server.Context, in *rpcpb.GetCategoryRequest) (*rpcpb.GetCategoryResponse, error) {\n\tpanic(\"panic\")\n}\n\nfunc (panicDispatcher) GetStage(ctx server.Context, in *rpcpb.GetStageRequest) (*rpcpb.GetStageResponse, error) {\n\tpanic(\"panic\")\n}\n\ntype fullFakeDispatcher struct {\n\tresponse *rpcpb.AnalyzeResponse\n}\n\nfunc (f fullFakeDispatcher) Analyze(ctx server.Context, in *rpcpb.AnalyzeRequest) (*rpcpb.AnalyzeResponse, error) {\n\treturn f.response, nil\n}\n\nfunc TestGetServiceInfo(t *testing.T) {\n\taddr2, cleanup, err := testutil.CreatekRPCTestServer(&fakeDispatcher{[]string{\"Foo\", \"Bar\"}, nil}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddr0, cleanup, err := testutil.CreatekRPCTestServer(&fakeDispatcher{nil, nil}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddre, cleanup, err := testutil.CreatekRPCTestServer(&errDispatcher{}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\taddrp, cleanup, err := testutil.CreatekRPCTestServer(&panicDispatcher{}, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\ttests := []struct {\n\t\taddrs []string\n\t\tresult map[string]strset.Set\n\t}{\n\t\t{[]string{addr0}, map[string]strset.Set{addr0: nil}},\n\t\t{[]string{addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\")}},\n\t\t{[]string{addr0, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addr0: nil}},\n\t\t{[]string{addrp, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addrp: nil}},\n\t\t{[]string{addre, addr2}, map[string]strset.Set{addr2: strset.New(\"Foo\", \"Bar\"), addre: nil}},\n\t}\n\n\tfor _, test := range tests {\n\t\tdriver := NewDriver(test.addrs)\n\t\tinfo := driver.getAllServiceInfo()\n\n\t\tif len(test.result) != len(info) {\n\t\t\tt.Errorf(\"Incorrect number of results: got %v, want %v\", info, test.result)\n\t\t}\n\n\t\tfor addr, expectCats := range test.result {\n\t\t\tif !strset.Equal(info[strings.TrimPrefix(addr, \"http:\/\/\")].categories.ToSlice(), expectCats.ToSlice()) {\n\t\t\t\tt.Errorf(\"Incorrect categories for %s: got %v, want %v\", addr, info[addr].categories, expectCats)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCallAllAnalyzers(t *testing.T) {\n\tdispatcher := &fakeDispatcher{categories: []string{\"Foo\", \"Bar\"}, files: []string{\"dir1\/A.h\", \"dir1\/A.cc\"}}\n\taddr, cleanup, err := testutil.CreatekRPCTestServer(dispatcher, \"AnalyzerService\")\n\tif err != nil {\n\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t}\n\tdefer cleanup()\n\n\tdriver := NewTestDriver([]serviceInfo{\n\t\tserviceInfo{addr, strset.New(\"Foo\", \"Bar\"), ctxpb.Stage_PRE_BUILD},\n\t})\n\n\ttests := []struct {\n\t\tfiles []string\n\t\tcategories []string\n\t\texpect []*notepb.Note\n\t}{\n\t\t{\n\t\t\t\/\/ - SomeOtherCategory should be dropped; if it weren't fakeDispatcher would return an error.\n\t\t\t\/\/ - fakeDispatcher produces notes for files we didn't ask about; they should be dropped.\n\t\t\t[]string{\"dir1\/A.cc\"},\n\t\t\t[]string{\"Foo\", \"SomeOtherCategory\"},\n\t\t\t[]*notepb.Note{\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A.cc\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tctx := &ctxpb.ShipshapeContext{FilePath: test.files}\n\n\t\tars := driver.callAllAnalyzers(strset.New(test.categories...), ctx, ctxpb.Stage_PRE_BUILD)\n\t\tvar notes []*notepb.Note\n\n\t\tfor _, ar := range ars {\n\t\t\tnotes = append(notes, ar.Note...)\n\t\t\tif len(ar.Failure) > 0 {\n\t\t\t\tt.Errorf(\"Received failures from analyze call: %v\", ar.Failure)\n\t\t\t}\n\t\t}\n\n\t\tok, results := testutil.CheckNoteContainsContent(test.expect, notes)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect notes for categories %v: %s\\n got %v, want %v\", test.categories, results, notes, test.expect)\n\t\t}\n\t}\n}\n\nfunc TestCallAllAnalyzersErrorCases(t *testing.T) {\n\tctx := &ctxpb.ShipshapeContext{FilePath: []string{\"dir1\/A\", \"dir2\/B\"}}\n\n\ttests := []struct {\n\t\tresponse *rpcpb.AnalyzeResponse\n\t\texpectNotes []*notepb.Note\n\t\texpectFailure []*rpcpb.AnalysisFailure\n\t}{\n\t\t{ \/\/analysis had a failure\n\t\t\t&rpcpb.AnalyzeResponse{\n\t\t\t\tFailure: []*rpcpb.AnalysisFailure{\n\t\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil,\n\t\t\t[]*rpcpb.AnalysisFailure{\n\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{ \/\/analysis had both failure and notes\n\t\t\t&rpcpb.AnalyzeResponse{\n\t\t\t\tNote: []*notepb.Note{\n\t\t\t\t\t¬epb.Note{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t\t},\n\t\t\t\t\t¬epb.Note{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tFailure: []*rpcpb.AnalysisFailure{\n\t\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]*notepb.Note{\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t},\n\t\t\t\t¬epb.Note{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tDescription: proto.String(\"A note\"),\n\t\t\t\t\tLocation: testutil.CreateLocation(\"dir1\/A\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]*rpcpb.AnalysisFailure{\n\t\t\t\t&rpcpb.AnalysisFailure{\n\t\t\t\t\tCategory: proto.String(\"Foo\"),\n\t\t\t\t\tFailureMessage: proto.String(\"badbadbad\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\taddr, cleanup, err := testutil.CreatekRPCTestServer(&fullFakeDispatcher{test.response}, \"AnalyzerService\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Registering analyzer service failed: %v\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\tdriver := NewTestDriver([]serviceInfo{\n\t\t\tserviceInfo{addr, strset.New(\"Foo\"), ctxpb.Stage_PRE_BUILD},\n\t\t})\n\n\t\tars := driver.callAllAnalyzers(strset.New(\"Foo\"), ctx, ctxpb.Stage_PRE_BUILD)\n\t\tvar notes []*notepb.Note\n\t\tvar failures []*rpcpb.AnalysisFailure\n\n\t\tfor _, ar := range ars {\n\t\t\tnotes = append(notes, ar.Note...)\n\t\t\tfailures = append(failures, ar.Failure...)\n\t\t}\n\n\t\tok, results := testutil.CheckNoteContainsContent(test.expectNotes, notes)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect notes for original response %v: %s\\n got %v, want %v\", test.response, results, notes, test.expectNotes)\n\t\t}\n\t\tok, results = testutil.CheckFailureContainsContent(test.expectFailure, failures)\n\t\tif !ok {\n\t\t\tt.Errorf(\"Incorrect failures for original response %v: %s\\n got %v, want %v\", test.response, results, failures, test.expectFailure)\n\t\t}\n\n\t}\n}\n\nfunc TestFilterPaths(t *testing.T) {\n\ttests := []struct {\n\t\tlabel string\n\t\tignoreDirs []string\n\t\tinputFiles []string\n\t\texpectedFiles []string\n\t}{\n\t\t{\n\t\t\t\"Empty ignore list\",\n\t\t\t[]string{},\n\t\t\t[]string{\"a\", \"b\", \"c\", \"d\"},\n\t\t\t[]string{\"a\", \"b\", \"c\", \"d\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list matches some items\",\n\t\t\t[]string{\"dir1\/\", \"dir2\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t\t[]string{\"dir3\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list does not match anything\",\n\t\t\t[]string{\"dir77\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t\t[]string{\"dir1\/a\", \"dir3\/a\", \"dir2\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list contains substring match\",\n\t\t\t[]string{\"dir1\/\"},\n\t\t\t[]string{\"dir4\/a\", \"dir4\/dir1\/a\"},\n\t\t\t[]string{\"dir4\/a\", \"dir4\/dir1\/a\"},\n\t\t},\n\t\t{\n\t\t\t\"Ignore list matches everything\",\n\t\t\t[]string{\"dir1\/\"},\n\t\t\t[]string{\"dir1\/a\", \"dir1\/b\", \"dir1\/c\"},\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tout := filterPaths(test.ignoreDirs, test.inputFiles)\n\t\tif !reflect.DeepEqual(out, test.expectedFiles) {\n\t\t\tt.Fatalf(\"Error on %q: got %v, expected %v\", test.label, out, test.expectedFiles)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gob\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\n\t\"github.com\/dvyukov\/go-fuzz\/examples\/fuzz\"\n)\n\ntype X struct {\n\tA int\n\tB string\n\tC float64\n\tD []byte\n\tE interface{}\n\tF complex128\n\tG []interface{}\n\tH *int\n\tI **int\n\tJ *X\n\tK map[string]int\n}\n\nfunc init() {\n\tgob.Register(X{})\n}\n\nfunc Fuzz(data []byte) int {\n\tscore := 0\n\tfor _, ctor := range []func() interface{}{\n\t\tfunc() interface{} { return nil },\n\t\tfunc() interface{} { return new(int) },\n\t\tfunc() interface{} { return new(string) },\n\t\tfunc() interface{} { return new(float64) },\n\t\tfunc() interface{} { return new([]byte) },\n\t\tfunc() interface{} { return new(interface{}) },\n\t\tfunc() interface{} { return new(complex128) },\n\t\tfunc() interface{} { m := make(map[int]int); return &m },\n\t\tfunc() interface{} { m := make(map[string]interface{}); return &m },\n\t\tfunc() interface{} { return new(X) },\n\t} {\n\t\tv := ctor()\n\t\tdec := gob.NewDecoder(bytes.NewReader(data))\n\t\tif dec.Decode(v) != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdec.Decode(ctor())\n\t\tscore = 1\n\t\tif ctor() == nil {\n\t\t\tcontinue\n\t\t}\n\t\tb1 := new(bytes.Buffer)\n\t\tif err := gob.NewEncoder(b1).Encode(v); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tv1 := reflect.ValueOf(ctor())\n\t\terr := gob.NewDecoder(bytes.NewReader(data)).DecodeValue(v1)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !fuzz.DeepEqual(v, v1.Interface()) {\n\t\t\tfmt.Printf(\"v0: %#v\\n\", reflect.ValueOf(v).Elem().Interface())\n\t\t\tfmt.Printf(\"v1: %#v\\n\", v1.Elem().Interface())\n\t\t\tpanic(fmt.Sprintf(\"values not equal %T\", v))\n\t\t}\n\t\tb2 := new(bytes.Buffer)\n\t\terr = gob.NewEncoder(b2).EncodeValue(v1)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tv2 := ctor()\n\t\tdec1 := gob.NewDecoder(b1)\n\t\tif err := dec1.Decode(v2); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif err := dec1.Decode(ctor()); err != io.EOF {\n\t\t\tpanic(err)\n\t\t}\n\t\tif vv, ok := v.(*X); ok && vv.I != nil && (*vv.I == nil || **vv.I == 0) {\n\t\t\t\/\/ If input contains \"I:42 I:null\", then I will be in this weird state.\n\t\t\t\/\/ It is effectively nil, but DeepEqual does not handle such case.\n\t\t\tvv.I = nil\n\t\t}\n\t\tif !fuzz.DeepEqual(v, v2) {\n\t\t\tfmt.Printf(\"v0: %#v\\n\", reflect.ValueOf(v).Elem().Interface())\n\t\t\tfmt.Printf(\"v2: %#v\\n\", reflect.ValueOf(v2).Elem().Interface())\n\t\t\tpanic(fmt.Sprintf(\"values not equal 2 %T\", v))\n\t\t}\n\t}\n\treturn score\n}\n<commit_msg>fix gob test (the weird pointers can appear in nested X structs)<commit_after>package gob\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\n\t\"github.com\/dvyukov\/go-fuzz\/examples\/fuzz\"\n)\n\ntype X struct {\n\tA int\n\tB string\n\tC float64\n\tD []byte\n\tE interface{}\n\tF complex128\n\tG []interface{}\n\tH *int\n\tI **int\n\tJ *X\n\tK map[string]int\n}\n\nfunc init() {\n\tgob.Register(X{})\n}\n\nfunc Fuzz(data []byte) int {\n\tscore := 0\n\tfor _, ctor := range []func() interface{}{\n\t\tfunc() interface{} { return nil },\n\t\tfunc() interface{} { return new(int) },\n\t\tfunc() interface{} { return new(string) },\n\t\tfunc() interface{} { return new(float64) },\n\t\tfunc() interface{} { return new([]byte) },\n\t\tfunc() interface{} { return new(interface{}) },\n\t\tfunc() interface{} { return new(complex128) },\n\t\tfunc() interface{} { m := make(map[int]int); return &m },\n\t\tfunc() interface{} { m := make(map[string]interface{}); return &m },\n\t\tfunc() interface{} { return new(X) },\n\t} {\n\t\tv := ctor()\n\t\tdec := gob.NewDecoder(bytes.NewReader(data))\n\t\tif dec.Decode(v) != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdec.Decode(ctor())\n\t\tscore = 1\n\t\tif ctor() == nil {\n\t\t\tcontinue\n\t\t}\n\t\tb1 := new(bytes.Buffer)\n\t\tif err := gob.NewEncoder(b1).Encode(v); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tv1 := reflect.ValueOf(ctor())\n\t\terr := gob.NewDecoder(bytes.NewReader(data)).DecodeValue(v1)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !fuzz.DeepEqual(v, v1.Interface()) {\n\t\t\tfmt.Printf(\"v0: %#v\\n\", reflect.ValueOf(v).Elem().Interface())\n\t\t\tfmt.Printf(\"v1: %#v\\n\", v1.Elem().Interface())\n\t\t\tpanic(fmt.Sprintf(\"values not equal %T\", v))\n\t\t}\n\t\tb2 := new(bytes.Buffer)\n\t\terr = gob.NewEncoder(b2).EncodeValue(v1)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tv2 := ctor()\n\t\tdec1 := gob.NewDecoder(b1)\n\t\tif err := dec1.Decode(v2); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif err := dec1.Decode(ctor()); err != io.EOF {\n\t\t\tpanic(err)\n\t\t}\n\t\tif vv, ok := v.(*X); ok {\n\t\t\tfix(vv)\n\t\t}\n\t\tif !fuzz.DeepEqual(v, v2) {\n\t\t\tfmt.Printf(\"v0: %#v\\n\", reflect.ValueOf(v).Elem().Interface())\n\t\t\tfmt.Printf(\"v2: %#v\\n\", reflect.ValueOf(v2).Elem().Interface())\n\t\t\tpanic(fmt.Sprintf(\"values not equal 2 %T\", v))\n\t\t}\n\t}\n\treturn score\n}\n\nfunc fix(vv *X) {\n\t\/\/ See https:\/\/github.com\/golang\/go\/issues\/11119\n\tif vv.I != nil && (*vv.I == nil || **vv.I == 0) {\n\t\t\/\/ If input contains \"I:42 I:null\", then I will be in this weird state.\n\t\t\/\/ It is effectively nil, but DeepEqual does not handle such case.\n\t\tvv.I = nil\n\t}\n\tif vv.H != nil && *vv.H == 0 {\n\t\tvv.H = nil\n\t}\n\tif vv.J != nil {\n\t\tfix(vv.J)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/anacrolix\/torrent\"\n\t\"github.com\/anacrolix\/torrent\/internal\/testutil\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/storage\"\n)\n\nfunc justOneNetwork(cc *torrent.ClientConfig) {\n\tcc.DisableTCP = true\n\tcc.DisableIPv4 = true\n}\n\nfunc TestReceiveChunkStorageFailure(t *testing.T) {\n\tseederDataDir, metainfo := testutil.GreetingTestTorrent()\n\tdefer os.RemoveAll(seederDataDir)\n\tseederClientConfig := torrent.TestingConfig()\n\tseederClientConfig.Debug = true\n\tjustOneNetwork(seederClientConfig)\n\tseederClientStorage := storage.NewMMap(seederDataDir)\n\tdefer seederClientStorage.Close()\n\tseederClientConfig.DefaultStorage = seederClientStorage\n\tseederClientConfig.Seed = true\n\tseederClientConfig.Debug = true\n\tseederClient, err := torrent.NewClient(seederClientConfig)\n\trequire.NoError(t, err)\n\tdefer testutil.ExportStatusWriter(seederClient, \"s\")()\n\tleecherClientConfig := torrent.TestingConfig()\n\tleecherClientConfig.Debug = true\n\tjustOneNetwork(leecherClientConfig)\n\tleecherClient, err := torrent.NewClient(leecherClientConfig)\n\trequire.NoError(t, err)\n\tdefer testutil.ExportStatusWriter(leecherClient, \"l\")()\n\tinfo, err := metainfo.UnmarshalInfo()\n\trequire.NoError(t, err)\n\tleecherStorage := diskFullStorage{\n\t\tpieces: make([]pieceState, info.NumPieces()),\n\t\tdata: make([]byte, info.TotalLength()),\n\t}\n\tdefer leecherStorage.Close()\n\tleecherTorrent, new, err := leecherClient.AddTorrentSpec(&torrent.TorrentSpec{\n\t\tInfoHash: metainfo.HashInfoBytes(),\n\t\tStorage: &leecherStorage,\n\t})\n\tleecherStorage.t = leecherTorrent\n\trequire.NoError(t, err)\n\tassert.True(t, new)\n\tseederTorrent, err := seederClient.AddTorrent(metainfo)\n\trequire.NoError(t, err)\n\t\/\/ Tell the seeder to find the leecher. Is it guaranteed seeders will always try to do this?\n\tseederTorrent.AddClientPeer(leecherClient)\n\t<-leecherTorrent.GotInfo()\n\tassertReadAllGreeting(t, leecherTorrent.NewReader())\n}\n\ntype pieceState struct {\n\tcomplete bool\n}\n\ntype diskFullStorage struct {\n\tpieces []pieceState\n\tt *torrent.Torrent\n\tdefaultHandledWriteChunkError bool\n\tdata []byte\n\n\tmu sync.Mutex\n\tdiskNotFull bool\n}\n\nfunc (me *diskFullStorage) Piece(p metainfo.Piece) storage.PieceImpl {\n\treturn pieceImpl{\n\t\tmip: p,\n\t\tdiskFullStorage: me,\n\t}\n}\n\nfunc (me diskFullStorage) Close() error {\n\treturn nil\n}\n\nfunc (d diskFullStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {\n\treturn &d, nil\n}\n\ntype pieceImpl struct {\n\tmip metainfo.Piece\n\t*diskFullStorage\n}\n\nfunc (me pieceImpl) state() *pieceState {\n\treturn &me.diskFullStorage.pieces[me.mip.Index()]\n}\n\nfunc (me pieceImpl) ReadAt(p []byte, off int64) (n int, err error) {\n\toff += me.mip.Offset()\n\treturn copy(p, me.data[off:]), nil\n}\n\nfunc (me pieceImpl) WriteAt(p []byte, off int64) (int, error) {\n\toff += me.mip.Offset()\n\tif !me.defaultHandledWriteChunkError {\n\t\tgo func() {\n\t\t\tme.t.SetOnWriteChunkError(func(err error) {\n\t\t\t\tlog.Printf(\"got write chunk error to custom handler: %v\", err)\n\t\t\t\tme.mu.Lock()\n\t\t\t\tme.diskNotFull = true\n\t\t\t\tme.mu.Unlock()\n\t\t\t\tme.t.AllowDataDownload()\n\t\t\t})\n\t\t\tme.t.AllowDataDownload()\n\t\t}()\n\t\tme.defaultHandledWriteChunkError = true\n\t}\n\tme.mu.Lock()\n\tdefer me.mu.Unlock()\n\tif me.diskNotFull {\n\t\treturn copy(me.data[off:], p), nil\n\t}\n\treturn copy(me.data[off:], p[:1]), errors.New(\"disk full\")\n}\n\nfunc (me pieceImpl) MarkComplete() error {\n\tme.state().complete = true\n\treturn nil\n}\n\nfunc (me pieceImpl) MarkNotComplete() error {\n\tpanic(\"implement me\")\n}\n\nfunc (me pieceImpl) Completion() storage.Completion {\n\treturn storage.Completion{\n\t\tComplete: me.state().complete,\n\t\tOk: true,\n\t}\n}\n<commit_msg>Use anacrolix\/log in test<commit_after>package test\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com\/anacrolix\/log\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/anacrolix\/torrent\"\n\t\"github.com\/anacrolix\/torrent\/internal\/testutil\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/storage\"\n)\n\nfunc justOneNetwork(cc *torrent.ClientConfig) {\n\tcc.DisableTCP = true\n\tcc.DisableIPv4 = true\n}\n\nfunc TestReceiveChunkStorageFailure(t *testing.T) {\n\tseederDataDir, metainfo := testutil.GreetingTestTorrent()\n\tdefer os.RemoveAll(seederDataDir)\n\tseederClientConfig := torrent.TestingConfig()\n\tseederClientConfig.Debug = true\n\tjustOneNetwork(seederClientConfig)\n\tseederClientStorage := storage.NewMMap(seederDataDir)\n\tdefer seederClientStorage.Close()\n\tseederClientConfig.DefaultStorage = seederClientStorage\n\tseederClientConfig.Seed = true\n\tseederClientConfig.Debug = true\n\tseederClient, err := torrent.NewClient(seederClientConfig)\n\trequire.NoError(t, err)\n\tdefer testutil.ExportStatusWriter(seederClient, \"s\")()\n\tleecherClientConfig := torrent.TestingConfig()\n\tleecherClientConfig.Debug = true\n\tjustOneNetwork(leecherClientConfig)\n\tleecherClient, err := torrent.NewClient(leecherClientConfig)\n\trequire.NoError(t, err)\n\tdefer testutil.ExportStatusWriter(leecherClient, \"l\")()\n\tinfo, err := metainfo.UnmarshalInfo()\n\trequire.NoError(t, err)\n\tleecherStorage := diskFullStorage{\n\t\tpieces: make([]pieceState, info.NumPieces()),\n\t\tdata: make([]byte, info.TotalLength()),\n\t}\n\tdefer leecherStorage.Close()\n\tleecherTorrent, new, err := leecherClient.AddTorrentSpec(&torrent.TorrentSpec{\n\t\tInfoHash: metainfo.HashInfoBytes(),\n\t\tStorage: &leecherStorage,\n\t})\n\tleecherStorage.t = leecherTorrent\n\trequire.NoError(t, err)\n\tassert.True(t, new)\n\tseederTorrent, err := seederClient.AddTorrent(metainfo)\n\trequire.NoError(t, err)\n\t\/\/ Tell the seeder to find the leecher. Is it guaranteed seeders will always try to do this?\n\tseederTorrent.AddClientPeer(leecherClient)\n\t<-leecherTorrent.GotInfo()\n\tassertReadAllGreeting(t, leecherTorrent.NewReader())\n}\n\ntype pieceState struct {\n\tcomplete bool\n}\n\ntype diskFullStorage struct {\n\tpieces []pieceState\n\tt *torrent.Torrent\n\tdefaultHandledWriteChunkError bool\n\tdata []byte\n\n\tmu sync.Mutex\n\tdiskNotFull bool\n}\n\nfunc (me *diskFullStorage) Piece(p metainfo.Piece) storage.PieceImpl {\n\treturn pieceImpl{\n\t\tmip: p,\n\t\tdiskFullStorage: me,\n\t}\n}\n\nfunc (me diskFullStorage) Close() error {\n\treturn nil\n}\n\nfunc (d diskFullStorage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {\n\treturn &d, nil\n}\n\ntype pieceImpl struct {\n\tmip metainfo.Piece\n\t*diskFullStorage\n}\n\nfunc (me pieceImpl) state() *pieceState {\n\treturn &me.diskFullStorage.pieces[me.mip.Index()]\n}\n\nfunc (me pieceImpl) ReadAt(p []byte, off int64) (n int, err error) {\n\toff += me.mip.Offset()\n\treturn copy(p, me.data[off:]), nil\n}\n\nfunc (me pieceImpl) WriteAt(p []byte, off int64) (int, error) {\n\toff += me.mip.Offset()\n\tif !me.defaultHandledWriteChunkError {\n\t\tgo func() {\n\t\t\tme.t.SetOnWriteChunkError(func(err error) {\n\t\t\t\tlog.Printf(\"got write chunk error to custom handler: %v\", err)\n\t\t\t\tme.mu.Lock()\n\t\t\t\tme.diskNotFull = true\n\t\t\t\tme.mu.Unlock()\n\t\t\t\tme.t.AllowDataDownload()\n\t\t\t})\n\t\t\tme.t.AllowDataDownload()\n\t\t}()\n\t\tme.defaultHandledWriteChunkError = true\n\t}\n\tme.mu.Lock()\n\tdefer me.mu.Unlock()\n\tif me.diskNotFull {\n\t\treturn copy(me.data[off:], p), nil\n\t}\n\treturn copy(me.data[off:], p[:1]), errors.New(\"disk full\")\n}\n\nfunc (me pieceImpl) MarkComplete() error {\n\tme.state().complete = true\n\treturn nil\n}\n\nfunc (me pieceImpl) MarkNotComplete() error {\n\tpanic(\"implement me\")\n}\n\nfunc (me pieceImpl) Completion() storage.Completion {\n\treturn storage.Completion{\n\t\tComplete: me.state().complete,\n\t\tOk: true,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package sysvipc\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"reflect\"\n)\n\nvar test_qname string = \"ipc-transit-test-queue\"\n\nfunc TestSendRcv(t *testing.T) {\n\tdefer func() {\n\t\tos.Remove(defaultTransitPath + test_qname)\n\t}()\n\n\t\/\/ How to create this message: http:\/\/play.golang.org\/p\/13OSJHd5xe\n\t\/\/ Info about seemingly fully dynamic marshal\/unmarshal: http:\/\/stackoverflow.com\/questions\/19482612\/go-golang-array-type-inside-struct-missing-type-composite-literal\n\tsendMessage := map[string]interface{}{\n\t\t\"Name\": \"Wednesday\",\n\t\t\"Age\": 6,\n\t\t\"Parents\": map[string]interface{}{\n\t\t\t\"bee\": \"boo\",\n\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\"hi\": []string{\"a\", \"b\"},\n\t\t\t},\n\t\t},\n\t}\n\tsendErr := Send(sendMessage, test_qname)\n\tif sendErr != nil {\n\t\tt.Error(sendErr)\n\t\treturn\n\t}\n\tm, receiveErr := Receive(test_qname)\n\tif receiveErr != nil {\n\t\tt.Error(receiveErr)\n\t\treturn\n\t}\n\tmsg := m.(map[string]interface{})\n\tfor k, v := range msg {\n\t\tfmt.Println(k, \" -> \", reflect.TypeOf(v))\n\t\tswitch vv := v.(type) {\n\t\tcase string:\n\t\t\tif k == \"Name\" {\n\t\t\t\tif v != \"Wednesday\" {\n\t\t\t\t\tt.Error(receiveErr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(k, \"is string\", vv)\n\t\tcase float64:\n\t\t\tif k == \"Age\" {\n\t\t\t\tif v != 6.0 { \/\/very strange, even though it's an int in the json, it unmarshalled as a float\n\t\t\t\t\tt.Error(receiveErr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(k, \"is float64\", vv)\n\t\tcase map[string]interface{}:\n\t\t\tfmt.Println(k, \"is an array:\")\n\t\t\tfor i, u := range vv {\n\t\t\t\tfmt.Println(i, u)\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Println(k, \"is of a type I don't know how to handle\", vv)\n\t\t}\n\t}\n}\n<commit_msg>fix package name and add realistic test<commit_after>package ipc-transit_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"reflect\"\n)\n\nvar test_qname string = \"ipc-transit-test-queue\"\n\nfunc TestSendRcv(t *testing.T) {\n\tdefer func() {\n\t\tos.Remove(defaultTransitPath + test_qname)\n\t}()\n\n\t\/\/ How to create this message: http:\/\/play.golang.org\/p\/13OSJHd5xe\n\t\/\/ Info about seemingly fully dynamic marshal\/unmarshal: http:\/\/stackoverflow.com\/questions\/19482612\/go-golang-array-type-inside-struct-missing-type-composite-literal\n\tsendMessage := map[string]interface{}{\n\t\t\"Name\": \"Wednesday\",\n\t\t\"Age\": 6,\n\t\t\"Parents\": map[string]interface{}{\n\t\t\t\"bee\": \"boo\",\n\t\t\t\"foo\": map[string]interface{}{\n\t\t\t\t\"hi\": []string{\"a\", \"b\"},\n\t\t\t},\n\t\t},\n\t}\n\tsendErr := Send(sendMessage, test_qname)\n\tif sendErr != nil {\n\t\tt.Error(sendErr)\n\t\treturn\n\t}\n\tm, receiveErr := Receive(test_qname)\n\tif receiveErr != nil {\n\t\tt.Error(receiveErr)\n\t\treturn\n\t}\n\tmsg := m.(map[string]interface{})\n\tfor k, v := range msg {\n\t\tfmt.Println(k, \" -> \", reflect.TypeOf(v))\n\t\tswitch vv := v.(type) {\n\t\tcase string:\n\t\t\tif k == \"Name\" {\n\t\t\t\tif v != \"Wednesday\" {\n\t\t\t\t\tt.Error(receiveErr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(k, \"is string\", vv)\n\t\tcase float64:\n\t\t\tif k == \"Age\" {\n\t\t\t\tif v != 6.0 { \/\/very strange, even though it's an int in the json, it unmarshalled as a float\n\t\t\t\t\tt.Error(receiveErr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(k, \"is float64\", vv)\n\t\tcase map[string]interface{}:\n\t\t\tfmt.Println(k, \"is an array:\")\n\t\t\tfor i, u := range vv {\n\t\t\t\tfmt.Println(i, u)\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Println(k, \"is of a type I don't know how to handle\", vv)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package templatehelper\n\nimport (\n\t\"strings\"\n\t\"text\/template\"\n)\n\n\/\/ FuncMap contains all the common string helpers\nvar (\n\tFuncMap = template.FuncMap{\n\t\t\"Left\": func(values ...interface{}) string {\n\t\t\treturn values[0].(string)[:values[1].(int)]\n\t\t},\n\t\t\"Mid\": func(values ...interface{}) string {\n\t\t\tif len(values) > 2 {\n\t\t\t\treturn values[0].(string)[values[1].(int):values[2].(int)]\n\t\t\t}\n\t\t\treturn values[0].(string)[values[1].(int):]\n\t\t},\n\t\t\"Right\": func(values ...interface{}) string {\n\t\t\treturn values[0].(string)[len(values[0].(string))-values[1].(int):]\n\t\t},\n\t\t\"Last\": func(values ...interface{}) string {\n\t\t\treturn values[0].([]string)[len(values[0].([]string))-1]\n\t\t},\n\t\t\/\/ strings functions\n\t\t\/\/ \"Compare\": strings.Compare, \/\/ 1.5+ only\n\t\t\"Contains\": strings.Contains,\n\t\t\"ContainsAny\": strings.ContainsAny,\n\t\t\"Count\": strings.Count,\n\t\t\"EqualFold\": strings.EqualFold,\n\t\t\"HasPrefix\": strings.HasPrefix,\n\t\t\"HasSuffix\": strings.HasSuffix,\n\t\t\"Index\": strings.Index,\n\t\t\"IndexAny\": strings.IndexAny,\n\t\t\"Join\": strings.Join,\n\t\t\"LastIndex\": strings.LastIndex,\n\t\t\"LastIndexAny\": strings.LastIndexAny,\n\t\t\"Repeat\": strings.Repeat,\n\t\t\"Replace\": strings.Replace,\n\t\t\"Split\": strings.Split,\n\t\t\"SplitAfter\": strings.SplitAfter,\n\t\t\"SplitAfterN\": strings.SplitAfterN,\n\t\t\"SplitN\": strings.SplitN,\n\t\t\"Title\": strings.Title,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"ToTitle\": strings.ToTitle,\n\t\t\"ToUpper\": strings.ToUpper,\n\t\t\"Trim\": strings.Trim,\n\t\t\"TrimLeft\": strings.TrimLeft,\n\t\t\"TrimPrefix\": strings.TrimPrefix,\n\t\t\"TrimRight\": strings.TrimRight,\n\t\t\"TrimSpace\": strings.TrimSpace,\n\t\t\"TrimSuffix\": strings.TrimSuffix,\n\t}\n)\n<commit_msg>Added AGPL header to templatehelper.go<commit_after>\/*\n * Copyright (C) 2017 Christian Muehlhaeuser\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors:\n * Christian Muehlhaeuser <muesli@gmail.com>\n *\/\n\n\/\/ Package templatehelper provides a func-map of common template functions\npackage templatehelper\n\nimport (\n\t\"strings\"\n\t\"text\/template\"\n)\n\n\/\/ FuncMap contains all the common string helpers\nvar (\n\tFuncMap = template.FuncMap{\n\t\t\"Left\": func(values ...interface{}) string {\n\t\t\treturn values[0].(string)[:values[1].(int)]\n\t\t},\n\t\t\"Mid\": func(values ...interface{}) string {\n\t\t\tif len(values) > 2 {\n\t\t\t\treturn values[0].(string)[values[1].(int):values[2].(int)]\n\t\t\t}\n\t\t\treturn values[0].(string)[values[1].(int):]\n\t\t},\n\t\t\"Right\": func(values ...interface{}) string {\n\t\t\treturn values[0].(string)[len(values[0].(string))-values[1].(int):]\n\t\t},\n\t\t\"Last\": func(values ...interface{}) string {\n\t\t\treturn values[0].([]string)[len(values[0].([]string))-1]\n\t\t},\n\t\t\/\/ strings functions\n\t\t\/\/ \"Compare\": strings.Compare, \/\/ 1.5+ only\n\t\t\"Contains\": strings.Contains,\n\t\t\"ContainsAny\": strings.ContainsAny,\n\t\t\"Count\": strings.Count,\n\t\t\"EqualFold\": strings.EqualFold,\n\t\t\"HasPrefix\": strings.HasPrefix,\n\t\t\"HasSuffix\": strings.HasSuffix,\n\t\t\"Index\": strings.Index,\n\t\t\"IndexAny\": strings.IndexAny,\n\t\t\"Join\": strings.Join,\n\t\t\"LastIndex\": strings.LastIndex,\n\t\t\"LastIndexAny\": strings.LastIndexAny,\n\t\t\"Repeat\": strings.Repeat,\n\t\t\"Replace\": strings.Replace,\n\t\t\"Split\": strings.Split,\n\t\t\"SplitAfter\": strings.SplitAfter,\n\t\t\"SplitAfterN\": strings.SplitAfterN,\n\t\t\"SplitN\": strings.SplitN,\n\t\t\"Title\": strings.Title,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"ToTitle\": strings.ToTitle,\n\t\t\"ToUpper\": strings.ToUpper,\n\t\t\"Trim\": strings.Trim,\n\t\t\"TrimLeft\": strings.TrimLeft,\n\t\t\"TrimPrefix\": strings.TrimPrefix,\n\t\t\"TrimRight\": strings.TrimRight,\n\t\t\"TrimSpace\": strings.TrimSpace,\n\t\t\"TrimSuffix\": strings.TrimSuffix,\n\t}\n)\n<|endoftext|>"} {"text":"<commit_before>package rwvfs\n\nimport (\n\t\"code.google.com\/p\/go.tools\/godoc\/vfs\"\n\t\"fmt\"\n\t\"os\"\n\tpathpkg \"path\"\n\t\"path\/filepath\"\n)\n\n\/\/ OS returns an implementation of FileSystem reading from the tree rooted at\n\/\/ root.\nfunc OS(root string) FileSystem {\n\treturn osFS{root, vfs.OS(root)}\n}\n\ntype osFS struct {\n\troot string\n\tvfs.FileSystem\n}\n\n\/\/ resolve is from code.google.com\/p\/go.tools\/godoc\/vfs.\nfunc (fs osFS) resolve(path string) string {\n\t\/\/ Clean the path so that it cannot possibly begin with ..\/.\n\t\/\/ If it did, the result of filepath.Join would be outside the\n\t\/\/ tree rooted at root. We probably won't ever see a path\n\t\/\/ with .. in it, but be safe anyway.\n\tpath = pathpkg.Clean(\"\/\" + path)\n\n\treturn filepath.Join(string(fs.root), path)\n}\n\n\/\/ WriterOpen opens the file at path for writing, creating the file if it\n\/\/ doesn't exist and truncating it otherwise.\nfunc (fs osFS) OpenFile(path string, flag int) (ReadWriteSeekCloser, error) {\n\tf, err := os.OpenFile(fs.resolve(path), flag, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"Open: %s is a directory\", path)\n\t}\n\n\treturn f, nil\n}\n<commit_msg>Fix doc<commit_after>package rwvfs\n\nimport (\n\t\"code.google.com\/p\/go.tools\/godoc\/vfs\"\n\t\"fmt\"\n\t\"os\"\n\tpathpkg \"path\"\n\t\"path\/filepath\"\n)\n\n\/\/ OS returns an implementation of FileSystem reading from the tree rooted at\n\/\/ root.\nfunc OS(root string) FileSystem {\n\treturn osFS{root, vfs.OS(root)}\n}\n\ntype osFS struct {\n\troot string\n\tvfs.FileSystem\n}\n\n\/\/ resolve is from code.google.com\/p\/go.tools\/godoc\/vfs.\nfunc (fs osFS) resolve(path string) string {\n\t\/\/ Clean the path so that it cannot possibly begin with ..\/.\n\t\/\/ If it did, the result of filepath.Join would be outside the\n\t\/\/ tree rooted at root. We probably won't ever see a path\n\t\/\/ with .. in it, but be safe anyway.\n\tpath = pathpkg.Clean(\"\/\" + path)\n\n\treturn filepath.Join(string(fs.root), path)\n}\n\n\/\/ OpenFile opens the file at path for writing, creating the file if it doesn't\n\/\/ exist and truncating it otherwise.\nfunc (fs osFS) OpenFile(path string, flag int) (ReadWriteSeekCloser, error) {\n\tf, err := os.OpenFile(fs.resolve(path), flag, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"Open: %s is a directory\", path)\n\t}\n\n\treturn f, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package termui\n\ntype WrapPanel struct {\n\tBaseElement\n\n\twidth, height int\n\torientation Orientation\n\tchildren []Element\n\tchildpos map[Element]rect\n}\n\ntype rect struct {\n\tx, y, width, height int\n}\n\nvar _ Element = new(WrapPanel)\n\nfunc NewWrapPanel(o Orientation) *WrapPanel {\n\treturn &WrapPanel{\n\t\torientation: o,\n\t\tchildpos: make(map[Element]rect),\n\t}\n}\n\n\/\/ Name returns the constant name of the WrapPanel for css styling.\nfunc (v *WrapPanel) Name() string {\n\treturn \"wrappanel\"\n}\n\n\/\/ Children returns all nested elements.\nfunc (v *WrapPanel) Children() []Element {\n\treturn v.children\n}\n\n\/\/ AddChild adds the given children to the WrapPanel\nfunc (v *WrapPanel) AddChild(e ...Element) {\n\tv.children = append(v.children, e...)\n\tfor _, c := range e {\n\t\tc.SetParent(v)\n\t}\n}\n\nfunc (v *WrapPanel) measureVertical(availableWidth, availableHeight int, arrange bool) (width int, height int) {\n\tcIdx := 0\n\twidth = 0\n\theight = 0\n\tfor cIdx < len(v.children) {\n\t\tcolWidth := 0\n\t\tcolHeight := 0\n\t\tcStart := cIdx\n\t\tcEnd := cIdx\n\n\t\tfor i := cIdx; i < len(v.children); i++ {\n\t\t\tcw, ch := v.children[i].Measure(0, 0)\n\n\t\t\tif colHeight+ch > availableHeight {\n\t\t\t\tif colHeight > height {\n\t\t\t\t\theight = colHeight\n\t\t\t\t}\n\t\t\t\twidth += colWidth\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif arrange {\n\t\t\t\tv.childpos[v.children[i]] = rect{\n\t\t\t\t\tx: width,\n\t\t\t\t\twidth: cw,\n\t\t\t\t\theight: ch,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif cw > colWidth {\n\t\t\t\tcolWidth = cw\n\t\t\t}\n\t\t\tcolHeight += ch\n\t\t\tcEnd = cIdx\n\t\t\tcIdx++\n\t\t}\n\n\t\tif arrange {\n\t\t\ty := 0\n\t\t\tfor i := cStart; i <= cEnd; i++ {\n\t\t\t\tchild := v.children[i]\n\t\t\t\trect := v.childpos[child]\n\t\t\t\tchild.Arrange(rect.width, rect.height)\n\t\t\t\trect.y = y\n\t\t\t\ty += rect.height\n\t\t\t\tv.childpos[child] = rect\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (v *WrapPanel) measureHorizontal(availableWidth, availableHeight int, arrange bool) (width int, height int) {\n\tcIdx := 0\n\twidth = 0\n\theight = 0\n\tfor cIdx < len(v.children) {\n\t\tcolWidth := 0\n\t\tcolHeight := 0\n\t\tcStart := cIdx\n\t\tcEnd := cIdx\n\n\t\tfor i := cIdx; i < len(v.children); i++ {\n\t\t\tcw, ch := v.children[i].Measure(0, 0)\n\n\t\t\tif colWidth+cw > availableWidth {\n\t\t\t\tif colWidth > width {\n\t\t\t\t\twidth = colWidth\n\t\t\t\t}\n\t\t\t\theight += colHeight\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif arrange {\n\t\t\t\tv.childpos[v.children[i]] = rect{\n\t\t\t\t\ty: height,\n\t\t\t\t\twidth: cw,\n\t\t\t\t\theight: ch,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ch > colHeight {\n\t\t\t\tcolHeight = ch\n\t\t\t}\n\t\t\tcolWidth += cw\n\t\t\tcEnd = cIdx\n\t\t\tcIdx++\n\t\t}\n\n\t\tif arrange {\n\t\t\tx := 0\n\t\t\tfor i := cStart; i <= cEnd; i++ {\n\t\t\t\tchild := v.children[i]\n\t\t\t\trect := v.childpos[child]\n\t\t\t\tchild.Arrange(rect.width, rect.height)\n\t\t\t\trect.x = x\n\t\t\t\tx += rect.width\n\t\t\t\tv.childpos[child] = rect\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Measure gets the \"wanted\" size of the element based on the available size\nfunc (v *WrapPanel) Measure(availableWidth, availableHeight int) (width int, height int) {\n\tif v.orientation == Vertical {\n\t\treturn v.measureVertical(availableWidth, availableHeight, false)\n\t} else {\n\t\treturn v.measureHorizontal(availableWidth, availableHeight, false)\n\t}\n}\n\n\/\/ Arrange sets the final size for the Element end tells it to Arrange itself\nfunc (v *WrapPanel) Arrange(finalWidth, finalHeight int) {\n\tv.width, v.height = finalWidth, finalHeight\n\tif v.orientation == Vertical {\n\t\tv.measureVertical(finalWidth, finalHeight, true)\n\t} else {\n\t\tv.measureHorizontal(finalWidth, finalHeight, true)\n\t}\n}\n\n\/\/ Render renders the element on the given Renderer\nfunc (v *WrapPanel) Render(rn Renderer) {\n\tfor el, pos := range v.childpos {\n\t\trn.RenderChild(el, pos.width, pos.height, pos.x, pos.y)\n\t}\n}\n\n\/\/ Width returns the width of the WrapPanel\nfunc (v *WrapPanel) Width() int {\n\treturn v.width\n}\n\n\/\/ Height returns the height of the WrapPanel.\nfunc (v *WrapPanel) Height() int {\n\treturn v.height\n}\n<commit_msg>fixed measure of wrappanel and added clear func.<commit_after>package termui\n\ntype WrapPanel struct {\n\tBaseElement\n\n\twidth, height int\n\torientation Orientation\n\tchildren []Element\n\tchildpos map[Element]rect\n}\n\ntype rect struct {\n\tx, y, width, height int\n}\n\nvar _ Element = new(WrapPanel)\n\nfunc NewWrapPanel(o Orientation) *WrapPanel {\n\treturn &WrapPanel{\n\t\torientation: o,\n\t\tchildpos: make(map[Element]rect),\n\t}\n}\n\n\/\/ Name returns the constant name of the WrapPanel for css styling.\nfunc (v *WrapPanel) Name() string {\n\treturn \"wrappanel\"\n}\n\n\/\/ Children returns all nested elements.\nfunc (v *WrapPanel) Children() []Element {\n\treturn v.children\n}\n\n\/\/ AddChild adds the given children to the WrapPanel\nfunc (v *WrapPanel) AddChild(e ...Element) {\n\tv.children = append(v.children, e...)\n\tfor _, c := range e {\n\t\tc.SetParent(v)\n\t}\n}\n\n\/\/ Removes all children.\nfunc (v *WrapPanel) Clear() {\n\tfor _, c := range v.children {\n\t\tc.SetParent(nil)\n\t}\n\tv.childpos = make(map[Element]rect)\n\tv.children = nil\n}\n\nfunc (v *WrapPanel) measureVertical(availableWidth, availableHeight int, arrange bool) (width int, height int) {\n\tcIdx := 0\n\twidth = 0\n\theight = 0\n\tfor cIdx < len(v.children) {\n\t\tcolWidth := 0\n\t\tcolHeight := 0\n\t\tcStart := cIdx\n\t\tcEnd := cIdx\n\n\t\tfor i := cIdx; i < len(v.children); i++ {\n\t\t\tcw, ch := v.children[i].Measure(0, 0)\n\n\t\t\tif colHeight+ch > availableHeight && availableHeight != 0 {\n\t\t\t\tif colHeight > height {\n\t\t\t\t\theight = colHeight\n\t\t\t\t}\n\t\t\t\twidth += colWidth\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif arrange {\n\t\t\t\tv.childpos[v.children[i]] = rect{\n\t\t\t\t\tx: width,\n\t\t\t\t\twidth: cw,\n\t\t\t\t\theight: ch,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif cw > colWidth {\n\t\t\t\tcolWidth = cw\n\t\t\t}\n\t\t\tcolHeight += ch\n\t\t\tcEnd = cIdx\n\t\t\tcIdx++\n\t\t}\n\n\t\tif arrange {\n\t\t\ty := 0\n\t\t\tfor i := cStart; i <= cEnd; i++ {\n\t\t\t\tchild := v.children[i]\n\t\t\t\trect := v.childpos[child]\n\t\t\t\tchild.Arrange(rect.width, rect.height)\n\t\t\t\trect.y = y\n\t\t\t\ty += rect.height\n\t\t\t\tv.childpos[child] = rect\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (v *WrapPanel) measureHorizontal(availableWidth, availableHeight int, arrange bool) (width int, height int) {\n\tcIdx := 0\n\twidth = 0\n\theight = 0\n\tfor cIdx < len(v.children) {\n\t\tcolWidth := 0\n\t\tcolHeight := 0\n\t\tcStart := cIdx\n\t\tcEnd := cIdx\n\n\t\tfor i := cIdx; i < len(v.children); i++ {\n\t\t\tcw, ch := v.children[i].Measure(0, 0)\n\n\t\t\tif colWidth+cw > availableWidth && availableWidth != 0 {\n\t\t\t\tif colWidth > width {\n\t\t\t\t\twidth = colWidth\n\t\t\t\t}\n\t\t\t\theight += colHeight\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif arrange {\n\t\t\t\tv.childpos[v.children[i]] = rect{\n\t\t\t\t\ty: height,\n\t\t\t\t\twidth: cw,\n\t\t\t\t\theight: ch,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ch > colHeight {\n\t\t\t\tcolHeight = ch\n\t\t\t}\n\t\t\tcolWidth += cw\n\t\t\tcEnd = cIdx\n\t\t\tcIdx++\n\t\t}\n\n\t\tif arrange {\n\t\t\tx := 0\n\t\t\tfor i := cStart; i <= cEnd; i++ {\n\t\t\t\tchild := v.children[i]\n\t\t\t\trect := v.childpos[child]\n\t\t\t\tchild.Arrange(rect.width, rect.height)\n\t\t\t\trect.x = x\n\t\t\t\tx += rect.width\n\t\t\t\tv.childpos[child] = rect\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Measure gets the \"wanted\" size of the element based on the available size\nfunc (v *WrapPanel) Measure(availableWidth, availableHeight int) (width int, height int) {\n\tif v.orientation == Vertical {\n\t\treturn v.measureVertical(availableWidth, availableHeight, false)\n\t} else {\n\t\treturn v.measureHorizontal(availableWidth, availableHeight, false)\n\t}\n}\n\n\/\/ Arrange sets the final size for the Element end tells it to Arrange itself\nfunc (v *WrapPanel) Arrange(finalWidth, finalHeight int) {\n\tv.width, v.height = finalWidth, finalHeight\n\tif v.orientation == Vertical {\n\t\tv.measureVertical(finalWidth, finalHeight, true)\n\t} else {\n\t\tv.measureHorizontal(finalWidth, finalHeight, true)\n\t}\n}\n\n\/\/ Render renders the element on the given Renderer\nfunc (v *WrapPanel) Render(rn Renderer) {\n\tfor el, pos := range v.childpos {\n\t\trn.RenderChild(el, pos.width, pos.height, pos.x, pos.y)\n\t}\n}\n\n\/\/ Width returns the width of the WrapPanel\nfunc (v *WrapPanel) Width() int {\n\treturn v.width\n}\n\n\/\/ Height returns the height of the WrapPanel.\nfunc (v *WrapPanel) Height() int {\n\treturn v.height\n}\n<|endoftext|>"} {"text":"<commit_before>package skip_list\n\nimport \"math\/rand\"\n\ntype SkipList struct {\n\thead *element\n}\n\ntype element struct {\n\tdata Data\n\tnext []*element\n}\n\ntype Data struct {\n\tKey string\n\tValue int\n}\n\nconst p = 0.25\n\nfunc NewSkipList() *SkipList {\n\treturn &SkipList{&element{}}\n}\n\nfunc (l *SkipList) Get(key string) (value int, ok bool) {\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\treturn n.data.Value, true\n\t\t}\n\t\t\/\/ Continue searching the current layer.\n\t\te = n\n\t}\n\treturn 0, false\n}\n\nfunc (l *SkipList) Set(key string, value int) {\n\tvar prevs []*element\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Keep a reference to the previous node; will be useful if it ends with an insertion.\n\t\t\tprevs = append(prevs, e)\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\tn.data.Value = value\n\t\t\treturn\n\t\t}\n\t\t\/\/ Continue searching the current layer.\n\t\te = n\n\t}\n\tnewElement := &element{Data{key, value}, nil}\n\tinsert(l.head, prevs, newElement)\n}\n\nfunc insert(head *element, prevs []*element, e *element) {\n\tfor layer := 0; layer == 0 || coinflip(); layer++ {\n\t\tvar prev *element\n\t\tif layer < len(prevs) {\n\t\t\t\/\/ The element will be inserted in a preexisting layer.\n\t\t\t\/\/ prevs was built in reverse order.\n\t\t\tprev = prevs[len(prevs)-1-layer]\n\t\t} else {\n\t\t\t\/\/ The element will be inserted in a new empty layer.\n\t\t\thead.next = append(head.next, nil)\n\t\t\tprev = head\n\t\t}\n\t\te.next = append(e.next, prev.next[layer])\n\t\tprev.next[layer] = e\n\t}\n}\n\nfunc (l *SkipList) Del(key string) {\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\te.next[layer] = n.next[layer]\n\t\t\tif l.head.next[layer] == nil {\n\t\t\t\t\/\/ Delete empty layer.\n\t\t\t\tl.head.next = l.head.next[:len(l.head.next)-1]\n\t\t\t}\n\t\t\t\/\/ Continue deletion at the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Continue searching the current layer.\n\t\te = n\n\t}\n}\n\nfunc (l *SkipList) All() []Data {\n\ta := []Data{}\n\tif len(l.head.next) < 1 {\n\t\treturn a\n\t}\n\tfor e := l.head.next[0]; e != nil; e = e.next[0] {\n\t\ta = append(a, e.data)\n\t}\n\treturn a\n}\n\nfunc coinflip() bool {\n\treturn rand.Float32() < p\n}\n<commit_msg>[skip_list\/go] Reword comments<commit_after>package skip_list\n\nimport \"math\/rand\"\n\ntype SkipList struct {\n\thead *element\n}\n\ntype element struct {\n\tdata Data\n\tnext []*element\n}\n\ntype Data struct {\n\tKey string\n\tValue int\n}\n\nconst p = 0.25\n\nfunc NewSkipList() *SkipList {\n\treturn &SkipList{&element{}}\n}\n\nfunc (l *SkipList) Get(key string) (value int, ok bool) {\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\treturn n.data.Value, true\n\t\t}\n\t\t\/\/ Keep searching the current layer.\n\t\te = n\n\t}\n\treturn 0, false\n}\n\nfunc (l *SkipList) Set(key string, value int) {\n\tvar prevs []*element\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Keep a reference to the previous node; will be useful if it ends with an insertion.\n\t\t\tprevs = append(prevs, e)\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\tn.data.Value = value\n\t\t\treturn\n\t\t}\n\t\t\/\/ Keep searching the current layer.\n\t\te = n\n\t}\n\tnewElement := &element{Data{key, value}, nil}\n\tinsert(l.head, prevs, newElement)\n}\n\nfunc insert(head *element, prevs []*element, e *element) {\n\tfor layer := 0; layer == 0 || coinflip(); layer++ {\n\t\tvar prev *element\n\t\tif layer < len(prevs) {\n\t\t\t\/\/ The element will be inserted in a preexisting layer.\n\t\t\t\/\/ prevs was built in reverse order.\n\t\t\tprev = prevs[len(prevs)-1-layer]\n\t\t} else {\n\t\t\t\/\/ The element will be inserted in a new empty layer.\n\t\t\thead.next = append(head.next, nil)\n\t\t\tprev = head\n\t\t}\n\t\te.next = append(e.next, prev.next[layer])\n\t\tprev.next[layer] = e\n\t}\n}\n\nfunc (l *SkipList) Del(key string) {\n\te := l.head\n\tlayer := len(l.head.next) - 1\n\tfor layer >= 0 {\n\t\tn := e.next[layer]\n\t\tif n == nil || n.data.Key > key {\n\t\t\t\/\/ Search the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\tif n.data.Key == key {\n\t\t\te.next[layer] = n.next[layer]\n\t\t\tif l.head.next[layer] == nil {\n\t\t\t\t\/\/ Delete empty layer.\n\t\t\t\tl.head.next = l.head.next[:len(l.head.next)-1]\n\t\t\t}\n\t\t\t\/\/ Continue deletion at the next lower layer.\n\t\t\tlayer--\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Keep searching the current layer.\n\t\te = n\n\t}\n}\n\nfunc (l *SkipList) All() []Data {\n\ta := []Data{}\n\tif len(l.head.next) < 1 {\n\t\treturn a\n\t}\n\tfor e := l.head.next[0]; e != nil; e = e.next[0] {\n\t\ta = append(a, e.data)\n\t}\n\treturn a\n}\n\nfunc coinflip() bool {\n\treturn rand.Float32() < p\n}\n<|endoftext|>"} {"text":"<commit_before>package xlogutils\nimport (\n\t\"fmt\"\n\t\"errors\"\n)\n\nconst XLOG_SEG_SIZE_16MB = 16 * 1024 * 1024\nconst XLOG_SEG_SIZE_DEFAULT = XLOG_SEG_SIZE_16MB\n\nvar xlog_seg_size uint = XLOG_SEG_SIZE_DEFAULT\nvar XLogSegmentsPerXLogId uint\n\nvar ParseError = errors.New(\"Parse error\")\n\nfunc init() {\n\tSetXLogSegSize(XLOG_SEG_SIZE_16MB)\n}\n\nfunc XLogSegSize() (uint) {\n\treturn xlog_seg_size\n}\n\nfunc SetXLogSegSize(size uint) {\n\txlog_seg_size = size\n\tXLogSegmentsPerXLogId = 0x100000000 \/ xlog_seg_size\n}\n\ntype Location struct {\n\tXLogId uint\n\tOffset uint\n}\n\ntype Filename struct {\n\tTimeline uint\n\tXLogId uint\n\tSegment uint\n\tIsHistory bool\n}\n\nfunc FilenameFromString(fname string) (*Filename, error) {\n\tret := Filename{}\n\tif r, err := fmt.Sscanf(fname, \"%08X%08X%08X\", &ret.Timeline, &ret.XLogId, &ret.Segment); r != 3 || err != nil {\n\t\tif r, err := fmt.Sscan(\"%08X.history\", &ret.Timeline); r == 1 && err == nil {\n\t\t\tret.Segment = 0\n\t\t\tret.XLogId = 0\n\t\t\tret.IsHistory = true\n\t\t\treturn &ret, nil\n\t\t} else {\n\t\t\treturn nil, ParseError\n\t\t}\n\t}\n\treturn &ret, nil\n}\n\nfunc LocationFromString(loc string) (*Location, error) {\n\tret := Location{}\n\tif r, err := fmt.Sscanf(loc, \"%X\/%X\", &ret.XLogId, &ret.Offset); r != 2 || err != nil {\n\t\treturn nil, ParseError\n\t}\n\n\treturn &ret, nil\n}\n\nfunc (this *Location) Filename(tli uint) *Filename {\n\tret := Filename{}\n\tret.Timeline = tli\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Offset \/ xlog_seg_size\n\treturn &ret\n}\n\nfunc (this *Location) Int() uint64 {\n\treturn uint64(this.XLogId) * 0x100000000 + uint64(this.Offset)\n}\n\nfunc (this *Location) Diff(other *Location) uint64 {\n\tif this.Int() > other.Int() {\n\t\treturn this.Int() - other.Int()\n\t} else {\n\t\treturn other.Int() - this.Int()\n\t}\n}\n\nfunc (this *Filename) String() string {\n\tif (!this.IsHistory) {\n\t\treturn fmt.Sprintf(\"%08x%08X%08X\", this.Timeline, this.XLogId, this.Segment)\n\t} else {\n\t\treturn fmt.Sprintf(\"%08X.history\", this.Timeline)\n\t}\n}\n\nfunc (this *Filename) Next() *Filename {\n\tret := Filename{}\n\tret.Timeline = this.Timeline\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Segment\n\tret.Segment++\n\tif ret.Segment == XLogSegmentsPerXLogId {\n\t\tret.Segment = 0\n\t\tret.XLogId++\n\t}\n\treturn &ret\n}\n\nfunc (this *Filename) Prev() *Filename {\n\tret := Filename{}\n\tret.Timeline = this.Timeline\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Segment\n\tif ret.Segment == 0 {\n\t\tret.Segment = XLogSegmentsPerXLogId\n\t\tret.XLogId--\n\t}\n\tret.Segment--\n\treturn &ret\n}<commit_msg>Added File.Location()<commit_after>package xlogutils\nimport (\n\t\"fmt\"\n\t\"errors\"\n)\n\nconst XLOG_SEG_SIZE_16MB = 16 * 1024 * 1024\nconst XLOG_SEG_SIZE_DEFAULT = XLOG_SEG_SIZE_16MB\n\nvar xlog_seg_size uint = XLOG_SEG_SIZE_DEFAULT\nvar XLogSegmentsPerXLogId uint\n\nvar ParseError = errors.New(\"Parse error\")\n\nfunc init() {\n\tSetXLogSegSize(XLOG_SEG_SIZE_16MB)\n}\n\nfunc XLogSegSize() (uint) {\n\treturn xlog_seg_size\n}\n\nfunc SetXLogSegSize(size uint) {\n\txlog_seg_size = size\n\tXLogSegmentsPerXLogId = 0x100000000 \/ xlog_seg_size\n}\n\ntype Location struct {\n\tXLogId uint\n\tOffset uint\n}\n\ntype Filename struct {\n\tTimeline uint\n\tXLogId uint\n\tSegment uint\n\tIsHistory bool\n}\n\nfunc FilenameFromString(fname string) (*Filename, error) {\n\tret := Filename{}\n\tif r, err := fmt.Sscanf(fname, \"%08X%08X%08X\", &ret.Timeline, &ret.XLogId, &ret.Segment); r != 3 || err != nil {\n\t\tif r, err := fmt.Sscan(\"%08X.history\", &ret.Timeline); r == 1 && err == nil {\n\t\t\tret.Segment = 0\n\t\t\tret.XLogId = 0\n\t\t\tret.IsHistory = true\n\t\t\treturn &ret, nil\n\t\t} else {\n\t\t\treturn nil, ParseError\n\t\t}\n\t}\n\treturn &ret, nil\n}\n\nfunc LocationFromString(loc string) (*Location, error) {\n\tret := Location{}\n\tif r, err := fmt.Sscanf(loc, \"%X\/%X\", &ret.XLogId, &ret.Offset); r != 2 || err != nil {\n\t\treturn nil, ParseError\n\t}\n\n\treturn &ret, nil\n}\n\nfunc (this *Location) Filename(tli uint) *Filename {\n\tret := Filename{}\n\tret.Timeline = tli\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Offset \/ xlog_seg_size\n\treturn &ret\n}\n\nfunc (this *Location) Int() uint64 {\n\treturn uint64(this.XLogId) * 0x100000000 + uint64(this.Offset)\n}\n\nfunc (this *Location) Diff(other *Location) uint64 {\n\tif this.Int() > other.Int() {\n\t\treturn this.Int() - other.Int()\n\t} else {\n\t\treturn other.Int() - this.Int()\n\t}\n}\n\nfunc (this *Filename) String() string {\n\tif (!this.IsHistory) {\n\t\treturn fmt.Sprintf(\"%08x%08X%08X\", this.Timeline, this.XLogId, this.Segment)\n\t} else {\n\t\treturn fmt.Sprintf(\"%08X.history\", this.Timeline)\n\t}\n}\n\nfunc (this *Filename) Location() *Location {\n\tret := Location{}\n\tret.XLogId = this.XLogId\n\tret.Offset = this.Segment * xlog_seg_size\n\treturn &ret\n}\n\nfunc (this *Filename) Next() *Filename {\n\tret := Filename{}\n\tret.Timeline = this.Timeline\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Segment\n\tret.Segment++\n\tif ret.Segment == XLogSegmentsPerXLogId {\n\t\tret.Segment = 0\n\t\tret.XLogId++\n\t}\n\treturn &ret\n}\n\nfunc (this *Filename) Prev() *Filename {\n\tret := Filename{}\n\tret.Timeline = this.Timeline\n\tret.XLogId = this.XLogId\n\tret.Segment = this.Segment\n\tif ret.Segment == 0 {\n\t\tret.Segment = XLogSegmentsPerXLogId\n\t\tret.XLogId--\n\t}\n\tret.Segment--\n\treturn &ret\n}<|endoftext|>"} {"text":"<commit_before>\/\/Copyright 2014 Rana Ian. All rights reserved.\n\/\/Use of this source code is governed by The MIT License\n\/\/found in the accompanying LICENSE file.\n\npackage ora_test\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"gopkg.in\/rana\/ora.v3\"\n)\n\nfunc Test_open_cursors_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\t\/\/ This needs \"GRANT SELECT ANY DICTIONARY TO test\"\n\t\/\/ or at least \"GRANT SELECT ON v_$mystat TO test\".\n\t\/\/ use 'opened cursors current' statistic#=5 to determine opened cursors on oracle server\n\t\/\/ SELECT A.STATISTIC#, A.NAME, B.VALUE\n\t\/\/ FROM V$STATNAME A, V$MYSTAT B\n\t\/\/ WHERE A.STATISTIC# = B.STATISTIC#\n\tstmt, err := testDb.Prepare(\"SELECT VALUE FROM V$MYSTAT WHERE STATISTIC#=5\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar before, after int\n\tif err = stmt.QueryRow().Scan(&before); err != nil {\n\t\tt.Skip(err)\n\t}\n\trounds := 100\n\tfor i := 0; i < rounds; i++ {\n\t\tfunc() {\n\t\t\tstmt, err := testDb.Prepare(\"SELECT 1 FROM user_objects WHERE ROWNUM < 100\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer stmt.Close()\n\t\t\trows, err := stmt.Query()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"SELECT: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer rows.Close()\n\t\t\tj := 0\n\t\t\tfor rows.Next() {\n\t\t\t\tj++\n\t\t\t}\n\t\t\t\/\/t.Logf(\"%d objects, error=%v\", j, rows.Err())\n\t\t}()\n\t}\n\tif err = stmt.QueryRow().Scan(&after); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif after-before >= rounds {\n\t\tt.Errorf(\"before=%d after=%d, awaited less than %d increment!\", before, after, rounds)\n\t\treturn\n\t}\n\tt.Logf(\"before=%d after=%d\", before, after)\n}\n\nfunc TestSelectNull_db(t *testing.T) {\n\tora.Cfg().Log.Rset.BeginRow = true\n\t\/\/enableLogging(t)\n\tvar (\n\t\ts string\n\t\toS ora.String\n\t\ti int64\n\t\toI ora.Int64\n\t\ttim ora.Time\n\t)\n\tfor tN, tC := range []struct {\n\t\tField string\n\t\tDest interface{}\n\t}{\n\t\t{\"''\", &s},\n\t\t{\"''\", &oS},\n\t\t{\"NULL + 0\", &i},\n\t\t{\"NULL + 0\", &oI},\n\t\t{\"SYSDATE + NULL\", &tim},\n\t} {\n\t\tqry := \"SELECT \" + tC.Field + \" x FROM DUAL\"\n\t\trows, err := testDb.Query(qry)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. %s: %v\", tN, qry, err)\n\t\t\treturn\n\t\t}\n\t\tfor rows.Next() {\n\t\t\tif err = rows.Scan(&tC.Dest); err != nil {\n\t\t\t\tt.Errorf(\"%d. Scan: %v\", tN, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif rows.Err() != nil {\n\t\t\tt.Errorf(\"%d. rows: %v\", tN, rows.Err())\n\t\t}\n\t\trows.Close()\n\t}\n}\n\nfunc Test_numberP38S0Identity_db(t *testing.T) {\n\ttableName := tableName()\n\tstmt, err := testDb.Prepare(createTableSql(tableName, 1, numberP38S0Identity, varchar2C48))\n\tif err == nil {\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec()\n\t}\n\tif err != nil {\n\t\tt.Skipf(\"SKIP create table with identity: %v\", err)\n\t\treturn\n\t}\n\tdefer dropTableDB(testDb, t, tableName)\n\n\tstmt, err = testDb.Prepare(fmt.Sprintf(\"insert into %v (c2) values ('go') returning c1 \/*lastInsertId*\/ into :c1\", tableName))\n\tdefer stmt.Close()\n\n\t\/\/ pass nil to Exec when using 'returning into' clause with sql.DB\n\tresult, err := stmt.Exec(nil)\n\ttestErr(err, t)\n\tactual, err := result.LastInsertId()\n\ttestErr(err, t)\n\tif 1 != actual {\n\t\tt.Fatalf(\"LastInsertId: expected(%v), actual(%v)\", 1, actual)\n\t}\n}\n\nfunc Test_numberP38S0_int64_db(t *testing.T) {\n\ttestBindDefineDB(gen_int64(), t, numberP38S0)\n}\n\nfunc Test_numberP38S0Null_int64_db(t *testing.T) {\n\ttestBindDefineDB(gen_int64(), t, numberP38S0Null)\n}\n\nfunc Test_numberP16S15_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, numberP16S15)\n}\n\nfunc Test_numberP16S15Null_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, numberP16S15Null)\n}\n\nfunc Test_binaryDouble_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryDouble)\n}\n\nfunc Test_binaryDoubleNull_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryDoubleNull)\n}\n\nfunc Test_binaryFloat_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryFloat)\n}\n\nfunc Test_binaryFloatNull_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryFloatNull)\n}\n\nfunc Test_floatP126_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, floatP126)\n}\n\nfunc Test_floatP126Null_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, floatP126Null)\n}\n\nfunc Test_date_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_date(), t, dateNotNull)\n}\n\nfunc Test_dateNull_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_date(), t, dateNull)\n}\n\nfunc Test_timestampP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampP9)\n}\n\nfunc Test_timestampP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampP9Null)\n}\n\nfunc Test_timestampTzP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampTzP9)\n}\n\nfunc Test_timestampTzP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampTzP9Null)\n}\n\nfunc Test_timestampLtzP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampLtzP9)\n}\n\nfunc Test_timestampLtzP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampLtzP9Null)\n}\n\nfunc Test_charB48_string_db(t *testing.T) {\n\tenableLogging(t)\n\ttestBindDefineDB(gen_string48(), t, charB48)\n}\n\nfunc Test_charB48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charB48Null)\n}\n\nfunc Test_charC48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charC48)\n}\n\nfunc Test_charC48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charC48Null)\n}\n\nfunc Test_nchar48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, nchar48)\n}\n\nfunc Test_nchar48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, nchar48Null)\n}\n\nfunc Test_varcharB48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharB48)\n}\n\nfunc Test_varcharB48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharB48Null)\n}\n\nfunc Test_varcharC48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharC48)\n}\n\nfunc Test_varcharC48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharC48Null)\n}\n\nfunc Test_varchar2B48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2B48)\n}\n\nfunc Test_varchar2B48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2B48Null)\n}\n\nfunc Test_varchar2C48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2C48)\n}\n\nfunc Test_varchar2C48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2C48Null)\n}\n\nfunc Test_nvarchar248_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nvarchar248)\n}\n\nfunc Test_nvarchar248Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nvarchar248Null)\n}\n\nfunc Test_long_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, long)\n}\n\nfunc Test_longNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, longNull)\n}\n\nfunc Test_clob_string_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\ttestBindDefineDB(gen_string(), t, clob)\n}\n\nfunc Test_clobNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, clobNull)\n}\n\nfunc Test_nclob_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nclob)\n}\n\nfunc Test_nclobNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nclobNull)\n}\n\nfunc Test_charB1_bool_true_db(t *testing.T) {\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charB1)\n}\n\nfunc Test_charB1Null_bool_true_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charB1Null)\n}\n\nfunc Test_charC1_bool_true_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charC1)\n}\n\nfunc Test_charC1Null_bool_true_db(t *testing.T) {\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charC1Null)\n}\n\nfunc Test_longRaw_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, longRaw)\n}\n\nfunc Test_longRawNull_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, longRawNull)\n}\n\nfunc Test_raw2000_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(2000), t, raw2000)\n}\n\nfunc Test_raw2000Null_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(2000), t, raw2000Null)\n}\n\nfunc Test_blob_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, blob)\n}\n\nfunc Test_blobNull_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, blobNull)\n}\n\nfunc TestSysdba(t *testing.T) {\n\tu := os.Getenv(\"GO_ORA_DRV_TEST_SYSDBA_USERNAME\")\n\tp := os.Getenv(\"GO_ORA_DRV_TEST_SYSDBA_PASSWORD\")\n\tif u == \"\" {\n\t\tu = testSesCfg.Username\n\t\tp = testSesCfg.Password\n\t}\n\tdsn := fmt.Sprintf(\"%s\/%s@%s AS SYSDBA\", u, p, testSrvCfg.Dblink)\n\tdb, err := sql.Open(\"ora\", dsn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\tif err := db.Ping(); err != nil {\n\t\tt.Skipf(\"%q: %v\", dsn, err)\n\t}\n}\n\nfunc TestZeroRowsAffected(t *testing.T) {\n\ttableName := tableName()\n\tif _, err := testDb.Exec(\"CREATE TABLE \" + tableName + \" (id NUMBER(3))\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer testDb.Exec(\"DROP TABLE \" + tableName)\n\tres, err := testDb.Exec(\"UPDATE \" + tableName + \" SET id=1 WHERE 1=0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif ra, err := res.RowsAffected(); err != nil {\n\t\tt.Error(err)\n\t} else if ra != 0 {\n\t\tt.Errorf(\"got %d, wanted 0 rows affected!\")\n\t}\n\tif _, err := res.LastInsertId(); err == nil {\n\t\tt.Error(\"wanted error for LastInsertId, got nil\")\n\t}\n}\n<commit_msg>add TestSetMaxConnLifetime<commit_after>\/\/Copyright 2014 Rana Ian. All rights reserved.\n\/\/Use of this source code is governed by The MIT License\n\/\/found in the accompanying LICENSE file.\n\npackage ora_test\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"gopkg.in\/rana\/ora.v3\"\n)\n\nfunc Test_open_cursors_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\t\/\/ This needs \"GRANT SELECT ANY DICTIONARY TO test\"\n\t\/\/ or at least \"GRANT SELECT ON v_$mystat TO test\".\n\t\/\/ use 'opened cursors current' statistic#=5 to determine opened cursors on oracle server\n\t\/\/ SELECT A.STATISTIC#, A.NAME, B.VALUE\n\t\/\/ FROM V$STATNAME A, V$MYSTAT B\n\t\/\/ WHERE A.STATISTIC# = B.STATISTIC#\n\tstmt, err := testDb.Prepare(\"SELECT VALUE FROM V$MYSTAT WHERE STATISTIC#=5\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar before, after int\n\tif err = stmt.QueryRow().Scan(&before); err != nil {\n\t\tt.Skip(err)\n\t}\n\trounds := 100\n\tfor i := 0; i < rounds; i++ {\n\t\tfunc() {\n\t\t\tstmt, err := testDb.Prepare(\"SELECT 1 FROM user_objects WHERE ROWNUM < 100\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer stmt.Close()\n\t\t\trows, err := stmt.Query()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"SELECT: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer rows.Close()\n\t\t\tj := 0\n\t\t\tfor rows.Next() {\n\t\t\t\tj++\n\t\t\t}\n\t\t\t\/\/t.Logf(\"%d objects, error=%v\", j, rows.Err())\n\t\t}()\n\t}\n\tif err = stmt.QueryRow().Scan(&after); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif after-before >= rounds {\n\t\tt.Errorf(\"before=%d after=%d, awaited less than %d increment!\", before, after, rounds)\n\t\treturn\n\t}\n\tt.Logf(\"before=%d after=%d\", before, after)\n}\n\nfunc TestSelectNull_db(t *testing.T) {\n\tora.Cfg().Log.Rset.BeginRow = true\n\t\/\/enableLogging(t)\n\tvar (\n\t\ts string\n\t\toS ora.String\n\t\ti int64\n\t\toI ora.Int64\n\t\ttim ora.Time\n\t)\n\tfor tN, tC := range []struct {\n\t\tField string\n\t\tDest interface{}\n\t}{\n\t\t{\"''\", &s},\n\t\t{\"''\", &oS},\n\t\t{\"NULL + 0\", &i},\n\t\t{\"NULL + 0\", &oI},\n\t\t{\"SYSDATE + NULL\", &tim},\n\t} {\n\t\tqry := \"SELECT \" + tC.Field + \" x FROM DUAL\"\n\t\trows, err := testDb.Query(qry)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. %s: %v\", tN, qry, err)\n\t\t\treturn\n\t\t}\n\t\tfor rows.Next() {\n\t\t\tif err = rows.Scan(&tC.Dest); err != nil {\n\t\t\t\tt.Errorf(\"%d. Scan: %v\", tN, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif rows.Err() != nil {\n\t\t\tt.Errorf(\"%d. rows: %v\", tN, rows.Err())\n\t\t}\n\t\trows.Close()\n\t}\n}\n\nfunc TestSetConnMaxLifetime(t *testing.T) {\n\tvar db *sql.DB\n\tvar err error\n\tdb, err = sql.Open(\"ora\", testConStr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdb.SetMaxIdleConns(2)\n\tdb.SetConnMaxLifetime(1 * time.Second)\n\tdefer db.Close()\n\n\tdone := make(chan struct{})\n\tvar wg sync.WaitGroup\n\tdbRoutine := func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tvar temp int\n\t\t\tdb.QueryRow(\"SELECT 1 FROM DUAL\").Scan(&temp)\n\t\t\tif rand.Int()%10 == 0 {\n\t\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := 0; i < 8; i++ {\n\t\twg.Add(1)\n\t\tgo dbRoutine()\n\t}\n\ttime.Sleep(8 * time.Second)\n\tclose(done)\n\twg.Wait()\n}\n\nfunc Test_numberP38S0Identity_db(t *testing.T) {\n\ttableName := tableName()\n\tstmt, err := testDb.Prepare(createTableSql(tableName, 1, numberP38S0Identity, varchar2C48))\n\tif err == nil {\n\t\tdefer stmt.Close()\n\t\t_, err = stmt.Exec()\n\t}\n\tif err != nil {\n\t\tt.Skipf(\"SKIP create table with identity: %v\", err)\n\t\treturn\n\t}\n\tdefer dropTableDB(testDb, t, tableName)\n\n\tstmt, err = testDb.Prepare(fmt.Sprintf(\"insert into %v (c2) values ('go') returning c1 \/*lastInsertId*\/ into :c1\", tableName))\n\tdefer stmt.Close()\n\n\t\/\/ pass nil to Exec when using 'returning into' clause with sql.DB\n\tresult, err := stmt.Exec(nil)\n\ttestErr(err, t)\n\tactual, err := result.LastInsertId()\n\ttestErr(err, t)\n\tif 1 != actual {\n\t\tt.Fatalf(\"LastInsertId: expected(%v), actual(%v)\", 1, actual)\n\t}\n}\n\nfunc Test_numberP38S0_int64_db(t *testing.T) {\n\ttestBindDefineDB(gen_int64(), t, numberP38S0)\n}\n\nfunc Test_numberP38S0Null_int64_db(t *testing.T) {\n\ttestBindDefineDB(gen_int64(), t, numberP38S0Null)\n}\n\nfunc Test_numberP16S15_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, numberP16S15)\n}\n\nfunc Test_numberP16S15Null_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, numberP16S15Null)\n}\n\nfunc Test_binaryDouble_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryDouble)\n}\n\nfunc Test_binaryDoubleNull_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryDoubleNull)\n}\n\nfunc Test_binaryFloat_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryFloat)\n}\n\nfunc Test_binaryFloatNull_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, binaryFloatNull)\n}\n\nfunc Test_floatP126_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, floatP126)\n}\n\nfunc Test_floatP126Null_float64_db(t *testing.T) {\n\ttestBindDefineDB(gen_float64(), t, floatP126Null)\n}\n\nfunc Test_date_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_date(), t, dateNotNull)\n}\n\nfunc Test_dateNull_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_date(), t, dateNull)\n}\n\nfunc Test_timestampP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampP9)\n}\n\nfunc Test_timestampP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampP9Null)\n}\n\nfunc Test_timestampTzP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampTzP9)\n}\n\nfunc Test_timestampTzP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampTzP9Null)\n}\n\nfunc Test_timestampLtzP9_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampLtzP9)\n}\n\nfunc Test_timestampLtzP9Null_time_db(t *testing.T) {\n\ttestBindDefineDB(gen_time(), t, timestampLtzP9Null)\n}\n\nfunc Test_charB48_string_db(t *testing.T) {\n\tenableLogging(t)\n\ttestBindDefineDB(gen_string48(), t, charB48)\n}\n\nfunc Test_charB48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charB48Null)\n}\n\nfunc Test_charC48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charC48)\n}\n\nfunc Test_charC48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, charC48Null)\n}\n\nfunc Test_nchar48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, nchar48)\n}\n\nfunc Test_nchar48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string48(), t, nchar48Null)\n}\n\nfunc Test_varcharB48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharB48)\n}\n\nfunc Test_varcharB48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharB48Null)\n}\n\nfunc Test_varcharC48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharC48)\n}\n\nfunc Test_varcharC48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varcharC48Null)\n}\n\nfunc Test_varchar2B48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2B48)\n}\n\nfunc Test_varchar2B48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2B48Null)\n}\n\nfunc Test_varchar2C48_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2C48)\n}\n\nfunc Test_varchar2C48Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, varchar2C48Null)\n}\n\nfunc Test_nvarchar248_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nvarchar248)\n}\n\nfunc Test_nvarchar248Null_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nvarchar248Null)\n}\n\nfunc Test_long_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, long)\n}\n\nfunc Test_longNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, longNull)\n}\n\nfunc Test_clob_string_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\ttestBindDefineDB(gen_string(), t, clob)\n}\n\nfunc Test_clobNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, clobNull)\n}\n\nfunc Test_nclob_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nclob)\n}\n\nfunc Test_nclobNull_string_db(t *testing.T) {\n\ttestBindDefineDB(gen_string(), t, nclobNull)\n}\n\nfunc Test_charB1_bool_true_db(t *testing.T) {\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charB1)\n}\n\nfunc Test_charB1Null_bool_true_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charB1Null)\n}\n\nfunc Test_charC1_bool_true_db(t *testing.T) {\n\t\/\/enableLogging(t)\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charC1)\n}\n\nfunc Test_charC1Null_bool_true_db(t *testing.T) {\n\tdefer setC1Bool()()\n\ttestBindDefineDB(gen_boolTrue(), t, charC1Null)\n}\n\nfunc Test_longRaw_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, longRaw)\n}\n\nfunc Test_longRawNull_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, longRawNull)\n}\n\nfunc Test_raw2000_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(2000), t, raw2000)\n}\n\nfunc Test_raw2000Null_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(2000), t, raw2000Null)\n}\n\nfunc Test_blob_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, blob)\n}\n\nfunc Test_blobNull_bytes_db(t *testing.T) {\n\ttestBindDefineDB(gen_bytes(9), t, blobNull)\n}\n\nfunc TestSysdba(t *testing.T) {\n\tu := os.Getenv(\"GO_ORA_DRV_TEST_SYSDBA_USERNAME\")\n\tp := os.Getenv(\"GO_ORA_DRV_TEST_SYSDBA_PASSWORD\")\n\tif u == \"\" {\n\t\tu = testSesCfg.Username\n\t\tp = testSesCfg.Password\n\t}\n\tdsn := fmt.Sprintf(\"%s\/%s@%s AS SYSDBA\", u, p, testSrvCfg.Dblink)\n\tdb, err := sql.Open(\"ora\", dsn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\tif err := db.Ping(); err != nil {\n\t\tt.Skipf(\"%q: %v\", dsn, err)\n\t}\n}\n\nfunc TestZeroRowsAffected(t *testing.T) {\n\ttableName := tableName()\n\tif _, err := testDb.Exec(\"CREATE TABLE \" + tableName + \" (id NUMBER(3))\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer testDb.Exec(\"DROP TABLE \" + tableName)\n\tres, err := testDb.Exec(\"UPDATE \" + tableName + \" SET id=1 WHERE 1=0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif ra, err := res.RowsAffected(); err != nil {\n\t\tt.Error(err)\n\t} else if ra != 0 {\n\t\tt.Errorf(\"got %d, wanted 0 rows affected!\")\n\t}\n\tif _, err := res.LastInsertId(); err == nil {\n\t\tt.Error(\"wanted error for LastInsertId, got nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * ZDNS Copyright 2016 Regents of the University of Michigan\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy\n * of the License at http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n * implied. See the License for the specific language governing\n * permissions and limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/zmap\/zdns\"\n\t_ \"github.com\/zmap\/zdns\/modules\/alookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/axfr\"\n\t_ \"github.com\/zmap\/zdns\/modules\/dmarc\"\n\t_ \"github.com\/zmap\/zdns\/modules\/miekg\"\n\t_ \"github.com\/zmap\/zdns\/modules\/mxlookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/nslookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/spf\"\n\n\t_ \"github.com\/zmap\/zdns\/iohandlers\/file\"\n)\n\nfunc main() {\n\n\tvar gc zdns.GlobalConf\n\t\/\/ global flags relevant to every lookup module\n\tflags := flag.NewFlagSet(\"flags\", flag.ExitOnError)\n\tflags.IntVar(&gc.Threads, \"threads\", 1000, \"number of lightweight go threads\")\n\tflags.IntVar(&gc.GoMaxProcs, \"go-processes\", 0, \"number of OS processes (GOMAXPROCS)\")\n\tflags.StringVar(&gc.NamePrefix, \"prefix\", \"\", \"name to be prepended to what's passed in (e.g., www.)\")\n\tflags.BoolVar(&gc.AlexaFormat, \"alexa\", false, \"is input file from Alexa Top Million download\")\n\tflags.BoolVar(&gc.IterativeResolution, \"iterative\", false, \"Perform own iteration instead of relying on recursive resolver\")\n\tflags.StringVar(&gc.InputFilePath, \"input-file\", \"-\", \"names to read\")\n\tflags.StringVar(&gc.OutputFilePath, \"output-file\", \"-\", \"where should JSON output be saved\")\n\tflags.StringVar(&gc.MetadataFilePath, \"metadata-file\", \"\", \"where should JSON metadata be saved\")\n\tflags.StringVar(&gc.LogFilePath, \"log-file\", \"\", \"where should JSON logs be saved\")\n\n\tflags.StringVar(&gc.ResultVerbosity, \"result-verbosity\", \"normal\", \"Sets verbosity of each output record. Options: short, normal, long, trace\")\n\tflags.StringVar(&gc.IncludeInOutput, \"include-fields\", \"\", \"Comma separated list of fields to additionally output beyond result verbosity. Options: class, protocol, ttl, resolver, flags\")\n\n\tflags.IntVar(&gc.Verbosity, \"verbosity\", 3, \"log verbosity: 1 (lowest)--5 (highest)\")\n\tflags.IntVar(&gc.Retries, \"retries\", 1, \"how many times should zdns retry query if timeout or temporary failure\")\n\tflags.IntVar(&gc.MaxDepth, \"max-depth\", 10, \"how deep should we recurse when performing iterative lookups\")\n\tflags.IntVar(&gc.CacheSize, \"cache-size\", 10000, \"how many items can be stored in internal recursive cache\")\n\tflags.StringVar(&gc.InputHandler, \"input-handler\", \"file\", \"handler to input names\")\n\tflags.StringVar(&gc.OutputHandler, \"output-handler\", \"file\", \"handler to output names\")\n\tflags.BoolVar(&gc.TCPOnly, \"tcp-only\", false, \"Only perform lookups over TCP\")\n\tflags.BoolVar(&gc.UDPOnly, \"udp-only\", false, \"Only perform lookups over UDP\")\n\tservers_string := flags.String(\"name-servers\", \"\", \"comma-delimited list of DNS servers to use. If no port is specified, defaults to 53.\")\n\tconfig_file := flags.String(\"conf-file\", \"\/etc\/resolv.conf\", \"config file for DNS servers\")\n\ttimeout := flags.Int(\"timeout\", 15, \"timeout for resolving an individual name\")\n\titerationTimeout := flags.Int(\"iteration-timeout\", 4, \"timeout for resolving a single iteration in an iterative query\")\n\tclass_string := flags.String(\"class\", \"INET\", \"DNS class to query. Options: INET, CSNET, CHAOS, HESIOD, NONE, ANY. Default: INET.\")\n\tnanoSeconds := flags.Bool(\"nanoseconds\", false, \"Use nanosecond resolution timestamps\")\n\t\/\/ allow module to initialize and add its own flags before we parse\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"No lookup module specified. Valid modules: \", zdns.ValidlookupsString())\n\t}\n\tgc.Module = strings.ToUpper(os.Args[1])\n\tfactory := zdns.GetLookup(gc.Module)\n\tif factory == nil {\n\t\tflags.Parse(os.Args[1:])\n\t\tlog.Fatal(\"Invalid lookup module specified. Valid modules: \", zdns.ValidlookupsString())\n\t}\n\tfactory.AddFlags(flags)\n\tflags.Parse(os.Args[2:])\n\t\/\/ Do some basic sanity checking\n\t\/\/ setup global logging\n\tif gc.LogFilePath != \"\" {\n\t\tf, err := os.OpenFile(gc.LogFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to open log file (%s): %s\", gc.LogFilePath, err.Error())\n\t\t}\n\t\tlog.SetOutput(f)\n\t}\n\t\/\/ Translate the assigned verbosity level to a logrus log level.\n\tswitch gc.Verbosity {\n\tcase 1: \/\/ Fatal\n\t\tlog.SetLevel(log.FatalLevel)\n\tcase 2: \/\/ Error\n\t\tlog.SetLevel(log.ErrorLevel)\n\tcase 3: \/\/ Warnings (default)\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase 4: \/\/ Information\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase 5: \/\/ Debugging\n\t\tlog.SetLevel(log.DebugLevel)\n\tdefault:\n\t\tlog.Fatal(\"Unknown verbosity level specified. Must be between 1 (lowest)--5 (highest)\")\n\t}\n\t\/\/ complete post facto global initialization based on command line arguments\n\tgc.Timeout = time.Duration(time.Second * time.Duration(*timeout))\n\tgc.IterationTimeout = time.Duration(time.Second * time.Duration(*iterationTimeout))\n\t\/\/ class initialization\n\tswitch strings.ToUpper(*class_string) {\n\tcase \"INET\", \"IN\":\n\t\tgc.Class = dns.ClassINET\n\tcase \"CSNET\", \"CS\":\n\t\tgc.Class = dns.ClassCSNET\n\tcase \"CHAOS\", \"CH\":\n\t\tgc.Class = dns.ClassCHAOS\n\tcase \"HESIOD\", \"HS\":\n\t\tgc.Class = dns.ClassHESIOD\n\tcase \"NONE\":\n\t\tgc.Class = dns.ClassNONE\n\tcase \"ANY\":\n\t\tgc.Class = dns.ClassANY\n\tdefault:\n\t\tlog.Fatal(\"Unknown record class specified. Valid valued are INET (default), CSNET, CHAOS, HESIOD, NONE, ANY\")\n\n\t}\n\tif *servers_string == \"\" {\n\t\t\/\/ if we're doing recursive resolution, figure out default OS name servers\n\t\t\/\/ otherwise, use the set of 13 root name servers\n\t\tif gc.IterativeResolution {\n\t\t\tgc.NameServers = zdns.RootServers[:]\n\t\t} else {\n\t\t\tns, err := zdns.GetDNSServers(*config_file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"Unable to fetch correct name servers:\", err.Error())\n\t\t\t}\n\t\t\tgc.NameServers = ns\n\t\t}\n\t\tgc.NameServersSpecified = false\n\t\tlog.Info(\"no name servers specified. will use: \", strings.Join(gc.NameServers, \", \"))\n\t} else {\n\t\tns := strings.Split(*servers_string, \",\")\n\t\tfor i, s := range ns {\n\t\t\tif !strings.Contains(s, \":\") {\n\t\t\t\tns[i] = s + \":53\"\n\t\t\t}\n\t\t}\n\t\tgc.NameServers = ns\n\t\tgc.NameServersSpecified = true\n\t}\n\tif *nanoSeconds {\n\t\tgc.TimeFormat = time.RFC3339Nano\n\t} else {\n\t\tgc.TimeFormat = time.RFC3339\n\t}\n\tif gc.GoMaxProcs < 0 {\n\t\tlog.Fatal(\"Invalid argument for --go-processes. Must be >1.\")\n\t}\n\tif gc.GoMaxProcs != 0 {\n\t\truntime.GOMAXPROCS(gc.GoMaxProcs)\n\t}\n\tif gc.UDPOnly && gc.TCPOnly {\n\t\tlog.Fatal(\"TCP Only and UDP Only are conflicting\")\n\t}\n\t\/\/ Output Groups are defined by a base + any additional fields that the user wants\n\tgroups := strings.Split(gc.IncludeInOutput, \",\")\n\tif gc.ResultVerbosity != \"short\" && gc.ResultVerbosity != \"normal\" && gc.ResultVerbosity != \"long\" && gc.ResultVerbosity != \"trace\" {\n\t\tlog.Fatal(\"Invalid result verbosity. Options: short, normal, long, trace\")\n\t}\n\n\tgc.OutputGroups = append(gc.OutputGroups, gc.ResultVerbosity)\n\tgc.OutputGroups = append(gc.OutputGroups, groups...)\n\n\tif len(flags.Args()) > 0 {\n\t\tstat, _ := os.Stdin.Stat()\n\t\t\/\/ If stdin is piped from the terminal, and we havent specified a file, and if we have unparsed args\n\t\t\/\/ use them for a dig like reslution\n\t\tif (stat.Mode()&os.ModeCharDevice) != 0 && gc.InputFilePath == \"-\" && len(flags.Args()) == 1 {\n\t\t\tgc.PassedName = flags.Args()[0]\n\t\t} else {\n\t\t\tlog.Fatal(\"Unused command line flags: \", flags.Args())\n\t\t}\n\t}\n\n\t\/\/ some modules require multiple passes over a file (this is really just the case for zone files)\n\tif !factory.AllowStdIn() && gc.InputFilePath == \"-\" {\n\t\tlog.Fatal(\"Specified module does not allow reading from stdin\")\n\t}\n\n\t\/\/ allow the factory to initialize itself\n\tif err := factory.Initialize(&gc); err != nil {\n\t\tlog.Fatal(\"Factory was unable to initialize:\", err.Error())\n\t}\n\t\/\/ run it.\n\tif err := zdns.DoLookups(&factory, &gc); err != nil {\n\t\tlog.Fatal(\"Unable to run lookups:\", err.Error())\n\t}\n\t\/\/ allow the factory to initialize itself\n\tif err := factory.Finalize(); err != nil {\n\t\tlog.Fatal(\"Factory was unable to finalize:\", err.Error())\n\t}\n}\n<commit_msg>Add option which passes resolvers using file #185 (#187)<commit_after>\/*\n * ZDNS Copyright 2016 Regents of the University of Michigan\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy\n * of the License at http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n * implied. See the License for the specific language governing\n * permissions and limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/miekg\/dns\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/zmap\/zdns\"\n\t_ \"github.com\/zmap\/zdns\/modules\/alookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/axfr\"\n\t_ \"github.com\/zmap\/zdns\/modules\/dmarc\"\n\t_ \"github.com\/zmap\/zdns\/modules\/miekg\"\n\t_ \"github.com\/zmap\/zdns\/modules\/mxlookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/nslookup\"\n\t_ \"github.com\/zmap\/zdns\/modules\/spf\"\n\n\t_ \"github.com\/zmap\/zdns\/iohandlers\/file\"\n)\n\nfunc main() {\n\n\tvar gc zdns.GlobalConf\n\t\/\/ global flags relevant to every lookup module\n\tflags := flag.NewFlagSet(\"flags\", flag.ExitOnError)\n\tflags.IntVar(&gc.Threads, \"threads\", 1000, \"number of lightweight go threads\")\n\tflags.IntVar(&gc.GoMaxProcs, \"go-processes\", 0, \"number of OS processes (GOMAXPROCS)\")\n\tflags.StringVar(&gc.NamePrefix, \"prefix\", \"\", \"name to be prepended to what's passed in (e.g., www.)\")\n\tflags.BoolVar(&gc.AlexaFormat, \"alexa\", false, \"is input file from Alexa Top Million download\")\n\tflags.BoolVar(&gc.IterativeResolution, \"iterative\", false, \"Perform own iteration instead of relying on recursive resolver\")\n\tflags.StringVar(&gc.InputFilePath, \"input-file\", \"-\", \"names to read\")\n\tflags.StringVar(&gc.OutputFilePath, \"output-file\", \"-\", \"where should JSON output be saved\")\n\tflags.StringVar(&gc.MetadataFilePath, \"metadata-file\", \"\", \"where should JSON metadata be saved\")\n\tflags.StringVar(&gc.LogFilePath, \"log-file\", \"\", \"where should JSON logs be saved\")\n\n\tflags.StringVar(&gc.ResultVerbosity, \"result-verbosity\", \"normal\", \"Sets verbosity of each output record. Options: short, normal, long, trace\")\n\tflags.StringVar(&gc.IncludeInOutput, \"include-fields\", \"\", \"Comma separated list of fields to additionally output beyond result verbosity. Options: class, protocol, ttl, resolver, flags\")\n\n\tflags.IntVar(&gc.Verbosity, \"verbosity\", 3, \"log verbosity: 1 (lowest)--5 (highest)\")\n\tflags.IntVar(&gc.Retries, \"retries\", 1, \"how many times should zdns retry query if timeout or temporary failure\")\n\tflags.IntVar(&gc.MaxDepth, \"max-depth\", 10, \"how deep should we recurse when performing iterative lookups\")\n\tflags.IntVar(&gc.CacheSize, \"cache-size\", 10000, \"how many items can be stored in internal recursive cache\")\n\tflags.StringVar(&gc.InputHandler, \"input-handler\", \"file\", \"handler to input names\")\n\tflags.StringVar(&gc.OutputHandler, \"output-handler\", \"file\", \"handler to output names\")\n\tflags.BoolVar(&gc.TCPOnly, \"tcp-only\", false, \"Only perform lookups over TCP\")\n\tflags.BoolVar(&gc.UDPOnly, \"udp-only\", false, \"Only perform lookups over UDP\")\n\tservers_string := flags.String(\"name-servers\", \"\", \"List of DNS servers to use. Can be passed as comma-delimited string or via @\/path\/to\/file. If no port is specified, defaults to 53.\")\n\tconfig_file := flags.String(\"conf-file\", \"\/etc\/resolv.conf\", \"config file for DNS servers\")\n\ttimeout := flags.Int(\"timeout\", 15, \"timeout for resolving an individual name\")\n\titerationTimeout := flags.Int(\"iteration-timeout\", 4, \"timeout for resolving a single iteration in an iterative query\")\n\tclass_string := flags.String(\"class\", \"INET\", \"DNS class to query. Options: INET, CSNET, CHAOS, HESIOD, NONE, ANY. Default: INET.\")\n\tnanoSeconds := flags.Bool(\"nanoseconds\", false, \"Use nanosecond resolution timestamps\")\n\t\/\/ allow module to initialize and add its own flags before we parse\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"No lookup module specified. Valid modules: \", zdns.ValidlookupsString())\n\t}\n\tgc.Module = strings.ToUpper(os.Args[1])\n\tfactory := zdns.GetLookup(gc.Module)\n\tif factory == nil {\n\t\tflags.Parse(os.Args[1:])\n\t\tlog.Fatal(\"Invalid lookup module specified. Valid modules: \", zdns.ValidlookupsString())\n\t}\n\tfactory.AddFlags(flags)\n\tflags.Parse(os.Args[2:])\n\t\/\/ Do some basic sanity checking\n\t\/\/ setup global logging\n\tif gc.LogFilePath != \"\" {\n\t\tf, err := os.OpenFile(gc.LogFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to open log file (%s): %s\", gc.LogFilePath, err.Error())\n\t\t}\n\t\tlog.SetOutput(f)\n\t}\n\t\/\/ Translate the assigned verbosity level to a logrus log level.\n\tswitch gc.Verbosity {\n\tcase 1: \/\/ Fatal\n\t\tlog.SetLevel(log.FatalLevel)\n\tcase 2: \/\/ Error\n\t\tlog.SetLevel(log.ErrorLevel)\n\tcase 3: \/\/ Warnings (default)\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase 4: \/\/ Information\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase 5: \/\/ Debugging\n\t\tlog.SetLevel(log.DebugLevel)\n\tdefault:\n\t\tlog.Fatal(\"Unknown verbosity level specified. Must be between 1 (lowest)--5 (highest)\")\n\t}\n\t\/\/ complete post facto global initialization based on command line arguments\n\tgc.Timeout = time.Duration(time.Second * time.Duration(*timeout))\n\tgc.IterationTimeout = time.Duration(time.Second * time.Duration(*iterationTimeout))\n\t\/\/ class initialization\n\tswitch strings.ToUpper(*class_string) {\n\tcase \"INET\", \"IN\":\n\t\tgc.Class = dns.ClassINET\n\tcase \"CSNET\", \"CS\":\n\t\tgc.Class = dns.ClassCSNET\n\tcase \"CHAOS\", \"CH\":\n\t\tgc.Class = dns.ClassCHAOS\n\tcase \"HESIOD\", \"HS\":\n\t\tgc.Class = dns.ClassHESIOD\n\tcase \"NONE\":\n\t\tgc.Class = dns.ClassNONE\n\tcase \"ANY\":\n\t\tgc.Class = dns.ClassANY\n\tdefault:\n\t\tlog.Fatal(\"Unknown record class specified. Valid valued are INET (default), CSNET, CHAOS, HESIOD, NONE, ANY\")\n\n\t}\n\tif *servers_string == \"\" {\n\t\t\/\/ if we're doing recursive resolution, figure out default OS name servers\n\t\t\/\/ otherwise, use the set of 13 root name servers\n\t\tif gc.IterativeResolution {\n\t\t\tgc.NameServers = zdns.RootServers[:]\n\t\t} else {\n\t\t\tns, err := zdns.GetDNSServers(*config_file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"Unable to fetch correct name servers:\", err.Error())\n\t\t\t}\n\t\t\tgc.NameServers = ns\n\t\t}\n\t\tgc.NameServersSpecified = false\n\t\tlog.Info(\"no name servers specified. will use: \", strings.Join(gc.NameServers, \", \"))\n\t} else {\n\t\tvar ns []string\n\t\tif (*servers_string)[0] == '@' {\n\t\t\tfilepath := (*servers_string)[1:]\n\t\t\tf, err := ioutil.ReadFile(filepath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to read file (%s): %s\", filepath, err.Error())\n\t\t\t}\n\t\t\tif len(f) == 0 {\n\t\t\t\tlog.Fatalf(\"Emtpy file (%s)\", filepath)\n\t\t\t}\n\t\t\tns = strings.Split(strings.Trim(string(f), \"\\n\"), \"\\n\")\n\t\t} else {\n\t\t\tns = strings.Split(*servers_string, \",\")\n\t\t}\n\t\tfor i, s := range ns {\n\t\t\tif !strings.Contains(s, \":\") {\n\t\t\t\tns[i] = strings.TrimSpace(s) + \":53\"\n\t\t\t}\n\t\t}\n\t\tgc.NameServers = ns\n\t\tgc.NameServersSpecified = true\n\t}\n\tif *nanoSeconds {\n\t\tgc.TimeFormat = time.RFC3339Nano\n\t} else {\n\t\tgc.TimeFormat = time.RFC3339\n\t}\n\tif gc.GoMaxProcs < 0 {\n\t\tlog.Fatal(\"Invalid argument for --go-processes. Must be >1.\")\n\t}\n\tif gc.GoMaxProcs != 0 {\n\t\truntime.GOMAXPROCS(gc.GoMaxProcs)\n\t}\n\tif gc.UDPOnly && gc.TCPOnly {\n\t\tlog.Fatal(\"TCP Only and UDP Only are conflicting\")\n\t}\n\t\/\/ Output Groups are defined by a base + any additional fields that the user wants\n\tgroups := strings.Split(gc.IncludeInOutput, \",\")\n\tif gc.ResultVerbosity != \"short\" && gc.ResultVerbosity != \"normal\" && gc.ResultVerbosity != \"long\" && gc.ResultVerbosity != \"trace\" {\n\t\tlog.Fatal(\"Invalid result verbosity. Options: short, normal, long, trace\")\n\t}\n\n\tgc.OutputGroups = append(gc.OutputGroups, gc.ResultVerbosity)\n\tgc.OutputGroups = append(gc.OutputGroups, groups...)\n\n\tif len(flags.Args()) > 0 {\n\t\tstat, _ := os.Stdin.Stat()\n\t\t\/\/ If stdin is piped from the terminal, and we havent specified a file, and if we have unparsed args\n\t\t\/\/ use them for a dig like reslution\n\t\tif (stat.Mode()&os.ModeCharDevice) != 0 && gc.InputFilePath == \"-\" && len(flags.Args()) == 1 {\n\t\t\tgc.PassedName = flags.Args()[0]\n\t\t} else {\n\t\t\tlog.Fatal(\"Unused command line flags: \", flags.Args())\n\t\t}\n\t}\n\n\t\/\/ some modules require multiple passes over a file (this is really just the case for zone files)\n\tif !factory.AllowStdIn() && gc.InputFilePath == \"-\" {\n\t\tlog.Fatal(\"Specified module does not allow reading from stdin\")\n\t}\n\n\t\/\/ allow the factory to initialize itself\n\tif err := factory.Initialize(&gc); err != nil {\n\t\tlog.Fatal(\"Factory was unable to initialize:\", err.Error())\n\t}\n\t\/\/ run it.\n\tif err := zdns.DoLookups(&factory, &gc); err != nil {\n\t\tlog.Fatal(\"Unable to run lookups:\", err.Error())\n\t}\n\t\/\/ allow the factory to initialize itself\n\tif err := factory.Finalize(); err != nil {\n\t\tlog.Fatal(\"Factory was unable to finalize:\", err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package dns\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Parse the $GENERATE statement as used in BIND9 zones.\n\/\/ See http:\/\/www.zytrax.com\/books\/dns\/ch8\/generate.html for instance.\n\/\/ We are called after '$GENERATE '. After which we expect:\n\/\/ * the range (12-24\/2)\n\/\/ * lhs (ownername)\n\/\/ * [[ttl][class]]\n\/\/ * type\n\/\/ * rhs (rdata)\nfunc generate(l lex, c chan lex, t chan Token, o string) string {\n\tstep := 1\n\tif i := strings.IndexAny(l.token, \"\/\"); i != -1 {\n\t\tif i+1 == len(l.token) {\n\t\t\treturn \"bad step in $GENERATE range\"\n\t\t}\n\t\tif s, e := strconv.Atoi(l.token[i+1:]); e != nil {\n\t\t\treturn \"bad step in $GENERATE range\"\n\t\t} else {\n\t\t\tif s < 0 {\n\t\t\t\treturn \"bad step in $GENERATE range\"\n\t\t\t}\n\t\t\tstep = s\n\t\t}\n\t\tl.token = l.token[:i]\n\t}\n\tsx := strings.SplitN(l.token, \"-\", 2)\n\tif len(sx) != 2 {\n\t\treturn \"bad start-stop in $GENERATE range\"\n\t}\n\tstart, err := strconv.Atoi(sx[0])\n\tif err != nil {\n\t\treturn \"bad start in $GENERATE range\"\n\t}\n\tend, err := strconv.Atoi(sx[1])\n\tif err != nil {\n\t\treturn \"bad stop in $GENERATE range\"\n\t}\n\tif end < 0 || start < 0 || end <= start {\n\t\treturn \"bad range in $GENERATE range\"\n\t}\n\n\t<-c \/\/ _BLANK\n\t\/\/ Create a complete new string, which we then parse again.\n\ts := \"\"\nBuildRR:\n\tl = <-c\n\tif l.value != _NEWLINE && l.value != _EOF {\n\t\ts += l.token\n\t\tgoto BuildRR\n\t}\n\tfor i := start; i <= end; i += step {\n\t\tvar (\n\t\t\tescape bool\n\t\t\tdom string\n\t\t\tmod string\n\t\t\toffset int\n\t\t\terr error\n\t\t)\n\n\t\tfor j := 0; j < len(s); j++ { \/\/ No 'range' because we need to jump around\n\t\t\tswitch s[j] {\n\t\t\tcase '\\\\':\n\t\t\t\tif escape {\n\t\t\t\t\tdom += \"\\\\\"\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tescape = true\n\t\t\tcase '$':\n\t\t\t\tmod = \"%d\"\n\t\t\t\toffset = 0\n\t\t\t\tif escape {\n\t\t\t\t\tdom += \"$\"\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tescape = false\n\t\t\t\tif j+1 >= len(s) { \/\/ End of the string\n\t\t\t\t\tdom += fmt.Sprintf(mod, i+offset)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tif s[j+1] == '$' {\n\t\t\t\t\t\tdom += \"$\"\n\t\t\t\t\t\tj++\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ Search for { and }\n\t\t\t\tif s[j+1] == '{' { \/\/ Modifier block\n\t\t\t\t\tsep := strings.Index(s[j+2:], \"}\")\n\t\t\t\t\tif sep == -1 {\n\t\t\t\t\t\treturn \"bad modifier in $GENERATE\"\n\t\t\t\t\t}\n\t\t\t\t\t\/\/println(\"checking\", s[j+2:j+2+sep])\n\t\t\t\t\tmod, offset, err = modToPrintf(s[j+2 : j+2+sep])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"bad modifier in $GENERATE\"\n\t\t\t\t\t}\n\t\t\t\t\tj += 2 + sep \/\/ Jump to it\n\t\t\t\t}\n\t\t\t\t\/\/println(\"mod\", mod)\n\t\t\t\tdom += fmt.Sprintf(mod, i+offset)\n\t\t\tdefault:\n\t\t\t\tif escape { \/\/ Pretty useless here\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdom += string(s[j])\n\t\t\t}\n\t\t}\n\t\t\/\/ Re-parse the RR and send it on the current channel t\n\t\trx, err := NewRR(\"$ORIGIN \" + o + \"\\n\" + dom)\n\t\tif err != nil {\n\t\t\treturn err.(*ParseError).err\n\t\t}\n\t\tt <- Token{RR: rx}\n\t}\n\treturn \"\"\n}\n\n\/\/ Convert a $GENERATE modifier 0,0,d to something Printf can deal with.\nfunc modToPrintf(s string) (string, int, error) {\n\txs := strings.SplitN(s, \",\", 3)\n\tif len(xs) != 3 {\n\t\treturn \"\", 0, errors.New(\"fubar\")\n\t}\n\t\/\/ xs[0] is offset, xs[1] is width, xs[2] is base\n\tif xs[2] != \"o\" && xs[2] != \"d\" && xs[2] != \"x\" && xs[2] != \"X\" {\n\t\treturn \"\", 0, errors.New(\"fubar\")\n\t}\n\toffset, err := strconv.Atoi(xs[0])\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\twidth, err := strconv.Atoi(xs[1])\n\tif err != nil {\n\t\treturn \"\", offset, err\n\t}\n\tprintf := \"%\"\n\tswitch {\n\tcase width < 0:\n\t\treturn \"\", offset, errors.New(\"fubar\")\n\tcase width == 0:\n\t\tprintf += xs[1]\n\tdefault:\n\t\tprintf += \"0\" + xs[1]\n\t}\n\tprintf += xs[2]\n\treturn printf, offset, nil\n}\n<commit_msg>more descriptive errors<commit_after>package dns\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Parse the $GENERATE statement as used in BIND9 zones.\n\/\/ See http:\/\/www.zytrax.com\/books\/dns\/ch8\/generate.html for instance.\n\/\/ We are called after '$GENERATE '. After which we expect:\n\/\/ * the range (12-24\/2)\n\/\/ * lhs (ownername)\n\/\/ * [[ttl][class]]\n\/\/ * type\n\/\/ * rhs (rdata)\n\/\/ But we are lazy here, only the range is parsed *all* occurences\n\/\/ of $ after that are interpreted.\n\/\/ Any error are returned as a string value, the empty string signals\n\/\/ \"no error\".\nfunc generate(l lex, c chan lex, t chan Token, o string) string {\n\tstep := 1\n\tif i := strings.IndexAny(l.token, \"\/\"); i != -1 {\n\t\tif i+1 == len(l.token) {\n\t\t\treturn \"bad step in $GENERATE range\"\n\t\t}\n\t\tif s, e := strconv.Atoi(l.token[i+1:]); e != nil {\n\t\t\treturn \"bad step in $GENERATE range\"\n\t\t} else {\n\t\t\tif s < 0 {\n\t\t\t\treturn \"bad step in $GENERATE range\"\n\t\t\t}\n\t\t\tstep = s\n\t\t}\n\t\tl.token = l.token[:i]\n\t}\n\tsx := strings.SplitN(l.token, \"-\", 2)\n\tif len(sx) != 2 {\n\t\treturn \"bad start-stop in $GENERATE range\"\n\t}\n\tstart, err := strconv.Atoi(sx[0])\n\tif err != nil {\n\t\treturn \"bad start in $GENERATE range\"\n\t}\n\tend, err := strconv.Atoi(sx[1])\n\tif err != nil {\n\t\treturn \"bad stop in $GENERATE range\"\n\t}\n\tif end < 0 || start < 0 || end <= start {\n\t\treturn \"bad range in $GENERATE range\"\n\t}\n\n\t<-c \/\/ _BLANK\n\t\/\/ Create a complete new string, which we then parse again.\n\ts := \"\"\nBuildRR:\n\tl = <-c\n\tif l.value != _NEWLINE && l.value != _EOF {\n\t\ts += l.token\n\t\tgoto BuildRR\n\t}\n\tfor i := start; i <= end; i += step {\n\t\tvar (\n\t\t\tescape bool\n\t\t\tdom string\n\t\t\tmod string\n\t\t\terr string\n\t\t\toffset int\n\t\t)\n\n\t\tfor j := 0; j < len(s); j++ { \/\/ No 'range' because we need to jump around\n\t\t\tswitch s[j] {\n\t\t\tcase '\\\\':\n\t\t\t\tif escape {\n\t\t\t\t\tdom += \"\\\\\"\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tescape = true\n\t\t\tcase '$':\n\t\t\t\tmod = \"%d\"\n\t\t\t\toffset = 0\n\t\t\t\tif escape {\n\t\t\t\t\tdom += \"$\"\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tescape = false\n\t\t\t\tif j+1 >= len(s) { \/\/ End of the string\n\t\t\t\t\tdom += fmt.Sprintf(mod, i+offset)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tif s[j+1] == '$' {\n\t\t\t\t\t\tdom += \"$\"\n\t\t\t\t\t\tj++\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ Search for { and }\n\t\t\t\tif s[j+1] == '{' { \/\/ Modifier block\n\t\t\t\t\tsep := strings.Index(s[j+2:], \"}\")\n\t\t\t\t\tif sep == -1 {\n\t\t\t\t\t\treturn \"bad modifier in $GENERATE\"\n\t\t\t\t\t}\n\t\t\t\t\t\/\/println(\"checking\", s[j+2:j+2+sep])\n\t\t\t\t\tmod, offset, err = modToPrintf(s[j+2 : j+2+sep])\n\t\t\t\t\tif err != \"\" {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tj += 2 + sep \/\/ Jump to it\n\t\t\t\t}\n\t\t\t\t\/\/println(\"mod\", mod)\n\t\t\t\tdom += fmt.Sprintf(mod, i+offset)\n\t\t\tdefault:\n\t\t\t\tif escape { \/\/ Pretty useless here\n\t\t\t\t\tescape = false\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdom += string(s[j])\n\t\t\t}\n\t\t}\n\t\t\/\/ Re-parse the RR and send it on the current channel t\n\t\trx, e := NewRR(\"$ORIGIN \" + o + \"\\n\" + dom)\n\t\tif e != nil {\n\t\t\treturn e.(*ParseError).err\n\t\t}\n\t\tt <- Token{RR: rx}\n\t}\n\treturn \"\"\n}\n\n\/\/ Convert a $GENERATE modifier 0,0,d to something Printf can deal with.\nfunc modToPrintf(s string) (string, int, string) {\n\txs := strings.SplitN(s, \",\", 3)\n\tif len(xs) != 3 {\n\t\treturn \"\", 0, \"bad modifier in $GENERATE\"\n\t}\n\t\/\/ xs[0] is offset, xs[1] is width, xs[2] is base\n\tif xs[2] != \"o\" && xs[2] != \"d\" && xs[2] != \"x\" && xs[2] != \"X\" {\n\t\treturn \"\", 0, \"bad base in $GENERATE\"\n\t}\n\toffset, err := strconv.Atoi(xs[0])\n\tif err != nil {\n\t\treturn \"\", 0, \"bad offset in $GENERATE\"\n\t}\n\twidth, err := strconv.Atoi(xs[1])\n\tif err != nil {\n\t\treturn \"\", offset, \"bad width in $GENERATE\"\n\t}\n\tprintf := \"%\"\n\tswitch {\n\tcase width < 0:\n\t\treturn \"\", offset, \"bad width in $GENERATE\"\n\tcase width == 0:\n\t\tprintf += xs[1]\n\tdefault:\n\t\tprintf += \"0\" + xs[1]\n\t}\n\tprintf += xs[2]\n\treturn printf, offset, \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage state_test\n\nimport (\n\t\"errors\"\n\t\"github.com\/jacobsa\/comeback\/backup\"\n\t\"github.com\/jacobsa\/comeback\/backup\/mock\"\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/fs\"\n\t\"github.com\/jacobsa\/comeback\/fs\/mock\"\n\t\"github.com\/jacobsa\/comeback\/state\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestScoreMapSaver(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ScoreMapSaverTest struct {\n\tsourceMap state.ScoreMap\n\tsinkMap state.ScoreMap\n\tfileSystem mock_fs.MockFileSystem\n\twrapped mock_backup.MockFileSaver\n\tsaver backup.FileSaver\n\n\tpath string\n\tscores []blob.Score\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&ScoreMapSaverTest{}) }\n\nfunc (t *ScoreMapSaverTest) SetUp(i *TestInfo) {\n\tt.sourceMap = state.NewScoreMap()\n\tt.sinkMap = state.NewScoreMap()\n\tt.fileSystem = mock_fs.NewMockFileSystem(i.MockController, \"fileSystem\")\n\tt.wrapped = mock_backup.NewMockFileSaver(i.MockController, \"wrapped\")\n\n\tt.saver = state.NewScoreMapFileSaver(\n\t\tt.sourceMap,\n\t\tt.sinkMap,\n\t\tt.fileSystem,\n\t\tt.wrapped,\n\t)\n}\n\nfunc (t *ScoreMapSaverTest) call() {\n\tt.scores, t.err = t.saver.Save(t.path)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ScoreMapSaverTest) CallsStat() {\n\tt.path = \"taco\"\n\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(\"taco\").\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.call()\n}\n\nfunc (t *ScoreMapSaverTest) StatReturnsError() {\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.call()\n\n\tExpectThat(t.err, Error(HasSubstr(\"Stat\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ScoreMapSaverTest) ScoreMapContainsEntry() {\n\tt.path = \"taco\"\n\n\texpectedKey := state.ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ Source map\n\texpectedScores := []blob.Score{\n\t\tblob.ComputeScore([]byte(\"foo\")),\n\t\tblob.ComputeScore([]byte(\"bar\")),\n\t}\n\n\tt.sourceMap.Set(expectedKey, expectedScores)\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertEq(nil, t.err)\n\tExpectThat(t.scores, DeepEquals(expectedScores))\n\tExpectThat(t.sinkMap.Get(expectedKey), DeepEquals(expectedScores))\n}\n\nfunc (t *ScoreMapSaverTest) CallsWrapped() {\n\tt.path = \"taco\"\n\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"Save\")(\"taco\").\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.call()\n}\n\nfunc (t *ScoreMapSaverTest) WrappedReturnsError() {\n\texpectedKey := state.ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"Save\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertThat(t.err, Error(Equals(\"taco\")))\n\tExpectEq(nil, t.sinkMap.Get(expectedKey))\n}\n\nfunc (t *ScoreMapSaverTest) WrappedReturnsScores() {\n\tt.path = \"taco\"\n\texpectedKey := state.ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Wrapped\n\texpectedScores := []blob.Score{\n\t\tblob.ComputeScore([]byte(\"foo\")),\n\t\tblob.ComputeScore([]byte(\"bar\")),\n\t}\n\n\tExpectCall(t.wrapped, \"Save\")(Any()).\n\t\tWillOnce(oglemock.Return(expectedScores, nil))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertEq(nil, t.err)\n\tExpectThat(t.sinkMap.Get(expectedKey), DeepEquals(expectedScores))\n}\n<commit_msg>Changed test package.<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage state\n\nimport (\n\t\"errors\"\n\t\"github.com\/jacobsa\/comeback\/backup\"\n\t\"github.com\/jacobsa\/comeback\/backup\/mock\"\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/comeback\/fs\"\n\t\"github.com\/jacobsa\/comeback\/fs\/mock\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestScoreMapSaver(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype ScoreMapSaverTest struct {\n\tsourceMap ScoreMap\n\tsinkMap ScoreMap\n\tfileSystem mock_fs.MockFileSystem\n\twrapped mock_backup.MockFileSaver\n\tsaver backup.FileSaver\n\n\tpath string\n\tscores []blob.Score\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&ScoreMapSaverTest{}) }\n\nfunc (t *ScoreMapSaverTest) SetUp(i *TestInfo) {\n\tt.sourceMap = NewScoreMap()\n\tt.sinkMap = NewScoreMap()\n\tt.fileSystem = mock_fs.NewMockFileSystem(i.MockController, \"fileSystem\")\n\tt.wrapped = mock_backup.NewMockFileSaver(i.MockController, \"wrapped\")\n\n\tt.saver = NewScoreMapFileSaver(\n\t\tt.sourceMap,\n\t\tt.sinkMap,\n\t\tt.fileSystem,\n\t\tt.wrapped,\n\t)\n}\n\nfunc (t *ScoreMapSaverTest) call() {\n\tt.scores, t.err = t.saver.Save(t.path)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *ScoreMapSaverTest) CallsStat() {\n\tt.path = \"taco\"\n\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(\"taco\").\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.call()\n}\n\nfunc (t *ScoreMapSaverTest) StatReturnsError() {\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.call()\n\n\tExpectThat(t.err, Error(HasSubstr(\"Stat\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *ScoreMapSaverTest) ScoreMapContainsEntry() {\n\tt.path = \"taco\"\n\n\texpectedKey := ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ Source map\n\texpectedScores := []blob.Score{\n\t\tblob.ComputeScore([]byte(\"foo\")),\n\t\tblob.ComputeScore([]byte(\"bar\")),\n\t}\n\n\tt.sourceMap.Set(expectedKey, expectedScores)\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertEq(nil, t.err)\n\tExpectThat(t.scores, DeepEquals(expectedScores))\n\tExpectThat(t.sinkMap.Get(expectedKey), DeepEquals(expectedScores))\n}\n\nfunc (t *ScoreMapSaverTest) CallsWrapped() {\n\tt.path = \"taco\"\n\n\t\/\/ File system\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(fs.DirectoryEntry{}, nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"Save\")(\"taco\").\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.call()\n}\n\nfunc (t *ScoreMapSaverTest) WrappedReturnsError() {\n\texpectedKey := ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Wrapped\n\tExpectCall(t.wrapped, \"Save\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertThat(t.err, Error(Equals(\"taco\")))\n\tExpectEq(nil, t.sinkMap.Get(expectedKey))\n}\n\nfunc (t *ScoreMapSaverTest) WrappedReturnsScores() {\n\tt.path = \"taco\"\n\texpectedKey := ScoreMapKey{\n\t\tPath: \"taco\",\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: time.Now(),\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\t\/\/ File system\n\tentry := fs.DirectoryEntry{\n\t\tPermissions: 0644,\n\t\tUid: 17,\n\t\tGid: 19,\n\t\tMTime: expectedKey.MTime,\n\t\tInode: 23,\n\t\tSize: 29,\n\t}\n\n\tExpectCall(t.fileSystem, \"Stat\")(Any()).\n\t\tWillOnce(oglemock.Return(entry, nil))\n\n\t\/\/ Wrapped\n\texpectedScores := []blob.Score{\n\t\tblob.ComputeScore([]byte(\"foo\")),\n\t\tblob.ComputeScore([]byte(\"bar\")),\n\t}\n\n\tExpectCall(t.wrapped, \"Save\")(Any()).\n\t\tWillOnce(oglemock.Return(expectedScores, nil))\n\n\t\/\/ Call\n\tt.call()\n\n\tAssertEq(nil, t.err)\n\tExpectThat(t.sinkMap.Get(expectedKey), DeepEquals(expectedScores))\n}\n<|endoftext|>"} {"text":"<commit_before>package zlib\n\nimport (\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"ztools\/ztls\"\n)\n\nvar smtpEndRegex = regexp.MustCompile(`(?:\\r\\n)|^[0-9]{3} .+\\r\\n$`)\nvar pop3EndRegex = regexp.MustCompile(`(?:\\r\\n\\.\\r\\n$)|(?:\\r\\n$)`)\nvar imapStatusEndRegex = regexp.MustCompile(`\\r\\n$`)\n\nconst (\n\tSMTP_COMMAND = \"STARTTLS\\r\\n\"\n\tPOP3_COMMAND = \"STLS\\r\\n\"\n\tIMAP_COMMAND = \"a001 STARTTLS\\r\\n\"\n)\n\n\/\/ Implements the net.Conn interface\ntype Conn struct {\n\t\/\/ Underlying network connection\n\tconn net.Conn\n\ttlsConn *ztls.Conn\n\tisTls bool\n\n\t\/\/ Max TLS version\n\tmaxTlsVersion uint16\n\n\t\/\/ Keep track of state \/ network operations\n\toperations []ConnectionEvent\n\n\t\/\/ Cache the deadlines so we can reapply after TLS handshake\n\treadDeadline time.Time\n\twriteDeadline time.Time\n\n\tcaPool *x509.CertPool\n\tonlyCBC bool\n\n\tdomain string\n}\n\nfunc (c *Conn) getUnderlyingConn() net.Conn {\n\tif c.isTls {\n\t\treturn c.tlsConn\n\t}\n\treturn c.conn\n}\n\nfunc (c *Conn) SetCBCOnly() {\n\tc.onlyCBC = true\n}\n\nfunc (c *Conn) SetCAPool(pool *x509.CertPool) {\n\tc.caPool = pool\n}\n\nfunc (c *Conn) SetDomain(domain string) {\n\tc.domain = domain\n}\n\n\/\/ Layer in the regular conn methods\nfunc (c *Conn) LocalAddr() net.Addr {\n\treturn c.getUnderlyingConn().LocalAddr()\n}\n\nfunc (c *Conn) RemoteAddr() net.Addr {\n\treturn c.getUnderlyingConn().RemoteAddr()\n}\n\nfunc (c *Conn) SetDeadline(t time.Time) error {\n\tc.readDeadline = t\n\tc.writeDeadline = t\n\treturn c.getUnderlyingConn().SetDeadline(t)\n}\n\nfunc (c *Conn) SetReadDeadline(t time.Time) error {\n\tc.readDeadline = t\n\treturn c.getUnderlyingConn().SetReadDeadline(t)\n}\n\nfunc (c *Conn) SetWriteDeadline(t time.Time) error {\n\tc.writeDeadline = t\n\treturn c.getUnderlyingConn().SetWriteDeadline(t)\n}\n\n\/\/ Delegate here, but record all the things\nfunc (c *Conn) Write(b []byte) (int, error) {\n\tn, err := c.getUnderlyingConn().Write(b)\n\tw := WriteEvent{Sent: b}\n\tevent := ConnectionEvent{\n\t\tData: &w,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn n, err\n}\n\nfunc (c *Conn) Read(b []byte) (int, error) {\n\tn, err := c.getUnderlyingConn().Read(b)\n\tr := ReadEvent{\n\t\tResponse: b[0:n],\n\t}\n\tc.appendEvent(&r, err)\n\treturn n, err\n}\n\nfunc (c *Conn) Close() error {\n\treturn c.getUnderlyingConn().Close()\n}\n\n\/\/ Extra method - Do a TLS Handshake and record progress\nfunc (c *Conn) TLSHandshake() error {\n\tif c.isTls {\n\t\treturn fmt.Errorf(\n\t\t\t\"Attempted repeat handshake with remote host %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\ttlsConfig := new(ztls.Config)\n\ttlsConfig.InsecureSkipVerify = true\n\ttlsConfig.MinVersion = ztls.VersionSSL30\n\ttlsConfig.MaxVersion = c.maxTlsVersion\n\ttlsConfig.RootCAs = c.caPool\n\tif c.domain != \"\" {\n\t\ttlsConfig.ServerName = c.domain\n\t}\n\tif c.onlyCBC {\n\t\ttlsConfig.CipherSuites = ztls.CBCSuiteIDList\n\t}\n\tc.tlsConn = ztls.Client(c.conn, tlsConfig)\n\tc.tlsConn.SetReadDeadline(c.readDeadline)\n\tc.tlsConn.SetWriteDeadline(c.writeDeadline)\n\tc.isTls = true\n\terr := c.tlsConn.Handshake()\n\thl := c.tlsConn.GetHandshakeLog()\n\tts := TLSHandshakeEvent{handshakeLog: hl}\n\tevent := ConnectionEvent{\n\t\tData: &ts,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn err\n}\n\nfunc (c *Conn) sendStartTLSCommand(command string) error {\n\t\/\/ Don't doublehandshake\n\tif c.isTls {\n\t\treturn fmt.Errorf(\n\t\t\t\"Attempt STARTTLS after TLS handshake with remote host %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\t\/\/ Send the STARTTLS message\n\tstarttls := []byte(command)\n\t_, err := c.conn.Write(starttls)\n\treturn err\n}\n\n\/\/ Do a STARTTLS handshake\nfunc (c *Conn) SMTPStartTLSHandshake() error {\n\t\/\/ Make the state\n\tss := StartTLSEvent{Command: SMTP_COMMAND}\n\n\t\/\/ Send the command\n\tif err := c.sendStartTLSCommand(SMTP_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\t\/\/ Read the response on a successful send\n\tbuf := make([]byte, 256)\n\tn, err := c.readSmtpResponse(buf)\n\tss.Response = buf[0:n]\n\n\t\/\/ Record everything no matter the result\n\tc.appendEvent(&ss, err)\n\n\t\/\/ Stop if we failed already\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Successful so far, attempt to do the actual handshake\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) POP3StartTLSHandshake() error {\n\tss := StartTLSEvent{Command: POP3_COMMAND}\n\tif err := c.sendStartTLSCommand(POP3_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readPop3Response(buf)\n\tss.Response = buf[0:n]\n\tc.appendEvent(&ss, err)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) IMAPStartTLSHandshake() error {\n\tss := StartTLSEvent{Command: IMAP_COMMAND}\n\tif err := c.sendStartTLSCommand(IMAP_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readImapStatusResponse(buf)\n\tss.Response = buf[0:n]\n\tc.appendEvent(&ss, err)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) readUntilRegex(res []byte, expr *regexp.Regexp) (int, error) {\n\tbuf := res[0:]\n\tlength := 0\n\tfor finished := false; !finished; {\n\t\tn, err := c.getUnderlyingConn().Read(buf)\n\t\tlength += n\n\t\tif err != nil {\n\t\t\treturn length, err\n\t\t}\n\t\tif expr.Match(res[0:length]) {\n\t\t\tfinished = true\n\t\t}\n\t\tif length == len(res) {\n\t\t\treturn length, errors.New(\"Not enough buffer space\")\n\t\t}\n\t\tbuf = res[length:]\n\t}\n\treturn length, nil\n}\n\nfunc (c *Conn) readSmtpResponse(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, smtpEndRegex)\n}\n\nfunc (c *Conn) SMTPBanner(b []byte) (int, error) {\n\tn, err := c.readSmtpResponse(b)\n\tmb := MailBannerEvent{}\n\tmb.Banner = string(b[0:n])\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) EHLO(domain string) error {\n\tcmd := []byte(\"EHLO \" + domain + \"\\r\\n\")\n\tee := EHLOEvent{}\n\tif _, err := c.getUnderlyingConn().Write(cmd); err != nil {\n\t\tc.appendEvent(&ee, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readSmtpResponse(buf)\n\tee.Response = buf[0:n]\n\tc.appendEvent(&ee, err)\n\treturn err\n}\n\nfunc (c *Conn) SMTPHelp() error {\n\tcmd := []byte(\"HELP\\r\\n\")\n\th := new(SMTPHelpEvent)\n\tif _, err := c.getUnderlyingConn().Write(cmd); err != nil {\n\t\tc.appendEvent(h, err)\n\t\treturn err\n\t}\n\tbuf := make([]byte, 512)\n\tn, err := c.readSmtpResponse(buf)\n\th.Response = buf[0:n]\n\tc.appendEvent(h, err)\n\treturn err\n}\n\nfunc (c *Conn) readPop3Response(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, pop3EndRegex)\n}\n\nfunc (c *Conn) POP3Banner(b []byte) (int, error) {\n\tn, err := c.readPop3Response(b)\n\tmb := MailBannerEvent{\n\t\tBanner: string(b[0:n]),\n\t}\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) readImapStatusResponse(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, imapStatusEndRegex)\n}\n\nfunc (c *Conn) IMAPBanner(b []byte) (int, error) {\n\tn, err := c.readImapStatusResponse(b)\n\tmb := MailBannerEvent{\n\t\tBanner: string(b[0:n]),\n\t}\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) CheckHeartbleed(b []byte) (int, error) {\n\tif !c.isTls {\n\t\treturn 0, fmt.Errorf(\n\t\t\t\"Must perform TLS handshake before sending Heartbleed probe to %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\tn, err := c.tlsConn.CheckHeartbleed(b)\n\thb := c.tlsConn.GetHeartbleedLog()\n\tif err == ztls.HeartbleedError {\n\t\terr = nil\n\t}\n\tevent := ConnectionEvent{\n\t\tData: &HeartbleedEvent{heartbleedLog: hb},\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn n, err\n}\n\nfunc (c *Conn) SendModbusEcho(b []byte) (int, error) {\n\t\/\/ TODO MODBUS\n\treturn 0, nil\n}\n\nfunc (c *Conn) States() []ConnectionEvent {\n\treturn c.operations\n}\n\nfunc (c *Conn) appendEvent(data EventData, err error) {\n\tevent := ConnectionEvent{\n\t\tData: data,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n}\n<commit_msg>Clarify modbus TODO<commit_after>package zlib\n\nimport (\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"ztools\/ztls\"\n)\n\nvar smtpEndRegex = regexp.MustCompile(`(?:\\r\\n)|^[0-9]{3} .+\\r\\n$`)\nvar pop3EndRegex = regexp.MustCompile(`(?:\\r\\n\\.\\r\\n$)|(?:\\r\\n$)`)\nvar imapStatusEndRegex = regexp.MustCompile(`\\r\\n$`)\n\nconst (\n\tSMTP_COMMAND = \"STARTTLS\\r\\n\"\n\tPOP3_COMMAND = \"STLS\\r\\n\"\n\tIMAP_COMMAND = \"a001 STARTTLS\\r\\n\"\n)\n\n\/\/ Implements the net.Conn interface\ntype Conn struct {\n\t\/\/ Underlying network connection\n\tconn net.Conn\n\ttlsConn *ztls.Conn\n\tisTls bool\n\n\t\/\/ Max TLS version\n\tmaxTlsVersion uint16\n\n\t\/\/ Keep track of state \/ network operations\n\toperations []ConnectionEvent\n\n\t\/\/ Cache the deadlines so we can reapply after TLS handshake\n\treadDeadline time.Time\n\twriteDeadline time.Time\n\n\tcaPool *x509.CertPool\n\tonlyCBC bool\n\n\tdomain string\n}\n\nfunc (c *Conn) getUnderlyingConn() net.Conn {\n\tif c.isTls {\n\t\treturn c.tlsConn\n\t}\n\treturn c.conn\n}\n\nfunc (c *Conn) SetCBCOnly() {\n\tc.onlyCBC = true\n}\n\nfunc (c *Conn) SetCAPool(pool *x509.CertPool) {\n\tc.caPool = pool\n}\n\nfunc (c *Conn) SetDomain(domain string) {\n\tc.domain = domain\n}\n\n\/\/ Layer in the regular conn methods\nfunc (c *Conn) LocalAddr() net.Addr {\n\treturn c.getUnderlyingConn().LocalAddr()\n}\n\nfunc (c *Conn) RemoteAddr() net.Addr {\n\treturn c.getUnderlyingConn().RemoteAddr()\n}\n\nfunc (c *Conn) SetDeadline(t time.Time) error {\n\tc.readDeadline = t\n\tc.writeDeadline = t\n\treturn c.getUnderlyingConn().SetDeadline(t)\n}\n\nfunc (c *Conn) SetReadDeadline(t time.Time) error {\n\tc.readDeadline = t\n\treturn c.getUnderlyingConn().SetReadDeadline(t)\n}\n\nfunc (c *Conn) SetWriteDeadline(t time.Time) error {\n\tc.writeDeadline = t\n\treturn c.getUnderlyingConn().SetWriteDeadline(t)\n}\n\n\/\/ Delegate here, but record all the things\nfunc (c *Conn) Write(b []byte) (int, error) {\n\tn, err := c.getUnderlyingConn().Write(b)\n\tw := WriteEvent{Sent: b}\n\tevent := ConnectionEvent{\n\t\tData: &w,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn n, err\n}\n\nfunc (c *Conn) Read(b []byte) (int, error) {\n\tn, err := c.getUnderlyingConn().Read(b)\n\tr := ReadEvent{\n\t\tResponse: b[0:n],\n\t}\n\tc.appendEvent(&r, err)\n\treturn n, err\n}\n\nfunc (c *Conn) Close() error {\n\treturn c.getUnderlyingConn().Close()\n}\n\n\/\/ Extra method - Do a TLS Handshake and record progress\nfunc (c *Conn) TLSHandshake() error {\n\tif c.isTls {\n\t\treturn fmt.Errorf(\n\t\t\t\"Attempted repeat handshake with remote host %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\ttlsConfig := new(ztls.Config)\n\ttlsConfig.InsecureSkipVerify = true\n\ttlsConfig.MinVersion = ztls.VersionSSL30\n\ttlsConfig.MaxVersion = c.maxTlsVersion\n\ttlsConfig.RootCAs = c.caPool\n\tif c.domain != \"\" {\n\t\ttlsConfig.ServerName = c.domain\n\t}\n\tif c.onlyCBC {\n\t\ttlsConfig.CipherSuites = ztls.CBCSuiteIDList\n\t}\n\tc.tlsConn = ztls.Client(c.conn, tlsConfig)\n\tc.tlsConn.SetReadDeadline(c.readDeadline)\n\tc.tlsConn.SetWriteDeadline(c.writeDeadline)\n\tc.isTls = true\n\terr := c.tlsConn.Handshake()\n\thl := c.tlsConn.GetHandshakeLog()\n\tts := TLSHandshakeEvent{handshakeLog: hl}\n\tevent := ConnectionEvent{\n\t\tData: &ts,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn err\n}\n\nfunc (c *Conn) sendStartTLSCommand(command string) error {\n\t\/\/ Don't doublehandshake\n\tif c.isTls {\n\t\treturn fmt.Errorf(\n\t\t\t\"Attempt STARTTLS after TLS handshake with remote host %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\t\/\/ Send the STARTTLS message\n\tstarttls := []byte(command)\n\t_, err := c.conn.Write(starttls)\n\treturn err\n}\n\n\/\/ Do a STARTTLS handshake\nfunc (c *Conn) SMTPStartTLSHandshake() error {\n\t\/\/ Make the state\n\tss := StartTLSEvent{Command: SMTP_COMMAND}\n\n\t\/\/ Send the command\n\tif err := c.sendStartTLSCommand(SMTP_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\t\/\/ Read the response on a successful send\n\tbuf := make([]byte, 256)\n\tn, err := c.readSmtpResponse(buf)\n\tss.Response = buf[0:n]\n\n\t\/\/ Record everything no matter the result\n\tc.appendEvent(&ss, err)\n\n\t\/\/ Stop if we failed already\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Successful so far, attempt to do the actual handshake\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) POP3StartTLSHandshake() error {\n\tss := StartTLSEvent{Command: POP3_COMMAND}\n\tif err := c.sendStartTLSCommand(POP3_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readPop3Response(buf)\n\tss.Response = buf[0:n]\n\tc.appendEvent(&ss, err)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) IMAPStartTLSHandshake() error {\n\tss := StartTLSEvent{Command: IMAP_COMMAND}\n\tif err := c.sendStartTLSCommand(IMAP_COMMAND); err != nil {\n\t\tc.appendEvent(&ss, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readImapStatusResponse(buf)\n\tss.Response = buf[0:n]\n\tc.appendEvent(&ss, err)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.TLSHandshake()\n}\n\nfunc (c *Conn) readUntilRegex(res []byte, expr *regexp.Regexp) (int, error) {\n\tbuf := res[0:]\n\tlength := 0\n\tfor finished := false; !finished; {\n\t\tn, err := c.getUnderlyingConn().Read(buf)\n\t\tlength += n\n\t\tif err != nil {\n\t\t\treturn length, err\n\t\t}\n\t\tif expr.Match(res[0:length]) {\n\t\t\tfinished = true\n\t\t}\n\t\tif length == len(res) {\n\t\t\treturn length, errors.New(\"Not enough buffer space\")\n\t\t}\n\t\tbuf = res[length:]\n\t}\n\treturn length, nil\n}\n\nfunc (c *Conn) readSmtpResponse(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, smtpEndRegex)\n}\n\nfunc (c *Conn) SMTPBanner(b []byte) (int, error) {\n\tn, err := c.readSmtpResponse(b)\n\tmb := MailBannerEvent{}\n\tmb.Banner = string(b[0:n])\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) EHLO(domain string) error {\n\tcmd := []byte(\"EHLO \" + domain + \"\\r\\n\")\n\tee := EHLOEvent{}\n\tif _, err := c.getUnderlyingConn().Write(cmd); err != nil {\n\t\tc.appendEvent(&ee, err)\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, 512)\n\tn, err := c.readSmtpResponse(buf)\n\tee.Response = buf[0:n]\n\tc.appendEvent(&ee, err)\n\treturn err\n}\n\nfunc (c *Conn) SMTPHelp() error {\n\tcmd := []byte(\"HELP\\r\\n\")\n\th := new(SMTPHelpEvent)\n\tif _, err := c.getUnderlyingConn().Write(cmd); err != nil {\n\t\tc.appendEvent(h, err)\n\t\treturn err\n\t}\n\tbuf := make([]byte, 512)\n\tn, err := c.readSmtpResponse(buf)\n\th.Response = buf[0:n]\n\tc.appendEvent(h, err)\n\treturn err\n}\n\nfunc (c *Conn) readPop3Response(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, pop3EndRegex)\n}\n\nfunc (c *Conn) POP3Banner(b []byte) (int, error) {\n\tn, err := c.readPop3Response(b)\n\tmb := MailBannerEvent{\n\t\tBanner: string(b[0:n]),\n\t}\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) readImapStatusResponse(res []byte) (int, error) {\n\treturn c.readUntilRegex(res, imapStatusEndRegex)\n}\n\nfunc (c *Conn) IMAPBanner(b []byte) (int, error) {\n\tn, err := c.readImapStatusResponse(b)\n\tmb := MailBannerEvent{\n\t\tBanner: string(b[0:n]),\n\t}\n\tc.appendEvent(&mb, err)\n\treturn n, err\n}\n\nfunc (c *Conn) CheckHeartbleed(b []byte) (int, error) {\n\tif !c.isTls {\n\t\treturn 0, fmt.Errorf(\n\t\t\t\"Must perform TLS handshake before sending Heartbleed probe to %s\",\n\t\t\tc.RemoteAddr().String())\n\t}\n\tn, err := c.tlsConn.CheckHeartbleed(b)\n\thb := c.tlsConn.GetHeartbleedLog()\n\tif err == ztls.HeartbleedError {\n\t\terr = nil\n\t}\n\tevent := ConnectionEvent{\n\t\tData: &HeartbleedEvent{heartbleedLog: hb},\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n\treturn n, err\n}\n\nfunc (c *Conn) SendModbusEcho(b []byte) (int, error) {\n\t\/\/ TODO MODBUS\n\t\/\/ Build a request\n\n\t\/\/ Send it out\n\n\t\/\/ Read the response\n\n\t\/\/ make sure the whole thing gets appended to the operation log\n\t\/\/ e.g. c.appendEvent(modbusEvent, modbusError)\n\treturn 0, nil\n}\n\nfunc (c *Conn) States() []ConnectionEvent {\n\treturn c.operations\n}\n\nfunc (c *Conn) appendEvent(data EventData, err error) {\n\tevent := ConnectionEvent{\n\t\tData: data,\n\t\tError: err,\n\t}\n\tc.operations = append(c.operations, event)\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport \"fmt\"\n\nconst (\n\tMajorVersion = 0\n\tMinorVersion = 9\n\tMicroVersion = 0\n\tAdditionalVersion = \"\"\n)\n\nfunc VersionString() string {\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MajorVersion, MinorVersion, MicroVersion, AdditionalVersion)\n}\n<commit_msg>bump master version to 0.10.0dev<commit_after>package common\n\nimport \"fmt\"\n\nconst (\n\tMajorVersion = 0\n\tMinorVersion = 10\n\tMicroVersion = 0\n\tAdditionalVersion = \"dev\"\n)\n\nfunc VersionString() string {\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MajorVersion, MinorVersion, MicroVersion, AdditionalVersion)\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"bytes\"\n \"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Docker struct {\n\th\t *httpAPI\t\t\t\/\/ contains ip string\n\tContainers []Container\n\tImages []Image\n\tUpdated time.Time\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tNetworkSettings struct {\n\t\tPortMapping struct {\n\t\t\tTcp map[string]string\n\t\t}\n\t}\n}\n\ntype Image struct {\n\tId string\n\tRepository string\n}\n\ntype httpAPI struct {\n\tip string\n}\n\n\/\/ function to initialize new httpAPI struct\nfunc NewHttpClient(ip string) (h *httpAPI) {\n\th = &httpAPI{ip}\n\treturn\n}\n\n\/\/ function to initialize new Docker struct \nfunc NewDocker(ip string) (D *Docker) {\n\tD = &Docker{NewHttpClient(ip)}\n\treturn\n}\n\n\/\/ Post function to clean up http.Post calls in code, method for HttpAPI struct\nfunc (h *httpAPI) Post(url string, content string, b io.Reader) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\tresp, err = c.Post(\"http:\/\/\"+h.ip+\":4243\/\"+url,\n\t\tcontent, b)\n\n\treturn\n}\n\n\/\/ Post with a dictionary of header values\nfunc (h *httpAPI) PostHeader(url string, content string, b io.Reader, header http.Header) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\treq, err := http.NewRequest(\"POST\",\n\t\t\"http:\/\/\"+h.ip+\":4243\/\"+url, b)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n req.Header = header\n req.Header.Set(\"Content-Type\", content)\n\n\tresp, err = c.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\n\treturn\n}\n\n\/\/ Get function to clean up http.Get calls in code, method for HttpAPI struct\nfunc (h *httpAPI) Get(url string) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\tresp, err = c.Get(\"http:\/\/\" + h.ip + \":4243\/\" + url)\n\n\treturn\n}\n\n\/\/ Delete function to clean up http.NewRequest(\"DELETE\"...) call, method for HttpAPI struct\nfunc (h *httpAPI) Delete(url string) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\treq, err := http.NewRequest(\"DELETE\",\n\t\t\"http:\/\/\"+h.ip+\":4243\/\"+url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresp, err = c.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn\n}\n\nfunc (D *Docker) GetIP() string {\n return D.h.ip\n}\n\n\/\/ function to periodically update information in Docker struct\nfunc (D *Docker) Update() {\n\tfor ; ; time.Sleep(60 * time.Second) {\n\t\tc, err := D.ListContainers()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\t\n\t\timg, err := D.ListImages()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\t\n\t\tD = Docker{_,c, img, time.Now()}\n\t\tupdate <- D\n\t}\n}\n\n\/\/ InspectContainer takes a container, and returns its port and its info\nfunc (D *Docker) InspectContainer(id string) (err error) {\n\tC := &Container{}\n\n\tresp, err := D.h.Get(\"\/containers\/\" + id + \"\/json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(\"Inspect Container Status is not 200\")\n\t}\n\n\trall, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tjson.Unmarshal(rall, C)\n\n\tlog.Print(\"Container inspected\")\n\treturn nil\n}\n\n\/\/ runImage takes a docker image to run, and makes sure it is running\nfunc (D *Docker) RunImage(imagename string, hint bool) (id string, err error) {\n\n\te := make([]string, 1)\n\te[0] = \"HOST=\" + D.h.ip\n\n\tc := make(map[string]interface{})\n\tc[\"Image\"] = imagename\n\tif hint {\n\t\tc[\"Env\"] = e\n\t}\n\n\tba, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb := bytes.NewBuffer(ba)\n\n\tresp, err := h.Post(\"containers\/create\", \"application\/json\", b)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 201 {\n\t\tmsg := fmt.Sprintf(\"Create Container Response status is %d\", resp.StatusCode)\n\t\treturn \"\", errors.New(msg)\n\t}\n\n\ts, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ta := &Container{}\n\tjson.Unmarshal(s, a)\n\tid = a.Id\n\n\tlog.Printf(\"Container created id:%s\", id)\n\n\tb = bytes.NewBuffer([]byte(\"{}\"))\n\tresp, err = D.h.Post(\"containers\/\"+id+\"\/start\", \"application\/json\", b)\n\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\n\tif resp.StatusCode != 204 {\n\t\tmsg := fmt.Sprintf(\"Start Container Response status is %d\", resp.StatusCode)\n\t\treturn \"\", errors.New(msg)\n\t}\n\n\tlog.Printf(\"Container running\")\n\treturn id, nil\n\n}\n\n\/\/ StopContainer stops a container\nfunc (D *Docker) StopContainer(container string) (err error) {\n\tlog.Print(\"Stopping container \", container)\n\n\tresp, err := D.h.Post(\"containers\/\"+container+\"\/stop?t=1\", \"application\/json\", b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\treturn nil\n}\n\n\/\/ StopImage takes a image to stop and stops it\nfunc (D *Docker) StopImage(imagename string) {\n\tlog.Print(\"Stopping image \", imagename)\n\n\trunning, id, err := D.ImageRunning(imagename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif running {\n\t\terr = D.StopContainer(id)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tD.DeleteContainer(id)\n\t}\n}\n\nfunc (D *Docker) DeleteContainer(id string) (err error) {\n\tlog.Print(\"deleting container \", id)\n\n\tresp, err := D.h.Delete(\"containers\/\" + id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\tLogReader(resp.Body)\n\treturn\n}\n\n\/\/ TagImage tags an already existing image in the repository\nfunc (D *Docker) TagImage(name string, tag string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\n\tresp, err := D.h.Post(\"images\/\"+name+\"\/tag?repo=\"+tag+\"&force=1\", \"application\/json\", b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 201 {\n\t\tlog.Print(resp)\n\t\treturn errors.New(\"Code not 201\")\n\t}\n\n\treturn nil\n\n}\n\n\/\/ PushImage pushes an image to a docker index\nfunc (D *Docker) PushImage(w io.Writer, name string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\n auth := base64.StdEncoding.EncodeToString([]byte(\"{}\"))\n header := make(http.Header)\n header.Set(\"X-Registry-Auth\", auth)\n\n url := \"images\/\"+name+\"\/push\"\n\n\tresp, err := D.h.PostHeader(url,\n\t\t\"application\/json\", b, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != 200 {\n\n\t\t_, err = io.Copy(w, resp.Body)\n if err != nil {\n io.WriteString(w, err.Error())\n }\n\n io.WriteString(w, resp.Header.Get(\"Location\")+\"\\n\")\n io.WriteString(w, url + \"\\n\")\n\n\t\ts := fmt.Sprintf(\"Push image status code is not 200 it is %d\",\n\t\t\tresp.StatusCode)\n\t\treturn errors.New(s)\n\t}\n\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(w, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ buildImage takes a tarfile, and builds it\nfunc (D *Docker) BuildImage(fd io.Reader, name string) (err error) {\n\n\tv := fmt.Sprintf(\"%d\", time.Now().Unix())\n\tresp, err := D.h.Post(\"build?t=\"+name+\"%3A\"+v,\n\t\t\"application\/tar\", fd)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\n\treturn D.TagImage(name+\"%3A\"+v, name)\n}\n\n\/\/ loadImage pulls a specified image into a docker instance\nfunc (D *Docker) LoadImage(imagename string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\tresp, err := D.h.Post(\"images\/create?fromImage=\"+imagename,\n\t\t\"text\", b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\tif resp.StatusCode != 200 {\n\t\tlog.Printf(\"create status code is not 200 %s\", resp.StatusCode)\n\t}\n\n\tlog.Printf(\"Image fetched %s\", imagename)\n\treturn nil\n}\n\n\/\/ ListContainers gives the state for a specific docker container\nfunc (D *Docker) ListContainers() (c []Container, err error) {\n\tresp, err := D.h.Get(\"containers\/json\")\n\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn c, errors.New(\"Response code is not 200\")\n\t}\n\n\tmessage, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcontainers := &c\n\terr = json.Unmarshal(message, containers)\n\treturn\n}\n\n\/\/ ListImages gives the state of the images for a specific Docker container\nfunc (D *Docker) ListImages() (img []Image, err error) {\n\tresp, err := D.h.Get(\"images\/json\")\n\t\n\t\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn img, errors.New(\"Response code is not 200\")\n\t}\n\t\n\tmessage, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\timages := &img\n\terr = json.Unmarshal(message, images)\n\treturn\n}\n\/\/ ImageRunning states whether an image is running on a docker instance\nfunc (D *Docker) ImageRunning(imagename string) (running bool, id string, err error) {\n\n\tcontainers, err := D.ListContainers()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tfor _, v := range containers {\n\t\tif strings.SplitN(v.Image, \":\", 2)[0] == imagename {\n\t\t\treturn true, v.Id, nil\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>common\/docker: fixed compiler errors<commit_after>package common\n\nimport (\n\t\"bytes\"\n \"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Docker struct {\n\th\t *httpAPI\t\t\t\/\/ contains ip string\n\tContainers []Container\n\tImages []Image\n\tUpdated time.Time\n}\n\ntype Container struct {\n\tId string\n\tImage string\n\tNetworkSettings struct {\n\t\tPortMapping struct {\n\t\t\tTcp map[string]string\n\t\t}\n\t}\n}\n\ntype Image struct {\n\tId string\n\tRepository string\n}\n\ntype httpAPI struct {\n\tip string\n}\n\n\/\/ function to initialize new httpAPI struct\nfunc NewHttpClient(ip string) (h *httpAPI) {\n\th = &httpAPI{ip}\n\treturn\n}\n\n\/\/ function to initialize new Docker struct \nfunc NewDocker(ip string) (D *Docker) {\n\tD = &Docker{}\n\tD.h = NewHttpClient(ip)\n\treturn\n}\n\n\/\/ Post function to clean up http.Post calls in code, method for HttpAPI struct\nfunc (h *httpAPI) Post(url string, content string, b io.Reader) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\tresp, err = c.Post(\"http:\/\/\"+h.ip+\":4243\/\"+url,\n\t\tcontent, b)\n\n\treturn\n}\n\n\/\/ Post with a dictionary of header values\nfunc (h *httpAPI) PostHeader(url string, content string, b io.Reader, header http.Header) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\treq, err := http.NewRequest(\"POST\",\n\t\t\"http:\/\/\"+h.ip+\":4243\/\"+url, b)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n req.Header = header\n req.Header.Set(\"Content-Type\", content)\n\n\tresp, err = c.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\n\treturn\n}\n\n\/\/ Get function to clean up http.Get calls in code, method for HttpAPI struct\nfunc (h *httpAPI) Get(url string) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\tresp, err = c.Get(\"http:\/\/\" + h.ip + \":4243\/\" + url)\n\n\treturn\n}\n\n\/\/ Delete function to clean up http.NewRequest(\"DELETE\"...) call, method for HttpAPI struct\nfunc (h *httpAPI) Delete(url string) (resp *http.Response, err error) {\n\tc := MakeHttpClient()\n\n\treq, err := http.NewRequest(\"DELETE\",\n\t\t\"http:\/\/\"+h.ip+\":4243\/\"+url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresp, err = c.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn\n}\n\nfunc (D *Docker) GetIP() string {\n return D.h.ip\n}\n\n\/\/ function to periodically update information in Docker struct\nfunc (D *Docker) Update() {\n\tfor ; ; time.Sleep(60 * time.Second) {\n\t\tc, err := D.ListContainers()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\t\n\t\timg, err := D.ListImages()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\t\n\n\t\tD.Containers = c\n\t\tD.Images = img\n\t\tD.Updated = time.Now()\n\t}\n}\n\n\/\/ InspectContainer takes a container, and returns its port and its info\nfunc (D *Docker) InspectContainer(id string) (err error) {\n\tC := &Container{}\n\n\tresp, err := D.h.Get(\"\/containers\/\" + id + \"\/json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn errors.New(\"Inspect Container Status is not 200\")\n\t}\n\n\trall, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tjson.Unmarshal(rall, C)\n\n\tlog.Print(\"Container inspected\")\n\treturn nil\n}\n\n\/\/ runImage takes a docker image to run, and makes sure it is running\nfunc (D *Docker) RunImage(imagename string, hint bool) (id string, err error) {\n\n\te := make([]string, 1)\n\te[0] = \"HOST=\" + D.h.ip\n\n\tc := make(map[string]interface{})\n\tc[\"Image\"] = imagename\n\tif hint {\n\t\tc[\"Env\"] = e\n\t}\n\n\tba, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb := bytes.NewBuffer(ba)\n\n\tresp, err := D.h.Post(\"containers\/create\", \"application\/json\", b)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 201 {\n\t\tmsg := fmt.Sprintf(\"Create Container Response status is %d\", resp.StatusCode)\n\t\treturn \"\", errors.New(msg)\n\t}\n\n\ts, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ta := &Container{}\n\tjson.Unmarshal(s, a)\n\tid = a.Id\n\n\tlog.Printf(\"Container created id:%s\", id)\n\n\tb = bytes.NewBuffer([]byte(\"{}\"))\n\tresp, err = D.h.Post(\"containers\/\"+id+\"\/start\", \"application\/json\", b)\n\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\n\tif resp.StatusCode != 204 {\n\t\tmsg := fmt.Sprintf(\"Start Container Response status is %d\", resp.StatusCode)\n\t\treturn \"\", errors.New(msg)\n\t}\n\n\tlog.Printf(\"Container running\")\n\treturn id, nil\n\n}\n\n\/\/ StopContainer stops a container\nfunc (D *Docker) StopContainer(container string) (err error) {\n\tlog.Print(\"Stopping container \", container)\n\tb := bytes.NewBuffer(nil)\n\t\n\tresp, err := D.h.Post(\"containers\/\"+container+\"\/stop?t=1\", \"application\/json\", b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\treturn nil\n}\n\n\/\/ StopImage takes a image to stop and stops it\nfunc (D *Docker) StopImage(imagename string) {\n\tlog.Print(\"Stopping image \", imagename)\n\n\trunning, id, err := D.ImageRunning(imagename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif running {\n\t\terr = D.StopContainer(id)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tD.DeleteContainer(id)\n\t}\n}\n\nfunc (D *Docker) DeleteContainer(id string) (err error) {\n\tlog.Print(\"deleting container \", id)\n\n\tresp, err := D.h.Delete(\"containers\/\" + id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\tLogReader(resp.Body)\n\treturn\n}\n\n\/\/ TagImage tags an already existing image in the repository\nfunc (D *Docker) TagImage(name string, tag string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\n\tresp, err := D.h.Post(\"images\/\"+name+\"\/tag?repo=\"+tag+\"&force=1\", \"application\/json\", b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 201 {\n\t\tlog.Print(resp)\n\t\treturn errors.New(\"Code not 201\")\n\t}\n\n\treturn nil\n\n}\n\n\/\/ PushImage pushes an image to a docker index\nfunc (D *Docker) PushImage(w io.Writer, name string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\n auth := base64.StdEncoding.EncodeToString([]byte(\"{}\"))\n header := make(http.Header)\n header.Set(\"X-Registry-Auth\", auth)\n\n url := \"images\/\"+name+\"\/push\"\n\n\tresp, err := D.h.PostHeader(url,\n\t\t\"application\/json\", b, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != 200 {\n\n\t\t_, err = io.Copy(w, resp.Body)\n if err != nil {\n io.WriteString(w, err.Error())\n }\n\n io.WriteString(w, resp.Header.Get(\"Location\")+\"\\n\")\n io.WriteString(w, url + \"\\n\")\n\n\t\ts := fmt.Sprintf(\"Push image status code is not 200 it is %d\",\n\t\t\tresp.StatusCode)\n\t\treturn errors.New(s)\n\t}\n\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(w, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ buildImage takes a tarfile, and builds it\nfunc (D *Docker) BuildImage(fd io.Reader, name string) (err error) {\n\n\tv := fmt.Sprintf(\"%d\", time.Now().Unix())\n\tresp, err := D.h.Post(\"build?t=\"+name+\"%3A\"+v,\n\t\t\"application\/tar\", fd)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\n\treturn D.TagImage(name+\"%3A\"+v, name)\n}\n\n\/\/ loadImage pulls a specified image into a docker instance\nfunc (D *Docker) LoadImage(imagename string) (err error) {\n\tb := bytes.NewBuffer(nil)\n\tresp, err := D.h.Post(\"images\/create?fromImage=\"+imagename,\n\t\t\"text\", b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tLogReader(resp.Body)\n\tif resp.StatusCode != 200 {\n\t\tlog.Printf(\"create status code is not 200 %s\", resp.StatusCode)\n\t}\n\n\tlog.Printf(\"Image fetched %s\", imagename)\n\treturn nil\n}\n\n\/\/ ListContainers gives the state for a specific docker container\nfunc (D *Docker) ListContainers() (c []Container, err error) {\n\tresp, err := D.h.Get(\"containers\/json\")\n\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn c, errors.New(\"Response code is not 200\")\n\t}\n\n\tmessage, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcontainers := &c\n\terr = json.Unmarshal(message, containers)\n\treturn\n}\n\n\/\/ ListImages gives the state of the images for a specific Docker container\nfunc (D *Docker) ListImages() (img []Image, err error) {\n\tresp, err := D.h.Get(\"images\/json\")\n\t\n\t\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn img, errors.New(\"Response code is not 200\")\n\t}\n\t\n\tmessage, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\timages := &img\n\terr = json.Unmarshal(message, images)\n\treturn\n}\n\/\/ ImageRunning states whether an image is running on a docker instance\nfunc (D *Docker) ImageRunning(imagename string) (running bool, id string, err error) {\n\n\tcontainers, err := D.ListContainers()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tfor _, v := range containers {\n\t\tif strings.SplitN(v.Image, \":\", 2)[0] == imagename {\n\t\t\treturn true, v.Id, nil\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The following enables go generate to generate the doc.go file.\n\/\/go:generate go run $JIRI_ROOT\/release\/go\/src\/v.io\/x\/lib\/cmdline\/testdata\/gendoc.go -env=CMDLINE_PREFIX=jiri . -help\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"v.io\/jiri\/jiri\"\n\t\"v.io\/jiri\/profiles\"\n\t\"v.io\/jiri\/runutil\"\n\t\"v.io\/jiri\/tool\"\n\t\"v.io\/jiri\/util\"\n\t\"v.io\/x\/devtools\/internal\/golib\"\n\t\"v.io\/x\/devtools\/jiri-v23-profile\/v23_profile\"\n\t\"v.io\/x\/lib\/cmdline\"\n)\n\nvar cmd = &cmdline.Command{\n\tRunner: jiri.RunnerFunc(runGo),\n\tName: \"dockergo\",\n\tShort: \"Execute the go command in a docker container\",\n\tLong: `\nExecutes a Go command in a docker container. This is primarily aimed at the\nbuilds of Linux binaries and libraries where there is a dependence on cgo. This\nallows for compilation (and cross-compilation) without polluting the host\nfilesystem with compilers, C-headers, libraries etc. as dependencies are\nencapsulated in the docker image.\n\nThe docker image is expected to have the appropriate C-compiler\nand any pre-built headers\/libraries to be linked in. It is also\nexpected to have the appropriate environment variables (such as CGO_ENABLED,\nCGO_CFLAGS etc) set.\n\nSample usage on *all* platforms (Linux\/OS X):\n\nBuild the \".\/foo\" package for the host architecture and linux (command works\nfrom OS X as well):\n\n jiri-dockergo build\n\nBuild for linux\/arm from any host (including OS X):\n\n GOARCH=arm jiri-dockergo build\n\nFor more information on docker see https:\/\/www.docker.com.\n\nFor more information on the design of this particular tool including the\ndefinitions of default images, see:\nhttps:\/\/docs.google.com\/document\/d\/1Ud-QUVOjsaya57kgq0j24wDwTzKKE7o_PShQQs0DR5w\/edit?usp=sharing\n\nWhile the targets are built using the toolchain in the docker image, a local Go\ninstallation is still required for Vanadium-specific compilation prep work -\nsuch as invoking the VDL compiler on packages to generate up-to-date .go files.\n`,\n\tArgsName: \"<arg ...>\",\n\tArgsLong: \"<arg ...> is a list of arguments for the go tool.\",\n}\n\nvar (\n\timageFlag string\n\tmanifestFlag, profilesFlag string\n\tprofilesModeFlag profiles.ProfilesMode\n\ttargetFlag profiles.Target\n\textraLDFlags string\n\tmergePoliciesFlag profiles.MergePolicies\n\tverboseFlag bool\n)\n\nconst dockerBin = \"docker\"\n\nfunc init() {\n\ttool.InitializeRunFlags(&cmd.Flags)\n\tmergePoliciesFlag = profiles.JiriMergePolicies()\n\tprofiles.RegisterProfileFlags(&cmd.Flags, &profilesModeFlag, &manifestFlag, &profilesFlag, v23_profile.DefaultManifestFilename, &mergePoliciesFlag,\n\t\t&targetFlag)\n\tflag.StringVar(&imageFlag, \"image\", \"\", \"Name of the docker image to use. If empty, the tool will automatically select an image based on the environment variables, possibly edited by the profile\")\n\tflag.StringVar(&extraLDFlags, \"extra-ldflags\", \"\", golib.ExtraLDFlagsFlagDescription)\n}\n\nfunc runGo(jirix *jiri.X, args []string) error {\n\tif len(args) == 0 {\n\t\treturn jirix.UsageErrorf(\"not enough arguments\")\n\t}\n\tch, err := profiles.NewConfigHelper(jirix, profilesModeFlag, manifestFlag)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprofileNames := profiles.InitProfilesFromFlag(profilesFlag, profiles.DoNotAppendJiriProfile)\n\tif err := ch.ValidateRequestedProfilesAndTarget(profileNames, targetFlag); err != nil {\n\t\treturn err\n\t}\n\tch.MergeEnvFromProfiles(mergePoliciesFlag, profiles.NativeTarget(), \"jiri\")\n\tif verboseFlag {\n\t\tfmt.Fprintf(jirix.Stdout(), \"Merged profiles: %v\\n\", profileNames)\n\t\tfmt.Fprintf(jirix.Stdout(), \"Merge policies: %v\\n\", mergePoliciesFlag)\n\t\tfmt.Fprintf(jirix.Stdout(), \"%v\\n\", strings.Join(ch.ToSlice(), \"\\n\"))\n\t}\n\tenvMap := ch.ToMap()\n\t\/\/ docker can only be used to build linux binaries\n\tif os, exists := envMap[\"GOOS\"]; exists && os != \"linux\" {\n\t\treturn fmt.Errorf(\"Only GOOS=linux is supported, not %q\", os)\n\t}\n\tenvMap[\"GOOS\"] = \"linux\"\n\t\/\/ default to the local architecture\n\tif _, exists := envMap[\"GOARCH\"]; !exists {\n\t\tenvMap[\"GOARCH\"] = runtime.GOARCH\n\t}\n\tif _, err := runutil.LookPath(dockerBin, envMap); err != nil {\n\t\treturn fmt.Errorf(\"%v not found in path: %v\", dockerBin, err)\n\t}\n\timg, err := image(envMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar installSuffix string\n\tif targetFlag.OS() == \"fnl\" {\n\t\tinstallSuffix = \"musl\"\n\t}\n\tif args, err = golib.PrepareGo(jirix, envMap, args, extraLDFlags, installSuffix); err != nil {\n\t\treturn err\n\t}\n\tif jirix.Verbose() {\n\t\tfmt.Fprintf(jirix.Stderr(), \"Using docker image: %q\\n\", img)\n\t}\n\t\/\/ Run the go tool.\n\treturn runDockerGo(jirix, img, envMap, args)\n}\n\n\/\/ image returns the name of the docker image to use, or the empty string if\n\/\/ a containerized build environment should not be used.\nfunc image(env map[string]string) (string, error) {\n\tif image := imageFlag; len(image) > 0 {\n\t\treturn image, nil\n\t}\n\tswitch goarch := env[\"GOARCH\"]; goarch {\n\tcase \"arm\", \"armhf\":\n\t\treturn \"asimshankar\/testing:armhf\", nil\n\tcase \"amd64\":\n\t\treturn \"asimshankar\/testing:amd64\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unable to auto select docker image to use for GOARCH=%q\", goarch)\n\t}\n}\n\n\/\/ runDockerGo runs the \"go\" command on this node via docker, using the\n\/\/ provided docker image.\n\/\/\n\/\/ Note that many Go-compiler related environment variables (CGO_ENABLED,\n\/\/ CGO_CXXFLAGS etc.) will be ignored, as those are expected to be set in the\n\/\/ docker container.\n\/\/ TODO(ashasnkar): CGO_CFLAGS, CGO_LDFLAGS and CGO_CXXFLAGS should be checked\n\/\/ and possibly used?\n\/\/\n\/\/ In order to make users of \"jiri-dockergo\" feel like they are using a local\n\/\/ build environment (i.e., making the use of docker transparent) a few things\n\/\/ need to be done:\n\/\/ (a) All directores in GOPATH on the host need to be mounted in the container.\n\/\/ These are mounted under \/jiri\/gopath<index>\n\/\/ $JIRI_ROOT is mounted specially under \/jiri\/root\n\/\/ (b) The current working directory needs to be mounted in the container\n\/\/ (so that relative paths work).\n\/\/ Mounted under \/jiri\/workdir\n\/\/ (c) On a Linux host (where docker runs natively, not inside a virtual machine),\n\/\/ the -u flag needs to be provided to \"docker run\" to ensure that files written out\n\/\/ to the host match the UID and GID of the filesystem as it would if the build was\n\/\/ run on the host.\n\/\/\n\/\/ All this is best effort - there are still command invocations that will not\n\/\/ work via docker.\n\/\/ For example: jiri-go build -o \/tmp\/foo .\n\/\/ will succeed but the output will not appear in \/tmp\/foo on the host (it will\n\/\/ be written to \/tmp\/foo on the container and then vanish when the container\n\/\/ dies).\n\/\/ TODO(ashankar): This and other similar cases can be dealt with by inspecting \"args\"\n\/\/ and handling such cases, but that is left as a future excercise.\nfunc runDockerGo(jirix *jiri.X, image string, env map[string]string, args []string) error {\n\tvar (\n\t\tvolumeroot = \"\/jiri\" \/\/ All volumes are mounted inside here\n\t\tjiriroot = fmt.Sprintf(\"%v\/root\", volumeroot)\n\t\tgopath []string\n\t\tctr int\n\t\tworkdir string\n\t\thostWorkdir = env[\"PWD\"]\n\t\tdockerargs = []string{\"run\", \"-v\", fmt.Sprintf(\"%v:%v\", jirix.Root, jiriroot)}\n\t)\n\tif strings.HasPrefix(hostWorkdir, jirix.Root) {\n\t\tworkdir = strings.Replace(hostWorkdir, jirix.Root, jiriroot, 1)\n\t}\n\tfor _, p := range strings.Split(env[\"GOPATH\"], \":\") {\n\t\tif strings.HasPrefix(p, jirix.Root) {\n\t\t\tgopath = append(gopath, strings.Replace(p, jirix.Root, jiriroot, 1))\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ A non $JIRI_ROOT entry in the GOPATH, include that in the volumes.\n\t\tcdir := fmt.Sprintf(\"%v\/gopath%d\", volumeroot, ctr)\n\t\tctr++\n\t\tdockerargs = append(dockerargs, \"-v\", fmt.Sprintf(\"%v:%v\", p, cdir))\n\t\tgopath = append(gopath, cdir)\n\t\tif strings.HasPrefix(hostWorkdir, p) {\n\t\t\tworkdir = strings.Replace(hostWorkdir, p, cdir, 1)\n\t\t}\n\t}\n\tif len(workdir) == 0 {\n\t\t\/\/ Working directory on host is outside the directores in GOPATH.\n\t\tworkdir = fmt.Sprintf(\"%v\/workdir\", volumeroot)\n\t\tdockerargs = append(dockerargs, \"-v\", fmt.Sprintf(\"%v:%v\", hostWorkdir, workdir))\n\t}\n\t\/\/ Figure out the uid\/gid to run the container with so that files\n\t\/\/ written out to the host filesystem have the right owner\/group.\n\tif gid, ok := fileGid(jirix.Root); ok {\n\t\tdockerargs = append(dockerargs, \"-u\", fmt.Sprintf(\"%d:%d\", os.Getuid(), gid))\n\t}\n\tdockerargs = append(dockerargs,\n\t\t\"-e\", fmt.Sprintf(\"GOPATH=%v\", strings.Join(gopath, \":\")),\n\t\t\"-w\", workdir,\n\t\timage,\n\t\t\"go\")\n\tdockerargs = append(dockerargs, args...)\n\treturn util.TranslateExitCode(jirix.Run().Command(dockerBin, dockerargs...))\n}\n\nfunc main() {\n\tcmdline.Main(cmd)\n}\n<commit_msg>v.io\/x.devtools\/jiri-dockergo: use runutil.Sequence.<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The following enables go generate to generate the doc.go file.\n\/\/go:generate go run $JIRI_ROOT\/release\/go\/src\/v.io\/x\/lib\/cmdline\/testdata\/gendoc.go -env=CMDLINE_PREFIX=jiri . -help\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"v.io\/jiri\/jiri\"\n\t\"v.io\/jiri\/profiles\"\n\t\"v.io\/jiri\/runutil\"\n\t\"v.io\/jiri\/tool\"\n\t\"v.io\/jiri\/util\"\n\t\"v.io\/x\/devtools\/internal\/golib\"\n\t\"v.io\/x\/devtools\/jiri-v23-profile\/v23_profile\"\n\t\"v.io\/x\/lib\/cmdline\"\n)\n\nvar cmd = &cmdline.Command{\n\tRunner: jiri.RunnerFunc(runGo),\n\tName: \"dockergo\",\n\tShort: \"Execute the go command in a docker container\",\n\tLong: `\nExecutes a Go command in a docker container. This is primarily aimed at the\nbuilds of Linux binaries and libraries where there is a dependence on cgo. This\nallows for compilation (and cross-compilation) without polluting the host\nfilesystem with compilers, C-headers, libraries etc. as dependencies are\nencapsulated in the docker image.\n\nThe docker image is expected to have the appropriate C-compiler\nand any pre-built headers\/libraries to be linked in. It is also\nexpected to have the appropriate environment variables (such as CGO_ENABLED,\nCGO_CFLAGS etc) set.\n\nSample usage on *all* platforms (Linux\/OS X):\n\nBuild the \".\/foo\" package for the host architecture and linux (command works\nfrom OS X as well):\n\n jiri-dockergo build\n\nBuild for linux\/arm from any host (including OS X):\n\n GOARCH=arm jiri-dockergo build\n\nFor more information on docker see https:\/\/www.docker.com.\n\nFor more information on the design of this particular tool including the\ndefinitions of default images, see:\nhttps:\/\/docs.google.com\/document\/d\/1Ud-QUVOjsaya57kgq0j24wDwTzKKE7o_PShQQs0DR5w\/edit?usp=sharing\n\nWhile the targets are built using the toolchain in the docker image, a local Go\ninstallation is still required for Vanadium-specific compilation prep work -\nsuch as invoking the VDL compiler on packages to generate up-to-date .go files.\n`,\n\tArgsName: \"<arg ...>\",\n\tArgsLong: \"<arg ...> is a list of arguments for the go tool.\",\n}\n\nvar (\n\timageFlag string\n\tmanifestFlag, profilesFlag string\n\tprofilesModeFlag profiles.ProfilesMode\n\ttargetFlag profiles.Target\n\textraLDFlags string\n\tmergePoliciesFlag profiles.MergePolicies\n\tverboseFlag bool\n)\n\nconst dockerBin = \"docker\"\n\nfunc init() {\n\ttool.InitializeRunFlags(&cmd.Flags)\n\tmergePoliciesFlag = profiles.JiriMergePolicies()\n\tprofiles.RegisterProfileFlags(&cmd.Flags, &profilesModeFlag, &manifestFlag, &profilesFlag, v23_profile.DefaultManifestFilename, &mergePoliciesFlag,\n\t\t&targetFlag)\n\tflag.StringVar(&imageFlag, \"image\", \"\", \"Name of the docker image to use. If empty, the tool will automatically select an image based on the environment variables, possibly edited by the profile\")\n\tflag.StringVar(&extraLDFlags, \"extra-ldflags\", \"\", golib.ExtraLDFlagsFlagDescription)\n}\n\nfunc runGo(jirix *jiri.X, args []string) error {\n\tif len(args) == 0 {\n\t\treturn jirix.UsageErrorf(\"not enough arguments\")\n\t}\n\tch, err := profiles.NewConfigHelper(jirix, profilesModeFlag, manifestFlag)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprofileNames := profiles.InitProfilesFromFlag(profilesFlag, profiles.DoNotAppendJiriProfile)\n\tif err := ch.ValidateRequestedProfilesAndTarget(profileNames, targetFlag); err != nil {\n\t\treturn err\n\t}\n\tch.MergeEnvFromProfiles(mergePoliciesFlag, profiles.NativeTarget(), \"jiri\")\n\tif verboseFlag {\n\t\tfmt.Fprintf(jirix.Stdout(), \"Merged profiles: %v\\n\", profileNames)\n\t\tfmt.Fprintf(jirix.Stdout(), \"Merge policies: %v\\n\", mergePoliciesFlag)\n\t\tfmt.Fprintf(jirix.Stdout(), \"%v\\n\", strings.Join(ch.ToSlice(), \"\\n\"))\n\t}\n\tenvMap := ch.ToMap()\n\t\/\/ docker can only be used to build linux binaries\n\tif os, exists := envMap[\"GOOS\"]; exists && os != \"linux\" {\n\t\treturn fmt.Errorf(\"Only GOOS=linux is supported, not %q\", os)\n\t}\n\tenvMap[\"GOOS\"] = \"linux\"\n\t\/\/ default to the local architecture\n\tif _, exists := envMap[\"GOARCH\"]; !exists {\n\t\tenvMap[\"GOARCH\"] = runtime.GOARCH\n\t}\n\tif _, err := runutil.LookPath(dockerBin, envMap); err != nil {\n\t\treturn fmt.Errorf(\"%v not found in path: %v\", dockerBin, err)\n\t}\n\timg, err := image(envMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar installSuffix string\n\tif targetFlag.OS() == \"fnl\" {\n\t\tinstallSuffix = \"musl\"\n\t}\n\tif args, err = golib.PrepareGo(jirix, envMap, args, extraLDFlags, installSuffix); err != nil {\n\t\treturn err\n\t}\n\tif jirix.Verbose() {\n\t\tfmt.Fprintf(jirix.Stderr(), \"Using docker image: %q\\n\", img)\n\t}\n\t\/\/ Run the go tool.\n\treturn runDockerGo(jirix, img, envMap, args)\n}\n\n\/\/ image returns the name of the docker image to use, or the empty string if\n\/\/ a containerized build environment should not be used.\nfunc image(env map[string]string) (string, error) {\n\tif image := imageFlag; len(image) > 0 {\n\t\treturn image, nil\n\t}\n\tswitch goarch := env[\"GOARCH\"]; goarch {\n\tcase \"arm\", \"armhf\":\n\t\treturn \"asimshankar\/testing:armhf\", nil\n\tcase \"amd64\":\n\t\treturn \"asimshankar\/testing:amd64\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unable to auto select docker image to use for GOARCH=%q\", goarch)\n\t}\n}\n\n\/\/ runDockerGo runs the \"go\" command on this node via docker, using the\n\/\/ provided docker image.\n\/\/\n\/\/ Note that many Go-compiler related environment variables (CGO_ENABLED,\n\/\/ CGO_CXXFLAGS etc.) will be ignored, as those are expected to be set in the\n\/\/ docker container.\n\/\/ TODO(ashasnkar): CGO_CFLAGS, CGO_LDFLAGS and CGO_CXXFLAGS should be checked\n\/\/ and possibly used?\n\/\/\n\/\/ In order to make users of \"jiri-dockergo\" feel like they are using a local\n\/\/ build environment (i.e., making the use of docker transparent) a few things\n\/\/ need to be done:\n\/\/ (a) All directores in GOPATH on the host need to be mounted in the container.\n\/\/ These are mounted under \/jiri\/gopath<index>\n\/\/ $JIRI_ROOT is mounted specially under \/jiri\/root\n\/\/ (b) The current working directory needs to be mounted in the container\n\/\/ (so that relative paths work).\n\/\/ Mounted under \/jiri\/workdir\n\/\/ (c) On a Linux host (where docker runs natively, not inside a virtual machine),\n\/\/ the -u flag needs to be provided to \"docker run\" to ensure that files written out\n\/\/ to the host match the UID and GID of the filesystem as it would if the build was\n\/\/ run on the host.\n\/\/\n\/\/ All this is best effort - there are still command invocations that will not\n\/\/ work via docker.\n\/\/ For example: jiri-go build -o \/tmp\/foo .\n\/\/ will succeed but the output will not appear in \/tmp\/foo on the host (it will\n\/\/ be written to \/tmp\/foo on the container and then vanish when the container\n\/\/ dies).\n\/\/ TODO(ashankar): This and other similar cases can be dealt with by inspecting \"args\"\n\/\/ and handling such cases, but that is left as a future excercise.\nfunc runDockerGo(jirix *jiri.X, image string, env map[string]string, args []string) error {\n\tvar (\n\t\tvolumeroot = \"\/jiri\" \/\/ All volumes are mounted inside here\n\t\tjiriroot = fmt.Sprintf(\"%v\/root\", volumeroot)\n\t\tgopath []string\n\t\tctr int\n\t\tworkdir string\n\t\thostWorkdir = env[\"PWD\"]\n\t\tdockerargs = []string{\"run\", \"-v\", fmt.Sprintf(\"%v:%v\", jirix.Root, jiriroot)}\n\t)\n\tif strings.HasPrefix(hostWorkdir, jirix.Root) {\n\t\tworkdir = strings.Replace(hostWorkdir, jirix.Root, jiriroot, 1)\n\t}\n\tfor _, p := range strings.Split(env[\"GOPATH\"], \":\") {\n\t\tif strings.HasPrefix(p, jirix.Root) {\n\t\t\tgopath = append(gopath, strings.Replace(p, jirix.Root, jiriroot, 1))\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ A non $JIRI_ROOT entry in the GOPATH, include that in the volumes.\n\t\tcdir := fmt.Sprintf(\"%v\/gopath%d\", volumeroot, ctr)\n\t\tctr++\n\t\tdockerargs = append(dockerargs, \"-v\", fmt.Sprintf(\"%v:%v\", p, cdir))\n\t\tgopath = append(gopath, cdir)\n\t\tif strings.HasPrefix(hostWorkdir, p) {\n\t\t\tworkdir = strings.Replace(hostWorkdir, p, cdir, 1)\n\t\t}\n\t}\n\tif len(workdir) == 0 {\n\t\t\/\/ Working directory on host is outside the directores in GOPATH.\n\t\tworkdir = fmt.Sprintf(\"%v\/workdir\", volumeroot)\n\t\tdockerargs = append(dockerargs, \"-v\", fmt.Sprintf(\"%v:%v\", hostWorkdir, workdir))\n\t}\n\t\/\/ Figure out the uid\/gid to run the container with so that files\n\t\/\/ written out to the host filesystem have the right owner\/group.\n\tif gid, ok := fileGid(jirix.Root); ok {\n\t\tdockerargs = append(dockerargs, \"-u\", fmt.Sprintf(\"%d:%d\", os.Getuid(), gid))\n\t}\n\tdockerargs = append(dockerargs,\n\t\t\"-e\", fmt.Sprintf(\"GOPATH=%v\", strings.Join(gopath, \":\")),\n\t\t\"-w\", workdir,\n\t\timage,\n\t\t\"go\")\n\tdockerargs = append(dockerargs, args...)\n\terr := jirix.NewSeq().Last(dockerBin, dockerargs...)\n\treturn util.TranslateExitCode(runutil.GetOriginalError(err))\n}\n\nfunc main() {\n\tcmdline.Main(cmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/buaazp\/uq\/admin\"\n\t\"github.com\/buaazp\/uq\/entry\"\n\t\"github.com\/buaazp\/uq\/queue\"\n\t\"github.com\/buaazp\/uq\/store\"\n\t. \"github.com\/buaazp\/uq\/utils\"\n)\n\nvar (\n\tip string\n\thost string\n\tport int\n\tadminPort int\n\tpprofPort int\n\tprotocol string\n\tdb string\n\tdir string\n\tlogFile string\n\tetcd string\n\tcluster string\n)\n\nfunc init() {\n\tflag.StringVar(&ip, \"ip\", \"127.0.0.1\", \"self ip\/host address\")\n\tflag.StringVar(&host, \"host\", \"0.0.0.0\", \"listen ip\")\n\tflag.IntVar(&port, \"port\", 8808, \"listen port\")\n\tflag.IntVar(&adminPort, \"admin-port\", 8809, \"admin listen port\")\n\tflag.IntVar(&pprofPort, \"pprof-port\", 8080, \"pprof listen port\")\n\tflag.StringVar(&protocol, \"protocol\", \"redis\", \"frontend interface type [redis\/mc\/http]\")\n\tflag.StringVar(&db, \"db\", \"goleveldb\", \"backend storage type [goleveldb\/memdb]\")\n\tflag.StringVar(&dir, \"dir\", \".\/data\", \"backend storage path\")\n\tflag.StringVar(&logFile, \"log\", \"\", \"uq log path\")\n\tflag.StringVar(&etcd, \"etcd\", \"\", \"etcd service location\")\n\tflag.StringVar(&cluster, \"cluster\", \"uq\", \"cluster name in etcd\")\n}\n\nfunc belong(single string, team []string) bool {\n\tfor _, one := range team {\n\t\tif single == one {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc checkArgs() bool {\n\tif !belong(db, []string{\"goleveldb\", \"memdb\"}) {\n\t\tfmt.Printf(\"db mode %s is not supported!\\n\", db)\n\t\treturn false\n\t}\n\tif !belong(protocol, []string{\"redis\", \"mc\", \"http\"}) {\n\t\tfmt.Printf(\"protocol %s is not supported!\\n\", protocol)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tdefer func() {\n\t\tfmt.Printf(\"byebye! uq see u later! 😄\\n\")\n\t}()\n\n\tflag.Parse()\n\n\tif !checkArgs() {\n\t\treturn\n\t}\n\n\terr := os.MkdirAll(dir, 0755)\n\tif err != nil {\n\t\tfmt.Printf(\"mkdir %s error: %s\\n\", dir, err)\n\t\treturn\n\t}\n\tif logFile == \"\" {\n\t\tlogFile = path.Join(dir, \"uq.log\")\n\t}\n\tlogf, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tfmt.Printf(\"log open error: %s\\n\", err)\n\t\treturn\n\t}\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags | log.Lmicroseconds)\n\tlog.SetPrefix(\"[uq] \")\n\tlog.SetOutput(logf)\n\n\tfmt.Printf(\"uq started! 😄\\n\")\n\n\tvar storage store.Storage\n\tif db == \"goleveldb\" {\n\t\tdbpath := path.Clean(path.Join(dir, \"uq.db\"))\n\t\tlog.Printf(\"dbpath: %s\", dbpath)\n\t\tstorage, err = store.NewLevelStore(dbpath)\n\t} else if db == \"memdb\" {\n\t\tstorage, err = store.NewMemStore()\n\t} else {\n\t\tfmt.Printf(\"store %s is not supported!\\n\", db)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"store init error: %s\\n\", err)\n\t\treturn\n\t}\n\n\tvar etcdServers []string\n\tif etcd != \"\" {\n\t\tetcdServers = strings.Split(etcd, \",\")\n\t\tfor i, etcdServer := range etcdServers {\n\t\t\tif !strings.HasPrefix(etcdServer, \"http:\/\/\") {\n\t\t\t\tetcdServers[i] = \"http:\/\/\" + etcdServer\n\t\t\t}\n\t\t}\n\t}\n\tvar messageQueue queue.MessageQueue\n\t\/\/ messageQueue, err = queue.NewFakeQueue(storage, ip, port, etcdServers, cluster)\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Printf(\"queue init error: %s\\n\", err)\n\t\/\/ \tstorage.Close()\n\t\/\/ \treturn\n\t\/\/ }\n\tmessageQueue, err = queue.NewUnitedQueue(storage, ip, port, etcdServers, cluster)\n\tif err != nil {\n\t\tfmt.Printf(\"queue init error: %s\\n\", err)\n\t\tstorage.Close()\n\t\treturn\n\t}\n\n\tvar entrance entry.Entrance\n\tif protocol == \"http\" {\n\t\tentrance, err = entry.NewHttpEntry(host, port, messageQueue)\n\t} else if protocol == \"mc\" {\n\t\tentrance, err = entry.NewMcEntry(host, port, messageQueue)\n\t} else if protocol == \"redis\" {\n\t\tentrance, err = entry.NewRedisEntry(host, port, messageQueue)\n\t} else {\n\t\tfmt.Printf(\"protocol %s is not supported!\\n\", protocol)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"entry init error: %s\\n\", err)\n\t\tmessageQueue.Close()\n\t\treturn\n\t}\n\n\tstop := make(chan os.Signal)\n\tentryFailed := make(chan bool)\n\tadminFailed := make(chan bool)\n\tsignal.Notify(stop, syscall.SIGINT, os.Interrupt, os.Kill)\n\tvar wg sync.WaitGroup\n\n\t\/\/ start entrance server\n\tgo func(c chan bool) {\n\t\twg.Add(1)\n\t\tdefer wg.Done()\n\t\terr := entrance.ListenAndServe()\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), \"stopped\") {\n\t\t\t\tfmt.Printf(\"entry listen error: %s\\n\", err)\n\t\t\t}\n\t\t\tclose(c)\n\t\t}\n\t}(entryFailed)\n\n\tvar adminServer admin.AdminServer\n\tadminServer, err = admin.NewAdminServer(host, adminPort, messageQueue)\n\tif err != nil {\n\t\tfmt.Printf(\"admin init error: %s\\n\", err)\n\t\tentrance.Stop()\n\t\treturn\n\t}\n\n\t\/\/ start admin server\n\tgo func(c chan bool) {\n\t\twg.Add(1)\n\t\tdefer wg.Done()\n\t\terr := adminServer.ListenAndServe()\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), \"stopped\") {\n\t\t\t\tfmt.Printf(\"entry listen error: %s\\n\", err)\n\t\t\t}\n\t\t\tclose(c)\n\t\t}\n\t}(adminFailed)\n\n\t\/\/ start pprof server\n\tgo func() {\n\t\taddr := Addrcat(host, pprofPort)\n\t\tlog.Println(http.ListenAndServe(addr, nil))\n\t}()\n\n\tselect {\n\tcase <-stop:\n\t\t\/\/ log.Printf(\"got signal: %v\", signal)\n\t\tadminServer.Stop()\n\t\tlog.Printf(\"admin server stoped.\")\n\t\tentrance.Stop()\n\t\tlog.Printf(\"entrance stoped.\")\n\tcase <-entryFailed:\n\t\tmessageQueue.Close()\n\tcase <-adminFailed:\n\t\tentrance.Stop()\n\t}\n\twg.Wait()\n}\n<commit_msg>Supported rocksdb for cmd args<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/buaazp\/uq\/admin\"\n\t\"github.com\/buaazp\/uq\/entry\"\n\t\"github.com\/buaazp\/uq\/queue\"\n\t\"github.com\/buaazp\/uq\/store\"\n\t. \"github.com\/buaazp\/uq\/utils\"\n)\n\nvar (\n\tip string\n\thost string\n\tport int\n\tadminPort int\n\tpprofPort int\n\tprotocol string\n\tdb string\n\tdir string\n\tlogFile string\n\tetcd string\n\tcluster string\n)\n\nfunc init() {\n\tflag.StringVar(&ip, \"ip\", \"127.0.0.1\", \"self ip\/host address\")\n\tflag.StringVar(&host, \"host\", \"0.0.0.0\", \"listen ip\")\n\tflag.IntVar(&port, \"port\", 8808, \"listen port\")\n\tflag.IntVar(&adminPort, \"admin-port\", 8809, \"admin listen port\")\n\tflag.IntVar(&pprofPort, \"pprof-port\", 8080, \"pprof listen port\")\n\tflag.StringVar(&protocol, \"protocol\", \"redis\", \"frontend interface type [redis\/mc\/http]\")\n\tflag.StringVar(&db, \"db\", \"rocksdb\", \"backend storage type [rocksdb\/goleveldb\/memdb]\")\n\tflag.StringVar(&dir, \"dir\", \".\/data\", \"backend storage path\")\n\tflag.StringVar(&logFile, \"log\", \"\", \"uq log path\")\n\tflag.StringVar(&etcd, \"etcd\", \"\", \"etcd service location\")\n\tflag.StringVar(&cluster, \"cluster\", \"uq\", \"cluster name in etcd\")\n}\n\nfunc belong(single string, team []string) bool {\n\tfor _, one := range team {\n\t\tif single == one {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc checkArgs() bool {\n\tif !belong(db, []string{\"rocksdb\", \"goleveldb\", \"memdb\"}) {\n\t\tfmt.Printf(\"db mode %s is not supported!\\n\", db)\n\t\treturn false\n\t}\n\tif !belong(protocol, []string{\"redis\", \"mc\", \"http\"}) {\n\t\tfmt.Printf(\"protocol %s is not supported!\\n\", protocol)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tdefer func() {\n\t\tfmt.Printf(\"byebye! uq see u later! 😄\\n\")\n\t}()\n\n\tflag.Parse()\n\n\tif !checkArgs() {\n\t\treturn\n\t}\n\n\terr := os.MkdirAll(dir, 0755)\n\tif err != nil {\n\t\tfmt.Printf(\"mkdir %s error: %s\\n\", dir, err)\n\t\treturn\n\t}\n\tif logFile == \"\" {\n\t\tlogFile = path.Join(dir, \"uq.log\")\n\t}\n\tlogf, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tfmt.Printf(\"log open error: %s\\n\", err)\n\t\treturn\n\t}\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags | log.Lmicroseconds)\n\tlog.SetPrefix(\"[uq] \")\n\tlog.SetOutput(logf)\n\n\tfmt.Printf(\"uq started! 😄\\n\")\n\n\tvar storage store.Storage\n\tif db == \"rocksdb\" {\n\t\tdbpath := path.Clean(path.Join(dir, \"uq.db\"))\n\t\tlog.Printf(\"dbpath: %s\", dbpath)\n\t\tstorage, err = store.NewRockStore(dbpath)\n\t} else if db == \"goleveldb\" {\n\t\tdbpath := path.Clean(path.Join(dir, \"uq.db\"))\n\t\tlog.Printf(\"dbpath: %s\", dbpath)\n\t\tstorage, err = store.NewLevelStore(dbpath)\n\t} else if db == \"memdb\" {\n\t\tstorage, err = store.NewMemStore()\n\t} else {\n\t\tfmt.Printf(\"store %s is not supported!\\n\", db)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"store init error: %s\\n\", err)\n\t\treturn\n\t}\n\n\tvar etcdServers []string\n\tif etcd != \"\" {\n\t\tetcdServers = strings.Split(etcd, \",\")\n\t\tfor i, etcdServer := range etcdServers {\n\t\t\tif !strings.HasPrefix(etcdServer, \"http:\/\/\") {\n\t\t\t\tetcdServers[i] = \"http:\/\/\" + etcdServer\n\t\t\t}\n\t\t}\n\t}\n\tvar messageQueue queue.MessageQueue\n\t\/\/ messageQueue, err = queue.NewFakeQueue(storage, ip, port, etcdServers, cluster)\n\t\/\/ if err != nil {\n\t\/\/ \tfmt.Printf(\"queue init error: %s\\n\", err)\n\t\/\/ \tstorage.Close()\n\t\/\/ \treturn\n\t\/\/ }\n\tmessageQueue, err = queue.NewUnitedQueue(storage, ip, port, etcdServers, cluster)\n\tif err != nil {\n\t\tfmt.Printf(\"queue init error: %s\\n\", err)\n\t\tstorage.Close()\n\t\treturn\n\t}\n\n\tvar entrance entry.Entrance\n\tif protocol == \"http\" {\n\t\tentrance, err = entry.NewHttpEntry(host, port, messageQueue)\n\t} else if protocol == \"mc\" {\n\t\tentrance, err = entry.NewMcEntry(host, port, messageQueue)\n\t} else if protocol == \"redis\" {\n\t\tentrance, err = entry.NewRedisEntry(host, port, messageQueue)\n\t} else {\n\t\tfmt.Printf(\"protocol %s is not supported!\\n\", protocol)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"entry init error: %s\\n\", err)\n\t\tmessageQueue.Close()\n\t\treturn\n\t}\n\n\tstop := make(chan os.Signal)\n\tentryFailed := make(chan bool)\n\tadminFailed := make(chan bool)\n\tsignal.Notify(stop, syscall.SIGINT, os.Interrupt, os.Kill)\n\tvar wg sync.WaitGroup\n\n\t\/\/ start entrance server\n\tgo func(c chan bool) {\n\t\twg.Add(1)\n\t\tdefer wg.Done()\n\t\terr := entrance.ListenAndServe()\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), \"stopped\") {\n\t\t\t\tfmt.Printf(\"entry listen error: %s\\n\", err)\n\t\t\t}\n\t\t\tclose(c)\n\t\t}\n\t}(entryFailed)\n\n\tvar adminServer admin.AdminServer\n\tadminServer, err = admin.NewAdminServer(host, adminPort, messageQueue)\n\tif err != nil {\n\t\tfmt.Printf(\"admin init error: %s\\n\", err)\n\t\tentrance.Stop()\n\t\treturn\n\t}\n\n\t\/\/ start admin server\n\tgo func(c chan bool) {\n\t\twg.Add(1)\n\t\tdefer wg.Done()\n\t\terr := adminServer.ListenAndServe()\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), \"stopped\") {\n\t\t\t\tfmt.Printf(\"entry listen error: %s\\n\", err)\n\t\t\t}\n\t\t\tclose(c)\n\t\t}\n\t}(adminFailed)\n\n\t\/\/ start pprof server\n\tgo func() {\n\t\taddr := Addrcat(host, pprofPort)\n\t\tlog.Println(http.ListenAndServe(addr, nil))\n\t}()\n\n\tselect {\n\tcase <-stop:\n\t\t\/\/ log.Printf(\"got signal: %v\", signal)\n\t\tadminServer.Stop()\n\t\tlog.Printf(\"admin server stoped.\")\n\t\tentrance.Stop()\n\t\tlog.Printf(\"entrance stoped.\")\n\tcase <-entryFailed:\n\t\tmessageQueue.Close()\n\tcase <-adminFailed:\n\t\tentrance.Stop()\n\t}\n\twg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"testing\"\n\nvar stats *statistics\n\nfunc setUp() {\n\tprecision := 4\n\tstats = NewStatistics(precision)\n}\n\nfunc TestComputeSum_GivenFloatIntegers_ReturnSum(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 15.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -15.00},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00},\n\t\t\texpected: 65.00},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00},\n\t\t\texpected: 90.00},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 115.00},\n\t}\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif actual := stats.computeSum(); test.expected != actual {\n\t\t\tt.Errorf(\"Expected sum to be %f, but get %f\", test.expected, actual)\n\t\t}\n\t}\n}\n\nfunc TestComputeMean_GivenFloatIntegers_ReturnSum(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 3.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -3.00},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00},\n\t\t\texpected: 13.00},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00},\n\t\t\texpected: 18.00},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 23.00},\n\t}\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif actual := stats.computeMean(); test.expected != actual {\n\t\t\tt.Errorf(\"Expected mean to be %f, but get %f\", test.expected, actual)\n\t\t}\n\t}\n}\n\nfunc TestComputeMedian_GivenOddCounts_ReturnMiddleValue(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 3.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -3.00},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00},\n\t\t\texpected: 13.00},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00},\n\t\t\texpected: 18.00},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 23.00},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif median := stats.computeMedian(); test.expected != median {\n\t\t\tt.Errorf(\"Expected median to be %f, but got %f\", test.expected, median)\n\t\t}\n\t}\n}\n\nfunc TestComputeMedian_GivenEvenCounts_ReturnAverageOfMiddleValues(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00, 6.00},\n\t\t\texpected: 3.50},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00, -6.00},\n\t\t\texpected: -3.50},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00, -16.00},\n\t\t\texpected: 13.50},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00, -21.00},\n\t\t\texpected: 18.50},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00, 25.00},\n\t\t\texpected: 23.50},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif median := stats.computeMedian(); test.expected != median {\n\t\t\tt.Errorf(\"Expected median to be %f, but got %f\", test.expected, median)\n\t\t}\n\t}\n}\n\nfunc TestComputeStandardDeviation_ReturnStandardDeviation(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00, 6.00},\n\t\t\texpected: 1.8708},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00, -6.00},\n\t\t\texpected: 1.8708},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00, 16.00, 17.00},\n\t\t\texpected: 2.1602},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00, 21.00, 22.00, 23.00},\n\t\t\texpected: 2.4494},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 1.5811},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif standardDeviation := stats.computeStandardDeviation(); test.expected != standardDeviation {\n\t\t\tt.Errorf(\"Expected standard deviation to be %f, but got %f\", test.expected, standardDeviation)\n\t\t}\n\t}\n}\n\nfunc TestComputeMean_GivenEmptyInputsArray(t *testing.T) {}\nfunc TestComputeStandardDeviation_GivenEmptyInputsArray(t *testing.T) {}\n<commit_msg>Add decimal values to sum and mean tests.<commit_after>package main\n\nimport \"testing\"\n\nvar stats *statistics\n\nfunc setUp() {\n\tprecision := 4\n\tstats = NewStatistics(precision)\n}\n\nfunc TestComputeSum_GivenFloatIntegers_ReturnSum(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 15.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -15.00},\n\t\t{inputs: []float64{11.10, 12.23, 13.56, 14.88, 15.02},\n\t\t\texpected: 66.79},\n\t\t{inputs: []float64{16.30, 17.12, 18.95, 19.44, 20.77},\n\t\t\texpected: 92.58},\n\t\t{inputs: []float64{21.44, 22.22, 23.56, 24.43, 25.11},\n\t\t\texpected: 116.76},\n\t}\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif actual := stats.computeSum(); test.expected != actual {\n\t\t\tt.Errorf(\"Expected sum to be %f, but get %f\", test.expected, actual)\n\t\t}\n\t}\n}\n\nfunc TestComputeMean_GivenFloatIntegers_ReturnSum(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 3.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -3.00},\n\t\t{inputs: []float64{11.10, 12.23, 13.56, 14.88, 15.02},\n\t\t\texpected: 13.3580},\n\t\t{inputs: []float64{16.30, 17.12, 18.95, 19.44, 20.77},\n\t\t\texpected: 18.5159},\n\t\t{inputs: []float64{21.44, 22.22, 23.56, 24.43, 25.11},\n\t\t\texpected: 23.3520},\n\t}\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif actual := stats.computeMean(); test.expected != actual {\n\t\t\tt.Errorf(\"Expected mean to be %f, but get %f\", test.expected, actual)\n\t\t}\n\t}\n}\n\nfunc TestComputeMedian_GivenOddCounts_ReturnMiddleValue(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00},\n\t\t\texpected: 3.00},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00},\n\t\t\texpected: -3.00},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00},\n\t\t\texpected: 13.00},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00},\n\t\t\texpected: 18.00},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 23.00},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif median := stats.computeMedian(); test.expected != median {\n\t\t\tt.Errorf(\"Expected median to be %f, but got %f\", test.expected, median)\n\t\t}\n\t}\n}\n\nfunc TestComputeMedian_GivenEvenCounts_ReturnAverageOfMiddleValues(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00, 6.00},\n\t\t\texpected: 3.50},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00, -6.00},\n\t\t\texpected: -3.50},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00, -16.00},\n\t\t\texpected: 13.50},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00, -21.00},\n\t\t\texpected: 18.50},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00, 25.00},\n\t\t\texpected: 23.50},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif median := stats.computeMedian(); test.expected != median {\n\t\t\tt.Errorf(\"Expected median to be %f, but got %f\", test.expected, median)\n\t\t}\n\t}\n}\n\nfunc TestComputeStandardDeviation_ReturnStandardDeviation(t *testing.T) {\n\tsetUp()\n\n\tvar tests = []struct {\n\t\tinputs []float64\n\t\texpected float64\n\t}{\n\t\t{inputs: []float64{1.00, 2.00, 3.00, 4.00, 5.00, 6.00},\n\t\t\texpected: 1.8708},\n\t\t{inputs: []float64{-1.00, -2.00, -3.00, -4.00, -5.00, -6.00},\n\t\t\texpected: 1.8708},\n\t\t{inputs: []float64{11.00, 12.00, 13.00, 14.00, 15.00, 16.00, 17.00},\n\t\t\texpected: 2.1602},\n\t\t{inputs: []float64{16.00, 17.00, 18.00, 19.00, 20.00, 21.00, 22.00, 23.00},\n\t\t\texpected: 2.4494},\n\t\t{inputs: []float64{21.00, 22.00, 23.00, 24.00, 25.00},\n\t\t\texpected: 1.5811},\n\t}\n\n\tfor _, test := range tests {\n\t\tstats.numbers = test.inputs\n\t\tif standardDeviation := stats.computeStandardDeviation(); test.expected != standardDeviation {\n\t\t\tt.Errorf(\"Expected standard deviation to be %f, but got %f\", test.expected, standardDeviation)\n\t\t}\n\t}\n}\n\nfunc TestComputeMean_GivenEmptyInputsArray(t *testing.T) {}\nfunc TestComputeStandardDeviation_GivenEmptyInputsArray(t *testing.T) {}\n<|endoftext|>"} {"text":"<commit_before>package stats\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-statsd-client\/statsd\"\n\t\"github.com\/cactus\/go-statsd-client\/statsd\/statsdtest\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestNewStatsd(t *testing.T) {\n\ts, err := NewStatsd(\"127.0.0.1:1234\", \"test\")\n\tassert.NoError(t, err)\n\n\tassert.IsType(t, &Statsd{}, s)\n}\n\nfunc TestStatsd_Inc(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Inc(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestStatsd_Dec(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Dec(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"-2\", sent[0].Value)\n}\n\nfunc TestStatsd_Gauge(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Gauge(\"test\", 2.0, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestStatsd_Timing(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Timing(\"test\", time.Second, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"1000\", sent[0].Value)\n}\n\nfunc TestNewBufferedStatsd(t *testing.T) {\n\ts, err := NewBufferedStatsd(\"127.0.0.1:1234\", \"test\", nil)\n\tassert.NoError(t, err)\n\n\tassert.IsType(t, &BufferedStatsd{}, s)\n}\n\nfunc TestBufferedStatsd_Inc(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Inc(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\n\n\nfunc TestBufferedStatsd_Dec(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Dec(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"-2\", sent[0].Value)\n}\n\nfunc TestBufferedStatsd_Gauge(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Gauge(\"test\", 2.0, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestBufferedStatsd_Timing(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Timing(\"test\", time.Second, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"1000\", sent[0].Value)\n}\n\nfunc TestFormatTags(t *testing.T) {\n\ttags := map[string]string{\n\t\t\"test\": \"test\",\n\t\t\"foo\": \"bar\",\n\t}\n\n\tassert.Equal(t, \"\", formatTags(nil))\n\tassert.Equal(t, \"\", formatTags(map[string]string{}))\n\n\tgot := formatTags(tags)\n\tassert.Contains(t, got, \",test=test\")\n\tassert.Contains(t, got, \",foo=bar\")\n}\n<commit_msg>Formatting<commit_after>package stats\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-statsd-client\/statsd\"\n\t\"github.com\/cactus\/go-statsd-client\/statsd\/statsdtest\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestNewStatsd(t *testing.T) {\n\ts, err := NewStatsd(\"127.0.0.1:1234\", \"test\")\n\tassert.NoError(t, err)\n\n\tassert.IsType(t, &Statsd{}, s)\n}\n\nfunc TestStatsd_Inc(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Inc(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestStatsd_Dec(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Dec(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"-2\", sent[0].Value)\n}\n\nfunc TestStatsd_Gauge(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Gauge(\"test\", 2.0, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestStatsd_Timing(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &Statsd{\n\t\tclient: client,\n\t}\n\n\ts.Timing(\"test\", time.Second, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"1000\", sent[0].Value)\n}\n\nfunc TestNewBufferedStatsd(t *testing.T) {\n\ts, err := NewBufferedStatsd(\"127.0.0.1:1234\", \"test\", nil)\n\tassert.NoError(t, err)\n\n\tassert.IsType(t, &BufferedStatsd{}, s)\n}\n\nfunc TestBufferedStatsd_Inc(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Inc(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestBufferedStatsd_Dec(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Dec(\"test\", 2, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"-2\", sent[0].Value)\n}\n\nfunc TestBufferedStatsd_Gauge(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Gauge(\"test\", 2.0, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"2\", sent[0].Value)\n}\n\nfunc TestBufferedStatsd_Timing(t *testing.T) {\n\tsender := statsdtest.NewRecordingSender()\n\tclient, err := statsd.NewClientWithSender(sender, \"test\")\n\tassert.NoError(t, err)\n\n\ts := &BufferedStatsd{\n\t\tclient: client,\n\t}\n\n\ts.Timing(\"test\", time.Second, 1.0, map[string]string{\"test\": \"test\"})\n\n\tsent := sender.GetSent()\n\tassert.Len(t, sent, 1)\n\tassert.Equal(t, \"test.test,test=test\", sent[0].Stat)\n\tassert.Equal(t, \"1000\", sent[0].Value)\n}\n\nfunc TestFormatTags(t *testing.T) {\n\ttags := map[string]string{\n\t\t\"test\": \"test\",\n\t\t\"foo\": \"bar\",\n\t}\n\n\tassert.Equal(t, \"\", formatTags(nil))\n\tassert.Equal(t, \"\", formatTags(map[string]string{}))\n\n\tgot := formatTags(tags)\n\tassert.Contains(t, got, \",test=test\")\n\tassert.Contains(t, got, \",foo=bar\")\n}\n<|endoftext|>"} {"text":"<commit_before>package testutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"veyron.io\/tools\/lib\/collect\"\n\t\"veyron.io\/tools\/lib\/envutil\"\n\t\"veyron.io\/tools\/lib\/util\"\n)\n\nconst (\n\tDefaultIntegrationTestTimeout = 2 * time.Minute\n)\n\n\/\/ binPackages enumerates the Go commands used by veyron integration tests.\n\/\/\n\/\/ TODO(jingjin): port the integration test scripts from shell to Go\n\/\/ and make them use a build cache to share binaries.\nvar binPackages = []string{\n\t\"veyron.io\/apps\/tunnel\/tunneld\",\n\t\"veyron.io\/apps\/tunnel\/vsh\",\n\t\"veyron.io\/playground\/builder\",\n\t\"veyron.io\/veyron\/veyron\/security\/agent\/agentd\",\n\t\"veyron.io\/veyron\/veyron\/security\/agent\/pingpong\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/application\/applicationd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/binary\/binaryd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/build\/buildd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/profile\/profiled\",\n\t\"veyron.io\/veyron\/veyron\/services\/mounttable\/mounttabled\",\n\t\"veyron.io\/veyron\/veyron\/services\/proxy\/proxyd\",\n\t\"veyron.io\/veyron\/veyron\/tools\/application\",\n\t\"veyron.io\/veyron\/veyron\/tools\/binary\",\n\t\"veyron.io\/veyron\/veyron\/tools\/build\",\n\t\"veyron.io\/veyron\/veyron\/tools\/debug\",\n\t\"veyron.io\/veyron\/veyron\/tools\/mounttable\",\n\t\"veyron.io\/veyron\/veyron\/tools\/principal\",\n\t\"veyron.io\/veyron\/veyron\/tools\/profile\",\n\t\"veyron.io\/veyron\/veyron\/tools\/naming\/simulator\",\n\t\"veyron.io\/veyron\/veyron2\/vdl\/vdl\",\n\t\"veyron.io\/wspr\/veyron\/services\/wsprd\",\n}\n\n\/\/ buildBinaries builds Go binaries enumerated by the binPackages list.\nfunc buildBinaries(ctx *util.Context, testName string) (*TestResult, error) {\n\t\/\/ Create a pool of workers.\n\tfmt.Fprintf(ctx.Stdout(), \"building binaries...\\n\")\n\tnumPkgs := len(binPackages)\n\ttasks := make(chan string, numPkgs)\n\ttaskResults := make(chan buildResult, numPkgs)\n\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\tgo buildWorker(nil, tasks, taskResults)\n\t}\n\n\t\/\/ Distribute work to workers.\n\tfor _, pkg := range binPackages {\n\t\ttasks <- pkg\n\t}\n\tclose(tasks)\n\n\t\/\/ Collect the results.\n\tallPassed, suites := true, []testSuite{}\n\tfor i := 0; i < numPkgs; i++ {\n\t\tresult := <-taskResults\n\t\ts := testSuite{Name: result.pkg}\n\t\tc := testCase{\n\t\t\tClassname: result.pkg,\n\t\t\tName: \"Build\",\n\t\t\tTime: fmt.Sprintf(\"%.2f\", result.time.Seconds()),\n\t\t}\n\t\tif result.status != buildPassed {\n\t\t\tFail(ctx, \"%s\\n%v\\n\", result.pkg, result.output)\n\t\t\tf := testFailure{\n\t\t\t\tMessage: \"build\",\n\t\t\t\tData: result.output,\n\t\t\t}\n\t\t\tc.Failures = append(c.Failures, f)\n\t\t\tallPassed = false\n\t\t\ts.Failures++\n\t\t} else {\n\t\t\tPass(ctx, \"%s\\n\", result.pkg)\n\t\t}\n\t\ts.Tests++\n\t\ts.Cases = append(s.Cases, c)\n\t\tsuites = append(suites, s)\n\t}\n\n\t\/\/ Create the xUnit report.\n\tclose(taskResults)\n\tif err := createXUnitReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tif !allPassed {\n\t\treturn &TestResult{Status: TestFailed}, nil\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ findIntegrationTests finds all test.sh or testdata\/integration_test.go files\n\/\/ from the given root dirs.\nfunc findIntegrationTests(ctx *util.Context, rootDirs []string) []string {\n\tif ctx.DryRun() {\n\t\t\/\/ In \"dry run\" mode, no test scripts are executed.\n\t\treturn nil\n\t}\n\tmatchedFiles := []string{}\n\tfor _, rootDir := range rootDirs {\n\t\tfilepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\t\/\/ TODO(sjr): remove \/test.sh check once support for shell-based integration tests is removed.\n\t\t\tif strings.HasSuffix(path, string(os.PathSeparator)+\"test.sh\") || strings.HasSuffix(path, filepath.Join(\"testdata\", \"integration_test.go\")) {\n\t\t\t\tmatchedFiles = append(matchedFiles, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn matchedFiles\n}\n\n\/\/ runIntegrationTests runs all integration tests found under\n\/\/ $VEYRON_ROOT\/roadmap\/go\/src and $VEYRON_ROOT\/veyron\/go\/src.\nfunc runIntegrationTests(ctx *util.Context, testName string) (*TestResult, error) {\n\troot, err := util.VeyronRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Find all integration tests.\n\ttestScripts := findIntegrationTests(ctx, []string{\n\t\tfilepath.Join(root, \"veyron\", \"go\", \"src\"),\n\t\tfilepath.Join(root, \"roadmap\", \"go\", \"src\"),\n\t\tfilepath.Join(root, \"scripts\"),\n\t})\n\n\t\/\/ Create a worker pool to run tests in parallel, passing the\n\t\/\/ location of binaries through shell_test_BIN_DIR.\n\tfmt.Fprintf(ctx.Stdout(), \"running tests...\\n\")\n\tnumTests := len(testScripts)\n\ttasks := make(chan string, numTests)\n\ttaskResults := make(chan testResult, numTests)\n\tenv := envutil.NewSnapshotFromOS()\n\tenv.Set(\"shell_test_BIN_DIR\", binDirPath())\n\tenv.Set(\"VEYRON_INTEGRATION_BIN_DIR\", binDirPath())\n\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\tgo integrationTestWorker(root, env.Map(), tasks, taskResults)\n\t}\n\n\t\/\/ Send test scripts to free workers in the pool.\n\tfor _, testScript := range testScripts {\n\t\ttasks <- testScript\n\t}\n\tclose(tasks)\n\n\t\/\/ Collect the results.\n\tallPassed, suites := true, []testSuite{}\n\tfor i := 0; i < numTests; i++ {\n\t\tresult := <-taskResults\n\t\ts := testSuite{Name: result.pkg}\n\t\tc := testCase{\n\t\t\tClassname: result.pkg,\n\t\t\tName: \"IntegrationTest\",\n\t\t\tTime: fmt.Sprintf(\"%.2f\", result.time.Seconds()),\n\t\t}\n\t\tswitch result.status {\n\t\tcase testFailed:\n\t\t\tFail(ctx, \"%s\\n%v\\n\", result.pkg, result.output)\n\t\t\tf := testFailure{\n\t\t\t\tMessage: \"test\",\n\t\t\t\tData: result.output,\n\t\t\t}\n\t\t\tc.Failures = append(c.Failures, f)\n\t\t\tallPassed = false\n\t\t\ts.Failures++\n\t\tcase testPassed:\n\t\t\tPass(ctx, \"%s\\n\", result.pkg)\n\t\t}\n\t\ts.Tests++\n\t\ts.Cases = append(s.Cases, c)\n\t\tsuites = append(suites, s)\n\t}\n\tclose(taskResults)\n\n\t\/\/ Create the xUnit report.\n\tif err := createXUnitReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tif !allPassed {\n\t\treturn &TestResult{Status: TestFailed}, nil\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ integrationTestWorker receives tasks from the <tasks> channel, runs\n\/\/ them, and sends results to the <results> channel.\nfunc integrationTestWorker(root string, env map[string]string, tasks <-chan string, results chan<- testResult) {\n\tvar out bytes.Buffer\n\tctx := util.NewContext(env, os.Stdin, &out, &out, false, false, false)\n\tfor script := range tasks {\n\t\tstart := time.Now()\n\t\tvar args []string\n\t\tpkgName := strings.TrimPrefix(path.Dir(script), root)\n\t\tif index := strings.Index(pkgName, \"veyron.io\"); index != -1 {\n\t\t\tpkgName = pkgName[index:]\n\t\t}\n\t\tresult := testResult{}\n\t\tswitch {\n\t\tcase strings.HasSuffix(script, \".go\"):\n\t\t\tresult.pkg = \"go.\" + pkgName\n\t\t\targs = []string{\"go\", \"test\", script}\n\t\tcase strings.HasSuffix(script, \".sh\"):\n\t\t\tresult.pkg = \"shell.\" + pkgName\n\t\t\targs = []string{\"run\", \"bash\", \"-x\", script}\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"unsupported type of integration test: %v\\n\", script)\n\t\t\tcontinue\n\t\t}\n\t\terr := ctx.Run().TimedCommand(DefaultIntegrationTestTimeout, \"veyron\", args...)\n\t\tresult.time = time.Now().Sub(start)\n\t\tresult.output = out.String()\n\t\tif err != nil {\n\t\t\tresult.status = testFailed\n\t\t} else {\n\t\t\tresult.status = testPassed\n\t\t}\n\t\tresults <- result\n\t\tout.Reset()\n\t}\n}\n\n\/\/ VeyronIntegrationTest runs veyron integration tests.\nfunc VeyronIntegrationTest(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(ctx, testName, []string{\"web\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Build all Go binaries used in intergartion test scripts and\n\t\/\/ then run the integration tests. We pre-build the binaries\n\t\/\/ used by multiple test scripts to speed things up.\n\tif ctx.DryRun() {\n\t\tbinPackages = nil\n\t}\n\tresult, err := buildBinaries(ctx, testName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.Status == TestFailed {\n\t\treturn result, nil\n\t}\n\tresult, err = runIntegrationTests(ctx, testName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}\n<commit_msg>lib\/testutil\/integration.go: Run one test at a time<commit_after>package testutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"veyron.io\/tools\/lib\/collect\"\n\t\"veyron.io\/tools\/lib\/envutil\"\n\t\"veyron.io\/tools\/lib\/util\"\n)\n\nconst (\n\tDefaultIntegrationTestTimeout = 2 * time.Minute\n)\n\n\/\/ binPackages enumerates the Go commands used by veyron integration tests.\n\/\/\n\/\/ TODO(jingjin): port the integration test scripts from shell to Go\n\/\/ and make them use a build cache to share binaries.\nvar binPackages = []string{\n\t\"veyron.io\/apps\/tunnel\/tunneld\",\n\t\"veyron.io\/apps\/tunnel\/vsh\",\n\t\"veyron.io\/playground\/builder\",\n\t\"veyron.io\/veyron\/veyron\/security\/agent\/agentd\",\n\t\"veyron.io\/veyron\/veyron\/security\/agent\/pingpong\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/application\/applicationd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/binary\/binaryd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/build\/buildd\",\n\t\"veyron.io\/veyron\/veyron\/services\/mgmt\/profile\/profiled\",\n\t\"veyron.io\/veyron\/veyron\/services\/mounttable\/mounttabled\",\n\t\"veyron.io\/veyron\/veyron\/services\/proxy\/proxyd\",\n\t\"veyron.io\/veyron\/veyron\/tools\/application\",\n\t\"veyron.io\/veyron\/veyron\/tools\/binary\",\n\t\"veyron.io\/veyron\/veyron\/tools\/build\",\n\t\"veyron.io\/veyron\/veyron\/tools\/debug\",\n\t\"veyron.io\/veyron\/veyron\/tools\/mounttable\",\n\t\"veyron.io\/veyron\/veyron\/tools\/principal\",\n\t\"veyron.io\/veyron\/veyron\/tools\/profile\",\n\t\"veyron.io\/veyron\/veyron\/tools\/naming\/simulator\",\n\t\"veyron.io\/veyron\/veyron2\/vdl\/vdl\",\n\t\"veyron.io\/wspr\/veyron\/services\/wsprd\",\n}\n\n\/\/ buildBinaries builds Go binaries enumerated by the binPackages list.\nfunc buildBinaries(ctx *util.Context, testName string) (*TestResult, error) {\n\t\/\/ Create a pool of workers.\n\tfmt.Fprintf(ctx.Stdout(), \"building binaries...\\n\")\n\tnumPkgs := len(binPackages)\n\ttasks := make(chan string, numPkgs)\n\ttaskResults := make(chan buildResult, numPkgs)\n\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\tgo buildWorker(nil, tasks, taskResults)\n\t}\n\n\t\/\/ Distribute work to workers.\n\tfor _, pkg := range binPackages {\n\t\ttasks <- pkg\n\t}\n\tclose(tasks)\n\n\t\/\/ Collect the results.\n\tallPassed, suites := true, []testSuite{}\n\tfor i := 0; i < numPkgs; i++ {\n\t\tresult := <-taskResults\n\t\ts := testSuite{Name: result.pkg}\n\t\tc := testCase{\n\t\t\tClassname: result.pkg,\n\t\t\tName: \"Build\",\n\t\t\tTime: fmt.Sprintf(\"%.2f\", result.time.Seconds()),\n\t\t}\n\t\tif result.status != buildPassed {\n\t\t\tFail(ctx, \"%s\\n%v\\n\", result.pkg, result.output)\n\t\t\tf := testFailure{\n\t\t\t\tMessage: \"build\",\n\t\t\t\tData: result.output,\n\t\t\t}\n\t\t\tc.Failures = append(c.Failures, f)\n\t\t\tallPassed = false\n\t\t\ts.Failures++\n\t\t} else {\n\t\t\tPass(ctx, \"%s\\n\", result.pkg)\n\t\t}\n\t\ts.Tests++\n\t\ts.Cases = append(s.Cases, c)\n\t\tsuites = append(suites, s)\n\t}\n\n\t\/\/ Create the xUnit report.\n\tclose(taskResults)\n\tif err := createXUnitReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tif !allPassed {\n\t\treturn &TestResult{Status: TestFailed}, nil\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ findIntegrationTests finds all test.sh or testdata\/integration_test.go files\n\/\/ from the given root dirs.\nfunc findIntegrationTests(ctx *util.Context, rootDirs []string) []string {\n\tif ctx.DryRun() {\n\t\t\/\/ In \"dry run\" mode, no test scripts are executed.\n\t\treturn nil\n\t}\n\tmatchedFiles := []string{}\n\tfor _, rootDir := range rootDirs {\n\t\tfilepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\t\t\/\/ TODO(sjr): remove \/test.sh check once support for shell-based integration tests is removed.\n\t\t\tif strings.HasSuffix(path, string(os.PathSeparator)+\"test.sh\") || strings.HasSuffix(path, filepath.Join(\"testdata\", \"integration_test.go\")) {\n\t\t\t\tmatchedFiles = append(matchedFiles, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn matchedFiles\n}\n\n\/\/ runIntegrationTests runs all integration tests found under\n\/\/ $VEYRON_ROOT\/roadmap\/go\/src and $VEYRON_ROOT\/veyron\/go\/src.\nfunc runIntegrationTests(ctx *util.Context, testName string) (*TestResult, error) {\n\troot, err := util.VeyronRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Find all integration tests.\n\ttestScripts := findIntegrationTests(ctx, []string{\n\t\tfilepath.Join(root, \"veyron\", \"go\", \"src\"),\n\t\tfilepath.Join(root, \"roadmap\", \"go\", \"src\"),\n\t\tfilepath.Join(root, \"scripts\"),\n\t})\n\n\t\/\/ Create a worker pool to run tests in parallel, passing the\n\t\/\/ location of binaries through shell_test_BIN_DIR.\n\tfmt.Fprintf(ctx.Stdout(), \"running tests...\\n\")\n\tnumTests := len(testScripts)\n\ttasks := make(chan string, numTests)\n\ttaskResults := make(chan testResult, numTests)\n\tenv := envutil.NewSnapshotFromOS()\n\tenv.Set(\"shell_test_BIN_DIR\", binDirPath())\n\tenv.Set(\"VEYRON_INTEGRATION_BIN_DIR\", binDirPath())\n\t\/\/ TODO(rthellend): When we run these tests in parallel, some of them\n\t\/\/ appear to hang after completing successfully. For now, only run one\n\t\/\/ at a time to confirm.\n\t\/\/for i := 0; i < runtime.NumCPU(); i++ {\n\tgo integrationTestWorker(root, env.Map(), tasks, taskResults)\n\t\/\/}\n\n\t\/\/ Send test scripts to free workers in the pool.\n\tfor _, testScript := range testScripts {\n\t\ttasks <- testScript\n\t}\n\tclose(tasks)\n\n\t\/\/ Collect the results.\n\tallPassed, suites := true, []testSuite{}\n\tfor i := 0; i < numTests; i++ {\n\t\tresult := <-taskResults\n\t\ts := testSuite{Name: result.pkg}\n\t\tc := testCase{\n\t\t\tClassname: result.pkg,\n\t\t\tName: \"IntegrationTest\",\n\t\t\tTime: fmt.Sprintf(\"%.2f\", result.time.Seconds()),\n\t\t}\n\t\tswitch result.status {\n\t\tcase testFailed:\n\t\t\tFail(ctx, \"%s\\n%v\\n\", result.pkg, result.output)\n\t\t\tf := testFailure{\n\t\t\t\tMessage: \"test\",\n\t\t\t\tData: result.output,\n\t\t\t}\n\t\t\tc.Failures = append(c.Failures, f)\n\t\t\tallPassed = false\n\t\t\ts.Failures++\n\t\tcase testPassed:\n\t\t\tPass(ctx, \"%s\\n\", result.pkg)\n\t\t}\n\t\ts.Tests++\n\t\ts.Cases = append(s.Cases, c)\n\t\tsuites = append(suites, s)\n\t}\n\tclose(taskResults)\n\n\t\/\/ Create the xUnit report.\n\tif err := createXUnitReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tif !allPassed {\n\t\treturn &TestResult{Status: TestFailed}, nil\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ integrationTestWorker receives tasks from the <tasks> channel, runs\n\/\/ them, and sends results to the <results> channel.\nfunc integrationTestWorker(root string, env map[string]string, tasks <-chan string, results chan<- testResult) {\n\tvar out bytes.Buffer\n\tctx := util.NewContext(env, os.Stdin, &out, &out, false, false, false)\n\tfor script := range tasks {\n\t\tstart := time.Now()\n\t\tvar args []string\n\t\tpkgName := strings.TrimPrefix(path.Dir(script), root)\n\t\tif index := strings.Index(pkgName, \"veyron.io\"); index != -1 {\n\t\t\tpkgName = pkgName[index:]\n\t\t}\n\t\tresult := testResult{}\n\t\tswitch {\n\t\tcase strings.HasSuffix(script, \".go\"):\n\t\t\tresult.pkg = \"go.\" + pkgName\n\t\t\targs = []string{\"go\", \"test\", script}\n\t\tcase strings.HasSuffix(script, \".sh\"):\n\t\t\tresult.pkg = \"shell.\" + pkgName\n\t\t\targs = []string{\"run\", \"bash\", \"-x\", script}\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"unsupported type of integration test: %v\\n\", script)\n\t\t\tcontinue\n\t\t}\n\t\terr := ctx.Run().TimedCommand(DefaultIntegrationTestTimeout, \"veyron\", args...)\n\t\tresult.time = time.Now().Sub(start)\n\t\tresult.output = out.String()\n\t\tif err != nil {\n\t\t\tresult.status = testFailed\n\t\t} else {\n\t\t\tresult.status = testPassed\n\t\t}\n\t\tresults <- result\n\t\tout.Reset()\n\t}\n}\n\n\/\/ VeyronIntegrationTest runs veyron integration tests.\nfunc VeyronIntegrationTest(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(ctx, testName, []string{\"web\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Build all Go binaries used in intergartion test scripts and\n\t\/\/ then run the integration tests. We pre-build the binaries\n\t\/\/ used by multiple test scripts to speed things up.\n\tif ctx.DryRun() {\n\t\tbinPackages = nil\n\t}\n\tresult, err := buildBinaries(ctx, testName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif result.Status == TestFailed {\n\t\treturn result, nil\n\t}\n\tresult, err = runIntegrationTests(ctx, testName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sshego\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ KnownHosts represents in Hosts a hash map of host identifier (ip or name)\n\/\/ and the corresponding public key for the server. It corresponds to the\n\/\/ ~\/.ssh\/known_hosts file.\ntype KnownHosts struct {\n\tHosts map[string]*ServerPubKey\n\tcurHost *ServerPubKey\n\tcurStatus HostState\n\n\t\/\/ FilepathPrefix doesn't have the .json.snappy suffix on it.\n\tFilepathPrefix string\n\n\tPersistFormatSuffix string\n\n\t\/\/ PersistFormat is the format indicator\n\tPersistFormat KnownHostsPersistFormat\n\n\t\/\/ NoSave means we don't touch the files we read from\n\tNoSave bool\n}\n\n\/\/ ServerPubKey stores the RSA public keys for a particular known server. This\n\/\/ structure is stored in KnownHosts.Hosts.\ntype ServerPubKey struct {\n\tHostname string\n\n\t\/\/ HumanKey is a serialized and readable version of Key, the key for Hosts map in KnownHosts.\n\tHumanKey string\n\tServerBanned bool\n\t\/\/OurAcctKeyPair ssh.Signer\n\n\tremote net.Addr \/\/ unmarshalled form of Hostname\n\n\t\/\/key ssh.PublicKey \/\/ unmarshalled form of HumanKey\n\n\t\/\/ reading ~\/.ssh\/known_hosts\n\tMarkers string\n\tHostnames string\n\tSplitHostnames map[string]bool\n\tKeytype string\n\tBase64EncodededPublicKey string\n\tComment string\n\tPort string\n\tLineInFileOneBased int\n\n\t\/\/ if AlreadySaved, then we don't need to append.\n\tAlreadySaved bool\n}\n\ntype KnownHostsPersistFormat int\n\nconst (\n\tKHJson KnownHostsPersistFormat = 0\n\tKHGob KnownHostsPersistFormat = 1\n\tKHSsh KnownHostsPersistFormat = 2\n)\n\n\/\/ NewKnownHosts creats a new KnownHosts structure.\n\/\/ filepathPrefix does not include the\n\/\/ PersistFormat suffix. If filepathPrefix + defaultFileFormat()\n\/\/ exists as a file on disk, then we read the\n\/\/ contents of that file into the new KnownHosts.\n\/\/\n\/\/ The returned KnownHosts will remember the\n\/\/ filepathPrefix for future saves.\n\/\/\nfunc NewKnownHosts(filepath string, format KnownHostsPersistFormat) (*KnownHosts, error) {\n\tp(\"NewKnownHosts called, with filepath = '%s', format='%v'\", filepath, format)\n\n\th := &KnownHosts{\n\t\tPersistFormat: format,\n\t}\n\n\th.FilepathPrefix = filepath\n\tfn := filepath\n\tswitch format {\n\tcase KHJson:\n\t\th.PersistFormatSuffix = \".json.snappy\"\n\tcase KHGob:\n\t\th.PersistFormatSuffix = \".gob.snappy\"\n\t}\n\tfn += h.PersistFormatSuffix\n\n\tvar err error\n\tif fileExists(fn) {\n\t\tfmt.Printf(\"fn '%s' exists in NewKnownHosts(). format = %v\\n\", fn, format)\n\n\t\tswitch format {\n\t\tcase KHJson:\n\t\t\terr = h.readJSONSnappy(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase KHGob:\n\t\t\terr = h.readGobSnappy(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase KHSsh:\n\t\t\th, err = LoadSshKnownHosts(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown persistence format: %v\", format)\n\t\t}\n\n\t\t\/\/fmt.Printf(\"after reading from file, h = '%#v'\\n\", h)\n\n\t} else {\n\t\tfmt.Printf(\"fn '%s' does not exist already in NewKnownHosts()\\n\", fn)\n\t\tfmt.Printf(\"making h.Hosts in NewKnownHosts()\\n\")\n\t\th.Hosts = make(map[string]*ServerPubKey)\n\t}\n\n\treturn h, nil\n}\n\n\/\/ KnownHostsEqual compares two instances of KnownHosts structures for equality.\nfunc KnownHostsEqual(a, b *KnownHosts) (bool, error) {\n\tfor k, v := range a.Hosts {\n\t\tv2, ok := b.Hosts[k]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.Hosts had this key, but b.Hosts did not have this key\", k)\n\t\t}\n\t\tif v.HumanKey != v2.HumanKey {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.HumanKey = '%s' but b.HumanKey = '%s'\", k, v.HumanKey, v2.HumanKey)\n\t\t}\n\t\tif v.Hostname != v2.Hostname {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.Hostname = '%s' but b.Hostname = '%s'\", k, v.Hostname, v2.Hostname)\n\t\t}\n\t\tif v.ServerBanned != v2.ServerBanned {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.ServerBanned = '%v' but b.ServerBanned = '%v'\", k, v.ServerBanned, v2.ServerBanned)\n\t\t}\n\t}\n\tfor k := range b.Hosts {\n\t\t_, ok := a.Hosts[k]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': b.Hosts had this key, but a.Hosts did not have this key\", k)\n\t\t}\n\t}\n\treturn true, nil\n}\n\n\/\/ Sync writes the contents of the KnownHosts structure to the\n\/\/ file h.FilepathPrefix + h.PersistFormat (for json\/gob); to\n\/\/ just h.FilepathPrefix for \"ssh_known_hosts\" format.\nfunc (h *KnownHosts) Sync() (err error) {\n\tfn := h.FilepathPrefix + h.PersistFormatSuffix\n\tswitch h.PersistFormat {\n\tcase KHJson:\n\t\terr = h.saveJSONSnappy(fn)\n\t\tpanicOn(err)\n\tcase KHGob:\n\t\terr = h.saveGobSnappy(fn)\n\t\tpanicOn(err)\n\tcase KHSsh:\n\t\terr = h.saveSshKnownHosts()\n\t\tpanicOn(err)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown persistence format: %v\", h.PersistFormat))\n\t}\n\treturn\n}\n\n\/\/ Close cleans up and prepares for shutdown. It calls h.Sync() to write\n\/\/ the state to disk.\nfunc (h *KnownHosts) Close() {\n\th.Sync()\n}\n\n\/\/ LoadSshKnownHosts reads a ~\/.ssh\/known_hosts style\n\/\/ file from path, see the SSH_KNOWN_HOSTS FILE FORMAT\n\/\/ section of http:\/\/manpages.ubuntu.com\/manpages\/zesty\/en\/man8\/sshd.8.html\n\/\/ or the local sshd(8) man page.\nfunc LoadSshKnownHosts(path string) (*KnownHosts, error) {\n\t\/\/pp(\"top of LoadSshKnownHosts for path = '%s'\", path)\n\n\th := &KnownHosts{\n\t\tHosts: make(map[string]*ServerPubKey),\n\t\tFilepathPrefix: path,\n\t\tPersistFormat: KHSsh,\n\t}\n\n\tif !fileExists(path) {\n\t\treturn nil, fmt.Errorf(\"path '%s' does not exist\", path)\n\t}\n\n\tby, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkillRightBracket := strings.NewReplacer(\"]\", \"\")\n\n\tlines := strings.Split(string(by), \"\\n\")\n\tfor i := range lines {\n\t\tline := strings.Trim(lines[i], \" \")\n\t\t\/\/ skip comments\n\t\tif line == \"\" || line[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ skip hashed hostnames\n\t\tif line[0] == '|' {\n\t\t\tcontinue\n\t\t}\n\t\tsplt := strings.Split(line, \" \")\n\t\t\/\/pp(\"for line i = %v, splt = %#v\\n\", i, splt)\n\t\tn := len(splt)\n\t\tif n < 3 || n > 5 {\n\t\t\treturn nil, fmt.Errorf(\"known_hosts file '%s' did not have 3\/4\/5 fields on line %v: '%s'\", path, i+1, lines[i])\n\t\t}\n\t\tb := 0\n\t\tmarkers := \"\"\n\t\tif splt[0][0] == '@' {\n\t\t\tmarkers = splt[0]\n\t\t\tb = 1\n\t\t\tif strings.Contains(markers, \"@revoked\") {\n\t\t\t\tlog.Printf(\"ignoring @revoked host key at line %v of path '%s': '%s'\", i+1, path, lines[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.Contains(markers, \"@cert-authority\") {\n\t\t\t\tlog.Printf(\"ignoring @cert-authority host key at line %v of path '%s': '%s'\", i+1, path, lines[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tcomment := \"\"\n\t\tif b+3 < n {\n\t\t\tcomment = splt[b+3]\n\t\t}\n\t\tpubkey := ServerPubKey{\n\t\t\tMarkers: markers,\n\t\t\tHostnames: splt[b],\n\t\t\tKeytype: splt[b+1],\n\t\t\tBase64EncodededPublicKey: splt[b+2],\n\t\t\tComment: comment,\n\t\t\tPort: \"22\",\n\t\t\tSplitHostnames: make(map[string]bool),\n\t\t}\n\t\thosts := strings.Split(pubkey.Hostnames, \",\")\n\n\t\t\/\/ 2 passes: first fill all the SplitHostnames, then each indiv.\n\n\t\t\/\/ a) fill all the SplitHostnames\n\t\tfor k := range hosts {\n\t\t\thst := hosts[k]\n\t\t\t\/\/pp(\"processing hst = '%s'\\n\", hst)\n\t\t\tif hst[0] == '[' {\n\t\t\t\thst = hst[1:]\n\t\t\t\thst = killRightBracket.Replace(hst)\n\t\t\t\t\/\/pp(\"after killing [], hst = '%s'\\n\", hst)\n\t\t\t}\n\t\t\thostport := strings.Split(hst, \":\")\n\t\t\t\/\/pp(\"hostport = '%#v'\\n\", hostport)\n\t\t\tif len(hostport) > 1 {\n\t\t\t\thst = hostport[0]\n\t\t\t\tpubkey.Port = hostport[1]\n\t\t\t}\n\t\t\tpubkey.Hostname = hst + \":\" + pubkey.Port\n\t\t\tpubkey.SplitHostnames[pubkey.Hostname] = true\n\t\t}\n\n\t\t\/\/ b) each individual name\n\t\tfor k := range hosts {\n\n\t\t\t\/\/ copy pubkey so we can modify\n\t\t\tourpubkey := pubkey\n\n\t\t\thst := hosts[k]\n\t\t\t\/\/pp(\"processing hst = '%s'\\n\", hst)\n\t\t\tif hst[0] == '[' {\n\t\t\t\thst = hst[1:]\n\t\t\t\thst = killRightBracket.Replace(hst)\n\t\t\t\t\/\/pp(\"after killing [], hst = '%s'\\n\", hst)\n\t\t\t}\n\t\t\thostport := strings.Split(hst, \":\")\n\t\t\t\/\/p(\"hostport = '%#v'\\n\", hostport)\n\t\t\tif len(hostport) > 1 {\n\t\t\t\thst = hostport[0]\n\t\t\t\tourpubkey.Port = hostport[1]\n\t\t\t}\n\t\t\tourpubkey.Hostname = hst + \":\" + ourpubkey.Port\n\n\t\t\t\/\/ unbase64 the public key to get []byte, then string() that\n\t\t\t\/\/ to get the key of h.Hosts\n\t\t\tpub := []byte(ourpubkey.Base64EncodededPublicKey)\n\t\t\texpandedMaxSize := base64.StdEncoding.DecodedLen(len(pub))\n\t\t\texpand := make([]byte, expandedMaxSize)\n\t\t\tn, err := base64.StdEncoding.Decode(expand, []byte(ourpubkey.Base64EncodededPublicKey))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry in known_hosts file '%s' on line %v: '%s' we find the following error: could not base64 decode the public key field. detailed error: '%s'\", path, i+1, lines[i], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texpand = expand[:n]\n\n\t\t\txkey, err := ssh.ParsePublicKey(expand)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry in known_hosts file '%s' on line %v: '%s' we find the following error: could not ssh.ParsePublicKey(). detailed error: '%s'\", path, i+1, lines[i], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tse := string(ssh.MarshalAuthorizedKey(xkey))\n\n\t\t\tourpubkey.LineInFileOneBased = i + 1\n\t\t\t\/* don't resolve now, this may be slow:\n\t\t\tourpubkey.remote, err = net.ResolveTCPAddr(\"tcp\", ourpubkey.Hostname+\":\"+ourpubkey.Port)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry known_hosts file '%s' on line %v: '%s' we find the following error: could not resolve the hostname '%s'. detailed error: '%s'\", path, i+1, lines[i], ourpubkey.Hostname, err)\n\t\t\t}\n\t\t\t*\/\n\t\t\tourpubkey.AlreadySaved = true\n\t\t\tourpubkey.HumanKey = se\n\t\t\t\/\/ check for existing that we need to combine...\n\t\t\tprior, already := h.Hosts[se]\n\t\t\tif !already {\n\t\t\t\th.Hosts[se] = &ourpubkey\n\t\t\t\t\/\/pp(\"saved known hosts: key '%s' -> value: %#v\\n\", se, ourpubkey)\n\t\t\t} else {\n\t\t\t\t\/\/ need to combine under this key...\n\t\t\t\t\/\/pp(\"have prior entry for se='%s': %#v\\n\", se, prior)\n\t\t\t\tprior.AddHostPort(ourpubkey.Hostname)\n\t\t\t\tprior.AlreadySaved = true \/\/ reading from file, all are saved already.\n\t\t\t}\n\t\t}\n\t}\n\n\treturn h, nil\n}\n\nfunc (s *KnownHosts) saveSshKnownHosts() error {\n\n\tif s.NoSave {\n\t\treturn nil\n\t}\n\n\tfn := s.FilepathPrefix\n\n\t\/\/ backups\n\texec.Command(\"mv\", fn+\".prev\", fn+\".prev.prev\").Run()\n\texec.Command(\"cp\", \"-p\", fn, fn+\".prev\").Run()\n\n\tf, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open file '%s' for appending: '%s'\", fn, err)\n\t}\n\tdefer f.Close()\n\n\tfor _, v := range s.Hosts {\n\t\tif v.AlreadySaved {\n\t\t\tcontinue\n\t\t}\n\n\t\thostname := \"\"\n\t\tif len(v.SplitHostnames) == 1 {\n\t\t\thn := v.Hostname\n\t\t\thp := strings.Split(hn, \":\")\n\t\t\t\/\/pp(\"hn='%v', hp='%#v'\", hn, hp)\n\t\t\tif hp[1] != \"22\" {\n\t\t\t\thn = \"[\" + hp[0] + \"]:\" + hp[1]\n\t\t\t}\n\t\t\thostname = hn\n\t\t} else {\n\t\t\t\/\/ put all hostnames under this one key.\n\t\t\tk := 0\n\t\t\tfor tmp := range v.SplitHostnames {\n\t\t\t\thp := strings.Split(tmp, \":\")\n\t\t\t\tif len(hp) != 2 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"must be 2 parts here, but we got '%s'\", tmp))\n\t\t\t\t}\n\t\t\t\thn := \"[\" + hp[0] + \"]:\" + hp[1]\n\t\t\t\tif k == 0 {\n\t\t\t\t\thostname = hn\n\t\t\t\t} else {\n\t\t\t\t\thostname += \",\" + hn\n\t\t\t\t}\n\t\t\t\tk++\n\t\t\t}\n\t\t}\n\n\t\t_, err = fmt.Fprintf(f, \"%s %s %s %s\\n\",\n\t\t\thostname,\n\t\t\tv.Keytype,\n\t\t\tv.Base64EncodededPublicKey,\n\t\t\tv.Comment)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not append to file '%s': '%s'\", fn, err)\n\t\t}\n\t\tv.AlreadySaved = true\n\t}\n\n\treturn nil\n}\n\nfunc base64ofPublicKey(key ssh.PublicKey) string {\n\tb := &bytes.Buffer{}\n\te := base64.NewEncoder(base64.StdEncoding, b)\n\te.Write(key.Marshal())\n\te.Close()\n\treturn b.String()\n\n}\n\nfunc (prior *ServerPubKey) AddHostPort(hp string) {\n\t\/\/pp(\"AddHostPort called with hp = '%v'\", hp)\n\t_, already2 := prior.SplitHostnames[hp]\n\tprior.SplitHostnames[hp] = true\n\tif !already2 {\n\t\tprior.AlreadySaved = false\n\t}\n}\n<commit_msg>atg. quiet down<commit_after>package sshego\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\n\/\/ KnownHosts represents in Hosts a hash map of host identifier (ip or name)\n\/\/ and the corresponding public key for the server. It corresponds to the\n\/\/ ~\/.ssh\/known_hosts file.\ntype KnownHosts struct {\n\tHosts map[string]*ServerPubKey\n\tcurHost *ServerPubKey\n\tcurStatus HostState\n\n\t\/\/ FilepathPrefix doesn't have the .json.snappy suffix on it.\n\tFilepathPrefix string\n\n\tPersistFormatSuffix string\n\n\t\/\/ PersistFormat is the format indicator\n\tPersistFormat KnownHostsPersistFormat\n\n\t\/\/ NoSave means we don't touch the files we read from\n\tNoSave bool\n}\n\n\/\/ ServerPubKey stores the RSA public keys for a particular known server. This\n\/\/ structure is stored in KnownHosts.Hosts.\ntype ServerPubKey struct {\n\tHostname string\n\n\t\/\/ HumanKey is a serialized and readable version of Key, the key for Hosts map in KnownHosts.\n\tHumanKey string\n\tServerBanned bool\n\t\/\/OurAcctKeyPair ssh.Signer\n\n\tremote net.Addr \/\/ unmarshalled form of Hostname\n\n\t\/\/key ssh.PublicKey \/\/ unmarshalled form of HumanKey\n\n\t\/\/ reading ~\/.ssh\/known_hosts\n\tMarkers string\n\tHostnames string\n\tSplitHostnames map[string]bool\n\tKeytype string\n\tBase64EncodededPublicKey string\n\tComment string\n\tPort string\n\tLineInFileOneBased int\n\n\t\/\/ if AlreadySaved, then we don't need to append.\n\tAlreadySaved bool\n}\n\ntype KnownHostsPersistFormat int\n\nconst (\n\tKHJson KnownHostsPersistFormat = 0\n\tKHGob KnownHostsPersistFormat = 1\n\tKHSsh KnownHostsPersistFormat = 2\n)\n\n\/\/ NewKnownHosts creats a new KnownHosts structure.\n\/\/ filepathPrefix does not include the\n\/\/ PersistFormat suffix. If filepathPrefix + defaultFileFormat()\n\/\/ exists as a file on disk, then we read the\n\/\/ contents of that file into the new KnownHosts.\n\/\/\n\/\/ The returned KnownHosts will remember the\n\/\/ filepathPrefix for future saves.\n\/\/\nfunc NewKnownHosts(filepath string, format KnownHostsPersistFormat) (*KnownHosts, error) {\n\tp(\"NewKnownHosts called, with filepath = '%s', format='%v'\", filepath, format)\n\n\th := &KnownHosts{\n\t\tPersistFormat: format,\n\t}\n\n\th.FilepathPrefix = filepath\n\tfn := filepath\n\tswitch format {\n\tcase KHJson:\n\t\th.PersistFormatSuffix = \".json.snappy\"\n\tcase KHGob:\n\t\th.PersistFormatSuffix = \".gob.snappy\"\n\t}\n\tfn += h.PersistFormatSuffix\n\n\tvar err error\n\tif fileExists(fn) {\n\t\t\/\/pp(\"fn '%s' exists in NewKnownHosts(). format = %v\\n\", fn, format)\n\n\t\tswitch format {\n\t\tcase KHJson:\n\t\t\terr = h.readJSONSnappy(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase KHGob:\n\t\t\terr = h.readGobSnappy(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase KHSsh:\n\t\t\th, err = LoadSshKnownHosts(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown persistence format: %v\", format)\n\t\t}\n\n\t\t\/\/pp(\"after reading from file, h = '%#v'\\n\", h)\n\n\t} else {\n\t\t\/\/pp(\"fn '%s' does not exist already in NewKnownHosts()\\n\", fn)\n\t\t\/\/pp(\"making h.Hosts in NewKnownHosts()\\n\")\n\t\th.Hosts = make(map[string]*ServerPubKey)\n\t}\n\n\treturn h, nil\n}\n\n\/\/ KnownHostsEqual compares two instances of KnownHosts structures for equality.\nfunc KnownHostsEqual(a, b *KnownHosts) (bool, error) {\n\tfor k, v := range a.Hosts {\n\t\tv2, ok := b.Hosts[k]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.Hosts had this key, but b.Hosts did not have this key\", k)\n\t\t}\n\t\tif v.HumanKey != v2.HumanKey {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.HumanKey = '%s' but b.HumanKey = '%s'\", k, v.HumanKey, v2.HumanKey)\n\t\t}\n\t\tif v.Hostname != v2.Hostname {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.Hostname = '%s' but b.Hostname = '%s'\", k, v.Hostname, v2.Hostname)\n\t\t}\n\t\tif v.ServerBanned != v2.ServerBanned {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': a.ServerBanned = '%v' but b.ServerBanned = '%v'\", k, v.ServerBanned, v2.ServerBanned)\n\t\t}\n\t}\n\tfor k := range b.Hosts {\n\t\t_, ok := a.Hosts[k]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"KnownHostsEqual detected difference at key '%s': b.Hosts had this key, but a.Hosts did not have this key\", k)\n\t\t}\n\t}\n\treturn true, nil\n}\n\n\/\/ Sync writes the contents of the KnownHosts structure to the\n\/\/ file h.FilepathPrefix + h.PersistFormat (for json\/gob); to\n\/\/ just h.FilepathPrefix for \"ssh_known_hosts\" format.\nfunc (h *KnownHosts) Sync() (err error) {\n\tfn := h.FilepathPrefix + h.PersistFormatSuffix\n\tswitch h.PersistFormat {\n\tcase KHJson:\n\t\terr = h.saveJSONSnappy(fn)\n\t\tpanicOn(err)\n\tcase KHGob:\n\t\terr = h.saveGobSnappy(fn)\n\t\tpanicOn(err)\n\tcase KHSsh:\n\t\terr = h.saveSshKnownHosts()\n\t\tpanicOn(err)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown persistence format: %v\", h.PersistFormat))\n\t}\n\treturn\n}\n\n\/\/ Close cleans up and prepares for shutdown. It calls h.Sync() to write\n\/\/ the state to disk.\nfunc (h *KnownHosts) Close() {\n\th.Sync()\n}\n\n\/\/ LoadSshKnownHosts reads a ~\/.ssh\/known_hosts style\n\/\/ file from path, see the SSH_KNOWN_HOSTS FILE FORMAT\n\/\/ section of http:\/\/manpages.ubuntu.com\/manpages\/zesty\/en\/man8\/sshd.8.html\n\/\/ or the local sshd(8) man page.\nfunc LoadSshKnownHosts(path string) (*KnownHosts, error) {\n\t\/\/pp(\"top of LoadSshKnownHosts for path = '%s'\", path)\n\n\th := &KnownHosts{\n\t\tHosts: make(map[string]*ServerPubKey),\n\t\tFilepathPrefix: path,\n\t\tPersistFormat: KHSsh,\n\t}\n\n\tif !fileExists(path) {\n\t\treturn nil, fmt.Errorf(\"path '%s' does not exist\", path)\n\t}\n\n\tby, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkillRightBracket := strings.NewReplacer(\"]\", \"\")\n\n\tlines := strings.Split(string(by), \"\\n\")\n\tfor i := range lines {\n\t\tline := strings.Trim(lines[i], \" \")\n\t\t\/\/ skip comments\n\t\tif line == \"\" || line[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ skip hashed hostnames\n\t\tif line[0] == '|' {\n\t\t\tcontinue\n\t\t}\n\t\tsplt := strings.Split(line, \" \")\n\t\t\/\/pp(\"for line i = %v, splt = %#v\\n\", i, splt)\n\t\tn := len(splt)\n\t\tif n < 3 || n > 5 {\n\t\t\treturn nil, fmt.Errorf(\"known_hosts file '%s' did not have 3\/4\/5 fields on line %v: '%s'\", path, i+1, lines[i])\n\t\t}\n\t\tb := 0\n\t\tmarkers := \"\"\n\t\tif splt[0][0] == '@' {\n\t\t\tmarkers = splt[0]\n\t\t\tb = 1\n\t\t\tif strings.Contains(markers, \"@revoked\") {\n\t\t\t\tlog.Printf(\"ignoring @revoked host key at line %v of path '%s': '%s'\", i+1, path, lines[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.Contains(markers, \"@cert-authority\") {\n\t\t\t\tlog.Printf(\"ignoring @cert-authority host key at line %v of path '%s': '%s'\", i+1, path, lines[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tcomment := \"\"\n\t\tif b+3 < n {\n\t\t\tcomment = splt[b+3]\n\t\t}\n\t\tpubkey := ServerPubKey{\n\t\t\tMarkers: markers,\n\t\t\tHostnames: splt[b],\n\t\t\tKeytype: splt[b+1],\n\t\t\tBase64EncodededPublicKey: splt[b+2],\n\t\t\tComment: comment,\n\t\t\tPort: \"22\",\n\t\t\tSplitHostnames: make(map[string]bool),\n\t\t}\n\t\thosts := strings.Split(pubkey.Hostnames, \",\")\n\n\t\t\/\/ 2 passes: first fill all the SplitHostnames, then each indiv.\n\n\t\t\/\/ a) fill all the SplitHostnames\n\t\tfor k := range hosts {\n\t\t\thst := hosts[k]\n\t\t\t\/\/pp(\"processing hst = '%s'\\n\", hst)\n\t\t\tif hst[0] == '[' {\n\t\t\t\thst = hst[1:]\n\t\t\t\thst = killRightBracket.Replace(hst)\n\t\t\t\t\/\/pp(\"after killing [], hst = '%s'\\n\", hst)\n\t\t\t}\n\t\t\thostport := strings.Split(hst, \":\")\n\t\t\t\/\/pp(\"hostport = '%#v'\\n\", hostport)\n\t\t\tif len(hostport) > 1 {\n\t\t\t\thst = hostport[0]\n\t\t\t\tpubkey.Port = hostport[1]\n\t\t\t}\n\t\t\tpubkey.Hostname = hst + \":\" + pubkey.Port\n\t\t\tpubkey.SplitHostnames[pubkey.Hostname] = true\n\t\t}\n\n\t\t\/\/ b) each individual name\n\t\tfor k := range hosts {\n\n\t\t\t\/\/ copy pubkey so we can modify\n\t\t\tourpubkey := pubkey\n\n\t\t\thst := hosts[k]\n\t\t\t\/\/pp(\"processing hst = '%s'\\n\", hst)\n\t\t\tif hst[0] == '[' {\n\t\t\t\thst = hst[1:]\n\t\t\t\thst = killRightBracket.Replace(hst)\n\t\t\t\t\/\/pp(\"after killing [], hst = '%s'\\n\", hst)\n\t\t\t}\n\t\t\thostport := strings.Split(hst, \":\")\n\t\t\t\/\/p(\"hostport = '%#v'\\n\", hostport)\n\t\t\tif len(hostport) > 1 {\n\t\t\t\thst = hostport[0]\n\t\t\t\tourpubkey.Port = hostport[1]\n\t\t\t}\n\t\t\tourpubkey.Hostname = hst + \":\" + ourpubkey.Port\n\n\t\t\t\/\/ unbase64 the public key to get []byte, then string() that\n\t\t\t\/\/ to get the key of h.Hosts\n\t\t\tpub := []byte(ourpubkey.Base64EncodededPublicKey)\n\t\t\texpandedMaxSize := base64.StdEncoding.DecodedLen(len(pub))\n\t\t\texpand := make([]byte, expandedMaxSize)\n\t\t\tn, err := base64.StdEncoding.Decode(expand, []byte(ourpubkey.Base64EncodededPublicKey))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry in known_hosts file '%s' on line %v: '%s' we find the following error: could not base64 decode the public key field. detailed error: '%s'\", path, i+1, lines[i], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texpand = expand[:n]\n\n\t\t\txkey, err := ssh.ParsePublicKey(expand)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry in known_hosts file '%s' on line %v: '%s' we find the following error: could not ssh.ParsePublicKey(). detailed error: '%s'\", path, i+1, lines[i], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tse := string(ssh.MarshalAuthorizedKey(xkey))\n\n\t\t\tourpubkey.LineInFileOneBased = i + 1\n\t\t\t\/* don't resolve now, this may be slow:\n\t\t\tourpubkey.remote, err = net.ResolveTCPAddr(\"tcp\", ourpubkey.Hostname+\":\"+ourpubkey.Port)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: ignoring entry known_hosts file '%s' on line %v: '%s' we find the following error: could not resolve the hostname '%s'. detailed error: '%s'\", path, i+1, lines[i], ourpubkey.Hostname, err)\n\t\t\t}\n\t\t\t*\/\n\t\t\tourpubkey.AlreadySaved = true\n\t\t\tourpubkey.HumanKey = se\n\t\t\t\/\/ check for existing that we need to combine...\n\t\t\tprior, already := h.Hosts[se]\n\t\t\tif !already {\n\t\t\t\th.Hosts[se] = &ourpubkey\n\t\t\t\t\/\/pp(\"saved known hosts: key '%s' -> value: %#v\\n\", se, ourpubkey)\n\t\t\t} else {\n\t\t\t\t\/\/ need to combine under this key...\n\t\t\t\t\/\/pp(\"have prior entry for se='%s': %#v\\n\", se, prior)\n\t\t\t\tprior.AddHostPort(ourpubkey.Hostname)\n\t\t\t\tprior.AlreadySaved = true \/\/ reading from file, all are saved already.\n\t\t\t}\n\t\t}\n\t}\n\n\treturn h, nil\n}\n\nfunc (s *KnownHosts) saveSshKnownHosts() error {\n\n\tif s.NoSave {\n\t\treturn nil\n\t}\n\n\tfn := s.FilepathPrefix\n\n\t\/\/ backups\n\texec.Command(\"mv\", fn+\".prev\", fn+\".prev.prev\").Run()\n\texec.Command(\"cp\", \"-p\", fn, fn+\".prev\").Run()\n\n\tf, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open file '%s' for appending: '%s'\", fn, err)\n\t}\n\tdefer f.Close()\n\n\tfor _, v := range s.Hosts {\n\t\tif v.AlreadySaved {\n\t\t\tcontinue\n\t\t}\n\n\t\thostname := \"\"\n\t\tif len(v.SplitHostnames) == 1 {\n\t\t\thn := v.Hostname\n\t\t\thp := strings.Split(hn, \":\")\n\t\t\t\/\/pp(\"hn='%v', hp='%#v'\", hn, hp)\n\t\t\tif hp[1] != \"22\" {\n\t\t\t\thn = \"[\" + hp[0] + \"]:\" + hp[1]\n\t\t\t}\n\t\t\thostname = hn\n\t\t} else {\n\t\t\t\/\/ put all hostnames under this one key.\n\t\t\tk := 0\n\t\t\tfor tmp := range v.SplitHostnames {\n\t\t\t\thp := strings.Split(tmp, \":\")\n\t\t\t\tif len(hp) != 2 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"must be 2 parts here, but we got '%s'\", tmp))\n\t\t\t\t}\n\t\t\t\thn := \"[\" + hp[0] + \"]:\" + hp[1]\n\t\t\t\tif k == 0 {\n\t\t\t\t\thostname = hn\n\t\t\t\t} else {\n\t\t\t\t\thostname += \",\" + hn\n\t\t\t\t}\n\t\t\t\tk++\n\t\t\t}\n\t\t}\n\n\t\t_, err = fmt.Fprintf(f, \"%s %s %s %s\\n\",\n\t\t\thostname,\n\t\t\tv.Keytype,\n\t\t\tv.Base64EncodededPublicKey,\n\t\t\tv.Comment)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not append to file '%s': '%s'\", fn, err)\n\t\t}\n\t\tv.AlreadySaved = true\n\t}\n\n\treturn nil\n}\n\nfunc base64ofPublicKey(key ssh.PublicKey) string {\n\tb := &bytes.Buffer{}\n\te := base64.NewEncoder(base64.StdEncoding, b)\n\te.Write(key.Marshal())\n\te.Close()\n\treturn b.String()\n\n}\n\nfunc (prior *ServerPubKey) AddHostPort(hp string) {\n\t\/\/pp(\"AddHostPort called with hp = '%v'\", hp)\n\t_, already2 := prior.SplitHostnames[hp]\n\tprior.SplitHostnames[hp] = true\n\tif !already2 {\n\t\tprior.AlreadySaved = false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package krpc\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n)\n\nvar ErrorMethodUnknown = Error{\n\tCode: 204,\n\tMsg: \"Method Unknown\",\n}\n\n\/\/ Represented as a string or list in bencode.\ntype Error struct {\n\tCode int\n\tMsg string\n}\n\nvar (\n\t_ bencode.Unmarshaler = (*Error)(nil)\n\t_ bencode.Marshaler = (*Error)(nil)\n\t_ error = Error{}\n)\n\nfunc (e *Error) UnmarshalBencode(_b []byte) (err error) {\n\tvar _v interface{}\n\terr = bencode.Unmarshal(_b, &_v)\n\tif err != nil {\n\t\treturn\n\t}\n\tswitch v := _v.(type) {\n\tcase []interface{}:\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tr := recover()\n\t\t\t\tif r == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"unpacking %#v: %s\", v, r)\n\t\t\t}()\n\t\t\te.Code = int(v[0].(int64))\n\t\t\te.Msg = v[1].(string)\n\t\t}()\n\tcase string:\n\t\te.Msg = v\n\tdefault:\n\t\terr = fmt.Errorf(`KRPC error bencode value has unexpected type: %T`, _v)\n\t}\n\treturn\n}\n\nfunc (e Error) MarshalBencode() (ret []byte, err error) {\n\treturn bencode.Marshal([]interface{}{e.Code, e.Msg})\n}\n\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"KRPC error %d: %s\", e.Code, e.Msg)\n}\n<commit_msg>krpc: Add constants for all the errors in BEP 5<commit_after>package krpc\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/anacrolix\/torrent\/bencode\"\n)\n\n\/\/ These are documented in BEP 5.\nconst (\n\tErrorCodeGenericError = 201\n\tErrorCodeServerError = 202\n\tErrorCodeProtocolError = 203\n\tErrorCodeMethodUnknown = 204\n)\n\nvar ErrorMethodUnknown = Error{\n\tCode: ErrorCodeMethodUnknown,\n\tMsg: \"Method Unknown\",\n}\n\n\/\/ Represented as a string or list in bencode.\ntype Error struct {\n\tCode int\n\tMsg string\n}\n\nvar (\n\t_ bencode.Unmarshaler = (*Error)(nil)\n\t_ bencode.Marshaler = (*Error)(nil)\n\t_ error = Error{}\n)\n\nfunc (e *Error) UnmarshalBencode(_b []byte) (err error) {\n\tvar _v interface{}\n\terr = bencode.Unmarshal(_b, &_v)\n\tif err != nil {\n\t\treturn\n\t}\n\tswitch v := _v.(type) {\n\tcase []interface{}:\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tr := recover()\n\t\t\t\tif r == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"unpacking %#v: %s\", v, r)\n\t\t\t}()\n\t\t\te.Code = int(v[0].(int64))\n\t\t\te.Msg = v[1].(string)\n\t\t}()\n\tcase string:\n\t\te.Msg = v\n\tdefault:\n\t\terr = fmt.Errorf(`KRPC error bencode value has unexpected type: %T`, _v)\n\t}\n\treturn\n}\n\nfunc (e Error) MarshalBencode() (ret []byte, err error) {\n\treturn bencode.Marshal([]interface{}{e.Code, e.Msg})\n}\n\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"KRPC error %d: %s\", e.Code, e.Msg)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Palantir Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage builtintasks\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/palantir\/godel\/framework\/godellauncher\"\n)\n\nfunc VerifyTask(tasks []godellauncher.Task) godellauncher.Task {\n\tconst (\n\t\tverifyCmdName = \"verify\"\n\t\tapply = \"apply\"\n\t)\n\n\tverifyTasks, verifyTaskFlags := extractVerifyTasks(tasks)\n\tskipVerifyTasks := make(map[string]*bool)\n\tverifyTaskFlagVals := make(map[string]map[godellauncher.VerifyFlag]interface{})\n\n\tcmd := &cobra.Command{\n\t\tUse: verifyCmdName,\n\t\tShort: \"Run verify tasks for project\",\n\t}\n\n\tapplyVar := cmd.Flags().Bool(apply, true, \"Apply changes when possible\")\n\tfor _, task := range verifyTasks {\n\t\tskipVerifyTasks[task.Name] = cmd.Flags().Bool(\"skip-\"+task.Name, false, fmt.Sprintf(\"Skip '%s' task\", task.Name))\n\n\t\tflags, ok := verifyTaskFlags[task.Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif len(flags) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ hook up task-specific flags\n\t\tverifyTaskFlagVals[task.Name] = make(map[godellauncher.VerifyFlag]interface{})\n\t\tfor _, f := range flags {\n\t\t\tflagVal, err := f.AddFlag(cmd.Flags())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tverifyTaskFlagVals[task.Name][f] = flagVal\n\t\t}\n\t}\n\n\tcmd.SilenceErrors = true\n\tcmd.SilenceUsage = true\n\treturn godellauncher.Task{\n\t\tName: cmd.Use,\n\t\tDescription: cmd.Short,\n\t\tRunImpl: func(t *godellauncher.Task, global godellauncher.GlobalConfig, stdout io.Writer) error {\n\t\t\targs := []string{global.Executable}\n\t\t\targs = append(args, global.Task)\n\t\t\targs = append(args, global.TaskArgs...)\n\t\t\tos.Args = args\n\n\t\t\tcmd.RunE = func(cmd *cobra.Command, args []string) error {\n\t\t\t\tvar failedChecks []string\n\t\t\t\tfor _, task := range verifyTasks {\n\t\t\t\t\t\/\/ skip the task\n\t\t\t\t\tif *skipVerifyTasks[task.Name] {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvar taskFlagArgs []string\n\t\t\t\t\tif *applyVar {\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, task.Verify.ApplyTrueArgs...)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, task.Verify.ApplyFalseArgs...)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ get task-specific flag values\n\t\t\t\t\tfor _, f := range verifyTaskFlags[task.Name] {\n\t\t\t\t\t\tflagArgs, err := f.ToFlagArgs(verifyTaskFlagVals[task.Name][f])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, flagArgs...)\n\t\t\t\t\t}\n\n\t\t\t\t\ttaskGlobal := global\n\t\t\t\t\ttaskGlobal.Task = task.Name\n\t\t\t\t\ttaskGlobal.TaskArgs = taskFlagArgs\n\n\t\t\t\t\tfmt.Fprintf(stdout, \"Running %s...\\n\", task.Name)\n\t\t\t\t\tif err := task.Run(taskGlobal, stdout); err != nil {\n\t\t\t\t\t\tvar applyArgs []string\n\t\t\t\t\t\tif *applyVar {\n\t\t\t\t\t\t\tapplyArgs = task.Verify.ApplyTrueArgs\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tapplyArgs = task.Verify.ApplyFalseArgs\n\t\t\t\t\t\t}\n\t\t\t\t\t\tnameWithFlag := strings.Join(append([]string{task.Name}, applyArgs...), \" \")\n\t\t\t\t\t\tfailedChecks = append(failedChecks, nameWithFlag)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(failedChecks) != 0 {\n\t\t\t\t\tmsgParts := []string{\"Failed tasks:\"}\n\t\t\t\t\tfor _, check := range failedChecks {\n\t\t\t\t\t\tmsgParts = append(msgParts, \"\\t\"+check)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprintln(stdout, strings.Join(msgParts, \"\\n\"))\n\t\t\t\t\treturn fmt.Errorf(\"\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\trootCmd := godellauncher.CobraCmdToRootCmd(cmd)\n\t\t\trootCmd.SetOutput(stdout)\n\t\t\treturn rootCmd.Execute()\n\t\t},\n\t}\n}\n\nfunc extractVerifyTasks(tasks []godellauncher.Task) ([]godellauncher.Task, map[string][]godellauncher.VerifyFlag) {\n\tvar verifyTasks []godellauncher.Task\n\tverifyTaskFlags := make(map[string][]godellauncher.VerifyFlag)\n\tfor _, task := range tasks {\n\t\tif task.Verify == nil {\n\t\t\tcontinue\n\t\t}\n\t\tverifyTasks = append(verifyTasks, task)\n\t\tverifyTaskFlags[task.Name] = task.Verify.VerifyTaskFlags\n\t}\n\tsort.SliceStable(verifyTasks, func(i, j int) bool {\n\t\treturn verifyTasks[i].Verify.Ordering < verifyTasks[j].Verify.Ordering\n\t})\n\treturn verifyTasks, verifyTaskFlags\n}\n<commit_msg>Use lowercase for first letter of flag description (#289)<commit_after>\/\/ Copyright 2016 Palantir Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage builtintasks\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/palantir\/godel\/framework\/godellauncher\"\n)\n\nfunc VerifyTask(tasks []godellauncher.Task) godellauncher.Task {\n\tconst (\n\t\tverifyCmdName = \"verify\"\n\t\tapply = \"apply\"\n\t)\n\n\tverifyTasks, verifyTaskFlags := extractVerifyTasks(tasks)\n\tskipVerifyTasks := make(map[string]*bool)\n\tverifyTaskFlagVals := make(map[string]map[godellauncher.VerifyFlag]interface{})\n\n\tcmd := &cobra.Command{\n\t\tUse: verifyCmdName,\n\t\tShort: \"Run verify tasks for project\",\n\t}\n\n\tapplyVar := cmd.Flags().Bool(apply, true, \"apply changes when possible\")\n\tfor _, task := range verifyTasks {\n\t\tskipVerifyTasks[task.Name] = cmd.Flags().Bool(\"skip-\"+task.Name, false, fmt.Sprintf(\"skip '%s' task\", task.Name))\n\n\t\tflags, ok := verifyTaskFlags[task.Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif len(flags) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ hook up task-specific flags\n\t\tverifyTaskFlagVals[task.Name] = make(map[godellauncher.VerifyFlag]interface{})\n\t\tfor _, f := range flags {\n\t\t\tflagVal, err := f.AddFlag(cmd.Flags())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tverifyTaskFlagVals[task.Name][f] = flagVal\n\t\t}\n\t}\n\n\tcmd.SilenceErrors = true\n\tcmd.SilenceUsage = true\n\treturn godellauncher.Task{\n\t\tName: cmd.Use,\n\t\tDescription: cmd.Short,\n\t\tRunImpl: func(t *godellauncher.Task, global godellauncher.GlobalConfig, stdout io.Writer) error {\n\t\t\targs := []string{global.Executable}\n\t\t\targs = append(args, global.Task)\n\t\t\targs = append(args, global.TaskArgs...)\n\t\t\tos.Args = args\n\n\t\t\tcmd.RunE = func(cmd *cobra.Command, args []string) error {\n\t\t\t\tvar failedChecks []string\n\t\t\t\tfor _, task := range verifyTasks {\n\t\t\t\t\t\/\/ skip the task\n\t\t\t\t\tif *skipVerifyTasks[task.Name] {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvar taskFlagArgs []string\n\t\t\t\t\tif *applyVar {\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, task.Verify.ApplyTrueArgs...)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, task.Verify.ApplyFalseArgs...)\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ get task-specific flag values\n\t\t\t\t\tfor _, f := range verifyTaskFlags[task.Name] {\n\t\t\t\t\t\tflagArgs, err := f.ToFlagArgs(verifyTaskFlagVals[task.Name][f])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttaskFlagArgs = append(taskFlagArgs, flagArgs...)\n\t\t\t\t\t}\n\n\t\t\t\t\ttaskGlobal := global\n\t\t\t\t\ttaskGlobal.Task = task.Name\n\t\t\t\t\ttaskGlobal.TaskArgs = taskFlagArgs\n\n\t\t\t\t\tfmt.Fprintf(stdout, \"Running %s...\\n\", task.Name)\n\t\t\t\t\tif err := task.Run(taskGlobal, stdout); err != nil {\n\t\t\t\t\t\tvar applyArgs []string\n\t\t\t\t\t\tif *applyVar {\n\t\t\t\t\t\t\tapplyArgs = task.Verify.ApplyTrueArgs\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tapplyArgs = task.Verify.ApplyFalseArgs\n\t\t\t\t\t\t}\n\t\t\t\t\t\tnameWithFlag := strings.Join(append([]string{task.Name}, applyArgs...), \" \")\n\t\t\t\t\t\tfailedChecks = append(failedChecks, nameWithFlag)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(failedChecks) != 0 {\n\t\t\t\t\tmsgParts := []string{\"Failed tasks:\"}\n\t\t\t\t\tfor _, check := range failedChecks {\n\t\t\t\t\t\tmsgParts = append(msgParts, \"\\t\"+check)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprintln(stdout, strings.Join(msgParts, \"\\n\"))\n\t\t\t\t\treturn fmt.Errorf(\"\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\trootCmd := godellauncher.CobraCmdToRootCmd(cmd)\n\t\t\trootCmd.SetOutput(stdout)\n\t\t\treturn rootCmd.Execute()\n\t\t},\n\t}\n}\n\nfunc extractVerifyTasks(tasks []godellauncher.Task) ([]godellauncher.Task, map[string][]godellauncher.VerifyFlag) {\n\tvar verifyTasks []godellauncher.Task\n\tverifyTaskFlags := make(map[string][]godellauncher.VerifyFlag)\n\tfor _, task := range tasks {\n\t\tif task.Verify == nil {\n\t\t\tcontinue\n\t\t}\n\t\tverifyTasks = append(verifyTasks, task)\n\t\tverifyTaskFlags[task.Name] = task.Verify.VerifyTaskFlags\n\t}\n\tsort.SliceStable(verifyTasks, func(i, j int) bool {\n\t\treturn verifyTasks[i].Verify.Ordering < verifyTasks[j].Verify.Ordering\n\t})\n\treturn verifyTasks, verifyTaskFlags\n}\n<|endoftext|>"} {"text":"<commit_before>package disgo\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/slf4go\/logger\"\n)\n\ntype shard struct {\n\t\/\/ Parent session\n\tsession *Session\n\n\t\/\/ Websocket information used for identification and reconnection\n\twebSocket *websocket.Conn\n\tshard int\n\tsessionID string\n\tsequence int\n\theartbeat int\n\n\t\/\/ Mutex locks, reconnect to make sure there is only 1 process reconnecting and concurrent read\/write accesses on the socket\n\treadLock, writeLock sync.Mutex\n\n\t\/\/ Channels used to synchronize shutdown of this shards goroutines\n\tisShuttingDown bool\n\tcloseMainLoop chan bool\n\tcloseConfirmation chan bool\n}\n\n\/\/ Shard constructor, automatically connects with the given shardNum\nfunc newShard(session *Session, shardNum int) (*shard, error) {\n\ts := &shard{\n\t\tsession: session,\n\t\tshard: shardNum,\n\t\tcloseMainLoop: make(chan bool),\n\t\tcloseConfirmation: make(chan bool),\n\t}\n\n\t\/\/ These will be unlocked once a connection is made.\n\ts.readLock.Lock()\n\ts.writeLock.Lock()\n\n\tif err := s.connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Builds a new connection with Discord, waits for the \"hello\" frame and then proceeds to identify itself to the Discord service\nfunc (s *shard) connect() error {\n\tconn, _, err := websocket.DefaultDialer.Dial(s.session.wsUrl, http.Header{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.webSocket = conn\n\ts.webSocket.SetCloseHandler(s.onClose)\n\n\thelloFrame, err := s.readFrame(true)\n\tif err != nil {\n\t\treturn err\n\t} else if helloFrame.Op != opHello {\n\t\treturn errors.New(\"First frame sent from the Discord Gateway is not hello\")\n\t}\n\n\thello := helloPayload{}\n\tif err = json.Unmarshal(helloFrame.Data, &hello); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugf(\"Connected to Discord servers: %s\", strings.Join(hello.Servers, \", \"))\n\tlogger.Debugf(\"Setting up a heartbeat interval of %d ms\", hello.HeartbeatInterval)\n\ts.heartbeat = hello.HeartbeatInterval\n\n\tif err = s.identify(); err != nil {\n\t\treturn err\n\t}\n\n\tgo s.mainLoop()\n\treturn nil\n}\n\n\/\/ identify takes care of the identification using the bot token on the discord server\nfunc (s *shard) identify() error {\n\tif s.sessionID != \"\" {\n\t\tlogger.Debugf(\"Resuming connection starting at sequence %d.\", s.sequence)\n\t\ts.sendFrame(&gatewayFrame{opResume, resumePayload{\n\t\t\tToken: s.session.token,\n\t\t\tSessionID: s.sessionID,\n\t\t\tSequence: s.sequence,\n\t\t}}, true)\n\t} else {\n\t\tlogger.Debugf(\"Identifying to websocket\")\n\t\ts.sendFrame(&gatewayFrame{opIdentify, identifyPayload{\n\t\t\tToken: s.session.token,\n\t\t\tCompress: true,\n\t\t\tLargeThreshold: 250,\n\t\t\tShard: [2]int{s.shard, cap(s.session.shards)},\n\t\t\tProperties: propertiesPayload{\n\t\t\t\tOS: runtime.GOOS,\n\t\t\t\tBrowser: \"DisGo\",\n\t\t\t\tDevice: \"DisGo\",\n\t\t\t},\n\t\t}}, true)\n\t}\n\n\tfor {\n\t\tframe, err := s.readFrame(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch frame.Op {\n\t\tcase opDispatch:\n\t\t\tswitch frame.EventName {\n\t\t\tcase \"READY\":\n\t\t\t\tready := ReadyEvent{}\n\t\t\t\tif err := json.Unmarshal(frame.Data, &ready); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\ts.sessionID = ready.SessionID\n\n\t\t\t\tfallthrough\n\t\t\tcase \"RESUMED\":\n\t\t\t\ts.session.dispatchEvent(frame)\n\n\t\t\t\ts.readLock.Unlock()\n\t\t\t\ts.writeLock.Unlock()\n\n\t\t\t\treturn nil \/\/ Break out of the loop, we have what we want\n\t\t\tdefault:\n\t\t\t\ts.session.dispatchEvent(frame) \/\/ Nope, resume action, let's wait for more frames\n\t\t\t}\n\t\tcase opInvalidSession:\n\t\t\ts.sessionID = \"\" \/\/ Invalidate session and retry\n\t\t\treturn s.identify()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Unexpected opCode received from Discord: %d\", frame.Op)\n\t\t}\n\t}\n}\n\n\/\/ Main loop of the shard connection, also responsible for starting the read loop.\n\/\/ This goroutine will pass around all the messages through the rest of the library.\nfunc (s *shard) mainLoop() {\n\tlogger.Debugf(\"Starting main loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer logger.Debugf(\"Exiting main loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer func() { s.closeConfirmation <- true }()\n\n\theartbeat := time.NewTicker(time.Duration(s.heartbeat) * time.Millisecond)\n\tdefer heartbeat.Stop()\n\tsentHeartBeat := false\n\n\treader := make(chan *receivedFrame, 1)\n\tgo s.readWebSocket(reader)\n\n\tfor {\n\t\tselect {\n\t\tcase <-heartbeat.C:\n\t\t\tif !sentHeartBeat {\n\t\t\t\tgo s.sendFrame(&gatewayFrame{opHeartbeat, s.sequence}, false)\n\t\t\t\tsentHeartBeat = true\n\t\t\t} else {\n\t\t\t\tgo s.disconnect(websocket.ClosePolicyViolation, \"Did not respond to previous heartbeat\")\n\t\t\t}\n\t\tcase <-s.closeMainLoop:\n\t\t\treturn\n\t\tcase frame := <-reader:\n\t\t\tswitch frame.Op {\n\t\t\tcase opHeartbeat:\n\t\t\t\ts.sendFrame(&gatewayFrame{Op: opHeartbeatAck}, false)\n\t\t\tcase opHeartbeatAck:\n\t\t\t\tsentHeartBeat = false\n\t\t\tcase opReconnect:\n\t\t\t\tgo s.disconnect(websocket.CloseNormalClosure, \"op Reconnect\")\n\t\t\tcase opInvalidSession:\n\t\t\t\ts.sessionID = \"\"\n\t\t\t\ts.identify()\n\t\t\tcase opDispatch:\n\t\t\t\ts.session.dispatchEvent(frame)\n\t\t\tdefault:\n\t\t\t\tlogger.Errorf(\"Unexpected opCode received: %d\", frame.Op)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ The secondary goroutine of each shard, responsible for reading frames and putting them in the channel.\nfunc (s *shard) readWebSocket(reader chan *receivedFrame) {\n\tlogger.Debugf(\"Starting read loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer logger.Debugf(\"Exiting read loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer func() { s.closeConfirmation <- true }()\n\n\tfor {\n\t\tframe, err := s.readFrame(false)\n\t\tif err != nil {\n\t\t\tif !s.isShuttingDown {\n\t\t\t\ts.onClose(websocket.CloseAbnormalClosure, err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\treader <- frame\n\t}\n\n}\n\n\/\/ Reads 1 frame from the websocket.\nfunc (s *shard) readFrame(isConnecting bool) (*receivedFrame, error) {\n\tif !isConnecting {\n\t\ts.readLock.Lock()\n\t\tdefer s.readLock.Unlock()\n\t}\n\n\tlogger.Tracef(\"Shard.readFrame() called\")\n\tmsgType, msg, err := s.webSocket.ReadMessage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reader io.Reader\n\treader = bytes.NewBuffer(msg)\n\n\tif msgType == websocket.BinaryMessage {\n\t\tzReader, err := zlib.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer zReader.Close()\n\n\t\treader = zReader\n\t}\n\n\tframe := receivedFrame{Sequence: -1}\n\terr = json.NewDecoder(reader).Decode(&frame)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Debugf(\"Received frame with opCode: %d\", frame.Op)\n\n\tif frame.Sequence > s.sequence {\n\t\tlogger.Tracef(\"Last sequence received set to %d.\", frame.Sequence)\n\t\ts.sequence = frame.Sequence\n\t}\n\treturn &frame, nil\n}\n\n\/\/ Sends 1 frame to the websocket\nfunc (s *shard) sendFrame(frame *gatewayFrame, isConnecting bool) {\n\tif !isConnecting {\n\t\ts.writeLock.Lock()\n\t\tdefer s.writeLock.Unlock()\n\t}\n\n\tlogger.Debugf(\"Sending frame with opCode: %d\", frame.Op)\n\ts.webSocket.WriteJSON(frame)\n}\n\n\/\/ Called when we have received a closing intention that we have not initiated (ws close message, recv error)\nfunc (s *shard) onClose(code int, text string) error {\n\tgo func() {\n\t\tlogger.Warnf(\"Received Close Frame from Discord. Code: %d. Text: %s\", code, text)\n\n\t\ts.isShuttingDown = true\n\t\ts.stopRoutines()\n\t\ts.readLock.Lock()\n\t\ts.writeLock.Lock()\n\n\t\tif err := s.webSocket.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(code, text)); err != nil {\n\t\t\tlogger.Error(\"Could not confirm close message.\")\n\t\t\tlogger.ErrorE(err)\n\t\t}\n\n\t\ts.cleanupWebSocket()\n\t}()\n\n\treturn nil\n}\n\n\/\/ Called by us to disconnect the websocket (shutdown, protocol errors, missed heartbeats)\nfunc (s *shard) disconnect(code int, text string) {\n\ts.isShuttingDown = true\n\ts.writeLock.Lock()\n\n\tcloseMessage := make(chan int)\n\ts.webSocket.SetCloseHandler(func(code int, _ string) error {\n\t\tcloseMessage <- code\n\t\treturn nil\n\t})\n\n\tif err := s.webSocket.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(code, text)); err != nil {\n\t\tlogger.Error(\"Could not write close message to Discord, connection already dead?\")\n\t\tlogger.ErrorE(err)\n\t} else {\n\t\tselect {\n\t\tcase code := <-closeMessage:\n\t\t\tlogger.Debugf(\"Discord connection closed with code %d\", code)\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tlogger.Warn(\"Discord did not reply to the close message, force-closing connection.\")\n\t\t}\n\t}\n\n\ts.stopRoutines()\n\ts.readLock.Lock()\n\ts.cleanupWebSocket()\n}\n\n\/\/ Stops the two shard goroutines\nfunc (s *shard) stopRoutines() {\n\ts.webSocket.SetReadDeadline(time.Now()) \/\/ Stop the read loop, in case it hasn't already due to an error\n\t<-s.closeConfirmation \/\/ Wait for the read loop\n\ts.closeMainLoop <- true \/\/ Stop the main loop\n\t<-s.closeConfirmation \/\/ Wait for the main loop\n}\n\n\/\/ Cleans up the current websocket and tries to reconnect it if needed\nfunc (s *shard) cleanupWebSocket() {\n\ts.webSocket.Close()\n\ts.webSocket = nil\n\n\tfor !s.session.isShuttingDown() {\n\t\tif err := s.connect(); err != nil {\n\t\t\tlogger.Error(\"Could not reconnect to Discord.\")\n\t\t\tlogger.ErrorE(err)\n\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>Added connection cleanup on failed identification<commit_after>package disgo\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/slf4go\/logger\"\n)\n\ntype shard struct {\n\t\/\/ Parent session\n\tsession *Session\n\n\t\/\/ Websocket information used for identification and reconnection\n\twebSocket *websocket.Conn\n\tshard int\n\tsessionID string\n\tsequence int\n\theartbeat int\n\n\t\/\/ Mutex locks, reconnect to make sure there is only 1 process reconnecting and concurrent read\/write accesses on the socket\n\treadLock, writeLock sync.Mutex\n\n\t\/\/ Channels used to synchronize shutdown of this shards goroutines\n\tisShuttingDown bool\n\tcloseMainLoop chan bool\n\tcloseConfirmation chan bool\n}\n\n\/\/ Shard constructor, automatically connects with the given shardNum\nfunc newShard(session *Session, shardNum int) (*shard, error) {\n\ts := &shard{\n\t\tsession: session,\n\t\tshard: shardNum,\n\t\tcloseMainLoop: make(chan bool),\n\t\tcloseConfirmation: make(chan bool),\n\t}\n\n\t\/\/ These will be unlocked once a connection is made.\n\ts.readLock.Lock()\n\ts.writeLock.Lock()\n\n\tif err := s.connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Builds a new connection with Discord, waits for the \"hello\" frame and then proceeds to identify itself to the Discord service\nfunc (s *shard) connect() error {\n\t\/\/ Open the websocket\n\tconn, _, err := websocket.DefaultDialer.Dial(s.session.wsUrl, http.Header{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.webSocket = conn\n\ts.webSocket.SetCloseHandler(s.onClose)\n\n\t\/\/ At the end of this function, clean up the socket if we didn't identify correctly.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\ts.webSocket = nil\n\t\t}\n\t}()\n\n\t\/\/ Discord always sends a hello frame upon opening the connection\n\tvar helloFrame *receivedFrame\n\thelloFrame, err = s.readFrame(true)\n\tif err != nil {\n\t\treturn err\n\t} else if helloFrame.Op != opHello {\n\t\treturn errors.New(\"First frame sent from the Discord Gateway is not hello\")\n\t}\n\n\thello := helloPayload{}\n\tif err = json.Unmarshal(helloFrame.Data, &hello); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ We've succesfully connected.\n\tlogger.Debugf(\"Connected to Discord servers: %s\", strings.Join(hello.Servers, \", \"))\n\tlogger.Debugf(\"Setting up a heartbeat interval of %d ms\", hello.HeartbeatInterval)\n\ts.heartbeat = hello.HeartbeatInterval\n\n\t\/\/ Now identify\n\tif err = s.identify(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Identification succesful, unlock reading\/writing and start the goroutines\n\ts.readLock.Unlock()\n\ts.writeLock.Unlock()\n\tgo s.mainLoop()\n\n\treturn nil\n}\n\n\/\/ identify takes care of the identification using the bot token on the discord server\nfunc (s *shard) identify() error {\n\tif s.sessionID != \"\" {\n\t\tlogger.Debugf(\"Resuming connection starting at sequence %d.\", s.sequence)\n\t\ts.sendFrame(&gatewayFrame{opResume, resumePayload{\n\t\t\tToken: s.session.token,\n\t\t\tSessionID: s.sessionID,\n\t\t\tSequence: s.sequence,\n\t\t}}, true)\n\t} else {\n\t\tlogger.Debugf(\"Identifying to websocket\")\n\t\ts.sendFrame(&gatewayFrame{opIdentify, identifyPayload{\n\t\t\tToken: s.session.token,\n\t\t\tCompress: true,\n\t\t\tLargeThreshold: 250,\n\t\t\tShard: [2]int{s.shard, cap(s.session.shards)},\n\t\t\tProperties: propertiesPayload{\n\t\t\t\tOS: runtime.GOOS,\n\t\t\t\tBrowser: \"DisGo\",\n\t\t\t\tDevice: \"DisGo\",\n\t\t\t},\n\t\t}}, true)\n\t}\n\n\tfor {\n\t\tframe, err := s.readFrame(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch frame.Op {\n\t\tcase opDispatch:\n\t\t\tswitch frame.EventName {\n\t\t\tcase \"READY\":\n\t\t\t\tready := ReadyEvent{}\n\t\t\t\tif err := json.Unmarshal(frame.Data, &ready); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\ts.sessionID = ready.SessionID\n\n\t\t\t\tfallthrough\n\t\t\tcase \"RESUMED\":\n\t\t\t\ts.session.dispatchEvent(frame)\n\t\t\t\treturn nil \/\/ Break out of the loop, we have what we want\n\t\t\tdefault:\n\t\t\t\ts.session.dispatchEvent(frame) \/\/ Nope, resume action, let's wait for more frames\n\t\t\t}\n\t\tcase opInvalidSession:\n\t\t\ts.sessionID = \"\" \/\/ Invalidate session and retry\n\t\t\treturn s.identify()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Unexpected opCode received from Discord: %d\", frame.Op)\n\t\t}\n\t}\n}\n\n\/\/ Main loop of the shard connection, also responsible for starting the read loop.\n\/\/ This goroutine will pass around all the messages through the rest of the library.\nfunc (s *shard) mainLoop() {\n\tlogger.Debugf(\"Starting main loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer logger.Debugf(\"Exiting main loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer func() { s.closeConfirmation <- true }()\n\n\theartbeat := time.NewTicker(time.Duration(s.heartbeat) * time.Millisecond)\n\tdefer heartbeat.Stop()\n\tsentHeartBeat := false\n\n\treader := make(chan *receivedFrame, 1)\n\tgo s.readWebSocket(reader)\n\n\tfor {\n\t\tselect {\n\t\tcase <-heartbeat.C:\n\t\t\tif !sentHeartBeat {\n\t\t\t\tgo s.sendFrame(&gatewayFrame{opHeartbeat, s.sequence}, false)\n\t\t\t\tsentHeartBeat = true\n\t\t\t} else {\n\t\t\t\tgo s.disconnect(websocket.ClosePolicyViolation, \"Did not respond to previous heartbeat\")\n\t\t\t}\n\t\tcase <-s.closeMainLoop:\n\t\t\treturn\n\t\tcase frame := <-reader:\n\t\t\tswitch frame.Op {\n\t\t\tcase opHeartbeat:\n\t\t\t\ts.sendFrame(&gatewayFrame{Op: opHeartbeatAck}, false)\n\t\t\tcase opHeartbeatAck:\n\t\t\t\tsentHeartBeat = false\n\t\t\tcase opReconnect:\n\t\t\t\tgo s.disconnect(websocket.CloseNormalClosure, \"op Reconnect\")\n\t\t\tcase opInvalidSession:\n\t\t\t\ts.sessionID = \"\"\n\t\t\t\ts.identify()\n\t\t\tcase opDispatch:\n\t\t\t\ts.session.dispatchEvent(frame)\n\t\t\tdefault:\n\t\t\t\tlogger.Errorf(\"Unexpected opCode received: %d\", frame.Op)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ The secondary goroutine of each shard, responsible for reading frames and putting them in the channel.\nfunc (s *shard) readWebSocket(reader chan *receivedFrame) {\n\tlogger.Debugf(\"Starting read loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer logger.Debugf(\"Exiting read loop for shard [%d\/%d]\", s.shard+1, cap(s.session.shards))\n\tdefer func() { s.closeConfirmation <- true }()\n\n\tfor {\n\t\tframe, err := s.readFrame(false)\n\t\tif err != nil {\n\t\t\tif !s.isShuttingDown {\n\t\t\t\ts.onClose(websocket.CloseAbnormalClosure, err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\treader <- frame\n\t}\n\n}\n\n\/\/ Reads 1 frame from the websocket.\nfunc (s *shard) readFrame(isConnecting bool) (*receivedFrame, error) {\n\tif !isConnecting {\n\t\ts.readLock.Lock()\n\t\tdefer s.readLock.Unlock()\n\t}\n\n\tlogger.Tracef(\"Shard.readFrame() called\")\n\tmsgType, msg, err := s.webSocket.ReadMessage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reader io.Reader\n\treader = bytes.NewBuffer(msg)\n\n\tif msgType == websocket.BinaryMessage {\n\t\tzReader, err := zlib.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer zReader.Close()\n\n\t\treader = zReader\n\t}\n\n\tframe := receivedFrame{Sequence: -1}\n\terr = json.NewDecoder(reader).Decode(&frame)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Debugf(\"Received frame with opCode: %d\", frame.Op)\n\n\tif frame.Sequence > s.sequence {\n\t\tlogger.Tracef(\"Last sequence received set to %d.\", frame.Sequence)\n\t\ts.sequence = frame.Sequence\n\t}\n\treturn &frame, nil\n}\n\n\/\/ Sends 1 frame to the websocket\nfunc (s *shard) sendFrame(frame *gatewayFrame, isConnecting bool) {\n\tif !isConnecting {\n\t\ts.writeLock.Lock()\n\t\tdefer s.writeLock.Unlock()\n\t}\n\n\tlogger.Debugf(\"Sending frame with opCode: %d\", frame.Op)\n\ts.webSocket.WriteJSON(frame)\n}\n\n\/\/ Called when we have received a closing intention that we have not initiated (ws close message, recv error)\nfunc (s *shard) onClose(code int, text string) error {\n\tgo func() {\n\t\tlogger.Warnf(\"Received Close Frame from Discord. Code: %d. Text: %s\", code, text)\n\n\t\ts.isShuttingDown = true\n\t\ts.stopRoutines()\n\t\ts.readLock.Lock()\n\t\ts.writeLock.Lock()\n\n\t\tif err := s.webSocket.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(code, text)); err != nil {\n\t\t\tlogger.Error(\"Could not confirm close message.\")\n\t\t\tlogger.ErrorE(err)\n\t\t}\n\n\t\ts.cleanupWebSocket()\n\t}()\n\n\treturn nil\n}\n\n\/\/ Called by us to disconnect the websocket (shutdown, protocol errors, missed heartbeats)\nfunc (s *shard) disconnect(code int, text string) {\n\ts.isShuttingDown = true\n\ts.writeLock.Lock()\n\n\tcloseMessage := make(chan int)\n\ts.webSocket.SetCloseHandler(func(code int, _ string) error {\n\t\tcloseMessage <- code\n\t\treturn nil\n\t})\n\n\tif err := s.webSocket.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(code, text)); err != nil {\n\t\tlogger.Error(\"Could not write close message to Discord, connection already dead?\")\n\t\tlogger.ErrorE(err)\n\t} else {\n\t\tselect {\n\t\tcase code := <-closeMessage:\n\t\t\tlogger.Debugf(\"Discord connection closed with code %d\", code)\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tlogger.Warn(\"Discord did not reply to the close message, force-closing connection.\")\n\t\t}\n\t}\n\n\ts.stopRoutines()\n\ts.readLock.Lock()\n\ts.cleanupWebSocket()\n}\n\n\/\/ Stops the two shard goroutines\nfunc (s *shard) stopRoutines() {\n\ts.webSocket.SetReadDeadline(time.Now()) \/\/ Stop the read loop, in case it hasn't already due to an error\n\t<-s.closeConfirmation \/\/ Wait for the read loop\n\ts.closeMainLoop <- true \/\/ Stop the main loop\n\t<-s.closeConfirmation \/\/ Wait for the main loop\n}\n\n\/\/ Cleans up the current websocket and tries to reconnect it if needed\nfunc (s *shard) cleanupWebSocket() {\n\ts.webSocket.Close()\n\ts.webSocket = nil\n\n\tfor !s.session.isShuttingDown() {\n\t\tif err := s.connect(); err != nil {\n\t\t\tlogger.Error(\"Could not reconnect to Discord.\")\n\t\t\tlogger.ErrorE(err)\n\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package xlsx\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\n\/\/ Sheet is a high level structure intended to provide user access to\n\/\/ the contents of a particular sheet within an XLSX file.\ntype Sheet struct {\n\tName string\n\tFile *File\n\tRows []*Row\n\tCols []*Col\n\tMaxRow int\n\tMaxCol int\n\tHidden bool\n\tSelected bool\n\tSheetViews []SheetView\n\tSheetFormat SheetFormat\n\tAutoFilter *AutoFilter\n}\n\ntype SheetView struct {\n\tPane *Pane\n}\n\ntype Pane struct {\n\tXSplit float64\n\tYSplit float64\n\tTopLeftCell string\n\tActivePane string\n\tState string \/\/ Either \"split\" or \"frozen\"\n}\n\ntype SheetFormat struct {\n\tDefaultColWidth float64\n\tDefaultRowHeight float64\n\tOutlineLevelCol uint8\n\tOutlineLevelRow uint8\n}\n\ntype AutoFilter struct {\n\tTopLeftCell string\n\tBottomRightCell string\n}\n\n\/\/ Add a new Row to a Sheet\nfunc (s *Sheet) AddRow() *Row {\n\trow := &Row{Sheet: s}\n\ts.Rows = append(s.Rows, row)\n\tif len(s.Rows) > s.MaxRow {\n\t\ts.MaxRow = len(s.Rows)\n\t}\n\treturn row\n}\n\n\/\/ Make sure we always have as many Cols as we do cells.\nfunc (s *Sheet) maybeAddCol(cellCount int) {\n\tif cellCount > s.MaxCol {\n\t\tloopCnt := cellCount - s.MaxCol\n\t\tcurrIndex := s.MaxCol + 1\n\t\tfor i := 0; i < loopCnt; i++ {\n\n\t\t\tcol := &Col{\n\t\t\t\tstyle: NewStyle(),\n\t\t\t\tMin: currIndex,\n\t\t\t\tMax: currIndex,\n\t\t\t\tHidden: false,\n\t\t\t\tCollapsed: false}\n\t\t\ts.Cols = append(s.Cols, col)\n\t\t\tcurrIndex++\n\t\t}\n\t\ts.MaxCol = cellCount\n\t}\n}\n\n\/\/ Make sure we always have as many Cols as we do cells.\nfunc (s *Sheet) Col(idx int) *Col {\n\ts.maybeAddCol(idx + 1)\n\treturn s.Cols[idx]\n}\n\n\/\/ Get a Cell by passing it's cartesian coordinates (zero based) as\n\/\/ row and column integer indexes.\n\/\/\n\/\/ For example:\n\/\/\n\/\/ cell := sheet.Cell(0,0)\n\/\/\n\/\/ ... would set the variable \"cell\" to contain a Cell struct\n\/\/ containing the data from the field \"A1\" on the spreadsheet.\nfunc (sh *Sheet) Cell(row, col int) *Cell {\n\n\t\/\/ If the user requests a row beyond what we have, then extend.\n\tfor len(sh.Rows) <= row {\n\t\tsh.AddRow()\n\t}\n\n\tr := sh.Rows[row]\n\tfor len(r.Cells) <= col {\n\t\tr.AddCell()\n\t}\n\n\treturn r.Cells[col]\n}\n\n\/\/Set the width of a single column or multiple columns.\nfunc (s *Sheet) SetColWidth(startcol, endcol int, width float64) error {\n\tif startcol > endcol {\n\t\treturn fmt.Errorf(\"Could not set width for range %d-%d: startcol must be less than endcol.\", startcol, endcol)\n\t}\n\tcol := &Col{\n\t\tstyle: NewStyle(),\n\t\tMin: startcol + 1,\n\t\tMax: endcol + 1,\n\t\tHidden: false,\n\t\tCollapsed: false,\n\t\tWidth: width}\n\ts.Cols = append(s.Cols, col)\n\tif endcol+1 > s.MaxCol {\n\t\ts.MaxCol = endcol + 1\n\t}\n\treturn nil\n}\n\n\/\/ When merging cells, the cell may be the 'original' or the 'covered'.\n\/\/ First, figure out which cells are merge starting points. Then create\n\/\/ the necessary cells underlying the merge area.\n\/\/ Then go through all the underlying cells and apply the appropriate\n\/\/ border, based on the original cell.\nfunc (s *Sheet) handleMerged() {\n\tmerged := make(map[string]*Cell)\n\n\tfor r, row := range s.Rows {\n\t\tfor c, cell := range row.Cells {\n\t\t\tif cell.HMerge > 0 || cell.VMerge > 0 {\n\t\t\t\tcoord := GetCellIDStringFromCoords(c, r)\n\t\t\t\tmerged[coord] = cell\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ This loop iterates over all cells that should be merged and applies the correct\n\t\/\/ borders to them depending on their position. If any cells required by the merge\n\t\/\/ are missing, they will be allocated by s.Cell().\n\tfor key, cell := range merged {\n\t\tmainstyle := cell.GetStyle()\n\n\t\ttop := mainstyle.Border.Top\n\t\tleft := mainstyle.Border.Left\n\t\tright := mainstyle.Border.Right\n\t\tbottom := mainstyle.Border.Bottom\n\n\t\t\/\/ When merging cells, the upper left cell does not maintain\n\t\t\/\/ the original borders\n\t\tmainstyle.Border.Top = \"none\"\n\t\tmainstyle.Border.Left = \"none\"\n\t\tmainstyle.Border.Right = \"none\"\n\t\tmainstyle.Border.Bottom = \"none\"\n\n\t\tmaincol, mainrow, _ := GetCoordsFromCellIDString(key)\n\t\tfor rownum := 0; rownum <= cell.VMerge; rownum++ {\n\t\t\tfor colnum := 0; colnum <= cell.HMerge; colnum++ {\n\t\t\t\ttmpcell := s.Cell(mainrow+rownum, maincol+colnum)\n\t\t\t\tstyle := tmpcell.GetStyle()\n\t\t\t\tstyle.ApplyBorder = true\n\n\t\t\t\tif rownum == 0 {\n\t\t\t\t\tstyle.Border.Top = top\n\t\t\t\t}\n\n\t\t\t\tif rownum == (cell.VMerge) {\n\t\t\t\t\tstyle.Border.Bottom = bottom\n\t\t\t\t}\n\n\t\t\t\tif colnum == 0 {\n\t\t\t\t\tstyle.Border.Left = left\n\t\t\t\t}\n\n\t\t\t\tif colnum == (cell.HMerge) {\n\t\t\t\t\tstyle.Border.Right = right\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Dump sheet to its XML representation, intended for internal use only\nfunc (s *Sheet) makeXLSXSheet(refTable *RefTable, styles *xlsxStyleSheet) *xlsxWorksheet {\n\tworksheet := newXlsxWorksheet()\n\txSheet := xlsxSheetData{}\n\tmaxRow := 0\n\tmaxCell := 0\n\tvar maxLevelCol, maxLevelRow uint8\n\n\t\/\/ Scan through the sheet and see if there are any merged cells. If there\n\t\/\/ are, we may need to extend the size of the sheet. There needs to be\n\t\/\/ phantom cells underlying the area covered by the merged cell\n\ts.handleMerged()\n\n\tfor index, sheetView := range s.SheetViews {\n\t\tif sheetView.Pane != nil {\n\t\t\tworksheet.SheetViews.SheetView[index].Pane = &xlsxPane{\n\t\t\t\tXSplit: sheetView.Pane.XSplit,\n\t\t\t\tYSplit: sheetView.Pane.YSplit,\n\t\t\t\tTopLeftCell: sheetView.Pane.TopLeftCell,\n\t\t\t\tActivePane: sheetView.Pane.ActivePane,\n\t\t\t\tState: sheetView.Pane.State,\n\t\t\t}\n\n\t\t}\n\t}\n\n\tif s.Selected {\n\t\tworksheet.SheetViews.SheetView[0].TabSelected = true\n\t}\n\n\tif s.SheetFormat.DefaultRowHeight != 0 {\n\t\tworksheet.SheetFormatPr.DefaultRowHeight = s.SheetFormat.DefaultRowHeight\n\t}\n\tworksheet.SheetFormatPr.DefaultColWidth = s.SheetFormat.DefaultColWidth\n\n\tcolsXfIdList := make([]int, len(s.Cols))\n\tworksheet.Cols = &xlsxCols{Col: []xlsxCol{}}\n\tfor c, col := range s.Cols {\n\t\tXfId := 0\n\t\tif col.Min == 0 {\n\t\t\tcol.Min = 1\n\t\t}\n\t\tif col.Max == 0 {\n\t\t\tcol.Max = 1\n\t\t}\n\t\tstyle := col.GetStyle()\n\t\t\/\/col's style always not nil\n\t\tif style != nil {\n\t\t\txNumFmt := styles.newNumFmt(col.numFmt)\n\t\t\tXfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)\n\t\t}\n\t\tcolsXfIdList[c] = XfId\n\n\t\tvar customWidth bool\n\t\tif col.Width == 0 {\n\t\t\tcol.Width = ColWidth\n\t\t\tcustomWidth = false\n\n\t\t} else {\n\t\t\tcustomWidth = true\n\t\t}\n\t\tworksheet.Cols.Col = append(worksheet.Cols.Col,\n\t\t\txlsxCol{Min: col.Min,\n\t\t\t\tMax: col.Max,\n\t\t\t\tHidden: col.Hidden,\n\t\t\t\tWidth: col.Width,\n\t\t\t\tCustomWidth: customWidth,\n\t\t\t\tCollapsed: col.Collapsed,\n\t\t\t\tOutlineLevel: col.OutlineLevel,\n\t\t\t\tStyle: XfId,\n\t\t\t})\n\n\t\tif col.OutlineLevel > maxLevelCol {\n\t\t\tmaxLevelCol = col.OutlineLevel\n\t\t}\n\t}\n\n\tfor r, row := range s.Rows {\n\t\tif r > maxRow {\n\t\t\tmaxRow = r\n\t\t}\n\t\txRow := xlsxRow{}\n\t\txRow.R = r + 1\n\t\tif row.isCustom {\n\t\t\txRow.CustomHeight = true\n\t\t\txRow.Ht = fmt.Sprintf(\"%g\", row.Height)\n\t\t}\n\t\txRow.OutlineLevel = row.OutlineLevel\n\t\tif row.OutlineLevel > maxLevelRow {\n\t\t\tmaxLevelRow = row.OutlineLevel\n\t\t}\n\t\tfor c, cell := range row.Cells {\n\t\t\tXfId := colsXfIdList[c]\n\n\t\t\t\/\/ generate NumFmtId and add new NumFmt\n\t\t\txNumFmt := styles.newNumFmt(cell.NumFmt)\n\n\t\t\tstyle := cell.style\n\t\t\tif style != nil {\n\t\t\t\tXfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)\n\t\t\t} else if len(cell.NumFmt) > 0 && !compareFormatString(s.Cols[c].numFmt, cell.NumFmt) {\n\t\t\t\tXfId = handleNumFmtIdForXLSX(xNumFmt.NumFmtId, styles)\n\t\t\t}\n\n\t\t\tif c > maxCell {\n\t\t\t\tmaxCell = c\n\t\t\t}\n\t\t\txC := xlsxC{\n\t\t\t\tS: XfId,\n\t\t\t\tR: GetCellIDStringFromCoords(c, r),\n\t\t\t}\n\t\t\tif cell.formula != \"\" {\n\t\t\t\txC.F = &xlsxF{Content: cell.formula}\n\t\t\t}\n\t\t\tswitch cell.cellType {\n\t\t\tcase CellTypeInline:\n\t\t\t\t\/\/ Inline strings are turned into shared strings since they are more efficient.\n\t\t\t\t\/\/ This is what Excel does as well.\n\t\t\t\tfallthrough\n\t\t\tcase CellTypeString:\n\t\t\t\tif len(cell.Value) > 0 {\n\t\t\t\t\txC.V = strconv.Itoa(refTable.AddString(cell.Value))\n\t\t\t\t}\n\t\t\t\txC.T = \"s\"\n\t\t\tcase CellTypeNumeric:\n\t\t\t\t\/\/ Numeric is the default, so the type can be left blank\n\t\t\t\txC.V = cell.Value\n\t\t\tcase CellTypeBool:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"b\"\n\t\t\tcase CellTypeError:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"e\"\n\t\t\tcase CellTypeDate:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"d\"\n\t\t\tcase CellTypeStringFormula:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"str\"\n\t\t\tdefault:\n\t\t\t\tpanic(errors.New(\"unknown cell type cannot be marshaled\"))\n\t\t\t}\n\n\t\t\txRow.C = append(xRow.C, xC)\n\n\t\t\tif cell.HMerge > 0 || cell.VMerge > 0 {\n\t\t\t\t\/\/ r == rownum, c == colnum\n\t\t\t\tmc := xlsxMergeCell{}\n\t\t\t\tstart := GetCellIDStringFromCoords(c, r)\n\t\t\t\tendCol := c + cell.HMerge\n\t\t\t\tendRow := r + cell.VMerge\n\t\t\t\tend := GetCellIDStringFromCoords(endCol, endRow)\n\t\t\t\tmc.Ref = start + \":\" + end\n\t\t\t\tif worksheet.MergeCells == nil {\n\t\t\t\t\tworksheet.MergeCells = &xlsxMergeCells{}\n\t\t\t\t}\n\t\t\t\tworksheet.MergeCells.Cells = append(worksheet.MergeCells.Cells, mc)\n\t\t\t}\n\t\t}\n\t\txSheet.Row = append(xSheet.Row, xRow)\n\t}\n\n\t\/\/ Update sheet format with the freshly determined max levels\n\ts.SheetFormat.OutlineLevelCol = maxLevelCol\n\ts.SheetFormat.OutlineLevelRow = maxLevelRow\n\t\/\/ .. and then also apply this to the xml worksheet\n\tworksheet.SheetFormatPr.OutlineLevelCol = s.SheetFormat.OutlineLevelCol\n\tworksheet.SheetFormatPr.OutlineLevelRow = s.SheetFormat.OutlineLevelRow\n\n\tif worksheet.MergeCells != nil {\n\t\tworksheet.MergeCells.Count = len(worksheet.MergeCells.Cells)\n\t}\n\n\tif s.AutoFilter != nil {\n\t\tworksheet.AutoFilter = &xlsxAutoFilter{Ref: fmt.Sprintf(\"%v:%v\", s.AutoFilter.TopLeftCell, s.AutoFilter.BottomRightCell)}\n\t}\n\n\tworksheet.SheetData = xSheet\n\tdimension := xlsxDimension{}\n\tdimension.Ref = \"A1:\" + GetCellIDStringFromCoords(maxCell, maxRow)\n\tif dimension.Ref == \"A1:A1\" {\n\t\tdimension.Ref = \"A1\"\n\t}\n\tworksheet.Dimension = dimension\n\treturn worksheet\n}\n\nfunc handleStyleForXLSX(style *Style, NumFmtId int, styles *xlsxStyleSheet) (XfId int) {\n\txFont, xFill, xBorder, xCellXf := style.makeXLSXStyleElements()\n\tfontId := styles.addFont(xFont)\n\tfillId := styles.addFill(xFill)\n\n\t\/\/ HACK - adding light grey fill, as in OO and Google\n\tgreyfill := xlsxFill{}\n\tgreyfill.PatternFill.PatternType = \"lightGray\"\n\tstyles.addFill(greyfill)\n\n\tborderId := styles.addBorder(xBorder)\n\txCellXf.FontId = fontId\n\txCellXf.FillId = fillId\n\txCellXf.BorderId = borderId\n\txCellXf.NumFmtId = NumFmtId\n\t\/\/ apply the numFmtId when it is not the default cellxf\n\tif xCellXf.NumFmtId > 0 {\n\t\txCellXf.ApplyNumberFormat = true\n\t}\n\n\txCellXf.Alignment.Horizontal = style.Alignment.Horizontal\n\txCellXf.Alignment.Indent = style.Alignment.Indent\n\txCellXf.Alignment.ShrinkToFit = style.Alignment.ShrinkToFit\n\txCellXf.Alignment.TextRotation = style.Alignment.TextRotation\n\txCellXf.Alignment.Vertical = style.Alignment.Vertical\n\txCellXf.Alignment.WrapText = style.Alignment.WrapText\n\n\tXfId = styles.addCellXf(xCellXf)\n\treturn\n}\n\nfunc handleNumFmtIdForXLSX(NumFmtId int, styles *xlsxStyleSheet) (XfId int) {\n\txCellXf := makeXLSXCellElement()\n\txCellXf.NumFmtId = NumFmtId\n\tif xCellXf.NumFmtId > 0 {\n\t\txCellXf.ApplyNumberFormat = true\n\t}\n\tXfId = styles.addCellXf(xCellXf)\n\treturn\n}\n<commit_msg>SetColWidth index out of range issue #268<commit_after>package xlsx\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\n\/\/ Sheet is a high level structure intended to provide user access to\n\/\/ the contents of a particular sheet within an XLSX file.\ntype Sheet struct {\n\tName string\n\tFile *File\n\tRows []*Row\n\tCols []*Col\n\tMaxRow int\n\tMaxCol int\n\tHidden bool\n\tSelected bool\n\tSheetViews []SheetView\n\tSheetFormat SheetFormat\n\tAutoFilter *AutoFilter\n}\n\ntype SheetView struct {\n\tPane *Pane\n}\n\ntype Pane struct {\n\tXSplit float64\n\tYSplit float64\n\tTopLeftCell string\n\tActivePane string\n\tState string \/\/ Either \"split\" or \"frozen\"\n}\n\ntype SheetFormat struct {\n\tDefaultColWidth float64\n\tDefaultRowHeight float64\n\tOutlineLevelCol uint8\n\tOutlineLevelRow uint8\n}\n\ntype AutoFilter struct {\n\tTopLeftCell string\n\tBottomRightCell string\n}\n\n\/\/ Add a new Row to a Sheet\nfunc (s *Sheet) AddRow() *Row {\n\trow := &Row{Sheet: s}\n\ts.Rows = append(s.Rows, row)\n\tif len(s.Rows) > s.MaxRow {\n\t\ts.MaxRow = len(s.Rows)\n\t}\n\treturn row\n}\n\n\/\/ Make sure we always have as many Cols as we do cells.\nfunc (s *Sheet) maybeAddCol(cellCount int) {\n\tif cellCount > s.MaxCol {\n\t\tloopCnt := cellCount - s.MaxCol\n\t\tcurrIndex := s.MaxCol + 1\n\t\tfor i := 0; i < loopCnt; i++ {\n\n\t\t\tcol := &Col{\n\t\t\t\tstyle: NewStyle(),\n\t\t\t\tMin: currIndex,\n\t\t\t\tMax: currIndex,\n\t\t\t\tHidden: false,\n\t\t\t\tCollapsed: false}\n\t\t\ts.Cols = append(s.Cols, col)\n\t\t\tcurrIndex++\n\t\t}\n\t\ts.MaxCol = cellCount\n\t}\n}\n\n\/\/ Make sure we always have as many Cols as we do cells.\nfunc (s *Sheet) Col(idx int) *Col {\n\ts.maybeAddCol(idx + 1)\n\treturn s.Cols[idx]\n}\n\n\/\/ Get a Cell by passing it's cartesian coordinates (zero based) as\n\/\/ row and column integer indexes.\n\/\/\n\/\/ For example:\n\/\/\n\/\/ cell := sheet.Cell(0,0)\n\/\/\n\/\/ ... would set the variable \"cell\" to contain a Cell struct\n\/\/ containing the data from the field \"A1\" on the spreadsheet.\nfunc (sh *Sheet) Cell(row, col int) *Cell {\n\n\t\/\/ If the user requests a row beyond what we have, then extend.\n\tfor len(sh.Rows) <= row {\n\t\tsh.AddRow()\n\t}\n\n\tr := sh.Rows[row]\n\tfor len(r.Cells) <= col {\n\t\tr.AddCell()\n\t}\n\n\treturn r.Cells[col]\n}\n\n\/\/Set the width of a single column or multiple columns.\nfunc (s *Sheet) SetColWidth(startcol, endcol int, width float64) error {\n\tif startcol > endcol {\n\t\treturn fmt.Errorf(\"Could not set width for range %d-%d: startcol must be less than endcol.\", startcol, endcol)\n\t}\n\ts.maybeAddCol(endcol + 1)\n\tfor ; startcol <= endcol; startcol++ {\n\t\ts.Cols[startcol].Width = width\n\t}\n\n\treturn nil\n}\n\n\/\/ When merging cells, the cell may be the 'original' or the 'covered'.\n\/\/ First, figure out which cells are merge starting points. Then create\n\/\/ the necessary cells underlying the merge area.\n\/\/ Then go through all the underlying cells and apply the appropriate\n\/\/ border, based on the original cell.\nfunc (s *Sheet) handleMerged() {\n\tmerged := make(map[string]*Cell)\n\n\tfor r, row := range s.Rows {\n\t\tfor c, cell := range row.Cells {\n\t\t\tif cell.HMerge > 0 || cell.VMerge > 0 {\n\t\t\t\tcoord := GetCellIDStringFromCoords(c, r)\n\t\t\t\tmerged[coord] = cell\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ This loop iterates over all cells that should be merged and applies the correct\n\t\/\/ borders to them depending on their position. If any cells required by the merge\n\t\/\/ are missing, they will be allocated by s.Cell().\n\tfor key, cell := range merged {\n\t\tmainstyle := cell.GetStyle()\n\n\t\ttop := mainstyle.Border.Top\n\t\tleft := mainstyle.Border.Left\n\t\tright := mainstyle.Border.Right\n\t\tbottom := mainstyle.Border.Bottom\n\n\t\t\/\/ When merging cells, the upper left cell does not maintain\n\t\t\/\/ the original borders\n\t\tmainstyle.Border.Top = \"none\"\n\t\tmainstyle.Border.Left = \"none\"\n\t\tmainstyle.Border.Right = \"none\"\n\t\tmainstyle.Border.Bottom = \"none\"\n\n\t\tmaincol, mainrow, _ := GetCoordsFromCellIDString(key)\n\t\tfor rownum := 0; rownum <= cell.VMerge; rownum++ {\n\t\t\tfor colnum := 0; colnum <= cell.HMerge; colnum++ {\n\t\t\t\ttmpcell := s.Cell(mainrow+rownum, maincol+colnum)\n\t\t\t\tstyle := tmpcell.GetStyle()\n\t\t\t\tstyle.ApplyBorder = true\n\n\t\t\t\tif rownum == 0 {\n\t\t\t\t\tstyle.Border.Top = top\n\t\t\t\t}\n\n\t\t\t\tif rownum == (cell.VMerge) {\n\t\t\t\t\tstyle.Border.Bottom = bottom\n\t\t\t\t}\n\n\t\t\t\tif colnum == 0 {\n\t\t\t\t\tstyle.Border.Left = left\n\t\t\t\t}\n\n\t\t\t\tif colnum == (cell.HMerge) {\n\t\t\t\t\tstyle.Border.Right = right\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Dump sheet to its XML representation, intended for internal use only\nfunc (s *Sheet) makeXLSXSheet(refTable *RefTable, styles *xlsxStyleSheet) *xlsxWorksheet {\n\tworksheet := newXlsxWorksheet()\n\txSheet := xlsxSheetData{}\n\tmaxRow := 0\n\tmaxCell := 0\n\tvar maxLevelCol, maxLevelRow uint8\n\n\t\/\/ Scan through the sheet and see if there are any merged cells. If there\n\t\/\/ are, we may need to extend the size of the sheet. There needs to be\n\t\/\/ phantom cells underlying the area covered by the merged cell\n\ts.handleMerged()\n\n\tfor index, sheetView := range s.SheetViews {\n\t\tif sheetView.Pane != nil {\n\t\t\tworksheet.SheetViews.SheetView[index].Pane = &xlsxPane{\n\t\t\t\tXSplit: sheetView.Pane.XSplit,\n\t\t\t\tYSplit: sheetView.Pane.YSplit,\n\t\t\t\tTopLeftCell: sheetView.Pane.TopLeftCell,\n\t\t\t\tActivePane: sheetView.Pane.ActivePane,\n\t\t\t\tState: sheetView.Pane.State,\n\t\t\t}\n\n\t\t}\n\t}\n\n\tif s.Selected {\n\t\tworksheet.SheetViews.SheetView[0].TabSelected = true\n\t}\n\n\tif s.SheetFormat.DefaultRowHeight != 0 {\n\t\tworksheet.SheetFormatPr.DefaultRowHeight = s.SheetFormat.DefaultRowHeight\n\t}\n\tworksheet.SheetFormatPr.DefaultColWidth = s.SheetFormat.DefaultColWidth\n\n\tcolsXfIdList := make([]int, len(s.Cols))\n\tworksheet.Cols = &xlsxCols{Col: []xlsxCol{}}\n\tfor c, col := range s.Cols {\n\t\tXfId := 0\n\t\tif col.Min == 0 {\n\t\t\tcol.Min = 1\n\t\t}\n\t\tif col.Max == 0 {\n\t\t\tcol.Max = 1\n\t\t}\n\t\tstyle := col.GetStyle()\n\t\t\/\/col's style always not nil\n\t\tif style != nil {\n\t\t\txNumFmt := styles.newNumFmt(col.numFmt)\n\t\t\tXfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)\n\t\t}\n\t\tcolsXfIdList[c] = XfId\n\n\t\tvar customWidth bool\n\t\tif col.Width == 0 {\n\t\t\tcol.Width = ColWidth\n\t\t\tcustomWidth = false\n\n\t\t} else {\n\t\t\tcustomWidth = true\n\t\t}\n\t\tworksheet.Cols.Col = append(worksheet.Cols.Col,\n\t\t\txlsxCol{Min: col.Min,\n\t\t\t\tMax: col.Max,\n\t\t\t\tHidden: col.Hidden,\n\t\t\t\tWidth: col.Width,\n\t\t\t\tCustomWidth: customWidth,\n\t\t\t\tCollapsed: col.Collapsed,\n\t\t\t\tOutlineLevel: col.OutlineLevel,\n\t\t\t\tStyle: XfId,\n\t\t\t})\n\n\t\tif col.OutlineLevel > maxLevelCol {\n\t\t\tmaxLevelCol = col.OutlineLevel\n\t\t}\n\t}\n\n\tfor r, row := range s.Rows {\n\t\tif r > maxRow {\n\t\t\tmaxRow = r\n\t\t}\n\t\txRow := xlsxRow{}\n\t\txRow.R = r + 1\n\t\tif row.isCustom {\n\t\t\txRow.CustomHeight = true\n\t\t\txRow.Ht = fmt.Sprintf(\"%g\", row.Height)\n\t\t}\n\t\txRow.OutlineLevel = row.OutlineLevel\n\t\tif row.OutlineLevel > maxLevelRow {\n\t\t\tmaxLevelRow = row.OutlineLevel\n\t\t}\n\t\tfor c, cell := range row.Cells {\n\t\t\tXfId := colsXfIdList[c]\n\n\t\t\t\/\/ generate NumFmtId and add new NumFmt\n\t\t\txNumFmt := styles.newNumFmt(cell.NumFmt)\n\n\t\t\tstyle := cell.style\n\t\t\tif style != nil {\n\t\t\t\tXfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)\n\t\t\t} else if len(cell.NumFmt) > 0 && !compareFormatString(s.Cols[c].numFmt, cell.NumFmt) {\n\t\t\t\tXfId = handleNumFmtIdForXLSX(xNumFmt.NumFmtId, styles)\n\t\t\t}\n\n\t\t\tif c > maxCell {\n\t\t\t\tmaxCell = c\n\t\t\t}\n\t\t\txC := xlsxC{\n\t\t\t\tS: XfId,\n\t\t\t\tR: GetCellIDStringFromCoords(c, r),\n\t\t\t}\n\t\t\tif cell.formula != \"\" {\n\t\t\t\txC.F = &xlsxF{Content: cell.formula}\n\t\t\t}\n\t\t\tswitch cell.cellType {\n\t\t\tcase CellTypeInline:\n\t\t\t\t\/\/ Inline strings are turned into shared strings since they are more efficient.\n\t\t\t\t\/\/ This is what Excel does as well.\n\t\t\t\tfallthrough\n\t\t\tcase CellTypeString:\n\t\t\t\tif len(cell.Value) > 0 {\n\t\t\t\t\txC.V = strconv.Itoa(refTable.AddString(cell.Value))\n\t\t\t\t}\n\t\t\t\txC.T = \"s\"\n\t\t\tcase CellTypeNumeric:\n\t\t\t\t\/\/ Numeric is the default, so the type can be left blank\n\t\t\t\txC.V = cell.Value\n\t\t\tcase CellTypeBool:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"b\"\n\t\t\tcase CellTypeError:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"e\"\n\t\t\tcase CellTypeDate:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"d\"\n\t\t\tcase CellTypeStringFormula:\n\t\t\t\txC.V = cell.Value\n\t\t\t\txC.T = \"str\"\n\t\t\tdefault:\n\t\t\t\tpanic(errors.New(\"unknown cell type cannot be marshaled\"))\n\t\t\t}\n\n\t\t\txRow.C = append(xRow.C, xC)\n\n\t\t\tif cell.HMerge > 0 || cell.VMerge > 0 {\n\t\t\t\t\/\/ r == rownum, c == colnum\n\t\t\t\tmc := xlsxMergeCell{}\n\t\t\t\tstart := GetCellIDStringFromCoords(c, r)\n\t\t\t\tendCol := c + cell.HMerge\n\t\t\t\tendRow := r + cell.VMerge\n\t\t\t\tend := GetCellIDStringFromCoords(endCol, endRow)\n\t\t\t\tmc.Ref = start + \":\" + end\n\t\t\t\tif worksheet.MergeCells == nil {\n\t\t\t\t\tworksheet.MergeCells = &xlsxMergeCells{}\n\t\t\t\t}\n\t\t\t\tworksheet.MergeCells.Cells = append(worksheet.MergeCells.Cells, mc)\n\t\t\t}\n\t\t}\n\t\txSheet.Row = append(xSheet.Row, xRow)\n\t}\n\n\t\/\/ Update sheet format with the freshly determined max levels\n\ts.SheetFormat.OutlineLevelCol = maxLevelCol\n\ts.SheetFormat.OutlineLevelRow = maxLevelRow\n\t\/\/ .. and then also apply this to the xml worksheet\n\tworksheet.SheetFormatPr.OutlineLevelCol = s.SheetFormat.OutlineLevelCol\n\tworksheet.SheetFormatPr.OutlineLevelRow = s.SheetFormat.OutlineLevelRow\n\n\tif worksheet.MergeCells != nil {\n\t\tworksheet.MergeCells.Count = len(worksheet.MergeCells.Cells)\n\t}\n\n\tif s.AutoFilter != nil {\n\t\tworksheet.AutoFilter = &xlsxAutoFilter{Ref: fmt.Sprintf(\"%v:%v\", s.AutoFilter.TopLeftCell, s.AutoFilter.BottomRightCell)}\n\t}\n\n\tworksheet.SheetData = xSheet\n\tdimension := xlsxDimension{}\n\tdimension.Ref = \"A1:\" + GetCellIDStringFromCoords(maxCell, maxRow)\n\tif dimension.Ref == \"A1:A1\" {\n\t\tdimension.Ref = \"A1\"\n\t}\n\tworksheet.Dimension = dimension\n\treturn worksheet\n}\n\nfunc handleStyleForXLSX(style *Style, NumFmtId int, styles *xlsxStyleSheet) (XfId int) {\n\txFont, xFill, xBorder, xCellXf := style.makeXLSXStyleElements()\n\tfontId := styles.addFont(xFont)\n\tfillId := styles.addFill(xFill)\n\n\t\/\/ HACK - adding light grey fill, as in OO and Google\n\tgreyfill := xlsxFill{}\n\tgreyfill.PatternFill.PatternType = \"lightGray\"\n\tstyles.addFill(greyfill)\n\n\tborderId := styles.addBorder(xBorder)\n\txCellXf.FontId = fontId\n\txCellXf.FillId = fillId\n\txCellXf.BorderId = borderId\n\txCellXf.NumFmtId = NumFmtId\n\t\/\/ apply the numFmtId when it is not the default cellxf\n\tif xCellXf.NumFmtId > 0 {\n\t\txCellXf.ApplyNumberFormat = true\n\t}\n\n\txCellXf.Alignment.Horizontal = style.Alignment.Horizontal\n\txCellXf.Alignment.Indent = style.Alignment.Indent\n\txCellXf.Alignment.ShrinkToFit = style.Alignment.ShrinkToFit\n\txCellXf.Alignment.TextRotation = style.Alignment.TextRotation\n\txCellXf.Alignment.Vertical = style.Alignment.Vertical\n\txCellXf.Alignment.WrapText = style.Alignment.WrapText\n\n\tXfId = styles.addCellXf(xCellXf)\n\treturn\n}\n\nfunc handleNumFmtIdForXLSX(NumFmtId int, styles *xlsxStyleSheet) (XfId int) {\n\txCellXf := makeXLSXCellElement()\n\txCellXf.NumFmtId = NumFmtId\n\tif xCellXf.NumFmtId > 0 {\n\t\txCellXf.ApplyNumberFormat = true\n\t}\n\tXfId = styles.addCellXf(xCellXf)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package scene\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\n\t\"golang.org\/x\/mobile\/asset\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n)\n\nconst (\n\t\/\/ ScreenWidth is screen width\n\tScreenWidth = 1080 \/ 2\n\t\/\/ ScreenHeight is screen height\n\tScreenHeight = 1920 \/ 2\n)\n\n\/\/ Title represents a scene object for Title\ntype Title struct {\n\tsimra simra.Simraer\n\taudio simra.Audioer\n\tisPlaying bool\n}\n\n\/\/ Initialize initializes title scene\n\/\/ This is called from simra.\n\/\/ simra.SetDesiredScreenSize should be called to determine\n\/\/ screen size of this scene.\nfunc (title *Title) Initialize(sim simra.Simraer) {\n\ttitle.simra = sim\n\ttitle.simra.SetDesiredScreenSize(ScreenWidth, ScreenHeight)\n\t\/\/ initialize sprites\n\ttitle.initialize()\n}\n\nfunc (title *Title) initialize() {\n\n\ttitle.simra.AddTouchListener(title)\n\n\tsprite := title.simra.NewSprite()\n\tsprite.SetScale(ScreenWidth, 80)\n\tsprite.SetPosition(ScreenWidth\/2, ScreenHeight\/2)\n\ttitle.simra.AddSprite(sprite)\n\ts := sprite.GetScale()\n\ttex := title.simra.NewTextTexture(\"tap to play sound\",\n\t\t60, color.RGBA{255, 0, 0, 255}, image.Rect(0, 0, s.W, s.H))\n\tsprite.ReplaceTexture(tex)\n\n}\n\n\/\/ Drive is called from simra.\n\/\/ This is used to update sprites position.\n\/\/ Thsi will be called 60 times per sec.\nfunc (title *Title) Drive() {\n}\n\n\/\/ OnTouchBegin is called when Title scene is Touched.\nfunc (title *Title) OnTouchBegin(x, y float32) {\n}\n\n\/\/ OnTouchMove is called when Title scene is Touched and moved.\nfunc (title *Title) OnTouchMove(x, y float32) {\n}\n\n\/\/ OnTouchEnd is called when Title scene is Touched and it is released.\nfunc (title *Title) OnTouchEnd(x, y float32) {\n\tif title.isPlaying {\n\t\terr := title.audio.Stop()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\ttitle.isPlaying = false\n\n\t} else {\n\t\ttitle.audio = simra.NewAudio()\n\t\tresource, err := asset.Open(\"test_se.mp3\")\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\n\t\terr = title.audio.Play(resource, true, func(err error) {\n\t\t\ttitle.isPlaying = false\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttitle.isPlaying = true\n\t}\n}\n<commit_msg>use short receiver name for audio<commit_after>package scene\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\n\t\"golang.org\/x\/mobile\/asset\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n)\n\nconst (\n\t\/\/ ScreenWidth is screen width\n\tScreenWidth = 1080 \/ 2\n\t\/\/ ScreenHeight is screen height\n\tScreenHeight = 1920 \/ 2\n)\n\n\/\/ Title represents a scene object for Title\ntype Title struct {\n\tsimra simra.Simraer\n\taudio simra.Audioer\n\tisPlaying bool\n}\n\n\/\/ Initialize initializes title scene\n\/\/ This is called from simra.\n\/\/ simra.SetDesiredScreenSize should be called to determine\n\/\/ screen size of this scene.\nfunc (t *Title) Initialize(sim simra.Simraer) {\n\tt.simra = sim\n\tt.simra.SetDesiredScreenSize(ScreenWidth, ScreenHeight)\n\t\/\/ initialize sprites\n\tt.initialize()\n}\n\nfunc (t *Title) initialize() {\n\n\tt.simra.AddTouchListener(t)\n\n\tsprite := t.simra.NewSprite()\n\tsprite.SetScale(ScreenWidth, 80)\n\tsprite.SetPosition(ScreenWidth\/2, ScreenHeight\/2)\n\tt.simra.AddSprite(sprite)\n\ts := sprite.GetScale()\n\ttex := t.simra.NewTextTexture(\"tap to play sound\",\n\t\t60, color.RGBA{255, 0, 0, 255}, image.Rect(0, 0, s.W, s.H))\n\tsprite.ReplaceTexture(tex)\n\n}\n\n\/\/ Drive is called from simra.\n\/\/ This is used to update sprites position.\n\/\/ Thsi will be called 60 times per sec.\nfunc (t *Title) Drive() {\n}\n\n\/\/ OnTouchBegin is called when Title scene is Touched.\nfunc (t *Title) OnTouchBegin(x, y float32) {\n}\n\n\/\/ OnTouchMove is called when Title scene is Touched and moved.\nfunc (t *Title) OnTouchMove(x, y float32) {\n}\n\n\/\/ OnTouchEnd is called when Title scene is Touched and it is released.\nfunc (t *Title) OnTouchEnd(x, y float32) {\n\tif t.isPlaying {\n\t\terr := t.audio.Stop()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tt.isPlaying = false\n\n\t} else {\n\t\tt.audio = simra.NewAudio()\n\t\tresource, err := asset.Open(\"test_se.mp3\")\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\n\t\terr = t.audio.Play(resource, true, func(err error) {\n\t\t\tt.isPlaying = false\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tt.isPlaying = true\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package wc\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unicode\/utf8\"\n)\n\ntype Counter struct {\n\tf *os.File\n\n\tMultibytes uint64\n\tBytes uint64\n\tLines uint64\n\tWords uint64\n}\n\nvar BufferSize = 1 << 20\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc NewCounter(f io.Reader) *Counter {\n\treturn &Counter{\n\t\tf: f,\n\t}\n}\n\nfunc (c *Counter) Count(count_multibytes, count_bytes, count_lines, count_words bool) error {\n\n\tworkers, wg := runtime.NumCPU()*2, new(sync.WaitGroup)\n\n\twg.Add(workers)\n\n\tbuffers := make(chan []byte, workers<<3)\n\n\tfor i := 0; i < workers; i++ {\n\t\tgo func() {\n\n\t\t\tvar multibyte_ct, byte_ct, line_ct, word_ct uint64\n\n\t\t\tfor buf := range buffers {\n\n\t\t\t\tif count_multibytes {\n\t\t\t\t\tmultibyte_ct += uint64(utf8.RuneCount(buf))\n\t\t\t\t}\n\t\t\t\tif count_bytes {\n\t\t\t\t\tbyte_ct += uint64(len(buf))\n\t\t\t\t}\n\n\t\t\t\tif count_words {\n\t\t\t\t\tvar in_word bool\n\t\t\t\t\tfor _, c := range buf {\n\t\t\t\t\t\tif isSpace(c) {\n\t\t\t\t\t\t\tif c == '\\n' {\n\t\t\t\t\t\t\t\tline_ct++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tin_word = false\n\t\t\t\t\t\t} else if !in_word {\n\t\t\t\t\t\t\tword_ct++\n\t\t\t\t\t\t\tin_word = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if count_lines {\n\t\t\t\t\tfor _, c := range buf {\n\t\t\t\t\t\tif c == '\\n' {\n\t\t\t\t\t\t\tline_ct++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tatomic.AddUint64(&c.Multibytes, multibyte_ct)\n\t\t\tatomic.AddUint64(&c.Lines, line_ct)\n\t\t\tatomic.AddUint64(&c.Words, word_ct)\n\t\t\tatomic.AddUint64(&c.Bytes, byte_ct)\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tbr := bufio.NewReaderSize(c.f, BufferSize)\n\n\tfor {\n\t\tbuf := make([]byte, BufferSize)\n\n\t\tn, err := br.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[:n]\n\n\t\t\/\/ read to a newline to prevent\n\t\t\/\/ spanning a word across two buffers\n\t\tif count_words && buf[len(buf)-1] != '\\n' {\n\t\t\textra, err := br.ReadBytes('\\n')\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbuf = append(buf, extra...)\n\t\t}\n\n\t\tbuffers <- buf\n\t}\n\n\tclose(buffers)\n\twg.Wait()\n\n\treturn nil\n\n}\n\n\/\/ isspace() man page spec\n\/\/ http:\/\/linux.die.net\/man\/3\/isspace\nfunc isSpace(c byte) bool {\n\treturn c == ' ' || c == '\\n' || c == '\\r' || c == '\\t' || c == '\\v' || c == '\\f'\n}\n<commit_msg>better buffer size and fixing io.Reader bug<commit_after>package wc\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unicode\/utf8\"\n)\n\ntype Counter struct {\n\tf *os.File\n\n\tMultibytes uint64\n\tBytes uint64\n\tLines uint64\n\tWords uint64\n}\n\nvar BufferSize = 4 << 20\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc NewCounter(f *os.File) *Counter {\n\treturn &Counter{\n\t\tf: f,\n\t}\n}\n\nfunc (c *Counter) Count(count_multibytes, count_bytes, count_lines, count_words bool) error {\n\n\tworkers, wg := runtime.NumCPU()*2, new(sync.WaitGroup)\n\n\twg.Add(workers)\n\n\tbuffers := make(chan []byte, workers<<3)\n\n\tfor i := 0; i < workers; i++ {\n\t\tgo func() {\n\n\t\t\tvar multibyte_ct, byte_ct, line_ct, word_ct uint64\n\n\t\t\tfor buf := range buffers {\n\n\t\t\t\tif count_multibytes {\n\t\t\t\t\tmultibyte_ct += uint64(utf8.RuneCount(buf))\n\t\t\t\t}\n\t\t\t\tif count_bytes {\n\t\t\t\t\tbyte_ct += uint64(len(buf))\n\t\t\t\t}\n\n\t\t\t\tif count_words {\n\t\t\t\t\tvar in_word bool\n\t\t\t\t\tfor _, c := range buf {\n\t\t\t\t\t\tif isSpace(c) {\n\t\t\t\t\t\t\tif c == '\\n' {\n\t\t\t\t\t\t\t\tline_ct++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tin_word = false\n\t\t\t\t\t\t} else if !in_word {\n\t\t\t\t\t\t\tword_ct++\n\t\t\t\t\t\t\tin_word = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if count_lines {\n\t\t\t\t\tfor _, c := range buf {\n\t\t\t\t\t\tif c == '\\n' {\n\t\t\t\t\t\t\tline_ct++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tatomic.AddUint64(&c.Multibytes, multibyte_ct)\n\t\t\tatomic.AddUint64(&c.Lines, line_ct)\n\t\t\tatomic.AddUint64(&c.Words, word_ct)\n\t\t\tatomic.AddUint64(&c.Bytes, byte_ct)\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tbr := bufio.NewReaderSize(c.f, BufferSize)\n\n\tfor {\n\t\tbuf := make([]byte, BufferSize)\n\n\t\tn, err := br.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[:n]\n\n\t\t\/\/ read to a newline to prevent\n\t\t\/\/ spanning a word across two buffers\n\t\tif count_words && buf[len(buf)-1] != '\\n' {\n\t\t\textra, err := br.ReadBytes('\\n')\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbuf = append(buf, extra...)\n\t\t}\n\n\t\tbuffers <- buf\n\t}\n\n\tclose(buffers)\n\twg.Wait()\n\n\treturn nil\n\n}\n\n\/\/ isspace() man page spec\n\/\/ http:\/\/linux.die.net\/man\/3\/isspace\nfunc isSpace(c byte) bool {\n\treturn c == ' ' || c == '\\n' || c == '\\r' || c == '\\t' || c == '\\v' || c == '\\f'\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Hardik Bagdi <hbagdi1@binghamton.edu>\n\/\/\n\/\/ MIT License\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage unsplash\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Response has pagination information whenever applicable\ntype Response struct {\n\thttpResponse *http.Response\n\tHasNextPage bool\n\tbody *[]byte\n\tFirstPage, LastPage, NextPage, PrevPage int\n\terr error\n\tRateLimit int\n\tRateLimitRemaining int\n}\n\nfunc (r *Response) checkForErrors() error {\n\tswitch r.httpResponse.StatusCode {\n\n\tcase 401:\n\t\treturn &AuthorizationError{ErrString: \"401: Unauthorized request\"}\n\n\tcase 403:\n\t\tif r.RateLimitRemaining == 0 {\n\t\t\treturn &RateLimitError{ErrString: \"403: Rate limit exhausted\"}\n\t\t}\n\n\t\treturn &AuthorizationError{ErrString: \"403: Access forbidden request\"}\n\n\tcase 404:\n\t\treturn &NotFoundError{ErrString: \"404: The cat got tired of the Laser\"}\n\n\t}\n\treturn nil\n}\n\nfunc newResponse(r *http.Response) (*Response, error) {\n\tif nil == r {\n\t\treturn nil,\n\t\t\t&IllegalArgumentError{ErrString: \"*http.Response cannot be null\"}\n\t}\n\tresp := new(Response)\n\tresp.httpResponse = r\n\t\/\/populate first\n\tresp.populatePagingInfo()\n\tresp.populateRateLimits()\n\t\/\/now check for errors\n\terr := resp.checkForErrors()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/looks good, read the response\n\tbuf, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.body = &buf\n\treturn resp, nil\n}\n\nfunc (r *Response) populateRateLimits() {\n\t\/\/fails silently\n\tmaxLimit, ok := r.httpResponse.Header[\"X-Ratelimit-Limit\"]\n\tif ok && len(maxLimit) == 1 {\n\t\tr.RateLimit, _ = strconv.Atoi(maxLimit[0])\n\t}\n\trateRemaining, ok := r.httpResponse.Header[\"X-Ratelimit-Remaining\"]\n\tif ok && len(rateRemaining) == 1 {\n\t\tr.RateLimitRemaining, _ = strconv.Atoi(rateRemaining[0])\n\t}\n}\n\nfunc (r *Response) populatePagingInfo() {\n\t\/\/fails silently\n\trawLinks, ok := r.httpResponse.Header[\"Link\"]\n\tif !ok || 0 == len(rawLinks) {\n\t\treturn\n\t}\n\n\tlinks := strings.Split(rawLinks[0], \",\") \/\/TODO why is Headers returning []string?\n\n\tfor _, link := range links {\n\t\tparts := strings.Split(link, \";\")\n\t\tif !strings.Contains(parts[0], \"page\") && !strings.Contains(parts[1], \"rel=\") {\n\t\t\tcontinue\n\t\t}\n\t\thref := parts[0]\n\t\t\/\/strip out '<' and '>'\n\t\thref = href[strings.Index(href, \"<\")+1 : strings.Index(href, \">\")]\n\t\turl, err := url.Parse(href)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tpageString := url.Query().Get(\"page\")\n\t\tpageNumber, err := strconv.Atoi(string(pageString))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch strings.TrimSpace(parts[1]) {\n\t\tcase `rel=\"first\"`:\n\t\t\tr.FirstPage = pageNumber\n\t\tcase `rel=\"last\"`:\n\t\t\tr.LastPage = pageNumber\n\t\tcase `rel=\"next\"`:\n\t\t\tr.NextPage = pageNumber\n\t\t\tr.HasNextPage = true\n\t\tcase `rel=\"prev\"`:\n\t\t\tr.PrevPage = pageNumber\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<commit_msg>Improved error processing; Not really, onus on the dev-user<commit_after>\/\/ Copyright (c) 2017 Hardik Bagdi <hbagdi1@binghamton.edu>\n\/\/\n\/\/ MIT License\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining\n\/\/ a copy of this software and associated documentation files (the\n\/\/ \"Software\"), to deal in the Software without restriction, including\n\/\/ without limitation the rights to use, copy, modify, merge, publish,\n\/\/ distribute, sublicense, and\/or sell copies of the Software, and to\n\/\/ permit persons to whom the Software is furnished to do so, subject to\n\/\/ the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be\n\/\/ included in all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n\/\/ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n\/\/ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n\/\/ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n\/\/ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n\/\/ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n\/\/ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage unsplash\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Response has pagination information whenever applicable\ntype Response struct {\n\thttpResponse *http.Response\n\tHasNextPage bool\n\tbody *[]byte\n\tFirstPage, LastPage, NextPage, PrevPage int\n\terr error\n\tRateLimit int\n\tRateLimitRemaining int\n}\n\nfunc (r *Response) checkForErrors() error {\n\tswitch r.httpResponse.StatusCode {\n\tcase 200, 201, 202, 204, 205:\n\t\treturn nil\n\tcase 401:\n\t\treturn &AuthorizationError{ErrString: errStringHelper(r.httpResponse.StatusCode, \"Unauthorized request\", r.body)}\n\tcase 403:\n\t\tif r.RateLimitRemaining == 0 {\n\t\t\treturn &RateLimitError{ErrString: errStringHelper(r.httpResponse.StatusCode, \"Rate limit exhausted\", r.body)}\n\t\t}\n\n\t\treturn &AuthorizationError{ErrString: errStringHelper(r.httpResponse.StatusCode, \"Access forbidden request\", r.body)}\n\n\tcase 404:\n\t\treturn &NotFoundError{ErrString: errStringHelper(r.httpResponse.StatusCode, \"The cat got tired of the Laser\", r.body)}\n\tdefault:\n\t\treturn errors.New(errStringHelper(r.httpResponse.StatusCode, \"API returned an error\", r.body))\n\n\t}\n}\nfunc errStringHelper(statusCode int, msg string, errBody *[]byte) string {\n\tvar buf bytes.Buffer\n\t\/\/XXX Writes can fail?\n\tbuf.WriteString(strconv.Itoa(statusCode))\n\tbuf.WriteString(\": \")\n\tbuf.WriteString(msg)\n\tbuf.WriteString(\", Body: \")\n\tbuf.Write(*errBody)\n\treturn buf.String()\n}\n\nfunc newResponse(r *http.Response) (*Response, error) {\n\tif nil == r {\n\t\treturn nil,\n\t\t\t&IllegalArgumentError{ErrString: \"*http.Response cannot be null\"}\n\t}\n\tresp := new(Response)\n\tresp.httpResponse = r\n\t\/\/populate first\n\tresp.populatePagingInfo()\n\tresp.populateRateLimits()\n\t\/\/read the response\n\tbuf, err := ioutil.ReadAll(resp.httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.body = &buf\n\t\/\/now check for errors\n\terr = resp.checkForErrors()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}\n\nfunc (r *Response) populateRateLimits() {\n\t\/\/fails silently\n\tmaxLimit, ok := r.httpResponse.Header[\"X-Ratelimit-Limit\"]\n\tif ok && len(maxLimit) == 1 {\n\t\tr.RateLimit, _ = strconv.Atoi(maxLimit[0])\n\t}\n\trateRemaining, ok := r.httpResponse.Header[\"X-Ratelimit-Remaining\"]\n\tif ok && len(rateRemaining) == 1 {\n\t\tr.RateLimitRemaining, _ = strconv.Atoi(rateRemaining[0])\n\t}\n}\n\nfunc (r *Response) populatePagingInfo() {\n\t\/\/fails silently\n\trawLinks, ok := r.httpResponse.Header[\"Link\"]\n\tif !ok || 0 == len(rawLinks) {\n\t\treturn\n\t}\n\n\tlinks := strings.Split(rawLinks[0], \",\") \/\/TODO why is Headers returning []string?\n\n\tfor _, link := range links {\n\t\tparts := strings.Split(link, \";\")\n\t\tif !strings.Contains(parts[0], \"page\") && !strings.Contains(parts[1], \"rel=\") {\n\t\t\tcontinue\n\t\t}\n\t\thref := parts[0]\n\t\t\/\/strip out '<' and '>'\n\t\thref = href[strings.Index(href, \"<\")+1 : strings.Index(href, \">\")]\n\t\turl, err := url.Parse(href)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tpageString := url.Query().Get(\"page\")\n\t\tpageNumber, err := strconv.Atoi(string(pageString))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch strings.TrimSpace(parts[1]) {\n\t\tcase `rel=\"first\"`:\n\t\t\tr.FirstPage = pageNumber\n\t\tcase `rel=\"last\"`:\n\t\t\tr.LastPage = pageNumber\n\t\tcase `rel=\"next\"`:\n\t\t\tr.NextPage = pageNumber\n\t\t\tr.HasNextPage = true\n\t\tcase `rel=\"prev\"`:\n\t\t\tr.PrevPage = pageNumber\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/package shout provides a broadcast channel that works in select\n\/\/statements.\npackage shout\n\n\/\/Shout represents a broadcast channel. Each Shout instance has a\n\/\/goroutine that takes messages from the Send channel and transmits\n\/\/them to all subscribed Listen channels.\ntype Shout struct {\n\tsubscribers map[chan interface{}]bool\n\tsub chan chan interface{}\n\tunsub chan chan interface{}\n\tsend chan interface{}\n}\n\n\/\/Send returns the broadcast channel. All Listen channels receive\n\/\/messages sent on this channel. Closing this channel causes a panic.\n\/\/Use the Close() method to close down both the Send channel and all\n\/\/Listen channels.\nfunc (b *Shout) Send() chan<- interface{} {\n\treturn b.send\n}\n\n\/\/run is the Shout event loop. It dies when unsub is closed.\nfunc (s *Shout) run() {\n\tfor {\n\t\tselect {\n\t\tcase sub := <-s.sub:\n\t\t\ts.subscribers[sub] = true\n\t\tcase unsub, ok := <-s.unsub:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdelete(s.subscribers, unsub)\n\t\tcase msg, ok := <-s.send:\n\t\t\tif !ok {\n\t\t\t\tpanic(\"Send channel is closed\")\n\t\t\t}\n\t\t\tfor key := range s.subscribers {\n\t\t\t\tkey <- msg\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/New creates a Shout with the given buffer size on the Send channel.\nfunc New(n int) *Shout {\n\ts := Shout{}\n\ts.subscribers = make(map[chan interface{}]bool)\n\ts.sub = make(chan chan interface{})\n\ts.unsub = make(chan chan interface{})\n\ts.send = make(chan interface{}, n)\n\tgo s.run()\n\treturn &s\n}\n\n\/\/Listen returns a new Listen channel with the given buffer size.\nfunc (s *Shout) Listen(n int) *Listen {\n\tc := make(chan interface{}, n)\n\ts.sub <- c\n\treturn &Listen{s, c}\n}\n\n\/\/Close closes the Shout broadcast channel and all subscriber channels.\nfunc (s *Shout) Close() {\n\tfor k := range s.subscribers {\n\t\tclose(k)\n\t}\n\tclose(s.unsub) \/\/this causes run() to return\n\tclose(s.sub)\n\tclose(s.send)\n}\n\n\/\/Listen is a receiving channel for Shout broadcast messages.\ntype Listen struct {\n\ts *Shout\n\trcv chan interface{}\n}\n\n\/\/Rcv returns the receiving channel for Shout broadcast messages.\nfunc (c *Listen) Rcv() <-chan interface{} {\n\treturn c.rcv\n}\n\n\/\/Close unsubscribes Listen from a Shout channel. You should always\n\/\/Close an unused Listen, because eventually Shout will block trying to\n\/\/send a message to it, and no other subscribed Listen channels will\n\/\/receive messages. Alternatively, closing the Shout this Listen is\n\/\/subscribed to will close the Listen.\nfunc (c *Listen) Close() {\n\tc.s.unsub <- c.rcv\n\tclose(c.rcv)\n}\n<commit_msg>Use done chan for run(), sync subscribe\/unsubscribe<commit_after>\/\/package shout provides a broadcast channel that works in select\n\/\/statements.\npackage shout\n\nimport \"sync\"\n\n\/\/Shout represents a broadcast channel. Each Shout instance has a\n\/\/goroutine that takes messages from the Send channel and transmits\n\/\/them to all subscribed Listen channels.\ntype Shout struct {\n\tmsub sync.Mutex \/\/subscribers mutex\n\tsubscribers map[chan interface{}]bool\n\tsend chan interface{}\n\tdone chan struct{}\n}\n\n\/\/Send returns the broadcast channel. All Listen channels receive\n\/\/messages sent on this channel. Closing this channel causes a panic.\n\/\/Use the Close() method to close down both the Send channel and all\n\/\/Listen channels.\nfunc (b *Shout) Send() chan<- interface{} {\n\treturn b.send\n}\n\n\/\/run is the Shout event loop. It returns when it receives a message\n\/\/on s.done.\nfunc (s *Shout) run() {\n\tfor {\n\t\tselect {\n\t\tcase msg, ok := <-s.send:\n\t\t\tif !ok {\n\t\t\t\tpanic(\"Send channel is closed\")\n\t\t\t}\n\t\t\ts.msub.Lock()\n\t\t\tfor key := range s.subscribers {\n\t\t\t\tkey <- msg\n\t\t\t}\n\t\t\ts.msub.Unlock()\n\t\tcase <-s.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/New creates a Shout with the given buffer size on the Send channel.\nfunc New(n int) *Shout {\n\ts := Shout{}\n\ts.subscribers = make(map[chan interface{}]bool)\n\ts.send = make(chan interface{}, n)\n\ts.done = make(chan struct{})\n\tgo s.run()\n\treturn &s\n}\n\n\/\/Listen returns a new Listen channel with the given buffer size.\nfunc (s *Shout) Listen(n int) *Listen {\n\ts.msub.Lock()\n\tdefer s.msub.Unlock()\n\tc := make(chan interface{}, n)\n\ts.subscribers[c] = true\n\treturn &Listen{s, c}\n}\n\n\/\/Close closes the Shout broadcast channel and all subscriber channels.\nfunc (s *Shout) Close() {\n\ts.msub.Lock()\n\tdefer s.msub.Unlock()\n\tfor k := range s.subscribers {\n\t\tclose(k)\n\t}\n\t\/\/Tell run() to return. Can't do close(s.done) because the\n\t\/\/close(s.send) might be processed by run() first, causing a panic.\n\ts.done <- struct{}{}\n\tclose(s.send)\n}\n\n\/\/Listen is a receiving channel for Shout broadcast messages.\ntype Listen struct {\n\ts *Shout\n\trcv chan interface{}\n}\n\n\/\/Rcv returns the receiving channel for Shout broadcast messages.\nfunc (c *Listen) Rcv() <-chan interface{} {\n\treturn c.rcv\n}\n\n\/\/Close unsubscribes Listen from a Shout channel. You should always\n\/\/Close an unused Listen, because eventually Shout will block trying to\n\/\/send a message to it, and no other subscribed Listen channels will\n\/\/receive messages.\nfunc (c *Listen) Close() {\n\tc.s.msub.Lock()\n\tdefer c.s.msub.Unlock()\n\tdelete(c.s.subscribers, c.rcv)\n\tclose(c.rcv)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package validate provides routines for validating tweets\npackage validate\n\nimport (\n\t\"github.com\/kylemcc\/twitter-text-go\/extract\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tmaxLength = 140\n\tshortUrlLength = 22\n\tshortHttpsUrlLength = 23\n)\n\n\/\/ Returns the length of the string as it would be displayed. This is equivilent to the length of the Unicode NFC\n\/\/ (See: http:\/\/www.unicode.org\/reports\/tr15). This is needed in order to consistently calculate the length of a\n\/\/ string no matter which actual form was transmitted. For example:\n\/\/\n\/\/ U+0065 Latin Small Letter E\n\/\/ + U+0301 Combining Acute Accent\n\/\/ ----------\n\/\/ = 2 bytes, 2 characters, displayed as é (1 visual glyph)\n\/\/ … The NFC of {U+0065, U+0301} is {U+00E9}, which is a single chracter and a +display_length+ of 1\n\/\/\n\/\/ The string could also contain U+00E9 already, in which case the canonicalization will not change the value.\nfunc TweetLength(text string) int {\n \/\/ TODO: input may require normalization. Will have to research\n \/\/ Go's implementation\n\tlength := utf8.RuneCountInString(text)\n\n\turls := extract.ExtractUrls(text)\n\tfor _, url := range urls {\n\t\tlength -= url.Range.Length()\n\t\tif strings.HasPrefix(url.Text, \"https:\/\/\") {\n\t\t\tlength += shortHttpsUrlLength\n\t\t} else {\n\t\t\tlength += shortUrlLength\n\t\t}\n\t}\n\treturn length\n}\n\nfunc TweetIsValid(text string) bool {\n\treturn false\n}\n\nfunc UsernameIsValid(username string) bool {\n\treturn false\n}\n\nfunc ListIsValid(list string) bool {\n\treturn false\n}\n\nfunc HashtagIsValid(hashtag string) bool {\n\treturn false\n}\n\nfunc UrlIsValid(url string) bool {\n\treturn false\n}\n<commit_msg>input does need to be normalized. the runtime doesn't normalize strings<commit_after>\/\/ Package validate provides routines for validating tweets\npackage validate\n\nimport (\n\t\"code.google.com\/p\/go.text\/unicode\/norm\"\n\t\"github.com\/kylemcc\/twitter-text-go\/extract\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tmaxLength = 140\n\tshortUrlLength = 22\n\tshortHttpsUrlLength = 23\n)\n\nvar formC = norm.NFC\n\n\/\/ Returns the length of the string as it would be displayed. This is equivilent to the length of the Unicode NFC\n\/\/ (See: http:\/\/www.unicode.org\/reports\/tr15). This is needed in order to consistently calculate the length of a\n\/\/ string no matter which actual form was transmitted. For example:\n\/\/\n\/\/ U+0065 Latin Small Letter E\n\/\/ + U+0301 Combining Acute Accent\n\/\/ ----------\n\/\/ = 2 bytes, 2 characters, displayed as é (1 visual glyph)\n\/\/ … The NFC of {U+0065, U+0301} is {U+00E9}, which is a single chracter and a +display_length+ of 1\n\/\/\n\/\/ The string could also contain U+00E9 already, in which case the canonicalization will not change the value.\nfunc TweetLength(text string) int {\n\tlength := utf8.RuneCountInString(formC.String(text))\n\n\turls := extract.ExtractUrls(text)\n\tfor _, url := range urls {\n\t\tlength -= url.Range.Length()\n\t\tif strings.HasPrefix(url.Text, \"https:\/\/\") {\n\t\t\tlength += shortHttpsUrlLength\n\t\t} else {\n\t\t\tlength += shortUrlLength\n\t\t}\n\t}\n\treturn length\n}\n\nfunc TweetIsValid(text string) bool {\n\treturn false\n}\n\nfunc UsernameIsValid(username string) bool {\n\treturn false\n}\n\nfunc ListIsValid(list string) bool {\n\treturn false\n}\n\nfunc HashtagIsValid(hashtag string) bool {\n\treturn false\n}\n\nfunc UrlIsValid(url string) bool {\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package bittrex\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/thebotguys\/signalr\"\n)\n\ntype OrderUpdate struct {\n\tOrderb\n\tType int\n}\n\ntype Fill struct {\n\tOrderb\n\tOrderType string\n\tTimestamp jTime\n}\n\ntype ExchangeState struct {\n\tMarketName string\n\tNounce int\n\tBuys []OrderUpdate\n\tSells []OrderUpdate\n\tFills []Fill\n\tInitial bool\n}\n\nfunc (b *Bittrex) SubscribeExchangeUpdate(market string, dataCh chan<- ExchangeState, stop <-chan bool) error {\n\tclient := signalr.NewWebsocketClient()\n\tsendUpdate := func(st ExchangeState) {\n\t\tselect {\n\t\tcase dataCh <- st:\n\t\tdefault:\n\t\t}\n\t}\n\tclient.OnClientMethod = func(hub string, method string, messages []json.RawMessage) {\n\t\tif hub != \"CoreHub\" || method != \"updateExchangeState\" {\n\t\t\treturn\n\t\t}\n\t\tfor _, msg := range messages {\n\t\t\tvar st ExchangeState\n\t\t\tif err := json.Unmarshal(msg, &st); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif st.MarketName != market {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsendUpdate(st)\n\t\t}\n\t}\n\tif err := client.Connect(\"https\", WS_BASE, []string{WS_HUB}); err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\t_, err := client.CallHub(WS_HUB, \"SubscribeToExchangeDeltas\", market)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg, err := client.CallHub(WS_HUB, \"QueryExchangeState\", market)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar st ExchangeState\n\tif err := json.Unmarshal(msg, &st); err != nil {\n\t\treturn err\n\t}\n\tst.Initial = true\n\tsendUpdate(st)\n\t<-stop\n\treturn nil\n}\n<commit_msg>ws: remove log.<commit_after>package bittrex\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/thebotguys\/signalr\"\n)\n\ntype OrderUpdate struct {\n\tOrderb\n\tType int\n}\n\ntype Fill struct {\n\tOrderb\n\tOrderType string\n\tTimestamp jTime\n}\n\ntype ExchangeState struct {\n\tMarketName string\n\tNounce int\n\tBuys []OrderUpdate\n\tSells []OrderUpdate\n\tFills []Fill\n\tInitial bool\n}\n\nfunc (b *Bittrex) SubscribeExchangeUpdate(market string, dataCh chan<- ExchangeState, stop <-chan bool) error {\n\tclient := signalr.NewWebsocketClient()\n\tsendUpdate := func(st ExchangeState) {\n\t\tselect {\n\t\tcase dataCh <- st:\n\t\tdefault:\n\t\t}\n\t}\n\tclient.OnClientMethod = func(hub string, method string, messages []json.RawMessage) {\n\t\tif hub != \"CoreHub\" || method != \"updateExchangeState\" {\n\t\t\treturn\n\t\t}\n\t\tfor _, msg := range messages {\n\t\t\tvar st ExchangeState\n\t\t\tif err := json.Unmarshal(msg, &st); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif st.MarketName != market {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsendUpdate(st)\n\t\t}\n\t}\n\tif err := client.Connect(\"https\", WS_BASE, []string{WS_HUB}); err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\t_, err := client.CallHub(WS_HUB, \"SubscribeToExchangeDeltas\", market)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg, err := client.CallHub(WS_HUB, \"QueryExchangeState\", market)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar st ExchangeState\n\tif err := json.Unmarshal(msg, &st); err != nil {\n\t\treturn err\n\t}\n\tst.Initial = true\n\tsendUpdate(st)\n\t<-stop\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/russross\/blackfriday\"\n)\n\nconst (\n\tZSDIR = \".zs\"\n\tPUBDIR = \".pub\"\n)\n\ntype EvalFn func(args []string, vars map[string]string) (string, error)\n\nfunc split2(s, delim string) (string, string) {\n\tparts := strings.SplitN(s, delim, 2)\n\tif len(parts) == 2 {\n\t\treturn parts[0], parts[1]\n\t} else {\n\t\treturn parts[0], \"\"\n\t}\n}\n\nfunc md(path, s string) (map[string]string, string) {\n\turl := path[:len(path)-len(filepath.Ext(path))] + \".html\"\n\tv := map[string]string{\n\t\t\"file\": path,\n\t\t\"url\": url,\n\t\t\"output\": filepath.Join(PUBDIR, url),\n\t\t\"layout\": \"index.html\",\n\t}\n\tif strings.Index(s, \"\\n\\n\") == -1 {\n\t\treturn map[string]string{}, s\n\t}\n\theader, body := split2(s, \"\\n\\n\")\n\tfor _, line := range strings.Split(header, \"\\n\") {\n\t\tkey, value := split2(line, \":\")\n\t\tv[strings.ToLower(strings.TrimSpace(key))] = strings.TrimSpace(value)\n\t}\n\tif strings.HasPrefix(v[\"url\"], \".\/\") {\n\t\tv[\"url\"] = v[\"url\"][2:]\n\t}\n\treturn v, body\n}\n\nfunc render(s string, vars map[string]string, eval EvalFn) (string, error) {\n\tdelim_open := \"{{\"\n\tdelim_close := \"}}\"\n\n\tout := bytes.NewBuffer(nil)\n\tfor {\n\t\tif from := strings.Index(s, delim_open); from == -1 {\n\t\t\tout.WriteString(s)\n\t\t\treturn out.String(), nil\n\t\t} else {\n\t\t\tif to := strings.Index(s, delim_close); to == -1 {\n\t\t\t\treturn \"\", fmt.Errorf(\"Close delim not found\")\n\t\t\t} else {\n\t\t\t\tout.WriteString(s[:from])\n\t\t\t\tcmd := s[from+len(delim_open) : to]\n\t\t\t\ts = s[to+len(delim_close):]\n\t\t\t\tm := strings.Fields(cmd)\n\t\t\t\tif len(m) == 1 {\n\t\t\t\t\tif v, ok := vars[m[0]]; ok {\n\t\t\t\t\t\tout.WriteString(v)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif res, err := eval(m, vars); err == nil {\n\t\t\t\t\tout.WriteString(res)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err) \/\/ silent\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc env(vars map[string]string) []string {\n\tenv := []string{\"ZS=\" + os.Args[0], \"ZS_OUTDIR=\" + PUBDIR}\n\tenv = append(env, os.Environ()...)\n\tif vars != nil {\n\t\tfor k, v := range vars {\n\t\t\tenv = append(env, \"ZS_\"+strings.ToUpper(k)+\"=\"+v)\n\t\t}\n\t}\n\treturn env\n}\n\nfunc run(cmd string, args []string, vars map[string]string, output io.Writer) error {\n\tvar errbuf bytes.Buffer\n\tc := exec.Command(cmd, args...)\n\tc.Env = env(vars)\n\tc.Stdout = output\n\tc.Stderr = &errbuf\n\n\terr := c.Run()\n\n\tif errbuf.Len() > 0 {\n\t\tlog.Println(errbuf.String())\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc eval(cmd []string, vars map[string]string) (string, error) {\n\toutbuf := bytes.NewBuffer(nil)\n\terr := run(path.Join(ZSDIR, cmd[0]), cmd[1:], vars, outbuf)\n\tif err != nil {\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\treturn \"\", err\n\t\t}\n\t\toutbuf = bytes.NewBuffer(nil)\n\t\terr := run(cmd[0], cmd[1:], vars, outbuf)\n\t\t\/\/ Return exit errors, but ignore if the command was not found\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn outbuf.String(), nil\n}\n\nfunc buildMarkdown(path string) error {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, body := md(path, string(b))\n\tcontent, err := render(body, v, eval)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv[\"content\"] = string(blackfriday.MarkdownBasic([]byte(content)))\n\treturn buildPlain(filepath.Join(ZSDIR, v[\"layout\"]), v)\n}\n\nfunc buildPlain(path string, vars map[string]string) error {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontent, err := render(string(b), vars, eval)\n\tif err != nil {\n\t\treturn err\n\t}\n\toutput := filepath.Join(PUBDIR, path)\n\tif s, ok := vars[\"output\"]; ok {\n\t\toutput = s\n\t}\n\terr = ioutil.WriteFile(output, []byte(content), 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc copyFile(path string) (err error) {\n\tvar in, out *os.File\n\tif in, err = os.Open(path); err == nil {\n\t\tdefer in.Close()\n\t\tif out, err = os.Create(filepath.Join(PUBDIR, path)); err == nil {\n\t\t\tdefer out.Close()\n\t\t\t_, err = io.Copy(out, in)\n\t\t}\n\t}\n\treturn err\n}\n\nfunc buildAll(once bool) {\n\tlastModified := time.Unix(0, 0)\n\tmodified := false\n\tfor {\n\t\tos.Mkdir(PUBDIR, 0755)\n\t\terr := filepath.Walk(\".\", func(path string, info os.FileInfo, err error) error {\n\t\t\t\/\/ ignore hidden files and directories\n\t\t\tif filepath.Base(path)[0] == '.' || strings.HasPrefix(path, \".\") {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\tos.Mkdir(filepath.Join(PUBDIR, path), 0755)\n\t\t\t\treturn nil\n\t\t\t} else if info.ModTime().After(lastModified) {\n\t\t\t\tif !modified {\n\t\t\t\t\t\/\/ About to be modified, so run pre-build hook\n\t\t\t\t\trun(filepath.Join(ZSDIR, \"pre\"), []string{}, nil, nil)\n\t\t\t\t\tmodified = true\n\t\t\t\t}\n\t\t\t\text := filepath.Ext(path)\n\t\t\t\tif ext == \".md\" || ext == \".mkd\" {\n\t\t\t\t\tlog.Println(\"mkd: \", path)\n\t\t\t\t\treturn buildMarkdown(path)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"raw: \", path)\n\t\t\t\t\treturn copyFile(path)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR:\", err)\n\t\t}\n\t\tif modified {\n\t\t\t\/\/ Something was modified, so post-build hook\n\t\t\trun(filepath.Join(ZSDIR, \"post\"), []string{}, nil, nil)\n\t\t\tmodified = false\n\t\t}\n\t\tlastModified = time.Now()\n\t\tif once {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc main() {\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(os.Args[0], \"<command> [args]\")\n\t\treturn\n\t}\n\tcmd := os.Args[1]\n\targs := os.Args[2:]\n\tswitch cmd {\n\tcase \"build\":\n\t\tbuildAll(true)\n\tcase \"watch\":\n\t\tbuildAll(false) \/\/ pass duration\n\tcase \"var\":\n\t\tif len(args) == 0 {\n\t\t\tlog.Println(\"ERROR: filename expected\")\n\t\t\treturn\n\t\t}\n\t\tif b, err := ioutil.ReadFile(args[0]); err == nil {\n\t\t\tvars, _ := md(args[0], string(b))\n\t\t\tif len(args) > 1 {\n\t\t\t\tfor _, a := range args[1:] {\n\t\t\t\t\tfmt.Println(vars[a])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k, v := range vars {\n\t\t\t\t\tfmt.Println(k + \":\" + v)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t}\n\tdefault:\n\t\terr := run(path.Join(ZSDIR, cmd), args, map[string]string{}, os.Stdout)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n<commit_msg>added rendering support for html and xml<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/russross\/blackfriday\"\n)\n\nconst (\n\tZSDIR = \".zs\"\n\tPUBDIR = \".pub\"\n)\n\ntype EvalFn func(args []string, vars map[string]string) (string, error)\n\nfunc split2(s, delim string) (string, string) {\n\tparts := strings.SplitN(s, delim, 2)\n\tif len(parts) == 2 {\n\t\treturn parts[0], parts[1]\n\t} else {\n\t\treturn parts[0], \"\"\n\t}\n}\n\nfunc md(path, s string) (map[string]string, string) {\n\turl := path[:len(path)-len(filepath.Ext(path))] + \".html\"\n\tv := map[string]string{\n\t\t\"file\": path,\n\t\t\"url\": url,\n\t\t\"output\": filepath.Join(PUBDIR, url),\n\t\t\"layout\": \"index.html\",\n\t}\n\tif strings.Index(s, \"\\n\\n\") == -1 {\n\t\treturn map[string]string{}, s\n\t}\n\theader, body := split2(s, \"\\n\\n\")\n\tfor _, line := range strings.Split(header, \"\\n\") {\n\t\tkey, value := split2(line, \":\")\n\t\tv[strings.ToLower(strings.TrimSpace(key))] = strings.TrimSpace(value)\n\t}\n\tif strings.HasPrefix(v[\"url\"], \".\/\") {\n\t\tv[\"url\"] = v[\"url\"][2:]\n\t}\n\treturn v, body\n}\n\nfunc render(s string, vars map[string]string, eval EvalFn) (string, error) {\n\tdelim_open := \"{{\"\n\tdelim_close := \"}}\"\n\n\tout := bytes.NewBuffer(nil)\n\tfor {\n\t\tif from := strings.Index(s, delim_open); from == -1 {\n\t\t\tout.WriteString(s)\n\t\t\treturn out.String(), nil\n\t\t} else {\n\t\t\tif to := strings.Index(s, delim_close); to == -1 {\n\t\t\t\treturn \"\", fmt.Errorf(\"Close delim not found\")\n\t\t\t} else {\n\t\t\t\tout.WriteString(s[:from])\n\t\t\t\tcmd := s[from+len(delim_open) : to]\n\t\t\t\ts = s[to+len(delim_close):]\n\t\t\t\tm := strings.Fields(cmd)\n\t\t\t\tif len(m) == 1 {\n\t\t\t\t\tif v, ok := vars[m[0]]; ok {\n\t\t\t\t\t\tout.WriteString(v)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif res, err := eval(m, vars); err == nil {\n\t\t\t\t\tout.WriteString(res)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err) \/\/ silent\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc env(vars map[string]string) []string {\n\tenv := []string{\"ZS=\" + os.Args[0], \"ZS_OUTDIR=\" + PUBDIR}\n\tenv = append(env, os.Environ()...)\n\tif vars != nil {\n\t\tfor k, v := range vars {\n\t\t\tenv = append(env, \"ZS_\"+strings.ToUpper(k)+\"=\"+v)\n\t\t}\n\t}\n\treturn env\n}\n\nfunc run(cmd string, args []string, vars map[string]string, output io.Writer) error {\n\tvar errbuf bytes.Buffer\n\tc := exec.Command(cmd, args...)\n\tc.Env = env(vars)\n\tc.Stdout = output\n\tc.Stderr = &errbuf\n\n\terr := c.Run()\n\n\tif errbuf.Len() > 0 {\n\t\tlog.Println(errbuf.String())\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc eval(cmd []string, vars map[string]string) (string, error) {\n\toutbuf := bytes.NewBuffer(nil)\n\terr := run(path.Join(ZSDIR, cmd[0]), cmd[1:], vars, outbuf)\n\tif err != nil {\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\treturn \"\", err\n\t\t}\n\t\toutbuf = bytes.NewBuffer(nil)\n\t\terr := run(cmd[0], cmd[1:], vars, outbuf)\n\t\t\/\/ Return exit errors, but ignore if the command was not found\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn outbuf.String(), nil\n}\n\nfunc buildMarkdown(path string) error {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, body := md(path, string(b))\n\tcontent, err := render(body, v, eval)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv[\"content\"] = string(blackfriday.MarkdownBasic([]byte(content)))\n\treturn buildPlain(filepath.Join(ZSDIR, v[\"layout\"]), v)\n}\n\nfunc buildPlain(path string, vars map[string]string) error {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontent, err := render(string(b), vars, eval)\n\tif err != nil {\n\t\treturn err\n\t}\n\toutput := filepath.Join(PUBDIR, path)\n\tif s, ok := vars[\"output\"]; ok {\n\t\toutput = s\n\t}\n\terr = ioutil.WriteFile(output, []byte(content), 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc copyFile(path string) (err error) {\n\tvar in, out *os.File\n\tif in, err = os.Open(path); err == nil {\n\t\tdefer in.Close()\n\t\tif out, err = os.Create(filepath.Join(PUBDIR, path)); err == nil {\n\t\t\tdefer out.Close()\n\t\t\t_, err = io.Copy(out, in)\n\t\t}\n\t}\n\treturn err\n}\n\nfunc buildAll(once bool) {\n\tlastModified := time.Unix(0, 0)\n\tmodified := false\n\tfor {\n\t\tos.Mkdir(PUBDIR, 0755)\n\t\terr := filepath.Walk(\".\", func(path string, info os.FileInfo, err error) error {\n\t\t\t\/\/ ignore hidden files and directories\n\t\t\tif filepath.Base(path)[0] == '.' || strings.HasPrefix(path, \".\") {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\tos.Mkdir(filepath.Join(PUBDIR, path), 0755)\n\t\t\t\treturn nil\n\t\t\t} else if info.ModTime().After(lastModified) {\n\t\t\t\tif !modified {\n\t\t\t\t\t\/\/ About to be modified, so run pre-build hook\n\t\t\t\t\trun(filepath.Join(ZSDIR, \"pre\"), []string{}, nil, nil)\n\t\t\t\t\tmodified = true\n\t\t\t\t}\n\t\t\t\text := filepath.Ext(path)\n\t\t\t\tif ext == \".md\" || ext == \".mkd\" {\n\t\t\t\t\tlog.Println(\"mkd: \", path)\n\t\t\t\t\treturn buildMarkdown(path)\n\t\t\t\t} else if ext == \".html\" || ext == \".xml\" {\n\t\t\t\t\treturn buildPlain(path, map[string]string{})\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"raw: \", path)\n\t\t\t\t\treturn copyFile(path)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR:\", err)\n\t\t}\n\t\tif modified {\n\t\t\t\/\/ Something was modified, so post-build hook\n\t\t\trun(filepath.Join(ZSDIR, \"post\"), []string{}, nil, nil)\n\t\t\tmodified = false\n\t\t}\n\t\tlastModified = time.Now()\n\t\tif once {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc main() {\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(os.Args[0], \"<command> [args]\")\n\t\treturn\n\t}\n\tcmd := os.Args[1]\n\targs := os.Args[2:]\n\tswitch cmd {\n\tcase \"build\":\n\t\tbuildAll(true)\n\tcase \"watch\":\n\t\tbuildAll(false) \/\/ pass duration\n\tcase \"var\":\n\t\tif len(args) == 0 {\n\t\t\tlog.Println(\"ERROR: filename expected\")\n\t\t\treturn\n\t\t}\n\t\tif b, err := ioutil.ReadFile(args[0]); err == nil {\n\t\t\tvars, _ := md(args[0], string(b))\n\t\t\tif len(args) > 1 {\n\t\t\t\tfor _, a := range args[1:] {\n\t\t\t\t\tfmt.Println(vars[a])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k, v := range vars {\n\t\t\t\t\tfmt.Println(k + \":\" + v)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t}\n\tdefault:\n\t\terr := run(path.Join(ZSDIR, cmd), args, map[string]string{}, os.Stdout)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/rif\/cache2go\"\n\t\"strconv\"\n)\n\nfunc main() {\n\tcache := cache2go.Cache(\"myCache\")\n\tcache.SetDataLoader(func(key interface{}) *cache2go.CacheItem {\n\t\t\/\/ Apply some clever loading logic here, e.g. read values for\n\t\t\/\/ this key from database, network or file\n\t\tval := \"This is a test with key \" + key.(string)\n\n\t\t\/\/ This helper method creates the cached item for us. Yay!\n\t\titem := cache2go.CreateCacheItem(key, 0, val)\n\t\treturn &item\n\t})\n\n\t\/\/ Let's retrieve a few auto-generated items from the cache\n\tfor i := 0; i < 10; i++ {\n\t\tres, err := cache.Value(\"someKey_\" + strconv.Itoa(i))\n\t\tif err == nil {\n\t\t\tfmt.Println(\"Found value in cache:\", res.Data())\n\t\t} else {\n\t\t\tfmt.Println(\"Error retrieving value from cache:\", err)\n\t\t}\n\t}\n}\n<commit_msg>* More comments for example.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/rif\/cache2go\"\n\t\"strconv\"\n)\n\nfunc main() {\n\tcache := cache2go.Cache(\"myCache\")\n\n\t\/\/ The data loader gets called automatically whenever something\n\t\/\/ tries to retrieve a non-existing key from the cache\n\tcache.SetDataLoader(func(key interface{}) *cache2go.CacheItem {\n\t\t\/\/ Apply some clever loading logic here, e.g. read values for\n\t\t\/\/ this key from database, network or file\n\t\tval := \"This is a test with key \" + key.(string)\n\n\t\t\/\/ This helper method creates the cached item for us. Yay!\n\t\titem := cache2go.CreateCacheItem(key, 0, val)\n\t\treturn &item\n\t})\n\n\t\/\/ Let's retrieve a few auto-generated items from the cache\n\tfor i := 0; i < 10; i++ {\n\t\tres, err := cache.Value(\"someKey_\" + strconv.Itoa(i))\n\t\tif err == nil {\n\t\t\tfmt.Println(\"Found value in cache:\", res.Data())\n\t\t} else {\n\t\t\tfmt.Println(\"Error retrieving value from cache:\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package decor\n\n\/\/ StaticName returns name decorator.\n\/\/\n\/\/\t`name` string to display\n\/\/\n\/\/\t`wcc` optional WC config\nfunc StaticName(name string, wcc ...WC) Decorator {\n\treturn Name(name, wcc...)\n}\n\n\/\/ Name returns name decorator.\n\/\/\n\/\/\t`name` string to display\n\/\/\n\/\/\t`wcc` optional WC config\nfunc Name(name string, wcc ...WC) Decorator {\n\tvar wc WC\n\tfor _, widthConf := range wcc {\n\t\twc = widthConf\n\t}\n\twc.Init()\n\td := &nameDecorator{\n\t\tWC: wc,\n\t\tmsg: name,\n\t}\n\treturn d\n}\n\ntype nameDecorator struct {\n\tWC\n\tmsg string\n\tcomplete *string\n}\n\nfunc (d *nameDecorator) Decor(st *Statistics) string {\n\tif st.Completed && d.complete != nil {\n\t\treturn d.FormatMsg(*d.complete)\n\t}\n\treturn d.FormatMsg(d.msg)\n}\n\nfunc (d *nameDecorator) OnCompleteMessage(msg string) {\n\td.complete = &msg\n}\n<commit_msg>remove StaticName<commit_after>package decor\n\n\/\/ Name returns name decorator.\n\/\/\n\/\/\t`name` string to display\n\/\/\n\/\/\t`wcc` optional WC config\nfunc Name(name string, wcc ...WC) Decorator {\n\tvar wc WC\n\tfor _, widthConf := range wcc {\n\t\twc = widthConf\n\t}\n\twc.Init()\n\td := &nameDecorator{\n\t\tWC: wc,\n\t\tmsg: name,\n\t}\n\treturn d\n}\n\ntype nameDecorator struct {\n\tWC\n\tmsg string\n\tcomplete *string\n}\n\nfunc (d *nameDecorator) Decor(st *Statistics) string {\n\tif st.Completed && d.complete != nil {\n\t\treturn d.FormatMsg(*d.complete)\n\t}\n\treturn d.FormatMsg(d.msg)\n}\n\nfunc (d *nameDecorator) OnCompleteMessage(msg string) {\n\td.complete = &msg\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"text\/template\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libcompose\/version\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar versionTemplate = `Version: {{.Version}} ({{.GitCommit}})\nGo version: {{.GoVersion}}\nBuilt: {{.BuildTime}}\nOS\/Arch: {{.Os}}\/{{.Arch}}`\n\n\/\/ Version prints the libcompose version number and additionnal informations.\nfunc Version(c *cli.Context) {\n\tif c.Bool(\"short\") {\n\t\tfmt.Println(version.VERSION)\n\t\treturn\n\t}\n\n\ttmpl, err := template.New(\"\").Parse(versionTemplate)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tv := struct {\n\t\tVersion string\n\t\tGitCommit string\n\t\tGoVersion string\n\t\tBuildTime string\n\t\tOs string\n\t\tArch string\n\t}{\n\t\tVersion: version.VERSION,\n\t\tGitCommit: version.GITCOMMIT,\n\t\tGoVersion: runtime.Version(),\n\t\tBuildTime: version.BUILDTIME,\n\t\tOs: runtime.GOOS,\n\t\tArch: runtime.GOARCH,\n\t}\n\n\tif err := tmpl.Execute(os.Stdout, v); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tfmt.Printf(\"\\n\")\n\treturn\n}\n<commit_msg>Update Version signature to remove warning<commit_after>package app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"text\/template\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/libcompose\/version\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar versionTemplate = `Version: {{.Version}} ({{.GitCommit}})\nGo version: {{.GoVersion}}\nBuilt: {{.BuildTime}}\nOS\/Arch: {{.Os}}\/{{.Arch}}`\n\n\/\/ Version prints the libcompose version number and additionnal informations.\nfunc Version(c *cli.Context) error {\n\tif c.Bool(\"short\") {\n\t\tfmt.Println(version.VERSION)\n\t\treturn nil\n\t}\n\n\ttmpl, err := template.New(\"\").Parse(versionTemplate)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tv := struct {\n\t\tVersion string\n\t\tGitCommit string\n\t\tGoVersion string\n\t\tBuildTime string\n\t\tOs string\n\t\tArch string\n\t}{\n\t\tVersion: version.VERSION,\n\t\tGitCommit: version.GITCOMMIT,\n\t\tGoVersion: runtime.Version(),\n\t\tBuildTime: version.BUILDTIME,\n\t\tOs: runtime.GOOS,\n\t\tArch: runtime.GOARCH,\n\t}\n\n\tif err := tmpl.Execute(os.Stdout, v); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tfmt.Printf(\"\\n\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\/\/\n\/\/ A collection of tests for a file system backed by a GCS bucket, where in\n\/\/ most cases we interact with the file system directly for creating and\n\/\/ mofiying files (rather than through the side channel of the GCS bucket\n\/\/ itself).\n\/\/\n\/\/ These tests are registered by RegisterFSTests.\n\npackage fstesting\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Read-write interaction\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype readWriteTest struct {\n\tfsTest\n}\n\nfunc (t *readWriteTest) OpenNonExistent_CreateFlagNotSet() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenExistingFile_ReadOnly() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.Open(path.Join(t.mfs.Dir(), \"foo\"))\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tExpectEq(nil, f.Close())\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Read its contents.\n\tfileContents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(contents, string(fileContents))\n\n\t\/\/ Attempt to write.\n\tn, err := f.Write([]byte(\"taco\"))\n\n\tAssertEq(0, n)\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"bad file descriptor\")))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_WriteOnly() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.OpenFile(path.Join(t.mfs.Dir(), \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Reading should fail.\n\t_, err = ioutil.ReadAll(f)\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"bad file descriptor\")))\n\n\t\/\/ Write to the start of the file using File.Write.\n\t_, err = f.Write([]byte(\"000\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Write to the middle of the file using File.WriteAt.\n\t_, err = f.WriteAt([]byte(\"111\"), 4)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek and write past the end of the file.\n\t_, err = f.Seek(int64(len(contents)), 0)\n\tAssertEq(nil, err)\n\n\t_, err = f.Write([]byte(\"222\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Close the file.\n\tAssertEq(nil, f.Close())\n\tf = nil\n\n\t\/\/ Read back its contents.\n\tfileContents, err := ioutil.ReadFile(path.Join(t.mfs.Dir(), \"foo\"))\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(fileContents))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_ReadWrite() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.OpenFile(path.Join(t.mfs.Dir(), \"foo\"), os.O_RDWR, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Write to the start of the file using File.Write.\n\t_, err = f.Write([]byte(\"000\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Write to the middle of the file using File.WriteAt.\n\t_, err = f.WriteAt([]byte(\"111\"), 4)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek and write past the end of the file.\n\t_, err = f.Seek(int64(len(contents)), 0)\n\tAssertEq(nil, err)\n\n\t_, err = f.Write([]byte(\"222\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Read some contents with Seek and Read.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\tbuf := make([]byte, 4)\n\t_, err = io.ReadFull(f, buf)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"111r\", string(buf))\n\n\t\/\/ Read the full contents with ReadAt.\n\tbuf = make([]byte, len(contents)+len(\"222\"))\n\t_, err = f.ReadAt(buf, 0)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(buf))\n\n\t\/\/ Close the file.\n\tAssertEq(nil, f.Close())\n\tf = nil\n\n\t\/\/ Read back its contents after opening anew for reading.\n\tfileContents, err := ioutil.ReadFile(path.Join(t.mfs.Dir(), \"foo\"))\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(fileContents))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenReadOnlyFileForWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Seek() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Stat() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Sync() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Truncate() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) CreateEmptyDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) CreateWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) ReadFromWriteOnlyFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) WriteToReadOnlyFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) StatUnopenedFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) LstatUnopenedFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) ListingsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) AdditionsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) RemovalsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) BufferedWritesFlushedOnUnmount() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>FakeGCS.ReadWriteTest.OpenExistingFile_Append<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\/\/\n\/\/ A collection of tests for a file system backed by a GCS bucket, where in\n\/\/ most cases we interact with the file system directly for creating and\n\/\/ mofiying files (rather than through the side channel of the GCS bucket\n\/\/ itself).\n\/\/\n\/\/ These tests are registered by RegisterFSTests.\n\npackage fstesting\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Read-write interaction\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype readWriteTest struct {\n\tfsTest\n}\n\nfunc (t *readWriteTest) OpenNonExistent_CreateFlagNotSet() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenNonExistent_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenExistingFile_ReadOnly() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.Open(path.Join(t.mfs.Dir(), \"foo\"))\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tExpectEq(nil, f.Close())\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Read its contents.\n\tfileContents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(contents, string(fileContents))\n\n\t\/\/ Attempt to write.\n\tn, err := f.Write([]byte(\"taco\"))\n\n\tAssertEq(0, n)\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"bad file descriptor\")))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_WriteOnly() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.OpenFile(path.Join(t.mfs.Dir(), \"foo\"), os.O_WRONLY, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Reading should fail.\n\t_, err = ioutil.ReadAll(f)\n\n\tAssertNe(nil, err)\n\tExpectThat(err, Error(HasSubstr(\"bad file descriptor\")))\n\n\t\/\/ Write to the start of the file using File.Write.\n\t_, err = f.Write([]byte(\"000\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Write to the middle of the file using File.WriteAt.\n\t_, err = f.WriteAt([]byte(\"111\"), 4)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek and write past the end of the file.\n\t_, err = f.Seek(int64(len(contents)), 0)\n\tAssertEq(nil, err)\n\n\t_, err = f.Write([]byte(\"222\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Check the size now.\n\tAssertTrue(false, \"TODO\")\n\n\t\/\/ Close the file.\n\tAssertEq(nil, f.Close())\n\tf = nil\n\n\t\/\/ Read back its contents.\n\tfileContents, err := ioutil.ReadFile(path.Join(t.mfs.Dir(), \"foo\"))\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(fileContents))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_ReadWrite() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.OpenFile(path.Join(t.mfs.Dir(), \"foo\"), os.O_RDWR, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Write to the start of the file using File.Write.\n\t_, err = f.Write([]byte(\"000\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Write to the middle of the file using File.WriteAt.\n\t_, err = f.WriteAt([]byte(\"111\"), 4)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek and write past the end of the file.\n\t_, err = f.Seek(int64(len(contents)), 0)\n\tAssertEq(nil, err)\n\n\t_, err = f.Write([]byte(\"222\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Check the size now.\n\tAssertTrue(false, \"TODO\")\n\n\t\/\/ Read some contents with Seek and Read.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\tbuf := make([]byte, 4)\n\t_, err = io.ReadFull(f, buf)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"111r\", string(buf))\n\n\t\/\/ Read the full contents with ReadAt.\n\tbuf = make([]byte, len(contents)+len(\"222\"))\n\t_, err = f.ReadAt(buf, 0)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(buf))\n\n\t\/\/ Close the file.\n\tAssertEq(nil, f.Close())\n\tf = nil\n\n\t\/\/ Read back its contents after opening anew for reading.\n\tfileContents, err := ioutil.ReadFile(path.Join(t.mfs.Dir(), \"foo\"))\n\n\tAssertEq(nil, err)\n\tExpectEq(\"000o111ritoenchilada222\", string(fileContents))\n}\n\nfunc (t *readWriteTest) OpenExistingFile_Append() {\n\t\/\/ Create a file.\n\tconst contents = \"tacoburritoenchilada\"\n\tAssertEq(\n\t\tnil,\n\t\tioutil.WriteFile(\n\t\t\tpath.Join(t.mfs.Dir(), \"foo\"),\n\t\t\t[]byte(contents),\n\t\t\tos.FileMode(0644)))\n\n\t\/\/ Open the file for reading.\n\tf, err := os.OpenFile(path.Join(t.mfs.Dir(), \"foo\"), os.O_RDWR|os.O_APPEND, 0)\n\tAssertEq(nil, err)\n\n\tdefer func() {\n\t\tif f != nil {\n\t\t\tExpectEq(nil, f.Close())\n\t\t}\n\t}()\n\n\t\/\/ Check its vitals.\n\tExpectEq(path.Join(t.mfs.Dir(), \"foo\"), f.Name())\n\n\tfi, err := f.Stat()\n\tExpectEq(\"foo\", fi.Name())\n\tExpectEq(len(contents), fi.Size())\n\tExpectEq(os.FileMode(0), fi.Mode() & ^os.ModePerm)\n\tExpectLt(math.Abs(time.Since(fi.ModTime()).Seconds()), 10)\n\tExpectFalse(fi.IsDir())\n\n\t\/\/ Write using File.Write. This should go to the end of the file regardless\n\t\/\/ of whether we Seek somewhere else first.\n\t_, err = f.Seek(1, 0)\n\tAssertEq(nil, err)\n\n\t_, err = f.Write([]byte(\"222\"))\n\tAssertEq(nil, err)\n\n\t\/\/ Write to the middle of the file using File.WriteAt.\n\t_, err = f.WriteAt([]byte(\"111\"), 4)\n\tAssertEq(nil, err)\n\n\t\/\/ Check the size now.\n\tfi, err = f.Stat()\n\tAssertEq(nil, err)\n\tExpectEq(len(contents)+len(\"222\"), fi.Size())\n\n\t\/\/ Read some contents with Seek and Read.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\tbuf := make([]byte, 4)\n\t_, err = io.ReadFull(f, buf)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"111r\", string(buf))\n\n\t\/\/ Read the full contents with ReadAt.\n\tbuf = make([]byte, len(contents)+len(\"222\"))\n\t_, err = f.ReadAt(buf, 0)\n\n\tAssertEq(nil, err)\n\tExpectEq(\"taco111ritoenchilada222\", string(buf))\n\n\t\/\/ Close the file.\n\tAssertEq(nil, f.Close())\n\tf = nil\n\n\t\/\/ Read back its contents after opening anew for reading.\n\tfileContents, err := ioutil.ReadFile(path.Join(t.mfs.Dir(), \"foo\"))\n\n\tAssertEq(nil, err)\n\tExpectEq(\"taco111ritoenchilada222\", string(fileContents))\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateExistingFile_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_ReadOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_WriteOnly() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_ReadWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenAlreadyOpenedFile_Append() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenReadOnlyFileForWrite() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Seek() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Stat() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Sync() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) Truncate() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) CreateEmptyDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) CreateWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) OpenWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) TruncateWithinSubDirectory() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) ReadFromWriteOnlyFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) WriteToReadOnlyFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) StatUnopenedFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) LstatUnopenedFile() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) ListingsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) AdditionsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) RemovalsAreCached() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *readWriteTest) BufferedWritesFlushedOnUnmount() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>package postgres_test\n\nimport (\n\t\"database\/sql\"\n\t\"github.com\/RangelReale\/osin\"\n\t_ \"github.com\/lib\/pq\"\n\t\"github.com\/ory-am\/dockertest\"\n\t\"github.com\/ory-am\/osin-storage\/Godeps\/_workspace\/src\/github.com\/pborman\/uuid\"\n\t\"github.com\/ory-am\/osin-storage\/storage\"\n\t. \"github.com\/ory-am\/osin-storage\/storage\/postgres\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar db *sql.DB\nvar store *Storage\nvar userDataMock = \"bar\"\n\nfunc TestMain(m *testing.M) {\n\tvar err error\n\tvar c dockertest.ContainerID\n\tc, db, err = dockertest.OpenPostgreSQLContainerConnection(15, time.Millisecond*500)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not set up PostgreSQL container: %v\", err)\n\t}\n\tdefer c.KillRemove()\n\n\tstore = New(db)\n\tif err = store.CreateSchemas(); err != nil {\n\t\tlog.Fatalf(\"Could not set up schemas: %v\", err)\n\t}\n\n\tos.Exit(m.Run())\n}\n\nfunc TestClientOperations(t *testing.T) {\n\tcreate := &osin.DefaultClient{\"1\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tcreateClient(t, store, create)\n\tgetClient(t, store, create)\n\n\tupdate := &osin.DefaultClient{\"1\", \"secret\", \"http:\/\/www.google.com\/\", \"\"}\n\tupdateClient(t, store, update)\n\tgetClient(t, store, update)\n}\n\nfunc TestAuthorizeOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"2\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tcreateClient(t, store, client)\n\n\tfor k, authorize := range []*osin.AuthorizeData{\n\t\t&osin.AuthorizeData{\n\t\t\tClient: client,\n\t\t\tCode: uuid.New(),\n\t\t\tExpiresIn: int32(60),\n\t\t\tScope: \"scope\",\n\t\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\t\tState: \"state\",\n\t\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\tUserData: userDataMock,\n\t\t},\n\t} {\n\t\t\/\/ Test save\n\t\trequire.Nil(t, store.SaveAuthorize(authorize))\n\n\t\t\/\/ Test fetch\n\t\tresult, err := store.LoadAuthorize(authorize.Code)\n\t\trequire.Nil(t, err)\n\t\trequire.True(t, reflect.DeepEqual(authorize, result), \"Case: %d\\n%v\\n\\n%v\", k, authorize, result)\n\n\t\t\/\/ Test remove\n\t\trequire.Nil(t, store.RemoveAuthorize(authorize.Code))\n\t\t_, err = store.LoadAuthorize(authorize.Code)\n\t\trequire.NotNil(t, err)\n\t}\n\n\tremoveClient(t, store, client)\n}\n\nfunc TestStoreFailsOnInvalidUserData(t *testing.T) {\n\tclient := &osin.DefaultClient{\"3\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tauthorize := &osin.AuthorizeData{\n\t\tClient: client,\n\t\tCode: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\tState: \"state\",\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: struct{ foo string }{\"bar\"},\n\t}\n\taccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nil,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: struct{ foo string }{\"bar\"},\n\t}\n\tassert.NotNil(t, store.SaveAuthorize(authorize))\n\tassert.NotNil(t, store.SaveAccess(access))\n}\n\nfunc TestAccessOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"3\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tauthorize := &osin.AuthorizeData{\n\t\tClient: client,\n\t\tCode: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\tState: \"state\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\tnestedAccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nil,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\taccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nestedAccess,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\n\tcreateClient(t, store, client)\n\trequire.Nil(t, store.SaveAuthorize(authorize))\n\trequire.Nil(t, store.SaveAccess(nestedAccess))\n\trequire.Nil(t, store.SaveAccess(access))\n\n\tresult, err := store.LoadAccess(access.AccessToken)\n\trequire.Nil(t, err)\n\trequire.True(t, reflect.DeepEqual(access, result))\n\n\trequire.Nil(t, store.RemoveAccess(access.AccessToken))\n\t_, err = store.LoadAccess(access.AccessToken)\n\trequire.NotNil(t, err)\n\trequire.Nil(t, store.RemoveAuthorize(authorize.Code))\n\n\tremoveClient(t, store, client)\n}\n\nfunc TestRefreshOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"4\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\ttype test struct {\n\t\taccess *osin.AccessData\n\t}\n\n\tfor k, c := range []*test{\n\t\t&test{\n\t\t\taccess: &osin.AccessData{\n\t\t\t\tClient: client,\n\t\t\t\tAuthorizeData: &osin.AuthorizeData{\n\t\t\t\t\tClient: client,\n\t\t\t\t\tCode: uuid.New(),\n\t\t\t\t\tExpiresIn: int32(60),\n\t\t\t\t\tScope: \"scope\",\n\t\t\t\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\t\t\t\tState: \"state\",\n\t\t\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\t\t\tUserData: userDataMock,\n\t\t\t\t},\n\t\t\t\tAccessData: nil,\n\t\t\t\tAccessToken: uuid.New(),\n\t\t\t\tRefreshToken: uuid.New(),\n\t\t\t\tExpiresIn: int32(60),\n\t\t\t\tScope: \"scope\",\n\t\t\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\t\tUserData: userDataMock,\n\t\t\t},\n\t\t},\n\t} {\n\t\tcreateClient(t, store, client)\n\t\trequire.Nil(t, store.SaveAuthorize(c.access.AuthorizeData), \"Case %d\", k)\n\t\trequire.Nil(t, store.SaveAccess(c.access), \"Case %d\", k)\n\n\t\tresult, err := store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.Nil(t, err)\n\t\trequire.True(t, reflect.DeepEqual(c.access, result), \"Case %d\", k)\n\n\t\trequire.Nil(t, store.RemoveRefresh(c.access.RefreshToken))\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\n\t\trequire.NotNil(t, err, \"Case %d\", k)\n\t\trequire.Nil(t, store.RemoveAccess(c.access.AccessToken), \"Case %d\", k)\n\t\trequire.Nil(t, store.SaveAccess(c.access), \"Case %d\", k)\n\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.Nil(t, err, \"Case %d\", k)\n\n\t\trequire.Nil(t, store.RemoveAccess(c.access.AccessToken), \"Case %d\", k)\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.NotNil(t, err, \"Case %d\", k)\n\n\t}\n\tremoveClient(t, store, client)\n}\n\nfunc getClient(t *testing.T, store storage.Storage, set osin.Client) {\n\tclient, err := store.GetClient(set.GetId())\n\trequire.Nil(t, err)\n\trequire.EqualValues(t, set, client)\n}\n\nfunc createClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.CreateClient(set))\n}\n\nfunc updateClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.UpdateClient(set))\n}\n\nfunc removeClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.RemoveClient(set.GetId()))\n}\n<commit_msg>test: import path fix<commit_after>package postgres_test\n\nimport (\n\t\"database\/sql\"\n\t\"github.com\/RangelReale\/osin\"\n\t_ \"github.com\/lib\/pq\"\n\t\"github.com\/ory-am\/dockertest\"\n\t\"github.com\/ory-am\/osin-storage\/storage\"\n\t. \"github.com\/ory-am\/osin-storage\/storage\/postgres\"\n\t\"github.com\/pborman\/uuid\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar db *sql.DB\nvar store *Storage\nvar userDataMock = \"bar\"\n\nfunc TestMain(m *testing.M) {\n\tvar err error\n\tvar c dockertest.ContainerID\n\tc, db, err = dockertest.OpenPostgreSQLContainerConnection(15, time.Millisecond*500)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not set up PostgreSQL container: %v\", err)\n\t}\n\tdefer c.KillRemove()\n\n\tstore = New(db)\n\tif err = store.CreateSchemas(); err != nil {\n\t\tlog.Fatalf(\"Could not set up schemas: %v\", err)\n\t}\n\n\tos.Exit(m.Run())\n}\n\nfunc TestClientOperations(t *testing.T) {\n\tcreate := &osin.DefaultClient{\"1\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tcreateClient(t, store, create)\n\tgetClient(t, store, create)\n\n\tupdate := &osin.DefaultClient{\"1\", \"secret\", \"http:\/\/www.google.com\/\", \"\"}\n\tupdateClient(t, store, update)\n\tgetClient(t, store, update)\n}\n\nfunc TestAuthorizeOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"2\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tcreateClient(t, store, client)\n\n\tfor k, authorize := range []*osin.AuthorizeData{\n\t\t&osin.AuthorizeData{\n\t\t\tClient: client,\n\t\t\tCode: uuid.New(),\n\t\t\tExpiresIn: int32(60),\n\t\t\tScope: \"scope\",\n\t\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\t\tState: \"state\",\n\t\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\tUserData: userDataMock,\n\t\t},\n\t} {\n\t\t\/\/ Test save\n\t\trequire.Nil(t, store.SaveAuthorize(authorize))\n\n\t\t\/\/ Test fetch\n\t\tresult, err := store.LoadAuthorize(authorize.Code)\n\t\trequire.Nil(t, err)\n\t\trequire.True(t, reflect.DeepEqual(authorize, result), \"Case: %d\\n%v\\n\\n%v\", k, authorize, result)\n\n\t\t\/\/ Test remove\n\t\trequire.Nil(t, store.RemoveAuthorize(authorize.Code))\n\t\t_, err = store.LoadAuthorize(authorize.Code)\n\t\trequire.NotNil(t, err)\n\t}\n\n\tremoveClient(t, store, client)\n}\n\nfunc TestStoreFailsOnInvalidUserData(t *testing.T) {\n\tclient := &osin.DefaultClient{\"3\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tauthorize := &osin.AuthorizeData{\n\t\tClient: client,\n\t\tCode: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\tState: \"state\",\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: struct{ foo string }{\"bar\"},\n\t}\n\taccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nil,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: struct{ foo string }{\"bar\"},\n\t}\n\tassert.NotNil(t, store.SaveAuthorize(authorize))\n\tassert.NotNil(t, store.SaveAccess(access))\n}\n\nfunc TestAccessOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"3\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\tauthorize := &osin.AuthorizeData{\n\t\tClient: client,\n\t\tCode: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\tState: \"state\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\tnestedAccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nil,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\taccess := &osin.AccessData{\n\t\tClient: client,\n\t\tAuthorizeData: authorize,\n\t\tAccessData: nestedAccess,\n\t\tAccessToken: uuid.New(),\n\t\tRefreshToken: uuid.New(),\n\t\tExpiresIn: int32(60),\n\t\tScope: \"scope\",\n\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\/\/ FIXME this should be time.Now(), but an upstream ( https:\/\/github.com\/lib\/pq\/issues\/329 ) issue prevents this.\n\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\tUserData: userDataMock,\n\t}\n\n\tcreateClient(t, store, client)\n\trequire.Nil(t, store.SaveAuthorize(authorize))\n\trequire.Nil(t, store.SaveAccess(nestedAccess))\n\trequire.Nil(t, store.SaveAccess(access))\n\n\tresult, err := store.LoadAccess(access.AccessToken)\n\trequire.Nil(t, err)\n\trequire.True(t, reflect.DeepEqual(access, result))\n\n\trequire.Nil(t, store.RemoveAccess(access.AccessToken))\n\t_, err = store.LoadAccess(access.AccessToken)\n\trequire.NotNil(t, err)\n\trequire.Nil(t, store.RemoveAuthorize(authorize.Code))\n\n\tremoveClient(t, store, client)\n}\n\nfunc TestRefreshOperations(t *testing.T) {\n\tclient := &osin.DefaultClient{\"4\", \"secret\", \"http:\/\/localhost\/\", \"\"}\n\ttype test struct {\n\t\taccess *osin.AccessData\n\t}\n\n\tfor k, c := range []*test{\n\t\t&test{\n\t\t\taccess: &osin.AccessData{\n\t\t\t\tClient: client,\n\t\t\t\tAuthorizeData: &osin.AuthorizeData{\n\t\t\t\t\tClient: client,\n\t\t\t\t\tCode: uuid.New(),\n\t\t\t\t\tExpiresIn: int32(60),\n\t\t\t\t\tScope: \"scope\",\n\t\t\t\t\tRedirectUri: \"http:\/\/localhost\/\",\n\t\t\t\t\tState: \"state\",\n\t\t\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\t\t\tUserData: userDataMock,\n\t\t\t\t},\n\t\t\t\tAccessData: nil,\n\t\t\t\tAccessToken: uuid.New(),\n\t\t\t\tRefreshToken: uuid.New(),\n\t\t\t\tExpiresIn: int32(60),\n\t\t\t\tScope: \"scope\",\n\t\t\t\tRedirectUri: \"https:\/\/localhost\/\",\n\t\t\t\tCreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t\t\t\tUserData: userDataMock,\n\t\t\t},\n\t\t},\n\t} {\n\t\tcreateClient(t, store, client)\n\t\trequire.Nil(t, store.SaveAuthorize(c.access.AuthorizeData), \"Case %d\", k)\n\t\trequire.Nil(t, store.SaveAccess(c.access), \"Case %d\", k)\n\n\t\tresult, err := store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.Nil(t, err)\n\t\trequire.True(t, reflect.DeepEqual(c.access, result), \"Case %d\", k)\n\n\t\trequire.Nil(t, store.RemoveRefresh(c.access.RefreshToken))\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\n\t\trequire.NotNil(t, err, \"Case %d\", k)\n\t\trequire.Nil(t, store.RemoveAccess(c.access.AccessToken), \"Case %d\", k)\n\t\trequire.Nil(t, store.SaveAccess(c.access), \"Case %d\", k)\n\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.Nil(t, err, \"Case %d\", k)\n\n\t\trequire.Nil(t, store.RemoveAccess(c.access.AccessToken), \"Case %d\", k)\n\t\t_, err = store.LoadRefresh(c.access.RefreshToken)\n\t\trequire.NotNil(t, err, \"Case %d\", k)\n\n\t}\n\tremoveClient(t, store, client)\n}\n\nfunc getClient(t *testing.T, store storage.Storage, set osin.Client) {\n\tclient, err := store.GetClient(set.GetId())\n\trequire.Nil(t, err)\n\trequire.EqualValues(t, set, client)\n}\n\nfunc createClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.CreateClient(set))\n}\n\nfunc updateClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.UpdateClient(set))\n}\n\nfunc removeClient(t *testing.T, store storage.Storage, set osin.Client) {\n\trequire.Nil(t, store.RemoveClient(set.GetId()))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage filemutex\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\tprocLockFileEx = modkernel32.NewProc(\"LockFileEx\")\n\tprocUnlockFileEx = modkernel32.NewProc(\"UnlockFileEx\")\n)\n\nconst (\n\tlockfileExclusiveLock = 2\n)\n\nfunc lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {\n\tr1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\nfunc unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {\n\tr1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FileMutex is similar to sync.RWMutex, but also synchronizes across processes.\n\/\/ This implementation is based on flock syscall.\ntype FileMutex struct {\n\tfd syscall.Handle\n}\n\nfunc New(filename string) (*FileMutex, error) {\n\tfd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE,\n\t\tsyscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FileMutex{fd: fd}, nil\n}\n\nfunc (m *FileMutex) Lock() error {\n\tvar ol syscall.Overlapped\n\tif err := lockFileEx(m.fd, lockfileExclusiveLock, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) Unlock() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) RLock() error {\n\tvar ol syscall.Overlapped\n\tif err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) RUnlock() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n}\n\n\/\/ Close does an Unlock() combined with closing and unlinking the associated\n\/\/ lock file. You should create a New() FileMutex for every Lock() attempt if\n\/\/ using Close().\nfunc (m *FileMutex) Close() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn syscall.Close(m.fd)\n}\n<commit_msg>add return nil<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage filemutex\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\tprocLockFileEx = modkernel32.NewProc(\"LockFileEx\")\n\tprocUnlockFileEx = modkernel32.NewProc(\"UnlockFileEx\")\n)\n\nconst (\n\tlockfileExclusiveLock = 2\n)\n\nfunc lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {\n\tr1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\nfunc unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {\n\tr1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ FileMutex is similar to sync.RWMutex, but also synchronizes across processes.\n\/\/ This implementation is based on flock syscall.\ntype FileMutex struct {\n\tfd syscall.Handle\n}\n\nfunc New(filename string) (*FileMutex, error) {\n\tfd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE,\n\t\tsyscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FileMutex{fd: fd}, nil\n}\n\nfunc (m *FileMutex) Lock() error {\n\tvar ol syscall.Overlapped\n\tif err := lockFileEx(m.fd, lockfileExclusiveLock, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) Unlock() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) RLock() error {\n\tvar ol syscall.Overlapped\n\tif err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *FileMutex) RUnlock() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Close does an Unlock() combined with closing and unlinking the associated\n\/\/ lock file. You should create a New() FileMutex for every Lock() attempt if\n\/\/ using Close().\nfunc (m *FileMutex) Close() error {\n\tvar ol syscall.Overlapped\n\tif err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {\n\t\treturn err\n\t}\n\treturn syscall.Close(m.fd)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage logger\n\nimport (\n\t\"log\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\tpiazza \"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\nconst MOCKING = true\n\nfunc sleep() {\n\ttime.Sleep(1 * time.Second)\n}\n\ntype LoggerTester struct {\n\tsuite.Suite\n\n\tesi elasticsearch.IIndex\n\tsys *piazza.SystemConfig\n\tclient IClient\n\n\tgenericServer *piazza.GenericServer\n}\n\nfunc (suite *LoggerTester) setupFixture() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tvar required []piazza.ServiceName\n\tif MOCKING {\n\t\trequired = []piazza.ServiceName{}\n\t} else {\n\t\trequired = []piazza.ServiceName{piazza.PzElasticSearch}\n\t}\n\tsys, err := piazza.NewSystemConfig(piazza.PzLogger, required)\n\tassert.NoError(err)\n\tsuite.sys = sys\n\n\tesi, err := elasticsearch.NewIndexInterface(sys, \"loggertest$\", MOCKING)\n\tassert.NoError(err)\n\tsuite.esi = esi\n\n\tif MOCKING {\n\t\tclient, err := NewMockClient(sys)\n\t\tassert.NoError(err)\n\t\tsuite.client = client\n\t} else {\n\t\tclient, err := NewClient(sys)\n\t\tassert.NoError(err)\n\t\tsuite.client = client\n\t}\n\n\tloggerService := &LoggerService{}\n\tloggerService.Init(sys, esi)\n\n\tloggerServer := &LoggerServer{}\n\tloggerServer.Init(loggerService)\n\n\tsuite.genericServer = &piazza.GenericServer{Sys: sys}\n\n\terr = suite.genericServer.Configure(loggerServer.Routes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t_, err = suite.genericServer.Start()\n\tassert.NoError(err)\n}\n\nfunc (suite *LoggerTester) teardownFixture() {\n\tsuite.genericServer.Stop()\n\n\tsuite.esi.Close()\n\tsuite.esi.Delete()\n}\n\nfunc TestRunSuite(t *testing.T) {\n\ts := &LoggerTester{}\n\tsuite.Run(t, s)\n}\n\nfunc (suite *LoggerTester) getLastMessage() string {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tclient := suite.client\n\n\tformat := elasticsearch.QueryFormat{Size: 100, From: 0, Order: elasticsearch.SortAscending, Key: \"CreatedOn\"}\n\tms, err := client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.True(len(ms) > 0)\n\n\treturn ms[len(ms)-1].String()\n}\n\nfunc (suite *LoggerTester) Test01Elasticsearch() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tversion := suite.esi.GetVersion()\n\tassert.Contains(\"2.2.0\", version)\n}\n\nfunc (suite *LoggerTester) Test02One() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tvar err error\n\n\tdata1 := Message{\n\t\tService: \"log-tester\",\n\t\tAddress: \"128.1.2.3\",\n\t\tCreatedOn: time.Now(),\n\t\tSeverity: \"Info\",\n\t\tMessage: \"The quick brown fox\",\n\t}\n\n\tdata2 := Message{\n\t\tService: \"log-tester\",\n\t\tAddress: \"128.0.0.0\",\n\t\tCreatedOn: time.Now(),\n\t\tSeverity: \"Fatal\",\n\t\tMessage: \"The quick brown fox\",\n\t}\n\n\t{\n\t\terr = client.LogMessage(&data1)\n\t\tassert.NoError(err, \"PostToMessages\")\n\t}\n\n\tsleep()\n\n\t{\n\t\tactualMssg := suite.getLastMessage()\n\t\texpectedMssg := data1.String()\n\t\tassert.EqualValues(actualMssg, expectedMssg)\n\t}\n\n\t{\n\t\terr = client.LogMessage(&data2)\n\t\tassert.NoError(err, \"PostToMessages\")\n\t}\n\n\tsleep()\n\n\t{\n\t\tactualMssg := suite.getLastMessage()\n\t\texpectedMssg := data2.String()\n\t\tassert.EqualValues(actualMssg, expectedMssg)\n\t}\n\n\t{\n\t\tstats, err := client.GetFromAdminStats()\n\t\tassert.NoError(err, \"GetFromAdminStats\")\n\t\tassert.Equal(2, stats.NumMessages, \"stats check\")\n\t}\n}\n\nfunc (suite *LoggerTester) Test03Help() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\terr := suite.client.Log(\"mocktest\", \"0.0.0.0\", SeverityInfo, time.Now(), \"message from logger unit test via piazza.Log()\")\n\tassert.NoError(err, \"pzService.Log()\")\n}\n\nfunc (suite *LoggerTester) Test04Admin() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\t_, err := client.GetFromAdminStats()\n\tassert.NoError(err, \"GetFromAdminStats\")\n}\n\nfunc (suite *LoggerTester) Test05Pagination() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tif MOCKING {\n\t\t\/\/t.Skip(\"Skipping test, because mocking.\")\n\t}\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tclient.SetService(\"myservice\", \"1.2.3.4\")\n\n\terr := client.Debug(\"d\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Info(\"i\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Warn(\"w\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Error(\"e\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Fatal(\"f\")\n\tassert.NoError(err)\n\tsleep()\n\n\tformat := elasticsearch.QueryFormat{Size: 1, From: 0, Key: \"CreatedOn\", Order: elasticsearch.SortAscending}\n\tms, err := client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 1)\n\tassert.EqualValues(SeverityDebug, ms[0].Severity)\n\n\tformat = elasticsearch.QueryFormat{Size: 5, From: 0, Key: \"CreatedOn\", Order: elasticsearch.SortAscending}\n\tms, err = client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 5)\n\tassert.EqualValues(SeverityFatal, ms[4].Severity)\n\n\tformat = elasticsearch.QueryFormat{Size: 3, From: 2, Key: \"CreatedOn\", Order: elasticsearch.SortDescending}\n\tms, err = client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 3)\n\tassert.EqualValues(SeverityWarning, ms[2].Severity)\n\tassert.EqualValues(SeverityError, ms[1].Severity)\n\tassert.EqualValues(SeverityFatal, ms[0].Severity)\n}\n\nfunc (suite *LoggerTester) Test06OtherParams() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tif MOCKING {\n\t\tt.Skip(\"Skipping test, because mocking.\")\n\t}\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tclient.SetService(\"myservice\", \"1.2.3.4\")\n\n\t\/\/sometime, err := time.Parse(time.RFC3339, \"1985-04-12T23:20:50.52Z\")\n\t\/\/assert.NoError(err)\n\tsometime := time.Now()\n\n\tvar testData = []Message{\n\t\t{\n\t\t\tAddress: \"gnemud7smfv\/10.254.0.66\",\n\t\t\tMessage: \"Received Message to Relay on topic Request-Job with key f3b63085-b482-4ae8-8297-3c7d1fcfff7d\",\n\t\t\tService: \"Dispatcher\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Processed Update Status for Job 6d0ea538-4382-4ea5-9669-56519b8c8f58 with Status Success\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"0.0.0.0\",\n\t\t\tMessage: \"generated 1: 09d4ec60-ea61-4066-8697-5568a47f84bf\",\n\t\t\tService: \"pz-uuidgen\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Handling Job with Topic Create-Job for Job ID 09d4ec60-ea61-4066-8697-5568a47f84bf\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Handling Job with Topic Update-Job for Job ID be4ce034-1187-4a4f-95a9-a9c31826248b\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t},\n\t}\n\n\tfor _, e := range testData {\n\t\t\/\/ log.Printf(\"%d, %v\\n\", i, e)\n\t\terr := client.LogMessage(&e)\n\t\tassert.NoError(err)\n\t}\n\n\tsleep()\n\n\tformat := elasticsearch.QueryFormat{\n\t\tSize: 100, From: 0,\n\t\tOrder: elasticsearch.SortDescending,\n\t\tKey: \"CreatedOn\"}\n\n\tmsgs, err := client.GetFromMessages(format,\n\t\tmap[string]string{\n\t\t\t\"service\": \"JobManager\",\n\t\t\t\"contains\": \"Success\",\n\t\t})\n\tassert.NoError(err)\n\tassert.Len(msgs, 1)\n\n\t\/\/for _, msg := range msgs {\n\t\/\/log.Printf(\"%v\\n\", msg)\n\t\/\/}\n\n\tmsgs, err = client.GetFromMessages(format,\n\t\tmap[string]string{\n\t\t\t\"before\": \"1461181461\",\n\t\t\t\"after\": \"1461181362\",\n\t\t})\n\n\tassert.NoError(err)\n\tassert.Len(msgs, 4)\n\n\t\/\/for _, msg := range msgs {\n\t\/\/log.Printf(\"%v\\n\", msg)\n\t\/\/}\n\n}\n<commit_msg>Fix test<commit_after>\/\/ Copyright 2016, RadiantBlue Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage logger\n\nimport (\n\t\"log\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"github.com\/venicegeo\/pz-gocommon\/elasticsearch\"\n\tpiazza \"github.com\/venicegeo\/pz-gocommon\/gocommon\"\n)\n\nconst MOCKING = true\n\nfunc sleep() {\n\ttime.Sleep(1 * time.Second)\n}\n\ntype LoggerTester struct {\n\tsuite.Suite\n\n\tesi elasticsearch.IIndex\n\tsys *piazza.SystemConfig\n\tclient IClient\n\n\tgenericServer *piazza.GenericServer\n}\n\nfunc (suite *LoggerTester) setupFixture() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tvar required []piazza.ServiceName\n\tif MOCKING {\n\t\trequired = []piazza.ServiceName{}\n\t} else {\n\t\trequired = []piazza.ServiceName{piazza.PzElasticSearch}\n\t}\n\tsys, err := piazza.NewSystemConfig(piazza.PzLogger, required)\n\tassert.NoError(err)\n\tsuite.sys = sys\n\n\tesi, err := elasticsearch.NewIndexInterface(sys, \"loggertest$\", \"\", MOCKING)\n\tassert.NoError(err)\n\tsuite.esi = esi\n\n\tif MOCKING {\n\t\tclient, err := NewMockClient(sys)\n\t\tassert.NoError(err)\n\t\tsuite.client = client\n\t} else {\n\t\tclient, err := NewClient(sys)\n\t\tassert.NoError(err)\n\t\tsuite.client = client\n\t}\n\n\tloggerService := &LoggerService{}\n\tloggerService.Init(sys, esi)\n\n\tloggerServer := &LoggerServer{}\n\tloggerServer.Init(loggerService)\n\n\tsuite.genericServer = &piazza.GenericServer{Sys: sys}\n\n\terr = suite.genericServer.Configure(loggerServer.Routes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t_, err = suite.genericServer.Start()\n\tassert.NoError(err)\n}\n\nfunc (suite *LoggerTester) teardownFixture() {\n\tsuite.genericServer.Stop()\n\n\tsuite.esi.Close()\n\tsuite.esi.Delete()\n}\n\nfunc TestRunSuite(t *testing.T) {\n\ts := &LoggerTester{}\n\tsuite.Run(t, s)\n}\n\nfunc (suite *LoggerTester) getLastMessage() string {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tclient := suite.client\n\n\tformat := elasticsearch.QueryFormat{Size: 100, From: 0, Order: elasticsearch.SortAscending, Key: \"CreatedOn\"}\n\tms, err := client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.True(len(ms) > 0)\n\n\treturn ms[len(ms)-1].String()\n}\n\nfunc (suite *LoggerTester) Test01Elasticsearch() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tversion := suite.esi.GetVersion()\n\tassert.Contains(\"2.2.0\", version)\n}\n\nfunc (suite *LoggerTester) Test02One() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tvar err error\n\n\tdata1 := Message{\n\t\tService: \"log-tester\",\n\t\tAddress: \"128.1.2.3\",\n\t\tCreatedOn: time.Now(),\n\t\tSeverity: \"Info\",\n\t\tMessage: \"The quick brown fox\",\n\t}\n\n\tdata2 := Message{\n\t\tService: \"log-tester\",\n\t\tAddress: \"128.0.0.0\",\n\t\tCreatedOn: time.Now(),\n\t\tSeverity: \"Fatal\",\n\t\tMessage: \"The quick brown fox\",\n\t}\n\n\t{\n\t\terr = client.LogMessage(&data1)\n\t\tassert.NoError(err, \"PostToMessages\")\n\t}\n\n\tsleep()\n\n\t{\n\t\tactualMssg := suite.getLastMessage()\n\t\texpectedMssg := data1.String()\n\t\tassert.EqualValues(actualMssg, expectedMssg)\n\t}\n\n\t{\n\t\terr = client.LogMessage(&data2)\n\t\tassert.NoError(err, \"PostToMessages\")\n\t}\n\n\tsleep()\n\n\t{\n\t\tactualMssg := suite.getLastMessage()\n\t\texpectedMssg := data2.String()\n\t\tassert.EqualValues(actualMssg, expectedMssg)\n\t}\n\n\t{\n\t\tstats, err := client.GetFromAdminStats()\n\t\tassert.NoError(err, \"GetFromAdminStats\")\n\t\tassert.Equal(2, stats.NumMessages, \"stats check\")\n\t}\n}\n\nfunc (suite *LoggerTester) Test03Help() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\terr := suite.client.Log(\"mocktest\", \"0.0.0.0\", SeverityInfo, time.Now(), \"message from logger unit test via piazza.Log()\")\n\tassert.NoError(err, \"pzService.Log()\")\n}\n\nfunc (suite *LoggerTester) Test04Admin() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\t_, err := client.GetFromAdminStats()\n\tassert.NoError(err, \"GetFromAdminStats\")\n}\n\nfunc (suite *LoggerTester) Test05Pagination() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tif MOCKING {\n\t\t\/\/t.Skip(\"Skipping test, because mocking.\")\n\t}\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tclient.SetService(\"myservice\", \"1.2.3.4\")\n\n\terr := client.Debug(\"d\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Info(\"i\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Warn(\"w\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Error(\"e\")\n\tassert.NoError(err)\n\tsleep()\n\terr = client.Fatal(\"f\")\n\tassert.NoError(err)\n\tsleep()\n\n\tformat := elasticsearch.QueryFormat{Size: 1, From: 0, Key: \"CreatedOn\", Order: elasticsearch.SortAscending}\n\tms, err := client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 1)\n\tassert.EqualValues(SeverityDebug, ms[0].Severity)\n\n\tformat = elasticsearch.QueryFormat{Size: 5, From: 0, Key: \"CreatedOn\", Order: elasticsearch.SortAscending}\n\tms, err = client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 5)\n\tassert.EqualValues(SeverityFatal, ms[4].Severity)\n\n\tformat = elasticsearch.QueryFormat{Size: 3, From: 2, Key: \"CreatedOn\", Order: elasticsearch.SortDescending}\n\tms, err = client.GetFromMessages(format, map[string]string{})\n\tassert.NoError(err)\n\tassert.Len(ms, 3)\n\tassert.EqualValues(SeverityWarning, ms[2].Severity)\n\tassert.EqualValues(SeverityError, ms[1].Severity)\n\tassert.EqualValues(SeverityFatal, ms[0].Severity)\n}\n\nfunc (suite *LoggerTester) Test06OtherParams() {\n\tt := suite.T()\n\tassert := assert.New(t)\n\n\tif MOCKING {\n\t\tt.Skip(\"Skipping test, because mocking.\")\n\t}\n\n\tsuite.setupFixture()\n\tdefer suite.teardownFixture()\n\n\tclient := suite.client\n\n\tclient.SetService(\"myservice\", \"1.2.3.4\")\n\n\t\/\/sometime, err := time.Parse(time.RFC3339, \"1985-04-12T23:20:50.52Z\")\n\t\/\/assert.NoError(err)\n\tsometime := time.Now()\n\n\tvar testData = []Message{\n\t\t{\n\t\t\tAddress: \"gnemud7smfv\/10.254.0.66\",\n\t\t\tMessage: \"Received Message to Relay on topic Request-Job with key f3b63085-b482-4ae8-8297-3c7d1fcfff7d\",\n\t\t\tService: \"Dispatcher\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Processed Update Status for Job 6d0ea538-4382-4ea5-9669-56519b8c8f58 with Status Success\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"0.0.0.0\",\n\t\t\tMessage: \"generated 1: 09d4ec60-ea61-4066-8697-5568a47f84bf\",\n\t\t\tService: \"pz-uuidgen\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Handling Job with Topic Create-Job for Job ID 09d4ec60-ea61-4066-8697-5568a47f84bf\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t}, {\n\t\t\tAddress: \"gnfbnqsn5m9\/10.254.0.14\",\n\t\t\tMessage: \"Handling Job with Topic Update-Job for Job ID be4ce034-1187-4a4f-95a9-a9c31826248b\",\n\t\t\tService: \"JobManager\",\n\t\t\tSeverity: \"Info\",\n\t\t\tCreatedOn: sometime,\n\t\t},\n\t}\n\n\tfor _, e := range testData {\n\t\t\/\/ log.Printf(\"%d, %v\\n\", i, e)\n\t\terr := client.LogMessage(&e)\n\t\tassert.NoError(err)\n\t}\n\n\tsleep()\n\n\tformat := elasticsearch.QueryFormat{\n\t\tSize: 100, From: 0,\n\t\tOrder: elasticsearch.SortDescending,\n\t\tKey: \"CreatedOn\"}\n\n\tmsgs, err := client.GetFromMessages(format,\n\t\tmap[string]string{\n\t\t\t\"service\": \"JobManager\",\n\t\t\t\"contains\": \"Success\",\n\t\t})\n\tassert.NoError(err)\n\tassert.Len(msgs, 1)\n\n\t\/\/for _, msg := range msgs {\n\t\/\/log.Printf(\"%v\\n\", msg)\n\t\/\/}\n\n\tmsgs, err = client.GetFromMessages(format,\n\t\tmap[string]string{\n\t\t\t\"before\": \"1461181461\",\n\t\t\t\"after\": \"1461181362\",\n\t\t})\n\n\tassert.NoError(err)\n\tassert.Len(msgs, 4)\n\n\t\/\/for _, msg := range msgs {\n\t\/\/log.Printf(\"%v\\n\", msg)\n\t\/\/}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package toolbox_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/viant\/toolbox\"\n\t\"testing\"\n)\n\nfunc TestNewFileSetInfoInfo(t *testing.T) {\n\n\tfileSetInfo, err := toolbox.NewFileSetInfo(\".\/fileset_info_test\/\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tassert.Equal(t, 2, len(fileSetInfo.FilesInfo()))\n\n\tfileInfo := fileSetInfo.FileInfo(\"user_test.go\")\n\tassert.NotNil(t, fileInfo)\n\tassert.False(t, fileInfo.HasStructInfo(\"F\"))\n\tassert.True(t, fileInfo.HasStructInfo(\"User\"))\n\n\tassert.Equal(t, 2, len(fileInfo.Structs()))\n\n\taddress := fileSetInfo.Struct(\"Address\")\n\tassert.NotNil(t, address)\n\n\taddress2 := fileSetInfo.Struct(\"Address2\")\n\tassert.Nil(t, address2)\n\n\tuserInfo := fileInfo.Struct(\"User\")\n\tassert.NotNil(t, userInfo)\n\n\tassert.True(t, userInfo.HasField(\"ID\"))\n\tassert.True(t, userInfo.HasField(\"Name\"))\n\tassert.False(t, userInfo.HasField(\"FF\"))\n\n\tassert.Equal(t, 8, len(userInfo.Fields()))\n\n\tidInfo := userInfo.Field(\"ID\")\n\tassert.True(t, idInfo.IsPointer)\n\tassert.Equal(t, \"int\", idInfo.TypeName)\n\tassert.Equal(t, true, idInfo.IsPointer)\n\n\tdobInfo := userInfo.Field(\"DateOfBirth\")\n\tassert.True(t, dobInfo.IsStruct)\n\tassert.Equal(t, \"time.Time\", dobInfo.TypeName)\n\tassert.Equal(t, \"time\", dobInfo.TypePackage)\n\n\tassert.Equal(t, \"`foo=\\\"bar\\\"`\", dobInfo.Tag)\n\n\taddressPointer := userInfo.Field(\"AddressPointer\")\n\tassert.NotNil(t, addressPointer)\n\tassert.Equal(t, true, addressPointer.IsStruct)\n\tassert.Equal(t, \"Address\", addressPointer.TypeName)\n\n\tcInfo := userInfo.Field(\"C\")\n\tassert.True(t, cInfo.IsChannel)\n\n\tmInfo := userInfo.Field(\"M\")\n\tassert.True(t, mInfo.IsMap)\n\tassert.Equal(t, \"string\", mInfo.KeyTypeName)\n\tassert.Equal(t, \"[]string\", mInfo.ValueTypeName)\n\n\tintsInfo := userInfo.Field(\"Ints\")\n\tassert.True(t, intsInfo.IsSlice)\n\tassert.Equal(t, \"my comments\", userInfo.Comment)\n\n\tassert.False(t, userInfo.HasReceiver(\"Abc\"))\n\n\tassert.Equal(t, 3, len(userInfo.Receivers()))\n\tassert.True(t, userInfo.HasReceiver(\"Test\"))\n\tassert.True(t, userInfo.HasReceiver(\"Test2\"))\n\n\treceiver := userInfo.Receiver(\"Test\")\n\tassert.NotNil(t, receiver)\n\n}\n<commit_msg>patched test<commit_after>package toolbox_test\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/viant\/toolbox\"\n\t\"testing\"\n)\n\nfunc TestNewFileSetInfoInfo(t *testing.T) {\n\n\tfileSetInfo, err := toolbox.NewFileSetInfo(\".\/fileset_info_test\/\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tassert.Equal(t, 2, len(fileSetInfo.FilesInfo()))\n\n\tfileInfo := fileSetInfo.FileInfo(\"user_test.go\")\n\tassert.NotNil(t, fileInfo)\n\tassert.False(t, fileInfo.HasStructInfo(\"F\"))\n\tassert.True(t, fileInfo.HasStructInfo(\"User\"))\n\n\tassert.Equal(t, 2, len(fileInfo.Structs()))\n\n\taddress := fileSetInfo.Struct(\"Address\")\n\tassert.NotNil(t, address)\n\n\taddress2 := fileSetInfo.Struct(\"Address2\")\n\tassert.Nil(t, address2)\n\n\tuserInfo := fileInfo.Struct(\"User\")\n\tassert.NotNil(t, userInfo)\n\n\tassert.True(t, userInfo.HasField(\"ID\"))\n\tassert.True(t, userInfo.HasField(\"Name\"))\n\tassert.False(t, userInfo.HasField(\"FF\"))\n\n\tassert.Equal(t, 8, len(userInfo.Fields()))\n\n\tidInfo := userInfo.Field(\"ID\")\n\tassert.True(t, idInfo.IsPointer)\n\tassert.Equal(t, \"int\", idInfo.TypeName)\n\tassert.Equal(t, true, idInfo.IsPointer)\n\n\tdobInfo := userInfo.Field(\"DateOfBirth\")\n\tassert.True(t, dobInfo.IsStruct)\n\tassert.Equal(t, \"time.Time\", dobInfo.TypeName)\n\tassert.Equal(t, \"time\", dobInfo.TypePackage)\n\n\tassert.Equal(t, \"`foo=\\\"bar\\\"`\", dobInfo.Tag)\n\n\taddressPointer := userInfo.Field(\"AddressPointer\")\n\tassert.NotNil(t, addressPointer)\n\tassert.Equal(t, true, addressPointer.IsStruct)\n\tassert.Equal(t, \"Address\", addressPointer.TypeName)\n\n\tcInfo := userInfo.Field(\"C\")\n\tassert.True(t, cInfo.IsChannel)\n\n\tmInfo := userInfo.Field(\"M\")\n\tassert.True(t, mInfo.IsMap)\n\tassert.Equal(t, \"string\", mInfo.KeyTypeName)\n\tassert.Equal(t, \"[]string\", mInfo.ValueTypeName)\n\n\tintsInfo := userInfo.Field(\"Ints\")\n\tassert.True(t, intsInfo.IsSlice)\n\tassert.Equal(t, \"my comments\", userInfo.Comment)\n\n\tassert.False(t, userInfo.HasReceiver(\"Abc\"))\n\n\tassert.Equal(t, 3, len(userInfo.Receivers()))\n\tassert.True(t, userInfo.HasReceiver(\"Test\"))\n\tassert.True(t, userInfo.HasReceiver(\"Test2\"))\n\n\treceiver := userInfo.Receiver(\"Test\")\n\tassert.NotNil(t, receiver)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/DataDog\/datadog-go\/statsd\"\n\t\"github.com\/gophr-pm\/gophr\/lib\/config\"\n\t\"github.com\/gophr-pm\/gophr\/lib\/datadog\"\n\t\"github.com\/gorilla\/mux\"\n)\n\n\/\/ DepotRequestHandlerArgs lol\ntype DepotRequestHandlerArgs struct {\n\tconfig *config.Config\n\tdataDogClient datadog.Client\n}\n\nfunc main() {\n\tvar (\n\t\tr = mux.NewRouter()\n\t\tconf = config.GetConfig()\n\t\tendpoint = fmt.Sprintf(\n\t\t\t\"\/repos\/{%s}\/{%s}\/{%s}\",\n\t\t\turlVarAuthor,\n\t\t\turlVarRepo,\n\t\t\turlVarSHA)\n\t)\n\n\t\/\/ Initialize datadog client.\n\tdataDogClient, err := datadog.NewClient(conf, \"depot.\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ Register the status route.\n\tr.HandleFunc(\"\/status\", StatusHandler()).Methods(\"GET\")\n\t\/\/ Register all the remaining routes for the main endpoint.\n\tr.HandleFunc(endpoint, RepoExistsHandler(conf, dataDogClient)).Methods(\"GET\")\n\tr.HandleFunc(endpoint, CreateRepoHandler(conf, dataDogClient)).Methods(\"POST\")\n\tr.HandleFunc(endpoint, DeleteRepoHandler(conf, dataDogClient)).Methods(\"DELETE\")\n\n\t\/\/ Start tailing the nginx logs.\n\tif err := tailNginxLogs(); err != nil {\n\t\tlog.Fatalln(\"Failed to start a tail on the nginx logs:\", err)\n\t}\n\n\t\/\/ Start serving.\n\tlog.Printf(\"Servicing HTTP requests on port %d.\\n\", conf.Port)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", conf.Port), r)\n}\n<commit_msg>fix broken import<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gophr-pm\/gophr\/lib\/config\"\n\t\"github.com\/gophr-pm\/gophr\/lib\/datadog\"\n\t\"github.com\/gorilla\/mux\"\n)\n\n\/\/ DepotRequestHandlerArgs lol\ntype DepotRequestHandlerArgs struct {\n\tconfig *config.Config\n\tdataDogClient datadog.Client\n}\n\nfunc main() {\n\tvar (\n\t\tr = mux.NewRouter()\n\t\tconf = config.GetConfig()\n\t\tendpoint = fmt.Sprintf(\n\t\t\t\"\/repos\/{%s}\/{%s}\/{%s}\",\n\t\t\turlVarAuthor,\n\t\t\turlVarRepo,\n\t\t\turlVarSHA)\n\t)\n\n\t\/\/ Initialize datadog client.\n\tdataDogClient, err := datadog.NewClient(conf, \"depot.\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ Register the status route.\n\tr.HandleFunc(\"\/status\", StatusHandler()).Methods(\"GET\")\n\t\/\/ Register all the remaining routes for the main endpoint.\n\tr.HandleFunc(endpoint, RepoExistsHandler(conf, dataDogClient)).Methods(\"GET\")\n\tr.HandleFunc(endpoint, CreateRepoHandler(conf, dataDogClient)).Methods(\"POST\")\n\tr.HandleFunc(endpoint, DeleteRepoHandler(conf, dataDogClient)).Methods(\"DELETE\")\n\n\t\/\/ Start tailing the nginx logs.\n\tif err := tailNginxLogs(); err != nil {\n\t\tlog.Fatalln(\"Failed to start a tail on the nginx logs:\", err)\n\t}\n\n\t\/\/ Start serving.\n\tlog.Printf(\"Servicing HTTP requests on port %d.\\n\", conf.Port)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", conf.Port), r)\n}\n<|endoftext|>"} {"text":"<commit_before>package pewpew\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestParseKeyValString(t *testing.T) {\n\tcases := []struct {\n\t\tstr string\n\t\tdelim1 string\n\t\tdelim2 string\n\t\twant map[string]string\n\t\thasErr bool\n\t}{\n\t\t{\"\", \"\", \"\", map[string]string{}, true},\n\t\t{\"\", \":\", \";\", map[string]string{}, true},\n\t\t{\"\", \":\", \":\", map[string]string{}, true},\n\t\t{\"abc:123;\", \";\", \":\", map[string]string{\"abc\": \"123\"}, true},\n\t\t{\"abc:123\", \";\", \":\", map[string]string{\"abc\": \"123\"}, false},\n\t\t{\"key1: val2, key3 : val4,key5:val6\", \",\", \":\", map[string]string{\"key1\": \"val2\", \"key3\": \"val4\", \"key5\": \"val6\"}, false},\n\t}\n\tfor _, c := range cases {\n\t\tresult, err := parseKeyValString(c.str, c.delim1, c.delim2)\n\t\tif (err != nil) != c.hasErr {\n\t\t\tt.Errorf(\"parseKeyValString(%q, %q, %q) err: %t wanted %t\", c.str, c.delim1, c.delim2, (err != nil), c.hasErr)\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && !reflect.DeepEqual(result, c.want) {\n\t\t\tt.Errorf(\"parseKeyValString(%q, %q, %q) == %v wanted %v\", c.str, c.delim1, c.delim2, result, c.want)\n\t\t}\n\t}\n}\n\nfunc TestBuildRequest(t *testing.T) {\n\tcases := []struct {\n\t\ttarget Target\n\t\thasErr bool\n\t}{\n\t\t{Target{}, true}, \/\/empty url\n\t\t{Target{URL: \"\"}, true}, \/\/empty url\n\t\t{Target{URL: \"\", RegexURL: true}, true}, \/\/empty regex url\n\t\t{Target{URL: \"h\"}, true}, \/\/hostname too short\n\t\t{Target{URL: \"http:\/\/(*\", RegexURL: true}, true}, \/\/invalid regex\n\t\t{Target{URL: \"http:\/\/\/\/\/\"}, true}, \/\/invalid hostname\n\t\t{Target{URL: \"http:\/\/%%%\"}, true}, \/\/net\/url will fail parsing\n\t\t{Target{URL: \"http:\/\/\"}, true}, \/\/empty hostname\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: \"\/thisfiledoesnotexist\"}, true}, \/\/bad file\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tHeaders: \",,,\"}, true}, \/\/invalid headers\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tHeaders: \"a:b,c,d\"}, true}, \/\/invalid headers\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tCookies: \";;;\"}, true}, \/\/invalid cookies\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tCookies: \"a=b;c;d\"}, true}, \/\/invalid cookies\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \"user:\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \":pass\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \"::\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tMethod: \"@\"}, true}, \/\/invalid method\n\n\t\t\/\/good cases\n\t\t{Target{URL: \"localhost\"}, false}, \/\/missing scheme (http:\/\/) should be auto fixed\n\t\t{Target{URL: \"http:\/\/localhost:80\"}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tMethod: \"POST\",\n\t\t\tBody: \"data\"}, false},\n\t\t{Target{URL: \"https:\/\/www.github.com\"}, false},\n\t\t{Target{URL: \"http:\/\/github.com\"}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: \"\"}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: tempFilename}, false},\n\t\t{Target{URL: \"http:\/\/localhost:80\/path\/?param=val&another=one\",\n\t\t\tHeaders: \"Accept-Encoding:gzip, Content-Type:application\/json\",\n\t\t\tCookies: \"a=b;c=d\",\n\t\t\tUserAgent: \"pewpewpew\",\n\t\t\tBasicAuth: \"user:pass\"}, false},\n\t}\n\tfor _, c := range cases {\n\t\t_, err := buildRequest(c.target)\n\t\tif (err != nil) != c.hasErr {\n\t\t\tt.Errorf(\"buildRequest(%+v) err: %t wanted: %t\", c.target, (err != nil), c.hasErr)\n\t\t}\n\t}\n}\n\nfunc TestCreateClient(t *testing.T) {\n\tcases := []struct {\n\t\ttarget Target\n\t}{\n\t\t{Target{}}, \/\/empty\n\t\t{Target{EnforceSSL: true}},\n\t\t{Target{EnforceSSL: false}},\n\t\t{Target{Compress: true}},\n\t\t{Target{Compress: false}},\n\t\t{Target{KeepAlive: true}},\n\t\t{Target{KeepAlive: false}},\n\t\t{Target{NoHTTP2: true}},\n\t\t{Target{NoHTTP2: false}},\n\t\t{Target{Timeout: \"\"}},\n\t\t{Target{Timeout: \"1s\"}},\n\t\t{Target{FollowRedirects: true}},\n\t\t{Target{FollowRedirects: false}},\n\t}\n\tfor _, c := range cases {\n\t\tcreateClient(c.target)\n\t}\n}\n<commit_msg>Tests for DNS Prefetch (#24)<commit_after>package pewpew\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestParseKeyValString(t *testing.T) {\n\tcases := []struct {\n\t\tstr string\n\t\tdelim1 string\n\t\tdelim2 string\n\t\twant map[string]string\n\t\thasErr bool\n\t}{\n\t\t{\"\", \"\", \"\", map[string]string{}, true},\n\t\t{\"\", \":\", \";\", map[string]string{}, true},\n\t\t{\"\", \":\", \":\", map[string]string{}, true},\n\t\t{\"abc:123;\", \";\", \":\", map[string]string{\"abc\": \"123\"}, true},\n\t\t{\"abc:123\", \";\", \":\", map[string]string{\"abc\": \"123\"}, false},\n\t\t{\"key1: val2, key3 : val4,key5:val6\", \",\", \":\", map[string]string{\"key1\": \"val2\", \"key3\": \"val4\", \"key5\": \"val6\"}, false},\n\t}\n\tfor _, c := range cases {\n\t\tresult, err := parseKeyValString(c.str, c.delim1, c.delim2)\n\t\tif (err != nil) != c.hasErr {\n\t\t\tt.Errorf(\"parseKeyValString(%q, %q, %q) err: %t wanted %t\", c.str, c.delim1, c.delim2, (err != nil), c.hasErr)\n\t\t\tcontinue\n\t\t}\n\t\tif err == nil && !reflect.DeepEqual(result, c.want) {\n\t\t\tt.Errorf(\"parseKeyValString(%q, %q, %q) == %v wanted %v\", c.str, c.delim1, c.delim2, result, c.want)\n\t\t}\n\t}\n}\n\nfunc TestBuildRequest(t *testing.T) {\n\tcases := []struct {\n\t\ttarget Target\n\t\thasErr bool\n\t}{\n\t\t{Target{}, true}, \/\/empty url\n\t\t{Target{URL: \"\"}, true}, \/\/empty url\n\t\t{Target{URL: \"\", RegexURL: true}, true}, \/\/empty regex url\n\t\t{Target{URL: \"h\"}, true}, \/\/hostname too short\n\t\t{Target{URL: \"http:\/\/(*\", RegexURL: true}, true}, \/\/invalid regex\n\t\t{Target{URL: \"http:\/\/\/\/\/\"}, true}, \/\/invalid hostname\n\t\t{Target{URL: \"http:\/\/%%%\"}, true}, \/\/net\/url will fail parsing\n\t\t{Target{URL: \"http:\/\/\"}, true}, \/\/empty hostname\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: \"\/thisfiledoesnotexist\"}, true}, \/\/bad file\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tHeaders: \",,,\"}, true}, \/\/invalid headers\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tHeaders: \"a:b,c,d\"}, true}, \/\/invalid headers\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tCookies: \";;;\"}, true}, \/\/invalid cookies\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tCookies: \"a=b;c;d\"}, true}, \/\/invalid cookies\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \"user:\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \":pass\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBasicAuth: \"::\"}, true}, \/\/invalid basic auth\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tMethod: \"@\"}, true}, \/\/invalid method\n\t\t{Target{URL: \"https:\/\/invaliddomain.invalidtld\",\n\t\t\tDNSPrefetch: true}, true},\n\n\t\t\/\/good cases\n\t\t{Target{URL: \"localhost\"}, false}, \/\/missing scheme (http:\/\/) should be auto fixed\n\t\t{Target{URL: \"http:\/\/localhost:80\"}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tMethod: \"POST\",\n\t\t\tBody: \"data\"}, false},\n\t\t{Target{URL: \"https:\/\/www.github.com\"}, false},\n\t\t{Target{URL: \"http:\/\/github.com\"}, false},\n\t\t{Target{URL: \"https:\/\/www.github.com\",\n\t\t\tDNSPrefetch: true}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: \"\"}, false},\n\t\t{Target{URL: \"http:\/\/localhost\",\n\t\t\tBodyFilename: tempFilename}, false},\n\t\t{Target{URL: \"http:\/\/localhost:80\/path\/?param=val&another=one\",\n\t\t\tHeaders: \"Accept-Encoding:gzip, Content-Type:application\/json\",\n\t\t\tCookies: \"a=b;c=d\",\n\t\t\tUserAgent: \"pewpewpew\",\n\t\t\tBasicAuth: \"user:pass\"}, false},\n\t}\n\tfor _, c := range cases {\n\t\t_, err := buildRequest(c.target)\n\t\tif (err != nil) != c.hasErr {\n\t\t\tt.Errorf(\"buildRequest(%+v) err: %t wanted: %t\", c.target, (err != nil), c.hasErr)\n\t\t}\n\t}\n}\n\nfunc TestCreateClient(t *testing.T) {\n\tcases := []struct {\n\t\ttarget Target\n\t}{\n\t\t{Target{}}, \/\/empty\n\t\t{Target{EnforceSSL: true}},\n\t\t{Target{EnforceSSL: false}},\n\t\t{Target{Compress: true}},\n\t\t{Target{Compress: false}},\n\t\t{Target{KeepAlive: true}},\n\t\t{Target{KeepAlive: false}},\n\t\t{Target{NoHTTP2: true}},\n\t\t{Target{NoHTTP2: false}},\n\t\t{Target{Timeout: \"\"}},\n\t\t{Target{Timeout: \"1s\"}},\n\t\t{Target{FollowRedirects: true}},\n\t\t{Target{FollowRedirects: false}},\n\t}\n\tfor _, c := range cases {\n\t\tcreateClient(c.target)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package digitalocean\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\/diff\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n\n\t\"github.com\/digitalocean\/godo\"\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/*\n\nDigitalocean API DNS provider:\n\nInfo required in `creds.json`:\n - token\n\n*\/\n\ntype DoApi struct {\n\tclient *godo.Client\n}\n\nvar defaultNameServerNames = []string{\n\t\"ns1.digitalocean.com\",\n\t\"ns2.digitalocean.com\",\n\t\"ns3.digitalocean.com\",\n}\n\nfunc newDo(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tif m[\"token\"] == \"\" {\n\t\treturn nil, fmt.Errorf(\"Digitalocean Token must be provided.\")\n\t}\n\n\tctx := context.Background()\n\toauthClient := oauth2.NewClient(\n\t\tctx,\n\t\toauth2.StaticTokenSource(&oauth2.Token{AccessToken: m[\"token\"]}),\n\t)\n\tclient := godo.NewClient(oauthClient)\n\n\tapi := &DoApi{client: client}\n\n\t\/\/ Get a domain to validate the token\n\t_, resp, err := api.client.Domains.List(ctx, &godo.ListOptions{PerPage: 1})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Digitalocean Token is not valid.\")\n\t}\n\n\treturn api, nil\n}\n\nvar docNotes = providers.DocumentationNotes{\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n}\n\nfunc init() {\n\tproviders.RegisterDomainServiceProviderType(\"DIGITALOCEAN\", newDo, providers.CanUseSRV, docNotes)\n}\n\nfunc (api *DoApi) EnsureDomainExists(domain string) error {\n\tctx := context.Background()\n\t_, resp, err := api.client.Domains.Get(ctx, domain)\n\tif resp.StatusCode == http.StatusNotFound {\n\t\t_, _, err := api.client.Domains.Create(ctx, &godo.DomainCreateRequest{\n\t\t\tName: domain,\n\t\t\tIPAddress: \"\",\n\t\t})\n\t\treturn err\n\t} else {\n\t\treturn err\n\t}\n}\n\nfunc (api *DoApi) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\treturn models.StringsToNameservers(defaultNameServerNames), nil\n}\n\nfunc (api *DoApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tctx := context.Background()\n\tdc.Punycode()\n\n\trecords, err := getRecords(api, dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(dc, &records[i])\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, delete, modify := differ.IncrementalDiff(existingRecords)\n\n\tvar corrections = []*models.Correction{}\n\n\t\/\/ Deletes first so changing type works etc.\n\tfor _, m := range delete {\n\t\tid := m.Existing.Original.(*godo.DomainRecord).ID\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, DO ID: %d\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\t_, err := api.client.Domains.DeleteRecord(ctx, dc.Name, id)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\tfor _, m := range create {\n\t\treq := toReq(dc, m.Desired)\n\t\tcorr := &models.Correction{\n\t\t\tMsg: m.String(),\n\t\t\tF: func() error {\n\t\t\t\t_, _, err := api.client.Domains.CreateRecord(ctx, dc.Name, req)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\tfor _, m := range modify {\n\t\tid := m.Existing.Original.(*godo.DomainRecord).ID\n\t\treq := toReq(dc, m.Desired)\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, DO ID: %d\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\t_, _, err := api.client.Domains.EditRecord(ctx, dc.Name, id, req)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\n\treturn corrections, nil\n}\n\nfunc getRecords(api *DoApi, name string) ([]godo.DomainRecord, error) {\n\tctx := context.Background()\n\n\trecords := []godo.DomainRecord{}\n\topt := &godo.ListOptions{}\n\tfor {\n\t\tresult, resp, err := api.client.Domains.Records(ctx, name, opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, d := range result {\n\t\t\trecords = append(records, d)\n\t\t}\n\n\t\tif resp.Links == nil || resp.Links.IsLastPage() {\n\t\t\tbreak\n\t\t}\n\n\t\tpage, err := resp.Links.CurrentPage()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\topt.Page = page + 1\n\t}\n\n\treturn records, nil\n}\n\nfunc toRc(dc *models.DomainConfig, r *godo.DomainRecord) *models.RecordConfig {\n\t\/\/ This handles \"@\" etc.\n\tname := dnsutil.AddOrigin(r.Name, dc.Name)\n\n\ttarget := r.Data\n\t\/\/ Make target FQDN (#rtype_variations)\n\tif r.Type == \"CNAME\" || r.Type == \"MX\" || r.Type == \"NS\" || r.Type == \"SRV\" {\n\t\t\/\/ If target is the domainname, e.g. cname foo.example.com -> example.com,\n\t\t\/\/ DO returns \"@\" on read even if fqdn was written.\n\t\tif target == \"@\" {\n\t\t\ttarget = dc.Name\n\t\t}\n\t\ttarget = dnsutil.AddOrigin(target+\".\", dc.Name)\n\t}\n\n\treturn &models.RecordConfig{\n\t\tNameFQDN: name,\n\t\tType: r.Type,\n\t\tTarget: target,\n\t\tTTL: uint32(r.TTL),\n\t\tMxPreference: uint16(r.Priority),\n\t\tSrvPriority: uint16(r.Priority),\n\t\tSrvWeight: uint16(r.Weight),\n\t\tSrvPort: uint16(r.Port),\n\t\tOriginal: r,\n\t}\n}\n\nfunc toReq(dc *models.DomainConfig, rc *models.RecordConfig) *godo.DomainRecordEditRequest {\n\t\/\/ DO wants the short name, e.g. @\n\tname := dnsutil.TrimDomainName(rc.NameFQDN, dc.Name)\n\n\t\/\/ DO uses the same property for MX and SRV priority\n\tpriority := 0\n\tswitch rc.Type { \/\/ #rtype_variations\n\tcase \"MX\":\n\t\tpriority = int(rc.MxPreference)\n\tcase \"SRV\":\n\t\tpriority = int(rc.SrvPriority)\n\t}\n\n\treturn &godo.DomainRecordEditRequest{\n\t\tType: rc.Type,\n\t\tName: name,\n\t\tData: rc.Target,\n\t\tTTL: int(rc.TTL),\n\t\tPriority: priority,\n\t\tPort: int(rc.SrvPort),\n\t\tWeight: int(rc.SrvWeight),\n\t}\n}\n<commit_msg>export newdo<commit_after>package digitalocean\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/StackExchange\/dnscontrol\/models\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\"\n\t\"github.com\/StackExchange\/dnscontrol\/providers\/diff\"\n\t\"github.com\/miekg\/dns\/dnsutil\"\n\n\t\"github.com\/digitalocean\/godo\"\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/*\n\nDigitalocean API DNS provider:\n\nInfo required in `creds.json`:\n - token\n\n*\/\n\ntype DoApi struct {\n\tclient *godo.Client\n}\n\nvar defaultNameServerNames = []string{\n\t\"ns1.digitalocean.com\",\n\t\"ns2.digitalocean.com\",\n\t\"ns3.digitalocean.com\",\n}\n\nfunc NewDo(m map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) {\n\tif m[\"token\"] == \"\" {\n\t\treturn nil, fmt.Errorf(\"Digitalocean Token must be provided.\")\n\t}\n\n\tctx := context.Background()\n\toauthClient := oauth2.NewClient(\n\t\tctx,\n\t\toauth2.StaticTokenSource(&oauth2.Token{AccessToken: m[\"token\"]}),\n\t)\n\tclient := godo.NewClient(oauthClient)\n\n\tapi := &DoApi{client: client}\n\n\t\/\/ Get a domain to validate the token\n\t_, resp, err := api.client.Domains.List(ctx, &godo.ListOptions{PerPage: 1})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Digitalocean Token is not valid.\")\n\t}\n\n\treturn api, nil\n}\n\nvar docNotes = providers.DocumentationNotes{\n\tproviders.DocCreateDomains: providers.Can(),\n\tproviders.DocOfficiallySupported: providers.Cannot(),\n}\n\nfunc init() {\n\tproviders.RegisterDomainServiceProviderType(\"DIGITALOCEAN\", NewDo, providers.CanUseSRV, docNotes)\n}\n\nfunc (api *DoApi) EnsureDomainExists(domain string) error {\n\tctx := context.Background()\n\t_, resp, err := api.client.Domains.Get(ctx, domain)\n\tif resp.StatusCode == http.StatusNotFound {\n\t\t_, _, err := api.client.Domains.Create(ctx, &godo.DomainCreateRequest{\n\t\t\tName: domain,\n\t\t\tIPAddress: \"\",\n\t\t})\n\t\treturn err\n\t} else {\n\t\treturn err\n\t}\n}\n\nfunc (api *DoApi) GetNameservers(domain string) ([]*models.Nameserver, error) {\n\treturn models.StringsToNameservers(defaultNameServerNames), nil\n}\n\nfunc (api *DoApi) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) {\n\tctx := context.Background()\n\tdc.Punycode()\n\n\trecords, err := getRecords(api, dc.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(dc, &records[i])\n\t}\n\n\tdiffer := diff.New(dc)\n\t_, create, delete, modify := differ.IncrementalDiff(existingRecords)\n\n\tvar corrections = []*models.Correction{}\n\n\t\/\/ Deletes first so changing type works etc.\n\tfor _, m := range delete {\n\t\tid := m.Existing.Original.(*godo.DomainRecord).ID\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, DO ID: %d\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\t_, err := api.client.Domains.DeleteRecord(ctx, dc.Name, id)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\tfor _, m := range create {\n\t\treq := toReq(dc, m.Desired)\n\t\tcorr := &models.Correction{\n\t\t\tMsg: m.String(),\n\t\t\tF: func() error {\n\t\t\t\t_, _, err := api.client.Domains.CreateRecord(ctx, dc.Name, req)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\tfor _, m := range modify {\n\t\tid := m.Existing.Original.(*godo.DomainRecord).ID\n\t\treq := toReq(dc, m.Desired)\n\t\tcorr := &models.Correction{\n\t\t\tMsg: fmt.Sprintf(\"%s, DO ID: %d\", m.String(), id),\n\t\t\tF: func() error {\n\t\t\t\t_, _, err := api.client.Domains.EditRecord(ctx, dc.Name, id, req)\n\t\t\t\treturn err\n\t\t\t},\n\t\t}\n\t\tcorrections = append(corrections, corr)\n\t}\n\n\treturn corrections, nil\n}\n\nfunc getRecords(api *DoApi, name string) ([]godo.DomainRecord, error) {\n\tctx := context.Background()\n\n\trecords := []godo.DomainRecord{}\n\topt := &godo.ListOptions{}\n\tfor {\n\t\tresult, resp, err := api.client.Domains.Records(ctx, name, opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, d := range result {\n\t\t\trecords = append(records, d)\n\t\t}\n\n\t\tif resp.Links == nil || resp.Links.IsLastPage() {\n\t\t\tbreak\n\t\t}\n\n\t\tpage, err := resp.Links.CurrentPage()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\topt.Page = page + 1\n\t}\n\n\treturn records, nil\n}\n\nfunc toRc(dc *models.DomainConfig, r *godo.DomainRecord) *models.RecordConfig {\n\t\/\/ This handles \"@\" etc.\n\tname := dnsutil.AddOrigin(r.Name, dc.Name)\n\n\ttarget := r.Data\n\t\/\/ Make target FQDN (#rtype_variations)\n\tif r.Type == \"CNAME\" || r.Type == \"MX\" || r.Type == \"NS\" || r.Type == \"SRV\" {\n\t\t\/\/ If target is the domainname, e.g. cname foo.example.com -> example.com,\n\t\t\/\/ DO returns \"@\" on read even if fqdn was written.\n\t\tif target == \"@\" {\n\t\t\ttarget = dc.Name\n\t\t}\n\t\ttarget = dnsutil.AddOrigin(target+\".\", dc.Name)\n\t}\n\n\treturn &models.RecordConfig{\n\t\tNameFQDN: name,\n\t\tType: r.Type,\n\t\tTarget: target,\n\t\tTTL: uint32(r.TTL),\n\t\tMxPreference: uint16(r.Priority),\n\t\tSrvPriority: uint16(r.Priority),\n\t\tSrvWeight: uint16(r.Weight),\n\t\tSrvPort: uint16(r.Port),\n\t\tOriginal: r,\n\t}\n}\n\nfunc toReq(dc *models.DomainConfig, rc *models.RecordConfig) *godo.DomainRecordEditRequest {\n\t\/\/ DO wants the short name, e.g. @\n\tname := dnsutil.TrimDomainName(rc.NameFQDN, dc.Name)\n\n\t\/\/ DO uses the same property for MX and SRV priority\n\tpriority := 0\n\tswitch rc.Type { \/\/ #rtype_variations\n\tcase \"MX\":\n\t\tpriority = int(rc.MxPreference)\n\tcase \"SRV\":\n\t\tpriority = int(rc.SrvPriority)\n\t}\n\n\treturn &godo.DomainRecordEditRequest{\n\t\tType: rc.Type,\n\t\tName: name,\n\t\tData: rc.Target,\n\t\tTTL: int(rc.TTL),\n\t\tPriority: priority,\n\t\tPort: int(rc.SrvPort),\n\t\tWeight: int(rc.SrvWeight),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\ntype notFound struct {\n\tCompo\n\tIcon string\n}\n\nfunc (n *notFound) OnMount() {\n\tlinks := Window().Get(\"document\").Call(\"getElementsByTagName\", \"link\")\n\n\tfor i := 0; i < links.Length(); i++ {\n\t\tlink := links.Index(i)\n\t\trel := link.Call(\"getAttribute\", \"rel\")\n\n\t\tif rel.String() == \"icon\" {\n\t\t\tfavicon := link.Call(\"getAttribute\", \"href\")\n\t\t\tn.Icon = favicon.String()\n\t\t\tn.Update()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *notFound) Render() UI {\n\treturn Div().\n\t\tClass(\"app-wasm-layout\").\n\t\tBody(\n\t\t\tDiv().\n\t\t\t\tClass(\"app-notfound-title\").\n\t\t\t\tBody(\n\t\t\t\t\tText(\"4\"),\n\t\t\t\t\tImg().\n\t\t\t\t\t\tClass(\"app-wasm-icon\").\n\t\t\t\t\t\tAlt(\"0\").\n\t\t\t\t\t\tSrc(n.Icon),\n\t\t\t\t\tText(\"4\"),\n\t\t\t\t),\n\t\t\tP().\n\t\t\t\tClass(\"app-wasm-label\").\n\t\t\t\tText(\"Not Found\"),\n\t\t)\n}\n<commit_msg>Update notfound.go<commit_after>package app\n\nvar (\n\t\/\/ NotFound is the ui element that is displayed when a request is not\n\t\/\/ routed.\n\tNotFound UI = ¬Found{}\n)\n\ntype notFound struct {\n\tCompo\n\tIcon string\n}\n\nfunc (n *notFound) OnMount() {\n\tlinks := Window().Get(\"document\").Call(\"getElementsByTagName\", \"link\")\n\n\tfor i := 0; i < links.Length(); i++ {\n\t\tlink := links.Index(i)\n\t\trel := link.Call(\"getAttribute\", \"rel\")\n\n\t\tif rel.String() == \"icon\" {\n\t\t\tfavicon := link.Call(\"getAttribute\", \"href\")\n\t\t\tn.Icon = favicon.String()\n\t\t\tn.Update()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (n *notFound) Render() UI {\n\treturn Div().\n\t\tClass(\"app-wasm-layout\").\n\t\tBody(\n\t\t\tDiv().\n\t\t\t\tClass(\"app-notfound-title\").\n\t\t\t\tBody(\n\t\t\t\t\tText(\"4\"),\n\t\t\t\t\tImg().\n\t\t\t\t\t\tClass(\"app-wasm-icon\").\n\t\t\t\t\t\tAlt(\"0\").\n\t\t\t\t\t\tSrc(n.Icon),\n\t\t\t\t\tText(\"4\"),\n\t\t\t\t),\n\t\t\tP().\n\t\t\t\tClass(\"app-wasm-label\").\n\t\t\t\tText(\"Not Found\"),\n\t\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dfs\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/control-center\/serviced\/commons\/docker\"\n\t\"github.com\/control-center\/serviced\/dfs\/utils\"\n\t\"github.com\/zenoss\/glog\"\n)\n\nconst (\n\tBackupMetadataFile = \".BACKUPINFO\"\n\tSnapshotsMetadataDir = \"SNAPSHOTS\/\"\n\tDockerImagesFile = \"IMAGES.dkr\"\n)\n\n\/\/ Backup writes all application data into an export stream\nfunc (dfs *DistributedFilesystem) Backup(data BackupInfo, w io.Writer) error {\n\ttarfile := tar.NewWriter(w)\n\tdefer tarfile.Close()\n\tbuffer := bytes.NewBufferString(\"\")\n\tspool, err := utils.NewSpool(dfs.tmp)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create spool: %s\", err)\n\t\treturn err\n\t}\n\tdefer spool.Remove()\n\t\/\/ write the backup metadata\n\tglog.Infof(\"Writing backup metadata\")\n\tif err := json.NewEncoder(buffer).Encode(data); err != nil {\n\t\tglog.Errorf(\"Could not encode backup metadata: %s\", err)\n\t\treturn err\n\t}\n\theader := &tar.Header{Name: BackupMetadataFile, Size: int64(buffer.Len())}\n\tif err := tarfile.WriteHeader(header); err != nil {\n\t\tglog.Errorf(\"Could not create metadata header for backup: %s\", err)\n\t\treturn err\n\t}\n\tif _, err := buffer.WriteTo(tarfile); err != nil {\n\t\tglog.Errorf(\"Could not write backup metadata: %s\", err)\n\t\treturn err\n\t}\n\t\/\/ download the base images\n\tfor _, image := range data.BaseImages {\n\t\tif _, err := dfs.docker.FindImage(image); docker.IsImageNotFound(err) {\n\t\t\tif err := dfs.docker.PullImage(image); err != nil {\n\t\t\t\tglog.Errorf(\"Could not pull image %s: %s\", image, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tglog.Errorf(\"Could not find image %s: %s\", image, err)\n\t\t\treturn err\n\t\t}\n\t}\n\timages := data.BaseImages\n\t\/\/ export the snapshots\n\tfor _, snapshot := range data.Snapshots {\n\t\tvol, info, err := dfs.getSnapshotVolumeAndInfo(snapshot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ load the images from this snapshot\n\t\tglog.Infof(\"Preparing images for tenant %s\", info.TenantID)\n\t\tr, err := vol.ReadMetadata(info.Label, ImagesMetadataFile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Could not receive images metadata for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tvar imgs []string\n\t\tif err := importJSON(r, &imgs); err != nil {\n\t\t\tglog.Errorf(\"Could not interpret images metadata for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\ttimer := time.NewTimer(0)\n\t\tfor _, img := range imgs {\n\t\t\ttimer.Reset(30 * time.Minute)\n\t\t\tif err := dfs.reg.PullImage(timer.C, img); err != nil {\n\t\t\t\tglog.Errorf(\"Could not pull image %s from registry: %s\", img, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timage, err := dfs.reg.ImagePath(img)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not get the image path from registry %s: %s\", img, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timages = append(images, image)\n\t\t}\n\t\ttimer.Stop()\n\t\t\/\/ export the snapshot\n\t\tif err := vol.Export(info.Label, \"\", spool); err != nil {\n\t\t\tglog.Errorf(\"Could not export tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tglog.Infof(\"Exporting tenant volume %s\", info.TenantID)\n\t\theader := &tar.Header{Name: path.Join(SnapshotsMetadataDir, info.TenantID, info.Label), Size: spool.Size()}\n\t\tif err := tarfile.WriteHeader(header); err != nil {\n\t\t\tglog.Errorf(\"Could not create header for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tif _, err := spool.WriteTo(tarfile); err != nil {\n\t\t\tglog.Errorf(\"Could not write tenant %s to backup: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tglog.Infof(\"Finished exporting tenant volume %s\", info.TenantID)\n\t}\n\t\/\/ export the images\n\tglog.Infof(\"Saving images to backup\")\n\tif err := dfs.docker.SaveImages(images, spool); err != nil {\n\t\tglog.Errorf(\"Could not save images to backup: %s\", err)\n\t\treturn err\n\t}\n\theader = &tar.Header{Name: DockerImagesFile, Size: spool.Size()}\n\tif err := tarfile.WriteHeader(header); err != nil {\n\t\tglog.Errorf(\"Could not create docker images header for backup: %s\", err)\n\t\treturn err\n\t}\n\tif _, err := spool.WriteTo(tarfile); err != nil {\n\t\tglog.Errorf(\"Could not write docker images: %s\", err)\n\t\treturn err\n\t}\n\tglog.Infof(\"Successfully completed backup\")\n\treturn nil\n}\n<commit_msg>using configured timeout as pull timeout during backups<commit_after>\/\/ Copyright 2015 The Serviced Authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dfs\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/control-center\/serviced\/commons\/docker\"\n\t\"github.com\/control-center\/serviced\/dfs\/utils\"\n\t\"github.com\/zenoss\/glog\"\n)\n\nconst (\n\tBackupMetadataFile = \".BACKUPINFO\"\n\tSnapshotsMetadataDir = \"SNAPSHOTS\/\"\n\tDockerImagesFile = \"IMAGES.dkr\"\n)\n\n\/\/ Backup writes all application data into an export stream\nfunc (dfs *DistributedFilesystem) Backup(data BackupInfo, w io.Writer) error {\n\ttarfile := tar.NewWriter(w)\n\tdefer tarfile.Close()\n\tbuffer := bytes.NewBufferString(\"\")\n\tspool, err := utils.NewSpool(dfs.tmp)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create spool: %s\", err)\n\t\treturn err\n\t}\n\tdefer spool.Remove()\n\t\/\/ write the backup metadata\n\tglog.Infof(\"Writing backup metadata\")\n\tif err := json.NewEncoder(buffer).Encode(data); err != nil {\n\t\tglog.Errorf(\"Could not encode backup metadata: %s\", err)\n\t\treturn err\n\t}\n\theader := &tar.Header{Name: BackupMetadataFile, Size: int64(buffer.Len())}\n\tif err := tarfile.WriteHeader(header); err != nil {\n\t\tglog.Errorf(\"Could not create metadata header for backup: %s\", err)\n\t\treturn err\n\t}\n\tif _, err := buffer.WriteTo(tarfile); err != nil {\n\t\tglog.Errorf(\"Could not write backup metadata: %s\", err)\n\t\treturn err\n\t}\n\t\/\/ download the base images\n\tfor _, image := range data.BaseImages {\n\t\tif _, err := dfs.docker.FindImage(image); docker.IsImageNotFound(err) {\n\t\t\tif err := dfs.docker.PullImage(image); err != nil {\n\t\t\t\tglog.Errorf(\"Could not pull image %s: %s\", image, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tglog.Errorf(\"Could not find image %s: %s\", image, err)\n\t\t\treturn err\n\t\t}\n\t}\n\timages := data.BaseImages\n\t\/\/ export the snapshots\n\tfor _, snapshot := range data.Snapshots {\n\t\tvol, info, err := dfs.getSnapshotVolumeAndInfo(snapshot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ load the images from this snapshot\n\t\tglog.Infof(\"Preparing images for tenant %s\", info.TenantID)\n\t\tr, err := vol.ReadMetadata(info.Label, ImagesMetadataFile)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Could not receive images metadata for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tvar imgs []string\n\t\tif err := importJSON(r, &imgs); err != nil {\n\t\t\tglog.Errorf(\"Could not interpret images metadata for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\ttimer := time.NewTimer(0)\n\t\tfor _, img := range imgs {\n\t\t\ttimer.Reset(dfs.timeout)\n\t\t\tif err := dfs.reg.PullImage(timer.C, img); err != nil {\n\t\t\t\tglog.Errorf(\"Could not pull image %s from registry: %s\", img, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timage, err := dfs.reg.ImagePath(img)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not get the image path from registry %s: %s\", img, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\timages = append(images, image)\n\t\t}\n\t\ttimer.Stop()\n\t\t\/\/ export the snapshot\n\t\tif err := vol.Export(info.Label, \"\", spool); err != nil {\n\t\t\tglog.Errorf(\"Could not export tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tglog.Infof(\"Exporting tenant volume %s\", info.TenantID)\n\t\theader := &tar.Header{Name: path.Join(SnapshotsMetadataDir, info.TenantID, info.Label), Size: spool.Size()}\n\t\tif err := tarfile.WriteHeader(header); err != nil {\n\t\t\tglog.Errorf(\"Could not create header for tenant %s: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tif _, err := spool.WriteTo(tarfile); err != nil {\n\t\t\tglog.Errorf(\"Could not write tenant %s to backup: %s\", info.TenantID, err)\n\t\t\treturn err\n\t\t}\n\t\tglog.Infof(\"Finished exporting tenant volume %s\", info.TenantID)\n\t}\n\t\/\/ export the images\n\tglog.Infof(\"Saving images to backup\")\n\tif err := dfs.docker.SaveImages(images, spool); err != nil {\n\t\tglog.Errorf(\"Could not save images to backup: %s\", err)\n\t\treturn err\n\t}\n\theader = &tar.Header{Name: DockerImagesFile, Size: spool.Size()}\n\tif err := tarfile.WriteHeader(header); err != nil {\n\t\tglog.Errorf(\"Could not create docker images header for backup: %s\", err)\n\t\treturn err\n\t}\n\tif _, err := spool.WriteTo(tarfile); err != nil {\n\t\tglog.Errorf(\"Could not write docker images: %s\", err)\n\t\treturn err\n\t}\n\tglog.Infof(\"Successfully completed backup\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\n\/\/ Package cap provides Linux capability utility\npackage cap\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ FromNumber returns a cap string like \"CAP_SYS_ADMIN\"\n\/\/ that corresponds to the given number like 21.\n\/\/\n\/\/ FromNumber returns an empty string for unknown cap number.\nfunc FromNumber(num int) string {\n\tif num < 0 || num > len(capsLatest)-1 {\n\t\treturn \"\"\n\t}\n\treturn capsLatest[num]\n}\n\n\/\/ FromBitmap parses an uint64 bitmap into string slice like\n\/\/ []{\"CAP_SYS_ADMIN\", ...}.\n\/\/\n\/\/ Unknown cap numbers are returned as []int.\nfunc FromBitmap(v uint64) ([]string, []int) {\n\tvar (\n\t\tres []string\n\t\tunknown []int\n\t)\n\tfor i := 0; i <= 63; i++ {\n\t\tif b := (v >> i) & 0x1; b == 0x1 {\n\t\t\tif s := FromNumber(i); s != \"\" {\n\t\t\t\tres = append(res, s)\n\t\t\t} else {\n\t\t\t\tunknown = append(unknown, i)\n\t\t\t}\n\t\t}\n\t}\n\treturn res, unknown\n}\n\n\/\/ Type is the type of capability\ntype Type int\n\nconst (\n\t\/\/ Effective is CapEff\n\tEffective Type = 1 << iota\n\t\/\/ Permitted is CapPrm\n\tPermitted\n\t\/\/ Inheritable is CapInh\n\tInheritable\n\t\/\/ Bounding is CapBnd\n\tBounding\n\t\/\/ Ambient is CapAmb\n\tAmbient\n)\n\n\/\/ ParseProcPIDStatus returns uint64 bitmap value from \/proc\/<PID>\/status file\nfunc ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) {\n\tres := make(map[Type]uint64)\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tpair := strings.SplitN(line, \":\", 2)\n\t\tif len(pair) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tk := strings.TrimSpace(pair[0])\n\t\tv := strings.TrimSpace(pair[1])\n\t\tswitch k {\n\t\tcase \"CapInh\", \"CapPrm\", \"CapEff\", \"CapBnd\", \"CapAmb\":\n\t\t\tui64, err := strconv.ParseUint(v, 16, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Errorf(\"failed to parse line %q\", line)\n\t\t\t}\n\t\t\tswitch k {\n\t\t\tcase \"CapInh\":\n\t\t\t\tres[Inheritable] = ui64\n\t\t\tcase \"CapPrm\":\n\t\t\t\tres[Permitted] = ui64\n\t\t\tcase \"CapEff\":\n\t\t\t\tres[Effective] = ui64\n\t\t\tcase \"CapBnd\":\n\t\t\t\tres[Bounding] = ui64\n\t\t\tcase \"CapAmb\":\n\t\t\t\tres[Ambient] = ui64\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ Current returns the list of the effective and the known caps of\n\/\/ the current process.\n\/\/\n\/\/ The result is like []string{\"CAP_SYS_ADMIN\", ...}.\n\/\/\n\/\/ The result does not contain caps that are not recognized by\n\/\/ the \"github.com\/syndtr\/gocapability\" library.\nfunc Current() ([]string, error) {\n\tf, err := os.Open(\"\/proc\/self\/status\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tcaps, err := ParseProcPIDStatus(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcapEff := caps[Effective]\n\tnames, _ := FromBitmap(capEff)\n\treturn names, nil\n}\n\nvar (\n\t\/\/ caps35 is the caps of kernel 3.5 (37 entries)\n\tcaps35 = []string{\n\t\t\"CAP_CHOWN\", \/\/ 2.2\n\t\t\"CAP_DAC_OVERRIDE\", \/\/ 2.2\n\t\t\"CAP_DAC_READ_SEARCH\", \/\/ 2.2\n\t\t\"CAP_FOWNER\", \/\/ 2.2\n\t\t\"CAP_FSETID\", \/\/ 2.2\n\t\t\"CAP_KILL\", \/\/ 2.2\n\t\t\"CAP_SETGID\", \/\/ 2.2\n\t\t\"CAP_SETUID\", \/\/ 2.2\n\t\t\"CAP_SETPCAP\", \/\/ 2.2\n\t\t\"CAP_LINUX_IMMUTABLE\", \/\/ 2.2\n\t\t\"CAP_NET_BIND_SERVICE\", \/\/ 2.2\n\t\t\"CAP_NET_BROADCAST\", \/\/ 2.2\n\t\t\"CAP_NET_ADMIN\", \/\/ 2.2\n\t\t\"CAP_NET_RAW\", \/\/ 2.2\n\t\t\"CAP_IPC_LOCK\", \/\/ 2.2\n\t\t\"CAP_IPC_OWNER\", \/\/ 2.2\n\t\t\"CAP_SYS_MODULE\", \/\/ 2.2\n\t\t\"CAP_SYS_RAWIO\", \/\/ 2.2\n\t\t\"CAP_SYS_CHROOT\", \/\/ 2.2\n\t\t\"CAP_SYS_PTRACE\", \/\/ 2.2\n\t\t\"CAP_SYS_PACCT\", \/\/ 2.2\n\t\t\"CAP_SYS_ADMIN\", \/\/ 2.2\n\t\t\"CAP_SYS_BOOT\", \/\/ 2.2\n\t\t\"CAP_SYS_NICE\", \/\/ 2.2\n\t\t\"CAP_SYS_RESOURCE\", \/\/ 2.2\n\t\t\"CAP_SYS_TIME\", \/\/ 2.2\n\t\t\"CAP_SYS_TTY_CONFIG\", \/\/ 2.2\n\t\t\"CAP_MKNOD\", \/\/ 2.4\n\t\t\"CAP_LEASE\", \/\/ 2.4\n\t\t\"CAP_AUDIT_WRITE\", \/\/ 2.6.11\n\t\t\"CAP_AUDIT_CONTROL\", \/\/ 2.6.11\n\t\t\"CAP_SETFCAP\", \/\/ 2.6.24\n\t\t\"CAP_MAC_OVERRIDE\", \/\/ 2.6.25\n\t\t\"CAP_MAC_ADMIN\", \/\/ 2.6.25\n\t\t\"CAP_SYSLOG\", \/\/ 2.6.37\n\t\t\"CAP_WAKE_ALARM\", \/\/ 3.0\n\t\t\"CAP_BLOCK_SUSPEND\", \/\/ 3.5\n\t}\n\t\/\/ caps316 is the caps of kernel 3.16 (38 entries)\n\tcaps316 = append(caps35, \"CAP_AUDIT_READ\")\n\t\/\/ caps58 is the caps of kernel 5.8 (40 entries)\n\tcaps58 = append(caps316, []string{\"CAP_PERFMON\", \"CAP_BPF\"}...)\n\t\/\/ caps59 is the caps of kernel 5.9 (41 entries)\n\tcaps59 = append(caps58, \"CAP_CHECKPOINT_RESTORE\")\n\tcapsLatest = caps59\n)\n\n\/\/ Known returns the known cap strings of the latest kernel.\n\/\/ The current latest kernel is 5.9.\nfunc Known() []string {\n\treturn capsLatest\n}\n<commit_msg>pkg\/cap: remove an outdated comment<commit_after>\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\n\/\/ Package cap provides Linux capability utility\npackage cap\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ FromNumber returns a cap string like \"CAP_SYS_ADMIN\"\n\/\/ that corresponds to the given number like 21.\n\/\/\n\/\/ FromNumber returns an empty string for unknown cap number.\nfunc FromNumber(num int) string {\n\tif num < 0 || num > len(capsLatest)-1 {\n\t\treturn \"\"\n\t}\n\treturn capsLatest[num]\n}\n\n\/\/ FromBitmap parses an uint64 bitmap into string slice like\n\/\/ []{\"CAP_SYS_ADMIN\", ...}.\n\/\/\n\/\/ Unknown cap numbers are returned as []int.\nfunc FromBitmap(v uint64) ([]string, []int) {\n\tvar (\n\t\tres []string\n\t\tunknown []int\n\t)\n\tfor i := 0; i <= 63; i++ {\n\t\tif b := (v >> i) & 0x1; b == 0x1 {\n\t\t\tif s := FromNumber(i); s != \"\" {\n\t\t\t\tres = append(res, s)\n\t\t\t} else {\n\t\t\t\tunknown = append(unknown, i)\n\t\t\t}\n\t\t}\n\t}\n\treturn res, unknown\n}\n\n\/\/ Type is the type of capability\ntype Type int\n\nconst (\n\t\/\/ Effective is CapEff\n\tEffective Type = 1 << iota\n\t\/\/ Permitted is CapPrm\n\tPermitted\n\t\/\/ Inheritable is CapInh\n\tInheritable\n\t\/\/ Bounding is CapBnd\n\tBounding\n\t\/\/ Ambient is CapAmb\n\tAmbient\n)\n\n\/\/ ParseProcPIDStatus returns uint64 bitmap value from \/proc\/<PID>\/status file\nfunc ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) {\n\tres := make(map[Type]uint64)\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tpair := strings.SplitN(line, \":\", 2)\n\t\tif len(pair) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tk := strings.TrimSpace(pair[0])\n\t\tv := strings.TrimSpace(pair[1])\n\t\tswitch k {\n\t\tcase \"CapInh\", \"CapPrm\", \"CapEff\", \"CapBnd\", \"CapAmb\":\n\t\t\tui64, err := strconv.ParseUint(v, 16, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Errorf(\"failed to parse line %q\", line)\n\t\t\t}\n\t\t\tswitch k {\n\t\t\tcase \"CapInh\":\n\t\t\t\tres[Inheritable] = ui64\n\t\t\tcase \"CapPrm\":\n\t\t\t\tres[Permitted] = ui64\n\t\t\tcase \"CapEff\":\n\t\t\t\tres[Effective] = ui64\n\t\t\tcase \"CapBnd\":\n\t\t\t\tres[Bounding] = ui64\n\t\t\tcase \"CapAmb\":\n\t\t\t\tres[Ambient] = ui64\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}\n\n\/\/ Current returns the list of the effective and the known caps of\n\/\/ the current process.\n\/\/\n\/\/ The result is like []string{\"CAP_SYS_ADMIN\", ...}.\nfunc Current() ([]string, error) {\n\tf, err := os.Open(\"\/proc\/self\/status\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tcaps, err := ParseProcPIDStatus(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcapEff := caps[Effective]\n\tnames, _ := FromBitmap(capEff)\n\treturn names, nil\n}\n\nvar (\n\t\/\/ caps35 is the caps of kernel 3.5 (37 entries)\n\tcaps35 = []string{\n\t\t\"CAP_CHOWN\", \/\/ 2.2\n\t\t\"CAP_DAC_OVERRIDE\", \/\/ 2.2\n\t\t\"CAP_DAC_READ_SEARCH\", \/\/ 2.2\n\t\t\"CAP_FOWNER\", \/\/ 2.2\n\t\t\"CAP_FSETID\", \/\/ 2.2\n\t\t\"CAP_KILL\", \/\/ 2.2\n\t\t\"CAP_SETGID\", \/\/ 2.2\n\t\t\"CAP_SETUID\", \/\/ 2.2\n\t\t\"CAP_SETPCAP\", \/\/ 2.2\n\t\t\"CAP_LINUX_IMMUTABLE\", \/\/ 2.2\n\t\t\"CAP_NET_BIND_SERVICE\", \/\/ 2.2\n\t\t\"CAP_NET_BROADCAST\", \/\/ 2.2\n\t\t\"CAP_NET_ADMIN\", \/\/ 2.2\n\t\t\"CAP_NET_RAW\", \/\/ 2.2\n\t\t\"CAP_IPC_LOCK\", \/\/ 2.2\n\t\t\"CAP_IPC_OWNER\", \/\/ 2.2\n\t\t\"CAP_SYS_MODULE\", \/\/ 2.2\n\t\t\"CAP_SYS_RAWIO\", \/\/ 2.2\n\t\t\"CAP_SYS_CHROOT\", \/\/ 2.2\n\t\t\"CAP_SYS_PTRACE\", \/\/ 2.2\n\t\t\"CAP_SYS_PACCT\", \/\/ 2.2\n\t\t\"CAP_SYS_ADMIN\", \/\/ 2.2\n\t\t\"CAP_SYS_BOOT\", \/\/ 2.2\n\t\t\"CAP_SYS_NICE\", \/\/ 2.2\n\t\t\"CAP_SYS_RESOURCE\", \/\/ 2.2\n\t\t\"CAP_SYS_TIME\", \/\/ 2.2\n\t\t\"CAP_SYS_TTY_CONFIG\", \/\/ 2.2\n\t\t\"CAP_MKNOD\", \/\/ 2.4\n\t\t\"CAP_LEASE\", \/\/ 2.4\n\t\t\"CAP_AUDIT_WRITE\", \/\/ 2.6.11\n\t\t\"CAP_AUDIT_CONTROL\", \/\/ 2.6.11\n\t\t\"CAP_SETFCAP\", \/\/ 2.6.24\n\t\t\"CAP_MAC_OVERRIDE\", \/\/ 2.6.25\n\t\t\"CAP_MAC_ADMIN\", \/\/ 2.6.25\n\t\t\"CAP_SYSLOG\", \/\/ 2.6.37\n\t\t\"CAP_WAKE_ALARM\", \/\/ 3.0\n\t\t\"CAP_BLOCK_SUSPEND\", \/\/ 3.5\n\t}\n\t\/\/ caps316 is the caps of kernel 3.16 (38 entries)\n\tcaps316 = append(caps35, \"CAP_AUDIT_READ\")\n\t\/\/ caps58 is the caps of kernel 5.8 (40 entries)\n\tcaps58 = append(caps316, []string{\"CAP_PERFMON\", \"CAP_BPF\"}...)\n\t\/\/ caps59 is the caps of kernel 5.9 (41 entries)\n\tcaps59 = append(caps58, \"CAP_CHECKPOINT_RESTORE\")\n\tcapsLatest = caps59\n)\n\n\/\/ Known returns the known cap strings of the latest kernel.\n\/\/ The current latest kernel is 5.9.\nfunc Known() []string {\n\treturn capsLatest\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Code generated by MockGen. DO NOT EDIT.\n\/\/ Source: pkg\/dao\/user.go\n\n\/\/ Package dao is a generated GoMock package.\npackage dao\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\tmodels \"tplgo\/pkg\/models\"\n\n\tgomock \"github.com\/golang\/mock\/gomock\"\n)\n\n\/\/ MockUserDao is a mock of UserDao interface.\ntype MockUserDao struct {\n\tctrl *gomock.Controller\n\trecorder *MockUserDaoMockRecorder\n}\n\n\/\/ MockUserDaoMockRecorder is the mock recorder for MockUserDao.\ntype MockUserDaoMockRecorder struct {\n\tmock *MockUserDao\n}\n\n\/\/ NewMockUserDao creates a new mock instance.\nfunc NewMockUserDao(ctrl *gomock.Controller) *MockUserDao {\n\tmock := &MockUserDao{ctrl: ctrl}\n\tmock.recorder = &MockUserDaoMockRecorder{mock}\n\treturn mock\n}\n\n\/\/ EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockUserDao) EXPECT() *MockUserDaoMockRecorder {\n\treturn m.recorder\n}\n\n\/\/ FindByID mocks base method.\nfunc (m *MockUserDao) FindByID(ctx context.Context, id int64) (*models.User, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FindByID\", ctx, id)\n\tret0, _ := ret[0].(*models.User)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n\/\/ FindByID indicates an expected call of FindByID.\nfunc (mr *MockUserDaoMockRecorder) FindByID(ctx, id interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FindByID\", reflect.TypeOf((*MockUserDao)(nil).FindByID), ctx, id)\n}\n<commit_msg>remove mock_user<commit_after><|endoftext|>"} {"text":"<commit_before>package edit\n\n\/\/ Elvish code for default bindings, hooks, and similar initializations. This assumes the `edit`\n\/\/ namespace as the global namespace.\nconst elvInit = `\nafter-command = [\n\t# Capture the most recent interactive command duration in $edit:command-duration\n\t# as a convenience for prompt functions. Note: The first time this is run is after\n\t# shell.sourceRC() finishes so the initial value of command-duration is the time\n\t# to execute the user's interactive configuration script.\n\t[m]{\n\t\tcommand-duration = $m[duration]\n\t}\n]\n\nglobal-binding = (binding-table [\n &Ctrl-'['= $close-mode~\n])\n\ninsert:binding = (binding-table [\n &Left= $move-dot-left~\n &Right= $move-dot-right~\n\n &Ctrl-Left= $move-dot-left-word~\n &Ctrl-Right= $move-dot-right-word~\n &Alt-Left= $move-dot-left-word~\n &Alt-Right= $move-dot-right-word~\n &Alt-b= $move-dot-left-word~\n &Alt-f= $move-dot-right-word~\n\n &Home= $move-dot-sol~\n &End= $move-dot-eol~\n\n &Backspace= $kill-rune-left~\n &Ctrl-H= $kill-rune-left~\n &Delete= $kill-rune-right~\n &Ctrl-W= $kill-word-left~\n &Ctrl-U= $kill-line-left~\n &Ctrl-K= $kill-line-right~\n\n &Ctrl-V= $insert-raw~\n\n &Alt-,= $lastcmd:start~\n &Alt-.= $insert-last-word~\n &Ctrl-R= $histlist:start~\n &Ctrl-L= $location:start~\n &Ctrl-N= $navigation:start~\n &Tab= $completion:smart-start~\n &Up= $history:start~\n &Alt-x= $minibuf:start~\n\n &Enter= $smart-enter~\n &Ctrl-D= $return-eof~\n])\n\ncommand:binding = (binding-table [\n &'$'= $move-dot-eol~\n &0= $move-dot-sol~\n &D= $kill-line-right~\n &b= $move-dot-left-word~\n &h= $move-dot-left~\n &i= $close-mode~\n &j= $move-dot-down~\n &k= $move-dot-up~\n &l= $move-dot-right~\n &w= $move-dot-right-word~\n &x= $kill-rune-right~\n])\n\nlisting:binding = (binding-table [\n &Up= $listing:up~\n &Down= $listing:down~\n &Tab= $listing:down-cycle~\n &Shift-Tab= $listing:up-cycle~\n])\n\nhistlist:binding = (binding-table [\n &Ctrl-D= $histlist:toggle-dedup~\n])\n\nnavigation:binding = (binding-table [\n &Left= $navigation:left~\n &Right= $navigation:right~\n &Up= $navigation:up~\n &Down= $navigation:down~\n &PageUp= $navigation:page-up~\n &PageDown= $navigation:page-down~\n &Alt-Up= $navigation:file-preview-up~\n &Alt-Down= $navigation:file-preview-down~\n &Enter= $navigation:insert-selected-and-quit~\n &Alt-Enter= $navigation:insert-selected~\n &Ctrl-F= $navigation:trigger-filter~\n &Ctrl-H= $navigation:trigger-shown-hidden~\n])\n\ncompletion:binding = (binding-table [\n &Down= $completion:down~\n &Up= $completion:up~\n &Tab= $completion:down-cycle~\n &Shift-Tab=$completion:up-cycle~\n &Left= $completion:left~\n &Right= $completion:right~\n])\n\nhistory:binding = (binding-table [\n &Up= $history:up~\n &Down= $history:down-or-quit~\n &Ctrl-'['= $close-mode~\n])\n\nlastcmd:binding = (binding-table [\n &Alt-,= $listing:accept~\n])\n\n-instant:binding = (binding-table [\n &\n])\n\nminibuf:binding = (binding-table [\n &\n])\n`\n\n\/\/ vi: set et:\n<commit_msg>Make Alt-x a global binding.<commit_after>package edit\n\n\/\/ Elvish code for default bindings, hooks, and similar initializations. This assumes the `edit`\n\/\/ namespace as the global namespace.\nconst elvInit = `\nafter-command = [\n\t# Capture the most recent interactive command duration in $edit:command-duration\n\t# as a convenience for prompt functions. Note: The first time this is run is after\n\t# shell.sourceRC() finishes so the initial value of command-duration is the time\n\t# to execute the user's interactive configuration script.\n\t[m]{\n\t\tcommand-duration = $m[duration]\n\t}\n]\n\nglobal-binding = (binding-table [\n &Ctrl-'['= $close-mode~\n &Alt-x= $minibuf:start~\n])\n\ninsert:binding = (binding-table [\n &Left= $move-dot-left~\n &Right= $move-dot-right~\n\n &Ctrl-Left= $move-dot-left-word~\n &Ctrl-Right= $move-dot-right-word~\n &Alt-Left= $move-dot-left-word~\n &Alt-Right= $move-dot-right-word~\n &Alt-b= $move-dot-left-word~\n &Alt-f= $move-dot-right-word~\n\n &Home= $move-dot-sol~\n &End= $move-dot-eol~\n\n &Backspace= $kill-rune-left~\n &Ctrl-H= $kill-rune-left~\n &Delete= $kill-rune-right~\n &Ctrl-W= $kill-word-left~\n &Ctrl-U= $kill-line-left~\n &Ctrl-K= $kill-line-right~\n\n &Ctrl-V= $insert-raw~\n\n &Alt-,= $lastcmd:start~\n &Alt-.= $insert-last-word~\n &Ctrl-R= $histlist:start~\n &Ctrl-L= $location:start~\n &Ctrl-N= $navigation:start~\n &Tab= $completion:smart-start~\n &Up= $history:start~\n\n &Enter= $smart-enter~\n &Ctrl-D= $return-eof~\n])\n\ncommand:binding = (binding-table [\n &'$'= $move-dot-eol~\n &0= $move-dot-sol~\n &D= $kill-line-right~\n &b= $move-dot-left-word~\n &h= $move-dot-left~\n &i= $close-mode~\n &j= $move-dot-down~\n &k= $move-dot-up~\n &l= $move-dot-right~\n &w= $move-dot-right-word~\n &x= $kill-rune-right~\n])\n\nlisting:binding = (binding-table [\n &Up= $listing:up~\n &Down= $listing:down~\n &Tab= $listing:down-cycle~\n &Shift-Tab= $listing:up-cycle~\n])\n\nhistlist:binding = (binding-table [\n &Ctrl-D= $histlist:toggle-dedup~\n])\n\nnavigation:binding = (binding-table [\n &Left= $navigation:left~\n &Right= $navigation:right~\n &Up= $navigation:up~\n &Down= $navigation:down~\n &PageUp= $navigation:page-up~\n &PageDown= $navigation:page-down~\n &Alt-Up= $navigation:file-preview-up~\n &Alt-Down= $navigation:file-preview-down~\n &Enter= $navigation:insert-selected-and-quit~\n &Alt-Enter= $navigation:insert-selected~\n &Ctrl-F= $navigation:trigger-filter~\n &Ctrl-H= $navigation:trigger-shown-hidden~\n])\n\ncompletion:binding = (binding-table [\n &Down= $completion:down~\n &Up= $completion:up~\n &Tab= $completion:down-cycle~\n &Shift-Tab=$completion:up-cycle~\n &Left= $completion:left~\n &Right= $completion:right~\n])\n\nhistory:binding = (binding-table [\n &Up= $history:up~\n &Down= $history:down-or-quit~\n &Ctrl-'['= $close-mode~\n])\n\nlastcmd:binding = (binding-table [\n &Alt-,= $listing:accept~\n])\n\n-instant:binding = (binding-table [\n &\n])\n\nminibuf:binding = (binding-table [\n &\n])\n`\n\n\/\/ vi: set et:\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage engine\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/Masterminds\/sprig\"\n\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n)\n\n\/\/ Engine is an implementation of 'cmd\/tiller\/environment'.Engine that uses Go templates.\ntype Engine struct {\n\t\/\/ FuncMap contains the template functions that will be passed to each\n\t\/\/ render call. This may only be modified before the first call to Render.\n\tFuncMap template.FuncMap\n\t\/\/ If strict is enabled, template rendering will fail if a template references\n\t\/\/ a value that was not passed in.\n\tStrict bool\n\tCurrentTemplates map[string]renderable\n}\n\n\/\/ New creates a new Go template Engine instance.\n\/\/\n\/\/ The FuncMap is initialized here. You may modify the FuncMap _prior to_ the\n\/\/ first invocation of Render.\n\/\/\n\/\/ The FuncMap sets all of the Sprig functions except for those that provide\n\/\/ access to the underlying OS (env, expandenv).\nfunc New() *Engine {\n\tf := FuncMap()\n\treturn &Engine{\n\t\tFuncMap: f,\n\t}\n}\n\n\/\/ FuncMap returns a mapping of all of the functions that Engine has.\n\/\/\n\/\/ Because some functions are late-bound (e.g. contain context-sensitive\n\/\/ data), the functions may not all perform identically outside of an\n\/\/ Engine as they will inside of an Engine.\n\/\/\n\/\/ Known late-bound functions:\n\/\/\n\/\/\t- \"include\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\n\/\/ - \"required\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\n\/\/ - \"tpl\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\nfunc FuncMap() template.FuncMap {\n\tf := sprig.TxtFuncMap()\n\tdelete(f, \"env\")\n\tdelete(f, \"expandenv\")\n\n\t\/\/ Add some extra functionality\n\textra := template.FuncMap{\n\t\t\"toToml\": chartutil.ToToml,\n\t\t\"toYaml\": chartutil.ToYaml,\n\t\t\"fromYaml\": chartutil.FromYaml,\n\t\t\"toJson\": chartutil.ToJson,\n\t\t\"fromJson\": chartutil.FromJson,\n\n\t\t\/\/ This is a placeholder for the \"include\" function, which is\n\t\t\/\/ late-bound to a template. By declaring it here, we preserve the\n\t\t\/\/ integrity of the linter.\n\t\t\"include\": func(string, interface{}) string { return \"not implemented\" },\n\t\t\"required\": func(string, interface{}) interface{} { return \"not implemented\" },\n\t\t\"tpl\": func(string, interface{}) interface{} { return \"not implemented\" },\n\t}\n\n\tfor k, v := range extra {\n\t\tf[k] = v\n\t}\n\n\treturn f\n}\n\n\/\/ Render takes a chart, optional values, and value overrides, and attempts to render the Go templates.\n\/\/\n\/\/ Render can be called repeatedly on the same engine.\n\/\/\n\/\/ This will look in the chart's 'templates' data (e.g. the 'templates\/' directory)\n\/\/ and attempt to render the templates there using the values passed in.\n\/\/\n\/\/ Values are scoped to their templates. A dependency template will not have\n\/\/ access to the values set for its parent. If chart \"foo\" includes chart \"bar\",\n\/\/ \"bar\" will not have access to the values for \"foo\".\n\/\/\n\/\/ Values should be prepared with something like `chartutils.ReadValues`.\n\/\/\n\/\/ Values are passed through the templates according to scope. If the top layer\n\/\/ chart includes the chart foo, which includes the chart bar, the values map\n\/\/ will be examined for a table called \"foo\". If \"foo\" is found in vals,\n\/\/ that section of the values will be passed into the \"foo\" chart. And if that\n\/\/ section contains a value named \"bar\", that value will be passed on to the\n\/\/ bar chart during render time.\nfunc (e *Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {\n\t\/\/ Render the charts\n\ttmap := allTemplates(chrt, values)\n\te.CurrentTemplates = tmap\n\treturn e.render(tmap)\n}\n\n\/\/ renderable is an object that can be rendered.\ntype renderable struct {\n\t\/\/ tpl is the current template.\n\ttpl string\n\t\/\/ vals are the values to be supplied to the template.\n\tvals chartutil.Values\n\t\/\/ namespace prefix to the templates of the current chart\n\tbasePath string\n}\n\n\/\/ alterFuncMap takes the Engine's FuncMap and adds context-specific functions.\n\/\/\n\/\/ The resulting FuncMap is only valid for the passed-in template.\nfunc (e *Engine) alterFuncMap(t *template.Template) template.FuncMap {\n\t\/\/ Clone the func map because we are adding context-specific functions.\n\tvar funcMap template.FuncMap = map[string]interface{}{}\n\tfor k, v := range e.FuncMap {\n\t\tfuncMap[k] = v\n\t}\n\n\t\/\/ Add the 'include' function here so we can close over t.\n\tfuncMap[\"include\"] = func(name string, data interface{}) (string, error) {\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tif err := t.ExecuteTemplate(buf, name, data); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn buf.String(), nil\n\t}\n\n\t\/\/ Add the 'required' function here\n\tfuncMap[\"required\"] = func(warn string, val interface{}) (interface{}, error) {\n\t\tif val == nil {\n\t\t\treturn val, fmt.Errorf(warn)\n\t\t} else if _, ok := val.(string); ok {\n\t\t\tif val == \"\" {\n\t\t\t\treturn val, fmt.Errorf(warn)\n\t\t\t}\n\t\t}\n\t\treturn val, nil\n\t}\n\n\t\/\/ Add the 'tpl' function here\n\tfuncMap[\"tpl\"] = func(tpl string, vals chartutil.Values) (string, error) {\n\t\tbasePath, err := vals.PathValue(\"Template.BasePath\")\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Cannot retrieve Template.Basepath from values inside tpl function: %s (%s)\", tpl, err.Error())\n\t\t}\n\n\t\tr := renderable{\n\t\t\ttpl: tpl,\n\t\t\tvals: vals,\n\t\t\tbasePath: basePath.(string),\n\t\t}\n\n\t\ttemplates := map[string]renderable{}\n\t\ttemplateName, err := vals.PathValue(\"Template.Name\")\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Cannot retrieve Template.Name from values inside tpl function: %s (%s)\", tpl, err.Error())\n\t\t}\n\n\t\ttemplates[templateName.(string)] = r\n\n\t\tresult, err := e.render(templates)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Error during tpl function execution for %q: %s\", tpl, err.Error())\n\t\t}\n\t\treturn result[templateName.(string)], nil\n\t}\n\n\treturn funcMap\n}\n\n\/\/ render takes a map of templates\/values and renders them.\nfunc (e *Engine) render(tpls map[string]renderable) (map[string]string, error) {\n\t\/\/ Basically, what we do here is start with an empty parent template and then\n\t\/\/ build up a list of templates -- one for each file. Once all of the templates\n\t\/\/ have been parsed, we loop through again and execute every template.\n\t\/\/\n\t\/\/ The idea with this process is to make it possible for more complex templates\n\t\/\/ to share common blocks, but to make the entire thing feel like a file-based\n\t\/\/ template engine.\n\tt := template.New(\"gotpl\")\n\tif e.Strict {\n\t\tt.Option(\"missingkey=error\")\n\t} else {\n\t\t\/\/ Not that zero will attempt to add default values for types it knows,\n\t\t\/\/ but will still emit <no value> for others. We mitigate that later.\n\t\tt.Option(\"missingkey=zero\")\n\t}\n\n\tfuncMap := e.alterFuncMap(t)\n\n\t\/\/ We want to parse the templates in a predictable order. The order favors\n\t\/\/ higher-level (in file system) templates over deeply nested templates.\n\tkeys := sortTemplates(tpls)\n\n\tfiles := []string{}\n\n\tfor _, fname := range keys {\n\t\tr := tpls[fname]\n\t\tt = t.New(fname).Funcs(funcMap)\n\t\tif _, err := t.Parse(r.tpl); err != nil {\n\t\t\treturn map[string]string{}, fmt.Errorf(\"parse error in %q: %s\", fname, err)\n\t\t}\n\t\tfiles = append(files, fname)\n\t}\n\n\t\/\/ Adding the engine's currentTemplates to the template context\n\t\/\/ so they can be referenced in the tpl function\n\tfor fname, r := range e.CurrentTemplates {\n\t\tif t.Lookup(fname) == nil {\n\t\t\tt = t.New(fname).Funcs(funcMap)\n\t\t\tif _, err := t.Parse(r.tpl); err != nil {\n\t\t\t\treturn map[string]string{}, fmt.Errorf(\"parse error in %q: %s\", fname, err)\n\t\t\t}\n\t\t}\n\t}\n\n\trendered := make(map[string]string, len(files))\n\tvar buf bytes.Buffer\n\tfor _, file := range files {\n\t\t\/\/ Don't render partials. We don't care out the direct output of partials.\n\t\t\/\/ They are only included from other templates.\n\t\tif strings.HasPrefix(path.Base(file), \"_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ At render time, add information about the template that is being rendered.\n\t\tvals := tpls[file].vals\n\t\tvals[\"Template\"] = map[string]interface{}{\"Name\": file, \"BasePath\": tpls[file].basePath}\n\t\tif err := t.ExecuteTemplate(&buf, file, vals); err != nil {\n\t\t\treturn map[string]string{}, fmt.Errorf(\"render error in %q: %s\", file, err)\n\t\t}\n\n\t\t\/\/ Work around the issue where Go will emit \"<no value>\" even if Options(missing=zero)\n\t\t\/\/ is set. Since missing=error will never get here, we do not need to handle\n\t\t\/\/ the Strict case.\n\t\trendered[file] = strings.Replace(buf.String(), \"<no value>\", \"\", -1)\n\t\tbuf.Reset()\n\t}\n\n\treturn rendered, nil\n}\n\nfunc sortTemplates(tpls map[string]renderable) []string {\n\tkeys := make([]string, len(tpls))\n\ti := 0\n\tfor key := range tpls {\n\t\tkeys[i] = key\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(byPathLen(keys)))\n\treturn keys\n}\n\ntype byPathLen []string\n\nfunc (p byPathLen) Len() int { return len(p) }\nfunc (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }\nfunc (p byPathLen) Less(i, j int) bool {\n\ta, b := p[i], p[j]\n\tca, cb := strings.Count(a, \"\/\"), strings.Count(b, \"\/\")\n\tif ca == cb {\n\t\treturn strings.Compare(a, b) == -1\n\t}\n\treturn ca < cb\n}\n\n\/\/ allTemplates returns all templates for a chart and its dependencies.\n\/\/\n\/\/ As it goes, it also prepares the values in a scope-sensitive manner.\nfunc allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {\n\ttemplates := map[string]renderable{}\n\trecAllTpls(c, templates, vals, true, \"\")\n\treturn templates\n}\n\n\/\/ recAllTpls recurses through the templates in a chart.\n\/\/\n\/\/ As it recurses, it also sets the values to be appropriate for the template\n\/\/ scope.\nfunc recAllTpls(c *chart.Chart, templates map[string]renderable, parentVals chartutil.Values, top bool, parentID string) {\n\t\/\/ This should never evaluate to a nil map. That will cause problems when\n\t\/\/ values are appended later.\n\tcvals := chartutil.Values{}\n\tif top {\n\t\t\/\/ If this is the top of the rendering tree, assume that parentVals\n\t\t\/\/ is already resolved to the authoritative values.\n\t\tcvals = parentVals\n\t} else if c.Metadata != nil && c.Metadata.Name != \"\" {\n\t\t\/\/ If there is a {{.Values.ThisChart}} in the parent metadata,\n\t\t\/\/ copy that into the {{.Values}} for this template.\n\t\tnewVals := chartutil.Values{}\n\t\tif vs, err := parentVals.Table(\"Values\"); err == nil {\n\t\t\tif tmp, err := vs.Table(c.Metadata.Name); err == nil {\n\t\t\t\tnewVals = tmp\n\t\t\t}\n\t\t}\n\n\t\tcvals = map[string]interface{}{\n\t\t\t\"Values\": newVals,\n\t\t\t\"Release\": parentVals[\"Release\"],\n\t\t\t\"Chart\": c.Metadata,\n\t\t\t\"Files\": chartutil.NewFiles(c.Files),\n\t\t\t\"Capabilities\": parentVals[\"Capabilities\"],\n\t\t}\n\t}\n\n\tnewParentID := c.Metadata.Name\n\tif parentID != \"\" {\n\t\t\/\/ We artificially reconstruct the chart path to child templates. This\n\t\t\/\/ creates a namespaced filename that can be used to track down the source\n\t\t\/\/ of a particular template declaration.\n\t\tnewParentID = path.Join(parentID, \"charts\", newParentID)\n\t}\n\n\tfor _, child := range c.Dependencies {\n\t\trecAllTpls(child, templates, cvals, false, newParentID)\n\t}\n\tfor _, t := range c.Templates {\n\t\ttemplates[path.Join(newParentID, t.Name)] = renderable{\n\t\t\ttpl: string(t.Data),\n\t\t\tvals: cvals,\n\t\t\tbasePath: path.Join(newParentID, \"templates\"),\n\t\t}\n\t}\n}\n<commit_msg>Added code to recover from a tiller pod crash in an event of template render failure<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage engine\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/Masterminds\/sprig\"\n\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n)\n\n\/\/ Engine is an implementation of 'cmd\/tiller\/environment'.Engine that uses Go templates.\ntype Engine struct {\n\t\/\/ FuncMap contains the template functions that will be passed to each\n\t\/\/ render call. This may only be modified before the first call to Render.\n\tFuncMap template.FuncMap\n\t\/\/ If strict is enabled, template rendering will fail if a template references\n\t\/\/ a value that was not passed in.\n\tStrict bool\n\tCurrentTemplates map[string]renderable\n}\n\n\/\/ New creates a new Go template Engine instance.\n\/\/\n\/\/ The FuncMap is initialized here. You may modify the FuncMap _prior to_ the\n\/\/ first invocation of Render.\n\/\/\n\/\/ The FuncMap sets all of the Sprig functions except for those that provide\n\/\/ access to the underlying OS (env, expandenv).\nfunc New() *Engine {\n\tf := FuncMap()\n\treturn &Engine{\n\t\tFuncMap: f,\n\t}\n}\n\n\/\/ FuncMap returns a mapping of all of the functions that Engine has.\n\/\/\n\/\/ Because some functions are late-bound (e.g. contain context-sensitive\n\/\/ data), the functions may not all perform identically outside of an\n\/\/ Engine as they will inside of an Engine.\n\/\/\n\/\/ Known late-bound functions:\n\/\/\n\/\/\t- \"include\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\n\/\/ - \"required\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\n\/\/ - \"tpl\": This is late-bound in Engine.Render(). The version\n\/\/\t included in the FuncMap is a placeholder.\nfunc FuncMap() template.FuncMap {\n\tf := sprig.TxtFuncMap()\n\tdelete(f, \"env\")\n\tdelete(f, \"expandenv\")\n\n\t\/\/ Add some extra functionality\n\textra := template.FuncMap{\n\t\t\"toToml\": chartutil.ToToml,\n\t\t\"toYaml\": chartutil.ToYaml,\n\t\t\"fromYaml\": chartutil.FromYaml,\n\t\t\"toJson\": chartutil.ToJson,\n\t\t\"fromJson\": chartutil.FromJson,\n\n\t\t\/\/ This is a placeholder for the \"include\" function, which is\n\t\t\/\/ late-bound to a template. By declaring it here, we preserve the\n\t\t\/\/ integrity of the linter.\n\t\t\"include\": func(string, interface{}) string { return \"not implemented\" },\n\t\t\"required\": func(string, interface{}) interface{} { return \"not implemented\" },\n\t\t\"tpl\": func(string, interface{}) interface{} { return \"not implemented\" },\n\t}\n\n\tfor k, v := range extra {\n\t\tf[k] = v\n\t}\n\n\treturn f\n}\n\n\/\/ Render takes a chart, optional values, and value overrides, and attempts to render the Go templates.\n\/\/\n\/\/ Render can be called repeatedly on the same engine.\n\/\/\n\/\/ This will look in the chart's 'templates' data (e.g. the 'templates\/' directory)\n\/\/ and attempt to render the templates there using the values passed in.\n\/\/\n\/\/ Values are scoped to their templates. A dependency template will not have\n\/\/ access to the values set for its parent. If chart \"foo\" includes chart \"bar\",\n\/\/ \"bar\" will not have access to the values for \"foo\".\n\/\/\n\/\/ Values should be prepared with something like `chartutils.ReadValues`.\n\/\/\n\/\/ Values are passed through the templates according to scope. If the top layer\n\/\/ chart includes the chart foo, which includes the chart bar, the values map\n\/\/ will be examined for a table called \"foo\". If \"foo\" is found in vals,\n\/\/ that section of the values will be passed into the \"foo\" chart. And if that\n\/\/ section contains a value named \"bar\", that value will be passed on to the\n\/\/ bar chart during render time.\nfunc (e *Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) {\n\t\/\/ Render the charts\n\ttmap := allTemplates(chrt, values)\n\te.CurrentTemplates = tmap\n\treturn e.render(tmap)\n}\n\n\/\/ renderable is an object that can be rendered.\ntype renderable struct {\n\t\/\/ tpl is the current template.\n\ttpl string\n\t\/\/ vals are the values to be supplied to the template.\n\tvals chartutil.Values\n\t\/\/ namespace prefix to the templates of the current chart\n\tbasePath string\n}\n\n\/\/ alterFuncMap takes the Engine's FuncMap and adds context-specific functions.\n\/\/\n\/\/ The resulting FuncMap is only valid for the passed-in template.\nfunc (e *Engine) alterFuncMap(t *template.Template) template.FuncMap {\n\t\/\/ Clone the func map because we are adding context-specific functions.\n\tvar funcMap template.FuncMap = map[string]interface{}{}\n\tfor k, v := range e.FuncMap {\n\t\tfuncMap[k] = v\n\t}\n\n\t\/\/ Add the 'include' function here so we can close over t.\n\tfuncMap[\"include\"] = func(name string, data interface{}) (string, error) {\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tif err := t.ExecuteTemplate(buf, name, data); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn buf.String(), nil\n\t}\n\n\t\/\/ Add the 'required' function here\n\tfuncMap[\"required\"] = func(warn string, val interface{}) (interface{}, error) {\n\t\tif val == nil {\n\t\t\treturn val, fmt.Errorf(warn)\n\t\t} else if _, ok := val.(string); ok {\n\t\t\tif val == \"\" {\n\t\t\t\treturn val, fmt.Errorf(warn)\n\t\t\t}\n\t\t}\n\t\treturn val, nil\n\t}\n\n\t\/\/ Add the 'tpl' function here\n\tfuncMap[\"tpl\"] = func(tpl string, vals chartutil.Values) (string, error) {\n\t\tbasePath, err := vals.PathValue(\"Template.BasePath\")\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Cannot retrieve Template.Basepath from values inside tpl function: %s (%s)\", tpl, err.Error())\n\t\t}\n\n\t\tr := renderable{\n\t\t\ttpl: tpl,\n\t\t\tvals: vals,\n\t\t\tbasePath: basePath.(string),\n\t\t}\n\n\t\ttemplates := map[string]renderable{}\n\t\ttemplateName, err := vals.PathValue(\"Template.Name\")\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Cannot retrieve Template.Name from values inside tpl function: %s (%s)\", tpl, err.Error())\n\t\t}\n\n\t\ttemplates[templateName.(string)] = r\n\n\t\tresult, err := e.render(templates)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Error during tpl function execution for %q: %s\", tpl, err.Error())\n\t\t}\n\t\treturn result[templateName.(string)], nil\n\t}\n\n\treturn funcMap\n}\n\n\/\/ render takes a map of templates\/values and renders them.\nfunc (e *Engine) render(tpls map[string]renderable) (map[string]string, error) {\n\t\/\/ Basically, what we do here is start with an empty parent template and then\n\t\/\/ build up a list of templates -- one for each file. Once all of the templates\n\t\/\/ have been parsed, we loop through again and execute every template.\n\t\/\/\n\t\/\/ The idea with this process is to make it possible for more complex templates\n\t\/\/ to share common blocks, but to make the entire thing feel like a file-based\n\t\/\/ template engine.\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Printf(\"rendering template failed: %v\\n\", err)\n\t\t}\n\t}()\n\tt := template.New(\"gotpl\")\n\tif e.Strict {\n\t\tt.Option(\"missingkey=error\")\n\t} else {\n\t\t\/\/ Not that zero will attempt to add default values for types it knows,\n\t\t\/\/ but will still emit <no value> for others. We mitigate that later.\n\t\tt.Option(\"missingkey=zero\")\n\t}\n\n\tfuncMap := e.alterFuncMap(t)\n\n\t\/\/ We want to parse the templates in a predictable order. The order favors\n\t\/\/ higher-level (in file system) templates over deeply nested templates.\n\tkeys := sortTemplates(tpls)\n\n\tfiles := []string{}\n\n\tfor _, fname := range keys {\n\t\tr := tpls[fname]\n\t\tt = t.New(fname).Funcs(funcMap)\n\t\tif _, err := t.Parse(r.tpl); err != nil {\n\t\t\treturn map[string]string{}, fmt.Errorf(\"parse error in %q: %s\", fname, err)\n\t\t}\n\t\tfiles = append(files, fname)\n\t}\n\n\t\/\/ Adding the engine's currentTemplates to the template context\n\t\/\/ so they can be referenced in the tpl function\n\tfor fname, r := range e.CurrentTemplates {\n\t\tif t.Lookup(fname) == nil {\n\t\t\tt = t.New(fname).Funcs(funcMap)\n\t\t\tif _, err := t.Parse(r.tpl); err != nil {\n\t\t\t\treturn map[string]string{}, fmt.Errorf(\"parse error in %q: %s\", fname, err)\n\t\t\t}\n\t\t}\n\t}\n\n\trendered := make(map[string]string, len(files))\n\tvar buf bytes.Buffer\n\tfor _, file := range files {\n\t\t\/\/ Don't render partials. We don't care out the direct output of partials.\n\t\t\/\/ They are only included from other templates.\n\t\tif strings.HasPrefix(path.Base(file), \"_\") {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ At render time, add information about the template that is being rendered.\n\t\tvals := tpls[file].vals\n\t\tvals[\"Template\"] = map[string]interface{}{\"Name\": file, \"BasePath\": tpls[file].basePath}\n\t\tif err := t.ExecuteTemplate(&buf, file, vals); err != nil {\n\t\t\treturn map[string]string{}, fmt.Errorf(\"render error in %q: %s\", file, err)\n\t\t}\n\n\t\t\/\/ Work around the issue where Go will emit \"<no value>\" even if Options(missing=zero)\n\t\t\/\/ is set. Since missing=error will never get here, we do not need to handle\n\t\t\/\/ the Strict case.\n\t\trendered[file] = strings.Replace(buf.String(), \"<no value>\", \"\", -1)\n\t\tbuf.Reset()\n\t}\n\n\treturn rendered, nil\n}\n\nfunc sortTemplates(tpls map[string]renderable) []string {\n\tkeys := make([]string, len(tpls))\n\ti := 0\n\tfor key := range tpls {\n\t\tkeys[i] = key\n\t\ti++\n\t}\n\tsort.Sort(sort.Reverse(byPathLen(keys)))\n\treturn keys\n}\n\ntype byPathLen []string\n\nfunc (p byPathLen) Len() int { return len(p) }\nfunc (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }\nfunc (p byPathLen) Less(i, j int) bool {\n\ta, b := p[i], p[j]\n\tca, cb := strings.Count(a, \"\/\"), strings.Count(b, \"\/\")\n\tif ca == cb {\n\t\treturn strings.Compare(a, b) == -1\n\t}\n\treturn ca < cb\n}\n\n\/\/ allTemplates returns all templates for a chart and its dependencies.\n\/\/\n\/\/ As it goes, it also prepares the values in a scope-sensitive manner.\nfunc allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {\n\ttemplates := map[string]renderable{}\n\trecAllTpls(c, templates, vals, true, \"\")\n\treturn templates\n}\n\n\/\/ recAllTpls recurses through the templates in a chart.\n\/\/\n\/\/ As it recurses, it also sets the values to be appropriate for the template\n\/\/ scope.\nfunc recAllTpls(c *chart.Chart, templates map[string]renderable, parentVals chartutil.Values, top bool, parentID string) {\n\t\/\/ This should never evaluate to a nil map. That will cause problems when\n\t\/\/ values are appended later.\n\tcvals := chartutil.Values{}\n\tif top {\n\t\t\/\/ If this is the top of the rendering tree, assume that parentVals\n\t\t\/\/ is already resolved to the authoritative values.\n\t\tcvals = parentVals\n\t} else if c.Metadata != nil && c.Metadata.Name != \"\" {\n\t\t\/\/ If there is a {{.Values.ThisChart}} in the parent metadata,\n\t\t\/\/ copy that into the {{.Values}} for this template.\n\t\tnewVals := chartutil.Values{}\n\t\tif vs, err := parentVals.Table(\"Values\"); err == nil {\n\t\t\tif tmp, err := vs.Table(c.Metadata.Name); err == nil {\n\t\t\t\tnewVals = tmp\n\t\t\t}\n\t\t}\n\n\t\tcvals = map[string]interface{}{\n\t\t\t\"Values\": newVals,\n\t\t\t\"Release\": parentVals[\"Release\"],\n\t\t\t\"Chart\": c.Metadata,\n\t\t\t\"Files\": chartutil.NewFiles(c.Files),\n\t\t\t\"Capabilities\": parentVals[\"Capabilities\"],\n\t\t}\n\t}\n\n\tnewParentID := c.Metadata.Name\n\tif parentID != \"\" {\n\t\t\/\/ We artificially reconstruct the chart path to child templates. This\n\t\t\/\/ creates a namespaced filename that can be used to track down the source\n\t\t\/\/ of a particular template declaration.\n\t\tnewParentID = path.Join(parentID, \"charts\", newParentID)\n\t}\n\n\tfor _, child := range c.Dependencies {\n\t\trecAllTpls(child, templates, cvals, false, newParentID)\n\t}\n\tfor _, t := range c.Templates {\n\t\ttemplates[path.Join(newParentID, t.Name)] = renderable{\n\t\t\ttpl: string(t.Data),\n\t\t\tvals: cvals,\n\t\t\tbasePath: path.Join(newParentID, \"templates\"),\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package jobs\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n)\n\ntype (\n\t\/\/ memQueue is a queue in-memory implementation of the Queue interface.\n\tmemQueue struct {\n\t\tMaxCapacity int\n\t\tJobs chan Job\n\n\t\tlist *list.List\n\t\trun bool\n\t\tjmu sync.RWMutex\n\t}\n\n\t\/\/ memBroker is an in-memory broker implementation of the Broker interface.\n\tmemBroker struct {\n\t\tqueues map[string]*memQueue\n\t}\n)\n\n\/\/ globalStorage is the global job persistence layer used thoughout the stack.\nvar globalStorage = &couchStorage{couchdb.GlobalJobsDB}\n\n\/\/ newMemQueue creates and a new in-memory queue.\nfunc newMemQueue(workerType string) *memQueue {\n\treturn &memQueue{\n\t\tlist: list.New(),\n\t\tJobs: make(chan Job),\n\t}\n}\n\n\/\/ Enqueue into the queue\nfunc (q *memQueue) Enqueue(job Job) error {\n\tq.jmu.Lock()\n\tdefer q.jmu.Unlock()\n\tq.list.PushBack(job)\n\tif !q.run {\n\t\tq.run = true\n\t\tgo q.send()\n\t}\n\treturn nil\n}\n\nfunc (q *memQueue) send() {\n\tfor {\n\t\tq.jmu.Lock()\n\t\te := q.list.Front()\n\t\tif e == nil {\n\t\t\tq.run = false\n\t\t\tq.jmu.Unlock()\n\t\t\treturn\n\t\t}\n\t\tq.list.Remove(e)\n\t\tq.jmu.Unlock()\n\t\tq.Jobs <- e.Value.(Job)\n\t}\n}\n\n\/\/ Len returns the length of the queue\nfunc (q *memQueue) Len() int {\n\tq.jmu.RLock()\n\tdefer q.jmu.RUnlock()\n\treturn q.list.Len()\n}\n\n\/\/ NewMemBroker creates a new in-memory broker system.\n\/\/\n\/\/ The in-memory implementation of the job system has the specifity that\n\/\/ workers are actually launched by the broker at its creation.\nfunc NewMemBroker(nbWorkers int, ws WorkersList) Broker {\n\tsetNbSlots(nbWorkers)\n\tqueues := make(map[string]*memQueue)\n\tfor workerType, conf := range ws {\n\t\tq := newMemQueue(workerType)\n\t\tqueues[workerType] = q\n\t\tw := &Worker{\n\t\t\tType: workerType,\n\t\t\tConf: conf,\n\t\t}\n\t\tw.Start(q.Jobs)\n\t}\n\treturn &memBroker{queues: queues}\n}\n\n\/\/ PushJob will produce a new Job with the given options and enqueue the job in\n\/\/ the proper queue.\nfunc (b *memBroker) PushJob(req *JobRequest) (*JobInfos, error) {\n\tworkerType := req.WorkerType\n\tq, ok := b.queues[workerType]\n\tif !ok {\n\t\treturn nil, ErrUnknownWorker\n\t}\n\tinfos := NewJobInfos(req)\n\tj := Job{\n\t\tinfos: infos,\n\t\tstorage: globalStorage,\n\t}\n\tif err := globalStorage.Create(infos); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := q.Enqueue(j); err != nil {\n\t\treturn nil, err\n\t}\n\treturn infos, nil\n}\n\n\/\/ QueueLen returns the size of the number of elements in queue of the\n\/\/ specified worker type.\nfunc (b *memBroker) QueueLen(workerType string) (int, error) {\n\tq, ok := b.queues[workerType]\n\tif !ok {\n\t\treturn 0, ErrUnknownWorker\n\t}\n\treturn q.Len(), nil\n}\n\n\/\/ GetJobInfos returns the informations about a job.\nfunc (b *memBroker) GetJobInfos(domain, jobID string) (*JobInfos, error) {\n\treturn globalStorage.Get(domain, jobID)\n}\n\nvar (\n\t_ Broker = &memBroker{}\n)\n<commit_msg>Fix publishing jobs to realtime with single couchdb storage<commit_after>package jobs\n\nimport (\n\t\"container\/list\"\n\t\"sync\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/realtime\"\n)\n\ntype (\n\t\/\/ memQueue is a queue in-memory implementation of the Queue interface.\n\tmemQueue struct {\n\t\tMaxCapacity int\n\t\tJobs chan Job\n\n\t\tlist *list.List\n\t\trun bool\n\t\tjmu sync.RWMutex\n\t}\n\n\t\/\/ memBroker is an in-memory broker implementation of the Broker interface.\n\tmemBroker struct {\n\t\tqueues map[string]*memQueue\n\t}\n)\n\n\/\/ globalStorage is the global job persistence layer used thoughout the stack.\nvar globalStorage = &couchStorage{couchdb.GlobalJobsDB}\n\n\/\/ newMemQueue creates and a new in-memory queue.\nfunc newMemQueue(workerType string) *memQueue {\n\treturn &memQueue{\n\t\tlist: list.New(),\n\t\tJobs: make(chan Job),\n\t}\n}\n\n\/\/ Enqueue into the queue\nfunc (q *memQueue) Enqueue(job Job) error {\n\tq.jmu.Lock()\n\tdefer q.jmu.Unlock()\n\tq.list.PushBack(job)\n\tif !q.run {\n\t\tq.run = true\n\t\tgo q.send()\n\t}\n\treturn nil\n}\n\nfunc (q *memQueue) send() {\n\tfor {\n\t\tq.jmu.Lock()\n\t\te := q.list.Front()\n\t\tif e == nil {\n\t\t\tq.run = false\n\t\t\tq.jmu.Unlock()\n\t\t\treturn\n\t\t}\n\t\tq.list.Remove(e)\n\t\tq.jmu.Unlock()\n\t\tq.Jobs <- e.Value.(Job)\n\t}\n}\n\n\/\/ Len returns the length of the queue\nfunc (q *memQueue) Len() int {\n\tq.jmu.RLock()\n\tdefer q.jmu.RUnlock()\n\treturn q.list.Len()\n}\n\n\/\/ NewMemBroker creates a new in-memory broker system.\n\/\/\n\/\/ The in-memory implementation of the job system has the specifity that\n\/\/ workers are actually launched by the broker at its creation.\nfunc NewMemBroker(nbWorkers int, ws WorkersList) Broker {\n\tsetNbSlots(nbWorkers)\n\tqueues := make(map[string]*memQueue)\n\tfor workerType, conf := range ws {\n\t\tq := newMemQueue(workerType)\n\t\tqueues[workerType] = q\n\t\tw := &Worker{\n\t\t\tType: workerType,\n\t\t\tConf: conf,\n\t\t}\n\t\tw.Start(q.Jobs)\n\t}\n\treturn &memBroker{queues: queues}\n}\n\n\/\/ PushJob will produce a new Job with the given options and enqueue the job in\n\/\/ the proper queue.\nfunc (b *memBroker) PushJob(req *JobRequest) (*JobInfos, error) {\n\tworkerType := req.WorkerType\n\tq, ok := b.queues[workerType]\n\tif !ok {\n\t\treturn nil, ErrUnknownWorker\n\t}\n\tinfos := NewJobInfos(req)\n\tj := Job{\n\t\tinfos: infos,\n\t\tstorage: globalStorage,\n\t}\n\tif err := globalStorage.Create(infos); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := q.Enqueue(j); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Writing in couchdb should be enough to publish this event,\n\t\/\/ but it is not published on right domain, so we publish it again.\n\trealtime.GetHub().Publish(&realtime.Event{\n\t\tVerb: realtime.EventCreate,\n\t\tDoc: infos.Clone(),\n\t\tDomain: infos.Domain,\n\t})\n\treturn infos, nil\n}\n\n\/\/ QueueLen returns the size of the number of elements in queue of the\n\/\/ specified worker type.\nfunc (b *memBroker) QueueLen(workerType string) (int, error) {\n\tq, ok := b.queues[workerType]\n\tif !ok {\n\t\treturn 0, ErrUnknownWorker\n\t}\n\treturn q.Len(), nil\n}\n\n\/\/ GetJobInfos returns the informations about a job.\nfunc (b *memBroker) GetJobInfos(domain, jobID string) (*JobInfos, error) {\n\treturn globalStorage.Get(domain, jobID)\n}\n\nvar (\n\t_ Broker = &memBroker{}\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package syserr contains sandbox-internal errors. These errors are distinct\n\/\/ from both the errors returned by host system calls and the errors returned\n\/\/ to sandboxed applications.\npackage syserr\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/syserror\"\n)\n\n\/\/ Error represents an internal error.\ntype Error struct {\n\tstring\n}\n\n\/\/ New creates a new Error and adds a translation for it.\nfunc New(message string, linuxTranslation *linux.Errno) *Error {\n\terr := &Error{message}\n\tlinuxABITranslations[err] = linuxTranslation\n\n\t\/\/ TODO: Remove this.\n\tif linuxTranslation == nil {\n\t\tlinuxBackwardsTranslations[err] = nil\n\t} else {\n\t\te := error(syscall.Errno(linuxTranslation.Number()))\n\t\t\/\/ syserror.ErrWouldBlock gets translated to syserror.EWOULDBLOCK and\n\t\t\/\/ enables proper blocking semantics. This should temporary address the\n\t\t\/\/ class of blocking bugs that keep popping up with the current state of\n\t\t\/\/ the error space.\n\t\tif e == syserror.EWOULDBLOCK {\n\t\t\te = syserror.ErrWouldBlock\n\t\t}\n\t\tlinuxBackwardsTranslations[err] = e\n\t}\n\n\treturn err\n}\n\n\/\/ NewWithoutTranslation creates a new Error. If translation is attempted on\n\/\/ the error, translation will fail.\nfunc NewWithoutTranslation(message string) *Error {\n\treturn &Error{message}\n}\n\n\/\/ String implements fmt.Stringer.String.\nfunc (e *Error) String() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.string\n}\n\n\/\/ TODO: Remove this.\nvar linuxBackwardsTranslations = map[*Error]error{}\n\n\/\/ ToError translates an Error to a corresponding error value.\n\/\/\n\/\/ TODO: Remove this.\nfunc (e *Error) ToError() error {\n\tif e == nil {\n\t\treturn nil\n\t}\n\terr, ok := linuxBackwardsTranslations[e]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"unknown error: %q\", e.string))\n\t}\n\treturn err\n}\n\n\/\/ TODO: Remove or replace most of these errors.\n\/\/\n\/\/ Some of the errors should be replaced with package specific errors and\n\/\/ others should be removed entirely.\nvar (\n\tErrNotPermitted = New(\"operation not permitted\", linux.EPERM)\n\tErrNoFileOrDir = New(\"no such file or directory\", linux.ENOENT)\n\tErrNoProcess = New(\"no such process\", linux.ESRCH)\n\tErrInterrupted = New(\"interrupted system call\", linux.EINTR)\n\tErrIO = New(\"I\/O error\", linux.EIO)\n\tErrDeviceOrAddress = New(\"no such device or address\", linux.ENXIO)\n\tErrTooManyArgs = New(\"argument list too long\", linux.E2BIG)\n\tErrEcec = New(\"exec format error\", linux.ENOEXEC)\n\tErrBadFD = New(\"bad file number\", linux.EBADF)\n\tErrNoChild = New(\"no child processes\", linux.ECHILD)\n\tErrTryAgain = New(\"try again\", linux.EAGAIN)\n\tErrNoMemory = New(\"out of memory\", linux.ENOMEM)\n\tErrPermissionDenied = New(\"permission denied\", linux.EACCES)\n\tErrBadAddress = New(\"bad address\", linux.EFAULT)\n\tErrNotBlockDevice = New(\"block device required\", linux.ENOTBLK)\n\tErrBusy = New(\"device or resource busy\", linux.EBUSY)\n\tErrExists = New(\"file exists\", linux.EEXIST)\n\tErrCrossDeviceLink = New(\"cross-device link\", linux.EXDEV)\n\tErrNoDevice = New(\"no such device\", linux.ENODEV)\n\tErrNotDir = New(\"not a directory\", linux.ENOTDIR)\n\tErrIsDir = New(\"is a directory\", linux.EISDIR)\n\tErrInvalidArgument = New(\"invalid argument\", linux.EINVAL)\n\tErrFileTableOverflow = New(\"file table overflow\", linux.ENFILE)\n\tErrTooManyOpenFiles = New(\"too many open files\", linux.EMFILE)\n\tErrNotTTY = New(\"not a typewriter\", linux.ENOTTY)\n\tErrTestFileBusy = New(\"text file busy\", linux.ETXTBSY)\n\tErrFileTooBig = New(\"file too large\", linux.EFBIG)\n\tErrNoSpace = New(\"no space left on device\", linux.ENOSPC)\n\tErrIllegalSeek = New(\"illegal seek\", linux.ESPIPE)\n\tErrReadOnlyFS = New(\"read-only file system\", linux.EROFS)\n\tErrTooManyLinks = New(\"too many links\", linux.EMLINK)\n\tErrBrokenPipe = New(\"broken pipe\", linux.EPIPE)\n\tErrDomain = New(\"math argument out of domain of func\", linux.EDOM)\n\tErrRange = New(\"math result not representable\", linux.ERANGE)\n\tErrDeadlock = New(\"resource deadlock would occur\", linux.EDEADLOCK)\n\tErrNameTooLong = New(\"file name too long\", linux.ENAMETOOLONG)\n\tErrNoLocksAvailable = New(\"no record locks available\", linux.ENOLCK)\n\tErrInvalidSyscall = New(\"invalid system call number\", linux.ENOSYS)\n\tErrDirNotEmpty = New(\"directory not empty\", linux.ENOTEMPTY)\n\tErrLinkLoop = New(\"too many symbolic links encountered\", linux.ELOOP)\n\tErrWouldBlock = New(\"operation would block\", linux.EWOULDBLOCK)\n\tErrNoMessage = New(\"no message of desired type\", linux.ENOMSG)\n\tErrIdentifierRemoved = New(\"identifier removed\", linux.EIDRM)\n\tErrChannelOutOfRange = New(\"channel number out of range\", linux.ECHRNG)\n\tErrLevelTwoNotSynced = New(\"level 2 not synchronized\", linux.EL2NSYNC)\n\tErrLevelThreeHalted = New(\"level 3 halted\", linux.EL3HLT)\n\tErrLevelThreeReset = New(\"level 3 reset\", linux.EL3RST)\n\tErrLinkNumberOutOfRange = New(\"link number out of range\", linux.ELNRNG)\n\tErrProtocolDriverNotAttached = New(\"protocol driver not attached\", linux.EUNATCH)\n\tErrNoCSIAvailable = New(\"no CSI structure available\", linux.ENOCSI)\n\tErrLevelTwoHalted = New(\"level 2 halted\", linux.EL2HLT)\n\tErrInvalidExchange = New(\"invalid exchange\", linux.EBADE)\n\tErrInvalidRequestDescriptor = New(\"invalid request descriptor\", linux.EBADR)\n\tErrExchangeFull = New(\"exchange full\", linux.EXFULL)\n\tErrNoAnode = New(\"no anode\", linux.ENOANO)\n\tErrInvalidRequestCode = New(\"invalid request code\", linux.EBADRQC)\n\tErrInvalidSlot = New(\"invalid slot\", linux.EBADSLT)\n\tErrBadFontFile = New(\"bad font file format\", linux.EBFONT)\n\tErrNotStream = New(\"device not a stream\", linux.ENOSTR)\n\tErrNoDataAvailable = New(\"no data available\", linux.ENODATA)\n\tErrTimerExpired = New(\"timer expired\", linux.ETIME)\n\tErrStreamsResourceDepleted = New(\"out of streams resources\", linux.ENOSR)\n\tErrMachineNotOnNetwork = New(\"machine is not on the network\", linux.ENONET)\n\tErrPackageNotInstalled = New(\"package not installed\", linux.ENOPKG)\n\tErrIsRemote = New(\"object is remote\", linux.EREMOTE)\n\tErrNoLink = New(\"link has been severed\", linux.ENOLINK)\n\tErrAdvertise = New(\"advertise error\", linux.EADV)\n\tErrSRMount = New(\"srmount error\", linux.ESRMNT)\n\tErrSendCommunication = New(\"communication error on send\", linux.ECOMM)\n\tErrProtocol = New(\"protocol error\", linux.EPROTO)\n\tErrMultihopAttempted = New(\"multihop attempted\", linux.EMULTIHOP)\n\tErrRFS = New(\"RFS specific error\", linux.EDOTDOT)\n\tErrInvalidDataMessage = New(\"not a data message\", linux.EBADMSG)\n\tErrOverflow = New(\"value too large for defined data type\", linux.EOVERFLOW)\n\tErrNetworkNameNotUnique = New(\"name not unique on network\", linux.ENOTUNIQ)\n\tErrFDInBadState = New(\"file descriptor in bad state\", linux.EBADFD)\n\tErrRemoteAddressChanged = New(\"remote address changed\", linux.EREMCHG)\n\tErrSharedLibraryInaccessible = New(\"can not access a needed shared library\", linux.ELIBACC)\n\tErrCorruptedSharedLibrary = New(\"accessing a corrupted shared library\", linux.ELIBBAD)\n\tErrLibSectionCorrupted = New(\".lib section in a.out corrupted\", linux.ELIBSCN)\n\tErrTooManySharedLibraries = New(\"attempting to link in too many shared libraries\", linux.ELIBMAX)\n\tErrSharedLibraryExeced = New(\"cannot exec a shared library directly\", linux.ELIBEXEC)\n\tErrIllegalByteSequence = New(\"illegal byte sequence\", linux.EILSEQ)\n\tErrShouldRestart = New(\"interrupted system call should be restarted\", linux.ERESTART)\n\tErrStreamPipe = New(\"streams pipe error\", linux.ESTRPIPE)\n\tErrTooManyUsers = New(\"too many users\", linux.EUSERS)\n\tErrNotASocket = New(\"socket operation on non-socket\", linux.ENOTSOCK)\n\tErrDestinationAddressRequired = New(\"destination address required\", linux.EDESTADDRREQ)\n\tErrMessageTooLong = New(\"message too long\", linux.EMSGSIZE)\n\tErrWrongProtocolForSocket = New(\"protocol wrong type for socket\", linux.EPROTOTYPE)\n\tErrProtocolNotAvailable = New(\"protocol not available\", linux.ENOPROTOOPT)\n\tErrProtocolNotSupported = New(\"protocol not supported\", linux.EPROTONOSUPPORT)\n\tErrSocketNotSupported = New(\"socket type not supported\", linux.ESOCKTNOSUPPORT)\n\tErrEndpointOperation = New(\"operation not supported on transport endpoint\", linux.EOPNOTSUPP)\n\tErrProtocolFamilyNotSupported = New(\"protocol family not supported\", linux.EPFNOSUPPORT)\n\tErrAddressFamilyNotSupported = New(\"address family not supported by protocol\", linux.EAFNOSUPPORT)\n\tErrAddressInUse = New(\"address already in use\", linux.EADDRINUSE)\n\tErrAddressNotAvailable = New(\"cannot assign requested address\", linux.EADDRNOTAVAIL)\n\tErrNetworkDown = New(\"network is down\", linux.ENETDOWN)\n\tErrNetworkUnreachable = New(\"network is unreachable\", linux.ENETUNREACH)\n\tErrNetworkReset = New(\"network dropped connection because of reset\", linux.ENETRESET)\n\tErrConnectionAborted = New(\"software caused connection abort\", linux.ECONNABORTED)\n\tErrConnectionReset = New(\"connection reset by peer\", linux.ECONNRESET)\n\tErrNoBufferSpace = New(\"no buffer space available\", linux.ENOBUFS)\n\tErrAlreadyConnected = New(\"transport endpoint is already connected\", linux.EISCONN)\n\tErrNotConnected = New(\"transport endpoint is not connected\", linux.ENOTCONN)\n\tErrShutdown = New(\"cannot send after transport endpoint shutdown\", linux.ESHUTDOWN)\n\tErrTooManyRefs = New(\"too many references: cannot splice\", linux.ETOOMANYREFS)\n\tErrTimedOut = New(\"connection timed out\", linux.ETIMEDOUT)\n\tErrConnectionRefused = New(\"connection refused\", linux.ECONNREFUSED)\n\tErrHostDown = New(\"host is down\", linux.EHOSTDOWN)\n\tErrNoRoute = New(\"no route to host\", linux.EHOSTUNREACH)\n\tErrAlreadyInProgress = New(\"operation already in progress\", linux.EALREADY)\n\tErrInProgress = New(\"operation now in progress\", linux.EINPROGRESS)\n\tErrStaleFileHandle = New(\"stale file handle\", linux.ESTALE)\n\tErrStructureNeedsCleaning = New(\"structure needs cleaning\", linux.EUCLEAN)\n\tErrIsNamedFile = New(\"is a named type file\", linux.ENOTNAM)\n\tErrRemoteIO = New(\"remote I\/O error\", linux.EREMOTEIO)\n\tErrQuotaExceeded = New(\"quota exceeded\", linux.EDQUOT)\n\tErrNoMedium = New(\"no medium found\", linux.ENOMEDIUM)\n\tErrWrongMediumType = New(\"wrong medium type\", linux.EMEDIUMTYPE)\n\tErrCanceled = New(\"operation Canceled\", linux.ECANCELED)\n\tErrNoKey = New(\"required key not available\", linux.ENOKEY)\n\tErrKeyExpired = New(\"key has expired\", linux.EKEYEXPIRED)\n\tErrKeyRevoked = New(\"key has been revoked\", linux.EKEYREVOKED)\n\tErrKeyRejected = New(\"key was rejected by service\", linux.EKEYREJECTED)\n\tErrOwnerDied = New(\"owner died\", linux.EOWNERDEAD)\n\tErrNotRecoverable = New(\"state not recoverable\", linux.ENOTRECOVERABLE)\n)\n\n\/\/ FromError converts a generic error to an *Error.\n\/\/\n\/\/ TODO: Remove this function.\nfunc FromError(err error) *Error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif errno, ok := err.(syscall.Errno); ok {\n\t\treturn FromHost(errno)\n\t}\n\tif errno, ok := syserror.TranslateError(err); ok {\n\t\treturn FromHost(errno)\n\t}\n\tpanic(\"unknown error: \" + err.Error())\n}\n<commit_msg>Clarify that syserr.New must only be called during init<commit_after>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package syserr contains sandbox-internal errors. These errors are distinct\n\/\/ from both the errors returned by host system calls and the errors returned\n\/\/ to sandboxed applications.\npackage syserr\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/syserror\"\n)\n\n\/\/ Error represents an internal error.\ntype Error struct {\n\tstring\n}\n\n\/\/ New creates a new Error and adds a translation for it.\n\/\/\n\/\/ New must only be called at init.\nfunc New(message string, linuxTranslation *linux.Errno) *Error {\n\terr := &Error{message}\n\tlinuxABITranslations[err] = linuxTranslation\n\n\t\/\/ TODO: Remove this.\n\tif linuxTranslation == nil {\n\t\tlinuxBackwardsTranslations[err] = nil\n\t} else {\n\t\te := error(syscall.Errno(linuxTranslation.Number()))\n\t\t\/\/ syserror.ErrWouldBlock gets translated to syserror.EWOULDBLOCK and\n\t\t\/\/ enables proper blocking semantics. This should temporary address the\n\t\t\/\/ class of blocking bugs that keep popping up with the current state of\n\t\t\/\/ the error space.\n\t\tif e == syserror.EWOULDBLOCK {\n\t\t\te = syserror.ErrWouldBlock\n\t\t}\n\t\tlinuxBackwardsTranslations[err] = e\n\t}\n\n\treturn err\n}\n\n\/\/ NewWithoutTranslation creates a new Error. If translation is attempted on\n\/\/ the error, translation will fail.\nfunc NewWithoutTranslation(message string) *Error {\n\treturn &Error{message}\n}\n\n\/\/ String implements fmt.Stringer.String.\nfunc (e *Error) String() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.string\n}\n\n\/\/ TODO: Remove this.\nvar linuxBackwardsTranslations = map[*Error]error{}\n\n\/\/ ToError translates an Error to a corresponding error value.\n\/\/\n\/\/ TODO: Remove this.\nfunc (e *Error) ToError() error {\n\tif e == nil {\n\t\treturn nil\n\t}\n\terr, ok := linuxBackwardsTranslations[e]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"unknown error: %q\", e.string))\n\t}\n\treturn err\n}\n\n\/\/ TODO: Remove or replace most of these errors.\n\/\/\n\/\/ Some of the errors should be replaced with package specific errors and\n\/\/ others should be removed entirely.\nvar (\n\tErrNotPermitted = New(\"operation not permitted\", linux.EPERM)\n\tErrNoFileOrDir = New(\"no such file or directory\", linux.ENOENT)\n\tErrNoProcess = New(\"no such process\", linux.ESRCH)\n\tErrInterrupted = New(\"interrupted system call\", linux.EINTR)\n\tErrIO = New(\"I\/O error\", linux.EIO)\n\tErrDeviceOrAddress = New(\"no such device or address\", linux.ENXIO)\n\tErrTooManyArgs = New(\"argument list too long\", linux.E2BIG)\n\tErrEcec = New(\"exec format error\", linux.ENOEXEC)\n\tErrBadFD = New(\"bad file number\", linux.EBADF)\n\tErrNoChild = New(\"no child processes\", linux.ECHILD)\n\tErrTryAgain = New(\"try again\", linux.EAGAIN)\n\tErrNoMemory = New(\"out of memory\", linux.ENOMEM)\n\tErrPermissionDenied = New(\"permission denied\", linux.EACCES)\n\tErrBadAddress = New(\"bad address\", linux.EFAULT)\n\tErrNotBlockDevice = New(\"block device required\", linux.ENOTBLK)\n\tErrBusy = New(\"device or resource busy\", linux.EBUSY)\n\tErrExists = New(\"file exists\", linux.EEXIST)\n\tErrCrossDeviceLink = New(\"cross-device link\", linux.EXDEV)\n\tErrNoDevice = New(\"no such device\", linux.ENODEV)\n\tErrNotDir = New(\"not a directory\", linux.ENOTDIR)\n\tErrIsDir = New(\"is a directory\", linux.EISDIR)\n\tErrInvalidArgument = New(\"invalid argument\", linux.EINVAL)\n\tErrFileTableOverflow = New(\"file table overflow\", linux.ENFILE)\n\tErrTooManyOpenFiles = New(\"too many open files\", linux.EMFILE)\n\tErrNotTTY = New(\"not a typewriter\", linux.ENOTTY)\n\tErrTestFileBusy = New(\"text file busy\", linux.ETXTBSY)\n\tErrFileTooBig = New(\"file too large\", linux.EFBIG)\n\tErrNoSpace = New(\"no space left on device\", linux.ENOSPC)\n\tErrIllegalSeek = New(\"illegal seek\", linux.ESPIPE)\n\tErrReadOnlyFS = New(\"read-only file system\", linux.EROFS)\n\tErrTooManyLinks = New(\"too many links\", linux.EMLINK)\n\tErrBrokenPipe = New(\"broken pipe\", linux.EPIPE)\n\tErrDomain = New(\"math argument out of domain of func\", linux.EDOM)\n\tErrRange = New(\"math result not representable\", linux.ERANGE)\n\tErrDeadlock = New(\"resource deadlock would occur\", linux.EDEADLOCK)\n\tErrNameTooLong = New(\"file name too long\", linux.ENAMETOOLONG)\n\tErrNoLocksAvailable = New(\"no record locks available\", linux.ENOLCK)\n\tErrInvalidSyscall = New(\"invalid system call number\", linux.ENOSYS)\n\tErrDirNotEmpty = New(\"directory not empty\", linux.ENOTEMPTY)\n\tErrLinkLoop = New(\"too many symbolic links encountered\", linux.ELOOP)\n\tErrWouldBlock = New(\"operation would block\", linux.EWOULDBLOCK)\n\tErrNoMessage = New(\"no message of desired type\", linux.ENOMSG)\n\tErrIdentifierRemoved = New(\"identifier removed\", linux.EIDRM)\n\tErrChannelOutOfRange = New(\"channel number out of range\", linux.ECHRNG)\n\tErrLevelTwoNotSynced = New(\"level 2 not synchronized\", linux.EL2NSYNC)\n\tErrLevelThreeHalted = New(\"level 3 halted\", linux.EL3HLT)\n\tErrLevelThreeReset = New(\"level 3 reset\", linux.EL3RST)\n\tErrLinkNumberOutOfRange = New(\"link number out of range\", linux.ELNRNG)\n\tErrProtocolDriverNotAttached = New(\"protocol driver not attached\", linux.EUNATCH)\n\tErrNoCSIAvailable = New(\"no CSI structure available\", linux.ENOCSI)\n\tErrLevelTwoHalted = New(\"level 2 halted\", linux.EL2HLT)\n\tErrInvalidExchange = New(\"invalid exchange\", linux.EBADE)\n\tErrInvalidRequestDescriptor = New(\"invalid request descriptor\", linux.EBADR)\n\tErrExchangeFull = New(\"exchange full\", linux.EXFULL)\n\tErrNoAnode = New(\"no anode\", linux.ENOANO)\n\tErrInvalidRequestCode = New(\"invalid request code\", linux.EBADRQC)\n\tErrInvalidSlot = New(\"invalid slot\", linux.EBADSLT)\n\tErrBadFontFile = New(\"bad font file format\", linux.EBFONT)\n\tErrNotStream = New(\"device not a stream\", linux.ENOSTR)\n\tErrNoDataAvailable = New(\"no data available\", linux.ENODATA)\n\tErrTimerExpired = New(\"timer expired\", linux.ETIME)\n\tErrStreamsResourceDepleted = New(\"out of streams resources\", linux.ENOSR)\n\tErrMachineNotOnNetwork = New(\"machine is not on the network\", linux.ENONET)\n\tErrPackageNotInstalled = New(\"package not installed\", linux.ENOPKG)\n\tErrIsRemote = New(\"object is remote\", linux.EREMOTE)\n\tErrNoLink = New(\"link has been severed\", linux.ENOLINK)\n\tErrAdvertise = New(\"advertise error\", linux.EADV)\n\tErrSRMount = New(\"srmount error\", linux.ESRMNT)\n\tErrSendCommunication = New(\"communication error on send\", linux.ECOMM)\n\tErrProtocol = New(\"protocol error\", linux.EPROTO)\n\tErrMultihopAttempted = New(\"multihop attempted\", linux.EMULTIHOP)\n\tErrRFS = New(\"RFS specific error\", linux.EDOTDOT)\n\tErrInvalidDataMessage = New(\"not a data message\", linux.EBADMSG)\n\tErrOverflow = New(\"value too large for defined data type\", linux.EOVERFLOW)\n\tErrNetworkNameNotUnique = New(\"name not unique on network\", linux.ENOTUNIQ)\n\tErrFDInBadState = New(\"file descriptor in bad state\", linux.EBADFD)\n\tErrRemoteAddressChanged = New(\"remote address changed\", linux.EREMCHG)\n\tErrSharedLibraryInaccessible = New(\"can not access a needed shared library\", linux.ELIBACC)\n\tErrCorruptedSharedLibrary = New(\"accessing a corrupted shared library\", linux.ELIBBAD)\n\tErrLibSectionCorrupted = New(\".lib section in a.out corrupted\", linux.ELIBSCN)\n\tErrTooManySharedLibraries = New(\"attempting to link in too many shared libraries\", linux.ELIBMAX)\n\tErrSharedLibraryExeced = New(\"cannot exec a shared library directly\", linux.ELIBEXEC)\n\tErrIllegalByteSequence = New(\"illegal byte sequence\", linux.EILSEQ)\n\tErrShouldRestart = New(\"interrupted system call should be restarted\", linux.ERESTART)\n\tErrStreamPipe = New(\"streams pipe error\", linux.ESTRPIPE)\n\tErrTooManyUsers = New(\"too many users\", linux.EUSERS)\n\tErrNotASocket = New(\"socket operation on non-socket\", linux.ENOTSOCK)\n\tErrDestinationAddressRequired = New(\"destination address required\", linux.EDESTADDRREQ)\n\tErrMessageTooLong = New(\"message too long\", linux.EMSGSIZE)\n\tErrWrongProtocolForSocket = New(\"protocol wrong type for socket\", linux.EPROTOTYPE)\n\tErrProtocolNotAvailable = New(\"protocol not available\", linux.ENOPROTOOPT)\n\tErrProtocolNotSupported = New(\"protocol not supported\", linux.EPROTONOSUPPORT)\n\tErrSocketNotSupported = New(\"socket type not supported\", linux.ESOCKTNOSUPPORT)\n\tErrEndpointOperation = New(\"operation not supported on transport endpoint\", linux.EOPNOTSUPP)\n\tErrProtocolFamilyNotSupported = New(\"protocol family not supported\", linux.EPFNOSUPPORT)\n\tErrAddressFamilyNotSupported = New(\"address family not supported by protocol\", linux.EAFNOSUPPORT)\n\tErrAddressInUse = New(\"address already in use\", linux.EADDRINUSE)\n\tErrAddressNotAvailable = New(\"cannot assign requested address\", linux.EADDRNOTAVAIL)\n\tErrNetworkDown = New(\"network is down\", linux.ENETDOWN)\n\tErrNetworkUnreachable = New(\"network is unreachable\", linux.ENETUNREACH)\n\tErrNetworkReset = New(\"network dropped connection because of reset\", linux.ENETRESET)\n\tErrConnectionAborted = New(\"software caused connection abort\", linux.ECONNABORTED)\n\tErrConnectionReset = New(\"connection reset by peer\", linux.ECONNRESET)\n\tErrNoBufferSpace = New(\"no buffer space available\", linux.ENOBUFS)\n\tErrAlreadyConnected = New(\"transport endpoint is already connected\", linux.EISCONN)\n\tErrNotConnected = New(\"transport endpoint is not connected\", linux.ENOTCONN)\n\tErrShutdown = New(\"cannot send after transport endpoint shutdown\", linux.ESHUTDOWN)\n\tErrTooManyRefs = New(\"too many references: cannot splice\", linux.ETOOMANYREFS)\n\tErrTimedOut = New(\"connection timed out\", linux.ETIMEDOUT)\n\tErrConnectionRefused = New(\"connection refused\", linux.ECONNREFUSED)\n\tErrHostDown = New(\"host is down\", linux.EHOSTDOWN)\n\tErrNoRoute = New(\"no route to host\", linux.EHOSTUNREACH)\n\tErrAlreadyInProgress = New(\"operation already in progress\", linux.EALREADY)\n\tErrInProgress = New(\"operation now in progress\", linux.EINPROGRESS)\n\tErrStaleFileHandle = New(\"stale file handle\", linux.ESTALE)\n\tErrStructureNeedsCleaning = New(\"structure needs cleaning\", linux.EUCLEAN)\n\tErrIsNamedFile = New(\"is a named type file\", linux.ENOTNAM)\n\tErrRemoteIO = New(\"remote I\/O error\", linux.EREMOTEIO)\n\tErrQuotaExceeded = New(\"quota exceeded\", linux.EDQUOT)\n\tErrNoMedium = New(\"no medium found\", linux.ENOMEDIUM)\n\tErrWrongMediumType = New(\"wrong medium type\", linux.EMEDIUMTYPE)\n\tErrCanceled = New(\"operation Canceled\", linux.ECANCELED)\n\tErrNoKey = New(\"required key not available\", linux.ENOKEY)\n\tErrKeyExpired = New(\"key has expired\", linux.EKEYEXPIRED)\n\tErrKeyRevoked = New(\"key has been revoked\", linux.EKEYREVOKED)\n\tErrKeyRejected = New(\"key was rejected by service\", linux.EKEYREJECTED)\n\tErrOwnerDied = New(\"owner died\", linux.EOWNERDEAD)\n\tErrNotRecoverable = New(\"state not recoverable\", linux.ENOTRECOVERABLE)\n)\n\n\/\/ FromError converts a generic error to an *Error.\n\/\/\n\/\/ TODO: Remove this function.\nfunc FromError(err error) *Error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif errno, ok := err.(syscall.Errno); ok {\n\t\treturn FromHost(errno)\n\t}\n\tif errno, ok := syserror.TranslateError(err); ok {\n\t\treturn FromHost(errno)\n\t}\n\tpanic(\"unknown error: \" + err.Error())\n}\n<|endoftext|>"} {"text":"<commit_before>package update\n\nimport (\n\t\"crypto\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"math\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/containerum\/chkit\/pkg\/chkitErrors\"\n\t\"github.com\/inconshreveable\/go-update\"\n\t\"github.com\/vbauerster\/mpb\"\n\t\"github.com\/vbauerster\/mpb\/decor\"\n)\n\nvar PublicKeyB64 = \"cHVibGljIGtleQo=\"\n\nconst (\n\tErrUpdateApply = chkitErrors.Err(\"update apply failed\")\n)\n\nfunc verifiedUpdate(upd *Package) error {\n\tchecksum, err := ioutil.ReadAll(upd.Hash)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\tsignature, err := ioutil.ReadAll(upd.Signature)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\topts := update.Options{\n\t\tChecksum: checksum,\n\t\tSignature: signature,\n\t\tVerifier: update.NewECDSAVerifier(),\n\t\tHash: crypto.SHA256,\n\t}\n\tpublicKey, err := base64.StdEncoding.DecodeString(PublicKeyB64)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\terr = opts.SetPublicKeyPEM(publicKey)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\terr = update.Apply(upd.Binary, opts)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\treturn nil\n}\n\nfunc speedDecorator(minWidth int, conf byte) decor.DecoratorFunc {\n\tformat := \"%%\"\n\tif (conf & decor.DidentRight) != 0 {\n\t\tformat += \"-\"\n\t}\n\tformat += \"%ds\"\n\tvar (\n\t\tprevCount int64\n\t\tprevDuration time.Duration\n\t\tintegrSpeed float64\n\t\tmeasurementsCount float64\n\t)\n\treturn func(s *decor.Statistics, widthAccumulator chan<- int, widthDistributor <-chan int) string {\n\t\tvar str string\n\t\tmeasurementsCount++\n\t\tcurSpeed := float64(s.Current-prevCount) \/ (s.TimeElapsed - prevDuration).Seconds() \/\/ bytes per second\n\t\tif !math.IsNaN(curSpeed) && !math.IsInf(curSpeed, 1) && !math.IsInf(curSpeed, -1) {\n\t\t\tintegrSpeed += curSpeed\n\t\t}\n\t\tspeedToShow := integrSpeed \/ measurementsCount\n\t\tprevCount = s.Current\n\t\tprevDuration = s.TimeElapsed\n\t\tswitch {\n\t\tcase speedToShow < 1<<10:\n\t\t\tstr = fmt.Sprintf(\"%.1f B\/s\", speedToShow)\n\t\tcase speedToShow >= 1<<10 && speedToShow < 1<<20:\n\t\t\tstr = fmt.Sprintf(\"%.1f KiB\/s\", speedToShow\/(1<<10))\n\t\tcase speedToShow >= 1<<20 && speedToShow < 1<<30:\n\t\t\tstr = fmt.Sprintf(\"%.1f MiB\/s\", speedToShow\/(1<<20))\n\t\tdefault:\n\t\t\tstr = fmt.Sprintf(\"%.1f GiB\/s\", speedToShow\/(1<<30))\n\t\t}\n\t\tif (conf & decor.DwidthSync) != 0 {\n\t\t\twidthAccumulator <- utf8.RuneCountInString(str)\n\t\t\tmax := <-widthDistributor\n\t\t\tif (conf & decor.DextraSpace) != 0 {\n\t\t\t\tmax++\n\t\t\t}\n\t\t\treturn fmt.Sprintf(fmt.Sprintf(format, max), str)\n\t\t}\n\t\treturn fmt.Sprintf(fmt.Sprintf(format, minWidth), str)\n\t}\n}\n\nfunc Update(currentVersion semver.Version, downloader LatestCheckerDownloader, restartAfter bool) error {\n\tlatestVersion, err := downloader.LatestVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif latestVersion.LE(currentVersion) {\n\t\tfmt.Println(\"You already using latest version. Update not needed.\")\n\t\treturn nil\n\t}\n\n\tarchive, size, err := downloader.Download(latestVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer archive.Close()\n\n\tp := mpb.New()\n\tbar := p.AddBar(size, mpb.PrependDecorators(\n\t\tdecor.CountersKiloByte(\"%.1f \/ %.1f\", 3, decor.DextraSpace),\n\t\tdecor.StaticName(\" (\", 1, 0),\n\t\tdecor.Percentage(3, 0),\n\t\tdecor.StaticName(\")\", 1, 0),\n\t), mpb.AppendDecorators(\n\t\tspeedDecorator(3, decor.DextraSpace),\n\t\tdecor.StaticName(\" ETA:\", 3, decor.DextraSpace),\n\t\tdecor.ETA(3, decor.DextraSpace),\n\t))\n\tarchive = bar.ProxyReader(archive)\n\n\tpkg, err := unpack(archive)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Wait()\n\n\terr = verifiedUpdate(pkg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Updated to version %s\\n\", latestVersion)\n\n\tif restartAfter {\n\t\tgracefulRestart()\n\t}\n\n\treturn nil\n}\n<commit_msg>Print message about new version.<commit_after>package update\n\nimport (\n\t\"crypto\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"math\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/containerum\/chkit\/pkg\/chkitErrors\"\n\t\"github.com\/inconshreveable\/go-update\"\n\t\"github.com\/vbauerster\/mpb\"\n\t\"github.com\/vbauerster\/mpb\/decor\"\n)\n\nvar PublicKeyB64 = \"cHVibGljIGtleQo=\"\n\nconst (\n\tErrUpdateApply = chkitErrors.Err(\"update apply failed\")\n)\n\nfunc verifiedUpdate(upd *Package) error {\n\tchecksum, err := ioutil.ReadAll(upd.Hash)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\tsignature, err := ioutil.ReadAll(upd.Signature)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\topts := update.Options{\n\t\tChecksum: checksum,\n\t\tSignature: signature,\n\t\tVerifier: update.NewECDSAVerifier(),\n\t\tHash: crypto.SHA256,\n\t}\n\tpublicKey, err := base64.StdEncoding.DecodeString(PublicKeyB64)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\terr = opts.SetPublicKeyPEM(publicKey)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\terr = update.Apply(upd.Binary, opts)\n\tif err != nil {\n\t\treturn chkitErrors.Wrap(ErrUpdateApply, err)\n\t}\n\n\treturn nil\n}\n\nfunc speedDecorator(minWidth int, conf byte) decor.DecoratorFunc {\n\tformat := \"%%\"\n\tif (conf & decor.DidentRight) != 0 {\n\t\tformat += \"-\"\n\t}\n\tformat += \"%ds\"\n\tvar (\n\t\tprevCount int64\n\t\tprevDuration time.Duration\n\t\tintegrSpeed float64\n\t\tmeasurementsCount float64\n\t)\n\treturn func(s *decor.Statistics, widthAccumulator chan<- int, widthDistributor <-chan int) string {\n\t\tvar str string\n\t\tmeasurementsCount++\n\t\tcurSpeed := float64(s.Current-prevCount) \/ (s.TimeElapsed - prevDuration).Seconds() \/\/ bytes per second\n\t\tif !math.IsNaN(curSpeed) && !math.IsInf(curSpeed, 1) && !math.IsInf(curSpeed, -1) {\n\t\t\tintegrSpeed += curSpeed\n\t\t}\n\t\tspeedToShow := integrSpeed \/ measurementsCount\n\t\tprevCount = s.Current\n\t\tprevDuration = s.TimeElapsed\n\t\tswitch {\n\t\tcase speedToShow < 1<<10:\n\t\t\tstr = fmt.Sprintf(\"%.1f B\/s\", speedToShow)\n\t\tcase speedToShow >= 1<<10 && speedToShow < 1<<20:\n\t\t\tstr = fmt.Sprintf(\"%.1f KiB\/s\", speedToShow\/(1<<10))\n\t\tcase speedToShow >= 1<<20 && speedToShow < 1<<30:\n\t\t\tstr = fmt.Sprintf(\"%.1f MiB\/s\", speedToShow\/(1<<20))\n\t\tdefault:\n\t\t\tstr = fmt.Sprintf(\"%.1f GiB\/s\", speedToShow\/(1<<30))\n\t\t}\n\t\tif (conf & decor.DwidthSync) != 0 {\n\t\t\twidthAccumulator <- utf8.RuneCountInString(str)\n\t\t\tmax := <-widthDistributor\n\t\t\tif (conf & decor.DextraSpace) != 0 {\n\t\t\t\tmax++\n\t\t\t}\n\t\t\treturn fmt.Sprintf(fmt.Sprintf(format, max), str)\n\t\t}\n\t\treturn fmt.Sprintf(fmt.Sprintf(format, minWidth), str)\n\t}\n}\n\nfunc Update(currentVersion semver.Version, downloader LatestCheckerDownloader, restartAfter bool) error {\n\tlatestVersion, err := downloader.LatestVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif latestVersion.LE(currentVersion) {\n\t\tfmt.Println(\"You already using latest version. Update not needed.\")\n\t\treturn nil\n\t} else {\n\t\tfmt.Printf(\"Found newer version: %s. Updating...\\n\", latestVersion)\n\t}\n\n\tarchive, size, err := downloader.Download(latestVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer archive.Close()\n\n\tp := mpb.New()\n\tbar := p.AddBar(size, mpb.PrependDecorators(\n\t\tdecor.CountersKiloByte(\"%.1f \/ %.1f\", 3, decor.DextraSpace),\n\t\tdecor.StaticName(\" (\", 1, 0),\n\t\tdecor.Percentage(3, 0),\n\t\tdecor.StaticName(\")\", 1, 0),\n\t), mpb.AppendDecorators(\n\t\tspeedDecorator(3, decor.DextraSpace),\n\t\tdecor.StaticName(\" ETA:\", 3, decor.DextraSpace),\n\t\tdecor.ETA(3, decor.DextraSpace),\n\t))\n\tarchive = bar.ProxyReader(archive)\n\n\tpkg, err := unpack(archive)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Wait()\n\n\terr = verifiedUpdate(pkg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Updated to version %s\\n\", latestVersion)\n\n\tif restartAfter {\n\t\tgracefulRestart()\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lockbasedtxmgr\n\nimport (\n\tcommonledger \"github.com\/hyperledger\/fabric\/common\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/rwsetutil\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/statedb\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/version\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/ledgerconfig\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/queryresult\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/rwset\/kvrwset\"\n)\n\ntype queryHelper struct {\n\ttxmgr *LockBasedTxMgr\n\trwsetBuilder *rwsetutil.RWSetBuilder\n\titrs []*resultsItr\n\terr error\n\tdoneInvoked bool\n}\n\nfunc (h *queryHelper) getState(ns string, key string) ([]byte, error) {\n\th.checkDone()\n\tversionedValue, err := h.txmgr.db.GetState(ns, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, ver := decomposeVersionedValue(versionedValue)\n\tif h.rwsetBuilder != nil {\n\t\th.rwsetBuilder.AddToReadSet(ns, key, ver)\n\t}\n\treturn val, nil\n}\n\nfunc (h *queryHelper) getStateMultipleKeys(namespace string, keys []string) ([][]byte, error) {\n\th.checkDone()\n\tversionedValues, err := h.txmgr.db.GetStateMultipleKeys(namespace, keys)\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\tvalues := make([][]byte, len(versionedValues))\n\tfor i, versionedValue := range versionedValues {\n\t\tval, ver := decomposeVersionedValue(versionedValue)\n\t\tif h.rwsetBuilder != nil {\n\t\t\th.rwsetBuilder.AddToReadSet(namespace, keys[i], ver)\n\t\t}\n\t\tvalues[i] = val\n\t}\n\treturn values, nil\n}\n\nfunc (h *queryHelper) getStateRangeScanIterator(namespace string, startKey string, endKey string) (commonledger.ResultsIterator, error) {\n\th.checkDone()\n\titr, err := newResultsItr(namespace, startKey, endKey, h.txmgr.db, h.rwsetBuilder,\n\t\tledgerconfig.IsQueryReadsHashingEnabled(), ledgerconfig.GetMaxDegreeQueryReadsHashing())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.itrs = append(h.itrs, itr)\n\treturn itr, nil\n}\n\nfunc (h *queryHelper) executeQuery(namespace, query string) (commonledger.ResultsIterator, error) {\n\tdbItr, err := h.txmgr.db.ExecuteQuery(namespace, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &queryResultsItr{DBItr: dbItr, RWSetBuilder: h.rwsetBuilder}, nil\n}\n\nfunc (h *queryHelper) done() {\n\tif h.doneInvoked {\n\t\treturn\n\t}\n\tdefer h.txmgr.commitRWLock.RUnlock()\n\th.doneInvoked = true\n\tfor _, itr := range h.itrs {\n\t\titr.Close()\n\t\tif h.rwsetBuilder != nil {\n\t\t\tresults, hash, err := itr.rangeQueryResultsHelper.Done()\n\t\t\tif results != nil {\n\t\t\t\titr.rangeQueryInfo.SetRawReads(results)\n\t\t\t}\n\t\t\tif hash != nil {\n\t\t\t\titr.rangeQueryInfo.SetMerkelSummary(hash)\n\t\t\t}\n\t\t\t\/\/ TODO Change the method signature of done() to return error. However, this will have\n\t\t\t\/\/ repercurssions in the chaincode package, so deferring to a separate changeset.\n\t\t\t\/\/ For now, capture the first error that is encountered\n\t\t\t\/\/ during final processing and return the error when the caller retrieves the simulation results.\n\t\t\tif h.err == nil {\n\t\t\t\th.err = err\n\t\t\t}\n\t\t\th.rwsetBuilder.AddToRangeQuerySet(itr.ns, itr.rangeQueryInfo)\n\t\t}\n\t}\n}\n\nfunc (h *queryHelper) checkDone() {\n\tif h.doneInvoked {\n\t\tpanic(\"This instance should not be used after calling Done()\")\n\t}\n}\n\n\/\/ resultsItr implements interface ledger.ResultsIterator\n\/\/ this wraps the actual db iterator and intercept the calls\n\/\/ to build rangeQueryInfo in the ReadWriteSet that is used\n\/\/ for performing phantom read validation during commit\ntype resultsItr struct {\n\tns string\n\tendKey string\n\tdbItr statedb.ResultsIterator\n\trwSetBuilder *rwsetutil.RWSetBuilder\n\trangeQueryInfo *kvrwset.RangeQueryInfo\n\trangeQueryResultsHelper *rwsetutil.RangeQueryResultsHelper\n}\n\nfunc newResultsItr(ns string, startKey string, endKey string,\n\tdb statedb.VersionedDB, rwsetBuilder *rwsetutil.RWSetBuilder, enableHashing bool, maxDegree uint32) (*resultsItr, error) {\n\tdbItr, err := db.GetStateRangeScanIterator(ns, startKey, endKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr := &resultsItr{ns: ns, dbItr: dbItr}\n\t\/\/ it's a simulation request so, enable capture of range query info\n\tif rwsetBuilder != nil {\n\t\titr.rwSetBuilder = rwsetBuilder\n\t\titr.endKey = endKey\n\t\t\/\/ just set the StartKey... set the EndKey later below in the Next() method.\n\t\titr.rangeQueryInfo = &kvrwset.RangeQueryInfo{StartKey: startKey}\n\t\tresultsHelper, err := rwsetutil.NewRangeQueryResultsHelper(enableHashing, maxDegree)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.rangeQueryResultsHelper = resultsHelper\n\t}\n\treturn itr, nil\n}\n\n\/\/ Next implements method in interface ledger.ResultsIterator\n\/\/ Before returning the next result, update the EndKey and ItrExhausted in rangeQueryInfo\n\/\/ If we set the EndKey in the constructor (as we do for the StartKey) to what is\n\/\/ supplied in the original query, we may be capturing the unnecessary longer range if the\n\/\/ caller decides to stop iterating at some intermidiate point. Alternatively, we could have\n\/\/ set the EndKey and ItrExhausted in the Close() function but it may not be desirable to change\n\/\/ transactional behaviour based on whether the Close() was invoked or not\nfunc (itr *resultsItr) Next() (commonledger.QueryResult, error) {\n\tqueryResult, err := itr.dbItr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.updateRangeQueryInfo(queryResult)\n\tif queryResult == nil {\n\t\treturn nil, nil\n\t}\n\tversionedKV := queryResult.(*statedb.VersionedKV)\n\treturn &queryresult.KV{Key: versionedKV.Key, Value: versionedKV.Value}, nil\n}\n\n\/\/ updateRangeQueryInfo updates two attributes of the rangeQueryInfo\n\/\/ 1) The EndKey - set to either a) latest key that is to be returned to the caller (if the iterator is not exhausted)\n\/\/ because, we do not know if the caller is again going to invoke Next() or not.\n\/\/ or b) the last key that was supplied in the original query (if the iterator is exhausted)\n\/\/ 2) The ItrExhausted - set to true if the iterator is going to return nil as a result of the Next() call\nfunc (itr *resultsItr) updateRangeQueryInfo(queryResult statedb.QueryResult) {\n\tif itr.rwSetBuilder == nil {\n\t\treturn\n\t}\n\n\tif queryResult == nil {\n\t\t\/\/ caller scanned till the iterator got exhausted.\n\t\t\/\/ So, set the endKey to the actual endKey supplied in the query\n\t\titr.rangeQueryInfo.ItrExhausted = true\n\t\titr.rangeQueryInfo.EndKey = itr.endKey\n\t\treturn\n\t}\n\tversionedKV := queryResult.(*statedb.VersionedKV)\n\titr.rangeQueryResultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version))\n\t\/\/ Set the end key to the latest key retrieved by the caller.\n\t\/\/ Because, the caller may actually not invoke the Next() function again\n\titr.rangeQueryInfo.EndKey = versionedKV.Key\n}\n\n\/\/ Close implements method in interface ledger.ResultsIterator\nfunc (itr *resultsItr) Close() {\n\titr.dbItr.Close()\n}\n\ntype queryResultsItr struct {\n\tDBItr statedb.ResultsIterator\n\tRWSetBuilder *rwsetutil.RWSetBuilder\n}\n\n\/\/ Next implements method in interface ledger.ResultsIterator\nfunc (itr *queryResultsItr) Next() (commonledger.QueryResult, error) {\n\n\tqueryResult, err := itr.DBItr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif queryResult == nil {\n\t\treturn nil, nil\n\t}\n\tversionedQueryRecord := queryResult.(*statedb.VersionedKV)\n\tlogger.Debugf(\"queryResultsItr.Next() returned a record:%s\", string(versionedQueryRecord.Value))\n\n\tif itr.RWSetBuilder != nil {\n\t\titr.RWSetBuilder.AddToReadSet(versionedQueryRecord.Namespace, versionedQueryRecord.Key, versionedQueryRecord.Version)\n\t}\n\treturn &queryresult.KV{Key: versionedQueryRecord.Key, Value: versionedQueryRecord.Value}, nil\n}\n\n\/\/ Close implements method in interface ledger.ResultsIterator\nfunc (itr *queryResultsItr) Close() {\n\titr.DBItr.Close()\n}\n\nfunc decomposeVersionedValue(versionedValue *statedb.VersionedValue) ([]byte, *version.Height) {\n\tvar value []byte\n\tvar ver *version.Height\n\tif versionedValue != nil {\n\t\tvalue = versionedValue.Value\n\t\tver = versionedValue.Version\n\t}\n\treturn value, ver\n}\n<commit_msg>[FAB-3270] Cleanup a TODO in Ledger queryHelper done()<commit_after>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lockbasedtxmgr\n\nimport (\n\tcommonledger \"github.com\/hyperledger\/fabric\/common\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/rwsetutil\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/statedb\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/version\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/ledgerconfig\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/queryresult\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/rwset\/kvrwset\"\n)\n\ntype queryHelper struct {\n\ttxmgr *LockBasedTxMgr\n\trwsetBuilder *rwsetutil.RWSetBuilder\n\titrs []*resultsItr\n\terr error\n\tdoneInvoked bool\n}\n\nfunc (h *queryHelper) getState(ns string, key string) ([]byte, error) {\n\th.checkDone()\n\tversionedValue, err := h.txmgr.db.GetState(ns, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, ver := decomposeVersionedValue(versionedValue)\n\tif h.rwsetBuilder != nil {\n\t\th.rwsetBuilder.AddToReadSet(ns, key, ver)\n\t}\n\treturn val, nil\n}\n\nfunc (h *queryHelper) getStateMultipleKeys(namespace string, keys []string) ([][]byte, error) {\n\th.checkDone()\n\tversionedValues, err := h.txmgr.db.GetStateMultipleKeys(namespace, keys)\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\tvalues := make([][]byte, len(versionedValues))\n\tfor i, versionedValue := range versionedValues {\n\t\tval, ver := decomposeVersionedValue(versionedValue)\n\t\tif h.rwsetBuilder != nil {\n\t\t\th.rwsetBuilder.AddToReadSet(namespace, keys[i], ver)\n\t\t}\n\t\tvalues[i] = val\n\t}\n\treturn values, nil\n}\n\nfunc (h *queryHelper) getStateRangeScanIterator(namespace string, startKey string, endKey string) (commonledger.ResultsIterator, error) {\n\th.checkDone()\n\titr, err := newResultsItr(namespace, startKey, endKey, h.txmgr.db, h.rwsetBuilder,\n\t\tledgerconfig.IsQueryReadsHashingEnabled(), ledgerconfig.GetMaxDegreeQueryReadsHashing())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.itrs = append(h.itrs, itr)\n\treturn itr, nil\n}\n\nfunc (h *queryHelper) executeQuery(namespace, query string) (commonledger.ResultsIterator, error) {\n\tdbItr, err := h.txmgr.db.ExecuteQuery(namespace, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &queryResultsItr{DBItr: dbItr, RWSetBuilder: h.rwsetBuilder}, nil\n}\n\nfunc (h *queryHelper) done() {\n\tif h.doneInvoked {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\th.txmgr.commitRWLock.RUnlock()\n\t\th.doneInvoked = true\n\t\tfor _, itr := range h.itrs {\n\t\t\titr.Close()\n\t\t}\n\t}()\n\n\tfor _, itr := range h.itrs {\n\t\tif h.rwsetBuilder != nil {\n\t\t\tresults, hash, err := itr.rangeQueryResultsHelper.Done()\n\t\t\tif err != nil {\n\t\t\t\th.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif results != nil {\n\t\t\t\titr.rangeQueryInfo.SetRawReads(results)\n\t\t\t}\n\t\t\tif hash != nil {\n\t\t\t\titr.rangeQueryInfo.SetMerkelSummary(hash)\n\t\t\t}\n\t\t\th.rwsetBuilder.AddToRangeQuerySet(itr.ns, itr.rangeQueryInfo)\n\t\t}\n\t}\n}\n\nfunc (h *queryHelper) checkDone() {\n\tif h.doneInvoked {\n\t\tpanic(\"This instance should not be used after calling Done()\")\n\t}\n}\n\n\/\/ resultsItr implements interface ledger.ResultsIterator\n\/\/ this wraps the actual db iterator and intercept the calls\n\/\/ to build rangeQueryInfo in the ReadWriteSet that is used\n\/\/ for performing phantom read validation during commit\ntype resultsItr struct {\n\tns string\n\tendKey string\n\tdbItr statedb.ResultsIterator\n\trwSetBuilder *rwsetutil.RWSetBuilder\n\trangeQueryInfo *kvrwset.RangeQueryInfo\n\trangeQueryResultsHelper *rwsetutil.RangeQueryResultsHelper\n}\n\nfunc newResultsItr(ns string, startKey string, endKey string,\n\tdb statedb.VersionedDB, rwsetBuilder *rwsetutil.RWSetBuilder, enableHashing bool, maxDegree uint32) (*resultsItr, error) {\n\tdbItr, err := db.GetStateRangeScanIterator(ns, startKey, endKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr := &resultsItr{ns: ns, dbItr: dbItr}\n\t\/\/ it's a simulation request so, enable capture of range query info\n\tif rwsetBuilder != nil {\n\t\titr.rwSetBuilder = rwsetBuilder\n\t\titr.endKey = endKey\n\t\t\/\/ just set the StartKey... set the EndKey later below in the Next() method.\n\t\titr.rangeQueryInfo = &kvrwset.RangeQueryInfo{StartKey: startKey}\n\t\tresultsHelper, err := rwsetutil.NewRangeQueryResultsHelper(enableHashing, maxDegree)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.rangeQueryResultsHelper = resultsHelper\n\t}\n\treturn itr, nil\n}\n\n\/\/ Next implements method in interface ledger.ResultsIterator\n\/\/ Before returning the next result, update the EndKey and ItrExhausted in rangeQueryInfo\n\/\/ If we set the EndKey in the constructor (as we do for the StartKey) to what is\n\/\/ supplied in the original query, we may be capturing the unnecessary longer range if the\n\/\/ caller decides to stop iterating at some intermidiate point. Alternatively, we could have\n\/\/ set the EndKey and ItrExhausted in the Close() function but it may not be desirable to change\n\/\/ transactional behaviour based on whether the Close() was invoked or not\nfunc (itr *resultsItr) Next() (commonledger.QueryResult, error) {\n\tqueryResult, err := itr.dbItr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.updateRangeQueryInfo(queryResult)\n\tif queryResult == nil {\n\t\treturn nil, nil\n\t}\n\tversionedKV := queryResult.(*statedb.VersionedKV)\n\treturn &queryresult.KV{Key: versionedKV.Key, Value: versionedKV.Value}, nil\n}\n\n\/\/ updateRangeQueryInfo updates two attributes of the rangeQueryInfo\n\/\/ 1) The EndKey - set to either a) latest key that is to be returned to the caller (if the iterator is not exhausted)\n\/\/ because, we do not know if the caller is again going to invoke Next() or not.\n\/\/ or b) the last key that was supplied in the original query (if the iterator is exhausted)\n\/\/ 2) The ItrExhausted - set to true if the iterator is going to return nil as a result of the Next() call\nfunc (itr *resultsItr) updateRangeQueryInfo(queryResult statedb.QueryResult) {\n\tif itr.rwSetBuilder == nil {\n\t\treturn\n\t}\n\n\tif queryResult == nil {\n\t\t\/\/ caller scanned till the iterator got exhausted.\n\t\t\/\/ So, set the endKey to the actual endKey supplied in the query\n\t\titr.rangeQueryInfo.ItrExhausted = true\n\t\titr.rangeQueryInfo.EndKey = itr.endKey\n\t\treturn\n\t}\n\tversionedKV := queryResult.(*statedb.VersionedKV)\n\titr.rangeQueryResultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version))\n\t\/\/ Set the end key to the latest key retrieved by the caller.\n\t\/\/ Because, the caller may actually not invoke the Next() function again\n\titr.rangeQueryInfo.EndKey = versionedKV.Key\n}\n\n\/\/ Close implements method in interface ledger.ResultsIterator\nfunc (itr *resultsItr) Close() {\n\titr.dbItr.Close()\n}\n\ntype queryResultsItr struct {\n\tDBItr statedb.ResultsIterator\n\tRWSetBuilder *rwsetutil.RWSetBuilder\n}\n\n\/\/ Next implements method in interface ledger.ResultsIterator\nfunc (itr *queryResultsItr) Next() (commonledger.QueryResult, error) {\n\n\tqueryResult, err := itr.DBItr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif queryResult == nil {\n\t\treturn nil, nil\n\t}\n\tversionedQueryRecord := queryResult.(*statedb.VersionedKV)\n\tlogger.Debugf(\"queryResultsItr.Next() returned a record:%s\", string(versionedQueryRecord.Value))\n\n\tif itr.RWSetBuilder != nil {\n\t\titr.RWSetBuilder.AddToReadSet(versionedQueryRecord.Namespace, versionedQueryRecord.Key, versionedQueryRecord.Version)\n\t}\n\treturn &queryresult.KV{Key: versionedQueryRecord.Key, Value: versionedQueryRecord.Value}, nil\n}\n\n\/\/ Close implements method in interface ledger.ResultsIterator\nfunc (itr *queryResultsItr) Close() {\n\titr.DBItr.Close()\n}\n\nfunc decomposeVersionedValue(versionedValue *statedb.VersionedValue) ([]byte, *version.Height) {\n\tvar value []byte\n\tvar ver *version.Height\n\tif versionedValue != nil {\n\t\tvalue = versionedValue.Value\n\t\tver = versionedValue.Version\n\t}\n\treturn value, ver\n}\n<|endoftext|>"} {"text":"<commit_before>package lexer\n\n\/\/ PeekingLexer supports arbitrary lookahead as well as cloning.\ntype PeekingLexer struct {\n\tcursor int\n\teof Token\n\ttokens []Token\n}\n\n\/\/ Upgrade a Lexer to a PeekingLexer with arbitrary lookahead.\nfunc Upgrade(lex Lexer) (*PeekingLexer, error) {\n\tr := &PeekingLexer{}\n\tfor {\n\t\tt, err := lex.Next()\n\t\tif err != nil {\n\t\t\treturn r, err\n\t\t}\n\t\tif t.EOF() {\n\t\t\tr.eof = t\n\t\t\tbreak\n\t\t}\n\t\tr.tokens = append(r.tokens, t)\n\t}\n\treturn r, nil\n}\n\n\/\/ Cursor position in tokens.\nfunc (p *PeekingLexer) Cursor() int {\n\treturn p.cursor\n}\n\n\/\/ Next consumes and returns the next token.\nfunc (p *PeekingLexer) Next() (Token, error) {\n\tif p.cursor >= len(p.tokens) {\n\t\treturn p.eof, nil\n\t}\n\tp.cursor++\n\treturn p.tokens[p.cursor-1], nil\n}\n\n\/\/ Peek ahead at the n+1 token. ie. Peek(0) will peek at the next token.\nfunc (p *PeekingLexer) Peek(n int) (Token, error) {\n\ti := p.cursor + n\n\tif i >= len(p.tokens) {\n\t\treturn p.eof, nil\n\t}\n\treturn p.tokens[i], nil\n}\n\n\/\/ Clone creates a clone of this PeekingLexer at its current token.\n\/\/\n\/\/ The parent and clone are completely independent.\nfunc (p *PeekingLexer) Clone() *PeekingLexer {\n\tclone := *p\n\treturn &clone\n}\n<commit_msg>Expose the length of tokens in the peeking lexer<commit_after>package lexer\n\n\/\/ PeekingLexer supports arbitrary lookahead as well as cloning.\ntype PeekingLexer struct {\n\tcursor int\n\teof Token\n\ttokens []Token\n}\n\n\/\/ Upgrade a Lexer to a PeekingLexer with arbitrary lookahead.\nfunc Upgrade(lex Lexer) (*PeekingLexer, error) {\n\tr := &PeekingLexer{}\n\tfor {\n\t\tt, err := lex.Next()\n\t\tif err != nil {\n\t\t\treturn r, err\n\t\t}\n\t\tif t.EOF() {\n\t\t\tr.eof = t\n\t\t\tbreak\n\t\t}\n\t\tr.tokens = append(r.tokens, t)\n\t}\n\treturn r, nil\n}\n\n\/\/ Cursor position in tokens.\nfunc (p *PeekingLexer) Cursor() int {\n\treturn p.cursor\n}\n\n\/\/ Length returns the number of tokens consumed by the lexer.\nfunc (p *PeekingLexer) Length() int {\n\treturn len(p.tokens)\n}\n\n\/\/ Next consumes and returns the next token.\nfunc (p *PeekingLexer) Next() (Token, error) {\n\tif p.cursor >= len(p.tokens) {\n\t\treturn p.eof, nil\n\t}\n\tp.cursor++\n\treturn p.tokens[p.cursor-1], nil\n}\n\n\/\/ Peek ahead at the n+1 token. ie. Peek(0) will peek at the next token.\nfunc (p *PeekingLexer) Peek(n int) (Token, error) {\n\ti := p.cursor + n\n\tif i >= len(p.tokens) {\n\t\treturn p.eof, nil\n\t}\n\treturn p.tokens[i], nil\n}\n\n\/\/ Clone creates a clone of this PeekingLexer at its current token.\n\/\/\n\/\/ The parent and clone are completely independent.\nfunc (p *PeekingLexer) Clone() *PeekingLexer {\n\tclone := *p\n\treturn &clone\n}\n<|endoftext|>"} {"text":"<commit_before>package s3\n\n\/\/ Source: https:\/\/github.com\/pivotal-golang\/s3cli\n\n\/\/ Copyright (c) 2013 Damien Le Berrigaud and Nick Wade\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/corehandlers\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n)\n\nconst (\n\tsignatureVersion = \"2\"\n\tsignatureMethod = \"HmacSHA1\"\n\ttimeFormat = \"2006-01-02T15:04:05Z\"\n)\n\ntype signer struct {\n\t\/\/ Values that must be populated from the request\n\tRequest *http.Request\n\tTime time.Time\n\tCredentials *credentials.Credentials\n\tQuery url.Values\n\tstringToSign string\n\tsignature string\n}\n\nvar s3ParamsToSign = map[string]bool{\n\t\"acl\": true,\n\t\"location\": true,\n\t\"logging\": true,\n\t\"notification\": true,\n\t\"partNumber\": true,\n\t\"policy\": true,\n\t\"requestPayment\": true,\n\t\"torrent\": true,\n\t\"uploadId\": true,\n\t\"uploads\": true,\n\t\"versionId\": true,\n\t\"versioning\": true,\n\t\"versions\": true,\n\t\"response-content-type\": true,\n\t\"response-content-language\": true,\n\t\"response-expires\": true,\n\t\"response-cache-control\": true,\n\t\"response-content-disposition\": true,\n\t\"response-content-encoding\": true,\n\t\"website\": true,\n\t\"delete\": true,\n}\n\n\/\/ setv2Handlers will setup v2 signature signing on the S3 driver\nfunc setv2Handlers(svc *s3.S3) {\n\tsvc.Handlers.Build.PushBack(func(r *request.Request) {\n\t\tparsedURL, err := url.Parse(r.HTTPRequest.URL.String())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to parse URL: %v\", err)\n\t\t}\n\t\tr.HTTPRequest.URL.Opaque = parsedURL.Path\n\t})\n\n\tsvc.Handlers.Sign.Clear()\n\tsvc.Handlers.Sign.PushBack(Sign)\n\tsvc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)\n}\n\n\/\/ Sign requests with signature version 2.\n\/\/\n\/\/ Will sign the requests with the service config's Credentials object\n\/\/ Signing is skipped if the credentials is the credentials.AnonymousCredentials\n\/\/ object.\nfunc Sign(req *request.Request) {\n\t\/\/ If the request does not need to be signed ignore the signing of the\n\t\/\/ request if the AnonymousCredentials object is used.\n\tif req.Config.Credentials == credentials.AnonymousCredentials {\n\t\treturn\n\t}\n\n\tv2 := signer{\n\t\tRequest: req.HTTPRequest,\n\t\tTime: req.Time,\n\t\tCredentials: req.Config.Credentials,\n\t}\n\tv2.Sign()\n}\n\nfunc (v2 *signer) Sign() error {\n\tcredValue, err := v2.Credentials.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\taccessKey := credValue.AccessKeyID\n\tvar (\n\t\tmd5, ctype, date, xamz string\n\t\txamzDate bool\n\t\tsarray []string\n\t)\n\n\theaders := v2.Request.Header\n\tparams := v2.Request.URL.Query()\n\tparsedURL, err := url.Parse(v2.Request.URL.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, canonicalPath := parsedURL.Host, parsedURL.Path\n\tv2.Request.Header[\"Host\"] = []string{host}\n\tv2.Request.Header[\"date\"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)}\n\n\tfor k, v := range headers {\n\t\tk = strings.ToLower(k)\n\t\tswitch k {\n\t\tcase \"content-md5\":\n\t\t\tmd5 = v[0]\n\t\tcase \"content-type\":\n\t\t\tctype = v[0]\n\t\tcase \"date\":\n\t\t\tif !xamzDate {\n\t\t\t\tdate = v[0]\n\t\t\t}\n\t\tdefault:\n\t\t\tif strings.HasPrefix(k, \"x-amz-\") {\n\t\t\t\tvall := strings.Join(v, \",\")\n\t\t\t\tsarray = append(sarray, k+\":\"+vall)\n\t\t\t\tif k == \"x-amz-date\" {\n\t\t\t\t\txamzDate = true\n\t\t\t\t\tdate = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(sarray) > 0 {\n\t\tsort.StringSlice(sarray).Sort()\n\t\txamz = strings.Join(sarray, \"\\n\") + \"\\n\"\n\t}\n\n\texpires := false\n\tif v, ok := params[\"Expires\"]; ok {\n\t\texpires = true\n\t\tdate = v[0]\n\t\tparams[\"AWSAccessKeyId\"] = []string{accessKey}\n\t}\n\n\tsarray = sarray[0:0]\n\tfor k, v := range params {\n\t\tif s3ParamsToSign[k] {\n\t\t\tfor _, vi := range v {\n\t\t\t\tif vi == \"\" {\n\t\t\t\t\tsarray = append(sarray, k)\n\t\t\t\t} else {\n\t\t\t\t\tsarray = append(sarray, k+\"=\"+vi)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(sarray) > 0 {\n\t\tsort.StringSlice(sarray).Sort()\n\t\tcanonicalPath = canonicalPath + \"?\" + strings.Join(sarray, \"&\")\n\t}\n\n\tv2.stringToSign = strings.Join([]string{\n\t\tv2.Request.Method,\n\t\tmd5,\n\t\tctype,\n\t\tdate,\n\t\txamz + canonicalPath,\n\t}, \"\\n\")\n\thash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey))\n\thash.Write([]byte(v2.stringToSign))\n\tv2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))\n\n\tif expires {\n\t\tparams[\"Signature\"] = []string{string(v2.signature)}\n\t} else {\n\t\theaders[\"Authorization\"] = []string{\"AWS \" + accessKey + \":\" + string(v2.signature)}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"string-to-sign\": v2.stringToSign,\n\t\t\"signature\": v2.signature,\n\t}).Debugln(\"request signature\")\n\treturn nil\n}\n<commit_msg>v2 signer: correctly sort headers<commit_after>package s3\n\n\/\/ Source: https:\/\/github.com\/pivotal-golang\/s3cli\n\n\/\/ Copyright (c) 2013 Damien Le Berrigaud and Nick Wade\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/corehandlers\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n)\n\nconst (\n\tsignatureVersion = \"2\"\n\tsignatureMethod = \"HmacSHA1\"\n\ttimeFormat = \"2006-01-02T15:04:05Z\"\n)\n\ntype signer struct {\n\t\/\/ Values that must be populated from the request\n\tRequest *http.Request\n\tTime time.Time\n\tCredentials *credentials.Credentials\n\tQuery url.Values\n\tstringToSign string\n\tsignature string\n}\n\nvar s3ParamsToSign = map[string]bool{\n\t\"acl\": true,\n\t\"location\": true,\n\t\"logging\": true,\n\t\"notification\": true,\n\t\"partNumber\": true,\n\t\"policy\": true,\n\t\"requestPayment\": true,\n\t\"torrent\": true,\n\t\"uploadId\": true,\n\t\"uploads\": true,\n\t\"versionId\": true,\n\t\"versioning\": true,\n\t\"versions\": true,\n\t\"response-content-type\": true,\n\t\"response-content-language\": true,\n\t\"response-expires\": true,\n\t\"response-cache-control\": true,\n\t\"response-content-disposition\": true,\n\t\"response-content-encoding\": true,\n\t\"website\": true,\n\t\"delete\": true,\n}\n\n\/\/ setv2Handlers will setup v2 signature signing on the S3 driver\nfunc setv2Handlers(svc *s3.S3) {\n\tsvc.Handlers.Build.PushBack(func(r *request.Request) {\n\t\tparsedURL, err := url.Parse(r.HTTPRequest.URL.String())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to parse URL: %v\", err)\n\t\t}\n\t\tr.HTTPRequest.URL.Opaque = parsedURL.Path\n\t})\n\n\tsvc.Handlers.Sign.Clear()\n\tsvc.Handlers.Sign.PushBack(Sign)\n\tsvc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)\n}\n\n\/\/ Sign requests with signature version 2.\n\/\/\n\/\/ Will sign the requests with the service config's Credentials object\n\/\/ Signing is skipped if the credentials is the credentials.AnonymousCredentials\n\/\/ object.\nfunc Sign(req *request.Request) {\n\t\/\/ If the request does not need to be signed ignore the signing of the\n\t\/\/ request if the AnonymousCredentials object is used.\n\tif req.Config.Credentials == credentials.AnonymousCredentials {\n\t\treturn\n\t}\n\n\tv2 := signer{\n\t\tRequest: req.HTTPRequest,\n\t\tTime: req.Time,\n\t\tCredentials: req.Config.Credentials,\n\t}\n\tv2.Sign()\n}\n\nfunc (v2 *signer) Sign() error {\n\tcredValue, err := v2.Credentials.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\taccessKey := credValue.AccessKeyID\n\tvar (\n\t\tmd5, ctype, date, xamz string\n\t\txamzDate bool\n\t\tsarray []string\n\t\tsmap map[string]string\n\t\tsharray []string\n\t)\n\n\theaders := v2.Request.Header\n\tparams := v2.Request.URL.Query()\n\tparsedURL, err := url.Parse(v2.Request.URL.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, canonicalPath := parsedURL.Host, parsedURL.Path\n\tv2.Request.Header[\"Host\"] = []string{host}\n\tv2.Request.Header[\"date\"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)}\n\n\tsmap = make(map[string]string)\n\tfor k, v := range headers {\n\t\tk = strings.ToLower(k)\n\t\tswitch k {\n\t\tcase \"content-md5\":\n\t\t\tmd5 = v[0]\n\t\tcase \"content-type\":\n\t\t\tctype = v[0]\n\t\tcase \"date\":\n\t\t\tif !xamzDate {\n\t\t\t\tdate = v[0]\n\t\t\t}\n\t\tdefault:\n\t\t\tif strings.HasPrefix(k, \"x-amz-\") {\n\t\t\t\tvall := strings.Join(v, \",\")\n\t\t\t\tsmap[k] = k+\":\"+vall\n\t\t\t\tif k == \"x-amz-date\" {\n\t\t\t\t\txamzDate = true\n\t\t\t\t\tdate = \"\"\n\t\t\t\t}\n\t\t\t\tsharray = append(sharray, k)\n\t\t\t}\n\t\t}\n\t}\n\tif len(sharray) > 0 {\n\t\tsort.StringSlice(sharray).Sort()\n\t\tfor _, h := range(sharray) {\n\t\t\tsarray = append(sarray, smap[h])\n\t\t}\n\t\txamz = strings.Join(sarray, \"\\n\") + \"\\n\"\n\t}\n\n\texpires := false\n\tif v, ok := params[\"Expires\"]; ok {\n\t\texpires = true\n\t\tdate = v[0]\n\t\tparams[\"AWSAccessKeyId\"] = []string{accessKey}\n\t}\n\n\tsarray = sarray[0:0]\n\tfor k, v := range params {\n\t\tif s3ParamsToSign[k] {\n\t\t\tfor _, vi := range v {\n\t\t\t\tif vi == \"\" {\n\t\t\t\t\tsarray = append(sarray, k)\n\t\t\t\t} else {\n\t\t\t\t\tsarray = append(sarray, k+\"=\"+vi)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(sarray) > 0 {\n\t\tsort.StringSlice(sarray).Sort()\n\t\tcanonicalPath = canonicalPath + \"?\" + strings.Join(sarray, \"&\")\n\t}\n\n\tv2.stringToSign = strings.Join([]string{\n\t\tv2.Request.Method,\n\t\tmd5,\n\t\tctype,\n\t\tdate,\n\t\txamz + canonicalPath,\n\t}, \"\\n\")\n\thash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey))\n\thash.Write([]byte(v2.stringToSign))\n\tv2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))\n\n\tif expires {\n\t\tparams[\"Signature\"] = []string{string(v2.signature)}\n\t} else {\n\t\theaders[\"Authorization\"] = []string{\"AWS \" + accessKey + \":\" + string(v2.signature)}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"string-to-sign\": v2.stringToSign,\n\t\t\"signature\": v2.signature,\n\t}).Debugln(\"request signature\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Plotinum Authors. All rights reserved.\n\/\/ Use of this source code is governed by an MIT-style license\n\/\/ that can be found in the LICENSE file.\n\n\/\/ plotutil contains a small number of utilites for creating plots.\n\/\/ This package is under active development so portions of\n\/\/ it may change.\npackage plotutil\n\nimport (\n\t\"code.google.com\/p\/plotinum\/plot\"\n\t\"code.google.com\/p\/plotinum\/vg\"\n\t\"image\/color\"\n)\n\n\/\/ DefaultColors is a set of colors used by the Color funciton.\nvar DefaultColors = SoftColors\n\n\/\/ SoftColors is a set darker colors.\nvar DarkColors = []color.Color{\n\trgb(238, 46, 47),\n\trgb(0, 140, 72),\n\trgb(24, 90, 169),\n\trgb(244, 125, 35),\n\trgb(102, 44, 145),\n\trgb(162, 29, 33),\n\trgb(180, 56, 148),\n}\n\n\/\/ SoftColors is a set of low-intensity colors.\nvar SoftColors = []color.Color{\n\trgb(241, 90, 96),\n\trgb(122, 195, 106),\n\trgb(90, 155, 212),\n\trgb(250, 167, 91),\n\trgb(158, 103, 171),\n\trgb(206, 112, 88),\n\trgb(215, 127, 180),\n}\n\nfunc rgb(r, g, b uint8) color.RGBA {\n\treturn color.RGBA{r, g, b, 255}\n}\n\n\/\/ Color returns the ith default color, wrapping\n\/\/ if i is less than zero or greater than the max\n\/\/ number of colors in the DefaultColors slice.\nfunc Color(i int) color.Color {\n\tn := len(DefaultColors)\n\tif i < 0 {\n\t\treturn DefaultColors[i%n+n]\n\t}\n\treturn DefaultColors[i%n]\n}\n\n\/\/ DefaultGlyphShapes is a set of GlyphDrawers used by\n\/\/ the Shape function.\nvar DefaultGlyphShapes = []plot.GlyphDrawer{\n\tplot.RingGlyph{},\n\tplot.SquareGlyph{},\n\tplot.TriangleGlyph{},\n\tplot.CrossGlyph{},\n\tplot.PlusGlyph{},\n\tplot.CircleGlyph{},\n\tplot.BoxGlyph{},\n\tplot.PyramidGlyph{},\n}\n\n\/\/ Shape returns the ith default glyph shape,\n\/\/ wrapping if i is less than zero or greater\n\/\/ than the max number of GlyphDrawers\n\/\/ in the DefaultGlyphShapes slice.\nfunc Shape(i int) plot.GlyphDrawer {\n\tn := len(DefaultGlyphShapes)\n\tif i < 0 {\n\t\treturn DefaultGlyphShapes[i%n+n]\n\t}\n\treturn DefaultGlyphShapes[i%n]\n}\n\n\/\/ DefaultDashes is a set of dash patterns used by\n\/\/ the Dashes function.\nvar DefaultDashes = [][]vg.Length{\n\t{},\n\n\t{vg.Points(6), vg.Points(2)},\n\n\t{vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(1), vg.Points(1)},\n\n\t{vg.Points(5), vg.Points(2), vg.Points(1), vg.Points(2)},\n\n\t{vg.Points(10), vg.Points(2), vg.Points(2), vg.Points(2),\n\t\tvg.Points(2), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(10), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(5), vg.Points(2), vg.Points(5), vg.Points(2),\n\t\tvg.Points(2), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(4), vg.Points(2), vg.Points(4), vg.Points(1),\n\t\tvg.Points(1), vg.Points(1), vg.Points(1), vg.Points(1),\n\t\tvg.Points(1), vg.Points(1)},\n}\n\n\/\/ Dashes returns the ith default dash pattern,\n\/\/ wrapping if i is less than zero or greater\n\/\/ than the max number of dash patters\n\/\/ in the DefaultDashes slice.\nfunc Dashes(i int) []vg.Length {\n\tn := len(DefaultDashes)\n\tif i < 0 {\n\t\treturn DefaultDashes[i%n+n]\n\t}\n\treturn DefaultDashes[i%n]\n}\n<commit_msg>Fix a comment.<commit_after>\/\/ Copyright 2012 The Plotinum Authors. All rights reserved.\n\/\/ Use of this source code is governed by an MIT-style license\n\/\/ that can be found in the LICENSE file.\n\n\/\/ plotutil contains a small number of utilites for creating plots.\n\/\/ This package is under active development so portions of\n\/\/ it may change.\npackage plotutil\n\nimport (\n\t\"code.google.com\/p\/plotinum\/plot\"\n\t\"code.google.com\/p\/plotinum\/vg\"\n\t\"image\/color\"\n)\n\n\/\/ DefaultColors is a set of colors used by the Color funciton.\nvar DefaultColors = SoftColors\n\n\/\/ DarkColors is a set darker colors.\nvar DarkColors = []color.Color{\n\trgb(238, 46, 47),\n\trgb(0, 140, 72),\n\trgb(24, 90, 169),\n\trgb(244, 125, 35),\n\trgb(102, 44, 145),\n\trgb(162, 29, 33),\n\trgb(180, 56, 148),\n}\n\n\/\/ SoftColors is a set of low-intensity colors.\nvar SoftColors = []color.Color{\n\trgb(241, 90, 96),\n\trgb(122, 195, 106),\n\trgb(90, 155, 212),\n\trgb(250, 167, 91),\n\trgb(158, 103, 171),\n\trgb(206, 112, 88),\n\trgb(215, 127, 180),\n}\n\nfunc rgb(r, g, b uint8) color.RGBA {\n\treturn color.RGBA{r, g, b, 255}\n}\n\n\/\/ Color returns the ith default color, wrapping\n\/\/ if i is less than zero or greater than the max\n\/\/ number of colors in the DefaultColors slice.\nfunc Color(i int) color.Color {\n\tn := len(DefaultColors)\n\tif i < 0 {\n\t\treturn DefaultColors[i%n+n]\n\t}\n\treturn DefaultColors[i%n]\n}\n\n\/\/ DefaultGlyphShapes is a set of GlyphDrawers used by\n\/\/ the Shape function.\nvar DefaultGlyphShapes = []plot.GlyphDrawer{\n\tplot.RingGlyph{},\n\tplot.SquareGlyph{},\n\tplot.TriangleGlyph{},\n\tplot.CrossGlyph{},\n\tplot.PlusGlyph{},\n\tplot.CircleGlyph{},\n\tplot.BoxGlyph{},\n\tplot.PyramidGlyph{},\n}\n\n\/\/ Shape returns the ith default glyph shape,\n\/\/ wrapping if i is less than zero or greater\n\/\/ than the max number of GlyphDrawers\n\/\/ in the DefaultGlyphShapes slice.\nfunc Shape(i int) plot.GlyphDrawer {\n\tn := len(DefaultGlyphShapes)\n\tif i < 0 {\n\t\treturn DefaultGlyphShapes[i%n+n]\n\t}\n\treturn DefaultGlyphShapes[i%n]\n}\n\n\/\/ DefaultDashes is a set of dash patterns used by\n\/\/ the Dashes function.\nvar DefaultDashes = [][]vg.Length{\n\t{},\n\n\t{vg.Points(6), vg.Points(2)},\n\n\t{vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(1), vg.Points(1)},\n\n\t{vg.Points(5), vg.Points(2), vg.Points(1), vg.Points(2)},\n\n\t{vg.Points(10), vg.Points(2), vg.Points(2), vg.Points(2),\n\t\tvg.Points(2), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(10), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(5), vg.Points(2), vg.Points(5), vg.Points(2),\n\t\tvg.Points(2), vg.Points(2), vg.Points(2), vg.Points(2)},\n\n\t{vg.Points(4), vg.Points(2), vg.Points(4), vg.Points(1),\n\t\tvg.Points(1), vg.Points(1), vg.Points(1), vg.Points(1),\n\t\tvg.Points(1), vg.Points(1)},\n}\n\n\/\/ Dashes returns the ith default dash pattern,\n\/\/ wrapping if i is less than zero or greater\n\/\/ than the max number of dash patters\n\/\/ in the DefaultDashes slice.\nfunc Dashes(i int) []vg.Length {\n\tn := len(DefaultDashes)\n\tif i < 0 {\n\t\treturn DefaultDashes[i%n+n]\n\t}\n\treturn DefaultDashes[i%n]\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"go\/build\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\t\"github.com\/shurcooL\/go\/gopherjs_http\"\n\t\"github.com\/shurcooL\/httpfs\/httputil\"\n\t\"github.com\/shurcooL\/httpfs\/vfsutil\"\n)\n\nfunc init() {\n\t\/\/ HACK: This code registers routes at root on default mux... That's not very nice.\n\thttp.Handle(\"\/table-of-contents.js\", httputil.FileHandler{gopherjs_http.Package(\"github.com\/shurcooL\/frontend\/table-of-contents\")})\n\thttp.Handle(\"\/table-of-contents.css\", httputil.FileHandler{vfsutil.File(filepath.Join(importPathToDir(\"github.com\/shurcooL\/frontend\/table-of-contents\"), \"style.css\"))})\n}\n\nfunc importPathToDir(importPath string) string {\n\tp, err := build.Import(importPath, \"\", build.FindOnly)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn p.Dir\n}\n<commit_msg>table-of-contents\/handler: Use keyed fields in composite literal.<commit_after>package handler\n\nimport (\n\t\"go\/build\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\t\"github.com\/shurcooL\/go\/gopherjs_http\"\n\t\"github.com\/shurcooL\/httpfs\/httputil\"\n\t\"github.com\/shurcooL\/httpfs\/vfsutil\"\n)\n\nfunc init() {\n\t\/\/ HACK: This code registers routes at root on default mux... That's not very nice.\n\thttp.Handle(\"\/table-of-contents.js\", httputil.FileHandler{File: gopherjs_http.Package(\"github.com\/shurcooL\/frontend\/table-of-contents\")})\n\thttp.Handle(\"\/table-of-contents.css\", httputil.FileHandler{File: vfsutil.File(filepath.Join(importPathToDir(\"github.com\/shurcooL\/frontend\/table-of-contents\"), \"style.css\"))})\n}\n\nfunc importPathToDir(importPath string) string {\n\tp, err := build.Import(importPath, \"\", build.FindOnly)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn p.Dir\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright Authors of Cilium\n\npackage RuntimeTest\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\n\t. \"github.com\/cilium\/cilium\/test\/ginkgo-ext\"\n\t\"github.com\/cilium\/cilium\/test\/helpers\"\n)\n\nconst (\n\t\/\/ The privileged unit tests can take more than 4 minutes, the default\n\t\/\/ timeout for helper commands.\n\tprivilegedUnitTestTimeout = 16 * time.Minute\n)\n\nvar _ = Describe(\"RuntimePrivilegedUnitTests\", func() {\n\n\tvar vm *helpers.SSHMeta\n\n\tBeforeAll(func() {\n\t\tvm = helpers.InitRuntimeHelper(helpers.Runtime, logger)\n\t\tres := vm.ExecWithSudo(\"systemctl stop cilium\")\n\t\tres.ExpectSuccess(\"Failed trying to stop cilium via systemctl\")\n\t\tExpectCiliumNotRunning(vm)\n\t})\n\n\tAfterAll(func() {\n\t\terr := vm.RestartCilium()\n\t\tExpect(err).Should(BeNil(), \"Failed to restart Cilium\")\n\t\tvm.CloseSSHClient()\n\t})\n\n\tIt(\"Run Tests\", func() {\n\t\tpath, _ := filepath.Split(vm.BasePath())\n\t\tctx, cancel := context.WithTimeout(context.Background(), privilegedUnitTestTimeout)\n\t\tdefer cancel()\n\t\tres := vm.ExecContext(ctx, fmt.Sprintf(\"sudo make -C %s tests-privileged\", path))\n\t\tres.ExpectSuccess(\"Failed to run privileged unit tests\")\n\t})\n})\n<commit_msg>test\/RuntimePrivilegedUnitTests: Log timestamps<commit_after>\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright Authors of Cilium\n\npackage RuntimeTest\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\n\t. \"github.com\/cilium\/cilium\/test\/ginkgo-ext\"\n\t\"github.com\/cilium\/cilium\/test\/helpers\"\n)\n\nconst (\n\t\/\/ The privileged unit tests can take more than 4 minutes, the default\n\t\/\/ timeout for helper commands.\n\tprivilegedUnitTestTimeout = 16 * time.Minute\n)\n\nvar _ = Describe(\"RuntimePrivilegedUnitTests\", func() {\n\n\tvar vm *helpers.SSHMeta\n\n\tBeforeAll(func() {\n\t\tvm = helpers.InitRuntimeHelper(helpers.Runtime, logger)\n\t\tres := vm.ExecWithSudo(\"systemctl stop cilium\")\n\t\tres.ExpectSuccess(\"Failed trying to stop cilium via systemctl\")\n\t\tExpectCiliumNotRunning(vm)\n\t})\n\n\tAfterAll(func() {\n\t\terr := vm.RestartCilium()\n\t\tExpect(err).Should(BeNil(), \"Failed to restart Cilium\")\n\t\tvm.CloseSSHClient()\n\t})\n\n\tIt(\"Run Tests\", func() {\n\t\tpath, _ := filepath.Split(vm.BasePath())\n\t\tctx, cancel := context.WithTimeout(context.Background(), privilegedUnitTestTimeout)\n\t\tdefer cancel()\n\t\tres := vm.ExecContext(ctx, fmt.Sprintf(\"sudo make -C %s tests-privileged | ts '[%%H:%%M:%%S]'\", path))\n\t\tres.ExpectSuccess(\"Failed to run privileged unit tests\")\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package lfs\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/github\/git-lfs\/git\"\n)\n\ntype Configuration struct {\n\tCurrentRemote string\n\tgitConfig map[string]string\n\tremotes []string\n\thttpClient *http.Client\n\tredirectingHttpClient *http.Client\n\tisTracingHttp bool\n\tloading sync.Mutex\n}\n\ntype Endpoint struct {\n\tUrl string\n\tSshUserAndHost string\n\tSshPath string\n}\n\nvar (\n\tConfig = NewConfig()\n\thttpPrefixRe = regexp.MustCompile(\"\\\\Ahttps?:\/\/\")\n\tdefaultRemote = \"origin\"\n)\n\nfunc NewConfig() *Configuration {\n\tc := &Configuration{\n\t\tCurrentRemote: defaultRemote,\n\t\tisTracingHttp: len(os.Getenv(\"GIT_CURL_VERBOSE\")) > 0,\n\t}\n\treturn c\n}\n\nfunc ObjectUrl(endpoint Endpoint, oid string) (*url.URL, error) {\n\tu, err := url.Parse(endpoint.Url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu.Path = path.Join(u.Path, \"objects\")\n\tif len(oid) > 0 {\n\t\tu.Path = path.Join(u.Path, oid)\n\t}\n\treturn u, nil\n}\n\nfunc (c *Configuration) Endpoint() Endpoint {\n\tif url, ok := c.GitConfig(\"lfs.url\"); ok {\n\t\treturn Endpoint{Url: url}\n\t}\n\n\tif len(c.CurrentRemote) > 0 && c.CurrentRemote != defaultRemote {\n\t\tif endpoint := c.RemoteEndpoint(c.CurrentRemote); len(endpoint.Url) > 0 {\n\t\t\treturn endpoint\n\t\t}\n\t}\n\n\treturn c.RemoteEndpoint(defaultRemote)\n}\n\nfunc (c *Configuration) ConcurrentTransfers() int {\n\tuploads := 3\n\n\tif v, ok := c.GitConfig(\"lfs.concurrenttransfers\"); ok {\n\t\tn, err := strconv.Atoi(v)\n\t\tif err == nil && n > 0 {\n\t\t\tuploads = n\n\t\t}\n\t}\n\n\treturn uploads\n}\n\nfunc (c *Configuration) BatchTransfer() bool {\n\tif v, ok := c.GitConfig(\"lfs.batch\"); ok {\n\t\tif v == \"true\" || v == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\t\/\/ Any numeric value except 0 is considered true\n\t\tif n, err := strconv.Atoi(v); err == nil && n != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *Configuration) RemoteEndpoint(remote string) Endpoint {\n\tif len(remote) == 0 {\n\t\tremote = defaultRemote\n\t}\n\n\tif url, ok := c.GitConfig(\"remote.\" + remote + \".lfsurl\"); ok {\n\t\treturn Endpoint{Url: url}\n\t}\n\n\tif url, ok := c.GitConfig(\"remote.\" + remote + \".url\"); ok {\n\t\tendpoint := Endpoint{Url: url}\n\n\t\tif !httpPrefixRe.MatchString(url) {\n\t\t\tpieces := strings.SplitN(url, \":\", 2)\n\t\t\thostPieces := strings.SplitN(pieces[0], \"@\", 2)\n\t\t\tif len(hostPieces) < 2 {\n\t\t\t\tendpoint.Url = \"<unknown>\"\n\t\t\t\treturn endpoint\n\t\t\t}\n\n\t\t\tendpoint.SshUserAndHost = pieces[0]\n\t\t\tendpoint.SshPath = pieces[1]\n\t\t\tendpoint.Url = fmt.Sprintf(\"https:\/\/%s\/%s\", hostPieces[1], pieces[1])\n\t\t}\n\n\t\tif path.Ext(url) == \".git\" {\n\t\t\tendpoint.Url += \"\/info\/lfs\"\n\t\t} else {\n\t\t\tendpoint.Url += \".git\/info\/lfs\"\n\t\t}\n\n\t\treturn endpoint\n\t}\n\n\treturn Endpoint{}\n}\n\nfunc (c *Configuration) Remotes() []string {\n\tc.loadGitConfig()\n\treturn c.remotes\n}\n\nfunc (c *Configuration) GitConfig(key string) (string, bool) {\n\tc.loadGitConfig()\n\tvalue, ok := c.gitConfig[strings.ToLower(key)]\n\treturn value, ok\n}\n\nfunc (c *Configuration) SetConfig(key, value string) {\n\tc.loadGitConfig()\n\tc.gitConfig[key] = value\n}\n\nfunc (c *Configuration) ObjectUrl(oid string) (*url.URL, error) {\n\treturn ObjectUrl(c.Endpoint(), oid)\n}\n\ntype AltConfig struct {\n\tRemote map[string]*struct {\n\t\tMedia string\n\t}\n\n\tMedia struct {\n\t\tUrl string\n\t}\n}\n\nfunc (c *Configuration) loadGitConfig() {\n\tc.loading.Lock()\n\tdefer c.loading.Unlock()\n\n\tif c.gitConfig != nil {\n\t\treturn\n\t}\n\n\tuniqRemotes := make(map[string]bool)\n\n\tc.gitConfig = make(map[string]string)\n\n\tvar output string\n\tlistOutput, err := git.Config.List()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error listing git config: %s\", err))\n\t}\n\n\tconfigFile := filepath.Join(LocalWorkingDir, \".gitconfig\")\n\tfileOutput, err := git.Config.ListFromFile(configFile)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error listing git config from file: %s\", err))\n\t}\n\n\toutput = fileOutput + \"\\n\" + listOutput\n\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, line := range lines {\n\t\tpieces := strings.SplitN(line, \"=\", 2)\n\t\tif len(pieces) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tkey := strings.ToLower(pieces[0])\n\t\tc.gitConfig[key] = pieces[1]\n\n\t\tkeyParts := strings.Split(key, \".\")\n\t\tif len(keyParts) > 1 && keyParts[0] == \"remote\" {\n\t\t\tremote := keyParts[1]\n\t\t\tuniqRemotes[remote] = remote == \"origin\"\n\t\t}\n\t}\n\n\tc.remotes = make([]string, 0, len(uniqRemotes))\n\tfor remote, isOrigin := range uniqRemotes {\n\t\tif isOrigin {\n\t\t\tcontinue\n\t\t}\n\t\tc.remotes = append(c.remotes, remote)\n\t}\n}\n<commit_msg>ンンンンン ンンンン<commit_after>package lfs\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/github\/git-lfs\/git\"\n)\n\ntype Configuration struct {\n\tCurrentRemote string\n\tgitConfig map[string]string\n\tremotes []string\n\thttpClient *http.Client\n\tredirectingHttpClient *http.Client\n\tisTracingHttp bool\n\tloading sync.Mutex\n}\n\ntype Endpoint struct {\n\tUrl string\n\tSshUserAndHost string\n\tSshPath string\n}\n\nvar (\n\tConfig = NewConfig()\n\thttpPrefixRe = regexp.MustCompile(\"\\\\Ahttps?:\/\/\")\n\tdefaultRemote = \"origin\"\n)\n\nfunc NewConfig() *Configuration {\n\tc := &Configuration{\n\t\tCurrentRemote: defaultRemote,\n\t\tisTracingHttp: len(os.Getenv(\"GIT_CURL_VERBOSE\")) > 0,\n\t}\n\treturn c\n}\n\nfunc ObjectUrl(endpoint Endpoint, oid string) (*url.URL, error) {\n\tu, err := url.Parse(endpoint.Url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu.Path = path.Join(u.Path, \"objects\")\n\tif len(oid) > 0 {\n\t\tu.Path = path.Join(u.Path, oid)\n\t}\n\treturn u, nil\n}\n\nfunc (c *Configuration) Endpoint() Endpoint {\n\tif url, ok := c.GitConfig(\"lfs.url\"); ok {\n\t\treturn Endpoint{Url: url}\n\t}\n\n\tif len(c.CurrentRemote) > 0 && c.CurrentRemote != defaultRemote {\n\t\tif endpoint := c.RemoteEndpoint(c.CurrentRemote); len(endpoint.Url) > 0 {\n\t\t\treturn endpoint\n\t\t}\n\t}\n\n\treturn c.RemoteEndpoint(defaultRemote)\n}\n\nfunc (c *Configuration) ConcurrentTransfers() int {\n\tuploads := 3\n\n\tif v, ok := c.GitConfig(\"lfs.concurrenttransfers\"); ok {\n\t\tn, err := strconv.Atoi(v)\n\t\tif err == nil && n > 0 {\n\t\t\tuploads = n\n\t\t}\n\t}\n\n\treturn uploads\n}\n\nfunc (c *Configuration) BatchTransfer() bool {\n\tif v, ok := c.GitConfig(\"lfs.batch\"); ok {\n\t\tif v == \"true\" || v == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\t\/\/ Any numeric value except 0 is considered true\n\t\tif n, err := strconv.Atoi(v); err == nil && n != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *Configuration) RemoteEndpoint(remote string) Endpoint {\n\tif len(remote) == 0 {\n\t\tremote = defaultRemote\n\t}\n\n\tif url, ok := c.GitConfig(\"remote.\" + remote + \".lfsurl\"); ok {\n\t\treturn Endpoint{Url: url}\n\t}\n\n\tif url, ok := c.GitConfig(\"remote.\" + remote + \".url\"); ok {\n\t\tendpoint := Endpoint{Url: url}\n\n\t\tif !httpPrefixRe.MatchString(url) {\n\t\t\tpieces := strings.SplitN(url, \":\", 2)\n\t\t\thostPieces := strings.SplitN(pieces[0], \"@\", 2)\n\t\t\tif len(hostPieces) < 2 {\n\t\t\t\tendpoint.Url = \"<unknown>\"\n\t\t\t\treturn endpoint\n\t\t\t}\n\n\t\t\tendpoint.SshUserAndHost = pieces[0]\n\t\t\tendpoint.SshPath = pieces[1]\n\t\t\tendpoint.Url = fmt.Sprintf(\"https:\/\/%s\/%s\", hostPieces[1], pieces[1])\n\t\t}\n\n\t\tif path.Ext(url) == \".git\" {\n\t\t\tendpoint.Url += \"\/info\/lfs\"\n\t\t} else {\n\t\t\tendpoint.Url += \".git\/info\/lfs\"\n\t\t}\n\n\t\treturn endpoint\n\t}\n\n\treturn Endpoint{}\n}\n\nfunc (c *Configuration) Remotes() []string {\n\tc.loadGitConfig()\n\treturn c.remotes\n}\n\nfunc (c *Configuration) GitConfig(key string) (string, bool) {\n\tc.loadGitConfig()\n\tvalue, ok := c.gitConfig[strings.ToLower(key)]\n\treturn value, ok\n}\n\nfunc (c *Configuration) SetConfig(key, value string) {\n\tc.loadGitConfig()\n\tc.gitConfig[key] = value\n}\n\nfunc (c *Configuration) ObjectUrl(oid string) (*url.URL, error) {\n\treturn ObjectUrl(c.Endpoint(), oid)\n}\n\nfunc (c *Configuration) loadGitConfig() {\n\tc.loading.Lock()\n\tdefer c.loading.Unlock()\n\n\tif c.gitConfig != nil {\n\t\treturn\n\t}\n\n\tuniqRemotes := make(map[string]bool)\n\n\tc.gitConfig = make(map[string]string)\n\n\tvar output string\n\tlistOutput, err := git.Config.List()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error listing git config: %s\", err))\n\t}\n\n\tconfigFile := filepath.Join(LocalWorkingDir, \".gitconfig\")\n\tfileOutput, err := git.Config.ListFromFile(configFile)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error listing git config from file: %s\", err))\n\t}\n\n\toutput = fileOutput + \"\\n\" + listOutput\n\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, line := range lines {\n\t\tpieces := strings.SplitN(line, \"=\", 2)\n\t\tif len(pieces) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tkey := strings.ToLower(pieces[0])\n\t\tc.gitConfig[key] = pieces[1]\n\n\t\tkeyParts := strings.Split(key, \".\")\n\t\tif len(keyParts) > 1 && keyParts[0] == \"remote\" {\n\t\t\tremote := keyParts[1]\n\t\t\tuniqRemotes[remote] = remote == \"origin\"\n\t\t}\n\t}\n\n\tc.remotes = make([]string, 0, len(uniqRemotes))\n\tfor remote, isOrigin := range uniqRemotes {\n\t\tif isOrigin {\n\t\t\tcontinue\n\t\t}\n\t\tc.remotes = append(c.remotes, remote)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n)\n\n\/\/ Maximum length of a Cloud Run Service name.\nconst maxCloudRunServiceNameLen = 53\n\n\/\/ cloudRunService represents a Cloud Run Service and holds its name and URL.\ntype cloudRunService struct {\n\t\/\/ the associated sample\n\tsample *sample\n\n\t\/\/ the Service's name\n\tname string\n\n\t\/\/ the Service's root URL\n\turl string\n}\n\n\/\/ newCloudRunService returns a new cloudRunService with the provided name.\nfunc newCloudRunService(sample *sample) *cloudRunService {\n\treturn &cloudRunService{\n\t\tsample: sample,\n\t\tname: serviceName(sample),\n\t}\n}\n\n\/\/ delete calls the external gcloud SDK and deletes the Cloud Run Service associated with the current cloudRunService.\nfunc (s *cloudRunService) delete() {\n\texecCommand(gcloudCommandBuild([]string{\n\t\t\"run\",\n\t\t\"services\",\n\t\t\"delete\",\n\t\ts.name,\n\t\t\"--region=us-east4\",\n\t\t\"--platform=managed\",\n\t}))\n}\n\n\/\/ getURL calls the external gcloud SDK and gets the root URL of the Cloud Run Service associated with the current\n\/\/ cloudRunService.\nfunc (s *cloudRunService) getURL() string {\n\tif s.url == \"\" {\n\t\ts.url = execCommand(gcloudCommandBuild([]string{\n\t\t\t\"run\",\n\t\t\t\"--platform=managed\",\n\t\t\t\"--region=us-east4\",\n\t\t\t\"services\",\n\t\t\t\"describe\",\n\t\t\ts.name,\n\t\t\t\"--format=value(status.url)\",\n\t\t}))\n\t}\n\n\treturn s.url\n}\n\n\/\/ serviceName generates a Cloud Run service name for the provided sample. It concatenates the sample's name with a\n\/\/ random 10-character alphanumeric string.\nfunc serviceName(sample *sample) string {\n\trandBytes := make([]byte, cloudRunServiceNameRandSuffixLen\/2)\n\n\t_, err := rand.Read(randBytes)\n\tif err != nil {\n\t\tlog.Panicf(\"Error generating crypto\/rand bytes: %v\\n\", err)\n\t}\n\n\trandSuffix := fmt.Sprintf(\"-%s\", hex.EncodeToString(randBytes))\n\treturn fmt.Sprintf(\"%s%s\", sample.sampleName(len(randSuffix)), randSuffix)\n}\n<commit_msg>Minor comment changes<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n)\n\n\/\/ Maximum length of a Cloud Run Service name.\nconst maxCloudRunServiceNameLen = 53\n\n\/\/ cloudRunService represents a Cloud Run service and stores its parameters.\ntype cloudRunService struct {\n\tsample *sample\n\tname string\n\turl string\n}\n\n\/\/ newCloudRunService returns a new cloudRunService with the provided name.\nfunc newCloudRunService(sample *sample) *cloudRunService {\n\treturn &cloudRunService{\n\t\tsample: sample,\n\t\tname: serviceName(sample),\n\t}\n}\n\n\/\/ delete calls the external gcloud SDK and deletes the Cloud Run Service associated with the current cloudRunService.\nfunc (s *cloudRunService) delete() {\n\texecCommand(gcloudCommandBuild([]string{\n\t\t\"run\",\n\t\t\"services\",\n\t\t\"delete\",\n\t\ts.name,\n\t\t\"--region=us-east4\",\n\t\t\"--platform=managed\",\n\t}))\n}\n\n\/\/ getURL calls the external gcloud SDK and gets the root URL of the Cloud Run Service associated with the current\n\/\/ cloudRunService.\nfunc (s *cloudRunService) getURL() string {\n\tif s.url == \"\" {\n\t\ts.url = execCommand(gcloudCommandBuild([]string{\n\t\t\t\"run\",\n\t\t\t\"--platform=managed\",\n\t\t\t\"--region=us-east4\",\n\t\t\t\"services\",\n\t\t\t\"describe\",\n\t\t\ts.name,\n\t\t\t\"--format=value(status.url)\",\n\t\t}))\n\t}\n\n\treturn s.url\n}\n\n\/\/ serviceName generates a Cloud Run service name for the provided sample. It concatenates the sample's name with a\n\/\/ random 10-character alphanumeric string.\nfunc serviceName(sample *sample) string {\n\trandBytes := make([]byte, cloudRunServiceNameRandSuffixLen\/2)\n\n\t_, err := rand.Read(randBytes)\n\tif err != nil {\n\t\tlog.Panicf(\"Error generating crypto\/rand bytes: %v\\n\", err)\n\t}\n\n\trandSuffix := fmt.Sprintf(\"-%s\", hex.EncodeToString(randBytes))\n\treturn fmt.Sprintf(\"%s%s\", sample.sampleName(len(randSuffix)), randSuffix)\n}\n<|endoftext|>"} {"text":"<commit_before>package clubaux\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/dc0d\/club\"\n\t\"github.com\/hashicorp\/hcl\"\n)\n\n\/\/-----------------------------------------------------------------------------\n\nvar log = club.GetLogger()\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ OnSignal runs function on receiving the OS signal\nfunc OnSignal(f func(), sig ...os.Signal) {\n\tif f == nil {\n\t\treturn\n\t}\n\tsigc := make(chan os.Signal, 1)\n\tif len(sig) > 0 {\n\t\tsignal.Notify(sigc, sig...)\n\t} else {\n\t\tsignal.Notify(sigc,\n\t\t\tsyscall.SIGINT,\n\t\t\tsyscall.SIGTERM,\n\t\t\tsyscall.SIGQUIT,\n\t\t\tsyscall.SIGSTOP,\n\t\t\tsyscall.SIGABRT,\n\t\t\tsyscall.SIGTSTP,\n\t\t\tsyscall.SIGKILL)\n\t}\n\tgo func() {\n\t\t<-sigc\n\t\tf()\n\t}()\n}\n\nvar (\n\terrNotAvailable = club.Errorf(\"N\/A\")\n)\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ Here .\nfunc Here(skip ...int) (funcName, fileName string, fileLine int, callerErr error) {\n\tsk := 1\n\tif len(skip) > 0 && skip[0] > 1 {\n\t\tsk = skip[0]\n\t}\n\tvar pc uintptr\n\tvar ok bool\n\tpc, fileName, fileLine, ok = runtime.Caller(sk)\n\tif !ok {\n\t\tcallerErr = errNotAvailable\n\t\treturn\n\t}\n\tfn := runtime.FuncForPC(pc)\n\tname := fn.Name()\n\tix := strings.LastIndex(name, \".\")\n\tif ix > 0 && (ix+1) < len(name) {\n\t\tname = name[ix+1:]\n\t}\n\tfuncName = name\n\tnd, nf := filepath.Split(fileName)\n\tfileName = filepath.Join(filepath.Base(nd), nf)\n\treturn\n}\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ TimerScope .\nfunc TimerScope(name string, opCount ...int) func() {\n\tif name == \"\" {\n\t\tfuncName, fileName, fileLine, err := Here(2)\n\t\tif err != nil {\n\t\t\tname = \"N\/A\"\n\t\t} else {\n\t\t\tname = fmt.Sprintf(\"%s:%02d %s()\", fileName, fileLine, funcName)\n\t\t}\n\t}\n\tlog.Info(name, `started`)\n\tstart := time.Now()\n\treturn func() {\n\t\telapsed := time.Now().Sub(start)\n\t\tlog.Infof(\"%s took %v\", name, elapsed)\n\t\tif len(opCount) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tN := opCount[0]\n\t\tif N <= 0 {\n\t\t\treturn\n\t\t}\n\n\t\tE := float64(elapsed)\n\t\tFRC := E \/ float64(N)\n\n\t\tlog.Infof(\"op\/sec %.2f\", float64(N)\/(E\/float64(time.Second)))\n\n\t\tswitch {\n\t\tcase FRC > float64(time.Second):\n\t\t\tlog.Infof(\"sec\/op %.2f\", (E\/float64(time.Second))\/float64(N))\n\t\tcase FRC > float64(time.Millisecond):\n\t\t\tlog.Infof(\"milli-sec\/op %.2f\", (E\/float64(time.Millisecond))\/float64(N))\n\t\tcase FRC > float64(time.Microsecond):\n\t\t\tlog.Infof(\"micro-sec\/op %.2f\", (E\/float64(time.Microsecond))\/float64(N))\n\t\tdefault:\n\t\t\tlog.Infof(\"nano-sec\/op %.2f\", (E\/float64(time.Nanosecond))\/float64(N))\n\t\t}\n\t}\n}\n\n\/\/-----------------------------------------------------------------------------\n\nvar bufferPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &bytes.Buffer{}\n\t},\n}\n\n\/\/ GetBuffer .\nfunc GetBuffer() *bytes.Buffer {\n\tbuff := bufferPool.Get().(*bytes.Buffer)\n\treturn buff\n}\n\n\/\/ PutBuffer .\nfunc PutBuffer(buff *bytes.Buffer) {\n\tbufferPool.Put(buff)\n\tbuff.Reset()\n}\n\n\/\/-----------------------------------------------------------------------------\n\nfunc defaultAppNameHandler() string {\n\treturn filepath.Base(os.Args[0])\n}\n\nfunc defaultConfNameHandler() string {\n\tfp := fmt.Sprintf(\"%s.conf\", defaultAppNameHandler())\n\tif _, err := os.Stat(fp); err != nil {\n\t\tfp = \"app.conf\"\n\t}\n\treturn fp\n}\n\n\/\/ LoadHCL loads hcl conf file. default conf file names (if filePath not provided)\n\/\/ in the same directory are <appname>.conf and if not fount app.conf\nfunc LoadHCL(ptr interface{}, filePath ...string) error {\n\tvar fp string\n\tif len(filePath) > 0 {\n\t\tfp = filePath[0]\n\t}\n\tif fp == \"\" {\n\t\tfp = defaultConfNameHandler()\n\t}\n\tcn, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = hcl.Unmarshal(cn, ptr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/-----------------------------------------------------------------------------\n<commit_msg>tiny rop (Chain) in clubaux<commit_after>package clubaux\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/dc0d\/club\"\n\t\"github.com\/hashicorp\/hcl\"\n)\n\n\/\/-----------------------------------------------------------------------------\n\nvar log = club.GetLogger()\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ OnSignal runs function on receiving the OS signal\nfunc OnSignal(f func(), sig ...os.Signal) {\n\tif f == nil {\n\t\treturn\n\t}\n\tsigc := make(chan os.Signal, 1)\n\tif len(sig) > 0 {\n\t\tsignal.Notify(sigc, sig...)\n\t} else {\n\t\tsignal.Notify(sigc,\n\t\t\tsyscall.SIGINT,\n\t\t\tsyscall.SIGTERM,\n\t\t\tsyscall.SIGQUIT,\n\t\t\tsyscall.SIGSTOP,\n\t\t\tsyscall.SIGABRT,\n\t\t\tsyscall.SIGTSTP,\n\t\t\tsyscall.SIGKILL)\n\t}\n\tgo func() {\n\t\t<-sigc\n\t\tf()\n\t}()\n}\n\nvar (\n\terrNotAvailable = club.Errorf(\"N\/A\")\n)\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ Here .\nfunc Here(skip ...int) (funcName, fileName string, fileLine int, callerErr error) {\n\tsk := 1\n\tif len(skip) > 0 && skip[0] > 1 {\n\t\tsk = skip[0]\n\t}\n\tvar pc uintptr\n\tvar ok bool\n\tpc, fileName, fileLine, ok = runtime.Caller(sk)\n\tif !ok {\n\t\tcallerErr = errNotAvailable\n\t\treturn\n\t}\n\tfn := runtime.FuncForPC(pc)\n\tname := fn.Name()\n\tix := strings.LastIndex(name, \".\")\n\tif ix > 0 && (ix+1) < len(name) {\n\t\tname = name[ix+1:]\n\t}\n\tfuncName = name\n\tnd, nf := filepath.Split(fileName)\n\tfileName = filepath.Join(filepath.Base(nd), nf)\n\treturn\n}\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ TimerScope .\nfunc TimerScope(name string, opCount ...int) func() {\n\tif name == \"\" {\n\t\tfuncName, fileName, fileLine, err := Here(2)\n\t\tif err != nil {\n\t\t\tname = \"N\/A\"\n\t\t} else {\n\t\t\tname = fmt.Sprintf(\"%s:%02d %s()\", fileName, fileLine, funcName)\n\t\t}\n\t}\n\tlog.Info(name, ` started`)\n\tstart := time.Now()\n\treturn func() {\n\t\telapsed := time.Now().Sub(start)\n\t\tlog.Infof(\"%s took %v\", name, elapsed)\n\t\tif len(opCount) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tN := opCount[0]\n\t\tif N <= 0 {\n\t\t\treturn\n\t\t}\n\n\t\tE := float64(elapsed)\n\t\tFRC := E \/ float64(N)\n\n\t\tlog.Infof(\"op\/sec %.2f\", float64(N)\/(E\/float64(time.Second)))\n\n\t\tswitch {\n\t\tcase FRC > float64(time.Second):\n\t\t\tlog.Infof(\"sec\/op %.2f\", (E\/float64(time.Second))\/float64(N))\n\t\tcase FRC > float64(time.Millisecond):\n\t\t\tlog.Infof(\"milli-sec\/op %.2f\", (E\/float64(time.Millisecond))\/float64(N))\n\t\tcase FRC > float64(time.Microsecond):\n\t\t\tlog.Infof(\"micro-sec\/op %.2f\", (E\/float64(time.Microsecond))\/float64(N))\n\t\tdefault:\n\t\t\tlog.Infof(\"nano-sec\/op %.2f\", (E\/float64(time.Nanosecond))\/float64(N))\n\t\t}\n\t}\n}\n\n\/\/-----------------------------------------------------------------------------\n\nvar bufferPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &bytes.Buffer{}\n\t},\n}\n\n\/\/ GetBuffer .\nfunc GetBuffer() *bytes.Buffer {\n\tbuff := bufferPool.Get().(*bytes.Buffer)\n\treturn buff\n}\n\n\/\/ PutBuffer .\nfunc PutBuffer(buff *bytes.Buffer) {\n\tbufferPool.Put(buff)\n\tbuff.Reset()\n}\n\n\/\/-----------------------------------------------------------------------------\n\nfunc defaultAppNameHandler() string {\n\treturn filepath.Base(os.Args[0])\n}\n\nfunc defaultConfNameHandler() string {\n\tfp := fmt.Sprintf(\"%s.conf\", defaultAppNameHandler())\n\tif _, err := os.Stat(fp); err != nil {\n\t\tfp = \"app.conf\"\n\t}\n\treturn fp\n}\n\n\/\/ LoadHCL loads hcl conf file. default conf file names (if filePath not provided)\n\/\/ in the same directory are <appname>.conf and if not fount app.conf\nfunc LoadHCL(ptr interface{}, filePath ...string) error {\n\tvar fp string\n\tif len(filePath) > 0 {\n\t\tfp = filePath[0]\n\t}\n\tif fp == \"\" {\n\t\tfp = defaultConfNameHandler()\n\t}\n\tcn, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = hcl.Unmarshal(cn, ptr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/-----------------------------------------------------------------------------\n\n\/\/ Chain .\nfunc Chain(steps ...func() error) (chainerr error) {\n\tfor _, v := range steps {\n\t\tchainerr = v()\n\t\tif chainerr != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/-----------------------------------------------------------------------------\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage arch\n\nimport (\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ The following constants define the machine architectures supported by Juju.\nconst (\n\tAMD64 = \"amd64\"\n\tI386 = \"i386\"\n\tARM = \"armhf\"\n\tARM64 = \"arm64\"\n\tPPC64 = \"ppc64\"\n)\n\n\/\/ AllSupportedArches records the machine architectures recognised by Juju.\nvar AllSupportedArches = []string{\n\tAMD64,\n\tI386,\n\tARM,\n\tARM64,\n\tPPC64,\n}\n\n\/\/ archREs maps regular expressions for matching\n\/\/ `uname -m` to architectures recognised by Juju.\nvar archREs = []struct {\n\t*regexp.Regexp\n\tarch string\n}{\n\t{regexp.MustCompile(\"amd64|x86_64\"), AMD64},\n\t{regexp.MustCompile(\"i?[3-9]86\"), I386},\n\t{regexp.MustCompile(\"armv.*\"), ARM},\n\t{regexp.MustCompile(\"aarch64\"), ARM64},\n\t{regexp.MustCompile(\"ppc64el|ppc64le\"), PPC64},\n}\n\n\/\/ Override for testing.\nvar HostArch = hostArch\n\n\/\/ HostArch returns the Juju architecture of the machine on which it is run.\nfunc hostArch() string {\n\treturn NormaliseArch(runtime.GOARCH)\n}\n\n\/\/ NormaliseArch returns the Juju architecture corresponding to a machine's\n\/\/ reported architecture. The Juju architecture is used to filter simple\n\/\/ streams lookup of tools and images.\nfunc NormaliseArch(rawArch string) string {\n\trawArch = strings.TrimSpace(rawArch)\n\tfor _, re := range archREs {\n\t\tif re.Match([]byte(rawArch)) {\n\t\t\treturn re.arch\n\t\t\tbreak\n\t\t}\n\t}\n\treturn rawArch\n}\n\n\/\/ IsSupportedArch returns true if arch is one supported by Juju.\nfunc IsSupportedArch(arch string) bool {\n\tfor _, a := range AllSupportedArches {\n\t\tif a == arch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>Fix whitespace<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage arch\n\nimport (\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ The following constants define the machine architectures supported by Juju.\nconst (\n\tAMD64 = \"amd64\"\n\tI386 = \"i386\"\n\tARM = \"armhf\"\n\tARM64 = \"arm64\"\n\tPPC64 = \"ppc64\"\n)\n\n\/\/ AllSupportedArches records the machine architectures recognised by Juju.\nvar AllSupportedArches = []string{\n\tAMD64,\n\tI386,\n\tARM,\n\tARM64,\n\tPPC64,\n}\n\n\/\/ archREs maps regular expressions for matching\n\/\/ `uname -m` to architectures recognised by Juju.\nvar archREs = []struct {\n\t*regexp.Regexp\n\tarch string\n}{\n\t{regexp.MustCompile(\"amd64|x86_64\"), AMD64},\n\t{regexp.MustCompile(\"i?[3-9]86\"), I386},\n\t{regexp.MustCompile(\"armv.*\"), ARM},\n\t{regexp.MustCompile(\"aarch64\"), ARM64},\n\t{regexp.MustCompile(\"ppc64el|ppc64le\"), PPC64},\n}\n\n\/\/ Override for testing.\nvar HostArch = hostArch\n\n\/\/ HostArch returns the Juju architecture of the machine on which it is run.\nfunc hostArch() string {\n\treturn NormaliseArch(runtime.GOARCH)\n}\n\n\/\/ NormaliseArch returns the Juju architecture corresponding to a machine's\n\/\/ reported architecture. The Juju architecture is used to filter simple\n\/\/ streams lookup of tools and images.\nfunc NormaliseArch(rawArch string) string {\n\trawArch = strings.TrimSpace(rawArch)\n\tfor _, re := range archREs {\n\t\tif re.Match([]byte(rawArch)) {\n\t\t\treturn re.arch\n\t\t\tbreak\n\t\t}\n\t}\n\treturn rawArch\n}\n\n\/\/ IsSupportedArch returns true if arch is one supported by Juju.\nfunc IsSupportedArch(arch string) bool {\n\tfor _, a := range AllSupportedArches {\n\t\tif a == arch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nThis package is just a collection of use-case for the various aspects of the RIPE API.\nConsider this both as an example on how to use the API and a testing tool for the API wrapper.\n*\/\npackage main\n\nimport (\n\t\"github.com\/keltia\/ripe-atlas\"\n\t\"github.com\/urfave\/cli\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n)\n\nvar (\n\t\/\/ flags\n\tfWant4 bool\n\tfWant6 bool\n\n\t\/\/ True by default\n\tfWantMine = true\n\n\tfAllProbes bool\n\tfAllMeasurements bool\n\n\tfAsn string\n\tfCountry string\n\tfFieldList string\n\tfFormat string\n\tfInclude string\n\tfOptFields string\n\tfProtocol string\n\tfSortOrder string\n\tfMeasureType string\n\n\tfHTTPMethod string\n\tfUserAgent string\n\tfHTTPVersion string\n\n\tfBitCD bool\n\tfDisableDNSSEC bool\n\n\tfDebug bool\n\tfVerbose bool\n\tfWantAnchor bool\n\n\tfMaxHops int\n\tfPacketSize int\n\n\tmycnf *Config\n\n\tcliCommands []cli.Command\n\n\tclient *atlas.Client\n)\n\nconst (\n\tatlasVersion = \"0.11\"\n\tMyName = \"ripe-atlas\"\n\n\t\/\/ WantBoth is the way to ask for both IPv4 & IPv6.\n\tWantBoth = \"64\"\n\n\t\/\/ Want4 only 4\n\tWant4 = \"4\"\n\t\/\/ Want6 only 6\n\tWant6 = \"6\"\n)\n\n\/\/ -4 & -6 are special, if neither is specified, then we turn both as true\n\/\/ Check a few other things while we are here\nfunc finalcheck(c *cli.Context) error {\n\tvar err error\n\n\t\/\/ Load main configuration\n\tmycnf, err = LoadConfig(\"\")\n\tif err != nil {\n\t\tif fVerbose {\n\t\t\tlog.Printf(\"No configuration file found.\")\n\t\t}\n\t}\n\n\t\/\/ Logical\n\tif fDebug {\n\t\tfVerbose = true\n\t\tlog.Printf(\"config: %#v\", mycnf)\n\t}\n\n\t\/\/ Various messages\n\tif fVerbose {\n\t\tif mycnf.APIKey != \"\" {\n\t\t\tlog.Printf(\"Found API key!\")\n\t\t} else {\n\t\t\tlog.Printf(\"No API key!\")\n\t\t}\n\n\t\tif mycnf.DefaultProbe != 0 {\n\t\t\tlog.Printf(\"Found default probe: %d\\n\", mycnf.DefaultProbe)\n\t\t}\n\t}\n\n\t\/\/ Check whether we have proxy authentication (from a separate config file)\n\tauth, err := setupProxyAuth()\n\tif err != nil {\n\t\tif fVerbose {\n\t\t\tlog.Printf(\"Invalid or no proxy auth credentials\")\n\t\t}\n\t}\n\n\t\/\/ Wondering whether to move to the Functional options pattern\n\t\/\/ cf. https:\/\/dave.cheney.net\/2016\/11\/13\/do-not-fear-first-class-functions\n\tclient, err = atlas.NewClient(atlas.Config{\n\t\tAPIKey: mycnf.APIKey,\n\t\tDefaultProbe: mycnf.DefaultProbe,\n\t\tPoolSize: mycnf.PoolSize,\n\t\tProxyAuth: auth,\n\t\tVerbose: fVerbose,\n\t})\n\n\t\/\/ No need to continue if this fails\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating the Atlas client: %v\", err)\n\t}\n\n\tif fWant4 {\n\t\tmycnf.WantAF = Want4\n\t}\n\n\tif fWant6 {\n\t\tmycnf.WantAF = Want6\n\t}\n\n\t\/\/ Both are fine\n\tif fWant4 && fWant6 {\n\t\tmycnf.WantAF = WantBoth\n\t}\n\n\t\/\/ So is neither — common case\n\tif !fWant4 && !fWant6 {\n\t\tmycnf.WantAF = WantBoth\n\t}\n\n\t\/\/ Set global options\n\tclient.SetFormat(fFormat)\n\tclient.SetAF(mycnf.WantAF)\n\tclient.SetInclude(fInclude)\n\n\treturn nil\n}\n\n\/\/ main is the starting point (and everything)\nfunc main() {\n\tcli.VersionFlag = cli.BoolFlag{Name: \"version, V\"}\n\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tlog.Printf(\"API wrapper: %s Atlas API: %s\\n\", c.App.Version, atlas.GetVersion())\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = \"atlas\"\n\tapp.Usage = \"RIPE Atlas CLI interface\"\n\tapp.Author = \"Ollivier Robert <roberto@keltia.net>\"\n\tapp.Version = atlasVersion\n\t\/\/app.HideVersion = true\n\n\t\/\/ General flags\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format,f\",\n\t\t\tUsage: \"specify output format\",\n\t\t\tDestination: &fFormat,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug,D\",\n\t\t\tUsage: \"debug mode\",\n\t\t\tDestination: &fDebug,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose,v\",\n\t\t\tUsage: \"verbose mode\",\n\t\t\tDestination: &fVerbose,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"fields,F\",\n\t\t\tUsage: \"specify which fields are wanted\",\n\t\t\tDestination: &fFieldList,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"include,I\",\n\t\t\tUsage: \"specify whether objects should be expanded\",\n\t\t\tDestination: &fInclude,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"opt-fields,O\",\n\t\t\tUsage: \"specify which optional fields are wanted\",\n\t\t\tDestination: &fOptFields,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"sort,S\",\n\t\t\tUsage: \"sort results\",\n\t\t\tDestination: &fSortOrder,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"6, ipv6\",\n\t\t\tUsage: \"Only IPv6\",\n\t\t\tDestination: &fWant6,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"4, ipv4\",\n\t\t\tUsage: \"Only IPv4\",\n\t\t\tDestination: &fWant4,\n\t\t},\n\t}\n\n\t\/\/ Ensure -4 & -6 are treated properly & initialization is done\n\tapp.Before = finalcheck\n\n\tsort.Sort(ByAlphabet(cliCommands))\n\tapp.Commands = cliCommands\n\tapp.Run(os.Args)\n}\n<commit_msg>Complete the list of global options.<commit_after>\/*\nThis package is just a collection of use-case for the various aspects of the RIPE API.\nConsider this both as an example on how to use the API and a testing tool for the API wrapper.\n*\/\npackage main\n\nimport (\n\t\"github.com\/keltia\/ripe-atlas\"\n\t\"github.com\/urfave\/cli\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n)\n\nvar (\n\t\/\/ flags\n\tfWant4 bool\n\tfWant6 bool\n\n\tfAllProbes bool\n\tfAllMeasurements bool\n\n\t\/\/ Global options\n\tfFieldList string\n\tfFormat string\n\tfInclude string\n\tfOptFields string\n\tfPageNum string\n\tfPageSize string\n\tfSortOrder string\n\tfWantMine bool\n\n\t\/\/ Measurement-specific ones\n\tfAsn string\n\tfCountry string\n\tfProtocol string\n\tfMeasureType string\n\n\tfHTTPMethod string\n\tfUserAgent string\n\tfHTTPVersion string\n\n\tfBitCD bool\n\tfDisableDNSSEC bool\n\n\tfDebug bool\n\tfVerbose bool\n\tfWantAnchor bool\n\n\tfMaxHops int\n\tfPacketSize int\n\n\tmycnf *Config\n\n\tcliCommands []cli.Command\n\n\tclient *atlas.Client\n)\n\nconst (\n\tatlasVersion = \"0.11\"\n\tMyName = \"ripe-atlas\"\n\n\t\/\/ WantBoth is the way to ask for both IPv4 & IPv6.\n\tWantBoth = \"64\"\n\n\t\/\/ Want4 only 4\n\tWant4 = \"4\"\n\t\/\/ Want6 only 6\n\tWant6 = \"6\"\n)\n\n\/\/ -4 & -6 are special, if neither is specified, then we turn both as true\n\/\/ Check a few other things while we are here\nfunc finalcheck(c *cli.Context) error {\n\tvar err error\n\n\t\/\/ Load main configuration\n\tmycnf, err = LoadConfig(\"\")\n\tif err != nil {\n\t\tif fVerbose {\n\t\t\tlog.Printf(\"No configuration file found.\")\n\t\t}\n\t}\n\n\t\/\/ Logical\n\tif fDebug {\n\t\tfVerbose = true\n\t\tlog.Printf(\"config: %#v\", mycnf)\n\t}\n\n\t\/\/ Various messages\n\tif fVerbose {\n\t\tif mycnf.APIKey != \"\" {\n\t\t\tlog.Printf(\"Found API key!\")\n\t\t} else {\n\t\t\tlog.Printf(\"No API key!\")\n\t\t}\n\n\t\tif mycnf.DefaultProbe != 0 {\n\t\t\tlog.Printf(\"Found default probe: %d\\n\", mycnf.DefaultProbe)\n\t\t}\n\t}\n\n\t\/\/ Check whether we have proxy authentication (from a separate config file)\n\tauth, err := setupProxyAuth()\n\tif err != nil {\n\t\tif fVerbose {\n\t\t\tlog.Printf(\"Invalid or no proxy auth credentials\")\n\t\t}\n\t}\n\n\t\/\/ Wondering whether to move to the Functional options pattern\n\t\/\/ cf. https:\/\/dave.cheney.net\/2016\/11\/13\/do-not-fear-first-class-functions\n\tclient, err = atlas.NewClient(atlas.Config{\n\t\tAPIKey: mycnf.APIKey,\n\t\tDefaultProbe: mycnf.DefaultProbe,\n\t\tPoolSize: mycnf.PoolSize,\n\t\tProxyAuth: auth,\n\t\tVerbose: fVerbose,\n\t})\n\n\t\/\/ No need to continue if this fails\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating the Atlas client: %v\", err)\n\t}\n\n\tif fWantMine {\n\t\tclient.SetOption(\"mine\", \"true\")\n\t}\n\n\tif fWant4 {\n\t\tmycnf.WantAF = Want4\n\t}\n\n\tif fWant6 {\n\t\tmycnf.WantAF = Want6\n\t}\n\n\t\/\/ Both are fine\n\tif fWant4 && fWant6 {\n\t\tmycnf.WantAF = WantBoth\n\t}\n\n\t\/\/ So is neither — common case\n\tif !fWant4 && !fWant6 {\n\t\tmycnf.WantAF = WantBoth\n\t}\n\n\t\/\/ Set global options\n\t\/\/client.SetAF(mycnf.WantAF)\n\treturn nil\n}\n\n\/\/ main is the starting point (and everything)\nfunc main() {\n\tcli.VersionFlag = cli.BoolFlag{Name: \"version, V\"}\n\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tlog.Printf(\"API wrapper: %s Atlas API: %s\\n\", c.App.Version, atlas.GetVersion())\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = \"atlas\"\n\tapp.Usage = \"RIPE Atlas CLI interface\"\n\tapp.Author = \"Ollivier Robert <roberto@keltia.net>\"\n\tapp.Version = atlasVersion\n\t\/\/app.HideVersion = true\n\n\t\/\/ General flags\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format,f\",\n\t\t\tUsage: \"specify output format\",\n\t\t\tDestination: &fFormat,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug,D\",\n\t\t\tUsage: \"debug mode\",\n\t\t\tDestination: &fDebug,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose,v\",\n\t\t\tUsage: \"verbose mode\",\n\t\t\tDestination: &fVerbose,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"fields,F\",\n\t\t\tUsage: \"specify which fields are wanted\",\n\t\t\tDestination: &fFieldList,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"include,I\",\n\t\t\tUsage: \"specify whether objects should be expanded\",\n\t\t\tDestination: &fInclude,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"mine,M\",\n\t\t\tUsage: \"limit output to my objects\",\n\t\t\tDestination: &fWantMine,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"opt-fields,O\",\n\t\t\tUsage: \"specify which optional fields are wanted\",\n\t\t\tDestination: &fOptFields,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"page-size,P\",\n\t\t\tUsage: \"page size for results\",\n\t\t\tDestination: &fPageSize,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"sort,S\",\n\t\t\tUsage: \"sort results\",\n\t\t\tDestination: &fSortOrder,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"6, ipv6\",\n\t\t\tUsage: \"Only IPv6\",\n\t\t\tDestination: &fWant6,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"4, ipv4\",\n\t\t\tUsage: \"Only IPv4\",\n\t\t\tDestination: &fWant4,\n\t\t},\n\t}\n\n\t\/\/ Ensure -4 & -6 are treated properly & initialization is done\n\tapp.Before = finalcheck\n\n\tsort.Sort(ByAlphabet(cliCommands))\n\tapp.Commands = cliCommands\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ #cgo windows LDFLAGS: -Wl,--allow-multiple-definition -static\nimport \"C\"\n\nvar (\n\tversion = \"head\" \/\/ set by command-line on CI release builds\n\tapp = kingpin.New(\"butler\", \"Your very own itch.io helper\")\n\n\tdlCmd = app.Command(\"dl\", \"Download a file (resumes if can, checks hashes)\")\n\n\tpushCmd = app.Command(\"push\", \"Upload a new version of something to itch.io\")\n)\n\nvar appArgs = struct {\n\tjson *bool\n\tquiet *bool\n\ttimestamps *bool\n}{\n\tapp.Flag(\"json\", \"Enable machine-readable JSON-lines output\").Short('j').Bool(),\n\tapp.Flag(\"quiet\", \"Hide progress indicators & other extra info\").Short('q').Bool(),\n\tapp.Flag(\"timestamps\", \"Prefix all output by timestamps (for logging purposes)\").Bool(),\n}\n\nvar dlArgs = struct {\n\turl *string\n\tdest *string\n}{\n\tdlCmd.Arg(\"url\", \"Address to download from\").Required().String(),\n\tdlCmd.Arg(\"dest\", \"File to write downloaded data to\").Required().String(),\n}\n\nvar pushArgs = struct {\n\tidentity *string\n\taddress *string\n\tsrc *string\n\trepo *string\n}{\n\tpushCmd.Flag(\"identity\", \"Path to the private key used for public key authentication.\").Default(fmt.Sprintf(\"%s\/%s\", os.Getenv(\"HOME\"), \".ssh\/id_rsa\")).Short('i').ExistingFile(),\n\tpushCmd.Flag(\"address\", \"Specify wharf address (advanced)\").Default(\"wharf.itch.zone\").Short('a').Hidden().String(),\n\tpushCmd.Arg(\"src\", \"Directory or zip archive to upload, e.g.\").Required().ExistingFileOrDir(),\n\tpushCmd.Arg(\"repo\", \"Repository to push to, e.g. leafo\/xmoon:win64\").Required().String(),\n}\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tapp.Version(version)\n\tapp.VersionFlag.Short('V')\n\n\tcmd, err := app.Parse(os.Args[1:])\n\tif *appArgs.timestamps {\n\t\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\t} else {\n\t\tlog.SetFlags(0)\n\t}\n\n\tswitch kingpin.MustParse(cmd, err) {\n\tcase dlCmd.FullCommand():\n\t\tdl(*dlArgs.url, *dlArgs.dest)\n\n\tcase pushCmd.FullCommand():\n\t\tpush(*pushArgs.src, *pushArgs.repo)\n\t}\n}\n<commit_msg>trying to push something to master<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ #cgo windows LDFLAGS: -Wl,--allow-multiple-definition -static\nimport \"C\"\n\nvar (\n\tversion = \"head\" \/\/ set by command-line on CI release builds\n\tapp = kingpin.New(\"butler\", \"Your very own itch.io helper\")\n\n\tdlCmd = app.Command(\"dl\", \"Download a file (resumes if can, checks hashes)\")\n\n\tpushCmd = app.Command(\"push\", \"Upload a new version of something to itch.io\")\n)\n\nvar appArgs = struct {\n\tjson *bool\n\tquiet *bool\n\ttimestamps *bool\n}{\n\tapp.Flag(\"json\", \"Enable machine-readable JSON-lines output\").Short('j').Bool(),\n\tapp.Flag(\"quiet\", \"Hide progress indicators & other extra info\").Short('q').Bool(),\n\tapp.Flag(\"timestamps\", \"Prefix all output by timestamps (for logging purposes)\").Bool(),\n}\n\nvar dlArgs = struct {\n\turl *string\n\tdest *string\n}{\n\tdlCmd.Arg(\"url\", \"Address to download from\").Required().String(),\n\tdlCmd.Arg(\"dest\", \"File to write downloaded data to\").Required().String(),\n}\n\nvar pushArgs = struct {\n\tidentity *string\n\taddress *string\n\tsrc *string\n\trepo *string\n}{\n\tpushCmd.Flag(\"identity\", \"Path to the private key used for public key authentication.\").Default(fmt.Sprintf(\"%s\/%s\", os.Getenv(\"HOME\"), \".ssh\/id_rsa\")).Short('i').ExistingFile(),\n\tpushCmd.Flag(\"address\", \"Specify wharf address (advanced)\").Default(\"wharf.itch.zone\").Short('a').Hidden().String(),\n\tpushCmd.Arg(\"src\", \"Directory or zip archive to upload, e.g.\").Required().ExistingFileOrDir(),\n\tpushCmd.Arg(\"repo\", \"Repository to push to, e.g. leafo\/xmoon:win64\").Required().String(),\n}\n\nfunc main() {\n\tapp.HelpFlag.Short('h')\n\tapp.Version(fmt.Sprintf(\"definitely version %s\", version))\n\tapp.VersionFlag.Short('V')\n\n\tcmd, err := app.Parse(os.Args[1:])\n\tif *appArgs.timestamps {\n\t\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\t} else {\n\t\tlog.SetFlags(0)\n\t}\n\n\tswitch kingpin.MustParse(cmd, err) {\n\tcase dlCmd.FullCommand():\n\t\tdl(*dlArgs.url, *dlArgs.dest)\n\n\tcase pushCmd.FullCommand():\n\t\tpush(*pushArgs.src, *pushArgs.repo)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"fyne.io\/fyne\"\n\t\"fyne.io\/fyne\/app\"\n\t\"fyne.io\/fyne\/dialog\"\n\t\"fyne.io\/fyne\/theme\"\n\t\"fyne.io\/fyne\/widget\"\n\t\"github.com\/nange\/easyss\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tss *easyss.Easyss\n\tmu sync.Mutex\n\tesDir string\n\tlogFile string\n\tlogF *os.File\n)\n\nconst (\n\tconfFilename = \"config.json\"\n\tlogFilename = \"easyss.log\"\n)\n\nfunc main() {\n\ta := app.New()\n\tw := a.NewWindow(\"Easyss\")\n\tw.SetFullScreen(false)\n\tw.SetContent(widget.NewVBox(\n\t\tnewMainForm(w),\n\t))\n\ta.Settings().SetTheme(theme.DarkTheme())\n\n\tw.ShowAndRun()\n}\n\nfunc newMainForm(w fyne.Window) *widget.Form {\n\tconf, err := loadConfFromFile(filepath.Join(esDir, confFilename))\n\tif err != nil {\n\t\tlog.Warnf(\"load config from file err:%v\", err)\n\t}\n\n\tserver := widget.NewEntry()\n\tserver.SetPlaceHolder(\"server host addr\")\n\tserver.SetText(conf.Server)\n\n\tserverPort := widget.NewEntry()\n\tserverPort.SetPlaceHolder(\"server port\")\n\tserverPort.SetText(fmt.Sprint(conf.ServerPort))\n\n\tlocalPort := widget.NewEntry()\n\tlocalPort.SetPlaceHolder(\"local server port\")\n\tlocalPort.SetText(\"2080\")\n\tif conf.LocalPort != 0 {\n\t\tlocalPort.SetText(fmt.Sprint(conf.LocalPort))\n\t}\n\n\tpassword := widget.NewPasswordEntry()\n\tpassword.SetPlaceHolder(\"password\")\n\tpassword.SetText(conf.Password)\n\n\tmethod := widget.NewEntry()\n\tmethod.SetPlaceHolder(\"aes-256-gcm, chacha20-poly1305\")\n\tmethod.SetText(\"chacha20-poly1305\")\n\tif conf.Method != \"\" {\n\t\tmethod.SetText(conf.Method)\n\t}\n\n\ttimeout := widget.NewEntry()\n\ttimeout.SetPlaceHolder(\"timeout, default 60s\")\n\ttimeout.SetText(\"60\")\n\tif conf.Timeout != 0 {\n\t\ttimeout.SetText(fmt.Sprint(conf.Timeout))\n\t}\n\n\tform := &widget.Form{\n\t\tItems: []*widget.FormItem{\n\t\t\t{Text: \"Server\", Widget: server},\n\t\t\t{Text: \"Server Port\", Widget: serverPort},\n\t\t\t{Text: \"Local Port\", Widget: localPort},\n\t\t\t{Text: \"Password\", Widget: password},\n\t\t\t{Text: \"Method\", Widget: method},\n\t\t\t{Text: \"Timeout\", Widget: timeout},\n\t\t},\n\t\tOnCancel: func() {\n\t\t\tcnf := dialog.NewConfirm(\"Stop\", \"Continue to stop?\", func(b bool) {\n\t\t\t\tmu.Lock()\n\t\t\t\tdefer mu.Unlock()\n\t\t\t\tif b && ss != nil {\n\t\t\t\t\tss.Close()\n\t\t\t\t\tss = nil\n\t\t\t\t\tserver.Enable()\n\t\t\t\t\tserverPort.Enable()\n\t\t\t\t\tlocalPort.Enable()\n\t\t\t\t\tpassword.Enable()\n\t\t\t\t\tmethod.Enable()\n\t\t\t\t\ttimeout.Enable()\n\t\t\t\t\tshowInfo(w, \"Easyss stoped!\")\n\t\t\t\t}\n\t\t\t\tif logF != nil {\n\t\t\t\t\tlogF.Close()\n\t\t\t\t}\n\t\t\t}, w)\n\t\t\tcnf.SetDismissText(\"No\")\n\t\t\tcnf.SetConfirmText(\"Yes\")\n\t\t\tcnf.Show()\n\t\t},\n\t\tOnSubmit: func() {\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tif ss != nil {\n\t\t\t\tb := make([]byte, 64)\n\t\t\t\t_, err := logF.ReadAt(b, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tshowInfo(w, string(b))\n\t\t\t\tshowInfo(w, \"Easyss already started!\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\tif logFile == \"\" {\n\t\t\t\tfilesDir := os.Getenv(\"FILESDIR\")\n\t\t\t\tesDir = filepath.Join(filesDir, \"easyss\")\n\t\t\t\tif err = os.MkdirAll(esDir, 0777); err != nil {\n\t\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlogFile = filepath.Join(esDir, logFilename)\n\t\t\t\tlogF, err = os.OpenFile(logFile, os.O_RDWR|os.O_CREATE, 0666)\n\t\t\t\tif err != nil {\n\t\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tlog.SetOutput(logF)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tif err != nil {\n\t\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tshowInfo(w, \"Easyss started!\")\n\t\t\t\tserver.Disable()\n\t\t\t\tserverPort.Disable()\n\t\t\t\tlocalPort.Disable()\n\t\t\t\tpassword.Disable()\n\t\t\t\tmethod.Disable()\n\t\t\t\ttimeout.Disable()\n\t\t\t}()\n\n\t\t\tprog := showProcess(w)\n\t\t\tdefer prog.Hide()\n\n\t\t\tserverPortInt, err := strconv.ParseInt(serverPort.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"server port is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalPortInt, err := strconv.ParseInt(localPort.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"local port is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimeoutInt, err := strconv.ParseInt(timeout.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"timeout is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconf := &easyss.Config{\n\t\t\t\tServer: server.Text,\n\t\t\t\tServerPort: int(serverPortInt),\n\t\t\t\tLocalPort: int(localPortInt),\n\t\t\t\tPassword: password.Text,\n\t\t\t\tMethod: method.Text,\n\t\t\t\tTimeout: int(timeoutInt),\n\t\t\t}\n\t\t\tss, err = easyss.New(conf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"new easyss:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := StartEasyss(ss); err != nil {\n\t\t\t\tlog.Errorf(\"start easyss failed:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t}\n\t\t\tif err := writeConfToFile(filepath.Join(esDir, confFilename), *conf); err != nil {\n\t\t\t\tlog.Errorf(\"write config to file err:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t}\n\t\t},\n\t}\n\tform.CancelText = \"Stop\"\n\tform.SubmitText = \"Start\"\n\n\treturn form\n}\n\nfunc StartEasyss(ss *easyss.Easyss) error {\n\tlog.Infof(\"on mobile arch, we should ignore systray\")\n\n\tif err := ss.InitTcpPool(); err != nil {\n\t\tlog.Errorf(\"init tcp pool error:%v\", err)\n\t\treturn err\n\t}\n\n\tgo ss.Local() \/\/ start local server\n\tgo ss.HttpLocal() \/\/ start http proxy server\n\n\treturn nil\n}\n\nfunc showProcess(w fyne.Window) *dialog.ProgressDialog {\n\tprog := dialog.NewProgress(\"Processing\", \"Starting...\", w)\n\n\tgo func() {\n\t\tnum := 0.0\n\t\tfor num < 1.0 {\n\t\t\ttime.Sleep(20 * time.Millisecond)\n\t\t\tprog.SetValue(num)\n\t\t\tnum += 0.01\n\t\t}\n\n\t\tprog.SetValue(1)\n\t}()\n\n\tprog.Show()\n\treturn prog\n}\n\nfunc showErrorInfo(w fyne.Window, err error) {\n\tdialog.ShowError(err, w)\n}\n\nfunc showInfo(w fyne.Window, info string) {\n\tdialog.ShowInformation(\"Info\", info, w)\n}\n\nfunc loadConfFromFile(file string) (easyss.Config, error) {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn easyss.Config{}, err\n\t}\n\tdefer f.Close()\n\n\tvar conf easyss.Config\n\tif err := json.NewDecoder(f).Decode(&conf); err != nil {\n\t\treturn easyss.Config{}, err\n\t}\n\treturn conf, nil\n}\n\nfunc writeConfToFile(file string, conf easyss.Config) error {\n\tb, err := json.Marshal(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(file, b, 0666)\n}\n<commit_msg>cmd\/easyss: fix reloading config form file<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"fyne.io\/fyne\"\n\t\"fyne.io\/fyne\/app\"\n\t\"fyne.io\/fyne\/dialog\"\n\t\"fyne.io\/fyne\/theme\"\n\t\"fyne.io\/fyne\/widget\"\n\t\"github.com\/nange\/easyss\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tss *easyss.Easyss\n\tmu sync.Mutex\n\tesDir string\n\tlogFile string\n\tlogF *os.File\n)\n\nconst (\n\tconfFilename = \"config.json\"\n\tlogFilename = \"easyss.log\"\n)\n\nfunc main() {\n\ta := app.New()\n\tw := a.NewWindow(\"Easyss\")\n\tw.SetFullScreen(false)\n\tw.SetContent(widget.NewVBox(\n\t\tnewMainForm(w),\n\t))\n\ta.Settings().SetTheme(theme.DarkTheme())\n\n\tw.ShowAndRun()\n}\n\nfunc newMainForm(w fyne.Window) *widget.Form {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tfilesDir := os.Getenv(\"FILESDIR\")\n\tesDir = filepath.Join(filesDir, \"easyss\")\n\tif logFile == \"\" {\n\t\tvar err error\n\t\tif err := os.MkdirAll(esDir, 0777); err != nil {\n\t\t\tshowErrorInfo(w, err)\n\t\t}\n\n\t\tlogFile = filepath.Join(esDir, logFilename)\n\t\tlogF, err = os.OpenFile(logFile, os.O_RDWR|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tshowErrorInfo(w, err)\n\t\t}\n\t\tlog.SetOutput(logF)\n\t}\n\n\tconf, err := loadConfFromFile(filepath.Join(esDir, confFilename))\n\tif err != nil {\n\t\tlog.Warnf(\"load config from file err:%v\", err)\n\t\tshowErrorInfo(w, err)\n\t}\n\n\tserver := widget.NewEntry()\n\tserver.SetPlaceHolder(\"server host addr\")\n\tserver.SetText(conf.Server)\n\n\tserverPort := widget.NewEntry()\n\tserverPort.SetPlaceHolder(\"server port\")\n\tserverPort.SetText(\"9999\")\n\tif conf.ServerPort != 0 {\n\t\tserverPort.SetText(fmt.Sprint(conf.ServerPort))\n\t}\n\n\tlocalPort := widget.NewEntry()\n\tlocalPort.SetPlaceHolder(\"local server port\")\n\tlocalPort.SetText(\"2080\")\n\tif conf.LocalPort != 0 {\n\t\tlocalPort.SetText(fmt.Sprint(conf.LocalPort))\n\t}\n\n\tpassword := widget.NewPasswordEntry()\n\tpassword.SetPlaceHolder(\"password\")\n\tpassword.SetText(conf.Password)\n\n\tmethod := widget.NewEntry()\n\tmethod.SetPlaceHolder(\"aes-256-gcm, chacha20-poly1305\")\n\tmethod.SetText(\"chacha20-poly1305\")\n\tif conf.Method != \"\" {\n\t\tmethod.SetText(conf.Method)\n\t}\n\n\ttimeout := widget.NewEntry()\n\ttimeout.SetPlaceHolder(\"timeout, default 60s\")\n\ttimeout.SetText(\"60\")\n\tif conf.Timeout != 0 {\n\t\ttimeout.SetText(fmt.Sprint(conf.Timeout))\n\t}\n\n\tform := &widget.Form{\n\t\tItems: []*widget.FormItem{\n\t\t\t{Text: \"Server\", Widget: server},\n\t\t\t{Text: \"Server Port\", Widget: serverPort},\n\t\t\t{Text: \"Local Port\", Widget: localPort},\n\t\t\t{Text: \"Password\", Widget: password},\n\t\t\t{Text: \"Method\", Widget: method},\n\t\t\t{Text: \"Timeout\", Widget: timeout},\n\t\t},\n\t\tOnCancel: func() {\n\t\t\tcnf := dialog.NewConfirm(\"Stop\", \"Continue to stop?\", func(b bool) {\n\t\t\t\tmu.Lock()\n\t\t\t\tdefer mu.Unlock()\n\t\t\t\tif b && ss != nil {\n\t\t\t\t\tss.Close()\n\t\t\t\t\tss = nil\n\t\t\t\t\tserver.Enable()\n\t\t\t\t\tserverPort.Enable()\n\t\t\t\t\tlocalPort.Enable()\n\t\t\t\t\tpassword.Enable()\n\t\t\t\t\tmethod.Enable()\n\t\t\t\t\ttimeout.Enable()\n\t\t\t\t\tshowInfo(w, \"Easyss stoped!\")\n\t\t\t\t}\n\t\t\t\tif logF != nil {\n\t\t\t\t\tlogF.Close()\n\t\t\t\t}\n\t\t\t}, w)\n\t\t\tcnf.SetDismissText(\"No\")\n\t\t\tcnf.SetConfirmText(\"Yes\")\n\t\t\tcnf.Show()\n\t\t},\n\t\tOnSubmit: func() {\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tif ss != nil {\n\t\t\t\tshowInfo(w, \"Easyss already started!\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\tdefer func() {\n\t\t\t\tif err != nil {\n\t\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tshowInfo(w, \"Easyss started!\")\n\t\t\t\tserver.Disable()\n\t\t\t\tserverPort.Disable()\n\t\t\t\tlocalPort.Disable()\n\t\t\t\tpassword.Disable()\n\t\t\t\tmethod.Disable()\n\t\t\t\ttimeout.Disable()\n\t\t\t}()\n\n\t\t\tprog := showProcess(w)\n\t\t\tdefer prog.Hide()\n\n\t\t\tserverPortInt, err := strconv.ParseInt(serverPort.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"server port is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlocalPortInt, err := strconv.ParseInt(localPort.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"local port is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimeoutInt, err := strconv.ParseInt(timeout.Text, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"timeout is invalid\")\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconf := &easyss.Config{\n\t\t\t\tServer: server.Text,\n\t\t\t\tServerPort: int(serverPortInt),\n\t\t\t\tLocalPort: int(localPortInt),\n\t\t\t\tPassword: password.Text,\n\t\t\t\tMethod: method.Text,\n\t\t\t\tTimeout: int(timeoutInt),\n\t\t\t}\n\t\t\tss, err = easyss.New(conf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"new easyss:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := StartEasyss(ss); err != nil {\n\t\t\t\tlog.Errorf(\"start easyss failed:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t}\n\t\t\tif err := writeConfToFile(filepath.Join(esDir, confFilename), *conf); err != nil {\n\t\t\t\tlog.Errorf(\"write config to file err:%v\", err)\n\t\t\t\tshowErrorInfo(w, err)\n\t\t\t}\n\t\t},\n\t}\n\tform.CancelText = \"Stop\"\n\tform.SubmitText = \"Start\"\n\n\treturn form\n}\n\nfunc StartEasyss(ss *easyss.Easyss) error {\n\tlog.Infof(\"on mobile arch, we should ignore systray\")\n\n\tif err := ss.InitTcpPool(); err != nil {\n\t\tlog.Errorf(\"init tcp pool error:%v\", err)\n\t\treturn err\n\t}\n\n\tgo ss.Local() \/\/ start local server\n\tgo ss.HttpLocal() \/\/ start http proxy server\n\n\treturn nil\n}\n\nfunc showProcess(w fyne.Window) *dialog.ProgressDialog {\n\tprog := dialog.NewProgress(\"Processing\", \"Starting...\", w)\n\n\tgo func() {\n\t\tnum := 0.0\n\t\tfor num < 1.0 {\n\t\t\ttime.Sleep(20 * time.Millisecond)\n\t\t\tprog.SetValue(num)\n\t\t\tnum += 0.01\n\t\t}\n\n\t\tprog.SetValue(1)\n\t}()\n\n\tprog.Show()\n\treturn prog\n}\n\nfunc showErrorInfo(w fyne.Window, err error) {\n\tdialog.ShowError(err, w)\n}\n\nfunc showInfo(w fyne.Window, info string) {\n\tdialog.ShowInformation(\"Info\", info, w)\n}\n\nfunc loadConfFromFile(file string) (easyss.Config, error) {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn easyss.Config{}, err\n\t}\n\tdefer f.Close()\n\n\tvar conf easyss.Config\n\tif err := json.NewDecoder(f).Decode(&conf); err != nil {\n\t\treturn easyss.Config{}, err\n\t}\n\treturn conf, nil\n}\n\nfunc writeConfToFile(file string, conf easyss.Config) error {\n\tb, err := json.Marshal(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(file, b, 0666)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\nvar (\n\tsource = flag.String(\"s\", \"\", \"only consider packages from src, where src is one of the supported compilers\")\n\tverbose = flag.Bool(\"v\", false, \"verbose mode\")\n)\n\n\/\/ lists of registered sources and corresponding importers\nvar (\n\tsources []string\n\timporters []types.Importer\n\timportFailed = errors.New(\"import failed\")\n)\n\n\/\/ map of imported packages\nvar packages = make(map[string]*types.Package)\n\nfunc usage() {\n\tfmt.Fprintln(os.Stderr, \"usage: godex [flags] {path|qualifiedIdent}\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\nfunc report(msg string) {\n\tfmt.Fprintln(os.Stderr, \"error: \"+msg)\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\treport(\"no package name, path, or file provided\")\n\t}\n\n\timp := tryImports\n\tif *source != \"\" {\n\t\timp = lookup(*source)\n\t\tif imp == nil {\n\t\t\treport(\"source (-s argument) must be one of: \" + strings.Join(sources, \", \"))\n\t\t}\n\t}\n\n\tfor _, arg := range flag.Args() {\n\t\tpath, name := splitPathIdent(arg)\n\t\tlogf(\"\\tprocessing %q: path = %q, name = %s\\n\", arg, path, name)\n\n\t\t\/\/ generate possible package path prefixes\n\t\t\/\/ (at the moment we do this for each argument - should probably cache the generated prefixes)\n\t\tprefixes := make(chan string)\n\t\tgo genPrefixes(prefixes, !filepath.IsAbs(path) && !build.IsLocalImport(path))\n\n\t\t\/\/ import package\n\t\tpkg, err := tryPrefixes(packages, prefixes, path, imp)\n\t\tif err != nil {\n\t\t\tlogf(\"\\t=> ignoring %q: %s\\n\", path, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ filter objects if needed\n\t\tvar filter func(types.Object) bool\n\t\tif name != \"\" {\n\t\t\tfilter = func(obj types.Object) bool {\n\t\t\t\t\/\/ TODO(gri) perhaps use regular expression matching here?\n\t\t\t\treturn obj.Name() == name\n\t\t\t}\n\t\t}\n\n\t\t\/\/ print contents\n\t\tprint(os.Stdout, pkg, filter)\n\t}\n}\n\nfunc logf(format string, args ...interface{}) {\n\tif *verbose {\n\t\tfmt.Fprintf(os.Stderr, format, args...)\n\t}\n}\n\n\/\/ splitPathIdent splits a path.name argument into its components.\n\/\/ All but the last path element may contain dots.\nfunc splitPathIdent(arg string) (path, name string) {\n\tif i := strings.LastIndex(arg, \".\"); i >= 0 {\n\t\tif j := strings.LastIndex(arg, \"\/\"); j < i {\n\t\t\t\/\/ '.' is not part of path\n\t\t\tpath = arg[:i]\n\t\t\tname = arg[i+1:]\n\t\t\treturn\n\t\t}\n\t}\n\tpath = arg\n\treturn\n}\n\n\/\/ tryPrefixes tries to import the package given by (the possibly partial) path using the given importer imp\n\/\/ by prepending all possible prefixes to path. It returns with the first package that it could import, or\n\/\/ with an error.\nfunc tryPrefixes(packages map[string]*types.Package, prefixes chan string, path string, imp types.Importer) (pkg *types.Package, err error) {\n\tfor prefix := range prefixes {\n\t\tlogf(\"\\ttrying prefix %q\\n\", prefix)\n\t\tprepath := filepath.Join(prefix, path)\n\t\tpkg, err = imp(packages, prepath)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlogf(\"\\t=> importing %q failed: %s\\n\", prepath, err)\n\t}\n\treturn\n}\n\n\/\/ tryImports is an importer that tries all registered importers\n\/\/ successively until one of them succeeds or all of them failed.\nfunc tryImports(packages map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\tfor i, imp := range importers {\n\t\tlogf(\"\\t\\ttrying %s import\\n\", sources[i])\n\t\tpkg, err = imp(packages, path)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlogf(\"\\t\\t=> %s import failed: %s\\n\", sources[i], err)\n\t}\n\treturn\n}\n\n\/\/ protect protects an importer imp from panics and returns the protected importer.\nfunc protect(imp types.Importer) types.Importer {\n\treturn func(packages map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\t\tdefer func() {\n\t\t\tif recover() != nil {\n\t\t\t\tpkg = nil\n\t\t\t\terr = importFailed\n\t\t\t}\n\t\t}()\n\t\treturn imp(packages, path)\n\t}\n}\n\n\/\/ register registers an importer imp for a given source src.\nfunc register(src string, imp types.Importer) {\n\tif lookup(src) != nil {\n\t\tpanic(src + \" importer already registered\")\n\t}\n\tsources = append(sources, src)\n\timporters = append(importers, protect(imp))\n}\n\n\/\/ lookup returns the importer imp for a given source src.\nfunc lookup(src string) types.Importer {\n\tfor i, s := range sources {\n\t\tif s == src {\n\t\t\treturn importers[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc genPrefixes(out chan string, all bool) {\n\tout <- \"\"\n\tif all {\n\t\tplatform := build.Default.GOOS + \"_\" + build.Default.GOARCH\n\t\tdirnames := append([]string{build.Default.GOROOT}, filepath.SplitList(build.Default.GOPATH)...)\n\t\tfor _, dirname := range dirnames {\n\t\t\twalkDir(filepath.Join(dirname, \"pkg\", platform), \"\", out)\n\t\t}\n\t}\n\tclose(out)\n}\n\nfunc walkDir(dirname, prefix string, out chan string) {\n\tfiList, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fiList {\n\t\tif fi.IsDir() && !strings.HasPrefix(fi.Name(), \".\") {\n\t\t\tprefix := filepath.Join(prefix, fi.Name())\n\t\t\tout <- prefix\n\t\t\twalkDir(filepath.Join(dirname, fi.Name()), prefix, out)\n\t\t}\n\t}\n}\n<commit_msg>go.tools\/cmd\/godex: make relative package paths work<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.tools\/go\/types\"\n)\n\nvar (\n\tsource = flag.String(\"s\", \"\", \"only consider packages from src, where src is one of the supported compilers\")\n\tverbose = flag.Bool(\"v\", false, \"verbose mode\")\n)\n\n\/\/ lists of registered sources and corresponding importers\nvar (\n\tsources []string\n\timporters []types.Importer\n\timportFailed = errors.New(\"import failed\")\n)\n\n\/\/ map of imported packages\nvar packages = make(map[string]*types.Package)\n\nfunc usage() {\n\tfmt.Fprintln(os.Stderr, \"usage: godex [flags] {path|qualifiedIdent}\")\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}\n\nfunc report(msg string) {\n\tfmt.Fprintln(os.Stderr, \"error: \"+msg)\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\treport(\"no package name, path, or file provided\")\n\t}\n\n\timp := tryImports\n\tif *source != \"\" {\n\t\timp = lookup(*source)\n\t\tif imp == nil {\n\t\t\treport(\"source (-s argument) must be one of: \" + strings.Join(sources, \", \"))\n\t\t}\n\t}\n\n\tfor _, arg := range flag.Args() {\n\t\tpath, name := splitPathIdent(arg)\n\t\tlogf(\"\\tprocessing %q: path = %q, name = %s\\n\", arg, path, name)\n\n\t\t\/\/ generate possible package path prefixes\n\t\t\/\/ (at the moment we do this for each argument - should probably cache the generated prefixes)\n\t\tprefixes := make(chan string)\n\t\tgo genPrefixes(prefixes, !filepath.IsAbs(path) && !build.IsLocalImport(path))\n\n\t\t\/\/ import package\n\t\tpkg, err := tryPrefixes(packages, prefixes, path, imp)\n\t\tif err != nil {\n\t\t\tlogf(\"\\t=> ignoring %q: %s\\n\", path, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ filter objects if needed\n\t\tvar filter func(types.Object) bool\n\t\tif name != \"\" {\n\t\t\tfilter = func(obj types.Object) bool {\n\t\t\t\t\/\/ TODO(gri) perhaps use regular expression matching here?\n\t\t\t\treturn obj.Name() == name\n\t\t\t}\n\t\t}\n\n\t\t\/\/ print contents\n\t\tprint(os.Stdout, pkg, filter)\n\t}\n}\n\nfunc logf(format string, args ...interface{}) {\n\tif *verbose {\n\t\tfmt.Fprintf(os.Stderr, format, args...)\n\t}\n}\n\n\/\/ splitPathIdent splits a path.name argument into its components.\n\/\/ All but the last path element may contain dots.\nfunc splitPathIdent(arg string) (path, name string) {\n\tif i := strings.LastIndex(arg, \".\"); i >= 0 {\n\t\tif j := strings.LastIndex(arg, \"\/\"); j < i {\n\t\t\t\/\/ '.' is not part of path\n\t\t\tpath = arg[:i]\n\t\t\tname = arg[i+1:]\n\t\t\treturn\n\t\t}\n\t}\n\tpath = arg\n\treturn\n}\n\n\/\/ tryPrefixes tries to import the package given by (the possibly partial) path using the given importer imp\n\/\/ by prepending all possible prefixes to path. It returns with the first package that it could import, or\n\/\/ with an error.\nfunc tryPrefixes(packages map[string]*types.Package, prefixes chan string, path string, imp types.Importer) (pkg *types.Package, err error) {\n\tfor prefix := range prefixes {\n\t\tactual := path\n\t\tif prefix == \"\" {\n\t\t\t\/\/ don't use filepath.Join as it will sanitize the path and remove\n\t\t\t\/\/ a leading dot and then the path is not recognized as a relative\n\t\t\t\/\/ package path by the importers anymore\n\t\t\tlogf(\"\\ttrying no prefix\\n\")\n\t\t} else {\n\t\t\tactual = filepath.Join(prefix, path)\n\t\t\tlogf(\"\\ttrying prefix %q\\n\", prefix)\n\t\t}\n\t\tpkg, err = imp(packages, actual)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlogf(\"\\t=> importing %q failed: %s\\n\", actual, err)\n\t}\n\treturn\n}\n\n\/\/ tryImports is an importer that tries all registered importers\n\/\/ successively until one of them succeeds or all of them failed.\nfunc tryImports(packages map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\tfor i, imp := range importers {\n\t\tlogf(\"\\t\\ttrying %s import\\n\", sources[i])\n\t\tpkg, err = imp(packages, path)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlogf(\"\\t\\t=> %s import failed: %s\\n\", sources[i], err)\n\t}\n\treturn\n}\n\n\/\/ protect protects an importer imp from panics and returns the protected importer.\nfunc protect(imp types.Importer) types.Importer {\n\treturn func(packages map[string]*types.Package, path string) (pkg *types.Package, err error) {\n\t\tdefer func() {\n\t\t\tif recover() != nil {\n\t\t\t\tpkg = nil\n\t\t\t\terr = importFailed\n\t\t\t}\n\t\t}()\n\t\treturn imp(packages, path)\n\t}\n}\n\n\/\/ register registers an importer imp for a given source src.\nfunc register(src string, imp types.Importer) {\n\tif lookup(src) != nil {\n\t\tpanic(src + \" importer already registered\")\n\t}\n\tsources = append(sources, src)\n\timporters = append(importers, protect(imp))\n}\n\n\/\/ lookup returns the importer imp for a given source src.\nfunc lookup(src string) types.Importer {\n\tfor i, s := range sources {\n\t\tif s == src {\n\t\t\treturn importers[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc genPrefixes(out chan string, all bool) {\n\tout <- \"\"\n\tif all {\n\t\tplatform := build.Default.GOOS + \"_\" + build.Default.GOARCH\n\t\tdirnames := append([]string{build.Default.GOROOT}, filepath.SplitList(build.Default.GOPATH)...)\n\t\tfor _, dirname := range dirnames {\n\t\t\twalkDir(filepath.Join(dirname, \"pkg\", platform), \"\", out)\n\t\t}\n\t}\n\tclose(out)\n}\n\nfunc walkDir(dirname, prefix string, out chan string) {\n\tfiList, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fiList {\n\t\tif fi.IsDir() && !strings.HasPrefix(fi.Name(), \".\") {\n\t\t\tprefix := filepath.Join(prefix, fi.Name())\n\t\t\tout <- prefix\n\t\t\twalkDir(filepath.Join(dirname, fi.Name()), prefix, out)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `usage: issue [-p project] query\n\nIf query is a single number, prints the full history for the issue.\nOtherwise, prints a table of matching results.\nThe special query 'go1' is shorthand for 'Priority-Go1'.\n`)\n\tos.Exit(2)\n}\n\ntype Feed struct {\n\tEntry Entries `xml:\"entry\"`\n}\n\ntype Entry struct {\n\tID string `xml:\"id\"`\n\tTitle string `xml:\"title\"`\n\tPublished time.Time `xml:\"published\"`\n\tContent string `xml:\"content\"`\n\tUpdates []Update `xml:\"updates\"`\n}\n\ntype Update struct {\n\tSummary string `xml:\"summary\"`\n\tOwner string `xml:\"ownerUpdate\"`\n\tLabel string `xml:\"label\"`\n\tStatus string `xml:\"status\"`\n}\n\ntype Entries []Entry\n\nfunc (e Entries) Len() int { return len(e) }\nfunc (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }\nfunc (e Entries) Less(i, j int) bool { return e[i].Title < e[j].Title }\n\nvar project = flag.String(\"p\", \"go\", \"code.google.com project identifier\")\nvar v = flag.Bool(\"v\", false, \"verbose\")\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t}\n\n\tfull := false\n\tq := flag.Arg(0)\n\tn, _ := strconv.Atoi(q)\n\tif n != 0 {\n\t\tq = \"id:\" + q\n\t\tfull = true\n\t}\n\tif q == \"go1\" {\n\t\tq = \"label:Priority-Go1\"\n\t}\n\n\tlog.SetFlags(0)\n\n\tquery := url.Values{\n\t\t\"q\": {q},\n\t\t\"max-results\": {\"400\"},\n\t}\n\tif !full {\n\t\tquery[\"can\"] = []string{\"open\"}\n\t}\n\tu := \"https:\/\/code.google.com\/feeds\/issues\/p\/\" + *project + \"\/issues\/full?\" + query.Encode()\n\tif *v {\n\t\tlog.Print(u)\n\t}\n\tr, err := http.Get(u)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar feed Feed\n\tif err := xml.NewDecoder(r.Body).Decode(&feed); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tr.Body.Close()\n\n\tsort.Sort(feed.Entry)\n\tfor _, e := range feed.Entry {\n\t\tid := e.ID\n\t\tif i := strings.Index(id, \"id=\"); i >= 0 {\n\t\t\tid = id[:i+len(\"id=\")]\n\t\t}\n\t\tfmt.Printf(\"%s\\t%s\\n\", id, e.Title)\n\t\tif full {\n\t\t\tu := \"https:\/\/code.google.com\/feeds\/issues\/p\/\" + *project + \"\/issues\/\" + id + \"\/comments\/full\"\n\t\t\tif *v {\n\t\t\t\tlog.Print(u)\n\t\t\t}\n\t\t\tr, err := http.Get(u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tvar feed Feed\n\t\t\tif err := xml.NewDecoder(r.Body).Decode(&feed); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tr.Body.Close()\n\n\t\t\tfor _, e := range feed.Entry {\n\t\t\t\tfmt.Printf(\"\\n%s (%s)\\n\", e.Title, e.Published.Format(\"2006-01-02 15:04:05\"))\n\t\t\t\tfor _, up := range e.Updates {\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase up.Summary != \"\":\n\t\t\t\t\t\tfmt.Printf(\"\\tSummary: %s\\n\", up.Summary)\n\t\t\t\t\tcase up.Owner != \"\":\n\t\t\t\t\t\tfmt.Printf(\"\\tOwner: %s\\n\", up.Owner)\n\t\t\t\t\tcase up.Status != \"\":\n\t\t\t\t\t\tfmt.Printf(\"\\tStatus: %s\\n\", up.Status)\n\t\t\t\t\tcase up.Label != \"\":\n\t\t\t\t\t\tfmt.Printf(\"\\tLabel: %s\\n\", up.Label)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif e.Content != \"\" {\n\t\t\t\t\tfmt.Printf(\"\\n\\t%s\\n\", wrap(e.Content, \"\\t\"))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc wrap(t string, prefix string) string {\n\tout := \"\"\n\tt = strings.Replace(t, \"\\r\\n\", \"\\n\", -1)\n\tlines := strings.Split(t, \"\\n\")\n\tfor i, line := range lines {\n\t\tif i > 0 {\n\t\t\tout += \"\\n\" + prefix\n\t\t}\n\t\ts := line\n\t\tfor len(s) > 70 {\n\t\t\ti := strings.LastIndex(s[:70], \" \")\n\t\t\tif i < 0 {\n\t\t\t\ti = 69\n\t\t\t}\n\t\t\ti++\n\t\t\tout += s[:i] + \"\\n\" + prefix\n\t\t\ts = s[i:]\n\t\t}\n\t\tout += s\n\t}\n\treturn out\n}\n<commit_msg>issue: more fixes<commit_after>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `usage: issue [-p project] query\n\nIf query is a single number, prints the full history for the issue.\nOtherwise, prints a table of matching results.\nThe special query 'go1' is shorthand for 'Priority-Go1'.\n`)\n\tos.Exit(2)\n}\n\ntype Feed struct {\n\tEntry Entries `xml:\"entry\"`\n}\n\ntype Entry struct {\n\tID string `xml:\"id\"`\n\tTitle string `xml:\"title\"`\n\tPublished time.Time `xml:\"published\"`\n\tContent string `xml:\"content\"`\n\tUpdates []Update `xml:\"updates\"`\n\tAuthor struct {\n\t\tName string `xml:\"name\"`\n\t} `xml:\"author\"`\n\tOwner string `xml:\"owner\"`\n\tStatus string `xml:\"status\"`\n\tLabel []string `xml:\"label\"`\n}\n\ntype Update struct {\n\tSummary string `xml:\"summary\"`\n\tOwner string `xml:\"ownerUpdate\"`\n\tLabel string `xml:\"label\"`\n\tStatus string `xml:\"status\"`\n}\n\ntype Entries []Entry\n\nfunc (e Entries) Len() int { return len(e) }\nfunc (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }\nfunc (e Entries) Less(i, j int) bool { return e[i].Title < e[j].Title }\n\nvar project = flag.String(\"p\", \"go\", \"code.google.com project identifier\")\nvar v = flag.Bool(\"v\", false, \"verbose\")\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t}\n\n\tfull := false\n\tq := flag.Arg(0)\n\tn, _ := strconv.Atoi(q)\n\tif n != 0 {\n\t\tq = \"id:\" + q\n\t\tfull = true\n\t}\n\tif q == \"go1\" {\n\t\tq = \"label:Priority-Go1\"\n\t}\n\n\tlog.SetFlags(0)\n\n\tquery := url.Values{\n\t\t\"q\": {q},\n\t\t\"max-results\": {\"400\"},\n\t}\n\tif !full {\n\t\tquery[\"can\"] = []string{\"open\"}\n\t}\n\tu := \"https:\/\/code.google.com\/feeds\/issues\/p\/\" + *project + \"\/issues\/full?\" + query.Encode()\n\tif *v {\n\t\tlog.Print(u)\n\t}\n\tr, err := http.Get(u)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar feed Feed\n\tif err := xml.NewDecoder(r.Body).Decode(&feed); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tr.Body.Close()\n\n\tsort.Sort(feed.Entry)\n\tfor _, e := range feed.Entry {\n\t\tid := e.ID\n\t\tif i := strings.Index(id, \"id=\"); i >= 0 {\n\t\t\tid = id[:i+len(\"id=\")]\n\t\t}\n\t\tfmt.Printf(\"%s\\t%s\\n\", id, e.Title)\n\t\tif full {\n\t\t\tfmt.Printf(\"Reported by %s (%s)\\n\", e.Author.Name, e.Published.Format(\"2006-01-02 15:04:05\"))\n\t\t\tif e.Owner != \"\" {\n\t\t\t\tfmt.Printf(\"\\tOwner: %s\\n\", e.Owner)\n\t\t\t}\n\t\t\tif e.Status != \"\" {\n\t\t\t\tfmt.Printf(\"\\tStatus: %s\\n\", e.Status)\n\t\t\t}\n\t\t\tfor _, l := range e.Label {\n\t\t\t\tfmt.Printf(\"\\tLabel: %s\\n\", l)\n\t\t\t}\n\t\t\tif e.Content != \"\" {\n\t\t\t\tfmt.Printf(\"\\n\\t%s\\n\", wrap(html.UnescapeString(e.Content), \"\\t\"))\n\t\t\t}\n\t\t\tu := \"https:\/\/code.google.com\/feeds\/issues\/p\/\" + *project + \"\/issues\/\" + id + \"\/comments\/full\"\n\t\t\tif *v {\n\t\t\t\tlog.Print(u)\n\t\t\t}\n\t\t\tr, err := http.Get(u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tvar feed Feed\n\t\t\tif err := xml.NewDecoder(r.Body).Decode(&feed); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tr.Body.Close()\n\n\t\t\tfor _, e := range feed.Entry {\n\t\t\t\tfmt.Printf(\"\\n%s (%s)\\n\", e.Title, e.Published.Format(\"2006-01-02 15:04:05\"))\n\t\t\t\tfor _, up := range e.Updates {\n\t\t\t\t\tif up.Summary != \"\" {\n\t\t\t\t\t\tfmt.Printf(\"\\tSummary: %s\\n\", up.Summary)\n\t\t\t\t\t}\n\t\t\t\t\tif up.Owner != \"\" {\n\t\t\t\t\t\tfmt.Printf(\"\\tOwner: %s\\n\", up.Owner)\n\t\t\t\t\t}\n\t\t\t\t\tif up.Status != \"\" {\n\t\t\t\t\t\tfmt.Printf(\"\\tStatus: %s\\n\", up.Status)\n\t\t\t\t\t}\n\t\t\t\t\tif up.Label != \"\" {\n\t\t\t\t\t\tfmt.Printf(\"\\tLabel: %s\\n\", up.Label)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif e.Content != \"\" {\n\t\t\t\t\tfmt.Printf(\"\\n\\t%s\\n\", wrap(html.UnescapeString(e.Content), \"\\t\"))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc wrap(t string, prefix string) string {\n\tout := \"\"\n\tt = strings.Replace(t, \"\\r\\n\", \"\\n\", -1)\n\tlines := strings.Split(t, \"\\n\")\n\tfor i, line := range lines {\n\t\tif i > 0 {\n\t\t\tout += \"\\n\" + prefix\n\t\t}\n\t\ts := line\n\t\tfor len(s) > 70 {\n\t\t\ti := strings.LastIndex(s[:70], \" \")\n\t\t\tif i < 0 {\n\t\t\t\ti = 69\n\t\t\t}\n\t\t\ti++\n\t\t\tout += s[:i] + \"\\n\" + prefix\n\t\t\ts = s[i:]\n\t\t}\n\t\tout += s\n\t}\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/gonuts\/commander\"\n\t\"github.com\/kusabashira\/lclip\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nvar cmd_get = &commander.Command{\n\tUsageLine: \"get LABEL\",\n\tShort: \"get text from LABEL\",\n\tRun: func(cmd *commander.Command, args []string) error {\n\t\tif len(args) < 1 {\n\t\t\treturn fmt.Errorf(\"no specify LABEL\")\n\t\t}\n\t\tlabel := args[0]\n\n\t\tpath, err := lclip.DefaultPath()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := lclip.NewClipboard(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdst := c.Get(label)\n\t\tif _, err = os.Stdout.Write(dst); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = os.Stdout.Write([]byte(\"\\n\")); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar cmd_set = &commander.Command{\n\tUsageLine: \"set LABEL [FILE]...\",\n\tShort: \"set text to LABEL\",\n\tRun: func(cmd *commander.Command, args []string) error {\n\t\tif len(args) < 1 {\n\t\t\treturn fmt.Errorf(\"no specify LABEL\")\n\t\t}\n\t\tlabel := args[0]\n\n\t\tpath, err := lclip.DefaultPath()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := lclip.NewClipboard(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr, err := argf.From(args[1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrc, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.Set(label, src)\n\t\treturn c.Close()\n\t},\n}\n\nfunc main() {\n}\n<commit_msg>Implement cmd_labels<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sort\"\n\n\t\"github.com\/gonuts\/commander\"\n\t\"github.com\/kusabashira\/lclip\"\n\t\"github.com\/yuya-takeyama\/argf\"\n)\n\nvar cmd_get = &commander.Command{\n\tUsageLine: \"get LABEL\",\n\tShort: \"get text from LABEL\",\n\tRun: func(cmd *commander.Command, args []string) error {\n\t\tif len(args) < 1 {\n\t\t\treturn fmt.Errorf(\"no specify LABEL\")\n\t\t}\n\t\tlabel := args[0]\n\n\t\tpath, err := lclip.DefaultPath()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := lclip.NewClipboard(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdst := c.Get(label)\n\t\tif _, err = os.Stdout.Write(dst); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = os.Stdout.Write([]byte(\"\\n\")); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn c.Close()\n\t},\n}\n\nvar cmd_set = &commander.Command{\n\tUsageLine: \"set LABEL [FILE]...\",\n\tShort: \"set text to LABEL\",\n\tRun: func(cmd *commander.Command, args []string) error {\n\t\tif len(args) < 1 {\n\t\t\treturn fmt.Errorf(\"no specify LABEL\")\n\t\t}\n\t\tlabel := args[0]\n\n\t\tpath, err := lclip.DefaultPath()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := lclip.NewClipboard(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr, err := argf.From(args[1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrc, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.Set(label, src)\n\t\treturn c.Close()\n\t},\n}\n\nvar cmd_labels = &commander.Command{\n\tUsageLine: \"labels\",\n\tShort: \"list labels\",\n\tRun: func(cmd *commander.Command, args []string) error {\n\t\tpath, err := lclip.DefaultPath()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc, err := lclip.NewClipboard(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlabels := c.Labels()\n\t\tsort.Strings(labels)\n\t\tfor _, label := range labels {\n\t\t\tfmt.Println(label)\n\t\t}\n\t\treturn c.Close()\n\t},\n}\n\nfunc main() {\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/pilosa\/pilosa\"\n)\n\n\/\/ Version holds the version information passed in at compile time.\nvar Version string\n\nfunc init() {\n\tif Version == \"\" {\n\t\tVersion = \"0.0.0\"\n\t}\n\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nconst (\n\t\/\/ DefaultDataDir is the default data directory.\n\tDefaultDataDir = \"~\/.pilosa\"\n)\n\nfunc main() {\n\tm := NewMain()\n\tm.Server.Handler.Version = Version\n\tfmt.Fprintf(m.Stderr, \"Pilosa %s\\n\", Version)\n\n\t\/\/ Parse command line arguments.\n\tif err := m.ParseFlags(os.Args[1:]); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tos.Exit(2)\n\t}\n\n\t\/\/ Start CPU profiling.\n\tif m.CPUProfile != \"\" {\n\t\tf, err := os.Create(m.CPUProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(m.Stderr, \"create cpu profile: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Fprintln(m.Stderr, \"Starting cpu profile\")\n\t\tpprof.StartCPUProfile(f)\n\t\ttime.AfterFunc(m.CPUTime, func() {\n\t\t\tfmt.Fprintln(m.Stderr, \"Stopping cpu profile\")\n\t\t\tpprof.StopCPUProfile()\n\t\t\tf.Close()\n\t\t})\n\t}\n\n\t\/\/ Execute the program.\n\tif err := m.Run(); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tfmt.Fprintln(m.Stderr, \"stopping profile\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ First SIGKILL causes server to shut down gracefully.\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt)\n\tsig := <-c\n\tfmt.Fprintf(m.Stderr, \"Received %s; gracefully shutting down...\\n\", sig.String())\n\n\t\/\/ Second signal causes a hard shutdown.\n\tgo func() { <-c; os.Exit(1) }()\n\n\tif err := m.Close(); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Main represents the main program execution.\ntype Main struct {\n\tServer *pilosa.Server\n\n\t\/\/ Configuration options.\n\tConfigPath string\n\tConfig *pilosa.Config\n\n\t\/\/ Profiling options.\n\tCPUProfile string\n\tCPUTime time.Duration\n\n\t\/\/ Standard input\/output\n\tStdin io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n\/\/ NewMain returns a new instance of Main.\nfunc NewMain() *Main {\n\treturn &Main{\n\t\tServer: pilosa.NewServer(),\n\t\tConfig: pilosa.NewConfig(),\n\n\t\tStdin: os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n\/\/ Run executes the main program execution.\nfunc (m *Main) Run(args ...string) error {\n\t\/\/ Notify user of config file.\n\tif m.ConfigPath != \"\" {\n\t\tfmt.Fprintf(m.Stdout, \"Using config: %s\\n\", m.ConfigPath)\n\t}\n\n\t\/\/ Setup logging output.\n\tm.Server.LogOutput = m.Stderr\n\n\t\/\/ Configure index.\n\tfmt.Fprintf(m.Stderr, \"Using data from: %s\\n\", m.Config.DataDir)\n\tm.Server.Index.Path = m.Config.DataDir\n\tm.Server.Index.Stats = pilosa.NewExpvarStatsClient()\n\n\t\/\/ Build cluster from config file.\n\tm.Server.Host = m.Config.Host\n\tm.Server.Cluster = m.Config.PilosaCluster()\n\n\t\/\/ Set configuration options.\n\tm.Server.AntiEntropyInterval = time.Duration(m.Config.AntiEntropy.Interval)\n\n\t\/\/ Initialize server.\n\tif err := m.Server.Open(); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(m.Stderr, \"Listening as http:\/\/%s\\n\", m.Server.Host)\n\n\treturn nil\n}\n\n\/\/ Close shuts down the server.\nfunc (m *Main) Close() error {\n\treturn m.Server.Close()\n}\n\n\/\/ ParseFlags parses command line flags from args.\nfunc (m *Main) ParseFlags(args []string) error {\n\tfs := flag.NewFlagSet(\"pilosa\", flag.ContinueOnError)\n\tfs.StringVar(&m.CPUProfile, \"cpuprofile\", \"\", \"cpu profile\")\n\tfs.DurationVar(&m.CPUTime, \"cputime\", 30*time.Second, \"cpu profile duration\")\n\tfs.StringVar(&m.ConfigPath, \"config\", \"\", \"config path\")\n\tfs.SetOutput(m.Stderr)\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Load config, if specified.\n\tif m.ConfigPath != \"\" {\n\t\tif _, err := toml.DecodeFile(m.ConfigPath, &m.Config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Use default data directory if one is not specified.\n\tif m.Config.DataDir == \"\" {\n\t\tm.Config.DataDir = DefaultDataDir\n\t}\n\n\t\/\/ Expand home directory.\n\tprefix := \"~\" + string(filepath.Separator)\n\tif strings.HasPrefix(m.Config.DataDir, prefix) {\n\t\t\/\/\tu, err := user.Current()\n\t\tHomeDir := os.Getenv(\"HOME\")\n\t\t\/*if err != nil {\n\t\t\treturn err\n\t\t} else*\/if HomeDir == \"\" {\n\t\t\treturn errors.New(\"data directory not specified and no home dir available\")\n\t\t}\n\t\tm.Config.DataDir = filepath.Join(HomeDir, strings.TrimPrefix(m.Config.DataDir, prefix))\n\t}\n\n\treturn nil\n}\n<commit_msg>for the default version number, include the `v`<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/pilosa\/pilosa\"\n)\n\n\/\/ Version holds the version information passed in at compile time.\nvar Version string\n\nfunc init() {\n\tif Version == \"\" {\n\t\tVersion = \"v0.0.0\"\n\t}\n\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nconst (\n\t\/\/ DefaultDataDir is the default data directory.\n\tDefaultDataDir = \"~\/.pilosa\"\n)\n\nfunc main() {\n\tm := NewMain()\n\tm.Server.Handler.Version = Version\n\tfmt.Fprintf(m.Stderr, \"Pilosa %s\\n\", Version)\n\n\t\/\/ Parse command line arguments.\n\tif err := m.ParseFlags(os.Args[1:]); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tos.Exit(2)\n\t}\n\n\t\/\/ Start CPU profiling.\n\tif m.CPUProfile != \"\" {\n\t\tf, err := os.Create(m.CPUProfile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(m.Stderr, \"create cpu profile: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Fprintln(m.Stderr, \"Starting cpu profile\")\n\t\tpprof.StartCPUProfile(f)\n\t\ttime.AfterFunc(m.CPUTime, func() {\n\t\t\tfmt.Fprintln(m.Stderr, \"Stopping cpu profile\")\n\t\t\tpprof.StopCPUProfile()\n\t\t\tf.Close()\n\t\t})\n\t}\n\n\t\/\/ Execute the program.\n\tif err := m.Run(); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tfmt.Fprintln(m.Stderr, \"stopping profile\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ First SIGKILL causes server to shut down gracefully.\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt)\n\tsig := <-c\n\tfmt.Fprintf(m.Stderr, \"Received %s; gracefully shutting down...\\n\", sig.String())\n\n\t\/\/ Second signal causes a hard shutdown.\n\tgo func() { <-c; os.Exit(1) }()\n\n\tif err := m.Close(); err != nil {\n\t\tfmt.Fprintln(m.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Main represents the main program execution.\ntype Main struct {\n\tServer *pilosa.Server\n\n\t\/\/ Configuration options.\n\tConfigPath string\n\tConfig *pilosa.Config\n\n\t\/\/ Profiling options.\n\tCPUProfile string\n\tCPUTime time.Duration\n\n\t\/\/ Standard input\/output\n\tStdin io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n\/\/ NewMain returns a new instance of Main.\nfunc NewMain() *Main {\n\treturn &Main{\n\t\tServer: pilosa.NewServer(),\n\t\tConfig: pilosa.NewConfig(),\n\n\t\tStdin: os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n\/\/ Run executes the main program execution.\nfunc (m *Main) Run(args ...string) error {\n\t\/\/ Notify user of config file.\n\tif m.ConfigPath != \"\" {\n\t\tfmt.Fprintf(m.Stdout, \"Using config: %s\\n\", m.ConfigPath)\n\t}\n\n\t\/\/ Setup logging output.\n\tm.Server.LogOutput = m.Stderr\n\n\t\/\/ Configure index.\n\tfmt.Fprintf(m.Stderr, \"Using data from: %s\\n\", m.Config.DataDir)\n\tm.Server.Index.Path = m.Config.DataDir\n\tm.Server.Index.Stats = pilosa.NewExpvarStatsClient()\n\n\t\/\/ Build cluster from config file.\n\tm.Server.Host = m.Config.Host\n\tm.Server.Cluster = m.Config.PilosaCluster()\n\n\t\/\/ Set configuration options.\n\tm.Server.AntiEntropyInterval = time.Duration(m.Config.AntiEntropy.Interval)\n\n\t\/\/ Initialize server.\n\tif err := m.Server.Open(); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(m.Stderr, \"Listening as http:\/\/%s\\n\", m.Server.Host)\n\n\treturn nil\n}\n\n\/\/ Close shuts down the server.\nfunc (m *Main) Close() error {\n\treturn m.Server.Close()\n}\n\n\/\/ ParseFlags parses command line flags from args.\nfunc (m *Main) ParseFlags(args []string) error {\n\tfs := flag.NewFlagSet(\"pilosa\", flag.ContinueOnError)\n\tfs.StringVar(&m.CPUProfile, \"cpuprofile\", \"\", \"cpu profile\")\n\tfs.DurationVar(&m.CPUTime, \"cputime\", 30*time.Second, \"cpu profile duration\")\n\tfs.StringVar(&m.ConfigPath, \"config\", \"\", \"config path\")\n\tfs.SetOutput(m.Stderr)\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Load config, if specified.\n\tif m.ConfigPath != \"\" {\n\t\tif _, err := toml.DecodeFile(m.ConfigPath, &m.Config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Use default data directory if one is not specified.\n\tif m.Config.DataDir == \"\" {\n\t\tm.Config.DataDir = DefaultDataDir\n\t}\n\n\t\/\/ Expand home directory.\n\tprefix := \"~\" + string(filepath.Separator)\n\tif strings.HasPrefix(m.Config.DataDir, prefix) {\n\t\t\/\/\tu, err := user.Current()\n\t\tHomeDir := os.Getenv(\"HOME\")\n\t\t\/*if err != nil {\n\t\t\treturn err\n\t\t} else*\/if HomeDir == \"\" {\n\t\t\treturn errors.New(\"data directory not specified and no home dir available\")\n\t\t}\n\t\tm.Config.DataDir = filepath.Join(HomeDir, strings.TrimPrefix(m.Config.DataDir, prefix))\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"html\/template\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\n\t\"golang.org\/x\/tools\/present\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", dirHandler)\n}\n\n\/\/ dirHandler serves a directory listing for the requested path, rooted at basePath.\nfunc dirHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/favicon.ico\" {\n\t\thttp.Error(w, \"not found\", 404)\n\t\treturn\n\t}\n\tconst base = \".\"\n\tname := filepath.Join(base, r.URL.Path)\n\tif isDoc(name) {\n\t\terr := renderDoc(w, name)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t}\n\t\treturn\n\t}\n\tif isDir, err := dirList(w, name); err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t} else if isDir {\n\t\treturn\n\t}\n\thttp.FileServer(http.Dir(base)).ServeHTTP(w, r)\n}\n\nfunc isDoc(path string) bool {\n\t_, ok := contentTemplate[filepath.Ext(path)]\n\treturn ok\n}\n\nvar (\n\t\/\/ dirListTemplate holds the front page template.\n\tdirListTemplate *template.Template\n\n\t\/\/ contentTemplate maps the presentable file extensions to the\n\t\/\/ template to be executed.\n\tcontentTemplate map[string]*template.Template\n)\n\nfunc initTemplates(base string) error {\n\t\/\/ Locate the template file.\n\tactionTmpl := filepath.Join(base, \"templates\/action.tmpl\")\n\n\tcontentTemplate = make(map[string]*template.Template)\n\n\tfor ext, contentTmpl := range map[string]string{\n\t\t\".slide\": \"slides.tmpl\",\n\t\t\".article\": \"article.tmpl\",\n\t} {\n\t\tcontentTmpl = filepath.Join(base, \"templates\", contentTmpl)\n\n\t\t\/\/ Read and parse the input.\n\t\ttmpl := present.Template()\n\t\ttmpl = tmpl.Funcs(template.FuncMap{\"playable\": playable})\n\t\tif _, err := tmpl.ParseFiles(actionTmpl, contentTmpl); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcontentTemplate[ext] = tmpl\n\t}\n\n\tvar err error\n\tdirListTemplate, err = template.ParseFiles(filepath.Join(base, \"templates\/dir.tmpl\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ renderDoc reads the present file, gets its template representation,\n\/\/ and executes the template, sending output to w.\nfunc renderDoc(w io.Writer, docFile string) error {\n\t\/\/ Read the input and build the doc structure.\n\tdoc, err := parse(docFile, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find which template should be executed.\n\ttmpl := contentTemplate[filepath.Ext(docFile)]\n\n\t\/\/ Execute the template.\n\treturn doc.Render(w, tmpl)\n}\n\nfunc parse(name string, mode present.ParseMode) (*present.Doc, error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn present.Parse(f, name, 0)\n}\n\n\/\/ dirList scans the given path and writes a directory listing to w.\n\/\/ It parses the first part of each .slide file it encounters to display the\n\/\/ presentation title in the listing.\n\/\/ If the given path is not a directory, it returns (isDir == false, err == nil)\n\/\/ and writes nothing to w.\nfunc dirList(w io.Writer, name string) (isDir bool, err error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isDir = fi.IsDir(); !isDir {\n\t\treturn false, nil\n\t}\n\tfis, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\td := &dirListData{Path: name}\n\tfor _, fi := range fis {\n\t\t\/\/ skip the golang.org directory\n\t\tif name == \".\" && fi.Name() == \"golang.org\" {\n\t\t\tcontinue\n\t\t}\n\t\te := dirEntry{\n\t\t\tName: fi.Name(),\n\t\t\tPath: filepath.ToSlash(filepath.Join(name, fi.Name())),\n\t\t}\n\t\tif fi.IsDir() && showDir(e.Name) {\n\t\t\td.Dirs = append(d.Dirs, e)\n\t\t\tcontinue\n\t\t}\n\t\tif isDoc(e.Name) {\n\t\t\tif p, err := parse(e.Path, present.TitlesOnly); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\te.Title = p.Title\n\t\t\t}\n\t\t\tswitch filepath.Ext(e.Path) {\n\t\t\tcase \".article\":\n\t\t\t\td.Articles = append(d.Articles, e)\n\t\t\tcase \".slide\":\n\t\t\t\td.Slides = append(d.Slides, e)\n\t\t\t}\n\t\t} else if showFile(e.Name) {\n\t\t\td.Other = append(d.Other, e)\n\t\t}\n\t}\n\tif d.Path == \".\" {\n\t\td.Path = \"\"\n\t}\n\tsort.Sort(d.Dirs)\n\tsort.Sort(d.Slides)\n\tsort.Sort(d.Articles)\n\tsort.Sort(d.Other)\n\treturn true, dirListTemplate.Execute(w, d)\n}\n\n\/\/ showFile reports whether the given file should be displayed in the list.\nfunc showFile(n string) bool {\n\tswitch filepath.Ext(n) {\n\tcase \".pdf\":\n\tcase \".html\":\n\tcase \".go\":\n\tdefault:\n\t\treturn isDoc(n)\n\t}\n\treturn true\n}\n\n\/\/ showDir reports whether the given directory should be displayed in the list.\nfunc showDir(n string) bool {\n\tif len(n) > 0 && (n[0] == '.' || n[0] == '_') || n == \"present\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype dirListData struct {\n\tPath string\n\tDirs, Slides, Articles, Other dirEntrySlice\n}\n\ntype dirEntry struct {\n\tName, Path, Title string\n}\n\ntype dirEntrySlice []dirEntry\n\nfunc (s dirEntrySlice) Len() int { return len(s) }\nfunc (s dirEntrySlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s dirEntrySlice) Less(i, j int) bool { return s[i].Name < s[j].Name }\n<commit_msg>cmd\/present: add request address to logged errors<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"html\/template\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\n\t\"golang.org\/x\/tools\/present\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", dirHandler)\n}\n\n\/\/ dirHandler serves a directory listing for the requested path, rooted at basePath.\nfunc dirHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/favicon.ico\" {\n\t\thttp.Error(w, \"not found\", 404)\n\t\treturn\n\t}\n\tconst base = \".\"\n\tname := filepath.Join(base, r.URL.Path)\n\tif isDoc(name) {\n\t\terr := renderDoc(w, name)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t}\n\t\treturn\n\t}\n\tif isDir, err := dirList(w, name); err != nil {\n\t\taddr, _, e := net.SplitHostPort(r.RemoteAddr)\n\t\tif e != nil {\n\t\t\taddr = r.RemoteAddr\n\t\t}\n\t\tlog.Printf(\"request from %s: %s\", addr, err)\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t} else if isDir {\n\t\treturn\n\t}\n\thttp.FileServer(http.Dir(base)).ServeHTTP(w, r)\n}\n\nfunc isDoc(path string) bool {\n\t_, ok := contentTemplate[filepath.Ext(path)]\n\treturn ok\n}\n\nvar (\n\t\/\/ dirListTemplate holds the front page template.\n\tdirListTemplate *template.Template\n\n\t\/\/ contentTemplate maps the presentable file extensions to the\n\t\/\/ template to be executed.\n\tcontentTemplate map[string]*template.Template\n)\n\nfunc initTemplates(base string) error {\n\t\/\/ Locate the template file.\n\tactionTmpl := filepath.Join(base, \"templates\/action.tmpl\")\n\n\tcontentTemplate = make(map[string]*template.Template)\n\n\tfor ext, contentTmpl := range map[string]string{\n\t\t\".slide\": \"slides.tmpl\",\n\t\t\".article\": \"article.tmpl\",\n\t} {\n\t\tcontentTmpl = filepath.Join(base, \"templates\", contentTmpl)\n\n\t\t\/\/ Read and parse the input.\n\t\ttmpl := present.Template()\n\t\ttmpl = tmpl.Funcs(template.FuncMap{\"playable\": playable})\n\t\tif _, err := tmpl.ParseFiles(actionTmpl, contentTmpl); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcontentTemplate[ext] = tmpl\n\t}\n\n\tvar err error\n\tdirListTemplate, err = template.ParseFiles(filepath.Join(base, \"templates\/dir.tmpl\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ renderDoc reads the present file, gets its template representation,\n\/\/ and executes the template, sending output to w.\nfunc renderDoc(w io.Writer, docFile string) error {\n\t\/\/ Read the input and build the doc structure.\n\tdoc, err := parse(docFile, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find which template should be executed.\n\ttmpl := contentTemplate[filepath.Ext(docFile)]\n\n\t\/\/ Execute the template.\n\treturn doc.Render(w, tmpl)\n}\n\nfunc parse(name string, mode present.ParseMode) (*present.Doc, error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn present.Parse(f, name, 0)\n}\n\n\/\/ dirList scans the given path and writes a directory listing to w.\n\/\/ It parses the first part of each .slide file it encounters to display the\n\/\/ presentation title in the listing.\n\/\/ If the given path is not a directory, it returns (isDir == false, err == nil)\n\/\/ and writes nothing to w.\nfunc dirList(w io.Writer, name string) (isDir bool, err error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isDir = fi.IsDir(); !isDir {\n\t\treturn false, nil\n\t}\n\tfis, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\td := &dirListData{Path: name}\n\tfor _, fi := range fis {\n\t\t\/\/ skip the golang.org directory\n\t\tif name == \".\" && fi.Name() == \"golang.org\" {\n\t\t\tcontinue\n\t\t}\n\t\te := dirEntry{\n\t\t\tName: fi.Name(),\n\t\t\tPath: filepath.ToSlash(filepath.Join(name, fi.Name())),\n\t\t}\n\t\tif fi.IsDir() && showDir(e.Name) {\n\t\t\td.Dirs = append(d.Dirs, e)\n\t\t\tcontinue\n\t\t}\n\t\tif isDoc(e.Name) {\n\t\t\tif p, err := parse(e.Path, present.TitlesOnly); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\te.Title = p.Title\n\t\t\t}\n\t\t\tswitch filepath.Ext(e.Path) {\n\t\t\tcase \".article\":\n\t\t\t\td.Articles = append(d.Articles, e)\n\t\t\tcase \".slide\":\n\t\t\t\td.Slides = append(d.Slides, e)\n\t\t\t}\n\t\t} else if showFile(e.Name) {\n\t\t\td.Other = append(d.Other, e)\n\t\t}\n\t}\n\tif d.Path == \".\" {\n\t\td.Path = \"\"\n\t}\n\tsort.Sort(d.Dirs)\n\tsort.Sort(d.Slides)\n\tsort.Sort(d.Articles)\n\tsort.Sort(d.Other)\n\treturn true, dirListTemplate.Execute(w, d)\n}\n\n\/\/ showFile reports whether the given file should be displayed in the list.\nfunc showFile(n string) bool {\n\tswitch filepath.Ext(n) {\n\tcase \".pdf\":\n\tcase \".html\":\n\tcase \".go\":\n\tdefault:\n\t\treturn isDoc(n)\n\t}\n\treturn true\n}\n\n\/\/ showDir reports whether the given directory should be displayed in the list.\nfunc showDir(n string) bool {\n\tif len(n) > 0 && (n[0] == '.' || n[0] == '_') || n == \"present\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype dirListData struct {\n\tPath string\n\tDirs, Slides, Articles, Other dirEntrySlice\n}\n\ntype dirEntry struct {\n\tName, Path, Title string\n}\n\ntype dirEntrySlice []dirEntry\n\nfunc (s dirEntrySlice) Len() int { return len(s) }\nfunc (s dirEntrySlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s dirEntrySlice) Less(i, j int) bool { return s[i].Name < s[j].Name }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CodeIgnition. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/codeignition\/recon\/cmd\/recond\/config\"\n\t\"github.com\/codeignition\/recon\/policy\"\n\t_ \"github.com\/codeignition\/recon\/policy\/handlers\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst agentsAPIPath = \"\/api\/agents\" \/\/ agents path in the marksman server\n\n\/\/ natsEncConn is the opened with the URL obtained from marksman.\n\/\/ It is populated if the agent registers successfully.\nvar natsEncConn *nats.EncodedConn\n\nfunc main() {\n\tlog.SetPrefix(\"recond: \")\n\n\tvar marksmanAddr = flag.String(\"marksman\", \"http:\/\/localhost:3000\", \"address of the marksman server\")\n\tflag.Parse()\n\n\tconf, err := config.Init()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t\/\/ agent represents a single agent on which the recond\n\t\/\/ is running.\n\tvar agent = &Agent{\n\t\tUID: conf.UID,\n\t}\n\n\terr = agent.register(*marksmanAddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer natsEncConn.Close()\n\n\tif err := addSystemDataPolicy(conf); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo runStoredPolicies(conf)\n\n\tnatsEncConn.Subscribe(agent.UID, func(s string) {\n\t\tfmt.Printf(\"Received a message: %s\\n\", s)\n\t})\n\n\tnatsEncConn.Subscribe(agent.UID+\"_policy\", func(subj, reply string, p *policy.Policy) {\n\t\tfmt.Printf(\"Received a Policy: %v\\n\", p)\n\t\tif err := conf.AddPolicy(*p); err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tif err := conf.Save(); err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tevents, err := p.Execute(context.TODO())\n\t\tif err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tnatsEncConn.Publish(reply, \"policy ack\") \/\/ acknowledge\n\t\tfor e := range events {\n\t\t\tnatsEncConn.Publish(\"policy_events\", e)\n\t\t}\n\t})\n\n\t\/\/ this is just to block the main function from exiting\n\tc := make(chan struct{})\n\t<-c\n}\n\nfunc runStoredPolicies(c *config.Config) {\n\tfor _, p := range c.PolicyConfig {\n\t\tgo func() {\n\t\t\tevents, err := p.Execute(context.TODO())\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err) \/\/ TODO: send to a nats errors channel\n\t\t\t}\n\t\t\tfor e := range events {\n\t\t\t\tnatsEncConn.Publish(\"policy_events\", e)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc addSystemDataPolicy(c *config.Config) error {\n\t\/\/ if the policy already exists, return silently\n\tfor _, p := range c.PolicyConfig {\n\t\tif p.Name == \"default_system_data\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tp := policy.Policy{\n\t\tName: \"default_system_data\",\n\t\tAgentUID: c.UID,\n\t\tType: \"system_data\",\n\t\tM: map[string]string{\n\t\t\t\"interval\": \"5s\",\n\t\t},\n\t}\n\tif err := c.AddPolicy(p); err != nil {\n\t\treturn err\n\n\t}\n\tif err := c.Save(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>fix race condition while running stored policies<commit_after>\/\/ Copyright 2015 CodeIgnition. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/codeignition\/recon\/cmd\/recond\/config\"\n\t\"github.com\/codeignition\/recon\/policy\"\n\t_ \"github.com\/codeignition\/recon\/policy\/handlers\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst agentsAPIPath = \"\/api\/agents\" \/\/ agents path in the marksman server\n\n\/\/ natsEncConn is the opened with the URL obtained from marksman.\n\/\/ It is populated if the agent registers successfully.\nvar natsEncConn *nats.EncodedConn\n\nfunc main() {\n\tlog.SetPrefix(\"recond: \")\n\n\tvar marksmanAddr = flag.String(\"marksman\", \"http:\/\/localhost:3000\", \"address of the marksman server\")\n\tflag.Parse()\n\n\tconf, err := config.Init()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t\/\/ agent represents a single agent on which the recond\n\t\/\/ is running.\n\tvar agent = &Agent{\n\t\tUID: conf.UID,\n\t}\n\n\terr = agent.register(*marksmanAddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer natsEncConn.Close()\n\n\tif err := addSystemDataPolicy(conf); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo runStoredPolicies(conf)\n\n\tnatsEncConn.Subscribe(agent.UID, func(s string) {\n\t\tfmt.Printf(\"Received a message: %s\\n\", s)\n\t})\n\n\tnatsEncConn.Subscribe(agent.UID+\"_policy\", func(subj, reply string, p *policy.Policy) {\n\t\tfmt.Printf(\"Received a Policy: %v\\n\", p)\n\t\tif err := conf.AddPolicy(*p); err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tif err := conf.Save(); err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tevents, err := p.Execute(context.TODO())\n\t\tif err != nil {\n\t\t\tnatsEncConn.Publish(reply, err.Error())\n\t\t\treturn\n\t\t}\n\t\tnatsEncConn.Publish(reply, \"policy ack\") \/\/ acknowledge\n\t\tfor e := range events {\n\t\t\tnatsEncConn.Publish(\"policy_events\", e)\n\t\t}\n\t})\n\n\t\/\/ this is just to block the main function from exiting\n\tc := make(chan struct{})\n\t<-c\n}\n\nfunc runStoredPolicies(c *config.Config) {\n\tlog.Print(\"adding stored policies...\")\n\tfor _, p := range c.PolicyConfig {\n\t\tlog.Print(p.Name)\n\t\tgo func(p policy.Policy) {\n\t\t\tevents, err := p.Execute(context.TODO())\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err) \/\/ TODO: send to a nats errors channel\n\t\t\t}\n\t\t\tfor e := range events {\n\t\t\t\tnatsEncConn.Publish(\"policy_events\", e)\n\t\t\t}\n\t\t}(p)\n\t}\n}\n\nfunc addSystemDataPolicy(c *config.Config) error {\n\t\/\/ if the policy already exists, return silently\n\tfor _, p := range c.PolicyConfig {\n\t\tif p.Name == \"default_system_data\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tp := policy.Policy{\n\t\tName: \"default_system_data\",\n\t\tAgentUID: c.UID,\n\t\tType: \"system_data\",\n\t\tM: map[string]string{\n\t\t\t\"interval\": \"5s\",\n\t\t},\n\t}\n\tif err := c.AddPolicy(p); err != nil {\n\t\treturn err\n\n\t}\n\tif err := c.Save(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/howeyc\/gopass\"\n\t_ \"github.com\/lib\/pq\"\n\t\"golang.org\/x\/crypto\/bcrypt\"\n)\n\nfunc EncryptBcrypt(passwd string) (hash string, err error) {\n\thashBytes, err := bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)\n\thash = string(hashBytes)\n\treturn\n}\n\nfunc updateAuthorRow(connString, uname, passwd, fullname, email, www string) {\n\tdb, err := sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\tdefer db.Close()\n\t\/\/stmt, _ := db.Prepare(`insert into author(id, disp_name, passwd, full_name, email, www)\n\t\/\/values($1, $2, $3, $4, $5, $6)`)\n\tstmt, _ := db.Prepare(`update author set\n\tdisp_name=$1, passwd=$2, full_name=$3, email=$4, www=$5\n\twhere id=1`)\n\tdefer stmt.Close()\n\tpasswdHash, err := EncryptBcrypt(passwd)\n\tif err != nil {\n\t\tfmt.Printf(\"Error in Encrypt(): %s\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"Updating user ID=1...\\n\")\n\tfmt.Printf(\"dbstr: %q\\nuname: %q\\npasswd: %q\\nhash: %q\\nfullname: %q\\nemail: %q\\nwww: %q\\n\",\n\t\tconnString, uname, \"***\", passwdHash, fullname, email, www)\n\tstmt.Exec(uname, passwdHash, fullname, email, www)\n}\n\nfunc main() {\n\tdbFile := os.Getenv(\"RTFBLOG_DB_TEST_URL\")\n\tuname := \"rtfb\"\n\tfmt.Printf(\"New password: \")\n\tpasswd := gopass.GetPasswd()\n\tfmt.Printf(\"Confirm: \")\n\tpasswd2 := gopass.GetPasswd()\n\tif string(passwd2) != string(passwd) {\n\t\tpanic(\"Passwords do not match\")\n\t}\n\tfullname := \"Vytautas Šaltenis\"\n\temail := \"vytas@rtfb.lt\"\n\twww := \"http:\/\/rtfb.lt\/\"\n\tupdateAuthorRow(dbFile, uname, string(passwd), fullname, email, www)\n}\n<commit_msg>cmd\/reset: remove dead code<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/howeyc\/gopass\"\n\t_ \"github.com\/lib\/pq\"\n\t\"golang.org\/x\/crypto\/bcrypt\"\n)\n\nfunc EncryptBcrypt(passwd string) (hash string, err error) {\n\thashBytes, err := bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)\n\thash = string(hashBytes)\n\treturn\n}\n\nfunc updateAuthorRow(connString, uname, passwd, fullname, email, www string) {\n\tdb, err := sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\tdefer db.Close()\n\tstmt, _ := db.Prepare(`update author set\n\tdisp_name=$1, passwd=$2, full_name=$3, email=$4, www=$5\n\twhere id=1`)\n\tdefer stmt.Close()\n\tpasswdHash, err := EncryptBcrypt(passwd)\n\tif err != nil {\n\t\tfmt.Printf(\"Error in Encrypt(): %s\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"Updating user ID=1...\\n\")\n\tfmt.Printf(\"dbstr: %q\\nuname: %q\\npasswd: %q\\nhash: %q\\nfullname: %q\\nemail: %q\\nwww: %q\\n\",\n\t\tconnString, uname, \"***\", passwdHash, fullname, email, www)\n\tstmt.Exec(uname, passwdHash, fullname, email, www)\n}\n\nfunc main() {\n\tdbFile := os.Getenv(\"RTFBLOG_DB_TEST_URL\")\n\tuname := \"rtfb\"\n\tfmt.Printf(\"New password: \")\n\tpasswd := gopass.GetPasswd()\n\tfmt.Printf(\"Confirm: \")\n\tpasswd2 := gopass.GetPasswd()\n\tif string(passwd2) != string(passwd) {\n\t\tpanic(\"Passwords do not match\")\n\t}\n\tfullname := \"Vytautas Šaltenis\"\n\temail := \"vytas@rtfb.lt\"\n\twww := \"http:\/\/rtfb.lt\/\"\n\tupdateAuthorRow(dbFile, uname, string(passwd), fullname, email, www)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage main\n\nimport (\n\t\"github.com\/szabba\/md\/newton\"\n\t\"github.com\/szabba\/md\/vect\"\n)\n\n\/\/ A 'picky' force, that doesn't affect some bodies\ntype PickyForce struct {\n\tforce newton.Force\n\tzeroFor []int\n}\n\n\/\/ Creates a picky version of a force\nfunc NewPicky(f newton.Force, zeroFor ...int) newton.Force {\n\n\treturn &PickyForce{force: f, zeroFor: zeroFor}\n}\n\nfunc (picky *PickyForce) Accel(bs []*newton.Body, i int) (a vect.Vector) {\n\n\tfor _, ignored := range picky.zeroFor {\n\n\t\tif ignored == i {\n\n\t\t\treturn vect.Zero\n\t\t}\n\t}\n\n\treturn picky.force.Accel(bs, i)\n}\n\n\/\/ Creates a system containing a rectangular grid of m times n particles.\nfunc SetUpRect(m, n int) *newton.System {\n\n\tsys := newton.NewSystem(newton.Verlet, m*n)\n\n\tfor i := 0; i < sys.Bodies(); i++ {\n\n\t\tsys.Body(i).SetMass(1)\n\t}\n\n\treturn sys\n}\n\nfunc main() {\n}\n<commit_msg>Add ParticleRect in cmd\/square<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage main\n\nimport (\n\t\"github.com\/szabba\/md\/newton\"\n\t\"github.com\/szabba\/md\/vect\"\n)\n\n\/\/ A 'picky' force, that doesn't affect some bodies\ntype PickyForce struct {\n\tforce newton.Force\n\tzeroFor []int\n}\n\n\/\/ Creates a picky version of a force\nfunc NewPicky(f newton.Force, zeroFor ...int) newton.Force {\n\n\treturn &PickyForce{force: f, zeroFor: zeroFor}\n}\n\nfunc (picky *PickyForce) Accel(bs []*newton.Body, i int) (a vect.Vector) {\n\n\tfor _, ignored := range picky.zeroFor {\n\n\t\tif ignored == i {\n\n\t\t\treturn vect.Zero\n\t\t}\n\t}\n\n\treturn picky.force.Accel(bs, i)\n}\n\ntype ParticleRect struct {\n\t*newton.System\n\trows, cols int\n}\n\n\/\/ Creates a rectangular grid of particles\nfunc NewRect(rows, cols int) *ParticleRect {\n\n\trect := &ParticleRect{\n\t\trows: rows, cols: cols,\n\t}\n\n\trect.System = newton.NewSystem(newton.Verlet, rows*cols)\n\n\tfor i := 0; i < rect.Bodies(); i++ {\n\n\t\trect.Body(i).SetMass(1)\n\t}\n\n\treturn rect\n}\n\nfunc main() {\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/migration\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/provision\/docker\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"launchpad.net\/gnuflag\"\n)\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype migrateCmd struct {\n\tfs *gnuflag.FlagSet\n\tdry bool\n}\n\nfunc (*migrateCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"migrate\",\n\t\tUsage: \"migrate\",\n\t\tDesc: \"Runs migrations from previous versions of tsr\",\n\t}\n}\n\nfunc (c *migrateCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\terr := migration.Register(\"migrate-docker-images\", c.migrateImages)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr := migration.Register(\"migrate-pool\", c.migratePool)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn migration.Run(context.Stdout, c.dry)\n}\n\nfunc (c *migrateCmd) migrateImages() error {\n\tprovisioner, _ := getProvisioner()\n\tif provisioner == \"docker\" {\n\t\tp, err := provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = p.(provision.InitializableProvisioner).Initialize()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn docker.MigrateImages()\n\t}\n\treturn nil\n}\n\nfunc (c *migrateCmd) migratePool() error {\n\tdb, err := db.Conn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\tdbName, _ := config.GetString(\"database:name\")\n\tfromColl := fmt.Sprintf(\"%s.docker_scheduler\", dbName)\n\ttoColl := fmt.Sprintf(\"%s.pool\", dbName)\n\tsession := db.Collection(\"docker_scheduler\").Database.Session\n\terr = session.Run(bson.D{{\"renameCollection\", fromColl}, {\"to\", toColl}}, &bson.M{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar apps []app.App\n\terr = db.Apps().Find().All(&apps)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, a := range apps {\n\t\ta.SetPool()\n\t\terr = db.Apps().Update(bson.M{\"name\": a.Name}, bson.M{\"$set\": bson.M{\"pool\": a.Pool}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *migrateCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"migrate\", gnuflag.ExitOnError)\n\t\tc.fs.BoolVar(&c.dry, \"dry\", false, \"Do not run migrations, just print what would run\")\n\t\tc.fs.BoolVar(&c.dry, \"n\", false, \"Do not run migrations, just print what would run\")\n\t}\n\treturn c.fs\n}\n<commit_msg>cmd\/tsr\/migrate: fix test<commit_after>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/migration\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/provision\/docker\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"launchpad.net\/gnuflag\"\n)\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype migrateCmd struct {\n\tfs *gnuflag.FlagSet\n\tdry bool\n}\n\nfunc (*migrateCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"migrate\",\n\t\tUsage: \"migrate\",\n\t\tDesc: \"Runs migrations from previous versions of tsr\",\n\t}\n}\n\nfunc (c *migrateCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\terr := migration.Register(\"migrate-docker-images\", c.migrateImages)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = migration.Register(\"migrate-pool\", c.migratePool)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn migration.Run(context.Stdout, c.dry)\n}\n\nfunc (c *migrateCmd) migrateImages() error {\n\tprovisioner, _ := getProvisioner()\n\tif provisioner == \"docker\" {\n\t\tp, err := provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = p.(provision.InitializableProvisioner).Initialize()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn docker.MigrateImages()\n\t}\n\treturn nil\n}\n\nfunc (c *migrateCmd) migratePool() error {\n\tdb, err := db.Conn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\tdbName, _ := config.GetString(\"database:name\")\n\tfromColl := fmt.Sprintf(\"%s.docker_scheduler\", dbName)\n\ttoColl := fmt.Sprintf(\"%s.pool\", dbName)\n\tsession := db.Collection(\"docker_scheduler\").Database.Session\n\terr = session.Run(bson.D{{\"renameCollection\", fromColl}, {\"to\", toColl}}, &bson.M{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar apps []app.App\n\terr = db.Apps().Find(nil).All(&apps)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, a := range apps {\n\t\ta.SetPool()\n\t\terr = db.Apps().Update(bson.M{\"name\": a.Name}, bson.M{\"$set\": bson.M{\"pool\": a.Pool}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *migrateCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = gnuflag.NewFlagSet(\"migrate\", gnuflag.ExitOnError)\n\t\tc.fs.BoolVar(&c.dry, \"dry\", false, \"Do not run migrations, just print what would run\")\n\t\tc.fs.BoolVar(&c.dry, \"n\", false, \"Do not run migrations, just print what would run\")\n\t}\n\treturn c.fs\n}\n<|endoftext|>"} {"text":"<commit_before>package linux\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Meminfo struct {\n\tMemTotal uint64\n\tMemFree uint64\n\tBuffers uint64\n\tCached uint64\n\tSwapCached uint64\n\tActive uint64\n\tInactive uint64\n\tActiveAnon uint64\n\tInactiveAnon uint64\n\tActiveFile uint64\n\tInactiveFile uint64\n\tUnevictable uint64\n\tMlocked uint64\n\tSwapTotal uint64\n\tSwapFree uint64\n\tDirty uint64\n\tWriteback uint64\n\tAnonPages uint64\n\tMapped uint64\n\tShmem uint64\n\tSlab uint64\n\tSReclaimable uint64\n\tSUnreclaim uint64\n\tKernelStack uint64\n\tPageTables uint64\n\tNFS_Unstable uint64\n\tBounce uint64\n\tWritebackTmp uint64\n\tCommitLimit uint64\n\tCommitted_AS uint64\n\tVmallocTotal uint64\n\tVmallocUsed uint64\n\tVmallocChunk uint64\n\tHardwareCorrupted uint64\n\tAnonHugePages uint64\n\tHugePages_Total uint64\n\tHugePages_Free uint64\n\tHugePages_Rsvd uint64\n\tHugePages_Surp uint64\n\tHugepagesize uint64\n\tDirectMap4k uint64\n\tDirectMap2M uint64\n}\n\nvar procMeminfo = \"\/proc\/meminfo\"\n\nfunc parseMeminfoLine(line string) (name string, val uint64, err error) {\n\tfields := strings.Fields(line)\n\tif len(fields) < 2 {\n\t\terr = fmt.Errorf(\"meminfo line needs at least two fields: %s\", line)\n\t\treturn\n\t}\n\tif len(fields[0]) < 2 {\n\t\terr = fmt.Errorf(\"meminfo field is too short: %s\", fields[0])\n\t\treturn\n\t}\n\tname = fields[0]\n\tname = name[0 : len(name)-1] \/\/ truncate last character\n\tif val, err = strconv.ParseUint(fields[1], 10, 64); err != nil {\n\t\terr = errors.New(\"could not parse stat line: \" + err.Error())\n\t\treturn\n\t}\n\tif len(fields) == 3 {\n\t\tswitch fields[2] {\n\t\tcase \"kB\":\n\t\t\tval = val * 1024\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unexpected multiplier: %s\", fields[2])\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc ReadMeminfo() (meminfo Meminfo, err error) {\n\tfile, err := os.Open(procMeminfo)\n\tif err != nil {\n\t\treturn meminfo, err\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tvar name string\n\tvar val uint64\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tif name, val, err = parseMeminfoLine(scanner.Text()); err != nil {\n\t\t\treturn meminfo, err\n\t\t}\n\t\tswitch name {\n\n\t\tcase \"MemTotal\":\n\t\t\tmeminfo.MemTotal = val\n\t\tcase \"MemFree\":\n\t\t\tmeminfo.MemFree = val\n\t\tcase \"Buffers\":\n\t\t\tmeminfo.Buffers = val\n\t\tcase \"Cached\":\n\t\t\tmeminfo.Cached = val\n\t\tcase \"SwapCached\":\n\t\t\tmeminfo.SwapCached = val\n\t\tcase \"Active\":\n\t\t\tmeminfo.Active = val\n\t\tcase \"Inactive\":\n\t\t\tmeminfo.Inactive = val\n\t\tcase \"Active(anon)\":\n\t\t\tmeminfo.ActiveAnon = val\n\t\tcase \"Inactive(anon)\":\n\t\t\tmeminfo.InactiveAnon = val\n\t\tcase \"Active(file)\":\n\t\t\tmeminfo.ActiveFile = val\n\t\tcase \"Inactive(file)\":\n\t\t\tmeminfo.InactiveFile = val\n\t\tcase \"Unevictable\":\n\t\t\tmeminfo.Unevictable = val\n\t\tcase \"Mlocked\":\n\t\t\tmeminfo.Mlocked = val\n\t\tcase \"SwapTotal\":\n\t\t\tmeminfo.SwapTotal = val\n\t\tcase \"SwapFree\":\n\t\t\tmeminfo.SwapFree = val\n\t\tcase \"Dirty\":\n\t\t\tmeminfo.Dirty = val\n\t\tcase \"Writeback\":\n\t\t\tmeminfo.Writeback = val\n\t\tcase \"AnonPages\":\n\t\t\tmeminfo.AnonPages = val\n\t\tcase \"Mapped\":\n\t\t\tmeminfo.Mapped = val\n\t\tcase \"Shmem\":\n\t\t\tmeminfo.Shmem = val\n\t\tcase \"Slab\":\n\t\t\tmeminfo.Slab = val\n\t\tcase \"SReclaimable\":\n\t\t\tmeminfo.SReclaimable = val\n\t\tcase \"SUnreclaim\":\n\t\t\tmeminfo.SUnreclaim = val\n\t\tcase \"KernelStack\":\n\t\t\tmeminfo.KernelStack = val\n\t\tcase \"PageTables\":\n\t\t\tmeminfo.PageTables = val\n\t\tcase \"NFS_Unstable\":\n\t\t\tmeminfo.NFS_Unstable = val\n\t\tcase \"Bounce\":\n\t\t\tmeminfo.Bounce = val\n\t\tcase \"WritebackTmp\":\n\t\t\tmeminfo.WritebackTmp = val\n\t\tcase \"CommitLimit\":\n\t\t\tmeminfo.CommitLimit = val\n\t\tcase \"Committed_AS\":\n\t\t\tmeminfo.Committed_AS = val\n\t\tcase \"VmallocTotal\":\n\t\t\tmeminfo.VmallocTotal = val\n\t\tcase \"VmallocUsed\":\n\t\t\tmeminfo.VmallocUsed = val\n\t\tcase \"VmallocChunk\":\n\t\t\tmeminfo.VmallocChunk = val\n\t\tcase \"HardwareCorrupted\":\n\t\t\tmeminfo.HardwareCorrupted = val\n\t\tcase \"AnonHugePages\":\n\t\t\tmeminfo.AnonHugePages = val\n\t\tcase \"HugePages_Total\":\n\t\t\tmeminfo.HugePages_Total = val\n\t\tcase \"HugePages_Free\":\n\t\t\tmeminfo.HugePages_Free = val\n\t\tcase \"HugePages_Rsvd\":\n\t\t\tmeminfo.HugePages_Rsvd = val\n\t\tcase \"HugePages_Surp\":\n\t\t\tmeminfo.HugePages_Surp = val\n\t\tcase \"Hugepagesize\":\n\t\t\tmeminfo.Hugepagesize = val\n\t\tcase \"DirectMap4k\":\n\t\t\tmeminfo.DirectMap4k = val\n\t\tcase \"DirectMap2M\":\n\t\t\tmeminfo.DirectMap2M = val\n\t\tdefault:\n\t\t\tlog.Printf(\"ignoring unknown meminfo field %s\", name)\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>add DirectMap1G to meminfo<commit_after>package linux\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Meminfo struct {\n\tMemTotal uint64\n\tMemFree uint64\n\tBuffers uint64\n\tCached uint64\n\tSwapCached uint64\n\tActive uint64\n\tInactive uint64\n\tActiveAnon uint64\n\tInactiveAnon uint64\n\tActiveFile uint64\n\tInactiveFile uint64\n\tUnevictable uint64\n\tMlocked uint64\n\tSwapTotal uint64\n\tSwapFree uint64\n\tDirty uint64\n\tWriteback uint64\n\tAnonPages uint64\n\tMapped uint64\n\tShmem uint64\n\tSlab uint64\n\tSReclaimable uint64\n\tSUnreclaim uint64\n\tKernelStack uint64\n\tPageTables uint64\n\tNFS_Unstable uint64\n\tBounce uint64\n\tWritebackTmp uint64\n\tCommitLimit uint64\n\tCommitted_AS uint64\n\tVmallocTotal uint64\n\tVmallocUsed uint64\n\tVmallocChunk uint64\n\tHardwareCorrupted uint64\n\tAnonHugePages uint64\n\tHugePages_Total uint64\n\tHugePages_Free uint64\n\tHugePages_Rsvd uint64\n\tHugePages_Surp uint64\n\tHugepagesize uint64\n\tDirectMap4k uint64\n\tDirectMap2M uint64\n\tDirectMap1G uint64\n}\n\nvar procMeminfo = \"\/proc\/meminfo\"\n\nfunc parseMeminfoLine(line string) (name string, val uint64, err error) {\n\tfields := strings.Fields(line)\n\tif len(fields) < 2 {\n\t\terr = fmt.Errorf(\"meminfo line needs at least two fields: %s\", line)\n\t\treturn\n\t}\n\tif len(fields[0]) < 2 {\n\t\terr = fmt.Errorf(\"meminfo field is too short: %s\", fields[0])\n\t\treturn\n\t}\n\tname = fields[0]\n\tname = name[0 : len(name)-1] \/\/ truncate last character\n\tif val, err = strconv.ParseUint(fields[1], 10, 64); err != nil {\n\t\terr = errors.New(\"could not parse stat line: \" + err.Error())\n\t\treturn\n\t}\n\tif len(fields) == 3 {\n\t\tswitch fields[2] {\n\t\tcase \"kB\":\n\t\t\tval = val * 1024\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unexpected multiplier: %s\", fields[2])\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc ReadMeminfo() (meminfo Meminfo, err error) {\n\tfile, err := os.Open(procMeminfo)\n\tif err != nil {\n\t\treturn meminfo, err\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tvar name string\n\tvar val uint64\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tif name, val, err = parseMeminfoLine(scanner.Text()); err != nil {\n\t\t\treturn meminfo, err\n\t\t}\n\t\tswitch name {\n\n\t\tcase \"MemTotal\":\n\t\t\tmeminfo.MemTotal = val\n\t\tcase \"MemFree\":\n\t\t\tmeminfo.MemFree = val\n\t\tcase \"Buffers\":\n\t\t\tmeminfo.Buffers = val\n\t\tcase \"Cached\":\n\t\t\tmeminfo.Cached = val\n\t\tcase \"SwapCached\":\n\t\t\tmeminfo.SwapCached = val\n\t\tcase \"Active\":\n\t\t\tmeminfo.Active = val\n\t\tcase \"Inactive\":\n\t\t\tmeminfo.Inactive = val\n\t\tcase \"Active(anon)\":\n\t\t\tmeminfo.ActiveAnon = val\n\t\tcase \"Inactive(anon)\":\n\t\t\tmeminfo.InactiveAnon = val\n\t\tcase \"Active(file)\":\n\t\t\tmeminfo.ActiveFile = val\n\t\tcase \"Inactive(file)\":\n\t\t\tmeminfo.InactiveFile = val\n\t\tcase \"Unevictable\":\n\t\t\tmeminfo.Unevictable = val\n\t\tcase \"Mlocked\":\n\t\t\tmeminfo.Mlocked = val\n\t\tcase \"SwapTotal\":\n\t\t\tmeminfo.SwapTotal = val\n\t\tcase \"SwapFree\":\n\t\t\tmeminfo.SwapFree = val\n\t\tcase \"Dirty\":\n\t\t\tmeminfo.Dirty = val\n\t\tcase \"Writeback\":\n\t\t\tmeminfo.Writeback = val\n\t\tcase \"AnonPages\":\n\t\t\tmeminfo.AnonPages = val\n\t\tcase \"Mapped\":\n\t\t\tmeminfo.Mapped = val\n\t\tcase \"Shmem\":\n\t\t\tmeminfo.Shmem = val\n\t\tcase \"Slab\":\n\t\t\tmeminfo.Slab = val\n\t\tcase \"SReclaimable\":\n\t\t\tmeminfo.SReclaimable = val\n\t\tcase \"SUnreclaim\":\n\t\t\tmeminfo.SUnreclaim = val\n\t\tcase \"KernelStack\":\n\t\t\tmeminfo.KernelStack = val\n\t\tcase \"PageTables\":\n\t\t\tmeminfo.PageTables = val\n\t\tcase \"NFS_Unstable\":\n\t\t\tmeminfo.NFS_Unstable = val\n\t\tcase \"Bounce\":\n\t\t\tmeminfo.Bounce = val\n\t\tcase \"WritebackTmp\":\n\t\t\tmeminfo.WritebackTmp = val\n\t\tcase \"CommitLimit\":\n\t\t\tmeminfo.CommitLimit = val\n\t\tcase \"Committed_AS\":\n\t\t\tmeminfo.Committed_AS = val\n\t\tcase \"VmallocTotal\":\n\t\t\tmeminfo.VmallocTotal = val\n\t\tcase \"VmallocUsed\":\n\t\t\tmeminfo.VmallocUsed = val\n\t\tcase \"VmallocChunk\":\n\t\t\tmeminfo.VmallocChunk = val\n\t\tcase \"HardwareCorrupted\":\n\t\t\tmeminfo.HardwareCorrupted = val\n\t\tcase \"AnonHugePages\":\n\t\t\tmeminfo.AnonHugePages = val\n\t\tcase \"HugePages_Total\":\n\t\t\tmeminfo.HugePages_Total = val\n\t\tcase \"HugePages_Free\":\n\t\t\tmeminfo.HugePages_Free = val\n\t\tcase \"HugePages_Rsvd\":\n\t\t\tmeminfo.HugePages_Rsvd = val\n\t\tcase \"HugePages_Surp\":\n\t\t\tmeminfo.HugePages_Surp = val\n\t\tcase \"Hugepagesize\":\n\t\t\tmeminfo.Hugepagesize = val\n\t\tcase \"DirectMap4k\":\n\t\t\tmeminfo.DirectMap4k = val\n\t\tcase \"DirectMap2M\":\n\t\t\tmeminfo.DirectMap2M = val\n\t\tcase \"DirectMap1G\":\n\t\t\tmeminfo.DirectMap1G = val\n\t\tdefault:\n\t\t\tlog.Printf(\"ignoring unknown meminfo field %s\", name)\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ A facebook graph api client in go.\n\/\/ https:\/\/github.com\/huandu\/facebook\/\n\/\/ \n\/\/ Copyright 2012-2014, Huan Du\n\/\/ Licensed under the MIT license\n\/\/ https:\/\/github.com\/huandu\/facebook\/blob\/master\/LICENSE\n\n\/\/ This is a Go library fully supports Facebook Graph API with file upload,\n\/\/ batch request and FQL. It also supports Graph API 2.0 using the same methods.\n\/\/\n\/\/ Library design is highly influenced by facebook official PHP\/JS SDK.\n\/\/ If you have used PHP\/JS SDK before, it should look quite familiar.\n\/\/\n\/\/ Here is a list of common scenarios to help you to get started.\n\/\/\n\/\/ Scenario 1: Read a graph `user` object without access token.\n\/\/ res, _ := facebook.Get(\"\/huandu\", nil)\n\/\/ fmt.Println(\"my facebook id is\", res[\"id\"])\n\/\/\n\/\/ Scenario 2: Read a graph `user` object with a valid access token.\n\/\/ res, _ := facebook.Get(\"\/me\/feed\", facebook.Params{\n\/\/ \"access_token\": \"a-valid-access-token\",\n\/\/ })\n\/\/\n\/\/ \/\/ read my last feed.\n\/\/ fmt.Println(\"my latest feed story is:\", res.Get(\"data.0.story\"))\n\/\/\n\/\/ Scenario 3: Use App and Session struct. It's recommended to use them\n\/\/ in a production app.\n\/\/ \/\/ create a global App var to hold your app id and secret.\n\/\/ var globalApp = facebook.New(\"your-app-id\", \"your-app-secret\")\n\/\/\n\/\/ \/\/ facebook asks for a valid redirect uri when parsing signed request.\n\/\/ \/\/ it's a new enforced policy starting in late 2013.\n\/\/ \/\/ it can be omitted in a mobile app server.\n\/\/ globalApp.RedirectUri = \"http:\/\/your-site-canvas-url\/\"\n\/\/\n\/\/ \/\/ here comes a client with a facebook signed request string in query string.\n\/\/ \/\/ creates a new session with signed request.\n\/\/ session, _ := globalApp.SessionFromSignedRequest(signedRequest)\n\/\/\n\/\/ \/\/ or, you just get a valid access token in other way.\n\/\/ \/\/ creates a session directly.\n\/\/ seesion := globalApp.Session(token)\n\/\/\n\/\/ \/\/ use session to send api request with your access token.\n\/\/ res, _ := session.Get(\"\/me\/feed\", nil)\n\/\/\n\/\/ \/\/ validate access token. err is nil if token is valid.\n\/\/ err := session.Validate()\n\/\/\n\/\/ Scenario 4: Read graph api response and decode result into a struct.\n\/\/ Struct tag definition is compatible with the definition in `encoding\/json`.\n\/\/ \/\/ define a facebook feed object.\n\/\/ type FacebookFeed struct {\n\/\/ Id string `facebook:\",required\"` \/\/ must exist\n\/\/ Story string\n\/\/ From *FacebookFeedFrom\n\/\/ CreatedTime string `facebook:\"created_time\"` \/\/ use customized field name\n\/\/ }\n\/\/\n\/\/ type FacebookFeedFrom struct {\n\/\/ Name, Id string\n\/\/ }\n\/\/\n\/\/ \/\/ create a feed object direct from graph api result.\n\/\/ var feed FacebookFeed\n\/\/ res, _ := session.Get(\"\/me\/feed\", nil)\n\/\/ res.DecodeField(\"data.0\", &feed) \/\/ read latest feed\n\/\/\n\/\/ Scenario 5: Send a batch request.\n\/\/ params1 := Params{\n\/\/ \"method\": facebook.GET,\n\/\/ \"relative_url\": \"huandu\",\n\/\/ }\n\/\/ params2 := Params{\n\/\/ \"method\": facebook.GET,\n\/\/ \"relative_url\": uint64(100002828925788),\n\/\/ }\n\/\/ res, err := facebook.BatchApi(your_access_token, params1, params2)\n\/\/ \/\/ res is a []Result. if err is nil, res[0] and res[1] are response to\n\/\/ \/\/ params1 and params2 respectively.\n\/\/\n\/\/ Scenario 6: Use it in Google App Engine with `appengine\/urlfetch` package.\n\/\/ import (\n\/\/ \"appengine\"\n\/\/ \"appengine\/urlfetch\"\n\/\/ )\n\/\/\n\/\/ \/\/ suppose it's the appengine context initialized somewhere.\n\/\/ var context appengine.Context\n\/\/\n\/\/ \/\/ default Session object uses http.DefaultClient which is not supported\n\/\/ \/\/ by appengine. we have to create a Session and assign it a special client.\n\/\/ seesion := globalApp.Session(\"a-access-token\")\n\/\/ session.HttpClient = urlfetch.Client(context)\n\/\/\n\/\/ \/\/ now, session uses appengine http client now.\n\/\/ res, err := session.Get(\"\/me\", nil)\n\/\/\n\/\/ Scenario 7: Select Graph API version. See https:\/\/developers.facebook.com\/docs\/apps\/versions .\n\/\/ \/\/ this library uses default version by default.\n\/\/ \/\/ change following global variable to specific a global default version.\n\/\/ Version = \"v2.0\"\n\n\/\/ \/\/ now you will get an error as v2.0 api doesn't allow you to do so.\n\/\/ Api(\"huan.du\", GET, nil)\n\/\/\n\/\/ \/\/ you can also specify version per session.\n\/\/ session := &Session{}\n\/\/ session.Version = \"v2.0\" \/\/ overwrite global default.\n\/\/\n\/\/ I've try my best to add enough information in every public method and type.\n\/\/ If you still have any question or suggestion, feel free to create an issue\n\/\/ or send pull request to me. Thank you.\n\/\/\n\/\/ This library doesn't implement any deprecated old RESTful API. And it won't.\npackage facebook\n\nvar (\n \/\/ Default facebook api version.\n \/\/ It can be \"v1.0\" or \"v2.0\" or empty per facebook current document.\n \/\/ See https:\/\/developers.facebook.com\/docs\/apps\/versions for details.\n Version string\n)\n\n\/\/ Makes a facebook graph api call.\n\/\/\n\/\/ Method can be GET, POST, DELETE or PUT.\n\/\/\n\/\/ Params represents query strings in this call.\n\/\/ Keys and values in params will be encoded for URL automatically. So there is\n\/\/ no need to encode keys or values in params manually. Params can be nil.\n\/\/\n\/\/ If you want to get\n\/\/ https:\/\/graph.facebook.com\/huandu?fields=name,username\n\/\/ Api should be called as following\n\/\/ Api(\"\/huandu\", GET, Params{\"fields\": \"name,username\"})\n\/\/ or in a simplified way\n\/\/ Get(\"\/huandu\", Params{\"fields\": \"name,username\"})\n\/\/\n\/\/ Api is a wrapper of Session.Api(). It's designed for graph api that doesn't require\n\/\/ app id, app secret and access token. It can be called in multiple goroutines.\n\/\/\n\/\/ If app id, app secret or access token is required in graph api, caller should\n\/\/ create a new facebook session through App instance instead.\nfunc Api(path string, method Method, params Params) (Result, error) {\n return defaultSession.Api(path, method, params)\n}\n\n\/\/ Get is a short hand of Api(path, GET, params).\nfunc Get(path string, params Params) (Result, error) {\n return Api(path, GET, params)\n}\n\n\/\/ Post is a short hand of Api(path, POST, params).\nfunc Post(path string, params Params) (Result, error) {\n return Api(path, POST, params)\n}\n\n\/\/ Delete is a short hand of Api(path, DELETE, params).\nfunc Delete(path string, params Params) (Result, error) {\n return Api(path, DELETE, params)\n}\n\n\/\/ Put is a short hand of Api(path, PUT, params).\nfunc Put(path string, params Params) (Result, error) {\n return Api(path, PUT, params)\n}\n\n\/\/ Makes a batch facebook graph api call.\n\/\/\n\/\/ BatchApi supports most kinds of batch calls defines in facebook batch api document,\n\/\/ except uploading binary data. Use Batch to do so.\n\/\/\n\/\/ See https:\/\/developers.facebook.com\/docs\/reference\/api\/batch\/ to learn more about Batch Requests.\nfunc BatchApi(accessToken string, params ...Params) ([]Result, error) {\n return Batch(Params{\"access_token\": accessToken}, params...)\n}\n\n\/\/ Makes a batch facebook graph api call.\n\/\/ Batch is designed for more advanced usage including uploading binary files.\n\/\/\n\/\/ An uploading files sample\n\/\/ \/\/ equivalent to following curl command (borrowed from facebook docs)\n\/\/ \/\/ curl \\\n\/\/ \/\/ -F 'access_token=…' \\\n\/\/ \/\/ -F 'batch=[{\"method\":\"POST\",\"relative_url\":\"me\/photos\",\"body\":\"message=My cat photo\",\"attached_files\":\"file1\"},{\"method\":\"POST\",\"relative_url\":\"me\/photos\",\"body\":\"message=My dog photo\",\"attached_files\":\"file2\"},]' \\\n\/\/ \/\/ -F 'file1=@cat.gif' \\\n\/\/ \/\/ -F 'file2=@dog.jpg' \\\n\/\/ \/\/ https:\/\/graph.facebook.com\n\/\/ Batch(Params{\n\/\/ \"access_token\": \"the-access-token\",\n\/\/ \"file1\": File(\"cat.gif\"),\n\/\/ \"file2\": File(\"dog.jpg\"),\n\/\/ }, Params{\n\/\/ \"method\": \"POST\",\n\/\/ \"relative_url\": \"me\/photos\",\n\/\/ \"body\": \"message=My cat photo\",\n\/\/ \"attached_files\": \"file1\",\n\/\/ }, Params{\n\/\/ \"method\": \"POST\",\n\/\/ \"relative_url\": \"me\/photos\",\n\/\/ \"body\": \"message=My dog photo\",\n\/\/ \"attached_files\": \"file2\",\n\/\/ })\n\/\/\n\/\/ See https:\/\/developers.facebook.com\/docs\/reference\/api\/batch\/ to learn more about Batch Requests.\nfunc Batch(batchParams Params, params ...Params) ([]Result, error) {\n return defaultSession.Batch(batchParams, params...)\n}\n<commit_msg>fix a doc typo.<commit_after>\/\/ A facebook graph api client in go.\n\/\/ https:\/\/github.com\/huandu\/facebook\/\n\/\/ \n\/\/ Copyright 2012-2014, Huan Du\n\/\/ Licensed under the MIT license\n\/\/ https:\/\/github.com\/huandu\/facebook\/blob\/master\/LICENSE\n\n\/\/ This is a Go library fully supports Facebook Graph API with file upload,\n\/\/ batch request and FQL. It also supports Graph API 2.0 using the same methods.\n\/\/\n\/\/ Library design is highly influenced by facebook official PHP\/JS SDK.\n\/\/ If you have used PHP\/JS SDK before, it should look quite familiar.\n\/\/\n\/\/ Here is a list of common scenarios to help you to get started.\n\/\/\n\/\/ Scenario 1: Read a graph `user` object without access token.\n\/\/ res, _ := facebook.Get(\"\/huandu\", nil)\n\/\/ fmt.Println(\"my facebook id is\", res[\"id\"])\n\/\/\n\/\/ Scenario 2: Read a graph `user` object with a valid access token.\n\/\/ res, _ := facebook.Get(\"\/me\/feed\", facebook.Params{\n\/\/ \"access_token\": \"a-valid-access-token\",\n\/\/ })\n\/\/\n\/\/ \/\/ read my last feed.\n\/\/ fmt.Println(\"my latest feed story is:\", res.Get(\"data.0.story\"))\n\/\/\n\/\/ Scenario 3: Use App and Session struct. It's recommended to use them\n\/\/ in a production app.\n\/\/ \/\/ create a global App var to hold your app id and secret.\n\/\/ var globalApp = facebook.New(\"your-app-id\", \"your-app-secret\")\n\/\/\n\/\/ \/\/ facebook asks for a valid redirect uri when parsing signed request.\n\/\/ \/\/ it's a new enforced policy starting in late 2013.\n\/\/ \/\/ it can be omitted in a mobile app server.\n\/\/ globalApp.RedirectUri = \"http:\/\/your-site-canvas-url\/\"\n\/\/\n\/\/ \/\/ here comes a client with a facebook signed request string in query string.\n\/\/ \/\/ creates a new session with signed request.\n\/\/ session, _ := globalApp.SessionFromSignedRequest(signedRequest)\n\/\/\n\/\/ \/\/ or, you just get a valid access token in other way.\n\/\/ \/\/ creates a session directly.\n\/\/ seesion := globalApp.Session(token)\n\/\/\n\/\/ \/\/ use session to send api request with your access token.\n\/\/ res, _ := session.Get(\"\/me\/feed\", nil)\n\/\/\n\/\/ \/\/ validate access token. err is nil if token is valid.\n\/\/ err := session.Validate()\n\/\/\n\/\/ Scenario 4: Read graph api response and decode result into a struct.\n\/\/ Struct tag definition is compatible with the definition in `encoding\/json`.\n\/\/ \/\/ define a facebook feed object.\n\/\/ type FacebookFeed struct {\n\/\/ Id string `facebook:\",required\"` \/\/ must exist\n\/\/ Story string\n\/\/ From *FacebookFeedFrom\n\/\/ CreatedTime string `facebook:\"created_time\"` \/\/ use customized field name\n\/\/ }\n\/\/\n\/\/ type FacebookFeedFrom struct {\n\/\/ Name, Id string\n\/\/ }\n\/\/\n\/\/ \/\/ create a feed object direct from graph api result.\n\/\/ var feed FacebookFeed\n\/\/ res, _ := session.Get(\"\/me\/feed\", nil)\n\/\/ res.DecodeField(\"data.0\", &feed) \/\/ read latest feed\n\/\/\n\/\/ Scenario 5: Send a batch request.\n\/\/ params1 := Params{\n\/\/ \"method\": facebook.GET,\n\/\/ \"relative_url\": \"huandu\",\n\/\/ }\n\/\/ params2 := Params{\n\/\/ \"method\": facebook.GET,\n\/\/ \"relative_url\": uint64(100002828925788),\n\/\/ }\n\/\/ res, err := facebook.BatchApi(your_access_token, params1, params2)\n\/\/ \/\/ res is a []Result. if err is nil, res[0] and res[1] are response to\n\/\/ \/\/ params1 and params2 respectively.\n\/\/\n\/\/ Scenario 6: Use it in Google App Engine with `appengine\/urlfetch` package.\n\/\/ import (\n\/\/ \"appengine\"\n\/\/ \"appengine\/urlfetch\"\n\/\/ )\n\/\/\n\/\/ \/\/ suppose it's the appengine context initialized somewhere.\n\/\/ var context appengine.Context\n\/\/\n\/\/ \/\/ default Session object uses http.DefaultClient which is not supported\n\/\/ \/\/ by appengine. we have to create a Session and assign it a special client.\n\/\/ seesion := globalApp.Session(\"a-access-token\")\n\/\/ session.HttpClient = urlfetch.Client(context)\n\/\/\n\/\/ \/\/ now, session uses appengine http client now.\n\/\/ res, err := session.Get(\"\/me\", nil)\n\/\/\n\/\/ Scenario 7: Select Graph API version. See https:\/\/developers.facebook.com\/docs\/apps\/versions .\n\/\/ \/\/ this library uses default version by default.\n\/\/ \/\/ change following global variable to specific a global default version.\n\/\/ Version = \"v2.0\"\n\/\/\n\/\/ \/\/ now you will get an error as v2.0 api doesn't allow you to do so.\n\/\/ Api(\"huan.du\", GET, nil)\n\/\/\n\/\/ \/\/ you can also specify version per session.\n\/\/ session := &Session{}\n\/\/ session.Version = \"v2.0\" \/\/ overwrite global default.\n\/\/\n\/\/ I've try my best to add enough information in every public method and type.\n\/\/ If you still have any question or suggestion, feel free to create an issue\n\/\/ or send pull request to me. Thank you.\n\/\/\n\/\/ This library doesn't implement any deprecated old RESTful API. And it won't.\npackage facebook\n\nvar (\n \/\/ Default facebook api version.\n \/\/ It can be \"v1.0\" or \"v2.0\" or empty per facebook current document.\n \/\/ See https:\/\/developers.facebook.com\/docs\/apps\/versions for details.\n Version string\n)\n\n\/\/ Makes a facebook graph api call.\n\/\/\n\/\/ Method can be GET, POST, DELETE or PUT.\n\/\/\n\/\/ Params represents query strings in this call.\n\/\/ Keys and values in params will be encoded for URL automatically. So there is\n\/\/ no need to encode keys or values in params manually. Params can be nil.\n\/\/\n\/\/ If you want to get\n\/\/ https:\/\/graph.facebook.com\/huandu?fields=name,username\n\/\/ Api should be called as following\n\/\/ Api(\"\/huandu\", GET, Params{\"fields\": \"name,username\"})\n\/\/ or in a simplified way\n\/\/ Get(\"\/huandu\", Params{\"fields\": \"name,username\"})\n\/\/\n\/\/ Api is a wrapper of Session.Api(). It's designed for graph api that doesn't require\n\/\/ app id, app secret and access token. It can be called in multiple goroutines.\n\/\/\n\/\/ If app id, app secret or access token is required in graph api, caller should\n\/\/ create a new facebook session through App instance instead.\nfunc Api(path string, method Method, params Params) (Result, error) {\n return defaultSession.Api(path, method, params)\n}\n\n\/\/ Get is a short hand of Api(path, GET, params).\nfunc Get(path string, params Params) (Result, error) {\n return Api(path, GET, params)\n}\n\n\/\/ Post is a short hand of Api(path, POST, params).\nfunc Post(path string, params Params) (Result, error) {\n return Api(path, POST, params)\n}\n\n\/\/ Delete is a short hand of Api(path, DELETE, params).\nfunc Delete(path string, params Params) (Result, error) {\n return Api(path, DELETE, params)\n}\n\n\/\/ Put is a short hand of Api(path, PUT, params).\nfunc Put(path string, params Params) (Result, error) {\n return Api(path, PUT, params)\n}\n\n\/\/ Makes a batch facebook graph api call.\n\/\/\n\/\/ BatchApi supports most kinds of batch calls defines in facebook batch api document,\n\/\/ except uploading binary data. Use Batch to do so.\n\/\/\n\/\/ See https:\/\/developers.facebook.com\/docs\/reference\/api\/batch\/ to learn more about Batch Requests.\nfunc BatchApi(accessToken string, params ...Params) ([]Result, error) {\n return Batch(Params{\"access_token\": accessToken}, params...)\n}\n\n\/\/ Makes a batch facebook graph api call.\n\/\/ Batch is designed for more advanced usage including uploading binary files.\n\/\/\n\/\/ An uploading files sample\n\/\/ \/\/ equivalent to following curl command (borrowed from facebook docs)\n\/\/ \/\/ curl \\\n\/\/ \/\/ -F 'access_token=…' \\\n\/\/ \/\/ -F 'batch=[{\"method\":\"POST\",\"relative_url\":\"me\/photos\",\"body\":\"message=My cat photo\",\"attached_files\":\"file1\"},{\"method\":\"POST\",\"relative_url\":\"me\/photos\",\"body\":\"message=My dog photo\",\"attached_files\":\"file2\"},]' \\\n\/\/ \/\/ -F 'file1=@cat.gif' \\\n\/\/ \/\/ -F 'file2=@dog.jpg' \\\n\/\/ \/\/ https:\/\/graph.facebook.com\n\/\/ Batch(Params{\n\/\/ \"access_token\": \"the-access-token\",\n\/\/ \"file1\": File(\"cat.gif\"),\n\/\/ \"file2\": File(\"dog.jpg\"),\n\/\/ }, Params{\n\/\/ \"method\": \"POST\",\n\/\/ \"relative_url\": \"me\/photos\",\n\/\/ \"body\": \"message=My cat photo\",\n\/\/ \"attached_files\": \"file1\",\n\/\/ }, Params{\n\/\/ \"method\": \"POST\",\n\/\/ \"relative_url\": \"me\/photos\",\n\/\/ \"body\": \"message=My dog photo\",\n\/\/ \"attached_files\": \"file2\",\n\/\/ })\n\/\/\n\/\/ See https:\/\/developers.facebook.com\/docs\/reference\/api\/batch\/ to learn more about Batch Requests.\nfunc Batch(batchParams Params, params ...Params) ([]Result, error) {\n return defaultSession.Batch(batchParams, params...)\n}\n<|endoftext|>"} {"text":"<commit_before>package chaining\n\nimport \"jecolasurdo\/go-chaining\/injectionbehavior\"\nimport \"errors\"\n\n\/\/ Flush returns the context's error and final result, and resets the context back to its default state.\nfunc (c *Context) Flush() (interface{}, error) {\n\tlocalError := c.LocalError\n\tfinalResult := c.PreviousActionResult\n\tc.LocalError = nil\n\tc.PreviousActionResult = nil\n\treturn finalResult, localError\n}\n\n\/\/ ApplyNullary executes an action which takes no arguments and returns only an error.\n\/\/\n\/\/ Since the supplied action returns no value aside from an error, the context will supply nil as a pseudo-result\n\/\/ for the supplied action. The context will then apply that nil to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyNullary(action func() error) {\n\tif c.LocalError == nil {\n\t\tc.LocalError = action()\n\t}\n}\n\n\/\/ ApplyNullaryIface executes an action which takes no arguments and returns a tuple of (interface{}, error).\n\/\/\n\/\/ The context will apply the supplied action's interface{} result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyNullaryIface(action func() (interface{}, error), behavior injectionbehavior.InjectionBehavior) {\n\trestatedAction := func(val interface{}) (interface{}, error) {\n\t\treturn action()\n\t}\n\tc.ApplyUnaryIface(restatedAction, ActionArg{Behavior: behavior})\n}\n\n\/\/ ApplyUnary executes an action which takes one argument returns only an error.\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ Since the supplied action returns no value aside from an error, the context will supply nil as a pseudo-result\n\/\/ for the supplied action. The context will then apply that nil to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyUnary(action func(interface{}) error, arg ActionArg) {\n\tc.LocalError = errors.New(\"ApplyUnary not implemented\")\n\tc.PreviousActionResult = nil\n}\n\n\/\/ ApplyUnaryIface executes an action which takes one argument, and returns a tuple of (interface{}, error).\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ The context will apply the supplied action's interface{} result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyUnaryIface(action func(interface{}) (interface{}, error), arg ActionArg) {\n\tif c.LocalError != nil {\n\t\treturn\n\t}\n\tvar valueToInject interface{}\n\tif arg.Behavior == injectionbehavior.InjectSuppliedValue {\n\t\tvalueToInject = arg.Value\n\t} else {\n\t\tvalueToInject = c.PreviousActionResult\n\t}\n\tresult, err := action(valueToInject)\n\tc.LocalError = err\n\tc.PreviousActionResult = result\n}\n\n\/\/ ApplyNullaryBool executes an action which takes no arguments and returns a tuple of (bool, error).\n\/\/\n\/\/ The context will apply the supplied action's bool result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\n\/\/\n\/\/ In addition to threading the (bool, error) tuple into the current context, NullaryBool itself also returns a bool.\n\/\/ This is useful for inlining the method in boolean statements.\nfunc (c *Context) ApplyNullaryBool(action func() (bool, error)) bool {\n\tif c.LocalError != nil {\n\t\treturn false\n\t}\n\n\tresult, err := action()\n\tc.LocalError = err\n\treturn result\n}\n\n\/\/ ApplyUnaryBool executes an action which takes one argument and returns a tuple of (bool, error).\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ The context will apply the supplied action's bool result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\n\/\/\n\/\/ In addition to threading the (bool, error) tuple into the current context, UnaryBool itself also returns a bool.\n\/\/ This is useful for inlining the method in boolean statements.\nfunc (c *Context) ApplyUnaryBool(action func(interface{}) (bool, error), arg ActionArg) bool {\n\trestatedAction := func(val interface{}) (interface{}, error) {\n\t\treturn action(val)\n\t}\n\tc.ApplyUnaryIface(restatedAction, arg)\n\treturn c.PreviousActionResult.(bool)\n}\n<commit_msg>Stated ApplyUnary in terms of ApplyUnaryIface<commit_after>package chaining\n\nimport \"jecolasurdo\/go-chaining\/injectionbehavior\"\n\n\/\/ Flush returns the context's error and final result, and resets the context back to its default state.\nfunc (c *Context) Flush() (interface{}, error) {\n\tlocalError := c.LocalError\n\tfinalResult := c.PreviousActionResult\n\tc.LocalError = nil\n\tc.PreviousActionResult = nil\n\treturn finalResult, localError\n}\n\n\/\/ ApplyNullary executes an action which takes no arguments and returns only an error.\n\/\/\n\/\/ Since the supplied action returns no value aside from an error, the context will supply nil as a pseudo-result\n\/\/ for the supplied action. The context will then apply that nil to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyNullary(action func() error) {\n\tif c.LocalError == nil {\n\t\tc.LocalError = action()\n\t}\n}\n\n\/\/ ApplyNullaryIface executes an action which takes no arguments and returns a tuple of (interface{}, error).\n\/\/\n\/\/ The context will apply the supplied action's interface{} result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyNullaryIface(action func() (interface{}, error), behavior injectionbehavior.InjectionBehavior) {\n\trestatedAction := func(val interface{}) (interface{}, error) {\n\t\treturn action()\n\t}\n\tc.ApplyUnaryIface(restatedAction, ActionArg{Behavior: behavior})\n}\n\n\/\/ ApplyUnary executes an action which takes one argument returns only an error.\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ Since the supplied action returns no value aside from an error, the context will supply nil as a pseudo-result\n\/\/ for the supplied action. The context will then apply that nil to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyUnary(action func(interface{}) error, arg ActionArg) {\n\trestatedAction := func(val interface{}) (interface{}, error) {\n\t\terr := action(val)\n\t\treturn nil, err\n\t}\n\tc.ApplyUnaryIface(restatedAction, arg)\n}\n\n\/\/ ApplyUnaryIface executes an action which takes one argument, and returns a tuple of (interface{}, error).\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ The context will apply the supplied action's interface{} result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\nfunc (c *Context) ApplyUnaryIface(action func(interface{}) (interface{}, error), arg ActionArg) {\n\tif c.LocalError != nil {\n\t\treturn\n\t}\n\tvar valueToInject interface{}\n\tif arg.Behavior == injectionbehavior.InjectSuppliedValue {\n\t\tvalueToInject = arg.Value\n\t} else {\n\t\tvalueToInject = c.PreviousActionResult\n\t}\n\tresult, err := action(valueToInject)\n\tc.LocalError = err\n\tc.PreviousActionResult = result\n}\n\n\/\/ ApplyNullaryBool executes an action which takes no arguments and returns a tuple of (bool, error).\n\/\/\n\/\/ The context will apply the supplied action's bool result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\n\/\/\n\/\/ In addition to threading the (bool, error) tuple into the current context, NullaryBool itself also returns a bool.\n\/\/ This is useful for inlining the method in boolean statements.\nfunc (c *Context) ApplyNullaryBool(action func() (bool, error)) bool {\n\tif c.LocalError != nil {\n\t\treturn false\n\t}\n\n\tresult, err := action()\n\tc.LocalError = err\n\treturn result\n}\n\n\/\/ ApplyUnaryBool executes an action which takes one argument and returns a tuple of (bool, error).\n\/\/\n\/\/ The single interface{} argument accepted by the action can be supplied via the arg parameter.\n\/\/\n\/\/ The context will apply the supplied action's bool result to the next action called within the context,\n\/\/ unless the override behavior for the next action dictates otherwise.\n\/\/\n\/\/ The error returned by the supplied action is also applied to the current context.\n\/\/ If error is not nil, subsequent actions executed within the same context will be ignored.\n\/\/\n\/\/ In addition to threading the (bool, error) tuple into the current context, UnaryBool itself also returns a bool.\n\/\/ This is useful for inlining the method in boolean statements.\nfunc (c *Context) ApplyUnaryBool(action func(interface{}) (bool, error), arg ActionArg) bool {\n\trestatedAction := func(val interface{}) (interface{}, error) {\n\t\treturn action(val)\n\t}\n\tc.ApplyUnaryIface(restatedAction, arg)\n\treturn c.PreviousActionResult.(bool)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tpubLen int = 12\n)\n\nfunc checkOTP(w http.ResponseWriter, r *http.Request, dal *Dal) {\n\tif r.URL.Query()[\"otp\"] == nil || r.URL.Query()[\"nonce\"] == nil || r.URL.Query()[\"id\"] == nil {\n\t\treply(w, \"\", \"\", MISSING_PARAMETER, \"\", dal)\n\t\treturn\n\t}\n\totp := r.URL.Query()[\"otp\"][0]\n\tnonce := r.URL.Query()[\"nonce\"][0]\n\tid := r.URL.Query()[\"id\"][0]\n\tif len(otp) < pubLen {\n\t\treply(w, otp, nonce, BAD_OTP, id, dal)\n\t\treturn\n\t}\n\tpub := otp[:pubLen]\n\n\tk, err := dal.GetKey(pub)\n\tif err != nil {\n\t\treply(w, otp, nonce, BAD_OTP, id, dal)\n\t\treturn\n\t} else {\n\t\tk, err = Gate(k, otp)\n\t\tif err != nil {\n\t\t\treply(w, otp, nonce, err.Error(), id, dal)\n\t\t\treturn\n\t\t} else {\n\t\t\terr = dal.UpdateKey(k)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"fail to update key counter\/session\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\treply(w, otp, nonce, OK, id, dal)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc Sign(values []string, key []byte) []byte {\n\tpayload := \"\"\n\tfor _, v := range values {\n\t\tpayload += v + \"&\"\n\t}\n\tpayload = payload[:len(payload)-1]\n\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write([]byte(payload))\n\treturn mac.Sum(nil)\n}\n\nfunc loadKey(id string, dal *Dal) ([]byte, error) {\n\ti, err := dal.GetApp(id)\n\tif err != nil {\n\t\treturn []byte{}, errors.New(NO_SUCH_CLIENT)\n\t}\n\n\treturn i, nil\n}\n\nfunc reply(w http.ResponseWriter, otp, nonce, status, id string, dal *Dal) {\n\tvalues := []string{}\n\tkey := []byte{}\n\terr := errors.New(\"\")\n\n\tvalues = append(values, \"nonce=\"+nonce)\n\tvalues = append(values, \"otp=\"+otp)\n\tif status != MISSING_PARAMETER {\n\t\tkey, err = loadKey(id, dal)\n\t\tif err == nil {\n\t\t\tvalues = append(values, \"status=\"+status)\n\t\t} else {\n\t\t\tvalues = append(values, \"status=\"+err.Error())\n\t\t}\n\t} else {\n\t\tvalues = append(values, \"status=\"+status)\n\t}\n\tvalues = append(values, \"t=\"+time.Now().Format(time.RFC3339))\n\tif status != MISSING_PARAMETER {\n\t\tvalues = append(values, \"h=\"+base64.StdEncoding.EncodeToString(Sign(values, key)))\n\t}\n\n\tret := \"\"\n\tfor _, v := range values {\n\t\tret += v + \"\\n\"\n\t}\n\n\tw.Write([]byte(ret))\n}\n\nfunc runAPI(dal *Dal, port string) {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"\/wsapi\/2.0\/verify\", func(w http.ResponseWriter, r *http.Request) {\n\t\tcheckOTP(w, r, dal)\n\t}).Methods(\"GET\")\n\n\thttp.Handle(\"\/\", r)\n\tlog.Println(\"Listening on port \" + port + \"...\")\n\thttp.ListenAndServe(\":\"+port, nil)\n}\n<commit_msg>including the key name in the response (for an eventual comparison)<commit_after>package main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"github.com\/gorilla\/mux\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tpubLen int = 12\n)\n\nfunc checkOTP(w http.ResponseWriter, r *http.Request, dal *Dal) {\n\tif r.URL.Query()[\"otp\"] == nil || r.URL.Query()[\"nonce\"] == nil || r.URL.Query()[\"id\"] == nil {\n\t\treply(w, \"\", \"\", \"\", MISSING_PARAMETER, \"\", dal)\n\t\treturn\n\t}\n\totp := r.URL.Query()[\"otp\"][0]\n\tnonce := r.URL.Query()[\"nonce\"][0]\n\tid := r.URL.Query()[\"id\"][0]\n\tname := \"\"\n\n\tif len(otp) < pubLen {\n\t\treply(w, otp, name, nonce, BAD_OTP, id, dal)\n\t\treturn\n\t}\n\tpub := otp[:pubLen]\n\n\tk, err := dal.GetKey(pub)\n\tif err != nil {\n\t\treply(w, otp, name, nonce, BAD_OTP, id, dal)\n\t\treturn\n\t} else {\n\t\tk, err = Gate(k, otp)\n\t\tif err != nil {\n\t\t\treply(w, otp, name, nonce, err.Error(), id, dal)\n\t\t\treturn\n\t\t} else {\n\t\t\terr = dal.UpdateKey(k)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"fail to update key counter\/session\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tname := k.Name\n\t\t\treply(w, otp, name, nonce, OK, id, dal)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc Sign(values []string, key []byte) []byte {\n\tpayload := \"\"\n\tfor _, v := range values {\n\t\tpayload += v + \"&\"\n\t}\n\tpayload = payload[:len(payload)-1]\n\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write([]byte(payload))\n\treturn mac.Sum(nil)\n}\n\nfunc loadKey(id string, dal *Dal) ([]byte, error) {\n\ti, err := dal.GetApp(id)\n\tif err != nil {\n\t\treturn []byte{}, errors.New(NO_SUCH_CLIENT)\n\t}\n\n\treturn i, nil\n}\n\nfunc reply(w http.ResponseWriter, otp, name, nonce, status, id string, dal *Dal) {\n\tvalues := []string{}\n\tkey := []byte{}\n\terr := errors.New(\"\")\n\n\tvalues = append(values, \"nonce=\"+nonce)\n\tvalues = append(values, \"otp=\"+otp)\n\tif status != MISSING_PARAMETER {\n\t\tkey, err = loadKey(id, dal)\n\t\tif err == nil {\n\t\t\tvalues = append(values, \"name=\"+name)\n\t\t\tvalues = append(values, \"status=\"+status)\n\t\t} else {\n\t\t\tvalues = append(values, \"status=\"+err.Error())\n\t\t}\n\t} else {\n\t\tvalues = append(values, \"status=\"+status)\n\t}\n\tvalues = append(values, \"t=\"+time.Now().Format(time.RFC3339))\n\tif status != MISSING_PARAMETER {\n\t\tvalues = append(values, \"h=\"+base64.StdEncoding.EncodeToString(Sign(values, key)))\n\t}\n\n\tret := \"\"\n\tfor _, v := range values {\n\t\tret += v + \"\\n\"\n\t}\n\n\tw.Write([]byte(ret))\n}\n\nfunc runAPI(dal *Dal, port string) {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"\/wsapi\/2.0\/verify\", func(w http.ResponseWriter, r *http.Request) {\n\t\tcheckOTP(w, r, dal)\n\t}).Methods(\"GET\")\n\n\thttp.Handle(\"\/\", r)\n\tlog.Println(\"Listening on port \" + port + \"...\")\n\thttp.ListenAndServe(\":\"+port, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/labstack\/echo\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"fmt\"\n)\n\n\/\/ api\n\n\/\/ отправляет json-ответ с массивом текущих ботов\nfunc listBot(c *echo.Context) error {\n\t\/\/\tTODO сформировать список ботов и отправить клиенту\n\t\/\/ временные данные\n\tlist, err := MB.GetListBots()\n\tif err != nil {\n\t\treturn c.JSON(http.StatusConflict, nil)\n\t}\n\treturn c.JSON(http.StatusOK, list)\n}\n\nfunc createBot(c *echo.Context) error {\n\t\/\/\tTODO создать нового бота\n\treturn c.String(http.StatusOK, \"ok\\n\")\n}\n\n\/\/ sendActionToBot отправить команду боту\nfunc sendActionToBot(c *echo.Context) error {\n\tparam := make(map[string]interface{})\n\tid := c.Param(\"id\")\t\t\t\/\/ распарсить и получить id\n\taction := c.Param(\"action\")\t\/\/ и команду\n\n\tswitch action {\n\tcase \"update\":\n\t\tProcessID, _ := strconv.Atoi(c.Query(\"ProcessID\"))\n\t\tparam[\"ProcessID\"] = ProcessID\n\tcase \"connect\":\n\t\tinfbot := make(map[string]string)\n\t\tinfbot[\"name\"] = \"bot1\"\n\t\tid, _ = MB.AddBot(infbot)\n\tcase \"disconnect\":\n\/\/\t\tTODO отключить бота от игрового клиента\n\t}\n\n\terr := MB.SendActionToBot(id, action, param)\t\/\/ отправить команду боту\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn c.String(http.StatusOK, \"ok\\n\")\n}\nfunc deleteBot(c *echo.Context) error {\n\t\/\/\tTODO удалить бота\n\treturn c.String(http.StatusOK, \"ok\\n\")\n}\n<commit_msg>В функции createBot TODO заменен на код: 1. получить данные от веб-клиента 2. передать данные менеджеру ботов и создать новый экземпляр бота 3. вернуть веб-клиенту сообщение, что команда выполнена.<commit_after>package main\n\nimport (\n\t\"github.com\/labstack\/echo\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"fmt\"\n)\n\n\/\/ api\n\n\/\/ отправляет json-ответ с массивом текущих ботов\nfunc listBot(c *echo.Context) error {\n\t\/\/\tTODO сформировать список ботов и отправить клиенту\n\t\/\/ временные данные\n\tlist, err := MB.GetListBots()\n\tif err != nil {\n\t\treturn c.JSON(http.StatusConflict, nil)\n\t}\n\treturn c.JSON(http.StatusOK, list)\n}\n\nfunc createBot(c *echo.Context) error {\n\tinfbot := make(map[string]string)\n\tinfbot[\"name\"] = c.Query(\"name\")\n\tinfbot[\"server\"] = c.Query(\"server\")\n\tinfbot[\"login\"] = c.Query(\"login\")\n\tinfbot[\"password\"] = c .Query(\"password\")\n\tid, err := MB.AddBot(infbot)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn c.String(http.StatusOK, id)\n}\n\n\/\/ sendActionToBot отправить команду боту\nfunc sendActionToBot(c *echo.Context) error {\n\tparam := make(map[string]interface{})\n\tid := c.Param(\"id\")\t\t\t\/\/ распарсить и получить id\n\taction := c.Param(\"action\")\t\/\/ и команду\n\n\tswitch action {\n\tcase \"update\":\n\t\tProcessID, _ := strconv.Atoi(c.Query(\"ProcessID\"))\n\t\tparam[\"ProcessID\"] = ProcessID\n\tcase \"connect\":\n\t\tinfbot := make(map[string]string)\n\t\tinfbot[\"name\"] = \"bot1\"\n\t\tid, _ = MB.AddBot(infbot)\n\tcase \"disconnect\":\n\/\/\t\tTODO отключить бота от игрового клиента\n\t}\n\n\terr := MB.SendActionToBot(id, action, param)\t\/\/ отправить команду боту\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn c.String(http.StatusOK, \"ok\\n\")\n}\nfunc deleteBot(c *echo.Context) error {\n\t\/\/\tTODO удалить бота\n\treturn c.String(http.StatusOK, \"ok\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2015 Hraban Luyat <hraban@0brg.net>\n\/\/\n\/\/ License for use of this code is detailed in the LICENSE file\n\npackage opus\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\n\/*\n\/\/ Statically link libopus. Requires a libopus.a in every directory you use this\n\/\/ as a dependency in. Not great, but CGO doesn't offer anything better, right\n\/\/ now. Unless you require everyone who USES this package to have libopus\n\/\/ installed system-wide, which is more of a chore because it's so new. Everyone\n\/\/ will end up having to build it from source anyway, might as well just dump\n\/\/ the pre-built lib in here. At least it will be up to the package maintainer,\n\/\/ not the user.\n\/\/\n\/\/ If I missed something, and somebody knows a better way: please let me know.\n#cgo LDFLAGS: libopus.a -lm\n#cgo CFLAGS: -std=c99 -Wall -Werror -pedantic -Ilibopusbuild\/include\n#include <opus\/opus.h>\n*\/\nimport \"C\"\n\ntype Application int\n\n\/\/ These constants should be taken from the library instead of defined here.\n\/\/ Unfortunatly, they are #defines, and CGO can't import those.\nconst (\n\t\/\/ Optimize encoding for VOIP\n\tAPPLICATION_VOIP Application = 2048\n\t\/\/ Optimize encoding for non-voice signals like music\n\tAPPLICATION_AUDIO Application = 2049\n\t\/\/ Optimize encoding for low latency applications\n\tAPPLICATION_RESTRICTED_LOWDELAY Application = 2051\n)\n\nconst (\n\txMAX_BITRATE = 48000\n\txMAX_FRAME_SIZE_MS = 60\n\txMAX_FRAME_SIZE = xMAX_BITRATE * xMAX_FRAME_SIZE_MS \/ 1000\n)\n\nfunc Version() string {\n\treturn C.GoString(C.opus_get_version_string())\n}\n\ntype Encoder struct {\n\tp *C.struct_OpusEncoder\n}\n\nfunc opuserr(code int) error {\n\treturn fmt.Errorf(\"opus: %s\", C.GoString(C.opus_strerror(C.int(code))))\n}\n\nfunc NewEncoder(sample_rate int, channels int, application Application) (*Encoder, error) {\n\tvar errno int\n\tp := C.opus_encoder_create(C.opus_int32(sample_rate), C.int(channels), C.int(application), (*C.int)(unsafe.Pointer(&errno)))\n\tif errno != 0 {\n\t\treturn nil, opuserr(errno)\n\t}\n\treturn &Encoder{p: p}, nil\n}\n\nfunc (enc *Encoder) EncodeFloat32(pcm []float32) ([]byte, error) {\n\tif pcm == nil || len(pcm) == 0 {\n\t\treturn nil, fmt.Errorf(\"opus: no data supplied\")\n\t}\n\t\/\/ I never know how much to allocate\n\tdata := make([]byte, 10000)\n\tn := int(C.opus_encode_float(\n\t\tenc.p,\n\t\t(*C.float)(&pcm[0]),\n\t\tC.int(len(pcm)),\n\t\t(*C.uchar)(&data[0]),\n\t\tC.opus_int32(cap(data))))\n\tif n < 0 {\n\t\treturn nil, opuserr(n)\n\t}\n\treturn data[:n], nil\n}\n\n\/\/ Returns an error if the encoder was already closed\nfunc (enc *Encoder) Close() error {\n\tif enc.p == nil {\n\t\treturn fmt.Errorf(\"opus: encoder already closed\")\n\t}\n\tC.opus_encoder_destroy(enc.p)\n\tenc.p = nil\n\treturn nil\n}\n\ntype Decoder struct {\n\tp *C.struct_OpusDecoder\n\tsample_rate int\n}\n\nfunc NewDecoder(sample_rate int, channels int) (*Decoder, error) {\n\tvar errno int\n\tp := C.opus_decoder_create(C.opus_int32(sample_rate), C.int(channels), (*C.int)(unsafe.Pointer(&errno)))\n\tif errno != 0 {\n\t\treturn nil, opuserr(errno)\n\t}\n\tdec := &Decoder{\n\t\tp: p,\n\t\tsample_rate: sample_rate,\n\t}\n\treturn dec, nil\n}\n\nfunc (dec *Decoder) DecodeFloat32(data []byte) ([]float32, error) {\n\tif data == nil || len(data) == 0 {\n\t\treturn nil, fmt.Errorf(\"opus: no data supplied\")\n\t}\n\t\/\/ I don't know how big this frame will be, but this is the limit\n\tpcm := make([]float32, xMAX_FRAME_SIZE_MS*dec.sample_rate\/1000)\n\tn := int(C.opus_decode_float(\n\t\tdec.p,\n\t\t(*C.uchar)(&data[0]),\n\t\tC.opus_int32(len(data)),\n\t\t(*C.float)(&pcm[0]),\n\t\tC.int(cap(pcm)),\n\t\t0))\n\tif n < 0 {\n\t\treturn nil, opuserr(n)\n\t}\n\treturn pcm[:n], nil\n}\n\n\/\/ Returns an error if the encoder was already closed\nfunc (dec *Decoder) Close() error {\n\tif dec.p == nil {\n\t\treturn fmt.Errorf(\"opus: decoder already closed\")\n\t}\n\tC.opus_decoder_destroy(dec.p)\n\tdec.p = nil\n\treturn nil\n}\n<commit_msg>Update dependency build directory for cgo<commit_after>\/\/ Copyright © 2015 Hraban Luyat <hraban@0brg.net>\n\/\/\n\/\/ License for use of this code is detailed in the LICENSE file\n\npackage opus\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n)\n\n\/*\n\/\/ Statically link libopus. Requires a libopus.a in every directory you use this\n\/\/ as a dependency in. Not great, but CGO doesn't offer anything better, right\n\/\/ now. Unless you require everyone who USES this package to have libopus\n\/\/ installed system-wide, which is more of a chore because it's so new. Everyone\n\/\/ will end up having to build it from source anyway, might as well just dump\n\/\/ the pre-built lib in here. At least it will be up to the package maintainer,\n\/\/ not the user.\n\/\/\n\/\/ If I missed something, and somebody knows a better way: please let me know.\n#cgo LDFLAGS: libopus.a -lm\n#cgo CFLAGS: -std=c99 -Wall -Werror -pedantic -Ibuild\/include\n#include <opus\/opus.h>\n*\/\nimport \"C\"\n\ntype Application int\n\n\/\/ These constants should be taken from the library instead of defined here.\n\/\/ Unfortunatly, they are #defines, and CGO can't import those.\nconst (\n\t\/\/ Optimize encoding for VOIP\n\tAPPLICATION_VOIP Application = 2048\n\t\/\/ Optimize encoding for non-voice signals like music\n\tAPPLICATION_AUDIO Application = 2049\n\t\/\/ Optimize encoding for low latency applications\n\tAPPLICATION_RESTRICTED_LOWDELAY Application = 2051\n)\n\nconst (\n\txMAX_BITRATE = 48000\n\txMAX_FRAME_SIZE_MS = 60\n\txMAX_FRAME_SIZE = xMAX_BITRATE * xMAX_FRAME_SIZE_MS \/ 1000\n)\n\nfunc Version() string {\n\treturn C.GoString(C.opus_get_version_string())\n}\n\ntype Encoder struct {\n\tp *C.struct_OpusEncoder\n}\n\nfunc opuserr(code int) error {\n\treturn fmt.Errorf(\"opus: %s\", C.GoString(C.opus_strerror(C.int(code))))\n}\n\nfunc NewEncoder(sample_rate int, channels int, application Application) (*Encoder, error) {\n\tvar errno int\n\tp := C.opus_encoder_create(C.opus_int32(sample_rate), C.int(channels), C.int(application), (*C.int)(unsafe.Pointer(&errno)))\n\tif errno != 0 {\n\t\treturn nil, opuserr(errno)\n\t}\n\treturn &Encoder{p: p}, nil\n}\n\nfunc (enc *Encoder) EncodeFloat32(pcm []float32) ([]byte, error) {\n\tif pcm == nil || len(pcm) == 0 {\n\t\treturn nil, fmt.Errorf(\"opus: no data supplied\")\n\t}\n\t\/\/ I never know how much to allocate\n\tdata := make([]byte, 10000)\n\tn := int(C.opus_encode_float(\n\t\tenc.p,\n\t\t(*C.float)(&pcm[0]),\n\t\tC.int(len(pcm)),\n\t\t(*C.uchar)(&data[0]),\n\t\tC.opus_int32(cap(data))))\n\tif n < 0 {\n\t\treturn nil, opuserr(n)\n\t}\n\treturn data[:n], nil\n}\n\n\/\/ Returns an error if the encoder was already closed\nfunc (enc *Encoder) Close() error {\n\tif enc.p == nil {\n\t\treturn fmt.Errorf(\"opus: encoder already closed\")\n\t}\n\tC.opus_encoder_destroy(enc.p)\n\tenc.p = nil\n\treturn nil\n}\n\ntype Decoder struct {\n\tp *C.struct_OpusDecoder\n\tsample_rate int\n}\n\nfunc NewDecoder(sample_rate int, channels int) (*Decoder, error) {\n\tvar errno int\n\tp := C.opus_decoder_create(C.opus_int32(sample_rate), C.int(channels), (*C.int)(unsafe.Pointer(&errno)))\n\tif errno != 0 {\n\t\treturn nil, opuserr(errno)\n\t}\n\tdec := &Decoder{\n\t\tp: p,\n\t\tsample_rate: sample_rate,\n\t}\n\treturn dec, nil\n}\n\nfunc (dec *Decoder) DecodeFloat32(data []byte) ([]float32, error) {\n\tif data == nil || len(data) == 0 {\n\t\treturn nil, fmt.Errorf(\"opus: no data supplied\")\n\t}\n\t\/\/ I don't know how big this frame will be, but this is the limit\n\tpcm := make([]float32, xMAX_FRAME_SIZE_MS*dec.sample_rate\/1000)\n\tn := int(C.opus_decode_float(\n\t\tdec.p,\n\t\t(*C.uchar)(&data[0]),\n\t\tC.opus_int32(len(data)),\n\t\t(*C.float)(&pcm[0]),\n\t\tC.int(cap(pcm)),\n\t\t0))\n\tif n < 0 {\n\t\treturn nil, opuserr(n)\n\t}\n\treturn pcm[:n], nil\n}\n\n\/\/ Returns an error if the encoder was already closed\nfunc (dec *Decoder) Close() error {\n\tif dec.p == nil {\n\t\treturn fmt.Errorf(\"opus: decoder already closed\")\n\t}\n\tC.opus_decoder_destroy(dec.p)\n\tdec.p = nil\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lily\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nfunc ExtractPaginationPars(pars *url.Values) (offset uint64, limit uint64, page uint64) {\n\tvar err error\n\tqPar := pars.Get(\"page_size\")\n\tif qPar != \"\" {\n\t\tlimit, err = strconv.ParseUint(qPar, 10, 64)\n\t\tif err != nil {\n\t\t\tlimit = 0\n\t\t}\n\t}\n\tif limit == 0 {\n\t\tlimit = 30\n\t}\n\tqPar = pars.Get(\"page\")\n\tif qPar != \"\" {\n\t\tpage, err = strconv.ParseUint(qPar, 10, 64)\n\t\tif err != nil {\n\t\t\tpage = 0\n\t\t}\n\t}\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\toffset = (page - 1) * limit\n\treturn\n}\n\nfunc ExtractSortPars(pars *url.Values) (sortColumn string, sortDesc bool) {\n\t\/\/ TODO change this fun\n\tsortColumn = pars.Get(\"sort_columns\")\n\tqPar := pars.Get(\"sort_order\")\n\tsortDesc = qPar == \"desc\"\n\treturn\n}\n\nfunc PaginatedResponse(data string, page_size, page, total uint64) string {\n\treturn fmt.Sprintf(`{\"page_size\":%d,\"page\":%d,\"total_count\":%d`, page_size, page, total) +\n\t\t`,\"results\":` + data + `}`\n}\n<commit_msg>add json funs<commit_after>package lily\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nfunc ApiExtractPaginationPars(pars *url.Values) (offset uint64, limit uint64, page uint64) {\n\tvar err error\n\tqPar := pars.Get(\"page_size\")\n\tif qPar != \"\" {\n\t\tlimit, err = strconv.ParseUint(qPar, 10, 64)\n\t\tif err != nil {\n\t\t\tlimit = 0\n\t\t}\n\t}\n\tif limit == 0 {\n\t\tlimit = 30\n\t}\n\tqPar = pars.Get(\"page\")\n\tif qPar != \"\" {\n\t\tpage, err = strconv.ParseUint(qPar, 10, 64)\n\t\tif err != nil {\n\t\t\tpage = 0\n\t\t}\n\t}\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\toffset = (page - 1) * limit\n\treturn\n}\n\nfunc ApiExtractSortPars(pars *url.Values) (sortColumn string, sortDesc bool) {\n\t\/\/ TODO change this fun\n\tsortColumn = pars.Get(\"sort_columns\")\n\tqPar := pars.Get(\"sort_order\")\n\tsortDesc = qPar == \"desc\"\n\treturn\n}\n\nfunc ApiPaginatedResponse(data string, page_size, page, total uint64) string {\n\treturn fmt.Sprintf(`{\"page_size\":%d,\"page\":%d,\"total_count\":%d`, page_size, page, total) +\n\t\t`,\"results\":` + data + `}`\n}\n\nfunc ApiJsonListQuery(q string) string {\n\treturn `(select coalesce(array_to_json(array_agg(row_to_json(sq.*))), '[]')\n\t\tfrom (` + q + `) sq)`\n}\n\nfunc ApiJsonListStrQuery(q string) string {\n\treturn `select coalesce(array_to_json(array_agg(row_to_json(sq.*)))::text, '[]')\n\t\tfrom (` + q + `) sq`\n}\n\nfunc ApiJsonRowQuery(q string) string {\n\treturn `(select row_to_json(sq.*) from (` + q + `) sq)`\n}\n\nfunc ApiJsonRowStrQuery(q string) string {\n\treturn `select row_to_json(sq.*)::text from (` + q + `) sq`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package v3 contains samples for Google Cloud Translation API v3.\npackage v3\n\n\/\/ [START translate_v3_create_glossary]\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\n\ttranslate \"cloud.google.com\/go\/translate\/apiv3\"\n\ttranslatepb \"google.golang.org\/genproto\/googleapis\/cloud\/translate\/v3\"\n)\n\n\/\/ createGlossary creates a glossary to use for other operations.\nfunc createGlossary(w io.Writer, projectID string, location string, glossaryID string, glossaryInputURI string) error {\n\t\/\/ projectID := \"my-project-id\"\n\t\/\/ location := \"us-central1\"\n\t\/\/ glossaryID := \"glossary-id\"\n\t\/\/ glossaryInputURI := \"gs:\/\/cloud-samples-data\/translation\/glossary.csv\"\n\n\tctx := context.Background()\n\tclient, err := translate.NewTranslationClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"NewTranslationClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\treq := &translatepb.CreateGlossaryRequest{\n\t\tParent: fmt.Sprintf(\"projects\/%s\/locations\/%s\", projectID, location),\n\t\tGlossary: &translatepb.Glossary{\n\t\t\tName: fmt.Sprintf(\"projects\/%s\/locations\/%s\/glossaries\/%s\", projectID, location, glossaryID),\n\t\t\tLanguages: &translatepb.Glossary_LanguageCodesSet_{\n\t\t\t\tLanguageCodesSet: &translatepb.Glossary_LanguageCodesSet{\n\t\t\t\t\tLanguageCodes: []string{\"en\", \"ja\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tInputConfig: &translatepb.GlossaryInputConfig{\n\t\t\t\tSource: &translatepb.GlossaryInputConfig_GcsSource{\n\t\t\t\t\tGcsSource: &translatepb.GcsSource{\n\t\t\t\t\t\tInputUri: glossaryInputURI,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ The CreateGlossary operation is async.\n\top, err := client.CreateGlossary(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"CreateGlossary: %v\", err)\n\t}\n\tfmt.Fprintf(w, \"Processing operation name: %q\\n\", op.Name())\n\n\tresp, err := op.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Wait: %v\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Created: %v\\n\", resp.GetName())\n\tfmt.Fprintf(w, \"Input URI: %v\\n\", resp.InputConfig.GetGcsSource().GetInputUri())\n\n\treturn nil\n}\n\n\/\/ [END translate_v3_create_glossary]\n<commit_msg>translate: update create_glossary.go sample value (#1120)<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package v3 contains samples for Google Cloud Translation API v3.\npackage v3\n\n\/\/ [START translate_v3_create_glossary]\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\n\ttranslate \"cloud.google.com\/go\/translate\/apiv3\"\n\ttranslatepb \"google.golang.org\/genproto\/googleapis\/cloud\/translate\/v3\"\n)\n\n\/\/ createGlossary creates a glossary to use for other operations.\nfunc createGlossary(w io.Writer, projectID string, location string, glossaryID string, glossaryInputURI string) error {\n\t\/\/ projectID := \"my-project-id\"\n\t\/\/ location := \"us-central1\"\n\t\/\/ glossaryID := \"my-glossary-display-name\"\n\t\/\/ glossaryInputURI := \"gs:\/\/cloud-samples-data\/translation\/glossary.csv\"\n\n\tctx := context.Background()\n\tclient, err := translate.NewTranslationClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"NewTranslationClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\treq := &translatepb.CreateGlossaryRequest{\n\t\tParent: fmt.Sprintf(\"projects\/%s\/locations\/%s\", projectID, location),\n\t\tGlossary: &translatepb.Glossary{\n\t\t\tName: fmt.Sprintf(\"projects\/%s\/locations\/%s\/glossaries\/%s\", projectID, location, glossaryID),\n\t\t\tLanguages: &translatepb.Glossary_LanguageCodesSet_{\n\t\t\t\tLanguageCodesSet: &translatepb.Glossary_LanguageCodesSet{\n\t\t\t\t\tLanguageCodes: []string{\"en\", \"ja\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tInputConfig: &translatepb.GlossaryInputConfig{\n\t\t\t\tSource: &translatepb.GlossaryInputConfig_GcsSource{\n\t\t\t\t\tGcsSource: &translatepb.GcsSource{\n\t\t\t\t\t\tInputUri: glossaryInputURI,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ The CreateGlossary operation is async.\n\top, err := client.CreateGlossary(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"CreateGlossary: %v\", err)\n\t}\n\tfmt.Fprintf(w, \"Processing operation name: %q\\n\", op.Name())\n\n\tresp, err := op.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Wait: %v\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Created: %v\\n\", resp.GetName())\n\tfmt.Fprintf(w, \"Input URI: %v\\n\", resp.InputConfig.GetGcsSource().GetInputUri())\n\n\treturn nil\n}\n\n\/\/ [END translate_v3_create_glossary]\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Square Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/nodefs\"\n\t\"github.com\/square\/keywhiz-fs\"\n\tklog \"github.com\/square\/keywhiz-fs\/log\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nvar (\n\tcertFile = flag.String(\"cert\", \"\", \"PEM-encoded certificate file\")\n\tkeyFile = flag.String(\"key\", \"client.key\", \"PEM-encoded private key file\")\n\tcaFile = flag.String(\"ca\", \"cacert.crt\", \"PEM-encoded CA certificates file\")\n\tuser = flag.String(\"asuser\", \"keywhiz\", \"Default user to own files\")\n\tgroup = flag.String(\"group\", \"keywhiz\", \"Default group to own files\")\n\tping = flag.Bool(\"ping\", false, \"Enable startup ping to server\")\n\tdebug = flag.Bool(\"debug\", false, \"Enable debugging output\")\n\ttimeoutSeconds = flag.Uint(\"timeout\", 20, \"Timeout for communication with server\")\n\tlogger *klog.Logger\n)\n\nfunc main() {\n\tvar Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [options] url mountpoint\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tUsage()\n\t\tos.Exit(1)\n\t}\n\n\tserverURL, err := url.Parse(flag.Args()[0])\n\tif err != nil {\n\t\tlog.Fatalf(\"Invalid url: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tmountpoint := flag.Args()[1]\n\n\tlogConfig := klog.Config{*debug, mountpoint}\n\tlogger = klog.New(\"kwfs_main\", logConfig)\n\tdefer logger.Close()\n\n\tif *certFile == \"\" {\n\t\tlogger.Debugf(\"Certificate file not specified, assuming certificate also in %s\", *keyFile)\n\t\tcertFile = keyFile\n\t}\n\n\tlockMemory()\n\n\tclientTimeout := time.Duration(*timeoutSeconds) * time.Second\n\tfreshThreshold := 200 * time.Millisecond\n\tbackendDeadline := 500 * time.Millisecond\n\tmaxWait := clientTimeout + backendDeadline\n\ttimeouts := keywhizfs.Timeouts{freshThreshold, backendDeadline, maxWait}\n\n\tclient := keywhizfs.NewClient(*certFile, *keyFile, *caFile, serverURL, clientTimeout, logConfig, *ping)\n\n\townership := keywhizfs.NewOwnership(*user, *group)\n\tkwfs, root, err := keywhizfs.NewKeywhizFs(&client, ownership, timeouts, logConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"KeywhizFs init fail: %v\\n\", err)\n\t}\n\n\tmountOptions := &fuse.MountOptions{\n\t\tAllowOther: true,\n\t\tName: kwfs.String(),\n\t\tOptions: []string{\"default_permissions\"},\n\t}\n\n\t\/\/ Empty Options struct avoids setting a global uid\/gid override.\n\tconn := nodefs.NewFileSystemConnector(root, &nodefs.Options{})\n\tserver, err := fuse.NewServer(conn.RawFS(), mountpoint, mountOptions)\n\tif err != nil {\n\t\tlog.Fatalf(\"Mount fail: %v\\n\", err)\n\t}\n\n\tserver.Serve()\n}\n\n\/\/ Locks memory, preventing memory from being written to disk as swap\nfunc lockMemory() {\n\terr := unix.Mlockall(unix.MCL_FUTURE | unix.MCL_CURRENT)\n\tswitch err {\n\tcase nil:\n\tcase unix.ENOSYS:\n\t\tlogger.Warnf(\"mlockall() not implemented on this system\")\n\tcase unix.ENOMEM:\n\t\tlogger.Warnf(\"mlockall() failed with ENOMEM\")\n\tdefault:\n\t\tlog.Fatalf(\"Could not perform mlockall and prevent swapping memory: %v\", err)\n\t}\n}\n<commit_msg>Handle signals and exit cleanly.<commit_after>\/\/ Copyright 2015 Square Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n\t\"github.com\/hanwen\/go-fuse\/fuse\/nodefs\"\n\t\"github.com\/square\/keywhiz-fs\"\n\tklog \"github.com\/square\/keywhiz-fs\/log\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nvar (\n\tcertFile = flag.String(\"cert\", \"\", \"PEM-encoded certificate file\")\n\tkeyFile = flag.String(\"key\", \"client.key\", \"PEM-encoded private key file\")\n\tcaFile = flag.String(\"ca\", \"cacert.crt\", \"PEM-encoded CA certificates file\")\n\tuser = flag.String(\"asuser\", \"keywhiz\", \"Default user to own files\")\n\tgroup = flag.String(\"group\", \"keywhiz\", \"Default group to own files\")\n\tping = flag.Bool(\"ping\", false, \"Enable startup ping to server\")\n\tdebug = flag.Bool(\"debug\", false, \"Enable debugging output\")\n\ttimeoutSeconds = flag.Uint(\"timeout\", 20, \"Timeout for communication with server\")\n\tlogger *klog.Logger\n)\n\nfunc main() {\n\tvar Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [options] url mountpoint\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tUsage()\n\t\tos.Exit(1)\n\t}\n\n\tserverURL, err := url.Parse(flag.Args()[0])\n\tif err != nil {\n\t\tlog.Fatalf(\"Invalid url: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tmountpoint := flag.Args()[1]\n\n\tlogConfig := klog.Config{*debug, mountpoint}\n\tlogger = klog.New(\"kwfs_main\", logConfig)\n\tdefer logger.Close()\n\n\tif *certFile == \"\" {\n\t\tlogger.Debugf(\"Certificate file not specified, assuming certificate also in %s\", *keyFile)\n\t\tcertFile = keyFile\n\t}\n\n\tlockMemory()\n\n\tclientTimeout := time.Duration(*timeoutSeconds) * time.Second\n\tfreshThreshold := 200 * time.Millisecond\n\tbackendDeadline := 500 * time.Millisecond\n\tmaxWait := clientTimeout + backendDeadline\n\ttimeouts := keywhizfs.Timeouts{freshThreshold, backendDeadline, maxWait}\n\n\tclient := keywhizfs.NewClient(*certFile, *keyFile, *caFile, serverURL, clientTimeout, logConfig, *ping)\n\n\townership := keywhizfs.NewOwnership(*user, *group)\n\tkwfs, root, err := keywhizfs.NewKeywhizFs(&client, ownership, timeouts, logConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"KeywhizFs init fail: %v\\n\", err)\n\t}\n\n\tmountOptions := &fuse.MountOptions{\n\t\tAllowOther: true,\n\t\tName: kwfs.String(),\n\t\tOptions: []string{\"default_permissions\"},\n\t}\n\n\t\/\/ Empty Options struct avoids setting a global uid\/gid override.\n\tconn := nodefs.NewFileSystemConnector(root, &nodefs.Options{})\n\tserver, err := fuse.NewServer(conn.RawFS(), mountpoint, mountOptions)\n\tif err != nil {\n\t\tlog.Fatalf(\"Mount fail: %v\\n\", err)\n\t}\n\n\t\/\/ Catch SIGINT and SIGKILL and exit cleanly.\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\tsig := <-c\n\t\tlogger.Warnf(\"Got signal %s, unmounting\", sig)\n\t\tserver.Unmount()\n\t}()\n\n\tserver.Serve()\n}\n\n\/\/ Locks memory, preventing memory from being written to disk as swap\nfunc lockMemory() {\n\terr := unix.Mlockall(unix.MCL_FUTURE | unix.MCL_CURRENT)\n\tswitch err {\n\tcase nil:\n\tcase unix.ENOSYS:\n\t\tlogger.Warnf(\"mlockall() not implemented on this system\")\n\tcase unix.ENOMEM:\n\t\tlogger.Warnf(\"mlockall() failed with ENOMEM\")\n\tdefault:\n\t\tlog.Fatalf(\"Could not perform mlockall and prevent swapping memory: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype Group struct {\n\tId bson.ObjectId `bson:\"_id\" json:\"-\"`\n\tBody string `bson:\"body\" json:\"body\"`\n\tTitle string `bson:\"title\" json:\"title\"`\n\tSlug string `bson:\"slug\" json:\"slug\"`\n\tPrivacy string `bson:\"privacy\" json:\"privacy\"`\n\tVisibility string `bson:\"visibility\" json:\"visibility\"`\n\tSocialApiChannelId string `bson:\"socialApiChannelId\" json:\"socialApiChannelId\"`\n\tSocialApiAnnouncementChannelId string `bson:\"socialApiAnnouncementChannelId\" json:\"socialApiAnnouncementChannelId\"`\n\tSocialApiDefaultChannelId string `bson:\"socialApiDefaultChannelId\" json:\"socialApiDefaultChannelId\"`\n\tParent []map[string]interface{} `bson:\"parent\" json:\"parent\"`\n\tCustomize map[string]interface{} `bson:\"customize\" json:\"customize\"`\n\tCounts map[string]interface{} `bson:\"counts\" json:\"counts\"`\n\tMigration string `bson:\"migration,omitempty\" json:\"migration\"`\n\tStackTemplate []string `bson:\"stackTemplates,omitempty\" json:\"stackTemplates\"`\n\t\/\/ DefaultChannels holds the default channels for a group, when a user joins\n\t\/\/ to this group, participants will be automatically added to regarding\n\t\/\/ channels\n\tDefaultChannels []string `bson:\"defaultChannels,omitempty\" json:\"defaultChannels\"`\n}\n<commit_msg>go\/koding: add new parameters to group struct for payment<commit_after>package models\n\nimport (\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype Group struct {\n\tId bson.ObjectId `bson:\"_id\" json:\"-\"`\n\tBody string `bson:\"body\" json:\"body\"`\n\tTitle string `bson:\"title\" json:\"title\"`\n\tSlug string `bson:\"slug\" json:\"slug\"`\n\tPrivacy string `bson:\"privacy\" json:\"privacy\"`\n\tVisibility string `bson:\"visibility\" json:\"visibility\"`\n\tSocialApiChannelId string `bson:\"socialApiChannelId\" json:\"socialApiChannelId\"`\n\tSocialApiAnnouncementChannelId string `bson:\"socialApiAnnouncementChannelId\" json:\"socialApiAnnouncementChannelId\"`\n\tSocialApiDefaultChannelId string `bson:\"socialApiDefaultChannelId\" json:\"socialApiDefaultChannelId\"`\n\tParent []map[string]interface{} `bson:\"parent\" json:\"parent\"`\n\tCustomize map[string]interface{} `bson:\"customize\" json:\"customize\"`\n\tCounts map[string]interface{} `bson:\"counts\" json:\"counts\"`\n\tMigration string `bson:\"migration,omitempty\" json:\"migration\"`\n\tStackTemplate []string `bson:\"stackTemplates,omitempty\" json:\"stackTemplates\"`\n\t\/\/ DefaultChannels holds the default channels for a group, when a user joins\n\t\/\/ to this group, participants will be automatically added to regarding\n\t\/\/ channels\n\tDefaultChannels []string `bson:\"defaultChannels,omitempty\" json:\"defaultChannels\"`\n\tPayment Payment `bson:\"payment\" json:\"payment\"`\n}\n\ntype Payment struct {\n\tSubscription Subscription\n\tCustomer Customer\n}\n\ntype Subscription struct {\n\t\/\/ Allowed values are \"trialing\", \"active\", \"past_due\", \"canceled\", \"unpaid\".\n\tState string `bson:\"state\" json:\"state\"`\n\tID string `bson:\"id\" json:\"id\"`\n}\n\ntype Customer struct {\n\tID string `bson:\"id\" json:\"id\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nProblem: We want to send a message and receive a reply\n\nSolution: Let's write a simple \"Hello World\" client and server.\nWe'll write them as go tests, so that we can verify that they\nwork. To run this test, you can run \"go test -run SimpleHelloWorld\"\nin the same directory as this file.\n*\/\n\npackage main\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/zeromq\/goczmq\"\n)\n\n\/\/ SimpleHelloWorldClient does the following:\n\/\/ * Creates a ZMQ_REQ socket\n\/\/ * Sends a \"Hello\" message\n\/\/ * Waits for a \"World\" reply\nfunc SimpleHelloWorldClient(t *testing.T) {\n\tt.Logf(\"client starting...\")\n\n\t\/\/ Create a ZMQ_REQ client using a \"smart constructor\".\n\t\/\/ See: https:\/\/godoc.org\/github.com\/zeromq\/goczmq#NewReq\n\n\tclient, err := goczmq.NewReq(\"tcp:\/\/localhost:5555\")\n\tif err != nil {\n\t\tt.Fatalf(\"client.NewReq error: %s\", err)\n\t}\n\n\t\/\/ Here, we make sure the socket is destroyed\n\t\/\/ when this function exits. While Go is a garbage\n\t\/\/ collected language, we're binding a C library,\n\t\/\/ so we need to make sure to clean up.\n\n\tdefer client.Destroy()\n\n\t\/\/ Let's create a request message. GoCZMQ uses slices\n\t\/\/ of byte slices for messages, because they map\n\t\/\/ very simply to ZeroMQ \"frames\".\n\n\trequest := [][]byte{[]byte(\"Hello\")}\n\n\t\/\/ Send the message and check for any errors.\n\n\terr = client.SendMessage(request)\n\tif err != nil {\n\t\tt.Fatalf(\"client.SendMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"client.SendMessage '%s'\", request)\n\n\t\/\/ Receive the reply message from the server. Note that\n\t\/\/ this RecvMessage() call will block forever waiting\n\t\/\/ for a message.\n\n\treply, err := client.RecvMessage()\n\tif err != nil {\n\t\tt.Fatalf(\"client.RecvMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"client.RecvMessage: '%s'\", reply)\n}\n\n\/\/ SimpleHelloWorldServer does the following:\n\/\/ * Creates a ZMQ_REP socket\n\/\/ * Waits for a \"Hello\" request\n\/\/ * Sends a \"World\" reply\nfunc SimpleHelloWorldServer(t *testing.T) {\n\tt.Logf(\"server starting...\")\n\n\t\/\/ Create a ZMQ_REP client using a \"smart constructor\".\n\t\/\/ See: https:\/\/godoc.org\/github.com\/zeromq\/goczmq#NewRep\n\n\tserver, err := goczmq.NewRep(\"tcp:\/\/*:5555\")\n\tif err != nil {\n\t\tt.Fatalf(\"server.NewRep error: %s\", err)\n\t}\n\n\t\/\/ Here, we make sure the socket is destroyed\n\t\/\/ when this function exits. While Go is a garbage\n\t\/\/ collected language, we're binding a C library,\n\t\/\/ so we need to make sure to clean up.\n\n\tdefer server.Destroy()\n\n\t\/\/ Let's wait for a message from a client. Note that\n\t\/\/ this RecvMessage call will block forever waiting.\n\n\trequest, err := server.RecvMessage()\n\tif err != nil {\n\t\tt.Fatalf(\"server.RecvMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"server.RecvMessage: '%s'\", request)\n\n\t\/\/ Here we create a reply message. GoCZMQ uses slices\n\t\/\/ of byte slices for messages, because they map\n\t\/\/ very simply to ZeroMQ \"frames\".\n\n\treply := [][]byte{[]byte(\"World\")}\n\n\t\/\/ Send the message and check for any errors.\n\n\terr = server.SendMessage(reply)\n\tif err != nil {\n\t\tt.Fatalf(\"server.SendMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"server.SendMessage: '%s'\", reply)\n}\n\n\/\/ TestSimpleHelloWorld starts SimpleHelloWorldServer in\n\/\/ a goroutine, then starts a SimpleHelloWorldClient\nfunc TestSimpleHelloWorld(t *testing.T) {\n\tgo SimpleHelloWorldServer(t)\n\tSimpleHelloWorldClient(t)\n}\n<commit_msg>Problem: incorrect wording in comment in goczmq hello world example<commit_after>\/*\nProblem: We want to send a message and receive a reply\n\nSolution: Let's write a simple \"Hello World\" client and server.\nWe'll write them as go tests, so that we can verify that they\nwork. To run this test, you can run \"go test -run SimpleHelloWorld\"\nin the same directory as this file.\n*\/\n\npackage main\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/zeromq\/goczmq\"\n)\n\n\/\/ SimpleHelloWorldClient does the following:\n\/\/ * Creates a ZMQ_REQ socket\n\/\/ * Sends a \"Hello\" message\n\/\/ * Waits for a \"World\" reply\nfunc SimpleHelloWorldClient(t *testing.T) {\n\tt.Logf(\"client starting...\")\n\n\t\/\/ Create a ZMQ_REQ client using a \"smart constructor\".\n\t\/\/ See: https:\/\/godoc.org\/github.com\/zeromq\/goczmq#NewReq\n\n\tclient, err := goczmq.NewReq(\"tcp:\/\/localhost:5555\")\n\tif err != nil {\n\t\tt.Fatalf(\"client.NewReq error: %s\", err)\n\t}\n\n\t\/\/ Here, we make sure the socket is destroyed\n\t\/\/ when this function exits. While Go is a garbage\n\t\/\/ collected language, we're binding a C library,\n\t\/\/ so we need to make sure to clean up.\n\n\tdefer client.Destroy()\n\n\t\/\/ Let's create a request message. GoCZMQ uses slices\n\t\/\/ of byte slices for messages, because they map\n\t\/\/ very simply to ZeroMQ \"frames\".\n\n\trequest := [][]byte{[]byte(\"Hello\")}\n\n\t\/\/ Send the message and check for any errors.\n\n\terr = client.SendMessage(request)\n\tif err != nil {\n\t\tt.Fatalf(\"client.SendMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"client.SendMessage '%s'\", request)\n\n\t\/\/ Receive the reply message from the server. Note that\n\t\/\/ this RecvMessage() call will block forever waiting\n\t\/\/ for a message.\n\n\treply, err := client.RecvMessage()\n\tif err != nil {\n\t\tt.Fatalf(\"client.RecvMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"client.RecvMessage: '%s'\", reply)\n}\n\n\/\/ SimpleHelloWorldServer does the following:\n\/\/ * Creates a ZMQ_REP socket\n\/\/ * Waits for a \"Hello\" request\n\/\/ * Sends a \"World\" reply\nfunc SimpleHelloWorldServer(t *testing.T) {\n\tt.Logf(\"server starting...\")\n\n\t\/\/ Create a ZMQ_REP server using a \"smart constructor\".\n\t\/\/ See: https:\/\/godoc.org\/github.com\/zeromq\/goczmq#NewRep\n\n\tserver, err := goczmq.NewRep(\"tcp:\/\/*:5555\")\n\tif err != nil {\n\t\tt.Fatalf(\"server.NewRep error: %s\", err)\n\t}\n\n\t\/\/ Here, we make sure the socket is destroyed\n\t\/\/ when this function exits. While Go is a garbage\n\t\/\/ collected language, we're binding a C library,\n\t\/\/ so we need to make sure to clean up.\n\n\tdefer server.Destroy()\n\n\t\/\/ Let's wait for a message from a client. Note that\n\t\/\/ this RecvMessage call will block forever waiting.\n\n\trequest, err := server.RecvMessage()\n\tif err != nil {\n\t\tt.Fatalf(\"server.RecvMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"server.RecvMessage: '%s'\", request)\n\n\t\/\/ Here we create a reply message. GoCZMQ uses slices\n\t\/\/ of byte slices for messages, because they map\n\t\/\/ very simply to ZeroMQ \"frames\".\n\n\treply := [][]byte{[]byte(\"World\")}\n\n\t\/\/ Send the message and check for any errors.\n\n\terr = server.SendMessage(reply)\n\tif err != nil {\n\t\tt.Fatalf(\"server.SendMessage error: %s\", err)\n\t}\n\n\tt.Logf(\"server.SendMessage: '%s'\", reply)\n}\n\n\/\/ TestSimpleHelloWorld starts SimpleHelloWorldServer in\n\/\/ a goroutine, then starts a SimpleHelloWorldClient\nfunc TestSimpleHelloWorld(t *testing.T) {\n\tgo SimpleHelloWorldServer(t)\n\tSimpleHelloWorldClient(t)\n}\n<|endoftext|>"} {"text":"<commit_before>package dnssd\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype genericop interface {\n\tStart() error\n\tActive() bool\n\tStop()\n}\n\nfunc StartStopHelper(t *testing.T, op genericop) {\n\tif err := op.Start(); err != nil {\n\t\tt.Fatalf(\"Couldn't start op: %v\", err)\n\t}\n\tif !op.Active() {\n\t\tt.Fatal(\"Op didn't become active\")\n\t}\n\top.Stop()\n\tif op.Active() {\n\t\tt.Fatal(\"Op didn't become inactive\")\n\t}\n}\n\nfunc TestRegisterStartStop(t *testing.T) {\n\tf := func(op *RegisterOp, e error, add bool, name, serviceType, domain string) {\n\t}\n\tStartStopHelper(t, NewRegisterOp(\"go\", \"_go-dnssd._tcp\", 9, f))\n}\n\nfunc TestRegTxt(t *testing.T) {\n\top := &RegisterOp{}\n\tkey, value := \"a\", \"a\"\n\tif err := op.SetTXTPair(key, value); err != nil {\n\t\tt.Fatalf(`Unexpected error setting key \"%s\", value \"%s\": %v`, key, value, err)\n\t}\n\tif l := 2 + len(key) + len(value); op.txt.l != l {\n\t\tt.Fatalf(`Expected length %s after setting key \"%s\", value \"%s\", got: %v`, l, key, value, op.txt.l)\n\t}\n\tif err := op.DeleteTXTPair(key); err != nil {\n\t\tt.Fatalf(`Unexpected error deleting key \"%s\": %v`, key, err)\n\t}\n\tif op.txt.l != 0 {\n\t\tt.Fatalf(`Expected length 0 after deleting key \"%s, got: %v`, key, op.txt.l)\n\t}\n\tkey = strings.Repeat(\"a\", 128)\n\tvalue = key\n\tif err := op.SetTXTPair(key, value); err != ErrTXTStringLen {\n\t\tt.Fatalf(\"Expected ErrTXTStringLen, got: %v\", err)\n\t}\n\tkey = strings.Repeat(\"a\", 126)\n\tvalue = key\n\tssize := 2 + len(key) + len(value)\n\tfor i := 0; i < (65535 \/ ssize); i++ {\n\t\tkey := fmt.Sprintf(\"%0126d\", i)\n\t\tif err := op.SetTXTPair(key, value); err != nil {\n\t\t\tt.Fatalf(\"Unexpected error setting up for ErrTXTLen: %v\", err)\n\t\t}\n\t}\n\tif err := op.SetTXTPair(key, value); err != ErrTXTLen {\n\t\tt.Fatalf(\"Expected ErrTXTLen, got: %v\", err)\n\t}\n\tfor i := 0; i < (65535 \/ ssize); i++ {\n\t\tkey := fmt.Sprintf(\"%0126d\", i)\n\t\tif err := op.DeleteTXTPair(key); err != nil {\n\t\t\tt.Fatalf(\"Unexpected error tearing down from ErrTXTLen test: %v\", err)\n\t\t}\n\t}\n\tif op.txt.l != 0 {\n\t\tt.Fatalf(\"Expected length 0 after tearing down from ErrTXTLen test, got: %v\", op.txt.l)\n\t}\n}\n\nfunc TestBrowseStartStop(t *testing.T) {\n\tf := func(op *BrowseOp, e error, add bool, interfaceIndex int, name string, serviceType string, domain string) {\n\t}\n\tStartStopHelper(t, NewBrowseOp(\"_go-dnssd._tcp\", f))\n}\n\nfunc TestResolveStartStop(t *testing.T) {\n\tf := func(op *ResolveOp, e error, host string, port int, txt map[string]string) {\n\t}\n\tStartStopHelper(t, NewResolveOp(0, \"go\", \"_go-dnssd._tcp\", \"local\", f))\n}\n\nfunc TestDecodeTxtBadLength(t *testing.T) {\n\tb := []byte{255, 'b', '=', 'b'}\n\tm := decodeTxt(b)\n\tif v, p := m[\"b\"]; p != false {\n\t\tt.Fatalf(`Expected pair \"b\" to be missing, instead it's present with value %v`, v)\n\t}\n}\n\nfunc TestDecodeTxtKeyNoValue(t *testing.T) {\n\tb := []byte{1, 'a', 2, 'b', '=', 1, '=', 2, '=', 'a'}\n\tm := decodeTxt(b)\n\tkeys := []string{\"a\", \"b\", \"=\", \"=a\"}\n\tfor _, k := range keys {\n\t\tif v, p := m[k]; v != \"\" {\n\t\t\tt.Fatalf(`Expected \"%s\" to return empty string, got %v instead (present: %v)`, k, v, p)\n\t\t}\n\t}\n}\n\nfunc TestDecodeTxtKeyValue(t *testing.T) {\n\tb := []byte{3, 'a', '=', 'a', 3, 'b', '=', 'b', 5, 'a', 'b', '=', 'a', 'b'}\n\tm := decodeTxt(b)\n\tfor _, kv := range []string{\"a\", \"b\", \"ab\"} {\n\t\tif v, p := m[kv]; v != kv {\n\t\t\tt.Fatalf(`Expected \"%s\" to return \"%s\", got %v instead (present: %v)`, kv, kv, v, p)\n\t\t}\n\t}\n}\n\nfunc TestQueryStartStop(t *testing.T) {\n\tf := func(op *QueryOp, err error, add bool, interfaceIndex int, fullname string, rrtype, rrclass uint16, rdata []byte, ttl uint32) {\n\t}\n\tStartStopHelper(t, NewQueryOp(0, \"golang.org.\", 1, 1, f))\n}\n<commit_msg>Fix printf verb in TestRegTxt<commit_after>package dnssd\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype genericop interface {\n\tStart() error\n\tActive() bool\n\tStop()\n}\n\nfunc StartStopHelper(t *testing.T, op genericop) {\n\tif err := op.Start(); err != nil {\n\t\tt.Fatalf(\"Couldn't start op: %v\", err)\n\t}\n\tif !op.Active() {\n\t\tt.Fatal(\"Op didn't become active\")\n\t}\n\top.Stop()\n\tif op.Active() {\n\t\tt.Fatal(\"Op didn't become inactive\")\n\t}\n}\n\nfunc TestRegisterStartStop(t *testing.T) {\n\tf := func(op *RegisterOp, e error, add bool, name, serviceType, domain string) {\n\t}\n\tStartStopHelper(t, NewRegisterOp(\"go\", \"_go-dnssd._tcp\", 9, f))\n}\n\nfunc TestRegTxt(t *testing.T) {\n\top := &RegisterOp{}\n\tkey, value := \"a\", \"a\"\n\tif err := op.SetTXTPair(key, value); err != nil {\n\t\tt.Fatalf(`Unexpected error setting key \"%s\", value \"%s\": %v`, key, value, err)\n\t}\n\tif l := 2 + len(key) + len(value); op.txt.l != l {\n\t\tt.Fatalf(`Expected length %d after setting key \"%s\", value \"%s\", got: %d`, l, key, value, op.txt.l)\n\t}\n\tif err := op.DeleteTXTPair(key); err != nil {\n\t\tt.Fatalf(`Unexpected error deleting key \"%s\": %v`, key, err)\n\t}\n\tif op.txt.l != 0 {\n\t\tt.Fatalf(`Expected length 0 after deleting key \"%s, got: %v`, key, op.txt.l)\n\t}\n\tkey = strings.Repeat(\"a\", 128)\n\tvalue = key\n\tif err := op.SetTXTPair(key, value); err != ErrTXTStringLen {\n\t\tt.Fatalf(\"Expected ErrTXTStringLen, got: %v\", err)\n\t}\n\tkey = strings.Repeat(\"a\", 126)\n\tvalue = key\n\tssize := 2 + len(key) + len(value)\n\tfor i := 0; i < (65535 \/ ssize); i++ {\n\t\tkey := fmt.Sprintf(\"%0126d\", i)\n\t\tif err := op.SetTXTPair(key, value); err != nil {\n\t\t\tt.Fatalf(\"Unexpected error setting up for ErrTXTLen: %v\", err)\n\t\t}\n\t}\n\tif err := op.SetTXTPair(key, value); err != ErrTXTLen {\n\t\tt.Fatalf(\"Expected ErrTXTLen, got: %v\", err)\n\t}\n\tfor i := 0; i < (65535 \/ ssize); i++ {\n\t\tkey := fmt.Sprintf(\"%0126d\", i)\n\t\tif err := op.DeleteTXTPair(key); err != nil {\n\t\t\tt.Fatalf(\"Unexpected error tearing down from ErrTXTLen test: %v\", err)\n\t\t}\n\t}\n\tif op.txt.l != 0 {\n\t\tt.Fatalf(\"Expected length 0 after tearing down from ErrTXTLen test, got: %v\", op.txt.l)\n\t}\n}\n\nfunc TestBrowseStartStop(t *testing.T) {\n\tf := func(op *BrowseOp, e error, add bool, interfaceIndex int, name string, serviceType string, domain string) {\n\t}\n\tStartStopHelper(t, NewBrowseOp(\"_go-dnssd._tcp\", f))\n}\n\nfunc TestResolveStartStop(t *testing.T) {\n\tf := func(op *ResolveOp, e error, host string, port int, txt map[string]string) {\n\t}\n\tStartStopHelper(t, NewResolveOp(0, \"go\", \"_go-dnssd._tcp\", \"local\", f))\n}\n\nfunc TestDecodeTxtBadLength(t *testing.T) {\n\tb := []byte{255, 'b', '=', 'b'}\n\tm := decodeTxt(b)\n\tif v, p := m[\"b\"]; p != false {\n\t\tt.Fatalf(`Expected pair \"b\" to be missing, instead it's present with value %v`, v)\n\t}\n}\n\nfunc TestDecodeTxtKeyNoValue(t *testing.T) {\n\tb := []byte{1, 'a', 2, 'b', '=', 1, '=', 2, '=', 'a'}\n\tm := decodeTxt(b)\n\tkeys := []string{\"a\", \"b\", \"=\", \"=a\"}\n\tfor _, k := range keys {\n\t\tif v, p := m[k]; v != \"\" {\n\t\t\tt.Fatalf(`Expected \"%s\" to return empty string, got %v instead (present: %v)`, k, v, p)\n\t\t}\n\t}\n}\n\nfunc TestDecodeTxtKeyValue(t *testing.T) {\n\tb := []byte{3, 'a', '=', 'a', 3, 'b', '=', 'b', 5, 'a', 'b', '=', 'a', 'b'}\n\tm := decodeTxt(b)\n\tfor _, kv := range []string{\"a\", \"b\", \"ab\"} {\n\t\tif v, p := m[kv]; v != kv {\n\t\t\tt.Fatalf(`Expected \"%s\" to return \"%s\", got %v instead (present: %v)`, kv, kv, v, p)\n\t\t}\n\t}\n}\n\nfunc TestQueryStartStop(t *testing.T) {\n\tf := func(op *QueryOp, err error, add bool, interfaceIndex int, fullname string, rrtype, rrclass uint16, rdata []byte, ttl uint32) {\n\t}\n\tStartStopHelper(t, NewQueryOp(0, \"golang.org.\", 1, 1, f))\n}\n<|endoftext|>"} {"text":"<commit_before>package garagepi\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/robdimsdale\/garage-pi\/logger\"\n\t\"strings\"\n)\n\ntype Executor struct {\n\tl logger.Logger\n\twebcamHost string\n\twebcamPort string\n\trtr *mux.Router\n\tosHelper OsHelper\n\tstaticFilesystem http.FileSystem\n\ttemplatesFilesystem http.FileSystem\n}\n\ntype OsHelper interface {\n\tExec(executable string, arg ...string) (string, error)\n}\n\nfunc NewExecutor(\n\tl logger.Logger,\n\thelper OsHelper,\n\tstaticFilesystem http.FileSystem,\n\ttemplatesFilesystem http.FileSystem,\n\twebcamHost string,\n\twebcamPort string) *Executor {\n\n\treturn &Executor{\n\t\tl: l,\n\t\twebcamHost: webcamHost,\n\t\twebcamPort: webcamPort,\n\t\trtr: mux.NewRouter(),\n\t\tosHelper: helper,\n\t\tstaticFilesystem: staticFilesystem,\n\t\ttemplatesFilesystem: templatesFilesystem,\n\t}\n}\n\nfunc (e *Executor) homepageHandler(w http.ResponseWriter, r *http.Request) {\n\te.l.Log(\"homepage\")\n\tbuf := bytes.NewBuffer(nil)\n\tf, err := e.templatesFilesystem.Open(\"homepage.html\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = io.Copy(buf, f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tf.Close()\n\tw.Write(buf.Bytes())\n}\n\nfunc (e *Executor) webcamHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := http.Get(\"http:\/\/\" + e.webcamHost + \":\" + e.webcamPort + \"\/?action=snapshot&n=\" + r.Form.Get(\"n\"))\n\tif err != nil {\n\t\te.l.Log(\"Error getting image: \" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\te.l.Log(\"Error closing image request: \" + err.Error())\n\t}\n\tw.Write(body)\n}\n\nfunc (e *Executor) toggleDoorHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"gpio\", \"write\", \"0\", \"1\")\n\tsleepDuration, _ := time.ParseDuration(\"500ms\")\n\te.l.Log(\"sleeping for \" + sleepDuration.String())\n\ttime.Sleep(sleepDuration)\n\te.executeCommand(\"gpio\", \"write\", \"0\", \"0\")\n\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) executeCommand(executable string, arg ...string) string {\n\tlogStatement := append([]string{executable}, arg...)\n\te.l.Log(\"executing: '\" + strings.Join(logStatement, \" \") + \"'\")\n\tout, err := e.osHelper.Exec(executable, arg...)\n\tif err != nil {\n\t\te.l.Log(\"ERROR: \" + err.Error())\n\t}\n\treturn out\n}\n\nfunc (e *Executor) startCameraHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"\/etc\/init.d\/garagestreamer\", \"start\")\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) stopCameraHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"\/etc\/init.d\/garagestreamer\", \"stop\")\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) ServeForever(port string) {\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(e.staticFilesystem)))\n\n\te.rtr.HandleFunc(\"\/\", e.homepageHandler).Methods(\"GET\")\n\te.rtr.HandleFunc(\"\/webcam\", e.webcamHandler).Methods(\"GET\")\n\te.rtr.HandleFunc(\"\/toggle\", e.toggleDoorHandler).Methods(\"POST\")\n\te.rtr.HandleFunc(\"\/start-camera\", e.startCameraHandler).Methods(\"POST\")\n\te.rtr.HandleFunc(\"\/stop-camera\", e.stopCameraHandler).Methods(\"POST\")\n\n\thttp.Handle(\"\/\", e.rtr)\n\te.l.Log(\"Listening on port \" + port + \"...\")\n\thttp.ListenAndServe(\":\"+port, nil)\n}\n\nfunc (e *Executor) Kill() {\n\tos.Exit(0)\n}\n<commit_msg>Gracefully handle no response from webcam url.<commit_after>package garagepi\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/robdimsdale\/garage-pi\/logger\"\n\t\"strings\"\n)\n\ntype Executor struct {\n\tl logger.Logger\n\twebcamHost string\n\twebcamPort string\n\trtr *mux.Router\n\tosHelper OsHelper\n\tstaticFilesystem http.FileSystem\n\ttemplatesFilesystem http.FileSystem\n}\n\ntype OsHelper interface {\n\tExec(executable string, arg ...string) (string, error)\n}\n\nfunc NewExecutor(\n\tl logger.Logger,\n\thelper OsHelper,\n\tstaticFilesystem http.FileSystem,\n\ttemplatesFilesystem http.FileSystem,\n\twebcamHost string,\n\twebcamPort string) *Executor {\n\n\treturn &Executor{\n\t\tl: l,\n\t\twebcamHost: webcamHost,\n\t\twebcamPort: webcamPort,\n\t\trtr: mux.NewRouter(),\n\t\tosHelper: helper,\n\t\tstaticFilesystem: staticFilesystem,\n\t\ttemplatesFilesystem: templatesFilesystem,\n\t}\n}\n\nfunc (e *Executor) homepageHandler(w http.ResponseWriter, r *http.Request) {\n\te.l.Log(\"homepage\")\n\tbuf := bytes.NewBuffer(nil)\n\tf, err := e.templatesFilesystem.Open(\"homepage.html\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = io.Copy(buf, f)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tf.Close()\n\tw.Write(buf.Bytes())\n}\n\nfunc (e *Executor) webcamHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := http.Get(\"http:\/\/\" + e.webcamHost + \":\" + e.webcamPort + \"\/?action=snapshot&n=\" + r.Form.Get(\"n\"))\n\tif err != nil {\n\t\te.l.Log(\"Error getting image: \" + err.Error())\n\t\tif resp == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\te.l.Log(\"Error closing image request: \" + err.Error())\n\t}\n\tw.Write(body)\n}\n\nfunc (e *Executor) toggleDoorHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"gpio\", \"write\", \"0\", \"1\")\n\tsleepDuration, _ := time.ParseDuration(\"500ms\")\n\te.l.Log(\"sleeping for \" + sleepDuration.String())\n\ttime.Sleep(sleepDuration)\n\te.executeCommand(\"gpio\", \"write\", \"0\", \"0\")\n\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) executeCommand(executable string, arg ...string) string {\n\tlogStatement := append([]string{executable}, arg...)\n\te.l.Log(\"executing: '\" + strings.Join(logStatement, \" \") + \"'\")\n\tout, err := e.osHelper.Exec(executable, arg...)\n\tif err != nil {\n\t\te.l.Log(\"ERROR: \" + err.Error())\n\t}\n\treturn out\n}\n\nfunc (e *Executor) startCameraHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"\/etc\/init.d\/garagestreamer\", \"start\")\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) stopCameraHandler(w http.ResponseWriter, r *http.Request) {\n\te.executeCommand(\"\/etc\/init.d\/garagestreamer\", \"stop\")\n\thttp.Redirect(w, r, \"\/\", http.StatusSeeOther)\n}\n\nfunc (e *Executor) ServeForever(port string) {\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\/\", http.FileServer(e.staticFilesystem)))\n\n\te.rtr.HandleFunc(\"\/\", e.homepageHandler).Methods(\"GET\")\n\te.rtr.HandleFunc(\"\/webcam\", e.webcamHandler).Methods(\"GET\")\n\te.rtr.HandleFunc(\"\/toggle\", e.toggleDoorHandler).Methods(\"POST\")\n\te.rtr.HandleFunc(\"\/start-camera\", e.startCameraHandler).Methods(\"POST\")\n\te.rtr.HandleFunc(\"\/stop-camera\", e.stopCameraHandler).Methods(\"POST\")\n\n\thttp.Handle(\"\/\", e.rtr)\n\te.l.Log(\"Listening on port \" + port + \"...\")\n\thttp.ListenAndServe(\":\"+port, nil)\n}\n\nfunc (e *Executor) Kill() {\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package pool\n\nimport \"net\"\nimport \"errors\"\nimport \"sync\"\nimport \"time\"\n\n\/\/blockingPool implements the Pool interface.\n\/\/Connestions from blockingPool offer a kind of blocking mechanism that is derived from buffered channel.\ntype blockingPool struct {\n\t\/\/mutex is to make closing the pool and recycling the connection an atomic operation\n\tmutex sync.Mutex\n\n\t\/\/timeout to Get, default to 3\n\ttimeout int\n\n\t\/\/storage for net.Conn connections\n\tconns chan net.Conn\n\n\t\/\/net.Conn generator\n\tfactory Factory\n}\n\n\/\/Factory is a function to create new connections\n\/\/which is provided by the user\ntype Factory func() (net.Conn, error)\n\n\/\/Create a new blocking pool.\n\/\/As no new connections would be made when the pool is busy, maxCap does not make sense yet.\nfunc NewBlockingPool(initCap, maxCap int, factory Factory) (Pool, error) {\n\tif initCap < 0 || maxCap < 1 || initCap > maxCap {\n\t\treturn nil, errors.New(\"invalid capacity settings\")\n\t}\n\n\tnewPool := &blockingPool{\n\t\ttimeout: 3,\n\t\tconns: make(chan net.Conn, maxCap),\n\t\tfactory: factory\n\t}\n\n\tfor i := 0; i < initCap; i++ {\n\t\tconn, err := factory()\n\t\tif err != nil {\n\t\t\tnewPool.Close()\n\t\t\treturn nil, fmt.Errorf(\"error counted when calling factory: %s\", err)\n\t\t}\n\t\tnewPool.conns <- conn\n\t}\n\treturn newPool, nil\n}\n\nfunc (p *blockingPool) Get() (net.Conn, error) {\n\t\/\/in case that pool is closed and pool.conns is set to nil\n\tconns := p.conns\n\tif conns == nil {\n\t\treturn nil, ErrClosed\n\t}\n\n\tselect {\n\tcase conn := <-conns:\n\t\treturn conn, nil\/*not wrapped yet*\/\n\tcase <-time.After(time.Second*p.timeout) {\n\t\treturn nil, errors.New(\"timeout\")\n\t}\n}\n\n\/\/put puts the connection back to the pool. If the pool is closed, put simply close \n\/\/any connections received and return immediately. A nil net.Conn is illegal and will be rejected.\nfunc (p *blockingPool) put(conn net.Conn) error {\n\tif conn == nil {\n\t\treturn errors.New(\"connection is nil.\")\n\t}\n\n\t\/\/in case that pool is closed and pool.conns is set to nil\n\tconns := p.conns\n\tif conns == nil {\n\t\treturn conn.Close()\n\t}\n\n\t\/\/It is impossible to block as number of connections is never more than length of channel\n\tconns <-conn\n\treturn nil\n}\n\nfunc (p *blockingPool) Close() {}\n\nfunc (p *blockingPool) Len() {}\n<commit_msg>nothing<commit_after>package pool\n\nimport \"net\"\nimport \"errors\"\nimport \"sync\"\nimport \"time\"\n\n\/\/blockingPool implements the Pool interface.\n\/\/Connestions from blockingPool offer a kind of blocking mechanism that is derived from buffered channel.\ntype blockingPool struct {\n\t\/\/mutex is to make closing the pool and recycling the connection an atomic operation\n\tmutex sync.Mutex\n\n\t\/\/timeout to Get, default to 3\n\ttimeout int\n\n\t\/\/storage for net.Conn connections\n\tconns chan net.Conn\n\n\t\/\/net.Conn generator\n\tfactory Factory\n}\n\n\/\/Factory is a function to create new connections\n\/\/which is provided by the user\ntype Factory func() (net.Conn, error)\n\n\/\/Create a new blocking pool.\n\/\/As no new connections would be made when the pool is busy, maxCap does not make sense yet.\nfunc NewBlockingPool(initCap, maxCap int, factory Factory) (Pool, error) {\n\tif initCap < 0 || maxCap < 1 || initCap > maxCap {\n\t\treturn nil, errors.New(\"invalid capacity settings\")\n\t}\n\n\tnewPool := &blockingPool{\n\t\ttimeout: 3,\n\t\tconns: make(chan net.Conn, maxCap),\n\t\tfactory: factory\n\t}\n\n\tfor i := 0; i < initCap; i++ {\n\t\tconn, err := factory()\n\t\tif err != nil {\n\t\t\tnewPool.Close()\n\t\t\treturn nil, fmt.Errorf(\"error counted when calling factory: %s\", err)\n\t\t}\n\t\tnewPool.conns <- conn\n\t}\n\treturn newPool, nil\n}\n\nfunc (p *blockingPool) Get() (net.Conn, error) {\n\t\/\/in case that pool is closed and pool.conns is set to nil\n\tconns := p.conns\n\tif conns == nil {\n\t\treturn nil, ErrClosed\n\t}\n\n\tselect {\n\tcase conn := <-conns:\n\t\treturn conn, nil\/*not wrapped yet*\/\n\tcase <-time.After(time.Second*p.timeout) {\n\t\treturn nil, errors.New(\"timeout\")\n\t}\n}\n\n\/\/put puts the connection back to the pool. If the pool is closed, put simply close \n\/\/any connections received and return immediately. A nil net.Conn is illegal and will be rejected.\nfunc (p *blockingPool) put(conn net.Conn) error {\n\tif conn == nil {\n\t\treturn errors.New(\"connection is nil.\")\n\t}\n\n\t\/\/in case that pool is closed and pool.conns is set to nil\n\tconns := p.conns\n\tif conns == nil {\n\t\treturn conn.Close()\n\t}\n\n\t\/\/It is impossible to block as number of connections is never more than length of channel\n\tconns <-conn\n\treturn nil\n}\n\n\/\/\nfunc (p *blockingPool) Close() {}\n\nfunc (p *blockingPool) Len() {}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package krberror provides error type and functions for gokrb5.\npackage krberror\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Error type descriptions.\nconst (\n\tseparator = \" < \"\n\tEncodingError = \"Encoding_Error\"\n\tNetworkingError = \"Networking_Error\"\n\tDecryptingError = \"Decrypting_Error\"\n\tEncryptingError = \"Encrypting_Error\"\n\tChksumError = \"Checksum_Error\"\n\tKRBMsgError = \"KRBMessage_Handling_Error\"\n\tConfigError = \"Configuration_Error\"\n)\n\n\/\/ Krberror is an error type for gokrb5\ntype Krberror struct {\n\tRootCause string\n\tEText []string\n}\n\n\/\/ Error function to implement the error interface.\nfunc (e Krberror) Error() string {\n\treturn fmt.Sprintf(\"[Root cause: %s] \", e.RootCause) + strings.Join(e.EText, separator)\n}\n\n\/\/ Add another error statement to the error.\nfunc (e *Krberror) Add(et string, s string) {\n\te.EText = append([]string{fmt.Sprintf(\"%s: %s\", et, s)}, e.EText...)\n}\n\n\/\/ NewKrberror creates a new instance of Krberror.\nfunc NewKrberror(et, s string) Krberror {\n\treturn Krberror{\n\t\tRootCause: et,\n\t\tEText: []string{s},\n\t}\n}\n\n\/\/ Errorf appends to or creates a new Krberror.\nfunc Errorf(err error, et, format string, a ...interface{}) Krberror {\n\tif e, ok := err.(Krberror); ok {\n\t\tif a == nil {\n\t\t\te.EText = append([]string{fmt.Sprintf(\"%s: \"+format, et)}, e.EText...)\n\t\t\treturn e\n\t\t}\n\t\te.EText = append([]string{fmt.Sprintf(\"%s: \"+format, et, a)}, e.EText...)\n\t\treturn e\n\t}\n\treturn NewErrorf(et, format+\": %s\", a, err)\n}\n\n\/\/ NewErrorf creates a new Krberror from a formatted string.\nfunc NewErrorf(et, format string, a ...interface{}) Krberror {\n\tvar s string\n\tif a == nil {\n\t\ts = fmt.Sprintf(\"%s: %s\", et, format)\n\t} else {\n\t\ts = fmt.Sprintf(\"%s: %s\", et, fmt.Sprintf(format, a))\n\t}\n\treturn Krberror{\n\t\tRootCause: et,\n\t\tEText: []string{s},\n\t}\n}\n<commit_msg>fix error text<commit_after>\/\/ Package krberror provides error type and functions for gokrb5.\npackage krberror\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ Error type descriptions.\nconst (\n\tseparator = \" < \"\n\tEncodingError = \"Encoding_Error\"\n\tNetworkingError = \"Networking_Error\"\n\tDecryptingError = \"Decrypting_Error\"\n\tEncryptingError = \"Encrypting_Error\"\n\tChksumError = \"Checksum_Error\"\n\tKRBMsgError = \"KRBMessage_Handling_Error\"\n\tConfigError = \"Configuration_Error\"\n)\n\n\/\/ Krberror is an error type for gokrb5\ntype Krberror struct {\n\tRootCause string\n\tEText []string\n}\n\n\/\/ Error function to implement the error interface.\nfunc (e Krberror) Error() string {\n\treturn fmt.Sprintf(\"[Root cause: %s] \", e.RootCause) + strings.Join(e.EText, separator)\n}\n\n\/\/ Add another error statement to the error.\nfunc (e *Krberror) Add(et string, s string) {\n\te.EText = append([]string{fmt.Sprintf(\"%s: %s\", et, s)}, e.EText...)\n}\n\n\/\/ NewKrberror creates a new instance of Krberror.\nfunc NewKrberror(et, s string) Krberror {\n\treturn Krberror{\n\t\tRootCause: et,\n\t\tEText: []string{s},\n\t}\n}\n\n\/\/ Errorf appends to or creates a new Krberror.\nfunc Errorf(err error, et, format string, a ...interface{}) Krberror {\n\tif e, ok := err.(Krberror); ok {\n\t\tif len(a) > 0 {\n\t\t\te.EText = append([]string{fmt.Sprintf(\"%s: \"+format, et, a)}, e.EText...)\n\t\t\treturn e\n\t\t}\n\t\te.EText = append([]string{fmt.Sprintf(\"%s: \"+format, et)}, e.EText...)\n\t\treturn e\n\t}\n\tif len(a) > 0 {\n\t\treturn NewErrorf(et, format+\": %s\", a, err)\n\t}\n\treturn NewErrorf(et, format+\": %s\", err)\n}\n\n\/\/ NewErrorf creates a new Krberror from a formatted string.\nfunc NewErrorf(et, format string, a ...interface{}) Krberror {\n\tvar s string\n\tif len(a) > 0 {\n\t\ts = fmt.Sprintf(\"%s: %s\", et, fmt.Sprintf(format, a))\n\t} else {\n\t\ts = fmt.Sprintf(\"%s: %s\", et, format)\n\t}\n\treturn Krberror{\n\t\tRootCause: et,\n\t\tEText: []string{s},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package serial\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n\/\/\n\/\/ Grab the constants with the following little program, to avoid using cgo:\n\/\/\n\/\/ #include <stdio.h>\n\/\/ #include <stdlib.h>\n\/\/ #include <linux\/termios.h>\n\/\/\n\/\/ int main(int argc, const char **argv) {\n\/\/ printf(\"TCGETS2 = 0x%08X\\n\", TCGETS2);\n\/\/ printf(\"TCSETS2 = 0x%08X\\n\", TCSETS2);\n\/\/ printf(\"BOTHER = 0x%08X\\n\", BOTHER);\n\/\/ printf(\"NCCS = %d\\n\", NCCS);\n\/\/ return 0;\n\/\/ }\n\/\/\nconst (\n\tTCGETS2 = 0x802C542A\n\tTCSETS2 = 0x402C542B\n\tBOTHER = 0x1000\n\tNCCS = 19\n)\n\n\/\/\n\/\/ Types from asm-generic\/termbits.h\n\/\/\n\ntype cc_t byte\ntype speed_t uint32\ntype tcflag_t uint32\ntype termios2 struct {\n\tc_iflag tcflag_t \/\/ input mode flags\n\tc_oflag tcflag_t \/\/ output mode flags\n\tc_cflag tcflag_t \/\/ control mode flags\n\tc_lflag tcflag_t \/\/ local mode flags\n\tc_line cc_t \/\/ line discipline\n\tc_cc [NCCS]cc_t \/\/ control characters\n\tc_ispeed speed_t \/\/ input speed\n\tc_ospeed speed_t \/\/ output speed\n}\n\n\/\/\n\/\/ Returns a pointer to an instantiates termios2 struct, based on the given\n\/\/ OpenOptions. Termios2 is a Linux extension which allows arbitrary baud rates\n\/\/ to be specified.\n\/\/\nfunc makeTermios2(options OpenOptions) (*termios2, error) {\n\n\t\/\/ Sanity check inter-character timeout and minimum read size options.\n\n\tvtime := uint(round(float64(options.InterCharacterTimeout)\/100.0) * 100)\n\tvmin := options.MinimumReadSize\n\n\tif vmin == 0 && vtime < 100 {\n\t\treturn nil, errors.New(\"invalid values for InterCharacterTimeout and MinimumReadSize\")\n\t}\n\n\tif vtime > 25500 {\n\t\treturn nil, errors.New(\"invalid value for InterCharacterTimeout\")\n\t}\n\n\tccOpts := [NCCS]cc_t{}\n\tccOpts[syscall.VTIME] = cc_t(vtime \/ 100)\n\tccOpts[syscall.VMIN] = cc_t(vmin)\n\n\tt2 := &termios2{\n\t\tc_cflag: syscall.CLOCAL | syscall.CREAD | BOTHER,\n\t\tc_ispeed: speed_t(options.BaudRate),\n\t\tc_ospeed: speed_t(options.BaudRate),\n\t\tc_cc: ccOpts,\n\t}\n\n\tswitch options.StopBits {\n\tcase 1:\n\tcase 2:\n\t\tt2.c_cflag |= syscall.CSTOPB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for StopBits\")\n\t}\n\n\tswitch options.ParityMode {\n\tcase PARITY_NONE:\n\tcase PARITY_ODD:\n\t\tt2.c_cflag |= syscall.PARENB\n\t\tt2.c_cflag |= syscall.PARODD\n\n\tcase PARITY_EVEN:\n\t\tt2.c_cflag |= syscall.PARENB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for ParityMode\")\n\t}\n\n\treturn t2, nil\n}\n\nfunc openInternal(options OpenOptions) (io.ReadWriteCloser, error) {\n\n\tfile, openErr :=\n\t\tos.OpenFile(\n\t\t\toptions.PortName,\n\t\t\tsyscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK,\n\t\t\t0600)\n\tif openErr != nil {\n\t\treturn nil, openErr\n\t}\n\n\t\/\/ Clear the non-blocking flag set above.\n\tnonblockErr := syscall.SetNonblock(int(file.Fd()), false)\n\tif nonblockErr != nil {\n\t\treturn nil, nonblockErr\n\t}\n\n\tt2, optErr := makeTermios2(options)\n\tif optErr != nil {\n\t\treturn nil, optErr\n\t}\n\n\tr, _, errno := syscall.Syscall(\n\t\tsyscall.SYS_IOCTL,\n\t\tuintptr(file.Fd()),\n\t\tuintptr(TCSETS2),\n\t\tuintptr(unsafe.Pointer(t2)))\n\n\tif errno != 0 {\n\t\treturn nil, os.NewSyscallError(\"SYS_IOCTL\", errno)\n\t}\n\n\tif r != 0 {\n\t\treturn nil, errors.New(\"unknown error from SYS_IOCTL\")\n\t}\n\n\treturn file, nil\n}\n<commit_msg>Don't export termios consts<commit_after>package serial\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n\/\/\n\/\/ Grab the constants with the following little program, to avoid using cgo:\n\/\/\n\/\/ #include <stdio.h>\n\/\/ #include <stdlib.h>\n\/\/ #include <linux\/termios.h>\n\/\/\n\/\/ int main(int argc, const char **argv) {\n\/\/ printf(\"TCSETS2 = 0x%08X\\n\", TCSETS2);\n\/\/ printf(\"BOTHER = 0x%08X\\n\", BOTHER);\n\/\/ printf(\"NCCS = %d\\n\", NCCS);\n\/\/ return 0;\n\/\/ }\n\/\/\nconst (\n\tkTCSETS2 = 0x402C542B\n\tkBOTHER = 0x1000\n\tkNCCS = 19\n)\n\n\/\/\n\/\/ Types from asm-generic\/termbits.h\n\/\/\n\ntype cc_t byte\ntype speed_t uint32\ntype tcflag_t uint32\ntype termios2 struct {\n\tc_iflag tcflag_t \/\/ input mode flags\n\tc_oflag tcflag_t \/\/ output mode flags\n\tc_cflag tcflag_t \/\/ control mode flags\n\tc_lflag tcflag_t \/\/ local mode flags\n\tc_line cc_t \/\/ line discipline\n\tc_cc [kNCCS]cc_t \/\/ control characters\n\tc_ispeed speed_t \/\/ input speed\n\tc_ospeed speed_t \/\/ output speed\n}\n\n\/\/\n\/\/ Returns a pointer to an instantiates termios2 struct, based on the given\n\/\/ OpenOptions. Termios2 is a Linux extension which allows arbitrary baud rates\n\/\/ to be specified.\n\/\/\nfunc makeTermios2(options OpenOptions) (*termios2, error) {\n\n\t\/\/ Sanity check inter-character timeout and minimum read size options.\n\n\tvtime := uint(round(float64(options.InterCharacterTimeout)\/100.0) * 100)\n\tvmin := options.MinimumReadSize\n\n\tif vmin == 0 && vtime < 100 {\n\t\treturn nil, errors.New(\"invalid values for InterCharacterTimeout and MinimumReadSize\")\n\t}\n\n\tif vtime > 25500 {\n\t\treturn nil, errors.New(\"invalid value for InterCharacterTimeout\")\n\t}\n\n\tccOpts := [kNCCS]cc_t{}\n\tccOpts[syscall.VTIME] = cc_t(vtime \/ 100)\n\tccOpts[syscall.VMIN] = cc_t(vmin)\n\n\tt2 := &termios2{\n\t\tc_cflag: syscall.CLOCAL | syscall.CREAD | kBOTHER,\n\t\tc_ispeed: speed_t(options.BaudRate),\n\t\tc_ospeed: speed_t(options.BaudRate),\n\t\tc_cc: ccOpts,\n\t}\n\n\tswitch options.StopBits {\n\tcase 1:\n\tcase 2:\n\t\tt2.c_cflag |= syscall.CSTOPB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for StopBits\")\n\t}\n\n\tswitch options.ParityMode {\n\tcase PARITY_NONE:\n\tcase PARITY_ODD:\n\t\tt2.c_cflag |= syscall.PARENB\n\t\tt2.c_cflag |= syscall.PARODD\n\n\tcase PARITY_EVEN:\n\t\tt2.c_cflag |= syscall.PARENB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for ParityMode\")\n\t}\n\n\treturn t2, nil\n}\n\nfunc openInternal(options OpenOptions) (io.ReadWriteCloser, error) {\n\n\tfile, openErr :=\n\t\tos.OpenFile(\n\t\t\toptions.PortName,\n\t\t\tsyscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NONBLOCK,\n\t\t\t0600)\n\tif openErr != nil {\n\t\treturn nil, openErr\n\t}\n\n\t\/\/ Clear the non-blocking flag set above.\n\tnonblockErr := syscall.SetNonblock(int(file.Fd()), false)\n\tif nonblockErr != nil {\n\t\treturn nil, nonblockErr\n\t}\n\n\tt2, optErr := makeTermios2(options)\n\tif optErr != nil {\n\t\treturn nil, optErr\n\t}\n\n\tr, _, errno := syscall.Syscall(\n\t\tsyscall.SYS_IOCTL,\n\t\tuintptr(file.Fd()),\n\t\tuintptr(kTCSETS2),\n\t\tuintptr(unsafe.Pointer(t2)))\n\n\tif errno != 0 {\n\t\treturn nil, os.NewSyscallError(\"SYS_IOCTL\", errno)\n\t}\n\n\tif r != 0 {\n\t\treturn nil, errors.New(\"unknown error from SYS_IOCTL\")\n\t}\n\n\treturn file, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gweb\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t. \"github.com\/eynstudio\/gobreak\"\n\t\"github.com\/eynstudio\/gobreak\/conf\"\n\t\"github.com\/eynstudio\/gobreak\/di\"\n)\n\ntype App struct {\n\tRoot INode\n\tdi.Container\n\tName string\n\t*Cfg\n\tServer *http.Server\n\t*Router\n\t*Tmpl\n\t*Sessions\n}\n\nfunc NewApp(name string) *App {\n\tvar cfg Cfg\n\tconf.MustLoadJsonCfg(&cfg, \"conf\/\"+name+\".json\")\n\treturn NewAppWithCfg(&cfg)\n}\n\nfunc NewAppWithCfg(c *Cfg) *App {\n\tapp := &App{\n\t\tRoot: NewNode(\"\", false),\n\t\tContainer: di.Root,\n\t\tName: \"\",\n\t\tCfg: c,\n\t\tRouter: &Router{},\n\t\tTmpl: &Tmpl{},\n\t\tSessions: &Sessions{},\n\t}\n\n\tif c.UseTmpl {\n\t\tapp.Tmpl.Load()\n\t}\n\tapp.Server = &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", c.Port),\n\t\tHandler: http.HandlerFunc(app.handler),\n\t\tReadTimeout: time.Minute,\n\t\tWriteTimeout: time.Minute,\n\t}\n\tapp.Map(c)\n\treturn app\n}\n\nfunc (p *App) Start() {\n\tp.injectNodes(p.Root)\n\n\tif p.Cfg.Tls {\n\t\terr := p.Server.ListenAndServeTLS(p.Cfg.CertFile, p.Cfg.KeyFile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\terr := p.Server.ListenAndServe()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc (p *App) injectNodes(n INode) {\n\tp.Apply(n)\n\tlog.Printf(\"xxxxxxxxxxxxxxxxxxx,%#v\", n)\n\n\tnodes := n.GetNodes()\n\tfor i, l := 0, len(nodes); i < l; i++ {\n\t\t\/\/\t\tp.Apply(&nodes[i])\n\t\tp.injectNodes(nodes[i])\n\t}\n}\n\nfunc (p *App) NewCtx(r *http.Request, rw http.ResponseWriter) *Ctx {\n\treq := NewReq(r)\n\tresp := &Resp{rw}\n\tc := &Ctx{App: p, Container: di.New(), Req: req, Resp: resp, Scope: M{}}\n\tc.Map(c) \/\/需要吗?\n\tc.Map(resp)\n\tc.Map(req)\n\tc.SetParent(p)\n\treturn c\n}\n\nfunc (p *App) handler(w http.ResponseWriter, r *http.Request) {\n\tctx := p.NewCtx(r, w)\n\tif !ctx.ServeFile() {\n\t\tp.Route(p.Root, ctx)\n\t\tif !ctx.Handled {\n\t\t\tctx.NotFound()\n\t\t}\n\t}\n\n\tif w, ok := ctx.Resp.ResponseWriter.(io.Closer); ok {\n\t\tw.Close()\n\t}\n}\n<commit_msg>rm log info<commit_after>package gweb\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t. \"github.com\/eynstudio\/gobreak\"\n\t\"github.com\/eynstudio\/gobreak\/conf\"\n\t\"github.com\/eynstudio\/gobreak\/di\"\n)\n\ntype App struct {\n\tRoot INode\n\tdi.Container\n\tName string\n\t*Cfg\n\tServer *http.Server\n\t*Router\n\t*Tmpl\n\t*Sessions\n}\n\nfunc NewApp(name string) *App {\n\tvar cfg Cfg\n\tconf.MustLoadJsonCfg(&cfg, \"conf\/\"+name+\".json\")\n\treturn NewAppWithCfg(&cfg)\n}\n\nfunc NewAppWithCfg(c *Cfg) *App {\n\tapp := &App{\n\t\tRoot: NewNode(\"\", false),\n\t\tContainer: di.Root,\n\t\tName: \"\",\n\t\tCfg: c,\n\t\tRouter: &Router{},\n\t\tTmpl: &Tmpl{},\n\t\tSessions: &Sessions{},\n\t}\n\n\tif c.UseTmpl {\n\t\tapp.Tmpl.Load()\n\t}\n\tapp.Server = &http.Server{\n\t\tAddr: fmt.Sprintf(\":%d\", c.Port),\n\t\tHandler: http.HandlerFunc(app.handler),\n\t\tReadTimeout: time.Minute,\n\t\tWriteTimeout: time.Minute,\n\t}\n\tapp.Map(c)\n\treturn app\n}\n\nfunc (p *App) Start() {\n\tp.injectNodes(p.Root)\n\n\tif p.Cfg.Tls {\n\t\terr := p.Server.ListenAndServeTLS(p.Cfg.CertFile, p.Cfg.KeyFile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\terr := p.Server.ListenAndServe()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc (p *App) injectNodes(n INode) {\n\tp.Apply(n)\n\tnodes := n.GetNodes()\n\tfor i, l := 0, len(nodes); i < l; i++ {\n\t\tp.injectNodes(nodes[i])\n\t}\n}\n\nfunc (p *App) NewCtx(r *http.Request, rw http.ResponseWriter) *Ctx {\n\treq := NewReq(r)\n\tresp := &Resp{rw}\n\tc := &Ctx{App: p, Container: di.New(), Req: req, Resp: resp, Scope: M{}}\n\tc.Map(c) \/\/需要吗?\n\tc.Map(resp)\n\tc.Map(req)\n\tc.SetParent(p)\n\treturn c\n}\n\nfunc (p *App) handler(w http.ResponseWriter, r *http.Request) {\n\tctx := p.NewCtx(r, w)\n\tif !ctx.ServeFile() {\n\t\tp.Route(p.Root, ctx)\n\t\tif !ctx.Handled {\n\t\t\tctx.NotFound()\n\t\t}\n\t}\n\n\tif w, ok := ctx.Resp.ResponseWriter.(io.Closer); ok {\n\t\tw.Close()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype Sources struct {\n\tSources []string\n}\n\ntype Output struct {\n\tSource string\n\tParagraphs []string\n\tShowsParagraphs bool\n}\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc getSources() Sources {\n\tsourceDir, err := ioutil.ReadDir(\"lorem\")\n\tcheck(err)\n\n\tsources := Sources{}\n\tfor _, entry := range sourceDir {\n\t\tsources.Sources = append(sources.Sources, entry.Name())\n\t}\n\n\treturn sources\n}\n\nfunc getRandomContent(stripParagraphs bool) (string, []string) {\n\tsources := getSources()\n\tsource := sources.Sources[rand.Intn(len(sources.Sources))]\n\n\tcontent, err := ioutil.ReadFile(\"lorem\/\" + source)\n\tcheck(err)\n\n\tparsedContent := string(content)\n\n\tif stripParagraphs {\n\t\tparsedContent = strings.ReplaceAll(parsedContent, \"<p>\", \"\")\n\t\tparsedContent = strings.ReplaceAll(parsedContent, \"<\/p>\", \"\")\n\t}\n\n\tparsedLines := strings.Split(parsedContent, \"\\n\")\n\n\treturn source, parsedLines\n}\n\nfunc main() {\n\n\ttmpl := template.Must(template.ParseFiles(\"template.html\"))\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsource, content := getRandomContent(true)\n\n\t\toutput := Output{\n\t\t\tSource: strings.TrimSuffix(source, \".txt\"),\n\t\t\tParagraphs: content,\n\t\t\tShowsParagraphs: false,\n\t\t}\n\n\t\ttmpl.Execute(w, output)\n\t})\n\n\thttp.HandleFunc(\"\/p\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsource, content := getRandomContent(false)\n\n\t\toutput := Output{\n\t\t\tSource: strings.TrimSuffix(source, \".txt\"),\n\t\t\tParagraphs: content,\n\t\t\tShowsParagraphs: true,\n\t\t}\n\n\t\ttmpl.Execute(w, output)\n\t})\n\n\thttp.ListenAndServe(\":80\", nil)\n}\n<commit_msg>Shuffle paragraphs before the output<commit_after>package main\n\nimport (\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Sources struct {\n\tSources []string\n}\n\ntype Output struct {\n\tSource string\n\tParagraphs []string\n\tShowsParagraphs bool\n}\n\nfunc check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\t}\n}\n\nfunc getSources() Sources {\n\tsourceDir, err := ioutil.ReadDir(\"lorem\")\n\tcheck(err)\n\n\tsources := Sources{}\n\tfor _, entry := range sourceDir {\n\t\tsources.Sources = append(sources.Sources, entry.Name())\n\t}\n\n\treturn sources\n}\n\nfunc getRandomContent(stripParagraphs bool) (string, []string) {\n\tsources := getSources()\n\tsource := sources.Sources[rand.Intn(len(sources.Sources))]\n\n\tcontent, err := ioutil.ReadFile(\"lorem\/\" + source)\n\tcheck(err)\n\n\tparsedContent := string(content)\n\n\tif stripParagraphs {\n\t\tparsedContent = strings.ReplaceAll(parsedContent, \"<p>\", \"\")\n\t\tparsedContent = strings.ReplaceAll(parsedContent, \"<\/p>\", \"\")\n\t}\n\n\tparsedLines := strings.Split(parsedContent, \"\\n\")\n\n\trand.Seed(time.Now().UnixNano())\n\trand.Shuffle(len(parsedLines), func(i, j int) {\n\t\tparsedLines[i], parsedLines[j] = parsedLines[j], parsedLines[i]\n\t})\n\n\treturn source, parsedLines\n}\n\nfunc main() {\n\n\ttmpl := template.Must(template.ParseFiles(\"template.html\"))\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsource, content := getRandomContent(true)\n\n\t\toutput := Output{\n\t\t\tSource: strings.TrimSuffix(source, \".txt\"),\n\t\t\tParagraphs: content,\n\t\t\tShowsParagraphs: false,\n\t\t}\n\n\t\ttmpl.Execute(w, output)\n\t})\n\n\thttp.HandleFunc(\"\/p\", func(w http.ResponseWriter, r *http.Request) {\n\t\tsource, content := getRandomContent(false)\n\n\t\toutput := Output{\n\t\t\tSource: strings.TrimSuffix(source, \".txt\"),\n\t\t\tParagraphs: content,\n\t\t\tShowsParagraphs: true,\n\t\t}\n\n\t\ttmpl.Execute(w, output)\n\t})\n\n\thttp.ListenAndServe(\":80\", nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/fatih\/color\"\n)\n\nvar (\n\ttraceLogger *log.Logger \/\/ Just about anything\n\tcommandLogger *log.Logger \/\/ Important information\n\tinfoLogger *log.Logger \/\/ Important information\n\terrorLogger *log.Logger \/\/ Critical problem\n\n\t\/\/ Defining color functions for loggers\n\tred = color.New(color.FgRed).SprintFunc()\n\tcyan = color.New(color.FgCyan).SprintFunc()\n\tyellow = color.New(color.FgYellow).SprintFunc()\n)\n\nfunc init() {\n\n\t\/\/ Initialising loggers\n\ttraceLogger = log.New(ioutil.Discard,\n\t\t\"TRACE: \",\n\t\tlog.Ldate|log.Lmicroseconds)\n\n\tcommandLogger = log.New(os.Stdout,\n\t\tyellow(\"> \"),\n\t\t0)\n\n\tinfoLogger = log.New(os.Stdout,\n\t\tcyan(\"INFO: \"),\n\t\t0)\n\n\terrorLogger = log.New(os.Stderr,\n\t\tred(\"ERROR: \"),\n\t\t0)\n}\n\n\/\/ Log commands\nfunc Command(message string) {\n\tcommandLogger.Printf(\"%s\\n\", yellow(message))\n}\n\n\/\/ Log important information\nfunc Info(message string) {\n\tinfoLogger.Printf(\"%s\\n\", message)\n}\n\n\/\/ Log error information and exit\nfunc Error(err error) {\n\terrorLogger.Fatalf(\"%s\\n\", err)\n}\n\n\/\/ Log anything to trace\nfunc Trace(message interface{}) {\n\ttraceLogger.Printf(\"%+v\\n\", message)\n}\n\n\/\/ Change Trace ouput writer, default is ioutil.Discard.\nfunc SetTraceOutput(w io.Writer) {\n\ttraceLogger.SetOutput(w)\n}\n<commit_msg>Use color.Output to display colors in windows<commit_after>package logger\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\n\t\"github.com\/fatih\/color\"\n)\n\nvar (\n\ttraceLogger *log.Logger \/\/ Just about anything\n\tcommandLogger *log.Logger \/\/ Important information\n\tinfoLogger *log.Logger \/\/ Important information\n\terrorLogger *log.Logger \/\/ Critical problem\n\n\t\/\/ Defining color functions for loggers\n\tred = color.New(color.FgRed).SprintFunc()\n\tcyan = color.New(color.FgCyan).SprintFunc()\n\tyellow = color.New(color.FgYellow).SprintFunc()\n)\n\nfunc init() {\n\n\t\/\/ Initialising loggers\n\ttraceLogger = log.New(ioutil.Discard,\n\t\t\"TRACE: \",\n\t\tlog.Ldate|log.Lmicroseconds)\n\n\tcommandLogger = log.New(color.Output,\n\t\tyellow(\"> \"),\n\t\t0)\n\n\tinfoLogger = log.New(color.Output,\n\t\tcyan(\"INFO: \"),\n\t\t0)\n\n\terrorLogger = log.New(color.Output,\n\t\tred(\"ERROR: \"),\n\t\t0)\n}\n\n\/\/ Log commands\nfunc Command(message string) {\n\tcommandLogger.Printf(\"%s\\n\", yellow(message))\n}\n\n\/\/ Log important information\nfunc Info(message string) {\n\tinfoLogger.Printf(\"%s\\n\", message)\n}\n\n\/\/ Log error information and exit\nfunc Error(err error) {\n\terrorLogger.Fatalf(\"%s\\n\", err)\n}\n\n\/\/ Log anything to trace\nfunc Trace(message interface{}) {\n\ttraceLogger.Printf(\"%+v\\n\", message)\n}\n\n\/\/ Change Trace ouput writer, default is ioutil.Discard.\nfunc SetTraceOutput(w io.Writer) {\n\ttraceLogger.SetOutput(w)\n}\n<|endoftext|>"} {"text":"<commit_before>package logging\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype SyslogLine struct {\n\tRaw string `json:\"raw,omitempty\"`\n\tTime time.Time `json:\"time,omitempty\"`\n\tHost string `json:\"host,omitempty\"`\n\tTag string `json:\"tag,omitempty\"`\n\tSeverity string `json:\"severity,omitempty\"`\n\tPid int `json:\"pid,omitempty\"`\n\tPort string `json:\"port,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n\n\tfields []string\n\tparsed bool\n\ttags map[string]interface{}\n\ttagsParsed bool\n}\n\nfunc (line *SyslogLine) TagString(key string) string {\n\treturn fmt.Sprint(line.Tags()[key])\n}\n\nvar validKeyRegexp = regexp.MustCompile(\"(?i)^[a-z]+$\")\nvar callsRegexp = regexp.MustCompile(\"^([0-9.]+)\\\\\/([0-9]+)$\")\n\nfunc removeQuotes(raw string) string {\n\tif strings.HasPrefix(raw, `\"`) && strings.HasSuffix(raw, `\"`) {\n\t\treturn raw[1 : len(raw)-1]\n\t}\n\treturn raw\n}\n\nfunc Fields(line string) []string {\n\tfields := strings.Fields(line)\n\tinQuotes := false\n\tsep := `\"`\n\tcurrent := \"\"\n\tout := []string{}\n\tfor _, f := range fields {\n\t\tif strings.Contains(f, sep) {\n\t\t\tcnt := strings.Count(f, sep)\n\t\t\treplaced := strings.Replace(f, sep, \"\", -1)\n\t\t\tif cnt == 2 {\n\t\t\t\tout = append(out, replaced)\n\t\t\t\tcontinue\n\t\t\t} else if cnt != 1 {\n\t\t\t\tpanic(\"cnt != 1 or 2 not supported (yet)\")\n\t\t\t}\n\t\t\tswitch inQuotes {\n\t\t\tcase true:\n\t\t\t\tout = append(out, current+\" \"+replaced)\n\t\t\t\tinQuotes = false\n\t\t\tcase false:\n\t\t\t\tcurrent = replaced\n\t\t\t\tinQuotes = true\n\t\t\t}\n\t\t} else if inQuotes {\n\t\t\tcurrent = current + \" \" + f\n\t\t} else {\n\t\t\tout = append(out, f)\n\t\t}\n\t}\n\treturn out\n}\n\nfunc parseTags(raw string) map[string]interface{} {\n\tt := map[string]interface{}{}\n\tfor _, field := range Fields(raw) {\n\t\tkv := strings.SplitN(field, \"=\", 2)\n\t\tif len(kv) == 2 && validKeyRegexp.MatchString(kv[0]) {\n\t\t\tcurrentKey := kv[0]\n\t\t\tvalue := kv[1]\n\t\t\tm := callsRegexp.FindStringSubmatch(value)\n\t\t\tif len(m) == 3 {\n\t\t\t\ttotalTime, e := strconv.ParseFloat(m[1], 64)\n\t\t\t\tif e == nil {\n\t\t\t\t\tcalls, e := strconv.ParseInt(m[2], 10, 64)\n\t\t\t\t\tif e == nil {\n\t\t\t\t\t\tt[currentKey+\"_time\"] = totalTime\n\t\t\t\t\t\tt[currentKey+\"_calls\"] = calls\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt[currentKey] = parseTagValue(value)\n\t\t\t\tcurrentKey = \"\"\n\t\t\t}\n\t\t}\n\t}\n\treturn t\n}\n\nfunc (line *SyslogLine) Tags() (t map[string]interface{}) {\n\tif !line.tagsParsed {\n\t\tline.tags = parseTags(line.Raw)\n\t}\n\treturn line.tags\n}\n\nfunc parseTagValue(raw string) interface{} {\n\tif i, e := strconv.ParseInt(raw, 10, 64); e == nil {\n\t\treturn i\n\t} else if f, e := strconv.ParseFloat(raw, 64); e == nil {\n\t\treturn f\n\t}\n\treturn removeQuotes(raw)\n}\n\nconst (\n\ttimeLayout = \"2006-01-02T15:04:05.000000-07:00\"\n\ttimeLayoutWithoutMicro = \"2006-01-02T15:04:05-07:00\"\n)\n\nvar TagRegexp = regexp.MustCompile(\"(.*?)\\\\[(\\\\d*)\\\\]\")\n\nfunc (line *SyslogLine) Parse(raw string) (e error) {\n\tif line.parsed {\n\t\treturn nil\n\t}\n\tline.Raw = raw\n\tline.fields = strings.Fields(raw)\n\tif len(line.fields) >= 3 {\n\t\tline.Time, e = time.Parse(timeLayout, line.fields[0])\n\t\tif e != nil {\n\t\t\tline.Time, e = time.Parse(timeLayoutWithoutMicro, line.fields[0])\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\tline.Host = line.fields[1]\n\t\tline.Tag, line.Port, line.Severity, line.Pid = parseTag(line.fields[2])\n\t\tif len(line.fields) > 3 {\n\t\t\tline.Message = strings.Join(line.fields[3:], \" \")\n\t\t}\n\t}\n\tline.parsed = true\n\treturn nil\n}\n\nfunc parseTag(raw string) (tag, port, severity string, pid int) {\n\ttagAndSeverity, pid := splitTagAndPid(raw)\n\ttag, port, severity = splitTagAndSeverity(tagAndSeverity)\n\treturn tag, port, severity, pid\n}\n\nfunc splitTagAndSeverity(raw string) (tag, port, severity string) {\n\ttag = raw\n\tparts := strings.Split(raw, \".\")\n\tif len(parts) == 3 {\n\t\t\/\/ seems to be mongodb\n\t\ttag, port, severity = parts[0], parts[1], parts[2]\n\n\t} else if len(parts) == 2 {\n\t\ttag, severity = parts[0], parts[1]\n\t} else {\n\t\ttag = raw\n\t}\n\treturn tag, port, severity\n}\n\nfunc splitTagAndPid(raw string) (tag string, pid int) {\n\ttag = raw\n\tchunks := TagRegexp.FindStringSubmatch(raw)\n\tif len(chunks) > 2 {\n\t\ttag = chunks[1]\n\t\tpid, _ = strconv.Atoi(chunks[2])\n\t} else {\n\t\tif tag[len(tag)-1] == ':' {\n\t\t\ttag = tag[0 : len(tag)-1]\n\t\t}\n\t}\n\treturn tag, pid\n}\n\nvar UUIDRegexp = regexp.MustCompile(\"([a-z0-9\\\\-]{36})\")\n\ntype UnicornLine struct {\n\tUUID string\n\tSyslogLine\n}\n\nfunc (line *UnicornLine) Parse(raw string) error {\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"unicorn\" {\n\t\treturn fmt.Errorf(\"tag %q not supported\", line.Tag)\n\t}\n\tif len(line.fields) >= 4 {\n\t\tparts := UUIDRegexp.FindStringSubmatch(raw)\n\t\tif len(parts) > 1 {\n\t\t\tline.UUID = parts[1]\n\t\t}\n\t}\n\treturn nil\n}\n\ntype NginxLine struct {\n\t*SyslogLine\n\tXForwardedFor []string `json:\"x_forwarded_for,omitempty\"`\n\tMethod string `json:\"method,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tLength int `json:\"length,omitempty\"`\n\tTotalTime float64 `json:\"total_time,omitempty\"`\n\tUnicornTime float64 `json:\"unicorn_time,omitempty\"`\n\tHttpHost string `json:\"http_host,omitempty\"`\n\tUserAgentName string `json:\"user_agent_name,omitempty\"`\n\tUri string `json:\"uri,omitempty\"`\n\tReferer string `json:\"referer,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tRevision string `json:\"revision,omitempty\"`\n\tUUID string `json:\"uuid,omitempty\"`\n\tEtag string `json:\"etag,omitempty\"`\n}\n\nvar quotesRegexp = regexp.MustCompile(`(ua|uri|ref)=\"(.*?)\"`)\n\nfunc (line *NginxLine) Parse(raw string) error {\n\tif line.SyslogLine == nil {\n\t\tline.SyslogLine = &SyslogLine{}\n\t}\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"ssl_endpoint\" && line.Tag != \"nginx\" {\n\t\treturn fmt.Errorf(\"tag %q not supported\", line.Tag)\n\t}\n\tforwarded := false\n\tfor i, field := range line.fields {\n\t\tif forwarded && strings.Contains(field, \"=\") {\n\t\t\tforwarded = false\n\t\t}\n\t\tparts := strings.SplitN(field, \"=\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tkey := parts[0]\n\t\t\tvalue := parts[1]\n\t\t\tswitch key {\n\t\t\tcase \"rev\":\n\t\t\t\tline.Revision = value\n\t\t\tcase \"action\":\n\t\t\t\tline.Action = value\n\t\t\tcase \"nginx\":\n\t\t\t\tif !strings.Contains(line.Raw, \"forwarded=\") {\n\t\t\t\t\tforwarded = true\n\t\t\t\t}\n\t\t\tcase \"forwarded\":\n\t\t\t\tforwarded = false\n\t\t\t\tline.XForwardedFor = []string{parts[1]}\n\t\t\tcase \"method\":\n\t\t\t\tline.Method = value\n\t\t\tcase \"uuid\":\n\t\t\t\tline.UUID = filterDash(value)\n\t\t\tcase \"etag\":\n\t\t\t\tline.Etag = filterDash(value)\n\t\t\tcase \"status\":\n\t\t\t\tline.Status = value\n\t\t\tcase \"host\":\n\t\t\t\tline.HttpHost = value\n\t\t\tcase \"length\":\n\t\t\t\tline.Length, _ = strconv.Atoi(value)\n\t\t\tcase \"total\":\n\t\t\t\tline.TotalTime, _ = strconv.ParseFloat(value, 64)\n\t\t\tcase \"unicorn_time\":\n\t\t\t\tline.UnicornTime, _ = strconv.ParseFloat(value, 64)\n\t\t\t}\n\t\t} else if i == 2 && strings.HasPrefix(field, \"nginx\") {\n\t\t\tif !strings.Contains(line.Raw, \"forwarded=\") {\n\t\t\t\tforwarded = true\n\t\t\t}\n\t\t} else if forwarded {\n\t\t\tif strings.HasPrefix(field, \"host\") || field == \"-\" {\n\t\t\t\tforwarded = false\n\t\t\t} else {\n\t\t\t\tline.XForwardedFor = append(line.XForwardedFor, strings.TrimSuffix(field, \",\"))\n\t\t\t}\n\n\t\t}\n\t}\n\tquotes := quotesRegexp.FindAllStringSubmatch(raw, -1)\n\tfor _, quote := range quotes {\n\t\tswitch quote[1] {\n\t\tcase \"ua\":\n\t\t\tline.UserAgentName = quote[2]\n\t\tcase \"uri\":\n\t\t\tline.Uri = quote[2]\n\t\tcase \"ref\":\n\t\t\tline.Referer = filterDash(quote[2])\n\t\tdefault:\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc filterDash(raw string) string {\n\tif raw == \"-\" {\n\t\treturn \"\"\n\t}\n\treturn raw\n}\n\ntype HAProxyLine struct {\n\tSyslogLine\n\tFrontend string\n\tBackend string\n\tBackendHost string\n\tBackendImageId string\n\tBackendContainerId string\n\tStatus string\n\tLength int\n\tClientRequestTime int\n\tConnectionQueueTime int\n\tTcpConnectTime int\n\tServerResponseTime int\n\tSessionDurationTime int\n\tActiveConnections int\n\tFrontendConnections int\n\tBackendConnectons int\n\tServerConnections int\n\tRetries int\n\tServerQueue int\n\tBackendQueue int\n\tMethod string\n\tUri string\n}\n\nfunc (line *HAProxyLine) Parse(raw string) error {\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"haproxy\" {\n\t\treturn fmt.Errorf(\"tag was %s\", line.Tag)\n\t}\n\tif len(line.fields) > 16 {\n\t\tline.Frontend = line.fields[5]\n\t\tbackend := line.fields[6]\n\t\tparts := strings.SplitN(backend, \"\/\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tline.Backend = parts[0]\n\t\t\tbackendContainer := parts[1]\n\t\t\tparts := strings.Split(backendContainer, \":\")\n\t\t\tif len(parts) == 3 {\n\t\t\t\tline.BackendHost = parts[0]\n\t\t\t\tline.BackendImageId = parts[1]\n\t\t\t\tline.BackendContainerId = parts[2]\n\t\t\t}\n\t\t}\n\t\ttimes := line.fields[7]\n\t\tparts = strings.Split(times, \"\/\")\n\t\tif len(parts) == 5 {\n\t\t\tline.ClientRequestTime, _ = strconv.Atoi(parts[0])\n\t\t\tline.ConnectionQueueTime, _ = strconv.Atoi(parts[1])\n\t\t\tline.TcpConnectTime, _ = strconv.Atoi(parts[2])\n\t\t\tline.ServerResponseTime, _ = strconv.Atoi(parts[3])\n\t\t\tline.SessionDurationTime, _ = strconv.Atoi(parts[4])\n\t\t}\n\t\tline.Status = line.fields[8]\n\t\tline.Length, _ = strconv.Atoi(line.fields[9])\n\n\t\tconnections := line.fields[13]\n\t\tparts = strings.Split(connections, \"\/\")\n\t\tif len(parts) == 5 {\n\t\t\tline.ActiveConnections, _ = strconv.Atoi(parts[0])\n\t\t\tline.FrontendConnections, _ = strconv.Atoi(parts[1])\n\t\t\tline.BackendConnectons, _ = strconv.Atoi(parts[2])\n\t\t\tline.ServerConnections, _ = strconv.Atoi(parts[3])\n\t\t\tline.Retries, _ = strconv.Atoi(parts[4])\n\t\t}\n\n\t\tqueues := line.fields[14]\n\t\tparts = strings.Split(queues, \"\/\")\n\t\tif len(parts) == 2 {\n\t\t\tline.ServerQueue, _ = strconv.Atoi(parts[0])\n\t\t\tline.BackendQueue, _ = strconv.Atoi(parts[1])\n\t\t}\n\t\tline.Method = line.fields[15][1:]\n\t\tline.Uri = line.fields[16]\n\t}\n\treturn nil\n}\n<commit_msg>use shellwords for line parsing<commit_after>package logging\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tshellwords \"github.com\/mattn\/go-shellwords\"\n)\n\ntype SyslogLine struct {\n\tRaw string `json:\"raw,omitempty\"`\n\tTime time.Time `json:\"time,omitempty\"`\n\tHost string `json:\"host,omitempty\"`\n\tTag string `json:\"tag,omitempty\"`\n\tSeverity string `json:\"severity,omitempty\"`\n\tPid int `json:\"pid,omitempty\"`\n\tPort string `json:\"port,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n\n\tfields []string\n\tparsed bool\n\ttags map[string]interface{}\n\ttagsParsed bool\n}\n\nfunc (line *SyslogLine) TagString(key string) string {\n\treturn fmt.Sprint(line.Tags()[key])\n}\n\nvar validKeyRegexp = regexp.MustCompile(\"(?i)^[a-z]+$\")\nvar callsRegexp = regexp.MustCompile(\"^([0-9.]+)\\\\\/([0-9]+)$\")\n\nfunc removeQuotes(raw string) string {\n\tif strings.HasPrefix(raw, `\"`) && strings.HasSuffix(raw, `\"`) {\n\t\treturn raw[1 : len(raw)-1]\n\t}\n\treturn raw\n}\n\nfunc Fields(line string) []string {\n\tlist, err := shellwords.Parse(line)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn list\n}\n\nfunc parseTags(raw string) map[string]interface{} {\n\tt := map[string]interface{}{}\n\tfor _, field := range Fields(raw) {\n\t\tkv := strings.SplitN(field, \"=\", 2)\n\t\tif len(kv) == 2 && validKeyRegexp.MatchString(kv[0]) {\n\t\t\tcurrentKey := kv[0]\n\t\t\tvalue := kv[1]\n\t\t\tm := callsRegexp.FindStringSubmatch(value)\n\t\t\tif len(m) == 3 {\n\t\t\t\ttotalTime, e := strconv.ParseFloat(m[1], 64)\n\t\t\t\tif e == nil {\n\t\t\t\t\tcalls, e := strconv.ParseInt(m[2], 10, 64)\n\t\t\t\t\tif e == nil {\n\t\t\t\t\t\tt[currentKey+\"_time\"] = totalTime\n\t\t\t\t\t\tt[currentKey+\"_calls\"] = calls\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt[currentKey] = parseTagValue(value)\n\t\t\t\tcurrentKey = \"\"\n\t\t\t}\n\t\t}\n\t}\n\treturn t\n}\n\nfunc (line *SyslogLine) Tags() (t map[string]interface{}) {\n\tif !line.tagsParsed {\n\t\tline.tags = parseTags(line.Raw)\n\t}\n\treturn line.tags\n}\n\nfunc parseTagValue(raw string) interface{} {\n\tif i, e := strconv.ParseInt(raw, 10, 64); e == nil {\n\t\treturn i\n\t} else if f, e := strconv.ParseFloat(raw, 64); e == nil {\n\t\treturn f\n\t}\n\treturn removeQuotes(raw)\n}\n\nconst (\n\ttimeLayout = \"2006-01-02T15:04:05.000000-07:00\"\n\ttimeLayoutWithoutMicro = \"2006-01-02T15:04:05-07:00\"\n)\n\nvar TagRegexp = regexp.MustCompile(\"(.*?)\\\\[(\\\\d*)\\\\]\")\n\nfunc (line *SyslogLine) Parse(raw string) (e error) {\n\tif line.parsed {\n\t\treturn nil\n\t}\n\tline.Raw = raw\n\tline.fields = strings.Fields(raw)\n\tif len(line.fields) >= 3 {\n\t\tline.Time, e = time.Parse(timeLayout, line.fields[0])\n\t\tif e != nil {\n\t\t\tline.Time, e = time.Parse(timeLayoutWithoutMicro, line.fields[0])\n\t\t\tif e != nil {\n\t\t\t\treturn e\n\t\t\t}\n\t\t}\n\t\tline.Host = line.fields[1]\n\t\tline.Tag, line.Port, line.Severity, line.Pid = parseTag(line.fields[2])\n\t\tif len(line.fields) > 3 {\n\t\t\tline.Message = strings.Join(line.fields[3:], \" \")\n\t\t}\n\t}\n\tline.parsed = true\n\treturn nil\n}\n\nfunc parseTag(raw string) (tag, port, severity string, pid int) {\n\ttagAndSeverity, pid := splitTagAndPid(raw)\n\ttag, port, severity = splitTagAndSeverity(tagAndSeverity)\n\treturn tag, port, severity, pid\n}\n\nfunc splitTagAndSeverity(raw string) (tag, port, severity string) {\n\ttag = raw\n\tparts := strings.Split(raw, \".\")\n\tif len(parts) == 3 {\n\t\t\/\/ seems to be mongodb\n\t\ttag, port, severity = parts[0], parts[1], parts[2]\n\n\t} else if len(parts) == 2 {\n\t\ttag, severity = parts[0], parts[1]\n\t} else {\n\t\ttag = raw\n\t}\n\treturn tag, port, severity\n}\n\nfunc splitTagAndPid(raw string) (tag string, pid int) {\n\ttag = raw\n\tchunks := TagRegexp.FindStringSubmatch(raw)\n\tif len(chunks) > 2 {\n\t\ttag = chunks[1]\n\t\tpid, _ = strconv.Atoi(chunks[2])\n\t} else {\n\t\tif tag[len(tag)-1] == ':' {\n\t\t\ttag = tag[0 : len(tag)-1]\n\t\t}\n\t}\n\treturn tag, pid\n}\n\nvar UUIDRegexp = regexp.MustCompile(\"([a-z0-9\\\\-]{36})\")\n\ntype UnicornLine struct {\n\tUUID string\n\tSyslogLine\n}\n\nfunc (line *UnicornLine) Parse(raw string) error {\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"unicorn\" {\n\t\treturn fmt.Errorf(\"tag %q not supported\", line.Tag)\n\t}\n\tif len(line.fields) >= 4 {\n\t\tparts := UUIDRegexp.FindStringSubmatch(raw)\n\t\tif len(parts) > 1 {\n\t\t\tline.UUID = parts[1]\n\t\t}\n\t}\n\treturn nil\n}\n\ntype NginxLine struct {\n\t*SyslogLine\n\tXForwardedFor []string `json:\"x_forwarded_for,omitempty\"`\n\tMethod string `json:\"method,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tLength int `json:\"length,omitempty\"`\n\tTotalTime float64 `json:\"total_time,omitempty\"`\n\tUnicornTime float64 `json:\"unicorn_time,omitempty\"`\n\tHttpHost string `json:\"http_host,omitempty\"`\n\tUserAgentName string `json:\"user_agent_name,omitempty\"`\n\tUri string `json:\"uri,omitempty\"`\n\tReferer string `json:\"referer,omitempty\"`\n\tAction string `json:\"action,omitempty\"`\n\tRevision string `json:\"revision,omitempty\"`\n\tUUID string `json:\"uuid,omitempty\"`\n\tEtag string `json:\"etag,omitempty\"`\n}\n\nvar quotesRegexp = regexp.MustCompile(`(ua|uri|ref)=\"(.*?)\"`)\n\nfunc (line *NginxLine) Parse(raw string) error {\n\tif line.SyslogLine == nil {\n\t\tline.SyslogLine = &SyslogLine{}\n\t}\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"ssl_endpoint\" && line.Tag != \"nginx\" {\n\t\treturn fmt.Errorf(\"tag %q not supported\", line.Tag)\n\t}\n\tforwarded := false\n\tfor i, field := range line.fields {\n\t\tif forwarded && strings.Contains(field, \"=\") {\n\t\t\tforwarded = false\n\t\t}\n\t\tparts := strings.SplitN(field, \"=\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tkey := parts[0]\n\t\t\tvalue := parts[1]\n\t\t\tswitch key {\n\t\t\tcase \"rev\":\n\t\t\t\tline.Revision = value\n\t\t\tcase \"action\":\n\t\t\t\tline.Action = value\n\t\t\tcase \"nginx\":\n\t\t\t\tif !strings.Contains(line.Raw, \"forwarded=\") {\n\t\t\t\t\tforwarded = true\n\t\t\t\t}\n\t\t\tcase \"forwarded\":\n\t\t\t\tforwarded = false\n\t\t\t\tline.XForwardedFor = []string{parts[1]}\n\t\t\tcase \"method\":\n\t\t\t\tline.Method = value\n\t\t\tcase \"uuid\":\n\t\t\t\tline.UUID = filterDash(value)\n\t\t\tcase \"etag\":\n\t\t\t\tline.Etag = filterDash(value)\n\t\t\tcase \"status\":\n\t\t\t\tline.Status = value\n\t\t\tcase \"host\":\n\t\t\t\tline.HttpHost = value\n\t\t\tcase \"length\":\n\t\t\t\tline.Length, _ = strconv.Atoi(value)\n\t\t\tcase \"total\":\n\t\t\t\tline.TotalTime, _ = strconv.ParseFloat(value, 64)\n\t\t\tcase \"unicorn_time\":\n\t\t\t\tline.UnicornTime, _ = strconv.ParseFloat(value, 64)\n\t\t\t}\n\t\t} else if i == 2 && strings.HasPrefix(field, \"nginx\") {\n\t\t\tif !strings.Contains(line.Raw, \"forwarded=\") {\n\t\t\t\tforwarded = true\n\t\t\t}\n\t\t} else if forwarded {\n\t\t\tif strings.HasPrefix(field, \"host\") || field == \"-\" {\n\t\t\t\tforwarded = false\n\t\t\t} else {\n\t\t\t\tline.XForwardedFor = append(line.XForwardedFor, strings.TrimSuffix(field, \",\"))\n\t\t\t}\n\n\t\t}\n\t}\n\tquotes := quotesRegexp.FindAllStringSubmatch(raw, -1)\n\tfor _, quote := range quotes {\n\t\tswitch quote[1] {\n\t\tcase \"ua\":\n\t\t\tline.UserAgentName = quote[2]\n\t\tcase \"uri\":\n\t\t\tline.Uri = quote[2]\n\t\tcase \"ref\":\n\t\t\tline.Referer = filterDash(quote[2])\n\t\tdefault:\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc filterDash(raw string) string {\n\tif raw == \"-\" {\n\t\treturn \"\"\n\t}\n\treturn raw\n}\n\ntype HAProxyLine struct {\n\tSyslogLine\n\tFrontend string\n\tBackend string\n\tBackendHost string\n\tBackendImageId string\n\tBackendContainerId string\n\tStatus string\n\tLength int\n\tClientRequestTime int\n\tConnectionQueueTime int\n\tTcpConnectTime int\n\tServerResponseTime int\n\tSessionDurationTime int\n\tActiveConnections int\n\tFrontendConnections int\n\tBackendConnectons int\n\tServerConnections int\n\tRetries int\n\tServerQueue int\n\tBackendQueue int\n\tMethod string\n\tUri string\n}\n\nfunc (line *HAProxyLine) Parse(raw string) error {\n\te := line.SyslogLine.Parse(raw)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif line.Tag != \"haproxy\" {\n\t\treturn fmt.Errorf(\"tag was %s\", line.Tag)\n\t}\n\tif len(line.fields) > 16 {\n\t\tline.Frontend = line.fields[5]\n\t\tbackend := line.fields[6]\n\t\tparts := strings.SplitN(backend, \"\/\", 2)\n\t\tif len(parts) == 2 {\n\t\t\tline.Backend = parts[0]\n\t\t\tbackendContainer := parts[1]\n\t\t\tparts := strings.Split(backendContainer, \":\")\n\t\t\tif len(parts) == 3 {\n\t\t\t\tline.BackendHost = parts[0]\n\t\t\t\tline.BackendImageId = parts[1]\n\t\t\t\tline.BackendContainerId = parts[2]\n\t\t\t}\n\t\t}\n\t\ttimes := line.fields[7]\n\t\tparts = strings.Split(times, \"\/\")\n\t\tif len(parts) == 5 {\n\t\t\tline.ClientRequestTime, _ = strconv.Atoi(parts[0])\n\t\t\tline.ConnectionQueueTime, _ = strconv.Atoi(parts[1])\n\t\t\tline.TcpConnectTime, _ = strconv.Atoi(parts[2])\n\t\t\tline.ServerResponseTime, _ = strconv.Atoi(parts[3])\n\t\t\tline.SessionDurationTime, _ = strconv.Atoi(parts[4])\n\t\t}\n\t\tline.Status = line.fields[8]\n\t\tline.Length, _ = strconv.Atoi(line.fields[9])\n\n\t\tconnections := line.fields[13]\n\t\tparts = strings.Split(connections, \"\/\")\n\t\tif len(parts) == 5 {\n\t\t\tline.ActiveConnections, _ = strconv.Atoi(parts[0])\n\t\t\tline.FrontendConnections, _ = strconv.Atoi(parts[1])\n\t\t\tline.BackendConnectons, _ = strconv.Atoi(parts[2])\n\t\t\tline.ServerConnections, _ = strconv.Atoi(parts[3])\n\t\t\tline.Retries, _ = strconv.Atoi(parts[4])\n\t\t}\n\n\t\tqueues := line.fields[14]\n\t\tparts = strings.Split(queues, \"\/\")\n\t\tif len(parts) == 2 {\n\t\t\tline.ServerQueue, _ = strconv.Atoi(parts[0])\n\t\t\tline.BackendQueue, _ = strconv.Atoi(parts[1])\n\t\t}\n\t\tline.Method = line.fields[15][1:]\n\t\tline.Uri = line.fields[16]\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package stack implements utilities to capture, manipulate, and format call\n\/\/ stacks. It provides a simpler API than package runtime.\n\/\/\n\/\/ The implementation takes care of the minutia and special cases of\n\/\/ interpreting the program counter (pc) values returned by runtime.Callers.\n\/\/\n\/\/ Package stack's types implement fmt.Formatter, which provides a simple and\n\/\/ flexible way to declaratively configure formatting when used with logging\n\/\/ or error tracking packages.\npackage stack\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Call records a single function invocation from a goroutine stack.\ntype Call struct {\n\tfn *runtime.Func\n\tpc uintptr\n}\n\n\/\/ Caller returns a Call from the stack of the current goroutine. The argument\n\/\/ skip is the number of stack frames to ascend, with 0 identifying the\n\/\/ calling function.\nfunc Caller(skip int) Call {\n\tvar pcs [2]uintptr\n\tn := runtime.Callers(skip+1, pcs[:])\n\n\tvar c Call\n\n\tif n < 2 {\n\t\treturn c\n\t}\n\n\tc.pc = pcs[1]\n\tif runtime.FuncForPC(pcs[0]) != sigpanic {\n\t\tc.pc--\n\t}\n\tc.fn = runtime.FuncForPC(c.pc)\n\treturn c\n}\n\n\/\/ String implements fmt.Stinger. It is equivalent to fmt.Sprintf(\"%v\", c).\nfunc (c Call) String() string {\n\treturn fmt.Sprint(c)\n}\n\n\/\/ MarshalText implements encoding.TextMarshaler. It formats the Call the same\n\/\/ as fmt.Sprintf(\"%v\", c).\nfunc (c Call) MarshalText() ([]byte, error) {\n\tif c.fn == nil {\n\t\treturn nil, ErrNoFunc\n\t}\n\tbuf := bytes.Buffer{}\n\tfmt.Fprint(&buf, c)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ ErrNoFunc means that the Call has a nil *runtime.Func. The most likely\n\/\/ cause is a Call with the zero value.\nvar ErrNoFunc = errors.New(\"no call stack information\")\n\n\/\/ Format implements fmt.Formatter with support for the following verbs.\n\/\/\n\/\/ %s source file\n\/\/ %d line number\n\/\/ %n function name\n\/\/ %v equivalent to %s:%d\n\/\/\n\/\/ It accepts the '+' and '#' flags for most of the verbs as follows.\n\/\/\n\/\/ %+s path of source file relative to the compile time GOPATH\n\/\/ %#s full path of source file\n\/\/ %+n import path qualified function name\n\/\/ %+v equivalent to %+s:%d\n\/\/ %#v equivalent to %#s:%d\nfunc (c Call) Format(s fmt.State, verb rune) {\n\tif c.fn == nil {\n\t\tfmt.Fprintf(s, \"%%!%c(NOFUNC)\", verb)\n\t\treturn\n\t}\n\n\tswitch verb {\n\tcase 's', 'v':\n\t\tfile, line := c.fn.FileLine(c.pc)\n\t\tswitch {\n\t\tcase s.Flag('#'):\n\t\t\t\/\/ done\n\t\tcase s.Flag('+'):\n\t\t\t\/\/ Here we want to get the source file path relative to the\n\t\t\t\/\/ compile time GOPATH. As of Go 1.4.x there is no direct way to\n\t\t\t\/\/ know the compiled GOPATH at runtime, but we can infer the\n\t\t\t\/\/ number of path segments in the GOPATH. We note that fn.Name()\n\t\t\t\/\/ returns the function name qualified by the import path, which\n\t\t\t\/\/ does not include the GOPATH. Thus we can trim segments from the\n\t\t\t\/\/ beginning of the file path until the number of path separators\n\t\t\t\/\/ remaining is one more than the number of path separators in the\n\t\t\t\/\/ function name. For example, given:\n\t\t\t\/\/\n\t\t\t\/\/ GOPATH \/home\/user\n\t\t\t\/\/ file \/home\/user\/src\/pkg\/sub\/file.go\n\t\t\t\/\/ fn.Name() pkg\/sub.Type.Method\n\t\t\t\/\/\n\t\t\t\/\/ We want to produce:\n\t\t\t\/\/\n\t\t\t\/\/ pkg\/sub\/file.go\n\t\t\t\/\/\n\t\t\t\/\/ From this we can easily see that fn.Name() has one less path\n\t\t\t\/\/ separator than our desired output. We count separators from the\n\t\t\t\/\/ end of the file path until it finds two more than in the\n\t\t\t\/\/ function name and then move one character forward to preserve\n\t\t\t\/\/ the initial path segment without a leading separator.\n\t\t\tconst sep = \"\/\"\n\t\t\tgoal := strings.Count(c.fn.Name(), sep) + 2\n\t\t\tpathCnt := 0\n\t\t\ti := len(file)\n\t\t\tfor pathCnt < goal {\n\t\t\t\ti = strings.LastIndex(file[:i], sep)\n\t\t\t\tif i == -1 {\n\t\t\t\t\ti = -len(sep)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tpathCnt++\n\t\t\t}\n\t\t\t\/\/ get back to 0 or trim the leading seperator\n\t\t\tfile = file[i+len(sep):]\n\t\tdefault:\n\t\t\tconst sep = \"\/\"\n\t\t\tif i := strings.LastIndex(file, sep); i != -1 {\n\t\t\t\tfile = file[i+len(sep):]\n\t\t\t}\n\t\t}\n\t\tio.WriteString(s, file)\n\t\tif verb == 'v' {\n\t\t\tbuf := [7]byte{':'}\n\t\t\ts.Write(strconv.AppendInt(buf[:1], int64(line), 10))\n\t\t}\n\n\tcase 'd':\n\t\t_, line := c.fn.FileLine(c.pc)\n\t\tbuf := [6]byte{}\n\t\ts.Write(strconv.AppendInt(buf[:0], int64(line), 10))\n\n\tcase 'n':\n\t\tname := c.fn.Name()\n\t\tif !s.Flag('+') {\n\t\t\tconst pathSep = \"\/\"\n\t\t\tif i := strings.LastIndex(name, pathSep); i != -1 {\n\t\t\t\tname = name[i+len(pathSep):]\n\t\t\t}\n\t\t\tconst pkgSep = \".\"\n\t\t\tif i := strings.Index(name, pkgSep); i != -1 {\n\t\t\t\tname = name[i+len(pkgSep):]\n\t\t\t}\n\t\t}\n\t\tio.WriteString(s, name)\n\t}\n}\n\n\/\/ PC returns the program counter for this call frame; multiple frames may\n\/\/ have the same PC value.\nfunc (c Call) PC() uintptr {\n\treturn c.pc\n}\n\n\/\/ name returns the import path qualified name of the function containing the\n\/\/ call.\nfunc (c Call) name() string {\n\tif c.fn == nil {\n\t\treturn \"???\"\n\t}\n\treturn c.fn.Name()\n}\n\nfunc (c Call) file() string {\n\tif c.fn == nil {\n\t\treturn \"???\"\n\t}\n\tfile, _ := c.fn.FileLine(c.pc)\n\treturn file\n}\n\nfunc (c Call) line() int {\n\tif c.fn == nil {\n\t\treturn 0\n\t}\n\t_, line := c.fn.FileLine(c.pc)\n\treturn line\n}\n\n\/\/ CallStack records a sequence of function invocations from a goroutine\n\/\/ stack.\ntype CallStack []Call\n\n\/\/ String implements fmt.Stinger. It is equivalent to fmt.Sprintf(\"%v\", cs).\nfunc (cs CallStack) String() string {\n\treturn fmt.Sprint(cs)\n}\n\nvar (\n\topenBracketBytes = []byte(\"[\")\n\tcloseBracketBytes = []byte(\"]\")\n\tspaceBytes = []byte(\" \")\n)\n\n\/\/ MarshalText implements encoding.TextMarshaler. It formats the CallStack the\n\/\/ same as fmt.Sprintf(\"%v\", cs).\nfunc (cs CallStack) MarshalText() ([]byte, error) {\n\tbuf := bytes.Buffer{}\n\tbuf.Write(openBracketBytes)\n\tfor i, pc := range cs {\n\t\tif pc.fn == nil {\n\t\t\treturn nil, ErrNoFunc\n\t\t}\n\t\tif i > 0 {\n\t\t\tbuf.Write(spaceBytes)\n\t\t}\n\t\tfmt.Fprint(&buf, pc)\n\t}\n\tbuf.Write(closeBracketBytes)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Format implements fmt.Formatter by printing the CallStack as square brackes\n\/\/ ([, ]) surrounding a space separated list of Calls each formatted with the\n\/\/ supplied verb and options.\nfunc (cs CallStack) Format(s fmt.State, verb rune) {\n\ts.Write(openBracketBytes)\n\tfor i, pc := range cs {\n\t\tif i > 0 {\n\t\t\ts.Write(spaceBytes)\n\t\t}\n\t\tpc.Format(s, verb)\n\t}\n\ts.Write(closeBracketBytes)\n}\n\n\/\/ findSigpanic intentionally executes faulting code to generate a stack trace\n\/\/ containing an entry for runtime.sigpanic.\nfunc findSigpanic() *runtime.Func {\n\tvar fn *runtime.Func\n\tvar p *int\n\tfunc() int {\n\t\tdefer func() {\n\t\t\tif p := recover(); p != nil {\n\t\t\t\tvar pcs [512]uintptr\n\t\t\t\tn := runtime.Callers(2, pcs[:])\n\t\t\t\tfor _, pc := range pcs[:n] {\n\t\t\t\t\tf := runtime.FuncForPC(pc)\n\t\t\t\t\tif f.Name() == \"runtime.sigpanic\" {\n\t\t\t\t\t\tfn = f\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\t\/\/ intentional nil pointer dereference to trigger sigpanic\n\t\treturn *p\n\t}()\n\treturn fn\n}\n\nvar sigpanic = findSigpanic()\n\n\/\/ Trace returns a CallStack for the current goroutine with element 0\n\/\/ identifying the calling function.\nfunc Trace() CallStack {\n\tvar pcs [512]uintptr\n\tn := runtime.Callers(2, pcs[:])\n\tcs := make([]Call, n)\n\n\tfor i, pc := range pcs[:n] {\n\t\tpcFix := pc\n\t\tif i > 0 && cs[i-1].fn != sigpanic {\n\t\t\tpcFix--\n\t\t}\n\t\tcs[i] = Call{\n\t\t\tfn: runtime.FuncForPC(pcFix),\n\t\t\tpc: pcFix,\n\t\t}\n\t}\n\n\treturn cs\n}\n\n\/\/ TrimBelow returns a slice of the CallStack with all entries below c\n\/\/ removed.\nfunc (cs CallStack) TrimBelow(c Call) CallStack {\n\tfor len(cs) > 0 && cs[0].pc != c.pc {\n\t\tcs = cs[1:]\n\t}\n\treturn cs\n}\n\n\/\/ TrimAbove returns a slice of the CallStack with all entries above c\n\/\/ removed.\nfunc (cs CallStack) TrimAbove(c Call) CallStack {\n\tfor len(cs) > 0 && cs[len(cs)-1].pc != c.pc {\n\t\tcs = cs[:len(cs)-1]\n\t}\n\treturn cs\n}\n\nvar goroot string\n\nfunc init() {\n\tgoroot = filepath.ToSlash(runtime.GOROOT())\n\tif runtime.GOOS == \"windows\" {\n\t\tgoroot = strings.ToLower(goroot)\n\t}\n}\n\nfunc inGoroot(c Call) bool {\n\tfile := c.file()\n\tif len(file) == 0 || file[0] == '?' {\n\t\treturn true\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tfile = strings.ToLower(file)\n\t}\n\treturn strings.HasPrefix(file, goroot) || strings.HasSuffix(file, \"\/_testmain.go\")\n}\n\n\/\/ TrimRuntime returns a slice of the CallStack with the topmost entries from\n\/\/ the go runtime removed. It considers any calls originating from unknown\n\/\/ files, files under GOROOT, or _testmain.go as part of the runtime.\nfunc (cs CallStack) TrimRuntime() CallStack {\n\tfor len(cs) > 0 && inGoroot(cs[len(cs)-1]) {\n\t\tcs = cs[:len(cs)-1]\n\t}\n\treturn cs\n}\n<commit_msg>Use the compile-time GOROOT instead of the run-time GOROOT.<commit_after>\/\/ Package stack implements utilities to capture, manipulate, and format call\n\/\/ stacks. It provides a simpler API than package runtime.\n\/\/\n\/\/ The implementation takes care of the minutia and special cases of\n\/\/ interpreting the program counter (pc) values returned by runtime.Callers.\n\/\/\n\/\/ Package stack's types implement fmt.Formatter, which provides a simple and\n\/\/ flexible way to declaratively configure formatting when used with logging\n\/\/ or error tracking packages.\npackage stack\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Call records a single function invocation from a goroutine stack.\ntype Call struct {\n\tfn *runtime.Func\n\tpc uintptr\n}\n\n\/\/ Caller returns a Call from the stack of the current goroutine. The argument\n\/\/ skip is the number of stack frames to ascend, with 0 identifying the\n\/\/ calling function.\nfunc Caller(skip int) Call {\n\tvar pcs [2]uintptr\n\tn := runtime.Callers(skip+1, pcs[:])\n\n\tvar c Call\n\n\tif n < 2 {\n\t\treturn c\n\t}\n\n\tc.pc = pcs[1]\n\tif runtime.FuncForPC(pcs[0]) != sigpanic {\n\t\tc.pc--\n\t}\n\tc.fn = runtime.FuncForPC(c.pc)\n\treturn c\n}\n\n\/\/ String implements fmt.Stinger. It is equivalent to fmt.Sprintf(\"%v\", c).\nfunc (c Call) String() string {\n\treturn fmt.Sprint(c)\n}\n\n\/\/ MarshalText implements encoding.TextMarshaler. It formats the Call the same\n\/\/ as fmt.Sprintf(\"%v\", c).\nfunc (c Call) MarshalText() ([]byte, error) {\n\tif c.fn == nil {\n\t\treturn nil, ErrNoFunc\n\t}\n\tbuf := bytes.Buffer{}\n\tfmt.Fprint(&buf, c)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ ErrNoFunc means that the Call has a nil *runtime.Func. The most likely\n\/\/ cause is a Call with the zero value.\nvar ErrNoFunc = errors.New(\"no call stack information\")\n\n\/\/ Format implements fmt.Formatter with support for the following verbs.\n\/\/\n\/\/ %s source file\n\/\/ %d line number\n\/\/ %n function name\n\/\/ %v equivalent to %s:%d\n\/\/\n\/\/ It accepts the '+' and '#' flags for most of the verbs as follows.\n\/\/\n\/\/ %+s path of source file relative to the compile time GOPATH\n\/\/ %#s full path of source file\n\/\/ %+n import path qualified function name\n\/\/ %+v equivalent to %+s:%d\n\/\/ %#v equivalent to %#s:%d\nfunc (c Call) Format(s fmt.State, verb rune) {\n\tif c.fn == nil {\n\t\tfmt.Fprintf(s, \"%%!%c(NOFUNC)\", verb)\n\t\treturn\n\t}\n\n\tswitch verb {\n\tcase 's', 'v':\n\t\tfile, line := c.fn.FileLine(c.pc)\n\t\tswitch {\n\t\tcase s.Flag('#'):\n\t\t\t\/\/ done\n\t\tcase s.Flag('+'):\n\t\t\tfile = file[pkgIndex(file, c.fn.Name()):]\n\t\tdefault:\n\t\t\tconst sep = \"\/\"\n\t\t\tif i := strings.LastIndex(file, sep); i != -1 {\n\t\t\t\tfile = file[i+len(sep):]\n\t\t\t}\n\t\t}\n\t\tio.WriteString(s, file)\n\t\tif verb == 'v' {\n\t\t\tbuf := [7]byte{':'}\n\t\t\ts.Write(strconv.AppendInt(buf[:1], int64(line), 10))\n\t\t}\n\n\tcase 'd':\n\t\t_, line := c.fn.FileLine(c.pc)\n\t\tbuf := [6]byte{}\n\t\ts.Write(strconv.AppendInt(buf[:0], int64(line), 10))\n\n\tcase 'n':\n\t\tname := c.fn.Name()\n\t\tif !s.Flag('+') {\n\t\t\tconst pathSep = \"\/\"\n\t\t\tif i := strings.LastIndex(name, pathSep); i != -1 {\n\t\t\t\tname = name[i+len(pathSep):]\n\t\t\t}\n\t\t\tconst pkgSep = \".\"\n\t\t\tif i := strings.Index(name, pkgSep); i != -1 {\n\t\t\t\tname = name[i+len(pkgSep):]\n\t\t\t}\n\t\t}\n\t\tio.WriteString(s, name)\n\t}\n}\n\n\/\/ PC returns the program counter for this call frame; multiple frames may\n\/\/ have the same PC value.\nfunc (c Call) PC() uintptr {\n\treturn c.pc\n}\n\n\/\/ name returns the import path qualified name of the function containing the\n\/\/ call.\nfunc (c Call) name() string {\n\tif c.fn == nil {\n\t\treturn \"???\"\n\t}\n\treturn c.fn.Name()\n}\n\nfunc (c Call) file() string {\n\tif c.fn == nil {\n\t\treturn \"???\"\n\t}\n\tfile, _ := c.fn.FileLine(c.pc)\n\treturn file\n}\n\nfunc (c Call) line() int {\n\tif c.fn == nil {\n\t\treturn 0\n\t}\n\t_, line := c.fn.FileLine(c.pc)\n\treturn line\n}\n\n\/\/ CallStack records a sequence of function invocations from a goroutine\n\/\/ stack.\ntype CallStack []Call\n\n\/\/ String implements fmt.Stinger. It is equivalent to fmt.Sprintf(\"%v\", cs).\nfunc (cs CallStack) String() string {\n\treturn fmt.Sprint(cs)\n}\n\nvar (\n\topenBracketBytes = []byte(\"[\")\n\tcloseBracketBytes = []byte(\"]\")\n\tspaceBytes = []byte(\" \")\n)\n\n\/\/ MarshalText implements encoding.TextMarshaler. It formats the CallStack the\n\/\/ same as fmt.Sprintf(\"%v\", cs).\nfunc (cs CallStack) MarshalText() ([]byte, error) {\n\tbuf := bytes.Buffer{}\n\tbuf.Write(openBracketBytes)\n\tfor i, pc := range cs {\n\t\tif pc.fn == nil {\n\t\t\treturn nil, ErrNoFunc\n\t\t}\n\t\tif i > 0 {\n\t\t\tbuf.Write(spaceBytes)\n\t\t}\n\t\tfmt.Fprint(&buf, pc)\n\t}\n\tbuf.Write(closeBracketBytes)\n\treturn buf.Bytes(), nil\n}\n\n\/\/ Format implements fmt.Formatter by printing the CallStack as square brackes\n\/\/ ([, ]) surrounding a space separated list of Calls each formatted with the\n\/\/ supplied verb and options.\nfunc (cs CallStack) Format(s fmt.State, verb rune) {\n\ts.Write(openBracketBytes)\n\tfor i, pc := range cs {\n\t\tif i > 0 {\n\t\t\ts.Write(spaceBytes)\n\t\t}\n\t\tpc.Format(s, verb)\n\t}\n\ts.Write(closeBracketBytes)\n}\n\n\/\/ findSigpanic intentionally executes faulting code to generate a stack trace\n\/\/ containing an entry for runtime.sigpanic.\nfunc findSigpanic() *runtime.Func {\n\tvar fn *runtime.Func\n\tvar p *int\n\tfunc() int {\n\t\tdefer func() {\n\t\t\tif p := recover(); p != nil {\n\t\t\t\tvar pcs [512]uintptr\n\t\t\t\tn := runtime.Callers(2, pcs[:])\n\t\t\t\tfor _, pc := range pcs[:n] {\n\t\t\t\t\tf := runtime.FuncForPC(pc)\n\t\t\t\t\tif f.Name() == \"runtime.sigpanic\" {\n\t\t\t\t\t\tfn = f\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\t\/\/ intentional nil pointer dereference to trigger sigpanic\n\t\treturn *p\n\t}()\n\treturn fn\n}\n\nvar sigpanic = findSigpanic()\n\n\/\/ Trace returns a CallStack for the current goroutine with element 0\n\/\/ identifying the calling function.\nfunc Trace() CallStack {\n\tvar pcs [512]uintptr\n\tn := runtime.Callers(2, pcs[:])\n\tcs := make([]Call, n)\n\n\tfor i, pc := range pcs[:n] {\n\t\tpcFix := pc\n\t\tif i > 0 && cs[i-1].fn != sigpanic {\n\t\t\tpcFix--\n\t\t}\n\t\tcs[i] = Call{\n\t\t\tfn: runtime.FuncForPC(pcFix),\n\t\t\tpc: pcFix,\n\t\t}\n\t}\n\n\treturn cs\n}\n\n\/\/ TrimBelow returns a slice of the CallStack with all entries below c\n\/\/ removed.\nfunc (cs CallStack) TrimBelow(c Call) CallStack {\n\tfor len(cs) > 0 && cs[0].pc != c.pc {\n\t\tcs = cs[1:]\n\t}\n\treturn cs\n}\n\n\/\/ TrimAbove returns a slice of the CallStack with all entries above c\n\/\/ removed.\nfunc (cs CallStack) TrimAbove(c Call) CallStack {\n\tfor len(cs) > 0 && cs[len(cs)-1].pc != c.pc {\n\t\tcs = cs[:len(cs)-1]\n\t}\n\treturn cs\n}\n\n\/\/ pkgIndex returns the index that results in file[index:] being the path of\n\/\/ file relative to the compile time GOPATH, and file[:index] being the\n\/\/ $GOPATH\/src\/ portion of file. funcName must be the name of a function in\n\/\/ file as returned by runtime.Func.Name.\nfunc pkgIndex(file, funcName string) int {\n\t\/\/ As of Go 1.6.2 there is no direct way to know the compile time GOPATH\n\t\/\/ at runtime, but we can infer the number of path segments in the GOPATH.\n\t\/\/ We note that runtime.Func.Name() returns the function name qualified by\n\t\/\/ the import path, which does not include the GOPATH. Thus we can trim\n\t\/\/ segments from the beginning of the file path until the number of path\n\t\/\/ separators remaining is one more than the number of path separators in\n\t\/\/ the function name. For example, given:\n\t\/\/\n\t\/\/ GOPATH \/home\/user\n\t\/\/ file \/home\/user\/src\/pkg\/sub\/file.go\n\t\/\/ fn.Name() pkg\/sub.Type.Method\n\t\/\/\n\t\/\/ We want to produce:\n\t\/\/\n\t\/\/ file[:idx] == \/home\/user\/src\/\n\t\/\/ file[idx:] == pkg\/sub\/file.go\n\t\/\/\n\t\/\/ From this we can easily see that fn.Name() has one less path separator\n\t\/\/ than our desired result for file[idx:]. We count separators from the\n\t\/\/ end of the file path until it finds two more than in the function name\n\t\/\/ and then move one character forward to preserve the initial path\n\t\/\/ segment without a leading separator.\n\tconst sep = \"\/\"\n\ti := len(file)\n\tfor n := strings.Count(funcName, sep) + 2; n > 0; n-- {\n\t\ti = strings.LastIndex(file[:i], sep)\n\t\tif i == -1 {\n\t\t\ti = -len(sep)\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ get back to 0 or trim the leading seperator\n\treturn i + len(sep)\n}\n\nvar runtimePath string\n\nfunc init() {\n\tvar pcs [1]uintptr\n\truntime.Callers(0, pcs[:])\n\tfn := runtime.FuncForPC(pcs[0])\n\tfile, _ := fn.FileLine(pcs[0])\n\n\tidx := pkgIndex(file, fn.Name())\n\n\truntimePath = file[:idx]\n\tif runtime.GOOS == \"windows\" {\n\t\truntimePath = strings.ToLower(runtimePath)\n\t}\n}\n\nfunc inGoroot(c Call) bool {\n\tfile := c.file()\n\tif len(file) == 0 || file[0] == '?' {\n\t\treturn true\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tfile = strings.ToLower(file)\n\t}\n\treturn strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, \"\/_testmain.go\")\n}\n\n\/\/ TrimRuntime returns a slice of the CallStack with the topmost entries from\n\/\/ the go runtime removed. It considers any calls originating from unknown\n\/\/ files, files under GOROOT, or _testmain.go as part of the runtime.\nfunc (cs CallStack) TrimRuntime() CallStack {\n\tfor len(cs) > 0 && inGoroot(cs[len(cs)-1]) {\n\t\tcs = cs[:len(cs)-1]\n\t}\n\treturn cs\n}\n<|endoftext|>"} {"text":"<commit_before>package logsend\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/Unknwon\/com\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"study2016\/logshot\/logger\"\n\t\"time\"\n)\n\n\/\/监听的路径-文件映射\nvar WatcherMap map[string]*File = make(map[string]*File)\n\n\/\/监听文件\nfunc WatchFiles(configFile string) {\n\trule, err := LoadConfigFromFile(configFile)\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(\"Can't load config\", err)\n\t}\n\tfiles := make([]string, 0)\n\tfiles = append(files, rule.watchDir)\n\tassignedFiles, err := assignFiles(files, rule)\n\t\/\/统计分配文件的数量\n\t\/\/当每个文件完成任务后,这个数目减少1\n\t\/\/主要用于判断doneCh的过程中,不至于因为doneCh的阻塞导致deadlock\n\tassignedFilesCount := len(assignedFiles)\n\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(\"can't assign file per rule\", err)\n\t}\n\n\tdoneCh := make(chan string)\n\tfor _, file := range assignedFiles {\n\t\tfile.doneCh = doneCh\n\t\t\/\/并行处理文件采集\n\t\tgo file.tail()\n\t}\n\n\t\/\/如果监听对象为目录,开启异步监听\n\tif com.IsDir(rule.watchDir) {\n\t\tgo continueWatch(&rule.watchDir, rule, &assignedFilesCount, doneCh)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase fpath := <-doneCh:\n\t\t\tassignedFilesCount = assignedFilesCount - 1\n\t\t\tif assignedFilesCount == 0 {\n\t\t\t\tlogger.GetLogger().Errorln(\"finished reading file %+v\", fpath)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/单文件分配\nfunc assignSingleFile(filepath string, rule *Rule) (*File, error) {\n\tis_dir := com.IsDir(filepath)\n\tif !is_dir {\n\t\tfile, err := NewFile(filepath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfile.rule = rule\n\t\treturn file, nil\n\t}\n\treturn nil, errors.New(\"file not found : \" + filepath)\n}\n\n\/\/为文件分配规则\nfunc assignFiles(allFiles []string, rule *Rule) ([]*File, error) {\n\tfiles := make([]*File, 0)\n\tfor _, f := range allFiles {\n\t\tis_dir := com.IsDir(f)\n\t\t\/\/watch-dir是目录的形式\n\t\tif is_dir {\n\t\t\t\/\/遍历文件目录\n\t\t\tfilepath.Walk(f, func(pth string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif !info.IsDir() && !strings.Contains(info.Name(), \".DS_Store\") {\n\t\t\t\t\tfile, err := NewFile(pth)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfile.rule = rule\n\t\t\t\t\tfiles = appendWatch(files, file)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t} else {\n\t\t\t\/\/分配单文件\n\t\t\tfile, err := assignSingleFile(f, rule)\n\t\t\tif err != nil {\n\t\t\t\treturn files, err\n\t\t\t}\n\t\t\tfiles = appendWatch(files, file)\n\t\t}\n\t}\n\treturn files, nil\n}\n\nfunc appendWatch(files []*File, file *File) []*File {\n\tWatcherMap[file.Tail.Filename] = file\n\tif files != nil {\n\t\tfiles = append(files, file)\n\t}\n\treturn files\n}\n\nfunc continueWatch(dir *string, rule *Rule, totalFileCount *int, doneCh chan string) {\n\t\/\/判断dir是否是目录结构\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(err.Error())\n\t}\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\tif ev.IsCreate() {\n\t\t\t\t\t*totalFileCount = *totalFileCount + 1\n\t\t\t\t\tprintln(\"发现文件创建:\", ev.Name, \"当前监控文件数:\", *totalFileCount)\n\t\t\t\t\tfile, err := assignSingleFile(ev.Name, rule)\n\t\t\t\t\tappendWatch(nil, file)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tfile.doneCh = doneCh\n\t\t\t\t\t\tgo file.tail() \/\/异步传输\n\t\t\t\t\t}\n\t\t\t\t} else if ev.IsDelete() { \/\/文件被删除的情况,需要进行资源回收\n\t\t\t\t\t\/\/获取被删除的file对象,并发完成消息到其doneCh\n\t\t\t\t\tlogger.GetLogger().Infof(\"tailing file is deleted : %s \\n \", ev.Name)\n\t\t\t\t\tif delete_file, ok := WatcherMap[ev.Name]; ok {\n\t\t\t\t\t\tdelete(WatcherMap, ev.Name)\n\t\t\t\t\t\tdelete_file.Tail.Stop()\/\/ stop the line tail\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.GetLogger().Errorf(\"get delete file fail : %s\", ev.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlogger.GetLogger().Errorln(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\t\/\/监听目录\n\terr = watcher.Watch(*dir)\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(err.Error())\n\t}\n\t<-done\n\n\t\/* ... do stuff ... *\/\n\twatcher.Close()\n}\n\n\/\/监听文件结构\ntype File struct {\n\tTail *tail.Tail\n\trule *Rule\n\tdoneCh chan string\n}\n\n\/\/创建监听文件\nfunc NewFile(fpath string) (*File, error) {\n\tfile := &File{}\n\tvar err error\n\n\t\/\/是否采用低版本的poll监听方式(darwin除外)\n\t\/\/Linux2.6.32以下无法使用inotity,需要把Poll打开,采用 Polling的方式\n\tisPoll := Conf.IsPoll\n\tif Conf.ReadWholeLog && Conf.ReadAlway { \/\/全量并持续采集\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tFollow: true,\n\t\t\tReOpen: true,\n\t\t\tPoll: isPoll,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t} else if Conf.ReadWholeLog { \/\/全量但只采集一次\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tPoll: isPoll,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t} else {\n\t\t\/\/从当前文件最尾端开始采集\n\t\tseekInfo := &tail.SeekInfo{Offset: 0, Whence: 2}\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tFollow: true,\n\t\t\tReOpen: true,\n\t\t\tPoll: isPoll,\n\t\t\tLocation: seekInfo,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t}\n\treturn file, err\n}\n\nfunc (self *File) tail() {\n\tlogger.GetLogger().Infoln(fmt.Sprintf(\"start tailing %s\", self.Tail.Filename))\n\n\t\/\/功能收尾\n\tdefer func() {\n\t\t\/\/关闭Sender初始化过程中建立的通讯\n\t\tcloseRule(self.rule)\n\t\t\/\/通知结束通道,让主调用方结束\n\t\tlogger.GetLogger().Infof(\"file-watching has done : %s\", self.Tail.Filename)\n\t\tself.doneCh <- self.Tail.Filename\n\t}()\n\tfor line := range self.Tail.Lines {\n\t\tcheckLineRule(&line.Text, self.rule)\n\t}\n}\n\n\/\/检查并进行发送\nfunc checkLineRule(line *string, rule *Rule) {\n\t\/\/日志行对象\n\tlogline := &LogLine{\n\t\tTs: time.Now().UTC().UnixNano(),\n\t\tLine: []byte(*line),\n\t}\n\trule.SendLogLine(logline)\n}\n\n\/\/关闭规则\nfunc closeRule(rule *Rule) {\n\trule.CloseSender()\n}\n<commit_msg>support some functions<commit_after>package logsend\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/Unknwon\/com\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"study2016\/logshot\/logger\"\n\t\"time\"\n)\n\n\/\/监听的路径-文件映射\nvar WatcherMap map[string]*File = make(map[string]*File)\n\n\/\/监听文件\nfunc WatchFiles(configFile string) {\n\trule, err := LoadConfigFromFile(configFile)\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(\"Can't load config\", err)\n\t}\n\tfiles := make([]string, 0)\n\tfiles = append(files, rule.watchDir)\n\tassignedFiles, err := assignFiles(files, rule)\n\t\/\/统计分配文件的数量\n\t\/\/当每个文件完成任务后,这个数目减少1\n\t\/\/主要用于判断doneCh的过程中,不至于因为doneCh的阻塞导致deadlock\n\tassignedFilesCount := len(assignedFiles)\n\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(\"can't assign file per rule\", err)\n\t}\n\n\tdoneCh := make(chan string)\n\tfor _, file := range assignedFiles {\n\t\tfile.doneCh = doneCh\n\t\t\/\/并行处理文件采集\n\t\tgo file.tail()\n\t}\n\n\t\/\/如果监听对象为目录,开启异步监听\n\tif com.IsDir(rule.watchDir) {\n\t\tgo continueWatch(&rule.watchDir, rule, &assignedFilesCount, doneCh)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase fpath := <-doneCh:\n\t\t\tassignedFilesCount = assignedFilesCount - 1\n\t\t\tif assignedFilesCount == 0 {\n\t\t\t\tlogger.GetLogger().Infof(\"finished reading file %+v\", fpath)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/单文件分配\nfunc assignSingleFile(filepath string, rule *Rule) (*File, error) {\n\tis_dir := com.IsDir(filepath)\n\tif !is_dir {\n\t\tfile, err := NewFile(filepath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfile.rule = rule\n\t\treturn file, nil\n\t}\n\treturn nil, errors.New(\"file not found : \" + filepath)\n}\n\n\/\/为文件分配规则\nfunc assignFiles(allFiles []string, rule *Rule) ([]*File, error) {\n\tfiles := make([]*File, 0)\n\tfor _, f := range allFiles {\n\t\tis_dir := com.IsDir(f)\n\t\t\/\/watch-dir是目录的形式\n\t\tif is_dir {\n\t\t\t\/\/遍历文件目录\n\t\t\tfilepath.Walk(f, func(pth string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif !info.IsDir() && !strings.Contains(info.Name(), \".DS_Store\") {\n\t\t\t\t\tfile, err := NewFile(pth)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tfile.rule = rule\n\t\t\t\t\tfiles = appendWatch(files, file)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t} else {\n\t\t\t\/\/分配单文件\n\t\t\tfile, err := assignSingleFile(f, rule)\n\t\t\tif err != nil {\n\t\t\t\treturn files, err\n\t\t\t}\n\t\t\tfiles = appendWatch(files, file)\n\t\t}\n\t}\n\treturn files, nil\n}\n\nfunc appendWatch(files []*File, file *File) []*File {\n\tWatcherMap[file.Tail.Filename] = file\n\tif files != nil {\n\t\tfiles = append(files, file)\n\t}\n\treturn files\n}\n\nfunc continueWatch(dir *string, rule *Rule, totalFileCount *int, doneCh chan string) {\n\t\/\/判断dir是否是目录结构\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(err.Error())\n\t}\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\tif ev.IsCreate() {\n\t\t\t\t\t*totalFileCount = *totalFileCount + 1\n\t\t\t\t\tprintln(\"发现文件创建:\", ev.Name, \"当前监控文件数:\", *totalFileCount)\n\t\t\t\t\tfile, err := assignSingleFile(ev.Name, rule)\n\t\t\t\t\tappendWatch(nil, file)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tfile.doneCh = doneCh\n\t\t\t\t\t\tgo file.tail() \/\/异步传输\n\t\t\t\t\t}\n\t\t\t\t} else if ev.IsDelete() { \/\/文件被删除的情况,需要进行资源回收\n\t\t\t\t\t\/\/获取被删除的file对象,并发完成消息到其doneCh\n\t\t\t\t\tlogger.GetLogger().Infof(\"tailing file is deleted : %s \", ev.Name)\n\t\t\t\t\tif delete_file, ok := WatcherMap[ev.Name]; ok {\n\t\t\t\t\t\tdelete(WatcherMap, ev.Name)\n\t\t\t\t\t\tif len(WatcherMap) == 0 {\n\t\t\t\t\t\t\tcloseRule(rule)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete_file.Tail.Stop() \/\/ stop the line tail\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.GetLogger().Errorf(\"get delete file fail : %s\", ev.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlogger.GetLogger().Errorln(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\t\/\/监听目录\n\terr = watcher.Watch(*dir)\n\tif err != nil {\n\t\tlogger.GetLogger().Errorln(err.Error())\n\t}\n\t<-done\n\n\t\/* ... do stuff ... *\/\n\twatcher.Close()\n}\n\n\/\/监听文件结构\ntype File struct {\n\tTail *tail.Tail\n\trule *Rule\n\tdoneCh chan string\n}\n\n\/\/创建监听文件\nfunc NewFile(fpath string) (*File, error) {\n\tfile := &File{}\n\tvar err error\n\n\t\/\/是否采用低版本的poll监听方式(darwin除外)\n\t\/\/Linux2.6.32以下无法使用inotity,需要把Poll打开,采用 Polling的方式\n\tisPoll := Conf.IsPoll\n\tif Conf.ReadWholeLog && Conf.ReadAlway { \/\/全量并持续采集\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tFollow: true,\n\t\t\tReOpen: true,\n\t\t\tPoll: isPoll,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t} else if Conf.ReadWholeLog { \/\/全量但只采集一次\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tPoll: isPoll,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t} else {\n\t\t\/\/从当前文件最尾端开始采集\n\t\tseekInfo := &tail.SeekInfo{Offset: 0, Whence: 2}\n\t\tfile.Tail, err = tail.TailFile(fpath, tail.Config{\n\t\t\tFollow: true,\n\t\t\tReOpen: true,\n\t\t\tPoll: isPoll,\n\t\t\tLocation: seekInfo,\n\t\t\tLogger: logger.GetLogger(), \/\/使用自定义的日志器\n\t\t})\n\t}\n\treturn file, err\n}\n\nfunc (self *File) tail() {\n\tlogger.GetLogger().Infoln(fmt.Sprintf(\"start tailing %s\", self.Tail.Filename))\n\n\t\/\/功能收尾\n\tdefer func() {\n\t\t\/\/通知结束通道,让主调用方结束\n\t\tlogger.GetLogger().Infof(\"file-watching has done : %s\", self.Tail.Filename)\n\t\tself.doneCh <- self.Tail.Filename\n\t}()\n\tfor line := range self.Tail.Lines {\n\t\tcheckLineRule(&line.Text, self.rule)\n\t}\n}\n\n\/\/检查并进行发送\nfunc checkLineRule(line *string, rule *Rule) {\n\t\/\/日志行对象\n\tlogline := &LogLine{\n\t\tTs: time.Now().UTC().UnixNano(),\n\t\tLine: []byte(*line),\n\t}\n\trule.SendLogLine(logline)\n}\n\n\/\/关闭规则\nfunc closeRule(rule *Rule) {\n\tlogger.GetLogger().Infoln(\"stop rule send channel \")\n\trule.CloseSender()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype logEntry struct {\n\tNode string\n\tTime int64\n\tMsg string\n}\n\nvar (\n\ttsFormat = \"2006\/01\/02 15:04:05\"\n\taddr string\n\tdbFile string\n\tlogChan chan *logEntry\n)\n\nvar logSplit = regexp.MustCompile(\"^([\\\\w-:]+) (\\\\d{4}\/\\\\d{2}\/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) (.+)$\")\nvar responseCheck = regexp.MustCompile(\"response time: (\\\\d+)us$\")\n\nfunc init() {\n\tflLogBuffer := flag.Uint(\"b\", 16, \"log entries to buffer\")\n\tflDbFile := flag.String(\"f\", \"logs.db\", \"database file\")\n\tport := flag.Uint(\"p\", 5988, \"port to listen on\")\n\tflag.Parse()\n\n\taddr = fmt.Sprintf(\":%d\", *port)\n\tdbFile = *flDbFile\n\tlogChan = make(chan *logEntry, *flLogBuffer)\n}\n\nfunc main() {\n\tdbSetup()\n\tgo log()\n\tgo listen()\n\n\tsigc := make(chan os.Signal, 1)\n\tsignal.Notify(sigc, os.Kill, os.Interrupt, syscall.SIGTERM)\n\t<-sigc\n\tclose(logChan)\n\t<-time.After(100 * time.Millisecond)\n\n\tos.Exit(1)\n}\n\nfunc listen() {\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"[!] failed to resolve TCP address:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tlistener, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"[!] failed to set up TCP listener:\", err.Error())\n\t}\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] TCP error:\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo processMessage(conn)\n\t}\n\n}\n\nfunc processMessage(conn net.Conn) {\n\tfmt.Println(\"[+] client connected:\", conn.RemoteAddr())\n\tdefer conn.Close()\n\tr := bufio.NewReader(conn)\n\n\tfor {\n\t\tmsg, err := r.ReadString(0x0a)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Println(\"[!] error reading from client:\",\n\t\t\t\t\terr.Error())\n\t\t\t}\n\t\t\treturn\n\t\t} else if msg == \"\" {\n\t\t\treturn\n\t\t}\n\t\tmsg = strings.Trim(string(msg), \"\\n \\t\")\n\t\tfmt.Println(\"-- \", msg)\n\n\t\tnodeID := logSplit.ReplaceAllString(msg, \"$1\")\n\t\tdateString := logSplit.ReplaceAllString(msg, \"$2\")\n\t\tlogMsg := logSplit.ReplaceAllString(msg, \"$3\")\n\t\ttm, err := time.Parse(tsFormat, dateString)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[!] error parsing time %s: %s\\n\",\n\t\t\t\tdateString, err.Error())\n\t\t\treturn\n\t\t}\n\t\tle := &logEntry{nodeID, tm.UTC().Unix(), logMsg}\n\t\tlogChan <- le\n\t}\n}\n\nfunc log() {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\tfor {\n\t\tle, ok := <-logChan\n\t\tif !ok {\n\t\t\tfmt.Println(\"[+] shutting down database listener\")\n\t\t\treturn\n\t\t}\n\t\t_, err := db.Exec(\"insert into entries values (?, ?, ?)\",\n\t\t\tle.Node, le.Time, le.Msg)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] database error:\", err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tif responseCheck.MatchString(le.Msg) {\n\t\t\trespString := responseCheck.ReplaceAllString(le.Msg, \"$1\")\n\t\t\trTime, err := strconv.Atoi(respString)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[!] error reading response time:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = db.Exec(\"insert into response_times values (?, ?, ?)\",\n\t\t\t\tle.Node, le.Time, rTime)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[!] error writing to database:\", err.Error())\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc logResponseTime(db *sql.DB, le *logEntry) {\n}\n\nfunc dbSetup() {\n\tentryTable()\n\trespTable()\n}\n\nfunc entryTable() {\n\tconst createSql = `CREATE TABLE entries (node text, timestamp integer, message string)`\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\trows, err := db.Query(`select sql from sqlite_master\n where type='table' and name='entries'`)\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tvar tblSql string\n\tfor rows.Next() {\n\t\terr = rows.Scan(&tblSql)\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t} else if tblSql == \"\" {\n\t\tfmt.Println(\"[+] creating table\")\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if tblSql != createSql {\n\t\tfmt.Println(\"[+] schema out of sync\")\n\t\t_, err = db.Exec(`drop table entries`)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error dropping table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc respTable() {\n\tconst createSql = `CREATE TABLE response_times (node text, timestamp integer, microsec integer)`\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\trows, err := db.Query(`select sql from sqlite_master\n where type='table' and name='response_times'`)\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tvar tblSql string\n\tfor rows.Next() {\n\t\terr = rows.Scan(&tblSql)\n\t\tbreak\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t} else if tblSql == \"\" {\n\t\tfmt.Println(\"[+] creating table\")\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if tblSql != createSql {\n\t\tfmt.Println(\"[+] schema out of sync\")\n\t\t_, err = db.Exec(`drop table response_times`)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error dropping table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<commit_msg>Fix 'database is locked' error.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype logEntry struct {\n\tNode string\n\tTime int64\n\tMsg string\n}\n\nvar (\n\ttsFormat = \"2006\/01\/02 15:04:05\"\n\taddr string\n\tdbFile string\n\tlogChan chan *logEntry\n)\n\nvar logSplit = regexp.MustCompile(\"^([\\\\w-:]+) (\\\\d{4}\/\\\\d{2}\/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) (.+)$\")\nvar responseCheck = regexp.MustCompile(\"response time: (\\\\d+)us$\")\n\nfunc init() {\n\tflLogBuffer := flag.Uint(\"b\", 16, \"log entries to buffer\")\n\tflDbFile := flag.String(\"f\", \"logs.db\", \"database file\")\n\tport := flag.Uint(\"p\", 5988, \"port to listen on\")\n\tflag.Parse()\n\n\taddr = fmt.Sprintf(\":%d\", *port)\n\tdbFile = *flDbFile\n\tlogChan = make(chan *logEntry, *flLogBuffer)\n}\n\nfunc main() {\n\tdbSetup()\n\tgo log()\n\tgo listen()\n\n\tsigc := make(chan os.Signal, 1)\n\tsignal.Notify(sigc, os.Kill, os.Interrupt, syscall.SIGTERM)\n\t<-sigc\n\tclose(logChan)\n\t<-time.After(100 * time.Millisecond)\n\n\tos.Exit(1)\n}\n\nfunc listen() {\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"[!] failed to resolve TCP address:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tlistener, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"[!] failed to set up TCP listener:\", err.Error())\n\t}\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] TCP error:\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo processMessage(conn)\n\t}\n\n}\n\nfunc processMessage(conn net.Conn) {\n\tfmt.Println(\"[+] client connected:\", conn.RemoteAddr())\n\tdefer conn.Close()\n\tr := bufio.NewReader(conn)\n\n\tfor {\n\t\tmsg, err := r.ReadString(0x0a)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Println(\"[!] error reading from client:\",\n\t\t\t\t\terr.Error())\n\t\t\t}\n\t\t\treturn\n\t\t} else if msg == \"\" {\n\t\t\treturn\n\t\t}\n\t\tmsg = strings.Trim(string(msg), \"\\n \\t\")\n\t\tfmt.Println(\"-- \", msg)\n\n\t\tnodeID := logSplit.ReplaceAllString(msg, \"$1\")\n\t\tdateString := logSplit.ReplaceAllString(msg, \"$2\")\n\t\tlogMsg := logSplit.ReplaceAllString(msg, \"$3\")\n\t\ttm, err := time.Parse(tsFormat, dateString)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[!] error parsing time %s: %s\\n\",\n\t\t\t\tdateString, err.Error())\n\t\t\treturn\n\t\t}\n\t\tle := &logEntry{nodeID, tm.UTC().Unix(), logMsg}\n\t\tlogChan <- le\n\t}\n}\n\nfunc log() {\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\tfor {\n\t\tle, ok := <-logChan\n\t\tif !ok {\n\t\t\tfmt.Println(\"[+] shutting down database listener\")\n\t\t\treturn\n\t\t}\n\t\t_, err := db.Exec(\"insert into entries values (?, ?, ?)\",\n\t\t\tle.Node, le.Time, le.Msg)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] database error:\", err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tif responseCheck.MatchString(le.Msg) {\n\t\t\trespString := responseCheck.ReplaceAllString(le.Msg, \"$1\")\n\t\t\trTime, err := strconv.Atoi(respString)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[!] error reading response time:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = db.Exec(\"insert into response_times values (?, ?, ?)\",\n\t\t\t\tle.Node, le.Time, rTime)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[!] error writing to database:\", err.Error())\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc logResponseTime(db *sql.DB, le *logEntry) {\n}\n\nfunc dbSetup() {\n\tentryTable()\n\trespTable()\n}\n\nfunc entryTable() {\n\tconst createSql = `CREATE TABLE entries (node text, timestamp integer, message string)`\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\trows, err := db.Query(`select sql from sqlite_master\n where type='table' and name='entries'`)\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tvar tblSql string\n\tfor rows.Next() {\n\t\terr = rows.Scan(&tblSql)\n\t\tbreak\n\t}\n\trows.Close()\n\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t} else if tblSql == \"\" {\n\t\tfmt.Println(\"[+] creating table\")\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if tblSql != createSql {\n\t\tfmt.Println(\"[+] schema out of sync\")\n\t\t_, err = db.Exec(`drop table entries`)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error dropping table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc respTable() {\n\tconst createSql = `CREATE TABLE response_times (node text, timestamp integer, microsec integer)`\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tfmt.Println(\"[!] failed to open DB file:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\tdefer db.Close()\n\n\trows, err := db.Query(`select sql from sqlite_master\n where type='table' and name='response_times'`)\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tvar tblSql string\n\tfor rows.Next() {\n\t\terr = rows.Scan(&tblSql)\n\t\tbreak\n\t}\n\trows.Close()\n\n\tif err != nil {\n\t\tfmt.Println(\"[!] error reading database:\", err.Error())\n\t\tos.Exit(1)\n\t} else if tblSql == \"\" {\n\t\tfmt.Println(\"[+] creating table\")\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if tblSql != createSql {\n\t\tfmt.Println(\"[+] schema out of sync\")\n\t\t_, err = db.Exec(`drop table response_times`)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error dropping table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_, err = db.Exec(createSql)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[!] error creating table:\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n)\n\nfunc main() {\n\tsound := RandomSound()\n\tfmt.Println(sound)\n}\n\nfunc RandomSound() Sound {\n\tsound := Sound{}\n\tsound.randomiseSound()\n\tfor !sound.IsValid() {\n\t\tsound.randomiseSound()\n\t}\n\treturn sound\n}\n\nfunc (sound *Sound) randomiseSound() {\n\tsound.Point = ArticulationPoint(rand.Intn(int(ArticulationPointCount)))\n\tsound.Manner = ArticulationManner(rand.Intn(int(ArticulationMannerCount)))\n\tsound.Shape = TongueShape(rand.Intn(int(TongueShapeCount)))\n\tsound.Voice = Voice(rand.Intn(int(VoiceCount)))\n\tsound.Rounded = rand.Intn(2) == 0\n\tsound.Nasal = rand.Intn(2) == 0\n}\n\ntype Sound struct {\n\tPoint ArticulationPoint\n\tManner ArticulationManner\n\tShape TongueShape\n\tRounded bool\n\tNasal bool\n\tVoice Voice\n}\n\ntype ArticulationPoint int\n\nconst (\n\tLabialLabial ArticulationPoint = iota\n\tLabialDental\n\tCoronalLabial\n\tCoronalDental\n\tCoronalAlveolar\n\tCoronalPostAlveolar\n\tCoronalRetroflex\n\tDorsalPostAlveolar\n\tDorsalPalatal\n\tDorsalPalVel\n\tDorsalVelar\n\tDorsalVelUlu\n\tDorsalUvular\n\tRadicalPharyngeal\n\tRadicalEpiglottal\n\tGlottal\n\tArticulationPointCount\n)\n\ntype ArticulationManner int\n\nconst (\n\tClosed ArticulationManner = iota\n\tStop\n\tFlap\n\tTrill\n\tFricative\n\tApproximant\n\tClose\n\tNearClose\n\tCloseMid\n\tMid\n\tOpenMid\n\tNearOpen\n\tOpen\n\tArticulationMannerCount\n)\n\ntype TongueShape int\n\nconst (\n\tCentral TongueShape = iota\n\tLateral\n\tSibilant\n\tTongueShapeCount\n)\n\ntype Voice int\n\nconst (\n\tAspirated Voice = iota\n\tVoiceless\n\tBreathy\n\tModal\n\tCreaky\n\tVoiceCount\n)\n\nfunc (sound Sound) IsValid() bool {\n\tif sound.Rounded {\n\t\tswitch sound.Point {\n\t\tcase LabialLabial, LabialDental, CoronalLabial:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Voice == Voiceless {\n\t\t\/\/this isn't really invalid, just very unusual\n\t\tswitch sound.Manner {\n\t\tcase Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Voice == Aspirated {\n\t\tswitch sound.Manner {\n\t\tcase Stop:\n\t\t\tbreak\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Manner == Closed && !sound.Nasal {\n\t\treturn false\n\t}\n\n\tswitch sound.Point {\n\tcase LabialLabial:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase LabialDental:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalLabial:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalDental:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalPostAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalRetroflex:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPostAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPalatal:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPalVel:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalVelar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalVelUlu:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalUvular:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase RadicalPharyngeal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase RadicalEpiglottal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase Glottal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Fricative:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tpanic(\"Weird\")\n\t}\n}\n<commit_msg>Implemented Standardise and encode\/decode<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n)\n\nfunc main() {\n\tsound := RandomSound()\n\tfmt.Println(sound)\n}\n\nfunc RandomSound() Sound {\n\tsound := Sound{}\n\tsound.randomiseSound()\n\tfor !sound.IsValid() {\n\t\tsound.randomiseSound()\n\t}\n\tsound.Standardise()\n\treturn sound\n}\n\nfunc (sound *Sound) randomiseSound() {\n\tsound.Point = ArticulationPoint(rand.Intn(int(ArticulationPointCount)))\n\tsound.Manner = ArticulationManner(rand.Intn(int(ArticulationMannerCount)))\n\tsound.Shape = TongueShape(rand.Intn(int(TongueShapeCount)))\n\tsound.Voice = Voice(rand.Intn(int(VoiceCount)))\n\tsound.Rounded = rand.Intn(2) == 0\n\tsound.Nasal = rand.Intn(2) == 0\n}\n\ntype Sound struct {\n\tPoint ArticulationPoint\n\tManner ArticulationManner\n\tShape TongueShape\n\tRounded bool\n\tNasal bool\n\tVoice Voice\n}\n\ntype ArticulationPoint int\n\nconst (\n\tLabialLabial ArticulationPoint = iota\n\tLabialDental\n\tCoronalLabial\n\tCoronalDental\n\tCoronalAlveolar\n\tCoronalPostAlveolar\n\tCoronalRetroflex\n\tDorsalPostAlveolar\n\tDorsalPalatal\n\tDorsalPalVel\n\tDorsalVelar\n\tDorsalVelUlu\n\tDorsalUvular\n\tRadicalPharyngeal\n\tRadicalEpiglottal\n\tGlottal\n\tArticulationPointCount\n)\n\ntype ArticulationManner int\n\nconst (\n\tClosed ArticulationManner = iota\n\tStop\n\tFlap\n\tTrill\n\tFricative\n\tApproximant\n\tClose\n\tNearClose\n\tCloseMid\n\tMid\n\tOpenMid\n\tNearOpen\n\tOpen\n\tArticulationMannerCount\n)\n\ntype TongueShape int\n\nconst (\n\tCentral TongueShape = iota\n\tLateral\n\tSibilant\n\tTongueShapeCount\n)\n\ntype Voice int\n\nconst (\n\tAspirated Voice = iota\n\tVoiceless\n\tBreathy\n\tModal\n\tCreaky\n\tVoiceCount\n)\n\nfunc (sound *Sound) IsValid() bool {\n\tif sound.Rounded {\n\t\tswitch sound.Point {\n\t\tcase LabialLabial, LabialDental, CoronalLabial:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Voice == Voiceless {\n\t\t\/\/this isn't really invalid, just very unusual\n\t\tswitch sound.Manner {\n\t\tcase Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Voice == Aspirated {\n\t\tswitch sound.Manner {\n\t\tcase Stop:\n\t\t\tbreak\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif sound.Manner == Closed && !sound.Nasal {\n\t\treturn false\n\t}\n\n\tswitch sound.Point {\n\tcase LabialLabial:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase LabialDental:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalLabial:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalDental:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalPostAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase CoronalRetroflex:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPostAlveolar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPalatal:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalPalVel:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalVelar:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalVelUlu:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase DorsalUvular:\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase RadicalPharyngeal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase RadicalEpiglottal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Flap, Trill, Fricative, Approximant:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase Glottal:\n\t\tswitch sound.Manner {\n\t\tcase Stop, Fricative:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tpanic(\"Weird\")\n\t}\n}\n\nfunc (sound *Sound) Standardise() {\n\tif sound.Shape == Sibilant {\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\tsound.Shape = Central\n\t\t}\n\t\tswitch sound.Point {\n\t\tcase LabialLabial, LabialDental, CoronalLabial, DorsalPalatal, DorsalPalVel, DorsalVelar, DorsalVelUlu, DorsalUvular, RadicalPharyngeal, RadicalEpiglottal, Glottal:\n\t\t\tsound.Shape = Central\n\t\t}\n\t}\n\n\tif sound.Shape == Lateral {\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Trill, Close, NearClose, CloseMid, Mid, OpenMid, NearOpen, Open:\n\t\t\tsound.Shape = Central\n\t\t}\n\t\tswitch sound.Point {\n\t\tcase LabialLabial, LabialDental, RadicalPharyngeal, RadicalEpiglottal, Glottal:\n\t\t\tsound.Shape = Central\n\t\t}\n\t}\n\n\tif sound.Point == DorsalPalVel {\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\tsound.Point = DorsalPalatal\n\t\t}\n\t}\n\n\tif sound.Point == DorsalVelUlu {\n\t\tswitch sound.Manner {\n\t\tcase Closed, Stop, Flap, Trill, Fricative, Approximant:\n\t\t\tsound.Point = DorsalUvular\n\t\t}\n\t}\n}\n\nfunc (sound *Sound) Encode() int {\n\tenc := int(sound.Point)\n\tenc += 16 * int(sound.Manner)\n\tenc += 16 * 13 * int(sound.Shape)\n\tenc += 16 * 13 * 3 * int(sound.Voice)\n\tvalue := 0\n\tif sound.Rounded {\n\t\tvalue = 1\n\t}\n\tenc += 16 * 13 * 3 * 5 * value\n\tvalue = 0\n\tif sound.Nasal {\n\t\tvalue = 1\n\t}\n\tenc += 16 * 13 * 3 * 5 * 2 * value\n\treturn enc\n}\n\nfunc Decode(enc int) Sound {\n\tsound := new(Sound)\n\tres := enc \/ (16 * 13 * 3 * 5 * 2)\n\tsound.Nasal = res == 1\n\tenc -= res * (16 * 13 * 3 * 5 * 2)\n\tres = enc \/ (16 * 13 * 3 * 5)\n\tsound.Rounded = res == 1\n\tenc -= res * (16 * 13 * 3 * 5)\n\tres = enc \/ (16 * 13 * 3)\n\tsound.Voice = Voice(res)\n\tenc -= res * (16 * 13 * 3)\n\tres = enc \/ (16 * 13)\n\tsound.Shape = TongueShape(res)\n\tenc -= res * (16 * 13)\n\tres = enc \/ 16\n\tsound.Manner = ArticulationManner(res)\n\tenc -= res * 16\n\tsound.Point = ArticulationPoint(enc)\n\treturn *sound\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/xiaonanln\/goworld\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/common\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/consts\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/entity\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/gwlog\"\n)\n\n\/\/ Player 对象代表一名玩家\ntype Player struct {\n\tentity.Entity\n}\n\nfunc (a *Player) DescribeEntityType(desc *entity.EntityTypeDesc) {\n\tdesc.SetPersistent(true).SetUseAOI(true)\n\tdesc.DefineAttr(\"name\", \"AllClients\", \"Persistent\")\n\tdesc.DefineAttr(\"lv\", \"AllClients\", \"Persistent\")\n\tdesc.DefineAttr(\"hp\", \"AllClients\")\n\tdesc.DefineAttr(\"hpmax\", \"AllClients\")\n\tdesc.DefineAttr(\"action\", \"AllClients\")\n\tdesc.DefineAttr(\"spaceKind\", \"Persistent\")\n}\n\n\/\/ OnCreated 在Player对象创建后被调用\nfunc (a *Player) OnCreated() {\n\ta.Entity.OnCreated()\n\ta.setDefaultAttrs()\n}\n\n\/\/ setDefaultAttrs 设置玩家的一些默认属性\nfunc (a *Player) setDefaultAttrs() {\n\ta.Attrs.SetDefaultInt(\"spaceKind\", 1)\n\ta.Attrs.SetDefaultStr(\"name\", \"noname\")\n\ta.Attrs.SetDefaultInt(\"lv\", 1)\n\ta.Attrs.SetDefaultInt(\"hp\", 100)\n\ta.Attrs.SetDefaultInt(\"hpmax\", 100)\n\ta.Attrs.SetDefaultStr(\"action\", \"idle\")\n\n\ta.SetClientSyncing(true)\n}\n\n\/\/ GetSpaceID 获得玩家的场景ID并发给调用者\nfunc (a *Player) GetSpaceID(callerID common.EntityID) {\n\ta.Call(callerID, \"OnGetPlayerSpaceID\", a.ID, a.Space.ID)\n}\n\nfunc (p *Player) enterSpace(spaceKind int) {\n\tif p.Space.Kind == spaceKind {\n\t\treturn\n\t}\n\tif consts.DEBUG_SPACES {\n\t\tgwlog.Infof(\"%s enter space from %d => %d\", p, p.Space.Kind, spaceKind)\n\t}\n\tgoworld.CallService(\"SpaceService\", \"EnterSpace\", p.ID, spaceKind)\n}\n\n\/\/ OnClientConnected is called when client is connected\nfunc (a *Player) OnClientConnected() {\n\tgwlog.Infof(\"%s client connected\", a)\n\ta.enterSpace(int(a.GetInt(\"spaceKind\")))\n}\n\n\/\/ OnClientDisconnected is called when client is lost\nfunc (a *Player) OnClientDisconnected() {\n\tgwlog.Infof(\"%s client disconnected\", a)\n\ta.Destroy()\n}\n\n\/\/ EnterSpace_Client is enter space RPC for client\nfunc (a *Player) EnterSpace_Client(kind int) {\n\ta.enterSpace(kind)\n}\n\n\/\/ DoEnterSpace is called by SpaceService to notify avatar entering specified space\nfunc (a *Player) DoEnterSpace(kind int, spaceID common.EntityID) {\n\t\/\/ let the avatar enter space with spaceID\n\ta.EnterSpace(spaceID, entity.Vector3{})\n}\n\n\/\/func (a *Player) randomPosition() entity.Vector3 {\n\/\/\tminCoord, maxCoord := -400, 400\n\/\/\treturn entity.Vector3{\n\/\/\t\tX: entity.Coord(minCoord + rand.Intn(maxCoord-minCoord)),\n\/\/\t\tY: 0,\n\/\/\t\tZ: entity.Coord(minCoord + rand.Intn(maxCoord-minCoord)),\n\/\/\t}\n\/\/}\n\n\/\/ OnEnterSpace is called when avatar enters a space\nfunc (a *Player) OnEnterSpace() {\n\tgwlog.Infof(\"%s ENTER SPACE %s\", a, a.Space)\n\ta.SetClientSyncing(true)\n}\n\nfunc (a *Player) SetAction_Client(action string) {\n\tif a.GetInt(\"hp\") <= 0 { \/\/ dead already\n\t\treturn\n\t}\n\n\ta.Attrs.SetStr(\"action\", action)\n}\n\nfunc (a *Player) ShootMiss_Client() {\n\ta.CallAllClients(\"Shoot\")\n}\n\nfunc (a *Player) ShootHit_Client(victimID common.EntityID) {\n\ta.CallAllClients(\"Shoot\")\n\tvictim := a.Space.GetEntity(victimID)\n\tif victim == nil {\n\t\tgwlog.Warnf(\"Shoot %s, but monster not found\", victimID)\n\t\treturn\n\t}\n\n\tif victim.Attrs.GetInt(\"hp\") <= 0 {\n\t\treturn\n\t}\n\n\tmonster := victim.I.(*Monster)\n\tmonster.TakeDamage(10)\n}\n\nfunc (player *Player) TakeDamage(damage int64) {\n\thp := player.GetInt(\"hp\")\n\tif hp <= 0 {\n\t\treturn\n\t}\n\n\thp = hp - damage\n\tif hp < 0 {\n\t\thp = 0\n\t}\n\n\tplayer.Attrs.SetInt(\"hp\", hp)\n\n\tif hp <= 0 {\n\t\t\/\/ now player dead ...\n\t\tplayer.Attrs.SetStr(\"action\", \"death\")\n\t\tplayer.SetClientSyncing(false)\n\t}\n}\n<commit_msg>tune goworld<commit_after>package main\n\nimport (\n\t\"github.com\/xiaonanln\/goworld\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/common\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/consts\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/entity\"\n\t\"github.com\/xiaonanln\/goworld\/engine\/gwlog\"\n)\n\n\/\/ Player 对象代表一名玩家\ntype Player struct {\n\tentity.Entity\n}\n\nfunc (a *Player) DescribeEntityType(desc *entity.EntityTypeDesc) {\n\tdesc.SetPersistent(true).SetUseAOI(true)\n\tdesc.DefineAttr(\"name\", \"AllClients\", \"Persistent\")\n\tdesc.DefineAttr(\"lv\", \"AllClients\", \"Persistent\")\n\tdesc.DefineAttr(\"hp\", \"AllClients\")\n\tdesc.DefineAttr(\"hpmax\", \"AllClients\")\n\tdesc.DefineAttr(\"action\", \"AllClients\")\n\tdesc.DefineAttr(\"spaceKind\", \"Persistent\")\n}\n\n\/\/ OnCreated 在Player对象创建后被调用\nfunc (a *Player) OnCreated() {\n\ta.Entity.OnCreated()\n\ta.setDefaultAttrs()\n}\n\n\/\/ setDefaultAttrs 设置玩家的一些默认属性\nfunc (a *Player) setDefaultAttrs() {\n\ta.Attrs.SetDefaultInt(\"spaceKind\", 1)\n\ta.Attrs.SetDefaultStr(\"name\", \"noname\")\n\ta.Attrs.SetDefaultInt(\"lv\", 1)\n\ta.Attrs.SetDefaultInt(\"hp\", 100)\n\ta.Attrs.SetDefaultInt(\"hpmax\", 100)\n\ta.Attrs.SetDefaultStr(\"action\", \"idle\")\n\n\ta.SetClientSyncing(true)\n}\n\n\/\/ GetSpaceID 获得玩家的场景ID并发给调用者\nfunc (a *Player) GetSpaceID(callerID common.EntityID) {\n\ta.Call(callerID, \"OnGetPlayerSpaceID\", a.ID, a.Space.ID)\n}\n\nfunc (p *Player) enterSpace(spaceKind int) {\n\tif p.Space.Kind == spaceKind {\n\t\treturn\n\t}\n\tif consts.DEBUG_SPACES {\n\t\tgwlog.Infof(\"%s enter space from %d => %d\", p, p.Space.Kind, spaceKind)\n\t}\n\tgoworld.CallService(\"SpaceService\", \"EnterSpace\", p.ID, spaceKind)\n}\n\n\/\/ OnClientConnected is called when client is connected\nfunc (a *Player) OnClientConnected() {\n\tgwlog.Infof(\"%s client connected\", a)\n\ta.enterSpace(int(a.GetInt(\"spaceKind\")))\n}\n\n\/\/ OnClientDisconnected is called when client is lost\nfunc (a *Player) OnClientDisconnected() {\n\tgwlog.Infof(\"%s client disconnected\", a)\n\ta.Destroy()\n}\n\n\/\/ EnterSpace_Client is enter space RPC for client\nfunc (a *Player) EnterSpace_Client(kind int) {\n\ta.enterSpace(kind)\n}\n\n\/\/ DoEnterSpace is called by SpaceService to notify avatar entering specified space\nfunc (a *Player) DoEnterSpace(kind int, spaceID common.EntityID) {\n\t\/\/ let the avatar enter space with spaceID\n\ta.EnterSpace(spaceID, entity.Vector3{})\n}\n\n\/\/func (a *Player) randomPosition() entity.Vector3 {\n\/\/\tminCoord, maxCoord := -400, 400\n\/\/\treturn entity.Vector3{\n\/\/\t\tX: entity.Coord(minCoord + rand.Intn(maxCoord-minCoord)),\n\/\/\t\tY: 0,\n\/\/\t\tZ: entity.Coord(minCoord + rand.Intn(maxCoord-minCoord)),\n\/\/\t}\n\/\/}\n\n\/\/ OnEnterSpace is called when avatar enters a space\nfunc (a *Player) OnEnterSpace() {\n\tgwlog.Infof(\"%s ENTER SPACE %s\", a, a.Space)\n\ta.SetClientSyncing(true)\n}\n\nfunc (a *Player) SetAction_Client(action string) {\n\tif a.GetInt(\"hp\") <= 0 { \/\/ dead already\n\t\treturn\n\t}\n\n\ta.Attrs.SetStr(\"action\", action)\n}\n\nfunc (a *Player) ShootMiss_Client() {\n\ta.CallAllClients(\"Shoot\")\n}\n\nfunc (a *Player) ShootHit_Client(victimID common.EntityID) {\n\ta.CallAllClients(\"Shoot\")\n\tvictim := a.Space.GetEntity(victimID)\n\tif victim == nil {\n\t\tgwlog.Warnf(\"Shoot %s, but monster not found\", victimID)\n\t\treturn\n\t}\n\n\tif victim.Attrs.GetInt(\"hp\") <= 0 {\n\t\treturn\n\t}\n\n\tmonster := victim.I.(*Monster)\n\tmonster.TakeDamage(50)\n}\n\nfunc (player *Player) TakeDamage(damage int64) {\n\thp := player.GetInt(\"hp\")\n\tif hp <= 0 {\n\t\treturn\n\t}\n\n\thp = hp - damage\n\tif hp < 0 {\n\t\thp = 0\n\t}\n\n\tplayer.Attrs.SetInt(\"hp\", hp)\n\n\tif hp <= 0 {\n\t\t\/\/ now player dead ...\n\t\tplayer.Attrs.SetStr(\"action\", \"death\")\n\t\tplayer.SetClientSyncing(false)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage types\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\n\t\"github.com\/coreos\/container-linux-config-transpiler\/config\/astyaml\"\n\t\"github.com\/coreos\/container-linux-config-transpiler\/internal\/util\"\n\n\tignTypes \"github.com\/coreos\/ignition\/config\/v2_2\/types\"\n\t\"github.com\/coreos\/ignition\/config\/validate\/astnode\"\n\t\"github.com\/coreos\/ignition\/config\/validate\/report\"\n\t\"github.com\/vincent-petithory\/dataurl\"\n)\n\nvar (\n\tDefaultFileMode = 0644\n\tDefaultDirMode = 0755\n\n\tWarningUnsetFileMode = fmt.Errorf(\"mode unspecified for file, defaulting to %#o\", DefaultFileMode)\n\tWarningUnsetDirMode = fmt.Errorf(\"mode unspecified for directory, defaulting to %#o\", DefaultDirMode)\n\n\tErrTooManyFileSources = errors.New(\"only one of the following can be set: local, inline, remote.url\")\n)\n\ntype FileUser struct {\n\tId *int `yaml:\"id\"`\n\tName string `yaml:\"name\"`\n}\n\ntype FileGroup struct {\n\tId *int `yaml:\"id\"`\n\tName string `yaml:\"name\"`\n}\n\ntype File struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tMode *int `yaml:\"mode\"`\n\tContents FileContents `yaml:\"contents\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n\tAppend bool `yaml:\"append\"`\n}\n\ntype FileContents struct {\n\tRemote Remote `yaml:\"remote\"`\n\tInline string `yaml:\"inline\"`\n\tLocal string `yaml:\"local\"`\n}\n\ntype Remote struct {\n\tUrl string `yaml:\"url\"`\n\tCompression string `yaml:\"compression\"`\n\tVerification Verification `yaml:\"verification\"`\n}\n\ntype Directory struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tMode *int `yaml:\"mode\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n}\n\ntype Link struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tHard bool `yaml:\"hard\"`\n\tTarget string `yaml:\"target\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n}\n\nfunc (f File) ValidateMode() report.Report {\n\tif f.Mode == nil {\n\t\treturn report.ReportFromError(WarningUnsetFileMode, report.EntryWarning)\n\t}\n\treturn report.Report{}\n}\n\nfunc (d Directory) ValidateMode() report.Report {\n\tif d.Mode == nil {\n\t\treturn report.ReportFromError(WarningUnsetDirMode, report.EntryWarning)\n\t}\n\treturn report.Report{}\n}\n\nfunc (fc FileContents) Validate() report.Report {\n\ti := 0\n\tif fc.Remote.Url != \"\" {\n\t\ti++\n\t}\n\tif fc.Inline != \"\" {\n\t\ti++\n\t}\n\tif fc.Local != \"\" {\n\t\ti++\n\t}\n\tif i > 1 {\n\t\treturn report.ReportFromError(ErrTooManyFileSources, report.EntryError)\n\t}\n\treturn report.Report{}\n}\n\nfunc init() {\n\tregister(func(in Config, ast astnode.AstNode, out ignTypes.Config, platform string) (ignTypes.Config, report.Report, astnode.AstNode) {\n\t\tr := report.Report{}\n\t\tfiles_node, _ := getNodeChildPath(ast, \"storage\", \"files\")\n\t\tfor i, file := range in.Storage.Files {\n\t\t\tif file.Mode == nil {\n\t\t\t\tfile.Mode = util.IntToPtr(DefaultFileMode)\n\t\t\t}\n\t\t\tfile_node, _ := getNodeChild(files_node, i)\n\t\t\tnewFile := ignTypes.File{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: file.Filesystem,\n\t\t\t\t\tPath: file.Path,\n\t\t\t\t\tOverwrite: file.Overwrite,\n\t\t\t\t},\n\t\t\t\tFileEmbedded1: ignTypes.FileEmbedded1{\n\t\t\t\t\tMode: file.Mode,\n\t\t\t\t\tAppend: file.Append,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif file.User != nil {\n\t\t\t\tnewFile.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: file.User.Id,\n\t\t\t\t\tName: file.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif file.Group != nil {\n\t\t\t\tnewFile.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: file.Group.Id,\n\t\t\t\t\tName: file.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Inline != \"\" {\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: (&url.URL{\n\t\t\t\t\t\tScheme: \"data\",\n\t\t\t\t\t\tOpaque: \",\" + dataurl.EscapeString(file.Contents.Inline),\n\t\t\t\t\t}).String(),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Local != \"\" {\n\t\t\t\t\/\/ The provided local file path is relative to the value of the\n\t\t\t\t\/\/ --files-dir flag.\n\t\t\t\tfilesDir := flag.Lookup(\"files-dir\")\n\t\t\t\tif filesDir == nil || filesDir.Value.String() == \"\" {\n\t\t\t\t\terr := errors.New(\"local files require setting the --files-dir flag to the directory that contains the file\")\n\t\t\t\t\tflagReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"local\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tflagReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(flagReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlocalPath := path.Join(filesDir.Value.String(), file.Contents.Local)\n\t\t\t\tcontents, err := ioutil.ReadFile(localPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ If the file could not be read, record error and continue.\n\t\t\t\t\tconvertReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"local\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tconvertReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(convertReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Include the contents of the local file as if it were provided inline.\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: (&url.URL{\n\t\t\t\t\t\tScheme: \"data\",\n\t\t\t\t\t\tOpaque: \",\" + dataurl.Escape(contents),\n\t\t\t\t\t}).String(),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Remote.Url != \"\" {\n\t\t\t\tsource, err := url.Parse(file.Contents.Remote.Url)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ if invalid, record error and continue\n\t\t\t\t\tconvertReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"remote\", \"url\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tconvertReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(convertReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ patch the yaml tree to look like the ignition tree by making contents\n\t\t\t\t\/\/ the remote section and changing the name from url -> source\n\t\t\t\tasYamlNode, ok := file_node.(astyaml.YamlNode)\n\t\t\t\tif ok {\n\t\t\t\t\tnewContents, _ := getNodeChildPath(file_node, \"contents\", \"remote\")\n\t\t\t\t\tnewContentsAsYaml := newContents.(astyaml.YamlNode)\n\t\t\t\t\tasYamlNode.ChangeKey(\"contents\", \"contents\", newContentsAsYaml)\n\n\t\t\t\t\turl, _ := getNodeChild(newContents.(astyaml.YamlNode), \"url\")\n\t\t\t\t\tnewContentsAsYaml.ChangeKey(\"url\", \"source\", url.(astyaml.YamlNode))\n\t\t\t\t}\n\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{Source: source.String()}\n\n\t\t\t}\n\n\t\t\tif newFile.Contents == (ignTypes.FileContents{}) {\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: \"data:,\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnewFile.Contents.Compression = file.Contents.Remote.Compression\n\t\t\tnewFile.Contents.Verification = convertVerification(file.Contents.Remote.Verification)\n\n\t\t\tout.Storage.Files = append(out.Storage.Files, newFile)\n\t\t}\n\t\tfor _, dir := range in.Storage.Directories {\n\t\t\tif dir.Mode == nil {\n\t\t\t\tdir.Mode = util.IntToPtr(DefaultDirMode)\n\t\t\t}\n\t\t\tnewDir := ignTypes.Directory{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: dir.Filesystem,\n\t\t\t\t\tPath: dir.Path,\n\t\t\t\t\tOverwrite: dir.Overwrite,\n\t\t\t\t},\n\t\t\t\tDirectoryEmbedded1: ignTypes.DirectoryEmbedded1{\n\t\t\t\t\tMode: dir.Mode,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif dir.User != nil {\n\t\t\t\tnewDir.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: dir.User.Id,\n\t\t\t\t\tName: dir.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif dir.Group != nil {\n\t\t\t\tnewDir.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: dir.Group.Id,\n\t\t\t\t\tName: dir.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tout.Storage.Directories = append(out.Storage.Directories, newDir)\n\t\t}\n\t\tfor _, link := range in.Storage.Links {\n\t\t\tnewLink := ignTypes.Link{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: link.Filesystem,\n\t\t\t\t\tPath: link.Path,\n\t\t\t\t\tOverwrite: link.Overwrite,\n\t\t\t\t},\n\t\t\t\tLinkEmbedded1: ignTypes.LinkEmbedded1{\n\t\t\t\t\tHard: link.Hard,\n\t\t\t\t\tTarget: link.Target,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif link.User != nil {\n\t\t\t\tnewLink.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: link.User.Id,\n\t\t\t\t\tName: link.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif link.Group != nil {\n\t\t\t\tnewLink.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: link.Group.Id,\n\t\t\t\t\tName: link.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tout.Storage.Links = append(out.Storage.Links, newLink)\n\t\t}\n\t\treturn out, r, ast\n\t})\n}\n<commit_msg>types\/{files\/dirs\/links}: default to root fs<commit_after>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage types\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\n\t\"github.com\/coreos\/container-linux-config-transpiler\/config\/astyaml\"\n\t\"github.com\/coreos\/container-linux-config-transpiler\/internal\/util\"\n\n\tignTypes \"github.com\/coreos\/ignition\/config\/v2_2\/types\"\n\t\"github.com\/coreos\/ignition\/config\/validate\/astnode\"\n\t\"github.com\/coreos\/ignition\/config\/validate\/report\"\n\t\"github.com\/vincent-petithory\/dataurl\"\n)\n\nvar (\n\tDefaultFileMode = 0644\n\tDefaultDirMode = 0755\n\n\tWarningUnsetFileMode = fmt.Errorf(\"mode unspecified for file, defaulting to %#o\", DefaultFileMode)\n\tWarningUnsetDirMode = fmt.Errorf(\"mode unspecified for directory, defaulting to %#o\", DefaultDirMode)\n\n\tErrTooManyFileSources = errors.New(\"only one of the following can be set: local, inline, remote.url\")\n)\n\ntype FileUser struct {\n\tId *int `yaml:\"id\"`\n\tName string `yaml:\"name\"`\n}\n\ntype FileGroup struct {\n\tId *int `yaml:\"id\"`\n\tName string `yaml:\"name\"`\n}\n\ntype File struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tMode *int `yaml:\"mode\"`\n\tContents FileContents `yaml:\"contents\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n\tAppend bool `yaml:\"append\"`\n}\n\ntype FileContents struct {\n\tRemote Remote `yaml:\"remote\"`\n\tInline string `yaml:\"inline\"`\n\tLocal string `yaml:\"local\"`\n}\n\ntype Remote struct {\n\tUrl string `yaml:\"url\"`\n\tCompression string `yaml:\"compression\"`\n\tVerification Verification `yaml:\"verification\"`\n}\n\ntype Directory struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tMode *int `yaml:\"mode\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n}\n\ntype Link struct {\n\tFilesystem string `yaml:\"filesystem\"`\n\tPath string `yaml:\"path\"`\n\tUser *FileUser `yaml:\"user\"`\n\tGroup *FileGroup `yaml:\"group\"`\n\tHard bool `yaml:\"hard\"`\n\tTarget string `yaml:\"target\"`\n\tOverwrite *bool `yaml:\"overwrite\"`\n}\n\nfunc (f File) ValidateMode() report.Report {\n\tif f.Mode == nil {\n\t\treturn report.ReportFromError(WarningUnsetFileMode, report.EntryWarning)\n\t}\n\treturn report.Report{}\n}\n\nfunc (d Directory) ValidateMode() report.Report {\n\tif d.Mode == nil {\n\t\treturn report.ReportFromError(WarningUnsetDirMode, report.EntryWarning)\n\t}\n\treturn report.Report{}\n}\n\nfunc (fc FileContents) Validate() report.Report {\n\ti := 0\n\tif fc.Remote.Url != \"\" {\n\t\ti++\n\t}\n\tif fc.Inline != \"\" {\n\t\ti++\n\t}\n\tif fc.Local != \"\" {\n\t\ti++\n\t}\n\tif i > 1 {\n\t\treturn report.ReportFromError(ErrTooManyFileSources, report.EntryError)\n\t}\n\treturn report.Report{}\n}\n\nfunc init() {\n\tregister(func(in Config, ast astnode.AstNode, out ignTypes.Config, platform string) (ignTypes.Config, report.Report, astnode.AstNode) {\n\t\tr := report.Report{}\n\t\tfiles_node, _ := getNodeChildPath(ast, \"storage\", \"files\")\n\t\tfor i, file := range in.Storage.Files {\n\t\t\tif file.Mode == nil {\n\t\t\t\tfile.Mode = util.IntToPtr(DefaultFileMode)\n\t\t\t}\n\t\t\tif file.Filesystem == \"\" {\n\t\t\t\tfile.Filesystem = \"root\"\n\t\t\t}\n\t\t\tfile_node, _ := getNodeChild(files_node, i)\n\t\t\tnewFile := ignTypes.File{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: file.Filesystem,\n\t\t\t\t\tPath: file.Path,\n\t\t\t\t\tOverwrite: file.Overwrite,\n\t\t\t\t},\n\t\t\t\tFileEmbedded1: ignTypes.FileEmbedded1{\n\t\t\t\t\tMode: file.Mode,\n\t\t\t\t\tAppend: file.Append,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif file.User != nil {\n\t\t\t\tnewFile.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: file.User.Id,\n\t\t\t\t\tName: file.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif file.Group != nil {\n\t\t\t\tnewFile.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: file.Group.Id,\n\t\t\t\t\tName: file.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Inline != \"\" {\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: (&url.URL{\n\t\t\t\t\t\tScheme: \"data\",\n\t\t\t\t\t\tOpaque: \",\" + dataurl.EscapeString(file.Contents.Inline),\n\t\t\t\t\t}).String(),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Local != \"\" {\n\t\t\t\t\/\/ The provided local file path is relative to the value of the\n\t\t\t\t\/\/ --files-dir flag.\n\t\t\t\tfilesDir := flag.Lookup(\"files-dir\")\n\t\t\t\tif filesDir == nil || filesDir.Value.String() == \"\" {\n\t\t\t\t\terr := errors.New(\"local files require setting the --files-dir flag to the directory that contains the file\")\n\t\t\t\t\tflagReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"local\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tflagReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(flagReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlocalPath := path.Join(filesDir.Value.String(), file.Contents.Local)\n\t\t\t\tcontents, err := ioutil.ReadFile(localPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ If the file could not be read, record error and continue.\n\t\t\t\t\tconvertReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"local\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tconvertReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(convertReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Include the contents of the local file as if it were provided inline.\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: (&url.URL{\n\t\t\t\t\t\tScheme: \"data\",\n\t\t\t\t\t\tOpaque: \",\" + dataurl.Escape(contents),\n\t\t\t\t\t}).String(),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif file.Contents.Remote.Url != \"\" {\n\t\t\t\tsource, err := url.Parse(file.Contents.Remote.Url)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ if invalid, record error and continue\n\t\t\t\t\tconvertReport := report.ReportFromError(err, report.EntryError)\n\t\t\t\t\tif n, err := getNodeChildPath(file_node, \"contents\", \"remote\", \"url\"); err == nil {\n\t\t\t\t\t\tline, col, _ := n.ValueLineCol(nil)\n\t\t\t\t\t\tconvertReport.AddPosition(line, col, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tr.Merge(convertReport)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ patch the yaml tree to look like the ignition tree by making contents\n\t\t\t\t\/\/ the remote section and changing the name from url -> source\n\t\t\t\tasYamlNode, ok := file_node.(astyaml.YamlNode)\n\t\t\t\tif ok {\n\t\t\t\t\tnewContents, _ := getNodeChildPath(file_node, \"contents\", \"remote\")\n\t\t\t\t\tnewContentsAsYaml := newContents.(astyaml.YamlNode)\n\t\t\t\t\tasYamlNode.ChangeKey(\"contents\", \"contents\", newContentsAsYaml)\n\n\t\t\t\t\turl, _ := getNodeChild(newContents.(astyaml.YamlNode), \"url\")\n\t\t\t\t\tnewContentsAsYaml.ChangeKey(\"url\", \"source\", url.(astyaml.YamlNode))\n\t\t\t\t}\n\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{Source: source.String()}\n\n\t\t\t}\n\n\t\t\tif newFile.Contents == (ignTypes.FileContents{}) {\n\t\t\t\tnewFile.Contents = ignTypes.FileContents{\n\t\t\t\t\tSource: \"data:,\",\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnewFile.Contents.Compression = file.Contents.Remote.Compression\n\t\t\tnewFile.Contents.Verification = convertVerification(file.Contents.Remote.Verification)\n\n\t\t\tout.Storage.Files = append(out.Storage.Files, newFile)\n\t\t}\n\t\tfor _, dir := range in.Storage.Directories {\n\t\t\tif dir.Mode == nil {\n\t\t\t\tdir.Mode = util.IntToPtr(DefaultDirMode)\n\t\t\t}\n\t\t\tif dir.Filesystem == \"\" {\n\t\t\t\tdir.Filesystem = \"root\"\n\t\t\t}\n\t\t\tnewDir := ignTypes.Directory{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: dir.Filesystem,\n\t\t\t\t\tPath: dir.Path,\n\t\t\t\t\tOverwrite: dir.Overwrite,\n\t\t\t\t},\n\t\t\t\tDirectoryEmbedded1: ignTypes.DirectoryEmbedded1{\n\t\t\t\t\tMode: dir.Mode,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif dir.User != nil {\n\t\t\t\tnewDir.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: dir.User.Id,\n\t\t\t\t\tName: dir.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif dir.Group != nil {\n\t\t\t\tnewDir.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: dir.Group.Id,\n\t\t\t\t\tName: dir.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tout.Storage.Directories = append(out.Storage.Directories, newDir)\n\t\t}\n\t\tfor _, link := range in.Storage.Links {\n\t\t\tif link.Filesystem == \"\" {\n\t\t\t\tlink.Filesystem = \"root\"\n\t\t\t}\n\t\t\tnewLink := ignTypes.Link{\n\t\t\t\tNode: ignTypes.Node{\n\t\t\t\t\tFilesystem: link.Filesystem,\n\t\t\t\t\tPath: link.Path,\n\t\t\t\t\tOverwrite: link.Overwrite,\n\t\t\t\t},\n\t\t\t\tLinkEmbedded1: ignTypes.LinkEmbedded1{\n\t\t\t\t\tHard: link.Hard,\n\t\t\t\t\tTarget: link.Target,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif link.User != nil {\n\t\t\t\tnewLink.User = &ignTypes.NodeUser{\n\t\t\t\t\tID: link.User.Id,\n\t\t\t\t\tName: link.User.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif link.Group != nil {\n\t\t\t\tnewLink.Group = &ignTypes.NodeGroup{\n\t\t\t\t\tID: link.Group.Id,\n\t\t\t\t\tName: link.Group.Name,\n\t\t\t\t}\n\t\t\t}\n\t\t\tout.Storage.Links = append(out.Storage.Links, newLink)\n\t\t}\n\t\treturn out, r, ast\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package filesystem\n\nimport (\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectcache\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc (objSrv *FileSystemObjectServer) getObjectReader(hash hash.Hash) (uint64,\n\tio.Reader, error) {\n\tfilename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hash))\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\tdefer file.Close()\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\treturn uint64(fi.Size()), file, nil\n}\n<commit_msg>Fix bug in objectserverver.filesystem.getObjectReader(): do not close reader.<commit_after>package filesystem\n\nimport (\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"github.com\/Symantec\/Dominator\/lib\/objectcache\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc (objSrv *FileSystemObjectServer) getObjectReader(hash hash.Hash) (uint64,\n\tio.Reader, error) {\n\tfilename := path.Join(objSrv.baseDir, objectcache.HashToFilename(hash))\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn 0, nil, err\n\t}\n\treturn uint64(fi.Size()), file, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\ntype Leadership uint8\n\nconst (\n\tZKFlagNone = 0\n\tLeaderElectionPath = \"\/leader-election\"\n\n\tLeadershipUnknown Leadership = 1\n\tLeadershipLeader Leadership = 2\n\tLeadershipFollower Leadership = 3\n)\n\nvar (\n\tZKDefaultACL = zk.WorldACL(zk.PermAll)\n)\n\nfunc connect(addrs []string) (*zk.Conn, error) {\n\tconn, connChan, err := zk.Connect(addrs, 5*time.Second)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"zk connection to [%v] error: %v\", addrs, err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase connEvent := <-connChan:\n\t\t\tif connEvent.State == zk.StateConnected {\n\t\t\t\tlog.Info(\"connect to zookeeper server success!\")\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t\t\/\/ TODO(nmg) should be re-connect.\n\t\t\tif connEvent.State == zk.StateDisconnected {\n\t\t\t\tlog.Info(\"lost connection from zookeeper\")\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\t\/\/ TODO(nmg) currently not work.\n\t\tcase _ = <-time.After(time.Second * 5):\n\t\t\tconn.Close()\n\t\t\treturn nil, nil\n\t\t}\n\t}\n}\n\nfunc (m *Manager) setLeader(path string) {\n\tp := filepath.Join(m.electRootPath, path)\n\t_, err := m.ZKClient.Set(p, []byte(m.cfg.Advertise), -1)\n\tif err != nil {\n\t\tlog.Infof(\"Update leader address error %s\", err.Error())\n\t}\n}\n\nfunc (m *Manager) getLeader(path string) (string, error) {\n\tp := filepath.Join(m.electRootPath, path)\n\tfor {\n\t\tb, _, err := m.ZKClient.Get(p)\n\t\tif err != nil {\n\t\t\tlog.Infof(\"Get leader address error %s\", err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(b) > 0 {\n\t\t\treturn string(b), nil\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc (m *Manager) isLeader(path string) (bool, error, string) {\n\tchildren, _, err := m.ZKClient.Children(m.electRootPath)\n\tif err != nil {\n\t\treturn false, err, \"\"\n\t}\n\n\tsort.Strings(children)\n\n\tp := children[0]\n\n\treturn path == p, nil, p\n}\n\nfunc (m *Manager) elect() (string, error) {\n\tleader, err, p := m.isLeader(m.myid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif leader {\n\t\tlog.Info(\"Electing leader success.\")\n\t\tm.leader = m.cfg.Advertise\n\t\tm.setLeader(p)\n\t\tm.leadershipChangeCh <- LeadershipLeader\n\n\t\treturn p, nil\n\t}\n\n\tlog.Infof(\"Leader manager has been elected.\")\n\n\tl, err := m.getLeader(p)\n\tif err != nil {\n\t\tif err == zk.ErrNoNode {\n\t\t\tlog.Errorf(\"Leader lost again. start new electing...\")\n\t\t\treturn m.elect()\n\t\t}\n\t\tlog.Errorf(\"Detect new leader error %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\tlog.Infof(\"Detect new leader at %s\", l)\n\n\tm.leader = l\n\n\tm.leadershipChangeCh <- LeadershipFollower\n\n\treturn p, nil\n\n}\n\nfunc (m *Manager) electLeader() (string, error) {\n\tp := filepath.Join(m.electRootPath, \"0\")\n\tpath, err := m.ZKClient.Create(p, nil, zk.FlagEphemeral|zk.FlagSequence, ZKDefaultACL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tm.myid = filepath.Base(path)\n\n\treturn m.elect()\n}\n\nfunc (m *Manager) watchLeader(path string) error {\n\tp := filepath.Join(m.electRootPath, path)\n\t_, _, childCh, err := m.ZKClient.ChildrenW(p)\n\tif err != nil {\n\t\tlog.Infof(\"Watch children error %s\", err)\n\t\treturn err\n\t}\n\n\tfor {\n\t\tchildEvent := <-childCh\n\t\tif childEvent.Type == zk.EventNodeDeleted {\n\t\t\tlog.Info(\"Lost leading manager. Start electing new leader...\")\n\t\t\t\/\/ If it is better to run following steps in a seprated goroutine?\n\t\t\t\/\/ (memory leak maybe)\n\t\t\tp, err := m.elect()\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"Electing new leader error %s\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.watchLeader(p)\n\t\t}\n\t}\n}\n<commit_msg>temporarily fix bug on potential dead loop for zk HA<commit_after>package manager\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\ntype Leadership uint8\n\nconst (\n\tZKFlagNone = 0\n\tLeaderElectionPath = \"\/leader-election\"\n\n\tLeadershipUnknown Leadership = 1\n\tLeadershipLeader Leadership = 2\n\tLeadershipFollower Leadership = 3\n)\n\nvar (\n\tZKDefaultACL = zk.WorldACL(zk.PermAll)\n)\n\nfunc connect(addrs []string) (*zk.Conn, error) {\n\tconn, connChan, err := zk.Connect(addrs, 5*time.Second)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"zk connection to [%v] error: %v\", addrs, err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase connEvent := <-connChan:\n\t\t\tif connEvent.State == zk.StateConnected {\n\t\t\t\tlog.Info(\"connect to zookeeper server success!\")\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t\t\/\/ TODO(nmg) should be re-connect.\n\t\t\tif connEvent.State == zk.StateDisconnected {\n\t\t\t\tlog.Info(\"lost connection from zookeeper\")\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\t\/\/ TODO(nmg) currently not work.\n\t\tcase _ = <-time.After(time.Second * 5):\n\t\t\tconn.Close()\n\t\t\treturn nil, nil\n\t\t}\n\t}\n}\n\nfunc (m *Manager) setLeader(path string) {\n\tp := filepath.Join(m.electRootPath, path)\n\t_, err := m.ZKClient.Set(p, []byte(m.cfg.Advertise), -1)\n\tif err != nil {\n\t\tlog.Infof(\"Update leader address error %s\", err.Error())\n\t}\n}\n\nfunc (m *Manager) getLeader(path string) (string, error) {\n\tp := filepath.Join(m.electRootPath, path)\n\tfor {\n\t\tb, _, err := m.ZKClient.Get(p)\n\t\tif err != nil {\n\t\t\tlog.Infof(\"Get leader address error %s\", err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(b) > 0 {\n\t\t\treturn string(b), nil\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc (m *Manager) isLeader(path string) (bool, error, string) {\n\tchildren, _, err := m.ZKClient.Children(m.electRootPath)\n\tif err != nil {\n\t\treturn false, err, \"\"\n\t}\n\n\tsort.Strings(children)\n\n\tp := children[0]\n\n\treturn path == p, nil, p\n}\n\nfunc (m *Manager) elect() (string, error) {\n\tleader, err, p := m.isLeader(m.myid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif leader {\n\t\tlog.Info(\"Electing leader success.\")\n\t\tm.leader = m.cfg.Advertise\n\t\tm.setLeader(p)\n\t\tm.leadershipChangeCh <- LeadershipLeader\n\n\t\treturn p, nil\n\t}\n\n\tlog.Infof(\"Leader manager has been elected.\")\n\n\tl, err := m.getLeader(p)\n\tif err != nil {\n\t\tif err == zk.ErrNoNode {\n\t\t\tlog.Errorf(\"Leader lost again. start new electing...\")\n\t\t\treturn m.elect()\n\t\t}\n\t\tlog.Errorf(\"Detect new leader error %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\tlog.Infof(\"Detect new leader at %s\", l)\n\n\tm.leader = l\n\n\tm.leadershipChangeCh <- LeadershipFollower\n\n\treturn p, nil\n\n}\n\nfunc (m *Manager) electLeader() (string, error) {\n\tp := filepath.Join(m.electRootPath, \"0\")\n\tpath, err := m.ZKClient.Create(p, nil, zk.FlagEphemeral|zk.FlagSequence, ZKDefaultACL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tm.myid = filepath.Base(path)\n\n\treturn m.elect()\n}\n\nfunc (m *Manager) watchLeader(path string) error {\n\tp := filepath.Join(m.electRootPath, path)\n\t_, _, childCh, err := m.ZKClient.ChildrenW(p)\n\tif err != nil {\n\t\tlog.Infof(\"Watch children error %s\", err)\n\t\treturn err\n\t}\n\n\tfor {\n\t\tchildEvent := <-childCh\n\t\tif childEvent.Type == zk.EventNodeDeleted {\n\t\t\tlog.Info(\"Lost leading manager. Start electing new leader...\")\n\t\t\t\/\/ If it is better to run following steps in a seprated goroutine?\n\t\t\t\/\/ (memory leak maybe)\n\t\t\tp, err := m.elect()\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"Electing new leader error %s\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.watchLeader(p)\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 100)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"errors\"\n\n\t. \"github.com\/bluemixgaragelondon\/cf-blue-green-deploy\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\t\"code.cloudfoundry.org\/cli\/cf\/manifest\"\n\t\"code.cloudfoundry.org\/cli\/cf\/i18n\"\n\t\"code.cloudfoundry.org\/cli\/utils\/generic\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype localeGetter struct{}\n\nfunc (l localeGetter) Locale() string {\n\treturn \"en-us\"\n}\n\nvar _ = Describe(\"Manifest reader\", func() {\n\n\t\/\/ testing code that calls into cf cli requires T to point to a translate func\n\ti18n.T = i18n.Init(localeGetter{})\n\n\tContext(\"When the manifest file is present\", func() {\n\t\tContext(\"when the manifest contain a host but no app name\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n host: foo`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns params that contain the host\", func() {\n\t\t\t\tExpect(manifestAppFinder.AppParams().Hosts).To(ContainElement(\"foo\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the manifest contains a different app name\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n name: bar\n host: foo`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns nil\", func() {\n\t\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the manifest contains multiple apps with 1 matching\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n applications:\n - name: bar\n host: barhost\n - name: foo\n hosts:\n - host1\n - host2\n domains:\n - example1.com\n - example2.com`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns the correct app\", func() {\n\t\t\t\tExpect(*manifestAppFinder.AppParams().Name).To(Equal(\"foo\"))\n\t\t\t\tExpect(manifestAppFinder.AppParams().Hosts).To(ConsistOf(\"host1\", \"host2\"))\n\t\t\t\tExpect(manifestAppFinder.AppParams().Domains).To(ConsistOf(\"example1.com\", \"example2.com\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"When no manifest file is present\", func() {\n\t\trepo := FakeRepo{err: errors.New(\"Error finding manifest\")}\n\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t})\n\t})\n\n\tContext(\"When manifest file is empty\", func() {\n\t\trepo := FakeRepo{yaml: ``}\n\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t})\n\t})\n\n\tDescribe(\"Route Lister\", func() {\n\t\tIt(\"returns a list of Routes from the manifest\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n name: foo\n hosts:\n - host1\n - host2\n domains:\n - example.com\n - example.net`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\troutes := manifestAppFinder.RoutesFromManifest(\"example.com\")\n\n\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.net\"}},\n\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.net\"}},\n\t\t\t))\n\t\t})\n\n\t\tContext(\"when app has just hosts, no domains\", func() {\n\t\t\tIt(\"returns Application\", func() {\n\t\t\t\trepo := FakeRepo{yaml: `---\n name: foo\n hosts:\n - host1\n - host2`,\n\t\t\t\t}\n\t\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\t\t\t\troutes := manifestAppFinder.RoutesFromManifest(\"example.com\")\n\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when no matching application\", func() {\n\t\t\tIt(\"returns nil\", func() {\n\t\t\t\trepo := FakeRepo{yaml: ``}\n\t\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\t\tExpect(manifestAppFinder.RoutesFromManifest(\"example.com\")).To(BeNil())\n\t\t\t})\n\t\t})\n\t})\n})\n\ntype FakeRepo struct {\n\tyaml string\n\terr error\n}\n\nfunc (r *FakeRepo) ReadManifest(path string) (*manifest.Manifest, error) {\n\tyamlMap := generic.NewMap()\n\tcandiedyaml.Unmarshal([]byte(r.yaml), yamlMap)\n\treturn &manifest.Manifest{Data: yamlMap}, r.err\n}\n<commit_msg>Test to reproduce problem with route support<commit_after>package main_test\n\nimport (\n\t\"errors\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/i18n\"\n\t\"code.cloudfoundry.org\/cli\/cf\/manifest\"\n\t\"code.cloudfoundry.org\/cli\/utils\/generic\"\n\t. \"github.com\/bluemixgaragelondon\/cf-blue-green-deploy\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\ntype localeGetter struct{}\n\nfunc (l localeGetter) Locale() string {\n\treturn \"en-us\"\n}\n\nvar _ = Describe(\"Manifest reader\", func() {\n\n\t\/\/ testing code that calls into cf cli requires T to point to a translate func\n\ti18n.T = i18n.Init(localeGetter{})\n\n\tContext(\"When the manifest file is present\", func() {\n\t\tContext(\"when the manifest contain a host but no app name\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n host: foo`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns params that contain the host\", func() {\n\t\t\t\tExpect(manifestAppFinder.AppParams().Hosts).To(ContainElement(\"foo\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the manifest contains a different app name\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n name: bar\n host: foo`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns nil\", func() {\n\t\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the manifest contains multiple apps with 1 matching\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n applications:\n - name: bar\n host: barhost\n - name: foo\n hosts:\n - host1\n - host2\n domains:\n - example1.com\n - example2.com`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\tIt(\"Returns the correct app\", func() {\n\t\t\t\tExpect(*manifestAppFinder.AppParams().Name).To(Equal(\"foo\"))\n\t\t\t\tExpect(manifestAppFinder.AppParams().Hosts).To(ConsistOf(\"host1\", \"host2\"))\n\t\t\t\tExpect(manifestAppFinder.AppParams().Domains).To(ConsistOf(\"example1.com\", \"example2.com\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"When no manifest file is present\", func() {\n\t\trepo := FakeRepo{err: errors.New(\"Error finding manifest\")}\n\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t})\n\t})\n\n\tContext(\"When manifest file is empty\", func() {\n\t\trepo := FakeRepo{yaml: ``}\n\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifestAppFinder.AppParams()).To(BeNil())\n\t\t})\n\t})\n\n\tDescribe(\"Route Lister\", func() {\n\t\tIt(\"returns a list of Routes from the manifest\", func() {\n\t\t\trepo := FakeRepo{yaml: `---\n name: foo\n hosts:\n - host1\n - host2\n domains:\n - example.com\n - example.net`,\n\t\t\t}\n\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\troutes := manifestAppFinder.RoutesFromManifest(\"example.com\")\n\n\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.net\"}},\n\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.net\"}},\n\t\t\t))\n\t\t})\n\n\t\tContext(\"when app has just hosts, no domains\", func() {\n\t\t\tIt(\"returns Application\", func() {\n\t\t\t\trepo := FakeRepo{yaml: `---\n name: foo\n hosts:\n - host1\n - host2`,\n\t\t\t\t}\n\t\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\t\t\t\troutes := manifestAppFinder.RoutesFromManifest(\"example.com\")\n\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tRoute{Host: \"host1\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\t\tRoute{Host: \"host2\", Domain: Domain{Name: \"example.com\"}},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tPContext(\"when app has just routes, no hosts or domains\", func() {\n\t\t\tIt(\"returns those routes\", func() {\n\t\t\t\trepo := FakeRepo{yaml: `---\n name: foo\n routes:\n - route1.domain1\n - route2.domain2`,\n\t\t\t\t}\n\t\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\t\t\t\troutes := manifestAppFinder.RoutesFromManifest(\"example.com\")\n\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tRoute{Host: \"route1\", Domain: Domain{Name: \"domain1\"}},\n\t\t\t\t\tRoute{Host: \"route2\", Domain: Domain{Name: \"domain2\"}},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when no matching application\", func() {\n\t\t\tIt(\"returns nil\", func() {\n\t\t\t\trepo := FakeRepo{yaml: ``}\n\t\t\t\tmanifestAppFinder := ManifestAppFinder{AppName: \"foo\", Repo: &repo}\n\n\t\t\t\tExpect(manifestAppFinder.RoutesFromManifest(\"example.com\")).To(BeNil())\n\t\t\t})\n\t\t})\n\t})\n})\n\ntype FakeRepo struct {\n\tyaml string\n\terr error\n}\n\nfunc (r *FakeRepo) ReadManifest(path string) (*manifest.Manifest, error) {\n\tyamlMap := generic.NewMap()\n\tcandiedyaml.Unmarshal([]byte(r.yaml), yamlMap)\n\treturn &manifest.Manifest{Data: yamlMap}, r.err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype M map[string]interface{}\n\ntype Request struct {\n\tMethod string\n\tToken string\n}\n\ntype Response struct {\n\tToken string `json:\"token\"`\n\tError string `json:\"error\"`\n\tData interface{} `json:\"data\"`\n}\n\ntype Broker struct {\n\tserved uint\n\tstart time.Time\n\trLck sync.Mutex\n\twLck sync.Mutex\n\tr io.Reader\n\tw io.Writer\n\tin *bufio.Reader\n\tout *json.Encoder\n\tWg *sync.WaitGroup\n}\n\nfunc NewBroker(r io.Reader, w io.Writer) *Broker {\n\treturn &Broker{\n\t\tr: r,\n\t\tw: w,\n\t\tin: bufio.NewReader(r),\n\t\tout: json.NewEncoder(w),\n\t\tWg: &sync.WaitGroup{},\n\t}\n}\n\nfunc (b *Broker) Send(resp Response) error {\n\terr := b.SendNoLog(resp)\n\tif err != nil {\n\t\tlogger.Println(\"Cannot send result\", err)\n\t}\n\treturn err\n}\n\nfunc (b *Broker) SendNoLog(resp Response) error {\n\tb.wLck.Lock()\n\tdefer b.wLck.Unlock()\n\n\tif resp.Data == nil {\n\t\tresp.Data = M{}\n\t}\n\n\ts, err := json.Marshal(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ the only expected write failure are due to broken pipes\n\t\/\/ which usually means the client has gone away so just ignore the error\n\tb.w.Write(s)\n\tb.w.Write([]byte{'\\n'})\n\treturn nil\n}\n\nfunc (b *Broker) call(req *Request, cl Caller) {\n\tdefer b.Wg.Done()\n\tb.served++\n\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"%v#%v PANIC: %v\\n\", req.Method, req.Token, err)\n\t\t\tb.Send(Response{\n\t\t\t\tToken: req.Token,\n\t\t\t\tError: \"broker: \" + req.Method + \"#\" + req.Token + \" PANIC\",\n\t\t\t})\n\t\t}\n\t}()\n\n\tres, err := cl.Call()\n\tb.Send(Response{\n\t\tToken: req.Token,\n\t\tError: err,\n\t\tData: res,\n\t})\n}\n\nfunc (b *Broker) accept() (stopLooping bool) {\n\tline, err := b.in.ReadBytes('\\n')\n\n\tif err == io.EOF {\n\t\tstopLooping = true\n\t} else if err != nil {\n\t\tlogger.Println(\"Cannot read input\", err)\n\t\tb.Send(Response{\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\treq := &Request{}\n\tdec := json.NewDecoder(bytes.NewReader(line))\n\t\/\/ if this fails, we are unable to return a useful error(no token to send it to)\n\t\/\/ so we'll simply\/implicitly drop the request since it has no method\n\t\/\/ we can safely assume that all such cases will be empty lines and not an actual request\n\tdec.Decode(&req)\n\n\tif req.Method == \"\" {\n\t\treturn\n\t}\n\n\tif req.Method == \"bye-ni\" {\n\t\treturn true\n\t}\n\n\tm := registry.Lookup(req.Method)\n\tif m == nil {\n\t\te := \"Invalid method \" + req.Method\n\t\tlogger.Println(e)\n\t\tb.Send(Response{\n\t\t\tToken: req.Token,\n\t\t\tError: e,\n\t\t})\n\t\treturn\n\t}\n\n\tcl := m(b)\n\terr = dec.Decode(cl)\n\tif err != nil {\n\t\tlogger.Println(\"Cannot decode arg\", err)\n\t\tb.Send(Response{\n\t\t\tToken: req.Token,\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tb.Wg.Add(1)\n\tgo b.call(req, cl)\n\n\treturn\n}\n\nfunc (b *Broker) Loop(decorate bool) {\n\tb.start = time.Now()\n\n\tif decorate {\n\t\tgo b.SendNoLog(Response{\n\t\t\tToken: \"margo.hello\",\n\t\t\tData: M{\n\t\t\t\t\"time\": b.start.String(),\n\t\t\t},\n\t\t})\n\t}\n\n\tfor {\n\t\tstopLooping := b.accept()\n\t\tif stopLooping {\n\t\t\tbreak\n\t\t}\n\t\truntime.Gosched()\n\t}\n\n\tif decorate {\n\t\tb.SendNoLog(Response{\n\t\t\tToken: \"margo.bye-ni\",\n\t\t\tData: M{\n\t\t\t\t\"served\": b.served,\n\t\t\t\t\"uptime\": time.Now().Sub(b.start).String(),\n\t\t\t},\n\t\t})\n\t}\n}\n<commit_msg>* try not to return null results to the client<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype M map[string]interface{}\n\ntype Request struct {\n\tMethod string\n\tToken string\n}\n\ntype Response struct {\n\tToken string `json:\"token\"`\n\tError string `json:\"error\"`\n\tData interface{} `json:\"data\"`\n}\n\ntype Broker struct {\n\tserved uint\n\tstart time.Time\n\trLck sync.Mutex\n\twLck sync.Mutex\n\tr io.Reader\n\tw io.Writer\n\tin *bufio.Reader\n\tout *json.Encoder\n\tWg *sync.WaitGroup\n}\n\nfunc NewBroker(r io.Reader, w io.Writer) *Broker {\n\treturn &Broker{\n\t\tr: r,\n\t\tw: w,\n\t\tin: bufio.NewReader(r),\n\t\tout: json.NewEncoder(w),\n\t\tWg: &sync.WaitGroup{},\n\t}\n}\n\nfunc (b *Broker) Send(resp Response) error {\n\terr := b.SendNoLog(resp)\n\tif err != nil {\n\t\tlogger.Println(\"Cannot send result\", err)\n\t}\n\treturn err\n}\n\nfunc (b *Broker) SendNoLog(resp Response) error {\n\tb.wLck.Lock()\n\tdefer b.wLck.Unlock()\n\n\tif resp.Data == nil {\n\t\tresp.Data = M{}\n\t}\n\n\ts, err := json.Marshal(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ the only expected write failure are due to broken pipes\n\t\/\/ which usually means the client has gone away so just ignore the error\n\tb.w.Write(s)\n\tb.w.Write([]byte{'\\n'})\n\treturn nil\n}\n\nfunc (b *Broker) call(req *Request, cl Caller) {\n\tdefer b.Wg.Done()\n\tb.served++\n\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"%v#%v PANIC: %v\\n\", req.Method, req.Token, err)\n\t\t\tb.Send(Response{\n\t\t\t\tToken: req.Token,\n\t\t\t\tError: \"broker: \" + req.Method + \"#\" + req.Token + \" PANIC\",\n\t\t\t})\n\t\t}\n\t}()\n\n\tres, err := cl.Call()\n\tif res == nil {\n\t\tres = M{}\n\t} else if v, ok := res.(M); ok && v == nil {\n\t\tres = M{}\n\t}\n\n\tb.Send(Response{\n\t\tToken: req.Token,\n\t\tError: err,\n\t\tData: res,\n\t})\n}\n\nfunc (b *Broker) accept() (stopLooping bool) {\n\tline, err := b.in.ReadBytes('\\n')\n\n\tif err == io.EOF {\n\t\tstopLooping = true\n\t} else if err != nil {\n\t\tlogger.Println(\"Cannot read input\", err)\n\t\tb.Send(Response{\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\treq := &Request{}\n\tdec := json.NewDecoder(bytes.NewReader(line))\n\t\/\/ if this fails, we are unable to return a useful error(no token to send it to)\n\t\/\/ so we'll simply\/implicitly drop the request since it has no method\n\t\/\/ we can safely assume that all such cases will be empty lines and not an actual request\n\tdec.Decode(&req)\n\n\tif req.Method == \"\" {\n\t\treturn\n\t}\n\n\tif req.Method == \"bye-ni\" {\n\t\treturn true\n\t}\n\n\tm := registry.Lookup(req.Method)\n\tif m == nil {\n\t\te := \"Invalid method \" + req.Method\n\t\tlogger.Println(e)\n\t\tb.Send(Response{\n\t\t\tToken: req.Token,\n\t\t\tError: e,\n\t\t})\n\t\treturn\n\t}\n\n\tcl := m(b)\n\terr = dec.Decode(cl)\n\tif err != nil {\n\t\tlogger.Println(\"Cannot decode arg\", err)\n\t\tb.Send(Response{\n\t\t\tToken: req.Token,\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tb.Wg.Add(1)\n\tgo b.call(req, cl)\n\n\treturn\n}\n\nfunc (b *Broker) Loop(decorate bool) {\n\tb.start = time.Now()\n\n\tif decorate {\n\t\tgo b.SendNoLog(Response{\n\t\t\tToken: \"margo.hello\",\n\t\t\tData: M{\n\t\t\t\t\"time\": b.start.String(),\n\t\t\t},\n\t\t})\n\t}\n\n\tfor {\n\t\tstopLooping := b.accept()\n\t\tif stopLooping {\n\t\t\tbreak\n\t\t}\n\t\truntime.Gosched()\n\t}\n\n\tif decorate {\n\t\tb.SendNoLog(Response{\n\t\t\tToken: \"margo.bye-ni\",\n\t\t\tData: M{\n\t\t\t\t\"served\": b.served,\n\t\t\t\t\"uptime\": time.Now().Sub(b.start).String(),\n\t\t\t},\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mastodon\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDoAPI(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Query().Get(\"max_id\") == \"999\" {\n\t\t\tw.Header().Set(\"Link\", `<:>; rel=\"next\"`)\n\t\t} else {\n\t\t\tw.Header().Set(\"Link\", `<http:\/\/example.com?max_id=234>; rel=\"next\", <http:\/\/example.com?since_id=890>; rel=\"prev\"`)\n\t\t}\n\t\tfmt.Fprintln(w, `[{\"username\": \"foo\"}, {\"username\": \"bar\"}]`)\n\t}))\n\tdefer ts.Close()\n\n\tc := NewClient(&Config{Server: ts.URL})\n\tvar accounts []Account\n\terr := c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, &Pagination{\n\t\tMaxID: \"999\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tpg := &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"789\",\n\t\tLimit: 10,\n\t}\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", url.Values{}, &accounts, pg)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"234\" {\n\t\tt.Fatalf(\"want %q but %q\", \"234\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"890\" {\n\t\tt.Fatalf(\"want %q but %q\", \"890\", pg.SinceID)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n\n\tpg = &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"789\",\n\t\tLimit: 10,\n\t}\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, pg)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"234\" {\n\t\tt.Fatalf(\"want %q but %q\", \"234\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"890\" {\n\t\tt.Fatalf(\"want %q but %q\", \"890\", pg.SinceID)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n\n\t\/\/ *Pagination is nil\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n}\n\nfunc TestAuthenticate(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.FormValue(\"username\") != \"valid\" || r.FormValue(\"password\") != \"user\" {\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, `{\"access_token\": \"zoo\"}`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\terr := client.Authenticate(context.Background(), \"invalid\", \"user\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\terr = client.Authenticate(context.Background(), \"valid\", \"user\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n}\n\nfunc TestAuthenticateWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\terr := client.Authenticate(ctx, \"invalid\", \"user\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Post %q: context canceled\", ts.URL+\"\/oauth\/token\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestPostStatus(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header.Get(\"Authorization\") != \"Bearer zoo\" {\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, `{\"access_token\": \"zoo\"}`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\t_, err := client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\t_, err = client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n}\n\nfunc TestPostStatusWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\t_, err := client.PostStatus(ctx, &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Post %q: context canceled\", ts.URL+\"\/api\/v1\/statuses\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestGetTimelineHome(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, `[{\"content\": \"foo\"}, {\"content\": \"bar\"}]`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\t_, err := client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\ttl, err := client.GetTimelineHome(context.Background(), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif len(tl) != 2 {\n\t\tt.Fatalf(\"result should be two: %d\", len(tl))\n\t}\n\tif tl[0].Content != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", tl[0].Content)\n\t}\n\tif tl[1].Content != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", tl[1].Content)\n\t}\n}\n\nfunc TestGetTimelineHomeWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\t_, err := client.GetTimelineHome(ctx, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Post %q: context canceled\", ts.URL+\"\/api\/v1\/timelines\/home\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestForTheCoverages(t *testing.T) {\n\t(*UpdateEvent)(nil).event()\n\t(*NotificationEvent)(nil).event()\n\t(*DeleteEvent)(nil).event()\n\t(*ErrorEvent)(nil).event()\n\t_ = (&ErrorEvent{io.EOF}).Error()\n}\n\nfunc TestNewPagination(t *testing.T) {\n\t_, err := newPagination(\"\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<:>; rel=\"next\"`)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<:>; rel=\"prev\"`)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<http:\/\/example.com?min_id=abc>; rel=\"prev\"`)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\n\tpg, err := newPagination(`<http:\/\/example.com?max_id=123>; rel=\"next\", <http:\/\/example.com?since_id=789>; rel=\"prev\"`)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", pg.SinceID)\n\t}\n}\n\nfunc TestGetPaginationID(t *testing.T) {\n\t_, err := getPaginationID(\":\", \"max_id\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = getPaginationID(\"http:\/\/example.com?max_id=abc\", \"max_id\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\n\tid, err := getPaginationID(\"http:\/\/example.com?max_id=123\", \"max_id\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif id != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", id)\n\t}\n}\n\nfunc TestPaginationSetValues(t *testing.T) {\n\tp := &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"456\",\n\t\tMinID: \"789\",\n\t\tLimit: 10,\n\t}\n\tbefore := url.Values{\"key\": {\"value\"}}\n\tafter := p.setValues(before)\n\tif after.Get(\"key\") != \"value\" {\n\t\tt.Fatalf(\"want %q but %q\", \"value\", after.Get(\"key\"))\n\t}\n\tif after.Get(\"max_id\") != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", after.Get(\"max_id\"))\n\t}\n\tif after.Get(\"since_id\") != \"456\" {\n\t\tt.Fatalf(\"want %q but %q\", \"456\", after.Get(\"since_id\"))\n\t}\n\tif after.Get(\"min_id\") != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", after.Get(\"min_id\"))\n\t}\n\tif after.Get(\"limit\") != \"10\" {\n\t\tt.Fatalf(\"want %q but %q\", \"10\", after.Get(\"limit\"))\n\t}\n\n\tp = &Pagination{\n\t\tMaxID: \"\",\n\t\tSinceID: \"789\",\n\t}\n\tbefore = url.Values{}\n\tafter = p.setValues(before)\n\tif after.Get(\"max_id\") != \"\" {\n\t\tt.Fatalf(\"result should be empty string: %q\", after.Get(\"max_id\"))\n\t}\n\tif after.Get(\"since_id\") != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", after.Get(\"since_id\"))\n\t}\n\tif after.Get(\"min_id\") != \"\" {\n\t\tt.Fatalf(\"result should be empty string: %q\", after.Get(\"min_id\"))\n\t}\n}\n<commit_msg>Fix test<commit_after>package mastodon\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDoAPI(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Query().Get(\"max_id\") == \"999\" {\n\t\t\tw.Header().Set(\"Link\", `<:>; rel=\"next\"`)\n\t\t} else {\n\t\t\tw.Header().Set(\"Link\", `<http:\/\/example.com?max_id=234>; rel=\"next\", <http:\/\/example.com?since_id=890>; rel=\"prev\"`)\n\t\t}\n\t\tfmt.Fprintln(w, `[{\"username\": \"foo\"}, {\"username\": \"bar\"}]`)\n\t}))\n\tdefer ts.Close()\n\n\tc := NewClient(&Config{Server: ts.URL})\n\tvar accounts []Account\n\terr := c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, &Pagination{\n\t\tMaxID: \"999\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tpg := &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"789\",\n\t\tLimit: 10,\n\t}\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", url.Values{}, &accounts, pg)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"234\" {\n\t\tt.Fatalf(\"want %q but %q\", \"234\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"890\" {\n\t\tt.Fatalf(\"want %q but %q\", \"890\", pg.SinceID)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n\n\tpg = &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"789\",\n\t\tLimit: 10,\n\t}\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, pg)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"234\" {\n\t\tt.Fatalf(\"want %q but %q\", \"234\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"890\" {\n\t\tt.Fatalf(\"want %q but %q\", \"890\", pg.SinceID)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n\n\t\/\/ *Pagination is nil\n\terr = c.doAPI(context.Background(), http.MethodGet, \"\/\", nil, &accounts, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif accounts[0].Username != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", accounts[0].Username)\n\t}\n\tif accounts[1].Username != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", accounts[1].Username)\n\t}\n}\n\nfunc TestAuthenticate(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.FormValue(\"username\") != \"valid\" || r.FormValue(\"password\") != \"user\" {\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, `{\"access_token\": \"zoo\"}`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\terr := client.Authenticate(context.Background(), \"invalid\", \"user\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\terr = client.Authenticate(context.Background(), \"valid\", \"user\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n}\n\nfunc TestAuthenticateWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\terr := client.Authenticate(ctx, \"invalid\", \"user\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Post %q: context canceled\", ts.URL+\"\/oauth\/token\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestPostStatus(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header.Get(\"Authorization\") != \"Bearer zoo\" {\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, `{\"access_token\": \"zoo\"}`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\t_, err := client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\t_, err = client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n}\n\nfunc TestPostStatusWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\t_, err := client.PostStatus(ctx, &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Post %q: context canceled\", ts.URL+\"\/api\/v1\/statuses\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestGetTimelineHome(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, `[{\"content\": \"foo\"}, {\"content\": \"bar\"}]`)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t})\n\t_, err := client.PostStatus(context.Background(), &Toot{\n\t\tStatus: \"foobar\",\n\t})\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\tclient = NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\ttl, err := client.GetTimelineHome(context.Background(), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif len(tl) != 2 {\n\t\tt.Fatalf(\"result should be two: %d\", len(tl))\n\t}\n\tif tl[0].Content != \"foo\" {\n\t\tt.Fatalf(\"want %q but %q\", \"foo\", tl[0].Content)\n\t}\n\tif tl[1].Content != \"bar\" {\n\t\tt.Fatalf(\"want %q but %q\", \"bar\", tl[1].Content)\n\t}\n}\n\nfunc TestGetTimelineHomeWithCancel(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tclient := NewClient(&Config{\n\t\tServer: ts.URL,\n\t\tClientID: \"foo\",\n\t\tClientSecret: \"bar\",\n\t\tAccessToken: \"zoo\",\n\t})\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\t_, err := client.GetTimelineHome(ctx, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\tif want := fmt.Sprintf(\"Get %q: context canceled\", ts.URL+\"\/api\/v1\/timelines\/home\"); want != err.Error() {\n\t\tt.Fatalf(\"want %q but %q\", want, err.Error())\n\t}\n}\n\nfunc TestForTheCoverages(t *testing.T) {\n\t(*UpdateEvent)(nil).event()\n\t(*NotificationEvent)(nil).event()\n\t(*DeleteEvent)(nil).event()\n\t(*ErrorEvent)(nil).event()\n\t_ = (&ErrorEvent{io.EOF}).Error()\n}\n\nfunc TestNewPagination(t *testing.T) {\n\t_, err := newPagination(\"\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<:>; rel=\"next\"`)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<:>; rel=\"prev\"`)\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = newPagination(`<http:\/\/example.com?min_id=abc>; rel=\"prev\"`)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\n\tpg, err := newPagination(`<http:\/\/example.com?max_id=123>; rel=\"next\", <http:\/\/example.com?since_id=789>; rel=\"prev\"`)\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif pg.MaxID != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", pg.MaxID)\n\t}\n\tif pg.SinceID != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", pg.SinceID)\n\t}\n}\n\nfunc TestGetPaginationID(t *testing.T) {\n\t_, err := getPaginationID(\":\", \"max_id\")\n\tif err == nil {\n\t\tt.Fatalf(\"should be fail: %v\", err)\n\t}\n\n\t_, err = getPaginationID(\"http:\/\/example.com?max_id=abc\", \"max_id\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\n\tid, err := getPaginationID(\"http:\/\/example.com?max_id=123\", \"max_id\")\n\tif err != nil {\n\t\tt.Fatalf(\"should not be fail: %v\", err)\n\t}\n\tif id != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", id)\n\t}\n}\n\nfunc TestPaginationSetValues(t *testing.T) {\n\tp := &Pagination{\n\t\tMaxID: \"123\",\n\t\tSinceID: \"456\",\n\t\tMinID: \"789\",\n\t\tLimit: 10,\n\t}\n\tbefore := url.Values{\"key\": {\"value\"}}\n\tafter := p.setValues(before)\n\tif after.Get(\"key\") != \"value\" {\n\t\tt.Fatalf(\"want %q but %q\", \"value\", after.Get(\"key\"))\n\t}\n\tif after.Get(\"max_id\") != \"123\" {\n\t\tt.Fatalf(\"want %q but %q\", \"123\", after.Get(\"max_id\"))\n\t}\n\tif after.Get(\"since_id\") != \"456\" {\n\t\tt.Fatalf(\"want %q but %q\", \"456\", after.Get(\"since_id\"))\n\t}\n\tif after.Get(\"min_id\") != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", after.Get(\"min_id\"))\n\t}\n\tif after.Get(\"limit\") != \"10\" {\n\t\tt.Fatalf(\"want %q but %q\", \"10\", after.Get(\"limit\"))\n\t}\n\n\tp = &Pagination{\n\t\tMaxID: \"\",\n\t\tSinceID: \"789\",\n\t}\n\tbefore = url.Values{}\n\tafter = p.setValues(before)\n\tif after.Get(\"max_id\") != \"\" {\n\t\tt.Fatalf(\"result should be empty string: %q\", after.Get(\"max_id\"))\n\t}\n\tif after.Get(\"since_id\") != \"789\" {\n\t\tt.Fatalf(\"want %q but %q\", \"789\", after.Get(\"since_id\"))\n\t}\n\tif after.Get(\"min_id\") != \"\" {\n\t\tt.Fatalf(\"result should be empty string: %q\", after.Get(\"min_id\"))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package memtree\n\nimport (\n\t\"github.com\/google\/e2e-key-server\/db\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype MemTree struct {\n\tdb db.Mapper\n}\n\nfunc New(db db.Mapper) *MemTree {\n\treturn &MemTree{db}\n}\n\nfunc (m *MemTree) ReadLeaf(ctx context.Context, index []byte) ([]byte, error) {\n\treturn m.db.ReadLeaf(ctx, index)\n}\nfunc (m *MemTree) WriteLeaf(ctx context.Context, index, leaf []byte) error {\n\treturn m.db.WriteLeaf(ctx, index, leaf)\n}\nfunc (m *MemTree) ReadRoot(ctx context.Context) ([]byte, error) {\n\treturn []byte(\"\"), nil\n}\n<commit_msg>Fix memtree<commit_after>package memtree\n\nimport (\n\t\"crypto\/sha512\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst IndexSize = sha512.Size256\n\ntype MemTree struct {\n\tleaves map[[IndexSize]byte][]byte\n\tnodes map[[IndexSize]byte][]byte\n}\n\nfunc New() *MemTree {\n\treturn &MemTree{\n\t\tleaves: make(map[[IndexSize]byte][]byte),\n\t\tnodes: make(map[[IndexSize]byte][]byte),\n\t}\n}\n\nfunc (m *MemTree) ReadLeaf(ctx context.Context, index []byte) ([]byte, error) {\n\tvar k [IndexSize]byte\n\tcopy(k[:], index[:IndexSize])\n\treturn m.leaves[k], nil\n}\nfunc (m *MemTree) WriteLeaf(ctx context.Context, index, leaf []byte) error {\n\tvar k [IndexSize]byte\n\tcopy(k[:], index[:IndexSize])\n\tm.leaves[k] = leaf\n\treturn nil\n}\nfunc (m *MemTree) ReadRoot(ctx context.Context) ([]byte, error) {\n\treturn []byte(\"\"), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\tpb \"github.com\/PomeloCloud\/BFTRaft4go\/proto\/server\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"log\"\n\t\"github.com\/PomeloCloud\/BFTRaft4go\/utils\"\n\t\"github.com\/dgraph-io\/badger\"\n)\n\nconst (\n\tNODE_JOIN = 0\n\tREG_NODE = 1\n\tNEW_CLIENT = 2\n\tNODE_GROUP = 3\n)\n\nfunc (s *BFTRaftServer) RegisterMembershipCommands() {\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NODE_JOIN, s.NodeJoin)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, REG_NODE, s.RegNode)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NEW_CLIENT, s.NewClient)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NODE_GROUP, s.NewGroup)\n}\n\n\n\/\/ Register a node into the network\n\/\/ The node may be new or it was rejoined with new address\nfunc (s *BFTRaftServer) RegNode(arg *[]byte, entry *pb.LogEntry) []byte {\n\tnode := pb.Node{}\n\tif err := proto.Unmarshal(*arg, &node); err == nil {\n\t\tnode.Id = HashPublicKeyBytes(node.PublicKey)\n\t\tnode.Online = true\n\t\ts.DB.Update(func(txn *badger.Txn) error {\n\t\t\ts.SaveNode(txn, &node)\n\t\t\treturn nil\n\t\t})\n\t\treturn []byte{1}\n\t} else {\n\t\tlog.Println(err)\n\t\treturn []byte{0}\n\t}\n}\n\nfunc (s *BFTRaftServer) NodeJoin(arg *[]byte, entry *pb.LogEntry) []byte {\n\treq := pb.NodeJoinGroupEntry{}\n\tif err := proto.Unmarshal(*arg, &req); err == nil {\n\t\tnode := entry.Command.ClientId\n\t\t\/\/ this should be fine, a public key can use both for client and node\n\t\tgroup := req.Group\n\t\tif node == s.Id {\n\t\t\t\/\/ skip if current node is the joined node\n\t\t\t\/\/ when joined a group, the node should do all of\n\t\t\t\/\/ those following things by itself after the log is replicated\n\t\t\treturn []byte{2}\n\t\t}\n\t\tpeer := pb.Peer{\n\t\t\tId: node,\n\t\t\tGroup: group,\n\t\t\tHost: node,\n\t\t\tNextIndex: 0,\n\t\t\tMatchIndex: 0,\n\t\t}\n\t\t\/\/ first, save the peer\n\t\ts.DB.Update(func(txn *badger.Txn) error {\n\t\t\treturn s.SavePeer(txn, &peer)\n\t\t})\n\t\t\/\/ next, check if this node is in the group. Add it on board if found.\n\t\t\/\/ because membership logs entries will be replicated on every node\n\t\t\/\/ this function will also be executed every where\n\t\tif meta, found := s.GroupsOnboard[group]; found {\n\t\t\tmeta.Lock.Lock()\n\t\t\tdefer meta.Lock.Unlock()\n\t\t\tmeta.GroupPeers[peer.Id] = &peer\n\t\t}\n\t\treturn []byte{1}\n\t} else {\n\t\treturn []byte{0}\n\t}\n}\n\n\nfunc (s *BFTRaftServer) NewClient(arg *[]byte, entry *pb.LogEntry) []byte {\n\tclient := pb.Client{}\n\tproto.Unmarshal(*arg, &client)\n\tclient.Id = HashPublicKeyBytes(client.PrivateKey)\n\tif err := s.SaveClient(&client); err != nil {\n\t\treturn []byte{1}\n\t} else {\n\t\tlog.Println(err)\n\t\treturn []byte{0}\n\t}\n}\n\nfunc (s *BFTRaftServer) NewGroup(arg *[]byte, entry *pb.LogEntry) []byte {\n\treturn []byte{}\n}\n<commit_msg>check replication when join<commit_after>package server\n\nimport (\n\tpb \"github.com\/PomeloCloud\/BFTRaft4go\/proto\/server\"\n\t\"github.com\/PomeloCloud\/BFTRaft4go\/utils\"\n\t\"github.com\/dgraph-io\/badger\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"log\"\n\t\"errors\"\n)\n\nconst (\n\tNODE_JOIN = 0\n\tREG_NODE = 1\n\tNEW_CLIENT = 2\n\tNODE_GROUP = 3\n)\n\nfunc (s *BFTRaftServer) RegisterMembershipCommands() {\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NODE_JOIN, s.NodeJoin)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, REG_NODE, s.RegNode)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NEW_CLIENT, s.NewClient)\n\ts.RegisterRaftFunc(utils.ALPHA_GROUP, NODE_GROUP, s.NewGroup)\n}\n\n\/\/ Register a node into the network\n\/\/ The node may be new or it was rejoined with new address\nfunc (s *BFTRaftServer) RegNode(arg *[]byte, entry *pb.LogEntry) []byte {\n\tnode := pb.Node{}\n\tif err := proto.Unmarshal(*arg, &node); err == nil {\n\t\tnode.Id = HashPublicKeyBytes(node.PublicKey)\n\t\tnode.Online = true\n\t\ts.DB.Update(func(txn *badger.Txn) error {\n\t\t\ts.SaveNode(txn, &node)\n\t\t\treturn nil\n\t\t})\n\t\treturn []byte{1}\n\t} else {\n\t\tlog.Println(err)\n\t\treturn []byte{0}\n\t}\n}\n\nfunc (s *BFTRaftServer) NodeJoin(arg *[]byte, entry *pb.LogEntry) []byte {\n\treq := pb.NodeJoinGroupEntry{}\n\tif err := proto.Unmarshal(*arg, &req); err == nil {\n\t\tnode := entry.Command.ClientId\n\t\t\/\/ this should be fine, a public key can use both for client and node\n\t\tgroupId := req.Group\n\t\tif node == s.Id {\n\t\t\t\/\/ skip if current node is the joined node\n\t\t\t\/\/ when joined a groupId, the node should do all of\n\t\t\t\/\/ those following things by itself after the log is replicated\n\t\t\treturn []byte{2}\n\t\t}\n\n\t\tpeer := pb.Peer{\n\t\t\tId: node,\n\t\t\tGroup: groupId,\n\t\t\tHost: node,\n\t\t\tNextIndex: 0,\n\t\t\tMatchIndex: 0,\n\t\t}\n\t\tif err := s.DB.Update(func(txn *badger.Txn) error {\n\t\t\tgroup := s.GetGroup(txn, groupId)\n\t\t\t\/\/ check if this groupId exceeds it's replication\n\t\t\tif len(GetGroupPeersFromKV(txn, groupId)) >= int(group.Replications) {\n\t\t\t\treturn errors.New(\"exceed replications\")\n\t\t\t}\n\t\t\t\/\/ first, save the peer\n\t\t\treturn s.SavePeer(txn, &peer)\n\t\t}); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn []byte{0}\n\t\t}\n\t\t\/\/ next, check if this node is in the groupId. Add it on board if found.\n\t\t\/\/ because membership logs entries will be replicated on every node\n\t\t\/\/ this function will also be executed every where\n\t\tif meta, found := s.GroupsOnboard[groupId]; found {\n\t\t\tmeta.Lock.Lock()\n\t\t\tdefer meta.Lock.Unlock()\n\t\t\tmeta.GroupPeers[peer.Id] = &peer\n\t\t}\n\t\treturn []byte{1}\n\t} else {\n\t\treturn []byte{0}\n\t}\n}\n\nfunc (s *BFTRaftServer) NewClient(arg *[]byte, entry *pb.LogEntry) []byte {\n\tclient := pb.Client{}\n\tproto.Unmarshal(*arg, &client)\n\tclient.Id = HashPublicKeyBytes(client.PrivateKey)\n\tif err := s.SaveClient(&client); err != nil {\n\t\treturn []byte{1}\n\t} else {\n\t\tlog.Println(err)\n\t\treturn []byte{0}\n\t}\n}\n\nfunc (s *BFTRaftServer) NewGroup(arg *[]byte, entry *pb.LogEntry) []byte {\n\treturn []byte{}\n}\n<|endoftext|>"} {"text":"<commit_before>package vegeta\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Attacker is an attack executor which wraps an http.Client\ntype Attacker struct {\n\tdialer *net.Dialer\n\tclient http.Client\n\tstop chan struct{}\n\tworkers uint64\n}\n\nvar (\n\t\/\/ DefaultRedirects is the default number of times an Attacker follows\n\t\/\/ redirects.\n\tDefaultRedirects = 10\n\t\/\/ DefaultTimeout is the default amount of time an Attacker waits for a request\n\t\/\/ before it times out.\n\tDefaultTimeout = 30 * time.Second\n\t\/\/ DefaultLocalAddr is the default local IP address an Attacker uses.\n\tDefaultLocalAddr = net.IPAddr{IP: net.IPv4zero}\n\t\/\/ DefaultTLSConfig is the default tls.Config an Attacker uses.\n\tDefaultTLSConfig = &tls.Config{InsecureSkipVerify: true}\n)\n\n\/\/ NewAttacker returns a new Attacker with default options which are overridden\n\/\/ by the optionally provided opts.\nfunc NewAttacker(opts ...func(*Attacker)) *Attacker {\n\ta := &Attacker{stop: make(chan struct{})}\n\ta.dialer = &net.Dialer{\n\t\tLocalAddr: &net.TCPAddr{IP: DefaultLocalAddr.IP, Zone: DefaultLocalAddr.Zone},\n\t\tKeepAlive: 30 * time.Second,\n\t\tTimeout: DefaultTimeout,\n\t}\n\ta.client = http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDial: a.dialer.Dial,\n\t\t\tResponseHeaderTimeout: DefaultTimeout,\n\t\t\tTLSClientConfig: DefaultTLSConfig,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t},\n\t}\n\tfor _, opt := range opts {\n\t\topt(a)\n\t}\n\treturn a\n}\n\n\/\/ Workers returns a functional option which sets the number of workers\n\/\/ an Attacker uses to hit its targets.\n\/\/ If zero or greater than the total number of hits, workers will be capped\n\/\/ to that maximum.\nfunc Workers(n uint64) func(*Attacker) {\n\treturn func(a *Attacker) { a.workers = n }\n}\n\n\/\/ Redirects returns a functional option which sets the maximum\n\/\/ number of redirects an Attacker will follow.\nfunc Redirects(n int) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ta.client.CheckRedirect = func(_ *http.Request, via []*http.Request) error {\n\t\t\tif len(via) > n {\n\t\t\t\treturn fmt.Errorf(\"stopped after %d redirects\", n)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Timeout returns a functional option which sets the maximum amount of time\n\/\/ an Attacker will wait for a request to be responded to.\nfunc Timeout(d time.Duration) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.ResponseHeaderTimeout = d\n\t\ta.dialer.Timeout = d\n\t\ttr.Dial = a.dialer.Dial\n\t}\n}\n\n\/\/ LocalAddr returns a functional option which sets the local address\n\/\/ an Attacker will use with its requests.\nfunc LocalAddr(addr net.IPAddr) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ta.dialer.LocalAddr = &net.TCPAddr{IP: addr.IP, Zone: addr.Zone}\n\t\ttr.Dial = a.dialer.Dial\n\t}\n}\n\n\/\/ KeepAlive returns a functional option which toggles KeepAlive\n\/\/ connections on the dialer and transport.\nfunc KeepAlive(keepalive bool) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.DisableKeepAlives = !keepalive\n\t\tif !keepalive {\n\t\t\ta.dialer.KeepAlive = 0\n\t\t\ttr.Dial = a.dialer.Dial\n\t\t}\n\t}\n}\n\n\/\/ TLSConfig returns a functional option which sets the *tls.Config for a\n\/\/ Attacker to use with its requests.\nfunc TLSConfig(c *tls.Config) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.TLSClientConfig = c\n\t}\n}\n\n\/\/ Attack reads its Targets from the passed Targeter and attacks them at\n\/\/ the rate specified for duration time. Results are put into the returned channel\n\/\/ as soon as they arrive.\nfunc (a *Attacker) Attack(tr Targeter, rate uint64, du time.Duration) chan *Result {\n\tresc := make(chan *Result)\n\tthrottle := time.NewTicker(time.Duration(1e9 \/ rate))\n\thits := rate * uint64(du.Seconds())\n\twrk := a.workers\n\tif wrk == 0 || wrk > hits {\n\t\twrk = hits\n\t}\n\tshare := hits \/ wrk\n\n\tvar wg sync.WaitGroup\n\tfor i := uint64(0); i < wrk; i++ {\n\t\twg.Add(1)\n\t\tgo func(share uint64) {\n\t\t\tdefer wg.Done()\n\t\t\tfor j := uint64(0); j < share; j++ {\n\t\t\t\tselect {\n\t\t\t\tcase <-throttle.C:\n\t\t\t\t\tresc <- a.hit(tr)\n\t\t\t\tcase <-a.stop:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(share)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(resc)\n\t\tthrottle.Stop()\n\t}()\n\n\treturn resc\n}\n\n\/\/ Stop stops the current attack.\nfunc (a *Attacker) Stop() { close(a.stop) }\n\nfunc (a *Attacker) hit(tr Targeter) *Result {\n\tres := Result{Timestamp: time.Now()}\n\tdefer func() { res.Latency = time.Since(res.Timestamp) }()\n\n\ttgt, err := tr()\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\n\treq, err := tgt.Request()\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\n\tr, err := a.client.Do(req)\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\tdefer r.Body.Close()\n\n\tres.BytesOut = uint64(req.ContentLength)\n\tres.Code = uint16(r.StatusCode)\n\tif body, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tif res.Code < 200 || res.Code >= 400 {\n\t\t\tres.Error = string(body)\n\t\t}\n\t} else {\n\t\tres.BytesIn = uint64(len(body))\n\t}\n\treturn &res\n}\n<commit_msg>Use exact tick time for Timestamp<commit_after>package vegeta\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Attacker is an attack executor which wraps an http.Client\ntype Attacker struct {\n\tdialer *net.Dialer\n\tclient http.Client\n\tstop chan struct{}\n\tworkers uint64\n}\n\nvar (\n\t\/\/ DefaultRedirects is the default number of times an Attacker follows\n\t\/\/ redirects.\n\tDefaultRedirects = 10\n\t\/\/ DefaultTimeout is the default amount of time an Attacker waits for a request\n\t\/\/ before it times out.\n\tDefaultTimeout = 30 * time.Second\n\t\/\/ DefaultLocalAddr is the default local IP address an Attacker uses.\n\tDefaultLocalAddr = net.IPAddr{IP: net.IPv4zero}\n\t\/\/ DefaultTLSConfig is the default tls.Config an Attacker uses.\n\tDefaultTLSConfig = &tls.Config{InsecureSkipVerify: true}\n)\n\n\/\/ NewAttacker returns a new Attacker with default options which are overridden\n\/\/ by the optionally provided opts.\nfunc NewAttacker(opts ...func(*Attacker)) *Attacker {\n\ta := &Attacker{stop: make(chan struct{})}\n\ta.dialer = &net.Dialer{\n\t\tLocalAddr: &net.TCPAddr{IP: DefaultLocalAddr.IP, Zone: DefaultLocalAddr.Zone},\n\t\tKeepAlive: 30 * time.Second,\n\t\tTimeout: DefaultTimeout,\n\t}\n\ta.client = http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDial: a.dialer.Dial,\n\t\t\tResponseHeaderTimeout: DefaultTimeout,\n\t\t\tTLSClientConfig: DefaultTLSConfig,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t},\n\t}\n\tfor _, opt := range opts {\n\t\topt(a)\n\t}\n\treturn a\n}\n\n\/\/ Workers returns a functional option which sets the number of workers\n\/\/ an Attacker uses to hit its targets.\n\/\/ If zero or greater than the total number of hits, workers will be capped\n\/\/ to that maximum.\nfunc Workers(n uint64) func(*Attacker) {\n\treturn func(a *Attacker) { a.workers = n }\n}\n\n\/\/ Redirects returns a functional option which sets the maximum\n\/\/ number of redirects an Attacker will follow.\nfunc Redirects(n int) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ta.client.CheckRedirect = func(_ *http.Request, via []*http.Request) error {\n\t\t\tif len(via) > n {\n\t\t\t\treturn fmt.Errorf(\"stopped after %d redirects\", n)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n\/\/ Timeout returns a functional option which sets the maximum amount of time\n\/\/ an Attacker will wait for a request to be responded to.\nfunc Timeout(d time.Duration) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.ResponseHeaderTimeout = d\n\t\ta.dialer.Timeout = d\n\t\ttr.Dial = a.dialer.Dial\n\t}\n}\n\n\/\/ LocalAddr returns a functional option which sets the local address\n\/\/ an Attacker will use with its requests.\nfunc LocalAddr(addr net.IPAddr) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ta.dialer.LocalAddr = &net.TCPAddr{IP: addr.IP, Zone: addr.Zone}\n\t\ttr.Dial = a.dialer.Dial\n\t}\n}\n\n\/\/ KeepAlive returns a functional option which toggles KeepAlive\n\/\/ connections on the dialer and transport.\nfunc KeepAlive(keepalive bool) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.DisableKeepAlives = !keepalive\n\t\tif !keepalive {\n\t\t\ta.dialer.KeepAlive = 0\n\t\t\ttr.Dial = a.dialer.Dial\n\t\t}\n\t}\n}\n\n\/\/ TLSConfig returns a functional option which sets the *tls.Config for a\n\/\/ Attacker to use with its requests.\nfunc TLSConfig(c *tls.Config) func(*Attacker) {\n\treturn func(a *Attacker) {\n\t\ttr := a.client.Transport.(*http.Transport)\n\t\ttr.TLSClientConfig = c\n\t}\n}\n\n\/\/ Attack reads its Targets from the passed Targeter and attacks them at\n\/\/ the rate specified for duration time. Results are put into the returned channel\n\/\/ as soon as they arrive.\nfunc (a *Attacker) Attack(tr Targeter, rate uint64, du time.Duration) chan *Result {\n\tresc := make(chan *Result)\n\tthrottle := time.NewTicker(time.Duration(1e9 \/ rate))\n\thits := rate * uint64(du.Seconds())\n\twrk := a.workers\n\tif wrk == 0 || wrk > hits {\n\t\twrk = hits\n\t}\n\tshare := hits \/ wrk\n\n\tvar wg sync.WaitGroup\n\tfor i := uint64(0); i < wrk; i++ {\n\t\twg.Add(1)\n\t\tgo func(share uint64) {\n\t\t\tdefer wg.Done()\n\t\t\tfor j := uint64(0); j < share; j++ {\n\t\t\t\tselect {\n\t\t\t\tcase tm := <-throttle.C:\n\t\t\t\t\tresc <- a.hit(tr, tm)\n\t\t\t\tcase <-a.stop:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(share)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(resc)\n\t\tthrottle.Stop()\n\t}()\n\n\treturn resc\n}\n\n\/\/ Stop stops the current attack.\nfunc (a *Attacker) Stop() { close(a.stop) }\n\nfunc (a *Attacker) hit(tr Targeter, tm time.Time) *Result {\n\tres := Result{Timestamp: tm}\n\tdefer func() { res.Latency = time.Since(tm) }()\n\n\ttgt, err := tr()\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\n\treq, err := tgt.Request()\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\n\tr, err := a.client.Do(req)\n\tif err != nil {\n\t\tres.Error = err.Error()\n\t\treturn &res\n\t}\n\tdefer r.Body.Close()\n\n\tres.BytesOut = uint64(req.ContentLength)\n\tres.Code = uint16(r.StatusCode)\n\tif body, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tif res.Code < 200 || res.Code >= 400 {\n\t\t\tres.Error = string(body)\n\t\t}\n\t} else {\n\t\tres.BytesIn = uint64(len(body))\n\t}\n\treturn &res\n}\n<|endoftext|>"} {"text":"<commit_before>package sudoku\n\n\/\/ Holds the global Sudoku board state\n\n\/\/ Communication is handled by four major mechanics:\n\/\/ * A channel to distribute the current board state\n\/\/ * A channel to notify threads of updates\n\/\/ * A channel to update known & possible values\n\n\/\/ coord contains x and y elements for a given position\ntype coord struct {\n\tx int\n\ty int\n}\n\n\/\/ A cell holds all of the required knowledge for a cell.\n\/\/ It contains it's own address on the board (guarenteed unique)\n\/\/ if actual is set to 0, actual value is unknown\ntype cell struct {\n\tlocation coord\n\tactual int\n\tpossible []int\n}\n\nfunc createBoard(level int) (board map[coord]cell) {\n\tfor i := 0; i < level*level; i++ {\n\t\tfor j := 0; j < level*level; j++ {\n\t\t\tboard[coord{x:i, y:j}].location = coord{x:i, y:j}\n\t\t\tfor k := 1; k =< level*level; k++ {\n\t\t\t\tboard[coord{x:i, y:j}].possible = append(board[coord{x:i, y:j}].possible, k) \n\t\t\t}\n\t\t}\n\t}\n\treturn board\n}\n\n\/\/ The boardQueue is run in a go thread\n\/\/ It serves a given value to any requestor whenever asked, or recieves updates to the value to serve.\nfunc boardQueue(up, down chan map[coord]cell) {\n\tcurrentBoard := <-up\n\tvar done bool\n\tdefer close(down)\n\n\tfor {\n\t\tselect {\n\t\tcase currentBoard, done = <-up:\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase down <- currentBoard:\n\t\t}\n\t}\n}\n\n\/\/ The updateQueue holds an update and pushes it down stream.\n\/\/ It will not push the same update down more than once.\nfunc updateQueue(up, down chan coord) {\n\tvar push bool\n\tvar done bool\n\tvar val coord\n\tdefer close(down)\n\n\tfor {\n\t\tif push {\n\t\t\t\/\/ Otherwise wait to either push it down, or updates\n\t\t\tselect {\n\t\t\tcase val, done = <-up:\n\t\t\t\tif done {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpush = true\n\t\t\tcase down <- val:\n\t\t\t\tpush = false\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ If it's been pushed, wait for upstream.\n\t\t\tval, done = <-up\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpush = true\n\t\t}\n\t}\n}\n\nfunc broadcastUpdate(up chan coord, childs []chan coord) {\n\tvar val coord\n\tvar done bool\n\n\t\/\/ clean up downstream channels\n\tdefer func() {\n\t\tfor _, each := range childs {\n\t\t\tclose(each)\n\t\t}\n\t}()\n\n\tfor {\n\t\tval, done = <-up\n\t\tif done {\n\t\t\treturn\n\t\t}\n\t\tfor _, each := range childs {\n\t\t\teach <- val\n\t\t}\n\t}\n}\n<commit_msg>added things required to ort a cluster<commit_after>package sudoku\n\n\/\/ Holds the global Sudoku board state\n\n\/\/ Communication is handled by four major mechanics:\n\/\/ * A channel to distribute the current board state\n\/\/ * A channel to notify threads of updates\n\/\/ * A channel to update known & possible values\n\n\/\/ coord contains x and y elements for a given position\ntype coord struct {\n\tx int\n\ty int\n}\n\n\/\/ A cell holds all of the required knowledge for a cell.\n\/\/ It contains it's own address on the board (guarenteed unique)\n\/\/ if actual is set to 0, actual value is unknown\ntype cell struct {\n\tlocation coord\n\tactual int\n\tpossible []int\n}\n\ntype Cluster []cell\n\n\/*\ntype Cluster interface {\n\tLen() int\n\tSwap(i, j int)\n\tLess(i, j int)\n}\n*\/\n\nfunc (c Cluster) Len() int {\n\treturn len(c)\n}\nfunc (c Cluster) Swap(i, j int) {\n\tc[i], c[j] = c[j], c[i]\n}\nfunc (c Cluster) Less(i, j int) bool {\n\tif c[i].x < c[j].x {\n\t\treturn true\n\t}\n\tif c[i].x == c[j].x && c[i].y < c[j].y {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc createBoard(level int) (board map[coord]cell) {\n\tfor i := 0; i < level*level; i++ {\n\t\tfor j := 0; j < level*level; j++ {\n\t\t\tboard[coord{x: i, y: j}].location = coord{x: i, y: j}\n\t\t\tfor k := 1; k <= level*level; k++ {\n\t\t\t\tboard[coord{x: i, y: j}].possible = append(board[coord{x: i, y: j}].possible, k)\n\t\t\t}\n\t\t}\n\t}\n\treturn board\n}\n\n\/\/ The boardQueue is run in a go thread\n\/\/ It serves a given value to any requestor whenever asked, or recieves updates to the value to serve.\nfunc boardQueue(up, down chan map[coord]cell) {\n\tcurrentBoard := <-up\n\tvar done bool\n\tdefer close(down)\n\n\tfor {\n\t\tselect {\n\t\tcase currentBoard, done = <-up:\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase down <- currentBoard:\n\t\t}\n\t}\n}\n\n\/\/ The updateQueue holds an update and pushes it down stream.\n\/\/ It will not push the same update down more than once.\nfunc updateQueue(up, down chan coord) {\n\tvar push bool\n\tvar done bool\n\tvar val coord\n\tdefer close(down)\n\n\tfor {\n\t\tif push {\n\t\t\t\/\/ Otherwise wait to either push it down, or updates\n\t\t\tselect {\n\t\t\tcase val, done = <-up:\n\t\t\t\tif done {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tpush = true\n\t\t\tcase down <- val:\n\t\t\t\tpush = false\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ If it's been pushed, wait for upstream.\n\t\t\tval, done = <-up\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpush = true\n\t\t}\n\t}\n}\n\nfunc broadcastUpdate(up chan coord, childs []chan coord) {\n\tvar val coord\n\tvar done bool\n\n\t\/\/ clean up downstream channels\n\tdefer func() {\n\t\tfor _, each := range childs {\n\t\t\tclose(each)\n\t\t}\n\t}()\n\n\tfor {\n\t\tval, done = <-up\n\t\tif done {\n\t\t\treturn\n\t\t}\n\t\tfor _, each := range childs {\n\t\t\teach <- val\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nfunc describeInjectingFiles(what, source, podName string, createSource, delSource func() error) {\n\tContext(\"VM with files modified by content from a \"+what, func() {\n\t\tvar vm *framework.VMInterface\n\n\t\tBeforeAll(func() {\n\t\t\tExpect(createSource()).To(Succeed())\n\n\t\t\tvm = controller.VM(podName)\n\t\t\tExpect(vm.CreateAndWait(VMOptions{\n\t\t\t\tInjectFilesToRootfsFrom: source,\n\t\t\t}.ApplyDefaults(), time.Minute*5, nil)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tExpect(delSource()).To(Succeed())\n\t\t})\n\n\t\tIt(\"should put files onto the rootfs [Conformance]\", func() {\n\t\t\tssh := waitSSH(vm)\n\t\t\tExpect(framework.RunSimple(ssh, \"cat\", \"\/tmp\/test-file1\", \"\/tmp\/test-file2\")).To(Equal(\"Hello World!\"))\n\t\t})\n\t})\n}\n\nvar _ = Describe(\"Injecting files into rootfs\", func() {\n\ttestData := map[string]string{\n\t\t\/\/ plain encoding\n\t\t\"test_file1\": \"Hello \",\n\t\t\"test_file1_path\": \"\/tmp\/test-file1\",\n\t\t\"test_file1_encoding\": \"plain\",\n\t\t\/\/ base64 encoding - encoded string: \"World!\"\n\t\t\"test_file2\": \"V29ybGQh\",\n\t\t\"test_file2_path\": \"\/tmp\/test-file2\",\n\t}\n\n\tdescribeInjectingFiles(\"ConfigMap\", \"configmap\/files\", \"rootfs-cm\", func() error {\n\t\tcm := &v1.ConfigMap{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"files\",\n\t\t\t},\n\t\t\tData: testData,\n\t\t}\n\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\treturn err\n\t}, func() error {\n\t\treturn controller.ConfigMaps().Delete(\"files\", nil)\n\t})\n\n\tdescribeInjectingFiles(\"Secret\", \"secret\/files\", \"rootfs-secret\", func() error {\n\t\tsecret := &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"files\",\n\t\t\t},\n\t\t\tStringData: testData,\n\t\t}\n\t\t_, err := controller.Secrets().Create(secret)\n\t\treturn err\n\t}, func() error {\n\t\treturn controller.Secrets().Delete(\"files\", nil)\n\t})\n})\n<commit_msg>Fix inject to rootfs e2e test<commit_after>\/*\nCopyright 2018 Mirantis\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e\n\nimport (\n\t\"time\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/Mirantis\/virtlet\/tests\/e2e\/framework\"\n\t. \"github.com\/Mirantis\/virtlet\/tests\/e2e\/ginkgo-ext\"\n)\n\nfunc describeInjectingFiles(what, source, podName string, createSource, delSource func() error) {\n\tContext(\"VM with files modified by content from a \"+what, func() {\n\t\tvar vm *framework.VMInterface\n\n\t\tBeforeAll(func() {\n\t\t\tExpect(createSource()).To(Succeed())\n\n\t\t\tvm = controller.VM(podName)\n\t\t\tExpect(vm.CreateAndWait(VMOptions{\n\t\t\t\tInjectFilesToRootfsFrom: source,\n\t\t\t}.ApplyDefaults(), time.Minute*5, nil)).To(Succeed())\n\t\t})\n\n\t\tAfterAll(func() {\n\t\t\tdeleteVM(vm)\n\t\t\tExpect(delSource()).To(Succeed())\n\t\t})\n\n\t\tIt(\"should put files onto the rootfs [Conformance]\", func() {\n\t\t\tssh := waitSSH(vm)\n\t\t\tExpect(framework.RunSimple(ssh, \"cat\", \"\/test-file1\", \"\/test-file2\")).To(Equal(\"Hello World!\"))\n\t\t})\n\t})\n}\n\nvar _ = Describe(\"Injecting files into rootfs\", func() {\n\ttestData := map[string]string{\n\t\t\/\/ plain encoding\n\t\t\"test_file1\": \"Hello \",\n\t\t\"test_file1_path\": \"\/test-file1\",\n\t\t\"test_file1_encoding\": \"plain\",\n\t\t\/\/ base64 encoding - encoded string: \"World!\"\n\t\t\"test_file2\": \"V29ybGQh\",\n\t\t\"test_file2_path\": \"\/test-file2\",\n\t}\n\n\tdescribeInjectingFiles(\"ConfigMap\", \"configmap\/files\", \"rootfs-cm\", func() error {\n\t\tcm := &v1.ConfigMap{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"files\",\n\t\t\t},\n\t\t\tData: testData,\n\t\t}\n\t\t_, err := controller.ConfigMaps().Create(cm)\n\t\treturn err\n\t}, func() error {\n\t\treturn controller.ConfigMaps().Delete(\"files\", nil)\n\t})\n\n\tdescribeInjectingFiles(\"Secret\", \"secret\/files\", \"rootfs-secret\", func() error {\n\t\tsecret := &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"files\",\n\t\t\t},\n\t\t\tStringData: testData,\n\t\t}\n\t\t_, err := controller.Secrets().Create(secret)\n\t\treturn err\n\t}, func() error {\n\t\treturn controller.Secrets().Delete(\"files\", nil)\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"github.com\/ncodes\/cocoon\/core\/orderer\"\n\tproto \"github.com\/ncodes\/cocoon\/core\/runtime\/golang\/proto\"\n\t\"github.com\/ncodes\/cocoon\/core\/types\"\n\tlogging \"github.com\/op\/go-logging\"\n\tcmap \"github.com\/orcaman\/concurrent-map\"\n\tcontext \"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar log = logging.MustGetLogger(\"connector.client\")\n\n\/\/ txChannels holds the channels to send transaction responses to\nvar txRespChannels = cmap.New()\n\n\/\/ Client represents a cocoon code GRPC client\n\/\/ that interacts with a cocoon code.\ntype Client struct {\n\tccodeAddr string\n\tstub proto.StubClient\n\tconCtx context.Context\n\tconCancel context.CancelFunc\n\torderDiscoTicker *time.Ticker\n\tordererAddrs []string\n\tstream proto.Stub_TransactClient\n\tcocoonID string\n}\n\n\/\/ NewClient creates a new cocoon code client\nfunc NewClient() *Client {\n\treturn &Client{}\n}\n\n\/\/ SetCocoonID sets the cocoon id\nfunc (c *Client) SetCocoonID(id string) {\n\tc.cocoonID = id\n}\n\n\/\/ SetCocoonCodeAddr sets the cocoon code bind address\nfunc (c *Client) SetCocoonCodeAddr(ccAddr string) {\n\tc.ccodeAddr = ccAddr\n}\n\n\/\/ getCCAddr returns the cocoon code bind address.\n\/\/ For development, if DEV_COCOON_ADDR is set, it connects to it.\nfunc (c *Client) getCCAddr() string {\n\tif devCCodeAddr := os.Getenv(\"DEV_COCOON_ADDR\"); len(devCCodeAddr) > 0 {\n\t\treturn devCCodeAddr\n\t}\n\treturn c.ccodeAddr\n}\n\n\/\/ Close the stream and cancel connections\nfunc (c *Client) Close() {\n\tif c.stream != nil {\n\t\tc.stream.CloseSend()\n\t}\n\tif c.conCancel != nil {\n\t\tc.conCancel()\n\t}\n}\n\n\/\/ GetStream returns the grpc stream connected to the grpc cocoon code server\nfunc (c *Client) GetStream() proto.Stub_TransactClient {\n\treturn c.stream\n}\n\n\/\/ Connect connects to a cocoon code server\n\/\/ running on a known port\nfunc (c *Client) Connect() error {\n\n\tlog.Info(\"Starting cocoon code client\")\n\n\t\/\/ start a ticker to continously discover orderer addreses\n\tgo func() {\n\t\tc.orderDiscoTicker = time.NewTicker(60 * time.Second)\n\t\tfor _ = range c.orderDiscoTicker.C {\n\t\t\tvar ordererAddrs []string\n\t\t\tordererAddrs, err := orderer.DiscoverOrderers()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.ordererAddrs = ordererAddrs\n\t\t}\n\t}()\n\n\tvar ordererAddrs []string\n\tordererAddrs, err := orderer.DiscoverOrderers()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ordererAddrs = ordererAddrs\n\n\tif len(c.ordererAddrs) > 0 {\n\t\tlog.Infof(\"Orderer address list updated. Contains %d orderer address(es)\", len(c.ordererAddrs))\n\t} else {\n\t\tlog.Warning(\"No orderer address was found. We won't be able to reach the orderer. \")\n\t}\n\n\tconn, err := grpc.Dial(c.getCCAddr(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to cocoon code server. %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tlog.Debugf(\"Now connected to cocoon code at port=%s\", strings.Split(c.getCCAddr(), \":\")[1])\n\n\tc.stub = proto.NewStubClient(conn)\n\n\t\/\/ if err = c.Do(conn); err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\ts := make(chan bool)\n\t<-s\n\n\treturn nil\n}\n\n\/\/ Do starts a request loop that continously asks the\n\/\/ server for transactions. When it receives a transaction,\n\/\/ it processes it and returns the result.\nfunc (c *Client) Do(conn *grpc.ClientConn) error {\n\n\tvar err error\n\n\t\/\/ create a context so we have complete controll of the connection\n\tc.conCtx, c.conCancel = context.WithCancel(context.Background())\n\n\t\/\/ connect to the cocoon code\n\tc.stream, err = c.stub.Transact(c.conCtx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start transaction stream with cocoon code. %s\", err)\n\t}\n\n\tfor {\n\n\t\tin, err := c.stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"connection with cocoon code has ended\")\n\t\t}\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"context canceled\") {\n\t\t\t\tlog.Info(\"Connection to cocoon code closed\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"failed to read message from cocoon code. %s\", err)\n\t\t}\n\n\t\tswitch in.Invoke {\n\t\tcase true:\n\t\t\tgo func() {\n\t\t\t\tlog.Debugf(\"New invoke transaction (%s) from cocoon code\", in.GetId())\n\t\t\t\tif err = c.handleInvokeTransaction(in); err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tc.stream.Send(&proto.Tx{\n\t\t\t\t\t\tResponse: true,\n\t\t\t\t\t\tId: in.GetId(),\n\t\t\t\t\t\tStatus: 500,\n\t\t\t\t\t\tBody: []byte(err.Error()),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}()\n\t\tcase false:\n\t\t\tlog.Debugf(\"New response transaction (%s) from cocoon code\", in.GetId())\n\t\t\tgo func() {\n\t\t\t\tif err = c.handleRespTransaction(in); err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tc.stream.Send(&proto.Tx{\n\t\t\t\t\t\tResponse: true,\n\t\t\t\t\t\tId: in.GetId(),\n\t\t\t\t\t\tStatus: 500,\n\t\t\t\t\t\tBody: []byte(err.Error()),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}\n\n\/\/ handleInvokeTransaction processes invoke transaction requests\nfunc (c *Client) handleInvokeTransaction(tx *proto.Tx) error {\n\tswitch tx.GetName() {\n\tcase types.TxCreateLedger:\n\t\treturn c.createLedger(tx)\n\tcase types.TxPut:\n\t\treturn c.put(tx)\n\tcase types.TxGetLedger:\n\t\treturn c.getLedger(tx)\n\tcase types.TxGet:\n\t\treturn c.get(tx, false)\n\tcase types.TxGetByID:\n\t\treturn c.get(tx, true)\n\tcase types.TxGetBlockByID:\n\t\treturn c.getBlock(tx)\n\tcase types.TxRangeGet:\n\t\treturn c.getRange(tx)\n\tdefault:\n\t\treturn c.stream.Send(&proto.Tx{\n\t\t\tId: tx.GetId(),\n\t\t\tResponse: true,\n\t\t\tStatus: 500,\n\t\t\tBody: []byte(fmt.Sprintf(\"unsupported transaction name (%s)\", tx.GetName())),\n\t\t})\n\t}\n}\n\n\/\/ handleRespTransaction passes the transaction to a response\n\/\/ channel with a matching transaction id and deletes the channel afterwards.\nfunc (c *Client) handleRespTransaction(tx *proto.Tx) error {\n\tif !txRespChannels.Has(tx.GetId()) {\n\t\treturn fmt.Errorf(\"response transaction (%s) does not have a corresponding response channel\", tx.GetId())\n\t}\n\n\ttxRespCh, _ := txRespChannels.Get(tx.GetId())\n\ttxRespCh.(chan *proto.Tx) <- tx\n\ttxRespChannels.Remove(tx.GetId())\n\treturn nil\n}\n\n\/\/ SendTx sends a transaction to the cocoon code\n\/\/ and saves the response channel. The response channel will\n\/\/ be passed a response when it is available in the Transact loop.\nfunc (c *Client) SendTx(tx *proto.Tx, respCh chan *proto.Tx) error {\n\ttxRespChannels.Set(tx.GetId(), respCh)\n\tif err := c.stream.Send(tx); err != nil {\n\t\ttxRespChannels.Remove(tx.GetId())\n\t\treturn err\n\t}\n\tlog.Debugf(\"Successfully sent transaction (%s) to cocoon code\", tx.GetId())\n\treturn nil\n}\n<commit_msg>debug<commit_after>package client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"github.com\/ncodes\/cocoon\/core\/orderer\"\n\tproto \"github.com\/ncodes\/cocoon\/core\/runtime\/golang\/proto\"\n\t\"github.com\/ncodes\/cocoon\/core\/types\"\n\tlogging \"github.com\/op\/go-logging\"\n\tcmap \"github.com\/orcaman\/concurrent-map\"\n\tcontext \"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n)\n\nvar log = logging.MustGetLogger(\"connector.client\")\n\n\/\/ txChannels holds the channels to send transaction responses to\nvar txRespChannels = cmap.New()\n\n\/\/ Client represents a cocoon code GRPC client\n\/\/ that interacts with a cocoon code.\ntype Client struct {\n\tccodeAddr string\n\tstub proto.StubClient\n\tconCtx context.Context\n\tconCancel context.CancelFunc\n\torderDiscoTicker *time.Ticker\n\tordererAddrs []string\n\tstream proto.Stub_TransactClient\n\tcocoonID string\n}\n\n\/\/ NewClient creates a new cocoon code client\nfunc NewClient() *Client {\n\treturn &Client{}\n}\n\n\/\/ SetCocoonID sets the cocoon id\nfunc (c *Client) SetCocoonID(id string) {\n\tc.cocoonID = id\n}\n\n\/\/ SetCocoonCodeAddr sets the cocoon code bind address\nfunc (c *Client) SetCocoonCodeAddr(ccAddr string) {\n\tc.ccodeAddr = ccAddr\n}\n\n\/\/ getCCAddr returns the cocoon code bind address.\n\/\/ For development, if DEV_COCOON_ADDR is set, it connects to it.\nfunc (c *Client) getCCAddr() string {\n\tif devCCodeAddr := os.Getenv(\"DEV_COCOON_ADDR\"); len(devCCodeAddr) > 0 {\n\t\treturn devCCodeAddr\n\t}\n\treturn c.ccodeAddr\n}\n\n\/\/ Close the stream and cancel connections\nfunc (c *Client) Close() {\n\tif c.stream != nil {\n\t\tc.stream.CloseSend()\n\t}\n\tif c.conCancel != nil {\n\t\tc.conCancel()\n\t}\n}\n\n\/\/ GetStream returns the grpc stream connected to the grpc cocoon code server\nfunc (c *Client) GetStream() proto.Stub_TransactClient {\n\treturn c.stream\n}\n\n\/\/ Connect connects to a cocoon code server\n\/\/ running on a known port\nfunc (c *Client) Connect() error {\n\n\tlog.Info(\"Starting cocoon code client\")\n\n\t\/\/ start a ticker to continously discover orderer addreses\n\tgo func() {\n\t\tc.orderDiscoTicker = time.NewTicker(60 * time.Second)\n\t\tfor _ = range c.orderDiscoTicker.C {\n\t\t\tvar ordererAddrs []string\n\t\t\tordererAddrs, err := orderer.DiscoverOrderers()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.ordererAddrs = ordererAddrs\n\t\t}\n\t}()\n\n\tvar ordererAddrs []string\n\tordererAddrs, err := orderer.DiscoverOrderers()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ordererAddrs = ordererAddrs\n\n\tif len(c.ordererAddrs) > 0 {\n\t\tlog.Infof(\"Orderer address list updated. Contains %d orderer address(es)\", len(c.ordererAddrs))\n\t} else {\n\t\tlog.Warning(\"No orderer address was found. We won't be able to reach the orderer. \")\n\t}\n\n\tconn, err := grpc.Dial(c.getCCAddr(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to cocoon code server. %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tlog.Debugf(\"Now connected to cocoon code at port=%s\", strings.Split(c.getCCAddr(), \":\")[1])\n\n\t\/\/ c.stub = proto.NewStubClient(conn)\n\n\t\/\/ if err = c.Do(conn); err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\ts := make(chan bool)\n\t<-s\n\n\treturn nil\n}\n\n\/\/ Do starts a request loop that continously asks the\n\/\/ server for transactions. When it receives a transaction,\n\/\/ it processes it and returns the result.\nfunc (c *Client) Do(conn *grpc.ClientConn) error {\n\n\tvar err error\n\n\t\/\/ create a context so we have complete controll of the connection\n\tc.conCtx, c.conCancel = context.WithCancel(context.Background())\n\n\t\/\/ connect to the cocoon code\n\tc.stream, err = c.stub.Transact(c.conCtx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start transaction stream with cocoon code. %s\", err)\n\t}\n\n\tfor {\n\n\t\tin, err := c.stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"connection with cocoon code has ended\")\n\t\t}\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"context canceled\") {\n\t\t\t\tlog.Info(\"Connection to cocoon code closed\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"failed to read message from cocoon code. %s\", err)\n\t\t}\n\n\t\tswitch in.Invoke {\n\t\tcase true:\n\t\t\tgo func() {\n\t\t\t\tlog.Debugf(\"New invoke transaction (%s) from cocoon code\", in.GetId())\n\t\t\t\tif err = c.handleInvokeTransaction(in); err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tc.stream.Send(&proto.Tx{\n\t\t\t\t\t\tResponse: true,\n\t\t\t\t\t\tId: in.GetId(),\n\t\t\t\t\t\tStatus: 500,\n\t\t\t\t\t\tBody: []byte(err.Error()),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}()\n\t\tcase false:\n\t\t\tlog.Debugf(\"New response transaction (%s) from cocoon code\", in.GetId())\n\t\t\tgo func() {\n\t\t\t\tif err = c.handleRespTransaction(in); err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tc.stream.Send(&proto.Tx{\n\t\t\t\t\t\tResponse: true,\n\t\t\t\t\t\tId: in.GetId(),\n\t\t\t\t\t\tStatus: 500,\n\t\t\t\t\t\tBody: []byte(err.Error()),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}\n\n\/\/ handleInvokeTransaction processes invoke transaction requests\nfunc (c *Client) handleInvokeTransaction(tx *proto.Tx) error {\n\tswitch tx.GetName() {\n\tcase types.TxCreateLedger:\n\t\treturn c.createLedger(tx)\n\tcase types.TxPut:\n\t\treturn c.put(tx)\n\tcase types.TxGetLedger:\n\t\treturn c.getLedger(tx)\n\tcase types.TxGet:\n\t\treturn c.get(tx, false)\n\tcase types.TxGetByID:\n\t\treturn c.get(tx, true)\n\tcase types.TxGetBlockByID:\n\t\treturn c.getBlock(tx)\n\tcase types.TxRangeGet:\n\t\treturn c.getRange(tx)\n\tdefault:\n\t\treturn c.stream.Send(&proto.Tx{\n\t\t\tId: tx.GetId(),\n\t\t\tResponse: true,\n\t\t\tStatus: 500,\n\t\t\tBody: []byte(fmt.Sprintf(\"unsupported transaction name (%s)\", tx.GetName())),\n\t\t})\n\t}\n}\n\n\/\/ handleRespTransaction passes the transaction to a response\n\/\/ channel with a matching transaction id and deletes the channel afterwards.\nfunc (c *Client) handleRespTransaction(tx *proto.Tx) error {\n\tif !txRespChannels.Has(tx.GetId()) {\n\t\treturn fmt.Errorf(\"response transaction (%s) does not have a corresponding response channel\", tx.GetId())\n\t}\n\n\ttxRespCh, _ := txRespChannels.Get(tx.GetId())\n\ttxRespCh.(chan *proto.Tx) <- tx\n\ttxRespChannels.Remove(tx.GetId())\n\treturn nil\n}\n\n\/\/ SendTx sends a transaction to the cocoon code\n\/\/ and saves the response channel. The response channel will\n\/\/ be passed a response when it is available in the Transact loop.\nfunc (c *Client) SendTx(tx *proto.Tx, respCh chan *proto.Tx) error {\n\ttxRespChannels.Set(tx.GetId(), respCh)\n\tif err := c.stream.Send(tx); err != nil {\n\t\ttxRespChannels.Remove(tx.GetId())\n\t\treturn err\n\t}\n\tlog.Debugf(\"Successfully sent transaction (%s) to cocoon code\", tx.GetId())\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gitmedia\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nconst Version = \"0.2.0\"\n\nvar (\n\tLargeSizeThreshold = 5 * 1024 * 1024\n\tTempDir = filepath.Join(os.TempDir(), \"git-media\")\n\tLocalWorkingDir string\n\tLocalGitDir string\n\tLocalMediaDir string\n\tLocalLogDir string\n\tcheckedTempDir string\n)\n\nfunc TempFile(prefix string) (*os.File, error) {\n\tif checkedTempDir != TempDir {\n\t\tif err := os.MkdirAll(TempDir, 0774); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcheckedTempDir = TempDir\n\t}\n\n\treturn ioutil.TempFile(TempDir, prefix)\n}\n\nfunc ResetTempDir() error {\n\tcheckedTempDir = \"\"\n\treturn os.RemoveAll(TempDir)\n}\n\nfunc LocalMediaPath(sha string) (string, error) {\n\tpath := filepath.Join(LocalMediaDir, sha[0:2], sha[2:4])\n\tif err := os.MkdirAll(path, 0744); err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error trying to create local media directory in '%s': %s\", path, err)\n\t}\n\n\treturn filepath.Join(path, sha), nil\n}\n\nfunc Environ() []string {\n\tosEnviron := os.Environ()\n\tenv := make([]string, 4, len(osEnviron)+4)\n\tenv[0] = fmt.Sprintf(\"LocalWorkingDir=%s\", LocalWorkingDir)\n\tenv[1] = fmt.Sprintf(\"LocalGitDir=%s\", LocalGitDir)\n\tenv[2] = fmt.Sprintf(\"LocalMediaDir=%s\", LocalMediaDir)\n\tenv[3] = fmt.Sprintf(\"TempDir=%s\", TempDir)\n\n\tfor _, e := range osEnviron {\n\t\tif !strings.Contains(e, \"GIT_\") {\n\t\t\tcontinue\n\t\t}\n\t\tenv = append(env, e)\n\t}\n\n\treturn env\n}\n\nfunc InRepo() bool {\n\treturn LocalWorkingDir != \"\"\n}\n\nfunc init() {\n\tvar err error\n\tLocalWorkingDir, LocalGitDir, err = resolveGitDir()\n\tif err == nil {\n\t\tLocalMediaDir = filepath.Join(LocalGitDir, \"media\")\n\t\tLocalLogDir = filepath.Join(LocalMediaDir, \"logs\")\n\t\tTempDir = filepath.Join(LocalMediaDir, \"tmp\")\n\t\tqueueDir = setupQueueDir()\n\n\t\tif err := os.MkdirAll(TempDir, 0744); err != nil {\n\t\t\tpanic(fmt.Errorf(\"Error trying to create temp directory in '%s': %s\", TempDir, err))\n\t\t}\n\t}\n}\n\nfunc resolveGitDir() (string, string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn recursiveResolveGitDir(wd)\n}\n\nfunc recursiveResolveGitDir(dir string) (string, string, error) {\n\tvar cleanDir = filepath.Clean(dir)\n\tif cleanDir[len(cleanDir)-1] == os.PathSeparator {\n\t\treturn \"\", \"\", fmt.Errorf(\"Git repository not found\")\n\t}\n\n\tif filepath.Base(dir) == gitExt {\n\t\treturn filepath.Dir(dir), dir, nil\n\t}\n\n\tgitDir := filepath.Join(dir, gitExt)\n\tif info, err := os.Stat(gitDir); err == nil {\n\t\tif info.IsDir() {\n\t\t\treturn dir, gitDir, nil\n\t\t} else {\n\t\t\treturn processDotGitFile(gitDir)\n\t\t}\n\t}\n\n\treturn recursiveResolveGitDir(filepath.Dir(dir))\n}\n\nfunc processDotGitFile(file string) (string, string, error) {\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdata := make([]byte, 512)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tcontents := string(data[0:n])\n\twd, _ := os.Getwd()\n\tif strings.HasPrefix(contents, gitPtrPrefix) {\n\t\tdir := strings.TrimSpace(strings.Split(contents, gitPtrPrefix)[1])\n\t\tabsDir, _ := filepath.Abs(dir)\n\t\treturn wd, absDir, nil\n\t}\n\n\treturn wd, \"\", nil\n}\n\nconst (\n\tgitExt = \".git\"\n\tgitPtrPrefix = \"gitdir: \"\n)\n<commit_msg>forgot to push the actual version bump<commit_after>package gitmedia\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nconst Version = \"0.2.1\"\n\nvar (\n\tLargeSizeThreshold = 5 * 1024 * 1024\n\tTempDir = filepath.Join(os.TempDir(), \"git-media\")\n\tLocalWorkingDir string\n\tLocalGitDir string\n\tLocalMediaDir string\n\tLocalLogDir string\n\tcheckedTempDir string\n)\n\nfunc TempFile(prefix string) (*os.File, error) {\n\tif checkedTempDir != TempDir {\n\t\tif err := os.MkdirAll(TempDir, 0774); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcheckedTempDir = TempDir\n\t}\n\n\treturn ioutil.TempFile(TempDir, prefix)\n}\n\nfunc ResetTempDir() error {\n\tcheckedTempDir = \"\"\n\treturn os.RemoveAll(TempDir)\n}\n\nfunc LocalMediaPath(sha string) (string, error) {\n\tpath := filepath.Join(LocalMediaDir, sha[0:2], sha[2:4])\n\tif err := os.MkdirAll(path, 0744); err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error trying to create local media directory in '%s': %s\", path, err)\n\t}\n\n\treturn filepath.Join(path, sha), nil\n}\n\nfunc Environ() []string {\n\tosEnviron := os.Environ()\n\tenv := make([]string, 4, len(osEnviron)+4)\n\tenv[0] = fmt.Sprintf(\"LocalWorkingDir=%s\", LocalWorkingDir)\n\tenv[1] = fmt.Sprintf(\"LocalGitDir=%s\", LocalGitDir)\n\tenv[2] = fmt.Sprintf(\"LocalMediaDir=%s\", LocalMediaDir)\n\tenv[3] = fmt.Sprintf(\"TempDir=%s\", TempDir)\n\n\tfor _, e := range osEnviron {\n\t\tif !strings.Contains(e, \"GIT_\") {\n\t\t\tcontinue\n\t\t}\n\t\tenv = append(env, e)\n\t}\n\n\treturn env\n}\n\nfunc InRepo() bool {\n\treturn LocalWorkingDir != \"\"\n}\n\nfunc init() {\n\tvar err error\n\tLocalWorkingDir, LocalGitDir, err = resolveGitDir()\n\tif err == nil {\n\t\tLocalMediaDir = filepath.Join(LocalGitDir, \"media\")\n\t\tLocalLogDir = filepath.Join(LocalMediaDir, \"logs\")\n\t\tTempDir = filepath.Join(LocalMediaDir, \"tmp\")\n\t\tqueueDir = setupQueueDir()\n\n\t\tif err := os.MkdirAll(TempDir, 0744); err != nil {\n\t\t\tpanic(fmt.Errorf(\"Error trying to create temp directory in '%s': %s\", TempDir, err))\n\t\t}\n\t}\n}\n\nfunc resolveGitDir() (string, string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn recursiveResolveGitDir(wd)\n}\n\nfunc recursiveResolveGitDir(dir string) (string, string, error) {\n\tvar cleanDir = filepath.Clean(dir)\n\tif cleanDir[len(cleanDir)-1] == os.PathSeparator {\n\t\treturn \"\", \"\", fmt.Errorf(\"Git repository not found\")\n\t}\n\n\tif filepath.Base(dir) == gitExt {\n\t\treturn filepath.Dir(dir), dir, nil\n\t}\n\n\tgitDir := filepath.Join(dir, gitExt)\n\tif info, err := os.Stat(gitDir); err == nil {\n\t\tif info.IsDir() {\n\t\t\treturn dir, gitDir, nil\n\t\t} else {\n\t\t\treturn processDotGitFile(gitDir)\n\t\t}\n\t}\n\n\treturn recursiveResolveGitDir(filepath.Dir(dir))\n}\n\nfunc processDotGitFile(file string) (string, string, error) {\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdata := make([]byte, 512)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tcontents := string(data[0:n])\n\twd, _ := os.Getwd()\n\tif strings.HasPrefix(contents, gitPtrPrefix) {\n\t\tdir := strings.TrimSpace(strings.Split(contents, gitPtrPrefix)[1])\n\t\tabsDir, _ := filepath.Abs(dir)\n\t\treturn wd, absDir, nil\n\t}\n\n\treturn wd, \"\", nil\n}\n\nconst (\n\tgitExt = \".git\"\n\tgitPtrPrefix = \"gitdir: \"\n)\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n)\n\ntype User struct {\n\tID uint32 `gorm:\"primary_key;AUTO_INCREMENT\"`\n\tName string\n\tEmail string\n\tEmailVerified bool\n\tPlanID uint8\n\tFollowedTeacherAt mysql.NullTime\n\tCreatedAt time.Time\n\tUpdatedAt time.Time\n}\n\nfunc (*User) TableName() string {\n\treturn \"user\"\n}\n\ntype UserService struct {\n\tdb *gorm.DB\n}\n\nfunc NewUserService(db *gorm.DB) *UserService {\n\treturn &UserService{db: db}\n}\n\nfunc (s *UserService) TableName() string {\n\treturn (&User{}).TableName()\n}\n\nfunc (s *UserService) FindByPK(id uint32) (*User, error) {\n\tuser := &User{}\n\tif result := s.db.First(user, &User{ID: id}); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"id\", fmt.Sprint(id)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"id\", id)),\n\t\t)\n\t}\n\tif err := s.db.First(user, &User{ID: id}).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByEmail(email string) (*User, error) {\n\tuser := &User{}\n\tif result := s.db.First(user, &User{Email: email}); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"email\", email); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"email\", email)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByGoogleID(googleID string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\tSELECT u.* FROM user AS u\n\tINNER JOIN user_google AS ug ON u.id = ug.user_id\n\tWHERE ug.google_id = ?\n\tLIMIT 1\n\t`\n\tif result := s.db.Raw(sql, googleID).Scan(user); result.Error != nil {\n\t\tif err := wrapNotFound(result, \"user_google\", \"google_id\", googleID); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(\"user_google\", \"google_id\", googleID)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByUserAPIToken(userAPIToken string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\tSELECT u.* FROM user AS u\n\tINNER JOIN user_api_token AS uat ON u.id = uat.user_id\n\tWHERE uat.token = ?\n\t`\n\tif result := s.db.Raw(sql, userAPIToken).Scan(user); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"userAPIToken\", userAPIToken); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"userAPIToken\", userAPIToken)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\n\/\/ Returns an empty slice if no users found\nfunc (s *UserService) FindAllEmailVerifiedIsTrue(notificationInterval int) ([]*User, error) {\n\tvar users []*User\n\tsql := `\n\tSELECT u.* FROM following_teacher AS ft\n\tINNER JOIN user AS u ON ft.user_id = u.id\n\tINNER JOIN m_plan AS mp ON u.plan_id = mp.id\n\tWHERE\n\t u.email_verified = 1\n\t AND mp.notification_interval = ?\n\t`\n\tresult := s.db.Raw(strings.TrimSpace(sql), notificationInterval).Scan(&users)\n\tif result.Error != nil && !result.RecordNotFound() {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to find Users\"),\n\t\t)\n\t}\n\treturn users, nil\n}\n\n\/\/ Returns an empty slice if no users found\nfunc (s *UserService) FindAllFollowedTeacherAtIsNull(createdAt time.Time) ([]*User, error) {\n\tvar users []*User\n\tsql := `SELECT * FROM user WHERE followed_teacher_at IS NULL AND CAST(created_at AS DATE) = ? ORDER BY id`\n\tresult := s.db.Raw(sql, createdAt.Format(dbDateFormat)).Scan(&users)\n\tif result.Error != nil && !result.RecordNotFound() {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to find users\"),\n\t\t)\n\t}\n\treturn users, nil\n}\n\nfunc (s *UserService) Create(name, email string) (*User, error) {\n\tuser := &User{\n\t\tName: name,\n\t\tEmail: email,\n\t\tEmailVerified: true,\n\t\tPlanID: DefaultMPlanID,\n\t}\n\tif result := s.db.Create(user); result.Error != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to Create user\"),\n\t\t\terrors.WithResource(errors.NewResource(\"user\", \"email\", email)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) CreateWithGoogle(name, email, googleID string) (*User, *UserGoogle, error) {\n\tuser, err := s.FindByEmail(email)\n\tif e, ok := err.(*errors.AnnotatedError); ok && e.IsNotFound() {\n\t\tuser = &User{\n\t\t\tName: name,\n\t\t\tEmail: email,\n\t\t\tEmailVerified: true,\n\t\t\tPlanID: DefaultMPlanID,\n\t\t}\n\t\tif result := s.db.Create(user); result.Error != nil {\n\t\t\treturn nil, nil, errors.NewInternalError(\n\t\t\t\terrors.WithError(result.Error),\n\t\t\t\terrors.WithMessage(\"Failed to create User\"),\n\t\t\t\terrors.WithResource(errors.NewResourceWithEntries(\n\t\t\t\t\t\"user\", []errors.ResourceEntry{\n\t\t\t\t\t\t{\"email\", email}, {\"googleID\", googleID},\n\t\t\t\t\t},\n\t\t\t\t)),\n\t\t\t)\n\t\t}\n\t}\n\t\/\/ Do nothing if the user exists.\n\n\tuserGoogleService := NewUserGoogleService(s.db)\n\tuserGoogle, err := userGoogleService.FindByUserID(user.ID)\n\tif e, ok := err.(*errors.AnnotatedError); ok && e.IsNotFound() {\n\t\tuserGoogle = &UserGoogle{\n\t\t\tGoogleID: googleID,\n\t\t\tUserID: user.ID,\n\t\t}\n\t\tif result := s.db.Create(userGoogle); result.Error != nil {\n\t\t\treturn nil, nil, errors.NewInternalError(\n\t\t\t\terrors.WithError(result.Error),\n\t\t\t\terrors.WithMessage(\"Failed to create UserGoogle\"),\n\t\t\t\terrors.WithResource(errors.NewResource(\"user_google\", \"googleID\", googleID)),\n\t\t\t)\n\t\t}\n\t}\n\t\/\/ Do nothing if the user google exists.\n\n\treturn user, userGoogle, nil\n}\n\nfunc (s *UserService) UpdateEmail(user *User, newEmail string) error {\n\tresult := s.db.Exec(\"UPDATE user SET email = ? WHERE id = ?\", newEmail, user.ID)\n\tif result.Error != nil {\n\t\treturn errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to update user email\"),\n\t\t\terrors.WithResource(errors.NewResourceWithEntries(\n\t\t\t\tuser.TableName(), []errors.ResourceEntry{\n\t\t\t\t\t{\"id\", user.ID}, {\"email\", newEmail},\n\t\t\t\t},\n\t\t\t)),\n\t\t)\n\t}\n\treturn nil\n}\n\nfunc (s *UserService) UpdateFollowedTeacherAt(user *User) error {\n\tsql := \"UPDATE user SET followed_teacher_at = NOW() WHERE id = ?\"\n\tif err := s.db.Exec(sql, user.ID).Error; err != nil {\n\t\treturn errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessage(\"Failed to update followed_teacher_at\"),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"id\", user.ID)),\n\t\t)\n\t}\n\treturn nil\n}\n\nfunc (s *UserService) FindLoggedInUser(token string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\t\tSELECT * FROM user AS u\n\t\tINNER JOIN user_api_token AS uat ON u.id = uat.user_id\n\t\tWHERE uat.token = ?\n\t\t`\n\tresult := s.db.Model(&User{}).Raw(strings.TrimSpace(sql), token).Scan(user)\n\tif result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"token\", token); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"token\", token)),\n\t\t)\n\t}\n\treturn user, nil\n}\n<commit_msg>Fix slow SQL<commit_after>package model\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n)\n\ntype User struct {\n\tID uint32 `gorm:\"primary_key;AUTO_INCREMENT\"`\n\tName string\n\tEmail string\n\tEmailVerified bool\n\tPlanID uint8\n\tFollowedTeacherAt mysql.NullTime\n\tCreatedAt time.Time\n\tUpdatedAt time.Time\n}\n\nfunc (*User) TableName() string {\n\treturn \"user\"\n}\n\ntype UserService struct {\n\tdb *gorm.DB\n}\n\nfunc NewUserService(db *gorm.DB) *UserService {\n\treturn &UserService{db: db}\n}\n\nfunc (s *UserService) TableName() string {\n\treturn (&User{}).TableName()\n}\n\nfunc (s *UserService) FindByPK(id uint32) (*User, error) {\n\tuser := &User{}\n\tif result := s.db.First(user, &User{ID: id}); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"id\", fmt.Sprint(id)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"id\", id)),\n\t\t)\n\t}\n\tif err := s.db.First(user, &User{ID: id}).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByEmail(email string) (*User, error) {\n\tuser := &User{}\n\tif result := s.db.First(user, &User{Email: email}); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"email\", email); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"email\", email)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByGoogleID(googleID string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\tSELECT u.* FROM user AS u\n\tINNER JOIN user_google AS ug ON u.id = ug.user_id\n\tWHERE ug.google_id = ?\n\tLIMIT 1\n\t`\n\tif result := s.db.Raw(sql, googleID).Scan(user); result.Error != nil {\n\t\tif err := wrapNotFound(result, \"user_google\", \"google_id\", googleID); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(\"user_google\", \"google_id\", googleID)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) FindByUserAPIToken(userAPIToken string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\tSELECT u.* FROM user AS u\n\tINNER JOIN user_api_token AS uat ON u.id = uat.user_id\n\tWHERE uat.token = ?\n\t`\n\tif result := s.db.Raw(sql, userAPIToken).Scan(user); result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"userAPIToken\", userAPIToken); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"userAPIToken\", userAPIToken)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\n\/\/ Returns an empty slice if no users found\nfunc (s *UserService) FindAllEmailVerifiedIsTrue(notificationInterval int) ([]*User, error) {\n\tvar users []*User\n\tsql := `\n\tSELECT u.* FROM (SELECT DISTINCT(user_id) FROM following_teacher) AS ft\n\tINNER JOIN user AS u ON ft.user_id = u.id\n\tINNER JOIN m_plan AS mp ON u.plan_id = mp.id\n\tWHERE\n\t u.email_verified = 1\n\t AND mp.notification_interval = ?\n\t`\n\tresult := s.db.Raw(strings.TrimSpace(sql), notificationInterval).Scan(&users)\n\tif result.Error != nil && !result.RecordNotFound() {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to find Users\"),\n\t\t)\n\t}\n\treturn users, nil\n}\n\n\/\/ Returns an empty slice if no users found\nfunc (s *UserService) FindAllFollowedTeacherAtIsNull(createdAt time.Time) ([]*User, error) {\n\tvar users []*User\n\tsql := `SELECT * FROM user WHERE followed_teacher_at IS NULL AND CAST(created_at AS DATE) = ? ORDER BY id`\n\tresult := s.db.Raw(sql, createdAt.Format(dbDateFormat)).Scan(&users)\n\tif result.Error != nil && !result.RecordNotFound() {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to find users\"),\n\t\t)\n\t}\n\treturn users, nil\n}\n\nfunc (s *UserService) Create(name, email string) (*User, error) {\n\tuser := &User{\n\t\tName: name,\n\t\tEmail: email,\n\t\tEmailVerified: true,\n\t\tPlanID: DefaultMPlanID,\n\t}\n\tif result := s.db.Create(user); result.Error != nil {\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to Create user\"),\n\t\t\terrors.WithResource(errors.NewResource(\"user\", \"email\", email)),\n\t\t)\n\t}\n\treturn user, nil\n}\n\nfunc (s *UserService) CreateWithGoogle(name, email, googleID string) (*User, *UserGoogle, error) {\n\tuser, err := s.FindByEmail(email)\n\tif e, ok := err.(*errors.AnnotatedError); ok && e.IsNotFound() {\n\t\tuser = &User{\n\t\t\tName: name,\n\t\t\tEmail: email,\n\t\t\tEmailVerified: true,\n\t\t\tPlanID: DefaultMPlanID,\n\t\t}\n\t\tif result := s.db.Create(user); result.Error != nil {\n\t\t\treturn nil, nil, errors.NewInternalError(\n\t\t\t\terrors.WithError(result.Error),\n\t\t\t\terrors.WithMessage(\"Failed to create User\"),\n\t\t\t\terrors.WithResource(errors.NewResourceWithEntries(\n\t\t\t\t\t\"user\", []errors.ResourceEntry{\n\t\t\t\t\t\t{\"email\", email}, {\"googleID\", googleID},\n\t\t\t\t\t},\n\t\t\t\t)),\n\t\t\t)\n\t\t}\n\t}\n\t\/\/ Do nothing if the user exists.\n\n\tuserGoogleService := NewUserGoogleService(s.db)\n\tuserGoogle, err := userGoogleService.FindByUserID(user.ID)\n\tif e, ok := err.(*errors.AnnotatedError); ok && e.IsNotFound() {\n\t\tuserGoogle = &UserGoogle{\n\t\t\tGoogleID: googleID,\n\t\t\tUserID: user.ID,\n\t\t}\n\t\tif result := s.db.Create(userGoogle); result.Error != nil {\n\t\t\treturn nil, nil, errors.NewInternalError(\n\t\t\t\terrors.WithError(result.Error),\n\t\t\t\terrors.WithMessage(\"Failed to create UserGoogle\"),\n\t\t\t\terrors.WithResource(errors.NewResource(\"user_google\", \"googleID\", googleID)),\n\t\t\t)\n\t\t}\n\t}\n\t\/\/ Do nothing if the user google exists.\n\n\treturn user, userGoogle, nil\n}\n\nfunc (s *UserService) UpdateEmail(user *User, newEmail string) error {\n\tresult := s.db.Exec(\"UPDATE user SET email = ? WHERE id = ?\", newEmail, user.ID)\n\tif result.Error != nil {\n\t\treturn errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithMessage(\"Failed to update user email\"),\n\t\t\terrors.WithResource(errors.NewResourceWithEntries(\n\t\t\t\tuser.TableName(), []errors.ResourceEntry{\n\t\t\t\t\t{\"id\", user.ID}, {\"email\", newEmail},\n\t\t\t\t},\n\t\t\t)),\n\t\t)\n\t}\n\treturn nil\n}\n\nfunc (s *UserService) UpdateFollowedTeacherAt(user *User) error {\n\tsql := \"UPDATE user SET followed_teacher_at = NOW() WHERE id = ?\"\n\tif err := s.db.Exec(sql, user.ID).Error; err != nil {\n\t\treturn errors.NewInternalError(\n\t\t\terrors.WithError(err),\n\t\t\terrors.WithMessage(\"Failed to update followed_teacher_at\"),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"id\", user.ID)),\n\t\t)\n\t}\n\treturn nil\n}\n\nfunc (s *UserService) FindLoggedInUser(token string) (*User, error) {\n\tuser := &User{}\n\tsql := `\n\t\tSELECT * FROM user AS u\n\t\tINNER JOIN user_api_token AS uat ON u.id = uat.user_id\n\t\tWHERE uat.token = ?\n\t\t`\n\tresult := s.db.Model(&User{}).Raw(strings.TrimSpace(sql), token).Scan(user)\n\tif result.Error != nil {\n\t\tif err := wrapNotFound(result, user.TableName(), \"token\", token); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.NewInternalError(\n\t\t\terrors.WithError(result.Error),\n\t\t\terrors.WithResource(errors.NewResource(user.TableName(), \"token\", token)),\n\t\t)\n\t}\n\treturn user, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gitbackup\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype repository struct {\n\tname string\n\tcloneURL string\n}\n\n\/\/ BackupTarget backs up an entity that holds one or more git repositories and\n\/\/ has an interface to retrieve that list of repositories.\n\/\/ Examples of entities include:\n\/\/ - A GitHub user.\n\/\/ - A BitBucket user.\n\/\/ - A GitHub organization.\nfunc BackupTarget(target Target, backupDirectory string) error {\n\tfmt.Printf(\"########## Backing up target %s ##########\\n\\n\", target.Name)\n\n\t\/\/ Retrieve a list of all the git repositories available from the target.\n\tvar repoList []repository\n\tvar err error\n\tswitch target.Source {\n\tcase \"github\":\n\t\trepoList, err = getGitHubRepoList(target, backupDirectory)\n\tcase \"bitbucket\":\n\t\trepoList, err = getBitBucketRepoList(target, backupDirectory)\n\tdefault:\n\t\terr = fmt.Errorf(`\"%s\" is not a recognized source type`, target.Source)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Back up each repository found.\n\tfor _, repo := range repoList {\n\t\tfmt.Println(fmt.Sprintf(\"#> %s\", repo.name))\n\t\tif includeRepository(repo.name, target) {\n\t\t\tbackupRepository(\n\t\t\t\ttarget.Name,\n\t\t\t\trepo.name,\n\t\t\t\trepo.cloneURL,\n\t\t\t\tbackupDirectory,\n\t\t\t)\n\t\t\tfmt.Println(\"\")\n\t\t} else {\n\t\t\tfmt.Print(\"Skipped.\\n\\n\")\n\t\t}\n\t}\n\n\tfmt.Println(\"\")\n\n\treturn nil\n}\n\n\/\/ getGitHubRepoList finds all the repositories belonging to a given user or\n\/\/ organization on GitHub.\nfunc getGitHubRepoList(target Target, backupDirectory string) ([]repository, error) {\n\t\/\/ Create URL to request list of repos.\n\trequestURL := fmt.Sprintf(\n\t\t\"https:\/\/api.github.com\/%s\/%s\/repos?access_token=%s&per_page=200\",\n\t\ttarget.Type,\n\t\ttarget.Entity,\n\t\ttarget.Token,\n\t)\n\n\t\/\/ Retrieve list of repositories.\n\tresponse, err := http.Get(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to connect with the source to retrieve the list of repositories: %s\", err)\n\t}\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve the list of repositories: %s\", err)\n\t}\n\n\t\/\/ Parse JSON response.\n\tvar data []map[string]interface{}\n\tif err := json.Unmarshal(contents, &data); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\n\t\/\/ Make a list of repositories.\n\trepoList := make([]repository, len(data))\n\tfor i, repo := range data {\n\t\trepoName, _ := repo[\"name\"].(string)\n\t\tcloneURL, _ := repo[\"clone_url\"].(string)\n\t\tcloneURL = strings.Replace(\n\t\t\tcloneURL,\n\t\t\t\"https:\/\/\",\n\t\t\tfmt.Sprintf(\"https:\/\/%s:%s@\", target.Entity, target.Token),\n\t\t\t1,\n\t\t)\n\t\trepoList[i] = repository{name: repoName, cloneURL: cloneURL}\n\t}\n\n\t\/\/ No errors.\n\treturn repoList, nil\n}\n\n\/\/ getBitBucketRepoList finds all the repositories belonging to a given user on\n\/\/ BitBucket.\nfunc getBitBucketRepoList(target Target, backupDirectory string) ([]repository, error) {\n\t\/\/ Create URL to request list of repos.\n\t\/\/ TODO: support pagination.\n\trequestURL := fmt.Sprintf(\n\t\t\"https:\/\/%s:%s@bitbucket.org\/api\/2.0\/repositories\/%s?page=1&pagelen=100\",\n\t\ttarget.Entity,\n\t\ttarget.Password,\n\t\ttarget.Entity,\n\t)\n\n\t\/\/ Retrieve list of repositories.\n\tresponse, err := http.Get(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to connect with the source to retrieve the list of repositories: %s\", err)\n\t}\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve the list of repositories: %s\", err)\n\t}\n\n\t\/\/ Parse JSON response.\n\tvar metadata map[string]json.RawMessage\n\tif err := json.Unmarshal(contents, &metadata); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\tvar data []map[string]json.RawMessage\n\tif err := json.Unmarshal(metadata[\"values\"], &data); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\n\t\/\/ Make a list of repositories.\n\trepoList := make([]repository, len(data))\n\tfor i, repo := range data {\n\t\t\/\/ Parse the remaining JSON message that pertains to this repository.\n\t\tvar repoName string\n\t\tif err := json.Unmarshal(repo[\"name\"], &repoName); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\t\tvar links map[string]json.RawMessage\n\t\tif err := json.Unmarshal(repo[\"links\"], &links); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\t\tvar cloneLinks []map[string]string\n\t\tif err := json.Unmarshal(links[\"clone\"], &cloneLinks); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\n\t\t\/\/ Find the https URL to use for cloning.\n\t\tvar cloneURL string\n\t\tfor _, link := range cloneLinks {\n\t\t\tif link[\"name\"] == \"https\" {\n\t\t\t\tcloneURL = link[\"href\"]\n\t\t\t}\n\t\t}\n\t\tif cloneURL == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"Could not determine HTTPS cloning URL: %s\", cloneLinks)\n\t\t}\n\n\t\t\/\/ Determine URL for cloning.\n\t\tcloneURL = strings.Replace(\n\t\t\tcloneURL,\n\t\t\tfmt.Sprintf(\"https:\/\/%s@\", target.Entity),\n\t\t\tfmt.Sprintf(\"https:\/\/%s:%s@\", target.Entity, target.Password),\n\t\t\t1,\n\t\t)\n\n\t\trepoList[i] = repository{name: repoName, cloneURL: cloneURL}\n\t}\n\n\t\/\/ No errors.\n\treturn repoList, nil\n}\n\n\/\/ backupRepository takes a remote git repository and backs it up locally.\n\/\/ Note that this makes a mirror repository - in other words, the backup only\n\/\/ contains the content of a normal .git repository but no working directory,\n\/\/ which saves space. You can always get a normal repository from the backup by\n\/\/ doing a normal git clone of the backup itself.\nfunc backupRepository(targetName string, repoName string, cloneURL string, backupDirectory string) {\n\tcloneDirectory := filepath.Join(backupDirectory, targetName, repoName)\n\n\tif _, err := os.Stat(cloneDirectory); os.IsNotExist(err) {\n\t\t\/\/ The repo doesn't exist locally, clone it.\n\t\tcmd := exec.Command(\"git\", \"clone\", \"--mirror\", cloneURL, cloneDirectory)\n\t\tcmdOut, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error cloning the repository:\", err)\n\t\t} else {\n\t\t\tif len(cmdOut) > 0 {\n\t\t\t\tfmt.Printf(string(cmdOut))\n\t\t\t}\n\t\t\tfmt.Println(\"Cloned repository.\")\n\t\t}\n\t} else {\n\t\t\/\/ The repo already exists, pull updates.\n\t\tcmd := exec.Command(\"git\", \"fetch\", \"-p\", cloneURL)\n\t\tcmd.Dir = cloneDirectory\n\t\tcmdOut, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error pulling in the repository:\", err)\n\t\t} else {\n\t\t\t\/\/ Display pulled information.\n\t\t\tif len(cmdOut) > 0 {\n\t\t\t\tfmt.Printf(string(cmdOut))\n\t\t\t}\n\t\t\tfmt.Println(\"Pulled latest updates in the repository.\")\n\t\t}\n\t}\n}\n\nfunc includeRepository(repoName string, target Target) bool {\n\tif target.Skip != \"\" {\n\t\tr, err := regexp.Compile(target.Skip)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(`\"skip\" does not specify a valid regular expression: %s`, err)\n\t\t}\n\t\treturn r.MatchString(repoName) == false\n\t}\n\tif target.Only != \"\" {\n\t\tr, err := regexp.Compile(target.Only)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(`\"only\" does not specify a valid regular expression: %s`, err)\n\t\t}\n\t\treturn r.MatchString(repoName) == true\n\t}\n\n\treturn true\n}\n<commit_msg>Document function.<commit_after>package gitbackup\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype repository struct {\n\tname string\n\tcloneURL string\n}\n\n\/\/ BackupTarget backs up an entity that holds one or more git repositories and\n\/\/ has an interface to retrieve that list of repositories.\n\/\/ Examples of entities include:\n\/\/ - A GitHub user.\n\/\/ - A BitBucket user.\n\/\/ - A GitHub organization.\nfunc BackupTarget(target Target, backupDirectory string) error {\n\tfmt.Printf(\"########## Backing up target %s ##########\\n\\n\", target.Name)\n\n\t\/\/ Retrieve a list of all the git repositories available from the target.\n\tvar repoList []repository\n\tvar err error\n\tswitch target.Source {\n\tcase \"github\":\n\t\trepoList, err = getGitHubRepoList(target, backupDirectory)\n\tcase \"bitbucket\":\n\t\trepoList, err = getBitBucketRepoList(target, backupDirectory)\n\tdefault:\n\t\terr = fmt.Errorf(`\"%s\" is not a recognized source type`, target.Source)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Back up each repository found.\n\tfor _, repo := range repoList {\n\t\tfmt.Println(fmt.Sprintf(\"#> %s\", repo.name))\n\t\tif includeRepository(repo.name, target) {\n\t\t\tbackupRepository(\n\t\t\t\ttarget.Name,\n\t\t\t\trepo.name,\n\t\t\t\trepo.cloneURL,\n\t\t\t\tbackupDirectory,\n\t\t\t)\n\t\t\tfmt.Println(\"\")\n\t\t} else {\n\t\t\tfmt.Print(\"Skipped.\\n\\n\")\n\t\t}\n\t}\n\n\tfmt.Println(\"\")\n\n\treturn nil\n}\n\n\/\/ getGitHubRepoList finds all the repositories belonging to a given user or\n\/\/ organization on GitHub.\nfunc getGitHubRepoList(target Target, backupDirectory string) ([]repository, error) {\n\t\/\/ Create URL to request list of repos.\n\trequestURL := fmt.Sprintf(\n\t\t\"https:\/\/api.github.com\/%s\/%s\/repos?access_token=%s&per_page=200\",\n\t\ttarget.Type,\n\t\ttarget.Entity,\n\t\ttarget.Token,\n\t)\n\n\t\/\/ Retrieve list of repositories.\n\tresponse, err := http.Get(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to connect with the source to retrieve the list of repositories: %s\", err)\n\t}\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve the list of repositories: %s\", err)\n\t}\n\n\t\/\/ Parse JSON response.\n\tvar data []map[string]interface{}\n\tif err := json.Unmarshal(contents, &data); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\n\t\/\/ Make a list of repositories.\n\trepoList := make([]repository, len(data))\n\tfor i, repo := range data {\n\t\trepoName, _ := repo[\"name\"].(string)\n\t\tcloneURL, _ := repo[\"clone_url\"].(string)\n\t\tcloneURL = strings.Replace(\n\t\t\tcloneURL,\n\t\t\t\"https:\/\/\",\n\t\t\tfmt.Sprintf(\"https:\/\/%s:%s@\", target.Entity, target.Token),\n\t\t\t1,\n\t\t)\n\t\trepoList[i] = repository{name: repoName, cloneURL: cloneURL}\n\t}\n\n\t\/\/ No errors.\n\treturn repoList, nil\n}\n\n\/\/ getBitBucketRepoList finds all the repositories belonging to a given user on\n\/\/ BitBucket.\nfunc getBitBucketRepoList(target Target, backupDirectory string) ([]repository, error) {\n\t\/\/ Create URL to request list of repos.\n\t\/\/ TODO: support pagination.\n\trequestURL := fmt.Sprintf(\n\t\t\"https:\/\/%s:%s@bitbucket.org\/api\/2.0\/repositories\/%s?page=1&pagelen=100\",\n\t\ttarget.Entity,\n\t\ttarget.Password,\n\t\ttarget.Entity,\n\t)\n\n\t\/\/ Retrieve list of repositories.\n\tresponse, err := http.Get(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to connect with the source to retrieve the list of repositories: %s\", err)\n\t}\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to retrieve the list of repositories: %s\", err)\n\t}\n\n\t\/\/ Parse JSON response.\n\tvar metadata map[string]json.RawMessage\n\tif err := json.Unmarshal(contents, &metadata); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\tvar data []map[string]json.RawMessage\n\tif err := json.Unmarshal(metadata[\"values\"], &data); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t}\n\n\t\/\/ Make a list of repositories.\n\trepoList := make([]repository, len(data))\n\tfor i, repo := range data {\n\t\t\/\/ Parse the remaining JSON message that pertains to this repository.\n\t\tvar repoName string\n\t\tif err := json.Unmarshal(repo[\"name\"], &repoName); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\t\tvar links map[string]json.RawMessage\n\t\tif err := json.Unmarshal(repo[\"links\"], &links); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\t\tvar cloneLinks []map[string]string\n\t\tif err := json.Unmarshal(links[\"clone\"], &cloneLinks); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to parse JSON: %s\", err)\n\t\t}\n\n\t\t\/\/ Find the https URL to use for cloning.\n\t\tvar cloneURL string\n\t\tfor _, link := range cloneLinks {\n\t\t\tif link[\"name\"] == \"https\" {\n\t\t\t\tcloneURL = link[\"href\"]\n\t\t\t}\n\t\t}\n\t\tif cloneURL == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"Could not determine HTTPS cloning URL: %s\", cloneLinks)\n\t\t}\n\n\t\t\/\/ Determine URL for cloning.\n\t\tcloneURL = strings.Replace(\n\t\t\tcloneURL,\n\t\t\tfmt.Sprintf(\"https:\/\/%s@\", target.Entity),\n\t\t\tfmt.Sprintf(\"https:\/\/%s:%s@\", target.Entity, target.Password),\n\t\t\t1,\n\t\t)\n\n\t\trepoList[i] = repository{name: repoName, cloneURL: cloneURL}\n\t}\n\n\t\/\/ No errors.\n\treturn repoList, nil\n}\n\n\/\/ backupRepository takes a remote git repository and backs it up locally.\n\/\/ Note that this makes a mirror repository - in other words, the backup only\n\/\/ contains the content of a normal .git repository but no working directory,\n\/\/ which saves space. You can always get a normal repository from the backup by\n\/\/ doing a normal git clone of the backup itself.\nfunc backupRepository(targetName string, repoName string, cloneURL string, backupDirectory string) {\n\tcloneDirectory := filepath.Join(backupDirectory, targetName, repoName)\n\n\tif _, err := os.Stat(cloneDirectory); os.IsNotExist(err) {\n\t\t\/\/ The repo doesn't exist locally, clone it.\n\t\tcmd := exec.Command(\"git\", \"clone\", \"--mirror\", cloneURL, cloneDirectory)\n\t\tcmdOut, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error cloning the repository:\", err)\n\t\t} else {\n\t\t\tif len(cmdOut) > 0 {\n\t\t\t\tfmt.Printf(string(cmdOut))\n\t\t\t}\n\t\t\tfmt.Println(\"Cloned repository.\")\n\t\t}\n\t} else {\n\t\t\/\/ The repo already exists, pull updates.\n\t\tcmd := exec.Command(\"git\", \"fetch\", \"-p\", cloneURL)\n\t\tcmd.Dir = cloneDirectory\n\t\tcmdOut, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error pulling in the repository:\", err)\n\t\t} else {\n\t\t\t\/\/ Display pulled information.\n\t\t\tif len(cmdOut) > 0 {\n\t\t\t\tfmt.Printf(string(cmdOut))\n\t\t\t}\n\t\t\tfmt.Println(\"Pulled latest updates in the repository.\")\n\t\t}\n\t}\n}\n\n\/\/ includeRepository takes a repository name and the information about the\n\/\/ target it is part of, and determines whether the repository should be backed\n\/\/ up or not.\nfunc includeRepository(repoName string, target Target) bool {\n\tif target.Skip != \"\" {\n\t\tr, err := regexp.Compile(target.Skip)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(`\"skip\" does not specify a valid regular expression: %s`, err)\n\t\t}\n\t\treturn r.MatchString(repoName) == false\n\t}\n\tif target.Only != \"\" {\n\t\tr, err := regexp.Compile(target.Only)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(`\"only\" does not specify a valid regular expression: %s`, err)\n\t\t}\n\t\treturn r.MatchString(repoName) == true\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014,2015,2016 Docker, Inc.\n\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"github.com\/urfave\/cli\"\n)\n\nvar stateCommand = cli.Command{\n\tName: \"state\",\n\tUsage: \"output the state of a container\",\n\tArgsUsage: `<container-id>\n\n <container-id> is your name for the instance of the container`,\n\tDescription: `The state command outputs current state information for the\ninstance of a container.`,\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ TODO\n\t\treturn nil\n\t},\n}\n<commit_msg>state: Implement OCI runtime spec for state<commit_after>\/\/ Copyright (c) 2014,2015,2016 Docker, Inc.\n\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\tvc \"github.com\/containers\/virtcontainers\"\n\t\"github.com\/containers\/virtcontainers\/pkg\/oci\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar stateCommand = cli.Command{\n\tName: \"state\",\n\tUsage: \"output the state of a container\",\n\tArgsUsage: `<container-id>\n\n <container-id> is your name for the instance of the container`,\n\tDescription: `The state command outputs current state information for the\ninstance of a container.`,\n\tAction: func(context *cli.Context) error {\n\t\treturn state(context.String(\"container-id\"))\n\t},\n}\n\nfunc state(containerID string) error {\n\t\/\/ Checks the MUST and MUST NOT from OCI runtime specification\n\tif err := validContainer(containerID); err != nil {\n\t\treturn err\n\t}\n\n\tpodStatus, err := vc.StatusPod(containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Convert the status to the expected State structure\n\tstate, err := oci.StatusToOCIState(podStatus)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update status of process\n\trunning, err := processRunning(state.Pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif running == false {\n\t\tstate.Status = \"stopped\"\n\t}\n\n\tstateJSON, err := json.Marshal(state)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Print stateJSON to stdout\n\tfmt.Fprintf(os.Stdout, \"%s\", stateJSON)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package velox\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/mattbaird\/jsonpatch\"\n)\n\nvar (\n\t\/\/15ms is approximately highest resolution on the JS eventloop\n\tMinThrottle = 15 * time.Millisecond\n\tDefaultThrottle = 200 * time.Millisecond\n)\n\n\/\/State must be embedded into a struct to make it syncable.\ntype State struct {\n\t\/\/configuration\n\tThrottle time.Duration `json:\"-\"`\n\t\/\/internal state\n\tinitMut sync.Mutex\n\tinitd bool\n\tgostruct interface{}\n\tdataMut sync.RWMutex \/\/protects bytes\/delta\/version\n\tbytes []byte\n\tdelta []byte\n\tversion int64\n\tconnMut sync.Mutex\n\tconns map[string]*conn\n\tpush struct {\n\t\tmut sync.Mutex\n\t\ting bool\n\t\tqueued bool\n\t\tstart time.Time\n\t\twg sync.WaitGroup\n\t}\n}\n\nfunc (s *State) init(gostruct interface{}) error {\n\tif s.Throttle < MinThrottle {\n\t\ts.Throttle = DefaultThrottle\n\t}\n\t\/\/initial JSON bytes\n\tif b, err := json.Marshal(gostruct); err != nil {\n\t\treturn fmt.Errorf(\"JSON marshalling failed: %s\", err)\n\t} else {\n\t\ts.bytes = b\n\t}\n\ts.gostruct = gostruct\n\ts.version = 1\n\ts.conns = map[string]*conn{}\n\ts.initd = true\n\treturn nil\n}\n\nfunc (s *State) sync(gostruct interface{}) (*State, error) {\n\ts.initMut.Lock()\n\tdefer s.initMut.Unlock()\n\tif !s.initd {\n\t\tif err := s.init(gostruct); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if s.gostruct != gostruct {\n\t\treturn nil, errors.New(\"A different struct is already synced\")\n\t}\n\treturn s, nil\n}\n\nfunc (s *State) subscribe(conn *conn) {\n\t\/\/subscribe\n\ts.connMut.Lock()\n\ts.conns[conn.id] = conn\n\ts.connMut.Unlock()\n\t\/\/and then unsubscribe on close\n\tgo func() {\n\t\tconn.Wait()\n\t\ts.connMut.Lock()\n\t\tdelete(s.conns, conn.id)\n\t\ts.connMut.Unlock()\n\t}()\n}\n\nfunc (s *State) NumConnections() int {\n\ts.connMut.Lock()\n\tn := len(s.conns)\n\ts.connMut.Unlock()\n\treturn n\n}\n\n\/\/Send the changes from this object to all connected clients.\n\/\/Push is thread-safe and is throttled so it can be called\n\/\/with abandon.\nfunc (s *State) Push() {\n\tgo s.gopush()\n}\n\n\/\/non-blocking push\nfunc (s *State) gopush() {\n\ts.push.mut.Lock()\n\tif s.push.ing {\n\t\ts.push.queued = true\n\t\ts.push.mut.Unlock()\n\t\treturn\n\t}\n\ts.push.ing = true\n\ts.push.start = time.Now()\n\t\/\/queue cleanup\n\tdefer func() {\n\t\t\/\/measure time passed, ensure we wait at least Throttle time\n\t\ttdelta := time.Now().Sub(s.push.start)\n\t\tif t := s.Throttle - tdelta; t > 0 {\n\t\t\ttime.Sleep(t)\n\t\t}\n\t\t\/\/cleanup\n\t\ts.push.ing = false\n\t\tif s.push.queued {\n\t\t\ts.push.queued = false\n\t\t\ts.push.mut.Unlock()\n\t\t\ts.Push() \/\/auto-push\n\t\t} else {\n\t\t\ts.push.mut.Unlock()\n\t\t}\n\t}()\n\t\/\/calculate new json state\n\tnewBytes, err := json.Marshal(s.gostruct)\n\tif err != nil {\n\t\tlog.Printf(\"velox: marshal failed: %s\", err)\n\t\treturn\n\t}\n\t\/\/calculate change set from last version\n\tops, _ := jsonpatch.CreatePatch(s.bytes, newBytes)\n\tif len(s.bytes) > 0 && len(ops) > 0 {\n\t\t\/\/changes! bump version\n\t\ts.dataMut.Lock()\n\t\ts.delta, _ = json.Marshal(ops)\n\t\ts.bytes = newBytes\n\t\ts.version++\n\t\ts.dataMut.Unlock()\n\t}\n\t\/\/send this new change to each subscriber\n\ts.connMut.Lock()\n\tfor _, c := range s.conns {\n\t\tif c.version != s.version {\n\t\t\ts.push.wg.Add(1)\n\t\t\tgo func(c *conn) {\n\t\t\t\ts.pushTo(c)\n\t\t\t\ts.push.wg.Done()\n\t\t\t}(c)\n\t\t}\n\t}\n\ts.connMut.Unlock()\n\t\/\/wait for all connection pushes\n\ts.push.wg.Wait()\n\t\/\/cleanup()\n}\n\nfunc (s *State) pushTo(c *conn) {\n\tif c.version == s.version {\n\t\treturn\n\t}\n\tupdate := &update{Version: s.version}\n\ts.dataMut.RLock()\n\t\/\/choose optimal update (send the smallest)\n\tif s.delta != nil && c.version == (s.version-1) && len(s.bytes) > 0 && len(s.delta) < len(s.bytes) {\n\t\tupdate.Delta = true\n\t\tupdate.Body = s.delta\n\t} else {\n\t\tupdate.Delta = false\n\t\tupdate.Body = s.bytes\n\t}\n\t\/\/send update\n\tlog.Printf(\"[%s] sending version %d (delta: %v)\", c.id, s.version, update.Delta)\n\tif err := c.send(update); err == nil {\n\t\tc.version = s.version \/\/sent! mark this version\n\t}\n\ts.dataMut.RUnlock()\n}\n\n\/\/A single update. May contain compression flags in future.\ntype update struct {\n\tPing bool `json:\"ping,omitempty\"`\n\tDelta bool `json:\"delta,omitempty\"`\n\tVersion int64 `json:\"version,omitempty\"` \/\/53 usable bits\n\tBody json.RawMessage `json:\"body,omitempty\"`\n}\n\n\/\/implement eventsource.Event interface\nfunc (u *update) Id() string { return strconv.FormatInt(u.Version, 10) }\nfunc (u *update) Event() string { return \"\" }\nfunc (u *update) Data() string {\n\tb, _ := json.Marshal(u)\n\treturn string(b)\n}\n<commit_msg>remove rogue print statement<commit_after>package velox\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/mattbaird\/jsonpatch\"\n)\n\nvar (\n\t\/\/15ms is approximately highest resolution on the JS eventloop\n\tMinThrottle = 15 * time.Millisecond\n\tDefaultThrottle = 200 * time.Millisecond\n)\n\n\/\/State must be embedded into a struct to make it syncable.\ntype State struct {\n\t\/\/configuration\n\tThrottle time.Duration `json:\"-\"`\n\t\/\/internal state\n\tinitMut sync.Mutex\n\tinitd bool\n\tgostruct interface{}\n\tdataMut sync.RWMutex \/\/protects bytes\/delta\/version\n\tbytes []byte\n\tdelta []byte\n\tversion int64\n\tconnMut sync.Mutex\n\tconns map[string]*conn\n\tpush struct {\n\t\tmut sync.Mutex\n\t\ting bool\n\t\tqueued bool\n\t\tstart time.Time\n\t\twg sync.WaitGroup\n\t}\n}\n\nfunc (s *State) init(gostruct interface{}) error {\n\tif s.Throttle < MinThrottle {\n\t\ts.Throttle = DefaultThrottle\n\t}\n\t\/\/initial JSON bytes\n\tif b, err := json.Marshal(gostruct); err != nil {\n\t\treturn fmt.Errorf(\"JSON marshalling failed: %s\", err)\n\t} else {\n\t\ts.bytes = b\n\t}\n\ts.gostruct = gostruct\n\ts.version = 1\n\ts.conns = map[string]*conn{}\n\ts.initd = true\n\treturn nil\n}\n\nfunc (s *State) sync(gostruct interface{}) (*State, error) {\n\ts.initMut.Lock()\n\tdefer s.initMut.Unlock()\n\tif !s.initd {\n\t\tif err := s.init(gostruct); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if s.gostruct != gostruct {\n\t\treturn nil, errors.New(\"A different struct is already synced\")\n\t}\n\treturn s, nil\n}\n\nfunc (s *State) subscribe(conn *conn) {\n\t\/\/subscribe\n\ts.connMut.Lock()\n\ts.conns[conn.id] = conn\n\ts.connMut.Unlock()\n\t\/\/and then unsubscribe on close\n\tgo func() {\n\t\tconn.Wait()\n\t\ts.connMut.Lock()\n\t\tdelete(s.conns, conn.id)\n\t\ts.connMut.Unlock()\n\t}()\n}\n\nfunc (s *State) NumConnections() int {\n\ts.connMut.Lock()\n\tn := len(s.conns)\n\ts.connMut.Unlock()\n\treturn n\n}\n\n\/\/Send the changes from this object to all connected clients.\n\/\/Push is thread-safe and is throttled so it can be called\n\/\/with abandon.\nfunc (s *State) Push() {\n\tgo s.gopush()\n}\n\n\/\/non-blocking push\nfunc (s *State) gopush() {\n\ts.push.mut.Lock()\n\tif s.push.ing {\n\t\ts.push.queued = true\n\t\ts.push.mut.Unlock()\n\t\treturn\n\t}\n\ts.push.ing = true\n\ts.push.start = time.Now()\n\t\/\/queue cleanup\n\tdefer func() {\n\t\t\/\/measure time passed, ensure we wait at least Throttle time\n\t\ttdelta := time.Now().Sub(s.push.start)\n\t\tif t := s.Throttle - tdelta; t > 0 {\n\t\t\ttime.Sleep(t)\n\t\t}\n\t\t\/\/cleanup\n\t\ts.push.ing = false\n\t\tif s.push.queued {\n\t\t\ts.push.queued = false\n\t\t\ts.push.mut.Unlock()\n\t\t\ts.Push() \/\/auto-push\n\t\t} else {\n\t\t\ts.push.mut.Unlock()\n\t\t}\n\t}()\n\t\/\/calculate new json state\n\tnewBytes, err := json.Marshal(s.gostruct)\n\tif err != nil {\n\t\tlog.Printf(\"velox: marshal failed: %s\", err)\n\t\treturn\n\t}\n\t\/\/calculate change set from last version\n\tops, _ := jsonpatch.CreatePatch(s.bytes, newBytes)\n\tif len(s.bytes) > 0 && len(ops) > 0 {\n\t\t\/\/changes! bump version\n\t\ts.dataMut.Lock()\n\t\ts.delta, _ = json.Marshal(ops)\n\t\ts.bytes = newBytes\n\t\ts.version++\n\t\ts.dataMut.Unlock()\n\t}\n\t\/\/send this new change to each subscriber\n\ts.connMut.Lock()\n\tfor _, c := range s.conns {\n\t\tif c.version != s.version {\n\t\t\ts.push.wg.Add(1)\n\t\t\tgo func(c *conn) {\n\t\t\t\ts.pushTo(c)\n\t\t\t\ts.push.wg.Done()\n\t\t\t}(c)\n\t\t}\n\t}\n\ts.connMut.Unlock()\n\t\/\/wait for all connection pushes\n\ts.push.wg.Wait()\n\t\/\/cleanup()\n}\n\nfunc (s *State) pushTo(c *conn) {\n\tif c.version == s.version {\n\t\treturn\n\t}\n\tupdate := &update{Version: s.version}\n\ts.dataMut.RLock()\n\t\/\/choose optimal update (send the smallest)\n\tif s.delta != nil && c.version == (s.version-1) && len(s.bytes) > 0 && len(s.delta) < len(s.bytes) {\n\t\tupdate.Delta = true\n\t\tupdate.Body = s.delta\n\t} else {\n\t\tupdate.Delta = false\n\t\tupdate.Body = s.bytes\n\t}\n\t\/\/send update\n\tif err := c.send(update); err == nil {\n\t\tc.version = s.version \/\/sent! mark this version\n\t}\n\ts.dataMut.RUnlock()\n}\n\n\/\/A single update. May contain compression flags in future.\ntype update struct {\n\tPing bool `json:\"ping,omitempty\"`\n\tDelta bool `json:\"delta,omitempty\"`\n\tVersion int64 `json:\"version,omitempty\"` \/\/53 usable bits\n\tBody json.RawMessage `json:\"body,omitempty\"`\n}\n\n\/\/implement eventsource.Event interface\nfunc (u *update) Id() string { return strconv.FormatInt(u.Version, 10) }\nfunc (u *update) Event() string { return \"\" }\nfunc (u *update) Data() string {\n\tb, _ := json.Marshal(u)\n\treturn string(b)\n}\n<|endoftext|>"} {"text":"<commit_before>package boomer\n\nimport (\n\t\"time\"\n)\n\ntype RequestStats struct {\n\tEntries map[string]*StatsEntry\n\tErrors map[string]*StatsError\n\tNumRequests int64\n\tNumFailures int64\n\tMaxRequests int64\n\tLastRequestTimestamp int64\n\tStartTime int64\n}\n\nfunc (this *RequestStats) get(name string, method string) (entry *StatsEntry) {\n\tentry, ok := this.Entries[name+method]\n\tif !ok {\n\t\tentry = &StatsEntry{\n\t\t\tStats: this,\n\t\t\tName: name,\n\t\t\tMethod: method,\n\t\t\tNumReqsPerSec: make(map[int64]int64),\n\t\t\tResponseTimes: make(map[float64]int64),\n\t\t}\n\t\tentry.reset()\n\t\tthis.Entries[name+method] = entry\n\t}\n\treturn this.Entries[name+method]\n}\n\nfunc (this *RequestStats) clearAll() {\n\tthis.NumRequests = 0\n\tthis.NumFailures = 0\n\tthis.Entries = make(map[string]*StatsEntry)\n\tthis.Errors = make(map[string]*StatsError)\n\tthis.MaxRequests = 0\n\tthis.LastRequestTimestamp = 0\n\tthis.StartTime = 0\n}\n\ntype StatsEntry struct {\n\tStats *RequestStats\n\tName string\n\tMethod string\n\tNumRequests int64\n\tNumFailures int64\n\tTotalResponseTime float64\n\tMinResponseTime float64\n\tMaxResponseTime float64\n\tNumReqsPerSec map[int64]int64\n\tResponseTimes map[float64]int64\n\tTotalContentLength int64\n\tStartTime int64\n\tLastRequestTimestamp int64\n}\n\nfunc (this *StatsEntry) reset() {\n\tthis.StartTime = int64(time.Now().Unix())\n\tthis.NumRequests = 0\n\tthis.NumFailures = 0\n\tthis.TotalResponseTime = 0\n\tthis.ResponseTimes = make(map[float64]int64)\n\tthis.MinResponseTime = 0\n\tthis.MaxResponseTime = 0\n\tthis.LastRequestTimestamp = int64(time.Now().Unix())\n\tthis.NumReqsPerSec = make(map[int64]int64)\n\tthis.TotalContentLength = 0\n}\n\nfunc (this *StatsEntry) log(responseTime float64, contentLength int64) {\n\n\tthis.NumRequests += 1\n\n\tthis.logTimeOfRequest()\n\tthis.logResponseTime(responseTime)\n\n\tthis.TotalContentLength += contentLength\n\n}\n\nfunc (this *StatsEntry) logTimeOfRequest() {\n\n\tnow := int64(time.Now().Unix())\n\n\t_, ok := this.NumReqsPerSec[now]\n\tif !ok {\n\t\tthis.NumReqsPerSec[now] = 0\n\t} else {\n\t\tthis.NumReqsPerSec[now] += 1\n\t}\n\n\tthis.LastRequestTimestamp = now\n\n}\n\nfunc (this *StatsEntry) logResponseTime(responseTime float64) {\n\tthis.TotalResponseTime += responseTime\n\n\tif this.MinResponseTime == 0 {\n\t\tthis.MinResponseTime = responseTime\n\t}\n\n\tif responseTime < this.MinResponseTime {\n\t\tthis.MinResponseTime = responseTime\n\t}\n\n\tif responseTime > this.MaxResponseTime {\n\t\tthis.MaxResponseTime = responseTime\n\t}\n\n\troundedResponseTime := float64(0)\n\n\tif responseTime < 100 {\n\t\troundedResponseTime = responseTime\n\t} else if responseTime < 1000 {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -1))\n\t} else if responseTime < 10000 {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -2))\n\t} else {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -3))\n\t}\n\n\t_, ok := this.ResponseTimes[roundedResponseTime]\n\tif !ok {\n\t\tthis.ResponseTimes[roundedResponseTime] = 0\n\t} else {\n\t\tthis.ResponseTimes[roundedResponseTime] += 1\n\t}\n\n}\n\nfunc (this *StatsEntry) logError(err string) {\n\tthis.NumFailures += 1\n\tkey := MD5(this.Method, this.Name, err)\n\tentry, ok := this.Stats.Errors[key]\n\tif !ok {\n\t\tentry = &StatsError{\n\t\t\tName: this.Name,\n\t\t\tMethod: this.Method,\n\t\t\tError: err,\n\t\t}\n\t\tthis.Stats.Errors[key] = entry\n\t}\n\tentry.occured()\n}\n\nfunc (this *StatsEntry) serialize() map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tresult[\"name\"] = this.Name\n\tresult[\"method\"] = this.Method\n\tresult[\"last_request_timestamp\"] = this.LastRequestTimestamp\n\tresult[\"start_time\"] = this.StartTime\n\tresult[\"num_requests\"] = this.NumRequests\n\tresult[\"num_failures\"] = this.NumFailures\n\tresult[\"total_response_time\"] = this.TotalResponseTime\n\tresult[\"max_response_time\"] = this.MaxResponseTime\n\tresult[\"min_response_time\"] = this.MinResponseTime\n\tresult[\"total_content_length\"] = this.TotalContentLength\n\tresult[\"response_times\"] = this.ResponseTimes\n\tresult[\"num_reqs_per_sec\"] = this.NumReqsPerSec\n\treturn result\n}\n\nfunc (this *StatsEntry) getStrippedReport() map[string]interface{} {\n\treport := this.serialize()\n\tthis.reset()\n\treturn report\n}\n\ntype StatsError struct {\n\tName string\n\tMethod string\n\tError string\n\tOccurences int64\n}\n\nfunc (this *StatsError) occured() {\n\tthis.Occurences += 1\n}\n\nfunc (this *StatsError) toMap() map[string]interface{} {\n\tm := make(map[string]interface{})\n\tm[\"method\"] = this.Method\n\tm[\"name\"] = this.Name\n\tm[\"error\"] = this.Error\n\tm[\"occurences\"] = this.Occurences\n\treturn m\n}\n\nfunc reportToMasterHandler(data *map[string]interface{}) {\n\tentries := make([]interface{}, 0, len(stats.Entries))\n\tfor _, v := range stats.Entries {\n\t\tif !(v.NumRequests == 0 && v.NumFailures == 0) {\n\t\t\tentries = append(entries, v.getStrippedReport())\n\t\t}\n\t}\n\n\terrors := make(map[string]map[string]interface{})\n\tfor k, v := range stats.Errors {\n\t\terrors[k] = v.toMap()\n\t}\n\n\t(*data)[\"stats\"] = entries\n\t(*data)[\"errors\"] = errors\n\tstats.Entries = make(map[string]*StatsEntry)\n\tstats.Errors = make(map[string]*StatsError)\n}\n\ntype RequestSuccess struct {\n\trequestType string\n\tname string\n\tresponseTime float64\n\tresponseLength int64\n}\n\ntype RequestFailure struct {\n\trequestType string\n\tname string\n\tresponseTime float64\n\terror string\n}\n\nvar stats = new(RequestStats)\nvar RequestSuccessChannel = make(chan *RequestSuccess, 100)\nvar RequestFailureChannel = make(chan *RequestFailure, 100)\nvar ClearStatsChannel = make(chan bool)\n\nfunc init() {\n\tEvents.Subscribe(\"boomer:report_to_master\", reportToMasterHandler)\n\tstats.Entries = make(map[string]*StatsEntry)\n\tstats.Errors = make(map[string]*StatsError)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-RequestSuccessChannel:\n\t\t\t\tstats.get(m.name, m.requestType).log(m.responseTime, m.responseLength)\n\t\t\tcase n := <-RequestFailureChannel:\n\t\t\t\tstats.get(n.name, n.requestType).logError(n.error)\n\t\t\tcase <-ClearStatsChannel:\n\t\t\t\tstats.clearAll()\n\t\t\t}\n\t\t}\n\t}()\n}\n<commit_msg>double check<commit_after>package boomer\n\nimport (\n\t\"time\"\n\t\"log\"\n)\n\ntype RequestStats struct {\n\tEntries map[string]*StatsEntry\n\tErrors map[string]*StatsError\n\tNumRequests int64\n\tNumFailures int64\n\tMaxRequests int64\n\tLastRequestTimestamp int64\n\tStartTime int64\n}\n\nfunc (this *RequestStats) get(name string, method string) (entry *StatsEntry) {\n\tentry, ok := this.Entries[name+method]\n\tif !ok {\n\t\tentry = &StatsEntry{\n\t\t\tStats: this,\n\t\t\tName: name,\n\t\t\tMethod: method,\n\t\t\tNumReqsPerSec: make(map[int64]int64),\n\t\t\tResponseTimes: make(map[float64]int64),\n\t\t}\n\t\tentry.reset()\n\t\tthis.Entries[name+method] = entry\n\t}\n\tentry, ok = this.Entries[name+method]\n\tif !ok {\n\t\tlog.Fatal(ok, \"entry is null, why?\")\n\t}\n\treturn entry\n}\n\nfunc (this *RequestStats) clearAll() {\n\tthis.NumRequests = 0\n\tthis.NumFailures = 0\n\tthis.Entries = make(map[string]*StatsEntry)\n\tthis.Errors = make(map[string]*StatsError)\n\tthis.MaxRequests = 0\n\tthis.LastRequestTimestamp = 0\n\tthis.StartTime = 0\n}\n\ntype StatsEntry struct {\n\tStats *RequestStats\n\tName string\n\tMethod string\n\tNumRequests int64\n\tNumFailures int64\n\tTotalResponseTime float64\n\tMinResponseTime float64\n\tMaxResponseTime float64\n\tNumReqsPerSec map[int64]int64\n\tResponseTimes map[float64]int64\n\tTotalContentLength int64\n\tStartTime int64\n\tLastRequestTimestamp int64\n}\n\nfunc (this *StatsEntry) reset() {\n\tthis.StartTime = int64(time.Now().Unix())\n\tthis.NumRequests = 0\n\tthis.NumFailures = 0\n\tthis.TotalResponseTime = 0\n\tthis.ResponseTimes = make(map[float64]int64)\n\tthis.MinResponseTime = 0\n\tthis.MaxResponseTime = 0\n\tthis.LastRequestTimestamp = int64(time.Now().Unix())\n\tthis.NumReqsPerSec = make(map[int64]int64)\n\tthis.TotalContentLength = 0\n}\n\nfunc (this *StatsEntry) log(responseTime float64, contentLength int64) {\n\n\tthis.NumRequests += 1\n\n\tthis.logTimeOfRequest()\n\tthis.logResponseTime(responseTime)\n\n\tthis.TotalContentLength += contentLength\n\n}\n\nfunc (this *StatsEntry) logTimeOfRequest() {\n\n\tnow := int64(time.Now().Unix())\n\n\t_, ok := this.NumReqsPerSec[now]\n\tif !ok {\n\t\tthis.NumReqsPerSec[now] = 0\n\t} else {\n\t\tthis.NumReqsPerSec[now] += 1\n\t}\n\n\tthis.LastRequestTimestamp = now\n\n}\n\nfunc (this *StatsEntry) logResponseTime(responseTime float64) {\n\tthis.TotalResponseTime += responseTime\n\n\tif this.MinResponseTime == 0 {\n\t\tthis.MinResponseTime = responseTime\n\t}\n\n\tif responseTime < this.MinResponseTime {\n\t\tthis.MinResponseTime = responseTime\n\t}\n\n\tif responseTime > this.MaxResponseTime {\n\t\tthis.MaxResponseTime = responseTime\n\t}\n\n\troundedResponseTime := float64(0)\n\n\tif responseTime < 100 {\n\t\troundedResponseTime = responseTime\n\t} else if responseTime < 1000 {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -1))\n\t} else if responseTime < 10000 {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -2))\n\t} else {\n\t\troundedResponseTime = float64(Round(responseTime, .5, -3))\n\t}\n\n\t_, ok := this.ResponseTimes[roundedResponseTime]\n\tif !ok {\n\t\tthis.ResponseTimes[roundedResponseTime] = 0\n\t} else {\n\t\tthis.ResponseTimes[roundedResponseTime] += 1\n\t}\n\n}\n\nfunc (this *StatsEntry) logError(err string) {\n\tthis.NumFailures += 1\n\tkey := MD5(this.Method, this.Name, err)\n\tentry, ok := this.Stats.Errors[key]\n\tif !ok {\n\t\tentry = &StatsError{\n\t\t\tName: this.Name,\n\t\t\tMethod: this.Method,\n\t\t\tError: err,\n\t\t}\n\t\tthis.Stats.Errors[key] = entry\n\t}\n\tentry.occured()\n}\n\nfunc (this *StatsEntry) serialize() map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tresult[\"name\"] = this.Name\n\tresult[\"method\"] = this.Method\n\tresult[\"last_request_timestamp\"] = this.LastRequestTimestamp\n\tresult[\"start_time\"] = this.StartTime\n\tresult[\"num_requests\"] = this.NumRequests\n\tresult[\"num_failures\"] = this.NumFailures\n\tresult[\"total_response_time\"] = this.TotalResponseTime\n\tresult[\"max_response_time\"] = this.MaxResponseTime\n\tresult[\"min_response_time\"] = this.MinResponseTime\n\tresult[\"total_content_length\"] = this.TotalContentLength\n\tresult[\"response_times\"] = this.ResponseTimes\n\tresult[\"num_reqs_per_sec\"] = this.NumReqsPerSec\n\treturn result\n}\n\nfunc (this *StatsEntry) getStrippedReport() map[string]interface{} {\n\treport := this.serialize()\n\tthis.reset()\n\treturn report\n}\n\ntype StatsError struct {\n\tName string\n\tMethod string\n\tError string\n\tOccurences int64\n}\n\nfunc (this *StatsError) occured() {\n\tthis.Occurences += 1\n}\n\nfunc (this *StatsError) toMap() map[string]interface{} {\n\tm := make(map[string]interface{})\n\tm[\"method\"] = this.Method\n\tm[\"name\"] = this.Name\n\tm[\"error\"] = this.Error\n\tm[\"occurences\"] = this.Occurences\n\treturn m\n}\n\nfunc reportToMasterHandler(data *map[string]interface{}) {\n\tentries := make([]interface{}, 0, len(stats.Entries))\n\tfor _, v := range stats.Entries {\n\t\tif !(v.NumRequests == 0 && v.NumFailures == 0) {\n\t\t\tentries = append(entries, v.getStrippedReport())\n\t\t}\n\t}\n\n\terrors := make(map[string]map[string]interface{})\n\tfor k, v := range stats.Errors {\n\t\terrors[k] = v.toMap()\n\t}\n\n\t(*data)[\"stats\"] = entries\n\t(*data)[\"errors\"] = errors\n\tstats.Entries = make(map[string]*StatsEntry)\n\tstats.Errors = make(map[string]*StatsError)\n}\n\ntype RequestSuccess struct {\n\trequestType string\n\tname string\n\tresponseTime float64\n\tresponseLength int64\n}\n\ntype RequestFailure struct {\n\trequestType string\n\tname string\n\tresponseTime float64\n\terror string\n}\n\nvar stats = new(RequestStats)\nvar RequestSuccessChannel = make(chan *RequestSuccess, 100)\nvar RequestFailureChannel = make(chan *RequestFailure, 100)\nvar ClearStatsChannel = make(chan bool)\n\nfunc init() {\n\tEvents.Subscribe(\"boomer:report_to_master\", reportToMasterHandler)\n\tstats.Entries = make(map[string]*StatsEntry)\n\tstats.Errors = make(map[string]*StatsError)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-RequestSuccessChannel:\n\t\t\t\tstats.get(m.name, m.requestType).log(m.responseTime, m.responseLength)\n\t\t\tcase n := <-RequestFailureChannel:\n\t\t\t\tstats.get(n.name, n.requestType).logError(n.error)\n\t\t\tcase <-ClearStatsChannel:\n\t\t\t\tstats.clearAll()\n\t\t\t}\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"runtime\/debug\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/micro\/go-log\"\n\t\"github.com\/micro\/go-micro\/broker\"\n\t\"github.com\/micro\/go-micro\/codec\"\n\t\"github.com\/micro\/go-micro\/metadata\"\n\t\"github.com\/micro\/go-micro\/registry\"\n\t\"github.com\/micro\/go-micro\/transport\"\n\n\t\"github.com\/micro\/misc\/lib\/addr\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype rpcServer struct {\n\trpc *server\n\texit chan chan error\n\n\tsync.RWMutex\n\topts Options\n\thandlers map[string]Handler\n\tsubscribers map[*subscriber][]broker.Subscriber\n\t\/\/ used for first registration\n\tregistered bool\n\t\/\/ graceful exit\n\twg sync.WaitGroup\n}\n\nfunc newRpcServer(opts ...Option) Server {\n\toptions := newOptions(opts...)\n\treturn &rpcServer{\n\t\topts: options,\n\t\trpc: &server{\n\t\t\tname: options.Name,\n\t\t\tserviceMap: make(map[string]*service),\n\t\t\thdlrWrappers: options.HdlrWrappers,\n\t\t},\n\t\thandlers: make(map[string]Handler),\n\t\tsubscribers: make(map[*subscriber][]broker.Subscriber),\n\t\texit: make(chan chan error),\n\t}\n}\n\nfunc (s *rpcServer) accept(sock transport.Socket) {\n\tdefer func() {\n\t\t\/\/ close socket\n\t\tsock.Close()\n\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Log(\"panic recovered: \", r)\n\t\t\tlog.Log(string(debug.Stack()))\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar msg transport.Message\n\t\tif err := sock.Recv(&msg); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ we use this Timeout header to set a server deadline\n\t\tto := msg.Header[\"Timeout\"]\n\t\t\/\/ we use this Content-Type header to identify the codec needed\n\t\tct := msg.Header[\"Content-Type\"]\n\n\t\tcf, err := s.newCodec(ct)\n\t\t\/\/ TODO: needs better error handling\n\t\tif err != nil {\n\t\t\tsock.Send(&transport.Message{\n\t\t\t\tHeader: map[string]string{\n\t\t\t\t\t\"Content-Type\": \"text\/plain\",\n\t\t\t\t},\n\t\t\t\tBody: []byte(err.Error()),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tcodec := newRpcPlusCodec(&msg, sock, cf)\n\n\t\t\/\/ strip our headers\n\t\thdr := make(map[string]string)\n\t\tfor k, v := range msg.Header {\n\t\t\thdr[k] = v\n\t\t}\n\t\tdelete(hdr, \"Content-Type\")\n\t\tdelete(hdr, \"Timeout\")\n\n\t\tctx := metadata.NewContext(context.Background(), hdr)\n\n\t\t\/\/ set the timeout if we have it\n\t\tif len(to) > 0 {\n\t\t\tif n, err := strconv.ParseUint(to, 10, 64); err == nil {\n\t\t\t\tctx, _ = context.WithTimeout(ctx, time.Duration(n))\n\t\t\t}\n\t\t}\n\n\t\t\/\/ add to wait group\n\t\ts.wg.Add(1)\n\t\tdefer s.wg.Done()\n\n\t\t\/\/ TODO: needs better error handling\n\t\tif err := s.rpc.serveRequest(ctx, codec, ct); err != nil {\n\t\t\tlog.Logf(\"Unexpected error serving request, closing socket: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *rpcServer) newCodec(contentType string) (codec.NewCodec, error) {\n\tif cf, ok := s.opts.Codecs[contentType]; ok {\n\t\treturn cf, nil\n\t}\n\tif cf, ok := defaultCodecs[contentType]; ok {\n\t\treturn cf, nil\n\t}\n\treturn nil, fmt.Errorf(\"Unsupported Content-Type: %s\", contentType)\n}\n\nfunc (s *rpcServer) Options() Options {\n\ts.RLock()\n\topts := s.opts\n\ts.RUnlock()\n\treturn opts\n}\n\nfunc (s *rpcServer) Init(opts ...Option) error {\n\ts.Lock()\n\tfor _, opt := range opts {\n\t\topt(&s.opts)\n\t}\n\t\/\/ update internal server\n\ts.rpc = &server{\n\t\tname: s.opts.Name,\n\t\tserviceMap: s.rpc.serviceMap,\n\t\thdlrWrappers: s.opts.HdlrWrappers,\n\t}\n\ts.Unlock()\n\treturn nil\n}\n\nfunc (s *rpcServer) NewHandler(h interface{}, opts ...HandlerOption) Handler {\n\treturn newRpcHandler(h, opts...)\n}\n\nfunc (s *rpcServer) Handle(h Handler) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif err := s.rpc.register(h.Handler()); err != nil {\n\t\treturn err\n\t}\n\n\ts.handlers[h.Name()] = h\n\n\treturn nil\n}\n\nfunc (s *rpcServer) NewSubscriber(topic string, sb interface{}, opts ...SubscriberOption) Subscriber {\n\treturn newSubscriber(topic, sb, opts...)\n}\n\nfunc (s *rpcServer) Subscribe(sb Subscriber) error {\n\tsub, ok := sb.(*subscriber)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid subscriber: expected *subscriber\")\n\t}\n\tif len(sub.handlers) == 0 {\n\t\treturn fmt.Errorf(\"invalid subscriber: no handler functions\")\n\t}\n\n\tif err := validateSubscriber(sb); err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\t_, ok = s.subscribers[sub]\n\tif ok {\n\t\treturn fmt.Errorf(\"subscriber %v already exists\", s)\n\t}\n\ts.subscribers[sub] = nil\n\ts.Unlock()\n\treturn nil\n}\n\nfunc (s *rpcServer) Register() error {\n\t\/\/ parse address for host, port\n\tconfig := s.Options()\n\tvar advt, host string\n\tvar port int\n\n\t\/\/ check the advertise address first\n\t\/\/ if it exists then use it, otherwise\n\t\/\/ use the address\n\tif len(config.Advertise) > 0 {\n\t\tadvt = config.Advertise\n\t} else {\n\t\tadvt = config.Address\n\t}\n\n\tparts := strings.Split(advt, \":\")\n\tif len(parts) > 1 {\n\t\thost = strings.Join(parts[:len(parts)-1], \":\")\n\t\tport, _ = strconv.Atoi(parts[len(parts)-1])\n\t} else {\n\t\thost = parts[0]\n\t}\n\n\taddr, err := addr.Extract(host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ register service\n\tnode := ®istry.Node{\n\t\tId: config.Name + \"-\" + config.Id,\n\t\tAddress: addr,\n\t\tPort: port,\n\t\tMetadata: config.Metadata,\n\t}\n\n\tnode.Metadata[\"transport\"] = config.Transport.String()\n\tnode.Metadata[\"broker\"] = config.Broker.String()\n\tnode.Metadata[\"server\"] = s.String()\n\tnode.Metadata[\"registry\"] = config.Registry.String()\n\n\ts.RLock()\n\t\/\/ Maps are ordered randomly, sort the keys for consistency\n\tvar handlerList []string\n\tfor n, e := range s.handlers {\n\t\t\/\/ Only advertise non internal handlers\n\t\tif !e.Options().Internal {\n\t\t\thandlerList = append(handlerList, n)\n\t\t}\n\t}\n\tsort.Strings(handlerList)\n\n\tvar subscriberList []*subscriber\n\tfor e := range s.subscribers {\n\t\t\/\/ Only advertise non internal subscribers\n\t\tif !e.Options().Internal {\n\t\t\tsubscriberList = append(subscriberList, e)\n\t\t}\n\t}\n\tsort.Slice(subscriberList, func(i, j int) bool {\n\t\treturn subscriberList[i].topic > subscriberList[j].topic\n\t})\n\n\tvar endpoints []*registry.Endpoint\n\tfor _, n := range handlerList {\n\t\tendpoints = append(endpoints, s.handlers[n].Endpoints()...)\n\t}\n\tfor _, e := range subscriberList {\n\t\tendpoints = append(endpoints, e.Endpoints()...)\n\t}\n\ts.RUnlock()\n\n\tservice := ®istry.Service{\n\t\tName: config.Name,\n\t\tVersion: config.Version,\n\t\tNodes: []*registry.Node{node},\n\t\tEndpoints: endpoints,\n\t}\n\n\ts.Lock()\n\tregistered := s.registered\n\ts.Unlock()\n\n\tif !registered {\n\t\tlog.Logf(\"Registering node: %s\", node.Id)\n\t}\n\n\t\/\/ create registry options\n\trOpts := []registry.RegisterOption{registry.RegisterTTL(config.RegisterTTL)}\n\n\tif err := config.Registry.Register(service, rOpts...); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ already registered? don't need to register subscribers\n\tif registered {\n\t\treturn nil\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.registered = true\n\n\tfor sb, _ := range s.subscribers {\n\t\thandler := s.createSubHandler(sb, s.opts)\n\t\tvar opts []broker.SubscribeOption\n\t\tif queue := sb.Options().Queue; len(queue) > 0 {\n\t\t\topts = append(opts, broker.Queue(queue))\n\t\t}\n\t\tsub, err := config.Broker.Subscribe(sb.Topic(), handler, opts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.subscribers[sb] = []broker.Subscriber{sub}\n\t}\n\n\treturn nil\n}\n\nfunc (s *rpcServer) Deregister() error {\n\tconfig := s.Options()\n\tvar advt, host string\n\tvar port int\n\n\t\/\/ check the advertise address first\n\t\/\/ if it exists then use it, otherwise\n\t\/\/ use the address\n\tif len(config.Advertise) > 0 {\n\t\tadvt = config.Advertise\n\t} else {\n\t\tadvt = config.Address\n\t}\n\n\tparts := strings.Split(advt, \":\")\n\tif len(parts) > 1 {\n\t\thost = strings.Join(parts[:len(parts)-1], \":\")\n\t\tport, _ = strconv.Atoi(parts[len(parts)-1])\n\t} else {\n\t\thost = parts[0]\n\t}\n\n\taddr, err := addr.Extract(host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnode := ®istry.Node{\n\t\tId: config.Name + \"-\" + config.Id,\n\t\tAddress: addr,\n\t\tPort: port,\n\t}\n\n\tservice := ®istry.Service{\n\t\tName: config.Name,\n\t\tVersion: config.Version,\n\t\tNodes: []*registry.Node{node},\n\t}\n\n\tlog.Logf(\"Deregistering node: %s\", node.Id)\n\tif err := config.Registry.Deregister(service); err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\n\tif !s.registered {\n\t\ts.Unlock()\n\t\treturn nil\n\t}\n\n\ts.registered = false\n\n\tfor sb, subs := range s.subscribers {\n\t\tfor _, sub := range subs {\n\t\t\tlog.Logf(\"Unsubscribing from topic: %s\", sub.Topic())\n\t\t\tsub.Unsubscribe()\n\t\t}\n\t\ts.subscribers[sb] = nil\n\t}\n\n\ts.Unlock()\n\treturn nil\n}\n\nfunc (s *rpcServer) Start() error {\n\tregisterDebugHandler(s)\n\tconfig := s.Options()\n\n\tts, err := config.Transport.Listen(config.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Logf(\"Listening on %s\", ts.Addr())\n\ts.Lock()\n\ts.opts.Address = ts.Addr()\n\ts.Unlock()\n\n\tgo ts.Accept(s.accept)\n\n\tgo func() {\n\t\t\/\/ wait for exit\n\t\tch := <-s.exit\n\n\t\t\/\/ wait for requests to finish\n\t\tif wait(s.opts.Context) {\n\t\t\ts.wg.Wait()\n\t\t}\n\n\t\t\/\/ close transport listener\n\t\tch <- ts.Close()\n\n\t\t\/\/ disconnect the broker\n\t\tconfig.Broker.Disconnect()\n\t}()\n\n\t\/\/ TODO: subscribe to cruft\n\treturn config.Broker.Connect()\n}\n\nfunc (s *rpcServer) Stop() error {\n\tch := make(chan error)\n\ts.exit <- ch\n\treturn <-ch\n}\n\nfunc (s *rpcServer) String() string {\n\treturn \"rpc\"\n}\n<commit_msg>fix possible deadlock since code can return without unlocking the Mutex<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"runtime\/debug\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/micro\/go-log\"\n\t\"github.com\/micro\/go-micro\/broker\"\n\t\"github.com\/micro\/go-micro\/codec\"\n\t\"github.com\/micro\/go-micro\/metadata\"\n\t\"github.com\/micro\/go-micro\/registry\"\n\t\"github.com\/micro\/go-micro\/transport\"\n\n\t\"github.com\/micro\/misc\/lib\/addr\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype rpcServer struct {\n\trpc *server\n\texit chan chan error\n\n\tsync.RWMutex\n\topts Options\n\thandlers map[string]Handler\n\tsubscribers map[*subscriber][]broker.Subscriber\n\t\/\/ used for first registration\n\tregistered bool\n\t\/\/ graceful exit\n\twg sync.WaitGroup\n}\n\nfunc newRpcServer(opts ...Option) Server {\n\toptions := newOptions(opts...)\n\treturn &rpcServer{\n\t\topts: options,\n\t\trpc: &server{\n\t\t\tname: options.Name,\n\t\t\tserviceMap: make(map[string]*service),\n\t\t\thdlrWrappers: options.HdlrWrappers,\n\t\t},\n\t\thandlers: make(map[string]Handler),\n\t\tsubscribers: make(map[*subscriber][]broker.Subscriber),\n\t\texit: make(chan chan error),\n\t}\n}\n\nfunc (s *rpcServer) accept(sock transport.Socket) {\n\tdefer func() {\n\t\t\/\/ close socket\n\t\tsock.Close()\n\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Log(\"panic recovered: \", r)\n\t\t\tlog.Log(string(debug.Stack()))\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar msg transport.Message\n\t\tif err := sock.Recv(&msg); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ we use this Timeout header to set a server deadline\n\t\tto := msg.Header[\"Timeout\"]\n\t\t\/\/ we use this Content-Type header to identify the codec needed\n\t\tct := msg.Header[\"Content-Type\"]\n\n\t\tcf, err := s.newCodec(ct)\n\t\t\/\/ TODO: needs better error handling\n\t\tif err != nil {\n\t\t\tsock.Send(&transport.Message{\n\t\t\t\tHeader: map[string]string{\n\t\t\t\t\t\"Content-Type\": \"text\/plain\",\n\t\t\t\t},\n\t\t\t\tBody: []byte(err.Error()),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tcodec := newRpcPlusCodec(&msg, sock, cf)\n\n\t\t\/\/ strip our headers\n\t\thdr := make(map[string]string)\n\t\tfor k, v := range msg.Header {\n\t\t\thdr[k] = v\n\t\t}\n\t\tdelete(hdr, \"Content-Type\")\n\t\tdelete(hdr, \"Timeout\")\n\n\t\tctx := metadata.NewContext(context.Background(), hdr)\n\n\t\t\/\/ set the timeout if we have it\n\t\tif len(to) > 0 {\n\t\t\tif n, err := strconv.ParseUint(to, 10, 64); err == nil {\n\t\t\t\tctx, _ = context.WithTimeout(ctx, time.Duration(n))\n\t\t\t}\n\t\t}\n\n\t\t\/\/ add to wait group\n\t\ts.wg.Add(1)\n\t\tdefer s.wg.Done()\n\n\t\t\/\/ TODO: needs better error handling\n\t\tif err := s.rpc.serveRequest(ctx, codec, ct); err != nil {\n\t\t\tlog.Logf(\"Unexpected error serving request, closing socket: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *rpcServer) newCodec(contentType string) (codec.NewCodec, error) {\n\tif cf, ok := s.opts.Codecs[contentType]; ok {\n\t\treturn cf, nil\n\t}\n\tif cf, ok := defaultCodecs[contentType]; ok {\n\t\treturn cf, nil\n\t}\n\treturn nil, fmt.Errorf(\"Unsupported Content-Type: %s\", contentType)\n}\n\nfunc (s *rpcServer) Options() Options {\n\ts.RLock()\n\topts := s.opts\n\ts.RUnlock()\n\treturn opts\n}\n\nfunc (s *rpcServer) Init(opts ...Option) error {\n\ts.Lock()\n\tfor _, opt := range opts {\n\t\topt(&s.opts)\n\t}\n\t\/\/ update internal server\n\ts.rpc = &server{\n\t\tname: s.opts.Name,\n\t\tserviceMap: s.rpc.serviceMap,\n\t\thdlrWrappers: s.opts.HdlrWrappers,\n\t}\n\ts.Unlock()\n\treturn nil\n}\n\nfunc (s *rpcServer) NewHandler(h interface{}, opts ...HandlerOption) Handler {\n\treturn newRpcHandler(h, opts...)\n}\n\nfunc (s *rpcServer) Handle(h Handler) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif err := s.rpc.register(h.Handler()); err != nil {\n\t\treturn err\n\t}\n\n\ts.handlers[h.Name()] = h\n\n\treturn nil\n}\n\nfunc (s *rpcServer) NewSubscriber(topic string, sb interface{}, opts ...SubscriberOption) Subscriber {\n\treturn newSubscriber(topic, sb, opts...)\n}\n\nfunc (s *rpcServer) Subscribe(sb Subscriber) error {\n\tsub, ok := sb.(*subscriber)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid subscriber: expected *subscriber\")\n\t}\n\tif len(sub.handlers) == 0 {\n\t\treturn fmt.Errorf(\"invalid subscriber: no handler functions\")\n\t}\n\n\tif err := validateSubscriber(sb); err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\t_, ok = s.subscribers[sub]\n\tif ok {\n\t\treturn fmt.Errorf(\"subscriber %v already exists\", s)\n\t}\n\ts.subscribers[sub] = nil\n\treturn nil\n}\n\nfunc (s *rpcServer) Register() error {\n\t\/\/ parse address for host, port\n\tconfig := s.Options()\n\tvar advt, host string\n\tvar port int\n\n\t\/\/ check the advertise address first\n\t\/\/ if it exists then use it, otherwise\n\t\/\/ use the address\n\tif len(config.Advertise) > 0 {\n\t\tadvt = config.Advertise\n\t} else {\n\t\tadvt = config.Address\n\t}\n\n\tparts := strings.Split(advt, \":\")\n\tif len(parts) > 1 {\n\t\thost = strings.Join(parts[:len(parts)-1], \":\")\n\t\tport, _ = strconv.Atoi(parts[len(parts)-1])\n\t} else {\n\t\thost = parts[0]\n\t}\n\n\taddr, err := addr.Extract(host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ register service\n\tnode := ®istry.Node{\n\t\tId: config.Name + \"-\" + config.Id,\n\t\tAddress: addr,\n\t\tPort: port,\n\t\tMetadata: config.Metadata,\n\t}\n\n\tnode.Metadata[\"transport\"] = config.Transport.String()\n\tnode.Metadata[\"broker\"] = config.Broker.String()\n\tnode.Metadata[\"server\"] = s.String()\n\tnode.Metadata[\"registry\"] = config.Registry.String()\n\n\ts.RLock()\n\t\/\/ Maps are ordered randomly, sort the keys for consistency\n\tvar handlerList []string\n\tfor n, e := range s.handlers {\n\t\t\/\/ Only advertise non internal handlers\n\t\tif !e.Options().Internal {\n\t\t\thandlerList = append(handlerList, n)\n\t\t}\n\t}\n\tsort.Strings(handlerList)\n\n\tvar subscriberList []*subscriber\n\tfor e := range s.subscribers {\n\t\t\/\/ Only advertise non internal subscribers\n\t\tif !e.Options().Internal {\n\t\t\tsubscriberList = append(subscriberList, e)\n\t\t}\n\t}\n\tsort.Slice(subscriberList, func(i, j int) bool {\n\t\treturn subscriberList[i].topic > subscriberList[j].topic\n\t})\n\n\tvar endpoints []*registry.Endpoint\n\tfor _, n := range handlerList {\n\t\tendpoints = append(endpoints, s.handlers[n].Endpoints()...)\n\t}\n\tfor _, e := range subscriberList {\n\t\tendpoints = append(endpoints, e.Endpoints()...)\n\t}\n\ts.RUnlock()\n\n\tservice := ®istry.Service{\n\t\tName: config.Name,\n\t\tVersion: config.Version,\n\t\tNodes: []*registry.Node{node},\n\t\tEndpoints: endpoints,\n\t}\n\n\ts.Lock()\n\tregistered := s.registered\n\ts.Unlock()\n\n\tif !registered {\n\t\tlog.Logf(\"Registering node: %s\", node.Id)\n\t}\n\n\t\/\/ create registry options\n\trOpts := []registry.RegisterOption{registry.RegisterTTL(config.RegisterTTL)}\n\n\tif err := config.Registry.Register(service, rOpts...); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ already registered? don't need to register subscribers\n\tif registered {\n\t\treturn nil\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.registered = true\n\n\tfor sb, _ := range s.subscribers {\n\t\thandler := s.createSubHandler(sb, s.opts)\n\t\tvar opts []broker.SubscribeOption\n\t\tif queue := sb.Options().Queue; len(queue) > 0 {\n\t\t\topts = append(opts, broker.Queue(queue))\n\t\t}\n\t\tsub, err := config.Broker.Subscribe(sb.Topic(), handler, opts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.subscribers[sb] = []broker.Subscriber{sub}\n\t}\n\n\treturn nil\n}\n\nfunc (s *rpcServer) Deregister() error {\n\tconfig := s.Options()\n\tvar advt, host string\n\tvar port int\n\n\t\/\/ check the advertise address first\n\t\/\/ if it exists then use it, otherwise\n\t\/\/ use the address\n\tif len(config.Advertise) > 0 {\n\t\tadvt = config.Advertise\n\t} else {\n\t\tadvt = config.Address\n\t}\n\n\tparts := strings.Split(advt, \":\")\n\tif len(parts) > 1 {\n\t\thost = strings.Join(parts[:len(parts)-1], \":\")\n\t\tport, _ = strconv.Atoi(parts[len(parts)-1])\n\t} else {\n\t\thost = parts[0]\n\t}\n\n\taddr, err := addr.Extract(host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnode := ®istry.Node{\n\t\tId: config.Name + \"-\" + config.Id,\n\t\tAddress: addr,\n\t\tPort: port,\n\t}\n\n\tservice := ®istry.Service{\n\t\tName: config.Name,\n\t\tVersion: config.Version,\n\t\tNodes: []*registry.Node{node},\n\t}\n\n\tlog.Logf(\"Deregistering node: %s\", node.Id)\n\tif err := config.Registry.Deregister(service); err != nil {\n\t\treturn err\n\t}\n\n\ts.Lock()\n\n\tif !s.registered {\n\t\ts.Unlock()\n\t\treturn nil\n\t}\n\n\ts.registered = false\n\n\tfor sb, subs := range s.subscribers {\n\t\tfor _, sub := range subs {\n\t\t\tlog.Logf(\"Unsubscribing from topic: %s\", sub.Topic())\n\t\t\tsub.Unsubscribe()\n\t\t}\n\t\ts.subscribers[sb] = nil\n\t}\n\n\ts.Unlock()\n\treturn nil\n}\n\nfunc (s *rpcServer) Start() error {\n\tregisterDebugHandler(s)\n\tconfig := s.Options()\n\n\tts, err := config.Transport.Listen(config.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Logf(\"Listening on %s\", ts.Addr())\n\ts.Lock()\n\ts.opts.Address = ts.Addr()\n\ts.Unlock()\n\n\tgo ts.Accept(s.accept)\n\n\tgo func() {\n\t\t\/\/ wait for exit\n\t\tch := <-s.exit\n\n\t\t\/\/ wait for requests to finish\n\t\tif wait(s.opts.Context) {\n\t\t\ts.wg.Wait()\n\t\t}\n\n\t\t\/\/ close transport listener\n\t\tch <- ts.Close()\n\n\t\t\/\/ disconnect the broker\n\t\tconfig.Broker.Disconnect()\n\t}()\n\n\t\/\/ TODO: subscribe to cruft\n\treturn config.Broker.Connect()\n}\n\nfunc (s *rpcServer) Stop() error {\n\tch := make(chan error)\n\ts.exit <- ch\n\treturn <-ch\n}\n\nfunc (s *rpcServer) String() string {\n\treturn \"rpc\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2019 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage adb\n\nimport (\n\t\"bufio\"\n\t\"container\/ring\"\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\n\tperfetto_pb \"perfetto\/config\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/gapid\/core\/app\/crash\"\n\t\"github.com\/google\/gapid\/core\/event\/task\"\n\t\"github.com\/google\/gapid\/core\/log\"\n)\n\n\/\/ StartPerfettoTrace starts a perfetto trace on this device.\nfunc (b *binding) StartPerfettoTrace(ctx context.Context, config *perfetto_pb.TraceConfig, out string, stop task.Signal) error {\n\t\/\/ Silently attempt to delete the old trace in case the user has changed\n\t\/\/ If the user has changed from shell => root, this is fine. For root => shell,\n\t\/\/ the root user will have still have to manually delete the old trace if it exists\n\tb.RemoveFile(ctx, out)\n\n\treader, stdout := io.Pipe()\n\tlogRing := ring.New(10)\n\tdata, err := proto.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfail := make(chan error, 1)\n\tcrash.Go(func() {\n\t\tbuf := bufio.NewReader(reader)\n\t\tfor {\n\t\t\tline, e := buf.ReadString('\\n')\n\t\t\tlogRing.Value = line\n\t\t\tlogRing = logRing.Next()\n\t\t\tswitch e {\n\t\t\tdefault:\n\t\t\t\tlog.E(ctx, \"[perfetto] Read error %v\", e)\n\t\t\t\tfail <- e\n\t\t\t\treturn\n\t\t\tcase io.EOF:\n\t\t\t\tfail <- nil\n\t\t\t\treturn\n\t\t\tcase nil:\n\t\t\t\tlog.I(ctx, \"[perfetto] %s\", strings.TrimSuffix(line, \"\\n\"))\n\t\t\t}\n\t\t}\n\t})\n\n\tprocess, err := b.Shell(\"base64\", \"-d\", \"|\", \"perfetto\", \"-c\", \"-\", \"-o\", out).\n\t\tRead(strings.NewReader(base64.StdEncoding.EncodeToString(data))).\n\t\tCapture(stdout, stdout).\n\t\tStart(ctx)\n\tif err != nil {\n\t\tstdout.Close()\n\t\treturn err\n\t}\n\n\twait := make(chan error, 1)\n\tcrash.Go(func() {\n\t\twait <- process.Wait(ctx)\n\t})\n\n\tselect {\n\tcase err = <-fail:\n\t\treturn err\n\tcase err = <-wait:\n\t\t\/\/ Use perfetto's error messaging instead of the default\n\t\tif err != nil {\n\t\t\tmsg := \"\"\n\t\t\tlogRing.Do(func(p interface{}) {\n\t\t\t\tif p != nil && len(p.(string)) > 0 {\n\t\t\t\t\tmsg += p.(string) + \"\\n\"\n\t\t\t\t}\n\t\t\t})\n\t\t\t\/\/ Remove ANSI color codes from perfetto output\n\t\t\tansiRegex := regexp.MustCompile(\"\\u001b\\\\[\\\\d+m\")\n\t\t\tmsg = ansiRegex.ReplaceAllString(msg, \"\")\n\t\t\terr = errors.New(msg)\n\t\t}\n\tcase <-stop:\n\t\t\/\/ TODO: figure out why \"killall -2 perfetto\" doesn't work.\n\t\tvar pid string\n\t\tif pid, err = b.Shell(\"pidof perfetto\").Call(ctx); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif err = b.Shell(\"kill -2 \" + pid).Run(ctx); err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = <-wait\n\t}\n\n\tstdout.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn <-fail\n}\n<commit_msg>Small fix to the Perfetto error handling.<commit_after>\/\/ Copyright (C) 2019 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage adb\n\nimport (\n\t\"bufio\"\n\t\"container\/ring\"\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\n\tperfetto_pb \"perfetto\/config\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/gapid\/core\/app\/crash\"\n\t\"github.com\/google\/gapid\/core\/event\/task\"\n\t\"github.com\/google\/gapid\/core\/log\"\n)\n\nvar (\n\tansiRegex = regexp.MustCompile(\"\\u001b\\\\[\\\\d+m\")\n)\n\n\/\/ StartPerfettoTrace starts a perfetto trace on this device.\nfunc (b *binding) StartPerfettoTrace(ctx context.Context, config *perfetto_pb.TraceConfig, out string, stop task.Signal) error {\n\t\/\/ Silently attempt to delete the old trace in case the user has changed\n\t\/\/ If the user has changed from shell => root, this is fine. For root => shell,\n\t\/\/ the root user will have still have to manually delete the old trace if it exists\n\tb.RemoveFile(ctx, out)\n\n\treader, stdout := io.Pipe()\n\tlogRing := ring.New(10)\n\tdata, err := proto.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfail := make(chan error, 1)\n\tcrash.Go(func() {\n\t\tbuf := bufio.NewReader(reader)\n\t\tfor {\n\t\t\tline, e := buf.ReadString('\\n')\n\t\t\tlogRing.Value = line\n\t\t\tlogRing = logRing.Next()\n\t\t\tswitch e {\n\t\t\tdefault:\n\t\t\t\tlog.E(ctx, \"[perfetto] Read error %v\", e)\n\t\t\t\tfail <- e\n\t\t\t\treturn\n\t\t\tcase io.EOF:\n\t\t\t\tfail <- nil\n\t\t\t\treturn\n\t\t\tcase nil:\n\t\t\t\tlog.I(ctx, \"[perfetto] %s\", strings.TrimSuffix(line, \"\\n\"))\n\t\t\t}\n\t\t}\n\t})\n\n\tprocess, err := b.Shell(\"base64\", \"-d\", \"|\", \"perfetto\", \"-c\", \"-\", \"-o\", out).\n\t\tRead(strings.NewReader(base64.StdEncoding.EncodeToString(data))).\n\t\tCapture(stdout, stdout).\n\t\tStart(ctx)\n\tif err != nil {\n\t\tstdout.Close()\n\t\treturn err\n\t}\n\n\twait := make(chan error, 1)\n\tcrash.Go(func() {\n\t\twait <- process.Wait(ctx)\n\t})\n\n\tselect {\n\tcase err = <-fail:\n\t\treturn err\n\tcase err = <-wait:\n\t\t\/\/ Use perfetto's error messaging instead of the default\n\t\tif err != nil {\n\t\t\tmsg := \"\"\n\t\t\tlogRing.Do(func(p interface{}) {\n\t\t\t\tif s, ok := p.(string); ok && len(s) > 0 {\n\t\t\t\t\tmsg += \"\\n\" + s\n\t\t\t\t}\n\t\t\t})\n\t\t\t\/\/ Remove ANSI color codes from perfetto output\n\t\t\tmsg = ansiRegex.ReplaceAllString(msg, \"\")\n\n\t\t\tif msg != \"\" {\n\t\t\t\terr = errors.New(err.Error() + \"\\nPerfetto Output:\" + msg)\n\t\t\t}\n\t\t}\n\tcase <-stop:\n\t\t\/\/ TODO: figure out why \"killall -2 perfetto\" doesn't work.\n\t\tvar pid string\n\t\tif pid, err = b.Shell(\"pidof perfetto\").Call(ctx); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif err = b.Shell(\"kill -2 \" + pid).Run(ctx); err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = <-wait\n\t}\n\n\tstdout.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn <-fail\n}\n<|endoftext|>"} {"text":"<commit_before>package stats\n\nimport (\n\t\"math\"\n\t\"sort\"\n)\n\n\/\/ Min finds the lowest number in a slice\nfunc Min(input []float64) (min float64) {\n\n\t\/\/ Get the initial value\n\t\/\/ @todo add error if no length\n\tif len(input) > 0 {\n\t\tmin = input[0]\n\t}\n\n\t\/\/ Iterate until done checking for a lower value\n\tfor i := 1; i < len(input); i++ {\n\t\tif input[i] < min {\n\t\t\tmin = input[i]\n\t\t}\n\t}\n\treturn min\n}\n\n\/\/ Max finds the highest number in a slice\nfunc Max(input []float64) (max float64) {\n\tif len(input) > 0 {\n\t\tmax = input[0]\n\t}\n\tfor i := 1; i < len(input); i++ {\n\t\tif input[i] > max {\n\t\t\tmax = input[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/ Sum adds all the numbers of a slice together\nfunc Sum(input []float64) (sum float64) {\n\tfor _, n := range input {\n\t\tsum += float64(n)\n\t}\n\treturn sum\n}\n\n\/\/ Mean gets the average of a slice of numbers\nfunc Mean(input []float64) (mean float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\tsum := Sum(input)\n\n\treturn sum \/ float64(len(input))\n}\n\n\/\/ Median gets the median number in a slice of numbers\nfunc Median(input []float64) (median float64) {\n\n\t\/\/ Start by sorting the slice\n\tsort.Float64s(input)\n\n\tl := len(input)\n\n\t\/\/ No math is needed if there are no numbers\n\t\/\/ For even numbers we add the two middle numbers\n\t\/\/ and divide by two using the mean function above\n\t\/\/ For odd numbers we just use the middle number\n\tif l == 0 {\n\t\treturn 0.0\n\t} else if l%2 == 0 {\n\t\tmedian = Mean(input[l\/2-1 : l\/2+1])\n\t} else {\n\t\tmedian = float64(input[l\/2])\n\t}\n\n\treturn median\n}\n\n\/\/ Mode gets the mode of a slice of numbers\nfunc Mode(input []float64) (mode []float64) {\n\n\t\/\/ Create a map with the counts for each number\n\tm := make(map[float64]int)\n\tfor _, v := range input {\n\t\tm[v]++\n\t}\n\n\t\/\/ Find the highest counts to return as a slice\n\t\/\/ of ints to accomodate duplicate counts\n\tvar current int\n\tfor k, v := range m {\n\n\t\t\/\/ Depending if the count is lower, higher\n\t\t\/\/ or equal to the current numbers count\n\t\t\/\/ we return nothing, start a new mode or\n\t\t\/\/ append to the current mode\n\t\tswitch {\n\t\tcase v < current:\n\t\tcase v > current:\n\t\t\tcurrent = v\n\t\t\tmode = append(mode[:0], k)\n\t\tdefault:\n\t\t\tmode = append(mode, k)\n\t\t}\n\t}\n\n\t\/\/ Finally we check to see if there actually was\n\t\/\/ a mode by checking the length of the input and\n\t\/\/ mode against eachother\n\tl := len(input)\n\tlm := len(mode)\n\tif l == lm {\n\t\treturn []float64{}\n\t}\n\treturn mode\n}\n\n\/\/ Variance finds the variance for both population and sample data\nfunc Variance(input []float64, sample int) (variance float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Sum the square of the mean subtracted from each number\n\tm := Mean(input)\n\tfor _, n := range input {\n\t\tvariance += (float64(n) - m) * (float64(n) - m)\n\t}\n\n\t\/\/ When getting the mean of the squared differences\n\t\/\/ \"sample\" will allow us to know if it's a sample\n\t\/\/ or population and wether to subtract by one or not\n\treturn variance \/ float64((len(input) - (1 * sample)))\n}\n\n\/\/ VarP finds the amount of variance within a population\nfunc VarP(input []float64) (sdev float64) {\n\treturn Variance(input, 0)\n}\n\n\/\/ VarS finds the amount of variance within a sample\nfunc VarS(input []float64) (sdev float64) {\n\treturn Variance(input, 1)\n}\n\n\/\/ StdDevP finds the amount of variation from the population\nfunc StdDevP(input []float64) (sdev float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Get the population variance\n\tm := VarP(input)\n\n\t\/\/ Return the population standard deviation\n\treturn math.Pow(m, 0.5)\n}\n\n\/\/ StdDevS finds the amount of variation from a sample\nfunc StdDevS(input []float64) (sdev float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Get the sample variance\n\tm := VarS(input)\n\n\t\/\/ Return the sample standard deviation\n\treturn math.Pow(m, 0.5)\n}\n\n\/\/ Round a float to a specific decimal place or precision\nfunc Round(input float64, places int) (rounded float64) {\n\n\t\/\/ If the float is not a number we just return it\n\t\/\/ @todo add an error\n\tif math.IsNaN(input) {\n\t\treturn input\n\t}\n\n\t\/\/ Find out the actual sign and correct the input for later\n\tsign := 1.0\n\tif input < 0 {\n\t\tsign = -1\n\t\tinput *= -1\n\t}\n\n\t\/\/ Use the places arg to get the amount of precision wanted\n\tprecision := math.Pow(10, float64(places))\n\n\t\/\/ Find the decimal place we are looking to round\n\tdigit := input * precision\n\n\t\/\/ Get the actual decimal number as a fraction to be compared\n\t_, decimal := math.Modf(digit)\n\n\t\/\/ If the decimal is less than .5 we round down otherwise up\n\tif decimal >= 0.5 {\n\t\trounded = math.Ceil(digit)\n\t} else {\n\t\trounded = math.Floor(digit)\n\t}\n\n\t\/\/ Finally we do the math to actually create a rounded number\n\treturn rounded \/ precision * sign\n}\n\n\/\/ Percentile finds the relative standing in a slice of floats\nfunc Percentile(input []float64, percent float64) (percentile float64) {\n\n\t\/\/ Sort the data\n\tsort.Float64s(input)\n\n\t\/\/ Multiple percent by length of input\n\tindex := (percent \/ 100) * float64(len(input))\n\n\t\/\/ Check if the index is a whole number\n\tif index == float64(int64(index)) {\n\n\t\t\/\/ Convert float to int\n\t\ti := Float64ToInt(index)\n\n\t\t\/\/ Find the average of the index and following values\n\t\tpercentile = Mean([]float64{input[i-1], input[i]})\n\n\t} else {\n\n\t\t\/\/ Convert float to int\n\t\ti := Float64ToInt(index)\n\n\t\t\/\/ Find the value at the index\n\t\tpercentile = input[i-1]\n\n\t}\n\n\treturn percentile\n\n}\n\n\/\/ Float64ToInt rounds a float64 to an int\nfunc Float64ToInt(input float64) (output int) {\n\n\t\/\/ Round input to nearest whole number and convert to int\n\treturn int(Round(input, 0))\n\n}\n<commit_msg>Add check for slices with a single item<commit_after>package stats\n\nimport (\n\t\"math\"\n\t\"sort\"\n)\n\n\/\/ Min finds the lowest number in a slice\nfunc Min(input []float64) (min float64) {\n\n\t\/\/ Get the initial value\n\t\/\/ @todo add error if no length\n\tif len(input) > 0 {\n\t\tmin = input[0]\n\t}\n\n\t\/\/ Iterate until done checking for a lower value\n\tfor i := 1; i < len(input); i++ {\n\t\tif input[i] < min {\n\t\t\tmin = input[i]\n\t\t}\n\t}\n\treturn min\n}\n\n\/\/ Max finds the highest number in a slice\nfunc Max(input []float64) (max float64) {\n\tif len(input) > 0 {\n\t\tmax = input[0]\n\t}\n\tfor i := 1; i < len(input); i++ {\n\t\tif input[i] > max {\n\t\t\tmax = input[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/ Sum adds all the numbers of a slice together\nfunc Sum(input []float64) (sum float64) {\n\tfor _, n := range input {\n\t\tsum += float64(n)\n\t}\n\treturn sum\n}\n\n\/\/ Mean gets the average of a slice of numbers\nfunc Mean(input []float64) (mean float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\tsum := Sum(input)\n\n\treturn sum \/ float64(len(input))\n}\n\n\/\/ Median gets the median number in a slice of numbers\nfunc Median(input []float64) (median float64) {\n\n\t\/\/ Start by sorting the slice\n\tsort.Float64s(input)\n\n\tl := len(input)\n\n\t\/\/ No math is needed if there are no numbers\n\t\/\/ For even numbers we add the two middle numbers\n\t\/\/ and divide by two using the mean function above\n\t\/\/ For odd numbers we just use the middle number\n\tif l == 0 {\n\t\treturn 0.0\n\t} else if l%2 == 0 {\n\t\tmedian = Mean(input[l\/2-1 : l\/2+1])\n\t} else {\n\t\tmedian = float64(input[l\/2])\n\t}\n\n\treturn median\n}\n\n\/\/ Mode gets the mode of a slice of numbers\nfunc Mode(input []float64) (mode []float64) {\n\n\t\/\/ Return the input if there's only one number\n\t\/\/ @todo add error for empty slice\n\tl := len(input)\n\tif l == 1 {\n\t\treturn input\n\t}\n\n\t\/\/ Create a map with the counts for each number\n\tm := make(map[float64]int)\n\tfor _, v := range input {\n\t\tm[v]++\n\t}\n\n\t\/\/ Find the highest counts to return as a slice\n\t\/\/ of ints to accomodate duplicate counts\n\tvar current int\n\tfor k, v := range m {\n\n\t\t\/\/ Depending if the count is lower, higher\n\t\t\/\/ or equal to the current numbers count\n\t\t\/\/ we return nothing, start a new mode or\n\t\t\/\/ append to the current mode\n\t\tswitch {\n\t\tcase v < current:\n\t\tcase v > current:\n\t\t\tcurrent = v\n\t\t\tmode = append(mode[:0], k)\n\t\tdefault:\n\t\t\tmode = append(mode, k)\n\t\t}\n\t}\n\n\t\/\/ Finally we check to see if there actually was\n\t\/\/ a mode by checking the length of the input and\n\t\/\/ mode against eachother\n\tlm := len(mode)\n\tif l == lm {\n\t\treturn []float64{}\n\t}\n\treturn mode\n}\n\n\/\/ Variance finds the variance for both population and sample data\nfunc Variance(input []float64, sample int) (variance float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Sum the square of the mean subtracted from each number\n\tm := Mean(input)\n\tfor _, n := range input {\n\t\tvariance += (float64(n) - m) * (float64(n) - m)\n\t}\n\n\t\/\/ When getting the mean of the squared differences\n\t\/\/ \"sample\" will allow us to know if it's a sample\n\t\/\/ or population and wether to subtract by one or not\n\treturn variance \/ float64((len(input) - (1 * sample)))\n}\n\n\/\/ VarP finds the amount of variance within a population\nfunc VarP(input []float64) (sdev float64) {\n\treturn Variance(input, 0)\n}\n\n\/\/ VarS finds the amount of variance within a sample\nfunc VarS(input []float64) (sdev float64) {\n\treturn Variance(input, 1)\n}\n\n\/\/ StdDevP finds the amount of variation from the population\nfunc StdDevP(input []float64) (sdev float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Get the population variance\n\tm := VarP(input)\n\n\t\/\/ Return the population standard deviation\n\treturn math.Pow(m, 0.5)\n}\n\n\/\/ StdDevS finds the amount of variation from a sample\nfunc StdDevS(input []float64) (sdev float64) {\n\tif len(input) == 0 {\n\t\treturn 0.0\n\t}\n\n\t\/\/ Get the sample variance\n\tm := VarS(input)\n\n\t\/\/ Return the sample standard deviation\n\treturn math.Pow(m, 0.5)\n}\n\n\/\/ Round a float to a specific decimal place or precision\nfunc Round(input float64, places int) (rounded float64) {\n\n\t\/\/ If the float is not a number we just return it\n\t\/\/ @todo add an error\n\tif math.IsNaN(input) {\n\t\treturn input\n\t}\n\n\t\/\/ Find out the actual sign and correct the input for later\n\tsign := 1.0\n\tif input < 0 {\n\t\tsign = -1\n\t\tinput *= -1\n\t}\n\n\t\/\/ Use the places arg to get the amount of precision wanted\n\tprecision := math.Pow(10, float64(places))\n\n\t\/\/ Find the decimal place we are looking to round\n\tdigit := input * precision\n\n\t\/\/ Get the actual decimal number as a fraction to be compared\n\t_, decimal := math.Modf(digit)\n\n\t\/\/ If the decimal is less than .5 we round down otherwise up\n\tif decimal >= 0.5 {\n\t\trounded = math.Ceil(digit)\n\t} else {\n\t\trounded = math.Floor(digit)\n\t}\n\n\t\/\/ Finally we do the math to actually create a rounded number\n\treturn rounded \/ precision * sign\n}\n\n\/\/ Percentile finds the relative standing in a slice of floats\nfunc Percentile(input []float64, percent float64) (percentile float64) {\n\n\t\/\/ Sort the data\n\tsort.Float64s(input)\n\n\t\/\/ Multiple percent by length of input\n\tindex := (percent \/ 100) * float64(len(input))\n\n\t\/\/ Check if the index is a whole number\n\tif index == float64(int64(index)) {\n\n\t\t\/\/ Convert float to int\n\t\ti := Float64ToInt(index)\n\n\t\t\/\/ Find the average of the index and following values\n\t\tpercentile = Mean([]float64{input[i-1], input[i]})\n\n\t} else {\n\n\t\t\/\/ Convert float to int\n\t\ti := Float64ToInt(index)\n\n\t\t\/\/ Find the value at the index\n\t\tpercentile = input[i-1]\n\n\t}\n\n\treturn percentile\n\n}\n\n\/\/ Float64ToInt rounds a float64 to an int\nfunc Float64ToInt(input float64) (output int) {\n\n\t\/\/ Round input to nearest whole number and convert to int\n\treturn int(Round(input, 0))\n\n}\n<|endoftext|>"} {"text":"<commit_before>package stein\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/v1\/yaml\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tdocSep = regexp.MustCompile(`(?m)^---$|^\\.\\.\\.$`)\n\ttapTestLine = regexp.MustCompile(`(?m)^(ok|not ok)(?: (\\d+))?( [^#]+)?(?:\\s*#(.*))?$`)\n\ttapPlan = regexp.MustCompile(`(?m)^\\d..(\\d+|N).*$`)\n\ttapVersion = regexp.MustCompile(`(?m)^TAP version \\d+$`)\n\ttapBailOut = regexp.MustCompile(`(?m)^Bail out!\\s*(.*)$`)\n)\n\nfunc Parse(r io.Reader) (*Suite, error) {\n\tparser := Parser{&DefaultHandler{}}\n\treturn parser.Parse(r)\n}\n\ntype Parser struct {\n\tHandler Handler\n}\n\nfunc (p *Parser) Parse(r io.Reader) (*Suite, error) {\n\trd := bufio.NewReader(r)\n\tb, err := rd.Peek(1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif p.Handler == nil {\n\t\tp.Handler = &DefaultHandler{}\n\t}\n\n\ts := new(Suite)\n\tswitch b[0] {\n\tcase '{':\n\t\treturn s, s.parse(newDecoder(\"json\", rd), p.Handler)\n\tcase '-':\n\t\treturn s, s.parse(newDecoder(\"yaml\", rd), p.Handler)\n\tdefault:\n\t\treturn ParseTap(rd, p.Handler)\n\t}\n}\n\ntype Time struct {\n\ttime.Time\n}\n\nfunc (t *Time) parse(val string) (err error) {\n\tvar ti time.Time\n\tif ti, err = time.Parse(\"2006-01-02 15:04:05\", val); err != nil {\n\t\tif ti, err = time.Parse(time.RFC3339, val); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\t(*t).Time = ti\n\treturn\n}\n\nfunc (t *Time) UnmarshalJSON(data []byte) (err error) {\n\t\/\/ parse time, stripping the quotes\n\treturn t.parse(string(data[1 : len(data)-1]))\n}\nfunc (t *Time) GetYAML() (tag string, value interface{}) {\n\treturn \"\", t.String()\n}\nfunc (t *Time) SetYAML(tag string, value interface{}) bool {\n\tif v, ok := value.(string); ok {\n\t\treturn t.parse(strings.Trim(v, `'\"`)) == nil\n\t}\n\treturn false\n}\n\ntype Suite struct {\n\tType string\n\tStart *Time\n\tCount int\n\tSeed int\n\tRev int\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\n\tTests []*Test\n\tCases []*Case\n\tNotes []Note\n\tFinal Tally\n}\n\nfunc (s *Suite) parse(dec decoder, handler Handler) (err error) {\n\thandler.HandleBeforeStream()\n\tvar lastCase *Case\n\tfor dec.Scan() {\n\t\tdoc := dec.Scanner.Text()\n\t\thandler.HandleDoc(doc, dec.format)\n\n\t\tif strings.TrimSpace(doc) == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif s.Start == nil {\n\t\t\tif err = dec.Unmarshal([]byte(doc), &s); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.HandleSuite(s)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar typ string\n\t\tif typ, err = dec.Type([]byte(doc)); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch typ {\n\t\tcase \"case\":\n\t\t\tvar c Case\n\t\t\tif err = dec.Unmarshal([]byte(doc), &c); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif c.Level == 0 {\n\t\t\t\ts.Cases = append(s.Cases, &c)\n\t\t\t\tlastCase = &c\n\t\t\t} else if lastCase == nil {\n\t\t\t\t\/\/ TODO: possibly just add this case to the suite\n\t\t\t\treturn fmt.Errorf(\"No parent found for test case: %v\", c)\n\t\t\t} else {\n\t\t\t\t\/\/ find the parent\n\t\t\t\tfor c.Level <= lastCase.Level {\n\t\t\t\t\tlastCase = lastCase.parent\n\t\t\t\t}\n\t\t\t\t\/\/ TODO: strict check to ensure c.Level == lastCase.Level + 1\n\t\t\t\tc.parent = lastCase\n\t\t\t\tlastCase.Subcases = append(lastCase.Subcases, &c)\n\t\t\t}\n\t\t\thandler.HandleCase(&c)\n\t\tcase \"test\":\n\t\t\tvar t Test\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif lastCase != nil {\n\t\t\t\tlastCase.Tests = append(lastCase.Tests, &t)\n\t\t\t} else {\n\t\t\t\ts.Tests = append(s.Tests, &t)\n\t\t\t}\n\t\t\thandler.HandleTest(&t)\n\t\tcase \"note\":\n\t\t\tvar n Note\n\t\t\tif err = dec.Unmarshal([]byte(doc), &n); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.Notes = append(s.Notes, n)\n\t\t\thandler.HandleNote(&n)\n\t\tcase \"tally\":\n\t\t\tvar t Tally\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.HandleTally(&t)\n\t\tcase \"final\":\n\t\t\tvar t Tally\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.Final = t\n\t\t\thandler.HandleFinal(&t)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Invalid type: %s\", typ)\n\t\t\treturn\n\t\t}\n\t}\n\thandler.HandleAfterStream()\n\treturn\n}\n\nfunc (s Suite) Docs() (ret []interface{}) {\n\tret = append(ret, s)\n\tfor _, t := range s.Tests {\n\t\tret = append(ret, t)\n\t}\n\n\tfor _, c := range s.Cases {\n\t\tret = append(ret, c.Docs()...)\n\t}\n\n\tfor _, n := range s.Notes {\n\t\tret = append(ret, n)\n\t}\n\n\tret = append(ret, s.Final)\n\treturn\n}\n\nfunc (s Suite) ToTapY() ([]byte, error) {\n\tvar ret []byte\n\tfor _, doc := range s.Docs() {\n\t\tif b, err := yaml.Marshal(doc); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tret = append(ret, []byte(\"---\\n\")...)\n\t\t\tret = append(ret, b...)\n\t\t\tret = append(ret, '\\n')\n\t\t}\n\t}\n\treturn append(ret, []byte(\"...\")...), nil\n}\n\ntype Case struct {\n\tType string\n\tSubtype string\n\tLabel string\n\tLevel int\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\n\tTests []*Test `yaml:\"-\" json:\"-\"`\n\tSubcases []*Case `yaml:\"-\" json:\"-\"`\n\n\tparent *Case\n}\n\nfunc (c Case) Docs() (ret []interface{}) {\n\tret = append(ret, c)\n\tfor _, t := range c.Tests {\n\t\tret = append(ret, t)\n\t}\n\tfor _, c := range c.Subcases {\n\t\tret = append(ret, c.Docs()...)\n\t}\n\treturn\n}\n\n\/\/ TODO: allow strings via UnmarshalJSON\/YAML\ntype Snippet []map[string]string\n\ntype Test struct {\n\tType string\n\tSubtype string `yaml:\",omitempty\" json:\",omitempty\"`\n\tStatus string\n\tSetup string `yaml:\",omitempty\" json:\",omitempty\"`\n\tLabel string\n\tExpected interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\tReturned interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\tFile string `yaml:\",omitempty\" json:\",omitempty\"`\n\tLine int `yaml:\",omitempty\" json:\",omitempty\"`\n\tSource string `yaml:\",omitempty\" json:\",omitempty\"`\n\tSnippet Snippet `yaml:\",omitempty\" json:\",omitempty\"`\n\tCoverage struct {\n\t\tFile string\n\t\tLine interface{}\n\t\tCode string\n\t} `yaml:\",omitempty\" json:\",omitempty\"`\n\tException struct {\n\t\tMessage string\n\t\tFile string\n\t\tLine int\n\t\tSource string\n\t\tSnippet Snippet\n\t\tBacktrace interface{}\n\t} `yaml:\",omitempty\" json:\",omitempty\"`\n\tStdout string `yaml:\",omitempty\" json:\",omitempty\"`\n\tStderr string `yaml:\",omitempty\" json:\",omitempty\"`\n\tTime float64\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype Note struct {\n\tType string\n\tText string\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype Tally struct {\n\tType string\n\tTime float64\n\tCounts struct {\n\t\tTotal int\n\t\tPass int\n\t\tFail int\n\t\tError int\n\t\tOmit int\n\t\tTodo int\n\t}\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype decoder struct {\n\t*bufio.Scanner\n\tformat string\n\tUnmarshal func([]byte, interface{}) error\n}\n\nfunc scanYAMLDoc(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif loc := docSep.FindIndex(data); loc != nil {\n\t\tif loc[0] == 0 {\n\t\t\t\/\/ just a start document marker\n\t\t\treturn loc[1], nil, nil\n\t\t}\n\t\t\/\/ We found a complete document\n\t\treturn loc[1], data[0:loc[0]], nil\n\t}\n\tif atEOF {\n\t\t\/\/ if we're at EOF without having matched docSep, there's nothing else to get\n\t\treturn len(data), nil, nil\n\t}\n\t\/\/ Request more data.\n\treturn 0, nil, nil\n}\n\nfunc newDecoder(typ string, r io.Reader) decoder {\n\tdec := decoder{Scanner: bufio.NewScanner(r)}\n\t\/\/ TODO: handle plain TAP with a decoder as well\n\tswitch typ {\n\tcase \"json\":\n\t\tdec.format = \"json\"\n\t\tdec.Unmarshal = json.Unmarshal\n\tcase \"yaml\":\n\t\tdec.format = \"yaml\"\n\t\tdec.Unmarshal = yaml.Unmarshal\n\t\tdec.Scanner.Split(scanYAMLDoc)\n\tdefault:\n\t\tpanic(\"Unsupported decoder\")\n\t}\n\treturn dec\n}\n\nfunc (dec decoder) Type(b []byte) (string, error) {\n\tm := make(map[string]interface{})\n\tif err := dec.Unmarshal(b, &m); err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid document: %s\", err)\n\t}\n\tif typ, ok := m[\"type\"].(string); !ok {\n\t\treturn \"\", fmt.Errorf(\"Missing 'type' key in document\")\n\t} else {\n\t\treturn typ, nil\n\t}\n}\n\nfunc ParseTap(r io.Reader, handler Handler) (*Suite, error) {\n\tif handler == nil {\n\t\thandler = &DefaultHandler{}\n\t}\n\thandler.HandleBeforeStream()\n\trd := bufio.NewReader(r)\n\tvar s Suite\n\ts.Start = &Time{time.Now()}\n\tfirst := true\n\tvar totalTests int\n\tfor {\n\t\tline, err := rd.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler.HandleDoc(line, \"tap\")\n\n\t\tif line[0] == '#' {\n\t\t\t\/\/ TODO: do something with diagnostic lines\n\t\t\tcontinue\n\t\t}\n\n\t\tif matches := tapBailOut.FindStringSubmatch(line); matches != nil {\n\t\t\t\/\/ TODO: do something with the bail reason\n\t\t\tbreak\n\t\t}\n\n\t\tif first {\n\t\t\tif matches := tapVersion.FindStringSubmatch(line); matches != nil {\n\t\t\t\t\/\/ TAP 13+\n\t\t\t\treturn nil, fmt.Errorf(\"TAP13 not supported yet\")\n\t\t\t}\n\t\t\tfirst = false\n\t\t\tif matches := tapPlan.FindStringSubmatch(line); matches != nil {\n\t\t\t\ttotalTests, _ = strconv.Atoi(matches[1])\n\t\t\t\ts.Count = totalTests\n\t\t\t\t\/\/ assuming plan line at beginning of output\n\t\t\t\thandler.HandleSuite(&s)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tmatches := tapTestLine.FindStringSubmatch(line)\n\t\tif matches == nil {\n\t\t\t\/\/ TODO: handle extra data\n\t\t\tcontinue\n\t\t}\n\n\t\tt := Test{Type: \"test\"}\n\t\tswitch matches[1] {\n\t\tcase \"ok\":\n\t\t\tt.Status = \"pass\"\n\t\tcase \"not ok\":\n\t\t\t\/\/ TODO: error?\n\t\t\tt.Status = \"fail\"\n\t\t}\n\t\t\/\/ ignore number\n\t\tt.Label = strings.TrimSpace(matches[3])\n\t\tdirective := strings.TrimSpace(matches[4])\n\t\tswitch {\n\t\tcase strings.HasPrefix(strings.ToUpper(directive), \"TODO\"):\n\t\t\ts.Final.Counts.Todo++\n\t\t\tt.Label = directive\n\t\t\tt.Status = \"todo\"\n\t\tcase strings.HasPrefix(strings.ToUpper(directive), \"SKIP\"):\n\t\t\ts.Final.Counts.Omit++\n\t\t\tt.Label = directive\n\t\t\tt.Status = \"omit\"\n\t\tcase t.Status == \"pass\":\n\t\t\ts.Final.Counts.Pass++\n\t\tdefault:\n\t\t\ts.Final.Counts.Fail++\n\t\t}\n\t\thandler.HandleTest(&t)\n\t\ts.Final.Counts.Total++\n\t\ts.Tests = append(s.Tests, &t)\n\t}\n\n\t\/\/ fixup missing tests\n\tif totalTests > 0 && totalTests > s.Final.Counts.Total {\n\t\ts.Final.Counts.Fail += (totalTests - s.Final.Counts.Total)\n\t\ts.Final.Counts.Total = totalTests\n\t}\n\n\thandler.HandleFinal(&s.Final)\n\thandler.HandleAfterStream()\n\treturn &s, nil\n}\n<commit_msg>Store tests and subcases in the database<commit_after>package stein\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"gopkg.in\/v1\/yaml\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tdocSep = regexp.MustCompile(`(?m)^---$|^\\.\\.\\.$`)\n\ttapTestLine = regexp.MustCompile(`(?m)^(ok|not ok)(?: (\\d+))?( [^#]+)?(?:\\s*#(.*))?$`)\n\ttapPlan = regexp.MustCompile(`(?m)^\\d..(\\d+|N).*$`)\n\ttapVersion = regexp.MustCompile(`(?m)^TAP version \\d+$`)\n\ttapBailOut = regexp.MustCompile(`(?m)^Bail out!\\s*(.*)$`)\n)\n\nfunc Parse(r io.Reader) (*Suite, error) {\n\tparser := Parser{&DefaultHandler{}}\n\treturn parser.Parse(r)\n}\n\ntype Parser struct {\n\tHandler Handler\n}\n\nfunc (p *Parser) Parse(r io.Reader) (*Suite, error) {\n\trd := bufio.NewReader(r)\n\tb, err := rd.Peek(1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif p.Handler == nil {\n\t\tp.Handler = &DefaultHandler{}\n\t}\n\n\ts := new(Suite)\n\tswitch b[0] {\n\tcase '{':\n\t\treturn s, s.parse(newDecoder(\"json\", rd), p.Handler)\n\tcase '-':\n\t\treturn s, s.parse(newDecoder(\"yaml\", rd), p.Handler)\n\tdefault:\n\t\treturn ParseTap(rd, p.Handler)\n\t}\n}\n\ntype Time struct {\n\ttime.Time\n}\n\nfunc (t *Time) parse(val string) (err error) {\n\tvar ti time.Time\n\tif ti, err = time.Parse(\"2006-01-02 15:04:05\", val); err != nil {\n\t\tif ti, err = time.Parse(time.RFC3339, val); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\t(*t).Time = ti\n\treturn\n}\n\nfunc (t *Time) UnmarshalJSON(data []byte) (err error) {\n\t\/\/ parse time, stripping the quotes\n\treturn t.parse(string(data[1 : len(data)-1]))\n}\nfunc (t *Time) GetYAML() (tag string, value interface{}) {\n\treturn \"\", t.String()\n}\nfunc (t *Time) SetYAML(tag string, value interface{}) bool {\n\tif v, ok := value.(string); ok {\n\t\treturn t.parse(strings.Trim(v, `'\"`)) == nil\n\t}\n\treturn false\n}\n\ntype Suite struct {\n\tType string\n\tStart *Time\n\tCount int\n\tSeed int\n\tRev int\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\n\tTests []*Test\n\tCases []*Case\n\tNotes []Note\n\tFinal Tally\n}\n\nfunc (s *Suite) parse(dec decoder, handler Handler) (err error) {\n\thandler.HandleBeforeStream()\n\tvar lastCase *Case\n\tfor dec.Scan() {\n\t\tdoc := dec.Scanner.Text()\n\t\thandler.HandleDoc(doc, dec.format)\n\n\t\tif strings.TrimSpace(doc) == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif s.Start == nil {\n\t\t\tif err = dec.Unmarshal([]byte(doc), &s); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.HandleSuite(s)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar typ string\n\t\tif typ, err = dec.Type([]byte(doc)); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch typ {\n\t\tcase \"case\":\n\t\t\tvar c Case\n\t\t\tif err = dec.Unmarshal([]byte(doc), &c); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif c.Level == 0 {\n\t\t\t\ts.Cases = append(s.Cases, &c)\n\t\t\t\tlastCase = &c\n\t\t\t} else if lastCase == nil {\n\t\t\t\t\/\/ TODO: possibly just add this case to the suite\n\t\t\t\treturn fmt.Errorf(\"No parent found for test case: %v\", c)\n\t\t\t} else {\n\t\t\t\t\/\/ find the parent\n\t\t\t\tfor c.Level <= lastCase.Level {\n\t\t\t\t\tlastCase = lastCase.parent\n\t\t\t\t}\n\t\t\t\t\/\/ TODO: strict check to ensure c.Level == lastCase.Level + 1\n\t\t\t\tc.parent = lastCase\n\t\t\t\tlastCase.Subcases = append(lastCase.Subcases, &c)\n\t\t\t}\n\t\t\thandler.HandleCase(&c)\n\t\tcase \"test\":\n\t\t\tvar t Test\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif lastCase != nil {\n\t\t\t\tlastCase.Tests = append(lastCase.Tests, &t)\n\t\t\t} else {\n\t\t\t\ts.Tests = append(s.Tests, &t)\n\t\t\t}\n\t\t\thandler.HandleTest(&t)\n\t\tcase \"note\":\n\t\t\tvar n Note\n\t\t\tif err = dec.Unmarshal([]byte(doc), &n); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.Notes = append(s.Notes, n)\n\t\t\thandler.HandleNote(&n)\n\t\tcase \"tally\":\n\t\t\tvar t Tally\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.HandleTally(&t)\n\t\tcase \"final\":\n\t\t\tvar t Tally\n\t\t\tif err = dec.Unmarshal([]byte(doc), &t); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.Final = t\n\t\t\thandler.HandleFinal(&t)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Invalid type: %s\", typ)\n\t\t\treturn\n\t\t}\n\t}\n\thandler.HandleAfterStream()\n\treturn\n}\n\nfunc (s Suite) Docs() (ret []interface{}) {\n\tret = append(ret, s)\n\tfor _, t := range s.Tests {\n\t\tret = append(ret, t)\n\t}\n\n\tfor _, c := range s.Cases {\n\t\tret = append(ret, c.Docs()...)\n\t}\n\n\tfor _, n := range s.Notes {\n\t\tret = append(ret, n)\n\t}\n\n\tret = append(ret, s.Final)\n\treturn\n}\n\nfunc (s Suite) ToTapY() ([]byte, error) {\n\tvar ret []byte\n\tfor _, doc := range s.Docs() {\n\t\tif b, err := yaml.Marshal(doc); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tret = append(ret, []byte(\"---\\n\")...)\n\t\t\tret = append(ret, b...)\n\t\t\tret = append(ret, '\\n')\n\t\t}\n\t}\n\treturn append(ret, []byte(\"...\")...), nil\n}\n\ntype Case struct {\n\tType string\n\tSubtype string\n\tLabel string\n\tLevel int\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\n\tTests []*Test\n\tSubcases []*Case\n\n\tparent *Case\n}\n\nfunc (c Case) Docs() (ret []interface{}) {\n\tret = append(ret, c)\n\tfor _, t := range c.Tests {\n\t\tret = append(ret, t)\n\t}\n\tfor _, c := range c.Subcases {\n\t\tret = append(ret, c.Docs()...)\n\t}\n\treturn\n}\n\n\/\/ TODO: allow strings via UnmarshalJSON\/YAML\ntype Snippet []map[string]string\n\ntype Test struct {\n\tType string\n\tSubtype string `yaml:\",omitempty\" json:\",omitempty\"`\n\tStatus string\n\tSetup string `yaml:\",omitempty\" json:\",omitempty\"`\n\tLabel string\n\tExpected interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\tReturned interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n\tFile string `yaml:\",omitempty\" json:\",omitempty\"`\n\tLine int `yaml:\",omitempty\" json:\",omitempty\"`\n\tSource string `yaml:\",omitempty\" json:\",omitempty\"`\n\tSnippet Snippet `yaml:\",omitempty\" json:\",omitempty\"`\n\tCoverage struct {\n\t\tFile string\n\t\tLine interface{}\n\t\tCode string\n\t} `yaml:\",omitempty\" json:\",omitempty\"`\n\tException struct {\n\t\tMessage string\n\t\tFile string\n\t\tLine int\n\t\tSource string\n\t\tSnippet Snippet\n\t\tBacktrace interface{}\n\t} `yaml:\",omitempty\" json:\",omitempty\"`\n\tStdout string `yaml:\",omitempty\" json:\",omitempty\"`\n\tStderr string `yaml:\",omitempty\" json:\",omitempty\"`\n\tTime float64\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype Note struct {\n\tType string\n\tText string\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype Tally struct {\n\tType string\n\tTime float64\n\tCounts struct {\n\t\tTotal int\n\t\tPass int\n\t\tFail int\n\t\tError int\n\t\tOmit int\n\t\tTodo int\n\t}\n\tExtra interface{} `yaml:\",omitempty\" json:\",omitempty\"`\n}\n\ntype decoder struct {\n\t*bufio.Scanner\n\tformat string\n\tUnmarshal func([]byte, interface{}) error\n}\n\nfunc scanYAMLDoc(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif loc := docSep.FindIndex(data); loc != nil {\n\t\tif loc[0] == 0 {\n\t\t\t\/\/ just a start document marker\n\t\t\treturn loc[1], nil, nil\n\t\t}\n\t\t\/\/ We found a complete document\n\t\treturn loc[1], data[0:loc[0]], nil\n\t}\n\tif atEOF {\n\t\t\/\/ if we're at EOF without having matched docSep, there's nothing else to get\n\t\treturn len(data), nil, nil\n\t}\n\t\/\/ Request more data.\n\treturn 0, nil, nil\n}\n\nfunc newDecoder(typ string, r io.Reader) decoder {\n\tdec := decoder{Scanner: bufio.NewScanner(r)}\n\t\/\/ TODO: handle plain TAP with a decoder as well\n\tswitch typ {\n\tcase \"json\":\n\t\tdec.format = \"json\"\n\t\tdec.Unmarshal = json.Unmarshal\n\tcase \"yaml\":\n\t\tdec.format = \"yaml\"\n\t\tdec.Unmarshal = yaml.Unmarshal\n\t\tdec.Scanner.Split(scanYAMLDoc)\n\tdefault:\n\t\tpanic(\"Unsupported decoder\")\n\t}\n\treturn dec\n}\n\nfunc (dec decoder) Type(b []byte) (string, error) {\n\tm := make(map[string]interface{})\n\tif err := dec.Unmarshal(b, &m); err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid document: %s\", err)\n\t}\n\tif typ, ok := m[\"type\"].(string); !ok {\n\t\treturn \"\", fmt.Errorf(\"Missing 'type' key in document\")\n\t} else {\n\t\treturn typ, nil\n\t}\n}\n\nfunc ParseTap(r io.Reader, handler Handler) (*Suite, error) {\n\tif handler == nil {\n\t\thandler = &DefaultHandler{}\n\t}\n\thandler.HandleBeforeStream()\n\trd := bufio.NewReader(r)\n\tvar s Suite\n\ts.Start = &Time{time.Now()}\n\tfirst := true\n\tvar totalTests int\n\tfor {\n\t\tline, err := rd.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler.HandleDoc(line, \"tap\")\n\n\t\tif line[0] == '#' {\n\t\t\t\/\/ TODO: do something with diagnostic lines\n\t\t\tcontinue\n\t\t}\n\n\t\tif matches := tapBailOut.FindStringSubmatch(line); matches != nil {\n\t\t\t\/\/ TODO: do something with the bail reason\n\t\t\tbreak\n\t\t}\n\n\t\tif first {\n\t\t\tif matches := tapVersion.FindStringSubmatch(line); matches != nil {\n\t\t\t\t\/\/ TAP 13+\n\t\t\t\treturn nil, fmt.Errorf(\"TAP13 not supported yet\")\n\t\t\t}\n\t\t\tfirst = false\n\t\t\tif matches := tapPlan.FindStringSubmatch(line); matches != nil {\n\t\t\t\ttotalTests, _ = strconv.Atoi(matches[1])\n\t\t\t\ts.Count = totalTests\n\t\t\t\t\/\/ assuming plan line at beginning of output\n\t\t\t\thandler.HandleSuite(&s)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tmatches := tapTestLine.FindStringSubmatch(line)\n\t\tif matches == nil {\n\t\t\t\/\/ TODO: handle extra data\n\t\t\tcontinue\n\t\t}\n\n\t\tt := Test{Type: \"test\"}\n\t\tswitch matches[1] {\n\t\tcase \"ok\":\n\t\t\tt.Status = \"pass\"\n\t\tcase \"not ok\":\n\t\t\t\/\/ TODO: error?\n\t\t\tt.Status = \"fail\"\n\t\t}\n\t\t\/\/ ignore number\n\t\tt.Label = strings.TrimSpace(matches[3])\n\t\tdirective := strings.TrimSpace(matches[4])\n\t\tswitch {\n\t\tcase strings.HasPrefix(strings.ToUpper(directive), \"TODO\"):\n\t\t\ts.Final.Counts.Todo++\n\t\t\tt.Label = directive\n\t\t\tt.Status = \"todo\"\n\t\tcase strings.HasPrefix(strings.ToUpper(directive), \"SKIP\"):\n\t\t\ts.Final.Counts.Omit++\n\t\t\tt.Label = directive\n\t\t\tt.Status = \"omit\"\n\t\tcase t.Status == \"pass\":\n\t\t\ts.Final.Counts.Pass++\n\t\tdefault:\n\t\t\ts.Final.Counts.Fail++\n\t\t}\n\t\thandler.HandleTest(&t)\n\t\ts.Final.Counts.Total++\n\t\ts.Tests = append(s.Tests, &t)\n\t}\n\n\t\/\/ fixup missing tests\n\tif totalTests > 0 && totalTests > s.Final.Counts.Total {\n\t\ts.Final.Counts.Fail += (totalTests - s.Final.Counts.Total)\n\t\ts.Final.Counts.Total = totalTests\n\t}\n\n\thandler.HandleFinal(&s.Final)\n\thandler.HandleAfterStream()\n\treturn &s, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package struc\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n)\n\nfunc prep(data interface{}) (reflect.Value, Packable, error) {\n\tvalue := reflect.ValueOf(data)\n\tfor value.Kind() == reflect.Ptr {\n\t\tnext := value.Elem().Kind()\n\t\tif next == reflect.Struct || next == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tswitch value.Kind() {\n\tcase reflect.Struct:\n\t\tfields, err := parseFields(value)\n\t\treturn value, fields, err\n\tdefault:\n\t\tif !value.IsValid() {\n\t\t\treturn reflect.Value{}, nil, fmt.Errorf(\"Invalid reflect.Value for %+v\", data)\n\t\t}\n\t\treturn value, &binaryFallback{value, binary.BigEndian}, nil\n\t}\n}\n\nfunc Pack(w io.Writer, data interface{}) error {\n\treturn PackWithOrder(w, data, nil)\n}\n\n\/\/ TODO: this is destructive with caching\nfunc PackWithOrder(w io.Writer, data interface{}, order binary.ByteOrder) error {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif order != nil {\n\t\tfields.SetByteOrder(order)\n\t}\n\tsize := fields.Sizeof(val)\n\tbuf := make([]byte, size)\n\tif err := fields.Pack(buf, val); err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(buf)\n\treturn err\n}\n\nfunc Unpack(r io.Reader, data interface{}) error {\n\treturn UnpackWithOrder(r, data, nil)\n}\n\nfunc UnpackWithOrder(r io.Reader, data interface{}, order binary.ByteOrder) error {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif order != nil {\n\t\tfields.SetByteOrder(order)\n\t}\n\treturn fields.Unpack(r, val)\n}\n\nfunc Sizeof(data interface{}) (int, error) {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn fields.Sizeof(val), nil\n}\n<commit_msg>fix raw packing strings<commit_after>package struc\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n)\n\nfunc prep(data interface{}) (reflect.Value, Packable, error) {\n\tvalue := reflect.ValueOf(data)\n\tfor value.Kind() == reflect.Ptr {\n\t\tnext := value.Elem().Kind()\n\t\tif next == reflect.Struct || next == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tswitch value.Kind() {\n\tcase reflect.Struct:\n\t\tfields, err := parseFields(value)\n\t\treturn value, fields, err\n\tdefault:\n\t\tif !value.IsValid() {\n\t\t\treturn reflect.Value{}, nil, fmt.Errorf(\"Invalid reflect.Value for %+v\", data)\n\t\t}\n\t\treturn value, &binaryFallback{value, binary.BigEndian}, nil\n\t}\n}\n\nfunc Pack(w io.Writer, data interface{}) error {\n\treturn PackWithOrder(w, data, nil)\n}\n\n\/\/ TODO: this is destructive with caching\nfunc PackWithOrder(w io.Writer, data interface{}, order binary.ByteOrder) error {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif order != nil {\n\t\tfields.SetByteOrder(order)\n\t}\n\tif val.Type().Kind() == reflect.String {\n\t\tval = val.Convert(reflect.TypeOf([]byte{}))\n\t}\n\tsize := fields.Sizeof(val)\n\tbuf := make([]byte, size)\n\tif err := fields.Pack(buf, val); err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(buf)\n\treturn err\n}\n\nfunc Unpack(r io.Reader, data interface{}) error {\n\treturn UnpackWithOrder(r, data, nil)\n}\n\nfunc UnpackWithOrder(r io.Reader, data interface{}, order binary.ByteOrder) error {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif order != nil {\n\t\tfields.SetByteOrder(order)\n\t}\n\treturn fields.Unpack(r, val)\n}\n\nfunc Sizeof(data interface{}) (int, error) {\n\tval, fields, err := prep(data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn fields.Sizeof(val), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\t\"github.com\/mdlayher\/gosubsonic\"\n)\n\n\/\/ subsonic stores the instance of the gosubsonic client\nvar subsonic gosubsonic.Client\n\n\/\/ nameToDir maps a directory name to its SubDir\nvar nameToDir map[string]SubDir\n\n\/\/ nameToFile maps a file name to its SubFile\nvar nameToFile map[string]SubFile\n\n\/\/ fileCache maps a file name to its file pointer\nvar fileCache map[string]os.File\n\n\/\/ cacheTotal is the total size of local files in the cache\nvar cacheTotal int64\n\n\/\/ streamMap maps a fileID to a channel containing a file stream\nvar streamMap map[int64]chan []byte\n\n\/\/ host is the host of the Subsonic server\nvar host = flag.String(\"host\", \"\", \"Host of Subsonic server\")\n\n\/\/ user is the username to connect to the Subsonic server\nvar user = flag.String(\"user\", \"\", \"Username for the Subsonic server\")\n\n\/\/ password is the password to connect to the Subsonic server\nvar password = flag.String(\"password\", \"\", \"Password for the Subsonic server\")\n\n\/\/ mount is the path where subfs will be mounted\nvar mount = flag.String(\"mount\", \"\", \"Path where subfs will be mounted\")\n\n\/\/ cacheSize is the maximum size of the local file cache in megabytes\nvar cacheSize = flag.Int64(\"cache\", 100, \"Size of the local file cache, in megabytes\")\n\nfunc main() {\n\t\/\/ Parse command line flags\n\tflag.Parse()\n\n\t\/\/ Open connection to Subsonic\n\tsub, err := gosubsonic.New(*host, *user, *password)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not connect to Subsonic server: %s\", err.Error())\n\t}\n\n\t\/\/ Store subsonic client for global use\n\tsubsonic = *sub\n\n\t\/\/ Initialize lookup maps\n\tnameToDir = map[string]SubDir{}\n\tnameToFile = map[string]SubFile{}\n\n\t\/\/ Initialize file cache\n\tfileCache = map[string]os.File{}\n\tcacheTotal = 0\n\n\t\/\/ Initialize stream map\n\tstreamMap = map[int64]chan []byte{}\n\n\t\/\/ Attempt to mount filesystem\n\tc, err := fuse.Mount(*mount)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not mount subfs at %s: %s\", *mount, err.Error())\n\t}\n\n\t\/\/ Serve the FUSE filesystem\n\tlog.Printf(\"subfs: %s@%s -> %s [cache: %d MB]\", *user, *host, *mount, *cacheSize)\n\tgo fs.Serve(c, SubFS{})\n\n\t\/\/ Wait for termination singals\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\tsignal.Notify(sigChan, syscall.SIGTERM)\n\tfor sig := range sigChan {\n\t\tlog.Println(\"subfs: caught signal:\", sig)\n\t\tbreak\n\t}\n\n\t\/\/ Purge all cached files\n\tfor _, f := range fileCache {\n\t\t\/\/ Close file\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\t\/\/ Remove file\n\t\tif err := os.Remove(f.Name()); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\tlog.Printf(\"subfs: removed %d cached file(s)\", len(fileCache))\n\n\t\/\/ Attempt to unmount the FUSE filesystem\n\tretry := 3\n\tfor i := 0; i < retry+1; i++ {\n\t\t\/\/ Wait between attempts\n\t\tif i > 0 {\n\t\t\t<-time.After(time.Second * 3)\n\t\t}\n\n\t\t\/\/ Try unmount\n\t\tif err := fuse.Unmount(*mount); err != nil {\n\t\t\t\/\/ Force exit on last attempt\n\t\t\tif i == retry {\n\t\t\t\tlog.Printf(\"subfs: could not unmount %s, halting!\", *mount)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tlog.Printf(\"subfs: could not unmount %s, retrying %d of %d...\", *mount, i+1, retry)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Close the FUSE filesystem\n\tif err := c.Close(); err != nil {\n\t\tlog.Fatalf(\"Could not close subfs: %s\", err.Error())\n\t}\n\n\tlog.Printf(\"subfs: done!\")\n\treturn\n}\n\n\/\/ SubFS represents the root of the filesystem\ntype SubFS struct{}\n\n\/\/ Root is called to get the root directory node of this filesystem\nfunc (fs SubFS) Root() (fs.Node, fuse.Error) {\n\treturn &SubDir{RelPath: \"\"}, nil\n}\n\n\/\/ SubDir represents a directory in the filesystem\ntype SubDir struct {\n\tID int64\n\tRelPath string\n}\n\n\/\/ Attr retrives the attributes for this SubDir\nfunc (SubDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: os.ModeDir | 0555,\n\t}\n}\n\n\/\/ ReadDir returns a list of directory entries depending on the current path\nfunc (d SubDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\t\/\/ List of directory entries to return\n\tdirectories := make([]fuse.Dirent, 0)\n\n\t\/\/ If at root of filesystem, fetch indexes\n\tif d.RelPath == \"\" {\n\t\tindex, err := subsonic.GetIndexes(-1, -1)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to retrieve indexes: %s\", err.Error())\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\n\t\t\/\/ Iterate indices\n\t\tfor _, i := range index {\n\t\t\t\/\/ Iterate all artists\n\t\t\tfor _, a := range i.Artist {\n\t\t\t\t\/\/ Map artist's name to directory\n\t\t\t\tnameToDir[a.Name] = SubDir{\n\t\t\t\t\tID: a.ID,\n\t\t\t\t\tRelPath: \"\",\n\t\t\t\t}\n\n\t\t\t\t\/\/ Create a directory entry\n\t\t\t\tdir := fuse.Dirent{\n\t\t\t\t\tName: a.Name,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Append entry\n\t\t\t\tdirectories = append(directories, dir)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Get this directory's contents\n\t\tcontent, err := subsonic.GetMusicDirectory(d.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to retrieve directory %d: %s\", d.ID, err.Error())\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\n\t\t\/\/ Iterate all returned directories\n\t\tfor _, dir := range content.Directories {\n\t\t\t\/\/ Create a directory entry\n\t\t\tentry := fuse.Dirent{\n\t\t\t\tName: dir.Title,\n\t\t\t\tType: fuse.DT_Dir,\n\t\t\t}\n\n\t\t\t\/\/ Add SubDir directory to lookup map\n\t\t\tnameToDir[dir.Title] = SubDir{\n\t\t\t\tID: dir.ID,\n\t\t\t\tRelPath: d.RelPath + dir.Title,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, entry)\n\t\t}\n\n\t\t\/\/ List of bad characters which should be replaced in filenames\n\t\tbadChars := []string{\"\/\", \"\\\\\"}\n\n\t\t\/\/ Iterate all returned audio\n\t\tfor _, a := range content.Audio {\n\t\t\t\/\/ Predefined audio filename format\n\t\t\taudioFormat := fmt.Sprintf(\"%02d - %s - %s.%s\", a.Track, a.Artist, a.Title, a.Suffix)\n\n\t\t\t\/\/ Check for any characters which may cause trouble with filesystem display\n\t\t\tfor _, b := range badChars {\n\t\t\t\taudioFormat = strings.Replace(audioFormat, b, \"_\", -1)\n\t\t\t}\n\n\t\t\t\/\/ Create a directory entry\n\t\t\tdir := fuse.Dirent{\n\t\t\t\tName: audioFormat,\n\t\t\t\tType: fuse.DT_File,\n\t\t\t}\n\n\t\t\t\/\/ Add SubFile file to lookup map\n\t\t\tnameToFile[dir.Name] = SubFile{\n\t\t\t\tID: a.ID,\n\t\t\t\tCreated: a.Created,\n\t\t\t\tFileName: audioFormat,\n\t\t\t\tSize: a.Size,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, dir)\n\t\t}\n\n\t\t\/\/ Iterate all returned video\n\t\tfor _, v := range content.Video {\n\t\t\t\/\/ Predefined video filename format\n\t\t\tvideoFormat := fmt.Sprintf(\"%s.%s\", v.Title, v.Suffix)\n\n\t\t\t\/\/ Check for any characters which may cause trouble with filesystem display\n\t\t\tfor _, b := range badChars {\n\t\t\t\tvideoFormat = strings.Replace(videoFormat, b, \"_\", -1)\n\t\t\t}\n\n\t\t\t\/\/ Create a directory entry\n\t\t\tdir := fuse.Dirent{\n\t\t\t\tName: videoFormat,\n\t\t\t\tType: fuse.DT_File,\n\t\t\t}\n\n\t\t\t\/\/ Add SubFile file to lookup map\n\t\t\tnameToFile[dir.Name] = SubFile{\n\t\t\t\tID: v.ID,\n\t\t\t\tCreated: v.Created,\n\t\t\t\tFileName: videoFormat,\n\t\t\t\tSize: v.Size,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, dir)\n\t\t}\n\t}\n\n\t\/\/ Return all directory entries\n\treturn directories, nil\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d SubDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup directory by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\tdir.RelPath = name + \"\/\"\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif f, ok := nameToFile[name]; ok {\n\t\treturn f, nil\n\t}\n\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ SubFile represents a file in Subsonic library\ntype SubFile struct {\n\tID int64\n\tCreated time.Time\n\tFileName string\n\tSize int64\n}\n\n\/\/ Attr returns file attributes (all files read-only)\nfunc (s SubFile) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: 0644,\n\t\tMtime: s.Created,\n\t\tSize: uint64(s.Size),\n\t}\n}\n\n\/\/ ReadAll opens a file stream from Subsonic and returns the resulting bytes\nfunc (s SubFile) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {\n\t\/\/ Byte stream to return data\n\tbyteChan := make(chan []byte)\n\n\t\/\/ Fetch file in background\n\tgo func() {\n\t\t\/\/ Check for file in cache\n\t\tif cFile, ok := fileCache[s.FileName]; ok {\n\t\t\t\/\/ Check for empty file, meaning the cached file got wiped out\n\t\t\tbuf, err := ioutil.ReadFile(cFile.Name())\n\t\t\tif len(buf) == 0 && strings.Contains(err.Error(), \"no such file or directory\") {\n\t\t\t\t\/\/ Purge item from cache\n\t\t\t\tlog.Printf(\"Cache missing: [%d] %s\", s.ID, s.FileName)\n\t\t\t\tdelete(fileCache, s.FileName)\n\t\t\t\tcacheTotal = atomic.AddInt64(&cacheTotal, -1*s.Size)\n\n\t\t\t\t\/\/ Print some cache metrics\n\t\t\t\tcacheUse := float64(cacheTotal) \/ 1024 \/ 1024\n\t\t\t\tcacheDel := float64(s.Size) \/ 1024 \/ 1024\n\t\t\t\tlog.Printf(\"Cache use: %0.3f \/ %d.000 MB (-%0.3f MB)\", cacheUse, *cacheSize, cacheDel)\n\n\t\t\t\t\/\/ Close file handle\n\t\t\t\tif err := cFile.Close(); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Return cached file\n\t\t\t\tbyteChan <- buf\n\t\t\t\tclose(byteChan)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for pre-existing stream in progress, so that multiple clients can receive it without\n\t\t\/\/ requesting the stream multiple times. Yeah concurrency!\n\t\tif streamChan, ok := streamMap[s.ID]; ok {\n\t\t\t\/\/ Wait for stream to be ready, and return it\n\t\t\tbyteChan <- <-streamChan\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generate a channel for clients wishing to wait on this stream\n\t\tstreamMap[s.ID] = make(chan []byte, 0)\n\n\t\t\/\/ Open stream\n\t\tlog.Printf(\"Opening stream: [%d] %s\", s.ID, s.FileName)\n\t\tstream, err := subsonic.Stream(s.ID, nil)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Read in stream\n\t\tfile, err := ioutil.ReadAll(stream)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Close stream\n\t\tif err := stream.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Return bytes\n\t\tlog.Printf(\"Closing stream: [%d] %s\", s.ID, s.FileName)\n\t\tbyteChan <- file\n\t\tclose(byteChan)\n\n\t\t\/\/ Attempt to return bytes to others waiting, remove this stream\n\t\tgo func() {\n\t\t\t\/\/ Time out after waiting for 10 seconds\n\t\t\tselect {\n\t\t\tcase streamMap[s.ID] <- file:\n\t\t\tcase <-time.After(time.Second * 10):\n\t\t\t}\n\n\t\t\t\/\/ Remove stream from map\n\t\t\tclose(streamMap[s.ID])\n\t\t\tdelete(streamMap, s.ID)\n\t\t}()\n\n\t\t\/\/ Check for maximum cache size\n\t\tif cacheTotal > *cacheSize*1024*1024 {\n\t\t\tlog.Printf(\"Cache full (%d MB), skipping local cache\", *cacheSize)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check if cache will overflow if file is added\n\t\tif cacheTotal+s.Size > *cacheSize*1024*1024 {\n\t\t\tlog.Printf(\"File will overflow cache (%0.3f MB), skipping local cache\", float64(s.Size)\/1024\/1024)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ If file is greater than 50MB, skip caching to conserve memory\n\t\tthreshold := 50\n\t\tif s.Size > int64(threshold*1024*1024) {\n\t\t\tlog.Printf(\"File too large (%0.3f > %0d MB), skipping local cache\", float64(s.Size)\/1024\/1024, threshold)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generate a temporary file\n\t\ttmpFile, err := ioutil.TempFile(os.TempDir(), \"subfs\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Write out temporary file\n\t\tif _, err := tmpFile.Write(file); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Add file to cache map\n\t\tlog.Printf(\"Caching file: [%d] %s\", s.ID, s.FileName)\n\t\tfileCache[s.FileName] = *tmpFile\n\n\t\t\/\/ Add file's size to cache total size\n\t\tcacheTotal = atomic.AddInt64(&cacheTotal, s.Size)\n\n\t\t\/\/ Print some cache metrics\n\t\tcacheUse := float64(cacheTotal) \/ 1024 \/ 1024\n\t\tcacheAdd := float64(s.Size) \/ 1024 \/ 1024\n\t\tlog.Printf(\"Cache use: %0.3f \/ %d.000 MB (+%0.3f MB)\", cacheUse, *cacheSize, cacheAdd)\n\n\t\treturn\n\t}()\n\n\t\/\/ Wait for an event on read\n\tselect {\n\t\/\/ Byte stream channel\n\tcase stream := <-byteChan:\n\t\treturn stream, nil\n\t\/\/ Interrupt channel\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n}\n<commit_msg>Add differentiation between video and audio, fetch all video at 1280x720<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\t\"github.com\/mdlayher\/gosubsonic\"\n)\n\n\/\/ subsonic stores the instance of the gosubsonic client\nvar subsonic gosubsonic.Client\n\n\/\/ nameToDir maps a directory name to its SubDir\nvar nameToDir map[string]SubDir\n\n\/\/ nameToFile maps a file name to its SubFile\nvar nameToFile map[string]SubFile\n\n\/\/ fileCache maps a file name to its file pointer\nvar fileCache map[string]os.File\n\n\/\/ cacheTotal is the total size of local files in the cache\nvar cacheTotal int64\n\n\/\/ streamMap maps a fileID to a channel containing a file stream\nvar streamMap map[int64]chan []byte\n\n\/\/ host is the host of the Subsonic server\nvar host = flag.String(\"host\", \"\", \"Host of Subsonic server\")\n\n\/\/ user is the username to connect to the Subsonic server\nvar user = flag.String(\"user\", \"\", \"Username for the Subsonic server\")\n\n\/\/ password is the password to connect to the Subsonic server\nvar password = flag.String(\"password\", \"\", \"Password for the Subsonic server\")\n\n\/\/ mount is the path where subfs will be mounted\nvar mount = flag.String(\"mount\", \"\", \"Path where subfs will be mounted\")\n\n\/\/ cacheSize is the maximum size of the local file cache in megabytes\nvar cacheSize = flag.Int64(\"cache\", 100, \"Size of the local file cache, in megabytes\")\n\nfunc main() {\n\t\/\/ Parse command line flags\n\tflag.Parse()\n\n\t\/\/ Open connection to Subsonic\n\tsub, err := gosubsonic.New(*host, *user, *password)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not connect to Subsonic server: %s\", err.Error())\n\t}\n\n\t\/\/ Store subsonic client for global use\n\tsubsonic = *sub\n\n\t\/\/ Initialize lookup maps\n\tnameToDir = map[string]SubDir{}\n\tnameToFile = map[string]SubFile{}\n\n\t\/\/ Initialize file cache\n\tfileCache = map[string]os.File{}\n\tcacheTotal = 0\n\n\t\/\/ Initialize stream map\n\tstreamMap = map[int64]chan []byte{}\n\n\t\/\/ Attempt to mount filesystem\n\tc, err := fuse.Mount(*mount)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not mount subfs at %s: %s\", *mount, err.Error())\n\t}\n\n\t\/\/ Serve the FUSE filesystem\n\tlog.Printf(\"subfs: %s@%s -> %s [cache: %d MB]\", *user, *host, *mount, *cacheSize)\n\tgo fs.Serve(c, SubFS{})\n\n\t\/\/ Wait for termination singals\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, os.Interrupt)\n\tsignal.Notify(sigChan, syscall.SIGTERM)\n\tfor sig := range sigChan {\n\t\tlog.Println(\"subfs: caught signal:\", sig)\n\t\tbreak\n\t}\n\n\t\/\/ Purge all cached files\n\tfor _, f := range fileCache {\n\t\t\/\/ Close file\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\t\/\/ Remove file\n\t\tif err := os.Remove(f.Name()); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\tlog.Printf(\"subfs: removed %d cached file(s)\", len(fileCache))\n\n\t\/\/ Attempt to unmount the FUSE filesystem\n\tretry := 3\n\tfor i := 0; i < retry+1; i++ {\n\t\t\/\/ Wait between attempts\n\t\tif i > 0 {\n\t\t\t<-time.After(time.Second * 3)\n\t\t}\n\n\t\t\/\/ Try unmount\n\t\tif err := fuse.Unmount(*mount); err != nil {\n\t\t\t\/\/ Force exit on last attempt\n\t\t\tif i == retry {\n\t\t\t\tlog.Printf(\"subfs: could not unmount %s, halting!\", *mount)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tlog.Printf(\"subfs: could not unmount %s, retrying %d of %d...\", *mount, i+1, retry)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Close the FUSE filesystem\n\tif err := c.Close(); err != nil {\n\t\tlog.Fatalf(\"Could not close subfs: %s\", err.Error())\n\t}\n\n\tlog.Printf(\"subfs: done!\")\n\treturn\n}\n\n\/\/ SubFS represents the root of the filesystem\ntype SubFS struct{}\n\n\/\/ Root is called to get the root directory node of this filesystem\nfunc (fs SubFS) Root() (fs.Node, fuse.Error) {\n\treturn &SubDir{RelPath: \"\"}, nil\n}\n\n\/\/ SubDir represents a directory in the filesystem\ntype SubDir struct {\n\tID int64\n\tRelPath string\n}\n\n\/\/ Attr retrives the attributes for this SubDir\nfunc (SubDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: os.ModeDir | 0555,\n\t}\n}\n\n\/\/ ReadDir returns a list of directory entries depending on the current path\nfunc (d SubDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\t\/\/ List of directory entries to return\n\tdirectories := make([]fuse.Dirent, 0)\n\n\t\/\/ If at root of filesystem, fetch indexes\n\tif d.RelPath == \"\" {\n\t\tindex, err := subsonic.GetIndexes(-1, -1)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to retrieve indexes: %s\", err.Error())\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\n\t\t\/\/ Iterate indices\n\t\tfor _, i := range index {\n\t\t\t\/\/ Iterate all artists\n\t\t\tfor _, a := range i.Artist {\n\t\t\t\t\/\/ Map artist's name to directory\n\t\t\t\tnameToDir[a.Name] = SubDir{\n\t\t\t\t\tID: a.ID,\n\t\t\t\t\tRelPath: \"\",\n\t\t\t\t}\n\n\t\t\t\t\/\/ Create a directory entry\n\t\t\t\tdir := fuse.Dirent{\n\t\t\t\t\tName: a.Name,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\n\t\t\t\t\/\/ Append entry\n\t\t\t\tdirectories = append(directories, dir)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Get this directory's contents\n\t\tcontent, err := subsonic.GetMusicDirectory(d.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to retrieve directory %d: %s\", d.ID, err.Error())\n\t\t\treturn nil, fuse.ENOENT\n\t\t}\n\n\t\t\/\/ Iterate all returned directories\n\t\tfor _, dir := range content.Directories {\n\t\t\t\/\/ Create a directory entry\n\t\t\tentry := fuse.Dirent{\n\t\t\t\tName: dir.Title,\n\t\t\t\tType: fuse.DT_Dir,\n\t\t\t}\n\n\t\t\t\/\/ Add SubDir directory to lookup map\n\t\t\tnameToDir[dir.Title] = SubDir{\n\t\t\t\tID: dir.ID,\n\t\t\t\tRelPath: d.RelPath + dir.Title,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, entry)\n\t\t}\n\n\t\t\/\/ List of bad characters which should be replaced in filenames\n\t\tbadChars := []string{\"\/\", \"\\\\\"}\n\n\t\t\/\/ Iterate all returned audio\n\t\tfor _, a := range content.Audio {\n\t\t\t\/\/ Predefined audio filename format\n\t\t\taudioFormat := fmt.Sprintf(\"%02d - %s - %s.%s\", a.Track, a.Artist, a.Title, a.Suffix)\n\n\t\t\t\/\/ Check for any characters which may cause trouble with filesystem display\n\t\t\tfor _, b := range badChars {\n\t\t\t\taudioFormat = strings.Replace(audioFormat, b, \"_\", -1)\n\t\t\t}\n\n\t\t\t\/\/ Create a directory entry\n\t\t\tdir := fuse.Dirent{\n\t\t\t\tName: audioFormat,\n\t\t\t\tType: fuse.DT_File,\n\t\t\t}\n\n\t\t\t\/\/ Add SubFile file to lookup map\n\t\t\tnameToFile[dir.Name] = SubFile{\n\t\t\t\tID: a.ID,\n\t\t\t\tCreated: a.Created,\n\t\t\t\tFileName: audioFormat,\n\t\t\t\tSize: a.Size,\n\t\t\t\tIsVideo: false,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, dir)\n\t\t}\n\n\t\t\/\/ Iterate all returned video\n\t\tfor _, v := range content.Video {\n\t\t\t\/\/ Predefined video filename format\n\t\t\tvideoFormat := fmt.Sprintf(\"%s.%s\", v.Title, v.Suffix)\n\n\t\t\t\/\/ Check for any characters which may cause trouble with filesystem display\n\t\t\tfor _, b := range badChars {\n\t\t\t\tvideoFormat = strings.Replace(videoFormat, b, \"_\", -1)\n\t\t\t}\n\n\t\t\t\/\/ Create a directory entry\n\t\t\tdir := fuse.Dirent{\n\t\t\t\tName: videoFormat,\n\t\t\t\tType: fuse.DT_File,\n\t\t\t}\n\n\t\t\t\/\/ Add SubFile file to lookup map\n\t\t\tnameToFile[dir.Name] = SubFile{\n\t\t\t\tID: v.ID,\n\t\t\t\tCreated: v.Created,\n\t\t\t\tFileName: videoFormat,\n\t\t\t\tSize: v.Size,\n\t\t\t\tIsVideo: true,\n\t\t\t}\n\n\t\t\t\/\/ Append to list\n\t\t\tdirectories = append(directories, dir)\n\t\t}\n\t}\n\n\t\/\/ Return all directory entries\n\treturn directories, nil\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d SubDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup directory by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\tdir.RelPath = name + \"\/\"\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif f, ok := nameToFile[name]; ok {\n\t\treturn f, nil\n\t}\n\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ SubFile represents a file in Subsonic library\ntype SubFile struct {\n\tID int64\n\tCreated time.Time\n\tFileName string\n\tIsVideo bool\n\tSize int64\n}\n\n\/\/ Attr returns file attributes (all files read-only)\nfunc (s SubFile) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: 0644,\n\t\tMtime: s.Created,\n\t\tSize: uint64(s.Size),\n\t}\n}\n\n\/\/ ReadAll opens a file stream from Subsonic and returns the resulting bytes\nfunc (s SubFile) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {\n\t\/\/ Byte stream to return data\n\tbyteChan := make(chan []byte)\n\n\t\/\/ Fetch file in background\n\tgo func() {\n\t\t\/\/ Check for file in cache\n\t\tif cFile, ok := fileCache[s.FileName]; ok {\n\t\t\t\/\/ Check for empty file, meaning the cached file got wiped out\n\t\t\tbuf, err := ioutil.ReadFile(cFile.Name())\n\t\t\tif len(buf) == 0 && strings.Contains(err.Error(), \"no such file or directory\") {\n\t\t\t\t\/\/ Purge item from cache\n\t\t\t\tlog.Printf(\"Cache missing: [%d] %s\", s.ID, s.FileName)\n\t\t\t\tdelete(fileCache, s.FileName)\n\t\t\t\tcacheTotal = atomic.AddInt64(&cacheTotal, -1*s.Size)\n\n\t\t\t\t\/\/ Print some cache metrics\n\t\t\t\tcacheUse := float64(cacheTotal) \/ 1024 \/ 1024\n\t\t\t\tcacheDel := float64(s.Size) \/ 1024 \/ 1024\n\t\t\t\tlog.Printf(\"Cache use: %0.3f \/ %d.000 MB (-%0.3f MB)\", cacheUse, *cacheSize, cacheDel)\n\n\t\t\t\t\/\/ Close file handle\n\t\t\t\tif err := cFile.Close(); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Return cached file\n\t\t\t\tbyteChan <- buf\n\t\t\t\tclose(byteChan)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check for pre-existing stream in progress, so that multiple clients can receive it without\n\t\t\/\/ requesting the stream multiple times. Yeah concurrency!\n\t\tif streamChan, ok := streamMap[s.ID]; ok {\n\t\t\t\/\/ Wait for stream to be ready, and return it\n\t\t\tbyteChan <- <-streamChan\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generate a channel for clients wishing to wait on this stream\n\t\tstreamMap[s.ID] = make(chan []byte, 0)\n\n\t\t\/\/ Open stream, depending on if item is video or audio\n\t\tvar streamOptions gosubsonic.StreamOptions\n\t\tif s.IsVideo {\n\t\t\tstreamOptions = gosubsonic.StreamOptions{\n\t\t\t\tSize: \"1280x720\",\n\t\t\t}\n\n\t\t\tlog.Printf(\"Opening video stream: [%d] %s [%s]\", s.ID, s.FileName, streamOptions.Size)\n\t\t} else {\n\t\t\tlog.Printf(\"Opening audio stream: [%d] %s\", s.ID, s.FileName)\n\t\t}\n\t\tstream, err := subsonic.Stream(s.ID, &streamOptions)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Read in stream\n\t\tfile, err := ioutil.ReadAll(stream)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Close stream\n\t\tif err := stream.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbyteChan <- nil\n\t\t\tclose(byteChan)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Return bytes\n\t\tlog.Printf(\"Closing stream: [%d] %s\", s.ID, s.FileName)\n\t\tbyteChan <- file\n\t\tclose(byteChan)\n\n\t\t\/\/ Attempt to return bytes to others waiting, remove this stream\n\t\tgo func() {\n\t\t\t\/\/ Time out after waiting for 10 seconds\n\t\t\tselect {\n\t\t\tcase streamMap[s.ID] <- file:\n\t\t\tcase <-time.After(time.Second * 10):\n\t\t\t}\n\n\t\t\t\/\/ Remove stream from map\n\t\t\tclose(streamMap[s.ID])\n\t\t\tdelete(streamMap, s.ID)\n\t\t}()\n\n\t\t\/\/ Check for maximum cache size\n\t\tif cacheTotal > *cacheSize*1024*1024 {\n\t\t\tlog.Printf(\"Cache full (%d MB), skipping local cache\", *cacheSize)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Check if cache will overflow if file is added\n\t\tif cacheTotal+s.Size > *cacheSize*1024*1024 {\n\t\t\tlog.Printf(\"File will overflow cache (%0.3f MB), skipping local cache\", float64(s.Size)\/1024\/1024)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ If file is greater than 50MB, skip caching to conserve memory\n\t\tthreshold := 50\n\t\tif s.Size > int64(threshold*1024*1024) {\n\t\t\tlog.Printf(\"File too large (%0.3f > %0d MB), skipping local cache\", float64(s.Size)\/1024\/1024, threshold)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Generate a temporary file\n\t\ttmpFile, err := ioutil.TempFile(os.TempDir(), \"subfs\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Write out temporary file\n\t\tif _, err := tmpFile.Write(file); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Add file to cache map\n\t\tlog.Printf(\"Caching file: [%d] %s\", s.ID, s.FileName)\n\t\tfileCache[s.FileName] = *tmpFile\n\n\t\t\/\/ Add file's size to cache total size\n\t\tcacheTotal = atomic.AddInt64(&cacheTotal, s.Size)\n\n\t\t\/\/ Print some cache metrics\n\t\tcacheUse := float64(cacheTotal) \/ 1024 \/ 1024\n\t\tcacheAdd := float64(s.Size) \/ 1024 \/ 1024\n\t\tlog.Printf(\"Cache use: %0.3f \/ %d.000 MB (+%0.3f MB)\", cacheUse, *cacheSize, cacheAdd)\n\n\t\treturn\n\t}()\n\n\t\/\/ Wait for an event on read\n\tselect {\n\t\/\/ Byte stream channel\n\tcase stream := <-byteChan:\n\t\treturn stream, nil\n\t\/\/ Interrupt channel\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package user\n\nimport (\n\t\"net\/http\"\n\n\t\"code.google.com\/p\/go.crypto\/bcrypt\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/usmanismail\/go-messenger\/go-auth\/database\"\n)\n\nvar log = logging.MustGetLogger(\"user\")\n\ntype User interface {\n\tGetUserId() string\n\tGetPasswordHash() []byte\n}\n\ntype UserS struct {\n\tuserId string\n\tpasswordHash []byte\n}\n\nfunc NewUser(userId string, password string) (User, error) {\n\thash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn &UserS{userId, hash}, nil\n\t}\n}\n\nfunc GetUser(userId string, passwordHash []byte) User {\n\treturn &UserS{userId, passwordHash}\n}\n\nfunc RegisterUser(userData database.UserData, userId string, password string) int {\n\tif len(userId) > 0 && len(password) > 0 {\n\t\tuserObj, _ := NewUser(userId, password)\n\t\t\/\/ Check if user already exists\n\t\tuser, err := userData.GetUser(userObj.GetUserId())\n\t\tif user != nil {\n\t\t\tlog.Warning(\"Duplicate username tried: %s\", userId)\n\t\t\treturn http.StatusConflict\n\t\t}\n\t\t\/\/ Save User to database\n\t\terr = userData.SaveUser(userObj.GetUserId(), userObj.GetPasswordHash())\n\t\tif err != nil {\n\t\t\tlog.Warning(\"Unable to create user: %s\", err.Error())\n\t\t\treturn http.StatusInternalServerError\n\t\t} else {\n\t\t\treturn http.StatusOK\n\t\t}\n\t} else {\n\t\tlog.Debug(\"Unable to create user, username or password missing\")\n\t\treturn http.StatusBadRequest\n\t}\n}\n\nfunc DeleteUser(userData database.UserData, userId string, password string) int {\n\tif len(userId) > 0 && len(password) > 0 {\n\t\tpasswordHash, _ := userData.GetUser(userId)\n\t\tif passwordHash == nil {\n\t\t\tlog.Warning(\"User not found, cannot delete: %s\", userId)\n\t\t\treturn http.StatusNotFound\n\t\t} else {\n\t\t\tif CompareHashAndPassword(passwordHash, password) == nil {\n\t\t\t\tuserData.DeleteUser(userId)\n\t\t\t\treturn http.StatusOK\n\t\t\t} else {\n\t\t\t\treturn http.StatusBadRequest\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tlog.Debug(\"Unable to delete user, username or password missing\")\n\t\treturn http.StatusBadRequest\n\t}\n}\n\nfunc CompareHashAndPassword(hashedPassword []byte, password string) error {\n\terr := bcrypt.CompareHashAndPassword(hashedPassword, []byte(password))\n\tif err != nil {\n\t\tlog.Warning(\"Invalid password: %s\", err.Error())\n\t\treturn err\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (u *UserS) GetUserId() string {\n\treturn u.userId\n}\n\nfunc (u *UserS) GetPasswordHash() []byte {\n\treturn u.passwordHash\n}\n<commit_msg>google => github<commit_after>package user\n\nimport (\n\t\"net\/http\"\n\n\t\"golang.org\/x\/crypto\/bcrypt\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/usmanismail\/go-messenger\/go-auth\/database\"\n)\n\nvar log = logging.MustGetLogger(\"user\")\n\ntype User interface {\n\tGetUserId() string\n\tGetPasswordHash() []byte\n}\n\ntype UserS struct {\n\tuserId string\n\tpasswordHash []byte\n}\n\nfunc NewUser(userId string, password string) (User, error) {\n\thash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn &UserS{userId, hash}, nil\n\t}\n}\n\nfunc GetUser(userId string, passwordHash []byte) User {\n\treturn &UserS{userId, passwordHash}\n}\n\nfunc RegisterUser(userData database.UserData, userId string, password string) int {\n\tif len(userId) > 0 && len(password) > 0 {\n\t\tuserObj, _ := NewUser(userId, password)\n\t\t\/\/ Check if user already exists\n\t\tuser, err := userData.GetUser(userObj.GetUserId())\n\t\tif user != nil {\n\t\t\tlog.Warning(\"Duplicate username tried: %s\", userId)\n\t\t\treturn http.StatusConflict\n\t\t}\n\t\t\/\/ Save User to database\n\t\terr = userData.SaveUser(userObj.GetUserId(), userObj.GetPasswordHash())\n\t\tif err != nil {\n\t\t\tlog.Warning(\"Unable to create user: %s\", err.Error())\n\t\t\treturn http.StatusInternalServerError\n\t\t} else {\n\t\t\treturn http.StatusOK\n\t\t}\n\t} else {\n\t\tlog.Debug(\"Unable to create user, username or password missing\")\n\t\treturn http.StatusBadRequest\n\t}\n}\n\nfunc DeleteUser(userData database.UserData, userId string, password string) int {\n\tif len(userId) > 0 && len(password) > 0 {\n\t\tpasswordHash, _ := userData.GetUser(userId)\n\t\tif passwordHash == nil {\n\t\t\tlog.Warning(\"User not found, cannot delete: %s\", userId)\n\t\t\treturn http.StatusNotFound\n\t\t} else {\n\t\t\tif CompareHashAndPassword(passwordHash, password) == nil {\n\t\t\t\tuserData.DeleteUser(userId)\n\t\t\t\treturn http.StatusOK\n\t\t\t} else {\n\t\t\t\treturn http.StatusBadRequest\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\tlog.Debug(\"Unable to delete user, username or password missing\")\n\t\treturn http.StatusBadRequest\n\t}\n}\n\nfunc CompareHashAndPassword(hashedPassword []byte, password string) error {\n\terr := bcrypt.CompareHashAndPassword(hashedPassword, []byte(password))\n\tif err != nil {\n\t\tlog.Warning(\"Invalid password: %s\", err.Error())\n\t\treturn err\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (u *UserS) GetUserId() string {\n\treturn u.userId\n}\n\nfunc (u *UserS) GetPasswordHash() []byte {\n\treturn u.passwordHash\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"os\"\n\t\"log\"\n\t\"os\/user\"\n\t\"io\/ioutil\"\n\t\"github.com\/kyokomi\/go-gitlab-client\/gogitlab\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"time\"\n\t\"github.com\/wsxiaoys\/terminal\/color\"\n\t\"github.com\/kyokomi\/emoji\"\n\t\"fmt\"\n\t\"strings\"\n\t\"os\/exec\"\n)\n\ntype GitlabAccessConfig struct {\n\tHost string `json:\"host\"`\n\tApiPath string `json:\"api_path\"`\n\tToken string `json:\"token\"`\n\tIconPath string `json:\"icon_path\"`\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.3\"\n\tapp.Name = \"go-gitlab-notifer\"\n\tapp.Usage = \"make an explosive entrance\"\n\n\tapp.Flags = []cli.Flag {\n\t\tcli.BoolFlag{\"gitlab.skip-cert-check\", \"If set to true, gitlab client will skip certificate checking for https, possibly exposing your system to MITM attack.\"},\n\t}\n\n\tgitlab := createGitlab()\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"issue\",\n\t\t\tShortName: \"i\",\n\t\t\tUsage: \"my issue list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintGitlabIssues(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"activity\",\n\t\t\tShortName: \"a\",\n\t\t\tUsage: \"my activiy list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintActivity(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"project\",\n\t\t\tShortName: \"p\",\n\t\t\tUsage: \"my project list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintGitlabProjects(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"tick\",\n\t\t\tShortName: \"t\",\n\t\t\tUsage: \"my activity list N seconds tick\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\"second\", 60, \"second N.\"},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\ttickGitlabActivity(gitlab, c.Int(\"second\"))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ add v0.0.3\n\t\t\tName: \"events\",\n\t\t\tShortName: \"e\",\n\t\t\tUsage: \"chice project events\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\"project-id\", 1, \"projectId.\"},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgetProjectIssues(gitlab, c.Int(\"project-id\"))\n\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc getProjectIssues(gitlab *gogitlab.Gitlab, projectId int) {\n\n\tevents := gitlab.ProjectEvents(projectId)\n\tfor _, event := range events {\n\n\t\tvar iconName string\n\t\tswitch (event.TargetType) {\n\t\tcase \"Issue\":\n\t\t\ticonName = \":beer:\"\n\t\tdefault:\n\t\t\ticonName = \":punch:\"\n\t\t}\n\n\t\t\/\/fmt.Printf(\"ProjectID[%d] action[%s] targetId[%d] targetType[%s] targetTitle[%s]\\n\", event.ProductId, event.ActionName,event.TargetId, event.TargetType, event.TargetTitle)\n\t\tif event.TargetId != 0 {\n\t\t\tactionText := color.Sprintf(\"@y[%s]\", event.ActionName)\n\t\t\trepositoriesText := color.Sprintf(\"@c%s(%d)\", event.TargetType, event.TargetId)\n\t\t\tuserText := color.Sprintf(\"@c%s\", event.Data.UserName)\n\t\t\ttitleText := color.Sprintf(\"@g%s\", event.TargetTitle)\n\t\t\temoji.Println(\"@{\"+iconName+\"}\", actionText, repositoriesText, userText, titleText)\n\n\t\t} else if event.TargetId == 0 {\n\n\t\t\tactionText := color.Sprintf(\"@y[%s]\", event.ActionName)\n\t\t\trepositoriesText := color.Sprintf(\"@c%s\", event.Data.Repository.Name)\n\t\t\tuserText := color.Sprintf(\"@c%s\", event.Data.UserName)\n\t\t\tvar titleText string\n\t\t\tif event.Data.TotalCommitsCount > 0 {\n\t\t\t\tcommitMessage := event.Data.Commits[0].Message\n\t\t\t\tcommitMessage = strings.Replace(commitMessage, \"\\n\\n\", \"\\t\", -1)\n\t\t\t\ttitleText = color.Sprintf(\"@g%s\", commitMessage)\n\t\t\t} else if event.Data.Before == \"0000000000000000000000000000000000000000\" {\n\t\t\t\ttitleText = color.Sprintf(\"@g%s %s\", emoji.Sprint(\"@{:fire:}\"), \"create New branch\")\n\t\t\t}\n\t\t\temoji.Println(\"@{\"+iconName+\"}\", actionText, repositoriesText, userText, titleText)\n\n\/\/\t\t\tfmt.Println(\" \\t user -> \", event.Data.UserName, event.Data.UserId)\n\/\/\t\t\tfmt.Println(\" \\t author -> \", event.Data.AuthorId)\n\/\/\n\/\/\t\t\tfmt.Println(\" \\t\\t name -> \", event.Data.Repository.Name)\n\/\/\t\t\tfmt.Println(\" \\t\\t description -> \", event.Data.Repository.Description)\n\/\/\t\t\tfmt.Println(\" \\t\\t gitUrl -> \", event.Data.Repository.GitUrl)\n\/\/\t\t\tfmt.Println(\" \\t\\t pageUrl -> \", event.Data.Repository.PageUrl)\n\/\/\n\/\/\t\t\tfmt.Println(\" \\t\\t totalCount -> \", event.Data.TotalCommitsCount)\n\/\/\n\/\/\t\t\tif event.Data.TotalCommitsCount > 0 {\n\/\/\t\t\t\tfmt.Println(\" \\t\\t message -> \", event.Data.Commits[0].Message)\n\/\/\t\t\t\tfmt.Println(\" \\t\\t time -> \", event.Data.Commits[0].Timestamp)\n\/\/\t\t\t}\n\t\t}\n\t}\n\/\/\n\/\/\tfor _, event := range events {\n\/\/\n\/\/\t}\n}\n\nfunc tickGitlabActivity(gitlab *gogitlab.Gitlab) {\n\n\tch := time.Tick(60 * time.Second)\n\n\tlastedFeed, err := gitlab.Activity()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tcurrentUser, err := gitlab.CurrentUser()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor i := len(lastedFeed.Entries) - 1; i >= 0; i-- {\n\t\tfeedCommentText := createFeedCommentText(¤tUser, lastedFeed.Entries[i])\n\t\tfmt.Println(feedCommentText)\n\t}\n\n\tfor now := range ch {\n\t\temoji.Printf(\"@{:beer:}[%v] tick \\n\", now)\n\n\t\tfeed, err := gitlab.Activity()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tfor i := len(feed.Entries) - 1; i >= 0; i-- {\n\t\t\tfeedComment := feed.Entries[i]\n\n\t\t\tif feedComment.Updated.After(lastedFeed.Entries[0].Updated) {\n\t\t\t\tfeedCommentText := createFeedShortCommentText(feedComment)\n\t\t\t\tfmt.Println(feedCommentText)\n\t\t\t\tNotifier(feedCommentText, feedComment.Id, feedComment.Summary, \"\")\n\t\t\t}\n\t\t}\n\t\tlastedFeed = feed\n\t}\n}\n\n\/\/ 自分の関わってるプロジェクトを取得して一覧表示する\nfunc printGitlabProjects(gitlab *gogitlab.Gitlab) {\n\n\tprojects, err := gitlab.Projects()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, project := range projects {\n\t\tfmt.Printf(\"[%4d] [%20s] (%s)\\n\", project.Id, project.Name, project.HttpRepoUrl)\n\t}\n}\n\n\/\/ 自分のIssueを取得して一覧表示する\nfunc printGitlabIssues(gitlab *gogitlab.Gitlab) {\n\n\tissues, err := gitlab.Issues()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, issue := range issues {\n\t\tfmt.Printf(\"[%4d] [%s]\\n\", issue.Id, issue.Title)\n\t}\n}\n\n\/\/ Activity(dashboard.atpm)の表示。XMLパースなので情報欠落が辛い感じ。廃止予定\nfunc printActivity(gitlab *gogitlab.Gitlab) {\n\n\tfeed, err := gitlab.Activity()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\tcurrentUser, err := gitlab.CurrentUser()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, feedCommit := range feed.Entries {\n\t\tfeedCommentText := createFeedCommentText(¤tUser, feedCommit)\n\t\tfmt.Println(feedCommentText)\n\t}\n}\n\n\/\/ 絵文字付きターミナル用のコメント作成(Activiyベース)\nfunc createFeedCommentText(currentUser *gogitlab.User, feedCommit *gogitlab.FeedCommit) string {\n\n\tvar iconName string\n\tif strings.Contains(feedCommit.Title, \"commented\") {\n\t\ticonName = \":speech_balloon:\"\n\t} else if strings.Contains(feedCommit.Title, \"pushed\") {\n\t\ticonName = \":punch:\"\n\t} else if strings.Contains(feedCommit.Title, \"closed\") {\n\t\ticonName = \":white_check_mark:\"\n\t} else if strings.Contains(feedCommit.Title, \"opened\") {\n\t\ticonName = \":fire:\"\n\t} else if strings.Contains(feedCommit.Title, \"accepted\") {\n\t\ticonName = \":atm:\"\n\t} else {\n\t\ticonName = \":beer:\"\n\t}\n\n\tif currentUser.Name == feedCommit.Author.Name {\n\t\treturn emoji.Sprint(\"@{\"+iconName+\"}\", color.Sprintf(\"@y[%s] %s\", feedCommit.Updated.Format(time.ANSIC), feedCommit.Title))\n\t} else {\n\t\treturn emoji.Sprint(\"@{\"+iconName+\"}\", fmt.Sprintf(\"[%s] %s\", feedCommit.Updated.Format(time.ANSIC), feedCommit.Title))\n\t}\n}\n\n\/\/ 絵文字付き通知用の簡易コメント作成(Activiyベース)\nfunc createFeedShortCommentText(feedCommit *gogitlab.FeedCommit) string {\n\n\tvar iconName string\n\tif strings.Contains(feedCommit.Title, \"commented\") {\n\t\ticonName = \":speech_balloon:\"\n\t} else if strings.Contains(feedCommit.Title, \"pushed\") {\n\t\ticonName = \":punch:\"\n\t} else if strings.Contains(feedCommit.Title, \"closed\") {\n\t\ticonName = \":white_check_mark:\"\n\t} else if strings.Contains(feedCommit.Title, \"opened\") {\n\t\ticonName = \":fire:\"\n\t} else if strings.Contains(feedCommit.Title, \"accepted\") {\n\t\ticonName = \":atm:\"\n\t} else {\n\t\ticonName = \":beer:\"\n\t}\n\n\treturn emoji.Sprint(\"@{\"+iconName+\"}\", fmt.Sprintf(\"%s\", feedCommit.Title))\n}\n\n\/\/ Gitlabクライアントを作成する\nfunc createGitlab() *gogitlab.Gitlab {\n\tconfig := readGitlabAccessTokenJson()\n\n\t\/\/ --gitlab.skip-cert-checkを読み込む\n\tflag.Parse()\n\t\n\treturn gogitlab.NewGitlab(config.Host, config.ApiPath, config.Token)\n}\n\n\/\/ アクセストークンを保存してるローカルファイルを読み込んで返却\nfunc readGitlabAccessTokenJson() GitlabAccessConfig {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal( err )\n\t\tos.Exit(1)\n\t}\n\n\tfile, e := ioutil.ReadFile(usr.HomeDir + \"\/.ggn\/config.json\")\n\tif e != nil {\n\t\tlog.Fatal(\"Config file error: %v\\n\", e)\n\t\tos.Exit(1)\n\t}\n\n\tvar config GitlabAccessConfig\n\tjson.Unmarshal(file, &config)\n\tconfig.IconPath = usr.HomeDir + \"\/.ggn\/logo.png\"\n\n\treturn config\n}\n\n\/\/ 通知のやつ $ brew install terminal-notiferが必要\nfunc Notifier(title, message, subTitle, openUrl string) {\n\tconfig := readGitlabAccessTokenJson()\n\tcmd := exec.Command(\"terminal-notifier\", \"-title\", title, \"-message\", message, \"-subtitle\", subTitle, \"-appIcon\", config.IconPath, \"-open\", openUrl)\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>add project issue command<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"os\"\n\t\"log\"\n\t\"os\/user\"\n\t\"io\/ioutil\"\n\t\"github.com\/kyokomi\/go-gitlab-client\/gogitlab\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"time\"\n\t\"github.com\/wsxiaoys\/terminal\/color\"\n\t\"github.com\/kyokomi\/emoji\"\n\t\"fmt\"\n\t\"strings\"\n\t\"os\/exec\"\n)\n\ntype GitlabAccessConfig struct {\n\tHost string `json:\"host\"`\n\tApiPath string `json:\"api_path\"`\n\tToken string `json:\"token\"`\n\tIconPath string `json:\"icon_path\"`\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.3\"\n\tapp.Name = \"go-gitlab-notifer\"\n\tapp.Usage = \"make an explosive entrance\"\n\n\tapp.Flags = []cli.Flag {\n\t\tcli.BoolFlag{\"gitlab.skip-cert-check\", \"If set to true, gitlab client will skip certificate checking for https, possibly exposing your system to MITM attack.\"},\n\t}\n\n\tgitlab := createGitlab()\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"issue\",\n\t\t\tShortName: \"i\",\n\t\t\tUsage: \"my issue list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintGitlabIssues(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"activity\",\n\t\t\tShortName: \"a\",\n\t\t\tUsage: \"my activiy list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintActivity(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"project\",\n\t\t\tShortName: \"p\",\n\t\t\tUsage: \"my project list\",\n\t\t\tAction: func(_ *cli.Context) {\n\t\t\t\tprintGitlabProjects(gitlab)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"tick\",\n\t\t\tShortName: \"t\",\n\t\t\tUsage: \"my activity list N seconds tick\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\"second\", 60, \"second N.\"},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\ttickGitlabActivity(gitlab, c.Int(\"second\"))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ add v0.0.3\n\t\t\tName: \"events\",\n\t\t\tShortName: \"e\",\n\t\t\tUsage: \"chice project events\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\"project-id\", 1, \"projectId.\"},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tgetProjectIssues(gitlab, c.Int(\"project-id\"))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"project_issue\",\n\t\t\tShortName: \"pi\",\n\t\t\tUsage: \"project issue list\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\"project-id\", 1, \"projectId.\"},\n\t\t\t},\n\t\t\tAction: func(c *cli.Context) {\n\t\t\t\tprintGitlabProjectIssues(gitlab, c.Int(\"project-id\"))\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc getProjectIssues(gitlab *gogitlab.Gitlab, projectId int) {\n\n\tevents := gitlab.ProjectEvents(projectId)\n\tfor _, event := range events {\n\n\t\tvar iconName string\n\t\tswitch (event.TargetType) {\n\t\tcase \"Issue\":\n\t\t\ticonName = \":beer:\"\n\t\tdefault:\n\t\t\ticonName = \":punch:\"\n\t\t}\n\n\t\t\/\/fmt.Printf(\"ProjectID[%d] action[%s] targetId[%d] targetType[%s] targetTitle[%s]\\n\", event.ProductId, event.ActionName,event.TargetId, event.TargetType, event.TargetTitle)\n\t\tif event.TargetId != 0 {\n\t\t\tactionText := color.Sprintf(\"@y[%s]\", event.ActionName)\n\t\t\trepositoriesText := color.Sprintf(\"@c%s(%d)\", event.TargetType, event.TargetId)\n\t\t\tuserText := color.Sprintf(\"@c%s\", event.Data.UserName)\n\t\t\ttitleText := color.Sprintf(\"@g%s\", event.TargetTitle)\n\t\t\temoji.Println(\"@{\"+iconName+\"}\", actionText, repositoriesText, userText, titleText)\n\n\t\t} else if event.TargetId == 0 {\n\n\t\t\tactionText := color.Sprintf(\"@y[%s]\", event.ActionName)\n\t\t\trepositoriesText := color.Sprintf(\"@c%s\", event.Data.Repository.Name)\n\t\t\tuserText := color.Sprintf(\"@c%s\", event.Data.UserName)\n\t\t\tvar titleText string\n\t\t\tif event.Data.TotalCommitsCount > 0 {\n\t\t\t\tcommitMessage := event.Data.Commits[0].Message\n\t\t\t\tcommitMessage = strings.Replace(commitMessage, \"\\n\\n\", \"\\t\", -1)\n\t\t\t\ttitleText = color.Sprintf(\"@g%s\", commitMessage)\n\t\t\t} else if event.Data.Before == \"0000000000000000000000000000000000000000\" {\n\t\t\t\ttitleText = color.Sprintf(\"@g%s %s\", emoji.Sprint(\"@{:fire:}\"), \"create New branch\")\n\t\t\t}\n\t\t\temoji.Println(\"@{\"+iconName+\"}\", actionText, repositoriesText, userText, titleText)\n\n\/\/\t\t\tfmt.Println(\" \\t user -> \", event.Data.UserName, event.Data.UserId)\n\/\/\t\t\tfmt.Println(\" \\t author -> \", event.Data.AuthorId)\n\/\/\n\/\/\t\t\tfmt.Println(\" \\t\\t name -> \", event.Data.Repository.Name)\n\/\/\t\t\tfmt.Println(\" \\t\\t description -> \", event.Data.Repository.Description)\n\/\/\t\t\tfmt.Println(\" \\t\\t gitUrl -> \", event.Data.Repository.GitUrl)\n\/\/\t\t\tfmt.Println(\" \\t\\t pageUrl -> \", event.Data.Repository.PageUrl)\n\/\/\n\/\/\t\t\tfmt.Println(\" \\t\\t totalCount -> \", event.Data.TotalCommitsCount)\n\/\/\n\/\/\t\t\tif event.Data.TotalCommitsCount > 0 {\n\/\/\t\t\t\tfmt.Println(\" \\t\\t message -> \", event.Data.Commits[0].Message)\n\/\/\t\t\t\tfmt.Println(\" \\t\\t time -> \", event.Data.Commits[0].Timestamp)\n\/\/\t\t\t}\n\t\t}\n\t}\n\/\/\n\/\/\tfor _, event := range events {\n\/\/\n\/\/\t}\n}\n\nfunc tickGitlabActivity(gitlab *gogitlab.Gitlab, tickSecond int) {\n\n\tch := time.Tick(time.Second * time.Duration(10))\n\n\tlastedFeed, err := gitlab.Activity()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tcurrentUser, err := gitlab.CurrentUser()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor i := len(lastedFeed.Entries) - 1; i >= 0; i-- {\n\t\tfeedCommentText := createFeedCommentText(¤tUser, lastedFeed.Entries[i])\n\t\tfmt.Println(feedCommentText)\n\t}\n\n\tfor now := range ch {\n\t\temoji.Printf(\"@{:beer:}[%v] tick \\n\", now)\n\n\t\tfeed, err := gitlab.Activity()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tfor i := len(feed.Entries) - 1; i >= 0; i-- {\n\t\t\tfeedComment := feed.Entries[i]\n\n\t\t\tif feedComment.Updated.After(lastedFeed.Entries[0].Updated) {\n\t\t\t\tfeedCommentText := createFeedShortCommentText(feedComment)\n\t\t\t\tfmt.Println(feedCommentText)\n\t\t\t\tNotifier(feedCommentText, feedComment.Id, feedComment.Summary, \"\")\n\t\t\t}\n\t\t}\n\t\tlastedFeed = feed\n\t}\n}\n\n\/\/ 自分の関わってるプロジェクトを取得して一覧表示する\nfunc printGitlabProjects(gitlab *gogitlab.Gitlab) {\n\n\tprojects, err := gitlab.Projects()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, project := range projects {\n\t\tfmt.Printf(\"[%4d] [%20s] (%s)\\n\", project.Id, project.Name, project.HttpRepoUrl)\n\t}\n}\n\n\/\/ 自分のIssueを取得して一覧表示する\nfunc printGitlabIssues(gitlab *gogitlab.Gitlab) {\n\n\tissues, err := gitlab.Issues()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, issue := range issues {\n\t\tfmt.Printf(\"[%4d] [%s]\\n\", issue.Id, issue.Title)\n\t}\n}\n\nfunc printGitlabProjectIssues(gitlab *gogitlab.Gitlab, projectId int) {\n\n\tpage := 1\n\tfor {\n\t\tissues, err := gitlab.ProjectIssues(projectId, page)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif len(issues) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, issue := range issues {\n\t\t\tif issue.State == \"closed\" {\n\t\t\t\tcolor.Printf(\"@r[%4d(%d)] [%s] %s state=%s\\n\", issue.Id, issue.LocalId, issue.Title, issue.Description, issue.State)\n\t\t\t} else {\n\t\t\t\tcolor.Printf(\"@g[%4d(%d)] [%s] %s state=%s\\n\", issue.Id, issue.LocalId, issue.Title, issue.Description, issue.State)\n\t\t\t}\n\t\t}\n\n\t\tpage++\n\t}\n}\n\n\/\/ Activity(dashboard.atpm)の表示。XMLパースなので情報欠落が辛い感じ。廃止予定\nfunc printActivity(gitlab *gogitlab.Gitlab) {\n\n\tfeed, err := gitlab.Activity()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\tcurrentUser, err := gitlab.CurrentUser()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t\treturn\n\t}\n\n\tfor _, feedCommit := range feed.Entries {\n\t\tfeedCommentText := createFeedCommentText(¤tUser, feedCommit)\n\t\tfmt.Println(feedCommentText)\n\t}\n}\n\n\/\/ 絵文字付きターミナル用のコメント作成(Activiyベース)\nfunc createFeedCommentText(currentUser *gogitlab.User, feedCommit *gogitlab.FeedCommit) string {\n\n\tvar iconName string\n\tif strings.Contains(feedCommit.Title, \"commented\") {\n\t\ticonName = \":speech_balloon:\"\n\t} else if strings.Contains(feedCommit.Title, \"pushed\") {\n\t\ticonName = \":punch:\"\n\t} else if strings.Contains(feedCommit.Title, \"closed\") {\n\t\ticonName = \":white_check_mark:\"\n\t} else if strings.Contains(feedCommit.Title, \"opened\") {\n\t\ticonName = \":fire:\"\n\t} else if strings.Contains(feedCommit.Title, \"accepted\") {\n\t\ticonName = \":atm:\"\n\t} else {\n\t\ticonName = \":beer:\"\n\t}\n\n\tif currentUser.Name == feedCommit.Author.Name {\n\t\treturn emoji.Sprint(\"@{\"+iconName+\"}\", color.Sprintf(\"@y[%s] %s\", feedCommit.Updated.Format(time.ANSIC), feedCommit.Title))\n\t} else {\n\t\treturn emoji.Sprint(\"@{\"+iconName+\"}\", fmt.Sprintf(\"[%s] %s\", feedCommit.Updated.Format(time.ANSIC), feedCommit.Title))\n\t}\n}\n\n\/\/ 絵文字付き通知用の簡易コメント作成(Activiyベース)\nfunc createFeedShortCommentText(feedCommit *gogitlab.FeedCommit) string {\n\n\tvar iconName string\n\tif strings.Contains(feedCommit.Title, \"commented\") {\n\t\ticonName = \":speech_balloon:\"\n\t} else if strings.Contains(feedCommit.Title, \"pushed\") {\n\t\ticonName = \":punch:\"\n\t} else if strings.Contains(feedCommit.Title, \"closed\") {\n\t\ticonName = \":white_check_mark:\"\n\t} else if strings.Contains(feedCommit.Title, \"opened\") {\n\t\ticonName = \":fire:\"\n\t} else if strings.Contains(feedCommit.Title, \"accepted\") {\n\t\ticonName = \":atm:\"\n\t} else {\n\t\ticonName = \":beer:\"\n\t}\n\n\treturn emoji.Sprint(\"@{\"+iconName+\"}\", fmt.Sprintf(\"%s\", feedCommit.Title))\n}\n\n\/\/ Gitlabクライアントを作成する\nfunc createGitlab() *gogitlab.Gitlab {\n\tconfig := readGitlabAccessTokenJson()\n\n\t\/\/ --gitlab.skip-cert-checkを読み込む\n\tflag.Parse()\n\t\n\treturn gogitlab.NewGitlab(config.Host, config.ApiPath, config.Token)\n}\n\n\/\/ アクセストークンを保存してるローカルファイルを読み込んで返却\nfunc readGitlabAccessTokenJson() GitlabAccessConfig {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal( err )\n\t\tos.Exit(1)\n\t}\n\n\tfile, err := ioutil.ReadFile(usr.HomeDir + \"\/.ggn\/config.json\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Config file error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tvar config GitlabAccessConfig\n\tjson.Unmarshal(file, &config)\n\tconfig.IconPath = usr.HomeDir + \"\/.ggn\/logo.png\"\n\n\treturn config\n}\n\n\/\/ 通知のやつ $ brew install terminal-notiferが必要\nfunc Notifier(title, message, subTitle, openUrl string) {\n\tconfig := readGitlabAccessTokenJson()\n\tcmd := exec.Command(\"terminal-notifier\", \"-title\", title, \"-message\", message, \"-subtitle\", subTitle, \"-appIcon\", config.IconPath, \"-open\", openUrl)\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage service\n\nimport (\n\t\"github.com\/keybase\/client\/go\/engine\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ DeviceHandler is the RPC handler for the device interface.\ntype DeviceHandler struct {\n\t*BaseHandler\n\tlibkb.Contextified\n}\n\n\/\/ NewDeviceHandler creates a DeviceHandler for the xp transport.\nfunc NewDeviceHandler(xp rpc.Transporter, g *libkb.GlobalContext) *DeviceHandler {\n\treturn &DeviceHandler{\n\t\tBaseHandler: NewBaseHandler(xp),\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\n\/\/ DeviceList returns a list of all the devices for a user.\nfunc (h *DeviceHandler) DeviceList(_ context.Context, sessionID int) ([]keybase1.Device, error) {\n\tctx := &engine.Context{\n\t\tLogUI: h.getLogUI(sessionID),\n\t\tSessionID: sessionID,\n\t}\n\teng := engine.NewDevList(h.G())\n\tif err := engine.RunEngine(eng, ctx); err != nil {\n\t\treturn nil, err\n\t}\n\treturn eng.List(), nil\n}\n\n\/\/ DeviceAdd starts the kex2 device provisioning on the\n\/\/ provisioner (device X\/C1)\nfunc (h *DeviceHandler) DeviceAdd(_ context.Context, sessionID int) error {\n\tctx := &engine.Context{\n\t\tProvisionUI: h.getProvisionUI(sessionID),\n\t\tSecretUI: h.getSecretUI(sessionID, h.G()),\n\t\tSessionID: sessionID,\n\t}\n\teng := engine.NewDeviceAdd(h.G())\n\treturn engine.RunEngine(eng, ctx)\n}\n\n\/\/ CheckDeviceNameFormat verifies that the device name has a valid\n\/\/ format.\nfunc (h *DeviceHandler) CheckDeviceNameFormat(_ context.Context, arg keybase1.CheckDeviceNameFormatArg) (bool, error) {\n\treturn libkb.CheckDeviceName.F(arg.Name), nil\n}\n<commit_msg>Return checker hint<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage service\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/keybase\/client\/go\/engine\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n\trpc \"github.com\/keybase\/go-framed-msgpack-rpc\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ DeviceHandler is the RPC handler for the device interface.\ntype DeviceHandler struct {\n\t*BaseHandler\n\tlibkb.Contextified\n}\n\n\/\/ NewDeviceHandler creates a DeviceHandler for the xp transport.\nfunc NewDeviceHandler(xp rpc.Transporter, g *libkb.GlobalContext) *DeviceHandler {\n\treturn &DeviceHandler{\n\t\tBaseHandler: NewBaseHandler(xp),\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\n\/\/ DeviceList returns a list of all the devices for a user.\nfunc (h *DeviceHandler) DeviceList(_ context.Context, sessionID int) ([]keybase1.Device, error) {\n\tctx := &engine.Context{\n\t\tLogUI: h.getLogUI(sessionID),\n\t\tSessionID: sessionID,\n\t}\n\teng := engine.NewDevList(h.G())\n\tif err := engine.RunEngine(eng, ctx); err != nil {\n\t\treturn nil, err\n\t}\n\treturn eng.List(), nil\n}\n\n\/\/ DeviceAdd starts the kex2 device provisioning on the\n\/\/ provisioner (device X\/C1)\nfunc (h *DeviceHandler) DeviceAdd(_ context.Context, sessionID int) error {\n\tctx := &engine.Context{\n\t\tProvisionUI: h.getProvisionUI(sessionID),\n\t\tSecretUI: h.getSecretUI(sessionID, h.G()),\n\t\tSessionID: sessionID,\n\t}\n\teng := engine.NewDeviceAdd(h.G())\n\treturn engine.RunEngine(eng, ctx)\n}\n\n\/\/ CheckDeviceNameFormat verifies that the device name has a valid\n\/\/ format.\nfunc (h *DeviceHandler) CheckDeviceNameFormat(_ context.Context, arg keybase1.CheckDeviceNameFormatArg) (bool, error) {\n\tok := libkb.CheckDeviceName.F(arg.Name)\n\tif ok {\n\t\treturn ok, nil\n\t}\n\treturn false, errors.New(libkb.CheckDeviceName.Hint)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"oval\"\n)\n\nconst (\n\t_ = iota\n\tOPMODE_LIST\n\tOPMODE_RUN\n)\n\nvar cfg config\n\nfunc runMode() {\n\tod, ret := oval.Parse(cfg.flagRun)\n\tif ret != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", ret)\n\t\tos.Exit(1)\n\t}\n\n\tresults := oval.Execute(od)\n\tfor _, v := range results {\n\t\tfmt.Fprintf(os.Stdout, \"%v %v %v\\n\", v.ID, v.StatusString(), v.Title)\n\t}\n}\n\nfunc listMode() {\n\tod, ret := oval.Parse(cfg.flagList)\n\tif ret != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", ret)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, v := range od.Definitions.Definitions {\n\t\tfmt.Printf(\"%v %v\\n\", v.ID, v.Metadata.Title)\n\t}\n}\n\nfunc main() {\n\tvar opmode int = 0\n\n\tcfg = defaultConfig()\n\tflag.BoolVar(&cfg.flagDebug, \"d\", false, \"enable debugging\")\n\tflag.StringVar(&cfg.flagList, \"l\", \"path\", \"list checks\")\n\tflag.StringVar(&cfg.flagRun, \"r\", \"path\", \"run checks\")\n\tflag.IntVar(&cfg.maxChecks, \"n\", 10, \"concurrent checks\")\n\tif len(os.Args) < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\tflag.Parse()\n\n\tvar validmode bool = false\n\tif cfg.flagList != \"path\" {\n\t\topmode = OPMODE_LIST\n\t\tvalidmode = true\n\t} else if cfg.flagRun != \"path\" {\n\t\topmode = OPMODE_RUN\n\t\tvalidmode = true\n\t}\n\tif !validmode {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\toval.Init()\n\n\tif cfg.flagDebug {\n\t\tsetDebug(true)\n\t\t\/\/ If we enable debugging on the command line we also enable\n\t\t\/\/ it in the OVAL library.\n\t\toval.SetDebug(true)\n\t\tdebugPrint(\"debugging enabled\\n\")\n\t}\n\toval.SetMaxChecks(cfg.maxChecks)\n\n\tswitch opmode {\n\tcase OPMODE_LIST:\n\t\tdebugPrint(\"entering list mode\\n\")\n\t\tlistMode()\n\tcase OPMODE_RUN:\n\t\tdebugPrint(\"entering run mode\\n\")\n\t\trunMode()\n\tdefault:\n\t\tflag.Usage()\n\t}\n}\n<commit_msg>results notice field on errors and true evaluations<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"oval\"\n)\n\nconst (\n\t_ = iota\n\tOPMODE_LIST\n\tOPMODE_RUN\n)\n\nvar cfg config\n\nfunc runMode() {\n\tod, ret := oval.Parse(cfg.flagRun)\n\tif ret != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", ret)\n\t\tos.Exit(1)\n\t}\n\n\tresults := oval.Execute(od)\n\tfor _, v := range results {\n\t\tnotice := \"--\"\n\t\tif v.StatusString() != \"false\" {\n\t\t\tnotice = \"!!\"\n\t\t}\n\t\tfmt.Fprintf(os.Stdout, \"%v %v %v %v\\n\", notice, v.ID, v.StatusString(), v.Title)\n\t}\n}\n\nfunc listMode() {\n\tod, ret := oval.Parse(cfg.flagList)\n\tif ret != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", ret)\n\t\tos.Exit(1)\n\t}\n\n\tfor _, v := range od.Definitions.Definitions {\n\t\tfmt.Printf(\"%v %v\\n\", v.ID, v.Metadata.Title)\n\t}\n}\n\nfunc main() {\n\tvar opmode int = 0\n\n\tcfg = defaultConfig()\n\tflag.BoolVar(&cfg.flagDebug, \"d\", false, \"enable debugging\")\n\tflag.StringVar(&cfg.flagList, \"l\", \"path\", \"list checks\")\n\tflag.StringVar(&cfg.flagRun, \"r\", \"path\", \"run checks\")\n\tflag.IntVar(&cfg.maxChecks, \"n\", 10, \"concurrent checks\")\n\tif len(os.Args) < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\tflag.Parse()\n\n\tvar validmode bool = false\n\tif cfg.flagList != \"path\" {\n\t\topmode = OPMODE_LIST\n\t\tvalidmode = true\n\t} else if cfg.flagRun != \"path\" {\n\t\topmode = OPMODE_RUN\n\t\tvalidmode = true\n\t}\n\tif !validmode {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\toval.Init()\n\n\tif cfg.flagDebug {\n\t\tsetDebug(true)\n\t\t\/\/ If we enable debugging on the command line we also enable\n\t\t\/\/ it in the OVAL library.\n\t\toval.SetDebug(true)\n\t\tdebugPrint(\"debugging enabled\\n\")\n\t}\n\toval.SetMaxChecks(cfg.maxChecks)\n\n\tswitch opmode {\n\tcase OPMODE_LIST:\n\t\tdebugPrint(\"entering list mode\\n\")\n\t\tlistMode()\n\tcase OPMODE_RUN:\n\t\tdebugPrint(\"entering run mode\\n\")\n\t\trunMode()\n\tdefault:\n\t\tflag.Usage()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package servenv\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/netutil\"\n\t\"github.com\/youtube\/vitess\/go\/proc\"\n)\n\nvar (\n\tonCloseHooks hooks\n\n\t\/\/ filled in when calling Run or RunSecure\n\tListeningURL url.URL\n)\n\n\/\/ Run starts listening for RPC and HTTP requests on the given port,\n\/\/ and blocks until it the process gets a signal.\nfunc Run(port int) {\n\tonRunHooks.Fire()\n\tRunSecure(port, 0, \"\", \"\", \"\")\n}\n\n\/\/ RunSecure is like Run, but it additionally listens for RPC and HTTP\n\/\/ requests using TLS on securePort, using the passed certificate,\n\/\/ key, and CA certificate.\nfunc RunSecure(port int, securePort int, cert, key, caCert string) {\n\tonRunHooks.Fire()\n\tServeRPC()\n\n\tl, err := proc.Listen(fmt.Sprintf(\"%v\", port))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thost, err := netutil.FullyQualifiedHostname()\n\tif err != nil {\n\t\thost, err = os.Hostname()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"os.Hostname() failed: %v\", err)\n\t\t}\n\t}\n\tListeningURL = url.URL{\n\t\tScheme: \"http\",\n\t\tHost: fmt.Sprintf(\"%v:%v\", host, port),\n\t\tPath: \"\/\",\n\t}\n\n\tgo http.Serve(l, nil)\n\n\tif securePort != 0 {\n\t\tlog.Infof(\"listening on secure port %v\", securePort)\n\t\tSecureServe(fmt.Sprintf(\":%d\", securePort), cert, key, caCert)\n\t}\n\tproc.Wait()\n\tl.Close()\n\tClose()\n}\n\n\/\/ Close runs any registered exit hooks in parallel.\nfunc Close() {\n\tonCloseHooks.Fire()\n\tListeningURL = url.URL{}\n}\n\n\/\/ OnClose registers f to be run at the end of the app lifecycle. All\n\/\/ hooks are run in parallel.\nfunc OnClose(f func()) {\n\tonCloseHooks.Add(f)\n}\n<commit_msg>Fixing fatal -> fatalf<commit_after>package servenv\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/netutil\"\n\t\"github.com\/youtube\/vitess\/go\/proc\"\n)\n\nvar (\n\tonCloseHooks hooks\n\n\t\/\/ filled in when calling Run or RunSecure\n\tListeningURL url.URL\n)\n\n\/\/ Run starts listening for RPC and HTTP requests on the given port,\n\/\/ and blocks until it the process gets a signal.\nfunc Run(port int) {\n\tonRunHooks.Fire()\n\tRunSecure(port, 0, \"\", \"\", \"\")\n}\n\n\/\/ RunSecure is like Run, but it additionally listens for RPC and HTTP\n\/\/ requests using TLS on securePort, using the passed certificate,\n\/\/ key, and CA certificate.\nfunc RunSecure(port int, securePort int, cert, key, caCert string) {\n\tonRunHooks.Fire()\n\tServeRPC()\n\n\tl, err := proc.Listen(fmt.Sprintf(\"%v\", port))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thost, err := netutil.FullyQualifiedHostname()\n\tif err != nil {\n\t\thost, err = os.Hostname()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"os.Hostname() failed: %v\", err)\n\t\t}\n\t}\n\tListeningURL = url.URL{\n\t\tScheme: \"http\",\n\t\tHost: fmt.Sprintf(\"%v:%v\", host, port),\n\t\tPath: \"\/\",\n\t}\n\n\tgo http.Serve(l, nil)\n\n\tif securePort != 0 {\n\t\tlog.Infof(\"listening on secure port %v\", securePort)\n\t\tSecureServe(fmt.Sprintf(\":%d\", securePort), cert, key, caCert)\n\t}\n\tproc.Wait()\n\tl.Close()\n\tClose()\n}\n\n\/\/ Close runs any registered exit hooks in parallel.\nfunc Close() {\n\tonCloseHooks.Fire()\n\tListeningURL = url.URL{}\n}\n\n\/\/ OnClose registers f to be run at the end of the app lifecycle. All\n\/\/ hooks are run in parallel.\nfunc OnClose(f func()) {\n\tonCloseHooks.Add(f)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"honnef.co\/go\/js\/dom\"\n)\n\ntype Canvas struct {\n\tctx *dom.CanvasRenderingContext2D\n\tAnimate func(t time.Duration)\n}\n\nfunc MakeCanvas(id string) *Canvas {\n\tc := Canvas{}\n\n\tdoc := dom.GetWindow().Document()\n\tcanvas := doc.GetElementByID(id).(*dom.HTMLCanvasElement)\n\tc.ctx = canvas.GetContext2d()\n\treturn &c\n}\n\nfunc main() {\n\n\tjs.Global.Call(\"addEventListener\", \"load\", func() {\n\t\tdoc := dom.GetWindow().Document()\n\t\timg := doc.GetElementByID(\"img_elephant\").(*dom.HTMLImageElement)\n\t\tcanvas := MakeCanvas(\"canvas\")\n\t\tprintln(img)\n\t\tvar x = 1\n\t\tvar aniFrm func(t time.Duration)\n\t\taniFrm = func(t time.Duration) {\n\t\t\tcanvas.ctx.FillStyle = \"black\"\n\t\t\tcanvas.ctx.FillRect(0, 0, 640, 480)\n\t\t\tcanvas.ctx.FillStyle = \"red\"\n\t\t\ts := fmt.Sprint(t)\n\t\t\tcanvas.ctx.FillText(s, x, 100, 300)\n\n\t\t\tcanvas.ctx.Call(\"drawImage\", img.Object, x, 100, 100, 100)\n\n\t\t\tx++\n\t\t\tif x > 640 {\n\t\t\t\tx = 0\n\t\t\t}\n\t\t\tdom.GetWindow().RequestAnimationFrame(aniFrm)\n\t\t}\n\t\tdom.GetWindow().RequestAnimationFrame(aniFrm)\n\t})\n}\n<commit_msg>adding helper funtions<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/gopherjs\/gopherjs\/js\"\n\t\"honnef.co\/go\/js\/dom\"\n)\n\ntype Canvas struct {\n\tctx *dom.CanvasRenderingContext2D\n\tanimateFunc func(t time.Duration)\n\tanimateEnable bool\n}\n\nfunc MakeCanvas(id string) *Canvas {\n\tc := Canvas{}\n\n\tdoc := dom.GetWindow().Document()\n\tcanvas := doc.GetElementByID(id).(*dom.HTMLCanvasElement)\n\tc.ctx = canvas.GetContext2d()\n\treturn &c\n}\n\nfunc (c *Canvas) SetAnimateFunc(f func(t time.Duration)) {\n\tc.animateFunc = f\n}\n\nfunc (c *Canvas) doAnimate(t time.Duration) {\n\tif c.animateFunc != nil && c.animateEnable {\n\t\tc.animateFunc(t)\n\t\tdom.GetWindow().RequestAnimationFrame(c.doAnimate)\n\t}\n}\n\nfunc (c *Canvas) Animate(animate bool) {\n\tc.animateEnable = animate\n\tdom.GetWindow().RequestAnimationFrame(c.doAnimate)\n}\n\nfunc main() {\n\n\tjs.Global.Call(\"addEventListener\", \"load\", func() {\n\t\tdoc := dom.GetWindow().Document()\n\t\timg := doc.GetElementByID(\"img_elephant\").(*dom.HTMLImageElement)\n\t\tcanvas := MakeCanvas(\"canvas\")\n\t\tprintln(img)\n\t\tvar x = 1\n\t\tcanvas.SetAnimateFunc(func(t time.Duration) {\n\t\t\tcanvas.ctx.FillStyle = \"black\"\n\t\t\tcanvas.ctx.FillRect(0, 0, 640, 480)\n\t\t\tcanvas.ctx.FillStyle = \"red\"\n\t\t\ts := fmt.Sprint(t)\n\t\t\tcanvas.ctx.FillText(s, x, 100, 300)\n\n\t\t\tcanvas.ctx.Call(\"drawImage\", img.Object, x, 100, 100, 100)\n\n\t\t\tx++\n\t\t\tif x > 640 {\n\t\t\t\tx = 0\n\t\t\t}\n\t\t})\n\n\t\tcanvas.Animate(true)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage flags\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/vmware\/govmomi\/session\"\n\t\"github.com\/vmware\/govmomi\/vim25\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tenvURL = \"GOVC_URL\"\n\tenvInsecure = \"GOVC_INSECURE\"\n\tenvMinAPIVersion = \"GOVC_MIN_API_VERSION\"\n)\n\nconst cDescr = \"ESX or vCenter URL\"\n\ntype ClientFlag struct {\n\t*DebugFlag\n\n\tregister sync.Once\n\n\turl *url.URL\n\tinsecure bool\n\tminAPIVersion string\n\n\tclient *vim25.Client\n}\n\nfunc (flag *ClientFlag) URLWithoutPassword() *url.URL {\n\tif flag.url == nil {\n\t\treturn nil\n\t}\n\n\twithoutCredentials := *flag.url\n\twithoutCredentials.User = url.User(flag.url.User.Username())\n\treturn &withoutCredentials\n}\n\nfunc (flag *ClientFlag) String() string {\n\turl := flag.URLWithoutPassword()\n\tif url == nil {\n\t\treturn \"\"\n\t}\n\n\treturn url.String()\n}\n\nvar schemeMatch = regexp.MustCompile(`^\\w+:\/\/`)\n\nfunc (flag *ClientFlag) Set(s string) error {\n\tvar err error\n\n\tif s != \"\" {\n\t\t\/\/ Default the scheme to https\n\t\tif !schemeMatch.MatchString(s) {\n\t\t\ts = \"https:\/\/\" + s\n\t\t}\n\n\t\tflag.url, err = url.Parse(s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Default the path to \/sdk\n\t\tif flag.url.Path == \"\" {\n\t\t\tflag.url.Path = \"\/sdk\"\n\t\t}\n\n\t\tif flag.url.User == nil {\n\t\t\tflag.url.User = url.UserPassword(\"\", \"\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) Register(f *flag.FlagSet) {\n\tflag.register.Do(func() {\n\t\t{\n\t\t\tflag.Set(os.Getenv(envURL))\n\t\t\tusage := fmt.Sprintf(\"%s [%s]\", cDescr, envURL)\n\t\t\tf.Var(flag, \"u\", usage)\n\t\t}\n\n\t\t{\n\t\t\tinsecure := false\n\t\t\tswitch env := strings.ToLower(os.Getenv(envInsecure)); env {\n\t\t\tcase \"1\", \"true\":\n\t\t\t\tinsecure = true\n\t\t\t}\n\n\t\t\tusage := fmt.Sprintf(\"Skip verification of server certificate [%s]\", envInsecure)\n\t\t\tf.BoolVar(&flag.insecure, \"k\", insecure, usage)\n\t\t}\n\n\t\t{\n\t\t\tenv := os.Getenv(envMinAPIVersion)\n\t\t\tif env == \"\" {\n\t\t\t\tenv = \"5.5\"\n\t\t\t}\n\n\t\t\tflag.minAPIVersion = env\n\t\t}\n\t})\n}\n\nfunc (flag *ClientFlag) Process() error {\n\tif flag.url == nil {\n\t\treturn errors.New(\"specify an \" + cDescr)\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) sessionFile() string {\n\turl := flag.URLWithoutPassword()\n\n\t\/\/ Key session file off of full URI and insecure setting.\n\t\/\/ Hash key to get a predictable, canonical format.\n\tkey := fmt.Sprintf(\"%s#insecure=%t\", url.String(), flag.insecure)\n\tname := fmt.Sprintf(\"%040x\", sha1.Sum([]byte(key)))\n\treturn filepath.Join(os.Getenv(\"HOME\"), \".govmomi\", \"sessions\", name)\n}\n\nfunc (flag *ClientFlag) saveClient(c *vim25.Client) error {\n\tp := flag.sessionFile()\n\terr := os.MkdirAll(filepath.Dir(p), 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer f.Close()\n\n\tenc := json.NewEncoder(f)\n\terr = enc.Encode(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) restoreClient(c *vim25.Client) (bool, error) {\n\tf, err := os.Open(flag.sessionFile())\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\tdefer f.Close()\n\n\tdec := json.NewDecoder(f)\n\terr = dec.Decode(c)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (flag *ClientFlag) loadClient() (*vim25.Client, error) {\n\tc := new(vim25.Client)\n\tok, err := flag.restoreClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ok || !c.Valid() {\n\t\treturn nil, nil\n\t}\n\n\tm := session.NewManager(c)\n\tu, err := m.UserSession(context.TODO())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the session is nil, the client is not authenticated\n\tif u == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn c, nil\n}\n\nfunc (flag *ClientFlag) newClient() (*vim25.Client, error) {\n\tsc := soap.NewClient(flag.url, flag.insecure)\n\tc, err := vim25.NewClient(context.TODO(), sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := session.NewManager(c)\n\terr = m.Login(context.TODO(), flag.url.User)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = flag.saveClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\n\/\/ apiVersionValid returns whether or not the API version supported by the\n\/\/ server the client is connected to is not recent enough.\nfunc apiVersionValid(c *vim25.Client, minVersionString string) error {\n\trealVersion, err := ParseVersion(c.ServiceContent.About.ApiVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tminVersion, err := ParseVersion(minVersionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !minVersion.Lte(realVersion) {\n\t\terr = fmt.Errorf(\"Require API version %s, connected to API version %s (set %s to override)\",\n\t\t\tminVersionString,\n\t\t\tc.ServiceContent.About.ApiVersion,\n\t\t\tenvMinAPIVersion)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) Client() (*vim25.Client, error) {\n\tif flag.client != nil {\n\t\treturn flag.client, nil\n\t}\n\n\tc, err := flag.loadClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ loadClient returns nil if it was unable to load a session from disk\n\tif c == nil {\n\t\tc, err = flag.newClient()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Check that the endpoint has the right API version\n\terr = apiVersionValid(c, flag.minAPIVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.client = c\n\treturn flag.client, nil\n}\n<commit_msg>Add flag to toggle persisting session to disk<commit_after>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage flags\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/vmware\/govmomi\/session\"\n\t\"github.com\/vmware\/govmomi\/vim25\"\n\t\"github.com\/vmware\/govmomi\/vim25\/soap\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tenvURL = \"GOVC_URL\"\n\tenvInsecure = \"GOVC_INSECURE\"\n\tenvPersist = \"GOVC_PERSIST_SESSION\"\n\tenvMinAPIVersion = \"GOVC_MIN_API_VERSION\"\n)\n\nconst cDescr = \"ESX or vCenter URL\"\n\ntype ClientFlag struct {\n\t*DebugFlag\n\n\tregister sync.Once\n\n\turl *url.URL\n\tinsecure bool\n\tpersist bool\n\tminAPIVersion string\n\n\tclient *vim25.Client\n}\n\nfunc (flag *ClientFlag) URLWithoutPassword() *url.URL {\n\tif flag.url == nil {\n\t\treturn nil\n\t}\n\n\twithoutCredentials := *flag.url\n\twithoutCredentials.User = url.User(flag.url.User.Username())\n\treturn &withoutCredentials\n}\n\nfunc (flag *ClientFlag) String() string {\n\turl := flag.URLWithoutPassword()\n\tif url == nil {\n\t\treturn \"\"\n\t}\n\n\treturn url.String()\n}\n\nvar schemeMatch = regexp.MustCompile(`^\\w+:\/\/`)\n\nfunc (flag *ClientFlag) Set(s string) error {\n\tvar err error\n\n\tif s != \"\" {\n\t\t\/\/ Default the scheme to https\n\t\tif !schemeMatch.MatchString(s) {\n\t\t\ts = \"https:\/\/\" + s\n\t\t}\n\n\t\tflag.url, err = url.Parse(s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Default the path to \/sdk\n\t\tif flag.url.Path == \"\" {\n\t\t\tflag.url.Path = \"\/sdk\"\n\t\t}\n\n\t\tif flag.url.User == nil {\n\t\t\tflag.url.User = url.UserPassword(\"\", \"\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) Register(f *flag.FlagSet) {\n\tflag.register.Do(func() {\n\t\t{\n\t\t\tflag.Set(os.Getenv(envURL))\n\t\t\tusage := fmt.Sprintf(\"%s [%s]\", cDescr, envURL)\n\t\t\tf.Var(flag, \"u\", usage)\n\t\t}\n\n\t\t{\n\t\t\tinsecure := false\n\t\t\tswitch env := strings.ToLower(os.Getenv(envInsecure)); env {\n\t\t\tcase \"1\", \"true\":\n\t\t\t\tinsecure = true\n\t\t\t}\n\n\t\t\tusage := fmt.Sprintf(\"Skip verification of server certificate [%s]\", envInsecure)\n\t\t\tf.BoolVar(&flag.insecure, \"k\", insecure, usage)\n\t\t}\n\n\t\t{\n\t\t\tpersist := true\n\t\t\tswitch env := strings.ToLower(os.Getenv(envPersist)); env {\n\t\t\tcase \"0\", \"false\":\n\t\t\t\tpersist = false\n\t\t\t}\n\n\t\t\tusage := fmt.Sprintf(\"Persist session to disk [%s]\", envPersist)\n\t\t\tf.BoolVar(&flag.persist, \"persist\", persist, usage)\n\t\t}\n\n\t\t{\n\t\t\tenv := os.Getenv(envMinAPIVersion)\n\t\t\tif env == \"\" {\n\t\t\t\tenv = \"5.5\"\n\t\t\t}\n\n\t\t\tflag.minAPIVersion = env\n\t\t}\n\t})\n}\n\nfunc (flag *ClientFlag) Process() error {\n\tif flag.url == nil {\n\t\treturn errors.New(\"specify an \" + cDescr)\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) sessionFile() string {\n\turl := flag.URLWithoutPassword()\n\n\t\/\/ Key session file off of full URI and insecure setting.\n\t\/\/ Hash key to get a predictable, canonical format.\n\tkey := fmt.Sprintf(\"%s#insecure=%t\", url.String(), flag.insecure)\n\tname := fmt.Sprintf(\"%040x\", sha1.Sum([]byte(key)))\n\treturn filepath.Join(os.Getenv(\"HOME\"), \".govmomi\", \"sessions\", name)\n}\n\nfunc (flag *ClientFlag) saveClient(c *vim25.Client) error {\n\tif !flag.persist {\n\t\treturn nil\n\t}\n\n\tp := flag.sessionFile()\n\terr := os.MkdirAll(filepath.Dir(p), 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer f.Close()\n\n\tenc := json.NewEncoder(f)\n\terr = enc.Encode(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) restoreClient(c *vim25.Client) (bool, error) {\n\tif !flag.persist {\n\t\treturn false, nil\n\t}\n\n\tf, err := os.Open(flag.sessionFile())\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\tdefer f.Close()\n\n\tdec := json.NewDecoder(f)\n\terr = dec.Decode(c)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (flag *ClientFlag) loadClient() (*vim25.Client, error) {\n\tc := new(vim25.Client)\n\tok, err := flag.restoreClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ok || !c.Valid() {\n\t\treturn nil, nil\n\t}\n\n\tm := session.NewManager(c)\n\tu, err := m.UserSession(context.TODO())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the session is nil, the client is not authenticated\n\tif u == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn c, nil\n}\n\nfunc (flag *ClientFlag) newClient() (*vim25.Client, error) {\n\tsc := soap.NewClient(flag.url, flag.insecure)\n\tc, err := vim25.NewClient(context.TODO(), sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := session.NewManager(c)\n\terr = m.Login(context.TODO(), flag.url.User)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = flag.saveClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\n\/\/ apiVersionValid returns whether or not the API version supported by the\n\/\/ server the client is connected to is not recent enough.\nfunc apiVersionValid(c *vim25.Client, minVersionString string) error {\n\trealVersion, err := ParseVersion(c.ServiceContent.About.ApiVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tminVersion, err := ParseVersion(minVersionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !minVersion.Lte(realVersion) {\n\t\terr = fmt.Errorf(\"Require API version %s, connected to API version %s (set %s to override)\",\n\t\t\tminVersionString,\n\t\t\tc.ServiceContent.About.ApiVersion,\n\t\t\tenvMinAPIVersion)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) Client() (*vim25.Client, error) {\n\tif flag.client != nil {\n\t\treturn flag.client, nil\n\t}\n\n\tc, err := flag.loadClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ loadClient returns nil if it was unable to load a session from disk\n\tif c == nil {\n\t\tc, err = flag.newClient()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Check that the endpoint has the right API version\n\terr = apiVersionValid(c, flag.minAPIVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.client = c\n\treturn flag.client, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package xql\r\n\r\nimport (\r\n \"reflect\"\r\n \"strings\"\r\n \/\/\"fmt\"\r\n)\r\n\r\ntype Table struct {\r\n TableName string\r\n Schema string\r\n Entity interface{}\r\n Columns map[string]*Column\r\n PrimaryKey []string\r\n columns_by_jtag map[string]*Column\r\n\r\n}\r\n\r\ntype Column struct {\r\n FieldName string\r\n PropertyName string\r\n JTAG string\r\n Type string\r\n Length uint16\r\n Unique bool\r\n Nullable bool\r\n Indexed bool\r\n Auto bool\r\n PrimaryKey bool\r\n}\r\n\r\nfunc _in_slice(a string, ls []string) bool {\r\n for _, s := range ls {\r\n if a == s {\r\n return true\r\n }\r\n }\r\n return false\r\n}\r\n\r\nfunc _get_skips(tags []string) (skips []string) {\r\n if nil == tags || len(tags) < 1 {\r\n return\r\n }\r\n for _, tag := range tags {\r\n if strings.HasPrefix(tag, \"skips:\") {\r\n s := strings.TrimLeft(tag, \"skips:\")\r\n for _, n := range strings.Split(s, \";\"){\r\n if n != \"\" {\r\n skips = append(skips, n)\r\n }\r\n }\r\n return\r\n }\r\n }\r\n return\r\n}\r\n\r\nfunc _make_columns(entity interface{}, skips ...string) (cols []*Column) {\r\n if nil != entity {\r\n et := reflect.TypeOf(entity)\r\n ev := reflect.ValueOf(entity)\r\n for i:=0; i< et.Elem().NumField(); i++ {\r\n f := et.Elem().Field(i)\r\n if _in_slice(f.Name, skips) {\r\n continue\r\n }\r\n x_tags := strings.Split(f.Tag.Get(\"xql\"),\",\")\r\n if f.Anonymous {\r\n if x_tags[0] != \"-\" {\r\n sks := _get_skips(x_tags)\r\n for _, c := range _make_columns(ev.Elem().Field(i).Addr().Interface(),sks...) {\r\n cols = append(cols, c)\r\n }\r\n }else{\r\n continue\r\n }\r\n continue\r\n }\r\n c := &Column{PropertyName:f.Name}\r\n if len(x_tags) < 1 {\r\n c.FieldName = Camel2Underscore(f.Name)\r\n }else if x_tags[0] == \"-\" {\r\n continue\r\n }\r\n if x_tags[0]==\"\" {\r\n c.FieldName = Camel2Underscore(f.Name)\r\n }else{\r\n c.FieldName = x_tags[0]\r\n }\r\n if len(x_tags) > 1 {\r\n for _, x := range x_tags[1:] {\r\n switch x {\r\n case \"pk\":\r\n c.PrimaryKey = true\r\n \/\/t.PrimaryKey = append(t.PrimaryKey, x)\r\n case \"indexed\":\r\n c.Indexed = true\r\n case \"nullable\":\r\n c.Nullable = true\r\n case \"unique\":\r\n c.Unique = true\r\n case \"auto\":\r\n c.Auto = true\r\n }\r\n }\r\n }\r\n json_tags := strings.Split(f.Tag.Get(\"json\"),\",\")\r\n if len(json_tags) < 1 {\r\n c.JTAG = c.PropertyName\r\n }else if json_tags[0] != \"-\" {\r\n if json_tags[0] == \"\" {\r\n c.JTAG = c.PropertyName\r\n }else{\r\n c.JTAG = json_tags[0]\r\n }\r\n }\r\n cols = append(cols, c)\r\n \/\/t.Columns[c.FieldName] = c\r\n }\r\n }\r\n return cols\r\n}\r\n\r\nfunc DeclareTable(name string, entity interface{}, schema ...string) *Table {\r\n t := &Table{\r\n TableName:name,\r\n Entity: entity,\r\n Columns: make(map[string]*Column),\r\n columns_by_jtag: make(map[string]*Column),\r\n }\r\n if len(schema) > 0 {\r\n t.Schema = schema[0]\r\n }\r\n if nil != entity {\r\n for _, c := range _make_columns(entity) {\r\n t.Columns[c.FieldName] = c\r\n if c.JTAG != \"\" {\r\n t.columns_by_jtag[c.JTAG] = c\r\n }\r\n if c.PrimaryKey {\r\n t.PrimaryKey = append(t.PrimaryKey, c.FieldName)\r\n }\r\n }\r\n }\r\n return t\r\n}\r\n\r\nfunc (t *Table) GetColumn(name string) (*Column, bool) {\r\n if c, ok := t.Columns[name]; ok {\r\n return c, true\r\n }\r\n if c, ok := t.columns_by_jtag[name]; ok {\r\n return c, true\r\n }\r\n return nil, false\r\n}<commit_msg>Add recursived param to void second recursive calling.<commit_after>package xql\r\n\r\nimport (\r\n \"reflect\"\r\n \"strings\"\r\n \/\/\"fmt\"\r\n)\r\n\r\ntype Table struct {\r\n TableName string\r\n Schema string\r\n Entity interface{}\r\n Columns map[string]*Column\r\n PrimaryKey []string\r\n columns_by_jtag map[string]*Column\r\n\r\n}\r\n\r\ntype Column struct {\r\n FieldName string\r\n PropertyName string\r\n JTAG string\r\n Type string\r\n Length uint16\r\n Unique bool\r\n Nullable bool\r\n Indexed bool\r\n Auto bool\r\n PrimaryKey bool\r\n}\r\n\r\nfunc _in_slice(a string, ls []string) bool {\r\n for _, s := range ls {\r\n if a == s {\r\n return true\r\n }\r\n }\r\n return false\r\n}\r\n\r\nfunc _get_skips(tags []string) (skips []string) {\r\n if nil == tags || len(tags) < 1 {\r\n return\r\n }\r\n for _, tag := range tags {\r\n if strings.HasPrefix(tag, \"skips:\") {\r\n s := strings.TrimLeft(tag, \"skips:\")\r\n for _, n := range strings.Split(s, \";\"){\r\n if n != \"\" {\r\n skips = append(skips, n)\r\n }\r\n }\r\n return\r\n }\r\n }\r\n return\r\n}\r\n\r\nfunc _make_columns(entity interface{}, recursive bool, skips ...string) (cols []*Column) {\r\n if nil != entity {\r\n et := reflect.TypeOf(entity)\r\n ev := reflect.ValueOf(entity)\r\n for i:=0; i< et.Elem().NumField(); i++ {\r\n f := et.Elem().Field(i)\r\n if _in_slice(f.Name, skips) {\r\n continue\r\n }\r\n x_tags := strings.Split(f.Tag.Get(\"xql\"),\",\")\r\n if f.Anonymous && ! recursive {\r\n if x_tags[0] != \"-\" {\r\n sks := _get_skips(x_tags)\r\n for _, c := range _make_columns(ev.Elem().Field(i).Addr().Interface(),true, sks...) {\r\n cols = append(cols, c)\r\n }\r\n }else{\r\n continue\r\n }\r\n continue\r\n }\r\n c := &Column{PropertyName:f.Name}\r\n if len(x_tags) < 1 {\r\n c.FieldName = Camel2Underscore(f.Name)\r\n }else if x_tags[0] == \"-\" {\r\n continue\r\n }\r\n if x_tags[0]==\"\" {\r\n c.FieldName = Camel2Underscore(f.Name)\r\n }else{\r\n c.FieldName = x_tags[0]\r\n }\r\n if len(x_tags) > 1 {\r\n for _, x := range x_tags[1:] {\r\n switch x {\r\n case \"pk\":\r\n c.PrimaryKey = true\r\n \/\/t.PrimaryKey = append(t.PrimaryKey, x)\r\n case \"indexed\":\r\n c.Indexed = true\r\n case \"nullable\":\r\n c.Nullable = true\r\n case \"unique\":\r\n c.Unique = true\r\n case \"auto\":\r\n c.Auto = true\r\n }\r\n }\r\n }\r\n json_tags := strings.Split(f.Tag.Get(\"json\"),\",\")\r\n if len(json_tags) < 1 {\r\n c.JTAG = c.PropertyName\r\n }else if json_tags[0] != \"-\" {\r\n if json_tags[0] == \"\" {\r\n c.JTAG = c.PropertyName\r\n }else{\r\n c.JTAG = json_tags[0]\r\n }\r\n }\r\n cols = append(cols, c)\r\n \/\/t.Columns[c.FieldName] = c\r\n }\r\n }\r\n return cols\r\n}\r\n\r\nfunc DeclareTable(name string, entity interface{}, schema ...string) *Table {\r\n t := &Table{\r\n TableName:name,\r\n Entity: entity,\r\n Columns: make(map[string]*Column),\r\n columns_by_jtag: make(map[string]*Column),\r\n }\r\n if len(schema) > 0 {\r\n t.Schema = schema[0]\r\n }\r\n if nil != entity {\r\n for _, c := range _make_columns(entity, false) {\r\n t.Columns[c.FieldName] = c\r\n if c.JTAG != \"\" {\r\n t.columns_by_jtag[c.JTAG] = c\r\n }\r\n if c.PrimaryKey {\r\n t.PrimaryKey = append(t.PrimaryKey, c.FieldName)\r\n }\r\n }\r\n }\r\n return t\r\n}\r\n\r\nfunc (t *Table) GetColumn(name string) (*Column, bool) {\r\n if c, ok := t.Columns[name]; ok {\r\n return c, true\r\n }\r\n if c, ok := t.columns_by_jtag[name]; ok {\r\n return c, true\r\n }\r\n return nil, false\r\n}<|endoftext|>"} {"text":"<commit_before>package zenodb\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/bytemap\"\n\t\"github.com\/getlantern\/errors\"\n\t\"github.com\/getlantern\/goexpr\"\n\t\"github.com\/getlantern\/golog\"\n\t\"github.com\/getlantern\/wal\"\n\t\"github.com\/getlantern\/zenodb\/core\"\n\t\"github.com\/getlantern\/zenodb\/encoding\"\n\t\"github.com\/getlantern\/zenodb\/sql\"\n)\n\n\/\/ TableStats presents statistics for a given table (currently only since the\n\/\/ last time the database process was started).\ntype TableStats struct {\n\tFilteredPoints int64\n\tQueuedPoints int64\n\tInsertedPoints int64\n\tDroppedPoints int64\n\tExpiredValues int64\n}\n\n\/\/ TableOpts configures a table.\ntype TableOpts struct {\n\t\/\/ Name is the name of the table.\n\tName string\n\t\/\/ View indicates if this table is a view on top of an existing table.\n\tView bool\n\t\/\/ MinFlushLatency sets a lower bound on how frequently the memstore is\n\t\/\/ flushed to disk.\n\tMinFlushLatency time.Duration\n\t\/\/ MaxFlushLatency sets an upper bound on how long to wait before flushing the\n\t\/\/ memstore to disk.\n\tMaxFlushLatency time.Duration\n\t\/\/ RetentionPeriod limits how long data is kept in the table (based on the\n\t\/\/ timestamp of the data itself).\n\tRetentionPeriod time.Duration\n\t\/\/ SQL is the SELECT query that determines the fields, filtering and input\n\t\/\/ source for this table.\n\tSQL string\n\tdependencyOf []*TableOpts\n}\n\ntype table struct {\n\t*TableOpts\n\tsql.Query\n\tdb *DB\n\trowStore *rowStore\n\tlog golog.Logger\n\twhereMutex sync.RWMutex\n\tstats TableStats\n\tstatsMutex sync.RWMutex\n\twal *wal.Reader\n\thighWaterMarkDisk int64\n\thighWaterMarkMemory int64\n\thighWaterMarkMx sync.RWMutex\n}\n\n\/\/ CreateTable creates a table based on the given opts.\nfunc (db *DB) CreateTable(opts *TableOpts) error {\n\tq, err := sql.Parse(opts.SQL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn db.doCreateTable(opts, q)\n}\n\n\/\/ CreateView creates a view based on the given opts.\nfunc (db *DB) CreateView(opts *TableOpts) error {\n\ttable, err := sql.TableFor(opts.SQL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get existing fields from existing table\n\tt := db.getTable(table)\n\tif t == nil {\n\t\treturn fmt.Errorf(\"Table '%v' not found\", table)\n\t}\n\tq, err := sql.Parse(opts.SQL, db.getFieldsOptional)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Point view at same stream as table\n\t\/\/ TODO: populate view with existing data from table\n\tq.From = t.From\n\n\tif q.GroupBy == nil {\n\t\tq.GroupBy = t.GroupBy\n\t\tq.GroupByAll = t.GroupByAll\n\t}\n\n\tif q.Resolution == 0 {\n\t\tq.Resolution = t.Resolution\n\t}\n\n\t\/\/ Combine where clauses\n\tif t.Where != nil {\n\t\tif q.Where == nil {\n\t\t\tq.Where = t.Where\n\t\t} else {\n\t\t\tcombined, err := goexpr.Binary(\"AND\", q.Where, t.Where)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tq.Where = combined\n\t\t}\n\t}\n\n\treturn db.doCreateTable(opts, q)\n}\n\nfunc (db *DB) doCreateTable(opts *TableOpts, q *sql.Query) error {\n\tif opts.RetentionPeriod <= 0 {\n\t\treturn errors.New(\"Please specify a positive RetentionPeriod\")\n\t}\n\tif opts.MinFlushLatency <= 0 {\n\t\tlog.Debug(\"MinFlushLatency disabled\")\n\t}\n\tif opts.MaxFlushLatency <= 0 {\n\t\topts.MaxFlushLatency = time.Duration(math.MaxInt64)\n\t\tlog.Debug(\"MaxFlushLatency disabled\")\n\t}\n\topts.Name = strings.ToLower(opts.Name)\n\n\t\/\/ prepend a magic _points field\n\tnewFields := make([]core.Field, 0, len(q.Fields)+1)\n\tnewFields = append(newFields, sql.PointsField)\n\tfor _, field := range q.Fields {\n\t\t\/\/ Don't add _points twice\n\t\tif field.Name != \"_points\" {\n\t\t\tnewFields = append(newFields, field)\n\t\t}\n\t}\n\tq.Fields = newFields\n\n\tt := &table{\n\t\tTableOpts: opts,\n\t\tQuery: *q,\n\t\tdb: db,\n\t\tlog: golog.LoggerFor(\"zenodb.\" + opts.Name),\n\t}\n\n\tt.applyWhere(q.Where)\n\n\tvar rsErr error\n\tvar walOffset wal.Offset\n\tt.rowStore, walOffset, rsErr = t.openRowStore(&rowStoreOptions{\n\t\tdir: filepath.Join(db.opts.Dir, t.Name),\n\t\tminFlushLatency: t.MinFlushLatency,\n\t\tmaxFlushLatency: t.MaxFlushLatency,\n\t})\n\tif rsErr != nil {\n\t\treturn rsErr\n\t}\n\n\tdb.tablesMutex.Lock()\n\tdefer db.tablesMutex.Unlock()\n\n\tif db.tables[t.Name] != nil {\n\t\treturn fmt.Errorf(\"Table %v already exists\", t.Name)\n\t}\n\tdb.tables[t.Name] = t\n\tdb.orderedTables = append(db.orderedTables, t)\n\tvar walErr error\n\tw := db.streams[q.From]\n\tif w == nil {\n\t\twalDir := filepath.Join(db.opts.Dir, \"_wal\", q.From)\n\t\tdirErr := os.MkdirAll(walDir, 0755)\n\t\tif dirErr != nil && !os.IsExist(dirErr) {\n\t\t\treturn dirErr\n\t\t}\n\t\tw, walErr = wal.Open(walDir, db.opts.WALSyncInterval)\n\t\tif walErr != nil {\n\t\t\treturn walErr\n\t\t}\n\t\tgo db.capWALAge(w)\n\t\tdb.streams[q.From] = w\n\n\t\tif db.opts.Follow != nil {\n\t\t\tvar offset wal.Offset\n\t\t\tlatest, _, err := w.Latest()\n\t\t\tif err != nil || len(latest) < wal.OffsetSize {\n\t\t\t\tlog.Debugf(\"Unable to obtain latest data from wal, assuming we're at beginning: %v\", err)\n\t\t\t} else {\n\t\t\t\t\/\/ Followers store offset as first 16 bytes of data in WAL\n\t\t\t\toffset = wal.Offset(latest[:wal.OffsetSize])\n\t\t\t}\n\t\t\tlog.Debugf(\"Following stream %v starting at %v\", q.From, offset)\n\t\t\tgo db.opts.Follow(&Follow{q.From, offset, db.opts.Partition}, func(data []byte, newOffset wal.Offset) error {\n\t\t\t\t_, err := w.Write(newOffset, data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debug(err)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t})\n\t\t}\n\t}\n\n\tif db.opts.Passthrough {\n\t\tlog.Debugf(\"Passthrough will not insert data to table %v\", t.Name)\n\t\treturn nil\n\t}\n\n\tlog.Debugf(\"%v will read inserts from %v at offset %v\", t.Name, q.From, walOffset)\n\tt.wal, walErr = w.NewReader(t.Name, walOffset)\n\tif walErr != nil {\n\t\treturn fmt.Errorf(\"Unable to obtain WAL reader: %v\", walErr)\n\t}\n\n\tgo t.processInserts()\n\tif !t.db.opts.Passthrough {\n\t\tgo t.logHighWaterMark()\n\t}\n\treturn nil\n}\n\nfunc (t *table) applyWhere(where goexpr.Expr) {\n\tt.whereMutex.Lock()\n\tt.Where = where\n\tt.whereMutex.Unlock()\n}\n\nfunc (t *table) truncateBefore() time.Time {\n\treturn t.db.clock.Now().Add(-1 * t.RetentionPeriod)\n}\n\nfunc (t *table) iterate(fields []string, includeMemStore bool, onValue func(bytemap.ByteMap, []encoding.Sequence)) error {\n\treturn t.rowStore.iterate(fields, includeMemStore, onValue)\n}\n\n\/\/ shouldSort determines whether or not a flush should be sorted. The flush will\n\/\/ sort if the table is the next table in line to be sorted, and no other sort\n\/\/ is currently happening. If shouldSort returns true, the flushing process\n\/\/ must call stopSorting when finished so that other tables have a chance to\n\/\/ sort.\nfunc (t *table) shouldSort() bool {\n\tif t.db.opts.MaxMemoryRatio <= 0 {\n\t\treturn false\n\t}\n\n\tt.db.tablesMutex.RLock()\n\tif t.db.nextTableToSort >= len(t.db.orderedTables) {\n\t\tt.db.nextTableToSort = 0\n\t}\n\tnextTableToSort := t.db.orderedTables[t.db.nextTableToSort]\n\tresult := t.Name == nextTableToSort.Name && !t.db.isSorting\n\tt.db.tablesMutex.RUnlock()\n\treturn result\n}\n\nfunc (t *table) stopSorting() {\n\tt.db.tablesMutex.RLock()\n\tt.db.isSorting = false\n\tt.db.nextTableToSort++\n\tt.db.tablesMutex.RUnlock()\n}\n\nfunc (t *table) memStoreSize() int {\n\treturn t.rowStore.memStoreSize()\n}\n\nfunc (t *table) forceFlush() {\n\tt.rowStore.forceFlush()\n}\n\nfunc (t *table) logHighWaterMark() {\n\tfor {\n\t\ttime.Sleep(15 * time.Second)\n\t\tt.highWaterMarkMx.RLock()\n\t\tdisk := t.highWaterMarkDisk\n\t\tmemory := t.highWaterMarkMemory\n\t\tt.highWaterMarkMx.RUnlock()\n\t\tt.log.Debugf(\"High Water Mark disk: %v memory: %v\", encoding.TimeFromInt(disk).In(time.UTC), encoding.TimeFromInt(memory).In(time.UTC))\n\t}\n}\n\nfunc (t *table) updateHighWaterMarkDisk(ts int64) {\n\tt.highWaterMarkMx.Lock()\n\tif ts > t.highWaterMarkDisk {\n\t\tt.highWaterMarkDisk = ts\n\t}\n\tt.highWaterMarkMx.Unlock()\n}\n\nfunc (t *table) updateHighWaterMarkMemory(ts int64) {\n\tt.highWaterMarkMx.Lock()\n\tif ts > t.highWaterMarkMemory {\n\t\tt.highWaterMarkMemory = ts\n\t}\n\tt.highWaterMarkMx.Unlock()\n}\n<commit_msg>Followers don't look further back than their own WAL retention period<commit_after>package zenodb\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/bytemap\"\n\t\"github.com\/getlantern\/errors\"\n\t\"github.com\/getlantern\/goexpr\"\n\t\"github.com\/getlantern\/golog\"\n\t\"github.com\/getlantern\/wal\"\n\t\"github.com\/getlantern\/zenodb\/core\"\n\t\"github.com\/getlantern\/zenodb\/encoding\"\n\t\"github.com\/getlantern\/zenodb\/sql\"\n)\n\n\/\/ TableStats presents statistics for a given table (currently only since the\n\/\/ last time the database process was started).\ntype TableStats struct {\n\tFilteredPoints int64\n\tQueuedPoints int64\n\tInsertedPoints int64\n\tDroppedPoints int64\n\tExpiredValues int64\n}\n\n\/\/ TableOpts configures a table.\ntype TableOpts struct {\n\t\/\/ Name is the name of the table.\n\tName string\n\t\/\/ View indicates if this table is a view on top of an existing table.\n\tView bool\n\t\/\/ MinFlushLatency sets a lower bound on how frequently the memstore is\n\t\/\/ flushed to disk.\n\tMinFlushLatency time.Duration\n\t\/\/ MaxFlushLatency sets an upper bound on how long to wait before flushing the\n\t\/\/ memstore to disk.\n\tMaxFlushLatency time.Duration\n\t\/\/ RetentionPeriod limits how long data is kept in the table (based on the\n\t\/\/ timestamp of the data itself).\n\tRetentionPeriod time.Duration\n\t\/\/ SQL is the SELECT query that determines the fields, filtering and input\n\t\/\/ source for this table.\n\tSQL string\n\tdependencyOf []*TableOpts\n}\n\ntype table struct {\n\t*TableOpts\n\tsql.Query\n\tdb *DB\n\trowStore *rowStore\n\tlog golog.Logger\n\twhereMutex sync.RWMutex\n\tstats TableStats\n\tstatsMutex sync.RWMutex\n\twal *wal.Reader\n\thighWaterMarkDisk int64\n\thighWaterMarkMemory int64\n\thighWaterMarkMx sync.RWMutex\n}\n\n\/\/ CreateTable creates a table based on the given opts.\nfunc (db *DB) CreateTable(opts *TableOpts) error {\n\tq, err := sql.Parse(opts.SQL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn db.doCreateTable(opts, q)\n}\n\n\/\/ CreateView creates a view based on the given opts.\nfunc (db *DB) CreateView(opts *TableOpts) error {\n\ttable, err := sql.TableFor(opts.SQL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get existing fields from existing table\n\tt := db.getTable(table)\n\tif t == nil {\n\t\treturn fmt.Errorf(\"Table '%v' not found\", table)\n\t}\n\tq, err := sql.Parse(opts.SQL, db.getFieldsOptional)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Point view at same stream as table\n\t\/\/ TODO: populate view with existing data from table\n\tq.From = t.From\n\n\tif q.GroupBy == nil {\n\t\tq.GroupBy = t.GroupBy\n\t\tq.GroupByAll = t.GroupByAll\n\t}\n\n\tif q.Resolution == 0 {\n\t\tq.Resolution = t.Resolution\n\t}\n\n\t\/\/ Combine where clauses\n\tif t.Where != nil {\n\t\tif q.Where == nil {\n\t\t\tq.Where = t.Where\n\t\t} else {\n\t\t\tcombined, err := goexpr.Binary(\"AND\", q.Where, t.Where)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tq.Where = combined\n\t\t}\n\t}\n\n\treturn db.doCreateTable(opts, q)\n}\n\nfunc (db *DB) doCreateTable(opts *TableOpts, q *sql.Query) error {\n\tif opts.RetentionPeriod <= 0 {\n\t\treturn errors.New(\"Please specify a positive RetentionPeriod\")\n\t}\n\tif opts.MinFlushLatency <= 0 {\n\t\tlog.Debug(\"MinFlushLatency disabled\")\n\t}\n\tif opts.MaxFlushLatency <= 0 {\n\t\topts.MaxFlushLatency = time.Duration(math.MaxInt64)\n\t\tlog.Debug(\"MaxFlushLatency disabled\")\n\t}\n\topts.Name = strings.ToLower(opts.Name)\n\n\t\/\/ prepend a magic _points field\n\tnewFields := make([]core.Field, 0, len(q.Fields)+1)\n\tnewFields = append(newFields, sql.PointsField)\n\tfor _, field := range q.Fields {\n\t\t\/\/ Don't add _points twice\n\t\tif field.Name != \"_points\" {\n\t\t\tnewFields = append(newFields, field)\n\t\t}\n\t}\n\tq.Fields = newFields\n\n\tt := &table{\n\t\tTableOpts: opts,\n\t\tQuery: *q,\n\t\tdb: db,\n\t\tlog: golog.LoggerFor(\"zenodb.\" + opts.Name),\n\t}\n\n\tt.applyWhere(q.Where)\n\n\tvar rsErr error\n\tvar walOffset wal.Offset\n\tt.rowStore, walOffset, rsErr = t.openRowStore(&rowStoreOptions{\n\t\tdir: filepath.Join(db.opts.Dir, t.Name),\n\t\tminFlushLatency: t.MinFlushLatency,\n\t\tmaxFlushLatency: t.MaxFlushLatency,\n\t})\n\tif rsErr != nil {\n\t\treturn rsErr\n\t}\n\n\tdb.tablesMutex.Lock()\n\tdefer db.tablesMutex.Unlock()\n\n\tif db.tables[t.Name] != nil {\n\t\treturn fmt.Errorf(\"Table %v already exists\", t.Name)\n\t}\n\tdb.tables[t.Name] = t\n\tdb.orderedTables = append(db.orderedTables, t)\n\tvar walErr error\n\tw := db.streams[q.From]\n\tif w == nil {\n\t\twalDir := filepath.Join(db.opts.Dir, \"_wal\", q.From)\n\t\tdirErr := os.MkdirAll(walDir, 0755)\n\t\tif dirErr != nil && !os.IsExist(dirErr) {\n\t\t\treturn dirErr\n\t\t}\n\t\tw, walErr = wal.Open(walDir, db.opts.WALSyncInterval)\n\t\tif walErr != nil {\n\t\t\treturn walErr\n\t\t}\n\t\tgo db.capWALAge(w)\n\t\tdb.streams[q.From] = w\n\n\t\tif db.opts.Follow != nil {\n\t\t\tvar offset wal.Offset\n\t\t\tlatest, _, err := w.Latest()\n\t\t\tif err != nil || len(latest) < wal.OffsetSize {\n\t\t\t\tlog.Debugf(\"Unable to obtain latest data from wal, assuming we're at beginning: %v\", err)\n\t\t\t\t\/\/ Start at offset commensurate with max WAL age\n\t\t\t\toffset = wal.NewOffsetForTS(db.clock.Now().Add(-1 * db.opts.MaxWALAge))\n\t\t\t} else {\n\t\t\t\t\/\/ Followers store offset as first 16 bytes of data in WAL\n\t\t\t\toffset = wal.Offset(latest[:wal.OffsetSize])\n\t\t\t}\n\t\t\tlog.Debugf(\"Following stream %v starting at %v\", q.From, offset)\n\t\t\tgo db.opts.Follow(&Follow{q.From, offset, db.opts.Partition}, func(data []byte, newOffset wal.Offset) error {\n\t\t\t\t_, err := w.Write(newOffset, data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Debug(err)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t})\n\t\t}\n\t}\n\n\tif db.opts.Passthrough {\n\t\tlog.Debugf(\"Passthrough will not insert data to table %v\", t.Name)\n\t\treturn nil\n\t}\n\n\tlog.Debugf(\"%v will read inserts from %v at offset %v\", t.Name, q.From, walOffset)\n\tt.wal, walErr = w.NewReader(t.Name, walOffset)\n\tif walErr != nil {\n\t\treturn fmt.Errorf(\"Unable to obtain WAL reader: %v\", walErr)\n\t}\n\n\tgo t.processInserts()\n\tif !t.db.opts.Passthrough {\n\t\tgo t.logHighWaterMark()\n\t}\n\treturn nil\n}\n\nfunc (t *table) applyWhere(where goexpr.Expr) {\n\tt.whereMutex.Lock()\n\tt.Where = where\n\tt.whereMutex.Unlock()\n}\n\nfunc (t *table) truncateBefore() time.Time {\n\treturn t.db.clock.Now().Add(-1 * t.RetentionPeriod)\n}\n\nfunc (t *table) iterate(fields []string, includeMemStore bool, onValue func(bytemap.ByteMap, []encoding.Sequence)) error {\n\treturn t.rowStore.iterate(fields, includeMemStore, onValue)\n}\n\n\/\/ shouldSort determines whether or not a flush should be sorted. The flush will\n\/\/ sort if the table is the next table in line to be sorted, and no other sort\n\/\/ is currently happening. If shouldSort returns true, the flushing process\n\/\/ must call stopSorting when finished so that other tables have a chance to\n\/\/ sort.\nfunc (t *table) shouldSort() bool {\n\tif t.db.opts.MaxMemoryRatio <= 0 {\n\t\treturn false\n\t}\n\n\tt.db.tablesMutex.RLock()\n\tif t.db.nextTableToSort >= len(t.db.orderedTables) {\n\t\tt.db.nextTableToSort = 0\n\t}\n\tnextTableToSort := t.db.orderedTables[t.db.nextTableToSort]\n\tresult := t.Name == nextTableToSort.Name && !t.db.isSorting\n\tt.db.tablesMutex.RUnlock()\n\treturn result\n}\n\nfunc (t *table) stopSorting() {\n\tt.db.tablesMutex.RLock()\n\tt.db.isSorting = false\n\tt.db.nextTableToSort++\n\tt.db.tablesMutex.RUnlock()\n}\n\nfunc (t *table) memStoreSize() int {\n\treturn t.rowStore.memStoreSize()\n}\n\nfunc (t *table) forceFlush() {\n\tt.rowStore.forceFlush()\n}\n\nfunc (t *table) logHighWaterMark() {\n\tfor {\n\t\ttime.Sleep(15 * time.Second)\n\t\tt.highWaterMarkMx.RLock()\n\t\tdisk := t.highWaterMarkDisk\n\t\tmemory := t.highWaterMarkMemory\n\t\tt.highWaterMarkMx.RUnlock()\n\t\tt.log.Debugf(\"High Water Mark disk: %v memory: %v\", encoding.TimeFromInt(disk).In(time.UTC), encoding.TimeFromInt(memory).In(time.UTC))\n\t}\n}\n\nfunc (t *table) updateHighWaterMarkDisk(ts int64) {\n\tt.highWaterMarkMx.Lock()\n\tif ts > t.highWaterMarkDisk {\n\t\tt.highWaterMarkDisk = ts\n\t}\n\tt.highWaterMarkMx.Unlock()\n}\n\nfunc (t *table) updateHighWaterMarkMemory(ts int64) {\n\tt.highWaterMarkMx.Lock()\n\tif ts > t.highWaterMarkMemory {\n\t\tt.highWaterMarkMemory = ts\n\t}\n\tt.highWaterMarkMx.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package tcgif\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"sort\"\n)\n\ntype coord struct {\n\tX, Y int\n}\n\ntype colorCount struct {\n\tC color.Color\n\tCoords []coord\n}\n\ntype colorCountList []colorCount\n\nfunc (p colorCountList) Len() int { return len(p) }\nfunc (p colorCountList) Less(i, j int) bool { return len(p[i].Coords) < len(p[j].Coords) }\nfunc (p colorCountList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\ntype GIFMaker struct {\n\tframeDelay int\n\tfinalDelay int\n\n\tframeLimit uint\n\n\tbackfill bool\n\tpopsort bool\n}\n\ntype Option func(*GIFMaker)\n\nfunc WithFrameDelay(delay int) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.frameDelay = delay\n\t}\n}\n\nfunc WithFinalDelay(delay int) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.finalDelay = delay\n\t}\n}\n\nfunc WithFrameLimit(limit uint) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.frameLimit = limit\n\t}\n}\n\nfunc WithBackfill(backfill bool) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.backfill = backfill\n\t}\n}\n\nfunc WithPopularitySort(popsort bool) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.popsort = popsort\n\t}\n}\n\nfunc NewGIFMaker(opts ...Option) *GIFMaker {\n\tgm := &GIFMaker{\n\t\tframeDelay: 2,\n\t\tfinalDelay: 300,\n\t\tframeLimit: 0,\n\t\tbackfill: true,\n\t\tpopsort: true,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(gm)\n\t}\n\n\treturn gm\n}\n\nfunc (gm *GIFMaker) MakeGIF(img image.Image) (*gif.GIF, error) {\n\tb := img.Bounds()\n\tcolormap := make(map[color.Color][]coord)\n\n\tfor y := 0; y <= b.Max.Y; y++ {\n\t\tfor x := 0; x <= b.Max.X; x++ {\n\t\t\tc := img.At(x, y)\n\t\t\tcolormap[c] = append(colormap[c], coord{x, y})\n\t\t}\n\t}\n\n\tcolorhisto := make(colorCountList, 0)\n\tfor c, e := range colormap {\n\t\tcolorhisto = append(colorhisto, colorCount{c, e})\n\t}\n\n\tif gm.popsort {\n\t\tsort.Sort(sort.Reverse(colorhisto))\n\t}\n\n\tseglen := (len(colorhisto) \/ 254) + 1\n\tsegments := make([]colorCountList, seglen)\n\n\tx := 0\n\tfor _, xxx := range colorhisto {\n\t\tn := x \/ 254 \/\/integer division\n\t\tsegments[n] = append(segments[n], xxx)\n\n\t\tx++\n\t}\n\n\tlimitSeglen := seglen\n\tif gm.frameLimit != 0 && int(gm.frameLimit) < limitSeglen {\n\t\tlimitSeglen = int(gm.frameLimit)\n\t}\n\n\tg := &gif.GIF{}\n\tfor i := 0; i < limitSeglen; i++ {\n\t\tpimg := image.NewPaletted(b, color.Palette{})\n\t\t\/\/ Add trasparency first so it's used as the matte color\n\t\tpimg.Palette = append(pimg.Palette, color.Transparent)\n\t\tg.Image = append(g.Image, pimg)\n\n\t\tfor _, ch := range segments[i] {\n\t\t\tpimg.Palette = append(pimg.Palette, ch.C)\n\t\t\tind := pimg.Palette.Index(ch.C)\n\n\t\t\tfor _, ccoord := range ch.Coords {\n\t\t\t\tpimg.SetColorIndex(ccoord.X, ccoord.Y, uint8(ind))\n\t\t\t}\n\t\t}\n\n\t\tif gm.backfill {\n\t\t\tfor j := i + 1; j < seglen; j++ {\n\t\t\t\tfor _, ch := range segments[j] {\n\t\t\t\t\tind := pimg.Palette.Index(ch.C)\n\n\t\t\t\t\tfor _, ccoord := range ch.Coords {\n\t\t\t\t\t\tpimg.SetColorIndex(ccoord.X, ccoord.Y, uint8(ind))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tg.Delay = make([]int, len(g.Image))\n\tfor i := range g.Delay {\n\t\tg.Delay[i] = gm.frameDelay\n\t}\n\n\tg.Delay[len(g.Delay)-1] = gm.finalDelay\n\n\treturn g, nil\n}\n<commit_msg>Adds docs<commit_after>package tcgif\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"sort\"\n)\n\ntype coord struct {\n\tX, Y int\n}\n\ntype colorCount struct {\n\tC color.Color\n\tCoords []coord\n}\n\ntype colorCountList []colorCount\n\nfunc (p colorCountList) Len() int { return len(p) }\nfunc (p colorCountList) Less(i, j int) bool { return len(p[i].Coords) < len(p[j].Coords) }\nfunc (p colorCountList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n\n\/\/ GIFMaker is a struct used to create a \"Truecolor GIF\" from an image\ntype GIFMaker struct {\n\tframeDelay int\n\tfinalDelay int\n\n\tframeLimit uint\n\n\tbackfill bool\n\tpopsort bool\n}\n\n\/\/ Option is a function passed to NewGIFMaker to modify the behavior of GIFMaker\ntype Option func(*GIFMaker)\n\n\/\/ WithFrameDelay sets the delay for each frame in the GIF in 100ths of a second\n\/\/ excluding the final frame which is determined by WithFinalDelay\nfunc WithFrameDelay(delay int) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.frameDelay = delay\n\t}\n}\n\n\/\/ WithFinalDelay sets the delay for the final frame of the GIF\n\/\/ in 100ths of a second - default is 300\nfunc WithFinalDelay(delay int) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.finalDelay = delay\n\t}\n}\n\n\/\/ WithFrameLimit sets the maximum number of frames to be included in the GIF\nfunc WithFrameLimit(limit uint) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.frameLimit = limit\n\t}\n}\n\n\/\/ WithBackfill sets whether the GIF will be backfilled preemptively with the\n\/\/ closest color in the palette.\nfunc WithBackfill(backfill bool) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.backfill = backfill\n\t}\n}\n\n\/\/ WithPopularitySort sets whether the total palette is sorted by popularity\n\/\/\n\/\/ Disabling this option will result in the palette being sorted by the order\n\/\/ in which the colors were found to the image. This can improve performance\n\/\/ but may result in a less visually pleasing GIF\nfunc WithPopularitySort(popsort bool) Option {\n\treturn func(g *GIFMaker) {\n\t\tg.popsort = popsort\n\t}\n}\n\n\/\/ NewGIFMaker creates a new GIFMaker with the specified options\nfunc NewGIFMaker(opts ...Option) *GIFMaker {\n\tgm := &GIFMaker{\n\t\tframeDelay: 2,\n\t\tfinalDelay: 300,\n\t\tframeLimit: 0,\n\t\tbackfill: true,\n\t\tpopsort: true,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(gm)\n\t}\n\n\treturn gm\n}\n\n\/\/ MakeGIF creates a \"Truecolor GIF\" from the given image using the given options\nfunc (gm *GIFMaker) MakeGIF(img image.Image) (*gif.GIF, error) {\n\tb := img.Bounds()\n\tcolormap := make(map[color.Color][]coord)\n\n\tfor y := 0; y <= b.Max.Y; y++ {\n\t\tfor x := 0; x <= b.Max.X; x++ {\n\t\t\tc := img.At(x, y)\n\t\t\tcolormap[c] = append(colormap[c], coord{x, y})\n\t\t}\n\t}\n\n\tcolorhisto := make(colorCountList, 0)\n\tfor c, e := range colormap {\n\t\tcolorhisto = append(colorhisto, colorCount{c, e})\n\t}\n\n\tif gm.popsort {\n\t\tsort.Sort(sort.Reverse(colorhisto))\n\t}\n\n\tseglen := (len(colorhisto) \/ 254) + 1\n\tsegments := make([]colorCountList, seglen)\n\n\tx := 0\n\tfor _, xxx := range colorhisto {\n\t\tn := x \/ 254 \/\/integer division\n\t\tsegments[n] = append(segments[n], xxx)\n\n\t\tx++\n\t}\n\n\tlimitSeglen := seglen\n\tif gm.frameLimit != 0 && int(gm.frameLimit) < limitSeglen {\n\t\tlimitSeglen = int(gm.frameLimit)\n\t}\n\n\tg := &gif.GIF{}\n\tfor i := 0; i < limitSeglen; i++ {\n\t\tpimg := image.NewPaletted(b, color.Palette{})\n\t\t\/\/ Add trasparency first so it's used as the matte color\n\t\tpimg.Palette = append(pimg.Palette, color.Transparent)\n\t\tg.Image = append(g.Image, pimg)\n\n\t\tfor _, ch := range segments[i] {\n\t\t\tpimg.Palette = append(pimg.Palette, ch.C)\n\t\t\tind := pimg.Palette.Index(ch.C)\n\n\t\t\tfor _, ccoord := range ch.Coords {\n\t\t\t\tpimg.SetColorIndex(ccoord.X, ccoord.Y, uint8(ind))\n\t\t\t}\n\t\t}\n\n\t\tif gm.backfill {\n\t\t\tfor j := i + 1; j < seglen; j++ {\n\t\t\t\tfor _, ch := range segments[j] {\n\t\t\t\t\tind := pimg.Palette.Index(ch.C)\n\n\t\t\t\t\tfor _, ccoord := range ch.Coords {\n\t\t\t\t\t\tpimg.SetColorIndex(ccoord.X, ccoord.Y, uint8(ind))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tg.Delay = make([]int, len(g.Image))\n\tfor i := range g.Delay {\n\t\tg.Delay[i] = gm.frameDelay\n\t}\n\n\tg.Delay[len(g.Delay)-1] = gm.finalDelay\n\n\treturn g, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/juju\/loggo\"\n\n\t\"launchpad.net\/juju-core\/juju\/osenv\"\n)\n\nvar (\n\taptLogger = loggo.GetLogger(\"juju.utils.apt\")\n\taptProxyRE = regexp.MustCompile(`(?im)^\\s*Acquire::(?P<protocol>[a-z]+)::Proxy\\s+\"(?P<proxy>[^\"]+)\";\\s*$`)\n\n\t\/\/ AptConfFile is the full file path for the proxy settings that are\n\t\/\/ written by cloud-init and the machine environ worker.\n\tAptConfFile = \"\/etc\/apt\/apt.conf.d\/42-juju-proxy-settings\"\n)\n\n\/\/ Some helpful functions for running apt in a sane way\n\n\/\/ AptCommandOutput calls cmd.Output, this is used as an overloading point so we\n\/\/ can test what *would* be run without actually executing another program\nvar AptCommandOutput = (*exec.Cmd).CombinedOutput\n\n\/\/ This is the default apt-get command used in cloud-init, the various settings\n\/\/ mean that apt won't actually block waiting for a prompt from the user.\nvar aptGetCommand = []string{\n\t\"apt-get\", \"--option=Dpkg::Options::=--force-confold\",\n\t\"--option=Dpkg::options::=--force-unsafe-io\", \"--assume-yes\", \"--quiet\",\n}\n\n\/\/ aptEnvOptions are options we need to pass to apt-get to not have it prompt\n\/\/ the user\nvar aptGetEnvOptions = []string{\"DEBIAN_FRONTEND=noninteractive\"}\n\n\/\/ cloudArchivePackages maintaines a list of packages that AptGetPreparePackages\n\/\/ should reference when determining the --target-release for a given series.\n\/\/ http:\/\/reqorts.qa.ubuntu.com\/reports\/ubuntu-server\/cloud-archive\/cloud-tools_versions.html\nvar cloudArchivePackages = map[string]bool{\n\t\"cloud-utils\": true,\n\t\"curtin\": true,\n\t\"djorm-ext-pgarray\": true,\n\t\"golang\": true,\n\t\"iproute2\": true,\n\t\"isc-dhcp\": true,\n\t\"juju-core\": true,\n\t\"libseccomp\": true,\n\t\"libv8-3.14\": true,\n\t\"lxc\": true,\n\t\"maas\": true,\n\t\"mongodb\": true,\n\t\"python-django\": true,\n\t\"python-django-piston\": true,\n\t\"python-jujuclient\": true,\n\t\"python-tx-tftp\": true,\n\t\"python-websocket-client\": true,\n\t\"raphael 2.1.0-1ubuntu1\": true,\n\t\"simplestreams\": true,\n\t\"txlongpoll\": true,\n\t\"uvtool\": true,\n\t\"yui3\": true,\n}\n\n\/\/ targetRelease returns a string base on the current series\n\/\/ that is suitable for use with the apt-get --target-release option\nfunc targetRelease(series string) string {\n\tswitch series {\n\tcase \"precise\":\n\t\treturn \"precise-updates\/cloud-tools\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ AptGetPreparePackages returns a slice of installCommands. Each item\n\/\/ in the slice is suitable for passing directly to AptGetInstall.\n\/\/\n\/\/ AptGetPreparePackages will inspect the series passed to it\n\/\/ and properly generate an installCommand entry with a --target-release\n\/\/ should the series be an LTS release with cloud archive packages.\nfunc AptGetPreparePackages(packages []string, series string) [][]string {\n\tvar installCommands [][]string\n\tif target := targetRelease(series); target != \"\" {\n\t\tvar pkgs []string\n\t\tpkgs_with_target := []string{\"--target-release\", target}\n\t\tfor _, pkg := range packages {\n\t\t\tif cloudArchivePackages[pkg] {\n\t\t\t\tpkgs_with_target = append(pkgs_with_target, pkg)\n\t\t\t} else {\n\t\t\t\tpkgs = append(pkgs, pkg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We check for >2 here so that we only append pkgs_with_target\n\t\t\/\/ if there was an actual package in the slice.\n\t\tif len(pkgs_with_target) > 2 {\n\t\t\tinstallCommands = append(installCommands, pkgs_with_target)\n\t\t}\n\n\t\t\/\/ Sometimes we may end up with all cloudArchivePackages\n\t\t\/\/ in that case we do not want to append an empty slice of pkgs\n\t\tif len(pkgs) > 0 {\n\t\t\tinstallCommands = append(installCommands, pkgs)\n\t\t}\n\t} else {\n\t\tinstallCommands = append(installCommands, packages)\n\t}\n\treturn installCommands\n}\n\n\/\/ AptGetInstall runs 'apt-get install packages' for the packages listed here\nfunc AptGetInstall(packages ...string) error {\n\tcmdArgs := append([]string(nil), aptGetCommand...)\n\tcmdArgs = append(cmdArgs, \"install\")\n\tcmdArgs = append(cmdArgs, packages...)\n\taptLogger.Infof(\"Running: %s\", cmdArgs)\n\tcmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)\n\tcmd.Env = append(os.Environ(), aptGetEnvOptions...)\n\tout, err := AptCommandOutput(cmd)\n\tif err != nil {\n\t\taptLogger.Errorf(\"apt-get command failed: %v\\nargs: %#v\\n%s\",\n\t\t\terr, cmdArgs, string(out))\n\t\treturn fmt.Errorf(\"apt-get failed: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ AptConfigProxy will consult apt-config about the configured proxy\n\/\/ settings. If there are no proxy settings configured, an empty string is\n\/\/ returned.\nfunc AptConfigProxy() (string, error) {\n\tcmdArgs := []string{\n\t\t\"apt-config\",\n\t\t\"dump\",\n\t\t\"Acquire::http::Proxy\",\n\t\t\"Acquire::https::Proxy\",\n\t\t\"Acquire::ftp::Proxy\",\n\t}\n\tcmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)\n\tout, err := AptCommandOutput(cmd)\n\tif err != nil {\n\t\taptLogger.Errorf(\"apt-config command failed: %v\\nargs: %#v\\n%s\",\n\t\t\terr, cmdArgs, string(out))\n\t\treturn \"\", fmt.Errorf(\"apt-config failed: %v\", err)\n\t}\n\treturn string(bytes.Join(aptProxyRE.FindAll(out, -1), []byte(\"\\n\"))), nil\n}\n\n\/\/ DetectAptProxies will parse the results of AptConfigProxy to return a\n\/\/ ProxySettings instance.\nfunc DetectAptProxies() (result osenv.ProxySettings, err error) {\n\toutput, err := AptConfigProxy()\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tfor _, match := range aptProxyRE.FindAllStringSubmatch(output, -1) {\n\t\tswitch match[1] {\n\t\tcase \"http\":\n\t\t\tresult.Http = match[2]\n\t\tcase \"https\":\n\t\t\tresult.Https = match[2]\n\t\tcase \"ftp\":\n\t\t\tresult.Ftp = match[2]\n\t\t}\n\t}\n\treturn result, nil\n}\n\n\/\/ AptProxyContent produces the format expected by the apt config files\n\/\/ from the ProxySettings struct.\nfunc AptProxyContent(proxy osenv.ProxySettings) string {\n\tlines := []string{}\n\taddLine := func(proxy, value string) {\n\t\tif value != \"\" {\n\t\t\tlines = append(lines, fmt.Sprintf(\n\t\t\t\t\"Acquire::%s::Proxy %q;\", proxy, value))\n\t\t}\n\t}\n\taddLine(\"http\", proxy.Http)\n\taddLine(\"https\", proxy.Https)\n\taddLine(\"ftp\", proxy.Ftp)\n\treturn strings.Join(lines, \"\\n\")\n}\n\n\/\/ IsUbuntu executes lxb_release to see if the host OS is Ubuntu.\nfunc IsUbuntu() bool {\n\tout, err := RunCommand(\"lsb_release\", \"-i\", \"-s\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn strings.TrimSpace(out) == \"Ubuntu\"\n}\n\n\/\/ IsPackageInstalled uses dpkg-query to determine if the `packageName`\n\/\/ package is installed.\nfunc IsPackageInstalled(packageName string) bool {\n\t_, err := RunCommand(\"dpkg-query\", \"--status\", packageName)\n\treturn err == nil\n}\n<commit_msg>utils\/apt: updates based on review<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/juju\/loggo\"\n\n\t\"launchpad.net\/juju-core\/juju\/osenv\"\n)\n\nvar (\n\taptLogger = loggo.GetLogger(\"juju.utils.apt\")\n\taptProxyRE = regexp.MustCompile(`(?im)^\\s*Acquire::(?P<protocol>[a-z]+)::Proxy\\s+\"(?P<proxy>[^\"]+)\";\\s*$`)\n\n\t\/\/ AptConfFile is the full file path for the proxy settings that are\n\t\/\/ written by cloud-init and the machine environ worker.\n\tAptConfFile = \"\/etc\/apt\/apt.conf.d\/42-juju-proxy-settings\"\n)\n\n\/\/ Some helpful functions for running apt in a sane way\n\n\/\/ AptCommandOutput calls cmd.Output, this is used as an overloading point so we\n\/\/ can test what *would* be run without actually executing another program\nvar AptCommandOutput = (*exec.Cmd).CombinedOutput\n\n\/\/ This is the default apt-get command used in cloud-init, the various settings\n\/\/ mean that apt won't actually block waiting for a prompt from the user.\nvar aptGetCommand = []string{\n\t\"apt-get\", \"--option=Dpkg::Options::=--force-confold\",\n\t\"--option=Dpkg::options::=--force-unsafe-io\", \"--assume-yes\", \"--quiet\",\n}\n\n\/\/ aptEnvOptions are options we need to pass to apt-get to not have it prompt\n\/\/ the user\nvar aptGetEnvOptions = []string{\"DEBIAN_FRONTEND=noninteractive\"}\n\n\/\/ cloudArchivePackages maintaines a list of packages that AptGetPreparePackages\n\/\/ should reference when determining the --target-release for a given series.\n\/\/ http:\/\/reqorts.qa.ubuntu.com\/reports\/ubuntu-server\/cloud-archive\/cloud-tools_versions.html\nvar cloudArchivePackages = map[string]bool{\n\t\"cloud-utils\": true,\n\t\"curtin\": true,\n\t\"djorm-ext-pgarray\": true,\n\t\"golang\": true,\n\t\"iproute2\": true,\n\t\"isc-dhcp\": true,\n\t\"juju-core\": true,\n\t\"libseccomp\": true,\n\t\"libv8-3.14\": true,\n\t\"lxc\": true,\n\t\"maas\": true,\n\t\"mongodb\": true,\n\t\"python-django\": true,\n\t\"python-django-piston\": true,\n\t\"python-jujuclient\": true,\n\t\"python-tx-tftp\": true,\n\t\"python-websocket-client\": true,\n\t\"raphael 2.1.0-1ubuntu1\": true,\n\t\"simplestreams\": true,\n\t\"txlongpoll\": true,\n\t\"uvtool\": true,\n\t\"yui3\": true,\n}\n\n\/\/ targetRelease returns a string base on the current series\n\/\/ that is suitable for use with the apt-get --target-release option\nfunc targetRelease(series string) string {\n\tswitch series {\n\tcase \"precise\":\n\t\treturn \"precise-updates\/cloud-tools\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ AptGetPreparePackages returns a slice of installCommands. Each item\n\/\/ in the slice is suitable for passing directly to AptGetInstall.\n\/\/\n\/\/ AptGetPreparePackages will inspect the series passed to it\n\/\/ and properly generate an installCommand entry with a --target-release\n\/\/ should the series be an LTS release with cloud archive packages.\nfunc AptGetPreparePackages(packages []string, series string) [][]string {\n\tvar installCommands [][]string\n\tif target := targetRelease(series); target == \"\" {\n\t\treturn append(installCommands, packages)\n\t} else {\n\t\tvar pkgs []string\n\t\tpkgs_with_target := []string{\"--target-release\", target}\n\t\tfor _, pkg := range packages {\n\t\t\tif cloudArchivePackages[pkg] {\n\t\t\t\tpkgs_with_target = append(pkgs_with_target, pkg)\n\t\t\t} else {\n\t\t\t\tpkgs = append(pkgs, pkg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We check for >2 here so that we only append pkgs_with_target\n\t\t\/\/ if there was an actual package in the slice.\n\t\tif len(pkgs_with_target) > 2 {\n\t\t\tinstallCommands = append(installCommands, pkgs_with_target)\n\t\t}\n\n\t\t\/\/ Sometimes we may end up with all cloudArchivePackages\n\t\t\/\/ in that case we do not want to append an empty slice of pkgs\n\t\tif len(pkgs) > 0 {\n\t\t\tinstallCommands = append(installCommands, pkgs)\n\t\t}\n\n\t\treturn installCommands\n\t}\n}\n\n\/\/ AptGetInstall runs 'apt-get install packages' for the packages listed here\nfunc AptGetInstall(packages ...string) error {\n\tcmdArgs := append([]string(nil), aptGetCommand...)\n\tcmdArgs = append(cmdArgs, \"install\")\n\tcmdArgs = append(cmdArgs, packages...)\n\taptLogger.Infof(\"Running: %s\", cmdArgs)\n\tcmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)\n\tcmd.Env = append(os.Environ(), aptGetEnvOptions...)\n\tout, err := AptCommandOutput(cmd)\n\tif err != nil {\n\t\taptLogger.Errorf(\"apt-get command failed: %v\\nargs: %#v\\n%s\",\n\t\t\terr, cmdArgs, string(out))\n\t\treturn fmt.Errorf(\"apt-get failed: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ AptConfigProxy will consult apt-config about the configured proxy\n\/\/ settings. If there are no proxy settings configured, an empty string is\n\/\/ returned.\nfunc AptConfigProxy() (string, error) {\n\tcmdArgs := []string{\n\t\t\"apt-config\",\n\t\t\"dump\",\n\t\t\"Acquire::http::Proxy\",\n\t\t\"Acquire::https::Proxy\",\n\t\t\"Acquire::ftp::Proxy\",\n\t}\n\tcmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)\n\tout, err := AptCommandOutput(cmd)\n\tif err != nil {\n\t\taptLogger.Errorf(\"apt-config command failed: %v\\nargs: %#v\\n%s\",\n\t\t\terr, cmdArgs, string(out))\n\t\treturn \"\", fmt.Errorf(\"apt-config failed: %v\", err)\n\t}\n\treturn string(bytes.Join(aptProxyRE.FindAll(out, -1), []byte(\"\\n\"))), nil\n}\n\n\/\/ DetectAptProxies will parse the results of AptConfigProxy to return a\n\/\/ ProxySettings instance.\nfunc DetectAptProxies() (result osenv.ProxySettings, err error) {\n\toutput, err := AptConfigProxy()\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tfor _, match := range aptProxyRE.FindAllStringSubmatch(output, -1) {\n\t\tswitch match[1] {\n\t\tcase \"http\":\n\t\t\tresult.Http = match[2]\n\t\tcase \"https\":\n\t\t\tresult.Https = match[2]\n\t\tcase \"ftp\":\n\t\t\tresult.Ftp = match[2]\n\t\t}\n\t}\n\treturn result, nil\n}\n\n\/\/ AptProxyContent produces the format expected by the apt config files\n\/\/ from the ProxySettings struct.\nfunc AptProxyContent(proxy osenv.ProxySettings) string {\n\tlines := []string{}\n\taddLine := func(proxy, value string) {\n\t\tif value != \"\" {\n\t\t\tlines = append(lines, fmt.Sprintf(\n\t\t\t\t\"Acquire::%s::Proxy %q;\", proxy, value))\n\t\t}\n\t}\n\taddLine(\"http\", proxy.Http)\n\taddLine(\"https\", proxy.Https)\n\taddLine(\"ftp\", proxy.Ftp)\n\treturn strings.Join(lines, \"\\n\")\n}\n\n\/\/ IsUbuntu executes lxb_release to see if the host OS is Ubuntu.\nfunc IsUbuntu() bool {\n\tout, err := RunCommand(\"lsb_release\", \"-i\", \"-s\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn strings.TrimSpace(out) == \"Ubuntu\"\n}\n\n\/\/ IsPackageInstalled uses dpkg-query to determine if the `packageName`\n\/\/ package is installed.\nfunc IsPackageInstalled(packageName string) bool {\n\t_, err := RunCommand(\"dpkg-query\", \"--status\", packageName)\n\treturn err == nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gherkin\n\ntype Location struct {\n\tLine int `json:\"line\"`\n\tColumn int `json:\"column\"`\n}\n\ntype Node struct {\n\tLocation *Location `json:\"location,omitempty\"`\n\tType string `json:\"type\"`\n}\n\ntype Feature struct {\n\tNode\n\tTags []*Tag `json:\"tags\"`\n\tLanguage string `json:\"language,omitempty\"`\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tBackground *Background `json:\"background,omitempty\"`\n\tScenarioDefinitions []interface{} `json:\"scenarioDefinitions\"`\n}\n\nfunc (f *Feature) DescribeTo(v Visitor) {\n\tv.VisitFeature(f)\n}\n\ntype Tag struct {\n\tNode\n\tLocation *Location `json:\"location,omitempty\"`\n\tName string `json:\"name\"`\n}\n\ntype Background struct {\n\tScenarioDefinition\n}\n\nfunc (b *Background) DescribeTo(v Visitor) {\n\tv.VisitBackground(b)\n}\n\ntype Scenario struct {\n\tScenarioDefinition\n\tTags []*Tag `json:\"tags\"`\n}\n\nfunc (s *Scenario) DescribeTo(v Visitor) {\n\tv.VisitScenario(s)\n}\n\ntype ScenarioOutline struct {\n\tScenarioDefinition\n\tTags []*Tag `json:\"tags\"`\n\tExamples []*Examples `json:\"examples,omitempty\"`\n}\n\nfunc (s *ScenarioOutline) DescribeTo(v Visitor) {\n\tv.VisitScenarioOutline(s)\n}\n\ntype Examples struct {\n\tNode\n\tTags []*Tag `json:\"tags\"`\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRows []*TableRow `json:\"rows\"`\n}\n\nfunc (e *Examples) DescribeTo(v Visitor) {\n\tv.VisitExamples(e)\n}\n\ntype TableRow struct {\n\tNode\n\tCells []*TableCell `json:\"cells\"`\n}\n\ntype TableCell struct {\n\tNode\n\tValue string `json:\"value\"`\n}\n\ntype ScenarioDefinition struct {\n\tNode\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tSteps []*Step `json:\"steps\"`\n}\n\ntype Step struct {\n\tNode\n\tKeyword string `json:\"keyword\"`\n\tText string `json:\"text\"`\n\tArgument interface{} `json:\"argument,omitempty\"`\n}\n\nfunc (s *Step) DescribeTo(v Visitor) {\n\tv.VisitStep(s)\n}\n\ntype DocString struct {\n\tNode\n\tContentType string `json:\"contentType\"`\n\tContent string `json:\"content\"`\n\tDelimitter string `json:\"-\"`\n}\n\ntype DataTable struct {\n\tNode\n\tRows []*TableRow `json:\"rows\"`\n}\n\ntype HasTags interface {\n\tTags() []*Tag\n}\n\ntype HasSteps interface {\n\tSteps() []*Step\n}\n\ntype DescribesItself interface {\n\tDescribeTo(Visitor)\n}\n\ntype Visitor interface {\n\tVisitFeature(*Feature)\n\tVisitBackground(*Background)\n\tVisitScenario(*Scenario)\n\tVisitScenarioOutline(*ScenarioOutline)\n\tVisitExamples(*Examples)\n\tVisitStep(*Step)\n}\n<commit_msg>Remove unused visitor and interfaces from go impl<commit_after>package gherkin\n\ntype Location struct {\n\tLine int `json:\"line\"`\n\tColumn int `json:\"column\"`\n}\n\ntype Node struct {\n\tLocation *Location `json:\"location,omitempty\"`\n\tType string `json:\"type\"`\n}\n\ntype Feature struct {\n\tNode\n\tTags []*Tag `json:\"tags\"`\n\tLanguage string `json:\"language,omitempty\"`\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tBackground *Background `json:\"background,omitempty\"`\n\tScenarioDefinitions []interface{} `json:\"scenarioDefinitions\"`\n}\n\ntype Tag struct {\n\tNode\n\tLocation *Location `json:\"location,omitempty\"`\n\tName string `json:\"name\"`\n}\n\ntype Background struct {\n\tScenarioDefinition\n}\n\ntype Scenario struct {\n\tScenarioDefinition\n\tTags []*Tag `json:\"tags\"`\n}\n\ntype ScenarioOutline struct {\n\tScenarioDefinition\n\tTags []*Tag `json:\"tags\"`\n\tExamples []*Examples `json:\"examples,omitempty\"`\n}\n\ntype Examples struct {\n\tNode\n\tTags []*Tag `json:\"tags\"`\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRows []*TableRow `json:\"rows\"`\n}\n\ntype TableRow struct {\n\tNode\n\tCells []*TableCell `json:\"cells\"`\n}\n\ntype TableCell struct {\n\tNode\n\tValue string `json:\"value\"`\n}\n\ntype ScenarioDefinition struct {\n\tNode\n\tKeyword string `json:\"keyword\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description,omitempty\"`\n\tSteps []*Step `json:\"steps\"`\n}\n\ntype Step struct {\n\tNode\n\tKeyword string `json:\"keyword\"`\n\tText string `json:\"text\"`\n\tArgument interface{} `json:\"argument,omitempty\"`\n}\n\ntype DocString struct {\n\tNode\n\tContentType string `json:\"contentType\"`\n\tContent string `json:\"content\"`\n\tDelimitter string `json:\"-\"`\n}\n\ntype DataTable struct {\n\tNode\n\tRows []*TableRow `json:\"rows\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package radius\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\ntype AVP struct {\n\tType AttributeType\n\tValue []byte\n}\n\nfunc (a AVP) Copy() AVP {\n\tvalue := make([]byte, len(a.Value))\n\tcopy(value, a.Value)\n\treturn AVP{\n\t\tType: a.Type,\n\t\tValue: a.Value,\n\t}\n}\nfunc (a AVP) Encode(b []byte) (n int, err error) {\n\tfullLen := len(a.Value) + 2 \/\/type and length\n\tif fullLen > 255 || fullLen < 2 {\n\t\treturn 0, errors.New(\"value too big for attribute\")\n\t}\n\tb[0] = uint8(a.Type)\n\tb[1] = uint8(fullLen)\n\tcopy(b[2:], a.Value)\n\treturn fullLen, err\n}\n\nfunc (a AVP) Decode(p *Packet) interface{} {\n\treturn getAttributeTypeDesc(a.Type).dataType.Value(p, a)\n}\n\nfunc (a AVP) String() string {\n\treturn \"AVP type: \" + a.Type.String() + \" \" + getAttributeTypeDesc(a.Type).dataType.String(nil, a)\n}\n\nfunc (a AVP) StringWithPacket(p *Packet) string {\n\treturn \"AVP type: \" + a.Type.String() + \" \" + getAttributeTypeDesc(a.Type).dataType.String(p, a)\n}\n\ntype avpDataType interface {\n\tValue(p *Packet, a AVP) interface{}\n\tString(p *Packet, a AVP) string\n}\n\nvar avpString avpStringt\n\ntype avpStringt struct{}\n\nfunc (s avpStringt) Value(p *Packet, a AVP) interface{} {\n\treturn string(a.Value)\n}\nfunc (s avpStringt) String(p *Packet, a AVP) string {\n\treturn string(a.Value)\n}\n\nvar avpIP avpIPt\n\ntype avpIPt struct{}\n\nfunc (s avpIPt) Value(p *Packet, a AVP) interface{} {\n\treturn net.IP(a.Value)\n}\nfunc (s avpIPt) String(p *Packet, a AVP) string {\n\treturn net.IP(a.Value).String()\n}\n\nvar avpUint32 avpUint32t\n\ntype avpUint32t struct{}\n\nfunc (s avpUint32t) Value(p *Packet, a AVP) interface{} {\n\treturn uint32(binary.BigEndian.Uint32(a.Value))\n}\nfunc (s avpUint32t) String(p *Packet, a AVP) string {\n\treturn strconv.Itoa(int(binary.BigEndian.Uint32(a.Value)))\n}\n\nvar avpBinary avpBinaryt\n\ntype avpBinaryt struct{}\n\nfunc (s avpBinaryt) Value(p *Packet, a AVP) interface{} {\n\treturn a.Value\n}\nfunc (s avpBinaryt) String(p *Packet, a AVP) string {\n\treturn fmt.Sprintf(\"%#v\", a.Value)\n}\n\nvar avpPassword avpPasswordt\n\ntype avpPasswordt struct{}\n\nfunc (s avpPasswordt) Value(p *Packet, a AVP) interface{} {\n\tif p == nil {\n\t\treturn \"\"\n\t}\n\n\tbuff := a.Value\n\tpass := make([]byte, 0)\n\tlast := make([]byte, 16)\n\tcopy(last, p.Authenticator[:])\n\n\tfor len(buff) > 0 {\n\t\tm := crypto.Hash(crypto.MD5).New()\n\t\tm.Write(append([]byte(p.Secret), last...))\n\t\th := m.Sum(nil)\n\t\tfor i := 0; i < 16; i++ {\n\t\t\tpass = append(pass, buff[i]^h[i])\n\t\t}\n\t\tlast = buff[:16]\n\t\tbuff = buff[16:]\n\t}\n\n\tpass = bytes.TrimRight(pass, string([]rune{0}))\n\treturn string(pass)\n}\nfunc (s avpPasswordt) String(p *Packet, a AVP) string {\n\treturn s.Value(p, a).(string)\n}\n\ntype avpUint32EnumList []string\n\nfunc (s avpUint32EnumList) Value(p *Packet, a AVP) interface{} {\n\treturn uint32(binary.BigEndian.Uint32(a.Value))\n}\nfunc (s avpUint32EnumList) String(p *Packet, a AVP) string {\n\tnumber := int(binary.BigEndian.Uint32(a.Value))\n\tif number > len(s) {\n\t\treturn \"unknow \" + strconv.Itoa(number)\n\t}\n\tout := s[number]\n\tif out == \"\" {\n\t\treturn \"unknow \" + strconv.Itoa(number)\n\t}\n\treturn out\n}\n\ntype avpUint32Enum struct {\n\tt interface{} \/\/ t should from a uint32 type like AcctStatusTypeEnum\n}\n\nfunc (s avpUint32Enum) Value(p *Packet, a AVP) interface{} {\n\tvalue := reflect.New(reflect.TypeOf(s.t)).Elem()\n\tvalue.SetUint(uint64(binary.BigEndian.Uint32(a.Value)))\n\treturn value.Interface()\n}\nfunc (s avpUint32Enum) String(p *Packet, a AVP) string {\n\tnumber := binary.BigEndian.Uint32(a.Value)\n\tvalue := reflect.New(reflect.TypeOf(s.t)).Elem()\n\tvalue.SetUint(uint64(number))\n\tmethod := value.MethodByName(\"String\")\n\tif !method.IsValid() {\n\t\treturn strconv.Itoa(int(number))\n\t}\n\tout := method.Call(nil)\n\treturn out[0].Interface().(string)\n}\n\nvar avpEapMessage avpEapMessaget\n\ntype avpEapMessaget struct{}\n\nfunc (s avpEapMessaget) Value(p *Packet, a AVP) interface{} {\n\teap, err := EapDecode(a.Value)\n\tif err != nil {\n\t\t\/\/TODO error handle\n\t\tfmt.Println(\"EapDecode fail \", err)\n\t\treturn nil\n\t}\n\treturn eap\n\n}\nfunc (s avpEapMessaget) String(p *Packet, a AVP) string {\n\teap := s.Value(p, a)\n\tif eap == nil {\n\t\treturn \"nil\"\n\t}\n\treturn eap.(*EapPacket).String()\n}\n\ntype AcctStatusTypeEnum uint32\n\nconst (\n\tAcctStatusTypeEnumStart AcctStatusTypeEnum = 1\n\tAcctStatusTypeEnumStop AcctStatusTypeEnum = 2\n\tAcctStatusTypeEnumInterimUpdate AcctStatusTypeEnum = 3\n\tAcctStatusTypeEnumAccountingOn AcctStatusTypeEnum = 7\n\tAcctStatusTypeEnumAccountingOff AcctStatusTypeEnum = 8\n)\n\nfunc (e AcctStatusTypeEnum) String() string {\n\tswitch e {\n\tcase AcctStatusTypeEnumStart:\n\t\treturn \"Start\"\n\tcase AcctStatusTypeEnumStop:\n\t\treturn \"Stop\"\n\tcase AcctStatusTypeEnumInterimUpdate:\n\t\treturn \"InterimUpdate\"\n\tcase AcctStatusTypeEnumAccountingOn:\n\t\treturn \"AccountingOn\"\n\tcase AcctStatusTypeEnumAccountingOff:\n\t\treturn \"AccountingOff\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype NASPortTypeEnum uint32\n\n\/\/ TODO finish it\nconst (\n\tNASPortTypeEnumAsync NASPortTypeEnum = 0\n\tNASPortTypeEnumSync NASPortTypeEnum = 1\n\tNASPortTypeEnumISDNSync NASPortTypeEnum = 2\n\tNASPortTypeEnumISDNSyncV120 NASPortTypeEnum = 3\n\tNASPortTypeEnumISDNSyncV110 NASPortTypeEnum = 4\n\tNASPortTypeEnumVirtual NASPortTypeEnum = 5\n\tNASPortTypeEnumPIAFS NASPortTypeEnum = 6\n\tNASPortTypeEnumHDLCClearChannel NASPortTypeEnum = 7\n\tNASPortTypeEnumEthernet NASPortTypeEnum = 15\n\tNASPortTypeEnumCable NASPortTypeEnum = 17\n)\n\nfunc (e NASPortTypeEnum) String() string {\n\tswitch e {\n\tcase NASPortTypeEnumAsync:\n\t\treturn \"Async\"\n\tcase NASPortTypeEnumSync:\n\t\treturn \"Sync\"\n\tcase NASPortTypeEnumISDNSync:\n\t\treturn \"ISDNSync\"\n\tcase NASPortTypeEnumISDNSyncV120:\n\t\treturn \"ISDNSyncV120\"\n\tcase NASPortTypeEnumISDNSyncV110:\n\t\treturn \"ISDNSyncV110\"\n\tcase NASPortTypeEnumVirtual:\n\t\treturn \"Virtual\"\n\tcase NASPortTypeEnumPIAFS:\n\t\treturn \"PIAFS\"\n\tcase NASPortTypeEnumHDLCClearChannel:\n\t\treturn \"HDLCClearChannel\"\n\tcase NASPortTypeEnumEthernet:\n\t\treturn \"Ethernet\"\n\tcase NASPortTypeEnumCable:\n\t\treturn \"Cable\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype ServiceTypeEnum uint32\n\n\/\/ TODO finish it\nconst (\n\tServiceTypeEnumLogin ServiceTypeEnum = 1\n\tServiceTypeEnumFramed ServiceTypeEnum = 2\n\tServiceTypeEnumCallbackLogin ServiceTypeEnum = 3\n\tServiceTypeEnumCallbackFramed ServiceTypeEnum = 4\n\tServiceTypeEnumOutbound ServiceTypeEnum = 5\n)\n\nfunc (e ServiceTypeEnum) String() string {\n\tswitch e {\n\tcase ServiceTypeEnumLogin:\n\t\treturn \"Login\"\n\tcase ServiceTypeEnumFramed:\n\t\treturn \"Framed\"\n\tcase ServiceTypeEnumCallbackLogin:\n\t\treturn \"CallbackLogin\"\n\tcase ServiceTypeEnumCallbackFramed:\n\t\treturn \"CallbackFramed\"\n\tcase ServiceTypeEnumOutbound:\n\t\treturn \"Outbound\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype AcctTerminateCauseEnum uint32\n\nconst (\n\tAcctTerminateCauseEnumUserRequest AcctTerminateCauseEnum = 1\n\tAcctTerminateCauseEnumLostCarrier AcctTerminateCauseEnum = 2\n\tAcctTerminateCauseEnumLostService AcctTerminateCauseEnum = 3\n\tAcctTerminateCauseEnumIdleTimeout AcctTerminateCauseEnum = 4\n\tAcctTerminateCauseEnumSessionTimout AcctTerminateCauseEnum = 5\n\tAcctTerminateCauseEnumAdminReset AcctTerminateCauseEnum = 6\n\tAcctTerminateCauseEnumAdminReboot AcctTerminateCauseEnum = 7\n\tAcctTerminateCauseEnumPortError AcctTerminateCauseEnum = 8\n\tAcctTerminateCauseEnumNASError AcctTerminateCauseEnum = 9\n\tAcctTerminateCauseEnumNASRequest AcctTerminateCauseEnum = 10\n\tAcctTerminateCauseEnumNASReboot AcctTerminateCauseEnum = 11\n\tAcctTerminateCauseEnumPortUnneeded AcctTerminateCauseEnum = 12\n\tAcctTerminateCauseEnumPortPreempted AcctTerminateCauseEnum = 13\n\tAcctTerminateCauseEnumPortSuspended AcctTerminateCauseEnum = 14\n\tAcctTerminateCauseEnumServiceUnavailable AcctTerminateCauseEnum = 15\n\tAcctTerminateCauseEnumCallbkack AcctTerminateCauseEnum = 16\n\tAcctTerminateCauseEnumUserError AcctTerminateCauseEnum = 17\n\tAcctTerminateCauseEnumHostRequest AcctStatusTypeEnum = 18\n)\n\nfunc (e AcctTerminateCauseEnum) String() string {\n\tswitch e {\n\tcase AcctTerminateCauseEnumUserRequest:\n\t\treturn \"UserRequest\"\n\tcase AcctTerminateCauseEnumLostCarrier:\n\t\treturn \"LostCarrier\"\n\tcase AcctTerminateCauseEnumLostService:\n\t\treturn \"LostService\"\n\tcase AcctTerminateCauseEnumIdleTimeout:\n\t\treturn \"IdleTimeout\"\n\tcase AcctTerminateCauseEnumSessionTimout:\n\t\treturn \"SessionTimeout\"\n\tcase AcctTerminateCauseEnumAdminReset:\n\t\treturn \"AdminReset\"\n\tcase AcctTerminateCauseEnumAdminReboot:\n\t\treturn \"AdminReboot\"\n\tcase AcctTerminateCauseEnumPortError:\n\t\treturn \"PortError\"\n\tcase AcctTerminateCauseEnumNASError:\n\t\treturn \"NASError\"\n\tcase AcctTerminateCauseEnumNASRequest:\n\t\treturn \"NASRequest\"\n\tcase AcctTerminateCauseEnumNASReboot:\n\t\treturn \"NASReboot\"\n\tcase AcctTerminateCauseEnumPortUnneeded:\n\t\treturn \"PortUnneeded\"\n\tcase AcctTerminateCauseEnumPortPreempted:\n\t\treturn \"PortPreempted\"\n\tcase AcctTerminateCauseEnumPortSuspended:\n\t\treturn \"PortSuspended\"\n\tcase AcctTerminateCauseEnumServiceUnavailable:\n\t\treturn \"ServiceUnavailable\"\n\tcase AcctTerminateCauseEnumCallbkack:\n\t\treturn \"Callback\"\n\tcase AcctTerminateCauseEnumUserError:\n\t\treturn \"UserError\"\n\tcase AcctTerminateCauseEnumHostRequest:\n\t\treturn \"HostRequest\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n<commit_msg>fix avp AcctTerminateCauseEnum<commit_after>package radius\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\ntype AVP struct {\n\tType AttributeType\n\tValue []byte\n}\n\nfunc (a AVP) Copy() AVP {\n\tvalue := make([]byte, len(a.Value))\n\tcopy(value, a.Value)\n\treturn AVP{\n\t\tType: a.Type,\n\t\tValue: a.Value,\n\t}\n}\nfunc (a AVP) Encode(b []byte) (n int, err error) {\n\tfullLen := len(a.Value) + 2 \/\/type and length\n\tif fullLen > 255 || fullLen < 2 {\n\t\treturn 0, errors.New(\"value too big for attribute\")\n\t}\n\tb[0] = uint8(a.Type)\n\tb[1] = uint8(fullLen)\n\tcopy(b[2:], a.Value)\n\treturn fullLen, err\n}\n\nfunc (a AVP) Decode(p *Packet) interface{} {\n\treturn getAttributeTypeDesc(a.Type).dataType.Value(p, a)\n}\n\nfunc (a AVP) String() string {\n\treturn \"AVP type: \" + a.Type.String() + \" \" + getAttributeTypeDesc(a.Type).dataType.String(nil, a)\n}\n\nfunc (a AVP) StringWithPacket(p *Packet) string {\n\treturn \"AVP type: \" + a.Type.String() + \" \" + getAttributeTypeDesc(a.Type).dataType.String(p, a)\n}\n\ntype avpDataType interface {\n\tValue(p *Packet, a AVP) interface{}\n\tString(p *Packet, a AVP) string\n}\n\nvar avpString avpStringt\n\ntype avpStringt struct{}\n\nfunc (s avpStringt) Value(p *Packet, a AVP) interface{} {\n\treturn string(a.Value)\n}\nfunc (s avpStringt) String(p *Packet, a AVP) string {\n\treturn string(a.Value)\n}\n\nvar avpIP avpIPt\n\ntype avpIPt struct{}\n\nfunc (s avpIPt) Value(p *Packet, a AVP) interface{} {\n\treturn net.IP(a.Value)\n}\nfunc (s avpIPt) String(p *Packet, a AVP) string {\n\treturn net.IP(a.Value).String()\n}\n\nvar avpUint32 avpUint32t\n\ntype avpUint32t struct{}\n\nfunc (s avpUint32t) Value(p *Packet, a AVP) interface{} {\n\treturn uint32(binary.BigEndian.Uint32(a.Value))\n}\nfunc (s avpUint32t) String(p *Packet, a AVP) string {\n\treturn strconv.Itoa(int(binary.BigEndian.Uint32(a.Value)))\n}\n\nvar avpBinary avpBinaryt\n\ntype avpBinaryt struct{}\n\nfunc (s avpBinaryt) Value(p *Packet, a AVP) interface{} {\n\treturn a.Value\n}\nfunc (s avpBinaryt) String(p *Packet, a AVP) string {\n\treturn fmt.Sprintf(\"%#v\", a.Value)\n}\n\nvar avpPassword avpPasswordt\n\ntype avpPasswordt struct{}\n\nfunc (s avpPasswordt) Value(p *Packet, a AVP) interface{} {\n\tif p == nil {\n\t\treturn \"\"\n\t}\n\n\tbuff := a.Value\n\tpass := make([]byte, 0)\n\tlast := make([]byte, 16)\n\tcopy(last, p.Authenticator[:])\n\n\tfor len(buff) > 0 {\n\t\tm := crypto.Hash(crypto.MD5).New()\n\t\tm.Write(append([]byte(p.Secret), last...))\n\t\th := m.Sum(nil)\n\t\tfor i := 0; i < 16; i++ {\n\t\t\tpass = append(pass, buff[i]^h[i])\n\t\t}\n\t\tlast = buff[:16]\n\t\tbuff = buff[16:]\n\t}\n\n\tpass = bytes.TrimRight(pass, string([]rune{0}))\n\treturn string(pass)\n}\nfunc (s avpPasswordt) String(p *Packet, a AVP) string {\n\treturn s.Value(p, a).(string)\n}\n\ntype avpUint32EnumList []string\n\nfunc (s avpUint32EnumList) Value(p *Packet, a AVP) interface{} {\n\treturn uint32(binary.BigEndian.Uint32(a.Value))\n}\nfunc (s avpUint32EnumList) String(p *Packet, a AVP) string {\n\tnumber := int(binary.BigEndian.Uint32(a.Value))\n\tif number > len(s) {\n\t\treturn \"unknow \" + strconv.Itoa(number)\n\t}\n\tout := s[number]\n\tif out == \"\" {\n\t\treturn \"unknow \" + strconv.Itoa(number)\n\t}\n\treturn out\n}\n\ntype avpUint32Enum struct {\n\tt interface{} \/\/ t should from a uint32 type like AcctStatusTypeEnum\n}\n\nfunc (s avpUint32Enum) Value(p *Packet, a AVP) interface{} {\n\tvalue := reflect.New(reflect.TypeOf(s.t)).Elem()\n\tvalue.SetUint(uint64(binary.BigEndian.Uint32(a.Value)))\n\treturn value.Interface()\n}\nfunc (s avpUint32Enum) String(p *Packet, a AVP) string {\n\tnumber := binary.BigEndian.Uint32(a.Value)\n\tvalue := reflect.New(reflect.TypeOf(s.t)).Elem()\n\tvalue.SetUint(uint64(number))\n\tmethod := value.MethodByName(\"String\")\n\tif !method.IsValid() {\n\t\treturn strconv.Itoa(int(number))\n\t}\n\tout := method.Call(nil)\n\treturn out[0].Interface().(string)\n}\n\nvar avpEapMessage avpEapMessaget\n\ntype avpEapMessaget struct{}\n\nfunc (s avpEapMessaget) Value(p *Packet, a AVP) interface{} {\n\teap, err := EapDecode(a.Value)\n\tif err != nil {\n\t\t\/\/TODO error handle\n\t\tfmt.Println(\"EapDecode fail \", err)\n\t\treturn nil\n\t}\n\treturn eap\n\n}\nfunc (s avpEapMessaget) String(p *Packet, a AVP) string {\n\teap := s.Value(p, a)\n\tif eap == nil {\n\t\treturn \"nil\"\n\t}\n\treturn eap.(*EapPacket).String()\n}\n\ntype AcctStatusTypeEnum uint32\n\nconst (\n\tAcctStatusTypeEnumStart AcctStatusTypeEnum = 1\n\tAcctStatusTypeEnumStop AcctStatusTypeEnum = 2\n\tAcctStatusTypeEnumInterimUpdate AcctStatusTypeEnum = 3\n\tAcctStatusTypeEnumAccountingOn AcctStatusTypeEnum = 7\n\tAcctStatusTypeEnumAccountingOff AcctStatusTypeEnum = 8\n)\n\nfunc (e AcctStatusTypeEnum) String() string {\n\tswitch e {\n\tcase AcctStatusTypeEnumStart:\n\t\treturn \"Start\"\n\tcase AcctStatusTypeEnumStop:\n\t\treturn \"Stop\"\n\tcase AcctStatusTypeEnumInterimUpdate:\n\t\treturn \"InterimUpdate\"\n\tcase AcctStatusTypeEnumAccountingOn:\n\t\treturn \"AccountingOn\"\n\tcase AcctStatusTypeEnumAccountingOff:\n\t\treturn \"AccountingOff\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype NASPortTypeEnum uint32\n\n\/\/ TODO finish it\nconst (\n\tNASPortTypeEnumAsync NASPortTypeEnum = 0\n\tNASPortTypeEnumSync NASPortTypeEnum = 1\n\tNASPortTypeEnumISDNSync NASPortTypeEnum = 2\n\tNASPortTypeEnumISDNSyncV120 NASPortTypeEnum = 3\n\tNASPortTypeEnumISDNSyncV110 NASPortTypeEnum = 4\n\tNASPortTypeEnumVirtual NASPortTypeEnum = 5\n\tNASPortTypeEnumPIAFS NASPortTypeEnum = 6\n\tNASPortTypeEnumHDLCClearChannel NASPortTypeEnum = 7\n\tNASPortTypeEnumEthernet NASPortTypeEnum = 15\n\tNASPortTypeEnumCable NASPortTypeEnum = 17\n)\n\nfunc (e NASPortTypeEnum) String() string {\n\tswitch e {\n\tcase NASPortTypeEnumAsync:\n\t\treturn \"Async\"\n\tcase NASPortTypeEnumSync:\n\t\treturn \"Sync\"\n\tcase NASPortTypeEnumISDNSync:\n\t\treturn \"ISDNSync\"\n\tcase NASPortTypeEnumISDNSyncV120:\n\t\treturn \"ISDNSyncV120\"\n\tcase NASPortTypeEnumISDNSyncV110:\n\t\treturn \"ISDNSyncV110\"\n\tcase NASPortTypeEnumVirtual:\n\t\treturn \"Virtual\"\n\tcase NASPortTypeEnumPIAFS:\n\t\treturn \"PIAFS\"\n\tcase NASPortTypeEnumHDLCClearChannel:\n\t\treturn \"HDLCClearChannel\"\n\tcase NASPortTypeEnumEthernet:\n\t\treturn \"Ethernet\"\n\tcase NASPortTypeEnumCable:\n\t\treturn \"Cable\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype ServiceTypeEnum uint32\n\n\/\/ TODO finish it\nconst (\n\tServiceTypeEnumLogin ServiceTypeEnum = 1\n\tServiceTypeEnumFramed ServiceTypeEnum = 2\n\tServiceTypeEnumCallbackLogin ServiceTypeEnum = 3\n\tServiceTypeEnumCallbackFramed ServiceTypeEnum = 4\n\tServiceTypeEnumOutbound ServiceTypeEnum = 5\n)\n\nfunc (e ServiceTypeEnum) String() string {\n\tswitch e {\n\tcase ServiceTypeEnumLogin:\n\t\treturn \"Login\"\n\tcase ServiceTypeEnumFramed:\n\t\treturn \"Framed\"\n\tcase ServiceTypeEnumCallbackLogin:\n\t\treturn \"CallbackLogin\"\n\tcase ServiceTypeEnumCallbackFramed:\n\t\treturn \"CallbackFramed\"\n\tcase ServiceTypeEnumOutbound:\n\t\treturn \"Outbound\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n\ntype AcctTerminateCauseEnum uint32\n\nconst (\n\tAcctTerminateCauseEnumUserRequest AcctTerminateCauseEnum = 1\n\tAcctTerminateCauseEnumLostCarrier AcctTerminateCauseEnum = 2\n\tAcctTerminateCauseEnumLostService AcctTerminateCauseEnum = 3\n\tAcctTerminateCauseEnumIdleTimeout AcctTerminateCauseEnum = 4\n\tAcctTerminateCauseEnumSessionTimout AcctTerminateCauseEnum = 5\n\tAcctTerminateCauseEnumAdminReset AcctTerminateCauseEnum = 6\n\tAcctTerminateCauseEnumAdminReboot AcctTerminateCauseEnum = 7\n\tAcctTerminateCauseEnumPortError AcctTerminateCauseEnum = 8\n\tAcctTerminateCauseEnumNASError AcctTerminateCauseEnum = 9\n\tAcctTerminateCauseEnumNASRequest AcctTerminateCauseEnum = 10\n\tAcctTerminateCauseEnumNASReboot AcctTerminateCauseEnum = 11\n\tAcctTerminateCauseEnumPortUnneeded AcctTerminateCauseEnum = 12\n\tAcctTerminateCauseEnumPortPreempted AcctTerminateCauseEnum = 13\n\tAcctTerminateCauseEnumPortSuspended AcctTerminateCauseEnum = 14\n\tAcctTerminateCauseEnumServiceUnavailable AcctTerminateCauseEnum = 15\n\tAcctTerminateCauseEnumCallbkack AcctTerminateCauseEnum = 16\n\tAcctTerminateCauseEnumUserError AcctTerminateCauseEnum = 17\n\tAcctTerminateCauseEnumHostRequest AcctTerminateCauseEnum = 18\n)\n\nfunc (e AcctTerminateCauseEnum) String() string {\n\tswitch e {\n\tcase AcctTerminateCauseEnumUserRequest:\n\t\treturn \"UserRequest\"\n\tcase AcctTerminateCauseEnumLostCarrier:\n\t\treturn \"LostCarrier\"\n\tcase AcctTerminateCauseEnumLostService:\n\t\treturn \"LostService\"\n\tcase AcctTerminateCauseEnumIdleTimeout:\n\t\treturn \"IdleTimeout\"\n\tcase AcctTerminateCauseEnumSessionTimout:\n\t\treturn \"SessionTimeout\"\n\tcase AcctTerminateCauseEnumAdminReset:\n\t\treturn \"AdminReset\"\n\tcase AcctTerminateCauseEnumAdminReboot:\n\t\treturn \"AdminReboot\"\n\tcase AcctTerminateCauseEnumPortError:\n\t\treturn \"PortError\"\n\tcase AcctTerminateCauseEnumNASError:\n\t\treturn \"NASError\"\n\tcase AcctTerminateCauseEnumNASRequest:\n\t\treturn \"NASRequest\"\n\tcase AcctTerminateCauseEnumNASReboot:\n\t\treturn \"NASReboot\"\n\tcase AcctTerminateCauseEnumPortUnneeded:\n\t\treturn \"PortUnneeded\"\n\tcase AcctTerminateCauseEnumPortPreempted:\n\t\treturn \"PortPreempted\"\n\tcase AcctTerminateCauseEnumPortSuspended:\n\t\treturn \"PortSuspended\"\n\tcase AcctTerminateCauseEnumServiceUnavailable:\n\t\treturn \"ServiceUnavailable\"\n\tcase AcctTerminateCauseEnumCallbkack:\n\t\treturn \"Callback\"\n\tcase AcctTerminateCauseEnumUserError:\n\t\treturn \"UserError\"\n\tcase AcctTerminateCauseEnumHostRequest:\n\t\treturn \"HostRequest\"\n\t}\n\treturn \"unknow code \" + strconv.Itoa(int(e))\n}\n<|endoftext|>"} {"text":"<commit_before>package mpb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/vbauerster\/mpb\/decor\"\n)\n\nconst (\n\trLeft = iota\n\trFill\n\trTip\n\trEmpty\n\trRight\n)\n\nconst (\n\tformatLen = 5\n\tetaAlpha = 0.25\n)\n\ntype fmtRunes [formatLen]rune\n\n\/\/ Bar represents a progress Bar\ntype Bar struct {\n\t\/\/ quit channel to request b.server to quit\n\tquit chan struct{}\n\t\/\/ done channel is receiveable after b.server has been quit\n\tdone chan struct{}\n\tops chan func(*state)\n\n\t\/\/ following are used after b.done is receiveable\n\tcacheState state\n\n\tonce sync.Once\n}\n\ntype (\n\trefill struct {\n\t\tchar rune\n\t\ttill int64\n\t}\n\tstate struct {\n\t\tid int\n\t\twidth int\n\t\tformat fmtRunes\n\t\tetaAlpha float64\n\t\ttotal int64\n\t\tcurrent int64\n\t\tdropRatio int64\n\t\ttrimLeftSpace bool\n\t\ttrimRightSpace bool\n\t\tcompleted bool\n\t\taborted bool\n\t\tdynamic bool\n\t\tstartTime time.Time\n\t\ttimeElapsed time.Duration\n\t\tblockStartTime time.Time\n\t\ttimePerItem time.Duration\n\t\tappendFuncs []decor.DecoratorFunc\n\t\tprependFuncs []decor.DecoratorFunc\n\t\trefill *refill\n\t\tbufP, bufB, bufA *bytes.Buffer\n\t\tpanic string\n\t}\n\twriteBuf struct {\n\t\tbuf []byte\n\t\tcompleteAfterFlush bool\n\t}\n)\n\nfunc newBar(ID int, total int64, wg *sync.WaitGroup, cancel <-chan struct{}, options ...BarOption) *Bar {\n\tif total <= 0 {\n\t\ttotal = time.Now().Unix()\n\t}\n\n\ts := state{\n\t\tid: ID,\n\t\ttotal: total,\n\t\tetaAlpha: etaAlpha,\n\t\tdropRatio: 10,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&s)\n\t}\n\n\ts.bufP = bytes.NewBuffer(make([]byte, 0, s.width\/2))\n\ts.bufB = bytes.NewBuffer(make([]byte, 0, s.width))\n\ts.bufA = bytes.NewBuffer(make([]byte, 0, s.width\/2))\n\n\tb := &Bar{\n\t\tquit: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t\tops: make(chan func(*state)),\n\t}\n\n\tgo b.server(s, wg, cancel)\n\treturn b\n}\n\n\/\/ RemoveAllPrependers removes all prepend functions\nfunc (b *Bar) RemoveAllPrependers() {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.prependFuncs = nil\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ RemoveAllAppenders removes all append functions\nfunc (b *Bar) RemoveAllAppenders() {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.appendFuncs = nil\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ ProxyReader wrapper for io operations, like io.Copy\nfunc (b *Bar) ProxyReader(r io.Reader) *Reader {\n\treturn &Reader{r, b}\n}\n\n\/\/ Increment shorthand for b.Incr(1)\nfunc (b *Bar) Increment() {\n\tb.Incr(1)\n}\n\n\/\/ Incr increments progress bar\nfunc (b *Bar) Incr(n int) {\n\tif n < 1 {\n\t\treturn\n\t}\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\tnext := time.Now()\n\t\tif s.current == 0 {\n\t\t\ts.startTime = next\n\t\t\ts.blockStartTime = next\n\t\t} else {\n\t\t\tnow := time.Now()\n\t\t\ts.updateTimePerItemEstimate(n, now, next)\n\t\t\ts.timeElapsed = now.Sub(s.startTime)\n\t\t}\n\t\tsum := s.current + int64(n)\n\t\tif s.total > 0 && sum >= s.total {\n\t\t\tif s.dynamic {\n\t\t\t\tsum -= sum * s.dropRatio \/ 100\n\t\t\t} else {\n\t\t\t\ts.current = s.total\n\t\t\t\ts.completed = true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\ts.current = sum\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ ResumeFill fills bar with different r rune,\n\/\/ from 0 to till amount of progress.\nfunc (b *Bar) ResumeFill(r rune, till int64) {\n\tif till < 1 {\n\t\treturn\n\t}\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.refill = &refill{r, till}\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\nfunc (b *Bar) NumOfAppenders() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- len(s.appendFuncs) }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn len(b.cacheState.appendFuncs)\n\t}\n}\n\nfunc (b *Bar) NumOfPrependers() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- len(s.prependFuncs) }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn len(b.cacheState.prependFuncs)\n\t}\n}\n\n\/\/ ID returs id of the bar\nfunc (b *Bar) ID() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.id }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.id\n\t}\n}\n\nfunc (b *Bar) Current() int64 {\n\tresult := make(chan int64, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.current }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.current\n\t}\n}\n\nfunc (b *Bar) Total() int64 {\n\tresult := make(chan int64, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.total }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.total\n\t}\n}\n\n\/\/ SetTotal sets total dynamically. The final param indicates the very last set,\n\/\/ in other words you should set it to true when total is determined.\n\/\/ Also you may consider providing your drop ratio via BarDropRatio BarOption func.\nfunc (b *Bar) SetTotal(total int64, final bool) {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.total = total\n\t\ts.dynamic = !final\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ InProgress returns true, while progress is running.\n\/\/ Can be used as condition in for loop\nfunc (b *Bar) InProgress() bool {\n\tselect {\n\tcase <-b.quit:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n\n\/\/ Complete signals to the bar, that process has been completed.\n\/\/ You should call this method when total is unknown and you've reached the point\n\/\/ of process completion. If you don't call this method, it will be called\n\/\/ implicitly, upon p.Stop() call.\nfunc (b *Bar) Complete() {\n\tb.once.Do(b.shutdown)\n}\n\nfunc (b *Bar) shutdown() {\n\tclose(b.quit)\n}\n\nfunc (b *Bar) server(s state, wg *sync.WaitGroup, cancel <-chan struct{}) {\n\tdefer func() {\n\t\tb.cacheState = s\n\t\tclose(b.done)\n\t\twg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase op := <-b.ops:\n\t\t\top(&s)\n\t\tcase <-cancel:\n\t\t\ts.aborted = true\n\t\t\tcancel = nil\n\t\t\tb.Complete()\n\t\tcase <-b.quit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (b *Bar) render(tw int, prependWs, appendWs *widthSync) <-chan *writeBuf {\n\tch := make(chan *writeBuf, 1)\n\n\tgo func() {\n\t\tselect {\n\t\tcase b.ops <- func(s *state) {\n\t\t\tdefer func() {\n\t\t\t\t\/\/ recovering if external decorators panic\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\ts.panic = fmt.Sprintf(\"b#%02d panic: %v\\n\", s.id, p)\n\t\t\t\t\ts.prependFuncs = nil\n\t\t\t\t\ts.appendFuncs = nil\n\n\t\t\t\t\tch <- &writeBuf{[]byte(s.panic), true}\n\t\t\t\t}\n\t\t\t\tclose(ch)\n\t\t\t}()\n\t\t\ts.draw(tw, prependWs, appendWs)\n\t\t\tch <- &writeBuf{s.toBytes(), s.isFull()}\n\t\t}:\n\t\tcase <-b.done:\n\t\t\ts := b.cacheState\n\t\t\tvar buf []byte\n\t\t\tif s.panic != \"\" {\n\t\t\t\tbuf = []byte(s.panic)\n\t\t\t} else {\n\t\t\t\ts.draw(tw, prependWs, appendWs)\n\t\t\t\tbuf = s.toBytes()\n\t\t\t}\n\t\t\tch <- &writeBuf{buf, false}\n\t\t\tclose(ch)\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc (s *state) toBytes() []byte {\n\tbuf := make([]byte, 0, s.bufP.Len()+s.bufB.Len()+s.bufA.Len())\n\tbuf = concatenateBlocks(buf, s.bufP.Bytes(), s.bufB.Bytes(), s.bufA.Bytes())\n\treturn buf\n}\n\nfunc (s *state) updateTimePerItemEstimate(amount int, now, next time.Time) {\n\tlastBlockTime := now.Sub(s.blockStartTime)\n\tlastItemEstimate := float64(lastBlockTime) \/ float64(amount)\n\ts.timePerItem = time.Duration((s.etaAlpha * lastItemEstimate) + (1-s.etaAlpha)*float64(s.timePerItem))\n\ts.blockStartTime = next\n}\n\nfunc (s *state) isFull() bool {\n\tif !s.completed {\n\t\treturn false\n\t}\n\tbar := s.bufB.Bytes()\n\tvar r rune\n\tvar n int\n\tfor i := 0; len(bar) > 0; i++ {\n\t\tr, n = utf8.DecodeLastRune(bar)\n\t\tbar = bar[:len(bar)-n]\n\t\tif i == 1 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn r == s.format[rFill]\n}\n\nfunc (s *state) draw(termWidth int, prependWs, appendWs *widthSync) {\n\tif termWidth <= 0 {\n\t\ttermWidth = s.width\n\t}\n\n\tstat := newStatistics(s)\n\n\t\/\/ render prepend functions to the left of the bar\n\ts.bufP.Reset()\n\tfor i, f := range s.prependFuncs {\n\t\ts.bufP.WriteString(f(stat, prependWs.Listen[i], prependWs.Result[i]))\n\t}\n\n\tif !s.trimLeftSpace {\n\t\ts.bufP.WriteByte(' ')\n\t}\n\n\t\/\/ render append functions to the right of the bar\n\ts.bufA.Reset()\n\tif !s.trimRightSpace {\n\t\ts.bufA.WriteByte(' ')\n\t}\n\n\tfor i, f := range s.appendFuncs {\n\t\ts.bufA.WriteString(f(stat, appendWs.Listen[i], appendWs.Result[i]))\n\t}\n\n\tprependCount := utf8.RuneCount(s.bufP.Bytes())\n\tappendCount := utf8.RuneCount(s.bufA.Bytes())\n\n\ts.fillBar(s.width)\n\tbarCount := utf8.RuneCount(s.bufB.Bytes())\n\ttotalCount := prependCount + barCount + appendCount\n\tif totalCount > termWidth {\n\t\tshrinkWidth := termWidth - prependCount - appendCount\n\t\ts.fillBar(shrinkWidth)\n\t}\n\ts.bufA.WriteByte('\\n')\n}\n\nfunc (s *state) fillBar(width int) {\n\ts.bufB.Reset()\n\tif width <= 2 {\n\t\treturn\n\t}\n\n\t\/\/ bar s.width without leftEnd and rightEnd runes\n\tbarWidth := width - 2\n\n\tcompletedWidth := decor.CalcPercentage(s.total, s.current, barWidth)\n\n\ts.bufB.WriteRune(s.format[rLeft])\n\n\tif s.refill != nil {\n\t\ttill := decor.CalcPercentage(s.total, s.refill.till, barWidth)\n\t\t\/\/ append refill rune\n\t\tfor i := 0; i < till; i++ {\n\t\t\ts.bufB.WriteRune(s.refill.char)\n\t\t}\n\t\tfor i := till; i < completedWidth; i++ {\n\t\t\ts.bufB.WriteRune(s.format[rFill])\n\t\t}\n\t} else {\n\t\tfor i := 0; i < completedWidth; i++ {\n\t\t\ts.bufB.WriteRune(s.format[rFill])\n\t\t}\n\t}\n\n\tif completedWidth < barWidth && completedWidth > 0 {\n\t\t_, size := utf8.DecodeLastRune(s.bufB.Bytes())\n\t\ts.bufB.Truncate(s.bufB.Len() - size)\n\t\ts.bufB.WriteRune(s.format[rTip])\n\t}\n\n\tfor i := completedWidth; i < barWidth; i++ {\n\t\ts.bufB.WriteRune(s.format[rEmpty])\n\t}\n\n\ts.bufB.WriteRune(s.format[rRight])\n}\n\nfunc newStatistics(s *state) *decor.Statistics {\n\treturn &decor.Statistics{\n\t\tID: s.id,\n\t\tCompleted: s.completed,\n\t\tAborted: s.aborted,\n\t\tTotal: s.total,\n\t\tCurrent: s.current,\n\t\tStartTime: s.startTime,\n\t\tTimeElapsed: s.timeElapsed,\n\t\tTimePerItemEstimate: s.timePerItem,\n\t}\n}\n\nfunc concatenateBlocks(buf []byte, blocks ...[]byte) []byte {\n\tfor _, block := range blocks {\n\t\tbuf = append(buf, block...)\n\t}\n\treturn buf\n}\n\nfunc (s *state) updateFormat(format string) {\n\tfor i, n := 0, 0; len(format) > 0; i++ {\n\t\ts.format[i], n = utf8.DecodeRuneInString(format)\n\t\tformat = format[n:]\n\t}\n}\n<commit_msg>Drop progress while total is dynamic<commit_after>package mpb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/vbauerster\/mpb\/decor\"\n)\n\nconst (\n\trLeft = iota\n\trFill\n\trTip\n\trEmpty\n\trRight\n)\n\nconst (\n\tformatLen = 5\n\tetaAlpha = 0.25\n)\n\ntype fmtRunes [formatLen]rune\n\n\/\/ Bar represents a progress Bar\ntype Bar struct {\n\t\/\/ quit channel to request b.server to quit\n\tquit chan struct{}\n\t\/\/ done channel is receiveable after b.server has been quit\n\tdone chan struct{}\n\tops chan func(*state)\n\n\t\/\/ following are used after b.done is receiveable\n\tcacheState state\n\n\tonce sync.Once\n}\n\ntype (\n\trefill struct {\n\t\tchar rune\n\t\ttill int64\n\t}\n\tstate struct {\n\t\tid int\n\t\twidth int\n\t\tformat fmtRunes\n\t\tetaAlpha float64\n\t\ttotal int64\n\t\tcurrent int64\n\t\tdropRatio int64\n\t\ttrimLeftSpace bool\n\t\ttrimRightSpace bool\n\t\tcompleted bool\n\t\taborted bool\n\t\tdynamic bool\n\t\tstartTime time.Time\n\t\ttimeElapsed time.Duration\n\t\tblockStartTime time.Time\n\t\ttimePerItem time.Duration\n\t\tappendFuncs []decor.DecoratorFunc\n\t\tprependFuncs []decor.DecoratorFunc\n\t\trefill *refill\n\t\tbufP, bufB, bufA *bytes.Buffer\n\t\tpanic string\n\t}\n\twriteBuf struct {\n\t\tbuf []byte\n\t\tcompleteAfterFlush bool\n\t}\n)\n\nfunc newBar(ID int, total int64, wg *sync.WaitGroup, cancel <-chan struct{}, options ...BarOption) *Bar {\n\tif total <= 0 {\n\t\ttotal = time.Now().Unix()\n\t}\n\n\ts := state{\n\t\tid: ID,\n\t\ttotal: total,\n\t\tetaAlpha: etaAlpha,\n\t\tdropRatio: 10,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&s)\n\t}\n\n\ts.bufP = bytes.NewBuffer(make([]byte, 0, s.width\/2))\n\ts.bufB = bytes.NewBuffer(make([]byte, 0, s.width))\n\ts.bufA = bytes.NewBuffer(make([]byte, 0, s.width\/2))\n\n\tb := &Bar{\n\t\tquit: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t\tops: make(chan func(*state)),\n\t}\n\n\tgo b.server(s, wg, cancel)\n\treturn b\n}\n\n\/\/ RemoveAllPrependers removes all prepend functions\nfunc (b *Bar) RemoveAllPrependers() {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.prependFuncs = nil\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ RemoveAllAppenders removes all append functions\nfunc (b *Bar) RemoveAllAppenders() {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.appendFuncs = nil\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ ProxyReader wrapper for io operations, like io.Copy\nfunc (b *Bar) ProxyReader(r io.Reader) *Reader {\n\treturn &Reader{r, b}\n}\n\n\/\/ Increment shorthand for b.Incr(1)\nfunc (b *Bar) Increment() {\n\tb.Incr(1)\n}\n\n\/\/ Incr increments progress bar\nfunc (b *Bar) Incr(n int) {\n\tif n < 1 {\n\t\treturn\n\t}\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\tnext := time.Now()\n\t\tif s.current == 0 {\n\t\t\ts.startTime = next\n\t\t\ts.blockStartTime = next\n\t\t} else {\n\t\t\tnow := time.Now()\n\t\t\ts.updateTimePerItemEstimate(n, now, next)\n\t\t\ts.timeElapsed = now.Sub(s.startTime)\n\t\t}\n\t\ts.current += int64(n)\n\t\tif s.dynamic {\n\t\t\tfor s.current >= s.total {\n\t\t\t\ts.current -= s.current * s.dropRatio \/ 100\n\t\t\t}\n\t\t} else if s.current >= s.total {\n\t\t\ts.current = s.total\n\t\t\ts.completed = true\n\t\t}\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ ResumeFill fills bar with different r rune,\n\/\/ from 0 to till amount of progress.\nfunc (b *Bar) ResumeFill(r rune, till int64) {\n\tif till < 1 {\n\t\treturn\n\t}\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.refill = &refill{r, till}\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\nfunc (b *Bar) NumOfAppenders() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- len(s.appendFuncs) }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn len(b.cacheState.appendFuncs)\n\t}\n}\n\nfunc (b *Bar) NumOfPrependers() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- len(s.prependFuncs) }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn len(b.cacheState.prependFuncs)\n\t}\n}\n\n\/\/ ID returs id of the bar\nfunc (b *Bar) ID() int {\n\tresult := make(chan int, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.id }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.id\n\t}\n}\n\nfunc (b *Bar) Current() int64 {\n\tresult := make(chan int64, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.current }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.current\n\t}\n}\n\nfunc (b *Bar) Total() int64 {\n\tresult := make(chan int64, 1)\n\tselect {\n\tcase b.ops <- func(s *state) { result <- s.total }:\n\t\treturn <-result\n\tcase <-b.done:\n\t\treturn b.cacheState.total\n\t}\n}\n\n\/\/ SetTotal sets total dynamically. The final param indicates the very last set,\n\/\/ in other words you should set it to true when total is determined.\n\/\/ Also you may consider providing your drop ratio via BarDropRatio BarOption func.\nfunc (b *Bar) SetTotal(total int64, final bool) {\n\tselect {\n\tcase b.ops <- func(s *state) {\n\t\ts.total = total\n\t\ts.dynamic = !final\n\t}:\n\tcase <-b.quit:\n\t}\n}\n\n\/\/ InProgress returns true, while progress is running.\n\/\/ Can be used as condition in for loop\nfunc (b *Bar) InProgress() bool {\n\tselect {\n\tcase <-b.quit:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n\n\/\/ Complete signals to the bar, that process has been completed.\n\/\/ You should call this method when total is unknown and you've reached the point\n\/\/ of process completion. If you don't call this method, it will be called\n\/\/ implicitly, upon p.Stop() call.\nfunc (b *Bar) Complete() {\n\tb.once.Do(b.shutdown)\n}\n\nfunc (b *Bar) shutdown() {\n\tclose(b.quit)\n}\n\nfunc (b *Bar) server(s state, wg *sync.WaitGroup, cancel <-chan struct{}) {\n\tdefer func() {\n\t\tb.cacheState = s\n\t\tclose(b.done)\n\t\twg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase op := <-b.ops:\n\t\t\top(&s)\n\t\tcase <-cancel:\n\t\t\ts.aborted = true\n\t\t\tcancel = nil\n\t\t\tb.Complete()\n\t\tcase <-b.quit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (b *Bar) render(tw int, prependWs, appendWs *widthSync) <-chan *writeBuf {\n\tch := make(chan *writeBuf, 1)\n\n\tgo func() {\n\t\tselect {\n\t\tcase b.ops <- func(s *state) {\n\t\t\tdefer func() {\n\t\t\t\t\/\/ recovering if external decorators panic\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\ts.panic = fmt.Sprintf(\"b#%02d panic: %v\\n\", s.id, p)\n\t\t\t\t\ts.prependFuncs = nil\n\t\t\t\t\ts.appendFuncs = nil\n\n\t\t\t\t\tch <- &writeBuf{[]byte(s.panic), true}\n\t\t\t\t}\n\t\t\t\tclose(ch)\n\t\t\t}()\n\t\t\ts.draw(tw, prependWs, appendWs)\n\t\t\tch <- &writeBuf{s.toBytes(), s.isFull()}\n\t\t}:\n\t\tcase <-b.done:\n\t\t\ts := b.cacheState\n\t\t\tvar buf []byte\n\t\t\tif s.panic != \"\" {\n\t\t\t\tbuf = []byte(s.panic)\n\t\t\t} else {\n\t\t\t\ts.draw(tw, prependWs, appendWs)\n\t\t\t\tbuf = s.toBytes()\n\t\t\t}\n\t\t\tch <- &writeBuf{buf, false}\n\t\t\tclose(ch)\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc (s *state) toBytes() []byte {\n\tbuf := make([]byte, 0, s.bufP.Len()+s.bufB.Len()+s.bufA.Len())\n\tbuf = concatenateBlocks(buf, s.bufP.Bytes(), s.bufB.Bytes(), s.bufA.Bytes())\n\treturn buf\n}\n\nfunc (s *state) updateTimePerItemEstimate(amount int, now, next time.Time) {\n\tlastBlockTime := now.Sub(s.blockStartTime)\n\tlastItemEstimate := float64(lastBlockTime) \/ float64(amount)\n\ts.timePerItem = time.Duration((s.etaAlpha * lastItemEstimate) + (1-s.etaAlpha)*float64(s.timePerItem))\n\ts.blockStartTime = next\n}\n\nfunc (s *state) isFull() bool {\n\tif !s.completed {\n\t\treturn false\n\t}\n\tbar := s.bufB.Bytes()\n\tvar r rune\n\tvar n int\n\tfor i := 0; len(bar) > 0; i++ {\n\t\tr, n = utf8.DecodeLastRune(bar)\n\t\tbar = bar[:len(bar)-n]\n\t\tif i == 1 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn r == s.format[rFill]\n}\n\nfunc (s *state) draw(termWidth int, prependWs, appendWs *widthSync) {\n\tif termWidth <= 0 {\n\t\ttermWidth = s.width\n\t}\n\n\tstat := newStatistics(s)\n\n\t\/\/ render prepend functions to the left of the bar\n\ts.bufP.Reset()\n\tfor i, f := range s.prependFuncs {\n\t\ts.bufP.WriteString(f(stat, prependWs.Listen[i], prependWs.Result[i]))\n\t}\n\n\tif !s.trimLeftSpace {\n\t\ts.bufP.WriteByte(' ')\n\t}\n\n\t\/\/ render append functions to the right of the bar\n\ts.bufA.Reset()\n\tif !s.trimRightSpace {\n\t\ts.bufA.WriteByte(' ')\n\t}\n\n\tfor i, f := range s.appendFuncs {\n\t\ts.bufA.WriteString(f(stat, appendWs.Listen[i], appendWs.Result[i]))\n\t}\n\n\tprependCount := utf8.RuneCount(s.bufP.Bytes())\n\tappendCount := utf8.RuneCount(s.bufA.Bytes())\n\n\ts.fillBar(s.width)\n\tbarCount := utf8.RuneCount(s.bufB.Bytes())\n\ttotalCount := prependCount + barCount + appendCount\n\tif totalCount > termWidth {\n\t\tshrinkWidth := termWidth - prependCount - appendCount\n\t\ts.fillBar(shrinkWidth)\n\t}\n\ts.bufA.WriteByte('\\n')\n}\n\nfunc (s *state) fillBar(width int) {\n\ts.bufB.Reset()\n\tif width <= 2 {\n\t\treturn\n\t}\n\n\t\/\/ bar s.width without leftEnd and rightEnd runes\n\tbarWidth := width - 2\n\n\tcompletedWidth := decor.CalcPercentage(s.total, s.current, barWidth)\n\n\ts.bufB.WriteRune(s.format[rLeft])\n\n\tif s.refill != nil {\n\t\ttill := decor.CalcPercentage(s.total, s.refill.till, barWidth)\n\t\t\/\/ append refill rune\n\t\tfor i := 0; i < till; i++ {\n\t\t\ts.bufB.WriteRune(s.refill.char)\n\t\t}\n\t\tfor i := till; i < completedWidth; i++ {\n\t\t\ts.bufB.WriteRune(s.format[rFill])\n\t\t}\n\t} else {\n\t\tfor i := 0; i < completedWidth; i++ {\n\t\t\ts.bufB.WriteRune(s.format[rFill])\n\t\t}\n\t}\n\n\tif completedWidth < barWidth && completedWidth > 0 {\n\t\t_, size := utf8.DecodeLastRune(s.bufB.Bytes())\n\t\ts.bufB.Truncate(s.bufB.Len() - size)\n\t\ts.bufB.WriteRune(s.format[rTip])\n\t}\n\n\tfor i := completedWidth; i < barWidth; i++ {\n\t\ts.bufB.WriteRune(s.format[rEmpty])\n\t}\n\n\ts.bufB.WriteRune(s.format[rRight])\n}\n\nfunc newStatistics(s *state) *decor.Statistics {\n\treturn &decor.Statistics{\n\t\tID: s.id,\n\t\tCompleted: s.completed,\n\t\tAborted: s.aborted,\n\t\tTotal: s.total,\n\t\tCurrent: s.current,\n\t\tStartTime: s.startTime,\n\t\tTimeElapsed: s.timeElapsed,\n\t\tTimePerItemEstimate: s.timePerItem,\n\t}\n}\n\nfunc concatenateBlocks(buf []byte, blocks ...[]byte) []byte {\n\tfor _, block := range blocks {\n\t\tbuf = append(buf, block...)\n\t}\n\treturn buf\n}\n\nfunc (s *state) updateFormat(format string) {\n\tfor i, n := 0, 0; len(format) > 0; i++ {\n\t\ts.format[i], n = utf8.DecodeRuneInString(format)\n\t\tformat = format[n:]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Tabix queries for go\npackage bix\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unsafe\"\n\n\t\"github.com\/biogo\/hts\/bgzf\"\n\t\"github.com\/biogo\/hts\/bgzf\/index\"\n\t\"github.com\/biogo\/hts\/tabix\"\n\t\"github.com\/brentp\/irelate\/interfaces\"\n\t\"github.com\/brentp\/irelate\/parsers\"\n\t\"github.com\/brentp\/vcfgo\"\n)\n\n\/\/ Bix provides read access to tabix files.\ntype Bix struct {\n\t*tabix.Index\n\tbgzf *bgzf.Reader\n\tpath string\n\n\tVReader *vcfgo.Reader\n\t\/\/ index for 'ref' and 'alt' columns if they were present.\n\trefalt []int\n\n\tfile *os.File\n\tbuf *bufio.Reader\n}\n\n\/\/ create a new bix that does as little as possible from the old bix\nfunc newShort(old *Bix) (*Bix, error) {\n\ttbx := &Bix{\n\t\tIndex: old.Index,\n\t\tpath: old.path,\n\t\tVReader: old.VReader,\n\t\trefalt: old.refalt,\n\t}\n\tvar err error\n\ttbx.file, err = os.Open(tbx.path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttbx.bgzf, err = bgzf.NewReader(tbx.file, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tbx, nil\n}\n\n\/\/ New returns a &Bix\nfunc New(path string, workers ...int) (*Bix, error) {\n\tf, err := os.Open(path + \".tbi\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tgz, err := gzip.NewReader(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gz.Close()\n\n\tidx, err := tabix.ReadFrom(gz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := 1\n\tif len(workers) > 0 {\n\t\tn = workers[0]\n\t}\n\n\tb, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbgz, err := bgzf.NewReader(b, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar h []string\n\ttbx := &Bix{bgzf: bgz, path: path, file: b}\n\n\tbuf := bufio.NewReader(bgz)\n\tl, err := buf.ReadString('\\n')\n\tif err != nil {\n\t\treturn tbx, err\n\t}\n\n\tfor i := 0; i < int(idx.Skip) || rune(l[0]) == idx.MetaChar; i++ {\n\t\th = append(h, l)\n\t\tl, err = buf.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn tbx, err\n\t\t}\n\t}\n\theader := strings.Join(h, \"\")\n\n\tif len(h) > 0 && strings.HasSuffix(tbx.path, \".vcf.gz\") {\n\t\tvar err error\n\t\th := strings.NewReader(header)\n\n\t\ttbx.VReader, err = vcfgo.NewReader(h, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if len(h) > 0 {\n\t\thtab := strings.Split(strings.TrimSpace(h[len(h)-1]), \"\\t\")\n\t\t\/\/ try to find ref and alternate columns to make an IREFALT\n\t\tfor i, hdr := range htab {\n\t\t\tif l := strings.ToLower(hdr); l == \"ref\" || l == \"reference\" {\n\t\t\t\ttbx.refalt = append(tbx.refalt, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfor i, hdr := range htab {\n\t\t\tif l := strings.ToLower(hdr); l == \"alt\" || l == \"alternate\" {\n\t\t\t\ttbx.refalt = append(tbx.refalt, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif len(tbx.refalt) != 2 {\n\t\t\ttbx.refalt = nil\n\t\t}\n\t}\n\ttbx.buf = buf\n\ttbx.Index = idx\n\treturn tbx, nil\n}\n\nfunc (b *Bix) Close() error {\n\tb.bgzf.Close()\n\tb.file.Close()\n\treturn nil\n}\n\nfunc (tbx *Bix) toPosition(toks [][]byte) interfaces.Relatable {\n\tisVCF := tbx.VReader != nil\n\tvar g *parsers.Interval\n\n\tif isVCF {\n\t\tv := tbx.VReader.Parse(toks)\n\t\treturn interfaces.AsRelatable(v)\n\n\t} else {\n\t\tg, _ = newgeneric(toks, int(tbx.Index.NameColumn-1), int(tbx.Index.BeginColumn-1),\n\t\t\tint(tbx.Index.EndColumn-1), tbx.Index.ZeroBased)\n\t}\n\tif tbx.refalt != nil {\n\t\tra := parsers.RefAltInterval{Interval: *g, HasEnd: tbx.Index.EndColumn != tbx.Index.BeginColumn}\n\t\tra.SetRefAlt(tbx.refalt)\n\t\treturn &ra\n\t}\n\treturn g\n}\n\nfunc unsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}\n\n\/\/ return an interval using the info from the tabix index\nfunc newgeneric(fields [][]byte, chromCol int, startCol int, endCol int, zeroBased bool) (*parsers.Interval, error) {\n\ts, err := strconv.Atoi(unsafeString(fields[startCol]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !zeroBased {\n\t\ts -= 1\n\t}\n\te, err := strconv.Atoi(unsafeString(fields[endCol]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parsers.NewInterval(string(fields[chromCol]), uint32(s), uint32(e), fields, uint32(0), nil), nil\n}\n\nfunc (tbx *Bix) ChunkedReader(chrom string, start, end int) (io.ReadCloser, error) {\n\tchunks, err := tbx.Chunks(chrom, start, end)\n\tif err == index.ErrNoReference {\n\t\tif strings.HasPrefix(chrom, \"chr\") {\n\t\t\tchunks, err = tbx.Chunks(chrom[3:], start, end)\n\t\t} else {\n\t\t\tchunks, err = tbx.Chunks(\"chr\"+chrom, start, end)\n\t\t}\n\t}\n\tif err == index.ErrInvalid {\n\t\treturn index.NewChunkReader(tbx.bgzf, []bgzf.Chunk{})\n\t} else if err == index.ErrNoReference {\n\t\tlog.Printf(\"chromosome %s not found in %s\\n\", chrom, tbx.path)\n\t\treturn index.NewChunkReader(tbx.bgzf, []bgzf.Chunk{})\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tcr, err := index.NewChunkReader(tbx.bgzf, chunks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cr, nil\n}\n\n\/\/ bixerator meets interfaces.RelatableIterator\ntype bixerator struct {\n\trdr io.ReadCloser\n\tbuf *bufio.Reader\n\ttbx *Bix\n\n\tregion interfaces.IPosition\n}\n\nfunc makeFields(line []byte) [][]byte {\n\tfields := make([][]byte, 9)\n\tcopy(fields[:8], bytes.SplitN(line, []byte{'\\t'}, 8))\n\ts := 0\n\tfor i, f := range fields {\n\t\tif i == 7 {\n\t\t\tbreak\n\t\t}\n\t\ts += len(f) + 1\n\t}\n\te := bytes.IndexByte(line[s:], '\\t')\n\tif e == -1 {\n\t\te = len(line)\n\t} else {\n\t\te += s\n\t}\n\n\tfields[7] = line[s:e]\n\tif len(line) > e+1 {\n\t\tfields[8] = line[e+1:]\n\t} else {\n\t\tfields = fields[:8]\n\t}\n\n\treturn fields\n}\n\nfunc (b bixerator) Next() (interfaces.Relatable, error) {\n\n\tfor {\n\t\tline, err := b.buf.ReadBytes('\\n')\n\n\t\tif err == io.EOF && len(line) == 0 {\n\t\t\treturn nil, io.EOF\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(line) == 0 {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\tif line[len(line)-1] == '\\n' {\n\t\t\tline = line[:len(line)-1]\n\t\t}\n\t\tin := true\n\t\tvar toks [][]byte\n\t\tif b.region != nil {\n\t\t\tvar err error\n\n\t\t\tin, err, toks = b.inBounds(line)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\tif b.tbx.VReader != nil {\n\t\t\t\ttoks = makeFields(line)\n\t\t\t} else {\n\t\t\t\ttoks = bytes.Split(line, []byte{'\\t'})\n\t\t\t}\n\t\t}\n\n\t\tif in {\n\t\t\treturn b.tbx.toPosition(toks), nil\n\t\t}\n\t}\n\treturn nil, io.EOF\n}\n\nfunc (b bixerator) Close() error {\n\tif b.rdr != nil {\n\t\tb.rdr.Close()\n\t}\n\treturn b.tbx.Close()\n}\n\nvar _ interfaces.RelatableIterator = bixerator{}\n\nfunc (tbx *Bix) Query(region interfaces.IPosition) (interfaces.RelatableIterator, error) {\n\ttbx2, err := newShort(tbx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif region == nil {\n\t\tvar l string\n\t\tvar err error\n\t\tbuf := bufio.NewReader(tbx2.bgzf)\n\t\tl, err = buf.ReadString('\\n')\n\t\tfor i := 0; i < int(tbx2.Index.Skip) || rune(l[0]) == tbx2.Index.MetaChar; i++ {\n\t\t\tl, err = buf.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif tbx2.Index.Skip == 0 && rune(l[0]) != tbx2.Index.MetaChar {\n\t\t\tbuf = bufio.NewReader(io.MultiReader(strings.NewReader(l), buf))\n\t\t}\n\t\treturn bixerator{nil, buf, tbx2, region}, nil\n\t}\n\n\tcr, err := tbx2.ChunkedReader(region.Chrom(), int(region.Start()), int(region.End()))\n\tif err != nil {\n\t\tif cr != nil {\n\t\t\ttbx2.Close()\n\t\t\tcr.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn bixerator{cr, bufio.NewReader(cr), tbx2, region}, nil\n}\n\nfunc (tbx *Bix) AddInfoToHeader(id, number, vtype, desc string) {\n\tif tbx.VReader == nil {\n\t\treturn\n\t}\n\ttbx.VReader.AddInfoToHeader(id, number, vtype, desc)\n}\n\nfunc (tbx *Bix) GetHeaderType(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"\"\n\t}\n\treturn tbx.VReader.GetHeaderType(field)\n}\n\nfunc (tbx *Bix) GetHeaderDescription(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"\"\n\t}\n\tif h, ok := tbx.VReader.Header.Infos[field]; ok {\n\t\treturn h.Description\n\t}\n\treturn \"\"\n}\n\nfunc (tbx *Bix) GetHeaderNumber(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"1\"\n\t}\n\tif h, ok := tbx.VReader.Header.Infos[field]; ok {\n\t\treturn h.Number\n\t}\n\treturn \"1\"\n}\n\nfunc (b *bixerator) inBounds(line []byte) (bool, error, [][]byte) {\n\n\tvar readErr error\n\tline = bytes.TrimRight(line, \"\\r\\n\")\n\tvar toks [][]byte\n\tif b.tbx.VReader != nil {\n\t\ttoks = makeFields(line)\n\t} else {\n\t\ttoks = bytes.Split(line, []byte{'\\t'})\n\t}\n\n\ts, err := strconv.Atoi(unsafeString(toks[b.tbx.BeginColumn-1]))\n\tif err != nil {\n\t\treturn false, err, toks\n\t}\n\n\tpos := s\n\tif !b.tbx.ZeroBased {\n\t\tpos -= 1\n\t}\n\tif pos >= int(b.region.End()) {\n\t\treturn false, io.EOF, toks\n\t}\n\n\tif b.tbx.EndColumn != 0 {\n\t\te, err := strconv.Atoi(unsafeString(toks[b.tbx.EndColumn-1]))\n\t\tif err != nil {\n\t\t\treturn false, err, toks\n\t\t}\n\t\tif e < int(b.region.Start()) {\n\t\t\treturn false, readErr, toks\n\t\t}\n\t\treturn true, readErr, toks\n\t} else if b.tbx.VReader != nil {\n\t\tstart := int(b.region.Start())\n\t\talt := strings.Split(string(toks[4]), \",\")\n\t\tlref := len(toks[3])\n\t\tif start >= pos+lref {\n\t\t\tfor _, a := range alt {\n\t\t\t\tif a[0] != '<' || a == \"<CN0>\" {\n\t\t\t\t\te := pos + lref\n\t\t\t\t\tif e > start {\n\t\t\t\t\t\treturn true, readErr, toks\n\t\t\t\t\t}\n\t\t\t\t} else if strings.HasPrefix(a, \"<DEL\") || strings.HasPrefix(a, \"<DUP\") || strings.HasPrefix(a, \"<INV\") || strings.HasPrefix(a, \"<CN\") {\n\t\t\t\t\tinfo := string(toks[7])\n\t\t\t\t\tif idx := strings.Index(info, \";END=\"); idx != -1 {\n\t\t\t\t\t\tv := info[idx+5 : idx+5+strings.Index(info[idx+5:], \";\")]\n\t\t\t\t\t\te, err := strconv.Atoi(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn false, err, toks\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif e > start {\n\t\t\t\t\t\t\treturn true, readErr, toks\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Println(\"no end:\", b.tbx.path, string(toks[0]), pos, string(toks[3]), a)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn true, readErr, toks\n\t\t}\n\t\treturn false, readErr, toks\n\t}\n\treturn false, readErr, toks\n\n}\n<commit_msg>propagate workers<commit_after>\/\/ Tabix queries for go\npackage bix\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unsafe\"\n\n\t\"github.com\/biogo\/hts\/bgzf\"\n\t\"github.com\/biogo\/hts\/bgzf\/index\"\n\t\"github.com\/biogo\/hts\/tabix\"\n\t\"github.com\/brentp\/irelate\/interfaces\"\n\t\"github.com\/brentp\/irelate\/parsers\"\n\t\"github.com\/brentp\/vcfgo\"\n)\n\n\/\/ Bix provides read access to tabix files.\ntype Bix struct {\n\t*tabix.Index\n\tbgzf *bgzf.Reader\n\tpath string\n\tworkers int\n\n\tVReader *vcfgo.Reader\n\t\/\/ index for 'ref' and 'alt' columns if they were present.\n\trefalt []int\n\n\tfile *os.File\n\tbuf *bufio.Reader\n}\n\n\/\/ create a new bix that does as little as possible from the old bix\nfunc newShort(old *Bix) (*Bix, error) {\n\ttbx := &Bix{\n\t\tIndex: old.Index,\n\t\tpath: old.path,\n\t\tworkers: old.workers,\n\t\tVReader: old.VReader,\n\t\trefalt: old.refalt,\n\t}\n\tvar err error\n\ttbx.file, err = os.Open(tbx.path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttbx.bgzf, err = bgzf.NewReader(tbx.file, old.workers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tbx, nil\n}\n\n\/\/ New returns a &Bix\nfunc New(path string, workers ...int) (*Bix, error) {\n\tf, err := os.Open(path + \".tbi\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tgz, err := gzip.NewReader(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gz.Close()\n\n\tidx, err := tabix.ReadFrom(gz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := 1\n\tif len(workers) > 0 {\n\t\tn = workers[0]\n\t}\n\n\tb, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbgz, err := bgzf.NewReader(b, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar h []string\n\ttbx := &Bix{bgzf: bgz, path: path, file: b, workers: n}\n\n\tbuf := bufio.NewReader(bgz)\n\tl, err := buf.ReadString('\\n')\n\tif err != nil {\n\t\treturn tbx, err\n\t}\n\n\tfor i := 0; i < int(idx.Skip) || rune(l[0]) == idx.MetaChar; i++ {\n\t\th = append(h, l)\n\t\tl, err = buf.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn tbx, err\n\t\t}\n\t}\n\theader := strings.Join(h, \"\")\n\n\tif len(h) > 0 && strings.HasSuffix(tbx.path, \".vcf.gz\") {\n\t\tvar err error\n\t\th := strings.NewReader(header)\n\n\t\ttbx.VReader, err = vcfgo.NewReader(h, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if len(h) > 0 {\n\t\thtab := strings.Split(strings.TrimSpace(h[len(h)-1]), \"\\t\")\n\t\t\/\/ try to find ref and alternate columns to make an IREFALT\n\t\tfor i, hdr := range htab {\n\t\t\tif l := strings.ToLower(hdr); l == \"ref\" || l == \"reference\" {\n\t\t\t\ttbx.refalt = append(tbx.refalt, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfor i, hdr := range htab {\n\t\t\tif l := strings.ToLower(hdr); l == \"alt\" || l == \"alternate\" {\n\t\t\t\ttbx.refalt = append(tbx.refalt, i)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif len(tbx.refalt) != 2 {\n\t\t\ttbx.refalt = nil\n\t\t}\n\t}\n\ttbx.buf = buf\n\ttbx.Index = idx\n\treturn tbx, nil\n}\n\nfunc (b *Bix) Close() error {\n\tb.bgzf.Close()\n\tb.file.Close()\n\treturn nil\n}\n\nfunc (tbx *Bix) toPosition(toks [][]byte) interfaces.Relatable {\n\tisVCF := tbx.VReader != nil\n\tvar g *parsers.Interval\n\n\tif isVCF {\n\t\tv := tbx.VReader.Parse(toks)\n\t\treturn interfaces.AsRelatable(v)\n\n\t} else {\n\t\tg, _ = newgeneric(toks, int(tbx.Index.NameColumn-1), int(tbx.Index.BeginColumn-1),\n\t\t\tint(tbx.Index.EndColumn-1), tbx.Index.ZeroBased)\n\t}\n\tif tbx.refalt != nil {\n\t\tra := parsers.RefAltInterval{Interval: *g, HasEnd: tbx.Index.EndColumn != tbx.Index.BeginColumn}\n\t\tra.SetRefAlt(tbx.refalt)\n\t\treturn &ra\n\t}\n\treturn g\n}\n\nfunc unsafeString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}\n\n\/\/ return an interval using the info from the tabix index\nfunc newgeneric(fields [][]byte, chromCol int, startCol int, endCol int, zeroBased bool) (*parsers.Interval, error) {\n\ts, err := strconv.Atoi(unsafeString(fields[startCol]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !zeroBased {\n\t\ts -= 1\n\t}\n\te, err := strconv.Atoi(unsafeString(fields[endCol]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parsers.NewInterval(string(fields[chromCol]), uint32(s), uint32(e), fields, uint32(0), nil), nil\n}\n\nfunc (tbx *Bix) ChunkedReader(chrom string, start, end int) (io.ReadCloser, error) {\n\tchunks, err := tbx.Chunks(chrom, start, end)\n\tif err == index.ErrNoReference {\n\t\tif strings.HasPrefix(chrom, \"chr\") {\n\t\t\tchunks, err = tbx.Chunks(chrom[3:], start, end)\n\t\t} else {\n\t\t\tchunks, err = tbx.Chunks(\"chr\"+chrom, start, end)\n\t\t}\n\t}\n\tif err == index.ErrInvalid {\n\t\treturn index.NewChunkReader(tbx.bgzf, []bgzf.Chunk{})\n\t} else if err == index.ErrNoReference {\n\t\tlog.Printf(\"chromosome %s not found in %s\\n\", chrom, tbx.path)\n\t\treturn index.NewChunkReader(tbx.bgzf, []bgzf.Chunk{})\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tcr, err := index.NewChunkReader(tbx.bgzf, chunks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cr, nil\n}\n\n\/\/ bixerator meets interfaces.RelatableIterator\ntype bixerator struct {\n\trdr io.ReadCloser\n\tbuf *bufio.Reader\n\ttbx *Bix\n\n\tregion interfaces.IPosition\n}\n\nfunc makeFields(line []byte) [][]byte {\n\tfields := make([][]byte, 9)\n\tcopy(fields[:8], bytes.SplitN(line, []byte{'\\t'}, 8))\n\ts := 0\n\tfor i, f := range fields {\n\t\tif i == 7 {\n\t\t\tbreak\n\t\t}\n\t\ts += len(f) + 1\n\t}\n\te := bytes.IndexByte(line[s:], '\\t')\n\tif e == -1 {\n\t\te = len(line)\n\t} else {\n\t\te += s\n\t}\n\n\tfields[7] = line[s:e]\n\tif len(line) > e+1 {\n\t\tfields[8] = line[e+1:]\n\t} else {\n\t\tfields = fields[:8]\n\t}\n\n\treturn fields\n}\n\nfunc (b bixerator) Next() (interfaces.Relatable, error) {\n\n\tfor {\n\t\tline, err := b.buf.ReadBytes('\\n')\n\n\t\tif err == io.EOF && len(line) == 0 {\n\t\t\treturn nil, io.EOF\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(line) == 0 {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\tif line[len(line)-1] == '\\n' {\n\t\t\tline = line[:len(line)-1]\n\t\t}\n\t\tin := true\n\t\tvar toks [][]byte\n\t\tif b.region != nil {\n\t\t\tvar err error\n\n\t\t\tin, err, toks = b.inBounds(line)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\tif b.tbx.VReader != nil {\n\t\t\t\ttoks = makeFields(line)\n\t\t\t} else {\n\t\t\t\ttoks = bytes.Split(line, []byte{'\\t'})\n\t\t\t}\n\t\t}\n\n\t\tif in {\n\t\t\treturn b.tbx.toPosition(toks), nil\n\t\t}\n\t}\n\treturn nil, io.EOF\n}\n\nfunc (b bixerator) Close() error {\n\tif b.rdr != nil {\n\t\tb.rdr.Close()\n\t}\n\treturn b.tbx.Close()\n}\n\nvar _ interfaces.RelatableIterator = bixerator{}\n\nfunc (tbx *Bix) Query(region interfaces.IPosition) (interfaces.RelatableIterator, error) {\n\ttbx2, err := newShort(tbx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif region == nil {\n\t\tvar l string\n\t\tvar err error\n\t\tbuf := bufio.NewReader(tbx2.bgzf)\n\t\tl, err = buf.ReadString('\\n')\n\t\tfor i := 0; i < int(tbx2.Index.Skip) || rune(l[0]) == tbx2.Index.MetaChar; i++ {\n\t\t\tl, err = buf.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif tbx2.Index.Skip == 0 && rune(l[0]) != tbx2.Index.MetaChar {\n\t\t\tbuf = bufio.NewReader(io.MultiReader(strings.NewReader(l), buf))\n\t\t}\n\t\treturn bixerator{nil, buf, tbx2, region}, nil\n\t}\n\n\tcr, err := tbx2.ChunkedReader(region.Chrom(), int(region.Start()), int(region.End()))\n\tif err != nil {\n\t\tif cr != nil {\n\t\t\ttbx2.Close()\n\t\t\tcr.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn bixerator{cr, bufio.NewReader(cr), tbx2, region}, nil\n}\n\nfunc (tbx *Bix) AddInfoToHeader(id, number, vtype, desc string) {\n\tif tbx.VReader == nil {\n\t\treturn\n\t}\n\ttbx.VReader.AddInfoToHeader(id, number, vtype, desc)\n}\n\nfunc (tbx *Bix) GetHeaderType(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"\"\n\t}\n\treturn tbx.VReader.GetHeaderType(field)\n}\n\nfunc (tbx *Bix) GetHeaderDescription(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"\"\n\t}\n\tif h, ok := tbx.VReader.Header.Infos[field]; ok {\n\t\treturn h.Description\n\t}\n\treturn \"\"\n}\n\nfunc (tbx *Bix) GetHeaderNumber(field string) string {\n\tif tbx.VReader == nil {\n\t\treturn \"1\"\n\t}\n\tif h, ok := tbx.VReader.Header.Infos[field]; ok {\n\t\treturn h.Number\n\t}\n\treturn \"1\"\n}\n\nfunc (b *bixerator) inBounds(line []byte) (bool, error, [][]byte) {\n\n\tvar readErr error\n\tline = bytes.TrimRight(line, \"\\r\\n\")\n\tvar toks [][]byte\n\tif b.tbx.VReader != nil {\n\t\ttoks = makeFields(line)\n\t} else {\n\t\ttoks = bytes.Split(line, []byte{'\\t'})\n\t}\n\n\ts, err := strconv.Atoi(unsafeString(toks[b.tbx.BeginColumn-1]))\n\tif err != nil {\n\t\treturn false, err, toks\n\t}\n\n\tpos := s\n\tif !b.tbx.ZeroBased {\n\t\tpos -= 1\n\t}\n\tif pos >= int(b.region.End()) {\n\t\treturn false, io.EOF, toks\n\t}\n\n\tif b.tbx.EndColumn != 0 {\n\t\te, err := strconv.Atoi(unsafeString(toks[b.tbx.EndColumn-1]))\n\t\tif err != nil {\n\t\t\treturn false, err, toks\n\t\t}\n\t\tif e < int(b.region.Start()) {\n\t\t\treturn false, readErr, toks\n\t\t}\n\t\treturn true, readErr, toks\n\t} else if b.tbx.VReader != nil {\n\t\tstart := int(b.region.Start())\n\t\talt := strings.Split(string(toks[4]), \",\")\n\t\tlref := len(toks[3])\n\t\tif start >= pos+lref {\n\t\t\tfor _, a := range alt {\n\t\t\t\tif a[0] != '<' || a == \"<CN0>\" {\n\t\t\t\t\te := pos + lref\n\t\t\t\t\tif e > start {\n\t\t\t\t\t\treturn true, readErr, toks\n\t\t\t\t\t}\n\t\t\t\t} else if strings.HasPrefix(a, \"<DEL\") || strings.HasPrefix(a, \"<DUP\") || strings.HasPrefix(a, \"<INV\") || strings.HasPrefix(a, \"<CN\") {\n\t\t\t\t\tinfo := string(toks[7])\n\t\t\t\t\tif idx := strings.Index(info, \";END=\"); idx != -1 {\n\t\t\t\t\t\tv := info[idx+5 : idx+5+strings.Index(info[idx+5:], \";\")]\n\t\t\t\t\t\te, err := strconv.Atoi(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn false, err, toks\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif e > start {\n\t\t\t\t\t\t\treturn true, readErr, toks\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Println(\"no end:\", b.tbx.path, string(toks[0]), pos, string(toks[3]), a)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn true, readErr, toks\n\t\t}\n\t\treturn false, readErr, toks\n\t}\n\treturn false, readErr, toks\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fitasks\n\nimport (\n\t\"crypto\/x509\/pkix\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kops\/pkg\/pki\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n)\n\n\/\/ +kops:fitask\ntype Keypair struct {\n\t\/\/ Name is the name of the keypair\n\tName *string\n\t\/\/ AlternateNames a list of alternative names for this certificate\n\tAlternateNames []string `json:\"alternateNames\"`\n\t\/\/ Lifecycle is context for a task\n\tLifecycle fi.Lifecycle\n\t\/\/ Signer is the keypair to use to sign, for when we want to use an alternative CA\n\tSigner *Keypair\n\t\/\/ Subject is the certificate subject\n\tSubject string `json:\"subject\"`\n\t\/\/ Type the type of certificate i.e. CA, server, client etc\n\tType string `json:\"type\"`\n\t\/\/ LegacyFormat is whether the keypair is stored in a legacy format.\n\tLegacyFormat bool `json:\"oldFormat\"`\n\n\tcertificates *fi.TaskDependentResource\n\tkeyset *fi.Keyset\n}\n\nvar _ fi.HasCheckExisting = &Keypair{}\nvar _ fi.HasName = &Keypair{}\n\n\/\/ It's important always to check for the existing key, so we don't regenerate keys e.g. on terraform\nfunc (e *Keypair) CheckExisting(c *fi.Context) bool {\n\treturn true\n}\n\nvar _ fi.CompareWithID = &Keypair{}\n\nfunc (e *Keypair) CompareWithID() *string {\n\treturn &e.Subject\n}\n\nfunc (e *Keypair) Find(c *fi.Context) (*Keypair, error) {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tkeyset, err := c.Keystore.FindKeyset(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif keyset == nil || keyset.Primary == nil || keyset.Primary.Certificate == nil {\n\t\treturn nil, nil\n\t}\n\tcert := keyset.Primary.Certificate\n\tif keyset.Primary.PrivateKey == nil {\n\t\treturn nil, fmt.Errorf(\"found cert in store, but did not find private key: %q\", name)\n\t}\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, cert.Certificate.DNSNames...)\n\talternateNames = append(alternateNames, cert.Certificate.EmailAddresses...)\n\tfor _, ip := range cert.Certificate.IPAddresses {\n\t\talternateNames = append(alternateNames, ip.String())\n\t}\n\tsort.Strings(alternateNames)\n\n\tactual := &Keypair{\n\t\tName: &name,\n\t\tAlternateNames: alternateNames,\n\t\tSubject: pki.PkixNameToString(&cert.Subject),\n\t\tType: pki.BuildTypeDescription(cert.Certificate),\n\t\tLegacyFormat: keyset.LegacyFormat,\n\t}\n\n\tactual.Signer = &Keypair{Subject: pki.PkixNameToString(&cert.Certificate.Issuer)}\n\n\t\/\/ Avoid spurious changes\n\tactual.Lifecycle = e.Lifecycle\n\n\tif err := e.setResources(keyset); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting resources: %v\", err)\n\t}\n\n\treturn actual, nil\n}\n\nfunc (e *Keypair) Run(c *fi.Context) error {\n\terr := e.normalize()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *Keypair) normalize() error {\n\tvar alternateNames []string\n\n\tfor _, s := range e.AlternateNames {\n\t\ts = strings.TrimSpace(s)\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\talternateNames = append(alternateNames, s)\n\t}\n\n\tsort.Strings(alternateNames)\n\te.AlternateNames = alternateNames\n\n\treturn nil\n}\n\nfunc (_ *Keypair) CheckChanges(a, e, changes *Keypair) error {\n\tif a != nil {\n\t\tif changes.Name != nil {\n\t\t\treturn fi.CannotChangeField(\"Name\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn fi.RequiredField(\"Name\")\n\t}\n\n\tchangeStoredFormat := false\n\tcreateCertificate := false\n\tif a == nil {\n\t\tcreateCertificate = true\n\t\tklog.V(8).Infof(\"creating brand new certificate\")\n\t} else if changes != nil {\n\t\tklog.V(8).Infof(\"creating certificate as changes are not nil\")\n\t\tif changes.AlternateNames != nil {\n\t\t\tcreateCertificate = true\n\t\t\tklog.V(8).Infof(\"creating certificate new AlternateNames\")\n\t\t} else if changes.Subject != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tklog.V(8).Infof(\"creating certificate new Subject\")\n\t\t} else if changes.Type != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tklog.Infof(\"creating certificate %q as Type has changed (actual=%v, expected=%v)\", name, a.Type, e.Type)\n\t\t} else if a.LegacyFormat {\n\t\t\tchangeStoredFormat = true\n\t\t} else {\n\t\t\tklog.Warningf(\"Ignoring changes in key: %v\", fi.DebugAsJsonString(changes))\n\t\t}\n\t}\n\n\tif createCertificate {\n\t\tklog.V(2).Infof(\"Creating PKI keypair %q\", name)\n\n\t\tkeyset, err := c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif keyset == nil {\n\t\t\tkeyset = &fi.Keyset{\n\t\t\t\tItems: map[string]*fi.KeysetItem{},\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We always reuse the private key if it exists,\n\t\t\/\/ if we change keys we often have to regenerate e.g. the service accounts\n\t\t\/\/ TODO: Eventually rotate keys \/ don't always reuse?\n\t\tvar privateKey *pki.PrivateKey\n\t\tif keyset.Primary != nil {\n\t\t\tprivateKey = keyset.Primary.PrivateKey\n\t\t}\n\t\tif privateKey == nil {\n\t\t\tklog.V(2).Infof(\"Creating privateKey %q\", name)\n\t\t}\n\n\t\tsigner := fi.CertificateIDCA\n\t\tif e.Signer != nil {\n\t\t\tsigner = fi.StringValue(e.Signer.Name)\n\t\t}\n\n\t\tklog.Infof(\"Issuing new certificate: %q\", *e.Name)\n\n\t\tserial := pki.BuildPKISerial(time.Now().UnixNano())\n\n\t\tsubjectPkix, err := parsePkixName(e.Subject)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing Subject: %v\", err)\n\t\t}\n\n\t\tif len(subjectPkix.ToRDNSequence()) == 0 {\n\t\t\treturn fmt.Errorf(\"subject name was empty for SSL keypair %q\", *e.Name)\n\t\t}\n\n\t\treq := pki.IssueCertRequest{\n\t\t\tSigner: signer,\n\t\t\tType: e.Type,\n\t\t\tSubject: *subjectPkix,\n\t\t\tAlternateNames: e.AlternateNames,\n\t\t\tPrivateKey: privateKey,\n\t\t\tSerial: serial,\n\t\t}\n\t\tcert, privateKey, _, err := pki.IssueCert(&req, c.Keystore)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tserialString := cert.Certificate.SerialNumber.String()\n\t\tki := &fi.KeysetItem{\n\t\t\tId: serialString,\n\t\t\tCertificate: cert,\n\t\t\tPrivateKey: privateKey,\n\t\t}\n\n\t\tkeyset.LegacyFormat = false\n\t\tkeyset.Items[ki.Id] = ki\n\t\tkeyset.Primary = ki\n\t\terr = c.Keystore.StoreKeyset(name, keyset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.setResources(keyset); err != nil {\n\t\t\treturn fmt.Errorf(\"error setting resources: %v\", err)\n\t\t}\n\n\t\t\/\/ Make double-sure it round-trips\n\t\t_, err = c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tklog.V(8).Infof(\"created certificate with cn=%s\", cert.Subject.CommonName)\n\t}\n\n\t\/\/ TODO: Check correct subject \/ flags\n\n\tif changeStoredFormat {\n\t\t\/\/ We fetch and reinsert the same keypair, forcing an update to our preferred format\n\t\t\/\/ TODO: We're assuming that we want to save in the preferred format\n\t\tkeyset, err := c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyset.LegacyFormat = false\n\t\terr = c.Keystore.StoreKeyset(name, keyset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tklog.Infof(\"updated Keypair %q to new format\", name)\n\t}\n\n\treturn nil\n}\n\nfunc parsePkixName(s string) (*pkix.Name, error) {\n\tname := new(pkix.Name)\n\n\ttokens := strings.Split(s, \",\")\n\tfor _, token := range tokens {\n\t\ttoken = strings.TrimSpace(token)\n\t\tkv := strings.SplitN(token, \"=\", 2)\n\t\tif len(kv) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"unrecognized token (expected k=v): %q\", token)\n\t\t}\n\t\tk := strings.ToLower(kv[0])\n\t\tv := kv[1]\n\n\t\tswitch k {\n\t\tcase \"cn\":\n\t\t\tname.CommonName = v\n\t\tcase \"o\":\n\t\t\tname.Organization = append(name.Organization, v)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unrecognized key %q in token %q\", k, token)\n\t\t}\n\t}\n\n\treturn name, nil\n}\n\nfunc (e *Keypair) ensureResources() {\n\tif e.certificates == nil {\n\t\te.certificates = &fi.TaskDependentResource{Task: e}\n\t}\n}\n\nfunc (e *Keypair) setResources(keyset *fi.Keyset) error {\n\te.ensureResources()\n\n\ts, err := keyset.ToCertificateBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\te.certificates.Resource = fi.NewBytesResource(s)\n\n\te.keyset = keyset\n\treturn nil\n}\n\nfunc (e *Keypair) Keyset() *fi.Keyset {\n\treturn e.keyset\n}\n\nfunc (e *Keypair) Certificates() *fi.TaskDependentResource {\n\te.ensureResources()\n\treturn e.certificates\n}\n<commit_msg>Fix dryrun cluster creation<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fitasks\n\nimport (\n\t\"crypto\/x509\/pkix\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kops\/pkg\/pki\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n)\n\n\/\/ +kops:fitask\ntype Keypair struct {\n\t\/\/ Name is the name of the keypair\n\tName *string\n\t\/\/ AlternateNames a list of alternative names for this certificate\n\tAlternateNames []string `json:\"alternateNames\"`\n\t\/\/ Lifecycle is context for a task\n\tLifecycle fi.Lifecycle\n\t\/\/ Signer is the keypair to use to sign, for when we want to use an alternative CA\n\tSigner *Keypair\n\t\/\/ Subject is the certificate subject\n\tSubject string `json:\"subject\"`\n\t\/\/ Type the type of certificate i.e. CA, server, client etc\n\tType string `json:\"type\"`\n\t\/\/ LegacyFormat is whether the keypair is stored in a legacy format.\n\tLegacyFormat bool `json:\"oldFormat\"`\n\n\tcertificates *fi.TaskDependentResource\n\tkeyset *fi.Keyset\n}\n\nvar _ fi.HasCheckExisting = &Keypair{}\nvar _ fi.HasName = &Keypair{}\n\n\/\/ It's important always to check for the existing key, so we don't regenerate keys e.g. on terraform\nfunc (e *Keypair) CheckExisting(c *fi.Context) bool {\n\treturn true\n}\n\nvar _ fi.CompareWithID = &Keypair{}\n\nfunc (e *Keypair) CompareWithID() *string {\n\treturn &e.Subject\n}\n\nfunc (e *Keypair) Find(c *fi.Context) (*Keypair, error) {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tkeyset, err := c.Keystore.FindKeyset(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif keyset == nil || keyset.Primary == nil || keyset.Primary.Certificate == nil {\n\t\treturn nil, nil\n\t}\n\tcert := keyset.Primary.Certificate\n\tif keyset.Primary.PrivateKey == nil {\n\t\treturn nil, fmt.Errorf(\"found cert in store, but did not find private key: %q\", name)\n\t}\n\n\tvar alternateNames []string\n\talternateNames = append(alternateNames, cert.Certificate.DNSNames...)\n\talternateNames = append(alternateNames, cert.Certificate.EmailAddresses...)\n\tfor _, ip := range cert.Certificate.IPAddresses {\n\t\talternateNames = append(alternateNames, ip.String())\n\t}\n\tsort.Strings(alternateNames)\n\n\tactual := &Keypair{\n\t\tName: &name,\n\t\tAlternateNames: alternateNames,\n\t\tSubject: pki.PkixNameToString(&cert.Subject),\n\t\tType: pki.BuildTypeDescription(cert.Certificate),\n\t\tLegacyFormat: keyset.LegacyFormat,\n\t}\n\n\tactual.Signer = &Keypair{Subject: pki.PkixNameToString(&cert.Certificate.Issuer)}\n\n\t\/\/ Avoid spurious changes\n\tactual.Lifecycle = e.Lifecycle\n\n\tif err := e.setResources(keyset); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting resources: %v\", err)\n\t}\n\n\treturn actual, nil\n}\n\nfunc (e *Keypair) Run(c *fi.Context) error {\n\terr := e.normalize()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fi.DefaultDeltaRunMethod(e, c)\n}\n\nfunc (e *Keypair) normalize() error {\n\tvar alternateNames []string\n\n\tfor _, s := range e.AlternateNames {\n\t\ts = strings.TrimSpace(s)\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\talternateNames = append(alternateNames, s)\n\t}\n\n\tsort.Strings(alternateNames)\n\te.AlternateNames = alternateNames\n\n\treturn nil\n}\n\nfunc (_ *Keypair) CheckChanges(a, e, changes *Keypair) error {\n\tif a != nil {\n\t\tif changes.Name != nil {\n\t\t\treturn fi.CannotChangeField(\"Name\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {\n\tname := fi.StringValue(e.Name)\n\tif name == \"\" {\n\t\treturn fi.RequiredField(\"Name\")\n\t}\n\n\tchangeStoredFormat := false\n\tcreateCertificate := false\n\tif a == nil {\n\t\tcreateCertificate = true\n\t\tklog.V(8).Infof(\"creating brand new certificate\")\n\t} else if changes != nil {\n\t\tklog.V(8).Infof(\"creating certificate as changes are not nil\")\n\t\tif changes.AlternateNames != nil {\n\t\t\tcreateCertificate = true\n\t\t\tklog.V(8).Infof(\"creating certificate new AlternateNames\")\n\t\t} else if changes.Subject != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tklog.V(8).Infof(\"creating certificate new Subject\")\n\t\t} else if changes.Type != \"\" {\n\t\t\tcreateCertificate = true\n\t\t\tklog.Infof(\"creating certificate %q as Type has changed (actual=%v, expected=%v)\", name, a.Type, e.Type)\n\t\t} else if a.LegacyFormat {\n\t\t\tchangeStoredFormat = true\n\t\t} else {\n\t\t\tklog.Warningf(\"Ignoring changes in key: %v\", fi.DebugAsJsonString(changes))\n\t\t}\n\t}\n\n\tif createCertificate {\n\t\tklog.V(2).Infof(\"Creating PKI keypair %q\", name)\n\n\t\tkeyset, err := c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif keyset == nil {\n\t\t\tkeyset = &fi.Keyset{\n\t\t\t\tItems: map[string]*fi.KeysetItem{},\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We always reuse the private key if it exists,\n\t\t\/\/ if we change keys we often have to regenerate e.g. the service accounts\n\t\t\/\/ TODO: Eventually rotate keys \/ don't always reuse?\n\t\tvar privateKey *pki.PrivateKey\n\t\tif keyset.Primary != nil {\n\t\t\tprivateKey = keyset.Primary.PrivateKey\n\t\t}\n\t\tif privateKey == nil {\n\t\t\tklog.V(2).Infof(\"Creating privateKey %q\", name)\n\t\t}\n\n\t\tsigner := fi.CertificateIDCA\n\t\tif e.Signer != nil {\n\t\t\tsigner = fi.StringValue(e.Signer.Name)\n\t\t}\n\n\t\tklog.Infof(\"Issuing new certificate: %q\", *e.Name)\n\n\t\tserial := pki.BuildPKISerial(time.Now().UnixNano())\n\n\t\tsubjectPkix, err := parsePkixName(e.Subject)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing Subject: %v\", err)\n\t\t}\n\n\t\tif len(subjectPkix.ToRDNSequence()) == 0 {\n\t\t\treturn fmt.Errorf(\"subject name was empty for SSL keypair %q\", *e.Name)\n\t\t}\n\n\t\treq := pki.IssueCertRequest{\n\t\t\tSigner: signer,\n\t\t\tType: e.Type,\n\t\t\tSubject: *subjectPkix,\n\t\t\tAlternateNames: e.AlternateNames,\n\t\t\tPrivateKey: privateKey,\n\t\t\tSerial: serial,\n\t\t}\n\t\tcert, privateKey, _, err := pki.IssueCert(&req, c.Keystore)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tserialString := cert.Certificate.SerialNumber.String()\n\t\tki := &fi.KeysetItem{\n\t\t\tId: serialString,\n\t\t\tCertificate: cert,\n\t\t\tPrivateKey: privateKey,\n\t\t}\n\n\t\tkeyset.LegacyFormat = false\n\t\tkeyset.Items[ki.Id] = ki\n\t\tkeyset.Primary = ki\n\t\terr = c.Keystore.StoreKeyset(name, keyset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.setResources(keyset); err != nil {\n\t\t\treturn fmt.Errorf(\"error setting resources: %v\", err)\n\t\t}\n\n\t\t\/\/ Make double-sure it round-trips\n\t\t_, err = c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tklog.V(8).Infof(\"created certificate with cn=%s\", cert.Subject.CommonName)\n\t}\n\n\t\/\/ TODO: Check correct subject \/ flags\n\n\tif changeStoredFormat {\n\t\t\/\/ We fetch and reinsert the same keypair, forcing an update to our preferred format\n\t\t\/\/ TODO: We're assuming that we want to save in the preferred format\n\t\tkeyset, err := c.Keystore.FindKeyset(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyset.LegacyFormat = false\n\t\terr = c.Keystore.StoreKeyset(name, keyset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tklog.Infof(\"updated Keypair %q to new format\", name)\n\t}\n\n\treturn nil\n}\n\nfunc parsePkixName(s string) (*pkix.Name, error) {\n\tname := new(pkix.Name)\n\n\ttokens := strings.Split(s, \",\")\n\tfor _, token := range tokens {\n\t\ttoken = strings.TrimSpace(token)\n\t\tkv := strings.SplitN(token, \"=\", 2)\n\t\tif len(kv) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"unrecognized token (expected k=v): %q\", token)\n\t\t}\n\t\tk := strings.ToLower(kv[0])\n\t\tv := kv[1]\n\n\t\tswitch k {\n\t\tcase \"cn\":\n\t\t\tname.CommonName = v\n\t\tcase \"o\":\n\t\t\tname.Organization = append(name.Organization, v)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unrecognized key %q in token %q\", k, token)\n\t\t}\n\t}\n\n\treturn name, nil\n}\n\nfunc (e *Keypair) ensureResources() {\n\tif e.certificates == nil {\n\t\te.certificates = &fi.TaskDependentResource{\n\t\t\tResource: fi.NewStringResource(\"<< TO BE GENERATED >>\\n\"),\n\t\t\tTask: e,\n\t\t}\n\t\te.keyset = &fi.Keyset{\n\t\t\tPrimary: &fi.KeysetItem{\n\t\t\t\tId: \"<< TO BE GENERATED >>\",\n\t\t\t},\n\t\t}\n\t}\n}\n\nfunc (e *Keypair) setResources(keyset *fi.Keyset) error {\n\te.ensureResources()\n\n\ts, err := keyset.ToCertificateBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\te.certificates.Resource = fi.NewBytesResource(s)\n\n\te.keyset = keyset\n\treturn nil\n}\n\nfunc (e *Keypair) Keyset() *fi.Keyset {\n\treturn e.keyset\n}\n\nfunc (e *Keypair) Certificates() *fi.TaskDependentResource {\n\te.ensureResources()\n\treturn e.certificates\n}\n<|endoftext|>"} {"text":"<commit_before>package geoffrey\n\nimport (\n \"github.com\/jriddick\/geoffrey\/irc\"\n \"github.com\/jriddick\/geoffrey\/msg\"\n)\n\ntype Bot struct {\n client *irc.IRC\n writer chan<- string\n reader <-chan *msg.Message\n config Config\n}\n\ntype Config struct {\n irc.Config\n Nick string\n User string\n Name string\n}\n\n\/\/ Creates a new bot\nfunc NewBot(config Config) *Bot {\n \/\/ Create the bot\n bot := &Bot{\n client: irc.NewIRC(config),\n config: config,\n }\n\n \/\/ Get the reader and writer channels\n bot.writer = bot.client.Writer()\n bot.reader = bot.client.Reader()\n\n return bot\n} \n<commit_msg>added bot read handler function<commit_after>package geoffrey\n\nimport (\n \"github.com\/jriddick\/geoffrey\/irc\"\n \"github.com\/jriddick\/geoffrey\/msg\"\n\t\"fmt\"\n)\n\ntype Bot struct {\n client *irc.IRC\n writer chan<- string\n reader <-chan *msg.Message\n config Config\n}\n\ntype Config struct {\n irc.Config\n Nick string\n User string\n Name string\n}\n\n\/\/ Creates a new bot\nfunc NewBot(config Config) *Bot {\n \/\/ Create the bot\n bot := &Bot{\n client: irc.NewIRC(config),\n config: config,\n }\n\n \/\/ Get the reader and writer channels\n bot.writer = bot.client.Writer()\n bot.reader = bot.client.Reader()\n\n return bot\n} \n\nfunc (b *Bot) Handler() {\n for msg := range b.reader {\n \/\/ Send nick and user after connecting\n if msg.Trailing == \"*** Looking up your hostname...\" {\n b.writer <- fmt.Sprintf(\"NICK %s\", b.config.Nick)\n b.writer <- fmt.Sprintf(\"USER %s 0 * :%s\", b.config.User, b.config.Name)\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package miniporte\n\nimport (\n\t\"log\"\n\n\t\"github.com\/cenkalti\/backoff\"\n\tirc \"github.com\/fluffle\/goirc\/client\"\n\tlink \"github.com\/oz\/miniporte\/link\"\n)\n\ntype Bot struct {\n\tChans []string\n\tConfig *irc.Config\n\tClient *irc.Conn\n\tCtl (chan string)\n}\n\n\/\/ Create a new Bot\nfunc New(server, nick, name, ident string, chans []string) *Bot {\n\tcfg := irc.NewConfig(nick)\n\tcfg.SSL = true\n\tcfg.Me.Name = name\n\tcfg.Me.Ident = ident\n\tcfg.Server = server\n\tcfg.NewNick = func(n string) string { return n + \"_\" }\n\n\treturn &Bot{\n\t\tChans: chans,\n\t\tConfig: cfg,\n\t\tClient: irc.Client(cfg),\n\t\tCtl: make(chan string),\n\t}\n}\n\nfunc (b *Bot) joinChannels() {\n\tlog.Println(\"Joining channels\", b.Chans)\n\tfor _, c := range b.Chans {\n\t\tb.Client.Join(c)\n\t}\n}\n\nfunc (b *Bot) Run() {\n\tb.initializeHandlers()\n\tgo b.connect()\n\tb.commandLoop()\n\tlog.Println(\"Bot quitting...\")\n}\n\nfunc (b *Bot) onMessage(msg *irc.Line) {\n\t\/\/ Ignore non-public messages\n\tif !msg.Public() {\n\t\treturn\n\t}\n\n\tif url := link.Find(msg.Text()); url != \"\" {\n\t\ttags := link.Tags(msg.Text())\n\t\tif len(tags) == 0 {\n\t\t\ttags = []string{\"private\"}\n\t\t}\n\t\ttags = append(tags, msg.Nick, msg.Target())\n\t\tgo func() {\n\t\t\tif err := link.Save(url, tags); err != nil {\n\t\t\t\tif !link.IncludesPrivate(tags) {\n\t\t\t\t\tb.Client.Privmsg(msg.Target(), \"Oops! \"+err.Error())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !link.IncludesPrivate(tags) {\n\t\t\t\tb.Client.Privmsg(msg.Target(), \"Saved!\")\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (b *Bot) initializeHandlers() {\n\t\/\/ Connected\n\tb.Client.HandleFunc(\"connected\", func(conn *irc.Conn, line *irc.Line) {\n\t\tlog.Println(\"Connected!\")\n\t\tb.joinChannels()\n\t})\n\n\t\/\/ Disconnected\n\tb.Client.HandleFunc(\"disconnected\", func(conn *irc.Conn, line *irc.Line) {\n\t\tlog.Println(\"Disconnected\")\n\t\tb.Ctl <- \"disconnected\"\n\t})\n\n\t\/\/ PRIVMSG\n\tb.Client.HandleFunc(\"PRIVMSG\", func(conn *irc.Conn, line *irc.Line) {\n\t\tb.onMessage(line)\n\t})\n}\n\nfunc (b *Bot) connect() {\n\top := func() error {\n\t\tlog.Println(\"Connecting to\", b.Config.Server)\n\t\treturn b.Client.Connect()\n\t}\n\tif err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {\n\t\tlog.Printf(\"Connection error: %s\\n\", err)\n\t\tb.Ctl <- \"connection-error\"\n\t}\n}\n\n\/\/ Connection loop\nfunc (b *Bot) commandLoop() {\n\tfor {\n\t\tfor cmd := range b.Ctl {\n\t\t\tswitch cmd {\n\t\t\tcase \"disconnected\":\n\t\t\tcase \"connection-error\":\n\t\t\t\tlog.Println(\"Disconnected:\", cmd)\n\t\t\t\tgo b.connect()\n\t\t\tdefault:\n\t\t\t\tlog.Println(\"Ignoring command\", cmd)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fix reconnects w\/ backoff when conn is lost<commit_after>package miniporte\n\nimport (\n\t\"log\"\n\n\t\"github.com\/cenkalti\/backoff\"\n\tirc \"github.com\/fluffle\/goirc\/client\"\n\tlink \"github.com\/oz\/miniporte\/link\"\n)\n\ntype Bot struct {\n\tChans []string\n\tConfig *irc.Config\n\tClient *irc.Conn\n\tCtl (chan string)\n}\n\n\/\/ Create a new Bot\nfunc New(server, nick, name, ident string, chans []string) *Bot {\n\tcfg := irc.NewConfig(nick)\n\tcfg.SSL = true\n\tcfg.Me.Name = name\n\tcfg.Me.Ident = ident\n\tcfg.Server = server\n\tcfg.NewNick = func(n string) string { return n + \"_\" }\n\n\treturn &Bot{\n\t\tChans: chans,\n\t\tConfig: cfg,\n\t\tClient: irc.Client(cfg),\n\t\tCtl: make(chan string),\n\t}\n}\n\nfunc (b *Bot) joinChannels() {\n\tlog.Println(\"Joining channels\", b.Chans)\n\tfor _, c := range b.Chans {\n\t\tb.Client.Join(c)\n\t}\n}\n\nfunc (b *Bot) Run() {\n\tb.initializeHandlers()\n\tgo b.Connect()\n\tb.commandLoop()\n\tlog.Println(\"Bot quitting...\")\n}\n\nfunc (b *Bot) onMessage(msg *irc.Line) {\n\t\/\/ Ignore non-public messages\n\tif !msg.Public() {\n\t\treturn\n\t}\n\n\tif url := link.Find(msg.Text()); url != \"\" {\n\t\ttags := link.Tags(msg.Text())\n\t\tif len(tags) == 0 {\n\t\t\ttags = []string{\"private\"}\n\t\t}\n\t\ttags = append(tags, msg.Nick, msg.Target())\n\t\tgo func() {\n\t\t\tif err := link.Save(url, tags); err != nil {\n\t\t\t\tif !link.IncludesPrivate(tags) {\n\t\t\t\t\tb.Client.Privmsg(msg.Target(), \"Oops! \"+err.Error())\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !link.IncludesPrivate(tags) {\n\t\t\t\tb.Client.Privmsg(msg.Target(), \"Saved!\")\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (b *Bot) initializeHandlers() {\n\t\/\/ Connected\n\tb.Client.HandleFunc(\"connected\", func(conn *irc.Conn, line *irc.Line) {\n\t\tlog.Println(\"Connected!\")\n\t\tb.joinChannels()\n\t})\n\n\t\/\/ Disconnected\n\tb.Client.HandleFunc(\"disconnected\", func(conn *irc.Conn, line *irc.Line) {\n\t\tlog.Println(\"Disconnected\")\n\t\tb.Ctl <- \"disconnected\"\n\t})\n\n\t\/\/ PRIVMSG\n\tb.Client.HandleFunc(\"PRIVMSG\", func(conn *irc.Conn, line *irc.Line) {\n\t\tb.onMessage(line)\n\t})\n}\n\nfunc (b *Bot) Connect() error {\n\tlog.Println(\"Connecting to\", b.Config.Server)\n\treturn b.Client.Connect()\n}\n\nfunc (b *Bot) connect() {\n\tif err := backoff.Retry(bot.Connect, backoff.NewExponentialBackOff()); err != nil {\n\t\tlog.Printf(\"Connection error: %s\\n\", err)\n\t\tb.Ctl <- \"connection-error\"\n\t}\n}\n\n\/\/ Connection loop\nfunc (b *Bot) commandLoop() {\n\tfor {\n\t\tfor cmd := range b.Ctl {\n\t\t\tswitch cmd {\n\t\t\tcase \"disconnected\", \"connection-error\":\n\t\t\t\tlog.Println(\"Disconnected:\", cmd)\n\t\t\t\tgo b.connect()\n\t\t\tdefault:\n\t\t\t\tlog.Println(\"Ignoring command\", cmd)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ircbot\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n)\n\ntype IrcBot struct {\n\t\/\/ identity\n\tUser string\n\tNick string\n\tpassword string\n\n\t\/\/ server info\n\tserver string\n\tport string\n\tchannels []string\n\n\t\/\/ tcp communication\n\tconn net.Conn\n\treader *textproto.Reader\n\twriter *textproto.Writer\n\n\t\/\/ crypto\n\tencrypted bool\n\tconfig tls.Config\n\n\t\/\/ data flow\n\tChIn chan *IrcMsg\n\tChOut chan *IrcMsg\n\tChError chan error\n\n\t\/\/ exit flag\n\tExit chan bool\n\n\t\/\/action handlers\n\thandlersIntern map[string][]Actioner \/\/handler of interanl commands\n\tHandlersUser map[string]Actioner \/\/ handler of commands fired by user\n\n\tdb *DB\n}\n\nfunc NewIrcBot(user, nick, password, server, port string, channels []string) *IrcBot {\n\tbot := IrcBot{\n\t\tUser: user,\n\t\tNick: nick,\n\t\tpassword: password,\n\t\tserver: server,\n\t\tport: port,\n\t\tchannels: channels,\n\n\t\thandlersIntern: make(map[string][]Actioner),\n\t\tHandlersUser: make(map[string]Actioner),\n\t\tChIn: make(chan *IrcMsg),\n\t\tChOut: make(chan *IrcMsg),\n\t\tChError: make(chan error),\n\t\tExit: make(chan bool),\n\n\t\tdb: newDB(),\n\t}\n\n\t\/\/init database\n\tif err := bot.db.open(\"irc.db\"); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := bot.db.init(); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &bot\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/Public function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/Connect connects the bot to the server and joins the channels\nfunc (b *IrcBot) Connect() error {\n\t\/\/launch a go routine that handle errors\n\t\/\/ b.handleError()\n\n\tlog.Println(\"Info> connection to\", b.url())\n\n\tif b.encrypted {\n\t\tcert, err := tls.LoadX509KeyPair(\"cert.pem\", \"key.pem\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconfig := tls.Config{Certificates: []tls.Certificate{cert}}\n\t\tconfig.Rand = rand.Reader\n\t\tconn, err := tls.Dial(\"tcp\", b.url(), &config)\n\t\tif err != nil {\n\t\t\treturn err\n\n\t\t}\n\t\tb.conn = conn\n\t} else {\n\t\tconn, err := net.Dial(\"tcp\", b.url())\n\t\tif err != nil {\n\t\t\treturn err\n\n\t\t}\n\t\tb.conn = conn\n\t}\n\n\tr := bufio.NewReader(b.conn)\n\tw := bufio.NewWriter(b.conn)\n\tb.reader = textproto.NewReader(r)\n\tb.writer = textproto.NewWriter(w)\n\n\t\/\/connect to server\n\tb.writer.PrintfLine(\"USER %s 8 * :%s\", b.Nick, b.Nick)\n\tb.writer.PrintfLine(\"NICK %s\", b.Nick)\n\n\t\/\/launch go routines that handle requests\n\tb.listen()\n\tb.handleActionIn()\n\tb.handleActionOut()\n\tb.handlerError()\n\n\tb.identify()\n\n\treturn nil\n}\n\n\/\/Disconnect sends QUIT command to server and closes connections\nfunc (b *IrcBot) Disconnect() {\n\tb.writer.PrintfLine(\"QUIT\")\n\tb.conn.Close()\n\tb.Exit <- true\n}\n\n\/\/Say makes the bot say text to channel\nfunc (b *IrcBot) Say(channel string, text string) {\n\tmsg := NewIrcMsg()\n\tmsg.Command = \"PRIVMSG\"\n\tmsg.CmdParams = []string{channel}\n\tmsg.Trailing = []string{\":\", text}\n\n\tb.ChOut <- msg\n}\n\n\/\/AddInternAction add an action to excecute on internal command (join,connect,...)\n\/\/command is the internal command to handle, action is an ActionFunc callback\nfunc (b *IrcBot) AddInternAction(a Actioner) {\n\taddAction(a, b.handlersIntern)\n}\n\n\/\/AddUserAction add an action fired by the user to handle\n\/\/command is the commands send by user, action is an ActionFunc callback\nfunc (b *IrcBot) AddUserAction(a Actioner) {\n\tfor _, cmd := range a.Command() {\n\t\tb.HandlersUser[cmd] = a\n\t}\n}\n\n\/\/String implements the Stringer interface\nfunc (b *IrcBot) String() string {\n\ts := fmt.Sprintf(\"server: %s\\n\", b.server)\n\ts += fmt.Sprintf(\"port: %s\\n\", b.port)\n\ts += fmt.Sprintf(\"ssl: %t\\n\", b.encrypted)\n\n\tif len(b.channels) > 0 {\n\t\ts += \"channels: \"\n\t\tfor _, v := range b.channels {\n\t\t\ts += fmt.Sprintf(\"%s \", v)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (b *IrcBot) url() string {\n\treturn fmt.Sprintf(\"%s:%s\", b.server, b.port)\n}\n\nfunc (b *IrcBot) join() {\n\n\tfor _, v := range b.channels {\n\t\ts := fmt.Sprintf(\"JOIN %s\", v)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n}\n\nfunc (b *IrcBot) identify() {\n\t\/\/idenify with nickserv\n\tif b.password != \"\" {\n\t\ts := fmt.Sprintf(\"PRIVMSG NickServ :identify %s %s\", b.Nick, b.password)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n}\n\nfunc (b *IrcBot) listen() {\n\n\tgo func() {\n\n\t\tfor {\n\t\t\t\/\/block read line from socket\n\t\t\tline, err := b.reader.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tb.ChError <- err\n\t\t\t}\n\t\t\t\/\/ fmt.Println(\"DEBUG:\", line)\n\n\t\t\t\/\/remove prefix from raw message\n\t\t\t\/\/usefull to select how to handle message\n\t\t\twithoutPrefix := strings.SplitAfterN(line, \" \", 2)[1]\n\n\t\t\t\/\/ end of MODT\n\t\t\tif strings.Contains(withoutPrefix, \"376\") {\n\t\t\t\tb.join()\n\t\t\t}\n\n\t\t\tif strings.Contains(withoutPrefix, \"PING\") {\n\t\t\t\tout := strings.Replace(withoutPrefix, \"PING\", \"PONG\", -1)\n\t\t\t\tb.writer.PrintfLine(out)\n\t\t\t\t\/\/ fmt.Println(\"DEBUG:\", out)\n\t\t\t}\n\n\t\t\tif strings.Contains(line, \"PRIVMSG\") || strings.Contains(line, \"JOIN\") {\n\t\t\t\t\/\/convert line into IrcMsg\n\t\t\t\tmsg := ParseLine(line)\n\t\t\t\tb.ChIn <- msg\n\n\t\t\t\tif err := logMsg(msg, b.db); err != nil {\n\t\t\t\t\tb.ChError <- err\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}()\n}\n\nfunc addAction(a Actioner, acts map[string][]Actioner) {\n\tif len(a.Command()) > 1 {\n\t\tfor _, cmd := range a.Command() {\n\t\t\tacts[cmd] = append(acts[cmd], a)\n\t\t}\n\t\treturn\n\t}\n\tacts[a.Command()[0]] = append(acts[a.Command()[0]], a)\n}\n\nfunc (b *IrcBot) handleActionIn() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/receive new message\n\t\t\tmsg := <-b.ChIn\n\t\t\t\/\/ fmt.Println(\"DEBUG :\", msg)\n\n\t\t\t\/\/action fired by user\n\t\t\tif msg.Command == \"PRIVMSG\" && strings.HasPrefix(msg.Trailing[0], \".\") {\n\t\t\t\taction, ok := b.HandlersUser[msg.Trailing[0]]\n\t\t\t\tif ok {\n\t\t\t\t\taction.Do(b, msg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/action fired by event\n\t\t\tactions, ok := b.handlersIntern[msg.Command]\n\t\t\tif ok && len(actions) > 0 {\n\t\t\t\tfor _, action := range actions {\n\t\t\t\t\taction.Do(b, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) handleActionOut() {\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-b.ChOut\n\n\t\t\ts := fmt.Sprintf(\"%s %s %s\", msg.Command, strings.Join(msg.CmdParams, \" \"), strings.Join(msg.Trailing, \" \"))\n\t\t\tfmt.Println(\"irc >> \", s)\n\t\t\tb.writer.PrintfLine(s)\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) handlerError() {\n\tgo func() {\n\t\tfor {\n\t\t\terr := <-b.ChError\n\t\t\tfmt.Printf(\"error >> %s\", err)\n\t\t\tif err != nil {\n\t\t\t\tb.Disconnect()\n\t\t\t\tlog.Fatalln(\"ChError ocurs :\", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) errChk(err error) {\n\tif err != nil {\n\t\tlog.Println(\"ChError> \", err)\n\t\tb.ChError <- err\n\t}\n}\n<commit_msg>better action dispatching<commit_after>package ircbot\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n)\n\ntype IrcBot struct {\n\t\/\/ identity\n\tUser string\n\tNick string\n\tpassword string\n\n\t\/\/ server info\n\tserver string\n\tport string\n\tchannels []string\n\n\t\/\/ tcp communication\n\tconn net.Conn\n\treader *textproto.Reader\n\twriter *textproto.Writer\n\n\t\/\/ crypto\n\tencrypted bool\n\tconfig tls.Config\n\n\t\/\/ data flow\n\tChIn chan *IrcMsg\n\tChOut chan *IrcMsg\n\tChError chan error\n\n\t\/\/ exit flag\n\tExit chan bool\n\n\t\/\/action handlers\n\thandlersIntern map[string][]Actioner \/\/handler of interanl commands\n\tHandlersUser map[string]Actioner \/\/ handler of commands fired by user\n\n\tdb *DB\n}\n\nfunc NewIrcBot(user, nick, password, server, port string, channels []string) *IrcBot {\n\tbot := IrcBot{\n\t\tUser: user,\n\t\tNick: nick,\n\t\tpassword: password,\n\t\tserver: server,\n\t\tport: port,\n\t\tchannels: channels,\n\n\t\thandlersIntern: make(map[string][]Actioner),\n\t\tHandlersUser: make(map[string]Actioner),\n\t\tChIn: make(chan *IrcMsg),\n\t\tChOut: make(chan *IrcMsg),\n\t\tChError: make(chan error),\n\t\tExit: make(chan bool),\n\n\t\tdb: newDB(),\n\t}\n\n\t\/\/init database\n\tif err := bot.db.open(\"irc.db\"); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := bot.db.init(); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &bot\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/Public function\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/Connect connects the bot to the server and joins the channels\nfunc (b *IrcBot) Connect() error {\n\t\/\/launch a go routine that handle errors\n\t\/\/ b.handleError()\n\n\tlog.Println(\"Info> connection to\", b.url())\n\n\tif b.encrypted {\n\t\tcert, err := tls.LoadX509KeyPair(\"cert.pem\", \"key.pem\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconfig := tls.Config{Certificates: []tls.Certificate{cert}}\n\t\tconfig.Rand = rand.Reader\n\t\tconn, err := tls.Dial(\"tcp\", b.url(), &config)\n\t\tif err != nil {\n\t\t\treturn err\n\n\t\t}\n\t\tb.conn = conn\n\t} else {\n\t\tconn, err := net.Dial(\"tcp\", b.url())\n\t\tif err != nil {\n\t\t\treturn err\n\n\t\t}\n\t\tb.conn = conn\n\t}\n\n\tr := bufio.NewReader(b.conn)\n\tw := bufio.NewWriter(b.conn)\n\tb.reader = textproto.NewReader(r)\n\tb.writer = textproto.NewWriter(w)\n\n\t\/\/connect to server\n\tb.writer.PrintfLine(\"USER %s 8 * :%s\", b.Nick, b.Nick)\n\tb.writer.PrintfLine(\"NICK %s\", b.Nick)\n\n\t\/\/launch go routines that handle requests\n\tb.listen()\n\tb.handleActionIn()\n\tb.handleActionOut()\n\tb.handlerError()\n\n\tb.identify()\n\n\treturn nil\n}\n\n\/\/Disconnect sends QUIT command to server and closes connections\nfunc (b *IrcBot) Disconnect() {\n\tb.writer.PrintfLine(\"QUIT\")\n\tb.conn.Close()\n\tb.Exit <- true\n}\n\n\/\/Say makes the bot say text to channel\nfunc (b *IrcBot) Say(channel string, text string) {\n\tmsg := NewIrcMsg()\n\tmsg.Command = \"PRIVMSG\"\n\tmsg.CmdParams = []string{channel}\n\tmsg.Trailing = []string{\":\", text}\n\n\tb.ChOut <- msg\n}\n\n\/\/AddInternAction add an action to excecute on internal command (join,connect,...)\n\/\/command is the internal command to handle, action is an ActionFunc callback\nfunc (b *IrcBot) AddInternAction(a Actioner) {\n\taddAction(a, b.handlersIntern)\n}\n\n\/\/AddUserAction add an action fired by the user to handle\n\/\/command is the commands send by user, action is an ActionFunc callback\nfunc (b *IrcBot) AddUserAction(a Actioner) {\n\tfor _, cmd := range a.Command() {\n\t\tb.HandlersUser[cmd] = a\n\t}\n}\n\n\/\/String implements the Stringer interface\nfunc (b *IrcBot) String() string {\n\ts := fmt.Sprintf(\"server: %s\\n\", b.server)\n\ts += fmt.Sprintf(\"port: %s\\n\", b.port)\n\ts += fmt.Sprintf(\"ssl: %t\\n\", b.encrypted)\n\n\tif len(b.channels) > 0 {\n\t\ts += \"channels: \"\n\t\tfor _, v := range b.channels {\n\t\t\ts += fmt.Sprintf(\"%s \", v)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (b *IrcBot) url() string {\n\treturn fmt.Sprintf(\"%s:%s\", b.server, b.port)\n}\n\nfunc (b *IrcBot) join() {\n\n\tfor _, v := range b.channels {\n\t\ts := fmt.Sprintf(\"JOIN %s\", v)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n}\n\nfunc (b *IrcBot) identify() {\n\t\/\/idenify with nickserv\n\tif b.password != \"\" {\n\t\ts := fmt.Sprintf(\"PRIVMSG NickServ :identify %s %s\", b.Nick, b.password)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n}\n\nfunc (b *IrcBot) listen() {\n\n\tgo func() {\n\n\t\tfor {\n\t\t\t\/\/block read line from socket\n\t\t\tline, err := b.reader.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tb.ChError <- err\n\t\t\t}\n\t\t\tfmt.Println(\"DEBUG:\", line)\n\n\t\t\t\/\/convert line into IrcMsg\n\t\t\tmsg := ParseLine(line)\n\n\t\t\t\/\/ end of MODT\n\t\t\tif msg.Command == \"376\" {\n\t\t\t\tb.join()\n\t\t\t}\n\n\t\t\tif msg.Command == \"PING\" {\n\t\t\t\tout := strings.Replace(line, \"PING\", \"PONG\", -1)\n\t\t\t\tb.writer.PrintfLine(out)\n\t\t\t\tfmt.Println(\"DEBUG:\", out)\n\t\t\t}\n\n\t\t\tif msg.Command == \"PRIVMSG\" || msg.Command == \"JOIN\" {\n\t\t\t\tb.ChIn <- msg\n\n\t\t\t\tif err := logMsg(msg, b.db); err != nil {\n\t\t\t\t\tb.ChError <- err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}()\n}\n\nfunc addAction(a Actioner, acts map[string][]Actioner) {\n\tif len(a.Command()) > 1 {\n\t\tfor _, cmd := range a.Command() {\n\t\t\tacts[cmd] = append(acts[cmd], a)\n\t\t}\n\t\treturn\n\t}\n\tacts[a.Command()[0]] = append(acts[a.Command()[0]], a)\n}\n\nfunc (b *IrcBot) handleActionIn() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/receive new message\n\t\t\tmsg := <-b.ChIn\n\t\t\t\/\/ fmt.Println(\"DEBUG :\", msg)\n\n\t\t\t\/\/action fired by user\n\t\t\tif msg.Command == \"PRIVMSG\" && strings.HasPrefix(msg.Trailing[0], \".\") {\n\t\t\t\taction, ok := b.HandlersUser[msg.Trailing[0]]\n\t\t\t\tif ok {\n\t\t\t\t\taction.Do(b, msg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/action fired by event\n\t\t\tactions, ok := b.handlersIntern[msg.Command]\n\t\t\tif ok && len(actions) > 0 {\n\t\t\t\tfor _, action := range actions {\n\t\t\t\t\taction.Do(b, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) handleActionOut() {\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-b.ChOut\n\n\t\t\ts := fmt.Sprintf(\"%s %s %s\", msg.Command, strings.Join(msg.CmdParams, \" \"), strings.Join(msg.Trailing, \" \"))\n\t\t\tfmt.Println(\"irc >> \", s)\n\t\t\tb.writer.PrintfLine(s)\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) handlerError() {\n\tgo func() {\n\t\tfor {\n\t\t\terr := <-b.ChError\n\t\t\tfmt.Printf(\"error >> %s\", err)\n\t\t\tif err != nil {\n\t\t\t\tb.Disconnect()\n\t\t\t\tlog.Fatalln(\"ChError ocurs :\", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) errChk(err error) {\n\tif err != nil {\n\t\tlog.Println(\"ChError> \", err)\n\t\tb.ChError <- err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\".\/images\"\n\t\".\/reddit\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\t\"google.golang.org\/appengine\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\nfunc main() {\n\ttoken := flag.String(\"token\", \"\", \"Client token\")\n\tflag.Parse()\n\n\tif *token == \"\" {\n\t\tfmt.Println(\"Must provide client token. Run with -h for more info\")\n\t\treturn\n\t}\n\tdiscord, err := discordgo.New(\"Bot \" + *token)\n\tif err != nil {\n\t\tfmt.Println(\"Error authenticating: \", err)\n\t\treturn\n\t}\n\tdiscord.AddHandler(newMessage)\n\terr = discord.Open()\n\tif err != nil {\n\t\tfmt.Println(\"Error opening Discord session: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"DankBot is now running.\")\n\tappengine.Main()\n}\n\nfunc newMessage(s *discordgo.Session, m *discordgo.MessageCreate) {\n\targs := strings.Split(m.Content, \" \")\n\tswitch args[0] {\n\tcase \"!sombra\":\n\t\tsendMessage(s, m.ChannelID, images.Sombra())\n\tcase \"!reddit\":\n\t\tvar msg string\n\t\tif len(args) >= 3 {\n\t\t\tmsg = reddit.RandomSearch(args[1], strings.Join(args[1:], \" \"))\n\t\t} else if len(args) == 2 {\n\t\t\tmsg = reddit.Random(args[1])\n\t\t} else {\n\t\t\tmsg = \"*Must provide a subreddit.*\"\n\t\t}\n\t\tif msg == \"\" {\n\t\t\tmsg = \"*No results found.*\"\n\t\t}\n\t\tsendMessage(s, m.ChannelID, msg)\n\tcase \"!rip\":\n\t\tname := url.QueryEscape(strings.Join(args[1:], \" \"))\n\t\tsendMessage(s, m.ChannelID, images.GenerateRIP(name))\n\tcase \"!retro\":\n\t\tlines := strings.Split(strings.Join(args[1:], \" \"), \",\")\n\t\tvar text1, text2, text3 string\n\t\tswitch {\n\t\tcase len(lines) >= 3:\n\t\t\ttext3 = strings.Join(lines[2:], \" \")\n\t\t\tfallthrough\n\t\tcase len(lines) == 2:\n\t\t\ttext2 = lines[1]\n\t\t\tfallthrough\n\t\tcase len(lines) == 1:\n\t\t\ttext1 = lines[0]\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\tsendMessage(s, m.ChannelID, images.GenerateRetro(text1, text2, text3))\n\t\t}\n\t}\n}\n\nfunc sendMessage(s *discordgo.Session, channelID string, content string) {\n\t_, err := s.ChannelMessageSend(channelID, content)\n\tif err != nil {\n\t\tfmt.Println(\"Error sending message: \", err)\n\t}\n}\n<commit_msg>Use fully qualified imports<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/bwmarrin\/discordgo\"\n\t\"github.com\/coolbrow\/dankbot\/images\"\n\t\"github.com\/coolbrow\/dankbot\/reddit\"\n\t\"google.golang.org\/appengine\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\nfunc main() {\n\ttoken := flag.String(\"token\", \"\", \"Client token\")\n\tflag.Parse()\n\n\tif *token == \"\" {\n\t\tfmt.Println(\"Must provide client token. Run with -h for more info\")\n\t\treturn\n\t}\n\tdiscord, err := discordgo.New(\"Bot \" + *token)\n\tif err != nil {\n\t\tfmt.Println(\"Error authenticating: \", err)\n\t\treturn\n\t}\n\tdiscord.AddHandler(newMessage)\n\terr = discord.Open()\n\tif err != nil {\n\t\tfmt.Println(\"Error opening Discord session: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"DankBot is now running.\")\n\tappengine.Main()\n}\n\nfunc newMessage(s *discordgo.Session, m *discordgo.MessageCreate) {\n\targs := strings.Split(m.Content, \" \")\n\tswitch args[0] {\n\tcase \"!sombra\":\n\t\tsendMessage(s, m.ChannelID, images.Sombra())\n\tcase \"!reddit\":\n\t\tvar msg string\n\t\tif len(args) >= 3 {\n\t\t\tmsg = reddit.RandomSearch(args[1], strings.Join(args[1:], \" \"))\n\t\t} else if len(args) == 2 {\n\t\t\tmsg = reddit.Random(args[1])\n\t\t} else {\n\t\t\tmsg = \"*Must provide a subreddit.*\"\n\t\t}\n\t\tif msg == \"\" {\n\t\t\tmsg = \"*No results found.*\"\n\t\t}\n\t\tsendMessage(s, m.ChannelID, msg)\n\tcase \"!rip\":\n\t\tname := url.QueryEscape(strings.Join(args[1:], \" \"))\n\t\tsendMessage(s, m.ChannelID, images.GenerateRIP(name))\n\tcase \"!retro\":\n\t\tlines := strings.Split(strings.Join(args[1:], \" \"), \",\")\n\t\tvar text1, text2, text3 string\n\t\tswitch {\n\t\tcase len(lines) >= 3:\n\t\t\ttext3 = strings.Join(lines[2:], \" \")\n\t\t\tfallthrough\n\t\tcase len(lines) == 2:\n\t\t\ttext2 = lines[1]\n\t\t\tfallthrough\n\t\tcase len(lines) == 1:\n\t\t\ttext1 = lines[0]\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\tsendMessage(s, m.ChannelID, images.GenerateRetro(text1, text2, text3))\n\t\t}\n\t}\n}\n\nfunc sendMessage(s *discordgo.Session, channelID string, content string) {\n\t_, err := s.ChannelMessageSend(channelID, content)\n\tif err != nil {\n\t\tfmt.Println(\"Error sending message: \", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Let's Encrypt client to go!\n\/\/ CLI application for generating Let's Encrypt certificates using the ACME package.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/xenolf\/lego\/acme\"\n)\n\n\/\/ Logger is used to log errors; if nil, the default log.Logger is used.\nvar Logger *log.Logger\n\n\/\/ logger is an helper function to retrieve the available logger\nfunc logger() *log.Logger {\n\tif Logger == nil {\n\t\tLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\treturn Logger\n}\n\nvar gittag string\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lego\"\n\tapp.Usage = \"Let's Encrypt client written in Go\"\n\n\tversion := \"0.4.1\"\n\tif strings.HasPrefix(gittag, \"v\") {\n\t\tversion = gittag\n\t}\n\n\tapp.Version = version\n\n\tacme.UserAgent = \"lego\/\" + app.Version\n\n\tdefaultPath := \"\"\n\tcwd, err := os.Getwd()\n\tif err == nil {\n\t\tdefaultPath = path.Join(cwd, \".lego\")\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\tif c.GlobalString(\"path\") == \"\" {\n\t\t\tlogger().Fatal(\"Could not determine current working directory. Please pass --path.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"Register an account, then create and install a certificate\",\n\t\t\tAction: run,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"revoke\",\n\t\t\tUsage: \"Revoke a certificate\",\n\t\t\tAction: revoke,\n\t\t},\n\t\t{\n\t\t\tName: \"renew\",\n\t\t\tUsage: \"Renew a certificate\",\n\t\t\tAction: renew,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"days\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tUsage: \"The number of days left on a certificate to renew it.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"reuse-key\",\n\t\t\t\t\tUsage: \"Used to indicate you want to reuse your current private key for the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dnshelp\",\n\t\t\tUsage: \"Shows additional help for the --dns global option\",\n\t\t\tAction: dnshelp,\n\t\t},\n\t}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"domains, d\",\n\t\t\tUsage: \"Add a domain to the process. Can be specified multiple times.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"csr, c\",\n\t\t\tUsage: \"Certificate signing request filename, if an external CSR is to be used\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"server, s\",\n\t\t\tValue: \"https:\/\/acme-v01.api.letsencrypt.org\/directory\",\n\t\t\tUsage: \"CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"email, m\",\n\t\t\tUsage: \"Email used for registration and recovery contact.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"accept-tos, a\",\n\t\t\tUsage: \"By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key-type, k\",\n\t\t\tValue: \"rsa2048\",\n\t\t\tUsage: \"Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Directory to use for storing the data\",\n\t\t\tValue: defaultPath,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"exclude, x\",\n\t\t\tUsage: \"Explicitly disallow solvers by name from being used. Solvers: \\\"http-01\\\", \\\"tls-sni-01\\\".\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"webroot\",\n\t\t\tUsage: \"Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known\/acme-challenge\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"memcached-host\",\n\t\t\tUsage: \"Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"http\",\n\t\t\tUsage: \"Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"tls\",\n\t\t\tUsage: \"Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dns\",\n\t\t\tUsage: \"Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"http-timeout\",\n\t\t\tUsage: \"Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"dns-timeout\",\n\t\t\tUsage: \"Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"dns-resolvers\",\n\t\t\tUsage: \"Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"pem\",\n\t\t\tUsage: \"Generate a .pem file by concatanating the .key and .crt files together.\",\n\t\t},\n\t}\n\n\terr = app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc dnshelp(c *cli.Context) error {\n\tfmt.Printf(\n\t\t`Credentials for DNS providers must be passed through environment variables.\n\nHere is an example bash command using the CloudFlare DNS provider:\n\n $ CLOUDFLARE_EMAIL=foo@bar.com \\\n CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \\\n lego --dns cloudflare --domains www.example.com --email me@bar.com run\n\n`)\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\\t', 0)\n\tfmt.Fprintln(w, \"Valid providers and their associated credential environment variables:\")\n\tfmt.Fprintln(w)\n\tfmt.Fprintln(w, \"\\tazure:\\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP\")\n\tfmt.Fprintln(w, \"\\tauroradns:\\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tcloudflare:\\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tdigitalocean:\\tDO_AUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsimple:\\tDNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsmadeeasy:\\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET\")\n\tfmt.Fprintln(w, \"\\texoscale:\\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tgandi:\\tGANDI_API_KEY\")\n\tfmt.Fprintln(w, \"\\tgcloud:\\tGCE_PROJECT, GCE_SERVICE_ACCOUNT_FILE\")\n\tfmt.Fprintln(w, \"\\tlinode:\\tLINODE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tmanual:\\tnone\")\n\tfmt.Fprintln(w, \"\\tnamecheap:\\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY\")\n\tfmt.Fprintln(w, \"\\trackspace:\\tRACKSPACE_USER, RACKSPACE_API_KEY\")\n\tfmt.Fprintln(w, \"\\trfc2136:\\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\\n\\t\\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER\")\n\tfmt.Fprintln(w, \"\\troute53:\\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_HOSTED_ZONE_ID\")\n\tfmt.Fprintln(w, \"\\tdyn:\\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD\")\n\tfmt.Fprintln(w, \"\\tvultr:\\tVULTR_API_KEY\")\n\tfmt.Fprintln(w, \"\\tovh:\\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY\")\n\tfmt.Fprintln(w, \"\\tpdns:\\tPDNS_API_KEY, PDNS_API_URL\")\n\tfmt.Fprintln(w, \"\\tdnspod:\\tDNSPOD_API_KEY\")\n\tfmt.Fprintln(w, \"\\totc:\\tOTC_USER_NAME, OTC_PASSWORD, OTC_PROJECT_NAME, OTC_DOMAIN_NAME, OTC_IDENTITY_ENDPOINT\")\n\tw.Flush()\n\n\tfmt.Println(`\nFor a more detailed explanation of a DNS provider's credential variables,\nplease consult their online documentation.`)\n\n\treturn nil\n}\n<commit_msg>cli: Correct help text for --dns-resolvers default (#462)<commit_after>\/\/ Let's Encrypt client to go!\n\/\/ CLI application for generating Let's Encrypt certificates using the ACME package.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/xenolf\/lego\/acme\"\n)\n\n\/\/ Logger is used to log errors; if nil, the default log.Logger is used.\nvar Logger *log.Logger\n\n\/\/ logger is an helper function to retrieve the available logger\nfunc logger() *log.Logger {\n\tif Logger == nil {\n\t\tLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\treturn Logger\n}\n\nvar gittag string\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lego\"\n\tapp.Usage = \"Let's Encrypt client written in Go\"\n\n\tversion := \"0.4.1\"\n\tif strings.HasPrefix(gittag, \"v\") {\n\t\tversion = gittag\n\t}\n\n\tapp.Version = version\n\n\tacme.UserAgent = \"lego\/\" + app.Version\n\n\tdefaultPath := \"\"\n\tcwd, err := os.Getwd()\n\tif err == nil {\n\t\tdefaultPath = path.Join(cwd, \".lego\")\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\tif c.GlobalString(\"path\") == \"\" {\n\t\t\tlogger().Fatal(\"Could not determine current working directory. Please pass --path.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"Register an account, then create and install a certificate\",\n\t\t\tAction: run,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"revoke\",\n\t\t\tUsage: \"Revoke a certificate\",\n\t\t\tAction: revoke,\n\t\t},\n\t\t{\n\t\t\tName: \"renew\",\n\t\t\tUsage: \"Renew a certificate\",\n\t\t\tAction: renew,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"days\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tUsage: \"The number of days left on a certificate to renew it.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"reuse-key\",\n\t\t\t\t\tUsage: \"Used to indicate you want to reuse your current private key for the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dnshelp\",\n\t\t\tUsage: \"Shows additional help for the --dns global option\",\n\t\t\tAction: dnshelp,\n\t\t},\n\t}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"domains, d\",\n\t\t\tUsage: \"Add a domain to the process. Can be specified multiple times.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"csr, c\",\n\t\t\tUsage: \"Certificate signing request filename, if an external CSR is to be used\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"server, s\",\n\t\t\tValue: \"https:\/\/acme-v01.api.letsencrypt.org\/directory\",\n\t\t\tUsage: \"CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"email, m\",\n\t\t\tUsage: \"Email used for registration and recovery contact.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"accept-tos, a\",\n\t\t\tUsage: \"By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key-type, k\",\n\t\t\tValue: \"rsa2048\",\n\t\t\tUsage: \"Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Directory to use for storing the data\",\n\t\t\tValue: defaultPath,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"exclude, x\",\n\t\t\tUsage: \"Explicitly disallow solvers by name from being used. Solvers: \\\"http-01\\\", \\\"tls-sni-01\\\".\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"webroot\",\n\t\t\tUsage: \"Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known\/acme-challenge\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"memcached-host\",\n\t\t\tUsage: \"Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"http\",\n\t\t\tUsage: \"Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"tls\",\n\t\t\tUsage: \"Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dns\",\n\t\t\tUsage: \"Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"http-timeout\",\n\t\t\tUsage: \"Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"dns-timeout\",\n\t\t\tUsage: \"Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"dns-resolvers\",\n\t\t\tUsage: \"Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use the system resolvers, or Google's DNS resolvers if the system's cannot be determined.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"pem\",\n\t\t\tUsage: \"Generate a .pem file by concatanating the .key and .crt files together.\",\n\t\t},\n\t}\n\n\terr = app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc dnshelp(c *cli.Context) error {\n\tfmt.Printf(\n\t\t`Credentials for DNS providers must be passed through environment variables.\n\nHere is an example bash command using the CloudFlare DNS provider:\n\n $ CLOUDFLARE_EMAIL=foo@bar.com \\\n CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \\\n lego --dns cloudflare --domains www.example.com --email me@bar.com run\n\n`)\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\\t', 0)\n\tfmt.Fprintln(w, \"Valid providers and their associated credential environment variables:\")\n\tfmt.Fprintln(w)\n\tfmt.Fprintln(w, \"\\tazure:\\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP\")\n\tfmt.Fprintln(w, \"\\tauroradns:\\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tcloudflare:\\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tdigitalocean:\\tDO_AUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsimple:\\tDNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsmadeeasy:\\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET\")\n\tfmt.Fprintln(w, \"\\texoscale:\\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tgandi:\\tGANDI_API_KEY\")\n\tfmt.Fprintln(w, \"\\tgcloud:\\tGCE_PROJECT, GCE_SERVICE_ACCOUNT_FILE\")\n\tfmt.Fprintln(w, \"\\tlinode:\\tLINODE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tmanual:\\tnone\")\n\tfmt.Fprintln(w, \"\\tnamecheap:\\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY\")\n\tfmt.Fprintln(w, \"\\trackspace:\\tRACKSPACE_USER, RACKSPACE_API_KEY\")\n\tfmt.Fprintln(w, \"\\trfc2136:\\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\\n\\t\\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER\")\n\tfmt.Fprintln(w, \"\\troute53:\\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_HOSTED_ZONE_ID\")\n\tfmt.Fprintln(w, \"\\tdyn:\\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD\")\n\tfmt.Fprintln(w, \"\\tvultr:\\tVULTR_API_KEY\")\n\tfmt.Fprintln(w, \"\\tovh:\\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY\")\n\tfmt.Fprintln(w, \"\\tpdns:\\tPDNS_API_KEY, PDNS_API_URL\")\n\tfmt.Fprintln(w, \"\\tdnspod:\\tDNSPOD_API_KEY\")\n\tfmt.Fprintln(w, \"\\totc:\\tOTC_USER_NAME, OTC_PASSWORD, OTC_PROJECT_NAME, OTC_DOMAIN_NAME, OTC_IDENTITY_ENDPOINT\")\n\tw.Flush()\n\n\tfmt.Println(`\nFor a more detailed explanation of a DNS provider's credential variables,\nplease consult their online documentation.`)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package weso\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Parse parse string to template name and args.\nfunc Parse(s string) []string {\n\tr, _ := regexp.Compile(\"(\\\".*\\\"|[^\\\\s]*)\")\n\tss := r.FindAllString(s+\" \", -1)\n\tfor i, s := range ss {\n\t\tif strings.HasPrefix(s, \"\\\"\") {\n\t\t\tss[i] = s[1 : len(s)-1]\n\t\t}\n\t}\n\treturn ss\n}\n\n\/\/ CLI is interface for websocket.\ntype CLI struct {\n\tconn *websocket.Conn\n\tliner *liner.State\n\ttemplate *Template\n\tdebug bool\n}\n\n\/\/ NewCLI create CLI from Config.\nfunc NewCLI(conf *Config) (*CLI, error) {\n\torigin := \"http:\/\/127.0.0.1\"\n\tif conf.Origin != \"\" {\n\t\torigin = conf.Origin\n\t}\n\tconn, err := websocket.Dial(conf.URL, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplate := EmptyTemplate\n\tif _, err := os.Stat(conf.Template); err == nil {\n\t\ttemplate, err = NewTemplateFile(conf.Template)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcli := &CLI{\n\t\tconn,\n\t\tliner.NewLiner(),\n\t\ttemplate,\n\t\tconf.Debug,\n\t}\n\n\tcli.liner.SetCtrlCAborts(true)\n\tcli.liner.SetCompleter(Completer(template))\n\n\treturn cli, nil\n}\n\n\/\/ QuitRequest is used when websocket should be closed.\nvar QuitRequest = errors.New(\"quit request\")\n\n\/\/ Close close connection.\nfunc (c *CLI) Close() {\n\tc.conn.Close()\n\tc.liner.Close()\n}\n\n\/\/ Run start receive and send loop.\nfunc (c *CLI) Run() error {\n\tec := make(chan error)\n\tgo func() {\n\t\tfor {\n\t\t\terr := c.Send()\n\t\t\tif err != nil {\n\t\t\t\tec <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\terr := c.Receive()\n\t\t\tif err != nil {\n\t\t\t\tec <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\terr := <-ec\n\tif err == QuitRequest {\n\t\treturn nil\n\t}\n\treturn err\n}\n\n\/\/ Send read command from Stdin and send message.\nfunc (c *CLI) Send() error {\n\ttext, err := c.liner.Prompt(\"> \")\n\tif err == io.EOF || err == liner.ErrPromptAborted {\n\t\treturn QuitRequest\n\t} else if err != nil {\n\t\treturn err\n\t} else if text == \"\" {\n\t\treturn nil\n\t}\n\n\tc.liner.AppendHistory(text)\n\n\tif strings.HasPrefix(text, \".\") {\n\t\targs := Parse(text)\n\t\tname := args[0][1:]\n\t\tif c.template.IsDefined(name) {\n\t\t\tmsg, err := c.template.Apply(name, args[1:]...)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"! error on template\", name, err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif c.debug {\n\t\t\t\tfmt.Println(\"! expanded template:\", string(msg))\n\t\t\t}\n\t\t\tif _, err := c.conn.Write(msg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"! no template for\", name)\n\t\t}\n\t} else {\n\t\tif _, err := c.conn.Write([]byte(text)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Receive receive messages and print them.\nfunc (c *CLI) Receive() error {\n\tdata := make([]byte, 1024)\n\tif _, err := c.conn.Read(data); err != nil {\n\t\tif err == io.EOF {\n\t\t\tfmt.Println(\"\\r! connection closed\")\n\t\t\treturn QuitRequest\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Println(\"\\r<<\", string(data))\n\treturn nil\n}\n<commit_msg>Fix Parse bug<commit_after>package weso\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Parse parse string to template name and args.\nfunc Parse(s string) []string {\n\tr, _ := regexp.Compile(\"(\\\".+\\\"|[^\\\\s]+)\")\n\tss := r.FindAllString(s+\" \", -1)\n\tfor i, s := range ss {\n\t\tif strings.HasPrefix(s, \"\\\"\") && strings.HasSuffix(s, \"\\\"\") && len(s) >= 2 {\n\t\t\tss[i] = s[1 : len(s)-1]\n\t\t}\n\t}\n\treturn ss\n}\n\n\/\/ CLI is interface for websocket.\ntype CLI struct {\n\tconn *websocket.Conn\n\tliner *liner.State\n\ttemplate *Template\n\tdebug bool\n}\n\n\/\/ NewCLI create CLI from Config.\nfunc NewCLI(conf *Config) (*CLI, error) {\n\torigin := \"http:\/\/127.0.0.1\"\n\tif conf.Origin != \"\" {\n\t\torigin = conf.Origin\n\t}\n\tconn, err := websocket.Dial(conf.URL, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplate := EmptyTemplate\n\tif _, err := os.Stat(conf.Template); err == nil {\n\t\ttemplate, err = NewTemplateFile(conf.Template)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcli := &CLI{\n\t\tconn,\n\t\tliner.NewLiner(),\n\t\ttemplate,\n\t\tconf.Debug,\n\t}\n\n\tcli.liner.SetCtrlCAborts(true)\n\tcli.liner.SetCompleter(Completer(template))\n\n\treturn cli, nil\n}\n\n\/\/ QuitRequest is used when websocket should be closed.\nvar QuitRequest = errors.New(\"quit request\")\n\n\/\/ Close close connection.\nfunc (c *CLI) Close() {\n\tc.conn.Close()\n\tc.liner.Close()\n}\n\n\/\/ Run start receive and send loop.\nfunc (c *CLI) Run() error {\n\tec := make(chan error)\n\tgo func() {\n\t\tfor {\n\t\t\terr := c.Send()\n\t\t\tif err != nil {\n\t\t\t\tec <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\terr := c.Receive()\n\t\t\tif err != nil {\n\t\t\t\tec <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\terr := <-ec\n\tif err == QuitRequest {\n\t\treturn nil\n\t}\n\treturn err\n}\n\n\/\/ Send read command from Stdin and send message.\nfunc (c *CLI) Send() error {\n\ttext, err := c.liner.Prompt(\"> \")\n\tif err == io.EOF || err == liner.ErrPromptAborted {\n\t\treturn QuitRequest\n\t} else if err != nil {\n\t\treturn err\n\t} else if text == \"\" {\n\t\treturn nil\n\t}\n\n\tc.liner.AppendHistory(text)\n\n\tif strings.HasPrefix(text, \".\") {\n\t\targs := Parse(text)\n\t\tname := args[0][1:]\n\t\tif c.template.IsDefined(name) {\n\t\t\tmsg, err := c.template.Apply(name, args[1:]...)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"! error on template\", name, err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif c.debug {\n\t\t\t\tfmt.Println(\"! expanded template:\", string(msg))\n\t\t\t}\n\t\t\tif _, err := c.conn.Write(msg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"! no template for\", name)\n\t\t}\n\t} else {\n\t\tif _, err := c.conn.Write([]byte(text)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Receive receive messages and print them.\nfunc (c *CLI) Receive() error {\n\tdata := make([]byte, 1024)\n\tif _, err := c.conn.Read(data); err != nil {\n\t\tif err == io.EOF {\n\t\t\tfmt.Println(\"\\r! connection closed\")\n\t\t\treturn QuitRequest\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Println(\"\\r<<\", string(data))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 com authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ Package com is an open source project for commonly used functions for the Go programming language.\npackage com\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ ------------------------------\n\/\/ Color log output.\n\/\/ ------------------------------\n\nconst (\n\tGray = uint8(iota + 90)\n\tRed\n\tGreen\n\tYellow\n\tBlue\n\tMagenta\n\t\/\/NRed = uint8(31) \/\/ Normal\n\tEndColor = \"\\033[0m\"\n)\n\n\/\/ ColorLog colors log and print to stdout.\n\/\/ See color rules in function 'ColorLogS'.\nfunc ColorLog(format string, a ...interface{}) {\n\tfmt.Print(ColorLogS(format, a...))\n}\n\n\/\/ ColorLogS colors log and return colored content.\n\/\/ Log format: <level> <content [highlight][path]> [ error ].\n\/\/ Level: TRAC -> blue; ERRO -> red; WARN -> Magenta; SUCC -> green; others -> default.\n\/\/ Content: default; path: yellow; error -> red.\n\/\/ Level has to be surrounded by \"[\" and \"]\".\n\/\/ Highlights have to be surrounded by \"# \" and \" #\"(space), \"#\" will be deleted.\n\/\/ Paths have to be surrounded by \"( \" and \" )\"(space).\n\/\/ Errors have to be surrounded by \"[ \" and \" ]\"(space).\n\/\/ Note: it hasn't support windows yet, contribute is welcome.\nfunc ColorLogS(format string, a ...interface{}) string {\n\tlog := fmt.Sprintf(format, a...)\n\n\tvar clog string\n\n\tif runtime.GOOS != \"windows\" {\n\n\t\t\/\/ Level.\n\t\ti := strings.Index(log, \"]\")\n\t\tif log[0] == '[' && i > -1 {\n\t\t\tclog += \"[\" + getColorLevel(log[1:i]) + \"]\"\n\t\t}\n\n\t\tlog = log[i+1:]\n\n\t\t\/\/ Error.\n\t\tlog = strings.Replace(log, \"[ \", fmt.Sprintf(\"[\\033[%dm\", Red), -1)\n\t\tlog = strings.Replace(log, \" ]\", EndColor+\"]\", -1)\n\n\t\t\/\/ Path.\n\t\tlog = strings.Replace(log, \"( \", fmt.Sprintf(\"(\\033[%dm\", Yellow), -1)\n\t\tlog = strings.Replace(log, \" )\", EndColor+\")\", -1)\n\n\t\t\/\/ Highlights.\n\t\tlog = strings.Replace(log, \"# \", fmt.Sprintf(\"\\033[%dm\", Gray), -1)\n\t\tlog = strings.Replace(log, \" #\", EndColor, -1)\n\n\t\tlog = clog + log\n\n\t} else {\n\t\t\/\/ Level.\n\t\ti := strings.Index(log, \"]\")\n\t\tif log[0] == '[' && i > -1 {\n\t\t\tclog += \"[\" + log[1:i] + \"]\"\n\t\t}\n\n\t\tlog = log[i+1:]\n\n\t\t\/\/ Error.\n\t\tlog = strings.Replace(log, \"[ \", \"[\", -1)\n\t\tlog = strings.Replace(log, \" ]\", \"]\", -1)\n\n\t\t\/\/ Path.\n\t\tlog = strings.Replace(log, \"( \", \"(\", -1)\n\t\tlog = strings.Replace(log, \" )\", \")\", -1)\n\n\t\t\/\/ Highlights.\n\t\tlog = strings.Replace(log, \"# \", \"\", -1)\n\t\tlog = strings.Replace(log, \" #\", \"\", -1)\n\n\t\tlog = clog + log\n\t}\n\n\treturn log\n}\n\n\/\/ getColorLevel returns colored level string by given level.\nfunc getColorLevel(level string) string {\n\tlevel = strings.ToUpper(level)\n\tswitch level {\n\tcase \"TRAC\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Blue, level)\n\tcase \"ERRO\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Red, level)\n\tcase \"WARN\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Magenta, level)\n\tcase \"SUCC\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Green, level)\n\tdefault:\n\t\treturn level\n\t}\n\treturn level\n}\n\n\/\/ ------------- END ------------\n\/\/ ExecCmd executes system command and returns output, stderr(both string type) and error in given work directory.\nfunc ExecCmdDir(dir, cmdName string, args ...string) (string, string, error) {\n\tbufOut := new(bytes.Buffer)\n\tbufErr := new(bytes.Buffer)\n\n\tcmd := exec.Command(cmdName, args...)\n\tcmd.Dir = dir\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tio.Copy(bufOut, stdout)\n\tio.Copy(bufErr, stderr)\n\n\tcmd.Wait()\n\n\treturn bufOut.String(), bufErr.String(), nil\n}\n\n\/\/ ExecCmd executes system command and returns output, stderr(both string type) and error.\nfunc ExecCmd(cmdName string, args ...string) (string, string, error) {\n\treturn ExecCmdDir(\"\", cmdName, args...)\n}\n<commit_msg>Add func<commit_after>\/\/ Copyright 2013 com authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\n\/\/ Package com is an open source project for commonly used functions for the Go programming language.\npackage com\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n\/\/ ------------------------------\n\/\/ Color log output.\n\/\/ ------------------------------\n\nconst (\n\tGray = uint8(iota + 90)\n\tRed\n\tGreen\n\tYellow\n\tBlue\n\tMagenta\n\t\/\/NRed = uint8(31) \/\/ Normal\n\tEndColor = \"\\033[0m\"\n)\n\n\/\/ ColorLog colors log and print to stdout.\n\/\/ See color rules in function 'ColorLogS'.\nfunc ColorLog(format string, a ...interface{}) {\n\tfmt.Print(ColorLogS(format, a...))\n}\n\n\/\/ ColorLogS colors log and return colored content.\n\/\/ Log format: <level> <content [highlight][path]> [ error ].\n\/\/ Level: TRAC -> blue; ERRO -> red; WARN -> Magenta; SUCC -> green; others -> default.\n\/\/ Content: default; path: yellow; error -> red.\n\/\/ Level has to be surrounded by \"[\" and \"]\".\n\/\/ Highlights have to be surrounded by \"# \" and \" #\"(space), \"#\" will be deleted.\n\/\/ Paths have to be surrounded by \"( \" and \" )\"(space).\n\/\/ Errors have to be surrounded by \"[ \" and \" ]\"(space).\n\/\/ Note: it hasn't support windows yet, contribute is welcome.\nfunc ColorLogS(format string, a ...interface{}) string {\n\tlog := fmt.Sprintf(format, a...)\n\n\tvar clog string\n\n\tif runtime.GOOS != \"windows\" {\n\n\t\t\/\/ Level.\n\t\ti := strings.Index(log, \"]\")\n\t\tif log[0] == '[' && i > -1 {\n\t\t\tclog += \"[\" + getColorLevel(log[1:i]) + \"]\"\n\t\t}\n\n\t\tlog = log[i+1:]\n\n\t\t\/\/ Error.\n\t\tlog = strings.Replace(log, \"[ \", fmt.Sprintf(\"[\\033[%dm\", Red), -1)\n\t\tlog = strings.Replace(log, \" ]\", EndColor+\"]\", -1)\n\n\t\t\/\/ Path.\n\t\tlog = strings.Replace(log, \"( \", fmt.Sprintf(\"(\\033[%dm\", Yellow), -1)\n\t\tlog = strings.Replace(log, \" )\", EndColor+\")\", -1)\n\n\t\t\/\/ Highlights.\n\t\tlog = strings.Replace(log, \"# \", fmt.Sprintf(\"\\033[%dm\", Gray), -1)\n\t\tlog = strings.Replace(log, \" #\", EndColor, -1)\n\n\t\tlog = clog + log\n\n\t} else {\n\t\t\/\/ Level.\n\t\ti := strings.Index(log, \"]\")\n\t\tif log[0] == '[' && i > -1 {\n\t\t\tclog += \"[\" + log[1:i] + \"]\"\n\t\t}\n\n\t\tlog = log[i+1:]\n\n\t\t\/\/ Error.\n\t\tlog = strings.Replace(log, \"[ \", \"[\", -1)\n\t\tlog = strings.Replace(log, \" ]\", \"]\", -1)\n\n\t\t\/\/ Path.\n\t\tlog = strings.Replace(log, \"( \", \"(\", -1)\n\t\tlog = strings.Replace(log, \" )\", \")\", -1)\n\n\t\t\/\/ Highlights.\n\t\tlog = strings.Replace(log, \"# \", \"\", -1)\n\t\tlog = strings.Replace(log, \" #\", \"\", -1)\n\n\t\tlog = clog + log\n\t}\n\n\treturn log\n}\n\n\/\/ getColorLevel returns colored level string by given level.\nfunc getColorLevel(level string) string {\n\tlevel = strings.ToUpper(level)\n\tswitch level {\n\tcase \"TRAC\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Blue, level)\n\tcase \"ERRO\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Red, level)\n\tcase \"WARN\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Magenta, level)\n\tcase \"SUCC\":\n\t\treturn fmt.Sprintf(\"\\033[%dm%s\\033[0m\", Green, level)\n\tdefault:\n\t\treturn level\n\t}\n\treturn level\n}\n\n\/\/ ------------- END ------------\n\nfunc ExecCmdDirBytes(dir, cmdName string, args ...string) ([]byte, []byte, error) {\n\tzeroByte := []byte(\"\")\n\tbufOut := new(bytes.Buffer)\n\tbufErr := new(bytes.Buffer)\n\n\tcmd := exec.Command(cmdName, args...)\n\tcmd.Dir = dir\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn zeroByte, zeroByte, err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn zeroByte, zeroByte, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn zeroByte, zeroByte, err\n\t}\n\n\tio.Copy(bufOut, stdout)\n\tio.Copy(bufErr, stderr)\n\n\tcmd.Wait()\n\n\treturn bufOut.Bytes(), bufErr.Bytes(), nil\n}\n\n\/\/ ExecCmd executes system command and returns output, stderr(both string type) and error in given work directory.\nfunc ExecCmdDir(dir, cmdName string, args ...string) (string, string, error) {\n\tbufOut, bufErr, err := ExecCmdDirBytes(dir, cmdName, args...)\n\treturn string(bufOut), string(bufErr), err\n}\n\n\/\/ ExecCmd executes system command and returns output, stderr(both string type) and error.\nfunc ExecCmd(cmdName string, args ...string) (string, string, error) {\n\treturn ExecCmdDir(\"\", cmdName, args...)\n}\n\nfunc ExecCmdBytes(cmdName string, args ...string) ([]byte, []byte, error) {\n\treturn ExecCmdDirBytes(\"\", cmdName, args...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/aktau\/github-release\/github\"\n)\n\nfunc infocmd(opt Options) error {\n\tuser := nvls(opt.Info.User, EnvUser)\n\trepo := nvls(opt.Info.Repo, EnvRepo)\n\ttoken := nvls(opt.Info.Token, EnvToken)\n\ttag := opt.Info.Tag\n\n\tif user == \"\" || repo == \"\" {\n\t\treturn fmt.Errorf(\"user and repo need to be passed as arguments\")\n\t}\n\n\t\/* find regular git tags *\/\n\tallTags, err := Tags(user, repo, token)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch tags, %v\", err)\n\t}\n\tif len(allTags) == 0 {\n\t\treturn fmt.Errorf(\"no tags available for %v\/%v\", user, repo)\n\t}\n\n\t\/* list all tags *\/\n\ttags := make([]Tag, 0, len(allTags))\n\tfor _, t := range allTags {\n\t\t\/* if the user only requested see one tag, skip the ones that\n\t\t * don't match *\/\n\t\tif tag != \"\" && t.Name != tag {\n\t\t\tcontinue\n\t\t}\n\t\ttags = append(tags, t)\n\t}\n\n\t\/* if no tags conformed to the users' request, exit *\/\n\tif len(tags) == 0 {\n\t\treturn fmt.Errorf(\"no tag '%v' was found for %v\/%v\", tag, user, repo)\n\t}\n\n\tfmt.Println(\"git tags:\")\n\tfor _, t := range tags {\n\t\tfmt.Println(\"-\", t.String())\n\t}\n\n\t\/* list releases + assets *\/\n\tvar releases []Release\n\tif tag == \"\" {\n\t\t\/* get all releases *\/\n\t\tvprintf(\"%v\/%v: getting information for all releases\\n\", user, repo)\n\t\treleases, err = Releases(user, repo, token)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/* get only one release *\/\n\t\tvprintf(\"%v\/%v\/%v: getting information for the release\\n\", user, repo, tag)\n\t\trelease, err := ReleaseOfTag(user, repo, tag, token)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treleases = []Release{*release}\n\t}\n\n\t\/* if no tags conformed to the users' request, exit *\/\n\tif len(releases) == 0 {\n\t\treturn fmt.Errorf(\"no release(s) were found for %v\/%v (%v)\", user, repo, tag)\n\t}\n\n\tfmt.Println(\"releases:\")\n\tfor _, release := range releases {\n\t\tfmt.Println(\"-\", release.String())\n\t}\n\n\treturn nil\n}\n\nfunc uploadcmd(opt Options) error {\n\tuser := nvls(opt.Upload.User, EnvUser)\n\trepo := nvls(opt.Upload.Repo, EnvRepo)\n\ttoken := nvls(opt.Upload.Token, EnvToken)\n\ttag := opt.Upload.Tag\n\tname := opt.Upload.Name\n\tlabel := opt.Upload.Label\n\tfile := opt.Upload.File\n\n\tvprintln(\"uploading...\")\n\n\tif file == nil {\n\t\treturn fmt.Errorf(\"provided file was not valid\")\n\t}\n\tdefer file.Close()\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find the release corresponding to the entered tag, if any.\n\trel, err := ReleaseOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv := url.Values{}\n\tv.Set(\"name\", name)\n\tif label != \"\" {\n\t\tv.Set(\"label\", label)\n\t}\n\n\turl := rel.CleanUploadUrl() + \"?\" + v.Encode()\n\n\tresp, err := github.DoAuthRequest(\"POST\", url, \"application\/octet-stream\",\n\t\ttoken, nil, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create upload request to %v, %v\", url, err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusCreated {\n\t\tif msg, err := ToMessage(resp.Body); err == nil {\n\t\t\treturn fmt.Errorf(\"could not upload, status code (%v), %v\",\n\t\t\t\tresp.Status, msg)\n\t\t}\n\t\treturn fmt.Errorf(\"could not upload, status code (%v)\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tvprintf(\"BODY: \")\n\t\tif _, err := io.Copy(os.Stderr, resp.Body); err != nil {\n\t\t\treturn fmt.Errorf(\"while reading response, %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc downloadcmd(opt Options) error {\n\tuser := nvls(opt.Download.User, EnvUser)\n\trepo := nvls(opt.Download.Repo, EnvRepo)\n\ttoken := nvls(opt.Download.Token, EnvToken)\n\ttag := opt.Download.Tag\n\tname := opt.Download.Name\n\tlatest := opt.Download.Latest\n\n\tvprintln(\"downloading...\")\n\n\tif err := ValidateTarget(user, repo, tag, latest); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find the release corresponding to the entered tag, if any.\n\tvar rel *Release\n\tvar err error\n\tif latest {\n\t\trel, err = LatestRelease(user, repo, token)\n\t} else {\n\t\trel, err = ReleaseOfTag(user, repo, tag, token)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tassetId := 0\n\tfor _, asset := range rel.Assets {\n\t\tif asset.Name == name {\n\t\t\tassetId = asset.Id\n\t\t}\n\t}\n\n\tif assetId == 0 {\n\t\treturn fmt.Errorf(\"coud not find asset named %s\", name)\n\t}\n\n\tvar resp *http.Response\n\tvar url string\n\tif token == \"\" {\n\t\tconst GithubURL = \"https:\/\/github.com\"\n\t\turl = GithubURL + fmt.Sprintf(\"\/%s\/%s\/releases\/download\/%s\/%s\", user, repo, tag, name)\n\t\tresp, err = http.Get(url)\n\t} else {\n\t\turl = nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(ASSET_DOWNLOAD_URI, user, repo, assetId)\n\t\tresp, err = github.DoAuthRequest(\"GET\", url, \"\", token, map[string]string{\n\t\t\t\"Accept\": \"application\/octet-stream\",\n\t\t}, nil)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch releases, %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"GET\", url, \"->\", resp)\n\n\tcontentLength, err := strconv.ParseInt(resp.Header.Get(\"Content-Length\"), 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"github did not respond with 200 OK but with %v\", resp.Status)\n\t}\n\n\tout := os.Stdout \/\/ Pipe the asset to stdout by default.\n\tif isCharDevice(out) {\n\t\t\/\/ If stdout is a char device, assume it's a TTY (terminal). In this\n\t\t\/\/ case, don't pipe th easset to stdout, but create it as a file in\n\t\t\/\/ the current working folder.\n\t\tif out, err = os.Create(name); err != nil {\n\t\t\treturn fmt.Errorf(\"could not create file %s\", name)\n\t\t}\n\t\tdefer out.Close()\n\t}\n\n\tn, err := io.Copy(out, resp.Body)\n\tif n != contentLength {\n\t\treturn fmt.Errorf(\"downloaded data did not match content length %d != %d\", contentLength, n)\n\t}\n\treturn err\n}\n\nfunc ValidateTarget(user, repo, tag string, latest bool) error {\n\tif user == \"\" {\n\t\treturn fmt.Errorf(\"empty user\")\n\t}\n\tif repo == \"\" {\n\t\treturn fmt.Errorf(\"empty repo\")\n\t}\n\tif tag == \"\" && !latest {\n\t\treturn fmt.Errorf(\"empty tag\")\n\t}\n\treturn nil\n}\n\nfunc ValidateCredentials(user, repo, token, tag string) error {\n\tif err := ValidateTarget(user, repo, tag, false); err != nil {\n\t\treturn err\n\t}\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"empty token\")\n\t}\n\treturn nil\n}\n\nfunc releasecmd(opt Options) error {\n\tcmdopt := opt.Release\n\tuser := nvls(cmdopt.User, EnvUser)\n\trepo := nvls(cmdopt.Repo, EnvRepo)\n\ttoken := nvls(cmdopt.Token, EnvToken)\n\ttag := cmdopt.Tag\n\tname := nvls(cmdopt.Name, tag)\n\tdesc := nvls(cmdopt.Desc, tag)\n\ttarget := nvls(cmdopt.Target)\n\tdraft := cmdopt.Draft\n\tprerelease := cmdopt.Prerelease\n\n\tvprintln(\"releasing...\")\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\tparams := ReleaseCreate{\n\t\tTagName: tag,\n\t\tTargetCommitish: target,\n\t\tName: name,\n\t\tBody: desc,\n\t\tDraft: draft,\n\t\tPrerelease: prerelease,\n\t}\n\n\t\/* encode params as json *\/\n\tpayload, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't encode release creation params, %v\", err)\n\t}\n\treader := bytes.NewReader(payload)\n\n\tURL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(\"\/repos\/%s\/%s\/releases\", user, repo)\n\tresp, err := github.DoAuthRequest(\"POST\", URL, \"application\/json\", token, nil, reader)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while submitting %v, %v\", string(payload), err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusCreated {\n\t\tif resp.StatusCode == 422 {\n\t\t\treturn fmt.Errorf(\"github returned %v (this is probably because the release already exists)\",\n\t\t\t\tresp.Status)\n\t\t}\n\t\treturn fmt.Errorf(\"github returned %v\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading response, %v\", err)\n\t\t}\n\t\tvprintln(\"BODY:\", string(body))\n\t}\n\n\treturn nil\n}\n\nfunc editcmd(opt Options) error {\n\tcmdopt := opt.Edit\n\tuser := nvls(cmdopt.User, EnvUser)\n\trepo := nvls(cmdopt.Repo, EnvRepo)\n\ttoken := nvls(cmdopt.Token, EnvToken)\n\ttag := cmdopt.Tag\n\tname := nvls(cmdopt.Name, tag)\n\tdesc := nvls(cmdopt.Desc, tag)\n\tdraft := cmdopt.Draft\n\tprerelease := cmdopt.Prerelease\n\n\tvprintln(\"editing...\")\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\tid, err := IdOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvprintf(\"release %v has id %v\\n\", tag, id)\n\n\t\/* the release create struct works for editing releases as well *\/\n\tparams := ReleaseCreate{\n\t\tTagName: tag,\n\t\tName: name,\n\t\tBody: desc,\n\t\tDraft: draft,\n\t\tPrerelease: prerelease,\n\t}\n\n\t\/* encode the parameters as JSON, as required by the github API *\/\n\tpayload, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't encode release creation params, %v\", err)\n\t}\n\n\tURL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(\"\/repos\/%s\/%s\/releases\/%d\", user, repo, id)\n\tresp, err := github.DoAuthRequest(\"PATCH\", URL, \"application\/json\", token, nil, bytes.NewReader(payload))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while submitting %v, %v\", string(payload), err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusOK {\n\t\tif resp.StatusCode == 422 {\n\t\t\treturn fmt.Errorf(\"github returned %v (this is probably because the release already exists)\",\n\t\t\t\tresp.Status)\n\t\t}\n\t\treturn fmt.Errorf(\"github returned unexpected status code %v\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading response, %v\", err)\n\t\t}\n\t\tvprintln(\"BODY:\", string(body))\n\t}\n\n\treturn nil\n}\n\nfunc deletecmd(opt Options) error {\n\tuser, repo, token, tag := nvls(opt.Delete.User, EnvUser),\n\t\tnvls(opt.Delete.Repo, EnvRepo),\n\t\tnvls(opt.Delete.Token, EnvToken),\n\t\topt.Delete.Tag\n\tvprintln(\"deleting...\")\n\n\tid, err := IdOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvprintf(\"release %v has id %v\\n\", tag, id)\n\n\tbaseURL := nvls(EnvApiEndpoint, github.DefaultBaseURL)\n\tresp, err := github.DoAuthRequest(\"DELETE\", baseURL+fmt.Sprintf(\"\/repos\/%s\/%s\/releases\/%d\",\n\t\tuser, repo, id), \"application\/json\", token, nil, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"release deletion unsuccesful, %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusNoContent {\n\t\treturn fmt.Errorf(\"could not delete the release corresponding to tag %s on repo %s\/%s\",\n\t\t\ttag, user, repo)\n\t}\n\n\treturn nil\n}\n<commit_msg>info: no tags\/releases is no longer an error<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/aktau\/github-release\/github\"\n)\n\nfunc infocmd(opt Options) error {\n\tuser := nvls(opt.Info.User, EnvUser)\n\trepo := nvls(opt.Info.Repo, EnvRepo)\n\ttoken := nvls(opt.Info.Token, EnvToken)\n\ttag := opt.Info.Tag\n\n\tif user == \"\" || repo == \"\" {\n\t\treturn fmt.Errorf(\"user and repo need to be passed as arguments\")\n\t}\n\n\t\/\/ Find regular git tags.\n\ttags, err := Tags(user, repo, token)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch tags, %v\", err)\n\t}\n\tif len(tags) == 0 {\n\t\treturn fmt.Errorf(\"no tags available for %v\/%v\", user, repo)\n\t}\n\n\tfmt.Println(\"tags:\")\n\tfor _, t := range tags {\n\t\t\/\/ If the user only requested one tag, filter out the rest.\n\t\tif tag == \"\" || t.Name == tag {\n\t\t\tfmt.Println(\"-\", &t)\n\t\t}\n\t}\n\n\t\/\/ List releases + assets.\n\tvar releases []Release\n\tif tag == \"\" {\n\t\t\/\/ Get all releases.\n\t\tvprintf(\"%v\/%v: getting information for all releases\\n\", user, repo)\n\t\treleases, err = Releases(user, repo, token)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Get only one release.\n\t\tvprintf(\"%v\/%v\/%v: getting information for the release\\n\", user, repo, tag)\n\t\trelease, err := ReleaseOfTag(user, repo, tag, token)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treleases = []Release{*release}\n\t}\n\n\tfmt.Println(\"releases:\")\n\tfor _, release := range releases {\n\t\tfmt.Println(\"-\", &release)\n\t}\n\n\treturn nil\n}\n\nfunc uploadcmd(opt Options) error {\n\tuser := nvls(opt.Upload.User, EnvUser)\n\trepo := nvls(opt.Upload.Repo, EnvRepo)\n\ttoken := nvls(opt.Upload.Token, EnvToken)\n\ttag := opt.Upload.Tag\n\tname := opt.Upload.Name\n\tlabel := opt.Upload.Label\n\tfile := opt.Upload.File\n\n\tvprintln(\"uploading...\")\n\n\tif file == nil {\n\t\treturn fmt.Errorf(\"provided file was not valid\")\n\t}\n\tdefer file.Close()\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find the release corresponding to the entered tag, if any.\n\trel, err := ReleaseOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv := url.Values{}\n\tv.Set(\"name\", name)\n\tif label != \"\" {\n\t\tv.Set(\"label\", label)\n\t}\n\n\turl := rel.CleanUploadUrl() + \"?\" + v.Encode()\n\n\tresp, err := github.DoAuthRequest(\"POST\", url, \"application\/octet-stream\",\n\t\ttoken, nil, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't create upload request to %v, %v\", url, err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusCreated {\n\t\tif msg, err := ToMessage(resp.Body); err == nil {\n\t\t\treturn fmt.Errorf(\"could not upload, status code (%v), %v\",\n\t\t\t\tresp.Status, msg)\n\t\t}\n\t\treturn fmt.Errorf(\"could not upload, status code (%v)\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tvprintf(\"BODY: \")\n\t\tif _, err := io.Copy(os.Stderr, resp.Body); err != nil {\n\t\t\treturn fmt.Errorf(\"while reading response, %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc downloadcmd(opt Options) error {\n\tuser := nvls(opt.Download.User, EnvUser)\n\trepo := nvls(opt.Download.Repo, EnvRepo)\n\ttoken := nvls(opt.Download.Token, EnvToken)\n\ttag := opt.Download.Tag\n\tname := opt.Download.Name\n\tlatest := opt.Download.Latest\n\n\tvprintln(\"downloading...\")\n\n\tif err := ValidateTarget(user, repo, tag, latest); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Find the release corresponding to the entered tag, if any.\n\tvar rel *Release\n\tvar err error\n\tif latest {\n\t\trel, err = LatestRelease(user, repo, token)\n\t} else {\n\t\trel, err = ReleaseOfTag(user, repo, tag, token)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tassetId := 0\n\tfor _, asset := range rel.Assets {\n\t\tif asset.Name == name {\n\t\t\tassetId = asset.Id\n\t\t}\n\t}\n\n\tif assetId == 0 {\n\t\treturn fmt.Errorf(\"coud not find asset named %s\", name)\n\t}\n\n\tvar resp *http.Response\n\tvar url string\n\tif token == \"\" {\n\t\tconst GithubURL = \"https:\/\/github.com\"\n\t\turl = GithubURL + fmt.Sprintf(\"\/%s\/%s\/releases\/download\/%s\/%s\", user, repo, tag, name)\n\t\tresp, err = http.Get(url)\n\t} else {\n\t\turl = nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(ASSET_DOWNLOAD_URI, user, repo, assetId)\n\t\tresp, err = github.DoAuthRequest(\"GET\", url, \"\", token, map[string]string{\n\t\t\t\"Accept\": \"application\/octet-stream\",\n\t\t}, nil)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not fetch releases, %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"GET\", url, \"->\", resp)\n\n\tcontentLength, err := strconv.ParseInt(resp.Header.Get(\"Content-Length\"), 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"github did not respond with 200 OK but with %v\", resp.Status)\n\t}\n\n\tout := os.Stdout \/\/ Pipe the asset to stdout by default.\n\tif isCharDevice(out) {\n\t\t\/\/ If stdout is a char device, assume it's a TTY (terminal). In this\n\t\t\/\/ case, don't pipe th easset to stdout, but create it as a file in\n\t\t\/\/ the current working folder.\n\t\tif out, err = os.Create(name); err != nil {\n\t\t\treturn fmt.Errorf(\"could not create file %s\", name)\n\t\t}\n\t\tdefer out.Close()\n\t}\n\n\tn, err := io.Copy(out, resp.Body)\n\tif n != contentLength {\n\t\treturn fmt.Errorf(\"downloaded data did not match content length %d != %d\", contentLength, n)\n\t}\n\treturn err\n}\n\nfunc ValidateTarget(user, repo, tag string, latest bool) error {\n\tif user == \"\" {\n\t\treturn fmt.Errorf(\"empty user\")\n\t}\n\tif repo == \"\" {\n\t\treturn fmt.Errorf(\"empty repo\")\n\t}\n\tif tag == \"\" && !latest {\n\t\treturn fmt.Errorf(\"empty tag\")\n\t}\n\treturn nil\n}\n\nfunc ValidateCredentials(user, repo, token, tag string) error {\n\tif err := ValidateTarget(user, repo, tag, false); err != nil {\n\t\treturn err\n\t}\n\tif token == \"\" {\n\t\treturn fmt.Errorf(\"empty token\")\n\t}\n\treturn nil\n}\n\nfunc releasecmd(opt Options) error {\n\tcmdopt := opt.Release\n\tuser := nvls(cmdopt.User, EnvUser)\n\trepo := nvls(cmdopt.Repo, EnvRepo)\n\ttoken := nvls(cmdopt.Token, EnvToken)\n\ttag := cmdopt.Tag\n\tname := nvls(cmdopt.Name, tag)\n\tdesc := nvls(cmdopt.Desc, tag)\n\ttarget := nvls(cmdopt.Target)\n\tdraft := cmdopt.Draft\n\tprerelease := cmdopt.Prerelease\n\n\tvprintln(\"releasing...\")\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\tparams := ReleaseCreate{\n\t\tTagName: tag,\n\t\tTargetCommitish: target,\n\t\tName: name,\n\t\tBody: desc,\n\t\tDraft: draft,\n\t\tPrerelease: prerelease,\n\t}\n\n\t\/* encode params as json *\/\n\tpayload, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't encode release creation params, %v\", err)\n\t}\n\treader := bytes.NewReader(payload)\n\n\tURL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(\"\/repos\/%s\/%s\/releases\", user, repo)\n\tresp, err := github.DoAuthRequest(\"POST\", URL, \"application\/json\", token, nil, reader)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while submitting %v, %v\", string(payload), err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusCreated {\n\t\tif resp.StatusCode == 422 {\n\t\t\treturn fmt.Errorf(\"github returned %v (this is probably because the release already exists)\",\n\t\t\t\tresp.Status)\n\t\t}\n\t\treturn fmt.Errorf(\"github returned %v\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading response, %v\", err)\n\t\t}\n\t\tvprintln(\"BODY:\", string(body))\n\t}\n\n\treturn nil\n}\n\nfunc editcmd(opt Options) error {\n\tcmdopt := opt.Edit\n\tuser := nvls(cmdopt.User, EnvUser)\n\trepo := nvls(cmdopt.Repo, EnvRepo)\n\ttoken := nvls(cmdopt.Token, EnvToken)\n\ttag := cmdopt.Tag\n\tname := nvls(cmdopt.Name, tag)\n\tdesc := nvls(cmdopt.Desc, tag)\n\tdraft := cmdopt.Draft\n\tprerelease := cmdopt.Prerelease\n\n\tvprintln(\"editing...\")\n\n\tif err := ValidateCredentials(user, repo, token, tag); err != nil {\n\t\treturn err\n\t}\n\n\tid, err := IdOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvprintf(\"release %v has id %v\\n\", tag, id)\n\n\t\/* the release create struct works for editing releases as well *\/\n\tparams := ReleaseCreate{\n\t\tTagName: tag,\n\t\tName: name,\n\t\tBody: desc,\n\t\tDraft: draft,\n\t\tPrerelease: prerelease,\n\t}\n\n\t\/* encode the parameters as JSON, as required by the github API *\/\n\tpayload, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't encode release creation params, %v\", err)\n\t}\n\n\tURL := nvls(EnvApiEndpoint, github.DefaultBaseURL) + fmt.Sprintf(\"\/repos\/%s\/%s\/releases\/%d\", user, repo, id)\n\tresp, err := github.DoAuthRequest(\"PATCH\", URL, \"application\/json\", token, nil, bytes.NewReader(payload))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while submitting %v, %v\", string(payload), err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvprintln(\"RESPONSE:\", resp)\n\tif resp.StatusCode != http.StatusOK {\n\t\tif resp.StatusCode == 422 {\n\t\t\treturn fmt.Errorf(\"github returned %v (this is probably because the release already exists)\",\n\t\t\t\tresp.Status)\n\t\t}\n\t\treturn fmt.Errorf(\"github returned unexpected status code %v\", resp.Status)\n\t}\n\n\tif VERBOSITY != 0 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while reading response, %v\", err)\n\t\t}\n\t\tvprintln(\"BODY:\", string(body))\n\t}\n\n\treturn nil\n}\n\nfunc deletecmd(opt Options) error {\n\tuser, repo, token, tag := nvls(opt.Delete.User, EnvUser),\n\t\tnvls(opt.Delete.Repo, EnvRepo),\n\t\tnvls(opt.Delete.Token, EnvToken),\n\t\topt.Delete.Tag\n\tvprintln(\"deleting...\")\n\n\tid, err := IdOfTag(user, repo, tag, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvprintf(\"release %v has id %v\\n\", tag, id)\n\n\tbaseURL := nvls(EnvApiEndpoint, github.DefaultBaseURL)\n\tresp, err := github.DoAuthRequest(\"DELETE\", baseURL+fmt.Sprintf(\"\/repos\/%s\/%s\/releases\/%d\",\n\t\tuser, repo, id), \"application\/json\", token, nil, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"release deletion unsuccesful, %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusNoContent {\n\t\treturn fmt.Errorf(\"could not delete the release corresponding to tag %s on repo %s\/%s\",\n\t\t\ttag, user, repo)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package nomockutil\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"os\/exec\"\n\n\t\"github.com\/go-errors\/errors\"\n)\n\nfunc RunCmd(name string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {\n\tlog.Printf(\"%v %v\", name, args)\n\tcmd := exec.Command(name, args...)\n\tvar outBuffer bytes.Buffer\n\tvar errBuffer bytes.Buffer\n\tcmd.Stdout = &outBuffer\n\tcmd.Stderr = &errBuffer\n\terr := cmd.Run()\n\tvar e error = nil\n\tif err != nil {\n\t\te = errors.Wrap(err, 1)\n\t\tlog.Printf(\"Err output: %v %v\", err, errBuffer.String())\n\t}\n\treturn &outBuffer, &errBuffer, e\n}\n<commit_msg>add Exists<commit_after>package nomockutil\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\n\t\"github.com\/go-errors\/errors\"\n)\n\nfunc RunCmd(name string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {\n\tlog.Printf(\"%v %v\", name, args)\n\tcmd := exec.Command(name, args...)\n\tvar outBuffer bytes.Buffer\n\tvar errBuffer bytes.Buffer\n\tcmd.Stdout = &outBuffer\n\tcmd.Stderr = &errBuffer\n\terr := cmd.Run()\n\tvar e error = nil\n\tif err != nil {\n\t\te = errors.Wrap(err, 1)\n\t\tlog.Printf(\"Err output: %v %v\", err, errBuffer.String())\n\t}\n\treturn &outBuffer, &errBuffer, e\n}\n\nfunc Exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true \/\/should record the error\n}\n<|endoftext|>"} {"text":"<commit_before>package csv\n\nimport (\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/oleiade\/reflections\"\n)\n\nvar (\n\tErrNoStruct = errors.New(\"interface is not a struct\")\n\tErrNoValidRecords = errors.New(\"no valid records found\")\n\tErrHeaderNotComplete = errors.New(\"header not complete\")\n\tErrUnsupportedCSVType = errors.New(\"unsupported csv type\")\n)\n\ntype stringSlice []string\n\ntype fieldInfo struct {\n\tposition int\n\theaderName string\n\tfieldName string\n\tkind reflect.Kind\n}\n\ntype fieldInfos []fieldInfo\n\n\/\/ isComplete checks if the all field positions could be detected from the csv file.\nfunc (fieldInfos *fieldInfos) isComplete() bool {\n\tfor _, fieldInfo := range *fieldInfos {\n\t\tif fieldInfo.position < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ createFieldInfos creates the fieldInfos for a struct s.\n\/\/ Only information from the struct (headerName, fieldName and kind) is available,\n\/\/ all field positions are initialized with an invalid value of -1\nfunc createFieldInfos(s interface{}) (fieldInfos, error) {\n\tif reflect.TypeOf(s).Kind() != reflect.Struct {\n\t\treturn nil, ErrNoStruct\n\t}\n\tfieldInfos := []fieldInfo{}\n\theaderNameMap := map[string]interface{}{} \/\/ to detect duplicate csv tag names\n\tfieldNames, err := reflections.Fields(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fieldName := range fieldNames {\n\t\theaderName, err := reflections.GetFieldTag(s, fieldName, \"csv\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ csv fieldtags that contain a dash are ignored\n\t\tif strings.Contains(headerName, \"-\") {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := headerNameMap[headerName]; ok {\n\t\t\treturn nil, fmt.Errorf(\"duplicate csv tag name: %s\", headerName)\n\t\t}\n\t\theaderNameMap[headerName] = nil\n\t\tkind, err := reflections.GetFieldKind(s, fieldName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(headerName) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"empty csv tag for field: %s\", fieldName)\n\t\t}\n\t\tfieldInfos = append(fieldInfos, fieldInfo{\n\t\t\theaderName: headerName,\n\t\t\tfieldName: fieldName,\n\t\t\tposition: -1,\n\t\t\tkind: kind,\n\t\t})\n\t}\n\treturn fieldInfos, nil\n}\n\nfunc (s stringSlice) pos(item string) int {\n\tfor i, v := range s {\n\t\tif item == v {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ Marshaler reads a csv file and unmarshalls it to an endpoint struct.\ntype Marshaler struct {\n\tReader *csv.Reader\n\tfieldInfos fieldInfos\n\tendPointStruct interface{}\n\terrors ParseErrors\n\tLazy bool \/\/ if true, marshaler does not exit on first cvs.ParseError but coninues and appends errors\n}\n\n\/\/ NewMarshaler returns a new Marshaler\nfunc NewMarshaler(endPointStruct interface{}, r io.Reader) (*Marshaler, error) {\n\tfieldInfos, err := createFieldInfos(endPointStruct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcr := csv.NewReader(r)\n\treturn &Marshaler{\n\t\tReader: cr,\n\t\tfieldInfos: fieldInfos,\n\t\tendPointStruct: endPointStruct,\n\t\terrors: ParseErrors{},\n\t}, nil\n}\n\n\/\/ ParseErrors is a slice of csv.ParseError\ntype ParseErrors []csv.ParseError\n\n\/\/ Error returns te ParseErrors as string\nfunc (errs ParseErrors) Error() string {\n\ts := \"\"\n\tfor _, err := range errs {\n\t\ts = s + fmt.Sprintf(\"line:%d,position:%d,err:%s\\n\", err.Line, err.Column, err.Error)\n\t}\n\treturn s\n}\n\n\/\/ Unmarshal parses a csv file and stores its value to a list of entpoint structs\nfunc (m *Marshaler) Unmarshal() ([]interface{}, error) {\n\tstructs := *new([]interface{})\n\n\tline := 0\n\tfor {\n\t\tline++\n\t\tvar record stringSlice\n\t\trecord, err := m.Reader.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !m.Lazy {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif pe, ok := err.(*csv.ParseError); ok {\n\t\t\t\tm.errors = append(m.errors, *pe)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif line == 1 { \/\/ first line contains header information\n\t\t\tfor i, fieldInfo := range m.fieldInfos {\n\t\t\t\tindex := record.pos(fieldInfo.headerName)\n\t\t\t\tif index >= 0 {\n\t\t\t\t\tm.fieldInfos[i].position = index\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !m.fieldInfos.isComplete() {\n\t\t\t\treturn nil, &csv.ParseError{Err: ErrHeaderNotComplete}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ if len(m.fieldInfos) > len(record) {\n\t\t\/\/ return nil, &csv.ParseError{Line: line, Err: errors.New(\"bla\")}\n\t\t\/\/ }\n\t\tsPtr := reflect.New(reflect.TypeOf(m.endPointStruct)).Interface()\n\t\tfor _, fieldInfo := range m.fieldInfos {\n\t\t\tvar (\n\t\t\t\tvalue interface{}\n\t\t\t\terr error\n\t\t\t)\n\t\t\tswitch fieldInfo.kind {\n\t\t\tcase reflect.Bool:\n\t\t\t\tvalue, err = strconv.ParseBool(record[fieldInfo.position])\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tvalue, err = strconv.Atoi(record[fieldInfo.position])\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tvalue, err = strconv.ParseFloat(record[fieldInfo.position], 64)\n\t\t\tcase reflect.String:\n\t\t\t\tvalue = record[fieldInfo.position]\n\t\t\tdefault:\n\t\t\t\terr = ErrUnsupportedCSVType\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tm.errors = append(m.errors, csv.ParseError{\n\t\t\t\t\tColumn: fieldInfo.position,\n\t\t\t\t\tLine: line,\n\t\t\t\t\tErr: err,\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treflections.SetField(sPtr, fieldInfo.fieldName, value)\n\t\t}\n\t\tv := reflect.ValueOf(sPtr).Elem().Interface()\n\t\tstructs = append(structs, v)\n\t}\n\tif len(m.errors) == 0 {\n\t\treturn structs, nil\n\t}\n\treturn structs, m.errors\n}\n<commit_msg>some refactoring<commit_after>package csv\n\nimport (\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/oleiade\/reflections\"\n)\n\nvar (\n\tErrNoStruct = errors.New(\"interface is not a struct\")\n\tErrNoValidRecords = errors.New(\"no valid records found\")\n\tErrHeaderNotComplete = errors.New(\"header not complete\")\n\tErrUnsupportedCSVType = errors.New(\"unsupported csv type\")\n)\n\n\/\/ Marshaler reads a csv file and unmarshalls it to an endpoint struct.\ntype Marshaler struct {\n\tReader *csv.Reader\n\tLazy bool \/\/ if true, marshaler does not exit on first cvs.ParseError but continues and append all errors\n\tfieldInfos fieldInfos\n\tendPointStruct interface{}\n\terrors ParseErrors\n}\n\n\/\/ NewMarshaler returns a new Marshaler\nfunc NewMarshaler(endPointStruct interface{}, r io.Reader) (*Marshaler, error) {\n\tfieldInfos, err := createFieldInfos(endPointStruct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcr := csv.NewReader(r)\n\treturn &Marshaler{\n\t\tReader: cr,\n\t\tfieldInfos: fieldInfos,\n\t\tendPointStruct: endPointStruct,\n\t\terrors: ParseErrors{},\n\t}, nil\n}\n\n\/\/ Unmarshal parses a csv file and stores its value to a list of entpoint structs\nfunc (m *Marshaler) Unmarshal() ([]interface{}, error) {\n\tstructs := *new([]interface{})\n\n\tline := 0\n\tfor {\n\t\tline++\n\t\tvar record stringSlice\n\t\trecord, err := m.Reader.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !m.Lazy {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif pe, ok := err.(*csv.ParseError); ok {\n\t\t\t\tm.errors = append(m.errors, *pe)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif line == 1 { \/\/ first line contains header information\n\t\t\tfor i, fieldInfo := range m.fieldInfos {\n\t\t\t\tindex := record.pos(fieldInfo.headerName)\n\t\t\t\tif index >= 0 {\n\t\t\t\t\tm.fieldInfos[i].position = index\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !m.fieldInfos.isComplete() {\n\t\t\t\treturn nil, &csv.ParseError{Err: ErrHeaderNotComplete}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ if len(m.fieldInfos) > len(record) {\n\t\t\/\/ return nil, &csv.ParseError{Line: line, Err: errors.New(\"bla\")}\n\t\t\/\/ }\n\t\tsPtr := reflect.New(reflect.TypeOf(m.endPointStruct)).Interface()\n\t\tfor _, fieldInfo := range m.fieldInfos {\n\t\t\tvar (\n\t\t\t\tvalue interface{}\n\t\t\t\terr error\n\t\t\t)\n\t\t\tswitch fieldInfo.kind {\n\t\t\tcase reflect.Bool:\n\t\t\t\tvalue, err = strconv.ParseBool(record[fieldInfo.position])\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tvalue, err = strconv.Atoi(record[fieldInfo.position])\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tvalue, err = strconv.ParseFloat(record[fieldInfo.position], 64)\n\t\t\tcase reflect.String:\n\t\t\t\tvalue = record[fieldInfo.position]\n\t\t\tdefault:\n\t\t\t\terr = ErrUnsupportedCSVType\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tm.errors = append(m.errors, csv.ParseError{\n\t\t\t\t\tColumn: fieldInfo.position,\n\t\t\t\t\tLine: line,\n\t\t\t\t\tErr: err,\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treflections.SetField(sPtr, fieldInfo.fieldName, value)\n\t\t}\n\t\tv := reflect.ValueOf(sPtr).Elem().Interface()\n\t\tstructs = append(structs, v)\n\t}\n\tif len(m.errors) == 0 {\n\t\treturn structs, nil\n\t}\n\treturn structs, m.errors\n}\n\n\/\/ ParseErrors is a slice of csv.ParseError\ntype ParseErrors []csv.ParseError\n\n\/\/ Error returns te ParseErrors as string\nfunc (errs ParseErrors) Error() string {\n\ts := \"\"\n\tfor _, err := range errs {\n\t\ts = s + fmt.Sprintf(\"line:%d,position:%d,err:%s\\n\", err.Line, err.Column, err.Error)\n\t}\n\treturn s\n}\n\n\/\/ fieldInfo descripes the mapping between the endpointStruct end the header in a csv file.\ntype fieldInfo struct {\n\tposition int\n\theaderName string\n\tfieldName string\n\tkind reflect.Kind\n}\n\ntype fieldInfos []fieldInfo\n\n\/\/ isComplete checks if the all field positions could be detected from the csv file.\nfunc (fieldInfos *fieldInfos) isComplete() bool {\n\tfor _, fieldInfo := range *fieldInfos {\n\t\tif fieldInfo.position < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ createFieldInfos creates the fieldInfos for a struct s.\n\/\/ Only information from the struct (headerName, fieldName and kind) is available,\n\/\/ all field positions are initialized with an invalid value of -1\nfunc createFieldInfos(s interface{}) (fieldInfos, error) {\n\tif reflect.TypeOf(s).Kind() != reflect.Struct {\n\t\treturn nil, ErrNoStruct\n\t}\n\tfieldInfos := []fieldInfo{}\n\theaderNameMap := map[string]interface{}{} \/\/ to detect duplicate csv tag names\n\tfieldNames, err := reflections.Fields(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fieldName := range fieldNames {\n\t\theaderName, err := reflections.GetFieldTag(s, fieldName, \"csv\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ csv fieldtags that contain a dash are ignored\n\t\tif strings.Contains(headerName, \"-\") {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := headerNameMap[headerName]; ok {\n\t\t\treturn nil, fmt.Errorf(\"duplicate csv tag name: %s\", headerName)\n\t\t}\n\t\theaderNameMap[headerName] = nil\n\t\tkind, err := reflections.GetFieldKind(s, fieldName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(headerName) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"empty csv tag for field: %s\", fieldName)\n\t\t}\n\t\tfieldInfos = append(fieldInfos, fieldInfo{\n\t\t\theaderName: headerName,\n\t\t\tfieldName: fieldName,\n\t\t\tposition: -1,\n\t\t\tkind: kind,\n\t\t})\n\t}\n\treturn fieldInfos, nil\n}\n\ntype stringSlice []string\n\nfunc (s stringSlice) pos(item string) int {\n\tfor i, v := range s {\n\t\tif item == v {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Jonathan Picques. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license\n\/\/ The license can be found in the LICENSE file.\n\n\/\/ The GoCSV package aims to provide easy CSV serialization and deserialization to the golang programming language\n\npackage gocsv\n\nimport (\n\t\"bytes\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ FailIfUnmatchedStructTags indicates whether it is considered an error when there is an unmatched\n\/\/ struct tag.\nvar FailIfUnmatchedStructTags = false\n\n\/\/ FailIfDoubleHeaderNames indicates whether it is considered an error when a header name is repeated\n\/\/ in the csv header.\nvar FailIfDoubleHeaderNames = false\n\n\/\/ ShouldAlignDuplicateHeadersWithStructFieldOrder indicates whether we should align duplicate CSV\n\/\/ headers per their alignment in the struct definition.\nvar ShouldAlignDuplicateHeadersWithStructFieldOrder = false\n\n\/\/ TagSeparator defines seperator string for multiple csv tags in struct fields\nvar TagSeparator = \",\"\n\n\/\/ Normalizer is a function that takes and returns a string. It is applied to\n\/\/ struct and header field values before they are compared. It can be used to alter\n\/\/ names for comparison. For instance, you could allow case insensitive matching\n\/\/ or convert '-' to '_'.\ntype Normalizer func(string) string\n\ntype ErrorHandler func(*csv.ParseError) bool\n\n\/\/ normalizeName function initially set to a nop Normalizer.\nvar normalizeName = DefaultNameNormalizer()\n\n\/\/ DefaultNameNormalizer is a nop Normalizer.\nfunc DefaultNameNormalizer() Normalizer { return func(s string) string { return s } }\n\n\/\/ SetHeaderNormalizer sets the normalizer used to normalize struct and header field names.\nfunc SetHeaderNormalizer(f Normalizer) { normalizeName = f }\n\n\/\/ --------------------------------------------------------------------------\n\/\/ CSVWriter used to format CSV\n\nvar selfCSVWriter = DefaultCSVWriter\n\n\/\/ DefaultCSVWriter is the default SafeCSVWriter used to format CSV (cf. csv.NewWriter)\nfunc DefaultCSVWriter(out io.Writer) *SafeCSVWriter {\n\twriter := NewSafeCSVWriter(csv.NewWriter(out))\n\n\t\/\/ As only one rune can be defined as a CSV separator, we are going to trim\n\t\/\/ the custom tag separator and use the first rune.\n\tif runes := []rune(strings.TrimSpace(TagSeparator)); len(runes) > 0 {\n\t\twriter.Comma = runes[0]\n\t}\n\n\treturn writer\n}\n\n\/\/ SetCSVWriter sets the SafeCSVWriter used to format CSV.\nfunc SetCSVWriter(csvWriter func(io.Writer) *SafeCSVWriter) {\n\tselfCSVWriter = csvWriter\n}\n\nfunc getCSVWriter(out io.Writer) *SafeCSVWriter {\n\treturn selfCSVWriter(out)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ CSVReader used to parse CSV\n\nvar selfCSVReader = DefaultCSVReader\n\n\/\/ DefaultCSVReader is the default CSV reader used to parse CSV (cf. csv.NewReader)\nfunc DefaultCSVReader(in io.Reader) CSVReader {\n\treturn csv.NewReader(in)\n}\n\n\/\/ LazyCSVReader returns a lazy CSV reader, with LazyQuotes and TrimLeadingSpace.\nfunc LazyCSVReader(in io.Reader) CSVReader {\n\tcsvReader := csv.NewReader(in)\n\tcsvReader.LazyQuotes = true\n\tcsvReader.TrimLeadingSpace = true\n\treturn csvReader\n}\n\n\/\/ SetCSVReader sets the CSV reader used to parse CSV.\nfunc SetCSVReader(csvReader func(io.Reader) CSVReader) {\n\tselfCSVReader = csvReader\n}\n\nfunc getCSVReader(in io.Reader) CSVReader {\n\treturn selfCSVReader(in)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Marshal functions\n\n\/\/ MarshalFile saves the interface as CSV in the file.\nfunc MarshalFile(in interface{}, file *os.File) (err error) {\n\treturn Marshal(in, file)\n}\n\n\/\/ MarshalString returns the CSV string from the interface.\nfunc MarshalString(in interface{}) (out string, err error) {\n\tbufferString := bytes.NewBufferString(out)\n\tif err := Marshal(in, bufferString); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn bufferString.String(), nil\n}\n\n\/\/ MarshalBytes returns the CSV bytes from the interface.\nfunc MarshalBytes(in interface{}) (out []byte, err error) {\n\tbufferString := bytes.NewBuffer(out)\n\tif err := Marshal(in, bufferString); err != nil {\n\t\treturn nil, err\n\t}\n\treturn bufferString.Bytes(), nil\n}\n\n\/\/ Marshal returns the CSV in writer from the interface.\nfunc Marshal(in interface{}, out io.Writer) (err error) {\n\twriter := getCSVWriter(out)\n\treturn writeTo(writer, in, false)\n}\n\n\/\/ MarshalWithoutHeaders returns the CSV in writer from the interface.\nfunc MarshalWithoutHeaders(in interface{}, out io.Writer) (err error) {\n\twriter := getCSVWriter(out)\n\treturn writeTo(writer, in, true)\n}\n\n\/\/ MarshalChan returns the CSV read from the channel.\nfunc MarshalChan(c <-chan interface{}, out *SafeCSVWriter) error {\n\treturn writeFromChan(out, c)\n}\n\n\/\/ MarshalCSV returns the CSV in writer from the interface.\nfunc MarshalCSV(in interface{}, out *SafeCSVWriter) (err error) {\n\treturn writeTo(out, in, false)\n}\n\n\/\/ MarshalCSVWithoutHeaders returns the CSV in writer from the interface.\nfunc MarshalCSVWithoutHeaders(in interface{}, out *SafeCSVWriter) (err error) {\n\treturn writeTo(out, in, true)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Unmarshal functions\n\n\/\/ UnmarshalFile parses the CSV from the file in the interface.\nfunc UnmarshalFile(in *os.File, out interface{}) error {\n\treturn Unmarshal(in, out)\n}\n\n\/\/ UnmarshalFile parses the CSV from the file in the interface.\nfunc UnmarshalFileWithErrorHandler(in *os.File, errHandler ErrorHandler, out interface{}) error {\n\treturn UnmarshalWithErrorHandler(in, errHandler, out)\n}\n\n\/\/ UnmarshalString parses the CSV from the string in the interface.\nfunc UnmarshalString(in string, out interface{}) error {\n\treturn Unmarshal(strings.NewReader(in), out)\n}\n\n\/\/ UnmarshalBytes parses the CSV from the bytes in the interface.\nfunc UnmarshalBytes(in []byte, out interface{}) error {\n\treturn Unmarshal(bytes.NewReader(in), out)\n}\n\n\/\/ Unmarshal parses the CSV from the reader in the interface.\nfunc Unmarshal(in io.Reader, out interface{}) error {\n\treturn readTo(newSimpleDecoderFromReader(in), out)\n}\n\n\/\/ Unmarshal parses the CSV from the reader in the interface.\nfunc UnmarshalWithErrorHandler(in io.Reader, errHandle ErrorHandler, out interface{}) error {\n\treturn readToWithErrorHandler(newSimpleDecoderFromReader(in), errHandle, out)\n}\n\n\/\/ UnmarshalWithoutHeaders parses the CSV from the reader in the interface.\nfunc UnmarshalWithoutHeaders(in io.Reader, out interface{}) error {\n\treturn readToWithoutHeaders(newSimpleDecoderFromReader(in), out)\n}\n\n\/\/ UnmarshalCSVWithoutHeaders parses a headerless CSV with passed in CSV reader\nfunc UnmarshalCSVWithoutHeaders(in CSVReader, out interface{}) error {\n\treturn readToWithoutHeaders(csvDecoder{in}, out)\n}\n\n\/\/ UnmarshalDecoder parses the CSV from the decoder in the interface\nfunc UnmarshalDecoder(in Decoder, out interface{}) error {\n\treturn readTo(in, out)\n}\n\n\/\/ UnmarshalCSV parses the CSV from the reader in the interface.\nfunc UnmarshalCSV(in CSVReader, out interface{}) error {\n\treturn readTo(csvDecoder{in}, out)\n}\n\n\/\/ UnmarshalToChan parses the CSV from the reader and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalToChan(in io.Reader, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEach(newSimpleDecoderFromReader(in), c)\n}\n\n\/\/ UnmarshalToChanWithoutHeaders parses the CSV from the reader and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalToChanWithoutHeaders(in io.Reader, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEachWithoutHeaders(newSimpleDecoderFromReader(in), c)\n}\n\n\/\/ UnmarshalDecoderToChan parses the CSV from the decoder and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalDecoderToChan(in SimpleDecoder, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEach(in, c)\n}\n\n\/\/ UnmarshalStringToChan parses the CSV from the string and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalStringToChan(in string, c interface{}) error {\n\treturn UnmarshalToChan(strings.NewReader(in), c)\n}\n\n\/\/ UnmarshalBytesToChan parses the CSV from the bytes and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalBytesToChan(in []byte, c interface{}) error {\n\treturn UnmarshalToChan(bytes.NewReader(in), c)\n}\n\n\/\/ UnmarshalToCallback parses the CSV from the reader and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalToCallback(in io.Reader, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalToChan(in, c.Interface())\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\treturn err\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\t\tvalueFunc.Call([]reflect.Value{v})\n\t}\n\treturn nil\n}\n\n\/\/ UnmarshalToCallbackWithError parses the CSV from the reader and\n\/\/ send each value to the given func f.\n\/\/\n\/\/ If func returns error, it will stop processing, drain the\n\/\/ parser and propagate the error to caller.\n\/\/\n\/\/ The func must look like func(Struct) error.\nfunc UnmarshalBytesToCallbackWithError(in []byte, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tif t.NumOut() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one return value\")\n\t}\n\tif !isErrorType(t.Out(0)) {\n\t\treturn fmt.Errorf(\"the given function must only return error.\")\n\t}\n\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalBytesToChan(in, c.Interface())\n\t}()\n\n\tvar fErr error\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn fErr\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ callback f has already returned an error, stop processing but keep draining the chan c\n\t\tif fErr != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresults := valueFunc.Call([]reflect.Value{v})\n\n\t\t\/\/ If the callback f returns an error, stores it and returns it in future.\n\t\terrValue := results[0].Interface()\n\t\tif errValue != nil {\n\t\t\tfErr = errValue.(error)\n\t\t}\n\t}\n\treturn fErr\n}\n\n\/\/ UnmarshalDecoderToCallback parses the CSV from the decoder and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalDecoderToCallback(in SimpleDecoder, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalDecoderToChan(in, c.Interface())\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\treturn err\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\t\tvalueFunc.Call([]reflect.Value{v})\n\t}\n\treturn nil\n}\n\n\/\/ UnmarshalBytesToCallback parses the CSV from the bytes and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalBytesToCallback(in []byte, f interface{}) error {\n\treturn UnmarshalToCallback(bytes.NewReader(in), f)\n}\n\n\/\/ UnmarshalStringToCallback parses the CSV from the string and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalStringToCallback(in string, c interface{}) (err error) {\n\treturn UnmarshalToCallback(strings.NewReader(in), c)\n}\n\n\/\/ CSVToMap creates a simple map from a CSV of 2 columns.\nfunc CSVToMap(in io.Reader) (map[string]string, error) {\n\tdecoder := newSimpleDecoderFromReader(in)\n\theader, err := decoder.getCSVRow()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(header) != 2 {\n\t\treturn nil, fmt.Errorf(\"maps can only be created for csv of two columns\")\n\t}\n\tm := make(map[string]string)\n\tfor {\n\t\tline, err := decoder.getCSVRow()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tm[line[0]] = line[1]\n\t}\n\treturn m, nil\n}\n\n\/\/ CSVToMaps takes a reader and returns an array of dictionaries, using the header row as the keys\nfunc CSVToMaps(reader io.Reader) ([]map[string]string, error) {\n\tr := csv.NewReader(reader)\n\trows := []map[string]string{}\n\tvar header []string\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif header == nil {\n\t\t\theader = record\n\t\t} else {\n\t\t\tdict := map[string]string{}\n\t\t\tfor i := range header {\n\t\t\t\tdict[header[i]] = record[i]\n\t\t\t}\n\t\t\trows = append(rows, dict)\n\t\t}\n\t}\n\treturn rows, nil\n}\n<commit_msg>Define the following 3 functions as a group: - UnmarshalToCallbackWithError - UnmarshalBytesToCallbackWithError - UnmarshalStringToCallbackWithError<commit_after>\/\/ Copyright 2014 Jonathan Picques. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license\n\/\/ The license can be found in the LICENSE file.\n\n\/\/ The GoCSV package aims to provide easy CSV serialization and deserialization to the golang programming language\n\npackage gocsv\n\nimport (\n\t\"bytes\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ FailIfUnmatchedStructTags indicates whether it is considered an error when there is an unmatched\n\/\/ struct tag.\nvar FailIfUnmatchedStructTags = false\n\n\/\/ FailIfDoubleHeaderNames indicates whether it is considered an error when a header name is repeated\n\/\/ in the csv header.\nvar FailIfDoubleHeaderNames = false\n\n\/\/ ShouldAlignDuplicateHeadersWithStructFieldOrder indicates whether we should align duplicate CSV\n\/\/ headers per their alignment in the struct definition.\nvar ShouldAlignDuplicateHeadersWithStructFieldOrder = false\n\n\/\/ TagSeparator defines seperator string for multiple csv tags in struct fields\nvar TagSeparator = \",\"\n\n\/\/ Normalizer is a function that takes and returns a string. It is applied to\n\/\/ struct and header field values before they are compared. It can be used to alter\n\/\/ names for comparison. For instance, you could allow case insensitive matching\n\/\/ or convert '-' to '_'.\ntype Normalizer func(string) string\n\ntype ErrorHandler func(*csv.ParseError) bool\n\n\/\/ normalizeName function initially set to a nop Normalizer.\nvar normalizeName = DefaultNameNormalizer()\n\n\/\/ DefaultNameNormalizer is a nop Normalizer.\nfunc DefaultNameNormalizer() Normalizer { return func(s string) string { return s } }\n\n\/\/ SetHeaderNormalizer sets the normalizer used to normalize struct and header field names.\nfunc SetHeaderNormalizer(f Normalizer) { normalizeName = f }\n\n\/\/ --------------------------------------------------------------------------\n\/\/ CSVWriter used to format CSV\n\nvar selfCSVWriter = DefaultCSVWriter\n\n\/\/ DefaultCSVWriter is the default SafeCSVWriter used to format CSV (cf. csv.NewWriter)\nfunc DefaultCSVWriter(out io.Writer) *SafeCSVWriter {\n\twriter := NewSafeCSVWriter(csv.NewWriter(out))\n\n\t\/\/ As only one rune can be defined as a CSV separator, we are going to trim\n\t\/\/ the custom tag separator and use the first rune.\n\tif runes := []rune(strings.TrimSpace(TagSeparator)); len(runes) > 0 {\n\t\twriter.Comma = runes[0]\n\t}\n\n\treturn writer\n}\n\n\/\/ SetCSVWriter sets the SafeCSVWriter used to format CSV.\nfunc SetCSVWriter(csvWriter func(io.Writer) *SafeCSVWriter) {\n\tselfCSVWriter = csvWriter\n}\n\nfunc getCSVWriter(out io.Writer) *SafeCSVWriter {\n\treturn selfCSVWriter(out)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ CSVReader used to parse CSV\n\nvar selfCSVReader = DefaultCSVReader\n\n\/\/ DefaultCSVReader is the default CSV reader used to parse CSV (cf. csv.NewReader)\nfunc DefaultCSVReader(in io.Reader) CSVReader {\n\treturn csv.NewReader(in)\n}\n\n\/\/ LazyCSVReader returns a lazy CSV reader, with LazyQuotes and TrimLeadingSpace.\nfunc LazyCSVReader(in io.Reader) CSVReader {\n\tcsvReader := csv.NewReader(in)\n\tcsvReader.LazyQuotes = true\n\tcsvReader.TrimLeadingSpace = true\n\treturn csvReader\n}\n\n\/\/ SetCSVReader sets the CSV reader used to parse CSV.\nfunc SetCSVReader(csvReader func(io.Reader) CSVReader) {\n\tselfCSVReader = csvReader\n}\n\nfunc getCSVReader(in io.Reader) CSVReader {\n\treturn selfCSVReader(in)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Marshal functions\n\n\/\/ MarshalFile saves the interface as CSV in the file.\nfunc MarshalFile(in interface{}, file *os.File) (err error) {\n\treturn Marshal(in, file)\n}\n\n\/\/ MarshalString returns the CSV string from the interface.\nfunc MarshalString(in interface{}) (out string, err error) {\n\tbufferString := bytes.NewBufferString(out)\n\tif err := Marshal(in, bufferString); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn bufferString.String(), nil\n}\n\n\/\/ MarshalBytes returns the CSV bytes from the interface.\nfunc MarshalBytes(in interface{}) (out []byte, err error) {\n\tbufferString := bytes.NewBuffer(out)\n\tif err := Marshal(in, bufferString); err != nil {\n\t\treturn nil, err\n\t}\n\treturn bufferString.Bytes(), nil\n}\n\n\/\/ Marshal returns the CSV in writer from the interface.\nfunc Marshal(in interface{}, out io.Writer) (err error) {\n\twriter := getCSVWriter(out)\n\treturn writeTo(writer, in, false)\n}\n\n\/\/ MarshalWithoutHeaders returns the CSV in writer from the interface.\nfunc MarshalWithoutHeaders(in interface{}, out io.Writer) (err error) {\n\twriter := getCSVWriter(out)\n\treturn writeTo(writer, in, true)\n}\n\n\/\/ MarshalChan returns the CSV read from the channel.\nfunc MarshalChan(c <-chan interface{}, out *SafeCSVWriter) error {\n\treturn writeFromChan(out, c)\n}\n\n\/\/ MarshalCSV returns the CSV in writer from the interface.\nfunc MarshalCSV(in interface{}, out *SafeCSVWriter) (err error) {\n\treturn writeTo(out, in, false)\n}\n\n\/\/ MarshalCSVWithoutHeaders returns the CSV in writer from the interface.\nfunc MarshalCSVWithoutHeaders(in interface{}, out *SafeCSVWriter) (err error) {\n\treturn writeTo(out, in, true)\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Unmarshal functions\n\n\/\/ UnmarshalFile parses the CSV from the file in the interface.\nfunc UnmarshalFile(in *os.File, out interface{}) error {\n\treturn Unmarshal(in, out)\n}\n\n\/\/ UnmarshalFile parses the CSV from the file in the interface.\nfunc UnmarshalFileWithErrorHandler(in *os.File, errHandler ErrorHandler, out interface{}) error {\n\treturn UnmarshalWithErrorHandler(in, errHandler, out)\n}\n\n\/\/ UnmarshalString parses the CSV from the string in the interface.\nfunc UnmarshalString(in string, out interface{}) error {\n\treturn Unmarshal(strings.NewReader(in), out)\n}\n\n\/\/ UnmarshalBytes parses the CSV from the bytes in the interface.\nfunc UnmarshalBytes(in []byte, out interface{}) error {\n\treturn Unmarshal(bytes.NewReader(in), out)\n}\n\n\/\/ Unmarshal parses the CSV from the reader in the interface.\nfunc Unmarshal(in io.Reader, out interface{}) error {\n\treturn readTo(newSimpleDecoderFromReader(in), out)\n}\n\n\/\/ Unmarshal parses the CSV from the reader in the interface.\nfunc UnmarshalWithErrorHandler(in io.Reader, errHandle ErrorHandler, out interface{}) error {\n\treturn readToWithErrorHandler(newSimpleDecoderFromReader(in), errHandle, out)\n}\n\n\/\/ UnmarshalWithoutHeaders parses the CSV from the reader in the interface.\nfunc UnmarshalWithoutHeaders(in io.Reader, out interface{}) error {\n\treturn readToWithoutHeaders(newSimpleDecoderFromReader(in), out)\n}\n\n\/\/ UnmarshalCSVWithoutHeaders parses a headerless CSV with passed in CSV reader\nfunc UnmarshalCSVWithoutHeaders(in CSVReader, out interface{}) error {\n\treturn readToWithoutHeaders(csvDecoder{in}, out)\n}\n\n\/\/ UnmarshalDecoder parses the CSV from the decoder in the interface\nfunc UnmarshalDecoder(in Decoder, out interface{}) error {\n\treturn readTo(in, out)\n}\n\n\/\/ UnmarshalCSV parses the CSV from the reader in the interface.\nfunc UnmarshalCSV(in CSVReader, out interface{}) error {\n\treturn readTo(csvDecoder{in}, out)\n}\n\n\/\/ UnmarshalToChan parses the CSV from the reader and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalToChan(in io.Reader, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEach(newSimpleDecoderFromReader(in), c)\n}\n\n\/\/ UnmarshalToChanWithoutHeaders parses the CSV from the reader and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalToChanWithoutHeaders(in io.Reader, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEachWithoutHeaders(newSimpleDecoderFromReader(in), c)\n}\n\n\/\/ UnmarshalDecoderToChan parses the CSV from the decoder and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalDecoderToChan(in SimpleDecoder, c interface{}) error {\n\tif c == nil {\n\t\treturn fmt.Errorf(\"goscv: channel is %v\", c)\n\t}\n\treturn readEach(in, c)\n}\n\n\/\/ UnmarshalStringToChan parses the CSV from the string and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalStringToChan(in string, c interface{}) error {\n\treturn UnmarshalToChan(strings.NewReader(in), c)\n}\n\n\/\/ UnmarshalBytesToChan parses the CSV from the bytes and send each value in the chan c.\n\/\/ The channel must have a concrete type.\nfunc UnmarshalBytesToChan(in []byte, c interface{}) error {\n\treturn UnmarshalToChan(bytes.NewReader(in), c)\n}\n\n\/\/ UnmarshalToCallback parses the CSV from the reader and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalToCallback(in io.Reader, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalToChan(in, c.Interface())\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\treturn err\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\t\tvalueFunc.Call([]reflect.Value{v})\n\t}\n\treturn nil\n}\n\n\/\/ UnmarshalDecoderToCallback parses the CSV from the decoder and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalDecoderToCallback(in SimpleDecoder, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalDecoderToChan(in, c.Interface())\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\treturn err\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\t\tvalueFunc.Call([]reflect.Value{v})\n\t}\n\treturn nil\n}\n\n\/\/ UnmarshalBytesToCallback parses the CSV from the bytes and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalBytesToCallback(in []byte, f interface{}) error {\n\treturn UnmarshalToCallback(bytes.NewReader(in), f)\n}\n\n\/\/ UnmarshalStringToCallback parses the CSV from the string and send each value to the given func f.\n\/\/ The func must look like func(Struct).\nfunc UnmarshalStringToCallback(in string, c interface{}) (err error) {\n\treturn UnmarshalToCallback(strings.NewReader(in), c)\n}\n\n\/\/ UnmarshalToCallbackWithError parses the CSV from the reader and\n\/\/ send each value to the given func f.\n\/\/\n\/\/ If func returns error, it will stop processing, drain the\n\/\/ parser and propagate the error to caller.\n\/\/\n\/\/ The func must look like func(Struct) error.\nfunc UnmarshalToCallbackWithError(in io.Reader, f interface{}) error {\n\tvalueFunc := reflect.ValueOf(f)\n\tt := reflect.TypeOf(f)\n\tif t.NumIn() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one parameter\")\n\t}\n\tif t.NumOut() != 1 {\n\t\treturn fmt.Errorf(\"the given function must have exactly one return value\")\n\t}\n\tif !isErrorType(t.Out(0)) {\n\t\treturn fmt.Errorf(\"the given function must only return error.\")\n\t}\n\n\tcerr := make(chan error)\n\tc := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0)\n\tgo func() {\n\t\tcerr <- UnmarshalToChan(in, c.Interface())\n\t}()\n\n\tvar fErr error\n\tfor {\n\t\tselect {\n\t\tcase err := <-cerr:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn fErr\n\t\tdefault:\n\t\t}\n\t\tv, notClosed := c.Recv()\n\t\tif !notClosed || v.Interface() == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ callback f has already returned an error, stop processing but keep draining the chan c\n\t\tif fErr != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresults := valueFunc.Call([]reflect.Value{v})\n\n\t\t\/\/ If the callback f returns an error, stores it and returns it in future.\n\t\terrValue := results[0].Interface()\n\t\tif errValue != nil {\n\t\t\tfErr = errValue.(error)\n\t\t}\n\t}\n\treturn fErr\n}\n\n\/\/ UnmarshalBytesToCallbackWithError parses the CSV from the bytes and\n\/\/ send each value to the given func f.\n\/\/\n\/\/ If func returns error, it will stop processing, drain the\n\/\/ parser and propagate the error to caller.\n\/\/\n\/\/ The func must look like func(Struct) error.\nfunc UnmarshalBytesToCallbackWithError(in []byte, f interface{}) error {\n\treturn UnmarshalToCallbackWithError(bytes.NewReader(in), f)\n}\n\n\/\/ UnmarshalStringToCallbackWithError parses the CSV from the string and\n\/\/ send each value to the given func f.\n\/\/\n\/\/ If func returns error, it will stop processing, drain the\n\/\/ parser and propagate the error to caller.\n\/\/\n\/\/ The func must look like func(Struct) error.\nfunc UnmarshalStringToCallbackWithError(in string, c interface{}) (err error) {\n\treturn UnmarshalToCallbackWithError(strings.NewReader(in), c)\n}\n\n\/\/ CSVToMap creates a simple map from a CSV of 2 columns.\nfunc CSVToMap(in io.Reader) (map[string]string, error) {\n\tdecoder := newSimpleDecoderFromReader(in)\n\theader, err := decoder.getCSVRow()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(header) != 2 {\n\t\treturn nil, fmt.Errorf(\"maps can only be created for csv of two columns\")\n\t}\n\tm := make(map[string]string)\n\tfor {\n\t\tline, err := decoder.getCSVRow()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tm[line[0]] = line[1]\n\t}\n\treturn m, nil\n}\n\n\/\/ CSVToMaps takes a reader and returns an array of dictionaries, using the header row as the keys\nfunc CSVToMaps(reader io.Reader) ([]map[string]string, error) {\n\tr := csv.NewReader(reader)\n\trows := []map[string]string{}\n\tvar header []string\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif header == nil {\n\t\t\theader = record\n\t\t} else {\n\t\t\tdict := map[string]string{}\n\t\t\tfor i := range header {\n\t\t\t\tdict[header[i]] = record[i]\n\t\t\t}\n\t\t\trows = append(rows, dict)\n\t\t}\n\t}\n\treturn rows, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package revel\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\ntype TestSuite struct {\n\tClient *http.Client\n\tResponse *http.Response\n\tResponseBody []byte\n\tSession Session\n}\n\ntype TestRequest struct {\n\t*http.Request\n\ttestSuite *TestSuite\n}\n\nvar TestSuites []interface{} \/\/ Array of structs that embed TestSuite\n\n\/\/ NewTestSuite returns an initialized TestSuite ready for use. It is invoked\n\/\/ by the test harness to initialize the embedded field in application tests.\nfunc NewTestSuite() TestSuite {\n\tjar, _ := cookiejar.New(nil)\n\treturn TestSuite{\n\t\tClient: &http.Client{Jar: jar},\n\t\tSession: make(Session),\n\t}\n}\n\n\/\/ Return the address and port of the server, e.g. \"127.0.0.1:8557\"\nfunc (t *TestSuite) Host() string {\n\tif Server.Addr[0] == ':' {\n\t\treturn \"127.0.0.1\" + Server.Addr\n\t}\n\treturn Server.Addr\n}\n\n\/\/ Return the base http\/https URL of the server, e.g. \"http:\/\/127.0.0.1:8557\".\n\/\/ The scheme is set to https if http.ssl is set to true in the configuration file.\nfunc (t *TestSuite) BaseUrl() string {\n\tif HttpSsl {\n\t\treturn \"https:\/\/\" + t.Host()\n\t} else {\n\t\treturn \"http:\/\/\" + t.Host()\n\t}\n}\n\n\/\/ Return the base websocket URL of the server, e.g. \"ws:\/\/127.0.0.1:8557\"\nfunc (t *TestSuite) WebSocketUrl() string {\n\treturn \"ws:\/\/\" + t.Host()\n}\n\n\/\/ Issue a GET request to the given path and store the result in Response and\n\/\/ ResponseBody.\nfunc (t *TestSuite) Get(path string) {\n\tt.GetCustom(t.BaseUrl() + path).Send()\n}\n\n\/\/ Return a GET request to the given uri in a form of its wrapper.\nfunc (t *TestSuite) GetCustom(uri string) *TestRequest {\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a DELETE request to the given path and store the result in Response and\n\/\/ ResponseBody.\nfunc (t *TestSuite) Delete(path string) {\n\tt.DeleteCustom(t.BaseUrl() + path).Send()\n}\n\n\/\/ Return a DELETE request to the given uri in a form of its wrapper.\nfunc (t *TestSuite) DeleteCustom(uri string) *TestRequest {\n\treq, err := http.NewRequest(\"DELETE\", uri, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a POST request to the given path, sending the given Content-Type and\n\/\/ data, and store the result in Response and ResponseBody. \"data\" may be nil.\nfunc (t *TestSuite) Post(path string, contentType string, reader io.Reader) {\n\tt.PostCustom(t.BaseUrl()+path, contentType, reader).Send()\n}\n\n\/\/ Return a POST request to the given uri with specified Content-Type and data\n\/\/ in a form of wrapper. \"data\" may be nil.\nfunc (t *TestSuite) PostCustom(uri string, contentType string, reader io.Reader) *TestRequest {\n\treq, err := http.NewRequest(\"POST\", uri, reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a POST request to the given path as a form post of the given key and\n\/\/ values, and store the result in Response and ResponseBody.\nfunc (t *TestSuite) PostForm(path string, data url.Values) {\n\tt.PostFormCustom(t.BaseUrl()+path, data).Send()\n}\n\n\/\/ Return a POST request to the given uri as a form post of the given key and values.\n\/\/ The request is in a form of TestRequest wrapper.\nfunc (t *TestSuite) PostFormCustom(uri string, data url.Values) *TestRequest {\n\treturn t.PostCustom(uri, \"application\/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n}\n\n\/\/ Issue a multipart request to the given path sending given params and files,\n\/\/ and store the result in Response and ResponseBody.\nfunc (t *TestSuite) PostFile(path string, params url.Values, filePaths url.Values) {\n\tt.PostFileCustom(t.BaseUrl()+path, params, filePaths).Send()\n}\n\n\/\/ Return a multipart request to the given uri in a form of its wrapper\n\/\/ with the given params and files.\nfunc (t *TestSuite) PostFileCustom(uri string, params url.Values, filePaths url.Values) *TestRequest {\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\n\tfor key, values := range filePaths {\n\t\tfor _, value := range values {\n\t\t\tcreateFormFile(writer, key, value)\n\t\t}\n\t}\n\n\tfor key, values := range params {\n\t\tfor _, value := range values {\n\t\t\terr := writer.WriteField(key, value)\n\t\t\tt.AssertEqual(nil, err)\n\t\t}\n\t}\n\terr := writer.Close()\n\tt.AssertEqual(nil, err)\n\n\treturn t.PostCustom(uri, writer.FormDataContentType(), body)\n}\n\n\/\/ Issue any request and read the response. If successful, the caller may\n\/\/ examine the Response and ResponseBody properties. Session data will be\n\/\/ added to the request cookies for you.\nfunc (r *TestRequest) Send() {\n\tr.AddCookie(r.testSuite.Session.cookie())\n\tr.MakeRequest()\n}\n\n\/\/ Issue any request and read the response. If successful, the caller may\n\/\/ examine the Response and ResponseBody properties. You will need to\n\/\/ manage session \/ cookie data manually\nfunc (r *TestRequest) MakeRequest() {\n\tvar err error\n\tif r.testSuite.Response, err = r.testSuite.Client.Do(r.Request); err != nil {\n\t\tpanic(err)\n\t}\n\tif r.testSuite.ResponseBody, err = ioutil.ReadAll(r.testSuite.Response.Body); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Look for a session cookie in the response and parse it.\n\tsessionCookieName := r.testSuite.Session.cookie().Name\n\tfor _, cookie := range r.testSuite.Client.Jar.Cookies(r.Request.URL) {\n\t\tif cookie.Name == sessionCookieName {\n\t\t\tr.testSuite.Session = getSessionFromCookie(cookie)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n\/\/ Create a websocket connection to the given path and return the connection\nfunc (t *TestSuite) WebSocket(path string) *websocket.Conn {\n\torigin := t.BaseUrl() + \"\/\"\n\turl := t.WebSocketUrl() + path\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ws\n}\n\nfunc (t *TestSuite) AssertOk() {\n\tt.AssertStatus(http.StatusOK)\n}\n\nfunc (t *TestSuite) AssertNotFound() {\n\tt.AssertStatus(http.StatusNotFound)\n}\n\nfunc (t *TestSuite) AssertStatus(status int) {\n\tif t.Response.StatusCode != status {\n\t\tpanic(fmt.Errorf(\"Status: (expected) %d != %d (actual)\", status, t.Response.StatusCode))\n\t}\n}\n\nfunc (t *TestSuite) AssertContentType(contentType string) {\n\tt.AssertHeader(\"Content-Type\", contentType)\n}\n\nfunc (t *TestSuite) AssertHeader(name, value string) {\n\tactual := t.Response.Header.Get(name)\n\tif actual != value {\n\t\tpanic(fmt.Errorf(\"Header %s: (expected) %s != %s (actual)\", name, value, actual))\n\t}\n}\n\nfunc (t *TestSuite) AssertEqual(expected, actual interface{}) {\n\tif !Equal(expected, actual) {\n\t\tpanic(fmt.Errorf(\"(expected) %v != %v (actual)\", expected, actual))\n\t}\n}\n\nfunc (t *TestSuite) AssertNotEqual(expected, actual interface{}) {\n\tif Equal(expected, actual) {\n\t\tpanic(fmt.Errorf(\"(expected) %v == %v (actual)\", expected, actual))\n\t}\n}\n\nfunc (t *TestSuite) Assert(exp bool) {\n\tt.Assertf(exp, \"Assertion failed\")\n}\n\nfunc (t *TestSuite) Assertf(exp bool, formatStr string, args ...interface{}) {\n\tif !exp {\n\t\tpanic(fmt.Errorf(formatStr, args...))\n\t}\n}\n\n\/\/ Assert that the response contains the given string.\nfunc (t *TestSuite) AssertContains(s string) {\n\tif !bytes.Contains(t.ResponseBody, []byte(s)) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response to contain %s\", s))\n\t}\n}\n\n\/\/ Assert that the response does not contain the given string.\nfunc (t *TestSuite) AssertNotContains(s string) {\n\tif bytes.Contains(t.ResponseBody, []byte(s)) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response not to contain %s\", s))\n\t}\n}\n\n\/\/ Assert that the response matches the given regular expression.\nfunc (t *TestSuite) AssertContainsRegex(regex string) {\n\tr := regexp.MustCompile(regex)\n\n\tif !r.Match(t.ResponseBody) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response to match regexp %s\", regex))\n\t}\n}\n\nfunc createFormFile(writer *multipart.Writer, fieldname, filename string) {\n\t\/\/ Try to open the file.\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\t\/\/ Create a new form-data header with the provided field name and file name.\n\t\/\/ Determine Content-Type of the file by its extension.\n\th := textproto.MIMEHeader{}\n\th.Set(\"Content-Disposition\", fmt.Sprintf(\n\t\t`form-data; name=\"%s\"; filename=\"%s\"`,\n\t\tescapeQuotes(fieldname),\n\t\tescapeQuotes(filepath.Base(filename)),\n\t))\n\th.Set(\"Content-Type\", \"application\/octet-stream\")\n\tif ct := mime.TypeByExtension(filepath.Ext(filename)); ct != \"\" {\n\t\th.Set(\"Content-Type\", ct)\n\t}\n\tpart, err := writer.CreatePart(h)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Copy the content of the file we have opened not reading the whole\n\t\/\/ file into memory.\n\t_, err = io.Copy(part, file)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar quoteEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", `\"`, \"\\\\\\\"\")\n\n\/\/ This function was borrowed from mime\/multipart package.\nfunc escapeQuotes(s string) string {\n\treturn quoteEscaper.Replace(s)\n}\n<commit_msg>Add testing support for HTTP PUT & PATCH<commit_after>package revel\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/textproto\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\ntype TestSuite struct {\n\tClient *http.Client\n\tResponse *http.Response\n\tResponseBody []byte\n\tSession Session\n}\n\ntype TestRequest struct {\n\t*http.Request\n\ttestSuite *TestSuite\n}\n\nvar TestSuites []interface{} \/\/ Array of structs that embed TestSuite\n\n\/\/ NewTestSuite returns an initialized TestSuite ready for use. It is invoked\n\/\/ by the test harness to initialize the embedded field in application tests.\nfunc NewTestSuite() TestSuite {\n\tjar, _ := cookiejar.New(nil)\n\treturn TestSuite{\n\t\tClient: &http.Client{Jar: jar},\n\t\tSession: make(Session),\n\t}\n}\n\n\/\/ Return the address and port of the server, e.g. \"127.0.0.1:8557\"\nfunc (t *TestSuite) Host() string {\n\tif Server.Addr[0] == ':' {\n\t\treturn \"127.0.0.1\" + Server.Addr\n\t}\n\treturn Server.Addr\n}\n\n\/\/ Return the base http\/https URL of the server, e.g. \"http:\/\/127.0.0.1:8557\".\n\/\/ The scheme is set to https if http.ssl is set to true in the configuration file.\nfunc (t *TestSuite) BaseUrl() string {\n\tif HttpSsl {\n\t\treturn \"https:\/\/\" + t.Host()\n\t} else {\n\t\treturn \"http:\/\/\" + t.Host()\n\t}\n}\n\n\/\/ Return the base websocket URL of the server, e.g. \"ws:\/\/127.0.0.1:8557\"\nfunc (t *TestSuite) WebSocketUrl() string {\n\treturn \"ws:\/\/\" + t.Host()\n}\n\n\/\/ Issue a GET request to the given path and store the result in Response and\n\/\/ ResponseBody.\nfunc (t *TestSuite) Get(path string) {\n\tt.GetCustom(t.BaseUrl() + path).Send()\n}\n\n\/\/ Return a GET request to the given uri in a form of its wrapper.\nfunc (t *TestSuite) GetCustom(uri string) *TestRequest {\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a DELETE request to the given path and store the result in Response and\n\/\/ ResponseBody.\nfunc (t *TestSuite) Delete(path string) {\n\tt.DeleteCustom(t.BaseUrl() + path).Send()\n}\n\n\/\/ Return a DELETE request to the given uri in a form of its wrapper.\nfunc (t *TestSuite) DeleteCustom(uri string) *TestRequest {\n\treq, err := http.NewRequest(\"DELETE\", uri, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a PUT request to the given path, sending the given Content-Type and\n\/\/ data, and store the result in Response and ResponseBody. \"data\" may be nil.\nfunc (t *TestSuite) Put(path string, contentType string, reader io.Reader) {\n\tt.PutCustom(t.BaseUrl()+path, contentType, reader).Send()\n}\n\n\/\/ Return a PUT request to the given uri with specified Content-Type and data\n\/\/ in a form of wrapper. \"data\" may be nil.\nfunc (t *TestSuite) PutCustom(uri string, contentType string, reader io.Reader) *TestRequest {\n\treq, err := http.NewRequest(\"PUT\", uri, reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a PATCH request to the given path, sending the given Content-Type and\n\/\/ data, and store the result in Response and ResponseBody. \"data\" may be nil.\nfunc (t *TestSuite) Patch(path string, contentType string, reader io.Reader) {\n\tt.PatchCustom(t.BaseUrl()+path, contentType, reader).Send()\n}\n\n\/\/ Return a PATCH request to the given uri with specified Content-Type and data\n\/\/ in a form of wrapper. \"data\" may be nil.\nfunc (t *TestSuite) PatchCustom(uri string, contentType string, reader io.Reader) *TestRequest {\n\treq, err := http.NewRequest(\"PATCH\", uri, reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a POST request to the given path, sending the given Content-Type and\n\/\/ data, and store the result in Response and ResponseBody. \"data\" may be nil.\nfunc (t *TestSuite) Post(path string, contentType string, reader io.Reader) {\n\tt.PostCustom(t.BaseUrl()+path, contentType, reader).Send()\n}\n\n\/\/ Return a POST request to the given uri with specified Content-Type and data\n\/\/ in a form of wrapper. \"data\" may be nil.\nfunc (t *TestSuite) PostCustom(uri string, contentType string, reader io.Reader) *TestRequest {\n\treq, err := http.NewRequest(\"POST\", uri, reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Content-Type\", contentType)\n\treturn &TestRequest{\n\t\tRequest: req,\n\t\ttestSuite: t,\n\t}\n}\n\n\/\/ Issue a POST request to the given path as a form post of the given key and\n\/\/ values, and store the result in Response and ResponseBody.\nfunc (t *TestSuite) PostForm(path string, data url.Values) {\n\tt.PostFormCustom(t.BaseUrl()+path, data).Send()\n}\n\n\/\/ Return a POST request to the given uri as a form post of the given key and values.\n\/\/ The request is in a form of TestRequest wrapper.\nfunc (t *TestSuite) PostFormCustom(uri string, data url.Values) *TestRequest {\n\treturn t.PostCustom(uri, \"application\/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n}\n\n\/\/ Issue a multipart request to the given path sending given params and files,\n\/\/ and store the result in Response and ResponseBody.\nfunc (t *TestSuite) PostFile(path string, params url.Values, filePaths url.Values) {\n\tt.PostFileCustom(t.BaseUrl()+path, params, filePaths).Send()\n}\n\n\/\/ Return a multipart request to the given uri in a form of its wrapper\n\/\/ with the given params and files.\nfunc (t *TestSuite) PostFileCustom(uri string, params url.Values, filePaths url.Values) *TestRequest {\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\n\tfor key, values := range filePaths {\n\t\tfor _, value := range values {\n\t\t\tcreateFormFile(writer, key, value)\n\t\t}\n\t}\n\n\tfor key, values := range params {\n\t\tfor _, value := range values {\n\t\t\terr := writer.WriteField(key, value)\n\t\t\tt.AssertEqual(nil, err)\n\t\t}\n\t}\n\terr := writer.Close()\n\tt.AssertEqual(nil, err)\n\n\treturn t.PostCustom(uri, writer.FormDataContentType(), body)\n}\n\n\/\/ Issue any request and read the response. If successful, the caller may\n\/\/ examine the Response and ResponseBody properties. Session data will be\n\/\/ added to the request cookies for you.\nfunc (r *TestRequest) Send() {\n\tr.AddCookie(r.testSuite.Session.cookie())\n\tr.MakeRequest()\n}\n\n\/\/ Issue any request and read the response. If successful, the caller may\n\/\/ examine the Response and ResponseBody properties. You will need to\n\/\/ manage session \/ cookie data manually\nfunc (r *TestRequest) MakeRequest() {\n\tvar err error\n\tif r.testSuite.Response, err = r.testSuite.Client.Do(r.Request); err != nil {\n\t\tpanic(err)\n\t}\n\tif r.testSuite.ResponseBody, err = ioutil.ReadAll(r.testSuite.Response.Body); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Look for a session cookie in the response and parse it.\n\tsessionCookieName := r.testSuite.Session.cookie().Name\n\tfor _, cookie := range r.testSuite.Client.Jar.Cookies(r.Request.URL) {\n\t\tif cookie.Name == sessionCookieName {\n\t\t\tr.testSuite.Session = getSessionFromCookie(cookie)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n\/\/ Create a websocket connection to the given path and return the connection\nfunc (t *TestSuite) WebSocket(path string) *websocket.Conn {\n\torigin := t.BaseUrl() + \"\/\"\n\turl := t.WebSocketUrl() + path\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ws\n}\n\nfunc (t *TestSuite) AssertOk() {\n\tt.AssertStatus(http.StatusOK)\n}\n\nfunc (t *TestSuite) AssertNotFound() {\n\tt.AssertStatus(http.StatusNotFound)\n}\n\nfunc (t *TestSuite) AssertStatus(status int) {\n\tif t.Response.StatusCode != status {\n\t\tpanic(fmt.Errorf(\"Status: (expected) %d != %d (actual)\", status, t.Response.StatusCode))\n\t}\n}\n\nfunc (t *TestSuite) AssertContentType(contentType string) {\n\tt.AssertHeader(\"Content-Type\", contentType)\n}\n\nfunc (t *TestSuite) AssertHeader(name, value string) {\n\tactual := t.Response.Header.Get(name)\n\tif actual != value {\n\t\tpanic(fmt.Errorf(\"Header %s: (expected) %s != %s (actual)\", name, value, actual))\n\t}\n}\n\nfunc (t *TestSuite) AssertEqual(expected, actual interface{}) {\n\tif !Equal(expected, actual) {\n\t\tpanic(fmt.Errorf(\"(expected) %v != %v (actual)\", expected, actual))\n\t}\n}\n\nfunc (t *TestSuite) AssertNotEqual(expected, actual interface{}) {\n\tif Equal(expected, actual) {\n\t\tpanic(fmt.Errorf(\"(expected) %v == %v (actual)\", expected, actual))\n\t}\n}\n\nfunc (t *TestSuite) Assert(exp bool) {\n\tt.Assertf(exp, \"Assertion failed\")\n}\n\nfunc (t *TestSuite) Assertf(exp bool, formatStr string, args ...interface{}) {\n\tif !exp {\n\t\tpanic(fmt.Errorf(formatStr, args...))\n\t}\n}\n\n\/\/ Assert that the response contains the given string.\nfunc (t *TestSuite) AssertContains(s string) {\n\tif !bytes.Contains(t.ResponseBody, []byte(s)) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response to contain %s\", s))\n\t}\n}\n\n\/\/ Assert that the response does not contain the given string.\nfunc (t *TestSuite) AssertNotContains(s string) {\n\tif bytes.Contains(t.ResponseBody, []byte(s)) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response not to contain %s\", s))\n\t}\n}\n\n\/\/ Assert that the response matches the given regular expression.\nfunc (t *TestSuite) AssertContainsRegex(regex string) {\n\tr := regexp.MustCompile(regex)\n\n\tif !r.Match(t.ResponseBody) {\n\t\tpanic(fmt.Errorf(\"Assertion failed. Expected response to match regexp %s\", regex))\n\t}\n}\n\nfunc createFormFile(writer *multipart.Writer, fieldname, filename string) {\n\t\/\/ Try to open the file.\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\t\/\/ Create a new form-data header with the provided field name and file name.\n\t\/\/ Determine Content-Type of the file by its extension.\n\th := textproto.MIMEHeader{}\n\th.Set(\"Content-Disposition\", fmt.Sprintf(\n\t\t`form-data; name=\"%s\"; filename=\"%s\"`,\n\t\tescapeQuotes(fieldname),\n\t\tescapeQuotes(filepath.Base(filename)),\n\t))\n\th.Set(\"Content-Type\", \"application\/octet-stream\")\n\tif ct := mime.TypeByExtension(filepath.Ext(filename)); ct != \"\" {\n\t\th.Set(\"Content-Type\", ct)\n\t}\n\tpart, err := writer.CreatePart(h)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Copy the content of the file we have opened not reading the whole\n\t\/\/ file into memory.\n\t_, err = io.Copy(part, file)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar quoteEscaper = strings.NewReplacer(\"\\\\\", \"\\\\\\\\\", `\"`, \"\\\\\\\"\")\n\n\/\/ This function was borrowed from mime\/multipart package.\nfunc escapeQuotes(s string) string {\n\treturn quoteEscaper.Replace(s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ go:generate go-bindata -o bindata.go template mapconfig\npackage main\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"log\"\n\t\"sort\"\n\t\"text\/template\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/achiku\/varfmt\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst pgLoadColumnDef = `\nSELECT\n a.attnum AS field_ordinal,\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type,\n a.attnotnull AS not_null,\n COALESCE(pg_get_expr(ad.adbin, ad.adrelid), '') AS default_value,\n COALESCE(ct.contype = 'p', false) AS is_primary_key,\n CASE WHEN a.atttypid = ANY ('{int,int8,int2}'::regtype[])\n AND EXISTS (\n SELECT 1 FROM pg_attrdef ad\n WHERE ad.adrelid = a.attrelid\n AND ad.adnum = a.attnum\n AND ad.adsrc = 'nextval('''\n || (pg_get_serial_sequence (a.attrelid::regclass::text\n , a.attname))::regclass\n || '''::regclass)'\n )\n THEN CASE a.atttypid\n WHEN 'int'::regtype THEN 'serial'\n WHEN 'int8'::regtype THEN 'bigserial'\n WHEN 'int2'::regtype THEN 'smallserial'\n END\n ELSE format_type(a.atttypid, a.atttypmod)\n END AS data_type\nFROM pg_attribute a\nJOIN ONLY pg_class c ON c.oid = a.attrelid\nJOIN ONLY pg_namespace n ON n.oid = c.relnamespace\nLEFT JOIN pg_constraint ct ON ct.conrelid = c.oid\nAND a.attnum = ANY(ct.conkey) AND ct.contype IN ('p', 'u')\nLEFT JOIN pg_attrdef ad ON ad.adrelid = c.oid AND ad.adnum = a.attnum\nWHERE a.attisdropped = false\nAND n.nspname = $1\nAND c.relname = $2\nAND a.attnum > 0\nORDER BY a.attnum\n`\n\nconst pgLoadTableDef = `\nSELECT\nc.relkind AS type,\nc.relname AS table_name\nFROM pg_class c\nJOIN ONLY pg_namespace n ON n.oid = c.relnamespace\nWHERE n.nspname = $1\nAND c.relkind = 'r'\n`\n\n\/\/ TypeMap go\/db type map struct\ntype TypeMap struct {\n\tDBTypes []string `toml:\"db_types\"`\n\tNotNullGoType string `toml:\"notnull_go_type\"`\n\tNullableGoType string `toml:\"nullable_go_type\"`\n}\n\n\/\/ AutoKeyMap auto generating key config\ntype AutoKeyMap struct {\n\tTypes []string `toml:\"db_types\"`\n}\n\n\/\/ PgTypeMapConfig go\/db type map struct toml config\ntype PgTypeMapConfig map[string]TypeMap\n\n\/\/ PgTable postgres table\ntype PgTable struct {\n\tSchema string\n\tName string\n\tDataType string\n\tAutoGenPk bool\n\tPrimaryKeys []*PgColumn\n\tColumns []*PgColumn\n}\n\nvar autoGenKeyCfg = &AutoKeyMap{\n\tTypes: []string{\"serial\", \"bigserial\", \"UUID\"},\n}\n\nfunc (t *PgTable) setPrimaryKeyInfo(cfg *AutoKeyMap) {\n\tt.AutoGenPk = false\n\tfor _, c := range t.Columns {\n\t\tif c.IsPrimaryKey {\n\t\t\tt.PrimaryKeys = append(t.PrimaryKeys, c)\n\t\t\tfor _, typ := range cfg.Types {\n\t\t\t\tif c.DDLType == typ {\n\t\t\t\t\tt.AutoGenPk = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ PgColumn postgres columns\ntype PgColumn struct {\n\tFieldOrdinal int\n\tName string\n\tDataType string\n\tDDLType string\n\tNotNull bool\n\tDefaultValue sql.NullString\n\tIsPrimaryKey bool\n}\n\n\/\/ Struct go struct\ntype Struct struct {\n\tName string\n\tTable *PgTable\n\tComment string\n\tFields []*StructField\n}\n\n\/\/ StructTmpl go struct passed to template\ntype StructTmpl struct {\n\tStruct *Struct\n}\n\n\/\/ StructField go struct field\ntype StructField struct {\n\tName string\n\tType string\n\tTag string\n\tColumn *PgColumn\n}\n\n\/\/ PgLoadTypeMapFromFile load type map from toml file\nfunc PgLoadTypeMapFromFile(filePath string) (*PgTypeMapConfig, error) {\n\tvar conf PgTypeMapConfig\n\tif _, err := toml.DecodeFile(filePath, &conf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"faild to parse config file\")\n\t}\n\treturn &conf, nil\n}\n\n\/\/ PgLoadColumnDef load Postgres column definition\nfunc PgLoadColumnDef(db Queryer, schema string, table string) ([]*PgColumn, error) {\n\tcolDefs, err := db.Query(pgLoadColumnDef, schema, table)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load table def\")\n\t}\n\n\tcols := []*PgColumn{}\n\tfor colDefs.Next() {\n\t\tc := &PgColumn{}\n\t\terr := colDefs.Scan(\n\t\t\t&c.FieldOrdinal,\n\t\t\t&c.Name,\n\t\t\t&c.DataType,\n\t\t\t&c.NotNull,\n\t\t\t&c.DefaultValue,\n\t\t\t&c.IsPrimaryKey,\n\t\t\t&c.DDLType,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to scan\")\n\t\t}\n\t\tcols = append(cols, c)\n\t}\n\treturn cols, nil\n}\n\n\/\/ PgLoadTableDef load Postgres table definition\nfunc PgLoadTableDef(db Queryer, schema string) ([]*PgTable, error) {\n\ttbDefs, err := db.Query(pgLoadTableDef, schema)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load table def\")\n\t}\n\ttbs := []*PgTable{}\n\tfor tbDefs.Next() {\n\t\tt := &PgTable{Schema: schema}\n\t\terr := tbDefs.Scan(\n\t\t\t&t.DataType,\n\t\t\t&t.Name,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to scan\")\n\t\t}\n\t\tcols, err := PgLoadColumnDef(db, schema, t.Name)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"failed to get columns of %s\", t.Name))\n\t\t}\n\t\tt.Columns = cols\n\t\ttbs = append(tbs, t)\n\t}\n\treturn tbs, nil\n}\n\nfunc contains(v string, l []string) bool {\n\tsort.Strings(l)\n\ti := sort.SearchStrings(l, v)\n\tif i < len(l) && l[i] == v {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ PgConvertType converts type\nfunc PgConvertType(col *PgColumn, typeCfg *PgTypeMapConfig) string {\n\tcfg := map[string]TypeMap(*typeCfg)\n\ttyp := cfg[\"default\"].NotNullGoType\n\tfor _, v := range cfg {\n\t\tif contains(col.DataType, v.DBTypes) {\n\t\t\tif col.NotNull {\n\t\t\t\treturn v.NotNullGoType\n\t\t\t}\n\t\t\treturn v.NullableGoType\n\t\t}\n\t}\n\treturn typ\n}\n\n\/\/ PgColToField converts pg column to go struct field\nfunc PgColToField(col *PgColumn, typeCfg *PgTypeMapConfig) (*StructField, error) {\n\tstfType := PgConvertType(col, typeCfg)\n\tstf := &StructField{\n\t\tName: varfmt.PublicVarName(col.Name),\n\t\tType: stfType,\n\t\tColumn: col,\n\t}\n\treturn stf, nil\n}\n\n\/\/ PgTableToStruct converts table def to go struct\nfunc PgTableToStruct(t *PgTable, typeCfg *PgTypeMapConfig, keyConfig *AutoKeyMap) (*Struct, error) {\n\tt.setPrimaryKeyInfo(keyConfig)\n\ts := &Struct{\n\t\tName: varfmt.PublicVarName(t.Name),\n\t\tTable: t,\n\t}\n\tvar fs []*StructField\n\tfor _, c := range t.Columns {\n\t\tf, err := PgColToField(c, typeCfg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"faield to convert col to field\")\n\t\t}\n\t\tfs = append(fs, f)\n\t}\n\ts.Fields = fs\n\treturn s, nil\n}\n\n\/\/ PgExecuteStructTmpl execute struct template with *Struct\nfunc PgExecuteStructTmpl(st *StructTmpl, path string) ([]byte, error) {\n\tvar src []byte\n\td, err := Asset(path)\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"failed to load asset\")\n\t}\n\ttpl, err := template.New(\"struct\").Funcs(tmplFuncMap).Parse(string(d))\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"failed to parse template\")\n\t}\n\tbuf := new(bytes.Buffer)\n\tif err := tpl.Execute(buf, st); err != nil {\n\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to execute template:\\n%s\", src))\n\t}\n\tsrc, err = format.Source(buf.Bytes())\n\tif err != nil {\n\t\tlog.Printf(\"%s\", buf)\n\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to format code:\\n%s\", src))\n\t}\n\treturn src, nil\n}\n\n\/\/ PgCreateStruct creates struct from given schema\nfunc PgCreateStruct(db Queryer, schema, typeMapPath, pkgName string) ([]byte, error) {\n\tvar src []byte\n\tpkgDef := []byte(fmt.Sprintf(\"package %s\\n\\n\", pkgName))\n\tsrc = append(src, pkgDef...)\n\n\ttbls, err := PgLoadTableDef(db, schema)\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"faield to load table definitions\")\n\t}\n\tcfg := &PgTypeMapConfig{}\n\tif typeMapPath == \"\" {\n\t\tif _, err := toml.Decode(typeMap, cfg); err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to read type map\")\n\t\t}\n\t} else {\n\t\tif _, err := toml.DecodeFile(typeMapPath, cfg); err != nil {\n\t\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to decode type map file %s\", typeMapPath))\n\t\t}\n\t}\n\tfor _, tbl := range tbls {\n\t\tst, err := PgTableToStruct(tbl, cfg, autoGenKeyCfg)\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to convert table definition to struct\")\n\t\t}\n\t\ts, err := PgExecuteStructTmpl(&StructTmpl{Struct: st}, \"template\/struct.tmpl\")\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to execute template\")\n\t\t}\n\t\tm, err := PgExecuteStructTmpl(&StructTmpl{Struct: st}, \"template\/method.tmpl\")\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to execute template\")\n\t\t}\n\t\tsrc = append(src, s...)\n\t\tsrc = append(src, m...)\n\t}\n\treturn src, nil\n}\n<commit_msg>Order by table name<commit_after>\/\/ go:generate go-bindata -o bindata.go template mapconfig\npackage main\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"log\"\n\t\"sort\"\n\t\"text\/template\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/achiku\/varfmt\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst pgLoadColumnDef = `\nSELECT\n a.attnum AS field_ordinal,\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type,\n a.attnotnull AS not_null,\n COALESCE(pg_get_expr(ad.adbin, ad.adrelid), '') AS default_value,\n COALESCE(ct.contype = 'p', false) AS is_primary_key,\n CASE WHEN a.atttypid = ANY ('{int,int8,int2}'::regtype[])\n AND EXISTS (\n SELECT 1 FROM pg_attrdef ad\n WHERE ad.adrelid = a.attrelid\n AND ad.adnum = a.attnum\n AND ad.adsrc = 'nextval('''\n || (pg_get_serial_sequence (a.attrelid::regclass::text\n , a.attname))::regclass\n || '''::regclass)'\n )\n THEN CASE a.atttypid\n WHEN 'int'::regtype THEN 'serial'\n WHEN 'int8'::regtype THEN 'bigserial'\n WHEN 'int2'::regtype THEN 'smallserial'\n END\n ELSE format_type(a.atttypid, a.atttypmod)\n END AS data_type\nFROM pg_attribute a\nJOIN ONLY pg_class c ON c.oid = a.attrelid\nJOIN ONLY pg_namespace n ON n.oid = c.relnamespace\nLEFT JOIN pg_constraint ct ON ct.conrelid = c.oid\nAND a.attnum = ANY(ct.conkey) AND ct.contype IN ('p', 'u')\nLEFT JOIN pg_attrdef ad ON ad.adrelid = c.oid AND ad.adnum = a.attnum\nWHERE a.attisdropped = false\nAND n.nspname = $1\nAND c.relname = $2\nAND a.attnum > 0\nORDER BY a.attnum\n`\n\nconst pgLoadTableDef = `\nSELECT\nc.relkind AS type,\nc.relname AS table_name\nFROM pg_class c\nJOIN ONLY pg_namespace n ON n.oid = c.relnamespace\nWHERE n.nspname = $1\nAND c.relkind = 'r'\nORDER BY c.relname\n`\n\n\/\/ TypeMap go\/db type map struct\ntype TypeMap struct {\n\tDBTypes []string `toml:\"db_types\"`\n\tNotNullGoType string `toml:\"notnull_go_type\"`\n\tNullableGoType string `toml:\"nullable_go_type\"`\n}\n\n\/\/ AutoKeyMap auto generating key config\ntype AutoKeyMap struct {\n\tTypes []string `toml:\"db_types\"`\n}\n\n\/\/ PgTypeMapConfig go\/db type map struct toml config\ntype PgTypeMapConfig map[string]TypeMap\n\n\/\/ PgTable postgres table\ntype PgTable struct {\n\tSchema string\n\tName string\n\tDataType string\n\tAutoGenPk bool\n\tPrimaryKeys []*PgColumn\n\tColumns []*PgColumn\n}\n\nvar autoGenKeyCfg = &AutoKeyMap{\n\tTypes: []string{\"serial\", \"bigserial\", \"UUID\"},\n}\n\nfunc (t *PgTable) setPrimaryKeyInfo(cfg *AutoKeyMap) {\n\tt.AutoGenPk = false\n\tfor _, c := range t.Columns {\n\t\tif c.IsPrimaryKey {\n\t\t\tt.PrimaryKeys = append(t.PrimaryKeys, c)\n\t\t\tfor _, typ := range cfg.Types {\n\t\t\t\tif c.DDLType == typ {\n\t\t\t\t\tt.AutoGenPk = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ PgColumn postgres columns\ntype PgColumn struct {\n\tFieldOrdinal int\n\tName string\n\tDataType string\n\tDDLType string\n\tNotNull bool\n\tDefaultValue sql.NullString\n\tIsPrimaryKey bool\n}\n\n\/\/ Struct go struct\ntype Struct struct {\n\tName string\n\tTable *PgTable\n\tComment string\n\tFields []*StructField\n}\n\n\/\/ StructTmpl go struct passed to template\ntype StructTmpl struct {\n\tStruct *Struct\n}\n\n\/\/ StructField go struct field\ntype StructField struct {\n\tName string\n\tType string\n\tTag string\n\tColumn *PgColumn\n}\n\n\/\/ PgLoadTypeMapFromFile load type map from toml file\nfunc PgLoadTypeMapFromFile(filePath string) (*PgTypeMapConfig, error) {\n\tvar conf PgTypeMapConfig\n\tif _, err := toml.DecodeFile(filePath, &conf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"faild to parse config file\")\n\t}\n\treturn &conf, nil\n}\n\n\/\/ PgLoadColumnDef load Postgres column definition\nfunc PgLoadColumnDef(db Queryer, schema string, table string) ([]*PgColumn, error) {\n\tcolDefs, err := db.Query(pgLoadColumnDef, schema, table)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load table def\")\n\t}\n\n\tcols := []*PgColumn{}\n\tfor colDefs.Next() {\n\t\tc := &PgColumn{}\n\t\terr := colDefs.Scan(\n\t\t\t&c.FieldOrdinal,\n\t\t\t&c.Name,\n\t\t\t&c.DataType,\n\t\t\t&c.NotNull,\n\t\t\t&c.DefaultValue,\n\t\t\t&c.IsPrimaryKey,\n\t\t\t&c.DDLType,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to scan\")\n\t\t}\n\t\tcols = append(cols, c)\n\t}\n\treturn cols, nil\n}\n\n\/\/ PgLoadTableDef load Postgres table definition\nfunc PgLoadTableDef(db Queryer, schema string) ([]*PgTable, error) {\n\ttbDefs, err := db.Query(pgLoadTableDef, schema)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load table def\")\n\t}\n\ttbs := []*PgTable{}\n\tfor tbDefs.Next() {\n\t\tt := &PgTable{Schema: schema}\n\t\terr := tbDefs.Scan(\n\t\t\t&t.DataType,\n\t\t\t&t.Name,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to scan\")\n\t\t}\n\t\tcols, err := PgLoadColumnDef(db, schema, t.Name)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"failed to get columns of %s\", t.Name))\n\t\t}\n\t\tt.Columns = cols\n\t\ttbs = append(tbs, t)\n\t}\n\treturn tbs, nil\n}\n\nfunc contains(v string, l []string) bool {\n\tsort.Strings(l)\n\ti := sort.SearchStrings(l, v)\n\tif i < len(l) && l[i] == v {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ PgConvertType converts type\nfunc PgConvertType(col *PgColumn, typeCfg *PgTypeMapConfig) string {\n\tcfg := map[string]TypeMap(*typeCfg)\n\ttyp := cfg[\"default\"].NotNullGoType\n\tfor _, v := range cfg {\n\t\tif contains(col.DataType, v.DBTypes) {\n\t\t\tif col.NotNull {\n\t\t\t\treturn v.NotNullGoType\n\t\t\t}\n\t\t\treturn v.NullableGoType\n\t\t}\n\t}\n\treturn typ\n}\n\n\/\/ PgColToField converts pg column to go struct field\nfunc PgColToField(col *PgColumn, typeCfg *PgTypeMapConfig) (*StructField, error) {\n\tstfType := PgConvertType(col, typeCfg)\n\tstf := &StructField{\n\t\tName: varfmt.PublicVarName(col.Name),\n\t\tType: stfType,\n\t\tColumn: col,\n\t}\n\treturn stf, nil\n}\n\n\/\/ PgTableToStruct converts table def to go struct\nfunc PgTableToStruct(t *PgTable, typeCfg *PgTypeMapConfig, keyConfig *AutoKeyMap) (*Struct, error) {\n\tt.setPrimaryKeyInfo(keyConfig)\n\ts := &Struct{\n\t\tName: varfmt.PublicVarName(t.Name),\n\t\tTable: t,\n\t}\n\tvar fs []*StructField\n\tfor _, c := range t.Columns {\n\t\tf, err := PgColToField(c, typeCfg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"faield to convert col to field\")\n\t\t}\n\t\tfs = append(fs, f)\n\t}\n\ts.Fields = fs\n\treturn s, nil\n}\n\n\/\/ PgExecuteStructTmpl execute struct template with *Struct\nfunc PgExecuteStructTmpl(st *StructTmpl, path string) ([]byte, error) {\n\tvar src []byte\n\td, err := Asset(path)\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"failed to load asset\")\n\t}\n\ttpl, err := template.New(\"struct\").Funcs(tmplFuncMap).Parse(string(d))\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"failed to parse template\")\n\t}\n\tbuf := new(bytes.Buffer)\n\tif err := tpl.Execute(buf, st); err != nil {\n\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to execute template:\\n%s\", src))\n\t}\n\tsrc, err = format.Source(buf.Bytes())\n\tif err != nil {\n\t\tlog.Printf(\"%s\", buf)\n\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to format code:\\n%s\", src))\n\t}\n\treturn src, nil\n}\n\n\/\/ PgCreateStruct creates struct from given schema\nfunc PgCreateStruct(db Queryer, schema, typeMapPath, pkgName string) ([]byte, error) {\n\tvar src []byte\n\tpkgDef := []byte(fmt.Sprintf(\"package %s\\n\\n\", pkgName))\n\tsrc = append(src, pkgDef...)\n\n\ttbls, err := PgLoadTableDef(db, schema)\n\tif err != nil {\n\t\treturn src, errors.Wrap(err, \"faield to load table definitions\")\n\t}\n\tcfg := &PgTypeMapConfig{}\n\tif typeMapPath == \"\" {\n\t\tif _, err := toml.Decode(typeMap, cfg); err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to read type map\")\n\t\t}\n\t} else {\n\t\tif _, err := toml.DecodeFile(typeMapPath, cfg); err != nil {\n\t\t\treturn src, errors.Wrap(err, fmt.Sprintf(\"failed to decode type map file %s\", typeMapPath))\n\t\t}\n\t}\n\tfor _, tbl := range tbls {\n\t\tst, err := PgTableToStruct(tbl, cfg, autoGenKeyCfg)\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to convert table definition to struct\")\n\t\t}\n\t\ts, err := PgExecuteStructTmpl(&StructTmpl{Struct: st}, \"template\/struct.tmpl\")\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to execute template\")\n\t\t}\n\t\tm, err := PgExecuteStructTmpl(&StructTmpl{Struct: st}, \"template\/method.tmpl\")\n\t\tif err != nil {\n\t\t\treturn src, errors.Wrap(err, \"faield to execute template\")\n\t\t}\n\t\tsrc = append(src, s...)\n\t\tsrc = append(src, m...)\n\t}\n\treturn src, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package dht\n\nimport (\n\t\"crypto\"\n\t_ \"crypto\/sha1\"\n\t\"errors\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\t\"github.com\/anacrolix\/torrent\/iplist\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\n\t\"github.com\/anacrolix\/dht\/krpc\"\n)\n\nfunc defaultQueryResendDelay() time.Duration {\n\treturn jitterDuration(5*time.Second, time.Second)\n}\n\n\/\/ Uniquely identifies a transaction to us.\ntype transactionKey struct {\n\tRemoteAddr string \/\/ host:port\n\tT string \/\/ The KRPC transaction ID.\n}\n\n\/\/ ServerConfig allows to set up a configuration of the `Server` instance\n\/\/ to be created with NewServer\ntype ServerConfig struct {\n\t\/\/ Set NodeId Manually. Caller must ensure that if NodeId does not conform\n\t\/\/ to DHT Security Extensions, that NoSecurity is also set.\n\tNodeId [20]byte\n\tConn net.PacketConn\n\t\/\/ Don't respond to queries from other nodes.\n\tPassive bool\n\tStartingNodes func() ([]Addr, error)\n\t\/\/ Disable the DHT security extension:\n\t\/\/ http:\/\/www.libtorrent.org\/dht_sec.html.\n\tNoSecurity bool\n\t\/\/ Initial IP blocklist to use. Applied before serving and bootstrapping\n\t\/\/ begins.\n\tIPBlocklist iplist.Ranger\n\t\/\/ Used to secure the server's ID. Defaults to the Conn's LocalAddr(). Set\n\t\/\/ to the IP that remote nodes will see, as that IP is what they'll use to\n\t\/\/ validate our ID.\n\tPublicIP net.IP\n\n\t\/\/ Hook received queries. Return false if you don't want to propagate to\n\t\/\/ the default handlers.\n\tOnQuery func(query *krpc.Msg, source net.Addr) (propagate bool)\n\t\/\/ Called when a peer successfully announces to us.\n\tOnAnnouncePeer func(infoHash metainfo.Hash, peer Peer)\n\t\/\/ How long to wait before resending queries that haven't received a\n\t\/\/ response. Defaults to a random value between 4.5 and 5.5s.\n\tQueryResendDelay func() time.Duration\n\t\/\/ TODO: Expose Peers, to return NodeInfo for received get_peers queries.\n}\n\n\/\/ ServerStats instance is returned by Server.Stats() and stores Server metrics\ntype ServerStats struct {\n\t\/\/ Count of nodes in the node table that responded to our last query or\n\t\/\/ haven't yet been queried.\n\tGoodNodes int\n\t\/\/ Count of nodes in the node table.\n\tNodes int\n\t\/\/ Transactions awaiting a response.\n\tOutstandingTransactions int\n\t\/\/ Individual announce_peer requests that got a success response.\n\tConfirmedAnnounces int\n\t\/\/ Nodes that have been blocked.\n\tBadNodes uint\n}\n\nfunc jitterDuration(average time.Duration, plusMinus time.Duration) time.Duration {\n\treturn average - plusMinus\/2 + time.Duration(rand.Int63n(int64(plusMinus)))\n}\n\ntype Peer = krpc.NodeAddr\n\nfunc GlobalBootstrapAddrs() (addrs []Addr, err error) {\n\tfor _, s := range []string{\n\t\t\"router.utorrent.com:6881\",\n\t\t\"router.bittorrent.com:6881\",\n\t\t\"dht.transmissionbt.com:6881\",\n\t\t\"dht.aelitis.com:6881\", \/\/ Vuze\n\t} {\n\t\thost, port, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thostAddrs, err := net.LookupHost(host)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error looking up %q: %v\", s, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, a := range hostAddrs {\n\t\t\tua, err := net.ResolveUDPAddr(\"udp\", net.JoinHostPort(a, port))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error resolving %q: %v\", a, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrs = append(addrs, NewAddr(ua))\n\t\t}\n\t}\n\tif len(addrs) == 0 {\n\t\terr = errors.New(\"nothing resolved\")\n\t}\n\treturn\n}\n\nfunc RandomNodeID() (id [20]byte) {\n\trand.Read(id[:])\n\treturn\n}\n\nfunc MakeDeterministicNodeID(public net.Addr) (id [20]byte) {\n\th := crypto.SHA1.New()\n\th.Write([]byte(public.String()))\n\th.Sum(id[:0:20])\n\tSecureNodeId(&id, missinggo.AddrIP(public))\n\treturn\n}\n<commit_msg>RandomNodeId wasn't cryptographically random<commit_after>package dht\n\nimport (\n\t\"crypto\"\n\tcrand \"crypto\/rand\"\n\t_ \"crypto\/sha1\"\n\t\"errors\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\t\"github.com\/anacrolix\/torrent\/iplist\"\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\n\t\"github.com\/anacrolix\/dht\/krpc\"\n)\n\nfunc defaultQueryResendDelay() time.Duration {\n\treturn jitterDuration(5*time.Second, time.Second)\n}\n\n\/\/ Uniquely identifies a transaction to us.\ntype transactionKey struct {\n\tRemoteAddr string \/\/ host:port\n\tT string \/\/ The KRPC transaction ID.\n}\n\n\/\/ ServerConfig allows to set up a configuration of the `Server` instance\n\/\/ to be created with NewServer\ntype ServerConfig struct {\n\t\/\/ Set NodeId Manually. Caller must ensure that if NodeId does not conform\n\t\/\/ to DHT Security Extensions, that NoSecurity is also set.\n\tNodeId [20]byte\n\tConn net.PacketConn\n\t\/\/ Don't respond to queries from other nodes.\n\tPassive bool\n\tStartingNodes func() ([]Addr, error)\n\t\/\/ Disable the DHT security extension:\n\t\/\/ http:\/\/www.libtorrent.org\/dht_sec.html.\n\tNoSecurity bool\n\t\/\/ Initial IP blocklist to use. Applied before serving and bootstrapping\n\t\/\/ begins.\n\tIPBlocklist iplist.Ranger\n\t\/\/ Used to secure the server's ID. Defaults to the Conn's LocalAddr(). Set\n\t\/\/ to the IP that remote nodes will see, as that IP is what they'll use to\n\t\/\/ validate our ID.\n\tPublicIP net.IP\n\n\t\/\/ Hook received queries. Return false if you don't want to propagate to\n\t\/\/ the default handlers.\n\tOnQuery func(query *krpc.Msg, source net.Addr) (propagate bool)\n\t\/\/ Called when a peer successfully announces to us.\n\tOnAnnouncePeer func(infoHash metainfo.Hash, peer Peer)\n\t\/\/ How long to wait before resending queries that haven't received a\n\t\/\/ response. Defaults to a random value between 4.5 and 5.5s.\n\tQueryResendDelay func() time.Duration\n\t\/\/ TODO: Expose Peers, to return NodeInfo for received get_peers queries.\n}\n\n\/\/ ServerStats instance is returned by Server.Stats() and stores Server metrics\ntype ServerStats struct {\n\t\/\/ Count of nodes in the node table that responded to our last query or\n\t\/\/ haven't yet been queried.\n\tGoodNodes int\n\t\/\/ Count of nodes in the node table.\n\tNodes int\n\t\/\/ Transactions awaiting a response.\n\tOutstandingTransactions int\n\t\/\/ Individual announce_peer requests that got a success response.\n\tConfirmedAnnounces int\n\t\/\/ Nodes that have been blocked.\n\tBadNodes uint\n}\n\nfunc jitterDuration(average time.Duration, plusMinus time.Duration) time.Duration {\n\treturn average - plusMinus\/2 + time.Duration(rand.Int63n(int64(plusMinus)))\n}\n\ntype Peer = krpc.NodeAddr\n\nfunc GlobalBootstrapAddrs() (addrs []Addr, err error) {\n\tfor _, s := range []string{\n\t\t\"router.utorrent.com:6881\",\n\t\t\"router.bittorrent.com:6881\",\n\t\t\"dht.transmissionbt.com:6881\",\n\t\t\"dht.aelitis.com:6881\", \/\/ Vuze\n\t} {\n\t\thost, port, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thostAddrs, err := net.LookupHost(host)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error looking up %q: %v\", s, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, a := range hostAddrs {\n\t\t\tua, err := net.ResolveUDPAddr(\"udp\", net.JoinHostPort(a, port))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error resolving %q: %v\", a, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrs = append(addrs, NewAddr(ua))\n\t\t}\n\t}\n\tif len(addrs) == 0 {\n\t\terr = errors.New(\"nothing resolved\")\n\t}\n\treturn\n}\n\nfunc RandomNodeID() (id [20]byte) {\n\tcrand.Read(id[:])\n\treturn\n}\n\nfunc MakeDeterministicNodeID(public net.Addr) (id [20]byte) {\n\th := crypto.SHA1.New()\n\th.Write([]byte(public.String()))\n\th.Sum(id[:0:20])\n\tSecureNodeId(&id, missinggo.AddrIP(public))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage chess is a go library designed to accomplish the following:\n - chess game \/ turn management\n - move validation\n - PGN encoding \/ decoding\n - FEN encoding \/ decoding\n\nUsing Moves\n game := chess.NewGame()\n moves := game.ValidMoves()\n game.Move(moves[0])\n\nUsing Algebraic Notation\n game := chess.NewGame()\n game.MoveStr(\"e4\")\n\nUsing PGN\n pgn, _ := chess.PGN(pgnReader)\n game := chess.NewGame(pgn)\n\nUsing FEN\n fen, _ := chess.FEN(\"rnbqkbnr\/pppppppp\/8\/8\/8\/8\/PPPPPPPP\/RNBQKBNR w KQkq - 0 1\")\n game := chess.NewGame(fen)\n\nRandom Game\n package main\n\n import (\n \"fmt\"\n\n \"github.com\/notnil\/chess\"\n )\n\n func main() {\n game := chess.NewGame()\n \/\/ generate moves until game is over\n for game.Outcome() == chess.NoOutcome {\n \/\/ select a random move\n moves := game.ValidMoves()\n move := moves[rand.Intn(len(moves))]\n game.Move(move)\n }\n \/\/ print outcome and game PGN\n fmt.Println(game.Position().Board().Draw())\n fmt.Printf(\"Game completed. %s by %s.\\n\", game.Outcome(), game.Method())\n fmt.Println(game.String())\n }\n*\/\npackage chess\n<commit_msg>Added a missing dependency in the documentation<commit_after>\/*\nPackage chess is a go library designed to accomplish the following:\n - chess game \/ turn management\n - move validation\n - PGN encoding \/ decoding\n - FEN encoding \/ decoding\n\nUsing Moves\n game := chess.NewGame()\n moves := game.ValidMoves()\n game.Move(moves[0])\n\nUsing Algebraic Notation\n game := chess.NewGame()\n game.MoveStr(\"e4\")\n\nUsing PGN\n pgn, _ := chess.PGN(pgnReader)\n game := chess.NewGame(pgn)\n\nUsing FEN\n fen, _ := chess.FEN(\"rnbqkbnr\/pppppppp\/8\/8\/8\/8\/PPPPPPPP\/RNBQKBNR w KQkq - 0 1\")\n game := chess.NewGame(fen)\n\nRandom Game\n package main\n\n import (\n \"fmt\"\n \"github.com\/notnil\/chess\"\n \"math\/rand\"\n )\n\n func main() {\n game := chess.NewGame()\n \/\/ generate moves until game is over\n for game.Outcome() == chess.NoOutcome {\n \/\/ select a random move\n moves := game.ValidMoves()\n move := moves[rand.Intn(len(moves))]\n game.Move(move)\n }\n \/\/ print outcome and game PGN\n fmt.Println(game.Position().Board().Draw())\n fmt.Printf(\"Game completed. %s by %s.\\n\", game.Outcome(), game.Method())\n fmt.Println(game.String())\n }\n*\/\npackage chess\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage fasthttp provides fast HTTP server and client API.\n\nFasthttp provides the following features:\n\n * Optimized for speed. Fasthttp is faster than standard net\/http package.\n * Optimized for low memory usage.\n * Easily handles more than 1M concurrent keep-alive connections on modern\n hardware.\n * Server supports requests' pipelining. Multiple requests may be read from\n a single network packet and multiple responses may be sent in a single\n network packet. This may be useful for highly loaded REST services.\n * Server is packed with the following anti-DoS limits:\n\n * The number of concurrent connections.\n * The number of concurrent connections per client IP.\n * The number of requests per connection.\n * Request read timeout.\n * Response write timeout.\n * Maximum request header size.\n\n * A lot of useful info is exposed to request handler:\n\n * Server and client address.\n * Per-request logger.\n * Unique request id.\n * Request start time.\n * Connection start time.\n * Request sequence number for the current connection.\n\n * Client supports automatic retry on idempotent requests' failure.\n * It is quite easy building custom (and fast) client and server\n implementations using fasthttp API.\n*\/\npackage fasthttp\n<commit_msg>Updated docs<commit_after>\/*\nPackage fasthttp provides fast HTTP server and client API.\n\nFasthttp provides the following features:\n\n * Optimized for speed. Easily handles more than 100K qps and more than 1M\n concurrent keep-alive connections on modern hardware.\n * Optimized for low memory usage.\n * Server supports requests' pipelining. Multiple requests may be read from\n a single network packet and multiple responses may be sent in a single\n network packet. This may be useful for highly loaded REST services.\n * Server provides the following anti-DoS limits:\n\n * The number of concurrent connections.\n * The number of concurrent connections per client IP.\n * The number of requests per connection.\n * Request read timeout.\n * Response write timeout.\n * Maximum request header size.\n * Maximum request execution time.\n\n * A lot of additional useful info is exposed to request handler:\n\n * Server and client address.\n * Per-request logger.\n * Unique request id.\n * Request start time.\n * Connection start time.\n * Request sequence number for the current connection.\n\n * Client supports automatic retry on idempotent requests' failure.\n * Fasthttp API is designed with the ability to extend existing client\n and server implementations or to write custom client and server\n implementations from scratch.\n*\/\npackage fasthttp\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage complete provides a tool for bash writing bash completion in go, and bash completion for the go command line.\n\nWriting bash completion scripts is a hard work. This package provides an easy way\nto create bash completion scripts for any command, and also an easy way to install\/uninstall\nthe completion of the command.\n\n## go command bash completion\n\nIn [gocomplete](.\/cmd\/gocomplete) there is an example for bash completion for the `go` command line.\n\nThis is an example that uses the `complete` package on the `go` command - the `complete` package\ncan also be used to implement any completions, see [Usage](#usage).\n\n### Install\n\n1. Type in your shell:\n\n\tgo get -u github.com\/posener\/complete\/gocomplete\n\tgocomplete -install\n\n2. Restart your shell\n\nUninstall by `gocomplete -uninstall`\n\n### Features\n\n- Complete `go` command, including sub commands and all flags.\n- Complete packages names or `.go` files when necessary.\n- Complete test names after `-run` flag.\n\n## complete package\n\nSupported shells:\n\n- [x] bash\n- [x] zsh\n- [x] fish\n\n### Usage\n\nAssuming you have program called `run` and you want to have bash completion\nfor it, meaning, if you type `run` then space, then press the `Tab` key,\nthe shell will suggest relevant complete options.\n\nIn that case, we will create a program called `runcomplete`, a go program,\nwith a `func main()` and so, that will make the completion of the `run`\nprogram. Once the `runcomplete` will be in a binary form, we could\n`runcomplete -install` and that will add to our shell all the bash completion\noptions for `run`.\n\nSo here it is:\n\n\timport \"github.com\/posener\/complete\"\n\n\tfunc main() {\n\n\t\t\/\/ create a Command object, that represents the command we want\n\t\t\/\/ to complete.\n\t\trun := complete.Command{\n\n\t\t\t\/\/ Sub defines a list of sub commands of the program,\n\t\t\t\/\/ this is recursive, since every command is of type command also.\n\t\t\tSub: complete.Commands{\n\n\t\t\t\t\/\/ add a build sub command\n\t\t\t\t\"build\": complete.Command {\n\n\t\t\t\t\t\/\/ define flags of the build sub command\n\t\t\t\t\tFlags: complete.Flags{\n\t\t\t\t\t\t\/\/ build sub command has a flag '-cpus', which\n\t\t\t\t\t\t\/\/ expects number of cpus after it. in that case\n\t\t\t\t\t\t\/\/ anything could complete this flag.\n\t\t\t\t\t\t\"-cpus\": complete.PredictAnything,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ define flags of the 'run' main command\n\t\t\tFlags: complete.Flags{\n\t\t\t\t\/\/ a flag -o, which expects a file ending with .out after\n\t\t\t\t\/\/ it, the tab completion will auto complete for files matching\n\t\t\t\t\/\/ the given pattern.\n\t\t\t\t\"-o\": complete.PredictFiles(\"*.out\"),\n\t\t\t},\n\n\t\t\t\/\/ define global flags of the 'run' main command\n\t\t\t\/\/ those will show up also when a sub command was entered in the\n\t\t\t\/\/ command line\n\t\t\tGlobalFlags: complete.Flags{\n\n\t\t\t\t\/\/ a flag '-h' which does not expects anything after it\n\t\t\t\t\"-h\": complete.PredictNothing,\n\t\t\t},\n\t\t}\n\n\t\t\/\/ run the command completion, as part of the main() function.\n\t\t\/\/ this triggers the autocompletion when needed.\n\t\t\/\/ name must be exactly as the binary that we want to complete.\n\t\tcomplete.New(\"run\", run).Run()\n\t}\n\n### Self completing program\n\nIn case that the program that we want to complete is written in go we\ncan make it self completing.\n\nHere is an [example](.\/example\/self\/main.go)\n\n*\/\npackage complete\n<commit_msg>fix doc<commit_after>\/*\nPackage complete provides a tool for bash writing bash completion in go, and bash completion for the go command line.\n\nWriting bash completion scripts is a hard work. This package provides an easy way\nto create bash completion scripts for any command, and also an easy way to install\/uninstall\nthe completion of the command.\n\ngo command bash completion\n\nIn [gocomplete](.\/cmd\/gocomplete) there is an example for bash completion for the `go` command line.\n\nThis is an example that uses the `complete` package on the `go` command - the `complete` package\ncan also be used to implement any completions, see [Usage](#usage).\n\nInstall\n\n1. Type in your shell:\n\n\tgo get -u github.com\/posener\/complete\/gocomplete\n\tgocomplete -install\n\n2. Restart your shell\n\nUninstall by `gocomplete -uninstall`\n\n\nFeatures\n\n- Complete `go` command, including sub commands and all flags.\n- Complete packages names or `.go` files when necessary.\n- Complete test names after `-run` flag.\n\ncomplete package\n\nSupported shells:\n\n- [x] bash\n- [x] zsh\n- [x] fish\n\nUsage\n\nAssuming you have program called `run` and you want to have bash completion\nfor it, meaning, if you type `run` then space, then press the `Tab` key,\nthe shell will suggest relevant complete options.\n\nIn that case, we will create a program called `runcomplete`, a go program,\nwith a `func main()` and so, that will make the completion of the `run`\nprogram. Once the `runcomplete` will be in a binary form, we could\n`runcomplete -install` and that will add to our shell all the bash completion\noptions for `run`.\n\nSo here it is:\n\n\timport \"github.com\/posener\/complete\"\n\n\tfunc main() {\n\n\t\t\/\/ create a Command object, that represents the command we want\n\t\t\/\/ to complete.\n\t\trun := complete.Command{\n\n\t\t\t\/\/ Sub defines a list of sub commands of the program,\n\t\t\t\/\/ this is recursive, since every command is of type command also.\n\t\t\tSub: complete.Commands{\n\n\t\t\t\t\/\/ add a build sub command\n\t\t\t\t\"build\": complete.Command {\n\n\t\t\t\t\t\/\/ define flags of the build sub command\n\t\t\t\t\tFlags: complete.Flags{\n\t\t\t\t\t\t\/\/ build sub command has a flag '-cpus', which\n\t\t\t\t\t\t\/\/ expects number of cpus after it. in that case\n\t\t\t\t\t\t\/\/ anything could complete this flag.\n\t\t\t\t\t\t\"-cpus\": complete.PredictAnything,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ define flags of the 'run' main command\n\t\t\tFlags: complete.Flags{\n\t\t\t\t\/\/ a flag -o, which expects a file ending with .out after\n\t\t\t\t\/\/ it, the tab completion will auto complete for files matching\n\t\t\t\t\/\/ the given pattern.\n\t\t\t\t\"-o\": complete.PredictFiles(\"*.out\"),\n\t\t\t},\n\n\t\t\t\/\/ define global flags of the 'run' main command\n\t\t\t\/\/ those will show up also when a sub command was entered in the\n\t\t\t\/\/ command line\n\t\t\tGlobalFlags: complete.Flags{\n\n\t\t\t\t\/\/ a flag '-h' which does not expects anything after it\n\t\t\t\t\"-h\": complete.PredictNothing,\n\t\t\t},\n\t\t}\n\n\t\t\/\/ run the command completion, as part of the main() function.\n\t\t\/\/ this triggers the autocompletion when needed.\n\t\t\/\/ name must be exactly as the binary that we want to complete.\n\t\tcomplete.New(\"run\", run).Run()\n\t}\n\nSelf completing program\n\nIn case that the program that we want to complete is written in go we\ncan make it self completing.\nHere is an [example](.\/example\/self\/main.go)\n\n*\/\npackage complete\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package configeur is an easy to use multi-layer configuration system.\n\/\/\n\/\/ configeur makes use of Checkers, which are used to retrieve values from their respective data sources.\n\/\/ There are three built in Checkers, Environment, Flag and JSON. Environment retrieves environment variables.\n\/\/ Flag retrieves variables within the flags of a command. JSON retrieves values from a JSON file\/blob. Checkers\n\/\/ can be essentially thought of as \"configuration middlewear\", in fact parts of the package API was inspired by\n\/\/ negroni (https:\/\/github.com\/codegangsta\/negroni, special thanks to codegangsta for the awesome package!) and the\n\/\/ standard libraries flag package.\n\/\/\n\/\/ It is very easy to create your own Checkers, all they have to do is satisfy the Checker interface.\n\/\/ That is a, Int method, String method and a Bool method. To retrieve their respective data types.\n\/\/ If you do create your own Checkers I would be more than happy to add a link to them somewhere in the repository.\n\/\/\n\/\/ A standard use-case can be found in the example folder and a getting started guide can be found on github (http:\/\/github.com\/paked\/configeur).\npackage configeur\n<commit_msg>Update information in README<commit_after>\/\/ Package configeur is an easy to use multi-layer configuration system.\n\/\/\n\/\/ Examples can be found in the example folder (http:\/\/github.com\/paked\/configeur\/blob\/master\/examples\/)\n\/\/ as well as a getting started guide in the main README file (http:\/\/github.com\/paked\/configeur).\n\/\/\n\/\/ configeur makes use of Checkers, which are used to retrieve values from their respective data sources.\n\/\/ There are three built in Checkers, Environment, Flag and JSON. Environment retrieves environment variables.\n\/\/ Flag retrieves variables within the flags of a command. JSON retrieves values from a JSON file\/blob. Checkers\n\/\/ can be essentially thought of as \"middlewear for configuration\", in fact parts of the package API was inspired by\n\/\/ negroni (https:\/\/github.com\/codegangsta\/negroni, the awesome net\/http middlewear manager) and the standard library's flag package.\n\/\/\n\/\/ It is very easy to create your own Checkers, all they have to do is satisfy the Checker interface.\n\/\/ type Checker interface {\n\/\/\t Int(name string) (int, error)\n\/\/ \t Bool(name string) (int, error)\n\/\/ \t String(name string) (string, error)\n\/\/ }\n\/\/ That is an, Int method, String method and a Bool method. These functions are used to retrieve their respective data types.\n\/\/ If you do create your own Checkers I would be more than happy to add a link to the README in the github repository.\n\/\/\npackage configeur\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Marcel Gotsch. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package goserv provides a fast, easy and minimalistic framework for\n\/\/ web applications in Go.\n\/\/\n\/\/\npackage goserv\n<commit_msg>Add path documentation and more to doc.go<commit_after>\/\/ Copyright 2016 Marcel Gotsch. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package goserv provides a fast, easy and minimalistic framework for\n\/\/ web applications in Go.\n\/\/\n\/\/ Getting Started\n\/\/\n\/\/ Every goserv application starts with a single instance of goserv.Server. This\n\/\/ instance is the entry point for all incoming requests.\n\/\/\n\/\/ server := goserv.NewServer()\n\/\/\n\/\/ To start handling paths you must register handlers to paths using one of the Server's\n\/\/ embedded Router functions, like Get.\n\/\/\n\/\/ server.Get(\"\/\", homeHandler)\n\/\/\n\/\/ The first argument in the Get() call is the path for which the\n\/\/ handler gets registered. To learn more about paths take a look at the Path section.\n\/\/ As the name of the function suggests the requests are only getting dispatched to the handler\n\/\/ if the request method is \"GET\". There are a lot more methods like this one available, just take\n\/\/ a look at the documentation of the Router.\n\/\/\n\/\/ At this point it is worth noting that goserv provides it's own Handler type using the\n\/\/ goserv.ResponseWriter and goserv.Request. This means that all http.Handlers cannot\n\/\/ be used with goserv without wrapping them in a goserv.Handler. To make this more convenient\n\/\/ a function called WrapHTTPHandler exists.\n\/\/\n\/\/ server.Get(\"\/\", WrapHTTPHandler(httpHomeHandler))\n\/\/\n\/\/ Sometimes it is useful to have handlers that are invoked all the time, also known as middleware.\n\/\/ To register middleware use the Use() function.\n\/\/\n\/\/ server.Use(accessLogger)\n\/\/\n\/\/ Path Syntax\n\/\/\n\/\/ All Routes and Routers are registered under a path. This path is matched against the path\n\/\/ of incoming requests.\n\/\/\n\/\/ A Route created from \"\/mypath\" will only match the request path if it is exactly\n\/\/ \"\/mypath\".\n\/\/\n\/\/ In case that the Route should match everything starting with \"\/mypath\" a wildcard\n\/\/ can be appended to the path, i.e.\n\/\/\t\/mypath*\n\/\/\n\/\/ The wildcard can be at any position in the path. It also possible to just use the wildcard\n\/\/ as path.\n\/\/\n\/\/ Sometimes it is necessary to capture values from parts of the request path, so called parameters.\n\/\/ To include parameters in a Route the path must contain a named parameter, e.g.\n\/\/\t\/users\/:user_id\n\/\/\n\/\/ Parameters always start with a \":\" after a \"\/\". The name (without the leading \":\") can contain\n\/\/ alphanumeric symbols as well as \"_\" and \"-\". The number of parameters in a Route is not limited, but\n\/\/ they must be separated by at least a single \"\/\".\n\/\/\t\/:value1\/:value2\n\/\/\n\/\/ Routers allow you to register handlers for each parameter in a Route. Each handler gets invoked\n\/\/ once per parameter and request. That means even though a Router may invoked multiple Routes the\n\/\/ parameter handlers are invoked only once.\n\/\/\n\/\/ router.Get(\"\/users\/:user_id\", handler)\n\/\/ router.Param(\"user_id\", userIDHandler)\n\/\/\n\/\/ When a Route processes a Request it automatically extracts the captured parameter values from the path\n\/\/ and stores the values under their name in the .Param field of the Request.\n\/\/\n\/\/ Strict vs non-strict Slash\n\/\/\n\/\/ A Route can have either strict slash or non-strict slash behavior. In non-strict mode paths with or\n\/\/ without a trailing slash are considered to be the same, i.e. a Route registered with \"\/mypath\" in\n\/\/ non-strict mode matches both \"\/mypath\" and \"\/mypath\/\". In strict mode both\n\/\/ paths are considered to be different.\n\/\/\n\/\/ Order matters\n\/\/\n\/\/ The order in which Handlers are registered does matter. An incoming request goes through in the\n\/\/ exact same order. After each handler the Router checks wether an error was set on the\n\/\/ ResponseWriter or if a response was written and ends the processing if necessary. In case of an error\n\/\/ the Router forwards the request along with the error to its ErrorHandler, but only if one is available.\n\/\/ All sub Routers have no ErrorHandler by default, so all errors are handled by the top level Server. It\n\/\/ is possible though to handle errors in a sub Router by setting a custom ErrorHandler.\n\/\/\n\/\/ A small example shows how requests are processed in a Router:\n\/\/\n\/\/ server.Use(A)\n\/\/ server.Get(\"\/\", B)\n\/\/ server.Use(C)\n\/\/ server.SubRouter(\"\/sub\", D)\n\/\/\n\/\/ All incoming request will follow the order \"ABCD\". The\npackage goserv\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package try provides retry functionality.\npackage try\n<commit_msg>added example<commit_after>\/\/ Package try provides retry functionality.\n\/\/ var value string\n\/\/ err := try.Do(func(attempt int) (error, bool) {\n\/\/ var err error\n\/\/ value, err = SomeFunction()\n\/\/ return err, attempt < 5 \/\/ try 5 times\n\/\/ })\n\/\/ if err != nil {\n\/\/ log.Fatalln(\"error:\", err)\n\/\/ }\npackage try\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ncrane is a command line tool for service providers\/administrators.\n\nIt provides some commands that allow the service administrator to register\nhimself\/herself, manage teams, apps and services.\n\nUsage:\n\n\t% crane <command> [args]\n\nThe currently available commands are (grouped by subject):\n\n\ttarget changes or retrive the current tsuru server\n\tversion displays current tsuru version\n\n\tuser-create creates a new user\n\tlogin authenticates the user with tsuru server\n\tlogout finishes the session with tsuru server\n\tkey-add adds a public key to tsuru deploy server\n\tkey-remove removes a public key from tsuru deploy server\n\n\tteam-create creates a new team (adding the current user to it automatically)\n\tteam-list list teams that the user is member\n\tteam-user-add adds a user to a team\n\tteam-user-remove removes a user from a team\n\n\ttemplate generates a new manifest file, so you can just fill information for your service\n\tcreate creates a new service from a manifest file\n\tupdate updates a service using a manifest file\n\tremove removes a service\n\tlist list all services that the user is administrator of\n\n\tdoc-add updates service's documentation\n\tdoc-get gets current docs of the service\n\nUse \"crane help <command>\" for more information about a command.\n\n\nChange or retrieve remote crane server\n\nUsage:\n\n\t% crane target [target]\n\nThis command should be used to set current crane target, or retrieve current\ntarget.\n\nThe target is the crane server to which all operations will be directed to.\n\n\nCheck current version\n\nUsage:\n\n\t% crane version\n\nThis command returns the current version of crane command.\n\n\nCreate a user\n\nUsage:\n\n\t% crane user-create <email>\n\nuser-create creates a user within crane remote server. It will ask for the\npassword before issue the request.\n\n\nAuthenticate within remote crane server\n\nUsage:\n\n\t% crane login <email>\n\nLogin will ask for the password and check if the user is successfully\nauthenticated. If so, the token generated by the crane server will be stored in\n${HOME}\/.crane_token.\n\nAll crane actions require the user to be authenticated (except login and\nuser-create, obviously).\n\n\nLogout from remote crane server\n\nUsage:\n\n\t% crane logout\n\nLogout will delete the token file and terminate the session within crane\nserver.\n\n\nCreate a new team for the user\n\nUsage:\n\n\t% crane team-create <teamname>\n\nteam-create will create a team for the user. crane requires a user to be a\nmember of at least one team in order to create a service.\n\nWhen you create a team, you're automatically member of this team.\n\n\nList teams that the user is member of\n\nUsage:\n\n\t% crane team-list\n\nteam-list will list all teams that you are member of.\n\n\nAdd a user to a team\n\nUsage:\n\n\t% crane team-user-add <teamname> <useremail>\n\nteam-user-add adds a user to a team. You need to be a member of the team to be\nable to add another user to it.\n\n\nRemove a user from a team\n\nUsage:\n\n\t% crane team-user-remove <teamname> <useremail>\n\nteam-user-remove removes a user from a team. You need to be a member of the\nteam to be able to remove a user from it.\n\nA team can never have 0 users. If you are the last member of a team, you can't\nremove yourself from it.\n\n\nCreate an empty manifest file\n\nUsage:\n\n\t% crane template\n\nTemplate will create a file named \"manifest.yaml\" with the following content:\n\n\tid: servicename\n\tendpoint:\n\t production: production-endpoint.com\n\t test: test-endpoint.com:8080\n\nChange it at will to configure your service. Id is the id of your service, it\nmust be unique. You must provide a production endpoint that will be invoked by\ntsuru when application developers ask for new instances and are binding their\napps to their instances. For more details, see the text \"Services API\nWorkflow\": http:\/\/tsuru.rtfd.org\/services-api-workflow.\n\n\nCreate a new service\n\nUsage:\n\n\t% crane create <manifest-file.yaml>\n\nCreate will create a new service with information present in the manifest file.\nHere is an example of usage:\n\n\t% cat \/home\/gopher\/projects\/mysqlapi\/manifest.yaml\n\tid: mysqlapi\n\tendpoint:\n\t production: https:\/\/mysqlapi.com:7777\n\t% crane create \/home\/gopher\/projects\/mysqlapi\/manifest.yaml\n\tsuccess\n\nYou can use \"crane template\" to generate a template. Both id and production\nendpoint are required fields.\n\nWhen creating a new service, crane will add all user's teams as administrator\nteams of the service.\n\n\nUpdate a service\n\nUsage:\n\n\t% crane update <manifest-file.yaml>\n\nUpdate will update a service using a manifest file. Currently, it's only\npossible to edit an endpoint, or add new endpoints. You need to be an\nadministrator of the team to perform an update.\n\n\nRemove a service\n\nUsage:\n\n\t% crane remove <service-id>\n\nRemove will remove a service from crane server. You need to be an administrator\nof the team to remove it.\n\n\nList services that you administrate\n\nUsage:\n\n\t% crane list\n\nList will list all services that you administrate, and the instances of each\nservice, created by application developers.\n\n\nUpdate service's documentation\n\nUsage:\n\n\t% crane doc-add <service-id> <doc-file.txt>\n\ndoc-add will update service's doc. Example of usage:\n\n\t% cat doc.txt\n\tmysqlapi\n\n\tThis service is used for mysql connections.\n\n\tOnce binded, you will be able to use the following environment variables:\n\n\t\t- MYSQL_HOST: host of MySQL server\n\t\t- MYSQL_PORT: port of MySQL instance\n\t\t- MYSQL_DATABASE_NAME: name of the database\n\t\t- MYSQL_USER: MySQL user for connections\n\t\t- MYSQL_PASSWORD: MySQL password for connections\n\t% crane doc-add mysqlapi doc.txt\n\tDocumentation for 'mysqlapi' successfully updated.\n\nYou need to be an administrator of the service to update its docs.\n\n\nRetrieve service's documentation\n\nUsage:\n\n\t% crane doc-get <service-id>\n\ndoc-get will retrieve the current documentation of the service.\n*\/\npackage documentation\n<commit_msg>cmd, cmd\/crane, cmd\/tsuru: register user-remove in base manager<commit_after>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\ncrane is a command line tool for service providers\/administrators.\n\nIt provides some commands that allow the service administrator to register\nhimself\/herself, manage teams, apps and services.\n\nUsage:\n\n\t% crane <command> [args]\n\nThe currently available commands are (grouped by subject):\n\n\ttarget changes or retrive the current tsuru server\n\tversion displays current tsuru version\n\n\tuser-create creates a new user\n\tuser-remove removes your user from tsuru server\n\tlogin authenticates the user with tsuru server\n\tlogout finishes the session with tsuru server\n\tkey-add adds a public key to tsuru deploy server\n\tkey-remove removes a public key from tsuru deploy server\n\n\tteam-create creates a new team (adding the current user to it automatically)\n\tteam-list list teams that the user is member\n\tteam-user-add adds a user to a team\n\tteam-user-remove removes a user from a team\n\n\ttemplate generates a new manifest file, so you can just fill information for your service\n\tcreate creates a new service from a manifest file\n\tupdate updates a service using a manifest file\n\tremove removes a service\n\tlist list all services that the user is administrator of\n\n\tdoc-add updates service's documentation\n\tdoc-get gets current docs of the service\n\nUse \"crane help <command>\" for more information about a command.\n\n\nChange or retrieve remote crane server\n\nUsage:\n\n\t% crane target [target]\n\nThis command should be used to set current crane target, or retrieve current\ntarget.\n\nThe target is the crane server to which all operations will be directed to.\n\n\nCheck current version\n\nUsage:\n\n\t% crane version\n\nThis command returns the current version of crane command.\n\n\nCreate a user\n\nUsage:\n\n\t% crane user-create <email>\n\nuser-create creates a user within crane remote server. It will ask for the\npassword before issue the request.\n\n\nRemove your user from tsuru server\n\nUsage:\n\n\t% tsuru user-remove\n\nuser-remove will remove currently authenticated user from remote tsuru server.\nsince there cannot exist any orphan teams, tsuru will refuse to remove a user\nthat is the last member of some team. if this is your case, make sure you\nremove the team using \"team-remove\" before removing the user.\n\n\nAuthenticate within remote crane server\n\nUsage:\n\n\t% crane login <email>\n\nLogin will ask for the password and check if the user is successfully\nauthenticated. If so, the token generated by the crane server will be stored in\n${HOME}\/.crane_token.\n\nAll crane actions require the user to be authenticated (except login and\nuser-create, obviously).\n\n\nLogout from remote crane server\n\nUsage:\n\n\t% crane logout\n\nLogout will delete the token file and terminate the session within crane\nserver.\n\n\nCreate a new team for the user\n\nUsage:\n\n\t% crane team-create <teamname>\n\nteam-create will create a team for the user. crane requires a user to be a\nmember of at least one team in order to create a service.\n\nWhen you create a team, you're automatically member of this team.\n\n\nList teams that the user is member of\n\nUsage:\n\n\t% crane team-list\n\nteam-list will list all teams that you are member of.\n\n\nAdd a user to a team\n\nUsage:\n\n\t% crane team-user-add <teamname> <useremail>\n\nteam-user-add adds a user to a team. You need to be a member of the team to be\nable to add another user to it.\n\n\nRemove a user from a team\n\nUsage:\n\n\t% crane team-user-remove <teamname> <useremail>\n\nteam-user-remove removes a user from a team. You need to be a member of the\nteam to be able to remove a user from it.\n\nA team can never have 0 users. If you are the last member of a team, you can't\nremove yourself from it.\n\n\nCreate an empty manifest file\n\nUsage:\n\n\t% crane template\n\nTemplate will create a file named \"manifest.yaml\" with the following content:\n\n\tid: servicename\n\tendpoint:\n\t production: production-endpoint.com\n\t test: test-endpoint.com:8080\n\nChange it at will to configure your service. Id is the id of your service, it\nmust be unique. You must provide a production endpoint that will be invoked by\ntsuru when application developers ask for new instances and are binding their\napps to their instances. For more details, see the text \"Services API\nWorkflow\": http:\/\/tsuru.rtfd.org\/services-api-workflow.\n\n\nCreate a new service\n\nUsage:\n\n\t% crane create <manifest-file.yaml>\n\nCreate will create a new service with information present in the manifest file.\nHere is an example of usage:\n\n\t% cat \/home\/gopher\/projects\/mysqlapi\/manifest.yaml\n\tid: mysqlapi\n\tendpoint:\n\t production: https:\/\/mysqlapi.com:7777\n\t% crane create \/home\/gopher\/projects\/mysqlapi\/manifest.yaml\n\tsuccess\n\nYou can use \"crane template\" to generate a template. Both id and production\nendpoint are required fields.\n\nWhen creating a new service, crane will add all user's teams as administrator\nteams of the service.\n\n\nUpdate a service\n\nUsage:\n\n\t% crane update <manifest-file.yaml>\n\nUpdate will update a service using a manifest file. Currently, it's only\npossible to edit an endpoint, or add new endpoints. You need to be an\nadministrator of the team to perform an update.\n\n\nRemove a service\n\nUsage:\n\n\t% crane remove <service-id>\n\nRemove will remove a service from crane server. You need to be an administrator\nof the team to remove it.\n\n\nList services that you administrate\n\nUsage:\n\n\t% crane list\n\nList will list all services that you administrate, and the instances of each\nservice, created by application developers.\n\n\nUpdate service's documentation\n\nUsage:\n\n\t% crane doc-add <service-id> <doc-file.txt>\n\ndoc-add will update service's doc. Example of usage:\n\n\t% cat doc.txt\n\tmysqlapi\n\n\tThis service is used for mysql connections.\n\n\tOnce binded, you will be able to use the following environment variables:\n\n\t\t- MYSQL_HOST: host of MySQL server\n\t\t- MYSQL_PORT: port of MySQL instance\n\t\t- MYSQL_DATABASE_NAME: name of the database\n\t\t- MYSQL_USER: MySQL user for connections\n\t\t- MYSQL_PASSWORD: MySQL password for connections\n\t% crane doc-add mysqlapi doc.txt\n\tDocumentation for 'mysqlapi' successfully updated.\n\nYou need to be an administrator of the service to update its docs.\n\n\nRetrieve service's documentation\n\nUsage:\n\n\t% crane doc-get <service-id>\n\ndoc-get will retrieve the current documentation of the service.\n*\/\npackage documentation\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\npackage choices provides a library for simple a\/b and multivariate testing.\n\nchoices uses hashing to uniquely assign users to experiments and decide the\nvalues the user is assigned. This allows us to quickly assign a user to an\nexperiment with out having to look up what they were assigned previously. Most\nof the ideas in this package are based off of Facebook's Planout.\n\nIn choices there are three main concepts. Namespaces, Experiments, and Params.\nNamespaces split traffic between the experiments they contain. Experiments are\nthe element you are testing. Experiments are made up of one or more Params.\nParams are a key value pair. The value can be either a Uniform Choice value or\na Weighted Choice value.\n\nIn most cases you will want to create one namespace per experiment. If you have\nexperiments that might have interactions you can use namespaces to split the\ntraffic between them. For example, if you are running a test on a banner and\nanother test that takes over the whole page, you will want to split the traffic\nbetween these two tests. Another example, is if you want to run a test on a\nsmall percent of traffic. Namespaces contain a list of TeamID's. These TeamID's\nare used to deterime which Experiments to return to the caller.\n\nExperiments contain the Params for a running experiment. When a caller queries\nNamespaces, Namespaces will hash the user into a segment, It will then check if\nthe segment is contained in an Experiment. If the segment is contained in the\nexperiment then the experiment will be evaluated. An experiment will in turn\nevaluate each of it's Params.\n\nParams are key-value pairs. They Options for value are Uniform choice or\nWeighted choice. Uniform choices will be selected in a uniform fashion.\nWeighted choices will be selected based on the proportions supplied in the\nweights.\n*\/\n\npackage choices\n<commit_msg>choices: fix docs<commit_after>\/\/ Copyright 2016 Andrew O'Neill\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package choices provides a library for simple a\/b and multivariate testing.\n\/\/\n\/\/ choices uses hashing to uniquely assign users to experiments and decide the\n\/\/ values the user is assigned. This allows us to quickly assign a user to an\n\/\/ experiment with out having to look up what they were assigned previously.\n\/\/ Most of the ideas in this package are based off of Facebook's Planout.\n\/\/\n\/\/ In choices there are three main concepts. Namespaces, Experiments, and\n\/\/ Params. Namespaces split traffic between the experiments they contain.\n\/\/ Experiments are the element you are testing. Experiments are made up of one\n\/\/ or more Params. Params are a key value pair. The value can be either a\n\/\/ Uniform Choice value or a Weighted Choice value.\n\/\/\n\/\/ In most cases you will want to create one namespace per experiment. If you\n\/\/ have experiments that might have interactions you can use namespaces to\n\/\/ split the traffic between them. For example, if you are running a test on a\n\/\/ banner and another test that takes over the whole page, you will want to\n\/\/ split the traffic between these two tests. Another example, is if you want\n\/\/ to run a test on a small percent of traffic. Namespaces contain a list of\n\/\/ TeamID's. These TeamID's are used to deterime which Experiments to return to\n\/\/ the caller.\n\/\/\n\/\/ Experiments contain the Params for a running experiment. When a caller\n\/\/ queries Namespaces, Namespaces will hash the user into a segment, It will\n\/\/ then check if the segment is contained in an Experiment. If the segment is\n\/\/ contained in the experiment then the experiment will be evaluated. An\n\/\/ experiment will in turn evaluate each of it's Params.\n\/\/\n\/\/ Params are key-value pairs. They Options for value are Uniform choice or\n\/\/ Weighted choice. Uniform choices will be selected in a uniform fashion.\n\/\/ Weighted choices will be selected based on the proportions supplied in the\n\/\/ weights.\n\npackage choices\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed under the MIT license, see LICENCE file for details.\n\n\/*\nPackage quicktest provides a collection of Go helpers for writing tests.\n\nQuicktest helpers can be easily integrated inside regular Go tests, for\ninstance:\n\n import qt \"github.com\/frankban\/quicktest\"\n\n func TestFoo(t *testing.T) {\n t.Run(\"numbers\", func(t *testing.T) {\n c := qt.New(t)\n numbers, err := somepackage.Numbers()\n c.Assert(numbers, qt.DeepEquals, []int{42, 47})\n c.Assert(err, qt.ErrorMatches, \"bad wolf\")\n })\n t.Run(\"nil\", func(t *testing.T) {\n c := qt.New(t)\n got := somepackage.MaybeNil()\n c.Assert(got, qt.IsNil, qt.Commentf(\"value: %v\", somepackage.Value))\n })\n }\n\nDeferred execution\n\nQuicktest provides the ability to defer the execution of functions that will be\nrun when the test completes. This is often useful for creating OS-level\nresources such as temporary directories (see c.Mkdir). The functions will be\nrun in last-in, first-out order.\n\n func (c *C) Defer(f func())\n\nTo trigger the deferred behavior, call c.Done:\n\n func (c *C) Done()\n\nIf you create a *C instance at the top level, you’ll have to add a defer to\ntrigger the cleanups at the end of the test:\n\n defer c.Done()\n\nHowever, if you use quicktest to create a subtest, Done will be called\nautomatically at the end of that subtest. For example:\n\n func TestFoo(t *testing.T) {\n c := qt.New(t)\n c.Run(\"subtest\", func(c *qt.C) {\n c.Setenv(\"HOME\", c.Mkdir())\n \/\/ Here $HOME is set the path to a newly created directory.\n \/\/ At the end of the test the directory will be removed\n \/\/ and HOME set back to its original value.\n })\n }\n\nAssertions\n\nAn assertion looks like this, where qt.Equals could be replaced by any\navailable checker. If the assertion fails, the underlying Fatal method is\ncalled to describe the error and abort the test.\n\n c.Assert(someValue, qt.Equals, wantValue)\n\nIf you don’t want to abort on failure, use Check instead, which calls Error\ninstead of Fatal:\n\n c.Check(someValue, qt.Equals, wantValue)\n\nThe library provides some base checkers like Equals, DeepEquals, Matches,\nErrorMatches, IsNil and others. More can be added by implementing the Checker\ninterface. Below, we list the checkers implemented by the package in alphabetical\norder.\n\nAll\n\nAll returns a Checker that uses the given checker to check elements of slice or\narray or the values of a map. It succeeds if all elements pass the check.\nOn failure it prints the error from the first index that failed.\n\nFor example:\n\n\tc.Assert([]int{3, 5, 8}, qt.All(qt.Not(qt.Equals)), 0)\n\tc.Assert([][]string{{\"a\", \"b\"}, {\"a\", \"b\"}}, qt.All(qt.DeepEquals), []string{\"c\", \"d\"})\n\nSee also Any and Contains.\n\nAny\n\nAny returns a Checker that uses the given checker to check elements of a slice\nor array or the values from a map. It succeeds if any element passes the check.\n\nFor example:\n\n\tc.Assert([]int{3,5,7,99}, qt.Any(qt.Equals), 7)\n\tc.Assert([][]string{{\"a\", \"b\"}, {\"c\", \"d\"}}, qt.Any(qt.DeepEquals), []string{\"c\", \"d\"})\n\nSee also All and Contains.\n\nCmpEquals\n\nCmpEquals checks equality of two arbitrary values according to the provided\ncompare options. DeepEquals is more commonly used when no compare options are\nrequired.\n\nExample calls:\n\n c.Assert(list, qt.CmpEquals(cmpopts.SortSlices), []int{42, 47})\n c.Assert(got, qt.CmpEquals(), []int{42, 47}) \/\/ Same as qt.DeepEquals.\n\nCodecEquals\n\nCodecEquals returns a checker that checks for codec value equivalence.\n\n func CodecEquals(\n marshal func(interface{}) ([]byte, error),\n unmarshal func([]byte, interface{}) error,\n opts ...cmp.Option,\n ) Checker\n\nIt expects two arguments: a byte slice or a string containing some\ncodec-marshaled data, and a Go value.\n\nIt uses unmarshal to unmarshal the data into an interface{} value.\nIt marshals the Go value using marshal, then unmarshals the result into\nan interface{} value.\n\nIt then checks that the two interface{} values are deep-equal to one another,\nusing CmpEquals(opts) to perform the check.\n\nSee JSONEquals for an example of this in use.\n\nContains\n\nContains checks that a map, slice, array or string contains a value. It's the\nsame as using Any(Equals), except that it has a special case for strings - if\nthe first argument is a string, the second argument must also be a string and\nstrings.Contains will be used.\n\nFor example:\n\n\tc.Assert(\"hello world\", qt.Contains, \"world\")\n\tc.Assert([]int{3,5,7,99}, qt.Contains, 7)\n\nContentEquals\n\nContentEquals is is like DeepEquals but any slices in the compared values will be sorted before being compared.\n\nFor example:\n\n\tc.Assert([]string{\"c\", \"a\", \"b\"}, qt.ContentEquals, []string{\"a\", \"b\", \"c\"})\n\nDeepEquals\n\nDeepEquals checks that two arbitrary values are deeply equal.\nThe comparison is done using the github.com\/google\/go-cmp\/cmp package.\nWhen comparing structs, by default no exported fields are allowed.\nIf a more sophisticated comparison is required, use CmpEquals (see below).\n\nExample call:\n\n c.Assert(got, qt.DeepEquals, []int{42, 47})\n\nEquals\n\nEquals checks that two values are equal, as compared with Go's == operator.\n\nFor instance:\n\n c.Assert(answer, qt.Equals, 42)\n\nNote that the following will fail:\n\n c.Assert((*sometype)(nil), qt.Equals, nil)\n\nUse the IsNil checker below for this kind of nil check.\n\nErrorMatches\n\nErrorMatches checks that the provided value is an error whose message matches\nthe provided regular expression.\n\nFor instance:\n\n c.Assert(err, qt.ErrorMatches, `bad wolf .*`)\n\nHasLen\n\nHasLen checks that the provided value has the given length.\n\nFor instance:\n\n c.Assert([]int{42, 47}, qt.HasLen, 2)\n c.Assert(myMap, qt.HasLen, 42)\n\nIsNil\n\nIsNil checks that the provided value is nil.\n\nFor instance:\n\n c.Assert(got, qt.IsNil)\n\nAs a special case, if the value is nil but implements the\nerror interface, it is still considered to be non-nil.\nThis means that IsNil will fail on an error value that happens\nto have an underlying nil value, because that's\ninvariably a mistake. See https:\/\/golang.org\/doc\/faq#nil_error.\n\nSo it's just fine to check an error like this:\n\n c.Assert(err, qt.IsNil)\n\nJSONEquals\n\nJSONEquals checks whether a byte slice or string is JSON-equivalent to a Go\nvalue. See CodecEquals for more information.\n\nIt uses DeepEquals to do the comparison. If a more sophisticated comparison is\nrequired, use CodecEquals directly.\n\nFor instance:\n\n c.Assert(`{\"First\": 47.11}`, qt.JSONEquals, &MyStruct{First: 47.11})\n\nMatches\n\nMatches checks that a string or result of calling the String method\n(if the value implements fmt.Stringer) matches the provided regular expression.\n\nFor instance:\n\n c.Assert(\"these are the voyages\", qt.Matches, `these are .*`)\n c.Assert(net.ParseIP(\"1.2.3.4\"), qt.Matches, `1.*`)\n\nNot\n\nNot returns a Checker negating the given Checker.\n\nFor instance:\n\n c.Assert(got, qt.Not(qt.IsNil))\n c.Assert(answer, qt.Not(qt.Equals), 42)\n\nPanicMatches\n\nPanicMatches checks that the provided function panics with a message matching\nthe provided regular expression.\n\nFor instance:\n\n c.Assert(func() {panic(\"bad wolf ...\")}, qt.PanicMatches, `bad wolf .*`)\n\nSatisfies\n\nSatisfies checks that the provided value, when used as argument of the provided\npredicate function, causes the function to return true. The function must be of\ntype func(T) bool, having got assignable to T.\n\nFor instance:\n\n \/\/ Check that an error from os.Open satisfies os.IsNotExist.\n c.Assert(err, qt.Satisfies, os.IsNotExist)\n\n \/\/ Check that a floating point number is a not-a-number.\n c.Assert(f, qt.Satisfies, math.IsNaN)\n*\/\npackage quicktest\n<commit_msg>Add IsTrue and IsFalse to doc.go<commit_after>\/\/ Licensed under the MIT license, see LICENCE file for details.\n\n\/*\nPackage quicktest provides a collection of Go helpers for writing tests.\n\nQuicktest helpers can be easily integrated inside regular Go tests, for\ninstance:\n\n import qt \"github.com\/frankban\/quicktest\"\n\n func TestFoo(t *testing.T) {\n t.Run(\"numbers\", func(t *testing.T) {\n c := qt.New(t)\n numbers, err := somepackage.Numbers()\n c.Assert(numbers, qt.DeepEquals, []int{42, 47})\n c.Assert(err, qt.ErrorMatches, \"bad wolf\")\n })\n t.Run(\"nil\", func(t *testing.T) {\n c := qt.New(t)\n got := somepackage.MaybeNil()\n c.Assert(got, qt.IsNil, qt.Commentf(\"value: %v\", somepackage.Value))\n })\n }\n\nDeferred execution\n\nQuicktest provides the ability to defer the execution of functions that will be\nrun when the test completes. This is often useful for creating OS-level\nresources such as temporary directories (see c.Mkdir). The functions will be\nrun in last-in, first-out order.\n\n func (c *C) Defer(f func())\n\nTo trigger the deferred behavior, call c.Done:\n\n func (c *C) Done()\n\nIf you create a *C instance at the top level, you’ll have to add a defer to\ntrigger the cleanups at the end of the test:\n\n defer c.Done()\n\nHowever, if you use quicktest to create a subtest, Done will be called\nautomatically at the end of that subtest. For example:\n\n func TestFoo(t *testing.T) {\n c := qt.New(t)\n c.Run(\"subtest\", func(c *qt.C) {\n c.Setenv(\"HOME\", c.Mkdir())\n \/\/ Here $HOME is set the path to a newly created directory.\n \/\/ At the end of the test the directory will be removed\n \/\/ and HOME set back to its original value.\n })\n }\n\nAssertions\n\nAn assertion looks like this, where qt.Equals could be replaced by any\navailable checker. If the assertion fails, the underlying Fatal method is\ncalled to describe the error and abort the test.\n\n c.Assert(someValue, qt.Equals, wantValue)\n\nIf you don’t want to abort on failure, use Check instead, which calls Error\ninstead of Fatal:\n\n c.Check(someValue, qt.Equals, wantValue)\n\nThe library provides some base checkers like Equals, DeepEquals, Matches,\nErrorMatches, IsNil and others. More can be added by implementing the Checker\ninterface. Below, we list the checkers implemented by the package in alphabetical\norder.\n\nAll\n\nAll returns a Checker that uses the given checker to check elements of slice or\narray or the values of a map. It succeeds if all elements pass the check.\nOn failure it prints the error from the first index that failed.\n\nFor example:\n\n\tc.Assert([]int{3, 5, 8}, qt.All(qt.Not(qt.Equals)), 0)\n\tc.Assert([][]string{{\"a\", \"b\"}, {\"a\", \"b\"}}, qt.All(qt.DeepEquals), []string{\"c\", \"d\"})\n\nSee also Any and Contains.\n\nAny\n\nAny returns a Checker that uses the given checker to check elements of a slice\nor array or the values from a map. It succeeds if any element passes the check.\n\nFor example:\n\n\tc.Assert([]int{3,5,7,99}, qt.Any(qt.Equals), 7)\n\tc.Assert([][]string{{\"a\", \"b\"}, {\"c\", \"d\"}}, qt.Any(qt.DeepEquals), []string{\"c\", \"d\"})\n\nSee also All and Contains.\n\nCmpEquals\n\nCmpEquals checks equality of two arbitrary values according to the provided\ncompare options. DeepEquals is more commonly used when no compare options are\nrequired.\n\nExample calls:\n\n c.Assert(list, qt.CmpEquals(cmpopts.SortSlices), []int{42, 47})\n c.Assert(got, qt.CmpEquals(), []int{42, 47}) \/\/ Same as qt.DeepEquals.\n\nCodecEquals\n\nCodecEquals returns a checker that checks for codec value equivalence.\n\n func CodecEquals(\n marshal func(interface{}) ([]byte, error),\n unmarshal func([]byte, interface{}) error,\n opts ...cmp.Option,\n ) Checker\n\nIt expects two arguments: a byte slice or a string containing some\ncodec-marshaled data, and a Go value.\n\nIt uses unmarshal to unmarshal the data into an interface{} value.\nIt marshals the Go value using marshal, then unmarshals the result into\nan interface{} value.\n\nIt then checks that the two interface{} values are deep-equal to one another,\nusing CmpEquals(opts) to perform the check.\n\nSee JSONEquals for an example of this in use.\n\nContains\n\nContains checks that a map, slice, array or string contains a value. It's the\nsame as using Any(Equals), except that it has a special case for strings - if\nthe first argument is a string, the second argument must also be a string and\nstrings.Contains will be used.\n\nFor example:\n\n\tc.Assert(\"hello world\", qt.Contains, \"world\")\n\tc.Assert([]int{3,5,7,99}, qt.Contains, 7)\n\nContentEquals\n\nContentEquals is is like DeepEquals but any slices in the compared values will be sorted before being compared.\n\nFor example:\n\n\tc.Assert([]string{\"c\", \"a\", \"b\"}, qt.ContentEquals, []string{\"a\", \"b\", \"c\"})\n\nDeepEquals\n\nDeepEquals checks that two arbitrary values are deeply equal.\nThe comparison is done using the github.com\/google\/go-cmp\/cmp package.\nWhen comparing structs, by default no exported fields are allowed.\nIf a more sophisticated comparison is required, use CmpEquals (see below).\n\nExample call:\n\n c.Assert(got, qt.DeepEquals, []int{42, 47})\n\nEquals\n\nEquals checks that two values are equal, as compared with Go's == operator.\n\nFor instance:\n\n c.Assert(answer, qt.Equals, 42)\n\nNote that the following will fail:\n\n c.Assert((*sometype)(nil), qt.Equals, nil)\n\nUse the IsNil checker below for this kind of nil check.\n\nErrorMatches\n\nErrorMatches checks that the provided value is an error whose message matches\nthe provided regular expression.\n\nFor instance:\n\n c.Assert(err, qt.ErrorMatches, `bad wolf .*`)\n\nHasLen\n\nHasLen checks that the provided value has the given length.\n\nFor instance:\n\n c.Assert([]int{42, 47}, qt.HasLen, 2)\n c.Assert(myMap, qt.HasLen, 42)\n\nIsFalse\n\nIsFalse checks that the provided value is false.\nThe value must have a boolean underlying type.\n\nFor instance:\n\n c.Assert(false, qt.IsFalse)\n c.Assert(IsValid(), qt.IsFalse)\n\nIsNil\n\nIsNil checks that the provided value is nil.\n\nFor instance:\n\n c.Assert(got, qt.IsNil)\n\nAs a special case, if the value is nil but implements the\nerror interface, it is still considered to be non-nil.\nThis means that IsNil will fail on an error value that happens\nto have an underlying nil value, because that's\ninvariably a mistake. See https:\/\/golang.org\/doc\/faq#nil_error.\n\nSo it's just fine to check an error like this:\n\n c.Assert(err, qt.IsNil)\n\nIsTrue\n\nIsTrue checks that the provided value is true.\nThe value must have a boolean underlying type.\n\nFor instance:\n\n c.Assert(true, qt.IsTrue)\n c.Assert(myBoolean(false), qt.IsTrue)\n\nJSONEquals\n\nJSONEquals checks whether a byte slice or string is JSON-equivalent to a Go\nvalue. See CodecEquals for more information.\n\nIt uses DeepEquals to do the comparison. If a more sophisticated comparison is\nrequired, use CodecEquals directly.\n\nFor instance:\n\n c.Assert(`{\"First\": 47.11}`, qt.JSONEquals, &MyStruct{First: 47.11})\n\nMatches\n\nMatches checks that a string or result of calling the String method\n(if the value implements fmt.Stringer) matches the provided regular expression.\n\nFor instance:\n\n c.Assert(\"these are the voyages\", qt.Matches, `these are .*`)\n c.Assert(net.ParseIP(\"1.2.3.4\"), qt.Matches, `1.*`)\n\nNot\n\nNot returns a Checker negating the given Checker.\n\nFor instance:\n\n c.Assert(got, qt.Not(qt.IsNil))\n c.Assert(answer, qt.Not(qt.Equals), 42)\n\nPanicMatches\n\nPanicMatches checks that the provided function panics with a message matching\nthe provided regular expression.\n\nFor instance:\n\n c.Assert(func() {panic(\"bad wolf ...\")}, qt.PanicMatches, `bad wolf .*`)\n\nSatisfies\n\nSatisfies checks that the provided value, when used as argument of the provided\npredicate function, causes the function to return true. The function must be of\ntype func(T) bool, having got assignable to T.\n\nFor instance:\n\n \/\/ Check that an error from os.Open satisfies os.IsNotExist.\n c.Assert(err, qt.Satisfies, os.IsNotExist)\n\n \/\/ Check that a floating point number is a not-a-number.\n c.Assert(f, qt.Satisfies, math.IsNaN)\n*\/\npackage quicktest\n<|endoftext|>"} {"text":"<commit_before>\/*\nmuts - Go package with utilities to create Make-like files in Go\n\nExample of a make.go\n\n\tfunc main() {\n\t\tflag.Parse()\n\t\tif len(*DeployableVersion) == 0 || len(*BuildNumber) == 0 || len(*BuildDate) == 0 {\n\t\t\tlog.Fatal(\"one of the required flags is missing\")\n\t\t}\n\t\tTasks[\"clean\"] = taskClean\n\t\tTasks[\"build\"] = taskBuild\n\t\tTasks[\"compile\"] = taskCompile\n\t\tTasks[\"compile_tester\"] = taskCompileTester\n\t\tTasks[\"compile_dbtester\"] = taskCompileDBTester\n\t\tTasks[\"compile_composer\"] = taskCompileComposer\n\t\tTasks[\"apidocs\"] = func() { Call(\"tar -zcf .\/target\/swagger.tar.gz swagger\") }\n\t\tTasks[\"dashboard\"] = func() { Call(\"tar -zcf .\/target\/gui.tar.gz dashboard\") }\n\t\tTasks[\"readme\"] = func() { Call(\"cp -v readme.md .\/target\/\") }\n\t\tTasks[\"startsh\"] = func() { Call(\"cp -v start.sh .\/target\/\") }\n\t\tTasks[\"db\"] = taskArchiveDB\n\t\tTasks[\"app\"] = taskArchiveApp\n\t\tTasks[\"standalone\"] = taskStandalone\n\t\tTasks[\"unit\"] = func() { Call(\"godep go test .\/internal\/...\") }\n\t\tTasks[\"local\"] = func() { Call(\"godep go build -o target\/boqs main.go\") }\n\t\tTasks[\"publish\"] = taskPublish\n\t\tRunTasksFromArgs()\n\t}\n\nUse \tit like this\n\n\tgo run make.go -version=1.1.1.1 -buildnumber=42 -builddate=`date +%Y:%m:%d.%H:%M:%S` build\n\n\nSome background\n\nThis package contains a collection of small helper functions to create scripts the easy way.\nMost of the time, shell scripting is fine but soon it can become complex once you need functions,loops and decision trees.\nSo why not use the Go language and its rich SDK to write real programs which can be organized much easier.\n\nIt all started with the Call function that mimics what you would write in a shell script.\nIt can be used both with a single line command and one that is composed of a list of strings.\nThe CallWait version lets you wait for the program to finish or return the process ID for stopping it later.\n\n\tCall(\"zip\", \"-q\", \"-r\", fmt.Sprintf(\"%s\/sql\/boqs-db-%s.zip\", versionDir, *DeployableVersion), \".\")\n\nNext, we added the concept of a simple task (without the dependencies).\nA task is just a no-argument function.\nBy putting these tasks in the global Tasks map, you can execute them just by passing their names to your program:\n\n\tgo run make.go clean build unit\n\nThe last feature to mention is the Workspace variable that refers to the directory in which the program was started.\nTask execution may change this directory (Chdir) so to keep things simpler, the current directory is reset after each task.\n\nMost functions will produce a log entry.\nIf an error occurs then the program exits (calling the Fatalln function).\n\n*\/\npackage muts\n<commit_msg>simplify doc<commit_after>\/*\nmuts - Go package with utilities to create Make-like files in Go\n\nExample of a make.go\n\n\tpackage main\n\n\timport (\n\t\t\"flag\"\n\t\t. \"github.com\/bolcom\/muts\"\n\t)\n\n\tvar BuildNumber = flag.String(\"buildnumber\", \"0\", \"build sequence number\")\n\n\tfunc main() {\n\t\tflag.Parse()\n\t\tTasks[\"clean\"] = taskClean\n\t\tTasks[\"readme\"] = func() { Call(\"cp -v readme.md .\/target\/\") }\n\t\tTasks[\"build\"] = taskBuild\n\t\tRunTasksFromArgs()\n\t}\n\n\tfunc taskClean() { ... }\n\tfunc taskBuild() { ... }\n\nUse\tit like this\n\n\tgo run make.go -buildnumber=42 build\n\n\nSome background\n\nThis package contains a collection of small helper functions to create scripts the easy way.\nMost of the time, shell scripting is fine but soon it can become complex once you need functions,loops and decision trees.\nSo why not use the Go language and its rich SDK to write real programs which can be organized much easier.\n\nIt all started with the Call function that mimics what you would write in a shell script.\nIt can be used both with a single line command and one that is composed of a list of strings.\nThe CallWait version lets you wait for the program to finish or return the process ID for stopping it later.\n\n\tCall(\"zip\", \"-q\", \"-r\", fmt.Sprintf(\"%s\/sql\/boqs-db-%s.zip\", versionDir, *DeployableVersion), \".\")\n\nNext, we added the concept of a simple task (without the dependencies).\nA task is just a no-argument function.\nBy putting these tasks in the global Tasks map, you can execute them just by passing their names to your program:\n\n\tgo run make.go clean build unit\n\nThe last feature to mention is the Workspace variable that refers to the directory in which the program was started.\nTask execution may change this directory (Chdir) so to keep things simpler, the current directory is reset after each task.\n\nMost functions will produce a log entry.\nIf an error occurs then the program exits (calling the Fatalln function).\n\n*\/\npackage muts\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage graph implements functions and interfaces to deal with formal discrete graphs. It aims to be first and foremost flexible, with speed as a strong second priority.\n\nIn this package, graphs are taken to be directed, and undirected graphs are considered to be a special case of directed graphs that happen to have reciprocal edges. Graphs are, by default, unweighted,\nbut functions that require weighted edges have several methods of dealing with this. In order of precedence:\n\n1. These functions have an argument called Cost (and in some cases, HeuristicCost). If this is present, it will always be used to determine the cost between two nodes.\n\n2. These functions will check if your graph implements the Coster (and\/or HeuristicCoster) interface. If this is present, and the Cost (or HeuristicCost) argument is nil, these functions will be used\n\n3. Finally, if no user data is supplied, it will use the functions UniformCost (always returns 1) and\/or NulLHeuristic (always returns 0).\n\nFor information on the specification for Cost functions, please see the Coster interface.\n\nThis package will never modify a graph that is not Mutable (and the interface does not allow it to do so). However, return values are free to be modified, so never pass a reference to your own edge list or node list.\nIt also guarantees that any nodes passed back to the user will be the same nodes returned to it -- that is, it will never take a Node's ID and then wrap the ID in a new struct and return that. You'll always get back your\noriginal data.\n*\/\npackage graph\n<commit_msg>Documentation<commit_after>\/*\nPackage graph implements functions and interfaces to deal with formal discrete graphs. It aims to be first and foremost flexible, with speed as a strong second priority.\n\nIn this package, graphs are taken to be directed, and undirected graphs are considered to be a special case of directed graphs that happen to have reciprocal edges. Graphs are, by default, unweighted,\nbut functions that require weighted edges have several methods of dealing with this. In order of precedence:\n\n1. These functions have an argument called Cost (and in some cases, HeuristicCost). If this is present, it will always be used to determine the cost between two nodes.\n\n2. These functions will check if your graph implements the Coster (and\/or HeuristicCoster) interface. If this is present, and the Cost (or HeuristicCost) argument is nil, these functions will be used\n\n3. Finally, if no user data is supplied, it will use the functions UniformCost (always returns 1) and\/or NulLHeuristic (always returns 0).\n\nFor information on the specification for Cost functions, please see the Coster interface.\n\nFinally, although the functions take in a Graph -- they will always use the correct behavior. If your graph implements DirectedGraph, it will use Successors and Predecessors where applicable,\nif undirected, it will use Neighbors instead. If it implements neither, it will scan the edge list for successors and predecessors where applicable. (This is slow, you should always implement either Directed or Undirected)\n\nThis package will never modify a graph that is not Mutable (and the interface does not allow it to do so). However, return values are free to be modified, so never pass a reference to your own edge list or node list.\nIt also guarantees that any nodes passed back to the user will be the same nodes returned to it -- that is, it will never take a Node's ID and then wrap the ID in a new struct and return that. You'll always get back your\noriginal data.\n*\/\npackage graph\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package image provides libraries and commands to interact with containers images.\n\/\/\n\/\/ package main\n\/\/\n\/\/ import (\n\/\/ \t\"fmt\"\n\/\/\n\/\/ \t\"github.com\/containers\/image\/docker\"\n\/\/ )\n\/\/\n\/\/ func main() {\n\/\/ \tref, err := docker.ParseReference(\"fedora\")\n\/\/ \tif err != nil {\n\/\/ \t\tpanic(err)\n\/\/ \t}\n\/\/ \timg, err := ref.NewImage(nil)\n\/\/ \tif err != nil {\n\/\/ \t\tpanic(err)\n\/\/ \t}\n\/\/ defer img.Close()\n\/\/ \tb, _, err := img.Manifest()\n\/\/ \tif err != nil {\n\/\/ \t\tpanic(err)\n\/\/ \t}\n\/\/ \tfmt.Printf(\"%s\", string(b))\n\/\/ }\n\/\/\n\/\/ TODO(runcom)\npackage image\n<commit_msg>doc.go: Update intro source example<commit_after>\/\/ Package image provides libraries and commands to interact with containers images.\n\/\/\n\/\/ \tpackage main\n\/\/\n\/\/ \timport (\n\/\/ \t\t\"fmt\"\n\/\/\n\/\/ \t\t\"github.com\/containers\/image\/docker\"\n\/\/ \t)\n\/\/\n\/\/ \tfunc main() {\n\/\/ \t\tref, err := docker.ParseReference(\"\/\/fedora\")\n\/\/ \t\tif err != nil {\n\/\/ \t\t\tpanic(err)\n\/\/ \t\t}\n\/\/ \t\timg, err := ref.NewImage(nil)\n\/\/ \t\tif err != nil {\n\/\/ \t\t\tpanic(err)\n\/\/ \t\t}\n\/\/ \t\tdefer img.Close()\n\/\/ \t\tb, _, err := img.Manifest()\n\/\/ \t\tif err != nil {\n\/\/ \t\t\tpanic(err)\n\/\/ \t\t}\n\/\/ \t\tfmt.Printf(\"%s\", string(b))\n\/\/ \t}\n\/\/\n\/\/ TODO(runcom)\npackage image\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package pgx is a PostgreSQL database driver.\n\/*\npgx provides lower level access to PostgreSQL than the standard database\/sql. It remains as similar to the database\/sql\ninterface as possible while providing better speed and access to PostgreSQL specific features. Import\ngithub.com\/jackc\/pgx\/v4\/stdlib to use pgx as a database\/sql compatible driver.\n\nEstablishing a Connection\n\nThe primary way of establishing a connection is with `pgx.Connect`.\n\n conn, err := pgx.Connect(context.Background(), os.Getenv(\"DATABASE_URL\"))\n\nThe database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified\nhere. In addition, a config struct can be created by `ParseConfig` and modified before establishing the connection with\n`ConnectConfig`.\n\n config, err := pgx.ParseConfig(os.Getenv(\"DATABASE_URL\"))\n if err != nil {\n \/\/ ...\n }\n config.Logger = log15adapter.NewLogger(log.New(\"module\", \"pgx\"))\n\n conn, err := pgx.ConnectConfig(context.Background(), config)\n\nQuery Interface\n\npgx implements Query and Scan in the familiar database\/sql style.\n\n var sum int32\n\n \/\/ Send the query to the server. The returned rows MUST be closed\n \/\/ before conn can be used again.\n rows, err := conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n if err != nil {\n return err\n }\n\n \/\/ rows.Close is called by rows.Next when all rows are read\n \/\/ or an error occurs in Next or Scan. So it may optionally be\n \/\/ omitted if nothing in the rows.Next loop can panic. It is\n \/\/ safe to close rows multiple times.\n defer rows.Close()\n\n \/\/ Iterate through the result set\n for rows.Next() {\n var n int32\n err = rows.Scan(&n)\n if err != nil {\n return err\n }\n sum += n\n }\n\n \/\/ Any errors encountered by rows.Next or rows.Scan will be returned here\n if rows.Err() != nil {\n return rows.Err()\n }\n\n \/\/ No errors found - do something with sum\n\npgx also implements QueryRow in the same style as database\/sql.\n\n var name string\n var weight int64\n err := conn.QueryRow(context.Background(), \"select name, weight from widgets where id=$1\", 42).Scan(&name, &weight)\n if err != nil {\n return err\n }\n\nUse Exec to execute a query that does not return a result set.\n\n commandTag, err := conn.Exec(context.Background(), \"delete from widgets where id=$1\", 42)\n if err != nil {\n return err\n }\n if commandTag.RowsAffected() != 1 {\n return errors.New(\"No row found to delete\")\n }\n\nConnection Pool\n\nSee sub-package pgxpool for a connection pool.\n\nBase Type Mapping\n\npgx maps between all common base types directly between Go and PostgreSQL. In particular:\n\n Go PostgreSQL\n -----------------------\n string varchar\n text\n\n \/\/ Integers are automatically be converted to any other integer type if\n \/\/ it can be done without overflow or underflow.\n int8\n int16 smallint\n int32 int\n int64 bigint\n int\n uint8\n uint16\n uint32\n uint64\n uint\n\n \/\/ Floats are strict and do not automatically convert like integers.\n float32 float4\n float64 float8\n\n time.Time date\n timestamp\n timestamptz\n\n []byte bytea\n\n\nNull Mapping\n\npgx can map nulls in two ways. The first is package pgtype provides types that have a data field and a status field.\nThey work in a similar fashion to database\/sql. The second is to use a pointer to a pointer.\n\n var foo pgtype.Varchar\n var bar *string\n err := conn.QueryRow(\"select foo, bar from widgets where id=$1\", 42).Scan(&foo, &bar)\n if err != nil {\n return err\n }\n\nArray Mapping\n\npgx maps between int16, int32, int64, float32, float64, and string Go slices and the equivalent PostgreSQL array type.\nGo slices of native types do not support nulls, so if a PostgreSQL array that contains a null is read into a native Go\nslice an error will occur. The pgtype package includes many more array types for PostgreSQL types that do not directly\nmap to native Go types.\n\nJSON and JSONB Mapping\n\npgx includes built-in support to marshal and unmarshal between Go types and the PostgreSQL JSON and JSONB.\n\nInet and CIDR Mapping\n\npgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In addition, as a convenience pgx will encode\nfrom a net.IP; it will assume a \/32 netmask for IPv4 and a \/128 for IPv6.\n\nCustom Type Support\n\npgx includes support for the common data types like integers, floats, strings, dates, and times that have direct\nmappings between Go and SQL. In addition, pgx uses the github.com\/jackc\/pgx\/pgtype library to support more types. See\ndocumention for that library for instructions on how to implement custom types.\n\nSee example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.\n\npgx also includes support for custom types implementing the database\/sql.Scanner and database\/sql\/driver.Valuer\ninterfaces.\n\nIf pgx does cannot natively encode a type and that type is a renamed type (e.g. type MyTime time.Time) pgx will attempt\nto encode the underlying type. While this is usually desired behavior it can produce surprising behavior if one the\nunderlying type and the renamed type each implement database\/sql interfaces and the other implements pgx interfaces. It\nis recommended that this situation be avoided by implementing pgx interfaces on the renamed type.\n\nComposite types and row values\n\nRow values and composite types are represented as pgtype.Record (https:\/\/pkg.go.dev\/github.com\/jackc\/pgtype?tab=doc#Record).\nIt is possible to get values of your custom type by implementing DecodeBinary interface. Decoding into\npgtype.Record first can simplify process by avoiding dealing with raw protocol directly.\n\nFor example:\n\n type MyType struct {\n a int \/\/ NULL will cause decoding error\n b *string \/\/ there can be NULL in this position in SQL\n }\n\n func (t *MyType) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error {\n r := pgtype.Record{\n Fields: []pgtype.Value{&pgtype.Int4{}, &pgtype.Text{}},\n }\n\n if err := r.DecodeBinary(ci, src); err != nil {\n return err\n }\n\n if r.Status != pgtype.Present {\n return errors.New(\"BUG: decoding should not be called on NULL value\")\n }\n\n a := r.Fields[0].(*pgtype.Int4)\n b := r.Fields[1].(*pgtype.Text)\n\n \/\/ type compatibility is checked by AssignTo\n \/\/ only lossless assignments will succeed\n if err := a.AssignTo(&t.a); err != nil {\n return err\n }\n\n \/\/ AssignTo also deals with null value handling\n if err := b.AssignTo(&t.b); err != nil {\n return err\n }\n return nil\n }\n\n result := MyType{}\n err := conn.QueryRow(context.Background(), \"select row(1, 'foo'::text)\", pgx.QueryResultFormats{pgx.BinaryFormatCode}).Scan(&r)\n\nRaw Bytes Mapping\n\n[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified to PostgreSQL.\n\nTransactions\n\nTransactions are started by calling Begin.\n\n tx, err := conn.Begin(context.Background())\n if err != nil {\n return err\n }\n \/\/ Rollback is safe to call even if the tx is already closed, so if\n \/\/ the tx commits successfully, this is a no-op\n defer tx.Rollback(context.Background())\n\n _, err = tx.Exec(context.Background(), \"insert into foo(id) values (1)\")\n if err != nil {\n return err\n }\n\n err = tx.Commit(context.Background())\n if err != nil {\n return err\n }\n\nThe Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.\nThese are internally implemented with savepoints.\n\nUse BeginTx to control the transaction mode.\n\nPrepared Statements\n\nPrepared statements can be manually created with the Prepare method. However, this is rarely necessary because pgx\nincludes an automatic statement cache by default. Queries run through the normal Query, QueryRow, and Exec functions are\nautomatically prepared on first execution and the prepared statement is reused on subsequent executions. See ParseConfig\nfor information on how to customize or disable the statement cache.\n\nCopy Protocol\n\nUse CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a\nCopyFromSource interface. If the data is already in a [][]interface{} use CopyFromRows to wrap it in a CopyFromSource\ninterface. Or implement CopyFromSource to avoid buffering the entire data set in memory.\n\n rows := [][]interface{}{\n {\"John\", \"Smith\", int32(36)},\n {\"Jane\", \"Doe\", int32(29)},\n }\n\n copyCount, err := conn.CopyFrom(\n context.Background(),\n pgx.Identifier{\"people\"},\n []string{\"first_name\", \"last_name\", \"age\"},\n pgx.CopyFromRows(rows),\n )\n\nCopyFrom can be faster than an insert with as few as 5 rows.\n\nListen and Notify\n\npgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a\ncontext is received or the context is canceled.\n\n _, err := conn.Exec(context.Background(), \"listen channelname\")\n if err != nil {\n return nil\n }\n\n if notification, err := conn.WaitForNotification(context.Background()); err != nil {\n \/\/ do something with notification\n }\n\n\nLogging\n\npgx defines a simple logger interface. Connections optionally accept a logger that satisfies this interface. Set\nLogLevel to control logging verbosity. Adapters for github.com\/inconshreveable\/log15, github.com\/sirupsen\/logrus,\ngo.uber.org\/zap, github.com\/rs\/zerolog, and the testing log are provided in the log directory.\n\nLower Level PostgreSQL Functionality\n\npgx is implemented on top of github.com\/jackc\/pgconn a lower level PostgreSQL driver. The Conn.PgConn() method can be\nused to access this lower layer.\n\nPgBouncer\n\npgx is compatible with PgBouncer in two modes. One is when the connection has a statement cache in \"describe\" mode. The\nother is when the connection is using the simple protocol. This can be set with the PreferSimpleProtocol config option.\n*\/\npackage pgx\n<commit_msg>Tweak docs to make conn \/ conn pool distinction clearer<commit_after>\/\/ Package pgx is a PostgreSQL database driver.\n\/*\npgx provides lower level access to PostgreSQL than the standard database\/sql. It remains as similar to the database\/sql\ninterface as possible while providing better speed and access to PostgreSQL specific features. Import\ngithub.com\/jackc\/pgx\/v4\/stdlib to use pgx as a database\/sql compatible driver.\n\nEstablishing a Connection\n\nThe primary way of establishing a connection is with `pgx.Connect`.\n\n conn, err := pgx.Connect(context.Background(), os.Getenv(\"DATABASE_URL\"))\n\nThe database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified\nhere. In addition, a config struct can be created by `ParseConfig` and modified before establishing the connection with\n`ConnectConfig`.\n\n config, err := pgx.ParseConfig(os.Getenv(\"DATABASE_URL\"))\n if err != nil {\n \/\/ ...\n }\n config.Logger = log15adapter.NewLogger(log.New(\"module\", \"pgx\"))\n\n conn, err := pgx.ConnectConfig(context.Background(), config)\n\nConnection Pool\n\n`*pgx.Conn` represents a single connection to the database and is not concurrency safe. Use sub-package pgxpool for a\nconcurrency safe connection pool.\n\nQuery Interface\n\npgx implements Query and Scan in the familiar database\/sql style.\n\n var sum int32\n\n \/\/ Send the query to the server. The returned rows MUST be closed\n \/\/ before conn can be used again.\n rows, err := conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n if err != nil {\n return err\n }\n\n \/\/ rows.Close is called by rows.Next when all rows are read\n \/\/ or an error occurs in Next or Scan. So it may optionally be\n \/\/ omitted if nothing in the rows.Next loop can panic. It is\n \/\/ safe to close rows multiple times.\n defer rows.Close()\n\n \/\/ Iterate through the result set\n for rows.Next() {\n var n int32\n err = rows.Scan(&n)\n if err != nil {\n return err\n }\n sum += n\n }\n\n \/\/ Any errors encountered by rows.Next or rows.Scan will be returned here\n if rows.Err() != nil {\n return rows.Err()\n }\n\n \/\/ No errors found - do something with sum\n\npgx also implements QueryRow in the same style as database\/sql.\n\n var name string\n var weight int64\n err := conn.QueryRow(context.Background(), \"select name, weight from widgets where id=$1\", 42).Scan(&name, &weight)\n if err != nil {\n return err\n }\n\nUse Exec to execute a query that does not return a result set.\n\n commandTag, err := conn.Exec(context.Background(), \"delete from widgets where id=$1\", 42)\n if err != nil {\n return err\n }\n if commandTag.RowsAffected() != 1 {\n return errors.New(\"No row found to delete\")\n }\n\nBase Type Mapping\n\npgx maps between all common base types directly between Go and PostgreSQL. In particular:\n\n Go PostgreSQL\n -----------------------\n string varchar\n text\n\n \/\/ Integers are automatically be converted to any other integer type if\n \/\/ it can be done without overflow or underflow.\n int8\n int16 smallint\n int32 int\n int64 bigint\n int\n uint8\n uint16\n uint32\n uint64\n uint\n\n \/\/ Floats are strict and do not automatically convert like integers.\n float32 float4\n float64 float8\n\n time.Time date\n timestamp\n timestamptz\n\n []byte bytea\n\n\nNull Mapping\n\npgx can map nulls in two ways. The first is package pgtype provides types that have a data field and a status field.\nThey work in a similar fashion to database\/sql. The second is to use a pointer to a pointer.\n\n var foo pgtype.Varchar\n var bar *string\n err := conn.QueryRow(\"select foo, bar from widgets where id=$1\", 42).Scan(&foo, &bar)\n if err != nil {\n return err\n }\n\nArray Mapping\n\npgx maps between int16, int32, int64, float32, float64, and string Go slices and the equivalent PostgreSQL array type.\nGo slices of native types do not support nulls, so if a PostgreSQL array that contains a null is read into a native Go\nslice an error will occur. The pgtype package includes many more array types for PostgreSQL types that do not directly\nmap to native Go types.\n\nJSON and JSONB Mapping\n\npgx includes built-in support to marshal and unmarshal between Go types and the PostgreSQL JSON and JSONB.\n\nInet and CIDR Mapping\n\npgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In addition, as a convenience pgx will encode\nfrom a net.IP; it will assume a \/32 netmask for IPv4 and a \/128 for IPv6.\n\nCustom Type Support\n\npgx includes support for the common data types like integers, floats, strings, dates, and times that have direct\nmappings between Go and SQL. In addition, pgx uses the github.com\/jackc\/pgx\/pgtype library to support more types. See\ndocumention for that library for instructions on how to implement custom types.\n\nSee example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.\n\npgx also includes support for custom types implementing the database\/sql.Scanner and database\/sql\/driver.Valuer\ninterfaces.\n\nIf pgx does cannot natively encode a type and that type is a renamed type (e.g. type MyTime time.Time) pgx will attempt\nto encode the underlying type. While this is usually desired behavior it can produce surprising behavior if one the\nunderlying type and the renamed type each implement database\/sql interfaces and the other implements pgx interfaces. It\nis recommended that this situation be avoided by implementing pgx interfaces on the renamed type.\n\nComposite types and row values\n\nRow values and composite types are represented as pgtype.Record (https:\/\/pkg.go.dev\/github.com\/jackc\/pgtype?tab=doc#Record).\nIt is possible to get values of your custom type by implementing DecodeBinary interface. Decoding into\npgtype.Record first can simplify process by avoiding dealing with raw protocol directly.\n\nFor example:\n\n type MyType struct {\n a int \/\/ NULL will cause decoding error\n b *string \/\/ there can be NULL in this position in SQL\n }\n\n func (t *MyType) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error {\n r := pgtype.Record{\n Fields: []pgtype.Value{&pgtype.Int4{}, &pgtype.Text{}},\n }\n\n if err := r.DecodeBinary(ci, src); err != nil {\n return err\n }\n\n if r.Status != pgtype.Present {\n return errors.New(\"BUG: decoding should not be called on NULL value\")\n }\n\n a := r.Fields[0].(*pgtype.Int4)\n b := r.Fields[1].(*pgtype.Text)\n\n \/\/ type compatibility is checked by AssignTo\n \/\/ only lossless assignments will succeed\n if err := a.AssignTo(&t.a); err != nil {\n return err\n }\n\n \/\/ AssignTo also deals with null value handling\n if err := b.AssignTo(&t.b); err != nil {\n return err\n }\n return nil\n }\n\n result := MyType{}\n err := conn.QueryRow(context.Background(), \"select row(1, 'foo'::text)\", pgx.QueryResultFormats{pgx.BinaryFormatCode}).Scan(&r)\n\nRaw Bytes Mapping\n\n[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified to PostgreSQL.\n\nTransactions\n\nTransactions are started by calling Begin.\n\n tx, err := conn.Begin(context.Background())\n if err != nil {\n return err\n }\n \/\/ Rollback is safe to call even if the tx is already closed, so if\n \/\/ the tx commits successfully, this is a no-op\n defer tx.Rollback(context.Background())\n\n _, err = tx.Exec(context.Background(), \"insert into foo(id) values (1)\")\n if err != nil {\n return err\n }\n\n err = tx.Commit(context.Background())\n if err != nil {\n return err\n }\n\nThe Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.\nThese are internally implemented with savepoints.\n\nUse BeginTx to control the transaction mode.\n\nPrepared Statements\n\nPrepared statements can be manually created with the Prepare method. However, this is rarely necessary because pgx\nincludes an automatic statement cache by default. Queries run through the normal Query, QueryRow, and Exec functions are\nautomatically prepared on first execution and the prepared statement is reused on subsequent executions. See ParseConfig\nfor information on how to customize or disable the statement cache.\n\nCopy Protocol\n\nUse CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a\nCopyFromSource interface. If the data is already in a [][]interface{} use CopyFromRows to wrap it in a CopyFromSource\ninterface. Or implement CopyFromSource to avoid buffering the entire data set in memory.\n\n rows := [][]interface{}{\n {\"John\", \"Smith\", int32(36)},\n {\"Jane\", \"Doe\", int32(29)},\n }\n\n copyCount, err := conn.CopyFrom(\n context.Background(),\n pgx.Identifier{\"people\"},\n []string{\"first_name\", \"last_name\", \"age\"},\n pgx.CopyFromRows(rows),\n )\n\nCopyFrom can be faster than an insert with as few as 5 rows.\n\nListen and Notify\n\npgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a\ncontext is received or the context is canceled.\n\n _, err := conn.Exec(context.Background(), \"listen channelname\")\n if err != nil {\n return nil\n }\n\n if notification, err := conn.WaitForNotification(context.Background()); err != nil {\n \/\/ do something with notification\n }\n\n\nLogging\n\npgx defines a simple logger interface. Connections optionally accept a logger that satisfies this interface. Set\nLogLevel to control logging verbosity. Adapters for github.com\/inconshreveable\/log15, github.com\/sirupsen\/logrus,\ngo.uber.org\/zap, github.com\/rs\/zerolog, and the testing log are provided in the log directory.\n\nLower Level PostgreSQL Functionality\n\npgx is implemented on top of github.com\/jackc\/pgconn a lower level PostgreSQL driver. The Conn.PgConn() method can be\nused to access this lower layer.\n\nPgBouncer\n\npgx is compatible with PgBouncer in two modes. One is when the connection has a statement cache in \"describe\" mode. The\nother is when the connection is using the simple protocol. This can be set with the PreferSimpleProtocol config option.\n*\/\npackage pgx\n<|endoftext|>"} {"text":"<commit_before>\/*\nCommand pigeon generates parsers in Go from a PEG grammar.\n\nFrom Wikipedia [0]:\n\n\tA parsing expression grammar is a type of analytic formal grammar, i.e.\n\tit describes a formal language in terms of a set of rules for recognizing\n\tstrings in the language.\n\nIts features and syntax are inspired by the PEG.js project [1], while\nthe implementation is loosely based on [2]. Formal presentation of the\nPEG theory by Bryan Ford is also an important reference [3].\n\n\t[0]: http:\/\/en.wikipedia.org\/wiki\/Parsing_expression_grammar\n\t[1]: http:\/\/pegjs.org\/\n\t[2]: http:\/\/www.codeproject.com\/Articles\/29713\/Parsing-Expression-Grammar-Support-for-C-Part\n\t[3]: http:\/\/pdos.csail.mit.edu\/~baford\/packrat\/popl04\/peg-popl04.pdf\n\nCommand-line usage\n\nThe pigeon tool must be called with a PEG grammar file as defined\nby the accepted PEG syntax below. The grammar may be provided by a\nfile or read from stdin. The generated parser is written to stdout\nby default.\n\n\tpigeon [options] [GRAMMAR_FILE]\n\nThe following options can be specified:\n\n\t-cache : cache parser results to avoid exponential parsing time in\n\tpathological cases. Can make the parsing slower for typical\n\tcases and uses more memory (default: false).\n\n\t-debug : boolean, print debugging info to stdout (default: false).\n\n\t-o=FILE : string, output file where the generated parser will be\n\twritten (default: stdout).\n\n\t-x : boolean, if set, do not build the parser, just parse the input grammar\n\t(default: false).\n\n\t-receiver-name=NAME : string, name of the receiver variable for the generated\n\tcode blocks. Non-initializer code blocks in the grammar end up as methods on the\n\t*current type, and this option sets the name of the receiver (default: c).\n\nThe tool makes no attempt to format the code, nor to detect the\nrequired imports. It is recommended to use goimports to properly generate\nthe output code:\n\tpigeon GRAMMAR_FILE | goimports > output_file.go\n\nThe goimports tool can be installed with:\n\tgo get golang.org\/x\/tools\/cmd\/goimports\n\nIf the code blocks in the grammar are golint- and go vet-compliant, then\nthe resulting generated code will also be golint- and go vet-compliant.\n\nThe generated code doesn't use any third-party dependency unless code blocks\nin the grammar require such a dependency.\n\nPEG syntax\n\nThe accepted syntax for the grammar is formally defined in the\ngrammar\/pigeon.peg file, using the PEG syntax. What follows is an informal\ndescription of this syntax.\n\nIdentifiers, whitespace, comments and literals follow the same\nnotation as the Go language, as defined in the language specification\n(http:\/\/golang.org\/ref\/spec#Source_code_representation):\n\n\t\/\/ single line comment*\/\n\/\/\t\/* multi-line comment *\/\n\/*\t'x' (single quotes for single char literal)\n\t\"double quotes for string literal\"\n\t`backtick quotes for raw string literal`\n\tRuleName (a valid identifier)\n\nThe grammar must be Unicode text encoded in UTF-8. New lines are identified\nby the \\n character (U+000A). Space (U+0020), horizontal tabs (U+0009) and\ncarriage returns (U+000D) are considered whitespace and are ignored except\nto separate tokens.\n\nRules\n\nA PEG grammar consists of a set of rules. A rule is an identifier followed\nby a rule definition operator and an expression. An optional display name -\na string literal used in error messages instead of the rule identifier - can\nbe specified after the rule identifier. E.g.:\n\tRuleA \"friendly name\" = 'a'+ \/\/ RuleA is one or more lowercase 'a's\n\nThe rule definition operator can be any one of those:\n\t=, <-, ← (U+2190), ⟵ (U+27F5)\n\nExpressions\n\nA rule is defined by an expression. The following sections describe the\nvarious expression types. Expressions can be grouped by using parentheses,\nand a rule can be referenced by its identifier in place of an expression.\n\nChoice expression\n\nThe choice expression is a list of expressions that will be tested in the\norder they are defined. The first one that matches will be used. Expressions\nare separated by the forward slash character \"\/\". E.g.:\n\tChoiceExpr = A \/ B \/ C \/\/ A, B and C should be rules declared in the grammar\n\nBecause the first match is used, it is important to think about the order\nof expressions. For example, in this rule, \"<=\" would never be used because\nthe \"<\" expression comes first:\n\tBadChoiceExpr = \"<\" \/ \"<=\"\n\nSequence expression\n\nThe sequence expression is a list of expressions that must all match in\nthat same order for the sequence expression to be considered a match.\nExpressions are separated by whitespace. E.g.:\n\tSeqExpr = \"A\" \"b\" \"c\" \/\/ matches \"Abc\", but not \"Acb\"\n\nLabeled expression\n\nA labeled expression consists of an identifier followed by a colon \":\"\nand an expression. A labeled expression introduces a variable named with\nthe label that can be referenced in the expression's code block.\nThe variable will have the value of the expression that follows the colon.\nE.g.:\n\tLabeledExpr = value:[a-z]+ {\n\t\tfmt.Println(value)\n\t\treturn value, nil\n\t}\n\nAnd and not expressions\n\nAn expression prefixed with the ampersand \"&\" is the \"and\" predicate\nexpression: it is considered a match if the following expression is a match,\nbut it does not consume any input.\n\nAn expression prefixed with the exclamation point \"!\" is the \"not\" predicate\nexpression: it is considered a match if the following expression is not\na match, but it does not consume any input. E.g.:\n\tAndExpr = \"A\" &\"B\" \/\/ matches \"A\" if followed by a \"B\" (does not consume \"B\")\n\tNotExpr = \"A\" !\"B\" \/\/ matches \"A\" if not followed by a \"B\" (does not consume \"B\")\n\nThe expression following the & and ! operators can be a code block. In that\ncase, the code block must return a bool and an error. The operator's semantic\nis the same, & is a match if the code block returns true, ! is a match if the\ncode block returns false. The code block has access to any labeled value\ndefined in its scope. E.g.:\n\tCodeAndExpr = value:[a-z] &{\n\t\t\/\/ can access the value local variable...\n\t\treturn true, nil\n\t}\n\nRepeating expressions\n\nAn expression followed by \"*\", \"?\" or \"+\" is a match if the expression\noccurs zero or more times (\"*\"), zero or one time \"?\" or one or more times\n(\"+\") respectively. The match is greedy, it will match as many times as\npossible. E.g.\n\tZeroOrMoreAs = \"A\"*\n\nLiteral matcher\n\nA literal matcher tries to match the input against a single character or a\nstring literal. The literal may be a single-quoted single character, a\ndouble-quoted string or a backtick-quoted raw string. The same rules as in Go\napply regarding the allowed characters and escapes.\n\nThe literal may be followed by a lowercase \"i\" (outside the ending quote)\nto indicate that the match is case-insensitive. E.g.:\n\tLiteralMatch = \"Awesome\\n\"i \/\/ matches \"awesome\" followed by a newline\n\nCharacter class matcher\n\nA character class matcher tries to match the input against a class of characters\ninside square brackets \"[...]\". Inside the brackets, characters represent\nthemselves and the same escapes as in string literals are available, except\nthat the single- and double-quote escape is not valid, instead the closing\nsquare bracket \"]\" must be escaped to be used.\n\nCharacter ranges can be specified using the \"[a-z]\" notation. Unicode\nclasses can be specified using the \"[\\pL]\" notation, where L is a\nsingle-letter Unicode class of characters, or using the \"[\\p{Class}]\"\nnotation where Class is a valid Unicode class (e.g. \"Latin\").\n\nAs for string literals, a lowercase \"i\" may follow the matcher (outside\nthe ending square bracket) to indicate that the match is case-insensitive.\nA \"^\" as first character inside the square brackets indicates that the match\nis inverted (it is a match if the input does not match the character class\nmatcher). E.g.:\n\tNotAZ = [^a-z]i\n\nAny matcher\n\nThe any matcher is represented by the dot \".\". It matches any character\nexcept the end of file, thus the \"!.\" expression is used to indicate \"match\nthe end of file\". E.g.:\n\tAnyChar = . \/\/ match a single character\n\tEOF = !.\n\nCode block\n\nCode blocks can be added to generate custom Go code. There are three kinds\nof code blocks: the initializer, the action and the predicate. All code blocks\nappear inside curly braces \"{...}\".\n\nThe initializer must appear first in the grammar, before any rule. It is\ncopied as-is (minus the wrapping curly braces) at the top of the generated\nparser. It may contain function declarations, types, variables, etc. just\nlike any Go file. Every symbol declared here will be available to all other\ncode blocks. Although the initializer is optional in a valid grammar, it is\nusually required to generate a valid Go source code file (for the package\nclause). E.g.:\n\t{\n\t\tpackage main\n\n\t\tfunc someHelper() {\n\t\t\t\/\/ ...\n\t\t}\n\t}\n\nAction code blocks are code blocks declared after an expression in a rule.\nThose code blocks are turned into a method on the \"*current\" type in the\ngenerated source code. The method receives any labeled expression's value\nas argument (as interface{}) and must return two values, the first being\nthe value of the expression (an interface{}), and the second an error.\nIf a non-nil error is returned, it is added to the list of errors that the\nparser will return. E.g.:\n\tRuleA = \"A\"+ {\n\t\t\/\/ return the matched string, \"c\" is the default name for\n\t\t\/\/ the *current receiver variable.\n\t\treturn string(c.text), nil\n\t}\n\nPredicate code blocks are code blocks declared immediately after the and \"&\"\nor the not \"!\" operators. Like action code blocks, predicate code blocks\nare turned into a method on the \"*current\" type in the generated source code.\nThe method receives any labeled expression's value as argument (as interface{})\nand must return two values, the first being a bool and the second an error.\nIf a non-nil error is returned, it is added to the list of errors that the\nparser will return. E.g.:\n\tRuleAB = [ab]i+ &{\n\t\treturn true, nil\n\t}\n\nThe current type is a struct that provides two useful fields that can be\naccessed in action and predicate code blocks: \"pos\" and \"text\".\n\nThe \"pos\" field indicates the current position of the parser in the source\ninput. It is itself a struct with three fields: \"line\", \"col\" and \"offset\".\nLine is a 1-based line number, col is a 1-based column number that counts\nrunes from the start of the line, and offset is a 0-based byte offset.\n\nThe \"text\" field is the slice of bytes of the current match. It is empty\nin a predicate code block.\n\nUsing the generated parser\n\nThe parser generated by pigeon exports a few symbols so that it can be used\nas a package with public functions to parse input text. The exported API is:\n\t- Parse(string, []byte) (interface{}, error)\n\t- ParseFile(string) (interface{}, error)\n\t- ParseReader(string, io.Reader) (interface{}, error)\n\nSee the godoc page of the generated parser for the test\/predicates grammar\nfor an example documentation page of the exported API:\nhttp:\/\/godoc.org\/github.com\/PuerkitoBio\/pigeon\/test\/predicates.\n\nLike the grammar used to generate the parser, the input text must be\nUTF-8-encoded Unicode.\n\nThe start rule of the parser is the first rule in the PEG grammar used\nto generate the parser. A call to any of the Parse* functions returns\nthe value generated by executing the grammar on the provided input text,\nand an optional error.\n\nTypically, the grammar should generate some kind of abstract syntax tree (AST),\nbut for simple grammars it may evaluate the result immediately, such as in\nthe examples\/calculator example. There are no constraints imposed on the\nauthor of the grammar, it can return whatever is needed.\n\nError reporting\n\nWhen the parser returns a non-nil error, the error is always of type errList,\nwhich is defined as a slice of errors ([]error). Each error in the list is\nof type *parserError. This is a struct that has an \"Inner\" field that can be\nused to access the original error.\n\nSo if a code block returns some well-known error like:\n\t{\n\t\treturn nil, io.EOF\n\t}\n\nThe original error can be accessed this way:\n\t_, err := ParseFile(\"some_file\")\n\tif err != nil {\n\t\tlist := err.(errList)\n\t\tfor _, err := range list {\n\t\t\tpe := err.(*parserError)\n\t\t\tif pe.Inner == io.EOF {\n\t\t\t\t\/\/ ...\n\t\t\t}\n\t\t}\n\t}\n\nBy defaut the parser will continue after an error is returned and will\ncumulate all errors found during parsing. If the grammar reaches a point\nwhere it shouldn't continue, a panic statement can be used to terminate\nparsing. The panic will be caught at the top-level of the Parse* call\nand will be converted into a *parserError like any error, and an errList\nwill still be returned to the caller.\n\nThe divide by zero error in the examples\/calculator grammar leverages this\nfeature (no special code is needed to handle division by zero, if it\nhappens, the runtime panics and it is recovered and returned as a parsing\nerror).\n\nProviding good error reporting in a parser is not a trivial task. Part\nof it is provided by the pigeon tool, by offering features such as\nfilename, position and rule name in the error message, but an\nimportant part of good error reporting needs to be done by the grammar\nauthor.\n\nFor example, many programming languages use double-quotes for string literals.\nUsually, if the opening quote is found, the closing quote is expected, and if\nnone is found, there won't be any other rule that will match, there's no need\nto backtrack and try other choices, an error should be added to the list\nand the match should be consumed.\n\nIn order to do this, the grammar can look something like this:\n\n\tStringLiteral = '\"' ValidStringChar* '\"' {\n\t\t\/\/ this is the valid case, build string literal node\n\t\t\/\/ node = ...\n\t\treturn node, nil\n\t} \/ '\"' ValidStringChar* !'\"' {\n\t\t\/\/ invalid case, build a replacement string literal node or build a BadNode\n\t\t\/\/ node = ...\n\t\treturn node, errors.New(\"string literal not terminated\")\n\t}\n\nThis is just one example, but it illustrates the idea that error reporting\nneeds to be thought out when designing the grammar.\n\n*\/\npackage main\n<commit_msg>doc: document new Option API<commit_after>\/*\nCommand pigeon generates parsers in Go from a PEG grammar.\n\nFrom Wikipedia [0]:\n\n\tA parsing expression grammar is a type of analytic formal grammar, i.e.\n\tit describes a formal language in terms of a set of rules for recognizing\n\tstrings in the language.\n\nIts features and syntax are inspired by the PEG.js project [1], while\nthe implementation is loosely based on [2]. Formal presentation of the\nPEG theory by Bryan Ford is also an important reference [3].\n\n\t[0]: http:\/\/en.wikipedia.org\/wiki\/Parsing_expression_grammar\n\t[1]: http:\/\/pegjs.org\/\n\t[2]: http:\/\/www.codeproject.com\/Articles\/29713\/Parsing-Expression-Grammar-Support-for-C-Part\n\t[3]: http:\/\/pdos.csail.mit.edu\/~baford\/packrat\/popl04\/peg-popl04.pdf\n\nCommand-line usage\n\nThe pigeon tool must be called with a PEG grammar file as defined\nby the accepted PEG syntax below. The grammar may be provided by a\nfile or read from stdin. The generated parser is written to stdout\nby default.\n\n\tpigeon [options] [GRAMMAR_FILE]\n\nThe following options can be specified:\n\n\t-cache : cache parser results to avoid exponential parsing time in\n\tpathological cases. Can make the parsing slower for typical\n\tcases and uses more memory (default: false).\n\n\t-debug : boolean, print debugging info to stdout (default: false).\n\n\t-o=FILE : string, output file where the generated parser will be\n\twritten (default: stdout).\n\n\t-x : boolean, if set, do not build the parser, just parse the input grammar\n\t(default: false).\n\n\t-receiver-name=NAME : string, name of the receiver variable for the generated\n\tcode blocks. Non-initializer code blocks in the grammar end up as methods on the\n\t*current type, and this option sets the name of the receiver (default: c).\n\nThe tool makes no attempt to format the code, nor to detect the\nrequired imports. It is recommended to use goimports to properly generate\nthe output code:\n\tpigeon GRAMMAR_FILE | goimports > output_file.go\n\nThe goimports tool can be installed with:\n\tgo get golang.org\/x\/tools\/cmd\/goimports\n\nIf the code blocks in the grammar are golint- and go vet-compliant, then\nthe resulting generated code will also be golint- and go vet-compliant.\n\nThe generated code doesn't use any third-party dependency unless code blocks\nin the grammar require such a dependency.\n\nPEG syntax\n\nThe accepted syntax for the grammar is formally defined in the\ngrammar\/pigeon.peg file, using the PEG syntax. What follows is an informal\ndescription of this syntax.\n\nIdentifiers, whitespace, comments and literals follow the same\nnotation as the Go language, as defined in the language specification\n(http:\/\/golang.org\/ref\/spec#Source_code_representation):\n\n\t\/\/ single line comment*\/\n\/\/\t\/* multi-line comment *\/\n\/*\t'x' (single quotes for single char literal)\n\t\"double quotes for string literal\"\n\t`backtick quotes for raw string literal`\n\tRuleName (a valid identifier)\n\nThe grammar must be Unicode text encoded in UTF-8. New lines are identified\nby the \\n character (U+000A). Space (U+0020), horizontal tabs (U+0009) and\ncarriage returns (U+000D) are considered whitespace and are ignored except\nto separate tokens.\n\nRules\n\nA PEG grammar consists of a set of rules. A rule is an identifier followed\nby a rule definition operator and an expression. An optional display name -\na string literal used in error messages instead of the rule identifier - can\nbe specified after the rule identifier. E.g.:\n\tRuleA \"friendly name\" = 'a'+ \/\/ RuleA is one or more lowercase 'a's\n\nThe rule definition operator can be any one of those:\n\t=, <-, ← (U+2190), ⟵ (U+27F5)\n\nExpressions\n\nA rule is defined by an expression. The following sections describe the\nvarious expression types. Expressions can be grouped by using parentheses,\nand a rule can be referenced by its identifier in place of an expression.\n\nChoice expression\n\nThe choice expression is a list of expressions that will be tested in the\norder they are defined. The first one that matches will be used. Expressions\nare separated by the forward slash character \"\/\". E.g.:\n\tChoiceExpr = A \/ B \/ C \/\/ A, B and C should be rules declared in the grammar\n\nBecause the first match is used, it is important to think about the order\nof expressions. For example, in this rule, \"<=\" would never be used because\nthe \"<\" expression comes first:\n\tBadChoiceExpr = \"<\" \/ \"<=\"\n\nSequence expression\n\nThe sequence expression is a list of expressions that must all match in\nthat same order for the sequence expression to be considered a match.\nExpressions are separated by whitespace. E.g.:\n\tSeqExpr = \"A\" \"b\" \"c\" \/\/ matches \"Abc\", but not \"Acb\"\n\nLabeled expression\n\nA labeled expression consists of an identifier followed by a colon \":\"\nand an expression. A labeled expression introduces a variable named with\nthe label that can be referenced in the expression's code block.\nThe variable will have the value of the expression that follows the colon.\nE.g.:\n\tLabeledExpr = value:[a-z]+ {\n\t\tfmt.Println(value)\n\t\treturn value, nil\n\t}\n\nAnd and not expressions\n\nAn expression prefixed with the ampersand \"&\" is the \"and\" predicate\nexpression: it is considered a match if the following expression is a match,\nbut it does not consume any input.\n\nAn expression prefixed with the exclamation point \"!\" is the \"not\" predicate\nexpression: it is considered a match if the following expression is not\na match, but it does not consume any input. E.g.:\n\tAndExpr = \"A\" &\"B\" \/\/ matches \"A\" if followed by a \"B\" (does not consume \"B\")\n\tNotExpr = \"A\" !\"B\" \/\/ matches \"A\" if not followed by a \"B\" (does not consume \"B\")\n\nThe expression following the & and ! operators can be a code block. In that\ncase, the code block must return a bool and an error. The operator's semantic\nis the same, & is a match if the code block returns true, ! is a match if the\ncode block returns false. The code block has access to any labeled value\ndefined in its scope. E.g.:\n\tCodeAndExpr = value:[a-z] &{\n\t\t\/\/ can access the value local variable...\n\t\treturn true, nil\n\t}\n\nRepeating expressions\n\nAn expression followed by \"*\", \"?\" or \"+\" is a match if the expression\noccurs zero or more times (\"*\"), zero or one time \"?\" or one or more times\n(\"+\") respectively. The match is greedy, it will match as many times as\npossible. E.g.\n\tZeroOrMoreAs = \"A\"*\n\nLiteral matcher\n\nA literal matcher tries to match the input against a single character or a\nstring literal. The literal may be a single-quoted single character, a\ndouble-quoted string or a backtick-quoted raw string. The same rules as in Go\napply regarding the allowed characters and escapes.\n\nThe literal may be followed by a lowercase \"i\" (outside the ending quote)\nto indicate that the match is case-insensitive. E.g.:\n\tLiteralMatch = \"Awesome\\n\"i \/\/ matches \"awesome\" followed by a newline\n\nCharacter class matcher\n\nA character class matcher tries to match the input against a class of characters\ninside square brackets \"[...]\". Inside the brackets, characters represent\nthemselves and the same escapes as in string literals are available, except\nthat the single- and double-quote escape is not valid, instead the closing\nsquare bracket \"]\" must be escaped to be used.\n\nCharacter ranges can be specified using the \"[a-z]\" notation. Unicode\nclasses can be specified using the \"[\\pL]\" notation, where L is a\nsingle-letter Unicode class of characters, or using the \"[\\p{Class}]\"\nnotation where Class is a valid Unicode class (e.g. \"Latin\").\n\nAs for string literals, a lowercase \"i\" may follow the matcher (outside\nthe ending square bracket) to indicate that the match is case-insensitive.\nA \"^\" as first character inside the square brackets indicates that the match\nis inverted (it is a match if the input does not match the character class\nmatcher). E.g.:\n\tNotAZ = [^a-z]i\n\nAny matcher\n\nThe any matcher is represented by the dot \".\". It matches any character\nexcept the end of file, thus the \"!.\" expression is used to indicate \"match\nthe end of file\". E.g.:\n\tAnyChar = . \/\/ match a single character\n\tEOF = !.\n\nCode block\n\nCode blocks can be added to generate custom Go code. There are three kinds\nof code blocks: the initializer, the action and the predicate. All code blocks\nappear inside curly braces \"{...}\".\n\nThe initializer must appear first in the grammar, before any rule. It is\ncopied as-is (minus the wrapping curly braces) at the top of the generated\nparser. It may contain function declarations, types, variables, etc. just\nlike any Go file. Every symbol declared here will be available to all other\ncode blocks. Although the initializer is optional in a valid grammar, it is\nusually required to generate a valid Go source code file (for the package\nclause). E.g.:\n\t{\n\t\tpackage main\n\n\t\tfunc someHelper() {\n\t\t\t\/\/ ...\n\t\t}\n\t}\n\nAction code blocks are code blocks declared after an expression in a rule.\nThose code blocks are turned into a method on the \"*current\" type in the\ngenerated source code. The method receives any labeled expression's value\nas argument (as interface{}) and must return two values, the first being\nthe value of the expression (an interface{}), and the second an error.\nIf a non-nil error is returned, it is added to the list of errors that the\nparser will return. E.g.:\n\tRuleA = \"A\"+ {\n\t\t\/\/ return the matched string, \"c\" is the default name for\n\t\t\/\/ the *current receiver variable.\n\t\treturn string(c.text), nil\n\t}\n\nPredicate code blocks are code blocks declared immediately after the and \"&\"\nor the not \"!\" operators. Like action code blocks, predicate code blocks\nare turned into a method on the \"*current\" type in the generated source code.\nThe method receives any labeled expression's value as argument (as interface{})\nand must return two values, the first being a bool and the second an error.\nIf a non-nil error is returned, it is added to the list of errors that the\nparser will return. E.g.:\n\tRuleAB = [ab]i+ &{\n\t\treturn true, nil\n\t}\n\nThe current type is a struct that provides two useful fields that can be\naccessed in action and predicate code blocks: \"pos\" and \"text\".\n\nThe \"pos\" field indicates the current position of the parser in the source\ninput. It is itself a struct with three fields: \"line\", \"col\" and \"offset\".\nLine is a 1-based line number, col is a 1-based column number that counts\nrunes from the start of the line, and offset is a 0-based byte offset.\n\nThe \"text\" field is the slice of bytes of the current match. It is empty\nin a predicate code block.\n\nUsing the generated parser\n\nThe parser generated by pigeon exports a few symbols so that it can be used\nas a package with public functions to parse input text. The exported API is:\n\t- Parse(string, []byte, ...Option) (interface{}, error)\n\t- ParseFile(string, ...Option) (interface{}, error)\n\t- ParseReader(string, io.Reader, ...Option) (interface{}, error)\n\t- Debug(bool) Option\n\t- Memoize(bool) Option\n\nSee the godoc page of the generated parser for the test\/predicates grammar\nfor an example documentation page of the exported API:\nhttp:\/\/godoc.org\/github.com\/PuerkitoBio\/pigeon\/test\/predicates.\n\nLike the grammar used to generate the parser, the input text must be\nUTF-8-encoded Unicode.\n\nThe start rule of the parser is the first rule in the PEG grammar used\nto generate the parser. A call to any of the Parse* functions returns\nthe value generated by executing the grammar on the provided input text,\nand an optional error.\n\nTypically, the grammar should generate some kind of abstract syntax tree (AST),\nbut for simple grammars it may evaluate the result immediately, such as in\nthe examples\/calculator example. There are no constraints imposed on the\nauthor of the grammar, it can return whatever is needed.\n\nError reporting\n\nWhen the parser returns a non-nil error, the error is always of type errList,\nwhich is defined as a slice of errors ([]error). Each error in the list is\nof type *parserError. This is a struct that has an \"Inner\" field that can be\nused to access the original error.\n\nSo if a code block returns some well-known error like:\n\t{\n\t\treturn nil, io.EOF\n\t}\n\nThe original error can be accessed this way:\n\t_, err := ParseFile(\"some_file\")\n\tif err != nil {\n\t\tlist := err.(errList)\n\t\tfor _, err := range list {\n\t\t\tpe := err.(*parserError)\n\t\t\tif pe.Inner == io.EOF {\n\t\t\t\t\/\/ ...\n\t\t\t}\n\t\t}\n\t}\n\nBy defaut the parser will continue after an error is returned and will\ncumulate all errors found during parsing. If the grammar reaches a point\nwhere it shouldn't continue, a panic statement can be used to terminate\nparsing. The panic will be caught at the top-level of the Parse* call\nand will be converted into a *parserError like any error, and an errList\nwill still be returned to the caller.\n\nThe divide by zero error in the examples\/calculator grammar leverages this\nfeature (no special code is needed to handle division by zero, if it\nhappens, the runtime panics and it is recovered and returned as a parsing\nerror).\n\nProviding good error reporting in a parser is not a trivial task. Part\nof it is provided by the pigeon tool, by offering features such as\nfilename, position and rule name in the error message, but an\nimportant part of good error reporting needs to be done by the grammar\nauthor.\n\nFor example, many programming languages use double-quotes for string literals.\nUsually, if the opening quote is found, the closing quote is expected, and if\nnone is found, there won't be any other rule that will match, there's no need\nto backtrack and try other choices, an error should be added to the list\nand the match should be consumed.\n\nIn order to do this, the grammar can look something like this:\n\n\tStringLiteral = '\"' ValidStringChar* '\"' {\n\t\t\/\/ this is the valid case, build string literal node\n\t\t\/\/ node = ...\n\t\treturn node, nil\n\t} \/ '\"' ValidStringChar* !'\"' {\n\t\t\/\/ invalid case, build a replacement string literal node or build a BadNode\n\t\t\/\/ node = ...\n\t\treturn node, errors.New(\"string literal not terminated\")\n\t}\n\nThis is just one example, but it illustrates the idea that error reporting\nneeds to be thought out when designing the grammar.\n\n*\/\npackage main\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Square Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nPackage squalor provides SQL utility routines, such as validation of\nmodels against table schemas, marshalling and unmarshalling of model\nstructs into table rows and programmatic construction of SQL\nstatements.\n\nLimitations\n\nWhile squalor uses the database\/sql package, the SQL it utilizes is\nMySQL specific (e.g. REPLACE, INSERT ON DUPLICATE KEY UPDATE, etc).\n\nModel Binding and Validation\n\nGiven a simple table definition:\n\n CREATE TABLE users (\n id bigint PRIMARY KEY NOT NULL,\n name varchar(767) NOT NULL,\n INDEX (id, name)\n )\n\nAnd a model for a row of the table:\n\n type User struct {\n ID int64 `db:\"id\"`\n Name string `db:\"name\"`\n }\n\nThe BindModel method will validate that the model and the table\ndefinition are compatible. For example, it would be an error for the\nUser.ID field to have the type string.\n\n users, err := db.BindModel(\"users\", User{})\n\nThe table definition is loaded from the database allowing custom\nchecks such as verifying the existence of indexes.\n\n if users.GetKey(\"id\") == nil {\n \/\/ The index (\"id\") does not exist!\n }\n if users.GetKey(\"id\", \"name\") == nil {\n \/\/ The index (\"id\", \"name\") does not exist!\n }\n\nCustom Types\n\nWhile fields are often primitive types such as int64 or string, it is\nsometimes desirable to use a custom type. This can be accomplished by\nhaving the type implement the sql.Scanner and driver.Valuer\ninterfaces. For example, you might create a GzippedText type which\nautomatically compresses the value when it is written to the database\nand uncompressed when it is read:\n\n type GzippedText []byte\n func (g GzippedText) Value() (driver.Value, error) {\n buf := &bytes.Buffer{}\n w := gzip.NewWriter(buf)\n defer w.Close()\n w.Write(g)\n return buf.Bytes(), nil\n }\n func (g *GzippedText) Scan(src interface{}) error {\n var source []byte\n switch t := src.(type) {\n case string:\n source = []byte(t)\n case []byte:\n source = t\n default:\n return errors.New(\"Incompatible type for GzippedText\")\n }\n reader, err := gzip.NewReader(bytes.NewReader(source))\n defer reader.Close()\n b, err := ioutil.ReadAll(reader)\n if err != nil {\n return err\n }\n *g = GzippedText(b)\n return nil\n }\n\nInsert\n\nThe Insert method is used to insert one or more rows in a table.\n\n err := db.Insert(User{ID:42, Name:\"Peter Mattis\"})\n\nYou can pass either a struct or a pointer to a struct.\n\n err := db.Insert(&User{ID:43, Name:\"Spencer Kimball\"})\n\nMultiple rows can be inserted at once. Doing so offers convenience and\nperformance.\n\n err := db.Insert(&User{ID:42, Name:\"Peter Mattis\"},\n &User{ID:43, Name:\"Spencer Kimball\"})\n\nWhen multiple rows are batch inserted the returned error corresponds\nto the first SQL error encountered which may make it impossible to\ndetermine which row caused the error. If the rows correspond to\ndifferent tables the order of insertion into the tables is\nundefined. If you care about the order of insertion into multiple\ntables and determining which row is causing an insertion error,\nstructure your calls to Insert appropriately (i.e. insert into a\nsingle table or insert a single row).\n\nAfter a successful insert on a table with an auto increment primary\nkey, the auto increment will be set back in the corresponding field of\nthe object. For example, if the user table was defined as:\n\n CREATE TABLE users (\n id bigint PRIMARY KEY AUTO INCREMENT NOT NULL,\n name varchar(767) NOT NULL,\n INDEX (id, name)\n )\n\nThen we could create a new user by doing:\n\n u := &User{Name:\"Peter Mattis\"}\n err := db.Insert(u)\n if err != nil {\n ...\n }\n \/\/ u.ID will be correctly populated at this point\n\nReplace\n\nThe Replace method replaces a row in table, either inserting the row\nif it doesn't exist, or deleting and then inserting the row. See the\nMySQL docs for the difference between REPLACE, UPDATE and INSERT ON\nDUPLICATE KEY UPDATE (Upsert).\n\n err := db.Replace(&User{ID:42, Name:\"Peter Mattis\"})\n\nUpdate\n\nThe Update method updates a row in a table, returning the number of\nrows modified.\n\n count, err := db.Update(&User{ID:42, Name:\"Peter Mattis\"})\n\nUpsert\n\nThe Upsert method inserts or updates a row.\n\n err := db.Upsert(&User{ID:42, Name:\"Peter Mattis\"})\n\nGet\n\nThe Get method retrieves a single row by primary key and binds the\nresult columns to a struct.\n\n user := &User{}\n err := db.Get(user, 42)\n \/\/ Returns an error if user 42 cannot be found.\n\nDelete\n\nThe delete method deletes rows by primary key.\n\n err := db.Delete(&User{ID:42})\n\nSee the documentation for DB.Delete for performance limitations when\nbatch deleting multiple rows from a table with a primary key composed\nof multiple columns.\n\nProgrammatic SQL Construction\n\nProgrammatic construction of SQL queries prohibits SQL injection\nattacks while keeping query construction both readable and similar to\nSQL itself. Support is provided for most of the SQL DML (data\nmanipulation language), though the constructed queries are targetted\nto MySQL.\n\n q := users.Select(\"*\").Where(users.C(\"id\").Eq(foo))\n \/\/ q.String() == \"SELECT `users`.* FROM users WHERE `users`.`id` = 'foo'\"\n var results []User\n err := db.Select(&results, q)\n\nDB provides wrappers for the sql.Query and sql.QueryRow\ninterfaces. The Rows struct returned from Query has an additional\nStructScan method for binding the columns for a row result to a\nstruct.\n\n rows, err := db.Query(q)\n if err != nil {\n return err\n }\n defer rows.Close()\n for rows.Next() {\n u := User{}\n if err := rows.StructScan(&u); err != nil {\n return err\n }\n \/\/ Process u\n }\n\nQueryRow can be used to easily query and scan a single value:\n\n var count int\n err := db.QueryRow(users.Select(users.C(\"id\").Count())).Scan(&count)\n\nPerformance\n\nIn addition to the convenience of inserting, deleting and updating\ntable rows using a struct, attention has been paid to\nperformance. Since squalor is carefully constructing the SQL queries\nfor insertion, deletion and update, it eschews the use of placeholders\nin favor of properly escaping values in the query. With the Go MySQL\ndrivers, this saves a roundtrip to the database for each query because\nqueries with placeholders must be prepared before being executed.\n\nMarshalling and unmarshalling of data from structs utilizes\nreflection, but care is taken to minimize the use of reflection in\norder to improve performance. For example, reflection data for models\nis cached when the model is bound to a table.\n\nBatch operations are utilized when possible. The Delete, Insert,\nReplace, Update and Upsert operations will perform multiple operations\nper SQL statement. This can provide an order of magnitude speed\nimprovement over performing the mutations one row at a time due to\nminimizing the network overhead.\n*\/\npackage squalor\n<commit_msg>Remove reference to defunct String()<commit_after>\/\/ Copyright 2014 Square Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nPackage squalor provides SQL utility routines, such as validation of\nmodels against table schemas, marshalling and unmarshalling of model\nstructs into table rows and programmatic construction of SQL\nstatements.\n\nLimitations\n\nWhile squalor uses the database\/sql package, the SQL it utilizes is\nMySQL specific (e.g. REPLACE, INSERT ON DUPLICATE KEY UPDATE, etc).\n\nModel Binding and Validation\n\nGiven a simple table definition:\n\n CREATE TABLE users (\n id bigint PRIMARY KEY NOT NULL,\n name varchar(767) NOT NULL,\n INDEX (id, name)\n )\n\nAnd a model for a row of the table:\n\n type User struct {\n ID int64 `db:\"id\"`\n Name string `db:\"name\"`\n }\n\nThe BindModel method will validate that the model and the table\ndefinition are compatible. For example, it would be an error for the\nUser.ID field to have the type string.\n\n users, err := db.BindModel(\"users\", User{})\n\nThe table definition is loaded from the database allowing custom\nchecks such as verifying the existence of indexes.\n\n if users.GetKey(\"id\") == nil {\n \/\/ The index (\"id\") does not exist!\n }\n if users.GetKey(\"id\", \"name\") == nil {\n \/\/ The index (\"id\", \"name\") does not exist!\n }\n\nCustom Types\n\nWhile fields are often primitive types such as int64 or string, it is\nsometimes desirable to use a custom type. This can be accomplished by\nhaving the type implement the sql.Scanner and driver.Valuer\ninterfaces. For example, you might create a GzippedText type which\nautomatically compresses the value when it is written to the database\nand uncompressed when it is read:\n\n type GzippedText []byte\n func (g GzippedText) Value() (driver.Value, error) {\n buf := &bytes.Buffer{}\n w := gzip.NewWriter(buf)\n defer w.Close()\n w.Write(g)\n return buf.Bytes(), nil\n }\n func (g *GzippedText) Scan(src interface{}) error {\n var source []byte\n switch t := src.(type) {\n case string:\n source = []byte(t)\n case []byte:\n source = t\n default:\n return errors.New(\"Incompatible type for GzippedText\")\n }\n reader, err := gzip.NewReader(bytes.NewReader(source))\n defer reader.Close()\n b, err := ioutil.ReadAll(reader)\n if err != nil {\n return err\n }\n *g = GzippedText(b)\n return nil\n }\n\nInsert\n\nThe Insert method is used to insert one or more rows in a table.\n\n err := db.Insert(User{ID:42, Name:\"Peter Mattis\"})\n\nYou can pass either a struct or a pointer to a struct.\n\n err := db.Insert(&User{ID:43, Name:\"Spencer Kimball\"})\n\nMultiple rows can be inserted at once. Doing so offers convenience and\nperformance.\n\n err := db.Insert(&User{ID:42, Name:\"Peter Mattis\"},\n &User{ID:43, Name:\"Spencer Kimball\"})\n\nWhen multiple rows are batch inserted the returned error corresponds\nto the first SQL error encountered which may make it impossible to\ndetermine which row caused the error. If the rows correspond to\ndifferent tables the order of insertion into the tables is\nundefined. If you care about the order of insertion into multiple\ntables and determining which row is causing an insertion error,\nstructure your calls to Insert appropriately (i.e. insert into a\nsingle table or insert a single row).\n\nAfter a successful insert on a table with an auto increment primary\nkey, the auto increment will be set back in the corresponding field of\nthe object. For example, if the user table was defined as:\n\n CREATE TABLE users (\n id bigint PRIMARY KEY AUTO INCREMENT NOT NULL,\n name varchar(767) NOT NULL,\n INDEX (id, name)\n )\n\nThen we could create a new user by doing:\n\n u := &User{Name:\"Peter Mattis\"}\n err := db.Insert(u)\n if err != nil {\n ...\n }\n \/\/ u.ID will be correctly populated at this point\n\nReplace\n\nThe Replace method replaces a row in table, either inserting the row\nif it doesn't exist, or deleting and then inserting the row. See the\nMySQL docs for the difference between REPLACE, UPDATE and INSERT ON\nDUPLICATE KEY UPDATE (Upsert).\n\n err := db.Replace(&User{ID:42, Name:\"Peter Mattis\"})\n\nUpdate\n\nThe Update method updates a row in a table, returning the number of\nrows modified.\n\n count, err := db.Update(&User{ID:42, Name:\"Peter Mattis\"})\n\nUpsert\n\nThe Upsert method inserts or updates a row.\n\n err := db.Upsert(&User{ID:42, Name:\"Peter Mattis\"})\n\nGet\n\nThe Get method retrieves a single row by primary key and binds the\nresult columns to a struct.\n\n user := &User{}\n err := db.Get(user, 42)\n \/\/ Returns an error if user 42 cannot be found.\n\nDelete\n\nThe delete method deletes rows by primary key.\n\n err := db.Delete(&User{ID:42})\n\nSee the documentation for DB.Delete for performance limitations when\nbatch deleting multiple rows from a table with a primary key composed\nof multiple columns.\n\nProgrammatic SQL Construction\n\nProgrammatic construction of SQL queries prohibits SQL injection\nattacks while keeping query construction both readable and similar to\nSQL itself. Support is provided for most of the SQL DML (data\nmanipulation language), though the constructed queries are targetted\nto MySQL.\n\n q := users.Select(\"*\").Where(users.C(\"id\").Eq(foo))\n \/\/ squalor.Serialize(q) == \"SELECT `users`.* FROM users WHERE `users`.`id` = 'foo'\", nil\n var results []User\n err := db.Select(&results, q)\n\nDB provides wrappers for the sql.Query and sql.QueryRow\ninterfaces. The Rows struct returned from Query has an additional\nStructScan method for binding the columns for a row result to a\nstruct.\n\n rows, err := db.Query(q)\n if err != nil {\n return err\n }\n defer rows.Close()\n for rows.Next() {\n u := User{}\n if err := rows.StructScan(&u); err != nil {\n return err\n }\n \/\/ Process u\n }\n\nQueryRow can be used to easily query and scan a single value:\n\n var count int\n err := db.QueryRow(users.Select(users.C(\"id\").Count())).Scan(&count)\n\nPerformance\n\nIn addition to the convenience of inserting, deleting and updating\ntable rows using a struct, attention has been paid to\nperformance. Since squalor is carefully constructing the SQL queries\nfor insertion, deletion and update, it eschews the use of placeholders\nin favor of properly escaping values in the query. With the Go MySQL\ndrivers, this saves a roundtrip to the database for each query because\nqueries with placeholders must be prepared before being executed.\n\nMarshalling and unmarshalling of data from structs utilizes\nreflection, but care is taken to minimize the use of reflection in\norder to improve performance. For example, reflection data for models\nis cached when the model is bound to a table.\n\nBatch operations are utilized when possible. The Delete, Insert,\nReplace, Update and Upsert operations will perform multiple operations\nper SQL statement. This can provide an order of magnitude speed\nimprovement over performing the mutations one row at a time due to\nminimizing the network overhead.\n*\/\npackage squalor\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ orm提供了一个相对统一的数据库操作,目前内置了对部分数据库的支持,\n\/\/ 用户可以通过实现自定义来实现对特定数据库的支持。\n\/\/\n\/\/ 支持的数据库:\n\/\/ 1. sqlite3: github.com\/mattn\/go-sqlite3\n\/\/ 2. mysql: github.com\/go-sql-driver\/mysql\n\/\/ 3. postgres:github.com\/lib\/pq\n\/\/ 其它数据库,用户可以通过实现orm\/core.Dialect接口,\n\/\/ 然后调用orm\/dialect.Register()注册来实现支持。\n\/\/\n\/\/ 初始化:\n\/\/\n\/\/ 默认情况下,orm包并不会加载任何数据库的实例。所以想要用哪个数据库,需要手动初始化:\n\/\/ \/\/ 加载dialect管理包\n\/\/ import github.com\/issue9\/orm\/dialect\n\/\/\n\/\/ \/\/ 加载数据库驱动\n\/\/ import _ github.com\/mattn\/go-sqlite3\n\/\/\n\/\/ \/\/ 向orm\/dialect包注册dialect\n\/\/ dialect.Register(\"sqlite3\", &dialect.Sqlite3{})\n\/\/\n\/\/ \/\/ 之后可以调用db1的各类方法操作数据库\n\/\/ db1 := orm.New(\"sqlite3\", \".\/db1\", \"db1\", \"prefix_\")\n\/\/\n\/\/ Model:\n\/\/ type User struct {\n\/\/ \/\/ 对应表中的id字段,为自增列,从0开始\n\/\/ Id int64 `orm:\"name(id);ai(0);\"`\n\/\/ \/\/ 对应表中的first_name字段,为索引index_name的一部分\n\/\/ FirstName string `orm:\"name(first_name);index(index_name)\"`\n\/\/ LastName string `orm:\"name(first_name);index(index_name)\"`\n\/\/ }\n\/\/\n\/\/ \/\/ 通过orm\/core.Metaer接口,指定表的额外数据。若不需要,可不用实现该接口\n\/\/ func(u *User) Meta() string {\n\/\/ return \"name(user);engine(innodb);charset(utf-8)\"\n\/\/ }\n\/\/ 通过struct tag可以直接将一个结构体定义为一个数据表结构,\n\/\/ struct tag的语法结构,如上面代码所示,目前支持以下的struct tag:\n\/\/\n\/\/ name(fieldName): 将当前的字段映射到数据表中的fieldName字段。\n\/\/\n\/\/ len(l1, l2): 指定字段的长度,比如mysql中的int(5),varchar(255),double(1,2),\n\/\/ 仅部分数据库支持,比如sqlite3不支持该属性。\n\/\/\n\/\/ nullable: 相当于定义表结构时的NULL,建议尽量少用该属性,若非用不可的话,\n\/\/ 与之对应的Go属性必须声明为NullString之类的结构。\n\/\/\n\/\/ pk: 主键,支持联合主键,给多个字段加上pk的struct tag即可。\n\/\/\n\/\/ ai(start,step): 自增,若指定了自增列,则将自动取消其它的pk设置。\n\/\/ start,step指定了自增列的起始值及步长,但并不是所有的数据库都支持。\n\/\/\n\/\/ unique(index_name): 唯一索引,支持联合索引,index_name为约束名,\n\/\/ 会将index_name为一样的字段定义为一个联合索引。\n\/\/\n\/\/ index(index_name): 普通的关键字索引,同unique一样会将名称相同的索引定义为一个联合索引。\n\/\/\n\/\/ default(value): 指定默认值。相当于定义表结构时的DEFAULT。\n\/\/\n\/\/ fk(fk_name,refTable,refColName,updateRule,deleteRule):\n\/\/ 定义物理外键,最少需要指定fk_name,refTabl,refColName三个值。\n\/\/ 分别对应约束名,引用的表和引用的字段,updateRule,deleteRule,\n\/\/ 在不指定的情况下,使用数据库的默认值。\n\/\/\n\/\/ 关于core.Metaer接口。\n\/\/\n\/\/ 在go不能将struct tag作用于结构体,所以为了指定一些表级别的属性,\n\/\/ 只能通过接口的方式指定,然后在接口方法中返回一段类似于struct tag的字符串,\n\/\/ 以达到相同的目的。\n\/\/ 当然在go中receive区分值类型和指针类型,所以指定接口时,需要注意这个情况。\n\/\/\n\/\/\n\/\/\n\/\/ 如何使用:\n\/\/\n\/\/ Create:\n\/\/ 可以通过Engine.Create()或是Tx.Create()创建一张表。\n\/\/ \/\/ 创建或是更新表\n\/\/ e.Create(&User{})\n\/\/ \/\/ 创建或是更新多个表\n\/\/ e.Create([]*User{&User{},&Email{}})\n\/\/\n\/\/ Update:\n\/\/ \/\/ 将id为1的记录的FirstName更改为abc\n\/\/ e.Update(&User{Id:1,FirstName:\"abc\"})\n\/\/ e.Where(\"id=?\", 1).Add(\"FirstName\", \"abc\").Update()\n\/\/ e.Where(\"id=?\").Columns(\"FirstName\").Update(\"abc\", 1)\n\/\/\n\/\/ Delete:\n\/\/ \/\/ 删除id为1的记录\n\/\/ e.Delete(&User{Id:1})\n\/\/ e.Where(\"id=?\").Delte(1)\n\/\/ e.Where(\"id=?\", 1).Delete()\n\/\/\n\/\/ Insert:\n\/\/ \/\/ 一次性插入一条数据\n\/\/ e.Insert(&User{Id:1,FirstName:\"abc\"})\n\/\/ \/\/ 一次性插入多条数据\n\/\/ e.Insert([]*User{&User{Id:1,FirstName:\"abc\"},&User{Id:1,FirstName:\"abc\"}})\n\/\/\n\/\/ Select:\n\/\/ \/\/ 导出id=1的数据\n\/\/ m, err := e.Where(\"id=?\", 1).FetchMap()\n\/\/ \/\/ 导出id<5的所有数据\n\/\/ m, err := e.Where(\"id<?\", 1).FetchMaps(5)\n\/\/\n\/\/ 事务:\n\/\/\n\/\/ 默认的Engine是不支持事务的,若需要事务支持,则需要调用Engine.Begin()\n\/\/ 返回事务对象Tx,当然并不是所有的数据库都支持事务操作的。\n\/\/ Tx拥有与Engine相似的接口。\npackage orm\n\n\/\/ 版本号\nconst Version = \"0.2.2.141230\"\n<commit_msg>更新部分文档<commit_after>\/\/ Copyright 2014 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ orm提供了一个相对统一的数据库操作,目前内置了对部分数据库的支持,\n\/\/ 用户可以通过实现自定义来实现对特定数据库的支持。\n\/\/\n\/\/ 支持的数据库:\n\/\/ 1. sqlite3: github.com\/mattn\/go-sqlite3\n\/\/ 2. mysql: github.com\/go-sql-driver\/mysql\n\/\/ 3. postgres:github.com\/lib\/pq\n\/\/ 其它数据库,用户可以通过实现orm\/core.Dialect接口,\n\/\/ 然后调用orm\/dialect.Register()注册来实现支持。\n\/\/\n\/\/ 初始化:\n\/\/\n\/\/ 默认情况下,orm包并不会加载任何数据库的实例。所以想要用哪个数据库,需要手动初始化:\n\/\/ import (\n\/\/ github.com\/issue9\/orm\/dialect \/\/ 加载dialect管理包\n\/\/ _ github.com\/mattn\/go-sqlite3 \/\/ 加载数据库驱动\n\/\/ )\n\/\/\n\/\/ \/\/ 向orm\/dialect包注册dialect\n\/\/ dialect.Register(\"sqlite3\", &dialect.Sqlite3{})\n\/\/\n\/\/ \/\/ 初始化一个Engine,表前缀为prefix_\n\/\/ db1 := orm.New(\"sqlite3\", \".\/db1\", \"db1\", \"prefix_\")\n\/\/\n\/\/ \/\/ 另一个Engine\n\/\/ db2 := orm.New(\"sqlite3\", \".\/db2\", \"db2\", \"db2_\")\n\/\/\n\/\/ Model:\n\/\/ type User struct {\n\/\/ Id int64 `orm:\"name(id);ai(0);\"`\n\/\/ FirstName string `orm:\"name(first_name);index(index_name)\"`\n\/\/ LastName string `orm:\"name(first_name);index(index_name)\"`\n\/\/ }\n\/\/\n\/\/ \/\/ 通过orm\/core.Metaer接口,指定表的额外数据。若不需要,可不用实现该接口\n\/\/ func(u *User) Meta() string {\n\/\/ return \"name(user);engine(innodb);charset(utf-8)\"\n\/\/ }\n\/\/ 通过struct tag可以直接将一个结构体定义为一个数据表结构,\n\/\/ struct tag的语法结构,如上面代码所示,目前支持以下的struct tag:\n\/\/\n\/\/ name(fieldName): 将当前的字段映射到数据表中的fieldName字段。\n\/\/\n\/\/ len(l1, l2): 指定字段的长度,比如mysql中的int(5),varchar(255),double(1,2),\n\/\/ 仅部分数据库支持,比如sqlite3不支持该属性。\n\/\/\n\/\/ nullable: 相当于定义表结构时的NULL,建议尽量少用该属性,若非用不可的话,\n\/\/ 与之对应的Go属性必须声明为NullString之类的结构。\n\/\/\n\/\/ pk: 主键,支持联合主键,给多个字段加上pk的struct tag即可。\n\/\/\n\/\/ ai(start,step): 自增,若指定了自增列,则将自动取消其它的pk设置。\n\/\/ start,step指定了自增列的起始值及步长,但并不是所有的数据库都支持。\n\/\/\n\/\/ unique(index_name): 唯一索引,支持联合索引,index_name为约束名,\n\/\/ 会将index_name为一样的字段定义为一个联合索引。\n\/\/\n\/\/ index(index_name): 普通的关键字索引,同unique一样会将名称相同的索引定义为一个联合索引。\n\/\/\n\/\/ default(value): 指定默认值。相当于定义表结构时的DEFAULT。\n\/\/\n\/\/ fk(fk_name,refTable,refColName,updateRule,deleteRule):\n\/\/ 定义物理外键,最少需要指定fk_name,refTabl,refColName三个值。\n\/\/ 分别对应约束名,引用的表和引用的字段,updateRule,deleteRule,\n\/\/ 在不指定的情况下,使用数据库的默认值。\n\/\/\n\/\/ 关于core.Metaer接口。\n\/\/\n\/\/ 在go不能将struct tag作用于结构体,所以为了指定一些表级别的属性,\n\/\/ 只能通过接口的形式,然后在接口方法中返回一段类似于struct tag的字符串,\n\/\/ 以达到相同的目的。\n\/\/ 当然在go中receive区分值类型和指针类型,所以指定接口时,需要注意这个情况。\n\/\/\n\/\/\n\/\/ 如何使用:\n\/\/\n\/\/ Create:\n\/\/ 可以通过Engine.Create()或是Tx.Create()创建一张表。\n\/\/ \/\/ 创建或是更新表\n\/\/ e.Create(&User{})\n\/\/ \/\/ 创建或是更新多个表\n\/\/ e.Create([]*User{&User{},&Email{}})\n\/\/\n\/\/ Update:\n\/\/ \/\/ 将id为1的记录的FirstName更改为abc\n\/\/ e.Update(&User{Id:1,FirstName:\"abc\"})\n\/\/ e.Where(\"id=?\", 1).Add(\"FirstName\", \"abc\").Update()\n\/\/ e.Where(\"id=?\").Columns(\"FirstName\").Update(\"abc\", 1)\n\/\/\n\/\/ Delete:\n\/\/ \/\/ 删除id为1的记录\n\/\/ e.Delete(&User{Id:1})\n\/\/ e.Where(\"id=?\").Delte(1)\n\/\/ e.Where(\"id=?\", 1).Delete()\n\/\/\n\/\/ Insert:\n\/\/ \/\/ 一次性插入一条数据\n\/\/ e.Insert(&User{Id:1,FirstName:\"abc\"})\n\/\/ \/\/ 一次性插入多条数据\n\/\/ e.Insert([]*User{&User{Id:1,FirstName:\"abc\"},&User{Id:1,FirstName:\"abc\"}})\n\/\/\n\/\/ Select:\n\/\/ \/\/ 导出id=1的数据\n\/\/ m, err := e.Where(\"id=?\", 1).FetchMap()\n\/\/ \/\/ 导出id<5的所有数据\n\/\/ m, err := e.Where(\"id<?\", 1).FetchMaps(5)\n\/\/\n\/\/\n\/\/ 事务:\n\/\/\n\/\/ 默认的Engine是不支持事务的,若需要事务支持,则需要调用Engine.Begin()\n\/\/ 返回事务对象Tx,当然并不是所有的数据库都支持事务操作的。\n\/\/ Tx拥有与Engine相似的接口。\npackage orm\n\n\/\/ 版本号\nconst Version = \"0.2.2.141230\"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The gocui Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage gocui allows to create console user interfaces.\n\nCreate a new GUI:\n\n\tg := gocui.NewGui()\n\tif err := g.Init(); err != nil {\n\t\t\/\/ handle error\n\t}\n\tdefer g.Close()\n\n\t\/\/ Set layout and key bindings\n\t\/\/ ...\n\n\tif err := g.MainLoop(); err != nil && err != gocui.ErrQuit {\n\t\t\/\/ handle error\n\t}\n\nSet the layout function:\n\n\tg.SetLayout(fcn)\n\nOn each iteration of the GUI's main loop, the \"layout function\" is executed.\nThese layout functions can be used to set-up and update the application's main\nviews, being possible to freely switch between them. Also, it is important to\nmention that a main loop iteration is executed on each reported event\n(key-press, mouse event, window resize, etc).\n\nGUIs are composed by Views, you can think of it as buffers. Views implement the\nio.ReadWriter interface, so you can just write to them if you want to modify\ntheir content. The same is valid for reading.\n\nCreate and initialize a view with absolute coordinates:\n\n\tif v, err := g.SetView(\"viewname\", 2, 2, 22, 7); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tfmt.Fprintln(v, \"This is a new view\")\n\t\t\/\/ ...\n\t}\n\nViews can also be created using relative coordinates:\n\n\tmaxX, maxY := g.Size()\n\tif v, err := g.SetView(\"viewname\", maxX\/2-30, maxY\/2, maxX\/2+30, maxY\/2+2); err != nil {\n\t\t\/\/ ...\n\t}\n\nIMPORTANT: Views can only be created, destroyed or updated in two ways: from a\nlayout funcion or via *Gui.Execute(). The reason for this is that it allows\ngocui to be conccurent-safe. So, if you want to update your GUI from a\ngoroutine, you must use *Gui.Execute(). For example:\n\n\tg.Execute(func(g *gocui.Gui) error {\n\t\tv, err := g.View(\"viewname\")\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tv.Clear()\n\t\tfmt.Fprintln(v, \"Writing from different goroutines\")\n\t\treturn nil\n\t})\n\nConfigure keybindings:\n\n\tif err := g.SetKeybinding(\"viewname\", gocui.KeyEnter, gocui.ModNone, fcn); err != nil {\n\t\t\/\/ handle error\n\t}\n\ngocui implements full mouse support that can be enabled with:\n\n\tg.Mouse = true\n\nMouse events are handled like any other keybinding:\n\n\tif err := g.SetKeybinding(\"viewname\", gocui.MouseLeft, gocui.ModNone, fcn); err != nil {\n\t\t\/\/ handle error\n\t}\n\nBy default, gocui provides a basic edition mode. This mode can be extended\nand customized creating a new Editor and assigning it to *Gui.Editor:\n\n\ttype Editor interface {\n\t\tEdit(v *View, key Key, ch rune, mod Modifier)\n\t}\n\nDefaultEditor can be taken as example to create your own custom Editor:\n\n\tvar DefaultEditor Editor = EditorFunc(simpleEditor)\n\n\tfunc simpleEditor(v *View, key Key, ch rune, mod Modifier) {\n\t\tswitch {\n\t\tcase ch != 0 && mod == 0:\n\t\t\tv.EditWrite(ch)\n\t\tcase key == KeySpace:\n\t\t\tv.EditWrite(' ')\n\t\tcase key == KeyBackspace || key == KeyBackspace2:\n\t\t\tv.EditDelete(true)\n\t\t\/\/ ...\n\t\t}\n\t}\n\nFor more information, see the examples in folder \"_examples\/\".\n*\/\npackage gocui\n<commit_msg>Minor fixes in doc<commit_after>\/\/ Copyright 2014 The gocui Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage gocui allows to create console user interfaces.\n\nCreate a new GUI:\n\n\tg := gocui.NewGui()\n\tif err := g.Init(); err != nil {\n\t\t\/\/ handle error\n\t}\n\tdefer g.Close()\n\n\t\/\/ Set layout and key bindings\n\t\/\/ ...\n\n\tif err := g.MainLoop(); err != nil && err != gocui.ErrQuit {\n\t\t\/\/ handle error\n\t}\n\nSet the layout function:\n\n\tg.SetLayout(fcn)\n\nOn each iteration of the GUI's main loop, the \"layout function\" is executed.\nThese layout functions can be used to set-up and update the application's main\nviews, being possible to freely switch between them. Also, it is important to\nmention that a main loop iteration is executed on each reported event\n(key-press, mouse event, window resize, etc).\n\nGUIs are composed by Views, you can think of it as buffers. Views implement the\nio.ReadWriter interface, so you can just write to them if you want to modify\ntheir content. The same is valid for reading.\n\nCreate and initialize a view with absolute coordinates:\n\n\tif v, err := g.SetView(\"viewname\", 2, 2, 22, 7); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tfmt.Fprintln(v, \"This is a new view\")\n\t\t\/\/ ...\n\t}\n\nViews can also be created using relative coordinates:\n\n\tmaxX, maxY := g.Size()\n\tif v, err := g.SetView(\"viewname\", maxX\/2-30, maxY\/2, maxX\/2+30, maxY\/2+2); err != nil {\n\t\t\/\/ ...\n\t}\n\nConfigure keybindings:\n\n\tif err := g.SetKeybinding(\"viewname\", gocui.KeyEnter, gocui.ModNone, fcn); err != nil {\n\t\t\/\/ handle error\n\t}\n\ngocui implements full mouse support that can be enabled with:\n\n\tg.Mouse = true\n\nMouse events are handled like any other keybinding:\n\n\tif err := g.SetKeybinding(\"viewname\", gocui.MouseLeft, gocui.ModNone, fcn); err != nil {\n\t\t\/\/ handle error\n\t}\n\nIMPORTANT: Views can only be created, destroyed or updated in three ways: from\nlayout funcions, from keybinding callbacks or via *Gui.Execute(). The reason\nfor this is that it allows gocui to be conccurent-safe. So, if you want to\nupdate your GUI from a goroutine, you must use *Gui.Execute(). For example:\n\n\tg.Execute(func(g *gocui.Gui) error {\n\t\tv, err := g.View(\"viewname\")\n\t\tif err != nil {\n\t\t\t\/\/ handle error\n\t\t}\n\t\tv.Clear()\n\t\tfmt.Fprintln(v, \"Writing from different goroutines\")\n\t\treturn nil\n\t})\n\nBy default, gocui provides a basic edition mode. This mode can be extended\nand customized creating a new Editor and assigning it to *Gui.Editor:\n\n\ttype Editor interface {\n\t\tEdit(v *View, key Key, ch rune, mod Modifier)\n\t}\n\nDefaultEditor can be taken as example to create your own custom Editor:\n\n\tvar DefaultEditor Editor = EditorFunc(simpleEditor)\n\n\tfunc simpleEditor(v *View, key Key, ch rune, mod Modifier) {\n\t\tswitch {\n\t\tcase ch != 0 && mod == 0:\n\t\t\tv.EditWrite(ch)\n\t\tcase key == KeySpace:\n\t\t\tv.EditWrite(' ')\n\t\tcase key == KeyBackspace || key == KeyBackspace2:\n\t\t\tv.EditDelete(true)\n\t\t\/\/ ...\n\t\t}\n\t}\n\nFor more information, see the examples in folder \"_examples\/\".\n*\/\npackage gocui\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage jpegsegs reads and writes JPEG markers and segment data. The\nsegment data isn't decoded but could be further processed to extract\nor modify metadata in formats such as Exif and XMP.\n\nExample programs are in the repository:\n\njpegsegsprint prints the markers and segment lengths for a JPEG file, up to th start of scan (SOS) marker.\n\njpegsegscopy makes an unmodified copy of a JPEG file.\n\njpegsegsstrip makes a copy of a JPEG file with all COM, APP and JPG segments removed.\n\n*\/\npackage jpegsegs\n<commit_msg>fix a typo<commit_after>\/*\nPackage jpegsegs reads and writes JPEG markers and segment data. The\nsegment data isn't decoded but could be further processed to extract\nor modify metadata in formats such as Exif and XMP.\n\nExample programs are in the repository:\n\njpegsegsprint prints the markers and segment lengths for a JPEG file, up to the start of scan (SOS) marker.\n\njpegsegscopy makes an unmodified copy of a JPEG file.\n\njpegsegsstrip makes a copy of a JPEG file with all COM, APP and JPG segments removed.\n\n*\/\npackage jpegsegs\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage dsl contains the main Pact DSL used in the Consumer\ncollaboration test cases, and Provider contract test verification.\n\nConsumer tests\n\nConsumer side Pact testing is an isolated test that ensures a given component\nis able to collaborate with another (remote) component. Pact will automatically\nstart a Mock server in the background that will act as the collaborators' test\ndouble.\n\nThis implies that any interactions expected on the Mock server will be validated,\nmeaning a test will fail if all interactions were not completed, or if unexpected\ninteractions were found:\n\nA typical consumer-side test would look something like this:\n\n\tfunc TestLogin(t *testing.T) {\n\n\t\t\/\/ Create Pact, connecting to local Daemon\n\t\t\/\/ Ensure the port matches the daemon port!\n\t\tpact := &Pact{\n\t\t\tPort: 6666,\n\t\t\tConsumer: \"My Consumer\",\n\t\t\tProvider: \"My Provider\",\n\t\t}\n\t\t\/\/ Shuts down Mock Service when done\n\t\tdefer pact.Teardown()\n\n\t\t\/\/ Pass in your test case as a function to Verify()\n\t\tvar test = func() error {\n\t\t\t_, err := http.Get(\"http:\/\/localhost:8000\/\")\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Set up our interactions. Note we have multiple in this test case!\n\t\tpact.\n\t\t\tAddInteraction().\n\t\t\tGiven(\"User Matt exists\"). \/\/ Provider State\n\t\t\tUponReceiving(\"A request to login\"). \/\/ Test Case Name\n\t\t\tWithRequest(&Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tPath: \"\/login\",\n\t\t\t}).\n\t\t\tWillRespondWith(&Response{\n\t\t\t\tStatus: 200,\n\t\t\t})\n\n\t\t\/\/ Run the test and verify the interactions.\n\t\terr := pact.Verify(test)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error on Verify: %v\", err)\n\t\t}\n\t}\n\nIf this test completed successfully, a Pact file should have been written to\n.\/pacts\/my_consumer-my_provider.json containing all of the interactions\nexpected to occur between the Consumer and Provider.\n\nProvider tests\n\nProvider side Pact testing, involves verifying that the contract - the Pact file\n- can be satisfied by the Provider.\n\nA typical Provider side test would like something like:\n\n\tfunc TestProvider_PactContract(t *testing.T) {\n\t\tgo startMyAPI(\"http:\/\/localhost:8000\")\n\n\t\tresponse := pact.VerifyProvider(&types.VerifyRequest{\n\t\t\tProviderBaseURL: \"http:\/\/localhost:8000\",\n\t\t\tPactURLs: []string{\".\/pacts\/my_consumer-my_provider.json\"},\n\t\t\tProviderStatesURL: \"http:\/\/localhost:8000\/states\",\n\t\t\tProviderStatesSetupURL: \"http:\/\/localhost:8000\/setup\",\n\t\t})\n\n\t\tif response.ExitCode != 0 {\n\t\t\tt.Fatalf(\"Got non-zero exit code '%d', expected 0\", response.ExitCode)\n\t\t}\n\t}\n\nNote that `PactURLs` can be a list of local pact files or remote based\nurls (possibly from a Pact Broker\n- http:\/\/docs.pact.io\/documentation\/sharings_pacts.html).\n\nPact reads the specified pact files (from remote or local sources) and replays\nthe interactions against a running Provider. If all of the interactions are met\nwe can say that both sides of the contract are satisfied and the test passes.\n\nMatching - Consumer Tests\n\nIn addition to verbatim value matching, you have 3 useful matching functions\nin the `dsl` package that can increase expressiveness and reduce brittle test\ncases.\n\n\tTerm(example, matcher)\ttells Pact that the value should match using a given regular expression, using `example` in mock responses. `example` must be a string.\n\tLike(content)\t\ttells Pact that the value itself is not important, as long as the element _type_ (valid JSON number, string, object etc.) itself matches.\n\tEachLike(content, min)\ttells Pact that the value should be an array type, consisting of elements like those passed in. `min` must be >= 1. `content` may be a valid JSON value: e.g. strings, numbers and objects.\n\nHere is a complex example that shows how all 3 terms can be used together:\n\n\tjumper := Like(`\"jumper\"`)\n\tshirt := Like(`\"shirt\"`)\n\ttag := EachLike(fmt.Sprintf(`[%s, %s]`, jumper, shirt), 2)\n\tsize := Like(10)\n\tcolour := Term(\"red\", \"red|green|blue\")\n\n\tmatch := formatJSON(\n\t\tEachLike(\n\t\t\tEachLike(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`{\n\t\t\t\t\t\t\"size\": %s,\n\t\t\t\t\t\t\"colour\": %s,\n\t\t\t\t\t\t\"tag\": %s\n\t\t\t\t\t}`, size, colour, tag),\n\t\t\t\t1),\n\t\t\t1))\n\n\nThis example will result in a response body from the mock server that looks like:\n\t[\n\t [\n\t {\n\t \"size\": 10,\n\t \"colour\": \"red\",\n\t \"tag\": [\n\t [\n\t \"jumper\",\n\t \"shirt\"\n\t ],\n\t [\n\t \"jumper\",\n\t \"shirt\"\n\t ]\n\t ]\n\t }\n\t ]\n\t]\n\nSee the examples in the dsl package and the matcher tests\n(https:\/\/github.com\/pact-foundation\/pact-go\/blob\/master\/dsl\/matcher_test.go)\nfor more matching examples.\n\nNOTE: You will need to use valid Ruby regular expressions\n(http:\/\/ruby-doc.org\/core-2.1.5\/Regexp.html) and double escape backslashes.\n\nRead more about flexible matching (https:\/\/github.com\/realestate-com-au\/pact\/wiki\/Regular-expressions-and-type-matching-with-Pact.\n\nPublishing Pacts to a Broker and Tagging Pacts\n\nSee the Pact Broker (http:\/\/docs.pact.io\/documentation\/sharings_pacts.html)\ndocumentation for more details on the Broker and this article\n(http:\/\/rea.tech\/enter-the-pact-matrix-or-how-to-decouple-the-release-cycles-of-your-microservices\/)\non how to make it work for you.\n\nUsing the Pact Broker with Basic authentication\n\nThe following flags are required to use basic authentication when\npublishing or retrieving Pact files to\/from a Pact Broker:\n\n\tBrokerUsername\tthe username for Pact Broker basic authentication.\n\tBrokerPassword\tthe password for Pact Broker basic authentication.\n\nOutput Logging\n\nPact Go uses a simple log utility (logutils - https:\/\/github.com\/hashicorp\/logutils)\nto filter log messages. The CLI already contains flags to manage this,\nshould you want to control log level in your tests, you can set it like so:\n\n\tpact := &Pact{\n\t ...\n\t\tLogLevel: \"DEBUG\", \/\/ One of DEBUG, INFO, ERROR, NONE\n\t}\n*\/\npackage main\n<commit_msg>docs(godoc): update main preamble<commit_after>\/*\nPact Go enables consumer driven contract testing, providing a mock service and\nDSL for the consumer project, and interaction playback and verification\nfor the service provider project.\n\nConsumer tests\n\nConsumer side Pact testing is an isolated test that ensures a given component\nis able to collaborate with another (remote) component. Pact will automatically\nstart a Mock server in the background that will act as the collaborators' test\ndouble.\n\nThis implies that any interactions expected on the Mock server will be validated,\nmeaning a test will fail if all interactions were not completed, or if unexpected\ninteractions were found:\n\nA typical consumer-side test would look something like this:\n\n\tfunc TestLogin(t *testing.T) {\n\n\t\t\/\/ Create Pact, connecting to local Daemon\n\t\t\/\/ Ensure the port matches the daemon port!\n\t\tpact := &Pact{\n\t\t\tPort: 6666,\n\t\t\tConsumer: \"My Consumer\",\n\t\t\tProvider: \"My Provider\",\n\t\t}\n\t\t\/\/ Shuts down Mock Service when done\n\t\tdefer pact.Teardown()\n\n\t\t\/\/ Pass in your test case as a function to Verify()\n\t\tvar test = func() error {\n\t\t\t_, err := http.Get(\"http:\/\/localhost:8000\/\")\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Set up our interactions. Note we have multiple in this test case!\n\t\tpact.\n\t\t\tAddInteraction().\n\t\t\tGiven(\"User Matt exists\"). \/\/ Provider State\n\t\t\tUponReceiving(\"A request to login\"). \/\/ Test Case Name\n\t\t\tWithRequest(&Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tPath: \"\/login\",\n\t\t\t}).\n\t\t\tWillRespondWith(&Response{\n\t\t\t\tStatus: 200,\n\t\t\t})\n\n\t\t\/\/ Run the test and verify the interactions.\n\t\terr := pact.Verify(test)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error on Verify: %v\", err)\n\t\t}\n\t}\n\nIf this test completed successfully, a Pact file should have been written to\n.\/pacts\/my_consumer-my_provider.json containing all of the interactions\nexpected to occur between the Consumer and Provider.\n\nProvider tests\n\nProvider side Pact testing, involves verifying that the contract - the Pact file\n- can be satisfied by the Provider.\n\nA typical Provider side test would like something like:\n\n\tfunc TestProvider_PactContract(t *testing.T) {\n\t\tgo startMyAPI(\"http:\/\/localhost:8000\")\n\n\t\tresponse := pact.VerifyProvider(&types.VerifyRequest{\n\t\t\tProviderBaseURL: \"http:\/\/localhost:8000\",\n\t\t\tPactURLs: []string{\".\/pacts\/my_consumer-my_provider.json\"},\n\t\t\tProviderStatesURL: \"http:\/\/localhost:8000\/states\",\n\t\t\tProviderStatesSetupURL: \"http:\/\/localhost:8000\/setup\",\n\t\t})\n\n\t\tif response.ExitCode != 0 {\n\t\t\tt.Fatalf(\"Got non-zero exit code '%d', expected 0\", response.ExitCode)\n\t\t}\n\t}\n\nNote that `PactURLs` can be a list of local pact files or remote based\nurls (possibly from a Pact Broker\n- http:\/\/docs.pact.io\/documentation\/sharings_pacts.html).\n\nPact reads the specified pact files (from remote or local sources) and replays\nthe interactions against a running Provider. If all of the interactions are met\nwe can say that both sides of the contract are satisfied and the test passes.\n\nMatching - Consumer Tests\n\nIn addition to verbatim value matching, you have 3 useful matching functions\nin the `dsl` package that can increase expressiveness and reduce brittle test\ncases.\n\n\tTerm(example, matcher)\ttells Pact that the value should match using a given regular expression, using `example` in mock responses. `example` must be a string.\n\tLike(content)\t\ttells Pact that the value itself is not important, as long as the element _type_ (valid JSON number, string, object etc.) itself matches.\n\tEachLike(content, min)\ttells Pact that the value should be an array type, consisting of elements like those passed in. `min` must be >= 1. `content` may be a valid JSON value: e.g. strings, numbers and objects.\n\nHere is a complex example that shows how all 3 terms can be used together:\n\n\tjumper := Like(`\"jumper\"`)\n\tshirt := Like(`\"shirt\"`)\n\ttag := EachLike(fmt.Sprintf(`[%s, %s]`, jumper, shirt), 2)\n\tsize := Like(10)\n\tcolour := Term(\"red\", \"red|green|blue\")\n\n\tmatch := formatJSON(\n\t\tEachLike(\n\t\t\tEachLike(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`{\n\t\t\t\t\t\t\"size\": %s,\n\t\t\t\t\t\t\"colour\": %s,\n\t\t\t\t\t\t\"tag\": %s\n\t\t\t\t\t}`, size, colour, tag),\n\t\t\t\t1),\n\t\t\t1))\n\n\nThis example will result in a response body from the mock server that looks like:\n\t[\n\t [\n\t {\n\t \"size\": 10,\n\t \"colour\": \"red\",\n\t \"tag\": [\n\t [\n\t \"jumper\",\n\t \"shirt\"\n\t ],\n\t [\n\t \"jumper\",\n\t \"shirt\"\n\t ]\n\t ]\n\t }\n\t ]\n\t]\n\nSee the examples in the dsl package and the matcher tests\n(https:\/\/github.com\/pact-foundation\/pact-go\/blob\/master\/dsl\/matcher_test.go)\nfor more matching examples.\n\nNOTE: You will need to use valid Ruby regular expressions\n(http:\/\/ruby-doc.org\/core-2.1.5\/Regexp.html) and double escape backslashes.\n\nRead more about flexible matching (https:\/\/github.com\/realestate-com-au\/pact\/wiki\/Regular-expressions-and-type-matching-with-Pact.\n\nPublishing Pacts to a Broker and Tagging Pacts\n\nSee the Pact Broker (http:\/\/docs.pact.io\/documentation\/sharings_pacts.html)\ndocumentation for more details on the Broker and this article\n(http:\/\/rea.tech\/enter-the-pact-matrix-or-how-to-decouple-the-release-cycles-of-your-microservices\/)\non how to make it work for you.\n\nUsing the Pact Broker with Basic authentication\n\nThe following flags are required to use basic authentication when\npublishing or retrieving Pact files to\/from a Pact Broker:\n\n\tBrokerUsername\tthe username for Pact Broker basic authentication.\n\tBrokerPassword\tthe password for Pact Broker basic authentication.\n\nOutput Logging\n\nPact Go uses a simple log utility (logutils - https:\/\/github.com\/hashicorp\/logutils)\nto filter log messages. The CLI already contains flags to manage this,\nshould you want to control log level in your tests, you can set it like so:\n\n\tpact := &Pact{\n\t ...\n\t\tLogLevel: \"DEBUG\", \/\/ One of DEBUG, INFO, ERROR, NONE\n\t}\n*\/\npackage main\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage dotquote serializes data into the \"dotquote\" format.\n\nThe \"dotquote\" format is a more human-readable for a list of namespaced key-valued pairs.\n\nThe \"dotquote\" format is also useful for fitting your entire data on a single line of text.\n\nExample\n\n\t\"app\".\"name\"=\"myapi\" \"app\".\"build\".\"number\"=\"23\" \"apple\"=\"one\" \"banana\"=\"two\" \"cherry\"=\"three\" \"trace\".\"id\"=\"DtehCQqBnw93Tw4h\"\n\nThe example dotquote line could have been generated by the following code:\n\n\tm := map[string]interface{}{\n\t\t\"app\":map[string]interface{}{\n\t\t\t\"name\":\"myapi\",\n\t\t\t\"build\":map[string]interface{\n\t\t\t\t\"number\":23,\n\t\t\t},\n\t\t},\n\t\t\"apple\":\"one\",\n\t\t\"banana\":\"two\",\n\t\t\"cherry\":\"three\",\n\t\t\"trace\":map[string]interface{}{\n\t\t\t\"id\":\"DtehCQqBnw93Tw4h\",\n\t\t},\n\t}\n\t\n\tvar p []byte\n\tp = dotquote.AppendMap(p, m)\n*\/\npackage dotquote\n<commit_msg>add some more docs<commit_after>\/*\nPackage dotquote serializes data into the \"dotquote\" format.\n\nThe \"dotquote\" format is a more human-readable for a list of namespaced key-valued pairs.\n\nThe \"dotquote\" format is also useful for fitting your entire data on a single line of text.\n\nExample\n\n\t\"app\".\"name\"=\"myapi\" \"app\".\"build\".\"number\"=\"23\" \"apple\"=\"one\" \"banana\"=\"two\" \"cherry\"=\"three\" \"trace\".\"id\"=\"DtehCQqBnw93Tw4h\"\n\nThe example dotquote line could have been generated by the following code:\n\n\tm := map[string]interface{}{\n\t\t\"app\":map[string]interface{}{\n\t\t\t\"name\":\"myapi\",\n\t\t\t\"build\":map[string]interface{\n\t\t\t\t\"number\":23,\n\t\t\t},\n\t\t},\n\t\t\"apple\":\"one\",\n\t\t\"banana\":\"two\",\n\t\t\"cherry\":\"three\",\n\t\t\"trace\":map[string]interface{}{\n\t\t\t\"id\":\"DtehCQqBnw93Tw4h\",\n\t\t},\n\t}\n\t\n\tvar p []byte\n\tp = dotquote.AppendMap(p, m)\n\nAlternatively, it that example dotquote line could have been generated by the following code:\n\n\tvar p []byte\n\t\n\tp = dotquote.AppendString(p, \"myapi\", \"app\", \"name\")\n\tp = dotquote.AppendString(p, \"23\", \"app\", \"build\", \"number\")\n\tp = dotquote.AppendString(p, \"one\", \"apple\")\n\tp = dotquote.AppendString(p, \"two\", \"banana\")\n\tp = dotquote.AppendString(p, \"three\", \"cherry\")\n\tp = dotquote.AppendString(p, \"DtehCQqBnw93Tw4h\", \"trace\", \"id\")\n*\/\npackage dotquote\n<|endoftext|>"} {"text":"<commit_before>package lmdb\n\n\/*\n#include <stdlib.h>\n#include <stdio.h>\n#include \"lmdb.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/ success is a value returned from the LMDB API to indicate a successful call.\n\/\/ The functions in this API this behavior and its use is not required.\nconst success = C.MDB_SUCCESS\n\nconst (\n\t\/\/ Flags for Env.Open.\n\t\/\/\n\t\/\/ See mdb_env_open\n\tFixedMap = C.MDB_FIXEDMAP \/\/ Danger zone. Map memory at a fixed address.\n\tNoSubdir = C.MDB_NOSUBDIR \/\/ Argument to Open is a file, not a directory.\n\tReadonly = C.MDB_RDONLY \/\/ Used in several functions to denote an object as readonly.\n\tWriteMap = C.MDB_WRITEMAP \/\/ Use a writable memory map.\n\tNoMetaSync = C.MDB_NOMETASYNC \/\/ Don't fsync metapage after commit.\n\tNoSync = C.MDB_NOSYNC \/\/ Don't fsync after commit.\n\tMapAsync = C.MDB_MAPASYNC \/\/ Flush asynchronously when using the WriteMap flag.\n\tNoTLS = C.MDB_NOTLS \/\/ Danger zone. Tie reader locktable slots to Txn objects instead of threads.\n\tNoLock = C.MDB_NOLOCK \/\/ Danger zone. LMDB does not use any locks. All transactions must serialize.\n\tNoReadahead = C.MDB_NORDAHEAD \/\/ Disable readahead. Requires OS support.\n\tNoMemInit = C.MDB_NOMEMINIT \/\/ Disable LMDB memory initialization.\n)\n\n\/\/ DBI is a handle for a database in an Env.\n\/\/\n\/\/ See MDB_dbi\ntype DBI C.MDB_dbi\n\n\/\/ Env is opaque structure for a database environment. A DB environment\n\/\/ supports multiple databases, all residing in the same shared-memory map.\n\/\/\n\/\/ See MDB_env.\ntype Env struct {\n\t_env *C.MDB_env\n}\n\n\/\/ NewEnv allocates and initializes a new Env.\n\/\/\n\/\/ See mdb_env_create.\nfunc NewEnv() (*Env, error) {\n\tvar _env *C.MDB_env\n\tret := C.mdb_env_create(&_env)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\treturn &Env{_env}, nil\n}\n\n\/\/ Open an environment handle. If this function fails Close() must be called to\n\/\/ discard the Env handle. Open passes flags|NoTLS to mdb_env_open.\n\/\/\n\/\/ See mdb_env_open.\nfunc (env *Env) Open(path string, flags uint, mode os.FileMode) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_open(env._env, cpath, C.uint(NoTLS|flags), C.mdb_mode_t(mode))\n\treturn errno(ret)\n}\n\n\/\/ Close shuts down the environment and releases the memory map.\n\/\/\n\/\/ See mdb_env_close.\nfunc (env *Env) Close() error {\n\tif env._env == nil {\n\t\treturn errors.New(\"Environment already closed\")\n\t}\n\tC.mdb_env_close(env._env)\n\tenv._env = nil\n\treturn nil\n}\n\n\/\/ Copy copies the data in env to an environment at path.\n\/\/\n\/\/ See mdb_env_copy.\nfunc (env *Env) Copy(path string) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_copy(env._env, cpath)\n\treturn errno(ret)\n}\n\n\/\/ CopyFlag copies the data in env to an environment at path created with flags.\n\/\/\n\/\/ See mdb_env_copy2.\nfunc (env *Env) CopyFlag(path string, flags uint) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_copy2(env._env, cpath, C.uint(flags))\n\treturn errno(ret)\n}\n\n\/\/ Statistics for a database in the environment\n\/\/\n\/\/ See MDB_stat.\ntype Stat struct {\n\tPSize uint \/\/ Size of a database page. This is currently the same for all databases.\n\tDepth uint \/\/ Depth (height) of the B-tree\n\tBranchPages uint64 \/\/ Number of internal (non-leaf) pages\n\tLeafPages uint64 \/\/ Number of leaf pages\n\tOverflowPages uint64 \/\/ Number of overflow pages\n\tEntries uint64 \/\/ Number of data items\n}\n\n\/\/ Stat returns statistics about the environment.\n\/\/\n\/\/ See mdb_env_stat.\nfunc (env *Env) Stat() (*Stat, error) {\n\tvar _stat C.MDB_stat\n\tret := C.mdb_env_stat(env._env, &_stat)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\tstat := Stat{PSize: uint(_stat.ms_psize),\n\t\tDepth: uint(_stat.ms_depth),\n\t\tBranchPages: uint64(_stat.ms_branch_pages),\n\t\tLeafPages: uint64(_stat.ms_leaf_pages),\n\t\tOverflowPages: uint64(_stat.ms_overflow_pages),\n\t\tEntries: uint64(_stat.ms_entries)}\n\treturn &stat, nil\n}\n\n\/\/ Information about the environment.\n\/\/\n\/\/ See MDB_envinfo.\ntype EnvInfo struct {\n\tMapSize int64 \/\/ Size of the data memory map\n\tLastPNO int64 \/\/ ID of the last used page\n\tLastTxnID int64 \/\/ ID of the last committed transaction\n\tMaxReaders uint \/\/ maximum number of threads for the environment\n\tNumReaders uint \/\/ maximum number of threads used in the environment\n}\n\n\/\/ Info returns information about the environment.\n\/\/\n\/\/ See mdb_env_info.\nfunc (env *Env) Info() (*EnvInfo, error) {\n\tvar _info C.MDB_envinfo\n\tret := C.mdb_env_info(env._env, &_info)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\tinfo := EnvInfo{\n\t\tMapSize: int64(_info.me_mapsize),\n\t\tLastPNO: int64(_info.me_last_pgno),\n\t\tLastTxnID: int64(_info.me_last_txnid),\n\t\tMaxReaders: uint(_info.me_maxreaders),\n\t\tNumReaders: uint(_info.me_numreaders),\n\t}\n\treturn &info, nil\n}\n\n\/\/ Sync flushes buffers to disk.\n\/\/\n\/\/ See mdb_env_sync.\nfunc (env *Env) Sync(force bool) error {\n\tret := C.mdb_env_sync(env._env, cbool(force))\n\treturn errno(ret)\n}\n\n\/\/ SetFlags enables\/disables flags in the environment.\n\/\/\n\/\/ See mdb_env_set_flags.\nfunc (env *Env) SetFlags(flags uint, onoff int) error {\n\tret := C.mdb_env_set_flags(env._env, C.uint(flags), C.int(onoff))\n\treturn errno(ret)\n}\n\n\/\/ Flags returns the flags set in the environment.\n\/\/\n\/\/ See mdb_env_get_flags.\nfunc (env *Env) Flags() (uint, error) {\n\tvar _flags C.uint\n\tret := C.mdb_env_get_flags(env._env, &_flags)\n\tif ret != success {\n\t\treturn 0, errno(ret)\n\t}\n\treturn uint(_flags), nil\n}\n\n\/\/ Path returns the path argument passed to Open. Path returns an error if the\n\/\/ Env was not opened previously. Calling Path on a closed Env has undefined\n\/\/ results.\n\/\/\n\/\/ See mdb_env_path.\nfunc (env *Env) Path() (string, error) {\n\tvar path string\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_get_path(env._env, &cpath)\n\tif ret != success {\n\t\treturn \"\", errno(ret)\n\t}\n\tif cpath == nil {\n\t\treturn \"\", fmt.Errorf(\"not open\")\n\t}\n\treturn C.GoString(cpath), nil\n}\n\n\/\/ SetMapSize sets the size of the environment memory map.\n\/\/\n\/\/ See mdb_env_set_map_size.\nfunc (env *Env) SetMapSize(size int64) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_mapsize(env._env, C.size_t(size))\n\treturn errno(ret)\n}\n\n\/\/ SetMaxReaders sets the maximum number of reader slots in the environment.\n\/\/\n\/\/ See mdb_env_set_max_readers.\nfunc (env *Env) SetMaxReaders(size int) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_maxreaders(env._env, C.uint(size))\n\treturn errno(ret)\n}\n\n\/\/ SetMaxDBs sets the maximum number of named databases for the environment.\n\/\/\n\/\/ See mdb_env_set_maxdbs.\nfunc (env *Env) SetMaxDBs(size int) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_maxdbs(env._env, C.MDB_dbi(size))\n\treturn errno(ret)\n}\n\n\/\/ BeginTxn is a low-level (potentially dangerous) method to initialize a new\n\/\/ transaction on env. BeginTxn does not attempt to serialize operations on\n\/\/ write transactions to the same OS thread and its use for write transactions\n\/\/ can cause undefined results without care.\n\/\/\n\/\/ Instead of BeginTxn users should call the View, Update, RunTxn methods.\n\/\/\n\/\/ See mdb_txn_begin.\nfunc (env *Env) BeginTxn(parent *Txn, flags uint) (*Txn, error) {\n\treturn beginTxn(env, parent, flags)\n}\n\n\/\/ Run creates a new Txn and calls fn with it as an argument. Run commits the\n\/\/ transaction if fn returns nil otherwise the transaction is aborted.\n\/\/\n\/\/ Because Run terminates the transaction goroutines should not retain\n\/\/ references to it after fn returns. Writable transactions (without the\n\/\/ Readonly flag) must not be used from any goroutines other than the one\n\/\/ running fn.\n\/\/\n\/\/ See mdb_txn_begin.\nfunc (env *Env) RunTxn(flags uint, fn TxnOp) error {\n\tif isReadonly(flags) {\n\t\treturn env.run(flags, fn)\n\t}\n\treturn env.runUpdate(flags, fn)\n}\n\n\/\/ View creates a readonly transaction with a consistent view of the\n\/\/ environment and passes it to fn. View terminates its transaction after fn\n\/\/ returns. Any error encountered by View is returned.\n\/\/\n\/\/ Any call to Commit, Abort, Reset or Renew on a Txn created by View will\n\/\/ panic.\nfunc (env *Env) View(fn TxnOp) error {\n\treturn env.run(Readonly, fn)\n}\n\n\/\/ Update creates a writable transaction and passes it to fn. Update commits\n\/\/ the transaction if fn returns without error otherwise Update aborts the\n\/\/ transaction and returns the error.\n\/\/\n\/\/ The Txn passed to fn must not be used from multiple goroutines, even with\n\/\/ synchronization.\n\/\/\n\/\/ Any call to Commit, Abort, Reset or Renew on a Txn created by Update will\n\/\/ panic.\nfunc (env *Env) Update(fn TxnOp) error {\n\treturn env.runUpdate(0, fn)\n}\n\nfunc (env *Env) run(flags uint, fn TxnOp) error {\n\ttxn, err := env.BeginTxn(nil, flags)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttxn.managed = true\n\terr = fn(txn)\n\tif err != nil {\n\t\ttxn.abort()\n\t\treturn err\n\t}\n\treturn txn.commit()\n}\n\n\/\/ runUpdate calls fn with a managed transaction on another goroutine. The\n\/\/ transaction runs inside a goroutine so thread locking does not interact\n\/\/ negatively with other objects which would like to lock their thread.\nfunc (env *Env) runUpdate(flags uint, fn TxnOp) error {\n\terrc := make(chan error)\n\tgo func() {\n\t\tdefer close(errc)\n\t\truntime.LockOSThread()\n\t\tdefer runtime.UnlockOSThread()\n\t\terr := env.run(flags, fn)\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t}\n\t}()\n\treturn <-errc\n}\n\n\/\/ CloseDBI closes the database handle, db. Normally calling CloseDBI\n\/\/ explicitly is not necessary.\n\/\/\n\/\/ It is the caller's responsibility to serialize calls to CloseDBI.\n\/\/\n\/\/ See mdb_dbi_close.\nfunc (env *Env) CloseDBI(db DBI) {\n\tC.mdb_dbi_close(env._env, C.MDB_dbi(db))\n}\n\nfunc isReadonly(flags uint) bool {\n\treturn flags&Readonly != 0\n}\n<commit_msg>split Env.SetFlags into two methods, Env.SetFlags and Env.UnsetFlags<commit_after>package lmdb\n\n\/*\n#include <stdlib.h>\n#include <stdio.h>\n#include \"lmdb.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\n\/\/ success is a value returned from the LMDB API to indicate a successful call.\n\/\/ The functions in this API this behavior and its use is not required.\nconst success = C.MDB_SUCCESS\n\nconst (\n\t\/\/ Flags for Env.Open.\n\t\/\/\n\t\/\/ See mdb_env_open\n\tFixedMap = C.MDB_FIXEDMAP \/\/ Danger zone. Map memory at a fixed address.\n\tNoSubdir = C.MDB_NOSUBDIR \/\/ Argument to Open is a file, not a directory.\n\tReadonly = C.MDB_RDONLY \/\/ Used in several functions to denote an object as readonly.\n\tWriteMap = C.MDB_WRITEMAP \/\/ Use a writable memory map.\n\tNoMetaSync = C.MDB_NOMETASYNC \/\/ Don't fsync metapage after commit.\n\tNoSync = C.MDB_NOSYNC \/\/ Don't fsync after commit.\n\tMapAsync = C.MDB_MAPASYNC \/\/ Flush asynchronously when using the WriteMap flag.\n\tNoTLS = C.MDB_NOTLS \/\/ Danger zone. Tie reader locktable slots to Txn objects instead of threads.\n\tNoLock = C.MDB_NOLOCK \/\/ Danger zone. LMDB does not use any locks. All transactions must serialize.\n\tNoReadahead = C.MDB_NORDAHEAD \/\/ Disable readahead. Requires OS support.\n\tNoMemInit = C.MDB_NOMEMINIT \/\/ Disable LMDB memory initialization.\n)\n\n\/\/ DBI is a handle for a database in an Env.\n\/\/\n\/\/ See MDB_dbi\ntype DBI C.MDB_dbi\n\n\/\/ Env is opaque structure for a database environment. A DB environment\n\/\/ supports multiple databases, all residing in the same shared-memory map.\n\/\/\n\/\/ See MDB_env.\ntype Env struct {\n\t_env *C.MDB_env\n}\n\n\/\/ NewEnv allocates and initializes a new Env.\n\/\/\n\/\/ See mdb_env_create.\nfunc NewEnv() (*Env, error) {\n\tvar _env *C.MDB_env\n\tret := C.mdb_env_create(&_env)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\treturn &Env{_env}, nil\n}\n\n\/\/ Open an environment handle. If this function fails Close() must be called to\n\/\/ discard the Env handle. Open passes flags|NoTLS to mdb_env_open.\n\/\/\n\/\/ See mdb_env_open.\nfunc (env *Env) Open(path string, flags uint, mode os.FileMode) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_open(env._env, cpath, C.uint(NoTLS|flags), C.mdb_mode_t(mode))\n\treturn errno(ret)\n}\n\n\/\/ Close shuts down the environment and releases the memory map.\n\/\/\n\/\/ See mdb_env_close.\nfunc (env *Env) Close() error {\n\tif env._env == nil {\n\t\treturn errors.New(\"Environment already closed\")\n\t}\n\tC.mdb_env_close(env._env)\n\tenv._env = nil\n\treturn nil\n}\n\n\/\/ Copy copies the data in env to an environment at path.\n\/\/\n\/\/ See mdb_env_copy.\nfunc (env *Env) Copy(path string) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_copy(env._env, cpath)\n\treturn errno(ret)\n}\n\n\/\/ CopyFlag copies the data in env to an environment at path created with flags.\n\/\/\n\/\/ See mdb_env_copy2.\nfunc (env *Env) CopyFlag(path string, flags uint) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_copy2(env._env, cpath, C.uint(flags))\n\treturn errno(ret)\n}\n\n\/\/ Statistics for a database in the environment\n\/\/\n\/\/ See MDB_stat.\ntype Stat struct {\n\tPSize uint \/\/ Size of a database page. This is currently the same for all databases.\n\tDepth uint \/\/ Depth (height) of the B-tree\n\tBranchPages uint64 \/\/ Number of internal (non-leaf) pages\n\tLeafPages uint64 \/\/ Number of leaf pages\n\tOverflowPages uint64 \/\/ Number of overflow pages\n\tEntries uint64 \/\/ Number of data items\n}\n\n\/\/ Stat returns statistics about the environment.\n\/\/\n\/\/ See mdb_env_stat.\nfunc (env *Env) Stat() (*Stat, error) {\n\tvar _stat C.MDB_stat\n\tret := C.mdb_env_stat(env._env, &_stat)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\tstat := Stat{PSize: uint(_stat.ms_psize),\n\t\tDepth: uint(_stat.ms_depth),\n\t\tBranchPages: uint64(_stat.ms_branch_pages),\n\t\tLeafPages: uint64(_stat.ms_leaf_pages),\n\t\tOverflowPages: uint64(_stat.ms_overflow_pages),\n\t\tEntries: uint64(_stat.ms_entries)}\n\treturn &stat, nil\n}\n\n\/\/ Information about the environment.\n\/\/\n\/\/ See MDB_envinfo.\ntype EnvInfo struct {\n\tMapSize int64 \/\/ Size of the data memory map\n\tLastPNO int64 \/\/ ID of the last used page\n\tLastTxnID int64 \/\/ ID of the last committed transaction\n\tMaxReaders uint \/\/ maximum number of threads for the environment\n\tNumReaders uint \/\/ maximum number of threads used in the environment\n}\n\n\/\/ Info returns information about the environment.\n\/\/\n\/\/ See mdb_env_info.\nfunc (env *Env) Info() (*EnvInfo, error) {\n\tvar _info C.MDB_envinfo\n\tret := C.mdb_env_info(env._env, &_info)\n\tif ret != success {\n\t\treturn nil, errno(ret)\n\t}\n\tinfo := EnvInfo{\n\t\tMapSize: int64(_info.me_mapsize),\n\t\tLastPNO: int64(_info.me_last_pgno),\n\t\tLastTxnID: int64(_info.me_last_txnid),\n\t\tMaxReaders: uint(_info.me_maxreaders),\n\t\tNumReaders: uint(_info.me_numreaders),\n\t}\n\treturn &info, nil\n}\n\n\/\/ Sync flushes buffers to disk. If force is true a synchronous flush occurs\n\/\/ and ignores any NoSync or MapAsync flag on the environment.\n\/\/\n\/\/ See mdb_env_sync.\nfunc (env *Env) Sync(force bool) error {\n\tret := C.mdb_env_sync(env._env, cbool(force))\n\treturn errno(ret)\n}\n\n\/\/ SetFlags sets flags in the environment.\n\/\/\n\/\/ See mdb_env_set_flags.\nfunc (env *Env) SetFlags(flags uint) error {\n\tret := C.mdb_env_set_flags(env._env, C.uint(flags), C.int(1))\n\treturn errno(ret)\n}\n\n\/\/ UnsetFlags clears flags in the environment.\n\/\/\n\/\/ See mdb_env_set_flags.\nfunc (env *Env) UnsetFlags(flags uint) error {\n\tret := C.mdb_env_set_flags(env._env, C.uint(flags), C.int(0))\n\treturn errno(ret)\n}\n\n\/\/ Flags returns the flags set in the environment.\n\/\/\n\/\/ See mdb_env_get_flags.\nfunc (env *Env) Flags() (uint, error) {\n\tvar _flags C.uint\n\tret := C.mdb_env_get_flags(env._env, &_flags)\n\tif ret != success {\n\t\treturn 0, errno(ret)\n\t}\n\treturn uint(_flags), nil\n}\n\n\/\/ Path returns the path argument passed to Open. Path returns an error if the\n\/\/ Env was not opened previously. Calling Path on a closed Env has undefined\n\/\/ results.\n\/\/\n\/\/ See mdb_env_path.\nfunc (env *Env) Path() (string, error) {\n\tvar path string\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\tret := C.mdb_env_get_path(env._env, &cpath)\n\tif ret != success {\n\t\treturn \"\", errno(ret)\n\t}\n\tif cpath == nil {\n\t\treturn \"\", fmt.Errorf(\"not open\")\n\t}\n\treturn C.GoString(cpath), nil\n}\n\n\/\/ SetMapSize sets the size of the environment memory map.\n\/\/\n\/\/ See mdb_env_set_map_size.\nfunc (env *Env) SetMapSize(size int64) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_mapsize(env._env, C.size_t(size))\n\treturn errno(ret)\n}\n\n\/\/ SetMaxReaders sets the maximum number of reader slots in the environment.\n\/\/\n\/\/ See mdb_env_set_max_readers.\nfunc (env *Env) SetMaxReaders(size int) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_maxreaders(env._env, C.uint(size))\n\treturn errno(ret)\n}\n\n\/\/ SetMaxDBs sets the maximum number of named databases for the environment.\n\/\/\n\/\/ See mdb_env_set_maxdbs.\nfunc (env *Env) SetMaxDBs(size int) error {\n\tif size < 0 {\n\t\treturn fmt.Errorf(\"negative size\")\n\t}\n\tret := C.mdb_env_set_maxdbs(env._env, C.MDB_dbi(size))\n\treturn errno(ret)\n}\n\n\/\/ BeginTxn is a low-level (potentially dangerous) method to initialize a new\n\/\/ transaction on env. BeginTxn does not attempt to serialize operations on\n\/\/ write transactions to the same OS thread and its use for write transactions\n\/\/ can cause undefined results without care.\n\/\/\n\/\/ Instead of BeginTxn users should call the View, Update, RunTxn methods.\n\/\/\n\/\/ See mdb_txn_begin.\nfunc (env *Env) BeginTxn(parent *Txn, flags uint) (*Txn, error) {\n\treturn beginTxn(env, parent, flags)\n}\n\n\/\/ Run creates a new Txn and calls fn with it as an argument. Run commits the\n\/\/ transaction if fn returns nil otherwise the transaction is aborted.\n\/\/\n\/\/ Because Run terminates the transaction goroutines should not retain\n\/\/ references to it after fn returns. Writable transactions (without the\n\/\/ Readonly flag) must not be used from any goroutines other than the one\n\/\/ running fn.\n\/\/\n\/\/ See mdb_txn_begin.\nfunc (env *Env) RunTxn(flags uint, fn TxnOp) error {\n\tif isReadonly(flags) {\n\t\treturn env.run(flags, fn)\n\t}\n\treturn env.runUpdate(flags, fn)\n}\n\n\/\/ View creates a readonly transaction with a consistent view of the\n\/\/ environment and passes it to fn. View terminates its transaction after fn\n\/\/ returns. Any error encountered by View is returned.\n\/\/\n\/\/ Any call to Commit, Abort, Reset or Renew on a Txn created by View will\n\/\/ panic.\nfunc (env *Env) View(fn TxnOp) error {\n\treturn env.run(Readonly, fn)\n}\n\n\/\/ Update creates a writable transaction and passes it to fn. Update commits\n\/\/ the transaction if fn returns without error otherwise Update aborts the\n\/\/ transaction and returns the error.\n\/\/\n\/\/ The Txn passed to fn must not be used from multiple goroutines, even with\n\/\/ synchronization.\n\/\/\n\/\/ Any call to Commit, Abort, Reset or Renew on a Txn created by Update will\n\/\/ panic.\nfunc (env *Env) Update(fn TxnOp) error {\n\treturn env.runUpdate(0, fn)\n}\n\nfunc (env *Env) run(flags uint, fn TxnOp) error {\n\ttxn, err := env.BeginTxn(nil, flags)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttxn.managed = true\n\terr = fn(txn)\n\tif err != nil {\n\t\ttxn.abort()\n\t\treturn err\n\t}\n\treturn txn.commit()\n}\n\n\/\/ runUpdate calls fn with a managed transaction on another goroutine. The\n\/\/ transaction runs inside a goroutine so thread locking does not interact\n\/\/ negatively with other objects which would like to lock their thread.\nfunc (env *Env) runUpdate(flags uint, fn TxnOp) error {\n\terrc := make(chan error)\n\tgo func() {\n\t\tdefer close(errc)\n\t\truntime.LockOSThread()\n\t\tdefer runtime.UnlockOSThread()\n\t\terr := env.run(flags, fn)\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t}\n\t}()\n\treturn <-errc\n}\n\n\/\/ CloseDBI closes the database handle, db. Normally calling CloseDBI\n\/\/ explicitly is not necessary.\n\/\/\n\/\/ It is the caller's responsibility to serialize calls to CloseDBI.\n\/\/\n\/\/ See mdb_dbi_close.\nfunc (env *Env) CloseDBI(db DBI) {\n\tC.mdb_dbi_close(env._env, C.MDB_dbi(db))\n}\n\nfunc isReadonly(flags uint) bool {\n\treturn flags&Readonly != 0\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"os\"\nimport \"fmt\"\nimport \"strings\"\nimport \"math\/rand\"\nimport \"github.com\/fatih\/color\"\n\nfunc getfles() []func(string) string {\n\treturn []func(string) string {\n\t\t\tfunc(d string) string {return color.New(color.FgYellow).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgBlue).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgRed).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgYellow).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgMagenta).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgCyan).SprintFunc()(d)},\n\t\t\tfunc(d string) string {return color.New(color.FgWhite).SprintFunc()(d)},\n\t\t}\n}\n\nfunc splitargs() string {\n\targs := os.Args[1:]\n\ts := \"\"\n\n\tfor i := 0; i < len(args); i++ {\n\t\ts += args[i] + \" \"\n\t}\n\n\treturn s\n}\n\nfunc stringtoslice(s string) []string {\n\treturn strings.Split(s, \"\")\n}\n\nfunc main() {\n\tflist := getfles()\n\n\targstring := splitargs()\n\ts := stringtoslice(argstring)\n\tresult := \"\"\n\n\tfor i := 0; i < len(s); i++ {\n\t\tresult += flist[rand.Intn(len(flist))](s[i])\n\t}\n\n\tfmt.Printf(result + \"\\n\")\n}\n<commit_msg>major code rework for better efficiency<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/fatih\/color\"\n)\n\nfunc getfles() []func(string) string {\n\treturn []func(string) string{\n\t\tfunc(d string) string { return color.New(color.FgYellow).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgBlue).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgRed).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgYellow).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgMagenta).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgCyan).SprintFunc()(d) },\n\t\tfunc(d string) string { return color.New(color.FgWhite).SprintFunc()(d) },\n\t}\n}\n\nfunc splitargs() string {\n\targs := os.Args[1:]\n\ts := \"\"\n\n\tfor i := 0; i < len(args); i++ {\n\t\ts += args[i] + \" \"\n\t}\n\n\treturn s\n}\n\nfunc stringtoslice(s string) []string {\n\treturn strings.Split(s, \"\")\n}\n\nfunc main() {\n\tflist := getfles()\n\n\targstring := splitargs()\n\ts := stringtoslice(argstring)\n\tresult := \"\"\n\n\tfor i := 0; i < len(s); i++ {\n\t\tresult += flist[rand.Intn(len(flist))](s[i])\n\t}\n\n\tfmt.Printf(result + \"\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n\tdefaultPlaceholder = \"{{}}\"\n\tkeyBackspace = 8\n\tkeyDelete = 127\n\tkeyEndOfTransmission = 4\n\tkeyLineFeed = 10\n\tkeyCarriageReturn = 13\n\tkeyEndOfTransmissionBlock = 23\n\tkeyEscape = 27\n)\n\nvar placeholder string\n\nvar usage = `fzz allows you to run a command interactively.\n\nUsage:\n\n\tfzz command\n\nThe command MUST include the placeholder '{{}}'.\n\nArguments:\n\n\t-v\t\tPrint version and exit\n`\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc isPipe(f *os.File) bool {\n\ts, err := f.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn s.Mode()&os.ModeNamedPipe != 0\n}\n\nfunc containsPlaceholder(s []string, ph string) bool {\n\tfor _, v := range s {\n\t\tif strings.Contains(v, ph) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validPlaceholder(p string) bool {\n\treturn len(p)%2 == 0\n}\n\nfunc removeLastWord(s []byte) []byte {\n\tfields := bytes.Fields(s)\n\tif len(fields) > 0 {\n\t\tr := bytes.Join(fields[:len(fields)-1], []byte{' '})\n\t\tif len(r) > 1 {\n\t\t\tr = append(r, ' ')\n\t\t}\n\t\treturn r\n\t}\n\treturn []byte{}\n}\n\nfunc removeLastCharacter(s []byte) []byte {\n\tif len(s) > 1 {\n\t\tr, rsize := utf8.DecodeLastRune(s)\n\t\tif r == utf8.RuneError {\n\t\t\treturn s[:len(s)-1]\n\t\t} else {\n\t\t\treturn s[:len(s)-rsize]\n\t\t}\n\t} else if len(s) == 1 {\n\t\treturn nil\n\t}\n\treturn s\n}\n\nfunc readCharacter(r io.Reader) <-chan []byte {\n\tch := make(chan []byte)\n\trs := bufio.NewScanner(r)\n\n\tgo func() {\n\t\trs.Split(bufio.ScanRunes)\n\n\t\tfor rs.Scan() {\n\t\t\tb := rs.Bytes()\n\t\t\tch <- b\n\t\t}\n\t}()\n\n\treturn ch\n}\n\ntype Fzz struct {\n\ttty *TTY\n\tprinter *Printer\n\tstdinbuf *bytes.Buffer\n\tcurrentRunner *Runner\n\tinput []byte\n}\n\nfunc (fzz *Fzz) Loop() {\n\tttych := readCharacter(fzz.tty)\n\n\tfzz.tty.resetScreen()\n\tfzz.tty.printPrompt(fzz.input[:len(fzz.input)])\n\n\tfor {\n\t\tb := <-ttych\n\n\t\tswitch b[0] {\n\t\tcase keyBackspace, keyDelete:\n\t\t\tfzz.input = removeLastCharacter(fzz.input)\n\t\tcase keyEndOfTransmission, keyLineFeed, keyCarriageReturn:\n\t\t\tif fzz.currentRunner != nil {\n\t\t\t\tfzz.currentRunner.Wait()\n\t\t\t\tfzz.tty.resetScreen()\n\t\t\t\tio.Copy(os.Stdout, fzz.currentRunner.stdoutbuf)\n\t\t\t} else {\n\t\t\t\tfzz.tty.resetScreen()\n\t\t\t}\n\t\t\treturn\n\t\tcase keyEscape:\n\t\t\tfzz.tty.resetScreen()\n\t\t\treturn\n\t\tcase keyEndOfTransmissionBlock:\n\t\t\tfzz.input = removeLastWord(fzz.input)\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tfzz.input = append(fzz.input, b...)\n\t\t}\n\n\t\tfzz.killCurrentRunner()\n\n\t\tfzz.tty.resetScreen()\n\t\tfzz.tty.printPrompt(fzz.input[:len(fzz.input)])\n\n\t\tfzz.printer.Reset()\n\n\t\tif len(fzz.input) > 0 {\n\t\t\tfzz.currentRunner = NewRunner(flag.Args(), placeholder, string(fzz.input), fzz.stdinbuf)\n\t\t\tch, err := fzz.currentRunner.Run()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tgo func(inputlen int) {\n\t\t\t\tfor line := range ch {\n\t\t\t\t\tfzz.printer.Print(line)\n\t\t\t\t}\n\t\t\t\tfzz.tty.cursorAfterPrompt(inputlen)\n\t\t\t}(utf8.RuneCount(fzz.input))\n\t\t}\n\t}\n}\n\nfunc (fzz *Fzz) killCurrentRunner() {\n\tif fzz.currentRunner != nil {\n\t\tgo func(runner *Runner) {\n\t\t\trunner.KillWait()\n\t\t}(fzz.currentRunner)\n\t}\n}\n\nfunc main() {\n\tflVersion := flag.Bool(\"v\", false, \"Print fzz version and quit\")\n\tflag.Usage = printUsage\n\tflag.Parse()\n\n\tif *flVersion {\n\t\tfmt.Printf(\"fzz %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 2 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\tif placeholder = os.Getenv(\"FZZ_PLACEHOLDER\"); placeholder == \"\" {\n\t\tplaceholder = defaultPlaceholder\n\t}\n\n\tif !validPlaceholder(placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"Placeholder is not valid, needs even number of characters\")\n\t\tos.Exit(1)\n\t}\n\n\tif !containsPlaceholder(flag.Args(), placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"No placeholder in arguments\")\n\t\tos.Exit(1)\n\t}\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tty.resetState()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\t<-c\n\t\ttty.resetState()\n\t\tos.Exit(1)\n\t}()\n\ttty.setSttyState(\"cbreak\", \"-echo\")\n\n\tstdinbuf := bytes.Buffer{}\n\tif isPipe(os.Stdin) {\n\t\tio.Copy(&stdinbuf, os.Stdin)\n\t}\n\n\tprinter := NewPrinter(tty, tty.cols, tty.rows-1) \/\/ prompt is one row\n\tfzz := &Fzz{\n\t\tprinter: printer,\n\t\ttty: tty,\n\t\tstdinbuf: &stdinbuf,\n\t\tinput: make([]byte, 0),\n\t}\n\tfzz.Loop()\n}\n<commit_msg>Add placeholder field to Fzz struct<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n\tdefaultPlaceholder = \"{{}}\"\n\tkeyBackspace = 8\n\tkeyDelete = 127\n\tkeyEndOfTransmission = 4\n\tkeyLineFeed = 10\n\tkeyCarriageReturn = 13\n\tkeyEndOfTransmissionBlock = 23\n\tkeyEscape = 27\n)\n\nvar placeholder string\n\nvar usage = `fzz allows you to run a command interactively.\n\nUsage:\n\n\tfzz command\n\nThe command MUST include the placeholder '{{}}'.\n\nArguments:\n\n\t-v\t\tPrint version and exit\n`\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc isPipe(f *os.File) bool {\n\ts, err := f.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn s.Mode()&os.ModeNamedPipe != 0\n}\n\nfunc containsPlaceholder(s []string, ph string) bool {\n\tfor _, v := range s {\n\t\tif strings.Contains(v, ph) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validPlaceholder(p string) bool {\n\treturn len(p)%2 == 0\n}\n\nfunc removeLastWord(s []byte) []byte {\n\tfields := bytes.Fields(s)\n\tif len(fields) > 0 {\n\t\tr := bytes.Join(fields[:len(fields)-1], []byte{' '})\n\t\tif len(r) > 1 {\n\t\t\tr = append(r, ' ')\n\t\t}\n\t\treturn r\n\t}\n\treturn []byte{}\n}\n\nfunc removeLastCharacter(s []byte) []byte {\n\tif len(s) > 1 {\n\t\tr, rsize := utf8.DecodeLastRune(s)\n\t\tif r == utf8.RuneError {\n\t\t\treturn s[:len(s)-1]\n\t\t} else {\n\t\t\treturn s[:len(s)-rsize]\n\t\t}\n\t} else if len(s) == 1 {\n\t\treturn nil\n\t}\n\treturn s\n}\n\nfunc readCharacter(r io.Reader) <-chan []byte {\n\tch := make(chan []byte)\n\trs := bufio.NewScanner(r)\n\n\tgo func() {\n\t\trs.Split(bufio.ScanRunes)\n\n\t\tfor rs.Scan() {\n\t\t\tb := rs.Bytes()\n\t\t\tch <- b\n\t\t}\n\t}()\n\n\treturn ch\n}\n\ntype Fzz struct {\n\ttty *TTY\n\tprinter *Printer\n\tstdinbuf *bytes.Buffer\n\tcurrentRunner *Runner\n\tinput []byte\n\tplaceholder string\n}\n\nfunc (fzz *Fzz) Loop() {\n\tttych := readCharacter(fzz.tty)\n\n\tfzz.tty.resetScreen()\n\tfzz.tty.printPrompt(fzz.input[:len(fzz.input)])\n\n\tfor {\n\t\tb := <-ttych\n\n\t\tswitch b[0] {\n\t\tcase keyBackspace, keyDelete:\n\t\t\tfzz.input = removeLastCharacter(fzz.input)\n\t\tcase keyEndOfTransmission, keyLineFeed, keyCarriageReturn:\n\t\t\tif fzz.currentRunner != nil {\n\t\t\t\tfzz.currentRunner.Wait()\n\t\t\t\tfzz.tty.resetScreen()\n\t\t\t\tio.Copy(os.Stdout, fzz.currentRunner.stdoutbuf)\n\t\t\t} else {\n\t\t\t\tfzz.tty.resetScreen()\n\t\t\t}\n\t\t\treturn\n\t\tcase keyEscape:\n\t\t\tfzz.tty.resetScreen()\n\t\t\treturn\n\t\tcase keyEndOfTransmissionBlock:\n\t\t\tfzz.input = removeLastWord(fzz.input)\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tfzz.input = append(fzz.input, b...)\n\t\t}\n\n\t\tfzz.killCurrentRunner()\n\n\t\tfzz.tty.resetScreen()\n\t\tfzz.tty.printPrompt(fzz.input[:len(fzz.input)])\n\n\t\tfzz.printer.Reset()\n\n\t\tif len(fzz.input) > 0 {\n\t\t\tfzz.currentRunner = NewRunner(flag.Args(), fzz.placeholder, string(fzz.input), fzz.stdinbuf)\n\t\t\tch, err := fzz.currentRunner.Run()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tgo func(inputlen int) {\n\t\t\t\tfor line := range ch {\n\t\t\t\t\tfzz.printer.Print(line)\n\t\t\t\t}\n\t\t\t\tfzz.tty.cursorAfterPrompt(inputlen)\n\t\t\t}(utf8.RuneCount(fzz.input))\n\t\t}\n\t}\n}\n\nfunc (fzz *Fzz) killCurrentRunner() {\n\tif fzz.currentRunner != nil {\n\t\tgo func(runner *Runner) {\n\t\t\trunner.KillWait()\n\t\t}(fzz.currentRunner)\n\t}\n}\n\nfunc main() {\n\tflVersion := flag.Bool(\"v\", false, \"Print fzz version and quit\")\n\tflag.Usage = printUsage\n\tflag.Parse()\n\n\tif *flVersion {\n\t\tfmt.Printf(\"fzz %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 2 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\tif placeholder = os.Getenv(\"FZZ_PLACEHOLDER\"); placeholder == \"\" {\n\t\tplaceholder = defaultPlaceholder\n\t}\n\n\tif !validPlaceholder(placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"Placeholder is not valid, needs even number of characters\")\n\t\tos.Exit(1)\n\t}\n\n\tif !containsPlaceholder(flag.Args(), placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"No placeholder in arguments\")\n\t\tos.Exit(1)\n\t}\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tty.resetState()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\t<-c\n\t\ttty.resetState()\n\t\tos.Exit(1)\n\t}()\n\ttty.setSttyState(\"cbreak\", \"-echo\")\n\n\tstdinbuf := bytes.Buffer{}\n\tif isPipe(os.Stdin) {\n\t\tio.Copy(&stdinbuf, os.Stdin)\n\t}\n\n\tprinter := NewPrinter(tty, tty.cols, tty.rows-1) \/\/ prompt is one row\n\tfzz := &Fzz{\n\t\tprinter: printer,\n\t\ttty: tty,\n\t\tstdinbuf: &stdinbuf,\n\t\tinput: make([]byte, 0),\n\t\tplaceholder: placeholder,\n\t}\n\tfzz.Loop()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"os\"\n \"regexp\"\n\n customCar \"github.com\/jtyr\/gbt\/gbt\/cars\/custom\"\n dirCar \"github.com\/jtyr\/gbt\/gbt\/cars\/dir\"\n exectimeCar \"github.com\/jtyr\/gbt\/gbt\/cars\/exectime\"\n gitCar \"github.com\/jtyr\/gbt\/gbt\/cars\/git\"\n hostnameCar \"github.com\/jtyr\/gbt\/gbt\/cars\/hostname\"\n osCar \"github.com\/jtyr\/gbt\/gbt\/cars\/os\"\n pyvirtenvCar \"github.com\/jtyr\/gbt\/gbt\/cars\/pyvirtenv\"\n signCar \"github.com\/jtyr\/gbt\/gbt\/cars\/sign\"\n statusCar \"github.com\/jtyr\/gbt\/gbt\/cars\/status\"\n timeCar \"github.com\/jtyr\/gbt\/gbt\/cars\/time\"\n\n \"github.com\/jtyr\/gbt\/gbt\/core\/car\"\n \"github.com\/jtyr\/gbt\/gbt\/core\/utils\"\n)\n\ntype Cars interface {\n Init()\n Format() string\n SetParamStr(string, string)\n GetColor(string, bool) string\n DecorateElement(element, bg, fg, fm, text string) string\n GetModel() map[string]car.ModelElement\n GetDisplay() bool\n GetSep() string\n GetWrap() bool\n}\n\nfunc printCars(cars []Cars, right bool) {\n prevBg := \"\\000\"\n prevDisplay := true\n fakeCar := car.Car{}\n\n separator := utils.GetEnv(\"GBT_SEPARATOR\", \"\")\n\n if right {\n separator = utils.GetEnv(\"GBT_RSEPARATOR\", \"\")\n }\n\n if ! right && utils.GetEnv(\"GBT_BEGINNING_TEXT\", \"\") != \"\" {\n fmt.Print(\n fakeCar.DecorateElement(\n \"\",\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_BG\", \"default\"), false),\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_FG\", \"default\"), true),\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_FM\", \"none\"), false),\n utils.GetEnv(\"GBT_BEGINNING_TEXT\", \"\")))\n }\n\n for _, c := range cars {\n c.Init()\n\n cModel := c.GetModel()\n cDisplay := c.GetDisplay()\n cSep := c.GetSep()\n cWrap := c.GetWrap()\n\n if cSep != \"\\000\" {\n separator = cSep\n }\n\n if cDisplay {\n fmt.Print(fakeCar.GetColor(\"RESETALL\", false))\n\n if prevBg != \"\\000\" && prevDisplay {\n bg := c.GetColor(cModel[\"root\"].Bg, false)\n fg := c.GetColor(cModel[\"root\"].Bg, true)\n\n if cWrap {\n bg = c.GetColor(\"default\", false)\n fg = c.GetColor(\"default\", true)\n }\n\n if right {\n fmt.Print(\n c.DecorateElement(\n \"\",\n c.GetColor(prevBg, false),\n fg,\n \"\",\n separator))\n } else {\n fmt.Print(\n c.DecorateElement(\n \"\",\n bg,\n c.GetColor(prevBg, true),\n \"\",\n separator))\n }\n\n if cWrap {\n fmt.Print(\"\\n\")\n }\n }\n\n prevBg = cModel[\"root\"].Bg\n prevDisplay = cDisplay\n\n fmt.Print(c.Format())\n }\n }\n\n fmt.Print(fakeCar.GetColor(\"RESETALL\", false))\n}\n\nfunc main() {\n var argsHelp, argsVersion, argsRight bool\n\n flag.BoolVar(&argsHelp, \"help\", false, \"show this help message and exit\")\n flag.BoolVar(&argsVersion, \"version\", false, \"show version and exit\")\n flag.BoolVar(&argsRight, \"right\", false, \"compose right hand site prompt\")\n flag.Parse()\n\n if argsHelp {\n flag.PrintDefaults()\n os.Exit(0)\n }\n if argsVersion {\n fmt.Println(\"GBT v1.1.0\")\n os.Exit(0)\n }\n\n carsStr := utils.GetEnv(\"GBT_CARS\", \"Status, Os, Hostname, Dir, Git, Sign\")\n\n if argsRight {\n carsStr = utils.GetEnv(\"GBT_RCARS\", \"Time\")\n }\n\n reCarSplit := regexp.MustCompile(`\\s*,\\s*`)\n carsNames := reCarSplit.Split(carsStr, -1)\n carsFactory := map[string]Cars{\n \"Custom\": &customCar.Car{},\n \"Dir\": &dirCar.Car{},\n \"ExecTime\": &exectimeCar.Car{},\n \"Git\": &gitCar.Car{},\n \"Hostname\": &hostnameCar.Car{},\n \"Os\": &osCar.Car{},\n \"PyVirtEnv\": &pyvirtenvCar.Car{},\n \"Sign\": &signCar.Car{},\n \"Status\": &statusCar.Car{},\n \"Time\": &timeCar.Car{},\n }\n cars := []Cars{}\n\n for _, cn := range carsNames {\n custom := \"\\000\"\n\n if len(cn) >= 6 && cn[:6] == \"Custom\" {\n custom = cn[6:]\n cn = cn[:6]\n }\n\n if val, ok := carsFactory[cn]; ok {\n if cn == \"Status\" && len(flag.Args()) > 0 {\n val.SetParamStr(\"args\", flag.Args()[0])\n } else if custom != \"\\000\" {\n val = &customCar.Car{}\n val.SetParamStr(\"name\", custom)\n }\n\n cars = append(cars, val)\n }\n }\n\n printCars(cars, argsRight)\n}\n<commit_msg>Fixing per-car separator issue<commit_after>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"os\"\n \"regexp\"\n\n customCar \"github.com\/jtyr\/gbt\/gbt\/cars\/custom\"\n dirCar \"github.com\/jtyr\/gbt\/gbt\/cars\/dir\"\n exectimeCar \"github.com\/jtyr\/gbt\/gbt\/cars\/exectime\"\n gitCar \"github.com\/jtyr\/gbt\/gbt\/cars\/git\"\n hostnameCar \"github.com\/jtyr\/gbt\/gbt\/cars\/hostname\"\n osCar \"github.com\/jtyr\/gbt\/gbt\/cars\/os\"\n pyvirtenvCar \"github.com\/jtyr\/gbt\/gbt\/cars\/pyvirtenv\"\n signCar \"github.com\/jtyr\/gbt\/gbt\/cars\/sign\"\n statusCar \"github.com\/jtyr\/gbt\/gbt\/cars\/status\"\n timeCar \"github.com\/jtyr\/gbt\/gbt\/cars\/time\"\n\n \"github.com\/jtyr\/gbt\/gbt\/core\/car\"\n \"github.com\/jtyr\/gbt\/gbt\/core\/utils\"\n)\n\ntype Cars interface {\n Init()\n Format() string\n SetParamStr(string, string)\n GetColor(string, bool) string\n DecorateElement(element, bg, fg, fm, text string) string\n GetModel() map[string]car.ModelElement\n GetDisplay() bool\n GetSep() string\n GetWrap() bool\n}\n\nfunc printCars(cars []Cars, right bool) {\n prevBg := \"\\000\"\n prevDisplay := true\n fakeCar := car.Car{}\n defaultSeparator := utils.GetEnv(\"GBT_SEPARATOR\", \"\")\n\n if right {\n defaultSeparator = utils.GetEnv(\"GBT_RSEPARATOR\", \"\")\n }\n\n if ! right && utils.GetEnv(\"GBT_BEGINNING_TEXT\", \"\") != \"\" {\n fmt.Print(\n fakeCar.DecorateElement(\n \"\",\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_BG\", \"default\"), false),\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_FG\", \"default\"), true),\n fakeCar.GetColor(utils.GetEnv(\"GBT_BEGINNING_FM\", \"none\"), false),\n utils.GetEnv(\"GBT_BEGINNING_TEXT\", \"\")))\n }\n\n for _, c := range cars {\n c.Init()\n\n cModel := c.GetModel()\n cDisplay := c.GetDisplay()\n cSep := c.GetSep()\n cWrap := c.GetWrap()\n\n separator := defaultSeparator\n\n if cSep != \"\\000\" {\n separator = cSep\n }\n\n if cDisplay {\n fmt.Print(fakeCar.GetColor(\"RESETALL\", false))\n\n if prevBg != \"\\000\" && prevDisplay {\n bg := c.GetColor(cModel[\"root\"].Bg, false)\n fg := c.GetColor(cModel[\"root\"].Bg, true)\n\n if cWrap {\n bg = c.GetColor(\"default\", false)\n fg = c.GetColor(\"default\", true)\n }\n\n if right {\n fmt.Print(\n c.DecorateElement(\n \"\",\n c.GetColor(prevBg, false),\n fg,\n \"\",\n separator))\n } else {\n fmt.Print(\n c.DecorateElement(\n \"\",\n bg,\n c.GetColor(prevBg, true),\n \"\",\n separator))\n }\n\n if cWrap {\n fmt.Print(\"\\n\")\n }\n }\n\n prevBg = cModel[\"root\"].Bg\n prevDisplay = cDisplay\n\n fmt.Print(c.Format())\n }\n }\n\n fmt.Print(fakeCar.GetColor(\"RESETALL\", false))\n}\n\nfunc main() {\n var argsHelp, argsVersion, argsRight bool\n\n flag.BoolVar(&argsHelp, \"help\", false, \"show this help message and exit\")\n flag.BoolVar(&argsVersion, \"version\", false, \"show version and exit\")\n flag.BoolVar(&argsRight, \"right\", false, \"compose right hand site prompt\")\n flag.Parse()\n\n if argsHelp {\n flag.PrintDefaults()\n os.Exit(0)\n }\n if argsVersion {\n fmt.Println(\"GBT v1.1.0\")\n os.Exit(0)\n }\n\n carsStr := utils.GetEnv(\"GBT_CARS\", \"Status, Os, Hostname, Dir, Git, Sign\")\n\n if argsRight {\n carsStr = utils.GetEnv(\"GBT_RCARS\", \"Time\")\n }\n\n reCarSplit := regexp.MustCompile(`\\s*,\\s*`)\n carsNames := reCarSplit.Split(carsStr, -1)\n carsFactory := map[string]Cars{\n \"Custom\": &customCar.Car{},\n \"Dir\": &dirCar.Car{},\n \"ExecTime\": &exectimeCar.Car{},\n \"Git\": &gitCar.Car{},\n \"Hostname\": &hostnameCar.Car{},\n \"Os\": &osCar.Car{},\n \"PyVirtEnv\": &pyvirtenvCar.Car{},\n \"Sign\": &signCar.Car{},\n \"Status\": &statusCar.Car{},\n \"Time\": &timeCar.Car{},\n }\n cars := []Cars{}\n\n for _, cn := range carsNames {\n custom := \"\\000\"\n\n if len(cn) >= 6 && cn[:6] == \"Custom\" {\n custom = cn[6:]\n cn = cn[:6]\n }\n\n if val, ok := carsFactory[cn]; ok {\n if cn == \"Status\" && len(flag.Args()) > 0 {\n val.SetParamStr(\"args\", flag.Args()[0])\n } else if custom != \"\\000\" {\n val = &customCar.Car{}\n val.SetParamStr(\"name\", custom)\n }\n\n cars = append(cars, val)\n }\n }\n\n printCars(cars, argsRight)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ \n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ \n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ \n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package gcm provides send and receive GCM functionality.\npackage gcm\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/mattn\/go-xmpp\"\n)\n\nconst (\n\thttpAddress = \"https:\/\/android.googleapis.com\/gcm\/send\"\n\txmppHost = \"gcm.googleapis.com\"\n\txmppPort = \"5235\"\n\txmppAddress = xmppHost + \":\" + xmppPort\n)\n\nvar (\n\t\/\/ DebugMode determines whether to have verbose logging.\n\tDebugMode = true\n)\n\nfunc debug(m string, v interface{}) {\n\tif DebugMode {\n\t\tlog.Printf(m+\":%+v\", v)\n\t}\n}\n\n\/\/ downstream is a downstream (server -> client) message.\ntype downstream struct {\n\tTo string `json:\"to,omitempty\"`\n\tData Message `json:\"data,omitempty\"`\n}\n\n\/\/ upstream is an upstream (client -> server) message.\ntype upstream struct {\n\tFrom string `json:\"from\"`\n\tData Message `json:\"data,omitempty\"`\n}\n\n\/\/ Message is the custom data passed with every GCM message.\ntype Message map[string]interface{}\n\n\/\/ MessageHandler is the type for a function that handles a GCM message.\ntype MessageHandler func(from string, m Message) error\n\n\/\/ Send sends a downstream message to a given device.\nfunc Send(apiKey, to string, m Message) error {\n\tbs, err := json.Marshal(&downstream{To: to, Data: m})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshalling message>%v\", err)\n\t}\n\tdebug(\"sending\", string(bs))\n\treq, err := http.NewRequest(\"POST\", httpAddress, bytes.NewReader(bs))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating request>%v\", err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"Authorization\", authHeader(apiKey))\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error making request>%v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading body>%v\", err)\n\t}\n\tdebug(\"response body\", string(body))\n\treturn nil\n}\n\n\/\/ Listen blocks and connects to GCM waiting for messages, calling the handler\n\/\/ for every \"normal\" type message.\nfunc Listen(senderId, apiKey string, h MessageHandler) error {\n\tcl, err := xmpp.NewClient(xmppAddress, xmppUser(senderId), apiKey, DebugMode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error connecting client>%v\", err)\n\t}\n\tfor {\n\t\tstanza, err := cl.Recv()\n\t\tif err != nil {\n\t\t\t\/\/ This is likely fatal, so return.\n\t\t\treturn fmt.Errorf(\"error on Recv>%v\", err)\n\t\t}\n\t\tv, ok := stanza.(xmpp.Chat)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tswitch v.Type {\n\t\tcase \"normal\":\n\t\t\tup := &upstream{}\n\t\t\tjson.Unmarshal([]byte(v.Other[0]), up)\n\t\t\tgo h(up.From, up.Data)\n\t\tcase \"error\":\n\t\t\tdebug(\"error response\", v)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ authHeader generates an authorization header value for an api key.\nfunc authHeader(apiKey string) string {\n\treturn fmt.Sprintf(\"key=%v\", apiKey)\n}\n\n\/\/ xmppUser generates an xmpp username from a sender ID.\nfunc xmppUser(senderId string) string {\n\treturn senderId + \"@\" + xmppHost\n}\n<commit_msg>Allow ability to stop the listener.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package gcm provides send and receive GCM functionality.\npackage gcm\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/mattn\/go-xmpp\"\n)\n\nconst (\n\thttpAddress = \"https:\/\/android.googleapis.com\/gcm\/send\"\n\txmppHost = \"gcm.googleapis.com\"\n\txmppPort = \"5235\"\n\txmppAddress = xmppHost + \":\" + xmppPort\n)\n\nvar (\n\t\/\/ DebugMode determines whether to have verbose logging.\n\tDebugMode = true\n)\n\nfunc debug(m string, v interface{}) {\n\tif DebugMode {\n\t\tlog.Printf(m+\":%+v\", v)\n\t}\n}\n\n\/\/ downstream is a downstream (server -> client) message.\ntype downstream struct {\n\tTo string `json:\"to,omitempty\"`\n\tData Message `json:\"data,omitempty\"`\n}\n\n\/\/ upstream is an upstream (client -> server) message.\ntype upstream struct {\n\tFrom string `json:\"from\"`\n\tData Message `json:\"data,omitempty\"`\n}\n\n\/\/ Message is the custom data passed with every GCM message.\ntype Message map[string]interface{}\n\n\/\/ StopChannel is a channel type to stop the server.\ntype StopChannel chan bool\n\n\/\/ MessageHandler is the type for a function that handles a GCM message.\ntype MessageHandler func(from string, m Message) error\n\n\/\/ Send sends a downstream message to a given device.\nfunc Send(apiKey, to string, m Message) error {\n\tbs, err := json.Marshal(&downstream{To: to, Data: m})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshalling message>%v\", err)\n\t}\n\tdebug(\"sending\", string(bs))\n\treq, err := http.NewRequest(\"POST\", httpAddress, bytes.NewReader(bs))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating request>%v\", err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"Authorization\", authHeader(apiKey))\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error making request>%v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading body>%v\", err)\n\t}\n\tdebug(\"response body\", string(body))\n\treturn nil\n}\n\n\/\/ Listen blocks and connects to GCM waiting for messages, calling the handler\n\/\/ for every \"normal\" type message. An optional stop channel can be provided to\n\/\/ stop listening.\nfunc Listen(senderId, apiKey string, h MessageHandler, stop StopChannel) error {\n\tcl, err := xmpp.NewClient(xmppAddress, xmppUser(senderId), apiKey, DebugMode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error connecting client>%v\", err)\n\t}\n\tif stop != nil {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\tcl.Close()\n\t\t\t}\n\t\t}()\n\t}\n\tfor {\n\t\tstanza, err := cl.Recv()\n\t\tif err != nil {\n\t\t\t\/\/ This is likely fatal, so return.\n\t\t\treturn fmt.Errorf(\"error on Recv>%v\", err)\n\t\t}\n\t\tv, ok := stanza.(xmpp.Chat)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tswitch v.Type {\n\t\tcase \"normal\":\n\t\t\tup := &upstream{}\n\t\t\tjson.Unmarshal([]byte(v.Other[0]), up)\n\t\t\tgo h(up.From, up.Data)\n\t\tcase \"error\":\n\t\t\tdebug(\"error response\", v)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ authHeader generates an authorization header value for an api key.\nfunc authHeader(apiKey string) string {\n\treturn fmt.Sprintf(\"key=%v\", apiKey)\n}\n\n\/\/ xmppUser generates an xmpp username from a sender ID.\nfunc xmppUser(senderId string) string {\n\treturn senderId + \"@\" + xmppHost\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage sh\n\ntype Token int\n\nconst (\n\tILLEGAL Token = -iota\n\tEOF\n\tCOMMENT\n\tLIT\n\n\tIF\n\tTHEN\n\tELIF\n\tELSE\n\tFI\n\tWHILE\n\tFOR\n\tIN\n\tDO\n\tDONE\n\tCASE\n\tESAC\n\n\tAND \/\/ &\n\tLAND \/\/ &&\n\tOR \/\/ |\n\tLOR \/\/ ||\n\n\tEXP \/\/ $\n\tLPAREN \/\/ (\n\tLBRACE \/\/ {\n\tDLPAREN \/\/ ((\n\n\tRPAREN \/\/ )\n\tRBRACE \/\/ }\n\tDRPAREN \/\/ ))\n\tSEMICOLON \/\/ ;\n\tDSEMICOLON \/\/ ;;\n\n\tRDRIN \/\/ <\n\tRDROUT \/\/ >\n\tAPPEND \/\/ >>\n)\n\nvar tokNames = map[Token]string{\n\tILLEGAL: `ILLEGAL`,\n\tEOF: `EOF`,\n\tCOMMENT: `comment`,\n\tLIT: `literal`,\n\n\tIF: \"if\",\n\tTHEN: \"then\",\n\tELIF: \"elif\",\n\tELSE: \"else\",\n\tFI: \"fi\",\n\tWHILE: \"while\",\n\tFOR: \"for\",\n\tIN: \"in\",\n\tDO: \"do\",\n\tDONE: \"done\",\n\tCASE: \"case\",\n\tESAC: \"esac\",\n\n\tAND: \"&\",\n\tLAND: \"&&\",\n\tOR: \"|\",\n\tLOR: \"||\",\n\n\tEXP: \")\",\n\tLPAREN: \"(\",\n\tLBRACE: \"{\",\n\tDLPAREN: \"((\",\n\n\tRPAREN: \")\",\n\tRBRACE: \"}\",\n\tDRPAREN: \"))\",\n\tSEMICOLON: \";\",\n\tDSEMICOLON: \";;\",\n\n\tRDRIN: \"<\",\n\tRDROUT: \">\",\n\tAPPEND: \">>\",\n}\n\nfunc (t Token) String() string {\n\tif s, e := tokNames[t]; e {\n\t\treturn s\n\t}\n\treturn string(rune(t))\n}\n\nfunc doToken(r rune, readOnly func(rune) bool) Token {\n\tswitch r {\n\tcase '&':\n\t\tif readOnly('&') {\n\t\t\treturn LAND\n\t\t}\n\t\treturn AND\n\tcase '|':\n\t\tif readOnly('|') {\n\t\t\treturn LOR\n\t\t}\n\t\treturn OR\n\tcase '(':\n\t\tif readOnly('(') {\n\t\t\treturn DLPAREN\n\t\t}\n\t\treturn LPAREN\n\tcase '{':\n\t\treturn LBRACE\n\tcase ')':\n\t\tif readOnly(')') {\n\t\t\treturn DRPAREN\n\t\t}\n\t\treturn RPAREN\n\tcase '}':\n\t\treturn RBRACE\n\tcase '$':\n\t\treturn EXP\n\tcase ';':\n\t\tif readOnly(';') {\n\t\t\treturn DSEMICOLON\n\t\t}\n\t\treturn SEMICOLON\n\tcase '<':\n\t\treturn RDRIN\n\tcase '>':\n\t\tif readOnly('>') {\n\t\t\treturn APPEND\n\t\t}\n\t\treturn RDROUT\n\tdefault:\n\t\treturn Token(r)\n\t}\n}\n<commit_msg>Fix EXP string<commit_after>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage sh\n\ntype Token int\n\nconst (\n\tILLEGAL Token = -iota\n\tEOF\n\tCOMMENT\n\tLIT\n\n\tIF\n\tTHEN\n\tELIF\n\tELSE\n\tFI\n\tWHILE\n\tFOR\n\tIN\n\tDO\n\tDONE\n\tCASE\n\tESAC\n\n\tAND \/\/ &\n\tLAND \/\/ &&\n\tOR \/\/ |\n\tLOR \/\/ ||\n\n\tEXP \/\/ $\n\tLPAREN \/\/ (\n\tLBRACE \/\/ {\n\tDLPAREN \/\/ ((\n\n\tRPAREN \/\/ )\n\tRBRACE \/\/ }\n\tDRPAREN \/\/ ))\n\tSEMICOLON \/\/ ;\n\tDSEMICOLON \/\/ ;;\n\n\tRDRIN \/\/ <\n\tRDROUT \/\/ >\n\tAPPEND \/\/ >>\n)\n\nvar tokNames = map[Token]string{\n\tILLEGAL: `ILLEGAL`,\n\tEOF: `EOF`,\n\tCOMMENT: `comment`,\n\tLIT: `literal`,\n\n\tIF: \"if\",\n\tTHEN: \"then\",\n\tELIF: \"elif\",\n\tELSE: \"else\",\n\tFI: \"fi\",\n\tWHILE: \"while\",\n\tFOR: \"for\",\n\tIN: \"in\",\n\tDO: \"do\",\n\tDONE: \"done\",\n\tCASE: \"case\",\n\tESAC: \"esac\",\n\n\tAND: \"&\",\n\tLAND: \"&&\",\n\tOR: \"|\",\n\tLOR: \"||\",\n\n\tEXP: \"$\",\n\tLPAREN: \"(\",\n\tLBRACE: \"{\",\n\tDLPAREN: \"((\",\n\n\tRPAREN: \")\",\n\tRBRACE: \"}\",\n\tDRPAREN: \"))\",\n\tSEMICOLON: \";\",\n\tDSEMICOLON: \";;\",\n\n\tRDRIN: \"<\",\n\tRDROUT: \">\",\n\tAPPEND: \">>\",\n}\n\nfunc (t Token) String() string {\n\tif s, e := tokNames[t]; e {\n\t\treturn s\n\t}\n\treturn string(rune(t))\n}\n\nfunc doToken(r rune, readOnly func(rune) bool) Token {\n\tswitch r {\n\tcase '&':\n\t\tif readOnly('&') {\n\t\t\treturn LAND\n\t\t}\n\t\treturn AND\n\tcase '|':\n\t\tif readOnly('|') {\n\t\t\treturn LOR\n\t\t}\n\t\treturn OR\n\tcase '(':\n\t\tif readOnly('(') {\n\t\t\treturn DLPAREN\n\t\t}\n\t\treturn LPAREN\n\tcase '{':\n\t\treturn LBRACE\n\tcase ')':\n\t\tif readOnly(')') {\n\t\t\treturn DRPAREN\n\t\t}\n\t\treturn RPAREN\n\tcase '}':\n\t\treturn RBRACE\n\tcase '$':\n\t\treturn EXP\n\tcase ';':\n\t\tif readOnly(';') {\n\t\t\treturn DSEMICOLON\n\t\t}\n\t\treturn SEMICOLON\n\tcase '<':\n\t\treturn RDRIN\n\tcase '>':\n\t\tif readOnly('>') {\n\t\t\treturn APPEND\n\t\t}\n\t\treturn RDROUT\n\tdefault:\n\t\treturn Token(r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package contour\n\n\/\/ E versions, these return the error. Non-e versions are just wrapped calls to\n\/\/ these functions with the error dropped.\n\n\/\/ Config Get Methods\n\n\/\/ E versions, these return the error. Non-e versions are just wrapped calls to\n\/\/ these functions with the error dropped.\n\n\/\/ GetE returns the setting Value as an interface{}. If its not a valid\n\/\/ setting, an error is returned.\nfunc (c *Cfg) GetE(k string) (interface{}, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn nil, notFoundErr(k)\n\t}\n\n\treturn c.Settings[k].Value, nil\n}\n\n\/\/ GetBoolE returns the setting Value as a bool.\nfunc (c *Cfg) GetBoolE(k string) (bool, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn false, notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase bool:\n\t\treturn c.Settings[k].Value.(bool), nil\n\tcase *bool:\n\t\treturn *c.Settings[k].Value.(*bool), nil\n\t}\n\n\t\/\/ Should never happen, but since we know the setting is there and we\n\t\/\/ expect it to be bool, given this method was called, we assume any\n\t\/\/ non-bool\/*bool type == false.\n\treturn false, nil\n}\n\n\/\/ GetIntE returns the setting Value as an int.\nfunc (c *Cfg) GetIntE(k string) (int, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn 0, notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase int:\n\t\treturn c.Settings.k.Value.(int), nil\n\tcase *int:\n\t\treturn c.Settings[k].Value.(*int), nil\n\t}\n\n\treturn 0, nil\n}\n\n\/\/ GetStringE returns the setting Value as a string.\nfunc (c *Cfg) GetStringE(k string) (string, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn \"\", notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase string:\n\t\treturn c.Settings[k].Value.(string), nil\n\tcase *string:\n\t\treturn c.Settings[k].Value.(*string), nil\n\t}\n\n\treturn \"\", nil\n}\n\n\/\/ GetInterfaceE is a convenience wrapper function to Get\nfunc (c *Cfg) GetInterfaceE(k string) (interface{}, error) {\n\treturn c.GetE(k)\n}\n\nfunc (c *Cfg) Get(k string) interface{} {\n\ts, _ := c.GetE(k)\n\treturn s\n}\n\n\/\/ GetBool returns the setting Value as a bool.\nfunc (c *Cfg) GetBool(k string) bool {\n\ts, _ := c.GetBoolE(k)\n\treturn s\n}\n\n\/\/ GetInt returns the setting Value as an int.\nfunc (c *Cfg) GetInt(k string) int {\n\ts, _ := c.GetIntE(k)\n\treturn s\n}\n\n\/\/ GetString returns the setting Value as a string.\nfunc (c *Cfg) GetString(k string) string {\n\ts, _ := c.GetStringE(k)\n\treturn s\n}\n\n\/\/ GetInterfac returns the setting Value as an interface\nfunc (c *Cfg) GetInterface(k string) interface{} {\n\treturn c.Get(k)\n}\n\n\/\/ Filter Methods obtain a list of flags of the filter type, e.g. boolFilter\n\/\/ for bool flags, and returns them.\n\/\/ GetBoolFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetBoolFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"bool\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetIntFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetIntFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"int\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetStringFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetStringFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"string\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ Convenience functions for configs[app]\n\/\/ Get returns the setting Value as an interface{}.\n\/\/ GetE returns the setting Value as an interface{}.\nfunc GetE(k string) (interface{}, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn nil, notFoundErr(k)\n\t}\n\n\treturn configs[app].Settings[k].Value, nil\n}\n\n\/\/ GetBoolE returns the setting Value as a bool.\nfunc GetBoolE(k string) (bool, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn false, notFoundErr(k)\n\t}\n\n\treturn *configs[app].Settings[k].Value.(*bool), nil\n}\n\n\/\/ GetIntE returns the setting Value as an int.\nfunc GetIntE(k string) (int, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn 0, notFoundErr(k)\n\t}\n\n\treturn *configs[app].Settings[k].Value.(*int), nil\n}\n\n\/\/ GetStringE returns the setting Value as a string.\nfunc GetStringE(k string) (string, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn \"\", notFoundErr(k)\n\t}\n\n\treturn configs[app].Settings[k].Value.(string), nil\n}\n\n\/\/ GetInterfaceE is a convenience wrapper function to Get\nfunc GetInterfaceE(k string) (interface{}, error) {\n\treturn configs[app].GetE(k)\n}\n\nfunc Get(k string) interface{} {\n\ts, _ := configs[app].GetE(k)\n\treturn s\n}\n\n\/\/ GetBool returns the setting Value as a bool.\nfunc GetBool(k string) bool {\n\ts, _ := configs[app].GetBoolE(k)\n\treturn s\n}\n\n\/\/ GetInt returns the setting Value as an int.\nfunc GetInt(k string) int {\n\ts, _ := configs[app].GetIntE(k)\n\treturn s\n}\n\n\/\/ GetString returns the setting Value as a string.\nfunc GetString(k string) string {\n\ts, _ := configs[app].GetStringE(k)\n\treturn s\n}\n\n\/\/ GetInterface is a convenience wrapper function to Get\nfunc GetInterface(k string) interface{} {\n\treturn configs[app].Get(k)\n}\n\n\/\/ GetBoolFilterNames returns a list of filter names (flags).\nfunc GetBoolFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"bool\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetIntFilterNames returns a list of filter names (flags).\nfunc GetIntFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"int\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetStringFilterNames returns a list of filter names (flags).\nfunc GetStringFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"string\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n<commit_msg>fix errors in pointer fixes<commit_after>package contour\n\n\/\/ E versions, these return the error. Non-e versions are just wrapped calls to\n\/\/ these functions with the error dropped.\n\n\/\/ Config Get Methods\n\n\/\/ E versions, these return the error. Non-e versions are just wrapped calls to\n\/\/ these functions with the error dropped.\n\n\/\/ GetE returns the setting Value as an interface{}. If its not a valid\n\/\/ setting, an error is returned.\nfunc (c *Cfg) GetE(k string) (interface{}, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn nil, notFoundErr(k)\n\t}\n\n\treturn c.Settings[k].Value, nil\n}\n\n\/\/ GetBoolE returns the setting Value as a bool.\nfunc (c *Cfg) GetBoolE(k string) (bool, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn false, notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase bool:\n\t\treturn c.Settings[k].Value.(bool), nil\n\tcase *bool:\n\t\treturn *c.Settings[k].Value.(*bool), nil\n\t}\n\n\t\/\/ Should never happen, but since we know the setting is there and we\n\t\/\/ expect it to be bool, given this method was called, we assume any\n\t\/\/ non-bool\/*bool type == false.\n\treturn false, nil\n}\n\n\/\/ GetIntE returns the setting Value as an int.\nfunc (c *Cfg) GetIntE(k string) (int, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn 0, notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase int:\n\t\treturn c.Settings[k].Value.(int), nil\n\tcase *int:\n\t\treturn *c.Settings[k].Value.(*int), nil\n\t}\n\n\treturn 0, nil\n}\n\n\/\/ GetStringE returns the setting Value as a string.\nfunc (c *Cfg) GetStringE(k string) (string, error) {\n\t_, ok := c.Settings[k]\n\tif !ok {\n\t\treturn \"\", notFoundErr(k)\n\t}\n\n\tswitch c.Settings[k].Value.(type) {\n\tcase string:\n\t\treturn c.Settings[k].Value.(string), nil\n\tcase *string:\n\t\treturn *c.Settings[k].Value.(*string), nil\n\t}\n\n\treturn \"\", nil\n}\n\n\/\/ GetInterfaceE is a convenience wrapper function to Get\nfunc (c *Cfg) GetInterfaceE(k string) (interface{}, error) {\n\treturn c.GetE(k)\n}\n\nfunc (c *Cfg) Get(k string) interface{} {\n\ts, _ := c.GetE(k)\n\treturn s\n}\n\n\/\/ GetBool returns the setting Value as a bool.\nfunc (c *Cfg) GetBool(k string) bool {\n\ts, _ := c.GetBoolE(k)\n\treturn s\n}\n\n\/\/ GetInt returns the setting Value as an int.\nfunc (c *Cfg) GetInt(k string) int {\n\ts, _ := c.GetIntE(k)\n\treturn s\n}\n\n\/\/ GetString returns the setting Value as a string.\nfunc (c *Cfg) GetString(k string) string {\n\ts, _ := c.GetStringE(k)\n\treturn s\n}\n\n\/\/ GetInterfac returns the setting Value as an interface\nfunc (c *Cfg) GetInterface(k string) interface{} {\n\treturn c.Get(k)\n}\n\n\/\/ Filter Methods obtain a list of flags of the filter type, e.g. boolFilter\n\/\/ for bool flags, and returns them.\n\/\/ GetBoolFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetBoolFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"bool\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetIntFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetIntFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"int\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetStringFilterNames returns a list of filter names (flags).\nfunc (c *Cfg) GetStringFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range c.Settings {\n\t\tif setting.IsFlag && setting.Type == \"string\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ Convenience functions for configs[app]\n\/\/ Get returns the setting Value as an interface{}.\n\/\/ GetE returns the setting Value as an interface{}.\nfunc GetE(k string) (interface{}, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn nil, notFoundErr(k)\n\t}\n\n\treturn configs[app].Settings[k].Value, nil\n}\n\n\/\/ GetBoolE returns the setting Value as a bool.\nfunc GetBoolE(k string) (bool, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn false, notFoundErr(k)\n\t}\n\n\treturn *configs[app].Settings[k].Value.(*bool), nil\n}\n\n\/\/ GetIntE returns the setting Value as an int.\nfunc GetIntE(k string) (int, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn 0, notFoundErr(k)\n\t}\n\n\treturn *configs[app].Settings[k].Value.(*int), nil\n}\n\n\/\/ GetStringE returns the setting Value as a string.\nfunc GetStringE(k string) (string, error) {\n\t_, ok := configs[app].Settings[k]\n\tif !ok {\n\t\treturn \"\", notFoundErr(k)\n\t}\n\n\treturn configs[app].Settings[k].Value.(string), nil\n}\n\n\/\/ GetInterfaceE is a convenience wrapper function to Get\nfunc GetInterfaceE(k string) (interface{}, error) {\n\treturn configs[app].GetE(k)\n}\n\nfunc Get(k string) interface{} {\n\ts, _ := configs[app].GetE(k)\n\treturn s\n}\n\n\/\/ GetBool returns the setting Value as a bool.\nfunc GetBool(k string) bool {\n\ts, _ := configs[app].GetBoolE(k)\n\treturn s\n}\n\n\/\/ GetInt returns the setting Value as an int.\nfunc GetInt(k string) int {\n\ts, _ := configs[app].GetIntE(k)\n\treturn s\n}\n\n\/\/ GetString returns the setting Value as a string.\nfunc GetString(k string) string {\n\ts, _ := configs[app].GetStringE(k)\n\treturn s\n}\n\n\/\/ GetInterface is a convenience wrapper function to Get\nfunc GetInterface(k string) interface{} {\n\treturn configs[app].Get(k)\n}\n\n\/\/ GetBoolFilterNames returns a list of filter names (flags).\nfunc GetBoolFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"bool\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetIntFilterNames returns a list of filter names (flags).\nfunc GetIntFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"int\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n\n\/\/ GetStringFilterNames returns a list of filter names (flags).\nfunc GetStringFilterNames() []string {\n\tvar names []string\n\n\tfor k, setting := range configs[app].Settings {\n\t\tif setting.IsFlag && setting.Type == \"string\" {\n\t\t\tnames = append(names, k)\n\t\t}\n\t}\n\n\treturn names\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"hawx.me\/code\/ggg\/repos\"\n\t\"hawx.me\/code\/ggg\/web\/assets\"\n\t\"hawx.me\/code\/ggg\/web\/filters\"\n\t\"hawx.me\/code\/ggg\/web\/handlers\"\n\t\"hawx.me\/code\/mux\"\n\t\"hawx.me\/code\/route\"\n\t\"hawx.me\/code\/serve\"\n\t\"hawx.me\/code\/uberich\"\n)\n\ntype Conf struct {\n\tTitle string\n\tURL string\n\tSecret string\n\tGitDir string\n\tDbPath string\n\n\tUberich struct {\n\t\tAppName string\n\t\tAppURL string\n\t\tUberichURL string\n\t\tSecret string\n\t}\n}\n\nfunc main() {\n\tvar (\n\t\tsettingsPath = flag.String(\"settings\", \".\/settings.toml\", \"Path to 'settings.toml'\")\n\t\tport = flag.String(\"port\", \"8080\", \"Port to run on\")\n\t\tsocket = flag.String(\"socket\", \"\", \"\")\n\t)\n\tflag.Parse()\n\n\tvar conf *Conf\n\tif _, err := toml.DecodeFile(*settingsPath, &conf); err != nil {\n\t\tlog.Fatal(\"toml: \", err)\n\t}\n\n\tdb := repos.Open(conf.DbPath, conf.GitDir)\n\tdefer db.Close()\n\n\tstore := uberich.NewStore(conf.Secret)\n\tuberich := uberich.NewClient(conf.Uberich.AppName, conf.Uberich.AppURL, conf.Uberich.UberichURL, conf.Uberich.Secret, store)\n\n\tshield := func(h http.Handler) http.Handler {\n\t\treturn uberich.Protect(h, http.NotFoundHandler())\n\t}\n\n\tlist := handlers.List(db, conf.Title, conf.URL)\n\trepo := handlers.Repo(db, conf.Title, conf.URL, uberich.Protect)\n\n\troute.Handle(\"\/\", mux.Method{\"GET\": uberich.Protect(list.All, list.Public)})\n\n\troute.HandleFunc(\"\/:name\/*path\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := route.Vars(r)\n\n\t\tif strings.HasSuffix(vars[\"name\"], \".git\") {\n\t\t\trepo.Git.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\trepo.Html.ServeHTTP(w, r)\n\t})\n\n\troute.Handle(\"\/:name\/edit\", shield(handlers.Edit(db, conf.Title)))\n\troute.Handle(\"\/:name\/delete\", shield(handlers.Delete(db)))\n\n\troute.Handle(\"\/-\/create\", shield(handlers.Create(db, conf.Title)))\n\troute.Handle(\"\/-\/sign-in\", uberich.SignIn(\"\/\"))\n\troute.Handle(\"\/-\/sign-out\", uberich.SignOut(\"\/\"))\n\n\troute.Handle(\"\/assets\/styles.css\", mux.Method{\"GET\": assets.Styles})\n\troute.Handle(\"\/assets\/highlight.js\", mux.Method{\"GET\": assets.Highlight})\n\troute.Handle(\"\/assets\/filter.js\", mux.Method{\"GET\": assets.Filter})\n\n\tserve.Serve(*port, *socket, filters.Log(route.Default))\n}\n<commit_msg>Use indieauth instead of uberich for authentication<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"flag\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"hawx.me\/code\/ggg\/repos\"\n\t\"hawx.me\/code\/ggg\/web\/assets\"\n\t\"hawx.me\/code\/ggg\/web\/filters\"\n\t\"hawx.me\/code\/ggg\/web\/handlers\"\n\t\"hawx.me\/code\/indieauth\"\n\t\"hawx.me\/code\/mux\"\n\t\"hawx.me\/code\/route\"\n\t\"hawx.me\/code\/serve\"\n)\n\ntype Conf struct {\n\tTitle string\n\tURL string\n\tMe string\n\tSecret string\n\tGitDir string\n\tDbPath string\n}\n\nfunc main() {\n\tvar (\n\t\tsettingsPath = flag.String(\"settings\", \".\/settings.toml\", \"Path to 'settings.toml'\")\n\t\tport = flag.String(\"port\", \"8080\", \"Port to run on\")\n\t\tsocket = flag.String(\"socket\", \"\", \"\")\n\t)\n\tflag.Parse()\n\n\tvar conf *Conf\n\tif _, err := toml.DecodeFile(*settingsPath, &conf); err != nil {\n\t\tlog.Fatal(\"toml: \", err)\n\t}\n\n\tdb := repos.Open(conf.DbPath, conf.GitDir)\n\tdefer db.Close()\n\n\tauth, err := indieauth.Authentication(conf.URL, conf.URL+\"\/-\/callback\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tendpoints, err := indieauth.FindEndpoints(conf.Me)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tstore := NewStore(conf.Secret)\n\n\tprotect := func(good, bad http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif addr := store.Get(r); addr == conf.Me {\n\t\t\t\tgood.ServeHTTP(w, r)\n\t\t\t} else {\n\t\t\t\tbad.ServeHTTP(w, r)\n\t\t\t}\n\t\t})\n\t}\n\n\tshield := func(h http.Handler) http.Handler {\n\t\treturn protect(h, http.NotFoundHandler())\n\t}\n\n\tlist := handlers.List(db, conf.Title, conf.URL)\n\trepo := handlers.Repo(db, conf.Title, conf.URL, protect)\n\n\troute.Handle(\"\/\", mux.Method{\"GET\": protect(list.All, list.Public)})\n\n\troute.HandleFunc(\"\/:name\/*path\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := route.Vars(r)\n\n\t\tif strings.HasSuffix(vars[\"name\"], \".git\") {\n\t\t\trepo.Git.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\trepo.Html.ServeHTTP(w, r)\n\t})\n\n\troute.Handle(\"\/:name\/edit\", shield(handlers.Edit(db, conf.Title)))\n\troute.Handle(\"\/:name\/delete\", shield(handlers.Delete(db)))\n\n\troute.Handle(\"\/-\/create\", shield(handlers.Create(db, conf.Title)))\n\n\troute.HandleFunc(\"\/-\/sign-in\", func(w http.ResponseWriter, r *http.Request) {\n\t\tstate, err := store.SetState(w, r)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"could not start auth\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tredirectURL := auth.RedirectURL(endpoints, conf.Me, state)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t})\n\n\troute.HandleFunc(\"\/-\/callback\", func(w http.ResponseWriter, r *http.Request) {\n\t\tstate := store.GetState(r)\n\n\t\tif r.FormValue(\"state\") != state {\n\t\t\thttp.Error(w, \"state is bad\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tme, err := auth.Exchange(endpoints, r.FormValue(\"code\"))\n\t\tif err != nil || me != conf.Me {\n\t\t\thttp.Error(w, \"nope\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tstore.Set(w, r, me)\n\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t})\n\n\troute.HandleFunc(\"\/-\/sign-out\", func(w http.ResponseWriter, r *http.Request) {\n\t\tstore.Set(w, r, \"\")\n\t\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n\t})\n\n\troute.Handle(\"\/assets\/styles.css\", mux.Method{\"GET\": assets.Styles})\n\troute.Handle(\"\/assets\/highlight.js\", mux.Method{\"GET\": assets.Highlight})\n\troute.Handle(\"\/assets\/filter.js\", mux.Method{\"GET\": assets.Filter})\n\n\tserve.Serve(*port, *socket, filters.Log(route.Default))\n}\n\ntype meStore struct {\n\tstore sessions.Store\n}\n\nfunc NewStore(secret string) *meStore {\n\treturn &meStore{sessions.NewCookieStore([]byte(secret))}\n}\n\nfunc (s meStore) Get(r *http.Request) string {\n\tsession, _ := s.store.Get(r, \"session\")\n\n\tif v, ok := session.Values[\"me\"].(string); ok {\n\t\treturn v\n\t}\n\n\treturn \"\"\n}\n\nfunc (s meStore) Set(w http.ResponseWriter, r *http.Request, me string) {\n\tsession, _ := s.store.Get(r, \"session\")\n\tsession.Values[\"me\"] = me\n\tsession.Save(r, w)\n}\n\nfunc (s meStore) SetState(w http.ResponseWriter, r *http.Request) (string, error) {\n\tbytes := make([]byte, 32)\n\n\tif _, err := io.ReadFull(rand.Reader, bytes); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstate := base64.StdEncoding.EncodeToString(bytes)\n\n\tsession, _ := s.store.Get(r, \"session\")\n\tsession.Values[\"state\"] = state\n\treturn state, session.Save(r, w)\n}\n\nfunc (s meStore) GetState(r *http.Request) string {\n\tsession, _ := s.store.Get(r, \"session\")\n\n\tif v, ok := session.Values[\"state\"].(string); ok {\n\t\treturn v\n\t}\n\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/google\/go-github\/github\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\ntype GHR struct {\n\tGitHub GitHub\n\n\toutStream io.Writer\n}\n\nfunc (g *GHR) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t\/\/ When draft release creation is requested,\n\t\/\/ create it witout any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(g.outStream, \"==> Create a draft release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t\/\/ Check release is exist or not.\n\t\/\/ If release is not found, then create a new release.\n\trelease, err := g.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif err != RelaseNotFound {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get release\")\n\t\t}\n\t\tDebugf(\"Release (with tag %s) is not found: create a new one\",\n\t\t\t*req.TagName)\n\n\t\tif recreate {\n\t\t\tfmt.Fprintf(g.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) is not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(g.outStream, \"==> Create a new release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t\/\/ recreae is not true. Then use that exiting release.\n\tif !recreate {\n\t\tDebugf(\"Release (with tag %s) exists: use exsiting one\",\n\t\t\t*req.TagName)\n\n\t\tfmt.Fprintf(g.outStream, \"WARNING: found release (%s). Use existing one.\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t\/\/ When recreate is requested, delete exsiting release\n\t\/\/ and create a new release.\n\tfmt.Fprintln(g.outStream, \"==> Recreate a release\")\n\tif err := g.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.GitHub.CreateRelease(ctx, req)\n}\n\nfunc (g *GHR) DeleteRelease(ctx context.Context, ID int, tag string) error {\n\n\terr := g.GitHub.DeleteRelease(ctx, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = g.GitHub.DeleteTag(ctx, tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ This is because sometimes process of creating release on GitHub is more\n\t\/\/ fast than deleting tag.\n\ttime.Sleep(5 * time.Second)\n\n\treturn nil\n}\n\nfunc (g *GHR) UploadAssets(ctx context.Context, releaseID int, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"UploadAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tlocalAsset := localAsset\n\t\teg.Go(func() error {\n\t\t\tsemaphore <- struct{}{}\n\t\t\tdefer func() {\n\t\t\t\t<-semaphore\n\t\t\t}()\n\n\t\t\tfmt.Fprintf(g.outStream, \"--> Uploading: %15s\\n\", filepath.Base(localAsset))\n\t\t\t_, err := g.GitHub.UploadAsset(ctx, releaseID, localAsset)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err,\n\t\t\t\t\t\"failed to upload asset: %s\", localAsset)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn errors.Wrap(err, \"one of goroutines is failed\")\n\t}\n\n\treturn nil\n}\n\nfunc (g *GHR) DeleteAssets(ctx context.Context, releaseID int, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"DeleteAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\tassets, err := g.GitHub.ListAssets(ctx, releaseID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to list assets\")\n\t}\n\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tfor _, asset := range assets {\n\t\t\t\/\/ https:\/\/golang.org\/doc\/faq#closures_and_goroutines\n\t\t\tlocalAsset, asset := localAsset, asset\n\n\t\t\t\/\/ Uploaded asset name is same as basename of local file\n\t\t\tif *asset.Name == filepath.Base(localAsset) {\n\t\t\t\teg.Go(func() error {\n\t\t\t\t\tsemaphore <- struct{}{}\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t<-semaphore\n\t\t\t\t\t}()\n\n\t\t\t\t\tfmt.Fprintf(g.outStream, \"--> Deleting: %15s\\n\", *asset.Name)\n\t\t\t\t\tif err := g.GitHub.DeleteAsset(ctx, *asset.ID); err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err,\n\t\t\t\t\t\t\t\"failed to delete asset: %s\", *asset.Name)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn errors.Wrap(err, \"one of goroutines is failed\")\n\t}\n\n\treturn nil\n}\n<commit_msg>chore(ghr): fix typo<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/google\/go-github\/github\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\ntype GHR struct {\n\tGitHub GitHub\n\n\toutStream io.Writer\n}\n\nfunc (g *GHR) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t\/\/ When draft release creation is requested,\n\t\/\/ create it witout any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(g.outStream, \"==> Create a draft release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t\/\/ Check release is exist or not.\n\t\/\/ If release is not found, then create a new release.\n\trelease, err := g.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif err != RelaseNotFound {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get release\")\n\t\t}\n\t\tDebugf(\"Release (with tag %s) is not found: create a new one\",\n\t\t\t*req.TagName)\n\n\t\tif recreate {\n\t\t\tfmt.Fprintf(g.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) is not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(g.outStream, \"==> Create a new release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t\/\/ recreate is not true. Then use that exiting release.\n\tif !recreate {\n\t\tDebugf(\"Release (with tag %s) exists: use exsiting one\",\n\t\t\t*req.TagName)\n\n\t\tfmt.Fprintf(g.outStream, \"WARNING: found release (%s). Use existing one.\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t\/\/ When recreate is requested, delete exsiting release\n\t\/\/ and create a new release.\n\tfmt.Fprintln(g.outStream, \"==> Recreate a release\")\n\tif err := g.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.GitHub.CreateRelease(ctx, req)\n}\n\nfunc (g *GHR) DeleteRelease(ctx context.Context, ID int, tag string) error {\n\n\terr := g.GitHub.DeleteRelease(ctx, ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = g.GitHub.DeleteTag(ctx, tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ This is because sometimes process of creating release on GitHub is more\n\t\/\/ fast than deleting tag.\n\ttime.Sleep(5 * time.Second)\n\n\treturn nil\n}\n\nfunc (g *GHR) UploadAssets(ctx context.Context, releaseID int, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"UploadAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tlocalAsset := localAsset\n\t\teg.Go(func() error {\n\t\t\tsemaphore <- struct{}{}\n\t\t\tdefer func() {\n\t\t\t\t<-semaphore\n\t\t\t}()\n\n\t\t\tfmt.Fprintf(g.outStream, \"--> Uploading: %15s\\n\", filepath.Base(localAsset))\n\t\t\t_, err := g.GitHub.UploadAsset(ctx, releaseID, localAsset)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err,\n\t\t\t\t\t\"failed to upload asset: %s\", localAsset)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn errors.Wrap(err, \"one of goroutines is failed\")\n\t}\n\n\treturn nil\n}\n\nfunc (g *GHR) DeleteAssets(ctx context.Context, releaseID int, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"DeleteAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\tassets, err := g.GitHub.ListAssets(ctx, releaseID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to list assets\")\n\t}\n\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tfor _, asset := range assets {\n\t\t\t\/\/ https:\/\/golang.org\/doc\/faq#closures_and_goroutines\n\t\t\tlocalAsset, asset := localAsset, asset\n\n\t\t\t\/\/ Uploaded asset name is same as basename of local file\n\t\t\tif *asset.Name == filepath.Base(localAsset) {\n\t\t\t\teg.Go(func() error {\n\t\t\t\t\tsemaphore <- struct{}{}\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t<-semaphore\n\t\t\t\t\t}()\n\n\t\t\t\t\tfmt.Fprintf(g.outStream, \"--> Deleting: %15s\\n\", *asset.Name)\n\t\t\t\t\tif err := g.GitHub.DeleteAsset(ctx, *asset.ID); err != nil {\n\t\t\t\t\t\treturn errors.Wrapf(err,\n\t\t\t\t\t\t\t\"failed to delete asset: %s\", *asset.Name)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn errors.Wrap(err, \"one of goroutines is failed\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/libgit2\/git2go\"\n)\n\n\/\/ the git segment provides useful information about a git repository such as the domain of the \"origin\" remote (with an icon), the current branch, and whether the HEAD is dirty\nfunc gitSegment(segment *segment) {\n\tdir, err := os.Getwd()\n\tcheck(err)\n\trepo, err := git.OpenRepositoryExtended(dir, 0, \"\/\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar domain string\n\tremote, err := repo.Remotes.Lookup(\"origin\")\n\tif err == nil {\n\t\turi, err := url.Parse(remote.Url())\n\t\tcheck(err)\n\t\tdomain = uri.Hostname()\n\t\tremote.Free()\n\t}\n\n\tvar stashes int\n\trepo.Stashes.Foreach(func(int, string, *git.Oid) error {\n\t\tstashes++\n\t\treturn nil\n\t})\n\n\tvar commitsAhead int\n\tvar branch string\n\tvar dirty, modified, staged bool\n\n\tvar segments []string\n\tswitch domain {\n\tcase \"github.com\":\n\t\tsegments = append(segments, iconGithub)\n\tcase \"gitlab.com\":\n\t\tsegments = append(segments, iconGitlab)\n\tcase \"bitbucket.com\":\n\t\tsegments = append(segments, iconBitbucket)\n\tdefault:\n\t\tsegments = append(segments, iconGit)\n\t}\n\tif stashes != 0 || commitsAhead != 0 {\n\t\tsegments = append(segments, strings.Repeat(iconStash, stashes)+strings.Repeat(iconAhead, commitsAhead))\n\t}\n\tif branch != \"\" {\n\t\tsegments = append(segments, branch)\n\t}\n\tif dirty {\n\t\tsegment.background = \"yellow\"\n\n\t\tvar icons string\n\t\tif modified {\n\t\t\ticons += iconCircle\n\t\t}\n\t\tif staged {\n\t\t\ticons += iconPlus\n\t\t}\n\t\tif icons != \"\" {\n\t\t\tsegments = append(segments, icons)\n\t\t}\n\t}\n\tsegment.visible = true\n\tsegment.value = strings.Join(segments, \" \")\n}\n<commit_msg>check whether the repo is dirty<commit_after>package main\n\nimport (\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/libgit2\/git2go\"\n)\n\n\/\/ the git segment provides useful information about a git repository such as the domain of the \"origin\" remote (with an icon), the current branch, and whether the HEAD is dirty\nfunc gitSegment(segment *segment) {\n\tdir, err := os.Getwd()\n\tcheck(err)\n\trepo, err := git.OpenRepositoryExtended(dir, 0, \"\/\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar domain string\n\tremote, err := repo.Remotes.Lookup(\"origin\")\n\tif err == nil {\n\t\turi, err := url.Parse(remote.Url())\n\t\tcheck(err)\n\t\tdomain = uri.Hostname()\n\t\tremote.Free()\n\t}\n\n\tvar stashes int\n\trepo.Stashes.Foreach(func(int, string, *git.Oid) error {\n\t\tstashes++\n\t\treturn nil\n\t})\n\n\tvar commitsAhead int\n\tvar branch string\n\n\tvar dirty, modified, staged bool\n\tstatus, err := repo.StatusList(&git.StatusOptions{\n\t\tFlags: git.StatusOptIncludeUntracked,\n\t})\n\tcheck(err)\n\tcount, err := status.EntryCount()\n\tcheck(err)\n\tif count != 0 {\n\t\tdirty = true\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tentry, err := status.ByIndex(i)\n\t\tcheck(err)\n\t\tif entry.Status&git.StatusWtNew != 0 || entry.Status&git.StatusWtModified != 0 || entry.Status&git.StatusWtDeleted != 0 || entry.Status&git.StatusWtTypeChange != 0 || entry.Status&git.StatusWtRenamed != 0 {\n\t\t\tmodified = true\n\t\t}\n\t\tif entry.Status&git.StatusIndexNew != 0 || entry.Status&git.StatusIndexModified != 0 || entry.Status&git.StatusIndexDeleted != 0 || entry.Status&git.StatusIndexRenamed != 0 || entry.Status&git.StatusIndexTypeChange != 0 {\n\t\t\tstaged = true\n\t\t}\n\t}\n\tstatus.Free()\n\n\tvar segments []string\n\tswitch domain {\n\tcase \"github.com\":\n\t\tsegments = append(segments, iconGithub)\n\tcase \"gitlab.com\":\n\t\tsegments = append(segments, iconGitlab)\n\tcase \"bitbucket.com\":\n\t\tsegments = append(segments, iconBitbucket)\n\tdefault:\n\t\tsegments = append(segments, iconGit)\n\t}\n\tif stashes != 0 || commitsAhead != 0 {\n\t\tsegments = append(segments, strings.Repeat(iconStash, stashes)+strings.Repeat(iconAhead, commitsAhead))\n\t}\n\tif branch != \"\" {\n\t\tsegments = append(segments, branch)\n\t}\n\tif dirty {\n\t\tsegment.background = \"yellow\"\n\n\t\tvar icons string\n\t\tif modified {\n\t\t\ticons += iconCircle\n\t\t}\n\t\tif staged {\n\t\t\ticons += iconPlus\n\t\t}\n\t\tif icons != \"\" {\n\t\t\tsegments = append(segments, icons)\n\t\t}\n\t}\n\tsegment.visible = true\n\tsegment.value = strings.Join(segments, \" \")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ gitVersion returns the tag of the HEAD, if one exists,\n\/\/ or else the commit hash.\nfunc gitVersion() string {\n\ttag := cmd(\"git\", \"tag\", \"--contains\", \"HEAD\").OutputLine()\n\n\tif tag != \"\" {\n\t\treturn tag\n\t}\n\n\treturn cmd(\"git\", \"rev-parse\", \"--short\", \"HEAD\").OutputLine()\n}\n<commit_msg>Check that Git is in the PATH<commit_after>package main\n\nimport \"os\/exec\"\n\n\/\/ gitVersion returns one of the following:\n\/\/ - Git tag of the HEAD if one exists, or\n\/\/ - Commit hash of the HEAD, or\n\/\/ - Empty string if Git is not in the PATH.\nfunc gitVersion() string {\n\tcmdName := \"git\"\n\n\tif _, err := exec.LookPath(cmdName); err != nil {\n\t\treturn \"\"\n\t}\n\n\ttag := cmd(cmdName, \"tag\", \"--contains\", \"HEAD\").OutputLine()\n\tif tag != \"\" {\n\t\treturn tag\n\t}\n\n\treturn cmd(cmdName, \"rev-parse\", \"--short\", \"HEAD\").OutputLine()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Marin Procureur. All rights reserved.\n\/\/ Use of gjp source code is governed by a MIT license\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage gjp stands for Go JobPool, and is willing to be a simple jobpool manager. It maintains\na number of queues determined at the init. No priority whatsoever, just every queues are\nprocessing one job at a time.\n*\/\n\npackage gjp\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"time\"\n\t\"strings\"\n)\n\n\/*\n TYPES\n*\/\n\ntype (\n\t\/\/ JobPool principal s²tructure maintaining queues of jobs\n\t\/\/ from a priority range\n\tJobPool struct {\n\t\tpoolRange int\n\t\tqueue []*JobQueue \/\/Containing a \"poolRange\" number of queues\n\t\tshutdownChannel chan string \/\/NYI\n\t\tworking bool\n\t}\n\n\t\/\/ JobQueue is structure to control jobs queues\n\tJobQueue struct {\n\t\tjobs *list.List \/\/list of waiting jobs\n\t\texecutionChannel chan *Job \/\/Channel to contain current job to execute in queue\n\t\treportChannel chan *Job \/\/Channel taking job back when its execution has finished\n\t\tworking bool \/\/Indicate whether or not the queue is working\n\t\tjobsRemaining int \/\/Remaining jobs in the queue\n\t\ttotalExecutionTime time.Duration\n\t}\n\n\t\/\/ Job\n\tJob struct {\n\t\tJobRunner\n\t\tname string \/\/Public property retrievable\n\t\tstatus int \/\/Status of the current job\n\t\terror *JobError\n\t\texecutionTime time.Duration\n\t}\n\n\t\/\/Error handling structure\n\tJobError struct {\n\t\tJobName string \/\/job name\n\t\tErrorString string \/\/error as a string\n\t}\n)\n\n\/*\n SHUTDOWN INSTRUCTIONS\n*\/\nconst (\n\tabord string = \"abort\" \/\/stop immediatly cuting jobs immediatly\n\tclassic string = \"classic\" \/\/stop by non authorizing new jobs in the pool and shutting down after\n\tlong string = \"long\" \/\/stop when there is no more job to process\n)\n\n\/*\n JOB STATUS\n*\/\nconst (\n\tfailed int = 0\n\tsuccess int = 1\n\twaiting int = 2\n)\n\n\/*\n INTERFACES\n*\/\n\ntype JobRunner interface {\n\tExecuteJob() (*JobError)\n}\n\n\/*\n PUBLIC FUNCTIONS\n*\/\n\n\/\/Initialization function\nfunc New(poolRange int) (jp *JobPool) {\n\tjp = &JobPool{\n\t\tpoolRange: poolRange,\n\t\tworking: false,\n\t}\n\tjp.queue = make([]*JobQueue, jp.poolRange)\n\tfor i := 0; i < len(jp.queue); i++ {\n\t\tjp.queue[i] = &JobQueue{\n\t\t\tjobs: list.New(),\n\t\t\texecutionChannel: make(chan *Job, 2),\n\t\t\treportChannel: make(chan *Job, 2),\n\t\t\tworking: false,\n\t\t}\n\t}\n\n\t\/\/launch the loop\n\tgo jp.ProcessJobs()\n\tgo jp.StopProcessingJobs()\n\n\treturn\n}\n\n\/\/List current waiting jobs in each queues\nfunc (jp *JobPool) ListWaitingJobs() {\n\tfor i, _ := range jp.queue {\n\t\tif jp.queue[i] != nil {\n\t\t\tfmt.Println(\"in place\", i, \"there is\", jp.queue[i].jobs.Len(), \"job waiting\")\n\t\t}\n\t}\n}\n\n\/\/Queue new job to currentJobPool taking on\nfunc (jp *JobPool) QueueJob(jobName string, jobRunner JobRunner, poolNumber int) (job *Job) {\n\tdefer catchPanic(jobName, \"QueueJob\")\n\n\tjob = &Job{\n\t\tJobRunner: jobRunner,\n\t\tname: jobName,\n\t\tstatus: waiting,\n\t}\n\t\/\/Add new job to the queue\n\tjp.queue[poolNumber].jobsRemaining += 1\n\tjp.queue[poolNumber].jobs.PushBack(job)\n\n\treturn\n}\n\n\/\/Loop for processing jobs\n\/\/Started at the creation of the pool\nfunc (jp *JobPool) ProcessJobs() {\n\tdefer catchPanic(\"ProcessJobs\")\n\n\tjp.working = true\n\n\t\/\/While loop\n\tfor jp.working == true {\n\n\t\t\/\/iterate through each queue containing jobs\n\t\tfor i := 0; i < len(jp.queue); i++ {\n\t\t\tif jp.queue[i].jobsRemaining > 0 &&\n\t\t\t\tjp.queue[i].working == false {\n\n\t\t\t\t\/\/lock queue to avoid double processing\n\t\t\t\tjp.queue[i].lockQueue()\n\n\t\t\t\t\/\/Launch the queue execution in a thread\n\t\t\t\tgo jp.queue[i].executeJobQueue()\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/Stop the jobPool and release all memory allowed\n\/\/NYI\nfunc (jp *JobPool) StopProcessingJobs() {\n\tjp.working = false\n}\n\n\/*\n PRIVATE FUNCTIONS\n*\/\n\n\/\/Handle error\nfunc catchPanic(v ...string) {\n\tif r := recover(); r != nil {\n\t\tfmt.Println(\"Recover\", r)\n\t\tfmt.Println(\"Details :\", strings.Join(v, \" \"))\n\t}\n}\n\n\/\/lock queue while executing\nfunc (jq *JobQueue) lockQueue() {\n\tjq.working = true\n}\n\n\/\/unlock queue when jobs are done\nfunc (jq *JobQueue) unlockQueue() {\n\tjq.working = false\n}\n\n\/\/Remove job from currentQueue\nfunc (jq *JobQueue) dequeueJob(e *list.Element) {\n\tjq.jobs.Remove(e)\n}\n\n\/\/execute current joblist\nfunc (jq *JobQueue) executeJobQueue() {\n\tdefer catchPanic(\"executeJobQueue\")\n\tfor jq.jobsRemaining > 0 {\n\t\t\/\/Always take the first job in queue\n\t\tj := jq.jobs.Front().Value.(*Job)\n\n\t\t\/\/Since job is retrieved remove it from the waiting queue\n\t\tjq.dequeueJob(jq.jobs.Front())\n\n\t\t\/\/start job execution\n\t\tgo jq.launchJobExecution()\n\n\t\t\/\/put jo in the executionChannel\n\t\tjq.executionChannel <- j\n\n\t\t\/\/Retrieve the job report from the reportChannel\n\t\t\/\/Waiting until job is finished\n\t\tjobReport := <-jq.reportChannel\n\n\t\t\/\/Checking status on report\n\t\tswitch jobReport.status {\n\t\t\/\/Through an error if failed\n\t\tcase failed:\n\t\t\tif jobReport.error != nil {\n\t\t\t\tfmt.Println(jobReport.error.fmtError())\n\t\t\t} else {\n\t\t\t\tfmt.Println(jobReport.name, \"panicked after an execution of\", jobReport.executionTime)\n\t\t\t}\n\t\t\tbreak\n\t\tcase success:\n\t\t\tfmt.Println(\"Job\",\n\t\t\t\tjobReport.name,\n\t\t\t\t\"executed in\",\n\t\t\t\tjobReport.executionTime)\n\t\t\tbreak\n\t\t}\n\t\tjq.jobsRemaining -= 1\n\t\t\/\/Go to the next job\n\t}\n\t\/\/unlock queue to allow new jobs to be push to it\n\tjq.unlockQueue()\n\treturn\n}\n\n\/\/Launch the JobExecution\nfunc (jq *JobQueue) launchJobExecution() {\n\tdefer catchPanic(\"launchJobExecution\")\n\n\t\/\/Retrieve job from execution channel of the queue\n\tj := <-jq.executionChannel\n\n\t\/\/execute the job synchronously with time starter\n\tj.status = j.executeJob(time.Now())\n\t\/\/add this time to the queue execution time\n\tjq.totalExecutionTime += j.executionTime\n\n\t\/\/Send job to the report channel\n\tjq.reportChannel <- j\n}\n\n\/\/execute the job safely and set the status back for the reportChannel\nfunc (j *Job) executeJob(start time.Time) (jobStatus int) {\n\tdefer catchPanic(\"Job\", j.name, \"failed in executeJob\")\n\t\/\/Set the execution time for this job\n\tdefer func() {\n\t\tj.executionTime = time.Since(start)\n\t}()\n\n\tj.error = j.ExecuteJob()\n\t\/\/Set the status required for the job\n\tswitch j.error {\n\tcase nil:\n\t\tjobStatus = success\n\t\tbreak\n\tdefault:\n\t\tjobStatus = failed\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/create an error well formated\nfunc (je *JobError) fmtError() (errorString string) {\n\terrorString = fmt.Sprintln(\"Job\",\n\t\tje.JobName,\n\t\t\"has failed with a error :\",\n\t\tje.ErrorString)\n\treturn\n}\n\n\/*\nImplementation example :\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/FaXaq\/gjp\"\n\t\"time\"\n)\n\ntype (\n\tMyJob struct {\n\t\tname string\n\t}\n)\n\nfunc (myjob *MyJob) ExecuteJob() (err *gjp.JobError) {\n\tif myjob.name == \"YES\" {\n\t\tdefer panic(\"plz send haelp\")\n\t\terr = &gjp.JobError{\n\t\t\tmyjob.name,\n\t\t\t\"nooooooooo\",\n\t\t}\n\t\treturn\n\t}\n\tfor i := 0; i < 1; i++ {\n\t\tfmt.Println(myjob.name)\n\t}\n\treturn\n}\n\nfunc main(){\n\tjobPool := gjp.New(2)\n\tnewJob := &MyJob{\n\t\tname: \"YES\",\n\t}\n\tnewJob2 := &MyJob{\n\t\tname: \"THAT\",\n\t}\n\tnewJob3 := &MyJob{\n\t\tname: \"ROCKS\",\n\t}\n\tjobPool.QueueJob(newJob.name, newJob, 0)\n\tjobPool.QueueJob(newJob2.name, newJob2, 1)\n\tjobPool.QueueJob(newJob3.name, newJob3, 1)\n\ttime.Sleep(time.Millisecond * 30)\n}\n\n\nThe execution should retrieve something like :\n\nRecover plz send haelp\nDetails : Job YES failed in executeJob\nYES panicked after an execution of 14.046µs\nTHAT\nJob THAT executed in 24.399µs\nROCKS\nJob ROCKS executed in 21.57µs\n\n\n*\/\n<commit_msg>Make use of the JobPool shutdown channel<commit_after>\/\/ Copyright 2016 Marin Procureur. All rights reserved.\n\/\/ Use of gjp source code is governed by a MIT license\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage gjp stands for Go JobPool, and is willing to be a simple jobpool manager. It maintains\na number of queues determined at the init. No priority whatsoever, just every queues are\nprocessing one job at a time.\n*\/\n\npackage gjp\n\nimport (\n\t\"container\/list\"\n\t\"fmt\"\n\t\"time\"\n\t\"strings\"\n)\n\n\/*\n TYPES\n*\/\n\ntype (\n\t\/\/ JobPool principal s²tructure maintaining queues of jobs\n\t\/\/ from a priority range\n\tJobPool struct {\n\t\tpoolRange int\n\t\tqueue []*JobQueue \/\/Containing a \"poolRange\" number of queues\n\t\tshutdownChannel chan string \/\/NYI\n\t\tworking bool\n\t}\n\n\t\/\/ JobQueue is structure to control jobs queues\n\tJobQueue struct {\n\t\tjobs *list.List \/\/list of waiting jobs\n\t\texecutionChannel chan *Job \/\/Channel to contain current job to execute in queue\n\t\treportChannel chan *Job \/\/Channel taking job back when its execution has finished\n\t\tworking bool \/\/Indicate whether or not the queue is working\n\t\tjobsRemaining int \/\/Remaining jobs in the queue\n\t\ttotalExecutionTime time.Duration\n\t}\n\n\t\/\/ Job\n\tJob struct {\n\t\tJobRunner\n\t\tname string \/\/Public property retrievable\n\t\tstatus int \/\/Status of the current job\n\t\terror *JobError\n\t\texecutionTime time.Duration\n\t}\n\n\t\/\/Error handling structure\n\tJobError struct {\n\t\tJobName string \/\/job name\n\t\tErrorString string \/\/error as a string\n\t}\n)\n\n\/*\n SHUTDOWN INSTRUCTIONS\n*\/\nconst (\n\tabort string = \"abort\" \/\/stop immediatly cuting jobs immediatly\n\tclassic string = \"classic\" \/\/stop by non authorizing new jobs in the pool and shutting down after\n\tlong string = \"long\" \/\/stop when there is no more job to process\n)\n\n\/*\n JOB STATUS\n*\/\nconst (\n\tfailed int = 0\n\tsuccess int = 1\n\twaiting int = 2\n)\n\n\/*\n INTERFACES\n*\/\n\ntype JobRunner interface {\n\tExecuteJob() (*JobError)\n}\n\n\/*\n PUBLIC FUNCTIONS\n*\/\n\n\/\/Initialization function\nfunc New(poolRange int) (jp *JobPool) {\n\t\/\/Create the jobPool\n\tjp = &JobPool{\n\t\tpoolRange: poolRange,\n\t\tworking: false,\n\t\tshutdownChannel: make(chan string),\n\t}\n\n\t\/\/create the queuer ranges\n\tjp.queue = make([]*JobQueue, jp.poolRange)\n\n\tfor i := 0; i < len(jp.queue); i++ {\n\t\tjp.queue[i] = &JobQueue{\n\t\t\tjobs: list.New(),\n\t\t\texecutionChannel: make(chan *Job, 2),\n\t\t\treportChannel: make(chan *Job, 2),\n\t\t\tworking: false,\n\t\t}\n\t}\n\n\t\/\/launch the loop\n\tgo jp.ProcessJobs()\n\tgo jp.ListenForShutdown()\n\n\treturn\n}\n\n\/\/List current waiting jobs in each queues\nfunc (jp *JobPool) ListWaitingJobs() {\n\tfor i, _ := range jp.queue {\n\t\tif jp.queue[i] != nil {\n\t\t\tfmt.Println(\"in place\", i, \"there is\", jp.queue[i].jobs.Len(), \"job waiting\")\n\t\t}\n\t}\n}\n\n\/\/Queue new job to currentJobPool taking on\nfunc (jp *JobPool) QueueJob(jobName string, jobRunner JobRunner, poolNumber int) (job *Job) {\n\tdefer catchPanic(jobName, \"QueueJob\")\n\n\tjob = &Job{\n\t\tJobRunner: jobRunner,\n\t\tname: jobName,\n\t\tstatus: waiting,\n\t}\n\t\/\/Add new job to the queue\n\tjp.queue[poolNumber].jobsRemaining += 1\n\tjp.queue[poolNumber].jobs.PushBack(job)\n\n\treturn\n}\n\n\/\/Loop for processing jobs\n\/\/Started at the creation of the pool\nfunc (jp *JobPool) ProcessJobs() {\n\tdefer catchPanic(\"ProcessJobs\")\n\n\tjp.working = true\n\n\t\/\/While loop\n\tfor jp.working == true {\n\n\t\t\/\/iterate through each queue containing jobs\n\t\tfor i := 0; i < len(jp.queue); i++ {\n\t\t\tif jp.queue[i].jobsRemaining > 0 &&\n\t\t\t\tjp.queue[i].working == false {\n\n\t\t\t\t\/\/lock queue to avoid double processing\n\t\t\t\tjp.queue[i].lockQueue()\n\n\t\t\t\t\/\/Launch the queue execution in a thread\n\t\t\t\tgo jp.queue[i].executeJobQueue()\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/Stop the jobPool and release all memory allowed\n\/\/NYI\nfunc (jp *JobPool) ListenForShutdown() {\n\tfmt.Println(\"Waiting for shutdown\")\n\t<- jp.shutdownChannel\n\tjp.working = false\n}\n\nfunc (jp *JobPool) ShutdownWorkPool() {\n\tjp.shutdownChannel <- abort\n}\n\n\/*\n PRIVATE FUNCTIONS\n*\/\n\n\/\/Handle error\nfunc catchPanic(v ...string) {\n\tif r := recover(); r != nil {\n\t\tfmt.Println(\"Recover\", r)\n\t\tfmt.Println(\"Details :\", strings.Join(v, \" \"))\n\t}\n}\n\n\/\/lock queue while executing\nfunc (jq *JobQueue) lockQueue() {\n\tjq.working = true\n}\n\n\/\/unlock queue when jobs are done\nfunc (jq *JobQueue) unlockQueue() {\n\tjq.working = false\n}\n\n\/\/Remove job from currentQueue\nfunc (jq *JobQueue) dequeueJob(e *list.Element) {\n\tjq.jobs.Remove(e)\n}\n\n\/\/execute current joblist\nfunc (jq *JobQueue) executeJobQueue() {\n\tdefer catchPanic(\"executeJobQueue\")\n\tfor jq.jobsRemaining > 0 {\n\t\t\/\/Always take the first job in queue\n\t\tj := jq.jobs.Front().Value.(*Job)\n\n\t\t\/\/Since job is retrieved remove it from the waiting queue\n\t\tjq.dequeueJob(jq.jobs.Front())\n\n\t\t\/\/start job execution\n\t\tgo jq.launchJobExecution()\n\n\t\t\/\/put jo in the executionChannel\n\t\tjq.executionChannel <- j\n\n\t\t\/\/Retrieve the job report from the reportChannel\n\t\t\/\/Waiting until job is finished\n\t\tjobReport := <-jq.reportChannel\n\n\t\t\/\/Checking status on report\n\t\tswitch jobReport.status {\n\t\t\/\/Through an error if failed\n\t\tcase failed:\n\t\t\tif jobReport.error != nil {\n\t\t\t\tfmt.Println(jobReport.error.fmtError())\n\t\t\t} else {\n\t\t\t\tfmt.Println(jobReport.name, \"panicked after an execution of\", jobReport.executionTime)\n\t\t\t}\n\t\t\tbreak\n\t\tcase success:\n\t\t\tfmt.Println(\"Job\",\n\t\t\t\tjobReport.name,\n\t\t\t\t\"executed in\",\n\t\t\t\tjobReport.executionTime)\n\t\t\tbreak\n\t\t}\n\t\tjq.jobsRemaining -= 1\n\t\t\/\/Go to the next job\n\t}\n\t\/\/unlock queue to allow new jobs to be push to it\n\tjq.unlockQueue()\n\treturn\n}\n\n\/\/Launch the JobExecution\nfunc (jq *JobQueue) launchJobExecution() {\n\tdefer catchPanic(\"launchJobExecution\")\n\n\t\/\/Retrieve job from execution channel of the queue\n\tj := <-jq.executionChannel\n\n\t\/\/execute the job synchronously with time starter\n\tj.status = j.executeJob(time.Now())\n\t\/\/add this time to the queue execution time\n\tjq.totalExecutionTime += j.executionTime\n\n\t\/\/Send job to the report channel\n\tjq.reportChannel <- j\n}\n\n\/\/execute the job safely and set the status back for the reportChannel\nfunc (j *Job) executeJob(start time.Time) (jobStatus int) {\n\tdefer catchPanic(\"Job\", j.name, \"failed in executeJob\")\n\t\/\/Set the execution time for this job\n\tdefer func() {\n\t\tj.executionTime = time.Since(start)\n\t}()\n\n\tj.error = j.ExecuteJob()\n\t\/\/Set the status required for the job\n\tswitch j.error {\n\tcase nil:\n\t\tjobStatus = success\n\t\tbreak\n\tdefault:\n\t\tjobStatus = failed\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/create an error well formated\nfunc (je *JobError) fmtError() (errorString string) {\n\terrorString = fmt.Sprintln(\"Job\",\n\t\tje.JobName,\n\t\t\"has failed with a error :\",\n\t\tje.ErrorString)\n\treturn\n}\n\n\/*\nImplementation example :\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/FaXaq\/gjp\"\n\t\"time\"\n)\n\ntype (\n\tMyJob struct {\n\t\tname string\n\t}\n)\n\nfunc (myjob *MyJob) ExecuteJob() (err *gjp.JobError) {\n\tif myjob.name == \"YES\" {\n\t\tdefer panic(\"plz send haelp\")\n\t\terr = &gjp.JobError{\n\t\t\tmyjob.name,\n\t\t\t\"nooooooooo\",\n\t\t}\n\t\treturn\n\t}\n\tfor i := 0; i < 1; i++ {\n\t\tfmt.Println(myjob.name)\n\t}\n\treturn\n}\n\nfunc main(){\n\tjobPool := gjp.New(2)\n\tnewJob := &MyJob{\n\t\tname: \"YES\",\n\t}\n\tnewJob2 := &MyJob{\n\t\tname: \"THAT\",\n\t}\n\tnewJob3 := &MyJob{\n\t\tname: \"ROCKS\",\n\t}\n\tjobPool.QueueJob(newJob.name, newJob, 0)\n\tjobPool.QueueJob(newJob2.name, newJob2, 1)\n\tjobPool.QueueJob(newJob3.name, newJob3, 1)\n\ttime.Sleep(time.Millisecond * 30)\n}\n\n\nThe execution should retrieve something like :\n\nRecover plz send haelp\nDetails : Job YES failed in executeJob\nYES panicked after an execution of 14.046µs\nTHAT\nJob THAT executed in 24.399µs\nROCKS\nJob ROCKS executed in 21.57µs\n\n\n*\/\n<|endoftext|>"} {"text":"<commit_before>package gntagger\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/jroimartin\/gocui\"\n)\n\nvar (\n\tnames = &Names{}\n\ttext = &Text{}\n\ta = Annotation{}\n)\n\nfunc InitGUI(t *Text, n *Names) {\n\ttext = t\n\tnames = n\n\tg, err := gocui.NewGui(gocui.OutputNormal)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tdefer g.Close()\n\n\tg.Cursor = true\n\n\tg.SetManagerFunc(Layout)\n\n\tif err := Keybindings(g); err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\tif err := g.MainLoop(); err != nil && err != gocui.ErrQuit {\n\t\tlog.Panicln(err)\n\t}\n}\n\nfunc Keybindings(g *gocui.Gui) error {\n\tif err := g.SetKeybinding(\"\", gocui.KeyCtrlC, gocui.ModNone,\n\t\tquit); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyCtrlS, gocui.ModNone,\n\t\tsave); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyArrowLeft, gocui.ModNone,\n\t\tlistBack); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyArrowRight, gocui.ModNone,\n\t\tlistForward); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeySpace, gocui.ModNone,\n\t\tnoName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'y', gocui.ModNone,\n\t\tyesName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 's', gocui.ModNone,\n\t\tspeciesName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'g', gocui.ModNone,\n\t\tgenusName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'u', gocui.ModNone,\n\t\tuninomialName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'd', gocui.ModNone,\n\t\tdoubtfulName); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc Layout(g *gocui.Gui) error {\n\tmaxX, maxY := g.Size()\n\n\terr := viewNames(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = viewText(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = viewHelp(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc viewNames(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"names\", -1, 3, 35, maxY-1); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Title = \"Names\"\n\t\terr := v.SetOrigin(0, 5)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t}\n\t\tfor i := 0; i <= maxY\/2+1; i++ {\n\t\t\tfmt.Fprintln(v)\n\t\t}\n\t\tfmt.Fprintln(v, names.String())\n\t\tox, oy := v.Origin()\n\t\terr = v.SetOrigin(ox, oy+(4*names.Data.Meta.CurrentName))\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc viewText(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"text\", 35, 3, maxX, maxY-1); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Title = \"Text\"\n\t\tfor i := 0; i <= maxY\/2+2; i++ {\n\t\t\tfmt.Fprintln(v)\n\t\t}\n\t\tfmt.Fprintf(v, \"%s\", text.Markup(names))\n\n\t\tfmt.Fprintln(v, text.Markup(names))\n\t\terr := v.SetOrigin(0, text.OffsetY+3)\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc viewHelp(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"help\", -1, maxY-2, maxX, maxY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Frame = false\n\t\tv.BgColor = gocui.ColorWhite\n\t\tv.FgColor = gocui.ColorBlack\n\t\tfmt.Fprintln(v,\n\t\t\t\"→ (yes*) next, ← back, Space no, y yes, s species, g genus, u uninomial, d doubt, ^S save, ^C exit\")\n\t}\n\treturn nil\n}\n\nfunc quit(g *gocui.Gui, v *gocui.View) error {\n\tsave(g, v)\n\treturn gocui.ErrQuit\n}\n\nfunc save(g *gocui.Gui, v *gocui.View) error {\n\terr := names.Save()\n\treturn err\n}\n\nfunc speciesName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Species())\n\treturn err\n}\n\nfunc genusName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Genus())\n\treturn err\n}\n\nfunc uninomialName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Uninomial())\n\treturn err\n}\n\nfunc doubtfulName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Doubtful())\n\treturn err\n}\n\nfunc yesName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Accepted())\n\treturn err\n}\n\nfunc noName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.NotName())\n\treturn err\n}\n\nfunc setKey(g *gocui.Gui, v *gocui.View, annot string) error {\n\tfor _, view := range g.Views() {\n\t\tif view.Name() == \"names\" {\n\t\t\tv = view\n\t\t\tbreak\n\t\t}\n\t}\n\terr := updateNamesView(g, v, 0, annot)\n\treturn err\n}\n\nfunc listForward(g *gocui.Gui, viewNames *gocui.View) error {\n\tvar viewText *gocui.View\n\tif names.Data.Meta.CurrentName == len(names.Data.Names) {\n\t\tnames.Data.Meta.CurrentName--\n\t\treturn nil\n\t}\n\tfor _, v := range g.Views() {\n\t\tif v.Name() == \"names\" {\n\t\t\tviewNames = v\n\t\t} else if v.Name() == \"text\" {\n\t\t\tviewText = v\n\t\t}\n\t}\n\terr := updateNamesView(g, viewNames, 1, a.Accepted())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = updateText(g, viewText)\n\treturn err\n}\n\nfunc listBack(g *gocui.Gui, viewNames *gocui.View) error {\n\tvar viewText *gocui.View\n\tif names.Data.Meta.CurrentName == 0 {\n\t\treturn nil\n\t}\n\tfor _, v := range g.Views() {\n\t\tif v.Name() == \"names\" {\n\t\t\tviewNames = v\n\t\t} else if v.Name() == \"text\" {\n\t\t\tviewText = v\n\t\t}\n\t}\n\terr := updateNamesView(g, viewNames, -1, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = updateText(g, viewText)\n\treturn err\n}\n\nfunc updateText(g *gocui.Gui, v *gocui.View) error {\n\t_, maxY := g.Size()\n\tv.Clear()\n\tfor i := 0; i <= maxY\/2+2; i++ {\n\t\tfmt.Fprintln(v)\n\t}\n\tfmt.Fprintln(v, text.Markup(names))\n\terr := v.SetOrigin(0, text.OffsetY+3)\n\treturn err\n}\n\nfunc updateNamesView(g *gocui.Gui, v *gocui.View,\n\tincrement int, annot string) error {\n\t_, maxY := g.Size()\n\tname := &names.Data.Names[names.Data.Meta.CurrentName]\n\tif annot == a.Accepted() {\n\t\tif increment == 1 && name.Annotation == \"\" {\n\t\t\tname.Annotation = annot\n\t\t} else if increment == 0 {\n\t\t\tname.Annotation = annot\n\t\t}\n\t} else if annot != \"\" {\n\t\tname.Annotation = annot\n\t}\n\tnames.Data.Meta.CurrentName += increment\n\tv.Clear()\n\tfor i := 0; i <= maxY\/2+1; i++ {\n\t\tfmt.Fprintln(v)\n\t}\n\tfmt.Fprintln(v, names.String())\n\tox, oy := v.Origin()\n\terr := v.SetOrigin(ox, oy+(4*increment))\n\treturn err\n}\n<commit_msg>Fix #14 autosave every 30 edits<commit_after>package gntagger\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/jroimartin\/gocui\"\n)\n\nvar (\n\tnames = &Names{}\n\ttext = &Text{}\n\ta = Annotation{}\n\tsaveCount = 0\n)\n\nfunc InitGUI(t *Text, n *Names) {\n\ttext = t\n\tnames = n\n\tg, err := gocui.NewGui(gocui.OutputNormal)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tdefer g.Close()\n\n\tg.Cursor = true\n\n\tg.SetManagerFunc(Layout)\n\n\tif err := Keybindings(g); err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\tif err := g.MainLoop(); err != nil && err != gocui.ErrQuit {\n\t\tlog.Panicln(err)\n\t}\n}\n\nfunc Keybindings(g *gocui.Gui) error {\n\tif err := g.SetKeybinding(\"\", gocui.KeyCtrlC, gocui.ModNone,\n\t\tquit); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyCtrlS, gocui.ModNone,\n\t\tsave); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyArrowLeft, gocui.ModNone,\n\t\tlistBack); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeyArrowRight, gocui.ModNone,\n\t\tlistForward); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", gocui.KeySpace, gocui.ModNone,\n\t\tnoName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'y', gocui.ModNone,\n\t\tyesName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 's', gocui.ModNone,\n\t\tspeciesName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'g', gocui.ModNone,\n\t\tgenusName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'u', gocui.ModNone,\n\t\tuninomialName); err != nil {\n\t\treturn err\n\t}\n\n\tif err := g.SetKeybinding(\"\", 'd', gocui.ModNone,\n\t\tdoubtfulName); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc Layout(g *gocui.Gui) error {\n\tmaxX, maxY := g.Size()\n\n\terr := viewNames(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = viewText(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = viewHelp(g, maxX, maxY)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc viewNames(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"names\", -1, 3, 35, maxY-1); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Title = \"Names\"\n\t\terr := v.SetOrigin(0, 5)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t}\n\t\tfor i := 0; i <= maxY\/2+1; i++ {\n\t\t\tfmt.Fprintln(v)\n\t\t}\n\t\tfmt.Fprintln(v, names.String())\n\t\tox, oy := v.Origin()\n\t\terr = v.SetOrigin(ox, oy+(4*names.Data.Meta.CurrentName))\n\t\tif err != nil {\n\t\t\tlog.Panicln(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc viewText(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"text\", 35, 3, maxX, maxY-1); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Title = \"Text\"\n\t\tfor i := 0; i <= maxY\/2+2; i++ {\n\t\t\tfmt.Fprintln(v)\n\t\t}\n\t\tfmt.Fprintf(v, \"%s\", text.Markup(names))\n\n\t\tfmt.Fprintln(v, text.Markup(names))\n\t\terr := v.SetOrigin(0, text.OffsetY+3)\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc viewHelp(g *gocui.Gui, maxX, maxY int) error {\n\tif v, err := g.SetView(\"help\", -1, maxY-2, maxX, maxY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Frame = false\n\t\tv.BgColor = gocui.ColorWhite\n\t\tv.FgColor = gocui.ColorBlack\n\t\tfmt.Fprintln(v,\n\t\t\t\"→ (yes*) next, ← back, Space no, y yes, s species, g genus, u uninomial, d doubt, ^S save, ^C exit\")\n\t}\n\treturn nil\n}\n\nfunc quit(g *gocui.Gui, v *gocui.View) error {\n\tsave(g, v)\n\treturn gocui.ErrQuit\n}\n\nfunc save(g *gocui.Gui, v *gocui.View) error {\n\terr := names.Save()\n\treturn err\n}\n\nfunc speciesName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Species())\n\treturn err\n}\n\nfunc genusName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Genus())\n\treturn err\n}\n\nfunc uninomialName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Uninomial())\n\treturn err\n}\n\nfunc doubtfulName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Doubtful())\n\treturn err\n}\n\nfunc yesName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.Accepted())\n\treturn err\n}\n\nfunc noName(g *gocui.Gui, v *gocui.View) error {\n\terr := setKey(g, v, a.NotName())\n\treturn err\n}\n\nfunc setKey(g *gocui.Gui, v *gocui.View, annot string) error {\n\tfor _, view := range g.Views() {\n\t\tif view.Name() == \"names\" {\n\t\t\tv = view\n\t\t\tbreak\n\t\t}\n\t}\n\terr := updateNamesView(g, v, 0, annot)\n\treturn err\n}\n\nfunc listForward(g *gocui.Gui, viewNames *gocui.View) error {\n\tvar viewText *gocui.View\n\tif names.Data.Meta.CurrentName == len(names.Data.Names) {\n\t\tnames.Data.Meta.CurrentName--\n\t\treturn nil\n\t}\n\tfor _, v := range g.Views() {\n\t\tif v.Name() == \"names\" {\n\t\t\tviewNames = v\n\t\t} else if v.Name() == \"text\" {\n\t\t\tviewText = v\n\t\t}\n\t}\n\terr := updateNamesView(g, viewNames, 1, a.Accepted())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = updateText(g, viewText)\n\treturn err\n}\n\nfunc listBack(g *gocui.Gui, viewNames *gocui.View) error {\n\tvar viewText *gocui.View\n\tif names.Data.Meta.CurrentName == 0 {\n\t\treturn nil\n\t}\n\tfor _, v := range g.Views() {\n\t\tif v.Name() == \"names\" {\n\t\t\tviewNames = v\n\t\t} else if v.Name() == \"text\" {\n\t\t\tviewText = v\n\t\t}\n\t}\n\terr := updateNamesView(g, viewNames, -1, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = updateText(g, viewText)\n\treturn err\n}\n\nfunc updateText(g *gocui.Gui, v *gocui.View) error {\n\t_, maxY := g.Size()\n\tv.Clear()\n\tfor i := 0; i <= maxY\/2+2; i++ {\n\t\tfmt.Fprintln(v)\n\t}\n\tfmt.Fprintln(v, text.Markup(names))\n\terr := v.SetOrigin(0, text.OffsetY+3)\n\treturn err\n}\n\nfunc updateNamesView(g *gocui.Gui, v *gocui.View,\n\tincrement int, annot string) error {\n\tsaveCount++\n\tif saveCount == 30 {\n\t\tsave(g, v)\n\t\tsaveCount = 0\n\t}\n\t_, maxY := g.Size()\n\tname := &names.Data.Names[names.Data.Meta.CurrentName]\n\tif annot == a.Accepted() {\n\t\tif increment == 1 && name.Annotation == \"\" {\n\t\t\tname.Annotation = annot\n\t\t} else if increment == 0 {\n\t\t\tname.Annotation = annot\n\t\t}\n\t} else if annot != \"\" {\n\t\tname.Annotation = annot\n\t}\n\tnames.Data.Meta.CurrentName += increment\n\tv.Clear()\n\tfor i := 0; i <= maxY\/2+1; i++ {\n\t\tfmt.Fprintln(v)\n\t}\n\tfmt.Fprintln(v, names.String())\n\tox, oy := v.Origin()\n\terr := v.SetOrigin(ox, oy+(4*increment))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/polydawn\/gosh\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/io\"\n\t\"polydawn.net\/repeatr\/io\/placer\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/cachedir\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/dir\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/git\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/s3\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/tar\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/tarexec\"\n)\n\n\/*\n\tThe default, \"universal\", dispatching Transmat.\n\tYou should be able to throw pretty much any type of input spec at it.\n\n\tIf you're building your own transports and data warehousing integrations,\n\tyou'll need to assemble your own Transmat instead of this one --\n\t`integrity.DispatchingTransmat` is good for composing them so you can still\n\tuse one interface to get any kind of data you want.\n*\/\nfunc DefaultTransmat() integrity.Transmat {\n\tworkDir := filepath.Join(def.Base(), \"io\")\n\tdirCacher := cachedir.New(filepath.Join(workDir, \"dircacher\"), map[integrity.TransmatKind]integrity.TransmatFactory{\n\t\tintegrity.TransmatKind(\"dir\"): dir.New,\n\t\tintegrity.TransmatKind(\"tar\"): tar.New,\n\t\tintegrity.TransmatKind(\"s3\"): s3.New,\n\t})\n\tuniversalTransmat := integrity.NewDispatchingTransmat(map[integrity.TransmatKind]integrity.Transmat{\n\t\tintegrity.TransmatKind(\"dir\"): dirCacher,\n\t\tintegrity.TransmatKind(\"tar\"): dirCacher,\n\t\tintegrity.TransmatKind(\"exec-tar\"): tarexec.New(filepath.Join(workDir, \"tarexec\")),\n\t\tintegrity.TransmatKind(\"s3\"): dirCacher,\n\t\tintegrity.TransmatKind(\"git\"): git.New(filepath.Join(workDir, \"git\")),\n\t})\n\treturn universalTransmat\n}\n\nfunc BestAssembler() integrity.Assembler {\n\tif bestAssembler == nil {\n\t\tbestAssembler = determineBestAssembler()\n\t}\n\treturn bestAssembler\n}\n\nvar bestAssembler integrity.Assembler\n\nfunc determineBestAssembler() integrity.Assembler {\n\tif os.Getuid() != 0 {\n\t\t\/\/ Can't mount without root.\n\t\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: need root privs to use faster systems.\\n\")\n\t\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t}\n\tif os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\/\/ Travis's own virtualization denies mounting. whee.\n\t\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: travis' environment blocks faster systems.\\n\")\n\t\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t}\n\t\/\/ If we *can* mount...\n\tif isAUFSAvailable() {\n\t\t\/\/ if AUFS is installed, AUFS+Bind is The Winner.\n\t\treturn placer.NewAssembler(placer.NewAufsPlacer(filepath.Join(def.Base(), \"aufs\")))\n\t}\n\t\/\/ last fallback... :( copy it is\n\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: install AUFS to use faster systems.\\n\")\n\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t\/\/ TODO we should be able to use copy for fallback RW isolator but still bind for RO. write a new placer for that. or really, maybe bind should chain.\n}\n\nfunc isAUFSAvailable() bool {\n\t\/\/ the greatest thing to do would of course just be to issue the syscall once and see if it flies\n\t\/\/ but that's a distrubingly stateful and messy operation so we're gonna check a bunch\n\t\/\/ of next-best-things instead.\n\n\t\/\/ if we can't find it in \/proc\/filesystems, it's probably not installed\n\tif fs, err := ioutil.ReadFile(\"\/proc\/filesystems\"); err == nil {\n\t\tfound := false\n\t\tfsLines := strings.Split(string(fs), \"\\n\")\n\t\tfor _, line := range fsLines {\n\t\t\tparts := strings.Split(line, \"\\t\")\n\t\t\tif len(parts) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif parts[1] == \"aufs\" {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ if we can't find it in modprobe, it's probably not installed\n\tmodprobeCode := gosh.Sh(\n\t\t\"modprobe\", \"aufs\",\n\t\tgosh.NullIO,\n\t\tgosh.Opts{OkExit: gosh.AnyExit},\n\t).GetExitCodeSoon(100 * time.Millisecond)\n\tif modprobeCode != 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<commit_msg>Cache git checkouts.<commit_after>package util\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/polydawn\/gosh\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/io\"\n\t\"polydawn.net\/repeatr\/io\/placer\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/cachedir\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/dir\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/git\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/s3\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/tar\"\n\t\"polydawn.net\/repeatr\/io\/transmat\/tarexec\"\n)\n\n\/*\n\tThe default, \"universal\", dispatching Transmat.\n\tYou should be able to throw pretty much any type of input spec at it.\n\n\tIf you're building your own transports and data warehousing integrations,\n\tyou'll need to assemble your own Transmat instead of this one --\n\t`integrity.DispatchingTransmat` is good for composing them so you can still\n\tuse one interface to get any kind of data you want.\n*\/\nfunc DefaultTransmat() integrity.Transmat {\n\tworkDir := filepath.Join(def.Base(), \"io\")\n\tdirCacher := cachedir.New(filepath.Join(workDir, \"dircacher\"), map[integrity.TransmatKind]integrity.TransmatFactory{\n\t\tintegrity.TransmatKind(\"dir\"): dir.New,\n\t\tintegrity.TransmatKind(\"tar\"): tar.New,\n\t\tintegrity.TransmatKind(\"s3\"): s3.New,\n\t})\n\tgitCacher := cachedir.New(filepath.Join(workDir, \"dircacher-git\"), map[integrity.TransmatKind]integrity.TransmatFactory{\n\t\tintegrity.TransmatKind(\"git\"): git.New,\n\t})\n\tuniversalTransmat := integrity.NewDispatchingTransmat(map[integrity.TransmatKind]integrity.Transmat{\n\t\tintegrity.TransmatKind(\"dir\"): dirCacher,\n\t\tintegrity.TransmatKind(\"tar\"): dirCacher,\n\t\tintegrity.TransmatKind(\"exec-tar\"): tarexec.New(filepath.Join(workDir, \"tarexec\")),\n\t\tintegrity.TransmatKind(\"s3\"): dirCacher,\n\t\tintegrity.TransmatKind(\"git\"): gitCacher,\n\t})\n\treturn universalTransmat\n}\n\nfunc BestAssembler() integrity.Assembler {\n\tif bestAssembler == nil {\n\t\tbestAssembler = determineBestAssembler()\n\t}\n\treturn bestAssembler\n}\n\nvar bestAssembler integrity.Assembler\n\nfunc determineBestAssembler() integrity.Assembler {\n\tif os.Getuid() != 0 {\n\t\t\/\/ Can't mount without root.\n\t\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: need root privs to use faster systems.\\n\")\n\t\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t}\n\tif os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\/\/ Travis's own virtualization denies mounting. whee.\n\t\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: travis' environment blocks faster systems.\\n\")\n\t\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t}\n\t\/\/ If we *can* mount...\n\tif isAUFSAvailable() {\n\t\t\/\/ if AUFS is installed, AUFS+Bind is The Winner.\n\t\treturn placer.NewAssembler(placer.NewAufsPlacer(filepath.Join(def.Base(), \"aufs\")))\n\t}\n\t\/\/ last fallback... :( copy it is\n\tfmt.Fprintf(os.Stderr, \"WARN: using slow fs assembly system: install AUFS to use faster systems.\\n\")\n\treturn placer.NewAssembler(placer.CopyingPlacer)\n\t\/\/ TODO we should be able to use copy for fallback RW isolator but still bind for RO. write a new placer for that. or really, maybe bind should chain.\n}\n\nfunc isAUFSAvailable() bool {\n\t\/\/ the greatest thing to do would of course just be to issue the syscall once and see if it flies\n\t\/\/ but that's a distrubingly stateful and messy operation so we're gonna check a bunch\n\t\/\/ of next-best-things instead.\n\n\t\/\/ if we can't find it in \/proc\/filesystems, it's probably not installed\n\tif fs, err := ioutil.ReadFile(\"\/proc\/filesystems\"); err == nil {\n\t\tfound := false\n\t\tfsLines := strings.Split(string(fs), \"\\n\")\n\t\tfor _, line := range fsLines {\n\t\t\tparts := strings.Split(line, \"\\t\")\n\t\t\tif len(parts) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif parts[1] == \"aufs\" {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t\/\/ if we can't find it in modprobe, it's probably not installed\n\tmodprobeCode := gosh.Sh(\n\t\t\"modprobe\", \"aufs\",\n\t\tgosh.NullIO,\n\t\tgosh.Opts{OkExit: gosh.AnyExit},\n\t).GetExitCodeSoon(100 * time.Millisecond)\n\tif modprobeCode != 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage expression\n\nimport (\n\t\"time\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/pingcap\/tidb\/ast\"\n\t\"github.com\/pingcap\/tidb\/model\"\n\t\"github.com\/pingcap\/tidb\/mysql\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/util\/codec\"\n\t\"github.com\/pingcap\/tidb\/util\/mock\"\n\t\"github.com\/pingcap\/tidb\/util\/types\"\n\t\"github.com\/pingcap\/tipb\/go-tipb\"\n)\n\nvar distFuncs = map[tipb.ExprType]string{\n\t\/\/ compare op\n\ttipb.ExprType_LT: ast.LT,\n\ttipb.ExprType_LE: ast.LE,\n\ttipb.ExprType_GT: ast.GT,\n\ttipb.ExprType_GE: ast.GE,\n\ttipb.ExprType_EQ: ast.EQ,\n\ttipb.ExprType_NE: ast.NE,\n\ttipb.ExprType_NullEQ: ast.NullEQ,\n\n\t\/\/ bit op\n\ttipb.ExprType_BitAnd: ast.And,\n\ttipb.ExprType_BitOr: ast.Or,\n\ttipb.ExprType_BitXor: ast.Xor,\n\ttipb.ExprType_RighShift: ast.RightShift,\n\ttipb.ExprType_LeftShift: ast.LeftShift,\n\ttipb.ExprType_BitNeg: ast.BitNeg,\n\n\t\/\/ logical op\n\ttipb.ExprType_And: ast.LogicAnd,\n\ttipb.ExprType_Or: ast.LogicOr,\n\ttipb.ExprType_Xor: ast.LogicXor,\n\ttipb.ExprType_Not: ast.UnaryNot,\n\n\t\/\/ arithmetic operator\n\ttipb.ExprType_Plus: ast.Plus,\n\ttipb.ExprType_Minus: ast.Minus,\n\ttipb.ExprType_Mul: ast.Mul,\n\ttipb.ExprType_Div: ast.Div,\n\ttipb.ExprType_IntDiv: ast.IntDiv,\n\ttipb.ExprType_Mod: ast.Mod,\n\n\t\/\/ control operator\n\ttipb.ExprType_Case: ast.Case,\n\ttipb.ExprType_If: ast.If,\n\ttipb.ExprType_IfNull: ast.Ifnull,\n\ttipb.ExprType_NullIf: ast.Nullif,\n\n\t\/\/ other operator\n\ttipb.ExprType_Like: ast.Like,\n\ttipb.ExprType_In: ast.In,\n\ttipb.ExprType_IsNull: ast.IsNull,\n\ttipb.ExprType_Coalesce: ast.Coalesce,\n\n\t\/\/ for json functions.\n\ttipb.ExprType_JsonType: ast.JSONType,\n\ttipb.ExprType_JsonExtract: ast.JSONExtract,\n\ttipb.ExprType_JsonUnquote: ast.JSONUnquote,\n\ttipb.ExprType_JsonMerge: ast.JSONMerge,\n\ttipb.ExprType_JsonSet: ast.JSONSet,\n\ttipb.ExprType_JsonInsert: ast.JSONInsert,\n\ttipb.ExprType_JsonReplace: ast.JSONReplace,\n\ttipb.ExprType_JsonRemove: ast.JSONRemove,\n\ttipb.ExprType_JsonArray: ast.JSONArray,\n\ttipb.ExprType_JsonObject: ast.JSONObject,\n}\n\n\/\/ newDistSQLFunction only creates function for mock-tikv.\nfunc newDistSQLFunction(sc *variable.StatementContext, exprType tipb.ExprType, args []Expression) (Expression, error) {\n\tname, ok := distFuncs[exprType]\n\tif !ok {\n\t\treturn nil, errFunctionNotExists.GenByArgs(exprType)\n\t}\n\t\/\/ TODO: Too ugly...\n\tctx := mock.NewContext()\n\tctx.GetSessionVars().StmtCtx = sc\n\ttp, err := reinferFuncType(sc, name, args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn NewFunction(ctx, name, tp, args...)\n}\n\n\/\/ reinferFuncType re-infer FieldType of ScalarFunction because FieldType information will be lost after ScalarFunction be converted to pb.\n\/\/ reinferFuncType is only used by mock-tikv, the real TiKV do not need to re-infer field type.\n\/\/ This is a temporary solution to make the new type inferer works normally, and will be replaced by passing function signature in the future.\nfunc reinferFuncType(sc *variable.StatementContext, funcName string, args []Expression) (*types.FieldType, error) {\n\tnewArgs := make([]ast.ExprNode, len(args))\n\tfor i, arg := range args {\n\t\tswitch x := arg.(type) {\n\t\tcase *Constant:\n\t\t\tnewArgs[i] = &ast.ValueExpr{}\n\t\t\tnewArgs[i].SetValue(x.Value.GetValue())\n\t\tcase *Column:\n\t\t\tnewArgs[i] = &ast.ColumnNameExpr{\n\t\t\t\tRefer: &ast.ResultField{\n\t\t\t\t\tColumn: &model.ColumnInfo{\n\t\t\t\t\t\tFieldType: *x.GetType(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\tcase *ScalarFunction:\n\t\t\tnewArgs[i] = &ast.FuncCallExpr{FnName: x.FuncName}\n\t\t\t_, err := reinferFuncType(sc, x.FuncName.O, x.GetArgs())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t}\n\tfuncNode := &ast.FuncCallExpr{FnName: model.NewCIStr(funcName), Args: newArgs}\n\terr := InferType(sc, funcNode)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn funcNode.GetType(), nil\n}\n\n\/\/ PBToExpr converts pb structure to expression.\nfunc PBToExpr(expr *tipb.Expr, tps []*types.FieldType, sc *variable.StatementContext) (Expression, error) {\n\tswitch expr.Tp {\n\tcase tipb.ExprType_ColumnRef:\n\t\t_, offset, err := codec.DecodeInt(expr.Val)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treturn &Column{Index: int(offset), RetType: tps[offset]}, nil\n\tcase tipb.ExprType_Null:\n\t\treturn &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)}, nil\n\tcase tipb.ExprType_Int64:\n\t\treturn convertInt(expr.Val)\n\tcase tipb.ExprType_Uint64:\n\t\treturn convertUint(expr.Val)\n\tcase tipb.ExprType_String:\n\t\treturn convertString(expr.Val)\n\tcase tipb.ExprType_Bytes:\n\t\treturn &Constant{Value: types.NewBytesDatum(expr.Val), RetType: types.NewFieldType(mysql.TypeString)}, nil\n\tcase tipb.ExprType_Float32:\n\t\treturn convertFloat(expr.Val, true)\n\tcase tipb.ExprType_Float64:\n\t\treturn convertFloat(expr.Val, false)\n\tcase tipb.ExprType_MysqlDecimal:\n\t\treturn convertDecimal(expr.Val)\n\tcase tipb.ExprType_MysqlDuration:\n\t\treturn convertDuration(expr.Val)\n\t}\n\t\/\/ Then it must be a scalar function.\n\targs := make([]Expression, 0, len(expr.Children))\n\tfor _, child := range expr.Children {\n\t\tif child.Tp == tipb.ExprType_ValueList {\n\t\t\tresults, err := decodeValueList(child.Val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif len(results) == 0 {\n\t\t\t\treturn &Constant{Value: types.NewDatum(false), RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n\t\t\t}\n\t\t\targs = append(args, results...)\n\t\t\tcontinue\n\t\t}\n\t\targ, err := PBToExpr(child, tps, sc)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\treturn newDistSQLFunction(sc, expr.Tp, args)\n}\n\nfunc decodeValueList(data []byte) ([]Expression, error) {\n\tif len(data) == 0 {\n\t\treturn nil, nil\n\t}\n\tlist, err := codec.Decode(data, 1)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresult := make([]Expression, 0, len(list))\n\tfor _, value := range list {\n\t\tresult = append(result, &Constant{Value: value})\n\t}\n\treturn result, nil\n}\n\nfunc convertInt(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, i, err := codec.DecodeInt(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid int % x\", val)\n\t}\n\td.SetInt64(i)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n}\n\nfunc convertUint(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, u, err := codec.DecodeUint(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid uint % x\", val)\n\t}\n\td.SetUint64(u)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n}\n\nfunc convertString(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\td.SetBytesAsString(val)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeVarString)}, nil\n}\n\nfunc convertFloat(val []byte, f32 bool) (*Constant, error) {\n\tvar d types.Datum\n\t_, f, err := codec.DecodeFloat(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid float % x\", val)\n\t}\n\tif f32 {\n\t\td.SetFloat32(float32(f))\n\t} else {\n\t\td.SetFloat64(f)\n\t}\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeDouble)}, nil\n}\n\nfunc convertDecimal(val []byte) (*Constant, error) {\n\t_, dec, err := codec.DecodeDecimal(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid decimal % x\", val)\n\t}\n\treturn &Constant{Value: dec, RetType: types.NewFieldType(mysql.TypeNewDecimal)}, nil\n}\n\nfunc convertDuration(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, i, err := codec.DecodeInt(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid duration %d\", i)\n\t}\n\td.SetMysqlDuration(types.Duration{Duration: time.Duration(i), Fsp: types.MaxFsp})\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeDuration)}, nil\n}\n<commit_msg>expression: clean code. (#4223)<commit_after>\/\/ Copyright 2017 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage expression\n\nimport (\n\t\"time\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/pingcap\/tidb\/ast\"\n\t\"github.com\/pingcap\/tidb\/mysql\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/util\/codec\"\n\t\"github.com\/pingcap\/tidb\/util\/mock\"\n\t\"github.com\/pingcap\/tidb\/util\/types\"\n\t\"github.com\/pingcap\/tipb\/go-tipb\"\n)\n\nvar distFuncs = map[tipb.ExprType]string{\n\t\/\/ compare op\n\ttipb.ExprType_LT: ast.LT,\n\ttipb.ExprType_LE: ast.LE,\n\ttipb.ExprType_GT: ast.GT,\n\ttipb.ExprType_GE: ast.GE,\n\ttipb.ExprType_EQ: ast.EQ,\n\ttipb.ExprType_NE: ast.NE,\n\ttipb.ExprType_NullEQ: ast.NullEQ,\n\n\t\/\/ bit op\n\ttipb.ExprType_BitAnd: ast.And,\n\ttipb.ExprType_BitOr: ast.Or,\n\ttipb.ExprType_BitXor: ast.Xor,\n\ttipb.ExprType_RighShift: ast.RightShift,\n\ttipb.ExprType_LeftShift: ast.LeftShift,\n\ttipb.ExprType_BitNeg: ast.BitNeg,\n\n\t\/\/ logical op\n\ttipb.ExprType_And: ast.LogicAnd,\n\ttipb.ExprType_Or: ast.LogicOr,\n\ttipb.ExprType_Xor: ast.LogicXor,\n\ttipb.ExprType_Not: ast.UnaryNot,\n\n\t\/\/ arithmetic operator\n\ttipb.ExprType_Plus: ast.Plus,\n\ttipb.ExprType_Minus: ast.Minus,\n\ttipb.ExprType_Mul: ast.Mul,\n\ttipb.ExprType_Div: ast.Div,\n\ttipb.ExprType_IntDiv: ast.IntDiv,\n\ttipb.ExprType_Mod: ast.Mod,\n\n\t\/\/ control operator\n\ttipb.ExprType_Case: ast.Case,\n\ttipb.ExprType_If: ast.If,\n\ttipb.ExprType_IfNull: ast.Ifnull,\n\ttipb.ExprType_NullIf: ast.Nullif,\n\n\t\/\/ other operator\n\ttipb.ExprType_Like: ast.Like,\n\ttipb.ExprType_In: ast.In,\n\ttipb.ExprType_IsNull: ast.IsNull,\n\ttipb.ExprType_Coalesce: ast.Coalesce,\n\n\t\/\/ for json functions.\n\ttipb.ExprType_JsonType: ast.JSONType,\n\ttipb.ExprType_JsonExtract: ast.JSONExtract,\n\ttipb.ExprType_JsonUnquote: ast.JSONUnquote,\n\ttipb.ExprType_JsonMerge: ast.JSONMerge,\n\ttipb.ExprType_JsonSet: ast.JSONSet,\n\ttipb.ExprType_JsonInsert: ast.JSONInsert,\n\ttipb.ExprType_JsonReplace: ast.JSONReplace,\n\ttipb.ExprType_JsonRemove: ast.JSONRemove,\n\ttipb.ExprType_JsonArray: ast.JSONArray,\n\ttipb.ExprType_JsonObject: ast.JSONObject,\n}\n\n\/\/ newDistSQLFunction only creates function for mock-tikv.\nfunc newDistSQLFunction(sc *variable.StatementContext, exprType tipb.ExprType, args []Expression) (Expression, error) {\n\tname, ok := distFuncs[exprType]\n\tif !ok {\n\t\treturn nil, errFunctionNotExists.GenByArgs(exprType)\n\t}\n\t\/\/ TODO: Too ugly...\n\tctx := mock.NewContext()\n\tctx.GetSessionVars().StmtCtx = sc\n\treturn NewFunction(ctx, name, types.NewFieldType(mysql.TypeUnspecified), args...)\n}\n\n\/\/ PBToExpr converts pb structure to expression.\nfunc PBToExpr(expr *tipb.Expr, tps []*types.FieldType, sc *variable.StatementContext) (Expression, error) {\n\tswitch expr.Tp {\n\tcase tipb.ExprType_ColumnRef:\n\t\t_, offset, err := codec.DecodeInt(expr.Val)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treturn &Column{Index: int(offset), RetType: tps[offset]}, nil\n\tcase tipb.ExprType_Null:\n\t\treturn &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)}, nil\n\tcase tipb.ExprType_Int64:\n\t\treturn convertInt(expr.Val)\n\tcase tipb.ExprType_Uint64:\n\t\treturn convertUint(expr.Val)\n\tcase tipb.ExprType_String:\n\t\treturn convertString(expr.Val)\n\tcase tipb.ExprType_Bytes:\n\t\treturn &Constant{Value: types.NewBytesDatum(expr.Val), RetType: types.NewFieldType(mysql.TypeString)}, nil\n\tcase tipb.ExprType_Float32:\n\t\treturn convertFloat(expr.Val, true)\n\tcase tipb.ExprType_Float64:\n\t\treturn convertFloat(expr.Val, false)\n\tcase tipb.ExprType_MysqlDecimal:\n\t\treturn convertDecimal(expr.Val)\n\tcase tipb.ExprType_MysqlDuration:\n\t\treturn convertDuration(expr.Val)\n\t}\n\t\/\/ Then it must be a scalar function.\n\targs := make([]Expression, 0, len(expr.Children))\n\tfor _, child := range expr.Children {\n\t\tif child.Tp == tipb.ExprType_ValueList {\n\t\t\tresults, err := decodeValueList(child.Val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tif len(results) == 0 {\n\t\t\t\treturn &Constant{Value: types.NewDatum(false), RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n\t\t\t}\n\t\t\targs = append(args, results...)\n\t\t\tcontinue\n\t\t}\n\t\targ, err := PBToExpr(child, tps, sc)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\treturn newDistSQLFunction(sc, expr.Tp, args)\n}\n\nfunc decodeValueList(data []byte) ([]Expression, error) {\n\tif len(data) == 0 {\n\t\treturn nil, nil\n\t}\n\tlist, err := codec.Decode(data, 1)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tresult := make([]Expression, 0, len(list))\n\tfor _, value := range list {\n\t\tresult = append(result, &Constant{Value: value})\n\t}\n\treturn result, nil\n}\n\nfunc convertInt(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, i, err := codec.DecodeInt(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid int % x\", val)\n\t}\n\td.SetInt64(i)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n}\n\nfunc convertUint(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, u, err := codec.DecodeUint(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid uint % x\", val)\n\t}\n\td.SetUint64(u)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeLonglong)}, nil\n}\n\nfunc convertString(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\td.SetBytesAsString(val)\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeVarString)}, nil\n}\n\nfunc convertFloat(val []byte, f32 bool) (*Constant, error) {\n\tvar d types.Datum\n\t_, f, err := codec.DecodeFloat(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid float % x\", val)\n\t}\n\tif f32 {\n\t\td.SetFloat32(float32(f))\n\t} else {\n\t\td.SetFloat64(f)\n\t}\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeDouble)}, nil\n}\n\nfunc convertDecimal(val []byte) (*Constant, error) {\n\t_, dec, err := codec.DecodeDecimal(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid decimal % x\", val)\n\t}\n\treturn &Constant{Value: dec, RetType: types.NewFieldType(mysql.TypeNewDecimal)}, nil\n}\n\nfunc convertDuration(val []byte) (*Constant, error) {\n\tvar d types.Datum\n\t_, i, err := codec.DecodeInt(val)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid duration %d\", i)\n\t}\n\td.SetMysqlDuration(types.Duration{Duration: time.Duration(i), Fsp: types.MaxFsp})\n\treturn &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeDuration)}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3\/src\/dbnode\/clock\"\n\t\"github.com\/m3db\/m3\/src\/dbnode\/storage\/bootstrap\"\n\txerrors \"github.com\/m3db\/m3\/src\/x\/errors\"\n\t\"github.com\/m3db\/m3\/src\/x\/instrument\"\n\n\t\"github.com\/uber-go\/tally\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nvar (\n\t\/\/ errNamespaceIsBootstrapping raised when trying to bootstrap a namespace that's being bootstrapped.\n\terrNamespaceIsBootstrapping = errors.New(\"namespace is bootstrapping\")\n\n\t\/\/ errNamespaceNotBootstrapped raised when trying to flush\/snapshot data for a namespace that's not yet bootstrapped.\n\terrNamespaceNotBootstrapped = errors.New(\"namespace is not yet bootstrapped\")\n\n\t\/\/ errShardIsBootstrapping raised when trying to bootstrap a shard that's being bootstrapped.\n\terrShardIsBootstrapping = errors.New(\"shard is bootstrapping\")\n\n\t\/\/ errShardNotBootstrappedToFlush raised when trying to flush data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToFlush = errors.New(\"shard is not yet bootstrapped to flush\")\n\n\t\/\/ errShardNotBootstrappedToSnapshot raised when trying to snapshot data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToSnapshot = errors.New(\"shard is not yet bootstrapped to snapshot\")\n\n\t\/\/ errShardNotBootstrappedToRead raised when trying to read data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToRead = errors.New(\"shard is not yet bootstrapped to read\")\n\n\t\/\/ errIndexNotBootstrappedToRead raised when trying to read the index before being bootstrapped.\n\terrIndexNotBootstrappedToRead = errors.New(\"index is not yet bootstrapped to read\")\n\n\t\/\/ errBootstrapEnqueued raised when trying to bootstrap and bootstrap becomes enqueued.\n\terrBootstrapEnqueued = errors.New(\"database bootstrapping enqueued bootstrap\")\n)\n\nconst (\n\tbootstrapRetryInterval = 10 * time.Second\n)\n\ntype bootstrapFn func() error\n\ntype bootstrapManager struct {\n\tsync.RWMutex\n\n\tdatabase database\n\tmediator databaseMediator\n\topts Options\n\tlog *zap.Logger\n\tbootstrapFn bootstrapFn\n\tnowFn clock.NowFn\n\tsleepFn sleepFn\n\tprocessProvider bootstrap.ProcessProvider\n\tstate BootstrapState\n\thasPending bool\n\tstatus tally.Gauge\n\tbootstrapDuration tally.Timer\n\tlastBootstrapCompletionTime time.Time\n}\n\nfunc newBootstrapManager(\n\tdatabase database,\n\tmediator databaseMediator,\n\topts Options,\n) databaseBootstrapManager {\n\tscope := opts.InstrumentOptions().MetricsScope()\n\tm := &bootstrapManager{\n\t\tdatabase: database,\n\t\tmediator: mediator,\n\t\topts: opts,\n\t\tlog: opts.InstrumentOptions().Logger(),\n\t\tnowFn: opts.ClockOptions().NowFn(),\n\t\tsleepFn: time.Sleep,\n\t\tprocessProvider: opts.BootstrapProcessProvider(),\n\t\tstatus: scope.Gauge(\"bootstrapped\"),\n\t\tbootstrapDuration: scope.Timer(\"bootstrap-duration\"),\n\t}\n\tm.bootstrapFn = m.bootstrap\n\treturn m\n}\n\nfunc (m *bootstrapManager) IsBootstrapped() bool {\n\tm.RLock()\n\tstate := m.state\n\tm.RUnlock()\n\treturn state == Bootstrapped\n}\n\nfunc (m *bootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) {\n\treturn m.lastBootstrapCompletionTime, !m.lastBootstrapCompletionTime.IsZero()\n}\n\nfunc (m *bootstrapManager) Bootstrap() (BootstrapResult, error) {\n\tm.Lock()\n\tswitch m.state {\n\tcase Bootstrapping:\n\t\t\/\/ NB(r): Already bootstrapping, now a consequent bootstrap\n\t\t\/\/ request comes in - we queue this up to bootstrap again\n\t\t\/\/ once the current bootstrap has completed.\n\t\t\/\/ This is an edge case that can occur if during either an\n\t\t\/\/ initial bootstrap or a resharding bootstrap if a new\n\t\t\/\/ reshard occurs and we need to bootstrap more shards.\n\t\tm.hasPending = true\n\t\tm.Unlock()\n\t\treturn BootstrapResult{AlreadyBootstrapping: true}, errBootstrapEnqueued\n\tdefault:\n\t\tm.state = Bootstrapping\n\t}\n\tm.Unlock()\n\n\t\/\/ NB(xichen): disable filesystem manager before we bootstrap to minimize\n\t\/\/ the impact of file operations on bootstrapping performance\n\tm.mediator.DisableFileOps()\n\tdefer m.mediator.EnableFileOps()\n\n\t\/\/ Keep performing bootstraps until none pending and no error returned.\n\tvar result BootstrapResult\n\tfor i := 0; true; i++ {\n\t\t\/\/ NB(r): Decouple implementation of bootstrap so can override in tests.\n\t\tbootstrapErr := m.bootstrapFn()\n\t\tif bootstrapErr != nil {\n\t\t\tresult.ErrorsBootstrap = append(result.ErrorsBootstrap, bootstrapErr)\n\t\t}\n\n\t\tm.Lock()\n\t\tcurrPending := m.hasPending\n\t\tif currPending {\n\t\t\t\/\/ New bootstrap calls should now enqueue another pending bootstrap\n\t\t\tm.hasPending = false\n\t\t} else {\n\t\t\tm.state = Bootstrapped\n\t\t}\n\t\tm.Unlock()\n\n\t\tif currPending {\n\t\t\t\/\/ NB(r): Requires another bootstrap.\n\t\t\tcontinue\n\t\t}\n\n\t\tif bootstrapErr != nil {\n\t\t\t\/\/ NB(r): Last bootstrap failed, since this could be due to transient\n\t\t\t\/\/ failure we retry the bootstrap again. This is to avoid operators\n\t\t\t\/\/ needing to manually intervene for cases where failures are transient.\n\t\t\tm.log.Warn(\"retrying bootstrap after backoff\",\n\t\t\t\tzap.Duration(\"backoff\", bootstrapRetryInterval),\n\t\t\t\tzap.Int(\"numRetries\", i+1))\n\t\t\tm.sleepFn(bootstrapRetryInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ No pending bootstraps and last finished successfully.\n\t\tbreak\n\t}\n\n\t\/\/ NB(xichen): in order for bootstrapped data to be flushed to disk, a tick\n\t\/\/ needs to happen to drain the in-memory buffers and a consequent flush will\n\t\/\/ flush all the data onto disk. However, this has shown to be too intensive\n\t\/\/ to do immediately after bootstrap due to bootstrapping nodes simultaneously\n\t\/\/ attempting to tick through their series and flushing data, adding significant\n\t\/\/ load to the cluster. It turns out to be better to let ticking happen naturally\n\t\/\/ on its own course so that the load of ticking and flushing is more spread out\n\t\/\/ across the cluster.\n\n\tm.lastBootstrapCompletionTime = m.nowFn()\n\treturn result, nil\n}\n\nfunc (m *bootstrapManager) Report() {\n\tif m.IsBootstrapped() {\n\t\tm.status.Update(1)\n\t} else {\n\t\tm.status.Update(0)\n\t}\n}\n\ntype bootstrapNamespace struct {\n\tnamespace databaseNamespace\n\tshards []databaseShard\n}\n\nfunc (m *bootstrapManager) bootstrap() error {\n\t\/\/ NB(r): construct new instance of the bootstrap process to avoid\n\t\/\/ state being kept around by bootstrappers.\n\tprocess, err := m.processProvider.Provide()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamespaces, err := m.database.GetOwnedNamespaces()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taccmulators := make([]bootstrap.NamespaceDataAccumulator, 0, len(namespaces))\n\tdefer func() {\n\t\t\/\/ Close all accumulators at bootstrap completion, only error\n\t\t\/\/ it returns is if already closed, so this is a code bug if ever\n\t\t\/\/ an error returned.\n\t\tfor _, accumulator := range accmulators {\n\t\t\tif err := accumulator.Close(); err != nil {\n\t\t\t\tinstrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),\n\t\t\t\t\tfunc(l *zap.Logger) {\n\t\t\t\t\t\tl.Error(\"could not close bootstrap data accumulator\",\n\t\t\t\t\t\t\tzap.Error(err))\n\t\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}()\n\n\tstart := m.nowFn()\n\tm.log.Info(\"bootstrap prepare\")\n\n\tvar (\n\t\tbootstrapNamespaces = make([]bootstrapNamespace, len(namespaces))\n\t\tprepareWg sync.WaitGroup\n\t\tprepareLock sync.Mutex\n\t\tprepareMultiErr xerrors.MultiError\n\t)\n\tfor i, namespace := range namespaces {\n\t\ti, namespace := i, namespace\n\t\tprepareWg.Add(1)\n\t\tgo func() {\n\t\t\tshards, err := namespace.PrepareBootstrap()\n\n\t\t\tprepareLock.Lock()\n\t\t\tdefer func() {\n\t\t\t\tprepareLock.Unlock()\n\t\t\t\tprepareWg.Done()\n\t\t\t}()\n\n\t\t\tif err != nil {\n\t\t\t\tprepareMultiErr = prepareMultiErr.Add(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbootstrapNamespaces[i] = bootstrapNamespace{\n\t\t\t\tnamespace: namespace,\n\t\t\t\tshards: shards,\n\t\t\t}\n\t\t}()\n\t}\n\n\tprepareWg.Wait()\n\n\tif err := prepareMultiErr.FinalError(); err != nil {\n\t\tm.log.Error(\"bootstrap prepare failed\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tvar uniqueShards map[uint32]struct{}\n\ttargets := make([]bootstrap.ProcessNamespace, 0, len(namespaces))\n\tfor _, ns := range bootstrapNamespaces {\n\t\tbootstrapShards := make([]uint32, 0, len(ns.shards))\n\t\tif uniqueShards == nil {\n\t\t\tuniqueShards = make(map[uint32]struct{}, len(ns.shards))\n\t\t}\n\n\t\tfor _, shard := range ns.shards {\n\t\t\tif shard.IsBootstrapped() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tuniqueShards[shard.ID()] = struct{}{}\n\t\t\tbootstrapShards = append(bootstrapShards, shard.ID())\n\t\t}\n\n\t\t\/\/ Add hooks so that each bootstrapper when it interacts\n\t\t\/\/ with the namespace and shards during data accumulation\n\t\t\/\/ gets an up to date view of all the file volumes that\n\t\t\/\/ actually exist on disk (since bootstrappers can write\n\t\t\/\/ new blocks to disk).\n\t\thooks := bootstrap.NewNamespaceHooks(bootstrap.NamespaceHooksOptions{\n\t\t\tBootstrapSourceEnd: func() error {\n\t\t\t\tvar wg sync.WaitGroup\n\t\t\t\tfor _, shard := range ns.shards {\n\t\t\t\t\tshard := shard\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tshard.UpdateFlushStates()\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\twg.Wait()\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\n\t\taccumulator := NewDatabaseNamespaceDataAccumulator(ns.namespace)\n\t\taccmulators = append(accmulators, accumulator)\n\n\t\ttargets = append(targets, bootstrap.ProcessNamespace{\n\t\t\tMetadata: ns.namespace.Metadata(),\n\t\t\tShards: bootstrapShards,\n\t\t\tHooks: hooks,\n\t\t\tDataAccumulator: accumulator,\n\t\t})\n\t}\n\n\tlogFields := []zapcore.Field{\n\t\tzap.Int(\"numShards\", len(uniqueShards)),\n\t}\n\tm.log.Info(\"bootstrap started\", logFields...)\n\n\t\/\/ Run the bootstrap.\n\tbootstrapResult, err := process.Run(start, targets)\n\n\tbootstrapDuration := m.nowFn().Sub(start)\n\tm.bootstrapDuration.Record(bootstrapDuration)\n\tlogFields = append(logFields,\n\t\tzap.Duration(\"bootstrapDuration\", bootstrapDuration))\n\n\tif err != nil {\n\t\tm.log.Error(\"bootstrap failed\",\n\t\t\tappend(logFields, zap.Error(err))...)\n\t\treturn err\n\t}\n\n\tm.log.Info(\"bootstrap succeeded, marking namespaces complete\", logFields...)\n\t\/\/ Use a multi-error here because we want to at least bootstrap\n\t\/\/ as many of the namespaces as possible.\n\tmultiErr := xerrors.NewMultiError()\n\tfor _, namespace := range namespaces {\n\t\tid := namespace.ID()\n\t\tresult, ok := bootstrapResult.Results.Get(id)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"missing namespace from bootstrap result: %v\",\n\t\t\t\tid.String())\n\t\t\ti := m.opts.InstrumentOptions()\n\t\t\tinstrument.EmitAndLogInvariantViolation(i, func(l *zap.Logger) {\n\t\t\t\tl.Error(\"bootstrap failed\",\n\t\t\t\t\tappend(logFields, zap.Error(err))...)\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\n\t\tif err := namespace.Bootstrap(result); err != nil {\n\t\t\tm.log.Info(\"bootstrap error\", append(logFields, []zapcore.Field{\n\t\t\t\tzap.String(\"namespace\", id.String()),\n\t\t\t\tzap.Error(err),\n\t\t\t}...)...)\n\t\t\tmultiErr = multiErr.Add(err)\n\t\t}\n\t}\n\n\tif err := multiErr.FinalError(); err != nil {\n\t\tm.log.Info(\"bootstrap namespaces failed\",\n\t\t\tappend(logFields, zap.Error(err))...)\n\t\treturn err\n\t}\n\n\tm.log.Info(\"bootstrap success\", logFields...)\n\treturn nil\n}\n<commit_msg>Add metric for BootstrappedAndDurable (#2210)<commit_after>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage storage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3\/src\/dbnode\/clock\"\n\t\"github.com\/m3db\/m3\/src\/dbnode\/storage\/bootstrap\"\n\txerrors \"github.com\/m3db\/m3\/src\/x\/errors\"\n\t\"github.com\/m3db\/m3\/src\/x\/instrument\"\n\n\t\"github.com\/uber-go\/tally\"\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\nvar (\n\t\/\/ errNamespaceIsBootstrapping raised when trying to bootstrap a namespace that's being bootstrapped.\n\terrNamespaceIsBootstrapping = errors.New(\"namespace is bootstrapping\")\n\n\t\/\/ errNamespaceNotBootstrapped raised when trying to flush\/snapshot data for a namespace that's not yet bootstrapped.\n\terrNamespaceNotBootstrapped = errors.New(\"namespace is not yet bootstrapped\")\n\n\t\/\/ errShardIsBootstrapping raised when trying to bootstrap a shard that's being bootstrapped.\n\terrShardIsBootstrapping = errors.New(\"shard is bootstrapping\")\n\n\t\/\/ errShardNotBootstrappedToFlush raised when trying to flush data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToFlush = errors.New(\"shard is not yet bootstrapped to flush\")\n\n\t\/\/ errShardNotBootstrappedToSnapshot raised when trying to snapshot data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToSnapshot = errors.New(\"shard is not yet bootstrapped to snapshot\")\n\n\t\/\/ errShardNotBootstrappedToRead raised when trying to read data for a shard that's not yet bootstrapped.\n\terrShardNotBootstrappedToRead = errors.New(\"shard is not yet bootstrapped to read\")\n\n\t\/\/ errIndexNotBootstrappedToRead raised when trying to read the index before being bootstrapped.\n\terrIndexNotBootstrappedToRead = errors.New(\"index is not yet bootstrapped to read\")\n\n\t\/\/ errBootstrapEnqueued raised when trying to bootstrap and bootstrap becomes enqueued.\n\terrBootstrapEnqueued = errors.New(\"database bootstrapping enqueued bootstrap\")\n)\n\nconst (\n\tbootstrapRetryInterval = 10 * time.Second\n)\n\ntype bootstrapFn func() error\n\ntype bootstrapManager struct {\n\tsync.RWMutex\n\n\tdatabase database\n\tmediator databaseMediator\n\topts Options\n\tlog *zap.Logger\n\tbootstrapFn bootstrapFn\n\tnowFn clock.NowFn\n\tsleepFn sleepFn\n\tprocessProvider bootstrap.ProcessProvider\n\tstate BootstrapState\n\thasPending bool\n\tstatus tally.Gauge\n\tbootstrapDuration tally.Timer\n\tdurableStatus tally.Gauge\n\tlastBootstrapCompletionTime time.Time\n}\n\nfunc newBootstrapManager(\n\tdatabase database,\n\tmediator databaseMediator,\n\topts Options,\n) databaseBootstrapManager {\n\tscope := opts.InstrumentOptions().MetricsScope()\n\tm := &bootstrapManager{\n\t\tdatabase: database,\n\t\tmediator: mediator,\n\t\topts: opts,\n\t\tlog: opts.InstrumentOptions().Logger(),\n\t\tnowFn: opts.ClockOptions().NowFn(),\n\t\tsleepFn: time.Sleep,\n\t\tprocessProvider: opts.BootstrapProcessProvider(),\n\t\tstatus: scope.Gauge(\"bootstrapped\"),\n\t\tbootstrapDuration: scope.Timer(\"bootstrap-duration\"),\n\t\tdurableStatus: scope.Gauge(\"bootstrapped-durable\"),\n\t}\n\tm.bootstrapFn = m.bootstrap\n\treturn m\n}\n\nfunc (m *bootstrapManager) IsBootstrapped() bool {\n\tm.RLock()\n\tstate := m.state\n\tm.RUnlock()\n\treturn state == Bootstrapped\n}\n\nfunc (m *bootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) {\n\treturn m.lastBootstrapCompletionTime, !m.lastBootstrapCompletionTime.IsZero()\n}\n\nfunc (m *bootstrapManager) Bootstrap() (BootstrapResult, error) {\n\tm.Lock()\n\tswitch m.state {\n\tcase Bootstrapping:\n\t\t\/\/ NB(r): Already bootstrapping, now a consequent bootstrap\n\t\t\/\/ request comes in - we queue this up to bootstrap again\n\t\t\/\/ once the current bootstrap has completed.\n\t\t\/\/ This is an edge case that can occur if during either an\n\t\t\/\/ initial bootstrap or a resharding bootstrap if a new\n\t\t\/\/ reshard occurs and we need to bootstrap more shards.\n\t\tm.hasPending = true\n\t\tm.Unlock()\n\t\treturn BootstrapResult{AlreadyBootstrapping: true}, errBootstrapEnqueued\n\tdefault:\n\t\tm.state = Bootstrapping\n\t}\n\tm.Unlock()\n\n\t\/\/ NB(xichen): disable filesystem manager before we bootstrap to minimize\n\t\/\/ the impact of file operations on bootstrapping performance\n\tm.mediator.DisableFileOps()\n\tdefer m.mediator.EnableFileOps()\n\n\t\/\/ Keep performing bootstraps until none pending and no error returned.\n\tvar result BootstrapResult\n\tfor i := 0; true; i++ {\n\t\t\/\/ NB(r): Decouple implementation of bootstrap so can override in tests.\n\t\tbootstrapErr := m.bootstrapFn()\n\t\tif bootstrapErr != nil {\n\t\t\tresult.ErrorsBootstrap = append(result.ErrorsBootstrap, bootstrapErr)\n\t\t}\n\n\t\tm.Lock()\n\t\tcurrPending := m.hasPending\n\t\tif currPending {\n\t\t\t\/\/ New bootstrap calls should now enqueue another pending bootstrap\n\t\t\tm.hasPending = false\n\t\t} else {\n\t\t\tm.state = Bootstrapped\n\t\t}\n\t\tm.Unlock()\n\n\t\tif currPending {\n\t\t\t\/\/ NB(r): Requires another bootstrap.\n\t\t\tcontinue\n\t\t}\n\n\t\tif bootstrapErr != nil {\n\t\t\t\/\/ NB(r): Last bootstrap failed, since this could be due to transient\n\t\t\t\/\/ failure we retry the bootstrap again. This is to avoid operators\n\t\t\t\/\/ needing to manually intervene for cases where failures are transient.\n\t\t\tm.log.Warn(\"retrying bootstrap after backoff\",\n\t\t\t\tzap.Duration(\"backoff\", bootstrapRetryInterval),\n\t\t\t\tzap.Int(\"numRetries\", i+1))\n\t\t\tm.sleepFn(bootstrapRetryInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ No pending bootstraps and last finished successfully.\n\t\tbreak\n\t}\n\n\t\/\/ NB(xichen): in order for bootstrapped data to be flushed to disk, a tick\n\t\/\/ needs to happen to drain the in-memory buffers and a consequent flush will\n\t\/\/ flush all the data onto disk. However, this has shown to be too intensive\n\t\/\/ to do immediately after bootstrap due to bootstrapping nodes simultaneously\n\t\/\/ attempting to tick through their series and flushing data, adding significant\n\t\/\/ load to the cluster. It turns out to be better to let ticking happen naturally\n\t\/\/ on its own course so that the load of ticking and flushing is more spread out\n\t\/\/ across the cluster.\n\n\tm.lastBootstrapCompletionTime = m.nowFn()\n\treturn result, nil\n}\n\nfunc (m *bootstrapManager) Report() {\n\tif m.IsBootstrapped() {\n\t\tm.status.Update(1)\n\t} else {\n\t\tm.status.Update(0)\n\t}\n\n\tif m.database.IsBootstrappedAndDurable() {\n\t\tm.durableStatus.Update(1)\n\t} else {\n\t\tm.durableStatus.Update(0)\n\t}\n}\n\ntype bootstrapNamespace struct {\n\tnamespace databaseNamespace\n\tshards []databaseShard\n}\n\nfunc (m *bootstrapManager) bootstrap() error {\n\t\/\/ NB(r): construct new instance of the bootstrap process to avoid\n\t\/\/ state being kept around by bootstrappers.\n\tprocess, err := m.processProvider.Provide()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamespaces, err := m.database.GetOwnedNamespaces()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taccmulators := make([]bootstrap.NamespaceDataAccumulator, 0, len(namespaces))\n\tdefer func() {\n\t\t\/\/ Close all accumulators at bootstrap completion, only error\n\t\t\/\/ it returns is if already closed, so this is a code bug if ever\n\t\t\/\/ an error returned.\n\t\tfor _, accumulator := range accmulators {\n\t\t\tif err := accumulator.Close(); err != nil {\n\t\t\t\tinstrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),\n\t\t\t\t\tfunc(l *zap.Logger) {\n\t\t\t\t\t\tl.Error(\"could not close bootstrap data accumulator\",\n\t\t\t\t\t\t\tzap.Error(err))\n\t\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}()\n\n\tstart := m.nowFn()\n\tm.log.Info(\"bootstrap prepare\")\n\n\tvar (\n\t\tbootstrapNamespaces = make([]bootstrapNamespace, len(namespaces))\n\t\tprepareWg sync.WaitGroup\n\t\tprepareLock sync.Mutex\n\t\tprepareMultiErr xerrors.MultiError\n\t)\n\tfor i, namespace := range namespaces {\n\t\ti, namespace := i, namespace\n\t\tprepareWg.Add(1)\n\t\tgo func() {\n\t\t\tshards, err := namespace.PrepareBootstrap()\n\n\t\t\tprepareLock.Lock()\n\t\t\tdefer func() {\n\t\t\t\tprepareLock.Unlock()\n\t\t\t\tprepareWg.Done()\n\t\t\t}()\n\n\t\t\tif err != nil {\n\t\t\t\tprepareMultiErr = prepareMultiErr.Add(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbootstrapNamespaces[i] = bootstrapNamespace{\n\t\t\t\tnamespace: namespace,\n\t\t\t\tshards: shards,\n\t\t\t}\n\t\t}()\n\t}\n\n\tprepareWg.Wait()\n\n\tif err := prepareMultiErr.FinalError(); err != nil {\n\t\tm.log.Error(\"bootstrap prepare failed\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tvar uniqueShards map[uint32]struct{}\n\ttargets := make([]bootstrap.ProcessNamespace, 0, len(namespaces))\n\tfor _, ns := range bootstrapNamespaces {\n\t\tbootstrapShards := make([]uint32, 0, len(ns.shards))\n\t\tif uniqueShards == nil {\n\t\t\tuniqueShards = make(map[uint32]struct{}, len(ns.shards))\n\t\t}\n\n\t\tfor _, shard := range ns.shards {\n\t\t\tif shard.IsBootstrapped() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tuniqueShards[shard.ID()] = struct{}{}\n\t\t\tbootstrapShards = append(bootstrapShards, shard.ID())\n\t\t}\n\n\t\t\/\/ Add hooks so that each bootstrapper when it interacts\n\t\t\/\/ with the namespace and shards during data accumulation\n\t\t\/\/ gets an up to date view of all the file volumes that\n\t\t\/\/ actually exist on disk (since bootstrappers can write\n\t\t\/\/ new blocks to disk).\n\t\thooks := bootstrap.NewNamespaceHooks(bootstrap.NamespaceHooksOptions{\n\t\t\tBootstrapSourceEnd: func() error {\n\t\t\t\tvar wg sync.WaitGroup\n\t\t\t\tfor _, shard := range ns.shards {\n\t\t\t\t\tshard := shard\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tshard.UpdateFlushStates()\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\twg.Wait()\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\n\t\taccumulator := NewDatabaseNamespaceDataAccumulator(ns.namespace)\n\t\taccmulators = append(accmulators, accumulator)\n\n\t\ttargets = append(targets, bootstrap.ProcessNamespace{\n\t\t\tMetadata: ns.namespace.Metadata(),\n\t\t\tShards: bootstrapShards,\n\t\t\tHooks: hooks,\n\t\t\tDataAccumulator: accumulator,\n\t\t})\n\t}\n\n\tlogFields := []zapcore.Field{\n\t\tzap.Int(\"numShards\", len(uniqueShards)),\n\t}\n\tm.log.Info(\"bootstrap started\", logFields...)\n\n\t\/\/ Run the bootstrap.\n\tbootstrapResult, err := process.Run(start, targets)\n\n\tbootstrapDuration := m.nowFn().Sub(start)\n\tm.bootstrapDuration.Record(bootstrapDuration)\n\tlogFields = append(logFields,\n\t\tzap.Duration(\"bootstrapDuration\", bootstrapDuration))\n\n\tif err != nil {\n\t\tm.log.Error(\"bootstrap failed\",\n\t\t\tappend(logFields, zap.Error(err))...)\n\t\treturn err\n\t}\n\n\tm.log.Info(\"bootstrap succeeded, marking namespaces complete\", logFields...)\n\t\/\/ Use a multi-error here because we want to at least bootstrap\n\t\/\/ as many of the namespaces as possible.\n\tmultiErr := xerrors.NewMultiError()\n\tfor _, namespace := range namespaces {\n\t\tid := namespace.ID()\n\t\tresult, ok := bootstrapResult.Results.Get(id)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"missing namespace from bootstrap result: %v\",\n\t\t\t\tid.String())\n\t\t\ti := m.opts.InstrumentOptions()\n\t\t\tinstrument.EmitAndLogInvariantViolation(i, func(l *zap.Logger) {\n\t\t\t\tl.Error(\"bootstrap failed\",\n\t\t\t\t\tappend(logFields, zap.Error(err))...)\n\t\t\t})\n\t\t\treturn err\n\t\t}\n\n\t\tif err := namespace.Bootstrap(result); err != nil {\n\t\t\tm.log.Info(\"bootstrap error\", append(logFields, []zapcore.Field{\n\t\t\t\tzap.String(\"namespace\", id.String()),\n\t\t\t\tzap.Error(err),\n\t\t\t}...)...)\n\t\t\tmultiErr = multiErr.Add(err)\n\t\t}\n\t}\n\n\tif err := multiErr.FinalError(); err != nil {\n\t\tm.log.Info(\"bootstrap namespaces failed\",\n\t\t\tappend(logFields, zap.Error(err))...)\n\t\treturn err\n\t}\n\n\tm.log.Info(\"bootstrap success\", logFields...)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gosym\n\nimport (\n\t\"debug\/elf\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tpclineTempDir string\n\tpclinetestBinary string\n)\n\nfunc dotest(self bool) bool {\n\t\/\/ For now, only works on amd64 platforms.\n\tif runtime.GOARCH != \"amd64\" {\n\t\treturn false\n\t}\n\t\/\/ Self test reads test binary; only works on Linux.\n\tif self && runtime.GOOS != \"linux\" {\n\t\treturn false\n\t}\n\t\/\/ Command below expects \"sh\", so Unix.\n\tif runtime.GOOS == \"windows\" || runtime.GOOS == \"plan9\" {\n\t\treturn false\n\t}\n\tif pclinetestBinary != \"\" {\n\t\treturn true\n\t}\n\tvar err error\n\tpclineTempDir, err = ioutil.TempDir(\"\", \"pclinetest\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif strings.Contains(pclineTempDir, \" \") {\n\t\tpanic(\"unexpected space in tempdir\")\n\t}\n\t\/\/ This command builds pclinetest from pclinetest.asm;\n\t\/\/ the resulting binary looks like it was built from pclinetest.s,\n\t\/\/ but we have renamed it to keep it away from the go tool.\n\tpclinetestBinary = filepath.Join(pclineTempDir, \"pclinetest\")\n\tcommand := fmt.Sprintf(\"go tool asm -o %s.6 pclinetest.asm && go tool 6l -H linux -E main -o %s %s.6\",\n\t\tpclinetestBinary, pclinetestBinary, pclinetestBinary)\n\tcmd := exec.Command(\"sh\", \"-c\", command)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn true\n}\n\nfunc endtest() {\n\tif pclineTempDir != \"\" {\n\t\tos.RemoveAll(pclineTempDir)\n\t\tpclineTempDir = \"\"\n\t\tpclinetestBinary = \"\"\n\t}\n}\n\nfunc getTable(t *testing.T) *Table {\n\tf, tab := crack(os.Args[0], t)\n\tf.Close()\n\treturn tab\n}\n\nfunc crack(file string, t *testing.T) (*elf.File, *Table) {\n\t\/\/ Open self\n\tf, err := elf.Open(file)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn parse(file, f, t)\n}\n\nfunc parse(file string, f *elf.File, t *testing.T) (*elf.File, *Table) {\n\tsymdat, err := f.Section(\".gosymtab\").Data()\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"reading %s gosymtab: %v\", file, err)\n\t}\n\tpclndat, err := f.Section(\".gopclntab\").Data()\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"reading %s gopclntab: %v\", file, err)\n\t}\n\n\tpcln := NewLineTable(pclndat, f.Section(\".text\").Addr)\n\ttab, err := NewTable(symdat, pcln)\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"parsing %s gosymtab: %v\", file, err)\n\t}\n\n\treturn f, tab\n}\n\nvar goarch = os.Getenv(\"O\")\n\nfunc TestLineFromAline(t *testing.T) {\n\tif !dotest(true) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\ttab := getTable(t)\n\tif tab.go12line != nil {\n\t\t\/\/ aline's don't exist in the Go 1.2 table.\n\t\tt.Skip(\"not relevant to Go 1.2 symbol table\")\n\t}\n\n\t\/\/ Find the sym package\n\tpkg := tab.LookupFunc(\"debug\/gosym.TestLineFromAline\").Obj\n\tif pkg == nil {\n\t\tt.Fatalf(\"nil pkg\")\n\t}\n\n\t\/\/ Walk every absolute line and ensure that we hit every\n\t\/\/ source line monotonically\n\tlastline := make(map[string]int)\n\tfinal := -1\n\tfor i := 0; i < 10000; i++ {\n\t\tpath, line := pkg.lineFromAline(i)\n\t\t\/\/ Check for end of object\n\t\tif path == \"\" {\n\t\t\tif final == -1 {\n\t\t\t\tfinal = i - 1\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if final != -1 {\n\t\t\tt.Fatalf(\"reached end of package at absolute line %d, but absolute line %d mapped to %s:%d\", final, i, path, line)\n\t\t}\n\t\t\/\/ It's okay to see files multiple times (e.g., sys.a)\n\t\tif line == 1 {\n\t\t\tlastline[path] = 1\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the is the next line in path\n\t\tll, ok := lastline[path]\n\t\tif !ok {\n\t\t\tt.Errorf(\"file %s starts on line %d\", path, line)\n\t\t} else if line != ll+1 {\n\t\t\tt.Fatalf(\"expected next line of file %s to be %d, got %d\", path, ll+1, line)\n\t\t}\n\t\tlastline[path] = line\n\t}\n\tif final == -1 {\n\t\tt.Errorf(\"never reached end of object\")\n\t}\n}\n\nfunc TestLineAline(t *testing.T) {\n\tif !dotest(true) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\ttab := getTable(t)\n\tif tab.go12line != nil {\n\t\t\/\/ aline's don't exist in the Go 1.2 table.\n\t\tt.Skip(\"not relevant to Go 1.2 symbol table\")\n\t}\n\n\tfor _, o := range tab.Files {\n\t\t\/\/ A source file can appear multiple times in a\n\t\t\/\/ object. alineFromLine will always return alines in\n\t\t\/\/ the first file, so track which lines we've seen.\n\t\tfound := make(map[string]int)\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\tpath, line := o.lineFromAline(i)\n\t\t\tif path == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ cgo files are full of 'Z' symbols, which we don't handle\n\t\t\tif len(path) > 4 && path[len(path)-4:] == \".cgo\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif minline, ok := found[path]; path != \"\" && ok {\n\t\t\t\tif minline >= line {\n\t\t\t\t\t\/\/ We've already covered this file\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfound[path] = line\n\n\t\t\ta, err := o.alineFromLine(path, line)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"absolute line %d in object %s maps to %s:%d, but mapping that back gives error %s\", i, o.Paths[0].Name, path, line, err)\n\t\t\t} else if a != i {\n\t\t\t\tt.Errorf(\"absolute line %d in object %s maps to %s:%d, which maps back to absolute line %d\\n\", i, o.Paths[0].Name, path, line, a)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestPCLine(t *testing.T) {\n\tif !dotest(false) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\tf, tab := crack(pclinetestBinary, t)\n\ttext := f.Section(\".text\")\n\ttextdat, err := text.Data()\n\tif err != nil {\n\t\tt.Fatalf(\"reading .text: %v\", err)\n\t}\n\n\t\/\/ Test PCToLine\n\tsym := tab.LookupFunc(\"linefrompc\")\n\twantLine := 0\n\tfor pc := sym.Entry; pc < sym.End; pc++ {\n\t\toff := pc - text.Addr \/\/ TODO(rsc): should not need off; bug in 8g\n\t\tif textdat[off] == 255 {\n\t\t\tbreak\n\t\t}\n\t\twantLine += int(textdat[off])\n\t\tt.Logf(\"off is %d %#x (max %d)\", off, textdat[off], sym.End-pc)\n\t\tfile, line, fn := tab.PCToLine(pc)\n\t\tif fn == nil {\n\t\t\tt.Errorf(\"failed to get line of PC %#x\", pc)\n\t\t} else if !strings.HasSuffix(file, \"pclinetest.asm\") || line != wantLine || fn != sym {\n\t\t\tt.Errorf(\"PCToLine(%#x) = %s:%d (%s), want %s:%d (%s)\", pc, file, line, fn.Name, \"pclinetest.asm\", wantLine, sym.Name)\n\t\t}\n\t}\n\n\t\/\/ Test LineToPC\n\tsym = tab.LookupFunc(\"pcfromline\")\n\tlookupline := -1\n\twantLine = 0\n\toff := uint64(0) \/\/ TODO(rsc): should not need off; bug in 8g\n\tfor pc := sym.Value; pc < sym.End; pc += 2 + uint64(textdat[off]) {\n\t\tfile, line, fn := tab.PCToLine(pc)\n\t\toff = pc - text.Addr\n\t\tif textdat[off] == 255 {\n\t\t\tbreak\n\t\t}\n\t\twantLine += int(textdat[off])\n\t\tif line != wantLine {\n\t\t\tt.Errorf(\"expected line %d at PC %#x in pcfromline, got %d\", wantLine, pc, line)\n\t\t\toff = pc + 1 - text.Addr\n\t\t\tcontinue\n\t\t}\n\t\tif lookupline == -1 {\n\t\t\tlookupline = line\n\t\t}\n\t\tfor ; lookupline <= line; lookupline++ {\n\t\t\tpc2, fn2, err := tab.LineToPC(file, lookupline)\n\t\t\tif lookupline != line {\n\t\t\t\t\/\/ Should be nothing on this line\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"expected no PC at line %d, got %#x (%s)\", lookupline, pc2, fn2.Name)\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\tt.Errorf(\"failed to get PC of line %d: %s\", lookupline, err)\n\t\t\t} else if pc != pc2 {\n\t\t\t\tt.Errorf(\"expected PC %#x (%s) at line %d, got PC %#x (%s)\", pc, fn.Name, line, pc2, fn2.Name)\n\t\t\t}\n\t\t}\n\t\toff = pc + 1 - text.Addr\n\t}\n}\n<commit_msg>debug\/gosym: skip tests when .gosymtab section not found<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gosym\n\nimport (\n\t\"debug\/elf\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tpclineTempDir string\n\tpclinetestBinary string\n)\n\nfunc dotest(self bool) bool {\n\t\/\/ For now, only works on amd64 platforms.\n\tif runtime.GOARCH != \"amd64\" {\n\t\treturn false\n\t}\n\t\/\/ Self test reads test binary; only works on Linux.\n\tif self && runtime.GOOS != \"linux\" {\n\t\treturn false\n\t}\n\t\/\/ Command below expects \"sh\", so Unix.\n\tif runtime.GOOS == \"windows\" || runtime.GOOS == \"plan9\" {\n\t\treturn false\n\t}\n\tif pclinetestBinary != \"\" {\n\t\treturn true\n\t}\n\tvar err error\n\tpclineTempDir, err = ioutil.TempDir(\"\", \"pclinetest\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif strings.Contains(pclineTempDir, \" \") {\n\t\tpanic(\"unexpected space in tempdir\")\n\t}\n\t\/\/ This command builds pclinetest from pclinetest.asm;\n\t\/\/ the resulting binary looks like it was built from pclinetest.s,\n\t\/\/ but we have renamed it to keep it away from the go tool.\n\tpclinetestBinary = filepath.Join(pclineTempDir, \"pclinetest\")\n\tcommand := fmt.Sprintf(\"go tool asm -o %s.6 pclinetest.asm && go tool 6l -H linux -E main -o %s %s.6\",\n\t\tpclinetestBinary, pclinetestBinary, pclinetestBinary)\n\tcmd := exec.Command(\"sh\", \"-c\", command)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn true\n}\n\nfunc endtest() {\n\tif pclineTempDir != \"\" {\n\t\tos.RemoveAll(pclineTempDir)\n\t\tpclineTempDir = \"\"\n\t\tpclinetestBinary = \"\"\n\t}\n}\n\nfunc getTable(t *testing.T) *Table {\n\tf, tab := crack(os.Args[0], t)\n\tf.Close()\n\treturn tab\n}\n\nfunc crack(file string, t *testing.T) (*elf.File, *Table) {\n\t\/\/ Open self\n\tf, err := elf.Open(file)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn parse(file, f, t)\n}\n\nfunc parse(file string, f *elf.File, t *testing.T) (*elf.File, *Table) {\n\ts := f.Section(\".gosymtab\")\n\tif s == nil {\n\t\tt.Skip(\"no .gosymtab section\")\n\t}\n\tsymdat, err := s.Data()\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"reading %s gosymtab: %v\", file, err)\n\t}\n\tpclndat, err := f.Section(\".gopclntab\").Data()\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"reading %s gopclntab: %v\", file, err)\n\t}\n\n\tpcln := NewLineTable(pclndat, f.Section(\".text\").Addr)\n\ttab, err := NewTable(symdat, pcln)\n\tif err != nil {\n\t\tf.Close()\n\t\tt.Fatalf(\"parsing %s gosymtab: %v\", file, err)\n\t}\n\n\treturn f, tab\n}\n\nvar goarch = os.Getenv(\"O\")\n\nfunc TestLineFromAline(t *testing.T) {\n\tif !dotest(true) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\ttab := getTable(t)\n\tif tab.go12line != nil {\n\t\t\/\/ aline's don't exist in the Go 1.2 table.\n\t\tt.Skip(\"not relevant to Go 1.2 symbol table\")\n\t}\n\n\t\/\/ Find the sym package\n\tpkg := tab.LookupFunc(\"debug\/gosym.TestLineFromAline\").Obj\n\tif pkg == nil {\n\t\tt.Fatalf(\"nil pkg\")\n\t}\n\n\t\/\/ Walk every absolute line and ensure that we hit every\n\t\/\/ source line monotonically\n\tlastline := make(map[string]int)\n\tfinal := -1\n\tfor i := 0; i < 10000; i++ {\n\t\tpath, line := pkg.lineFromAline(i)\n\t\t\/\/ Check for end of object\n\t\tif path == \"\" {\n\t\t\tif final == -1 {\n\t\t\t\tfinal = i - 1\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if final != -1 {\n\t\t\tt.Fatalf(\"reached end of package at absolute line %d, but absolute line %d mapped to %s:%d\", final, i, path, line)\n\t\t}\n\t\t\/\/ It's okay to see files multiple times (e.g., sys.a)\n\t\tif line == 1 {\n\t\t\tlastline[path] = 1\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Check that the is the next line in path\n\t\tll, ok := lastline[path]\n\t\tif !ok {\n\t\t\tt.Errorf(\"file %s starts on line %d\", path, line)\n\t\t} else if line != ll+1 {\n\t\t\tt.Fatalf(\"expected next line of file %s to be %d, got %d\", path, ll+1, line)\n\t\t}\n\t\tlastline[path] = line\n\t}\n\tif final == -1 {\n\t\tt.Errorf(\"never reached end of object\")\n\t}\n}\n\nfunc TestLineAline(t *testing.T) {\n\tif !dotest(true) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\ttab := getTable(t)\n\tif tab.go12line != nil {\n\t\t\/\/ aline's don't exist in the Go 1.2 table.\n\t\tt.Skip(\"not relevant to Go 1.2 symbol table\")\n\t}\n\n\tfor _, o := range tab.Files {\n\t\t\/\/ A source file can appear multiple times in a\n\t\t\/\/ object. alineFromLine will always return alines in\n\t\t\/\/ the first file, so track which lines we've seen.\n\t\tfound := make(map[string]int)\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\tpath, line := o.lineFromAline(i)\n\t\t\tif path == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ cgo files are full of 'Z' symbols, which we don't handle\n\t\t\tif len(path) > 4 && path[len(path)-4:] == \".cgo\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif minline, ok := found[path]; path != \"\" && ok {\n\t\t\t\tif minline >= line {\n\t\t\t\t\t\/\/ We've already covered this file\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfound[path] = line\n\n\t\t\ta, err := o.alineFromLine(path, line)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"absolute line %d in object %s maps to %s:%d, but mapping that back gives error %s\", i, o.Paths[0].Name, path, line, err)\n\t\t\t} else if a != i {\n\t\t\t\tt.Errorf(\"absolute line %d in object %s maps to %s:%d, which maps back to absolute line %d\\n\", i, o.Paths[0].Name, path, line, a)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestPCLine(t *testing.T) {\n\tif !dotest(false) {\n\t\treturn\n\t}\n\tdefer endtest()\n\n\tf, tab := crack(pclinetestBinary, t)\n\ttext := f.Section(\".text\")\n\ttextdat, err := text.Data()\n\tif err != nil {\n\t\tt.Fatalf(\"reading .text: %v\", err)\n\t}\n\n\t\/\/ Test PCToLine\n\tsym := tab.LookupFunc(\"linefrompc\")\n\twantLine := 0\n\tfor pc := sym.Entry; pc < sym.End; pc++ {\n\t\toff := pc - text.Addr \/\/ TODO(rsc): should not need off; bug in 8g\n\t\tif textdat[off] == 255 {\n\t\t\tbreak\n\t\t}\n\t\twantLine += int(textdat[off])\n\t\tt.Logf(\"off is %d %#x (max %d)\", off, textdat[off], sym.End-pc)\n\t\tfile, line, fn := tab.PCToLine(pc)\n\t\tif fn == nil {\n\t\t\tt.Errorf(\"failed to get line of PC %#x\", pc)\n\t\t} else if !strings.HasSuffix(file, \"pclinetest.asm\") || line != wantLine || fn != sym {\n\t\t\tt.Errorf(\"PCToLine(%#x) = %s:%d (%s), want %s:%d (%s)\", pc, file, line, fn.Name, \"pclinetest.asm\", wantLine, sym.Name)\n\t\t}\n\t}\n\n\t\/\/ Test LineToPC\n\tsym = tab.LookupFunc(\"pcfromline\")\n\tlookupline := -1\n\twantLine = 0\n\toff := uint64(0) \/\/ TODO(rsc): should not need off; bug in 8g\n\tfor pc := sym.Value; pc < sym.End; pc += 2 + uint64(textdat[off]) {\n\t\tfile, line, fn := tab.PCToLine(pc)\n\t\toff = pc - text.Addr\n\t\tif textdat[off] == 255 {\n\t\t\tbreak\n\t\t}\n\t\twantLine += int(textdat[off])\n\t\tif line != wantLine {\n\t\t\tt.Errorf(\"expected line %d at PC %#x in pcfromline, got %d\", wantLine, pc, line)\n\t\t\toff = pc + 1 - text.Addr\n\t\t\tcontinue\n\t\t}\n\t\tif lookupline == -1 {\n\t\t\tlookupline = line\n\t\t}\n\t\tfor ; lookupline <= line; lookupline++ {\n\t\t\tpc2, fn2, err := tab.LineToPC(file, lookupline)\n\t\t\tif lookupline != line {\n\t\t\t\t\/\/ Should be nothing on this line\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"expected no PC at line %d, got %#x (%s)\", lookupline, pc2, fn2.Name)\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\tt.Errorf(\"failed to get PC of line %d: %s\", lookupline, err)\n\t\t\t} else if pc != pc2 {\n\t\t\t\tt.Errorf(\"expected PC %#x (%s) at line %d, got PC %#x (%s)\", pc, fn.Name, line, pc2, fn2.Name)\n\t\t\t}\n\t\t}\n\t\toff = pc + 1 - text.Addr\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\r\n\r\nimport (\r\n\t\"errors\"\r\n\t\"flag\"\r\n\t\"fmt\"\r\n\t\"io\/ioutil\"\r\n\t\"os\"\r\n\t\"strings\"\r\n\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/ec2metadata\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/ec2rolecreds\"\r\n\r\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\r\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\r\n)\r\n\r\ntype awsHostContext struct {\r\n\tCreds *credentials.Credentials\r\n\tId string\r\n\tRegion string\r\n}\r\n\r\nvar DEBUG bool\r\nfunc debug(m ...interface{}) {\r\n\tif(DEBUG) {\r\n\t\tfmt.Println(m...)\r\n\t}\r\n}\r\n\r\nfunc main() {\r\n\t\/\/ currently not using any of the client flags\r\n\t_ = flag.String(\"u\", \"\", \"The user account\")\r\n\t_ = flag.String(\"k\", \"\", \"The public key proffered by the client\")\r\n\t_ = flag.String(\"t\", \"\", \"The key encryption type\")\r\n\t_ = flag.String(\"f\", \"\", \"The fingerprint of the public key\")\r\n\tgroupTag := flag.String(\"group_tag\", \"access-groups\", \"The instance tag with csv ssh sccess groups in it\")\r\n\ts3Bucket := flag.String(\"s3_bucket\", \"keys\", \"The bucket where the access group public keys are stored\")\r\n\ts3Region := flag.String(\"s3_region\", \"eu-west-1\", \"The region in which the bucket is located\")\r\n\tdebugOutput := flag.Bool(\"debug\", false, \"Enable debug output\")\r\n\tflag.Parse()\r\n\r\n\tif(*debugOutput) {\r\n\t\tDEBUG = true\r\n\t\tdebug(\"debug output enabled\")\r\n\t\tdebug(\"grouptag: \", *groupTag)\r\n\t\tdebug(\"bucket: \", *s3Bucket)\r\n\t\tdebug(\"region: \", *s3Region)\r\n\t\tdebug(\"\")\r\n\t}\r\n\r\n\t\/\/ get aws host context\r\n\thctx, err := getAwsHostContext()\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ get the list of access groups the intance belongs to\r\n\taccessGroups, err := getInstanceAccessGroups(hctx, *groupTag)\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ get the authorized_keys files for those instances\r\n\tauthorizedKeys, err := getAccessKeys(hctx, *s3Bucket, *s3Region, accessGroups)\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ print all authorized_keys\r\n\tdebug(\"print all group keys:\")\r\n\tfor _, k := range authorizedKeys {\r\n\t\tfmt.Print(k)\r\n\t}\r\n}\r\n\r\n\r\nfunc getAwsHostContext() (*awsHostContext, error) {\r\n\t\/\/setup\r\n\thctx := new(awsHostContext)\r\n\tcfg := aws.NewConfig()\r\n\tsvc := ec2metadata.New(session.New(), cfg)\r\n\tp := &ec2rolecreds.EC2RoleProvider{\r\n\t\tClient: svc,\r\n\t}\r\n\t\/\/get creds\r\n\thctx.Creds = credentials.NewCredentials(p)\r\n\t\/\/get instance id\r\n\tid, err := svc.GetMetadata(\"instance-id\")\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\thctx.Id = id\r\n\tdebug(\"instance-id:\", id)\r\n\t\/\/get region\r\n\tregion, err := svc.Region()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\thctx.Region = region\r\n\tdebug(\"region: \", region)\r\n\treturn hctx, nil\r\n}\r\n\r\n\r\nfunc getInstanceAccessGroups(hctx *awsHostContext, tag string) ([]string, error) {\r\n\t\/\/ setup\r\n\tsvc := ec2.New(session.New(aws.NewConfig().WithCredentials(hctx.Creds).WithRegion(hctx.Region)))\r\n\tparams := &ec2.DescribeTagsInput{\r\n\t\tFilters: []*ec2.Filter{\r\n\t\t\t{\r\n\t\t\t\tName: aws.String(\"resource-id\"),\r\n\t\t\t\tValues: []*string{ aws.String(hctx.Id) },\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\tName: aws.String(\"key\"),\r\n\t\t\t\tValues: []*string{ aws.String(tag) },\r\n\t\t\t},\r\n\t\t},\r\n\t}\r\n\t\/\/ fetch tags\r\n\tdebug(\"requesting tag: \" + tag + \", from resource: \" + hctx.Id)\r\n\tresp, err := svc.DescribeTags(params)\r\n\tif err != nil {\r\n\t\treturn []string{}, err\r\n\t}\r\n\tdebug(\"recieved:\", resp.Tags)\r\n\r\n\t\/\/ valiade response\r\n\tif len(resp.Tags) == 0 {\r\n\t\treturn []string{}, errors.New(\"tag \" + tag + \" does not exist!\")\r\n\t}\r\n\t\/\/ parse and return response\r\n\tvar out []string\r\n\tfor _, s := range strings.Split(*resp.Tags[0].Value, \",\") {\r\n\t\tout = append(out, strings.TrimSpace(s))\r\n\t}\r\n\tdebug(\"access groups:\", out)\r\n\treturn out, nil\r\n}\r\n\r\n\r\nfunc getAccessKeys(hctx *awsHostContext, s3Bucket, s3Region string, accessGroups []string) ([]string, error) {\r\n\t\/\/ setup\r\n\tsvc := s3.New(session.New(aws.NewConfig().WithCredentials(hctx.Creds).WithRegion(s3Region)))\r\n\r\n\tvar out []string\r\n\t\/\/ fetch authorized_keys files\r\n\tfor _, group := range accessGroups {\r\n\t\tparams := &s3.GetObjectInput{\r\n\t\t\tBucket: aws.String(s3Bucket),\r\n\t\t\tKey: aws.String(group + \"\/authorized_keys\"), \/\/ Required\r\n\t\t}\r\n\t\tdebug(\"requesting authorized_keys file: s3:\/\/\" + s3Bucket + \"\/\" + group + \"\/authorized_keys\")\r\n\t\tresp, err := svc.GetObject(params)\r\n\t\tif err != nil {\r\n\t\t\tdebug(err.Error())\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tdebug(\"retrieved, appending to output\")\r\n\t\tif b, err := ioutil.ReadAll(resp.Body); err == nil {\r\n\t\tout = append(out, string(b))\r\n\t\t}\r\n\t}\r\n\r\n\treturn out, nil\r\n}\r\n<commit_msg>Fix spelling of group_tag description<commit_after>package main\r\n\r\nimport (\r\n\t\"errors\"\r\n\t\"flag\"\r\n\t\"fmt\"\r\n\t\"io\/ioutil\"\r\n\t\"os\"\r\n\t\"strings\"\r\n\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/ec2metadata\"\r\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/ec2rolecreds\"\r\n\r\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\r\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\r\n)\r\n\r\ntype awsHostContext struct {\r\n\tCreds *credentials.Credentials\r\n\tId string\r\n\tRegion string\r\n}\r\n\r\nvar DEBUG bool\r\nfunc debug(m ...interface{}) {\r\n\tif(DEBUG) {\r\n\t\tfmt.Println(m...)\r\n\t}\r\n}\r\n\r\nfunc main() {\r\n\t\/\/ currently not using any of the client flags\r\n\t_ = flag.String(\"u\", \"\", \"The user account\")\r\n\t_ = flag.String(\"k\", \"\", \"The public key proffered by the client\")\r\n\t_ = flag.String(\"t\", \"\", \"The key encryption type\")\r\n\t_ = flag.String(\"f\", \"\", \"The fingerprint of the public key\")\r\n\tgroupTag := flag.String(\"group_tag\", \"access-groups\", \"The instance tag with csv ssh access groups in it\")\r\n\ts3Bucket := flag.String(\"s3_bucket\", \"keys\", \"The bucket where the access group public keys are stored\")\r\n\ts3Region := flag.String(\"s3_region\", \"eu-west-1\", \"The region in which the bucket is located\")\r\n\tdebugOutput := flag.Bool(\"debug\", false, \"Enable debug output\")\r\n\tflag.Parse()\r\n\r\n\tif(*debugOutput) {\r\n\t\tDEBUG = true\r\n\t\tdebug(\"debug output enabled\")\r\n\t\tdebug(\"grouptag: \", *groupTag)\r\n\t\tdebug(\"bucket: \", *s3Bucket)\r\n\t\tdebug(\"region: \", *s3Region)\r\n\t\tdebug(\"\")\r\n\t}\r\n\r\n\t\/\/ get aws host context\r\n\thctx, err := getAwsHostContext()\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ get the list of access groups the intance belongs to\r\n\taccessGroups, err := getInstanceAccessGroups(hctx, *groupTag)\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ get the authorized_keys files for those instances\r\n\tauthorizedKeys, err := getAccessKeys(hctx, *s3Bucket, *s3Region, accessGroups)\r\n\tif err != nil {\r\n\t\tdebug(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\r\n\t\/\/ print all authorized_keys\r\n\tdebug(\"print all group keys:\")\r\n\tfor _, k := range authorizedKeys {\r\n\t\tfmt.Print(k)\r\n\t}\r\n}\r\n\r\n\r\nfunc getAwsHostContext() (*awsHostContext, error) {\r\n\t\/\/setup\r\n\thctx := new(awsHostContext)\r\n\tcfg := aws.NewConfig()\r\n\tsvc := ec2metadata.New(session.New(), cfg)\r\n\tp := &ec2rolecreds.EC2RoleProvider{\r\n\t\tClient: svc,\r\n\t}\r\n\t\/\/get creds\r\n\thctx.Creds = credentials.NewCredentials(p)\r\n\t\/\/get instance id\r\n\tid, err := svc.GetMetadata(\"instance-id\")\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\thctx.Id = id\r\n\tdebug(\"instance-id:\", id)\r\n\t\/\/get region\r\n\tregion, err := svc.Region()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\thctx.Region = region\r\n\tdebug(\"region: \", region)\r\n\treturn hctx, nil\r\n}\r\n\r\n\r\nfunc getInstanceAccessGroups(hctx *awsHostContext, tag string) ([]string, error) {\r\n\t\/\/ setup\r\n\tsvc := ec2.New(session.New(aws.NewConfig().WithCredentials(hctx.Creds).WithRegion(hctx.Region)))\r\n\tparams := &ec2.DescribeTagsInput{\r\n\t\tFilters: []*ec2.Filter{\r\n\t\t\t{\r\n\t\t\t\tName: aws.String(\"resource-id\"),\r\n\t\t\t\tValues: []*string{ aws.String(hctx.Id) },\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\tName: aws.String(\"key\"),\r\n\t\t\t\tValues: []*string{ aws.String(tag) },\r\n\t\t\t},\r\n\t\t},\r\n\t}\r\n\t\/\/ fetch tags\r\n\tdebug(\"requesting tag: \" + tag + \", from resource: \" + hctx.Id)\r\n\tresp, err := svc.DescribeTags(params)\r\n\tif err != nil {\r\n\t\treturn []string{}, err\r\n\t}\r\n\tdebug(\"received:\", resp.Tags)\r\n\r\n\t\/\/ valiade response\r\n\tif len(resp.Tags) == 0 {\r\n\t\treturn []string{}, errors.New(\"tag \" + tag + \" does not exist!\")\r\n\t}\r\n\t\/\/ parse and return response\r\n\tvar out []string\r\n\tfor _, s := range strings.Split(*resp.Tags[0].Value, \",\") {\r\n\t\tout = append(out, strings.TrimSpace(s))\r\n\t}\r\n\tdebug(\"access groups:\", out)\r\n\treturn out, nil\r\n}\r\n\r\n\r\nfunc getAccessKeys(hctx *awsHostContext, s3Bucket, s3Region string, accessGroups []string) ([]string, error) {\r\n\t\/\/ setup\r\n\tsvc := s3.New(session.New(aws.NewConfig().WithCredentials(hctx.Creds).WithRegion(s3Region)))\r\n\r\n\tvar out []string\r\n\t\/\/ fetch authorized_keys files\r\n\tfor _, group := range accessGroups {\r\n\t\tparams := &s3.GetObjectInput{\r\n\t\t\tBucket: aws.String(s3Bucket),\r\n\t\t\tKey: aws.String(group + \"\/authorized_keys\"), \/\/ Required\r\n\t\t}\r\n\t\tdebug(\"requesting authorized_keys file: s3:\/\/\" + s3Bucket + \"\/\" + group + \"\/authorized_keys\")\r\n\t\tresp, err := svc.GetObject(params)\r\n\t\tif err != nil {\r\n\t\t\tdebug(err.Error())\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tdebug(\"retrieved, appending to output\")\r\n\t\tif b, err := ioutil.ReadAll(resp.Body); err == nil {\r\n\t\tout = append(out, string(b))\r\n\t\t}\r\n\t}\r\n\r\n\treturn out, nil\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Pivo Authors. All rights reserved.\n\/\/ Use of this source code is governed by a Simplified BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage pivo\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"time\"\n)\n\n\/\/ The Pivo package version numbers\nconst Version = \"2.0.0\"\n\n\/\/ Default value for the join rate limit.\n\/\/ Currently defined as a maximum of 64 per seconds.\nconst DefaultJoinLimitRateInterval = time.Second \/ 64\n\n\/\/ Default value for the allowed burst over the join limit.\n\/\/ Currently defined as a maximum of 32 more connections.\nconst DefaultJoinLimitRateBurst = 32\n\n\/\/ Default value for the size of the join queue.\n\/\/ Currently defined as 256 pending connections before new\n\/\/ ones are discarded.\nconst DefaultJoinMaxQueueSize = 256\n\n\/\/ Marker for binary message type\nconst IsBinaryMessage = 1\n\n\/\/ Marker for text message type\nconst IsTextMessage = 0\n\n\/\/ Error is thrown when the join queue is full.\nvar ErrJoinQueueIsFull = errors.New(\"join queue is full\")\n\n\/\/ Error is thrown when a connector has its port buffer full.\nvar ErrPortBufferIsFull = errors.New(\"port buffer is full\")\n\n\/\/ Error is thrown when unexpected code path is reached.\nvar ErrShouldNotReachThis = errors.New(\"should not reach this\")\n\n\/\/ Connector is the interface that wraps the basic methods needed\n\/\/ to use a connector with the hub.\ntype Connector interface {\n\tClose(error) error\n\tProtocol() string\n\tRemoteAddr() net.Addr\n\tSend(*Message) error\n}\n\n\/\/ OnBinaryReader is called when binary data is read from\n\/\/ a connector.\ntype OnBinaryReader interface {\n\tOnBinaryRead([]byte) error\n}\n\n\/\/ OnBinaryReadCloser is the interface that wraps basic methods\n\/\/ both for reading binary data and getting notified about a\n\/\/ closed connection.\ntype OnBinaryReadCloser interface {\n\tOnBinaryReader\n\tOnCloser\n}\n\n\/\/ OnCloser is the interface that requires a method to call upon\n\/\/ disconnection of a connector.\ntype OnCloser interface {\n\tOnClose(error) error\n}\n\n\/\/ OnReader is the interface that wraps the methods called when\n\/\/ data is read from a connector.\ntype OnReader interface {\n\tOnBinaryReader\n\tOnTextReader\n}\n\n\/\/ OnReadCloser wraps the basic methods needed to be notified\n\/\/ when data has been read or the connector has closed.\ntype OnReadCloser interface {\n\tOnCloser\n\tOnReader\n}\n\n\/\/ OnTextReader is called when text data is read from\n\/\/ a connector.\ntype OnTextReader interface {\n\tOnTextRead(string) error\n}\n\n\/\/ OnTextReadCloser is the interface that wraps basic methods\n\/\/ both for reading text data and getting notified about a\n\/\/ closed connection.\ntype OnTextReadCloser interface {\n\tOnTextReader\n\tOnCloser\n}\n\n\/\/ A Hub is a collection of connectors with some specified settings.\ntype Hub struct {\n\t\/\/ The interval at which new connections are processed.\n\t\/\/ A Duration of time.Second \/ 100 would mean hundred\n\t\/\/ of new connections are processed in a second.\n\tJoinLimitRateInterval time.Duration\n\n\t\/\/ The size of the burst allowed when the join rate limit\n\t\/\/ is reached. A value of 10 would mean that 10 extra\n\t\/\/ connections may wait to join the hub.\n\tJoinLimitRateBurst uint\n\n\t\/\/ The maximum number of new connections waiting to\n\t\/\/ join the hub before ErrJoinQueueIsFull is being\n\t\/\/ thrown.\n\tJoinMaxQueueSize uint\n\n\tbroadcast Port \/\/ Broadcast channel\n\tjoin chan Connector \/\/ Join the connector\n\tleave chan Connector \/\/ Disjoin the connector\n\tports Ports \/\/ Map of connected connectors\n\tstop chan bool \/\/ Stop the hub\n\ttimeslots chan bool \/\/ Join burst timeslots\n}\n\n\/\/ OnHubBroadcaster is triggered before the message is delivered.\n\/\/ Callee may alter the message in any way as well as\n\/\/ provide a different list of recipients ports.\n\/\/ If error is thrown, broadcast will not occur at all.\ntype OnHubBroadcaster interface {\n\tOnHubBroadcast(*Message) (Ports, error)\n}\n\n\/\/ OnHubJoiner is triggered before a connector may join the hub.\n\/\/ If error is thrown, join will not occur at all.\ntype OnHubJoiner interface {\n\tOnHubJoin(Connector) error\n}\n\n\/\/ OnHubLeaver is triggered before a connector disjoin the hub.\n\/\/ Nothing may prevent this from happening at this stage.\ntype OnHubLeaver interface {\n\tOnHubLeave(Connector)\n}\n\n\/\/ OnHubCloser is triggered right before the hub stops.\n\/\/ The map of connected connectors is provided and is\n\/\/ guaranteed to be accurate at the time the callee has it.\ntype OnHubCloser interface {\n\tOnHubClose(Ports)\n}\n\n\/\/ Message is a data structure with bytes of data, type of data,\n\/\/ and its original connector.\ntype Message struct {\n\tData []byte \/\/ Bytes of data\n\tFrom Connector \/\/ Original connector\n\tType int \/\/ Type of data\n}\n\n\/\/ Port is a channel transporting a pointer to a Message.\ntype Port chan *Message\n\n\/\/ Ports is a map of Connectors that have joined the hub.\ntype Ports map[Connector]bool\n\n\/\/ throttler is the timeslots ticker.\ntype throttler struct {\n\tstop chan bool \/\/ Stop the throttler\n}\n\n\/\/ BinaryMessage formats a binary message and returns\n\/\/ a pointer to it.\nfunc BinaryMessage(from Connector, bin []byte) *Message {\n\treturn &Message{bin, from, IsBinaryMessage}\n}\n\n\/\/ DefaultHub instantiate a new hub with default settings.\nfunc DefaultHub() *Hub {\n\treturn &Hub{\n\t\tJoinLimitRateBurst: DefaultJoinLimitRateBurst,\n\t\tJoinLimitRateInterval: DefaultJoinLimitRateInterval,\n\t\tJoinMaxQueueSize: DefaultJoinMaxQueueSize,\n\t}\n}\n\n\/\/ TextMessage formats a text message and returns\n\/\/ a pointer to it.\nfunc TextMessage(from Connector, text string) *Message {\n\treturn &Message{[]byte(text), from, IsTextMessage}\n}\n\n\/\/ throttler fills the timeslots channel at specified interval.\nfunc (h *Hub) throttler(rate time.Duration) *throttler {\n\tthrottler := &throttler{stop: make(chan bool)}\n\tgo func() {\n\t\th.timeslots = make(chan bool, h.JoinLimitRateBurst+1)\n\t\tticker := time.NewTicker(rate)\n\t\tdefer func() { ticker.Stop(); close(h.timeslots) }()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-throttler.stop:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tselect {\n\t\t\t\tcase h.timeslots <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn throttler\n}\n\n\/\/ Broadcast sends a message to all connectors on the hub.\nfunc (h *Hub) Broadcast(m *Message) { h.broadcast <- m }\n\n\/\/ Leave disjoins the given connector from the hub.\nfunc (h *Hub) Leave(c Connector) { h.leave <- c }\n\n\/\/ Stop brings the hub down.\nfunc (h *Hub) Stop() { h.stop <- true }\n\n\/\/ Join adds the given connector to the hub.\nfunc (h *Hub) Join(c Connector) error {\n\tselect {\n\tcase h.join <- c:\n\tdefault:\n\t\treturn ErrJoinQueueIsFull\n\t}\n\treturn nil\n}\n\n\/\/ StartAndServe serves the hub until Stop() is requested.\nfunc (h *Hub) StartAndServe(trigger interface{}) error {\n\tvar throttler *throttler\n\n\t\/\/ Start a throttler if necessary\n\tif h.JoinLimitRateInterval > 0 {\n\t\tthrottler = h.throttler(h.JoinLimitRateInterval)\n\t\tdefer func() { throttler.stop <- true }()\n\t}\n\n\t\/\/ Initialize control channels\n\th.broadcast = make(Port)\n\th.join = make(chan Connector, h.JoinMaxQueueSize)\n\th.leave = make(chan Connector)\n\th.stop = make(chan bool)\n\n\t\/\/ Initialize ports map\n\th.ports = Ports{}\n\n\t\/\/ Loop until Stop()\n\tfor {\n\t\tselect {\n\n\t\t\/\/ Broadcast\n\t\tcase msg := <-h.broadcast:\n\t\t\tvar recipients = h.ports\n\n\t\t\t\/\/ OnHubBroadcaster event trigger\n\t\t\tif t, ok := trigger.(OnHubBroadcaster); ok {\n\t\t\t\tports, err := t.OnHubBroadcast(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t} else if ports != nil {\n\t\t\t\t\trecipients = ports\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Send message to the recipients\n\t\t\tfor conn, _ := range recipients {\n\t\t\t\tif msg.From == conn {\n\t\t\t\t\t\/\/ Skip original connector\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err := conn.Send(msg); err != nil {\n\t\t\t\t\tconn.Close(err)\n\t\t\t\t\tdelete(h.ports, conn)\n\t\t\t\t\t\/\/ OnHubLeaver event trigger\n\t\t\t\t\tif t, ok := trigger.(OnHubLeaver); ok {\n\t\t\t\t\t\tt.OnHubLeave(conn)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ Join\n\t\tcase conn := <-h.join:\n\t\t\t\/\/ Throttle\n\t\t\tif throttler != nil {\n\t\t\t\t<-h.timeslots\n\t\t\t}\n\n\t\t\t\/\/ OnHubJoiner event trigger\n\t\t\tif t, ok := trigger.(OnHubJoiner); ok {\n\t\t\t\tif t.OnHubJoin(conn) != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\th.ports[conn] = true\n\n\t\t\/\/ Leave\n\t\tcase conn := <-h.leave:\n\t\t\tdelete(h.ports, conn)\n\t\t\t\/\/ OnHubLeaver event trigger\n\t\t\tif t, ok := trigger.(OnHubLeaver); ok {\n\t\t\t\tt.OnHubLeave(conn)\n\t\t\t}\n\n\t\t\/\/ Stop\n\t\tcase <-h.stop:\n\t\t\t\/\/ OnHubCloser event trigger\n\t\t\tif t, ok := trigger.(OnHubCloser); ok {\n\t\t\t\tt.OnHubClose(h.ports)\n\t\t\t}\n\t\t\treturn nil\n\n\t\t}\n\t}\n\treturn ErrShouldNotReachThis\n}\n<commit_msg>Bugfix release 2.0.1<commit_after>\/\/ Copyright 2015 The Pivo Authors. All rights reserved.\n\/\/ Use of this source code is governed by a Simplified BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage pivo\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"time\"\n)\n\n\/\/ The Pivo package version numbers\nconst Version = \"2.0.1\"\n\n\/\/ Default value for the join rate limit.\n\/\/ Currently defined as a maximum of 64 per seconds.\nconst DefaultJoinLimitRateInterval = time.Second \/ 64\n\n\/\/ Default value for the allowed burst over the join limit.\n\/\/ Currently defined as a maximum of 32 more connections.\nconst DefaultJoinLimitRateBurst = 32\n\n\/\/ Default value for the size of the join queue.\n\/\/ Currently defined as 256 pending connections before new\n\/\/ ones are discarded.\nconst DefaultJoinMaxQueueSize = 256\n\n\/\/ Marker for binary message type\nconst IsBinaryMessage = 1\n\n\/\/ Marker for text message type\nconst IsTextMessage = 0\n\n\/\/ Error is thrown when the join queue is full.\nvar ErrJoinQueueIsFull = errors.New(\"join queue is full\")\n\n\/\/ Error is thrown when a connector has its port buffer full.\nvar ErrPortBufferIsFull = errors.New(\"port buffer is full\")\n\n\/\/ Error is thrown when unexpected code path is reached.\nvar ErrShouldNotReachThis = errors.New(\"should not reach this\")\n\n\/\/ Connector is the interface that wraps the basic methods needed\n\/\/ to use a connector with the hub.\ntype Connector interface {\n\tClose(error) error\n\tProtocol() string\n\tRemoteAddr() net.Addr\n\tSend(*Message) error\n}\n\n\/\/ OnBinaryReader is called when binary data is read from\n\/\/ a connector.\ntype OnBinaryReader interface {\n\tOnBinaryRead([]byte) error\n}\n\n\/\/ OnBinaryReadCloser is the interface that wraps basic methods\n\/\/ both for reading binary data and getting notified about a\n\/\/ closed connection.\ntype OnBinaryReadCloser interface {\n\tOnBinaryReader\n\tOnCloser\n}\n\n\/\/ OnCloser is the interface that requires a method to call upon\n\/\/ disconnection of a connector.\ntype OnCloser interface {\n\tOnClose(error) error\n}\n\n\/\/ OnReader is the interface that wraps the methods called when\n\/\/ data is read from a connector.\ntype OnReader interface {\n\tOnBinaryReader\n\tOnTextReader\n}\n\n\/\/ OnReadCloser wraps the basic methods needed to be notified\n\/\/ when data has been read or the connector has closed.\ntype OnReadCloser interface {\n\tOnCloser\n\tOnReader\n}\n\n\/\/ OnTextReader is called when text data is read from\n\/\/ a connector.\ntype OnTextReader interface {\n\tOnTextRead(string) error\n}\n\n\/\/ OnTextReadCloser is the interface that wraps basic methods\n\/\/ both for reading text data and getting notified about a\n\/\/ closed connection.\ntype OnTextReadCloser interface {\n\tOnTextReader\n\tOnCloser\n}\n\n\/\/ A Hub is a collection of connectors with some specified settings.\ntype Hub struct {\n\t\/\/ The interval at which new connections are processed.\n\t\/\/ A Duration of time.Second \/ 100 would mean hundred\n\t\/\/ of new connections are processed in a second.\n\tJoinLimitRateInterval time.Duration\n\n\t\/\/ The size of the burst allowed when the join rate limit\n\t\/\/ is reached. A value of 10 would mean that 10 extra\n\t\/\/ connections may wait to join the hub.\n\tJoinLimitRateBurst uint\n\n\t\/\/ The maximum number of new connections waiting to\n\t\/\/ join the hub before ErrJoinQueueIsFull is being\n\t\/\/ thrown.\n\tJoinMaxQueueSize uint\n\n\tbroadcast Port \/\/ Broadcast channel\n\tjoin chan Connector \/\/ Join the connector\n\tleave chan Connector \/\/ Disjoin the connector\n\tports Ports \/\/ Map of connected connectors\n\tstop chan bool \/\/ Stop the hub\n\ttimeslots chan bool \/\/ Join burst timeslots\n}\n\n\/\/ OnHubBroadcaster is triggered before the message is delivered.\n\/\/ Callee may alter the message in any way as well as\n\/\/ provide a different list of recipients ports.\n\/\/ If error is thrown, broadcast will not occur at all.\ntype OnHubBroadcaster interface {\n\tOnHubBroadcast(*Message) (Ports, error)\n}\n\n\/\/ OnHubJoiner is triggered before a connector may join the hub.\n\/\/ If error is thrown, join will not occur at all.\ntype OnHubJoiner interface {\n\tOnHubJoin(Connector) error\n}\n\n\/\/ OnHubLeaver is triggered before a connector disjoin the hub.\n\/\/ Nothing may prevent this from happening at this stage.\ntype OnHubLeaver interface {\n\tOnHubLeave(Connector)\n}\n\n\/\/ OnHubCloser is triggered right before the hub stops.\n\/\/ The map of connected connectors is provided and is\n\/\/ guaranteed to be accurate at the time the callee has it.\ntype OnHubCloser interface {\n\tOnHubClose(Ports)\n}\n\n\/\/ Message is a data structure with bytes of data, type of data,\n\/\/ and its original connector.\ntype Message struct {\n\tData []byte \/\/ Bytes of data\n\tFrom Connector \/\/ Original connector\n\tType int \/\/ Type of data\n}\n\n\/\/ Port is a channel transporting a pointer to a Message.\ntype Port chan *Message\n\n\/\/ Ports is a map of Connectors that have joined the hub.\ntype Ports map[Connector]bool\n\n\/\/ throttler is the timeslots ticker.\ntype throttler struct {\n\tstop chan bool \/\/ Stop the throttler\n}\n\n\/\/ BinaryMessage formats a binary message and returns\n\/\/ a pointer to it.\nfunc BinaryMessage(from Connector, bin []byte) *Message {\n\treturn &Message{bin, from, IsBinaryMessage}\n}\n\n\/\/ DefaultHub instantiate a new hub with default settings.\nfunc DefaultHub() *Hub {\n\treturn &Hub{\n\t\tJoinLimitRateBurst: DefaultJoinLimitRateBurst,\n\t\tJoinLimitRateInterval: DefaultJoinLimitRateInterval,\n\t\tJoinMaxQueueSize: DefaultJoinMaxQueueSize,\n\t}\n}\n\n\/\/ TextMessage formats a text message and returns\n\/\/ a pointer to it.\nfunc TextMessage(from Connector, text string) *Message {\n\treturn &Message{[]byte(text), from, IsTextMessage}\n}\n\n\/\/ throttler fills the timeslots channel at specified interval.\nfunc (h *Hub) throttler(rate time.Duration) *throttler {\n\tthrottler := &throttler{stop: make(chan bool)}\n\tgo func() {\n\t\th.timeslots = make(chan bool, h.JoinLimitRateBurst+1)\n\t\tticker := time.NewTicker(rate)\n\t\tdefer func() { ticker.Stop(); close(h.timeslots) }()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-throttler.stop:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tselect {\n\t\t\t\tcase h.timeslots <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn throttler\n}\n\n\/\/ Broadcast sends a message to all connectors on the hub.\nfunc (h *Hub) Broadcast(m *Message) { h.broadcast <- m }\n\n\/\/ Leave disjoins the given connector from the hub.\nfunc (h *Hub) Leave(c Connector) { h.leave <- c }\n\n\/\/ Stop brings the hub down.\nfunc (h *Hub) Stop() { h.stop <- true }\n\n\/\/ Join adds the given connector to the hub.\nfunc (h *Hub) Join(c Connector) error {\n\tselect {\n\tcase h.join <- c:\n\tdefault:\n\t\treturn ErrJoinQueueIsFull\n\t}\n\treturn nil\n}\n\n\/\/ StartAndServe serves the hub until Stop() is requested.\nfunc (h *Hub) StartAndServe(trigger interface{}) error {\n\tvar throttler *throttler\n\n\t\/\/ Start a throttler if necessary\n\tif h.JoinLimitRateInterval > 0 {\n\t\tthrottler = h.throttler(h.JoinLimitRateInterval)\n\t\tdefer func() { throttler.stop <- true }()\n\t}\n\n\t\/\/ Initialize control channels\n\th.broadcast = make(Port)\n\th.join = make(chan Connector, h.JoinMaxQueueSize)\n\th.leave = make(chan Connector)\n\th.stop = make(chan bool)\n\n\t\/\/ Initialize ports map\n\th.ports = Ports{}\n\n\t\/\/ Loop until Stop()\n\tfor {\n\t\tselect {\n\n\t\t\/\/ Broadcast\n\t\tcase msg := <-h.broadcast:\n\t\t\tvar recipients = h.ports\n\n\t\t\t\/\/ OnHubBroadcaster event trigger\n\t\t\tif t, ok := trigger.(OnHubBroadcaster); ok {\n\t\t\t\tports, err := t.OnHubBroadcast(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t} else if ports != nil {\n\t\t\t\t\trecipients = ports\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Send message to the recipients\n\t\t\tfor conn, _ := range recipients {\n\t\t\t\tif msg.From == conn {\n\t\t\t\t\t\/\/ Skip original connector\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err := conn.Send(msg); err != nil {\n\t\t\t\t\tconn.Close(err)\n\t\t\t\t\tdelete(h.ports, conn)\n\t\t\t\t\t\/\/ OnHubLeaver event trigger\n\t\t\t\t\tif t, ok := trigger.(OnHubLeaver); ok {\n\t\t\t\t\t\tt.OnHubLeave(conn)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ Join\n\t\tcase conn := <-h.join:\n\t\t\t\/\/ Throttle\n\t\t\tif throttler != nil {\n\t\t\t\t<-h.timeslots\n\t\t\t}\n\n\t\t\t\/\/ OnHubJoiner event trigger\n\t\t\tif t, ok := trigger.(OnHubJoiner); ok {\n\t\t\t\tif t.OnHubJoin(conn) != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\th.ports[conn] = true\n\n\t\t\/\/ Leave\n\t\tcase conn := <-h.leave:\n\t\t\tdelete(h.ports, conn)\n\t\t\t\/\/ OnHubLeaver event trigger\n\t\t\tif t, ok := trigger.(OnHubLeaver); ok {\n\t\t\t\tt.OnHubLeave(conn)\n\t\t\t}\n\n\t\t\/\/ Stop\n\t\tcase <-h.stop:\n\t\t\t\/\/ OnHubCloser event trigger\n\t\t\tif t, ok := trigger.(OnHubCloser); ok {\n\t\t\t\tt.OnHubClose(h.ports)\n\t\t\t}\n\t\t\treturn nil\n\n\t\t}\n\t}\n\treturn ErrShouldNotReachThis\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mixins\n\nimport (\n\t\"github.com\/google\/gxui\"\n\t\"github.com\/google\/gxui\/math\"\n\t\"github.com\/google\/gxui\/mixins\/outer\"\n\t\"github.com\/google\/gxui\/mixins\/parts\"\n)\n\ntype WindowOuter interface {\n\tgxui.Window\n\touter.Attachable\n\touter.Bounds\n\touter.IsVisibler\n\touter.LayoutChildren\n\touter.PaintChilder\n\touter.Painter\n\touter.Parenter\n}\n\ntype Window struct {\n\tparts.Attachable\n\tparts.Container\n\tparts.Paddable\n\tparts.PaintChildren\n\n\tdriver gxui.Driver\n\touter WindowOuter\n\tviewport gxui.Viewport\n\tmouseController *gxui.MouseController\n\tkeyboardController *gxui.KeyboardController\n\tfocusController *gxui.FocusController\n\tlayoutPending bool\n\tdrawPending bool\n\tupdatePending bool\n\tonAttach gxui.Event\n\tonDetach gxui.Event\n}\n\nfunc (w *Window) requestUpdate() {\n\tif !w.updatePending {\n\t\tw.updatePending = true\n\t\tw.driver.Call(w.update)\n\t}\n}\n\nfunc (w *Window) update() {\n\tif !w.Attached() {\n\t\t\/\/ Window was detached between requestUpdate() and update()\n\t\tw.updatePending = false\n\t\tw.layoutPending = false\n\t\tw.drawPending = false\n\t\treturn\n\t}\n\tw.updatePending = false\n\tif w.layoutPending {\n\t\tw.layoutPending = false\n\t\tw.drawPending = true\n\t\tw.outer.LayoutChildren()\n\t}\n\tif w.drawPending {\n\t\tw.drawPending = false\n\t\tw.Draw()\n\t}\n}\n\nfunc (w *Window) Init(outer WindowOuter, driver gxui.Driver, width, height int, title string) {\n\tw.Attachable.Init(outer)\n\tw.Container.Init(outer)\n\tw.Paddable.Init(outer)\n\tw.PaintChildren.Init(outer)\n\tw.outer = outer\n\tw.driver = driver\n\tw.viewport = driver.CreateViewport(width, height, title)\n\tw.focusController = gxui.CreateFocusController(outer)\n\tw.mouseController = gxui.CreateMouseController(outer, w.focusController)\n\tw.keyboardController = gxui.CreateKeyboardController(outer)\n\tw.viewport.OnResize(func() {\n\t\tw.outer.LayoutChildren()\n\t\tw.Draw()\n\t})\n\n\t\/\/ Window starts shown\n\tw.Attach()\n\n\t\/\/ Interface compliance test\n\t_ = gxui.Window(w)\n}\n\nfunc (w *Window) Draw() gxui.Canvas {\n\tc := w.driver.CreateCanvas(w.viewport.SizeDips())\n\tw.outer.Paint(c)\n\tc.Complete()\n\tw.viewport.SetCanvas(c)\n\tc.Release()\n\treturn c\n}\n\nfunc (w *Window) LayoutChildren() {\n\ts := w.Bounds().Size().Contract(w.Padding())\n\to := w.Padding().LT()\n\tfor _, c := range w.outer.Children() {\n\t\tc.Layout(c.DesiredSize(math.ZeroSize, s).Rect().Offset(o))\n\t}\n}\n\nfunc (w *Window) Bounds() math.Rect {\n\ts := w.viewport.SizeDips()\n\treturn math.CreateRect(0, 0, s.W, s.H)\n}\n\nfunc (w *Window) Parent() gxui.Container {\n\treturn nil\n}\n\nfunc (w *Window) Viewport() gxui.Viewport {\n\treturn w.viewport\n}\n\nfunc (w *Window) Title() string {\n\treturn w.viewport.Title()\n}\n\nfunc (w *Window) SetTitle(t string) {\n\tw.viewport.SetTitle(t)\n}\n\nfunc (w *Window) Scale() float32 {\n\treturn w.viewport.Scale()\n}\n\nfunc (w *Window) SetScale(scale float32) {\n\tw.viewport.SetScale(scale)\n}\n\nfunc (w *Window) Show() {\n\tw.Attach()\n\tw.viewport.Show()\n}\n\nfunc (w *Window) Hide() {\n\tw.Detach()\n\tw.viewport.Hide()\n}\n\nfunc (w *Window) Close() {\n\tw.Detach()\n\tw.viewport.Close()\n}\n\nfunc (w *Window) Focus() gxui.Focusable {\n\treturn w.focusController.Focus()\n}\n\nfunc (w *Window) SetFocus(c gxui.Control) bool {\n\tfc := w.focusController\n\tif c == nil {\n\t\tfc.SetFocus(nil)\n\t\treturn true\n\t}\n\tif f := fc.Focusable(c); f != nil {\n\t\tfc.SetFocus(f)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (w *Window) IsVisible() bool {\n\treturn true\n}\n\nfunc (w *Window) OnClose(f func()) gxui.EventSubscription {\n\treturn w.viewport.OnClose(f)\n}\n\nfunc (w *Window) OnResize(f func()) gxui.EventSubscription {\n\treturn w.viewport.OnResize(f)\n}\n\nfunc (w *Window) OnMouseMove(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseMove(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseEnter(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseEnter(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseExit(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseExit(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseDown(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseDown(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseUp(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseUp(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseScroll(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseScroll(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnKeyDown(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyDown(f)\n}\n\nfunc (w *Window) OnKeyUp(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyUp(f)\n}\n\nfunc (w *Window) OnKeyRepeat(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyRepeat(f)\n}\n\nfunc (w *Window) OnKeyStroke(f func(gxui.KeyStrokeEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyStroke(f)\n}\n\nfunc (w *Window) Relayout() {\n\tw.layoutPending = true\n\tw.requestUpdate()\n}\n\nfunc (w *Window) Redraw() {\n\tw.drawPending = true\n\tw.requestUpdate()\n}\n\nfunc (w *Window) Click(gxui.MouseEvent) {}\nfunc (w *Window) DoubleClick(gxui.MouseEvent) {}\n\nfunc (w *Window) KeyPress(ev gxui.KeyboardEvent) {\n\tif ev.Key == gxui.KeyTab {\n\t\tif ev.Modifier&gxui.ModShift != 0 {\n\t\t\tw.focusController.FocusPrev()\n\t\t} else {\n\t\t\tw.focusController.FocusNext()\n\t\t}\n\t}\n}\nfunc (w *Window) KeyStroke(gxui.KeyStrokeEvent) {}\n<commit_msg>Don't attempt to create a zero-size canvas when the window is minimized.<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mixins\n\nimport (\n\t\"github.com\/google\/gxui\"\n\t\"github.com\/google\/gxui\/math\"\n\t\"github.com\/google\/gxui\/mixins\/outer\"\n\t\"github.com\/google\/gxui\/mixins\/parts\"\n)\n\ntype WindowOuter interface {\n\tgxui.Window\n\touter.Attachable\n\touter.Bounds\n\touter.IsVisibler\n\touter.LayoutChildren\n\touter.PaintChilder\n\touter.Painter\n\touter.Parenter\n}\n\ntype Window struct {\n\tparts.Attachable\n\tparts.Container\n\tparts.Paddable\n\tparts.PaintChildren\n\n\tdriver gxui.Driver\n\touter WindowOuter\n\tviewport gxui.Viewport\n\tmouseController *gxui.MouseController\n\tkeyboardController *gxui.KeyboardController\n\tfocusController *gxui.FocusController\n\tlayoutPending bool\n\tdrawPending bool\n\tupdatePending bool\n\tonAttach gxui.Event\n\tonDetach gxui.Event\n}\n\nfunc (w *Window) requestUpdate() {\n\tif !w.updatePending {\n\t\tw.updatePending = true\n\t\tw.driver.Call(w.update)\n\t}\n}\n\nfunc (w *Window) update() {\n\tif !w.Attached() {\n\t\t\/\/ Window was detached between requestUpdate() and update()\n\t\tw.updatePending = false\n\t\tw.layoutPending = false\n\t\tw.drawPending = false\n\t\treturn\n\t}\n\tw.updatePending = false\n\tif w.layoutPending {\n\t\tw.layoutPending = false\n\t\tw.drawPending = true\n\t\tw.outer.LayoutChildren()\n\t}\n\tif w.drawPending {\n\t\tw.drawPending = false\n\t\tw.Draw()\n\t}\n}\n\nfunc (w *Window) Init(outer WindowOuter, driver gxui.Driver, width, height int, title string) {\n\tw.Attachable.Init(outer)\n\tw.Container.Init(outer)\n\tw.Paddable.Init(outer)\n\tw.PaintChildren.Init(outer)\n\tw.outer = outer\n\tw.driver = driver\n\tw.viewport = driver.CreateViewport(width, height, title)\n\tw.focusController = gxui.CreateFocusController(outer)\n\tw.mouseController = gxui.CreateMouseController(outer, w.focusController)\n\tw.keyboardController = gxui.CreateKeyboardController(outer)\n\tw.viewport.OnResize(func() {\n\t\tw.outer.LayoutChildren()\n\t\tw.Draw()\n\t})\n\n\t\/\/ Window starts shown\n\tw.Attach()\n\n\t\/\/ Interface compliance test\n\t_ = gxui.Window(w)\n}\n\nfunc (w *Window) Draw() gxui.Canvas {\n\tif s := w.viewport.SizeDips(); s != math.ZeroSize {\n\t\tc := w.driver.CreateCanvas(w.viewport.SizeDips())\n\t\tw.outer.Paint(c)\n\t\tc.Complete()\n\t\tw.viewport.SetCanvas(c)\n\t\tc.Release()\n\t\treturn c\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (w *Window) LayoutChildren() {\n\ts := w.Bounds().Size().Contract(w.Padding())\n\to := w.Padding().LT()\n\tfor _, c := range w.outer.Children() {\n\t\tc.Layout(c.DesiredSize(math.ZeroSize, s).Rect().Offset(o))\n\t}\n}\n\nfunc (w *Window) Bounds() math.Rect {\n\ts := w.viewport.SizeDips()\n\treturn math.CreateRect(0, 0, s.W, s.H)\n}\n\nfunc (w *Window) Parent() gxui.Container {\n\treturn nil\n}\n\nfunc (w *Window) Viewport() gxui.Viewport {\n\treturn w.viewport\n}\n\nfunc (w *Window) Title() string {\n\treturn w.viewport.Title()\n}\n\nfunc (w *Window) SetTitle(t string) {\n\tw.viewport.SetTitle(t)\n}\n\nfunc (w *Window) Scale() float32 {\n\treturn w.viewport.Scale()\n}\n\nfunc (w *Window) SetScale(scale float32) {\n\tw.viewport.SetScale(scale)\n}\n\nfunc (w *Window) Show() {\n\tw.Attach()\n\tw.viewport.Show()\n}\n\nfunc (w *Window) Hide() {\n\tw.Detach()\n\tw.viewport.Hide()\n}\n\nfunc (w *Window) Close() {\n\tw.Detach()\n\tw.viewport.Close()\n}\n\nfunc (w *Window) Focus() gxui.Focusable {\n\treturn w.focusController.Focus()\n}\n\nfunc (w *Window) SetFocus(c gxui.Control) bool {\n\tfc := w.focusController\n\tif c == nil {\n\t\tfc.SetFocus(nil)\n\t\treturn true\n\t}\n\tif f := fc.Focusable(c); f != nil {\n\t\tfc.SetFocus(f)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (w *Window) IsVisible() bool {\n\treturn true\n}\n\nfunc (w *Window) OnClose(f func()) gxui.EventSubscription {\n\treturn w.viewport.OnClose(f)\n}\n\nfunc (w *Window) OnResize(f func()) gxui.EventSubscription {\n\treturn w.viewport.OnResize(f)\n}\n\nfunc (w *Window) OnMouseMove(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseMove(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseEnter(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseEnter(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseExit(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseExit(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseDown(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseDown(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseUp(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseUp(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnMouseScroll(f func(gxui.MouseEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnMouseScroll(func(ev gxui.MouseEvent) {\n\t\tev.Window = w\n\t\tev.WindowPoint = ev.Point\n\t\tf(ev)\n\t})\n}\n\nfunc (w *Window) OnKeyDown(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyDown(f)\n}\n\nfunc (w *Window) OnKeyUp(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyUp(f)\n}\n\nfunc (w *Window) OnKeyRepeat(f func(gxui.KeyboardEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyRepeat(f)\n}\n\nfunc (w *Window) OnKeyStroke(f func(gxui.KeyStrokeEvent)) gxui.EventSubscription {\n\treturn w.viewport.OnKeyStroke(f)\n}\n\nfunc (w *Window) Relayout() {\n\tw.layoutPending = true\n\tw.requestUpdate()\n}\n\nfunc (w *Window) Redraw() {\n\tw.drawPending = true\n\tw.requestUpdate()\n}\n\nfunc (w *Window) Click(gxui.MouseEvent) {}\nfunc (w *Window) DoubleClick(gxui.MouseEvent) {}\n\nfunc (w *Window) KeyPress(ev gxui.KeyboardEvent) {\n\tif ev.Key == gxui.KeyTab {\n\t\tif ev.Modifier&gxui.ModShift != 0 {\n\t\t\tw.focusController.FocusPrev()\n\t\t} else {\n\t\t\tw.focusController.FocusNext()\n\t\t}\n\t}\n}\nfunc (w *Window) KeyStroke(gxui.KeyStrokeEvent) {}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014-2015 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage keyserver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com\/yahoo\/coname\"\n\t\"github.com\/yahoo\/coname\/keyserver\/dkim\"\n\t\"github.com\/yahoo\/coname\/keyserver\/replication\"\n\t\"github.com\/yahoo\/coname\/proto\"\n\t\"github.com\/yahoo\/coname\/vrf\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc (ks *Keyserver) verifyUpdateDeterministic(prevUpdate *proto.UpdateRequest, req *proto.UpdateRequest) error {\n\tvar prevEntry *proto.Entry\n\tif prevUpdate != nil {\n\t\tprevEntry = &prevUpdate.Update.NewEntry.Entry\n\t}\n\tif err := coname.VerifyUpdate(prevEntry, req.Update); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (ks *Keyserver) verifyUpdateEdge(req *proto.UpdateRequest) error {\n\tprevUpdate, err := ks.getUpdate(req.Update.NewEntry.Index, math.MaxUint64)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn fmt.Errorf(\"internal error\")\n\t}\n\tif prevUpdate == nil { \/\/ registration: check email proof\n\t\tif !ks.insecureSkipEmailProof {\n\t\t\temail, payload, err := dkim.CheckEmailProof(req.DKIMProof, ks.emailProofToAddr,\n\t\t\t\tks.emailProofSubjectPrefix, ks.lookupTXT, ks.clk.Now)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif got, want := email, req.LookupParameters.UserId; got != want {\n\t\t\t\treturn fmt.Errorf(\"requested user ID does not match the email proof: %q != %q\", got, want)\n\t\t\t}\n\t\t\tlastAtIndex := strings.LastIndex(req.LookupParameters.UserId, \"@\")\n\t\t\tif lastAtIndex == -1 {\n\t\t\t\treturn fmt.Errorf(\"requested user id is not a valid email address: %q\", req.LookupParameters.UserId)\n\t\t\t}\n\t\t\tif _, ok := ks.emailProofAllowedDomains[req.LookupParameters.UserId[:lastAtIndex]]; !ok {\n\t\t\t\treturn fmt.Errorf(\"domain not in registration whitelist: %q\", req.LookupParameters.UserId[:lastAtIndex])\n\t\t\t}\n\t\t\tentryHash, err := base64.StdEncoding.DecodeString(payload)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"bad base64 in email proof: %q\", payload)\n\t\t\t}\n\t\t\tvar entryHashProposed [32]byte\n\t\t\tsha3.ShakeSum256(entryHashProposed[:], req.Update.NewEntry.Encoding)\n\t\t\tif !bytes.Equal(entryHashProposed[:], entryHash[:]) {\n\t\t\t\treturn fmt.Errorf(\"email proof does not match requested entry\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ks.verifyUpdateDeterministic(prevUpdate, req)\n}\n\ntype updateOutput struct {\n\tEpoch uint64 \/\/ which epoch the update will appear in\n\tError error\n}\n\n\/\/ Update implements proto.E2EKS.UpdateServer\nfunc (ks *Keyserver) Update(ctx context.Context, req *proto.UpdateRequest) (*proto.LookupProof, error) {\n\tctx, _ = context.WithTimeout(ctx, ks.clientTimeout)\n\treq.Update.NewEntry.Index = vrf.Compute([]byte(req.LookupParameters.UserId), ks.vrfSecret)\n\treq.Update.NewEntry.UpdateEncoding()\n\tif err := ks.verifyUpdateEdge(req); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuid := genUID()\n\tch := ks.wr.Wait(uid)\n\tks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{\n\t\tUID: uid,\n\t\tUpdate: req,\n\t})})\n\tselect {\n\tcase <-ctx.Done():\n\t\tks.wr.Notify(uid, nil)\n\t\treturn nil, ctx.Err()\n\tcase v := <-ch:\n\t\tout := v.(updateOutput)\n\t\tif out.Error != nil {\n\t\t\treturn nil, out.Error\n\t\t}\n\t\treturn ks.blockingLookup(ctx, req.LookupParameters, out.Epoch)\n\t}\n}\n<commit_msg>server: better errors for dkim proofs<commit_after>\/\/ Copyright 2014-2015 The Dename Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage keyserver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com\/yahoo\/coname\"\n\t\"github.com\/yahoo\/coname\/keyserver\/dkim\"\n\t\"github.com\/yahoo\/coname\/keyserver\/replication\"\n\t\"github.com\/yahoo\/coname\/proto\"\n\t\"github.com\/yahoo\/coname\/vrf\"\n\t\"golang.org\/x\/crypto\/sha3\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc (ks *Keyserver) verifyUpdateDeterministic(prevUpdate *proto.UpdateRequest, req *proto.UpdateRequest) error {\n\tvar prevEntry *proto.Entry\n\tif prevUpdate != nil {\n\t\tprevEntry = &prevUpdate.Update.NewEntry.Entry\n\t}\n\tif err := coname.VerifyUpdate(prevEntry, req.Update); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (ks *Keyserver) verifyUpdateEdge(req *proto.UpdateRequest) error {\n\tprevUpdate, err := ks.getUpdate(req.Update.NewEntry.Index, math.MaxUint64)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn fmt.Errorf(\"internal error\")\n\t}\n\tif prevUpdate == nil { \/\/ registration: check email proof\n\t\tif !ks.insecureSkipEmailProof {\n\t\t\temail, payload, err := dkim.CheckEmailProof(req.DKIMProof, ks.emailProofToAddr,\n\t\t\t\tks.emailProofSubjectPrefix, ks.lookupTXT, ks.clk.Now)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to verify DKIM proof: %s\", err)\n\t\t\t}\n\t\t\tif got, want := email, req.LookupParameters.UserId; got != want {\n\t\t\t\treturn fmt.Errorf(\"requested user ID does not match the email proof: %q != %q\", got, want)\n\t\t\t}\n\t\t\tlastAtIndex := strings.LastIndex(req.LookupParameters.UserId, \"@\")\n\t\t\tif lastAtIndex == -1 {\n\t\t\t\treturn fmt.Errorf(\"requested user id is not a valid email address: %q\", req.LookupParameters.UserId)\n\t\t\t}\n\t\t\tif _, ok := ks.emailProofAllowedDomains[req.LookupParameters.UserId[:lastAtIndex]]; !ok {\n\t\t\t\treturn fmt.Errorf(\"domain not in registration whitelist: %q\", req.LookupParameters.UserId[:lastAtIndex])\n\t\t\t}\n\t\t\tentryHash, err := base64.StdEncoding.DecodeString(payload)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"bad base64 in email proof: %q\", payload)\n\t\t\t}\n\t\t\tvar entryHashProposed [32]byte\n\t\t\tsha3.ShakeSum256(entryHashProposed[:], req.Update.NewEntry.Encoding)\n\t\t\tif !bytes.Equal(entryHashProposed[:], entryHash[:]) {\n\t\t\t\treturn fmt.Errorf(\"email proof does not match requested entry\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ks.verifyUpdateDeterministic(prevUpdate, req)\n}\n\ntype updateOutput struct {\n\tEpoch uint64 \/\/ which epoch the update will appear in\n\tError error\n}\n\n\/\/ Update implements proto.E2EKS.UpdateServer\nfunc (ks *Keyserver) Update(ctx context.Context, req *proto.UpdateRequest) (*proto.LookupProof, error) {\n\tctx, _ = context.WithTimeout(ctx, ks.clientTimeout)\n\treq.Update.NewEntry.Index = vrf.Compute([]byte(req.LookupParameters.UserId), ks.vrfSecret)\n\treq.Update.NewEntry.UpdateEncoding()\n\tif err := ks.verifyUpdateEdge(req); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuid := genUID()\n\tch := ks.wr.Wait(uid)\n\tks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{\n\t\tUID: uid,\n\t\tUpdate: req,\n\t})})\n\tselect {\n\tcase <-ctx.Done():\n\t\tks.wr.Notify(uid, nil)\n\t\treturn nil, ctx.Err()\n\tcase v := <-ch:\n\t\tout := v.(updateOutput)\n\t\tif out.Error != nil {\n\t\t\treturn nil, out.Error\n\t\t}\n\t\treturn ks.blockingLookup(ctx, req.LookupParameters, out.Epoch)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Library functions for working with HUE\npackage hue\n\nimport (\n \"fmt\"\n \"net\/http\"\n \"encoding\/json\"\n \"io\"\n)\n\nconst meethue = \"http:\/\/www.meethue.com\/api\/nupnp\"\nconst developer_name = \"newdeveloper\"\n\nfunc ConvertJsonToMap(b io.Reader) (m map[string] interface{}, ok bool) {\n dec := json.NewDecoder(b)\n var f interface{}\n dec.Decode(&f)\n \/\/Its possible we got an array rather than a simple map\n \/\/In this case, take the first element\n switch v := f.(type) {\n case []interface{}:\n m, ok = v[0].(map[string]interface{})\n default:\n m, ok = v.(map[string] interface{})\n }\n return\n}\n\nfunc GetHueIp() string {\n resp, err := http.Get(meethue)\n if (err != nil) {\n fmt.Println(\"Error %v\", err)\n }\n ms, ok := ConvertJsonToMap(resp.Body)\n if (!ok) {\n fmt.Println(\"Could not process the response\")\n }\n\n ip := ms[\"internalipaddress\"].(string)\n return ip\n}\n\n\/\/func IssueGetRequest(request_uri string) map[string] interface{} {\n\/\/}\n\n<commit_msg>created struct for status message<commit_after>\/\/ Library functions for working with HUE\npackage hue\n\nimport (\n \"fmt\"\n \"net\/http\"\n \"encoding\/json\"\n \"io\"\n)\n\ntype MsgWrapper struct {\n msg StatusMessage\n}\n\ntype StatusMessage struct {\n Config map[string] interface{}\n Lights map[string] LightMessage\n Groups map[string] interface{}\n Schedules map[string] interface{}\n Scenes map[string] interface{}\n}\n\ntype LightState struct {\n On bool\n Bri int\n Hue int\n Sat int\n Xy []float64\n Alert string\n Effect string\n Colormode string\n Reachable bool\n}\n\ntype LightMessage struct {\n State LightState\n Name string\n}\n\ntype LightsDict struct {\n dict map[string] interface{}\n}\n\ntype Lights interface{}\ntype Groups interface{}\ntype Config interface{}\ntype Schedules interface{}\n\nconst meethue = \"http:\/\/www.meethue.com\/api\/nupnp\"\nconst developer_name = \"newdeveloper\"\n\nfunc DoPut(url, data string) {\n req, err := http.NewRequest(\"PUT\", url, data)\n if err != nil {\n fmt.Printf(\"Error: %v\\n\", err)\n }\n client := http.Client{}\n resp, err := client.Do(req)\n fmt.Printf(\"Response: %v\\n\", resp)\n\n}\n\nfunc GetStatusMsg() string {\n resp, err := http.Get(\"http:\/\/10.0.1.4\/api\/newdeveloper\")\n if err != nil {\n fmt.Println(\"Error: %v\", err)\n }\n fmt.Println(\"%v\", resp)\n dec := json.NewDecoder(resp.Body)\n var m StatusMessage\n err = dec.Decode(&m)\n if err != nil {\n fmt.Println(\"Error: %v\", err)\n }\n fmt.Printf(\"%v\\n\", m.Lights[\"1\"])\n resp, err = DoPut(\"http:\/\/10.0.1.4\/api\/lights\/1\/state\", \"{ \\\"on\\\":false }\")\n if err != nil {\n fmt.Println(\"Error: %v\", err)\n }\n fmt.Println(resp)\n return \"success\"\n}\n\nfunc ConvertJsonToMap(b io.Reader) (m map[string] interface{}, ok bool) {\n dec := json.NewDecoder(b)\n var f interface{}\n dec.Decode(&f)\n \/\/Its possible we got an array rather than a simple map\n \/\/In this case, take the first element\n switch v := f.(type) {\n case []interface{}:\n m, ok = v[0].(map[string]interface{})\n default:\n m, ok = v.(map[string] interface{})\n }\n return\n}\n\nfunc GetHueIp() string {\n resp, err := http.Get(meethue)\n if (err != nil) {\n fmt.Println(\"Error %v\", err)\n }\n ms, ok := ConvertJsonToMap(resp.Body)\n if (!ok) {\n fmt.Println(\"Could not process the response\")\n }\n\n ip := ms[\"internalipaddress\"].(string)\n return ip\n}\n\n\/\/func IssueGetRequest(request_uri string) map[string] interface{} {\n\/\/}\n\n<|endoftext|>"} {"text":"<commit_before>package bbio\n\n\/\/ #include <stddef.h>\n\/\/ #include <sys\/types.h>\n\/\/ #include <linux\/i2c-dev.h>\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ #include <linux\/i2c.h>\n\ntype I2C struct {\n\taddress int\n\tfile *os.File\n}\n\nfunc NewI2C(address int) (*I2C, error) {\n\tfilename := fmt.Sprintf(\"\/dev\/i2c-%d\", address)\n\tfile, err := os.OpenFile(filename, os.O_RDWR, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, _, err = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), C.I2C_SLAVE, uintptr(address))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &I2C{address, file}, nil\n}\n\nfunc (i2c *I2C) errMsg() error {\n\treturn fmt.Errorf(\"Error accessing 0x%02X: Check your I2C address\", i2c.address)\n}\n\nfunc (i2c *I2C) ReadUint8() (uint8, error) {\n\tresult := C.i2c_smbus_read_byte(C.int(i2c.file.Fd()))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\treturn uint8(result), nil\n}\n\nfunc (i2c *I2C) WriteUint8(value uint8) error {\n\tresult := C.i2c_smbus_write_byte(C.int(i2c.file.Fd()), C.__u8(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ReadInt8() (int8, error) {\n\tresult := C.i2c_smbus_read_byte(C.int(i2c.file.Fd()))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\tif result >= 1<<7 {\n\t\tresult -= 1 << 8\n\t}\n\treturn int8(result), nil\n}\n\nfunc (i2c *I2C) WriteInt8(value int8) error {\n\tresult := C.i2c_smbus_write_byte(C.int(i2c.file.Fd()), C.__u8(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ReadUint8Cmd(command uint8) (uint8, error) {\n\tresult := C.i2c_smbus_read_byte_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\treturn uint8(result), nil\n}\n\nfunc (i2c *I2C) WriteUint8Cmd(command uint8, value uint8) error {\n\tresult := C.i2c_smbus_write_byte_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u8(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ReadInt8Cmd(command uint8) (int8, error) {\n\tresult := C.i2c_smbus_read_byte_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\tif result >= 1<<7 {\n\t\tresult -= 1 << 8\n\t}\n\treturn int8(result), nil\n}\n\nfunc (i2c *I2C) WriteInt8Cmd(command uint8, value int8) error {\n\tresult := C.i2c_smbus_write_byte_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u8(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ReadUint16Cmd(command uint8) (uint16, error) {\n\tresult := C.i2c_smbus_read_word_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\treturn uint16(result), nil\n}\n\nfunc (i2c *I2C) WriteUint16Cmd(command uint8, value uint16) error {\n\tresult := C.i2c_smbus_write_word_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u16(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ReadInt16Cmd(command uint8) (int16, error) {\n\tresult := C.i2c_smbus_read_word_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, i2c.errMsg()\n\t}\n\tif result >= 1<<15 {\n\t\tresult -= 1 << 16\n\t}\n\treturn int16(result), nil\n}\n\nfunc (i2c *I2C) WriteInt16Cmd(command uint8, value int16) error {\n\tresult := C.i2c_smbus_write_word_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u16(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) WriteQuick() error {\n\tresult := C.i2c_smbus_write_quick(C.int(i2c.file.Fd()), C.I2C_SMBUS_WRITE)\n\tif result != 0 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) ProcessCall(command uint8, value uint16) error {\n\tresult := C.i2c_smbus_process_call(C.int(i2c.file.Fd()), C.__u8(command), C.__u16(value))\n\tif result == -1 {\n\t\treturn i2c.errMsg()\n\t}\n\treturn nil\n}\n\nfunc (i2c *I2C) Read(data []byte) (n int, err error) {\n\treturn\n}\n\nfunc (i2c *I2C) Write(data []byte) (n int, err error) {\n\treturn\n}\n\nfunc (i2c *I2C) Close() error {\n\treturn i2c.file.Close()\n}\n<commit_msg>finished I2C<commit_after>package bbio\n\n\/\/ #include <stddef.h>\n\/\/ #include <sys\/types.h>\n\/\/ #include <linux\/i2c-dev.h>\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n\/\/ I2C is a port of https:\/\/github.com\/bivab\/smbus-cffi\/\ntype I2C struct {\n\tfile *os.File\n\taddress int\n}\n\n\/\/ Connects the object to the specified SMBus.\nfunc NewI2C(bus, address int) (*I2C, error) {\n\tfilename := fmt.Sprintf(\"\/dev\/i2c-%d\", bus)\n\tfile, err := os.OpenFile(filename, os.O_RDWR, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti2c := &I2C{file: file, address: -1}\n\terr = i2c.SetAddress(address)\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn nil, err\n\t}\n\n\treturn i2c, nil\n}\n\nfunc (i2c *I2C) Address() int {\n\treturn i2c.address\n}\n\nfunc (i2c *I2C) SetAddress(address int) error {\n\tif address != i2c.address {\n\t\tresult, _, err := syscall.Syscall(syscall.SYS_IOCTL, i2c.file.Fd(), C.I2C_SLAVE, uintptr(address))\n\t\tif result != 0 {\n\t\t\treturn err\n\t\t}\n\t\ti2c.address = address\n\t}\n\treturn nil\n}\n\n\/\/ Perform SMBus Read Byte transaction.\nfunc (i2c *I2C) ReadUint8() (uint8, error) {\n\tresult, err := C.i2c_smbus_read_byte(C.int(i2c.file.Fd()))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\treturn uint8(result), nil\n}\n\n\/\/ Perform SMBus Write Byte transaction.\nfunc (i2c *I2C) WriteUint8(value uint8) error {\n\tresult, err := C.i2c_smbus_write_byte(C.int(i2c.file.Fd()), C.__u8(value))\n\tif result == -1 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Perform SMBus Read Byte transaction.\nfunc (i2c *I2C) ReadInt8() (int8, error) {\n\tresult, err := C.i2c_smbus_read_byte(C.int(i2c.file.Fd()))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\tif result >= 1<<7 {\n\t\tresult -= 1 << 8\n\t}\n\treturn int8(result), nil\n}\n\n\/\/ Perform SMBus Write Byte transaction.\nfunc (i2c *I2C) WriteInt8(value int8) error {\n\treturn i2c.WriteUint8(uint8(value))\n}\n\n\/\/ Perform SMBus Read Byte Data transaction.\nfunc (i2c *I2C) ReadUint8Cmd(command uint8) (uint8, error) {\n\tresult, err := C.i2c_smbus_read_byte_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\treturn uint8(result), nil\n}\n\n\/\/ Perform SMBus Write Byte Data transaction.\nfunc (i2c *I2C) WriteUint8Cmd(command uint8, value uint8) error {\n\tresult, err := C.i2c_smbus_write_byte_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u8(value))\n\tif result == -1 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Perform SMBus Read Byte Data transaction.\nfunc (i2c *I2C) ReadInt8Cmd(command uint8) (int8, error) {\n\tresult, err := C.i2c_smbus_read_byte_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\tif result >= 1<<7 {\n\t\tresult -= 1 << 8\n\t}\n\treturn int8(result), nil\n}\n\n\/\/ Perform SMBus Write Byte Data transaction.\nfunc (i2c *I2C) WriteInt8Cmd(command uint8, value int8) error {\n\treturn i2c.WriteUint8Cmd(command, uint8(value))\n}\n\n\/\/ Perform SMBus Read Word Data transaction.\nfunc (i2c *I2C) ReadUint16Cmd(command uint8) (uint16, error) {\n\tresult, err := C.i2c_smbus_read_word_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\treturn uint16(result), nil\n}\n\n\/\/ Perform SMBus Write Word Data transaction.\nfunc (i2c *I2C) WriteUint16Cmd(command uint8, value uint16) error {\n\tresult, err := C.i2c_smbus_write_word_data(C.int(i2c.file.Fd()), C.__u8(command), C.__u16(value))\n\tif result == -1 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Perform SMBus Read Word Data transaction.\nfunc (i2c *I2C) ReadInt16Cmd(command uint8) (int16, error) {\n\tresult, err := C.i2c_smbus_read_word_data(C.int(i2c.file.Fd()), C.__u8(command))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\tif result >= 1<<15 {\n\t\tresult -= 1 << 16\n\t}\n\treturn int16(result), nil\n}\n\n\/\/ Perform SMBus Write Word Data transaction.\nfunc (i2c *I2C) WriteInt16Cmd(command uint8, value int16) error {\n\treturn i2c.WriteUint16Cmd(command, uint16(value))\n}\n\n\/\/ Perform SMBus Quick transaction.\nfunc (i2c *I2C) WriteQuick() error {\n\tresult, err := C.i2c_smbus_write_quick(C.int(i2c.file.Fd()), C.I2C_SMBUS_WRITE)\n\tif result != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Perform SMBus Process Call transaction.\n\/\/\n\/\/ Note: although i2c_smbus_process_call returns a value, according to\n\/\/ smbusmodule.c this method does not return a value by default.\n\/\/\n\/\/ Set _compat = False on the SMBus instance to get a return value.\nfunc (i2c *I2C) ProcessCall(command uint8, value uint16) (uint16, error) {\n\tresult, err := C.i2c_smbus_process_call(C.int(i2c.file.Fd()), C.__u8(command), C.__u16(value))\n\tif result == -1 {\n\t\treturn 0, err\n\t}\n\treturn uint16(result), nil\n}\n\n\/\/ Perform SMBus Block Process Call transaction.\nfunc (i2c *I2C) ProcessCallBlock(command uint8, block []byte) ([]byte, error) {\n\tlength := len(block)\n\tif length == 0 || length > C.I2C_SMBUS_BLOCK_MAX {\n\t\treturn nil, fmt.Errorf(\"Length of block is %d, but must be in the range 1 to %d\", length, C.I2C_SMBUS_BLOCK_MAX)\n\t}\n\tdata := make([]byte, length+1, C.I2C_SMBUS_BLOCK_MAX+2)\n\tdata[0] = byte(length)\n\tcopy(data[1:], block)\n\tresult, err := C.i2c_smbus_access(C.int(i2c.file.Fd()), C.I2C_SMBUS_WRITE, C.__u8(command), C.I2C_SMBUS_BLOCK_PROC_CALL, (*C.union_i2c_smbus_data)(unsafe.Pointer(&data[0])))\n\tif result != 0 {\n\t\treturn nil, err\n\t}\n\treturn data[1 : 1+data[0]], nil\n}\n\n\/\/ Perform SMBus Read Block Data transaction.\nfunc (i2c *I2C) ReadBlock(command uint8) ([]byte, error) {\n\tdata := make([]byte, C.I2C_SMBUS_BLOCK_MAX+2)\n\tresult, err := C.i2c_smbus_access(C.int(i2c.file.Fd()), C.I2C_SMBUS_READ, C.__u8(command), C.I2C_SMBUS_BLOCK_DATA, (*C.union_i2c_smbus_data)(unsafe.Pointer(&data[0])))\n\tif result != 0 {\n\t\treturn nil, err\n\t}\n\treturn data[1 : 1+data[0]], nil\n}\n\n\/\/ Perform SMBus Write Block Data transaction.\nfunc (i2c *I2C) WriteBlock(command uint8, block []byte) error {\n\tlength := len(block)\n\tif length == 0 || length > C.I2C_SMBUS_BLOCK_MAX {\n\t\treturn fmt.Errorf(\"Length of block is %d, but must be in the range 1 to %d\", length, C.I2C_SMBUS_BLOCK_MAX)\n\t}\n\tdata := make([]byte, length+1)\n\tdata[0] = byte(length)\n\tcopy(data[1:], block)\n\tresult, err := C.i2c_smbus_access(C.int(i2c.file.Fd()), C.I2C_SMBUS_WRITE, C.__u8(command), C.I2C_SMBUS_BLOCK_DATA, (*C.union_i2c_smbus_data)(unsafe.Pointer(&data[0])))\n\tif result != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ TODO: Perform I2C Block Read transaction.\n\/\/ With if len == 32 then arg = C.I2C_SMBUS_I2C_BLOCK_BROKEN\n\nfunc (i2c *I2C) Close() error {\n\treturn i2c.file.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package drawing defines Drawing struct for DXF.\npackage drawing\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/yofu\/dxf\/block\"\n\t\"github.com\/yofu\/dxf\/class\"\n\t\"github.com\/yofu\/dxf\/color\"\n\t\"github.com\/yofu\/dxf\/entity\"\n\t\"github.com\/yofu\/dxf\/format\"\n\t\"github.com\/yofu\/dxf\/handle\"\n\t\"github.com\/yofu\/dxf\/header\"\n\t\"github.com\/yofu\/dxf\/object\"\n\t\"github.com\/yofu\/dxf\/table\"\n)\n\n\/\/ Drawing contains DXF drawing data.\ntype Drawing struct {\n\tFileName string\n\tLayers map[string]*table.Layer\n\tGroups map[string]*object.Group\n\tStyles map[string]*table.Style\n\tCurrentLayer *table.Layer\n\tCurrentStyle *table.Style\n\tformatter format.Formatter\n\tSections []Section\n\tdictionary *object.Dictionary\n\tgroupdict *object.Dictionary\n\tPlotStyle handle.Handler\n}\n\n\/\/ New creates a new Drawing.\nfunc New() *Drawing {\n\td := new(Drawing)\n\td.Layers = make(map[string]*table.Layer)\n\td.Layers[\"0\"] = table.LY_0\n\td.Groups = make(map[string]*object.Group)\n\td.CurrentLayer = d.Layers[\"0\"]\n\td.Styles = make(map[string]*table.Style)\n\td.Styles[\"STANDARD\"] = table.ST_STANDARD\n\td.CurrentStyle = d.Styles[\"STANDARD\"]\n\td.formatter = format.NewASCII()\n\td.formatter.SetPrecision(16)\n\td.Sections = []Section{\n\t\theader.New(),\n\t\tclass.New(),\n\t\ttable.New(),\n\t\tblock.New(),\n\t\tentity.New(),\n\t\tobject.New(),\n\t}\n\td.dictionary = object.NewDictionary()\n\td.addObject(d.dictionary)\n\twd, ph := object.NewAcDbDictionaryWDFLT(d.dictionary)\n\td.dictionary.AddItem(\"ACAD_PLOTSTYLENAME\", wd)\n\td.addObject(wd)\n\td.addObject(ph)\n\td.groupdict = object.NewDictionary()\n\td.addObject(d.groupdict)\n\td.dictionary.AddItem(\"ACAD_GROUP\", d.groupdict)\n\td.PlotStyle = ph\n\td.Layers[\"0\"].SetPlotStyle(d.PlotStyle)\n\treturn d\n}\n\nfunc (d *Drawing) saveFile(filename string) error {\n\td.setHandle()\n\td.formatter.Reset()\n\tfor _, s := range d.Sections {\n\t\ts.WriteTo(d.formatter)\n\t}\n\td.formatter.WriteString(0, \"EOF\")\n\tw, err := os.Create(filename)\n\tdefer w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\td.formatter.WriteTo(w)\n\treturn nil\n}\n\n\/\/ Save saves the drawing file.\n\/\/ If it is the first time, use SaveAs(filename).\nfunc (d *Drawing) Save() error {\n\tif d.FileName == \"\" {\n\t\treturn errors.New(\"filename is blank, use SaveAs(filename)\")\n\t}\n\treturn d.saveFile(d.FileName)\n}\n\n\/\/ SaveAs saves the drawing file as given filename.\nfunc (d *Drawing) SaveAs(filename string) error {\n\td.FileName = filename\n\treturn d.saveFile(filename)\n}\n\n\/\/ setHandle sets all the handles contained in Drawing.\nfunc (d *Drawing) setHandle() {\n\th := 1\n\tfor _, s := range d.Sections[1:] {\n\t\ts.SetHandle(&h)\n\t}\n\td.Sections[0].SetHandle(&h)\n}\n\nfunc (d *Drawing) Header() *header.Header {\n\treturn d.Sections[0].(*header.Header)\n}\n\n\/\/ Layer returns the named layer if exists.\n\/\/ If setcurrent is true, set current layer to it.\nfunc (d *Drawing) Layer(name string, setcurrent bool) (*table.Layer, error) {\n\tif l, exist := d.Layers[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentLayer = l\n\t\t}\n\t\treturn l, nil\n\t}\n\treturn nil, fmt.Errorf(\"layer %s doesn't exist\", name)\n}\n\n\/\/ AddLayer adds a new layer with given name, color and line type.\n\/\/ If setcurrent is true, set current layer to it.\nfunc (d *Drawing) AddLayer(name string, cl color.ColorNumber, lt *table.LineType, setcurrent bool) (*table.Layer, error) {\n\tif l, exist := d.Layers[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentLayer = l\n\t\t}\n\t\treturn l, fmt.Errorf(\"layer %s already exists\", name)\n\t}\n\tl := table.NewLayer(name, cl, lt)\n\tl.SetPlotStyle(d.PlotStyle)\n\td.Layers[name] = l\n\td.Sections[2].(table.Tables).AddLayer(l)\n\tif setcurrent {\n\t\td.CurrentLayer = l\n\t}\n\treturn l, nil\n}\n\n\/\/ ChangeLayer changes current layer to the named layer.\nfunc (d *Drawing) ChangeLayer(name string) error {\n\tif l, exist := d.Layers[name]; exist {\n\t\td.CurrentLayer = l\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"layer %s doesn't exist\", name)\n}\n\n\/\/ Style returns the named text style if exists.\n\/\/ If setcurrent is true, set current style to it.\nfunc (d *Drawing) Style(name string, setcurrent bool) (*table.Style, error) {\n\tif s, exist := d.Styles[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentStyle = s\n\t\t}\n\t\treturn s, nil\n\t}\n\treturn nil, fmt.Errorf(\"style %s doesn't exist\", name)\n}\n\n\/\/ AddStyle adds a new text style.\n\/\/ If setcurrent is true, set current style to it.\nfunc (d *Drawing) AddStyle(name string, fontname, bigfontname string, setcurrent bool) (*table.Style, error) {\n\tif s, exist := d.Styles[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentStyle = s\n\t\t}\n\t\treturn s, fmt.Errorf(\"style %s already exists\", name)\n\t}\n\ts := table.NewStyle(name)\n\ts.FontName = fontname\n\ts.BigFontName = bigfontname\n\td.Styles[name] = s\n\td.Sections[TABLES].(table.Tables)[table.STYLE].Add(s)\n\tif setcurrent {\n\t\td.CurrentStyle = s\n\t}\n\treturn s, nil\n}\n\n\/\/ LineType returns the named line type if exists.\nfunc (d *Drawing) LineType(name string) (*table.LineType, error) {\n\tlt, err := d.Sections[TABLES].(table.Tables)[table.LTYPE].Contains(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"linetype %s\", err.Error())\n\t}\n\treturn lt.(*table.LineType), nil\n}\n\n\/\/ AddLineType adds a new linetype.\nfunc (d *Drawing) AddLineType(name string, desc string, ls ...float64) (*table.LineType, error) {\n\tlt, _ := d.Sections[TABLES].(table.Tables)[table.LTYPE].Contains(name)\n\tif lt != nil {\n\t\treturn lt.(*table.LineType), fmt.Errorf(\"linetype %s already exists\", name)\n\t}\n\tnewlt := table.NewLineType(name, desc, ls...)\n\td.Sections[TABLES].(table.Tables)[table.LTYPE].Add(newlt)\n\treturn newlt, nil\n}\n\n\/\/ Entities returns slice of all entities contained in Drawing.\nfunc (d *Drawing) Entities() entity.Entities {\n\treturn d.Sections[ENTITIES].(entity.Entities)\n}\n\n\/\/ AddEntity adds a new entity.\nfunc (d *Drawing) AddEntity(e entity.Entity) {\n\td.Sections[4] = d.Sections[4].(entity.Entities).Add(e)\n}\n\n\/\/ Point creates a new POINT at (x, y, z).\nfunc (d *Drawing) Point(x, y, z float64) (*entity.Point, error) {\n\tp := entity.NewPoint()\n\tp.Coord = []float64{x, y, z}\n\tp.SetLayer(d.CurrentLayer)\n\td.AddEntity(p)\n\treturn p, nil\n}\n\n\/\/ Line creates a new LINE from (x1, y1, z1) to (x2, y2, z2).\nfunc (d *Drawing) Line(x1, y1, z1, x2, y2, z2 float64) (*entity.Line, error) {\n\tl := entity.NewLine()\n\tl.Start = []float64{x1, y1, z1}\n\tl.End = []float64{x2, y2, z2}\n\tl.SetLayer(d.CurrentLayer)\n\td.AddEntity(l)\n\treturn l, nil\n}\n\n\/\/ Circle creates a new CIRCLE at (x, y, z) with radius r.\nfunc (d *Drawing) Circle(x, y, z, r float64) (*entity.Circle, error) {\n\tc := entity.NewCircle()\n\tc.Center = []float64{x, y, z}\n\tc.Radius = r\n\tc.SetLayer(d.CurrentLayer)\n\td.AddEntity(c)\n\treturn c, nil\n}\n\n\/\/ Polyline creates a new POLYLINE with given vertices.\nfunc (d *Drawing) Polyline(closed bool, vertices ...[]float64) (*entity.Polyline, error) {\n\tp := entity.NewPolyline()\n\tp.SetLayer(d.CurrentLayer)\n\tfor _, v := range vertices {\n\t\tp.AddVertex(v[0], v[1], v[2])\n\t}\n\tif closed {\n\t\tp.Close()\n\t}\n\td.AddEntity(p)\n\treturn p, nil\n}\n\n\/\/ LwPolyline creates a new LWPOLYLINE with given vertices.\nfunc (d *Drawing) LwPolyline(closed bool, vertices ...[]float64) (*entity.LwPolyline, error) {\n\tsize := len(vertices)\n\tl := entity.NewLwPolyline(size)\n\tfor i := 0; i < size; i++ {\n\t\tl.Vertices[i] = vertices[i]\n\t}\n\tif closed {\n\t\tl.Close()\n\t}\n\tl.SetLayer(d.CurrentLayer)\n\td.AddEntity(l)\n\treturn l, nil\n}\n\n\/\/ ThreeDFace creates a new 3DFACE with given points.\nfunc (d *Drawing) ThreeDFace(points [][]float64) (*entity.ThreeDFace, error) {\n\tf := entity.New3DFace()\n\tif len(points) < 3 {\n\t\treturn nil, errors.New(\"3DFace needs 3 or more points\")\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tf.Points[i] = points[i]\n\t}\n\tif len(points) >= 4 {\n\t\tf.Points[3] = points[3]\n\t} else {\n\t\tf.Points[3] = points[2]\n\t}\n\tf.SetLayer(d.CurrentLayer)\n\td.AddEntity(f)\n\treturn f, nil\n}\n\n\/\/ Text creates a new TEXT str at (x, y, z) with given height.\nfunc (d *Drawing) Text(str string, x, y, z, height float64) (*entity.Text, error) {\n\tt := entity.NewText()\n\tt.Coord1 = []float64{x, y, z}\n\tt.Height = height\n\tt.Value = str\n\tt.SetLayer(d.CurrentLayer)\n\tt.Style = d.CurrentStyle\n\tt.WidthFactor = t.Style.WidthFactor\n\tt.ObliqueAngle = t.Style.ObliqueAngle\n\tt.Style.LastHeightUsed = height\n\td.AddEntity(t)\n\treturn t, nil\n}\n\nfunc (d *Drawing) addObject(o object.Object) {\n\td.Sections[5] = d.Sections[5].(object.Objects).Add(o)\n}\n\n\/\/ Group adds given entities to the named group.\n\/\/ If the named group doesn't exist, create it.\nfunc (d *Drawing) Group(name, desc string, es ...entity.Entity) (*object.Group, error) {\n\tif g, exist := d.Groups[name]; exist {\n\t\tg.AddEntity(es...)\n\t\treturn g, fmt.Errorf(\"group %s already exists\", name)\n\t}\n\tg := object.NewGroup(name, desc, es...)\n\td.Groups[name] = g\n\tg.SetOwner(d.groupdict)\n\td.addObject(g)\n\treturn g, nil\n}\n\n\/\/ AddToGroup adds given entities to the named group.\n\/\/ If the named group doesn't exist, returns error.\nfunc (d *Drawing) AddToGroup(name string, es ...entity.Entity) error {\n\tif g, exist := d.Groups[name]; exist {\n\t\tg.AddEntity(es...)\n\t}\n\treturn fmt.Errorf(\"group %s doesn't exist\", name)\n}\n\nfunc (d *Drawing) SetExt() {\n\tmins := []float64{1e16, 1e16, 1e16}\n\tmaxs := []float64{-1e16, -1e16, -1e16}\n\tfor _, en := range d.Entities() {\n\t\ttmpmins, tmpmaxs := en.BBox()\n\t\tfor i := 0; i < 3; i++ {\n\t\t\tif tmpmins[i] < mins[i] {\n\t\t\t\tmins[i] = tmpmins[i]\n\t\t\t}\n\t\t\tif tmpmaxs[i] > maxs[i] {\n\t\t\t\tmaxs[i] = tmpmaxs[i]\n\t\t\t}\n\t\t}\n\t}\n\th := d.Header()\n\tfor i := 0; i < 3; i++ {\n\t\th.ExtMin[i] = mins[i]\n\t\th.ExtMax[i] = maxs[i]\n\t}\n}\n<commit_msg>Added WriteTo method<commit_after>\/\/ Package drawing defines Drawing struct for DXF.\npackage drawing\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/yofu\/dxf\/block\"\n\t\"github.com\/yofu\/dxf\/class\"\n\t\"github.com\/yofu\/dxf\/color\"\n\t\"github.com\/yofu\/dxf\/entity\"\n\t\"github.com\/yofu\/dxf\/format\"\n\t\"github.com\/yofu\/dxf\/handle\"\n\t\"github.com\/yofu\/dxf\/header\"\n\t\"github.com\/yofu\/dxf\/object\"\n\t\"github.com\/yofu\/dxf\/table\"\n)\n\n\/\/ Drawing contains DXF drawing data.\ntype Drawing struct {\n\tFileName string\n\tLayers map[string]*table.Layer\n\tGroups map[string]*object.Group\n\tStyles map[string]*table.Style\n\tCurrentLayer *table.Layer\n\tCurrentStyle *table.Style\n\tformatter format.Formatter\n\tSections []Section\n\tdictionary *object.Dictionary\n\tgroupdict *object.Dictionary\n\tPlotStyle handle.Handler\n\t\/\/ savebuff is used internally for the io.Reader options.\n\tsavebuff *bytes.Buffer\n}\n\n\/\/ New creates a new Drawing.\nfunc New() *Drawing {\n\td := new(Drawing)\n\td.Layers = make(map[string]*table.Layer)\n\td.Layers[\"0\"] = table.LY_0\n\td.Groups = make(map[string]*object.Group)\n\td.CurrentLayer = d.Layers[\"0\"]\n\td.Styles = make(map[string]*table.Style)\n\td.Styles[\"STANDARD\"] = table.ST_STANDARD\n\td.CurrentStyle = d.Styles[\"STANDARD\"]\n\td.formatter = format.NewASCII()\n\td.formatter.SetPrecision(16)\n\td.Sections = []Section{\n\t\theader.New(),\n\t\tclass.New(),\n\t\ttable.New(),\n\t\tblock.New(),\n\t\tentity.New(),\n\t\tobject.New(),\n\t}\n\td.dictionary = object.NewDictionary()\n\td.addObject(d.dictionary)\n\twd, ph := object.NewAcDbDictionaryWDFLT(d.dictionary)\n\td.dictionary.AddItem(\"ACAD_PLOTSTYLENAME\", wd)\n\td.addObject(wd)\n\td.addObject(ph)\n\td.groupdict = object.NewDictionary()\n\td.addObject(d.groupdict)\n\td.dictionary.AddItem(\"ACAD_GROUP\", d.groupdict)\n\td.PlotStyle = ph\n\td.Layers[\"0\"].SetPlotStyle(d.PlotStyle)\n\treturn d\n}\n\nfunc (d *Drawing) saveFile(filename string) error {\n\tw, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\t_, err = d.WriteTo(w)\n\treturn err\n}\n\n\/\/ Save saves the drawing file.\n\/\/ If it is the first time, use SaveAs(filename).\nfunc (d *Drawing) Save() error {\n\tif d.FileName == \"\" {\n\t\treturn errors.New(\"filename is blank, use SaveAs(filename)\")\n\t}\n\treturn d.saveFile(d.FileName)\n}\n\n\/\/ SaveAs saves the drawing file as given filename.\nfunc (d *Drawing) SaveAs(filename string) error {\n\td.FileName = filename\n\treturn d.saveFile(filename)\n}\n\n\/\/ setHandle sets all the handles contained in Drawing.\nfunc (d *Drawing) setHandle() {\n\th := 1\n\tfor _, s := range d.Sections[1:] {\n\t\ts.SetHandle(&h)\n\t}\n\td.Sections[0].SetHandle(&h)\n}\n\nfunc (d *Drawing) Header() *header.Header {\n\treturn d.Sections[0].(*header.Header)\n}\n\n\/\/ Layer returns the named layer if exists.\n\/\/ If setcurrent is true, set current layer to it.\nfunc (d *Drawing) Layer(name string, setcurrent bool) (*table.Layer, error) {\n\tif l, exist := d.Layers[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentLayer = l\n\t\t}\n\t\treturn l, nil\n\t}\n\treturn nil, fmt.Errorf(\"layer %s doesn't exist\", name)\n}\n\n\/\/ AddLayer adds a new layer with given name, color and line type.\n\/\/ If setcurrent is true, set current layer to it.\nfunc (d *Drawing) AddLayer(name string, cl color.ColorNumber, lt *table.LineType, setcurrent bool) (*table.Layer, error) {\n\tif l, exist := d.Layers[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentLayer = l\n\t\t}\n\t\treturn l, fmt.Errorf(\"layer %s already exists\", name)\n\t}\n\tl := table.NewLayer(name, cl, lt)\n\tl.SetPlotStyle(d.PlotStyle)\n\td.Layers[name] = l\n\td.Sections[2].(table.Tables).AddLayer(l)\n\tif setcurrent {\n\t\td.CurrentLayer = l\n\t}\n\treturn l, nil\n}\n\n\/\/ ChangeLayer changes current layer to the named layer.\nfunc (d *Drawing) ChangeLayer(name string) error {\n\tif l, exist := d.Layers[name]; exist {\n\t\td.CurrentLayer = l\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"layer %s doesn't exist\", name)\n}\n\n\/\/ Style returns the named text style if exists.\n\/\/ If setcurrent is true, set current style to it.\nfunc (d *Drawing) Style(name string, setcurrent bool) (*table.Style, error) {\n\tif s, exist := d.Styles[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentStyle = s\n\t\t}\n\t\treturn s, nil\n\t}\n\treturn nil, fmt.Errorf(\"style %s doesn't exist\", name)\n}\n\n\/\/ AddStyle adds a new text style.\n\/\/ If setcurrent is true, set current style to it.\nfunc (d *Drawing) AddStyle(name string, fontname, bigfontname string, setcurrent bool) (*table.Style, error) {\n\tif s, exist := d.Styles[name]; exist {\n\t\tif setcurrent {\n\t\t\td.CurrentStyle = s\n\t\t}\n\t\treturn s, fmt.Errorf(\"style %s already exists\", name)\n\t}\n\ts := table.NewStyle(name)\n\ts.FontName = fontname\n\ts.BigFontName = bigfontname\n\td.Styles[name] = s\n\td.Sections[TABLES].(table.Tables)[table.STYLE].Add(s)\n\tif setcurrent {\n\t\td.CurrentStyle = s\n\t}\n\treturn s, nil\n}\n\n\/\/ LineType returns the named line type if exists.\nfunc (d *Drawing) LineType(name string) (*table.LineType, error) {\n\tlt, err := d.Sections[TABLES].(table.Tables)[table.LTYPE].Contains(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"linetype %s\", err.Error())\n\t}\n\treturn lt.(*table.LineType), nil\n}\n\n\/\/ AddLineType adds a new linetype.\nfunc (d *Drawing) AddLineType(name string, desc string, ls ...float64) (*table.LineType, error) {\n\tlt, _ := d.Sections[TABLES].(table.Tables)[table.LTYPE].Contains(name)\n\tif lt != nil {\n\t\treturn lt.(*table.LineType), fmt.Errorf(\"linetype %s already exists\", name)\n\t}\n\tnewlt := table.NewLineType(name, desc, ls...)\n\td.Sections[TABLES].(table.Tables)[table.LTYPE].Add(newlt)\n\treturn newlt, nil\n}\n\n\/\/ Entities returns slice of all entities contained in Drawing.\nfunc (d *Drawing) Entities() entity.Entities {\n\treturn d.Sections[ENTITIES].(entity.Entities)\n}\n\n\/\/ AddEntity adds a new entity.\nfunc (d *Drawing) AddEntity(e entity.Entity) {\n\td.Sections[4] = d.Sections[4].(entity.Entities).Add(e)\n}\n\n\/\/ Point creates a new POINT at (x, y, z).\nfunc (d *Drawing) Point(x, y, z float64) (*entity.Point, error) {\n\tp := entity.NewPoint()\n\tp.Coord = []float64{x, y, z}\n\tp.SetLayer(d.CurrentLayer)\n\td.AddEntity(p)\n\treturn p, nil\n}\n\n\/\/ Line creates a new LINE from (x1, y1, z1) to (x2, y2, z2).\nfunc (d *Drawing) Line(x1, y1, z1, x2, y2, z2 float64) (*entity.Line, error) {\n\tl := entity.NewLine()\n\tl.Start = []float64{x1, y1, z1}\n\tl.End = []float64{x2, y2, z2}\n\tl.SetLayer(d.CurrentLayer)\n\td.AddEntity(l)\n\treturn l, nil\n}\n\n\/\/ Circle creates a new CIRCLE at (x, y, z) with radius r.\nfunc (d *Drawing) Circle(x, y, z, r float64) (*entity.Circle, error) {\n\tc := entity.NewCircle()\n\tc.Center = []float64{x, y, z}\n\tc.Radius = r\n\tc.SetLayer(d.CurrentLayer)\n\td.AddEntity(c)\n\treturn c, nil\n}\n\n\/\/ Polyline creates a new POLYLINE with given vertices.\nfunc (d *Drawing) Polyline(closed bool, vertices ...[]float64) (*entity.Polyline, error) {\n\tp := entity.NewPolyline()\n\tp.SetLayer(d.CurrentLayer)\n\tfor _, v := range vertices {\n\t\tp.AddVertex(v[0], v[1], v[2])\n\t}\n\tif closed {\n\t\tp.Close()\n\t}\n\td.AddEntity(p)\n\treturn p, nil\n}\n\n\/\/ LwPolyline creates a new LWPOLYLINE with given vertices.\nfunc (d *Drawing) LwPolyline(closed bool, vertices ...[]float64) (*entity.LwPolyline, error) {\n\tsize := len(vertices)\n\tl := entity.NewLwPolyline(size)\n\tfor i := 0; i < size; i++ {\n\t\tl.Vertices[i] = vertices[i]\n\t}\n\tif closed {\n\t\tl.Close()\n\t}\n\tl.SetLayer(d.CurrentLayer)\n\td.AddEntity(l)\n\treturn l, nil\n}\n\n\/\/ ThreeDFace creates a new 3DFACE with given points.\nfunc (d *Drawing) ThreeDFace(points [][]float64) (*entity.ThreeDFace, error) {\n\tf := entity.New3DFace()\n\tif len(points) < 3 {\n\t\treturn nil, errors.New(\"3DFace needs 3 or more points\")\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tf.Points[i] = points[i]\n\t}\n\tif len(points) >= 4 {\n\t\tf.Points[3] = points[3]\n\t} else {\n\t\tf.Points[3] = points[2]\n\t}\n\tf.SetLayer(d.CurrentLayer)\n\td.AddEntity(f)\n\treturn f, nil\n}\n\n\/\/ Text creates a new TEXT str at (x, y, z) with given height.\nfunc (d *Drawing) Text(str string, x, y, z, height float64) (*entity.Text, error) {\n\tt := entity.NewText()\n\tt.Coord1 = []float64{x, y, z}\n\tt.Height = height\n\tt.Value = str\n\tt.SetLayer(d.CurrentLayer)\n\tt.Style = d.CurrentStyle\n\tt.WidthFactor = t.Style.WidthFactor\n\tt.ObliqueAngle = t.Style.ObliqueAngle\n\tt.Style.LastHeightUsed = height\n\td.AddEntity(t)\n\treturn t, nil\n}\n\nfunc (d *Drawing) addObject(o object.Object) {\n\td.Sections[5] = d.Sections[5].(object.Objects).Add(o)\n}\n\n\/\/ Group adds given entities to the named group.\n\/\/ If the named group doesn't exist, create it.\nfunc (d *Drawing) Group(name, desc string, es ...entity.Entity) (*object.Group, error) {\n\tif g, exist := d.Groups[name]; exist {\n\t\tg.AddEntity(es...)\n\t\treturn g, fmt.Errorf(\"group %s already exists\", name)\n\t}\n\tg := object.NewGroup(name, desc, es...)\n\td.Groups[name] = g\n\tg.SetOwner(d.groupdict)\n\td.addObject(g)\n\treturn g, nil\n}\n\n\/\/ AddToGroup adds given entities to the named group.\n\/\/ If the named group doesn't exist, returns error.\nfunc (d *Drawing) AddToGroup(name string, es ...entity.Entity) error {\n\tif g, exist := d.Groups[name]; exist {\n\t\tg.AddEntity(es...)\n\t}\n\treturn fmt.Errorf(\"group %s doesn't exist\", name)\n}\n\n\/\/ WriteTo write the dxf file data to the given writer until\n\/\/ there is no more data to write or if an error occurs. The return\n\/\/ value n is the number of bytes writer.\n\/\/ This method full fills the io.WriterTo interface.\nfunc (d *Drawing) WriteTo(w io.Writer) (n int64, err error) {\n\tif d == nil {\n\t\treturn 0, nil\n\t}\n\td.setHandle()\n\td.formatter.Reset()\n\tfor _, s := range d.Sections {\n\t\ts.WriteTo(d.formatter)\n\t}\n\td.formatter.WriteString(0, \"EOF\")\n\treturn d.formatter.WriteTo(w)\n}\n\nvar _ io.WriterTo = &Drawing{}\n\n\/\/ Read implements the standard Read interface: it reads data from a buffer\n\/\/ containing the drawing, the buffer in initialized on the first read or\n\/\/ a read call after a close. If the drawing is nil, read returns 0 for n,\n\/\/ nil for the error.\nfunc (d *Drawing) Read(p []byte) (n int, err error) {\n\tif d == nil {\n\t\treturn 0, nil\n\t}\n\tif d.savebuff == nil {\n\t\t\/\/ We need to initilize our buffer, and write the contents of the\n\t\t\/\/ drawing into it.\n\t\td.savebuff = new(bytes.Buffer)\n\t\t_, err := d.WriteTo(d.savebuff)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn d.savebuff.Read(p)\n}\n\n\/\/ Close implements the standard Close interface: it close the buffer used by\n\/\/ the read command if one was initilized, and frees the memory held by it. Close\n\/\/ will always return nil for the error.\nfunc (d *Drawing) Close() error {\n\tif d != nil && d.savebuff != nil {\n\t\td.savebuff = nil\n\t}\n\treturn nil\n}\n\nvar _ io.ReadCloser = &Drawing{}\n\n\/\/ SetExt sets the extents of the drawing based on the entities\n\/\/ in the drawing. If the drawing is nil, this function will panic.\nfunc (d *Drawing) SetExt() {\n\tmins := []float64{1e16, 1e16, 1e16}\n\tmaxs := []float64{-1e16, -1e16, -1e16}\n\tfor _, en := range d.Entities() {\n\t\ttmpmins, tmpmaxs := en.BBox()\n\t\tfor i := 0; i < 3; i++ {\n\t\t\tif tmpmins[i] < mins[i] {\n\t\t\t\tmins[i] = tmpmins[i]\n\t\t\t}\n\t\t\tif tmpmaxs[i] > maxs[i] {\n\t\t\t\tmaxs[i] = tmpmaxs[i]\n\t\t\t}\n\t\t}\n\t}\n\th := d.Header()\n\tfor i := 0; i < 3; i++ {\n\t\th.ExtMin[i] = mins[i]\n\t\th.ExtMax[i] = maxs[i]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ultimate\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/hellodudu\/comment\/config\"\n)\n\ntype TcpServer struct {\n}\n\nfunc NewTcpServer() (*TcpServer, error) {\n\treturn &TcpServer{}, nil\n}\n\nfunc (server *TcpServer) Run() {\n\taddr := config.TcpListenAddr\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer ln.Close()\n\n\tlog.Println(\"tcp listening at \", addr)\n\n\tfor {\n\t\tcon, err := ln.Accept()\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Temporary() {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Println(color.CyanString(\"new tcp connection!\"))\n\t\tgo handleTcpConnection(con)\n\t\t\/\/ go handleTcpReadCon(con)\n\t}\n}\n\nfunc handleTcpReadCon(con net.Conn) {\n\t\/\/ byLen := [4]byte\n\t\/\/ if _, err := io.ReadFull(con, byLen); err != nil {\n\t\/\/ \tlog.Println(color.RedString(\"tcp read error:\", err.Error()))\n\t\/\/ }\n}\n\nfunc handleTcpConnection(con net.Conn) {\n\tdefer con.Close()\n\n\tcon.(*net.TCPConn).SetKeepAlive(true)\n\tcon.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\t\/\/ con.(*net.TCPConn).SetWriteDeadline(time.Now().Add(5 * time.Second))\n\tscanner := bufio.NewScanner(con)\n\n\t\/\/ first 4 bytes represent tcp package size, split it\n\tscanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\t\tif atEOF {\n\t\t\treturn advance, token, io.EOF\n\t\t}\n\n\t\tif len(data) > 4 {\n\t\t\tlength := uint32(0)\n\t\t\tbinary.Read(bytes.NewReader(data[:4]), binary.LittleEndian, &length)\n\t\t\tif int(length)+4 <= len(data) {\n\t\t\t\treturn int(length) + 4, data[:int(length)+4], nil\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n\n\tfor {\n\t\tok := scanner.Scan()\n\t\tif ok {\n\t\t\tbyMsg := scanner.Bytes()\n\t\t\tGetUltimateAPI().AddTask(func() {\n\t\t\t\tGetUltimateAPI().GetWorldSession().HandleMessage(con, byMsg)\n\t\t\t})\n\t\t} else if err := scanner.Err(); err != nil {\n\t\t\tlog.Println(color.YellowString(\"scan error:%s\", err.Error()))\n\t\t\t\/\/ end of connection\n\t\t\tGetUltimateAPI().GetWorldSession().DisconnectWorld(con)\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Println(color.CyanString(\"connection shut down!\"))\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>fix bug:client socket shut down cause tcp connection handle loop continue<commit_after>package ultimate\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/hellodudu\/comment\/config\"\n)\n\ntype TcpServer struct {\n}\n\nfunc NewTcpServer() (*TcpServer, error) {\n\treturn &TcpServer{}, nil\n}\n\nfunc (server *TcpServer) Run() {\n\taddr := config.TcpListenAddr\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer ln.Close()\n\n\tlog.Println(\"tcp listening at \", addr)\n\n\tfor {\n\t\tcon, err := ln.Accept()\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Temporary() {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Println(color.CyanString(\"a new tcp connection!\"))\n\t\tgo handleTcpConnection(con)\n\t}\n}\n\nfunc handleTcpConnection(con net.Conn) {\n\tdefer con.Close()\n\n\tcon.(*net.TCPConn).SetKeepAlive(true)\n\tcon.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second)\n\tcon.(*net.TCPConn).SetWriteDeadline(time.Now().Add(5 * time.Second))\n\tscanner := bufio.NewScanner(con)\n\n\t\/\/ first 4 bytes represent tcp package size, split it\n\tscanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\t\tif atEOF {\n\t\t\treturn advance, token, io.EOF\n\t\t}\n\n\t\tif len(data) > 4 {\n\t\t\tlength := uint32(0)\n\t\t\tbinary.Read(bytes.NewReader(data[:4]), binary.LittleEndian, &length)\n\t\t\tif int(length)+4 <= len(data) {\n\t\t\t\treturn int(length) + 4, data[:int(length)+4], nil\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n\n\tfor {\n\t\tok := scanner.Scan()\n\t\tif ok {\n\t\t\tbyMsg := scanner.Bytes()\n\t\t\tGetUltimateAPI().AddTask(func() {\n\t\t\t\tGetUltimateAPI().GetWorldSession().HandleMessage(con, byMsg)\n\t\t\t})\n\t\t} else if err := scanner.Err(); err != nil {\n\t\t\tlog.Println(color.YellowString(\"scan error:%s\", err.Error()))\n\t\t\t\/\/ end of connection\n\t\t\tGetUltimateAPI().GetWorldSession().DisconnectWorld(con)\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Println(color.CyanString(\"one client connection shut down!\"))\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tiff\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"text\/tabwriter\"\n)\n\n\/\/ IFD represents the data structure of an IFD in a TIFF File.\ntype IFD interface {\n\tNumEntries() uint16\n\tFields() []Field\n\tNextOffset() uint32\n\tHasField(id uint16) bool\n\tGetField(id uint16) Field\n}\n\ntype imageFileDirectory struct {\n\tnumEntries uint16\n\tfields []Field\n\tnextOffset uint32\n\tfieldMap map[uint16]Field\n}\n\nfunc (ifd *imageFileDirectory) NumEntries() uint16 {\n\treturn ifd.numEntries\n}\n\nfunc (ifd *imageFileDirectory) Fields() []Field {\n\treturn ifd.fields\n}\n\nfunc (ifd *imageFileDirectory) NextOffset() uint32 {\n\treturn ifd.nextOffset\n}\n\nfunc (ifd *imageFileDirectory) HasField(id uint16) bool {\n\t_, ok := ifd.fieldMap[id]\n\treturn ok\n}\n\nfunc (ifd *imageFileDirectory) GetField(id uint16) Field {\n\treturn ifd.fieldMap[id]\n}\n\nfunc (ifd *imageFileDirectory) String() string {\n\tfmtStr := `\nNumEntries: %d\nNextOffset: %d\nFields (%d):\n%s\n`\n\tw := new(tabwriter.Writer)\n\tvar buf bytes.Buffer\n\n\tw.Init(&buf, 5, 0, 1, ' ', 0)\n\tfor _, f := range ifd.Fields() {\n\t\tfmt.Fprintf(w, \" %v\\n\", f)\n\t}\n\tw.Flush()\n\n\treturn fmt.Sprintf(fmtStr, ifd.numEntries, ifd.nextOffset, len(ifd.fields), buf.String())\n}\n\nfunc ParseIFD(br BReader, offset uint32, tsp TagSpace, ftsp FieldTypeSpace) (out IFD, err error) {\n\tif br == nil {\n\t\treturn nil, errors.New(\"tiff: no BReader supplied\")\n\t}\n\tif ftsp == nil {\n\t\tftsp = DefaultFieldTypeSpace\n\t}\n\tif tsp == nil {\n\t\ttsp = DefaultTagSpace\n\t}\n\tifd := &imageFileDirectory{\n\t\tfieldMap: make(map[uint16]Field, 1),\n\t}\n\tbr.Seek(int64(offset), 0)\n\tif err = br.BRead(&ifd.numEntries); err != nil {\n\t\terr = fmt.Errorf(\"tiff: unable to read the number of entries for the IFD at offset %#08x: %v\", offset, err)\n\t\treturn\n\t}\n\tfor i := uint16(0); i < ifd.numEntries; i++ {\n\t\tvar f Field\n\t\tif f, err = ParseField(br, tsp, ftsp); err != nil {\n\t\t\treturn\n\t\t}\n\t\tifd.fields = append(ifd.fields, f)\n\t\tifd.fieldMap[f.Tag().ID()] = f\n\t}\n\tif err = br.BRead(&ifd.nextOffset); err != nil {\n\t\terr = fmt.Errorf(\"tiff: unable to read the offset for the next ifd: %v\", err)\n\t\treturn\n\t}\n\treturn ifd, nil\n}\n<commit_msg>Add index value to the printing of fields in an ifd<commit_after>package tiff\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"text\/tabwriter\"\n)\n\n\/\/ IFD represents the data structure of an IFD in a TIFF File.\ntype IFD interface {\n\tNumEntries() uint16\n\tFields() []Field\n\tNextOffset() uint32\n\tHasField(id uint16) bool\n\tGetField(id uint16) Field\n}\n\ntype imageFileDirectory struct {\n\tnumEntries uint16\n\tfields []Field\n\tnextOffset uint32\n\tfieldMap map[uint16]Field\n}\n\nfunc (ifd *imageFileDirectory) NumEntries() uint16 {\n\treturn ifd.numEntries\n}\n\nfunc (ifd *imageFileDirectory) Fields() []Field {\n\treturn ifd.fields\n}\n\nfunc (ifd *imageFileDirectory) NextOffset() uint32 {\n\treturn ifd.nextOffset\n}\n\nfunc (ifd *imageFileDirectory) HasField(id uint16) bool {\n\t_, ok := ifd.fieldMap[id]\n\treturn ok\n}\n\nfunc (ifd *imageFileDirectory) GetField(id uint16) Field {\n\treturn ifd.fieldMap[id]\n}\n\nfunc (ifd *imageFileDirectory) String() string {\n\tfmtStr := `\nNumEntries: %d\nNextOffset: %d\nFields (%d):\n%s\n`\n\tw := new(tabwriter.Writer)\n\tvar buf bytes.Buffer\n\n\tw.Init(&buf, 5, 0, 1, ' ', 0)\n\tfor i, f := range ifd.Fields() {\n\t\tfmt.Fprintf(w, \" %2d: %v\\n\", i, f)\n\t}\n\tw.Flush()\n\n\treturn fmt.Sprintf(fmtStr, ifd.numEntries, ifd.nextOffset, len(ifd.fields), buf.String())\n}\n\nfunc ParseIFD(br BReader, offset uint32, tsp TagSpace, ftsp FieldTypeSpace) (out IFD, err error) {\n\tif br == nil {\n\t\treturn nil, errors.New(\"tiff: no BReader supplied\")\n\t}\n\tif ftsp == nil {\n\t\tftsp = DefaultFieldTypeSpace\n\t}\n\tif tsp == nil {\n\t\ttsp = DefaultTagSpace\n\t}\n\tifd := &imageFileDirectory{\n\t\tfieldMap: make(map[uint16]Field, 1),\n\t}\n\tbr.Seek(int64(offset), 0)\n\tif err = br.BRead(&ifd.numEntries); err != nil {\n\t\terr = fmt.Errorf(\"tiff: unable to read the number of entries for the IFD at offset %#08x: %v\", offset, err)\n\t\treturn\n\t}\n\tfor i := uint16(0); i < ifd.numEntries; i++ {\n\t\tvar f Field\n\t\tif f, err = ParseField(br, tsp, ftsp); err != nil {\n\t\t\treturn\n\t\t}\n\t\tifd.fields = append(ifd.fields, f)\n\t\tifd.fieldMap[f.Tag().ID()] = f\n\t}\n\tif err = br.BRead(&ifd.nextOffset); err != nil {\n\t\terr = fmt.Errorf(\"tiff: unable to read the offset for the next ifd: %v\", err)\n\t\treturn\n\t}\n\treturn ifd, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/cvmfs\/docker-graphdriver\/daemon\/lib\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(imageInDatabaseCmd)\n}\n\nvar imageInDatabaseCmd = &cobra.Command{\n\tUse: \"image-in-database\",\n\tShort: \"Check that the provide image is already in the database.\",\n\tArgs: cobra.MinimumNArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\timg, err := lib.ParseImage(args[0])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_, err = lib.GetImageId(img)\n\t\tif err != nil {\n\t\t\tif err == sql.ErrNoRows {\n\t\t\t\tfmt.Println(0)\n\n\t\t\t} else {\n\t\t\t\tlib.LogE(err).Fatal(\"Error in executing SQL query\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(1)\n\t\t}\n\t\tos.Exit(0)\n\t},\n}\n<commit_msg>remove image-in-db command<commit_after><|endoftext|>"} {"text":"<commit_before>package auth\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/micro\/cli\/v2\"\n\t\"github.com\/micro\/go-micro\/v2\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\"\n\tsrvAuth \"github.com\/micro\/go-micro\/v2\/auth\/service\"\n\tpb \"github.com\/micro\/go-micro\/v2\/auth\/service\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\/token\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\/token\/jwt\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/cmd\"\n\tlog \"github.com\/micro\/go-micro\/v2\/logger\"\n\tcliutil \"github.com\/micro\/micro\/v2\/client\/cli\/util\"\n\t\"github.com\/micro\/micro\/v2\/internal\/client\"\n\t\"github.com\/micro\/micro\/v2\/internal\/config\"\n\t\"github.com\/micro\/micro\/v2\/service\/auth\/api\"\n\taccountsHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/accounts\"\n\tauthHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/auth\"\n\trulesHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/rules\"\n)\n\nvar (\n\t\/\/ Name of the service\n\tName = \"go.micro.auth\"\n\t\/\/ Address of the service\n\tAddress = \":8010\"\n\t\/\/ ServiceFlags are provided to commands which run micro services\n\tServiceFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"address\",\n\t\t\tUsage: \"Set the auth http address e.g 0.0.0.0:8010\",\n\t\t\tEnvVars: []string{\"MICRO_SERVER_ADDRESS\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_provider\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PROVIDER\"},\n\t\t\tUsage: \"Auth provider enables account generation\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_public_key\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PUBLIC_KEY\"},\n\t\t\tUsage: \"Public key for JWT auth (base64 encoded PEM)\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_private_key\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PRIVATE_KEY\"},\n\t\t\tUsage: \"Private key for JWT auth (base64 encoded PEM)\",\n\t\t},\n\t}\n\t\/\/ RuleFlags are provided to commands which create or delete rules\n\tRuleFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"role\",\n\t\t\tUsage: \"The role to amend, e.g. 'user' or '*' to represent all\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"resource\",\n\t\t\tUsage: \"The resource to amend in the format namespace:type:name:endpoint, e.g. micro:service:go.micro.auth:*\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"access\",\n\t\t\tUsage: \"The access level, must be granted or denied\",\n\t\t\tValue: \"granted\",\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName: \"priority\",\n\t\t\tUsage: \"The priority level, default is 0, the greater the number the higher the priority\",\n\t\t\tValue: 0,\n\t\t},\n\t}\n\t\/\/ AccountFlags are provided to the create account command\n\tAccountFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tUsage: \"The account id\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"secret\",\n\t\t\tUsage: \"The account secret (password)\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"roles\",\n\t\t\tUsage: \"Comma seperated list of roles to give the account\",\n\t\t},\n\t}\n)\n\n\/\/ run the auth service\nfunc Run(ctx *cli.Context, srvOpts ...micro.Option) {\n\tlog.Init(log.WithFields(map[string]interface{}{\"service\": \"auth\"}))\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\tif len(ctx.String(\"address\")) > 0 {\n\t\tAddress = ctx.String(\"address\")\n\t}\n\tif len(Address) > 0 {\n\t\tsrvOpts = append(srvOpts, micro.Address(Address))\n\t}\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\t\/\/ setup the handlers\n\tauthH := &authHandler.Auth{}\n\truleH := &rulesHandler.Rules{}\n\taccountH := &accountsHandler.Accounts{}\n\n\t\/\/ setup the auth handler to use JWTs\n\tpubKey := ctx.String(\"auth_public_key\")\n\tprivKey := ctx.String(\"auth_private_key\")\n\tif len(pubKey) > 0 || len(privKey) > 0 {\n\t\tauthH.TokenProvider = jwt.NewTokenProvider(\n\t\t\ttoken.WithPublicKey(pubKey),\n\t\t\ttoken.WithPrivateKey(privKey),\n\t\t)\n\t}\n\n\tst := *cmd.DefaultCmd.Options().Store\n\n\t\/\/ set the handlers store\n\tauthH.Init(auth.Store(st))\n\truleH.Init(auth.Store(st))\n\taccountH.Init(auth.Store(st))\n\n\t\/\/ setup service\n\tsrvOpts = append(srvOpts, micro.Name(Name))\n\tservice := micro.NewService(srvOpts...)\n\n\t\/\/ register handlers\n\tpb.RegisterAuthHandler(service.Server(), authH)\n\tpb.RegisterRulesHandler(service.Server(), ruleH)\n\tpb.RegisterAccountsHandler(service.Server(), accountH)\n\n\t\/\/ run service\n\tif err := service.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc authFromContext(ctx *cli.Context) auth.Auth {\n\tif cliutil.IsLocal() {\n\t\treturn *cmd.DefaultCmd.Options().Auth\n\t}\n\treturn srvAuth.NewAuth(\n\t\tauth.WithClient(client.New(ctx)),\n\t)\n}\n\n\/\/ login using a token\nfunc login(ctx *cli.Context) {\n\t\/\/ check for the token flag\n\tif tok := ctx.String(\"token\"); len(tok) > 0 {\n\t\t_, err := authFromContext(ctx).Inspect(tok)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvar env cliutil.Env\n\t\tif len(ctx.String(\"env\")) > 0 {\n\t\t\tenv = cliutil.GetEnvByName(ctx.String(\"env\"))\n\t\t} else {\n\t\t\tenv = cliutil.GetEnv()\n\t\t}\n\t\tif err := config.Set(tok, \"micro\", \"auth\", env.Name, \"token\"); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"You have been logged in\")\n\t\treturn\n\t}\n\n\tif ctx.Args().Len() != 2 {\n\t\tfmt.Println(\"Usage: `micro login {id} {secret} OR micro login --token {token}`\")\n\t\tos.Exit(1)\n\t}\n\tid := ctx.Args().Get(0)\n\tsecret := ctx.Args().Get(1)\n\n\t\/\/ Execute the request\n\ttok, err := authFromContext(ctx).Token(auth.WithCredentials(id, secret), auth.WithExpiry(time.Hour*24))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Store the access token in micro config\n\tif err := config.Set(tok.AccessToken, \"micro\", \"auth\", \"token\"); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Store the refresh token in micro config\n\tif err := config.Set(tok.RefreshToken, \"micro\", \"auth\", \"refresh-token\"); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Inform the user\n\tfmt.Println(\"You have been logged in\")\n}\n\n\/\/ whoami returns info about the logged in user\nfunc whoami(ctx *cli.Context) {\n\t\/\/ Get the token from micro config\n\ttok, err := config.Get(\"micro\", \"auth\", \"token\")\n\tif err != nil {\n\t\tfmt.Println(\"You are not logged in\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Inspect the token\n\tacc, err := authFromContext(ctx).Inspect(tok)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"ID: %v\\n\", acc.ID)\n\tfmt.Printf(\"Roles: %v\\n\", strings.Join(acc.Roles, \", \"))\n}\n\n\/\/Commands for auth\nfunc Commands(srvOpts ...micro.Option) []*cli.Command {\n\tcommands := []*cli.Command{\n\t\t{\n\t\t\tName: \"auth\",\n\t\t\tUsage: \"Run the auth service\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\tRun(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName: \"api\",\n\t\t\t\t\tUsage: \"Run the auth api\",\n\t\t\t\t\tDescription: \"Run the auth api\",\n\t\t\t\t\tFlags: ServiceFlags,\n\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\tapi.Run(ctx, srvOpts...)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"List auth resources\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rules\",\n\t\t\t\t\t\t\tUsage: \"List auth rules\",\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tlistRules(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"accounts\",\n\t\t\t\t\t\t\tUsage: \"List auth accounts\",\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tlistAccounts(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"create\",\n\t\t\t\t\tUsage: \"Create an auth resource\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rule\",\n\t\t\t\t\t\t\tUsage: \"Create an auth rule\",\n\t\t\t\t\t\t\tFlags: append(RuleFlags),\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tcreateRule(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"account\",\n\t\t\t\t\t\t\tUsage: \"Create an auth account\",\n\t\t\t\t\t\t\tFlags: append(AccountFlags),\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tcreateAccount(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"delete\",\n\t\t\t\t\tUsage: \"Delete a auth resource\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rule\",\n\t\t\t\t\t\t\tUsage: \"Delete an auth rule\",\n\t\t\t\t\t\t\tFlags: RuleFlags,\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tdeleteRule(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tName: \"login\",\n\t\t\tUsage: \"Login using a token\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\tlogin(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tFlags: []cli.Flag{\n\t\t\t\t&cli.StringFlag{\n\t\t\t\t\tName: \"token\",\n\t\t\t\t\tUsage: \"The token to set\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"whoami\",\n\t\t\tUsage: \"Account information\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\twhoami(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range commands {\n\t\tfor _, p := range Plugins() {\n\t\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\t\tc.Subcommands = append(c.Subcommands, cmds...)\n\t\t\t}\n\n\t\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\t\tc.Flags = append(c.Flags, flags...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn commands\n}\n<commit_msg>Add missing pieces<commit_after>package auth\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/micro\/cli\/v2\"\n\t\"github.com\/micro\/go-micro\/v2\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\"\n\tsrvAuth \"github.com\/micro\/go-micro\/v2\/auth\/service\"\n\tpb \"github.com\/micro\/go-micro\/v2\/auth\/service\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\/token\"\n\t\"github.com\/micro\/go-micro\/v2\/auth\/token\/jwt\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/cmd\"\n\tlog \"github.com\/micro\/go-micro\/v2\/logger\"\n\tcliutil \"github.com\/micro\/micro\/v2\/client\/cli\/util\"\n\t\"github.com\/micro\/micro\/v2\/internal\/client\"\n\t\"github.com\/micro\/micro\/v2\/internal\/config\"\n\t\"github.com\/micro\/micro\/v2\/service\/auth\/api\"\n\taccountsHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/accounts\"\n\tauthHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/auth\"\n\trulesHandler \"github.com\/micro\/micro\/v2\/service\/auth\/handler\/rules\"\n)\n\nvar (\n\t\/\/ Name of the service\n\tName = \"go.micro.auth\"\n\t\/\/ Address of the service\n\tAddress = \":8010\"\n\t\/\/ ServiceFlags are provided to commands which run micro services\n\tServiceFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"address\",\n\t\t\tUsage: \"Set the auth http address e.g 0.0.0.0:8010\",\n\t\t\tEnvVars: []string{\"MICRO_SERVER_ADDRESS\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_provider\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PROVIDER\"},\n\t\t\tUsage: \"Auth provider enables account generation\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_public_key\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PUBLIC_KEY\"},\n\t\t\tUsage: \"Public key for JWT auth (base64 encoded PEM)\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"auth_private_key\",\n\t\t\tEnvVars: []string{\"MICRO_AUTH_PRIVATE_KEY\"},\n\t\t\tUsage: \"Private key for JWT auth (base64 encoded PEM)\",\n\t\t},\n\t}\n\t\/\/ RuleFlags are provided to commands which create or delete rules\n\tRuleFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"role\",\n\t\t\tUsage: \"The role to amend, e.g. 'user' or '*' to represent all\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"resource\",\n\t\t\tUsage: \"The resource to amend in the format namespace:type:name:endpoint, e.g. micro:service:go.micro.auth:*\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"access\",\n\t\t\tUsage: \"The access level, must be granted or denied\",\n\t\t\tValue: \"granted\",\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName: \"priority\",\n\t\t\tUsage: \"The priority level, default is 0, the greater the number the higher the priority\",\n\t\t\tValue: 0,\n\t\t},\n\t}\n\t\/\/ AccountFlags are provided to the create account command\n\tAccountFlags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tUsage: \"The account id\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"secret\",\n\t\t\tUsage: \"The account secret (password)\",\n\t\t\tRequired: true,\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"roles\",\n\t\t\tUsage: \"Comma seperated list of roles to give the account\",\n\t\t},\n\t}\n)\n\n\/\/ run the auth service\nfunc Run(ctx *cli.Context, srvOpts ...micro.Option) {\n\tlog.Init(log.WithFields(map[string]interface{}{\"service\": \"auth\"}))\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\tif len(ctx.String(\"address\")) > 0 {\n\t\tAddress = ctx.String(\"address\")\n\t}\n\tif len(Address) > 0 {\n\t\tsrvOpts = append(srvOpts, micro.Address(Address))\n\t}\n\n\t\/\/ Init plugins\n\tfor _, p := range Plugins() {\n\t\tp.Init(ctx)\n\t}\n\n\t\/\/ setup the handlers\n\tauthH := &authHandler.Auth{}\n\truleH := &rulesHandler.Rules{}\n\taccountH := &accountsHandler.Accounts{}\n\n\t\/\/ setup the auth handler to use JWTs\n\tpubKey := ctx.String(\"auth_public_key\")\n\tprivKey := ctx.String(\"auth_private_key\")\n\tif len(pubKey) > 0 || len(privKey) > 0 {\n\t\tauthH.TokenProvider = jwt.NewTokenProvider(\n\t\t\ttoken.WithPublicKey(pubKey),\n\t\t\ttoken.WithPrivateKey(privKey),\n\t\t)\n\t}\n\n\tst := *cmd.DefaultCmd.Options().Store\n\n\t\/\/ set the handlers store\n\tauthH.Init(auth.Store(st))\n\truleH.Init(auth.Store(st))\n\taccountH.Init(auth.Store(st))\n\n\t\/\/ setup service\n\tsrvOpts = append(srvOpts, micro.Name(Name))\n\tservice := micro.NewService(srvOpts...)\n\n\t\/\/ register handlers\n\tpb.RegisterAuthHandler(service.Server(), authH)\n\tpb.RegisterRulesHandler(service.Server(), ruleH)\n\tpb.RegisterAccountsHandler(service.Server(), accountH)\n\n\t\/\/ run service\n\tif err := service.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc authFromContext(ctx *cli.Context) auth.Auth {\n\tif cliutil.IsLocal() {\n\t\treturn *cmd.DefaultCmd.Options().Auth\n\t}\n\treturn srvAuth.NewAuth(\n\t\tauth.WithClient(client.New(ctx)),\n\t)\n}\n\n\/\/ login using a token\nfunc login(ctx *cli.Context) {\n\t\/\/ check for the token flag\n\tvar env cliutil.Env\n\tif len(ctx.String(\"env\")) > 0 {\n\t\tenv = cliutil.GetEnvByName(ctx.String(\"env\"))\n\t} else {\n\t\tenv = cliutil.GetEnv()\n\t}\n\tif tok := ctx.String(\"token\"); len(tok) > 0 {\n\t\t_, err := authFromContext(ctx).Inspect(tok)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := config.Set(tok, \"micro\", \"auth\", env.Name, \"token\"); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"You have been logged in\")\n\t\treturn\n\t}\n\n\tif ctx.Args().Len() != 2 {\n\t\tfmt.Println(\"Usage: `micro login {id} {secret} OR micro login --token {token}`\")\n\t\tos.Exit(1)\n\t}\n\tid := ctx.Args().Get(0)\n\tsecret := ctx.Args().Get(1)\n\n\t\/\/ Execute the request\n\ttok, err := authFromContext(ctx).Token(auth.WithCredentials(id, secret), auth.WithExpiry(time.Hour*24))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Store the access token in micro config\n\tif err := config.Set(tok.AccessToken, \"micro\", \"auth\", env.Name, \"token\"); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\t\/\/ Store the refresh token in micro config\n\tif err := config.Set(tok.RefreshToken, \"micro\", \"auth\", env.Name, \"refresh-token\"); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Inform the user\n\tfmt.Println(\"You have been logged in\")\n}\n\n\/\/ whoami returns info about the logged in user\nfunc whoami(ctx *cli.Context) {\n\t\/\/ Get the token from micro config\n\ttok, err := config.Get(\"micro\", \"auth\", \"token\")\n\tif err != nil {\n\t\tfmt.Println(\"You are not logged in\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Inspect the token\n\tacc, err := authFromContext(ctx).Inspect(tok)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"ID: %v\\n\", acc.ID)\n\tfmt.Printf(\"Roles: %v\\n\", strings.Join(acc.Roles, \", \"))\n}\n\n\/\/Commands for auth\nfunc Commands(srvOpts ...micro.Option) []*cli.Command {\n\tcommands := []*cli.Command{\n\t\t{\n\t\t\tName: \"auth\",\n\t\t\tUsage: \"Run the auth service\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\tRun(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName: \"api\",\n\t\t\t\t\tUsage: \"Run the auth api\",\n\t\t\t\t\tDescription: \"Run the auth api\",\n\t\t\t\t\tFlags: ServiceFlags,\n\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\tapi.Run(ctx, srvOpts...)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"list\",\n\t\t\t\t\tUsage: \"List auth resources\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rules\",\n\t\t\t\t\t\t\tUsage: \"List auth rules\",\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tlistRules(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"accounts\",\n\t\t\t\t\t\t\tUsage: \"List auth accounts\",\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tlistAccounts(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"create\",\n\t\t\t\t\tUsage: \"Create an auth resource\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rule\",\n\t\t\t\t\t\t\tUsage: \"Create an auth rule\",\n\t\t\t\t\t\t\tFlags: append(RuleFlags),\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tcreateRule(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"account\",\n\t\t\t\t\t\t\tUsage: \"Create an auth account\",\n\t\t\t\t\t\t\tFlags: append(AccountFlags),\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tcreateAccount(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"delete\",\n\t\t\t\t\tUsage: \"Delete a auth resource\",\n\t\t\t\t\tSubcommands: append([]*cli.Command{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"rule\",\n\t\t\t\t\t\t\tUsage: \"Delete an auth rule\",\n\t\t\t\t\t\t\tFlags: RuleFlags,\n\t\t\t\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\t\t\t\tdeleteRule(ctx)\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tName: \"login\",\n\t\t\tUsage: \"Login using a token\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\tlogin(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tFlags: []cli.Flag{\n\t\t\t\t&cli.StringFlag{\n\t\t\t\t\tName: \"token\",\n\t\t\t\t\tUsage: \"The token to set\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"whoami\",\n\t\t\tUsage: \"Account information\",\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\twhoami(ctx)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range commands {\n\t\tfor _, p := range Plugins() {\n\t\t\tif cmds := p.Commands(); len(cmds) > 0 {\n\t\t\t\tc.Subcommands = append(c.Subcommands, cmds...)\n\t\t\t}\n\n\t\t\tif flags := p.Flags(); len(flags) > 0 {\n\t\t\t\tc.Flags = append(c.Flags, flags...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn commands\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage worker\n\nimport (\n\t\"github.com\/Matir\/gobuster\/logging\"\n\t\"github.com\/Matir\/gobuster\/util\"\n\t\"github.com\/Matir\/gobuster\/workqueue\"\n\t\"golang.org\/x\/net\/html\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\ntype HTMLWorker struct {\n\t\/\/ Function to add future work\n\tadder workqueue.QueueAddFunc\n}\n\nfunc NewHTMLWorker(adder workqueue.QueueAddFunc) *HTMLWorker {\n\treturn &HTMLWorker{adder: adder}\n}\n\n\/\/ Work on this response\nfunc (w *HTMLWorker) Handle(URL *url.URL, body io.Reader) {\n\tlinks := w.GetLinks(body)\n\tfoundURLs := make([]*url.URL, 0, len(links))\n\tfor _, l := range links {\n\t\tu, err := url.Parse(l)\n\t\tif err != nil {\n\t\t\tlogging.Logf(logging.LogInfo, \"Error parsing URL (%s): %s\", l, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tfoundURLs = append(foundURLs, URL.ResolveReference(u))\n\t}\n\tw.adder(foundURLs...)\n}\n\n\/\/ Check if this response can be handled by this worker\nfunc (*HTMLWorker) Eligible(resp *http.Response) bool {\n\tct := resp.Header.Get(\"Content-type\")\n\tif strings.ToLower(ct) != \"text\/html\" {\n\t\treturn false\n\t}\n\treturn resp.ContentLength > 0 && resp.ContentLength < 1024*1024\n}\n\n\/\/ Get the links for the body.\nfunc (*HTMLWorker) GetLinks(body io.Reader) []string {\n\ttree, err := html.Parse(body)\n\tif err != nil {\n\t\tlogging.Logf(logging.LogInfo, \"Unable to parse HTML document: %s\", err.Error())\n\t\treturn nil\n\t}\n\tlinks := collectElementAttributes(tree, \"a\", \"href\")\n\tlinks = append(links, collectElementAttributes(tree, \"img\", \"src\")...)\n\tlinks = append(links, collectElementAttributes(tree, \"script\", \"src\")...)\n\tlinks = append(links, collectElementAttributes(tree, \"style\", \"src\")...)\n\treturn util.DedupeStrings(links)\n}\n\nfunc getElementsByTagName(root *html.Node, name string) []*html.Node {\n\tresults := make([]*html.Node, 0)\n\tvar handleNode func(*html.Node)\n\thandleNode = func(node *html.Node) {\n\t\tif node.Type == html.ElementNode && strings.ToLower(node.Data) == name {\n\t\t\tresults = append(results, node)\n\t\t}\n\t\tfor n := node.FirstChild; n != nil; n = n.NextSibling {\n\t\t\thandleNode(n)\n\t\t}\n\t}\n\thandleNode(root)\n\treturn results\n}\n\nfunc getElementAttribute(node *html.Node, attrName string) *string {\n\tfor _, a := range node.Attr {\n\t\tif strings.ToLower(a.Key) == attrName {\n\t\t\treturn &a.Val\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc collectElementAttributes(root *html.Node, tagName, attrName string) []string {\n\tresults := make([]string, 0)\n\tfor _, el := range getElementsByTagName(root, tagName) {\n\t\tif val := getElementAttribute(el, attrName); val != nil {\n\t\t\tresults = append(results, *val)\n\t\t}\n\t}\n\treturn results\n}\n<commit_msg>Enhanced crawling of URLs from resources.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage worker\n\nimport (\n\t\"github.com\/Matir\/gobuster\/logging\"\n\t\"github.com\/Matir\/gobuster\/util\"\n\t\"github.com\/Matir\/gobuster\/workqueue\"\n\t\"golang.org\/x\/net\/html\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n)\n\ntype HTMLWorker struct {\n\t\/\/ Function to add future work\n\tadder workqueue.QueueAddFunc\n}\n\nfunc NewHTMLWorker(adder workqueue.QueueAddFunc) *HTMLWorker {\n\treturn &HTMLWorker{adder: adder}\n}\n\n\/\/ Work on this response\nfunc (w *HTMLWorker) Handle(URL *url.URL, body io.Reader) {\n\tlinks := w.GetLinks(body)\n\tfoundURLs := make([]*url.URL, 0, len(links))\n\tfor _, l := range links {\n\t\tu, err := url.Parse(l)\n\t\tif err != nil {\n\t\t\tlogging.Logf(logging.LogInfo, \"Error parsing URL (%s): %s\", l, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tresolved := URL.ResolveReference(u)\n\t\tfoundURLs = append(foundURLs, util.GetParentPaths(resolved)...)\n\t}\n\tw.adder(foundURLs...)\n}\n\n\/\/ Check if this response can be handled by this worker\nfunc (*HTMLWorker) Eligible(resp *http.Response) bool {\n\tct := resp.Header.Get(\"Content-type\")\n\tif strings.ToLower(ct) != \"text\/html\" {\n\t\treturn false\n\t}\n\treturn resp.ContentLength > 0 && resp.ContentLength < 1024*1024\n}\n\n\/\/ Get the links for the body.\nfunc (*HTMLWorker) GetLinks(body io.Reader) []string {\n\ttree, err := html.Parse(body)\n\tif err != nil {\n\t\tlogging.Logf(logging.LogInfo, \"Unable to parse HTML document: %s\", err.Error())\n\t\treturn nil\n\t}\n\tlinks := collectElementAttributes(tree, \"a\", \"href\")\n\tlinks = append(links, collectElementAttributes(tree, \"img\", \"src\")...)\n\tlinks = append(links, collectElementAttributes(tree, \"script\", \"src\")...)\n\tlinks = append(links, collectElementAttributes(tree, \"style\", \"src\")...)\n\treturn util.DedupeStrings(links)\n}\n\nfunc getElementsByTagName(root *html.Node, name string) []*html.Node {\n\tresults := make([]*html.Node, 0)\n\tvar handleNode func(*html.Node)\n\thandleNode = func(node *html.Node) {\n\t\tif node.Type == html.ElementNode && strings.ToLower(node.Data) == name {\n\t\t\tresults = append(results, node)\n\t\t}\n\t\tfor n := node.FirstChild; n != nil; n = n.NextSibling {\n\t\t\thandleNode(n)\n\t\t}\n\t}\n\thandleNode(root)\n\treturn results\n}\n\nfunc getElementAttribute(node *html.Node, attrName string) *string {\n\tfor _, a := range node.Attr {\n\t\tif strings.ToLower(a.Key) == attrName {\n\t\t\treturn &a.Val\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc collectElementAttributes(root *html.Node, tagName, attrName string) []string {\n\tresults := make([]string, 0)\n\tfor _, el := range getElementsByTagName(root, tagName) {\n\t\tif val := getElementAttribute(el, attrName); val != nil {\n\t\t\tresults = append(results, *val)\n\t\t}\n\t}\n\treturn results\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Bulldozer Framework\n * Copyright (C) DesertBit\n *\/\n\npackage settings\n\nimport (\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/desertbit\/bulldozer\/log\"\n\t\"github.com\/desertbit\/bulldozer\/utils\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/*\n\t * Public\n\t *\/\n\n\tTemplateExtension = \".bt\"\n\tScssSuffix = \".scss\"\n\n\tDefaultSettingsFileName = \"settings.toml\"\n\n\t\/\/ If this prefix is set to string values, then\n\t\/\/ the value is obtained from the environment variables.\n\tParseEnvVarPrefix = \"ENV:\"\n\n\t\/\/ The socket types\n\tTypeTcpSocket SocketType = 1 << iota\n\tTypeUnixSocket SocketType = 1 << iota\n\n\t\/\/ Static URL paths\n\tUrlPublic = \"\/public\/\"\n\tUrlBulldozerResources = \"\/bulldozer\/res\/\"\n\n\t\/*\n\t * Private\n\t *\/\n\n\tbulldozerGoPath = \"src\/github.com\/desertbit\/bulldozer\/\"\n\ttmpDirName = \"bulldozer\"\n\tsessionsDatabaseName = \"sessions.db\"\n\n\t\/\/ Default cookie keys\n\tdefaultCookieHashKey = \"R7DqYdgWlztQ06diRM4z7ByuDwfiAvehLxTwAEDHFvgjkA4CcPrWBhZk6FJIBuDs\"\n\tdefaultCookieBlockKey = \"2Mox41MlNDHOzShGfiO6AMq3isx5hz9r\"\n\n\t\/\/ Default password key\n\tdefaultPasswordEncryptionKey = \"gNlmWx0jurl8ohIVZMi8k9eRZxP25kEeQq68TeTVLD9omFZmP7sSqLK\"\n)\n\nvar (\n\tSettings settings\n)\n\ntype SocketType int\n\nfunc init() {\n\t\/\/ Set the default values\n\tSettings = settings{\n\t\tAutoSetGOMAXPROCS: true,\n\t\tAutoParseFlags: true,\n\t\tAutoCatchInterrupts: true,\n\n\t\tSiteUrl: \"http:\/\/127.0.0.1:9000\",\n\t\tSecureHttpsAccess: false,\n\t\tSocketType: TypeTcpSocket,\n\t\tListenAddress: \":9000\",\n\t\tServeFiles: true,\n\n\t\tDatabaseAddr: \"localhost\",\n\t\tDatabasePort: \"28015\",\n\t\tDatabaseName: \"test\",\n\t\tDatabaseMaxIdle: 50,\n\t\tDatabaseMaxOpen: 50,\n\t\tDatabaseTimeout: time.Minute,\n\n\t\tCookieHashKey: defaultCookieHashKey,\n\t\tCookieBlockKey: defaultCookieBlockKey,\n\t\tSessionMaxAge: 60 * 60 * 24 * 14, \/\/ 14 Days\n\n\t\tFirewallMaxRequestsPerMinute: 100,\n\t\tFirewallReleaseBlockAfter: 60 * 5, \/\/ 5 minutes\n\n\t\tScssCmd: \"scss\",\n\n\t\tRegistrationDisabled: true,\n\t\tPasswordEncryptionKey: defaultPasswordEncryptionKey,\n\t\tRemoveNotConfirmedUsersTimeout: 60 * 60 * 24 * 14, \/\/ 14 Days\n\n\t\tMailSMTPPort: 587,\n\t\tMailSkipCertificateVerify: false,\n\t}\n\n\t\/\/ Set the temporary directory path\n\tSettings.TmpPath = utils.AddTrailingSlashToPath(utils.AddTrailingSlashToPath(os.TempDir()) + tmpDirName)\n\n\t\/\/ Get the current working directory path\n\tvar err error\n\tSettings.WorkingPath, err = os.Getwd()\n\tif err != nil {\n\t\tlog.L.Fatalf(\"failed to obtain current work directory path: %v\", err)\n\t}\n\n\t\/\/ Set the GOPATH\n\tSettings.GoPath = os.Getenv(\"GOPATH\")\n\tif len(Settings.GoPath) == 0 {\n\t\tlog.L.Fatalf(\"GOPATH is not set!\")\n\t}\n\n\t\/\/ Append a trailing slash if not already present\n\tSettings.GoPath = utils.AddTrailingSlashToPath(Settings.GoPath)\n\tSettings.WorkingPath = utils.AddTrailingSlashToPath(Settings.WorkingPath)\n\n\t\/\/ Set the paths\n\tSettings.SessionsDatabasePath = Settings.TmpPath + sessionsDatabaseName\n\n\tSettings.PublicPath = Settings.WorkingPath + \"public\"\n\tSettings.TemplatesPath = Settings.WorkingPath + \"templates\"\n\tSettings.PagesPath = Settings.TemplatesPath + \"\/pages\"\n\tSettings.TranslationPath = Settings.WorkingPath + \"translations\"\n\tSettings.DataPath = Settings.WorkingPath + \"data\"\n\tSettings.ScssPath = Settings.DataPath + \"\/scss\"\n\tSettings.CssPath = Settings.PublicPath + \"\/css\"\n\n\tSettings.ScssArgs = []string{\n\t\t\"--unix-newlines\",\n\t\t\"--no-cache\",\n\t\t\"--sourcemap=none\",\n\t\t\"-t\",\n\t\t\"compressed\",\n\t\t\"--update\",\n\t\tSettings.ScssPath + \":\" + Settings.CssPath,\n\t}\n\n\tSettings.BulldozerSourcePath = Settings.GoPath + bulldozerGoPath\n\tSettings.BulldozerTemplatesPath = Settings.BulldozerSourcePath + \"\/data\/templates\"\n\tSettings.BulldozerCoreTemplatesPath = Settings.BulldozerTemplatesPath + \"\/core\"\n\tSettings.BulldozerResourcesPath = Settings.BulldozerSourcePath + \"\/data\/resources\"\n\tSettings.BulldozerTranslationPath = Settings.BulldozerSourcePath + \"\/data\/translations\"\n\tSettings.BulldozerPrototypesPath = Settings.BulldozerSourcePath + \"\/data\/prototypes\"\n}\n\n\/\/##############\/\/\n\/\/### Public ###\/\/\n\/\/##############\/\/\n\n\/\/ Prepare checks if the settings are correct and valid and initializes some values.\nfunc Prepare() error {\n\t\/\/ getEnv gets the environment variable or if empty fallsback to the default value.\n\tgetEnv := func(v, d string) string {\n\t\tenvV := os.Getenv(v)\n\t\tif len(envV) > 0 {\n\t\t\treturn envV\n\t\t}\n\n\t\treturn d\n\t}\n\n\t\/\/ Always get the following values first from the environment variables.\n\tSettings.DatabaseAddr = getEnv(\"BULLDOZER_DB_ADDR\", Settings.DatabaseAddr)\n\tSettings.DatabasePort = getEnv(\"BULLDOZER_DB_PORT\", Settings.DatabasePort)\n\n\t\/\/ Get environment variable values if the environment prefix is set on struct field strings.\n\ts := reflect.ValueOf(&Settings).Elem()\n\tfor x := 0; x < s.NumField(); x++ {\n\t\tf := s.Field(x)\n\n\t\tif !f.CanSet() || f.Kind() != reflect.String {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the struct field string value.\n\t\tv := f.String()\n\n\t\t\/\/ Skip if no environment prefix is set.\n\t\tif !strings.HasPrefix(v, ParseEnvVarPrefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Remove the prefix.\n\t\tv = strings.TrimPrefix(v, ParseEnvVarPrefix)\n\n\t\t\/\/ Get the value from the environment variable.\n\t\tenvV := os.Getenv(v)\n\t\tif len(envV) == 0 {\n\t\t\tlog.L.Warning(\"settings environment variable '%s' is not set!\", v)\n\t\t}\n\n\t\t\/\/ Set the new value.\n\t\tf.SetString(envV)\n\t}\n\n\t\/\/ Check if the Site url is valid\n\tif !strings.HasPrefix(Settings.SiteUrl, \"http:\/\/\") &&\n\t\t!strings.HasPrefix(Settings.SiteUrl, \"https:\/\/\") {\n\t\treturn fmt.Errorf(\"settings: site url is invalid: missing 'http:\/\/' or 'https:\/\/': '%s'\", Settings.SiteUrl)\n\t}\n\n\t\/\/ Check if the length of the cookie keys are valid\n\tl := len(Settings.CookieHashKeyBytes())\n\tif l != 32 && l != 64 {\n\t\treturn fmt.Errorf(\"settings: the cookie hash key has an invalid length of %v bytes! Valid lengths are 32 or 64 bytes...\", l)\n\t}\n\tl = len(Settings.CookieBlockKeyBytes())\n\tif l != 16 && l != 24 && l != 32 {\n\t\treturn fmt.Errorf(\"settings: the cookie block key has an invalid length of %v bytes! For AES, used by default, valid lengths are 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.\", l)\n\t}\n\n\t\/\/ Print a warning if the default cookie keys are set\n\tif Settings.CookieHashKey == defaultCookieHashKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default cookie hash key is set! You should replace this with a secret key!\")\n\t}\n\tif Settings.CookieBlockKey == defaultCookieBlockKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default cookie block key is set! You should replace this with a secret key!\")\n\t}\n\n\t\/\/ Print a warning if the SecureHttpsAccess flag is false\n\tif !Settings.SecureHttpsAccess {\n\t\tlog.L.Warning(\"[WARNING] settings: the secure https access flag is false! You should provide a secure https access!\")\n\t}\n\n\t\/\/ Print a warning if the default password encryption key is used.\n\tif Settings.PasswordEncryptionKey == defaultPasswordEncryptionKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default password encryption key is set! You should replace this with a secret key!\")\n\t}\n\n\t\/\/ Warn the user about possible forgotten root slashes.\n\tfor _, url := range Settings.StaticJavaScripts {\n\t\tif !strings.HasPrefix(url, \"\/\") {\n\t\t\tlog.L.Warning(\"static javascript url does not start with a slash: '%s'\", url)\n\t\t}\n\t}\n\tfor _, url := range Settings.StaticStyleSheets {\n\t\tif !strings.HasPrefix(url, \"\/\") {\n\t\t\tlog.L.Warning(\"static style sheet url does not start with a slash: '%s'\", url)\n\t\t}\n\t}\n\n\t\/\/ Remove leading \/ from the site url\n\tSettings.SiteUrl = strings.TrimSuffix(Settings.SiteUrl, \"\/\")\n\n\treturn nil\n}\n\n\/\/ Load loads the settings file.\nfunc Load(path string) error {\n\tlog.L.Info(\"Loading settings from file: '%s'\", path)\n\n\texists, err := utils.Exists(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"settings file does not exists: '%s'\", path)\n\t}\n\n\t\/\/ Parse the configuration\n\tif _, err = toml.DecodeFile(path, &Settings); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse settings file '%s': %v\", path, err)\n\t}\n\n\treturn nil\n}\n\n\/\/#######################\/\/\n\/\/### Settings struct ###\/\/\n\/\/#######################\/\/\n\ntype settings struct {\n\t\/\/ If some jobs should be done automatically by the Bulldoze() function\n\tAutoSetGOMAXPROCS bool\n\tAutoParseFlags bool\n\tAutoCatchInterrupts bool\n\n\t\/\/ This is the address to access this goji application. It should include the http:\/\/ part too.\n\tSiteUrl string\n\n\t\/\/ Whenever this application is accessible through a secure HTTPs connection.\n\t\/\/ This flag affects some important security mechanisms, as settings the secure flag on cookies.\n\tSecureHttpsAccess bool\n\n\tSocketType SocketType\n\tListenAddress string\n\tServeFiles bool\n\n\tMinifyTemplates bool\n\n\tDatabaseAddr string\n\tDatabasePort string\n\tDatabaseName string\n\n\t\/\/ Maximum number of idle connections in the pool.\n\tDatabaseMaxIdle int\n\n\t\/\/ Maximum number of connections allocated by the pool at a given time.\n\t\/\/ When zero, there is no limit on the number of connections in the pool.\n\tDatabaseMaxOpen int\n\n\t\/\/ Close connections after remaining idle for this duration. If the value\n\t\/\/ is zero, then idle connections are not closed. Applications should set\n\t\/\/ the timeout to a value less than the server's timeout.\n\tDatabaseTimeout time.Duration\n\n\tGoPath string\n\tWorkingPath string\n\tTmpPath string\n\tSessionsDatabasePath string\n\n\tPublicPath string\n\tPagesPath string\n\tTemplatesPath string\n\tTranslationPath string\n\tDataPath string\n\n\tScssPath string\n\tCssPath string\n\n\tScssCmd string\n\tScssArgs []string\n\n\tBulldozerSourcePath string\n\tBulldozerTemplatesPath string\n\tBulldozerCoreTemplatesPath string\n\tBulldozerResourcesPath string\n\tBulldozerTranslationPath string\n\tBulldozerPrototypesPath string\n\n\t\/\/ The CookieHashKey is required, used to authenticate the cookie value using HMAC.\n\t\/\/ It is recommended to use a key with 32 or 64 bytes.\n\tCookieHashKey string\n\t\/\/ The CookieBlockKey is used to encrypt the cookie value.\n\t\/\/ The length must correspond to the block size of the encryption algorithm.\n\t\/\/ For AES, used by default, valid lengths are 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.\n\tCookieBlockKey string\n\n\t\/\/ The maximum session age in seconds\n\tSessionMaxAge int\n\n\t\/\/ The maximum allowed requests per minute before the IP is blocked\n\tFirewallMaxRequestsPerMinute int\n\t\/\/ Release the blocked remote address after x seconds\n\tFirewallReleaseBlockAfter int\n\n\t\/\/ This are the static stylesheets and javascripts which\n\t\/\/ will be always loaded.\n\t\/\/ Don't manipulate this slices after Bulldozer initialization!\n\tStaticJavaScripts []string\n\tStaticStyleSheets []string\n\n\t\/\/ Authentication stuff\n\tRegistrationDisabled bool\n\tPasswordEncryptionKey string\n\tRemoveNotConfirmedUsersTimeout int\n\n\t\/\/ Mail\n\tMailFrom string\n\tMailUsername string\n\tMailPassword string\n\tMailSMTPHost string\n\tMailSMTPPort int\n\tMailSkipCertificateVerify bool\n}\n\nfunc (s *settings) CookieHashKeyBytes() []byte {\n\treturn []byte(s.CookieHashKey)\n}\n\nfunc (s *settings) CookieBlockKeyBytes() []byte {\n\treturn []byte(s.CookieBlockKey)\n}\n<commit_msg>changed RemoveNotConfirmedUsersTimeout value<commit_after>\/*\n * Bulldozer Framework\n * Copyright (C) DesertBit\n *\/\n\npackage settings\n\nimport (\n\t\"fmt\"\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/desertbit\/bulldozer\/log\"\n\t\"github.com\/desertbit\/bulldozer\/utils\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/*\n\t * Public\n\t *\/\n\n\tTemplateExtension = \".bt\"\n\tScssSuffix = \".scss\"\n\n\tDefaultSettingsFileName = \"settings.toml\"\n\n\t\/\/ If this prefix is set to string values, then\n\t\/\/ the value is obtained from the environment variables.\n\tParseEnvVarPrefix = \"ENV:\"\n\n\t\/\/ The socket types\n\tTypeTcpSocket SocketType = 1 << iota\n\tTypeUnixSocket SocketType = 1 << iota\n\n\t\/\/ Static URL paths\n\tUrlPublic = \"\/public\/\"\n\tUrlBulldozerResources = \"\/bulldozer\/res\/\"\n\n\t\/*\n\t * Private\n\t *\/\n\n\tbulldozerGoPath = \"src\/github.com\/desertbit\/bulldozer\/\"\n\ttmpDirName = \"bulldozer\"\n\tsessionsDatabaseName = \"sessions.db\"\n\n\t\/\/ Default cookie keys\n\tdefaultCookieHashKey = \"R7DqYdgWlztQ06diRM4z7ByuDwfiAvehLxTwAEDHFvgjkA4CcPrWBhZk6FJIBuDs\"\n\tdefaultCookieBlockKey = \"2Mox41MlNDHOzShGfiO6AMq3isx5hz9r\"\n\n\t\/\/ Default password key\n\tdefaultPasswordEncryptionKey = \"gNlmWx0jurl8ohIVZMi8k9eRZxP25kEeQq68TeTVLD9omFZmP7sSqLK\"\n)\n\nvar (\n\tSettings settings\n)\n\ntype SocketType int\n\nfunc init() {\n\t\/\/ Set the default values\n\tSettings = settings{\n\t\tAutoSetGOMAXPROCS: true,\n\t\tAutoParseFlags: true,\n\t\tAutoCatchInterrupts: true,\n\n\t\tSiteUrl: \"http:\/\/127.0.0.1:9000\",\n\t\tSecureHttpsAccess: false,\n\t\tSocketType: TypeTcpSocket,\n\t\tListenAddress: \":9000\",\n\t\tServeFiles: true,\n\n\t\tDatabaseAddr: \"localhost\",\n\t\tDatabasePort: \"28015\",\n\t\tDatabaseName: \"test\",\n\t\tDatabaseMaxIdle: 50,\n\t\tDatabaseMaxOpen: 50,\n\t\tDatabaseTimeout: time.Minute,\n\n\t\tCookieHashKey: defaultCookieHashKey,\n\t\tCookieBlockKey: defaultCookieBlockKey,\n\t\tSessionMaxAge: 60 * 60 * 24 * 14, \/\/ 14 Days\n\n\t\tFirewallMaxRequestsPerMinute: 100,\n\t\tFirewallReleaseBlockAfter: 60 * 5, \/\/ 5 minutes\n\n\t\tScssCmd: \"scss\",\n\n\t\tRegistrationDisabled: true,\n\t\tPasswordEncryptionKey: defaultPasswordEncryptionKey,\n\t\tRemoveNotConfirmedUsersTimeout: 60 * 60 * 24 * 20, \/\/ 20 Days\n\n\t\tMailSMTPPort: 587,\n\t\tMailSkipCertificateVerify: false,\n\t}\n\n\t\/\/ Set the temporary directory path\n\tSettings.TmpPath = utils.AddTrailingSlashToPath(utils.AddTrailingSlashToPath(os.TempDir()) + tmpDirName)\n\n\t\/\/ Get the current working directory path\n\tvar err error\n\tSettings.WorkingPath, err = os.Getwd()\n\tif err != nil {\n\t\tlog.L.Fatalf(\"failed to obtain current work directory path: %v\", err)\n\t}\n\n\t\/\/ Set the GOPATH\n\tSettings.GoPath = os.Getenv(\"GOPATH\")\n\tif len(Settings.GoPath) == 0 {\n\t\tlog.L.Fatalf(\"GOPATH is not set!\")\n\t}\n\n\t\/\/ Append a trailing slash if not already present\n\tSettings.GoPath = utils.AddTrailingSlashToPath(Settings.GoPath)\n\tSettings.WorkingPath = utils.AddTrailingSlashToPath(Settings.WorkingPath)\n\n\t\/\/ Set the paths\n\tSettings.SessionsDatabasePath = Settings.TmpPath + sessionsDatabaseName\n\n\tSettings.PublicPath = Settings.WorkingPath + \"public\"\n\tSettings.TemplatesPath = Settings.WorkingPath + \"templates\"\n\tSettings.PagesPath = Settings.TemplatesPath + \"\/pages\"\n\tSettings.TranslationPath = Settings.WorkingPath + \"translations\"\n\tSettings.DataPath = Settings.WorkingPath + \"data\"\n\tSettings.ScssPath = Settings.DataPath + \"\/scss\"\n\tSettings.CssPath = Settings.PublicPath + \"\/css\"\n\n\tSettings.ScssArgs = []string{\n\t\t\"--unix-newlines\",\n\t\t\"--no-cache\",\n\t\t\"--sourcemap=none\",\n\t\t\"-t\",\n\t\t\"compressed\",\n\t\t\"--update\",\n\t\tSettings.ScssPath + \":\" + Settings.CssPath,\n\t}\n\n\tSettings.BulldozerSourcePath = Settings.GoPath + bulldozerGoPath\n\tSettings.BulldozerTemplatesPath = Settings.BulldozerSourcePath + \"\/data\/templates\"\n\tSettings.BulldozerCoreTemplatesPath = Settings.BulldozerTemplatesPath + \"\/core\"\n\tSettings.BulldozerResourcesPath = Settings.BulldozerSourcePath + \"\/data\/resources\"\n\tSettings.BulldozerTranslationPath = Settings.BulldozerSourcePath + \"\/data\/translations\"\n\tSettings.BulldozerPrototypesPath = Settings.BulldozerSourcePath + \"\/data\/prototypes\"\n}\n\n\/\/##############\/\/\n\/\/### Public ###\/\/\n\/\/##############\/\/\n\n\/\/ Prepare checks if the settings are correct and valid and initializes some values.\nfunc Prepare() error {\n\t\/\/ getEnv gets the environment variable or if empty fallsback to the default value.\n\tgetEnv := func(v, d string) string {\n\t\tenvV := os.Getenv(v)\n\t\tif len(envV) > 0 {\n\t\t\treturn envV\n\t\t}\n\n\t\treturn d\n\t}\n\n\t\/\/ Always get the following values first from the environment variables.\n\t\/\/ TODO: Get this values with reflect.\n\tSettings.DatabaseAddr = getEnv(\"BULLDOZER_DB_ADDR\", Settings.DatabaseAddr)\n\tSettings.DatabasePort = getEnv(\"BULLDOZER_DB_PORT\", Settings.DatabasePort)\n\n\t\/\/ Get environment variable values if the environment prefix is set on struct field strings.\n\ts := reflect.ValueOf(&Settings).Elem()\n\tfor x := 0; x < s.NumField(); x++ {\n\t\tf := s.Field(x)\n\n\t\tif !f.CanSet() || f.Kind() != reflect.String {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the struct field string value.\n\t\tv := f.String()\n\n\t\t\/\/ Skip if no environment prefix is set.\n\t\tif !strings.HasPrefix(v, ParseEnvVarPrefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Remove the prefix.\n\t\tv = strings.TrimPrefix(v, ParseEnvVarPrefix)\n\n\t\t\/\/ Get the value from the environment variable.\n\t\tenvV := os.Getenv(v)\n\t\tif len(envV) == 0 {\n\t\t\tlog.L.Warning(\"settings environment variable '%s' is not set!\", v)\n\t\t}\n\n\t\t\/\/ Set the new value.\n\t\tf.SetString(envV)\n\t}\n\n\t\/\/ Check if the Site url is valid\n\tif !strings.HasPrefix(Settings.SiteUrl, \"http:\/\/\") &&\n\t\t!strings.HasPrefix(Settings.SiteUrl, \"https:\/\/\") {\n\t\treturn fmt.Errorf(\"settings: site url is invalid: missing 'http:\/\/' or 'https:\/\/': '%s'\", Settings.SiteUrl)\n\t}\n\n\t\/\/ Check if the length of the cookie keys are valid\n\tl := len(Settings.CookieHashKeyBytes())\n\tif l != 32 && l != 64 {\n\t\treturn fmt.Errorf(\"settings: the cookie hash key has an invalid length of %v bytes! Valid lengths are 32 or 64 bytes...\", l)\n\t}\n\tl = len(Settings.CookieBlockKeyBytes())\n\tif l != 16 && l != 24 && l != 32 {\n\t\treturn fmt.Errorf(\"settings: the cookie block key has an invalid length of %v bytes! For AES, used by default, valid lengths are 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.\", l)\n\t}\n\n\t\/\/ Print a warning if the default cookie keys are set\n\tif Settings.CookieHashKey == defaultCookieHashKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default cookie hash key is set! You should replace this with a secret key!\")\n\t}\n\tif Settings.CookieBlockKey == defaultCookieBlockKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default cookie block key is set! You should replace this with a secret key!\")\n\t}\n\n\t\/\/ Print a warning if the SecureHttpsAccess flag is false\n\tif !Settings.SecureHttpsAccess {\n\t\tlog.L.Warning(\"[WARNING] settings: the secure https access flag is false! You should provide a secure https access!\")\n\t}\n\n\t\/\/ Print a warning if the default password encryption key is used.\n\tif Settings.PasswordEncryptionKey == defaultPasswordEncryptionKey {\n\t\tlog.L.Warning(\"[WARNING] settings: the default password encryption key is set! You should replace this with a secret key!\")\n\t}\n\n\t\/\/ Warn the user about possible forgotten root slashes.\n\tfor _, url := range Settings.StaticJavaScripts {\n\t\tif !strings.HasPrefix(url, \"\/\") {\n\t\t\tlog.L.Warning(\"static javascript url does not start with a slash: '%s'\", url)\n\t\t}\n\t}\n\tfor _, url := range Settings.StaticStyleSheets {\n\t\tif !strings.HasPrefix(url, \"\/\") {\n\t\t\tlog.L.Warning(\"static style sheet url does not start with a slash: '%s'\", url)\n\t\t}\n\t}\n\n\t\/\/ Remove leading \/ from the site url\n\tSettings.SiteUrl = strings.TrimSuffix(Settings.SiteUrl, \"\/\")\n\n\treturn nil\n}\n\n\/\/ Load loads the settings file.\nfunc Load(path string) error {\n\tlog.L.Info(\"Loading settings from file: '%s'\", path)\n\n\texists, err := utils.Exists(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"settings file does not exists: '%s'\", path)\n\t}\n\n\t\/\/ Parse the configuration\n\tif _, err = toml.DecodeFile(path, &Settings); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse settings file '%s': %v\", path, err)\n\t}\n\n\treturn nil\n}\n\n\/\/#######################\/\/\n\/\/### Settings struct ###\/\/\n\/\/#######################\/\/\n\ntype settings struct {\n\t\/\/ If some jobs should be done automatically by the Bulldoze() function\n\tAutoSetGOMAXPROCS bool\n\tAutoParseFlags bool\n\tAutoCatchInterrupts bool\n\n\t\/\/ This is the address to access this goji application. It should include the http:\/\/ part too.\n\tSiteUrl string\n\n\t\/\/ Whenever this application is accessible through a secure HTTPs connection.\n\t\/\/ This flag affects some important security mechanisms, as settings the secure flag on cookies.\n\tSecureHttpsAccess bool\n\n\tSocketType SocketType\n\tListenAddress string\n\tServeFiles bool\n\n\tMinifyTemplates bool\n\n\tDatabaseAddr string\n\tDatabasePort string\n\tDatabaseName string\n\n\t\/\/ Maximum number of idle connections in the pool.\n\tDatabaseMaxIdle int\n\n\t\/\/ Maximum number of connections allocated by the pool at a given time.\n\t\/\/ When zero, there is no limit on the number of connections in the pool.\n\tDatabaseMaxOpen int\n\n\t\/\/ Close connections after remaining idle for this duration. If the value\n\t\/\/ is zero, then idle connections are not closed. Applications should set\n\t\/\/ the timeout to a value less than the server's timeout.\n\tDatabaseTimeout time.Duration\n\n\tGoPath string\n\tWorkingPath string\n\tTmpPath string\n\tSessionsDatabasePath string\n\n\tPublicPath string\n\tPagesPath string\n\tTemplatesPath string\n\tTranslationPath string\n\tDataPath string\n\n\tScssPath string\n\tCssPath string\n\n\tScssCmd string\n\tScssArgs []string\n\n\tBulldozerSourcePath string\n\tBulldozerTemplatesPath string\n\tBulldozerCoreTemplatesPath string\n\tBulldozerResourcesPath string\n\tBulldozerTranslationPath string\n\tBulldozerPrototypesPath string\n\n\t\/\/ The CookieHashKey is required, used to authenticate the cookie value using HMAC.\n\t\/\/ It is recommended to use a key with 32 or 64 bytes.\n\tCookieHashKey string\n\t\/\/ The CookieBlockKey is used to encrypt the cookie value.\n\t\/\/ The length must correspond to the block size of the encryption algorithm.\n\t\/\/ For AES, used by default, valid lengths are 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.\n\tCookieBlockKey string\n\n\t\/\/ The maximum session age in seconds\n\tSessionMaxAge int\n\n\t\/\/ The maximum allowed requests per minute before the IP is blocked\n\tFirewallMaxRequestsPerMinute int\n\t\/\/ Release the blocked remote address after x seconds\n\tFirewallReleaseBlockAfter int\n\n\t\/\/ This are the static stylesheets and javascripts which\n\t\/\/ will be always loaded.\n\t\/\/ Don't manipulate this slices after Bulldozer initialization!\n\tStaticJavaScripts []string\n\tStaticStyleSheets []string\n\n\t\/\/ Authentication stuff\n\tRegistrationDisabled bool\n\tPasswordEncryptionKey string\n\tRemoveNotConfirmedUsersTimeout int\n\n\t\/\/ Mail\n\tMailFrom string\n\tMailUsername string\n\tMailPassword string\n\tMailSMTPHost string\n\tMailSMTPPort int\n\tMailSkipCertificateVerify bool\n}\n\nfunc (s *settings) CookieHashKeyBytes() []byte {\n\treturn []byte(s.CookieHashKey)\n}\n\nfunc (s *settings) CookieBlockKeyBytes() []byte {\n\treturn []byte(s.CookieBlockKey)\n}\n<|endoftext|>"} {"text":"<commit_before>package wire\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n)\n\n\/\/ Byte\n\nfunc WriteByte(b byte, w io.Writer, n *int, err *error) {\n\tWriteTo([]byte{b}, w, n, err)\n}\n\nfunc ReadByte(r io.Reader, n *int, err *error) byte {\n\tbuf := make([]byte, 1)\n\tReadFull(buf, r, n, err)\n\treturn buf[0]\n}\n\n\/\/ Int8\n\nfunc WriteInt8(i int8, w io.Writer, n *int, err *error) {\n\tWriteByte(byte(i), w, n, err)\n}\n\nfunc ReadInt8(r io.Reader, n *int, err *error) int8 {\n\treturn int8(ReadByte(r, n, err))\n}\n\n\/\/ Uint8\n\nfunc WriteUint8(i uint8, w io.Writer, n *int, err *error) {\n\tWriteByte(byte(i), w, n, err)\n}\n\nfunc ReadUint8(r io.Reader, n *int, err *error) uint8 {\n\treturn uint8(ReadByte(r, n, err))\n}\n\n\/\/ Int16\n\nfunc WriteInt16(i int16, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n\t*n += 2\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt16(r io.Reader, n *int, err *error) int16 {\n\tbuf := make([]byte, 2)\n\tReadFull(buf, r, n, err)\n\treturn int16(binary.BigEndian.Uint16(buf))\n}\n\nfunc PutInt16(buf []byte, i int16) {\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n}\n\nfunc GetInt16(buf []byte) int16 {\n\treturn int16(binary.BigEndian.Uint16(buf))\n}\n\n\/\/ Uint16\n\nfunc WriteUint16(i uint16, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n\t*n += 2\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint16(r io.Reader, n *int, err *error) uint16 {\n\tbuf := make([]byte, 2)\n\tReadFull(buf, r, n, err)\n\treturn uint16(binary.BigEndian.Uint16(buf))\n}\n\nfunc PutUint16(buf []byte, i uint16) {\n\tbinary.BigEndian.PutUint16(buf, i)\n}\n\nfunc GetUint16(buf []byte) uint16 {\n\treturn binary.BigEndian.Uint16(buf)\n}\n\n\/\/ []Uint16\n\nfunc WriteUint16s(iz []uint16, w io.Writer, n *int, err *error) {\n\tWriteUint32(uint32(len(iz)), w, n, err)\n\tfor _, i := range iz {\n\t\tWriteUint16(i, w, n, err)\n\t\tif *err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc ReadUint16s(r io.Reader, n *int, err *error) []uint16 {\n\tlength := ReadUint32(r, n, err)\n\tif *err != nil {\n\t\treturn nil\n\t}\n\tiz := make([]uint16, length)\n\tfor j := uint32(0); j < length; j++ {\n\t\tii := ReadUint16(r, n, err)\n\t\tif *err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tiz[j] = ii\n\t}\n\treturn iz\n}\n\n\/\/ Int32\n\nfunc WriteInt32(i int32, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n\t*n += 4\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt32(r io.Reader, n *int, err *error) int32 {\n\tbuf := make([]byte, 4)\n\tReadFull(buf, r, n, err)\n\treturn int32(binary.BigEndian.Uint32(buf))\n}\n\nfunc PutInt32(buf []byte, i int32) {\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n}\n\nfunc GetInt32(buf []byte) int32 {\n\treturn int32(binary.BigEndian.Uint32(buf))\n}\n\n\/\/ Uint32\n\nfunc WriteUint32(i uint32, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n\t*n += 4\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint32(r io.Reader, n *int, err *error) uint32 {\n\tbuf := make([]byte, 4)\n\tReadFull(buf, r, n, err)\n\treturn uint32(binary.BigEndian.Uint32(buf))\n}\n\nfunc PutUint32(buf []byte, i uint32) {\n\tbinary.BigEndian.PutUint32(buf, i)\n}\n\nfunc GetUint32(buf []byte) uint32 {\n\treturn binary.BigEndian.Uint32(buf)\n}\n\n\/\/ Int64\n\nfunc WriteInt64(i int64, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t*n += 8\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt64(r io.Reader, n *int, err *error) int64 {\n\tbuf := make([]byte, 8)\n\tReadFull(buf, r, n, err)\n\treturn int64(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutInt64(buf []byte, i int64) {\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n}\n\nfunc GetInt64(buf []byte) int64 {\n\treturn int64(binary.BigEndian.Uint64(buf))\n}\n\n\/\/ Uint64\n\nfunc WriteUint64(i uint64, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t*n += 8\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint64(r io.Reader, n *int, err *error) uint64 {\n\tbuf := make([]byte, 8)\n\tReadFull(buf, r, n, err)\n\treturn uint64(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutUint64(buf []byte, i uint64) {\n\tbinary.BigEndian.PutUint64(buf, i)\n}\n\nfunc GetUint64(buf []byte) uint64 {\n\treturn binary.BigEndian.Uint64(buf)\n}\n\n\/\/ Varint\n\nfunc uvarintSize(i uint64) int {\n\tif i == 0 {\n\t\treturn 0\n\t}\n\tif i < 1<<8 {\n\t\treturn 1\n\t}\n\tif i < 1<<16 {\n\t\treturn 2\n\t}\n\tif i < 1<<24 {\n\t\treturn 3\n\t}\n\tif i < 1<<32 {\n\t\treturn 4\n\t}\n\tif i < 1<<40 {\n\t\treturn 5\n\t}\n\tif i < 1<<48 {\n\t\treturn 6\n\t}\n\tif i < 1<<56 {\n\t\treturn 7\n\t}\n\treturn 8\n}\n\nfunc WriteVarint(i int, w io.Writer, n *int, err *error) {\n\tvar negate = false\n\tif i < 0 {\n\t\tnegate = true\n\t\ti = -i\n\t}\n\tvar size = uvarintSize(uint64(i))\n\tif negate {\n\t\t\/\/ e.g. 0xF1 for a single negative byte\n\t\tWriteUint8(uint8(size+0xF0), w, n, err)\n\t} else {\n\t\tWriteUint8(uint8(size), w, n, err)\n\t}\n\tif size > 0 {\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t\tWriteTo(buf[(8-size):], w, n, err)\n\t}\n}\n\nfunc ReadVarint(r io.Reader, n *int, err *error) int {\n\tvar size = ReadUint8(r, n, err)\n\tvar negate = false\n\tif (size >> 4) == 0xF {\n\t\tnegate = true\n\t\tsize = size & 0x0F\n\t}\n\tif size > 8 {\n\t\tsetFirstErr(err, errors.New(\"Varint overflow\"))\n\t\treturn 0\n\t}\n\tif size == 0 {\n\t\tif negate {\n\t\t\tsetFirstErr(err, errors.New(\"Varint does not allow negative zero\"))\n\t\t}\n\t\treturn 0\n\t}\n\tbuf := make([]byte, 8)\n\tReadFull(buf[(8-size):], r, n, err)\n\tvar i = int(binary.BigEndian.Uint64(buf))\n\tif negate {\n\t\treturn -i\n\t} else {\n\t\treturn i\n\t}\n}\n\nfunc PutVarint(buf []byte, i int) (n int, err error) {\n\tvar negate = false\n\tif i < 0 {\n\t\tnegate = true\n\t\ti = -i\n\t}\n\tvar size = uvarintSize(uint64(i))\n\tif len(buf) < size+1 {\n\t\treturn 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tif negate {\n\t\t\/\/ e.g. 0xF1 for a single negative byte\n\t\tbuf[0] = byte(size + 0xF0)\n\t} else {\n\t\tbuf[0] = byte(size)\n\t}\n\tif size > 0 {\n\t\tbuf2 := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf2, uint64(i))\n\t\tcopy(buf[1:], buf2[(8-size):])\n\t}\n\treturn size + 1, nil\n}\n\nfunc GetVarint(buf []byte) (i int, n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, 0, errors.New(\"Insufficent buffer length\")\n\t}\n\tvar size = int(buf[0])\n\tvar negate = false\n\tif (size >> 4) == 0xF {\n\t\tnegate = true\n\t\tsize = size & 0x0F\n\t}\n\tif size > 8 {\n\t\treturn 0, 0, errors.New(\"Varint overflow\")\n\t}\n\tif size == 0 {\n\t\tif negate {\n\t\t\treturn 0, 0, errors.New(\"Varint does not allow negative zero\")\n\t\t}\n\t\treturn 0, 1, nil\n\t}\n\tif len(buf) < 1+size {\n\t\treturn 0, 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf2 := make([]byte, 8)\n\tcopy(buf2[(8-size):], buf[1:1+size])\n\ti = int(binary.BigEndian.Uint64(buf2))\n\tif negate {\n\t\treturn -i, size + 1, nil\n\t} else {\n\t\treturn i, size + 1, nil\n\t}\n}\n\n\/\/ Uvarint\n\nfunc WriteUvarint(i uint, w io.Writer, n *int, err *error) {\n\tvar size = uvarintSize(uint64(i))\n\tWriteUint8(uint8(size), w, n, err)\n\tif size > 0 {\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t\tWriteTo(buf[(8-size):], w, n, err)\n\t}\n}\n\nfunc ReadUvarint(r io.Reader, n *int, err *error) uint {\n\tvar size = ReadUint8(r, n, err)\n\tif size > 8 {\n\t\tsetFirstErr(err, errors.New(\"Uvarint overflow\"))\n\t\treturn 0\n\t}\n\tif size == 0 {\n\t\treturn 0\n\t}\n\tbuf := make([]byte, 8)\n\tReadFull(buf[(8-size):], r, n, err)\n\treturn uint(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutUvarint(buf []byte, i uint) (n int, err error) {\n\tvar size = uvarintSize(uint64(i))\n\tif len(buf) < size+1 {\n\t\treturn 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf[0] = byte(size)\n\tif size > 0 {\n\t\tbuf2 := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf2, uint64(i))\n\t\tcopy(buf[1:], buf2[(8-size):])\n\t}\n\treturn size + 1, nil\n}\n\nfunc GetUvarint(buf []byte) (i uint, n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, 0, errors.New(\"Insufficent buffer length\")\n\t}\n\tvar size = int(buf[0])\n\tif size > 8 {\n\t\treturn 0, 0, errors.New(\"Uvarint overflow\")\n\t}\n\tif size == 0 {\n\t\treturn 0, 1, nil\n\t}\n\tif len(buf) < 1+size {\n\t\treturn 0, 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf2 := make([]byte, 8)\n\tcopy(buf2[(8-size):], buf[1:1+size])\n\ti = uint(binary.BigEndian.Uint64(buf2))\n\treturn i, size + 1, nil\n}\n\nfunc setFirstErr(err *error, newErr error) {\n\tif *err == nil && newErr != nil {\n\t\t*err = newErr\n\t}\n}\n<commit_msg>Expose UvarintSize<commit_after>package wire\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n)\n\n\/\/ Byte\n\nfunc WriteByte(b byte, w io.Writer, n *int, err *error) {\n\tWriteTo([]byte{b}, w, n, err)\n}\n\nfunc ReadByte(r io.Reader, n *int, err *error) byte {\n\tbuf := make([]byte, 1)\n\tReadFull(buf, r, n, err)\n\treturn buf[0]\n}\n\n\/\/ Int8\n\nfunc WriteInt8(i int8, w io.Writer, n *int, err *error) {\n\tWriteByte(byte(i), w, n, err)\n}\n\nfunc ReadInt8(r io.Reader, n *int, err *error) int8 {\n\treturn int8(ReadByte(r, n, err))\n}\n\n\/\/ Uint8\n\nfunc WriteUint8(i uint8, w io.Writer, n *int, err *error) {\n\tWriteByte(byte(i), w, n, err)\n}\n\nfunc ReadUint8(r io.Reader, n *int, err *error) uint8 {\n\treturn uint8(ReadByte(r, n, err))\n}\n\n\/\/ Int16\n\nfunc WriteInt16(i int16, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n\t*n += 2\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt16(r io.Reader, n *int, err *error) int16 {\n\tbuf := make([]byte, 2)\n\tReadFull(buf, r, n, err)\n\treturn int16(binary.BigEndian.Uint16(buf))\n}\n\nfunc PutInt16(buf []byte, i int16) {\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n}\n\nfunc GetInt16(buf []byte) int16 {\n\treturn int16(binary.BigEndian.Uint16(buf))\n}\n\n\/\/ Uint16\n\nfunc WriteUint16(i uint16, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(buf, uint16(i))\n\t*n += 2\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint16(r io.Reader, n *int, err *error) uint16 {\n\tbuf := make([]byte, 2)\n\tReadFull(buf, r, n, err)\n\treturn uint16(binary.BigEndian.Uint16(buf))\n}\n\nfunc PutUint16(buf []byte, i uint16) {\n\tbinary.BigEndian.PutUint16(buf, i)\n}\n\nfunc GetUint16(buf []byte) uint16 {\n\treturn binary.BigEndian.Uint16(buf)\n}\n\n\/\/ []Uint16\n\nfunc WriteUint16s(iz []uint16, w io.Writer, n *int, err *error) {\n\tWriteUint32(uint32(len(iz)), w, n, err)\n\tfor _, i := range iz {\n\t\tWriteUint16(i, w, n, err)\n\t\tif *err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc ReadUint16s(r io.Reader, n *int, err *error) []uint16 {\n\tlength := ReadUint32(r, n, err)\n\tif *err != nil {\n\t\treturn nil\n\t}\n\tiz := make([]uint16, length)\n\tfor j := uint32(0); j < length; j++ {\n\t\tii := ReadUint16(r, n, err)\n\t\tif *err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tiz[j] = ii\n\t}\n\treturn iz\n}\n\n\/\/ Int32\n\nfunc WriteInt32(i int32, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n\t*n += 4\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt32(r io.Reader, n *int, err *error) int32 {\n\tbuf := make([]byte, 4)\n\tReadFull(buf, r, n, err)\n\treturn int32(binary.BigEndian.Uint32(buf))\n}\n\nfunc PutInt32(buf []byte, i int32) {\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n}\n\nfunc GetInt32(buf []byte) int32 {\n\treturn int32(binary.BigEndian.Uint32(buf))\n}\n\n\/\/ Uint32\n\nfunc WriteUint32(i uint32, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(buf, uint32(i))\n\t*n += 4\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint32(r io.Reader, n *int, err *error) uint32 {\n\tbuf := make([]byte, 4)\n\tReadFull(buf, r, n, err)\n\treturn uint32(binary.BigEndian.Uint32(buf))\n}\n\nfunc PutUint32(buf []byte, i uint32) {\n\tbinary.BigEndian.PutUint32(buf, i)\n}\n\nfunc GetUint32(buf []byte) uint32 {\n\treturn binary.BigEndian.Uint32(buf)\n}\n\n\/\/ Int64\n\nfunc WriteInt64(i int64, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t*n += 8\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadInt64(r io.Reader, n *int, err *error) int64 {\n\tbuf := make([]byte, 8)\n\tReadFull(buf, r, n, err)\n\treturn int64(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutInt64(buf []byte, i int64) {\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n}\n\nfunc GetInt64(buf []byte) int64 {\n\treturn int64(binary.BigEndian.Uint64(buf))\n}\n\n\/\/ Uint64\n\nfunc WriteUint64(i uint64, w io.Writer, n *int, err *error) {\n\tbuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t*n += 8\n\tWriteTo(buf, w, n, err)\n}\n\nfunc ReadUint64(r io.Reader, n *int, err *error) uint64 {\n\tbuf := make([]byte, 8)\n\tReadFull(buf, r, n, err)\n\treturn uint64(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutUint64(buf []byte, i uint64) {\n\tbinary.BigEndian.PutUint64(buf, i)\n}\n\nfunc GetUint64(buf []byte) uint64 {\n\treturn binary.BigEndian.Uint64(buf)\n}\n\n\/\/ Varint\n\nfunc UvarintSize(i uint64) int {\n\tif i == 0 {\n\t\treturn 0\n\t}\n\tif i < 1<<8 {\n\t\treturn 1\n\t}\n\tif i < 1<<16 {\n\t\treturn 2\n\t}\n\tif i < 1<<24 {\n\t\treturn 3\n\t}\n\tif i < 1<<32 {\n\t\treturn 4\n\t}\n\tif i < 1<<40 {\n\t\treturn 5\n\t}\n\tif i < 1<<48 {\n\t\treturn 6\n\t}\n\tif i < 1<<56 {\n\t\treturn 7\n\t}\n\treturn 8\n}\n\nfunc WriteVarint(i int, w io.Writer, n *int, err *error) {\n\tvar negate = false\n\tif i < 0 {\n\t\tnegate = true\n\t\ti = -i\n\t}\n\tvar size = UvarintSize(uint64(i))\n\tif negate {\n\t\t\/\/ e.g. 0xF1 for a single negative byte\n\t\tWriteUint8(uint8(size+0xF0), w, n, err)\n\t} else {\n\t\tWriteUint8(uint8(size), w, n, err)\n\t}\n\tif size > 0 {\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t\tWriteTo(buf[(8-size):], w, n, err)\n\t}\n}\n\nfunc ReadVarint(r io.Reader, n *int, err *error) int {\n\tvar size = ReadUint8(r, n, err)\n\tvar negate = false\n\tif (size >> 4) == 0xF {\n\t\tnegate = true\n\t\tsize = size & 0x0F\n\t}\n\tif size > 8 {\n\t\tsetFirstErr(err, errors.New(\"Varint overflow\"))\n\t\treturn 0\n\t}\n\tif size == 0 {\n\t\tif negate {\n\t\t\tsetFirstErr(err, errors.New(\"Varint does not allow negative zero\"))\n\t\t}\n\t\treturn 0\n\t}\n\tbuf := make([]byte, 8)\n\tReadFull(buf[(8-size):], r, n, err)\n\tvar i = int(binary.BigEndian.Uint64(buf))\n\tif negate {\n\t\treturn -i\n\t} else {\n\t\treturn i\n\t}\n}\n\nfunc PutVarint(buf []byte, i int) (n int, err error) {\n\tvar negate = false\n\tif i < 0 {\n\t\tnegate = true\n\t\ti = -i\n\t}\n\tvar size = UvarintSize(uint64(i))\n\tif len(buf) < size+1 {\n\t\treturn 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tif negate {\n\t\t\/\/ e.g. 0xF1 for a single negative byte\n\t\tbuf[0] = byte(size + 0xF0)\n\t} else {\n\t\tbuf[0] = byte(size)\n\t}\n\tif size > 0 {\n\t\tbuf2 := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf2, uint64(i))\n\t\tcopy(buf[1:], buf2[(8-size):])\n\t}\n\treturn size + 1, nil\n}\n\nfunc GetVarint(buf []byte) (i int, n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, 0, errors.New(\"Insufficent buffer length\")\n\t}\n\tvar size = int(buf[0])\n\tvar negate = false\n\tif (size >> 4) == 0xF {\n\t\tnegate = true\n\t\tsize = size & 0x0F\n\t}\n\tif size > 8 {\n\t\treturn 0, 0, errors.New(\"Varint overflow\")\n\t}\n\tif size == 0 {\n\t\tif negate {\n\t\t\treturn 0, 0, errors.New(\"Varint does not allow negative zero\")\n\t\t}\n\t\treturn 0, 1, nil\n\t}\n\tif len(buf) < 1+size {\n\t\treturn 0, 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf2 := make([]byte, 8)\n\tcopy(buf2[(8-size):], buf[1:1+size])\n\ti = int(binary.BigEndian.Uint64(buf2))\n\tif negate {\n\t\treturn -i, size + 1, nil\n\t} else {\n\t\treturn i, size + 1, nil\n\t}\n}\n\n\/\/ Uvarint\n\nfunc WriteUvarint(i uint, w io.Writer, n *int, err *error) {\n\tvar size = UvarintSize(uint64(i))\n\tWriteUint8(uint8(size), w, n, err)\n\tif size > 0 {\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, uint64(i))\n\t\tWriteTo(buf[(8-size):], w, n, err)\n\t}\n}\n\nfunc ReadUvarint(r io.Reader, n *int, err *error) uint {\n\tvar size = ReadUint8(r, n, err)\n\tif size > 8 {\n\t\tsetFirstErr(err, errors.New(\"Uvarint overflow\"))\n\t\treturn 0\n\t}\n\tif size == 0 {\n\t\treturn 0\n\t}\n\tbuf := make([]byte, 8)\n\tReadFull(buf[(8-size):], r, n, err)\n\treturn uint(binary.BigEndian.Uint64(buf))\n}\n\nfunc PutUvarint(buf []byte, i uint) (n int, err error) {\n\tvar size = UvarintSize(uint64(i))\n\tif len(buf) < size+1 {\n\t\treturn 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf[0] = byte(size)\n\tif size > 0 {\n\t\tbuf2 := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf2, uint64(i))\n\t\tcopy(buf[1:], buf2[(8-size):])\n\t}\n\treturn size + 1, nil\n}\n\nfunc GetUvarint(buf []byte) (i uint, n int, err error) {\n\tif len(buf) == 0 {\n\t\treturn 0, 0, errors.New(\"Insufficent buffer length\")\n\t}\n\tvar size = int(buf[0])\n\tif size > 8 {\n\t\treturn 0, 0, errors.New(\"Uvarint overflow\")\n\t}\n\tif size == 0 {\n\t\treturn 0, 1, nil\n\t}\n\tif len(buf) < 1+size {\n\t\treturn 0, 0, errors.New(\"Insufficient buffer length\")\n\t}\n\tbuf2 := make([]byte, 8)\n\tcopy(buf2[(8-size):], buf[1:1+size])\n\ti = uint(binary.BigEndian.Uint64(buf2))\n\treturn i, size + 1, nil\n}\n\nfunc setFirstErr(err *error, newErr error) {\n\tif *err == nil && newErr != nil {\n\t\t*err = newErr\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\n\/\/ ServerEnvironment represents the read-only environment fields of a LXD server\ntype ServerEnvironment struct {\n\t\/\/ List of addresses the server is listening on\n\t\/\/ Example: [\":8443\"]\n\tAddresses []string `json:\"addresses\" yaml:\"addresses\"`\n\n\t\/\/ List of architectures supported by the server\n\t\/\/ Example: [\"x86_64\", \"i686\"]\n\tArchitectures []string `json:\"architectures\" yaml:\"architectures\"`\n\n\t\/\/ Server certificate as PEM encoded X509\n\t\/\/ Example: X509 PEM certificate\n\tCertificate string `json:\"certificate\" yaml:\"certificate\"`\n\n\t\/\/ Server certificate fingerprint as SHA256\n\t\/\/ Example: fd200419b271f1dc2a5591b693cc5774b7f234e1ff8c6b78ad703b6888fe2b69\n\tCertificateFingerprint string `json:\"certificate_fingerprint\" yaml:\"certificate_fingerprint\"`\n\n\t\/\/ List of supported instance drivers (separate by \" | \")\n\t\/\/ Example: lxc | qemu\n\tDriver string `json:\"driver\" yaml:\"driver\"`\n\n\t\/\/ List of supported instance driver versions (separate by \" | \")\n\t\/\/ Example: 4.0.7 | 5.2.0\n\tDriverVersion string `json:\"driver_version\" yaml:\"driver_version\"`\n\n\t\/\/ Current firewall driver\n\t\/\/ Example: nftables\n\t\/\/\n\t\/\/ API extension: firewall_driver\n\tFirewall string `json:\"firewall\" yaml:\"firewall\"`\n\n\t\/\/ OS kernel name\n\t\/\/ Example: Linux\n\tKernel string `json:\"kernel\" yaml:\"kernel\"`\n\n\t\/\/ OS kernel architecture\n\t\/\/ Example: x86_64\n\tKernelArchitecture string `json:\"kernel_architecture\" yaml:\"kernel_architecture\"`\n\n\t\/\/ Map of kernel features that were tested on startup\n\t\/\/ Example: {\"netnsid_getifaddrs\": \"true\", \"seccomp_listener\": \"true\"}\n\t\/\/\n\t\/\/ API extension: kernel_features\n\tKernelFeatures map[string]string `json:\"kernel_features\" yaml:\"kernel_features\"`\n\n\t\/\/ Kernel version\n\t\/\/ Example: 5.4.0-36-generic\n\tKernelVersion string `json:\"kernel_version\" yaml:\"kernel_version\"`\n\n\t\/\/ Map of LXC features that were tested on startup\n\t\/\/ Example: {\"cgroup2\": \"true\", \"devpts_fd\": \"true\", \"pidfd\": \"true\"}\n\t\/\/\n\t\/\/ API extension: lxc_features\n\tLXCFeatures map[string]string `json:\"lxc_features\" yaml:\"lxc_features\"`\n\n\t\/\/ Name of the operating system (Linux distribution)\n\t\/\/ Example: Ubuntu\n\t\/\/\n\t\/\/ API extension: api_os\n\tOSName string `json:\"os_name\" yaml:\"os_name\"`\n\n\t\/\/ Version of the operating system (Linux distribution)\n\t\/\/ Example: 20.04\n\t\/\/\n\t\/\/ API extension: api_os\n\tOSVersion string `json:\"os_version\" yaml:\"os_version\"`\n\n\t\/\/ Current project name\n\t\/\/ Example: default\n\t\/\/\n\t\/\/ API extension: projects\n\tProject string `json:\"project\" yaml:\"project\"`\n\n\t\/\/ Server implementation name\n\t\/\/ Example: lxd\n\tServer string `json:\"server\" yaml:\"server\"`\n\n\t\/\/ Whether the server is part of a cluster\n\t\/\/ Example: false\n\t\/\/\n\t\/\/ API extension: clustering\n\tServerClustered bool `json:\"server_clustered\" yaml:\"server_clustered\"`\n\n\t\/\/ Server hostname\n\t\/\/ Example: castiana\n\t\/\/\n\t\/\/ API extension: clustering\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ PID of the LXD process\n\t\/\/ Example: 1453969\n\tServerPid int `json:\"server_pid\" yaml:\"server_pid\"`\n\n\t\/\/ Server version\n\t\/\/ Example: 4.11\n\tServerVersion string `json:\"server_version\" yaml:\"server_version\"`\n\n\t\/\/ List of active storage drivers (separate by \" | \")\n\t\/\/ Example: dir | zfs\n\tStorage string `json:\"storage\" yaml:\"storage\"`\n\n\t\/\/ List of active storage driver versions (separate by \" | \")\n\t\/\/ Example: 1 | 0.8.4-1ubuntu11\n\tStorageVersion string `json:\"storage_version\" yaml:\"storage_version\"`\n}\n\n\/\/ ServerPut represents the modifiable fields of a LXD server configuration\n\/\/\n\/\/ swagger:model\ntype ServerPut struct {\n\t\/\/ Server configuration map (refer to doc\/server.md)\n\t\/\/ Example: {\"core.https_address\": \":8443\", \"core.trust_password\": true}\n\tConfig map[string]interface{} `json:\"config\" yaml:\"config\"`\n}\n\n\/\/ ServerUntrusted represents a LXD server for an untrusted client\n\/\/\n\/\/ swagger:model\ntype ServerUntrusted struct {\n\t\/\/ List of supported API extensions\n\t\/\/ Read only: true\n\t\/\/ Example: [\"etag\", \"patch\", \"network\", \"storage\"]\n\tAPIExtensions []string `json:\"api_extensions\" yaml:\"api_extensions\"`\n\n\t\/\/ Support status of the current API (one of \"devel\", \"stable\" or \"deprecated\")\n\t\/\/ Read only: true\n\t\/\/ Example: stable\n\tAPIStatus string `json:\"api_status\" yaml:\"api_status\"`\n\n\t\/\/ API version number\n\t\/\/ Read only: true\n\t\/\/ Example: 1.0\n\tAPIVersion string `json:\"api_version\" yaml:\"api_version\"`\n\n\t\/\/ Whether the client is trusted (one of \"trusted\" or \"untrusted\")\n\t\/\/ Read only: true\n\t\/\/ Example: untrusted\n\tAuth string `json:\"auth\" yaml:\"auth\"`\n\n\t\/\/ Whether the server is public-only (only public endpoints are implemented)\n\t\/\/ Read only: true\n\t\/\/ Example: false\n\tPublic bool `json:\"public\" yaml:\"public\"`\n\n\t\/\/ List of supported authentication methods\n\t\/\/ Read only: true\n\t\/\/ Example: [\"tls\", \"candid\"]\n\t\/\/\n\t\/\/ API extension: macaroon_authentication\n\tAuthMethods []string `json:\"auth_methods\" yaml:\"auth_methods\"`\n}\n\n\/\/ Server represents a LXD server\n\/\/\n\/\/ swagger:model\ntype Server struct {\n\tServerPut `yaml:\",inline\"`\n\tServerUntrusted `yaml:\",inline\"`\n\n\t\/\/ Read-only status\/configuration information\n\t\/\/ Read only: true\n\tEnvironment ServerEnvironment `json:\"environment\" yaml:\"environment\"`\n}\n\n\/\/ Writable converts a full Server struct into a ServerPut struct (filters read-only fields)\nfunc (srv *Server) Writable() ServerPut {\n\treturn srv.ServerPut\n}\n<commit_msg>shared\/api\/server: Adds ServerStorageDriverInfo and adds StorageSupportedDrivers field to ServerEnvironment<commit_after>package api\n\n\/\/ ServerEnvironment represents the read-only environment fields of a LXD server\ntype ServerEnvironment struct {\n\t\/\/ List of addresses the server is listening on\n\t\/\/ Example: [\":8443\"]\n\tAddresses []string `json:\"addresses\" yaml:\"addresses\"`\n\n\t\/\/ List of architectures supported by the server\n\t\/\/ Example: [\"x86_64\", \"i686\"]\n\tArchitectures []string `json:\"architectures\" yaml:\"architectures\"`\n\n\t\/\/ Server certificate as PEM encoded X509\n\t\/\/ Example: X509 PEM certificate\n\tCertificate string `json:\"certificate\" yaml:\"certificate\"`\n\n\t\/\/ Server certificate fingerprint as SHA256\n\t\/\/ Example: fd200419b271f1dc2a5591b693cc5774b7f234e1ff8c6b78ad703b6888fe2b69\n\tCertificateFingerprint string `json:\"certificate_fingerprint\" yaml:\"certificate_fingerprint\"`\n\n\t\/\/ List of supported instance drivers (separate by \" | \")\n\t\/\/ Example: lxc | qemu\n\tDriver string `json:\"driver\" yaml:\"driver\"`\n\n\t\/\/ List of supported instance driver versions (separate by \" | \")\n\t\/\/ Example: 4.0.7 | 5.2.0\n\tDriverVersion string `json:\"driver_version\" yaml:\"driver_version\"`\n\n\t\/\/ Current firewall driver\n\t\/\/ Example: nftables\n\t\/\/\n\t\/\/ API extension: firewall_driver\n\tFirewall string `json:\"firewall\" yaml:\"firewall\"`\n\n\t\/\/ OS kernel name\n\t\/\/ Example: Linux\n\tKernel string `json:\"kernel\" yaml:\"kernel\"`\n\n\t\/\/ OS kernel architecture\n\t\/\/ Example: x86_64\n\tKernelArchitecture string `json:\"kernel_architecture\" yaml:\"kernel_architecture\"`\n\n\t\/\/ Map of kernel features that were tested on startup\n\t\/\/ Example: {\"netnsid_getifaddrs\": \"true\", \"seccomp_listener\": \"true\"}\n\t\/\/\n\t\/\/ API extension: kernel_features\n\tKernelFeatures map[string]string `json:\"kernel_features\" yaml:\"kernel_features\"`\n\n\t\/\/ Kernel version\n\t\/\/ Example: 5.4.0-36-generic\n\tKernelVersion string `json:\"kernel_version\" yaml:\"kernel_version\"`\n\n\t\/\/ Map of LXC features that were tested on startup\n\t\/\/ Example: {\"cgroup2\": \"true\", \"devpts_fd\": \"true\", \"pidfd\": \"true\"}\n\t\/\/\n\t\/\/ API extension: lxc_features\n\tLXCFeatures map[string]string `json:\"lxc_features\" yaml:\"lxc_features\"`\n\n\t\/\/ Name of the operating system (Linux distribution)\n\t\/\/ Example: Ubuntu\n\t\/\/\n\t\/\/ API extension: api_os\n\tOSName string `json:\"os_name\" yaml:\"os_name\"`\n\n\t\/\/ Version of the operating system (Linux distribution)\n\t\/\/ Example: 20.04\n\t\/\/\n\t\/\/ API extension: api_os\n\tOSVersion string `json:\"os_version\" yaml:\"os_version\"`\n\n\t\/\/ Current project name\n\t\/\/ Example: default\n\t\/\/\n\t\/\/ API extension: projects\n\tProject string `json:\"project\" yaml:\"project\"`\n\n\t\/\/ Server implementation name\n\t\/\/ Example: lxd\n\tServer string `json:\"server\" yaml:\"server\"`\n\n\t\/\/ Whether the server is part of a cluster\n\t\/\/ Example: false\n\t\/\/\n\t\/\/ API extension: clustering\n\tServerClustered bool `json:\"server_clustered\" yaml:\"server_clustered\"`\n\n\t\/\/ Server hostname\n\t\/\/ Example: castiana\n\t\/\/\n\t\/\/ API extension: clustering\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ PID of the LXD process\n\t\/\/ Example: 1453969\n\tServerPid int `json:\"server_pid\" yaml:\"server_pid\"`\n\n\t\/\/ Server version\n\t\/\/ Example: 4.11\n\tServerVersion string `json:\"server_version\" yaml:\"server_version\"`\n\n\t\/\/ List of active storage drivers (separate by \" | \")\n\t\/\/ Example: dir | zfs\n\tStorage string `json:\"storage\" yaml:\"storage\"`\n\n\t\/\/ List of active storage driver versions (separate by \" | \")\n\t\/\/ Example: 1 | 0.8.4-1ubuntu11\n\tStorageVersion string `json:\"storage_version\" yaml:\"storage_version\"`\n\n\t\/\/ List of supported storage drivers\n\tStorageSupportedDrivers []ServerStorageDriverInfo `json:\"storage_supported_drivers\" yaml:\"storage_supported_drivers\"`\n}\n\n\/\/ ServerStorageDriverInfo represents the read-only info about a storage driver\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: server_supported_storage_drivers\ntype ServerStorageDriverInfo struct {\n\t\/\/ Name of the driver\n\t\/\/ Example: zfs\n\t\/\/\n\t\/\/ API extension: server_supported_storage_drivers\n\tName string\n\n\t\/\/ Version of the driver\n\t\/\/ Example: 0.8.4-1ubuntu11\n\t\/\/\n\t\/\/ API extension: server_supported_storage_drivers\n\tVersion string\n\n\t\/\/ Whether the driver has remote volumes\n\t\/\/ Example: false\n\t\/\/\n\t\/\/ API extension: server_supported_storage_drivers\n\tRemote bool\n}\n\n\/\/ ServerPut represents the modifiable fields of a LXD server configuration\n\/\/\n\/\/ swagger:model\ntype ServerPut struct {\n\t\/\/ Server configuration map (refer to doc\/server.md)\n\t\/\/ Example: {\"core.https_address\": \":8443\", \"core.trust_password\": true}\n\tConfig map[string]interface{} `json:\"config\" yaml:\"config\"`\n}\n\n\/\/ ServerUntrusted represents a LXD server for an untrusted client\n\/\/\n\/\/ swagger:model\ntype ServerUntrusted struct {\n\t\/\/ List of supported API extensions\n\t\/\/ Read only: true\n\t\/\/ Example: [\"etag\", \"patch\", \"network\", \"storage\"]\n\tAPIExtensions []string `json:\"api_extensions\" yaml:\"api_extensions\"`\n\n\t\/\/ Support status of the current API (one of \"devel\", \"stable\" or \"deprecated\")\n\t\/\/ Read only: true\n\t\/\/ Example: stable\n\tAPIStatus string `json:\"api_status\" yaml:\"api_status\"`\n\n\t\/\/ API version number\n\t\/\/ Read only: true\n\t\/\/ Example: 1.0\n\tAPIVersion string `json:\"api_version\" yaml:\"api_version\"`\n\n\t\/\/ Whether the client is trusted (one of \"trusted\" or \"untrusted\")\n\t\/\/ Read only: true\n\t\/\/ Example: untrusted\n\tAuth string `json:\"auth\" yaml:\"auth\"`\n\n\t\/\/ Whether the server is public-only (only public endpoints are implemented)\n\t\/\/ Read only: true\n\t\/\/ Example: false\n\tPublic bool `json:\"public\" yaml:\"public\"`\n\n\t\/\/ List of supported authentication methods\n\t\/\/ Read only: true\n\t\/\/ Example: [\"tls\", \"candid\"]\n\t\/\/\n\t\/\/ API extension: macaroon_authentication\n\tAuthMethods []string `json:\"auth_methods\" yaml:\"auth_methods\"`\n}\n\n\/\/ Server represents a LXD server\n\/\/\n\/\/ swagger:model\ntype Server struct {\n\tServerPut `yaml:\",inline\"`\n\tServerUntrusted `yaml:\",inline\"`\n\n\t\/\/ Read-only status\/configuration information\n\t\/\/ Read only: true\n\tEnvironment ServerEnvironment `json:\"environment\" yaml:\"environment\"`\n}\n\n\/\/ Writable converts a full Server struct into a ServerPut struct (filters read-only fields)\nfunc (srv *Server) Writable() ServerPut {\n\treturn srv.ServerPut\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"bufio\"\n\t\"regexp\"\n\t\"net\"\n)\n\ntype Irc struct {\n\tReader *bufio.Reader\n\tWriter *bufio.Writer\n\tR chan string\n\tW chan string\n}\n\ntype Bot struct {\n\tNick, Name string\n\tEvents chan string\n\tConn *Irc\n}\n\nfunc Connect(server string) *Bot {\n\tconn, err := net.Dial(\"tcp\", server)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treader := bufio.NewReader(conn)\n\twriter := bufio.NewWriter(conn)\n\tin := make(chan string, 1000)\n\tout := make(chan string, 1000)\n\tevents := make(chan string, 1000)\n\tirc := &Irc{reader, writer, in, out}\n\tbot := &Bot{\"GoBot\", \"GoBot\", events, irc}\n\n\tgo func() {\n\t\tfor {\n\t\t\tln, err := bot.Conn.Reader.ReadString(byte('\\n'))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tbot.Conn.R <- ln\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tstr := <-bot.Conn.W\n\t\t\t_, err := bot.Conn.Writer.WriteString(str + \"\\r\\n\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\n\t\t\twriter.Flush()\n\t\t}\n\t}()\n\n\treturn bot\n}\n\nfunc (b *Bot) Join(ch string) {\n\tb.Conn.W <- \"JOIN \" + ch\n}\n\nfunc (b *Bot) Login(nick, name string) {\n\tb.setNickName(nick, name)\n\tb.Conn.W <- \"NICK \" + b.Nick\n\tb.Conn.W <- \"USER \" + b.Nick + \" 0 * :\" + b.Name\n}\n\nfunc (b *Bot) setNickName(nick, name string) {\n\tb.Nick = nick\n\tb.Name = name\n}\n\nfunc (b *Bot) RunLoop() {\n\tfor {\n\t\tln := <-b.Conn.R\n\t\tfmt.Print(ln)\n\t\tif regexp.MustCompile(\"^PING\").Match([]byte(ln)) {\n\t\t\tb.Conn.W <- \"PONG \" + regexp.MustCompile(\":.*\").FindString(ln)\n\t\t}\n\t}\n}\n<commit_msg>rename package back to irg<commit_after>package irg\n\nimport (\n\t\"fmt\"\n\t\"bufio\"\n\t\"regexp\"\n\t\"net\"\n)\n\ntype Irc struct {\n\tReader *bufio.Reader\n\tWriter *bufio.Writer\n\tR chan string\n\tW chan string\n}\n\ntype Bot struct {\n\tNick, Name string\n\tEvents chan string\n\tConn *Irc\n}\n\nfunc Connect(server string) *Bot {\n\tconn, err := net.Dial(\"tcp\", server)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treader := bufio.NewReader(conn)\n\twriter := bufio.NewWriter(conn)\n\tin := make(chan string, 1000)\n\tout := make(chan string, 1000)\n\tevents := make(chan string, 1000)\n\tirc := &Irc{reader, writer, in, out}\n\tbot := &Bot{\"GoBot\", \"GoBot\", events, irc}\n\n\tgo func() {\n\t\tfor {\n\t\t\tln, err := bot.Conn.Reader.ReadString(byte('\\n'))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tbot.Conn.R <- ln\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tstr := <-bot.Conn.W\n\t\t\t_, err := bot.Conn.Writer.WriteString(str + \"\\r\\n\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\n\t\t\twriter.Flush()\n\t\t}\n\t}()\n\n\treturn bot\n}\n\nfunc (b *Bot) Join(ch string) {\n\tb.Conn.W <- \"JOIN \" + ch\n}\n\nfunc (b *Bot) Login(nick, name string) {\n\tb.setNickName(nick, name)\n\tb.Conn.W <- \"NICK \" + b.Nick\n\tb.Conn.W <- \"USER \" + b.Nick + \" 0 * :\" + b.Name\n}\n\nfunc (b *Bot) setNickName(nick, name string) {\n\tb.Nick = nick\n\tb.Name = name\n}\n\nfunc (b *Bot) RunLoop() {\n\tfor {\n\t\tln := <-b.Conn.R\n\t\tfmt.Print(ln)\n\t\tif regexp.MustCompile(\"^PING\").Match([]byte(ln)) {\n\t\t\tb.Conn.W <- \"PONG \" + regexp.MustCompile(\":.*\").FindString(ln)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tstoragev1 \"k8s.io\/api\/storage\/v1\"\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\n\tcdiv1 \"kubevirt.io\/containerized-data-importer-api\/pkg\/apis\/core\/v1beta1\"\n\t\"kubevirt.io\/containerized-data-importer\/pkg\/controller\"\n)\n\n\/\/ cdi-file-host pod\/service relative values\nconst (\n\t\/\/RegistryHostName provides a deploymnet and service name for registry\n\tRegistryHostName = \"cdi-docker-registry-host\"\n\t\/\/ FileHostName provides a deployment and service name for tests\n\tFileHostName = \"cdi-file-host\"\n\t\/\/ FileHostS3Bucket provides an S3 bucket name for tests (e.g. http:\/\/<serviceIP:port>\/FileHostS3Bucket\/image)\n\tFileHostS3Bucket = \"images\"\n\t\/\/ AccessKeyValue provides a username to use for http and S3 (see hack\/build\/docker\/cdi-func-test-file-host-http\/htpasswd)\n\tAccessKeyValue = \"admin\"\n\t\/\/ SecretKeyValue provides a password to use for http and S3 (see hack\/build\/docker\/cdi-func-test-file-host-http\/htpasswd)\n\tSecretKeyValue = \"password\"\n\t\/\/ HttpAuthPort provides a cdi-file-host service auth port for tests\n\tHTTPAuthPort = 81\n\t\/\/ HttpNoAuthPort provides a cdi-file-host service no-auth port for tests, requires AccessKeyValue and SecretKeyValue\n\tHTTPNoAuthPort = 80\n\t\/\/ HTTPRateLimitPort provides a cdi-file-host service rate limit port for tests, speed is limited to 25k\/s to allow for testing slow connection behavior. No auth.\n\tHTTPRateLimitPort = 82\n\t\/\/ S3Port provides a cdi-file-host service S3 port, requires AccessKey and SecretKeyValue\n\tS3Port = 9000\n\t\/\/ HTTPSPort is the https port of cdi-file-host\n\tHTTPSNoAuthPort = 443\n\t\/\/ RegistryCertConfigMap is the ConfigMap where the cert for the docker registry is stored\n\tRegistryCertConfigMap = \"cdi-docker-registry-host-certs\"\n\t\/\/ FileHostCertConfigMap is the ConfigMap where the cert fir the file host is stored\n\tFileHostCertConfigMap = \"cdi-file-host-certs\"\n\t\/\/ ImageIOCertConfigMap is the ConfigMap where the cert fir the file host is stored\n\tImageIOCertConfigMap = \"imageio-certs\"\n)\n\nvar (\n\t\/\/ DefaultStorageClass the default storage class used in tests\n\tDefaultStorageClass *storagev1.StorageClass\n\t\/\/ DefaultStorageClassCsiDriver the default storage class CSI driver if it exists.\n\tDefaultStorageClassCsiDriver *storagev1.CSIDriver\n\n\t\/\/ NfsService is the service in the cdi namespace that will be created if KUBEVIRT_STORAGE=nfs\n\tNfsService *corev1.Service\n\tnfsChecked bool\n\t\/\/ DefaultStorageCSIRespectsFsGroup is true if the default storage class is CSI and respects fsGroup, false other wise.\n\tDefaultStorageCSIRespectsFsGroup bool\n)\n\n\/\/ GetDefaultStorageClass return the storage class which is marked as default in the cluster\nfunc GetDefaultStorageClass(client *kubernetes.Clientset) *storagev1.StorageClass {\n\tstorageclasses, err := client.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tginkgo.Fail(\"Unable to list storage classes\")\n\t\treturn nil\n\t}\n\tfor _, storageClass := range storageclasses.Items {\n\t\tif storageClass.Annotations[\"storageclass.kubernetes.io\/is-default-class\"] == \"true\" {\n\t\t\treturn &storageClass\n\t\t}\n\t}\n\tginkgo.Fail(\"Unable to find default storage classes\")\n\treturn nil\n}\n\nfunc getDefaultStorageClassCsiDriver(client *kubernetes.Clientset) *storagev1.CSIDriver {\n\tif DefaultStorageClass != nil {\n\t\tcsidrivers, err := client.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tginkgo.Fail(fmt.Sprintf(\"Unable to get csi driver: %v\", err))\n\t\t}\n\t\tfor _, driver := range csidrivers.Items {\n\t\t\tif driver.Name == DefaultStorageClass.Provisioner {\n\t\t\t\treturn &driver\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc isDefaultStorageClassCSIRespectsFsGroup() bool {\n\treturn DefaultStorageClassCsiDriver != nil && DefaultStorageClassCsiDriver.Spec.FSGroupPolicy != nil && *DefaultStorageClassCsiDriver.Spec.FSGroupPolicy != storagev1.NoneFSGroupPolicy\n}\n\n\/\/ IsHostpathProvisioner returns true if hostpath-provisioner is the default storage class\nfunc IsHostpathProvisioner() bool {\n\tif DefaultStorageClass == nil {\n\t\treturn false\n\t}\n\treturn DefaultStorageClass.Provisioner == \"kubevirt.io\/hostpath-provisioner\"\n}\n\n\/\/ GetTestNamespaceList returns a list of namespaces that have been created by the functional tests.\nfunc GetTestNamespaceList(client *kubernetes.Clientset, nsPrefix string) (*corev1.NamespaceList, error) {\n\t\/\/Ensure that no namespaces with the prefix label exist\n\treturn client.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{\n\t\tLabelSelector: nsPrefix,\n\t})\n}\n\n\/\/ CacheTestsData fetch and cache data required for tests\nfunc CacheTestsData(client *kubernetes.Clientset, cdiNs string) {\n\tif DefaultStorageClass == nil {\n\t\tDefaultStorageClass = GetDefaultStorageClass(client)\n\t}\n\tDefaultStorageClassCsiDriver = getDefaultStorageClassCsiDriver(client)\n\tDefaultStorageCSIRespectsFsGroup = isDefaultStorageClassCSIRespectsFsGroup()\n\n\tif !nfsChecked {\n\t\tNfsService = getNfsService(client, cdiNs)\n\t\tnfsChecked = true\n\t}\n}\n\nfunc getNfsService(client *kubernetes.Clientset, cdiNs string) *corev1.Service {\n\tservice, err := client.CoreV1().Services(cdiNs).Get(context.TODO(), \"nfs-service\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn service\n}\n\n\/\/ IsNfs returns true if the default storage class is the static nfs storage class with no provisioner\nfunc IsNfs() bool {\n\tif NfsService == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ UpdateCDIConfigWithOptions updates CDIConfig with specific UpdateOptions\nfunc UpdateCDIConfigWithOptions(c client.Client, opts metav1.UpdateOptions, updateFunc func(*cdiv1.CDIConfigSpec)) error {\n\tcdi, err := controller.GetActiveCDI(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cdi.Spec.Config == nil {\n\t\tcdi.Spec.Config = &cdiv1.CDIConfigSpec{}\n\t}\n\n\tupdateFunc(cdi.Spec.Config)\n\n\tif err = c.Update(context.TODO(), cdi, &client.UpdateOptions{Raw: &opts}); err != nil {\n\t\treturn err\n\t}\n\n\tif err = wait.PollImmediate(1*time.Second, 20*time.Second, func() (bool, error) {\n\t\tcfg := &cdiv1.CDIConfig{}\n\t\terr := c.Get(context.TODO(), types.NamespacedName{Name: \"config\"}, cfg)\n\t\treturn apiequality.Semantic.DeepEqual(&cfg.Spec, cdi.Spec.Config), err\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateCDIConfig updates CDIConfig\nfunc UpdateCDIConfig(c client.Client, updateFunc func(*cdiv1.CDIConfigSpec)) error {\n\treturn UpdateCDIConfigWithOptions(c, metav1.UpdateOptions{}, updateFunc)\n}\n\n\/\/ EnableFeatureGate sets specified FeatureGate in the CDIConfig\nfunc EnableFeatureGate(c client.Client, feature string) (*bool, error) {\n\tvar previousValue = false\n\n\tif err := UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif hasString(config.FeatureGates, feature) {\n\t\t\tpreviousValue = true\n\t\t\treturn\n\t\t}\n\n\t\tconfig.FeatureGates = append(config.FeatureGates, feature)\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &previousValue, nil\n}\n\n\/\/ DisableFeatureGate unsets specified FeatureGate in the CDIConfig\nfunc DisableFeatureGate(c client.Client, featureGate string) (*bool, error) {\n\tvar previousValue = false\n\n\tif err := UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif !hasString(config.FeatureGates, featureGate) {\n\t\t\treturn\n\t\t}\n\n\t\tpreviousValue = true\n\t\tconfig.FeatureGates = removeString(config.FeatureGates, featureGate)\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &previousValue, nil\n}\n\n\/\/ AddInsecureRegistry adds the registry to CDIConfig InsecureRegistries to mark it as allowed to be insecure\nfunc AddInsecureRegistry(c client.Client, registryURL string) error {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif hasString(config.InsecureRegistries, parsedURL.Host) {\n\t\t\treturn\n\t\t}\n\t\tconfig.InsecureRegistries = append(config.InsecureRegistries, parsedURL.Host)\n\t})\n\treturn err\n}\n\n\/\/ RemoveInsecureRegistry removed the registry from CDIConfig InsecureRegistries so it is not allowed to be insecure\nfunc RemoveInsecureRegistry(c client.Client, registryURL string) error {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif !hasString(config.InsecureRegistries, parsedURL.Host) {\n\t\t\treturn\n\t\t}\n\t\tconfig.InsecureRegistries = removeString(config.InsecureRegistries, parsedURL.Host)\n\t})\n\treturn err\n}\n\n\/\/ HasInsecureRegistry checks if registry appears in CDIConfig InsecureRegistries so it is allowed to be insecure\nfunc HasInsecureRegistry(c client.Client, registryURL string) (bool, error) {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tcfg := &cdiv1.CDIConfig{}\n\terr = c.Get(context.TODO(), types.NamespacedName{Name: \"config\"}, cfg)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn hasString(cfg.Spec.InsecureRegistries, parsedURL.Host), nil\n}\n\nfunc removeString(strings []string, str string) []string {\n\tvar output []string\n\tfor _, s := range strings {\n\t\tif s != str {\n\t\t\toutput = append(output, s)\n\t\t}\n\t}\n\treturn output\n}\n\nfunc hasString(strings []string, str string) bool {\n\tfor _, s := range strings {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/IsOpenshift checks if we are on OpenShift platform\nfunc IsOpenshift(client kubernetes.Interface) bool {\n\t\/\/OpenShift 3.X check\n\tresult := client.Discovery().RESTClient().Get().AbsPath(\"\/oapi\/v1\").Do(context.TODO())\n\tvar statusCode int\n\tresult.StatusCode(&statusCode)\n\n\tif result.Error() == nil {\n\t\t\/\/ It is OpenShift\n\t\tif statusCode == http.StatusOK {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\t\/\/ Got 404 so this is not Openshift 3.X, let's check OpenShift 4\n\t\tresult = client.Discovery().RESTClient().Get().AbsPath(\"\/apis\/route.openshift.io\").Do(context.TODO())\n\t\tvar statusCode int\n\t\tresult.StatusCode(&statusCode)\n\n\t\tif result.Error() == nil {\n\t\t\t\/\/ It is OpenShift\n\t\t\tif statusCode == http.StatusOK {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n<commit_msg>Increase time we wait for cdiconfig update (#2155)<commit_after>package utils\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tstoragev1 \"k8s.io\/api\/storage\/v1\"\n\tapiequality \"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\n\tcdiv1 \"kubevirt.io\/containerized-data-importer-api\/pkg\/apis\/core\/v1beta1\"\n\t\"kubevirt.io\/containerized-data-importer\/pkg\/controller\"\n)\n\n\/\/ cdi-file-host pod\/service relative values\nconst (\n\t\/\/RegistryHostName provides a deploymnet and service name for registry\n\tRegistryHostName = \"cdi-docker-registry-host\"\n\t\/\/ FileHostName provides a deployment and service name for tests\n\tFileHostName = \"cdi-file-host\"\n\t\/\/ FileHostS3Bucket provides an S3 bucket name for tests (e.g. http:\/\/<serviceIP:port>\/FileHostS3Bucket\/image)\n\tFileHostS3Bucket = \"images\"\n\t\/\/ AccessKeyValue provides a username to use for http and S3 (see hack\/build\/docker\/cdi-func-test-file-host-http\/htpasswd)\n\tAccessKeyValue = \"admin\"\n\t\/\/ SecretKeyValue provides a password to use for http and S3 (see hack\/build\/docker\/cdi-func-test-file-host-http\/htpasswd)\n\tSecretKeyValue = \"password\"\n\t\/\/ HttpAuthPort provides a cdi-file-host service auth port for tests\n\tHTTPAuthPort = 81\n\t\/\/ HttpNoAuthPort provides a cdi-file-host service no-auth port for tests, requires AccessKeyValue and SecretKeyValue\n\tHTTPNoAuthPort = 80\n\t\/\/ HTTPRateLimitPort provides a cdi-file-host service rate limit port for tests, speed is limited to 25k\/s to allow for testing slow connection behavior. No auth.\n\tHTTPRateLimitPort = 82\n\t\/\/ S3Port provides a cdi-file-host service S3 port, requires AccessKey and SecretKeyValue\n\tS3Port = 9000\n\t\/\/ HTTPSPort is the https port of cdi-file-host\n\tHTTPSNoAuthPort = 443\n\t\/\/ RegistryCertConfigMap is the ConfigMap where the cert for the docker registry is stored\n\tRegistryCertConfigMap = \"cdi-docker-registry-host-certs\"\n\t\/\/ FileHostCertConfigMap is the ConfigMap where the cert fir the file host is stored\n\tFileHostCertConfigMap = \"cdi-file-host-certs\"\n\t\/\/ ImageIOCertConfigMap is the ConfigMap where the cert fir the file host is stored\n\tImageIOCertConfigMap = \"imageio-certs\"\n)\n\nvar (\n\t\/\/ DefaultStorageClass the default storage class used in tests\n\tDefaultStorageClass *storagev1.StorageClass\n\t\/\/ DefaultStorageClassCsiDriver the default storage class CSI driver if it exists.\n\tDefaultStorageClassCsiDriver *storagev1.CSIDriver\n\n\t\/\/ NfsService is the service in the cdi namespace that will be created if KUBEVIRT_STORAGE=nfs\n\tNfsService *corev1.Service\n\tnfsChecked bool\n\t\/\/ DefaultStorageCSIRespectsFsGroup is true if the default storage class is CSI and respects fsGroup, false other wise.\n\tDefaultStorageCSIRespectsFsGroup bool\n)\n\n\/\/ GetDefaultStorageClass return the storage class which is marked as default in the cluster\nfunc GetDefaultStorageClass(client *kubernetes.Clientset) *storagev1.StorageClass {\n\tstorageclasses, err := client.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tginkgo.Fail(\"Unable to list storage classes\")\n\t\treturn nil\n\t}\n\tfor _, storageClass := range storageclasses.Items {\n\t\tif storageClass.Annotations[\"storageclass.kubernetes.io\/is-default-class\"] == \"true\" {\n\t\t\treturn &storageClass\n\t\t}\n\t}\n\tginkgo.Fail(\"Unable to find default storage classes\")\n\treturn nil\n}\n\nfunc getDefaultStorageClassCsiDriver(client *kubernetes.Clientset) *storagev1.CSIDriver {\n\tif DefaultStorageClass != nil {\n\t\tcsidrivers, err := client.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tginkgo.Fail(fmt.Sprintf(\"Unable to get csi driver: %v\", err))\n\t\t}\n\t\tfor _, driver := range csidrivers.Items {\n\t\t\tif driver.Name == DefaultStorageClass.Provisioner {\n\t\t\t\treturn &driver\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc isDefaultStorageClassCSIRespectsFsGroup() bool {\n\treturn DefaultStorageClassCsiDriver != nil && DefaultStorageClassCsiDriver.Spec.FSGroupPolicy != nil && *DefaultStorageClassCsiDriver.Spec.FSGroupPolicy != storagev1.NoneFSGroupPolicy\n}\n\n\/\/ IsHostpathProvisioner returns true if hostpath-provisioner is the default storage class\nfunc IsHostpathProvisioner() bool {\n\tif DefaultStorageClass == nil {\n\t\treturn false\n\t}\n\treturn DefaultStorageClass.Provisioner == \"kubevirt.io\/hostpath-provisioner\"\n}\n\n\/\/ GetTestNamespaceList returns a list of namespaces that have been created by the functional tests.\nfunc GetTestNamespaceList(client *kubernetes.Clientset, nsPrefix string) (*corev1.NamespaceList, error) {\n\t\/\/Ensure that no namespaces with the prefix label exist\n\treturn client.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{\n\t\tLabelSelector: nsPrefix,\n\t})\n}\n\n\/\/ CacheTestsData fetch and cache data required for tests\nfunc CacheTestsData(client *kubernetes.Clientset, cdiNs string) {\n\tif DefaultStorageClass == nil {\n\t\tDefaultStorageClass = GetDefaultStorageClass(client)\n\t}\n\tDefaultStorageClassCsiDriver = getDefaultStorageClassCsiDriver(client)\n\tDefaultStorageCSIRespectsFsGroup = isDefaultStorageClassCSIRespectsFsGroup()\n\n\tif !nfsChecked {\n\t\tNfsService = getNfsService(client, cdiNs)\n\t\tnfsChecked = true\n\t}\n}\n\nfunc getNfsService(client *kubernetes.Clientset, cdiNs string) *corev1.Service {\n\tservice, err := client.CoreV1().Services(cdiNs).Get(context.TODO(), \"nfs-service\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn service\n}\n\n\/\/ IsNfs returns true if the default storage class is the static nfs storage class with no provisioner\nfunc IsNfs() bool {\n\tif NfsService == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ UpdateCDIConfigWithOptions updates CDIConfig with specific UpdateOptions\nfunc UpdateCDIConfigWithOptions(c client.Client, opts metav1.UpdateOptions, updateFunc func(*cdiv1.CDIConfigSpec)) error {\n\tcdi, err := controller.GetActiveCDI(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cdi.Spec.Config == nil {\n\t\tcdi.Spec.Config = &cdiv1.CDIConfigSpec{}\n\t}\n\n\tupdateFunc(cdi.Spec.Config)\n\n\tif err = c.Update(context.TODO(), cdi, &client.UpdateOptions{Raw: &opts}); err != nil {\n\t\treturn err\n\t}\n\n\tif err = wait.PollImmediate(1*time.Second, 60*time.Second, func() (bool, error) {\n\t\tcfg := &cdiv1.CDIConfig{}\n\t\terr := c.Get(context.TODO(), types.NamespacedName{Name: \"config\"}, cfg)\n\t\treturn apiequality.Semantic.DeepEqual(&cfg.Spec, cdi.Spec.Config), err\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateCDIConfig updates CDIConfig\nfunc UpdateCDIConfig(c client.Client, updateFunc func(*cdiv1.CDIConfigSpec)) error {\n\treturn UpdateCDIConfigWithOptions(c, metav1.UpdateOptions{}, updateFunc)\n}\n\n\/\/ EnableFeatureGate sets specified FeatureGate in the CDIConfig\nfunc EnableFeatureGate(c client.Client, feature string) (*bool, error) {\n\tvar previousValue = false\n\n\tif err := UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif hasString(config.FeatureGates, feature) {\n\t\t\tpreviousValue = true\n\t\t\treturn\n\t\t}\n\n\t\tconfig.FeatureGates = append(config.FeatureGates, feature)\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &previousValue, nil\n}\n\n\/\/ DisableFeatureGate unsets specified FeatureGate in the CDIConfig\nfunc DisableFeatureGate(c client.Client, featureGate string) (*bool, error) {\n\tvar previousValue = false\n\n\tif err := UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif !hasString(config.FeatureGates, featureGate) {\n\t\t\treturn\n\t\t}\n\n\t\tpreviousValue = true\n\t\tconfig.FeatureGates = removeString(config.FeatureGates, featureGate)\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &previousValue, nil\n}\n\n\/\/ AddInsecureRegistry adds the registry to CDIConfig InsecureRegistries to mark it as allowed to be insecure\nfunc AddInsecureRegistry(c client.Client, registryURL string) error {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif hasString(config.InsecureRegistries, parsedURL.Host) {\n\t\t\treturn\n\t\t}\n\t\tconfig.InsecureRegistries = append(config.InsecureRegistries, parsedURL.Host)\n\t})\n\treturn err\n}\n\n\/\/ RemoveInsecureRegistry removed the registry from CDIConfig InsecureRegistries so it is not allowed to be insecure\nfunc RemoveInsecureRegistry(c client.Client, registryURL string) error {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = UpdateCDIConfig(c, func(config *cdiv1.CDIConfigSpec) {\n\t\tif !hasString(config.InsecureRegistries, parsedURL.Host) {\n\t\t\treturn\n\t\t}\n\t\tconfig.InsecureRegistries = removeString(config.InsecureRegistries, parsedURL.Host)\n\t})\n\treturn err\n}\n\n\/\/ HasInsecureRegistry checks if registry appears in CDIConfig InsecureRegistries so it is allowed to be insecure\nfunc HasInsecureRegistry(c client.Client, registryURL string) (bool, error) {\n\tparsedURL, err := url.Parse(registryURL)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tcfg := &cdiv1.CDIConfig{}\n\terr = c.Get(context.TODO(), types.NamespacedName{Name: \"config\"}, cfg)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn hasString(cfg.Spec.InsecureRegistries, parsedURL.Host), nil\n}\n\nfunc removeString(strings []string, str string) []string {\n\tvar output []string\n\tfor _, s := range strings {\n\t\tif s != str {\n\t\t\toutput = append(output, s)\n\t\t}\n\t}\n\treturn output\n}\n\nfunc hasString(strings []string, str string) bool {\n\tfor _, s := range strings {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/IsOpenshift checks if we are on OpenShift platform\nfunc IsOpenshift(client kubernetes.Interface) bool {\n\t\/\/OpenShift 3.X check\n\tresult := client.Discovery().RESTClient().Get().AbsPath(\"\/oapi\/v1\").Do(context.TODO())\n\tvar statusCode int\n\tresult.StatusCode(&statusCode)\n\n\tif result.Error() == nil {\n\t\t\/\/ It is OpenShift\n\t\tif statusCode == http.StatusOK {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\t\/\/ Got 404 so this is not Openshift 3.X, let's check OpenShift 4\n\t\tresult = client.Discovery().RESTClient().Get().AbsPath(\"\/apis\/route.openshift.io\").Do(context.TODO())\n\t\tvar statusCode int\n\t\tresult.StatusCode(&statusCode)\n\n\t\tif result.Error() == nil {\n\t\t\t\/\/ It is OpenShift\n\t\t\tif statusCode == http.StatusOK {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package ebs\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/libopenstorage\/openstorage\/volume\"\n)\n\nconst (\n\tName = \"aws\"\n)\n\nvar (\n\tr *rand.Rand\n\tdevMinor int32\n)\n\n\/\/ Implements the open storage volume interface.\ntype awsProvider struct {\n\tec2 *ec2.EC2\n}\n\nfunc Init(params volume.DriverParams) (volume.VolumeDriver, error) {\n\t\/\/ Initialize the EC2 interface.\n\tcreds := credentials.NewChainCredentials(\n\t\t[]credentials.Provider{\n\t\t\t&credentials.EnvProvider{},\n\t\t\t&credentials.EC2RoleProvider{},\n\t\t})\n\n\tinst := &awsProvider{ec2: ec2.New(&aws.Config{\n\t\tRegion: \"us-west-1\",\n\t\tCredentials: creds,\n\t}),\n\t}\n\n\treturn inst, nil\n}\n\n\/\/ AWS provisioned IOPS range is 100 - 20000.\nfunc mapIops(cos api.VolumeCos) int64 {\n\tif cos < 3 {\n\t\treturn 1000\n\t} else if cos < 7 {\n\t\treturn 10000\n\t} else {\n\t\treturn 20000\n\t}\n}\n\nfunc (self *awsProvider) String() string {\n\treturn Name\n}\n\nfunc (self *awsProvider) Create(l api.VolumeLocator, opt *api.CreateOptions, spec *api.VolumeSpec) (api.VolumeID, error) {\n\tavailabilityZone := \"us-west-1a\"\n\tsz := int64(spec.Size \/ (1024 * 1024 * 1024))\n\tiops := mapIops(spec.Cos)\n\treq := &ec2.CreateVolumeInput{\n\t\tAvailabilityZone: &availabilityZone,\n\t\tSize: &sz,\n\t\tIOPS: &iops}\n\tv, err := self.ec2.CreateVolume(req)\n\n\treturn api.VolumeID(*v.VolumeID), err\n}\n\nfunc (self *awsProvider) AttachInfo(volInfo *api.VolumeInfo) (int32, string, error) {\n\ts := fmt.Sprintf(\"\/tmp\/gdd_%v\", int(devMinor))\n\treturn devMinor, s, nil\n}\n\nfunc (self *awsProvider) Attach(volInfo api.VolumeID, path string) (string, error) {\n\tdevMinor++\n\ts := fmt.Sprintf(\"\/tmp\/gdd_%v\", int(devMinor))\n\tos.Create(s)\n\treturn s, nil\n}\n\nfunc (self *awsProvider) Detach(volID api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Delete(volID api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Format(id api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Inspect(ids []api.VolumeID) ([]api.Volume, error) {\n\treturn nil, nil\n}\n\nfunc (self *awsProvider) Enumerate(locator api.VolumeLocator, labels api.Labels) []api.Volume {\n\treturn nil\n}\n\nfunc (self *awsProvider) Snapshot(volID api.VolumeID, labels api.Labels) (snap api.SnapID, err error) {\n\treturn \"\", nil\n}\n\nfunc (self *awsProvider) SnapDelete(snapID api.SnapID) (err error) {\n\treturn nil\n}\n\nfunc (self *awsProvider) SnapInspect(snapID api.SnapID) (snap api.VolumeSnap, err error) {\n\treturn api.VolumeSnap{}, nil\n}\n\nfunc (self *awsProvider) SnapEnumerate(locator api.VolumeLocator, labels api.Labels) *[]api.SnapID {\n\treturn nil\n}\n\nfunc (self *awsProvider) Stats(volID api.VolumeID) (stats api.VolumeStats, err error) {\n\treturn api.VolumeStats{}, nil\n}\n\nfunc (self *awsProvider) Alerts(volID api.VolumeID) (stats api.VolumeAlerts, err error) {\n\treturn api.VolumeAlerts{}, nil\n}\n\nfunc (self *awsProvider) Shutdown() {\n\tfmt.Printf(\"%s Shutting down\", Name)\n}\n\nfunc init() {\n\t\/\/ Register ourselves as an openstorage volume driver.\n\tvolume.Register(Name, Init)\n}\n<commit_msg>Added support for EC2 attach<commit_after>package ebs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/libopenstorage\/openstorage\/api\"\n\t\"github.com\/libopenstorage\/openstorage\/volume\"\n)\n\nconst (\n\tName = \"aws\"\n)\n\nvar (\n\tdevMinor int32\n)\n\n\/\/ Implements the open storage volume interface.\ntype awsProvider struct {\n\tec2 *ec2.EC2\n}\n\nfunc Init(params volume.DriverParams) (volume.VolumeDriver, error) {\n\t\/\/ Initialize the EC2 interface.\n\tcreds := credentials.NewEnvCredentials()\n\tinst := &awsProvider{ec2: ec2.New(&aws.Config{\n\t\tRegion: \"us-west-1\",\n\t\tCredentials: creds,\n\t}),\n\t}\n\n\treturn inst, nil\n}\n\n\/\/ AWS provisioned IOPS range is 100 - 20000.\nfunc mapIops(cos api.VolumeCos) int64 {\n\tif cos < 3 {\n\t\treturn 1000\n\t} else if cos < 7 {\n\t\treturn 10000\n\t} else {\n\t\treturn 20000\n\t}\n}\n\nfunc (self *awsProvider) String() string {\n\treturn Name\n}\n\nfunc (self *awsProvider) Create(l api.VolumeLocator, opt *api.CreateOptions, spec *api.VolumeSpec) (api.VolumeID, error) {\n\tavailabilityZone := \"us-west-1a\"\n\tsz := int64(spec.Size \/ (1024 * 1024 * 1024))\n\tiops := mapIops(spec.Cos)\n\treq := &ec2.CreateVolumeInput{\n\t\tAvailabilityZone: &availabilityZone,\n\t\tSize: &sz,\n\t\tIOPS: &iops}\n\tv, err := self.ec2.CreateVolume(req)\n\n\treturn api.VolumeID(*v.VolumeID), err\n}\n\nfunc (self *awsProvider) AttachInfo(volInfo *api.VolumeInfo) (int32, string, error) {\n\ts := fmt.Sprintf(\"\/tmp\/gdd_%v\", int(devMinor))\n\treturn devMinor, s, nil\n}\n\nfunc (self *awsProvider) Attach(volInfo api.VolumeID, path string) (string, error) {\n\tvolumeID := string(api.VolumeID)\n\treq := ec2.AttachVolumeInput{\n\t\tDevice: &device,\n\t\tInstanceID: &instanceID,\n\t\tVolumeID: &volumeID,\n\t}\n\tdevMinor++\n\ts := fmt.Sprintf(\"\/tmp\/gdd_%v\", int(devMinor))\n\tos.Create(s)\n\treturn s, nil\n}\n\nfunc (self *awsProvider) Detach(volID api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Delete(volID api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Format(id api.VolumeID) error {\n\treturn nil\n}\n\nfunc (self *awsProvider) Inspect(ids []api.VolumeID) ([]api.Volume, error) {\n\treturn nil, nil\n}\n\nfunc (self *awsProvider) Enumerate(locator api.VolumeLocator, labels api.Labels) []api.Volume {\n\treturn nil\n}\n\nfunc (self *awsProvider) Snapshot(volID api.VolumeID, labels api.Labels) (snap api.SnapID, err error) {\n\treturn \"\", nil\n}\n\nfunc (self *awsProvider) SnapDelete(snapID api.SnapID) (err error) {\n\treturn nil\n}\n\nfunc (self *awsProvider) SnapInspect(snapID api.SnapID) (snap api.VolumeSnap, err error) {\n\treturn api.VolumeSnap{}, nil\n}\n\nfunc (self *awsProvider) SnapEnumerate(locator api.VolumeLocator, labels api.Labels) *[]api.SnapID {\n\treturn nil\n}\n\nfunc (self *awsProvider) Stats(volID api.VolumeID) (stats api.VolumeStats, err error) {\n\treturn api.VolumeStats{}, nil\n}\n\nfunc (self *awsProvider) Alerts(volID api.VolumeID) (stats api.VolumeAlerts, err error) {\n\treturn api.VolumeAlerts{}, nil\n}\n\nfunc (self *awsProvider) Shutdown() {\n\tfmt.Printf(\"%s Shutting down\", Name)\n}\n\nfunc init() {\n\t\/\/ Register ourselves as an openstorage volume driver.\n\tvolume.Register(Name, Init)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Vadim Kravcenko\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage gojenkins\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\ntype Job struct {\n\tRaw *jobResponse\n\tJenkins *Jenkins\n\tBase string\n}\n\ntype jobBuild struct {\n\tNumber int64\n\tURL string\n}\n\ntype job struct {\n\tName string `json:\"name\"`\n\tUrl string `json:\"url\"`\n\tColor string `json:\"color\"`\n}\n\ntype parameterDefinition struct {\n\tDefaultParameterValue struct {\n\t\tName string `json:\"name\"`\n\t\tValue bool `json:\"value\"`\n\t} `json:\"defaultParameterValue\"`\n\tDescription string `json:\"description\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n}\n\ntype jobResponse struct {\n\tActions []generalObj\n\tBuildable bool `json:\"buildable\"`\n\tBuilds []jobBuild\n\tColor string `json:\"color\"`\n\tConcurrentBuild bool `json:\"concurrentBuild\"`\n\tDescription string `json:\"description\"`\n\tDisplayName string `json:\"displayName\"`\n\tDisplayNameOrNull interface{} `json:\"displayNameOrNull\"`\n\tDownstreamProjects []job `json:\"downstreamProjects\"`\n\tFirstBuild jobBuild\n\tHealthReport []struct {\n\t\tDescription string `json:\"description\"`\n\t\tIconClassName string `json:\"iconClassName\"`\n\t\tIconUrl string `json:\"iconUrl\"`\n\t\tScore int64 `json:\"score\"`\n\t} `json:\"healthReport\"`\n\tInQueue bool `json:\"inQueue\"`\n\tKeepDependencies bool `json:\"keepDependencies\"`\n\tLastBuild jobBuild `json:\"lastBuild\"`\n\tLastCompletedBuild jobBuild `json:\"lastCompletedBuild\"`\n\tLastFailedBuild jobBuild `json:\"lastFailedBuild\"`\n\tLastStableBuild jobBuild `json:\"lastStableBuild\"`\n\tLastSuccessfulBuild jobBuild `json:\"lastSuccessfulBuild\"`\n\tLastUnstableBuild jobBuild `json:\"lastUnstableBuild\"`\n\tLastUnsuccessfulBuild jobBuild `json:\"lastUnsuccessfulBuild\"`\n\tName string `json:\"name\"`\n\tNextBuildNumber int64 `json:\"nextBuildNumber\"`\n\tProperty []struct {\n\t\tParameterDefinitions []parameterDefinition `json:\"parameterDefinitions\"`\n\t} `json:\"property\"`\n\tQueueItem interface{} `json:\"queueItem\"`\n\tScm struct{} `json:\"scm\"`\n\tUpstreamProjects []job `json:\"upstreamProjects\"`\n\tURL string `json:\"url\"`\n}\n\nfunc (j *Job) GetName() string {\n\treturn j.Raw.Name\n}\n\nfunc (j *Job) GetDescription() string {\n\treturn j.Raw.Description\n}\n\nfunc (j *Job) GetDetails() *jobResponse {\n\treturn j.Raw\n}\n\nfunc (j *Job) GetBuild(id int64) (*Build, error) {\n\tbuild := Build{Jenkins: j.Jenkins, Job: j, Raw: new(buildResponse), Depth: 1, Base: \"\/job\/\" + j.GetName() + \"\/\" + strconv.FormatInt(id, 10)}\n\tstatus, err := build.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status == 200 {\n\t\treturn &build, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(status))\n}\n\nfunc (j *Job) getBuildByType(buildType string) (*Build, error) {\n\tallowed := map[string]jobBuild{\n\t\t\"lastStableBuild\": j.Raw.LastStableBuild,\n\t\t\"lastSuccessfulBuild\": j.Raw.LastSuccessfulBuild,\n\t\t\"lastBuild\": j.Raw.LastBuild,\n\t\t\"lastCompletedBuild\": j.Raw.LastCompletedBuild,\n\t\t\"firstBuild\": j.Raw.FirstBuild,\n\t\t\"lastFailedBuild\": j.Raw.LastFailedBuild,\n\t}\n\tnumber := \"\"\n\tif val, ok := allowed[buildType]; ok {\n\t\tnumber = strconv.FormatInt(val.Number, 10)\n\t} else {\n\t\tpanic(\"No Such Build\")\n\t}\n\tbuild := Build{\n\t\tJenkins: j.Jenkins,\n\t\tDepth: 1,\n\t\tJob: j,\n\t\tRaw: new(buildResponse),\n\t\tBase: \"\/job\/\" + j.GetName() + \"\/\" + number}\n\tstatus, err := build.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status == 200 {\n\t\treturn &build, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(status))\n}\n\nfunc (j *Job) GetLastSuccessfulBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastSuccessfulBuild\")\n}\n\nfunc (j *Job) GetFirstBuild() (*Build, error) {\n\treturn j.getBuildByType(\"firstBuild\")\n}\n\nfunc (j *Job) GetLastBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastBuild\")\n}\n\nfunc (j *Job) GetLastStableBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastStableBuild\")\n}\n\nfunc (j *Job) GetLastFailedBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastFailedBuild\")\n}\n\nfunc (j *Job) GetLastCompletedBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastCompletedBuild\")\n}\n\n\/\/ Returns All Builds with Number and URL\nfunc (j *Job) GetAllBuildIds() ([]jobBuild, error) {\n\tvar buildsResp struct {\n\t\tBuilds []jobBuild `json:\"allBuilds\"`\n\t}\n\t_, err := j.Jenkins.Requester.GetJSON(j.Base, &buildsResp, map[string]string{\"tree\": \"allBuilds[number,url]\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buildsResp.Builds, nil\n}\n\nfunc (j *Job) GetUpstreamJobsMetadata() []job {\n\treturn j.Raw.UpstreamProjects\n}\n\nfunc (j *Job) GetDownstreamJobsMetadata() []job {\n\treturn j.Raw.DownstreamProjects\n}\n\nfunc (j *Job) GetUpstreamJobs() ([]*Job, error) {\n\tjobs := make([]*Job, len(j.Raw.UpstreamProjects))\n\tfor i, job := range j.Raw.UpstreamProjects {\n\t\tji, err := j.Jenkins.GetJob(job.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjobs[i] = ji\n\t}\n\treturn jobs, nil\n}\n\nfunc (j *Job) GetDownstreamJobs() ([]*Job, error) {\n\tjobs := make([]*Job, len(j.Raw.DownstreamProjects))\n\tfor i, job := range j.Raw.DownstreamProjects {\n\t\tji, err := j.Jenkins.GetJob(job.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjobs[i] = ji\n\t}\n\treturn jobs, nil\n}\n\nfunc (j *Job) Enable() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/enable\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Disable() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/disable\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Delete() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/doDelete\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Rename(name string) (bool, error) {\n\tdata := url.Values{}\n\tdata.Set(\"newName\", name)\n\t_, err := j.Jenkins.Requester.Post(j.Base+\"\/doRename\", bytes.NewBufferString(data.Encode()), nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Create(config string, qr ...interface{}) (*Job, error) {\n\tvar querystring map[string]string\n\tif len(qr) > 0 {\n\t\tquerystring = qr[0].(map[string]string)\n\t}\n\tresp, err := j.Jenkins.Requester.PostXML(\"\/createItem\", config, j.Raw, querystring)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 200 {\n\t\tj.Poll()\n\t\treturn j, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) Copy(from string, newName string) (*Job, error) {\n\tqr := map[string]string{\"name\": newName, \"from\": from, \"mode\": \"copy\"}\n\tresp, err := j.Jenkins.Requester.Post(\"\/createItem\", nil, nil, qr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 200 {\n\t\treturn j, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) GetConfig() (string, error) {\n\tvar data string\n\t_, err := j.Jenkins.Requester.GetXML(j.Base+\"\/config.xml\", &data, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn data, nil\n}\n\nfunc (j *Job) GetParameters() ([]parameterDefinition, error) {\n\t_, err := j.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar parameters []parameterDefinition\n\tfor _, property := range j.Raw.Property {\n\t\tfor _, param := range property.ParameterDefinitions {\n\t\t\tparameters = append(parameters, param)\n\t\t}\n\t}\n\treturn parameters, nil\n}\n\nfunc (j *Job) IsQueued() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\treturn j.Raw.InQueue, nil\n}\n\nfunc (j *Job) IsRunning() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\tlastBuild, err := j.GetLastBuild()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn lastBuild.IsRunning(), nil\n}\n\nfunc (j *Job) IsEnabled() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\treturn j.Raw.Color != \"disabled\", nil\n}\n\nfunc (j *Job) HasQueuedBuild() {\n\n}\n\nfunc (j *Job) InvokeSimple(params map[string]string) (bool, error) {\n\tisQueued, err := j.IsQueued()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isQueued {\n\t\tError.Printf(\"%s is already running\", j.GetName())\n\t\treturn false, nil\n\t}\n\n\tendpoint := \"\/build\"\n\tparameters, err := j.GetParameters()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(parameters) > 0 {\n\t\tendpoint = \"\/buildWithParameters\"\n\t}\n\tdata := url.Values{}\n\tfor k, v := range params {\n\t\tdata.Set(k, v)\n\t}\n\tresp, err := j.Jenkins.Requester.Post(j.Base+endpoint, bytes.NewBufferString(data.Encode()), nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 && resp.StatusCode != 201 {\n\t\tError.Println(\"Could not invoke job %s\", j.GetName())\n\t\treturn false, errors.New(\"Could not invoke job \" + j.GetName())\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Invoke(files []string, skipIfRunning bool, params map[string]string, cause string, securityToken string) (bool, error) {\n\tisQueued, err := j.IsQueued()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isQueued {\n\t\tError.Printf(\"%s is already running\", j.GetName())\n\t\treturn false, nil\n\t}\n\tisRunning, err := j.IsRunning()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isRunning && skipIfRunning {\n\t\tWarning.Printf(\"%s Will not request new build because %s is already running\", j.GetName())\n\t}\n\n\tbase := \"\/build\"\n\n\t\/\/ If parameters are specified - url is \/builWithParameters\n\tif params != nil {\n\t\tbase = \"\/buildWithParameters\"\n\t} else {\n\t\tparams = make(map[string]string)\n\t}\n\n\t\/\/ If files are specified - url is \/build\n\tif files != nil {\n\t\tbase = \"\/build\"\n\t}\n\treqParams := map[string]string{}\n\tbuildParams := map[string]string{}\n\tif securityToken != \"\" {\n\t\treqParams[\"token\"] = securityToken\n\t}\n\n\tbuildParams[\"json\"] = string(makeJson(params))\n\tb, _ := json.Marshal(buildParams)\n\tresp, err := j.Jenkins.Requester.PostFiles(j.Base+base, bytes.NewBuffer(b), nil, reqParams, files)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode == 200 || resp.StatusCode == 201 {\n\t\treturn true, nil\n\t}\n\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) Poll() (int, error) {\n\t_, err := j.Jenkins.Requester.GetJSON(j.Base, j.Raw, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn j.Jenkins.Requester.LastResponse.StatusCode, nil\n}\n<commit_msg>chore(job.go): add not implemented information<commit_after>\/\/ Copyright 2015 Vadim Kravcenko\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage gojenkins\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\ntype Job struct {\n\tRaw *jobResponse\n\tJenkins *Jenkins\n\tBase string\n}\n\ntype jobBuild struct {\n\tNumber int64\n\tURL string\n}\n\ntype job struct {\n\tName string `json:\"name\"`\n\tUrl string `json:\"url\"`\n\tColor string `json:\"color\"`\n}\n\ntype parameterDefinition struct {\n\tDefaultParameterValue struct {\n\t\tName string `json:\"name\"`\n\t\tValue bool `json:\"value\"`\n\t} `json:\"defaultParameterValue\"`\n\tDescription string `json:\"description\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n}\n\ntype jobResponse struct {\n\tActions []generalObj\n\tBuildable bool `json:\"buildable\"`\n\tBuilds []jobBuild\n\tColor string `json:\"color\"`\n\tConcurrentBuild bool `json:\"concurrentBuild\"`\n\tDescription string `json:\"description\"`\n\tDisplayName string `json:\"displayName\"`\n\tDisplayNameOrNull interface{} `json:\"displayNameOrNull\"`\n\tDownstreamProjects []job `json:\"downstreamProjects\"`\n\tFirstBuild jobBuild\n\tHealthReport []struct {\n\t\tDescription string `json:\"description\"`\n\t\tIconClassName string `json:\"iconClassName\"`\n\t\tIconUrl string `json:\"iconUrl\"`\n\t\tScore int64 `json:\"score\"`\n\t} `json:\"healthReport\"`\n\tInQueue bool `json:\"inQueue\"`\n\tKeepDependencies bool `json:\"keepDependencies\"`\n\tLastBuild jobBuild `json:\"lastBuild\"`\n\tLastCompletedBuild jobBuild `json:\"lastCompletedBuild\"`\n\tLastFailedBuild jobBuild `json:\"lastFailedBuild\"`\n\tLastStableBuild jobBuild `json:\"lastStableBuild\"`\n\tLastSuccessfulBuild jobBuild `json:\"lastSuccessfulBuild\"`\n\tLastUnstableBuild jobBuild `json:\"lastUnstableBuild\"`\n\tLastUnsuccessfulBuild jobBuild `json:\"lastUnsuccessfulBuild\"`\n\tName string `json:\"name\"`\n\tNextBuildNumber int64 `json:\"nextBuildNumber\"`\n\tProperty []struct {\n\t\tParameterDefinitions []parameterDefinition `json:\"parameterDefinitions\"`\n\t} `json:\"property\"`\n\tQueueItem interface{} `json:\"queueItem\"`\n\tScm struct{} `json:\"scm\"`\n\tUpstreamProjects []job `json:\"upstreamProjects\"`\n\tURL string `json:\"url\"`\n}\n\nfunc (j *Job) GetName() string {\n\treturn j.Raw.Name\n}\n\nfunc (j *Job) GetDescription() string {\n\treturn j.Raw.Description\n}\n\nfunc (j *Job) GetDetails() *jobResponse {\n\treturn j.Raw\n}\n\nfunc (j *Job) GetBuild(id int64) (*Build, error) {\n\tbuild := Build{Jenkins: j.Jenkins, Job: j, Raw: new(buildResponse), Depth: 1, Base: \"\/job\/\" + j.GetName() + \"\/\" + strconv.FormatInt(id, 10)}\n\tstatus, err := build.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status == 200 {\n\t\treturn &build, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(status))\n}\n\nfunc (j *Job) getBuildByType(buildType string) (*Build, error) {\n\tallowed := map[string]jobBuild{\n\t\t\"lastStableBuild\": j.Raw.LastStableBuild,\n\t\t\"lastSuccessfulBuild\": j.Raw.LastSuccessfulBuild,\n\t\t\"lastBuild\": j.Raw.LastBuild,\n\t\t\"lastCompletedBuild\": j.Raw.LastCompletedBuild,\n\t\t\"firstBuild\": j.Raw.FirstBuild,\n\t\t\"lastFailedBuild\": j.Raw.LastFailedBuild,\n\t}\n\tnumber := \"\"\n\tif val, ok := allowed[buildType]; ok {\n\t\tnumber = strconv.FormatInt(val.Number, 10)\n\t} else {\n\t\tpanic(\"No Such Build\")\n\t}\n\tbuild := Build{\n\t\tJenkins: j.Jenkins,\n\t\tDepth: 1,\n\t\tJob: j,\n\t\tRaw: new(buildResponse),\n\t\tBase: \"\/job\/\" + j.GetName() + \"\/\" + number}\n\tstatus, err := build.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif status == 200 {\n\t\treturn &build, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(status))\n}\n\nfunc (j *Job) GetLastSuccessfulBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastSuccessfulBuild\")\n}\n\nfunc (j *Job) GetFirstBuild() (*Build, error) {\n\treturn j.getBuildByType(\"firstBuild\")\n}\n\nfunc (j *Job) GetLastBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastBuild\")\n}\n\nfunc (j *Job) GetLastStableBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastStableBuild\")\n}\n\nfunc (j *Job) GetLastFailedBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastFailedBuild\")\n}\n\nfunc (j *Job) GetLastCompletedBuild() (*Build, error) {\n\treturn j.getBuildByType(\"lastCompletedBuild\")\n}\n\n\/\/ Returns All Builds with Number and URL\nfunc (j *Job) GetAllBuildIds() ([]jobBuild, error) {\n\tvar buildsResp struct {\n\t\tBuilds []jobBuild `json:\"allBuilds\"`\n\t}\n\t_, err := j.Jenkins.Requester.GetJSON(j.Base, &buildsResp, map[string]string{\"tree\": \"allBuilds[number,url]\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buildsResp.Builds, nil\n}\n\nfunc (j *Job) GetUpstreamJobsMetadata() []job {\n\treturn j.Raw.UpstreamProjects\n}\n\nfunc (j *Job) GetDownstreamJobsMetadata() []job {\n\treturn j.Raw.DownstreamProjects\n}\n\nfunc (j *Job) GetUpstreamJobs() ([]*Job, error) {\n\tjobs := make([]*Job, len(j.Raw.UpstreamProjects))\n\tfor i, job := range j.Raw.UpstreamProjects {\n\t\tji, err := j.Jenkins.GetJob(job.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjobs[i] = ji\n\t}\n\treturn jobs, nil\n}\n\nfunc (j *Job) GetDownstreamJobs() ([]*Job, error) {\n\tjobs := make([]*Job, len(j.Raw.DownstreamProjects))\n\tfor i, job := range j.Raw.DownstreamProjects {\n\t\tji, err := j.Jenkins.GetJob(job.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjobs[i] = ji\n\t}\n\treturn jobs, nil\n}\n\nfunc (j *Job) Enable() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/enable\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Disable() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/disable\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Delete() (bool, error) {\n\tresp, err := j.Jenkins.Requester.Post(j.Base+\"\/doDelete\", nil, nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Rename(name string) (bool, error) {\n\tdata := url.Values{}\n\tdata.Set(\"newName\", name)\n\t_, err := j.Jenkins.Requester.Post(j.Base+\"\/doRename\", bytes.NewBufferString(data.Encode()), nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Create(config string, qr ...interface{}) (*Job, error) {\n\tvar querystring map[string]string\n\tif len(qr) > 0 {\n\t\tquerystring = qr[0].(map[string]string)\n\t}\n\tresp, err := j.Jenkins.Requester.PostXML(\"\/createItem\", config, j.Raw, querystring)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 200 {\n\t\tj.Poll()\n\t\treturn j, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) Copy(from string, newName string) (*Job, error) {\n\tqr := map[string]string{\"name\": newName, \"from\": from, \"mode\": \"copy\"}\n\tresp, err := j.Jenkins.Requester.Post(\"\/createItem\", nil, nil, qr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 200 {\n\t\treturn j, nil\n\t}\n\treturn nil, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) GetConfig() (string, error) {\n\tvar data string\n\t_, err := j.Jenkins.Requester.GetXML(j.Base+\"\/config.xml\", &data, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn data, nil\n}\n\nfunc (j *Job) GetParameters() ([]parameterDefinition, error) {\n\t_, err := j.Poll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar parameters []parameterDefinition\n\tfor _, property := range j.Raw.Property {\n\t\tfor _, param := range property.ParameterDefinitions {\n\t\t\tparameters = append(parameters, param)\n\t\t}\n\t}\n\treturn parameters, nil\n}\n\nfunc (j *Job) IsQueued() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\treturn j.Raw.InQueue, nil\n}\n\nfunc (j *Job) IsRunning() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\tlastBuild, err := j.GetLastBuild()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn lastBuild.IsRunning(), nil\n}\n\nfunc (j *Job) IsEnabled() (bool, error) {\n\tif _, err := j.Poll(); err != nil {\n\t\treturn false, err\n\t}\n\treturn j.Raw.Color != \"disabled\", nil\n}\n\nfunc (j *Job) HasQueuedBuild() {\n\tpanic(\"Not Implemented yet\")\n}\n\nfunc (j *Job) InvokeSimple(params map[string]string) (bool, error) {\n\tisQueued, err := j.IsQueued()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isQueued {\n\t\tError.Printf(\"%s is already running\", j.GetName())\n\t\treturn false, nil\n\t}\n\n\tendpoint := \"\/build\"\n\tparameters, err := j.GetParameters()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(parameters) > 0 {\n\t\tendpoint = \"\/buildWithParameters\"\n\t}\n\tdata := url.Values{}\n\tfor k, v := range params {\n\t\tdata.Set(k, v)\n\t}\n\tresp, err := j.Jenkins.Requester.Post(j.Base+endpoint, bytes.NewBufferString(data.Encode()), nil, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode != 200 && resp.StatusCode != 201 {\n\t\tError.Println(\"Could not invoke job %s\", j.GetName())\n\t\treturn false, errors.New(\"Could not invoke job \" + j.GetName())\n\t}\n\treturn true, nil\n}\n\nfunc (j *Job) Invoke(files []string, skipIfRunning bool, params map[string]string, cause string, securityToken string) (bool, error) {\n\tisQueued, err := j.IsQueued()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isQueued {\n\t\tError.Printf(\"%s is already running\", j.GetName())\n\t\treturn false, nil\n\t}\n\tisRunning, err := j.IsRunning()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif isRunning && skipIfRunning {\n\t\tWarning.Printf(\"%s Will not request new build because %s is already running\", j.GetName())\n\t}\n\n\tbase := \"\/build\"\n\n\t\/\/ If parameters are specified - url is \/builWithParameters\n\tif params != nil {\n\t\tbase = \"\/buildWithParameters\"\n\t} else {\n\t\tparams = make(map[string]string)\n\t}\n\n\t\/\/ If files are specified - url is \/build\n\tif files != nil {\n\t\tbase = \"\/build\"\n\t}\n\treqParams := map[string]string{}\n\tbuildParams := map[string]string{}\n\tif securityToken != \"\" {\n\t\treqParams[\"token\"] = securityToken\n\t}\n\n\tbuildParams[\"json\"] = string(makeJson(params))\n\tb, _ := json.Marshal(buildParams)\n\tresp, err := j.Jenkins.Requester.PostFiles(j.Base+base, bytes.NewBuffer(b), nil, reqParams, files)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif resp.StatusCode == 200 || resp.StatusCode == 201 {\n\t\treturn true, nil\n\t}\n\treturn false, errors.New(strconv.Itoa(resp.StatusCode))\n}\n\nfunc (j *Job) Poll() (int, error) {\n\t_, err := j.Jenkins.Requester.GetJSON(j.Base, j.Raw, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn j.Jenkins.Requester.LastResponse.StatusCode, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/gosshold\/ssh\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar ErrNoInstancesFound = errors.New(\"No instances found; run provisioner first\")\n\nconst AfterDeployHookScript = \".moltar-after-deploy\"\n\ntype Job struct {\n\tregion aws.Region\n\tenv string\n\tcluster string\n\tproject string\n\tpackageNames []string\n\tinstances []*ec2.Instance\n\tinstanceSshClients map[*ec2.Instance]*ssh.ClientConn\n\tinstanceLoggers map[*ec2.Instance]*log.Logger\n\toutput io.Writer\n\tlogger *log.Logger\n\tinstallVersionRev uint64\n\tshouldOutputAnsiEscapes bool\n}\n\nfunc getInstancesTagged(ec2client *ec2.EC2, project string, env string, cluster string, packageName string) (instances []*ec2.Instance, err error) {\n\tinstanceFilter := ec2.NewFilter()\n\tinstanceFilter.Add(\"instance-state-name\", \"running\")\n\tinstanceFilter.Add(\"tag:Project\", project)\n\tqueryEnv := env\n\tif env == \"\" {\n\t\tqueryEnv = \"*\"\n\t}\n\tinstanceFilter.Add(\"tag:Environment\", queryEnv)\n\tif cluster != \"\" {\n\t\tinstanceFilter.Add(\"tag:Cluster\", cluster)\n\t}\n\n\tif packageName != \"\" {\n\t\tinstanceFilter.Add(\"tag:Packages\", \"*|\"+packageName+\"|*\")\n\t}\n\n\tinstancesResp, err := ec2client.Instances(nil, instanceFilter)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinstances = make([]*ec2.Instance, 0, 20)\n\tfor _, res := range instancesResp.Reservations {\n\t\tfor _, inst := range res.Instances {\n\t\t\tnewInst := inst\n\t\t\tinstances = append(instances, &newInst)\n\t\t}\n\t}\n\n\treturn instances, nil\n}\n\nfunc NewJob(awsConf AWSConf, env string, cluster string, project string, packageNames []string, searchPackageNames []string, output io.Writer, shouldOutputAnsiEscapes bool) (job *Job, err error) {\n\te := ec2.New(awsConf.Auth, awsConf.Region)\n\n\tif searchPackageNames == nil || len(searchPackageNames) == 0 {\n\t\tsearchPackageNames = []string{\"\"}\n\t}\n\n\tinstancesSet := map[string]*ec2.Instance{}\n\tinstancesCount := map[string]int{}\n\tfor _, packageName := range searchPackageNames {\n\t\tinstances, err := getInstancesTagged(e, project, env, cluster, packageName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, instance := range instances {\n\t\t\tinstancesSet[instance.InstanceId] = instance\n\t\t\tinstancesCount[instance.InstanceId] += 1\n\t\t}\n\t}\n\n\tinstances := make([]*ec2.Instance, 0, len(instancesSet))\n\tfor _, instance := range instancesSet {\n\t\tif instancesCount[instance.InstanceId] == len(searchPackageNames) {\n\t\t\tinstances = append(instances, instance)\n\t\t}\n\t}\n\n\tif len(instances) == 0 {\n\t\treturn nil, ErrNoInstancesFound\n\t}\n\n\tlogger := log.New(output, \"\", 0)\n\n\treturn &Job{region: awsConf.Region, env: env, cluster: cluster,\n\t\tproject: project, packageNames: packageNames, instances: instances,\n\t\tinstanceSshClients: make(map[*ec2.Instance]*ssh.ClientConn),\n\t\tinstanceLoggers: make(map[*ec2.Instance]*log.Logger),\n\t\toutput: output, logger: logger,\n\t\tshouldOutputAnsiEscapes: shouldOutputAnsiEscapes}, nil\n}\n\nfunc (self *Job) Exec(cmd string) (errs []error) {\n\terrChan := make(chan error, len(self.instances))\n\terrs = make([]error, 0, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(inst ec2.Instance) {\n\t\t\tconn, err := self.sshClient(&inst)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger := self.instanceLogger(&inst)\n\t\t\t_, returnChan, err := sshRunOutLogger(conn, cmd, logger, nil)\n\t\t\tif err == nil {\n\t\t\t\terr = <-returnChan\n\t\t\t} else {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(*instance)\n\t}\n\tstartStdinRead()\n\n\tfor _ = range self.instances {\n\t\tif err := <-errChan; err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) ExecList(cmds []string) (errs []error) {\n\tfor _, cmd := range cmds {\n\t\tfmt.Printf(\"\\n%s\\n\\n\", cmd)\n\t\terrs = self.Exec(cmd)\n\t\tif len(errs) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\treturn []error{}\n}\n\nfunc (self *Job) Deploy(runHooks bool) (errs []error) {\n\terrs = self.ExecList([]string{\n\t\t\"sudo apt-get update -qq\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy '\" +\n\t\t\tstrings.Join(self.packageNames, \"' '\") + \"'\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get autoremove -yq\",\n\t\t\"sudo apt-get clean -yq\",\n\t})\n\tif len(errs) > 0 {\n\t\treturn\n\t}\n\n\tif _, err := os.Stat(AfterDeployHookScript); err != nil {\n\t\treturn\n\t}\n\n\tif runHooks {\n\t\tprepareExec()\n\t\tpwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tsyscall.Exec(path.Join(pwd, AfterDeployHookScript),\n\t\t\t[]string{AfterDeployHookScript},\n\t\t\tappend(os.Environ(), \"ENV=\"+self.env))\n\t}\n\treturn\n}\n\nfunc (self *Job) Ssh(criteria string, sshArgs []string) (err error) {\n\tsshPath, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar instance *ec2.Instance\n\tmatches := self.instances\n\n\tif criteria != \"-1\" {\n\t\tif criteria != \"\" {\n\t\t\tmatches = make([]*ec2.Instance, 0, len(self.instances))\n\t\t\tfor _, instance = range self.instances {\n\t\t\t\tif matchCriteria(instance, criteria) {\n\t\t\t\t\tinstanceLogName(instance)\n\t\t\t\t\tmatches = append(matches, instance)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) == 0 {\n\t\t\tself.logger.Fatalf(\"Instance '%s' not found\\n\", criteria)\n\t\t} else if len(matches) > 1 {\n\t\t\tself.logger.Printf(\"Multiple matches for '%s' found:\\n\", criteria)\n\t\t\tself.printInstances(matches)\n\t\t\tself.logger.Fatal(\"\")\n\t\t}\n\t}\n\n\tinstance = matches[0]\n\n\texecArgs := []string{\"ssh\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\texecArgs = append(execArgs, \"-i\", keyFile)\n\t}\n\n\texecArgs = append(execArgs,\n\t\tfmt.Sprintf(\"%s@%s\", self.sshUserName(instance), instance.DNSName))\n\texecArgs = append(execArgs, sshArgs...)\n\n\tfPrintShellCommand(self.output, \"\", execArgs)\n\tfmt.Fprintln(self.output, \"\")\n\n\tprepareExec()\n\terr = syscall.Exec(sshPath, execArgs, os.Environ())\n\treturn\n}\n\nfunc (self *Job) Scp(args []string) (err error) {\n\tscpPath, err := exec.LookPath(\"scp\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefaultArgs := []string{\"-q\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\tdefaultArgs = append(defaultArgs, []string{\n\t\t\t\"-i\", keyFile,\n\t\t}...)\n\t}\n\tscpArgs := make([]string, len(defaultArgs)+len(args))\n\tcopy(scpArgs, defaultArgs)\n\tcopy(scpArgs[len(defaultArgs):], args)\n\n\tvar dstIndex = -1\n\tfor i, arg := range scpArgs {\n\t\tif arg[0] == ':' {\n\t\t\tdstIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif dstIndex == -1 {\n\t\tdstIndex = len(scpArgs)\n\t\tscpArgs = append(scpArgs, \":\")\n\t}\n\n\terrChan := make(chan error, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(instance *ec2.Instance) {\n\t\t\tvar err error\n\t\t\targs := make([]string, len(scpArgs))\n\t\t\tcopy(args, scpArgs)\n\n\t\t\tlogger := self.instanceLogger(instance)\n\t\t\targs[dstIndex] = fmt.Sprintf(\"%s@%s%s\",\n\t\t\t\tself.sshUserName(instance), instance.DNSName, args[dstIndex])\n\n\t\t\tfPrintShellCommand(self.output, \"scp\", args)\n\n\t\t\tcmd := exec.Command(scpPath, args...)\n\t\t\toutPipeRead, outPipeWrite, err := os.Pipe()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error creating pipe: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcmd.Stdout = outPipeWrite\n\t\t\tcmd.Stderr = outPipeWrite\n\n\t\t\terr = cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error starting scp: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toutPipeWrite.Close()\n\t\t\tstdoutReader := bufio.NewReader(outPipeRead)\n\t\t\tfor {\n\t\t\t\tin, err := stdoutReader.ReadString('\\n')\n\t\t\t\tif (err == io.EOF && in != \"\") || err == nil {\n\t\t\t\t\tlogger.Print(in)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = cmd.Wait()\n\t\t\toutPipeRead.Close()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error running scp: %s\\n\", err)\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(instance)\n\t}\n\n\tvar scpErr error\n\tfor _ = range self.instances {\n\t\tscpErr = <-errChan\n\t\tif err == nil && scpErr != nil {\n\t\t\terr = errors.New(\"at least one scp failed\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) List() (err error) {\n\tself.printInstances(self.instances)\n\treturn nil\n}\n\nfunc (self *Job) Hostname(instanceName string) (err error) {\n\tfor _, instance := range self.instances {\n\t\tif instanceLogName(instance) == instanceName {\n\t\t\tfmt.Fprintln(self.output, instance.DNSName)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(instanceName + \" not found\")\n}\n\n\/\/\/ Subtasks\n\nfunc (self *Job) sshClient(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn = self.instanceSshClients[i]\n\tif conn == nil {\n\t\tconn, err = self.sshDial(i)\n\t\tif err == nil {\n\t\t\tself.instanceSshClients[i] = conn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) instanceLogger(i *ec2.Instance) (logger *log.Logger) {\n\tlogger = self.instanceLoggers[i]\n\tif logger == nil {\n\t\tprefix := instanceLogName(i)\n\t\tif self.shouldOutputAnsiEscapes {\n\t\t\tprefix = \"\\033[1m\" + prefix + \"\\033[0m\"\n\t\t}\n\t\tlogger = log.New(self.output, prefix+\" \", 0)\n\t\tself.instanceLoggers[i] = logger\n\t}\n\treturn\n}\n\nfunc (self *Job) keyFile() (path string) {\n\tfileName := self.project\n\tif len(self.packageNames) > 0 {\n\t\tfileName += fmt.Sprintf(\"-%s\", self.packageNames[0])\n\t}\n\tpath = fmt.Sprintf(os.ExpandEnv(\"${HOME}\/Google Drive\/%s Ops\/Keys\/%s.pem\"),\n\t\tself.project, fileName)\n\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn path\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc (self *Job) sshUserName(_ *ec2.Instance) (userName string) {\n\t\/\/ TODO: be more clever about this\n\treturn \"ubuntu\"\n}\n\nfunc (self *Job) sshDial(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn, err = sshDial(i.DNSName+\":22\", self.sshUserName(i), self.keyFile())\n\treturn\n}\n\nfunc (self *Job) printInstances(instances []*ec2.Instance) {\n\tfields := make([][]string, len(instances))\n\tfor i, instance := range instances {\n\t\tfields[i] = []string{instance.InstanceId, instanceLogName(instance),\n\t\t\tinstance.DNSName}\n\t}\n\tfmt.Fprint(self.output, formatTable(fields))\n}\n\nfunc instanceLogName(i *ec2.Instance) string {\n\tfor _, tag := range i.Tags {\n\t\tif tag.Key == \"Name\" && tag.Value != \"\" {\n\t\t\treturn tag.Value\n\t\t}\n\t}\n\treturn i.InstanceId\n}\n\nfunc fPrintShellCommand(w io.Writer, n string, cmd []string) {\n\tif n != \"\" {\n\t\tfmt.Fprintf(w, \"%s \", n)\n\t}\n\tfor i, cmdPart := range cmd {\n\t\t\/\/ TODO: this escaping will work most of the time, but isn't that great\n\t\tif strings.ContainsAny(cmdPart, \" $\") {\n\t\t\tfmt.Fprintf(w, \"'%s'\", cmdPart)\n\t\t} else {\n\t\t\tfmt.Fprint(w, cmdPart)\n\t\t}\n\t\tif i < (len(cmd) - 1) {\n\t\t\tfmt.Fprint(w, \" \")\n\t\t}\n\t}\n\tfmt.Fprint(w, \"\\n\")\n}\n\nfunc matchCriteria(instance *ec2.Instance, criteria string) bool {\n\tvar found bool\n\tfor _, value := range strings.Split(criteria, \"\/\") {\n\t\tfound = false\n\t\tfor _, tag := range instance.Tags {\n\t\t\tif strings.Contains(tag.Value, value) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !strings.Contains(instance.InstanceId, value) && !strings.Contains(instance.PrivateDNSName, value) && !strings.Contains(instance.DNSName, value) && found == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc prepareExec() {\n\t\/* There appears to be a bug with goamz where some fds are left open, and\n\t * just closing them causes a crash. If we ask all fds > 2 to close on\n\t * exec, all is well.\n\t *\/\n\tvar rlimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxFds := int(rlimit.Cur)\n\tfor fd := 3; fd < maxFds; fd++ {\n\t\tsyscall.CloseOnExec(fd)\n\t}\n}\n<commit_msg>Added support for deploy failure hook.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/gosshold\/ssh\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar ErrNoInstancesFound = errors.New(\"No instances found; run provisioner first\")\n\nconst AfterDeployHookScript = \".moltar-after-deploy\"\nconst FailedDeployHookScript = \".moltar-failed-deploy\"\n\ntype ExecError struct {\n\tinstance ec2.Instance\n\terror error\n}\n\nfunc (f ExecError) Error() string {\n\treturn fmt.Sprintf(\"%s : %s\", instanceLogName(&f.instance), f.error)\n}\n\ntype Job struct {\n\tregion aws.Region\n\tenv string\n\tcluster string\n\tproject string\n\tpackageNames []string\n\tinstances []*ec2.Instance\n\tinstanceSshClients map[*ec2.Instance]*ssh.ClientConn\n\tinstanceLoggers map[*ec2.Instance]*log.Logger\n\toutput io.Writer\n\tlogger *log.Logger\n\tinstallVersionRev uint64\n\tshouldOutputAnsiEscapes bool\n}\n\nfunc getInstancesTagged(ec2client *ec2.EC2, project string, env string, cluster string, packageName string) (instances []*ec2.Instance, err error) {\n\tinstanceFilter := ec2.NewFilter()\n\tinstanceFilter.Add(\"instance-state-name\", \"running\")\n\tinstanceFilter.Add(\"tag:Project\", project)\n\tqueryEnv := env\n\tif env == \"\" {\n\t\tqueryEnv = \"*\"\n\t}\n\tinstanceFilter.Add(\"tag:Environment\", queryEnv)\n\tif cluster != \"\" {\n\t\tinstanceFilter.Add(\"tag:Cluster\", cluster)\n\t}\n\n\tif packageName != \"\" {\n\t\tinstanceFilter.Add(\"tag:Packages\", \"*|\"+packageName+\"|*\")\n\t}\n\n\tinstancesResp, err := ec2client.Instances(nil, instanceFilter)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinstances = make([]*ec2.Instance, 0, 20)\n\tfor _, res := range instancesResp.Reservations {\n\t\tfor _, inst := range res.Instances {\n\t\t\tnewInst := inst\n\t\t\tinstances = append(instances, &newInst)\n\t\t}\n\t}\n\n\treturn instances, nil\n}\n\nfunc NewJob(awsConf AWSConf, env string, cluster string, project string, packageNames []string, searchPackageNames []string, output io.Writer, shouldOutputAnsiEscapes bool) (job *Job, err error) {\n\te := ec2.New(awsConf.Auth, awsConf.Region)\n\n\tif searchPackageNames == nil || len(searchPackageNames) == 0 {\n\t\tsearchPackageNames = []string{\"\"}\n\t}\n\n\tinstancesSet := map[string]*ec2.Instance{}\n\tinstancesCount := map[string]int{}\n\tfor _, packageName := range searchPackageNames {\n\t\tinstances, err := getInstancesTagged(e, project, env, cluster, packageName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, instance := range instances {\n\t\t\tinstancesSet[instance.InstanceId] = instance\n\t\t\tinstancesCount[instance.InstanceId] += 1\n\t\t}\n\t}\n\n\tinstances := make([]*ec2.Instance, 0, len(instancesSet))\n\tfor _, instance := range instancesSet {\n\t\tif instancesCount[instance.InstanceId] == len(searchPackageNames) {\n\t\t\tinstances = append(instances, instance)\n\t\t}\n\t}\n\n\tif len(instances) == 0 {\n\t\treturn nil, ErrNoInstancesFound\n\t}\n\n\tlogger := log.New(output, \"\", 0)\n\n\treturn &Job{region: awsConf.Region, env: env, cluster: cluster,\n\t\tproject: project, packageNames: packageNames, instances: instances,\n\t\tinstanceSshClients: make(map[*ec2.Instance]*ssh.ClientConn),\n\t\tinstanceLoggers: make(map[*ec2.Instance]*log.Logger),\n\t\toutput: output, logger: logger,\n\t\tshouldOutputAnsiEscapes: shouldOutputAnsiEscapes}, nil\n}\n\nfunc (self *Job) Exec(cmd string) (errs []error) {\n\texecErrs := make([]ExecError, 0, len(self.instances))\n\texecErrs = self.exec(cmd)\n\tif len(execErrs) > 0 {\n\t\tfor _, execErr := range execErrs {\n\t\t\terrs = append(errs, execErr)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) Deploy(runHooks bool) (errs []error) {\n\texecErrs := make([]ExecError, 0, len(self.instances))\n\texecErrs = self.execList([]string{\n\t\t\"sudo apt-get update -qq\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy '\" +\n\t\t\tstrings.Join(self.packageNames, \"' '\") + \"'\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get autoremove -yq\",\n\t\t\"sudo apt-get clean -yq\",\n\t})\n\n\thosts := make([]string, 0, len(execErrs))\n\tfor _, execErr := range execErrs {\n\t\thosts = append(hosts, execErr.instance.DNSName)\n\t\terrs = append(errs, execErr)\n\t}\n\n\tif runHooks {\n\t\tif _, err := os.Stat(FailedDeployHookScript); len(execErrs) > 0 && err == nil {\n\t\t\tfmt.Println(getHookMessage(FailedDeployHookScript))\n\t\t\trunHook(FailedDeployHookScript,\n\t\t\t\t[]string{\"ENV=\" + self.env, \"FAILED_HOSTS=\" + strings.Join(hosts, \" \")})\n\t\t} else if _, err := os.Stat(AfterDeployHookScript); err == nil {\n\t\t\tfmt.Println(getHookMessage(AfterDeployHookScript))\n\t\t\trunHook(AfterDeployHookScript, []string{\"ENV=\" + self.env})\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) Ssh(criteria string, sshArgs []string) (err error) {\n\tsshPath, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar instance *ec2.Instance\n\tmatches := self.instances\n\n\tif criteria != \"-1\" {\n\t\tif criteria != \"\" {\n\t\t\tmatches = make([]*ec2.Instance, 0, len(self.instances))\n\t\t\tfor _, instance = range self.instances {\n\t\t\t\tif matchCriteria(instance, criteria) {\n\t\t\t\t\tinstanceLogName(instance)\n\t\t\t\t\tmatches = append(matches, instance)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) == 0 {\n\t\t\tself.logger.Fatalf(\"Instance '%s' not found\\n\", criteria)\n\t\t} else if len(matches) > 1 {\n\t\t\tself.logger.Printf(\"Multiple matches for '%s' found:\\n\", criteria)\n\t\t\tself.printInstances(matches)\n\t\t\tself.logger.Fatal(\"\")\n\t\t}\n\t}\n\n\tinstance = matches[0]\n\n\texecArgs := []string{\"ssh\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\texecArgs = append(execArgs, \"-i\", keyFile)\n\t}\n\n\texecArgs = append(execArgs,\n\t\tfmt.Sprintf(\"%s@%s\", self.sshUserName(instance), instance.DNSName))\n\texecArgs = append(execArgs, sshArgs...)\n\n\tfPrintShellCommand(self.output, \"\", execArgs)\n\tfmt.Fprintln(self.output, \"\")\n\n\tprepareExec()\n\terr = syscall.Exec(sshPath, execArgs, os.Environ())\n\treturn\n}\n\nfunc (self *Job) Scp(args []string) (err error) {\n\tscpPath, err := exec.LookPath(\"scp\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefaultArgs := []string{\"-q\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\tdefaultArgs = append(defaultArgs, []string{\n\t\t\t\"-i\", keyFile,\n\t\t}...)\n\t}\n\tscpArgs := make([]string, len(defaultArgs)+len(args))\n\tcopy(scpArgs, defaultArgs)\n\tcopy(scpArgs[len(defaultArgs):], args)\n\n\tvar dstIndex = -1\n\tfor i, arg := range scpArgs {\n\t\tif arg[0] == ':' {\n\t\t\tdstIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif dstIndex == -1 {\n\t\tdstIndex = len(scpArgs)\n\t\tscpArgs = append(scpArgs, \":\")\n\t}\n\n\terrChan := make(chan error, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(instance *ec2.Instance) {\n\t\t\tvar err error\n\t\t\targs := make([]string, len(scpArgs))\n\t\t\tcopy(args, scpArgs)\n\n\t\t\tlogger := self.instanceLogger(instance)\n\t\t\targs[dstIndex] = fmt.Sprintf(\"%s@%s%s\",\n\t\t\t\tself.sshUserName(instance), instance.DNSName, args[dstIndex])\n\n\t\t\tfPrintShellCommand(self.output, \"scp\", args)\n\n\t\t\tcmd := exec.Command(scpPath, args...)\n\t\t\toutPipeRead, outPipeWrite, err := os.Pipe()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error creating pipe: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcmd.Stdout = outPipeWrite\n\t\t\tcmd.Stderr = outPipeWrite\n\n\t\t\terr = cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error starting scp: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toutPipeWrite.Close()\n\t\t\tstdoutReader := bufio.NewReader(outPipeRead)\n\t\t\tfor {\n\t\t\t\tin, err := stdoutReader.ReadString('\\n')\n\t\t\t\tif (err == io.EOF && in != \"\") || err == nil {\n\t\t\t\t\tlogger.Print(in)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = cmd.Wait()\n\t\t\toutPipeRead.Close()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error running scp: %s\\n\", err)\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(instance)\n\t}\n\n\tvar scpErr error\n\tfor _ = range self.instances {\n\t\tscpErr = <-errChan\n\t\tif err == nil && scpErr != nil {\n\t\t\terr = errors.New(\"at least one scp failed\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) List() (err error) {\n\tself.printInstances(self.instances)\n\treturn nil\n}\n\nfunc (self *Job) Hostname(instanceName string) (err error) {\n\tfor _, instance := range self.instances {\n\t\tif instanceLogName(instance) == instanceName {\n\t\t\tfmt.Fprintln(self.output, instance.DNSName)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(instanceName + \" not found\")\n}\n\n\/\/\/ Subtasks\n\nfunc (self *Job) sshClient(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn = self.instanceSshClients[i]\n\tif conn == nil {\n\t\tconn, err = self.sshDial(i)\n\t\tif err == nil {\n\t\t\tself.instanceSshClients[i] = conn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) instanceLogger(i *ec2.Instance) (logger *log.Logger) {\n\tlogger = self.instanceLoggers[i]\n\tif logger == nil {\n\t\tprefix := instanceLogName(i)\n\t\tif self.shouldOutputAnsiEscapes {\n\t\t\tprefix = \"\\033[1m\" + prefix + \"\\033[0m\"\n\t\t}\n\t\tlogger = log.New(self.output, prefix+\" \", 0)\n\t\tself.instanceLoggers[i] = logger\n\t}\n\treturn\n}\n\nfunc (self *Job) exec(cmd string) (errs []ExecError) {\n\terrChan := make(chan ExecError, len(self.instances))\n\terrs = make([]ExecError, 0, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(inst ec2.Instance) {\n\t\t\tconn, err := self.sshClient(&inst)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- ExecError{error: err, instance: inst}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger := self.instanceLogger(&inst)\n\t\t\t_, returnChan, err := sshRunOutLogger(conn, cmd, logger, nil)\n\t\t\tif err == nil {\n\t\t\t\terr = <-returnChan\n\t\t\t}\n\t\t\terrChan <- ExecError{error: err, instance: inst}\n\t\t}(*instance)\n\t}\n\tstartStdinRead()\n\n\tfor _ = range self.instances {\n\t\tif err := <-errChan; err.error != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) execList(cmds []string) (errs []ExecError) {\n\tfor _, cmd := range cmds {\n\t\tfmt.Printf(\"\\n%s\\n\\n\", cmd)\n\t\terrs = self.exec(cmd)\n\t\tif len(errs) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\treturn []ExecError{}\n}\n\nfunc (self *Job) keyFile() (path string) {\n\tfileName := self.project\n\tif len(self.packageNames) > 0 {\n\t\tfileName += fmt.Sprintf(\"-%s\", self.packageNames[0])\n\t}\n\tpath = fmt.Sprintf(os.ExpandEnv(\"${HOME}\/Google Drive\/%s Ops\/Keys\/%s.pem\"),\n\t\tself.project, fileName)\n\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn path\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc (self *Job) sshUserName(_ *ec2.Instance) (userName string) {\n\t\/\/ TODO: be more clever about this\n\treturn \"ubuntu\"\n}\n\nfunc (self *Job) sshDial(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn, err = sshDial(i.DNSName+\":22\", self.sshUserName(i), self.keyFile())\n\treturn\n}\n\nfunc (self *Job) printInstances(instances []*ec2.Instance) {\n\tfields := make([][]string, len(instances))\n\tfor i, instance := range instances {\n\t\tfields[i] = []string{instance.InstanceId, instanceLogName(instance),\n\t\t\tinstance.DNSName}\n\t}\n\tfmt.Fprint(self.output, formatTable(fields))\n}\n\nfunc instanceLogName(i *ec2.Instance) string {\n\tfor _, tag := range i.Tags {\n\t\tif tag.Key == \"Name\" && tag.Value != \"\" {\n\t\t\treturn tag.Value\n\t\t}\n\t}\n\treturn i.InstanceId\n}\n\nfunc fPrintShellCommand(w io.Writer, n string, cmd []string) {\n\tif n != \"\" {\n\t\tfmt.Fprintf(w, \"%s \", n)\n\t}\n\tfor i, cmdPart := range cmd {\n\t\t\/\/ TODO: this escaping will work most of the time, but isn't that great\n\t\tif strings.ContainsAny(cmdPart, \" $\") {\n\t\t\tfmt.Fprintf(w, \"'%s'\", cmdPart)\n\t\t} else {\n\t\t\tfmt.Fprint(w, cmdPart)\n\t\t}\n\t\tif i < (len(cmd) - 1) {\n\t\t\tfmt.Fprint(w, \" \")\n\t\t}\n\t}\n\tfmt.Fprint(w, \"\\n\")\n}\n\nfunc runHook(script_path string, environment []string) {\n\tprepareExec()\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn\n\t}\n\tvars := make([]string, 0, len(os.Environ()))\n\tfor _, env := range environment {\n\t\tvars = append(vars, env)\n\t}\n\tfor _, env := range os.Environ() {\n\t\tvars = append(vars, env)\n\t}\n\tsyscall.Exec(path.Join(pwd, script_path),\n\t\t[]string{script_path},\n\t\tvars)\n\n}\n\nfunc matchCriteria(instance *ec2.Instance, criteria string) bool {\n\tvar found bool\n\tfor _, value := range strings.Split(criteria, \"\/\") {\n\t\tfound = false\n\t\tfor _, tag := range instance.Tags {\n\t\t\tif strings.Contains(tag.Value, value) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !strings.Contains(instance.InstanceId, value) && !strings.Contains(instance.PrivateDNSName, value) && !strings.Contains(instance.DNSName, value) && found == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc getHookMessage(script string) string {\n\treturn fmt.Sprintf(\"Running deploy failure hook %s\", script)\n}\n\nfunc prepareExec() {\n\t\/* There appears to be a bug with goamz where some fds are left open, and\n\t * just closing them causes a crash. If we ask all fds > 2 to close on\n\t * exec, all is well.\n\t *\/\n\tvar rlimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxFds := int(rlimit.Cur)\n\tfor fd := 3; fd < maxFds; fd++ {\n\t\tsyscall.CloseOnExec(fd)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package kontrol\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/kite\/dnode\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n)\n\nfunc (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {\n\tk.log.Info(\"Register request from: %s\", r.Client.Kite)\n\n\tif r.Args.One().MustMap()[\"url\"].MustString() == \"\" {\n\t\treturn nil, errors.New(\"invalid url\")\n\t}\n\n\tvar args struct {\n\t\tURL string `json:\"url\"`\n\t}\n\tr.Args.One().MustUnmarshal(&args)\n\tif args.URL == \"\" {\n\t\treturn nil, errors.New(\"empty url\")\n\t}\n\n\t\/\/ Only accept requests with kiteKey because we need this info\n\t\/\/ for generating tokens for this kite.\n\tif r.Auth.Type != \"kiteKey\" {\n\t\treturn nil, fmt.Errorf(\"Unexpected authentication type: %s\", r.Auth.Type)\n\t}\n\n\tkiteURL := args.URL\n\tremote := r.Client\n\n\tif err := validateKiteKey(&remote.Kite); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue := &kontrolprotocol.RegisterValue{\n\t\tURL: kiteURL,\n\t}\n\n\t\/\/ Register first by adding the value to the storage. Return if there is\n\t\/\/ any error.\n\tif err := k.storage.Upsert(&remote.Kite, value); err != nil {\n\t\tk.log.Error(\"storage add '%s' error: %s\", remote.Kite, err)\n\t\treturn nil, errors.New(\"internal error - register\")\n\t}\n\n\t\/\/ we create a new ticker which is going to update the key periodically in\n\t\/\/ the storage so it's always up to date. Instead of updating the key\n\t\/\/ periodically according to the HeartBeatInterval below, we are buffering\n\t\/\/ the write speed here with the UpdateInterval.\n\tupdater := time.NewTicker(UpdateInterval)\n\tupdaterFunc := func() {\n\t\tfor _ = range updater.C {\n\t\t\tk.log.Debug(\"Kite is active, updating the value %s\", remote.Kite)\n\t\t\terr := k.storage.Update(&remote.Kite, value)\n\t\t\tif err != nil {\n\t\t\t\tk.log.Error(\"storage update '%s' error: %s\", remote.Kite, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tgo updaterFunc()\n\n\t\/\/ lostFunc is called when we don't get any heartbeat or we lost\n\t\/\/ connection. In any case it will stop the updater\n\tlostFunc := func(reason string) func() {\n\t\treturn func() {\n\t\t\tk.log.Info(\"Kite %s. Stopping the updater %s\", reason, remote.Kite)\n\t\t\t\/\/ stop the updater so it doesn't update it in the background\n\t\t\tupdater.Stop()\n\t\t}\n\t}\n\n\t\/\/ we are now creating a timer that is going to call the lostFunc, which\n\t\/\/ stops the background updater if it's not resetted.\n\tupdateTimer := time.AfterFunc(HeartbeatInterval+HeartbeatDelay,\n\t\tlostFunc(\"didn't get heartbeat\"))\n\n\theartbeatArgs := []interface{}{\n\t\tHeartbeatInterval \/ time.Second,\n\t\tdnode.Callback(func(args *dnode.Partial) {\n\t\t\t\/\/ try to reset the timer every time the remote kite calls this\n\t\t\t\/\/ callback(sends us heartbeat). Because the timer get reset, the\n\t\t\t\/\/ lostFunc above will never be fired, so the value get always\n\t\t\t\/\/ updated with the updater in the background according to the\n\t\t\t\/\/ write interval. If the kite doesn't send any heartbeat, the\n\t\t\t\/\/ deleteFunc is being called, which stops the updater so the key\n\t\t\t\/\/ is being deleted automatically via the TTL mechanism.\n\t\t\tk.log.Debug(\"Kite send us an heartbeat. %s\", remote.Kite)\n\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Lock()\n\t\t\tupdateTimer.Reset(HeartbeatInterval + HeartbeatDelay)\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Unlock()\n\t\t}),\n\t}\n\n\t\/\/ now trigger the remote kite so it sends us periodically an heartbeat\n\tremote.GoWithTimeout(\"kite.heartbeat\", 4*time.Second, heartbeatArgs...)\n\n\tk.log.Info(\"Kite registered: %s\", remote.Kite)\n\n\tremote.OnDisconnect(lostFunc(\"disconnected\")) \/\/ also call it when we disconnect\n\n\t\/\/ send response back to the kite, also identify him with the new name\n\treturn &protocol.RegisterResult{URL: args.URL}, nil\n}\n\nfunc (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {\n\t\/\/ This type is here until inversion branch is merged.\n\t\/\/ Reason: We can't use the same struct for marshaling and unmarshaling.\n\t\/\/ TODO use the struct in protocol\n\ttype GetKitesArgs struct {\n\t\tQuery *protocol.KontrolQuery `json:\"query\"`\n\t}\n\n\tvar args GetKitesArgs\n\tr.Args.One().MustUnmarshal(&args)\n\n\tquery := args.Query\n\n\t\/\/ audience will go into the token as \"aud\" claim.\n\taudience := getAudience(query)\n\n\t\/\/ Generate token once here because we are using the same token for every\n\t\/\/ kite we return and generating many tokens is really slow.\n\ttoken, err := generateToken(audience, r.Username,\n\t\tk.Kite.Kite().Username, k.privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get kites from the storage\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach tokens to kites\n\tkites.Attach(token)\n\n\treturn &protocol.GetKitesResult{\n\t\tKites: kites,\n\t}, nil\n}\n\nfunc (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {\n\tvar query *protocol.KontrolQuery\n\terr := r.Args.One().Unmarshal(&query)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Invalid query\")\n\t}\n\n\t\/\/ check if it's exist\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(kites) > 1 {\n\t\treturn nil, errors.New(\"query matches more than one kite\")\n\t}\n\n\taudience := getAudience(query)\n\n\treturn generateToken(audience, r.Username, k.Kite.Kite().Username, k.privateKey)\n}\n\nfunc (k *Kontrol) handleMachine(r *kite.Request) (interface{}, error) {\n\tif k.MachineAuthenticate != nil {\n\t\tif err := k.MachineAuthenticate(r); err != nil {\n\t\t\treturn nil, errors.New(\"cannot authenticate user\")\n\t\t}\n\t}\n\n\tusername := r.Args.One().MustString() \/\/ username should be send as an argument\n\treturn k.registerUser(username)\n}\n<commit_msg>kontrol\/handlers: if we still get heartbeats, add them back<commit_after>package kontrol\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/kite\/dnode\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n\t\"github.com\/tjgq\/ticker\"\n)\n\nfunc (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {\n\tk.log.Info(\"Register request from: %s\", r.Client.Kite)\n\n\tif r.Args.One().MustMap()[\"url\"].MustString() == \"\" {\n\t\treturn nil, errors.New(\"invalid url\")\n\t}\n\n\tvar args struct {\n\t\tURL string `json:\"url\"`\n\t}\n\tr.Args.One().MustUnmarshal(&args)\n\tif args.URL == \"\" {\n\t\treturn nil, errors.New(\"empty url\")\n\t}\n\n\t\/\/ Only accept requests with kiteKey because we need this info\n\t\/\/ for generating tokens for this kite.\n\tif r.Auth.Type != \"kiteKey\" {\n\t\treturn nil, fmt.Errorf(\"Unexpected authentication type: %s\", r.Auth.Type)\n\t}\n\n\tkiteURL := args.URL\n\tremote := r.Client\n\n\tif err := validateKiteKey(&remote.Kite); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue := &kontrolprotocol.RegisterValue{\n\t\tURL: kiteURL,\n\t}\n\n\t\/\/ Register first by adding the value to the storage. Return if there is\n\t\/\/ any error.\n\tif err := k.storage.Upsert(&remote.Kite, value); err != nil {\n\t\tk.log.Error(\"storage add '%s' error: %s\", remote.Kite, err)\n\t\treturn nil, errors.New(\"internal error - register\")\n\t}\n\n\t\/\/ we create a new ticker which is going to update the key periodically in\n\t\/\/ the storage so it's always up to date. Instead of updating the key\n\t\/\/ periodically according to the HeartBeatInterval below, we are buffering\n\t\/\/ the write speed here with the UpdateInterval.\n\tupdater := ticker.New(UpdateInterval)\n\tupdater.Start()\n\n\tupdaterFunc := func() {\n\t\tfor _ = range updater.C {\n\t\t\tk.log.Debug(\"Kite is active, updating the value %s\", remote.Kite)\n\t\t\terr := k.storage.Update(&remote.Kite, value)\n\t\t\tif err != nil {\n\t\t\t\tk.log.Error(\"storage update '%s' error: %s\", remote.Kite, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tgo updaterFunc()\n\n\t\/\/ lostFunc is called when we don't get any heartbeat or we lost\n\t\/\/ connection. In any case it will stop the updater\n\tlostFunc := func(reason string) func() {\n\t\treturn func() {\n\t\t\tk.log.Info(\"Kite %s. Stopping the updater %s\", reason, remote.Kite)\n\t\t\t\/\/ stop the updater so it doesn't update it in the background\n\t\t\tupdater.Stop()\n\t\t}\n\n\t}\n\n\t\/\/ we are now creating a timer that is going to call the lostFunc, which\n\t\/\/ stops the background updater if it's not resetted.\n\tupdateTimer := time.AfterFunc(HeartbeatInterval+HeartbeatDelay,\n\t\tlostFunc(\"didn't get heartbeat\"))\n\n\theartbeatArgs := []interface{}{\n\t\tHeartbeatInterval \/ time.Second,\n\t\tdnode.Callback(func(args *dnode.Partial) {\n\t\t\t\/\/ try to reset the timer every time the remote kite calls this\n\t\t\t\/\/ callback(sends us heartbeat). Because the timer get reset, the\n\t\t\t\/\/ lostFunc above will never be fired, so the value get always\n\t\t\t\/\/ updated with the updater in the background according to the\n\t\t\t\/\/ write interval. If the kite doesn't send any heartbeat, the\n\t\t\t\/\/ deleteFunc is being called, which stops the updater so the key\n\t\t\t\/\/ is being deleted automatically via the TTL mechanism.\n\t\t\tk.log.Debug(\"Kite send us an heartbeat. %s\", remote.Kite)\n\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Lock()\n\t\t\tupdateTimer.Reset(HeartbeatInterval + HeartbeatDelay)\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Unlock()\n\n\t\t\t\/\/ seems we miss a heartbeat, so start it again!\n\t\t\tif updater.Stopped() {\n\t\t\t\tk.log.Warning(\"Updater was closed, but we are still getting heartbeats. Starting again %s\", remote.Kite)\n\t\t\t\tupdater.Start()\n\t\t\t}\n\t\t}),\n\t}\n\n\t\/\/ now trigger the remote kite so it sends us periodically an heartbeat\n\tremote.GoWithTimeout(\"kite.heartbeat\", 4*time.Second, heartbeatArgs...)\n\n\tk.log.Info(\"Kite registered: %s\", remote.Kite)\n\n\tremote.OnDisconnect(lostFunc(\"disconnected\")) \/\/ also call it when we disconnect\n\n\t\/\/ send response back to the kite, also identify him with the new name\n\treturn &protocol.RegisterResult{URL: args.URL}, nil\n}\n\nfunc (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {\n\t\/\/ This type is here until inversion branch is merged.\n\t\/\/ Reason: We can't use the same struct for marshaling and unmarshaling.\n\t\/\/ TODO use the struct in protocol\n\ttype GetKitesArgs struct {\n\t\tQuery *protocol.KontrolQuery `json:\"query\"`\n\t}\n\n\tvar args GetKitesArgs\n\tr.Args.One().MustUnmarshal(&args)\n\n\tquery := args.Query\n\n\t\/\/ audience will go into the token as \"aud\" claim.\n\taudience := getAudience(query)\n\n\t\/\/ Generate token once here because we are using the same token for every\n\t\/\/ kite we return and generating many tokens is really slow.\n\ttoken, err := generateToken(audience, r.Username,\n\t\tk.Kite.Kite().Username, k.privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get kites from the storage\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach tokens to kites\n\tkites.Attach(token)\n\n\treturn &protocol.GetKitesResult{\n\t\tKites: kites,\n\t}, nil\n}\n\nfunc (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {\n\tvar query *protocol.KontrolQuery\n\terr := r.Args.One().Unmarshal(&query)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Invalid query\")\n\t}\n\n\t\/\/ check if it's exist\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(kites) > 1 {\n\t\treturn nil, errors.New(\"query matches more than one kite\")\n\t}\n\n\taudience := getAudience(query)\n\n\treturn generateToken(audience, r.Username, k.Kite.Kite().Username, k.privateKey)\n}\n\nfunc (k *Kontrol) handleMachine(r *kite.Request) (interface{}, error) {\n\tif k.MachineAuthenticate != nil {\n\t\tif err := k.MachineAuthenticate(r); err != nil {\n\t\t\treturn nil, errors.New(\"cannot authenticate user\")\n\t\t}\n\t}\n\n\tusername := r.Args.One().MustString() \/\/ username should be send as an argument\n\treturn k.registerUser(username)\n}\n<|endoftext|>"} {"text":"<commit_before>package kontrol\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/kite\/dnode\"\n\t\"github.com\/koding\/kite\/kontrol\/onceevery\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n)\n\nfunc (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {\n\tk.log.Info(\"Register request from: %s\", r.Client.Kite)\n\n\tif r.Args.One().MustMap()[\"url\"].MustString() == \"\" {\n\t\treturn nil, errors.New(\"invalid url\")\n\t}\n\n\tvar args struct {\n\t\tURL string `json:\"url\"`\n\t}\n\tr.Args.One().MustUnmarshal(&args)\n\tif args.URL == \"\" {\n\t\treturn nil, errors.New(\"empty url\")\n\t}\n\n\t\/\/ Only accept requests with kiteKey because we need this info\n\t\/\/ for generating tokens for this kite.\n\tif r.Auth.Type != \"kiteKey\" {\n\t\treturn nil, fmt.Errorf(\"Unexpected authentication type: %s\", r.Auth.Type)\n\t}\n\n\tkiteURL := args.URL\n\tremote := r.Client\n\n\tif err := validateKiteKey(&remote.Kite); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue := &kontrolprotocol.RegisterValue{\n\t\tURL: kiteURL,\n\t}\n\n\t\/\/ Register first by adding the value to the storage. Return if there is\n\t\/\/ any error.\n\tif err := k.storage.Upsert(&remote.Kite, value); err != nil {\n\t\tk.log.Error(\"storage add '%s' error: %s\", remote.Kite, err)\n\t\treturn nil, errors.New(\"internal error - register\")\n\t}\n\n\tevery := onceevery.New(UpdateInterval)\n\n\tping := make(chan struct{}, 1)\n\tclosed := false\n\n\tupdaterFunc := func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ping:\n\t\t\t\tk.log.Info(\"Kite is active, got a ping %s\", remote.Kite)\n\t\t\t\tevery.Do(func() {\n\t\t\t\t\tk.log.Info(\"Kite is active, updating the value %s\", remote.Kite)\n\t\t\t\t\terr := k.storage.Update(&remote.Kite, value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tk.log.Error(\"storage update '%s' error: %s\", remote.Kite, err)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\tcase <-time.After(HeartbeatInterval + HeartbeatDelay):\n\t\t\t\tk.log.Info(\"Kite didn't sent any heartbeat %s.\", remote.Kite)\n\t\t\t\tclosed = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tgo updaterFunc()\n\n\theartbeatArgs := []interface{}{\n\t\tHeartbeatInterval \/ time.Second,\n\t\tdnode.Callback(func(args *dnode.Partial) {\n\t\t\tk.log.Info(\"Kite send us an heartbeat. %s\", remote.Kite)\n\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Lock()\n\t\t\tdefer k.clientLocks.Get(remote.Kite.ID).Unlock()\n\n\t\t\tselect {\n\t\t\tcase ping <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\t\/\/ seems we miss a heartbeat, so start it again!\n\t\t\tif closed {\n\t\t\t\tclosed = false\n\t\t\t\tk.log.Warning(\"Updater was closed, but we are still getting heartbeats. Starting again %s\",\n\t\t\t\t\tremote.Kite)\n\n\t\t\t\t\/\/ it might be removed because the ttl cleaner would come\n\t\t\t\t\/\/ before us, so try to add it again, the updater will than\n\t\t\t\t\/\/ continue to update it afterwards.\n\t\t\t\tk.storage.Upsert(&remote.Kite, value)\n\t\t\t\tgo updaterFunc()\n\t\t\t}\n\t\t}),\n\t}\n\n\t\/\/ now trigger the remote kite so it sends us periodically an heartbeat\n\tremote.GoWithTimeout(\"kite.heartbeat\", 4*time.Second, heartbeatArgs...)\n\n\tk.log.Info(\"Kite registered: %s\", remote.Kite)\n\n\tremote.OnDisconnect(func() {\n\t\tk.log.Info(\"Kite disconnected: %s\", remote.Kite)\n\t\tevery.Stop()\n\t})\n\n\t\/\/ send response back to the kite, also identify him with the new name\n\treturn &protocol.RegisterResult{URL: args.URL}, nil\n}\n\nfunc (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {\n\t\/\/ This type is here until inversion branch is merged.\n\t\/\/ Reason: We can't use the same struct for marshaling and unmarshaling.\n\t\/\/ TODO use the struct in protocol\n\ttype GetKitesArgs struct {\n\t\tQuery *protocol.KontrolQuery `json:\"query\"`\n\t}\n\n\tvar args GetKitesArgs\n\tr.Args.One().MustUnmarshal(&args)\n\n\tquery := args.Query\n\n\t\/\/ audience will go into the token as \"aud\" claim.\n\taudience := getAudience(query)\n\n\t\/\/ Generate token once here because we are using the same token for every\n\t\/\/ kite we return and generating many tokens is really slow.\n\ttoken, err := generateToken(audience, r.Username,\n\t\tk.Kite.Kite().Username, k.privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get kites from the storage\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach tokens to kites\n\tkites.Attach(token)\n\n\treturn &protocol.GetKitesResult{\n\t\tKites: kites,\n\t}, nil\n}\n\nfunc (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {\n\tvar query *protocol.KontrolQuery\n\terr := r.Args.One().Unmarshal(&query)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Invalid query\")\n\t}\n\n\t\/\/ check if it's exist\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(kites) > 1 {\n\t\treturn nil, errors.New(\"query matches more than one kite\")\n\t}\n\n\taudience := getAudience(query)\n\n\treturn generateToken(audience, r.Username, k.Kite.Kite().Username, k.privateKey)\n}\n\nfunc (k *Kontrol) handleMachine(r *kite.Request) (interface{}, error) {\n\tif k.MachineAuthenticate != nil {\n\t\tif err := k.MachineAuthenticate(r); err != nil {\n\t\t\treturn nil, errors.New(\"cannot authenticate user\")\n\t\t}\n\t}\n\n\tusername := r.Args.One().MustString() \/\/ username should be send as an argument\n\treturn k.registerUser(username)\n}\n<commit_msg>kontrol\/handlers: disable those logs which were debugs previously<commit_after>package kontrol\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/kite\/dnode\"\n\t\"github.com\/koding\/kite\/kontrol\/onceevery\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n)\n\nfunc (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {\n\tk.log.Info(\"Register request from: %s\", r.Client.Kite)\n\n\tif r.Args.One().MustMap()[\"url\"].MustString() == \"\" {\n\t\treturn nil, errors.New(\"invalid url\")\n\t}\n\n\tvar args struct {\n\t\tURL string `json:\"url\"`\n\t}\n\tr.Args.One().MustUnmarshal(&args)\n\tif args.URL == \"\" {\n\t\treturn nil, errors.New(\"empty url\")\n\t}\n\n\t\/\/ Only accept requests with kiteKey because we need this info\n\t\/\/ for generating tokens for this kite.\n\tif r.Auth.Type != \"kiteKey\" {\n\t\treturn nil, fmt.Errorf(\"Unexpected authentication type: %s\", r.Auth.Type)\n\t}\n\n\tkiteURL := args.URL\n\tremote := r.Client\n\n\tif err := validateKiteKey(&remote.Kite); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue := &kontrolprotocol.RegisterValue{\n\t\tURL: kiteURL,\n\t}\n\n\t\/\/ Register first by adding the value to the storage. Return if there is\n\t\/\/ any error.\n\tif err := k.storage.Upsert(&remote.Kite, value); err != nil {\n\t\tk.log.Error(\"storage add '%s' error: %s\", remote.Kite, err)\n\t\treturn nil, errors.New(\"internal error - register\")\n\t}\n\n\tevery := onceevery.New(UpdateInterval)\n\n\tping := make(chan struct{}, 1)\n\tclosed := false\n\n\tupdaterFunc := func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ping:\n\t\t\t\tk.log.Debug(\"Kite is active, got a ping %s\", remote.Kite)\n\t\t\t\tevery.Do(func() {\n\t\t\t\t\tk.log.Info(\"Kite is active, updating the value %s\", remote.Kite)\n\t\t\t\t\terr := k.storage.Update(&remote.Kite, value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tk.log.Error(\"storage update '%s' error: %s\", remote.Kite, err)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\tcase <-time.After(HeartbeatInterval + HeartbeatDelay):\n\t\t\t\tk.log.Info(\"Kite didn't sent any heartbeat %s.\", remote.Kite)\n\t\t\t\tclosed = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tgo updaterFunc()\n\n\theartbeatArgs := []interface{}{\n\t\tHeartbeatInterval \/ time.Second,\n\t\tdnode.Callback(func(args *dnode.Partial) {\n\t\t\tk.log.Debug(\"Kite send us an heartbeat. %s\", remote.Kite)\n\n\t\t\tk.clientLocks.Get(remote.Kite.ID).Lock()\n\t\t\tdefer k.clientLocks.Get(remote.Kite.ID).Unlock()\n\n\t\t\tselect {\n\t\t\tcase ping <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\t\/\/ seems we miss a heartbeat, so start it again!\n\t\t\tif closed {\n\t\t\t\tclosed = false\n\t\t\t\tk.log.Warning(\"Updater was closed, but we are still getting heartbeats. Starting again %s\",\n\t\t\t\t\tremote.Kite)\n\n\t\t\t\t\/\/ it might be removed because the ttl cleaner would come\n\t\t\t\t\/\/ before us, so try to add it again, the updater will than\n\t\t\t\t\/\/ continue to update it afterwards.\n\t\t\t\tk.storage.Upsert(&remote.Kite, value)\n\t\t\t\tgo updaterFunc()\n\t\t\t}\n\t\t}),\n\t}\n\n\t\/\/ now trigger the remote kite so it sends us periodically an heartbeat\n\tremote.GoWithTimeout(\"kite.heartbeat\", 4*time.Second, heartbeatArgs...)\n\n\tk.log.Info(\"Kite registered: %s\", remote.Kite)\n\n\tremote.OnDisconnect(func() {\n\t\tk.log.Info(\"Kite disconnected: %s\", remote.Kite)\n\t\tevery.Stop()\n\t})\n\n\t\/\/ send response back to the kite, also identify him with the new name\n\treturn &protocol.RegisterResult{URL: args.URL}, nil\n}\n\nfunc (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {\n\t\/\/ This type is here until inversion branch is merged.\n\t\/\/ Reason: We can't use the same struct for marshaling and unmarshaling.\n\t\/\/ TODO use the struct in protocol\n\ttype GetKitesArgs struct {\n\t\tQuery *protocol.KontrolQuery `json:\"query\"`\n\t}\n\n\tvar args GetKitesArgs\n\tr.Args.One().MustUnmarshal(&args)\n\n\tquery := args.Query\n\n\t\/\/ audience will go into the token as \"aud\" claim.\n\taudience := getAudience(query)\n\n\t\/\/ Generate token once here because we are using the same token for every\n\t\/\/ kite we return and generating many tokens is really slow.\n\ttoken, err := generateToken(audience, r.Username,\n\t\tk.Kite.Kite().Username, k.privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get kites from the storage\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach tokens to kites\n\tkites.Attach(token)\n\n\treturn &protocol.GetKitesResult{\n\t\tKites: kites,\n\t}, nil\n}\n\nfunc (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {\n\tvar query *protocol.KontrolQuery\n\terr := r.Args.One().Unmarshal(&query)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Invalid query\")\n\t}\n\n\t\/\/ check if it's exist\n\tkites, err := k.storage.Get(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(kites) > 1 {\n\t\treturn nil, errors.New(\"query matches more than one kite\")\n\t}\n\n\taudience := getAudience(query)\n\n\treturn generateToken(audience, r.Username, k.Kite.Kite().Username, k.privateKey)\n}\n\nfunc (k *Kontrol) handleMachine(r *kite.Request) (interface{}, error) {\n\tif k.MachineAuthenticate != nil {\n\t\tif err := k.MachineAuthenticate(r); err != nil {\n\t\t\treturn nil, errors.New(\"cannot authenticate user\")\n\t\t}\n\t}\n\n\tusername := r.Args.One().MustString() \/\/ username should be send as an argument\n\treturn k.registerUser(username)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kvscheduler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/unrolled\/render\"\n\n\t\"github.com\/ligato\/cn-infra\/rpc\/rest\"\n\t. \"github.com\/ligato\/cn-infra\/kvscheduler\/api\"\n\t\"github.com\/ligato\/cn-infra\/kvscheduler\/internal\/graph\"\n)\n\nconst (\n\t\/\/ prefix used for REST urls of the scheduler.\n\turlPrefix = \"\/scheduler\/\"\n\n\t\/\/ txnHistoryURL is URL used to obtain the transaction history.\n\ttxnHistoryURL = urlPrefix + \"txn-history\"\n\n\t\/\/ sinceArg is the name of the argument used to define the start of the time\n\t\/\/ window for the transaction history to display.\n\tsinceArg = \"since\"\n\n\t\/\/ untilArg is the name of the argument used to define the end of the time\n\t\/\/ window for the transaction history to display.\n\tuntilArg = \"until\"\n\n\t\/\/ seqNumArg is the name of the argument used to define the sequence number\n\t\/\/ of the transaction to display (txnHistoryURL).\n\tseqNumArg = \"seq-num\"\n\n\t\/\/ keyTimelineURL is URL used to obtain timeline of value changes for a given key.\n\tkeyTimelineURL = urlPrefix + \"key-timeline\"\n\n\t\/\/ keyArg is the name of the argument used to define key for \"key-timeline\" API.\n\tkeyArg = \"key\"\n\n\t\/\/ graphSnapshotURL is URL used to obtain graph snapshot from a given point in time.\n\tgraphSnapshotURL = urlPrefix + \"graph-snapshot\"\n\n\t\/\/ flagStatsURL is URL used to obtain flag statistics.\n\tflagStatsURL = urlPrefix + \"flag-stats\"\n\n\t\/\/ flagArg is the name of the argument used to define flag for \"flag-stats\" API.\n\tflagArg = \"flag\"\n\n\t\/\/ prefixArg is the name of the argument used to define prefix to filter keys\n\t\/\/ for \"flag-stats\" API.\n\tprefixArg = \"prefix\"\n\n\t\/\/ time is the name of the argument used to define point in time for a graph snapshot\n\t\/\/ to retrieve.\n\ttimeArg = \"time\"\n\n\t\/\/ downstreamResyncURL is URL used to trigger downstream-resync.\n\tdownstreamResyncURL = urlPrefix + \"downstream-resync\"\n\n\t\/\/ dumpURL is URL used to dump either SB or scheduler's internal state of kv-pairs\n\t\/\/ under the given descriptor.\n\tdumpURL = urlPrefix + \"dump\"\n\n\t\/\/ descriptorArg is the name of the argument used to define descriptor for \"dump\" API.\n\tdescriptorArg = \"descriptor\"\n\n\t\/\/ internalArg is the name of the argument used for \"dump\" API to tell whether\n\t\/\/ to dump SB or the scheduler's internal view of SB.\n\tinternalArg = \"internal\"\n)\n\n\/\/ registerHandlers registers all supported REST APIs.\nfunc (scheduler *Scheduler) registerHandlers(http rest.HTTPHandlers) {\n\tif http == nil {\n\t\tscheduler.Log.Warn(\"No http handler provided, skipping registration of KVScheduler REST handlers\")\n\t\treturn\n\t}\n\thttp.RegisterHTTPHandler(txnHistoryURL, scheduler.txnHistoryGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(keyTimelineURL, scheduler.keyTimelineGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(graphSnapshotURL, scheduler.graphSnapshotGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(flagStatsURL, scheduler.flagStatsGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(downstreamResyncURL, scheduler.downstreamResyncPostHandler, \"POST\")\n\thttp.RegisterHTTPHandler(dumpURL, scheduler.dumpGetHandler, \"GET\")\n}\n\n\/\/ txnHistoryGetHandler is the GET handler for \"txn-history\" API.\nfunc (scheduler *Scheduler) txnHistoryGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tvar since, until time.Time\n\t\tvar seqNum int\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse optional *seq-num* argument\n\t\tif seqNumStr, withSeqNum := args[seqNumArg]; withSeqNum && len(seqNumStr) == 1 {\n\t\t\tvar err error\n\t\t\tseqNum, err = strconv.Atoi(seqNumStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ sequence number takes precedence over the since-until time window\n\t\t\ttxn := scheduler.getRecordedTransaction(uint(seqNum))\n\t\t\tif txn == nil {\n\t\t\t\tformatter.JSON(w, http.StatusNotFound, \"transaction with such sequence is not recorded\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tformatter.Text(w, http.StatusOK, txn.StringWithOpts(false, 0))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ parse optional *until* argument\n\t\tif untilStr, withUntil := args[untilArg]; withUntil && len(untilStr) == 1 {\n\t\t\tvar err error\n\t\t\tuntil, err = stringToTime(untilStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse optional *since* argument\n\t\tif sinceStr, withSince := args[sinceArg]; withSince && len(sinceStr) == 1 {\n\t\t\tvar err error\n\t\t\tsince, err = stringToTime(sinceStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\ttxnHistory := scheduler.getTransactionHistory(since, until)\n\t\tformatter.Text(w, http.StatusOK, txnHistory.StringWithOpts(false, 0))\n\t}\n}\n\n\/\/ keyTimelineGetHandler is the GET handler for \"key-timeline\" API.\nfunc (scheduler *Scheduler) keyTimelineGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse mandatory *key* argument\n\t\tif keys, withKey := args[keyArg]; withKey && len(keys) == 1 {\n\t\t\tgraphR := scheduler.graph.Read()\n\t\t\tdefer graphR.Release()\n\n\t\t\ttimeline := graphR.GetNodeTimeline(keys[0])\n\t\t\tformatter.JSON(w, http.StatusOK, timeline)\n\t\t\treturn\n\t\t}\n\n\t\terr := errors.New(\"missing key argument\")\n\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n}\n\n\/\/ graphSnapshotGetHandler is the GET handler for \"graph-snapshot\" API.\nfunc (scheduler *Scheduler) graphSnapshotGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\ttimeVal := time.Now()\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse optional *time* argument\n\t\tif timeStr, withTime := args[timeArg]; withTime && len(timeStr) == 1 {\n\t\t\tvar err error\n\t\t\ttimeVal, err = stringToTime(timeStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tgraphR := scheduler.graph.Read()\n\t\tdefer graphR.Release()\n\n\t\tsnapshot := graphR.GetSnapshot(timeVal)\n\t\tformatter.JSON(w, http.StatusOK, snapshot)\n\t}\n}\n\n\/\/ flagStatsGetHandler is the GET handler for \"flag-stats\" API.\nfunc (scheduler *Scheduler) flagStatsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\t\tvar prefixes []string\n\n\t\t\/\/ parse repeated *prefix* argument\n\t\tprefixes, _ = args[prefixArg]\n\n\t\tif flags, withFlag := args[flagArg]; withFlag && len(flags) == 1 {\n\t\t\tgraphR := scheduler.graph.Read()\n\t\t\tdefer graphR.Release()\n\n\t\t\tstats := graphR.GetFlagStats(flags[0], func(key string) bool {\n\t\t\t\tif len(prefixes) == 0 {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tfor _, prefix := range prefixes {\n\t\t\t\t\tif strings.HasPrefix(key, prefix) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t})\n\t\t\tformatter.JSON(w, http.StatusOK, stats)\n\t\t\treturn\n\t\t}\n\n\t\terr := errors.New(\"missing flag argument\")\n\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n}\n\n\/\/ downstreamResyncPostHandler is the POST handler for \"downstream-resync\" API.\nfunc (scheduler *Scheduler) downstreamResyncPostHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tctx := context.Background()\n\t\tctx = WithDownstreamResync(ctx)\n\t\tkvErrors, txnError := scheduler.StartNBTransaction().Commit(ctx)\n\t\tif txnError != nil {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, txnError)\n\t\t\treturn\n\t\t}\n\t\tif len(kvErrors) > 0 {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, kvErrors)\n\t\t\treturn\n\t\t}\n\t\tformatter.Text(w, http.StatusOK, \"SB was successfully synchronized with KVScheduler\")\n\t\treturn\n\t}\n}\n\n\/\/ dumpGetHandler is the GET handler for \"dump\" API.\nfunc (scheduler *Scheduler) dumpGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse mandatory *descriptor* argument\n\t\tdescriptors, withDescriptor := args[descriptorArg]\n\t\tif !withDescriptor {\n\t\t\terr := errors.New(\"missing descriptor argument\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tif len(descriptors) != 1 {\n\t\t\terr := errors.New(\"descriptor argument listed more than once\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tdescriptor := descriptors[0]\n\n\t\t\/\/ parse optional *internal* argument\n\t\tinternalDump := false\n\t\tif internalStr, withInternal := args[internalArg]; withInternal && len(internalStr) == 1 {\n\t\t\tinternalVal := internalStr[0]\n\t\t\tif internalVal == \"true\" || internalVal == \"1\" {\n\t\t\t\tinternalDump = true\n\t\t\t}\n\t\t}\n\n\t\t\/\/ pause transaction processing\n\t\tif !internalDump {\n\t\t\tscheduler.txnLock.Lock()\n\t\t\tdefer scheduler.txnLock.Unlock()\n\t\t}\n\n\t\tgraphR := scheduler.graph.Read()\n\t\tdefer graphR.Release()\n\n\t\t\/\/ dump from the in-memory graph first (for SB Dump it is used for correlation)\n\t\tinMemNodes := nodesToKVPairsWithMetadata(\n\t\t\tgraphR.GetNodes(nil,\n\t\t\t\tgraph.WithFlags(&DescriptorFlag{descriptor}),\n\t\t\t\tgraph.WithoutFlags(&PendingFlag{}, &DerivedFlag{})))\n\n\t\tif internalDump {\n\t\t\t\/\/ return the scheduler's view of SB for the given descriptor\n\t\t\tformatter.JSON(w, http.StatusOK, inMemNodes)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ obtain Dump handler from the descriptor\n\t\tkvDescriptor := scheduler.registry.GetDescriptor(descriptor)\n\t\tif kvDescriptor == nil {\n\t\t\terr := errors.New(\"descriptor is not registered\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tif kvDescriptor.Dump == nil {\n\t\t\terr := errors.New(\"descriptor does not support Dump operation\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ dump the state directly from SB via descriptor\n\t\tdump, err := kvDescriptor.Dump(inMemNodes)\n\t\tif err != nil {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tformatter.JSON(w, http.StatusOK, dump)\n\t\treturn\n\t}\n}\n\n\/\/ stringToTime converts Unix timestamp from string to time.Time.\nfunc stringToTime(s string) (time.Time, error) {\n\tsec, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn time.Unix(sec, 0), nil\n}\n<commit_msg>Add missing newline.<commit_after>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kvscheduler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/unrolled\/render\"\n\n\t\"github.com\/ligato\/cn-infra\/rpc\/rest\"\n\t. \"github.com\/ligato\/cn-infra\/kvscheduler\/api\"\n\t\"github.com\/ligato\/cn-infra\/kvscheduler\/internal\/graph\"\n)\n\nconst (\n\t\/\/ prefix used for REST urls of the scheduler.\n\turlPrefix = \"\/scheduler\/\"\n\n\t\/\/ txnHistoryURL is URL used to obtain the transaction history.\n\ttxnHistoryURL = urlPrefix + \"txn-history\"\n\n\t\/\/ sinceArg is the name of the argument used to define the start of the time\n\t\/\/ window for the transaction history to display.\n\tsinceArg = \"since\"\n\n\t\/\/ untilArg is the name of the argument used to define the end of the time\n\t\/\/ window for the transaction history to display.\n\tuntilArg = \"until\"\n\n\t\/\/ seqNumArg is the name of the argument used to define the sequence number\n\t\/\/ of the transaction to display (txnHistoryURL).\n\tseqNumArg = \"seq-num\"\n\n\t\/\/ keyTimelineURL is URL used to obtain timeline of value changes for a given key.\n\tkeyTimelineURL = urlPrefix + \"key-timeline\"\n\n\t\/\/ keyArg is the name of the argument used to define key for \"key-timeline\" API.\n\tkeyArg = \"key\"\n\n\t\/\/ graphSnapshotURL is URL used to obtain graph snapshot from a given point in time.\n\tgraphSnapshotURL = urlPrefix + \"graph-snapshot\"\n\n\t\/\/ flagStatsURL is URL used to obtain flag statistics.\n\tflagStatsURL = urlPrefix + \"flag-stats\"\n\n\t\/\/ flagArg is the name of the argument used to define flag for \"flag-stats\" API.\n\tflagArg = \"flag\"\n\n\t\/\/ prefixArg is the name of the argument used to define prefix to filter keys\n\t\/\/ for \"flag-stats\" API.\n\tprefixArg = \"prefix\"\n\n\t\/\/ time is the name of the argument used to define point in time for a graph snapshot\n\t\/\/ to retrieve.\n\ttimeArg = \"time\"\n\n\t\/\/ downstreamResyncURL is URL used to trigger downstream-resync.\n\tdownstreamResyncURL = urlPrefix + \"downstream-resync\"\n\n\t\/\/ dumpURL is URL used to dump either SB or scheduler's internal state of kv-pairs\n\t\/\/ under the given descriptor.\n\tdumpURL = urlPrefix + \"dump\"\n\n\t\/\/ descriptorArg is the name of the argument used to define descriptor for \"dump\" API.\n\tdescriptorArg = \"descriptor\"\n\n\t\/\/ internalArg is the name of the argument used for \"dump\" API to tell whether\n\t\/\/ to dump SB or the scheduler's internal view of SB.\n\tinternalArg = \"internal\"\n)\n\n\/\/ registerHandlers registers all supported REST APIs.\nfunc (scheduler *Scheduler) registerHandlers(http rest.HTTPHandlers) {\n\tif http == nil {\n\t\tscheduler.Log.Warn(\"No http handler provided, skipping registration of KVScheduler REST handlers\")\n\t\treturn\n\t}\n\thttp.RegisterHTTPHandler(txnHistoryURL, scheduler.txnHistoryGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(keyTimelineURL, scheduler.keyTimelineGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(graphSnapshotURL, scheduler.graphSnapshotGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(flagStatsURL, scheduler.flagStatsGetHandler, \"GET\")\n\thttp.RegisterHTTPHandler(downstreamResyncURL, scheduler.downstreamResyncPostHandler, \"POST\")\n\thttp.RegisterHTTPHandler(dumpURL, scheduler.dumpGetHandler, \"GET\")\n}\n\n\/\/ txnHistoryGetHandler is the GET handler for \"txn-history\" API.\nfunc (scheduler *Scheduler) txnHistoryGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tvar since, until time.Time\n\t\tvar seqNum int\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse optional *seq-num* argument\n\t\tif seqNumStr, withSeqNum := args[seqNumArg]; withSeqNum && len(seqNumStr) == 1 {\n\t\t\tvar err error\n\t\t\tseqNum, err = strconv.Atoi(seqNumStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ sequence number takes precedence over the since-until time window\n\t\t\ttxn := scheduler.getRecordedTransaction(uint(seqNum))\n\t\t\tif txn == nil {\n\t\t\t\tformatter.JSON(w, http.StatusNotFound, \"transaction with such sequence is not recorded\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tformatter.Text(w, http.StatusOK, txn.StringWithOpts(false, 0))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ parse optional *until* argument\n\t\tif untilStr, withUntil := args[untilArg]; withUntil && len(untilStr) == 1 {\n\t\t\tvar err error\n\t\t\tuntil, err = stringToTime(untilStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse optional *since* argument\n\t\tif sinceStr, withSince := args[sinceArg]; withSince && len(sinceStr) == 1 {\n\t\t\tvar err error\n\t\t\tsince, err = stringToTime(sinceStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\ttxnHistory := scheduler.getTransactionHistory(since, until)\n\t\tformatter.Text(w, http.StatusOK, txnHistory.StringWithOpts(false, 0))\n\t}\n}\n\n\/\/ keyTimelineGetHandler is the GET handler for \"key-timeline\" API.\nfunc (scheduler *Scheduler) keyTimelineGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse mandatory *key* argument\n\t\tif keys, withKey := args[keyArg]; withKey && len(keys) == 1 {\n\t\t\tgraphR := scheduler.graph.Read()\n\t\t\tdefer graphR.Release()\n\n\t\t\ttimeline := graphR.GetNodeTimeline(keys[0])\n\t\t\tformatter.JSON(w, http.StatusOK, timeline)\n\t\t\treturn\n\t\t}\n\n\t\terr := errors.New(\"missing key argument\")\n\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n}\n\n\/\/ graphSnapshotGetHandler is the GET handler for \"graph-snapshot\" API.\nfunc (scheduler *Scheduler) graphSnapshotGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\ttimeVal := time.Now()\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse optional *time* argument\n\t\tif timeStr, withTime := args[timeArg]; withTime && len(timeStr) == 1 {\n\t\t\tvar err error\n\t\t\ttimeVal, err = stringToTime(timeStr[0])\n\t\t\tif err != nil {\n\t\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tgraphR := scheduler.graph.Read()\n\t\tdefer graphR.Release()\n\n\t\tsnapshot := graphR.GetSnapshot(timeVal)\n\t\tformatter.JSON(w, http.StatusOK, snapshot)\n\t}\n}\n\n\/\/ flagStatsGetHandler is the GET handler for \"flag-stats\" API.\nfunc (scheduler *Scheduler) flagStatsGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\t\tvar prefixes []string\n\n\t\t\/\/ parse repeated *prefix* argument\n\t\tprefixes, _ = args[prefixArg]\n\n\t\tif flags, withFlag := args[flagArg]; withFlag && len(flags) == 1 {\n\t\t\tgraphR := scheduler.graph.Read()\n\t\t\tdefer graphR.Release()\n\n\t\t\tstats := graphR.GetFlagStats(flags[0], func(key string) bool {\n\t\t\t\tif len(prefixes) == 0 {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tfor _, prefix := range prefixes {\n\t\t\t\t\tif strings.HasPrefix(key, prefix) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t})\n\t\t\tformatter.JSON(w, http.StatusOK, stats)\n\t\t\treturn\n\t\t}\n\n\t\terr := errors.New(\"missing flag argument\")\n\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n}\n\n\/\/ downstreamResyncPostHandler is the POST handler for \"downstream-resync\" API.\nfunc (scheduler *Scheduler) downstreamResyncPostHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tctx := context.Background()\n\t\tctx = WithDownstreamResync(ctx)\n\t\tkvErrors, txnError := scheduler.StartNBTransaction().Commit(ctx)\n\t\tif txnError != nil {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, txnError)\n\t\t\treturn\n\t\t}\n\t\tif len(kvErrors) > 0 {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, kvErrors)\n\t\t\treturn\n\t\t}\n\t\tformatter.Text(w, http.StatusOK, \"SB was successfully synchronized with KVScheduler\\n\")\n\t\treturn\n\t}\n}\n\n\/\/ dumpGetHandler is the GET handler for \"dump\" API.\nfunc (scheduler *Scheduler) dumpGetHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\targs := req.URL.Query()\n\n\t\t\/\/ parse mandatory *descriptor* argument\n\t\tdescriptors, withDescriptor := args[descriptorArg]\n\t\tif !withDescriptor {\n\t\t\terr := errors.New(\"missing descriptor argument\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tif len(descriptors) != 1 {\n\t\t\terr := errors.New(\"descriptor argument listed more than once\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tdescriptor := descriptors[0]\n\n\t\t\/\/ parse optional *internal* argument\n\t\tinternalDump := false\n\t\tif internalStr, withInternal := args[internalArg]; withInternal && len(internalStr) == 1 {\n\t\t\tinternalVal := internalStr[0]\n\t\t\tif internalVal == \"true\" || internalVal == \"1\" {\n\t\t\t\tinternalDump = true\n\t\t\t}\n\t\t}\n\n\t\t\/\/ pause transaction processing\n\t\tif !internalDump {\n\t\t\tscheduler.txnLock.Lock()\n\t\t\tdefer scheduler.txnLock.Unlock()\n\t\t}\n\n\t\tgraphR := scheduler.graph.Read()\n\t\tdefer graphR.Release()\n\n\t\t\/\/ dump from the in-memory graph first (for SB Dump it is used for correlation)\n\t\tinMemNodes := nodesToKVPairsWithMetadata(\n\t\t\tgraphR.GetNodes(nil,\n\t\t\t\tgraph.WithFlags(&DescriptorFlag{descriptor}),\n\t\t\t\tgraph.WithoutFlags(&PendingFlag{}, &DerivedFlag{})))\n\n\t\tif internalDump {\n\t\t\t\/\/ return the scheduler's view of SB for the given descriptor\n\t\t\tformatter.JSON(w, http.StatusOK, inMemNodes)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ obtain Dump handler from the descriptor\n\t\tkvDescriptor := scheduler.registry.GetDescriptor(descriptor)\n\t\tif kvDescriptor == nil {\n\t\t\terr := errors.New(\"descriptor is not registered\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tif kvDescriptor.Dump == nil {\n\t\t\terr := errors.New(\"descriptor does not support Dump operation\")\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ dump the state directly from SB via descriptor\n\t\tdump, err := kvDescriptor.Dump(inMemNodes)\n\t\tif err != nil {\n\t\t\tformatter.JSON(w, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tformatter.JSON(w, http.StatusOK, dump)\n\t\treturn\n\t}\n}\n\n\/\/ stringToTime converts Unix timestamp from string to time.Time.\nfunc stringToTime(s string) (time.Time, error) {\n\tsec, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn time.Unix(sec, 0), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0.txt\n *\n * Copyright 2017 OpsVision Solutions\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage signalfx\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/intelsdi-x\/snap-plugin-lib-go\/v1\/plugin\"\n\t\"github.com\/signalfx\/golib\/datapoint\"\n\t\"github.com\/signalfx\/golib\/sfxclient\"\n\t\"golang.org\/x\/net\/context\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\tNS_VENDOR = \"opsvision\"\t\t\/\/ plugin vendor\n\tNS_PLUGIN = \"signalfx\"\t\t\/\/ plugin name\n\tVERSION = 1\t\t\t\/\/ plugin version\n)\n\n\/\/ Our SignalFx object\ntype SignalFx struct {\n\ttoken string\t\t\/\/ SignalFx API token\n\thostname string\t\t\/\/ Hostname\n\tnamespace string\t\t\/\/ Metric namespace\n}\n\n\/\/ Constructor\nfunc New() *SignalFx {\n\treturn new(SignalFx)\n}\n\n\/**\n * Returns the configPolicy for the plugin\n *\/\nfunc (s *SignalFx) GetConfigPolicy() (plugin.ConfigPolicy, error) {\n\tpolicy := plugin.NewConfigPolicy()\n\n\t\/\/ The SignalFx token\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"token\",\n\t\ttrue)\n\n\t\/\/ The hostname to use (defaults to local hostname)\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"hostname\",\n\t\tfalse)\n\n\t\/\/ The file name to use when debugging\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"debug-file\",\n\t\tfalse)\n\n\treturn *policy, nil\n}\n\n\/**\n * Publish metrics to SignalFx using the TOKEN found in the config\n *\/\nfunc (s *SignalFx) Publish(mts []plugin.Metric, cfg plugin.Config) error {\n\t\/\/ Enable debugging if the debug-file config property was set\n\tfileName, err := cfg.GetString(\"debug-file\")\n\tif err != nil {\n\t\t\/\/ Open the output file\n\t\tf, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\t\/\/ Set logging output for debugging\n\t\tlog.SetOutput(f)\n\t}\n\n\t\/\/ Fetch the token\n\ttoken, err := cfg.GetString(\"token\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.token = token\n\n\t\/\/ Attempt to set the hostname\n\thostname, err := cfg.GetString(\"hostname\")\n\tif err != nil {\n\t\thostname, err = os.Hostname()\n\t\tif err != nil {\n\t\t\thostname = \"localhost\"\n\t\t}\n\t}\n\ts.hostname = hostname\n\n\t\/\/ Iterate over the supplied metrics\n\tfor _, m := range mts {\n\t\tvar buffer bytes.Buffer\n\n\t\t\/\/ Convert the namespace to dot notation\n\t\tfmt.Fprintf(&buffer, \"snap.%s\", strings.Join(m.Namespace.Strings(), \".\"))\n\t\ts.namespace = buffer.String()\n\n\t\t\/\/ Do some type conversion and send the data\n\t\tswitch v := m.Data.(type) {\n\t\tcase uint:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase uint32:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase uint64:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int32:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int64:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase float32:\n\t\t\ts.sendFloatValue(float64(v))\n\t\tcase float64:\n\t\t\ts.sendFloatValue(float64(v))\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T: %v\\n\", v, v)\n\t\t\tlog.Printf(\"Contact the plugin author if you think this is an error\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/**\n * Method for sending int64 values to SignalFx\n *\/\nfunc (s *SignalFx) sendIntValue(value int64) {\n\tlog.Printf(\"Sending [int64] %s -> %v\", s.namespace, value)\n\n\tclient := sfxclient.NewHTTPDatapointSink()\n\tclient.AuthToken = s.token\n\tctx := context.Background()\n\tclient.AddDatapoints(ctx, []*datapoint.Datapoint{\n\t\tsfxclient.Gauge(s.namespace, map[string]string{\n\t\t\t\"host\": s.hostname,\n\t\t}, value),\n\t})\n}\n\n\/**\n * Method for sending float64 values to SignalFx\n *\/\nfunc (s *SignalFx) sendFloatValue(value float64) {\n\tlog.Printf(\"Sending [float64] %s -> %v\", s.namespace, value)\n\n\tclient := sfxclient.NewHTTPDatapointSink()\n\tclient.AuthToken = s.token\n\tctx := context.Background()\n\tclient.AddDatapoints(ctx, []*datapoint.Datapoint{\n\t\tsfxclient.GaugeF(s.namespace, map[string]string{\n\t\t\t\"host\": s.hostname,\n\t\t}, value),\n\t})\n}\n<commit_msg>Fixed up the comments per lint<commit_after>\/*\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0.txt\n *\n * Copyright 2017 OpsVision Solutions\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage signalfx\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/intelsdi-x\/snap-plugin-lib-go\/v1\/plugin\"\n\t\"github.com\/signalfx\/golib\/datapoint\"\n\t\"github.com\/signalfx\/golib\/sfxclient\"\n\t\"golang.org\/x\/net\/context\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\tNS_VENDOR = \"opsvision\"\t\t\/\/ plugin vendor\n\tNS_PLUGIN = \"signalfx\"\t\t\/\/ plugin name\n\tVERSION = 1\t\t\t\/\/ plugin version\n)\n\n\/\/ Our SignalFx object\ntype SignalFx struct {\n\ttoken string\t\t\/\/ SignalFx API token\n\thostname string\t\t\/\/ Hostname\n\tnamespace string\t\t\/\/ Metric namespace\n}\n\n\/\/ New() - Constructor\nfunc New() *SignalFx {\n\treturn new(SignalFx)\n}\n\n\/**\n * GetConfigPolicy() - Returns the configPolicy for the plugin\n *\/\nfunc (s *SignalFx) GetConfigPolicy() (plugin.ConfigPolicy, error) {\n\tpolicy := plugin.NewConfigPolicy()\n\n\t\/\/ The SignalFx token\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"token\",\n\t\ttrue)\n\n\t\/\/ The hostname to use (defaults to local hostname)\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"hostname\",\n\t\tfalse)\n\n\t\/\/ The file name to use when debugging\n\tpolicy.AddNewStringRule([]string{NS_VENDOR, NS_PLUGIN},\n\t\t\"debug-file\",\n\t\tfalse)\n\n\treturn *policy, nil\n}\n\n\/**\n * Publish() - Publish metrics to SignalFx using the TOKEN found in the config\n *\/\nfunc (s *SignalFx) Publish(mts []plugin.Metric, cfg plugin.Config) error {\n\t\/\/ Enable debugging if the debug-file config property was set\n\tfileName, err := cfg.GetString(\"debug-file\")\n\tif err != nil {\n\t\t\/\/ Open the output file\n\t\tf, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\t\/\/ Set logging output for debugging\n\t\tlog.SetOutput(f)\n\t}\n\n\t\/\/ Fetch the token\n\ttoken, err := cfg.GetString(\"token\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.token = token\n\n\t\/\/ Attempt to set the hostname\n\thostname, err := cfg.GetString(\"hostname\")\n\tif err != nil {\n\t\thostname, err = os.Hostname()\n\t\tif err != nil {\n\t\t\thostname = \"localhost\"\n\t\t}\n\t}\n\ts.hostname = hostname\n\n\t\/\/ Iterate over the supplied metrics\n\tfor _, m := range mts {\n\t\tvar buffer bytes.Buffer\n\n\t\t\/\/ Convert the namespace to dot notation\n\t\tfmt.Fprintf(&buffer, \"snap.%s\", strings.Join(m.Namespace.Strings(), \".\"))\n\t\ts.namespace = buffer.String()\n\n\t\t\/\/ Do some type conversion and send the data\n\t\tswitch v := m.Data.(type) {\n\t\tcase uint:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase uint32:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase uint64:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int32:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase int64:\n\t\t\ts.sendIntValue(int64(v))\n\t\tcase float32:\n\t\t\ts.sendFloatValue(float64(v))\n\t\tcase float64:\n\t\t\ts.sendFloatValue(float64(v))\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T: %v\\n\", v, v)\n\t\t\tlog.Printf(\"Contact the plugin author if you think this is an error\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/**\n * Method for sending int64 values to SignalFx\n *\/\nfunc (s *SignalFx) sendIntValue(value int64) {\n\tlog.Printf(\"Sending [int64] %s -> %v\", s.namespace, value)\n\n\tclient := sfxclient.NewHTTPDatapointSink()\n\tclient.AuthToken = s.token\n\tctx := context.Background()\n\tclient.AddDatapoints(ctx, []*datapoint.Datapoint{\n\t\tsfxclient.Gauge(s.namespace, map[string]string{\n\t\t\t\"host\": s.hostname,\n\t\t}, value),\n\t})\n}\n\n\/**\n * Method for sending float64 values to SignalFx\n *\/\nfunc (s *SignalFx) sendFloatValue(value float64) {\n\tlog.Printf(\"Sending [float64] %s -> %v\", s.namespace, value)\n\n\tclient := sfxclient.NewHTTPDatapointSink()\n\tclient.AuthToken = s.token\n\tctx := context.Background()\n\tclient.AddDatapoints(ctx, []*datapoint.Datapoint{\n\t\tsfxclient.GaugeF(s.namespace, map[string]string{\n\t\t\t\"host\": s.hostname,\n\t\t}, value),\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/carlostrub\/sisyphus\"\n\t\"gopkg.in\/urfave\/cli.v2\"\n)\n\nvar (\n\tversion string\n)\n\nfunc main() {\n\n\t\/\/ Define App\n\tapp := cli.NewApp()\n\tapp.Name = \"Sisyphus\"\n\tapp.Usage = \"Intelligent Junk and Spam Mail Handler\"\n\tapp.UsageText = `sisyphus [global options] command [command options]\n\t\n\tSisyphus applies artificial intelligence to filter\n\tJunk mail in an unobtrusive way. Both, classification and learning\n\toperate directly on the Maildir of a user in a fully transparent mode,\n\twithout any need for configuration or active operation.`\n\tapp.HelpName = \"Intelligent Junk and Spam Mail Handler\"\n\tapp.Version = version\n\tapp.Copyright = \"(c) 2017, Carlo Strub. All rights reserved. This binary is licensed under a BSD 3-Clause License.\"\n\tapp.Authors = []cli.Author{\n\t\t{\n\t\t\tName: \"Carlo Strub\",\n\t\t\tEmail: \"cs@carlostrub.ch\",\n\t\t},\n\t}\n\n\tmaildirPaths := cli.StringSlice([]string{})\n\n\tvar pidfile *string\n\tpidfile = new(string)\n\n\tapp.Flags = []cli.Flag{\n\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"maildir, d\",\n\t\t\tValue: &maildirPaths,\n\t\t\tEnvVar: \"SISYPHUS_DIRS\",\n\t\t\tUsage: \"Call multiple Maildirs by repeating this flag, i.e. --maildir \\\".\/Maildir\\\" --maildir \\\".\/Maildir2\\\"\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pidfile, p\",\n\t\t\tValue: \"\/tmp\/sisyphus.pid\",\n\t\t\tEnvVar: \"SISYPHUS_PID\",\n\t\t\tUsage: \"Location of PID file\",\n\t\t\tDestination: pidfile,\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tAliases: []string{\"u\"},\n\t\t\tUsage: \"run sisyphus\",\n\t\t\tAction: func(c *cli.Context) {\n\n\t\t\t\t\/\/ check if daemon already running.\n\t\t\t\tif _, err := os.Stat(*pidfile); err == nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"pidfile\": *pidfile,\n\t\t\t\t\t}).Fatal(\"Already running or pidfile exists\")\n\t\t\t\t}\n\n\t\t\t\tfmt.Print(`\n\n\n\t███████╗██╗███████╗██╗ ██╗██████╗ ██╗ ██╗██╗ ██╗███████╗\n\t██╔════╝██║██╔════╝╚██╗ ██╔╝██╔══██╗██║ ██║██║ ██║██╔════╝\n\t███████╗██║███████╗ ╚████╔╝ ██████╔╝███████║██║ ██║███████╗\n\t╚════██║██║╚════██║ ╚██╔╝ ██╔═══╝ ██╔══██║██║ ██║╚════██║\n\t███████║██║███████║ ██║ ██║ ██║ ██║╚██████╔╝███████║\n\t╚══════╝╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝\n\n\tby Carlo Strub <cs@carlostrub.ch>\n\n\n`)\n\t\t\t\t\/\/ Make arrangement to remove PID file upon receiving the SIGTERM from kill command\n\t\t\t\tch := make(chan os.Signal, 1)\n\t\t\t\tsignal.Notify(ch, os.Interrupt, os.Kill, syscall.SIGTERM)\n\n\t\t\t\tgo func() {\n\t\t\t\t\tsignalType := <-ch\n\t\t\t\t\tsignal.Stop(ch)\n\t\t\t\t\tlog.Info(\"Exit command received. Exiting sisyphus...\")\n\n\t\t\t\t\t\/\/ this is a good place to flush everything to disk\n\t\t\t\t\t\/\/ before terminating.\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"signal\": signalType,\n\t\t\t\t\t}).Info(\"Received signal\")\n\n\t\t\t\t\t\/\/ remove PID file\n\t\t\t\t\tos.Remove(*pidfile)\n\n\t\t\t\t\tos.Exit(0)\n\n\t\t\t\t}()\n\n\t\t\t\tif len(maildirPaths) < 1 {\n\t\t\t\t\tlog.Fatal(\"No Maildir set. Please check the manual.\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Populate maildir with the maildirs given by setting the flag.\n\t\t\t\tvar maildirs []sisyphus.Maildir\n\t\t\t\tfor _, val := range maildirPaths {\n\t\t\t\t\tmaildirs = append(maildirs, sisyphus.Maildir(val))\n\t\t\t\t}\n\n\t\t\t\t\/\/ Load all mails\n\t\t\t\tmails, err := sisyphus.LoadMails(maildirs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot load mails\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Open all databases\n\t\t\t\tdbs, err := sisyphus.LoadDatabases(maildirs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot load databases\")\n\t\t\t\t}\n\t\t\t\tdefer sisyphus.CloseDatabases(dbs)\n\n\t\t\t\t\/\/ Learn at startup\n\t\t\t\tfor _, d := range maildirs {\n\t\t\t\t\tdb := dbs[d]\n\t\t\t\t\tm := mails[d]\n\t\t\t\t\tfor _, val := range m {\n\t\t\t\t\t\terr := val.Learn(db, d)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\t\"mail\": val.Key,\n\t\t\t\t\t\t\t}).Warning(\"Cannot learn mail\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Info(\"All mails learned\")\n\n\t\t\t\t\/\/ Classify whenever a mail arrives in \"new\"\n\t\t\t\twatcher, err := fsnotify.NewWatcher()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot setup directory watcher\")\n\t\t\t\t}\n\t\t\t\tdefer watcher.Close()\n\n\t\t\t\tdone := make(chan bool)\n\t\t\t\tgo func() {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase event := <-watcher.Events:\n\t\t\t\t\t\t\tif event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\t\t\t\tpath := strings.Split(event.Name, \"\/new\/\")\n\t\t\t\t\t\t\t\tm := sisyphus.Mail{\n\t\t\t\t\t\t\t\t\tKey: path[1],\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\terr = m.Classify(dbs[sisyphus.Maildir(path[0])], sisyphus.Maildir(path[0]))\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\t\t}).Error(\"Classify mail\")\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase err := <-watcher.Errors:\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t}).Error(\"Problem with directory watcher\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tfor _, val := range maildirPaths {\n\t\t\t\t\terr = watcher.Add(val + \"\/new\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\"dir\": val + \"\/new\",\n\t\t\t\t\t\t}).Error(\"Cannot watch directory\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t<-done\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ See\n\t\t\t\/\/ https:\/\/www.socketloop.com\/tutorials\/golang-daemonizing-a-simple-web-server-process-example\n\t\t\t\/\/ for the process we are using to daemonize\n\t\t\tName: \"start\",\n\t\t\tAliases: []string{\"s\"},\n\t\t\tUsage: \"start sisyphus daemon in the background\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonStart()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stop\",\n\t\t\tAliases: []string{\"e\"},\n\t\t\tUsage: \"stop sisyphus daemon\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonStop()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"restart\",\n\t\t\tAliases: []string{\"r\"},\n\t\t\tUsage: \"restart sisyphus daemon\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonRestart()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"status\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"status of sisyphus\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tlog.Info(\"here, we should get statistics from the db, TBD...\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>junk is just fine<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/carlostrub\/sisyphus\"\n\t\"gopkg.in\/urfave\/cli.v2\"\n)\n\nvar (\n\tversion string\n)\n\nfunc main() {\n\n\t\/\/ Define App\n\tapp := cli.NewApp()\n\tapp.Name = \"Sisyphus\"\n\tapp.Usage = \"Intelligent Junk Mail Handler\"\n\tapp.UsageText = `sisyphus [global options] command [command options]\n\t\n\tSisyphus applies artificial intelligence to filter\n\tJunk mail in an unobtrusive way. Both, classification and learning\n\toperate directly on the Maildir of a user in a fully transparent mode,\n\twithout any need for configuration or active operation.`\n\tapp.HelpName = \"Intelligent Junk Mail Handler\"\n\tapp.Version = version\n\tapp.Copyright = \"(c) 2017, Carlo Strub. All rights reserved. This binary is licensed under a BSD 3-Clause License.\"\n\tapp.Authors = []cli.Author{\n\t\t{\n\t\t\tName: \"Carlo Strub\",\n\t\t\tEmail: \"cs@carlostrub.ch\",\n\t\t},\n\t}\n\n\tmaildirPaths := cli.StringSlice([]string{})\n\n\tvar pidfile *string\n\tpidfile = new(string)\n\n\tapp.Flags = []cli.Flag{\n\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"maildir, d\",\n\t\t\tValue: &maildirPaths,\n\t\t\tEnvVar: \"SISYPHUS_DIRS\",\n\t\t\tUsage: \"Call multiple Maildirs by repeating this flag, i.e. --maildir \\\".\/Maildir\\\" --maildir \\\".\/Maildir2\\\"\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pidfile, p\",\n\t\t\tValue: \"\/tmp\/sisyphus.pid\",\n\t\t\tEnvVar: \"SISYPHUS_PID\",\n\t\t\tUsage: \"Location of PID file\",\n\t\t\tDestination: pidfile,\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tAliases: []string{\"u\"},\n\t\t\tUsage: \"run sisyphus\",\n\t\t\tAction: func(c *cli.Context) {\n\n\t\t\t\t\/\/ check if daemon already running.\n\t\t\t\tif _, err := os.Stat(*pidfile); err == nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"pidfile\": *pidfile,\n\t\t\t\t\t}).Fatal(\"Already running or pidfile exists\")\n\t\t\t\t}\n\n\t\t\t\tfmt.Print(`\n\n\n\t███████╗██╗███████╗██╗ ██╗██████╗ ██╗ ██╗██╗ ██╗███████╗\n\t██╔════╝██║██╔════╝╚██╗ ██╔╝██╔══██╗██║ ██║██║ ██║██╔════╝\n\t███████╗██║███████╗ ╚████╔╝ ██████╔╝███████║██║ ██║███████╗\n\t╚════██║██║╚════██║ ╚██╔╝ ██╔═══╝ ██╔══██║██║ ██║╚════██║\n\t███████║██║███████║ ██║ ██║ ██║ ██║╚██████╔╝███████║\n\t╚══════╝╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝\n\n\tby Carlo Strub <cs@carlostrub.ch>\n\n\n`)\n\t\t\t\t\/\/ Make arrangement to remove PID file upon receiving the SIGTERM from kill command\n\t\t\t\tch := make(chan os.Signal, 1)\n\t\t\t\tsignal.Notify(ch, os.Interrupt, os.Kill, syscall.SIGTERM)\n\n\t\t\t\tgo func() {\n\t\t\t\t\tsignalType := <-ch\n\t\t\t\t\tsignal.Stop(ch)\n\t\t\t\t\tlog.Info(\"Exit command received. Exiting sisyphus...\")\n\n\t\t\t\t\t\/\/ this is a good place to flush everything to disk\n\t\t\t\t\t\/\/ before terminating.\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"signal\": signalType,\n\t\t\t\t\t}).Info(\"Received signal\")\n\n\t\t\t\t\t\/\/ remove PID file\n\t\t\t\t\tos.Remove(*pidfile)\n\n\t\t\t\t\tos.Exit(0)\n\n\t\t\t\t}()\n\n\t\t\t\tif len(maildirPaths) < 1 {\n\t\t\t\t\tlog.Fatal(\"No Maildir set. Please check the manual.\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Populate maildir with the maildirs given by setting the flag.\n\t\t\t\tvar maildirs []sisyphus.Maildir\n\t\t\t\tfor _, val := range maildirPaths {\n\t\t\t\t\tmaildirs = append(maildirs, sisyphus.Maildir(val))\n\t\t\t\t}\n\n\t\t\t\t\/\/ Load all mails\n\t\t\t\tmails, err := sisyphus.LoadMails(maildirs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot load mails\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Open all databases\n\t\t\t\tdbs, err := sisyphus.LoadDatabases(maildirs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot load databases\")\n\t\t\t\t}\n\t\t\t\tdefer sisyphus.CloseDatabases(dbs)\n\n\t\t\t\t\/\/ Learn at startup\n\t\t\t\tfor _, d := range maildirs {\n\t\t\t\t\tdb := dbs[d]\n\t\t\t\t\tm := mails[d]\n\t\t\t\t\tfor _, val := range m {\n\t\t\t\t\t\terr := val.Learn(db, d)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\t\"mail\": val.Key,\n\t\t\t\t\t\t\t}).Warning(\"Cannot learn mail\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Info(\"All mails learned\")\n\n\t\t\t\t\/\/ Classify whenever a mail arrives in \"new\"\n\t\t\t\twatcher, err := fsnotify.NewWatcher()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t}).Fatal(\"Cannot setup directory watcher\")\n\t\t\t\t}\n\t\t\t\tdefer watcher.Close()\n\n\t\t\t\tdone := make(chan bool)\n\t\t\t\tgo func() {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase event := <-watcher.Events:\n\t\t\t\t\t\t\tif event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\t\t\t\tpath := strings.Split(event.Name, \"\/new\/\")\n\t\t\t\t\t\t\t\tm := sisyphus.Mail{\n\t\t\t\t\t\t\t\t\tKey: path[1],\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\terr = m.Classify(dbs[sisyphus.Maildir(path[0])], sisyphus.Maildir(path[0]))\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\t\t}).Error(\"Classify mail\")\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase err := <-watcher.Errors:\n\t\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t}).Error(\"Problem with directory watcher\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tfor _, val := range maildirPaths {\n\t\t\t\t\terr = watcher.Add(val + \"\/new\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t\t\"dir\": val + \"\/new\",\n\t\t\t\t\t\t}).Error(\"Cannot watch directory\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t<-done\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\/\/ See\n\t\t\t\/\/ https:\/\/www.socketloop.com\/tutorials\/golang-daemonizing-a-simple-web-server-process-example\n\t\t\t\/\/ for the process we are using to daemonize\n\t\t\tName: \"start\",\n\t\t\tAliases: []string{\"s\"},\n\t\t\tUsage: \"start sisyphus daemon in the background\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonStart()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"stop\",\n\t\t\tAliases: []string{\"e\"},\n\t\t\tUsage: \"stop sisyphus daemon\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonStop()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"restart\",\n\t\t\tAliases: []string{\"r\"},\n\t\t\tUsage: \"restart sisyphus daemon\",\n\t\t\tAction: func(c *cli.Context) error {\n\n\t\t\t\tsisyphus.Pidfile(*pidfile).DaemonRestart()\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"status\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"status of sisyphus\",\n\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\tlog.Info(\"here, we should get statistics from the db, TBD...\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package otp\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n)\n\nvar methods = []string{\"totp\", \"hotp\"}\n\n\/\/ Supported hash algorithms.\nvar Hashes = []Hash{sha1.New, sha256.New, sha512.New, md5.New}\n\n\/\/ Key defines a set of parameters required for code generation as well as metadata.\ntype Key struct {\n\tMethod string \/\/ Initialization method. Acceptable values are either 'totp' or 'hotp' for time-based or counter-based, respectively.\n\tLabel string \/\/ Label for the key.\n\tSecret string \/\/ String representation of base32 encoded integer.\n\tIssuer string \/\/ The issuer of the key.\n\tAlgo Hash \/\/ The hash algorithm used in the HMAC. SHA1, SHA256, SHA512, and MD5 are supported.\n\tDigits int \/\/ The length of the code. 6 or 8 are acceptable.\n\tPeriod int \/\/ The number of seconds the code is valid for. Applies only to 'totp'.\n\tCounter int \/\/ The initial counter value. Applies only to 'hotp'.\n}\n\n\/\/ GetCode accepts an initialization value and returns a corresponding code.\nfunc (k Key) GetCode(iv int64) (string, error) {\n\tcode, err := GetCode(k.Secret, iv, k.Algo, k.Digits)\n\treturn code, err\n}\n<commit_msg>key docs<commit_after>package otp\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n)\n\nvar methods = []string{\"totp\", \"hotp\"}\n\n\/\/ Supported hash algorithms.\nvar Hashes = []Hash{sha1.New, sha256.New, sha512.New, md5.New}\n\n\/\/ Key represents a one-time password. It supports time-based (totp) and HMAC-based (hotp) approaches.\ntype Key struct {\n\tMethod string \/\/ Initialization method. Either 'totp' or 'hotp'.\n\tLabel string \/\/ Descriptive label..\n\tSecret string \/\/ Base32-encoded secret key.\n\tIssuer string \/\/ Key issuer.\n\tAlgo Hash \/\/ Hash algorithm. See Hashes.\n\tDigits int \/\/ Length of the code. Either 6 or 8.\n\tPeriod int \/\/ Seconds code is valid for. Applies only to 'totp'.\n\tCounter int \/\/ Initial counter value. Applies only to 'hotp'.\n}\n\n\/\/ GetCode returns a one-time password code an initial value..\nfunc (k Key) GetCode(iv int64) (string, error) {\n\tcode, err := GetCode(k.Secret, iv, k.Algo, k.Digits)\n\treturn code, err\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"time\"\n\n\tenvmanModels \"github.com\/bitrise-io\/envman\/models\"\n\tstepmanModels \"github.com\/bitrise-io\/stepman\/models\"\n)\n\nconst (\n\t\/\/ StepRunStatusCodeSuccess ...\n\tStepRunStatusCodeSuccess = 0\n\t\/\/ StepRunStatusCodeFailed ...\n\tStepRunStatusCodeFailed = 1\n\t\/\/ StepRunStatusCodeFailedSkippable ...\n\tStepRunStatusCodeFailedSkippable = 2\n\t\/\/ StepRunStatusCodeSkipped ...\n\tStepRunStatusCodeSkipped = 3\n\t\/\/ StepRunStatusCodeSkippedWithRunIf ...\n\tStepRunStatusCodeSkippedWithRunIf = 4\n\n\t\/\/ Version ...\n\tVersion = \"1.0.0\"\n)\n\n\/\/ StepListItemModel ...\ntype StepListItemModel map[string]stepmanModels.StepModel\n\n\/\/ WorkflowModel ...\ntype WorkflowModel struct {\n\tTitle string `json:\"title,omitempty\" yaml:\"title,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tSummary string `json:\"summary,omitempty\" yaml:\"summary,omitempty\"`\n\tBeforeRun []string `json:\"before_run,omitempty\" yaml:\"before_run,omitempty\"`\n\tAfterRun []string `json:\"after_run,omitempty\" yaml:\"after_run,omitempty\"`\n\tEnvironments []envmanModels.EnvironmentItemModel `json:\"envs,omitempty\" yaml:\"envs,omitempty\"`\n\tSteps []StepListItemModel `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n}\n\n\/\/ AppModel ...\ntype AppModel struct {\n\tTitle string `json:\"title,omitempty\" yaml:\"title,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tSummary string `json:\"summary,omitempty\" yaml:\"summary,omitempty\"`\n\tEnvironments []envmanModels.EnvironmentItemModel `json:\"envs,omitempty\" yaml:\"envs,omitempty\"`\n}\n\n\/\/ TriggerMapItemModel ...\ntype TriggerMapItemModel struct {\n\tPattern string `json:\"pattern,omitempty\" yaml:\"pattern,omitempty\"`\n\tIsPullRequestAllowed bool `json:\"is_pull_request_allowed,omitempty\" yaml:\"is_pull_request_allowed,omitempty\"`\n\tWorkflowID string `json:\"workflow,omitempty\" yaml:\"workflow,omitempty\"`\n}\n\n\/\/ BitriseDataModel ...\ntype BitriseDataModel struct {\n\tFormatVersion string `json:\"format_version\" yaml:\"format_version\"`\n\tDefaultStepLibSource string `json:\"default_step_lib_source,omitempty\" yaml:\"default_step_lib_source,omitempty\"`\n\tTriggerMap []TriggerMapItemModel `json:\"trigger_map,omitempty\" yaml:\"trigger_map,omitempty\"`\n\tApp AppModel `json:\"app,omitempty\" yaml:\"app,omitempty\"`\n\tWorkflows map[string]WorkflowModel `json:\"workflows,omitempty\" yaml:\"workflows,omitempty\"`\n}\n\n\/\/ StepIDData ...\n\/\/ structured representation of a composite-step-id\n\/\/ a composite step id is: step-lib-source::step-id@1.0.0\ntype StepIDData struct {\n\t\/\/ IDOrURI : ID if steplib is provided, URI if local step or in case a direct git url provided\n\tIDorURI string\n\t\/\/ Version : version in the steplib, or in case of a direct git step the tag-or-branch to use\n\tVersion string\n\t\/\/ SteplibSource : steplib source uri, or in case of local path just \"path\", and in case of direct git url just \"git\"\n\tSteplibSource string\n}\n\n\/\/ BuildRunResultsModel ...\ntype BuildRunResultsModel struct {\n\tStartTime time.Time\n\tSuccessSteps []StepRunResultsModel\n\tFailedSteps []StepRunResultsModel\n\tFailedSkippableSteps []StepRunResultsModel\n\tSkippedSteps []StepRunResultsModel\n}\n\n\/\/ StepRunResultsModel ...\ntype StepRunResultsModel struct {\n\tStepName string\n\tStatus int\n\tIdx int\n\tRunTime time.Duration\n\tError error\n\tExitCode int\n}\n<commit_msg>Title, Summary, Description added to AppModel (main config model) & reordered the three, to be in this order in every model.<commit_after>package models\n\nimport (\n\t\"time\"\n\n\tenvmanModels \"github.com\/bitrise-io\/envman\/models\"\n\tstepmanModels \"github.com\/bitrise-io\/stepman\/models\"\n)\n\nconst (\n\t\/\/ StepRunStatusCodeSuccess ...\n\tStepRunStatusCodeSuccess = 0\n\t\/\/ StepRunStatusCodeFailed ...\n\tStepRunStatusCodeFailed = 1\n\t\/\/ StepRunStatusCodeFailedSkippable ...\n\tStepRunStatusCodeFailedSkippable = 2\n\t\/\/ StepRunStatusCodeSkipped ...\n\tStepRunStatusCodeSkipped = 3\n\t\/\/ StepRunStatusCodeSkippedWithRunIf ...\n\tStepRunStatusCodeSkippedWithRunIf = 4\n\n\t\/\/ Version ...\n\tVersion = \"1.0.1\"\n)\n\n\/\/ StepListItemModel ...\ntype StepListItemModel map[string]stepmanModels.StepModel\n\n\/\/ WorkflowModel ...\ntype WorkflowModel struct {\n\tTitle string `json:\"title,omitempty\" yaml:\"title,omitempty\"`\n\tSummary string `json:\"summary,omitempty\" yaml:\"summary,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tBeforeRun []string `json:\"before_run,omitempty\" yaml:\"before_run,omitempty\"`\n\tAfterRun []string `json:\"after_run,omitempty\" yaml:\"after_run,omitempty\"`\n\tEnvironments []envmanModels.EnvironmentItemModel `json:\"envs,omitempty\" yaml:\"envs,omitempty\"`\n\tSteps []StepListItemModel `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n}\n\n\/\/ AppModel ...\ntype AppModel struct {\n\tTitle string `json:\"title,omitempty\" yaml:\"title,omitempty\"`\n\tSummary string `json:\"summary,omitempty\" yaml:\"summary,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tEnvironments []envmanModels.EnvironmentItemModel `json:\"envs,omitempty\" yaml:\"envs,omitempty\"`\n}\n\n\/\/ TriggerMapItemModel ...\ntype TriggerMapItemModel struct {\n\tPattern string `json:\"pattern,omitempty\" yaml:\"pattern,omitempty\"`\n\tIsPullRequestAllowed bool `json:\"is_pull_request_allowed,omitempty\" yaml:\"is_pull_request_allowed,omitempty\"`\n\tWorkflowID string `json:\"workflow,omitempty\" yaml:\"workflow,omitempty\"`\n}\n\n\/\/ BitriseDataModel ...\ntype BitriseDataModel struct {\n\tFormatVersion string `json:\"format_version\" yaml:\"format_version\"`\n\tDefaultStepLibSource string `json:\"default_step_lib_source,omitempty\" yaml:\"default_step_lib_source,omitempty\"`\n\tTitle string `json:\"title,omitempty\" yaml:\"title,omitempty\"`\n\tSummary string `json:\"summary,omitempty\" yaml:\"summary,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tTriggerMap []TriggerMapItemModel `json:\"trigger_map,omitempty\" yaml:\"trigger_map,omitempty\"`\n\tApp AppModel `json:\"app,omitempty\" yaml:\"app,omitempty\"`\n\tWorkflows map[string]WorkflowModel `json:\"workflows,omitempty\" yaml:\"workflows,omitempty\"`\n}\n\n\/\/ StepIDData ...\n\/\/ structured representation of a composite-step-id\n\/\/ a composite step id is: step-lib-source::step-id@1.0.0\ntype StepIDData struct {\n\t\/\/ IDOrURI : ID if steplib is provided, URI if local step or in case a direct git url provided\n\tIDorURI string\n\t\/\/ Version : version in the steplib, or in case of a direct git step the tag-or-branch to use\n\tVersion string\n\t\/\/ SteplibSource : steplib source uri, or in case of local path just \"path\", and in case of direct git url just \"git\"\n\tSteplibSource string\n}\n\n\/\/ BuildRunResultsModel ...\ntype BuildRunResultsModel struct {\n\tStartTime time.Time\n\tSuccessSteps []StepRunResultsModel\n\tFailedSteps []StepRunResultsModel\n\tFailedSkippableSteps []StepRunResultsModel\n\tSkippedSteps []StepRunResultsModel\n}\n\n\/\/ StepRunResultsModel ...\ntype StepRunResultsModel struct {\n\tStepName string\n\tStatus int\n\tIdx int\n\tRunTime time.Duration\n\tError error\n\tExitCode int\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"time\"\n)\n\n\/\/ List of possible \"volumes\" (book types)\nconst (\n\tVolumePaper = \"paperbook\"\n\tVolumeElectro = \"ebook\"\n\tVolumeAudio = \"audiobook\"\n)\n\n\/\/ GetVolumes returns list of possible \"volumes\" (book types)\nfunc GetVolumes() []string {\n\tvolumes := []string{VolumeAudio, VolumeElectro, VolumePaper}\n\treturn volumes\n}\n\n\/\/ CheckVolume\nfunc CheckVolume(volume string) bool {\n\tswitch volume {\n\tcase VolumePaper:\n\tcase VolumeElectro:\n\tcase VolumeAudio:\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/go:generate reform\n\n\/\/ Volume describes \"type\" of book - paperbook, ebook or audiobook.\n\/\/reform:volumes\ntype Volume struct {\n\tID int32 `reform:\"id,pk\"`\n\tBookID int32 `reform:\"book_id\"`\n\tType string `reform:\"type\"`\n\tCreatedAt time.Time `reform:\"created_at\"`\n\tUpdatedAt time.Time `reform:\"updated_at\"`\n}\n\n\/\/ BeforeInsert set CreatedAt and UpdatedAt.\nfunc (v *Volume) BeforeInsert() error {\n\tv.CreatedAt = time.Now().UTC().Truncate(time.Second)\n\tv.UpdatedAt = v.CreatedAt\n\treturn nil\n}\n\n\/\/ BeforeUpdate set UpdatedAt.\nfunc (v *Volume) BeforeUpdate() error {\n\tv.UpdatedAt = time.Now().UTC().Truncate(time.Second)\n\treturn nil\n}\n<commit_msg>Documentation & spelling<commit_after>package models\n\nimport (\n\t\"time\"\n)\n\n\/\/ List of possible \"volumes\" (book types)\nconst (\n\tVolumePaper = \"paperbook\"\n\tVolumeElectro = \"ebook\"\n\tVolumeAudio = \"audiobook\"\n)\n\n\/\/ GetVolumes returns list of possible \"volumes\" (book types)\nfunc GetVolumes() []string {\n\tvolumes := []string{VolumeAudio, VolumeElectro, VolumePaper}\n\treturn volumes\n}\n\n\/\/ CheckVolume checks if the given volume is supported\nfunc CheckVolume(volume string) bool {\n\tswitch volume {\n\tcase VolumePaper:\n\tcase VolumeElectro:\n\tcase VolumeAudio:\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/go:generate reform\n\n\/\/ Volume describes \"type\" of book - paperbook, ebook or audiobook.\n\/\/reform:volumes\ntype Volume struct {\n\tID int32 `reform:\"id,pk\"`\n\tBookID int32 `reform:\"book_id\"`\n\tType string `reform:\"type\"`\n\tCreatedAt time.Time `reform:\"created_at\"`\n\tUpdatedAt time.Time `reform:\"updated_at\"`\n}\n\n\/\/ BeforeInsert set CreatedAt and UpdatedAt.\nfunc (v *Volume) BeforeInsert() error {\n\tv.CreatedAt = time.Now().UTC().Truncate(time.Second)\n\tv.UpdatedAt = v.CreatedAt\n\treturn nil\n}\n\n\/\/ BeforeUpdate set UpdatedAt.\nfunc (v *Volume) BeforeUpdate() error {\n\tv.UpdatedAt = time.Now().UTC().Truncate(time.Second)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package entity\n\nimport (\n\t\"fmt\"\n\t\/\/\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/gfandada\/gserver\/util\"\n)\n\n\/\/ 一万个可行走点测试aoi\nconst (\n\tX = 100\n\tY = 0\n\tZ = 100\n)\n\nfunc Test_space(t *testing.T) {\n\t\/\/ 构建一个场景\n\tspace := NewSpace(1, new(Space))\n\tRegisterSpace(space)\n\tfor i := 0; i < X; i++ {\n\t\tfor j := 0; j < Z; j++ {\n\t\t\tentity := NewEntity(1, \"entity\"+strconv.Itoa(i)+\":\"+strconv.Itoa(j),\n\t\t\t\ttrue, true)\n\t\t\tentity.BindIentity(new(Entity))\n\t\t\tRegisterEntity(entity)\n\t\t\tentity.EnterSpace(space.Id, Vector3{\n\t\t\t\tX: Coord(i),\n\t\t\t\tY: Coord(0),\n\t\t\t\tZ: Coord(j),\n\t\t\t})\n\t\t}\n\t}\n\t\/\/ 主进程调度\n\tfor i := 0; i < 1; i++ {\n\t\tfor key := range space.entities {\n\t\t\tx := util.RandInterval(0, X-1)\n\t\t\tz := util.RandInterval(0, Z-1)\n\t\t\t\/\/fmt.Println(\"调度时的随机坐标\", x, \":\", z)\n\t\t\tkey.MoveSpace(Vector3{\n\t\t\t\tX: Coord(x),\n\t\t\t\tY: Coord(0),\n\t\t\t\tZ: Coord(z),\n\t\t\t})\n\t\t}\n\t\tfor key := range space.entities {\n\t\t\tfmt.Println(\"我是\", key.aoi.pos, key.Desc.Name, \"我有\", len(key.Neighbors()), \"个邻居\")\n\t\t}\n\t}\n}\n<commit_msg>update space test<commit_after>package entity\n\nimport (\n\t\/\/\t\"fmt\"\n\t\/\/\t\"fmt\"\n\t\/\/\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\/\/\"github.com\/beefsack\/go-astar\"\n\t\"github.com\/gfandada\/gserver\/util\"\n)\n\n\/\/ 一万个可行走点测试aoi\nconst (\n\tX = 100\n\tY = 0\n\tZ = 100\n)\n\nfunc Test_space(t *testing.T) {\n\t\/\/ 构建一个场景\n\tspace := NewSpace(1, new(Space))\n\tRegisterSpace(space)\n\tfor i := 0; i < X; i++ {\n\t\tfor j := 0; j < Z; j++ {\n\t\t\tentity := NewEntity(1, \"entity\"+strconv.Itoa(i)+\":\"+strconv.Itoa(j),\n\t\t\t\ttrue, true)\n\t\t\tentity.BindIentity(new(Entity))\n\t\t\tRegisterEntity(entity)\n\t\t\tentity.EnterSpace(space.Id, Vector3{\n\t\t\t\tX: Coord(i),\n\t\t\t\tY: Coord(0),\n\t\t\t\tZ: Coord(j),\n\t\t\t})\n\t\t}\n\t}\n\t\/\/ 主进程调度\n\tfor i := 0; i < 1; i++ {\n\t\tfor key := range space.entities {\n\t\t\tx := util.RandInterval(0, X-1)\n\t\t\tz := util.RandInterval(0, Z-1)\n\t\t\t\/\/fmt.Println(\"调度时的随机坐标\", x, \":\", z)\n\t\t\tkey.MoveSpace(Vector3{\n\t\t\t\tX: Coord(x),\n\t\t\t\tY: Coord(0),\n\t\t\t\tZ: Coord(z),\n\t\t\t})\n\t\t}\n\t\t\/\/\t\tfor key := range space.entities {\n\t\t\/\/\t\t\tfmt.Println(\"我是\", key.aoi.pos, key.Desc.Name, \"我有\", len(key.Neighbors()), \"个邻居\")\n\t\t\/\/\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\n\nvar mountains [8]int\n\ntype ship struct {\n\tx, y, target int\n}\n\nfunc (s *ship) GetTarget() {\n\ty := 0\n\ts.target = -1\n\tfor i, mountain := range mountains {\n\t\tif mountain >= y {\n\t\t\ty = mountain\n\t\t\ts.target = i\n\t\t}\n\t}\n}\n\nfunc (s *ship) FireOrHold() string {\n\tif s.target == -1 {\n\t\ts.GetTarget()\n\t}\n\n\tif s.x == s.target {\n\t\ts.target = -1\n\t\treturn \"FIRE\"\n\t}\n\n\treturn \"HOLD\"\n}\n\nfunc main() {\n\thero := ship{0, 0, -1}\n\n\tfor {\n\t\tfmt.Scanf(\"%d %d\\n\", &hero.x, &hero.y)\n\t\tfor i := range mountains {\n\t\t\tfmt.Scanf(\"%d\\n\", &mountains[i])\n\t\t}\n\n\t\tfmt.Println(hero.FireOrHold())\n\t}\n}\n<commit_msg>kirk solution works for first 5 input files<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/glendc\/cgreader\"\n)\n\nvar mountains [8]int\n\ntype ship struct {\n\tx, y, target int\n}\n\nfunc (s *ship) GetTarget() {\n\ty := 0\n\ts.target = -1\n\tfor i, mountain := range mountains {\n\t\tif mountain >= y {\n\t\t\ty = mountain\n\t\t\ts.target = i\n\t\t}\n\t}\n}\n\nfunc (s *ship) FireOrHold() string {\n\tif s.target == -1 {\n\t\ts.GetTarget()\n\t}\n\n\tif s.x == s.target {\n\t\ts.target = -1\n\t\treturn \"FIRE\"\n\t}\n\n\treturn \"HOLD\"\n}\n\nvar hero ship\n\nfunc Initialize(input <-chan string) {\n\thero = ship{0, 0, -1}\n}\n\nfunc Update(input <-chan string, output chan string) {\n\tfmt.Sscanf(<-input, \"%d %d\", &hero.x, &hero.y)\n\tfor i := range mountains {\n\t\tfmt.Sscanf(<-input, \"%d\", &mountains[i])\n\t}\n\n\toutput <- hero.FireOrHold()\n}\n\nfunc main() {\n\tcgreader.SetFrameRate(5)\n\tcgreader.RunKirkProgram(\"..\/..\/input\/kirk_6.txt\", true, Initialize, Update)\n\t\/*cgreader.RunKirkPrograms(\n\tcgreader.GetFileList(\"..\/..\/input\/kirk_%d.txt\", 6),\n\tfalse,\n\tInitialize,\n\tUpdate)*\/\n}\n<|endoftext|>"} {"text":"<commit_before>package game_engine\n\nimport (\n \"fmt\"\n \"encoding\/json\"\n \"errors\"\n \"requests\"\n)\n\ntype Game struct {\n terrain [][]terrain\n unitMap map[location]*unit\n players []*player\n numPlayers int\n turnOwner int\n}\n\nfunc NewGame(playerIds []int, worldId int) (*Game, error) {\n numPlayers := len(playerIds)\n if numPlayers > 4 || numPlayers < 1 {\n return nil, errors.New(\"must have between 1 and 4 players\")\n }\n players := make([]*player, numPlayers)\n for i, playerId := range(playerIds) {\n players[i] = newPlayer(playerId, nation(i), team(i))\n }\n ret_game := &Game{\n terrain: [][]terrain{\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains}},\n unitMap: make(map[location]*unit),\n players: players,\n numPlayers: numPlayers,\n turnOwner: 0}\n if worldId == 0 {\n ret_game.AddUnit(newLocation(0, 0), tank(red))\n }\n return ret_game, nil\n}\n\nfunc (game *Game) verifyTurnOwner(playerId int) error {\n if playerId != game.players[game.turnOwner].playerId {\n return errors.New(\"Not the turn owner\")\n }\n return nil\n}\n\nfunc (game *Game) AddUnit(location location, unit *unit) error {\n fmt.Println(\"adding unit\")\n _, ok := game.unitMap[location]; if !ok {\n game.unitMap[location] = unit\n fmt.Println(\"added unit\")\n return nil\n } else {\n fmt.Println(\"failed to add unit\")\n return errors.New(\"location already occupied\")\n }\n}\n\nfunc (game *Game) EndTurn(playerId int) error {\n ownerError := game.verifyTurnOwner(playerId)\n if ownerError != nil {\n return ownerError\n }\n nextOwner := game.turnOwner + 1\n if nextOwner >= game.numPlayers {\n game.turnOwner = 0\n } else {\n game.turnOwner = nextOwner\n }\n return nil\n}\n\nfunc (game *Game) MoveUnit(playerId int, rawLocations []requests.LocationStruct) error {\n ownerError := game.verifyTurnOwner(playerId)\n if ownerError != nil {\n return ownerError\n }\n locations := make([]location, len(rawLocations))\n for i, location := range(rawLocations) {\n locations[i] = newLocation(location.X, location.Y)\n }\n if len(locations) < 1 {\n message := \"must supply more than zero locations\"\n fmt.Println(message)\n return errors.New(message)\n }\n\n tiles := make([]terrain, len(locations))\n for i, location := range(locations) {\n tiles[i] = game.terrain[location.x][location.y]\n }\n \/\/fmt.Println(tiles)\n \/\/fmt.Println(game.unitMap)\n \/\/fmt.Println(locations)\n unit, ok := game.unitMap[locations[0]]; if ok {\n moveErr := validMove(\n unit.movement.distance,\n unit.movement, tiles, locations)\n if moveErr == nil {\n end := len(locations)\n unit = game.unitMap[newLocation(locations[0].x, locations[0].y)]\n game.unitMap[newLocation(locations[end-1].x, locations[end-1].y)] = unit\n delete(game.unitMap, newLocation(locations[0].x, locations[0].y))\n return nil\n } else {\n fmt.Println(moveErr)\n return moveErr\n }\n } else {\n message := \"Invalid starting location\"\n fmt.Println(message)\n return errors.New(message)\n }\n}\n\nfunc (game *Game) Serialize(playerId int) ([]byte, error) {\n players := make([]*requests.PlayerStruct, len(game.players))\n for i, player := range(game.players) {\n players[i] = player.serialize()\n }\n terrainInts := make([][]int, len(game.terrain))\n for i, t := range(game.terrain) {\n thoriz := make([]int, len(t))\n for j, t_ := range(t) {\n thoriz[j] = int(t_)\n }\n terrainInts[i] = thoriz\n }\n units := make([]*requests.UnitStruct, len(game.unitMap))\n i := 0\n for location, unit := range(game.unitMap) {\n units[i] = unit.serialize(location)\n i += 1\n }\n return json.Marshal(requests.WorldStruct{\n Terrain: terrainInts,\n Units: units,\n Players: players,\n TurnOwner: game.players[game.turnOwner].playerId})\n}\n<commit_msg>Reorder functions in world.go<commit_after>package game_engine\n\nimport (\n \"fmt\"\n \"encoding\/json\"\n \"errors\"\n \"requests\"\n)\n\ntype Game struct {\n terrain [][]terrain\n unitMap map[location]*unit\n players []*player\n numPlayers int\n turnOwner int\n}\n\nfunc NewGame(playerIds []int, worldId int) (*Game, error) {\n numPlayers := len(playerIds)\n if numPlayers > 4 || numPlayers < 1 {\n return nil, errors.New(\"must have between 1 and 4 players\")\n }\n players := make([]*player, numPlayers)\n for i, playerId := range(playerIds) {\n players[i] = newPlayer(playerId, nation(i), team(i))\n }\n ret_game := &Game{\n terrain: [][]terrain{\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains},\n []terrain{plains, plains, plains, plains, plains, plains, plains, plains}},\n unitMap: make(map[location]*unit),\n players: players,\n numPlayers: numPlayers,\n turnOwner: 0}\n if worldId == 0 {\n ret_game.AddUnit(newLocation(0, 0), tank(red))\n }\n return ret_game, nil\n}\n\nfunc (game *Game) verifyTurnOwner(playerId int) error {\n if playerId != game.players[game.turnOwner].playerId {\n return errors.New(\"Not the turn owner\")\n }\n return nil\n}\n\nfunc (game *Game) AddUnit(location location, unit *unit) error {\n fmt.Println(\"adding unit\")\n _, ok := game.unitMap[location]; if !ok {\n game.unitMap[location] = unit\n fmt.Println(\"added unit\")\n return nil\n } else {\n fmt.Println(\"failed to add unit\")\n return errors.New(\"location already occupied\")\n }\n}\n\nfunc (game *Game) Serialize(playerId int) ([]byte, error) {\n players := make([]*requests.PlayerStruct, len(game.players))\n for i, player := range(game.players) {\n players[i] = player.serialize()\n }\n terrainInts := make([][]int, len(game.terrain))\n for i, t := range(game.terrain) {\n thoriz := make([]int, len(t))\n for j, t_ := range(t) {\n thoriz[j] = int(t_)\n }\n terrainInts[i] = thoriz\n }\n units := make([]*requests.UnitStruct, len(game.unitMap))\n i := 0\n for location, unit := range(game.unitMap) {\n units[i] = unit.serialize(location)\n i += 1\n }\n return json.Marshal(requests.WorldStruct{\n Terrain: terrainInts,\n Units: units,\n Players: players,\n TurnOwner: game.players[game.turnOwner].playerId})\n}\n\nfunc (game *Game) MoveUnit(playerId int, rawLocations []requests.LocationStruct) error {\n ownerError := game.verifyTurnOwner(playerId)\n if ownerError != nil {\n return ownerError\n }\n locations := make([]location, len(rawLocations))\n for i, location := range(rawLocations) {\n locations[i] = newLocation(location.X, location.Y)\n }\n if len(locations) < 1 {\n message := \"must supply more than zero locations\"\n fmt.Println(message)\n return errors.New(message)\n }\n\n tiles := make([]terrain, len(locations))\n for i, location := range(locations) {\n tiles[i] = game.terrain[location.x][location.y]\n }\n \/\/fmt.Println(tiles)\n \/\/fmt.Println(game.unitMap)\n \/\/fmt.Println(locations)\n unit, ok := game.unitMap[locations[0]]; if ok {\n moveErr := validMove(\n unit.movement.distance,\n unit.movement, tiles, locations)\n if moveErr == nil {\n end := len(locations)\n unit = game.unitMap[newLocation(locations[0].x, locations[0].y)]\n game.unitMap[newLocation(locations[end-1].x, locations[end-1].y)] = unit\n delete(game.unitMap, newLocation(locations[0].x, locations[0].y))\n return nil\n } else {\n fmt.Println(moveErr)\n return moveErr\n }\n } else {\n message := \"Invalid starting location\"\n fmt.Println(message)\n return errors.New(message)\n }\n}\n\nfunc (game *Game) EndTurn(playerId int) error {\n ownerError := game.verifyTurnOwner(playerId)\n if ownerError != nil {\n return ownerError\n }\n nextOwner := game.turnOwner + 1\n if nextOwner >= game.numPlayers {\n game.turnOwner = 0\n } else {\n game.turnOwner = nextOwner\n }\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/ token represents the basic lexicographical units of the language.\ntype token struct {\n\tkind tokenType \/\/ The kind of token with which we're dealing.\n\tpos Pos \/\/ The byte offset of the beginning of the token with respect to the beginning of the input.\n\tval string \/\/ The token's value. Error message for lexError; otherwise, the token's constituent text.\n}\n\n\/\/ Defining the String function satisfies the Stinger interface.\n\/\/ Satisfying Stringer allows package fmt to pretty-print our tokens.\n\/\/ func (t *token) String() string {\n\/\/ \tswitch {\n\/\/ \tcase t.kind == tokError:\n\/\/ \t\treturn t.val\n\/\/ \tcase t.kind == tokEOF:\n\/\/ \t\treturn \"EOF\"\n\/\/ \tcase t.kind > tokKeyword:\n\/\/ \t\treturn fmt.Sprintf(\"<%s>\", t.val)\n\/\/ \tcase len(t.val) > 10:\n\/\/ \t\treturn fmt.Sprintf(\"%.10q...\", t.val) \/\/ Limit the max width for long tokens\n\/\/ \tcase t.kind == tokSpace:\n\/\/ \t\treturn \"_\"\n\/\/ \tdefault:\n\/\/ \t\treturn t.val\n\/\/ \t}\n\/\/ }\n\n\/\/ tokenType identifies the type of a token.\ntype tokenType int\n\n\/\/ The list of tokenTypes.\nconst (\n\t\/\/ special\n\ttokError tokenType = iota \/\/ error occurred\n\ttokEOF\n\ttokComment\n\n\t\/\/ punctuation\n\ttokSpace\n\ttokSemicolon\n\ttokComma\n\ttokLeftParen\n\ttokRightParen\n\n\t\/\/ literals\n\ttokNumber\n\n\t\/\/ identifiers\n\ttokIdentifier\n\n\t\/\/ keywords\n\ttokKeyword \/\/ used to delineate keywords\n\ttokDefine\n\ttokExtern\n\ttokIf\n\ttokThen\n\ttokElse\n\ttokFor\n\ttokIn\n\ttokBinary\n\ttokUnary\n\ttokVariable\n\n\t\/\/ operators\n\ttokUserUnaryOp \/\/ additionally used to delineate operators\n\ttokUserBinaryOp\n\ttokEqual\n\ttokPlus\n\ttokMinus\n\ttokStar\n\ttokSlash\n\ttokLessThan\n)\n\nvar key = map[string]tokenType{\n\t\"def\": tokDefine,\n\t\"extern\": tokExtern,\n\t\"if\": tokIf,\n\t\"then\": tokThen,\n\t\"else\": tokElse,\n\t\"for\": tokFor,\n\t\"in\": tokIn,\n\t\"binary\": tokBinary,\n\t\"unary\": tokUnary,\n\t\"var\": tokVariable,\n}\n\n\/\/ op maps built-in operators to tokenTypes\n\/\/ As this should never be written to, it is safe to share between lexer goroutines.\nvar op = map[rune]tokenType{\n\t'=': tokEqual,\n\t'+': tokPlus,\n\t'-': tokMinus,\n\t'*': tokStar,\n\t'\/': tokSlash,\n\t'<': tokLessThan,\n}\n\n\/\/ userOpType differentiates a user-defined unary, binary or not found operator.\ntype userOpType int\n\nconst (\n\tuopNOP userOpType = iota \/\/ Signals that the rune is not a user operator.\n\tuopUnaryOp\n\tuopBinaryOp\n)\n\n\/\/ stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n\n\/\/ lexer holds the state of the scanner.\ntype lexer struct {\n\tname string \/\/ name of input file; used in error reports\n\tinput string \/\/ input being scanned\n\tstate stateFn \/\/ next lexing function to be called\n\tpos Pos \/\/ current position in input\n\tstart Pos \/\/ beginning position of the current token\n\twidth Pos \/\/ width of last rune read from input\n\ttokens chan token \/\/ channel of lexed items\n\tuserOperators map[rune]userOpType \/\/ userOperators maps user defined operators to number of operands (Implication: no multi-char operators)\n\tparenDepth int \/\/ nested layers of paren expressions\n\tprintTokens bool \/\/ print tokens before sending\n}\n\n\/\/ NewLex creates and runs a new lexer from the input string.\nfunc NewLex(name, input string, printTokens bool) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\ttokens: make(chan token, 10),\n\t\tuserOperators: map[rune]userOpType{},\n\t}\n\tgo l.run()\n\treturn l\n}\n\n\/\/ l.next() returns eof to signal end of file to a stateFn.\nconst eof = -1\n\n\/\/ word returns the value of the token that would be emitted if\n\/\/ l.emit() were to be called.\nfunc (l *lexer) word() string {\n\treturn l.input[l.start:l.pos]\n}\n\n\/\/ next returns the next rune from the input and advances the scan.\n\/\/ It returns the eof constant (-1) if the scanner is at the end of\n\/\/ the input.\nfunc (l *lexer) next() rune {\n\tif int(l.pos) >= len(l.input) {\n\t\tl.width = 0\n\t\treturn eof\n\t}\n\tr, w := utf8.DecodeRuneInString(l.input[l.pos:])\n\tl.width = Pos(w)\n\tl.pos += l.width\n\treturn r\n}\n\n\/\/ peek returns the next rune without moving the scan forward.\nfunc (l *lexer) peek() rune {\n\tr := l.next()\n\tl.backup()\n\treturn r\n}\n\n\/\/ backup moves the scan back one rune.\nfunc (l *lexer) backup() {\n\tl.pos -= l.width\n}\n\n\/\/ ignore skips the pending input before this point.\nfunc (l *lexer) ignore() {\n\tl.start = l.pos\n}\n\n\/\/ acceptRun consumes a run of runes from valid set.\nfunc (l *lexer) acceptRun(valid string) {\n\tfor strings.IndexRune(valid, l.next()) >= 0 {\n\t}\n\tl.backup()\n}\n\n\/\/ lineNumber returns the line on which a given rune occurred.\n\/\/ If we need the line number for many tokens, it'd be better to\n\/\/ track this with l.next() and l.backup(). However, we only use\n\/\/ this to report lexing & parsing errors—something that is hopefully\n\/\/ rare compared to the number of valid tokens and parse nodes.\nfunc (l *lexer) lineNumber(p Pos) int {\n\treturn 1 + strings.Count(l.input[:p], \"\\n\")\n}\n\n\/\/ errorf sending an error token and terminates the scan by passing nil as the next stateFn\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n\tt := token{\n\t\tkind: tokError,\n\t\tpos: l.start,\n\t\tval: fmt.Sprintf(format, args...)}\n\tspew.Dump(t)\n\tl.tokens <- t\n\treturn nil\n}\n\n\/\/ emit passes the current token.\nfunc (l *lexer) emit(tt tokenType) {\n\tt := token{\n\t\tkind: tt,\n\t\tpos: l.start,\n\t\tval: l.word(),\n\t}\n\tspew.Dump(t)\n\tl.tokens <- t\n\tl.start = l.pos\n}\n\n\/\/ run runs the state machine for the lexer.\nfunc (l *lexer) run() {\n\tfor l.state = lexTopLevel; l.state != nil; {\n\t\tl.state = l.state(l)\n\t\t\/\/ Println(runtime.FuncForPC(reflect.ValueOf(l.state).Pointer()).Name())\n\t}\n\tclose(l.tokens) \/\/ Tells the client no more tokens will be delivered.\n}\n\n\/\/ State Functions\n\n\/\/ lexTopLevel lexes any top level statement. Because our language is simple,\n\/\/ our lexer rarely needs to know its prior state and therefore this amounts\n\/\/ to the giant-switch style of lexing. Nevertheless, the stateFn technique\n\/\/ allows us to easy extend our lexer to more complex grammars.\nfunc lexTopLevel(l *lexer) stateFn {\n\t\/\/ Either whitespace, an empty line, a comment,\n\t\/\/ a number, a paren, identifier, or unary operator.\n\tr := l.next()\n\tswitch {\n\tcase r == eof:\n\t\tl.emit(tokEOF)\n\t\treturn nil\n\tcase isSpace(r):\n\t\tl.backup()\n\t\treturn lexSpace\n\tcase isEOL(r):\n\t\tl.start = l.pos\n\t\treturn lexTopLevel\n\tcase r == ';':\n\t\tl.emit(tokSemicolon)\n\t\treturn lexTopLevel\n\tcase r == ',':\n\t\tl.emit(tokComma)\n\t\treturn lexTopLevel\n\tcase r == '#':\n\t\treturn lexComment\n\tcase r == '(':\n\t\tl.parenDepth++\n\t\tl.emit(tokLeftParen)\n\t\treturn lexTopLevel\n\tcase r == ')':\n\t\tl.parenDepth--\n\t\tl.emit(tokRightParen)\n\t\tif l.parenDepth < 0 {\n\t\t\treturn l.errorf(\"unexpected right paren\")\n\t\t}\n\t\treturn lexTopLevel\n\tcase '0' <= r && r <= '9':\n\t\tl.backup()\n\t\treturn lexNumber\n\tcase isAlphaNumeric(r):\n\t\tl.backup()\n\t\treturn lexIdentifer\n\tcase op[r] > tokUserBinaryOp:\n\t\tl.emit(op[r])\n\t\treturn lexTopLevel\n\tcase l.userOperators[r] == uopBinaryOp:\n\t\tl.emit(tokUserBinaryOp)\n\t\treturn lexTopLevel\n\tcase l.userOperators[r] == uopUnaryOp:\n\t\tl.emit(tokUserUnaryOp)\n\t\treturn lexTopLevel\n\tdefault:\n\t\treturn l.errorf(\"unrecognized character: %#U\", r)\n\t}\n}\n\n\/\/ lexSpace globs contiguous whitespace.\nfunc lexSpace(l *lexer) stateFn {\n\tglobWhitespace(l)\n\treturn lexTopLevel\n}\n\n\/\/ globWhitespace globs contiguous whitespace, but sometimes we\n\/\/ don't want to return to lexTopLevel after doing this.\nfunc globWhitespace(l *lexer) {\n\tfor isSpace(l.next()) {\n\t}\n\tl.backup()\n\tif l.start != l.pos {\n\t\tl.emit(tokSpace)\n\t}\n}\n\n\/\/ lexComment runs from '#' to the end of line or end of file.\nfunc lexComment(l *lexer) stateFn {\n\tfor !isEOL(l.next()) {\n\t}\n\tl.backup()\n\tl.emit(tokComment)\n\treturn lexTopLevel\n}\n\n\/\/ lexNumber globs potential number-like strings. We let the parser\n\/\/ verify that the token is actually a valid number.\n\/\/ e.g. \"3.A.8\" could be emitted by this function.\nfunc lexNumber(l *lexer) stateFn {\n\tnumberish := \"0123456789.xabcdefABCDEF\" \/\/ Implication: cannot have \".\" operator\n\tl.acceptRun(numberish)\n\t\/\/ if isAlphaNumeric(l.peek()) { \/\/ probably a mistyped identifier\n\t\/\/ \tl.next()\n\t\/\/ \treturn l.errorf(\"bad number syntax: %q\", l.word())\n\t\/\/ }\n\tl.emit(tokNumber)\n\treturn lexTopLevel\n}\n\n\/\/ lexIdentfier globs unicode alpha-numerics, determines if they\n\/\/ represent a keyword or identifier, and output the appropriate\n\/\/ token. For the \"binary\" & \"unary\" keywords, we need to add their\n\/\/ associated user-defined operator to our map so that we can\n\/\/ identify it later.\nfunc lexIdentifer(l *lexer) stateFn {\nLoop:\n\tfor {\n\t\tswitch r := l.next(); {\n\t\tcase isAlphaNumeric(r):\n\t\t\t\/\/ absorb\n\t\tdefault:\n\t\t\tl.backup()\n\t\t\tword := l.word()\n\t\t\tif key[word] > tokKeyword { \/\/ We already know it's not an operator.\n\t\t\t\tl.emit(key[word])\n\t\t\t\tswitch word {\n\t\t\t\tcase \"binary\":\n\t\t\t\t\treturn lexUserBinaryOp\n\t\t\t\tcase \"unary\":\n\t\t\t\t\treturn lexUserUnaryOp\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tl.emit(tokIdentifier)\n\t\t\t}\n\t\t\tbreak Loop\n\t\t}\n\t}\n\treturn lexTopLevel\n}\n\n\/\/ lexUserBinaryOp checks for spaces and then identifies and maps.\n\/\/ the newly defined user operator.\nfunc lexUserBinaryOp(l *lexer) stateFn {\n\tglobWhitespace(l)\n\tr := l.next() \/\/ Implication: no multi-char operators\n\tl.userOperators[r] = uopBinaryOp\n\tl.emit(tokUserBinaryOp)\n\treturn lexTopLevel\n}\n\n\/\/ lexUserBinaryOp checks for spaces and then identifies and maps.\n\/\/ the newly defined user operator.\nfunc lexUserUnaryOp(l *lexer) stateFn {\n\tglobWhitespace(l)\n\tr := l.next() \/\/ Implication: no multi-char operators\n\tl.userOperators[r] = uopUnaryOp\n\tl.emit(tokUserUnaryOp)\n\treturn lexTopLevel\n}\n\n\/\/ Helper Functions\n\n\/\/ isSpace reports whether r is whitespace.\nfunc isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}\n\n\/\/ isEOL reports whether r is an end-of-line character or an EOF.\nfunc isEOL(r rune) bool {\n\treturn r == '\\n' || r == '\\r' || r == eof\n}\n\n\/\/ isValidIdefRune reports if r may be part of an identifier name.\nfunc isAlphaNumeric(r rune) bool {\n\treturn r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}\n<commit_msg>Bugfix: Set and check for -tok flag<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/ token represents the basic lexicographical units of the language.\ntype token struct {\n\tkind tokenType \/\/ The kind of token with which we're dealing.\n\tpos Pos \/\/ The byte offset of the beginning of the token with respect to the beginning of the input.\n\tval string \/\/ The token's value. Error message for lexError; otherwise, the token's constituent text.\n}\n\n\/\/ Defining the String function satisfies the Stinger interface.\n\/\/ Satisfying Stringer allows package fmt to pretty-print our tokens.\n\/\/ func (t *token) String() string {\n\/\/ \tswitch {\n\/\/ \tcase t.kind == tokError:\n\/\/ \t\treturn t.val\n\/\/ \tcase t.kind == tokEOF:\n\/\/ \t\treturn \"EOF\"\n\/\/ \tcase t.kind > tokKeyword:\n\/\/ \t\treturn fmt.Sprintf(\"<%s>\", t.val)\n\/\/ \tcase len(t.val) > 10:\n\/\/ \t\treturn fmt.Sprintf(\"%.10q...\", t.val) \/\/ Limit the max width for long tokens\n\/\/ \tcase t.kind == tokSpace:\n\/\/ \t\treturn \"_\"\n\/\/ \tdefault:\n\/\/ \t\treturn t.val\n\/\/ \t}\n\/\/ }\n\n\/\/ tokenType identifies the type of a token.\ntype tokenType int\n\n\/\/ The list of tokenTypes.\nconst (\n\t\/\/ special\n\ttokError tokenType = iota \/\/ error occurred\n\ttokEOF\n\ttokComment\n\n\t\/\/ punctuation\n\ttokSpace\n\ttokSemicolon\n\ttokComma\n\ttokLeftParen\n\ttokRightParen\n\n\t\/\/ literals\n\ttokNumber\n\n\t\/\/ identifiers\n\ttokIdentifier\n\n\t\/\/ keywords\n\ttokKeyword \/\/ used to delineate keywords\n\ttokDefine\n\ttokExtern\n\ttokIf\n\ttokThen\n\ttokElse\n\ttokFor\n\ttokIn\n\ttokBinary\n\ttokUnary\n\ttokVariable\n\n\t\/\/ operators\n\ttokUserUnaryOp \/\/ additionally used to delineate operators\n\ttokUserBinaryOp\n\ttokEqual\n\ttokPlus\n\ttokMinus\n\ttokStar\n\ttokSlash\n\ttokLessThan\n)\n\nvar key = map[string]tokenType{\n\t\"def\": tokDefine,\n\t\"extern\": tokExtern,\n\t\"if\": tokIf,\n\t\"then\": tokThen,\n\t\"else\": tokElse,\n\t\"for\": tokFor,\n\t\"in\": tokIn,\n\t\"binary\": tokBinary,\n\t\"unary\": tokUnary,\n\t\"var\": tokVariable,\n}\n\n\/\/ op maps built-in operators to tokenTypes\n\/\/ As this should never be written to, it is safe to share between lexer goroutines.\nvar op = map[rune]tokenType{\n\t'=': tokEqual,\n\t'+': tokPlus,\n\t'-': tokMinus,\n\t'*': tokStar,\n\t'\/': tokSlash,\n\t'<': tokLessThan,\n}\n\n\/\/ userOpType differentiates a user-defined unary, binary or not found operator.\ntype userOpType int\n\nconst (\n\tuopNOP userOpType = iota \/\/ Signals that the rune is not a user operator.\n\tuopUnaryOp\n\tuopBinaryOp\n)\n\n\/\/ stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n\n\/\/ lexer holds the state of the scanner.\ntype lexer struct {\n\tname string \/\/ name of input file; used in error reports\n\tinput string \/\/ input being scanned\n\tstate stateFn \/\/ next lexing function to be called\n\tpos Pos \/\/ current position in input\n\tstart Pos \/\/ beginning position of the current token\n\twidth Pos \/\/ width of last rune read from input\n\ttokens chan token \/\/ channel of lexed items\n\tuserOperators map[rune]userOpType \/\/ userOperators maps user defined operators to number of operands (Implication: no multi-char operators)\n\tparenDepth int \/\/ nested layers of paren expressions\n\tprintTokens bool \/\/ print tokens before sending\n}\n\n\/\/ NewLex creates and runs a new lexer from the input string.\nfunc NewLex(name, input string, printTokens bool) *lexer {\n\tl := &lexer{\n\t\tname: name,\n\t\tinput: input,\n\t\ttokens: make(chan token, 10),\n\t\tuserOperators: map[rune]userOpType{},\n\t\tprintTokens: printTokens,\n\t}\n\tgo l.run()\n\treturn l\n}\n\n\/\/ l.next() returns eof to signal end of file to a stateFn.\nconst eof = -1\n\n\/\/ word returns the value of the token that would be emitted if\n\/\/ l.emit() were to be called.\nfunc (l *lexer) word() string {\n\treturn l.input[l.start:l.pos]\n}\n\n\/\/ next returns the next rune from the input and advances the scan.\n\/\/ It returns the eof constant (-1) if the scanner is at the end of\n\/\/ the input.\nfunc (l *lexer) next() rune {\n\tif int(l.pos) >= len(l.input) {\n\t\tl.width = 0\n\t\treturn eof\n\t}\n\tr, w := utf8.DecodeRuneInString(l.input[l.pos:])\n\tl.width = Pos(w)\n\tl.pos += l.width\n\treturn r\n}\n\n\/\/ peek returns the next rune without moving the scan forward.\nfunc (l *lexer) peek() rune {\n\tr := l.next()\n\tl.backup()\n\treturn r\n}\n\n\/\/ backup moves the scan back one rune.\nfunc (l *lexer) backup() {\n\tl.pos -= l.width\n}\n\n\/\/ ignore skips the pending input before this point.\nfunc (l *lexer) ignore() {\n\tl.start = l.pos\n}\n\n\/\/ acceptRun consumes a run of runes from valid set.\nfunc (l *lexer) acceptRun(valid string) {\n\tfor strings.IndexRune(valid, l.next()) >= 0 {\n\t}\n\tl.backup()\n}\n\n\/\/ lineNumber returns the line on which a given rune occurred.\n\/\/ If we need the line number for many tokens, it'd be better to\n\/\/ track this with l.next() and l.backup(). However, we only use\n\/\/ this to report lexing & parsing errors—something that is hopefully\n\/\/ rare compared to the number of valid tokens and parse nodes.\nfunc (l *lexer) lineNumber(p Pos) int {\n\treturn 1 + strings.Count(l.input[:p], \"\\n\")\n}\n\n\/\/ errorf sending an error token and terminates the scan by passing nil as the next stateFn\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n\tt := token{\n\t\tkind: tokError,\n\t\tpos: l.start,\n\t\tval: fmt.Sprintf(format, args...)}\n\tif l.printTokens {\n\t\tspew.Dump(t)\n\t}\n\tl.tokens <- t\n\treturn nil\n}\n\n\/\/ emit passes the current token.\nfunc (l *lexer) emit(tt tokenType) {\n\tt := token{\n\t\tkind: tt,\n\t\tpos: l.start,\n\t\tval: l.word(),\n\t}\n\tif l.printTokens {\n\t\tspew.Dump(t)\n\t}\n\tl.tokens <- t\n\tl.start = l.pos\n}\n\n\/\/ run runs the state machine for the lexer.\nfunc (l *lexer) run() {\n\tfor l.state = lexTopLevel; l.state != nil; {\n\t\tl.state = l.state(l)\n\t\t\/\/ Println(runtime.FuncForPC(reflect.ValueOf(l.state).Pointer()).Name())\n\t}\n\tclose(l.tokens) \/\/ Tells the client no more tokens will be delivered.\n}\n\n\/\/ State Functions\n\n\/\/ lexTopLevel lexes any top level statement. Because our language is simple,\n\/\/ our lexer rarely needs to know its prior state and therefore this amounts\n\/\/ to the giant-switch style of lexing. Nevertheless, the stateFn technique\n\/\/ allows us to easy extend our lexer to more complex grammars.\nfunc lexTopLevel(l *lexer) stateFn {\n\t\/\/ Either whitespace, an empty line, a comment,\n\t\/\/ a number, a paren, identifier, or unary operator.\n\tr := l.next()\n\tswitch {\n\tcase r == eof:\n\t\tl.emit(tokEOF)\n\t\treturn nil\n\tcase isSpace(r):\n\t\tl.backup()\n\t\treturn lexSpace\n\tcase isEOL(r):\n\t\tl.start = l.pos\n\t\treturn lexTopLevel\n\tcase r == ';':\n\t\tl.emit(tokSemicolon)\n\t\treturn lexTopLevel\n\tcase r == ',':\n\t\tl.emit(tokComma)\n\t\treturn lexTopLevel\n\tcase r == '#':\n\t\treturn lexComment\n\tcase r == '(':\n\t\tl.parenDepth++\n\t\tl.emit(tokLeftParen)\n\t\treturn lexTopLevel\n\tcase r == ')':\n\t\tl.parenDepth--\n\t\tl.emit(tokRightParen)\n\t\tif l.parenDepth < 0 {\n\t\t\treturn l.errorf(\"unexpected right paren\")\n\t\t}\n\t\treturn lexTopLevel\n\tcase '0' <= r && r <= '9':\n\t\tl.backup()\n\t\treturn lexNumber\n\tcase isAlphaNumeric(r):\n\t\tl.backup()\n\t\treturn lexIdentifer\n\tcase op[r] > tokUserBinaryOp:\n\t\tl.emit(op[r])\n\t\treturn lexTopLevel\n\tcase l.userOperators[r] == uopBinaryOp:\n\t\tl.emit(tokUserBinaryOp)\n\t\treturn lexTopLevel\n\tcase l.userOperators[r] == uopUnaryOp:\n\t\tl.emit(tokUserUnaryOp)\n\t\treturn lexTopLevel\n\tdefault:\n\t\treturn l.errorf(\"unrecognized character: %#U\", r)\n\t}\n}\n\n\/\/ lexSpace globs contiguous whitespace.\nfunc lexSpace(l *lexer) stateFn {\n\tglobWhitespace(l)\n\treturn lexTopLevel\n}\n\n\/\/ globWhitespace globs contiguous whitespace, but sometimes we\n\/\/ don't want to return to lexTopLevel after doing this.\nfunc globWhitespace(l *lexer) {\n\tfor isSpace(l.next()) {\n\t}\n\tl.backup()\n\tif l.start != l.pos {\n\t\tl.emit(tokSpace)\n\t}\n}\n\n\/\/ lexComment runs from '#' to the end of line or end of file.\nfunc lexComment(l *lexer) stateFn {\n\tfor !isEOL(l.next()) {\n\t}\n\tl.backup()\n\tl.emit(tokComment)\n\treturn lexTopLevel\n}\n\n\/\/ lexNumber globs potential number-like strings. We let the parser\n\/\/ verify that the token is actually a valid number.\n\/\/ e.g. \"3.A.8\" could be emitted by this function.\nfunc lexNumber(l *lexer) stateFn {\n\tnumberish := \"0123456789.xabcdefABCDEF\" \/\/ Implication: cannot have \".\" operator\n\tl.acceptRun(numberish)\n\t\/\/ if isAlphaNumeric(l.peek()) { \/\/ probably a mistyped identifier\n\t\/\/ \tl.next()\n\t\/\/ \treturn l.errorf(\"bad number syntax: %q\", l.word())\n\t\/\/ }\n\tl.emit(tokNumber)\n\treturn lexTopLevel\n}\n\n\/\/ lexIdentfier globs unicode alpha-numerics, determines if they\n\/\/ represent a keyword or identifier, and output the appropriate\n\/\/ token. For the \"binary\" & \"unary\" keywords, we need to add their\n\/\/ associated user-defined operator to our map so that we can\n\/\/ identify it later.\nfunc lexIdentifer(l *lexer) stateFn {\nLoop:\n\tfor {\n\t\tswitch r := l.next(); {\n\t\tcase isAlphaNumeric(r):\n\t\t\t\/\/ absorb\n\t\tdefault:\n\t\t\tl.backup()\n\t\t\tword := l.word()\n\t\t\tif key[word] > tokKeyword { \/\/ We already know it's not an operator.\n\t\t\t\tl.emit(key[word])\n\t\t\t\tswitch word {\n\t\t\t\tcase \"binary\":\n\t\t\t\t\treturn lexUserBinaryOp\n\t\t\t\tcase \"unary\":\n\t\t\t\t\treturn lexUserUnaryOp\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tl.emit(tokIdentifier)\n\t\t\t}\n\t\t\tbreak Loop\n\t\t}\n\t}\n\treturn lexTopLevel\n}\n\n\/\/ lexUserBinaryOp checks for spaces and then identifies and maps.\n\/\/ the newly defined user operator.\nfunc lexUserBinaryOp(l *lexer) stateFn {\n\tglobWhitespace(l)\n\tr := l.next() \/\/ Implication: no multi-char operators\n\tl.userOperators[r] = uopBinaryOp\n\tl.emit(tokUserBinaryOp)\n\treturn lexTopLevel\n}\n\n\/\/ lexUserBinaryOp checks for spaces and then identifies and maps.\n\/\/ the newly defined user operator.\nfunc lexUserUnaryOp(l *lexer) stateFn {\n\tglobWhitespace(l)\n\tr := l.next() \/\/ Implication: no multi-char operators\n\tl.userOperators[r] = uopUnaryOp\n\tl.emit(tokUserUnaryOp)\n\treturn lexTopLevel\n}\n\n\/\/ Helper Functions\n\n\/\/ isSpace reports whether r is whitespace.\nfunc isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}\n\n\/\/ isEOL reports whether r is an end-of-line character or an EOF.\nfunc isEOL(r rune) bool {\n\treturn r == '\\n' || r == '\\r' || r == eof\n}\n\n\/\/ isValidIdefRune reports if r may be part of an identifier name.\nfunc isAlphaNumeric(r rune) bool {\n\treturn r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"html\/template\"\n \"math\/rand\"\n \"net\/http\"\n)\n\ntype PageOpts struct {\n Page string\n OnSale bool\n}\n\ntype Product struct {\n Id int `json:\"id\"`\n Name string `json:\"name\"`\n Description string `json:\"description\"`\n Price int `json:\"price\"`\n Weight int `json:\"weight\"`\n}\n\nfunc Index(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"Index: %s\\n\", r.URL.String())\n if r.URL.Path != \"\/\" {\n Error(w, r, http.StatusNotFound)\n return\n }\n\n opts := PageOpts{ Page: \"index\" }\n if rand.Intn(5) == 0 {\n opts.OnSale = true\n }\n processLayout(w, r, opts)\n}\n\nfunc About(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"Products: %s\\n\", r.URL.String())\n opts := PageOpts{ Page: \"about\" }\n processLayout(w, r, opts)\n}\n\nfunc Products(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"Products: %s\\n\", r.URL.String())\n opts := PageOpts{ Page: \"products\" }\n processLayout(w, r, opts)\n}\n\nfunc ProductData(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"Product Data: %s\\n\", r.URL.String())\n product := Product{Id:1, Name:\"asdf\", Description:\"cool thing\", Price:1000, Weight:10}\n output, _ := json.MarshalIndent(&product, \"\", \"\\t\")\n w.Header().Set(\"Content-Type\", \"application\/json\")\n w.Write(output)\n}\n\nfunc Error(w http.ResponseWriter, r *http.Request, status int) {\n fmt.Printf(\"Error: %s\\t%d\\n\", r.URL.String(), status)\n w.WriteHeader(status)\n\n opts := PageOpts{ Page: \"error\" }\n processLayout(w, r, opts)\n}\n\nfunc processLayout(w http.ResponseWriter, r *http.Request, pageOpts PageOpts) {\n extraFile := fmt.Sprintf(\"templates\/%s.html\", pageOpts.Page)\n t, _ := template.ParseFiles(\"templates\/layout.html\", extraFile)\n t.ExecuteTemplate(w, \"layout\", pageOpts)\n}\n<commit_msg>add time to output<commit_after>package handlers\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"html\/template\"\n \"math\/rand\"\n \"net\/http\"\n \"time\"\n)\n\ntype PageOpts struct {\n Page string\n OnSale bool\n}\n\ntype Product struct {\n Id int `json:\"id\"`\n Name string `json:\"name\"`\n Description string `json:\"description\"`\n Price int `json:\"price\"`\n Weight int `json:\"weight\"`\n}\n\nfunc Index(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"%s\\t%s\\n\", time.Now().String(), r.URL.String())\n if r.URL.Path != \"\/\" {\n Error(w, r, http.StatusNotFound)\n return\n }\n\n opts := PageOpts{ Page: \"index\" }\n if rand.Intn(5) == 0 {\n opts.OnSale = true\n }\n processLayout(w, r, opts)\n}\n\nfunc About(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"%s\\t%s\\n\", time.Now().String(), r.URL.String())\n opts := PageOpts{ Page: \"about\" }\n processLayout(w, r, opts)\n}\n\nfunc Products(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"%s\\t%s\\n\", time.Now().String(), r.URL.String())\n opts := PageOpts{ Page: \"products\" }\n processLayout(w, r, opts)\n}\n\nfunc ProductData(w http.ResponseWriter, r *http.Request) {\n fmt.Printf(\"%s\\t%s\\n\", time.Now().String(), r.URL.String())\n product := Product{Id:1, Name:\"asdf\", Description:\"cool thing\", Price:1000, Weight:10}\n output, _ := json.MarshalIndent(&product, \"\", \"\\t\")\n w.Header().Set(\"Content-Type\", \"application\/json\")\n w.Write(output)\n}\n\nfunc Error(w http.ResponseWriter, r *http.Request, status int) {\n fmt.Printf(\"%s\\tERROR\\t%s\\n\", time.Now().String(), r.URL.String())\n w.WriteHeader(status)\n\n opts := PageOpts{ Page: \"error\" }\n processLayout(w, r, opts)\n}\n\nfunc processLayout(w http.ResponseWriter, r *http.Request, pageOpts PageOpts) {\n extraFile := fmt.Sprintf(\"templates\/%s.html\", pageOpts.Page)\n t, _ := template.ParseFiles(\"templates\/layout.html\", extraFile)\n t.ExecuteTemplate(w, \"layout\", pageOpts)\n}\n<|endoftext|>"} {"text":"<commit_before>package gocsv\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"encoding\/json\"\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion interfaces\n\n\/\/ TypeMarshaller is implemented by any value that has a MarshalCSV method\n\/\/ This converter is used to convert the value to it string representation\ntype TypeMarshaller interface {\n\tMarshalCSV() (string, error)\n}\n\n\/\/ TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method\n\/\/ This converter is used to convert a string to your value representation of that string\ntype TypeUnmarshaller interface {\n\tUnmarshalCSV(string) error\n}\n\n\/\/ NoUnmarshalFuncError is the custom error type to be raised in case there is no unmarshal function defined on type\ntype NoUnmarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoUnmarshalFuncError) Error() string {\n\treturn e.msg\n}\n\n\/\/ NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type\ntype NoMarshalFuncError struct {\n\tty reflect.Type\n}\n\nfunc (e NoMarshalFuncError) Error() string {\n\treturn \"No known conversion from \" + e.ty.String() + \" to string, \" + e.ty.String() + \" does not implement TypeMarshaller nor Stringer\"\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion helpers\n\nfunc toString(in interface{}) (string, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\treturn inValue.String(), nil\n\tcase reflect.Bool:\n\t\tb := inValue.Bool()\n\t\tif b {\n\t\t\treturn \"true\", nil\n\t\t}\n\t\treturn \"false\", nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Uint()), nil\n\tcase reflect.Float32:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil\n\tcase reflect.Float64:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil\n\t}\n\treturn \"\", fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to string\")\n}\n\nfunc toBool(in interface{}) (bool, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := inValue.String()\n\t\ts = strings.TrimSpace(s)\n\t\tif strings.EqualFold(s, \"yes\") {\n\t\t\treturn true, nil\n\t\t} else if strings.EqualFold(s, \"no\") || s == \"\" {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn strconv.ParseBool(s)\n\t\t}\n\tcase reflect.Bool:\n\t\treturn inValue.Bool(), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\ti := inValue.Int()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\ti := inValue.Uint()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tf := inValue.Float()\n\t\tif f != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to bool\")\n}\n\nfunc toInt(in interface{}) (int64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseInt(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn inValue.Int(), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn int64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn int64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to int\")\n}\n\nfunc toUint(in interface{}) (uint64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\t\/\/ support the float input\n\t\tif strings.Contains(s, \".\") {\n\t\t\tf, err := strconv.ParseFloat(s, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn uint64(f), nil\n\t\t}\n\t\treturn strconv.ParseUint(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn uint64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn inValue.Uint(), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn uint64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to uint\")\n}\n\nfunc toFloat(in interface{}) (float64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseFloat(s, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn float64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn inValue.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to float\")\n}\n\nfunc setField(field reflect.Value, value string, omitEmpty bool) error {\n\tif field.Kind() == reflect.Ptr {\n\t\tif omitEmpty && value == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif field.IsNil() {\n\t\t\tfield.Set(reflect.New(field.Type().Elem()))\n\t\t}\n\t\tfield = field.Elem()\n\t}\n\n\tswitch field.Interface().(type) {\n\tcase string:\n\t\ts, err := toString(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetString(s)\n\tcase bool:\n\t\tb, err := toBool(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetBool(b)\n\tcase int, int8, int16, int32, int64:\n\t\ti, err := toInt(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetInt(i)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tui, err := toUint(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetUint(ui)\n\tcase float32, float64:\n\t\tf, err := toFloat(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetFloat(f)\n\tdefault:\n\t\t\/\/ Not a native type, check for unmarshal method\n\t\tif err := unmarshall(field, value); err != nil {\n\t\t\tif _, ok := err.(NoUnmarshalFuncError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ Could not unmarshal, check for kind, e.g. renamed type from basic type\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\ts, err := toString(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetString(s)\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := toBool(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetBool(b)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\ti, err := toInt(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetInt(i)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tui, err := toUint(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetUint(ui)\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tf, err := toFloat(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetFloat(f)\n\t\t\tcase reflect.Slice, reflect.Struct:\n\t\t\t\terr := json.Unmarshal([]byte(value), field.Addr().Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFieldAsString(field reflect.Value) (str string, err error) {\n\tswitch field.Kind() {\n\tcase reflect.Interface:\n\tcase reflect.Ptr:\n\t\tif field.IsNil() {\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn getFieldAsString(field.Elem())\n\tdefault:\n\t\t\/\/ Check if field is go native type\n\t\tswitch field.Interface().(type) {\n\t\tcase string:\n\t\t\treturn field.String(), nil\n\t\tcase bool:\n\t\t\tstr, err = toString(field.Bool())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase int, int8, int16, int32, int64:\n\t\t\tstr, err = toString(field.Int())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\tstr, err = toString(field.Uint())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float32:\n\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float64:\n\t\t\tstr, err = toString(field.Float())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Not a native type, check for marshal method\n\t\t\tstr, err = marshall(field)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(NoMarshalFuncError); !ok {\n\t\t\t\t\treturn str, err\n\t\t\t\t}\n\t\t\t\t\/\/ If not marshal method, is field compatible with\/renamed from native type\n\t\t\t\tswitch field.Kind() {\n\t\t\t\tcase reflect.String:\n\t\t\t\t\treturn field.String(), nil\n\t\t\t\tcase reflect.Bool:\n\t\t\t\t\tstr, err = toString(field.Bool())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\t\tstr, err = toString(field.Int())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\t\tstr, err = toString(field.Uint())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float32:\n\t\t\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float64:\n\t\t\t\t\tstr, err = toString(field.Float())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn str, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn str, nil\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Un\/serializations helpers\n\nfunc canMarshal(t reflect.Type) bool {\n\t\/\/ unless it implements marshalText or marshalCSV. Structs that implement this\n\t\/\/ should result in one value and not have their fields exposed\n\t_, canMarshalText := t.MethodByName(\"MarshalText\")\n\t_, canMarshalCSV := t.MethodByName(\"MarshalCSV\")\n\treturn canMarshalCSV || canMarshalText\n}\n\nfunc unmarshall(field reflect.Value, value string) error {\n\tdupField := field\n\tunMarshallIt := func(finalField reflect.Value) error {\n\t\tif finalField.CanInterface() {\n\t\t\tfieldIface := finalField.Interface()\n\n\t\t\tfieldTypeUnmarshaller, ok := fieldIface.(TypeUnmarshaller)\n\t\t\tif ok {\n\t\t\t\treturn fieldTypeUnmarshaller.UnmarshalCSV(value)\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use TextUnmarshaler\n\t\t\tfieldTextUnmarshaler, ok := fieldIface.(encoding.TextUnmarshaler)\n\t\t\tif ok {\n\t\t\t\treturn fieldTextUnmarshaler.UnmarshalText([]byte(value))\n\t\t\t}\n\t\t}\n\n\t\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\tdupField = reflect.New(field.Type().Elem())\n\t\t\tfield.Set(dupField)\n\t\t\treturn unMarshallIt(dupField)\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn unMarshallIt(dupField.Addr())\n\t}\n\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n}\n\nfunc marshall(field reflect.Value) (value string, err error) {\n\tdupField := field\n\tmarshallIt := func(finalField reflect.Value) (string, error) {\n\t\tif finalField.CanInterface() {\n\t\t\tfieldIface := finalField.Interface()\n\n\t\t\t\/\/ Use TypeMarshaller when possible\n\t\t\tfieldTypeMarhaller, ok := fieldIface.(TypeMarshaller)\n\t\t\tif ok {\n\t\t\t\treturn fieldTypeMarhaller.MarshalCSV()\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use TextMarshaller\n\t\t\tfieldTextMarshaler, ok := fieldIface.(encoding.TextMarshaler)\n\t\t\tif ok {\n\t\t\t\ttext, err := fieldTextMarshaler.MarshalText()\n\t\t\t\treturn string(text), err\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use Stringer\n\t\t\tfieldStringer, ok := fieldIface.(fmt.Stringer)\n\t\t\tif ok {\n\t\t\t\treturn fieldStringer.String(), nil\n\t\t\t}\n\t\t}\n\n\t\treturn value, NoMarshalFuncError{field.Type()}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\treturn value, nil\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\tdupField = dupField.Addr()\n\t}\n\treturn marshallIt(dupField)\n}\n<commit_msg>Fix switch statement fall-through bug.<commit_after>package gocsv\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"encoding\/json\"\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion interfaces\n\n\/\/ TypeMarshaller is implemented by any value that has a MarshalCSV method\n\/\/ This converter is used to convert the value to it string representation\ntype TypeMarshaller interface {\n\tMarshalCSV() (string, error)\n}\n\n\/\/ TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method\n\/\/ This converter is used to convert a string to your value representation of that string\ntype TypeUnmarshaller interface {\n\tUnmarshalCSV(string) error\n}\n\n\/\/ NoUnmarshalFuncError is the custom error type to be raised in case there is no unmarshal function defined on type\ntype NoUnmarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoUnmarshalFuncError) Error() string {\n\treturn e.msg\n}\n\n\/\/ NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type\ntype NoMarshalFuncError struct {\n\tty reflect.Type\n}\n\nfunc (e NoMarshalFuncError) Error() string {\n\treturn \"No known conversion from \" + e.ty.String() + \" to string, \" + e.ty.String() + \" does not implement TypeMarshaller nor Stringer\"\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion helpers\n\nfunc toString(in interface{}) (string, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\treturn inValue.String(), nil\n\tcase reflect.Bool:\n\t\tb := inValue.Bool()\n\t\tif b {\n\t\t\treturn \"true\", nil\n\t\t}\n\t\treturn \"false\", nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Uint()), nil\n\tcase reflect.Float32:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil\n\tcase reflect.Float64:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil\n\t}\n\treturn \"\", fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to string\")\n}\n\nfunc toBool(in interface{}) (bool, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := inValue.String()\n\t\ts = strings.TrimSpace(s)\n\t\tif strings.EqualFold(s, \"yes\") {\n\t\t\treturn true, nil\n\t\t} else if strings.EqualFold(s, \"no\") || s == \"\" {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn strconv.ParseBool(s)\n\t\t}\n\tcase reflect.Bool:\n\t\treturn inValue.Bool(), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\ti := inValue.Int()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\ti := inValue.Uint()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tf := inValue.Float()\n\t\tif f != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to bool\")\n}\n\nfunc toInt(in interface{}) (int64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseInt(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn inValue.Int(), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn int64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn int64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to int\")\n}\n\nfunc toUint(in interface{}) (uint64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\t\/\/ support the float input\n\t\tif strings.Contains(s, \".\") {\n\t\t\tf, err := strconv.ParseFloat(s, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn uint64(f), nil\n\t\t}\n\t\treturn strconv.ParseUint(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn uint64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn inValue.Uint(), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn uint64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to uint\")\n}\n\nfunc toFloat(in interface{}) (float64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseFloat(s, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn float64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn inValue.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to float\")\n}\n\nfunc setField(field reflect.Value, value string, omitEmpty bool) error {\n\tif field.Kind() == reflect.Ptr {\n\t\tif omitEmpty && value == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif field.IsNil() {\n\t\t\tfield.Set(reflect.New(field.Type().Elem()))\n\t\t}\n\t\tfield = field.Elem()\n\t}\n\n\tswitch field.Interface().(type) {\n\tcase string:\n\t\ts, err := toString(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetString(s)\n\tcase bool:\n\t\tb, err := toBool(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetBool(b)\n\tcase int, int8, int16, int32, int64:\n\t\ti, err := toInt(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetInt(i)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tui, err := toUint(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetUint(ui)\n\tcase float32, float64:\n\t\tf, err := toFloat(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetFloat(f)\n\tdefault:\n\t\t\/\/ Not a native type, check for unmarshal method\n\t\tif err := unmarshall(field, value); err != nil {\n\t\t\tif _, ok := err.(NoUnmarshalFuncError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ Could not unmarshal, check for kind, e.g. renamed type from basic type\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\ts, err := toString(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetString(s)\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := toBool(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetBool(b)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\ti, err := toInt(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetInt(i)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tui, err := toUint(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetUint(ui)\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tf, err := toFloat(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetFloat(f)\n\t\t\tcase reflect.Slice, reflect.Struct:\n\t\t\t\terr := json.Unmarshal([]byte(value), field.Addr().Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFieldAsString(field reflect.Value) (str string, err error) {\n\tswitch field.Kind() {\n\tcase reflect.Interface, reflect.Ptr:\n\t\tif field.IsNil() {\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn getFieldAsString(field.Elem())\n\tdefault:\n\t\t\/\/ Check if field is go native type\n\t\tswitch field.Interface().(type) {\n\t\tcase string:\n\t\t\treturn field.String(), nil\n\t\tcase bool:\n\t\t\tstr, err = toString(field.Bool())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase int, int8, int16, int32, int64:\n\t\t\tstr, err = toString(field.Int())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\tstr, err = toString(field.Uint())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float32:\n\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float64:\n\t\t\tstr, err = toString(field.Float())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Not a native type, check for marshal method\n\t\t\tstr, err = marshall(field)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(NoMarshalFuncError); !ok {\n\t\t\t\t\treturn str, err\n\t\t\t\t}\n\t\t\t\t\/\/ If not marshal method, is field compatible with\/renamed from native type\n\t\t\t\tswitch field.Kind() {\n\t\t\t\tcase reflect.String:\n\t\t\t\t\treturn field.String(), nil\n\t\t\t\tcase reflect.Bool:\n\t\t\t\t\tstr, err = toString(field.Bool())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\t\tstr, err = toString(field.Int())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\t\tstr, err = toString(field.Uint())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float32:\n\t\t\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float64:\n\t\t\t\t\tstr, err = toString(field.Float())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn str, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn str, nil\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Un\/serializations helpers\n\nfunc canMarshal(t reflect.Type) bool {\n\t\/\/ unless it implements marshalText or marshalCSV. Structs that implement this\n\t\/\/ should result in one value and not have their fields exposed\n\t_, canMarshalText := t.MethodByName(\"MarshalText\")\n\t_, canMarshalCSV := t.MethodByName(\"MarshalCSV\")\n\treturn canMarshalCSV || canMarshalText\n}\n\nfunc unmarshall(field reflect.Value, value string) error {\n\tdupField := field\n\tunMarshallIt := func(finalField reflect.Value) error {\n\t\tif finalField.CanInterface() {\n\t\t\tfieldIface := finalField.Interface()\n\n\t\t\tfieldTypeUnmarshaller, ok := fieldIface.(TypeUnmarshaller)\n\t\t\tif ok {\n\t\t\t\treturn fieldTypeUnmarshaller.UnmarshalCSV(value)\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use TextUnmarshaler\n\t\t\tfieldTextUnmarshaler, ok := fieldIface.(encoding.TextUnmarshaler)\n\t\t\tif ok {\n\t\t\t\treturn fieldTextUnmarshaler.UnmarshalText([]byte(value))\n\t\t\t}\n\t\t}\n\n\t\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\tdupField = reflect.New(field.Type().Elem())\n\t\t\tfield.Set(dupField)\n\t\t\treturn unMarshallIt(dupField)\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn unMarshallIt(dupField.Addr())\n\t}\n\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n}\n\nfunc marshall(field reflect.Value) (value string, err error) {\n\tdupField := field\n\tmarshallIt := func(finalField reflect.Value) (string, error) {\n\t\tif finalField.CanInterface() {\n\t\t\tfieldIface := finalField.Interface()\n\n\t\t\t\/\/ Use TypeMarshaller when possible\n\t\t\tfieldTypeMarhaller, ok := fieldIface.(TypeMarshaller)\n\t\t\tif ok {\n\t\t\t\treturn fieldTypeMarhaller.MarshalCSV()\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use TextMarshaller\n\t\t\tfieldTextMarshaler, ok := fieldIface.(encoding.TextMarshaler)\n\t\t\tif ok {\n\t\t\t\ttext, err := fieldTextMarshaler.MarshalText()\n\t\t\t\treturn string(text), err\n\t\t\t}\n\n\t\t\t\/\/ Otherwise try to use Stringer\n\t\t\tfieldStringer, ok := fieldIface.(fmt.Stringer)\n\t\t\tif ok {\n\t\t\t\treturn fieldStringer.String(), nil\n\t\t\t}\n\t\t}\n\n\t\treturn value, NoMarshalFuncError{field.Type()}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\treturn value, nil\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\tdupField = dupField.Addr()\n\t}\n\treturn marshallIt(dupField)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Discordgo - Discord bindings for Go\n\/\/ Available at https:\/\/github.com\/bwmarrin\/discordgo\n\n\/\/ Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains custom types, currently only a timestamp wrapper.\n\npackage discordgo\n\nimport (\n\t\"time\"\n)\n\ntype Timestamp string\n\nfunc (t Timestamp) Parse() (time.Time, error) {\n\treturn time.Parse(\"2006-01-02T15:04:05.000000-07:00\", string(t))\n}\n<commit_msg>Comments<commit_after>\/\/ Discordgo - Discord bindings for Go\n\/\/ Available at https:\/\/github.com\/bwmarrin\/discordgo\n\n\/\/ Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains custom types, currently only a timestamp wrapper.\n\npackage discordgo\n\nimport (\n\t\"time\"\n)\n\n\/\/ A timestamp, in the format YYYY-MM-DDTHH:MM:SS.MSMSMS-TZ:TZ.\ntype Timestamp string\n\n\/\/ Parse parses a timestamp string into a time.Time object.\n\/\/ The only time this can fail is if you're parsing an invalid timestamp.\nfunc (t Timestamp) Parse() (time.Time, error) {\n\treturn time.Parse(\"2006-01-02T15:04:05.000000-07:00\", string(t))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Miek Gieben. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage pkcs11\n\n\/*\n#define CK_PTR *\n#ifndef NULL_PTR\n#define NULL_PTR 0\n#endif\n#define CK_DEFINE_FUNCTION(returnType, name) returnType name\n#define CK_DECLARE_FUNCTION(returnType, name) returnType name\n#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)\n#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)\n\n#include <stdlib.h>\n#include \"pkcs11.h\"\n\nCK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i)\n{\n\treturn array[i];\n}\n\nCK_ULONG Sizeof()\n{\n\treturn sizeof(CK_ULONG);\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"unsafe\"\n)\n\n\/\/ toList converts from a C style array to a []uint.\nfunc toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint {\n\tl := make([]uint, int(size))\n\tfor i := 0; i < len(l); i++ {\n\t\tl[i] = uint(C.Index(clist, C.CK_ULONG(i)))\n\t}\n\tdefer C.free(unsafe.Pointer(clist))\n\treturn l\n}\n\n\/\/ cBBool converts a bool to a CK_BBOOL.\nfunc cBBool(x bool) C.CK_BBOOL {\n\tif x {\n\t\treturn C.CK_BBOOL(C.CK_TRUE)\n\t}\n\treturn C.CK_BBOOL(C.CK_FALSE)\n}\n\n\/\/ Error represents an PKCS#11 error.\ntype Error uint\n\nfunc (e Error) Error() string {\n\treturn \"pkcs11: \" + fmt.Sprintf(\"pkcs11: 0x%X: %s\", uint(e), strerror[uint(e)])\n}\n\nfunc toError(e C.CK_RV) error {\n\tif e == C.CKR_OK {\n\t\treturn nil\n\t}\n\treturn Error(e)\n}\n\n\/* SessionHandle is a Cryptoki-assigned value that identifies a session. *\/\ntype SessionHandle uint\n\n\/* ObjectHandle is a token-specific identifier for an object. *\/\ntype ObjectHandle uint\n\n\/\/ Version represents any version information from the library.\ntype Version struct {\n\tMajor byte\n\tMinor byte\n}\n\nfunc toVersion(version C.CK_VERSION) Version {\n\treturn Version{byte(version.major), byte(version.minor)}\n}\n\n\/\/ SlotEvent holds the SlotID which for which an slot event (token insertion,\n\/\/ removal, etc.) occurred.\ntype SlotEvent struct {\n\tSlotID uint\n}\n\n\/\/ Info provides information about the library and hardware used.\ntype Info struct {\n\tCryptokiVersion Version\n\tManufacturerID string\n\tFlags uint\n\tLibraryDescription string\n\tLibraryVersion Version\n}\n\n\/* SlotInfo provides information about a slot. *\/\ntype SlotInfo struct {\n\tSlotDescription string \/\/ 64 bytes.\n\tManufacturerID string \/\/ 32 bytes.\n\tFlags uint\n\tHardwareVersion Version\n\tFirmwareVersion Version\n}\n\n\/* TokenInfo provides information about a token. *\/\ntype TokenInfo struct {\n\tLabel string\n\tManufacturerID string\n\tModel string\n\tSerialNumber string\n\tFlags uint\n\tMaxSessionCount uint\n\tSessionCount uint\n\tMaxRwSessionCount uint\n\tRwSessionCount uint\n\tMaxPinLen uint\n\tMinPinLen uint\n\tTotalPublicMemory uint\n\tFreePublicMemory uint\n\tTotalPrivateMemory uint\n\tFreePrivateMemory uint\n\tHardwareVersion Version\n\tFirmwareVersion Version\n\tUTCTime string\n}\n\n\/* SesionInfo provides information about a session. *\/\ntype SessionInfo struct {\n\tSlotID uint\n\tState uint\n\tFlags uint\n\tDeviceError uint\n}\n\n\/\/ Attribute holds an attribute type\/value combination.\ntype Attribute struct {\n\tType uint\n\tValue []byte\n}\n\nfunc NewAttribute(typ uint, x interface{}) *Attribute {\n\ta := new(Attribute)\n\ta.Type = typ\n\tif x == nil {\n\t\treturn a\n\t}\n\tswitch x.(type) {\n\tcase bool: \/\/ create bbool\n\t\tif x.(bool) {\n\t\t\ta.Value = []byte{1}\n\t\t\tbreak\n\t\t}\n\t\ta.Value = []byte{0}\n\tcase uint, int:\n\t\tvar y uint\n\t\tif _, ok := x.(int); ok {\n\t\t\ty = uint(x.(int))\n\t\t}\n\t\tif _, ok := x.(uint); ok {\n\t\t\ty = x.(uint)\n\t\t}\n\t\tswitch int(C.Sizeof()) {\n\t\tcase 4:\n\t\t\ta.Value = make([]byte, 4)\n\t\t\ta.Value[0] = byte(y)\n\t\t\ta.Value[1] = byte(y >> 8)\n\t\t\ta.Value[2] = byte(y >> 16)\n\t\t\ta.Value[3] = byte(y >> 24)\n\t\tcase 8:\n\t\t\ta.Value = make([]byte, 8)\n\t\t\ta.Value[0] = byte(y)\n\t\t\ta.Value[1] = byte(y >> 8)\n\t\t\ta.Value[2] = byte(y >> 16)\n\t\t\ta.Value[3] = byte(y >> 24)\n\t\t\ta.Value[4] = byte(y >> 32)\n\t\t\ta.Value[5] = byte(y >> 40)\n\t\t\ta.Value[6] = byte(y >> 48)\n\t\t\ta.Value[7] = byte(y >> 56)\n\t\t}\n\tcase string:\n\t\ta.Value = []byte(x.(string))\n\tcase []byte: \/\/ just copy\n\t\ta.Value = x.([]byte)\n\tcase time.Time: \/\/ for CKA_DATE\n\t\ta.Value = cDate(x.(time.Time))\n\tdefault:\n\t\tpanic(\"pkcs11: unhandled attribute type\")\n\t}\n\treturn a\n}\n\n\/\/ cAttribute returns the start address and the length of an attribute list.\nfunc cAttributeList(a []*Attribute) (C.CK_ATTRIBUTE_PTR, C.CK_ULONG) {\n\tif len(a) == 0 {\n\t\treturn nil, 0\n\t}\n\tpa := make([]C.CK_ATTRIBUTE, len(a))\n\tfor i := 0; i < len(a); i++ {\n\t\tpa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type)\n\t\tif a[i].Value == nil {\n\t\t\tcontinue\n\t\t}\n\t\tpa[i].pValue = C.CK_VOID_PTR((&a[i].Value[0]))\n\t\tpa[i].ulValueLen = C.CK_ULONG(len(a[i].Value))\n\t}\n\treturn C.CK_ATTRIBUTE_PTR(&pa[0]), C.CK_ULONG(len(a))\n}\n\nfunc cDate(t time.Time) []byte {\n\tb := make([]byte, 8)\n\tyear, month, day := t.Date()\n\ty := fmt.Sprintf(\"%4d\", year)\n\tm := fmt.Sprintf(\"%02d\", month)\n\td1 := fmt.Sprintf(\"%02d\", day)\n\tb[0], b[1], b[2], b[3] = y[0], y[1], y[2], y[3]\n\tb[4], b[5] = m[0], m[1]\n\tb[6], b[7] = d1[0], d1[1]\n\treturn b\n}\n\n\/\/ Mechanism holds an mechanism type\/value combination.\ntype Mechanism struct {\n\tMechanism uint\n\tParameter []byte\n}\n\nfunc NewMechanism(mech uint, x interface{}) *Mechanism {\n\tm := new(Mechanism)\n\tm.Mechanism = mech\n\tif x == nil {\n\t\treturn m\n\t}\n\t\/\/ TODO(miek): Not seen anything as elaborate as Attributes, so for know do nothing.\n\treturn m\n}\n\nfunc cMechanismList(m []*Mechanism) (C.CK_MECHANISM_PTR, C.CK_ULONG) {\n\tif len(m) == 0 {\n\t\treturn nil, 0\n\t}\n\tpm := make([]C.CK_MECHANISM, len(m))\n\tfor i := 0; i < len(m); i++ {\n\t\tpm[i].mechanism = C.CK_MECHANISM_TYPE(m[i].Mechanism)\n\t\tif m[i].Parameter == nil {\n\t\t\tcontinue\n\t\t}\n\t\tpm[i].pParameter = C.CK_VOID_PTR(&(m[i].Parameter[0]))\n\t\tpm[i].ulParameterLen = C.CK_ULONG(len(m[i].Parameter))\n\t}\n\treturn C.CK_MECHANISM_PTR(&pm[0]), C.CK_ULONG(len(m))\n}\n\n\/\/ MechanismInfo provides information about a particular mechanism.\ntype MechanismInfo struct {\n\tMinKeySize uint\n\tMaxKeySize uint\n\tFlags uint\n}\n<commit_msg>doc update<commit_after>\/\/ Copyright 2013 Miek Gieben. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage pkcs11\n\n\/*\n#define CK_PTR *\n#ifndef NULL_PTR\n#define NULL_PTR 0\n#endif\n#define CK_DEFINE_FUNCTION(returnType, name) returnType name\n#define CK_DECLARE_FUNCTION(returnType, name) returnType name\n#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)\n#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)\n\n#include <stdlib.h>\n#include \"pkcs11.h\"\n\nCK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i)\n{\n\treturn array[i];\n}\n\nCK_ULONG Sizeof()\n{\n\treturn sizeof(CK_ULONG);\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"unsafe\"\n)\n\n\/\/ toList converts from a C style array to a []uint.\nfunc toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint {\n\tl := make([]uint, int(size))\n\tfor i := 0; i < len(l); i++ {\n\t\tl[i] = uint(C.Index(clist, C.CK_ULONG(i)))\n\t}\n\tdefer C.free(unsafe.Pointer(clist))\n\treturn l\n}\n\n\/\/ cBBool converts a bool to a CK_BBOOL.\nfunc cBBool(x bool) C.CK_BBOOL {\n\tif x {\n\t\treturn C.CK_BBOOL(C.CK_TRUE)\n\t}\n\treturn C.CK_BBOOL(C.CK_FALSE)\n}\n\n\/\/ Error represents an PKCS#11 error.\ntype Error uint\n\nfunc (e Error) Error() string {\n\treturn \"pkcs11: \" + fmt.Sprintf(\"pkcs11: 0x%X: %s\", uint(e), strerror[uint(e)])\n}\n\nfunc toError(e C.CK_RV) error {\n\tif e == C.CKR_OK {\n\t\treturn nil\n\t}\n\treturn Error(e)\n}\n\n\/* SessionHandle is a Cryptoki-assigned value that identifies a session. *\/\ntype SessionHandle uint\n\n\/* ObjectHandle is a token-specific identifier for an object. *\/\ntype ObjectHandle uint\n\n\/\/ Version represents any version information from the library.\ntype Version struct {\n\tMajor byte\n\tMinor byte\n}\n\nfunc toVersion(version C.CK_VERSION) Version {\n\treturn Version{byte(version.major), byte(version.minor)}\n}\n\n\/\/ SlotEvent holds the SlotID which for which an slot event (token insertion,\n\/\/ removal, etc.) occurred.\ntype SlotEvent struct {\n\tSlotID uint\n}\n\n\/\/ Info provides information about the library and hardware used.\ntype Info struct {\n\tCryptokiVersion Version\n\tManufacturerID string\n\tFlags uint\n\tLibraryDescription string\n\tLibraryVersion Version\n}\n\n\/* SlotInfo provides information about a slot. *\/\ntype SlotInfo struct {\n\tSlotDescription string \/\/ 64 bytes.\n\tManufacturerID string \/\/ 32 bytes.\n\tFlags uint\n\tHardwareVersion Version\n\tFirmwareVersion Version\n}\n\n\/* TokenInfo provides information about a token. *\/\ntype TokenInfo struct {\n\tLabel string\n\tManufacturerID string\n\tModel string\n\tSerialNumber string\n\tFlags uint\n\tMaxSessionCount uint\n\tSessionCount uint\n\tMaxRwSessionCount uint\n\tRwSessionCount uint\n\tMaxPinLen uint\n\tMinPinLen uint\n\tTotalPublicMemory uint\n\tFreePublicMemory uint\n\tTotalPrivateMemory uint\n\tFreePrivateMemory uint\n\tHardwareVersion Version\n\tFirmwareVersion Version\n\tUTCTime string\n}\n\n\/* SesionInfo provides information about a session. *\/\ntype SessionInfo struct {\n\tSlotID uint\n\tState uint\n\tFlags uint\n\tDeviceError uint\n}\n\n\/\/ Attribute holds an attribute type\/value combination.\ntype Attribute struct {\n\tType uint\n\tValue []byte\n}\n\n\/\/ NewAttribute allocates a Attribute and returns a pointer to it.\n\/\/ Note that this is merely a convience function, as values returned \n\/\/ from the HSM are not converted back to Go values. There are raw\n\/\/ byte slices.\nfunc NewAttribute(typ uint, x interface{}) *Attribute {\n\ta := new(Attribute)\n\ta.Type = typ\n\tif x == nil {\n\t\treturn a\n\t}\n\tswitch x.(type) {\n\tcase bool: \/\/ create bbool\n\t\tif x.(bool) {\n\t\t\ta.Value = []byte{1}\n\t\t\tbreak\n\t\t}\n\t\ta.Value = []byte{0}\n\tcase uint, int:\n\t\tvar y uint\n\t\tif _, ok := x.(int); ok {\n\t\t\ty = uint(x.(int))\n\t\t}\n\t\tif _, ok := x.(uint); ok {\n\t\t\ty = x.(uint)\n\t\t}\n\t\tswitch int(C.Sizeof()) {\n\t\tcase 4:\n\t\t\ta.Value = make([]byte, 4)\n\t\t\ta.Value[0] = byte(y)\n\t\t\ta.Value[1] = byte(y >> 8)\n\t\t\ta.Value[2] = byte(y >> 16)\n\t\t\ta.Value[3] = byte(y >> 24)\n\t\tcase 8:\n\t\t\ta.Value = make([]byte, 8)\n\t\t\ta.Value[0] = byte(y)\n\t\t\ta.Value[1] = byte(y >> 8)\n\t\t\ta.Value[2] = byte(y >> 16)\n\t\t\ta.Value[3] = byte(y >> 24)\n\t\t\ta.Value[4] = byte(y >> 32)\n\t\t\ta.Value[5] = byte(y >> 40)\n\t\t\ta.Value[6] = byte(y >> 48)\n\t\t\ta.Value[7] = byte(y >> 56)\n\t\t}\n\tcase string:\n\t\ta.Value = []byte(x.(string))\n\tcase []byte: \/\/ just copy\n\t\ta.Value = x.([]byte)\n\tcase time.Time: \/\/ for CKA_DATE\n\t\ta.Value = cDate(x.(time.Time))\n\tdefault:\n\t\tpanic(\"pkcs11: unhandled attribute type\")\n\t}\n\treturn a\n}\n\n\/\/ cAttribute returns the start address and the length of an attribute list.\nfunc cAttributeList(a []*Attribute) (C.CK_ATTRIBUTE_PTR, C.CK_ULONG) {\n\tif len(a) == 0 {\n\t\treturn nil, 0\n\t}\n\tpa := make([]C.CK_ATTRIBUTE, len(a))\n\tfor i := 0; i < len(a); i++ {\n\t\tpa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type)\n\t\tif a[i].Value == nil {\n\t\t\tcontinue\n\t\t}\n\t\tpa[i].pValue = C.CK_VOID_PTR((&a[i].Value[0]))\n\t\tpa[i].ulValueLen = C.CK_ULONG(len(a[i].Value))\n\t}\n\treturn C.CK_ATTRIBUTE_PTR(&pa[0]), C.CK_ULONG(len(a))\n}\n\nfunc cDate(t time.Time) []byte {\n\tb := make([]byte, 8)\n\tyear, month, day := t.Date()\n\ty := fmt.Sprintf(\"%4d\", year)\n\tm := fmt.Sprintf(\"%02d\", month)\n\td1 := fmt.Sprintf(\"%02d\", day)\n\tb[0], b[1], b[2], b[3] = y[0], y[1], y[2], y[3]\n\tb[4], b[5] = m[0], m[1]\n\tb[6], b[7] = d1[0], d1[1]\n\treturn b\n}\n\n\/\/ Mechanism holds an mechanism type\/value combination.\ntype Mechanism struct {\n\tMechanism uint\n\tParameter []byte\n}\n\nfunc NewMechanism(mech uint, x interface{}) *Mechanism {\n\tm := new(Mechanism)\n\tm.Mechanism = mech\n\tif x == nil {\n\t\treturn m\n\t}\n\t\/\/ TODO(miek): Not seen anything as elaborate as Attributes, so for know do nothing.\n\treturn m\n}\n\nfunc cMechanismList(m []*Mechanism) (C.CK_MECHANISM_PTR, C.CK_ULONG) {\n\tif len(m) == 0 {\n\t\treturn nil, 0\n\t}\n\tpm := make([]C.CK_MECHANISM, len(m))\n\tfor i := 0; i < len(m); i++ {\n\t\tpm[i].mechanism = C.CK_MECHANISM_TYPE(m[i].Mechanism)\n\t\tif m[i].Parameter == nil {\n\t\t\tcontinue\n\t\t}\n\t\tpm[i].pParameter = C.CK_VOID_PTR(&(m[i].Parameter[0]))\n\t\tpm[i].ulParameterLen = C.CK_ULONG(len(m[i].Parameter))\n\t}\n\treturn C.CK_MECHANISM_PTR(&pm[0]), C.CK_ULONG(len(m))\n}\n\n\/\/ MechanismInfo provides information about a particular mechanism.\ntype MechanismInfo struct {\n\tMinKeySize uint\n\tMaxKeySize uint\n\tFlags uint\n}\n<|endoftext|>"} {"text":"<commit_before>package blockcy\n\nimport \"time\"\n\n\/\/Blockchain represents information about\n\/\/the state of a blockchain.\ntype Blockchain struct {\n\tName string `json:\"name\"`\n\tHeight int `json:\"height\"`\n\tHash string `json:\"hash\"`\n\tTime time.Time `json:\"time\"`\n\tLatestURL string `json:\"latest_url\"`\n\tPrevHash string `json:\"previous_hash\"`\n\tPrevURL string `json:\"previous_url\"`\n\tPeerCount int `json:\"peer_count\"`\n\tHighFee int `json:\"high_fee_per_kb\"`\n\tMediumFee int `json:\"medium_fee_per_kb\"`\n\tLowFee int `json:\"low_fee_per_kb\"`\n\tUnconfirmedCount int `json:\"unconfirmed_count\"`\n\tLastForkHeight int `json:\"last_fork_height\"`\n\tLastForkHash string `json:\"last_fork_hash\"`\n}\n\n\/\/Block represents information about the state\n\/\/of a given block in a blockchain.\ntype Block struct {\n\tHash string `json:\"hash\"`\n\tHeight int `json:\"height\"`\n\tDepth int `json:\"depth\"`\n\tChain string `json:\"chain\"`\n\tTotal int `json:\"total\"`\n\tFees int `json:\"fees\"`\n\tVer int `json:\"ver\"`\n\tTime time.Time `json:\"time\"`\n\tReceivedTime time.Time `json:\"received_time\"`\n\tRelayedBy string `json:\"relayed_by,omitempty\"`\n\tBits int `json:\"bits\"`\n\tNonce int `json:\"nonce\"`\n\tNumTX int `json:\"n_tx\"`\n\tPrevBlock string `json:\"prev_block\"`\n\tPrevBlockURL string `json:\"prev_block_url\"`\n\tMerkleRoot string `json:\"mrkl_root\"`\n\tTXids []string `json:\"txids\"`\n\tNextTXs string `json:\"next_txids\"`\n}\n\n\/\/TX represents information about the state\n\/\/of a given transaction in a blockchain.\ntype TX struct {\n\tBlockHash string `json:\"block_hash,omitempty\"`\n\tBlockHeight int `json:\"block_height,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n\tAddresses []string `json:\"addresses,omitempty\"`\n\tTotal int `json:\"total,omitempty\"`\n\tFees int `json:\"fees,omitempty\"`\n\tSize int `json:\"size\"`\n\tPreference string `json:\"preference,omitempty\"`\n\tRelayedBy string `json:\"relayed_by,omitempty\"`\n\tReceived time.Time `json:\"received,omitempty\"`\n\tConfirmed time.Time `json:\"confirmed,omitempty\"`\n\tConfirmations int `json:\"confirmations,omitempty\"`\n\tConfidence float64 `json:\"confidence,omitempty\"`\n\tVer int `json:\"ver,omitempty\"`\n\tLockTime int `json:\"lock_time,omitempty\"`\n\tDoubleSpend bool `json:\"double_spend,omitempty\"`\n\tDoubleOf string `json:\"double_of,omitempty\"`\n\tReceiveCount int `json:\"receive_count,omitempty\"`\n\tVinSize int `json:\"vin_sz,omitempty\"`\n\tVoutSize int `json:\"vout_sz,omitempty\"`\n\tHex string `json:\"hex,omitempty\"`\n\tChangeAddress string `json:\"change_address,omitempty\"`\n\tNextInputs string `json:\"next_inputs,omitempty\"`\n\tNextOutputs string `json:\"next_outputs,omitempty\"`\n\tInputs []TXInput `json:\"inputs\"`\n\tOutputs []TXOutput `json:\"outputs\"`\n}\n\n\/\/TXInput represents the state of a transaction input\ntype TXInput struct {\n\tPrevHash string `json:\"prev_hash,omitempty\"`\n\tOutputIndex int `json:\"output_index,omitempty\"`\n\tOutputValue int `json:\"output_value,omitempty\"`\n\tAddresses []string `json:\"addresses\"`\n\tSequence int `json:\"sequence,omitempty\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tAge int `json:\"age,omitempty\"`\n\tWalletName string `json:\"wallet_name,omitempty\"`\n}\n\n\/\/TXOutput represents the state of a transaction output\ntype TXOutput struct {\n\tSpentBy string `json:\"spent_by,omitempty\"`\n\tValue int `json:\"value\"`\n\tAddresses []string `json:\"addresses\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tDataHex string `json:\"data_hex,omitempty\"`\n\tDataString string `json:\"data_string,omitempty\"`\n}\n\n\/\/TXConf represents information about the\n\/\/confidence of an unconfirmed transaction.\ntype TXConf struct {\n\tAge int `json:\"age_millis\"`\n\tReceiveCount int `json:\"receive_count,omitempty\"`\n\tConfidence float64 `json:\"confidence\"`\n\tTXHash string `json:\"txhash\"`\n}\n\n\/\/TXRef represents summarized data about a\n\/\/transaction input or output.\ntype TXRef struct {\n\tBlockHeight int `json:\"block_height\"`\n\tTXHash string `json:\"tx_hash\"`\n\tTXInputN int `json:\"tx_input_n\"`\n\tTXOutputN int `json:\"tx_output_n\"`\n\tValue int `json:\"value\"`\n\tPref string `json:\"preference\"`\n\tSpent bool `json:\"spent\"`\n\tDoubleSpend bool `json:\"double_spend\"`\n\tDoubleOf string `json:\"double_of,omitempty\"`\n\tConfirmations int `json:\"confirmations\"`\n\tRefBalance int `json:\"ref_balance,omitempty\"`\n\tConfidence float64 `json:\"confidence,omitempty\"`\n\tConfirmed time.Time `json:\"confirmed,omitempty\"`\n\tSpentBy string `json:\"spent_by,omitempty\"`\n\tReceived time.Time `json:\"received,omitempty\"`\n\tReceivedCount int `json:\"received_count,omitempty\"`\n}\n\n\/\/TXSkel represents the return call to BlockCypher's\n\/\/txs\/new endpoint, and includes error information,\n\/\/hex transactions that need to be signed, and space\n\/\/for the signed transactions and associated public keys.\ntype TXSkel struct {\n\tTrans TX `json:\"tx\"`\n\tToSign []string `json:\"tosign\"`\n\tSignatures []string `json:\"signatures\"`\n\tPubKeys []string `json:\"pubkeys,omitempty\"`\n\tToSignTX []string `json:\"tosign_tx,omitempty\"`\n\tErrors []txSkelErr `json:\"errors,omitempty\"`\n}\n\n\/\/used within for JSON serialization.\ntype txSkelErr struct {\n\tError string `json:\"error,omitempty\"`\n}\n\n\/\/NullData represents the call and return to BlockCypher's\n\/\/Data API, allowing you to embed up to 80 bytes into\n\/\/a blockchain via an OP_RETURN.\ntype NullData struct {\n\tData string `json:\"data\"`\n\tEncoding string `json:\"encoding,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n}\n\n\/\/MicroTX represents a microtransaction. For small-value\n\/\/transactions, BlockCypher will sign the transaction\n\/\/on your behalf, with your private key (if provided).\n\/\/Setting a separate change address is recommended.\n\/\/Where your application model allows it, consider\n\/\/only using public keys with microtransactions,\n\/\/and sign the microtransaction with your private key\n\/\/(without sending to BlockCypher's server).\ntype MicroTX struct {\n\t\/\/Only one of Pubkey\/Private\/Wif is required\n\tPubkey string `json:\"from_pubkey,omitempty\"`\n\tPriv string `json:\"from_private,omitempty\"`\n\tWif string `json:\"from_wif,omitempty\"`\n\tToAddr string `json:\"to_address\"`\n\tValue int `json:\"value_satoshis\"`\n\tChangeAddr string `json:\"change_address,omitempty\"`\n\tWait bool `json:\"wait_guarantee,omitempty\"`\n\tToSign []string `json:\"tosign,omitempty\"`\n\tSignatures []string `json:\"signatures,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n\tInputs []MicroInput `json:\"inputs,omitempty\"`\n\tOutputs []MicroOutput `json:\"outputs,omitempty\"`\n\tFees int `json:\"fees,omitempty\"`\n}\n\n\/\/MicroInput represents pared down TXInput data\n\/\/within a Microtransaction.\ntype MicroInput struct {\n\tPrevHash string `json:\"prev_hash\"`\n\tOutputIndex int `json:\"output_index\"`\n}\n\n\/\/MicroOutput represents pared down TXOutput data\n\/\/within a Microtransaction.\ntype MicroOutput struct {\n\tValue int `json:\"value\"`\n\tAddress string `json:\"address\"`\n}\n\n\/\/Addr represents information about the state\n\/\/of a public address.\ntype Addr struct {\n\tAddress string `json:\"address\"`\n\tTotalReceived int `json:\"total_received\"`\n\tTotalSent int `json:\"total_sent\"`\n\tBalance int `json:\"balance\"`\n\tUnconfirmedBalance int `json:\"unconfirmed_balance\"`\n\tFinalBalance int `json:\"final_balance\"`\n\tNumTX int `json:\"n_tx\"`\n\tUnconfirmedNumTX int `json:\"unconfirmed_n_tx\"`\n\tFinalNumTX int `json:\"final_n_tx\"`\n\tTXs []TX `json:\"txs,omitempty\"`\n\tTXRefs []TXRef `json:\"txrefs,omitempty\"`\n\tUnconfirmedTXRefs []TXRef `json:\"unconfirmed_txrefs,omitempty\"`\n\tHasMore bool `json:\"hasMore,omitempty\"`\n}\n\n\/\/AddrKeychain represents information about a generated\n\/\/public-private key pair from BlockCypher's address\n\/\/generation API. Large amounts are not recommended to be\n\/\/stored with these addresses.\ntype AddrKeychain struct {\n\tAddress string `json:\"address,omitempty\"`\n\tPrivate string `json:\"private,omitempty\"`\n\tPublic string `json:\"public,omitempty\"`\n\tWif string `json:\"wif,omitempty\"`\n\tPubKeys []string `json:\"pubkeys,omitempty\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n}\n\n\/\/Wallet represents information about a standard wallet.\n\/\/Typically, wallets can be used wherever an address can be\n\/\/used within the API.\ntype Wallet struct {\n\tName string `json:\"name,omitempty\"`\n\tAddresses []string `json:\"addresses,omitempty\"`\n}\n\n\/\/Hook represents a WebHook\/WebSockets event.\n\/\/BlockCypher supports the following events:\n\/\/\tEvent = \"unconfirmed-tx\"\n\/\/\tEvent = \"new-block\"\n\/\/\tEvent = \"confirmed-tx\"\n\/\/\tEvent = \"tx-confirmation\"\n\/\/\tEvent = \"double-spend-tx\"\n\/\/ Event = \"tx-confidence\"\n\/\/Hash, Address, and Script are all optional; creating\n\/\/a WebHook with any of them will filter the resulting\n\/\/notifications, if appropriate. ID is returned by\n\/\/BlockCyphers servers after Posting a new WebHook; you\n\/\/shouldn't manually generate this field.\ntype Hook struct {\n\tID string `json:\"id,omitempty\"`\n\tEvent string `json:\"event\"`\n\tHash string `json:\"hash,omitempty\"`\n\tWalletName string `json:\"wallet_name,omitempty\"`\n\tAddress string `json:\"address,omitempty\"`\n\tConfirmations int `json:\"confirmations,omitempty\"`\n\tConfidence float32 `json:\"confidence,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tURL string `json:\"url,omitempty\"`\n\tCallbackErrs int `json:\"callback_errors,omitempty\"`\n}\n\n\/\/PaymentFwd represents a reference to\n\/\/a Payment Forwarding request.\ntype PaymentFwd struct {\n\tID string `json:\"id,omitempty\"`\n\tDestination string `json:\"destination\"`\n\tInputAddr string `json:\"input_address,omitempty\"`\n\tProcessAddr string `json:\"process_fees_address,omitempty\"`\n\tProcessPercent float64 `json:\"process_fees_percent,omitempty\"`\n\tProcessValue int `json:\"process_fees_satoshis,omitempty\"`\n\tCallbackURL string `json:\"callback_url,omitempty\"`\n\tEnableConfirm bool `json:\"enable_confirmations,omitempty\"`\n\tMiningFees int `json:\"mining_fees_satoshis,omitempty\"`\n\tTXHistory []string `json:\"transactions,omitempty\"`\n}\n\n\/\/Payback represents a Payment Forwarding Callback.\n\/\/It's more fun to call it a \"payback.\"\ntype Payback struct {\n\tValue int `json:\"value\"`\n\tDestination string `json:\"destination\"`\n\tDestHash string `json:\"transaction_hash\"`\n\tInputAddr string `json:\"input_address\"`\n\tInputHash string `json:\"input_transaction_hash\"`\n}\n<commit_msg>add dataprotocol to tx type<commit_after>package blockcy\n\nimport \"time\"\n\n\/\/Blockchain represents information about\n\/\/the state of a blockchain.\ntype Blockchain struct {\n\tName string `json:\"name\"`\n\tHeight int `json:\"height\"`\n\tHash string `json:\"hash\"`\n\tTime time.Time `json:\"time\"`\n\tLatestURL string `json:\"latest_url\"`\n\tPrevHash string `json:\"previous_hash\"`\n\tPrevURL string `json:\"previous_url\"`\n\tPeerCount int `json:\"peer_count\"`\n\tHighFee int `json:\"high_fee_per_kb\"`\n\tMediumFee int `json:\"medium_fee_per_kb\"`\n\tLowFee int `json:\"low_fee_per_kb\"`\n\tUnconfirmedCount int `json:\"unconfirmed_count\"`\n\tLastForkHeight int `json:\"last_fork_height\"`\n\tLastForkHash string `json:\"last_fork_hash\"`\n}\n\n\/\/Block represents information about the state\n\/\/of a given block in a blockchain.\ntype Block struct {\n\tHash string `json:\"hash\"`\n\tHeight int `json:\"height\"`\n\tDepth int `json:\"depth\"`\n\tChain string `json:\"chain\"`\n\tTotal int `json:\"total\"`\n\tFees int `json:\"fees\"`\n\tVer int `json:\"ver\"`\n\tTime time.Time `json:\"time\"`\n\tReceivedTime time.Time `json:\"received_time\"`\n\tRelayedBy string `json:\"relayed_by,omitempty\"`\n\tBits int `json:\"bits\"`\n\tNonce int `json:\"nonce\"`\n\tNumTX int `json:\"n_tx\"`\n\tPrevBlock string `json:\"prev_block\"`\n\tPrevBlockURL string `json:\"prev_block_url\"`\n\tMerkleRoot string `json:\"mrkl_root\"`\n\tTXids []string `json:\"txids\"`\n\tNextTXs string `json:\"next_txids\"`\n}\n\n\/\/TX represents information about the state\n\/\/of a given transaction in a blockchain.\ntype TX struct {\n\tBlockHash string `json:\"block_hash,omitempty\"`\n\tBlockHeight int `json:\"block_height,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n\tAddresses []string `json:\"addresses,omitempty\"`\n\tTotal int `json:\"total,omitempty\"`\n\tFees int `json:\"fees,omitempty\"`\n\tSize int `json:\"size\"`\n\tPreference string `json:\"preference,omitempty\"`\n\tRelayedBy string `json:\"relayed_by,omitempty\"`\n\tReceived time.Time `json:\"received,omitempty\"`\n\tConfirmed time.Time `json:\"confirmed,omitempty\"`\n\tConfirmations int `json:\"confirmations,omitempty\"`\n\tConfidence float64 `json:\"confidence,omitempty\"`\n\tVer int `json:\"ver,omitempty\"`\n\tLockTime int `json:\"lock_time,omitempty\"`\n\tDoubleSpend bool `json:\"double_spend,omitempty\"`\n\tDoubleOf string `json:\"double_of,omitempty\"`\n\tReceiveCount int `json:\"receive_count,omitempty\"`\n\tVinSize int `json:\"vin_sz,omitempty\"`\n\tVoutSize int `json:\"vout_sz,omitempty\"`\n\tHex string `json:\"hex,omitempty\"`\n\tDataProtocol string `json:\"data_protocol,omitempty\"`\n\tChangeAddress string `json:\"change_address,omitempty\"`\n\tNextInputs string `json:\"next_inputs,omitempty\"`\n\tNextOutputs string `json:\"next_outputs,omitempty\"`\n\tInputs []TXInput `json:\"inputs\"`\n\tOutputs []TXOutput `json:\"outputs\"`\n}\n\n\/\/TXInput represents the state of a transaction input\ntype TXInput struct {\n\tPrevHash string `json:\"prev_hash,omitempty\"`\n\tOutputIndex int `json:\"output_index,omitempty\"`\n\tOutputValue int `json:\"output_value,omitempty\"`\n\tAddresses []string `json:\"addresses\"`\n\tSequence int `json:\"sequence,omitempty\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tAge int `json:\"age,omitempty\"`\n\tWalletName string `json:\"wallet_name,omitempty\"`\n}\n\n\/\/TXOutput represents the state of a transaction output\ntype TXOutput struct {\n\tSpentBy string `json:\"spent_by,omitempty\"`\n\tValue int `json:\"value\"`\n\tAddresses []string `json:\"addresses\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tDataHex string `json:\"data_hex,omitempty\"`\n\tDataString string `json:\"data_string,omitempty\"`\n}\n\n\/\/TXConf represents information about the\n\/\/confidence of an unconfirmed transaction.\ntype TXConf struct {\n\tAge int `json:\"age_millis\"`\n\tReceiveCount int `json:\"receive_count,omitempty\"`\n\tConfidence float64 `json:\"confidence\"`\n\tTXHash string `json:\"txhash\"`\n}\n\n\/\/TXRef represents summarized data about a\n\/\/transaction input or output.\ntype TXRef struct {\n\tBlockHeight int `json:\"block_height\"`\n\tTXHash string `json:\"tx_hash\"`\n\tTXInputN int `json:\"tx_input_n\"`\n\tTXOutputN int `json:\"tx_output_n\"`\n\tValue int `json:\"value\"`\n\tPref string `json:\"preference\"`\n\tSpent bool `json:\"spent\"`\n\tDoubleSpend bool `json:\"double_spend\"`\n\tDoubleOf string `json:\"double_of,omitempty\"`\n\tConfirmations int `json:\"confirmations\"`\n\tRefBalance int `json:\"ref_balance,omitempty\"`\n\tConfidence float64 `json:\"confidence,omitempty\"`\n\tConfirmed time.Time `json:\"confirmed,omitempty\"`\n\tSpentBy string `json:\"spent_by,omitempty\"`\n\tReceived time.Time `json:\"received,omitempty\"`\n\tReceivedCount int `json:\"received_count,omitempty\"`\n}\n\n\/\/TXSkel represents the return call to BlockCypher's\n\/\/txs\/new endpoint, and includes error information,\n\/\/hex transactions that need to be signed, and space\n\/\/for the signed transactions and associated public keys.\ntype TXSkel struct {\n\tTrans TX `json:\"tx\"`\n\tToSign []string `json:\"tosign\"`\n\tSignatures []string `json:\"signatures\"`\n\tPubKeys []string `json:\"pubkeys,omitempty\"`\n\tToSignTX []string `json:\"tosign_tx,omitempty\"`\n\tErrors []txSkelErr `json:\"errors,omitempty\"`\n}\n\n\/\/used within for JSON serialization.\ntype txSkelErr struct {\n\tError string `json:\"error,omitempty\"`\n}\n\n\/\/NullData represents the call and return to BlockCypher's\n\/\/Data API, allowing you to embed up to 80 bytes into\n\/\/a blockchain via an OP_RETURN.\ntype NullData struct {\n\tData string `json:\"data\"`\n\tEncoding string `json:\"encoding,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n}\n\n\/\/MicroTX represents a microtransaction. For small-value\n\/\/transactions, BlockCypher will sign the transaction\n\/\/on your behalf, with your private key (if provided).\n\/\/Setting a separate change address is recommended.\n\/\/Where your application model allows it, consider\n\/\/only using public keys with microtransactions,\n\/\/and sign the microtransaction with your private key\n\/\/(without sending to BlockCypher's server).\ntype MicroTX struct {\n\t\/\/Only one of Pubkey\/Private\/Wif is required\n\tPubkey string `json:\"from_pubkey,omitempty\"`\n\tPriv string `json:\"from_private,omitempty\"`\n\tWif string `json:\"from_wif,omitempty\"`\n\tToAddr string `json:\"to_address\"`\n\tValue int `json:\"value_satoshis\"`\n\tChangeAddr string `json:\"change_address,omitempty\"`\n\tWait bool `json:\"wait_guarantee,omitempty\"`\n\tToSign []string `json:\"tosign,omitempty\"`\n\tSignatures []string `json:\"signatures,omitempty\"`\n\tHash string `json:\"hash,omitempty\"`\n\tInputs []MicroInput `json:\"inputs,omitempty\"`\n\tOutputs []MicroOutput `json:\"outputs,omitempty\"`\n\tFees int `json:\"fees,omitempty\"`\n}\n\n\/\/MicroInput represents pared down TXInput data\n\/\/within a Microtransaction.\ntype MicroInput struct {\n\tPrevHash string `json:\"prev_hash\"`\n\tOutputIndex int `json:\"output_index\"`\n}\n\n\/\/MicroOutput represents pared down TXOutput data\n\/\/within a Microtransaction.\ntype MicroOutput struct {\n\tValue int `json:\"value\"`\n\tAddress string `json:\"address\"`\n}\n\n\/\/Addr represents information about the state\n\/\/of a public address.\ntype Addr struct {\n\tAddress string `json:\"address\"`\n\tTotalReceived int `json:\"total_received\"`\n\tTotalSent int `json:\"total_sent\"`\n\tBalance int `json:\"balance\"`\n\tUnconfirmedBalance int `json:\"unconfirmed_balance\"`\n\tFinalBalance int `json:\"final_balance\"`\n\tNumTX int `json:\"n_tx\"`\n\tUnconfirmedNumTX int `json:\"unconfirmed_n_tx\"`\n\tFinalNumTX int `json:\"final_n_tx\"`\n\tTXs []TX `json:\"txs,omitempty\"`\n\tTXRefs []TXRef `json:\"txrefs,omitempty\"`\n\tUnconfirmedTXRefs []TXRef `json:\"unconfirmed_txrefs,omitempty\"`\n\tHasMore bool `json:\"hasMore,omitempty\"`\n}\n\n\/\/AddrKeychain represents information about a generated\n\/\/public-private key pair from BlockCypher's address\n\/\/generation API. Large amounts are not recommended to be\n\/\/stored with these addresses.\ntype AddrKeychain struct {\n\tAddress string `json:\"address,omitempty\"`\n\tPrivate string `json:\"private,omitempty\"`\n\tPublic string `json:\"public,omitempty\"`\n\tWif string `json:\"wif,omitempty\"`\n\tPubKeys []string `json:\"pubkeys,omitempty\"`\n\tScriptType string `json:\"script_type,omitempty\"`\n}\n\n\/\/Wallet represents information about a standard wallet.\n\/\/Typically, wallets can be used wherever an address can be\n\/\/used within the API.\ntype Wallet struct {\n\tName string `json:\"name,omitempty\"`\n\tAddresses []string `json:\"addresses,omitempty\"`\n}\n\n\/\/Hook represents a WebHook\/WebSockets event.\n\/\/BlockCypher supports the following events:\n\/\/\tEvent = \"unconfirmed-tx\"\n\/\/\tEvent = \"new-block\"\n\/\/\tEvent = \"confirmed-tx\"\n\/\/\tEvent = \"tx-confirmation\"\n\/\/\tEvent = \"double-spend-tx\"\n\/\/ Event = \"tx-confidence\"\n\/\/Hash, Address, and Script are all optional; creating\n\/\/a WebHook with any of them will filter the resulting\n\/\/notifications, if appropriate. ID is returned by\n\/\/BlockCyphers servers after Posting a new WebHook; you\n\/\/shouldn't manually generate this field.\ntype Hook struct {\n\tID string `json:\"id,omitempty\"`\n\tEvent string `json:\"event\"`\n\tHash string `json:\"hash,omitempty\"`\n\tWalletName string `json:\"wallet_name,omitempty\"`\n\tAddress string `json:\"address,omitempty\"`\n\tConfirmations int `json:\"confirmations,omitempty\"`\n\tConfidence float32 `json:\"confidence,omitempty\"`\n\tScript string `json:\"script,omitempty\"`\n\tURL string `json:\"url,omitempty\"`\n\tCallbackErrs int `json:\"callback_errors,omitempty\"`\n}\n\n\/\/PaymentFwd represents a reference to\n\/\/a Payment Forwarding request.\ntype PaymentFwd struct {\n\tID string `json:\"id,omitempty\"`\n\tDestination string `json:\"destination\"`\n\tInputAddr string `json:\"input_address,omitempty\"`\n\tProcessAddr string `json:\"process_fees_address,omitempty\"`\n\tProcessPercent float64 `json:\"process_fees_percent,omitempty\"`\n\tProcessValue int `json:\"process_fees_satoshis,omitempty\"`\n\tCallbackURL string `json:\"callback_url,omitempty\"`\n\tEnableConfirm bool `json:\"enable_confirmations,omitempty\"`\n\tMiningFees int `json:\"mining_fees_satoshis,omitempty\"`\n\tTXHistory []string `json:\"transactions,omitempty\"`\n}\n\n\/\/Payback represents a Payment Forwarding Callback.\n\/\/It's more fun to call it a \"payback.\"\ntype Payback struct {\n\tValue int `json:\"value\"`\n\tDestination string `json:\"destination\"`\n\tDestHash string `json:\"transaction_hash\"`\n\tInputAddr string `json:\"input_address\"`\n\tInputHash string `json:\"input_transaction_hash\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package tripeg\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\n\/\/Hole struct that contains information\n\/\/about a hole in the board, its location\n\/\/and whether or not it has a peg in it.\ntype Hole struct {\n\tRow int \/\/make of 5\n\tCol int \/\/max of 9\n\tPeg bool\n\tLinks []*Hole \/\/Other Holes the hole is connected to\n}\n\n\/\/Jump moves a peg from one hole to another\n\/\/If it can jump, it removes the peg from the\n\/\/overHole hole.\nfunc (h *Hole) Jump(b *Board, overHole *Hole) bool {\n\tif !overHole.Peg {\n\t\t\/\/If there is no peg in the overHole, no jump possible\n\t\treturn false\n\t}\n\n\trDif := h.Row - overHole.Row\n\tcDif := overHole.Col - h.Col\n\tif cDif == 0 && rDif == 0 {\n\t\t\/\/Holes are the same, not valid\n\t\treturn false\n\t}\n\tif math.Abs(float64(rDif)) > 1 {\n\t\t\/\/You can't jump over more than 1 row horizontally\n\t\treturn false\n\t}\n\tif rDif > 0 && math.Abs(float64(cDif)) > 1 {\n\t\t\/\/You can't jump over more than 1 col vertically\n\t\treturn false\n\t}\n\tif rDif == 0 && math.Abs(float64(cDif)) > 2 {\n\t\treturn false\n\t\t\/\/You can't jump more than 2 cols horizontally\n\t}\n\ttargetR := 0\n\ttargetC := 0\n\tif rDif == 0 {\n\t\t\/\/This is a horizontal jump\n\t\ttargetR = h.Row\n\t}\n\tif rDif > 0 {\n\t\ttargetR = overHole.Row - 1\n\t\t\/\/This is a up\n\t}\n\tif rDif < 0 {\n\t\ttargetR = overHole.Row + 1\n\t\t\/\/This is a jump down\n\t}\n\tif cDif < 0 {\n\t\tx := 1\n\t\tif rDif == 0 {\n\t\t\tx = 2\n\t\t}\n\t\ttargetC = overHole.Col - x\n\t\t\/\/This is a jump left\n\t}\n\tif cDif > 0 {\n\t\tx := 1\n\t\tif rDif == 0 {\n\t\t\tx = 2\n\t\t}\n\t\ttargetC = overHole.Col + x\n\t\t\/\/This is a jump right\n\t}\n\ttargetHole := b.GetHole(targetR, targetC)\n\tif targetHole == nil {\n\t\treturn false\n\t}\n\tif targetHole.Peg {\n\t\treturn false\n\t}\n\th.Peg = false\n\toverHole.Peg = false\n\ttargetHole.Peg = true\n\treturn true\n}\n\n\/\/Board contains all the holes that contain the pegs\ntype Board struct {\n\tHoles []*Hole\n\tMoveLog []string\n}\n\n\/\/GetHole gets a pointer to a hole based on the row,col coordinates\nfunc (b Board) GetHole(r, c int) *Hole {\n\tif r < 0 || r > 6 || c < 0 || c > 9 {\n\t\treturn nil\n\t}\n\tfor _, v := range b.Holes {\n\t\tif v.Col == c && v.Row == r {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/BuildBoard makes a board of peg holes.\n\/\/All holes have a peg except one randomly assigned.\n\/\/The top row has 1, then\n\/\/2,3,4,5 for a total of 16 holes.\nfunc BuildBoard(empty int) Board {\n\tvar b Board\n\ts2 := rand.NewSource(time.Now().UnixNano())\n\tr2 := rand.New(s2)\n\tif empty == 0 {\n\t\tempty = r2.Intn(15)\n\t} else {\n\t\tempty--\n\t}\n\n\tfor r := 1; r < 6; r++ {\n\t\tfor c := 1; c < r+1; c++ {\n\t\t\tcol := 4 - (r) + (c * 2)\n\t\t\th := Hole{Row: r, Col: col, Peg: true}\n\t\t\tif empty == len(b.Holes) {\n\t\t\t\th.Peg = false\n\t\t\t}\n\t\t\tb.Holes = append(b.Holes, &h)\n\t\t}\n\t}\n\treturn b\n}\n\nfunc (b *Board) Solve() {\n\tb.MoveLog = []string{}\n\t\/\/Find out how many holes\n\t\/\/can make a legal Move\n\t\/\/randomly pick one of those holes\n\t\/\/find out how many moves can be made from that hole\n\t\/\/randomly pick one of those\n\t\/\/try again until there are no moves to make\n\t\/\/or 14 legal moves have been made, (winner)\n\t\/\/Print out all the winning moves\n\tcMoves := []*Hole{}\n\t_ = cMoves\n\tfor _, v := range b.Holes {\n\t\tif v.Peg == true {\n\t\t\to := b.GetHole(v.Row-1, v.Col-2)\n\t\t\tif v.Jump(b, o) {\n\t\t\t\t\/\/upleft\n\n\t\t\t}\n\t\t\t\/\/upright\n\t\t\t\/\/right\n\t\t\t\/\/left\n\t\t\t\/\/downleft\n\t\t\t\/\/downright\n\t\t}\n\t}\n}\n\nfunc (b Board) String() string {\n\tresult := \"\\n\"\n\tfor r := 1; r < 6; r++ {\n\t\tfor c := 1; c < 10; c++ {\n\t\t\th := b.GetHole(r, c)\n\t\t\tmark := \" \"\n\t\t\tif h != nil {\n\t\t\t\tmark = \"O\"\n\t\t\t\tif h.Peg {\n\t\t\t\t\tmark = \"*\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult += mark\n\t\t}\n\t\tresult += \"\\n\"\n\t}\n\treturn result\n}\nfunc even(number int) bool {\n\treturn number%2 == 0\n}\n<commit_msg>up and to the right doesn't work<commit_after>package tripeg\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\n\/\/Hole struct that contains information\n\/\/about a hole in the board, its location\n\/\/and whether or not it has a peg in it.\ntype Hole struct {\n\tRow int \/\/make of 5\n\tCol int \/\/max of 9\n\tPeg bool\n\tLinks []*Hole \/\/Other Holes the hole is connected to\n}\n\n\/\/Jump moves a peg from one hole to another\n\/\/If it can jump, it removes the peg from the\n\/\/overHole hole.\nfunc (h *Hole) Jump(b *Board, overHole *Hole) bool {\n\tif !overHole.Peg {\n\t\t\/\/If there is no peg in the overHole, no jump possible\n\t\treturn false\n\t}\n\n\trDif := h.Row - overHole.Row\n\tcDif := overHole.Col - h.Col\n\tif cDif == 0 && rDif == 0 {\n\t\t\/\/Holes are the same, not valid\n\t\treturn false\n\t}\n\tif math.Abs(float64(rDif)) > 1 {\n\t\t\/\/You can't jump over more than 1 row horizontally\n\t\treturn false\n\t}\n\tif rDif > 0 && math.Abs(float64(cDif)) > 1 {\n\t\t\/\/You can't jump over more than 1 col vertically\n\t\treturn false\n\t}\n\tif rDif == 0 && math.Abs(float64(cDif)) > 2 {\n\t\treturn false\n\t\t\/\/You can't jump more than 2 cols horizontally\n\t}\n\ttargetR := 0\n\ttargetC := 0\n\tif rDif == 0 {\n\t\t\/\/This is a horizontal jump\n\t\ttargetR = h.Row\n\t}\n\tif rDif > 0 {\n\t\ttargetR = overHole.Row - 1\n\t\t\/\/This is a up\n\t}\n\tif rDif < 0 {\n\t\ttargetR = overHole.Row + 1\n\t\t\/\/This is a jump down\n\t}\n\tif cDif < 0 {\n\t\tx := 1\n\t\tif rDif == 0 {\n\t\t\tx = 2\n\t\t}\n\t\ttargetC = overHole.Col - x\n\t\t\/\/This is a jump left\n\t}\n\tif cDif > 0 {\n\t\tx := 1\n\t\tif rDif == 0 {\n\t\t\tx = 2\n\t\t}\n\t\ttargetC = overHole.Col + x\n\t\t\/\/This is a jump right\n\t}\n\ttargetHole := b.GetHole(targetR, targetC)\n\tif targetHole == nil {\n\t\treturn false\n\t}\n\tif targetHole.Peg {\n\t\treturn false\n\t}\n\th.Peg = false\n\toverHole.Peg = false\n\ttargetHole.Peg = true\n\treturn true\n}\n\n\/\/Board contains all the holes that contain the pegs\ntype Board struct {\n\tHoles []*Hole\n\tMoveLog []string\n}\n\n\/\/GetHole gets a pointer to a hole based on the row,col coordinates\nfunc (b Board) GetHole(r, c int) *Hole {\n\tif r < 0 || r > 6 || c < 0 || c > 9 {\n\t\treturn nil\n\t}\n\tfor _, v := range b.Holes {\n\t\tif v.Col == c && v.Row == r {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/BuildBoard makes a board of peg holes.\n\/\/All holes have a peg except one randomly assigned.\n\/\/The top row has 1, then\n\/\/2,3,4,5 for a total of 16 holes.\nfunc BuildBoard(empty int) Board {\n\tvar b Board\n\ts2 := rand.NewSource(time.Now().UnixNano())\n\tr2 := rand.New(s2)\n\tif empty == 0 {\n\t\tempty = r2.Intn(15)\n\t} else {\n\t\tempty--\n\t}\n\n\tfor r := 1; r < 6; r++ {\n\t\tfor c := 1; c < r+1; c++ {\n\t\t\tcol := 4 - (r) + (c * 2)\n\t\t\th := Hole{Row: r, Col: col, Peg: true}\n\t\t\tif empty == len(b.Holes) {\n\t\t\t\th.Peg = false\n\t\t\t}\n\t\t\tb.Holes = append(b.Holes, &h)\n\t\t}\n\t}\n\treturn b\n}\n\nfunc (b *Board) Solve() {\n\tb.MoveLog = []string{}\n\t\/\/Find out how many holes\n\t\/\/can make a legal Move\n\t\/\/find out how many moves can be made from those hole\n\t\/\/randomly pick one of those\n\t\/\/try again until there are no moves to make\n\t\/\/or 14 legal moves have been made, (winner)\n\t\/\/Print out all the winning moves\n\ttype cMove struct {\n\t\tH *Hole\n\t\tO *Hole\n\t}\n\tcMoves := []cMove{}\n\tmoves := 0\n\tfor _, v := range b.Holes {\n\t\tif v.Peg == true {\n\t\t\to := b.GetHole(v.Row-1, v.Col-1)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/upleft\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t\to = b.GetHole(v.Row-1, v.Col+1)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/upright\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t\to = b.GetHole(v.Row, v.Col+2)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/right\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t\to = b.GetHole(v.Row, v.Col-2)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/left\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t\to = b.GetHole(v.Row+1, v.Col-2)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/downleft\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t\to = b.GetHole(v.Row+1, v.Col+2)\n\t\t\tif o != nil {\n\t\t\t\tif v.Jump(b, o) {\n\t\t\t\t\t\/\/downright\n\t\t\t\t\tcMoves = append(cMoves, cMove{H: v, O: o})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/s2 := rand.NewSource(time.Now().UnixNano())\n\t\t\/\/r2 := rand.New(s2)\n\t\t\/\/ if len(cMoves) == 0 {\n\t\t\/\/ \tfmt.Println(\"Dead End...\", moves)\n\t\t\/\/ \treturn\n\t\t\/\/ }\n\t\t\/\/ m := cMoves[r2.Intn(len(cMoves))]\n\t\t\/\/ if m.H.Jump(b, m.O) {\n\t\t\/\/ \tmoves++\n\t\t\/\/ }\n\t\t\/\/ if moves == 3 {\n\t\t\/\/ \tfmt.Println(\"Made to the goal\")\n\t\t\/\/ }\n\t}\n\t_ = moves\n\tfor _, mv := range cMoves {\n\t\tfmt.Println(mv.H, mv.O)\n\t}\n\treturn\n}\n\nfunc (b Board) String() string {\n\tresult := \"\\n\"\n\tfor r := 1; r < 6; r++ {\n\t\tfor c := 1; c < 10; c++ {\n\t\t\th := b.GetHole(r, c)\n\t\t\tmark := \" \"\n\t\t\tif h != nil {\n\t\t\t\tmark = \"O\"\n\t\t\t\tif h.Peg {\n\t\t\t\t\tmark = \"*\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult += mark\n\t\t}\n\t\tresult += \"\\n\"\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage optimize\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\nconst defaultGradientAbsTol = 1e-6\n\n\/\/ EvaluationType is used by a Method to specify the objective-function\n\/\/ information needed at an x location.\ntype EvaluationType uint\n\n\/\/ Evaluation types can be composed together using the binary or operator, for\n\/\/ example 'FuncEvaluation | GradEvaluation' to evaluate both the function\n\/\/ value and the gradient.\nconst (\n\tNoEvaluation EvaluationType = 0\n\tFuncEvaluation EvaluationType = 1 << iota\n\tGradEvaluation\n\tHessEvaluation\n)\n\nfunc (e EvaluationType) String() string {\n\tif s, ok := evaluationStrings[e]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"EvaluationType(%d)\", e)\n}\n\nvar evaluationStrings = map[EvaluationType]string{\n\tNoEvaluation: \"NoEvaluation\",\n\tFuncEvaluation: \"FuncEvaluation\",\n\tGradEvaluation: \"GradEvaluation\",\n\tHessEvaluation: \"HessEvaluation\",\n}\n\n\/\/ IterationType specifies the type of iteration.\ntype IterationType int\n\nconst (\n\tNoIteration IterationType = iota\n\tMajorIteration\n\tMinorIteration\n\tSubIteration\n\tInitIteration\n\tPostIteration \/\/ Iteration after the optimization. Sent to Recorder.\n)\n\nfunc (i IterationType) String() string {\n\tif i < 0 || int(i) >= len(iterationStrings) {\n\t\treturn fmt.Sprintf(\"IterationType(%d)\", i)\n\t}\n\treturn iterationStrings[i]\n}\n\nvar iterationStrings = [...]string{\n\t\"NoIteration\",\n\t\"MajorIteration\",\n\t\"MinorIteration\",\n\t\"SubIteration\",\n\t\"InitIteration\",\n\t\"PostIteration\",\n}\n\n\/\/ Location represents a location in the optimization procedure.\ntype Location struct {\n\tX []float64\n\tF float64\n\tGradient []float64\n\tHessian *mat64.SymDense\n}\n\n\/\/ LinesearchLocation is a location for a linesearch subiteration\ntype LinesearchLocation struct {\n\tF float64 \/\/ Function value at the step\n\tDerivative float64 \/\/ Projected gradient in the linesearch direction\n}\n\n\/\/ Result represents the answer of an optimization run. It contains the optimum\n\/\/ location as well as the Status at convergence and Statistics taken during the\n\/\/ run.\ntype Result struct {\n\tLocation\n\tStats\n\tStatus Status\n}\n\n\/\/ Stats contains the statistics of the run.\ntype Stats struct {\n\tMajorIterations int \/\/ Total number of major iterations\n\tFuncEvaluations int \/\/ Number of evaluations of Func()\n\tGradEvaluations int \/\/ Number of evaluations of Grad()\n\tHessEvaluations int \/\/ Number of evaluations of Hess()\n\tRuntime time.Duration \/\/ Total runtime of the optimization\n}\n\n\/\/ FunctionInfo is data to give to the optimizer about the objective function.\ntype FunctionInfo struct {\n\tIsGradient bool\n\tIsHessian bool\n\tIsFunctionGradient bool\n\tIsFunctionGradientHessian bool\n\tIsStatuser bool\n}\n\n\/\/ functionInfo contains information about which interfaces the objective\n\/\/ function F implements and the actual methods of F that have been\n\/\/ successfully type switched.\ntype functionInfo struct {\n\tFunctionInfo\n\n\tfunction Function\n\tgradient Gradient\n\thessian Hessian\n\tstatuser Statuser\n}\n\nfunc newFunctionInfo(f Function) *functionInfo {\n\tgradient, isGradient := f.(Gradient)\n\thessian, isHessian := f.(Hessian)\n\tstatuser, isStatuser := f.(Statuser)\n\n\treturn &functionInfo{\n\t\tFunctionInfo: FunctionInfo{\n\t\t\tIsGradient: isGradient,\n\t\t\tIsHessian: isHessian,\n\t\t\tIsStatuser: isStatuser,\n\t\t},\n\t\tfunction: f,\n\t\tgradient: gradient,\n\t\thessian: hessian,\n\t\tstatuser: statuser,\n\t}\n}\n\n\/\/ TODO(btracey): Think about making this an exported function when the\n\/\/ constraint interface is designed.\nfunc (f functionInfo) satisfies(method Method) error {\n\tif method.Needs().Gradient && !f.IsGradient {\n\t\treturn errors.New(\"optimize: function does not implement needed Gradient interface\")\n\t}\n\tif method.Needs().Hessian && !f.IsHessian {\n\t\treturn errors.New(\"optimize: function does not implement needed Hessian interface\")\n\t}\n\treturn nil\n}\n\n\/\/ complementEval returns an evaluation type that evaluates fields of loc not\n\/\/ evaluated by eval.\nfunc complementEval(loc *Location, eval EvaluationType) (complEval EvaluationType) {\n\tif eval&FuncEvaluation == 0 {\n\t\tcomplEval = FuncEvaluation\n\t}\n\tif loc.Gradient != nil && eval&GradEvaluation == 0 {\n\t\tcomplEval |= GradEvaluation\n\t}\n\tif loc.Hessian != nil && eval&HessEvaluation == 0 {\n\t\tcomplEval |= HessEvaluation\n\t}\n\treturn complEval\n}\n\n\/\/ Settings represents settings of the optimization run. It contains initial\n\/\/ settings, convergence information, and Recorder information. In general, users\n\/\/ should use DefaultSettings() rather than constructing a Settings literal.\n\/\/\n\/\/ If UseInitData is true, InitialValue, InitialGradient and InitialHessian\n\/\/ specify function information at the initial location.\n\/\/\n\/\/ If Recorder is nil, no information will be recorded.\ntype Settings struct {\n\tUseInitialData bool \/\/ Use supplied information about the conditions at the initial x.\n\tInitialValue float64 \/\/ Func(x) at the initial x.\n\tInitialGradient []float64 \/\/ Grad(x) at the initial x.\n\tInitialHessian *mat64.SymDense \/\/ Hess(x) at the initial x.\n\n\t\/\/ FunctionThreshold is the threshold for acceptably small values of the\n\t\/\/ objective function. FunctionThreshold status is returned if\n\t\/\/ the objective function is less than this value.\n\t\/\/ The default value is -inf.\n\tFunctionThreshold float64\n\n\t\/\/ GradientThreshold determines the accuracy to which the minimum is found.\n\t\/\/ GradientThreshold status is returned if the infinity norm of\n\t\/\/ the gradient is less than this value.\n\t\/\/ Has no effect if gradient information is not used.\n\t\/\/ The default value is 1e-6.\n\tGradientThreshold float64\n\n\t\/\/ FunctionConverge tests that the function value decreases by a significant\n\t\/\/ amount over the specified number of iterations. If\n\t\/\/ f < f_best && f_best - f > Relative * maxabs(f, f_best) + Absolute\n\t\/\/ then a significant decrease has occured, and f_best is updated. If there is\n\t\/\/ no significant decrease for Iterations major iterations, FunctionConvergence\n\t\/\/ is returned. If this is nil or if Iterations == 0, it has no effect.\n\tFunctionConverge *FunctionConverge\n\n\t\/\/ MajorIterations is the maximum number of iterations allowed.\n\t\/\/ IterationLimit status is returned if the number of major iterations\n\t\/\/ equals or exceeds this value.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tMajorIterations int\n\n\t\/\/ Runtime is the maximum runtime allowed. RuntimeLimit status is returned\n\t\/\/ if the duration of the run is longer than this value. Runtime is only\n\t\/\/ checked at iterations of the Method.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tRuntime time.Duration\n\n\t\/\/ FuncEvaluations is the maximum allowed number of function evaluations.\n\t\/\/ FunctionEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Func() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tFuncEvaluations int\n\n\t\/\/ GradEvaluations is the maximum allowed number of gradient evaluations.\n\t\/\/ GradientEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Grad() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tGradEvaluations int\n\n\t\/\/ HessEvaluations is the maximum allowed number of Hessian evaluations.\n\t\/\/ HessianEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Hess() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tHessEvaluations int\n\n\tRecorder Recorder\n}\n\n\/\/ DefaultSettings returns a new Settings struct containing the default settings.\nfunc DefaultSettings() *Settings {\n\treturn &Settings{\n\t\tGradientThreshold: defaultGradientAbsTol,\n\t\tFunctionThreshold: math.Inf(-1),\n\t\tRecorder: NewPrinter(),\n\t\tFunctionConverge: &FunctionConverge{\n\t\t\tAbsolute: 1e-10,\n\t\t\tIterations: 20,\n\t\t},\n\t}\n}\n\n\/\/ resize takes x and returns a slice of length dim. It returns a resliced x\n\/\/ if cap(x) >= dim, and a new slice otherwise.\nfunc resize(x []float64, dim int) []float64 {\n\tif dim > cap(x) {\n\t\treturn make([]float64, dim)\n\t}\n\treturn x[:dim]\n}\n<commit_msg>Change formatting of EvaluationType<commit_after>\/\/ Copyright ©2014 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage optimize\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\nconst defaultGradientAbsTol = 1e-6\n\n\/\/ EvaluationType is used by a Method to specify the objective-function\n\/\/ information needed at an x location.\ntype EvaluationType uint\n\n\/\/ Evaluation types can be composed together using the binary or operator, for\n\/\/ example 'FuncEvaluation | GradEvaluation' to evaluate both the function\n\/\/ value and the gradient.\nconst (\n\tNoEvaluation EvaluationType = 0\n\tFuncEvaluation EvaluationType = 1 << (iota - 1)\n\tGradEvaluation\n\tHessEvaluation\n)\n\nfunc (e EvaluationType) String() string {\n\treturn fmt.Sprintf(\"EvaluationType(Func: %t, Grad: %t, Hess: %t, Extra: 0b%b)\",\n\t\te&FuncEvaluation != 0,\n\t\te&GradEvaluation != 0,\n\t\te&HessEvaluation != 0,\n\t\te&^(FuncEvaluation|GradEvaluation|HessEvaluation))\n}\n\n\/\/ IterationType specifies the type of iteration.\ntype IterationType int\n\nconst (\n\tNoIteration IterationType = iota\n\tMajorIteration\n\tMinorIteration\n\tSubIteration\n\tInitIteration\n\tPostIteration \/\/ Iteration after the optimization. Sent to Recorder.\n)\n\nfunc (i IterationType) String() string {\n\tif i < 0 || int(i) >= len(iterationStrings) {\n\t\treturn fmt.Sprintf(\"IterationType(%d)\", i)\n\t}\n\treturn iterationStrings[i]\n}\n\nvar iterationStrings = [...]string{\n\t\"NoIteration\",\n\t\"MajorIteration\",\n\t\"MinorIteration\",\n\t\"SubIteration\",\n\t\"InitIteration\",\n\t\"PostIteration\",\n}\n\n\/\/ Location represents a location in the optimization procedure.\ntype Location struct {\n\tX []float64\n\tF float64\n\tGradient []float64\n\tHessian *mat64.SymDense\n}\n\n\/\/ LinesearchLocation is a location for a linesearch subiteration\ntype LinesearchLocation struct {\n\tF float64 \/\/ Function value at the step\n\tDerivative float64 \/\/ Projected gradient in the linesearch direction\n}\n\n\/\/ Result represents the answer of an optimization run. It contains the optimum\n\/\/ location as well as the Status at convergence and Statistics taken during the\n\/\/ run.\ntype Result struct {\n\tLocation\n\tStats\n\tStatus Status\n}\n\n\/\/ Stats contains the statistics of the run.\ntype Stats struct {\n\tMajorIterations int \/\/ Total number of major iterations\n\tFuncEvaluations int \/\/ Number of evaluations of Func()\n\tGradEvaluations int \/\/ Number of evaluations of Grad()\n\tHessEvaluations int \/\/ Number of evaluations of Hess()\n\tRuntime time.Duration \/\/ Total runtime of the optimization\n}\n\n\/\/ FunctionInfo is data to give to the optimizer about the objective function.\ntype FunctionInfo struct {\n\tIsGradient bool\n\tIsHessian bool\n\tIsFunctionGradient bool\n\tIsFunctionGradientHessian bool\n\tIsStatuser bool\n}\n\n\/\/ functionInfo contains information about which interfaces the objective\n\/\/ function F implements and the actual methods of F that have been\n\/\/ successfully type switched.\ntype functionInfo struct {\n\tFunctionInfo\n\n\tfunction Function\n\tgradient Gradient\n\thessian Hessian\n\tstatuser Statuser\n}\n\nfunc newFunctionInfo(f Function) *functionInfo {\n\tgradient, isGradient := f.(Gradient)\n\thessian, isHessian := f.(Hessian)\n\tstatuser, isStatuser := f.(Statuser)\n\n\treturn &functionInfo{\n\t\tFunctionInfo: FunctionInfo{\n\t\t\tIsGradient: isGradient,\n\t\t\tIsHessian: isHessian,\n\t\t\tIsStatuser: isStatuser,\n\t\t},\n\t\tfunction: f,\n\t\tgradient: gradient,\n\t\thessian: hessian,\n\t\tstatuser: statuser,\n\t}\n}\n\n\/\/ TODO(btracey): Think about making this an exported function when the\n\/\/ constraint interface is designed.\nfunc (f functionInfo) satisfies(method Method) error {\n\tif method.Needs().Gradient && !f.IsGradient {\n\t\treturn errors.New(\"optimize: function does not implement needed Gradient interface\")\n\t}\n\tif method.Needs().Hessian && !f.IsHessian {\n\t\treturn errors.New(\"optimize: function does not implement needed Hessian interface\")\n\t}\n\treturn nil\n}\n\n\/\/ complementEval returns an evaluation type that evaluates fields of loc not\n\/\/ evaluated by eval.\nfunc complementEval(loc *Location, eval EvaluationType) (complEval EvaluationType) {\n\tif eval&FuncEvaluation == 0 {\n\t\tcomplEval = FuncEvaluation\n\t}\n\tif loc.Gradient != nil && eval&GradEvaluation == 0 {\n\t\tcomplEval |= GradEvaluation\n\t}\n\tif loc.Hessian != nil && eval&HessEvaluation == 0 {\n\t\tcomplEval |= HessEvaluation\n\t}\n\treturn complEval\n}\n\n\/\/ Settings represents settings of the optimization run. It contains initial\n\/\/ settings, convergence information, and Recorder information. In general, users\n\/\/ should use DefaultSettings() rather than constructing a Settings literal.\n\/\/\n\/\/ If UseInitData is true, InitialValue, InitialGradient and InitialHessian\n\/\/ specify function information at the initial location.\n\/\/\n\/\/ If Recorder is nil, no information will be recorded.\ntype Settings struct {\n\tUseInitialData bool \/\/ Use supplied information about the conditions at the initial x.\n\tInitialValue float64 \/\/ Func(x) at the initial x.\n\tInitialGradient []float64 \/\/ Grad(x) at the initial x.\n\tInitialHessian *mat64.SymDense \/\/ Hess(x) at the initial x.\n\n\t\/\/ FunctionThreshold is the threshold for acceptably small values of the\n\t\/\/ objective function. FunctionThreshold status is returned if\n\t\/\/ the objective function is less than this value.\n\t\/\/ The default value is -inf.\n\tFunctionThreshold float64\n\n\t\/\/ GradientThreshold determines the accuracy to which the minimum is found.\n\t\/\/ GradientThreshold status is returned if the infinity norm of\n\t\/\/ the gradient is less than this value.\n\t\/\/ Has no effect if gradient information is not used.\n\t\/\/ The default value is 1e-6.\n\tGradientThreshold float64\n\n\t\/\/ FunctionConverge tests that the function value decreases by a significant\n\t\/\/ amount over the specified number of iterations. If\n\t\/\/ f < f_best && f_best - f > Relative * maxabs(f, f_best) + Absolute\n\t\/\/ then a significant decrease has occured, and f_best is updated. If there is\n\t\/\/ no significant decrease for Iterations major iterations, FunctionConvergence\n\t\/\/ is returned. If this is nil or if Iterations == 0, it has no effect.\n\tFunctionConverge *FunctionConverge\n\n\t\/\/ MajorIterations is the maximum number of iterations allowed.\n\t\/\/ IterationLimit status is returned if the number of major iterations\n\t\/\/ equals or exceeds this value.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tMajorIterations int\n\n\t\/\/ Runtime is the maximum runtime allowed. RuntimeLimit status is returned\n\t\/\/ if the duration of the run is longer than this value. Runtime is only\n\t\/\/ checked at iterations of the Method.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tRuntime time.Duration\n\n\t\/\/ FuncEvaluations is the maximum allowed number of function evaluations.\n\t\/\/ FunctionEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Func() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tFuncEvaluations int\n\n\t\/\/ GradEvaluations is the maximum allowed number of gradient evaluations.\n\t\/\/ GradientEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Grad() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tGradEvaluations int\n\n\t\/\/ HessEvaluations is the maximum allowed number of Hessian evaluations.\n\t\/\/ HessianEvaluationLimit status is returned if the total number of calls\n\t\/\/ to Hess() equals or exceeds this number.\n\t\/\/ If it equals zero, this setting has no effect.\n\t\/\/ The default value is 0.\n\tHessEvaluations int\n\n\tRecorder Recorder\n}\n\n\/\/ DefaultSettings returns a new Settings struct containing the default settings.\nfunc DefaultSettings() *Settings {\n\treturn &Settings{\n\t\tGradientThreshold: defaultGradientAbsTol,\n\t\tFunctionThreshold: math.Inf(-1),\n\t\tRecorder: NewPrinter(),\n\t\tFunctionConverge: &FunctionConverge{\n\t\t\tAbsolute: 1e-10,\n\t\t\tIterations: 20,\n\t\t},\n\t}\n}\n\n\/\/ resize takes x and returns a slice of length dim. It returns a resliced x\n\/\/ if cap(x) >= dim, and a new slice otherwise.\nfunc resize(x []float64, dim int) []float64 {\n\tif dim > cap(x) {\n\t\treturn make([]float64, dim)\n\t}\n\treturn x[:dim]\n}\n<|endoftext|>"} {"text":"<commit_before>package gogo\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/dolab\/httpdispatch\"\n)\n\n\/\/ Handler represents server handlers\ntype Handler interface {\n\thttp.Handler\n\n\tHandle(string, string, httpdispatch.Handle)\n\tServeFiles(string, http.FileSystem)\n}\n\n\/\/ Middleware represents request filters and resource handler\n\/\/ NOTE: It is the filter's responsibility to invoke ctx.Next() for chainning.\ntype Middleware func(ctx *Context)\n\n\/\/ Render represents HTTP response render\ntype Render interface {\n\tContentType() string\n\tRender(v interface{}) error\n}\n\n\/\/ StatusCoder represents HTTP response status code\ntype StatusCoder interface {\n\tStatusCode() int\n}\n\n\/\/ Responser represents HTTP response interface\ntype Responser interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\n\tBefore(filter ResponseFilter) \/\/ register before filter\n\tSize() int \/\/ return the size of response body\n\tStatus() int \/\/ response status code\n\tHeaderFlushed() bool \/\/ whether response header has been sent?\n\tFlushHeader() \/\/ send response header\n}\n\n\/\/ ResponseFilter defines filter interface applied to response\ntype ResponseFilter func(w Responser, b []byte) []byte\n\n\/\/ Logger defines interface of application log apis.\ntype Logger interface {\n\tNew(requestID string) Logger\n\tReuse(log Logger)\n\tRequestID() string\n\tSetLevelByName(level string) error\n\tSetColor(color bool)\n\n\tPrint(v ...interface{})\n\tPrintf(format string, v ...interface{})\n\tDebug(v ...interface{})\n\tDebugf(format string, v ...interface{})\n\tInfo(v ...interface{})\n\tInfof(format string, v ...interface{})\n\tWarn(v ...interface{})\n\tWarnf(format string, v ...interface{})\n\tError(v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tFatal(v ...interface{})\n\tFatalf(format string, v ...interface{})\n\tPanic(v ...interface{})\n\tPanicf(format string, v ...interface{})\n}\n<commit_msg>What: rename httpdispatch.Handle to httpdispatch.Handler<commit_after>package gogo\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/dolab\/httpdispatch\"\n)\n\n\/\/ Handler represents server handlers\ntype Handler interface {\n\thttp.Handler\n\n\tHandle(string, string, httpdispatch.Handler)\n\tServeFiles(string, http.FileSystem)\n}\n\n\/\/ Middleware represents request filters and resource handler\n\/\/ NOTE: It is the filter's responsibility to invoke ctx.Next() for chainning.\ntype Middleware func(ctx *Context)\n\n\/\/ Responser represents HTTP response interface\ntype Responser interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\n\tBefore(filter ResponseFilter) \/\/ register before filter\n\tSize() int \/\/ return the size of response body\n\tStatus() int \/\/ response status code\n\tHeaderFlushed() bool \/\/ whether response header has been sent?\n\tFlushHeader() \/\/ send response header\n}\n\n\/\/ ResponseFilter defines filter interface applied to response\ntype ResponseFilter func(w Responser, b []byte) []byte\n\n\/\/ StatusCoder represents HTTP response status code\n\/\/ it is useful for custom response data with response status code\ntype StatusCoder interface {\n\tStatusCode() int\n}\n\n\/\/ Render represents HTTP response render\ntype Render interface {\n\tContentType() string\n\tRender(v interface{}) error\n}\n\n\/\/ Logger defines interface of application log apis.\ntype Logger interface {\n\tNew(requestID string) Logger\n\tReuse(log Logger)\n\tRequestID() string\n\tSetLevelByName(level string) error\n\tSetColor(color bool)\n\n\tPrint(v ...interface{})\n\tPrintf(format string, v ...interface{})\n\tDebug(v ...interface{})\n\tDebugf(format string, v ...interface{})\n\tInfo(v ...interface{})\n\tInfof(format string, v ...interface{})\n\tWarn(v ...interface{})\n\tWarnf(format string, v ...interface{})\n\tError(v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tFatal(v ...interface{})\n\tFatalf(format string, v ...interface{})\n\tPanic(v ...interface{})\n\tPanicf(format string, v ...interface{})\n}\n<|endoftext|>"} {"text":"<commit_before>package projects\n\nimport (\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ion-channel\/ionic\/aliases\"\n\t\"github.com\/ion-channel\/ionic\/rulesets\"\n\t\"github.com\/ion-channel\/ionic\/tags\"\n)\n\nconst (\n\tvalidEmailRegex = `(?i)^[a-z0-9._%+\\-]+@[a-z0-9.\\-]+\\.[a-z]{2,}$`\n\tvalidGitURIRegex = `^(?:(?:http|ftp|gopher|mailto|mid|cid|news|nntp|prospero|telnet|rlogin|tn3270|wais|svn|git|rsync)+\\+ssh\\:\\\/\\\/|git\\+https?:\\\/\\\/|git\\@|(?:http|ftp|gopher|mailto|mid|cid|news|nntp|prospero|telnet|rlogin|tn3270|wais|svn|git|rsync|ssh|file)+s?:\\\/\\\/)[^\\s]+$`\n)\n\nconst (\n\t\/\/ CreateProjectEndpoint is a string representation of the current endpoint for creating project\n\tCreateProjectEndpoint = \"v1\/project\/createProject\"\n\t\/\/ CreateProjectsFromCSVEndpoint is a string representation of the current endpoint for creating projects from CSV\n\tCreateProjectsFromCSVEndpoint = \"v1\/project\/createProjectsCSV\"\n\t\/\/ GetProjectEndpoint is a string representation of the current endpoint for getting project\n\tGetProjectEndpoint = \"v1\/project\/getProject\"\n\t\/\/ GetProjectByURLEndpoint is a string representation of the current endpoint for getting project by URL\n\tGetProjectByURLEndpoint = \"v1\/project\/getProjectByUrl\"\n\t\/\/ GetProjectsEndpoint is a string representation of the current endpoint for getting projects\n\tGetProjectsEndpoint = \"v1\/project\/getProjects\"\n\t\/\/ UpdateProjectEndpoint is a string representation of the current endpoint for updating project\n\tUpdateProjectEndpoint = \"v1\/project\/updateProject\"\n)\n\nvar (\n\t\/\/ ErrInvalidProject is returned when a given project does not pass the\n\t\/\/ standards for a project\n\tErrInvalidProject = fmt.Errorf(\"project has invalid fields\")\n)\n\n\/\/Project is a representation of a project within the Ion Channel system\ntype Project struct {\n\tID *string `json:\"id,omitempty\"`\n\tTeamID *string `json:\"team_id,omitempty\"`\n\tRulesetID *string `json:\"ruleset_id,omitempty\"`\n\tName *string `json:\"name,omitempty\"`\n\tType *string `json:\"type,omitempty\"`\n\tSource *string `json:\"source,omitempty\"`\n\tBranch *string `json:\"branch,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tActive bool `json:\"active\"`\n\tChatChannel string `json:\"chat_channel\"`\n\tCreatedAt time.Time `json:\"created_at\"`\n\tUpdatedAt time.Time `json:\"updated_at\"`\n\tDeployKey string `json:\"deploy_key\"`\n\tMonitor bool `json:\"should_monitor\"`\n\tPOCName string `json:\"poc_name\"`\n\tPOCEmail string `json:\"poc_email\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tKeyFingerprint string `json:\"key_fingerprint\"`\n\tPrivate bool `json:\"private\"`\n\tAliases []aliases.Alias `json:\"aliases\"`\n\tTags []tags.Tag `json:\"tags\"`\n}\n\n\/\/ String returns a JSON formatted string of the project object\nfunc (p Project) String() string {\n\tb, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"failed to format project: %v\", err.Error())\n\t}\n\treturn string(b)\n}\n\n\/\/ Validate takes an http client, baseURL, and token; returns a slice of fields as a string and\n\/\/ an error. The fields will be a list of fields that did not pass the\n\/\/ validation. An error will only be returned if any of the fields fail their\n\/\/ validation.\nfunc (p *Project) Validate(client *http.Client, baseURL *url.URL, token string) (map[string]string, error) {\n\tinvalidFields := make(map[string]string)\n\tvar projErr error\n\n\tif p.TeamID == nil {\n\t\tinvalidFields[\"team_id\"] = \"missing team id\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.RulesetID == nil {\n\t\tinvalidFields[\"ruleset_id\"] = \"missing ruleset id\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Name == nil {\n\t\tinvalidFields[\"name\"] = \"missing name\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Type == nil {\n\t\tinvalidFields[\"type\"] = \"missing type\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Source == nil {\n\t\tinvalidFields[\"source\"] = \"missing source\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Branch == nil && p.Type != nil && strings.ToLower(*p.Type) == \"git\" {\n\t\tinvalidFields[\"branch\"] = \"missing branch\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Description == nil {\n\t\tinvalidFields[\"description\"] = \"missing description\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.RulesetID != nil && p.TeamID != nil {\n\t\texists, err := rulesets.RuleSetExists(client, baseURL, *p.RulesetID, *p.TeamID, token)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to determine if ruleset exists: %v\", err.Error())\n\t\t}\n\n\t\tif !exists {\n\t\t\tinvalidFields[\"ruleset_id\"] = \"ruleset id does not match to a valid ruleset\"\n\t\t\tprojErr = ErrInvalidProject\n\t\t}\n\t}\n\n\tp.POCEmail = strings.TrimSpace(p.POCEmail)\n\n\tr := regexp.MustCompile(validEmailRegex)\n\tif p.POCEmail != \"\" && !r.MatchString(p.POCEmail) {\n\t\tinvalidFields[\"poc_email\"] = \"invalid email supplied\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tisFinger, err := regexp.MatchString(\"[a-f0-9]{2}\\\\:[a-f0-9]{2}\\\\:[a-f0-9]{2}\\\\:\", p.DeployKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to detect deploy key fingerprint: %v\", err.Error())\n\t}\n\n\tif isFinger {\n\t\tp.DeployKey = \"\"\n\t}\n\n\tblock, rest := pem.Decode([]byte(p.DeployKey))\n\tif block != nil {\n\t\tpkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\t\tif err != nil {\n\t\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\t\tprojErr = ErrInvalidProject\n\t\t} else {\n\t\t\terr = pkey.Validate()\n\t\t\tif err != nil {\n\t\t\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\t\t}\n\t}\n\n\tif block == nil && rest != nil && string(rest) != \"\" {\n\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Type != nil {\n\t\tswitch strings.ToLower(*p.Type) {\n\t\tcase \"artifact\":\n\t\t\tu, err := url.Parse(*p.Source)\n\t\t\tif err != nil {\n\t\t\t\tinvalidFields[\"source\"] = fmt.Sprintf(\"source must be a valid url: %v\", err.Error())\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\n\t\t\tif u != nil {\n\t\t\t\tres, err := client.Head(u.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tinvalidFields[\"source\"] = \"source failed to return a response\"\n\t\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t\t}\n\n\t\t\t\tif res != nil && res.StatusCode == http.StatusNotFound {\n\t\t\t\t\tinvalidFields[\"source\"] = \"source returned a not found\"\n\t\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"git\", \"svn\":\n\t\t\tr := regexp.MustCompile(validGitURIRegex)\n\t\t\tif p.Source != nil && !r.MatchString(*p.Source) {\n\t\t\t\tinvalidFields[\"source\"] = \"source must be a valid uri\"\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\t\tdefault:\n\t\t\tinvalidFields[\"type\"] = fmt.Sprintf(\"invalid type value\")\n\t\t\tprojErr = ErrInvalidProject\n\t\t}\n\t}\n\n\treturn invalidFields, projErr\n}\n\n\/\/ ProjectFilter represents the available fields to filter a get project request\n\/\/ with.\ntype ProjectFilter struct {\n\tType *string `json:\"type,omitempty\"`\n\tActive *bool `json:\"active,omitempty\"`\n}\n\n\/\/ Param converts the non nil fields of the Project Filter into a string usable\n\/\/ for URL query params.\nfunc (pf *ProjectFilter) Param() string {\n\tps := make([]string, 0)\n\n\tfields := reflect.TypeOf(pf)\n\tvalues := reflect.ValueOf(pf)\n\n\tif fields.Kind() == reflect.Ptr {\n\t\tfields = fields.Elem()\n\t\tvalues = values.Elem()\n\t}\n\n\tfor i := 0; i < fields.NumField(); i++ {\n\t\tvalue := values.Field(i)\n\n\t\tif value.IsNil() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif value.Kind() == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t}\n\n\t\tname := strings.ToLower(fields.Field(i).Name)\n\n\t\tswitch value.Kind() {\n\t\tcase reflect.String:\n\t\t\tps = append(ps, fmt.Sprintf(\"%v:%v\", name, value.String()))\n\t\tcase reflect.Bool:\n\t\t\tps = append(ps, fmt.Sprintf(\"%v:%v\", name, value.Bool()))\n\t\t}\n\t}\n\n\treturn strings.Join(ps, \",\")\n}\n<commit_msg>make the tests pass<commit_after>package projects\n\nimport (\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ion-channel\/ionic\/aliases\"\n\t\"github.com\/ion-channel\/ionic\/rulesets\"\n\t\"github.com\/ion-channel\/ionic\/tags\"\n)\n\nconst (\n\tvalidEmailRegex = `(?i)^[a-z0-9._%+\\-]+@[a-z0-9.\\-]+\\.[a-z]{2,}$`\n\tvalidGitURIRegex = `^(?:(?:http|ftp|gopher|mailto|mid|cid|news|nntp|prospero|telnet|rlogin|tn3270|wais|svn|git|rsync)+\\+ssh\\:\\\/\\\/|git\\+https?:\\\/\\\/|git\\@|(?:http|ftp|gopher|mailto|mid|cid|news|nntp|prospero|telnet|rlogin|tn3270|wais|svn|git|rsync|ssh|file)+s?:\\\/\\\/)[^\\s]+$`\n)\n\nconst (\n\t\/\/ CreateProjectEndpoint is a string representation of the current endpoint for creating project\n\tCreateProjectEndpoint = \"v1\/project\/createProject\"\n\t\/\/ CreateProjectsFromCSVEndpoint is a string representation of the current endpoint for creating projects from CSV\n\tCreateProjectsFromCSVEndpoint = \"v1\/project\/createProjectsCSV\"\n\t\/\/ GetProjectEndpoint is a string representation of the current endpoint for getting project\n\tGetProjectEndpoint = \"v1\/project\/getProject\"\n\t\/\/ GetProjectByURLEndpoint is a string representation of the current endpoint for getting project by URL\n\tGetProjectByURLEndpoint = \"v1\/project\/getProjectByUrl\"\n\t\/\/ GetProjectsEndpoint is a string representation of the current endpoint for getting projects\n\tGetProjectsEndpoint = \"v1\/project\/getProjects\"\n\t\/\/ UpdateProjectEndpoint is a string representation of the current endpoint for updating project\n\tUpdateProjectEndpoint = \"v1\/project\/updateProject\"\n)\n\nvar (\n\t\/\/ ErrInvalidProject is returned when a given project does not pass the\n\t\/\/ standards for a project\n\tErrInvalidProject = fmt.Errorf(\"project has invalid fields\")\n)\n\n\/\/Project is a representation of a project within the Ion Channel system\ntype Project struct {\n\tID *string `json:\"id,omitempty\"`\n\tTeamID *string `json:\"team_id,omitempty\"`\n\tRulesetID *string `json:\"ruleset_id,omitempty\"`\n\tName *string `json:\"name,omitempty\"`\n\tType *string `json:\"type,omitempty\"`\n\tSource *string `json:\"source,omitempty\"`\n\tBranch *string `json:\"branch,omitempty\"`\n\tDescription *string `json:\"description,omitempty\"`\n\tActive bool `json:\"active\"`\n\tChatChannel string `json:\"chat_channel\"`\n\tCreatedAt time.Time `json:\"created_at\"`\n\tUpdatedAt time.Time `json:\"updated_at\"`\n\tDeployKey string `json:\"deploy_key\"`\n\tMonitor bool `json:\"should_monitor\"`\n\tPOCName string `json:\"poc_name\"`\n\tPOCEmail string `json:\"poc_email\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tKeyFingerprint string `json:\"key_fingerprint\"`\n\tPrivate bool `json:\"private\"`\n\tAliases []aliases.Alias `json:\"aliases\"`\n\tTags []tags.Tag `json:\"tags\"`\n}\n\n\/\/ String returns a JSON formatted string of the project object\nfunc (p Project) String() string {\n\tb, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"failed to format project: %v\", err.Error())\n\t}\n\treturn string(b)\n}\n\n\/\/ Validate takes an http client, baseURL, and token; returns a slice of fields as a string and\n\/\/ an error. The fields will be a list of fields that did not pass the\n\/\/ validation. An error will only be returned if any of the fields fail their\n\/\/ validation.\nfunc (p *Project) Validate(client *http.Client, baseURL *url.URL, token string) (map[string]string, error) {\n\tinvalidFields := make(map[string]string)\n\tvar projErr error\n\n\tif p.TeamID == nil {\n\t\tinvalidFields[\"team_id\"] = \"missing team id\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.RulesetID == nil {\n\t\tinvalidFields[\"ruleset_id\"] = \"missing ruleset id\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Name == nil {\n\t\tinvalidFields[\"name\"] = \"missing name\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Type == nil {\n\t\tinvalidFields[\"type\"] = \"missing type\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Source == nil {\n\t\tinvalidFields[\"source\"] = \"missing source\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Branch == nil && p.Type != nil && strings.ToLower(*p.Type) == \"git\" {\n\t\tinvalidFields[\"branch\"] = \"missing branch\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Description == nil {\n\t\tinvalidFields[\"description\"] = \"missing description\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.RulesetID != nil && p.TeamID != nil {\n\t\texists, err := rulesets.RuleSetExists(client, baseURL, *p.RulesetID, *p.TeamID, token)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to determine if ruleset exists: %v\", err.Error())\n\t\t}\n\n\t\tif !exists {\n\t\t\tinvalidFields[\"ruleset_id\"] = \"ruleset id does not match to a valid ruleset\"\n\t\t\tprojErr = ErrInvalidProject\n\t\t}\n\t}\n\n\tp.POCEmail = strings.TrimSpace(p.POCEmail)\n\n\tr := regexp.MustCompile(validEmailRegex)\n\tif p.POCEmail != \"\" && !r.MatchString(p.POCEmail) {\n\t\tinvalidFields[\"poc_email\"] = \"invalid email supplied\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tisFinger, err := regexp.MatchString(\"[a-f0-9]{2}\\\\:[a-f0-9]{2}\\\\:[a-f0-9]{2}\\\\:\", p.DeployKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to detect deploy key fingerprint: %v\", err.Error())\n\t}\n\n\tif isFinger {\n\t\tp.DeployKey = \"\"\n\t}\n\n\tblock, rest := pem.Decode([]byte(p.DeployKey))\n\tif block != nil {\n\t\tpkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\t\tif err != nil {\n\t\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\t\tprojErr = ErrInvalidProject\n\t\t} else {\n\t\t\terr = pkey.Validate()\n\t\t\tif err != nil {\n\t\t\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\t\t}\n\t}\n\n\tif block == nil && rest != nil && string(rest) != \"\" {\n\t\tinvalidFields[\"deploy_key\"] = \"must be a valid ssh key\"\n\t\tprojErr = ErrInvalidProject\n\t}\n\n\tif p.Type != nil {\n\t\tswitch strings.ToLower(*p.Type) {\n\t\tcase \"artifact\":\n\t\t\tu, err := url.Parse(*p.Source)\n\t\t\tif err != nil {\n\t\t\t\tinvalidFields[\"source\"] = fmt.Sprintf(\"source must be a valid url: %v\", err.Error())\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\n\t\t\tif u != nil {\n\t\t\t\tres, err := client.Head(u.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tinvalidFields[\"source\"] = \"source failed to return a response\"\n\t\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t\t}\n\n\t\t\t\tif res != nil && res.StatusCode == http.StatusNotFound {\n\t\t\t\t\tinvalidFields[\"source\"] = \"source returned a not found\"\n\t\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"git\", \"svn\":\n\t\t\tr := regexp.MustCompile(validGitURIRegex)\n\t\t\tif p.Source != nil && !r.MatchString(*p.Source) {\n\t\t\t\tinvalidFields[\"source\"] = \"source must be a valid uri\"\n\t\t\t\tprojErr = ErrInvalidProject\n\t\t\t}\n\t\tdefault:\n\t\t\tinvalidFields[\"type\"] = fmt.Sprintf(\"invalid type value\")\n\t\t\tprojErr = ErrInvalidProject\n\t\t}\n\t}\n\n\treturn invalidFields, projErr\n}\n\n\/\/ ProjectFilter represents the available fields to filter a get project request\n\/\/ with.\ntype ProjectFilter struct {\n\tID *string `sql:\"id\"`\n\tTeamID *string `sql:\"team_id\"`\n\tSource *string `sql:\"source\"`\n\tType *string `sql:\"type\"`\n\tActive *bool `sql:\"active\"`\n\tMonitor *bool `sql:\"should_monitor\"`\n}\n\n\/\/ ParseParam takes a param string, breaks it apart, and repopulates it into a\n\/\/ struct for further use. Any invalid or incomplete interpretations of a field\n\/\/ will be ignored and only valid entries put into the struct.\nfunc ParseParam(param string) *ProjectFilter {\n\tif param == \"\" || !strings.ContainsAny(param, \":\") {\n\t\treturn nil\n\t}\n\n\tfvs := strings.Split(param, \",\")\n\n\tpf := ProjectFilter{}\n\n\tfor i := range fvs {\n\t\tparts := strings.Split(fvs[i], \":\")\n\n\t\tif len(parts) == 2 {\n\t\t\tname := strings.Title(parts[0])\n\t\t\tvalue := parts[1]\n\n\t\t\tfield, _ := reflect.TypeOf(&pf).Elem().FieldByName(name)\n\t\t\tkind := field.Type.Kind()\n\n\t\t\tif kind == reflect.Ptr {\n\t\t\t\tkind = field.Type.Elem().Kind()\n\t\t\t}\n\n\t\t\tswitch kind {\n\t\t\tcase reflect.String:\n\t\t\t\treflect.ValueOf(&pf).Elem().FieldByName(name).Set(reflect.ValueOf(&value))\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := strconv.ParseBool(value)\n\t\t\t\tif err == nil {\n\t\t\t\t\treflect.ValueOf(&pf).Elem().FieldByName(name).Set(reflect.ValueOf(&b))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &pf\n}\n\n\/\/ Param converts the non nil fields of the Project Filter into a string usable\n\/\/ for URL query params.\nfunc (pf *ProjectFilter) Param() string {\n\tps := make([]string, 0)\n\n\tfields := reflect.TypeOf(pf)\n\tvalues := reflect.ValueOf(pf)\n\n\tif fields.Kind() == reflect.Ptr {\n\t\tfields = fields.Elem()\n\t\tvalues = values.Elem()\n\t}\n\n\tfor i := 0; i < fields.NumField(); i++ {\n\t\tvalue := values.Field(i)\n\n\t\tif value.IsNil() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif value.Kind() == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t}\n\n\t\tname := strings.ToLower(fields.Field(i).Name)\n\n\t\tswitch value.Kind() {\n\t\tcase reflect.String:\n\t\t\tps = append(ps, fmt.Sprintf(\"%v:%v\", name, value.String()))\n\t\tcase reflect.Bool:\n\t\t\tps = append(ps, fmt.Sprintf(\"%v:%v\", name, value.Bool()))\n\t\t}\n\t}\n\n\treturn strings.Join(ps, \",\")\n}\n\n\/\/ SQL takes an identifier and returns the filter as a constructed where clause\n\/\/ and set of values for use in a query as SQL params. If the identifier is left\n\/\/ blank it will not be included in the resulting where clause.\nfunc (pf *ProjectFilter) SQL(identifier string) (string, []interface{}) {\n\twheres := make([]string, 0)\n\tvals := make([]interface{}, 0)\n\n\tfields := reflect.TypeOf(pf)\n\tvalues := reflect.ValueOf(pf)\n\n\tif fields.Kind() == reflect.Ptr {\n\t\tfields = fields.Elem()\n\t\tvalues = values.Elem()\n\t}\n\n\tidx := 1\n\tfor i := 0; i < fields.NumField(); i++ {\n\t\tvalue := values.Field(i)\n\n\t\tif value.IsNil() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif value.Kind() == reflect.Ptr {\n\t\t\tvalue = value.Elem()\n\t\t}\n\n\t\ttag, ok := fields.Field(i).Tag.Lookup(\"sql\")\n\t\tif !ok {\n\t\t\ttag = fields.Field(i).Name\n\t\t}\n\n\t\tident := \"\"\n\t\tif identifier != \"\" {\n\t\t\tident = fmt.Sprintf(\"%v.\", identifier)\n\t\t}\n\n\t\tname := strings.ToLower(tag)\n\t\twheres = append(wheres, fmt.Sprintf(\"%v%v=$%v\", ident, name, idx))\n\t\tvals = append(vals, value.Interface())\n\t\tidx++\n\t}\n\n\treturn strings.Join(wheres, \" AND \"), vals\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ A function used by read proxies to refresh their contents. See notes on\n\/\/ NewReadProxy.\ntype RefreshContentsFunc func(context.Context) (io.ReadCloser, error)\n\n\/\/ Create a read proxy.\n\/\/\n\/\/ The supplied function will be used to obtain the proxy's contents, the first\n\/\/ time they're needed and whenever the supplied file leaser decides to expire\n\/\/ the temporary copy thus obtained. It must return the same contents every\n\/\/ time, and the contents must be of the given size.\nfunc NewReadProxy(\n\tfl FileLeaser,\n\tsize int64,\n\trefresh RefreshContentsFunc) (rp *ReadProxy) {\n\trp = &ReadProxy{\n\t\tleaser: fl,\n\t\tsize: size,\n\t\trefresh: refresh,\n\t}\n\n\treturn\n}\n\n\/\/ A wrapper around a read lease, exposing a similar interface with the\n\/\/ following differences:\n\/\/\n\/\/ * Contents are fetched and re-fetched automatically when needed. Therefore\n\/\/ the user need not worry about lease expiration.\n\/\/\n\/\/ * Methods that may involve fetching the contents (reading, seeking) accept\n\/\/ context arguments, so as to be cancellable.\n\/\/\n\/\/ External synchronization is required.\ntype ReadProxy struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsize int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tleaser FileLeaser\n\trefresh RefreshContentsFunc\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The current wrapped lease, or nil if one has never been issued.\n\tlease ReadLease\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Attempt to clean up after the supplied read\/write lease.\nfunc destroyReadWriteLease(rwl ReadWriteLease) {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error destroying read\/write lease: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Downgrade to a read lease.\n\trl, err := rwl.Downgrade()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Downgrade: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Revoke the read lease.\n\trl.Revoke()\n}\n\nfunc isRevokedErr(err error) bool {\n\t_, ok := err.(*RevokedError)\n\treturn ok\n}\n\n\/\/ Set up a read\/write lease and fill in our contents.\n\/\/\n\/\/ REQUIRES: The caller has observed that rl.lease has expired.\nfunc (rl *autoRefreshingReadLease) getContents() (\n\trwl ReadWriteLease, err error) {\n\t\/\/ Obtain some space to write the contents.\n\trwl, err = rl.leaser.NewFile()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"NewFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to clean up if we exit early.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdestroyReadWriteLease(rwl)\n\t\t}\n\t}()\n\n\t\/\/ Obtain the reader for our contents.\n\trc, err := rl.f()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"User function: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := rc.Close()\n\t\tif closeErr != nil && err == nil {\n\t\t\terr = fmt.Errorf(\"Close: %v\", closeErr)\n\t\t}\n\t}()\n\n\t\/\/ Copy into the read\/write lease.\n\tcopied, err := io.Copy(rwl, rc)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Copy: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Did the user lie about the size?\n\tif copied != rl.Size() {\n\t\terr = fmt.Errorf(\"Copied %v bytes; expected %v\", copied, rl.Size())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Downgrade and save the supplied read\/write lease obtained with getContents\n\/\/ for later use.\nfunc (rl *autoRefreshingReadLease) saveContents(rwl ReadWriteLease) {\n\tdowngraded, err := rwl.Downgrade()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to downgrade write lease (%q); abandoning.\", err.Error())\n\t\treturn\n\t}\n\n\trl.wrapped = downgraded\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Semantics matching io.Reader, except with context support.\nfunc (rl *autoRefreshingReadLease) Read(\n\tctx context.Context,\n\tp []byte) (n int, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tn, err = rl.wrapped.Read(p)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.Read(p)\n\n\treturn\n}\n\n\/\/ Semantics matching io.Seeker, except with context support.\nfunc (rl *autoRefreshingReadLease) Seek(\n\tctx context.Context,\n\toffset int64,\n\twhence int) (off int64, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\toff, err = rl.wrapped.Seek(offset, whence)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\toff, err = rwl.Seek(offset, whence)\n\n\treturn\n}\n\n\/\/ Semantics matching io.ReaderAt, except with context support.\nfunc (rl *autoRefreshingReadLease) ReadAt(\n\tctx context.Context,\n\tp []byte,\n\toff int64) (n int, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tn, err = rl.wrapped.ReadAt(p, off)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.ReadAt(p, off)\n\n\treturn\n}\n\n\/\/ Return the size of the proxied content. Guarantees to not block.\nfunc (rl *autoRefreshingReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\n\/\/ For testing use only; do not touch.\nfunc (rl *autoRefreshingReadLease) Destroyed() (destroyed bool) {\n\tpanic(\"TODO\")\n}\n\n\/\/ Return a read\/write lease for the proxied contents. The read proxy must not\n\/\/ be used after calling this method.\nfunc (rl *autoRefreshingReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ If we succeed, we are now revoked.\n\tdefer func() {\n\t\tif err == nil {\n\t\t\trl.revoked = true\n\t\t}\n\t}()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\trwl, err = rl.wrapped.Upgrade()\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Build the read\/write lease anew.\n\trwl, err = rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Destroy any resources in use by the read proxy. It must not be used further.\nfunc (rl *autoRefreshingReadLease) Destroy() {\n\trl.revoked = true\n\tif rl.wrapped != nil {\n\t\trl.wrapped.Revoke()\n\t}\n}\n<commit_msg>ReadProxy.getContents<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ A function used by read proxies to refresh their contents. See notes on\n\/\/ NewReadProxy.\ntype RefreshContentsFunc func(context.Context) (io.ReadCloser, error)\n\n\/\/ Create a read proxy.\n\/\/\n\/\/ The supplied function will be used to obtain the proxy's contents, the first\n\/\/ time they're needed and whenever the supplied file leaser decides to expire\n\/\/ the temporary copy thus obtained. It must return the same contents every\n\/\/ time, and the contents must be of the given size.\nfunc NewReadProxy(\n\tfl FileLeaser,\n\tsize int64,\n\trefresh RefreshContentsFunc) (rp *ReadProxy) {\n\trp = &ReadProxy{\n\t\tleaser: fl,\n\t\tsize: size,\n\t\trefresh: refresh,\n\t}\n\n\treturn\n}\n\n\/\/ A wrapper around a read lease, exposing a similar interface with the\n\/\/ following differences:\n\/\/\n\/\/ * Contents are fetched and re-fetched automatically when needed. Therefore\n\/\/ the user need not worry about lease expiration.\n\/\/\n\/\/ * Methods that may involve fetching the contents (reading, seeking) accept\n\/\/ context arguments, so as to be cancellable.\n\/\/\n\/\/ External synchronization is required.\ntype ReadProxy struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsize int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tleaser FileLeaser\n\trefresh RefreshContentsFunc\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The current wrapped lease, or nil if one has never been issued.\n\tlease ReadLease\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Attempt to clean up after the supplied read\/write lease.\nfunc destroyReadWriteLease(rwl ReadWriteLease) {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error destroying read\/write lease: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Downgrade to a read lease.\n\trl, err := rwl.Downgrade()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Downgrade: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Revoke the read lease.\n\trl.Revoke()\n}\n\nfunc isRevokedErr(err error) bool {\n\t_, ok := err.(*RevokedError)\n\treturn ok\n}\n\n\/\/ Set up a read\/write lease and fill in our contents.\n\/\/\n\/\/ REQUIRES: The caller has observed that rp.lease has expired.\nfunc (rp *ReadProxy) getContents(\n\tctx context.Context) (rwl ReadWriteLease, err error) {\n\t\/\/ Obtain some space to write the contents.\n\trwl, err = rp.leaser.NewFile()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"NewFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to clean up if we exit early.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdestroyReadWriteLease(rwl)\n\t\t}\n\t}()\n\n\t\/\/ Obtain the reader for our contents.\n\trc, err := rp.refresh()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"User function: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := rc.Close()\n\t\tif closeErr != nil && err == nil {\n\t\t\terr = fmt.Errorf(\"Close: %v\", closeErr)\n\t\t}\n\t}()\n\n\t\/\/ Copy into the read\/write lease.\n\tcopied, err := io.Copy(rwl, rc)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Copy: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Did the user lie about the size?\n\tif copied != rp.Size() {\n\t\terr = fmt.Errorf(\"Copied %v bytes; expected %v\", copied, rl.Size())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Downgrade and save the supplied read\/write lease obtained with getContents\n\/\/ for later use.\nfunc (rl *autoRefreshingReadLease) saveContents(rwl ReadWriteLease) {\n\tdowngraded, err := rwl.Downgrade()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to downgrade write lease (%q); abandoning.\", err.Error())\n\t\treturn\n\t}\n\n\trl.wrapped = downgraded\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Semantics matching io.Reader, except with context support.\nfunc (rl *autoRefreshingReadLease) Read(\n\tctx context.Context,\n\tp []byte) (n int, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tn, err = rl.wrapped.Read(p)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.Read(p)\n\n\treturn\n}\n\n\/\/ Semantics matching io.Seeker, except with context support.\nfunc (rl *autoRefreshingReadLease) Seek(\n\tctx context.Context,\n\toffset int64,\n\twhence int) (off int64, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\toff, err = rl.wrapped.Seek(offset, whence)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\toff, err = rwl.Seek(offset, whence)\n\n\treturn\n}\n\n\/\/ Semantics matching io.ReaderAt, except with context support.\nfunc (rl *autoRefreshingReadLease) ReadAt(\n\tctx context.Context,\n\tp []byte,\n\toff int64) (n int, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tn, err = rl.wrapped.ReadAt(p, off)\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.ReadAt(p, off)\n\n\treturn\n}\n\n\/\/ Return the size of the proxied content. Guarantees to not block.\nfunc (rl *autoRefreshingReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\n\/\/ For testing use only; do not touch.\nfunc (rl *autoRefreshingReadLease) Destroyed() (destroyed bool) {\n\tpanic(\"TODO\")\n}\n\n\/\/ Return a read\/write lease for the proxied contents. The read proxy must not\n\/\/ be used after calling this method.\nfunc (rl *autoRefreshingReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\t\/\/ Special case: have we been permanently revoked?\n\tif rl.revoked {\n\t\terr = &RevokedError{}\n\t\treturn\n\t}\n\n\t\/\/ If we succeed, we are now revoked.\n\tdefer func() {\n\t\tif err == nil {\n\t\t\trl.revoked = true\n\t\t}\n\t}()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\trwl, err = rl.wrapped.Upgrade()\n\t\tif !isRevokedErr(err) {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Clear the revoked error.\n\t\terr = nil\n\t}\n\n\t\/\/ Build the read\/write lease anew.\n\trwl, err = rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Destroy any resources in use by the read proxy. It must not be used further.\nfunc (rl *autoRefreshingReadLease) Destroy() {\n\trl.revoked = true\n\tif rl.wrapped != nil {\n\t\trl.wrapped.Revoke()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package provider\n\nimport (\n\t\"github.com\/NetSys\/di\/db\"\n\t\"github.com\/NetSys\/di\/dsl\"\n)\n\n\/\/ Machine represents an instance of a machine booted by a Provider.\ntype Machine struct {\n\tID string\n\tPublicIP string\n\tPrivateIP string\n\tSize string\n\tSSHKeys []string\n\tProvider db.Provider\n\tRegion string\n}\n\n\/\/ Provider defines an interface for interacting with cloud providers.\ntype Provider interface {\n\tStart(conn db.Conn, id int, namespace string) error\n\n\tGet() ([]Machine, error)\n\n\tBoot(bootSet []Machine) error\n\n\tStop([]Machine) error\n\n\tDisconnect()\n\n\tPickBestSize(ram dsl.Range, cpu dsl.Range, maxPrice float64) string\n}\n\n\/\/ New returns an empty instance of the Provider represented by `dbp`\nfunc New(dbp db.Provider) Provider {\n\tswitch dbp {\n\tcase db.AmazonSpot:\n\t\treturn &awsSpotCluster{}\n\tcase db.Google:\n\t\treturn &gceCluster{}\n\tcase db.Azure:\n\t\treturn &azureCluster{}\n\tcase db.Vagrant:\n\t\treturn &vagrantCluster{}\n\tdefault:\n\t\tpanic(\"Unimplemented\")\n\t}\n}\n\n\/\/ GroupBy transforms the `machines` into a map of `db.Provider` to the machines\n\/\/ with that provider.\nfunc GroupBy(machines []Machine) map[db.Provider][]Machine {\n\tmachineMap := make(map[db.Provider][]Machine)\n\tfor _, m := range machines {\n\t\tif _, ok := machineMap[m.Provider]; !ok {\n\t\t\tmachineMap[m.Provider] = []Machine{}\n\t\t}\n\t\tmachineMap[m.Provider] = append(machineMap[m.Provider], m)\n\t}\n\n\treturn machineMap\n}\n<commit_msg>provider: Trivial tweak to provider interface<commit_after>package provider\n\nimport (\n\t\"github.com\/NetSys\/di\/db\"\n\t\"github.com\/NetSys\/di\/dsl\"\n)\n\n\/\/ Machine represents an instance of a machine booted by a Provider.\ntype Machine struct {\n\tID string\n\tPublicIP string\n\tPrivateIP string\n\tSize string\n\tSSHKeys []string\n\tProvider db.Provider\n\tRegion string\n}\n\n\/\/ Provider defines an interface for interacting with cloud providers.\ntype Provider interface {\n\tStart(conn db.Conn, id int, namespace string) error\n\n\tGet() ([]Machine, error)\n\n\tBoot([]Machine) error\n\n\tStop([]Machine) error\n\n\tDisconnect()\n\n\tPickBestSize(ram dsl.Range, cpu dsl.Range, maxPrice float64) string\n}\n\n\/\/ New returns an empty instance of the Provider represented by `dbp`\nfunc New(dbp db.Provider) Provider {\n\tswitch dbp {\n\tcase db.AmazonSpot:\n\t\treturn &awsSpotCluster{}\n\tcase db.Google:\n\t\treturn &gceCluster{}\n\tcase db.Azure:\n\t\treturn &azureCluster{}\n\tcase db.Vagrant:\n\t\treturn &vagrantCluster{}\n\tdefault:\n\t\tpanic(\"Unimplemented\")\n\t}\n}\n\n\/\/ GroupBy transforms the `machines` into a map of `db.Provider` to the machines\n\/\/ with that provider.\nfunc GroupBy(machines []Machine) map[db.Provider][]Machine {\n\tmachineMap := make(map[db.Provider][]Machine)\n\tfor _, m := range machines {\n\t\tif _, ok := machineMap[m.Provider]; !ok {\n\t\t\tmachineMap[m.Provider] = []Machine{}\n\t\t}\n\t\tmachineMap[m.Provider] = append(machineMap[m.Provider], m)\n\t}\n\n\treturn machineMap\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/Syfaro\/telegram-bot-api\"\n)\n\n\/\/ One User\ntype User struct {\n\tUserName string\n\tFirstName string\n\tLastName string\n\tAuthCode string\n\tIsAuthorized bool\n\tIsRoot bool\n\tPrivateChatID int\n}\n\n\/\/ Users in chat\ntype Users struct {\n\tlist map[int]*User\n\tallowedUsers map[string]bool\n\trootUsers map[string]bool\n}\n\n\/\/ length of random code in bytes\nconst CODE_BYTES_LENGTH = 15\n\n\/\/ new Users object\nfunc NewUsers(appConfig Config) Users {\n\tusers := Users{\n\t\tlist: map[int]*User{},\n\t\tallowedUsers: map[string]bool{},\n\t\trootUsers: map[string]bool{},\n\t}\n\n\tfor _, name := range appConfig.allowUsers {\n\t\tusers.allowedUsers[name] = true\n\t}\n\tfor _, name := range appConfig.rootUsers {\n\t\tusers.allowedUsers[name] = true\n\t\tusers.rootUsers[name] = true\n\t}\n\treturn users\n}\n\n\/\/ add new user if not exists\nfunc (users Users) AddNew(tgbot_user tgbotapi.User, tgbot_chat tgbotapi.UserOrGroupChat) {\n\tprivateChatID := 0\n\tif tgbot_chat.Title == \"\" {\n\t\tprivateChatID = tgbot_chat.ID\n\t}\n\n\tif _, ok := users.list[tgbot_user.ID]; ok && privateChatID > 0 {\n\t\tusers.list[tgbot_user.ID].PrivateChatID = privateChatID\n\t} else if !ok {\n\t\tusers.list[tgbot_user.ID] = &User{\n\t\t\tUserName: tgbot_user.UserName,\n\t\t\tFirstName: tgbot_user.FirstName,\n\t\t\tLastName: tgbot_user.LastName,\n\t\t\tIsAuthorized: users.allowedUsers[tgbot_user.UserName],\n\t\t\tIsRoot: users.rootUsers[tgbot_user.UserName],\n\t\t\tPrivateChatID: privateChatID,\n\t\t}\n\t}\n}\n\n\/\/ generate code\nfunc (users Users) DoLogin(user_id int) {\n\tusers.list[user_id].IsAuthorized = false\n\tusers.list[user_id].AuthCode = getRandomCode()\n}\n\n\/\/ check code for user\nfunc (users Users) IsValidCode(user_id int, code string) bool {\n\tresult := code != \"\" && code == users.list[user_id].AuthCode\n\tif result {\n\t\tusers.list[user_id].IsAuthorized = true\n\t}\n\n\treturn result\n}\n\n\/\/ check code for user\nfunc (users Users) IsAuthorized(tgbot_user tgbotapi.User) bool {\n\tisAuthorized := false\n\tif tgbot_user.UserName != \"\" && users.allowedUsers[tgbot_user.UserName] {\n\t\tisAuthorized = true\n\t} else if _, ok := users.list[tgbot_user.ID]; ok && users.list[tgbot_user.ID].IsAuthorized {\n\t\tisAuthorized = true\n\t}\n\n\treturn isAuthorized\n}\n\n\/\/ send message to all root users\nfunc (users Users) broadcastForRoots(bot *tgbotapi.BotAPI, message string) {\n\tfor _, user := range users.list {\n\t\tif user.IsRoot && user.PrivateChatID > 0 {\n\t\t\tbot.SendMessage(tgbotapi.NewMessage(user.PrivateChatID, message))\n\t\t}\n\t}\n}\n\n\/\/ Format user name\nfunc (users Users) String(user_id int) string {\n\tresult := fmt.Sprintf(\"%s %s\", users.list[user_id].FirstName, users.list[user_id].LastName)\n\tif users.list[user_id].UserName != \"\" {\n\t\tresult += fmt.Sprintf(\" (@%s)\", users.list[user_id].UserName)\n\t}\n\treturn result\n}\n\n\/\/ generate random code for authorize user\nfunc getRandomCode() string {\n\tbuffer := make([]byte, CODE_BYTES_LENGTH)\n\t_, err := rand.Read(buffer)\n\tif err != nil {\n\t\tlog.Fatal(\"Get code error:\", err)\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(buffer)\n}\n<commit_msg>Refactoring<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/Syfaro\/telegram-bot-api\"\n)\n\n\/\/ One User\ntype User struct {\n\tUserName string\n\tFirstName string\n\tLastName string\n\tAuthCode string\n\tIsAuthorized bool\n\tIsRoot bool\n\tPrivateChatID int\n}\n\n\/\/ Users in chat\ntype Users struct {\n\tlist map[int]*User\n\tallowedUsers map[string]bool\n\trootUsers map[string]bool\n}\n\n\/\/ length of random code in bytes\nconst CODE_BYTES_LENGTH = 15\n\n\/\/ new Users object\nfunc NewUsers(appConfig Config) Users {\n\tusers := Users{\n\t\tlist: map[int]*User{},\n\t\tallowedUsers: map[string]bool{},\n\t\trootUsers: map[string]bool{},\n\t}\n\n\tfor _, name := range appConfig.allowUsers {\n\t\tusers.allowedUsers[name] = true\n\t}\n\tfor _, name := range appConfig.rootUsers {\n\t\tusers.allowedUsers[name] = true\n\t\tusers.rootUsers[name] = true\n\t}\n\treturn users\n}\n\n\/\/ add new user if not exists\nfunc (users Users) AddNew(tgbot_user tgbotapi.User, tgbot_chat tgbotapi.UserOrGroupChat) {\n\tprivateChatID := 0\n\tif tgbot_chat.Title == \"\" {\n\t\tprivateChatID = tgbot_chat.ID\n\t}\n\n\tif _, ok := users.list[tgbot_user.ID]; ok && privateChatID > 0 {\n\t\tusers.list[tgbot_user.ID].PrivateChatID = privateChatID\n\t} else if !ok {\n\t\tusers.list[tgbot_user.ID] = &User{\n\t\t\tUserName: tgbot_user.UserName,\n\t\t\tFirstName: tgbot_user.FirstName,\n\t\t\tLastName: tgbot_user.LastName,\n\t\t\tIsAuthorized: users.allowedUsers[tgbot_user.UserName],\n\t\t\tIsRoot: users.rootUsers[tgbot_user.UserName],\n\t\t\tPrivateChatID: privateChatID,\n\t\t}\n\t}\n}\n\n\/\/ generate code\nfunc (users Users) DoLogin(user_id int) {\n\tusers.list[user_id].IsAuthorized = false\n\tusers.list[user_id].AuthCode = getRandomCode()\n}\n\n\/\/ check code for user\nfunc (users Users) IsValidCode(user_id int, code string) bool {\n\tresult := code != \"\" && code == users.list[user_id].AuthCode\n\tif result {\n\t\tusers.list[user_id].IsAuthorized = true\n\t}\n\n\treturn result\n}\n\n\/\/ check code for user\nfunc (users Users) IsAuthorized(tgbot_user tgbotapi.User) bool {\n\tisAuthorized := false\n\tif _, ok := users.list[tgbot_user.ID]; ok && users.list[tgbot_user.ID].IsAuthorized {\n\t\tisAuthorized = true\n\t}\n\n\treturn isAuthorized\n}\n\n\/\/ send message to all root users\nfunc (users Users) broadcastForRoots(bot *tgbotapi.BotAPI, message string) {\n\tfor _, user := range users.list {\n\t\tif user.IsRoot && user.PrivateChatID > 0 {\n\t\t\tbot.SendMessage(tgbotapi.NewMessage(user.PrivateChatID, message))\n\t\t}\n\t}\n}\n\n\/\/ Format user name\nfunc (users Users) String(user_id int) string {\n\tresult := fmt.Sprintf(\"%s %s\", users.list[user_id].FirstName, users.list[user_id].LastName)\n\tif users.list[user_id].UserName != \"\" {\n\t\tresult += fmt.Sprintf(\" (@%s)\", users.list[user_id].UserName)\n\t}\n\treturn result\n}\n\n\/\/ generate random code for authorize user\nfunc getRandomCode() string {\n\tbuffer := make([]byte, CODE_BYTES_LENGTH)\n\t_, err := rand.Read(buffer)\n\tif err != nil {\n\t\tlog.Fatal(\"Get code error:\", err)\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(buffer)\n}\n<|endoftext|>"} {"text":"<commit_before>package jsonschema\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ MaxLength MUST be a non-negative integer.\n\/\/ A string instance is valid against this keyword if its length is less than, or equal to, the value of this keyword.\n\/\/ The length of a string instance is defined as the number of its characters as defined by RFC 7159 [RFC7159].\ntype MaxLength int\n\n\/\/ NewMaxLength allocates a new MaxLength validator\nfunc NewMaxLength() Validator {\n\treturn new(MaxLength)\n}\n\n\/\/ Validate implements the Validator interface for MaxLength\nfunc (m MaxLength) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tif str, ok := data.(string); ok {\n\t\tif utf8.RuneCountInString(str) > int(m) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"max length of %d characters exceeded: %s\", m, str))\n\t\t}\n\t}\n}\n\n\/\/ MinLength MUST be a non-negative integer.\n\/\/ A string instance is valid against this keyword if its length is greater than, or equal to, the value of this keyword.\n\/\/ The length of a string instance is defined as the number of its characters as defined by RFC 7159 [RFC7159].\n\/\/ Omitting this keyword has the same behavior as a value of 0.\ntype MinLength int\n\n\/\/ NewMinLength allocates a new MinLength validator\nfunc NewMinLength() Validator {\n\treturn new(MinLength)\n}\n\n\/\/ Validate implements the Validator interface for MinLength\nfunc (m MinLength) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tif str, ok := data.(string); ok {\n\t\tif utf8.RuneCountInString(str) < int(m) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"min length of %d characters required: %s\", m, str))\n\t\t}\n\t}\n}\n\n\/\/ Pattern MUST be a string. This string SHOULD be a valid regular expression,\n\/\/ according to the ECMA 262 regular expression dialect.\n\/\/ A string instance is considered valid if the regular expression matches the instance successfully.\n\/\/ Recall: regular expressions are not implicitly anchored.\ntype Pattern regexp.Regexp\n\n\/\/ NewPattern allocates a new Pattern validator\nfunc NewPattern() Validator {\n\treturn &Pattern{}\n}\n\n\/\/ Validate implements the Validator interface for Pattern\nfunc (p Pattern) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tre := regexp.Regexp(p)\n\tif str, ok := data.(string); ok {\n\t\tif !re.Match([]byte(str)) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"regexp pattrn %s mismatch on string: %s\", re.String(), str))\n\t\t}\n\t}\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface for Pattern\nfunc (p *Pattern) UnmarshalJSON(data []byte) error {\n\tvar str string\n\tif err := json.Unmarshal(data, &str); err != nil {\n\t\treturn err\n\t}\n\n\tptn, err := regexp.Compile(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*p = Pattern(*ptn)\n\treturn nil\n}\n\n\/\/ MarshalJSON implements json.Marshaler for Pattern\nfunc (p Pattern) MarshalJSON() ([]byte, error) {\n\tre := regexp.Regexp(p)\n\trep := &re\n\treturn json.Marshal(rep.String())\n}\n<commit_msg>fix: Typo (#52)<commit_after>package jsonschema\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ MaxLength MUST be a non-negative integer.\n\/\/ A string instance is valid against this keyword if its length is less than, or equal to, the value of this keyword.\n\/\/ The length of a string instance is defined as the number of its characters as defined by RFC 7159 [RFC7159].\ntype MaxLength int\n\n\/\/ NewMaxLength allocates a new MaxLength validator\nfunc NewMaxLength() Validator {\n\treturn new(MaxLength)\n}\n\n\/\/ Validate implements the Validator interface for MaxLength\nfunc (m MaxLength) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tif str, ok := data.(string); ok {\n\t\tif utf8.RuneCountInString(str) > int(m) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"max length of %d characters exceeded: %s\", m, str))\n\t\t}\n\t}\n}\n\n\/\/ MinLength MUST be a non-negative integer.\n\/\/ A string instance is valid against this keyword if its length is greater than, or equal to, the value of this keyword.\n\/\/ The length of a string instance is defined as the number of its characters as defined by RFC 7159 [RFC7159].\n\/\/ Omitting this keyword has the same behavior as a value of 0.\ntype MinLength int\n\n\/\/ NewMinLength allocates a new MinLength validator\nfunc NewMinLength() Validator {\n\treturn new(MinLength)\n}\n\n\/\/ Validate implements the Validator interface for MinLength\nfunc (m MinLength) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tif str, ok := data.(string); ok {\n\t\tif utf8.RuneCountInString(str) < int(m) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"min length of %d characters required: %s\", m, str))\n\t\t}\n\t}\n}\n\n\/\/ Pattern MUST be a string. This string SHOULD be a valid regular expression,\n\/\/ according to the ECMA 262 regular expression dialect.\n\/\/ A string instance is considered valid if the regular expression matches the instance successfully.\n\/\/ Recall: regular expressions are not implicitly anchored.\ntype Pattern regexp.Regexp\n\n\/\/ NewPattern allocates a new Pattern validator\nfunc NewPattern() Validator {\n\treturn &Pattern{}\n}\n\n\/\/ Validate implements the Validator interface for Pattern\nfunc (p Pattern) Validate(propPath string, data interface{}, errs *[]ValError) {\n\tre := regexp.Regexp(p)\n\tif str, ok := data.(string); ok {\n\t\tif !re.Match([]byte(str)) {\n\t\t\tAddError(errs, propPath, data, fmt.Sprintf(\"regexp pattern %s mismatch on string: %s\", re.String(), str))\n\t\t}\n\t}\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface for Pattern\nfunc (p *Pattern) UnmarshalJSON(data []byte) error {\n\tvar str string\n\tif err := json.Unmarshal(data, &str); err != nil {\n\t\treturn err\n\t}\n\n\tptn, err := regexp.Compile(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*p = Pattern(*ptn)\n\treturn nil\n}\n\n\/\/ MarshalJSON implements json.Marshaler for Pattern\nfunc (p Pattern) MarshalJSON() ([]byte, error) {\n\tre := regexp.Regexp(p)\n\trep := &re\n\treturn json.Marshal(rep.String())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n)\n\nvar maxSize = flag.Int64(\"maxSize\", 20971520, \"the maximum size in bytes a user is allowed to upload\")\nvar addr = flag.String(\"addr\", \"localhost:8000\", \"host:port format IP address to listen on\")\nvar subpath = flag.String(\"subpath\", \"\/\", \"configure a subdirectory, for use with a reverse proxy (example: .\/9000server -subpath=\/image9000\/)\")\nvar logrequests = flag.Bool(\"logrequests\", false, \"print all HTTP requests to stdout\")\n\nvar acceptedfmt = map[string]string{\n\t\"image\/jpeg\": \"jpg\",\n\t\"image\/png\": \"png\",\n\t\"image\/gif\": \"gif\",\n\t\"video\/webm\": \"webm\",\n\t\"video\/x-matroska\": \"mkv\",\n\t\"video\/ogg\": \"ogv\",\n\t\"application\/ogg\": \"ogg\",\n\t\"audio\/ogg\": \"ogg\",\n\t\"audio\/mp3\": \"mp3\",\n}\n\nfunc CreateFileId(bt []byte, ext string) string {\n\tbytes := sha256.Sum256(bt)\n\tsha := hex.EncodeToString(bytes[:])\n\treturn sha[:14] + \".\" + ext\n}\n\nfunc Log(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif *logrequests {\n\t\t\tfmt.Printf(\"%s (%s) %s %s\\n\", r.RemoteAddr, r.UserAgent(), r.Method, r.URL)\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t})\n}\n\nfunc UploadHandler(rw http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tr.ParseMultipartForm(*maxSize)\n\t\tfile, _, _ := r.FormFile(\"file\")\n\t\tif r.ContentLength > *maxSize {\n\t\t\thttp.Error(rw, \"File Too big!\", http.StatusRequestEntityTooLarge)\n\t\t\treturn\n\t\t}\n\n\t\tImageReader := bufio.NewReader(file)\n\t\tImageBuffer := make([]byte, r.ContentLength)\n\t\t_, err := ImageReader.Read(ImageBuffer)\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tImageType := http.DetectContentType(ImageBuffer)\n\n\t\tif acceptedfmt[ImageType] != \"\" {\n\t\t\tid := CreateFileId(ImageBuffer, acceptedfmt[ImageType])\n\t\t\tif _, err := os.Stat(\"web\/img\/\" + id); os.IsNotExist(err) {\n\t\t\t\terr = ioutil.WriteFile(\"web\/img\/\"+id, ImageBuffer, 0666)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\thttp.Redirect(rw, r, *subpath+\"img\/\"+id, 301)\n\t\t\t} else {\n\t\t\t\thttp.Redirect(rw, r, \"https:\/\/www.youtube.com\/watch?v=dQw4w9WgXcQ\", 301)\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(rw, fmt.Sprintf(\"File type (%s) not supported.\", ImageType), http.StatusBadRequest)\n\t\t}\n\tdefault:\n\t\thttp.Error(rw, \"Bad Request\", http.StatusBadRequest)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif _, err := os.Stat(\"web\/img\"); os.IsNotExist(err) {\n\t\tos.Mkdir(\"web\/img\", 0666)\n\t}\n\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\"web\")))\n\n\thttp.HandleFunc(\"\/upload\", UploadHandler)\n\thttp.ListenAndServe(*addr, Log(http.DefaultServeMux))\n}\n<commit_msg>fix Content-Length issue<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n)\n\nvar maxSize = flag.Int64(\"maxSize\", 20971520, \"the maximum size in bytes a user is allowed to upload\")\nvar addr = flag.String(\"addr\", \"localhost:8000\", \"host:port format IP address to listen on\")\nvar subpath = flag.String(\"subpath\", \"\/\", \"configure a subdirectory, for use with a reverse proxy (example: .\/9000server -subpath=\/image9000\/)\")\nvar logrequests = flag.Bool(\"logrequests\", false, \"print all HTTP requests to stdout\")\n\nvar acceptedfmt = map[string]string{\n\t\"image\/jpeg\": \"jpg\",\n\t\"image\/png\": \"png\",\n\t\"image\/gif\": \"gif\",\n\t\"video\/webm\": \"webm\",\n\t\"video\/x-matroska\": \"mkv\",\n\t\"video\/ogg\": \"ogv\",\n\t\"application\/ogg\": \"ogg\",\n\t\"audio\/ogg\": \"ogg\",\n\t\"audio\/mp3\": \"mp3\",\n}\n\nfunc CreateFileId(bt []byte, ext string) string {\n\tbytes := sha256.Sum256(bt)\n\tsha := hex.EncodeToString(bytes[:])\n\treturn sha[:14] + \".\" + ext\n}\n\nfunc Log(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif *logrequests {\n\t\t\tfmt.Printf(\"%s (%s) %s %s\\n\", r.RemoteAddr, r.UserAgent(), r.Method, r.URL)\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t})\n}\n\nfunc UploadHandler(rw http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tr.ParseMultipartForm(*maxSize)\n\t\tfile, _, _ := r.FormFile(\"file\")\n\n\t\tif r.ContentLength > *maxSize {\n\t\t\thttp.Error(rw, \"File Too big!\", http.StatusRequestEntityTooLarge)\n\t\t\treturn\n\t\t}\n\n\n\t\tvar ImageBuf bytes.Buffer\n\n\t\tio.Copy(&ImageBuf, file)\n\n\t\tImageBuffer := ImageBuf.Bytes()\n\t\tImageType := http.DetectContentType(ImageBuffer)\n\n\t\tif acceptedfmt[ImageType] != \"\" {\n\t\t\tid := CreateFileId(ImageBuffer, acceptedfmt[ImageType])\n\t\t\tfmt.Println(\"File id:\", id)\n\t\t\tif _, err := os.Stat(\"web\/img\/\" + id); os.IsNotExist(err) {\n\t\t\t\terr = ioutil.WriteFile(\"web\/img\/\"+id, ImageBuffer, 0666)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\thttp.Redirect(rw, r, *subpath+\"img\/\"+id, 301)\n\t\t\t} else {\n\t\t\t\thttp.Redirect(rw, r, \"https:\/\/www.youtube.com\/watch?v=dQw4w9WgXcQ\", 301)\n\t\t\t}\n\t\t} else {\n\t\t\thttp.Error(rw, fmt.Sprintf(\"File type (%s) not supported.\", ImageType), http.StatusBadRequest)\n\t\t}\n\tdefault:\n\t\thttp.Error(rw, \"Bad Request\", http.StatusBadRequest)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif _, err := os.Stat(\"web\/img\"); os.IsNotExist(err) {\n\t\tos.Mkdir(\"web\/img\", 0666)\n\t}\n\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\"web\")))\n\n\thttp.HandleFunc(\"\/upload\", UploadHandler)\n\thttp.ListenAndServe(*addr, Log(http.DefaultServeMux))\n}\n<|endoftext|>"} {"text":"<commit_before>package edit\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/elves\/elvish\/edit\/uitypes\"\n\t\"github.com\/elves\/elvish\/util\"\n)\n\n\/\/ Completion subsystem.\n\n\/\/ Interface.\n\ntype completion struct {\n\tcompleter string\n\tbegin, end int\n\tcandidates []*candidate\n\n\tfiltering bool\n\tfilter string\n\tfiltered []*candidate\n\tselected int\n\tfirstShown int\n\tlastShownInFull int\n\theight int\n}\n\nfunc (*completion) Mode() ModeType {\n\treturn modeCompletion\n}\n\nfunc (c *completion) needScrollbar() bool {\n\treturn c.firstShown > 0 || c.lastShownInFull < len(c.filtered)-1\n}\n\nfunc (c *completion) ModeLine() renderer {\n\tml := modeLineRenderer{fmt.Sprintf(\" COMPLETING %s \", c.completer), c.filter}\n\tif !c.needScrollbar() {\n\t\treturn ml\n\t}\n\treturn modeLineWithScrollBarRenderer{ml,\n\t\tlen(c.filtered), c.firstShown, c.lastShownInFull + 1}\n}\n\nfunc (c *completion) CursorOnModeLine() bool {\n\treturn c.filtering\n}\n\nfunc startCompl(ed *Editor) {\n\tstartCompletionInner(ed, false)\n}\n\nfunc complPrefixOrStartCompl(ed *Editor) {\n\tstartCompletionInner(ed, true)\n}\n\nfunc complUp(ed *Editor) {\n\ted.completion.prev(false)\n}\n\nfunc complDown(ed *Editor) {\n\ted.completion.next(false)\n}\n\nfunc complLeft(ed *Editor) {\n\tif c := ed.completion.selected - ed.completion.height; c >= 0 {\n\t\ted.completion.selected = c\n\t}\n}\n\nfunc complRight(ed *Editor) {\n\tif c := ed.completion.selected + ed.completion.height; c < len(ed.completion.filtered) {\n\t\ted.completion.selected = c\n\t}\n}\n\nfunc complDownCycle(ed *Editor) {\n\ted.completion.next(true)\n}\n\n\/\/ acceptCompletion accepts currently selected completion candidate.\nfunc complAccept(ed *Editor) {\n\tc := ed.completion\n\tif 0 <= c.selected && c.selected < len(c.filtered) {\n\t\ted.line, ed.dot = c.apply(ed.line, ed.dot)\n\t}\n\ted.mode = &ed.insert\n}\n\nfunc complDefault(ed *Editor) {\n\tk := ed.lastKey\n\tc := &ed.completion\n\tif c.filtering && likeChar(k) {\n\t\tc.changeFilter(c.filter + string(k.Rune))\n\t} else if c.filtering && k == (uitypes.Key{uitypes.Backspace, 0}) {\n\t\t_, size := utf8.DecodeLastRuneInString(c.filter)\n\t\tif size > 0 {\n\t\t\tc.changeFilter(c.filter[:len(c.filter)-size])\n\t\t}\n\t} else {\n\t\tcomplAccept(ed)\n\t\ted.nextAction = action{typ: reprocessKey}\n\t}\n}\n\nfunc complTriggerFilter(ed *Editor) {\n\tc := &ed.completion\n\tif c.filtering {\n\t\tc.filtering = false\n\t\tc.changeFilter(\"\")\n\t} else {\n\t\tc.filtering = true\n\t}\n}\n\nfunc (comp *completion) selectedCandidate() *candidate {\n\tif comp.selected == -1 {\n\t\treturn &candidate{}\n\t}\n\treturn comp.filtered[comp.selected]\n}\n\n\/\/ apply returns the line and dot after applying a candidate.\nfunc (comp *completion) apply(line string, dot int) (string, int) {\n\ttext := comp.selectedCandidate().text\n\treturn line[:comp.begin] + text + line[comp.end:], comp.begin + len(text)\n}\n\nfunc (c *completion) prev(cycle bool) {\n\tc.selected--\n\tif c.selected == -1 {\n\t\tif cycle {\n\t\t\tc.selected = len(c.filtered) - 1\n\t\t} else {\n\t\t\tc.selected++\n\t\t}\n\t}\n}\n\nfunc (c *completion) next(cycle bool) {\n\tc.selected++\n\tif c.selected == len(c.filtered) {\n\t\tif cycle {\n\t\t\tc.selected = 0\n\t\t} else {\n\t\t\tc.selected--\n\t\t}\n\t}\n}\n\nfunc startCompletionInner(ed *Editor, acceptPrefix bool) {\n\tnode := findLeafNode(ed.chunk, ed.dot)\n\tif node == nil {\n\t\treturn\n\t}\n\n\tc := &completion{begin: -1}\n\tshownError := false\n\tfor _, item := range completers {\n\t\tcompl, err := item.completer(node, ed.evaler)\n\t\tif compl != nil {\n\t\t\tc.completer = item.name\n\t\t\tc.begin, c.end, c.candidates = compl.begin, compl.end, compl.candidates\n\t\t\tc.filtered = c.candidates\n\t\t\tbreak\n\t\t} else if err != nil && err != errCompletionUnapplicable {\n\t\t\ted.addTip(\"%v\", err)\n\t\t\tshownError = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif c.begin < 0 {\n\t\tif !shownError {\n\t\t\ted.addTip(\"unsupported completion :(\")\n\t\t}\n\t\tLogger.Println(\"path to current leaf, leaf first\")\n\t\tfor n := node; n != nil; n = n.Parent() {\n\t\t\tLogger.Printf(\"%T (%d-%d)\", n, n.Begin(), n.End())\n\t\t}\n\t} else if len(c.filtered) == 0 {\n\t\ted.addTip(\"no candidate for %s\", c.completer)\n\t} else {\n\t\tif acceptPrefix {\n\t\t\t\/\/ If there is a non-empty longest common prefix, insert it and\n\t\t\t\/\/ don't start completion mode.\n\t\t\t\/\/\n\t\t\t\/\/ As a special case, when there is exactly one candidate, it is\n\t\t\t\/\/ immeidately accepted.\n\t\t\tprefix := c.filtered[0].text\n\t\t\tfor _, cand := range c.filtered[1:] {\n\t\t\t\tprefix = commonPrefix(prefix, cand.text)\n\t\t\t\tif prefix == \"\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif prefix != \"\" && prefix != ed.line[c.begin:c.end] {\n\t\t\t\ted.line = ed.line[:c.begin] + prefix + ed.line[c.end:]\n\t\t\t\ted.dot = c.begin + len(prefix)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ted.completion = *c\n\t\ted.mode = &ed.completion\n\t}\n}\n\n\/\/ commonPrefix returns the longest common prefix of two strings.\nfunc commonPrefix(s, t string) string {\n\tfor i, r := range s {\n\t\tif i >= len(t) {\n\t\t\treturn s[:i]\n\t\t}\n\t\tr2, _ := utf8.DecodeRuneInString(t[i:])\n\t\tif r2 != r {\n\t\t\treturn s[:i]\n\t\t}\n\t}\n\treturn s\n}\n\nconst (\n\tcompletionColMarginLeft = 1\n\tcompletionColMarginRight = 1\n\tcompletionColMarginTotal = completionColMarginLeft + completionColMarginRight\n)\n\n\/\/ maxWidth finds the maximum wcwidth of display texts of candidates [lo, hi).\n\/\/ hi may be larger than the number of candidates, in which case it is truncated\n\/\/ to the number of candidates.\nfunc (comp *completion) maxWidth(lo, hi int) int {\n\tif hi > len(comp.filtered) {\n\t\thi = len(comp.filtered)\n\t}\n\twidth := 0\n\tfor i := lo; i < hi; i++ {\n\t\tw := util.Wcswidth(comp.filtered[i].display.text)\n\t\tif width < w {\n\t\t\twidth = w\n\t\t}\n\t}\n\treturn width\n}\n\nfunc (comp *completion) ListRender(width, maxHeight int) *buffer {\n\tb := newBuffer(width)\n\tcands := comp.filtered\n\tif len(cands) == 0 {\n\t\tb.writes(util.TrimWcwidth(\"(no result)\", width), \"\")\n\t\treturn b\n\t}\n\tif maxHeight <= 1 || width <= 2 {\n\t\tb.writes(util.TrimWcwidth(\"(terminal too small)\", width), \"\")\n\t\treturn b\n\t}\n\n\t\/\/ Reserve the the rightmost row as margins.\n\twidth -= 1\n\n\t\/\/ Determine comp.height and comp.firstShown.\n\t\/\/ First determine whether all candidates can be fit in the screen,\n\t\/\/ assuming that they are all of maximum width. If that is the case, we use\n\t\/\/ the computed height as the height for the listing, and the first\n\t\/\/ candidate to show is 0. Otherwise, we use min(height, len(cands)) as the\n\t\/\/ height and find the first candidate to show.\n\tperLine := max(1, width\/(comp.maxWidth(0, len(cands))+completionColMarginTotal))\n\theightBound := util.CeilDiv(len(cands), perLine)\n\tfirst := 0\n\theight := 0\n\tif heightBound < maxHeight {\n\t\theight = heightBound\n\t} else {\n\t\theight = min(maxHeight, len(cands))\n\t\t\/\/ Determine the first column to show. We start with the column in which the\n\t\t\/\/ selected one is found, moving to the left until either the width is\n\t\t\/\/ exhausted, or the old value of firstShown has been hit.\n\t\tfirst = comp.selected \/ height * height\n\t\tw := comp.maxWidth(first, first+height) + completionColMarginTotal\n\t\tfor ; first > comp.firstShown; first -= height {\n\t\t\tdw := comp.maxWidth(first-height, first) + completionColMarginTotal\n\t\t\tif w+dw > width {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tw += dw\n\t\t}\n\t}\n\tcomp.height = height\n\tcomp.firstShown = first\n\n\tvar i, j int\n\tremainedWidth := width\n\ttrimmed := false\n\t\/\/ Show the results in columns, until width is exceeded.\n\tfor i = first; i < len(cands); i += height {\n\t\t\/\/ Determine the width of the column (without the margin)\n\t\tcolWidth := comp.maxWidth(i, min(i+height, len(cands)))\n\t\ttotalColWidth := colWidth + completionColMarginTotal\n\t\tif totalColWidth > remainedWidth {\n\t\t\ttotalColWidth = remainedWidth\n\t\t\tcolWidth = totalColWidth - completionColMarginTotal\n\t\t\ttrimmed = true\n\t\t}\n\n\t\tcol := newBuffer(totalColWidth)\n\t\tfor j = i; j < i+height; j++ {\n\t\t\tif j > i {\n\t\t\t\tcol.newline()\n\t\t\t}\n\t\t\tif j >= len(cands) {\n\t\t\t\t\/\/ Write padding to make the listing a rectangle.\n\t\t\t\tcol.writePadding(totalColWidth, styleForCompletion.String())\n\t\t\t} else {\n\t\t\t\tcol.writePadding(completionColMarginLeft, styleForCompletion.String())\n\t\t\t\ts := joinStyles(styleForCompletion, cands[j].display.styles)\n\t\t\t\tif j == comp.selected {\n\t\t\t\t\ts = append(s, styleForSelectedCompletion.String())\n\t\t\t\t}\n\t\t\t\tcol.writes(util.ForceWcwidth(cands[j].display.text, colWidth), s.String())\n\t\t\t\tcol.writePadding(completionColMarginRight, styleForCompletion.String())\n\t\t\t\tif !trimmed {\n\t\t\t\t\tcomp.lastShownInFull = j\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tb.extendHorizontal(col, 0)\n\t\tremainedWidth -= totalColWidth\n\t\tif remainedWidth <= completionColMarginTotal {\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ When the listing is incomplete, always use up the entire width.\n\tif remainedWidth > 0 && comp.needScrollbar() {\n\t\tcol := newBuffer(remainedWidth)\n\t\tfor i := 0; i < height; i++ {\n\t\t\tif i > 0 {\n\t\t\t\tcol.newline()\n\t\t\t}\n\t\t\tcol.writePadding(remainedWidth, styleForCompletion.String())\n\t\t}\n\t\tb.extendHorizontal(col, 0)\n\t\tremainedWidth = 0\n\t}\n\treturn b\n}\n\nfunc (c *completion) changeFilter(f string) {\n\tc.filter = f\n\tif f == \"\" {\n\t\tc.filtered = c.candidates\n\t\treturn\n\t}\n\tc.filtered = nil\n\tfor _, cand := range c.candidates {\n\t\tif strings.Contains(cand.display.text, f) {\n\t\t\tc.filtered = append(c.filtered, cand)\n\t\t}\n\t}\n\tif len(c.filtered) > 0 {\n\t\tc.selected = 0\n\t} else {\n\t\tc.selected = -1\n\t}\n}\n<commit_msg>Let edit.completion embed edit.compl.<commit_after>package edit\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/elves\/elvish\/edit\/uitypes\"\n\t\"github.com\/elves\/elvish\/util\"\n)\n\n\/\/ Completion subsystem.\n\n\/\/ Interface.\n\ntype completion struct {\n\tcompl\n\tcompleter string\n\n\tfiltering bool\n\tfilter string\n\tfiltered []*candidate\n\tselected int\n\tfirstShown int\n\tlastShownInFull int\n\theight int\n}\n\nfunc (*completion) Mode() ModeType {\n\treturn modeCompletion\n}\n\nfunc (c *completion) needScrollbar() bool {\n\treturn c.firstShown > 0 || c.lastShownInFull < len(c.filtered)-1\n}\n\nfunc (c *completion) ModeLine() renderer {\n\tml := modeLineRenderer{fmt.Sprintf(\" COMPLETING %s \", c.completer), c.filter}\n\tif !c.needScrollbar() {\n\t\treturn ml\n\t}\n\treturn modeLineWithScrollBarRenderer{ml,\n\t\tlen(c.filtered), c.firstShown, c.lastShownInFull + 1}\n}\n\nfunc (c *completion) CursorOnModeLine() bool {\n\treturn c.filtering\n}\n\nfunc startCompl(ed *Editor) {\n\tstartCompletionInner(ed, false)\n}\n\nfunc complPrefixOrStartCompl(ed *Editor) {\n\tstartCompletionInner(ed, true)\n}\n\nfunc complUp(ed *Editor) {\n\ted.completion.prev(false)\n}\n\nfunc complDown(ed *Editor) {\n\ted.completion.next(false)\n}\n\nfunc complLeft(ed *Editor) {\n\tif c := ed.completion.selected - ed.completion.height; c >= 0 {\n\t\ted.completion.selected = c\n\t}\n}\n\nfunc complRight(ed *Editor) {\n\tif c := ed.completion.selected + ed.completion.height; c < len(ed.completion.filtered) {\n\t\ted.completion.selected = c\n\t}\n}\n\nfunc complDownCycle(ed *Editor) {\n\ted.completion.next(true)\n}\n\n\/\/ acceptCompletion accepts currently selected completion candidate.\nfunc complAccept(ed *Editor) {\n\tc := ed.completion\n\tif 0 <= c.selected && c.selected < len(c.filtered) {\n\t\ted.line, ed.dot = c.apply(ed.line, ed.dot)\n\t}\n\ted.mode = &ed.insert\n}\n\nfunc complDefault(ed *Editor) {\n\tk := ed.lastKey\n\tc := &ed.completion\n\tif c.filtering && likeChar(k) {\n\t\tc.changeFilter(c.filter + string(k.Rune))\n\t} else if c.filtering && k == (uitypes.Key{uitypes.Backspace, 0}) {\n\t\t_, size := utf8.DecodeLastRuneInString(c.filter)\n\t\tif size > 0 {\n\t\t\tc.changeFilter(c.filter[:len(c.filter)-size])\n\t\t}\n\t} else {\n\t\tcomplAccept(ed)\n\t\ted.nextAction = action{typ: reprocessKey}\n\t}\n}\n\nfunc complTriggerFilter(ed *Editor) {\n\tc := &ed.completion\n\tif c.filtering {\n\t\tc.filtering = false\n\t\tc.changeFilter(\"\")\n\t} else {\n\t\tc.filtering = true\n\t}\n}\n\nfunc (comp *completion) selectedCandidate() *candidate {\n\tif comp.selected == -1 {\n\t\treturn &candidate{}\n\t}\n\treturn comp.filtered[comp.selected]\n}\n\n\/\/ apply returns the line and dot after applying a candidate.\nfunc (comp *completion) apply(line string, dot int) (string, int) {\n\ttext := comp.selectedCandidate().text\n\treturn line[:comp.begin] + text + line[comp.end:], comp.begin + len(text)\n}\n\nfunc (c *completion) prev(cycle bool) {\n\tc.selected--\n\tif c.selected == -1 {\n\t\tif cycle {\n\t\t\tc.selected = len(c.filtered) - 1\n\t\t} else {\n\t\t\tc.selected++\n\t\t}\n\t}\n}\n\nfunc (c *completion) next(cycle bool) {\n\tc.selected++\n\tif c.selected == len(c.filtered) {\n\t\tif cycle {\n\t\t\tc.selected = 0\n\t\t} else {\n\t\t\tc.selected--\n\t\t}\n\t}\n}\n\nfunc startCompletionInner(ed *Editor, acceptPrefix bool) {\n\tnode := findLeafNode(ed.chunk, ed.dot)\n\tif node == nil {\n\t\treturn\n\t}\n\n\tc := &completion{}\n\tshownError := false\n\tfor _, item := range completers {\n\t\tcompl, err := item.completer(node, ed.evaler)\n\t\tif compl != nil {\n\t\t\tc.completer = item.name\n\t\t\tc.compl = *compl\n\t\t\tc.filtered = c.candidates\n\t\t\tbreak\n\t\t} else if err != nil && err != errCompletionUnapplicable {\n\t\t\ted.addTip(\"%v\", err)\n\t\t\tshownError = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif c.completer == \"\" {\n\t\tif !shownError {\n\t\t\ted.addTip(\"unsupported completion :(\")\n\t\t}\n\t\tLogger.Println(\"path to current leaf, leaf first\")\n\t\tfor n := node; n != nil; n = n.Parent() {\n\t\t\tLogger.Printf(\"%T (%d-%d)\", n, n.Begin(), n.End())\n\t\t}\n\t} else if len(c.filtered) == 0 {\n\t\ted.addTip(\"no candidate for %s\", c.completer)\n\t} else {\n\t\tif acceptPrefix {\n\t\t\t\/\/ If there is a non-empty longest common prefix, insert it and\n\t\t\t\/\/ don't start completion mode.\n\t\t\t\/\/\n\t\t\t\/\/ As a special case, when there is exactly one candidate, it is\n\t\t\t\/\/ immeidately accepted.\n\t\t\tprefix := c.filtered[0].text\n\t\t\tfor _, cand := range c.filtered[1:] {\n\t\t\t\tprefix = commonPrefix(prefix, cand.text)\n\t\t\t\tif prefix == \"\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif prefix != \"\" && prefix != ed.line[c.begin:c.end] {\n\t\t\t\ted.line = ed.line[:c.begin] + prefix + ed.line[c.end:]\n\t\t\t\ted.dot = c.begin + len(prefix)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ted.completion = *c\n\t\ted.mode = &ed.completion\n\t}\n}\n\n\/\/ commonPrefix returns the longest common prefix of two strings.\nfunc commonPrefix(s, t string) string {\n\tfor i, r := range s {\n\t\tif i >= len(t) {\n\t\t\treturn s[:i]\n\t\t}\n\t\tr2, _ := utf8.DecodeRuneInString(t[i:])\n\t\tif r2 != r {\n\t\t\treturn s[:i]\n\t\t}\n\t}\n\treturn s\n}\n\nconst (\n\tcompletionColMarginLeft = 1\n\tcompletionColMarginRight = 1\n\tcompletionColMarginTotal = completionColMarginLeft + completionColMarginRight\n)\n\n\/\/ maxWidth finds the maximum wcwidth of display texts of candidates [lo, hi).\n\/\/ hi may be larger than the number of candidates, in which case it is truncated\n\/\/ to the number of candidates.\nfunc (comp *completion) maxWidth(lo, hi int) int {\n\tif hi > len(comp.filtered) {\n\t\thi = len(comp.filtered)\n\t}\n\twidth := 0\n\tfor i := lo; i < hi; i++ {\n\t\tw := util.Wcswidth(comp.filtered[i].display.text)\n\t\tif width < w {\n\t\t\twidth = w\n\t\t}\n\t}\n\treturn width\n}\n\nfunc (comp *completion) ListRender(width, maxHeight int) *buffer {\n\tb := newBuffer(width)\n\tcands := comp.filtered\n\tif len(cands) == 0 {\n\t\tb.writes(util.TrimWcwidth(\"(no result)\", width), \"\")\n\t\treturn b\n\t}\n\tif maxHeight <= 1 || width <= 2 {\n\t\tb.writes(util.TrimWcwidth(\"(terminal too small)\", width), \"\")\n\t\treturn b\n\t}\n\n\t\/\/ Reserve the the rightmost row as margins.\n\twidth -= 1\n\n\t\/\/ Determine comp.height and comp.firstShown.\n\t\/\/ First determine whether all candidates can be fit in the screen,\n\t\/\/ assuming that they are all of maximum width. If that is the case, we use\n\t\/\/ the computed height as the height for the listing, and the first\n\t\/\/ candidate to show is 0. Otherwise, we use min(height, len(cands)) as the\n\t\/\/ height and find the first candidate to show.\n\tperLine := max(1, width\/(comp.maxWidth(0, len(cands))+completionColMarginTotal))\n\theightBound := util.CeilDiv(len(cands), perLine)\n\tfirst := 0\n\theight := 0\n\tif heightBound < maxHeight {\n\t\theight = heightBound\n\t} else {\n\t\theight = min(maxHeight, len(cands))\n\t\t\/\/ Determine the first column to show. We start with the column in which the\n\t\t\/\/ selected one is found, moving to the left until either the width is\n\t\t\/\/ exhausted, or the old value of firstShown has been hit.\n\t\tfirst = comp.selected \/ height * height\n\t\tw := comp.maxWidth(first, first+height) + completionColMarginTotal\n\t\tfor ; first > comp.firstShown; first -= height {\n\t\t\tdw := comp.maxWidth(first-height, first) + completionColMarginTotal\n\t\t\tif w+dw > width {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tw += dw\n\t\t}\n\t}\n\tcomp.height = height\n\tcomp.firstShown = first\n\n\tvar i, j int\n\tremainedWidth := width\n\ttrimmed := false\n\t\/\/ Show the results in columns, until width is exceeded.\n\tfor i = first; i < len(cands); i += height {\n\t\t\/\/ Determine the width of the column (without the margin)\n\t\tcolWidth := comp.maxWidth(i, min(i+height, len(cands)))\n\t\ttotalColWidth := colWidth + completionColMarginTotal\n\t\tif totalColWidth > remainedWidth {\n\t\t\ttotalColWidth = remainedWidth\n\t\t\tcolWidth = totalColWidth - completionColMarginTotal\n\t\t\ttrimmed = true\n\t\t}\n\n\t\tcol := newBuffer(totalColWidth)\n\t\tfor j = i; j < i+height; j++ {\n\t\t\tif j > i {\n\t\t\t\tcol.newline()\n\t\t\t}\n\t\t\tif j >= len(cands) {\n\t\t\t\t\/\/ Write padding to make the listing a rectangle.\n\t\t\t\tcol.writePadding(totalColWidth, styleForCompletion.String())\n\t\t\t} else {\n\t\t\t\tcol.writePadding(completionColMarginLeft, styleForCompletion.String())\n\t\t\t\ts := joinStyles(styleForCompletion, cands[j].display.styles)\n\t\t\t\tif j == comp.selected {\n\t\t\t\t\ts = append(s, styleForSelectedCompletion.String())\n\t\t\t\t}\n\t\t\t\tcol.writes(util.ForceWcwidth(cands[j].display.text, colWidth), s.String())\n\t\t\t\tcol.writePadding(completionColMarginRight, styleForCompletion.String())\n\t\t\t\tif !trimmed {\n\t\t\t\t\tcomp.lastShownInFull = j\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tb.extendHorizontal(col, 0)\n\t\tremainedWidth -= totalColWidth\n\t\tif remainedWidth <= completionColMarginTotal {\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ When the listing is incomplete, always use up the entire width.\n\tif remainedWidth > 0 && comp.needScrollbar() {\n\t\tcol := newBuffer(remainedWidth)\n\t\tfor i := 0; i < height; i++ {\n\t\t\tif i > 0 {\n\t\t\t\tcol.newline()\n\t\t\t}\n\t\t\tcol.writePadding(remainedWidth, styleForCompletion.String())\n\t\t}\n\t\tb.extendHorizontal(col, 0)\n\t\tremainedWidth = 0\n\t}\n\treturn b\n}\n\nfunc (c *completion) changeFilter(f string) {\n\tc.filter = f\n\tif f == \"\" {\n\t\tc.filtered = c.candidates\n\t\treturn\n\t}\n\tc.filtered = nil\n\tfor _, cand := range c.candidates {\n\t\tif strings.Contains(cand.display.text, f) {\n\t\t\tc.filtered = append(c.filtered, cand)\n\t\t}\n\t}\n\tif len(c.filtered) > 0 {\n\t\tc.selected = 0\n\t} else {\n\t\tc.selected = -1\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package langdet\n\n\/\/ Token represents a text token and its occurence in an analyzed text\ntype Token struct {\n\tOccurrence int\n\tKey string\n}\n\n\/\/ ByOccurrence represents an array of tokens which can be sorted by occurrences of the tokens.\ntype ByOccurrence []Token\n\nfunc (a ByOccurrence) Len() int { return len(a) }\nfunc (a ByOccurrence) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ByOccurrence) Less(i, j int) bool {\n\tif a[i].Occurrence == a[j].Occurrence {\n\t\treturn a[i].Key < a[i].Key\n\t}\n\treturn a[i].Occurrence < a[j].Occurrence\n}\n\n\/\/ Language represents a language by its name and the profile ( map[token]OccurrenceRank )\ntype Language struct {\n\tProfile map[string]int\n\tName string\n}\n\n\/\/ DetectionResult represents the result from comparing 2 Profiles. It includes the confidence which is basically the\n\/\/ the relative distance between the two profiles.\ntype DetectionResult struct {\n\tName string\n\tConfidence int\n}\n\n\/\/ResByConf represents an array of DetectionResult and can be sorted by Confidence.\ntype ResByConf []DetectionResult\n\nfunc (a ResByConf) Len() int { return len(a) }\nfunc (a ResByConf) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ResByConf) Less(i, j int) bool { return a[i].Confidence > a[j].Confidence }\n<commit_msg>Fix typo in ngram sorting code.<commit_after>package langdet\n\n\/\/ Token represents a text token and its occurence in an analyzed text\ntype Token struct {\n\tOccurrence int\n\tKey string\n}\n\n\/\/ ByOccurrence represents an array of tokens which can be sorted by occurrences of the tokens.\ntype ByOccurrence []Token\n\nfunc (a ByOccurrence) Len() int { return len(a) }\nfunc (a ByOccurrence) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ByOccurrence) Less(i, j int) bool {\n\tif a[i].Occurrence == a[j].Occurrence {\n\t\treturn a[i].Key < a[j].Key\n\t}\n\treturn a[i].Occurrence < a[j].Occurrence\n}\n\n\/\/ Language represents a language by its name and the profile ( map[token]OccurrenceRank )\ntype Language struct {\n\tProfile map[string]int\n\tName string\n}\n\n\/\/ DetectionResult represents the result from comparing 2 Profiles. It includes the confidence which is basically the\n\/\/ the relative distance between the two profiles.\ntype DetectionResult struct {\n\tName string\n\tConfidence int\n}\n\n\/\/ResByConf represents an array of DetectionResult and can be sorted by Confidence.\ntype ResByConf []DetectionResult\n\nfunc (a ResByConf) Len() int { return len(a) }\nfunc (a ResByConf) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ResByConf) Less(i, j int) bool { return a[i].Confidence > a[j].Confidence }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package certs implements a CertSource which speaks to the public Cloud SQL API endpoint.\npackage certs\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\tmrand \"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/logging\"\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/proxy\/util\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/googleapi\"\n\tsqladmin \"google.golang.org\/api\/sqladmin\/v1beta4\"\n)\n\nconst defaultUserAgent = \"custom cloud_sql_proxy version >= 1.10\"\n\n\/\/ NewCertSource returns a CertSource which can be used to authenticate using\n\/\/ the provided client, which must not be nil.\n\/\/\n\/\/ This function is deprecated; use NewCertSourceOpts instead.\nfunc NewCertSource(host string, c *http.Client, checkRegion bool) *RemoteCertSource {\n\treturn NewCertSourceOpts(c, RemoteOpts{\n\t\tAPIBasePath: host,\n\t\tIgnoreRegion: !checkRegion,\n\t\tUserAgent: defaultUserAgent,\n\t})\n}\n\n\/\/ RemoteOpts are a collection of options for NewCertSourceOpts. All fields are\n\/\/ optional.\ntype RemoteOpts struct {\n\t\/\/ APIBasePath specifies the base path for the sqladmin API. If left blank,\n\t\/\/ the default from the autogenerated sqladmin library is used (which is\n\t\/\/ sufficient for nearly all users)\n\tAPIBasePath string\n\n\t\/\/ IgnoreRegion specifies whether a missing or mismatched region in the\n\t\/\/ instance name should be ignored. In a future version this value will be\n\t\/\/ forced to 'false' by the RemoteCertSource.\n\tIgnoreRegion bool\n\n\t\/\/ A string for the RemoteCertSource to identify itself when contacting the\n\t\/\/ sqladmin API.\n\tUserAgent string\n\n\t\/\/ IP address type options\n\tIPAddrTypeOpts []string\n\n\t\/\/ Enable IAM proxy db authentication\n\tEnableIAMLogin bool\n\n\t\/\/ Token source for token information used in cert creation\n\tTokenSource oauth2.TokenSource\n\n\t\/\/ DelayKeyGenerate, if true, causes the RSA key to be generated lazily\n\t\/\/ on the first connection to a database. The default behavior is to generate\n\t\/\/ the key when the CertSource is created.\n\tDelayKeyGenerate bool\n}\n\n\/\/ NewCertSourceOpts returns a CertSource configured with the provided Opts.\n\/\/ The provided http.Client must not be nil.\n\/\/\n\/\/ Use this function instead of NewCertSource; it has a more forward-compatible\n\/\/ signature.\nfunc NewCertSourceOpts(c *http.Client, opts RemoteOpts) *RemoteCertSource {\n\tserv, err := sqladmin.New(c)\n\tif err != nil {\n\t\tpanic(err) \/\/ Only will happen if the provided client is nil.\n\t}\n\tif opts.APIBasePath != \"\" {\n\t\tserv.BasePath = opts.APIBasePath\n\t}\n\tua := opts.UserAgent\n\tif ua == \"\" {\n\t\tua = defaultUserAgent\n\t}\n\tserv.UserAgent = ua\n\n\t\/\/ Set default value to be \"PUBLIC,PRIVATE\" if not specified\n\tif len(opts.IPAddrTypeOpts) == 0 {\n\t\topts.IPAddrTypeOpts = []string{\"PUBLIC\", \"PRIVATE\"}\n\t}\n\n\t\/\/ Add \"PUBLIC\" as an alias for \"PRIMARY\"\n\tfor index, ipAddressType := range opts.IPAddrTypeOpts {\n\t\tif strings.ToUpper(ipAddressType) == \"PUBLIC\" {\n\t\t\topts.IPAddrTypeOpts[index] = \"PRIMARY\"\n\t\t}\n\t}\n\n\tcertSource := &RemoteCertSource{\n\t\tserv: serv,\n\t\tcheckRegion: !opts.IgnoreRegion,\n\t\tIPAddrTypes: opts.IPAddrTypeOpts,\n\t\tEnableIAMLogin: opts.EnableIAMLogin,\n\t\tTokenSource: opts.TokenSource,\n\t}\n\tif !opts.DelayKeyGenerate {\n\t\t\/\/ Generate the RSA key now, but don't block on it.\n\t\tgo certSource.generateKey()\n\t}\n\n\treturn certSource\n}\n\n\/\/ RemoteCertSource implements a CertSource, using Cloud SQL APIs to\n\/\/ return Local certificates for identifying oneself as a specific user\n\/\/ to the remote instance and Remote certificates for confirming the\n\/\/ remote database's identity.\ntype RemoteCertSource struct {\n\t\/\/ keyOnce is used to create `key` lazily.\n\tkeyOnce sync.Once\n\t\/\/ key is the private key used for certificates returned by Local.\n\tkey *rsa.PrivateKey\n\t\/\/ serv is used to make authenticated API calls to Cloud SQL.\n\tserv *sqladmin.Service\n\t\/\/ If set, providing an incorrect region in their connection string will be\n\t\/\/ treated as an error. This is to provide the same functionality that will\n\t\/\/ occur when API calls require the region.\n\tcheckRegion bool\n\t\/\/ a list of ip address types that users select\n\tIPAddrTypes []string\n\t\/\/ flag to enable IAM proxy db authentication\n\tEnableIAMLogin bool\n\t\/\/ token source for the token information used in cert creation\n\tTokenSource oauth2.TokenSource\n}\n\n\/\/ Constants for backoffAPIRetry. These cause the retry logic to scale the\n\/\/ backoff delay from 200ms to around 3.5s.\nconst (\n\tbaseBackoff = float64(200 * time.Millisecond)\n\tbackoffMult = 1.618\n\tbackoffRetries = 5\n)\n\nfunc backoffAPIRetry(desc, instance string, do func() error) error {\n\tvar err error\n\tfor i := 0; i < backoffRetries; i++ {\n\t\terr = do()\n\t\tgErr, ok := err.(*googleapi.Error)\n\t\tswitch {\n\t\tcase !ok:\n\t\t\t\/\/ 'ok' will also be false if err is nil.\n\t\t\treturn err\n\t\tcase gErr.Code == 403 && len(gErr.Errors) > 0 && gErr.Errors[0].Reason == \"insufficientPermissions\":\n\t\t\t\/\/ The case where the admin API has not yet been enabled.\n\t\t\treturn fmt.Errorf(\"ensure that the Cloud SQL API is enabled for your project (https:\/\/console.cloud.google.com\/flows\/enableapi?apiid=sqladmin). Error during %s %s: %v\", desc, instance, err)\n\t\tcase gErr.Code == 404 || gErr.Code == 403:\n\t\t\treturn fmt.Errorf(\"ensure that the account has access to %q (and make sure there's no typo in that name). Error during %s %s: %v\", instance, desc, instance, err)\n\t\tcase gErr.Code < 500:\n\t\t\t\/\/ Only Server-level HTTP errors are immediately retryable.\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ sleep = baseBackoff * backoffMult^(retries + randomFactor)\n\t\texp := float64(i+1) + mrand.Float64()\n\t\tsleep := time.Duration(baseBackoff * math.Pow(backoffMult, exp))\n\t\tlogging.Errorf(\"Error in %s %s: %v; retrying in %v\", desc, instance, err, sleep)\n\t\ttime.Sleep(sleep)\n\t}\n\treturn err\n}\n\nfunc refreshToken(ts oauth2.TokenSource, tok *oauth2.Token) (*oauth2.Token, error) {\n\texpiredToken := &oauth2.Token{\n\t\tAccessToken: tok.AccessToken,\n\t\tTokenType: tok.TokenType,\n\t\tRefreshToken: tok.RefreshToken,\n\t\tExpiry: time.Time{}.Add(1), \/\/ Expired\n\t}\n\treturn oauth2.ReuseTokenSource(expiredToken, ts).Token()\n}\n\n\/\/ Local returns a certificate that may be used to establish a TLS\n\/\/ connection to the specified instance.\nfunc (s *RemoteCertSource) Local(instance string) (tls.Certificate, error) {\n\tpkix, err := x509.MarshalPKIXPublicKey(s.generateKey().Public())\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\n\tp, r, n := util.SplitName(instance)\n\tregionName := fmt.Sprintf(\"%s~%s\", r, n)\n\tpubKey := string(pem.EncodeToMemory(&pem.Block{Bytes: pkix, Type: \"RSA PUBLIC KEY\"}))\n\tcreateEphemeralRequest := sqladmin.SslCertsCreateEphemeralRequest{\n\t\tPublicKey: pubKey,\n\t}\n\tvar tok *oauth2.Token\n\t\/\/ If IAM login is enabled, add the OAuth2 token into the ephemeral\n\t\/\/ certificate request.\n\tif s.EnableIAMLogin {\n\t\tvar tokErr error\n\t\ttok, tokErr = s.TokenSource.Token()\n\t\tif tokErr != nil {\n\t\t\treturn tls.Certificate{}, tokErr\n\t\t}\n\t\t\/\/ Always refresh the token to ensure its expiration is far enough in\n\t\t\/\/ the future.\n\t\ttok, tokErr = refreshToken(s.TokenSource, tok)\n\t\tif tokErr != nil {\n\t\t\treturn tls.Certificate{}, tokErr\n\t\t}\n\t\tcreateEphemeralRequest.AccessToken = tok.AccessToken\n\t}\n\treq := s.serv.SslCerts.CreateEphemeral(p, regionName, &createEphemeralRequest)\n\n\tvar data *sqladmin.SslCert\n\terr = backoffAPIRetry(\"createEphemeral for\", instance, func() error {\n\t\tdata, err = req.Do()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\n\tc, err := parseCert(data.Cert)\n\tif err != nil {\n\t\treturn tls.Certificate{}, fmt.Errorf(\"couldn't parse ephemeral certificate for instance %q: %v\", instance, err)\n\t}\n\tif s.EnableIAMLogin {\n\t\t\/\/ Adjust the certificate's expiration to be the earlier of tok.Expiry or c.NotAfter\n\t\tif tok.Expiry.Before(c.NotAfter) {\n\t\t\tc.NotAfter = tok.Expiry\n\t\t}\n\t}\n\treturn tls.Certificate{\n\t\tCertificate: [][]byte{c.Raw},\n\t\tPrivateKey: s.generateKey(),\n\t\tLeaf: c,\n\t}, nil\n}\n\nfunc parseCert(pemCert string) (*x509.Certificate, error) {\n\tbl, _ := pem.Decode([]byte(pemCert))\n\tif bl == nil {\n\t\treturn nil, errors.New(\"invalid PEM: \" + pemCert)\n\t}\n\treturn x509.ParseCertificate(bl.Bytes)\n}\n\n\/\/ Return the RSA private key, which is lazily initialized.\nfunc (s *RemoteCertSource) generateKey() *rsa.PrivateKey {\n\ts.keyOnce.Do(func() {\n\t\tstart := time.Now()\n\t\tpkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\t\tif err != nil {\n\t\t\tpanic(err) \/\/ very unexpected.\n\t\t}\n\t\tlogging.Verbosef(\"Generated RSA key in %v\", time.Since(start))\n\t\ts.key = pkey\n\t})\n\treturn s.key\n}\n\n\/\/ Find the first matching IP address by user input IP address types\nfunc (s *RemoteCertSource) findIPAddr(data *sqladmin.DatabaseInstance, instance string) (ipAddrInUse string, err error) {\n\tfor _, eachIPAddrTypeByUser := range s.IPAddrTypes {\n\t\tfor _, eachIPAddrTypeOfInstance := range data.IpAddresses {\n\t\t\tif strings.ToUpper(eachIPAddrTypeOfInstance.Type) == strings.ToUpper(eachIPAddrTypeByUser) {\n\t\t\t\tipAddrInUse = eachIPAddrTypeOfInstance.IpAddress\n\t\t\t\treturn ipAddrInUse, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tipAddrTypesOfInstance := \"\"\n\tfor _, eachIPAddrTypeOfInstance := range data.IpAddresses {\n\t\tipAddrTypesOfInstance += fmt.Sprintf(\"(TYPE=%v, IP_ADDR=%v)\", eachIPAddrTypeOfInstance.Type, eachIPAddrTypeOfInstance.IpAddress)\n\t}\n\n\tipAddrTypeOfUser := fmt.Sprintf(\"%v\", s.IPAddrTypes)\n\n\treturn \"\", fmt.Errorf(\"User input IP address type %v does not match the instance %v, the instance's IP addresses are %v \", ipAddrTypeOfUser, instance, ipAddrTypesOfInstance)\n}\n\n\/\/ Remote returns the specified instance's CA certificate, address, and name.\nfunc (s *RemoteCertSource) Remote(instance string) (cert *x509.Certificate, addr, name, version string, err error) {\n\tp, region, n := util.SplitName(instance)\n\tregionName := fmt.Sprintf(\"%s~%s\", region, n)\n\treq := s.serv.Instances.Get(p, regionName)\n\n\tvar data *sqladmin.DatabaseInstance\n\terr = backoffAPIRetry(\"get instance\", instance, func() error {\n\t\tdata, err = req.Do()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, \"\", \"\", \"\", err\n\t}\n\n\t\/\/ TODO(chowski): remove this when us-central is removed.\n\tif data.Region == \"us-central\" {\n\t\tdata.Region = \"us-central1\"\n\t}\n\tif data.Region != region {\n\t\tif region == \"\" {\n\t\t\terr = fmt.Errorf(\"instance %v doesn't provide region\", instance)\n\t\t} else {\n\t\t\terr = fmt.Errorf(`for connection string \"%s\": got region %q, want %q`, instance, region, data.Region)\n\t\t}\n\t\tif s.checkRegion {\n\t\t\treturn nil, \"\", \"\", \"\", err\n\t\t}\n\t\tlogging.Errorf(\"%v\", err)\n\t\tlogging.Errorf(\"WARNING: specifying the correct region in an instance string will become required in a future version!\")\n\t}\n\n\tif len(data.IpAddresses) == 0 {\n\t\treturn nil, \"\", \"\", \"\", fmt.Errorf(\"no IP address found for %v\", instance)\n\t}\n\tif data.BackendType == \"FIRST_GEN\" {\n\t\tlogging.Errorf(\"WARNING: proxy client does not support first generation Cloud SQL instances.\")\n\t\treturn nil, \"\", \"\", \"\", fmt.Errorf(\"%q is a first generation instance\", instance)\n\t}\n\n\t\/\/ Find the first matching IP address by user input IP address types\n\tipAddrInUse := \"\"\n\tipAddrInUse, err = s.findIPAddr(data, instance)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", \"\", err\n\t}\n\n\tc, err := parseCert(data.ServerCaCert.Cert)\n\n\treturn c, ipAddrInUse, p + \":\" + n, data.DatabaseVersion, err\n}\n<commit_msg>fix: strip padding from access tokens if present (#851)<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package certs implements a CertSource which speaks to the public Cloud SQL API endpoint.\npackage certs\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\tmrand \"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/logging\"\n\t\"github.com\/GoogleCloudPlatform\/cloudsql-proxy\/proxy\/util\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/api\/googleapi\"\n\tsqladmin \"google.golang.org\/api\/sqladmin\/v1beta4\"\n)\n\nconst defaultUserAgent = \"custom cloud_sql_proxy version >= 1.10\"\n\n\/\/ NewCertSource returns a CertSource which can be used to authenticate using\n\/\/ the provided client, which must not be nil.\n\/\/\n\/\/ This function is deprecated; use NewCertSourceOpts instead.\nfunc NewCertSource(host string, c *http.Client, checkRegion bool) *RemoteCertSource {\n\treturn NewCertSourceOpts(c, RemoteOpts{\n\t\tAPIBasePath: host,\n\t\tIgnoreRegion: !checkRegion,\n\t\tUserAgent: defaultUserAgent,\n\t})\n}\n\n\/\/ RemoteOpts are a collection of options for NewCertSourceOpts. All fields are\n\/\/ optional.\ntype RemoteOpts struct {\n\t\/\/ APIBasePath specifies the base path for the sqladmin API. If left blank,\n\t\/\/ the default from the autogenerated sqladmin library is used (which is\n\t\/\/ sufficient for nearly all users)\n\tAPIBasePath string\n\n\t\/\/ IgnoreRegion specifies whether a missing or mismatched region in the\n\t\/\/ instance name should be ignored. In a future version this value will be\n\t\/\/ forced to 'false' by the RemoteCertSource.\n\tIgnoreRegion bool\n\n\t\/\/ A string for the RemoteCertSource to identify itself when contacting the\n\t\/\/ sqladmin API.\n\tUserAgent string\n\n\t\/\/ IP address type options\n\tIPAddrTypeOpts []string\n\n\t\/\/ Enable IAM proxy db authentication\n\tEnableIAMLogin bool\n\n\t\/\/ Token source for token information used in cert creation\n\tTokenSource oauth2.TokenSource\n\n\t\/\/ DelayKeyGenerate, if true, causes the RSA key to be generated lazily\n\t\/\/ on the first connection to a database. The default behavior is to generate\n\t\/\/ the key when the CertSource is created.\n\tDelayKeyGenerate bool\n}\n\n\/\/ NewCertSourceOpts returns a CertSource configured with the provided Opts.\n\/\/ The provided http.Client must not be nil.\n\/\/\n\/\/ Use this function instead of NewCertSource; it has a more forward-compatible\n\/\/ signature.\nfunc NewCertSourceOpts(c *http.Client, opts RemoteOpts) *RemoteCertSource {\n\tserv, err := sqladmin.New(c)\n\tif err != nil {\n\t\tpanic(err) \/\/ Only will happen if the provided client is nil.\n\t}\n\tif opts.APIBasePath != \"\" {\n\t\tserv.BasePath = opts.APIBasePath\n\t}\n\tua := opts.UserAgent\n\tif ua == \"\" {\n\t\tua = defaultUserAgent\n\t}\n\tserv.UserAgent = ua\n\n\t\/\/ Set default value to be \"PUBLIC,PRIVATE\" if not specified\n\tif len(opts.IPAddrTypeOpts) == 0 {\n\t\topts.IPAddrTypeOpts = []string{\"PUBLIC\", \"PRIVATE\"}\n\t}\n\n\t\/\/ Add \"PUBLIC\" as an alias for \"PRIMARY\"\n\tfor index, ipAddressType := range opts.IPAddrTypeOpts {\n\t\tif strings.ToUpper(ipAddressType) == \"PUBLIC\" {\n\t\t\topts.IPAddrTypeOpts[index] = \"PRIMARY\"\n\t\t}\n\t}\n\n\tcertSource := &RemoteCertSource{\n\t\tserv: serv,\n\t\tcheckRegion: !opts.IgnoreRegion,\n\t\tIPAddrTypes: opts.IPAddrTypeOpts,\n\t\tEnableIAMLogin: opts.EnableIAMLogin,\n\t\tTokenSource: opts.TokenSource,\n\t}\n\tif !opts.DelayKeyGenerate {\n\t\t\/\/ Generate the RSA key now, but don't block on it.\n\t\tgo certSource.generateKey()\n\t}\n\n\treturn certSource\n}\n\n\/\/ RemoteCertSource implements a CertSource, using Cloud SQL APIs to\n\/\/ return Local certificates for identifying oneself as a specific user\n\/\/ to the remote instance and Remote certificates for confirming the\n\/\/ remote database's identity.\ntype RemoteCertSource struct {\n\t\/\/ keyOnce is used to create `key` lazily.\n\tkeyOnce sync.Once\n\t\/\/ key is the private key used for certificates returned by Local.\n\tkey *rsa.PrivateKey\n\t\/\/ serv is used to make authenticated API calls to Cloud SQL.\n\tserv *sqladmin.Service\n\t\/\/ If set, providing an incorrect region in their connection string will be\n\t\/\/ treated as an error. This is to provide the same functionality that will\n\t\/\/ occur when API calls require the region.\n\tcheckRegion bool\n\t\/\/ a list of ip address types that users select\n\tIPAddrTypes []string\n\t\/\/ flag to enable IAM proxy db authentication\n\tEnableIAMLogin bool\n\t\/\/ token source for the token information used in cert creation\n\tTokenSource oauth2.TokenSource\n}\n\n\/\/ Constants for backoffAPIRetry. These cause the retry logic to scale the\n\/\/ backoff delay from 200ms to around 3.5s.\nconst (\n\tbaseBackoff = float64(200 * time.Millisecond)\n\tbackoffMult = 1.618\n\tbackoffRetries = 5\n)\n\nfunc backoffAPIRetry(desc, instance string, do func() error) error {\n\tvar err error\n\tfor i := 0; i < backoffRetries; i++ {\n\t\terr = do()\n\t\tgErr, ok := err.(*googleapi.Error)\n\t\tswitch {\n\t\tcase !ok:\n\t\t\t\/\/ 'ok' will also be false if err is nil.\n\t\t\treturn err\n\t\tcase gErr.Code == 403 && len(gErr.Errors) > 0 && gErr.Errors[0].Reason == \"insufficientPermissions\":\n\t\t\t\/\/ The case where the admin API has not yet been enabled.\n\t\t\treturn fmt.Errorf(\"ensure that the Cloud SQL API is enabled for your project (https:\/\/console.cloud.google.com\/flows\/enableapi?apiid=sqladmin). Error during %s %s: %v\", desc, instance, err)\n\t\tcase gErr.Code == 404 || gErr.Code == 403:\n\t\t\treturn fmt.Errorf(\"ensure that the account has access to %q (and make sure there's no typo in that name). Error during %s %s: %v\", instance, desc, instance, err)\n\t\tcase gErr.Code < 500:\n\t\t\t\/\/ Only Server-level HTTP errors are immediately retryable.\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ sleep = baseBackoff * backoffMult^(retries + randomFactor)\n\t\texp := float64(i+1) + mrand.Float64()\n\t\tsleep := time.Duration(baseBackoff * math.Pow(backoffMult, exp))\n\t\tlogging.Errorf(\"Error in %s %s: %v; retrying in %v\", desc, instance, err, sleep)\n\t\ttime.Sleep(sleep)\n\t}\n\treturn err\n}\n\nfunc refreshToken(ts oauth2.TokenSource, tok *oauth2.Token) (*oauth2.Token, error) {\n\texpiredToken := &oauth2.Token{\n\t\tAccessToken: tok.AccessToken,\n\t\tTokenType: tok.TokenType,\n\t\tRefreshToken: tok.RefreshToken,\n\t\tExpiry: time.Time{}.Add(1), \/\/ Expired\n\t}\n\treturn oauth2.ReuseTokenSource(expiredToken, ts).Token()\n}\n\n\/\/ Local returns a certificate that may be used to establish a TLS\n\/\/ connection to the specified instance.\nfunc (s *RemoteCertSource) Local(instance string) (tls.Certificate, error) {\n\tpkix, err := x509.MarshalPKIXPublicKey(s.generateKey().Public())\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\n\tp, r, n := util.SplitName(instance)\n\tregionName := fmt.Sprintf(\"%s~%s\", r, n)\n\tpubKey := string(pem.EncodeToMemory(&pem.Block{Bytes: pkix, Type: \"RSA PUBLIC KEY\"}))\n\tcreateEphemeralRequest := sqladmin.SslCertsCreateEphemeralRequest{\n\t\tPublicKey: pubKey,\n\t}\n\tvar tok *oauth2.Token\n\t\/\/ If IAM login is enabled, add the OAuth2 token into the ephemeral\n\t\/\/ certificate request.\n\tif s.EnableIAMLogin {\n\t\tvar tokErr error\n\t\ttok, tokErr = s.TokenSource.Token()\n\t\tif tokErr != nil {\n\t\t\treturn tls.Certificate{}, tokErr\n\t\t}\n\t\t\/\/ Always refresh the token to ensure its expiration is far enough in\n\t\t\/\/ the future.\n\t\ttok, tokErr = refreshToken(s.TokenSource, tok)\n\t\tif tokErr != nil {\n\t\t\treturn tls.Certificate{}, tokErr\n\t\t}\n\t\t\/\/ TODO: remove this once issue with OAuth2 Tokens is resolved.\n\t\t\/\/ See https:\/\/github.com\/GoogleCloudPlatform\/cloudsql-proxy\/issues\/852.\n\t\tcreateEphemeralRequest.AccessToken = strings.TrimRight(tok.AccessToken, \".\")\n\t}\n\treq := s.serv.SslCerts.CreateEphemeral(p, regionName, &createEphemeralRequest)\n\n\tvar data *sqladmin.SslCert\n\terr = backoffAPIRetry(\"createEphemeral for\", instance, func() error {\n\t\tdata, err = req.Do()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\n\tc, err := parseCert(data.Cert)\n\tif err != nil {\n\t\treturn tls.Certificate{}, fmt.Errorf(\"couldn't parse ephemeral certificate for instance %q: %v\", instance, err)\n\t}\n\tif s.EnableIAMLogin {\n\t\t\/\/ Adjust the certificate's expiration to be the earlier of tok.Expiry or c.NotAfter\n\t\tif tok.Expiry.Before(c.NotAfter) {\n\t\t\tc.NotAfter = tok.Expiry\n\t\t}\n\t}\n\treturn tls.Certificate{\n\t\tCertificate: [][]byte{c.Raw},\n\t\tPrivateKey: s.generateKey(),\n\t\tLeaf: c,\n\t}, nil\n}\n\nfunc parseCert(pemCert string) (*x509.Certificate, error) {\n\tbl, _ := pem.Decode([]byte(pemCert))\n\tif bl == nil {\n\t\treturn nil, errors.New(\"invalid PEM: \" + pemCert)\n\t}\n\treturn x509.ParseCertificate(bl.Bytes)\n}\n\n\/\/ Return the RSA private key, which is lazily initialized.\nfunc (s *RemoteCertSource) generateKey() *rsa.PrivateKey {\n\ts.keyOnce.Do(func() {\n\t\tstart := time.Now()\n\t\tpkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\t\tif err != nil {\n\t\t\tpanic(err) \/\/ very unexpected.\n\t\t}\n\t\tlogging.Verbosef(\"Generated RSA key in %v\", time.Since(start))\n\t\ts.key = pkey\n\t})\n\treturn s.key\n}\n\n\/\/ Find the first matching IP address by user input IP address types\nfunc (s *RemoteCertSource) findIPAddr(data *sqladmin.DatabaseInstance, instance string) (ipAddrInUse string, err error) {\n\tfor _, eachIPAddrTypeByUser := range s.IPAddrTypes {\n\t\tfor _, eachIPAddrTypeOfInstance := range data.IpAddresses {\n\t\t\tif strings.ToUpper(eachIPAddrTypeOfInstance.Type) == strings.ToUpper(eachIPAddrTypeByUser) {\n\t\t\t\tipAddrInUse = eachIPAddrTypeOfInstance.IpAddress\n\t\t\t\treturn ipAddrInUse, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tipAddrTypesOfInstance := \"\"\n\tfor _, eachIPAddrTypeOfInstance := range data.IpAddresses {\n\t\tipAddrTypesOfInstance += fmt.Sprintf(\"(TYPE=%v, IP_ADDR=%v)\", eachIPAddrTypeOfInstance.Type, eachIPAddrTypeOfInstance.IpAddress)\n\t}\n\n\tipAddrTypeOfUser := fmt.Sprintf(\"%v\", s.IPAddrTypes)\n\n\treturn \"\", fmt.Errorf(\"User input IP address type %v does not match the instance %v, the instance's IP addresses are %v \", ipAddrTypeOfUser, instance, ipAddrTypesOfInstance)\n}\n\n\/\/ Remote returns the specified instance's CA certificate, address, and name.\nfunc (s *RemoteCertSource) Remote(instance string) (cert *x509.Certificate, addr, name, version string, err error) {\n\tp, region, n := util.SplitName(instance)\n\tregionName := fmt.Sprintf(\"%s~%s\", region, n)\n\treq := s.serv.Instances.Get(p, regionName)\n\n\tvar data *sqladmin.DatabaseInstance\n\terr = backoffAPIRetry(\"get instance\", instance, func() error {\n\t\tdata, err = req.Do()\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, \"\", \"\", \"\", err\n\t}\n\n\t\/\/ TODO(chowski): remove this when us-central is removed.\n\tif data.Region == \"us-central\" {\n\t\tdata.Region = \"us-central1\"\n\t}\n\tif data.Region != region {\n\t\tif region == \"\" {\n\t\t\terr = fmt.Errorf(\"instance %v doesn't provide region\", instance)\n\t\t} else {\n\t\t\terr = fmt.Errorf(`for connection string \"%s\": got region %q, want %q`, instance, region, data.Region)\n\t\t}\n\t\tif s.checkRegion {\n\t\t\treturn nil, \"\", \"\", \"\", err\n\t\t}\n\t\tlogging.Errorf(\"%v\", err)\n\t\tlogging.Errorf(\"WARNING: specifying the correct region in an instance string will become required in a future version!\")\n\t}\n\n\tif len(data.IpAddresses) == 0 {\n\t\treturn nil, \"\", \"\", \"\", fmt.Errorf(\"no IP address found for %v\", instance)\n\t}\n\tif data.BackendType == \"FIRST_GEN\" {\n\t\tlogging.Errorf(\"WARNING: proxy client does not support first generation Cloud SQL instances.\")\n\t\treturn nil, \"\", \"\", \"\", fmt.Errorf(\"%q is a first generation instance\", instance)\n\t}\n\n\t\/\/ Find the first matching IP address by user input IP address types\n\tipAddrInUse := \"\"\n\tipAddrInUse, err = s.findIPAddr(data, instance)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", \"\", err\n\t}\n\n\tc, err := parseCert(data.ServerCaCert.Cert)\n\n\treturn c, ipAddrInUse, p + \":\" + n, data.DatabaseVersion, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t_ \"github.com\/bmizerany\/pq\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc NewDbController(user, pass, database string) (c DbController, err error) {\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\"user=%s password=%s dbname=%s host=localhost sslmode=disable\", user, pass, database))\n\tif err == nil {\n\t\tc = DbController{db}\n\t}\n\treturn\n}\n\ntype DbController struct {\n\tdb *sql.DB\n}\n\nfunc (d *DbController) Close() {\n\td.db.Close()\n}\n\nfunc (d *DbController) GetUser(uid int) (u User, err error) {\n\trow := d.db.QueryRow(\n\t\t`SELECT id, username, email\n\t\t FROM auth_user\n\t\t WHERE id = $1;`, uid)\n\terr = row.Scan(&u.Id, &u.username, &u.Email)\n\treturn\n}\n\nfunc (d *DbController) GetUsers() (users []User, err error) {\n\trows, err := d.db.Query(\n\t\t`SELECT id, username, email\n\t\t FROM auth_user;`)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar u User\n\t\terr = rows.Scan(&u.Id, &u.username, &u.Email)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\treturn\n}\n\nfunc (d *DbController) GetUserRepos(user_id int) ([]GithubRepo, error) {\n\t\/\/ guess as to initial size. Likely very few users following < 10 repos.\n\tvar repos = make([]GithubRepo, 10)\n\n\t\/\/ TODO(justinabrahms): Should really alter the schema such\n\t\/\/ that the join from user <-> repo doesn't go through\n\t\/\/ userprofiles.\n\trows, err := d.db.Query(\n\t\t`SELECT r.id, username, project_name\n\t FROM streamer_repo r\n\t\t JOIN streamer_userprofile_repos upr ON upr.repo_id = r.id \n\t\t JOIN streamer_userprofile up ON up.id = upr.userprofile_id\n WHERE up.user_id = $1;`, user_id)\n\t\/\/ what's a good way to handle this not working?\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar username, project_name string\n\tvar pk int\n\tfor rows.Next() {\n\t\terr = rows.Scan(&pk, &username, &project_name)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error fetching user's repos: \", user_id, err)\n\t\t}\n\t\tvar repo = GithubRepo{pk, username, project_name}\n\t\trepos = append(repos, repo)\n\t}\n\n\treturn repos, err\n}\n\nfunc (d *DbController) GetRepoActivity(repo *GithubRepo, userId int) (activity_list []Activity, err error) {\n\tactivity_list = make([]Activity, 0)\n\trows, err := d.db.Query(\n\t\t`SELECT a.id, a.event_id, a.type, a.created_at, ghu.name, r.username, r.project_name, meta\n\t\t FROM streamer_activity a\n\t\t JOIN streamer_repo r on r.id=a.repo_id\n\t\t JOIN streamer_githubuser ghu on ghu.id=a.user_id\n\t\t JOIN streamer_userprofile_repos upr on r.id=upr.repo_id\n JOIN streamer_userprofile up on up.id=upr.userprofile_id\n\t\t WHERE r.id = $1\n AND up.user_id = $2\n\t\t AND a.created_at > (NOW() - INTERVAL '5 days') -- don't send things more than a few days old. Think, new users who subscribe to rails\/rails\n\t\t AND (upr.last_sent is null -- hasn't been sent at all\n\t\t OR a.created_at > upr.last_sent); -- or hasn't been sent since we've gotten new stuff`,\n\t\trepo.Id, userId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tvar (\n\t\tpk, github_id, repo_id int\n\t\tactivity_type, username, repo_project, repo_user, meta string\n\t\tcreated_at time.Time\n\t)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&pk, &github_id, &activity_type, &created_at, &username, &repo_user, &repo_project, &meta)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error getting activity for repo: \", repo, err)\n\t\t}\n\n\t\tactivity_list = append(activity_list,\n\t\t\tActivity{pk, github_id, activity_type, created_at, username,\n\t\t\t\tGithubRepo{repo_id, repo_user, repo_project}, meta})\n\t}\n\n\treturn\n}\n\nfunc (d *DbController) MarkUserRepoSent(user User, repos []GithubRepo) (err error) {\n\t\/\/ finds the streamer_userprofile_repo row for the repo \/ user\n\t\/\/ combo, mark its last_sent as now\n\tids := make([]string, 0)\n\tfor _, repo := range repos {\n\t\t\/\/ TODO: why are they 0?\n\t\tif repo.Id != 0 {\n\t\t\tids = append(ids, strconv.FormatInt(int64(repo.Id), 10))\n\t\t}\n\t}\n\n\t\/\/ There is likely a better way to get parameterization, but\n\t\/\/ it wasn't working for me with ?'s.\n\tstr := fmt.Sprintf(\n\t\t`UPDATE streamer_userprofile_repos\n\t\t SET last_sent=NOW()\n\t\t WHERE repo_id IN ($1)`, strings.Join(ids, \",\"))\n\t_, err = d.db.Exec(str)\n\treturn\n}\n<commit_msg>Reverting \"fixing string sub format stuff.\" Revert \"fixing string sub format stuff.\"<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t_ \"github.com\/bmizerany\/pq\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc NewDbController(user, pass, database string) (c DbController, err error) {\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\"user=%s password=%s dbname=%s host=localhost sslmode=disable\", user, pass, database))\n\tif err == nil {\n\t\tc = DbController{db}\n\t}\n\treturn\n}\n\ntype DbController struct {\n\tdb *sql.DB\n}\n\nfunc (d *DbController) Close() {\n\td.db.Close()\n}\n\nfunc (d *DbController) GetUser(uid int) (u User, err error) {\n\trow := d.db.QueryRow(\n\t\t`SELECT id, username, email\n\t\t FROM auth_user\n\t\t WHERE id = $1;`, uid)\n\terr = row.Scan(&u.Id, &u.username, &u.Email)\n\treturn\n}\n\nfunc (d *DbController) GetUsers() (users []User, err error) {\n\trows, err := d.db.Query(\n\t\t`SELECT id, username, email\n\t\t FROM auth_user;`)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar u User\n\t\terr = rows.Scan(&u.Id, &u.username, &u.Email)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\treturn\n}\n\nfunc (d *DbController) GetUserRepos(user_id int) ([]GithubRepo, error) {\n\t\/\/ guess as to initial size. Likely very few users following < 10 repos.\n\tvar repos = make([]GithubRepo, 10)\n\n\t\/\/ TODO(justinabrahms): Should really alter the schema such\n\t\/\/ that the join from user <-> repo doesn't go through\n\t\/\/ userprofiles.\n\trows, err := d.db.Query(\n\t\t`SELECT r.id, username, project_name\n\t FROM streamer_repo r\n\t\t JOIN streamer_userprofile_repos upr ON upr.repo_id = r.id \n\t\t JOIN streamer_userprofile up ON up.id = upr.userprofile_id\n WHERE up.user_id = $1;`, user_id)\n\t\/\/ what's a good way to handle this not working?\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar username, project_name string\n\tvar pk int\n\tfor rows.Next() {\n\t\terr = rows.Scan(&pk, &username, &project_name)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error fetching user's repos: \", user_id, err)\n\t\t}\n\t\tvar repo = GithubRepo{pk, username, project_name}\n\t\trepos = append(repos, repo)\n\t}\n\n\treturn repos, err\n}\n\nfunc (d *DbController) GetRepoActivity(repo *GithubRepo, userId int) (activity_list []Activity, err error) {\n\tactivity_list = make([]Activity, 0)\n\trows, err := d.db.Query(\n\t\t`SELECT a.id, a.event_id, a.type, a.created_at, ghu.name, r.username, r.project_name, meta\n\t\t FROM streamer_activity a\n\t\t JOIN streamer_repo r on r.id=a.repo_id\n\t\t JOIN streamer_githubuser ghu on ghu.id=a.user_id\n\t\t JOIN streamer_userprofile_repos upr on r.id=upr.repo_id\n JOIN streamer_userprofile up on up.id=upr.userprofile_id\n\t\t WHERE r.id = $1\n AND up.user_id = $2\n\t\t AND a.created_at > (NOW() - INTERVAL '5 days') -- don't send things more than a few days old. Think, new users who subscribe to rails\/rails\n\t\t AND (upr.last_sent is null -- hasn't been sent at all\n\t\t OR a.created_at > upr.last_sent); -- or hasn't been sent since we've gotten new stuff`,\n\t\trepo.Id, userId)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tvar (\n\t\tpk, github_id, repo_id int\n\t\tactivity_type, username, repo_project, repo_user, meta string\n\t\tcreated_at time.Time\n\t)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&pk, &github_id, &activity_type, &created_at, &username, &repo_user, &repo_project, &meta)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error getting activity for repo: \", repo, err)\n\t\t}\n\n\t\tactivity_list = append(activity_list,\n\t\t\tActivity{pk, github_id, activity_type, created_at, username,\n\t\t\t\tGithubRepo{repo_id, repo_user, repo_project}, meta})\n\t}\n\n\treturn\n}\n\nfunc (d *DbController) MarkUserRepoSent(user User, repos []GithubRepo) (err error) {\n\t\/\/ finds the streamer_userprofile_repo row for the repo \/ user\n\t\/\/ combo, mark its last_sent as now\n\tids := make([]string, 0)\n\tfor _, repo := range repos {\n\t\t\/\/ TODO: why are they 0?\n\t\tif repo.Id != 0 {\n\t\t\tids = append(ids, strconv.FormatInt(int64(repo.Id), 10))\n\t\t}\n\t}\n\n\t\/\/ There is likely a better way to get parameterization, but\n\t\/\/ it wasn't working for me with ?'s.\n\tstr := fmt.Sprintf(\n\t\t`UPDATE streamer_userprofile_repos\n\t\t SET last_sent=NOW()\n\t\t WHERE repo_id IN (%s)`, strings.Join(ids, \",\"))\n\t_, err = d.db.Exec(str)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage secp256k1fx\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\n\t\"github.com\/ava-labs\/avalanchego\/utils\/constants\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/formatting\"\n\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/vms\/components\/verify\"\n)\n\nvar (\n\terrNilOutput = errors.New(\"nil output\")\n\terrOutputUnspendable = errors.New(\"output is unspendable\")\n\terrOutputUnoptimized = errors.New(\"output representation should be optimized\")\n\terrAddrsNotSortedUnique = errors.New(\"addresses not sorted and unique\")\n\terrMarshal = errors.New(\"cannot marshal without ctx\")\n\t_ verify.State = &OutputOwners{}\n)\n\n\/\/ OutputOwners ...\ntype OutputOwners struct {\n\tLocktime uint64 `serialize:\"true\" json:\"locktime\"`\n\tThreshold uint32 `serialize:\"true\" json:\"threshold\"`\n\tAddrs []ids.ShortID `serialize:\"true\" json:\"addresses\"`\n\t\/\/ ctx is used in MarshalJSON to convert Addrs into human readable\n\t\/\/ format with ChainID and NetworkID. Unexported because we don't use\n\t\/\/ it outside this object.\n\tctx *snow.Context `serialize:\"false\"`\n}\n\n\/\/ InitCtx assigns the OutputOwners.ctx object to given [ctx] object\n\/\/ Must be called at least once for MarshalJSON to work successfully\nfunc (out *OutputOwners) InitCtx(ctx *snow.Context) {\n\tout.ctx = ctx\n}\n\n\/\/ MarshalJSON marshals OutputOwners as JSON with human readable addresses.\n\/\/ OutputOwners.InitCtx must be called before marshalling this or one of\n\/\/ the parent objects to json. Uses the OutputOwners.ctx method to format\n\/\/ the addresses. Returns errMarshal error if OutputOwners.ctx is not set.\nfunc (out *OutputOwners) MarshalJSON() ([]byte, error) {\n\taddrsLen := len(out.Addrs)\n\n\t\/\/ we need out.ctx to do this, if its absent, throw error\n\tif addrsLen > 0 && out.ctx == nil {\n\t\treturn nil, errMarshal\n\t}\n\n\tresult := make(map[string]interface{})\n\tresult[\"locktime\"] = out.Locktime\n\tresult[\"threshold\"] = out.Threshold\n\n\taddresses := make([]string, addrsLen)\n\tfor i, n := 0, addrsLen; i < n; i++ {\n\t\t\/\/ for each [addr] in [Addrs] we attempt to format it given\n\t\t\/\/ the [out.ctx] object\n\t\taddr := out.Addrs[i]\n\t\tfAddr, err := FormatAddress(out.ctx, addr)\n\t\tif err != nil {\n\t\t\t\/\/ we expect these addresses to be valid, return error\n\t\t\t\/\/ if they are not\n\t\t\treturn nil, err\n\t\t}\n\t\taddresses[i] = fAddr\n\t}\n\tresult[\"addresses\"] = addresses\n\n\treturn json.Marshal(result)\n}\n\n\/\/ Addresses returns the addresses that manage this output\nfunc (out *OutputOwners) Addresses() [][]byte {\n\taddrs := make([][]byte, len(out.Addrs))\n\tfor i, addr := range out.Addrs {\n\t\taddrs[i] = addr.Bytes()\n\t}\n\treturn addrs\n}\n\n\/\/ AddressesSet returns addresses as a set\nfunc (out *OutputOwners) AddressesSet() ids.ShortSet {\n\tset := ids.NewShortSet(len(out.Addrs))\n\tset.Add(out.Addrs...)\n\treturn set\n}\n\n\/\/ Equals returns true if the provided owners create the same condition\nfunc (out *OutputOwners) Equals(other *OutputOwners) bool {\n\tif out == other {\n\t\treturn true\n\t}\n\tif out == nil || other == nil || out.Locktime != other.Locktime || out.Threshold != other.Threshold || len(out.Addrs) != len(other.Addrs) {\n\t\treturn false\n\t}\n\tfor i, addr := range out.Addrs {\n\t\totherAddr := other.Addrs[i]\n\t\tif addr != otherAddr {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Verify ...\nfunc (out *OutputOwners) Verify() error {\n\tswitch {\n\tcase out == nil:\n\t\treturn errNilOutput\n\tcase out.Threshold > uint32(len(out.Addrs)):\n\t\treturn errOutputUnspendable\n\tcase out.Threshold == 0 && len(out.Addrs) > 0:\n\t\treturn errOutputUnoptimized\n\tcase !ids.IsSortedAndUniqueShortIDs(out.Addrs):\n\t\treturn errAddrsNotSortedUnique\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n\/\/ VerifyState ...\nfunc (out *OutputOwners) VerifyState() error { return out.Verify() }\n\n\/\/ Sort ...\nfunc (out *OutputOwners) Sort() { ids.SortShortIDs(out.Addrs) }\n\n\/\/ FormatAddress formats a given [addr] into human readable format using\n\/\/ [ChainID] and [NetworkID] from the provided [ctx].\nfunc FormatAddress(ctx *snow.Context, addr ids.ShortID) (string, error) {\n\tchainIDAlias, err := ctx.BCLookup.PrimaryAlias(ctx.ChainID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thrp := constants.GetHRP(ctx.NetworkID)\n\treturn formatting.FormatAddress(chainIDAlias, hrp, addr.Bytes())\n}\n<commit_msg>directly init map<commit_after>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage secp256k1fx\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\n\t\"github.com\/ava-labs\/avalanchego\/utils\/constants\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/formatting\"\n\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/vms\/components\/verify\"\n)\n\nvar (\n\terrNilOutput = errors.New(\"nil output\")\n\terrOutputUnspendable = errors.New(\"output is unspendable\")\n\terrOutputUnoptimized = errors.New(\"output representation should be optimized\")\n\terrAddrsNotSortedUnique = errors.New(\"addresses not sorted and unique\")\n\terrMarshal = errors.New(\"cannot marshal without ctx\")\n\t_ verify.State = &OutputOwners{}\n)\n\n\/\/ OutputOwners ...\ntype OutputOwners struct {\n\tLocktime uint64 `serialize:\"true\" json:\"locktime\"`\n\tThreshold uint32 `serialize:\"true\" json:\"threshold\"`\n\tAddrs []ids.ShortID `serialize:\"true\" json:\"addresses\"`\n\t\/\/ ctx is used in MarshalJSON to convert Addrs into human readable\n\t\/\/ format with ChainID and NetworkID. Unexported because we don't use\n\t\/\/ it outside this object.\n\tctx *snow.Context `serialize:\"false\"`\n}\n\n\/\/ InitCtx assigns the OutputOwners.ctx object to given [ctx] object\n\/\/ Must be called at least once for MarshalJSON to work successfully\nfunc (out *OutputOwners) InitCtx(ctx *snow.Context) {\n\tout.ctx = ctx\n}\n\n\/\/ MarshalJSON marshals OutputOwners as JSON with human readable addresses.\n\/\/ OutputOwners.InitCtx must be called before marshalling this or one of\n\/\/ the parent objects to json. Uses the OutputOwners.ctx method to format\n\/\/ the addresses. Returns errMarshal error if OutputOwners.ctx is not set.\nfunc (out *OutputOwners) MarshalJSON() ([]byte, error) {\n\taddrsLen := len(out.Addrs)\n\n\t\/\/ we need out.ctx to do this, if its absent, throw error\n\tif addrsLen > 0 && out.ctx == nil {\n\t\treturn nil, errMarshal\n\t}\n\n\taddresses := make([]string, addrsLen)\n\tfor i, n := 0, addrsLen; i < n; i++ {\n\t\t\/\/ for each [addr] in [Addrs] we attempt to format it given\n\t\t\/\/ the [out.ctx] object\n\t\taddr := out.Addrs[i]\n\t\tfAddr, err := FormatAddress(out.ctx, addr)\n\t\tif err != nil {\n\t\t\t\/\/ we expect these addresses to be valid, return error\n\t\t\t\/\/ if they are not\n\t\t\treturn nil, err\n\t\t}\n\t\taddresses[i] = fAddr\n\t}\n\tresult := map[string]interface{}{\n\t\t\"locktime\": out.Locktime,\n\t\t\"threshold\": out.Threshold,\n\t\t\"addresses\": addresses,\n\t}\n\n\treturn json.Marshal(result)\n}\n\n\/\/ Addresses returns the addresses that manage this output\nfunc (out *OutputOwners) Addresses() [][]byte {\n\taddrs := make([][]byte, len(out.Addrs))\n\tfor i, addr := range out.Addrs {\n\t\taddrs[i] = addr.Bytes()\n\t}\n\treturn addrs\n}\n\n\/\/ AddressesSet returns addresses as a set\nfunc (out *OutputOwners) AddressesSet() ids.ShortSet {\n\tset := ids.NewShortSet(len(out.Addrs))\n\tset.Add(out.Addrs...)\n\treturn set\n}\n\n\/\/ Equals returns true if the provided owners create the same condition\nfunc (out *OutputOwners) Equals(other *OutputOwners) bool {\n\tif out == other {\n\t\treturn true\n\t}\n\tif out == nil || other == nil || out.Locktime != other.Locktime || out.Threshold != other.Threshold || len(out.Addrs) != len(other.Addrs) {\n\t\treturn false\n\t}\n\tfor i, addr := range out.Addrs {\n\t\totherAddr := other.Addrs[i]\n\t\tif addr != otherAddr {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Verify ...\nfunc (out *OutputOwners) Verify() error {\n\tswitch {\n\tcase out == nil:\n\t\treturn errNilOutput\n\tcase out.Threshold > uint32(len(out.Addrs)):\n\t\treturn errOutputUnspendable\n\tcase out.Threshold == 0 && len(out.Addrs) > 0:\n\t\treturn errOutputUnoptimized\n\tcase !ids.IsSortedAndUniqueShortIDs(out.Addrs):\n\t\treturn errAddrsNotSortedUnique\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n\/\/ VerifyState ...\nfunc (out *OutputOwners) VerifyState() error { return out.Verify() }\n\n\/\/ Sort ...\nfunc (out *OutputOwners) Sort() { ids.SortShortIDs(out.Addrs) }\n\n\/\/ FormatAddress formats a given [addr] into human readable format using\n\/\/ [ChainID] and [NetworkID] from the provided [ctx].\nfunc FormatAddress(ctx *snow.Context, addr ids.ShortID) (string, error) {\n\tchainIDAlias, err := ctx.BCLookup.PrimaryAlias(ctx.ChainID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thrp := constants.GetHRP(ctx.NetworkID)\n\treturn formatting.FormatAddress(chainIDAlias, hrp, addr.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mit-dci\/lit\/coinparam\"\n\t\"github.com\/mit-dci\/lit\/litbamf\"\n\t\"github.com\/mit-dci\/lit\/litrpc\"\n\t\"github.com\/mit-dci\/lit\/lnutil\"\n\t\"github.com\/mit-dci\/lit\/qln\"\n)\n\ntype config struct { \/\/ define a struct for usage with go-flags\n\tTn3host string `long:\"tn3\" description:\"Connect to bitcoin testnet3.\"`\n\tBc2host string `long:\"bc2\" description:\"bc2 full node.\"`\n\tLt4host string `long:\"lt4\" description:\"Connect to litecoin testnet4.\"`\n\tReghost string `long:\"reg\" description:\"Connect to bitcoin regtest.\"`\n\tLitereghost string `long:\"litereg\" description:\"Connect to litecoin regtest.\"`\n\tTvtchost string `long:\"tvtc\" description:\"Connect to Vertcoin test node.\"`\n\tVtchost string `long:\"vtc\" description:\"Connect to Vertcoin.\"`\n\tLitHomeDir string `long:\"dir\" description:\"Specify Home Directory of lit as an absolute path.\"`\n\tConfigFile string\n\n\tReSync bool `short:\"r\"long:\"reSync\" description:\"Resync from the given tip.\"`\n\tTower bool `long:\"tower\" description:\"Watchtower: Run a watching node\"`\n\tHard bool `short:\"t\" long:\"hard\" description:\"Flag to set networks.\"`\n\tVerbose bool `short:\"v\" long:\"verbose\" description:\"Set verbosity to true.\"`\n\n\tRpcport uint16 `short:\"p\"long:\"rpcport\" description:\"Set rpcport to connect to.\"`\n\n\tParams *coinparam.Params\n}\n\nvar (\n\tdefaultLitHomeDirName = os.Getenv(\"HOME\") + \"\/.lit\"\n\tdefaultKeyFileName = \"privkey.hex\"\n\tdefaultConfigFilename = \"lit.conf\"\n\tdefaultHomeDir = os.Getenv(\"HOME\")\n\tdefaultConfigFile = filepath.Join(os.Getenv(\"HOME\"), \"\/.lit\/lit.conf\")\n\tdefaultRpcport = uint16(8001)\n)\n\nfunc fileExists(name string) bool {\n\tif _, err := os.Stat(name); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ newConfigParser returns a new command line flags parser.\nfunc newConfigParser(conf *config, options flags.Options) *flags.Parser {\n\tparser := flags.NewParser(conf, options)\n\treturn parser\n}\n\nfunc linkWallets(node *qln.LitNode, key *[32]byte, conf *config) error {\n\t\/\/ for now, wallets are linked to the litnode on startup, and\n\t\/\/ can't appear \/ disappear while it's running. Later\n\t\/\/ could support dynamically adding \/ removing wallets\n\n\t\/\/ order matters; the first registered wallet becomes the default\n\n\tvar err error\n\t\/\/ try regtest\n\tif conf.Reghost != \"\" {\n\t\tp := &coinparam.RegressionNetParams\n\t\tif !strings.Contains(conf.Reghost, \":\") {\n\t\t\tconf.Reghost = conf.Reghost + \":\" + p.DefaultPort\n\t\t}\n\t\tfmt.Printf(\"reg: %s\\n\", conf.Reghost)\n\t\terr = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Reghost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try testnet3\n\tif conf.Tn3host != \"\" {\n\t\tp := &coinparam.TestNet3Params\n\t\tif !strings.Contains(conf.Tn3host, \":\") {\n\t\t\tconf.Tn3host = conf.Tn3host + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, 1150000, conf.ReSync, conf.Tower,\n\t\t\tconf.Tn3host, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try litecoin regtest\n\tif conf.Litereghost != \"\" {\n\t\tp := &coinparam.LiteRegNetParams\n\t\tif !strings.Contains(conf.Litereghost, \":\") {\n\t\t\tconf.Litereghost = conf.Litereghost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Litereghost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ try litecoin testnet4\n\tif conf.Lt4host != \"\" {\n\t\tp := &coinparam.LiteCoinTestNet4Params\n\t\tif !strings.Contains(conf.Lt4host, \":\") {\n\t\t\tconf.Lt4host = conf.Lt4host + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, p.StartHeight, conf.ReSync, conf.Tower,\n\t\t\tconf.Lt4host, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try vertcoin testnet\n\tif conf.Tvtchost != \"\" {\n\t\tp := &coinparam.VertcoinTestNetParams\n\t\tif !strings.Contains(conf.Tvtchost, \":\") {\n\t\t\tconf.Tvtchost = conf.Tvtchost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, 0, conf.ReSync, conf.Tower,\n\t\t\tconf.Tvtchost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try vertcoin mainnet\n\tif conf.Vtchost != \"\" {\n\t\tp := &coinparam.VertcoinParams\n\t\tif !strings.Contains(conf.Vtchost, \":\") {\n\t\t\tconf.Vtchost = conf.Vtchost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, p.StartHeight, conf.ReSync, conf.Tower,\n\t\t\tconf.Vtchost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc main() {\n\n\tconf := config{\n\t\tLitHomeDir: defaultLitHomeDirName,\n\t\tConfigFile: defaultConfigFile,\n\t\tRpcport: defaultRpcport,\n\t}\n\n\t\/\/ Pre-parse the command line options to see if an alternative config\n\t\/\/ file or the version flag was specified. Any errors aside from the\n\t\/\/ help message error can be ignored here since they will be caught by\n\t\/\/ the final parse below.\n\tpreconf := conf\n\tpreParser := newConfigParser(&preconf, flags.HelpFlag)\n\t_, err := preParser.Parse()\n\tif err != nil { \/\/ if there is some sort of error while parsing the CLI arguments\n\t\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t\t\/\/ return nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ appName := filepath.Base(os.Args[0])\n\t\/\/ appName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\t\/\/ usageMessage := fmt.Sprintf(\"Use %s -h to show usage\", appName)\n\t\/\/ if preconf.ShowVersion {\n\t\/\/ \tfmt.Println(appName, \"version\", version())\n\t\/\/ \tos.Exit(0)\n\t\/\/ }\n\n\t\/\/ Load additional config from file\n\tvar configFileError error\n\tparser := newConfigParser(&conf, flags.Default) \/\/ Single line command to read all the CLI params passed\n\n\t\/\/ creates a directory in the absolute sense\n\tif _, err := os.Stat(preconf.LitHomeDir); os.IsNotExist(err) {\n\t\tos.Mkdir(preconf.LitHomeDir, 0700)\n\t\tfmt.Println(\"Creating a new config file\")\n\t\terr1 := createDefaultConfigFile(preconf.LitHomeDir) \/\/ Source of error\n\t\tif err1 != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a \"+\n\t\t\t\t\"default config file: %v\\n\", err)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating a directory\")\n\t\tfmt.Println(err)\n\t}\n\n\tif !(preconf.ConfigFile != defaultConfigFile) {\n\t\t\/\/ passing works fine.\n\t\t\/\/ fmt.Println(\"Watch out\")\n\t\t\/\/ fmt.Println(filepath.Join(preconf.LitHomeDir))\n\t\tif _, err := os.Stat(filepath.Join(filepath.Join(preconf.LitHomeDir), \"lit.conf\")); os.IsNotExist(err) {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tfmt.Println(\"Creating a new config file\")\n\t\t\terr1 := createDefaultConfigFile(filepath.Join(preconf.LitHomeDir)) \/\/ Source of error\n\t\t\tif err1 != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a \"+\n\t\t\t\t\t\"default config file: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t\tpreconf.ConfigFile = filepath.Join(filepath.Join(preconf.LitHomeDir), \"lit.conf\")\n\t\terr := flags.NewIniParser(parser).ParseFile(preconf.ConfigFile) \/\/ lets parse the config file provided, if any\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*os.PathError); !ok {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error parsing config \"+\n\t\t\t\t\t\"file: %v\\n\", err)\n\t\t\t\t\/\/ fmt.Fprintln(os.Stderr, usageMessage)\n\t\t\t\tlog.Fatal(err)\n\t\t\t\t\/\/ return nil, nil, err\n\t\t\t}\n\t\t\tconfigFileError = err\n\t\t}\n\t}\n\n\t\/\/ Parse command line options again to ensure they take precedence.\n\tremainingArgs, err := parser.Parse() \/\/ no extra work, free overloading.\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {\n\t\t\t\/\/ fmt.Fprintln(os.Stderr, usageMessage)\n\t\t}\n\t\tlog.Fatal(err)\n\t\t\/\/ return nil, nil, err\n\t}\n\n\tif configFileError != nil {\n\t\tfmt.Printf(\"%v\", configFileError)\n\t}\n\n\tif remainingArgs != nil {\n\t\t\/\/fmt.Printf(\"%v\", remainingArgs)\n\t}\n\n\tlogFilePath := filepath.Join(conf.LitHomeDir, \"lit.log\")\n\n\tlogfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tdefer logfile.Close()\n\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\tif conf.Verbose {\n\t\tlogOutput := io.MultiWriter(os.Stdout, logfile)\n\t\tlog.SetOutput(logOutput)\n\t} else {\n\t\tlog.SetOutput(logfile)\n\t}\n\n\t\/\/ Allow node with no linked wallets, for testing.\n\t\/\/ TODO Should update tests and disallow nodes without wallets later.\n\t\/\/\tif conf.Tn3host == \"\" && conf.Lt4host == \"\" && conf.Reghost == \"\" {\n\t\/\/\t\tlog.Fatal(\"error: no network specified; use -tn3, -reg, -lt4\")\n\t\/\/\t}\n\n\t\/\/ Keys: the litNode, and wallits, all get 32 byte keys.\n\t\/\/ Right now though, they all get the *same* key. For lit as a single binary\n\t\/\/ now, all using the same key makes sense; could split up later.\n\n\tkeyFilePath := filepath.Join(conf.LitHomeDir, defaultKeyFileName)\n\n\t\/\/ read key file (generate if not found)\n\tkey, err := lnutil.ReadKeyFile(keyFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup LN node. Activate Tower if in hard mode.\n\t\/\/ give node and below file pathof lit home directoy\n\tnode, err := qln.NewLitNode(key, conf.LitHomeDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ node is up; link wallets based on args\n\terr = linkWallets(node, key, &conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trpcl := new(litrpc.LitRPC)\n\trpcl.Node = node\n\trpcl.OffButton = make(chan bool, 1)\n\n\tlitrpc.RPCListen(rpcl, conf.Rpcport)\n\tlitbamf.BamfListen(conf.Rpcport, conf.LitHomeDir)\n\n\t<-rpcl.OffButton\n\tfmt.Printf(\"Got stop request\\n\")\n\ttime.Sleep(time.Second)\n\n\treturn\n\t\/\/ New directory being created over at PWD\n\t\/\/ conf file being created at \/\n}\n\nfunc createDefaultConfigFile(destinationPath string) error {\n\n\t\/\/ We assume sample config file path is same as binary TODO: change to ~\/.lit\/config\/\n\tpath, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsampleConfigPath := filepath.Join(path, defaultConfigFilename)\n\n\t\/\/ We generate a random user and password\n\trandomBytes := make([]byte, 20)\n\t_, err = rand.Read(randomBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgeneratedRPCUser := base64.StdEncoding.EncodeToString(randomBytes)\n\n\t_, err = rand.Read(randomBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgeneratedRPCPass := base64.StdEncoding.EncodeToString(randomBytes)\n\n\tsrc, err := os.Open(sampleConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer src.Close()\n\n\tdest, err := os.OpenFile(filepath.Join(destinationPath, defaultConfigFilename),\n\t\tos.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dest.Close()\n\n\t\/\/ We copy every line from the sample config file to the destination,\n\t\/\/ only replacing the two lines for rpcuser and rpcpass\n\treader := bufio.NewReader(src)\n\tfor err != io.EOF {\n\t\tvar line string\n\t\tline, err = reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\tif strings.Contains(line, \"rpcuser=\") {\n\t\t\tline = \"rpcuser=\" + generatedRPCUser + \"\\n\"\n\t\t} else if strings.Contains(line, \"rpcpass=\") {\n\t\t\tline = \"rpcpass=\" + generatedRPCPass + \"\\n\"\n\t\t}\n\n\t\tif _, err := dest.WriteString(line); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>bump height for testnet3<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/mit-dci\/lit\/coinparam\"\n\t\"github.com\/mit-dci\/lit\/litbamf\"\n\t\"github.com\/mit-dci\/lit\/litrpc\"\n\t\"github.com\/mit-dci\/lit\/lnutil\"\n\t\"github.com\/mit-dci\/lit\/qln\"\n)\n\ntype config struct { \/\/ define a struct for usage with go-flags\n\tTn3host string `long:\"tn3\" description:\"Connect to bitcoin testnet3.\"`\n\tBc2host string `long:\"bc2\" description:\"bc2 full node.\"`\n\tLt4host string `long:\"lt4\" description:\"Connect to litecoin testnet4.\"`\n\tReghost string `long:\"reg\" description:\"Connect to bitcoin regtest.\"`\n\tLitereghost string `long:\"litereg\" description:\"Connect to litecoin regtest.\"`\n\tTvtchost string `long:\"tvtc\" description:\"Connect to Vertcoin test node.\"`\n\tVtchost string `long:\"vtc\" description:\"Connect to Vertcoin.\"`\n\tLitHomeDir string `long:\"dir\" description:\"Specify Home Directory of lit as an absolute path.\"`\n\tConfigFile string\n\n\tReSync bool `short:\"r\"long:\"reSync\" description:\"Resync from the given tip.\"`\n\tTower bool `long:\"tower\" description:\"Watchtower: Run a watching node\"`\n\tHard bool `short:\"t\" long:\"hard\" description:\"Flag to set networks.\"`\n\tVerbose bool `short:\"v\" long:\"verbose\" description:\"Set verbosity to true.\"`\n\n\tRpcport uint16 `short:\"p\"long:\"rpcport\" description:\"Set rpcport to connect to.\"`\n\n\tParams *coinparam.Params\n}\n\nvar (\n\tdefaultLitHomeDirName = os.Getenv(\"HOME\") + \"\/.lit\"\n\tdefaultKeyFileName = \"privkey.hex\"\n\tdefaultConfigFilename = \"lit.conf\"\n\tdefaultHomeDir = os.Getenv(\"HOME\")\n\tdefaultConfigFile = filepath.Join(os.Getenv(\"HOME\"), \"\/.lit\/lit.conf\")\n\tdefaultRpcport = uint16(8001)\n)\n\nfunc fileExists(name string) bool {\n\tif _, err := os.Stat(name); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ newConfigParser returns a new command line flags parser.\nfunc newConfigParser(conf *config, options flags.Options) *flags.Parser {\n\tparser := flags.NewParser(conf, options)\n\treturn parser\n}\n\nfunc linkWallets(node *qln.LitNode, key *[32]byte, conf *config) error {\n\t\/\/ for now, wallets are linked to the litnode on startup, and\n\t\/\/ can't appear \/ disappear while it's running. Later\n\t\/\/ could support dynamically adding \/ removing wallets\n\n\t\/\/ order matters; the first registered wallet becomes the default\n\n\tvar err error\n\t\/\/ try regtest\n\tif conf.Reghost != \"\" {\n\t\tp := &coinparam.RegressionNetParams\n\t\tif !strings.Contains(conf.Reghost, \":\") {\n\t\t\tconf.Reghost = conf.Reghost + \":\" + p.DefaultPort\n\t\t}\n\t\tfmt.Printf(\"reg: %s\\n\", conf.Reghost)\n\t\terr = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Reghost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try testnet3\n\tif conf.Tn3host != \"\" {\n\t\tp := &coinparam.TestNet3Params\n\t\tif !strings.Contains(conf.Tn3host, \":\") {\n\t\t\tconf.Tn3host = conf.Tn3host + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, 1210000, conf.ReSync, conf.Tower,\n\t\t\tconf.Tn3host, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try litecoin regtest\n\tif conf.Litereghost != \"\" {\n\t\tp := &coinparam.LiteRegNetParams\n\t\tif !strings.Contains(conf.Litereghost, \":\") {\n\t\t\tconf.Litereghost = conf.Litereghost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Litereghost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ try litecoin testnet4\n\tif conf.Lt4host != \"\" {\n\t\tp := &coinparam.LiteCoinTestNet4Params\n\t\tif !strings.Contains(conf.Lt4host, \":\") {\n\t\t\tconf.Lt4host = conf.Lt4host + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, p.StartHeight, conf.ReSync, conf.Tower,\n\t\t\tconf.Lt4host, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try vertcoin testnet\n\tif conf.Tvtchost != \"\" {\n\t\tp := &coinparam.VertcoinTestNetParams\n\t\tif !strings.Contains(conf.Tvtchost, \":\") {\n\t\t\tconf.Tvtchost = conf.Tvtchost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, 0, conf.ReSync, conf.Tower,\n\t\t\tconf.Tvtchost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\/\/ try vertcoin mainnet\n\tif conf.Vtchost != \"\" {\n\t\tp := &coinparam.VertcoinParams\n\t\tif !strings.Contains(conf.Vtchost, \":\") {\n\t\t\tconf.Vtchost = conf.Vtchost + \":\" + p.DefaultPort\n\t\t}\n\t\terr = node.LinkBaseWallet(\n\t\t\tkey, p.StartHeight, conf.ReSync, conf.Tower,\n\t\t\tconf.Vtchost, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc main() {\n\n\tconf := config{\n\t\tLitHomeDir: defaultLitHomeDirName,\n\t\tConfigFile: defaultConfigFile,\n\t\tRpcport: defaultRpcport,\n\t}\n\n\t\/\/ Pre-parse the command line options to see if an alternative config\n\t\/\/ file or the version flag was specified. Any errors aside from the\n\t\/\/ help message error can be ignored here since they will be caught by\n\t\/\/ the final parse below.\n\tpreconf := conf\n\tpreParser := newConfigParser(&preconf, flags.HelpFlag)\n\t_, err := preParser.Parse()\n\tif err != nil { \/\/ if there is some sort of error while parsing the CLI arguments\n\t\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t\t\/\/ return nil, nil, err\n\t\t}\n\t}\n\n\t\/\/ appName := filepath.Base(os.Args[0])\n\t\/\/ appName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\t\/\/ usageMessage := fmt.Sprintf(\"Use %s -h to show usage\", appName)\n\t\/\/ if preconf.ShowVersion {\n\t\/\/ \tfmt.Println(appName, \"version\", version())\n\t\/\/ \tos.Exit(0)\n\t\/\/ }\n\n\t\/\/ Load additional config from file\n\tvar configFileError error\n\tparser := newConfigParser(&conf, flags.Default) \/\/ Single line command to read all the CLI params passed\n\n\t\/\/ creates a directory in the absolute sense\n\tif _, err := os.Stat(preconf.LitHomeDir); os.IsNotExist(err) {\n\t\tos.Mkdir(preconf.LitHomeDir, 0700)\n\t\tfmt.Println(\"Creating a new config file\")\n\t\terr1 := createDefaultConfigFile(preconf.LitHomeDir) \/\/ Source of error\n\t\tif err1 != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a \"+\n\t\t\t\t\"default config file: %v\\n\", err)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating a directory\")\n\t\tfmt.Println(err)\n\t}\n\n\tif !(preconf.ConfigFile != defaultConfigFile) {\n\t\t\/\/ passing works fine.\n\t\t\/\/ fmt.Println(\"Watch out\")\n\t\t\/\/ fmt.Println(filepath.Join(preconf.LitHomeDir))\n\t\tif _, err := os.Stat(filepath.Join(filepath.Join(preconf.LitHomeDir), \"lit.conf\")); os.IsNotExist(err) {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tfmt.Println(\"Creating a new config file\")\n\t\t\terr1 := createDefaultConfigFile(filepath.Join(preconf.LitHomeDir)) \/\/ Source of error\n\t\t\tif err1 != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error creating a \"+\n\t\t\t\t\t\"default config file: %v\\n\", err)\n\t\t\t}\n\t\t}\n\t\tpreconf.ConfigFile = filepath.Join(filepath.Join(preconf.LitHomeDir), \"lit.conf\")\n\t\terr := flags.NewIniParser(parser).ParseFile(preconf.ConfigFile) \/\/ lets parse the config file provided, if any\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*os.PathError); !ok {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error parsing config \"+\n\t\t\t\t\t\"file: %v\\n\", err)\n\t\t\t\t\/\/ fmt.Fprintln(os.Stderr, usageMessage)\n\t\t\t\tlog.Fatal(err)\n\t\t\t\t\/\/ return nil, nil, err\n\t\t\t}\n\t\t\tconfigFileError = err\n\t\t}\n\t}\n\n\t\/\/ Parse command line options again to ensure they take precedence.\n\tremainingArgs, err := parser.Parse() \/\/ no extra work, free overloading.\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {\n\t\t\t\/\/ fmt.Fprintln(os.Stderr, usageMessage)\n\t\t}\n\t\tlog.Fatal(err)\n\t\t\/\/ return nil, nil, err\n\t}\n\n\tif configFileError != nil {\n\t\tfmt.Printf(\"%v\", configFileError)\n\t}\n\n\tif remainingArgs != nil {\n\t\t\/\/fmt.Printf(\"%v\", remainingArgs)\n\t}\n\n\tlogFilePath := filepath.Join(conf.LitHomeDir, \"lit.log\")\n\n\tlogfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tdefer logfile.Close()\n\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\tif conf.Verbose {\n\t\tlogOutput := io.MultiWriter(os.Stdout, logfile)\n\t\tlog.SetOutput(logOutput)\n\t} else {\n\t\tlog.SetOutput(logfile)\n\t}\n\n\t\/\/ Allow node with no linked wallets, for testing.\n\t\/\/ TODO Should update tests and disallow nodes without wallets later.\n\t\/\/\tif conf.Tn3host == \"\" && conf.Lt4host == \"\" && conf.Reghost == \"\" {\n\t\/\/\t\tlog.Fatal(\"error: no network specified; use -tn3, -reg, -lt4\")\n\t\/\/\t}\n\n\t\/\/ Keys: the litNode, and wallits, all get 32 byte keys.\n\t\/\/ Right now though, they all get the *same* key. For lit as a single binary\n\t\/\/ now, all using the same key makes sense; could split up later.\n\n\tkeyFilePath := filepath.Join(conf.LitHomeDir, defaultKeyFileName)\n\n\t\/\/ read key file (generate if not found)\n\tkey, err := lnutil.ReadKeyFile(keyFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup LN node. Activate Tower if in hard mode.\n\t\/\/ give node and below file pathof lit home directoy\n\tnode, err := qln.NewLitNode(key, conf.LitHomeDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ node is up; link wallets based on args\n\terr = linkWallets(node, key, &conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trpcl := new(litrpc.LitRPC)\n\trpcl.Node = node\n\trpcl.OffButton = make(chan bool, 1)\n\n\tlitrpc.RPCListen(rpcl, conf.Rpcport)\n\tlitbamf.BamfListen(conf.Rpcport, conf.LitHomeDir)\n\n\t<-rpcl.OffButton\n\tfmt.Printf(\"Got stop request\\n\")\n\ttime.Sleep(time.Second)\n\n\treturn\n\t\/\/ New directory being created over at PWD\n\t\/\/ conf file being created at \/\n}\n\nfunc createDefaultConfigFile(destinationPath string) error {\n\n\t\/\/ We assume sample config file path is same as binary TODO: change to ~\/.lit\/config\/\n\tpath, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsampleConfigPath := filepath.Join(path, defaultConfigFilename)\n\n\t\/\/ We generate a random user and password\n\trandomBytes := make([]byte, 20)\n\t_, err = rand.Read(randomBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgeneratedRPCUser := base64.StdEncoding.EncodeToString(randomBytes)\n\n\t_, err = rand.Read(randomBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgeneratedRPCPass := base64.StdEncoding.EncodeToString(randomBytes)\n\n\tsrc, err := os.Open(sampleConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer src.Close()\n\n\tdest, err := os.OpenFile(filepath.Join(destinationPath, defaultConfigFilename),\n\t\tos.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dest.Close()\n\n\t\/\/ We copy every line from the sample config file to the destination,\n\t\/\/ only replacing the two lines for rpcuser and rpcpass\n\treader := bufio.NewReader(src)\n\tfor err != io.EOF {\n\t\tvar line string\n\t\tline, err = reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\tif strings.Contains(line, \"rpcuser=\") {\n\t\t\tline = \"rpcuser=\" + generatedRPCUser + \"\\n\"\n\t\t} else if strings.Contains(line, \"rpcpass=\") {\n\t\t\tline = \"rpcpass=\" + generatedRPCPass + \"\\n\"\n\t\t}\n\n\t\tif _, err := dest.WriteString(line); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\/\/\"strconv\"\n)\n\nfunc path_exists(name string) bool {\n\t_, err := os.Stat(name)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}\n\nfunc handle_err(err error) {\n\tif err != nil {\n\t\tg_ctx.Errorf(\"%v\\n\", err.Error())\n\t\t\/\/panic(err.Error())\n\n\t\tif g_ctx != nil {\n\t\t\tg_ctx.Exit(1)\n\t\t}\n\t\tos.Exit(1)\n\t}\n}\n\nfunc is_git_repo(dirname string) bool {\n\treturn path_exists(filepath.Join(dirname, \".git\"))\n}\n\nfunc copytree(dstdir, srcdir string) error {\n\tvar err error\n\n\tif !path_exists(dstdir) {\n\t\terr = os.MkdirAll(dstdir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = filepath.Walk(srcdir, func(path string, info os.FileInfo, err error) error {\n\t\trel := \"\"\n\t\trel, err = filepath.Rel(srcdir, path)\n\t\tout := filepath.Join(dstdir, rel)\n\t\tfmode := info.Mode()\n\t\tif fmode.IsDir() {\n\t\t\terr = os.MkdirAll(out, fmode.Perm())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if fmode.IsRegular() {\n\t\t\tdst, err := os.OpenFile(out, os.O_CREATE|os.O_RDWR, fmode.Perm())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tsrc, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t_, err = io.Copy(dst, src)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else if (fmode & os.ModeSymlink) != 0 {\n\t\t\trlink, err := os.Readlink(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = os.Symlink(rlink, out)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unhandled mode (%v) for path [%s]\", fmode, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn err\n}\n\n\/\/ EOF\n<commit_msg>utils.copytree: close destination. return errors<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\/\/\"strconv\"\n)\n\nfunc path_exists(name string) bool {\n\t_, err := os.Stat(name)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn false\n}\n\nfunc handle_err(err error) {\n\tif err != nil {\n\t\tg_ctx.Errorf(\"%v\\n\", err.Error())\n\t\t\/\/panic(err.Error())\n\n\t\tif g_ctx != nil {\n\t\t\tg_ctx.Exit(1)\n\t\t}\n\t\tos.Exit(1)\n\t}\n}\n\nfunc is_git_repo(dirname string) bool {\n\treturn path_exists(filepath.Join(dirname, \".git\"))\n}\n\nfunc copytree(dstdir, srcdir string) error {\n\tvar err error\n\n\tif !path_exists(dstdir) {\n\t\terr = os.MkdirAll(dstdir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = filepath.Walk(srcdir, func(path string, info os.FileInfo, err error) error {\n\t\trel := \"\"\n\t\trel, err = filepath.Rel(srcdir, path)\n\t\tout := filepath.Join(dstdir, rel)\n\t\tfmode := info.Mode()\n\t\tif fmode.IsDir() {\n\t\t\terr = os.MkdirAll(out, fmode.Perm())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if fmode.IsRegular() {\n\t\t\tdst, err := os.OpenFile(out, os.O_CREATE|os.O_RDWR, fmode.Perm())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer func() { err = dst.Close() }()\n\t\t\tsrc, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = io.Copy(dst, src)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if (fmode & os.ModeSymlink) != 0 {\n\t\t\trlink, err := os.Readlink(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = os.Symlink(rlink, out)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unhandled mode (%v) for path [%s]\", fmode, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn err\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com\/wvanbergen\/kazoo-go\"\n)\n\nfunc getGroupName(url string) string {\n\tm := md5.New()\n\tm.Write([]byte(url))\n\ts := hex.EncodeToString(m.Sum(nil))\n\treturn s\n}\n\nfunc discardBody(r io.Reader) {\n\ttempBuf := make([]byte, 4096)\n\tfor {\n\t\t_, e := r.Read(tempBuf)\n\t\tif e != nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc retrievePartitionLeaders(partitions kazoo.PartitionList) (partitionLeaders, error) {\n\n\tpls := make(partitionLeaders, 0, len(partitions))\n\tfor _, partition := range partitions {\n\t\tleader, err := partition.Leader()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpl := partitionLeader{id: partition.ID, leader: leader, partition: partition}\n\t\tpls = append(pls, pl)\n\t}\n\n\treturn pls, nil\n}\n\n\/\/ Divides a set of partitions between a set of consumers.\nfunc dividePartitionsBetweenConsumers(consumers kazoo.ConsumergroupInstanceList, partitions partitionLeaders) map[string][]*kazoo.Partition {\n\tresult := make(map[string][]*kazoo.Partition)\n\n\tplen := len(partitions)\n\tclen := len(consumers)\n\tif clen == 0 {\n\t\treturn result\n\t}\n\n\tsort.Sort(partitions)\n\tsort.Sort(consumers)\n\n\tn := plen \/ clen\n\tm := plen % clen\n\tp := 0\n\tfor i, consumer := range consumers {\n\t\tfirst := p\n\t\tlast := first + n\n\t\tif m > 0 && i < m {\n\t\t\tlast++\n\t\t}\n\t\tif last > plen {\n\t\t\tlast = plen\n\t\t}\n\n\t\tfor _, pl := range partitions[first:last] {\n\t\t\tresult[consumer.ID] = append(result[consumer.ID], pl.partition)\n\t\t}\n\t\tp = last\n\t}\n\n\treturn result\n}\n\ntype partitionLeader struct {\n\tid int32\n\tleader int32\n\tpartition *kazoo.Partition\n}\n\n\/\/ A sortable slice of PartitionLeader structs\ntype partitionLeaders []partitionLeader\n\nfunc (pls partitionLeaders) Len() int {\n\treturn len(pls)\n}\n\nfunc (pls partitionLeaders) Less(i, j int) bool {\n\treturn pls[i].leader < pls[j].leader || (pls[i].leader == pls[j].leader && pls[i].id < pls[j].id)\n}\n\nfunc (s partitionLeaders) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<commit_msg>Remove discardBody from utils.go<commit_after>package main\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"sort\"\n\n\t\"github.com\/wvanbergen\/kazoo-go\"\n)\n\nfunc getGroupName(url string) string {\n\tm := md5.New()\n\tm.Write([]byte(url))\n\ts := hex.EncodeToString(m.Sum(nil))\n\treturn s\n}\n\nfunc retrievePartitionLeaders(partitions kazoo.PartitionList) (partitionLeaders, error) {\n\n\tpls := make(partitionLeaders, 0, len(partitions))\n\tfor _, partition := range partitions {\n\t\tleader, err := partition.Leader()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpl := partitionLeader{id: partition.ID, leader: leader, partition: partition}\n\t\tpls = append(pls, pl)\n\t}\n\n\treturn pls, nil\n}\n\n\/\/ Divides a set of partitions between a set of consumers.\nfunc dividePartitionsBetweenConsumers(consumers kazoo.ConsumergroupInstanceList, partitions partitionLeaders) map[string][]*kazoo.Partition {\n\tresult := make(map[string][]*kazoo.Partition)\n\n\tplen := len(partitions)\n\tclen := len(consumers)\n\tif clen == 0 {\n\t\treturn result\n\t}\n\n\tsort.Sort(partitions)\n\tsort.Sort(consumers)\n\n\tn := plen \/ clen\n\tm := plen % clen\n\tp := 0\n\tfor i, consumer := range consumers {\n\t\tfirst := p\n\t\tlast := first + n\n\t\tif m > 0 && i < m {\n\t\t\tlast++\n\t\t}\n\t\tif last > plen {\n\t\t\tlast = plen\n\t\t}\n\n\t\tfor _, pl := range partitions[first:last] {\n\t\t\tresult[consumer.ID] = append(result[consumer.ID], pl.partition)\n\t\t}\n\t\tp = last\n\t}\n\n\treturn result\n}\n\ntype partitionLeader struct {\n\tid int32\n\tleader int32\n\tpartition *kazoo.Partition\n}\n\n\/\/ A sortable slice of PartitionLeader structs\ntype partitionLeaders []partitionLeader\n\nfunc (pls partitionLeaders) Len() int {\n\treturn len(pls)\n}\n\nfunc (pls partitionLeaders) Less(i, j int) bool {\n\treturn pls[i].leader < pls[j].leader || (pls[i].leader == pls[j].leader && pls[i].id < pls[j].id)\n}\n\nfunc (s partitionLeaders) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tpb \"github.com\/clawio\/service.localstore.meta\/proto\"\n\t\"io\"\n\t\"mime\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc (s *server) getMeta(p string) (*pb.Metadata, error) {\n\tfinfo, err := os.Stat(p)\n\tif err != nil {\n\t\treturn &pb.Metadata{}, err\n\t}\n\n\tm := &pb.Metadata{}\n\tm.Id = \"TODO\"\n\tm.Path = path.Clean(p)\n\tm.Size = uint32(finfo.Size())\n\tm.IsContainer = finfo.IsDir()\n\tm.Modified = uint32(finfo.ModTime().Unix())\n\tm.Etag = \"TODO\"\n\tm.Permissions = 0\n\tm.MimeType = mime.TypeByExtension(path.Ext(m.Path))\n\n\treturn m, nil\n}\n\nfunc copyFile(src, dst string, size int64) (err error) {\n\treader, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\twriter, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer writer.Close()\n\n\t_, err = io.CopyN(writer, reader, size)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc copyDir(src, dst string) (err error) {\n\terr = os.Mkdir(dst, dirPerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdirectory, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer directory.Close()\n\n\tobjects, err := directory.Readdir(-1)\n\n\tfor _, obj := range objects {\n\n\t\t_src := path.Join(src, obj.Name())\n\t\t_dst := path.Join(dst, obj.Name())\n\n\t\tif obj.IsDir() {\n\t\t\t\/\/ create sub-directories - recursively\n\t\t\terr = copyDir(_src, _dst)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ perform copy\n\t\t\terr = copyFile(_src, _dst, obj.Size())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Default mime to octet-stream<commit_after>package main\n\nimport (\n\tpb \"github.com\/clawio\/service.localstore.meta\/proto\"\n\t\"io\"\n\t\"mime\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc (s *server) getMeta(p string) (*pb.Metadata, error) {\n\tfinfo, err := os.Stat(p)\n\tif err != nil {\n\t\treturn &pb.Metadata{}, err\n\t}\n\n\tm := &pb.Metadata{}\n\tm.Id = \"TODO\"\n\tm.Path = path.Clean(p)\n\tm.Size = uint32(finfo.Size())\n\tm.IsContainer = finfo.IsDir()\n\tm.Modified = uint32(finfo.ModTime().Unix())\n\tm.Etag = \"TODO\"\n\tm.Permissions = 0\n\tm.MimeType = mime.TypeByExtension(path.Ext(m.Path))\n\n\tif m.MimeType == \"\" {\n\t\tm.MimeType = \"application\/octet-stream\"\n\t}\n\n\treturn m, nil\n}\n\nfunc copyFile(src, dst string, size int64) (err error) {\n\treader, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\twriter, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer writer.Close()\n\n\t_, err = io.CopyN(writer, reader, size)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc copyDir(src, dst string) (err error) {\n\terr = os.Mkdir(dst, dirPerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdirectory, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer directory.Close()\n\n\tobjects, err := directory.Readdir(-1)\n\n\tfor _, obj := range objects {\n\n\t\t_src := path.Join(src, obj.Name())\n\t\t_dst := path.Join(dst, obj.Name())\n\n\t\tif obj.IsDir() {\n\t\t\t\/\/ create sub-directories - recursively\n\t\t\terr = copyDir(_src, _dst)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ perform copy\n\t\t\terr = copyFile(_src, _dst, obj.Size())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package citadel\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/samalba\/dockerclient\"\n)\n\n\/\/ ValidateContainer ensures that the required fields are set on the container\nfunc ValidateContainer(c *Container) error {\n\tswitch {\n\tcase c.Cpus == 0:\n\t\treturn fmt.Errorf(\"container cannot have cpus equal to 0\")\n\tcase c.Memory == 0:\n\t\treturn fmt.Errorf(\"container cannot have memory equal to 0\")\n\tcase c.Image == \"\":\n\t\treturn fmt.Errorf(\"container must have an image\")\n\tcase c.Name == \"\":\n\t\treturn fmt.Errorf(\"container must have a name\")\n\tcase c.Type == \"\":\n\t\treturn fmt.Errorf(\"container must have a type\")\n\t}\n\n\treturn nil\n}\n\nfunc asCitadelContainer(container *dockerclient.Container, engine *Engine) (*Container, error) {\n\tinfo, err := engine.client.InspectContainer(container.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ports []*Port\n\n\tfor _, port := range container.Ports {\n\t\tp := &Port{\n\t\t\tProto: port.Type,\n\t\t\tPort: port.PublicPort,\n\t\t}\n\t\tports = append(ports, p)\n\t}\n\n\tplacement := &Placement{\n\t\tEngine: engine,\n\t\tInternalIP: info.NetworkSettings.IpAddress,\n\t\tPorts: ports,\n\t}\n\n\tvar (\n\t\tcType = \"\"\n\t\tlabels = []string{}\n\t\tenv = make(map[string]string)\n\t)\n\n\tfor _, e := range info.Config.Env {\n\t\tvals := strings.Split(e, \"=\")\n\t\tk, v := vals[0], vals[1]\n\n\t\tswitch k {\n\t\tcase \"_citadel_type\":\n\t\t\tcType = v\n\t\tcase \"_citadel_labels\":\n\t\t\tlabels = strings.Split(v, \",\")\n\t\tcase \"HOME\", \"DEBIAN_FRONTEND\", \"PATH\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tenv[k] = v\n\t\t}\n\t}\n\n\treturn &Container{\n\t\tName: strings.TrimPrefix(info.Name, \"\/\"),\n\t\tImage: container.Image,\n\t\tCpus: float64(info.Config.CpuShares) \/ 100.0 * engine.Cpus,\n\t\tMemory: float64(info.Config.Memory \/ 1024 \/ 1024),\n\t\tEnvironment: env,\n\t\tHostname: info.Config.Hostname,\n\t\tDomainname: info.Config.Domainname,\n\t\tType: cType,\n\t\tLabels: labels,\n\t\tPlacement: placement,\n\t}, nil\n}\n<commit_msg>Add container port to utils<commit_after>package citadel\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/samalba\/dockerclient\"\n)\n\n\/\/ ValidateContainer ensures that the required fields are set on the container\nfunc ValidateContainer(c *Container) error {\n\tswitch {\n\tcase c.Cpus == 0:\n\t\treturn fmt.Errorf(\"container cannot have cpus equal to 0\")\n\tcase c.Memory == 0:\n\t\treturn fmt.Errorf(\"container cannot have memory equal to 0\")\n\tcase c.Image == \"\":\n\t\treturn fmt.Errorf(\"container must have an image\")\n\tcase c.Name == \"\":\n\t\treturn fmt.Errorf(\"container must have a name\")\n\tcase c.Type == \"\":\n\t\treturn fmt.Errorf(\"container must have a type\")\n\t}\n\n\treturn nil\n}\n\nfunc asCitadelContainer(container *dockerclient.Container, engine *Engine) (*Container, error) {\n\tinfo, err := engine.client.InspectContainer(container.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ports []*Port\n\tfor _, port := range container.Ports {\n\t\tp := &Port{\n\t\t\tProto: port.Type,\n\t\t\tPort: port.PublicPort,\n\t\t\tContainerPort: port.PrivatePort,\n\t\t}\n\t\tports = append(ports, p)\n\t}\n\n\tplacement := &Placement{\n\t\tEngine: engine,\n\t\tInternalIP: info.NetworkSettings.IpAddress,\n\t\tPorts: ports,\n\t}\n\n\tvar (\n\t\tcType = \"\"\n\t\tlabels = []string{}\n\t\tenv = make(map[string]string)\n\t)\n\n\tfor _, e := range info.Config.Env {\n\t\tvals := strings.Split(e, \"=\")\n\t\tk, v := vals[0], vals[1]\n\n\t\tswitch k {\n\t\tcase \"_citadel_type\":\n\t\t\tcType = v\n\t\tcase \"_citadel_labels\":\n\t\t\tlabels = strings.Split(v, \",\")\n\t\tcase \"HOME\", \"DEBIAN_FRONTEND\", \"PATH\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tenv[k] = v\n\t\t}\n\t}\n\n\treturn &Container{\n\t\tName: strings.TrimPrefix(info.Name, \"\/\"),\n\t\tImage: container.Image,\n\t\tCpus: float64(info.Config.CpuShares) \/ 100.0 * engine.Cpus,\n\t\tMemory: float64(info.Config.Memory \/ 1024 \/ 1024),\n\t\tEnvironment: env,\n\t\tHostname: info.Config.Hostname,\n\t\tDomainname: info.Config.Domainname,\n\t\tType: cType,\n\t\tLabels: labels,\n\t\tPlacement: placement,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/containers\/storage\/pkg\/homedir\"\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping\nfunc ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {\n\toptions := IDMappingOptions{\n\t\tHostUIDMapping: true,\n\t\tHostGIDMapping: true,\n\t}\n\tif subGIDMap == \"\" && subUIDMap != \"\" {\n\t\tsubGIDMap = subUIDMap\n\t}\n\tif subUIDMap == \"\" && subGIDMap != \"\" {\n\t\tsubUIDMap = subGIDMap\n\t}\n\tif len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {\n\t\tGIDMapSlice = UIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {\n\t\tUIDMapSlice = GIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && subUIDMap == \"\" && os.Getuid() != 0 {\n\t\tUIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getuid())}\n\t}\n\tif len(GIDMapSlice) == 0 && subGIDMap == \"\" && os.Getuid() != 0 {\n\t\tGIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getgid())}\n\t}\n\n\tif subUIDMap != \"\" && subGIDMap != \"\" {\n\t\tmappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create NewIDMappings for uidmap=%s gidmap=%s\", subUIDMap, subGIDMap)\n\t\t}\n\t\toptions.UIDMap = mappings.UIDs()\n\t\toptions.GIDMap = mappings.GIDs()\n\t}\n\tparsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, \"UID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseUIDMap UID=%s\", UIDMapSlice)\n\t}\n\tparsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, \"GID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseGIDMap GID=%s\", UIDMapSlice)\n\t}\n\toptions.UIDMap = append(options.UIDMap, parsedUIDMap...)\n\toptions.GIDMap = append(options.GIDMap, parsedGIDMap...)\n\tif len(options.UIDMap) > 0 {\n\t\toptions.HostUIDMapping = false\n\t}\n\tif len(options.GIDMap) > 0 {\n\t\toptions.HostGIDMapping = false\n\t}\n\treturn &options, nil\n}\n\n\/\/ GetRootlessRuntimeDir returns the runtime directory when running as non root\nfunc GetRootlessRuntimeDir(rootlessUID int) (string, error) {\n\tpath, err := getRootlessRuntimeDir(rootlessUID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpath = filepath.Join(path, \"containers\")\n\tif err := os.MkdirAll(path, 0700); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"unable to make rootless runtime dir %s\", path)\n\t}\n\treturn path, nil\n}\n\ntype rootlessRuntimeDirEnvironment interface {\n\tgetProcCommandFile() string\n\tgetRunUserDir() string\n\tgetTmpPerUserDir() string\n\n\thomeDirGetRuntimeDir() (string, error)\n\tsystemLstat(string) (*system.StatT, error)\n\thomedirGet() string\n}\n\ntype rootlessRuntimeDirEnvironmentImplementation struct {\n\tprocCommandFile string\n\trunUserDir string\n\ttmpPerUserDir string\n}\n\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {\n\treturn env.procCommandFile\n}\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {\n\treturn env.runUserDir\n}\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {\n\treturn env.tmpPerUserDir\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {\n\treturn homedir.GetRuntimeDir()\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {\n\treturn system.Lstat(path)\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {\n\treturn homedir.Get()\n}\n\nfunc isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {\n\tst, err := env.systemLstat(dir)\n\treturn err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000\n}\n\n\/\/ getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.\n\/\/ Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.\nfunc getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {\n\truntimeDir, err := env.homeDirGetRuntimeDir()\n\tif err == nil {\n\t\treturn runtimeDir, nil\n\t}\n\n\tinitCommand, err := ioutil.ReadFile(env.getProcCommandFile())\n\tif err != nil || string(initCommand) == \"systemd\" {\n\t\trunUserDir := env.getRunUserDir()\n\t\tif isRootlessRuntimeDirOwner(runUserDir, env) {\n\t\t\treturn runUserDir, nil\n\t\t}\n\t}\n\n\ttmpPerUserDir := env.getTmpPerUserDir()\n\tif _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {\n\t\tif err := os.Mkdir(tmpPerUserDir, 0700); err != nil {\n\t\t\tlogrus.Errorf(\"failed to create temp directory for user: %v\", err)\n\t\t} else {\n\t\t\treturn tmpPerUserDir, nil\n\t\t}\n\t} else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {\n\t\treturn tmpPerUserDir, nil\n\t}\n\n\thomeDir := env.homedirGet()\n\tif homeDir == \"\" {\n\t\treturn \"\", errors.New(\"neither XDG_RUNTIME_DIR not temp dir nor HOME was set non-empty\")\n\t}\n\tresolvedHomeDir, err := filepath.EvalSymlinks(homeDir)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"cannot resolve %s\", homeDir)\n\t}\n\treturn filepath.Join(resolvedHomeDir, \"rundir\"), nil\n}\n\nfunc getRootlessRuntimeDir(rootlessUID int) (string, error) {\n\treturn getRootlessRuntimeDirIsolated(\n\t\trootlessRuntimeDirEnvironmentImplementation{\n\t\t\t\"\/proc\/1\/comm\",\n\t\t\tfmt.Sprintf(\"\/run\/user\/%d\", rootlessUID),\n\t\t\tfmt.Sprintf(\"%s\/containers-user-%d\", os.TempDir(), rootlessUID),\n\t\t},\n\t)\n}\n\n\/\/ getRootlessDirInfo returns the parent path of where the storage for containers and\n\/\/ volumes will be in rootless mode\nfunc getRootlessDirInfo(rootlessUID int) (string, string, error) {\n\trootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdataDir, err := homedir.GetDataHome()\n\tif err == nil {\n\t\treturn dataDir, rootlessRuntime, nil\n\t}\n\n\thome := homedir.Get()\n\tif home == \"\" {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"neither XDG_DATA_HOME nor HOME was set non-empty\")\n\t}\n\t\/\/ runc doesn't like symlinks in the rootfs path, and at least\n\t\/\/ on CoreOS \/home is a symlink to \/var\/home, so resolve any symlink.\n\tresolvedHome, err := filepath.EvalSymlinks(home)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t}\n\tdataDir = filepath.Join(resolvedHome, \".local\", \"share\")\n\n\treturn dataDir, rootlessRuntime, nil\n}\n\n\/\/ getRootlessStorageOpts returns the storage opts for containers running as non root\nfunc getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {\n\tvar opts StoreOptions\n\n\tdataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)\n\tif err != nil {\n\t\treturn opts, err\n\t}\n\topts.RunRoot = rootlessRuntime\n\topts.GraphRoot = filepath.Join(dataDir, \"containers\", \"storage\")\n\tif systemOpts.RootlessStoragePath != \"\" {\n\t\topts.RootlessStoragePath = systemOpts.RootlessStoragePath\n\t} else {\n\t\topts.RootlessStoragePath = opts.GraphRoot\n\t}\n\tif path, err := exec.LookPath(\"fuse-overlayfs\"); err == nil {\n\t\topts.GraphDriverName = \"overlay\"\n\t\topts.GraphDriverOptions = []string{fmt.Sprintf(\"overlay.mount_program=%s\", path)}\n\t\tfor _, o := range systemOpts.GraphDriverOptions {\n\t\t\tif strings.Contains(o, \"ignore_chown_errors\") {\n\t\t\t\topts.GraphDriverOptions = append(opts.GraphDriverOptions, o)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\topts.GraphDriverName = \"vfs\"\n\t}\n\treturn opts, nil\n}\n\nfunc getRootlessUID() int {\n\tuidEnv := os.Getenv(\"_CONTAINERS_ROOTLESS_UID\")\n\tif uidEnv != \"\" {\n\t\tu, _ := strconv.Atoi(uidEnv)\n\t\treturn u\n\t}\n\treturn os.Geteuid()\n}\n\n\/\/ DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers\nfunc DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {\n\tuid := getRootlessUID()\n\treturn DefaultStoreOptions(uid != 0, uid)\n}\n\n\/\/ defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.\n\/\/ Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.\nfunc defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {\n\tvar (\n\t\tdefaultRootlessRunRoot string\n\t\tdefaultRootlessGraphRoot string\n\t\terr error\n\t)\n\tstorageOpts := defaultStoreOptions\n\tif rootless && rootlessUID != 0 {\n\t\tstorageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t}\n\t_, err = os.Stat(storageConf)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn storageOpts, errors.Wrapf(err, \"cannot stat %s\", storageConf)\n\t}\n\tif err == nil {\n\t\tdefaultRootlessRunRoot = storageOpts.RunRoot\n\t\tdefaultRootlessGraphRoot = storageOpts.GraphRoot\n\t\tstorageOpts = StoreOptions{}\n\t\treloadConfigurationFileIfNeeded(storageConf, &storageOpts)\n\t}\n\tif storageOpts.RunRoot != \"\" {\n\t\trunRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t\tstorageOpts.RunRoot = runRoot\n\t}\n\tif storageOpts.GraphRoot != \"\" {\n\t\tgraphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t\tstorageOpts.GraphRoot = graphRoot\n\t}\n\n\tif rootless && rootlessUID != 0 {\n\t\tif err == nil {\n\t\t\t\/\/ If the file did not specify a graphroot or runroot,\n\t\t\t\/\/ set sane defaults so we don't try and use root-owned\n\t\t\t\/\/ directories\n\t\t\tif storageOpts.RunRoot == \"\" {\n\t\t\t\tstorageOpts.RunRoot = defaultRootlessRunRoot\n\t\t\t}\n\t\t\tif storageOpts.GraphRoot == \"\" {\n\t\t\t\tstorageOpts.GraphRoot = defaultRootlessGraphRoot\n\t\t\t}\n\t\t\tif storageOpts.RootlessStoragePath != \"\" {\n\t\t\t\trootlessStoragePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn storageOpts, err\n\t\t\t\t}\n\t\t\t\tstorageOpts.GraphRoot = rootlessStoragePath\n\t\t\t}\n\t\t}\n\t}\n\treturn storageOpts, nil\n}\n\n\/\/ DefaultStoreOptions returns the default storage ops for containers\nfunc DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {\n\tstorageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)\n\tif err != nil {\n\t\treturn defaultStoreOptions, err\n\t}\n\treturn defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)\n}\n\nfunc expandEnvPath(path string, rootlessUID int) (string, error) {\n\tpath = strings.Replace(path, \"$UID\", strconv.Itoa(rootlessUID), -1)\n\tpath = os.ExpandEnv(path)\n\treturn path, nil\n}\n\nfunc validateMountOptions(mountOptions []string) error {\n\tvar Empty struct{}\n\t\/\/ Add invalid options for ImageMount() here.\n\tinvalidOptions := map[string]struct{}{\n\t\t\"rw\": Empty,\n\t}\n\n\tfor _, opt := range mountOptions {\n\t\tif _, ok := invalidOptions[opt]; ok {\n\t\t\treturn fmt.Errorf(\" %q option not supported\", opt)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix usage of rootless_storage_path from system storage.conf file<commit_after>package storage\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/containers\/storage\/pkg\/homedir\"\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping\nfunc ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {\n\toptions := IDMappingOptions{\n\t\tHostUIDMapping: true,\n\t\tHostGIDMapping: true,\n\t}\n\tif subGIDMap == \"\" && subUIDMap != \"\" {\n\t\tsubGIDMap = subUIDMap\n\t}\n\tif subUIDMap == \"\" && subGIDMap != \"\" {\n\t\tsubUIDMap = subGIDMap\n\t}\n\tif len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {\n\t\tGIDMapSlice = UIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {\n\t\tUIDMapSlice = GIDMapSlice\n\t}\n\tif len(UIDMapSlice) == 0 && subUIDMap == \"\" && os.Getuid() != 0 {\n\t\tUIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getuid())}\n\t}\n\tif len(GIDMapSlice) == 0 && subGIDMap == \"\" && os.Getuid() != 0 {\n\t\tGIDMapSlice = []string{fmt.Sprintf(\"0:%d:1\", os.Getgid())}\n\t}\n\n\tif subUIDMap != \"\" && subGIDMap != \"\" {\n\t\tmappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create NewIDMappings for uidmap=%s gidmap=%s\", subUIDMap, subGIDMap)\n\t\t}\n\t\toptions.UIDMap = mappings.UIDs()\n\t\toptions.GIDMap = mappings.GIDs()\n\t}\n\tparsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, \"UID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseUIDMap UID=%s\", UIDMapSlice)\n\t}\n\tparsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, \"GID\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create ParseGIDMap GID=%s\", UIDMapSlice)\n\t}\n\toptions.UIDMap = append(options.UIDMap, parsedUIDMap...)\n\toptions.GIDMap = append(options.GIDMap, parsedGIDMap...)\n\tif len(options.UIDMap) > 0 {\n\t\toptions.HostUIDMapping = false\n\t}\n\tif len(options.GIDMap) > 0 {\n\t\toptions.HostGIDMapping = false\n\t}\n\treturn &options, nil\n}\n\n\/\/ GetRootlessRuntimeDir returns the runtime directory when running as non root\nfunc GetRootlessRuntimeDir(rootlessUID int) (string, error) {\n\tpath, err := getRootlessRuntimeDir(rootlessUID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpath = filepath.Join(path, \"containers\")\n\tif err := os.MkdirAll(path, 0700); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"unable to make rootless runtime dir %s\", path)\n\t}\n\treturn path, nil\n}\n\ntype rootlessRuntimeDirEnvironment interface {\n\tgetProcCommandFile() string\n\tgetRunUserDir() string\n\tgetTmpPerUserDir() string\n\n\thomeDirGetRuntimeDir() (string, error)\n\tsystemLstat(string) (*system.StatT, error)\n\thomedirGet() string\n}\n\ntype rootlessRuntimeDirEnvironmentImplementation struct {\n\tprocCommandFile string\n\trunUserDir string\n\ttmpPerUserDir string\n}\n\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {\n\treturn env.procCommandFile\n}\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {\n\treturn env.runUserDir\n}\nfunc (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {\n\treturn env.tmpPerUserDir\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {\n\treturn homedir.GetRuntimeDir()\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {\n\treturn system.Lstat(path)\n}\nfunc (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {\n\treturn homedir.Get()\n}\n\nfunc isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {\n\tst, err := env.systemLstat(dir)\n\treturn err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000\n}\n\n\/\/ getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.\n\/\/ Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.\nfunc getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {\n\truntimeDir, err := env.homeDirGetRuntimeDir()\n\tif err == nil {\n\t\treturn runtimeDir, nil\n\t}\n\n\tinitCommand, err := ioutil.ReadFile(env.getProcCommandFile())\n\tif err != nil || string(initCommand) == \"systemd\" {\n\t\trunUserDir := env.getRunUserDir()\n\t\tif isRootlessRuntimeDirOwner(runUserDir, env) {\n\t\t\treturn runUserDir, nil\n\t\t}\n\t}\n\n\ttmpPerUserDir := env.getTmpPerUserDir()\n\tif _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {\n\t\tif err := os.Mkdir(tmpPerUserDir, 0700); err != nil {\n\t\t\tlogrus.Errorf(\"failed to create temp directory for user: %v\", err)\n\t\t} else {\n\t\t\treturn tmpPerUserDir, nil\n\t\t}\n\t} else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {\n\t\treturn tmpPerUserDir, nil\n\t}\n\n\thomeDir := env.homedirGet()\n\tif homeDir == \"\" {\n\t\treturn \"\", errors.New(\"neither XDG_RUNTIME_DIR not temp dir nor HOME was set non-empty\")\n\t}\n\tresolvedHomeDir, err := filepath.EvalSymlinks(homeDir)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"cannot resolve %s\", homeDir)\n\t}\n\treturn filepath.Join(resolvedHomeDir, \"rundir\"), nil\n}\n\nfunc getRootlessRuntimeDir(rootlessUID int) (string, error) {\n\treturn getRootlessRuntimeDirIsolated(\n\t\trootlessRuntimeDirEnvironmentImplementation{\n\t\t\t\"\/proc\/1\/comm\",\n\t\t\tfmt.Sprintf(\"\/run\/user\/%d\", rootlessUID),\n\t\t\tfmt.Sprintf(\"%s\/containers-user-%d\", os.TempDir(), rootlessUID),\n\t\t},\n\t)\n}\n\n\/\/ getRootlessDirInfo returns the parent path of where the storage for containers and\n\/\/ volumes will be in rootless mode\nfunc getRootlessDirInfo(rootlessUID int) (string, string, error) {\n\trootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdataDir, err := homedir.GetDataHome()\n\tif err == nil {\n\t\treturn dataDir, rootlessRuntime, nil\n\t}\n\n\thome := homedir.Get()\n\tif home == \"\" {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"neither XDG_DATA_HOME nor HOME was set non-empty\")\n\t}\n\t\/\/ runc doesn't like symlinks in the rootfs path, and at least\n\t\/\/ on CoreOS \/home is a symlink to \/var\/home, so resolve any symlink.\n\tresolvedHome, err := filepath.EvalSymlinks(home)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"cannot resolve %s\", home)\n\t}\n\tdataDir = filepath.Join(resolvedHome, \".local\", \"share\")\n\n\treturn dataDir, rootlessRuntime, nil\n}\n\n\/\/ getRootlessStorageOpts returns the storage opts for containers running as non root\nfunc getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {\n\tvar opts StoreOptions\n\n\tdataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)\n\tif err != nil {\n\t\treturn opts, err\n\t}\n\topts.RunRoot = rootlessRuntime\n\tif systemOpts.RootlessStoragePath != \"\" {\n\t\topts.GraphRoot = systemOpts.RootlessStoragePath\n\t} else {\n\t\topts.GraphRoot = filepath.Join(dataDir, \"containers\", \"storage\")\n\t}\n\tif path, err := exec.LookPath(\"fuse-overlayfs\"); err == nil {\n\t\topts.GraphDriverName = \"overlay\"\n\t\topts.GraphDriverOptions = []string{fmt.Sprintf(\"overlay.mount_program=%s\", path)}\n\t\tfor _, o := range systemOpts.GraphDriverOptions {\n\t\t\tif strings.Contains(o, \"ignore_chown_errors\") {\n\t\t\t\topts.GraphDriverOptions = append(opts.GraphDriverOptions, o)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\topts.GraphDriverName = \"vfs\"\n\t}\n\treturn opts, nil\n}\n\nfunc getRootlessUID() int {\n\tuidEnv := os.Getenv(\"_CONTAINERS_ROOTLESS_UID\")\n\tif uidEnv != \"\" {\n\t\tu, _ := strconv.Atoi(uidEnv)\n\t\treturn u\n\t}\n\treturn os.Geteuid()\n}\n\n\/\/ DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers\nfunc DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {\n\tuid := getRootlessUID()\n\treturn DefaultStoreOptions(uid != 0, uid)\n}\n\n\/\/ defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.\n\/\/ Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.\nfunc defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {\n\tvar (\n\t\tdefaultRootlessRunRoot string\n\t\tdefaultRootlessGraphRoot string\n\t\terr error\n\t)\n\tstorageOpts := defaultStoreOptions\n\tif rootless && rootlessUID != 0 {\n\t\tstorageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t}\n\t_, err = os.Stat(storageConf)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn storageOpts, errors.Wrapf(err, \"cannot stat %s\", storageConf)\n\t}\n\tif err == nil {\n\t\tdefaultRootlessRunRoot = storageOpts.RunRoot\n\t\tdefaultRootlessGraphRoot = storageOpts.GraphRoot\n\t\tstorageOpts = StoreOptions{}\n\t\treloadConfigurationFileIfNeeded(storageConf, &storageOpts)\n\t\tif rootless && rootlessUID != 0 {\n\t\t\t\/\/ If the file did not specify a graphroot or runroot,\n\t\t\t\/\/ set sane defaults so we don't try and use root-owned\n\t\t\t\/\/ directories\n\t\t\tif storageOpts.RunRoot == \"\" {\n\t\t\t\tstorageOpts.RunRoot = defaultRootlessRunRoot\n\t\t\t}\n\t\t\tif storageOpts.GraphRoot == \"\" {\n\t\t\t\tstorageOpts.GraphRoot = defaultRootlessGraphRoot\n\t\t\t}\n\t\t}\n\t}\n\tif storageOpts.RunRoot != \"\" {\n\t\trunRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t\tstorageOpts.RunRoot = runRoot\n\t}\n\tif storageOpts.GraphRoot != \"\" {\n\t\tgraphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)\n\t\tif err != nil {\n\t\t\treturn storageOpts, err\n\t\t}\n\t\tstorageOpts.GraphRoot = graphRoot\n\t}\n\n\treturn storageOpts, nil\n}\n\n\/\/ DefaultStoreOptions returns the default storage ops for containers\nfunc DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {\n\tstorageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)\n\tif err != nil {\n\t\treturn defaultStoreOptions, err\n\t}\n\treturn defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)\n}\n\nfunc expandEnvPath(path string, rootlessUID int) (string, error) {\n\tpath = strings.Replace(path, \"$UID\", strconv.Itoa(rootlessUID), -1)\n\tpath = os.ExpandEnv(path)\n\treturn path, nil\n}\n\nfunc validateMountOptions(mountOptions []string) error {\n\tvar Empty struct{}\n\t\/\/ Add invalid options for ImageMount() here.\n\tinvalidOptions := map[string]struct{}{\n\t\t\"rw\": Empty,\n\t}\n\n\tfor _, opt := range mountOptions {\n\t\tif _, ok := invalidOptions[opt]; ok {\n\t\t\treturn fmt.Errorf(\" %q option not supported\", opt)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ A Logger represents an active logging object that generates lines of output.\ntype Logger interface {\n\t\/\/ Log logs a message using the default formats for its operands.\n\t\/\/ Spaces are always added between operands and a newline is appended.\n\tLog(v ...interface{})\n\n\t\/\/ Logf logs a message according to a format specifier.\n\tLogf(format string, v ...interface{})\n\n\t\/\/ Error logs an error using the default formats for its operands.\n\t\/\/ Spaces are always added between operands and a newline is appended.\n\tError(v ...interface{})\n\n\t\/\/ Errorf logs an error according to a format specifier.\n\tErrorf(format string, v ...interface{})\n}\n\nconst (\n\tdefaultColor string = \"\\033[00m\"\n\taccentColor string = \"\\033[94m\"\n\terrColor string = \"\\033[91m\"\n)\n\n\/\/ NewLogger creates a logger that writes on the given writer.\n\/\/ Logs are written only if debug is enabled.\n\/\/ It is safe for concurrent access.\nfunc NewLogger(w io.Writer, debug bool) Logger {\n\tlogger := newLogger(w, debug)\n\treturn NewConcurrentLogger(logger)\n}\n\ntype logger struct {\n\twriter io.Writer\n\tdebug bool\n}\n\nfunc newLogger(w io.Writer, debug bool) *logger {\n\treturn &logger{\n\t\twriter: w,\n\t\tdebug: debug,\n\t}\n}\n\nfunc (l *logger) Log(v ...interface{}) {\n\tif !l.debug {\n\t\treturn\n\t}\n\tprintLogPrefix(l.writer, \"Log \", accentColor)\n\tfmt.Fprintln(l.writer, v...)\n}\n\nfunc (l *logger) Logf(format string, v ...interface{}) {\n\tif !l.debug {\n\t\treturn\n\t}\n\tprintLogPrefix(l.writer, \"Log \", accentColor)\n\tfmt.Fprintf(l.writer, format, v...)\n\tfmt.Fprintln(l.writer)\n}\n\nfunc (l *logger) Error(v ...interface{}) {\n\tprintLogPrefix(l.writer, \"Error\", errColor)\n\tfmt.Fprintln(l.writer, v...)\n}\n\nfunc (l *logger) Errorf(format string, v ...interface{}) {\n\tprintLogPrefix(l.writer, \"Error\", errColor)\n\tfmt.Fprintf(l.writer, format, v...)\n\tfmt.Fprintln(l.writer)\n}\n\nfunc printLogPrefix(w io.Writer, level, color string) {\n\tfile, line := caller()\n\tnow := time.Now().Format(\"2006\/01\/02 15:04:05\")\n\tfmt.Fprintf(w,\n\t\t\"%s%s%s %s %s:%v |> \",\n\t\tcolor,\n\t\tstrings.ToUpper(level),\n\t\tdefaultColor,\n\t\tnow,\n\t\tfile,\n\t\tline,\n\t)\n}\n\nfunc caller() (file string, line int) {\n\t_, file, line, _ = runtime.Caller(3)\n\tfile = filepath.Base(file)\n\treturn\n}\n\n\/\/ NewConsole creates a logger that writes messages on standard outputs.\n\/\/ Logs are written on stdout, only if debug is enabled.\n\/\/ Errors are written on stderr.\n\/\/ It is safe for concurrent access.\nfunc NewConsole(debug bool) Logger {\n\tlogger := newConsole(debug)\n\treturn NewConcurrentLogger(logger)\n}\n\ntype console struct {\n\tstd Logger\n\terr Logger\n}\n\nfunc newConsole(debug bool) *console {\n\treturn &console{\n\t\tstd: newLogger(os.Stdout, debug),\n\t\terr: newLogger(os.Stderr, debug),\n\t}\n}\n\nfunc (c *console) Log(v ...interface{}) {\n\tc.std.Log(v...)\n}\n\nfunc (c *console) Logf(format string, v ...interface{}) {\n\tc.std.Logf(format, v...)\n}\n\nfunc (c *console) Error(v ...interface{}) {\n\tc.err.Error(v...)\n}\n\nfunc (c *console) Errorf(format string, v ...interface{}) {\n\tc.err.Errorf(format, v...)\n}\n\n\/\/ NewMultiLogger creates a logger that aggregate multiple loggers.\nfunc NewMultiLogger(loggers ...Logger) Logger {\n\treturn &multiLogger{\n\t\tloggers: loggers,\n\t}\n}\n\ntype multiLogger struct {\n\tloggers []Logger\n}\n\nfunc (l *multiLogger) Log(v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Log(v...)\n\t}\n}\n\nfunc (l *multiLogger) Logf(format string, v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Logf(format, v...)\n\t}\n}\n\nfunc (l *multiLogger) Error(v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Error(v...)\n\t}\n}\n\nfunc (l *multiLogger) Errorf(format string, v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Errorf(format, v...)\n\t}\n}\n\n\/\/ NewConcurrentLogger decorates the given logger to ensure concurrent access\n\/\/ safety.\nfunc NewConcurrentLogger(l Logger) Logger {\n\treturn &concurrentLogger{\n\t\tlogger: l,\n\t}\n}\n\ntype concurrentLogger struct {\n\tmutex sync.Mutex\n\tlogger Logger\n}\n\nfunc (l *concurrentLogger) Log(v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Log(v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Logf(format string, v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Logf(format, v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Error(v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Error(v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Errorf(format string, v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Errorf(format, v...)\n\tl.mutex.Unlock()\n}\n<commit_msg>Remove caller in logs and better colors<commit_after>package app\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ A Logger represents an active logging object that generates lines of output.\ntype Logger interface {\n\t\/\/ Log logs a message using the default formats for its operands.\n\t\/\/ Spaces are always added between operands and a newline is appended.\n\tLog(v ...interface{})\n\n\t\/\/ Logf logs a message according to a format specifier.\n\tLogf(format string, v ...interface{})\n\n\t\/\/ Error logs an error using the default formats for its operands.\n\t\/\/ Spaces are always added between operands and a newline is appended.\n\tError(v ...interface{})\n\n\t\/\/ Errorf logs an error according to a format specifier.\n\tErrorf(format string, v ...interface{})\n}\n\nconst (\n\tdefaultColor string = \"\\033[00m\"\n\taccentColor string = \"\\033[94m\"\n\terrColor string = \"\\033[91m\"\n)\n\n\/\/ NewLogger creates a logger that writes on the given writer.\n\/\/ Logs are written only if debug is enabled.\n\/\/ It is safe for concurrent access.\nfunc NewLogger(w io.Writer, debug bool) Logger {\n\tlogger := newLogger(w, debug)\n\treturn NewConcurrentLogger(logger)\n}\n\ntype logger struct {\n\twriter io.Writer\n\tdebug bool\n}\n\nfunc newLogger(w io.Writer, debug bool) *logger {\n\treturn &logger{\n\t\twriter: w,\n\t\tdebug: debug,\n\t}\n}\n\nfunc (l *logger) Log(v ...interface{}) {\n\tif !l.debug {\n\t\treturn\n\t}\n\tprintLogPrefix(l.writer, \"Log \", accentColor)\n\tfmt.Fprintln(l.writer, v...)\n}\n\nfunc (l *logger) Logf(format string, v ...interface{}) {\n\tif !l.debug {\n\t\treturn\n\t}\n\tprintLogPrefix(l.writer, \"Log \", accentColor)\n\tfmt.Fprintf(l.writer, format, v...)\n\tfmt.Fprintln(l.writer)\n}\n\nfunc (l *logger) Error(v ...interface{}) {\n\tprintLogPrefix(l.writer, \"Error\", errColor)\n\tfmt.Fprintln(l.writer, v...)\n}\n\nfunc (l *logger) Errorf(format string, v ...interface{}) {\n\tprintLogPrefix(l.writer, \"Error\", errColor)\n\tfmt.Fprintf(l.writer, format, v...)\n\tfmt.Fprintln(l.writer)\n}\n\nfunc printLogPrefix(w io.Writer, level, color string) {\n\tnow := time.Now().Format(\"2006\/01\/02 15:04:05\")\n\tfmt.Fprintf(w,\n\t\t\"%s%s%s %s %s|>%s \",\n\t\tcolor,\n\t\tstrings.ToUpper(level),\n\t\tdefaultColor,\n\t\tnow,\n\t\tcolor,\n\t\tdefaultColor,\n\t)\n}\n\n\/\/ NewConsole creates a logger that writes messages on standard outputs.\n\/\/ Logs are written on stdout, only if debug is enabled.\n\/\/ Errors are written on stderr.\n\/\/ It is safe for concurrent access.\nfunc NewConsole(debug bool) Logger {\n\tlogger := newConsole(debug)\n\treturn NewConcurrentLogger(logger)\n}\n\ntype console struct {\n\tstd Logger\n\terr Logger\n}\n\nfunc newConsole(debug bool) *console {\n\treturn &console{\n\t\tstd: newLogger(os.Stdout, debug),\n\t\terr: newLogger(os.Stderr, debug),\n\t}\n}\n\nfunc (c *console) Log(v ...interface{}) {\n\tc.std.Log(v...)\n}\n\nfunc (c *console) Logf(format string, v ...interface{}) {\n\tc.std.Logf(format, v...)\n}\n\nfunc (c *console) Error(v ...interface{}) {\n\tc.err.Error(v...)\n}\n\nfunc (c *console) Errorf(format string, v ...interface{}) {\n\tc.err.Errorf(format, v...)\n}\n\n\/\/ NewMultiLogger creates a logger that aggregate multiple loggers.\nfunc NewMultiLogger(loggers ...Logger) Logger {\n\treturn &multiLogger{\n\t\tloggers: loggers,\n\t}\n}\n\ntype multiLogger struct {\n\tloggers []Logger\n}\n\nfunc (l *multiLogger) Log(v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Log(v...)\n\t}\n}\n\nfunc (l *multiLogger) Logf(format string, v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Logf(format, v...)\n\t}\n}\n\nfunc (l *multiLogger) Error(v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Error(v...)\n\t}\n}\n\nfunc (l *multiLogger) Errorf(format string, v ...interface{}) {\n\tfor _, logger := range l.loggers {\n\t\tlogger.Errorf(format, v...)\n\t}\n}\n\n\/\/ NewConcurrentLogger decorates the given logger to ensure concurrent access\n\/\/ safety.\nfunc NewConcurrentLogger(l Logger) Logger {\n\treturn &concurrentLogger{\n\t\tlogger: l,\n\t}\n}\n\ntype concurrentLogger struct {\n\tmutex sync.Mutex\n\tlogger Logger\n}\n\nfunc (l *concurrentLogger) Log(v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Log(v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Logf(format string, v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Logf(format, v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Error(v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Error(v...)\n\tl.mutex.Unlock()\n}\n\nfunc (l *concurrentLogger) Errorf(format string, v ...interface{}) {\n\tl.mutex.Lock()\n\tl.logger.Errorf(format, v...)\n\tl.mutex.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/btcsuite\/btclog\"\n\t\"github.com\/btcsuite\/seelog\"\n\t\"github.com\/lightningnetwork\/lnd\/chainntnfs\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/discovery\"\n\t\"github.com\/lightningnetwork\/lnd\/htlcswitch\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwallet\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\"\n\t\"github.com\/roasbeef\/btcd\/connmgr\"\n)\n\n\/\/ Loggers per subsystem. Note that backendLog is a seelog logger that all of\n\/\/ the subsystem loggers route their messages to. When adding new subsystems,\n\/\/ add a reference here, to the subsystemLoggers map, and the useLogger\n\/\/ function.\nvar (\n\tbackendLog = seelog.Disabled\n\tltndLog = btclog.Disabled\n\tlnwlLog = btclog.Disabled\n\tpeerLog = btclog.Disabled\n\tdiscLog = btclog.Disabled\n\tfndgLog = btclog.Disabled\n\trpcsLog = btclog.Disabled\n\tsrvrLog = btclog.Disabled\n\tntfnLog = btclog.Disabled\n\tchdbLog = btclog.Disabled\n\thswcLog = btclog.Disabled\n\tutxnLog = btclog.Disabled\n\tbrarLog = btclog.Disabled\n\tcmgrLog = btclog.Disabled\n\tcrtrLog = btclog.Disabled\n)\n\n\/\/ subsystemLoggers maps each subsystem identifier to its associated logger.\nvar subsystemLoggers = map[string]btclog.Logger{\n\t\"LTND\": ltndLog,\n\t\"LNWL\": lnwlLog,\n\t\"PEER\": peerLog,\n\t\"DISC\": discLog,\n\t\"RPCS\": rpcsLog,\n\t\"SRVR\": srvrLog,\n\t\"NTFN\": ntfnLog,\n\t\"CHDB\": chdbLog,\n\t\"FNDG\": fndgLog,\n\t\"HSWC\": hswcLog,\n\t\"UTXN\": utxnLog,\n\t\"BRAR\": brarLog,\n\t\"CMGR\": cmgrLog,\n\t\"CRTR\": crtrLog,\n}\n\n\/\/ useLogger updates the logger references for subsystemID to logger. Invalid\n\/\/ subsystems are ignored.\nfunc useLogger(subsystemID string, logger btclog.Logger) {\n\tif _, ok := subsystemLoggers[subsystemID]; !ok {\n\t\treturn\n\t}\n\tsubsystemLoggers[subsystemID] = logger\n\n\tswitch subsystemID {\n\tcase \"LTND\":\n\t\tltndLog = logger\n\n\tcase \"LNWL\":\n\t\tlnwlLog = logger\n\t\tlnwallet.UseLogger(logger)\n\n\tcase \"PEER\":\n\t\tpeerLog = logger\n\n\tcase \"DISC\":\n\t\tdiscLog = logger\n\t\tdiscovery.UseLogger(logger)\n\n\tcase \"RPCS\":\n\t\trpcsLog = logger\n\n\tcase \"SRVR\":\n\t\tsrvrLog = logger\n\n\tcase \"NTFN\":\n\t\tntfnLog = logger\n\t\tchainntnfs.UseLogger(logger)\n\n\tcase \"CHDB\":\n\t\tchdbLog = logger\n\t\tchanneldb.UseLogger(logger)\n\n\tcase \"FNDG\":\n\t\tfndgLog = logger\n\n\tcase \"HSWC\":\n\t\thswcLog = logger\n\t\thtlcswitch.UseLogger(logger)\n\n\tcase \"UTXN\":\n\t\tutxnLog = logger\n\n\tcase \"BRAR\":\n\t\tbrarLog = logger\n\n\tcase \"CMGR\":\n\t\tcmgrLog = logger\n\t\tconnmgr.UseLogger(logger)\n\n\tcase \"CRTR\":\n\t\tcrtrLog = logger\n\t\trouting.UseLogger(crtrLog)\n\t}\n}\n\n\/\/ initSeelogLogger initializes a new seelog logger that is used as the backend\n\/\/ for all logging subsystems.\nfunc initSeelogLogger(logFile string) {\n\tconfig := `\n\t<seelog type=\"adaptive\" mininterval=\"2000000\" maxinterval=\"100000000\"\n\t\tcritmsgcount=\"500\" minlevel=\"trace\">\n\t\t<outputs formatid=\"all\">\n\t\t\t<console \/>\n\t\t\t<rollingfile type=\"size\" filename=\"%s\" maxsize=\"10485760\" maxrolls=\"3\" \/>\n\t\t<\/outputs>\n\t\t<formats>\n\t\t\t<format id=\"all\" format=\"%%Time %%Date [%%LEV] %%Msg%%n\" \/>\n\t\t<\/formats>\n\t<\/seelog>`\n\tconfig = fmt.Sprintf(config, logFile)\n\n\tlogger, err := seelog.LoggerFromConfigAsString(config)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create logger: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tbackendLog = logger\n}\n\n\/\/ setLogLevel sets the logging level for provided subsystem. Invalid\n\/\/ subsystems are ignored. Uninitialized subsystems are dynamically created as\n\/\/ needed.\nfunc setLogLevel(subsystemID string, logLevel string) {\n\t\/\/ Ignore invalid subsystems.\n\tlogger, ok := subsystemLoggers[subsystemID]\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ Default to info if the log level is invalid.\n\tlevel, ok := btclog.LogLevelFromString(logLevel)\n\tif !ok {\n\t\tlevel = btclog.InfoLvl\n\t}\n\n\t\/\/ Create new logger for the subsystem if needed.\n\tif logger == btclog.Disabled {\n\t\tlogger = btclog.NewSubsystemLogger(backendLog, subsystemID+\": \")\n\t\tuseLogger(subsystemID, logger)\n\t}\n\tlogger.SetLevel(level)\n}\n\n\/\/ setLogLevels sets the log level for all subsystem loggers to the passed\n\/\/ level. It also dynamically creates the subsystem loggers as needed, so it\n\/\/ can be used to initialize the logging system.\nfunc setLogLevels(logLevel string) {\n\t\/\/ Configure all subsystems with the new logging level. Dynamically\n\t\/\/ create loggers as needed.\n\tfor subsystemID := range subsystemLoggers {\n\t\tsetLogLevel(subsystemID, logLevel)\n\t}\n}\n\n\/\/ logClosure is used to provide a closure over expensive logging operations\n\/\/ so don't have to be performed when the logging level doesn't warrant it.\ntype logClosure func() string\n\n\/\/ String invokes the underlying function and returns the result.\nfunc (c logClosure) String() string {\n\treturn c()\n}\n\n\/\/ newLogClosure returns a new closure over a function that returns a string\n\/\/ which itself provides a Stringer interface so that it can be used with the\n\/\/ logging system.\nfunc newLogClosure(c func() string) logClosure {\n\treturn logClosure(c)\n}\n<commit_msg>log: add logger for new light client<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/btcsuite\/btclog\"\n\t\"github.com\/btcsuite\/seelog\"\n\t\"github.com\/lightninglabs\/neutrino\"\n\t\"github.com\/lightningnetwork\/lnd\/chainntnfs\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/discovery\"\n\t\"github.com\/lightningnetwork\/lnd\/htlcswitch\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwallet\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\"\n\t\"github.com\/roasbeef\/btcd\/connmgr\"\n)\n\n\/\/ Loggers per subsystem. Note that backendLog is a seelog logger that all of\n\/\/ the subsystem loggers route their messages to. When adding new subsystems,\n\/\/ add a reference here, to the subsystemLoggers map, and the useLogger\n\/\/ function.\nvar (\n\tbackendLog = seelog.Disabled\n\tltndLog = btclog.Disabled\n\tlnwlLog = btclog.Disabled\n\tpeerLog = btclog.Disabled\n\tdiscLog = btclog.Disabled\n\tfndgLog = btclog.Disabled\n\trpcsLog = btclog.Disabled\n\tsrvrLog = btclog.Disabled\n\tntfnLog = btclog.Disabled\n\tchdbLog = btclog.Disabled\n\thswcLog = btclog.Disabled\n\tutxnLog = btclog.Disabled\n\tbrarLog = btclog.Disabled\n\tcmgrLog = btclog.Disabled\n\tcrtrLog = btclog.Disabled\n\tbtcnLog = btclog.Disabled\n)\n\n\/\/ subsystemLoggers maps each subsystem identifier to its associated logger.\nvar subsystemLoggers = map[string]btclog.Logger{\n\t\"LTND\": ltndLog,\n\t\"LNWL\": lnwlLog,\n\t\"PEER\": peerLog,\n\t\"DISC\": discLog,\n\t\"RPCS\": rpcsLog,\n\t\"SRVR\": srvrLog,\n\t\"NTFN\": ntfnLog,\n\t\"CHDB\": chdbLog,\n\t\"FNDG\": fndgLog,\n\t\"HSWC\": hswcLog,\n\t\"UTXN\": utxnLog,\n\t\"BRAR\": brarLog,\n\t\"CMGR\": cmgrLog,\n\t\"CRTR\": crtrLog,\n\t\"BTCN\": btcnLog,\n}\n\n\/\/ useLogger updates the logger references for subsystemID to logger. Invalid\n\/\/ subsystems are ignored.\nfunc useLogger(subsystemID string, logger btclog.Logger) {\n\tif _, ok := subsystemLoggers[subsystemID]; !ok {\n\t\treturn\n\t}\n\tsubsystemLoggers[subsystemID] = logger\n\n\tswitch subsystemID {\n\tcase \"LTND\":\n\t\tltndLog = logger\n\n\tcase \"LNWL\":\n\t\tlnwlLog = logger\n\t\tlnwallet.UseLogger(logger)\n\n\tcase \"PEER\":\n\t\tpeerLog = logger\n\n\tcase \"DISC\":\n\t\tdiscLog = logger\n\t\tdiscovery.UseLogger(logger)\n\n\tcase \"RPCS\":\n\t\trpcsLog = logger\n\n\tcase \"SRVR\":\n\t\tsrvrLog = logger\n\n\tcase \"NTFN\":\n\t\tntfnLog = logger\n\t\tchainntnfs.UseLogger(logger)\n\n\tcase \"CHDB\":\n\t\tchdbLog = logger\n\t\tchanneldb.UseLogger(logger)\n\n\tcase \"FNDG\":\n\t\tfndgLog = logger\n\n\tcase \"HSWC\":\n\t\thswcLog = logger\n\t\thtlcswitch.UseLogger(logger)\n\n\tcase \"UTXN\":\n\t\tutxnLog = logger\n\n\tcase \"BRAR\":\n\t\tbrarLog = logger\n\n\tcase \"CMGR\":\n\t\tcmgrLog = logger\n\t\tconnmgr.UseLogger(logger)\n\n\tcase \"CRTR\":\n\t\tcrtrLog = logger\n\t\trouting.UseLogger(crtrLog)\n\n\tcase \"BTCN\":\n\t\tbtcnLog = logger\n\t\tneutrino.UseLogger(logger)\n\t}\n}\n\n\/\/ initSeelogLogger initializes a new seelog logger that is used as the backend\n\/\/ for all logging subsystems.\nfunc initSeelogLogger(logFile string) {\n\tconfig := `\n\t<seelog type=\"adaptive\" mininterval=\"2000000\" maxinterval=\"100000000\"\n\t\tcritmsgcount=\"500\" minlevel=\"trace\">\n\t\t<outputs formatid=\"all\">\n\t\t\t<console \/>\n\t\t\t<rollingfile type=\"size\" filename=\"%s\" maxsize=\"10485760\" maxrolls=\"3\" \/>\n\t\t<\/outputs>\n\t\t<formats>\n\t\t\t<format id=\"all\" format=\"%%Time %%Date [%%LEV] %%Msg%%n\" \/>\n\t\t<\/formats>\n\t<\/seelog>`\n\tconfig = fmt.Sprintf(config, logFile)\n\n\tlogger, err := seelog.LoggerFromConfigAsString(config)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create logger: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tbackendLog = logger\n}\n\n\/\/ setLogLevel sets the logging level for provided subsystem. Invalid\n\/\/ subsystems are ignored. Uninitialized subsystems are dynamically created as\n\/\/ needed.\nfunc setLogLevel(subsystemID string, logLevel string) {\n\t\/\/ Ignore invalid subsystems.\n\tlogger, ok := subsystemLoggers[subsystemID]\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ Default to info if the log level is invalid.\n\tlevel, ok := btclog.LogLevelFromString(logLevel)\n\tif !ok {\n\t\tlevel = btclog.InfoLvl\n\t}\n\n\t\/\/ Create new logger for the subsystem if needed.\n\tif logger == btclog.Disabled {\n\t\tlogger = btclog.NewSubsystemLogger(backendLog, subsystemID+\": \")\n\t\tuseLogger(subsystemID, logger)\n\t}\n\tlogger.SetLevel(level)\n}\n\n\/\/ setLogLevels sets the log level for all subsystem loggers to the passed\n\/\/ level. It also dynamically creates the subsystem loggers as needed, so it\n\/\/ can be used to initialize the logging system.\nfunc setLogLevels(logLevel string) {\n\t\/\/ Configure all subsystems with the new logging level. Dynamically\n\t\/\/ create loggers as needed.\n\tfor subsystemID := range subsystemLoggers {\n\t\tsetLogLevel(subsystemID, logLevel)\n\t}\n}\n\n\/\/ logClosure is used to provide a closure over expensive logging operations\n\/\/ so don't have to be performed when the logging level doesn't warrant it.\ntype logClosure func() string\n\n\/\/ String invokes the underlying function and returns the result.\nfunc (c logClosure) String() string {\n\treturn c()\n}\n\n\/\/ newLogClosure returns a new closure over a function that returns a string\n\/\/ which itself provides a Stringer interface so that it can be used with the\n\/\/ logging system.\nfunc newLogClosure(c func() string) logClosure {\n\treturn logClosure(c)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015, Peter Mrekaj. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.txt file.\n\npackage ptypes\n\n\/\/ IntWeight returns number of bits set to 1 in x.\nfunc IntWeight(x uint64) (w int) {\n\tfor x > 0 {\n\t\tw++\n\t\tx &= (x - 1)\n\t}\n\treturn w\n}\n\n\/\/ ClosestInt returns integer closest to x with the same weight.\nfunc ClosestInt(x uint64) (ci uint64, ok bool) {\n\tfor i := uint(0); i < 63; i++ {\n\t\tif (x>>i)&1 != (x >> (i + 1) & 1) {\n\t\t\tx ^= 1<<i | 1<<(i+1)\n\t\t\treturn x, true\n\t\t}\n\t}\n\treturn 0, false \/\/ If all bits are 0 or 1.\n}\n<commit_msg>Add time\/space compexity analysis for ptypes.ClosestInt function<commit_after>\/\/ Copyright (c) 2015, Peter Mrekaj. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.txt file.\n\npackage ptypes\n\n\/\/ IntWeight returns number of bits set to 1 in x.\nfunc IntWeight(x uint64) (w int) {\n\tfor x > 0 {\n\t\tw++\n\t\tx &= (x - 1)\n\t}\n\treturn w\n}\n\n\/\/ ClosestInt returns integer closest to x with the same weight.\n\/\/ The time complexity is O(n), where n is the integer width.\n\/\/ The O(1) additional space is needed.\nfunc ClosestInt(x uint64) (ci uint64, ok bool) {\n\tfor i := uint(0); i < 63; i++ {\n\t\tif (x>>i)&1 != x>>(i+1)&1 {\n\t\t\tx ^= 1<<i | 1<<(i+1)\n\t\t\treturn x, true\n\t\t}\n\t}\n\treturn 0, false \/\/ If all bits are 0 or 1.\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 beego Author. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Usage:\n\/\/\n\/\/ import \"github.com\/astaxie\/beego\/logs\"\n\/\/\n\/\/\tlog := NewLogger(10000)\n\/\/\tlog.SetLogger(\"console\", \"\")\n\/\/\n\/\/\t> the first params stand for how many channel\n\/\/\n\/\/ Use it like this:\n\/\/\n\/\/\tlog.Trace(\"trace\")\n\/\/\tlog.Info(\"info\")\n\/\/\tlog.Warn(\"warning\")\n\/\/\tlog.Debug(\"debug\")\n\/\/\tlog.Critical(\"critical\")\n\/\/\n\/\/ more docs http:\/\/beego.me\/docs\/module\/logs.md\npackage logs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gogap\/errors\"\n)\n\n\/\/ RFC5424 log message levels.\nconst (\n\tLevelError = iota\n\tLevelWarn\n\tLevelInfo\n\tLevelDebug\n)\n\n\/\/ Legacy loglevel constants to ensure backwards compatibility.\n\/\/\n\/\/ Deprecated: will be removed in 1.5.0.\n\ntype loggerType func() LoggerInterface\n\n\/\/ LoggerInterface defines the behavior of a log provider.\ntype LoggerInterface interface {\n\tInit(config string) error\n\tWriteMsg(msg string, level int) error\n\tDestroy()\n\tFlush()\n}\n\nvar adapters = make(map[string]loggerType)\n\n\/\/ Register makes a log provide available by the provided name.\n\/\/ If Register is called twice with the same name or if driver is nil,\n\/\/ it panics.\nfunc Register(name string, log loggerType) {\n\tif log == nil {\n\t\tpanic(\"logs: Register provide is nil\")\n\t}\n\tif _, dup := adapters[name]; dup {\n\t\tpanic(\"logs: Register called twice for provider \" + name)\n\t}\n\tadapters[name] = log\n}\n\n\/\/ BeeLogger is default logger in beego application.\n\/\/ it can contain several providers and log message into all providers.\ntype BeeLogger struct {\n\tlock sync.Mutex\n\tlevel int\n\tenableFuncCallDepth bool\n\tloggerFuncCallDepth int\n\tmsg chan *logMsg\n\toutputs map[string]LoggerInterface\n}\n\ntype logMsg struct {\n\tlevel int\n\tmsg string\n}\n\n\/\/ NewLogger returns a new BeeLogger.\n\/\/ channellen means the number of messages in chan.\n\/\/ if the buffering chan is full, logger adapters write to file or other way.\nfunc NewLogger(channellen int64) *BeeLogger {\n\tbl := new(BeeLogger)\n\tbl.level = LevelDebug\n\tbl.loggerFuncCallDepth = 2\n\tbl.msg = make(chan *logMsg, channellen)\n\tbl.outputs = make(map[string]LoggerInterface)\n\t\/\/bl.SetLogger(\"console\", \"\") \/\/ default output to console\n\tgo bl.startLogger()\n\treturn bl\n}\n\nfunc NewFileLogger(file string) *BeeLogger {\n\tl := NewLogger(1024)\n\tpath := strings.Split(file, \"\/\")\n\tif len(path) > 1 {\n\t\texec.Command(\"mkdir\", path[0]).Run()\n\t}\n\tl.SetLogger(\"console\", \"\")\n\tl.SetLogger(\"file\", fmt.Sprintf(`{\"filename\":\"%s\",\"maxdays\":7}`, file))\n\tl.EnableFuncCallDepth(true)\n\tl.SetLogFuncCallDepth(2)\n\treturn l\n}\n\n\/\/ SetLogger provides a given logger adapter into BeeLogger with config string.\n\/\/ config need to be correct JSON as string: {\"interval\":360}.\nfunc (bl *BeeLogger) SetLogger(adaptername string, config string) error {\n\tbl.lock.Lock()\n\tdefer bl.lock.Unlock()\n\tif log, ok := adapters[adaptername]; ok {\n\t\tlg := log()\n\t\terr := lg.Init(config)\n\t\tbl.outputs[adaptername] = lg\n\t\tif err != nil {\n\t\t\tfmt.Println(\"logs.BeeLogger.SetLogger: \" + err.Error())\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"logs: unknown adaptername %q (forgotten Register?)\", adaptername)\n\t}\n\treturn nil\n}\n\n\/\/ remove a logger adapter in BeeLogger.\nfunc (bl *BeeLogger) DelLogger(adaptername string) error {\n\tbl.lock.Lock()\n\tdefer bl.lock.Unlock()\n\tif lg, ok := bl.outputs[adaptername]; ok {\n\t\tlg.Destroy()\n\t\tdelete(bl.outputs, adaptername)\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"logs: unknown adaptername %q (forgotten Register?)\", adaptername)\n\t}\n}\n\nfunc (bl *BeeLogger) writerMsg(loglevel int, msg string) error {\n\tif loglevel > bl.level {\n\t\treturn nil\n\t}\n\tlm := new(logMsg)\n\tlm.level = loglevel\n\tif bl.enableFuncCallDepth {\n\t\t_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)\n\t\tif ok {\n\t\t\t_, filename := path.Split(file)\n\t\t\tlm.msg = fmt.Sprintf(\"[%s:%d] %s\", filename, line, msg)\n\t\t} else {\n\t\t\tlm.msg = msg\n\t\t}\n\t} else {\n\t\tlm.msg = msg\n\t}\n\tbl.msg <- lm\n\treturn nil\n}\n\n\/\/ Set log message level.\n\/\/\n\/\/ If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),\n\/\/ log providers will not even be sent the message.\nfunc (bl *BeeLogger) SetLevel(l int) {\n\tbl.level = l\n}\n\n\/\/ set log funcCallDepth\nfunc (bl *BeeLogger) SetLogFuncCallDepth(d int) {\n\tbl.loggerFuncCallDepth = d\n}\n\n\/\/ enable log funcCallDepth\nfunc (bl *BeeLogger) EnableFuncCallDepth(b bool) {\n\tbl.enableFuncCallDepth = b\n}\n\n\/\/ start logger chan reading.\n\/\/ when chan is not empty, write logs.\nfunc (bl *BeeLogger) startLogger() {\n\tfor {\n\t\tselect {\n\t\tcase bm := <-bl.msg:\n\t\t\tfor _, l := range bl.outputs {\n\t\t\t\terr := l.WriteMsg(bm.msg, bm.level)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERROR, unable to WriteMsg:\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Log ERROR level message.\nfunc (bl *BeeLogger) Error(v ...interface{}) {\n\tbl.log(\"Error\", LevelError, v)\n}\n\n\/\/ Log WARNING level message.\nfunc (bl *BeeLogger) Warn(v ...interface{}) {\n\tbl.log(\"Warn\", LevelWarn, v)\n}\n\n\/\/ Log INFORMATIONAL level message.\nfunc (bl *BeeLogger) Info(v ...interface{}) {\n\tbl.log(\"Info\", LevelInfo, v)\n}\n\n\/\/ Log DEBUG level message.\nfunc (bl *BeeLogger) Debug(v ...interface{}) {\n\tbl.log(\"Debug\", LevelDebug, v)\n}\n\nfunc (bl *BeeLogger) log(tp string, level int, v ...interface{}) {\n\tmsg := fmt.Sprintf(\"[\"+tp+\"] \"+generateFmtStr(len(v)), v...)\n\tfor _, item := range v {\n\t\tif items, ok := item.([]interface{}); ok {\n\t\t\tif len(items) > 0 {\n\t\t\t\titem = items[0]\n\t\t\t}\n\t\t}\n\t\tstack := handleError(item)\n\t\tif stack != \"\" {\n\t\t\tmsg = msg + \"\\n\" + stack\n\t\t}\n\t}\n\tbl.writerMsg(level, msg)\n}\n\nfunc (bl *BeeLogger) Pretty(v interface{}, message string) {\n\tb, _ := json.MarshalIndent(v, \" \", \" \")\n\tif message == \"\" {\n\t\tmessage = reflect.TypeOf(v).String()\n\t}\n\tbl.writerMsg(LevelDebug, fmt.Sprintf(\"[Pretty]\\n%s\\n%s\", message, string(b)))\n}\n\n\/\/ flush all chan data.\nfunc (bl *BeeLogger) Flush() {\n\tfor _, l := range bl.outputs {\n\t\tl.Flush()\n\t}\n}\n\n\/\/ close logger, flush all chan data and destroy all adapters in BeeLogger.\nfunc (bl *BeeLogger) Close() {\n\tfor {\n\t\tif len(bl.msg) > 0 {\n\t\t\tbm := <-bl.msg\n\t\t\tfor _, l := range bl.outputs {\n\t\t\t\terr := l.WriteMsg(bm.msg, bm.level)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERROR, unable to WriteMsg (while closing logger):\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor _, l := range bl.outputs {\n\t\tl.Flush()\n\t\tl.Destroy()\n\t}\n}\n\nfunc generateFmtStr(n int) string {\n\treturn strings.Repeat(\"%v \", n)\n}\n\nfunc handleError(v interface{}) (msg string) {\n\tif err, ok := v.(errors.ErrCode); ok {\n\t\tmsg = msg + err.StackTrace()\n\t}\n\treturn\n}\n<commit_msg>fix console<commit_after>\/\/ Copyright 2014 beego Author. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Usage:\n\/\/\n\/\/ import \"github.com\/astaxie\/beego\/logs\"\n\/\/\n\/\/\tlog := NewLogger(10000)\n\/\/\tlog.SetLogger(\"console\", \"\")\n\/\/\n\/\/\t> the first params stand for how many channel\n\/\/\n\/\/ Use it like this:\n\/\/\n\/\/\tlog.Trace(\"trace\")\n\/\/\tlog.Info(\"info\")\n\/\/\tlog.Warn(\"warning\")\n\/\/\tlog.Debug(\"debug\")\n\/\/\tlog.Critical(\"critical\")\n\/\/\n\/\/ more docs http:\/\/beego.me\/docs\/module\/logs.md\npackage logs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gogap\/errors\"\n)\n\n\/\/ RFC5424 log message levels.\nconst (\n\tLevelError = iota\n\tLevelWarn\n\tLevelInfo\n\tLevelDebug\n)\n\n\/\/ Legacy loglevel constants to ensure backwards compatibility.\n\/\/\n\/\/ Deprecated: will be removed in 1.5.0.\n\ntype loggerType func() LoggerInterface\n\n\/\/ LoggerInterface defines the behavior of a log provider.\ntype LoggerInterface interface {\n\tInit(config string) error\n\tWriteMsg(msg string, level int) error\n\tDestroy()\n\tFlush()\n}\n\nvar adapters = make(map[string]loggerType)\n\n\/\/ Register makes a log provide available by the provided name.\n\/\/ If Register is called twice with the same name or if driver is nil,\n\/\/ it panics.\nfunc Register(name string, log loggerType) {\n\tif log == nil {\n\t\tpanic(\"logs: Register provide is nil\")\n\t}\n\tif _, dup := adapters[name]; dup {\n\t\tpanic(\"logs: Register called twice for provider \" + name)\n\t}\n\tadapters[name] = log\n}\n\n\/\/ BeeLogger is default logger in beego application.\n\/\/ it can contain several providers and log message into all providers.\ntype BeeLogger struct {\n\tlock sync.Mutex\n\tlevel int\n\tenableFuncCallDepth bool\n\tloggerFuncCallDepth int\n\tmsg chan *logMsg\n\toutputs map[string]LoggerInterface\n}\n\ntype logMsg struct {\n\tlevel int\n\tmsg string\n}\n\n\/\/ NewLogger returns a new BeeLogger.\n\/\/ channellen means the number of messages in chan.\n\/\/ if the buffering chan is full, logger adapters write to file or other way.\nfunc NewLogger(channellen int64) *BeeLogger {\n\tbl := new(BeeLogger)\n\tbl.level = LevelDebug\n\tbl.loggerFuncCallDepth = 2\n\tbl.msg = make(chan *logMsg, channellen)\n\tbl.outputs = make(map[string]LoggerInterface)\n\tbl.SetLogger(\"console\", \"\") \/\/ default output to console\n\tgo bl.startLogger()\n\treturn bl\n}\n\nfunc NewFileLogger(file string) *BeeLogger {\n\tl := NewLogger(1024)\n\tpath := strings.Split(file, \"\/\")\n\tif len(path) > 1 {\n\t\texec.Command(\"mkdir\", path[0]).Run()\n\t}\n\tl.SetLogger(\"file\", fmt.Sprintf(`{\"filename\":\"%s\",\"maxdays\":7}`, file))\n\tl.EnableFuncCallDepth(true)\n\tl.SetLogFuncCallDepth(2)\n\treturn l\n}\n\n\/\/ SetLogger provides a given logger adapter into BeeLogger with config string.\n\/\/ config need to be correct JSON as string: {\"interval\":360}.\nfunc (bl *BeeLogger) SetLogger(adaptername string, config string) error {\n\tbl.lock.Lock()\n\tdefer bl.lock.Unlock()\n\tif log, ok := adapters[adaptername]; ok {\n\t\tlg := log()\n\t\terr := lg.Init(config)\n\t\tbl.outputs[adaptername] = lg\n\t\tif err != nil {\n\t\t\tfmt.Println(\"logs.BeeLogger.SetLogger: \" + err.Error())\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"logs: unknown adaptername %q (forgotten Register?)\", adaptername)\n\t}\n\treturn nil\n}\n\n\/\/ remove a logger adapter in BeeLogger.\nfunc (bl *BeeLogger) DelLogger(adaptername string) error {\n\tbl.lock.Lock()\n\tdefer bl.lock.Unlock()\n\tif lg, ok := bl.outputs[adaptername]; ok {\n\t\tlg.Destroy()\n\t\tdelete(bl.outputs, adaptername)\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"logs: unknown adaptername %q (forgotten Register?)\", adaptername)\n\t}\n}\n\nfunc (bl *BeeLogger) writerMsg(loglevel int, msg string) error {\n\tif loglevel > bl.level {\n\t\treturn nil\n\t}\n\tlm := new(logMsg)\n\tlm.level = loglevel\n\tif bl.enableFuncCallDepth {\n\t\t_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)\n\t\tif ok {\n\t\t\t_, filename := path.Split(file)\n\t\t\tlm.msg = fmt.Sprintf(\"[%s:%d] %s\", filename, line, msg)\n\t\t} else {\n\t\t\tlm.msg = msg\n\t\t}\n\t} else {\n\t\tlm.msg = msg\n\t}\n\tbl.msg <- lm\n\treturn nil\n}\n\n\/\/ Set log message level.\n\/\/\n\/\/ If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),\n\/\/ log providers will not even be sent the message.\nfunc (bl *BeeLogger) SetLevel(l int) {\n\tbl.level = l\n}\n\n\/\/ set log funcCallDepth\nfunc (bl *BeeLogger) SetLogFuncCallDepth(d int) {\n\tbl.loggerFuncCallDepth = d\n}\n\n\/\/ enable log funcCallDepth\nfunc (bl *BeeLogger) EnableFuncCallDepth(b bool) {\n\tbl.enableFuncCallDepth = b\n}\n\n\/\/ start logger chan reading.\n\/\/ when chan is not empty, write logs.\nfunc (bl *BeeLogger) startLogger() {\n\tfor {\n\t\tselect {\n\t\tcase bm := <-bl.msg:\n\t\t\tfor _, l := range bl.outputs {\n\t\t\t\terr := l.WriteMsg(bm.msg, bm.level)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERROR, unable to WriteMsg:\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Log ERROR level message.\nfunc (bl *BeeLogger) Error(v ...interface{}) {\n\tbl.log(\"Error\", LevelError, v)\n}\n\n\/\/ Log WARNING level message.\nfunc (bl *BeeLogger) Warn(v ...interface{}) {\n\tbl.log(\"Warn\", LevelWarn, v)\n}\n\n\/\/ Log INFORMATIONAL level message.\nfunc (bl *BeeLogger) Info(v ...interface{}) {\n\tbl.log(\"Info\", LevelInfo, v)\n}\n\n\/\/ Log DEBUG level message.\nfunc (bl *BeeLogger) Debug(v ...interface{}) {\n\tbl.log(\"Debug\", LevelDebug, v)\n}\n\nfunc (bl *BeeLogger) log(tp string, level int, v ...interface{}) {\n\tmsg := fmt.Sprintf(\"[\"+tp+\"] \"+generateFmtStr(len(v)), v...)\n\tfor _, item := range v {\n\t\tif items, ok := item.([]interface{}); ok {\n\t\t\tif len(items) > 0 {\n\t\t\t\titem = items[0]\n\t\t\t}\n\t\t}\n\t\tstack := handleError(item)\n\t\tif stack != \"\" {\n\t\t\tmsg = msg + \"\\n\" + stack\n\t\t}\n\t}\n\tbl.writerMsg(level, msg)\n}\n\nfunc (bl *BeeLogger) Pretty(v interface{}, message string) {\n\tb, _ := json.MarshalIndent(v, \" \", \" \")\n\tif message == \"\" {\n\t\tmessage = reflect.TypeOf(v).String()\n\t}\n\tbl.writerMsg(LevelDebug, fmt.Sprintf(\"[Pretty]\\n%s\\n%s\", message, string(b)))\n}\n\n\/\/ flush all chan data.\nfunc (bl *BeeLogger) Flush() {\n\tfor _, l := range bl.outputs {\n\t\tl.Flush()\n\t}\n}\n\n\/\/ close logger, flush all chan data and destroy all adapters in BeeLogger.\nfunc (bl *BeeLogger) Close() {\n\tfor {\n\t\tif len(bl.msg) > 0 {\n\t\t\tbm := <-bl.msg\n\t\t\tfor _, l := range bl.outputs {\n\t\t\t\terr := l.WriteMsg(bm.msg, bm.level)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"ERROR, unable to WriteMsg (while closing logger):\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor _, l := range bl.outputs {\n\t\tl.Flush()\n\t\tl.Destroy()\n\t}\n}\n\nfunc generateFmtStr(n int) string {\n\treturn strings.Repeat(\"%v \", n)\n}\n\nfunc handleError(v interface{}) (msg string) {\n\tif err, ok := v.(errors.ErrCode); ok {\n\t\tmsg = msg + err.StackTrace()\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\npackage gofast\n\nimport \"io\"\nimport \"os\"\nimport \"fmt\"\nimport \"time\"\nimport \"strings\"\n\n\/\/ Logger interface for gofast logging, applications can\n\/\/ supply a logger object implementing this interface or\n\/\/ gofast will fall back to the defaul logger.\ntype Logger interface {\n\tFatalf(format string, v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tWarnf(format string, v ...interface{})\n\tInfof(format string, v ...interface{})\n\tVerbosef(format string, v ...interface{})\n\tDebugf(format string, v ...interface{})\n\tTracef(format string, v ...interface{})\n}\n\n\/\/ * default log file is os.Stdout\n\/\/ * default level is LogLevelInfo\ntype DefaultLogger struct {\n\tlevel logLevel\n\toutput io.Writer\n}\n\ntype logLevel int\n\nconst (\n\tlogLevelIgnore logLevel = iota + 1\n\tlogLevelFatal\n\tlogLevelError\n\tlogLevelWarn\n\tlogLevelInfo\n\tlogLevelVerbose\n\tlogLevelDebug\n\tlogLevelTrace\n)\n\nvar log Logger \/\/ object used by gofast component for logging.\n\nfunc setLogger(logger Logger, config map[string]interface{}) Logger {\n\tif logger != nil {\n\t\tlog = logger\n\t\treturn log\n\t}\n\n\tvar err error\n\tlevel := logLevelInfo\n\tif val, ok := config[\"log.level\"]; ok {\n\t\tlevel = string2logLevel(val.(string))\n\t}\n\tlogfd := os.Stdout\n\tif val, ok := config[\"log.file\"]; ok {\n\t\tlogfile := val.(string)\n\t\tlogfd, err = os.OpenFile(logfile, os.O_RDWR|os.O_APPEND, 0660)\n\t\tif err != nil {\n\t\t\tif logfd, err = os.Create(logfile); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\tlog = &DefaultLogger{level: level, output: logfd}\n\treturn log\n}\n\nfunc (l *DefaultLogger) Fatalf(format string, v ...interface{}) {\n\tl.printf(logLevelFatal, format, v...)\n}\n\nfunc (l *DefaultLogger) Errorf(format string, v ...interface{}) {\n\tl.printf(logLevelError, format, v...)\n}\n\nfunc (l *DefaultLogger) Warnf(format string, v ...interface{}) {\n\tl.printf(logLevelWarn, format, v...)\n}\n\nfunc (l *DefaultLogger) Infof(format string, v ...interface{}) {\n\tl.printf(logLevelInfo, format, v...)\n}\n\nfunc (l *DefaultLogger) Verbosef(format string, v ...interface{}) {\n\tl.printf(logLevelVerbose, format, v...)\n}\n\nfunc (l *DefaultLogger) Debugf(format string, v ...interface{}) {\n\tl.printf(logLevelDebug, format, v...)\n}\n\nfunc (l *DefaultLogger) Tracef(format string, v ...interface{}) {\n\tl.printf(logLevelTrace, format, v...)\n}\n\nfunc (l *DefaultLogger) printf(level logLevel, format string, v ...interface{}) {\n\tif l.canlog(level) {\n\t\tts := time.Now().Format(\"2006-01-02T15:04:05.999Z-07:00\")\n\t\tfmt.Fprintf(l.output, ts+\" [\"+l.level.String()+\"] \"+format, v...)\n\t}\n}\n\nfunc (l *DefaultLogger) canlog(level logLevel) bool {\n\tif level <= l.level {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l logLevel) String() string {\n\tswitch l {\n\tcase logLevelIgnore:\n\t\treturn \"Ignore\"\n\tcase logLevelFatal:\n\t\treturn \"Fatal\"\n\tcase logLevelError:\n\t\treturn \"Error\"\n\tcase logLevelWarn:\n\t\treturn \"Warn\"\n\tcase logLevelInfo:\n\t\treturn \"Info\"\n\tcase logLevelVerbose:\n\t\treturn \"Verbose\"\n\tcase logLevelDebug:\n\t\treturn \"Debug\"\n\tcase logLevelTrace:\n\t\treturn \"Trace\"\n\t}\n\tpanic(\"unexpected log level\") \/\/ should never reach here\n}\n\nfunc string2logLevel(s string) logLevel {\n\ts = strings.ToLower(s)\n\tswitch s {\n\tcase \"ignore\":\n\t\treturn logLevelIgnore\n\tcase \"fatal\":\n\t\treturn logLevelFatal\n\tcase \"error\":\n\t\treturn logLevelError\n\tcase \"warn\":\n\t\treturn logLevelWarn\n\tcase \"info\":\n\t\treturn logLevelInfo\n\tcase \"verbose\":\n\t\treturn logLevelVerbose\n\tcase \"debug\":\n\t\treturn logLevelDebug\n\tcase \"trace\":\n\t\treturn logLevelTrace\n\t}\n\tpanic(\"unexpected log level\") \/\/ should never reach here\n}\n<commit_msg>bug fix in log leveling.<commit_after>\/\/ Copyright (c) 2014 Couchbase, Inc.\n\npackage gofast\n\nimport \"io\"\nimport \"os\"\nimport \"fmt\"\nimport \"time\"\nimport \"strings\"\n\n\/\/ Logger interface for gofast logging, applications can\n\/\/ supply a logger object implementing this interface or\n\/\/ gofast will fall back to the defaul logger.\ntype Logger interface {\n\tFatalf(format string, v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tWarnf(format string, v ...interface{})\n\tInfof(format string, v ...interface{})\n\tVerbosef(format string, v ...interface{})\n\tDebugf(format string, v ...interface{})\n\tTracef(format string, v ...interface{})\n}\n\n\/\/ * default log file is os.Stdout\n\/\/ * default level is LogLevelInfo\ntype DefaultLogger struct {\n\tlevel logLevel\n\toutput io.Writer\n}\n\ntype logLevel int\n\nconst (\n\tlogLevelIgnore logLevel = iota + 1\n\tlogLevelFatal\n\tlogLevelError\n\tlogLevelWarn\n\tlogLevelInfo\n\tlogLevelVerbose\n\tlogLevelDebug\n\tlogLevelTrace\n)\n\nvar log Logger \/\/ object used by gofast component for logging.\n\nfunc setLogger(logger Logger, config map[string]interface{}) Logger {\n\tif logger != nil {\n\t\tlog = logger\n\t\treturn log\n\t}\n\n\tvar err error\n\tlevel := logLevelInfo\n\tif val, ok := config[\"log.level\"]; ok {\n\t\tlevel = string2logLevel(val.(string))\n\t}\n\tlogfd := os.Stdout\n\tif val, ok := config[\"log.file\"]; ok {\n\t\tlogfile := val.(string)\n\t\tlogfd, err = os.OpenFile(logfile, os.O_RDWR|os.O_APPEND, 0660)\n\t\tif err != nil {\n\t\t\tif logfd, err = os.Create(logfile); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\tlog = &DefaultLogger{level: level, output: logfd}\n\treturn log\n}\n\nfunc (l *DefaultLogger) Fatalf(format string, v ...interface{}) {\n\tl.printf(logLevelFatal, format, v...)\n}\n\nfunc (l *DefaultLogger) Errorf(format string, v ...interface{}) {\n\tl.printf(logLevelError, format, v...)\n}\n\nfunc (l *DefaultLogger) Warnf(format string, v ...interface{}) {\n\tl.printf(logLevelWarn, format, v...)\n}\n\nfunc (l *DefaultLogger) Infof(format string, v ...interface{}) {\n\tl.printf(logLevelInfo, format, v...)\n}\n\nfunc (l *DefaultLogger) Verbosef(format string, v ...interface{}) {\n\tl.printf(logLevelVerbose, format, v...)\n}\n\nfunc (l *DefaultLogger) Debugf(format string, v ...interface{}) {\n\tl.printf(logLevelDebug, format, v...)\n}\n\nfunc (l *DefaultLogger) Tracef(format string, v ...interface{}) {\n\tl.printf(logLevelTrace, format, v...)\n}\n\nfunc (l *DefaultLogger) printf(level logLevel, format string, v ...interface{}) {\n\tif l.canlog(level) {\n\t\tts := time.Now().Format(\"2006-01-02T15:04:05.999Z-07:00\")\n\t\tfmt.Fprintf(l.output, ts+\" [\"+level.String()+\"] \"+format, v...)\n\t}\n}\n\nfunc (l *DefaultLogger) canlog(level logLevel) bool {\n\tif level <= l.level {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l logLevel) String() string {\n\tswitch l {\n\tcase logLevelIgnore:\n\t\treturn \"Ignore\"\n\tcase logLevelFatal:\n\t\treturn \"Fatal\"\n\tcase logLevelError:\n\t\treturn \"Error\"\n\tcase logLevelWarn:\n\t\treturn \"Warn\"\n\tcase logLevelInfo:\n\t\treturn \"Info\"\n\tcase logLevelVerbose:\n\t\treturn \"Verbose\"\n\tcase logLevelDebug:\n\t\treturn \"Debug\"\n\tcase logLevelTrace:\n\t\treturn \"Trace\"\n\t}\n\tpanic(\"unexpected log level\") \/\/ should never reach here\n}\n\nfunc string2logLevel(s string) logLevel {\n\ts = strings.ToLower(s)\n\tswitch s {\n\tcase \"ignore\":\n\t\treturn logLevelIgnore\n\tcase \"fatal\":\n\t\treturn logLevelFatal\n\tcase \"error\":\n\t\treturn logLevelError\n\tcase \"warn\":\n\t\treturn logLevelWarn\n\tcase \"info\":\n\t\treturn logLevelInfo\n\tcase \"verbose\":\n\t\treturn logLevelVerbose\n\tcase \"debug\":\n\t\treturn logLevelDebug\n\tcase \"trace\":\n\t\treturn logLevelTrace\n\t}\n\tpanic(\"unexpected log level\") \/\/ should never reach here\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\nimport \"io\"\nimport \"os\"\nimport \"os\/exec\"\nimport \"strings\"\n\nimport . \".\/lua\"\nimport \".\/alias\"\nimport \".\/conio\/readline\"\nimport \".\/dos\"\nimport \".\/interpreter\"\nimport \".\/mbcs\"\n\nimport \"github.com\/shiena\/ansicolor\"\n\ntype LuaFunction struct {\n\tL *Lua\n\tregistoryKey string\n}\n\nvar LuaInstanceToCmd = map[uintptr]*interpreter.Interpreter{}\n\nfunc (this LuaFunction) String() string {\n\treturn \"<<Lua-function>>\"\n}\n\nfunc (this LuaFunction) Call(cmd *interpreter.Interpreter) (interpreter.NextT, error) {\n\tthis.L.GetField(Registory, this.registoryKey)\n\tthis.L.NewTable()\n\tfor i, arg1 := range cmd.Args {\n\t\tthis.L.PushInteger(i)\n\t\tthis.L.PushString(arg1)\n\t\tthis.L.SetTable(-3)\n\t}\n\tLuaInstanceToCmd[this.L.Id()] = cmd\n\terr := this.L.Call(1, 0)\n\treturn interpreter.CONTINUE, err\n}\n\nfunc cmdAlias(L *Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tkey := strings.ToLower(name)\n\tswitch L.GetType(2) {\n\tcase TSTRING:\n\t\tvalue, err := L.ToString(2)\n\t\tif err == nil {\n\t\t\talias.Table[key] = alias.New(value)\n\t\t} else {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t}\n\tcase TFUNCTION:\n\t\tregkey := \"nyagos.alias.\" + key\n\t\tL.SetField(Registory, regkey)\n\t\talias.Table[key] = LuaFunction{L, regkey}\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdSetEnv(L *Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tvalue, valueErr := L.ToString(2)\n\tif valueErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(valueErr.Error())\n\t\treturn 2\n\t}\n\tos.Setenv(name, value)\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdGetEnv(L *Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\treturn 1\n\t}\n\tvalue := os.Getenv(name)\n\tif len(value) > 0 {\n\t\tL.PushString(value)\n\t} else {\n\t\tL.PushNil()\n\t}\n\treturn 1\n}\n\nfunc cmdExec(L *Lua) int {\n\tstatement, statementErr := L.ToString(1)\n\tif statementErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(statementErr.Error())\n\t\treturn 2\n\t}\n\t_, err := interpreter.New().Interpret(statement)\n\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdEval(L *Lua) int {\n\tstatement, statementErr := L.ToString(1)\n\tif statementErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(statementErr.Error())\n\t\treturn 2\n\t}\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n\tgo func(statement string, w *os.File) {\n\t\tit := interpreter.New()\n\t\tit.Stdout = w\n\t\tit.Interpret(statement)\n\t\tw.Close()\n\t}(statement, w)\n\n\tvar result = []byte{}\n\tfor {\n\t\tbuffer := make([]byte, 256)\n\t\tsize, err := r.Read(buffer)\n\t\tif err != nil || size <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tresult = append(result, buffer[0:size]...)\n\t}\n\tr.Close()\n\tL.PushAnsiString(result)\n\treturn 1\n}\n\nfunc cmdWrite(L *Lua) int {\n\tvar out io.Writer = os.Stdout\n\tcmd, cmdOk := LuaInstanceToCmd[L.Id()]\n\tif cmdOk && cmd != nil && cmd.Stdout != nil {\n\t\tout = cmd.Stdout\n\t}\n\tswitch out.(type) {\n\tcase *os.File:\n\t\tout = ansicolor.NewAnsiColorWriter(out)\n\t}\n\n\tn := L.GetTop()\n\tfor i := 1; i <= n; i++ {\n\t\tstr, err := L.ToString(i)\n\t\tif err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t}\n\t\tif i > 1 {\n\t\t\tfmt.Fprint(out, \"\\t\")\n\t\t}\n\t\tfmt.Fprint(out, str)\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdGetwd(L *Lua) int {\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tL.PushString(wd)\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc cmdWhich(L *Lua) int {\n\tif L.GetType(-1) != TSTRING {\n\t\treturn 0\n\t}\n\tname, nameErr := L.ToString(-1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tpath, err := exec.LookPath(name)\n\tif err == nil {\n\t\tL.PushString(path)\n\t\treturn 1\n\t} else {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n}\n\nfunc cmdAtoU(L *Lua) int {\n\tstr, err := mbcs.AtoU(L.ToAnsiString(1))\n\tif err == nil {\n\t\tL.PushString(str)\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc cmdUtoA(L *Lua) int {\n\tutf8, utf8err := L.ToString(1)\n\tif utf8err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(utf8err.Error())\n\t\treturn 2\n\t}\n\tstr, err := mbcs.UtoA(utf8)\n\tif err == nil {\n\t\tif len(str) >= 1 {\n\t\t\tL.PushAnsiString(str[:len(str)-1])\n\t\t} else {\n\t\t\tL.PushString(\"\")\n\t\t}\n\t\tL.PushNil()\n\t\treturn 2\n\t} else {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n}\n\nfunc cmdGlob(L *Lua) int {\n\tif !L.IsString(-1) {\n\t\treturn 0\n\t}\n\twildcard, wildcardErr := L.ToString(-1)\n\tif wildcardErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(wildcardErr.Error())\n\t\treturn 2\n\t}\n\tlist, err := dos.Glob(wildcard)\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t} else {\n\t\tL.NewTable()\n\t\tfor i := 0; i < len(list); i++ {\n\t\t\tL.PushInteger(i + 1)\n\t\t\tL.PushString(list[i])\n\t\t\tL.SetTable(-3)\n\t\t}\n\t\treturn 1\n\t}\n}\n\ntype KeyLuaFuncT struct {\n\tL *Lua\n\tregistoryKey string\n}\n\nfunc (this *KeyLuaFuncT) Call(buffer *readline.Buffer) readline.Result {\n\tthis.L.GetField(Registory, this.registoryKey)\n\tif err := this.L.Call(0, 0); err != nil {\n\t\tfmt.Println(os.Stderr, err)\n\t}\n\treturn readline.CONTINUE\n}\n\nfunc cmdBindKey(L *Lua) int {\n\tkey, keyErr := L.ToString(-2)\n\tif keyErr != nil {\n\t\tL.PushString(keyErr.Error())\n\t\treturn 1\n\t}\n\tkey = strings.Replace(strings.ToUpper(key), \"-\", \"_\", -1)\n\tswitch L.GetType(-1) {\n\tcase TFUNCTION:\n\t\tregkey := \"nyagos.bind.\" + key\n\t\tL.SetField(Registory, regkey)\n\t\tif err := readline.BindKeyFunc(key, &KeyLuaFuncT{L, regkey}); err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t} else {\n\t\t\tL.PushBool(true)\n\t\t\treturn 1\n\t\t}\n\tdefault:\n\t\tval, valErr := L.ToString(-1)\n\t\tif valErr != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(valErr.Error())\n\t\t\treturn 2\n\t\t}\n\t\terr := readline.BindKeySymbol(key, val)\n\t\tif err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t} else {\n\t\t\tL.PushBool(true)\n\t\t\treturn 1\n\t\t}\n\t}\n}\n\nfunc SetLuaFunctions(this *Lua) {\n\tstackPos := this.GetTop()\n\tdefer this.SetTop(stackPos)\n\tthis.NewTable()\n\tthis.PushGoFunction(cmdAlias)\n\tthis.SetField(-2, \"alias\")\n\tthis.PushGoFunction(cmdSetEnv)\n\tthis.SetField(-2, \"setenv\")\n\tthis.PushGoFunction(cmdGetEnv)\n\tthis.SetField(-2, \"getenv\")\n\tthis.PushGoFunction(cmdExec)\n\tthis.SetField(-2, \"exec\")\n\tthis.PushGoFunction(cmdWrite)\n\tthis.SetField(-2, \"write\")\n\tthis.PushGoFunction(cmdAtoU)\n\tthis.SetField(-2, \"atou\")\n\tthis.PushGoFunction(cmdUtoA)\n\tthis.SetField(-2, \"utoa\")\n\tthis.PushGoFunction(cmdGetwd)\n\tthis.SetField(-2, \"getwd\")\n\tthis.PushGoFunction(cmdWhich)\n\tthis.SetField(-2, \"which\")\n\tthis.PushGoFunction(cmdEval)\n\tthis.SetField(-2, \"eval\")\n\tthis.PushGoFunction(cmdGlob)\n\tthis.SetField(-2, \"glob\")\n\tthis.PushGoFunction(cmdBindKey)\n\tthis.SetField(-2, \"bindkey\")\n\texeName, exeNameErr := dos.GetModuleFileName()\n\tif exeNameErr != nil {\n\t\tfmt.Fprintln(os.Stderr, exeNameErr)\n\t} else {\n\t\tthis.PushString(exeName)\n\t\tthis.SetField(-2, \"exe\")\n\t}\n\tthis.SetGlobal(\"nyagos\")\n\n\t\/\/ replace io.getenv\n\tthis.GetGlobal(\"os\")\n\tthis.PushGoFunction(cmdGetEnv)\n\tthis.SetField(-2, \"getenv\")\n\n\tvar orgArgHook func([]string) []string\n\torgArgHook = interpreter.SetArgsHook(func(args []string) []string {\n\t\tpos := this.GetTop()\n\t\tdefer this.SetTop(pos)\n\t\tthis.GetGlobal(\"nyagos\")\n\t\tthis.GetField(-1, \"argsfilter\")\n\t\tif !this.IsFunction(-1) {\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tthis.NewTable()\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\tthis.PushInteger(i)\n\t\t\tthis.PushString(args[i])\n\t\t\tthis.SetTable(-3)\n\t\t}\n\t\tif err := this.Call(1, 1); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tif this.GetType(-1) != TTABLE {\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tnewargs := []string{}\n\t\tfor i := 0; true; i++ {\n\t\t\tthis.PushInteger(i)\n\t\t\tthis.GetTable(-2)\n\t\t\tif this.GetType(-1) == TNIL {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\targ1, arg1err := this.ToString(-1)\n\t\t\tif arg1err == nil {\n\t\t\t\tnewargs = append(newargs, arg1)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(os.Stderr, arg1err.Error())\n\t\t\t}\n\t\t\tthis.Pop(1)\n\t\t}\n\t\treturn orgArgHook(newargs)\n\t})\n}\n<commit_msg>import . \".\/lua\" -> import \".\/lua\"<commit_after>package main\n\nimport \"fmt\"\nimport \"io\"\nimport \"os\"\nimport \"os\/exec\"\nimport \"strings\"\n\nimport \".\/lua\"\nimport \".\/alias\"\nimport \".\/conio\/readline\"\nimport \".\/dos\"\nimport \".\/interpreter\"\nimport \".\/mbcs\"\n\nimport \"github.com\/shiena\/ansicolor\"\n\ntype LuaFunction struct {\n\tL *lua.Lua\n\tregistoryKey string\n}\n\nvar LuaInstanceToCmd = map[uintptr]*interpreter.Interpreter{}\n\nfunc (this LuaFunction) String() string {\n\treturn \"<<Lua-function>>\"\n}\n\nfunc (this LuaFunction) Call(cmd *interpreter.Interpreter) (interpreter.NextT, error) {\n\tthis.L.GetField(lua.Registory, this.registoryKey)\n\tthis.L.NewTable()\n\tfor i, arg1 := range cmd.Args {\n\t\tthis.L.PushInteger(i)\n\t\tthis.L.PushString(arg1)\n\t\tthis.L.SetTable(-3)\n\t}\n\tLuaInstanceToCmd[this.L.Id()] = cmd\n\terr := this.L.Call(1, 0)\n\treturn interpreter.CONTINUE, err\n}\n\nfunc cmdAlias(L *lua.Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tkey := strings.ToLower(name)\n\tswitch L.GetType(2) {\n\tcase lua.TSTRING:\n\t\tvalue, err := L.ToString(2)\n\t\tif err == nil {\n\t\t\talias.Table[key] = alias.New(value)\n\t\t} else {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t}\n\tcase lua.TFUNCTION:\n\t\tregkey := \"nyagos.alias.\" + key\n\t\tL.SetField(lua.Registory, regkey)\n\t\talias.Table[key] = LuaFunction{L, regkey}\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdSetEnv(L *lua.Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tvalue, valueErr := L.ToString(2)\n\tif valueErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(valueErr.Error())\n\t\treturn 2\n\t}\n\tos.Setenv(name, value)\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdGetEnv(L *lua.Lua) int {\n\tname, nameErr := L.ToString(1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\treturn 1\n\t}\n\tvalue := os.Getenv(name)\n\tif len(value) > 0 {\n\t\tL.PushString(value)\n\t} else {\n\t\tL.PushNil()\n\t}\n\treturn 1\n}\n\nfunc cmdExec(L *lua.Lua) int {\n\tstatement, statementErr := L.ToString(1)\n\tif statementErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(statementErr.Error())\n\t\treturn 2\n\t}\n\t_, err := interpreter.New().Interpret(statement)\n\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdEval(L *lua.Lua) int {\n\tstatement, statementErr := L.ToString(1)\n\tif statementErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(statementErr.Error())\n\t\treturn 2\n\t}\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n\tgo func(statement string, w *os.File) {\n\t\tit := interpreter.New()\n\t\tit.Stdout = w\n\t\tit.Interpret(statement)\n\t\tw.Close()\n\t}(statement, w)\n\n\tvar result = []byte{}\n\tfor {\n\t\tbuffer := make([]byte, 256)\n\t\tsize, err := r.Read(buffer)\n\t\tif err != nil || size <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tresult = append(result, buffer[0:size]...)\n\t}\n\tr.Close()\n\tL.PushAnsiString(result)\n\treturn 1\n}\n\nfunc cmdWrite(L *lua.Lua) int {\n\tvar out io.Writer = os.Stdout\n\tcmd, cmdOk := LuaInstanceToCmd[L.Id()]\n\tif cmdOk && cmd != nil && cmd.Stdout != nil {\n\t\tout = cmd.Stdout\n\t}\n\tswitch out.(type) {\n\tcase *os.File:\n\t\tout = ansicolor.NewAnsiColorWriter(out)\n\t}\n\n\tn := L.GetTop()\n\tfor i := 1; i <= n; i++ {\n\t\tstr, err := L.ToString(i)\n\t\tif err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t}\n\t\tif i > 1 {\n\t\t\tfmt.Fprint(out, \"\\t\")\n\t\t}\n\t\tfmt.Fprint(out, str)\n\t}\n\tL.PushBool(true)\n\treturn 1\n}\n\nfunc cmdGetwd(L *lua.Lua) int {\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tL.PushString(wd)\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc cmdWhich(L *lua.Lua) int {\n\tif L.GetType(-1) != lua.TSTRING {\n\t\treturn 0\n\t}\n\tname, nameErr := L.ToString(-1)\n\tif nameErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(nameErr.Error())\n\t\treturn 2\n\t}\n\tpath, err := exec.LookPath(name)\n\tif err == nil {\n\t\tL.PushString(path)\n\t\treturn 1\n\t} else {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n}\n\nfunc cmdAtoU(L *lua.Lua) int {\n\tstr, err := mbcs.AtoU(L.ToAnsiString(1))\n\tif err == nil {\n\t\tL.PushString(str)\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc cmdUtoA(L *lua.Lua) int {\n\tutf8, utf8err := L.ToString(1)\n\tif utf8err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(utf8err.Error())\n\t\treturn 2\n\t}\n\tstr, err := mbcs.UtoA(utf8)\n\tif err == nil {\n\t\tif len(str) >= 1 {\n\t\t\tL.PushAnsiString(str[:len(str)-1])\n\t\t} else {\n\t\t\tL.PushString(\"\")\n\t\t}\n\t\tL.PushNil()\n\t\treturn 2\n\t} else {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t}\n}\n\nfunc cmdGlob(L *lua.Lua) int {\n\tif !L.IsString(-1) {\n\t\treturn 0\n\t}\n\twildcard, wildcardErr := L.ToString(-1)\n\tif wildcardErr != nil {\n\t\tL.PushNil()\n\t\tL.PushString(wildcardErr.Error())\n\t\treturn 2\n\t}\n\tlist, err := dos.Glob(wildcard)\n\tif err != nil {\n\t\tL.PushNil()\n\t\tL.PushString(err.Error())\n\t\treturn 2\n\t} else {\n\t\tL.NewTable()\n\t\tfor i := 0; i < len(list); i++ {\n\t\t\tL.PushInteger(i + 1)\n\t\t\tL.PushString(list[i])\n\t\t\tL.SetTable(-3)\n\t\t}\n\t\treturn 1\n\t}\n}\n\ntype KeyLuaFuncT struct {\n\tL *lua.Lua\n\tregistoryKey string\n}\n\nfunc (this *KeyLuaFuncT) Call(buffer *readline.Buffer) readline.Result {\n\tthis.L.GetField(lua.Registory, this.registoryKey)\n\tif err := this.L.Call(0, 0); err != nil {\n\t\tfmt.Println(os.Stderr, err)\n\t}\n\treturn readline.CONTINUE\n}\n\nfunc cmdBindKey(L *lua.Lua) int {\n\tkey, keyErr := L.ToString(-2)\n\tif keyErr != nil {\n\t\tL.PushString(keyErr.Error())\n\t\treturn 1\n\t}\n\tkey = strings.Replace(strings.ToUpper(key), \"-\", \"_\", -1)\n\tswitch L.GetType(-1) {\n\tcase lua.TFUNCTION:\n\t\tregkey := \"nyagos.bind.\" + key\n\t\tL.SetField(lua.Registory, regkey)\n\t\tif err := readline.BindKeyFunc(key, &KeyLuaFuncT{L, regkey}); err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t} else {\n\t\t\tL.PushBool(true)\n\t\t\treturn 1\n\t\t}\n\tdefault:\n\t\tval, valErr := L.ToString(-1)\n\t\tif valErr != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(valErr.Error())\n\t\t\treturn 2\n\t\t}\n\t\terr := readline.BindKeySymbol(key, val)\n\t\tif err != nil {\n\t\t\tL.PushNil()\n\t\t\tL.PushString(err.Error())\n\t\t\treturn 2\n\t\t} else {\n\t\t\tL.PushBool(true)\n\t\t\treturn 1\n\t\t}\n\t}\n}\n\nfunc SetLuaFunctions(this *lua.Lua) {\n\tstackPos := this.GetTop()\n\tdefer this.SetTop(stackPos)\n\tthis.NewTable()\n\tthis.PushGoFunction(cmdAlias)\n\tthis.SetField(-2, \"alias\")\n\tthis.PushGoFunction(cmdSetEnv)\n\tthis.SetField(-2, \"setenv\")\n\tthis.PushGoFunction(cmdGetEnv)\n\tthis.SetField(-2, \"getenv\")\n\tthis.PushGoFunction(cmdExec)\n\tthis.SetField(-2, \"exec\")\n\tthis.PushGoFunction(cmdWrite)\n\tthis.SetField(-2, \"write\")\n\tthis.PushGoFunction(cmdAtoU)\n\tthis.SetField(-2, \"atou\")\n\tthis.PushGoFunction(cmdUtoA)\n\tthis.SetField(-2, \"utoa\")\n\tthis.PushGoFunction(cmdGetwd)\n\tthis.SetField(-2, \"getwd\")\n\tthis.PushGoFunction(cmdWhich)\n\tthis.SetField(-2, \"which\")\n\tthis.PushGoFunction(cmdEval)\n\tthis.SetField(-2, \"eval\")\n\tthis.PushGoFunction(cmdGlob)\n\tthis.SetField(-2, \"glob\")\n\tthis.PushGoFunction(cmdBindKey)\n\tthis.SetField(-2, \"bindkey\")\n\texeName, exeNameErr := dos.GetModuleFileName()\n\tif exeNameErr != nil {\n\t\tfmt.Fprintln(os.Stderr, exeNameErr)\n\t} else {\n\t\tthis.PushString(exeName)\n\t\tthis.SetField(-2, \"exe\")\n\t}\n\tthis.SetGlobal(\"nyagos\")\n\n\t\/\/ replace io.getenv\n\tthis.GetGlobal(\"os\")\n\tthis.PushGoFunction(cmdGetEnv)\n\tthis.SetField(-2, \"getenv\")\n\n\tvar orgArgHook func([]string) []string\n\torgArgHook = interpreter.SetArgsHook(func(args []string) []string {\n\t\tpos := this.GetTop()\n\t\tdefer this.SetTop(pos)\n\t\tthis.GetGlobal(\"nyagos\")\n\t\tthis.GetField(-1, \"argsfilter\")\n\t\tif !this.IsFunction(-1) {\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tthis.NewTable()\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\tthis.PushInteger(i)\n\t\t\tthis.PushString(args[i])\n\t\t\tthis.SetTable(-3)\n\t\t}\n\t\tif err := this.Call(1, 1); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tif this.GetType(-1) != lua.TTABLE {\n\t\t\treturn orgArgHook(args)\n\t\t}\n\t\tnewargs := []string{}\n\t\tfor i := 0; true; i++ {\n\t\t\tthis.PushInteger(i)\n\t\t\tthis.GetTable(-2)\n\t\t\tif this.GetType(-1) == lua.TNIL {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\targ1, arg1err := this.ToString(-1)\n\t\t\tif arg1err == nil {\n\t\t\t\tnewargs = append(newargs, arg1)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintln(os.Stderr, arg1err.Error())\n\t\t\t}\n\t\t\tthis.Pop(1)\n\t\t}\n\t\treturn orgArgHook(newargs)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage unix\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n\/\/go:cgo_import_dynamic libc_getentropy getentropy \"\/usr\/lib\/libSystem.B.dylib\"\n\nfunc libc_getentropy_trampoline()\n\n\/\/ GetEntropy calls the macOS getentropy system call.\nfunc GetEntropy(p []byte) error {\n\t_, _, errno := syscall_syscall(funcPC(libc_getentropy_trampoline),\n\t\tuintptr(unsafe.Pointer(&p[0])),\n\t\tuintptr(len(p)),\n\t\t0)\n\tif errno != 0 {\n\t\treturn errno\n\t}\n\treturn nil\n}\n\n\/\/go:linkname syscall_syscall syscall.syscall\nfunc syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)\n\n\/\/go:linkname funcPC runtime.funcPC\nfunc funcPC(f interface{}) uintptr\n<commit_msg>internal\/syscall\/unix: use internal\/abi.FuncPC for syscall wrapper<commit_after>\/\/ Copyright 2021 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage unix\n\nimport (\n\t\"internal\/abi\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n\/\/go:cgo_import_dynamic libc_getentropy getentropy \"\/usr\/lib\/libSystem.B.dylib\"\n\nfunc libc_getentropy_trampoline()\n\n\/\/ GetEntropy calls the macOS getentropy system call.\nfunc GetEntropy(p []byte) error {\n\t_, _, errno := syscall_syscall(abi.FuncPCABI0(libc_getentropy_trampoline),\n\t\tuintptr(unsafe.Pointer(&p[0])),\n\t\tuintptr(len(p)),\n\t\t0)\n\tif errno != 0 {\n\t\treturn errno\n\t}\n\treturn nil\n}\n\n\/\/go:linkname syscall_syscall syscall.syscall\nfunc syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)\n<|endoftext|>"} {"text":"<commit_before>package lz4\n\n\/*\n#cgo LDFLAGS: -llz4\n#include \"lz4.h\"\n#include \"lz4hc.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"unsafe\"\n\n\t\"github.com\/vova616\/xxhash\"\n)\n\nconst (\n\tBestSpeed = 3\n\tBestCompression = 9\n\tDefaultCompression = -1\n\tlz4EOM = uint32(0)\n\tlz4Magic = uint32(0x184D2204)\n\tlz4BlockSizeID = 7\n\tlz4BlockSize = 1 << (8 + (2 * lz4BlockSizeID))\n)\n\nvar lz4Header = []byte{\n\t0x4, 0x22, 0x4d, 0x18, 0x64, 0x70, 0xb9,\n}\n\nfunc blockSize(blockId uint32) uint32 {\n\treturn (1 << (8 + (2 * blockId)))\n}\n\ntype writer struct {\n\tlevel int\n\terr error\n\tcompressor func(src []byte, dst []byte, maxSize uint32) (int, error)\n\n\th hash.Hash32\n\tw io.Writer\n}\n\n\/\/ NewWriter creates a new Writer that satisfies writes by compressing data\n\/\/ written to w.\nfunc NewWriter(w io.Writer) io.WriteCloser {\n\tz, _ := NewWriterLevel(w, DefaultCompression)\n\treturn z\n}\n\n\/\/ NewWriterLevel is like NewWriter but specifies the compression level instead\n\/\/ of assuming DefaultCompression.\nfunc NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) {\n\tif level < DefaultCompression || level > BestCompression {\n\t\treturn nil, fmt.Errorf(\"lz4: invalid compression level: %d\", level)\n\t}\n\treturn &writer{\n\t\tlevel: level,\n\t\tw: w,\n\t\th: xxhash.New(0),\n\t}, nil\n}\n\nfunc (z *writer) writeHeader() error {\n\t\/\/ Write magic number and header\n\tif _, err := z.w.Write(lz4Header); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z *writer) write(v interface{}) error {\n\treturn binary.Write(z.w, binary.LittleEndian, v)\n}\n\n\/\/ Write writes a compressed form of p to the underlying io.Writer.\nfunc (z *writer) Write(p []byte) (int, error) {\n\tif z.err != nil {\n\t\treturn 0, z.err\n\t}\n\t\/\/ Write headers\n\tif z.compressor == nil {\n\t\tif z.level == BestCompression {\n\t\t\tz.compressor = lz4CompressBest\n\t\t} else {\n\t\t\tz.compressor = lz4CompressSpeed\n\t\t}\n\t\tz.err = z.writeHeader()\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t}\n\n\tcompressed := make([]byte, lz4BlockSize)\n\tn, err := z.compressor(p, compressed, lz4BlockSize)\n\tif err != nil {\n\t\tz.err = err\n\t\treturn 0, z.err\n\t}\n\n\tif n > 0 {\n\t\tz.err = z.write(uint32(n))\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\t\/\/ Write compressed block\n\t\t_, z.err = z.w.Write(compressed[0:n])\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t} else {\n\t\tz.err = z.write(uint32(len(p)) | 0x80000000)\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\t\/\/ Write uncompressed block\n\t\t_, z.err = z.w.Write(p)\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t}\n\n\tif len(p) > 0 {\n\t\tz.h.Write(p)\n\t}\n\n\treturn len(p), nil\n}\n\n\/\/ Close closes the Writer. It does not close the underlying io.Writer.\nfunc (z *writer) Close() error {\n\tz.err = z.write(uint32(0))\n\tif z.err != nil {\n\t\treturn z.err\n\t}\n\treturn z.write(z.h.Sum32())\n}\n\nfunc lz4CompressSpeed(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tif len(src) == 0 {\n\t\treturn 0, nil\n\t}\n\tn := C.LZ4_compress_limitedOutput((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n <= 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n\nfunc lz4CompressBest(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tn := C.LZ4_compressHC_limitedOutput((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n <= 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n\ntype reader struct {\n\tmaxBlockSize uint32\n\tcontentChecksumFlag bool\n\tblockChecksumFlag bool\n\n\tbuf []byte\n\tr io.Reader\n\th hash.Hash32\n\terr error\n}\n\n\/\/ NewReader creates a new Reader reading the given reader.\nfunc NewReader(r io.Reader) (io.ReadCloser, error) {\n\tz := &reader{\n\t\tr: r,\n\t\th: xxhash.New(0),\n\t}\n\tif err := z.readFrame(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn z, nil\n}\n\nfunc (z *reader) readFrame() error {\n\t\/\/ Read and check magic\n\tvar magic uint32\n\tif err := z.read(&magic); err != nil {\n\t\treturn err\n\t}\n\tif magic != lz4Magic {\n\t\treturn errors.New(\"lz4: invalid header\")\n\t}\n\tbr := newBitReader(io.LimitReader(z.r, 3))\n\tversion, err := br.ReadBits(2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version != 1 {\n\t\treturn errors.New(\"lz4: wrong version number\")\n\t}\n\tindependenceFlag, err := br.ReadBit()\n\tif err != nil || !independenceFlag {\n\t\treturn err\n\t}\n\tblockChecksumFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tz.blockChecksumFlag = blockChecksumFlag\n\tcontentSizeFlag, err := br.ReadBit()\n\tif err != nil || contentSizeFlag {\n\t\treturn errors.New(\"lz4: does not support stream size\")\n\t}\n\tcontentChecksumFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tz.contentChecksumFlag = contentChecksumFlag\n\tif reserved, err := br.ReadBit(); err != nil || reserved {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tdictionaryFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dictionaryFlag {\n\t\treturn errors.New(\"lz4: does not support dictionary\")\n\t}\n\tif reserved, err := br.ReadBit(); err != nil || reserved {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tblockMaxSize, err := br.ReadBits(3)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif blockMaxSize < 4 {\n\t\treturn errors.New(\"lz4: unsupported block size\")\n\t}\n\tz.maxBlockSize = blockSize(blockMaxSize)\n\tif reserved, err := br.ReadBits(4); err != nil || reserved != 0 {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tsum := br.Sum32() >> 8 & 0xFF\n\tchecksum, err := br.ReadBits(8)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif checksum != sum {\n\t\treturn errors.New(\"lz4: stream descriptor error detected\")\n\t}\n\tz.h.Reset()\n\treturn nil\n}\n\nfunc (z *reader) nextBlock() {\n\t\/\/ Read block size\n\tvar blockSize uint32\n\tz.err = z.read(&blockSize)\n\tif z.err != nil {\n\t\treturn\n\t}\n\n\tuncompressedFlag := (blockSize >> 31) != 0\n\tblockSize &= 0x7FFFFFFF\n\n\tif blockSize == lz4EOM {\n\t\tz.err = io.EOF\n\t\treturn\n\t}\n\n\tif blockSize > z.maxBlockSize {\n\t\tz.err = errors.New(\"lz4: invalid block size\")\n\t\treturn\n\t}\n\n\t\/\/ Read block data\n\tblock := make([]byte, blockSize)\n\t_, z.err = io.ReadFull(z.r, block)\n\tif z.err != nil {\n\t\treturn\n\t}\n\n\tif z.blockChecksumFlag {\n\t\t\/\/ Check block checksum\n\t\tvar checksum uint32\n\t\tz.err = z.read(&checksum)\n\t\tif z.err != nil {\n\t\t\treturn\n\t\t}\n\t\tif checksum != xxhash.Checksum32(block) {\n\t\t\tz.err = errors.New(\"lz4: invalid block checksum detected\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Decompress\n\tdata := make([]byte, z.maxBlockSize)\n\tif !uncompressedFlag {\n\t\tn, err := lz4Decompress(block, data, z.maxBlockSize)\n\t\tif err != nil {\n\t\t\tz.err = err\n\t\t\treturn\n\t\t}\n\t\tdata = data[0:n]\n\t} else {\n\t\tcopy(data, block)\n\t\tdata = data[0:blockSize]\n\t}\n\n\tif z.contentChecksumFlag {\n\t\tz.h.Write(data)\n\t}\n\n\t\/\/ Add block to our history\n\tz.buf = append(z.buf, data...)\n}\n\nfunc (z *reader) read(data interface{}) error {\n\treturn binary.Read(z.r, binary.LittleEndian, data)\n}\n\n\/\/ Read reads a decompressed form of p from the underlying io.Reader.\nfunc (z *reader) Read(p []byte) (int, error) {\n\tfor {\n\t\tif len(z.buf) > 0 {\n\t\t\tn := copy(p, z.buf)\n\t\t\tz.buf = z.buf[n:]\n\t\t\treturn n, nil\n\t\t}\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\tz.nextBlock()\n\t}\n}\n\n\/\/ Close closes the Reader. It does not close the underlying io.Reader.\nfunc (z *reader) Close() error {\n\tif z.contentChecksumFlag {\n\t\t\/\/ Check content checksum\n\t\tvar checksum uint32\n\t\tz.err = z.read(&checksum)\n\t\tif z.err != nil || checksum != z.h.Sum32() {\n\t\t\tz.err = errors.New(\"lz4: invalid content checksum detected\")\n\t\t\treturn z.err\n\t\t}\n\t}\n\tif z.err == io.EOF {\n\t\treturn nil\n\t}\n\treturn z.err\n}\n\nfunc lz4Decompress(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tn := C.LZ4_decompress_safe((*C.char)(unsafe.Pointer(&src[0])),\n\t\t(*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n < 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n<commit_msg>rename blockId to blockID<commit_after>package lz4\n\n\/*\n#cgo LDFLAGS: -llz4\n#include \"lz4.h\"\n#include \"lz4hc.h\"\n*\/\nimport \"C\"\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"unsafe\"\n\n\t\"github.com\/vova616\/xxhash\"\n)\n\nconst (\n\tBestSpeed = 3\n\tBestCompression = 9\n\tDefaultCompression = -1\n\tlz4EOM = uint32(0)\n\tlz4Magic = uint32(0x184D2204)\n\tlz4BlockSizeID = 7\n\tlz4BlockSize = 1 << (8 + (2 * lz4BlockSizeID))\n)\n\nvar lz4Header = []byte{\n\t0x4, 0x22, 0x4d, 0x18, 0x64, 0x70, 0xb9,\n}\n\nfunc blockSize(blockID uint32) uint32 {\n\treturn (1 << (8 + (2 * blockID)))\n}\n\ntype writer struct {\n\tlevel int\n\terr error\n\tcompressor func(src []byte, dst []byte, maxSize uint32) (int, error)\n\n\th hash.Hash32\n\tw io.Writer\n}\n\n\/\/ NewWriter creates a new Writer that satisfies writes by compressing data\n\/\/ written to w.\nfunc NewWriter(w io.Writer) io.WriteCloser {\n\tz, _ := NewWriterLevel(w, DefaultCompression)\n\treturn z\n}\n\n\/\/ NewWriterLevel is like NewWriter but specifies the compression level instead\n\/\/ of assuming DefaultCompression.\nfunc NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) {\n\tif level < DefaultCompression || level > BestCompression {\n\t\treturn nil, fmt.Errorf(\"lz4: invalid compression level: %d\", level)\n\t}\n\treturn &writer{\n\t\tlevel: level,\n\t\tw: w,\n\t\th: xxhash.New(0),\n\t}, nil\n}\n\nfunc (z *writer) writeHeader() error {\n\t\/\/ Write magic number and header\n\tif _, err := z.w.Write(lz4Header); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z *writer) write(v interface{}) error {\n\treturn binary.Write(z.w, binary.LittleEndian, v)\n}\n\n\/\/ Write writes a compressed form of p to the underlying io.Writer.\nfunc (z *writer) Write(p []byte) (int, error) {\n\tif z.err != nil {\n\t\treturn 0, z.err\n\t}\n\t\/\/ Write headers\n\tif z.compressor == nil {\n\t\tif z.level == BestCompression {\n\t\t\tz.compressor = lz4CompressBest\n\t\t} else {\n\t\t\tz.compressor = lz4CompressSpeed\n\t\t}\n\t\tz.err = z.writeHeader()\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t}\n\n\tcompressed := make([]byte, lz4BlockSize)\n\tn, err := z.compressor(p, compressed, lz4BlockSize)\n\tif err != nil {\n\t\tz.err = err\n\t\treturn 0, z.err\n\t}\n\n\tif n > 0 {\n\t\tz.err = z.write(uint32(n))\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\t\/\/ Write compressed block\n\t\t_, z.err = z.w.Write(compressed[0:n])\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t} else {\n\t\tz.err = z.write(uint32(len(p)) | 0x80000000)\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\t\/\/ Write uncompressed block\n\t\t_, z.err = z.w.Write(p)\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t}\n\n\tif len(p) > 0 {\n\t\tz.h.Write(p)\n\t}\n\n\treturn len(p), nil\n}\n\n\/\/ Close closes the Writer. It does not close the underlying io.Writer.\nfunc (z *writer) Close() error {\n\tz.err = z.write(uint32(0))\n\tif z.err != nil {\n\t\treturn z.err\n\t}\n\treturn z.write(z.h.Sum32())\n}\n\nfunc lz4CompressSpeed(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tif len(src) == 0 {\n\t\treturn 0, nil\n\t}\n\tn := C.LZ4_compress_limitedOutput((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n <= 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n\nfunc lz4CompressBest(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tn := C.LZ4_compressHC_limitedOutput((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n <= 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n\ntype reader struct {\n\tmaxBlockSize uint32\n\tcontentChecksumFlag bool\n\tblockChecksumFlag bool\n\n\tbuf []byte\n\tr io.Reader\n\th hash.Hash32\n\terr error\n}\n\n\/\/ NewReader creates a new Reader reading the given reader.\nfunc NewReader(r io.Reader) (io.ReadCloser, error) {\n\tz := &reader{\n\t\tr: r,\n\t\th: xxhash.New(0),\n\t}\n\tif err := z.readFrame(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn z, nil\n}\n\nfunc (z *reader) readFrame() error {\n\t\/\/ Read and check magic\n\tvar magic uint32\n\tif err := z.read(&magic); err != nil {\n\t\treturn err\n\t}\n\tif magic != lz4Magic {\n\t\treturn errors.New(\"lz4: invalid header\")\n\t}\n\tbr := newBitReader(io.LimitReader(z.r, 3))\n\tversion, err := br.ReadBits(2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version != 1 {\n\t\treturn errors.New(\"lz4: wrong version number\")\n\t}\n\tindependenceFlag, err := br.ReadBit()\n\tif err != nil || !independenceFlag {\n\t\treturn err\n\t}\n\tblockChecksumFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tz.blockChecksumFlag = blockChecksumFlag\n\tcontentSizeFlag, err := br.ReadBit()\n\tif err != nil || contentSizeFlag {\n\t\treturn errors.New(\"lz4: does not support stream size\")\n\t}\n\tcontentChecksumFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tz.contentChecksumFlag = contentChecksumFlag\n\tif reserved, err := br.ReadBit(); err != nil || reserved {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tdictionaryFlag, err := br.ReadBit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dictionaryFlag {\n\t\treturn errors.New(\"lz4: does not support dictionary\")\n\t}\n\tif reserved, err := br.ReadBit(); err != nil || reserved {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tblockMaxSize, err := br.ReadBits(3)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif blockMaxSize < 4 {\n\t\treturn errors.New(\"lz4: unsupported block size\")\n\t}\n\tz.maxBlockSize = blockSize(blockMaxSize)\n\tif reserved, err := br.ReadBits(4); err != nil || reserved != 0 {\n\t\treturn errors.New(\"lz4: wrong value for reserved bits\")\n\t}\n\tsum := br.Sum32() >> 8 & 0xFF\n\tchecksum, err := br.ReadBits(8)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif checksum != sum {\n\t\treturn errors.New(\"lz4: stream descriptor error detected\")\n\t}\n\tz.h.Reset()\n\treturn nil\n}\n\nfunc (z *reader) nextBlock() {\n\t\/\/ Read block size\n\tvar blockSize uint32\n\tz.err = z.read(&blockSize)\n\tif z.err != nil {\n\t\treturn\n\t}\n\n\tuncompressedFlag := (blockSize >> 31) != 0\n\tblockSize &= 0x7FFFFFFF\n\n\tif blockSize == lz4EOM {\n\t\tz.err = io.EOF\n\t\treturn\n\t}\n\n\tif blockSize > z.maxBlockSize {\n\t\tz.err = errors.New(\"lz4: invalid block size\")\n\t\treturn\n\t}\n\n\t\/\/ Read block data\n\tblock := make([]byte, blockSize)\n\t_, z.err = io.ReadFull(z.r, block)\n\tif z.err != nil {\n\t\treturn\n\t}\n\n\tif z.blockChecksumFlag {\n\t\t\/\/ Check block checksum\n\t\tvar checksum uint32\n\t\tz.err = z.read(&checksum)\n\t\tif z.err != nil {\n\t\t\treturn\n\t\t}\n\t\tif checksum != xxhash.Checksum32(block) {\n\t\t\tz.err = errors.New(\"lz4: invalid block checksum detected\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Decompress\n\tdata := make([]byte, z.maxBlockSize)\n\tif !uncompressedFlag {\n\t\tn, err := lz4Decompress(block, data, z.maxBlockSize)\n\t\tif err != nil {\n\t\t\tz.err = err\n\t\t\treturn\n\t\t}\n\t\tdata = data[0:n]\n\t} else {\n\t\tcopy(data, block)\n\t\tdata = data[0:blockSize]\n\t}\n\n\tif z.contentChecksumFlag {\n\t\tz.h.Write(data)\n\t}\n\n\t\/\/ Add block to our history\n\tz.buf = append(z.buf, data...)\n}\n\nfunc (z *reader) read(data interface{}) error {\n\treturn binary.Read(z.r, binary.LittleEndian, data)\n}\n\n\/\/ Read reads a decompressed form of p from the underlying io.Reader.\nfunc (z *reader) Read(p []byte) (int, error) {\n\tfor {\n\t\tif len(z.buf) > 0 {\n\t\t\tn := copy(p, z.buf)\n\t\t\tz.buf = z.buf[n:]\n\t\t\treturn n, nil\n\t\t}\n\t\tif z.err != nil {\n\t\t\treturn 0, z.err\n\t\t}\n\t\tz.nextBlock()\n\t}\n}\n\n\/\/ Close closes the Reader. It does not close the underlying io.Reader.\nfunc (z *reader) Close() error {\n\tif z.contentChecksumFlag {\n\t\t\/\/ Check content checksum\n\t\tvar checksum uint32\n\t\tz.err = z.read(&checksum)\n\t\tif z.err != nil || checksum != z.h.Sum32() {\n\t\t\tz.err = errors.New(\"lz4: invalid content checksum detected\")\n\t\t\treturn z.err\n\t\t}\n\t}\n\tif z.err == io.EOF {\n\t\treturn nil\n\t}\n\treturn z.err\n}\n\nfunc lz4Decompress(src []byte, dst []byte, maxSize uint32) (int, error) {\n\tn := C.LZ4_decompress_safe((*C.char)(unsafe.Pointer(&src[0])),\n\t\t(*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src)), C.int(maxSize))\n\tif n < 0 {\n\t\treturn 0, errors.New(\"lz4: data corruption\")\n\t}\n\treturn int(n), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package kontrol\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-version\"\n\t_ \"github.com\/lib\/pq\"\n\n\t\"github.com\/koding\/kite\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n\t\"github.com\/koding\/logging\"\n)\n\n\/\/ Postgres holds Postgresql database related configuration\ntype PostgresConfig struct {\n\tHost string\n\tPort int\n\tUsername string\n\tPassword string\n\tDBName string\n}\n\ntype Postgres struct {\n\tDB *sql.DB\n\tLog logging.Logger\n}\n\nfunc NewPostgres(conf *PostgresConfig, log kite.Logger) *Postgres {\n\tif conf.Port == 0 {\n\t\tconf.Port = 5432\n\t}\n\n\tif conf.Host == \"\" {\n\t\tconf.Host = \"localhost\"\n\t}\n\n\tif conf.DBName == \"\" {\n\t\tconf.DBName = \"test\"\n\t}\n\n\tconnString := fmt.Sprintf(\n\t\t\"host=%s port=%d dbname=%s sslmode=disable\",\n\t\tconf.Host, conf.Port, conf.DBName,\n\t)\n\n\tif conf.Password != \"\" {\n\t\tconnString += \" password=\" + conf.Password\n\t}\n\n\tif conf.Username != \"\" {\n\t\tconnString += \" user=\" + conf.Username\n\t}\n\n\tdb, err := sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ add a limit so we don't hit a \"too many open connections\" errors. We\n\t\/\/ might change this in the future to tweak according to the machine and\n\t\/\/ usage behaviour\n\tdb.SetMaxIdleConns(100)\n\tdb.SetMaxOpenConns(100)\n\n\t\/\/ enable the ltree module which we are going to use, any error means it's\n\t\/\/ failed so there is no sense to continue, panic!\n\tenableTree := `CREATE EXTENSION IF NOT EXISTS ltree`\n\tif _, err := db.Exec(enableTree); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ create our initial kites table\n\t\/\/ * kite is going to be our ltree\n\t\/\/ * url is containing the kite's register url\n\t\/\/ * id is going to be kites' unique id (which also exists in the ltree\n\t\/\/ path). We are adding it as a primary key so each kite with the full\n\t\/\/ path can only exist once.\n\t\/\/ * created_at and updated_at are updated at creation and updating (like\n\t\/\/ if the URL has changed)\n\t\/\/ Some notes:\n\t\/\/ * path label can only contain a sequence of alphanumeric characters\n\t\/\/ and underscores. So for example a version string of \"1.0.4\" needs to\n\t\/\/ be converted to \"1_0_4\" or uuid of 1111-2222-3333-4444 needs to be\n\t\/\/ converted to 1111_2222_3333_4444.\n\ttable := `CREATE TABLE IF NOT EXISTS kites (\n\t\tkite ltree NOT NULL,\n\t\turl text NOT NULL,\n\t\tid uuid PRIMARY KEY,\n\t\tcreated_at timestamptz NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),\n\t\tupdated_at timestamptz NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC')\n\t);`\n\n\tif _, err := db.Exec(table); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ We enable index on the kite field. We don't return on errors because the\n\t\/\/ operator `IF NOT EXISTS` doesn't work for index creation, therefore we\n\t\/\/ assume the indexes might be already created.\n\tenableGistIndex := `CREATE INDEX kite_path_gist_idx ON kites USING GIST(kite)`\n\tenableBtreeIndex := `CREATE INDEX kite_path_btree_idx ON kites USING BTREE(kite)`\n\n\tif _, err := db.Exec(enableGistIndex); err != nil {\n\t\tlog.Warning(\"postgres: enable gist index: \", err)\n\t}\n\n\tif _, err := db.Exec(enableBtreeIndex); err != nil {\n\t\tlog.Warning(\"postgres: enable btree index: \", err)\n\t}\n\n\treturn &Postgres{\n\t\tDB: db,\n\t}\n}\n\nfunc (p *Postgres) Get(query *protocol.KontrolQuery) (Kites, error) {\n\t\/\/ only let query with usernames, otherwise the whole tree will be fetched\n\t\/\/ which is not good for us\n\tif query.Username == \"\" {\n\t\treturn nil, errors.New(\"username is not specified in query\")\n\t}\n\n\t\/\/ NewVersion returns an error if it's a constraint, like: \">= 1.0, < 1.4\"\n\t_, err := version.NewVersion(query.Version)\n\tif err != nil && query.Version != \"\" {\n\t\treturn nil, errors.New(\"version constraint is not implemented\")\n\t}\n\n\tpath := ltreePath(query)\n\n\t\/\/ TODO: cache the statement or throttle it in a safe way, for example we\n\t\/\/ could cache it for every incoming 10 Get call\n\tstmt, err := p.DB.Prepare(`SELECT kite, url FROM kites WHERE kite <@ $1`)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar kitePath string\n\tvar url string\n\n\tkites := make(Kites, 0)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&kitePath, &url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteProt, err := kiteFromPath(kitePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkites = append(kites, &protocol.KiteWithToken{\n\t\t\tKite: *kiteProt,\n\t\t\tURL: url,\n\t\t})\n\n\t\tfmt.Printf(\"kitePath %+v\\n\", kitePath)\n\t\tfmt.Printf(\"url %+v\\n\", url)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkites.Shuffle()\n\n\treturn kites, nil\n}\n\nfunc (p *Postgres) Add(kiteProt *protocol.Kite, value *kontrolprotocol.RegisterValue) error {\n\t\/\/ check that the incoming URL is valid to prevent malformed input\n\t_, err := url.Parse(value.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = p.DB.Exec(\"INSERT into kites(kite, url, id) VALUES($1, $2, $3)\",\n\t\tltreePath(kiteProt.Query()),\n\t\tvalue.URL,\n\t\tkiteProt.ID,\n\t)\n\treturn err\n}\n\nfunc (p *Postgres) Update(kiteProt *protocol.Kite, value *kontrolprotocol.RegisterValue) error {\n\t\/\/ check that the incoming url is valid to prevent malformed input\n\t_, err := url.Parse(value.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: also consider just using WHERE id = kiteProt.ID, see how it's\n\t\/\/ performs out\n\t_, err = p.DB.Exec(`UPDATE kites SET url = $1, updated_at = (now() at time zone 'utc') \n\tWHERE kite ~ $2`,\n\t\tvalue.URL, ltreePath(kiteProt.Query()))\n\n\treturn err\n}\n\nfunc (p *Postgres) Delete(kite *protocol.Kite) error {\n\treturn errors.New(\"DELETE is not implemented\")\n}\n\n\/\/ ltreeLabel satisfies a invalid ltree definition of a label in path.\n\/\/ According to the definition it is: \"A label is a sequence of alphanumeric\n\/\/ characters and underscores (for example, in C locale the characters\n\/\/ A-Za-z0-9_ are allowed). Labels must be less than 256 bytes long.\"\n\/\/\n\/\/ We could express one character with \"[A-Za-z0-9_]\", a word with\n\/\/ \"[A-Za-z0-9_]+\". However we want to catch words that are not valid labels so\n\/\/ we negate them with the \"^\" character, so it will be : \"[^[A-Za-z0-9_]]+\".\n\/\/ Finally we cann use the POSIX character class: [:word:] which is:\n\/\/ \"Alphanumeric characters plus \"_\"\", so the final regexp will be\n\/\/ \"[^[:word]]+\"\nvar invalidLabelRe = regexp.MustCompile(\"[^[:word:]]+\")\n\n\/\/ ltreePath returns a query path to be used with the ltree module in postgress\n\/\/ in the form of \"username.environment.kitename.version.region.hostname.id\"\nfunc ltreePath(query *protocol.KontrolQuery) string {\n\tpath := \"\"\n\tfields := query.Fields()\n\n\t\/\/ we stop for the first empty value\n\tfor _, key := range keyOrder {\n\t\tv := fields[key]\n\t\tif v == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ replace anything that doesn't match the definition for a ltree path\n\t\t\/\/ label with a underscore, so the version \"0.0.1\" will be \"0_0_1\", or\n\t\t\/\/ uuid of \"1111-2222-3333-4444\" will be converted to\n\t\t\/\/ 1111_2222_3333_4444. Strings that satisfies the requirement are\n\t\t\/\/ untouched.\n\t\tv = invalidLabelRe.ReplaceAllLiteralString(v, \"_\")\n\n\t\tpath = path + v + \".\"\n\t}\n\n\t\/\/ remove the latest dot which causes an invalid query\n\tpath = strings.TrimSuffix(path, \".\")\n\treturn path\n}\n\n\/\/ kiteFromPath returns a Query from the given ltree path label\nfunc kiteFromPath(path string) (*protocol.Kite, error) {\n\tfields := strings.Split(path, \".\")\n\n\tif len(fields) != 7 {\n\t\treturn nil, fmt.Errorf(\"invalid ltree path: %s\", path)\n\t}\n\n\t\/\/ those labels were converted by us, therefore convert them back\n\tversion := strings.Replace(fields[3], \"_\", \".\", -1)\n\tid := strings.Replace(fields[6], \"_\", \"-\", -1)\n\n\treturn &protocol.Kite{\n\t\tUsername: fields[0],\n\t\tEnvironment: fields[1],\n\t\tName: fields[2],\n\t\tVersion: version,\n\t\tRegion: fields[4],\n\t\tHostname: fields[5],\n\t\tID: id,\n\t}, nil\n\n}\n<commit_msg>kontrol\/postgres: set back to a query<commit_after>package kontrol\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-version\"\n\t_ \"github.com\/lib\/pq\"\n\n\t\"github.com\/koding\/kite\"\n\tkontrolprotocol \"github.com\/koding\/kite\/kontrol\/protocol\"\n\t\"github.com\/koding\/kite\/protocol\"\n\t\"github.com\/koding\/logging\"\n)\n\n\/\/ Postgres holds Postgresql database related configuration\ntype PostgresConfig struct {\n\tHost string\n\tPort int\n\tUsername string\n\tPassword string\n\tDBName string\n}\n\ntype Postgres struct {\n\tDB *sql.DB\n\tLog logging.Logger\n}\n\nfunc NewPostgres(conf *PostgresConfig, log kite.Logger) *Postgres {\n\tif conf.Port == 0 {\n\t\tconf.Port = 5432\n\t}\n\n\tif conf.Host == \"\" {\n\t\tconf.Host = \"localhost\"\n\t}\n\n\tif conf.DBName == \"\" {\n\t\tconf.DBName = \"test\"\n\t}\n\n\tconnString := fmt.Sprintf(\n\t\t\"host=%s port=%d dbname=%s sslmode=disable\",\n\t\tconf.Host, conf.Port, conf.DBName,\n\t)\n\n\tif conf.Password != \"\" {\n\t\tconnString += \" password=\" + conf.Password\n\t}\n\n\tif conf.Username != \"\" {\n\t\tconnString += \" user=\" + conf.Username\n\t}\n\n\tdb, err := sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ add a limit so we don't hit a \"too many open connections\" errors. We\n\t\/\/ might change this in the future to tweak according to the machine and\n\t\/\/ usage behaviour\n\tdb.SetMaxIdleConns(100)\n\tdb.SetMaxOpenConns(100)\n\n\t\/\/ enable the ltree module which we are going to use, any error means it's\n\t\/\/ failed so there is no sense to continue, panic!\n\tenableTree := `CREATE EXTENSION IF NOT EXISTS ltree`\n\tif _, err := db.Exec(enableTree); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ create our initial kites table\n\t\/\/ * kite is going to be our ltree\n\t\/\/ * url is containing the kite's register url\n\t\/\/ * id is going to be kites' unique id (which also exists in the ltree\n\t\/\/ path). We are adding it as a primary key so each kite with the full\n\t\/\/ path can only exist once.\n\t\/\/ * created_at and updated_at are updated at creation and updating (like\n\t\/\/ if the URL has changed)\n\t\/\/ Some notes:\n\t\/\/ * path label can only contain a sequence of alphanumeric characters\n\t\/\/ and underscores. So for example a version string of \"1.0.4\" needs to\n\t\/\/ be converted to \"1_0_4\" or uuid of 1111-2222-3333-4444 needs to be\n\t\/\/ converted to 1111_2222_3333_4444.\n\ttable := `CREATE TABLE IF NOT EXISTS kites (\n\t\tkite ltree NOT NULL,\n\t\turl text NOT NULL,\n\t\tid uuid PRIMARY KEY,\n\t\tcreated_at timestamptz NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC'),\n\t\tupdated_at timestamptz NOT NULL DEFAULT (NOW() AT TIME ZONE 'UTC')\n\t);`\n\n\tif _, err := db.Exec(table); err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ We enable index on the kite field. We don't return on errors because the\n\t\/\/ operator `IF NOT EXISTS` doesn't work for index creation, therefore we\n\t\/\/ assume the indexes might be already created.\n\tenableGistIndex := `CREATE INDEX kite_path_gist_idx ON kites USING GIST(kite)`\n\tenableBtreeIndex := `CREATE INDEX kite_path_btree_idx ON kites USING BTREE(kite)`\n\n\tif _, err := db.Exec(enableGistIndex); err != nil {\n\t\tlog.Warning(\"postgres: enable gist index: \", err)\n\t}\n\n\tif _, err := db.Exec(enableBtreeIndex); err != nil {\n\t\tlog.Warning(\"postgres: enable btree index: \", err)\n\t}\n\n\treturn &Postgres{\n\t\tDB: db,\n\t}\n}\n\nfunc (p *Postgres) Get(query *protocol.KontrolQuery) (Kites, error) {\n\t\/\/ only let query with usernames, otherwise the whole tree will be fetched\n\t\/\/ which is not good for us\n\tif query.Username == \"\" {\n\t\treturn nil, errors.New(\"username is not specified in query\")\n\t}\n\n\t\/\/ NewVersion returns an error if it's a constraint, like: \">= 1.0, < 1.4\"\n\t_, err := version.NewVersion(query.Version)\n\tif err != nil && query.Version != \"\" {\n\t\treturn nil, errors.New(\"version constraint is not implemented\")\n\t}\n\n\tpath := ltreePath(query)\n\n\trows, err := p.DB.Query(`SELECT kite, url FROM kites WHERE kite <@ $1`, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar kitePath string\n\tvar url string\n\n\tkites := make(Kites, 0)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&kitePath, &url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteProt, err := kiteFromPath(kitePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkites = append(kites, &protocol.KiteWithToken{\n\t\t\tKite: *kiteProt,\n\t\t\tURL: url,\n\t\t})\n\n\t\tfmt.Printf(\"kitePath %+v\\n\", kitePath)\n\t\tfmt.Printf(\"url %+v\\n\", url)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkites.Shuffle()\n\n\treturn kites, nil\n}\n\nfunc (p *Postgres) Add(kiteProt *protocol.Kite, value *kontrolprotocol.RegisterValue) error {\n\t\/\/ check that the incoming URL is valid to prevent malformed input\n\t_, err := url.Parse(value.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = p.DB.Exec(\"INSERT into kites(kite, url, id) VALUES($1, $2, $3)\",\n\t\tltreePath(kiteProt.Query()),\n\t\tvalue.URL,\n\t\tkiteProt.ID,\n\t)\n\treturn err\n}\n\nfunc (p *Postgres) Update(kiteProt *protocol.Kite, value *kontrolprotocol.RegisterValue) error {\n\t\/\/ check that the incoming url is valid to prevent malformed input\n\t_, err := url.Parse(value.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: also consider just using WHERE id = kiteProt.ID, see how it's\n\t\/\/ performs out\n\t_, err = p.DB.Exec(`UPDATE kites SET url = $1, updated_at = (now() at time zone 'utc') \n\tWHERE kite ~ $2`,\n\t\tvalue.URL, ltreePath(kiteProt.Query()))\n\n\treturn err\n}\n\nfunc (p *Postgres) Delete(kite *protocol.Kite) error {\n\treturn errors.New(\"DELETE is not implemented\")\n}\n\n\/\/ ltreeLabel satisfies a invalid ltree definition of a label in path.\n\/\/ According to the definition it is: \"A label is a sequence of alphanumeric\n\/\/ characters and underscores (for example, in C locale the characters\n\/\/ A-Za-z0-9_ are allowed). Labels must be less than 256 bytes long.\"\n\/\/\n\/\/ We could express one character with \"[A-Za-z0-9_]\", a word with\n\/\/ \"[A-Za-z0-9_]+\". However we want to catch words that are not valid labels so\n\/\/ we negate them with the \"^\" character, so it will be : \"[^[A-Za-z0-9_]]+\".\n\/\/ Finally we cann use the POSIX character class: [:word:] which is:\n\/\/ \"Alphanumeric characters plus \"_\"\", so the final regexp will be\n\/\/ \"[^[:word]]+\"\nvar invalidLabelRe = regexp.MustCompile(\"[^[:word:]]+\")\n\n\/\/ ltreePath returns a query path to be used with the ltree module in postgress\n\/\/ in the form of \"username.environment.kitename.version.region.hostname.id\"\nfunc ltreePath(query *protocol.KontrolQuery) string {\n\tpath := \"\"\n\tfields := query.Fields()\n\n\t\/\/ we stop for the first empty value\n\tfor _, key := range keyOrder {\n\t\tv := fields[key]\n\t\tif v == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ replace anything that doesn't match the definition for a ltree path\n\t\t\/\/ label with a underscore, so the version \"0.0.1\" will be \"0_0_1\", or\n\t\t\/\/ uuid of \"1111-2222-3333-4444\" will be converted to\n\t\t\/\/ 1111_2222_3333_4444. Strings that satisfies the requirement are\n\t\t\/\/ untouched.\n\t\tv = invalidLabelRe.ReplaceAllLiteralString(v, \"_\")\n\n\t\tpath = path + v + \".\"\n\t}\n\n\t\/\/ remove the latest dot which causes an invalid query\n\tpath = strings.TrimSuffix(path, \".\")\n\treturn path\n}\n\n\/\/ kiteFromPath returns a Query from the given ltree path label\nfunc kiteFromPath(path string) (*protocol.Kite, error) {\n\tfields := strings.Split(path, \".\")\n\n\tif len(fields) != 7 {\n\t\treturn nil, fmt.Errorf(\"invalid ltree path: %s\", path)\n\t}\n\n\t\/\/ those labels were converted by us, therefore convert them back\n\tversion := strings.Replace(fields[3], \"_\", \".\", -1)\n\tid := strings.Replace(fields[6], \"_\", \"-\", -1)\n\n\treturn &protocol.Kite{\n\t\tUsername: fields[0],\n\t\tEnvironment: fields[1],\n\t\tName: fields[2],\n\t\tVersion: version,\n\t\tRegion: fields[4],\n\t\tHostname: fields[5],\n\t\tID: id,\n\t}, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package encoders\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/3d0c\/gmf\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\nfunc addStream(job types.Job, codecName string, oc *gmf.FmtCtx, ist *gmf.Stream) (int, int, error) {\n\tvar codecContext *gmf.CodecCtx\n\tvar ost *gmf.Stream\n\n\tcodec, err := gmf.FindEncoder(codecName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif ost = oc.NewStream(codec); ost == nil {\n\t\treturn 0, 0, errors.New(\"unable to create stream in output context\")\n\t}\n\tdefer gmf.Release(ost)\n\n\tif codecContext = gmf.NewCodecCtx(codec); codecContext == nil {\n\t\treturn 0, 0, errors.New(\"unable to create codec context\")\n\t}\n\tdefer gmf.Release(codecContext)\n\n\t\/\/ https:\/\/ffmpeg.org\/pipermail\/ffmpeg-devel\/2008-January\/046900.html\n\tif oc.IsGlobalHeader() {\n\t\tcodecContext.SetFlag(gmf.CODEC_FLAG_GLOBAL_HEADER)\n\t}\n\n\tif codec.IsExperimental() {\n\t\tcodecContext.SetStrictCompliance(gmf.FF_COMPLIANCE_EXPERIMENTAL)\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_AUDIO {\n\t\terr := setAudioCtxParams(codecContext, ist, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_VIDEO {\n\t\terr := setVideoCtxParams(codecContext, ist, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif err := codecContext.Open(nil); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tost.SetCodecCtx(codecContext)\n\n\treturn ist.Index(), ost.Index(), nil\n}\n\n\/\/ FFMPEGEncode function is responsible for encoding the file\nfunc FFMPEGEncode(logger lager.Logger, dbInstance db.Storage, jobID string) error {\n\tlog := logger.Session(\"ffmpeg-encode\")\n\tlog.Info(\"started\", lager.Data{\"job\": jobID})\n\tdefer log.Info(\"finished\")\n\n\tgmf.LogSetLevel(gmf.AV_LOG_FATAL)\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\tstMap := make(map[int]int, 0)\n\tvar lastDelta int64\n\n\tinputCtx, err := gmf.NewInputCtx(job.LocalSource)\n\tif err != nil {\n\t\tlog.Error(\"input-failed\", err)\n\t\treturn err\n\t}\n\tdefer inputCtx.CloseInputAndRelease()\n\n\toutputCtx, err := gmf.NewOutputCtx(job.LocalDestination)\n\tif err != nil {\n\t\tlog.Error(\"output-failed\", err)\n\t\treturn err\n\t}\n\tdefer outputCtx.CloseOutputAndRelease()\n\n\tjob.Status = types.JobEncoding\n\tjob.Details = \"0%\"\n\tdbInstance.UpdateJob(job.ID, job)\n\n\tsrcVideoStream, _ := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_VIDEO)\n\n\tvideoCodec := getCodec(job)\n\n\tlog.Info(\"add-stream-start\", lager.Data{\"code\": videoCodec})\n\ti, o, err := addStream(job, videoCodec, outputCtx, srcVideoStream)\n\tif err != nil {\n\t\tlog.Error(\"add-stream-failed\", err)\n\t\treturn err\n\t}\n\tlog.Info(\"add-stream-finished\")\n\tstMap[i] = o\n\n\tsrcAudioStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_AUDIO)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taudioCodec := \"aac\"\n\tif job.Preset.Audio.Codec != \"aac\" {\n\t\taudioCodec = job.Preset.Audio.Codec\n\t}\n\n\ti, o, err = addStream(job, audioCodec, outputCtx, srcAudioStream)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstMap[i] = o\n\n\tif err := outputCtx.WriteHeader(); err != nil {\n\t\treturn err\n\t}\n\ttotalFrames := float64(srcVideoStream.NbFrames() + srcAudioStream.NbFrames())\n\tframesCount := float64(0)\n\n\tfor packet := range inputCtx.GetNewPackets() {\n\t\tist, err := inputCtx.GetStream(packet.StreamIndex())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tost, err := outputCtx.GetStream(stMap[ist.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor frame := range packet.Frames(ist.CodecCtx()) {\n\t\t\tif ost.IsAudio() {\n\t\t\t\tfsTb := gmf.AVR{Num: 1, Den: ist.CodecCtx().SampleRate()}\n\t\t\t\toutTb := gmf.AVR{Num: 1, Den: ist.CodecCtx().SampleRate()}\n\n\t\t\t\tframe.SetPts(packet.Pts())\n\n\t\t\t\tpts := gmf.RescaleDelta(ist.TimeBase(), frame.Pts(), fsTb.AVRational(), frame.NbSamples(), &lastDelta, outTb.AVRational())\n\n\t\t\t\tframe.\n\t\t\t\t\tSetNbSamples(ost.CodecCtx().FrameSize()).\n\t\t\t\t\tSetFormat(ost.CodecCtx().SampleFmt()).\n\t\t\t\t\tSetChannelLayout(ost.CodecCtx().ChannelLayout()).\n\t\t\t\t\tSetPts(pts)\n\t\t\t} else {\n\t\t\t\tframe.SetPts(ost.Pts)\n\t\t\t}\n\n\t\t\tif p, ready, _ := frame.EncodeNewPacket(ost.CodecCtx()); ready {\n\t\t\t\tif p.Pts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetPts(gmf.RescaleQ(p.Pts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tif p.Dts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetDts(gmf.RescaleQ(p.Dts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tp.SetStreamIndex(ost.Index())\n\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t}\n\n\t\t\tost.Pts++\n\t\t\tframesCount++\n\t\t\tpercentage := string(strconv.FormatInt(int64(framesCount\/totalFrames*100), 10) + \"%\")\n\t\t\tif percentage != job.Details {\n\t\t\t\tjob.Details = percentage\n\t\t\t\tdbInstance.UpdateJob(job.ID, job)\n\t\t\t}\n\t\t}\n\t\tgmf.Release(packet)\n\t}\n\n\tfor i := 0; i < outputCtx.StreamsCnt(); i++ {\n\t\tist, err := inputCtx.GetStream(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tost, err := outputCtx.GetStream(stMap[ist.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tframe := gmf.NewFrame()\n\n\t\tfor {\n\t\t\tif p, ready, _ := frame.FlushNewPacket(ost.CodecCtx()); ready {\n\t\t\t\tif p.Pts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetPts(gmf.RescaleQ(p.Pts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tif p.Dts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetDts(gmf.RescaleQ(p.Dts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tp.SetStreamIndex(ost.Index())\n\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t} else {\n\t\t\t\tgmf.Release(p)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tost.Pts++\n\t\t}\n\n\t\tgmf.Release(frame)\n\t}\n\tif job.Details != \"100%\" {\n\t\tjob.Details = \"100%\"\n\t\tdbInstance.UpdateJob(job.ID, job)\n\t}\n\n\treturn nil\n}\n\nfunc getProfile(job types.Job) int {\n\tprofiles := map[string]int{\n\t\t\"baseline\": gmf.FF_PROFILE_H264_BASELINE,\n\t\t\"main\": gmf.FF_PROFILE_H264_MAIN,\n\t\t\"high\": gmf.FF_PROFILE_H264_HIGH,\n\t}\n\n\tif job.Preset.Video.Profile != \"\" {\n\t\treturn profiles[job.Preset.Video.Profile]\n\t}\n\treturn gmf.FF_PROFILE_H264_MAIN\n}\n\nfunc getCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"h264\": \"libx264\",\n\t\t\"vp8\": \"libvpx\",\n\t\t\"vp9\": \"libvpx-vp9\",\n\t\t\"theora\": \"libtheora\",\n\t}\n\n\tif job.Preset.Video.Codec != \"\" {\n\t\treturn codecs[job.Preset.Video.Codec]\n\t}\n\treturn \"libx264\"\n}\n\nfunc getResolution(job types.Job, inputWidth int, inputHeight int) (int, int) {\n\tvar width, height int\n\tif job.Preset.Video.Width == \"\" && job.Preset.Video.Height == \"\" {\n\t\treturn inputWidth, inputHeight\n\t} else if job.Preset.Video.Width == \"\" {\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t\twidth = (inputWidth * height) \/ inputHeight\n\t} else if job.Preset.Video.Height == \"\" {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight = (inputHeight * width) \/ inputWidth\n\t} else {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t}\n\treturn width, height\n}\n\nfunc setAudiooCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tbitrate, err := strconv.Atoi(job.Preset.Audio.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetSampleFmt(ist.CodecCtx().SampleFmt())\n\tcodecContext.SetSampleRate(ist.CodecCtx().SampleRate())\n\tcodecContext.SetChannels(ist.CodecCtx().Channels())\n\tcodecContext.SelectChannelLayout()\n\tcodecContext.SelectSampleRate()\n\treturn nil\n}\n\nfunc setVideoCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tcodecContext.SetTimeBase(gmf.AVR{Num: 1, Den: 25}) \/\/ what is this\n\n\tif job.Preset.Video.Codec == \"h264\" {\n\t\tprofile := getProfile(job)\n\t\tcodecContext.SetProfile(profile)\n\t}\n\n\tgop, err := strconv.Atoi(job.Preset.Video.GopSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twidth, height := getResolution(job, ist.CodecCtx().Width(), ist.CodecCtx().Height())\n\n\tbitrate, err := strconv.Atoi(job.Preset.Video.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetDimension(width, height)\n\tcodecContext.SetGopSize(gop)\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetPixFmt(ist.CodecCtx().PixFmt())\n\n\treturn nil\n}\n<commit_msg>encoders\/ffmpeg: fix tests<commit_after>package encoders\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/3d0c\/gmf\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\nfunc addStream(job types.Job, codecName string, oc *gmf.FmtCtx, ist *gmf.Stream) (int, int, error) {\n\tvar codecContext *gmf.CodecCtx\n\tvar ost *gmf.Stream\n\n\tcodec, err := gmf.FindEncoder(codecName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif ost = oc.NewStream(codec); ost == nil {\n\t\treturn 0, 0, errors.New(\"unable to create stream in output context\")\n\t}\n\tdefer gmf.Release(ost)\n\n\tif codecContext = gmf.NewCodecCtx(codec); codecContext == nil {\n\t\treturn 0, 0, errors.New(\"unable to create codec context\")\n\t}\n\tdefer gmf.Release(codecContext)\n\n\t\/\/ https:\/\/ffmpeg.org\/pipermail\/ffmpeg-devel\/2008-January\/046900.html\n\tif oc.IsGlobalHeader() {\n\t\tcodecContext.SetFlag(gmf.CODEC_FLAG_GLOBAL_HEADER)\n\t}\n\n\tif codec.IsExperimental() {\n\t\tcodecContext.SetStrictCompliance(gmf.FF_COMPLIANCE_EXPERIMENTAL)\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_AUDIO {\n\t\terr := setAudioCtxParams(codecContext, ist, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_VIDEO {\n\t\terr := setVideoCtxParams(codecContext, ist, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif err := codecContext.Open(nil); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tost.SetCodecCtx(codecContext)\n\n\treturn ist.Index(), ost.Index(), nil\n}\n\n\/\/ FFMPEGEncode function is responsible for encoding the file\nfunc FFMPEGEncode(logger lager.Logger, dbInstance db.Storage, jobID string) error {\n\tlog := logger.Session(\"ffmpeg-encode\")\n\tlog.Info(\"started\", lager.Data{\"job\": jobID})\n\tdefer log.Info(\"finished\")\n\n\tgmf.LogSetLevel(gmf.AV_LOG_FATAL)\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\tstMap := make(map[int]int, 0)\n\tvar lastDelta int64\n\n\tinputCtx, err := gmf.NewInputCtx(job.LocalSource)\n\tif err != nil {\n\t\tlog.Error(\"input-failed\", err)\n\t\treturn err\n\t}\n\tdefer inputCtx.CloseInputAndRelease()\n\n\toutputCtx, err := gmf.NewOutputCtx(job.LocalDestination)\n\tif err != nil {\n\t\tlog.Error(\"output-failed\", err)\n\t\treturn err\n\t}\n\tdefer outputCtx.CloseOutputAndRelease()\n\n\tjob.Status = types.JobEncoding\n\tjob.Details = \"0%\"\n\tdbInstance.UpdateJob(job.ID, job)\n\n\tsrcVideoStream, _ := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_VIDEO)\n\n\tvideoCodec := getCodec(job)\n\n\tlog.Info(\"add-stream-start\", lager.Data{\"code\": videoCodec})\n\ti, o, err := addStream(job, videoCodec, outputCtx, srcVideoStream)\n\tif err != nil {\n\t\tlog.Error(\"add-stream-failed\", err)\n\t\treturn err\n\t}\n\tlog.Info(\"add-stream-finished\")\n\tstMap[i] = o\n\n\tsrcAudioStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_AUDIO)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taudioCodec := \"aac\"\n\tif job.Preset.Audio.Codec != \"aac\" {\n\t\taudioCodec = job.Preset.Audio.Codec\n\t}\n\n\ti, o, err = addStream(job, audioCodec, outputCtx, srcAudioStream)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstMap[i] = o\n\n\tif err := outputCtx.WriteHeader(); err != nil {\n\t\treturn err\n\t}\n\ttotalFrames := float64(srcVideoStream.NbFrames() + srcAudioStream.NbFrames())\n\tframesCount := float64(0)\n\n\tfor packet := range inputCtx.GetNewPackets() {\n\t\tist, err := inputCtx.GetStream(packet.StreamIndex())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tost, err := outputCtx.GetStream(stMap[ist.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor frame := range packet.Frames(ist.CodecCtx()) {\n\t\t\tif ost.IsAudio() {\n\t\t\t\tfsTb := gmf.AVR{Num: 1, Den: ist.CodecCtx().SampleRate()}\n\t\t\t\toutTb := gmf.AVR{Num: 1, Den: ist.CodecCtx().SampleRate()}\n\n\t\t\t\tframe.SetPts(packet.Pts())\n\n\t\t\t\tpts := gmf.RescaleDelta(ist.TimeBase(), frame.Pts(), fsTb.AVRational(), frame.NbSamples(), &lastDelta, outTb.AVRational())\n\n\t\t\t\tframe.\n\t\t\t\t\tSetNbSamples(ost.CodecCtx().FrameSize()).\n\t\t\t\t\tSetFormat(ost.CodecCtx().SampleFmt()).\n\t\t\t\t\tSetChannelLayout(ost.CodecCtx().ChannelLayout()).\n\t\t\t\t\tSetPts(pts)\n\t\t\t} else {\n\t\t\t\tframe.SetPts(ost.Pts)\n\t\t\t}\n\n\t\t\tif p, ready, _ := frame.EncodeNewPacket(ost.CodecCtx()); ready {\n\t\t\t\tif p.Pts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetPts(gmf.RescaleQ(p.Pts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tif p.Dts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetDts(gmf.RescaleQ(p.Dts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tp.SetStreamIndex(ost.Index())\n\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t}\n\n\t\t\tost.Pts++\n\t\t\tframesCount++\n\t\t\tpercentage := string(strconv.FormatInt(int64(framesCount\/totalFrames*100), 10) + \"%\")\n\t\t\tif percentage != job.Details {\n\t\t\t\tjob.Details = percentage\n\t\t\t\tdbInstance.UpdateJob(job.ID, job)\n\t\t\t}\n\t\t}\n\t\tgmf.Release(packet)\n\t}\n\n\tfor i := 0; i < outputCtx.StreamsCnt(); i++ {\n\t\tist, err := inputCtx.GetStream(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tost, err := outputCtx.GetStream(stMap[ist.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tframe := gmf.NewFrame()\n\n\t\tfor {\n\t\t\tif p, ready, _ := frame.FlushNewPacket(ost.CodecCtx()); ready {\n\t\t\t\tif p.Pts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetPts(gmf.RescaleQ(p.Pts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tif p.Dts() != gmf.AV_NOPTS_VALUE {\n\t\t\t\t\tp.SetDts(gmf.RescaleQ(p.Dts(), ost.CodecCtx().TimeBase(), ost.TimeBase()))\n\t\t\t\t}\n\n\t\t\t\tp.SetStreamIndex(ost.Index())\n\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t} else {\n\t\t\t\tgmf.Release(p)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tost.Pts++\n\t\t}\n\n\t\tgmf.Release(frame)\n\t}\n\tif job.Details != \"100%\" {\n\t\tjob.Details = \"100%\"\n\t\tdbInstance.UpdateJob(job.ID, job)\n\t}\n\n\treturn nil\n}\n\nfunc getProfile(job types.Job) int {\n\tprofiles := map[string]int{\n\t\t\"baseline\": gmf.FF_PROFILE_H264_BASELINE,\n\t\t\"main\": gmf.FF_PROFILE_H264_MAIN,\n\t\t\"high\": gmf.FF_PROFILE_H264_HIGH,\n\t}\n\n\tif job.Preset.Video.Profile != \"\" {\n\t\treturn profiles[job.Preset.Video.Profile]\n\t}\n\treturn gmf.FF_PROFILE_H264_MAIN\n}\n\nfunc getCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"h264\": \"libx264\",\n\t\t\"vp8\": \"libvpx\",\n\t\t\"vp9\": \"libvpx-vp9\",\n\t\t\"theora\": \"libtheora\",\n\t}\n\n\tif job.Preset.Video.Codec != \"\" {\n\t\treturn codecs[job.Preset.Video.Codec]\n\t}\n\treturn \"libx264\"\n}\n\nfunc GetResolution(job types.Job, inputWidth int, inputHeight int) (int, int) {\n\tvar width, height int\n\tif job.Preset.Video.Width == \"\" && job.Preset.Video.Height == \"\" {\n\t\treturn inputWidth, inputHeight\n\t} else if job.Preset.Video.Width == \"\" {\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t\twidth = (inputWidth * height) \/ inputHeight\n\t} else if job.Preset.Video.Height == \"\" {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight = (inputHeight * width) \/ inputWidth\n\t} else {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t}\n\treturn width, height\n}\n\nfunc setAudioCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tbitrate, err := strconv.Atoi(job.Preset.Audio.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetSampleFmt(ist.CodecCtx().SampleFmt())\n\tcodecContext.SetSampleRate(ist.CodecCtx().SampleRate())\n\tcodecContext.SetChannels(ist.CodecCtx().Channels())\n\tcodecContext.SelectChannelLayout()\n\tcodecContext.SelectSampleRate()\n\treturn nil\n}\n\nfunc setVideoCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tcodecContext.SetTimeBase(gmf.AVR{Num: 1, Den: 25}) \/\/ what is this\n\n\tif job.Preset.Video.Codec == \"h264\" {\n\t\tprofile := getProfile(job)\n\t\tcodecContext.SetProfile(profile)\n\t}\n\n\tgop, err := strconv.Atoi(job.Preset.Video.GopSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twidth, height := GetResolution(job, ist.CodecCtx().Width(), ist.CodecCtx().Height())\n\n\tbitrate, err := strconv.Atoi(job.Preset.Video.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetDimension(width, height)\n\tcodecContext.SetGopSize(gop)\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetPixFmt(ist.CodecCtx().PixFmt())\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package encoders\n\nimport (\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/3d0c\/gmf\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\n\/\/ FFMPEGEncode function is responsible for encoding the file\nfunc FFMPEGEncode(logger lager.Logger, dbInstance db.Storage, jobID string) error {\n\tlog := logger.Session(\"ffmpeg-encode\")\n\tlog.Info(\"started\", lager.Data{\"job\": jobID})\n\tdefer log.Info(\"finished\")\n\n\tgmf.LogSetLevel(gmf.AV_LOG_FATAL)\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\n\t\/\/ create input context\n\tinputCtx, err := gmf.NewInputCtx(job.LocalSource)\n\tif err != nil {\n\t\tlog.Error(\"input-failed\", err)\n\t\treturn err\n\t}\n\tdefer inputCtx.CloseInputAndRelease()\n\n\t\/\/ create output context\n\toutputCtx, err := gmf.NewOutputCtx(job.LocalDestination)\n\tif err != nil {\n\t\tlog.Error(\"output-failed\", err)\n\t\treturn err\n\t}\n\tdefer outputCtx.CloseOutputAndRelease()\n\n\tjob.Status = types.JobEncoding\n\tjob.Details = \"0%\"\n\tdbInstance.UpdateJob(job.ID, job)\n\n\t\/\/get audio and video stream and the streaMap\n\tstreamMap, srcVideoStream, srcAudioStream, err := getAudioVideoStreams(inputCtx, outputCtx, job)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/calculate total number of frames\n\ttotalFrames := float64(srcVideoStream.NbFrames() + srcAudioStream.NbFrames())\n\t\/\/process all frames and update the job progress\n\terr = processAllFramesAndUpdateJobProgress(inputCtx, outputCtx, streamMap, job, dbInstance, totalFrames)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprocessNewFrames(inputCtx, outputCtx, streamMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif job.Details != \"100%\" {\n\t\tjob.Details = \"100%\"\n\t\tdbInstance.UpdateJob(job.ID, job)\n\t}\n\n\treturn nil\n}\n\nfunc processNewFrames(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, streamMap map[int]int) error {\n\tfor i := 0; i < outputCtx.StreamsCnt(); i++ {\n\t\t_, ost, err := getOutputAndInputStream(inputCtx, outputCtx, streamMap, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tframe := gmf.NewFrame()\n\n\t\tfor {\n\t\t\tif p, ready, _ := frame.FlushNewPacket(ost.CodecCtx()); ready {\n\t\t\t\tconfigurePacket(p, ost, frame)\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t} else {\n\t\t\t\tgmf.Release(p)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tost.Pts++\n\t\t}\n\n\t\tgmf.Release(frame)\n\t}\n\n\treturn nil\n}\n\nfunc processAllFramesAndUpdateJobProgress(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, streamMap map[int]int, job types.Job, dbInstance db.Storage, totalFrames float64) error {\n\tvar lastDelta int64\n\tfor packet := range inputCtx.GetNewPackets() {\n\t\tist, ost, err := getOutputAndInputStream(inputCtx, outputCtx, streamMap, packet.StreamIndex())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tframesCount := float64(0)\n\t\tfor frame := range packet.Frames(ist.CodecCtx()) {\n\t\t\terr := proccessFrame(ist, ost, packet, frame, outputCtx, &lastDelta)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tost.Pts++\n\t\t\tframesCount++\n\t\t\tpercentage := string(strconv.FormatInt(int64(framesCount\/totalFrames*100), 10) + \"%\")\n\t\t\tif percentage != job.Details {\n\t\t\t\tjob.Details = percentage\n\t\t\t\tdbInstance.UpdateJob(job.ID, job)\n\t\t\t}\n\t\t}\n\n\t\tgmf.Release(packet)\n\t}\n\treturn nil\n}\n\nfunc getOutputAndInputStream(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, streamMap map[int]int, inputIndex int) (*gmf.Stream, *gmf.Stream, error) {\n\tinputStream, err := inputCtx.GetStream(inputIndex)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\toutputStream, err := outputCtx.GetStream(streamMap[inputStream.Index()])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn inputStream, outputStream, nil\n}\n\nfunc getAudioVideoStreams(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, job types.Job) (map[int]int, *gmf.Stream, *gmf.Stream, error) {\n\tstreamMap := make(map[int]int, 0)\n\n\t\/\/ add video stream to streamMap\n\tsrcVideoStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_VIDEO)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.New(\"unable to get the best video stream inside the input context\")\n\t}\n\tvideoCodec := getVideoCodec(job)\n\tinputIndex, outputIndex, err := addStream(job, videoCodec, outputCtx, srcVideoStream)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstreamMap[inputIndex] = outputIndex\n\n\t\/\/ add audio stream to streamMap\n\tsrcAudioStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_AUDIO)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.New(\"unable to get the best audio stream inside the input context\")\n\t}\n\taudioCodec := getAudioCodec(job)\n\tinputIndex, outputIndex, err = addStream(job, audioCodec, outputCtx, srcAudioStream)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstreamMap[inputIndex] = outputIndex\n\tif err := outputCtx.WriteHeader(); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn streamMap, srcVideoStream, srcAudioStream, nil\n}\n\nfunc configureAudioFrame(packet *gmf.Packet, inputStream *gmf.Stream, outputStream *gmf.Stream, frame *gmf.Frame, lastDelta *int64) {\n\tfsTb := gmf.AVR{Num: 1, Den: inputStream.CodecCtx().SampleRate()}\n\toutTb := gmf.AVR{Num: 1, Den: inputStream.CodecCtx().SampleRate()}\n\n\tframe.SetPts(packet.Pts())\n\n\tpts := gmf.RescaleDelta(inputStream.TimeBase(), frame.Pts(), fsTb.AVRational(), frame.NbSamples(), lastDelta, outTb.AVRational())\n\n\tframe.SetNbSamples(outputStream.CodecCtx().FrameSize())\n\tframe.SetFormat(outputStream.CodecCtx().SampleFmt())\n\tframe.SetChannelLayout(outputStream.CodecCtx().ChannelLayout())\n\tframe.SetPts(pts)\n}\n\nfunc configurePacket(packet *gmf.Packet, outputStream *gmf.Stream, frame *gmf.Frame) *gmf.Packet {\n\tif packet.Pts() != gmf.AV_NOPTS_VALUE {\n\t\tpacket.SetPts(gmf.RescaleQ(packet.Pts(), outputStream.CodecCtx().TimeBase(), outputStream.TimeBase()))\n\t}\n\n\tif packet.Dts() != gmf.AV_NOPTS_VALUE {\n\t\tpacket.SetDts(gmf.RescaleQ(packet.Dts(), outputStream.CodecCtx().TimeBase(), outputStream.TimeBase()))\n\t}\n\n\tpacket.SetStreamIndex(outputStream.Index())\n\n\treturn packet\n}\n\nfunc proccessFrame(inputStream *gmf.Stream, outputStream *gmf.Stream, packet *gmf.Packet, frame *gmf.Frame, outputCtx *gmf.FmtCtx, lastDelta *int64) error {\n\tif outputStream.IsAudio() {\n\t\tconfigureAudioFrame(packet, inputStream, outputStream, frame, lastDelta)\n\t} else {\n\t\tframe.SetPts(outputStream.Pts)\n\t}\n\n\tif newPacket, ready, _ := frame.EncodeNewPacket(outputStream.CodecCtx()); ready {\n\t\tconfigurePacket(newPacket, outputStream, frame)\n\t\tif err := outputCtx.WritePacket(newPacket); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgmf.Release(newPacket)\n\t}\n\n\treturn nil\n}\n\nfunc addStream(job types.Job, codecName string, oc *gmf.FmtCtx, inputStream *gmf.Stream) (int, int, error) {\n\tvar codecContext *gmf.CodecCtx\n\tvar outputStream *gmf.Stream\n\n\tcodec, err := gmf.FindEncoder(codecName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif outputStream = oc.NewStream(codec); outputStream == nil {\n\t\treturn 0, 0, errors.New(\"unable to create stream in output context\")\n\t}\n\tdefer gmf.Release(outputStream)\n\n\tif codecContext = gmf.NewCodecCtx(codec); codecContext == nil {\n\t\treturn 0, 0, errors.New(\"unable to create codec context\")\n\t}\n\tdefer gmf.Release(codecContext)\n\n\t\/\/ https:\/\/ffmpeg.org\/pipermail\/ffmpeg-devel\/2008-January\/046900.html\n\tif oc.IsGlobalHeader() {\n\t\tcodecContext.SetFlag(gmf.CODEC_FLAG_GLOBAL_HEADER)\n\t}\n\n\tif codec.IsExperimental() {\n\t\tcodecContext.SetStrictCompliance(gmf.FF_COMPLIANCE_EXPERIMENTAL)\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_AUDIO {\n\t\terr := setAudioCtxParams(codecContext, inputStream, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_VIDEO {\n\t\terr := setVideoCtxParams(codecContext, inputStream, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif err := codecContext.Open(nil); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\toutputStream.SetCodecCtx(codecContext)\n\n\treturn inputStream.Index(), outputStream.Index(), nil\n}\n\nfunc getProfile(job types.Job) int {\n\tprofiles := map[string]int{\n\t\t\"baseline\": gmf.FF_PROFILE_H264_BASELINE,\n\t\t\"main\": gmf.FF_PROFILE_H264_MAIN,\n\t\t\"high\": gmf.FF_PROFILE_H264_HIGH,\n\t}\n\n\tif job.Preset.Video.Profile != \"\" {\n\t\treturn profiles[job.Preset.Video.Profile]\n\t}\n\treturn gmf.FF_PROFILE_H264_MAIN\n}\n\nfunc getVideoCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"h264\": \"libx264\",\n\t\t\"vp8\": \"libvpx\",\n\t\t\"vp9\": \"libvpx-vp9\",\n\t\t\"theora\": \"libtheora\",\n\t\t\"aac\": \"aac\",\n\t}\n\n\tif codec, ok := codecs[job.Preset.Video.Codec]; ok {\n\t\treturn codec\n\t}\n\treturn \"libx264\"\n}\n\nfunc getAudioCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"aac\": \"aac\",\n\t\t\"vorbis\": \"vorbis\",\n\t}\n\tif codec, ok := codecs[job.Preset.Audio.Codec]; ok {\n\t\treturn codec\n\t}\n\treturn \"aac\"\n}\n\nfunc GetResolution(job types.Job, inputWidth int, inputHeight int) (int, int) {\n\tvar width, height int\n\tif job.Preset.Video.Width == \"\" && job.Preset.Video.Height == \"\" {\n\t\treturn inputWidth, inputHeight\n\t} else if job.Preset.Video.Width == \"\" {\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t\twidth = (inputWidth * height) \/ inputHeight\n\t} else if job.Preset.Video.Height == \"\" {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight = (inputHeight * width) \/ inputWidth\n\t} else {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t}\n\treturn width, height\n}\n\nfunc setAudioCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tbitrate, err := strconv.Atoi(job.Preset.Audio.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetSampleFmt(ist.CodecCtx().SampleFmt())\n\tcodecContext.SetSampleRate(ist.CodecCtx().SampleRate())\n\tcodecContext.SetChannels(ist.CodecCtx().Channels())\n\tcodecContext.SelectChannelLayout()\n\tcodecContext.SelectSampleRate()\n\treturn nil\n}\n\nfunc setVideoCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tcodecContext.SetTimeBase(gmf.AVR{Num: 1, Den: 25}) \/\/ what is this\n\n\tif job.Preset.Video.Codec == \"h264\" {\n\t\tprofile := getProfile(job)\n\t\tcodecContext.SetProfile(profile)\n\t}\n\n\tgop, err := strconv.Atoi(job.Preset.Video.GopSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twidth, height := GetResolution(job, ist.CodecCtx().Width(), ist.CodecCtx().Height())\n\n\tbitrate, err := strconv.Atoi(job.Preset.Video.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetDimension(width, height)\n\tcodecContext.SetGopSize(gop)\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetPixFmt(ist.CodecCtx().PixFmt())\n\n\treturn nil\n}\n<commit_msg>removed commented import, simplified getStream<commit_after>package encoders\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\n\t\"github.com\/3d0c\/gmf\"\n\t\"github.com\/snickers\/snickers\/db\"\n\t\"github.com\/snickers\/snickers\/types\"\n)\n\n\/\/ FFMPEGEncode function is responsible for encoding the file\nfunc FFMPEGEncode(logger lager.Logger, dbInstance db.Storage, jobID string) error {\n\tlog := logger.Session(\"ffmpeg-encode\")\n\tlog.Info(\"started\", lager.Data{\"job\": jobID})\n\tdefer log.Info(\"finished\")\n\n\tgmf.LogSetLevel(gmf.AV_LOG_FATAL)\n\tjob, _ := dbInstance.RetrieveJob(jobID)\n\n\t\/\/ create input context\n\tinputCtx, err := gmf.NewInputCtx(job.LocalSource)\n\tif err != nil {\n\t\tlog.Error(\"input-failed\", err)\n\t\treturn err\n\t}\n\tdefer inputCtx.CloseInputAndRelease()\n\n\t\/\/ create output context\n\toutputCtx, err := gmf.NewOutputCtx(job.LocalDestination)\n\tif err != nil {\n\t\tlog.Error(\"output-failed\", err)\n\t\treturn err\n\t}\n\tdefer outputCtx.CloseOutputAndRelease()\n\n\tjob.Status = types.JobEncoding\n\tjob.Details = \"0%\"\n\tdbInstance.UpdateJob(job.ID, job)\n\n\t\/\/get audio and video stream and the streaMap\n\tstreamMap, srcVideoStream, srcAudioStream, err := getAudioVideoStreamSource(inputCtx, outputCtx, job)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/calculate total number of frames\n\ttotalFrames := float64(srcVideoStream.NbFrames() + srcAudioStream.NbFrames())\n\t\/\/process all frames and update the job progress\n\terr = processAllFramesAndUpdateJobProgress(inputCtx, outputCtx, streamMap, job, dbInstance, totalFrames)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprocessNewFrames(inputCtx, outputCtx, streamMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif job.Details != \"100%\" {\n\t\tjob.Details = \"100%\"\n\t\tdbInstance.UpdateJob(job.ID, job)\n\t}\n\n\treturn nil\n}\n\nfunc processNewFrames(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, streamMap map[int]int) error {\n\tfor i := 0; i < outputCtx.StreamsCnt(); i++ {\n\t\tinputStream, err := getStream(inputCtx, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toutputStream, err := getStream(outputCtx, streamMap[inputStream.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tframe := gmf.NewFrame()\n\n\t\tfor {\n\t\t\tif p, ready, _ := frame.FlushNewPacket(outputStream.CodecCtx()); ready {\n\t\t\t\tconfigurePacket(p, outputStream, frame)\n\t\t\t\tif err := outputCtx.WritePacket(p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgmf.Release(p)\n\t\t\t} else {\n\t\t\t\tgmf.Release(p)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\toutputStream.Pts++\n\t\t}\n\n\t\tgmf.Release(frame)\n\t}\n\n\treturn nil\n}\n\nfunc processAllFramesAndUpdateJobProgress(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, streamMap map[int]int, job types.Job, dbInstance db.Storage, totalFrames float64) error {\n\tvar lastDelta int64\n\tfor packet := range inputCtx.GetNewPackets() {\n\t\tinputStream, err := getStream(inputCtx, packet.StreamIndex())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toutputStream, err := getStream(outputCtx, streamMap[inputStream.Index()])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tframesCount := float64(0)\n\t\tfor frame := range packet.Frames(inputStream.CodecCtx()) {\n\t\t\terr := proccessFrame(inputStream, outputStream, packet, frame, outputCtx, &lastDelta)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\toutputStream.Pts++\n\t\t\tframesCount++\n\t\t\tpercentage := string(strconv.FormatInt(int64(framesCount\/totalFrames*100), 10) + \"%\")\n\t\t\tif percentage != job.Details {\n\t\t\t\tjob.Details = percentage\n\t\t\t\tdbInstance.UpdateJob(job.ID, job)\n\t\t\t}\n\t\t}\n\n\t\tgmf.Release(packet)\n\t}\n\treturn nil\n}\n\nfunc getStream(context *gmf.FmtCtx, streamIndex int) (*gmf.Stream, error) {\n\treturn context.GetStream(streamIndex)\n}\n\nfunc getAudioVideoStreamSource(inputCtx *gmf.FmtCtx, outputCtx *gmf.FmtCtx, job types.Job) (map[int]int, *gmf.Stream, *gmf.Stream, error) {\n\tstreamMap := make(map[int]int, 0)\n\n\t\/\/ add video stream to streamMap\n\tsrcVideoStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_VIDEO)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.New(\"unable to get the best video stream inside the input context\")\n\t}\n\tvideoCodec := getVideoCodec(job)\n\tinputIndex, outputIndex, err := addStream(job, videoCodec, outputCtx, srcVideoStream)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstreamMap[inputIndex] = outputIndex\n\n\t\/\/ add audio stream to streamMap\n\tsrcAudioStream, err := inputCtx.GetBestStream(gmf.AVMEDIA_TYPE_AUDIO)\n\tif err != nil {\n\t\treturn nil, nil, nil, errors.New(\"unable to get the best audio stream inside the input context\")\n\t}\n\taudioCodec := getAudioCodec(job)\n\tinputIndex, outputIndex, err = addStream(job, audioCodec, outputCtx, srcAudioStream)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstreamMap[inputIndex] = outputIndex\n\tif err := outputCtx.WriteHeader(); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn streamMap, srcVideoStream, srcAudioStream, nil\n}\n\nfunc configureAudioFrame(packet *gmf.Packet, inputStream *gmf.Stream, outputStream *gmf.Stream, frame *gmf.Frame, lastDelta *int64) {\n\tfsTb := gmf.AVR{Num: 1, Den: inputStream.CodecCtx().SampleRate()}\n\toutTb := gmf.AVR{Num: 1, Den: inputStream.CodecCtx().SampleRate()}\n\n\tframe.SetPts(packet.Pts())\n\n\tpts := gmf.RescaleDelta(inputStream.TimeBase(), frame.Pts(), fsTb.AVRational(), frame.NbSamples(), lastDelta, outTb.AVRational())\n\n\tframe.SetNbSamples(outputStream.CodecCtx().FrameSize())\n\tframe.SetFormat(outputStream.CodecCtx().SampleFmt())\n\tframe.SetChannelLayout(outputStream.CodecCtx().ChannelLayout())\n\tframe.SetPts(pts)\n}\n\nfunc configurePacket(packet *gmf.Packet, outputStream *gmf.Stream, frame *gmf.Frame) *gmf.Packet {\n\tif packet.Pts() != gmf.AV_NOPTS_VALUE {\n\t\tpacket.SetPts(gmf.RescaleQ(packet.Pts(), outputStream.CodecCtx().TimeBase(), outputStream.TimeBase()))\n\t}\n\n\tif packet.Dts() != gmf.AV_NOPTS_VALUE {\n\t\tpacket.SetDts(gmf.RescaleQ(packet.Dts(), outputStream.CodecCtx().TimeBase(), outputStream.TimeBase()))\n\t}\n\n\tpacket.SetStreamIndex(outputStream.Index())\n\n\treturn packet\n}\n\nfunc proccessFrame(inputStream *gmf.Stream, outputStream *gmf.Stream, packet *gmf.Packet, frame *gmf.Frame, outputCtx *gmf.FmtCtx, lastDelta *int64) error {\n\tif outputStream.IsAudio() {\n\t\tconfigureAudioFrame(packet, inputStream, outputStream, frame, lastDelta)\n\t} else {\n\t\tframe.SetPts(outputStream.Pts)\n\t}\n\n\tif newPacket, ready, _ := frame.EncodeNewPacket(outputStream.CodecCtx()); ready {\n\t\tconfigurePacket(newPacket, outputStream, frame)\n\t\tif err := outputCtx.WritePacket(newPacket); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgmf.Release(newPacket)\n\t}\n\n\treturn nil\n}\n\nfunc addStream(job types.Job, codecName string, oc *gmf.FmtCtx, inputStream *gmf.Stream) (int, int, error) {\n\tvar codecContext *gmf.CodecCtx\n\tvar outputStream *gmf.Stream\n\n\tcodec, err := gmf.FindEncoder(codecName)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif outputStream = oc.NewStream(codec); outputStream == nil {\n\t\treturn 0, 0, errors.New(\"unable to create stream in output context\")\n\t}\n\tdefer gmf.Release(outputStream)\n\n\tif codecContext = gmf.NewCodecCtx(codec); codecContext == nil {\n\t\treturn 0, 0, errors.New(\"unable to create codec context\")\n\t}\n\tdefer gmf.Release(codecContext)\n\n\t\/\/ https:\/\/ffmpeg.org\/pipermail\/ffmpeg-devel\/2008-January\/046900.html\n\tif oc.IsGlobalHeader() {\n\t\tcodecContext.SetFlag(gmf.CODEC_FLAG_GLOBAL_HEADER)\n\t}\n\n\tif codec.IsExperimental() {\n\t\tcodecContext.SetStrictCompliance(gmf.FF_COMPLIANCE_EXPERIMENTAL)\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_AUDIO {\n\t\terr := setAudioCtxParams(codecContext, inputStream, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif codecContext.Type() == gmf.AVMEDIA_TYPE_VIDEO {\n\t\terr := setVideoCtxParams(codecContext, inputStream, job)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\tif err := codecContext.Open(nil); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\toutputStream.SetCodecCtx(codecContext)\n\n\treturn inputStream.Index(), outputStream.Index(), nil\n}\n\nfunc getProfile(job types.Job) int {\n\tprofiles := map[string]int{\n\t\t\"baseline\": gmf.FF_PROFILE_H264_BASELINE,\n\t\t\"main\": gmf.FF_PROFILE_H264_MAIN,\n\t\t\"high\": gmf.FF_PROFILE_H264_HIGH,\n\t}\n\n\tif job.Preset.Video.Profile != \"\" {\n\t\treturn profiles[job.Preset.Video.Profile]\n\t}\n\treturn gmf.FF_PROFILE_H264_MAIN\n}\n\nfunc getVideoCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"h264\": \"libx264\",\n\t\t\"vp8\": \"libvpx\",\n\t\t\"vp9\": \"libvpx-vp9\",\n\t\t\"theora\": \"libtheora\",\n\t\t\"aac\": \"aac\",\n\t}\n\n\tif codec, ok := codecs[job.Preset.Video.Codec]; ok {\n\t\treturn codec\n\t}\n\treturn \"libx264\"\n}\n\nfunc getAudioCodec(job types.Job) string {\n\tcodecs := map[string]string{\n\t\t\"aac\": \"aac\",\n\t\t\"vorbis\": \"vorbis\",\n\t}\n\tif codec, ok := codecs[job.Preset.Audio.Codec]; ok {\n\t\treturn codec\n\t}\n\treturn \"aac\"\n}\n\nfunc GetResolution(job types.Job, inputWidth int, inputHeight int) (int, int) {\n\tvar width, height int\n\tif job.Preset.Video.Width == \"\" && job.Preset.Video.Height == \"\" {\n\t\treturn inputWidth, inputHeight\n\t} else if job.Preset.Video.Width == \"\" {\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t\twidth = (inputWidth * height) \/ inputHeight\n\t} else if job.Preset.Video.Height == \"\" {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight = (inputHeight * width) \/ inputWidth\n\t} else {\n\t\twidth, _ = strconv.Atoi(job.Preset.Video.Width)\n\t\theight, _ = strconv.Atoi(job.Preset.Video.Height)\n\t}\n\treturn width, height\n}\n\nfunc setAudioCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tbitrate, err := strconv.Atoi(job.Preset.Audio.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetSampleFmt(ist.CodecCtx().SampleFmt())\n\tcodecContext.SetSampleRate(ist.CodecCtx().SampleRate())\n\tcodecContext.SetChannels(ist.CodecCtx().Channels())\n\tcodecContext.SelectChannelLayout()\n\tcodecContext.SelectSampleRate()\n\treturn nil\n}\n\nfunc setVideoCtxParams(codecContext *gmf.CodecCtx, ist *gmf.Stream, job types.Job) error {\n\tcodecContext.SetTimeBase(gmf.AVR{Num: 1, Den: 25}) \/\/ what is this\n\n\tif job.Preset.Video.Codec == \"h264\" {\n\t\tprofile := getProfile(job)\n\t\tcodecContext.SetProfile(profile)\n\t}\n\n\tgop, err := strconv.Atoi(job.Preset.Video.GopSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twidth, height := GetResolution(job, ist.CodecCtx().Width(), ist.CodecCtx().Height())\n\n\tbitrate, err := strconv.Atoi(job.Preset.Video.Bitrate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodecContext.SetDimension(width, height)\n\tcodecContext.SetGopSize(gop)\n\tcodecContext.SetBitRate(bitrate)\n\tcodecContext.SetPixFmt(ist.CodecCtx().PixFmt())\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2016 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage native\n\nimport (\n\t\"github.com\/gonum\/blas\"\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an\n\/\/ orthogonal similarity transformation\n\/\/ Q^T * A * Q = T\n\/\/ where Q is an orthonormal matrix and T is symmetric and tridiagonal.\n\/\/\n\/\/ On entry, a contains the elements of the input matrix in the triangle specified\n\/\/ by uplo. On exit, the diagonal and sub\/super-diagonal are overwritten by the\n\/\/ corresponding elements of the tridiagonal matrix T. The remaining elements in\n\/\/ the triangle, along with the array tau, contain the data to construct Q as\n\/\/ the product of elementary reflectors.\n\/\/\n\/\/ If uplo == blas.Upper, Q is constructed with\n\/\/ Q = H[n-2] * ... * H[1] * H[0]\n\/\/ where\n\/\/ H[i] = I - tau * v * v^T\n\/\/ v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1],\n\/\/ and tau is in tau[i]. The elements of A are\n\/\/ [ d e v2 v3 v4]\n\/\/ [ d e v3 v4]\n\/\/ [ d e v4]\n\/\/ [ d e]\n\/\/ [ e]\n\/\/\n\/\/ If uplo == blas.Lower, Q is constructed with\n\/\/ Q = H[0] * H[1] * ... * H[n-2]\n\/\/ where\n\/\/ H[i] = I - tau * v * v^T\n\/\/ v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i],\n\/\/ and tau is in tau[i]. The elements of A are\n\/\/ [ d ]\n\/\/ [ e d ]\n\/\/ [v1 e d ]\n\/\/ [v1 v2 e d ]\n\/\/ [v1 v2 v3 e d]\n\/\/\n\/\/ d must have length n, and e and tau must have length n-1. Dsytrd will panic if\n\/\/ these conditions are not met.\n\/\/\n\/\/ work is temporary storage, and lwork specifies the usable memory length. At minimum,\n\/\/ lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is\n\/\/ limited by the usable length.\n\/\/ If lwork == -1, instead of computing Dsytrd the optimal work length is stored\n\/\/ into work[0].\nfunc (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) {\n\tupper := uplo == blas.Upper\n\topts := \"U\"\n\tif !upper {\n\t\topts = \"L\"\n\t}\n\tnb := impl.Ilaenv(1, \"DSYTRD\", opts, n, -1, -1, -1)\n\tlworkopt := n * nb\n\twork[0] = float64(lworkopt)\n\tif lwork == -1 {\n\t\treturn\n\t}\n\tif n == 0 {\n\t\twork[0] = 1\n\t}\n\tnx := n\n\n\tbi := blas64.Implementation()\n\tvar ldwork int\n\tif nb > 1 && nb < n {\n\t\t\/\/ Determine when to cross over from blocked to unblocked code. The last\n\t\t\/\/ block is always handled by unblocked code.\n\t\topts := \"L\"\n\t\tif upper {\n\t\t\topts = \"U\"\n\t\t}\n\t\tnx = max(nb, impl.Ilaenv(3, \"DSYTRD\", opts, n, -1, -1, -1))\n\t\tif nx < n {\n\t\t\t\/\/ Determine if workspace is large enough for blocked code.\n\t\t\tldwork = nb\n\t\t\tiws := n * ldwork\n\t\t\tif lwork < iws {\n\t\t\t\t\/\/ Not enough workspace to use optimal nb: determine the minimum\n\t\t\t\t\/\/ value of nb and reduce nb or force use of unblocked code by\n\t\t\t\t\/\/ setting nx = n.\n\t\t\t\tnb = max(lwork\/n, 1)\n\t\t\t\tnbmin := impl.Ilaenv(2, \"DSYTRD\", opts, n, -1, -1, -1)\n\t\t\t\tif nb < nbmin {\n\t\t\t\t\tnx = n\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnx = n\n\t\t}\n\t} else {\n\t\tnb = 1\n\t}\n\tldwork = nb\n\n\tif upper {\n\t\t\/\/ Reduce the upper triangle of A. Columns 0:kk are handled by the\n\t\t\/\/ unblocked method.\n\t\tvar i int\n\t\tkk := n - ((n-nx+nb-1)\/nb)*nb\n\t\tfor i = n - nb; i >= kk; i -= nb {\n\t\t\t\/\/ Reduce columns i:i+nb to tridiagonal form and form the matrix W\n\t\t\t\/\/ which is needed to update the unreduced part of the matrix.\n\t\t\timpl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork)\n\n\t\t\t\/\/ Update the unreduced submatrix A[0:i-1,0:i-1], using an update\n\t\t\t\/\/ of the form A = A - V*W^T - W*V^T.\n\t\t\tbi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda)\n\n\t\t\t\/\/ Copy superdiagonal elements back into A, and diagonal elements into D.\n\t\t\tfor j := i; j < i+nb; j++ {\n\t\t\t\ta[(j-1)*lda+j] = e[j-1]\n\t\t\t\td[j] = a[j*lda+j]\n\t\t\t}\n\t\t}\n\t\t\/\/ Use unblocked code to reduce the last or only block\n\t\t\/\/ check that i == kk.\n\t\timpl.Dsytd2(uplo, kk, a, lda, d, e, tau)\n\t} else {\n\t\tvar i int\n\t\t\/\/ Reduce the lower triangle of A.\n\t\tfor i = 0; i < n-nx; i += nb {\n\t\t\t\/\/ Reduce columns 0:i+nb to tridiagonal form and form the matrix W\n\t\t\t\/\/ which is needed to update the unreduced part of the matrix.\n\t\t\timpl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork)\n\n\t\t\t\/\/ Update the unreduced submatrix A[i+ib:n, i+ib:n], using an update\n\t\t\t\/\/ of the form A = A + V*W^T - W*V^T.\n\t\t\tbi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda,\n\t\t\t\twork[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda)\n\n\t\t\t\/\/ Copy subdiagonal elements back into A, and diagonal elements into D.\n\t\t\tfor j := i; j < i+nb; j++ {\n\t\t\t\ta[(j+1)*lda+j] = e[j]\n\t\t\t\td[j] = a[j*lda+j]\n\t\t\t}\n\t\t}\n\t\t\/\/ Use unblocked code to reduce the last or only block.\n\t\timpl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:])\n\t}\n\twork[0] = float64(lworkopt)\n}\n<commit_msg>native: fix missed matrix for typography changes<commit_after>\/\/ Copyright ©2016 The gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage native\n\nimport (\n\t\"github.com\/gonum\/blas\"\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an\n\/\/ orthogonal similarity transformation\n\/\/ Q^T * A * Q = T\n\/\/ where Q is an orthonormal matrix and T is symmetric and tridiagonal.\n\/\/\n\/\/ On entry, a contains the elements of the input matrix in the triangle specified\n\/\/ by uplo. On exit, the diagonal and sub\/super-diagonal are overwritten by the\n\/\/ corresponding elements of the tridiagonal matrix T. The remaining elements in\n\/\/ the triangle, along with the array tau, contain the data to construct Q as\n\/\/ the product of elementary reflectors.\n\/\/\n\/\/ If uplo == blas.Upper, Q is constructed with\n\/\/ Q = H[n-2] * ... * H[1] * H[0]\n\/\/ where\n\/\/ H[i] = I - tau * v * v^T\n\/\/ v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1],\n\/\/ and tau is in tau[i]. The elements of A are\n\/\/ [ d e v2 v3 v4]\n\/\/ [ d e v3 v4]\n\/\/ [ d e v4]\n\/\/ [ d e]\n\/\/ [ e]\n\/\/\n\/\/ If uplo == blas.Lower, Q is constructed with\n\/\/ Q = H[0] * H[1] * ... * H[n-2]\n\/\/ where\n\/\/ H[i] = I - tau * v * v^T\n\/\/ v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i],\n\/\/ and tau is in tau[i]. The elements of A are\n\/\/ [ d ]\n\/\/ [ e d ]\n\/\/ [v1 e d ]\n\/\/ [v1 v2 e d ]\n\/\/ [v1 v2 v3 e d]\n\/\/\n\/\/ d must have length n, and e and tau must have length n-1. Dsytrd will panic if\n\/\/ these conditions are not met.\n\/\/\n\/\/ work is temporary storage, and lwork specifies the usable memory length. At minimum,\n\/\/ lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is\n\/\/ limited by the usable length.\n\/\/ If lwork == -1, instead of computing Dsytrd the optimal work length is stored\n\/\/ into work[0].\nfunc (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) {\n\tupper := uplo == blas.Upper\n\topts := \"U\"\n\tif !upper {\n\t\topts = \"L\"\n\t}\n\tnb := impl.Ilaenv(1, \"DSYTRD\", opts, n, -1, -1, -1)\n\tlworkopt := n * nb\n\twork[0] = float64(lworkopt)\n\tif lwork == -1 {\n\t\treturn\n\t}\n\tif n == 0 {\n\t\twork[0] = 1\n\t}\n\tnx := n\n\n\tbi := blas64.Implementation()\n\tvar ldwork int\n\tif nb > 1 && nb < n {\n\t\t\/\/ Determine when to cross over from blocked to unblocked code. The last\n\t\t\/\/ block is always handled by unblocked code.\n\t\topts := \"L\"\n\t\tif upper {\n\t\t\topts = \"U\"\n\t\t}\n\t\tnx = max(nb, impl.Ilaenv(3, \"DSYTRD\", opts, n, -1, -1, -1))\n\t\tif nx < n {\n\t\t\t\/\/ Determine if workspace is large enough for blocked code.\n\t\t\tldwork = nb\n\t\t\tiws := n * ldwork\n\t\t\tif lwork < iws {\n\t\t\t\t\/\/ Not enough workspace to use optimal nb: determine the minimum\n\t\t\t\t\/\/ value of nb and reduce nb or force use of unblocked code by\n\t\t\t\t\/\/ setting nx = n.\n\t\t\t\tnb = max(lwork\/n, 1)\n\t\t\t\tnbmin := impl.Ilaenv(2, \"DSYTRD\", opts, n, -1, -1, -1)\n\t\t\t\tif nb < nbmin {\n\t\t\t\t\tnx = n\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnx = n\n\t\t}\n\t} else {\n\t\tnb = 1\n\t}\n\tldwork = nb\n\n\tif upper {\n\t\t\/\/ Reduce the upper triangle of A. Columns 0:kk are handled by the\n\t\t\/\/ unblocked method.\n\t\tvar i int\n\t\tkk := n - ((n-nx+nb-1)\/nb)*nb\n\t\tfor i = n - nb; i >= kk; i -= nb {\n\t\t\t\/\/ Reduce columns i:i+nb to tridiagonal form and form the matrix W\n\t\t\t\/\/ which is needed to update the unreduced part of the matrix.\n\t\t\timpl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork)\n\n\t\t\t\/\/ Update the unreduced submatrix A[0:i-1,0:i-1], using an update\n\t\t\t\/\/ of the form A = A - V*W^T - W*V^T.\n\t\t\tbi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda)\n\n\t\t\t\/\/ Copy superdiagonal elements back into A, and diagonal elements into D.\n\t\t\tfor j := i; j < i+nb; j++ {\n\t\t\t\ta[(j-1)*lda+j] = e[j-1]\n\t\t\t\td[j] = a[j*lda+j]\n\t\t\t}\n\t\t}\n\t\t\/\/ Use unblocked code to reduce the last or only block\n\t\t\/\/ check that i == kk.\n\t\timpl.Dsytd2(uplo, kk, a, lda, d, e, tau)\n\t} else {\n\t\tvar i int\n\t\t\/\/ Reduce the lower triangle of A.\n\t\tfor i = 0; i < n-nx; i += nb {\n\t\t\t\/\/ Reduce columns 0:i+nb to tridiagonal form and form the matrix W\n\t\t\t\/\/ which is needed to update the unreduced part of the matrix.\n\t\t\timpl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork)\n\n\t\t\t\/\/ Update the unreduced submatrix A[i+ib:n, i+ib:n], using an update\n\t\t\t\/\/ of the form A = A + V*W^T - W*V^T.\n\t\t\tbi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda,\n\t\t\t\twork[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda)\n\n\t\t\t\/\/ Copy subdiagonal elements back into A, and diagonal elements into D.\n\t\t\tfor j := i; j < i+nb; j++ {\n\t\t\t\ta[(j+1)*lda+j] = e[j]\n\t\t\t\td[j] = a[j*lda+j]\n\t\t\t}\n\t\t}\n\t\t\/\/ Use unblocked code to reduce the last or only block.\n\t\timpl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:])\n\t}\n\twork[0] = float64(lworkopt)\n}\n<|endoftext|>"} {"text":"<commit_before>package executor\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/taskenv\"\n\t\"github.com\/hashicorp\/nomad\/helper\/testlog\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/mock\"\n\t\"github.com\/hashicorp\/nomad\/plugins\/drivers\"\n\ttu \"github.com\/hashicorp\/nomad\/testutil\"\n\tps \"github.com\/mitchellh\/go-ps\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nvar executorFactories = map[string]func(hclog.Logger) Executor{}\nvar universalFactory = func(l hclog.Logger) Executor {\n\treturn NewExecutor(l)\n}\n\nfunc init() {\n\texecutorFactories[\"UniversalExecutor\"] = universalFactory\n}\n\n\/\/ testExecutorContext returns an ExecutorContext and AllocDir.\n\/\/\n\/\/ The caller is responsible for calling AllocDir.Destroy() to cleanup.\nfunc testExecutorCommand(t *testing.T) (*ExecCommand, *allocdir.AllocDir) {\n\talloc := mock.Alloc()\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttaskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, \"global\").Build()\n\n\tallocDir := allocdir.NewAllocDir(testlog.HCLogger(t), filepath.Join(os.TempDir(), alloc.ID))\n\tif err := allocDir.Build(); err != nil {\n\t\tt.Fatalf(\"AllocDir.Build() failed: %v\", err)\n\t}\n\tif err := allocDir.NewTaskDir(task.Name).Build(false, nil); err != nil {\n\t\tallocDir.Destroy()\n\t\tt.Fatalf(\"allocDir.NewTaskDir(%q) failed: %v\", task.Name, err)\n\t}\n\ttd := allocDir.TaskDirs[task.Name]\n\tcmd := &ExecCommand{\n\t\tEnv: taskEnv.List(),\n\t\tTaskDir: td.Dir,\n\t\tResources: &drivers.Resources{\n\t\t\tNomadResources: alloc.AllocatedResources.Tasks[task.Name],\n\t\t},\n\t}\n\n\tsetupRootfs(t, td.Dir)\n\tconfigureTLogging(cmd)\n\treturn cmd, allocDir\n}\n\ntype bufferCloser struct {\n\tbytes.Buffer\n}\n\nfunc (_ *bufferCloser) Close() error { return nil }\n\nfunc configureTLogging(cmd *ExecCommand) (stdout bufferCloser, stderr bufferCloser) {\n\tcmd.SetWriters(&stdout, &stderr)\n\treturn\n}\n\nfunc TestExecutor_Start_Invalid(pt *testing.T) {\n\tpt.Parallel()\n\tinvalid := \"\/bin\/foobar\"\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = invalid\n\t\t\texecCmd.Args = []string{\"1\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\t_, err := executor.Launch(execCmd)\n\t\t\trequire.Error(err)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Wait_Failure_Code(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/date\"\n\t\t\texecCmd.Args = []string{\"fail\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\t\t\tps, _ = executor.Wait(context.Background())\n\t\t\trequire.NotZero(ps.ExitCode, \"expected exit code to be non zero\")\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Wait(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/echo\"\n\t\t\texecCmd.Args = []string{\"hello world\"}\n\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\tps, err = executor.Wait(context.Background())\n\t\t\trequire.NoError(err)\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\t\t\texpected := \"hello world\"\n\t\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t\toutWriter, _ := execCmd.GetWriters()\n\t\t\t\toutput := outWriter.(*bufferCloser).String()\n\t\t\t\tact := strings.TrimSpace(string(output))\n\t\t\t\tif expected != act {\n\t\t\t\t\treturn false, fmt.Errorf(\"expected: '%s' actual: '%s'\", expected, act)\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t}, func(err error) {\n\t\t\t\trequire.NoError(err)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestExecutor_WaitExitSignal(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"10000\"}\n\t\t\texecCmd.ResourceLimits = true\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\n\t\t\tgo func() {\n\t\t\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t\t\tch, err := executor.Stats(context.Background(), time.Second)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\t\t\treturn false, fmt.Errorf(\"stats failed to send on interval\")\n\t\t\t\t\tcase ru := <-ch:\n\t\t\t\t\t\tassert.NotEmpty(t, ru.Pids, \"no pids recorded in stats\")\n\t\t\t\t\t\tassert.NotZero(t, ru.ResourceUsage.MemoryStats.RSS)\n\t\t\t\t\t\tassert.WithinDuration(t, time.Now(), time.Unix(0, ru.Timestamp), time.Second)\n\t\t\t\t\t}\n\t\t\t\t\tproc, err := os.FindProcess(ps.Pid)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\terr = proc.Signal(syscall.SIGKILL)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, func(err error) {\n\t\t\t\t\tassert.NoError(t, executor.Signal(os.Kill))\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}()\n\n\t\t\tps, err = executor.Wait(context.Background())\n\t\t\trequire.NoError(err)\n\t\t\trequire.Equal(ps.Signal, int(syscall.SIGKILL))\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Kill(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"10\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\t\t\ttime.Sleep(time.Duration(tu.TestMultiplier()*2) * time.Second)\n\t\t\toutWriter, _ := execCmd.GetWriters()\n\t\t\toutput := outWriter.(*bufferCloser).String()\n\t\t\texpected := \"\"\n\t\t\tact := strings.TrimSpace(string(output))\n\t\t\tif act != expected {\n\t\t\t\tt.Fatalf(\"Command output incorrectly: want %v; got %v\", expected, act)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Shutdown_Exit(t *testing.T) {\n\trequire := require.New(t)\n\tt.Parallel()\n\texecCmd, allocDir := testExecutorCommand(t)\n\texecCmd.Cmd = \"\/bin\/sleep\"\n\texecCmd.Args = []string{\"100\"}\n\tcfg := &ExecutorConfig{\n\t\tLogFile: \"\/dev\/null\",\n\t}\n\texecutor, pluginClient, err := CreateExecutor(testlog.HCLogger(t), nil, cfg)\n\trequire.NoError(err)\n\n\tproc, err := executor.Launch(execCmd)\n\trequire.NoError(err)\n\trequire.NotZero(proc.Pid)\n\n\texecutor.Shutdown(\"\", 0)\n\tpluginClient.Kill()\n\ttu.WaitForResult(func() (bool, error) {\n\t\tp, err := ps.FindProcess(proc.Pid)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn p == nil, fmt.Errorf(\"process found: %d\", proc.Pid)\n\t}, func(err error) {\n\t\trequire.NoError(err)\n\t})\n\trequire.NoError(allocDir.Destroy())\n}\n\nfunc TestUniversalExecutor_MakeExecutable(t *testing.T) {\n\tt.Parallel()\n\t\/\/ Create a temp file\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\tdefer os.Remove(f.Name())\n\n\t\/\/ Set its permissions to be non-executable\n\tf.Chmod(os.FileMode(0610))\n\n\terr = makeExecutable(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"makeExecutable() failed: %v\", err)\n\t}\n\n\t\/\/ Check the permissions\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\tt.Fatalf(\"Stat() failed: %v\", err)\n\t}\n\n\tact := stat.Mode().Perm()\n\texp := os.FileMode(0755)\n\tif act != exp {\n\t\tt.Fatalf(\"expected permissions %v; got %v\", exp, act)\n\t}\n}\n\nfunc TestUniversalExecutor_LookupPath(t *testing.T) {\n\tt.Parallel()\n\t\/\/ Create a temp dir\n\ttmpDir, err := ioutil.TempDir(\"\", \"\")\n\trequire := require.New(t)\n\trequire.Nil(err)\n\tdefer os.Remove(tmpDir)\n\n\t\/\/ Make a foo subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"foo\"), 0700)\n\n\t\/\/ Write a file under foo\n\tfilePath := filepath.Join(tmpDir, \"foo\", \"tmp.txt\")\n\terr = ioutil.WriteFile(filePath, []byte{1, 2}, os.ModeAppend)\n\trequire.Nil(err)\n\n\t\/\/ Lookup with full path to binary\n\t_, err = lookupBin(\"dummy\", filePath)\n\trequire.Nil(err)\n\n\t\/\/ Write a file under local subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"local\"), 0700)\n\tfilePath2 := filepath.Join(tmpDir, \"local\", \"tmp.txt\")\n\tioutil.WriteFile(filePath2, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\t_, err = lookupBin(tmpDir, \"tmp.txt\")\n\trequire.Nil(err)\n\n\t\/\/ Write a file under task dir\n\tfilePath3 := filepath.Join(tmpDir, \"tmp.txt\")\n\tioutil.WriteFile(filePath3, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\t_, err = lookupBin(tmpDir, \"tmp.txt\")\n\trequire.Nil(err)\n\n}\n\n\/\/ setupRoootfs setups the rootfs for libcontainer executor\n\/\/ It uses busybox to make some binaries available - somewhat cheaper\n\/\/ than mounting the underlying host filesystem\nfunc setupRootfs(t *testing.T, rootfs string) {\n\tpaths := []string{\n\t\t\"\/bin\/sh\",\n\t\t\"\/bin\/sleep\",\n\t\t\"\/bin\/echo\",\n\t\t\"\/bin\/date\",\n\t}\n\n\tfor _, p := range paths {\n\t\tsetupRootfsBinary(t, rootfs, p)\n\t}\n}\n\n\/\/ setupRootfsBinary installs a busybox link in the desired path\nfunc setupRootfsBinary(t *testing.T, rootfs, path string) {\n\tt.Helper()\n\n\tdst := filepath.Join(rootfs, path)\n\terr := os.MkdirAll(filepath.Dir(dst), 666)\n\trequire.NoError(t, err)\n\n\tsrc := filepath.Join(\n\t\t\"test-resources\", \"busybox\",\n\t\tfmt.Sprintf(\"busybox-%s\", runtime.GOARCH),\n\t)\n\n\terr = os.Link(src, dst)\n\trequire.NoError(t, err)\n}\n<commit_msg>test kill wait<commit_after>package executor\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/taskenv\"\n\t\"github.com\/hashicorp\/nomad\/helper\/testlog\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/mock\"\n\t\"github.com\/hashicorp\/nomad\/plugins\/drivers\"\n\ttu \"github.com\/hashicorp\/nomad\/testutil\"\n\tps \"github.com\/mitchellh\/go-ps\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nvar executorFactories = map[string]func(hclog.Logger) Executor{}\nvar universalFactory = func(l hclog.Logger) Executor {\n\treturn NewExecutor(l)\n}\n\nfunc init() {\n\texecutorFactories[\"UniversalExecutor\"] = universalFactory\n}\n\n\/\/ testExecutorContext returns an ExecutorContext and AllocDir.\n\/\/\n\/\/ The caller is responsible for calling AllocDir.Destroy() to cleanup.\nfunc testExecutorCommand(t *testing.T) (*ExecCommand, *allocdir.AllocDir) {\n\talloc := mock.Alloc()\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttaskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, \"global\").Build()\n\n\tallocDir := allocdir.NewAllocDir(testlog.HCLogger(t), filepath.Join(os.TempDir(), alloc.ID))\n\tif err := allocDir.Build(); err != nil {\n\t\tt.Fatalf(\"AllocDir.Build() failed: %v\", err)\n\t}\n\tif err := allocDir.NewTaskDir(task.Name).Build(false, nil); err != nil {\n\t\tallocDir.Destroy()\n\t\tt.Fatalf(\"allocDir.NewTaskDir(%q) failed: %v\", task.Name, err)\n\t}\n\ttd := allocDir.TaskDirs[task.Name]\n\tcmd := &ExecCommand{\n\t\tEnv: taskEnv.List(),\n\t\tTaskDir: td.Dir,\n\t\tResources: &drivers.Resources{\n\t\t\tNomadResources: alloc.AllocatedResources.Tasks[task.Name],\n\t\t},\n\t}\n\n\tsetupRootfs(t, td.Dir)\n\tconfigureTLogging(cmd)\n\treturn cmd, allocDir\n}\n\ntype bufferCloser struct {\n\tbytes.Buffer\n}\n\nfunc (_ *bufferCloser) Close() error { return nil }\n\nfunc configureTLogging(cmd *ExecCommand) (stdout bufferCloser, stderr bufferCloser) {\n\tcmd.SetWriters(&stdout, &stderr)\n\treturn\n}\n\nfunc TestExecutor_Start_Invalid(pt *testing.T) {\n\tpt.Parallel()\n\tinvalid := \"\/bin\/foobar\"\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = invalid\n\t\t\texecCmd.Args = []string{\"1\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\t_, err := executor.Launch(execCmd)\n\t\t\trequire.Error(err)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Wait_Failure_Code(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/date\"\n\t\t\texecCmd.Args = []string{\"fail\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\t\t\tps, _ = executor.Wait(context.Background())\n\t\t\trequire.NotZero(ps.ExitCode, \"expected exit code to be non zero\")\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Wait(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/echo\"\n\t\t\texecCmd.Args = []string{\"hello world\"}\n\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\tps, err = executor.Wait(context.Background())\n\t\t\trequire.NoError(err)\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\t\t\texpected := \"hello world\"\n\t\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t\toutWriter, _ := execCmd.GetWriters()\n\t\t\t\toutput := outWriter.(*bufferCloser).String()\n\t\t\t\tact := strings.TrimSpace(string(output))\n\t\t\t\tif expected != act {\n\t\t\t\t\treturn false, fmt.Errorf(\"expected: '%s' actual: '%s'\", expected, act)\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t}, func(err error) {\n\t\t\t\trequire.NoError(err)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestExecutor_WaitExitSignal(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"10000\"}\n\t\t\texecCmd.ResourceLimits = true\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\n\t\t\tgo func() {\n\t\t\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t\t\tch, err := executor.Stats(context.Background(), time.Second)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\t\t\treturn false, fmt.Errorf(\"stats failed to send on interval\")\n\t\t\t\t\tcase ru := <-ch:\n\t\t\t\t\t\tassert.NotEmpty(t, ru.Pids, \"no pids recorded in stats\")\n\t\t\t\t\t\tassert.NotZero(t, ru.ResourceUsage.MemoryStats.RSS)\n\t\t\t\t\t\tassert.WithinDuration(t, time.Now(), time.Unix(0, ru.Timestamp), time.Second)\n\t\t\t\t\t}\n\t\t\t\t\tproc, err := os.FindProcess(ps.Pid)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\terr = proc.Signal(syscall.SIGKILL)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, func(err error) {\n\t\t\t\t\tassert.NoError(t, executor.Signal(os.Kill))\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}()\n\n\t\t\tps, err = executor.Wait(context.Background())\n\t\t\trequire.NoError(err)\n\t\t\trequire.Equal(ps.Signal, int(syscall.SIGKILL))\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Kill(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"10\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\t\t\ttime.Sleep(time.Duration(tu.TestMultiplier()*2) * time.Second)\n\t\t\toutWriter, _ := execCmd.GetWriters()\n\t\t\toutput := outWriter.(*bufferCloser).String()\n\t\t\texpected := \"\"\n\t\t\tact := strings.TrimSpace(string(output))\n\t\t\tif act != expected {\n\t\t\t\tt.Fatalf(\"Command output incorrectly: want %v; got %v\", expected, act)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Shutdown_Exit(t *testing.T) {\n\trequire := require.New(t)\n\tt.Parallel()\n\texecCmd, allocDir := testExecutorCommand(t)\n\texecCmd.Cmd = \"\/bin\/sleep\"\n\texecCmd.Args = []string{\"100\"}\n\tcfg := &ExecutorConfig{\n\t\tLogFile: \"\/dev\/null\",\n\t}\n\texecutor, pluginClient, err := CreateExecutor(testlog.HCLogger(t), nil, cfg)\n\trequire.NoError(err)\n\n\tproc, err := executor.Launch(execCmd)\n\trequire.NoError(err)\n\trequire.NotZero(proc.Pid)\n\n\texecutor.Shutdown(\"\", 0)\n\tpluginClient.Kill()\n\ttu.WaitForResult(func() (bool, error) {\n\t\tp, err := ps.FindProcess(proc.Pid)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn p == nil, fmt.Errorf(\"process found: %d\", proc.Pid)\n\t}, func(err error) {\n\t\trequire.NoError(err)\n\t})\n\trequire.NoError(allocDir.Destroy())\n}\n\nfunc TestUniversalExecutor_MakeExecutable(t *testing.T) {\n\tt.Parallel()\n\t\/\/ Create a temp file\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\tdefer os.Remove(f.Name())\n\n\t\/\/ Set its permissions to be non-executable\n\tf.Chmod(os.FileMode(0610))\n\n\terr = makeExecutable(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"makeExecutable() failed: %v\", err)\n\t}\n\n\t\/\/ Check the permissions\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\tt.Fatalf(\"Stat() failed: %v\", err)\n\t}\n\n\tact := stat.Mode().Perm()\n\texp := os.FileMode(0755)\n\tif act != exp {\n\t\tt.Fatalf(\"expected permissions %v; got %v\", exp, act)\n\t}\n}\n\nfunc TestUniversalExecutor_LookupPath(t *testing.T) {\n\tt.Parallel()\n\t\/\/ Create a temp dir\n\ttmpDir, err := ioutil.TempDir(\"\", \"\")\n\trequire := require.New(t)\n\trequire.Nil(err)\n\tdefer os.Remove(tmpDir)\n\n\t\/\/ Make a foo subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"foo\"), 0700)\n\n\t\/\/ Write a file under foo\n\tfilePath := filepath.Join(tmpDir, \"foo\", \"tmp.txt\")\n\terr = ioutil.WriteFile(filePath, []byte{1, 2}, os.ModeAppend)\n\trequire.Nil(err)\n\n\t\/\/ Lookup with full path to binary\n\t_, err = lookupBin(\"dummy\", filePath)\n\trequire.Nil(err)\n\n\t\/\/ Write a file under local subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"local\"), 0700)\n\tfilePath2 := filepath.Join(tmpDir, \"local\", \"tmp.txt\")\n\tioutil.WriteFile(filePath2, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\t_, err = lookupBin(tmpDir, \"tmp.txt\")\n\trequire.Nil(err)\n\n\t\/\/ Write a file under task dir\n\tfilePath3 := filepath.Join(tmpDir, \"tmp.txt\")\n\tioutil.WriteFile(filePath3, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\t_, err = lookupBin(tmpDir, \"tmp.txt\")\n\trequire.Nil(err)\n\n}\n\n\/\/ setupRoootfs setups the rootfs for libcontainer executor\n\/\/ It uses busybox to make some binaries available - somewhat cheaper\n\/\/ than mounting the underlying host filesystem\nfunc setupRootfs(t *testing.T, rootfs string) {\n\tpaths := []string{\n\t\t\"\/bin\/sh\",\n\t\t\"\/bin\/sleep\",\n\t\t\"\/bin\/echo\",\n\t\t\"\/bin\/date\",\n\t}\n\n\tfor _, p := range paths {\n\t\tsetupRootfsBinary(t, rootfs, p)\n\t}\n}\n\n\/\/ setupRootfsBinary installs a busybox link in the desired path\nfunc setupRootfsBinary(t *testing.T, rootfs, path string) {\n\tt.Helper()\n\n\tdst := filepath.Join(rootfs, path)\n\terr := os.MkdirAll(filepath.Dir(dst), 666)\n\trequire.NoError(t, err)\n\n\tsrc := filepath.Join(\n\t\t\"test-resources\", \"busybox\",\n\t\tfmt.Sprintf(\"busybox-%s\", runtime.GOARCH),\n\t)\n\n\terr = os.Link(src, dst)\n\trequire.NoError(t, err)\n}\n\nfunc TestExecutor_Start_Kill_Immediately_NoGrace(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"100\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\twaitCh := make(chan interface{})\n\t\t\tgo func() {\n\t\t\t\tdefer close(waitCh)\n\t\t\t\texecutor.Wait(context.Background())\n\t\t\t}()\n\n\t\t\trequire.NoError(executor.Shutdown(\"SIGKILL\", 0))\n\n\t\t\tselect {\n\t\t\tcase <-waitCh:\n\t\t\t\t\/\/ all good!\n\t\t\tcase <-time.After(4 * time.Second * time.Duration(tu.TestMultiplier())):\n\t\t\t\trequire.Fail(\"process did not terminate despite SIGKILL\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Start_Kill_Immediately_WithGrace(pt *testing.T) {\n\tpt.Parallel()\n\tfor name, factory := range executorFactories {\n\t\tpt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\texecCmd, allocDir := testExecutorCommand(t)\n\t\t\texecCmd.Cmd = \"\/bin\/sleep\"\n\t\t\texecCmd.Args = []string{\"100\"}\n\t\t\tdefer allocDir.Destroy()\n\t\t\texecutor := factory(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\twaitCh := make(chan interface{})\n\t\t\tgo func() {\n\t\t\t\tdefer close(waitCh)\n\t\t\t\texecutor.Wait(context.Background())\n\t\t\t}()\n\n\t\t\trequire.NoError(executor.Shutdown(\"SIGKILL\", 100*time.Millisecond))\n\n\t\t\tselect {\n\t\t\tcase <-waitCh:\n\t\t\t\t\/\/ all good!\n\t\t\tcase <-time.After(4 * time.Second * time.Duration(tu.TestMultiplier())):\n\t\t\t\trequire.Fail(\"process did not terminate despite SIGKILL\")\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nThis file implements the uWSGI protocol.\nThis implements run as net.Listener:\n\n\n\t\tl, err = net.Listen(\"unix\", \"\/path\/to\/socket\")\n\t\thttp.Serve(&UwsgiListener{l}, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.Header().Set(\"Content-Length\", 11)\n\t\t\tw.Write([]byte(\"hello world\"))\n\t\t})\n*\/\n\npackage uwsgi\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Listener struct {\n\tnet.Listener\n}\n\ntype Conn struct {\n\tnet.Conn\n\tenv map[string][]string\n\treader io.Reader\n\thdrdone bool\n\tready bool\n\treadych chan bool\n\terr error\n}\n\nfunc (c *Conn) Read(b []byte) (n int, e error) {\n\t\/\/ Wait until headers have been processed\n\tif !c.ready && c.err == nil {\n\t\t<-c.readych\n\t\tc.ready = true\n\t}\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\n\t\/\/ After headers have been read by HTTP server, transfer\n\t\/\/ socket over to the underlying connection for direct read.\n\tif !c.hdrdone {\n\t\tn, e = c.reader.Read(b)\n\t\tif n == 0 || e != nil {\n\t\t\tc.hdrdone = true\n\t\t}\n\t}\n\tif c.hdrdone {\n\t\tn, e = c.Conn.Read(b)\n\t\tc.err = e\n\t}\n\n\treturn n, e\n}\n\nfunc (c *Conn) Write(b []byte) (int, error) {\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\n\treturn c.Conn.Write(b)\n}\n\nfunc (c *Conn) SetDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetDeadline(t)\n}\n\nfunc (c *Conn) SetReadDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetReadDeadline(t)\n}\n\nfunc (c *Conn) SetWriteDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetWriteDeadline(t)\n}\n\nvar headerMappings = map[string]string{\n\t\"HTTP_HOST\": \"Host\",\n\t\"CONTENT_TYPE\": \"Content-Type\",\n\t\"HTTP_ACCEPT\": \"Accept\",\n\t\"HTTP_ACCEPT_ENCODING\": \"Accept-Encoding\",\n\t\"HTTP_ACCEPT_LANGUAGE\": \"Accept-Language\",\n\t\"HTTP_ACCEPT_CHARSET\": \"Accept-Charset\",\n\t\"HTTP_CONTENT_TYPE\": \"Content-Type\",\n\t\"HTTP_COOKIE\": \"Cookie\",\n\t\"HTTP_IF_MATCH\": \"If-Match\",\n\t\"HTTP_IF_MODIFIED_SINCE\": \"If-Modified-Since\",\n\t\"HTTP_IF_NONE_MATCH\": \"If-None-Match\",\n\t\"HTTP_IF_RANGE\": \"If-Range\",\n\t\"HTTP_RANGE\": \"Range\",\n\t\"HTTP_REFERER\": \"Referer\",\n\t\"HTTP_USER_AGENT\": \"User-Agent\",\n\t\"HTTP_X_REQUESTED_WITH\": \"Requested-With\",\n}\n\n\/\/ Accept conduct as net.Listener. uWSGI protocol is working good for CGI.\n\/\/ This function parse headers and pass to the Server.\nfunc (l *Listener) Accept() (net.Conn, error) {\n\tfd, err := l.Listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tc := &Conn{fd, make(map[string][]string), buf, false, false, make(chan bool, 1), nil}\n\n\tgo func() {\n\t\t\/*\n\t\t * uwsgi header:\n\t\t * struct {\n\t\t * uint8 modifier1;\n\t\t * uint16 datasize;\n\t\t * uint8 modifier2;\n\t\t * }\n\t\t * -- for HTTP, mod1 and mod2 = 0\n\t\t *\/\n\t\tvar head [4]byte\n\t\tfd.Read(head[:])\n\t\tb := []byte{head[1], head[2]}\n\t\tenvsize := binary.LittleEndian.Uint16(b)\n\n\t\tenvbuf := make([]byte, envsize)\n\t\tif _, err := io.ReadFull(fd, envbuf); err != nil {\n\t\t\tfd.Close()\n\t\t\tc.err = err\n\t\t\treturn\n\t\t}\n\n\t\t\/*\n\t\t * uwsgi vars are linear lists of the form:\n\t\t * struct {\n\t\t * uint16 key_size;\n\t\t * uint8 key[key_size];\n\t\t * uint16 val_size;\n\t\t * uint8 val[val_size];\n\t\t * }\n\t\t *\/\n\t\ti := uint16(0)\n\t\tvar reqMethod string\n\t\tvar reqURI string\n\t\tvar reqProtocol string\n\t\tfor {\n\t\t\t\/\/ Ensure no corrupted payload; shouldn't happen but it has...\n\t\t\tif i+1 >= uint16(len(envbuf)) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb := []byte{envbuf[i], envbuf[i+1]}\n\t\t\tkl := binary.LittleEndian.Uint16(b)\n\t\t\ti += 2\n\n\t\t\tif i+kl > uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tk := string(envbuf[i : i+kl])\n\t\t\ti += kl\n\n\t\t\tif i+1 >= uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tb = []byte{envbuf[i], envbuf[i+1]}\n\t\t\tvl := binary.LittleEndian.Uint16(b)\n\t\t\ti += 2\n\n\t\t\tif i+vl > uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv := string(envbuf[i : i+vl])\n\t\t\ti += vl\n\n\t\t\tif k == \"REQUEST_METHOD\" {\n\t\t\t\treqMethod = v\n\t\t\t} else if k == \"REQUEST_URI\" {\n\t\t\t\treqURI = v\n\t\t\t} else if k == \"SERVER_PROTOCOL\" {\n\t\t\t\treqProtocol = v\n\t\t\t}\n\n\t\t\tval, ok := c.env[k]\n\t\t\tif !ok {\n\t\t\t\tval = make([]string, 0, 2)\n\t\t\t}\n\t\t\tval = append(val, v)\n\t\t\tc.env[k] = val\n\n\t\t\tif i >= envsize {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif reqProtocol == \"\" {\n\t\t\t\/\/ Invalid protocol\n\t\t\tfd.Close()\n\t\t\tc.err = errors.New(\"Invalid uwsgi request; no protocol specified\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Fprintf(buf, \"%s %s %s\\r\\n\", reqMethod, reqURI, reqProtocol)\n\n\t\tvar cl int64\n\t\tfor i := range c.env {\n\t\t\tswitch i {\n\t\t\tcase \"CONTENT_LENGTH\":\n\t\t\t\tcl, _ = strconv.ParseInt(c.env[i][0], 10, 64)\n\t\t\t\tif cl > 0 {\n\t\t\t\t\tfmt.Fprintf(buf, \"Content-Length: %d\\r\\n\", cl)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\thname, ok := headerMappings[i]\n\t\t\t\tif !ok {\n\t\t\t\t\thname = i\n\t\t\t\t}\n\t\t\t\tfor v := range c.env[i] {\n\t\t\t\t\tfmt.Fprintf(buf, \"%s: %s\\r\\n\", hname, c.env[i][v])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbuf.Write([]byte(\"\\r\\n\"))\n\n\t\t\/\/ Signal to indicate header processing is complete and remaining\n\t\t\/\/ payload can be read from the socket itself.\n\t\tc.readych <- true\n\t}()\n\n\treturn c, nil\n}\n\ntype Passenger struct {\n\tNet string\n\tAddr string\n}\n\nvar trailingPort = regexp.MustCompile(`:([0-9]+)$`)\n\nfunc (p Passenger) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tconn, err := net.Dial(p.Net, p.Addr)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer conn.Close()\n\n\tport := \"80\"\n\tif matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {\n\t\tport = matches[1]\n\t}\n\n\theader := make(map[string][]string)\n\theader[\"REQUEST_METHOD\"] = []string{req.Method}\n\theader[\"REQUEST_URI\"] = []string{req.RequestURI}\n\theader[\"CONTENT_LENGTH\"] = []string{strconv.Itoa(int(req.ContentLength))}\n\theader[\"SERVER_PROTOCOL\"] = []string{req.Proto}\n\theader[\"SERVER_NAME\"] = []string{req.Host}\n\theader[\"SERVER_ADDR\"] = []string{req.RemoteAddr}\n\theader[\"SERVER_PORT\"] = []string{port}\n\theader[\"REMOTE_HOST\"] = []string{req.RemoteAddr}\n\theader[\"REMOTE_ADDR\"] = []string{req.RemoteAddr}\n\theader[\"SCRIPT_NAME\"] = []string{req.URL.Path}\n\theader[\"PATH_INFO\"] = []string{req.URL.Path}\n\theader[\"QUERY_STRING\"] = []string{req.URL.RawQuery}\n\tif ctype := req.Header.Get(\"Content-Type\"); ctype != \"\" {\n\t\theader[\"CONTENT_TYPE\"] = []string{ctype}\n\t}\n\tfor k, v := range req.Header {\n\t\tif _, ok := header[k]; ok == false {\n\t\t\tk = \"HTTP_\" + strings.ToUpper(strings.Replace(k, \"-\", \"_\", -1))\n\t\t\theader[k] = v\n\t\t}\n\t}\n\n\tvar size uint16\n\tfor k, v := range header {\n\t\tfor _, vv := range v {\n\t\t\tsize += uint16(len(([]byte)(k))) + 2\n\t\t\tsize += uint16(len(([]byte)(vv))) + 2\n\t\t}\n\t}\n\n\thsize := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(hsize[1:3], size)\n\tconn.Write(hsize)\n\n\tfor k, v := range header {\n\t\tfor _, vv := range v {\n\t\t\tbinary.Write(conn, binary.LittleEndian, uint16(len(([]byte)(k))))\n\t\t\tconn.Write([]byte(k))\n\t\t\tbinary.Write(conn, binary.LittleEndian, uint16(len(([]byte)(vv))))\n\t\t\tconn.Write([]byte(vv))\n\t\t}\n\t}\n\n\tio.Copy(conn, req.Body)\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), req)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor k, v := range res.Header {\n\t\tw.Header().Del(k)\n\t\tfor _, vv := range v {\n\t\t\tw.Header().Add(k, vv)\n\t\t}\n\t}\n\tio.Copy(w, res.Body)\n}\n<commit_msg>golint<commit_after>\/*\nThis file implements the uWSGI protocol.\nThis implements run as net.Listener:\n\n\n\t\tl, err = net.Listen(\"unix\", \"\/path\/to\/socket\")\n\t\thttp.Serve(&UwsgiListener{l}, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.Header().Set(\"Content-Length\", 11)\n\t\t\tw.Write([]byte(\"hello world\"))\n\t\t})\n*\/\n\npackage uwsgi\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Listener behave as net.Listener\ntype Listener struct {\n\tnet.Listener\n}\n\n\/\/ Conn is connection for uWSGI\ntype Conn struct {\n\tnet.Conn\n\tenv map[string][]string\n\treader io.Reader\n\thdrdone bool\n\tready bool\n\treadych chan bool\n\terr error\n}\n\nfunc (c *Conn) Read(b []byte) (n int, e error) {\n\t\/\/ Wait until headers have been processed\n\tif !c.ready && c.err == nil {\n\t\t<-c.readych\n\t\tc.ready = true\n\t}\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\n\t\/\/ After headers have been read by HTTP server, transfer\n\t\/\/ socket over to the underlying connection for direct read.\n\tif !c.hdrdone {\n\t\tn, e = c.reader.Read(b)\n\t\tif n == 0 || e != nil {\n\t\t\tc.hdrdone = true\n\t\t}\n\t}\n\tif c.hdrdone {\n\t\tn, e = c.Conn.Read(b)\n\t\tc.err = e\n\t}\n\n\treturn n, e\n}\n\n\/\/ Writer behave as same as net.Listener\nfunc (c *Conn) Write(b []byte) (int, error) {\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\n\treturn c.Conn.Write(b)\n}\n\n\/\/ SetDeadline behave as same as net.Listener\nfunc (c *Conn) SetDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetDeadline(t)\n}\n\n\/\/ SetReadDeadline behave as same as net.Listener\nfunc (c *Conn) SetReadDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetReadDeadline(t)\n}\n\n\/\/ SetWriteDeadline behave as same as net.Listener\nfunc (c *Conn) SetWriteDeadline(t time.Time) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\n\treturn c.Conn.SetWriteDeadline(t)\n}\n\nvar headerMappings = map[string]string{\n\t\"HTTP_HOST\": \"Host\",\n\t\"CONTENT_TYPE\": \"Content-Type\",\n\t\"HTTP_ACCEPT\": \"Accept\",\n\t\"HTTP_ACCEPT_ENCODING\": \"Accept-Encoding\",\n\t\"HTTP_ACCEPT_LANGUAGE\": \"Accept-Language\",\n\t\"HTTP_ACCEPT_CHARSET\": \"Accept-Charset\",\n\t\"HTTP_CONTENT_TYPE\": \"Content-Type\",\n\t\"HTTP_COOKIE\": \"Cookie\",\n\t\"HTTP_IF_MATCH\": \"If-Match\",\n\t\"HTTP_IF_MODIFIED_SINCE\": \"If-Modified-Since\",\n\t\"HTTP_IF_NONE_MATCH\": \"If-None-Match\",\n\t\"HTTP_IF_RANGE\": \"If-Range\",\n\t\"HTTP_RANGE\": \"Range\",\n\t\"HTTP_REFERER\": \"Referer\",\n\t\"HTTP_USER_AGENT\": \"User-Agent\",\n\t\"HTTP_X_REQUESTED_WITH\": \"Requested-With\",\n}\n\n\/\/ Accept conduct as net.Listener. uWSGI protocol is working good for CGI.\n\/\/ This function parse headers and pass to the Server.\nfunc (l *Listener) Accept() (net.Conn, error) {\n\tfd, err := l.Listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tc := &Conn{fd, make(map[string][]string), buf, false, false, make(chan bool, 1), nil}\n\n\tgo func() {\n\t\t\/*\n\t\t * uwsgi header:\n\t\t * struct {\n\t\t * uint8 modifier1;\n\t\t * uint16 datasize;\n\t\t * uint8 modifier2;\n\t\t * }\n\t\t * -- for HTTP, mod1 and mod2 = 0\n\t\t *\/\n\t\tvar head [4]byte\n\t\tfd.Read(head[:])\n\t\tb := []byte{head[1], head[2]}\n\t\tenvsize := binary.LittleEndian.Uint16(b)\n\n\t\tenvbuf := make([]byte, envsize)\n\t\tif _, err := io.ReadFull(fd, envbuf); err != nil {\n\t\t\tfd.Close()\n\t\t\tc.err = err\n\t\t\treturn\n\t\t}\n\n\t\t\/*\n\t\t * uwsgi vars are linear lists of the form:\n\t\t * struct {\n\t\t * uint16 key_size;\n\t\t * uint8 key[key_size];\n\t\t * uint16 val_size;\n\t\t * uint8 val[val_size];\n\t\t * }\n\t\t *\/\n\t\ti := uint16(0)\n\t\tvar reqMethod string\n\t\tvar reqURI string\n\t\tvar reqProtocol string\n\t\tfor {\n\t\t\t\/\/ Ensure no corrupted payload; shouldn't happen but it has...\n\t\t\tif i+1 >= uint16(len(envbuf)) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb := []byte{envbuf[i], envbuf[i+1]}\n\t\t\tkl := binary.LittleEndian.Uint16(b)\n\t\t\ti += 2\n\n\t\t\tif i+kl > uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tk := string(envbuf[i : i+kl])\n\t\t\ti += kl\n\n\t\t\tif i+1 >= uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tb = []byte{envbuf[i], envbuf[i+1]}\n\t\t\tvl := binary.LittleEndian.Uint16(b)\n\t\t\ti += 2\n\n\t\t\tif i+vl > uint16(len(envbuf)) {\n\t\t\t\tfd.Close()\n\t\t\t\tc.err = errors.New(\"Invalid uwsgi request; uwsgi vars index out of range\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv := string(envbuf[i : i+vl])\n\t\t\ti += vl\n\n\t\t\tif k == \"REQUEST_METHOD\" {\n\t\t\t\treqMethod = v\n\t\t\t} else if k == \"REQUEST_URI\" {\n\t\t\t\treqURI = v\n\t\t\t} else if k == \"SERVER_PROTOCOL\" {\n\t\t\t\treqProtocol = v\n\t\t\t}\n\n\t\t\tval, ok := c.env[k]\n\t\t\tif !ok {\n\t\t\t\tval = make([]string, 0, 2)\n\t\t\t}\n\t\t\tval = append(val, v)\n\t\t\tc.env[k] = val\n\n\t\t\tif i >= envsize {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif reqProtocol == \"\" {\n\t\t\t\/\/ Invalid protocol\n\t\t\tfd.Close()\n\t\t\tc.err = errors.New(\"Invalid uwsgi request; no protocol specified\")\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Fprintf(buf, \"%s %s %s\\r\\n\", reqMethod, reqURI, reqProtocol)\n\n\t\tvar cl int64\n\t\tfor i := range c.env {\n\t\t\tswitch i {\n\t\t\tcase \"CONTENT_LENGTH\":\n\t\t\t\tcl, _ = strconv.ParseInt(c.env[i][0], 10, 64)\n\t\t\t\tif cl > 0 {\n\t\t\t\t\tfmt.Fprintf(buf, \"Content-Length: %d\\r\\n\", cl)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\thname, ok := headerMappings[i]\n\t\t\t\tif !ok {\n\t\t\t\t\thname = i\n\t\t\t\t}\n\t\t\t\tfor v := range c.env[i] {\n\t\t\t\t\tfmt.Fprintf(buf, \"%s: %s\\r\\n\", hname, c.env[i][v])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbuf.Write([]byte(\"\\r\\n\"))\n\n\t\t\/\/ Signal to indicate header processing is complete and remaining\n\t\t\/\/ payload can be read from the socket itself.\n\t\tc.readych <- true\n\t}()\n\n\treturn c, nil\n}\n\n\/\/ Passenger works as uWSGI transport\ntype Passenger struct {\n\tNet string\n\tAddr string\n}\n\nvar trailingPort = regexp.MustCompile(`:([0-9]+)$`)\n\nfunc (p Passenger) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tconn, err := net.Dial(p.Net, p.Addr)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer conn.Close()\n\n\tport := \"80\"\n\tif matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {\n\t\tport = matches[1]\n\t}\n\n\theader := make(map[string][]string)\n\theader[\"REQUEST_METHOD\"] = []string{req.Method}\n\theader[\"REQUEST_URI\"] = []string{req.RequestURI}\n\theader[\"CONTENT_LENGTH\"] = []string{strconv.Itoa(int(req.ContentLength))}\n\theader[\"SERVER_PROTOCOL\"] = []string{req.Proto}\n\theader[\"SERVER_NAME\"] = []string{req.Host}\n\theader[\"SERVER_ADDR\"] = []string{req.RemoteAddr}\n\theader[\"SERVER_PORT\"] = []string{port}\n\theader[\"REMOTE_HOST\"] = []string{req.RemoteAddr}\n\theader[\"REMOTE_ADDR\"] = []string{req.RemoteAddr}\n\theader[\"SCRIPT_NAME\"] = []string{req.URL.Path}\n\theader[\"PATH_INFO\"] = []string{req.URL.Path}\n\theader[\"QUERY_STRING\"] = []string{req.URL.RawQuery}\n\tif ctype := req.Header.Get(\"Content-Type\"); ctype != \"\" {\n\t\theader[\"CONTENT_TYPE\"] = []string{ctype}\n\t}\n\tfor k, v := range req.Header {\n\t\tif _, ok := header[k]; ok == false {\n\t\t\tk = \"HTTP_\" + strings.ToUpper(strings.Replace(k, \"-\", \"_\", -1))\n\t\t\theader[k] = v\n\t\t}\n\t}\n\n\tvar size uint16\n\tfor k, v := range header {\n\t\tfor _, vv := range v {\n\t\t\tsize += uint16(len(([]byte)(k))) + 2\n\t\t\tsize += uint16(len(([]byte)(vv))) + 2\n\t\t}\n\t}\n\n\thsize := make([]byte, 4)\n\tbinary.LittleEndian.PutUint16(hsize[1:3], size)\n\tconn.Write(hsize)\n\n\tfor k, v := range header {\n\t\tfor _, vv := range v {\n\t\t\tbinary.Write(conn, binary.LittleEndian, uint16(len(([]byte)(k))))\n\t\t\tconn.Write([]byte(k))\n\t\t\tbinary.Write(conn, binary.LittleEndian, uint16(len(([]byte)(vv))))\n\t\t\tconn.Write([]byte(vv))\n\t\t}\n\t}\n\n\tio.Copy(conn, req.Body)\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), req)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor k, v := range res.Header {\n\t\tw.Header().Del(k)\n\t\tfor _, vv := range v {\n\t\t\tw.Header().Add(k, vv)\n\t\t}\n\t}\n\tio.Copy(w, res.Body)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nThe MIT License (MIT)\n\nCopyright (c) 2016 winlin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*\/\n\n\/*\n This the main entrance of https-proxy, proxy to api or other http server.\n*\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/ossrs\/go-oryx-lib\/https\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc main() {\n\tvar httpPort, httpsPort int\n\tflag.IntVar(&httpPort, \"http\", 80, \"http listen at. 0 to disable http.\")\n\tflag.IntVar(&httpsPort, \"https\", 443, \"https listen at. 0 to disable https. 443 to serve.\")\n\tflag.Parse()\n\n\tif httpsPort != 0 && httpsPort != 443 {\n\t\tfmt.Println(\"https must be 0(disabled) or 443(enabled)\")\n\t\tos.Exit(-1)\n\t}\n\tif httpPort == 0 && httpsPort == 0 {\n\t\tfmt.Println(\"http or https are disabled\")\n\t\tos.Exit(-1)\n\t}\n\n\thtml := http.Dir(\".\/html\")\n\tfh := http.FileServer(html)\n\thttp.Handle(\"\/\", fh)\n\n\tvar protos []string\n\tif httpPort != 0 {\n\t\tprotos = append(protos, fmt.Sprintf(\"http(:%v)\", httpPort))\n\t}\n\tif httpsPort != 0 {\n\t\tprotos = append(protos, fmt.Sprintf(\"https(:%v)\", httpsPort))\n\t}\n\tfmt.Println(fmt.Sprintf(\"%v html root at %v\", strings.Join(protos, \", \"), string(html)))\n\n\twg := sync.WaitGroup{}\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif httpPort == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tif err := http.ListenAndServe(fmt.Sprintf(\":%v\", httpPort), nil); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\twg.Add(1)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif httpsPort == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tvar err error\n\t\tvar m https.Manager\n\t\tif m, err = https.NewLetsencryptManager(\"\", nil, \"letsencrypt.cache\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tsvr := &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%v\", httpsPort),\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tGetCertificate: m.GetCertificate,\n\t\t\t},\n\t\t}\n\n\t\tif err := svr.ListenAndServeTLS(\"\", \"\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n<commit_msg>support domains<commit_after>\/*\nThe MIT License (MIT)\n\nCopyright (c) 2016 winlin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n*\/\n\n\/*\n This the main entrance of https-proxy, proxy to api or other http server.\n*\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/ossrs\/go-oryx-lib\/https\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc main() {\n\tvar httpPort, httpsPort int\n\tvar httpsDomains string\n\tflag.IntVar(&httpPort, \"http\", 80, \"http listen at. 0 to disable http.\")\n\tflag.IntVar(&httpsPort, \"https\", 443, \"https listen at. 0 to disable https. 443 to serve. \")\n\tflag.StringVar(&httpsDomains, \"domains\", \"\", \"the allow domains, empty to allow all. for example: ossrs.net,www.ossrs.net\")\n\tflag.Parse()\n\n\tif httpsPort != 0 && httpsPort != 443 {\n\t\tfmt.Println(\"https must be 0(disabled) or 443(enabled)\")\n\t\tos.Exit(-1)\n\t}\n\tif httpPort == 0 && httpsPort == 0 {\n\t\tfmt.Println(\"http or https are disabled\")\n\t\tos.Exit(-1)\n\t}\n\n\thtml := http.Dir(\".\/html\")\n\tfh := http.FileServer(html)\n\thttp.Handle(\"\/\", fh)\n\n\tvar protos []string\n\tif httpPort != 0 {\n\t\tprotos = append(protos, fmt.Sprintf(\"http(:%v)\", httpPort))\n\t}\n\tif httpsPort != 0 {\n\t\ts := httpsDomains\n\t\tif httpsDomains == \"\" {\n\t\t\ts = \"all domains\"\n\t\t}\n\t\tprotos = append(protos, fmt.Sprintf(\"https(:%v, %v)\", httpsPort, s))\n\t}\n\tfmt.Println(fmt.Sprintf(\"%v html root at %v\", strings.Join(protos, \", \"), string(html)))\n\n\twg := sync.WaitGroup{}\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif httpPort == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tif err := http.ListenAndServe(fmt.Sprintf(\":%v\", httpPort), nil); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\twg.Add(1)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif httpsPort == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tvar domains []string\n\t\tif httpsDomains != \"\" {\n\t\t\tdomains = strings.Split(httpsDomains, \",\")\n\t\t}\n\n\t\tvar err error\n\t\tvar m https.Manager\n\t\tif m, err = https.NewLetsencryptManager(\"\", domains, \"letsencrypt.cache\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tsvr := &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%v\", httpsPort),\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tGetCertificate: m.GetCertificate,\n\t\t\t},\n\t\t}\n\n\t\tif err := svr.ListenAndServeTLS(\"\", \"\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package eval\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/cirbo-lang\/cirbo\/cbty\"\n\t\"github.com\/cirbo-lang\/cirbo\/source\"\n)\n\ntype StmtBlock struct {\n\tscope *Scope\n\tstmts []Stmt\n}\n\n\/\/ NewStmtBlock constructs and returns a new statement block containing the\n\/\/ given statements, which are assumed to be populating the given scope.\n\/\/\n\/\/ The caller must not read or write the given statements slice after it has\n\/\/ been passed to NewStmtBlock. Ownership is transferred to the returned\n\/\/ object and the slice's backing array may be modified in unspecified ways.\nfunc MakeStmtBlock(scope *Scope, stmts []Stmt) (StmtBlock, source.Diags) {\n\tvar diags source.Diags\n\n\tproviders := make(map[*Symbol]Stmt, len(stmts))\n\tenables := make(map[Stmt][]Stmt, len(stmts)) \/\/ slice so that we preserve input ordering when ordering is ambiguous\n\tinDeg := make(map[Stmt]int, len(stmts))\n\tfor _, stmt := range stmts {\n\t\tif sym := stmt.s.definedSymbol(); sym != nil {\n\t\t\tproviders[sym] = stmt\n\t\t}\n\t}\n\tfor _, stmt := range stmts {\n\t\tsyms := stmt.s.requiredSymbols(scope)\n\t\tfor sym := range syms {\n\t\t\tif provider, provided := providers[sym]; provided {\n\t\t\t\tenables[provider] = append(enables[provider], stmt)\n\t\t\t\tinDeg[stmt]++\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ We place both \"result\" and \"queue\" at the head of our input array.\n\t\/\/ We know that the length of the queue and the length of the result\n\t\/\/ must sum up to less than or equal to the original list, so we can\n\t\/\/ safely use the original underlying array as storage for both. The\n\t\/\/ start of the queue will gradually move through the array just as\n\t\/\/ the result slice grows to include the elements it has vacated.\n\tresult := stmts[0:0]\n\tqueueStart := 0 \/\/ index into stmts underlying array\n\tqueue := stmts[queueStart:queueStart]\n\n\t\/\/ Seed the queue with statements that have no dependencies\n\tfor _, stmt := range stmts {\n\t\tif inDeg[stmt] == 0 {\n\t\t\tqueue = append(queue, stmt)\n\t\t}\n\t}\n\n\tfor len(queue) > 0 {\n\t\tstmt := queue[0]\n\n\t\t\/\/ Adjust the head of the queue to one element later in our array.\n\t\tqueueStart++\n\t\tqueue = stmts[queueStart : queueStart+(len(queue)-1)]\n\n\t\t\/\/ Adjust the result list to include the element that we just\n\t\t\/\/ removed from the queue.\n\t\tresult = stmts[:len(result)+1]\n\n\t\tfor _, enabled := range enables[stmt] {\n\t\t\tinDeg[enabled]--\n\t\t\tif inDeg[enabled] == 0 {\n\t\t\t\tqueue = append(queue, enabled)\n\t\t\t\tdelete(inDeg, enabled)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ When we reach this point, if there were no cycles then result already\n\t\/\/ equals stmts, but the list may have shrunk if there _were_ cycles and\n\t\/\/ so we need to do some adjusting.\n\tstmts = result\n\n\tif len(inDeg) > 0 {\n\t\t\/\/ Indicates that we have at least one cycle.\n\t\t\/\/ TODO: This error message isn't great; ideally we would provide\n\t\t\/\/ more context to help the user understand the reason for the\n\t\t\/\/ cycle, since it might be via multiple levels of indirection.\n\t\tranges := make([]source.Range, 0, len(inDeg))\n\t\tfor stmt := range inDeg {\n\t\t\tranges = append(ranges, stmt.s.sourceRange())\n\t\t}\n\n\t\tif len(ranges) == 1 {\n\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\tLevel: source.Error,\n\t\t\t\tSummary: \"Self-referential symbol definition\",\n\t\t\t\tDetail: \"Definition statement depends (possibly indirectly) on its own result.\",\n\t\t\t\tRanges: ranges,\n\t\t\t})\n\t\t} else {\n\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\tLevel: source.Error,\n\t\t\t\tSummary: \"Self-referential symbol definitions\",\n\t\t\t\tDetail: \"Definition statements depend (possibly indirectly) on their own results.\",\n\t\t\t\tRanges: ranges,\n\t\t\t})\n\t\t}\n\t}\n\n\t\/\/ Don't permit any future modifications to the scope, since we're now\n\t\/\/ depending on its contents.\n\tscope.final = true\n\n\treturn StmtBlock{\n\t\tscope: scope,\n\t\tstmts: stmts,\n\t}, diags\n}\n\nfunc (cb StmtBlock) RequiredSymbols(scope *Scope) SymbolSet {\n\tret := NewSymbolSet()\n\tfor _, stmt := range cb.stmts {\n\t\treqd := stmt.RequiredSymbols(scope)\n\t\tfor sym := range reqd {\n\t\t\tret.Add(sym)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (sb StmtBlock) PackagesImported() []PackageRef {\n\treturn sb.PackagesImportedAppend(nil)\n}\n\nfunc (sb StmtBlock) PackagesImportedAppend(ppaths []PackageRef) []PackageRef {\n\tfor _, stmt := range sb.stmts {\n\t\tif imp, isImp := stmt.s.(*importStmt); isImp {\n\t\t\tppaths = append(ppaths, PackageRef{\n\t\t\t\tPath: imp.ppath,\n\t\t\t\tRange: imp.sourceRange(),\n\t\t\t})\n\t\t}\n\t}\n\treturn ppaths\n}\n\ntype PackageRef struct {\n\tPath string\n\tRange source.Range\n}\n\ntype StmtBlockAttr struct {\n\tSymbol *Symbol\n\tType cbty.Type\n\tRequired bool\n\tDefRange source.Range\n}\n\n\/\/ Attributes returns a description of the attributes defined by the block.\n\/\/\n\/\/ When executing the block, values for some or all of these (depending on\n\/\/ their Required status) should be provided in the StmtBlockExecute\n\/\/ instance.\n\/\/\n\/\/ The given context is used to resolve the type or default value expressions\n\/\/ in the attribute statements. The given context must therefore be the same\n\/\/ context that would ultimately be provided to Execute in the StmtBlockExecute\n\/\/ object or else the result may be incorrect.\nfunc (sb StmtBlock) Attributes(ctx *Context) (map[string]StmtBlockAttr, source.Diags) {\n\tvar diags source.Diags\n\tret := map[string]StmtBlockAttr{}\n\tfor _, stmt := range sb.stmts {\n\t\tif attr, isAttr := stmt.s.(*attrStmt); isAttr {\n\t\t\tname := attr.sym.DeclaredName()\n\t\t\tdef := StmtBlockAttr{\n\t\t\t\tSymbol: attr.sym,\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\tcase attr.valueType != NilExpr:\n\t\t\t\ttyVal, exprDiags := attr.valueType.Value(ctx)\n\t\t\t\tdiags = append(diags, exprDiags...)\n\n\t\t\t\tif tyVal.Type() != cbty.TypeType {\n\t\t\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\t\t\tLevel: source.Error,\n\t\t\t\t\t\tSummary: \"Invalid attribute type\",\n\t\t\t\t\t\tDetail: fmt.Sprintf(\"Expected a type, but given a value of type %s. To assign a default value, use the '=' (equals) symbol.\", tyVal.Type().Name()),\n\t\t\t\t\t\tRanges: attr.valueType.sourceRange().List(),\n\t\t\t\t\t})\n\t\t\t\t\tdef.Type = cbty.PlaceholderVal.Type()\n\t\t\t\t\tdef.Required = true\n\t\t\t\t} else {\n\t\t\t\t\tdef.Type = tyVal.UnwrapType()\n\t\t\t\t\tdef.Required = true\n\t\t\t\t}\n\t\t\tcase attr.defValue != NilExpr:\n\t\t\t\tval, exprDiags := attr.defValue.Value(ctx)\n\t\t\t\tdiags = append(diags, exprDiags...)\n\n\t\t\t\tdef.Type = val.Type()\n\t\t\t\tdef.Required = false\n\t\t\tdefault:\n\t\t\t\t\/\/ should never happen\n\t\t\t\tpanic(\"attrStmt with neither value type nor default value\")\n\t\t\t}\n\n\t\t\tret[name] = def\n\t\t}\n\t}\n\treturn ret, diags\n}\n\n\/\/ ImplicitExports returns a SymbolSet of the symbols defined in the block's\n\/\/ scope that are eligible to be included in an implicit export object.\n\/\/\n\/\/ This includes most definitions, but specifically excludes imports as they are\n\/\/ assumed to be for internal use and could be requested directly by any caller.\n\/\/\n\/\/ This method is intended for creating an implicit export object for a module,\n\/\/ and so it will likely not produce a useful or sensible result for blocks\n\/\/ created in other contexts.\nfunc (sb StmtBlock) ImplicitExports() SymbolSet {\n\tvar ret SymbolSet\n\tfor _, stmt := range sb.stmts {\n\t\tif _, isImport := stmt.s.(*importStmt); isImport {\n\t\t\tcontinue\n\t\t}\n\t\tsym := stmt.s.definedSymbol()\n\t\tif sym != nil {\n\t\t\tif ret == nil {\n\t\t\t\tret = SymbolSet{}\n\t\t\t}\n\t\t\tret.Add(sym)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (sb StmtBlock) Execute(exec StmtBlockExecute) (*StmtBlockResult, source.Diags) {\n\t\/\/ Make a new child context to work in. (We get \"exec\" by value here, so\n\t\/\/ we can mutate it without upsetting the caller.)\n\texec.Context = exec.Context.NewChild()\n\n\tresult := StmtBlockResult{}\n\tvar diags source.Diags\n\n\tresult.Scope = sb.scope\n\tresult.Context = exec.Context\n\n\tfor _, stmt := range sb.stmts {\n\t\tstmtDiags := stmt.s.execute(&exec, &result)\n\t\tdiags = append(diags, stmtDiags...)\n\t}\n\n\tresult.Context.final = true \/\/ no more modifications allowed\n\n\treturn &result, diags\n}\n\ntype StmtBlockExecute struct {\n\tContext *Context\n\tPackages map[string]cbty.Value\n\tAttrs map[*Symbol]cbty.Value\n}\n\ntype StmtBlockResult struct {\n\t\/\/ Context is the context that was created to evaluate the block.\n\t\/\/ It is provided only so that NewChild may be called on it for child\n\t\/\/ blocks; it should not be modified once returned.\n\tContext *Context\n\n\t\/\/ Scope is the scope that was created for the block during its\n\t\/\/ compilation. This object is shared between all executions of the same\n\t\/\/ block, and so should not be modified.\n\tScope *Scope\n\n\t\/\/ ExportValue is the value exported by an \"export\" statement, if any.\n\tExportValue cbty.Value\n}\n<commit_msg>eval: StmtBlock.AttributeNames<commit_after>package eval\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/cirbo-lang\/cirbo\/cbty\"\n\t\"github.com\/cirbo-lang\/cirbo\/source\"\n)\n\ntype StmtBlock struct {\n\tscope *Scope\n\tstmts []Stmt\n}\n\n\/\/ NewStmtBlock constructs and returns a new statement block containing the\n\/\/ given statements, which are assumed to be populating the given scope.\n\/\/\n\/\/ The caller must not read or write the given statements slice after it has\n\/\/ been passed to NewStmtBlock. Ownership is transferred to the returned\n\/\/ object and the slice's backing array may be modified in unspecified ways.\nfunc MakeStmtBlock(scope *Scope, stmts []Stmt) (StmtBlock, source.Diags) {\n\tvar diags source.Diags\n\n\tproviders := make(map[*Symbol]Stmt, len(stmts))\n\tenables := make(map[Stmt][]Stmt, len(stmts)) \/\/ slice so that we preserve input ordering when ordering is ambiguous\n\tinDeg := make(map[Stmt]int, len(stmts))\n\tfor _, stmt := range stmts {\n\t\tif sym := stmt.s.definedSymbol(); sym != nil {\n\t\t\tproviders[sym] = stmt\n\t\t}\n\t}\n\tfor _, stmt := range stmts {\n\t\tsyms := stmt.s.requiredSymbols(scope)\n\t\tfor sym := range syms {\n\t\t\tif provider, provided := providers[sym]; provided {\n\t\t\t\tenables[provider] = append(enables[provider], stmt)\n\t\t\t\tinDeg[stmt]++\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ We place both \"result\" and \"queue\" at the head of our input array.\n\t\/\/ We know that the length of the queue and the length of the result\n\t\/\/ must sum up to less than or equal to the original list, so we can\n\t\/\/ safely use the original underlying array as storage for both. The\n\t\/\/ start of the queue will gradually move through the array just as\n\t\/\/ the result slice grows to include the elements it has vacated.\n\tresult := stmts[0:0]\n\tqueueStart := 0 \/\/ index into stmts underlying array\n\tqueue := stmts[queueStart:queueStart]\n\n\t\/\/ Seed the queue with statements that have no dependencies\n\tfor _, stmt := range stmts {\n\t\tif inDeg[stmt] == 0 {\n\t\t\tqueue = append(queue, stmt)\n\t\t}\n\t}\n\n\tfor len(queue) > 0 {\n\t\tstmt := queue[0]\n\n\t\t\/\/ Adjust the head of the queue to one element later in our array.\n\t\tqueueStart++\n\t\tqueue = stmts[queueStart : queueStart+(len(queue)-1)]\n\n\t\t\/\/ Adjust the result list to include the element that we just\n\t\t\/\/ removed from the queue.\n\t\tresult = stmts[:len(result)+1]\n\n\t\tfor _, enabled := range enables[stmt] {\n\t\t\tinDeg[enabled]--\n\t\t\tif inDeg[enabled] == 0 {\n\t\t\t\tqueue = append(queue, enabled)\n\t\t\t\tdelete(inDeg, enabled)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ When we reach this point, if there were no cycles then result already\n\t\/\/ equals stmts, but the list may have shrunk if there _were_ cycles and\n\t\/\/ so we need to do some adjusting.\n\tstmts = result\n\n\tif len(inDeg) > 0 {\n\t\t\/\/ Indicates that we have at least one cycle.\n\t\t\/\/ TODO: This error message isn't great; ideally we would provide\n\t\t\/\/ more context to help the user understand the reason for the\n\t\t\/\/ cycle, since it might be via multiple levels of indirection.\n\t\tranges := make([]source.Range, 0, len(inDeg))\n\t\tfor stmt := range inDeg {\n\t\t\tranges = append(ranges, stmt.s.sourceRange())\n\t\t}\n\n\t\tif len(ranges) == 1 {\n\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\tLevel: source.Error,\n\t\t\t\tSummary: \"Self-referential symbol definition\",\n\t\t\t\tDetail: \"Definition statement depends (possibly indirectly) on its own result.\",\n\t\t\t\tRanges: ranges,\n\t\t\t})\n\t\t} else {\n\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\tLevel: source.Error,\n\t\t\t\tSummary: \"Self-referential symbol definitions\",\n\t\t\t\tDetail: \"Definition statements depend (possibly indirectly) on their own results.\",\n\t\t\t\tRanges: ranges,\n\t\t\t})\n\t\t}\n\t}\n\n\t\/\/ Don't permit any future modifications to the scope, since we're now\n\t\/\/ depending on its contents.\n\tscope.final = true\n\n\treturn StmtBlock{\n\t\tscope: scope,\n\t\tstmts: stmts,\n\t}, diags\n}\n\nfunc (cb StmtBlock) RequiredSymbols(scope *Scope) SymbolSet {\n\tret := NewSymbolSet()\n\tfor _, stmt := range cb.stmts {\n\t\treqd := stmt.RequiredSymbols(scope)\n\t\tfor sym := range reqd {\n\t\t\tret.Add(sym)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (sb StmtBlock) PackagesImported() []PackageRef {\n\treturn sb.PackagesImportedAppend(nil)\n}\n\nfunc (sb StmtBlock) PackagesImportedAppend(ppaths []PackageRef) []PackageRef {\n\tfor _, stmt := range sb.stmts {\n\t\tif imp, isImp := stmt.s.(*importStmt); isImp {\n\t\t\tppaths = append(ppaths, PackageRef{\n\t\t\t\tPath: imp.ppath,\n\t\t\t\tRange: imp.sourceRange(),\n\t\t\t})\n\t\t}\n\t}\n\treturn ppaths\n}\n\ntype PackageRef struct {\n\tPath string\n\tRange source.Range\n}\n\ntype StmtBlockAttr struct {\n\tSymbol *Symbol\n\tType cbty.Type\n\tRequired bool\n\tDefRange source.Range\n}\n\n\/\/ AttributeNames returns a map of the attribute names required by the block,\n\/\/ which can be resolved without needing a context. This can be used for\n\/\/ early validation, though Attributes should be called with a context to\n\/\/ get the full description of the attributes.\nfunc (sb StmtBlock) AttributeNames() map[string]*Symbol {\n\tret := map[string]*Symbol{}\n\tfor _, stmt := range sb.stmts {\n\t\tif attr, isAttr := stmt.s.(*attrStmt); isAttr {\n\t\t\tret[attr.sym.DeclaredName()] = attr.sym\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ Attributes returns a description of the attributes defined by the block.\n\/\/\n\/\/ When executing the block, values for some or all of these (depending on\n\/\/ their Required status) should be provided in the StmtBlockExecute\n\/\/ instance.\n\/\/\n\/\/ The given context is used to resolve the type or default value expressions\n\/\/ in the attribute statements. The given context must therefore be the same\n\/\/ context that would ultimately be provided to Execute in the StmtBlockExecute\n\/\/ object or else the result may be incorrect.\nfunc (sb StmtBlock) Attributes(ctx *Context) (map[string]StmtBlockAttr, source.Diags) {\n\tvar diags source.Diags\n\tret := map[string]StmtBlockAttr{}\n\tfor _, stmt := range sb.stmts {\n\t\tif attr, isAttr := stmt.s.(*attrStmt); isAttr {\n\t\t\tname := attr.sym.DeclaredName()\n\t\t\tdef := StmtBlockAttr{\n\t\t\t\tSymbol: attr.sym,\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\tcase attr.valueType != NilExpr:\n\t\t\t\ttyVal, exprDiags := attr.valueType.Value(ctx)\n\t\t\t\tdiags = append(diags, exprDiags...)\n\n\t\t\t\tif tyVal.Type() != cbty.TypeType {\n\t\t\t\t\tdiags = append(diags, source.Diag{\n\t\t\t\t\t\tLevel: source.Error,\n\t\t\t\t\t\tSummary: \"Invalid attribute type\",\n\t\t\t\t\t\tDetail: fmt.Sprintf(\"Expected a type, but given a value of type %s. To assign a default value, use the '=' (equals) symbol.\", tyVal.Type().Name()),\n\t\t\t\t\t\tRanges: attr.valueType.sourceRange().List(),\n\t\t\t\t\t})\n\t\t\t\t\tdef.Type = cbty.PlaceholderVal.Type()\n\t\t\t\t\tdef.Required = true\n\t\t\t\t} else {\n\t\t\t\t\tdef.Type = tyVal.UnwrapType()\n\t\t\t\t\tdef.Required = true\n\t\t\t\t}\n\t\t\tcase attr.defValue != NilExpr:\n\t\t\t\tval, exprDiags := attr.defValue.Value(ctx)\n\t\t\t\tdiags = append(diags, exprDiags...)\n\n\t\t\t\tdef.Type = val.Type()\n\t\t\t\tdef.Required = false\n\t\t\tdefault:\n\t\t\t\t\/\/ should never happen\n\t\t\t\tpanic(\"attrStmt with neither value type nor default value\")\n\t\t\t}\n\n\t\t\tret[name] = def\n\t\t}\n\t}\n\treturn ret, diags\n}\n\n\/\/ ImplicitExports returns a SymbolSet of the symbols defined in the block's\n\/\/ scope that are eligible to be included in an implicit export object.\n\/\/\n\/\/ This includes most definitions, but specifically excludes imports as they are\n\/\/ assumed to be for internal use and could be requested directly by any caller.\n\/\/\n\/\/ This method is intended for creating an implicit export object for a module,\n\/\/ and so it will likely not produce a useful or sensible result for blocks\n\/\/ created in other contexts.\nfunc (sb StmtBlock) ImplicitExports() SymbolSet {\n\tvar ret SymbolSet\n\tfor _, stmt := range sb.stmts {\n\t\tif _, isImport := stmt.s.(*importStmt); isImport {\n\t\t\tcontinue\n\t\t}\n\t\tsym := stmt.s.definedSymbol()\n\t\tif sym != nil {\n\t\t\tif ret == nil {\n\t\t\t\tret = SymbolSet{}\n\t\t\t}\n\t\t\tret.Add(sym)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (sb StmtBlock) Execute(exec StmtBlockExecute) (*StmtBlockResult, source.Diags) {\n\t\/\/ Make a new child context to work in. (We get \"exec\" by value here, so\n\t\/\/ we can mutate it without upsetting the caller.)\n\texec.Context = exec.Context.NewChild()\n\n\tresult := StmtBlockResult{}\n\tvar diags source.Diags\n\n\tresult.Scope = sb.scope\n\tresult.Context = exec.Context\n\n\tfor _, stmt := range sb.stmts {\n\t\tstmtDiags := stmt.s.execute(&exec, &result)\n\t\tdiags = append(diags, stmtDiags...)\n\t}\n\n\tresult.Context.final = true \/\/ no more modifications allowed\n\n\treturn &result, diags\n}\n\ntype StmtBlockExecute struct {\n\tContext *Context\n\tPackages map[string]cbty.Value\n\tAttrs map[*Symbol]cbty.Value\n}\n\ntype StmtBlockResult struct {\n\t\/\/ Context is the context that was created to evaluate the block.\n\t\/\/ It is provided only so that NewChild may be called on it for child\n\t\/\/ blocks; it should not be modified once returned.\n\tContext *Context\n\n\t\/\/ Scope is the scope that was created for the block during its\n\t\/\/ compilation. This object is shared between all executions of the same\n\t\/\/ block, and so should not be modified.\n\tScope *Scope\n\n\t\/\/ ExportValue is the value exported by an \"export\" statement, if any.\n\tExportValue cbty.Value\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014-2015 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nfunc (e *Evaluation) analyzeSafety() {\n\tvar score Score\n\tvar cover, safety Total\n\toppositeBishops := e.oppositeBishops()\n\n\tif engine.trace {\n\t\tdefer func() {\n\t\t\tvar his, her Score\n\t\t\te.checkpoint(`+King`, Total{*his.add(cover.white).add(safety.white), *her.add(cover.black).add(safety.black)})\n\t\t\te.checkpoint(`-Cover`, cover)\n\t\t\te.checkpoint(`-Safety`, safety)\n\t\t}()\n\t}\n\n\t\/\/ If any of the pawns or a king have moved then recalculate cover score.\n\tif e.position.king[White] != e.pawns.king[White] {\n\t\te.pawns.cover[White] = e.kingCover(White)\n\t\te.pawns.king[White] = e.position.king[White]\n\t}\n\tif e.position.king[Black] != e.pawns.king[Black] {\n\t\te.pawns.cover[Black] = e.kingCover(Black)\n\t\te.pawns.king[Black] = e.position.king[Black]\n\t}\n\n\t\/\/ Fetch king cover score from the pawn cache.\n\tcover.white.add(e.pawns.cover[White])\n\tcover.black.add(e.pawns.cover[Black])\n\n\t\/\/ Compute king's safety for both sides.\n\tif e.safety[White].threats > 0 {\n\t\tsafety.white = e.kingSafety(White)\n\t}\n\tif e.safety[Black].threats > 0 {\n\t\tsafety.black = e.kingSafety(Black)\n\t}\n\n\t\/\/ Less safe with opposite bishops.\n\tif oppositeBishops && e.isKingUnsafe(White) && safety.white.midgame < -onePawn \/ 10 * 8 {\n\t\tsafety.white.midgame -= bishopDanger.midgame\n\t}\n\tif oppositeBishops && e.isKingUnsafe(Black) && safety.black.midgame < -onePawn \/ 10 * 8 {\n\t\tsafety.black.midgame -= bishopDanger.midgame\n\t}\n\n\t\/\/ Calculate total king safety and pawn cover score.\n\tscore.add(safety.white).sub(safety.black).apply(weightSafety)\n\tscore.add(cover.white).sub(cover.black)\n\te.score.add(score)\n}\n\nfunc (e *Evaluation) kingSafety(color uint8) (score Score) {\n\tp, rival := e.position, color^1\n\tsafetyIndex, square := 0, int(p.king[color])\n\n\t\/\/ Find squares around the king that are being attacked by the\n\t\/\/ enemy and defended by our king only.\n\tdefended := e.attacks[pawn(color)] | e.attacks[knight(color)] |\n\t e.attacks[bishop(color)] | e.attacks[rook(color)] |\n\t e.attacks[queen(color)]\n\tweak := e.attacks[king(color)] & e.attacks[rival] & ^defended\n\n\t\/\/ Find possible queen checks on weak squares around the king.\n\t\/\/ We only consider squares where the queen is protected and\n\t\/\/ can't be captured by the king.\n\tprotected := e.attacks[pawn(rival)] | e.attacks[knight(rival)] |\n\t\t e.attacks[bishop(rival)] | e.attacks[rook(rival)] |\n\t\t e.attacks[king(rival)]\n\tchecks := weak & e.attacks[queen(rival)] & protected & ^p.outposts[rival]\n\tif checks.any() {\n\t\tsafetyIndex += queenCheck * checks.count()\n\t}\n\n\t\/\/ Out of all squares available for enemy pieces select the ones\n\t\/\/ that are not under our attack.\n\tsafe := ^(e.attacks[color] | p.outposts[rival])\n\n\t\/\/ Are there any safe squares from where enemy Knight could give\n\t\/\/ us a check?\n\tif checks := knightMoves[square] & safe & e.attacks[knight(rival)]; checks.any() {\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Bishop could give us a check?\n\tsafeBishopMoves := p.bishopMoves(square) & safe\n\tif checks := safeBishopMoves & e.attacks[bishop(rival)]; checks.any() {\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Rook could give us a check?\n\tsafeRookMoves := p.rookMoves(square) & safe\n\tif checks := safeRookMoves & e.attacks[rook(rival)]; checks.any() {\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Queen could give us a check?\n\tif checks := (safeBishopMoves | safeRookMoves) & e.attacks[queen(rival)]; checks.any() {\n\t\tsafetyIndex += queenCheck \/ 2 * checks.count()\n\t}\n\n\tthreatIndex := min(16, e.safety[color].attackers * e.safety[color].threats \/ 2) +\n\t\t\t(e.safety[color].attacks + weak.count()) * 3 +\n\t\t\trank(color, square) - e.pawns.cover[color].midgame \/ 16\n\tsafetyIndex = min(63, max(0, safetyIndex + threatIndex))\n\n\tscore.midgame -= kingSafety[safetyIndex]\n\n\treturn\n}\n\nfunc (e *Evaluation) kingCover(color uint8) (bonus Score) {\n\tp, square := e.position, int(e.position.king[color])\n\n\t\/\/ Don't bother with the cover if the king is too far out.\n\tif rank(color, square) <= A3H3 {\n\t\t\/\/ If we still have castle rights encourage castle pawns to stay intact\n\t\t\/\/ by scoring least safe castle.\n\t\tbonus.midgame = e.kingCoverBonus(color, square)\n\t\tif p.castles & castleKingside[color] != 0 {\n\t\t\tbonus.midgame = max(bonus.midgame, e.kingCoverBonus(color, homeKing[color] + 2))\n\t\t}\n\t\tif p.castles & castleQueenside[color] != 0 {\n\t\t\tbonus.midgame = max(bonus.midgame, e.kingCoverBonus(color, homeKing[color] - 2))\n\t\t}\n\t}\n\n\tbonus.endgame = e.kingPawnProximity(color, square)\n\n\treturn\n}\n\nfunc (e *Evaluation) kingCoverBonus(color uint8, square int) (bonus int) {\n\tbonus = onePawn + onePawn \/ 3\n\n\t\/\/ Get pawns adjacent to and in front of the king.\n\trow, col := coordinate(square)\n\tarea := maskRank[row] | maskPassed[color][square]\n\tcover := e.position.outposts[pawn(color)] & area\n\tstorm := e.position.outposts[pawn(color^1)] & area\n\n\t\/\/ For each of the cover files find the closest friendly pawn. The penalty\n\t\/\/ is carried if the pawn is missing or is too far from the king.\n\tfrom, to := max(B1, col) - 1, min(G1, col) + 1\n\tfor c := from; c <= to; c++ {\n\n\t\t\/\/ Friendly pawns protecting the kings.\n\t\tclosest := 0\n\t\tif pawns := (cover & maskFile[c]); pawns.any() {\n\t\t\tclosest = rank(color, pawns.closest(color))\n\t\t}\n\t\tbonus -= penaltyCover[closest]\n\n\t\t\/\/ Enemy pawns facing the king.\n\t\tif pawns := (storm & maskFile[c]); pawns.any() {\n\t\t\tfarthest := rank(color, pawns.farthest(color^1))\n\t\t\tif closest == 0 { \/\/ No opposing friendly pawn.\n\t\t\t\tbonus -= penaltyStorm[farthest]\n\t\t\t} else if farthest == closest + 1 {\n\t\t\t\tbonus -= penaltyStormBlocked[farthest]\n\t\t\t} else {\n\t\t\t\tbonus -= penaltyStormUnblocked[farthest]\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Calculates endgame penalty to encourage a king stay closer to friendly pawns.\nfunc (e *Evaluation) kingPawnProximity(color uint8, square int) (penalty int) {\n\tif pawns := e.position.outposts[pawn(color)]; pawns.any() && (pawns & e.attacks[king(color)]).empty() {\n\t\tproximity := 8\n\n\t\tfor pawns.any() {\n\t\t\tproximity = min(proximity, distance[square][pawns.pop()])\n\t\t}\n\n\t\tpenalty = -kingByPawn.endgame * (proximity - 1)\n\t}\n\n\treturn\n}\n\n<commit_msg>Tempo bonus for checks<commit_after>\/\/ Copyright (c) 2014-2015 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nfunc (e *Evaluation) analyzeSafety() {\n\tvar score Score\n\tvar cover, safety Total\n\n\tif engine.trace {\n\t\tdefer func() {\n\t\t\tvar our, their Score\n\t\t\te.checkpoint(`+King`, Total{*our.add(cover.white).add(safety.white), *their.add(cover.black).add(safety.black)})\n\t\t\te.checkpoint(`-Cover`, cover)\n\t\t\te.checkpoint(`-Safety`, safety)\n\t\t}()\n\t}\n\n\t\/\/ If any of the pawns or a king have moved then recalculate cover score.\n\tif e.position.king[White] != e.pawns.king[White] {\n\t\te.pawns.cover[White] = e.kingCover(White)\n\t\te.pawns.king[White] = e.position.king[White]\n\t}\n\tif e.position.king[Black] != e.pawns.king[Black] {\n\t\te.pawns.cover[Black] = e.kingCover(Black)\n\t\te.pawns.king[Black] = e.position.king[Black]\n\t}\n\n\t\/\/ Fetch king cover score from the pawn cache.\n\tcover.white.add(e.pawns.cover[White])\n\tcover.black.add(e.pawns.cover[Black])\n\n\t\/\/ Calculate king's safety for both sides.\n\tif e.safety[White].threats > 0 {\n\t\tsafety.white = e.kingSafety(White)\n\t}\n\tif e.safety[Black].threats > 0 {\n\t\tsafety.black = e.kingSafety(Black)\n\t}\n\n\t\/\/ Calculate total king safety and pawn cover score.\n\tscore.add(safety.white).sub(safety.black).apply(weightSafety)\n\tscore.add(cover.white).sub(cover.black)\n\te.score.add(score)\n}\n\nfunc (e *Evaluation) kingSafety(color uint8) (score Score) {\n\tp, rival := e.position, color^1\n\tsafetyIndex, checkers, square := 0, 0, int(p.king[color])\n\n\t\/\/ Find squares around the king that are being attacked by the\n\t\/\/ enemy and defended by our king only.\n\tdefended := e.attacks[pawn(color)] | e.attacks[knight(color)] |\n\t e.attacks[bishop(color)] | e.attacks[rook(color)] |\n\t e.attacks[queen(color)]\n\tweak := e.attacks[king(color)] & e.attacks[rival] & ^defended\n\n\t\/\/ Find possible queen checks on weak squares around the king.\n\t\/\/ We only consider squares where the queen is protected and\n\t\/\/ can't be captured by the king.\n\tprotected := e.attacks[pawn(rival)] | e.attacks[knight(rival)] |\n\t\t e.attacks[bishop(rival)] | e.attacks[rook(rival)] |\n\t\t e.attacks[king(rival)]\n\tchecks := weak & e.attacks[queen(rival)] & protected & ^p.outposts[rival]\n\tif checks.any() {\n\t\tcheckers++\n\t\tsafetyIndex += queenCheck * checks.count()\n\t}\n\n\t\/\/ Out of all squares available for enemy pieces select the ones\n\t\/\/ that are not under our attack.\n\tsafe := ^(e.attacks[color] | p.outposts[rival])\n\n\t\/\/ Are there any safe squares from where enemy Knight could give\n\t\/\/ us a check?\n\tif checks := knightMoves[square] & safe & e.attacks[knight(rival)]; checks.any() {\n\t\tcheckers++\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Bishop could give us a check?\n\tsafeBishopMoves := p.bishopMoves(square) & safe\n\tif checks := safeBishopMoves & e.attacks[bishop(rival)]; checks.any() {\n\t\tcheckers++\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Rook could give us a check?\n\tsafeRookMoves := p.rookMoves(square) & safe\n\tif checks := safeRookMoves & e.attacks[rook(rival)]; checks.any() {\n\t\tcheckers++\n\t\tsafetyIndex += checks.count()\n\t}\n\n\t\/\/ Are there any safe squares from where enemy Queen could give us a check?\n\tif checks := (safeBishopMoves | safeRookMoves) & e.attacks[queen(rival)]; checks.any() {\n\t\tcheckers++\n\t\tsafetyIndex += queenCheck \/ 2 * checks.count()\n\t}\n\n\tthreatIndex := min(16, e.safety[color].attackers * e.safety[color].threats \/ 2) +\n\t\t\t(e.safety[color].attacks + weak.count()) * 3 +\n\t\t\trank(color, square) - e.pawns.cover[color].midgame \/ 16\n\tsafetyIndex = min(63, max(0, safetyIndex + threatIndex))\n\n\tscore.midgame -= kingSafety[safetyIndex]\n\n\tif checkers > 0 {\n\t\tscore.add(rightToMove)\n\t\tif count > 1 {\n\t\t\tscore.add(rightToMove)\n\t\t}\n\t}\n\n\treturn score\n}\n\nfunc (e *Evaluation) kingCover(color uint8) (bonus Score) {\n\tp, square := e.position, int(e.position.king[color])\n\n\t\/\/ Don't bother with the cover if the king is too far out.\n\tif rank(color, square) <= A3H3 {\n\t\t\/\/ If we still have castle rights encourage castle pawns to stay intact\n\t\t\/\/ by scoring least safe castle.\n\t\tbonus.midgame = e.kingCoverBonus(color, square)\n\t\tif p.castles & castleKingside[color] != 0 {\n\t\t\tbonus.midgame = max(bonus.midgame, e.kingCoverBonus(color, homeKing[color] + 2))\n\t\t}\n\t\tif p.castles & castleQueenside[color] != 0 {\n\t\t\tbonus.midgame = max(bonus.midgame, e.kingCoverBonus(color, homeKing[color] - 2))\n\t\t}\n\t}\n\n\tbonus.endgame = e.kingPawnProximity(color, square)\n\n\treturn\n}\n\nfunc (e *Evaluation) kingCoverBonus(color uint8, square int) (bonus int) {\n\tbonus = onePawn + onePawn \/ 3\n\n\t\/\/ Get pawns adjacent to and in front of the king.\n\trow, col := coordinate(square)\n\tarea := maskRank[row] | maskPassed[color][square]\n\tcover := e.position.outposts[pawn(color)] & area\n\tstorm := e.position.outposts[pawn(color^1)] & area\n\n\t\/\/ For each of the cover files find the closest friendly pawn. The penalty\n\t\/\/ is carried if the pawn is missing or is too far from the king.\n\tfrom, to := max(B1, col) - 1, min(G1, col) + 1\n\tfor c := from; c <= to; c++ {\n\n\t\t\/\/ Friendly pawns protecting the kings.\n\t\tclosest := 0\n\t\tif pawns := (cover & maskFile[c]); pawns.any() {\n\t\t\tclosest = rank(color, pawns.closest(color))\n\t\t}\n\t\tbonus -= penaltyCover[closest]\n\n\t\t\/\/ Enemy pawns facing the king.\n\t\tif pawns := (storm & maskFile[c]); pawns.any() {\n\t\t\tfarthest := rank(color, pawns.farthest(color^1))\n\t\t\tif closest == 0 { \/\/ No opposing friendly pawn.\n\t\t\t\tbonus -= penaltyStorm[farthest]\n\t\t\t} else if farthest == closest + 1 {\n\t\t\t\tbonus -= penaltyStormBlocked[farthest]\n\t\t\t} else {\n\t\t\t\tbonus -= penaltyStormUnblocked[farthest]\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Calculates endgame penalty to encourage a king stay closer to friendly pawns.\nfunc (e *Evaluation) kingPawnProximity(color uint8, square int) (penalty int) {\n\tif pawns := e.position.outposts[pawn(color)]; pawns.any() && (pawns & e.attacks[king(color)]).empty() {\n\t\tproximity := 8\n\n\t\tfor pawns.any() {\n\t\t\tproximity = min(proximity, distance[square][pawns.pop()])\n\t\t}\n\n\t\tpenalty = -kingByPawn.endgame * (proximity - 1)\n\t}\n\n\treturn\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ A set of utility functions for working with maps.\n\/\/ Generally, maps and slices of any kind will work, but performance\n\/\/ is optimized for maps returned by json.Unmarshal(b, &interface{}). If\n\/\/ all the maps are map[string]interface{}, and all the slices are\n\/\/ []interface{}, and all the rest of the values are primitives, then\n\/\/ reflection is avoided.\npackage maps\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/ansel1\/merry\"\n\t\"github.com\/elgs\/gosplitargs\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ return a slice of the keys in the map\nfunc Keys(m map[string]interface{}) (keys []string) {\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}\n\n\/\/ return a new map, which is the deep merge of m1 and m2.\n\/\/ values in m2 override values in m1.\n\/\/ This recurses into nested slices and maps\n\/\/ Slices are merged simply by adding any m2 values which aren't\n\/\/ already in m1's slice. This won't do anything fancy with\n\/\/ slices that have duplicate values. Order is ignored. E.g.:\n\/\/\n\/\/ [5, 6, 7] + [5, 5, 5, 4] = [5, 6, 7, 4]\n\/\/\n\/\/ All values in the result will be normalized according to the following\n\/\/ rules:\n\/\/\n\/\/ 1. All maps with string keys will be converted into map[string]interface{}\n\/\/ 2. All slices will be converted to []interface{}\n\/\/ 3. All primitive numeric types will be converted into float64\n\/\/ 4. All other types will not be converted, and will not be merged. v2's value will just overwrite v1's\n\/\/\n\/\/ New copies of all maps and slices are made, so v1 and v2 will not be modified.\nfunc Merge(v1, v2 interface{}) interface{} {\n\tv1, _ = normalize(v1, true, false, true)\n\tv2, _ = normalize(v2, true, false, true)\n\tswitch t1 := v1.(type) {\n\tcase map[string]interface{}:\n\t\tif t2, isMap := v2.(map[string]interface{}); isMap {\n\t\t\tfor key, value := range t2 {\n\t\t\t\tt1[key] = Merge(t1[key], value)\n\t\t\t}\n\t\t\treturn t1\n\t\t}\n\tcase []interface{}:\n\t\tif t2, isSlice := v2.([]interface{}); isSlice {\n\t\t\torig := t1[:]\n\t\t\tfor _, value := range t2 {\n\t\t\t\tif !sliceContains(orig, value) {\n\t\t\t\t\tt1 = append(t1, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn t1\n\t\t}\n\t}\n\treturn v2\n}\n\nfunc sliceContains(s []interface{}, v interface{}) bool {\n\tswitch v.(type) {\n\tcase string, float64, bool, nil:\n\t\tfor _, value := range s {\n\t\t\tif value == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, value := range s {\n\t\tif reflect.DeepEqual(v, value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Returns true if m1 contains all the key paths as m2, and\n\/\/ the values at those paths are the equal. I.E. returns\n\/\/ true if m2 is a subset of m1.\n\/\/ This will recurse into nested maps.\n\/\/ When comparing to slice values, it will return true if\n\/\/ slice 1 has at least one value which contains each of the\n\/\/ values in slice 2. It's kind of dumb though. If slice 1\n\/\/ contains a single value, say a big map, which contains *all*\n\/\/ the values in slice 2, then this will return true. In other words.\n\/\/ when a match in slice 1 is found, that item is *not* removed from\n\/\/ the search when matching the next value in slice 2.\n\/\/ Examples:\n\/\/\n\/\/ {\"color\":\"red\"} contains {}\n\/\/ {\"color\":\"red\"} contains {\"color\":\"red\"}\n\/\/ {\"color\":\"red\",\"flavor\":\"beef\"} contains {\"color\":\"red\"}\n\/\/ {\"labels\":{\"color\":\"red\",\"flavor\":\"beef\"}} contains {\"labels\":{\"flavor\":\"beef\"}}\n\/\/ {\"tags\":[\"red\",\"green\",\"blue\"]} contains {\"tags\":[\"red\",\"green\"]}\n\n\/\/ This is what I mean about slice containment being a little simplistic:\n\/\/\n\/\/ {\"resources\":[{\"type\":\"car\",\"color\":\"red\",\"wheels\":4}]} contains {\"resources\":[{\"type\":\"car\"},{\"color\",\"red\"},{\"wheels\":4}]}\n\/\/\n\/\/ That will return true, despite there being 3 items in contained slice and only one item in the containing slice. The\n\/\/ one item in the containing slice matches each of the items in the contained slice.\nfunc Contains(v1, v2 interface{}) bool {\n\tv1, _ = normalize(v1, false, false, false)\n\tv2, _ = normalize(v2, false, false, false)\n\n\tswitch t1 := v1.(type) {\n\tcase bool, nil, string, float64:\n\t\treturn v1 == v2\n\tcase map[string]interface{}:\n\t\tt2, isMap := v2.(map[string]interface{})\n\t\tif !isMap {\n\t\t\t\/\/ v1 is a map, but v2 isn't; v1 can't contain v2\n\t\t\treturn false\n\t\t}\n\t\tif len(t2) > len(t1) {\n\t\t\t\/\/ if t2 is bigger than t1, then t1 can't contain t2\n\t\t\treturn false\n\t\t}\n\t\tfor key, val2 := range t2 {\n\t\t\tval1, present := t1[key]\n\t\t\tif !present || !Contains(val1, val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase []interface{}:\n\t\tt2, isSlice := v2.([]interface{})\n\t\tif !isSlice {\n\t\t\t\/\/ v1 is a slice, but v2 isn't; v1 can't contain v2\n\t\t\treturn false\n\t\t}\n\t\t\/\/ first, normalize the values in v1, so we\n\t\t\/\/ don't re-normalize them in each loop\n\t\tt1copy := make([]interface{}, len(t1))\n\t\tfor i, value := range t1 {\n\t\t\tt1copy[i], _ = normalize(value, false, false, false)\n\t\t}\n\t\tfor _, val2 := range t2 {\n\t\t\tfound := false\n\t\tSearch:\n\t\t\tfor _, value := range t1copy {\n\t\t\t\tif Contains(value, val2) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak Search\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\t\/\/ one of the values in v2 was not found in v1\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tdefault:\n\t\treturn reflect.DeepEqual(v1, v2)\n\t}\n}\n\n\/\/ returns true if trees share common key paths, but the values\n\/\/ at those paths are not equal.\n\/\/ i.e. if the two maps were merged, no values would be overwritten\n\/\/ conflicts == !contains(v1, v2) && !excludes(v1, v2)\n\/\/ conflicts == !contains(merge(v1, v2), v1)\nfunc Conflicts(m1, m2 map[string]interface{}) bool {\n\treturn !Contains(Merge(m1, m2), m1)\n}\n\nfunc normalize(v interface{}, makeCopies, doMarshaling, recurse bool) (v2 interface{}, err error) {\n\tv2 = v\n\tcopied := false\n\tswitch t := v.(type) {\n\tcase bool, string, nil, float64:\n\t\treturn\n\tcase int:\n\t\treturn float64(t), nil\n\tcase int8:\n\t\treturn float64(t), nil\n\tcase int16:\n\t\treturn float64(t), nil\n\tcase int32:\n\t\treturn float64(t), nil\n\tcase int64:\n\t\treturn float64(t), nil\n\tcase float32:\n\t\treturn float64(t), nil\n\tcase uint:\n\t\treturn float64(t), nil\n\tcase uint8:\n\t\treturn float64(t), nil\n\tcase uint16:\n\t\treturn float64(t), nil\n\tcase uint32:\n\t\treturn float64(t), nil\n\tcase uint64:\n\t\treturn float64(t), nil\n\tcase map[string]interface{}, []interface{}:\n\t\tif !makeCopies && !recurse {\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\trv := reflect.ValueOf(v)\n\t\tif rv.Kind() == reflect.Map && rv.Type().Key().Kind() == reflect.String {\n\t\t\tcopied = true\n\t\t\tm := make(map[string]interface{}, rv.Len())\n\t\t\tfor _, v := range rv.MapKeys() {\n\t\t\t\tm[v.String()] = rv.MapIndex(v).Interface()\n\t\t\t}\n\t\t\tv2 = m\n\t\t} else if rv.Kind() == reflect.Slice {\n\t\t\tcopied = true\n\t\t\tl := rv.Len()\n\t\t\ts := make([]interface{}, l)\n\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\ts[i] = rv.Index(i).Interface()\n\t\t\t}\n\t\t\tv2 = s\n\t\t} else if doMarshaling {\n\t\t\t\/\/ marshal\/unmarshal\n\t\t\tvar b []byte\n\t\t\tb, err = json.Marshal(v)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tv2 = nil\n\t\t\terr = json.Unmarshal(b, &v2)\n\t\t\treturn\n\t\t}\n\t}\n\tif recurse || (makeCopies && !copied) {\n\t\tswitch t := v2.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tvar m map[string]interface{}\n\t\t\tif makeCopies && !copied {\n\t\t\t\tm = make(map[string]interface{}, len(t))\n\t\t\t} else {\n\t\t\t\t\/\/ modify in place\n\t\t\t\tm = t\n\t\t\t}\n\t\t\tv2 = m\n\t\t\tfor key, value := range t {\n\t\t\t\tif recurse {\n\t\t\t\t\tif value, err = normalize(value, makeCopies, doMarshaling, recurse); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tm[key] = value\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tvar s []interface{}\n\t\t\tif makeCopies && !copied {\n\t\t\t\ts = make([]interface{}, len(t))\n\t\t\t} else {\n\t\t\t\t\/\/ modify in place\n\t\t\t\ts = t\n\t\t\t}\n\t\t\tv2 = s\n\t\t\tfor i := 0; i < len(t); i++ {\n\t\t\t\tif recurse {\n\t\t\t\t\tif s[i], err = normalize(t[i], makeCopies, doMarshaling, recurse); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ts[i] = t[i]\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Should be either a map or slice by now\")\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Recursively converts v1 into a tree of maps, slices, and primitives.\n\/\/ The types in the result will be the types the json package uses for unmarshalling\n\/\/ into interface{}. The rules are:\n\/\/\n\/\/ 1. All maps with string keys will be converted into map[string]interface{}\n\/\/ 2. All slices will be converted to []interface{}\n\/\/ 3. All primitive numeric types will be converted into float64\n\/\/ 4. string, bool, and nil are unmodified\n\/\/ 5. All other values will be converted into the above types by doing a json.Marshal and Unmarshal\n\/\/\n\/\/ Values in v1 will be modified in place if possible\nfunc Normalize(v1 interface{}) (interface{}, error) {\n\treturn normalize(v1, false, true, true)\n}\n\nvar PathNotFoundError = merry.New(\"Path not found\")\nvar PathNotMapError = merry.New(\"Path not map\")\nvar PathNotSliceError = merry.New(\"Path not slice\")\nvar IndexOutOfBoundsError = merry.New(\"Index out of bounds\")\n\n\/\/ Extracts the value at path from v.\n\/\/ Path is in the form:\n\/\/\n\/\/ response.things[2].color.red\n\/\/\n\/\/ You can use `merry` to test the types of return errors:\n\/\/\n\/\/ _, err := maps.Get(\"\",\"\")\n\/\/ if merry.Is(err, maps.PathNotFoundError) {\n\/\/ ...\n\/\/\n\/\/ `v` can be any primitive, map (must be keyed by string, but any value type), or slice, nested arbitrarily deep\nfunc Get(v interface{}, path string) (interface{}, error) {\n\tparts, err := gosplitargs.SplitArgs(path, \"\\\\.\", false)\n\tif err != nil {\n\t\treturn nil, merry.Prepend(err, \"Couldn't parse the path\")\n\t}\n\tout := v\n\tfor i := 0; i < len(parts); i++ {\n\t\tpart := strings.TrimSpace(parts[i])\n\t\tif len(part) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsliceIdx := -1\n\t\t\/\/ first check of the path part ends in an array index, like\n\t\t\/\/\n\t\t\/\/ tags[2]\n\t\t\/\/\n\t\t\/\/ Extract the \"2\", and truncate the part to \"tags\"\n\t\tif bracketIdx := strings.Index(part, \"[\"); bracketIdx > -1 && strings.HasSuffix(part, \"]\") {\n\t\t\tif idx, err := strconv.Atoi(part[bracketIdx+1 : len(part)-1]); err == nil {\n\t\t\t\tsliceIdx = idx\n\t\t\t\tpart = part[0:bracketIdx]\n\t\t\t}\n\t\t}\n\n\t\tif part = strings.TrimSpace(part); len(part) > 0 {\n\t\t\t\/\/ map key\n\t\t\tout, _ = normalize(out, false, false, false)\n\t\t\tif m, ok := out.(map[string]interface{}); ok {\n\t\t\t\tvar present bool\n\t\t\t\tif out, present = m[part]; !present {\n\t\t\t\t\treturn nil, PathNotFoundError.WithMessagef(\"%s not found\", strings.Join(parts[0:i+1], \".\"))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrPath := strings.Join(parts[0:i], \".\")\n\t\t\t\tif len(errPath) == 0 {\n\t\t\t\t\terrPath = \"v\"\n\t\t\t\t}\n\t\t\t\treturn nil, PathNotMapError.WithMessagef(\"%s is not a map\", errPath)\n\t\t\t}\n\t\t}\n\t\tif sliceIdx > -1 {\n\t\t\t\/\/ slice index\n\t\t\tout, _ = normalize(out, false, false, false)\n\t\t\tif s, ok := out.([]interface{}); ok {\n\t\t\t\tif l := len(s); l <= sliceIdx {\n\t\t\t\t\treturn nil, IndexOutOfBoundsError.WithMessagef(\"Index out of bounds at %s (len = %v)\", strings.Join(parts[0:i+1], \".\"), l)\n\t\t\t\t} else {\n\t\t\t\t\tout = s[sliceIdx]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrPath := strings.Join(append(parts[0:i], part), \".\")\n\t\t\t\tif len(errPath) == 0 {\n\t\t\t\t\terrPath = \"v\"\n\t\t\t\t}\n\t\t\t\treturn nil, PathNotSliceError.WithMessagef(\"%s is not a slice\", errPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ returns true if v is:\n\/\/\n\/\/ 1. nil\n\/\/ 2. an empty string\n\/\/ 3. an empty slice\n\/\/ 4. an empty map\n\/\/ 5. an empty array\n\/\/ 6. an empty channel\n\/\/\n\/\/ returns false otherwise\nfunc Empty(v interface{}) bool {\n\t\/\/ no op. just means the value wasn't a type that supports Len()\n\tdefer func() { recover() }()\n\tswitch t := v.(type) {\n\tcase bool, int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64:\n\t\treturn false\n\tcase nil:\n\t\treturn true\n\tcase string:\n\t\treturn len(strings.TrimSpace(t)) == 0\n\tcase map[string]interface{}:\n\t\treturn len(t) == 0\n\tcase []interface{}:\n\t\treturn len(t) == 0\n\tdefault:\n\t\trv := reflect.ValueOf(v)\n\t\tif rv.IsNil() {\n\t\t\t\/\/ handle case of (*Widget)(nil)\n\t\t\treturn true\n\t\t}\n\t\tif rv.Kind() == reflect.Ptr {\n\t\t\treturn Empty(rv.Elem())\n\t\t}\n\t\treturn reflect.ValueOf(v).Len() == 0\n\t}\n}\n<commit_msg>capture new stack for get errors<commit_after>\/\/ A set of utility functions for working with maps.\n\/\/ Generally, maps and slices of any kind will work, but performance\n\/\/ is optimized for maps returned by json.Unmarshal(b, &interface{}). If\n\/\/ all the maps are map[string]interface{}, and all the slices are\n\/\/ []interface{}, and all the rest of the values are primitives, then\n\/\/ reflection is avoided.\npackage maps\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/ansel1\/merry\"\n\t\"github.com\/elgs\/gosplitargs\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ return a slice of the keys in the map\nfunc Keys(m map[string]interface{}) (keys []string) {\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}\n\n\/\/ return a new map, which is the deep merge of m1 and m2.\n\/\/ values in m2 override values in m1.\n\/\/ This recurses into nested slices and maps\n\/\/ Slices are merged simply by adding any m2 values which aren't\n\/\/ already in m1's slice. This won't do anything fancy with\n\/\/ slices that have duplicate values. Order is ignored. E.g.:\n\/\/\n\/\/ [5, 6, 7] + [5, 5, 5, 4] = [5, 6, 7, 4]\n\/\/\n\/\/ All values in the result will be normalized according to the following\n\/\/ rules:\n\/\/\n\/\/ 1. All maps with string keys will be converted into map[string]interface{}\n\/\/ 2. All slices will be converted to []interface{}\n\/\/ 3. All primitive numeric types will be converted into float64\n\/\/ 4. All other types will not be converted, and will not be merged. v2's value will just overwrite v1's\n\/\/\n\/\/ New copies of all maps and slices are made, so v1 and v2 will not be modified.\nfunc Merge(v1, v2 interface{}) interface{} {\n\tv1, _ = normalize(v1, true, false, true)\n\tv2, _ = normalize(v2, true, false, true)\n\tswitch t1 := v1.(type) {\n\tcase map[string]interface{}:\n\t\tif t2, isMap := v2.(map[string]interface{}); isMap {\n\t\t\tfor key, value := range t2 {\n\t\t\t\tt1[key] = Merge(t1[key], value)\n\t\t\t}\n\t\t\treturn t1\n\t\t}\n\tcase []interface{}:\n\t\tif t2, isSlice := v2.([]interface{}); isSlice {\n\t\t\torig := t1[:]\n\t\t\tfor _, value := range t2 {\n\t\t\t\tif !sliceContains(orig, value) {\n\t\t\t\t\tt1 = append(t1, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn t1\n\t\t}\n\t}\n\treturn v2\n}\n\nfunc sliceContains(s []interface{}, v interface{}) bool {\n\tswitch v.(type) {\n\tcase string, float64, bool, nil:\n\t\tfor _, value := range s {\n\t\t\tif value == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, value := range s {\n\t\tif reflect.DeepEqual(v, value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Returns true if m1 contains all the key paths as m2, and\n\/\/ the values at those paths are the equal. I.E. returns\n\/\/ true if m2 is a subset of m1.\n\/\/ This will recurse into nested maps.\n\/\/ When comparing to slice values, it will return true if\n\/\/ slice 1 has at least one value which contains each of the\n\/\/ values in slice 2. It's kind of dumb though. If slice 1\n\/\/ contains a single value, say a big map, which contains *all*\n\/\/ the values in slice 2, then this will return true. In other words.\n\/\/ when a match in slice 1 is found, that item is *not* removed from\n\/\/ the search when matching the next value in slice 2.\n\/\/ Examples:\n\/\/\n\/\/ {\"color\":\"red\"} contains {}\n\/\/ {\"color\":\"red\"} contains {\"color\":\"red\"}\n\/\/ {\"color\":\"red\",\"flavor\":\"beef\"} contains {\"color\":\"red\"}\n\/\/ {\"labels\":{\"color\":\"red\",\"flavor\":\"beef\"}} contains {\"labels\":{\"flavor\":\"beef\"}}\n\/\/ {\"tags\":[\"red\",\"green\",\"blue\"]} contains {\"tags\":[\"red\",\"green\"]}\n\n\/\/ This is what I mean about slice containment being a little simplistic:\n\/\/\n\/\/ {\"resources\":[{\"type\":\"car\",\"color\":\"red\",\"wheels\":4}]} contains {\"resources\":[{\"type\":\"car\"},{\"color\",\"red\"},{\"wheels\":4}]}\n\/\/\n\/\/ That will return true, despite there being 3 items in contained slice and only one item in the containing slice. The\n\/\/ one item in the containing slice matches each of the items in the contained slice.\nfunc Contains(v1, v2 interface{}) bool {\n\tv1, _ = normalize(v1, false, false, false)\n\tv2, _ = normalize(v2, false, false, false)\n\n\tswitch t1 := v1.(type) {\n\tcase bool, nil, string, float64:\n\t\treturn v1 == v2\n\tcase map[string]interface{}:\n\t\tt2, isMap := v2.(map[string]interface{})\n\t\tif !isMap {\n\t\t\t\/\/ v1 is a map, but v2 isn't; v1 can't contain v2\n\t\t\treturn false\n\t\t}\n\t\tif len(t2) > len(t1) {\n\t\t\t\/\/ if t2 is bigger than t1, then t1 can't contain t2\n\t\t\treturn false\n\t\t}\n\t\tfor key, val2 := range t2 {\n\t\t\tval1, present := t1[key]\n\t\t\tif !present || !Contains(val1, val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase []interface{}:\n\t\tt2, isSlice := v2.([]interface{})\n\t\tif !isSlice {\n\t\t\t\/\/ v1 is a slice, but v2 isn't; v1 can't contain v2\n\t\t\treturn false\n\t\t}\n\t\t\/\/ first, normalize the values in v1, so we\n\t\t\/\/ don't re-normalize them in each loop\n\t\tt1copy := make([]interface{}, len(t1))\n\t\tfor i, value := range t1 {\n\t\t\tt1copy[i], _ = normalize(value, false, false, false)\n\t\t}\n\t\tfor _, val2 := range t2 {\n\t\t\tfound := false\n\t\tSearch:\n\t\t\tfor _, value := range t1copy {\n\t\t\t\tif Contains(value, val2) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak Search\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\t\/\/ one of the values in v2 was not found in v1\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tdefault:\n\t\treturn reflect.DeepEqual(v1, v2)\n\t}\n}\n\n\/\/ returns true if trees share common key paths, but the values\n\/\/ at those paths are not equal.\n\/\/ i.e. if the two maps were merged, no values would be overwritten\n\/\/ conflicts == !contains(v1, v2) && !excludes(v1, v2)\n\/\/ conflicts == !contains(merge(v1, v2), v1)\nfunc Conflicts(m1, m2 map[string]interface{}) bool {\n\treturn !Contains(Merge(m1, m2), m1)\n}\n\nfunc normalize(v interface{}, makeCopies, doMarshaling, recurse bool) (v2 interface{}, err error) {\n\tv2 = v\n\tcopied := false\n\tswitch t := v.(type) {\n\tcase bool, string, nil, float64:\n\t\treturn\n\tcase int:\n\t\treturn float64(t), nil\n\tcase int8:\n\t\treturn float64(t), nil\n\tcase int16:\n\t\treturn float64(t), nil\n\tcase int32:\n\t\treturn float64(t), nil\n\tcase int64:\n\t\treturn float64(t), nil\n\tcase float32:\n\t\treturn float64(t), nil\n\tcase uint:\n\t\treturn float64(t), nil\n\tcase uint8:\n\t\treturn float64(t), nil\n\tcase uint16:\n\t\treturn float64(t), nil\n\tcase uint32:\n\t\treturn float64(t), nil\n\tcase uint64:\n\t\treturn float64(t), nil\n\tcase map[string]interface{}, []interface{}:\n\t\tif !makeCopies && !recurse {\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\trv := reflect.ValueOf(v)\n\t\tif rv.Kind() == reflect.Map && rv.Type().Key().Kind() == reflect.String {\n\t\t\tcopied = true\n\t\t\tm := make(map[string]interface{}, rv.Len())\n\t\t\tfor _, v := range rv.MapKeys() {\n\t\t\t\tm[v.String()] = rv.MapIndex(v).Interface()\n\t\t\t}\n\t\t\tv2 = m\n\t\t} else if rv.Kind() == reflect.Slice {\n\t\t\tcopied = true\n\t\t\tl := rv.Len()\n\t\t\ts := make([]interface{}, l)\n\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\ts[i] = rv.Index(i).Interface()\n\t\t\t}\n\t\t\tv2 = s\n\t\t} else if doMarshaling {\n\t\t\t\/\/ marshal\/unmarshal\n\t\t\tvar b []byte\n\t\t\tb, err = json.Marshal(v)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tv2 = nil\n\t\t\terr = json.Unmarshal(b, &v2)\n\t\t\treturn\n\t\t}\n\t}\n\tif recurse || (makeCopies && !copied) {\n\t\tswitch t := v2.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tvar m map[string]interface{}\n\t\t\tif makeCopies && !copied {\n\t\t\t\tm = make(map[string]interface{}, len(t))\n\t\t\t} else {\n\t\t\t\t\/\/ modify in place\n\t\t\t\tm = t\n\t\t\t}\n\t\t\tv2 = m\n\t\t\tfor key, value := range t {\n\t\t\t\tif recurse {\n\t\t\t\t\tif value, err = normalize(value, makeCopies, doMarshaling, recurse); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tm[key] = value\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tvar s []interface{}\n\t\t\tif makeCopies && !copied {\n\t\t\t\ts = make([]interface{}, len(t))\n\t\t\t} else {\n\t\t\t\t\/\/ modify in place\n\t\t\t\ts = t\n\t\t\t}\n\t\t\tv2 = s\n\t\t\tfor i := 0; i < len(t); i++ {\n\t\t\t\tif recurse {\n\t\t\t\t\tif s[i], err = normalize(t[i], makeCopies, doMarshaling, recurse); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ts[i] = t[i]\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Should be either a map or slice by now\")\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Recursively converts v1 into a tree of maps, slices, and primitives.\n\/\/ The types in the result will be the types the json package uses for unmarshalling\n\/\/ into interface{}. The rules are:\n\/\/\n\/\/ 1. All maps with string keys will be converted into map[string]interface{}\n\/\/ 2. All slices will be converted to []interface{}\n\/\/ 3. All primitive numeric types will be converted into float64\n\/\/ 4. string, bool, and nil are unmodified\n\/\/ 5. All other values will be converted into the above types by doing a json.Marshal and Unmarshal\n\/\/\n\/\/ Values in v1 will be modified in place if possible\nfunc Normalize(v1 interface{}) (interface{}, error) {\n\treturn normalize(v1, false, true, true)\n}\n\nvar PathNotFoundError = merry.New(\"Path not found\")\nvar PathNotMapError = merry.New(\"Path not map\")\nvar PathNotSliceError = merry.New(\"Path not slice\")\nvar IndexOutOfBoundsError = merry.New(\"Index out of bounds\")\n\n\/\/ Extracts the value at path from v.\n\/\/ Path is in the form:\n\/\/\n\/\/ response.things[2].color.red\n\/\/\n\/\/ You can use `merry` to test the types of return errors:\n\/\/\n\/\/ _, err := maps.Get(\"\",\"\")\n\/\/ if merry.Is(err, maps.PathNotFoundError) {\n\/\/ ...\n\/\/\n\/\/ `v` can be any primitive, map (must be keyed by string, but any value type), or slice, nested arbitrarily deep\nfunc Get(v interface{}, path string) (interface{}, error) {\n\tparts, err := gosplitargs.SplitArgs(path, \"\\\\.\", false)\n\tif err != nil {\n\t\treturn nil, merry.Prepend(err, \"Couldn't parse the path\")\n\t}\n\tout := v\n\tfor i := 0; i < len(parts); i++ {\n\t\tpart := strings.TrimSpace(parts[i])\n\t\tif len(part) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsliceIdx := -1\n\t\t\/\/ first check of the path part ends in an array index, like\n\t\t\/\/\n\t\t\/\/ tags[2]\n\t\t\/\/\n\t\t\/\/ Extract the \"2\", and truncate the part to \"tags\"\n\t\tif bracketIdx := strings.Index(part, \"[\"); bracketIdx > -1 && strings.HasSuffix(part, \"]\") {\n\t\t\tif idx, err := strconv.Atoi(part[bracketIdx+1 : len(part)-1]); err == nil {\n\t\t\t\tsliceIdx = idx\n\t\t\t\tpart = part[0:bracketIdx]\n\t\t\t}\n\t\t}\n\n\t\tif part = strings.TrimSpace(part); len(part) > 0 {\n\t\t\t\/\/ map key\n\t\t\tout, _ = normalize(out, false, false, false)\n\t\t\tif m, ok := out.(map[string]interface{}); ok {\n\t\t\t\tvar present bool\n\t\t\t\tif out, present = m[part]; !present {\n\t\t\t\t\treturn nil, PathNotFoundError.Here().WithMessagef(\"%s not found\", strings.Join(parts[0:i+1], \".\"))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrPath := strings.Join(parts[0:i], \".\")\n\t\t\t\tif len(errPath) == 0 {\n\t\t\t\t\terrPath = \"v\"\n\t\t\t\t}\n\t\t\t\treturn nil, PathNotMapError.Here().WithMessagef(\"%s is not a map\", errPath)\n\t\t\t}\n\t\t}\n\t\tif sliceIdx > -1 {\n\t\t\t\/\/ slice index\n\t\t\tout, _ = normalize(out, false, false, false)\n\t\t\tif s, ok := out.([]interface{}); ok {\n\t\t\t\tif l := len(s); l <= sliceIdx {\n\t\t\t\t\treturn nil, IndexOutOfBoundsError.Here().WithMessagef(\"Index out of bounds at %s (len = %v)\", strings.Join(parts[0:i+1], \".\"), l)\n\t\t\t\t} else {\n\t\t\t\t\tout = s[sliceIdx]\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrPath := strings.Join(append(parts[0:i], part), \".\")\n\t\t\t\tif len(errPath) == 0 {\n\t\t\t\t\terrPath = \"v\"\n\t\t\t\t}\n\t\t\t\treturn nil, PathNotSliceError.Here().WithMessagef(\"%s is not a slice\", errPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ returns true if v is:\n\/\/\n\/\/ 1. nil\n\/\/ 2. an empty string\n\/\/ 3. an empty slice\n\/\/ 4. an empty map\n\/\/ 5. an empty array\n\/\/ 6. an empty channel\n\/\/\n\/\/ returns false otherwise\nfunc Empty(v interface{}) bool {\n\t\/\/ no op. just means the value wasn't a type that supports Len()\n\tdefer func() { recover() }()\n\tswitch t := v.(type) {\n\tcase bool, int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64:\n\t\treturn false\n\tcase nil:\n\t\treturn true\n\tcase string:\n\t\treturn len(strings.TrimSpace(t)) == 0\n\tcase map[string]interface{}:\n\t\treturn len(t) == 0\n\tcase []interface{}:\n\t\treturn len(t) == 0\n\tdefault:\n\t\trv := reflect.ValueOf(v)\n\t\tif rv.IsNil() {\n\t\t\t\/\/ handle case of (*Widget)(nil)\n\t\t\treturn true\n\t\t}\n\t\tif rv.Kind() == reflect.Ptr {\n\t\t\treturn Empty(rv.Elem())\n\t\t}\n\t\treturn reflect.ValueOf(v).Len() == 0\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/schollz\/goagrep\/goagrep\"\n)\n\nfunc init() {\n\t\/\/ Build database\n\t\/\/ only needs to be done once!\n\tdatabaseFile := \"words.db\"\n\twordlist := \"testlist\"\n\ttupleLength := 3\n\tif _, err := os.Stat(databaseFile); os.IsNotExist(err) {\n\t\tgoagrep.GenerateDB(wordlist, databaseFile, tupleLength)\n\t}\n\n}\nfunc main() {\n\t\/\/ Find word\n\tdatabaseFile := \"words.db\"\n\tsearchWord := \"heroint\"\n\tword, score := goagrep.GetMatch(searchWord, databaseFile)\n\tfmt.Println(word, score)\n}\n<commit_msg>Fixed to use 1.6 version<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/schollz\/goagrep\/goagrep\"\n)\n\nvar databaseFile string\nvar wordlist string\nvar tupleLength int\n\nfunc init() {\n\tdatabaseFile = \"words.db\"\n\twordlist = \"testlist\"\n\ttupleLength = 5\n\n\t\/\/ Build database\n\tif _, err := os.Stat(databaseFile); os.IsNotExist(err) {\n\t\tgoagrep.GenerateDB(wordlist, databaseFile, tupleLength, true)\n\t}\n}\n\nfunc main() {\n\t\/\/ Find word\n\tsearchWord := \"heroint\"\n\tword, score, err := goagrep.GetMatch(searchWord, databaseFile)\n\tfmt.Println(word, score, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\n\t\/\/ sm \"github.com\/ebittleman\/go-servicemanager\"\n\tsm \"gopkg.in\/ebittleman\/go-servicemanager.v0.0.1\"\n)\n\nconst LOG_SERVICE = \"Log\"\nconst CONFIG_SERVICE = \"Config\"\n\ntype Adder interface {\n\tAdd(int, int) int\n}\n\ntype simpleAdder struct{}\n\nfunc (s *simpleAdder) Add(a, b int) int {\n\treturn a + b\n}\n\ntype halfAdder struct{}\n\nfunc (s *halfAdder) Add(a, b int) int {\n\treturn int((a + b) \/ 2)\n}\n\ntype notAdder struct{}\n\ntype Calculator struct {\n\tAdder Adder\n\tLog *log.Logger `inject:\"Log\"`\n}\n\nfunc (c *Calculator) Add(a, b int) int {\n\tans := c.Adder.Add(a, b)\n\tc.Log.Printf(\"%d + %d = %d\\n\", a, b, ans)\n\treturn ans\n}\n\nvar factories map[string]sm.ServiceFactoryCallback\n\nfunc init() {\n\tfactories = map[string]sm.ServiceFactoryCallback{\n\t\t\"SimpleAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn &simpleAdder{}, nil\n\t\t},\n\n\t\t\"HalfAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn &halfAdder{}, nil\n\t\t},\n\n\t\t\"NotAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn ¬Adder{}, nil\n\t\t},\n\n\t\t\"Calculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"SimpleAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\t\"HalfCalculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"HalfAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\t\"NotCalculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"NotAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\tCONFIG_SERVICE: func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn map[string]interface{}{\n\t\t\t\t\"log.writer\": os.Stderr,\n\t\t\t\t\"log.prefix\": \"example: \",\n\t\t\t\t\"log.flags\": log.LstdFlags,\n\t\t\t}, nil\n\t\t},\n\n\t\tLOG_SERVICE: func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tconfig := GetConfig(sl)\n\t\t\treturn log.New(\n\t\t\t\tconfig[\"log.writer\"].(io.Writer),\n\t\t\t\tconfig[\"log.prefix\"].(string),\n\t\t\t\tconfig[\"log.flags\"].(int),\n\t\t\t), nil\n\t\t},\n\t}\n}\n\nfunc main() {\n\tmanager := sm.New()\n\n\tfor name, factory := range factories {\n\t\tmanager.Set(name, factory)\n\t}\n\n\tlog := GetLog(manager)\n\n\t_, err := GetCalculator(\"NotCalculator\", manager)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tcalc, err := GetCalculator(\"Calculator\", manager)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thalfy, err := GetCalculator(\"HalfCalculator\", manager)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tone := halfy.Add(1, 1)\n\n\tif one != 1 {\n\t\tlog.Fatalln(fmt.Errorf(\"Expected: 1, Got: %d\", one))\n\t}\n\n\ttwo := calc.Add(1, 1)\n\n\tif two != 2 {\n\t\tlog.Fatalln(fmt.Errorf(\"Expected: 2, Got: %d\", two))\n\t}\n\n}\n\nfunc GetLog(sl sm.ServiceLocator) *log.Logger {\n\tinst, err := sl.Get(LOG_SERVICE)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn inst.(*log.Logger)\n}\n\nfunc GetConfig(sl sm.ServiceLocator) map[string]interface{} {\n\tinst, err := sl.Get(CONFIG_SERVICE)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn inst.(map[string]interface{})\n}\n\nfunc GetCalculator(name string, sl sm.ServiceLocator) (*Calculator, error) {\n\tinst, err := sl.Get(name)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcalculator, ok := inst.(*Calculator)\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Expected: *Calculator, Got: %v\", reflect.TypeOf(inst))\n\t}\n\n\treturn calculator, nil\n}\n<commit_msg>setting import to major package<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\n\t\/\/ sm \"github.com\/ebittleman\/go-servicemanager\"\n\tsm \"gopkg.in\/ebittleman\/go-servicemanager.v0\"\n)\n\nconst LOG_SERVICE = \"Log\"\nconst CONFIG_SERVICE = \"Config\"\n\ntype Adder interface {\n\tAdd(int, int) int\n}\n\ntype simpleAdder struct{}\n\nfunc (s *simpleAdder) Add(a, b int) int {\n\treturn a + b\n}\n\ntype halfAdder struct{}\n\nfunc (s *halfAdder) Add(a, b int) int {\n\treturn int((a + b) \/ 2)\n}\n\ntype notAdder struct{}\n\ntype Calculator struct {\n\tAdder Adder\n\tLog *log.Logger `inject:\"Log\"`\n}\n\nfunc (c *Calculator) Add(a, b int) int {\n\tans := c.Adder.Add(a, b)\n\tc.Log.Printf(\"%d + %d = %d\\n\", a, b, ans)\n\treturn ans\n}\n\nvar factories map[string]sm.ServiceFactoryCallback\n\nfunc init() {\n\tfactories = map[string]sm.ServiceFactoryCallback{\n\t\t\"SimpleAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn &simpleAdder{}, nil\n\t\t},\n\n\t\t\"HalfAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn &halfAdder{}, nil\n\t\t},\n\n\t\t\"NotAdder\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn ¬Adder{}, nil\n\t\t},\n\n\t\t\"Calculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"SimpleAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\t\"HalfCalculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"HalfAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\t\"NotCalculator\": func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tadder, _ := sl.Get(\"NotAdder\")\n\t\t\treturn &Calculator{Adder: adder.(Adder)}, nil\n\t\t},\n\n\t\tCONFIG_SERVICE: func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\treturn map[string]interface{}{\n\t\t\t\t\"log.writer\": os.Stderr,\n\t\t\t\t\"log.prefix\": \"example: \",\n\t\t\t\t\"log.flags\": log.LstdFlags,\n\t\t\t}, nil\n\t\t},\n\n\t\tLOG_SERVICE: func(sl sm.ServiceLocator) (interface{}, error) {\n\t\t\tconfig := GetConfig(sl)\n\t\t\treturn log.New(\n\t\t\t\tconfig[\"log.writer\"].(io.Writer),\n\t\t\t\tconfig[\"log.prefix\"].(string),\n\t\t\t\tconfig[\"log.flags\"].(int),\n\t\t\t), nil\n\t\t},\n\t}\n}\n\nfunc main() {\n\tmanager := sm.New()\n\n\tfor name, factory := range factories {\n\t\tmanager.Set(name, factory)\n\t}\n\n\tlog := GetLog(manager)\n\n\t_, err := GetCalculator(\"NotCalculator\", manager)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tcalc, err := GetCalculator(\"Calculator\", manager)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thalfy, err := GetCalculator(\"HalfCalculator\", manager)\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tone := halfy.Add(1, 1)\n\n\tif one != 1 {\n\t\tlog.Fatalln(fmt.Errorf(\"Expected: 1, Got: %d\", one))\n\t}\n\n\ttwo := calc.Add(1, 1)\n\n\tif two != 2 {\n\t\tlog.Fatalln(fmt.Errorf(\"Expected: 2, Got: %d\", two))\n\t}\n\n}\n\nfunc GetLog(sl sm.ServiceLocator) *log.Logger {\n\tinst, err := sl.Get(LOG_SERVICE)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn inst.(*log.Logger)\n}\n\nfunc GetConfig(sl sm.ServiceLocator) map[string]interface{} {\n\tinst, err := sl.Get(CONFIG_SERVICE)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn inst.(map[string]interface{})\n}\n\nfunc GetCalculator(name string, sl sm.ServiceLocator) (*Calculator, error) {\n\tinst, err := sl.Get(name)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcalculator, ok := inst.(*Calculator)\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Expected: *Calculator, Got: %v\", reflect.TypeOf(inst))\n\t}\n\n\treturn calculator, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package workercmd\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/concourse\/atc\"\n\tconcourseCmd \"github.com\/concourse\/concourse\/cmd\"\n\t\"github.com\/concourse\/flag\"\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/tedsuo\/ifrit\"\n)\n\ntype Certs struct {\n\tDir string `long:\"certs-dir\" description:\"Directory to use when creating the resource certificates volume.\"`\n}\n\ntype RuntimeConfiguration struct {\n\tRuntime string `long:\"runtime\" default:\"guardian\" choice:\"guardian\" choice:\"containerd\" choice:\"houdini\" description:\"Runtime to use with the worker. Please note that Houdini is insecure and doesn't run 'tasks' in containers.\"`\n}\n\ntype GuardianRuntime struct {\n\tBin string `long:\"bin\" description:\"Path to a garden server executable (non-absolute names get resolved from $PATH).\"`\n\tDNS DNSConfig `group:\"DNS Proxy Configuration\" namespace:\"dns-proxy\"`\n\tRequestTimeout time.Duration `long:\"request-timeout\" default:\"5m\" description:\"How long to wait for requests to the Garden server to complete. 0 means no timeout.\"`\n\n\tConfig flag.File `long:\"config\" description:\"Path to a config file to use for the Garden backend. e.g. 'foo-bar=a,b' for '--foo-bar a --foo-bar b'.\"`\n\tBinaryFlags GdnBinaryFlags\n}\n\ntype ContainerdRuntime struct {\n\tConfig flag.File `long:\"config\" description:\"Path to a config file to use for the Containerd daemon.\"`\n\tBin string `long:\"bin\" description:\"Path to a containerd executable (non-absolute names get resolved from $PATH).\"`\n\tInitBin string `long:\"init-bin\" default:\"\/usr\/local\/concourse\/bin\/init\" description:\"Path to an init executable (non-absolute names get resolved from $PATH).\"`\n\tCNIPluginsDir string `long:\"cni-plugins-dir\" default:\"\/usr\/local\/concourse\/bin\" description:\"Path to CNI network plugins.\"`\n\tRequestTimeout time.Duration `long:\"request-timeout\" default:\"5m\" description:\"How long to wait for requests to Containerd to complete. 0 means no timeout.\"`\n\n\tNetwork struct {\n\t\tExternalIP flag.IP `long:\"external-ip\" description:\"IP address to use to reach container's mapped ports. Autodetected if not specified.\"`\n\t\t\/\/TODO can DNSConfig be simplifed to just a bool rather than struct with a bool?\n\t\tDNS DNSConfig `group:\"DNS Proxy Configuration\" namespace:\"dns-proxy\"`\n\t\tDNSServers []string `long:\"dns-server\" description:\"DNS server IP address to use instead of automatically determined servers. Can be specified multiple times.\"`\n\t\tRestrictedNetworks []string `long:\"restricted-network\" description:\"Network ranges to which traffic from containers will be restricted. Can be specified multiple times.\"`\n\t\tPool string `long:\"network-pool\" default:\"10.80.0.0\/16\" description:\"Network range to use for dynamically allocated container subnets.\"`\n\t\tMTU int `long:\"mtu\" description:\"MTU size for container network interfaces. Defaults to the MTU of the interface used for outbound access by the host. Max allowed value is 1500.\"`\n\t} `group:\"Container Networking\"`\n\n\tMaxContainers int `long:\"max-containers\" default:\"250\" description:\"Max container capacity. 0 means no limit.\"`\n}\n\nconst containerdRuntime = \"containerd\"\nconst guardianRuntime = \"guardian\"\nconst houdiniRuntime = \"houdini\"\n\nfunc (cmd WorkerCommand) LessenRequirements(prefix string, command *flags.Command) {\n\t\/\/ configured as work-dir\/volumes\n\tcommand.FindOptionByLongName(prefix + \"baggageclaim-volumes\").Required = false\n}\n\n\/\/ Chooses the appropriate runtime based on CONCOURSE_RUNTIME_TYPE.\n\/\/ The runtime is represented as a Ifrit runner that must include a Garden Server process. The Garden server exposes API\n\/\/ endpoints that allow the ATC to make container related requests to the worker.\n\/\/ The runner may also include additional processes such as the runtime's daemon or a DNS proxy server.\nfunc (cmd *WorkerCommand) gardenServerRunner(logger lager.Logger) (atc.Worker, ifrit.Runner, error) {\n\terr := cmd.checkRoot()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\terr = cmd.verifyRuntimeFlags()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\tworker := cmd.Worker.Worker()\n\tworker.Platform = \"linux\"\n\n\tif cmd.Certs.Dir != \"\" {\n\t\tworker.CertsPath = &cmd.Certs.Dir\n\t}\n\n\tworker.ResourceTypes, err = cmd.loadResources(logger.Session(\"load-resources\"))\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\tworker.Name, err = cmd.workerName()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\ttrySetConcourseDirInPATH()\n\n\tvar runner ifrit.Runner\n\n\tswitch {\n\tcase cmd.Runtime == houdiniRuntime:\n\t\trunner, err = cmd.houdiniRunner(logger)\n\tcase cmd.Runtime == containerdRuntime:\n\t\trunner, err = cmd.containerdRunner(logger)\n\tcase cmd.Runtime == guardianRuntime:\n\t\trunner, err = cmd.guardianRunner(logger)\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported Runtime :%s\", cmd.Runtime)\n\t}\n\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\treturn worker, runner, nil\n}\n\nfunc trySetConcourseDirInPATH() {\n\tbinDir := concourseCmd.DiscoverAsset(\"bin\")\n\tif binDir == \"\" {\n\t\treturn\n\t}\n\n\terr := os.Setenv(\"PATH\", binDir+\":\"+os.Getenv(\"PATH\"))\n\tif err != nil {\n\t\t\/\/ programming mistake\n\t\tpanic(fmt.Errorf(\"failed to set PATH environment variable: %w\", err))\n\t}\n}\n\nvar ErrNotRoot = errors.New(\"worker must be run as root\")\n\nfunc (cmd *WorkerCommand) checkRoot() error {\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif currentUser.Uid != \"0\" {\n\t\treturn ErrNotRoot\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *WorkerCommand) dnsProxyRunner(logger lager.Logger) (ifrit.Runner, error) {\n\tserver, err := cmd.Guardian.DNS.Server()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {\n\t\tserver.NotifyStartedFunc = func() {\n\t\t\tclose(ready)\n\t\t\tlogger.Info(\"started\")\n\t\t}\n\n\t\tserveErr := make(chan error, 1)\n\n\t\tgo func() {\n\t\t\tserveErr <- server.ListenAndServe()\n\t\t}()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-serveErr:\n\t\t\t\treturn err\n\t\t\tcase <-signals:\n\t\t\t\tserver.Shutdown()\n\t\t\t}\n\t\t}\n\t}), nil\n}\n\nfunc (cmd *WorkerCommand) loadResources(logger lager.Logger) ([]atc.WorkerResourceType, error) {\n\tvar types []atc.WorkerResourceType\n\n\tif cmd.ResourceTypes != \"\" {\n\t\tbasePath := cmd.ResourceTypes.Path()\n\n\t\tentries, err := ioutil.ReadDir(basePath)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"failed-to-read-resources-dir\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, e := range entries {\n\t\t\tmeta, err := ioutil.ReadFile(filepath.Join(basePath, e.Name(), \"resource_metadata.json\"))\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-read-resource-type-metadata\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvar t atc.WorkerResourceType\n\t\t\terr = json.Unmarshal(meta, &t)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-unmarshal-resource-type-metadata\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tt.Image = filepath.Join(basePath, e.Name(), \"rootfs.tgz\")\n\n\t\t\ttypes = append(types, t)\n\t\t}\n\t}\n\n\treturn types, nil\n}\n\nfunc (cmd *WorkerCommand) hasFlags(prefix string) bool {\n\tenv := os.Environ()\n\n\tfor _, envVar := range env {\n\t\tif strings.HasPrefix(envVar, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nconst guardianEnvPrefix = \"CONCOURSE_GARDEN_\"\nconst containerdEnvPrefix = \"CONCOURSE_CONTAINERD_\"\n\n\/\/ Checks if runtime specific flags provided match the selected runtime type\nfunc (cmd *WorkerCommand) verifyRuntimeFlags() error {\n\tswitch {\n\tcase cmd.Runtime == houdiniRuntime:\n\t\tif cmd.hasFlags(guardianEnvPrefix) || cmd.hasFlags(containerdEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s or %s environment variables with Houdini\", guardianEnvPrefix, containerdEnvPrefix)\n\t\t}\n\tcase cmd.Runtime == containerdRuntime:\n\t\tif cmd.hasFlags(guardianEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s environment variables with Containerd\", guardianEnvPrefix)\n\t\t}\n\tcase cmd.Runtime == guardianRuntime:\n\t\tif cmd.hasFlags(containerdEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s environment variables with Guardian\", containerdEnvPrefix)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported Runtime :%s\", cmd.Runtime)\n\t}\n\n\treturn nil\n}\n<commit_msg>remove claimed max allowed value<commit_after>package workercmd\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/concourse\/atc\"\n\tconcourseCmd \"github.com\/concourse\/concourse\/cmd\"\n\t\"github.com\/concourse\/flag\"\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/tedsuo\/ifrit\"\n)\n\ntype Certs struct {\n\tDir string `long:\"certs-dir\" description:\"Directory to use when creating the resource certificates volume.\"`\n}\n\ntype RuntimeConfiguration struct {\n\tRuntime string `long:\"runtime\" default:\"guardian\" choice:\"guardian\" choice:\"containerd\" choice:\"houdini\" description:\"Runtime to use with the worker. Please note that Houdini is insecure and doesn't run 'tasks' in containers.\"`\n}\n\ntype GuardianRuntime struct {\n\tBin string `long:\"bin\" description:\"Path to a garden server executable (non-absolute names get resolved from $PATH).\"`\n\tDNS DNSConfig `group:\"DNS Proxy Configuration\" namespace:\"dns-proxy\"`\n\tRequestTimeout time.Duration `long:\"request-timeout\" default:\"5m\" description:\"How long to wait for requests to the Garden server to complete. 0 means no timeout.\"`\n\n\tConfig flag.File `long:\"config\" description:\"Path to a config file to use for the Garden backend. e.g. 'foo-bar=a,b' for '--foo-bar a --foo-bar b'.\"`\n\tBinaryFlags GdnBinaryFlags\n}\n\ntype ContainerdRuntime struct {\n\tConfig flag.File `long:\"config\" description:\"Path to a config file to use for the Containerd daemon.\"`\n\tBin string `long:\"bin\" description:\"Path to a containerd executable (non-absolute names get resolved from $PATH).\"`\n\tInitBin string `long:\"init-bin\" default:\"\/usr\/local\/concourse\/bin\/init\" description:\"Path to an init executable (non-absolute names get resolved from $PATH).\"`\n\tCNIPluginsDir string `long:\"cni-plugins-dir\" default:\"\/usr\/local\/concourse\/bin\" description:\"Path to CNI network plugins.\"`\n\tRequestTimeout time.Duration `long:\"request-timeout\" default:\"5m\" description:\"How long to wait for requests to Containerd to complete. 0 means no timeout.\"`\n\n\tNetwork struct {\n\t\tExternalIP flag.IP `long:\"external-ip\" description:\"IP address to use to reach container's mapped ports. Autodetected if not specified.\"`\n\t\t\/\/TODO can DNSConfig be simplifed to just a bool rather than struct with a bool?\n\t\tDNS DNSConfig `group:\"DNS Proxy Configuration\" namespace:\"dns-proxy\"`\n\t\tDNSServers []string `long:\"dns-server\" description:\"DNS server IP address to use instead of automatically determined servers. Can be specified multiple times.\"`\n\t\tRestrictedNetworks []string `long:\"restricted-network\" description:\"Network ranges to which traffic from containers will be restricted. Can be specified multiple times.\"`\n\t\tPool string `long:\"network-pool\" default:\"10.80.0.0\/16\" description:\"Network range to use for dynamically allocated container subnets.\"`\n\t\tMTU int `long:\"mtu\" description:\"MTU size for container network interfaces. Defaults to the MTU of the interface used for outbound access by the host.\"`\n\t} `group:\"Container Networking\"`\n\n\tMaxContainers int `long:\"max-containers\" default:\"250\" description:\"Max container capacity. 0 means no limit.\"`\n}\n\nconst containerdRuntime = \"containerd\"\nconst guardianRuntime = \"guardian\"\nconst houdiniRuntime = \"houdini\"\n\nfunc (cmd WorkerCommand) LessenRequirements(prefix string, command *flags.Command) {\n\t\/\/ configured as work-dir\/volumes\n\tcommand.FindOptionByLongName(prefix + \"baggageclaim-volumes\").Required = false\n}\n\n\/\/ Chooses the appropriate runtime based on CONCOURSE_RUNTIME_TYPE.\n\/\/ The runtime is represented as a Ifrit runner that must include a Garden Server process. The Garden server exposes API\n\/\/ endpoints that allow the ATC to make container related requests to the worker.\n\/\/ The runner may also include additional processes such as the runtime's daemon or a DNS proxy server.\nfunc (cmd *WorkerCommand) gardenServerRunner(logger lager.Logger) (atc.Worker, ifrit.Runner, error) {\n\terr := cmd.checkRoot()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\terr = cmd.verifyRuntimeFlags()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\tworker := cmd.Worker.Worker()\n\tworker.Platform = \"linux\"\n\n\tif cmd.Certs.Dir != \"\" {\n\t\tworker.CertsPath = &cmd.Certs.Dir\n\t}\n\n\tworker.ResourceTypes, err = cmd.loadResources(logger.Session(\"load-resources\"))\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\tworker.Name, err = cmd.workerName()\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\ttrySetConcourseDirInPATH()\n\n\tvar runner ifrit.Runner\n\n\tswitch {\n\tcase cmd.Runtime == houdiniRuntime:\n\t\trunner, err = cmd.houdiniRunner(logger)\n\tcase cmd.Runtime == containerdRuntime:\n\t\trunner, err = cmd.containerdRunner(logger)\n\tcase cmd.Runtime == guardianRuntime:\n\t\trunner, err = cmd.guardianRunner(logger)\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported Runtime :%s\", cmd.Runtime)\n\t}\n\n\tif err != nil {\n\t\treturn atc.Worker{}, nil, err\n\t}\n\n\treturn worker, runner, nil\n}\n\nfunc trySetConcourseDirInPATH() {\n\tbinDir := concourseCmd.DiscoverAsset(\"bin\")\n\tif binDir == \"\" {\n\t\treturn\n\t}\n\n\terr := os.Setenv(\"PATH\", binDir+\":\"+os.Getenv(\"PATH\"))\n\tif err != nil {\n\t\t\/\/ programming mistake\n\t\tpanic(fmt.Errorf(\"failed to set PATH environment variable: %w\", err))\n\t}\n}\n\nvar ErrNotRoot = errors.New(\"worker must be run as root\")\n\nfunc (cmd *WorkerCommand) checkRoot() error {\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif currentUser.Uid != \"0\" {\n\t\treturn ErrNotRoot\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *WorkerCommand) dnsProxyRunner(logger lager.Logger) (ifrit.Runner, error) {\n\tserver, err := cmd.Guardian.DNS.Server()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {\n\t\tserver.NotifyStartedFunc = func() {\n\t\t\tclose(ready)\n\t\t\tlogger.Info(\"started\")\n\t\t}\n\n\t\tserveErr := make(chan error, 1)\n\n\t\tgo func() {\n\t\t\tserveErr <- server.ListenAndServe()\n\t\t}()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-serveErr:\n\t\t\t\treturn err\n\t\t\tcase <-signals:\n\t\t\t\tserver.Shutdown()\n\t\t\t}\n\t\t}\n\t}), nil\n}\n\nfunc (cmd *WorkerCommand) loadResources(logger lager.Logger) ([]atc.WorkerResourceType, error) {\n\tvar types []atc.WorkerResourceType\n\n\tif cmd.ResourceTypes != \"\" {\n\t\tbasePath := cmd.ResourceTypes.Path()\n\n\t\tentries, err := ioutil.ReadDir(basePath)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"failed-to-read-resources-dir\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, e := range entries {\n\t\t\tmeta, err := ioutil.ReadFile(filepath.Join(basePath, e.Name(), \"resource_metadata.json\"))\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-read-resource-type-metadata\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvar t atc.WorkerResourceType\n\t\t\terr = json.Unmarshal(meta, &t)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"failed-to-unmarshal-resource-type-metadata\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tt.Image = filepath.Join(basePath, e.Name(), \"rootfs.tgz\")\n\n\t\t\ttypes = append(types, t)\n\t\t}\n\t}\n\n\treturn types, nil\n}\n\nfunc (cmd *WorkerCommand) hasFlags(prefix string) bool {\n\tenv := os.Environ()\n\n\tfor _, envVar := range env {\n\t\tif strings.HasPrefix(envVar, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nconst guardianEnvPrefix = \"CONCOURSE_GARDEN_\"\nconst containerdEnvPrefix = \"CONCOURSE_CONTAINERD_\"\n\n\/\/ Checks if runtime specific flags provided match the selected runtime type\nfunc (cmd *WorkerCommand) verifyRuntimeFlags() error {\n\tswitch {\n\tcase cmd.Runtime == houdiniRuntime:\n\t\tif cmd.hasFlags(guardianEnvPrefix) || cmd.hasFlags(containerdEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s or %s environment variables with Houdini\", guardianEnvPrefix, containerdEnvPrefix)\n\t\t}\n\tcase cmd.Runtime == containerdRuntime:\n\t\tif cmd.hasFlags(guardianEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s environment variables with Containerd\", guardianEnvPrefix)\n\t\t}\n\tcase cmd.Runtime == guardianRuntime:\n\t\tif cmd.hasFlags(containerdEnvPrefix) {\n\t\t\treturn fmt.Errorf(\"cannot use %s environment variables with Guardian\", containerdEnvPrefix)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported Runtime :%s\", cmd.Runtime)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package capn\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n)\n\nvar (\n\terrBufferCall = errors.New(\"capn: can't call on a memory buffer\")\n\tErrInvalidSegment = errors.New(\"capn: invalid segment id\")\n\tErrTooMuchData = errors.New(\"capn: too much data in stream\")\n)\n\ntype buffer struct {\n\tSegment\n\tcapTable\n}\n\n\/\/ NewBuffer creates an expanding single segment buffer. Creating new objects\n\/\/ will expand the buffer. Data can be nil (or length 0 with some capacity) if\n\/\/ creating a new session. If parsing an existing segment than data should be\n\/\/ the segment contents and will not be copied.\nfunc NewBuffer(data []byte) *Segment {\n\tif uint64(len(data)) > uint64(math.MaxUint32) {\n\t\treturn nil\n\t}\n\n\tb := new(buffer)\n\tb.Message = b\n\tb.Data = data\n\treturn &b.Segment\n}\n\nfunc (b *buffer) NewSegment(minsz int) (*Segment, error) {\n\tif minsz < 4096 {\n\t\tminsz = 4096\n\t}\n\tif uint64(len(b.Data)) > uint64(math.MaxUint32)-uint64(minsz) {\n\t\treturn nil, ErrOverlarge\n\t}\n\tb.Data = append(b.Data, make([]byte, minsz)...)\n\tb.Data = b.Data[:len(b.Data)-minsz]\n\treturn &b.Segment, nil\n}\n\nfunc (b *buffer) Lookup(segid uint32) (*Segment, error) {\n\tif segid == 0 {\n\t\treturn &b.Segment, nil\n\t} else {\n\t\treturn nil, ErrInvalidSegment\n\t}\n}\n\ntype multiBuffer struct {\n\tsegments []*Segment\n\tcapTable\n}\n\n\/\/ NewmultiBuffer creates a new multi segment message. Creating new objects\n\/\/ will try and reuse the buffers available, but will create new ones if there\n\/\/ is insufficient capacity. When parsing an existing message data should be\n\/\/ the list of segments. The data buffers will not be copied.\nfunc NewmultiBuffer(data [][]byte) *Segment {\n\tm := &multiBuffer{\n\t\tsegments: make([]*Segment, len(data)),\n\t}\n\tfor i, d := range data {\n\t\tm.segments[i] = &Segment{m, d, uint32(i), false}\n\t}\n\tif len(data) > 0 {\n\t\treturn m.segments[0]\n\t}\n\treturn &Segment{m, nil, 0xFFFFFFFF, false}\n}\n\nvar (\n\tMaxSegmentNumber = 1024\n\tMaxTotalSize = 1024 * 1024 * 1024\n)\n\nfunc (m *multiBuffer) NewSegment(minsz int) (*Segment, error) {\n\tfor _, s := range m.segments {\n\t\tif len(s.Data)+minsz <= cap(s.Data) {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\tif minsz < 4096 {\n\t\tminsz = 4096\n\t}\n\ts := &Segment{m, make([]byte, 0, minsz), uint32(len(m.segments)), false}\n\tm.segments = append(m.segments, s)\n\treturn s, nil\n}\n\nfunc (m *multiBuffer) Lookup(segid uint32) (*Segment, error) {\n\tif uint(segid) < uint(len(m.segments)) {\n\t\treturn m.segments[segid], nil\n\t} else {\n\t\treturn nil, ErrInvalidSegment\n\t}\n}\n\n\/\/ ReadFromStream reads a non-packed serialized stream from r. buf is used to\n\/\/ buffer the read contents, can be nil, and is provided so that the buffer\n\/\/ can be reused between messages. The returned segment is the first segment\n\/\/ read, which contains the root pointer.\nfunc ReadFromStream(r io.Reader, buf *bytes.Buffer) (*Segment, error) {\n\tif buf == nil {\n\t\tbuf = new(bytes.Buffer)\n\t} else {\n\t\tbuf.Reset()\n\t}\n\n\tif _, err := io.CopyN(buf, r, 4); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif binary.LittleEndian.Uint32(buf.Bytes()[:]) >= uint32(MaxSegmentNumber) {\n\t\treturn nil, ErrTooMuchData\n\t}\n\n\tsegnum := int(binary.LittleEndian.Uint32(buf.Bytes()[:]) + 1)\n\thdrsz := 8*(segnum\/2) + 4\n\n\tif _, err := io.CopyN(buf, r, int64(hdrsz)); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttotal := 0\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := binary.LittleEndian.Uint32(buf.Bytes()[4*i+4:])\n\t\tif uint64(total)+uint64(sz)*8 > uint64(MaxTotalSize) {\n\t\t\treturn nil, ErrTooMuchData\n\t\t}\n\t\ttotal += int(sz) * 8\n\t}\n\n\tif _, err := io.CopyN(buf, r, int64(total)); err != nil {\n\t\treturn nil, err\n\t}\n\n\thdrv := buf.Bytes()[4 : hdrsz+4]\n\tdatav := buf.Bytes()[hdrsz+4:]\n\n\tif segnum == 1 {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv)) * 8\n\t\treturn NewBuffer(datav[:sz]), nil\n\t}\n\n\tm := &multiBuffer{segments: make([]*Segment, segnum)}\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv[4*i:])) * 8\n\t\tm.segments[i] = &Segment{m, datav[:sz], uint32(i), false}\n\t\tdatav = datav[sz:]\n\t}\n\n\treturn m.segments[0], nil\n}\n\n\/\/ ReadFromMemoryZeroCopy: like ReadFromStream, but reads a non-packed\n\/\/ serialized stream that already resides in memory in the argument data.\n\/\/ The returned segment is the first segment read, which contains\n\/\/ the root pointer. The returned bytesRead says how many bytes were\n\/\/ consumed from data in making seg. The caller should advance the\n\/\/ data slice by doing data = data[bytesRead:] between successive calls\n\/\/ to ReadFromMemoryZeroCopy().\nfunc ReadFromMemoryZeroCopy(data []byte) (seg *Segment, bytesRead int64, err error) {\n\n\tif len(data) < 4 {\n\t\treturn nil, 0, io.EOF\n\t}\n\n\tif binary.LittleEndian.Uint32(data[0:4]) >= uint32(MaxSegmentNumber) {\n\t\treturn nil, 0, ErrTooMuchData\n\t}\n\n\tsegnum := int(binary.LittleEndian.Uint32(data[0:4]) + 1)\n\thdrsz := 8*(segnum\/2) + 4\n\n\tb := data[0:(hdrsz + 4)]\n\n\ttotal := 0\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := binary.LittleEndian.Uint32(b[4*i+4:])\n\t\tif uint64(total)+uint64(sz)*8 > uint64(MaxTotalSize) {\n\t\t\treturn nil, 0, ErrTooMuchData\n\t\t}\n\t\ttotal += int(sz) * 8\n\t}\n\tif total == 0 {\n\t\treturn nil, 0, io.EOF\n\t}\n\n\thdrv := data[4:(hdrsz + 4)]\n\tdatav := data[hdrsz+4:]\n\tm := &multiBuffer{segments: make([]*Segment, segnum)}\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv[4*i:])) * 8\n\t\tm.segments[i] = &Segment{m, datav[:sz], uint32(i), false}\n\t\tdatav = datav[sz:]\n\t}\n\n\treturn m.segments[0], int64(4 + hdrsz + total), nil\n}\n\n\/\/ WriteTo writes the message that the segment is part of to the\n\/\/ provided stream in serialized form.\nfunc (s *Segment) WriteTo(w io.Writer) (int64, error) {\n\tsegnum := uint32(1)\n\tfor {\n\t\tif seg, _ := s.Message.Lookup(segnum); seg == nil {\n\t\t\tbreak\n\t\t}\n\t\tsegnum++\n\t}\n\n\thdrv := make([]uint8, 8*(segnum\/2)+8)\n\tbinary.LittleEndian.PutUint32(hdrv, segnum-1)\n\tfor i := uint32(0); i < segnum; i++ {\n\t\tseg, _ := s.Message.Lookup(i)\n\t\tbinary.LittleEndian.PutUint32(hdrv[4*i+4:], uint32(len(seg.Data)\/8))\n\t}\n\n\tif n, err := w.Write(hdrv); err != nil {\n\t\treturn int64(n), err\n\t}\n\twritten := int64(len(hdrv))\n\n\tfor i := uint32(0); i < segnum; i++ {\n\t\tseg, _ := s.Message.Lookup(i)\n\t\tif n, err := w.Write(seg.Data); err != nil {\n\t\t\treturn written + int64(n), err\n\t\t} else {\n\t\t\twritten += int64(n)\n\t\t}\n\t}\n\n\treturn written, nil\n}\n\ntype capTable []Client\n\nfunc (tab capTable) CapTable() []Client {\n\treturn []Client(tab)\n}\n\nfunc (tab *capTable) AddCap(c Client) uint32 {\n\tn := uint32(len(*tab))\n\t*tab = append(*tab, c)\n\treturn n\n}\n<commit_msg>s\/NewmultiBuffer\/NewMultiBuffer\/<commit_after>package capn\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n)\n\nvar (\n\terrBufferCall = errors.New(\"capn: can't call on a memory buffer\")\n\tErrInvalidSegment = errors.New(\"capn: invalid segment id\")\n\tErrTooMuchData = errors.New(\"capn: too much data in stream\")\n)\n\ntype buffer struct {\n\tSegment\n\tcapTable\n}\n\n\/\/ NewBuffer creates an expanding single segment buffer. Creating new objects\n\/\/ will expand the buffer. Data can be nil (or length 0 with some capacity) if\n\/\/ creating a new session. If parsing an existing segment then data should be\n\/\/ the segment contents and will not be copied.\nfunc NewBuffer(data []byte) *Segment {\n\tif uint64(len(data)) > uint64(math.MaxUint32) {\n\t\treturn nil\n\t}\n\n\tb := new(buffer)\n\tb.Message = b\n\tb.Data = data\n\treturn &b.Segment\n}\n\nfunc (b *buffer) NewSegment(minsz int) (*Segment, error) {\n\tif minsz < 4096 {\n\t\tminsz = 4096\n\t}\n\tif uint64(len(b.Data)) > uint64(math.MaxUint32)-uint64(minsz) {\n\t\treturn nil, ErrOverlarge\n\t}\n\tb.Data = append(b.Data, make([]byte, minsz)...)\n\tb.Data = b.Data[:len(b.Data)-minsz]\n\treturn &b.Segment, nil\n}\n\nfunc (b *buffer) Lookup(segid uint32) (*Segment, error) {\n\tif segid == 0 {\n\t\treturn &b.Segment, nil\n\t} else {\n\t\treturn nil, ErrInvalidSegment\n\t}\n}\n\ntype multiBuffer struct {\n\tsegments []*Segment\n\tcapTable\n}\n\n\/\/ NewMultiBuffer creates a new multi segment message. Creating new objects\n\/\/ will try and reuse the buffers available, but will create new ones if there\n\/\/ is insufficient capacity. When parsing an existing message data should be\n\/\/ the list of segments. The data buffers will not be copied.\nfunc NewMultiBuffer(data [][]byte) *Segment {\n\tm := &multiBuffer{\n\t\tsegments: make([]*Segment, len(data)),\n\t}\n\tfor i, d := range data {\n\t\tm.segments[i] = &Segment{m, d, uint32(i), false}\n\t}\n\tif len(data) > 0 {\n\t\treturn m.segments[0]\n\t}\n\treturn &Segment{m, nil, 0xFFFFFFFF, false}\n}\n\nvar (\n\tMaxSegmentNumber = 1024\n\tMaxTotalSize = 1024 * 1024 * 1024\n)\n\nfunc (m *multiBuffer) NewSegment(minsz int) (*Segment, error) {\n\tfor _, s := range m.segments {\n\t\tif len(s.Data)+minsz <= cap(s.Data) {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\tif minsz < 4096 {\n\t\tminsz = 4096\n\t}\n\ts := &Segment{m, make([]byte, 0, minsz), uint32(len(m.segments)), false}\n\tm.segments = append(m.segments, s)\n\treturn s, nil\n}\n\nfunc (m *multiBuffer) Lookup(segid uint32) (*Segment, error) {\n\tif uint(segid) < uint(len(m.segments)) {\n\t\treturn m.segments[segid], nil\n\t} else {\n\t\treturn nil, ErrInvalidSegment\n\t}\n}\n\n\/\/ ReadFromStream reads a non-packed serialized stream from r. buf is used to\n\/\/ buffer the read contents, can be nil, and is provided so that the buffer\n\/\/ can be reused between messages. The returned segment is the first segment\n\/\/ read, which contains the root pointer.\nfunc ReadFromStream(r io.Reader, buf *bytes.Buffer) (*Segment, error) {\n\tif buf == nil {\n\t\tbuf = new(bytes.Buffer)\n\t} else {\n\t\tbuf.Reset()\n\t}\n\n\tif _, err := io.CopyN(buf, r, 4); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif binary.LittleEndian.Uint32(buf.Bytes()[:]) >= uint32(MaxSegmentNumber) {\n\t\treturn nil, ErrTooMuchData\n\t}\n\n\tsegnum := int(binary.LittleEndian.Uint32(buf.Bytes()[:]) + 1)\n\thdrsz := 8*(segnum\/2) + 4\n\n\tif _, err := io.CopyN(buf, r, int64(hdrsz)); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttotal := 0\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := binary.LittleEndian.Uint32(buf.Bytes()[4*i+4:])\n\t\tif uint64(total)+uint64(sz)*8 > uint64(MaxTotalSize) {\n\t\t\treturn nil, ErrTooMuchData\n\t\t}\n\t\ttotal += int(sz) * 8\n\t}\n\n\tif _, err := io.CopyN(buf, r, int64(total)); err != nil {\n\t\treturn nil, err\n\t}\n\n\thdrv := buf.Bytes()[4 : hdrsz+4]\n\tdatav := buf.Bytes()[hdrsz+4:]\n\n\tif segnum == 1 {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv)) * 8\n\t\treturn NewBuffer(datav[:sz]), nil\n\t}\n\n\tm := &multiBuffer{segments: make([]*Segment, segnum)}\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv[4*i:])) * 8\n\t\tm.segments[i] = &Segment{m, datav[:sz], uint32(i), false}\n\t\tdatav = datav[sz:]\n\t}\n\n\treturn m.segments[0], nil\n}\n\n\/\/ ReadFromMemoryZeroCopy: like ReadFromStream, but reads a non-packed\n\/\/ serialized stream that already resides in memory in the argument data.\n\/\/ The returned segment is the first segment read, which contains\n\/\/ the root pointer. The returned bytesRead says how many bytes were\n\/\/ consumed from data in making seg. The caller should advance the\n\/\/ data slice by doing data = data[bytesRead:] between successive calls\n\/\/ to ReadFromMemoryZeroCopy().\nfunc ReadFromMemoryZeroCopy(data []byte) (seg *Segment, bytesRead int64, err error) {\n\n\tif len(data) < 4 {\n\t\treturn nil, 0, io.EOF\n\t}\n\n\tif binary.LittleEndian.Uint32(data[0:4]) >= uint32(MaxSegmentNumber) {\n\t\treturn nil, 0, ErrTooMuchData\n\t}\n\n\tsegnum := int(binary.LittleEndian.Uint32(data[0:4]) + 1)\n\thdrsz := 8*(segnum\/2) + 4\n\n\tb := data[0:(hdrsz + 4)]\n\n\ttotal := 0\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := binary.LittleEndian.Uint32(b[4*i+4:])\n\t\tif uint64(total)+uint64(sz)*8 > uint64(MaxTotalSize) {\n\t\t\treturn nil, 0, ErrTooMuchData\n\t\t}\n\t\ttotal += int(sz) * 8\n\t}\n\tif total == 0 {\n\t\treturn nil, 0, io.EOF\n\t}\n\n\thdrv := data[4:(hdrsz + 4)]\n\tdatav := data[hdrsz+4:]\n\tm := &multiBuffer{segments: make([]*Segment, segnum)}\n\tfor i := 0; i < segnum; i++ {\n\t\tsz := int(binary.LittleEndian.Uint32(hdrv[4*i:])) * 8\n\t\tm.segments[i] = &Segment{m, datav[:sz], uint32(i), false}\n\t\tdatav = datav[sz:]\n\t}\n\n\treturn m.segments[0], int64(4 + hdrsz + total), nil\n}\n\n\/\/ WriteTo writes the message that the segment is part of to the\n\/\/ provided stream in serialized form.\nfunc (s *Segment) WriteTo(w io.Writer) (int64, error) {\n\tsegnum := uint32(1)\n\tfor {\n\t\tif seg, _ := s.Message.Lookup(segnum); seg == nil {\n\t\t\tbreak\n\t\t}\n\t\tsegnum++\n\t}\n\n\thdrv := make([]uint8, 8*(segnum\/2)+8)\n\tbinary.LittleEndian.PutUint32(hdrv, segnum-1)\n\tfor i := uint32(0); i < segnum; i++ {\n\t\tseg, _ := s.Message.Lookup(i)\n\t\tbinary.LittleEndian.PutUint32(hdrv[4*i+4:], uint32(len(seg.Data)\/8))\n\t}\n\n\tif n, err := w.Write(hdrv); err != nil {\n\t\treturn int64(n), err\n\t}\n\twritten := int64(len(hdrv))\n\n\tfor i := uint32(0); i < segnum; i++ {\n\t\tseg, _ := s.Message.Lookup(i)\n\t\tif n, err := w.Write(seg.Data); err != nil {\n\t\t\treturn written + int64(n), err\n\t\t} else {\n\t\t\twritten += int64(n)\n\t\t}\n\t}\n\n\treturn written, nil\n}\n\ntype capTable []Client\n\nfunc (tab capTable) CapTable() []Client {\n\treturn []Client(tab)\n}\n\nfunc (tab *capTable) AddCap(c Client) uint32 {\n\tn := uint32(len(*tab))\n\t*tab = append(*tab, c)\n\treturn n\n}\n<|endoftext|>"} {"text":"<commit_before>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/parser\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar stackTrace int\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename string\n\ntype errorMessage string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"file\": initializeFileClass,\n\t\"net\/http\": initializeHTTPClass,\n\t\"net\/simple_server\": initializeSimpleServerClass,\n\t\"uri\": initializeURIClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainThread *thread\n\t\/\/ a map holds pointers of constants\n\tconstants map[string]*Pointer\n\t\/\/ a map holds different types of label tables\n\tisTables map[labelType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n\n\tthreadCount int\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) *VM {\n\tvm := &VM{args: args}\n\tvm.mainThread = vm.newThread()\n\n\tvm.initConstants()\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[labelType]isTable{\n\t\tbytecode.LabelDef: make(isTable),\n\t\tbytecode.LabelDefClass: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tp, _ := filepath.Abs(\"..\/\")\n\n\tif !strings.HasSuffix(p, \"goby\") {\n\t\tvm.projectRoot = path.Join(p, \"goby\")\n\t} else {\n\t\tvm.projectRoot = p\n\t}\n\n\treturn vm\n}\n\nfunc (vm *VM) newThread() *thread {\n\ts := &stack{}\n\tcfs := &callFrameStack{callFrames: []*callFrame{}}\n\tt := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = t\n\tcfs.thread = t\n\tt.vm = vm\n\tvm.threadCount++\n\treturn t\n}\n\n\/\/ ExecBytecodes accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecBytecodes(bytecodes, fn string) {\n\tfilename := filename(fn)\n\tp := newBytecodeParser(filename)\n\tp.vm = vm\n\tp.parseBytecode(bytecodes)\n\n\t\/\/ Keep update label table after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor labelType, table := range p.labelTable {\n\t\tfor labelName, is := range table {\n\t\t\tvm.isTables[labelType][labelName] = is\n\t\t}\n\t}\n\n\tvm.blockTables[p.filename] = p.blockTable\n\tvm.classISIndexTables[filename] = newISIndexTable()\n\tvm.methodISIndexTables[filename] = newISIndexTable()\n\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tswitch p.(type) {\n\t\t\tcase errorMessage:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcf := newCallFrame(p.program)\n\tcf.self = mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\tvm.startFromTopFrame()\n}\n\n\/\/ GetExecResult returns stack's top most value. Normally it's used in tests.\nfunc (vm *VM) GetExecResult() Object {\n\treturn vm.mainThread.stack.top().Target\n}\n\nfunc (vm *VM) initConstants() {\n\tvm.constants = make(map[string]*Pointer)\n\tconstants := make(map[string]*Pointer)\n\n\tbuiltInClasses := []Class{\n\t\tintegerClass,\n\t\tstringClass,\n\t\tbooleanClass,\n\t\tnullClass,\n\t\tarrayClass,\n\t\thashClass,\n\t\tclassClass,\n\t\tmethodClass,\n\t}\n\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, initializeString(arg))\n\t}\n\n\tfor _, c := range builtInClasses {\n\t\tp := &Pointer{Target: c}\n\t\tconstants[c.ReturnName()] = p\n\t}\n\n\tconstants[\"ARGV\"] = &Pointer{Target: initializeArray(args)}\n\tobjectClass.constants = constants\n\tvm.constants[\"Object\"] = &Pointer{objectClass}\n}\n\n\/\/ Start evaluation from top most call frame\nfunc (vm *VM) startFromTopFrame() {\n\tvm.mainThread.startFromTopFrame()\n}\n\nfunc (vm *VM) currentFilePath() string {\n\treturn string(vm.mainThread.callFrameStack.top().instructionSet.filename)\n}\n\nfunc (vm *VM) printDebugInfo(i *instruction) {\n\tfmt.Println(i.inspect())\n}\n\nfunc (vm *VM) getBlock(name string, filename filename) *instructionSet {\n\t\/\/ The \"name\" here is actually an index from label\n\t\/\/ for example <Block:1>'s name is \"1\"\n\tis, ok := vm.blockTables[filename][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find block %s\", name))\n\t}\n\n\treturn is\n}\n\nfunc (vm *VM) getMethodIS(name string, filename filename) (*instructionSet, bool) {\n\tiss, ok := vm.isTables[bytecode.LabelDef][name]\n\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tis := iss[vm.methodISIndexTables[filename].Data[name]]\n\n\tvm.methodISIndexTables[filename].Data[name]++\n\treturn is, ok\n}\n\nfunc (vm *VM) getClassIS(name string, filename filename) *instructionSet {\n\tiss, ok := vm.isTables[bytecode.LabelDefClass][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find class %s's instructions\", name))\n\t}\n\n\tis := iss[vm.classISIndexTables[filename].Data[name]]\n\n\tvm.classISIndexTables[filename].Data[name]++\n\treturn is\n}\n\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = initializeClass(name, isModule)\n\t\tobjectClass.constants[name] = &Pointer{Target: c}\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf *callFrame, constName string) *Pointer {\n\tvar constant *Pointer\n\tvar namespace Class\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(Class)\n\t}\n\n\tif hasNamespace {\n\t\tif namespace != cf.self {\n\t\t\tvm.mainThread.stack.pop()\n\t\t}\n\n\t\tconstant = namespace.lookupConstant(constName, true)\n\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tswitch s := cf.self.(type) {\n\tcase Class:\n\t\tconstant = s.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\tdefault:\n\t\tc := s.returnClass()\n\n\t\tconstant = c.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tconstant = vm.constants[constName]\n\treturn constant\n}\n\nfunc (vm *VM) execGobyLib(libName string) {\n\tlibPath := path.Join(vm.projectRoot, \"lib\", libName)\n\tfile, err := ioutil.ReadFile(libPath)\n\n\tif err != nil {\n\t\tvm.mainThread.returnError(err.Error())\n\t}\n\n\tvm.execRequiredFile(libPath, file)\n}\n\nfunc (vm *VM) execRequiredFile(filepath string, file []byte) {\n\tprogram := parser.BuildAST(file)\n\tg := bytecode.NewGenerator(program)\n\tbytecodes := g.GenerateByteCode(program)\n\n\toldMethodTable := isTable{}\n\toldClassTable := isTable{}\n\n\t\/\/ Copy current file's instruction sets.\n\tfor name, is := range vm.isTables[bytecode.LabelDef] {\n\t\toldMethodTable[name] = is\n\t}\n\n\tfor name, is := range vm.isTables[bytecode.LabelDefClass] {\n\t\toldClassTable[name] = is\n\t}\n\n\t\/\/ This creates new execution environments for required file, including new instruction set table.\n\t\/\/ So we need to copy old instruction sets and restore them later, otherwise current program's instruction set would be overwrite.\n\tvm.ExecBytecodes(bytecodes, filepath)\n\n\t\/\/ Restore instruction sets.\n\tvm.isTables[bytecode.LabelDef] = oldMethodTable\n\tvm.isTables[bytecode.LabelDefClass] = oldClassTable\n}\n\nfunc newError(format string, args ...interface{}) *Error {\n\treturn &Error{Message: fmt.Sprintf(format, args...)}\n}\n<commit_msg>Remove thread count since it's for debugging.<commit_after>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/parser\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar stackTrace int\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename string\n\ntype errorMessage string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"file\": initializeFileClass,\n\t\"net\/http\": initializeHTTPClass,\n\t\"net\/simple_server\": initializeSimpleServerClass,\n\t\"uri\": initializeURIClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainThread *thread\n\t\/\/ a map holds pointers of constants\n\tconstants map[string]*Pointer\n\t\/\/ a map holds different types of label tables\n\tisTables map[labelType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) *VM {\n\tvm := &VM{args: args}\n\tvm.mainThread = vm.newThread()\n\n\tvm.initConstants()\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfilename(fileDir): newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[labelType]isTable{\n\t\tbytecode.LabelDef: make(isTable),\n\t\tbytecode.LabelDefClass: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tp, _ := filepath.Abs(\"..\/\")\n\n\tif !strings.HasSuffix(p, \"goby\") {\n\t\tvm.projectRoot = path.Join(p, \"goby\")\n\t} else {\n\t\tvm.projectRoot = p\n\t}\n\n\treturn vm\n}\n\nfunc (vm *VM) newThread() *thread {\n\ts := &stack{}\n\tcfs := &callFrameStack{callFrames: []*callFrame{}}\n\tt := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = t\n\tcfs.thread = t\n\tt.vm = vm\n\treturn t\n}\n\n\/\/ ExecBytecodes accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecBytecodes(bytecodes, fn string) {\n\tfilename := filename(fn)\n\tp := newBytecodeParser(filename)\n\tp.vm = vm\n\tp.parseBytecode(bytecodes)\n\n\t\/\/ Keep update label table after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor labelType, table := range p.labelTable {\n\t\tfor labelName, is := range table {\n\t\t\tvm.isTables[labelType][labelName] = is\n\t\t}\n\t}\n\n\tvm.blockTables[p.filename] = p.blockTable\n\tvm.classISIndexTables[filename] = newISIndexTable()\n\tvm.methodISIndexTables[filename] = newISIndexTable()\n\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tswitch p.(type) {\n\t\t\tcase errorMessage:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcf := newCallFrame(p.program)\n\tcf.self = mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\tvm.startFromTopFrame()\n}\n\n\/\/ GetExecResult returns stack's top most value. Normally it's used in tests.\nfunc (vm *VM) GetExecResult() Object {\n\treturn vm.mainThread.stack.top().Target\n}\n\nfunc (vm *VM) initConstants() {\n\tvm.constants = make(map[string]*Pointer)\n\tconstants := make(map[string]*Pointer)\n\n\tbuiltInClasses := []Class{\n\t\tintegerClass,\n\t\tstringClass,\n\t\tbooleanClass,\n\t\tnullClass,\n\t\tarrayClass,\n\t\thashClass,\n\t\tclassClass,\n\t\tmethodClass,\n\t}\n\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, initializeString(arg))\n\t}\n\n\tfor _, c := range builtInClasses {\n\t\tp := &Pointer{Target: c}\n\t\tconstants[c.ReturnName()] = p\n\t}\n\n\tconstants[\"ARGV\"] = &Pointer{Target: initializeArray(args)}\n\tobjectClass.constants = constants\n\tvm.constants[\"Object\"] = &Pointer{objectClass}\n}\n\n\/\/ Start evaluation from top most call frame\nfunc (vm *VM) startFromTopFrame() {\n\tvm.mainThread.startFromTopFrame()\n}\n\nfunc (vm *VM) currentFilePath() string {\n\treturn string(vm.mainThread.callFrameStack.top().instructionSet.filename)\n}\n\nfunc (vm *VM) printDebugInfo(i *instruction) {\n\tfmt.Println(i.inspect())\n}\n\nfunc (vm *VM) getBlock(name string, filename filename) *instructionSet {\n\t\/\/ The \"name\" here is actually an index from label\n\t\/\/ for example <Block:1>'s name is \"1\"\n\tis, ok := vm.blockTables[filename][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find block %s\", name))\n\t}\n\n\treturn is\n}\n\nfunc (vm *VM) getMethodIS(name string, filename filename) (*instructionSet, bool) {\n\tiss, ok := vm.isTables[bytecode.LabelDef][name]\n\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tis := iss[vm.methodISIndexTables[filename].Data[name]]\n\n\tvm.methodISIndexTables[filename].Data[name]++\n\treturn is, ok\n}\n\nfunc (vm *VM) getClassIS(name string, filename filename) *instructionSet {\n\tiss, ok := vm.isTables[bytecode.LabelDefClass][name]\n\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Can't find class %s's instructions\", name))\n\t}\n\n\tis := iss[vm.classISIndexTables[filename].Data[name]]\n\n\tvm.classISIndexTables[filename].Data[name]++\n\treturn is\n}\n\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = initializeClass(name, isModule)\n\t\tobjectClass.constants[name] = &Pointer{Target: c}\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf *callFrame, constName string) *Pointer {\n\tvar constant *Pointer\n\tvar namespace Class\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(Class)\n\t}\n\n\tif hasNamespace {\n\t\tif namespace != cf.self {\n\t\t\tvm.mainThread.stack.pop()\n\t\t}\n\n\t\tconstant = namespace.lookupConstant(constName, true)\n\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tswitch s := cf.self.(type) {\n\tcase Class:\n\t\tconstant = s.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\tdefault:\n\t\tc := s.returnClass()\n\n\t\tconstant = c.lookupConstant(constName, true)\n\t\tif constant != nil {\n\t\t\treturn constant\n\t\t}\n\t}\n\n\tconstant = vm.constants[constName]\n\treturn constant\n}\n\nfunc (vm *VM) execGobyLib(libName string) {\n\tlibPath := path.Join(vm.projectRoot, \"lib\", libName)\n\tfile, err := ioutil.ReadFile(libPath)\n\n\tif err != nil {\n\t\tvm.mainThread.returnError(err.Error())\n\t}\n\n\tvm.execRequiredFile(libPath, file)\n}\n\nfunc (vm *VM) execRequiredFile(filepath string, file []byte) {\n\tprogram := parser.BuildAST(file)\n\tg := bytecode.NewGenerator(program)\n\tbytecodes := g.GenerateByteCode(program)\n\n\toldMethodTable := isTable{}\n\toldClassTable := isTable{}\n\n\t\/\/ Copy current file's instruction sets.\n\tfor name, is := range vm.isTables[bytecode.LabelDef] {\n\t\toldMethodTable[name] = is\n\t}\n\n\tfor name, is := range vm.isTables[bytecode.LabelDefClass] {\n\t\toldClassTable[name] = is\n\t}\n\n\t\/\/ This creates new execution environments for required file, including new instruction set table.\n\t\/\/ So we need to copy old instruction sets and restore them later, otherwise current program's instruction set would be overwrite.\n\tvm.ExecBytecodes(bytecodes, filepath)\n\n\t\/\/ Restore instruction sets.\n\tvm.isTables[bytecode.LabelDef] = oldMethodTable\n\tvm.isTables[bytecode.LabelDefClass] = oldClassTable\n}\n\nfunc newError(format string, args ...interface{}) *Error {\n\treturn &Error{Message: fmt.Sprintf(format, args...)}\n}\n<|endoftext|>"} {"text":"<commit_before>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/compiler\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/vm\/classes\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ Version stores current Goby version\nconst Version = \"0.1.8.1\"\n\n\/\/ These are the enums for marking parser's mode, which decides whether it should pop unused values.\nconst (\n\tNormalMode int = iota\n\tREPLMode\n\tTestMode\n)\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename = string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"net\/http\": initHTTPClass,\n\t\"net\/simple_server\": initSimpleServerClass,\n\t\"uri\": initURIClass,\n\t\"db\": initDBClass,\n\t\"plugin\": initPluginClass,\n\t\"json\": initJSONClass,\n\t\"concurrent\/array\": initConcurrentArrayClass,\n\t\"concurrent\/hash\": initConcurrentHashClass,\n\t\"concurrent\/rw_lock\": initConcurrentRWLockClass,\n\t\"spec\": initSpecClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainObj *RObject\n\tmainThread *thread\n\tobjectClass *RClass\n\t\/\/ a map holds different types of instruction set tables\n\tisTables map[setType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n\n\tchannelObjectMap *objectMap\n\n\tmode int\n\n\tlibFiles []string\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) (vm *VM, e error) {\n\tvm = &VM{args: args}\n\tvm.mainThread = vm.newThread()\n\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfileDir: newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfileDir: newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[setType]isTable{\n\t\tbytecode.MethodDef: make(isTable),\n\t\tbytecode.ClassDef: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tgobyRoot := os.Getenv(\"GOBY_ROOT\")\n\n\tif len(gobyRoot) == 0 {\n\t\tvm.projectRoot = fmt.Sprintf(\"\/usr\/local\/Cellar\/goby\/%s\", Version)\n\n\t\t_, err := os.Stat(vm.projectRoot)\n\n\t\tif err != nil {\n\t\t\tpath, _ := filepath.Abs(\"$GOPATH\/src\/github.com\/goby-lang\/goby\")\n\t\t\t_, err = os.Stat(path)\n\n\t\t\tif err != nil {\n\t\t\t\te = fmt.Errorf(\"You haven't set $GOBY_ROOT properly\")\n\t\t\t\treturn nil, e\n\t\t\t}\n\n\t\t\tvm.projectRoot = path\n\t\t}\n\t} else {\n\t\tvm.projectRoot = gobyRoot\n\t}\n\n\tvm.initConstants()\n\tvm.mainObj = vm.initMainObj()\n\tvm.channelObjectMap = &objectMap{store: &sync.Map{}}\n\n\tfor _, fn := range vm.libFiles {\n\t\terr := vm.mainThread.execGobyLib(fn)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"An error occurs when loading lib file %s:\\n\", string(fn))\n\t\t\tfmt.Println(err.Error())\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (vm *VM) newThread() *thread {\n\ts := &stack{RWMutex: new(sync.RWMutex)}\n\tcfs := &callFrameStack{callFrames: []callFrame{}}\n\tt := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = t\n\tcfs.thread = t\n\tt.vm = vm\n\treturn t\n}\n\n\/\/ ExecInstructions accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecInstructions(sets []*bytecode.InstructionSet, fn string) {\n\ttranslator := newInstructionTranslator(fn)\n\ttranslator.vm = vm\n\ttranslator.transferInstructionSets(sets)\n\n\t\/\/ Keep instruction set table updated after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor setType, table := range translator.setTable {\n\t\tfor name, is := range table {\n\t\t\tvm.isTables[setType][name] = is\n\t\t}\n\t}\n\n\tvm.blockTables[translator.filename] = translator.blockTable\n\tvm.SetClassISIndexTable(translator.filename)\n\tvm.SetMethodISIndexTable(translator.filename)\n\n\tcf := newNormalCallFrame(translator.program, translator.filename, 1)\n\tcf.self = vm.mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\n\tdefer func() {\n\t\terr, ok := recover().(*Error)\n\n\t\tif ok && vm.mode == NormalMode {\n\t\t\tfmt.Println(err.Message())\n\t\t}\n\t}()\n\n\tvm.mainThread.startFromTopFrame()\n}\n\n\/\/ SetClassISIndexTable adds new instruction set's index table to vm.classISIndexTables\nfunc (vm *VM) SetClassISIndexTable(fn filename) {\n\tvm.classISIndexTables[fn] = newISIndexTable()\n}\n\n\/\/ SetMethodISIndexTable adds new instruction set's index table to vm.methodISIndexTables\nfunc (vm *VM) SetMethodISIndexTable(fn filename) {\n\tvm.methodISIndexTables[fn] = newISIndexTable()\n}\n\n\/\/ main object singleton methods -----------------------------------------------------\nfunc builtinMainObjSingletonMethods() []*BuiltinMethodObject {\n\treturn []*BuiltinMethodObject{\n\t\t{\n\t\t\tName: \"to_s\",\n\t\t\tFn: func(receiver Object, sourceLine int) builtinMethodBody {\n\t\t\t\treturn func(thread *thread, objects []Object, frame *normalCallFrame) Object {\n\t\t\t\t\treturn thread.vm.initStringObject(\"main\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (vm *VM) initMainObj() *RObject {\n\tobj := vm.objectClass.initializeInstance()\n\tsingletonClass := vm.initializeClass(fmt.Sprintf(\"#<Class:%s>\", obj.toString()), false)\n\tsingletonClass.Methods.set(\"include\", vm.topLevelClass(classes.ClassClass).lookupMethod(\"include\"))\n\tsingletonClass.setBuiltinMethods(builtinMainObjSingletonMethods(), false)\n\tobj.singletonClass = singletonClass\n\n\treturn obj\n}\n\nfunc (vm *VM) initConstants() {\n\t\/\/ Init Class and Object\n\tcClass := initClassClass()\n\tvm.objectClass = initObjectClass(cClass)\n\tvm.topLevelClass(classes.ObjectClass).setClassConstant(cClass)\n\n\t\/\/ Init builtin classes\n\tbuiltinClasses := []*RClass{\n\t\tvm.initIntegerClass(),\n\t\tvm.initFloatClass(),\n\t\tvm.initStringClass(),\n\t\tvm.initBoolClass(),\n\t\tvm.initNullClass(),\n\t\tvm.initArrayClass(),\n\t\tvm.initHashClass(),\n\t\tvm.initRangeClass(),\n\t\tvm.initMethodClass(),\n\t\tvm.initBlockClass(),\n\t\tvm.initChannelClass(),\n\t\tvm.initGoClass(),\n\t\tvm.initFileClass(),\n\t\tvm.initRegexpClass(),\n\t\tvm.initMatchDataClass(),\n\t\tvm.initGoMapClass(),\n\t\tvm.initDecimalClass(),\n\t}\n\n\t\/\/ Init error classes\n\tvm.initErrorClasses()\n\n\tfor _, c := range builtinClasses {\n\t\tvm.objectClass.setClassConstant(c)\n\t}\n\n\t\/\/ Init ARGV\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, vm.initStringObject(arg))\n\t}\n\n\tvm.objectClass.constants[\"ARGV\"] = &Pointer{Target: vm.initArrayObject(args)}\n\n\t\/\/ Init ENV\n\tenvs := map[string]Object{}\n\n\tfor _, e := range os.Environ() {\n\t\tpair := strings.Split(e, \"=\")\n\t\tenvs[pair[0]] = vm.initStringObject(pair[1])\n\t}\n\n\tvm.objectClass.constants[\"ENV\"] = &Pointer{Target: vm.initHashObject(envs)}\n\tvm.objectClass.constants[\"STDOUT\"] = &Pointer{Target: vm.initFileObject(os.Stdout)}\n\tvm.objectClass.constants[\"STDERR\"] = &Pointer{Target: vm.initFileObject(os.Stderr)}\n\tvm.objectClass.constants[\"STDIN\"] = &Pointer{Target: vm.initFileObject(os.Stdin)}\n}\n\nfunc (vm *VM) topLevelClass(cn string) *RClass {\n\tobjClass := vm.objectClass\n\n\tif cn == classes.ObjectClass {\n\t\treturn objClass\n\t}\n\n\treturn objClass.constants[cn].Target.(*RClass)\n}\n\nfunc (vm *VM) currentFilePath() string {\n\tframe := vm.mainThread.callFrameStack.top()\n\treturn frame.FileName()\n}\n\n\/\/ loadConstant makes sure we don't create a class twice.\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = vm.objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = vm.initializeClass(name, isModule)\n\t\tvm.objectClass.setClassConstant(c)\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf callFrame, constName string) (constant *Pointer) {\n\tvar namespace *RClass\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(*RClass)\n\t}\n\n\tif hasNamespace {\n\t\tconstant = namespace.lookupConstantUnderAllScope(constName)\n\n\t\tif constant != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tconstant = cf.lookupConstantUnderAllScope(constName)\n\n\tif constant == nil {\n\t\tconstant = vm.objectClass.constants[constName]\n\t}\n\n\tif constName == classes.ObjectClass {\n\t\tconstant = &Pointer{Target: vm.objectClass}\n\t}\n\n\treturn\n}\n<commit_msg>Bump version to 0.1.9<commit_after>package vm\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/compiler\/bytecode\"\n\t\"github.com\/goby-lang\/goby\/vm\/classes\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ Version stores current Goby version\nconst Version = \"0.1.9\"\n\n\/\/ These are the enums for marking parser's mode, which decides whether it should pop unused values.\nconst (\n\tNormalMode int = iota\n\tREPLMode\n\tTestMode\n)\n\ntype isIndexTable struct {\n\tData map[string]int\n}\n\nfunc newISIndexTable() *isIndexTable {\n\treturn &isIndexTable{Data: make(map[string]int)}\n}\n\ntype isTable map[string][]*instructionSet\n\ntype filename = string\n\nvar standardLibraries = map[string]func(*VM){\n\t\"net\/http\": initHTTPClass,\n\t\"net\/simple_server\": initSimpleServerClass,\n\t\"uri\": initURIClass,\n\t\"db\": initDBClass,\n\t\"plugin\": initPluginClass,\n\t\"json\": initJSONClass,\n\t\"concurrent\/array\": initConcurrentArrayClass,\n\t\"concurrent\/hash\": initConcurrentHashClass,\n\t\"concurrent\/rw_lock\": initConcurrentRWLockClass,\n\t\"spec\": initSpecClass,\n}\n\n\/\/ VM represents a stack based virtual machine.\ntype VM struct {\n\tmainObj *RObject\n\tmainThread *thread\n\tobjectClass *RClass\n\t\/\/ a map holds different types of instruction set tables\n\tisTables map[setType]isTable\n\t\/\/ method instruction set table\n\tmethodISIndexTables map[filename]*isIndexTable\n\t\/\/ class instruction set table\n\tclassISIndexTables map[filename]*isIndexTable\n\t\/\/ block instruction set table\n\tblockTables map[filename]map[string]*instructionSet\n\t\/\/ fileDir indicates executed file's directory\n\tfileDir string\n\t\/\/ args are command line arguments\n\targs []string\n\t\/\/ projectRoot is goby root's absolute path, which is $GOROOT\/src\/github.com\/goby-lang\/goby\n\tprojectRoot string\n\n\tchannelObjectMap *objectMap\n\n\tmode int\n\n\tlibFiles []string\n}\n\n\/\/ New initializes a vm to initialize state and returns it.\nfunc New(fileDir string, args []string) (vm *VM, e error) {\n\tvm = &VM{args: args}\n\tvm.mainThread = vm.newThread()\n\n\tvm.methodISIndexTables = map[filename]*isIndexTable{\n\t\tfileDir: newISIndexTable(),\n\t}\n\tvm.classISIndexTables = map[filename]*isIndexTable{\n\t\tfileDir: newISIndexTable(),\n\t}\n\tvm.blockTables = make(map[filename]map[string]*instructionSet)\n\tvm.isTables = map[setType]isTable{\n\t\tbytecode.MethodDef: make(isTable),\n\t\tbytecode.ClassDef: make(isTable),\n\t}\n\tvm.fileDir = fileDir\n\n\tgobyRoot := os.Getenv(\"GOBY_ROOT\")\n\n\tif len(gobyRoot) == 0 {\n\t\tvm.projectRoot = fmt.Sprintf(\"\/usr\/local\/Cellar\/goby\/%s\", Version)\n\n\t\t_, err := os.Stat(vm.projectRoot)\n\n\t\tif err != nil {\n\t\t\tpath, _ := filepath.Abs(\"$GOPATH\/src\/github.com\/goby-lang\/goby\")\n\t\t\t_, err = os.Stat(path)\n\n\t\t\tif err != nil {\n\t\t\t\te = fmt.Errorf(\"You haven't set $GOBY_ROOT properly\")\n\t\t\t\treturn nil, e\n\t\t\t}\n\n\t\t\tvm.projectRoot = path\n\t\t}\n\t} else {\n\t\tvm.projectRoot = gobyRoot\n\t}\n\n\tvm.initConstants()\n\tvm.mainObj = vm.initMainObj()\n\tvm.channelObjectMap = &objectMap{store: &sync.Map{}}\n\n\tfor _, fn := range vm.libFiles {\n\t\terr := vm.mainThread.execGobyLib(fn)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"An error occurs when loading lib file %s:\\n\", string(fn))\n\t\t\tfmt.Println(err.Error())\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (vm *VM) newThread() *thread {\n\ts := &stack{RWMutex: new(sync.RWMutex)}\n\tcfs := &callFrameStack{callFrames: []callFrame{}}\n\tt := &thread{stack: s, callFrameStack: cfs, sp: 0, cfp: 0}\n\ts.thread = t\n\tcfs.thread = t\n\tt.vm = vm\n\treturn t\n}\n\n\/\/ ExecInstructions accepts a sequence of bytecodes and use vm to evaluate them.\nfunc (vm *VM) ExecInstructions(sets []*bytecode.InstructionSet, fn string) {\n\ttranslator := newInstructionTranslator(fn)\n\ttranslator.vm = vm\n\ttranslator.transferInstructionSets(sets)\n\n\t\/\/ Keep instruction set table updated after parsed new files.\n\t\/\/ TODO: Find more efficient way to do this.\n\tfor setType, table := range translator.setTable {\n\t\tfor name, is := range table {\n\t\t\tvm.isTables[setType][name] = is\n\t\t}\n\t}\n\n\tvm.blockTables[translator.filename] = translator.blockTable\n\tvm.SetClassISIndexTable(translator.filename)\n\tvm.SetMethodISIndexTable(translator.filename)\n\n\tcf := newNormalCallFrame(translator.program, translator.filename, 1)\n\tcf.self = vm.mainObj\n\tvm.mainThread.callFrameStack.push(cf)\n\n\tdefer func() {\n\t\terr, ok := recover().(*Error)\n\n\t\tif ok && vm.mode == NormalMode {\n\t\t\tfmt.Println(err.Message())\n\t\t}\n\t}()\n\n\tvm.mainThread.startFromTopFrame()\n}\n\n\/\/ SetClassISIndexTable adds new instruction set's index table to vm.classISIndexTables\nfunc (vm *VM) SetClassISIndexTable(fn filename) {\n\tvm.classISIndexTables[fn] = newISIndexTable()\n}\n\n\/\/ SetMethodISIndexTable adds new instruction set's index table to vm.methodISIndexTables\nfunc (vm *VM) SetMethodISIndexTable(fn filename) {\n\tvm.methodISIndexTables[fn] = newISIndexTable()\n}\n\n\/\/ main object singleton methods -----------------------------------------------------\nfunc builtinMainObjSingletonMethods() []*BuiltinMethodObject {\n\treturn []*BuiltinMethodObject{\n\t\t{\n\t\t\tName: \"to_s\",\n\t\t\tFn: func(receiver Object, sourceLine int) builtinMethodBody {\n\t\t\t\treturn func(thread *thread, objects []Object, frame *normalCallFrame) Object {\n\t\t\t\t\treturn thread.vm.initStringObject(\"main\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (vm *VM) initMainObj() *RObject {\n\tobj := vm.objectClass.initializeInstance()\n\tsingletonClass := vm.initializeClass(fmt.Sprintf(\"#<Class:%s>\", obj.toString()), false)\n\tsingletonClass.Methods.set(\"include\", vm.topLevelClass(classes.ClassClass).lookupMethod(\"include\"))\n\tsingletonClass.setBuiltinMethods(builtinMainObjSingletonMethods(), false)\n\tobj.singletonClass = singletonClass\n\n\treturn obj\n}\n\nfunc (vm *VM) initConstants() {\n\t\/\/ Init Class and Object\n\tcClass := initClassClass()\n\tvm.objectClass = initObjectClass(cClass)\n\tvm.topLevelClass(classes.ObjectClass).setClassConstant(cClass)\n\n\t\/\/ Init builtin classes\n\tbuiltinClasses := []*RClass{\n\t\tvm.initIntegerClass(),\n\t\tvm.initFloatClass(),\n\t\tvm.initStringClass(),\n\t\tvm.initBoolClass(),\n\t\tvm.initNullClass(),\n\t\tvm.initArrayClass(),\n\t\tvm.initHashClass(),\n\t\tvm.initRangeClass(),\n\t\tvm.initMethodClass(),\n\t\tvm.initBlockClass(),\n\t\tvm.initChannelClass(),\n\t\tvm.initGoClass(),\n\t\tvm.initFileClass(),\n\t\tvm.initRegexpClass(),\n\t\tvm.initMatchDataClass(),\n\t\tvm.initGoMapClass(),\n\t\tvm.initDecimalClass(),\n\t}\n\n\t\/\/ Init error classes\n\tvm.initErrorClasses()\n\n\tfor _, c := range builtinClasses {\n\t\tvm.objectClass.setClassConstant(c)\n\t}\n\n\t\/\/ Init ARGV\n\targs := []Object{}\n\n\tfor _, arg := range vm.args {\n\t\targs = append(args, vm.initStringObject(arg))\n\t}\n\n\tvm.objectClass.constants[\"ARGV\"] = &Pointer{Target: vm.initArrayObject(args)}\n\n\t\/\/ Init ENV\n\tenvs := map[string]Object{}\n\n\tfor _, e := range os.Environ() {\n\t\tpair := strings.Split(e, \"=\")\n\t\tenvs[pair[0]] = vm.initStringObject(pair[1])\n\t}\n\n\tvm.objectClass.constants[\"ENV\"] = &Pointer{Target: vm.initHashObject(envs)}\n\tvm.objectClass.constants[\"STDOUT\"] = &Pointer{Target: vm.initFileObject(os.Stdout)}\n\tvm.objectClass.constants[\"STDERR\"] = &Pointer{Target: vm.initFileObject(os.Stderr)}\n\tvm.objectClass.constants[\"STDIN\"] = &Pointer{Target: vm.initFileObject(os.Stdin)}\n}\n\nfunc (vm *VM) topLevelClass(cn string) *RClass {\n\tobjClass := vm.objectClass\n\n\tif cn == classes.ObjectClass {\n\t\treturn objClass\n\t}\n\n\treturn objClass.constants[cn].Target.(*RClass)\n}\n\nfunc (vm *VM) currentFilePath() string {\n\tframe := vm.mainThread.callFrameStack.top()\n\treturn frame.FileName()\n}\n\n\/\/ loadConstant makes sure we don't create a class twice.\nfunc (vm *VM) loadConstant(name string, isModule bool) *RClass {\n\tvar c *RClass\n\tvar ptr *Pointer\n\n\tptr = vm.objectClass.constants[name]\n\n\tif ptr == nil {\n\t\tc = vm.initializeClass(name, isModule)\n\t\tvm.objectClass.setClassConstant(c)\n\t} else {\n\t\tc = ptr.Target.(*RClass)\n\t}\n\n\treturn c\n}\n\nfunc (vm *VM) lookupConstant(cf callFrame, constName string) (constant *Pointer) {\n\tvar namespace *RClass\n\tvar hasNamespace bool\n\n\ttop := vm.mainThread.stack.top()\n\n\tif top == nil {\n\t\thasNamespace = false\n\t} else {\n\t\tnamespace, hasNamespace = top.Target.(*RClass)\n\t}\n\n\tif hasNamespace {\n\t\tconstant = namespace.lookupConstantUnderAllScope(constName)\n\n\t\tif constant != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tconstant = cf.lookupConstantUnderAllScope(constName)\n\n\tif constant == nil {\n\t\tconstant = vm.objectClass.constants[constName]\n\t}\n\n\tif constName == classes.ObjectClass {\n\t\tconstant = &Pointer{Target: vm.objectClass}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/zefer\/gompd\/mpd\"\n)\n\ntype watchConn struct {\n\twatcher *mpd.Watcher\n}\n\nfunc newWatchConn(addr string) (*watchConn, error) {\n\tw, err := mpd.NewWatcher(\"tcp\", addr, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn := &watchConn{w}\n\tgo conn.errorLoop()\n\tgo conn.eventLoop()\n\n\treturn conn, nil\n}\n\nfunc (w *watchConn) Close() {\n\tw.watcher.Close()\n}\n\nfunc (w *watchConn) eventLoop() {\n\tfor subsystem := range w.watcher.Event {\n\t\tglog.Info(\"Changed subsystem:\", subsystem)\n\t\tbroadcastStatus()\n\t}\n}\n\nfunc (w *watchConn) errorLoop() {\n\tfor err := range w.watcher.Error {\n\t\tglog.Errorf(\"Watcher: %v\", err)\n\t}\n}\n<commit_msg>Retry watcher connection on start-up<commit_after>package main\n\nimport (\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/zefer\/gompd\/mpd\"\n)\n\nconst retryDur time.Duration = time.Second * 3\n\ntype watchConn struct {\n\twatcher *mpd.Watcher\n\taddr string\n}\n\nfunc newWatchConn(addr string) (*watchConn, error) {\n\tconn := &watchConn{addr: addr}\n\tconn.retryConnect()\n\tgo conn.errorLoop()\n\tgo conn.eventLoop()\n\treturn conn, nil\n}\n\nfunc (w *watchConn) connect() error {\n\twatcher, err := mpd.NewWatcher(\"tcp\", w.addr, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.watcher = watcher\n\treturn nil\n}\n\nfunc (w *watchConn) retryConnect() {\n\tfor {\n\t\terr := w.connect()\n\t\tif err == nil {\n\t\t\tglog.Infof(\"Watcher: connected to %s\", w.addr)\n\t\t\treturn\n\t\t}\n\t\tglog.Errorf(\"Watcher: connect failed. Waiting then retrying. %v\", err)\n\t\ttime.Sleep(retryDur)\n\t}\n}\n\nfunc (w *watchConn) Close() {\n\tw.watcher.Close()\n}\n\nfunc (w *watchConn) eventLoop() {\n\tfor subsystem := range w.watcher.Event {\n\t\tglog.Info(\"Changed subsystem:\", subsystem)\n\t\tbroadcastStatus()\n\t}\n}\n\nfunc (w *watchConn) errorLoop() {\n\tfor err := range w.watcher.Error {\n\t\tglog.Errorf(\"Watcher: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Peter Herth 2016\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc readWord(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = skipWS(reader)\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == 32 {\n\t\t\tbreak\n\t\t}\n\t\tif rune == 13 {\n\t\t\tbreak\n\t\t}\n\t\tif rune == 10 {\n\t\t\tbreak\n\t\t}\n\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc readLong(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = skipWS(reader)\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == '~' {\n\t\t\tbreak\n\t\t}\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc readEOL(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == '\\r' {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == '\\n' {\n\t\t\tbreak\n\t\t}\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc skipTo(reader *bufio.Reader, stopRune rune) (eof bool) {\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == stopRune {\n\t\t\treader.UnreadRune()\n\t\t\tbreak\n\t\t}\n\t}\n\treturn eof\n}\n\nfunc skipWS(reader *bufio.Reader) (eof bool) {\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == 32 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 13 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 10 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 9 {\n\t\t\tcontinue\n\t\t}\n\t\treader.UnreadRune()\n\t\tbreak\n\t}\n\treturn eof\n}\n\ntype Mob struct {\n\tID string\n\tName string\n\tShortDesc string\n\tLongDesc string\n\tDescription string\n\tRace string\n}\n\ntype Item struct {\n\tID string\n\tName string\n\tShortDesc string\n\tLongDesc string\n}\n\ntype Exit struct {\n\tDescription string\n\tKeyword string\n\tState string\n\tKey string\n\tTo string\n}\n\ntype ExtraDescription struct {\n\tKeyword string\n\tDescription string\n}\n\ntype Room struct {\n\tID string\n\tName string\n\tDescription string\n\tExit [6]Exit\n\tExtra []ExtraDescription\n}\n\ntype Area struct {\n\tName string\n\tFileName string\n\tNr int\n\tFlags string\n\tMobs [](*Mob)\n\tItems [](*Item)\n\tRooms [](*Room)\n}\n\nvar areas [](*Area)\n\nfunc (mob *Mob) read(r *bufio.Reader) {\n\tmob.Name, _ = readLong(r)\n\tmob.ShortDesc, _ = readLong(r)\n\tmob.LongDesc, _ = readLong(r)\n\tmob.Description, _ = readLong(r)\n\tmob.Race, _ = readLong(r)\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readMobs(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading mobiles\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\tmob := Mob{ID: vnum[1:]}\n\t\tmob.read(r)\n\t\t\/\/fmt.Println(\"read mob:\", mob.Vnum, mob.Name)\n\t\tarea.Mobs = append(area.Mobs, &mob)\n\t\t\/\/fmt.Println(\"mobs:\", area.Mobs)\n\t}\n}\n\nfunc (item *Item) read(r *bufio.Reader) {\n\titem.Name, _ = readLong(r)\n\titem.ShortDesc, _ = readLong(r)\n\titem.LongDesc, _ = readLong(r)\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readItems(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading items in area\", area.FileName)\n\t\t\t\/\/panic(\"items eof\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\titem := Item{ID: vnum[1:]}\n\t\titem.read(r)\n\t\t\/\/fmt.Println(\"read item:\", item.Vnum, item.Name)\n\t\tarea.Items = append(area.Items, &item)\n\t\t\/\/fmt.Println(\"items:\", area.Items)\n\t}\n}\n\nfunc (room *Room) read(r *bufio.Reader) {\n\troom.Name, _ = readLong(r)\n\troom.Description, _ = readLong(r)\n\treadWord(r)\n\treadWord(r)\n\treadWord(r)\n\tfor {\n\t\tentry, _ := readWord(r)\n\t\t\/\/fmt.Println(\"entry\", entry)\n\t\te := entry[0]\n\t\tif e == 'D' { \/\/ exit\n\t\t\tdir := entry[1] - '0'\n\t\t\t\/\/fmt.Println(\"dir=\", dir)\n\t\t\tdesc, _ := readLong(r)\n\t\t\t\/\/fmt.Println(\"desc=\", desc)\n\t\t\tkeyword, _ := readLong(r)\n\t\t\t\/\/fmt.Println(\"keyword=\", keyword)\n\t\t\tstate, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"state=\", state)\n\t\t\tkey, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"key=\", key)\n\t\t\ttoRoom, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"toRooom=\", toRoom)\n\t\t\tex := Exit{Description: desc, Keyword: keyword, State: state, Key: key, To: toRoom}\n\t\t\t\/\/fmt.Println(\"exx=\", ex)\n\n\t\t\t\/\/fmt.Println()\n\n\t\t\troom.Exit[dir] = ex\n\t\t\t\/\/fmt.Println(\"exits\", room.Exit)\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"S\" {\n\t\t\tskipTo(r, '#')\n\t\t\treturn\n\t\t}\n\t\tif entry == \"E\" {\n\t\t\tk, _ := readLong(r)\n\t\t\td, _ := readLong(r)\n\t\t\troom.Extra = append(room.Extra, ExtraDescription{Keyword: k, Description: d})\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"H\" {\n\t\t\treadWord(r)\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"M\" {\n\t\t\treadWord(r)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t\t\/\/ case \"H\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"M\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"K\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"amap\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"WLH\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"X\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"watches\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"clan\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"O\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"A\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"damage\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"encounters\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"encountr\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"populate\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"religion\":\n\t\t\/\/ \tbreak\n\n\t}\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readRooms(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading rooms\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ \/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\troom := Room{ID: vnum[1:]}\n\t\troom.read(r)\n\t\t\/\/fmt.Println(\"read room:\", room.Vnum, room.Name, room)\n\t\tarea.Rooms = append(area.Rooms, &room)\n\t}\n}\n\nfunc (area *Area) readResets(r *bufio.Reader) {\n\tfor {\n\t\tcode, eof := readWord(r)\n\t\tif code == \"S\" {\n\t\t\treturn\n\t\t}\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading resets\")\n\t\t\treturn\n\t\t}\n\t\t_, _ = readEOL(r)\n\n\t}\n}\n\nfunc (area *Area) load() {\n\tfile, err := os.Open(\".\/area\/\" + area.FileName)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t\/\/\tfmt.Println(\"reading\", area.FileName)\n\n\tr := bufio.NewReader(file)\n\tfor {\n\t\tword, eof := readWord(r)\n\t\t\/\/fmt.Println(\"word\", word)\n\t\tswitch word {\n\t\tcase \"#AREA\":\n\t\t\t_, _ = readLong(r)\n\t\t\tarea.Name, _ = readLong(r)\n\t\t\t_, _ = readLong(r)\n\t\t\tcontinue\n\t\tcase \"#MOBILES\":\n\t\t\tarea.readMobs(r)\n\t\tcase \"#OBJECTS\":\n\t\t\tarea.readItems(r)\n\t\tcase \"#ROOMS\":\n\t\t\tarea.readRooms(r)\n\t\tcase \"#RESETS\":\n\t\t\tarea.readResets(r)\n\t\tdefault:\n\t\t\t\/\/fmt.Println(\"skipping word\", word)\n\t\t}\n\n\t\tif eof {\n\t\t\treturn\n\t\t}\n\t}\n\n}\n\nfunc loadAreas() {\n\tfile, err := os.Open(\".\/area\/area.list\")\n\tn := 0\n\tif err != nil {\n\t\treturn\n\t}\n\tvar c chan (int)\n\tparallel := true\n\tif parallel {\n\t\tc = make(chan int)\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tl := scanner.Text()\n\t\t\/\/fmt.Println(\"area:\", l)\n\t\tif l[0] != '#' {\n\t\t\twords := bufio.NewScanner(strings.NewReader(l))\n\t\t\twords.Split(bufio.ScanWords)\n\t\t\twords.Scan()\n\t\t\tnr, _ := strconv.Atoi(words.Text())\n\t\t\twords.Scan()\n\t\t\tflags := words.Text()\n\t\t\twords.Scan()\n\t\t\tfileName := words.Text()\n\t\t\t\/\/parts := strings.Split(l, \" \")\n\t\t\t\/\/fmt.Println(\"parts:\", len(parts), parts)\n\t\t\tarea := Area{Nr: nr, FileName: fileName, Flags: flags}\n\n\t\t\tareas = append(areas, &area)\n\t\t\t\/\/\t\t\tarea.load()\n\t\t\t\/\/fmt.Println(\"loaded\", area)\n\t\t\tif parallel {\n\t\t\t\tgo func() {\n\t\t\t\t\tarea.load()\n\t\t\t\t\tc <- 1 \/\/ mark area loaded\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tarea.load()\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tif parallel {\n\t\tfor i := 0; i < n; i++ {\n\t\t\t<-c \/\/ wait till all areas sent load signal\n\t\t}\n\t}\n\tfmt.Println(n, \"areas loaded\", len(areas))\n\t\/\/fmt.Println(areas)\n}\n\nfunc handler(c net.Conn) {\n\t\/\/buffer := make([]byte, 1024)\n\tscanner := bufio.NewScanner(c)\n\tlog.Printf(\"New connection from %v\\n\", c.RemoteAddr())\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlog.Println(\"read:\", line)\n\t}\n\n\t\/\/ for {\n\t\/\/ \tn, err := c.Read(buffer)\n\n\t\/\/ \tif err == io.EOF {\n\t\/\/ \t\tlog.Printf(\"socket closed %v\\n\", c.RemoteAddr())\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tif err != nil {\n\t\/\/ \t\tlog.Println(\"error reading socket\", err)\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tif n == 0 {\n\t\/\/ \t\tlog.Println(\"zero read\")\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tlog.Println(\"read\", string(buffer[:n]))\n\t\/\/ }\n\tlog.Printf(\"socket closed %v\\n\", c.RemoteAddr())\n}\n\nfunc serve() {\n\tlistener, err := net.Listen(\"tcp\", \":8000\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer listener.Close()\n\tfor {\n\t\t\/\/ Wait for a connection.\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgo handler(conn)\n\t}\n}\n\nfunc main() {\n\tt1 := time.Now()\n\t\/\/\truntime.ThreadCreateProfile()\n\tloadAreas()\n\n\t\/\/time.Sleep(1 * time.Second)\n\t\/\/ for _, area := range areas {\n\t\/\/ \t\/\/fmt.Println(area)\n\t\/\/ \tfmt.Println(area.Name, len(area.Mobs), \"mobs\", len(area.Items), \"items\", len(area.Rooms), \"rooms\")\n\t\/\/ }\n\tfmt.Fprintf(os.Stderr, \"loading: %v\\n\", time.Since(t1))\n\tfmt.Println(runtime.NumCPU(), \"cpus\")\n\n\tserve()\n}\n<commit_msg>cleanup<commit_after>\/\/ Copyright Peter Herth 2016\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc readWord(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = skipWS(reader)\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == 32 {\n\t\t\tbreak\n\t\t}\n\t\tif rune == 13 {\n\t\t\tbreak\n\t\t}\n\t\tif rune == 10 {\n\t\t\tbreak\n\t\t}\n\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc readLong(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = skipWS(reader)\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == '~' {\n\t\t\tbreak\n\t\t}\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc readEOL(reader *bufio.Reader) (word string, eof bool) {\n\tvar b bytes.Buffer\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == '\\r' {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == '\\n' {\n\t\t\tbreak\n\t\t}\n\t\tb.WriteRune(rune)\n\t}\n\treturn b.String(), eof\n}\n\nfunc skipTo(reader *bufio.Reader, stopRune rune) (eof bool) {\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == stopRune {\n\t\t\treader.UnreadRune()\n\t\t\tbreak\n\t\t}\n\t}\n\treturn eof\n}\n\nfunc skipWS(reader *bufio.Reader) (eof bool) {\n\teof = false\n\tfor {\n\t\trune, size, _ := reader.ReadRune()\n\t\tif size == 0 {\n\t\t\teof = true\n\t\t\tbreak\n\t\t}\n\t\tif rune == 32 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 13 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 10 {\n\t\t\tcontinue\n\t\t}\n\t\tif rune == 9 {\n\t\t\tcontinue\n\t\t}\n\t\treader.UnreadRune()\n\t\tbreak\n\t}\n\treturn eof\n}\n\ntype Mob struct {\n\tID string\n\tName string\n\tShortDesc string\n\tLongDesc string\n\tDescription string\n\tRace string\n}\n\ntype Item struct {\n\tID string\n\tName string\n\tShortDesc string\n\tLongDesc string\n}\n\ntype Exit struct {\n\tDescription string\n\tKeyword string\n\tState string\n\tKey string\n\tTo string\n}\n\ntype ExtraDescription struct {\n\tKeyword string\n\tDescription string\n}\n\ntype Room struct {\n\tID string\n\tName string\n\tDescription string\n\tExit [6]Exit\n\tExtra []ExtraDescription\n}\n\ntype Area struct {\n\tName string\n\tFileName string\n\tNr int\n\tFlags string\n\tMobs [](*Mob)\n\tItems [](*Item)\n\tRooms [](*Room)\n}\n\nvar areas [](*Area)\n\nfunc (mob *Mob) read(r *bufio.Reader) {\n\tmob.Name, _ = readLong(r)\n\tmob.ShortDesc, _ = readLong(r)\n\tmob.LongDesc, _ = readLong(r)\n\tmob.Description, _ = readLong(r)\n\tmob.Race, _ = readLong(r)\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readMobs(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading mobiles\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\tmob := Mob{ID: vnum[1:]}\n\t\tmob.read(r)\n\t\t\/\/fmt.Println(\"read mob:\", mob.Vnum, mob.Name)\n\t\tarea.Mobs = append(area.Mobs, &mob)\n\t\t\/\/fmt.Println(\"mobs:\", area.Mobs)\n\t}\n}\n\nfunc (item *Item) read(r *bufio.Reader) {\n\titem.Name, _ = readLong(r)\n\titem.ShortDesc, _ = readLong(r)\n\titem.LongDesc, _ = readLong(r)\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readItems(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading items in area\", area.FileName)\n\t\t\t\/\/panic(\"items eof\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\titem := Item{ID: vnum[1:]}\n\t\titem.read(r)\n\t\t\/\/fmt.Println(\"read item:\", item.Vnum, item.Name)\n\t\tarea.Items = append(area.Items, &item)\n\t\t\/\/fmt.Println(\"items:\", area.Items)\n\t}\n}\n\nfunc (room *Room) read(r *bufio.Reader) {\n\troom.Name, _ = readLong(r)\n\troom.Description, _ = readLong(r)\n\treadWord(r)\n\treadWord(r)\n\treadWord(r)\n\tfor {\n\t\tentry, _ := readWord(r)\n\t\t\/\/fmt.Println(\"entry\", entry)\n\t\te := entry[0]\n\t\tif e == 'D' { \/\/ exit\n\t\t\tdir := entry[1] - '0'\n\t\t\t\/\/fmt.Println(\"dir=\", dir)\n\t\t\tdesc, _ := readLong(r)\n\t\t\t\/\/fmt.Println(\"desc=\", desc)\n\t\t\tkeyword, _ := readLong(r)\n\t\t\t\/\/fmt.Println(\"keyword=\", keyword)\n\t\t\tstate, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"state=\", state)\n\t\t\tkey, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"key=\", key)\n\t\t\ttoRoom, _ := readWord(r)\n\t\t\t\/\/fmt.Println(\"toRooom=\", toRoom)\n\t\t\tex := Exit{Description: desc, Keyword: keyword, State: state, Key: key, To: toRoom}\n\t\t\t\/\/fmt.Println(\"exx=\", ex)\n\n\t\t\t\/\/fmt.Println()\n\n\t\t\troom.Exit[dir] = ex\n\t\t\t\/\/fmt.Println(\"exits\", room.Exit)\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"S\" {\n\t\t\tskipTo(r, '#')\n\t\t\treturn\n\t\t}\n\t\tif entry == \"E\" {\n\t\t\tk, _ := readLong(r)\n\t\t\td, _ := readLong(r)\n\t\t\troom.Extra = append(room.Extra, ExtraDescription{Keyword: k, Description: d})\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"H\" {\n\t\t\treadWord(r)\n\t\t\tcontinue\n\t\t}\n\t\tif entry == \"M\" {\n\t\t\treadWord(r)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t\t\/\/ case \"H\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"M\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"K\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"amap\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"WLH\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"X\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"watches\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"clan\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"O\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"A\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"damage\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"encounters\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"encountr\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"populate\":\n\t\t\/\/ \tbreak\n\t\t\/\/ case \"religion\":\n\t\t\/\/ \tbreak\n\n\t}\n\tskipTo(r, '#')\n\treturn\n}\n\nfunc (area *Area) readRooms(r *bufio.Reader) {\n\tfor {\n\t\tvnum, eof := readWord(r)\n\t\tif vnum == \"#0\" {\n\t\t\treturn\n\t\t}\n\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading rooms\")\n\t\t\treturn\n\t\t}\n\t\tif len(vnum) < 1 {\n\t\t\tfmt.Println(\"empty vnum\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ \/\/fmt.Printf(\"vnum:%v<<<\\n\", vnum)\n\t\troom := Room{ID: vnum[1:]}\n\t\troom.read(r)\n\t\t\/\/fmt.Println(\"read room:\", room.Vnum, room.Name, room)\n\t\tarea.Rooms = append(area.Rooms, &room)\n\t}\n}\n\nfunc (area *Area) readResets(r *bufio.Reader) {\n\tfor {\n\t\tcode, eof := readWord(r)\n\t\tif code == \"S\" {\n\t\t\treturn\n\t\t}\n\t\tif eof {\n\t\t\tfmt.Println(\"Eof while reading resets\")\n\t\t\treturn\n\t\t}\n\t\t_, _ = readEOL(r)\n\n\t}\n}\n\nfunc (area *Area) load() {\n\tfile, err := os.Open(\".\/area\/\" + area.FileName)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t\/\/\tfmt.Println(\"reading\", area.FileName)\n\n\tr := bufio.NewReader(file)\n\tfor {\n\t\tword, eof := readWord(r)\n\t\t\/\/fmt.Println(\"word\", word)\n\t\tswitch word {\n\t\tcase \"#AREA\":\n\t\t\t_, _ = readLong(r)\n\t\t\tarea.Name, _ = readLong(r)\n\t\t\t_, _ = readLong(r)\n\t\t\tcontinue\n\t\tcase \"#MOBILES\":\n\t\t\tarea.readMobs(r)\n\t\tcase \"#OBJECTS\":\n\t\t\tarea.readItems(r)\n\t\tcase \"#ROOMS\":\n\t\t\tarea.readRooms(r)\n\t\tcase \"#RESETS\":\n\t\t\tarea.readResets(r)\n\t\tdefault:\n\t\t\t\/\/fmt.Println(\"skipping word\", word)\n\t\t}\n\n\t\tif eof {\n\t\t\treturn\n\t\t}\n\t}\n\n}\n\nfunc loadAreas() {\n\tfile, err := os.Open(\".\/area\/area.list\")\n\tn := 0\n\tif err != nil {\n\t\treturn\n\t}\n\tvar c chan (int)\n\tparallel := true\n\tif parallel {\n\t\tc = make(chan int)\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tl := scanner.Text()\n\t\t\/\/fmt.Println(\"area:\", l)\n\t\tif l[0] != '#' {\n\t\t\twords := bufio.NewScanner(strings.NewReader(l))\n\t\t\twords.Split(bufio.ScanWords)\n\t\t\twords.Scan()\n\t\t\tnr, _ := strconv.Atoi(words.Text())\n\t\t\twords.Scan()\n\t\t\tflags := words.Text()\n\t\t\twords.Scan()\n\t\t\tfileName := words.Text()\n\t\t\t\/\/parts := strings.Split(l, \" \")\n\t\t\t\/\/fmt.Println(\"parts:\", len(parts), parts)\n\t\t\tarea := Area{Nr: nr, FileName: fileName, Flags: flags}\n\n\t\t\tareas = append(areas, &area)\n\t\t\t\/\/\t\t\tarea.load()\n\t\t\t\/\/fmt.Println(\"loaded\", area)\n\t\t\tif parallel {\n\t\t\t\tgo func() {\n\t\t\t\t\tarea.load()\n\t\t\t\t\tc <- 1 \/\/ mark area loaded\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tarea.load()\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tif parallel {\n\t\tfor i := 0; i < n; i++ {\n\t\t\t<-c \/\/ wait till all areas sent load signal\n\t\t}\n\t}\n\tfmt.Println(n, \"areas loaded\", len(areas))\n\t\/\/fmt.Println(areas)\n}\n\nfunc handler(c net.Conn) {\n\t\/\/buffer := make([]byte, 1024)\n\tscanner := bufio.NewScanner(c)\n\tlog.Printf(\"New connection from %v\\n\", c.RemoteAddr())\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlog.Println(\"read:\", line)\n\t}\n\n\t\/\/ for {\n\t\/\/ \tn, err := c.Read(buffer)\n\n\t\/\/ \tif err == io.EOF {\n\t\/\/ \t\tlog.Printf(\"socket closed %v\\n\", c.RemoteAddr())\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tif err != nil {\n\t\/\/ \t\tlog.Println(\"error reading socket\", err)\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tif n == 0 {\n\t\/\/ \t\tlog.Println(\"zero read\")\n\t\/\/ \t\tc.Close()\n\t\/\/ \t\treturn\n\t\/\/ \t}\n\t\/\/ \tlog.Println(\"read\", string(buffer[:n]))\n\t\/\/ }\n\tlog.Printf(\"socket closed %v\\n\", c.RemoteAddr())\n}\n\nfunc serve() {\n\tlistener, err := net.Listen(\"tcp\", \":8000\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer listener.Close()\n\tfor {\n\t\t\/\/ Wait for a connection.\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgo handler(conn)\n\t}\n}\n\nfunc main() {\n\tt1 := time.Now()\n\tloadAreas()\n\tfmt.Fprintf(os.Stderr, \"loading: %v\\n\", time.Since(t1))\n\tfmt.Println(runtime.NumCPU(), \"cpus\")\n\n\tserve()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This file is part of fs1up.\n\/\/ Copyright (C) 2014 Andreas Klauer <Andreas.Klauer@metamorpher.de>\n\/\/ License: GPL-2\n\n\/\/ Package nbd uses the Linux NBD layer to emulate a block device in user space\npackage nbd\n\nimport (\n\t\"encoding\/binary\"\n\t\/\/\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nconst (\n\t\/\/ Defined in <linux\/fs.h>:\n\tBLKROSET = 4701\n\t\/\/ Defined in <linux\/nbd.h>:\n\tNBD_SET_SOCK = 43776\n\tNBD_SET_BLKSIZE = 43777\n\tNBD_SET_SIZE = 43778\n\tNBD_DO_IT = 43779\n\tNBD_CLEAR_SOCK = 43780\n\tNBD_CLEAR_QUE = 43781\n\tNBD_PRINT_DEBUG = 43782\n\tNBD_SET_SIZE_BLOCKS = 43783\n\tNBD_DISCONNECT = 43784\n\tNBD_SET_TIMEOUT = 43785\n\tNBD_SET_FLAGS = 43786\n\t\/\/ enum\n\tNBD_CMD_READ = 0\n\tNBD_CMD_WRITE = 1\n\tNBD_CMD_DISC = 2\n\tNBD_CMD_FLUSH = 3\n\tNBD_CMD_TRIM = 4\n\t\/\/ values for flags field\n\tNBD_FLAG_HAS_FLAGS = (1 << 0) \/\/ nbd-server supports flags\n\tNBD_FLAG_READ_ONLY = (1 << 1) \/\/ device is read-only\n\tNBD_FLAG_SEND_FLUSH = (1 << 2) \/\/ can flush writeback cache\n\t\/\/ there is a gap here to match userspace\n\tNBD_FLAG_SEND_TRIM = (1 << 5) \/\/ send trim\/discard\n\t\/\/ These are sent over the network in the request\/reply magic fields\n\tNBD_REQUEST_MAGIC = 0x25609513\n\tNBD_REPLY_MAGIC = 0x67446698\n\t\/\/ Do *not* use magics: 0x12560953 0x96744668.\n)\n\n\/\/ Device interface is a subset of os.File.\ntype Device interface {\n\tReadAt(b []byte, off int64) (n int, err error)\n\tWriteAt(b []byte, off int64) (n int, err error)\n}\n\ntype request struct {\n\tmagic uint32\n\ttypus uint32\n\thandle uint64\n\tfrom uint64\n\tlen uint32\n}\n\nfunc handle(fd int, d Device) {\n\tbuf := make([]byte, 2<<19)\n\tvar x request\n\n\tfor {\n\t\tn, err := syscall.Read(fd, buf[0:28])\n\t\tif err != nil {\n\t\t\tn += 1\n\t\t}\n\t\t\/\/fmt.Println(\"actually read\", n, err)\n\t\tx.magic = binary.BigEndian.Uint32(buf)\n\t\tx.typus = binary.BigEndian.Uint32(buf[4:8])\n\t\tx.handle = binary.BigEndian.Uint64(buf[8:16])\n\t\tx.from = binary.BigEndian.Uint64(buf[16:24])\n\t\tx.len = binary.BigEndian.Uint32(buf[24:28])\n\n\t\t\/\/fmt.Println(\"read\", x)\n\n\t\tswitch x.magic {\n\t\tcase NBD_REPLY_MAGIC:\n\t\t\tfallthrough\n\t\tcase NBD_REQUEST_MAGIC:\n\t\t\tswitch x.typus {\n\t\t\tcase NBD_CMD_READ:\n\t\t\t\tn, _ = d.ReadAt(buf[16:16+x.len], int64(x.from))\n\t\t\t\t\/\/fmt.Println(\"got\", n, \"bytes to send back\")\n\t\t\t\tbinary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)\n\t\t\t\tbinary.BigEndian.PutUint32(buf[4:8], 0)\n\t\t\t\tn, err = syscall.Write(fd, buf[0:16+x.len])\n\t\t\t\t\/\/fmt.Println(\"actually wrote\", n-16, err)\n\t\t\tcase NBD_CMD_WRITE:\n\t\t\t\t\/\/fmt.Println(\"write\", x)\n\t\t\tcase NBD_CMD_DISC:\n\t\t\t\tpanic(\"Disconnect\")\n\t\t\tcase NBD_CMD_FLUSH:\n\t\t\t\t\/\/fmt.Println(\"flush\", x)\n\t\t\tcase NBD_CMD_TRIM:\n\t\t\t\t\/\/fmt.Println(\"trim\", x)\n\t\t\tdefault:\n\t\t\t\tpanic(\"unknown command\")\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Invalid packet\")\n\t\t}\n\n\t\t\/\/ syscall.Write(fd, buf[0:n])\n\t\t\/\/ fmt.Println(\"wrote\", buf[0:n])\n\t}\n}\n\nfunc Client(d Device, offset int64, size int64) {\n\tnbd, _ := os.Open(\"\/dev\/nbd0\") \/\/ TODO: find a free one\n\tfd, _ := syscall.Socketpair(syscall.SOCK_STREAM, syscall.AF_UNIX, 0)\n\tgo handle(fd[1], d)\n\truntime.LockOSThread()\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_SOCK, uintptr(fd[0]))\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_BLKSIZE, 4096)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_SIZE_BLOCKS, uintptr(size\/4096))\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_FLAGS, 1)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), BLKROSET, 0) \/\/ || 1\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_DO_IT, 0) \/\/ doesn't return\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_DISCONNECT, 0)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_CLEAR_SOCK, 0)\n\truntime.UnlockOSThread()\n}\n<commit_msg>nbd write<commit_after>\/\/ This file is part of fs1up.\n\/\/ Copyright (C) 2014 Andreas Klauer <Andreas.Klauer@metamorpher.de>\n\/\/ License: GPL-2\n\n\/\/ Package nbd uses the Linux NBD layer to emulate a block device in user space\npackage nbd\n\nimport (\n\t\"encoding\/binary\"\n\t\/\/\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nconst (\n\t\/\/ Defined in <linux\/fs.h>:\n\tBLKROSET = 4701\n\t\/\/ Defined in <linux\/nbd.h>:\n\tNBD_SET_SOCK = 43776\n\tNBD_SET_BLKSIZE = 43777\n\tNBD_SET_SIZE = 43778\n\tNBD_DO_IT = 43779\n\tNBD_CLEAR_SOCK = 43780\n\tNBD_CLEAR_QUE = 43781\n\tNBD_PRINT_DEBUG = 43782\n\tNBD_SET_SIZE_BLOCKS = 43783\n\tNBD_DISCONNECT = 43784\n\tNBD_SET_TIMEOUT = 43785\n\tNBD_SET_FLAGS = 43786\n\t\/\/ enum\n\tNBD_CMD_READ = 0\n\tNBD_CMD_WRITE = 1\n\tNBD_CMD_DISC = 2\n\tNBD_CMD_FLUSH = 3\n\tNBD_CMD_TRIM = 4\n\t\/\/ values for flags field\n\tNBD_FLAG_HAS_FLAGS = (1 << 0) \/\/ nbd-server supports flags\n\tNBD_FLAG_READ_ONLY = (1 << 1) \/\/ device is read-only\n\tNBD_FLAG_SEND_FLUSH = (1 << 2) \/\/ can flush writeback cache\n\t\/\/ there is a gap here to match userspace\n\tNBD_FLAG_SEND_TRIM = (1 << 5) \/\/ send trim\/discard\n\t\/\/ These are sent over the network in the request\/reply magic fields\n\tNBD_REQUEST_MAGIC = 0x25609513\n\tNBD_REPLY_MAGIC = 0x67446698\n\t\/\/ Do *not* use magics: 0x12560953 0x96744668.\n)\n\n\/\/ Device interface is a subset of os.File.\ntype Device interface {\n\tReadAt(b []byte, off int64) (n int, err error)\n\tWriteAt(b []byte, off int64) (n int, err error)\n}\n\ntype request struct {\n\tmagic uint32\n\ttypus uint32\n\thandle uint64\n\tfrom uint64\n\tlen uint32\n}\n\ntype reply struct {\n\tmagic uint32\n\terror uint32\n\thandle uint64\n}\n\nfunc handle(fd int, d Device) {\n\tbuf := make([]byte, 2<<19)\n\tvar x request\n\n\tfor {\n\t\tsyscall.Read(fd, buf[0:28])\n\n\t\tx.magic = binary.BigEndian.Uint32(buf)\n\t\tx.typus = binary.BigEndian.Uint32(buf[4:8])\n\t\tx.handle = binary.BigEndian.Uint64(buf[8:16])\n\t\tx.from = binary.BigEndian.Uint64(buf[16:24])\n\t\tx.len = binary.BigEndian.Uint32(buf[24:28])\n\n\t\tswitch x.magic {\n\t\tcase NBD_REPLY_MAGIC:\n\t\t\tfallthrough\n\t\tcase NBD_REQUEST_MAGIC:\n\t\t\tswitch x.typus {\n\t\t\tcase NBD_CMD_READ:\n\t\t\t\td.ReadAt(buf[16:16+x.len], int64(x.from))\n\t\t\t\tbinary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)\n\t\t\t\tbinary.BigEndian.PutUint32(buf[4:8], 0)\n\t\t\t\tsyscall.Write(fd, buf[0:16+x.len])\n\t\t\tcase NBD_CMD_WRITE:\n\t\t\t\tn, _ := syscall.Read(fd, buf[28:28+x.len])\n\t\t\t\tfor uint32(n) < x.len {\n\t\t\t\t\tm, _ := syscall.Read(fd, buf[28+n:28+x.len])\n\t\t\t\t\tn += m\n\t\t\t\t}\n\t\t\t\td.WriteAt(buf[28:28+x.len], int64(x.from))\n\t\t\t\tbinary.BigEndian.PutUint32(buf[0:4], NBD_REPLY_MAGIC)\n\t\t\t\tbinary.BigEndian.PutUint32(buf[4:8], 0)\n\t\t\t\tsyscall.Write(fd, buf[0:16])\n\t\t\tcase NBD_CMD_DISC:\n\t\t\t\tpanic(\"Disconnect\")\n\t\t\tcase NBD_CMD_FLUSH:\n\t\t\tcase NBD_CMD_TRIM:\n\t\t\tdefault:\n\t\t\t\tpanic(\"unknown command\")\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"Invalid packet\")\n\t\t}\n\n\t\t\/\/ syscall.Write(fd, buf[0:n])\n\t\t\/\/ fmt.Println(\"wrote\", buf[0:n])\n\t}\n}\n\nfunc Client(d Device, offset int64, size int64) {\n\tnbd, _ := os.Open(\"\/dev\/nbd0\") \/\/ TODO: find a free one\n\tfd, _ := syscall.Socketpair(syscall.SOCK_STREAM, syscall.AF_UNIX, 0)\n\tgo handle(fd[1], d)\n\truntime.LockOSThread()\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_SOCK, uintptr(fd[0]))\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_BLKSIZE, 4096)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_SIZE_BLOCKS, uintptr(size\/4096))\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_SET_FLAGS, 1)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), BLKROSET, 0) \/\/ || 1\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_DO_IT, 0) \/\/ doesn't return\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_DISCONNECT, 0)\n\tsyscall.Syscall(syscall.SYS_IOCTL, nbd.Fd(), NBD_CLEAR_SOCK, 0)\n\truntime.UnlockOSThread()\n}\n<|endoftext|>"} {"text":"<commit_before>package ncp\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ NCp values:\n\/\/ client http.Client with cookie\ntype NCp struct {\n\tclient http.Client\n}\n\n\/\/ Topic from forum\ntype Topic struct {\n\tHref string\n\tName string\n\tYear string\n\tQuality string\n\tBody []byte\n}\n\n\/\/ Film all values\n\/\/ ID id\n\/\/ Name Название\n\/\/ EngName Английское название\n\/\/ Href Ссылка\n\/\/ Year Год\n\/\/ Genre Жанр\n\/\/ Country Производство\n\/\/ Director Режиссер\n\/\/ Producer Продюсер\n\/\/ Actors Актеры\n\/\/ Description Описание\n\/\/ Age Возраст\n\/\/ ReleaseDate Дата мировой премьеры\n\/\/ RussianDate Дата премьеры в России\n\/\/ Duration Продолжительность\n\/\/ Quality Качество видео\n\/\/ Translation Перевод\n\/\/ SubtitlesType Вид субтитров\n\/\/ Subtitles Субтитры\n\/\/ Video Видео\n\/\/ Resolution Разрешение видео\n\/\/ Audio Аудио\n\/\/ Kinopoisk Рейтинг кинопоиска\n\/\/ Imdb Рейтинг IMDb\n\/\/ NNM Рейтинг nnm-club\n\/\/ Sound Звук\n\/\/ Size Размер\n\/\/ DateCreate Дата создания раздачи\n\/\/ Torrent Ссылка на torrent\n\/\/ Poster Ссылка на постер\n\/\/ Seeders Количество раздающих\n\/\/ Leechers Количество скачивающих\n\/\/ UpdatedAt Дата обновления записи БД\n\/\/ CreatedAt Дата создания записи БД\ntype Film struct {\n\tID int64 `gorm:\"column:id\" db:\"id\" sql:\"AUTO_INCREMENT\"`\n\tName string `gorm:\"column:name\" db:\"name\" sql:\"type:text\"`\n\tEngName string `gorm:\"column:eng_name\" db:\"eng_name\" sql:\"type:text\"`\n\tHref string `gorm:\"column:href\" db:\"href\" sql:\"type:text\"`\n\tYear int64 `gorm:\"column:year\" db:\"year\"`\n\tGenre string `gorm:\"column:genre\" db:\"genre\" sql:\"type:text\"`\n\tCountry string `gorm:\"column:country\" db:\"country\" sql:\"type:text\"`\n\tDirector string `gorm:\"column:director\" db:\"director\" sql:\"type:text\"`\n\tProducer string `gorm:\"column:producer\" db:\"producer\" sql:\"type:text\"`\n\tActors string `gorm:\"column:actors\" db:\"actors\" sql:\"type:text\"`\n\tDescription string `gorm:\"column:description\" db:\"description\" sql:\"type:text\"`\n\tAge string `gorm:\"column:age\" db:\"age\" sql:\"type:text\"`\n\tReleaseDate string `gorm:\"column:release_date\" db:\"release_date\" sql:\"type:text\"`\n\tRussianDate string `gorm:\"column:russian_date\" db:\"russian_date\" sql:\"type:text\"`\n\tDuration int64 `gorm:\"column:duration\" db:\"duration\"`\n\tQuality string `gorm:\"column:quality\" db:\"quality\" sql:\"type:text\"`\n\tTranslation string `gorm:\"column:translation\" db:\"translation\" sql:\"type:text\"`\n\tSubtitlesType string `gorm:\"column:subtitles_type\" db:\"subtitles_type\" sql:\"type:text\"`\n\tSubtitles string `gorm:\"column:subtitles\" db:\"subtitles\" sql:\"type:text\"`\n\tVideo string `gorm:\"column:video\" db:\"video\" sql:\"type:text\"`\n\tResolution string `gorm:\"column:resolution\" db:\"resolution\" sql:\"type:text\"`\n\tAudio string `gorm:\"column:audio\" db:\"audio\" sql:\"type:text\"`\n\tKinopoisk float64 `gorm:\"column:kinopoisk\" db:\"kinopoisk\"`\n\tIMDb float64 `gorm:\"column:imdb\" db:\"imdb\"`\n\tNNM float64 `gorm:\"column:nnm\" db:\"nnm\"`\n\tSound string `gorm:\"column:sound\" db:\"sound\" sql:\"type:text\"`\n\tSize int64 `gorm:\"column:size\" db:\"size\"`\n\tDateCreate string `gorm:\"column:date_create\" db:\"date_create\" sql:\"type:text\"`\n\tTorrent string `gorm:\"column:torrent\" db:\"torrent\" sql:\"type:text\"`\n\tPoster string `gorm:\"column:poster\" db:\"poster\" sql:\"type:text\"`\n\tSeeders int64 `gorm:\"column:seeders\" db:\"seeders\"`\n\tLeechers int64 `gorm:\"column:leechers\" db:\"leechers\"`\n\tUpdatedAt time.Time `gorm:\"column:updated_at\" db:\"updated_at\"`\n\tCreatedAt time.Time `gorm:\"column:created_at\" db:\"created_at\"`\n}\n\n\/\/ Init nnmc with login password\nfunc Init(login string, password string) (*NCp, error) {\n\tvar client http.Client\n\tcookieJar, _ := cookiejar.New(nil)\n\tclient.Jar = cookieJar\n\turlPost := \"http:\/\/nnm-club.me\/forum\/login.php\"\n\tform := url.Values{}\n\tform.Set(\"username\", login)\n\tform.Add(\"password\", password)\n\tform.Add(\"redirect\", \"\")\n\tform.Add(\"login\", \"âõîä\")\n\treq, _ := http.NewRequest(\"POST\", urlPost, bytes.NewBufferString(form.Encode()))\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(form.Encode())))\n\t_, err := client.Do(req)\n\treturn &NCp{client: client}, err\n}\n\n\/\/ getHTML get body from url\nfunc getHTML(url string, n *NCp) ([]byte, error) {\n\tresp, err := n.client.Get(url)\n\tif err != nil {\n\t\tlog.Println(\"client Get error:\", err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(\"ioutil.ReadAll error:\", err)\n\t}\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, char := range body {\n\t\tvar ch = Utf(char)\n\t\tfmt.Fprintf(buffer, \"%c\", ch)\n\t}\n\tdoc := buffer.Bytes()\n\tdoc = replaceAll(doc, \" \", \" \")\n\tdoc = replaceAll(doc, \"&\", \"&\")\n\tdoc = replaceAll(doc, \"<br \/>\", \"\")\n\treturn doc, nil\n}\n\n\/\/ ParseForumTree get topics from forumTree\nfunc (n *NCp) ParseForumTree(url string) ([]Topic, error) {\n\tvar (\n\t\ttopics []Topic\n\t\treTree = regexp.MustCompile(`<a href=\"(viewtopic.php\\?t=\\d+)\"class=\"topictitle\">(.+?)\\s\\((\\d{4})\\)\\s(.+?)<\/a>`)\n\t)\n\tbody, err := getHTML(url, n)\n\tif err != nil {\n\t\treturn topics, err\n\t}\n\tif reTree.Match(body) == false {\n\t\treturn topics, fmt.Errorf(\"No topic in body\")\n\t}\n\tfindResult := reTree.FindAllSubmatch(body, -1)\n\tfor _, v := range findResult {\n\t\tvar t Topic\n\t\tt.Href = \"http:\/\/nnm-club.me\/forum\/\" + string(v[1])\n\t\tt.Name = string(v[2])\n\t\tt.Year = string(v[3])\n\t\tt.Quality = string(v[4])\n\t\ttopics = append(topics, t)\n\t}\n\treturn topics, nil\n}\n\n\/\/ ParseTopic get film from topic\nfunc (n *NCp) ParseTopic(topic Topic) (Film, error) {\n\tvar (\n\t\tfilm Film\n\t)\n\tname := strings.Split(topic.Name, \" \/ \")\n\tswitch len(name) {\n\tcase 1:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\tcase 2:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\t\tfilm.EngName = strings.Trim(name[1], \" \")\n\tcase 3:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\t\tfilm.EngName = strings.Trim(name[1], \" \")\n\t}\n\tfilm.Href = topic.Href\n\tif year64, err := strconv.ParseInt(topic.Year, 10, 64); err == nil {\n\t\tfilm.Year = year64\n\t}\n\tbody, err := getHTML(film.Href, n)\n\tif err != nil {\n\t\treturn film, err\n\t}\n\ttopic.Body = body\n\tfilm.Country = topic.getCountry()\n\tfilm.Genre = topic.getGenre()\n\tfilm.Director = topic.getDirector()\n\tfilm.Producer = topic.getProducer()\n\tfilm.Actors = topic.getActors()\n\tfilm.Description = topic.getDescription()\n\tfilm.Age = topic.getAge()\n\tfilm.ReleaseDate = topic.getReleaseDate()\n\tfilm.RussianDate = topic.getRussianDate()\n\tfilm.Duration = topic.getDuration()\n\tfilm.Quality = topic.getQuality()\n\tfilm.Translation = topic.getTranslation()\n\tfilm.SubtitlesType = topic.getSubtitlesType()\n\tfilm.Subtitles = topic.getSubtitles()\n\tfilm.Video = topic.getVideo()\n\tfilm.Resolution = getResolution(film.Video)\n\tfilm.Audio = topic.getAudio()\n\tfilm.Torrent = topic.getTorrent()\n\tfilm.DateCreate = topic.getDate()\n\tfilm.Size = topic.getSize()\n\tfilm.NNM = topic.getRating()\n\tfilm.Poster = topic.getPoster()\n\tfilm.Seeders = topic.getSeeds()\n\tfilm.Leechers = topic.getLeechs()\n\treturn film, nil\n}\n\nfunc replaceAll(body []byte, from string, to string) []byte {\n\tvar reStr = regexp.MustCompile(from)\n\tresult := reStr.ReplaceAll(body, []byte(to))\n\treturn result\n}\n\nfunc replaceDate(s string) string {\n\ts = strings.Replace(s, \" Янв \", \".01.\", -1)\n\ts = strings.Replace(s, \" Фев \", \".02.\", -1)\n\ts = strings.Replace(s, \" Мар \", \".03.\", -1)\n\ts = strings.Replace(s, \" Апр \", \".04.\", -1)\n\ts = strings.Replace(s, \" Май \", \".05.\", -1)\n\ts = strings.Replace(s, \" Июн \", \".06.\", -1)\n\ts = strings.Replace(s, \" Июл \", \".07.\", -1)\n\ts = strings.Replace(s, \" Авг \", \".08.\", -1)\n\ts = strings.Replace(s, \" Сен \", \".09.\", -1)\n\ts = strings.Replace(s, \" Окт \", \".10.\", -1)\n\ts = strings.Replace(s, \" Ноя \", \".11.\", -1)\n\ts = strings.Replace(s, \" Дек \", \".12.\", -1)\n\treturn s\n}\n\nfunc caseInsensitiveContains(s, substr string) bool {\n\ts, substr = strings.ToUpper(s), strings.ToUpper(substr)\n\treturn strings.Contains(s, substr)\n}\n\nfunc cleanStr(str string) string {\n\tvar reSpan = regexp.MustCompile(\"<span .*?>\")\n\tstr = reSpan.ReplaceAllString(str, \"\")\n\tstr = strings.Trim(str, \" \")\n\treturn str\n}\n<commit_msg>more clean body<commit_after>package ncp\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ NCp values:\n\/\/ client http.Client with cookie\ntype NCp struct {\n\tclient http.Client\n}\n\n\/\/ Topic from forum\ntype Topic struct {\n\tHref string\n\tName string\n\tYear string\n\tQuality string\n\tBody []byte\n}\n\n\/\/ Film all values\n\/\/ ID id\n\/\/ Name Название\n\/\/ EngName Английское название\n\/\/ Href Ссылка\n\/\/ Year Год\n\/\/ Genre Жанр\n\/\/ Country Производство\n\/\/ Director Режиссер\n\/\/ Producer Продюсер\n\/\/ Actors Актеры\n\/\/ Description Описание\n\/\/ Age Возраст\n\/\/ ReleaseDate Дата мировой премьеры\n\/\/ RussianDate Дата премьеры в России\n\/\/ Duration Продолжительность\n\/\/ Quality Качество видео\n\/\/ Translation Перевод\n\/\/ SubtitlesType Вид субтитров\n\/\/ Subtitles Субтитры\n\/\/ Video Видео\n\/\/ Resolution Разрешение видео\n\/\/ Audio Аудио\n\/\/ Kinopoisk Рейтинг кинопоиска\n\/\/ Imdb Рейтинг IMDb\n\/\/ NNM Рейтинг nnm-club\n\/\/ Sound Звук\n\/\/ Size Размер\n\/\/ DateCreate Дата создания раздачи\n\/\/ Torrent Ссылка на torrent\n\/\/ Poster Ссылка на постер\n\/\/ Seeders Количество раздающих\n\/\/ Leechers Количество скачивающих\n\/\/ UpdatedAt Дата обновления записи БД\n\/\/ CreatedAt Дата создания записи БД\ntype Film struct {\n\tID int64 `gorm:\"column:id\" db:\"id\" sql:\"AUTO_INCREMENT\"`\n\tName string `gorm:\"column:name\" db:\"name\" sql:\"type:text\"`\n\tEngName string `gorm:\"column:eng_name\" db:\"eng_name\" sql:\"type:text\"`\n\tHref string `gorm:\"column:href\" db:\"href\" sql:\"type:text\"`\n\tYear int64 `gorm:\"column:year\" db:\"year\"`\n\tGenre string `gorm:\"column:genre\" db:\"genre\" sql:\"type:text\"`\n\tCountry string `gorm:\"column:country\" db:\"country\" sql:\"type:text\"`\n\tDirector string `gorm:\"column:director\" db:\"director\" sql:\"type:text\"`\n\tProducer string `gorm:\"column:producer\" db:\"producer\" sql:\"type:text\"`\n\tActors string `gorm:\"column:actors\" db:\"actors\" sql:\"type:text\"`\n\tDescription string `gorm:\"column:description\" db:\"description\" sql:\"type:text\"`\n\tAge string `gorm:\"column:age\" db:\"age\" sql:\"type:text\"`\n\tReleaseDate string `gorm:\"column:release_date\" db:\"release_date\" sql:\"type:text\"`\n\tRussianDate string `gorm:\"column:russian_date\" db:\"russian_date\" sql:\"type:text\"`\n\tDuration int64 `gorm:\"column:duration\" db:\"duration\"`\n\tQuality string `gorm:\"column:quality\" db:\"quality\" sql:\"type:text\"`\n\tTranslation string `gorm:\"column:translation\" db:\"translation\" sql:\"type:text\"`\n\tSubtitlesType string `gorm:\"column:subtitles_type\" db:\"subtitles_type\" sql:\"type:text\"`\n\tSubtitles string `gorm:\"column:subtitles\" db:\"subtitles\" sql:\"type:text\"`\n\tVideo string `gorm:\"column:video\" db:\"video\" sql:\"type:text\"`\n\tResolution string `gorm:\"column:resolution\" db:\"resolution\" sql:\"type:text\"`\n\tAudio string `gorm:\"column:audio\" db:\"audio\" sql:\"type:text\"`\n\tKinopoisk float64 `gorm:\"column:kinopoisk\" db:\"kinopoisk\"`\n\tIMDb float64 `gorm:\"column:imdb\" db:\"imdb\"`\n\tNNM float64 `gorm:\"column:nnm\" db:\"nnm\"`\n\tSound string `gorm:\"column:sound\" db:\"sound\" sql:\"type:text\"`\n\tSize int64 `gorm:\"column:size\" db:\"size\"`\n\tDateCreate string `gorm:\"column:date_create\" db:\"date_create\" sql:\"type:text\"`\n\tTorrent string `gorm:\"column:torrent\" db:\"torrent\" sql:\"type:text\"`\n\tPoster string `gorm:\"column:poster\" db:\"poster\" sql:\"type:text\"`\n\tSeeders int64 `gorm:\"column:seeders\" db:\"seeders\"`\n\tLeechers int64 `gorm:\"column:leechers\" db:\"leechers\"`\n\tUpdatedAt time.Time `gorm:\"column:updated_at\" db:\"updated_at\"`\n\tCreatedAt time.Time `gorm:\"column:created_at\" db:\"created_at\"`\n}\n\n\/\/ Init nnmc with login password\nfunc Init(login string, password string) (*NCp, error) {\n\tvar client http.Client\n\tcookieJar, _ := cookiejar.New(nil)\n\tclient.Jar = cookieJar\n\turlPost := \"http:\/\/nnm-club.me\/forum\/login.php\"\n\tform := url.Values{}\n\tform.Set(\"username\", login)\n\tform.Add(\"password\", password)\n\tform.Add(\"redirect\", \"\")\n\tform.Add(\"login\", \"âõîä\")\n\treq, _ := http.NewRequest(\"POST\", urlPost, bytes.NewBufferString(form.Encode()))\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(form.Encode())))\n\t_, err := client.Do(req)\n\treturn &NCp{client: client}, err\n}\n\n\/\/ getHTML get body from url\nfunc getHTML(url string, n *NCp) ([]byte, error) {\n\tresp, err := n.client.Get(url)\n\tif err != nil {\n\t\tlog.Println(\"client Get error:\", err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(\"ioutil.ReadAll error:\", err)\n\t}\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, char := range body {\n\t\tvar ch = Utf(char)\n\t\tfmt.Fprintf(buffer, \"%c\", ch)\n\t}\n\tdoc := buffer.Bytes()\n\tdoc = replaceAll(doc, \" \", \" \")\n\tdoc = replaceAll(doc, \"&\", \"&\")\n\tdoc = replaceAll(doc, \"<br \/>\", \" \")\n\tdoc = replaceAll(body, \"<\/span>:\", \":<\/span>\")\n\tdoc = replaceAll(doc, \" \", \" \")\n\tdoc = removeTag(doc, `<span style=\"text-decoration:.+?\">(.+?)<\/span>`)\n\treturn doc, nil\n}\n\n\/\/ ParseForumTree get topics from forumTree\nfunc (n *NCp) ParseForumTree(url string) ([]Topic, error) {\n\tvar (\n\t\ttopics []Topic\n\t\treTree = regexp.MustCompile(`<a href=\"(viewtopic.php\\?t=\\d+)\"class=\"topictitle\">(.+?)\\s\\((\\d{4})\\)\\s(.+?)<\/a>`)\n\t)\n\tbody, err := getHTML(url, n)\n\tif err != nil {\n\t\treturn topics, err\n\t}\n\tif reTree.Match(body) == false {\n\t\treturn topics, fmt.Errorf(\"No topic in body\")\n\t}\n\tfindResult := reTree.FindAllSubmatch(body, -1)\n\tfor _, v := range findResult {\n\t\tvar t Topic\n\t\tt.Href = \"http:\/\/nnm-club.me\/forum\/\" + string(v[1])\n\t\tt.Name = string(v[2])\n\t\tt.Year = string(v[3])\n\t\tt.Quality = string(v[4])\n\t\ttopics = append(topics, t)\n\t}\n\treturn topics, nil\n}\n\n\/\/ ParseTopic get film from topic\nfunc (n *NCp) ParseTopic(topic Topic) (Film, error) {\n\tvar (\n\t\tfilm Film\n\t)\n\tname := strings.Split(topic.Name, \" \/ \")\n\tswitch len(name) {\n\tcase 1:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\tcase 2:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\t\tfilm.EngName = strings.Trim(name[1], \" \")\n\tcase 3:\n\t\tfilm.Name = strings.Trim(name[0], \" \")\n\t\tfilm.EngName = strings.Trim(name[1], \" \")\n\t}\n\tfilm.Href = topic.Href\n\tif year64, err := strconv.ParseInt(topic.Year, 10, 64); err == nil {\n\t\tfilm.Year = year64\n\t}\n\tbody, err := getHTML(film.Href, n)\n\tif err != nil {\n\t\treturn film, err\n\t}\n\ttopic.Body = body\n\tfilm.Country = topic.getCountry()\n\tfilm.Genre = topic.getGenre()\n\tfilm.Director = topic.getDirector()\n\tfilm.Producer = topic.getProducer()\n\tfilm.Actors = topic.getActors()\n\tfilm.Description = topic.getDescription()\n\tfilm.Age = topic.getAge()\n\tfilm.ReleaseDate = topic.getReleaseDate()\n\tfilm.RussianDate = topic.getRussianDate()\n\tfilm.Duration = topic.getDuration()\n\tfilm.Quality = topic.getQuality()\n\tfilm.Translation = topic.getTranslation()\n\tfilm.SubtitlesType = topic.getSubtitlesType()\n\tfilm.Subtitles = topic.getSubtitles()\n\tfilm.Video = topic.getVideo()\n\tfilm.Resolution = getResolution(film.Video)\n\tfilm.Audio = topic.getAudio()\n\tfilm.Torrent = topic.getTorrent()\n\tfilm.DateCreate = topic.getDate()\n\tfilm.Size = topic.getSize()\n\tfilm.NNM = topic.getRating()\n\tfilm.Poster = topic.getPoster()\n\tfilm.Seeders = topic.getSeeds()\n\tfilm.Leechers = topic.getLeechs()\n\treturn film, nil\n}\n\nfunc replaceAll(body []byte, from string, to string) []byte {\n\tvar reStr = regexp.MustCompile(from)\n\tif reStr.Match(body) == false {\n\t\treturn body\n\t}\n\treturn reStr.ReplaceAll(body, []byte(to))\n}\n\nfunc replaceDate(s string) string {\n\ts = strings.Replace(s, \" Янв \", \".01.\", -1)\n\ts = strings.Replace(s, \" Фев \", \".02.\", -1)\n\ts = strings.Replace(s, \" Мар \", \".03.\", -1)\n\ts = strings.Replace(s, \" Апр \", \".04.\", -1)\n\ts = strings.Replace(s, \" Май \", \".05.\", -1)\n\ts = strings.Replace(s, \" Июн \", \".06.\", -1)\n\ts = strings.Replace(s, \" Июл \", \".07.\", -1)\n\ts = strings.Replace(s, \" Авг \", \".08.\", -1)\n\ts = strings.Replace(s, \" Сен \", \".09.\", -1)\n\ts = strings.Replace(s, \" Окт \", \".10.\", -1)\n\ts = strings.Replace(s, \" Ноя \", \".11.\", -1)\n\ts = strings.Replace(s, \" Дек \", \".12.\", -1)\n\treturn s\n}\n\nfunc caseInsensitiveContains(s, substr string) bool {\n\ts, substr = strings.ToUpper(s), strings.ToUpper(substr)\n\treturn strings.Contains(s, substr)\n}\n\nfunc cleanStr(str string) string {\n\tvar reSpan = regexp.MustCompile(\"<span .*?>\")\n\tstr = reSpan.ReplaceAllString(str, \"\")\n\tstr = strings.Trim(str, \" \")\n\treturn str\n}\n\nfunc removeTag(body []byte, tag string) []byte {\n\tvar reTag = regexp.MustCompile(tag)\n\tif reTag.Match(body) == false {\n\t\treturn body\n\t}\n\ttags := reTag.FindAllSubmatch(body, -1)\n\tfor _, item := range tags {\n\t\tbody = bytes.Replace(body, item[0], item[1], 1)\n\t}\n\treturn body\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tag\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n)\n\nconst (\n\tidType int = 1\n\tcommentType int = 3\n)\n\n\/\/ ReadOGGTags reads OGG metadata from the io.ReadSeeker, returning the resulting\n\/\/ metadata in a Metadata implementation, or non-nil error if there was a problem.\n\/\/ See http:\/\/www.xiph.org\/vorbis\/doc\/Vorbis_I_spec.html\n\/\/ and http:\/\/www.xiph.org\/ogg\/doc\/framing.html for details.\nfunc ReadOGGTags(r io.ReadSeeker) (Metadata, error) {\n\toggs, err := readString(r, 4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif oggs != \"OggS\" {\n\t\treturn nil, errors.New(\"expected 'OggS'\")\n\t}\n\n\t\/\/ Skip 22 bytes of Page header to read page_segments length byte at position 26\n\t\/\/ See http:\/\/www.xiph.org\/ogg\/doc\/framing.html\n\t_, err = r.Seek(22, io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnS, err := readInt(r, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Seek and discard the segments\n\t_, err = r.Seek(int64(nS), io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ First packet type is identification, type 1\n\tt, err := readInt(r, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif t != idType {\n\t\treturn nil, errors.New(\"expected 'vorbis' identification type 1\")\n\t}\n\n\t\/\/ Seek and discard 29 bytes from common and identification header\n\t\/\/ See http:\/\/www.xiph.org\/vorbis\/doc\/Vorbis_I_spec.html#x1-610004.2\n\t_, err = r.Seek(29, io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Read comment header packet. May include setup header packet, if it is on the\n\t\/\/ same page. First audio packet is guaranteed to be on the separate page.\n\t\/\/ See https:\/\/www.xiph.org\/vorbis\/doc\/Vorbis_I_spec.html#x1-132000A.2\n\tch, err := readPackets(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tchr := bytes.NewReader(ch)\n\n\t\/\/ First packet type is comment, type 3\n\tt, err = readInt(chr, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif t != commentType {\n\t\treturn nil, errors.New(\"expected 'vorbis' comment type 3\")\n\t}\n\n\t\/\/ Seek and discard 6 bytes from common header\n\t_, err = chr.Seek(6, io.SeekCurrent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := &metadataOGG{\n\t\tnewMetadataVorbis(),\n\t}\n\n\terr = m.readVorbisComment(chr)\n\treturn m, err\n}\n\n\/\/ readPackets reads vorbis header packets from contiguous ogg pages in ReadSeeker.\n\/\/ The pages are considered contiguous, if the first lacing value in second\n\/\/ page's segment table continues rather than begins a packet. This is indicated\n\/\/ by setting header_type_flag 0x1 (continued packet).\n\/\/ See https:\/\/www.xiph.org\/ogg\/doc\/framing.html on packets spanning pages.\nfunc readPackets(r io.ReadSeeker) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\n\tfirstPage := true\n\tfor {\n\t\t\/\/ Read capture pattern\n\t\toggs, err := readString(r, 4)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif oggs != \"OggS\" {\n\t\t\treturn nil, errors.New(\"expected 'OggS'\")\n\t\t}\n\n\t\t\/\/ Read page header\n\t\thead, err := readBytes(r, 22)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theaderTypeFlag := head[1]\n\n\t\tcontinuation := headerTypeFlag&0x1 > 0\n\t\tif !(firstPage || continuation) {\n\t\t\t\/\/ Rewind to the beginning of the page\n\t\t\t_, err = r.Seek(-26, io.SeekCurrent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tfirstPage = false\n\n\t\t\/\/ Read the number of segments\n\t\tnS, err := readUint(r, 1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Read segment table\n\t\tsegments, err := readBytes(r, nS)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Calculate remaining page size\n\t\tpageSize := 0\n\t\tfor i := uint(0); i < nS; i++ {\n\t\t\tpageSize += int(segments[i])\n\t\t}\n\n\t\t_, err = io.CopyN(buf, r, int64(pageSize))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\ntype metadataOGG struct {\n\t*metadataVorbis\n}\n\nfunc (m *metadataOGG) FileType() FileType {\n\treturn OGG\n}\n<commit_msg>Add Ogg Opus metadata support (#69)<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tag\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\nvar (\n\tvorbisCommentPrefix = []byte(\"\\x03vorbis\")\n\topusTagsPrefix = []byte(\"OpusTags\")\n)\n\nvar oggCRC32Poly04c11db7 = oggCRCTable(0x04c11db7)\n\ntype crc32Table [256]uint32\n\nfunc oggCRCTable(poly uint32) *crc32Table {\n\tvar t crc32Table\n\n\tfor i := 0; i < 256; i++ {\n\t\tcrc := uint32(i) << 24\n\t\tfor j := 0; j < 8; j++ {\n\t\t\tif crc&0x80000000 != 0 {\n\t\t\t\tcrc = (crc << 1) ^ poly\n\t\t\t} else {\n\t\t\t\tcrc <<= 1\n\t\t\t}\n\t\t}\n\t\tt[i] = crc\n\t}\n\n\treturn &t\n}\n\nfunc oggCRCUpdate(crc uint32, tab *crc32Table, p []byte) uint32 {\n\tfor _, v := range p {\n\t\tcrc = (crc << 8) ^ tab[byte(crc>>24)^v]\n\t}\n\treturn crc\n}\n\ntype oggPageHeader struct {\n\tMagic [4]byte \/\/ \"OggS\"\n\tVersion uint8\n\tFlags uint8\n\tGranulePosition uint64\n\tSerialNumber uint32\n\tSequenceNumber uint32\n\tCRC uint32\n\tSegments uint8\n}\n\ntype oggDemuxer struct {\n\tpacketBufs map[uint32]*bytes.Buffer\n}\n\n\/\/ Read ogg packets, can return empty slice of packets and nil err\n\/\/ if more data is needed\nfunc (o *oggDemuxer) Read(r io.Reader) ([][]byte, error) {\n\theaderBuf := &bytes.Buffer{}\n\tvar oh oggPageHeader\n\tif err := binary.Read(io.TeeReader(r, headerBuf), binary.LittleEndian, &oh); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif bytes.Compare(oh.Magic[:], []byte(\"OggS\")) != 0 {\n\t\t\/\/ TODO: seek for syncword?\n\t\treturn nil, errors.New(\"expected 'OggS'\")\n\t}\n\n\tsegmentTable := make([]byte, oh.Segments)\n\tif _, err := io.ReadFull(r, segmentTable); err != nil {\n\t\treturn nil, err\n\t}\n\tvar segmentsSize int64\n\tfor _, s := range segmentTable {\n\t\tsegmentsSize += int64(s)\n\t}\n\tsegmentsData := make([]byte, segmentsSize)\n\tif _, err := io.ReadFull(r, segmentsData); err != nil {\n\t\treturn nil, err\n\t}\n\n\theaderBytes := headerBuf.Bytes()\n\t\/\/ reset CRC to zero in header before checksum\n\theaderBytes[22] = 0\n\theaderBytes[23] = 0\n\theaderBytes[24] = 0\n\theaderBytes[25] = 0\n\tcrc := oggCRCUpdate(0, oggCRC32Poly04c11db7, headerBytes)\n\tcrc = oggCRCUpdate(crc, oggCRC32Poly04c11db7, segmentTable)\n\tcrc = oggCRCUpdate(crc, oggCRC32Poly04c11db7, segmentsData)\n\tif crc != oh.CRC {\n\t\treturn nil, fmt.Errorf(\"expected crc %x != %x\", oh.CRC, crc)\n\t}\n\n\tif o.packetBufs == nil {\n\t\to.packetBufs = map[uint32]*bytes.Buffer{}\n\t}\n\n\tvar packetBuf *bytes.Buffer\n\tcontinued := oh.Flags&0x1 != 0\n\tif continued {\n\t\tif b, ok := o.packetBufs[oh.SerialNumber]; ok {\n\t\t\tpacketBuf = b\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"could not find continued packet %d\", oh.SerialNumber)\n\t\t}\n\t} else {\n\t\tpacketBuf = &bytes.Buffer{}\n\t}\n\n\tvar packets [][]byte\n\tvar p int\n\tfor _, s := range segmentTable {\n\t\tpacketBuf.Write(segmentsData[p : p+int(s)])\n\t\tif s < 255 {\n\t\t\tpackets = append(packets, packetBuf.Bytes())\n\t\t\tpacketBuf = &bytes.Buffer{}\n\t\t}\n\t\tp += int(s)\n\t}\n\n\to.packetBufs[oh.SerialNumber] = packetBuf\n\n\treturn packets, nil\n}\n\n\/\/ ReadOGGTags reads OGG metadata from the io.ReadSeeker, returning the resulting\n\/\/ metadata in a Metadata implementation, or non-nil error if there was a problem.\n\/\/ See http:\/\/www.xiph.org\/vorbis\/doc\/Vorbis_I_spec.html\n\/\/ and http:\/\/www.xiph.org\/ogg\/doc\/framing.html for details.\n\/\/ For Opus see https:\/\/tools.ietf.org\/html\/rfc7845\nfunc ReadOGGTags(r io.Reader) (Metadata, error) {\n\tod := &oggDemuxer{}\n\tfor {\n\t\tbs, err := od.Read(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, b := range bs {\n\t\t\tswitch {\n\t\t\tcase bytes.HasPrefix(b, vorbisCommentPrefix):\n\t\t\t\tm := &metadataOGG{\n\t\t\t\t\tnewMetadataVorbis(),\n\t\t\t\t}\n\t\t\t\terr = m.readVorbisComment(bytes.NewReader(b[len(vorbisCommentPrefix):]))\n\t\t\t\treturn m, err\n\t\t\tcase bytes.HasPrefix(b, opusTagsPrefix):\n\t\t\t\tm := &metadataOGG{\n\t\t\t\t\tnewMetadataVorbis(),\n\t\t\t\t}\n\t\t\t\terr = m.readVorbisComment(bytes.NewReader(b[len(opusTagsPrefix):]))\n\t\t\t\treturn m, err\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype metadataOGG struct {\n\t*metadataVorbis\n}\n\nfunc (m *metadataOGG) FileType() FileType {\n\treturn OGG\n}\n<|endoftext|>"} {"text":"<commit_before>package php\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/stephens2424\/php\/token\"\n)\n\nconst shortPHPBegin = \"<?\"\nconst longPHPBegin = \"<?php\"\nconst phpEnd = \"?>\"\n\nconst eof = -1\n\n\/\/ lexHTML consumes and emits an html t until it\n\/\/ finds a php begin\nfunc lexHTML(l *lexer) stateFn {\n\tfor {\n\t\tif strings.HasPrefix(l.input[l.pos:], shortPHPBegin) {\n\t\t\tif l.pos > l.start {\n\t\t\t\tl.emit(token.HTML)\n\t\t\t}\n\t\t\treturn lexPHPBegin\n\t\t}\n\t\tif l.next() == eof {\n\t\t\tbreak\n\t\t}\n\t}\n\tif l.pos > l.start {\n\t\tl.emit(token.HTML)\n\t}\n\tl.emit(token.EOF)\n\treturn nil\n}\n\nfunc lexPHPBegin(l *lexer) stateFn {\n\tif strings.HasPrefix(l.input[l.pos:], longPHPBegin) {\n\t\tl.pos += len(longPHPBegin)\n\t}\n\tif strings.HasPrefix(l.input[l.pos:], shortPHPBegin) {\n\t\tl.pos += len(shortPHPBegin)\n\t}\n\tl.emit(token.PHPBegin)\n\treturn lexPHP\n}\n\nfunc lexPHP(l *lexer) stateFn {\n\tl.skipSpace()\n\n\tif r := l.peek(); unicode.IsDigit(r) {\n\t\treturn lexNumberLiteral\n\t} else if r == '.' {\n\t\tl.next() \/\/ must advance because we only peeked before\n\t\tsecondR := l.peek()\n\t\tl.backup()\n\t\tif unicode.IsDigit(secondR) {\n\t\t\treturn lexNumberLiteral\n\t\t}\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"<<<\") {\n\t\treturn lexDoc\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"?>\") {\n\t\treturn lexPHPEnd\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"#\") {\n\t\treturn lexLineComment\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"\/\/\") {\n\t\treturn lexLineComment\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"\/*\") {\n\t\treturn lexBlockComment\n\t}\n\n\tif l.peek() == eof {\n\t\tl.emit(token.EOF)\n\t\treturn nil\n\t}\n\n\tif l.peek() == '`' {\n\t\treturn lexShellCommand\n\t}\n\n\tif l.peek() == '\\'' {\n\t\treturn lexSingleQuotedStringLiteral\n\t}\n\n\tif l.peek() == '\"' {\n\t\treturn lexDoubleQuotedStringLiteral\n\t}\n\n\tfor _, tokenString := range token.TokenList {\n\t\tt := token.TokenMap[tokenString]\n\t\tpotentialToken := l.input[l.pos:]\n\t\tif len(potentialToken) > len(tokenString) {\n\t\t\tpotentialToken = potentialToken[:len(tokenString)]\n\t\t}\n\t\tif strings.HasPrefix(strings.ToLower(potentialToken), tokenString) {\n\t\t\tprev := l.previous()\n\t\t\tif isKeyword(t, tokenString) && prev == '$' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tl.pos += len(tokenString)\n\t\t\tif isKeyword(t, tokenString) && l.accept(alphabet+underscore+digits) {\n\t\t\t\tl.backup() \/\/ to account for the character consumed by accept\n\t\t\t\tl.pos -= len(tokenString)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tl.emit(t)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n\n\tl.acceptRun(alphabet + underscore + digits + \"\\\\\")\n\tl.emit(token.Identifier)\n\treturn lexPHP\n}\n\nfunc lexNumberLiteral(l *lexer) stateFn {\n\tif l.accept(\"0\") {\n\t\t\/\/ binary?\n\t\tif l.accept(\"b\") {\n\t\t\tl.acceptRun(\"01\")\n\t\t\tl.emit(token.NumberLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t\t\/\/ hexadecimal?\n\t\tif l.accept(\"xX\") {\n\t\t\tl.acceptRun(digits + \"abcdefABCDEF\")\n\t\t\tl.emit(token.NumberLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n\t\/\/ is decimal?\n\tl.acceptRun(digits)\n\tif l.accept(\".\") {\n\t\tl.acceptRun(digits)\n\t}\n\n\tif l.accept(\"E\") {\n\t\tl.acceptRun(digits)\n\t}\n\n\tl.emit(token.NumberLiteral)\n\treturn lexPHP\n}\n\nfunc lexShellCommand(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '`':\n\t\t\tl.emit(token.ShellCommand)\n\t\t\treturn lexPHP\n\t\tcase eof:\n\t\t\tl.emit(token.ShellCommand)\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc lexSingleQuotedStringLiteral(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '\\\\':\n\t\t\tl.next()\n\t\t\tcontinue\n\t\tcase '\\'':\n\t\t\tl.emit(token.StringLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n}\n\nfunc lexDoubleQuotedStringLiteral(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '\\\\':\n\t\t\tl.next()\n\t\t\tcontinue\n\t\tcase '\"':\n\t\t\tl.emit(token.StringLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n}\n\nconst alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nconst digits = \"0123456789\"\nconst underscore = \"_\"\n\nfunc lexIdentifier(l *lexer) stateFn {\n\tl.accept(\"$\")\n\tl.accept(underscore + alphabet)\n\tl.acceptRun(underscore + alphabet + digits)\n\tl.emit(token.VariableOperator)\n\treturn lexPHP\n}\n\n\/\/ lexPHPEnd lexes the end of a PHP section returning the context to HTML\nfunc lexPHPEnd(l *lexer) stateFn {\n\tl.pos += len(phpEnd)\n\tl.emit(token.PHPEnd)\n\treturn lexHTML\n}\n\nfunc lexLineComment(l *lexer) stateFn {\n\tlineLength := strings.Index(l.input[l.pos:], \"\\n\") + 1\n\tif lineLength == 0 {\n\t\t\/\/ this is the last line, so lex until the end\n\t\tlineLength = len(l.input[l.pos:])\n\t}\n\t\/\/ don't lex php end\n\tif phpEndLength := strings.Index(l.input[l.pos:l.pos+lineLength], phpEnd); phpEndLength >= 0 && phpEndLength < lineLength {\n\t\tlineLength = phpEndLength\n\t}\n\tl.pos += lineLength\n\tl.ignore()\n\treturn lexPHP\n}\n\nfunc lexBlockComment(l *lexer) stateFn {\n\tcommentLength := strings.Index(l.input[l.pos:], \"*\/\") + 2\n\tif commentLength == 1 {\n\t\t\/\/ the file ends before we find *\/\n\t\tcommentLength = len(l.input[l.pos:])\n\t}\n\tl.pos += commentLength\n\tl.ignore()\n\treturn lexPHP\n}\n\nfunc lexDoc(l *lexer) stateFn {\n\tvar nowDoc bool\n\tl.pos += len(\"<<<\")\n\tl.skipSpace()\n\tif strings.HasPrefix(l.input[l.pos:], \"'\") {\n\t\tnowDoc = true\n\t\tl.pos += len(\"'\")\n\t}\n\tlabelPos := l.pos\n\tl.accept(underscore + alphabet)\n\tl.acceptRun(underscore + alphabet + digits)\n\tendMarker := fmt.Sprintf(\"\\n%s\", l.input[labelPos:l.pos])\n\tif nowDoc {\n\t\tl.accept(\"'\")\n\t}\n\tl.accept(\"\\n\")\n\tfor !strings.HasPrefix(l.input[l.pos:], endMarker) {\n\t\tl.next()\n\t}\n\tl.pos += len(endMarker)\n\tl.emit(token.StringLiteral)\n\treturn lexPHP\n}\n<commit_msg>Adding lowercase e to scientific notation lexing<commit_after>package php\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/stephens2424\/php\/token\"\n)\n\nconst shortPHPBegin = \"<?\"\nconst longPHPBegin = \"<?php\"\nconst phpEnd = \"?>\"\n\nconst eof = -1\n\n\/\/ lexHTML consumes and emits an html t until it\n\/\/ finds a php begin\nfunc lexHTML(l *lexer) stateFn {\n\tfor {\n\t\tif strings.HasPrefix(l.input[l.pos:], shortPHPBegin) {\n\t\t\tif l.pos > l.start {\n\t\t\t\tl.emit(token.HTML)\n\t\t\t}\n\t\t\treturn lexPHPBegin\n\t\t}\n\t\tif l.next() == eof {\n\t\t\tbreak\n\t\t}\n\t}\n\tif l.pos > l.start {\n\t\tl.emit(token.HTML)\n\t}\n\tl.emit(token.EOF)\n\treturn nil\n}\n\nfunc lexPHPBegin(l *lexer) stateFn {\n\tif strings.HasPrefix(l.input[l.pos:], longPHPBegin) {\n\t\tl.pos += len(longPHPBegin)\n\t}\n\tif strings.HasPrefix(l.input[l.pos:], shortPHPBegin) {\n\t\tl.pos += len(shortPHPBegin)\n\t}\n\tl.emit(token.PHPBegin)\n\treturn lexPHP\n}\n\nfunc lexPHP(l *lexer) stateFn {\n\tl.skipSpace()\n\n\tif r := l.peek(); unicode.IsDigit(r) {\n\t\treturn lexNumberLiteral\n\t} else if r == '.' {\n\t\tl.next() \/\/ must advance because we only peeked before\n\t\tsecondR := l.peek()\n\t\tl.backup()\n\t\tif unicode.IsDigit(secondR) {\n\t\t\treturn lexNumberLiteral\n\t\t}\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"<<<\") {\n\t\treturn lexDoc\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"?>\") {\n\t\treturn lexPHPEnd\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"#\") {\n\t\treturn lexLineComment\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"\/\/\") {\n\t\treturn lexLineComment\n\t}\n\n\tif strings.HasPrefix(l.input[l.pos:], \"\/*\") {\n\t\treturn lexBlockComment\n\t}\n\n\tif l.peek() == eof {\n\t\tl.emit(token.EOF)\n\t\treturn nil\n\t}\n\n\tif l.peek() == '`' {\n\t\treturn lexShellCommand\n\t}\n\n\tif l.peek() == '\\'' {\n\t\treturn lexSingleQuotedStringLiteral\n\t}\n\n\tif l.peek() == '\"' {\n\t\treturn lexDoubleQuotedStringLiteral\n\t}\n\n\tfor _, tokenString := range token.TokenList {\n\t\tt := token.TokenMap[tokenString]\n\t\tpotentialToken := l.input[l.pos:]\n\t\tif len(potentialToken) > len(tokenString) {\n\t\t\tpotentialToken = potentialToken[:len(tokenString)]\n\t\t}\n\t\tif strings.HasPrefix(strings.ToLower(potentialToken), tokenString) {\n\t\t\tprev := l.previous()\n\t\t\tif isKeyword(t, tokenString) && prev == '$' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tl.pos += len(tokenString)\n\t\t\tif isKeyword(t, tokenString) && l.accept(alphabet+underscore+digits) {\n\t\t\t\tl.backup() \/\/ to account for the character consumed by accept\n\t\t\t\tl.pos -= len(tokenString)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tl.emit(t)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n\n\tl.acceptRun(alphabet + underscore + digits + \"\\\\\")\n\tl.emit(token.Identifier)\n\treturn lexPHP\n}\n\nfunc lexNumberLiteral(l *lexer) stateFn {\n\tif l.accept(\"0\") {\n\t\t\/\/ binary?\n\t\tif l.accept(\"b\") {\n\t\t\tl.acceptRun(\"01\")\n\t\t\tl.emit(token.NumberLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t\t\/\/ hexadecimal?\n\t\tif l.accept(\"xX\") {\n\t\t\tl.acceptRun(digits + \"abcdefABCDEF\")\n\t\t\tl.emit(token.NumberLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n\t\/\/ is decimal?\n\tl.acceptRun(digits)\n\tif l.accept(\".\") {\n\t\tl.acceptRun(digits)\n\t}\n\n\tif l.accept(\"eE\") {\n\t\tl.acceptRun(digits)\n\t}\n\n\tl.emit(token.NumberLiteral)\n\treturn lexPHP\n}\n\nfunc lexShellCommand(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '`':\n\t\t\tl.emit(token.ShellCommand)\n\t\t\treturn lexPHP\n\t\tcase eof:\n\t\t\tl.emit(token.ShellCommand)\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc lexSingleQuotedStringLiteral(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '\\\\':\n\t\t\tl.next()\n\t\t\tcontinue\n\t\tcase '\\'':\n\t\t\tl.emit(token.StringLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n}\n\nfunc lexDoubleQuotedStringLiteral(l *lexer) stateFn {\n\tl.next()\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '\\\\':\n\t\t\tl.next()\n\t\t\tcontinue\n\t\tcase '\"':\n\t\t\tl.emit(token.StringLiteral)\n\t\t\treturn lexPHP\n\t\t}\n\t}\n}\n\nconst alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nconst digits = \"0123456789\"\nconst underscore = \"_\"\n\nfunc lexIdentifier(l *lexer) stateFn {\n\tl.accept(\"$\")\n\tl.accept(underscore + alphabet)\n\tl.acceptRun(underscore + alphabet + digits)\n\tl.emit(token.VariableOperator)\n\treturn lexPHP\n}\n\n\/\/ lexPHPEnd lexes the end of a PHP section returning the context to HTML\nfunc lexPHPEnd(l *lexer) stateFn {\n\tl.pos += len(phpEnd)\n\tl.emit(token.PHPEnd)\n\treturn lexHTML\n}\n\nfunc lexLineComment(l *lexer) stateFn {\n\tlineLength := strings.Index(l.input[l.pos:], \"\\n\") + 1\n\tif lineLength == 0 {\n\t\t\/\/ this is the last line, so lex until the end\n\t\tlineLength = len(l.input[l.pos:])\n\t}\n\t\/\/ don't lex php end\n\tif phpEndLength := strings.Index(l.input[l.pos:l.pos+lineLength], phpEnd); phpEndLength >= 0 && phpEndLength < lineLength {\n\t\tlineLength = phpEndLength\n\t}\n\tl.pos += lineLength\n\tl.ignore()\n\treturn lexPHP\n}\n\nfunc lexBlockComment(l *lexer) stateFn {\n\tcommentLength := strings.Index(l.input[l.pos:], \"*\/\") + 2\n\tif commentLength == 1 {\n\t\t\/\/ the file ends before we find *\/\n\t\tcommentLength = len(l.input[l.pos:])\n\t}\n\tl.pos += commentLength\n\tl.ignore()\n\treturn lexPHP\n}\n\nfunc lexDoc(l *lexer) stateFn {\n\tvar nowDoc bool\n\tl.pos += len(\"<<<\")\n\tl.skipSpace()\n\tif strings.HasPrefix(l.input[l.pos:], \"'\") {\n\t\tnowDoc = true\n\t\tl.pos += len(\"'\")\n\t}\n\tlabelPos := l.pos\n\tl.accept(underscore + alphabet)\n\tl.acceptRun(underscore + alphabet + digits)\n\tendMarker := fmt.Sprintf(\"\\n%s\", l.input[labelPos:l.pos])\n\tif nowDoc {\n\t\tl.accept(\"'\")\n\t}\n\tl.accept(\"\\n\")\n\tfor !strings.HasPrefix(l.input[l.pos:], endMarker) {\n\t\tl.next()\n\t}\n\tl.pos += len(endMarker)\n\tl.emit(token.StringLiteral)\n\treturn lexPHP\n}\n<|endoftext|>"} {"text":"<commit_before>package CloudForest\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ParseLibSVM(input io.Reader) *FeatureMatrix {\n\treader := bufio.NewReader(input)\n\n\tdata := make([]Feature, 0, 100)\n\tlookup := make(map[string]int, 0)\n\tlabels := make([]string, 0, 0)\n\n\ti := 0\n\tncases := 8100000\n\tallthedata := make([]uint8, 784*ncases, 784*ncases)\n\tfor {\n\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Print(\"Error:\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\tvals := strings.Fields(line)\n\n\t\tif i == 0 {\n\t\t\tif strings.Contains(vals[0], \".\") {\n\t\t\t\t\/\/looks like a float...add dense float64 feature regression\n\t\t\t\tdata = append(data, &DenseNumFeature{\n\t\t\t\t\tmake([]float64, 0, 0),\n\t\t\t\t\tmake([]bool, 0, 0),\n\t\t\t\t\t\"0\"})\n\n\t\t\t} else {\n\t\t\t\t\/\/doesn't look like a float...add dense catagorical\n\t\t\t\tdata = append(data, &DenseCatFeature{\n\t\t\t\t\t&CatMap{make(map[string]int, 0),\n\t\t\t\t\t\tmake([]string, 0, 0)},\n\t\t\t\t\tmake([]int, 0, 0),\n\t\t\t\t\tmake([]bool, 0, 0),\n\t\t\t\t\t\"0\",\n\t\t\t\t\tfalse})\n\t\t\t}\n\t\t}\n\t\tdata[0].Append(vals[0])\n\n\t\tfor _, v := range vals[1:] {\n\t\t\tparts := strings.Split(v, \":\")\n\t\t\txi, err := strconv.Atoi(parts[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Atoi error: \", err, \" Line \", i, \" Parsing: \", v)\n\t\t\t}\n\t\t\t\/\/pad out the data to include this feature\n\t\t\tfor xi >= len(data) {\n\t\t\t\t\/\/ data = append(data, &SparseNumFeature{\n\t\t\t\t\/\/ \tmake(map[int]float64),\n\t\t\t\t\/\/ \tmake(map[int]bool),\n\t\t\t\t\/\/ \tfmt.Sprintf(\"%v\", len(data))})\n\n\t\t\t\tdata = append(data, &DenseUint8Feature{\n\t\t\t\t\tallthedata[(len(data)-1)*ncases : len(data)*ncases],\n\t\t\t\t\tmake(map[int]bool),\n\t\t\t\t\tfmt.Sprintf(\"%v\", len(data))})\n\t\t\t\t\/*\n\t\t\t\t\tdata = append(data, &SparseUint8Feature{\n\t\t\t\t\t\tmake(map[int]uint8),\n\t\t\t\t\t\tmake(map[int]bool),\n\t\t\t\t\t\tfmt.Sprintf(\"%v\", len(data))})\n\t\t\t\t*\/\n\n\t\t\t}\n\t\t\tdata[xi].PutStr(i, parts[1])\n\n\t\t}\n\t\tlabel := fmt.Sprintf(\"%v\", i)\n\t\tlookup[label] = i\n\t\tlabels = append(labels, label)\n\n\t\ti++\n\n\t}\n\n\tfm := &FeatureMatrix{data, lookup, labels}\n\n\treturn fm\n\n}\n<commit_msg>fix to libsvm parseing<commit_after>package CloudForest\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ParseLibSVM(input io.Reader) *FeatureMatrix {\n\treader := bufio.NewReader(input)\n\n\tdata := make([]Feature, 0, 100)\n\tlookup := make(map[string]int, 0)\n\tlabels := make([]string, 0, 0)\n\n\ti := 0\n\t\/\/ncases := 8100000\n\t\/\/allthedata := make([]uint8, 784*ncases, 784*ncases)\n\tfor {\n\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Print(\"Error:\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\tvals := strings.Fields(line)\n\n\t\tif i == 0 {\n\t\t\tif strings.Contains(vals[0], \".\") {\n\t\t\t\t\/\/looks like a float...add dense float64 feature regression\n\t\t\t\tdata = append(data, &DenseNumFeature{\n\t\t\t\t\tmake([]float64, 0, 0),\n\t\t\t\t\tmake([]bool, 0, 0),\n\t\t\t\t\t\"0\"})\n\n\t\t\t} else {\n\t\t\t\t\/\/doesn't look like a float...add dense catagorical\n\t\t\t\tdata = append(data, &DenseCatFeature{\n\t\t\t\t\t&CatMap{make(map[string]int, 0),\n\t\t\t\t\t\tmake([]string, 0, 0)},\n\t\t\t\t\tmake([]int, 0, 0),\n\t\t\t\t\tmake([]bool, 0, 0),\n\t\t\t\t\t\"0\",\n\t\t\t\t\tfalse})\n\t\t\t}\n\t\t}\n\t\tdata[0].Append(vals[0])\n\n\t\tfor _, v := range vals[1:] {\n\t\t\tparts := strings.Split(v, \":\")\n\t\t\txi, err := strconv.Atoi(parts[0])\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Atoi error: \", err, \" Line \", i, \" Parsing: \", v)\n\t\t\t}\n\t\t\t\/\/pad out the data to include this feature\n\t\t\tfor xi >= len(data) {\n\t\t\t\tdata = append(data, &SparseNumFeature{\n\t\t\t\t\tmake(map[int]float64),\n\t\t\t\t\tmake(map[int]bool),\n\t\t\t\t\tfmt.Sprintf(\"%v\", len(data))})\n\n\t\t\t\t\/*\n\t\t\t\t\tdata = append(data, &DenseUint8Feature{\n\t\t\t\t\t\tallthedata[(len(data)-1)*ncases : len(data)*ncases],\n\t\t\t\t\t\tmake(map[int]bool),\n\t\t\t\t\t\tfmt.Sprintf(\"%v\", len(data))})\n\t\t\t\t\t\/*\n\t\t\t\t\t\tdata = append(data, &SparseUint8Feature{\n\t\t\t\t\t\t\tmake(map[int]uint8),\n\t\t\t\t\t\t\tmake(map[int]bool),\n\t\t\t\t\t\t\tfmt.Sprintf(\"%v\", len(data))})\n\t\t\t\t*\/\n\n\t\t\t}\n\t\t\tdata[xi].PutStr(i, parts[1])\n\n\t\t}\n\t\tlabel := fmt.Sprintf(\"%v\", i)\n\t\tlookup[label] = i\n\t\tlabels = append(labels, label)\n\n\t\ti++\n\n\t}\n\n\tfm := &FeatureMatrix{data, lookup, labels}\n\n\treturn fm\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"compress\/zlib\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\ntype PrtGenerator struct {\n\tenclosedsChan chan *EnclosedChunkJob\n\tcompleteChan chan bool\n\n\toutFile *os.File\n\tw *bufio.Writer\n\tzw io.WriteCloser\n\n\tparticleCount int64\n\ttotal int\n\tboundary *BoundaryLocator\n}\n\nfunc (o *PrtGenerator) Start(outFilename string, total int, maxProcs int, boundary *BoundaryLocator) {\n\to.enclosedsChan = make(chan *EnclosedChunkJob, maxProcs*2)\n\to.completeChan = make(chan bool)\n\to.total = total\n\to.boundary = boundary\n\n\tmaxProcs = 1\n\tfor i := 0; i < maxProcs; i++ {\n\t\tgo o.chunkProcessor()\n\t}\n\n\tvar openErr os.Error\n\n\to.outFile, openErr = os.Create(outFilename)\n\tif openErr != nil {\n\t\tfmt.Fprintln(os.Stderr, openErr) \/\/ TODO: return openErr\n\t\treturn\n\t}\n\n\to.w = bufio.NewWriter(o.outFile)\n\tWriteHeader(o.w, -1, []ChannelDefinition{{\"Position\", 4, 3, 0}, {\"BlockID\", 1, 1, 12}})\n\n\tvar zErr os.Error\n\to.zw, zErr = zlib.NewWriterLevel(o.w, zlib.NoCompression)\n\tif zErr != nil {\n\t\tfmt.Fprintln(os.Stderr, zErr) \/\/ TODO: return zErr\n\t\treturn\n\t}\n}\n\nfunc (o *PrtGenerator) chunkProcessor() {\n\tvar chunkCount = 0\n\tfor {\n\t\tvar job = <-o.enclosedsChan\n\n\t\tvar e = job.enclosed\n\n\t\tfor i := 0; i < len(e.blocks); i += 128 {\n\t\t\tvar x, z = (i \/ 128) \/ 16, (i \/ 128) % 16\n\n\t\t\tvar column = BlockColumn(e.blocks[i : i+128])\n\t\t\tfor y, blockId := range column {\n\t\t\t\tif y < yMin {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch {\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y-1, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y+1, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x-1, y, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x+1, y, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y, z-1)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y, z+1)):\n\t\t\t\t\to.particleCount++\n\t\t\t\t\tvar (\n\t\t\t\t\t\txa = x + e.xPos*16\n\t\t\t\t\t\tya = y - 64\n\t\t\t\t\t\tza = -(z + e.zPos*16)\n\t\t\t\t\t)\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(xa*2))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(za*2))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(ya*2))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, int32(blockId))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tchunkCount++\n\t\tfmt.Printf(\"%4v\/%-4v (%3v,%3v) Particles: %d\\n\", chunkCount, o.total, job.enclosed.xPos, job.enclosed.zPos, o.particleCount)\n\n\t\tif job.last {\n\t\t\to.completeChan <- true\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (o *PrtGenerator) Close() {\n\to.zw.Close()\n\to.w.Flush()\n\tUpdateParticleCount(o.outFile, o.particleCount)\n\to.outFile.Close()\n}\n\nfunc (o *PrtGenerator) GetEnclosedJobsChan() chan *EnclosedChunkJob {\n\treturn o.enclosedsChan\n}\n\nfunc (o *PrtGenerator) GetCompleteChan() chan bool {\n\treturn o.completeChan\n}\n\ntype ChannelDefinition struct {\n\tName string \/\/ max of 31 characters and must meet the regex [a-zA-Z_][0-9a-zA-Z_]*\n\tDataType, Arity, Offset int32\n}\n\n\/\/ http:\/\/software.primefocusworld.com\/software\/support\/krakatoa\/prt_file_format.php\nfunc WriteHeader(w io.Writer, particleCount int64, channels []ChannelDefinition) {\n\t\/\/ Header (56 bytes)\n\tvar magic = []byte{192, 'P', 'R', 'T', '\\r', '\\n', 26, '\\n'}\n\tw.Write(magic)\n\n\tvar headerLength = uint32(56)\n\tbinary.Write(w, binary.LittleEndian, headerLength)\n\n\tvar signature = make([]byte, 32)\n\tcopy(signature, []byte(\"Extensible Particle Format\"))\n\tw.Write(signature)\n\n\tvar version = uint32(1)\n\tbinary.Write(w, binary.LittleEndian, version)\n\n\tbinary.Write(w, binary.LittleEndian, particleCount)\n\n\t\/\/ Reserved bytes (4 bytes)\n\tvar reserved = int32(4)\n\tbinary.Write(w, binary.LittleEndian, reserved)\n\n\t\/\/ Channel definition header (8 bytes)\n\tbinary.Write(w, binary.LittleEndian, int32(len(channels)))\n\n\tvar channelDefinitionSize = int32(44)\n\tbinary.Write(w, binary.LittleEndian, channelDefinitionSize)\n\n\tfor _, channel := range channels {\n\t\tvar nameBytes = make([]byte, 32)\n\t\tcopy(nameBytes, []byte(channel.Name))\n\t\tw.Write(nameBytes)\n\n\t\tbinary.Write(w, binary.LittleEndian, channel.DataType)\n\t\tbinary.Write(w, binary.LittleEndian, channel.Arity)\n\t\tbinary.Write(w, binary.LittleEndian, channel.Offset)\n\t}\n}\n\nfunc UpdateParticleCount(file *os.File, particleCount int64) os.Error {\n\tvar storedOffset, err = file.Seek(0, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = file.Seek(0x30, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = binary.Write(file, binary.LittleEndian, particleCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = file.Seek(storedOffset, 0)\n\treturn err\n}\n<commit_msg>Remove the 2x scaling for prt<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"compress\/zlib\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\ntype PrtGenerator struct {\n\tenclosedsChan chan *EnclosedChunkJob\n\tcompleteChan chan bool\n\n\toutFile *os.File\n\tw *bufio.Writer\n\tzw io.WriteCloser\n\n\tparticleCount int64\n\ttotal int\n\tboundary *BoundaryLocator\n}\n\nfunc (o *PrtGenerator) Start(outFilename string, total int, maxProcs int, boundary *BoundaryLocator) {\n\to.enclosedsChan = make(chan *EnclosedChunkJob, maxProcs*2)\n\to.completeChan = make(chan bool)\n\to.total = total\n\to.boundary = boundary\n\n\tmaxProcs = 1\n\tfor i := 0; i < maxProcs; i++ {\n\t\tgo o.chunkProcessor()\n\t}\n\n\tvar openErr os.Error\n\n\to.outFile, openErr = os.Create(outFilename)\n\tif openErr != nil {\n\t\tfmt.Fprintln(os.Stderr, openErr) \/\/ TODO: return openErr\n\t\treturn\n\t}\n\n\to.w = bufio.NewWriter(o.outFile)\n\tWriteHeader(o.w, -1, []ChannelDefinition{{\"Position\", 4, 3, 0}, {\"BlockID\", 1, 1, 12}})\n\n\tvar zErr os.Error\n\to.zw, zErr = zlib.NewWriterLevel(o.w, zlib.NoCompression)\n\tif zErr != nil {\n\t\tfmt.Fprintln(os.Stderr, zErr) \/\/ TODO: return zErr\n\t\treturn\n\t}\n}\n\nfunc (o *PrtGenerator) chunkProcessor() {\n\tvar chunkCount = 0\n\tfor {\n\t\tvar job = <-o.enclosedsChan\n\n\t\tvar e = job.enclosed\n\n\t\tfor i := 0; i < len(e.blocks); i += 128 {\n\t\t\tvar x, z = (i \/ 128) \/ 16, (i \/ 128) % 16\n\n\t\t\tvar column = BlockColumn(e.blocks[i : i+128])\n\t\t\tfor y, blockId := range column {\n\t\t\t\tif y < yMin {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch {\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y-1, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y+1, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x-1, y, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x+1, y, z)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y, z-1)):\n\t\t\t\t\tfallthrough\n\t\t\t\tcase o.boundary.IsBoundary(blockId, e.Get(x, y, z+1)):\n\t\t\t\t\to.particleCount++\n\t\t\t\t\tvar (\n\t\t\t\t\t\txa = x + e.xPos*16\n\t\t\t\t\t\tya = y - 64\n\t\t\t\t\t\tza = -(z + e.zPos*16)\n\t\t\t\t\t)\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(xa))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(za))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, float32(ya))\n\t\t\t\t\tbinary.Write(o.zw, binary.LittleEndian, int32(blockId))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tchunkCount++\n\t\tfmt.Printf(\"%4v\/%-4v (%3v,%3v) Particles: %d\\n\", chunkCount, o.total, job.enclosed.xPos, job.enclosed.zPos, o.particleCount)\n\n\t\tif job.last {\n\t\t\to.completeChan <- true\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (o *PrtGenerator) Close() {\n\to.zw.Close()\n\to.w.Flush()\n\tUpdateParticleCount(o.outFile, o.particleCount)\n\to.outFile.Close()\n}\n\nfunc (o *PrtGenerator) GetEnclosedJobsChan() chan *EnclosedChunkJob {\n\treturn o.enclosedsChan\n}\n\nfunc (o *PrtGenerator) GetCompleteChan() chan bool {\n\treturn o.completeChan\n}\n\ntype ChannelDefinition struct {\n\tName string \/\/ max of 31 characters and must meet the regex [a-zA-Z_][0-9a-zA-Z_]*\n\tDataType, Arity, Offset int32\n}\n\n\/\/ http:\/\/software.primefocusworld.com\/software\/support\/krakatoa\/prt_file_format.php\nfunc WriteHeader(w io.Writer, particleCount int64, channels []ChannelDefinition) {\n\t\/\/ Header (56 bytes)\n\tvar magic = []byte{192, 'P', 'R', 'T', '\\r', '\\n', 26, '\\n'}\n\tw.Write(magic)\n\n\tvar headerLength = uint32(56)\n\tbinary.Write(w, binary.LittleEndian, headerLength)\n\n\tvar signature = make([]byte, 32)\n\tcopy(signature, []byte(\"Extensible Particle Format\"))\n\tw.Write(signature)\n\n\tvar version = uint32(1)\n\tbinary.Write(w, binary.LittleEndian, version)\n\n\tbinary.Write(w, binary.LittleEndian, particleCount)\n\n\t\/\/ Reserved bytes (4 bytes)\n\tvar reserved = int32(4)\n\tbinary.Write(w, binary.LittleEndian, reserved)\n\n\t\/\/ Channel definition header (8 bytes)\n\tbinary.Write(w, binary.LittleEndian, int32(len(channels)))\n\n\tvar channelDefinitionSize = int32(44)\n\tbinary.Write(w, binary.LittleEndian, channelDefinitionSize)\n\n\tfor _, channel := range channels {\n\t\tvar nameBytes = make([]byte, 32)\n\t\tcopy(nameBytes, []byte(channel.Name))\n\t\tw.Write(nameBytes)\n\n\t\tbinary.Write(w, binary.LittleEndian, channel.DataType)\n\t\tbinary.Write(w, binary.LittleEndian, channel.Arity)\n\t\tbinary.Write(w, binary.LittleEndian, channel.Offset)\n\t}\n}\n\nfunc UpdateParticleCount(file *os.File, particleCount int64) os.Error {\n\tvar storedOffset, err = file.Seek(0, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = file.Seek(0x30, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = binary.Write(file, binary.LittleEndian, particleCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = file.Seek(storedOffset, 0)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !integration\n\/\/ Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage engine\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/api\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/engine\/dockerstate\/mocks\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/engine\/testdata\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/statemanager\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/utils\/ttime\/mocks\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n)\n\nfunc TestCleanupTask(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskStopped)\n\tcontainer := mTask.Containers[0]\n\tdockerContainer := &api.DockerContainer{\n\t\tDockerName: \"dockerContainer\",\n\t}\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\n\t\/\/ Expectations to verify that the task gets removed\n\tmockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true)\n\tmockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil)\n\tmockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil)\n\tmockState.EXPECT().RemoveTask(mTask.Task)\n\tmTask.cleanupTask(taskStoppedDuration)\n}\n\nfunc TestCleanupTaskWaitsForStoppedSent(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskRunning)\n\tcontainer := mTask.Containers[0]\n\tdockerContainer := &api.DockerContainer{\n\t\tDockerName: \"dockerContainer\",\n\t}\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\ttimesCalled := 0\n\tcallsExpected := 3\n\tmockTime.EXPECT().Sleep(gomock.Any()).AnyTimes().Do(func(_ interface{}) {\n\t\ttimesCalled++\n\t\tif timesCalled == callsExpected {\n\t\t\tmTask.SetSentStatus(api.TaskStopped)\n\t\t} else if timesCalled > callsExpected {\n\t\t\tt.Errorf(\"Sleep called too many times, called %d but expected %d\", timesCalled, callsExpected)\n\t\t}\n\t})\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n\n\t\/\/ Expectations to verify that the task gets removed\n\tmockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true)\n\tmockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil)\n\tmockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil)\n\tmockState.EXPECT().RemoveTask(mTask.Task)\n\tmTask.cleanupTask(taskStoppedDuration)\n\tassert.Equal(t, api.TaskStopped, mTask.GetSentStatus())\n}\n\nfunc TestCleanupTaskGivesUpIfWaitingTooLong(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskRunning)\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\t_maxStoppedWaitTimes = 10\n\tdefer func() {\n\t\t\/\/ reset\n\t\t_maxStoppedWaitTimes = int(maxStoppedWaitTimes)\n\t}()\n\tmockTime.EXPECT().Sleep(gomock.Any()).Times(_maxStoppedWaitTimes)\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n\n\t\/\/ No cleanup expected\n\tmTask.cleanupTask(taskStoppedDuration)\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n}\n<commit_msg>engine: add tests for `managedTask.containerNextState`<commit_after>\/\/ +build !integration\n\/\/ Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n\/\/ not use this file except in compliance with the License. A copy of the\n\/\/ License is located at\n\/\/\n\/\/\thttp:\/\/aws.amazon.com\/apache2.0\/\n\/\/\n\/\/ or in the \"license\" file accompanying this file. This file is distributed\n\/\/ on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\npackage engine\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/api\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/engine\/dockerstate\/mocks\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/engine\/testdata\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/statemanager\"\n\t\"github.com\/aws\/amazon-ecs-agent\/agent\/utils\/ttime\/mocks\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n)\n\nfunc TestContainerNextStateFromNone(t *testing.T) {\n\t\/\/ NONE -> RUNNING transition is allowed and actionable, when desired is Running\n\t\/\/ The exptected next status is Pulled\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerStatusNone, api.ContainerRunning,\n\t\tapi.ContainerPulled, true, true)\n\n\t\/\/ NONE -> NONE transition is not be allowed and is not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerStatusNone, api.ContainerStatusNone,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ NONE -> STOPPED transition will result in STOPPED and is allowed, but not\n\t\/\/ actionable, when desired is STOPPED\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerStatusNone, api.ContainerStopped,\n\t\tapi.ContainerStopped, false, true)\n}\n\nfunc TestContainerNextStateFromPulled(t *testing.T) {\n\t\/\/ PULLED -> RUNNING transition is allowed and actionable, when desired is Running\n\t\/\/ The exptected next status is Created\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerPulled, api.ContainerRunning,\n\t\tapi.ContainerCreated, true, true)\n\n\t\/\/ PULLED -> PULLED transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerPulled, api.ContainerPulled,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ PULLED -> NONE transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerPulled, api.ContainerStatusNone,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ PULLED -> STOPPED transition will result in STOPPED and is allowed, but not\n\t\/\/ actionable, when desired is STOPPED\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerPulled, api.ContainerStopped,\n\t\tapi.ContainerStopped, false, true)\n}\n\nfunc TestContainerNextStateFromCreated(t *testing.T) {\n\t\/\/ CREATED -> RUNNING transition is allowed and actionable, when desired is Running\n\t\/\/ The exptected next status is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerCreated, api.ContainerRunning,\n\t\tapi.ContainerRunning, true, true)\n\n\t\/\/ CREATED -> CREATED transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerCreated, api.ContainerCreated,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ CREATED -> NONE transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerCreated, api.ContainerStatusNone,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ CREATED -> PULLED transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerCreated, api.ContainerPulled,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ CREATED -> STOPPED transition will result in STOPPED and is allowed, but not\n\t\/\/ actionable, when desired is STOPPED\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerCreated, api.ContainerStopped,\n\t\tapi.ContainerStopped, false, true)\n}\n\nfunc TestContainerNextStateFromRunning(t *testing.T) {\n\t\/\/ RUNNING -> STOPPED transition is allowed and actionable, when desired is Running\n\t\/\/ The exptected next status is STOPPED\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerRunning, api.ContainerStopped,\n\t\tapi.ContainerStopped, true, true)\n\n\t\/\/ RUNNING -> RUNNING transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerRunning, api.ContainerRunning,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ RUNNING -> NONE transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerRunning, api.ContainerStatusNone,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ RUNNING -> PULLED transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerRunning, api.ContainerPulled,\n\t\tapi.ContainerStatusNone, false, false)\n\n\t\/\/ RUNNING -> CREATED transition is not allowed and not actionable,\n\t\/\/ when desired is Running\n\ttestContainerNextStateAssertions(t,\n\t\tapi.ContainerRunning, api.ContainerCreated,\n\t\tapi.ContainerStatusNone, false, false)\n}\n\nfunc testContainerNextStateAssertions(t *testing.T,\n\tcontainerCurrentStatus api.ContainerStatus,\n\tcontainerDesiredStatus api.ContainerStatus,\n\texpectedContainerStatus api.ContainerStatus,\n\texpectedTransitionActionable bool,\n\texpectedTransitionPossible bool) {\n\tcontainer := &api.Container{\n\t\tDesiredStatusUnsafe: containerDesiredStatus,\n\t\tKnownStatusUnsafe: containerCurrentStatus,\n\t}\n\ttask := &managedTask{\n\t\tTask: &api.Task{\n\t\t\tContainers: []*api.Container{\n\t\t\t\tcontainer,\n\t\t\t},\n\t\t\tDesiredStatusUnsafe: api.TaskRunning,\n\t\t},\n\t}\n\tnextStatus, actionRequired, possible := task.containerNextState(container)\n\tassert.Equal(t, nextStatus, expectedContainerStatus,\n\t\t\"Retrieved next state [%s] != Expected next state [%s]\",\n\t\tnextStatus.String(), expectedContainerStatus.String())\n\tassert.Equal(t, actionRequired, expectedTransitionActionable)\n\tassert.Equal(t, possible, expectedTransitionPossible)\n}\n\nfunc TestCleanupTask(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskStopped)\n\tcontainer := mTask.Containers[0]\n\tdockerContainer := &api.DockerContainer{\n\t\tDockerName: \"dockerContainer\",\n\t}\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\n\t\/\/ Expectations to verify that the task gets removed\n\tmockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true)\n\tmockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil)\n\tmockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil)\n\tmockState.EXPECT().RemoveTask(mTask.Task)\n\tmTask.cleanupTask(taskStoppedDuration)\n}\n\nfunc TestCleanupTaskWaitsForStoppedSent(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskRunning)\n\tcontainer := mTask.Containers[0]\n\tdockerContainer := &api.DockerContainer{\n\t\tDockerName: \"dockerContainer\",\n\t}\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\ttimesCalled := 0\n\tcallsExpected := 3\n\tmockTime.EXPECT().Sleep(gomock.Any()).AnyTimes().Do(func(_ interface{}) {\n\t\ttimesCalled++\n\t\tif timesCalled == callsExpected {\n\t\t\tmTask.SetSentStatus(api.TaskStopped)\n\t\t} else if timesCalled > callsExpected {\n\t\t\tt.Errorf(\"Sleep called too many times, called %d but expected %d\", timesCalled, callsExpected)\n\t\t}\n\t})\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n\n\t\/\/ Expectations to verify that the task gets removed\n\tmockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*api.DockerContainer{container.Name: dockerContainer}, true)\n\tmockClient.EXPECT().RemoveContainer(dockerContainer.DockerName, gomock.Any()).Return(nil)\n\tmockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil)\n\tmockState.EXPECT().RemoveTask(mTask.Task)\n\tmTask.cleanupTask(taskStoppedDuration)\n\tassert.Equal(t, api.TaskStopped, mTask.GetSentStatus())\n}\n\nfunc TestCleanupTaskGivesUpIfWaitingTooLong(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tmockTime := mock_ttime.NewMockTime(ctrl)\n\tmockState := mock_dockerstate.NewMockTaskEngineState(ctrl)\n\tmockClient := NewMockDockerClient(ctrl)\n\tmockImageManager := NewMockImageManager(ctrl)\n\tdefer ctrl.Finish()\n\n\ttaskEngine := &DockerTaskEngine{\n\t\tsaver: statemanager.NewNoopStateManager(),\n\t\tstate: mockState,\n\t\tclient: mockClient,\n\t\timageManager: mockImageManager,\n\t}\n\tmTask := &managedTask{\n\t\tTask: testdata.LoadTask(\"sleep5\"),\n\t\t_time: mockTime,\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t}\n\tmTask.SetKnownStatus(api.TaskStopped)\n\tmTask.SetSentStatus(api.TaskRunning)\n\n\t\/\/ Expectations for triggering cleanup\n\tnow := mTask.GetKnownStatusTime()\n\ttaskStoppedDuration := 1 * time.Minute\n\tmockTime.EXPECT().Now().Return(now).AnyTimes()\n\tcleanupTimeTrigger := make(chan time.Time)\n\tmockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger)\n\tgo func() {\n\t\tcleanupTimeTrigger <- now\n\t}()\n\t_maxStoppedWaitTimes = 10\n\tdefer func() {\n\t\t\/\/ reset\n\t\t_maxStoppedWaitTimes = int(maxStoppedWaitTimes)\n\t}()\n\tmockTime.EXPECT().Sleep(gomock.Any()).Times(_maxStoppedWaitTimes)\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n\n\t\/\/ No cleanup expected\n\tmTask.cleanupTask(taskStoppedDuration)\n\tassert.Equal(t, api.TaskRunning, mTask.GetSentStatus())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Red-Black tree implementation. Each tree node (entry) contains Key and Value.\n\/\/ Entries in the RbMap are always ordered according to the key value, so\n\/\/ it can be used as ordered set (std::set in C++) or ordered map (std::map).\n\/\/ Note: all methods are not goroutine-safe.\npackage rbt\n\n\/\/ import ( \"strings\" ; \"fmt\" )\n\ntype RbMap struct {\n less LessFunc\n root *RbMapNode\n size int\n}\n\n\/\/ Red-black tree node, contains key and value. It is safe to overwrite Value\n\/\/ in-place.\ntype RbMapNode struct {\n left, right, parent *RbMapNode\n \/\/ key\n key interface{}\n Value interface{}\n isred bool \/\/ true == red, false == black\n}\n\n\/\/ LessFunc is a key comparsion function. \n\/\/ Must return true if k1 < k2, false otherwise\ntype LessFunc func(k1, k2 interface{}) bool\n\n\/\/ Create new RbMap with provided comparsion function. The latter must\n\/\/ return true if k1 < k2, false otherwise.\nfunc NewRbMap(lessFunc LessFunc) *RbMap {\n return &RbMap{ less: lessFunc }\n}\n\n\/\/ Find node by key and return its Value, returns nil if key not found.\nfunc (t *RbMap) Find(key interface{}) interface{} {\n n := t.FindNode(key)\n if n != nil {\n return n.Value\n }\n return nil\n}\n\n\/\/ Find a node by key, returns nil if not found\nfunc (t *RbMap) FindNode(key interface{}) *RbMapNode {\n x := t.root\n for x != nil {\n if t.less(x.key, key) {\n x = x.right\n } else {\n if t.less(key, x.key) {\n x = x.left\n } else {\n return x\n }\n }\n }\n return nil\n}\n\n\/\/ Get last node in the tree (with highest key value).\nfunc (t *RbMap) Last() *RbMapNode {\n if nil == t.root {\n return nil\n }\n return t.root.max()\n}\n\n\/\/ Get first node in the tree (with lowest key value).\nfunc (t *RbMap) First() *RbMapNode {\n if nil == t.root {\n return nil\n }\n return t.root.min()\n}\n\n\/\/ Returns previous node in the tree, in descending key value order.\nfunc (x *RbMapNode) Next() *RbMapNode {\n if x.right != nil {\n return x.right.min()\n }\n y := x.parent\n for y != nil && x == y.right {\n x = y\n y = y.parent\n }\n return y\n}\n\n\/\/ Returns key associated with tree node.\nfunc (x *RbMapNode) Key() interface{} {\n return x.key\n}\n\n\/\/ Returns next node in the tree, in ascending key value order.\nfunc (x *RbMapNode) Prev() *RbMapNode {\n if x.left != nil {\n return x.left.max()\n }\n y := x.parent\n for y != nil && x == y.left {\n x = y\n y = y.parent\n }\n return y\n}\n\n\/\/ Returns number of entries in the tree. This function returns internal\n\/\/ counter, therefore it is fast and safe to use in loops.\nfunc (t *RbMap) Size() int {\n return t.size\n}\n\n\/\/ Remove all entries in the tree.\nfunc (t *RbMap) Clear() {\n t.root = nil\n t.size = 0\n}\n\n\/\/ Insert key and value into the tree. If new entry is created, returns true.\n\/\/ If key already exists, value gets replaced and Insert returns false.\nfunc (t *RbMap) Insert(key interface{}, value interface{}) bool {\n x := t.root\n var y *RbMapNode\n\n for x != nil {\n y = x\n if t.less(x.key, key) {\n x = x.right\n } else if t.less(key, x.key) {\n x = x.left\n } else {\n x.Value = value\n return false \/\/ overwrite value\n }\n }\n z := &RbMapNode{parent: y, isred: true, key: key, Value: value}\n if y == nil {\n t.root = z\n } else {\n if t.less(key, y.key) {\n y.left = z\n } else {\n y.right = z\n }\n }\n t.rb_insert_fixup(z)\n t.size++\n return true\n}\n\n\/\/ Delete tree node by key. Returns true if key was found and deleted.\nfunc (t *RbMap) Delete(key interface{}) bool {\n if z := t.FindNode(key); z != nil {\n t.DeleteNode(z)\n return true\n }\n return false\n}\n\n\/\/ Delete tree node.\nfunc (t *RbMap) DeleteNode(n *RbMapNode) {\n var x *RbMapNode\n if nil != n.left && nil != n.right {\n x = n.left.max()\n n.key, n.Value = x.key, x.Value\n n = x\n }\n if nil == n.right {\n x = n.left\n } else {\n x = n.right\n }\n if isBlack(n) {\n n.isred = isRed(x)\n if nil != n.parent {\n t.rb_delete_fixup(n)\n }\n }\n t.rbreplace(n, x)\n if isRed(t.root) {\n t.root.isred = false\n }\n t.size--\n}\n\nfunc (t* RbMap) rb_delete_fixup(n *RbMapNode) {\n var s, p *RbMapNode\n for {\n s, p = n.sibling(), n.parent\n if isRed(s) {\n p.isred, s.isred = true, false\n if n == p.left {\n t.left_rotate(p)\n s = p.right\n } else {\n t.right_rotate(p)\n s = p.left\n }\n }\n if isBlack(p) && isBlack(s) && isBlack(s.left) && isBlack(s.right) {\n s.isred = true\n if nil != p.parent {\n n = p\n continue\n }\n return\n } else {\n break\n }\n }\n if isRed(n.parent) && isBlack(s) && isBlack(s.left) && isBlack(s.right) {\n s.isred, n.parent.isred = true, false\n } else {\n if isBlack(s) {\n if n == n.parent.left && isRed(s.left) && isBlack(s.right) {\n s.isred, s.left.isred = true, false\n t.right_rotate(s)\n s = n.parent.right\n } else if n == n.parent.right && isRed(s.right) && isBlack(s.left) {\n s.isred, s.right.isred = true, false\n t.left_rotate(s)\n s = n.parent.left\n }\n }\n s.isred = n.parent.isred\n n.parent.isred = false\n if n == n.parent.left {\n s.right.isred = false\n t.left_rotate(n.parent)\n } else {\n s.left.isred = false\n t.right_rotate(n.parent)\n }\n }\n}\n\nfunc (t *RbMap) rb_insert_fixup(x *RbMapNode) {\n var y *RbMapNode\n for isRed(x.parent) {\n if x.parent == x.parent.parent.left {\n y = x.parent.parent.right\n if isRed(y) {\n x.parent.isred, y.isred = false, false\n x.parent.parent.isred = true\n x = x.parent.parent\n } else {\n if x == x.parent.right {\n x = x.parent\n t.left_rotate(x)\n }\n x.parent.isred, x.parent.parent.isred = false, true\n t.right_rotate(x.parent.parent)\n }\n } else {\n y = x.parent.parent.left\n if isRed(y) {\n x.parent.isred, y.isred = false, false\n x.parent.parent.isred = true\n x = x.parent.parent\n } else {\n if x == x.parent.left {\n x = x.parent\n t.right_rotate(x)\n }\n x.parent.isred, x.parent.parent.isred = false, true\n t.left_rotate(x.parent.parent)\n }\n }\n }\n t.root.isred = false\n}\n\nfunc (n *RbMapNode) sibling() *RbMapNode {\n if n == n.parent.left {\n return n.parent.right\n } else {\n return n.parent.left\n }\n}\n\nfunc (n *RbMapNode) min() *RbMapNode {\n for n.left != nil {\n n = n.left\n }\n return n\n}\n\nfunc (n *RbMapNode) max() *RbMapNode {\n for n.right != nil {\n n = n.right\n }\n return n\n}\n\nfunc (t *RbMap) left_rotate(n *RbMapNode) {\n r := n.right\n t.rbreplace(n, r)\n n.right = r.left\n if nil != r.left {\n r.left.parent = n\n } \n r.left, n.parent = n, r\n}\n\nfunc (t *RbMap) right_rotate(n *RbMapNode) {\n l := n.left\n t.rbreplace(n, l)\n n.left = l.right\n if nil != l.right {\n l.right.parent = n\n }\n l.right, n.parent = n, l\n}\n\nfunc (t *RbMap) rbreplace(u, v *RbMapNode) {\n parent := u.parent\n if parent == nil {\n t.root = v\n } else if u == parent.left {\n parent.left = v\n } else {\n parent.right = v\n }\n if v != nil {\n v.parent = parent\n }\n}\n\nfunc isBlack(n *RbMapNode) bool {\n return nil == n || !n.isred\n}\n\nfunc isRed(n *RbMapNode) bool {\n return nil != n && n.isred\n}\n\n\/*\n\n\/\/ uncomment for debugging only, because this adds dependency on strings, fmt\n\nfunc (n *RbMapNode) Dump(indent int, tag string) {\n idn := strings.Repeat(\" \", indent*4)\n c := 'B'\n if n.isred { c = 'R' }\n fmt.Printf(\"%s%s[%v:%v]%c\\n\", idn, tag, n.Key(), n.Value, c)\n if n.left != nil {\n n.left.Dump(indent + 1, \"L:\")\n }\n if n.right != nil {\n n.right.Dump(indent + 1, \"R:\")\n }\n}\n\nfunc (t *RbMap) Dump() {\n if t.root == nil {\n fmt.Printf(\"<NULL TREE>\\n\")\n } else {\n t.root.Dump(0, \"*\")\n }\n}\n*\/\n\n\/\/ Internal tree consistency check used by tests. \nfunc (t *RbMap) verify() {\n if nil == t.root { return }\n if isRed(t.root) { panic(\"root is red\") }\n verify1(t.root)\n verify2(t.root)\n}\n\nfunc verify1(n *RbMapNode) {\n if isRed(n) {\n if !isBlack(n.left) { panic(\"left is not black\") }\n if !isBlack(n.right) { panic(\"right is not black\") }\n if !isBlack(n.parent) { panic(\"parent is not black\") }\n } \n if nil == n { return }\n verify1(n.left)\n verify1(n.right)\n}\n\nfunc verify2(n *RbMapNode) {\n black_count_path := -1\n verify2h(n, 0, &black_count_path)\n}\n\nfunc verify2h(n *RbMapNode, black_count int, path_black_count *int) {\n if isBlack(n) {\n black_count++\n }\n if n == nil {\n if *path_black_count == -1 {\n *path_black_count = black_count;\n } else {\n if black_count != *path_black_count {\n panic(\"black count\")\n }\n }\n return\n }\n verify2h(n.left, black_count, path_black_count)\n verify2h(n.right, black_count, path_black_count)\n}\n\n<commit_msg>documentation fixes<commit_after>\/\/ Red-Black tree implementation. Each tree node (entry) contains Key and Value.\n\/\/ Entries in the RbMap are always ordered according to the key value, so\n\/\/ it can be used as ordered set (std::set in C++) or ordered map (std::map).\n\/\/ Note: all methods are not goroutine-safe.\npackage rbt\n\n\/\/ import ( \"strings\" ; \"fmt\" )\n\ntype RbMap struct {\n less LessFunc\n root *RbMapNode\n size int\n}\n\n\/\/ Red-black tree node, contains key and value. It is safe to overwrite Value\n\/\/ in-place.\ntype RbMapNode struct {\n left, right, parent *RbMapNode\n \/\/ key\n key interface{}\n Value interface{}\n isred bool \/\/ true == red, false == black\n}\n\n\/\/ LessFunc is a key comparsion function. \n\/\/ Must return true if k1 < k2, false otherwise.\ntype LessFunc func(k1, k2 interface{}) bool\n\n\/\/ Create new RbMap with provided key comparsion function. \nfunc NewRbMap(lessFunc LessFunc) *RbMap {\n return &RbMap{ less: lessFunc }\n}\n\n\/\/ Find node by key and return its Value, returns nil if key not found.\nfunc (t *RbMap) Find(key interface{}) interface{} {\n n := t.FindNode(key)\n if n != nil {\n return n.Value\n }\n return nil\n}\n\n\/\/ Find a node by key, returns nil if not found.\nfunc (t *RbMap) FindNode(key interface{}) *RbMapNode {\n x := t.root\n for x != nil {\n if t.less(x.key, key) {\n x = x.right\n } else {\n if t.less(key, x.key) {\n x = x.left\n } else {\n return x\n }\n }\n }\n return nil\n}\n\n\/\/ Get last node in the tree (with highest key value).\nfunc (t *RbMap) Last() *RbMapNode {\n if nil == t.root {\n return nil\n }\n return t.root.max()\n}\n\n\/\/ Get first node in the tree (with lowest key value).\nfunc (t *RbMap) First() *RbMapNode {\n if nil == t.root {\n return nil\n }\n return t.root.min()\n}\n\n\/\/ Get next node, in ascending key value order.\nfunc (x *RbMapNode) Next() *RbMapNode {\n if x.right != nil {\n return x.right.min()\n }\n y := x.parent\n for y != nil && x == y.right {\n x = y\n y = y.parent\n }\n return y\n}\n\n\/\/ Returns key associated with tree node.\nfunc (x *RbMapNode) Key() interface{} {\n return x.key\n}\n\n\/\/ Get previous node, in descending key value order.\nfunc (x *RbMapNode) Prev() *RbMapNode {\n if x.left != nil {\n return x.left.max()\n }\n y := x.parent\n for y != nil && x == y.left {\n x = y\n y = y.parent\n }\n return y\n}\n\n\/\/ Returns number of entries in the tree. This function returns internal\n\/\/ counter, therefore it is fast and safe to use in loops.\nfunc (t *RbMap) Size() int {\n return t.size\n}\n\n\/\/ Remove all entries in the tree.\nfunc (t *RbMap) Clear() {\n t.root = nil\n t.size = 0\n}\n\n\/\/ Insert key and value into the tree. If new entry is created, returns true.\n\/\/ If key already exists, value gets replaced and Insert returns false.\nfunc (t *RbMap) Insert(key interface{}, value interface{}) bool {\n x := t.root\n var y *RbMapNode\n\n for x != nil {\n y = x\n if t.less(x.key, key) {\n x = x.right\n } else if t.less(key, x.key) {\n x = x.left\n } else {\n x.Value = value\n return false \/\/ overwrite value\n }\n }\n z := &RbMapNode{parent: y, isred: true, key: key, Value: value}\n if y == nil {\n t.root = z\n } else {\n if t.less(key, y.key) {\n y.left = z\n } else {\n y.right = z\n }\n }\n t.rb_insert_fixup(z)\n t.size++\n return true\n}\n\n\/\/ Delete tree node by key. Returns true if key was found and deleted.\nfunc (t *RbMap) Delete(key interface{}) bool {\n if z := t.FindNode(key); z != nil {\n t.DeleteNode(z)\n return true\n }\n return false\n}\n\n\/\/ Delete tree node.\nfunc (t *RbMap) DeleteNode(n *RbMapNode) {\n var x *RbMapNode\n if nil != n.left && nil != n.right {\n x = n.left.max()\n n.key, n.Value = x.key, x.Value\n n = x\n }\n if nil == n.right {\n x = n.left\n } else {\n x = n.right\n }\n if isBlack(n) {\n n.isred = isRed(x)\n if nil != n.parent {\n t.rb_delete_fixup(n)\n }\n }\n t.rbreplace(n, x)\n if isRed(t.root) {\n t.root.isred = false\n }\n t.size--\n}\n\nfunc (t* RbMap) rb_delete_fixup(n *RbMapNode) {\n var s, p *RbMapNode\n for {\n s, p = n.sibling(), n.parent\n if isRed(s) {\n p.isred, s.isred = true, false\n if n == p.left {\n t.left_rotate(p)\n s = p.right\n } else {\n t.right_rotate(p)\n s = p.left\n }\n }\n if isBlack(p) && isBlack(s) && isBlack(s.left) && isBlack(s.right) {\n s.isred = true\n if nil != p.parent {\n n = p\n continue\n }\n return\n } else {\n break\n }\n }\n if isRed(n.parent) && isBlack(s) && isBlack(s.left) && isBlack(s.right) {\n s.isred, n.parent.isred = true, false\n } else {\n if isBlack(s) {\n if n == n.parent.left && isRed(s.left) && isBlack(s.right) {\n s.isred, s.left.isred = true, false\n t.right_rotate(s)\n s = n.parent.right\n } else if n == n.parent.right && isRed(s.right) && isBlack(s.left) {\n s.isred, s.right.isred = true, false\n t.left_rotate(s)\n s = n.parent.left\n }\n }\n s.isred = n.parent.isred\n n.parent.isred = false\n if n == n.parent.left {\n s.right.isred = false\n t.left_rotate(n.parent)\n } else {\n s.left.isred = false\n t.right_rotate(n.parent)\n }\n }\n}\n\nfunc (t *RbMap) rb_insert_fixup(x *RbMapNode) {\n var y *RbMapNode\n for isRed(x.parent) {\n if x.parent == x.parent.parent.left {\n y = x.parent.parent.right\n if isRed(y) {\n x.parent.isred, y.isred = false, false\n x.parent.parent.isred = true\n x = x.parent.parent\n } else {\n if x == x.parent.right {\n x = x.parent\n t.left_rotate(x)\n }\n x.parent.isred, x.parent.parent.isred = false, true\n t.right_rotate(x.parent.parent)\n }\n } else {\n y = x.parent.parent.left\n if isRed(y) {\n x.parent.isred, y.isred = false, false\n x.parent.parent.isred = true\n x = x.parent.parent\n } else {\n if x == x.parent.left {\n x = x.parent\n t.right_rotate(x)\n }\n x.parent.isred, x.parent.parent.isred = false, true\n t.left_rotate(x.parent.parent)\n }\n }\n }\n t.root.isred = false\n}\n\nfunc (n *RbMapNode) sibling() *RbMapNode {\n if n == n.parent.left {\n return n.parent.right\n } else {\n return n.parent.left\n }\n}\n\nfunc (n *RbMapNode) min() *RbMapNode {\n for n.left != nil {\n n = n.left\n }\n return n\n}\n\nfunc (n *RbMapNode) max() *RbMapNode {\n for n.right != nil {\n n = n.right\n }\n return n\n}\n\nfunc (t *RbMap) left_rotate(n *RbMapNode) {\n r := n.right\n t.rbreplace(n, r)\n n.right = r.left\n if nil != r.left {\n r.left.parent = n\n } \n r.left, n.parent = n, r\n}\n\nfunc (t *RbMap) right_rotate(n *RbMapNode) {\n l := n.left\n t.rbreplace(n, l)\n n.left = l.right\n if nil != l.right {\n l.right.parent = n\n }\n l.right, n.parent = n, l\n}\n\nfunc (t *RbMap) rbreplace(u, v *RbMapNode) {\n parent := u.parent\n if parent == nil {\n t.root = v\n } else if u == parent.left {\n parent.left = v\n } else {\n parent.right = v\n }\n if v != nil {\n v.parent = parent\n }\n}\n\nfunc isBlack(n *RbMapNode) bool {\n return nil == n || !n.isred\n}\n\nfunc isRed(n *RbMapNode) bool {\n return nil != n && n.isred\n}\n\n\/*\n\n\/\/ uncomment for debugging only, because this adds dependency on strings, fmt\n\nfunc (n *RbMapNode) Dump(indent int, tag string) {\n idn := strings.Repeat(\" \", indent*4)\n c := 'B'\n if n.isred { c = 'R' }\n fmt.Printf(\"%s%s[%v:%v]%c\\n\", idn, tag, n.Key(), n.Value, c)\n if n.left != nil {\n n.left.Dump(indent + 1, \"L:\")\n }\n if n.right != nil {\n n.right.Dump(indent + 1, \"R:\")\n }\n}\n\nfunc (t *RbMap) Dump() {\n if t.root == nil {\n fmt.Printf(\"<NULL TREE>\\n\")\n } else {\n t.root.Dump(0, \"*\")\n }\n}\n*\/\n\n\/\/ Internal tree consistency check used by tests. \nfunc (t *RbMap) verify() {\n if nil == t.root { return }\n if isRed(t.root) { panic(\"root is red\") }\n verify1(t.root)\n verify2(t.root)\n}\n\nfunc verify1(n *RbMapNode) {\n if isRed(n) {\n if !isBlack(n.left) { panic(\"left is not black\") }\n if !isBlack(n.right) { panic(\"right is not black\") }\n if !isBlack(n.parent) { panic(\"parent is not black\") }\n } \n if nil == n { return }\n verify1(n.left)\n verify1(n.right)\n}\n\nfunc verify2(n *RbMapNode) {\n black_count_path := -1\n verify2h(n, 0, &black_count_path)\n}\n\nfunc verify2h(n *RbMapNode, black_count int, path_black_count *int) {\n if isBlack(n) {\n black_count++\n }\n if n == nil {\n if *path_black_count == -1 {\n *path_black_count = black_count;\n } else {\n if black_count != *path_black_count {\n panic(\"black count\")\n }\n }\n return\n }\n verify2h(n.left, black_count, path_black_count)\n verify2h(n.right, black_count, path_black_count)\n}\n\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\trandom *rand.Rand\n\tgetRand sync.Once\n\n\tgetLocation sync.Once\n\n\tnow time.Time\n\tgetNow sync.Once\n)\n\nfunc GetRand() *rand.Rand {\n\tgetRand.Do(func() {\n\t\trandom = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t})\n\treturn random\n}\n\nfunc GetLocation() *time.Location {\n\tgetLocation.Do(func() {\n\t\tloc, _ := time.LoadLocation(GetFlags().Location)\n\t\ttime.Local = loc\n\t})\n\treturn time.Local\n}\n\nfunc Now() time.Time {\n\tgetNow.Do(func() {\n\t\ttimeString := GetFlags().Now\n\t\tif len(timeString) < 1 {\n\t\t\tnow = time.Now()\n\t\t} else {\n\t\t\tt, _ := time.ParseInLocation(\"2006-01-02 15:04:05.999999999\", timeString, GetLocation())\n\t\t\tnow = t\n\t\t}\n\t})\n\treturn now\n}\n<commit_msg>Release v0.3.3<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\trandom *rand.Rand\n\tgetRand sync.Once\n\n\tgetLocation sync.Once\n\n\tnow time.Time\n\tgetNow sync.Once\n)\n\nfunc GetRand() *rand.Rand {\n\tgetRand.Do(func() {\n\t\trandom = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t})\n\treturn random\n}\n\nfunc GetLocation() *time.Location {\n\tgetLocation.Do(func() {\n\t\tloc, _ := time.LoadLocation(GetFlags().Location)\n\t\ttime.Local = loc\n\t\tfmt.Println(GetFlags().Location)\n\t})\n\treturn time.Local\n}\n\nfunc Now() time.Time {\n\tgetNow.Do(func() {\n\t\ttimeString := GetFlags().Now\n\t\tif len(timeString) < 1 {\n\t\t\tGetLocation()\n\t\t\tnow = time.Now()\n\t\t} else {\n\t\t\tt, _ := time.ParseInLocation(\"2006-01-02 15:04:05.999999999\", timeString, GetLocation())\n\t\t\tnow = t\n\t\t}\n\t})\n\treturn now\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"fmt\"\n\t. \"github.com\/ahmetalpbalkan\/go-linq\"\n)\n\nfunc getCollection(allPosts []*LongPost) map[string][]*LongPost {\n\tcollection, err := From(allPosts).GroupBy(func(post T) T { return post.(*LongPost).Category }, func(post T) T { return post.(*LongPost) })\n\tif err != nil {\n\t\tfmt.Errorf(\"Error\", err)\n\t}\n\treturn coverMapType(collection)\n}\n\nfunc coverMapType(in map[T][]T) (out map[string][]*LongPost) {\n\tout = make(map[string][]*LongPost, len(in))\n\tfor k, _ := range in {\n\t\tif key, ok := k.(string); ok {\n\t\t\tv := in[k]\n\t\t\tout[key] = make([]*LongPost, 0, len(v))\n\t\t\tfor i := range v {\n\t\t\t\tif value, ok := v[i].(*LongPost); ok {\n\t\t\t\t\tout[key] = append(out[key], value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>todo: remove go-linq<commit_after>package lib\n\nimport (\n\t\"fmt\"\n\t. \"github.com\/ahmetalpbalkan\/go-linq\"\n)\n\n\/\/ TODO: remove go-linq\nfunc getCollection(allPosts []*LongPost) map[string][]*LongPost {\n\tcollection, err := From(allPosts).GroupBy(func(post T) T { return post.(*LongPost).Category }, func(post T) T { return post.(*LongPost) })\n\tif err != nil {\n\t\tfmt.Errorf(\"Error\", err)\n\t}\n\treturn coverMapType(collection)\n}\n\nfunc coverMapType(in map[T][]T) (out map[string][]*LongPost) {\n\tout = make(map[string][]*LongPost, len(in))\n\tfor k, _ := range in {\n\t\tif key, ok := k.(string); ok {\n\t\t\tv := in[k]\n\t\t\tout[key] = make([]*LongPost, 0, len(v))\n\t\t\tfor i := range v {\n\t\t\t\tif value, ok := v[i].(*LongPost); ok {\n\t\t\t\t\tout[key] = append(out[key], value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 Foursquare Labs Inc.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\n\t\"github.com\/foursquare\/fsgo\/net\/httpthrift\"\n\t\"github.com\/foursquare\/fsgo\/report\"\n\t\"github.com\/foursquare\/quiver\/gen\"\n\t\"github.com\/foursquare\/quiver\/hfile\"\n\t\"github.com\/foursquare\/quiver\/util\"\n)\n\nfunc WrapHttpRpcHandler(cs *hfile.CollectionSet, stats *report.Recorder) *httpthrift.ThriftOverHTTPHandler {\n\treturn httpthrift.NewThriftOverHTTPHandler(gen.NewHFileServiceProcessor(&ThriftRpcImpl{cs}), stats)\n}\n\ntype ThriftRpcImpl struct {\n\t*hfile.CollectionSet\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesSingle(req *gen.SingleHFileKeyRequest) (r *gen.SingleHFileKeyResponse, err error) {\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetValuesSingle] %s (%d keys)\\n\", *req.HfileName, len(req.SortedKeys))\n\t}\n\thfile, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader := hfile.GetScanner()\n\t\/\/ TODO: clients should request strict during dev\/testing?\n\treader.EnforceKeyOrder = false\n\tdefer reader.Release()\n\n\tres := new(gen.SingleHFileKeyResponse)\n\tres.Values = make(map[int32][]byte)\n\tfound := int32(0)\n\n\tfor idx, key := range req.SortedKeys {\n\t\tif Settings.debug {\n\t\t\tlog.Printf(\"[GetValuesSingle] key: %s\\n\", hex.EncodeToString(key))\n\t\t}\n\t\tif idx > 0 && bytes.Equal(req.SortedKeys[idx-1], key) {\n\t\t\tif prev, ok := res.Values[int32(idx-1)]; ok {\n\t\t\t\tres.Values[int32(idx)] = prev\n\t\t\t\tfound++\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif !hfile.MightContain(key) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue, err, ok := reader.GetFirst(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ok {\n\t\t\tfound++\n\t\t\tif !req.GetCountOnly() {\n\t\t\t\tif req.PerKeyValueLimit != nil {\n\t\t\t\t\tres.Values[int32(idx)] = value[:req.GetPerKeyValueLimit()]\n\t\t\t\t} else {\n\t\t\t\t\tres.Values[int32(idx)] = value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetValuesSingle] %s found %d of %d.\\n\", *req.HfileName, found, len(req.SortedKeys))\n\t}\n\tres.KeyCount = &found\n\treturn res, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesMulti(req *gen.SingleHFileKeyRequest) (r *gen.MultiHFileKeyResponse, err error) {\n\tif Settings.debug {\n\t\tlog.Println(\"[GetValuesMulti]\", len(req.SortedKeys))\n\t}\n\n\thfile, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader := hfile.GetScanner()\n\tdefer reader.Release()\n\n\tres := new(gen.MultiHFileKeyResponse)\n\tres.Values = make(map[int32][][]byte)\n\tfound := int32(0)\n\n\tfor idx, key := range req.SortedKeys {\n\t\tif !hfile.MightContain(key) {\n\t\t\tcontinue\n\t\t}\n\t\tvalues, err := reader.GetAll(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(values) > 0 {\n\t\t\tfound += int32(len(values))\n\t\t\tres.Values[int32(idx)] = values\n\t\t}\n\t}\n\n\tres.KeyCount = &found\n\treturn res, nil\n\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesForPrefixes(req *gen.PrefixRequest) (r *gen.PrefixResponse, err error) {\n\tres := new(gen.PrefixResponse)\n\tif reader, err := cs.ReaderFor(*req.HfileName); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\ti := reader.GetIterator()\n\t\tdefer i.Release()\n\t\tlimit := int32(0)\n\t\tif req.ValueLimit != nil {\n\t\t\tlimit = *req.ValueLimit\n\t\t}\n\t\tif res.Values, res.LastKey, err = i.AllForPrefixes(req.SortedKeys, limit, req.LastKey); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\treturn res, nil\n\t\t}\n\t}\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesMultiSplitKeys(req *gen.MultiHFileSplitKeyRequest) (r *gen.KeyToValuesResponse, err error) {\n\tres := make(map[string][][]byte)\n\treader, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := reader.GetScanner()\n\tdefer scanner.Release()\n\n\tfor _, parts := range util.RevProduct(req.SplitKey) {\n\t\t\/\/ TODO(davidt): avoid allocing concated key by adding split-key search lower down.\n\t\tkey := bytes.Join(parts, nil)\n\n\t\tif values, err := scanner.GetAll(key); err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(values) > 0 {\n\t\t\tres[string(key)] = values\n\t\t}\n\t}\n\treturn &gen.KeyToValuesResponse{res}, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetIterator(req *gen.IteratorRequest) (*gen.IteratorResponse, error) {\n\t\/\/ \tHfileName *string `thrift:\"hfileName,1\" json:\"hfileName\"`\n\t\/\/ \tIncludeValues *bool `thrift:\"includeValues,2\" json:\"includeValues\"`\n\t\/\/ \tLastKey []byte `thrift:\"lastKey,3\" json:\"lastKey\"`\n\t\/\/ \tSkipKeys *int32 `thrift:\"skipKeys,4\" json:\"skipKeys\"`\n\t\/\/ \tResponseLimit *int32 `thrift:\"responseLimit,5\" json:\"responseLimit\"`\n\t\/\/ \tEndKey []byte `thrift:\"endKey,6\" json:\"endKey\"`\n\tvar err error\n\n\tif req.ResponseLimit == nil {\n\t\treturn nil, fmt.Errorf(\"Missing limit.\")\n\t}\n\tlimit := int(*req.ResponseLimit)\n\n\treader, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tit := reader.GetIterator()\n\tdefer it.Release()\n\n\tremaining := false\n\n\tif req.LastKey != nil {\n\t\tremaining, err = it.Seek(req.LastKey)\n\t} else {\n\t\tremaining, err = it.Next()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := new(gen.IteratorResponse)\n\n\tif !remaining {\n\t\treturn res, nil\n\t}\n\n\tskipKeys := int32(0)\n\tlastKey := it.Key()\n\n\tif toSkip := req.GetSkipKeys(); toSkip > 0 {\n\t\tfor i := int32(0); i < toSkip && remaining; i++ {\n\t\t\tif bytes.Equal(lastKey, it.Key()) {\n\t\t\t\tskipKeys = skipKeys + 1\n\t\t\t} else {\n\t\t\t\tskipKeys = 0\n\t\t\t}\n\n\t\t\tlastKey = it.Key()\n\n\t\t\tremaining, err = it.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif !remaining {\n\t\t\treturn res, nil\n\t\t}\n\t}\n\n\tif req.EndKey != nil {\n\t\tremaining = remaining && !hfile.After(it.Key(), req.EndKey)\n\t}\n\n\tr := make([]*gen.KeyValueItem, 0)\n\tfor i := 0; i < limit && remaining; i++ {\n\t\tv := []byte{}\n\t\tif req.IncludeValues == nil || *req.IncludeValues {\n\t\t\tv = it.Value()\n\t\t}\n\t\tr = append(r, &gen.KeyValueItem{it.Key(), v})\n\n\t\tif bytes.Equal(lastKey, it.Key()) {\n\t\t\tskipKeys = skipKeys + 1\n\t\t} else {\n\t\t\tskipKeys = 1\n\t\t}\n\t\tlastKey = it.Key()\n\n\t\tremaining, err = it.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif req.EndKey != nil {\n\t\t\tremaining = remaining && !hfile.After(it.Key(), req.EndKey)\n\t\t}\n\t}\n\treturn &gen.IteratorResponse{r, lastKey, &skipKeys}, nil\n}\n\nfunc GetCollectionInfo(r *hfile.Reader, keySampleSize int) (*gen.HFileInfo, error) {\n\ti := new(gen.HFileInfo)\n\ti.Name = &r.Name\n\ti.Path = &r.SourcePath\n\tc := int64(r.EntryCount)\n\ti.NumElements = &c\n\ti.FirstKey, _ = r.FirstKey()\n\n\tif keySampleSize > 0 {\n\t\tit := r.GetIterator()\n\t\tdefer it.Release()\n\n\t\tpr := float64(keySampleSize) \/ float64(c)\n\t\tbuf := make([][]byte, keySampleSize)\n\t\tfound := 0\n\t\tnext, err := it.Next()\n\t\tfor next && found < keySampleSize {\n\t\t\tif rand.Float64() < pr {\n\t\t\t\tbuf[found] = it.Key()\n\t\t\t\tfound++\n\t\t\t}\n\t\t\tnext, err = it.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tbuf = buf[:found]\n\t\ti.RandomKeys = buf\n\t}\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetCollectionInfo] %v (%d keys)\\n\", i, len(i.RandomKeys))\n\t}\n\treturn i, nil\n}\n\nfunc (cs *ThriftRpcImpl) getInfo(req *gen.InfoRequest, allowRandom bool) (r []*gen.HFileInfo, err error) {\n\trequire := \"\"\n\tif req.IsSetHfileName() {\n\t\trequire := req.GetHfileName()\n\t\tif require != \"\" && !strings.ContainsRune(require, '\/') {\n\t\t\trequire = require + \"\/\"\n\t\t}\n\t}\n\n\tsample := 0\n\tif req.IsSetNumRandomKeys() && allowRandom {\n\t\tsample = int(*req.NumRandomKeys)\n\t}\n\n\tfor name, reader := range cs.Collections {\n\t\tif require == \"\" || strings.HasPrefix(name, require) {\n\t\t\tif i, err := GetCollectionInfo(reader, sample); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tr = append(r, i)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn r, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetInfo(req *gen.InfoRequest) (r []*gen.HFileInfo, err error) {\n\treturn cs.getInfo(req, false)\n}\n\nfunc (cs *ThriftRpcImpl) ScanCollectionAndSampleKeys(req *gen.InfoRequest) (r []*gen.HFileInfo, err error) {\n\treturn cs.getInfo(req, true)\n}\n\nfunc (cs *ThriftRpcImpl) TestTimeout(waitInMillis int32) (r int32, err error) {\n\treturn 0, fmt.Errorf(\"Not implemented\")\n}\n<commit_msg>whoops, valueLimit should apply to multi, not single<commit_after>\/\/ Copyright (C) 2015 Foursquare Labs Inc.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\n\t\"github.com\/foursquare\/fsgo\/net\/httpthrift\"\n\t\"github.com\/foursquare\/fsgo\/report\"\n\t\"github.com\/foursquare\/quiver\/gen\"\n\t\"github.com\/foursquare\/quiver\/hfile\"\n\t\"github.com\/foursquare\/quiver\/util\"\n)\n\nfunc WrapHttpRpcHandler(cs *hfile.CollectionSet, stats *report.Recorder) *httpthrift.ThriftOverHTTPHandler {\n\treturn httpthrift.NewThriftOverHTTPHandler(gen.NewHFileServiceProcessor(&ThriftRpcImpl{cs}), stats)\n}\n\ntype ThriftRpcImpl struct {\n\t*hfile.CollectionSet\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesSingle(req *gen.SingleHFileKeyRequest) (r *gen.SingleHFileKeyResponse, err error) {\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetValuesSingle] %s (%d keys)\\n\", *req.HfileName, len(req.SortedKeys))\n\t}\n\thfile, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader := hfile.GetScanner()\n\t\/\/ TODO: clients should request strict during dev\/testing?\n\treader.EnforceKeyOrder = false\n\tdefer reader.Release()\n\n\tres := new(gen.SingleHFileKeyResponse)\n\tres.Values = make(map[int32][]byte)\n\tfound := int32(0)\n\n\tfor idx, key := range req.SortedKeys {\n\t\tif Settings.debug {\n\t\t\tlog.Printf(\"[GetValuesSingle] key: %s\\n\", hex.EncodeToString(key))\n\t\t}\n\t\tif idx > 0 && bytes.Equal(req.SortedKeys[idx-1], key) {\n\t\t\tif prev, ok := res.Values[int32(idx-1)]; ok {\n\t\t\t\tres.Values[int32(idx)] = prev\n\t\t\t\tfound++\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif !hfile.MightContain(key) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue, err, ok := reader.GetFirst(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ok {\n\t\t\tfound++\n\t\t\tif !req.GetCountOnly() {\n\t\t\t\tres.Values[int32(idx)] = value\n\t\t\t}\n\t\t}\n\t}\n\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetValuesSingle] %s found %d of %d.\\n\", *req.HfileName, found, len(req.SortedKeys))\n\t}\n\tres.KeyCount = &found\n\treturn res, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesMulti(req *gen.SingleHFileKeyRequest) (r *gen.MultiHFileKeyResponse, err error) {\n\tif Settings.debug {\n\t\tlog.Println(\"[GetValuesMulti]\", len(req.SortedKeys))\n\t}\n\n\thfile, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader := hfile.GetScanner()\n\tdefer reader.Release()\n\n\tres := new(gen.MultiHFileKeyResponse)\n\tres.Values = make(map[int32][][]byte)\n\tfound := int32(0)\n\n\tfor idx, key := range req.SortedKeys {\n\t\tif !hfile.MightContain(key) {\n\t\t\tcontinue\n\t\t}\n\t\tvalues, err := reader.GetAll(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(values) > 0 {\n\t\t\tfound += int32(len(values))\n\t\t\tif req.PerKeyValueLimit != nil {\n\t\t\t\tres.Values[int32(idx)] = values[:req.GetPerKeyValueLimit()]\n\t\t\t} else {\n\t\t\t\tres.Values[int32(idx)] = values\n\t\t\t}\n\t\t}\n\t}\n\n\tres.KeyCount = &found\n\treturn res, nil\n\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesForPrefixes(req *gen.PrefixRequest) (r *gen.PrefixResponse, err error) {\n\tres := new(gen.PrefixResponse)\n\tif reader, err := cs.ReaderFor(*req.HfileName); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\ti := reader.GetIterator()\n\t\tdefer i.Release()\n\t\tlimit := int32(0)\n\t\tif req.ValueLimit != nil {\n\t\t\tlimit = *req.ValueLimit\n\t\t}\n\t\tif res.Values, res.LastKey, err = i.AllForPrefixes(req.SortedKeys, limit, req.LastKey); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\treturn res, nil\n\t\t}\n\t}\n}\n\nfunc (cs *ThriftRpcImpl) GetValuesMultiSplitKeys(req *gen.MultiHFileSplitKeyRequest) (r *gen.KeyToValuesResponse, err error) {\n\tres := make(map[string][][]byte)\n\treader, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := reader.GetScanner()\n\tdefer scanner.Release()\n\n\tfor _, parts := range util.RevProduct(req.SplitKey) {\n\t\t\/\/ TODO(davidt): avoid allocing concated key by adding split-key search lower down.\n\t\tkey := bytes.Join(parts, nil)\n\n\t\tif values, err := scanner.GetAll(key); err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(values) > 0 {\n\t\t\tres[string(key)] = values\n\t\t}\n\t}\n\treturn &gen.KeyToValuesResponse{res}, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetIterator(req *gen.IteratorRequest) (*gen.IteratorResponse, error) {\n\t\/\/ \tHfileName *string `thrift:\"hfileName,1\" json:\"hfileName\"`\n\t\/\/ \tIncludeValues *bool `thrift:\"includeValues,2\" json:\"includeValues\"`\n\t\/\/ \tLastKey []byte `thrift:\"lastKey,3\" json:\"lastKey\"`\n\t\/\/ \tSkipKeys *int32 `thrift:\"skipKeys,4\" json:\"skipKeys\"`\n\t\/\/ \tResponseLimit *int32 `thrift:\"responseLimit,5\" json:\"responseLimit\"`\n\t\/\/ \tEndKey []byte `thrift:\"endKey,6\" json:\"endKey\"`\n\tvar err error\n\n\tif req.ResponseLimit == nil {\n\t\treturn nil, fmt.Errorf(\"Missing limit.\")\n\t}\n\tlimit := int(*req.ResponseLimit)\n\n\treader, err := cs.ReaderFor(*req.HfileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tit := reader.GetIterator()\n\tdefer it.Release()\n\n\tremaining := false\n\n\tif req.LastKey != nil {\n\t\tremaining, err = it.Seek(req.LastKey)\n\t} else {\n\t\tremaining, err = it.Next()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := new(gen.IteratorResponse)\n\n\tif !remaining {\n\t\treturn res, nil\n\t}\n\n\tskipKeys := int32(0)\n\tlastKey := it.Key()\n\n\tif toSkip := req.GetSkipKeys(); toSkip > 0 {\n\t\tfor i := int32(0); i < toSkip && remaining; i++ {\n\t\t\tif bytes.Equal(lastKey, it.Key()) {\n\t\t\t\tskipKeys = skipKeys + 1\n\t\t\t} else {\n\t\t\t\tskipKeys = 0\n\t\t\t}\n\n\t\t\tlastKey = it.Key()\n\n\t\t\tremaining, err = it.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif !remaining {\n\t\t\treturn res, nil\n\t\t}\n\t}\n\n\tif req.EndKey != nil {\n\t\tremaining = remaining && !hfile.After(it.Key(), req.EndKey)\n\t}\n\n\tr := make([]*gen.KeyValueItem, 0)\n\tfor i := 0; i < limit && remaining; i++ {\n\t\tv := []byte{}\n\t\tif req.IncludeValues == nil || *req.IncludeValues {\n\t\t\tv = it.Value()\n\t\t}\n\t\tr = append(r, &gen.KeyValueItem{it.Key(), v})\n\n\t\tif bytes.Equal(lastKey, it.Key()) {\n\t\t\tskipKeys = skipKeys + 1\n\t\t} else {\n\t\t\tskipKeys = 1\n\t\t}\n\t\tlastKey = it.Key()\n\n\t\tremaining, err = it.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif req.EndKey != nil {\n\t\t\tremaining = remaining && !hfile.After(it.Key(), req.EndKey)\n\t\t}\n\t}\n\treturn &gen.IteratorResponse{r, lastKey, &skipKeys}, nil\n}\n\nfunc GetCollectionInfo(r *hfile.Reader, keySampleSize int) (*gen.HFileInfo, error) {\n\ti := new(gen.HFileInfo)\n\ti.Name = &r.Name\n\ti.Path = &r.SourcePath\n\tc := int64(r.EntryCount)\n\ti.NumElements = &c\n\ti.FirstKey, _ = r.FirstKey()\n\n\tif keySampleSize > 0 {\n\t\tit := r.GetIterator()\n\t\tdefer it.Release()\n\n\t\tpr := float64(keySampleSize) \/ float64(c)\n\t\tbuf := make([][]byte, keySampleSize)\n\t\tfound := 0\n\t\tnext, err := it.Next()\n\t\tfor next && found < keySampleSize {\n\t\t\tif rand.Float64() < pr {\n\t\t\t\tbuf[found] = it.Key()\n\t\t\t\tfound++\n\t\t\t}\n\t\t\tnext, err = it.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tbuf = buf[:found]\n\t\ti.RandomKeys = buf\n\t}\n\tif Settings.debug {\n\t\tlog.Printf(\"[GetCollectionInfo] %v (%d keys)\\n\", i, len(i.RandomKeys))\n\t}\n\treturn i, nil\n}\n\nfunc (cs *ThriftRpcImpl) getInfo(req *gen.InfoRequest, allowRandom bool) (r []*gen.HFileInfo, err error) {\n\trequire := \"\"\n\tif req.IsSetHfileName() {\n\t\trequire := req.GetHfileName()\n\t\tif require != \"\" && !strings.ContainsRune(require, '\/') {\n\t\t\trequire = require + \"\/\"\n\t\t}\n\t}\n\n\tsample := 0\n\tif req.IsSetNumRandomKeys() && allowRandom {\n\t\tsample = int(*req.NumRandomKeys)\n\t}\n\n\tfor name, reader := range cs.Collections {\n\t\tif require == \"\" || strings.HasPrefix(name, require) {\n\t\t\tif i, err := GetCollectionInfo(reader, sample); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tr = append(r, i)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn r, nil\n}\n\nfunc (cs *ThriftRpcImpl) GetInfo(req *gen.InfoRequest) (r []*gen.HFileInfo, err error) {\n\treturn cs.getInfo(req, false)\n}\n\nfunc (cs *ThriftRpcImpl) ScanCollectionAndSampleKeys(req *gen.InfoRequest) (r []*gen.HFileInfo, err error) {\n\treturn cs.getInfo(req, true)\n}\n\nfunc (cs *ThriftRpcImpl) TestTimeout(waitInMillis int32) (r int32, err error) {\n\treturn 0, fmt.Errorf(\"Not implemented\")\n}\n<|endoftext|>"} {"text":"<commit_before>package rss\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype Rss struct {\n\tChannel Channel `xml:\"channel\"`\n}\ntype Item struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tEnclosure\t\tEnclosure `xml:\"enclosure\"`\n}\ntype Enclosure struct {\n\tUrl\t\t\t\t\tstring `xml:\"url,attr\"`\n\tType\t\t\t\tstring `xml:\"type,attr\"`\n}\ntype Channel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tItems []Item `xml:\"item\"`\n}\n\nfunc ResponseToRss(r *http.Response) (Rss, error) {\n\trss := Rss{}\n\tXMLdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn rss, err\n\t}\n\tbuffer := bytes.NewBuffer(XMLdata)\n\tdecoded := xml.NewDecoder(buffer)\n\terr = decoded.Decode(&rss)\n\treturn rss, err\n}\n\/*\nfunc main() {\n\n response, err := http.Get(\"http:\/\/www.thestar.com.my\/RSS\/Metro\/Community\/\")\n\n if err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n\n defer response.Body.Close()\n\n XMLdata, err := ioutil.ReadAll(response.Body)\n\n if err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n\n rss := new(Rss)\n\n buffer := bytes.NewBuffer(XMLdata)\n\n decoded := xml.NewDecoder(buffer)\n\n err = decoded.Decode(rss)\n\n if err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n\n fmt.Printf(\"Title : %s\\n\", rss.Channel.Title)\n fmt.Printf(\"Description : %s\\n\", rss.Channel.Description)\n fmt.Printf(\"Link : %s\\n\", rss.Channel.Link)\n\n total := len(rss.Channel.Items)\n\n fmt.Printf(\"Total items : %v\\n\", total)\n\n for i := 0; i < total; i++ {\n fmt.Printf(\"[%d] item title : %s\\n\", i, rss.Channel.Items[i].Title)\n fmt.Printf(\"[%d] item description : %s\\n\", i, rss.Channel.Items[i].Description)\n fmt.Printf(\"[%d] item link : %s\\n\\n\", i, rss.Channel.Items[i].Link)\n }\n\n }\n *\/\n<commit_msg>remove old notes<commit_after>package rss\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype Rss struct {\n\tChannel Channel `xml:\"channel\"`\n}\ntype Item struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tEnclosure\t\tEnclosure `xml:\"enclosure\"`\n}\ntype Enclosure struct {\n\tUrl\t\t\t\t\tstring `xml:\"url,attr\"`\n\tType\t\t\t\tstring `xml:\"type,attr\"`\n}\ntype Channel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tItems []Item `xml:\"item\"`\n}\n\nfunc ResponseToRss(r *http.Response) (Rss, error) {\n\trss := Rss{}\n\tXMLdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn rss, err\n\t}\n\tbuffer := bytes.NewBuffer(XMLdata)\n\tdecoded := xml.NewDecoder(buffer)\n\terr = decoded.Decode(&rss)\n\treturn rss, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 Eiichiro Watanabe\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/osrg\/gobgp\/packet\"\n)\n\nconst rtrProtocolVersion uint8 = 0\n\ntype rtrConn struct {\n\tconn *net.TCPConn\n\tsessionId uint16\n\tremoteAddr net.Addr\n}\n\ntype rtrServer struct {\n\tconnCh chan *rtrConn\n\tlistenPort int\n}\n\nfunc newRTRServer(port int) *rtrServer {\n\ts := &rtrServer{\n\t\tconnCh: make(chan *rtrConn, 1),\n\t\tlistenPort: port,\n\t}\n\treturn s\n}\n\nfunc (s *rtrServer) run() {\n\tservice := \":\" + strconv.Itoa(s.listenPort)\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", service)\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tcheckError(err)\n\n\tfor i := 0; ; {\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ti++\n\t\tc := &rtrConn{\n\t\t\tconn: conn,\n\t\t\tsessionId: uint16(i),\n\t\t\tremoteAddr: conn.RemoteAddr(),\n\t\t}\n\t\ts.connCh <- c\n\t}\n}\n\nfunc (rtr *rtrConn) sendPDU(msg bgp.RTRMessage) error {\n\tpdu, _ := msg.Serialize()\n\t_, err := rtr.conn.Write(pdu)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (rtr *rtrConn) cacheResponse(currentSN uint32, lists FakeROATable) error {\n\tif err := rtr.sendPDU(bgp.NewRTRCacheResponse(rtr.sessionId)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Cache Response PDU to %v (ID: %v)\", rtr.remoteAddr, rtr.sessionId)\n\n\tvar counter uint32\n\tfor _, rf := range []bgp.RouteFamily{bgp.RF_IPv4_UC, bgp.RF_IPv6_UC} {\n\t\tfor _, flag := range []uint8{bgp.ANNOUNCEMENT, bgp.WITHDRAWAL} {\n\t\t\tcounter = 0\n\t\t\tfor _, v := range lists[rf][flag] {\n\t\t\t\tif err := rtr.sendPDU(bgp.NewRTRIPPrefix(v.Prefix, v.PrefixLen, v.MaxLen, v.AS, flag)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcounter++\n\t\t\t\tlog.Debugf(\"Sent %s Prefix PDU to %v (Prefix: %v\/%v, Maxlen: %v, AS: %v, flags: %v)\", RFToIPVer(rf), rtr.remoteAddr, v.Prefix, v.PrefixLen, v.MaxLen, v.AS, flag)\n\t\t\t}\n\t\t\tif !commandOpts.Debug && counter != 0 {\n\t\t\t\tlog.Infof(\"Sent %s Prefix PDU(s) to %v (%d ROA(s), flags: %v)\", RFToIPVer(rf), rtr.remoteAddr, counter, flag)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := rtr.sendPDU(bgp.NewRTREndOfData(rtr.sessionId, currentSN)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent End of Data PDU to %v (ID: %v, SN: %v)\", rtr.remoteAddr, rtr.sessionId, currentSN)\n\n\treturn nil\n}\n\nfunc (rtr *rtrConn) noIncrementalUpdateAvailable() error {\n\tif err := rtr.sendPDU(bgp.NewRTRCacheReset()); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Cache Reset PDU to %v\", rtr.remoteAddr)\n\n\treturn nil\n}\n\nfunc (rtr *rtrConn) cacheHasNoDataAvailable() error {\n\tif err := rtr.sendPDU(bgp.NewRTRErrorReport(bgp.NO_DATA_AVAILABLE, nil, nil)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Error Report PDU to %v (ID: %v, ErrorCode: %v)\", rtr.remoteAddr, rtr.sessionId, bgp.NO_DATA_AVAILABLE)\n\n\treturn nil\n}\n\nfunc RFToIPVer(rf bgp.RouteFamily) string {\n\treturn strings.Split(rf.String(), \"_\")[1]\n}\n\ntype errMsg struct {\n\tcode uint16\n\tdata []byte\n}\n\ntype resourceResponse struct {\n\tsn uint32\n\tlist FakeROATable\n}\n\nfunc handleRTR(rtr *rtrConn, mgr *ResourceManager) {\n\tbcastReceiver := mgr.serialNotify.Join()\n\tscanner := bufio.NewScanner(bufio.NewReader(rtr.conn))\n\tscanner.Split(bgp.SplitRTR)\n\n\tmsgCh := make(chan bgp.RTRMessage, 1)\n\terrCh := make(chan *errMsg, 1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Infof(\"Connection to %v was closed. (ID: %v)\", rtr.remoteAddr, rtr.sessionId)\n\t\t\trtr.conn.Close()\n\t\t}()\n\n\t\tfor scanner.Scan() {\n\t\t\tbuf := scanner.Bytes()\n\t\t\tif buf[0] != rtrProtocolVersion {\n\t\t\t\terrCh <- &errMsg{code: bgp.UNSUPPORTED_PROTOCOL_VERSION, data: buf}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm, err := bgp.ParseRTR(buf)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- &errMsg{code: bgp.INVALID_REQUEST, data: buf}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgCh <- m\n\t\t}\n\t}()\n\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase <-bcastReceiver.In:\n\t\t\tcurrentSN := mgr.CurrentSerial()\n\t\t\tif err := rtr.sendPDU(bgp.NewRTRSerialNotify(rtr.sessionId, currentSN)); err != nil {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tlog.Infof(\"Sent Serial Notify PDU to %v (ID: %v, SN: %v)\", rtr.remoteAddr, rtr.sessionId, currentSN)\n\t\tcase msg := <-errCh:\n\t\t\trtr.sendPDU(bgp.NewRTRErrorReport(msg.code, msg.data, nil))\n\t\t\tlog.Infof(\"Sent Error Report PDU to %v (ID: %v, ErrorCode: %v)\", rtr.remoteAddr, rtr.sessionId, msg.code)\n\t\t\treturn\n\t\tcase m := <-msgCh:\n\t\t\tswitch msg := m.(type) {\n\t\t\tcase *bgp.RTRSerialQuery:\n\t\t\t\tpeerSN := msg.SerialNumber\n\t\t\t\tlog.Infof(\"Received Serial Query PDU from %v (ID: %v, SN: %d)\", rtr.remoteAddr, msg.SessionID, peerSN)\n\n\t\t\t\ttimeoutCh := make(chan bool, 1)\n\t\t\t\tresourceResponseCh := make(chan *resourceResponse, 1)\n\n\t\t\t\tgo func(tCh chan bool) {\n\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\ttCh <- true\n\t\t\t\t}(timeoutCh)\n\n\t\t\t\tgo func(rrCh chan *resourceResponse, peerSN uint32) {\n\t\t\t\t\ttrans := mgr.BeginTransaction()\n\t\t\t\t\tdefer trans.EndTransaction()\n\t\t\t\t\tif trans.HasKey(peerSN) {\n\t\t\t\t\t\trrCh <- &resourceResponse{\n\t\t\t\t\t\t\tsn: trans.CurrentSerial(),\n\t\t\t\t\t\t\tlist: trans.DeltaList(peerSN),\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\trrCh <- nil\n\t\t\t\t\t}\n\t\t\t\t}(resourceResponseCh, peerSN)\n\n\t\t\t\tselect {\n\t\t\t\tcase rr := <-resourceResponseCh:\n\t\t\t\t\tif rr != nil {\n\t\t\t\t\t\tif err := rtr.cacheResponse(rr.sn, rr.list); err == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif err := rtr.noIncrementalUpdateAvailable(); err == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase <-timeoutCh:\n\t\t\t\t\tif err := rtr.cacheHasNoDataAvailable(); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak LOOP\n\t\t\tcase *bgp.RTRResetQuery:\n\t\t\t\tlog.Infof(\"Received Reset Query PDU from %v\", rtr.remoteAddr)\n\n\t\t\t\ttimeoutCh := make(chan bool, 1)\n\t\t\t\tresourceResponseCh := make(chan *resourceResponse, 1)\n\n\t\t\t\tgo func(tCh chan bool) {\n\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\ttCh <- true\n\t\t\t\t}(timeoutCh)\n\n\t\t\t\tgo func(rrCh chan *resourceResponse) {\n\t\t\t\t\ttrans := mgr.BeginTransaction()\n\t\t\t\t\tdefer trans.EndTransaction()\n\t\t\t\t\trrCh <- &resourceResponse{\n\t\t\t\t\t\tsn: trans.CurrentSerial(),\n\t\t\t\t\t\tlist: trans.CurrentList(),\n\t\t\t\t\t}\n\t\t\t\t}(resourceResponseCh)\n\n\t\t\t\tselect {\n\t\t\t\tcase rr := <-resourceResponseCh:\n\t\t\t\t\tif err := rtr.cacheResponse(rr.sn, rr.list); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\tcase <-timeoutCh:\n\t\t\t\t\tif err := rtr.cacheHasNoDataAvailable(); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak LOOP\n\t\t\tcase *bgp.RTRErrorReport:\n\t\t\t\tlog.Warnf(\"Received Error Report PDU from %v (%#v)\", rtr.remoteAddr, msg)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpdu, _ := msg.Serialize()\n\t\t\t\tlog.Warnf(\"Received unsupported PDU (type %d) from %v (%#v)\", pdu[1], rtr.remoteAddr, msg)\n\t\t\t\trtr.sendPDU(bgp.NewRTRErrorReport(bgp.UNSUPPORTED_PDU_TYPE, pdu, nil))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\trtr.sendPDU(bgp.NewRTRErrorReport(bgp.INTERNAL_ERROR, nil, nil))\n\treturn\n}\n<commit_msg>Use length of slice instead of counter to count prefixes<commit_after>\/\/ Copyright (C) 2015 Eiichiro Watanabe\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/osrg\/gobgp\/packet\"\n)\n\nconst rtrProtocolVersion uint8 = 0\n\ntype rtrConn struct {\n\tconn *net.TCPConn\n\tsessionId uint16\n\tremoteAddr net.Addr\n}\n\ntype rtrServer struct {\n\tconnCh chan *rtrConn\n\tlistenPort int\n}\n\nfunc newRTRServer(port int) *rtrServer {\n\ts := &rtrServer{\n\t\tconnCh: make(chan *rtrConn, 1),\n\t\tlistenPort: port,\n\t}\n\treturn s\n}\n\nfunc (s *rtrServer) run() {\n\tservice := \":\" + strconv.Itoa(s.listenPort)\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", service)\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tcheckError(err)\n\n\tfor i := 0; ; {\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ti++\n\t\tc := &rtrConn{\n\t\t\tconn: conn,\n\t\t\tsessionId: uint16(i),\n\t\t\tremoteAddr: conn.RemoteAddr(),\n\t\t}\n\t\ts.connCh <- c\n\t}\n}\n\nfunc (rtr *rtrConn) sendPDU(msg bgp.RTRMessage) error {\n\tpdu, _ := msg.Serialize()\n\t_, err := rtr.conn.Write(pdu)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (rtr *rtrConn) cacheResponse(currentSN uint32, lists FakeROATable) error {\n\tif err := rtr.sendPDU(bgp.NewRTRCacheResponse(rtr.sessionId)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Cache Response PDU to %v (ID: %v)\", rtr.remoteAddr, rtr.sessionId)\n\n\tfor _, rf := range []bgp.RouteFamily{bgp.RF_IPv4_UC, bgp.RF_IPv6_UC} {\n\t\tfor _, flag := range []uint8{bgp.ANNOUNCEMENT, bgp.WITHDRAWAL} {\n\t\t\tfor _, v := range lists[rf][flag] {\n\t\t\t\tif err := rtr.sendPDU(bgp.NewRTRIPPrefix(v.Prefix, v.PrefixLen, v.MaxLen, v.AS, flag)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"Sent %s Prefix PDU to %v (Prefix: %v\/%v, Maxlen: %v, AS: %v, flags: %v)\", RFToIPVer(rf), rtr.remoteAddr, v.Prefix, v.PrefixLen, v.MaxLen, v.AS, flag)\n\t\t\t}\n\t\t\tprefixes := len(lists[rf][flag])\n\t\t\tif !commandOpts.Debug && prefixes != 0 {\n\t\t\t\tlog.Infof(\"Sent %s Prefix PDU(s) to %v (%d ROA(s), flags: %v)\", RFToIPVer(rf), rtr.remoteAddr, prefixes, flag)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := rtr.sendPDU(bgp.NewRTREndOfData(rtr.sessionId, currentSN)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent End of Data PDU to %v (ID: %v, SN: %v)\", rtr.remoteAddr, rtr.sessionId, currentSN)\n\n\treturn nil\n}\n\nfunc (rtr *rtrConn) noIncrementalUpdateAvailable() error {\n\tif err := rtr.sendPDU(bgp.NewRTRCacheReset()); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Cache Reset PDU to %v\", rtr.remoteAddr)\n\n\treturn nil\n}\n\nfunc (rtr *rtrConn) cacheHasNoDataAvailable() error {\n\tif err := rtr.sendPDU(bgp.NewRTRErrorReport(bgp.NO_DATA_AVAILABLE, nil, nil)); err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent Error Report PDU to %v (ID: %v, ErrorCode: %v)\", rtr.remoteAddr, rtr.sessionId, bgp.NO_DATA_AVAILABLE)\n\n\treturn nil\n}\n\nfunc RFToIPVer(rf bgp.RouteFamily) string {\n\treturn strings.Split(rf.String(), \"_\")[1]\n}\n\ntype errMsg struct {\n\tcode uint16\n\tdata []byte\n}\n\ntype resourceResponse struct {\n\tsn uint32\n\tlist FakeROATable\n}\n\nfunc handleRTR(rtr *rtrConn, mgr *ResourceManager) {\n\tbcastReceiver := mgr.serialNotify.Join()\n\tscanner := bufio.NewScanner(bufio.NewReader(rtr.conn))\n\tscanner.Split(bgp.SplitRTR)\n\n\tmsgCh := make(chan bgp.RTRMessage, 1)\n\terrCh := make(chan *errMsg, 1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Infof(\"Connection to %v was closed. (ID: %v)\", rtr.remoteAddr, rtr.sessionId)\n\t\t\trtr.conn.Close()\n\t\t}()\n\n\t\tfor scanner.Scan() {\n\t\t\tbuf := scanner.Bytes()\n\t\t\tif buf[0] != rtrProtocolVersion {\n\t\t\t\terrCh <- &errMsg{code: bgp.UNSUPPORTED_PROTOCOL_VERSION, data: buf}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm, err := bgp.ParseRTR(buf)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- &errMsg{code: bgp.INVALID_REQUEST, data: buf}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgCh <- m\n\t\t}\n\t}()\n\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase <-bcastReceiver.In:\n\t\t\tcurrentSN := mgr.CurrentSerial()\n\t\t\tif err := rtr.sendPDU(bgp.NewRTRSerialNotify(rtr.sessionId, currentSN)); err != nil {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tlog.Infof(\"Sent Serial Notify PDU to %v (ID: %v, SN: %v)\", rtr.remoteAddr, rtr.sessionId, currentSN)\n\t\tcase msg := <-errCh:\n\t\t\trtr.sendPDU(bgp.NewRTRErrorReport(msg.code, msg.data, nil))\n\t\t\tlog.Infof(\"Sent Error Report PDU to %v (ID: %v, ErrorCode: %v)\", rtr.remoteAddr, rtr.sessionId, msg.code)\n\t\t\treturn\n\t\tcase m := <-msgCh:\n\t\t\tswitch msg := m.(type) {\n\t\t\tcase *bgp.RTRSerialQuery:\n\t\t\t\tpeerSN := msg.SerialNumber\n\t\t\t\tlog.Infof(\"Received Serial Query PDU from %v (ID: %v, SN: %d)\", rtr.remoteAddr, msg.SessionID, peerSN)\n\n\t\t\t\ttimeoutCh := make(chan bool, 1)\n\t\t\t\tresourceResponseCh := make(chan *resourceResponse, 1)\n\n\t\t\t\tgo func(tCh chan bool) {\n\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\ttCh <- true\n\t\t\t\t}(timeoutCh)\n\n\t\t\t\tgo func(rrCh chan *resourceResponse, peerSN uint32) {\n\t\t\t\t\ttrans := mgr.BeginTransaction()\n\t\t\t\t\tdefer trans.EndTransaction()\n\t\t\t\t\tif trans.HasKey(peerSN) {\n\t\t\t\t\t\trrCh <- &resourceResponse{\n\t\t\t\t\t\t\tsn: trans.CurrentSerial(),\n\t\t\t\t\t\t\tlist: trans.DeltaList(peerSN),\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\trrCh <- nil\n\t\t\t\t\t}\n\t\t\t\t}(resourceResponseCh, peerSN)\n\n\t\t\t\tselect {\n\t\t\t\tcase rr := <-resourceResponseCh:\n\t\t\t\t\tif rr != nil {\n\t\t\t\t\t\tif err := rtr.cacheResponse(rr.sn, rr.list); err == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif err := rtr.noIncrementalUpdateAvailable(); err == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase <-timeoutCh:\n\t\t\t\t\tif err := rtr.cacheHasNoDataAvailable(); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak LOOP\n\t\t\tcase *bgp.RTRResetQuery:\n\t\t\t\tlog.Infof(\"Received Reset Query PDU from %v\", rtr.remoteAddr)\n\n\t\t\t\ttimeoutCh := make(chan bool, 1)\n\t\t\t\tresourceResponseCh := make(chan *resourceResponse, 1)\n\n\t\t\t\tgo func(tCh chan bool) {\n\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\ttCh <- true\n\t\t\t\t}(timeoutCh)\n\n\t\t\t\tgo func(rrCh chan *resourceResponse) {\n\t\t\t\t\ttrans := mgr.BeginTransaction()\n\t\t\t\t\tdefer trans.EndTransaction()\n\t\t\t\t\trrCh <- &resourceResponse{\n\t\t\t\t\t\tsn: trans.CurrentSerial(),\n\t\t\t\t\t\tlist: trans.CurrentList(),\n\t\t\t\t\t}\n\t\t\t\t}(resourceResponseCh)\n\n\t\t\t\tselect {\n\t\t\t\tcase rr := <-resourceResponseCh:\n\t\t\t\t\tif err := rtr.cacheResponse(rr.sn, rr.list); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\tcase <-timeoutCh:\n\t\t\t\t\tif err := rtr.cacheHasNoDataAvailable(); err == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak LOOP\n\t\t\tcase *bgp.RTRErrorReport:\n\t\t\t\tlog.Warnf(\"Received Error Report PDU from %v (%#v)\", rtr.remoteAddr, msg)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tpdu, _ := msg.Serialize()\n\t\t\t\tlog.Warnf(\"Received unsupported PDU (type %d) from %v (%#v)\", pdu[1], rtr.remoteAddr, msg)\n\t\t\t\trtr.sendPDU(bgp.NewRTRErrorReport(bgp.UNSUPPORTED_PDU_TYPE, pdu, nil))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\trtr.sendPDU(bgp.NewRTRErrorReport(bgp.INTERNAL_ERROR, nil, nil))\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/remind101\/emp\/Godeps\/_workspace\/src\/github.com\/bgentry\/heroku-go\"\n\t\"github.com\/remind101\/emp\/Godeps\/_workspace\/src\/github.com\/docker\/docker\/pkg\/term\"\n)\n\nvar (\n\tdetachedRun bool\n\tdynoSize string\n)\n\nvar cmdRun = &Command{\n\tRun: runRun,\n\tUsage: \"run [-s <size>] [-d] <command> [<argument>...]\",\n\tNeedsApp: true,\n\tCategory: \"dyno\",\n\tShort: \"run a process in a dyno\",\n\tLong: `\nRun a process on Heroku. Flags such as` + \" `-a` \" + `may be parsed out of\nthe command unless the command is quoted or provided after a\ndouble-dash (--).\n\nOptions:\n\n -s <size> set the size for this dyno (e.g. 2X)\n -d run in detached mode instead of attached to terminal\n\nExamples:\n\n $ emp run echo \"hello\"\n Running ` + \"`echo \\\"hello\\\"`\" + ` on myapp as run.1234:\n \"hello\"\n\n $ emp run console\n Running ` + \"`console`\" + ` on myapp as run.5678:\n Loading production environment (Rails 3.2.14)\n irb(main):001:0> ...\n\n $ emp run -d -s 2X bin\/my_worker\n Ran ` + \"`bin\/my_worker`\" + ` on myapp as run.4321, detached.\n\n $ emp run -a myapp -- ls -a \/\n Running ` + \"`ls -a bin \/`\" + ` on myapp as run.8650:\n \/:\n . .. app bin dev etc home lib lib64 lost+found proc sbin tmp usr var\n`,\n}\n\nfunc init() {\n\tcmdRun.Flag.BoolVarP(&detachedRun, \"detached\", \"d\", false, \"detached\")\n\tcmdRun.Flag.StringVarP(&dynoSize, \"size\", \"s\", \"\", \"dyno size\")\n}\n\nfunc runRun(cmd *Command, args []string) {\n\tif len(args) == 0 {\n\t\tcmd.PrintUsage()\n\t\tos.Exit(2)\n\t}\n\tappname := mustApp()\n\n\tw, err := term.GetWinsize(inFd)\n\tif err != nil {\n\t\t\/\/ If syscall.TIOCGWINSZ is not supported by the device, we're\n\t\t\/\/ probably trying to run tests. Set w to some sensible default.\n\t\tif err.Error() == \"operation not supported by device\" {\n\t\t\tw = &term.Winsize{\n\t\t\t\tHeight: 20,\n\t\t\t\tWidth: 80,\n\t\t\t}\n\t\t} else {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t}\n\n\tattached := !detachedRun\n\topts := heroku.DynoCreateOpts{Attach: &attached}\n\tif attached {\n\t\tenv := map[string]string{\n\t\t\t\"COLUMNS\": strconv.Itoa(int(w.Width)),\n\t\t\t\"LINES\": strconv.Itoa(int(w.Height)),\n\t\t\t\"TERM\": os.Getenv(\"TERM\"),\n\t\t}\n\t\topts.Env = &env\n\t}\n\tif dynoSize != \"\" {\n\t\tif !strings.HasSuffix(dynoSize, \"X\") {\n\t\t\tcmd.PrintUsage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\topts.Size = &dynoSize\n\t}\n\n\tcommand := strings.Join(args, \" \")\n\tif detachedRun {\n\t\tdyno, err := client.DynoCreate(appname, command, &opts)\n\t\tmust(err)\n\n\t\tlog.Printf(\"Ran `%s` on %s as %s, detached.\", dyno.Command, appname, dyno.Name)\n\t\treturn\n\t}\n\n\tparams := struct {\n\t\tCommand string `json:\"command\"`\n\t\tAttach *bool `json:\"attach,omitempty\"`\n\t\tEnv *map[string]string `json:\"env,omitempty\"`\n\t\tSize *string `json:\"size,omitempty\"`\n\t}{\n\t\tCommand: command,\n\t\tAttach: opts.Attach,\n\t\tEnv: opts.Env,\n\t\tSize: opts.Size,\n\t}\n\treq, err := client.NewRequest(\"POST\", \"\/apps\/\"+appname+\"\/dynos\", params)\n\tmust(err)\n\n\tu, err := url.Parse(apiURL)\n\tmust(err)\n\n\tprotocol := u.Scheme\n\taddress := u.Path\n\tif protocol != \"unix\" {\n\t\tprotocol = \"tcp\"\n\t\taddress = u.Host + \":80\"\n\t}\n\n\tif u.Scheme == \"https\" {\n\t\taddress = address + \":443\"\n\t}\n\n\tvar dial net.Conn\n\tif u.Scheme == \"https\" {\n\t\tdial, err = tlsDial(protocol, address, &tls.Config{})\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t} else {\n\t\tdial, err = net.Dial(protocol, address)\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t}\n\n\tclientconn := httputil.NewClientConn(dial, nil)\n\tdefer clientconn.Close()\n\t_, err = clientconn.Do(req)\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\tprintFatal(err.Error())\n\t}\n\trwc, br := clientconn.Hijack()\n\tdefer rwc.Close()\n\n\tif isTerminalIn && isTerminalOut {\n\t\tstate, err := term.SetRawTerminal(inFd)\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t\tdefer term.RestoreTerminal(inFd, state)\n\t}\n\n\terrChanOut := make(chan error, 1)\n\terrChanIn := make(chan error, 1)\n\texit := make(chan bool)\n\tgo func() {\n\t\tdefer close(exit)\n\t\tdefer close(errChanOut)\n\t\tvar err error\n\t\t_, err = io.Copy(os.Stdout, br)\n\t\terrChanOut <- err\n\t}()\n\tgo func() {\n\t\t_, err := io.Copy(rwc, os.Stdin)\n\t\terrChanIn <- err\n\t\trwc.(interface {\n\t\t\tCloseWrite() error\n\t\t}).CloseWrite()\n\t}()\n\t<-exit\n\tselect {\n\tcase err = <-errChanIn:\n\t\tmust(err)\n\tcase err = <-errChanOut:\n\t\tmust(err)\n\t}\n}\n<commit_msg>Fix double port.<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/remind101\/emp\/Godeps\/_workspace\/src\/github.com\/bgentry\/heroku-go\"\n\t\"github.com\/remind101\/emp\/Godeps\/_workspace\/src\/github.com\/docker\/docker\/pkg\/term\"\n)\n\nvar (\n\tdetachedRun bool\n\tdynoSize string\n)\n\nvar cmdRun = &Command{\n\tRun: runRun,\n\tUsage: \"run [-s <size>] [-d] <command> [<argument>...]\",\n\tNeedsApp: true,\n\tCategory: \"dyno\",\n\tShort: \"run a process in a dyno\",\n\tLong: `\nRun a process on Heroku. Flags such as` + \" `-a` \" + `may be parsed out of\nthe command unless the command is quoted or provided after a\ndouble-dash (--).\n\nOptions:\n\n -s <size> set the size for this dyno (e.g. 2X)\n -d run in detached mode instead of attached to terminal\n\nExamples:\n\n $ emp run echo \"hello\"\n Running ` + \"`echo \\\"hello\\\"`\" + ` on myapp as run.1234:\n \"hello\"\n\n $ emp run console\n Running ` + \"`console`\" + ` on myapp as run.5678:\n Loading production environment (Rails 3.2.14)\n irb(main):001:0> ...\n\n $ emp run -d -s 2X bin\/my_worker\n Ran ` + \"`bin\/my_worker`\" + ` on myapp as run.4321, detached.\n\n $ emp run -a myapp -- ls -a \/\n Running ` + \"`ls -a bin \/`\" + ` on myapp as run.8650:\n \/:\n . .. app bin dev etc home lib lib64 lost+found proc sbin tmp usr var\n`,\n}\n\nfunc init() {\n\tcmdRun.Flag.BoolVarP(&detachedRun, \"detached\", \"d\", false, \"detached\")\n\tcmdRun.Flag.StringVarP(&dynoSize, \"size\", \"s\", \"\", \"dyno size\")\n}\n\nfunc runRun(cmd *Command, args []string) {\n\tif len(args) == 0 {\n\t\tcmd.PrintUsage()\n\t\tos.Exit(2)\n\t}\n\tappname := mustApp()\n\n\tw, err := term.GetWinsize(inFd)\n\tif err != nil {\n\t\t\/\/ If syscall.TIOCGWINSZ is not supported by the device, we're\n\t\t\/\/ probably trying to run tests. Set w to some sensible default.\n\t\tif err.Error() == \"operation not supported by device\" {\n\t\t\tw = &term.Winsize{\n\t\t\t\tHeight: 20,\n\t\t\t\tWidth: 80,\n\t\t\t}\n\t\t} else {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t}\n\n\tattached := !detachedRun\n\topts := heroku.DynoCreateOpts{Attach: &attached}\n\tif attached {\n\t\tenv := map[string]string{\n\t\t\t\"COLUMNS\": strconv.Itoa(int(w.Width)),\n\t\t\t\"LINES\": strconv.Itoa(int(w.Height)),\n\t\t\t\"TERM\": os.Getenv(\"TERM\"),\n\t\t}\n\t\topts.Env = &env\n\t}\n\tif dynoSize != \"\" {\n\t\tif !strings.HasSuffix(dynoSize, \"X\") {\n\t\t\tcmd.PrintUsage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\topts.Size = &dynoSize\n\t}\n\n\tcommand := strings.Join(args, \" \")\n\tif detachedRun {\n\t\tdyno, err := client.DynoCreate(appname, command, &opts)\n\t\tmust(err)\n\n\t\tlog.Printf(\"Ran `%s` on %s as %s, detached.\", dyno.Command, appname, dyno.Name)\n\t\treturn\n\t}\n\n\tparams := struct {\n\t\tCommand string `json:\"command\"`\n\t\tAttach *bool `json:\"attach,omitempty\"`\n\t\tEnv *map[string]string `json:\"env,omitempty\"`\n\t\tSize *string `json:\"size,omitempty\"`\n\t}{\n\t\tCommand: command,\n\t\tAttach: opts.Attach,\n\t\tEnv: opts.Env,\n\t\tSize: opts.Size,\n\t}\n\treq, err := client.NewRequest(\"POST\", \"\/apps\/\"+appname+\"\/dynos\", params)\n\tmust(err)\n\n\tu, err := url.Parse(apiURL)\n\tmust(err)\n\n\tprotocol := u.Scheme\n\taddress := u.Path\n\tif protocol != \"unix\" {\n\t\tprotocol = \"tcp\"\n\t\taddress = u.Host + \":80\"\n\t}\n\n\tif u.Scheme == \"https\" {\n\t\taddress = u.Host + \":443\"\n\t}\n\n\tvar dial net.Conn\n\tif u.Scheme == \"https\" {\n\t\tdial, err = tlsDial(protocol, address, &tls.Config{})\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t} else {\n\t\tdial, err = net.Dial(protocol, address)\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t}\n\n\tclientconn := httputil.NewClientConn(dial, nil)\n\tdefer clientconn.Close()\n\t_, err = clientconn.Do(req)\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\tprintFatal(err.Error())\n\t}\n\trwc, br := clientconn.Hijack()\n\tdefer rwc.Close()\n\n\tif isTerminalIn && isTerminalOut {\n\t\tstate, err := term.SetRawTerminal(inFd)\n\t\tif err != nil {\n\t\t\tprintFatal(err.Error())\n\t\t}\n\t\tdefer term.RestoreTerminal(inFd, state)\n\t}\n\n\terrChanOut := make(chan error, 1)\n\terrChanIn := make(chan error, 1)\n\texit := make(chan bool)\n\tgo func() {\n\t\tdefer close(exit)\n\t\tdefer close(errChanOut)\n\t\tvar err error\n\t\t_, err = io.Copy(os.Stdout, br)\n\t\terrChanOut <- err\n\t}()\n\tgo func() {\n\t\t_, err := io.Copy(rwc, os.Stdin)\n\t\terrChanIn <- err\n\t\trwc.(interface {\n\t\t\tCloseWrite() error\n\t\t}).CloseWrite()\n\t}()\n\t<-exit\n\tselect {\n\tcase err = <-errChanIn:\n\t\tmust(err)\n\tcase err = <-errChanOut:\n\t\tmust(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Matthew Baird\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage elastigo\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\thostpool \"github.com\/bitly\/go-hostpool\"\n)\n\nconst (\n\tVersion = \"0.0.2\"\n\tDefaultProtocol = \"http\"\n\tDefaultDomain = \"localhost\"\n\tDefaultPort = \"9200\"\n\t\/\/ A decay duration of zero results in the default behaviour\n\tDefaultDecayDuration = 0\n)\n\ntype Conn struct {\n\t\/\/ Maintain these for backwards compatibility\n\tProtocol string\n\tDomain string\n\tClusterDomains []string\n\tPort string\n\tUsername string\n\tPassword string\n\tHosts []string\n\tClient *http.Client\n\thp hostpool.HostPool\n\tonce sync.Once\n\n\t\/\/ To compute the weighting scores, we perform a weighted average of recent response times,\n\t\/\/ over the course of `DecayDuration`. DecayDuration may be set to 0 to use the default\n\t\/\/ value of 5 minutes. The EpsilonValueCalculator uses this to calculate a score\n\t\/\/ from the weighted average response time.\n\tDecayDuration time.Duration\n}\n\nfunc NewConn() *Conn {\n\treturn &Conn{\n\t\t\/\/ Maintain these for backwards compatibility\n\t\tProtocol: DefaultProtocol,\n\t\tDomain: DefaultDomain,\n\t\tClusterDomains: []string{DefaultDomain},\n\t\tPort: DefaultPort,\n\t\tDecayDuration: time.Duration(DefaultDecayDuration * time.Second),\n\t\tClient: http.DefaultClient,\n\t}\n}\n\nfunc (c *Conn) SetPort(port string) {\n\tc.Port = port\n}\n\nfunc (c *Conn) SetHosts(newhosts []string) {\n\n\t\/\/ Store the new host list\n\tc.Hosts = newhosts\n\n\t\/\/ Reinitialise the host pool Pretty naive as this will nuke the current\n\t\/\/ hostpool, and therefore reset any scoring\n\tc.initializeHostPool()\n}\n\n\/\/ Set up the host pool to be used\nfunc (c *Conn) initializeHostPool() {\n\n\t\/\/ If no hosts are set, fallback to defaults\n\tif len(c.Hosts) == 0 {\n\t\tc.Hosts = append(c.Hosts, fmt.Sprintf(\"%s:%s\", c.Domain, c.Port))\n\t}\n\n\t\/\/ Epsilon Greedy is an algorithm that allows HostPool not only to\n\t\/\/ track failure state, but also to learn about \"better\" options in\n\t\/\/ terms of speed, and to pick from available hosts based on how well\n\t\/\/ they perform. This gives a weighted request rate to better\n\t\/\/ performing hosts, while still distributing requests to all hosts\n\t\/\/ (proportionate to their performance). The interface is the same as\n\t\/\/ the standard HostPool, but be sure to mark the HostResponse\n\t\/\/ immediately after executing the request to the host, as that will\n\t\/\/ stop the implicitly running request timer.\n\t\/\/\n\t\/\/ A good overview of Epsilon Greedy is here http:\/\/stevehanov.ca\/blog\/index.php?id=132\n\tif c.hp != nil {\n\t\tc.hp.Close()\n\t}\n\tc.hp = hostpool.NewEpsilonGreedy(\n\t\tc.Hosts, c.DecayDuration, &hostpool.LinearEpsilonValueCalculator{})\n}\n\nfunc (c *Conn) Close() {\n\tc.hp.Close()\n}\n\nfunc (c *Conn) NewRequest(method, path, query string) (*Request, error) {\n\t\/\/ Setup the hostpool on our first run\n\tc.once.Do(c.initializeHostPool)\n\n\t\/\/ Get a host from the host pool\n\thr := c.hp.Get()\n\n\t\/\/ Get the final host and port\n\thost, portNum := splitHostnamePartsFromHost(hr.Host(), c.Port)\n\n\t\/\/ Build request\n\tvar uri string\n\t\/\/ If query parameters are provided, the add them to the URL,\n\t\/\/ otherwise, leave them out\n\tif len(query) > 0 {\n\t\turi = fmt.Sprintf(\"%s:\/\/%s:%s%s?%s\", c.Protocol, host, portNum, path, query)\n\t} else {\n\t\turi = fmt.Sprintf(\"%s:\/\/%s:%s%s\", c.Protocol, host, portNum, path)\n\t}\n\treq, err := http.NewRequest(method, uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"elasticSearch\/\"+Version+\" (\"+runtime.GOOS+\"-\"+runtime.GOARCH+\")\")\n\n\tif c.Username != \"\" || c.Password != \"\" {\n\t\treq.SetBasicAuth(c.Username, c.Password)\n\t}\n\n\tnewRequest := &Request{\n\t\tRequest: req,\n\t\thostResponse: hr,\n\t\tclient: c.Client,\n\t}\n\treturn newRequest, nil\n}\n\n\/\/ Split apart the hostname on colon\n\/\/ Return the host and a default port if there is no separator\nfunc splitHostnamePartsFromHost(fullHost string, defaultPortNum string) (string, string) {\n\n\th := strings.Split(fullHost, \":\")\n\n\tif len(h) == 2 {\n\t\treturn h[0], h[1]\n\t}\n\n\treturn h[0], defaultPortNum\n}\n<commit_msg>Add content type header for requests<commit_after>\/\/ Copyright 2013 Matthew Baird\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage elastigo\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\thostpool \"github.com\/bitly\/go-hostpool\"\n)\n\nconst (\n\tVersion = \"0.0.2\"\n\tDefaultProtocol = \"http\"\n\tDefaultDomain = \"localhost\"\n\tDefaultPort = \"9200\"\n\t\/\/ A decay duration of zero results in the default behaviour\n\tDefaultDecayDuration = 0\n)\n\ntype Conn struct {\n\t\/\/ Maintain these for backwards compatibility\n\tProtocol string\n\tDomain string\n\tClusterDomains []string\n\tPort string\n\tUsername string\n\tPassword string\n\tHosts []string\n\tClient *http.Client\n\thp hostpool.HostPool\n\tonce sync.Once\n\n\t\/\/ To compute the weighting scores, we perform a weighted average of recent response times,\n\t\/\/ over the course of `DecayDuration`. DecayDuration may be set to 0 to use the default\n\t\/\/ value of 5 minutes. The EpsilonValueCalculator uses this to calculate a score\n\t\/\/ from the weighted average response time.\n\tDecayDuration time.Duration\n}\n\nfunc NewConn() *Conn {\n\treturn &Conn{\n\t\t\/\/ Maintain these for backwards compatibility\n\t\tProtocol: DefaultProtocol,\n\t\tDomain: DefaultDomain,\n\t\tClusterDomains: []string{DefaultDomain},\n\t\tPort: DefaultPort,\n\t\tDecayDuration: time.Duration(DefaultDecayDuration * time.Second),\n\t\tClient: http.DefaultClient,\n\t}\n}\n\nfunc (c *Conn) SetPort(port string) {\n\tc.Port = port\n}\n\nfunc (c *Conn) SetHosts(newhosts []string) {\n\n\t\/\/ Store the new host list\n\tc.Hosts = newhosts\n\n\t\/\/ Reinitialise the host pool Pretty naive as this will nuke the current\n\t\/\/ hostpool, and therefore reset any scoring\n\tc.initializeHostPool()\n}\n\n\/\/ Set up the host pool to be used\nfunc (c *Conn) initializeHostPool() {\n\n\t\/\/ If no hosts are set, fallback to defaults\n\tif len(c.Hosts) == 0 {\n\t\tc.Hosts = append(c.Hosts, fmt.Sprintf(\"%s:%s\", c.Domain, c.Port))\n\t}\n\n\t\/\/ Epsilon Greedy is an algorithm that allows HostPool not only to\n\t\/\/ track failure state, but also to learn about \"better\" options in\n\t\/\/ terms of speed, and to pick from available hosts based on how well\n\t\/\/ they perform. This gives a weighted request rate to better\n\t\/\/ performing hosts, while still distributing requests to all hosts\n\t\/\/ (proportionate to their performance). The interface is the same as\n\t\/\/ the standard HostPool, but be sure to mark the HostResponse\n\t\/\/ immediately after executing the request to the host, as that will\n\t\/\/ stop the implicitly running request timer.\n\t\/\/\n\t\/\/ A good overview of Epsilon Greedy is here http:\/\/stevehanov.ca\/blog\/index.php?id=132\n\tif c.hp != nil {\n\t\tc.hp.Close()\n\t}\n\tc.hp = hostpool.NewEpsilonGreedy(\n\t\tc.Hosts, c.DecayDuration, &hostpool.LinearEpsilonValueCalculator{})\n}\n\nfunc (c *Conn) Close() {\n\tc.hp.Close()\n}\n\nfunc (c *Conn) NewRequest(method, path, query string) (*Request, error) {\n\t\/\/ Setup the hostpool on our first run\n\tc.once.Do(c.initializeHostPool)\n\n\t\/\/ Get a host from the host pool\n\thr := c.hp.Get()\n\n\t\/\/ Get the final host and port\n\thost, portNum := splitHostnamePartsFromHost(hr.Host(), c.Port)\n\n\t\/\/ Build request\n\tvar uri string\n\t\/\/ If query parameters are provided, the add them to the URL,\n\t\/\/ otherwise, leave them out\n\tif len(query) > 0 {\n\t\turi = fmt.Sprintf(\"%s:\/\/%s:%s%s?%s\", c.Protocol, host, portNum, path, query)\n\t} else {\n\t\turi = fmt.Sprintf(\"%s:\/\/%s:%s%s\", c.Protocol, host, portNum, path)\n\t}\n\treq, err := http.NewRequest(method, uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\treq.Header.Add(\"User-Agent\", \"elasticSearch\/\"+Version+\" (\"+runtime.GOOS+\"-\"+runtime.GOARCH+\")\")\n\n\tif c.Username != \"\" || c.Password != \"\" {\n\t\treq.SetBasicAuth(c.Username, c.Password)\n\t}\n\n\tnewRequest := &Request{\n\t\tRequest: req,\n\t\thostResponse: hr,\n\t\tclient: c.Client,\n\t}\n\treturn newRequest, nil\n}\n\n\/\/ Split apart the hostname on colon\n\/\/ Return the host and a default port if there is no separator\nfunc splitHostnamePartsFromHost(fullHost string, defaultPortNum string) (string, string) {\n\n\th := strings.Split(fullHost, \":\")\n\n\tif len(h) == 2 {\n\t\treturn h[0], h[1]\n\t}\n\n\treturn h[0], defaultPortNum\n}\n<|endoftext|>"} {"text":"<commit_before>package sdl\n\n\/*\n#cgo LDFLAGS: -lSDL2\n#cgo CFLAGS: -O3\n#include <SDL2\/SDL.h>\n#include <stdlib.h>\n#include <string.h>\n\nint running = 1;\n\nUint8 keyboardPrev[SDL_NUM_SCANCODES];\nint numScancodes;\nconst Uint8 *keyboardCurr;\n\nSDL_GameController *controller = NULL;\nUint8 buttonsPrev[SDL_CONTROLLER_BUTTON_MAX];\nUint8 buttonsCurr[SDL_CONTROLLER_BUTTON_MAX];\n\nvoid\nhandleEvents(void) {\n\tmemcpy(keyboardPrev, keyboardCurr, numScancodes);\n\tmemcpy(buttonsPrev, buttonsCurr, SDL_CONTROLLER_BUTTON_MAX);\n\n\tif (controller != NULL && !SDL_GameControllerGetAttached(controller)) {\n\t\tSDL_GameControllerClose(controller);\n\t\tcontroller = NULL;\n\t}\n\n\tif (controller == NULL) {\n\t\tint i = 0;\n\t\tfor (i = 0; i < SDL_NumJoysticks(); i++) {\n\t\t\tif (SDL_IsGameController(i)) {\n\t\t\t\tcontroller = SDL_GameControllerOpen(i);\n\t\t\t\tif (controller != NULL) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tSDL_Event event;\n\twhile (SDL_PollEvent(&event)) {\n\t\tswitch (event.type) {\n\t\tcase SDL_QUIT:\n\t\t\trunning = 0;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tif (controller != NULL) {\n\t\tint i = 0;\n\t\tfor (i = 0; i < SDL_CONTROLLER_BUTTON_MAX; i++) {\n\t\t\tbuttonsCurr[i] = SDL_GameControllerGetButton(controller, i);\n\t\t}\n\t}\n}\n\nint\neventFilter(void *userData, SDL_Event *event) {\n\t(void)userData;\n\n\tswitch (event->type) {\n\tcase SDL_QUIT:\n\t\treturn 1;\n\t}\n\treturn 0;\n}\n\nint\ninit(Uint32 flags) {\n\tint err = SDL_Init(flags);\n\tif (err != 0) {\n\t\treturn err;\n\t}\n\tSDL_SetEventFilter(eventFilter, NULL);\n\tkeyboardCurr = SDL_GetKeyboardState(&numScancodes);\n\n\treturn 0;\n}\n\nvoid\nquit() {\n\tif (controller != NULL) {\n\t\tSDL_GameControllerClose(controller);\n\t}\n\tSDL_Quit();\n}\n\nSDL_Rect srcRect;\nSDL_Rect dstRect;\nSDL_Point center;\nint\nRenderCopyEx(SDL_Renderer *r, SDL_Texture *t, double angle, SDL_RendererFlip flip) {\n\treturn SDL_RenderCopyEx(r, t, &srcRect, &dstRect, angle, ¢er, flip);\n}\n\nSDL_Texture *\nCreateTexture(SDL_Renderer *r, int w, int h, Uint32 *pixels) {\n\tSDL_Texture *t = SDL_CreateTexture(r, SDL_PIXELFORMAT_RGBA8888,\n\t\tSDL_TEXTUREACCESS_STATIC, w, h);\n\tif (t == NULL) {\n\t\tgoto fail1;\n\t}\n\n\tint err = 0;\n\terr = SDL_SetTextureBlendMode(t, SDL_BLENDMODE_BLEND);\n\tif (err != 0) {\n\t\tgoto fail2;\n\t}\n\n\terr = SDL_UpdateTexture(t, NULL, pixels, w * sizeof(pixels[0]));\n\tif (err != 0) {\n\t\tgoto fail2;\n\t}\n\n\tif (0) {\nfail2:\n\t\tSDL_DestroyTexture(t);\nfail1:\n\t\tt = NULL;\n\t}\n\n\treturn t;\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\truntime.LockOSThread()\n}\n\nvar mainThreadFunc = make(chan func())\n\nfunc mainThreadCall(f func()) {\n\tdone := make(chan bool, 1)\n\tmainThreadFunc <- func() {\n\t\tf()\n\t\tdone <- true\n\t}\n\t<-done\n}\n\nfunc Run(run func() error) error {\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- run()\n\t}()\n\n\tvar err error\nmainThreadCallLoop:\n\tfor {\n\t\tselect {\n\t\tcase f := <-mainThreadFunc:\n\t\t\tf()\n\t\tcase err = <-done:\n\t\t\tbreak mainThreadCallLoop\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc getError() error {\n\tvar err *C.char\n\tmainThreadCall(func() {\n\t\terr = C.SDL_GetError()\n\t})\n\treturn errors.New(C.GoString(err))\n}\n\nconst (\n\tInitTimer = C.SDL_INIT_TIMER\n\tInitAudio = C.SDL_INIT_AUDIO\n\tInitVideo = C.SDL_INIT_VIDEO\n\tInitJoystick = C.SDL_INIT_JOYSTICK\n\tInitHaptic = C.SDL_INIT_HAPTIC\n\tInitGameController = C.SDL_INIT_GAMECONTROLLER\n\tInitEvents = C.SDL_INIT_EVENTS\n\tInitEverything = C.SDL_INIT_EVERYTHING\n\tInitNoParachute = C.SDL_INIT_NOPARACHUTE\n)\n\nfunc Init(flags int) error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.init(C.Uint32(flags))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\n\tinitKeyboardStateSlices()\n\tinitButtonsStateSlices()\n\n\treturn nil\n}\n\nfunc Quit() {\n\tmainThreadCall(func() {\n\t\tC.quit()\n\t})\n}\n\nvar Running = true\n\nfunc HandleEvents() {\n\tmainThreadCall(func() {\n\t\tC.handleEvents()\n\t})\n\n\tRunning = C.running == 1\n}\n\nvar keyboardPrev []uint8\nvar keyboardCurr []uint8\n\nfunc KeyHeld(scancode int) bool {\n\treturn keyboardCurr[scancode] == 1\n}\n\nfunc KeyPressed(scancode int) bool {\n\treturn (keyboardCurr[scancode] &^ keyboardPrev[scancode]) == 1\n}\n\nfunc KeyReleased(scancode int) bool {\n\treturn (keyboardPrev[scancode] &^ keyboardCurr[scancode]) == 1\n}\n\nfunc initKeyboardStateSlices() {\n\thdr := reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.keyboardPrev)),\n\t\tLen: C.SDL_NUM_SCANCODES,\n\t\tCap: C.SDL_NUM_SCANCODES,\n\t}\n\tkeyboardPrev = *(*[]uint8)(unsafe.Pointer(&hdr))\n\n\thdr = reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(C.keyboardCurr)),\n\t\tLen: C.SDL_NUM_SCANCODES,\n\t\tCap: C.SDL_NUM_SCANCODES,\n\t}\n\tkeyboardCurr = *(*[]uint8)(unsafe.Pointer(&hdr))\n}\n\nvar buttonsPrev []uint8\nvar buttonsCurr []uint8\n\nconst (\n\tButton_A = C.SDL_CONTROLLER_BUTTON_A\n\tButton_B = C.SDL_CONTROLLER_BUTTON_B\n\tButton_X = C.SDL_CONTROLLER_BUTTON_X\n\tButton_Y = C.SDL_CONTROLLER_BUTTON_Y\n\tButton_BACK = C.SDL_CONTROLLER_BUTTON_BACK\n\tButton_GUIDE = C.SDL_CONTROLLER_BUTTON_GUIDE\n\tButton_START = C.SDL_CONTROLLER_BUTTON_START\n\tButton_LEFTSTICK = C.SDL_CONTROLLER_BUTTON_LEFTSTICK\n\tButton_RIGHTSTICK = C.SDL_CONTROLLER_BUTTON_RIGHTSTICK\n\tButton_LEFTSHOULDER = C.SDL_CONTROLLER_BUTTON_LEFTSHOULDER\n\tButton_RIGHTSHOULDER = C.SDL_CONTROLLER_BUTTON_RIGHTSHOULDER\n\tButton_UP = C.SDL_CONTROLLER_BUTTON_DPAD_UP\n\tButton_DOWN = C.SDL_CONTROLLER_BUTTON_DPAD_DOWN\n\tButton_LEFT = C.SDL_CONTROLLER_BUTTON_DPAD_LEFT\n\tButton_RIGHT = C.SDL_CONTROLLER_BUTTON_DPAD_RIGHT\n)\n\nfunc ButtonHeld(button int) bool {\n\treturn buttonsCurr[button] == 1\n}\n\nfunc ButtonPressed(button int) bool {\n\treturn (buttonsCurr[button] &^ buttonsPrev[button]) == 1\n}\n\nfunc ButtonReleased(button int) bool {\n\treturn (buttonsPrev[button] &^ buttonsCurr[button]) == 1\n}\n\nfunc initButtonsStateSlices() {\n\thdr := reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.buttonsPrev)),\n\t\tLen: C.SDL_CONTROLLER_BUTTON_MAX,\n\t\tCap: C.SDL_CONTROLLER_BUTTON_MAX,\n\t}\n\tbuttonsPrev = *(*[]uint8)(unsafe.Pointer(&hdr))\n\n\thdr = reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.buttonsCurr)),\n\t\tLen: C.SDL_CONTROLLER_BUTTON_MAX,\n\t\tCap: C.SDL_CONTROLLER_BUTTON_MAX,\n\t}\n\tbuttonsCurr = *(*[]uint8)(unsafe.Pointer(&hdr))\n}\n\ntype Window C.SDL_Window\n\nconst (\n\tWindowPosCentered = C.SDL_WINDOWPOS_CENTERED\n\tWindowPosUndefined = C.SDL_WINDOWPOS_UNDEFINED\n)\n\nconst (\n\tWindowFullscreen = C.SDL_WINDOW_FULLSCREEN\n\tWindowFullscreenDesktop = C.SDL_WINDOW_FULLSCREEN_DESKTOP\n\tWindowOpenGL = C.SDL_WINDOW_OPENGL\n\tWindowHidden = C.SDL_WINDOW_HIDDEN\n\tWindowBorderless = C.SDL_WINDOW_BORDERLESS\n\tWindowResizable = C.SDL_WINDOW_RESIZABLE\n\tWindowMinimized = C.SDL_WINDOW_MINIMIZED\n\tWindowMaximized = C.SDL_WINDOW_MAXIMIZED\n\tWindowInputGrabbed = C.SDL_WINDOW_INPUT_GRABBED\n\tWindowAllowHighDPI = C.SDL_WINDOW_ALLOW_HIGHDPI\n)\n\nfunc CreateWindow(title string, x, y, w, h int, flags int) (*Window, error) {\n\tt := C.CString(title)\n\tdefer C.free(unsafe.Pointer(t))\n\n\tvar win *C.SDL_Window\n\tmainThreadCall(func() {\n\t\twin = C.SDL_CreateWindow(t, C.int(x), C.int(y), C.int(w),\n\t\t\tC.int(h), C.Uint32(flags))\n\t})\n\tif win == nil {\n\t\treturn nil, getError()\n\t}\n\treturn (*Window)(win), nil\n}\n\nfunc (w *Window) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyWindow((*C.SDL_Window)(w))\n\t})\n}\n\ntype Renderer C.SDL_Renderer\n\nconst (\n\tRendererSoftware = C.SDL_RENDERER_SOFTWARE\n\tRendererAccelerated = C.SDL_RENDERER_ACCELERATED\n\tRendererPresentVSync = C.SDL_RENDERER_PRESENTVSYNC\n\tRendererTargetTexture = C.SDL_RENDERER_TARGETTEXTURE\n)\n\nfunc CreateRenderer(w *Window, index int, flags int) (*Renderer, error) {\n\tvar r *C.SDL_Renderer\n\tmainThreadCall(func() {\n\t\tr = C.SDL_CreateRenderer((*C.SDL_Window)(w), C.int(index),\n\t\t\tC.Uint32(flags))\n\t})\n\tif r == nil {\n\t\treturn nil, getError()\n\t}\n\treturn (*Renderer)(r), nil\n}\n\nfunc (r *Renderer) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyRenderer((*C.SDL_Renderer)(r))\n\t})\n}\n\nfunc (r *Renderer) SetLogicalSize(w, h int) error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.SDL_RenderSetLogicalSize((*C.SDL_Renderer)(r), C.int(w), C.int(h))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\nfunc (r *Renderer) Clear() error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.SDL_RenderClear((*C.SDL_Renderer)(r))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\ntype Point struct {\n\tX int\n\tY int\n}\n\ntype Rect struct {\n\tX int\n\tY int\n\tW int\n\tH int\n}\n\nconst (\n\tFlipNone = C.SDL_FLIP_NONE\n\tFlipHorizontal = C.SDL_FLIP_HORIZONTAL\n\tFlipVertical = C.SDL_FLIP_VERTICAL\n)\n\nfunc (r *Renderer) CopyEx(t *Texture, src, dst *Rect, angle float64,\n\tcenter *Point, flip int) error {\n\tif src != nil {\n\t\tC.srcRect.x = C.int(src.X)\n\t\tC.srcRect.y = C.int(src.Y)\n\t\tC.srcRect.w = C.int(src.W)\n\t\tC.srcRect.h = C.int(src.H)\n\t}\n\tif dst != nil {\n\t\tC.dstRect.x = C.int(dst.X)\n\t\tC.dstRect.y = C.int(dst.Y)\n\t\tC.dstRect.w = C.int(dst.W)\n\t\tC.dstRect.h = C.int(dst.H)\n\t}\n\tif center != nil {\n\t\tC.center.x = C.int(center.X)\n\t\tC.center.y = C.int(center.Y)\n\t}\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.RenderCopyEx((*C.SDL_Renderer)(r), (*C.SDL_Texture)(t),\n\t\t\tC.double(angle), C.SDL_RendererFlip(flip))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\nfunc (r *Renderer) Present() {\n\tmainThreadCall(func() {\n\t\tC.SDL_RenderPresent((*C.SDL_Renderer)(r))\n\t})\n}\n\ntype Texture C.SDL_Texture\n\nfunc CreateTexture(r *Renderer, w, h int, pixels []uint32) (*Texture, error) {\n\tvar t *C.SDL_Texture\n\tmainThreadCall(func() {\n\t\tt = C.CreateTexture((*C.SDL_Renderer)(r), C.int(w), C.int(h),\n\t\t\t(*C.Uint32)(&pixels[0]))\n\t})\n\tif t == nil {\n\t\treturn nil, getError()\n\t}\n\n\treturn (*Texture)(t), nil\n}\n\nfunc (t *Texture) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyTexture((*C.SDL_Texture)(t))\n\t})\n}\n<commit_msg>Added SetDrawColor<commit_after>package sdl\n\n\/*\n#cgo LDFLAGS: -lSDL2\n#cgo CFLAGS: -O3\n#include <SDL2\/SDL.h>\n#include <stdlib.h>\n#include <string.h>\n\nint running = 1;\n\nUint8 keyboardPrev[SDL_NUM_SCANCODES];\nint numScancodes;\nconst Uint8 *keyboardCurr;\n\nSDL_GameController *controller = NULL;\nUint8 buttonsPrev[SDL_CONTROLLER_BUTTON_MAX];\nUint8 buttonsCurr[SDL_CONTROLLER_BUTTON_MAX];\n\nvoid\nhandleEvents(void) {\n\tmemcpy(keyboardPrev, keyboardCurr, numScancodes);\n\tmemcpy(buttonsPrev, buttonsCurr, SDL_CONTROLLER_BUTTON_MAX);\n\n\tif (controller != NULL && !SDL_GameControllerGetAttached(controller)) {\n\t\tSDL_GameControllerClose(controller);\n\t\tcontroller = NULL;\n\t}\n\n\tif (controller == NULL) {\n\t\tint i = 0;\n\t\tfor (i = 0; i < SDL_NumJoysticks(); i++) {\n\t\t\tif (SDL_IsGameController(i)) {\n\t\t\t\tcontroller = SDL_GameControllerOpen(i);\n\t\t\t\tif (controller != NULL) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tSDL_Event event;\n\twhile (SDL_PollEvent(&event)) {\n\t\tswitch (event.type) {\n\t\tcase SDL_QUIT:\n\t\t\trunning = 0;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tif (controller != NULL) {\n\t\tint i = 0;\n\t\tfor (i = 0; i < SDL_CONTROLLER_BUTTON_MAX; i++) {\n\t\t\tbuttonsCurr[i] = SDL_GameControllerGetButton(controller, i);\n\t\t}\n\t}\n}\n\nint\neventFilter(void *userData, SDL_Event *event) {\n\t(void)userData;\n\n\tswitch (event->type) {\n\tcase SDL_QUIT:\n\t\treturn 1;\n\t}\n\treturn 0;\n}\n\nint\ninit(Uint32 flags) {\n\tint err = SDL_Init(flags);\n\tif (err != 0) {\n\t\treturn err;\n\t}\n\tSDL_SetEventFilter(eventFilter, NULL);\n\tkeyboardCurr = SDL_GetKeyboardState(&numScancodes);\n\n\treturn 0;\n}\n\nvoid\nquit() {\n\tif (controller != NULL) {\n\t\tSDL_GameControllerClose(controller);\n\t}\n\tSDL_Quit();\n}\n\nSDL_Rect srcRect;\nSDL_Rect dstRect;\nSDL_Point center;\nint\nRenderCopyEx(SDL_Renderer *r, SDL_Texture *t, double angle, SDL_RendererFlip flip) {\n\treturn SDL_RenderCopyEx(r, t, &srcRect, &dstRect, angle, ¢er, flip);\n}\n\nSDL_Texture *\nCreateTexture(SDL_Renderer *r, int w, int h, Uint32 *pixels) {\n\tSDL_Texture *t = SDL_CreateTexture(r, SDL_PIXELFORMAT_RGBA8888,\n\t\tSDL_TEXTUREACCESS_STATIC, w, h);\n\tif (t == NULL) {\n\t\tgoto fail1;\n\t}\n\n\tint err = 0;\n\terr = SDL_SetTextureBlendMode(t, SDL_BLENDMODE_BLEND);\n\tif (err != 0) {\n\t\tgoto fail2;\n\t}\n\n\terr = SDL_UpdateTexture(t, NULL, pixels, w * sizeof(pixels[0]));\n\tif (err != 0) {\n\t\tgoto fail2;\n\t}\n\n\tif (0) {\nfail2:\n\t\tSDL_DestroyTexture(t);\nfail1:\n\t\tt = NULL;\n\t}\n\n\treturn t;\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\truntime.LockOSThread()\n}\n\nvar mainThreadFunc = make(chan func())\n\nfunc mainThreadCall(f func()) {\n\tdone := make(chan bool, 1)\n\tmainThreadFunc <- func() {\n\t\tf()\n\t\tdone <- true\n\t}\n\t<-done\n}\n\nfunc Run(run func() error) error {\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- run()\n\t}()\n\n\tvar err error\nmainThreadCallLoop:\n\tfor {\n\t\tselect {\n\t\tcase f := <-mainThreadFunc:\n\t\t\tf()\n\t\tcase err = <-done:\n\t\t\tbreak mainThreadCallLoop\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc getError() error {\n\tvar err *C.char\n\tmainThreadCall(func() {\n\t\terr = C.SDL_GetError()\n\t})\n\treturn errors.New(C.GoString(err))\n}\n\nconst (\n\tInitTimer = C.SDL_INIT_TIMER\n\tInitAudio = C.SDL_INIT_AUDIO\n\tInitVideo = C.SDL_INIT_VIDEO\n\tInitJoystick = C.SDL_INIT_JOYSTICK\n\tInitHaptic = C.SDL_INIT_HAPTIC\n\tInitGameController = C.SDL_INIT_GAMECONTROLLER\n\tInitEvents = C.SDL_INIT_EVENTS\n\tInitEverything = C.SDL_INIT_EVERYTHING\n\tInitNoParachute = C.SDL_INIT_NOPARACHUTE\n)\n\nfunc Init(flags int) error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.init(C.Uint32(flags))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\n\tinitKeyboardStateSlices()\n\tinitButtonsStateSlices()\n\n\treturn nil\n}\n\nfunc Quit() {\n\tmainThreadCall(func() {\n\t\tC.quit()\n\t})\n}\n\nvar Running = true\n\nfunc HandleEvents() {\n\tmainThreadCall(func() {\n\t\tC.handleEvents()\n\t})\n\n\tRunning = C.running == 1\n}\n\nvar keyboardPrev []uint8\nvar keyboardCurr []uint8\n\nfunc KeyHeld(scancode int) bool {\n\treturn keyboardCurr[scancode] == 1\n}\n\nfunc KeyPressed(scancode int) bool {\n\treturn (keyboardCurr[scancode] &^ keyboardPrev[scancode]) == 1\n}\n\nfunc KeyReleased(scancode int) bool {\n\treturn (keyboardPrev[scancode] &^ keyboardCurr[scancode]) == 1\n}\n\nfunc initKeyboardStateSlices() {\n\thdr := reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.keyboardPrev)),\n\t\tLen: C.SDL_NUM_SCANCODES,\n\t\tCap: C.SDL_NUM_SCANCODES,\n\t}\n\tkeyboardPrev = *(*[]uint8)(unsafe.Pointer(&hdr))\n\n\thdr = reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(C.keyboardCurr)),\n\t\tLen: C.SDL_NUM_SCANCODES,\n\t\tCap: C.SDL_NUM_SCANCODES,\n\t}\n\tkeyboardCurr = *(*[]uint8)(unsafe.Pointer(&hdr))\n}\n\nvar buttonsPrev []uint8\nvar buttonsCurr []uint8\n\nconst (\n\tButton_A = C.SDL_CONTROLLER_BUTTON_A\n\tButton_B = C.SDL_CONTROLLER_BUTTON_B\n\tButton_X = C.SDL_CONTROLLER_BUTTON_X\n\tButton_Y = C.SDL_CONTROLLER_BUTTON_Y\n\tButton_BACK = C.SDL_CONTROLLER_BUTTON_BACK\n\tButton_GUIDE = C.SDL_CONTROLLER_BUTTON_GUIDE\n\tButton_START = C.SDL_CONTROLLER_BUTTON_START\n\tButton_LEFTSTICK = C.SDL_CONTROLLER_BUTTON_LEFTSTICK\n\tButton_RIGHTSTICK = C.SDL_CONTROLLER_BUTTON_RIGHTSTICK\n\tButton_LEFTSHOULDER = C.SDL_CONTROLLER_BUTTON_LEFTSHOULDER\n\tButton_RIGHTSHOULDER = C.SDL_CONTROLLER_BUTTON_RIGHTSHOULDER\n\tButton_UP = C.SDL_CONTROLLER_BUTTON_DPAD_UP\n\tButton_DOWN = C.SDL_CONTROLLER_BUTTON_DPAD_DOWN\n\tButton_LEFT = C.SDL_CONTROLLER_BUTTON_DPAD_LEFT\n\tButton_RIGHT = C.SDL_CONTROLLER_BUTTON_DPAD_RIGHT\n)\n\nfunc ButtonHeld(button int) bool {\n\treturn buttonsCurr[button] == 1\n}\n\nfunc ButtonPressed(button int) bool {\n\treturn (buttonsCurr[button] &^ buttonsPrev[button]) == 1\n}\n\nfunc ButtonReleased(button int) bool {\n\treturn (buttonsPrev[button] &^ buttonsCurr[button]) == 1\n}\n\nfunc initButtonsStateSlices() {\n\thdr := reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.buttonsPrev)),\n\t\tLen: C.SDL_CONTROLLER_BUTTON_MAX,\n\t\tCap: C.SDL_CONTROLLER_BUTTON_MAX,\n\t}\n\tbuttonsPrev = *(*[]uint8)(unsafe.Pointer(&hdr))\n\n\thdr = reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(&C.buttonsCurr)),\n\t\tLen: C.SDL_CONTROLLER_BUTTON_MAX,\n\t\tCap: C.SDL_CONTROLLER_BUTTON_MAX,\n\t}\n\tbuttonsCurr = *(*[]uint8)(unsafe.Pointer(&hdr))\n}\n\ntype Window C.SDL_Window\n\nconst (\n\tWindowPosCentered = C.SDL_WINDOWPOS_CENTERED\n\tWindowPosUndefined = C.SDL_WINDOWPOS_UNDEFINED\n)\n\nconst (\n\tWindowFullscreen = C.SDL_WINDOW_FULLSCREEN\n\tWindowFullscreenDesktop = C.SDL_WINDOW_FULLSCREEN_DESKTOP\n\tWindowOpenGL = C.SDL_WINDOW_OPENGL\n\tWindowHidden = C.SDL_WINDOW_HIDDEN\n\tWindowBorderless = C.SDL_WINDOW_BORDERLESS\n\tWindowResizable = C.SDL_WINDOW_RESIZABLE\n\tWindowMinimized = C.SDL_WINDOW_MINIMIZED\n\tWindowMaximized = C.SDL_WINDOW_MAXIMIZED\n\tWindowInputGrabbed = C.SDL_WINDOW_INPUT_GRABBED\n\tWindowAllowHighDPI = C.SDL_WINDOW_ALLOW_HIGHDPI\n)\n\nfunc CreateWindow(title string, x, y, w, h int, flags int) (*Window, error) {\n\tt := C.CString(title)\n\tdefer C.free(unsafe.Pointer(t))\n\n\tvar win *C.SDL_Window\n\tmainThreadCall(func() {\n\t\twin = C.SDL_CreateWindow(t, C.int(x), C.int(y), C.int(w),\n\t\t\tC.int(h), C.Uint32(flags))\n\t})\n\tif win == nil {\n\t\treturn nil, getError()\n\t}\n\treturn (*Window)(win), nil\n}\n\nfunc (w *Window) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyWindow((*C.SDL_Window)(w))\n\t})\n}\n\ntype Renderer C.SDL_Renderer\n\nconst (\n\tRendererSoftware = C.SDL_RENDERER_SOFTWARE\n\tRendererAccelerated = C.SDL_RENDERER_ACCELERATED\n\tRendererPresentVSync = C.SDL_RENDERER_PRESENTVSYNC\n\tRendererTargetTexture = C.SDL_RENDERER_TARGETTEXTURE\n)\n\nfunc CreateRenderer(w *Window, index int, flags int) (*Renderer, error) {\n\tvar r *C.SDL_Renderer\n\tmainThreadCall(func() {\n\t\tr = C.SDL_CreateRenderer((*C.SDL_Window)(w), C.int(index),\n\t\t\tC.Uint32(flags))\n\t})\n\tif r == nil {\n\t\treturn nil, getError()\n\t}\n\treturn (*Renderer)(r), nil\n}\n\nfunc (r *Renderer) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyRenderer((*C.SDL_Renderer)(r))\n\t})\n}\n\nfunc (r *Renderer) SetDrawColor(red, green, blue, alpha uint8) error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.SDL_SetRenderDrawColor((*C.SDL_Renderer)(r),\n\t\t\tC.Uint8(red), C.Uint8(green), C.Uint8(blue), C.Uint8(alpha))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\nfunc (r *Renderer) SetLogicalSize(w, h int) error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.SDL_RenderSetLogicalSize((*C.SDL_Renderer)(r), C.int(w), C.int(h))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\nfunc (r *Renderer) Clear() error {\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.SDL_RenderClear((*C.SDL_Renderer)(r))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\ntype Point struct {\n\tX int\n\tY int\n}\n\ntype Rect struct {\n\tX int\n\tY int\n\tW int\n\tH int\n}\n\nconst (\n\tFlipNone = C.SDL_FLIP_NONE\n\tFlipHorizontal = C.SDL_FLIP_HORIZONTAL\n\tFlipVertical = C.SDL_FLIP_VERTICAL\n)\n\nfunc (r *Renderer) CopyEx(t *Texture, src, dst *Rect, angle float64,\n\tcenter *Point, flip int) error {\n\tif src != nil {\n\t\tC.srcRect.x = C.int(src.X)\n\t\tC.srcRect.y = C.int(src.Y)\n\t\tC.srcRect.w = C.int(src.W)\n\t\tC.srcRect.h = C.int(src.H)\n\t}\n\tif dst != nil {\n\t\tC.dstRect.x = C.int(dst.X)\n\t\tC.dstRect.y = C.int(dst.Y)\n\t\tC.dstRect.w = C.int(dst.W)\n\t\tC.dstRect.h = C.int(dst.H)\n\t}\n\tif center != nil {\n\t\tC.center.x = C.int(center.X)\n\t\tC.center.y = C.int(center.Y)\n\t}\n\tvar err C.int\n\tmainThreadCall(func() {\n\t\terr = C.RenderCopyEx((*C.SDL_Renderer)(r), (*C.SDL_Texture)(t),\n\t\t\tC.double(angle), C.SDL_RendererFlip(flip))\n\t})\n\tif err != 0 {\n\t\treturn getError()\n\t}\n\treturn nil\n}\n\nfunc (r *Renderer) Present() {\n\tmainThreadCall(func() {\n\t\tC.SDL_RenderPresent((*C.SDL_Renderer)(r))\n\t})\n}\n\ntype Texture C.SDL_Texture\n\nfunc CreateTexture(r *Renderer, w, h int, pixels []uint32) (*Texture, error) {\n\tvar t *C.SDL_Texture\n\tmainThreadCall(func() {\n\t\tt = C.CreateTexture((*C.SDL_Renderer)(r), C.int(w), C.int(h),\n\t\t\t(*C.Uint32)(&pixels[0]))\n\t})\n\tif t == nil {\n\t\treturn nil, getError()\n\t}\n\n\treturn (*Texture)(t), nil\n}\n\nfunc (t *Texture) Destroy() {\n\tmainThreadCall(func() {\n\t\tC.SDL_DestroyTexture((*C.SDL_Texture)(t))\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package sed\n\nimport (\n \"flag\";\n \"fmt\";\n \"io\";\n \"os\";\n \"strings\";\n \"container\/vector\";\n)\n\nconst (\n versionMajor = 0;\n versionMinor = 1;\n versionPoint = 0;\n)\n\nvar versionString string\n\nfunc init() {\n versionString = fmt.Sprintf(\"%d.%d.%d\", versionMajor, versionMinor, versionPoint)\n}\n\n\/\/var show_version = flag.Bool(\"v\", false, \"Show version information.\")\nvar show_help = flag.Bool(\"h\", false, \"Show help information.\")\nvar quiet = flag.Bool(\"n\", false, \"Don't print the pattern space at the end of each script cycle.\")\nvar script = flag.String(\"e\", \"\", \"The script used to process the input file.\")\nvar script_file = flag.String(\"f\", \"\", \"Specify a file to read as the script. Ignored if -e present\")\nvar edit_inplace = flag.Bool(\"i\", false, \"This option specifies that files are to be edited in-place. Otherwise output is printed to stdout.\")\nvar line_wrap = flag.Uint(\"l\", 70, \"Specify the default line-wrap length for the l command. A length of 0 (zero) means to never wrap long lines. If not specified, it is taken to be 70.\")\nvar unbuffered = flag.Bool(\"u\", false, \"Buffer both input and output as minimally as practical. (ignored)\")\n\n\/\/ the input file split into lines\nvar inputLines []string\nvar usageShown bool = false\nvar commands = new(vector.Vector)\nvar outputFile = os.Stdout\n\nfunc usage() {\n \/\/ only show usage once.\n if !usageShown {\n usageShown = true;\n fmt.Fprint(os.Stdout, \"sed [options] [script] input_file\\n\\n\");\n flag.PrintDefaults();\n }\n}\n\nvar inputFilename string;\n\nfunc readInputFile() {\n if flag.NArg() > 1 {\n inputFilename = flag.Arg(1)\n } else if flag.NArg() == 1 {\n inputFilename = flag.Arg(0)\n }\n if len(inputFilename) == 0 {\n fmt.Fprint(os.Stderr, \"No input file specified.\\n\\n\");\n usage();\n os.Exit(-1);\n }\n f, err := os.Open(inputFilename, os.O_RDONLY, 0);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error opening input file %s\\n\", inputFilename);\n os.Exit(-1);\n }\n b, err := io.ReadAll(f);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error reading input file %s\\n\", inputFilename);\n os.Exit(-1);\n }\n _ = f.Close();\n inputLines = strings.Split(string(b), \"\\n\", 0);\n}\n\nfunc parseScript() (err os.Error) {\n \/\/ a script may be a single command or it may be several\n scriptLines := strings.Split(*script, \"\\n\", 0);\n for idx, line := range scriptLines {\n line = strings.TrimSpace(line);\n if strings.HasPrefix(line, \"#\") || len(line) == 0 {\n \/\/ comment\n continue\n }\n \/\/ this isn't really right. There may be slashes in the regular expression\n pieces := strings.Split(line, \"\/\", 0);\n c, err := NewCmd(pieces);\n if err != nil {\n fmt.Printf(\"%v line %d: %s\\n\", err, idx+1, line);\n os.Exit(-1);\n }\n commands.Push(c);\n }\n return nil;\n}\n\nfunc printPatternSpace(s string) {\n l := len(s);\n if *line_wrap <= 0 || l < int(*line_wrap) {\n fmt.Fprintf(outputFile, \"%s\\n\", s)\n } else {\n \/\/ print the line in segments\n for i := 0; i < l; i += int(*line_wrap) {\n endOfLine := i + int(*line_wrap);\n if endOfLine > l {\n endOfLine = l\n }\n fmt.Fprintf(outputFile, \"%s\\n\", s[i:endOfLine]);\n }\n }\n}\n\nfunc process() {\n for _, patternSpace := range inputLines {\n for c := range commands.Iter() {\n out, stop, err := c.(*cmd).processLine(patternSpace);\n if err != nil {\n fmt.Printf(\"%v\\n\", err);\n os.Exit(-1);\n }\n if stop {\n break\n }\n patternSpace = out;\n }\n if !*quiet {\n printPatternSpace(patternSpace)\n }\n }\n}\n\nfunc Main() {\n flag.Parse();\n \/\/ if *show_version {\n \/\/ fmt.Fprintf(os.Stdout, \"Version: %s (c)2009 Geoffrey Clements All Rights Reserved\\n\\n\", versionString)\n \/\/ }\n if *show_help {\n usage();\n return;\n }\n \/\/ we need a script\n if len(*script) == 0 {\n \/\/ no -e so try -f\n if len(*script_file) > 0 {\n f, err := os.Open(*script_file, os.O_RDONLY, 0);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error opening file %s\\n\", *script_file);\n os.Exit(-1);\n }\n b, _ := io.ReadAll(f);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error reading script file %s\\n\", *script_file);\n os.Exit(-1);\n }\n s := string(b);\n script = &s;\n } else if flag.NArg() > 1 {\n s := flag.Arg(0);\n script = &s;\n }\n }\n\n \/\/ if script still isn't set we are screwed, exit.\n if len(*script) == 0 {\n fmt.Fprint(os.Stderr, \"No script found.\\n\\n\");\n usage();\n os.Exit(-1);\n }\n\n \/\/ actually do the processing\n readInputFile();\n parseScript();\n if *edit_inplace {\n dir, err := os.Stat(inputFilename);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error getting information about input file: %s %v\\n\", err);\n os.Exit(-1);\n }\n f, err := os.Open(inputFilename, os.O_WRONLY|os.O_TRUNC, int(dir.Mode));\n if err != nil {\n fmt.Fprint(os.Stderr, \"Error opening input file for inplace editing: %s %v\\n\", err);\n os.Exit(-1);\n }\n outputFile = f;\n }\n process();\n if *edit_inplace {\n outputFile.Close();\n }\n}\n<commit_msg>Handle more than one file on the command line<commit_after>package sed\n\nimport (\n \"flag\";\n \"fmt\";\n \"io\";\n \"os\";\n \"strings\";\n \"container\/vector\";\n)\n\nconst (\n versionMajor = 0;\n versionMinor = 1;\n versionPoint = 0;\n)\n\nvar versionString string\n\nfunc init() {\n versionString = fmt.Sprintf(\"%d.%d.%d\", versionMajor, versionMinor, versionPoint)\n}\n\nvar show_version = flag.Bool(\"v\", false, \"Show version information.\")\nvar show_help = flag.Bool(\"h\", false, \"Show help information.\")\nvar quiet = flag.Bool(\"n\", false, \"Don't print the pattern space at the end of each script cycle.\")\nvar script = flag.String(\"e\", \"\", \"The script used to process the input file.\")\nvar script_file = flag.String(\"f\", \"\", \"Specify a file to read as the script. Ignored if -e present\")\nvar edit_inplace = flag.Bool(\"i\", false, \"This option specifies that files are to be edited in-place. Otherwise output is printed to stdout.\")\nvar line_wrap = flag.Uint(\"l\", 70, \"Specify the default line-wrap length for the l command. A length of 0 (zero) means to never wrap long lines. If not specified, it is taken to be 70.\")\nvar unbuffered = flag.Bool(\"u\", false, \"Buffer both input and output as minimally as practical. (ignored)\")\n\n\/\/ the input file split into lines\nvar inputLines []string\nvar usageShown bool = false\nvar commands = new(vector.Vector)\nvar outputFile = os.Stdout\n\nfunc usage() {\n \/\/ only show usage once.\n if !usageShown {\n usageShown = true;\n fmt.Fprint(os.Stdout, \"sed [options] [script] input_file\\n\\n\");\n flag.PrintDefaults();\n }\n}\n\nvar inputFilename string;\n\nfunc readInputFile() {\n f, err := os.Open(inputFilename, os.O_RDONLY, 0);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error opening input file %s\\n\", inputFilename);\n os.Exit(-1);\n }\n b, err := io.ReadAll(f);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error reading input file %s\\n\", inputFilename);\n os.Exit(-1);\n }\n _ = f.Close();\n inputLines = strings.Split(string(b), \"\\n\", 0);\n}\n\nfunc parseScript() (err os.Error) {\n \/\/ a script may be a single command or it may be several\n scriptLines := strings.Split(*script, \"\\n\", 0);\n for idx, line := range scriptLines {\n line = strings.TrimSpace(line);\n if strings.HasPrefix(line, \"#\") || len(line) == 0 {\n \/\/ comment\n continue\n }\n \/\/ this isn't really right. There may be slashes in the regular expression\n pieces := strings.Split(line, \"\/\", 0);\n c, err := NewCmd(pieces);\n if err != nil {\n fmt.Printf(\"%v line %d: %s\\n\", err, idx+1, line);\n os.Exit(-1);\n }\n commands.Push(c);\n }\n return nil;\n}\n\nfunc printPatternSpace(s string) {\n l := len(s);\n if *line_wrap <= 0 || l < int(*line_wrap) {\n fmt.Fprintf(outputFile, \"%s\\n\", s)\n } else {\n \/\/ print the line in segments\n for i := 0; i < l; i += int(*line_wrap) {\n endOfLine := i + int(*line_wrap);\n if endOfLine > l {\n endOfLine = l\n }\n fmt.Fprintf(outputFile, \"%s\\n\", s[i:endOfLine]);\n }\n }\n}\n\nfunc process() {\n for _, patternSpace := range inputLines {\n for c := range commands.Iter() {\n out, stop, err := c.(*cmd).processLine(patternSpace);\n if err != nil {\n fmt.Printf(\"%v\\n\", err);\n os.Exit(-1);\n }\n if stop {\n break\n }\n patternSpace = out;\n }\n if !*quiet {\n printPatternSpace(patternSpace)\n }\n }\n}\n\nfunc Main() {\n flag.Parse();\n if *show_version {\n fmt.Fprintf(os.Stdout, \"Version: %s (c)2009 Geoffrey Clements All Rights Reserved\\n\\n\", versionString)\n }\n if *show_help {\n usage();\n return;\n }\n \n \/\/ the first parameter may be a script or an input file. This helps us track which\n currentFileParameter := 0;\n \n \/\/ we need a script\n if len(*script) == 0 {\n \/\/ no -e so try -f\n if len(*script_file) > 0 {\n f, err := os.Open(*script_file, os.O_RDONLY, 0);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error opening file %s\\n\", *script_file);\n os.Exit(-1);\n }\n b, _ := io.ReadAll(f);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error reading script file %s\\n\", *script_file);\n os.Exit(-1);\n }\n s := string(b);\n script = &s;\n } else if flag.NArg() > 1 {\n s := flag.Arg(0);\n script = &s;\n \/\/ first parameter was the script so move to second parameter\n currentFileParameter++;\n }\n }\n\n \/\/ if script still isn't set we are screwed, exit.\n if len(*script) == 0 {\n fmt.Fprint(os.Stderr, \"No script found.\\n\\n\");\n usage();\n os.Exit(-1);\n }\n \n \/\/ parse script\n parseScript();\n \n if currentFileParameter >= flag.NArg() {\n fmt.Fprint(os.Stderr, \"No input file specified.\\n\\n\");\n usage();\n os.Exit(-1);\n }\n \n for ; currentFileParameter < flag.NArg(); currentFileParameter++ {\n inputFilename = flag.Arg(currentFileParameter);\n \/\/ actually do the processing\n readInputFile();\n if *edit_inplace {\n dir, err := os.Stat(inputFilename);\n if err != nil {\n fmt.Fprintf(os.Stderr, \"Error getting information about input file: %s %v\\n\", err);\n os.Exit(-1);\n }\n f, err := os.Open(inputFilename, os.O_WRONLY|os.O_TRUNC, int(dir.Mode));\n if err != nil {\n fmt.Fprint(os.Stderr, \"Error opening input file for inplace editing: %s %v\\n\", err);\n os.Exit(-1);\n }\n outputFile = f;\n }\n process();\n if *edit_inplace {\n outputFile.Close();\n }\n } \n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2011-2017 gtalent2@gmail.com\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage liccor\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/monochromegane\/go-gitignore\"\n\t\"github.com\/paulvollmer\/go-verbose\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Version store the version as string\nconst Version = \"1.9.0\"\n\ntype liccorFile struct {\n\tSource []string `yaml:\"source\"`\n\tCopyrightNotice string `yaml:\"copyright_notice\"`\n\tIgnore string `yaml:\"ignore\"`\n}\n\n\/\/ Liccor the license corrector\ntype Liccor struct {\n\tLog verbose.Verbose\n\tSource []string\n\tNoticeBeforeText string\n\tNoticeAfterText string\n\tcopyrightNotice string\n\tignore gitignore.IgnoreMatcher\n}\n\n\/\/ New initialize and return a new Liccor instance\nfunc New() *Liccor {\n\tl := Liccor{}\n\t\/\/ ignore error, it's ok if there is no gitignore\n\tl.ignore, _ = gitignore.NewGitIgnore(\".gitignore\")\n\tl.Log = *verbose.New(os.Stdout, false)\n\tl.Source = []string{\".\"}\n\treturn &l\n}\n\n\/\/ Load liccor file search for a license file\nfunc (l *Liccor) LoadConfig(dir, liccorFileName string) error {\n\tl.Log.Printf(\"Search for a license file at directory '%s'\\n\", dir)\n\n\td, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find license file. %v\", err)\n\t}\n\n\tfileName := \"\"\n\tif liccorFileName == \"\" { \/\/ no specified liccor file, search for default file names\n\t\tfor _, v := range d {\n\t\t\tfileName = v.Name()\n\t\t\t\/\/ search the license file\n\t\t\tif fileName == \".liccor\" || fileName == \".liccor.yml\" || fileName == \".liccor.yaml\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif _, err := os.Stat(liccorFileName); err == nil {\n\t\t\tfileName = liccorFileName\n\t\t}\n\t}\n\n\tswitch {\n\tcase fileName == \"\":\n\t\tbreak\n\tcase strings.HasSuffix(fileName, \".yaml\"), strings.HasSuffix(fileName, \".yml\"):\n\t\tvar lf liccorFile\n\t\tdata, err := ioutil.ReadFile(dir + \"\/\" + fileName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = yaml.Unmarshal(data, &lf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tl.copyrightNotice = lf.CopyrightNotice\n\t\tif len(l.Source) == 0 {\n\t\t\tl.Source = lf.Source\n\t\t}\n\t\tif lf.Ignore != \"\" {\n\t\t\tl.ignore = gitignore.NewGitIgnoreFromReader(\"\", strings.NewReader(lf.Ignore))\n\t\t}\n\t\treturn err\n\tdefault:\n\t\tcopyrightNotice, err := ioutil.ReadFile(dir + \"\/\" + fileName)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Could not access \" + fileName + \" file\")\n\t\t}\n\t\tl.Log.Printf(\"License file '%s' found...\\n\", fileName)\n\t\tl.copyrightNotice = string(copyrightNotice)\n\t\tl.copyrightNotice = l.copyrightNotice[:len(l.copyrightNotice)-1]\n\t\treturn err\n\t}\n\n\treturn l.LoadConfig(dir+\".\/.\", liccorFileName)\n}\n\n\/\/ FindSrcFiles search for source files\nfunc (l *Liccor) FindSrcFiles(dir string) ([]string, error) {\n\tl.Log.Printf(\"Search source files at '%s'\\n\", dir)\n\n\td, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar output []string\n\tfor _, v := range d {\n\t\tif v.IsDir() {\n\t\t\t\/\/ ignore .git dir\n\t\t\tif v.Name() != \".git\" {\n\t\t\t\tfiles, err2 := l.FindSrcFiles(dir + \"\/\" + v.Name())\n\t\t\t\tif err2 != nil {\n\t\t\t\t\treturn output, err2\n\t\t\t\t}\n\t\t\t\tfor _, v2 := range files {\n\t\t\t\t\toutput = append(output, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tpt := strings.LastIndex(v.Name(), \".\")\n\t\t\tif pt == -1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch v.Name()[pt:] {\n\t\t\tcase SuffixGO, SuffixC, SuffixCPP, SuffixCXX, SuffixH, SuffixHPP, SuffixJAVA, SuffixJS:\n\t\t\t\tsrcPath := dir + \"\/\" + v.Name()\n\t\t\t\toutput = append(output, srcPath)\n\t\t\t\tl.Log.Printf(\"Found source '%s'\\n\", srcPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn output, err\n}\n\n\/\/ HasLicense check if sourcecode has a license at the top\nfunc (l *Liccor) HasLicense(file string) (bool, int) {\n\tfor i, c := range file {\n\t\tswitch c {\n\t\tcase ' ', '\\t', '\\n':\n\t\t\tcontinue\n\t\tcase '\/':\n\t\t\ti++\n\t\t\tif len(file) > i && file[i] == '*' {\n\t\t\t\treturn true, i\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false, -1\n\t\t}\n\t}\n\treturn false, -1\n}\n\n\/\/ Correct a source file license\nfunc (l *Liccor) Correct(path, license string) (bool, error) {\n\tinput, err := ioutil.ReadFile(path)\n\tif err != nil || (l.ignore != nil && l.ignore.Match(path, false)) {\n\t\treturn false, err\n\t}\n\tfile := string(input)\n\torig := file\n\tif hasLicense, licenseStart := l.HasLicense(file); hasLicense {\n\t\t\/\/remove old license\n\t\tfor i := licenseStart; i < len(file); i++ {\n\t\t\tif file[i] == '*' && file[i+1] == '\/' {\n\t\t\t\ti += 2\n\t\t\t\tif file[i] == '\\n' {\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t\tfile = file[i:len(file)]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tfile = license + file\n\toutput := []byte(file)\n\tif file != orig {\n\t\terr = ioutil.WriteFile(path, output, 0)\n\t\treturn true, err\n\t}\n\treturn false, nil\n}\n\n\/\/ Process run the liccor magic\nfunc (l *Liccor) Process() {\n\tif l.NoticeBeforeText != \"\" {\n\t\tl.Log.Printf(\"License before text set to '%s'\\n\", l.NoticeBeforeText)\n\t\tl.copyrightNotice = l.NoticeBeforeText + \"\\n\" + l.copyrightNotice\n\t}\n\tif l.NoticeAfterText != \"\" {\n\t\tl.Log.Printf(\"License after text set to '%s'\\n\", l.NoticeAfterText)\n\t\tl.copyrightNotice = l.copyrightNotice + \"\\n\" + l.NoticeAfterText\n\t}\n\n\tlics := make(map[string]string)\n\tclike := \"\/*\\n * \" + strings.Replace(string(l.copyrightNotice), \"\\n\", \"\\n * \", -1) + \"\\n *\/\\n\"\n\tlics[\"c-like\"] = strings.Replace(clike, \"\\n * \\n\", \"\\n *\\n\", -1)\n\tlics[\"go\"] = func() string {\n\t\tgolic := \"\/*\\n \" + strings.Replace(string(l.copyrightNotice), \"\\n\", \"\\n \", -1) + \"\\n*\/\\n\"\n\t\tgolic = strings.Replace(golic, \"\\n \\n\", \"\\n\\n\", -1)\n\t\treturn golic\n\t}()\n\n\tallSuccess := true\n\tfor _, source := range l.Source {\n\t\tfiles, err := l.FindSrcFiles(source)\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"Error encountered while searching \"+source+\", %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := 0; i < len(files); i++ {\n\t\t\tpt := strings.LastIndex(files[i], \".\")\n\t\t\tlic := \"\"\n\t\t\t\/\/determine how to format the license\n\t\t\tswitch files[i][pt:] {\n\t\t\tcase SuffixGO:\n\t\t\t\tlic = lics[\"go\"]\n\t\t\tcase SuffixC, SuffixCPP, SuffixCXX, SuffixH, SuffixHPP, SuffixJAVA, SuffixJS:\n\t\t\t\tlic = lics[\"c-like\"]\n\t\t\t}\n\t\t\tchanged, err := l.Correct(files[i], lic)\n\t\t\tif changed {\n\t\t\t\tvar file string\n\t\t\t\tif files[i][:2] == \".\/\" {\n\t\t\t\t\tfile = files[i][2:]\n\t\t\t\t} else {\n\t\t\t\t\tfile = files[i]\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Correcting '\" + file + \"'... Failure!\")\n\t\t\t\t\tallSuccess = false\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Correcting '\" + file + \"'... Success!\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif allSuccess {\n\t\tfmt.Println(\"All files up to date!\")\n\t}\n}\n<commit_msg>Fix issue with reading Sources from .liccor.yml<commit_after>\/*\n Copyright 2011-2017 gtalent2@gmail.com\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage liccor\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/monochromegane\/go-gitignore\"\n\t\"github.com\/paulvollmer\/go-verbose\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Version store the version as string\nconst Version = \"1.9.0\"\n\ntype liccorFile struct {\n\tSource []string `yaml:\"source\"`\n\tCopyrightNotice string `yaml:\"copyright_notice\"`\n\tIgnore string `yaml:\"ignore\"`\n}\n\n\/\/ Liccor the license corrector\ntype Liccor struct {\n\tLog verbose.Verbose\n\tSource []string\n\tNoticeBeforeText string\n\tNoticeAfterText string\n\tcopyrightNotice string\n\tignore gitignore.IgnoreMatcher\n}\n\n\/\/ New initialize and return a new Liccor instance\nfunc New() *Liccor {\n\tl := Liccor{}\n\t\/\/ ignore error, it's ok if there is no gitignore\n\tl.ignore, _ = gitignore.NewGitIgnore(\".gitignore\")\n\tl.Log = *verbose.New(os.Stdout, false)\n\tl.Source = []string{\".\"}\n\treturn &l\n}\n\n\/\/ Load liccor file search for a license file\nfunc (l *Liccor) LoadConfig(dir, liccorFileName string) error {\n\tl.Log.Printf(\"Search for a license file at directory '%s'\\n\", dir)\n\n\td, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find license file. %v\", err)\n\t}\n\n\tfileName := \"\"\n\tif liccorFileName == \"\" { \/\/ no specified liccor file, search for default file names\n\t\tfor _, v := range d {\n\t\t\tfileName = v.Name()\n\t\t\t\/\/ search the license file\n\t\t\tif fileName == \".liccor\" || fileName == \".liccor.yml\" || fileName == \".liccor.yaml\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif _, err := os.Stat(liccorFileName); err == nil {\n\t\t\tfileName = liccorFileName\n\t\t}\n\t}\n\n\tswitch {\n\tcase fileName == \"\":\n\t\tbreak\n\tcase strings.HasSuffix(fileName, \".yaml\"), strings.HasSuffix(fileName, \".yml\"):\n\t\tvar lf liccorFile\n\t\tdata, err := ioutil.ReadFile(dir + \"\/\" + fileName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = yaml.Unmarshal(data, &lf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tl.copyrightNotice = lf.CopyrightNotice\n\t\tl.Source = lf.Source\n\t\tif lf.Ignore != \"\" {\n\t\t\tl.ignore = gitignore.NewGitIgnoreFromReader(\"\", strings.NewReader(lf.Ignore))\n\t\t}\n\t\treturn err\n\tdefault:\n\t\tcopyrightNotice, err := ioutil.ReadFile(dir + \"\/\" + fileName)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Could not access \" + fileName + \" file\")\n\t\t}\n\t\tl.Log.Printf(\"License file '%s' found...\\n\", fileName)\n\t\tl.copyrightNotice = string(copyrightNotice)\n\t\tl.copyrightNotice = l.copyrightNotice[:len(l.copyrightNotice)-1]\n\t\treturn err\n\t}\n\n\treturn l.LoadConfig(dir+\".\/.\", liccorFileName)\n}\n\n\/\/ FindSrcFiles search for source files\nfunc (l *Liccor) FindSrcFiles(dir string) ([]string, error) {\n\tl.Log.Printf(\"Search source files at '%s'\\n\", dir)\n\n\td, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar output []string\n\tfor _, v := range d {\n\t\tif v.IsDir() {\n\t\t\t\/\/ ignore .git dir\n\t\t\tif v.Name() != \".git\" {\n\t\t\t\tfiles, err2 := l.FindSrcFiles(dir + \"\/\" + v.Name())\n\t\t\t\tif err2 != nil {\n\t\t\t\t\treturn output, err2\n\t\t\t\t}\n\t\t\t\tfor _, v2 := range files {\n\t\t\t\t\toutput = append(output, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tpt := strings.LastIndex(v.Name(), \".\")\n\t\t\tif pt == -1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch v.Name()[pt:] {\n\t\t\tcase SuffixGO, SuffixC, SuffixCPP, SuffixCXX, SuffixH, SuffixHPP, SuffixJAVA, SuffixJS:\n\t\t\t\tsrcPath := dir + \"\/\" + v.Name()\n\t\t\t\toutput = append(output, srcPath)\n\t\t\t\tl.Log.Printf(\"Found source '%s'\\n\", srcPath)\n\t\t\t}\n\t\t}\n\t}\n\treturn output, err\n}\n\n\/\/ HasLicense check if sourcecode has a license at the top\nfunc (l *Liccor) HasLicense(file string) (bool, int) {\n\tfor i, c := range file {\n\t\tswitch c {\n\t\tcase ' ', '\\t', '\\n':\n\t\t\tcontinue\n\t\tcase '\/':\n\t\t\ti++\n\t\t\tif len(file) > i && file[i] == '*' {\n\t\t\t\treturn true, i\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false, -1\n\t\t}\n\t}\n\treturn false, -1\n}\n\n\/\/ Correct a source file license\nfunc (l *Liccor) Correct(path, license string) (bool, error) {\n\tinput, err := ioutil.ReadFile(path)\n\tif err != nil || (l.ignore != nil && l.ignore.Match(path, false)) {\n\t\treturn false, err\n\t}\n\tfile := string(input)\n\torig := file\n\tif hasLicense, licenseStart := l.HasLicense(file); hasLicense {\n\t\t\/\/remove old license\n\t\tfor i := licenseStart; i < len(file); i++ {\n\t\t\tif file[i] == '*' && file[i+1] == '\/' {\n\t\t\t\ti += 2\n\t\t\t\tif file[i] == '\\n' {\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t\tfile = file[i:len(file)]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tfile = license + file\n\toutput := []byte(file)\n\tif file != orig {\n\t\terr = ioutil.WriteFile(path, output, 0)\n\t\treturn true, err\n\t}\n\treturn false, nil\n}\n\n\/\/ Process run the liccor magic\nfunc (l *Liccor) Process() {\n\tif l.NoticeBeforeText != \"\" {\n\t\tl.Log.Printf(\"License before text set to '%s'\\n\", l.NoticeBeforeText)\n\t\tl.copyrightNotice = l.NoticeBeforeText + \"\\n\" + l.copyrightNotice\n\t}\n\tif l.NoticeAfterText != \"\" {\n\t\tl.Log.Printf(\"License after text set to '%s'\\n\", l.NoticeAfterText)\n\t\tl.copyrightNotice = l.copyrightNotice + \"\\n\" + l.NoticeAfterText\n\t}\n\n\tlics := make(map[string]string)\n\tclike := \"\/*\\n * \" + strings.Replace(string(l.copyrightNotice), \"\\n\", \"\\n * \", -1) + \"\\n *\/\\n\"\n\tlics[\"c-like\"] = strings.Replace(clike, \"\\n * \\n\", \"\\n *\\n\", -1)\n\tlics[\"go\"] = func() string {\n\t\tgolic := \"\/*\\n \" + strings.Replace(string(l.copyrightNotice), \"\\n\", \"\\n \", -1) + \"\\n*\/\\n\"\n\t\tgolic = strings.Replace(golic, \"\\n \\n\", \"\\n\\n\", -1)\n\t\treturn golic\n\t}()\n\n\tallSuccess := true\n\tfor _, source := range l.Source {\n\t\tfiles, err := l.FindSrcFiles(source)\n\t\tif err != nil {\n\t\t\tfmt.Errorf(\"Error encountered while searching \"+source+\", %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := 0; i < len(files); i++ {\n\t\t\tpt := strings.LastIndex(files[i], \".\")\n\t\t\tlic := \"\"\n\t\t\t\/\/determine how to format the license\n\t\t\tswitch files[i][pt:] {\n\t\t\tcase SuffixGO:\n\t\t\t\tlic = lics[\"go\"]\n\t\t\tcase SuffixC, SuffixCPP, SuffixCXX, SuffixH, SuffixHPP, SuffixJAVA, SuffixJS:\n\t\t\t\tlic = lics[\"c-like\"]\n\t\t\t}\n\t\t\tchanged, err := l.Correct(files[i], lic)\n\t\t\tif changed {\n\t\t\t\tvar file string\n\t\t\t\tif files[i][:2] == \".\/\" {\n\t\t\t\t\tfile = files[i][2:]\n\t\t\t\t} else {\n\t\t\t\t\tfile = files[i]\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Correcting '\" + file + \"'... Failure!\")\n\t\t\t\t\tallSuccess = false\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Correcting '\" + file + \"'... Success!\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif allSuccess {\n\t\tfmt.Println(\"All files up to date!\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package netconf\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/flynn\/go-shlex\"\n\n\t\"github.com\/ryanuber\/go-glob\"\n\t\"github.com\/vishvananda\/netlink\"\n)\n\nconst (\n\tCONF = \"\/var\/lib\/rancher\/conf\"\n\tMODE = \"mode\"\n)\n\nvar (\n\tdefaultDhcpArgs = []string{\"dhcpcd\", \"-MA4\", \"-e\", \"force_hostname=true\"}\n)\n\nfunc createInterfaces(netCfg *NetworkConfig) {\n\tconfigured := map[string]bool{}\n\n\tfor name, iface := range netCfg.Interfaces {\n\t\tif iface.Bridge == \"true\" {\n\t\t\tif _, err := NewBridge(name); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bridge %s: %v\", name, err)\n\t\t\t}\n\t\t} else if iface.Bridge != \"\" {\n\t\t\tif _, err := NewBridge(iface.Bridge); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bridge %s: %v\", iface.Bridge, err)\n\t\t\t}\n\t\t} else if iface.Bond != \"\" {\n\t\t\tbond, err := Bond(iface.Bond)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bond %s: %v\", iface.Bond, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !configured[iface.Bond] {\n\t\t\t\tif bondIface, ok := netCfg.Interfaces[iface.Bond]; ok {\n\t\t\t\t\t\/\/ Other settings depends on mode, so set it first\n\t\t\t\t\tif v, ok := bondIface.BondOpts[MODE]; ok {\n\t\t\t\t\t\tbond.Opt(MODE, v)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range bondIface.BondOpts {\n\t\t\t\t\t\tif k != MODE {\n\t\t\t\t\t\t\tbond.Opt(k, v)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tconfigured[iface.Bond] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc createSlaveInterfaces(netCfg *NetworkConfig) {\n\tlinks, err := netlink.LinkList()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to list links: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, link := range links {\n\t\tmatch, ok := findMatch(link, netCfg)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tvlanDefs, err := ParseVlanDefinitions(match.Vlans)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to create vlans on device %s: %v\", link.Attrs().Name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, vlanDef := range vlanDefs {\n\t\t\tif _, err = NewVlan(link, vlanDef.Name, vlanDef.Id); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create vlans on device %s, id %d: %v\", link.Attrs().Name, vlanDef.Id, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc findMatch(link netlink.Link, netCfg *NetworkConfig) (InterfaceConfig, bool) {\n\tlinkName := link.Attrs().Name\n\tvar match InterfaceConfig\n\texactMatch := false\n\tfound := false\n\n\tfor key, netConf := range netCfg.Interfaces {\n\t\tif netConf.Match == \"\" {\n\t\t\tnetConf.Match = key\n\t\t}\n\n\t\tif netConf.Match == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(netConf.Match, \"mac\") {\n\t\t\thaAddr, err := net.ParseMAC(netConf.Match[4:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to parse mac %s: %v\", netConf.Match[4:], err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Don't match mac address of the bond because it is the same as the slave\n\t\t\tif bytes.Compare(haAddr, link.Attrs().HardwareAddr) == 0 && link.Attrs().Name != netConf.Bond {\n\t\t\t\t\/\/ MAC address match is used over all other matches\n\t\t\t\treturn netConf, true\n\t\t\t}\n\t\t}\n\n\t\tif !exactMatch && glob.Glob(netConf.Match, linkName) {\n\t\t\tmatch = netConf\n\t\t\tfound = true\n\t\t}\n\n\t\tif netConf.Match == linkName {\n\t\t\t\/\/ Found exact match, use it over wildcard match\n\t\t\tmatch = netConf\n\t\t\texactMatch = true\n\t\t}\n\t}\n\n\treturn match, exactMatch || found\n}\n\nfunc populateDefault(netCfg *NetworkConfig) {\n\tif netCfg.Interfaces == nil {\n\t\tnetCfg.Interfaces = map[string]InterfaceConfig{}\n\t}\n\n\tif len(netCfg.Interfaces) == 0 {\n\t\tnetCfg.Interfaces[\"eth*\"] = InterfaceConfig{\n\t\t\tDHCP: true,\n\t\t}\n\t}\n\n\tif _, ok := netCfg.Interfaces[\"lo\"]; !ok {\n\t\tnetCfg.Interfaces[\"lo\"] = InterfaceConfig{\n\t\t\tAddress: \"127.0.0.1\/8\",\n\t\t}\n\t}\n}\n\nfunc ApplyNetworkConfigs(netCfg *NetworkConfig) error {\n\tpopulateDefault(netCfg)\n\n\tlog.Debugf(\"Config: %#v\", netCfg)\n\trunCmds(netCfg.PreCmds, \"\")\n\n\tcreateInterfaces(netCfg)\n\n\tcreateSlaveInterfaces(netCfg)\n\n\tlinks, err := netlink.LinkList()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdhcpLinks := map[string]string{}\n\n\t\/\/apply network config\n\tfor _, link := range links {\n\t\tlinkName := link.Attrs().Name\n\t\tif match, ok := findMatch(link, netCfg); ok {\n\t\t\tif match.DHCP {\n\t\t\t\tdhcpLinks[link.Attrs().Name] = match.DHCPArgs\n\t\t\t} else if err = applyInterfaceConfig(link, match); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to apply settings to %s : %v\", linkName, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/run dhcp\n\twg := sync.WaitGroup{}\n\tfor iface, args := range dhcpLinks {\n\t\twg.Add(1)\n\t\tgo func(iface, args string) {\n\t\t\trunDhcp(iface, args)\n\t\t\twg.Done()\n\t\t}(iface, args)\n\t}\n\twg.Wait()\n\n\trunCmds(netCfg.PostCmds, \"\")\n\treturn err\n}\n\nfunc runDhcp(iface string, argstr string) {\n\tlog.Infof(\"Running DHCP on %s\", iface)\n\targs := []string{}\n\tif argstr != \"\" {\n\t\tvar err error\n\t\targs, err = shlex.Split(argstr)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse [%s]: %v\", argstr, err)\n\t\t}\n\t}\n\tif len(args) == 0 {\n\t\targs = defaultDhcpArgs\n\t}\n\n\targs = append(args, iface)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Error(err)\n\t}\n}\n\nfunc linkUp(link netlink.Link, netConf InterfaceConfig) error {\n\tif err := netlink.LinkSetUp(link); err != nil {\n\t\tlog.Errorf(\"failed to setup link: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc applyAddress(address string, link netlink.Link, netConf InterfaceConfig) error {\n\taddr, err := netlink.ParseAddr(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := netlink.AddrAdd(link, addr); err == syscall.EEXIST {\n\t\t\/\/Ignore this error\n\t} else if err != nil {\n\t\tlog.Errorf(\"addr add failed: %v\", err)\n\t} else {\n\t\tlog.Infof(\"Set %s on %s\", netConf.Address, link.Attrs().Name)\n\t}\n\n\treturn nil\n}\n\nfunc setGateway(gateway string) error {\n\tif gateway == \"\" {\n\t\treturn nil\n\t}\n\n\tgatewayIp := net.ParseIP(gateway)\n\tif gatewayIp == nil {\n\t\treturn errors.New(\"Invalid gateway address \" + gateway)\n\t}\n\n\troute := netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tGw: gatewayIp,\n\t}\n\n\tif err := netlink.RouteAdd(&route); err == syscall.EEXIST {\n\t\t\/\/Ignore this error\n\t} else if err != nil {\n\t\tlog.Errorf(\"gateway set failed: %v\", err)\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Set default gateway %s\", gateway)\n\treturn nil\n}\n\nfunc applyInterfaceConfig(link netlink.Link, netConf InterfaceConfig) error {\n\tif netConf.Bond != \"\" {\n\t\tb, err := Bond(netConf.Bond)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := b.AddSlave(link.Attrs().Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif netConf.Bridge != \"\" && netConf.Bridge != \"true\" {\n\t\tb, err := NewBridge(netConf.Bridge)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := b.AddLink(link); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif netConf.IPV4LL {\n\t\tif err := AssignLinkLocalIP(link); err != nil {\n\t\t\tlog.Errorf(\"IPV4LL set failed: %v\", err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\taddresses := []string{}\n\n\t\tif netConf.Address != \"\" {\n\t\t\taddresses = append(addresses, netConf.Address)\n\t\t}\n\n\t\tif len(netConf.Addresses) > 0 {\n\t\t\taddresses = append(addresses, netConf.Addresses...)\n\t\t}\n\n\t\tfor _, address := range addresses {\n\t\t\terr := applyAddress(address, link, netConf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to apply address %s to %s: %v\", address, link.Attrs().Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif netConf.MTU > 0 {\n\t\tif err := netlink.LinkSetMTU(link, netConf.MTU); err != nil {\n\t\t\tlog.Errorf(\"set MTU Failed: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\trunCmds(netConf.PreUp, link.Attrs().Name)\n\n\tif err := linkUp(link, netConf); err != nil {\n\t\treturn err\n\t}\n\n\tif err := setGateway(netConf.Gateway); err != nil {\n\t\tlog.Errorf(\"Fail to set gateway %s\", netConf.Gateway)\n\t}\n\n\tif err := setGateway(netConf.GatewayIpv6); err != nil {\n\t\tlog.Errorf(\"Fail to set gateway %s\", netConf.Gateway)\n\t}\n\n\trunCmds(netConf.PostUp, link.Attrs().Name)\n\n\treturn nil\n}\n\nfunc runCmds(cmds []string, iface string) {\n\tfor _, cmd := range cmds {\n\t\tcmd = strings.TrimSpace(cmd)\n\t\tif cmd == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\targs, err := shlex.Split(strings.Replace(cmd, \"$iface\", iface, -1))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse command [%s]: %v\", cmd, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Running command %s %v\", args[0], args[1:])\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlog.Errorf(\"Failed to run command [%s]: %v\", cmd, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<commit_msg>Bring interfaces down before applying bond configuration<commit_after>package netconf\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/flynn\/go-shlex\"\n\n\t\"github.com\/ryanuber\/go-glob\"\n\t\"github.com\/vishvananda\/netlink\"\n)\n\nconst (\n\tCONF = \"\/var\/lib\/rancher\/conf\"\n\tMODE = \"mode\"\n)\n\nvar (\n\tdefaultDhcpArgs = []string{\"dhcpcd\", \"-MA4\", \"-e\", \"force_hostname=true\"}\n)\n\nfunc createInterfaces(netCfg *NetworkConfig) {\n\tconfigured := map[string]bool{}\n\n\tfor name, iface := range netCfg.Interfaces {\n\t\tif iface.Bridge == \"true\" {\n\t\t\tif _, err := NewBridge(name); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bridge %s: %v\", name, err)\n\t\t\t}\n\t\t} else if iface.Bridge != \"\" {\n\t\t\tif _, err := NewBridge(iface.Bridge); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bridge %s: %v\", iface.Bridge, err)\n\t\t\t}\n\t\t} else if iface.Bond != \"\" {\n\t\t\tbond, err := Bond(iface.Bond)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create bond %s: %v\", iface.Bond, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !configured[iface.Bond] {\n\t\t\t\tif bondIface, ok := netCfg.Interfaces[iface.Bond]; ok {\n\t\t\t\t\t\/\/ Other settings depends on mode, so set it first\n\t\t\t\t\tif v, ok := bondIface.BondOpts[MODE]; ok {\n\t\t\t\t\t\tbond.Opt(MODE, v)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range bondIface.BondOpts {\n\t\t\t\t\t\tif k != MODE {\n\t\t\t\t\t\t\tbond.Opt(k, v)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tconfigured[iface.Bond] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc createSlaveInterfaces(netCfg *NetworkConfig) {\n\tlinks, err := netlink.LinkList()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to list links: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, link := range links {\n\t\tmatch, ok := findMatch(link, netCfg)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tvlanDefs, err := ParseVlanDefinitions(match.Vlans)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to create vlans on device %s: %v\", link.Attrs().Name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, vlanDef := range vlanDefs {\n\t\t\tif _, err = NewVlan(link, vlanDef.Name, vlanDef.Id); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to create vlans on device %s, id %d: %v\", link.Attrs().Name, vlanDef.Id, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc findMatch(link netlink.Link, netCfg *NetworkConfig) (InterfaceConfig, bool) {\n\tlinkName := link.Attrs().Name\n\tvar match InterfaceConfig\n\texactMatch := false\n\tfound := false\n\n\tfor key, netConf := range netCfg.Interfaces {\n\t\tif netConf.Match == \"\" {\n\t\t\tnetConf.Match = key\n\t\t}\n\n\t\tif netConf.Match == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(netConf.Match, \"mac\") {\n\t\t\thaAddr, err := net.ParseMAC(netConf.Match[4:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to parse mac %s: %v\", netConf.Match[4:], err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Don't match mac address of the bond because it is the same as the slave\n\t\t\tif bytes.Compare(haAddr, link.Attrs().HardwareAddr) == 0 && link.Attrs().Name != netConf.Bond {\n\t\t\t\t\/\/ MAC address match is used over all other matches\n\t\t\t\treturn netConf, true\n\t\t\t}\n\t\t}\n\n\t\tif !exactMatch && glob.Glob(netConf.Match, linkName) {\n\t\t\tmatch = netConf\n\t\t\tfound = true\n\t\t}\n\n\t\tif netConf.Match == linkName {\n\t\t\t\/\/ Found exact match, use it over wildcard match\n\t\t\tmatch = netConf\n\t\t\texactMatch = true\n\t\t}\n\t}\n\n\treturn match, exactMatch || found\n}\n\nfunc populateDefault(netCfg *NetworkConfig) {\n\tif netCfg.Interfaces == nil {\n\t\tnetCfg.Interfaces = map[string]InterfaceConfig{}\n\t}\n\n\tif len(netCfg.Interfaces) == 0 {\n\t\tnetCfg.Interfaces[\"eth*\"] = InterfaceConfig{\n\t\t\tDHCP: true,\n\t\t}\n\t}\n\n\tif _, ok := netCfg.Interfaces[\"lo\"]; !ok {\n\t\tnetCfg.Interfaces[\"lo\"] = InterfaceConfig{\n\t\t\tAddress: \"127.0.0.1\/8\",\n\t\t}\n\t}\n}\n\nfunc ApplyNetworkConfigs(netCfg *NetworkConfig) error {\n\tpopulateDefault(netCfg)\n\n\tlog.Debugf(\"Config: %#v\", netCfg)\n\trunCmds(netCfg.PreCmds, \"\")\n\n\tcreateInterfaces(netCfg)\n\n\tcreateSlaveInterfaces(netCfg)\n\n\tlinks, err := netlink.LinkList()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdhcpLinks := map[string]string{}\n\n\t\/\/apply network config\n\tfor _, link := range links {\n\t\tlinkName := link.Attrs().Name\n\t\tif match, ok := findMatch(link, netCfg); ok {\n\t\t\tif match.DHCP {\n\t\t\t\tdhcpLinks[link.Attrs().Name] = match.DHCPArgs\n\t\t\t} else if err = applyInterfaceConfig(link, match); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to apply settings to %s : %v\", linkName, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/run dhcp\n\twg := sync.WaitGroup{}\n\tfor iface, args := range dhcpLinks {\n\t\twg.Add(1)\n\t\tgo func(iface, args string) {\n\t\t\trunDhcp(iface, args)\n\t\t\twg.Done()\n\t\t}(iface, args)\n\t}\n\twg.Wait()\n\n\trunCmds(netCfg.PostCmds, \"\")\n\treturn err\n}\n\nfunc runDhcp(iface string, argstr string) {\n\tlog.Infof(\"Running DHCP on %s\", iface)\n\targs := []string{}\n\tif argstr != \"\" {\n\t\tvar err error\n\t\targs, err = shlex.Split(argstr)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse [%s]: %v\", argstr, err)\n\t\t}\n\t}\n\tif len(args) == 0 {\n\t\targs = defaultDhcpArgs\n\t}\n\n\targs = append(args, iface)\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Error(err)\n\t}\n}\n\nfunc linkUp(link netlink.Link, netConf InterfaceConfig) error {\n\tif err := netlink.LinkSetUp(link); err != nil {\n\t\tlog.Errorf(\"failed to setup link: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc applyAddress(address string, link netlink.Link, netConf InterfaceConfig) error {\n\taddr, err := netlink.ParseAddr(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := netlink.AddrAdd(link, addr); err == syscall.EEXIST {\n\t\t\/\/Ignore this error\n\t} else if err != nil {\n\t\tlog.Errorf(\"addr add failed: %v\", err)\n\t} else {\n\t\tlog.Infof(\"Set %s on %s\", netConf.Address, link.Attrs().Name)\n\t}\n\n\treturn nil\n}\n\nfunc setGateway(gateway string) error {\n\tif gateway == \"\" {\n\t\treturn nil\n\t}\n\n\tgatewayIp := net.ParseIP(gateway)\n\tif gatewayIp == nil {\n\t\treturn errors.New(\"Invalid gateway address \" + gateway)\n\t}\n\n\troute := netlink.Route{\n\t\tScope: netlink.SCOPE_UNIVERSE,\n\t\tGw: gatewayIp,\n\t}\n\n\tif err := netlink.RouteAdd(&route); err == syscall.EEXIST {\n\t\t\/\/Ignore this error\n\t} else if err != nil {\n\t\tlog.Errorf(\"gateway set failed: %v\", err)\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Set default gateway %s\", gateway)\n\treturn nil\n}\n\nfunc applyInterfaceConfig(link netlink.Link, netConf InterfaceConfig) error {\n\tif netConf.Bond != \"\" {\n\t\tif err := netlink.LinkSetDown(link); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := Bond(netConf.Bond)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := b.AddSlave(link.Attrs().Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif netConf.Bridge != \"\" && netConf.Bridge != \"true\" {\n\t\tb, err := NewBridge(netConf.Bridge)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := b.AddLink(link); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif netConf.IPV4LL {\n\t\tif err := AssignLinkLocalIP(link); err != nil {\n\t\t\tlog.Errorf(\"IPV4LL set failed: %v\", err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\taddresses := []string{}\n\n\t\tif netConf.Address != \"\" {\n\t\t\taddresses = append(addresses, netConf.Address)\n\t\t}\n\n\t\tif len(netConf.Addresses) > 0 {\n\t\t\taddresses = append(addresses, netConf.Addresses...)\n\t\t}\n\n\t\tfor _, address := range addresses {\n\t\t\terr := applyAddress(address, link, netConf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to apply address %s to %s: %v\", address, link.Attrs().Name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif netConf.MTU > 0 {\n\t\tif err := netlink.LinkSetMTU(link, netConf.MTU); err != nil {\n\t\t\tlog.Errorf(\"set MTU Failed: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\trunCmds(netConf.PreUp, link.Attrs().Name)\n\n\tif err := linkUp(link, netConf); err != nil {\n\t\treturn err\n\t}\n\n\tif err := setGateway(netConf.Gateway); err != nil {\n\t\tlog.Errorf(\"Fail to set gateway %s\", netConf.Gateway)\n\t}\n\n\tif err := setGateway(netConf.GatewayIpv6); err != nil {\n\t\tlog.Errorf(\"Fail to set gateway %s\", netConf.Gateway)\n\t}\n\n\trunCmds(netConf.PostUp, link.Attrs().Name)\n\n\treturn nil\n}\n\nfunc runCmds(cmds []string, iface string) {\n\tfor _, cmd := range cmds {\n\t\tcmd = strings.TrimSpace(cmd)\n\t\tif cmd == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\targs, err := shlex.Split(strings.Replace(cmd, \"$iface\", iface, -1))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse command [%s]: %v\", cmd, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Running command %s %v\", args[0], args[1:])\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tlog.Errorf(\"Failed to run command [%s]: %v\", cmd, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ +build fuchsia windows\n\npackage ipc\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/prog\"\n)\n\ntype Env struct {\n\tbin []string\n\tpid int\n\tconfig Config\n\n\tStatExecs uint64\n\tStatRestarts uint64\n}\n\nfunc MakeEnv(bin string, pid int, config Config) (*Env, error) {\n\tif config.Timeout < 7*time.Second {\n\t\tconfig.Timeout = 7 * time.Second\n\t}\n\tenv := &Env{\n\t\tbin: strings.Split(bin, \" \"),\n\t\tpid: pid,\n\t\tconfig: config,\n\t}\n\tif len(env.bin) == 0 {\n\t\treturn nil, fmt.Errorf(\"binary is empty string\")\n\t}\n\tif false {\n\t\tenv.bin[0] = osutil.Abs(env.bin[0])\n\t\tbase := filepath.Base(env.bin[0])\n\t\tpidStr := fmt.Sprint(pid)\n\t\tif len(base)+len(pidStr) >= 16 {\n\t\t\t\/\/ TASK_COMM_LEN is currently set to 16\n\t\t\tbase = base[:15-len(pidStr)]\n\t\t}\n\t\tbinCopy := filepath.Join(filepath.Dir(env.bin[0]), base+pidStr)\n\t\tif err := os.Link(env.bin[0], binCopy); err == nil {\n\t\t\tenv.bin[0] = binCopy\n\t\t}\n\t}\n\treturn env, nil\n}\n\nfunc (env *Env) Close() error {\n\treturn nil\n}\n\nfunc (env *Env) Exec(opts *ExecOpts, p *prog.Prog) (output []byte, info []CallInfo, failed, hanged bool, err0 error) {\n\tdir, err := ioutil.TempDir(\".\/\", \"syzkaller-testdir\")\n\tif err != nil {\n\t\terr0 = fmt.Errorf(\"failed to create temp dir: %v\", err)\n\t\treturn\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tdata := make([]byte, prog.ExecBufferSize)\n\tif err := p.SerializeForExec(data, env.pid); err != nil {\n\t\terr0 = err\n\t\treturn\n\t}\n\tinbuf := new(bytes.Buffer)\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(env.config.Flags))\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(opts.Flags))\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(env.pid))\n\tinbuf.Write(data)\n\n\tcmd := exec.Command(env.bin[0], env.bin[1:]...)\n\tcmd.Env = []string{}\n\tcmd.Dir = dir\n\tcmd.Stdin = inbuf\n\tif env.config.Flags&FlagDebug != 0 {\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stdout\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\terr0 = err\n\t\treturn\n\t}\n\tdone := make(chan error)\n\tgo func() {\n\t\tdone <- cmd.Wait()\n\t}()\n\tt := time.NewTimer(env.config.Timeout)\n\tselect {\n\tcase <-done:\n\t\tt.Stop()\n\tcase <-t.C:\n\t\tcmd.Process.Kill()\n\t\t<-done\n\t}\n\treturn\n}\n<commit_msg>pkg\/ipc: fix windows<commit_after>\/\/ Copyright 2017 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ +build fuchsia windows\n\npackage ipc\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/pkg\/osutil\"\n\t\"github.com\/google\/syzkaller\/prog\"\n)\n\ntype Env struct {\n\tbin []string\n\tpid int\n\tconfig Config\n\n\tStatExecs uint64\n\tStatRestarts uint64\n}\n\nfunc MakeEnv(bin string, pid int, config Config) (*Env, error) {\n\tif config.Timeout < 7*time.Second {\n\t\tconfig.Timeout = 7 * time.Second\n\t}\n\tenv := &Env{\n\t\tbin: strings.Split(bin, \" \"),\n\t\tpid: pid,\n\t\tconfig: config,\n\t}\n\tif len(env.bin) == 0 {\n\t\treturn nil, fmt.Errorf(\"binary is empty string\")\n\t}\n\tif runtime.GOOS != \"fuchsia\" {\n\t\tenv.bin[0] = osutil.Abs(env.bin[0])\n\t\tbase := filepath.Base(env.bin[0])\n\t\tpidStr := fmt.Sprint(pid)\n\t\tif len(base)+len(pidStr) >= 16 {\n\t\t\t\/\/ TASK_COMM_LEN is currently set to 16\n\t\t\tbase = base[:15-len(pidStr)]\n\t\t}\n\t\tbinCopy := filepath.Join(filepath.Dir(env.bin[0]), base+pidStr)\n\t\tif err := os.Link(env.bin[0], binCopy); err == nil {\n\t\t\tenv.bin[0] = binCopy\n\t\t}\n\t}\n\treturn env, nil\n}\n\nfunc (env *Env) Close() error {\n\treturn nil\n}\n\nfunc (env *Env) Exec(opts *ExecOpts, p *prog.Prog) (output []byte, info []CallInfo, failed, hanged bool, err0 error) {\n\tatomic.AddUint64(&env.StatExecs, 1)\n\tdir, err := ioutil.TempDir(\".\/\", \"syzkaller-testdir\")\n\tif err != nil {\n\t\terr0 = fmt.Errorf(\"failed to create temp dir: %v\", err)\n\t\treturn\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tdata := make([]byte, prog.ExecBufferSize)\n\tif err := p.SerializeForExec(data, env.pid); err != nil {\n\t\terr0 = err\n\t\treturn\n\t}\n\tinbuf := new(bytes.Buffer)\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(env.config.Flags))\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(opts.Flags))\n\tbinary.Write(inbuf, binary.LittleEndian, uint64(env.pid))\n\tinbuf.Write(data)\n\n\tcmd := exec.Command(env.bin[0], env.bin[1:]...)\n\tcmd.Env = []string{}\n\tcmd.Dir = dir\n\tcmd.Stdin = inbuf\n\tif env.config.Flags&FlagDebug != 0 {\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stdout\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\terr0 = err\n\t\treturn\n\t}\n\tdone := make(chan error)\n\tgo func() {\n\t\tdone <- cmd.Wait()\n\t}()\n\tt := time.NewTimer(env.config.Timeout)\n\tselect {\n\tcase <-done:\n\t\tt.Stop()\n\tcase <-t.C:\n\t\tcmd.Process.Kill()\n\t\t<-done\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018, The GoPacket Authors, All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ that can be found in the LICENSE file in the root of the source\n\/\/ tree.\n\/\/\n\/\/******************************************************************************\n\npackage layers\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/google\/gopacket\"\n)\n\n\/\/******************************************************************************\n\/\/\n\/\/ ModbusTCP Decoding Layer\n\/\/ ------------------------------------------\n\/\/ This file provides a GoPacket decoding layer for ModbusTCP.\n\/\/\n\/\/******************************************************************************\n\nconst mbapRecordSizeInBytes int = 7\nconst modbusPDUMinimumRecordSizeInBytes int = 2\nconst modbusPDUMaximumRecordSizeInBytes int = 253\n\n\/\/******************************************************************************\n\n\/\/ ModbusTCP Type\n\/\/ --------\n\/\/ Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object\n\/\/ represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP\n\/\/ payload in an ModbusTCP TCP packet.\n\/\/\ntype ModbusTCP struct {\n\tBaseLayer \/\/ Stores the packet bytes and payload (Modbus PDU) bytes .\n\n\tTransactionIdentifier uint16 \/\/ Identification of a MODBUS Request\/Response transaction\n\tProtocolIdentifier uint16 \/\/ 0 = MODBUS protocol\n\tLength uint16 \/\/ Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length\n\tUnitIdentifier uint8 \/\/ Identification of a remote slave connected on a serial line or on other buses\n}\n\n\/\/******************************************************************************\n\n\/\/ LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.\nfunc (d *ModbusTCP) LayerType() gopacket.LayerType {\n\treturn LayerTypeModbusTCP\n}\n\n\/\/******************************************************************************\n\n\/\/ decodeModbusTCP analyses a byte slice and attempts to decode it as an ModbusTCP\n\/\/ record of a TCP packet.\n\/\/\n\/\/ If it succeeds, it loads p with information about the packet and returns nil.\n\/\/ If it fails, it returns an error (non nil).\n\/\/\n\/\/ This function is employed in layertypes.go to register the ModbusTCP layer.\nfunc decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {\n\n\t\/\/ Attempt to decode the byte slice.\n\td := &ModbusTCP{}\n\terr := d.DecodeFromBytes(data, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ If the decoding worked, add the layer to the packet and set it\n\t\/\/ as the application layer too, if there isn't already one.\n\tp.AddLayer(d)\n\tp.SetApplicationLayer(d)\n\n\treturn p.NextDecoder(d.NextLayerType())\n\n}\n\n\/\/******************************************************************************\n\n\/\/ DecodeFromBytes analyses a byte slice and attempts to decode it as an ModbusTCP\n\/\/ record of a TCP packet.\n\/\/\n\/\/ Upon succeeds, it loads the ModbusTCP object with information about the packet\n\/\/ and returns nil.\n\/\/ Upon failure, it returns an error (non nil).\nfunc (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {\n\n\t\/\/ If the data block is too short to be a MBAP record, then return an error.\n\tif len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet too short\")\n\t}\n\n\tif len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet too long\")\n\t}\n\n\t\/\/ ModbusTCP type embeds type BaseLayer which contains two fields:\n\t\/\/ Contents is supposed to contain the bytes of the data at this level (MPBA).\n\t\/\/ Payload is supposed to contain the payload of this level (PDU).\n\td.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:len(data)]}\n\n\t\/\/ Extract the fields from the block of bytes.\n\t\/\/ The fields can just be copied in big endian order.\n\td.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])\n\td.ProtocolIdentifier = binary.BigEndian.Uint16(data[2:4])\n\td.Length = binary.BigEndian.Uint16(data[4:6])\n\n\t\/\/ Length should have the size of the payload plus one byte (size of UnitIdentifier)\n\tif d.Length != uint16(len(d.BaseLayer.Payload)+1) {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet with wrong field value (Length)\")\n\t}\n\td.UnitIdentifier = uint8(data[6])\n\n\t\/\/ Return no error.\n\treturn nil\n}\n\n\/\/******************************************************************************\n\n\/\/ NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.\nfunc (d *ModbusTCP) NextLayerType() gopacket.LayerType {\n\treturn gopacket.LayerTypePayload\n}\n\n\/\/******************************************************************************\n\n\/\/ Payload returns Modbus Protocol Data Unit (PDU) composed by Function Code and Data, it is carried within ModbusTCP packets\nfunc (d *ModbusTCP) Payload() []byte {\n\treturn d.BaseLayer.Payload\n}\n\n\/\/******************************************************************************\n\/\/* End Of ModbusTCP File *\n\/\/******************************************************************************\n<commit_msg>ModbusProtocol enum added and minor comments corrections<commit_after>\/\/ Copyright 2018, The GoPacket Authors, All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ that can be found in the LICENSE file in the root of the source\n\/\/ tree.\n\/\/\n\/\/******************************************************************************\n\npackage layers\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/google\/gopacket\"\n)\n\n\/\/******************************************************************************\n\/\/\n\/\/ ModbusTCP Decoding Layer\n\/\/ ------------------------------------------\n\/\/ This file provides a GoPacket decoding layer for ModbusTCP.\n\/\/\n\/\/******************************************************************************\n\nconst mbapRecordSizeInBytes int = 7\nconst modbusPDUMinimumRecordSizeInBytes int = 2\nconst modbusPDUMaximumRecordSizeInBytes int = 253\n\n\/\/ ModbusProtocol type\ntype ModbusProtocol uint16\n\nconst (\n\tModbusProtocolModbus ModbusProtocol = 0\n)\n\n\/\/******************************************************************************\n\n\/\/ ModbusTCP Type\n\/\/ --------\n\/\/ Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object\n\/\/ represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP\n\/\/ payload in an ModbusTCP TCP packet.\n\/\/\ntype ModbusTCP struct {\n\tBaseLayer \/\/ Stores the packet bytes and payload (Modbus PDU) bytes .\n\n\tTransactionIdentifier uint16 \/\/ Identification of a MODBUS Request\/Response transaction\n\tProtocolIdentifier ModbusProtocol \/\/ It is used for intra-system multiplexing\n\tLength uint16 \/\/ Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length\n\tUnitIdentifier uint8 \/\/ Identification of a remote slave connected on a serial line or on other buses\n}\n\n\/\/******************************************************************************\n\n\/\/ LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.\nfunc (d *ModbusTCP) LayerType() gopacket.LayerType {\n\treturn LayerTypeModbusTCP\n}\n\n\/\/******************************************************************************\n\n\/\/ decodeModbusTCP analyses a byte slice and attempts to decode it as an ModbusTCP\n\/\/ record of a TCP packet.\n\/\/\n\/\/ If it succeeds, it loads p with information about the packet and returns nil.\n\/\/ If it fails, it returns an error (non nil).\n\/\/\n\/\/ This function is employed in layertypes.go to register the ModbusTCP layer.\nfunc decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {\n\n\t\/\/ Attempt to decode the byte slice.\n\td := &ModbusTCP{}\n\terr := d.DecodeFromBytes(data, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ If the decoding worked, add the layer to the packet and set it\n\t\/\/ as the application layer too, if there isn't already one.\n\tp.AddLayer(d)\n\tp.SetApplicationLayer(d)\n\n\treturn p.NextDecoder(d.NextLayerType())\n\n}\n\n\/\/******************************************************************************\n\n\/\/ DecodeFromBytes analyses a byte slice and attempts to decode it as an ModbusTCP\n\/\/ record of a TCP packet.\n\/\/\n\/\/ Upon succeeds, it loads the ModbusTCP object with information about the packet\n\/\/ and returns nil.\n\/\/ Upon failure, it returns an error (non nil).\nfunc (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {\n\n\t\/\/ If the data block is too short to be a MBAP record, then return an error.\n\tif len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet too short\")\n\t}\n\n\tif len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet too long\")\n\t}\n\n\t\/\/ ModbusTCP type embeds type BaseLayer which contains two fields:\n\t\/\/ Contents is supposed to contain the bytes of the data at this level (MPBA).\n\t\/\/ Payload is supposed to contain the payload of this level (PDU).\n\td.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:len(data)]}\n\n\t\/\/ Extract the fields from the block of bytes.\n\t\/\/ The fields can just be copied in big endian order.\n\td.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])\n\td.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))\n\td.Length = binary.BigEndian.Uint16(data[4:6])\n\n\t\/\/ Length should have the size of the payload plus one byte (size of UnitIdentifier)\n\tif d.Length != uint16(len(d.BaseLayer.Payload)+1) {\n\t\tdf.SetTruncated()\n\t\treturn errors.New(\"ModbusTCP packet with wrong field value (Length)\")\n\t}\n\td.UnitIdentifier = uint8(data[6])\n\n\treturn nil\n}\n\n\/\/******************************************************************************\n\n\/\/ NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.\nfunc (d *ModbusTCP) NextLayerType() gopacket.LayerType {\n\treturn gopacket.LayerTypePayload\n}\n\n\/\/******************************************************************************\n\n\/\/ Payload returns Modbus Protocol Data Unit (PDU) composed by Function Code and Data, it is carried within ModbusTCP packets\nfunc (d *ModbusTCP) Payload() []byte {\n\treturn d.BaseLayer.Payload\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/HawkMachine\/kodi_go_api\/v6\/kodi\"\n\n\tplaylist_builder \"github.com\/HawkMachine\/kodi_automation\/kodi\/playlist\/builder\"\n)\n\nvar (\n\tusername = flag.String(\"u\", \"\", \"Kodi username\")\n\tpassword = flag.String(\"p\", \"\", \"Kodi password\")\n\taddress = flag.String(\"a\", \"http:\/\/192.168.0.200:9080\", \"Kodi address\")\n\n\tsize = flag.Int(\"s\", 5, \"Number of episodes.\")\n\tshows = flag.String(\"shows\", \"\", \"Comma-separated list of tv shows names.\")\n\n\tdry_run = flag.Bool(\"dry_run\", true, \"Dry run mode - only print the playlist\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *address == \"\" || *username == \"\" || *password == \"\" {\n\t\tlog.Fatalf(\"Address, user or password is missing: %s, %s, %s\", *address, *username, *password)\n\t}\n\n\tk := kodi.New(*address+\"\/jsonrpc\", *username, *password)\n\n\ttvShowNames := strings.Split(*shows, \",\")\n\tepisodes, err := playlist_builder.GetUnwatchedPlaylist(k, tvShowNames, *size)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, ep := range episodes {\n\t\tfmt.Printf(\"%20s : S%2dE%2d : %q\\n\", ep.ShowTitle, ep.Season, ep.Episode, ep.Title)\n\t}\n}\n<commit_msg>Add items to playlist if not in dry run mode<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/HawkMachine\/kodi_go_api\/v6\/kodi\"\n\n\tplaylist_builder \"github.com\/HawkMachine\/kodi_automation\/kodi\/playlist\/builder\"\n)\n\nvar (\n\tusername = flag.String(\"u\", \"\", \"Kodi username\")\n\tpassword = flag.String(\"p\", \"\", \"Kodi password\")\n\taddress = flag.String(\"a\", \"http:\/\/192.168.0.200:9080\", \"Kodi address\")\n\n\tsize = flag.Int(\"s\", 5, \"Number of episodes.\")\n\tshows = flag.String(\"shows\", \"\", \"Comma-separated list of tv shows names.\")\n\n\tdryRun = flag.Bool(\"dry_run\", true, \"Dry run mode - only print the playlist\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *address == \"\" || *username == \"\" || *password == \"\" {\n\t\tlog.Fatalf(\"Address, user or password is missing: %s, %s, %s\", *address, *username, *password)\n\t}\n\n\tk := kodi.New(*address+\"\/jsonrpc\", *username, *password)\n\n\ttvShowNames := strings.Split(*shows, \",\")\n\tepisodes, err := playlist_builder.GetUnwatchedPlaylist(k, tvShowNames, *size)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, ep := range episodes {\n\t\tfmt.Printf(\"%20s : S%2dE%2d : %q\\n\", ep.ShowTitle, ep.Season, ep.Episode, ep.Title)\n\t}\n\n\tif *dryRun {\n\t\treturn\n\t}\n\n\tfmt.Println(\"==== Submitting to kodi ===\")\n\n\tfor _, ep := range episodes {\n\t\tresp, err := k.Playlist.Add(&kodi.PlaylistAddParams{\n\t\t\tPlaylistId: kodi.PLAYLIST_VIDEO_ID,\n\t\t\tItem: kodi.PlaylistItem{\n\t\t\t\tEpisodeId: &ep.EpisodeId,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tif resp.Error != nil {\n\t\t\tlog.Fatal(resp.Error)\n\t\t}\n\t\tfmt.Printf(\"%20s : S%2dE%2d : %q : %s\\n\", ep.ShowTitle, ep.Season, ep.Episode, ep.Title, *resp.Result)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/coreos\/updateservicectl\/Godeps\/_workspace\/src\/code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/coreos\/updateservicectl\/Godeps\/_workspace\/src\/github.com\/coreos\/go-omaha\/omaha\"\n\n\t\"github.com\/coreos\/updateservicectl\/client\/update\/v1\"\n)\n\nvar (\n\twatchFlags struct {\n\t\tinterval int\n\t\tversion string\n\t\tappId StringFlag\n\t\tgroupId StringFlag\n\t\tclientId string\n\t}\n\tcmdWatch = &Command{\n\t\tName: \"watch\",\n\t\tUsage: \"[OPTION]... <cmd> <args>\",\n\t\tSummary: `Watch for app versions and exec a given command.`,\n\t\tRun: watch,\n\t}\n)\n\nfunc init() {\n\tcmdWatch.Flags.IntVar(&watchFlags.interval, \"interval\", 1, \"Update polling interval\")\n\tcmdWatch.Flags.StringVar(&watchFlags.version, \"version\", \"0.0.0\", \"Starting version number\")\n\tcmdWatch.Flags.Var(&watchFlags.appId, \"app-id\", \"Application to watch.\")\n\tcmdWatch.Flags.Var(&watchFlags.groupId, \"group-id\", \"Group of application to subscribe to.\")\n\tcmdWatch.Flags.StringVar(&watchFlags.clientId, \"client-id\", \"\", \"Client id to report ad. If not provided a random UUID will be generated.\")\n}\n\nfunc fetchUpdateCheck(server string, appID string, groupID string, clientID string, version string, debug bool) (*omaha.UpdateCheck, error) {\n\tclient := &http.Client{}\n\n\t\/\/ TODO: Fill out the OS field correctly based on \/etc\/os-release\n\trequest := omaha.NewRequest(\"lsb\", \"CoreOS\", \"\", \"\")\n\tapp := request.AddApp(fmt.Sprintf(\"{%s}\", appID), version)\n\tapp.AddUpdateCheck()\n\tapp.MachineID = clientID\n\tapp.BootId = uuid.New()\n\tapp.Track = groupID\n\n\tevent := app.AddEvent()\n\tevent.Type = \"1\"\n\tevent.Result = \"0\"\n\n\traw, err := xml.MarshalIndent(request, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Request: %s%s\\n\", xml.Header, raw)\n\t}\n\n\tresp, err := client.Post(server+\"\/v1\/update\/\", \"text\/xml\", bytes.NewReader(raw))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Response: %s%s\\n\", xml.Header, string(body))\n\t}\n\n\toresp := &omaha.Response{}\n\terr = xml.Unmarshal(body, oresp)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\treturn oresp.Apps[0].UpdateCheck, nil\n}\n\nfunc prepareEnvironment(appID string, version string, oldVersion string, updateCheck *omaha.UpdateCheck) []string {\n\tenv := os.Environ()\n\tenv = append(env, \"UPDATE_SERVICE_VERSION=\"+version)\n\tif oldVersion != \"\" {\n\t\tenv = append(env, \"UPDATE_SERVICE_OLD_VERSION=\"+oldVersion)\n\t}\n\tenv = append(env, \"UPDATE_SERVICE_APP_ID=\"+appID)\n\n\tif updateCheck.Status == \"ok\" {\n\t\turl, err := url.Parse(updateCheck.Urls.Urls[0].CodeBase)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\turl.Path = path.Join(url.Path, updateCheck.Manifest.Packages.Packages[0].Name)\n\t\tenv = append(env, \"UPDATE_SERVICE_URL=\"+url.String())\n\t}\n\treturn env\n}\n\nfunc runCmd(cmdName string, args []string, appID string, version string, oldVersion string, updateCheck *omaha.UpdateCheck) {\n\tcmd := exec.Command(cmdName, args...)\n\tcmd.Env = prepareEnvironment(appID, version, oldVersion, updateCheck)\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\tgo io.Copy(os.Stdout, stdout)\n\tgo io.Copy(os.Stderr, stderr)\n\tcmd.Wait()\n}\n\nfunc watch(args []string, service *update.Service, out *tabwriter.Writer) int {\n\ttick := time.NewTicker(time.Second * time.Duration(watchFlags.interval))\n\tserver := globalFlags.Server\n\tdebug := globalFlags.Debug\n\tversion := watchFlags.version\n\n\tif watchFlags.appId.Get() == nil || watchFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tif len(args) == 0 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tappId := watchFlags.appId.String()\n\tgroupId := watchFlags.groupId.String()\n\tclientId := watchFlags.clientId\n\n\tif clientId == \"\" {\n\t\tclientId = uuid.New()\n\t}\n\n\t\/\/ initial check\n\tupdateCheck, err := fetchUpdateCheck(server, appId, groupId, clientId, version, debug)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\trunCmd(args[0], args[1:], appId, version, \"\", updateCheck)\n\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\n\t\t\tupdateCheck, err := fetchUpdateCheck(server, appId, groupId, clientId, version, debug)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: update check failed (%v)\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif updateCheck.Status == \"noupdate\" {\n\t\t\t\tcontinue\n\t\t\t} else if updateCheck.Status == \"error-version\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnewVersion := updateCheck.Manifest.Version\n\n\t\t\tif newVersion != version {\n\t\t\t\trunCmd(args[0], args[1:], appId, newVersion, version, updateCheck)\n\t\t\t}\n\t\t\tversion = newVersion\n\t\t}\n\t}\n\n\ttick.Stop()\n\treturn OK\n}\n<commit_msg>guard initial watch run<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/coreos\/updateservicectl\/Godeps\/_workspace\/src\/code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/coreos\/updateservicectl\/Godeps\/_workspace\/src\/github.com\/coreos\/go-omaha\/omaha\"\n\n\t\"github.com\/coreos\/updateservicectl\/client\/update\/v1\"\n)\n\nvar (\n\twatchFlags struct {\n\t\tinterval int\n\t\tversion string\n\t\tappId StringFlag\n\t\tgroupId StringFlag\n\t\tclientId string\n\t}\n\tcmdWatch = &Command{\n\t\tName: \"watch\",\n\t\tUsage: \"[OPTION]... <cmd> <args>\",\n\t\tSummary: `Watch for app versions and exec a given command.`,\n\t\tRun: watch,\n\t}\n)\n\nfunc init() {\n\tcmdWatch.Flags.IntVar(&watchFlags.interval, \"interval\", 1, \"Update polling interval\")\n\tcmdWatch.Flags.StringVar(&watchFlags.version, \"version\", \"0.0.0\", \"Starting version number\")\n\tcmdWatch.Flags.Var(&watchFlags.appId, \"app-id\", \"Application to watch.\")\n\tcmdWatch.Flags.Var(&watchFlags.groupId, \"group-id\", \"Group of application to subscribe to.\")\n\tcmdWatch.Flags.StringVar(&watchFlags.clientId, \"client-id\", \"\", \"Client id to report ad. If not provided a random UUID will be generated.\")\n}\n\nfunc fetchUpdateCheck(server string, appID string, groupID string, clientID string, version string, debug bool) (*omaha.UpdateCheck, error) {\n\tclient := &http.Client{}\n\n\t\/\/ TODO: Fill out the OS field correctly based on \/etc\/os-release\n\trequest := omaha.NewRequest(\"lsb\", \"CoreOS\", \"\", \"\")\n\tapp := request.AddApp(fmt.Sprintf(\"{%s}\", appID), version)\n\tapp.AddUpdateCheck()\n\tapp.MachineID = clientID\n\tapp.BootId = uuid.New()\n\tapp.Track = groupID\n\n\tevent := app.AddEvent()\n\tevent.Type = \"1\"\n\tevent.Result = \"0\"\n\n\traw, err := xml.MarshalIndent(request, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Request: %s%s\\n\", xml.Header, raw)\n\t}\n\n\tresp, err := client.Post(server+\"\/v1\/update\/\", \"text\/xml\", bytes.NewReader(raw))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Response: %s%s\\n\", xml.Header, string(body))\n\t}\n\n\toresp := &omaha.Response{}\n\terr = xml.Unmarshal(body, oresp)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn nil, err\n\t}\n\n\treturn oresp.Apps[0].UpdateCheck, nil\n}\n\nfunc prepareEnvironment(appID string, version string, oldVersion string, updateCheck *omaha.UpdateCheck) []string {\n\tenv := os.Environ()\n\tenv = append(env, \"UPDATE_SERVICE_VERSION=\"+version)\n\tif oldVersion != \"\" {\n\t\tenv = append(env, \"UPDATE_SERVICE_OLD_VERSION=\"+oldVersion)\n\t}\n\tenv = append(env, \"UPDATE_SERVICE_APP_ID=\"+appID)\n\n\tif updateCheck.Status == \"ok\" {\n\t\turl, err := url.Parse(updateCheck.Urls.Urls[0].CodeBase)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\turl.Path = path.Join(url.Path, updateCheck.Manifest.Packages.Packages[0].Name)\n\t\tenv = append(env, \"UPDATE_SERVICE_URL=\"+url.String())\n\t}\n\treturn env\n}\n\nfunc runCmd(cmdName string, args []string, appID string, version string, oldVersion string, updateCheck *omaha.UpdateCheck) {\n\tcmd := exec.Command(cmdName, args...)\n\tcmd.Env = prepareEnvironment(appID, version, oldVersion, updateCheck)\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\tgo io.Copy(os.Stdout, stdout)\n\tgo io.Copy(os.Stderr, stderr)\n\tcmd.Wait()\n}\n\nfunc watch(args []string, service *update.Service, out *tabwriter.Writer) int {\n\ttick := time.NewTicker(time.Second * time.Duration(watchFlags.interval))\n\tserver := globalFlags.Server\n\tdebug := globalFlags.Debug\n\tversion := watchFlags.version\n\n\tif watchFlags.appId.Get() == nil || watchFlags.groupId.Get() == nil {\n\t\treturn ERROR_USAGE\n\t}\n\n\tif len(args) == 0 {\n\t\treturn ERROR_USAGE\n\t}\n\n\tappId := watchFlags.appId.String()\n\tgroupId := watchFlags.groupId.String()\n\tclientId := watchFlags.clientId\n\n\tif clientId == \"\" {\n\t\tclientId = uuid.New()\n\t}\n\n\t\/\/ initial check\n\tupdateCheck, err := fetchUpdateCheck(server, appId, groupId, clientId, version, debug)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif updateCheck.Status != \"noupdate\" && updateCheck.Status == \"error-version\" {\n\t\trunCmd(args[0], args[1:], appId, version, \"\", updateCheck)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\n\t\t\tupdateCheck, err := fetchUpdateCheck(server, appId, groupId, clientId, version, debug)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"warning: update check failed (%v)\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif updateCheck.Status == \"noupdate\" {\n\t\t\t\tcontinue\n\t\t\t} else if updateCheck.Status == \"error-version\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnewVersion := updateCheck.Manifest.Version\n\n\t\t\tif newVersion != version {\n\t\t\t\trunCmd(args[0], args[1:], appId, newVersion, version, updateCheck)\n\t\t\t}\n\t\t\tversion = newVersion\n\t\t}\n\t}\n\n\ttick.Stop()\n\treturn OK\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tcmd *exec.Cmd\n\tstate sync.Mutex\n\teventTime = make(map[string]time.Time)\n)\n\nfunc NewWatcher(paths []string) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Event:\n\t\t\t\tisbuild := true\n\t\t\t\tif t, ok := eventTime[e.String()]; ok {\n\t\t\t\t\t\/\/ if 500ms change many times, then ignore it.\n\t\t\t\t\t\/\/ for liteide often gofmt code after save.\n\t\t\t\t\tif t.Add(time.Millisecond * 500).After(time.Now()) {\n\t\t\t\t\t\tisbuild = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\teventTime[e.String()] = time.Now()\n\n\t\t\t\tif isbuild {\n\t\t\t\t\tfmt.Println(e)\n\t\t\t\t\tgo Autobuild()\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Fatal(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tfor _, path := range paths {\n\t\tfmt.Println(path)\n\t\terr = watcher.Watch(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n}\n\nfunc Autobuild() {\n\tstate.Lock()\n\tdefer state.Unlock()\n\n\tfmt.Println(\"start autobuild\")\n\tpath, _ := os.Getwd()\n\tos.Chdir(path)\n\tbcmd := exec.Command(\"go\", \"build\")\n\tbcmd.Stdout = os.Stdout\n\tbcmd.Stderr = os.Stderr\n\terr := bcmd.Run()\n\n\tif err != nil {\n\t\tfmt.Println(\"============== build failed ===================\")\n\t\treturn\n\t}\n\tfmt.Println(\"build success\")\n\tRestart(appname)\n}\n\nfunc Kill() {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tfmt.Println(\"Kill -> \", e)\n\t\t}\n\t}()\n\tif cmd != nil {\n\t\tcmd.Process.Kill()\n\t}\n}\n\nfunc Restart(appname string) {\n\tDebugf(\"kill running process\")\n\tKill()\n\tgo Start(appname)\n}\n\nfunc Start(appname string) {\n\tfmt.Println(\"start\", appname)\n\n\tcmd = exec.Command(appname)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tgo cmd.Run()\n}\n<commit_msg>fixed: Support bee start appname && bee start .\/appname<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\t\"strings\"\n)\n\nvar (\n\tcmd *exec.Cmd\n\tstate sync.Mutex\n\teventTime = make(map[string]time.Time)\n)\n\nfunc NewWatcher(paths []string) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Event:\n\t\t\t\tisbuild := true\n\t\t\t\tif t, ok := eventTime[e.String()]; ok {\n\t\t\t\t\t\/\/ if 500ms change many times, then ignore it.\n\t\t\t\t\t\/\/ for liteide often gofmt code after save.\n\t\t\t\t\tif t.Add(time.Millisecond * 500).After(time.Now()) {\n\t\t\t\t\t\tisbuild = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\teventTime[e.String()] = time.Now()\n\n\t\t\t\tif isbuild {\n\t\t\t\t\tfmt.Println(e)\n\t\t\t\t\tgo Autobuild()\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Fatal(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tfor _, path := range paths {\n\t\tfmt.Println(path)\n\t\terr = watcher.Watch(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n}\n\nfunc Autobuild() {\n\tstate.Lock()\n\tdefer state.Unlock()\n\n\tfmt.Println(\"start autobuild\")\n\tpath, _ := os.Getwd()\n\tos.Chdir(path)\n\tbcmd := exec.Command(\"go\", \"build\")\n\tbcmd.Stdout = os.Stdout\n\tbcmd.Stderr = os.Stderr\n\terr := bcmd.Run()\n\n\tif err != nil {\n\t\tfmt.Println(\"============== build failed ===================\")\n\t\treturn\n\t}\n\tfmt.Println(\"build success\")\n\tRestart(appname)\n}\n\nfunc Kill() {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tfmt.Println(\"Kill -> \", e)\n\t\t}\n\t}()\n\tif cmd != nil {\n\t\tcmd.Process.Kill()\n\t}\n}\n\nfunc Restart(appname string) {\n\tDebugf(\"kill running process\")\n\tKill()\n\tgo Start(appname)\n}\n\nfunc Start(appname string) {\n\tfmt.Println(\"start\", appname)\n\t\n\tif strings.Index(appname, \".\/\") == -1 {\n\t\tappname = \".\/\" + appname\n\t}\n\n\tcmd = exec.Command(appname)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tgo cmd.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/howeyc\/fsnotify\"\n)\n\nvar (\n\tcmd *exec.Cmd\n\tstate sync.Mutex\n\teventTime = make(map[string]int64)\n\tbuildPeriod time.Time\n)\n\nfunc NewWatcher(paths []string) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to create new Watcher[ %s ]\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Event:\n\t\t\t\tisbuild := true\n\n\t\t\t\t\/\/ Skip TMP files for Sublime Text.\n\t\t\t\tif checkTMPFile(e.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !chekcIfWatchExt(e.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Prevent duplicated builds.\n\t\t\t\tif buildPeriod.Add(1 * time.Second).After(time.Now()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuildPeriod = time.Now()\n\n\t\t\t\tmt := getFileModTime(e.Name)\n\t\t\t\tif t := eventTime[e.Name]; mt == t {\n\t\t\t\t\tColorLog(\"[SKIP] # %s #\\n\", e.String())\n\t\t\t\t\tisbuild = false\n\t\t\t\t}\n\n\t\t\t\teventTime[e.Name] = mt\n\n\t\t\t\tif isbuild {\n\t\t\t\t\tColorLog(\"[EVEN] %s\\n\", e)\n\t\t\t\t\tgo Autobuild()\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tColorLog(\"[WARN] %s\\n\", err.Error()) \/\/ No need to exit here\n\t\t\t}\n\t\t}\n\t}()\n\n\tColorLog(\"[INFO] Initializing watcher...\\n\")\n\tfor _, path := range paths {\n\t\tColorLog(\"[TRAC] Directory( %s )\\n\", path)\n\t\terr = watcher.Watch(path)\n\t\tif err != nil {\n\t\t\tColorLog(\"[ERRO] Fail to watch directory[ %s ]\\n\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n}\n\n\/\/ getFileModTime retuens unix timestamp of `os.File.ModTime` by given path.\nfunc getFileModTime(path string) int64 {\n\tpath = strings.Replace(path, \"\\\\\", \"\/\", -1)\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to open file[ %s ]\\n\", err)\n\t\treturn time.Now().Unix()\n\t}\n\tdefer f.Close()\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to get file information[ %s ]\\n\", err)\n\t\treturn time.Now().Unix()\n\t}\n\n\treturn fi.ModTime().Unix()\n}\n\nfunc Autobuild() {\n\tstate.Lock()\n\tdefer state.Unlock()\n\n\tColorLog(\"[INFO] Start building...\\n\")\n\tpath, _ := os.Getwd()\n\tos.Chdir(path)\n\n\tcmdName := \"go\"\n\tif conf.Gopm.Enable {\n\t\tcmdName = \"gopm\"\n\t}\n\n\tvar err error\n\t\/\/ For applications use full import path like \"github.com\/...\/..\"\n\t\/\/ are able to use \"go install\" to reduce build time.\n\tif conf.GoInstall || conf.Gopm.Install {\n\t\ticmd := exec.Command(cmdName, \"install\")\n\t\ticmd.Stdout = os.Stdout\n\t\ticmd.Stderr = os.Stderr\n\t\terr = icmd.Run()\n\t}\n\n\tif err == nil {\n\t\tappName := appname\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tappName += \".exe\"\n\t\t}\n\t\tbinPath := GetGOPATHs()[0] + \"\/bin\/\" + appName\n\n\t\tif conf.GoInstall && isExist(binPath) {\n\t\t\tos.Rename(binPath, appName)\n\t\t\tColorLog(\"[INFO] Build command reduced\\n\")\n\t\t} else {\n\t\t\tbcmd := exec.Command(cmdName, \"build\")\n\t\t\tbcmd.Stdout = os.Stdout\n\t\t\tbcmd.Stderr = os.Stderr\n\t\t\terr = bcmd.Run()\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tColorLog(\"[ERRO] ============== Build failed ===================\\n\")\n\t\treturn\n\t}\n\tColorLog(\"[SUCC] Build was successful\\n\")\n\tRestart(appname)\n}\n\nfunc Kill() {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tfmt.Println(\"Kill -> \", e)\n\t\t}\n\t}()\n\tif cmd != nil && cmd.Process != nil {\n\t\tcmd.Process.Kill()\n\t}\n}\n\nfunc Restart(appname string) {\n\tDebugf(\"kill running process\")\n\tKill()\n\tgo Start(appname)\n}\n\nfunc Start(appname string) {\n\tColorLog(\"[INFO] Restarting %s ...\\n\", appname)\n\tif strings.Index(appname, \".\/\") == -1 {\n\t\tappname = \".\/\" + appname\n\t}\n\n\tcmd = exec.Command(appname)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Args = append([]string{appname}, conf.CmdArgs...)\n\tcmd.Env = append(os.Environ(), conf.Envs...)\n\n\tgo cmd.Run()\n\tColorLog(\"[INFO] %s is running...\\n\", appname)\n\tstarted <- true\n}\n\n\/\/ checkTMPFile returns true if the event was for TMP files.\nfunc checkTMPFile(name string) bool {\n\tif strings.HasSuffix(strings.ToLower(name), \".tmp\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nvar watchExts = []string{\".go\"}\n\n\/\/ chekcIfWatchExt returns true if the name HasSuffix <watch_ext>.\nfunc chekcIfWatchExt(name string) bool {\n\tfor _, s := range watchExts {\n\t\tif strings.HasSuffix(name, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>Bug fixed<commit_after>\/\/ Copyright 2013 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/howeyc\/fsnotify\"\n)\n\nvar (\n\tcmd *exec.Cmd\n\tstate sync.Mutex\n\teventTime = make(map[string]int64)\n\tbuildPeriod time.Time\n)\n\nfunc NewWatcher(paths []string) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to create new Watcher[ %s ]\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Event:\n\t\t\t\tisbuild := true\n\n\t\t\t\t\/\/ Skip TMP files for Sublime Text.\n\t\t\t\tif checkTMPFile(e.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !chekcIfWatchExt(e.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Prevent duplicated builds.\n\t\t\t\tif buildPeriod.Add(1 * time.Second).After(time.Now()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuildPeriod = time.Now()\n\n\t\t\t\tmt := getFileModTime(e.Name)\n\t\t\t\tif t := eventTime[e.Name]; mt == t {\n\t\t\t\t\tColorLog(\"[SKIP] # %s #\\n\", e.String())\n\t\t\t\t\tisbuild = false\n\t\t\t\t}\n\n\t\t\t\teventTime[e.Name] = mt\n\n\t\t\t\tif isbuild {\n\t\t\t\t\tColorLog(\"[EVEN] %s\\n\", e)\n\t\t\t\t\tgo Autobuild()\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tColorLog(\"[WARN] %s\\n\", err.Error()) \/\/ No need to exit here\n\t\t\t}\n\t\t}\n\t}()\n\n\tColorLog(\"[INFO] Initializing watcher...\\n\")\n\tfor _, path := range paths {\n\t\tColorLog(\"[TRAC] Directory( %s )\\n\", path)\n\t\terr = watcher.Watch(path)\n\t\tif err != nil {\n\t\t\tColorLog(\"[ERRO] Fail to watch directory[ %s ]\\n\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n}\n\n\/\/ getFileModTime retuens unix timestamp of `os.File.ModTime` by given path.\nfunc getFileModTime(path string) int64 {\n\tpath = strings.Replace(path, \"\\\\\", \"\/\", -1)\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to open file[ %s ]\\n\", err)\n\t\treturn time.Now().Unix()\n\t}\n\tdefer f.Close()\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tColorLog(\"[ERRO] Fail to get file information[ %s ]\\n\", err)\n\t\treturn time.Now().Unix()\n\t}\n\n\treturn fi.ModTime().Unix()\n}\n\nfunc Autobuild() {\n\tstate.Lock()\n\tdefer state.Unlock()\n\n\tColorLog(\"[INFO] Start building...\\n\")\n\tpath, _ := os.Getwd()\n\tos.Chdir(path)\n\n\tcmdName := \"go\"\n\tif conf.Gopm.Enable {\n\t\tcmdName = \"gopm\"\n\t}\n\n\tvar err error\n\t\/\/ For applications use full import path like \"github.com\/...\/..\"\n\t\/\/ are able to use \"go install\" to reduce build time.\n\tif conf.GoInstall || conf.Gopm.Install {\n\t\ticmd := exec.Command(cmdName, \"install\")\n\t\ticmd.Stdout = os.Stdout\n\t\ticmd.Stderr = os.Stderr\n\t\terr = icmd.Run()\n\t}\n\n\tif err == nil {\n\t\tappName := appname\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tappName += \".exe\"\n\t\t}\n\t\tif isExist(appName) {\n\t\t\tos.Remove(appName)\n\t\t}\n\t\tbinPath := GetGOPATHs()[0] + \"\/bin\/\" + appName\n\n\t\tif conf.GoInstall && isExist(binPath) {\n\t\t\tos.Rename(binPath, appName)\n\t\t\tColorLog(\"[INFO] Build command reduced\\n\")\n\t\t} else {\n\t\t\tbcmd := exec.Command(cmdName, \"build\")\n\t\t\tbcmd.Stdout = os.Stdout\n\t\t\tbcmd.Stderr = os.Stderr\n\t\t\terr = bcmd.Run()\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tColorLog(\"[ERRO] ============== Build failed ===================\\n\")\n\t\treturn\n\t}\n\tColorLog(\"[SUCC] Build was successful\\n\")\n\tRestart(appname)\n}\n\nfunc Kill() {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tfmt.Println(\"Kill -> \", e)\n\t\t}\n\t}()\n\tif cmd != nil && cmd.Process != nil {\n\t\tcmd.Process.Kill()\n\t}\n}\n\nfunc Restart(appname string) {\n\tDebugf(\"kill running process\")\n\tKill()\n\tgo Start(appname)\n}\n\nfunc Start(appname string) {\n\tColorLog(\"[INFO] Restarting %s ...\\n\", appname)\n\tif strings.Index(appname, \".\/\") == -1 {\n\t\tappname = \".\/\" + appname\n\t}\n\n\tcmd = exec.Command(appname)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Args = append([]string{appname}, conf.CmdArgs...)\n\tcmd.Env = append(os.Environ(), conf.Envs...)\n\n\tgo cmd.Run()\n\tColorLog(\"[INFO] %s is running...\\n\", appname)\n\tstarted <- true\n}\n\n\/\/ checkTMPFile returns true if the event was for TMP files.\nfunc checkTMPFile(name string) bool {\n\tif strings.HasSuffix(strings.ToLower(name), \".tmp\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nvar watchExts = []string{\".go\"}\n\n\/\/ chekcIfWatchExt returns true if the name HasSuffix <watch_ext>.\nfunc chekcIfWatchExt(name string) bool {\n\tfor _, s := range watchExts {\n\t\tif strings.HasSuffix(name, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n)\n\n\/\/ watch watchers the input paths. The returned Watcher should only be used in\n\/\/ tests.\nfunc watch(inputPaths, ignoredPaths []string, cmdCh chan<- event) (*fsnotify.Watcher, error) {\n\t\/\/ Creates an Ignorer that just ignores file paths the user\n\t\/\/ specifically asked to be ignored.\n\tui, err := createUserIgnorer(ignoredPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create watcher: %s\", err)\n\t}\n\n\t\/\/ Watch user-specified paths and create a set of them for walking\n\t\/\/ later. Paths that are both asked to be watched and ignored by\n\t\/\/ the user are ignored.\n\tuserPaths := make(map[string]bool)\n\tincludedHiddenFiles := make(map[string]bool)\n\tfor _, path := range inputPaths {\n\t\tfullPath, err := filepath.Abs(path)\n\t\tif err != nil {\n\t\t\tw.Close()\n\t\t\treturn nil, errors.New(\"unable to get current working directory while working with user-watched paths\")\n\t\t}\n\t\tif userPaths[fullPath] || ui.IsIgnored(path) {\n\t\t\tcontinue\n\t\t}\n\t\terr = w.Add(fullPath)\n\t\tif err != nil {\n\t\t\tw.Close()\n\t\t\treturn nil, fmt.Errorf(\"unable to watch '%s': %s\", path, err)\n\t\t}\n\t\tuserPaths[fullPath] = true\n\t}\n\n\t\/\/ Create some useful sets from the user-specified paths to be\n\t\/\/ used in smartIgnorer (and, therefore, listenForEvents). One is\n\t\/\/ the set of hidden paths that the user does not want ignored to\n\t\/\/ be used in smartIgnorer. We create this smaller map because the\n\t\/\/ amount of paths the user asked to watch may be large.\n\t\/\/\n\t\/\/ We also create the sets renameDirs and renameChildren to better\n\t\/\/ handle files that are renamed away and back from the paths the\n\t\/\/ user wanted watched. To be more concrete, folks might want to\n\t\/\/ watch only the normal file \"foobar\" but their tooling moves\n\t\/\/ foobar away and then back (like vim does on save). This will\n\t\/\/ cause the watch to fire on the first move but then never again,\n\t\/\/ even when its returned. So, we have to track the parent\n\t\/\/ directory of foobar in order to capture when foobar shows up in\n\t\/\/ its parent directory again but we don't want to send all events\n\t\/\/ in that parent directory.\n\trenameDirs := make(map[string]bool)\n\trenameChildren := make(map[string]bool)\n\tfor fullPath, _ := range userPaths {\n\t\tbaseName := filepath.Base(fullPath)\n\t\tif strings.HasPrefix(baseName, \".\") {\n\t\t\tincludedHiddenFiles[fullPath] = true\n\t\t}\n\n\t\tdirPath := filepath.Dir(fullPath)\n\t\tif !userPaths[dirPath] && dirPath != \"\" {\n\t\t\tif !renameDirs[dirPath] {\n\t\t\t\terr = w.Add(dirPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tw.Close()\n\t\t\t\t\treturn nil, fmt.Errorf(\"unable to watch rename-watched-only dir '%s': %s\", fullPath, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\trenameDirs[dirPath] = true\n\t\t\trenameChildren[fullPath] = true\n\t\t}\n\t}\n\tig := &smartIgnorer{\n\t\tincludedHiddenFiles: includedHiddenFiles,\n\t\tui: ui,\n\t\trenameDirs: renameDirs,\n\t\trenameChildren: renameChildren,\n\t}\n\n\tgo listenForEvents(w, cmdCh, ig)\n\treturn w, nil\n}\n\ntype event struct {\n\ttime.Time\n\tEvent fsnotify.Event\n}\n\nfunc listenForEvents(w *fsnotify.Watcher, cmdCh chan<- event, ignorer Ignorer) {\n\tfor {\n\t\tselect {\n\t\tcase ev, ok := <-w.Events:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif ignorer.IsIgnored(ev.Name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\"filtered file change: %s\", ev)\n\t\t\t}\n\t\t\tcmdCh <- event{\n\t\t\t\tTime: time.Now(),\n\t\t\t\tEvent: ev,\n\t\t\t}\n\t\tcase err := <-w.Errors:\n\t\t\t\/\/ w.Close causes this.\n\t\t\tif err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"watch error:\", err)\n\t\t}\n\t}\n}\n\nfunc createUserIgnorer(ignoredPaths []string) (*userIgnorer, error) {\n\tignored := make(map[string]bool)\n\tignoredDirs := make([]string, 0)\n\tfor _, in := range ignoredPaths {\n\t\tin = strings.TrimSpace(in)\n\t\tif len(in) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tpath, err := filepath.Abs(in)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"unable to get current working dir while working with ignored paths\")\n\t\t}\n\t\tignored[path] = true\n\t\tdirPath := path\n\t\tif !strings.HasSuffix(path, \"\/\") {\n\t\t\tdirPath += \"\/\"\n\t\t}\n\t\tignoredDirs = append(ignoredDirs, dirPath)\n\t}\n\treturn &userIgnorer{ignored, ignoredDirs}, nil\n}\n<commit_msg>clear up the verbose log meaning<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/fsnotify\/fsnotify\"\n)\n\n\/\/ watch watchers the input paths. The returned Watcher should only be used in\n\/\/ tests.\nfunc watch(inputPaths, ignoredPaths []string, cmdCh chan<- event) (*fsnotify.Watcher, error) {\n\t\/\/ Creates an Ignorer that just ignores file paths the user\n\t\/\/ specifically asked to be ignored.\n\tui, err := createUserIgnorer(ignoredPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create watcher: %s\", err)\n\t}\n\n\t\/\/ Watch user-specified paths and create a set of them for walking\n\t\/\/ later. Paths that are both asked to be watched and ignored by\n\t\/\/ the user are ignored.\n\tuserPaths := make(map[string]bool)\n\tincludedHiddenFiles := make(map[string]bool)\n\tfor _, path := range inputPaths {\n\t\tfullPath, err := filepath.Abs(path)\n\t\tif err != nil {\n\t\t\tw.Close()\n\t\t\treturn nil, errors.New(\"unable to get current working directory while working with user-watched paths\")\n\t\t}\n\t\tif userPaths[fullPath] || ui.IsIgnored(path) {\n\t\t\tcontinue\n\t\t}\n\t\terr = w.Add(fullPath)\n\t\tif err != nil {\n\t\t\tw.Close()\n\t\t\treturn nil, fmt.Errorf(\"unable to watch '%s': %s\", path, err)\n\t\t}\n\t\tuserPaths[fullPath] = true\n\t}\n\n\t\/\/ Create some useful sets from the user-specified paths to be\n\t\/\/ used in smartIgnorer (and, therefore, listenForEvents). One is\n\t\/\/ the set of hidden paths that the user does not want ignored to\n\t\/\/ be used in smartIgnorer. We create this smaller map because the\n\t\/\/ amount of paths the user asked to watch may be large.\n\t\/\/\n\t\/\/ We also create the sets renameDirs and renameChildren to better\n\t\/\/ handle files that are renamed away and back from the paths the\n\t\/\/ user wanted watched. To be more concrete, folks might want to\n\t\/\/ watch only the normal file \"foobar\" but their tooling moves\n\t\/\/ foobar away and then back (like vim does on save). This will\n\t\/\/ cause the watch to fire on the first move but then never again,\n\t\/\/ even when its returned. So, we have to track the parent\n\t\/\/ directory of foobar in order to capture when foobar shows up in\n\t\/\/ its parent directory again but we don't want to send all events\n\t\/\/ in that parent directory.\n\trenameDirs := make(map[string]bool)\n\trenameChildren := make(map[string]bool)\n\tfor fullPath, _ := range userPaths {\n\t\tbaseName := filepath.Base(fullPath)\n\t\tif strings.HasPrefix(baseName, \".\") {\n\t\t\tincludedHiddenFiles[fullPath] = true\n\t\t}\n\n\t\tdirPath := filepath.Dir(fullPath)\n\t\tif !userPaths[dirPath] && dirPath != \"\" {\n\t\t\tif !renameDirs[dirPath] {\n\t\t\t\terr = w.Add(dirPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tw.Close()\n\t\t\t\t\treturn nil, fmt.Errorf(\"unable to watch rename-watched-only dir '%s': %s\", fullPath, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\trenameDirs[dirPath] = true\n\t\t\trenameChildren[fullPath] = true\n\t\t}\n\t}\n\tig := &smartIgnorer{\n\t\tincludedHiddenFiles: includedHiddenFiles,\n\t\tui: ui,\n\t\trenameDirs: renameDirs,\n\t\trenameChildren: renameChildren,\n\t}\n\n\tgo listenForEvents(w, cmdCh, ig)\n\treturn w, nil\n}\n\ntype event struct {\n\ttime.Time\n\tEvent fsnotify.Event\n}\n\nfunc listenForEvents(w *fsnotify.Watcher, cmdCh chan<- event, ignorer Ignorer) {\n\tfor {\n\t\tselect {\n\t\tcase ev, ok := <-w.Events:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif ignorer.IsIgnored(ev.Name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\"unignored file change: %s\", ev)\n\t\t\t}\n\t\t\tcmdCh <- event{\n\t\t\t\tTime: time.Now(),\n\t\t\t\tEvent: ev,\n\t\t\t}\n\t\tcase err := <-w.Errors:\n\t\t\t\/\/ w.Close causes this.\n\t\t\tif err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"watch error:\", err)\n\t\t}\n\t}\n}\n\nfunc createUserIgnorer(ignoredPaths []string) (*userIgnorer, error) {\n\tignored := make(map[string]bool)\n\tignoredDirs := make([]string, 0)\n\tfor _, in := range ignoredPaths {\n\t\tin = strings.TrimSpace(in)\n\t\tif len(in) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tpath, err := filepath.Abs(in)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"unable to get current working dir while working with ignored paths\")\n\t\t}\n\t\tignored[path] = true\n\t\tdirPath := path\n\t\tif !strings.HasSuffix(path, \"\/\") {\n\t\t\tdirPath += \"\/\"\n\t\t}\n\t\tignoredDirs = append(ignoredDirs, dirPath)\n\t}\n\treturn &userIgnorer{ignored, ignoredDirs}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package hypervisor\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/hyperhq\/runv\/hypervisor\/pod\"\n)\n\ntype VmEvent interface {\n\tEvent() int\n}\n\ntype VmExit struct{}\n\ntype VmStartFailEvent struct {\n\tMessage string\n}\n\ntype VmKilledEvent struct {\n\tSuccess bool\n}\n\ntype PodFinished struct {\n\tresult []uint32\n}\n\ntype VmTimeout struct{}\n\ntype InitFailedEvent struct {\n\tReason string\n}\n\ntype InitConnectedEvent struct {\n\tconn *net.UnixConn\n}\n\ntype GetPodIPCommand struct {\n\tId string\n}\n\ntype GetPodStatsCommand struct {\n\tId string\n}\n\ntype RunPodCommand struct {\n\tSpec *pod.UserPod\n\tContainers []*ContainerInfo\n\tVolumes []*VolumeInfo\n\tWg *sync.WaitGroup\n}\n\ntype ReplacePodCommand RunPodCommand\n\ntype PauseCommand struct {\n\tPause bool\n}\n\ntype PauseResult struct {\n\tCause string\n\tReply *PauseCommand\n}\n\ntype NewContainerCommand struct {\n\tcontainer *pod.UserContainer\n\tinfo *ContainerInfo\n}\n\ntype ExecCommand struct {\n\t*TtyIO `json:\"-\"`\n\tSequence uint64 `json:\"seq\"`\n\tContainer string `json:\"container,omitempty\"`\n\tCommand []string `json:\"cmd\"`\n}\n\ntype KillCommand struct {\n\tContainer string `json:\"container\"`\n\tSignal syscall.Signal `json:\"signal\"`\n}\n\ntype OnlineCpuMemCommand struct{}\n\ntype WriteFileCommand struct {\n\tContainer string `json:\"container\"`\n\tFile string `json:\"file\"`\n\tData []byte `json:\"-\"`\n}\n\ntype ReadFileCommand struct {\n\tContainer string `json:\"container\"`\n\tFile string `json:\"file\"`\n}\n\ntype StopPodCommand struct{}\ntype ShutdownCommand struct {\n\tWait bool\n}\ntype ReleaseVMCommand struct{}\n\ntype AttachCommand struct {\n\tStreams *TtyIO\n\tStderr *TtyIO\n\tSize *WindowSize\n\tContainer string\n}\n\ntype CommandAck struct {\n\treply *DecodedMessage\n\tmsg []byte\n}\n\ntype CommandError CommandAck\n\ntype WindowSizeCommand struct {\n\tClientTag string\n\tSize *WindowSize\n}\n\ntype ContainerCreatedEvent struct {\n\tIndex int\n\tId string\n\tRootfs string\n\tImage string \/\/ if fstype is `dir`, this should be a path relative to share_dir\n\t\/\/ which described the mounted aufs or overlayfs dir.\n\tFstype string\n\tWorkdir string\n\tEntrypoint []string\n\tCmd []string\n\tEnvs map[string]string\n}\n\ntype ContainerInfo struct {\n\tId string\n\tRootfs string\n\tImage string \/\/ if fstype is `dir`, this should be a path relative to share_dir\n\t\/\/ which described the mounted aufs or overlayfs dir.\n\tFstype string\n\tWorkdir string\n\tEntrypoint []string\n\tCmd []string\n\tEnvs map[string]string\n}\n\ntype ContainerUnmounted struct {\n\tIndex int\n\tSuccess bool\n}\n\ntype VolumeReadyEvent struct {\n\tName string \/\/volumen name in spec\n\tFilepath string \/\/block dev absolute path, or dir path relative to share dir\n\tFstype string \/\/\"xfs\", \"ext4\" etc. for block dev, or \"dir\" for dir path\n\tFormat string \/\/\"raw\" (or \"qcow2\") for volume, no meaning for dir path\n}\n\ntype VolumeInfo struct {\n\tName string \/\/volumen name in spec\n\tFilepath string \/\/block dev absolute path, or dir path relative to share dir\n\tFstype string \/\/\"xfs\", \"ext4\" etc. for block dev, or \"dir\" for dir path\n\tFormat string \/\/\"raw\" (or \"qcow2\") for volume, no meaning for dir path\n}\n\ntype VolumeUnmounted struct {\n\tName string\n\tSuccess bool\n}\n\ntype BlockdevInsertedEvent struct {\n\tName string\n\tSourceType string \/\/image or volume\n\tDeviceName string\n\tScsiId int\n\tScsiAddr string \/\/ pass scsi addr to hyperstart\n}\n\ntype DevSkipEvent struct{}\n\ntype BlockdevRemovedEvent struct {\n\tName string\n\tSuccess bool\n}\n\ntype InterfaceCreated struct {\n\tIndex int\n\tPCIAddr int\n\tFd *os.File\n\tBridge string\n\tHostDevice string\n\tDeviceName string\n\tMacAddr string\n\tIpAddr string\n\tNetMask string\n\tRouteTable []*RouteRule\n}\n\ntype InterfaceReleased struct {\n\tIndex int\n\tSuccess bool\n}\n\ntype RouteRule struct {\n\tDestination string\n\tGateway string\n\tViaThis bool\n}\n\ntype NetDevInsertedEvent struct {\n\tIndex int\n\tDeviceName string\n\tAddress int\n}\n\ntype NetDevRemovedEvent struct {\n\tIndex int\n}\n\ntype DeviceFailed struct {\n\tSession VmEvent\n}\n\ntype Interrupted struct {\n\tReason string\n}\n\ntype GenericOperation struct {\n\tOpName string\n\tState []string\n\tOpFunc func(ctx *VmContext, result chan<- error)\n\tResult chan<- error\n}\n\nfunc (qe *VmStartFailEvent) Event() int { return EVENT_VM_START_FAILED }\nfunc (qe *VmExit) Event() int { return EVENT_VM_EXIT }\nfunc (qe *VmKilledEvent) Event() int { return EVENT_VM_KILL }\nfunc (qe *PauseCommand) Event() int { return COMMAND_PAUSEVM }\nfunc (qe *PauseResult) Event() int { return EVENT_PAUSE_RESULT }\nfunc (qe *VmTimeout) Event() int { return EVENT_VM_TIMEOUT }\nfunc (qe *PodFinished) Event() int { return EVENT_POD_FINISH }\nfunc (qe *InitConnectedEvent) Event() int { return EVENT_INIT_CONNECTED }\nfunc (qe *ContainerCreatedEvent) Event() int { return EVENT_CONTAINER_ADD }\nfunc (qe *ContainerUnmounted) Event() int { return EVENT_CONTAINER_DELETE }\nfunc (qe *VolumeUnmounted) Event() int { return EVENT_BLOCK_EJECTED }\nfunc (qe *VolumeReadyEvent) Event() int { return EVENT_VOLUME_ADD }\nfunc (qe *BlockdevInsertedEvent) Event() int { return EVENT_BLOCK_INSERTED }\nfunc (qe *DevSkipEvent) Event() int { return EVENT_DEV_SKIP }\nfunc (qe *BlockdevRemovedEvent) Event() int { return EVENT_VOLUME_DELETE }\nfunc (qe *InterfaceCreated) Event() int { return EVENT_INTERFACE_ADD }\nfunc (qe *InterfaceReleased) Event() int { return EVENT_INTERFACE_DELETE }\nfunc (qe *NetDevInsertedEvent) Event() int { return EVENT_INTERFACE_INSERTED }\nfunc (qe *NetDevRemovedEvent) Event() int { return EVENT_INTERFACE_EJECTED }\nfunc (qe *RunPodCommand) Event() int { return COMMAND_RUN_POD }\nfunc (qe *GetPodIPCommand) Event() int { return COMMAND_GET_POD_IP }\nfunc (qe *GetPodStatsCommand) Event() int { return COMMAND_GET_POD_STATS }\nfunc (qe *StopPodCommand) Event() int { return COMMAND_STOP_POD }\nfunc (qe *ReplacePodCommand) Event() int { return COMMAND_REPLACE_POD }\nfunc (qe *NewContainerCommand) Event() int { return COMMAND_NEWCONTAINER }\nfunc (qe *ExecCommand) Event() int { return COMMAND_EXEC }\nfunc (qe *KillCommand) Event() int { return COMMAND_KILL }\nfunc (qe *OnlineCpuMemCommand) Event() int { return COMMAND_ONLINECPUMEM }\nfunc (qe *WriteFileCommand) Event() int { return COMMAND_WRITEFILE }\nfunc (qe *ReadFileCommand) Event() int { return COMMAND_READFILE }\nfunc (qe *AttachCommand) Event() int { return COMMAND_ATTACH }\nfunc (qe *WindowSizeCommand) Event() int { return COMMAND_WINDOWSIZE }\nfunc (qe *ShutdownCommand) Event() int { return COMMAND_SHUTDOWN }\nfunc (qe *ReleaseVMCommand) Event() int { return COMMAND_RELEASE }\nfunc (qe *CommandAck) Event() int { return COMMAND_ACK }\nfunc (qe *GenericOperation) Event() int { return GENERIC_OPERATION }\nfunc (qe *InitFailedEvent) Event() int { return ERROR_INIT_FAIL }\nfunc (qe *DeviceFailed) Event() int { return ERROR_QMP_FAIL }\nfunc (qe *Interrupted) Event() int { return ERROR_INTERRUPTED }\nfunc (qe *CommandError) Event() int { return ERROR_CMD_FAIL }\n<commit_msg>container: store mountid to container info<commit_after>package hypervisor\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/hyperhq\/runv\/hypervisor\/pod\"\n)\n\ntype VmEvent interface {\n\tEvent() int\n}\n\ntype VmExit struct{}\n\ntype VmStartFailEvent struct {\n\tMessage string\n}\n\ntype VmKilledEvent struct {\n\tSuccess bool\n}\n\ntype PodFinished struct {\n\tresult []uint32\n}\n\ntype VmTimeout struct{}\n\ntype InitFailedEvent struct {\n\tReason string\n}\n\ntype InitConnectedEvent struct {\n\tconn *net.UnixConn\n}\n\ntype GetPodIPCommand struct {\n\tId string\n}\n\ntype GetPodStatsCommand struct {\n\tId string\n}\n\ntype RunPodCommand struct {\n\tSpec *pod.UserPod\n\tContainers []*ContainerInfo\n\tVolumes []*VolumeInfo\n\tWg *sync.WaitGroup\n}\n\ntype ReplacePodCommand RunPodCommand\n\ntype PauseCommand struct {\n\tPause bool\n}\n\ntype PauseResult struct {\n\tCause string\n\tReply *PauseCommand\n}\n\ntype NewContainerCommand struct {\n\tcontainer *pod.UserContainer\n\tinfo *ContainerInfo\n}\n\ntype ExecCommand struct {\n\t*TtyIO `json:\"-\"`\n\tSequence uint64 `json:\"seq\"`\n\tContainer string `json:\"container,omitempty\"`\n\tCommand []string `json:\"cmd\"`\n}\n\ntype KillCommand struct {\n\tContainer string `json:\"container\"`\n\tSignal syscall.Signal `json:\"signal\"`\n}\n\ntype OnlineCpuMemCommand struct{}\n\ntype WriteFileCommand struct {\n\tContainer string `json:\"container\"`\n\tFile string `json:\"file\"`\n\tData []byte `json:\"-\"`\n}\n\ntype ReadFileCommand struct {\n\tContainer string `json:\"container\"`\n\tFile string `json:\"file\"`\n}\n\ntype StopPodCommand struct{}\ntype ShutdownCommand struct {\n\tWait bool\n}\ntype ReleaseVMCommand struct{}\n\ntype AttachCommand struct {\n\tStreams *TtyIO\n\tStderr *TtyIO\n\tSize *WindowSize\n\tContainer string\n}\n\ntype CommandAck struct {\n\treply *DecodedMessage\n\tmsg []byte\n}\n\ntype CommandError CommandAck\n\ntype WindowSizeCommand struct {\n\tClientTag string\n\tSize *WindowSize\n}\n\ntype ContainerCreatedEvent struct {\n\tIndex int\n\tId string\n\tRootfs string\n\tImage string \/\/ if fstype is `dir`, this should be a path relative to share_dir\n\t\/\/ which described the mounted aufs or overlayfs dir.\n\tFstype string\n\tWorkdir string\n\tEntrypoint []string\n\tCmd []string\n\tEnvs map[string]string\n}\n\ntype ContainerInfo struct {\n\tId string\n\tMountId string\n\tRootfs string\n\tImage string \/\/ if fstype is `dir`, this should be a path relative to share_dir\n\t\/\/ which described the mounted aufs or overlayfs dir.\n\tFstype string\n\tWorkdir string\n\tEntrypoint []string\n\tCmd []string\n\tEnvs map[string]string\n}\n\ntype ContainerUnmounted struct {\n\tIndex int\n\tSuccess bool\n}\n\ntype VolumeReadyEvent struct {\n\tName string \/\/volumen name in spec\n\tFilepath string \/\/block dev absolute path, or dir path relative to share dir\n\tFstype string \/\/\"xfs\", \"ext4\" etc. for block dev, or \"dir\" for dir path\n\tFormat string \/\/\"raw\" (or \"qcow2\") for volume, no meaning for dir path\n}\n\ntype VolumeInfo struct {\n\tName string \/\/volumen name in spec\n\tFilepath string \/\/block dev absolute path, or dir path relative to share dir\n\tFstype string \/\/\"xfs\", \"ext4\" etc. for block dev, or \"dir\" for dir path\n\tFormat string \/\/\"raw\" (or \"qcow2\") for volume, no meaning for dir path\n}\n\ntype VolumeUnmounted struct {\n\tName string\n\tSuccess bool\n}\n\ntype BlockdevInsertedEvent struct {\n\tName string\n\tSourceType string \/\/image or volume\n\tDeviceName string\n\tScsiId int\n\tScsiAddr string \/\/ pass scsi addr to hyperstart\n}\n\ntype DevSkipEvent struct{}\n\ntype BlockdevRemovedEvent struct {\n\tName string\n\tSuccess bool\n}\n\ntype InterfaceCreated struct {\n\tIndex int\n\tPCIAddr int\n\tFd *os.File\n\tBridge string\n\tHostDevice string\n\tDeviceName string\n\tMacAddr string\n\tIpAddr string\n\tNetMask string\n\tRouteTable []*RouteRule\n}\n\ntype InterfaceReleased struct {\n\tIndex int\n\tSuccess bool\n}\n\ntype RouteRule struct {\n\tDestination string\n\tGateway string\n\tViaThis bool\n}\n\ntype NetDevInsertedEvent struct {\n\tIndex int\n\tDeviceName string\n\tAddress int\n}\n\ntype NetDevRemovedEvent struct {\n\tIndex int\n}\n\ntype DeviceFailed struct {\n\tSession VmEvent\n}\n\ntype Interrupted struct {\n\tReason string\n}\n\ntype GenericOperation struct {\n\tOpName string\n\tState []string\n\tOpFunc func(ctx *VmContext, result chan<- error)\n\tResult chan<- error\n}\n\nfunc (qe *VmStartFailEvent) Event() int { return EVENT_VM_START_FAILED }\nfunc (qe *VmExit) Event() int { return EVENT_VM_EXIT }\nfunc (qe *VmKilledEvent) Event() int { return EVENT_VM_KILL }\nfunc (qe *PauseCommand) Event() int { return COMMAND_PAUSEVM }\nfunc (qe *PauseResult) Event() int { return EVENT_PAUSE_RESULT }\nfunc (qe *VmTimeout) Event() int { return EVENT_VM_TIMEOUT }\nfunc (qe *PodFinished) Event() int { return EVENT_POD_FINISH }\nfunc (qe *InitConnectedEvent) Event() int { return EVENT_INIT_CONNECTED }\nfunc (qe *ContainerCreatedEvent) Event() int { return EVENT_CONTAINER_ADD }\nfunc (qe *ContainerUnmounted) Event() int { return EVENT_CONTAINER_DELETE }\nfunc (qe *VolumeUnmounted) Event() int { return EVENT_BLOCK_EJECTED }\nfunc (qe *VolumeReadyEvent) Event() int { return EVENT_VOLUME_ADD }\nfunc (qe *BlockdevInsertedEvent) Event() int { return EVENT_BLOCK_INSERTED }\nfunc (qe *DevSkipEvent) Event() int { return EVENT_DEV_SKIP }\nfunc (qe *BlockdevRemovedEvent) Event() int { return EVENT_VOLUME_DELETE }\nfunc (qe *InterfaceCreated) Event() int { return EVENT_INTERFACE_ADD }\nfunc (qe *InterfaceReleased) Event() int { return EVENT_INTERFACE_DELETE }\nfunc (qe *NetDevInsertedEvent) Event() int { return EVENT_INTERFACE_INSERTED }\nfunc (qe *NetDevRemovedEvent) Event() int { return EVENT_INTERFACE_EJECTED }\nfunc (qe *RunPodCommand) Event() int { return COMMAND_RUN_POD }\nfunc (qe *GetPodIPCommand) Event() int { return COMMAND_GET_POD_IP }\nfunc (qe *GetPodStatsCommand) Event() int { return COMMAND_GET_POD_STATS }\nfunc (qe *StopPodCommand) Event() int { return COMMAND_STOP_POD }\nfunc (qe *ReplacePodCommand) Event() int { return COMMAND_REPLACE_POD }\nfunc (qe *NewContainerCommand) Event() int { return COMMAND_NEWCONTAINER }\nfunc (qe *ExecCommand) Event() int { return COMMAND_EXEC }\nfunc (qe *KillCommand) Event() int { return COMMAND_KILL }\nfunc (qe *OnlineCpuMemCommand) Event() int { return COMMAND_ONLINECPUMEM }\nfunc (qe *WriteFileCommand) Event() int { return COMMAND_WRITEFILE }\nfunc (qe *ReadFileCommand) Event() int { return COMMAND_READFILE }\nfunc (qe *AttachCommand) Event() int { return COMMAND_ATTACH }\nfunc (qe *WindowSizeCommand) Event() int { return COMMAND_WINDOWSIZE }\nfunc (qe *ShutdownCommand) Event() int { return COMMAND_SHUTDOWN }\nfunc (qe *ReleaseVMCommand) Event() int { return COMMAND_RELEASE }\nfunc (qe *CommandAck) Event() int { return COMMAND_ACK }\nfunc (qe *GenericOperation) Event() int { return GENERIC_OPERATION }\nfunc (qe *InitFailedEvent) Event() int { return ERROR_INIT_FAIL }\nfunc (qe *DeviceFailed) Event() int { return ERROR_QMP_FAIL }\nfunc (qe *Interrupted) Event() int { return ERROR_INTERRUPTED }\nfunc (qe *CommandError) Event() int { return ERROR_CMD_FAIL }\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build OMIT\n\npackage main\n\nimport \"fmt\"\n\nfunc f(left, right chan int) {\n\tleft <- 1 + <-right\n}\n\nfunc main() {\n\tconst n = 100000\n\tleftmost := make(chan int)\n\tright := leftmost\n\tleft := leftmost\n\tfor i := 0; i < n; i++ {\n\t\tright = make(chan int)\n\t\tgo f(left, right)\n\t\tleft = right\n\t}\n\tgo func(c chan int) { c <- 1 }(right)\n\tfmt.Println(<-leftmost)\n}\n<commit_msg>go.talks\/2013\/concurrency: decrease length of the daisy-chain<commit_after>\/\/ +build OMIT\n\npackage main\n\nimport \"fmt\"\n\nfunc f(left, right chan int) {\n\tleft <- 1 + <-right\n}\n\nfunc main() {\n\tconst n = 10000\n\tleftmost := make(chan int)\n\tright := leftmost\n\tleft := leftmost\n\tfor i := 0; i < n; i++ {\n\t\tright = make(chan int)\n\t\tgo f(left, right)\n\t\tleft = right\n\t}\n\tgo func(c chan int) { c <- 1 }(right)\n\tfmt.Println(<-leftmost)\n}\n<|endoftext|>"} {"text":"<commit_before>package tsz\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n)\n\nvar T uint32\nvar V float64\n\nfunc BenchmarkPushSeries4h(b *testing.B) {\n\ts := NewSeries4h(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\tif i%10 == 0 {\n\t\t\ts.Push(i, 0)\n\t\t} else if i%10 == 1 {\n\t\t\ts.Push(i, 1)\n\t\t} else {\n\t\t\ts.Push(i, float64(i)+123.45)\n\t\t}\n\t}\n\ts.Finish()\n\tb.Logf(\"Series4h size: %dB\", len(s.Bytes()))\n}\n\nfunc benchmarkSeriesLong(b *testing.B, generator func(uint32) float64) {\n\ts := NewSeriesLong(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, generator(i))\n\t}\n\ts.Finish()\n\tb.Logf(\"SeriesLong size: %d points in %dB, avg %.2f bytes\/point\", N, len(s.Bytes()), float64(len(s.Bytes()))\/float64(N))\n}\n\nfunc BenchmarkPushSeriesLongMonotonicIncreaseWithResets(b *testing.B) {\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\tif i%10 == 0 {\n\t\t\treturn 0\n\t\t} else if i%10 == 1 {\n\t\t\treturn 1\n\t\t}\n\t\treturn float64(i) + 123.45\n\t})\n}\n\nfunc BenchmarkPushSeriesLongMonotonicIncrease(b *testing.B) {\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\treturn float64(i) + 123.45\n\t})\n}\n\nfunc BenchmarkPushSeriesLongSawtooth(b *testing.B) {\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\tmultiplier := 1.0\n\t\tif i%2 == 0 {\n\t\t\tmultiplier = -1.0\n\t\t}\n\t\treturn multiplier*123.45 + float64(i)\/1000\n\t})\n}\n\nfunc BenchmarkPushSeriesLongSawtoothWithFlats(b *testing.B) {\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\tmultiplier := 1.0\n\t\tif i%2 == 0 && i%100 != 0 {\n\t\t\tmultiplier = -1.0\n\t\t}\n\t\treturn multiplier*123.45 + float64(i)\/1000\n\t})\n}\n\nfunc BenchmarkPushSeriesLongSteps(b *testing.B) {\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\tmultiplier := 1.0\n\t\tif (i\/100)%2 == 0 {\n\t\t\tmultiplier = -1.0\n\t\t}\n\t\treturn multiplier*123.45 + float64(i)\/1000\n\t})\n}\n\nfunc BenchmarkPushSeriesLongRealWorldCPU(b *testing.B) {\n\tvalues := []float64{95.3, 95.7, 86.2, 95.0, 94.7, 95.4, 94.5, 94.0, 94.7, 95.0, 95.0, 93.8, 95.3, 95.4, 94.6, 83.8, 94.5, 94.5, 94.6, 92.0, 95.0, 89.6, 72.8, 72.1, 86.5, 94.9, 94.9, 93.9, 94.4, 95.4, 95.1, 93.7, 95.5, 95.4, 94.4, 93.2, 94.6, 95.5, 94.9, 94.1, 95.0, 95.5, 94.7, 93.7, 95.1, 96.6, 95.3, 94.0, 95.0, 95.2, 93.3, 94.2, 95.2, 94.9, 94.5, 95.3, 93.2, 95.4, 95.0, 95.2, 93.7}\n\tbenchmarkSeriesLong(b, func(i uint32) float64 {\n\t\treturn values[int(i)%len(values)]\n\t})\n}\n\nfunc BenchmarkIterSeries4h(b *testing.B) {\n\ts := NewSeries4h(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\titer := s.Iter(1)\n\tvar t uint32\n\tvar v float64\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n\nfunc BenchmarkIterSeriesLong(b *testing.B) {\n\ts := NewSeriesLong(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\titer := s.Iter()\n\tvar t uint32\n\tvar v float64\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n\nfunc BenchmarkIterSeriesLongInterface(b *testing.B) {\n\ts := NewSeriesLong(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\tvar t uint32\n\tvar v float64\n\tvar iter Iter\n\t\/\/ avoid compiler optimization where it can statically assign the right type\n\t\/\/ and skip the overhead of the interface\n\tif rand.Intn(1) == 0 {\n\t\titer = s.Iter()\n\t}\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n<commit_msg>divorce b.N from number of points in chunk<commit_after>package tsz\n\nimport (\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nvar T uint32\nvar V float64\n\nfunc BenchmarkPushSeries4h(b *testing.B) {\n\ts := NewSeries4h(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\tif i%10 == 0 {\n\t\t\ts.Push(i, 0)\n\t\t} else if i%10 == 1 {\n\t\t\ts.Push(i, 1)\n\t\t} else {\n\t\t\ts.Push(i, float64(i)+123.45)\n\t\t}\n\t}\n\ts.Finish()\n\tb.Logf(\"Series4h size: %dB\", len(s.Bytes()))\n}\n\nvar benchmarkSeriesLongChunkSizes = []int{1, 10, 30, 60, 120, 180, 240}\n\nfunc benchmarkSeriesLong(num int, generator func(uint32) float64) func(b *testing.B) {\n\treturn func(b *testing.B) {\n\t\tvar s *SeriesLong\n\t\tfor j := 0; j < b.N; j++ {\n\t\t\ts = NewSeriesLong(0)\n\t\t\tN := uint32(num)\n\t\t\tfor i := uint32(1); i <= N; i++ {\n\t\t\t\ts.Push(i, generator(i))\n\t\t\t}\n\t\t\ts.Finish()\n\t\t}\n\t\tb.Logf(\"SeriesLong size: %d points in %dB, avg %.2f bytes\/point, b.n %d\", num, len(s.Bytes()), float64(len(s.Bytes()))\/float64(num), b.N)\n\t}\n}\n\nfunc BenchmarkPushSeriesLongMonotonicIncreaseWithResets(b *testing.B) {\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\tif i%10 == 0 {\n\t\t\t\treturn 0\n\t\t\t} else if i%10 == 1 {\n\t\t\t\treturn 1\n\t\t\t}\n\t\t\treturn float64(i) + 123.45\n\t\t}))\n\t}\n}\n\nfunc BenchmarkPushSeriesLongMonotonicIncrease(b *testing.B) {\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\treturn float64(i) + 123.45\n\t\t}))\n\t}\n}\n\nfunc BenchmarkPushSeriesLongSawtooth(b *testing.B) {\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\tmultiplier := 1.0\n\t\t\tif i%2 == 0 {\n\t\t\t\tmultiplier = -1.0\n\t\t\t}\n\t\t\treturn multiplier*123.45 + float64(i)\/1000\n\t\t}))\n\t}\n}\n\nfunc BenchmarkPushSeriesLongSawtoothWithFlats(b *testing.B) {\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\tmultiplier := 1.0\n\t\t\tif i%2 == 0 && i%100 != 0 {\n\t\t\t\tmultiplier = -1.0\n\t\t\t}\n\t\t\treturn multiplier*123.45 + float64(i)\/1000\n\t\t}))\n\t}\n}\n\nfunc BenchmarkPushSeriesLongSteps(b *testing.B) {\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\tmultiplier := 1.0\n\t\t\tif (i\/100)%2 == 0 {\n\t\t\t\tmultiplier = -1.0\n\t\t\t}\n\t\t\treturn multiplier*123.45 + float64(i)\/1000\n\t\t}))\n\t}\n}\n\nfunc BenchmarkPushSeriesLongRealWorldCPU(b *testing.B) {\n\tvalues := []float64{95.3, 95.7, 86.2, 95.0, 94.7, 95.4, 94.5, 94.0, 94.7, 95.0, 95.0, 93.8, 95.3, 95.4, 94.6, 83.8, 94.5, 94.5, 94.6, 92.0, 95.0, 89.6, 72.8, 72.1, 86.5, 94.9, 94.9, 93.9, 94.4, 95.4, 95.1, 93.7, 95.5, 95.4, 94.4, 93.2, 94.6, 95.5, 94.9, 94.1, 95.0, 95.5, 94.7, 93.7, 95.1, 96.6, 95.3, 94.0, 95.0, 95.2, 93.3, 94.2, 95.2, 94.9, 94.5, 95.3, 93.2, 95.4, 95.0, 95.2, 93.7}\n\tfor _, num := range benchmarkSeriesLongChunkSizes {\n\t\tb.Run(strconv.Itoa(num), benchmarkSeriesLong(num, func(i uint32) float64 {\n\t\t\treturn values[int(i)%len(values)]\n\t\t}))\n\t}\n}\n\nfunc BenchmarkIterSeries4h(b *testing.B) {\n\ts := NewSeries4h(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\titer := s.Iter(1)\n\tvar t uint32\n\tvar v float64\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n\nfunc BenchmarkIterSeriesLong(b *testing.B) {\n\ts := NewSeriesLong(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\titer := s.Iter()\n\tvar t uint32\n\tvar v float64\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n\nfunc BenchmarkIterSeriesLongInterface(b *testing.B) {\n\ts := NewSeriesLong(0)\n\tN := uint32(b.N)\n\tfor i := uint32(1); i <= N; i++ {\n\t\ts.Push(i, 123.45)\n\t}\n\tb.ResetTimer()\n\tvar t uint32\n\tvar v float64\n\tvar iter Iter\n\t\/\/ avoid compiler optimization where it can statically assign the right type\n\t\/\/ and skip the overhead of the interface\n\tif rand.Intn(1) == 0 {\n\t\titer = s.Iter()\n\t}\n\tfor iter.Next() {\n\t\tt, v = iter.Values()\n\t}\n\terr := iter.Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tT = t\n\tV = v\n}\n<|endoftext|>"} {"text":"<commit_before>package cfg\n\nimport (\n \"encoding\/json\"\n \"io\/ioutil\"\n \"log\"\n)\n\n\/\/ Configuration is a Configuration object.\ntype Configuration struct {\n LogLevel string\n UploadPath string\n}\n\n\/\/ Monitoring monitoring\ntype Monitoring struct {\n Enable string `json:\"enabled\"`\n}\n\n\/\/ EC2Config EC2 configuration\ntype EC2Config struct {\n DryRun string `json:\"dry_run\"`\n ImageID string `json:\"image_id\"`\n KeyName string `json:\"key_name\"`\n MinCount int `json:\"min_count\"`\n MaxCount int `json:\"max_count\"`\n InstanceType string `json:\"instance_type\"`\n Monitoring Monitoring `json:\"monitoring\"`\n}\n\n\/\/ IPRange ip range\ntype IPRange struct {\n \/\/ CidrIP cidr_ip\n CidrIP string `json:\"cidr_ip\"`\n}\n\n\/\/ IPPermission ip permission\ntype IPPermission struct {\n IPProtocol string `json:\"ip_protocol\"`\n FromPort string `json:\"from_port\"`\n ToPort string `json:\"to_port\"`\n IPRanges []IPRange `json:\"ip_ranges\"`\n}\n\n\/\/ SecurityGroup Security Group\ntype SecurityGroup struct {\n IPPermissions []IPPermission `json:\"ip_permission\"`\n}\n\n\/\/ LoadEC2Configuration Loads the configuration file.\nfunc LoadEC2Configuration() {\n dat, err := ioutil.ReadFile(\"ec2_conf.json\")\n if err != nil {\n panic(err)\n }\n ec2Config := EC2Config{}\n err = json.Unmarshal(dat, &ec2Config)\n if err != nil {\n panic(err)\n }\n log.Println(ec2Config)\n}\n<commit_msg>Parsing json file.<commit_after>package cfg\n\nimport (\n \"encoding\/json\"\n \"io\/ioutil\"\n \"log\"\n)\n\n\/\/ Configuration is a Configuration object.\ntype Configuration struct {\n LogLevel string\n UploadPath string\n}\n\n\/\/ Monitoring monitoring\ntype Monitoring struct {\n Enable bool `json:\"enabled\"`\n}\n\n\/\/ EC2Config EC2 configuration\ntype EC2Config struct {\n DryRun bool `json:\"dry_run\"`\n ImageID string `json:\"image_id\"`\n KeyName string `json:\"key_name\"`\n MinCount int `json:\"min_count\"`\n MaxCount int `json:\"max_count\"`\n InstanceType string `json:\"instance_type\"`\n Monitoring Monitoring `json:\"monitoring\"`\n}\n\n\/\/ IPRange ip range\ntype IPRange struct {\n \/\/ CidrIP cidr_ip\n CidrIP string `json:\"cidr_ip\"`\n}\n\n\/\/ IPPermission ip permission\ntype IPPermission struct {\n IPProtocol string `json:\"ip_protocol\"`\n FromPort string `json:\"from_port\"`\n ToPort string `json:\"to_port\"`\n IPRanges []IPRange `json:\"ip_ranges\"`\n}\n\n\/\/ SecurityGroup Security Group\ntype SecurityGroup struct {\n IPPermissions []IPPermission `json:\"ip_permission\"`\n}\n\n\/\/ LoadEC2Configuration Loads the configuration file.\nfunc LoadEC2Configuration() {\n dat, err := ioutil.ReadFile(\"cfg\/ec2_conf.json\")\n if err != nil {\n panic(err)\n }\n ec2Config := EC2Config{}\n err = json.Unmarshal(dat, &ec2Config)\n if err != nil {\n panic(err)\n }\n log.Println(ec2Config)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"github.com\/openzipkin\/zipkin-go-opentracing\"\n\t\"github.com\/openzipkin\/zipkin-go-opentracing\/thrift\/gen-go\/zipkincore\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ PrometheusCollector is a custom Collector\n\/\/ which sends ZipKin traces to Prometheus\ntype PrometheusCollector struct {\n\n\t\/\/ Each span name is published as a separate Histogram metric\n\t\/\/ Using metric names of the form fn_span_<span-name>_duration_seconds\n\thistogramVecMap map[string]*prometheus.HistogramVec\n}\n\n\/\/ NewPrometheusCollector returns a new PrometheusCollector\nfunc NewPrometheusCollector() (zipkintracer.Collector, error) {\n\tpc := &PrometheusCollector{make(map[string]*prometheus.HistogramVec)}\n\treturn pc, nil\n}\n\n\/\/ Return the HistogramVec corresponding to the specified spanName.\n\/\/ If a HistogramVec does not already exist for specified spanName then one is created and configured with the specified labels\n\/\/ otherwise the labels parameter is ignored.\nfunc (pc PrometheusCollector) getHistogramVecForSpanName(spanName string, labels []string) *prometheus.HistogramVec {\n\tthisHistogramVec, found := pc.histogramVecMap[spanName]\n\tif !found {\n\t\tthisHistogramVec = prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName: \"fn_span_\" + spanName + \"_duration_seconds\",\n\t\t\t\tHelp: \"Span \" + spanName + \" duration, by span name\",\n\t\t\t},\n\t\t\tlabels,\n\t\t)\n\t\tpc.histogramVecMap[spanName] = thisHistogramVec\n\t\tprometheus.MustRegister(thisHistogramVec)\n\t}\n\treturn thisHistogramVec\n}\n\n\/\/ PrometheusCollector implements Collector.\nfunc (pc PrometheusCollector) Collect(span *zipkincore.Span) error {\n\n\t\/\/ extract any label values from the span\n\tlabelKeys, labelValueMap := getLabels(span)\n\n\tpc.getHistogramVecForSpanName(span.GetName(), labelKeys).With(labelValueMap).Observe((time.Duration(span.GetDuration()) * time.Microsecond).Seconds())\n\treturn nil\n}\n\n\/\/ extract from the specified span the key\/value pairs that we want to add as labels to the Prometheus metric for this span\n\/\/ returns an array of keys, and a map of key-value pairs\nfunc getLabels(span *zipkincore.Span) ([]string, map[string]string) {\n\n\tvar keys []string\n\tlabelMap := make(map[string]string)\n\n\t\/\/ extract any tags whose key starts with \"fn\" from the span\n\tbinaryAnnotations := span.GetBinaryAnnotations()\n\tfor _, thisBinaryAnnotation := range binaryAnnotations {\n\t\tkey := thisBinaryAnnotation.GetKey()\n\t\tif thisBinaryAnnotation.GetAnnotationType() == zipkincore.AnnotationType_STRING && strings.HasPrefix(key, \"fn\") {\n\t\t\tkeys = append(keys, key)\n\t\t\tvalue := string(thisBinaryAnnotation.GetValue()[:])\n\t\t\tlabelMap[key] = value\n\t\t}\n\t}\n\n\treturn keys, labelMap\n}\n\n\/\/ PrometheusCollector implements Collector.\nfunc (PrometheusCollector) Close() error { return nil }\n<commit_msg>Add protection against inconsistent cardinaliity errors<commit_after>package server\n\nimport (\n\t\"github.com\/openzipkin\/zipkin-go-opentracing\"\n\t\"github.com\/openzipkin\/zipkin-go-opentracing\/thrift\/gen-go\/zipkincore\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ PrometheusCollector is a custom Collector\n\/\/ which sends ZipKin traces to Prometheus\ntype PrometheusCollector struct {\n\n\t\/\/ Each span name is published as a separate Histogram metric\n\t\/\/ Using metric names of the form fn_span_<span-name>_duration_seconds\n\n\t\/\/ In this map, the key is the name of a tracing span,\n\t\/\/ and the corresponding value is a HistogramVec metric used to report the duration of spans with this name to Prometheus\n\thistogramVecMap map[string]*prometheus.HistogramVec\n\n\t\/\/ In this map, the key is the name of a tracing span,\n\t\/\/ and the corresponding value is an array containing the label keys that were specified when the HistogramVec metric was created\n\tregisteredLabelKeysMap map[string][]string\n}\n\n\/\/ NewPrometheusCollector returns a new PrometheusCollector\nfunc NewPrometheusCollector() (zipkintracer.Collector, error) {\n\tpc := &PrometheusCollector{make(map[string]*prometheus.HistogramVec), make(map[string][]string)}\n\treturn pc, nil\n}\n\n\/\/ PrometheusCollector implements Collector.\nfunc (pc PrometheusCollector) Collect(span *zipkincore.Span) error {\n\tvar labelValuesToUse map[string]string\n\n\t\/\/ extract any label values from the span\n\tlabelKeysFromSpan, labelValuesFromSpan := getLabels(span)\n\n\t\/\/ get the HistogramVec for this span name\n\thistogramVec, found := pc.histogramVecMap[span.GetName()]\n\tif !found {\n\t\t\/\/ create a new HistogramVec\n\t\thistogramVec = prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName: \"fn_span_\" + span.GetName() + \"_duration_seconds\",\n\t\t\t\tHelp: \"Span \" + span.GetName() + \" duration, by span name\",\n\t\t\t},\n\t\t\tlabelKeysFromSpan,\n\t\t)\n\t\tpc.histogramVecMap[span.GetName()] = histogramVec\n\t\tpc.registeredLabelKeysMap[span.GetName()] = labelKeysFromSpan\n\t\tprometheus.MustRegister(histogramVec)\n\t\tlabelValuesToUse = labelValuesFromSpan\n\t} else {\n\t\t\/\/ found an existing HistogramVec\n\t\t\/\/ need to be careful here, since we must supply the same label keys as when we first created the metric\n\t\t\/\/ otherwise we will get a \"inconsistent label cardinality\" panic\n\t\t\/\/ that's why we saved the original label keys in the registeredLabelKeysMap map\n\t\t\/\/ so we can use that to construct a map of label key\/value pairs to set on the metric\n\t\tlabelValuesToUse = make(map[string]string)\n\t\tfor _, thisRegisteredLabelKey := range pc.registeredLabelKeysMap[span.GetName()] {\n\t\t\tif value, found := labelValuesFromSpan[thisRegisteredLabelKey]; found {\n\t\t\t\tlabelValuesToUse[thisRegisteredLabelKey] = value\n\t\t\t} else {\n\t\t\t\tlabelValuesToUse[thisRegisteredLabelKey] = \"\"\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ now report the metric value\n\thistogramVec.With(labelValuesToUse).Observe((time.Duration(span.GetDuration()) * time.Microsecond).Seconds())\n\n\treturn nil\n}\n\n\/\/ extract from the specified span the key\/value pairs that we want to add as labels to the Prometheus metric for this span\n\/\/ returns an array of keys, and a map of key-value pairs\nfunc getLabels(span *zipkincore.Span) ([]string, map[string]string) {\n\n\tvar keys []string\n\tlabelMap := make(map[string]string)\n\n\t\/\/ extract any tags whose key starts with \"fn\" from the span\n\tbinaryAnnotations := span.GetBinaryAnnotations()\n\tfor _, thisBinaryAnnotation := range binaryAnnotations {\n\t\tkey := thisBinaryAnnotation.GetKey()\n\t\tif thisBinaryAnnotation.GetAnnotationType() == zipkincore.AnnotationType_STRING && strings.HasPrefix(key, \"fn\") {\n\t\t\tkeys = append(keys, key)\n\t\t\tvalue := string(thisBinaryAnnotation.GetValue()[:])\n\t\t\tlabelMap[key] = value\n\t\t}\n\t}\n\n\treturn keys, labelMap\n}\n\n\/\/ PrometheusCollector implements Collector.\nfunc (PrometheusCollector) Close() error { return nil }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Julian Phillips. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage py\n\n\/\/ #include \"utils.h\"\n\/\/ static inline void decref(PyObject *obj) { Py_DECREF(obj); }\nimport \"C\"\n\nimport (\n\t\"unsafe\"\n)\n\ntype Op int\n\nconst (\n\tLT = Op(C.Py_LT)\n\tLE = Op(C.Py_LE)\n\tEQ = Op(C.Py_EQ)\n\tNE = Op(C.Py_NE)\n\tGT = Op(C.Py_GT)\n\tGE = Op(C.Py_GE)\n)\n\ntype Object interface {\n\tBase() *BaseObject\n\tType() *Type\n\tDecref()\n\tIncref()\n\tIsTrue() bool\n\tNot() bool\n\tFree()\n}\n\nvar None = (*NoneObject)(unsafe.Pointer(&C._Py_NoneStruct))\n\ntype NoneObject struct {\n\tAbstractObject\n}\n\nfunc (n *NoneObject) String() string {\n\treturn \"None\"\n}\n\nfunc c(obj Object) *C.PyObject {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\treturn (*C.PyObject)(unsafe.Pointer(obj.Base()))\n}\n\nvar types = make(map[*C.PyTypeObject]*Class)\n\nfunc registerType(pyType *C.PyTypeObject, class *Class) {\n\ttypes[pyType] = class\n}\n\nfunc newObject(obj *C.PyObject) Object {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\to := unsafe.Pointer(obj)\n\tif o == unsafe.Pointer(None) {\n\t\treturn None\n\t}\n\tpyType := (*C.PyTypeObject)(obj.ob_type)\n\tclass, ok := types[pyType]\n\tif ok {\n\t\tt := unsafe.Typeof(class.Pointer)\n\t\tret, ok := unsafe.Unreflect(t, unsafe.Pointer(&obj)).(Object)\n\t\tif ok {\n\t\t\treturn ret\n\t\t}\n\t}\n\tswitch C.getBasePyType(obj) {\n\tcase &C.PyList_Type:\n\t\treturn (*List)(o)\n\tcase &C.PyTuple_Type:\n\t\treturn (*Tuple)(o)\n\tcase &C.PyDict_Type:\n\t\treturn (*Dict)(o)\n\tcase &C.PyString_Type:\n\t\treturn (*String)(o)\n\tcase &C.PyBool_Type:\n\t\treturn newBool(obj)\n\tcase &C.PyInt_Type:\n\t\treturn (*Int)(o)\n\tcase &C.PyLong_Type:\n\t\treturn (*Long)(o)\n\tcase &C.PyFloat_Type:\n\t\treturn (*Float)(o)\n\tcase &C.PyModule_Type:\n\t\treturn (*Module)(o)\n\tcase &C.PyType_Type:\n\t\treturn (*Type)(o)\n\tcase &C.PyCode_Type:\n\t\treturn (*Code)(o)\n\tcase &C.PyCFunction_Type:\n\t\treturn (*CFunction)(o)\n\t}\n\tfor pyType.tp_base != nil {\n\t\tpyType = (*C.PyTypeObject)(unsafe.Pointer(pyType.tp_base))\n\t\tclass, ok := types[pyType]\n\t\tif ok {\n\t\t\tt := unsafe.Typeof(class.Pointer)\n\t\t\tret, ok := unsafe.Unreflect(t, unsafe.Pointer(&obj)).(Object)\n\t\t\tif ok {\n\t\t\t\treturn ret\n\t\t\t}\n\t\t}\n\t}\n\treturn newBaseObject(obj)\n}\n<commit_msg>Check base object before standard types<commit_after>\/\/ Copyright 2011 Julian Phillips. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage py\n\n\/\/ #include \"utils.h\"\n\/\/ static inline void decref(PyObject *obj) { Py_DECREF(obj); }\nimport \"C\"\n\nimport (\n\t\"unsafe\"\n)\n\ntype Op int\n\nconst (\n\tLT = Op(C.Py_LT)\n\tLE = Op(C.Py_LE)\n\tEQ = Op(C.Py_EQ)\n\tNE = Op(C.Py_NE)\n\tGT = Op(C.Py_GT)\n\tGE = Op(C.Py_GE)\n)\n\ntype Object interface {\n\tBase() *BaseObject\n\tType() *Type\n\tDecref()\n\tIncref()\n\tIsTrue() bool\n\tNot() bool\n\tFree()\n}\n\nvar None = (*NoneObject)(unsafe.Pointer(&C._Py_NoneStruct))\n\ntype NoneObject struct {\n\tAbstractObject\n}\n\nfunc (n *NoneObject) String() string {\n\treturn \"None\"\n}\n\nfunc c(obj Object) *C.PyObject {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\treturn (*C.PyObject)(unsafe.Pointer(obj.Base()))\n}\n\nvar types = make(map[*C.PyTypeObject]*Class)\n\nfunc registerType(pyType *C.PyTypeObject, class *Class) {\n\ttypes[pyType] = class\n}\n\nfunc newObject(obj *C.PyObject) Object {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\to := unsafe.Pointer(obj)\n\tif o == unsafe.Pointer(None) {\n\t\treturn None\n\t}\n\tpyType := (*C.PyTypeObject)(obj.ob_type)\n\tclass, ok := types[pyType]\n\tif ok {\n\t\tt := unsafe.Typeof(class.Pointer)\n\t\tret, ok := unsafe.Unreflect(t, unsafe.Pointer(&obj)).(Object)\n\t\tif ok {\n\t\t\treturn ret\n\t\t}\n\t}\n\tfor pyType.tp_base != nil {\n\t\tpyType = (*C.PyTypeObject)(unsafe.Pointer(pyType.tp_base))\n\t\tclass, ok := types[pyType]\n\t\tif ok {\n\t\t\tt := unsafe.Typeof(class.Pointer)\n\t\t\tret, ok := unsafe.Unreflect(t, unsafe.Pointer(&obj)).(Object)\n\t\t\tif ok {\n\t\t\t\treturn ret\n\t\t\t}\n\t\t}\n\t}\n\tswitch C.getBasePyType(obj) {\n\tcase &C.PyList_Type:\n\t\treturn (*List)(o)\n\tcase &C.PyTuple_Type:\n\t\treturn (*Tuple)(o)\n\tcase &C.PyDict_Type:\n\t\treturn (*Dict)(o)\n\tcase &C.PyString_Type:\n\t\treturn (*String)(o)\n\tcase &C.PyBool_Type:\n\t\treturn newBool(obj)\n\tcase &C.PyInt_Type:\n\t\treturn (*Int)(o)\n\tcase &C.PyLong_Type:\n\t\treturn (*Long)(o)\n\tcase &C.PyFloat_Type:\n\t\treturn (*Float)(o)\n\tcase &C.PyModule_Type:\n\t\treturn (*Module)(o)\n\tcase &C.PyType_Type:\n\t\treturn (*Type)(o)\n\tcase &C.PyCode_Type:\n\t\treturn (*Code)(o)\n\tcase &C.PyCFunction_Type:\n\t\treturn (*CFunction)(o)\n\t}\n\treturn newBaseObject(obj)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/sys\/windows\/svc\"\n\t\"golang.org\/x\/sys\/windows\/svc\/eventlog\"\n)\n\nconst name = \"mackerel-agent\"\n\nconst defaultEid = 1\nconst startEid = 2\nconst stopEid = 3\nconst loggerEid = 4\n\nvar (\n\tkernel32 = syscall.NewLazyDLL(\"kernel32\")\n\tprocAllocConsole = kernel32.NewProc(\"AllocConsole\")\n\tprocGenerateConsoleCtrlEvent = kernel32.NewProc(\"GenerateConsoleCtrlEvent\")\n)\n\nfunc main() {\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tswitch os.Args[1] {\n\t\tcase \"install\":\n\t\t\terr = installService(\"mackerel-agent\", \"mackerel agent\")\n\t\tcase \"remove\":\n\t\t\terr = removeService(\"mackerel-agent\")\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\telog, err := eventlog.Open(name)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tdefer elog.Close()\n\n\t\/\/ `svc.Run` blocks until windows service will stopped.\n\t\/\/ ref. https:\/\/msdn.microsoft.com\/library\/cc429362.aspx\n\terr = svc.Run(name, &handler{elog: elog})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\ntype logger interface {\n\tInfo(eid uint32, msg string) error\n\tWarning(eid uint32, msg string) error\n\tError(eid uint32, msg string) error\n}\n\ntype handler struct {\n\telog logger\n\tcmd *exec.Cmd\n\tr io.Reader\n\tw io.WriteCloser\n\twg sync.WaitGroup\n}\n\n\/\/ ex.\n\/\/ verbose log: 2017\/01\/21 22:21:08 command.go:434: DEBUG <command> received 'immediate' chan\n\/\/ normal log: 2017\/01\/24 14:14:27 INFO <main> Starting mackerel-agent version:0.36.0\nvar logRe = regexp.MustCompile(`^\\d{4}\/\\d{2}\/\\d{2} \\d{2}:\\d{2}:\\d{2} (?:\\S+\\.go:\\d+: )?([A-Z]+) `)\n\nfunc (h *handler) retire() error {\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"), \"retire\", \"--force\")\n\tcmd.Dir = dir\n\treturn cmd.Run()\n}\n\nfunc (h *handler) start() error {\n\tprocAllocConsole.Call()\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"))\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tcmd.Dir = dir\n\n\th.cmd = cmd\n\th.r, h.w = io.Pipe()\n\tcmd.Stderr = h.w\n\n\terr := h.cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn h.aggregate()\n}\n\nfunc (h *handler) aggregate() error {\n\tbr := bufio.NewReader(h.r)\n\tlc := make(chan string, 10)\n\tdone := make(chan struct{})\n\n\t\/\/ It need to read data from pipe continuously. And it need to close handle\n\t\/\/ when process finished.\n\t\/\/ read data from pipe. When data arrived at EOL, send line-string to the\n\t\/\/ channel. Also remaining line to EOF.\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\tdefer h.w.Close()\n\n\t\t\/\/ pipe stderr to windows event log\n\t\tvar body bytes.Buffer\n\t\tfor {\n\t\t\tb, err := br.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\th.elog.Error(loggerEid, err.Error())\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif b == '\\n' {\n\t\t\t\tif body.Len() > 0 {\n\t\t\t\t\tlc <- body.String()\n\t\t\t\t\tbody.Reset()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody.WriteByte(b)\n\t\t}\n\t\tif body.Len() > 0 {\n\t\t\tlc <- body.String()\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\n\t\tlinebuf := []string{}\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase line := <-lc:\n\t\t\t\tif len(linebuf) == 0 || logRe.MatchString(line) {\n\t\t\t\t\tlinebuf = append(linebuf, line)\n\t\t\t\t} else {\n\t\t\t\t\tlinebuf[len(linebuf)-1] += \"\\n\" + line\n\t\t\t\t}\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t\/\/ When it take 10ms, it is located at end of paragraph. Then\n\t\t\t\t\/\/ slice appended at above should be the paragraph.\n\t\t\t\tfor _, line := range linebuf {\n\t\t\t\t\tif match := logRe.FindStringSubmatch(line); match != nil {\n\t\t\t\t\t\tlevel := match[1]\n\t\t\t\t\t\tswitch level {\n\t\t\t\t\t\tcase \"TRACE\", \"DEBUG\", \"INFO\":\n\t\t\t\t\t\t\th.elog.Info(defaultEid, line)\n\t\t\t\t\t\tcase \"WARNING\", \"ERROR\":\n\t\t\t\t\t\t\th.elog.Warning(defaultEid, line)\n\t\t\t\t\t\tcase \"CRITICAL\":\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\tbreak loop\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tlinebuf = nil\n\t\t\t}\n\t\t}\n\t\tclose(lc)\n\t\tclose(done)\n\t}()\n\n\treturn nil\n}\n\nfunc interrupt(p *os.Process) error {\n\tr1, _, err := procGenerateConsoleCtrlEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(p.Pid))\n\tif r1 == 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (h *handler) stop() error {\n\tif h.cmd != nil && h.cmd.Process != nil {\n\t\terr := interrupt(h.cmd.Process)\n\t\tif err == nil {\n\t\t\tend := time.Now().Add(10 * time.Second)\n\t\t\tfor time.Now().Before(end) {\n\t\t\t\tif h.cmd.ProcessState != nil && h.cmd.ProcessState.Exited() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t\treturn h.cmd.Process.Kill()\n\t}\n\n\th.wg.Wait()\n\treturn nil\n}\n\nfunc autoRetire() bool {\n\tenv := os.Getenv(\"MACKEREL_AUTO_RETIREMENT\")\n\treturn env != \"\" && env != \"0\"\n}\n\n\/\/ implement https:\/\/godoc.org\/golang.org\/x\/sys\/windows\/svc#Handler\nfunc (h *handler) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {\n\ts <- svc.Status{State: svc.StartPending}\n\tdefer func() {\n\t\ts <- svc.Status{State: svc.Stopped}\n\t}()\n\n\tif err := h.start(); err != nil {\n\t\th.elog.Error(startEid, err.Error())\n\t\t\/\/ https:\/\/msdn.microsoft.com\/library\/windows\/desktop\/ms681383(v=vs.85).aspx\n\t\t\/\/ use ERROR_SERVICE_SPECIFIC_ERROR\n\t\treturn true, 1\n\t}\n\n\texit := make(chan struct{})\n\tgo func() {\n\t\terr := h.cmd.Wait()\n\t\t\/\/ enter when the child process exited\n\t\tif err != nil {\n\t\t\th.elog.Error(stopEid, err.Error())\n\t\t}\n\t\texit <- struct{}{}\n\t}()\n\n\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\nL:\n\tfor {\n\t\tselect {\n\t\tcase req := <-r:\n\t\t\tswitch req.Cmd {\n\t\t\tcase svc.Interrogate:\n\t\t\t\ts <- req.CurrentStatus\n\t\t\tcase svc.Stop, svc.Shutdown:\n\t\t\t\ts <- svc.Status{State: svc.StopPending, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\tif err := h.stop(); err != nil {\n\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\t} else {\n\t\t\t\t\tif req.Cmd == svc.Shutdown && autoRetire() {\n\t\t\t\t\t\tif err := h.retire(); err != nil {\n\t\t\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptShutdown | svc.AcceptShutdown}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tbreak L\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc execdir() string {\n\tp, err := os.Executable()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Dir(p)\n}\n<commit_msg>Remove duplicate<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/sys\/windows\/svc\"\n\t\"golang.org\/x\/sys\/windows\/svc\/eventlog\"\n)\n\nconst name = \"mackerel-agent\"\n\nconst defaultEid = 1\nconst startEid = 2\nconst stopEid = 3\nconst loggerEid = 4\n\nvar (\n\tkernel32 = syscall.NewLazyDLL(\"kernel32\")\n\tprocAllocConsole = kernel32.NewProc(\"AllocConsole\")\n\tprocGenerateConsoleCtrlEvent = kernel32.NewProc(\"GenerateConsoleCtrlEvent\")\n)\n\nfunc main() {\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tswitch os.Args[1] {\n\t\tcase \"install\":\n\t\t\terr = installService(\"mackerel-agent\", \"mackerel agent\")\n\t\tcase \"remove\":\n\t\t\terr = removeService(\"mackerel-agent\")\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn\n\t}\n\n\telog, err := eventlog.Open(name)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tdefer elog.Close()\n\n\t\/\/ `svc.Run` blocks until windows service will stopped.\n\t\/\/ ref. https:\/\/msdn.microsoft.com\/library\/cc429362.aspx\n\terr = svc.Run(name, &handler{elog: elog})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\ntype logger interface {\n\tInfo(eid uint32, msg string) error\n\tWarning(eid uint32, msg string) error\n\tError(eid uint32, msg string) error\n}\n\ntype handler struct {\n\telog logger\n\tcmd *exec.Cmd\n\tr io.Reader\n\tw io.WriteCloser\n\twg sync.WaitGroup\n}\n\n\/\/ ex.\n\/\/ verbose log: 2017\/01\/21 22:21:08 command.go:434: DEBUG <command> received 'immediate' chan\n\/\/ normal log: 2017\/01\/24 14:14:27 INFO <main> Starting mackerel-agent version:0.36.0\nvar logRe = regexp.MustCompile(`^\\d{4}\/\\d{2}\/\\d{2} \\d{2}:\\d{2}:\\d{2} (?:\\S+\\.go:\\d+: )?([A-Z]+) `)\n\nfunc (h *handler) retire() error {\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"), \"retire\", \"--force\")\n\tcmd.Dir = dir\n\treturn cmd.Run()\n}\n\nfunc (h *handler) start() error {\n\tprocAllocConsole.Call()\n\tdir := execdir()\n\tcmd := exec.Command(filepath.Join(dir, \"mackerel-agent.exe\"))\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tcmd.Dir = dir\n\n\th.cmd = cmd\n\th.r, h.w = io.Pipe()\n\tcmd.Stderr = h.w\n\n\terr := h.cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn h.aggregate()\n}\n\nfunc (h *handler) aggregate() error {\n\tbr := bufio.NewReader(h.r)\n\tlc := make(chan string, 10)\n\tdone := make(chan struct{})\n\n\t\/\/ It need to read data from pipe continuously. And it need to close handle\n\t\/\/ when process finished.\n\t\/\/ read data from pipe. When data arrived at EOL, send line-string to the\n\t\/\/ channel. Also remaining line to EOF.\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\tdefer h.w.Close()\n\n\t\t\/\/ pipe stderr to windows event log\n\t\tvar body bytes.Buffer\n\t\tfor {\n\t\t\tb, err := br.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\th.elog.Error(loggerEid, err.Error())\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif b == '\\n' {\n\t\t\t\tif body.Len() > 0 {\n\t\t\t\t\tlc <- body.String()\n\t\t\t\t\tbody.Reset()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody.WriteByte(b)\n\t\t}\n\t\tif body.Len() > 0 {\n\t\t\tlc <- body.String()\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\n\t\tlinebuf := []string{}\n\tloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase line := <-lc:\n\t\t\t\tif len(linebuf) == 0 || logRe.MatchString(line) {\n\t\t\t\t\tlinebuf = append(linebuf, line)\n\t\t\t\t} else {\n\t\t\t\t\tlinebuf[len(linebuf)-1] += \"\\n\" + line\n\t\t\t\t}\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t\/\/ When it take 10ms, it is located at end of paragraph. Then\n\t\t\t\t\/\/ slice appended at above should be the paragraph.\n\t\t\t\tfor _, line := range linebuf {\n\t\t\t\t\tif match := logRe.FindStringSubmatch(line); match != nil {\n\t\t\t\t\t\tlevel := match[1]\n\t\t\t\t\t\tswitch level {\n\t\t\t\t\t\tcase \"TRACE\", \"DEBUG\", \"INFO\":\n\t\t\t\t\t\t\th.elog.Info(defaultEid, line)\n\t\t\t\t\t\tcase \"WARNING\", \"ERROR\":\n\t\t\t\t\t\t\th.elog.Warning(defaultEid, line)\n\t\t\t\t\t\tcase \"CRITICAL\":\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\th.elog.Error(defaultEid, line)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\tbreak loop\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tlinebuf = nil\n\t\t\t}\n\t\t}\n\t\tclose(lc)\n\t\tclose(done)\n\t}()\n\n\treturn nil\n}\n\nfunc interrupt(p *os.Process) error {\n\tr1, _, err := procGenerateConsoleCtrlEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(p.Pid))\n\tif r1 == 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (h *handler) stop() error {\n\tif h.cmd != nil && h.cmd.Process != nil {\n\t\terr := interrupt(h.cmd.Process)\n\t\tif err == nil {\n\t\t\tend := time.Now().Add(10 * time.Second)\n\t\t\tfor time.Now().Before(end) {\n\t\t\t\tif h.cmd.ProcessState != nil && h.cmd.ProcessState.Exited() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t\treturn h.cmd.Process.Kill()\n\t}\n\n\th.wg.Wait()\n\treturn nil\n}\n\nfunc autoRetire() bool {\n\tenv := os.Getenv(\"MACKEREL_AUTO_RETIREMENT\")\n\treturn env != \"\" && env != \"0\"\n}\n\n\/\/ implement https:\/\/godoc.org\/golang.org\/x\/sys\/windows\/svc#Handler\nfunc (h *handler) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {\n\ts <- svc.Status{State: svc.StartPending}\n\tdefer func() {\n\t\ts <- svc.Status{State: svc.Stopped}\n\t}()\n\n\tif err := h.start(); err != nil {\n\t\th.elog.Error(startEid, err.Error())\n\t\t\/\/ https:\/\/msdn.microsoft.com\/library\/windows\/desktop\/ms681383(v=vs.85).aspx\n\t\t\/\/ use ERROR_SERVICE_SPECIFIC_ERROR\n\t\treturn true, 1\n\t}\n\n\texit := make(chan struct{})\n\tgo func() {\n\t\terr := h.cmd.Wait()\n\t\t\/\/ enter when the child process exited\n\t\tif err != nil {\n\t\t\th.elog.Error(stopEid, err.Error())\n\t\t}\n\t\texit <- struct{}{}\n\t}()\n\n\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\nL:\n\tfor {\n\t\tselect {\n\t\tcase req := <-r:\n\t\t\tswitch req.Cmd {\n\t\t\tcase svc.Interrogate:\n\t\t\t\ts <- req.CurrentStatus\n\t\t\tcase svc.Stop, svc.Shutdown:\n\t\t\t\ts <- svc.Status{State: svc.StopPending, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\tif err := h.stop(); err != nil {\n\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}\n\t\t\t\t} else {\n\t\t\t\t\tif req.Cmd == svc.Shutdown && autoRetire() {\n\t\t\t\t\t\tif err := h.retire(); err != nil {\n\t\t\t\t\t\t\th.elog.Error(stopEid, err.Error())\n\t\t\t\t\t\t\ts <- svc.Status{State: svc.Running, Accepts: svc.AcceptShutdown}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tbreak L\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc execdir() string {\n\tp, err := os.Executable()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Dir(p)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/mrap\/stringutil\"\n\twordpatterns \"github.com\/mrap\/wordpatterns\"\n)\n\ntype CombosRes struct {\n\tCombos []string `json:\"combos\"`\n}\n\n\/\/ TODO: store results in a db\nvar (\n\t_sharedLookup = make(wordpatterns.Wordmap)\n\t_sharedWordmap = make(wordpatterns.Wordmap)\n)\n\nconst (\n\tMinCombosCount = 0\n\tMinMaxComboLen = 2\n\tMinMinComboLen = 2\n)\n\nfunc combosHandler(w http.ResponseWriter, req *http.Request) {\n\tv := req.URL.Query()\n\n\t\/\/ Validate query params\n\t\/\/ chars\n\tchars := v.Get(\"chars\")\n\tif len(chars) == 0 {\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t\/\/ count\n\tcount, err := strconv.Atoi(v.Get(\"count\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif count < MinCombosCount {\n\t\tcount = MinCombosCount\n\t}\n\n\t\/\/ max_len\n\tmaxLen, err := strconv.Atoi(v.Get(\"max_len\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif maxLen < MinMaxComboLen {\n\t\tmaxLen = MinMaxComboLen\n\t}\n\n\t\/\/ min_len\n\tminLen, err := strconv.Atoi(v.Get(\"min_len\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif minLen < MinMinComboLen {\n\t\tminLen = MinMinComboLen\n\t}\n\n\tcombos := RankedCombos(_sharedLookup, chars, minLen, maxLen)\n\tcomboList := GenerateComboList(_sharedWordmap, combos, count)\n\n\tdata, err := json.Marshal(CombosRes{comboList})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Write(data)\n}\n\n\/\/ StoreWords stores words from a word list file into\nfunc StoreWords(filename string) {\n\twm := wordpatterns.CreateWordmap(filename)\n\t_sharedWordmap = wm\n\t_sharedLookup = CreateCharWordLookup(_sharedWordmap)\n}\n\n\/\/ CreateCharWordLookup creates a lookup structure that makes it efficient\n\/\/ to query for sub-words that only contain specific characters\nfunc CreateCharWordLookup(wm wordpatterns.Wordmap) wordpatterns.Wordmap {\n\tvar (\n\t\tlookup = make(wordpatterns.Wordmap)\n\t\tsorted string\n\t)\n\n\tfor subword, _ := range wm {\n\t\tsorted = stringutil.SortString(subword)\n\t\tlookup[sorted] = append(lookup[sorted], subword)\n\t}\n\treturn lookup\n}\n\nfunc CombosWithChars(lookup wordpatterns.Wordmap, chars string, minLen int, maxLen int) []string {\n\tif maxLen < 0 {\n\t\tmaxLen = 0\n\t}\n\tif minLen < 0 {\n\t\tminLen = 0\n\t}\n\n\tvar combos []string\n\tsortedChars := stringutil.SortString(chars)\n\n\tfor _, subsorted := range stringutil.Substrs(sortedChars, 2) {\n\t\tif (minLen != 0 && len(subsorted) < minLen) || (maxLen != 0 && len(subsorted) > maxLen) {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tcombos = append(combos, lookup[subsorted]...)\n\t\t}\n\t}\n\treturn combos\n}\n\nfunc RankedCombos(lookup wordpatterns.Wordmap, chars string, minLen, maxLen int) []string {\n\tcombos := CombosWithChars(lookup, chars, minLen, maxLen)\n\tsort.Sort(ByWordCount(combos))\n\treturn combos\n}\n\n\/\/ GenerateComboList returns a slice of combos that are distributed based on rank\n\/\/ More occurring combos will appear more frequently\nfunc GenerateComboList(wm wordpatterns.Wordmap, combos []string, count int) []string {\n\ttotal := 0\n\tfor _, c := range combos {\n\t\ttotal += len(wm[c])\n\t}\n\n\tlist := make([]string, count)\n\tvar r, cur int\n\tfor i := 0; i < count; i++ {\n\t\tcur = 0\n\t\tr = rand.Intn(total) + 1\n\t\tfor _, c := range combos {\n\t\t\tcur += len(wm[c])\n\t\t\tif r <= cur {\n\t\t\t\tlist[i] = c\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\ntype ByWordCount []string\n\nfunc (a ByWordCount) Len() int { return len(a) }\nfunc (a ByWordCount) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ByWordCount) Less(i, j int) bool {\n\treturn _sharedWordmap.Compare(a[i], a[j]) >= 0\n}\n\nfunc printComboListCounts(combos []string) {\n\tcounts := make(map[string]int)\n\tfor _, c := range combos {\n\t\tcounts[c]++\n\t}\n\tfor key, count := range counts {\n\t\tfmt.Printf(\"%s -> %d\\n\", key, count)\n\t}\n\tfmt.Println()\n}\n<commit_msg>\/combo max_len is never less than min_len<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/mrap\/stringutil\"\n\twordpatterns \"github.com\/mrap\/wordpatterns\"\n)\n\ntype CombosRes struct {\n\tCombos []string `json:\"combos\"`\n}\n\n\/\/ TODO: store results in a db\nvar (\n\t_sharedLookup = make(wordpatterns.Wordmap)\n\t_sharedWordmap = make(wordpatterns.Wordmap)\n)\n\nconst (\n\tMinCombosCount = 0\n\tMinMaxComboLen = 2\n\tMinMinComboLen = 2\n)\n\nfunc combosHandler(w http.ResponseWriter, req *http.Request) {\n\tv := req.URL.Query()\n\n\t\/\/ Validate query params\n\t\/\/ chars\n\tchars := v.Get(\"chars\")\n\tif len(chars) == 0 {\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t\/\/ count\n\tcount, err := strconv.Atoi(v.Get(\"count\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif count < MinCombosCount {\n\t\tcount = MinCombosCount\n\t}\n\n\t\/\/ min_len\n\tminLen, err := strconv.Atoi(v.Get(\"min_len\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif minLen < MinMinComboLen {\n\t\tminLen = MinMinComboLen\n\t}\n\n\t\/\/ max_len\n\tmaxLen, err := strconv.Atoi(v.Get(\"max_len\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif maxLen < MinMaxComboLen {\n\t\tmaxLen = MinMaxComboLen\n\t}\n\tif maxLen < minLen {\n\t\tmaxLen = minLen\n\t}\n\n\tcombos := RankedCombos(_sharedLookup, chars, minLen, maxLen)\n\tcomboList := GenerateComboList(_sharedWordmap, combos, count)\n\n\tdata, err := json.Marshal(CombosRes{comboList})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Write(data)\n}\n\n\/\/ StoreWords stores words from a word list file into\nfunc StoreWords(filename string) {\n\twm := wordpatterns.CreateWordmap(filename)\n\t_sharedWordmap = wm\n\t_sharedLookup = CreateCharWordLookup(_sharedWordmap)\n}\n\n\/\/ CreateCharWordLookup creates a lookup structure that makes it efficient\n\/\/ to query for sub-words that only contain specific characters\nfunc CreateCharWordLookup(wm wordpatterns.Wordmap) wordpatterns.Wordmap {\n\tvar (\n\t\tlookup = make(wordpatterns.Wordmap)\n\t\tsorted string\n\t)\n\n\tfor subword, _ := range wm {\n\t\tsorted = stringutil.SortString(subword)\n\t\tlookup[sorted] = append(lookup[sorted], subword)\n\t}\n\treturn lookup\n}\n\nfunc CombosWithChars(lookup wordpatterns.Wordmap, chars string, minLen int, maxLen int) []string {\n\tif maxLen < 0 {\n\t\tmaxLen = 0\n\t}\n\tif minLen < 0 {\n\t\tminLen = 0\n\t}\n\n\tvar combos []string\n\tsortedChars := stringutil.SortString(chars)\n\n\tfor _, subsorted := range stringutil.Substrs(sortedChars, 2) {\n\t\tif (minLen != 0 && len(subsorted) < minLen) || (maxLen != 0 && len(subsorted) > maxLen) {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tcombos = append(combos, lookup[subsorted]...)\n\t\t}\n\t}\n\treturn combos\n}\n\nfunc RankedCombos(lookup wordpatterns.Wordmap, chars string, minLen, maxLen int) []string {\n\tcombos := CombosWithChars(lookup, chars, minLen, maxLen)\n\tsort.Sort(ByWordCount(combos))\n\treturn combos\n}\n\n\/\/ GenerateComboList returns a slice of combos that are distributed based on rank\n\/\/ More occurring combos will appear more frequently\nfunc GenerateComboList(wm wordpatterns.Wordmap, combos []string, count int) []string {\n\ttotal := 0\n\tfor _, c := range combos {\n\t\ttotal += len(wm[c])\n\t}\n\n\tlist := make([]string, count)\n\tvar r, cur int\n\tfor i := 0; i < count; i++ {\n\t\tcur = 0\n\t\tr = rand.Intn(total) + 1\n\t\tfor _, c := range combos {\n\t\t\tcur += len(wm[c])\n\t\t\tif r <= cur {\n\t\t\t\tlist[i] = c\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\ntype ByWordCount []string\n\nfunc (a ByWordCount) Len() int { return len(a) }\nfunc (a ByWordCount) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ByWordCount) Less(i, j int) bool {\n\treturn _sharedWordmap.Compare(a[i], a[j]) >= 0\n}\n\nfunc printComboListCounts(combos []string) {\n\tcounts := make(map[string]int)\n\tfor _, c := range combos {\n\t\tcounts[c]++\n\t}\n\tfor key, count := range counts {\n\t\tfmt.Printf(\"%s -> %d\\n\", key, count)\n\t}\n\tfmt.Println()\n}\n<|endoftext|>"} {"text":"<commit_before>package epub\n\nimport (\n\t\"archive\/zip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tcontainerFilename = \"container.xml\"\n\tcontainerFileTemplate = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">\n <rootfiles>\n <rootfile full-path=\"%s\/%s\" media-type=\"application\/oebps-package+xml\" \/>\n <\/rootfiles>\n<\/container>\n`\n\tcontentFolderName = \"EPUB\"\n\t\/\/ Permissions for any new directories we create\n\tdirPermissions = 0755\n\t\/\/ Permissions for any new files we create\n\tfilePermissions = 0644\n\timageFolderName = \"img\"\n\tmediaTypeNcx = \"application\/x-dtbncx+xml\"\n\tmediaTypeEpub = \"application\/epub+zip\"\n\tmediaTypeXhtml = \"application\/xhtml+xml\"\n\tmetaInfFolderName = \"META-INF\"\n\tmimetypeFilename = \"mimetype\"\n\tpkgFilename = \"package.opf\"\n\tsectionFileFormat = \"section%04d.xhtml\"\n\ttempDirPrefix = \"go-epub\"\n\txhtmlFolderName = \"xhtml\"\n)\n\nfunc (e *Epub) Write(destFilePath string) error {\n\ttempDir, err := ioutil.TempDir(\"\", tempDirPrefix)\n\tdefer os.Remove(tempDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Remove error: %s\", err)\n\t}\n\n\terr = createEpubFolders(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeMimetype(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeSections(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeToc(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeContainerFile(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writePackageFile(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeEpub(tempDir, destFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO\n\n\t\/\/\toutput, err := xml.MarshalIndent(e.toc.navDoc.xml, \"\", \" \")\n\t\/\/\toutput = append([]byte(xhtmlDoctype), output...)\n\n\t\/\/\toutput, err := xml.MarshalIndent(e.pkg.xml, \"\", \" \")\n\n\t\/\/ output, err := xml.MarshalIndent(e.toc.ncxXml, \"\", \" \")\n\t\/\/\toutput = append([]byte(xml.Header), output...)\n\t\/\/\tfmt.Println(string(output))\n\n\treturn nil\n}\n\nfunc createEpubFolders(tempDir string) error {\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tcontentFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tcontentFolderName,\n\t\t\txhtmlFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tmetaInfFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc writeContainerFile(tempDir string) error {\n\tcontainerFilePath := filepath.Join(tempDir, metaInfFolderName, containerFilename)\n\tif err := ioutil.WriteFile(\n\t\tcontainerFilePath,\n\t\t[]byte(\n\t\t\tfmt.Sprintf(\n\t\t\t\tcontainerFileTemplate,\n\t\t\t\tcontentFolderName,\n\t\t\t\tpkgFilename,\n\t\t\t),\n\t\t),\n\t\tfilePermissions,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/*\n type Rootfile struct {\n FullPath string `xml:\"full-path,attr\"`\n MediaType string `xml:\"media-type,attr\"`\n }\n\n type Rootfiles struct {\n Rootfile []Rootfile `xml:\"rootfile\"`\n }\n\n type Container struct {\n XMLName xml.Name `xml:\"urn:oasis:names:tc:opendocument:xmlns:container container\"`\n Version string `xml:\"version,attr\"`\n Rootfiles Rootfiles `xml:\"rootfiles\"`\n }\n\n v := &Container{Version: containerVersion}\n\n v.Rootfiles.Rootfile = append(\n v.Rootfiles.Rootfile,\n Rootfile{\n FullPath: filepath.Join(oebpsFolderName, contentFilename),\n MediaType: mediaTypeOebpsPackage,\n },\n )\n\n output, err := xml.MarshalIndent(v, \"\", ` `)\n if err != nil {\n return err\n }\n \/\/ Add the xml header to the output\n containerContent := append([]byte(xml.Header), output...)\n \/\/ It's generally nice to have files end with a newline\n containerContent = append(containerContent, \"\\n\"...)\n\n containerFilePath := filepath.Join(metaInfFolderPath, containerFilename)\n if err := ioutil.WriteFile(containerFilePath, containerContent, filePermissions); err != nil {\n return err\n }\n\n return nil\n}\n\n\/*\nfunc writeContentFile(tempDir string) error {\n oebpsFolderPath := filepath.Join(tempDir, oebpsFolderName)\n if err := os.Mkdir(oebpsFolderPath, dirPermissions); err != nil {\n return err\n }\n\n type Package struct {\n XMLName xml.Name `xml:\"http:\/\/www.idpf.org\/2007\/opf package\"`\n UniqueIdentifier string `xml:\"unique-identifier,attr\"`\n Version string `xml:\"version,attr\"`\n }\n\n\n\n\n\n type Rootfile struct {\n FullPath string `xml:\"full-path,attr\"`\n MediaType string `xml:\"media-type,attr\"`\n }\n\n type Rootfiles struct {\n Rootfile []Rootfile `xml:\"rootfile\"`\n }\n\n type Container struct {\n XMLName xml.Name `xml:\"urn:oasis:names:tc:opendocument:xmlns:container container\"`\n Version string `xml:\"version,attr\"`\n Rootfiles Rootfiles `xml:\"rootfiles\"`\n }\n\n v := &Container{Version: containerVersion}\n\n v.Rootfiles.Rootfile = append(\n v.Rootfiles.Rootfile,\n Rootfile{\n FullPath: filepath.Join(oebpsFolderName, contentFilename),\n MediaType: mediaTypeOebpsPackage,\n },\n )\n\n output, err := xml.MarshalIndent(v, \"\", ` `)\n if err != nil {\n return err\n }\n \/\/ Add the xml header to the output\n containerContent := append([]byte(xml.Header), output...)\n \/\/ It's generally nice to have files end with a newline\n containerContent = append(containerContent, \"\\n\"...)\n\n containerFilePath := filepath.Join(metaInfFolderPath, containerFilename)\n if err := ioutil.WriteFile(containerFilePath, containerContent, filePermissions); err != nil {\n return err\n }\n\n return nil\n}\n*\/\nfunc writeMimetype(tempDir string) error {\n\tmimetypeFilePath := filepath.Join(tempDir, mimetypeFilename)\n\n\tif err := ioutil.WriteFile(mimetypeFilePath, []byte(mediaTypeEpub), filePermissions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeEpub(tempDir string, destFilePath string) error {\n\tf, err := os.Create(destFilePath)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Create error: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Fatalf(\"os.File.Close error: %s\", err)\n\t\t}\n\t}()\n\n\tz := zip.NewWriter(f)\n\tdefer func() {\n\t\tif err := z.Close(); err != nil {\n\t\t\tlog.Fatalf(\"zip.Writer.Close error: %s\", err)\n\t\t}\n\t}()\n\n\tskipMimetypeFile := false\n\n\tvar addFileToZip = func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Get the path of the file relative to the folder we're zipping\n\t\trelativePath, err := filepath.Rel(tempDir, path)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"filepath.Rel error: %s\", err)\n\t\t}\n\n\t\t\/\/ Only include regular files, not directories\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar w io.Writer\n\t\tif path == filepath.Join(tempDir, mimetypeFilename) {\n\t\t\t\/\/ Skip the mimetype file\n\t\t\tif skipMimetypeFile == true {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t\/\/ The mimetype file must be uncompressed according to the EPUB spec\n\t\t\tw, err = z.CreateHeader(&zip.FileHeader{\n\t\t\t\tName: relativePath,\n\t\t\t\tMethod: zip.Store,\n\t\t\t})\n\t\t} else {\n\t\t\tw, err = z.Create(relativePath)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"zip.Writer.Create error: %s\", err)\n\t\t}\n\n\t\tr, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"os.Open error: %s\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := r.Close(); err != nil {\n\t\t\t\tlog.Fatalf(\"os.File.Close error: %s\", err)\n\t\t\t}\n\t\t}()\n\n\t\t_, err = io.Copy(w, r)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"io.Copy error: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ Add the mimetype file first\n\tmimetypeFilePath := filepath.Join(tempDir, mimetypeFilename)\n\tmimetypeInfo, err := os.Lstat(mimetypeFilePath)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Lstat error: %s\", err)\n\t}\n\taddFileToZip(mimetypeFilePath, mimetypeInfo, nil)\n\n\tskipMimetypeFile = true\n\n\terr = filepath.Walk(tempDir, addFileToZip)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Lstat error: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writePackageFile(tempDir string) error {\n\terr := e.pkg.write(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeSections(tempDir string) error {\n\tfor i, section := range e.sections {\n\t\tsectionIndex := i + 1\n\t\tsectionFilename := fmt.Sprintf(sectionFileFormat, sectionIndex)\n\t\tsectionFilePath := filepath.Join(tempDir, contentFolderName, xhtmlFolderName, sectionFilename)\n\n\t\tif err := section.write(sectionFilePath); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelativePath := filepath.Join(xhtmlFolderName, sectionFilename)\n\t\te.toc.addSection(sectionIndex, section.Title(), relativePath)\n\t\te.pkg.addToManifest(sectionFilename, relativePath, mediaTypeXhtml, \"\")\n\t\te.pkg.addToSpine(sectionFilename)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeToc(tempDir string) error {\n\te.pkg.addToManifest(tocNavItemId, tocNavFilename, mediaTypeXhtml, tocNavItemProperties)\n\te.pkg.addToManifest(tocNcxItemId, tocNcxFilename, mediaTypeNcx, \"\")\n\n\terr := e.toc.write(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Add functionality to write the images to the epub file<commit_after>package epub\n\nimport (\n\t\"archive\/zip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tcontainerFilename = \"container.xml\"\n\tcontainerFileTemplate = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">\n <rootfiles>\n <rootfile full-path=\"%s\/%s\" media-type=\"application\/oebps-package+xml\" \/>\n <\/rootfiles>\n<\/container>\n`\n\tcontentFolderName = \"EPUB\"\n\t\/\/ Permissions for any new directories we create\n\tdirPermissions = 0755\n\t\/\/ Permissions for any new files we create\n\tfilePermissions = 0644\n\timageFolderName = \"img\"\n\tmediaTypeNcx = \"application\/x-dtbncx+xml\"\n\tmediaTypeEpub = \"application\/epub+zip\"\n\tmediaTypeXhtml = \"application\/xhtml+xml\"\n\tmetaInfFolderName = \"META-INF\"\n\tmimetypeFilename = \"mimetype\"\n\tpkgFilename = \"package.opf\"\n\tsectionFileFormat = \"section%04d.xhtml\"\n\ttempDirPrefix = \"go-epub\"\n\txhtmlFolderName = \"xhtml\"\n)\n\nfunc (e *Epub) Write(destFilePath string) error {\n\ttempDir, err := ioutil.TempDir(\"\", tempDirPrefix)\n\tdefer os.Remove(tempDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Remove error: %s\", err)\n\t}\n\n\t\/\/ Must be run first\n\terr = createEpubFolders(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeImages(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeMimetype(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeSections(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeToc(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeContainerFile(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writePackageFile(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.writeEpub(tempDir, destFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO\n\n\t\/\/\toutput, err := xml.MarshalIndent(e.toc.navDoc.xml, \"\", \" \")\n\t\/\/\toutput = append([]byte(xhtmlDoctype), output...)\n\n\t\/\/\toutput, err := xml.MarshalIndent(e.pkg.xml, \"\", \" \")\n\n\t\/\/ output, err := xml.MarshalIndent(e.toc.ncxXml, \"\", \" \")\n\t\/\/\toutput = append([]byte(xml.Header), output...)\n\t\/\/\tfmt.Println(string(output))\n\n\treturn nil\n}\n\nfunc createEpubFolders(tempDir string) error {\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tcontentFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tcontentFolderName,\n\t\t\txhtmlFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Mkdir(\n\t\tfilepath.Join(\n\t\t\ttempDir,\n\t\t\tmetaInfFolderName,\n\t\t),\n\t\tdirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc writeContainerFile(tempDir string) error {\n\tcontainerFilePath := filepath.Join(tempDir, metaInfFolderName, containerFilename)\n\tif err := ioutil.WriteFile(\n\t\tcontainerFilePath,\n\t\t[]byte(\n\t\t\tfmt.Sprintf(\n\t\t\t\tcontainerFileTemplate,\n\t\t\t\tcontentFolderName,\n\t\t\t\tpkgFilename,\n\t\t\t),\n\t\t),\n\t\tfilePermissions,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeImages(tempDir string) error {\n\timageFolderPath := filepath.Join(tempDir, contentFolderName, imageFolderName)\n\tif err := os.Mkdir(imageFolderPath, dirPermissions); err != nil {\n\t\treturn err\n\t}\n\n\tfor imageFilename, imageSource := range e.images {\n\t\tu, err := url.Parse(imageSource)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar r io.ReadCloser\n\t\tvar resp *http.Response\n\t\t\/\/ If it's a URL\n\t\tif u.Scheme == \"http\" || u.Scheme == \"https\" {\n\t\t\tresp, err = http.Get(imageSource)\n\t\t\tr = resp.Body\n\n\t\t\t\/\/ Otherwise, assume it's a local file\n\t\t} else {\n\t\t\tr, err = os.Open(imageSource)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr = r.Close()\n\t\t}()\n\n\t\timageFilePath := filepath.Join(\n\t\t\timageFolderPath,\n\t\t\timageFilename,\n\t\t)\n\n\t\tw, err := os.Create(imageFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr = w.Close()\n\t\t}()\n\n\t\t_, err = io.Copy(w, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\n type Rootfile struct {\n FullPath string `xml:\"full-path,attr\"`\n MediaType string `xml:\"media-type,attr\"`\n }\n\n type Rootfiles struct {\n Rootfile []Rootfile `xml:\"rootfile\"`\n }\n\n type Container struct {\n XMLName xml.Name `xml:\"urn:oasis:names:tc:opendocument:xmlns:container container\"`\n Version string `xml:\"version,attr\"`\n Rootfiles Rootfiles `xml:\"rootfiles\"`\n }\n\n v := &Container{Version: containerVersion}\n\n v.Rootfiles.Rootfile = append(\n v.Rootfiles.Rootfile,\n Rootfile{\n FullPath: filepath.Join(oebpsFolderName, contentFilename),\n MediaType: mediaTypeOebpsPackage,\n },\n )\n\n output, err := xml.MarshalIndent(v, \"\", ` `)\n if err != nil {\n return err\n }\n \/\/ Add the xml header to the output\n containerContent := append([]byte(xml.Header), output...)\n \/\/ It's generally nice to have files end with a newline\n containerContent = append(containerContent, \"\\n\"...)\n\n containerFilePath := filepath.Join(metaInfFolderPath, containerFilename)\n if err := ioutil.WriteFile(containerFilePath, containerContent, filePermissions); err != nil {\n return err\n }\n\n return nil\n}\n\n\/*\nfunc writeContentFile(tempDir string) error {\n oebpsFolderPath := filepath.Join(tempDir, oebpsFolderName)\n if err := os.Mkdir(oebpsFolderPath, dirPermissions); err != nil {\n return err\n }\n\n type Package struct {\n XMLName xml.Name `xml:\"http:\/\/www.idpf.org\/2007\/opf package\"`\n UniqueIdentifier string `xml:\"unique-identifier,attr\"`\n Version string `xml:\"version,attr\"`\n }\n\n\n\n\n\n type Rootfile struct {\n FullPath string `xml:\"full-path,attr\"`\n MediaType string `xml:\"media-type,attr\"`\n }\n\n type Rootfiles struct {\n Rootfile []Rootfile `xml:\"rootfile\"`\n }\n\n type Container struct {\n XMLName xml.Name `xml:\"urn:oasis:names:tc:opendocument:xmlns:container container\"`\n Version string `xml:\"version,attr\"`\n Rootfiles Rootfiles `xml:\"rootfiles\"`\n }\n\n v := &Container{Version: containerVersion}\n\n v.Rootfiles.Rootfile = append(\n v.Rootfiles.Rootfile,\n Rootfile{\n FullPath: filepath.Join(oebpsFolderName, contentFilename),\n MediaType: mediaTypeOebpsPackage,\n },\n )\n\n output, err := xml.MarshalIndent(v, \"\", ` `)\n if err != nil {\n return err\n }\n \/\/ Add the xml header to the output\n containerContent := append([]byte(xml.Header), output...)\n \/\/ It's generally nice to have files end with a newline\n containerContent = append(containerContent, \"\\n\"...)\n\n containerFilePath := filepath.Join(metaInfFolderPath, containerFilename)\n if err := ioutil.WriteFile(containerFilePath, containerContent, filePermissions); err != nil {\n return err\n }\n\n return nil\n}\n*\/\nfunc writeMimetype(tempDir string) error {\n\tmimetypeFilePath := filepath.Join(tempDir, mimetypeFilename)\n\n\tif err := ioutil.WriteFile(mimetypeFilePath, []byte(mediaTypeEpub), filePermissions); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeEpub(tempDir string, destFilePath string) error {\n\tf, err := os.Create(destFilePath)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Create error: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Fatalf(\"os.File.Close error: %s\", err)\n\t\t}\n\t}()\n\n\tz := zip.NewWriter(f)\n\tdefer func() {\n\t\tif err := z.Close(); err != nil {\n\t\t\tlog.Fatalf(\"zip.Writer.Close error: %s\", err)\n\t\t}\n\t}()\n\n\tskipMimetypeFile := false\n\n\tvar addFileToZip = func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Get the path of the file relative to the folder we're zipping\n\t\trelativePath, err := filepath.Rel(tempDir, path)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"filepath.Rel error: %s\", err)\n\t\t}\n\n\t\t\/\/ Only include regular files, not directories\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar w io.Writer\n\t\tif path == filepath.Join(tempDir, mimetypeFilename) {\n\t\t\t\/\/ Skip the mimetype file\n\t\t\tif skipMimetypeFile == true {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t\/\/ The mimetype file must be uncompressed according to the EPUB spec\n\t\t\tw, err = z.CreateHeader(&zip.FileHeader{\n\t\t\t\tName: relativePath,\n\t\t\t\tMethod: zip.Store,\n\t\t\t})\n\t\t} else {\n\t\t\tw, err = z.Create(relativePath)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"zip.Writer.Create error: %s\", err)\n\t\t}\n\n\t\tr, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"os.Open error: %s\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := r.Close(); err != nil {\n\t\t\t\tlog.Fatalf(\"os.File.Close error: %s\", err)\n\t\t\t}\n\t\t}()\n\n\t\t_, err = io.Copy(w, r)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"io.Copy error: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ Add the mimetype file first\n\tmimetypeFilePath := filepath.Join(tempDir, mimetypeFilename)\n\tmimetypeInfo, err := os.Lstat(mimetypeFilePath)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Lstat error: %s\", err)\n\t}\n\taddFileToZip(mimetypeFilePath, mimetypeInfo, nil)\n\n\tskipMimetypeFile = true\n\n\terr = filepath.Walk(tempDir, addFileToZip)\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Lstat error: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writePackageFile(tempDir string) error {\n\terr := e.pkg.write(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeSections(tempDir string) error {\n\tfor i, section := range e.sections {\n\t\tsectionIndex := i + 1\n\t\tsectionFilename := fmt.Sprintf(sectionFileFormat, sectionIndex)\n\t\tsectionFilePath := filepath.Join(tempDir, contentFolderName, xhtmlFolderName, sectionFilename)\n\n\t\tif err := section.write(sectionFilePath); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelativePath := filepath.Join(xhtmlFolderName, sectionFilename)\n\t\te.toc.addSection(sectionIndex, section.Title(), relativePath)\n\t\te.pkg.addToManifest(sectionFilename, relativePath, mediaTypeXhtml, \"\")\n\t\te.pkg.addToSpine(sectionFilename)\n\t}\n\n\treturn nil\n}\n\nfunc (e *Epub) writeToc(tempDir string) error {\n\te.pkg.addToManifest(tocNavItemId, tocNavFilename, mediaTypeXhtml, tocNavItemProperties)\n\te.pkg.addToManifest(tocNcxItemId, tocNcxFilename, mediaTypeNcx, \"\")\n\n\terr := e.toc.write(tempDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\n\/\/#include <stdio.h>\n\/\/#include <stdlib.h>\n\/\/#include <unistd.h>\n\/\/#include <sys\/types.h>\n\/\/#include <sys\/stat.h>\n\/\/#include <sys\/wait.h>\n\/\/#include <sys\/time.h>\n\/\/#include <sys\/resource.h>\n\/\/#include <fcntl.h>\n\/*\nint daemonize(char *cmd, char *argv[], int pipe, int fds[], int num) {\n\tint status = 0, fd, pid, i;\n\tstruct sigaction sa;\n\n\tpid = fork();\n\tif (pid < 0) {\n\t\treturn -1;\n\t} else if (pid > 0) {\n\t\tif (waitpid(pid, &status, 0) < 0)\n\t\t\treturn -1;\n\t\treturn WEXITSTATUS(status);\n\t}\n\t\/\/Become a session leader to lose controlling TTY\n\tsetsid();\n\n\t\/\/Ensure future opens won't allocate controlling TTYs\n\tsa.sa_handler = SIG_IGN;\n\tsigemptyset(&sa.sa_mask);\n\tsa.sa_flags = 0;\n\tif (sigaction(SIGHUP, &sa, NULL) < 0) {\n\t\t_exit(-1);\n\t}\n\n\tpid = fork();\n\tif (pid < 0) {\n\t\t_exit(-1);\n\t} else if (pid > 0) {\n\t\t_exit(0);\n\t}\n\n\tif (pipe > 0) {\n\t\tchar buf[4];\n\t\tint ret;\n\n\t\tpid = getpid();\n\n\t\tbuf[0] = pid >> 24;\n\t\tbuf[1] = pid >> 16;\n\t\tbuf[2] = pid >> 8;\n\t\tbuf[3] = pid;\n\n\t\tret = write(pipe, buf, 4);\n\t\tif (ret != 4)\n\t\t\t_exit(-1);\n\t}\n\n\t\/\/Clear file creation mask\n\tumask(0);\n\t\/\/Change the current working directory to the root so we won't prevent file system from being unmounted\n\tif (chdir(\"\/\") < 0)\n\t\t_exit(-1);\n\n\t\/\/Close all open file descriptors\n\tfor (i = 0; i < num; i++) {\n\t\tif (fds[i] == 0 || fds[i] == 1 || fds[i] == 2)\n\t\t\tcontinue;\n\n\t\tclose(fds[i]);\n\t}\n\t\/\/Attach file descriptors 0, 1, and 2 to \/dev\/null\n\tfd = open(\"\/dev\/null\", O_RDWR);\n\tdup2(fd, 0);\n\tdup2(fd, 1);\n\tdup2(fd, 2);\n\tclose(fd);\n\n\tif (execvp(cmd, argv) < 0)\n\t\t_exit(-1);\n\treturn -1;\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc ExecInDaemon(cmd string, argv []string) (pid uint32, err error) {\n\t\/\/ convert the args to the C style args\n\tcargs := make([]*C.char, len(argv))\n\n\tfor idx, a := range argv {\n\t\tcargs[idx] = C.CString(a)\n\t}\n\n\t\/\/ collect all the opened fds and close them when exec the daemon\n\tfds := (*C.int)(nil)\n\tnum := C.int(0)\n\tfdlist := listFd()\n\tif len(fdlist) != 0 {\n\t\tfds = (*C.int)(unsafe.Pointer(&fdlist[0]))\n\t\tnum = C.int(len(fdlist))\n\t}\n\n\t\/\/ create pipe for geting the daemon pid\n\tpipe := make([]int, 2)\n\terr = syscall.Pipe(pipe)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"fail to create pipe: %v\", err)\n\t}\n\n\t\/\/ do the job!\n\tret, err := C.daemonize(C.CString(cmd), (**C.char)(unsafe.Pointer(&cargs[0])), C.int(pipe[1]), fds, num)\n\tif err != nil || ret < 0 {\n\t\treturn 0, fmt.Errorf(\"fail to start %s in daemon mode: %v\", argv[0], err)\n\t}\n\n\t\/\/ get the daemon pid\n\tbuf := make([]byte, 4)\n\tnr, err := syscall.Read(pipe[0], buf)\n\tif err != nil || nr != 4 {\n\t\treturn 0, fmt.Errorf(\"fail to start %s in daemon mode or fail to get pid: %v\", argv[0], err)\n\t}\n\tsyscall.Close(pipe[1])\n\tsyscall.Close(pipe[0])\n\tpid = binary.BigEndian.Uint32(buf[:nr])\n\n\treturn pid, nil\n}\n\nfunc listFd() []int {\n\tfiles, err := ioutil.ReadDir(\"\/proc\/self\/fd\/\")\n\tif err != nil {\n\t\treturn []int{}\n\t}\n\n\tresult := []int{}\n\tfor _, file := range files {\n\t\tf, err := strconv.Atoi(file.Name())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, f)\n\t}\n\n\treturn result\n}\n<commit_msg>include <signal.h> for mac<commit_after>package utils\n\n\/\/#include <stdio.h>\n\/\/#include <stdlib.h>\n\/\/#include <unistd.h>\n\/\/#include <signal.h>\n\/\/#include <sys\/types.h>\n\/\/#include <sys\/stat.h>\n\/\/#include <sys\/wait.h>\n\/\/#include <sys\/time.h>\n\/\/#include <sys\/resource.h>\n\/\/#include <fcntl.h>\n\/*\nint daemonize(char *cmd, char *argv[], int pipe, int fds[], int num) {\n\tint status = 0, fd, pid, i;\n\tstruct sigaction sa;\n\n\tpid = fork();\n\tif (pid < 0) {\n\t\treturn -1;\n\t} else if (pid > 0) {\n\t\tif (waitpid(pid, &status, 0) < 0)\n\t\t\treturn -1;\n\t\treturn WEXITSTATUS(status);\n\t}\n\t\/\/Become a session leader to lose controlling TTY\n\tsetsid();\n\n\t\/\/Ensure future opens won't allocate controlling TTYs\n\tsa.sa_handler = SIG_IGN;\n\tsigemptyset(&sa.sa_mask);\n\tsa.sa_flags = 0;\n\tif (sigaction(SIGHUP, &sa, NULL) < 0) {\n\t\t_exit(-1);\n\t}\n\n\tpid = fork();\n\tif (pid < 0) {\n\t\t_exit(-1);\n\t} else if (pid > 0) {\n\t\t_exit(0);\n\t}\n\n\tif (pipe > 0) {\n\t\tchar buf[4];\n\t\tint ret;\n\n\t\tpid = getpid();\n\n\t\tbuf[0] = pid >> 24;\n\t\tbuf[1] = pid >> 16;\n\t\tbuf[2] = pid >> 8;\n\t\tbuf[3] = pid;\n\n\t\tret = write(pipe, buf, 4);\n\t\tif (ret != 4)\n\t\t\t_exit(-1);\n\t}\n\n\t\/\/Clear file creation mask\n\tumask(0);\n\t\/\/Change the current working directory to the root so we won't prevent file system from being unmounted\n\tif (chdir(\"\/\") < 0)\n\t\t_exit(-1);\n\n\t\/\/Close all open file descriptors\n\tfor (i = 0; i < num; i++) {\n\t\tif (fds[i] == 0 || fds[i] == 1 || fds[i] == 2)\n\t\t\tcontinue;\n\n\t\tclose(fds[i]);\n\t}\n\t\/\/Attach file descriptors 0, 1, and 2 to \/dev\/null\n\tfd = open(\"\/dev\/null\", O_RDWR);\n\tdup2(fd, 0);\n\tdup2(fd, 1);\n\tdup2(fd, 2);\n\tclose(fd);\n\n\tif (execvp(cmd, argv) < 0)\n\t\t_exit(-1);\n\treturn -1;\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc ExecInDaemon(cmd string, argv []string) (pid uint32, err error) {\n\t\/\/ convert the args to the C style args\n\tcargs := make([]*C.char, len(argv))\n\n\tfor idx, a := range argv {\n\t\tcargs[idx] = C.CString(a)\n\t}\n\n\t\/\/ collect all the opened fds and close them when exec the daemon\n\tfds := (*C.int)(nil)\n\tnum := C.int(0)\n\tfdlist := listFd()\n\tif len(fdlist) != 0 {\n\t\tfds = (*C.int)(unsafe.Pointer(&fdlist[0]))\n\t\tnum = C.int(len(fdlist))\n\t}\n\n\t\/\/ create pipe for geting the daemon pid\n\tpipe := make([]int, 2)\n\terr = syscall.Pipe(pipe)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"fail to create pipe: %v\", err)\n\t}\n\n\t\/\/ do the job!\n\tret, err := C.daemonize(C.CString(cmd), (**C.char)(unsafe.Pointer(&cargs[0])), C.int(pipe[1]), fds, num)\n\tif err != nil || ret < 0 {\n\t\treturn 0, fmt.Errorf(\"fail to start %s in daemon mode: %v\", argv[0], err)\n\t}\n\n\t\/\/ get the daemon pid\n\tbuf := make([]byte, 4)\n\tnr, err := syscall.Read(pipe[0], buf)\n\tif err != nil || nr != 4 {\n\t\treturn 0, fmt.Errorf(\"fail to start %s in daemon mode or fail to get pid: %v\", argv[0], err)\n\t}\n\tsyscall.Close(pipe[1])\n\tsyscall.Close(pipe[0])\n\tpid = binary.BigEndian.Uint32(buf[:nr])\n\n\treturn pid, nil\n}\n\nfunc listFd() []int {\n\tfiles, err := ioutil.ReadDir(\"\/proc\/self\/fd\/\")\n\tif err != nil {\n\t\treturn []int{}\n\t}\n\n\tresult := []int{}\n\tfor _, file := range files {\n\t\tf, err := strconv.Atoi(file.Name())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, f)\n\t}\n\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package message\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/shazow\/ssh-chat\/set\"\n)\n\nconst messageBuffer = 5\nconst messageTimeout = 5 * time.Second\nconst reHighlight = `\\b(%s)\\b`\nconst timestampTimeout = 30 * time.Minute\n\nvar ErrUserClosed = errors.New(\"user closed\")\n\n\/\/ User definition, implemented set Item interface and io.Writer\ntype User struct {\n\tIdentifier\n\tIgnored *set.Set\n\tcolorIdx int\n\tjoined time.Time\n\tmsg chan Message\n\tdone chan struct{}\n\n\tscreen io.WriteCloser\n\tcloseOnce sync.Once\n\n\tmu sync.Mutex\n\tconfig UserConfig\n\treplyTo *User \/\/ Set when user gets a \/msg, for replying.\n\tlastMsg time.Time \/\/ When the last message was rendered\n}\n\nfunc NewUser(identity Identifier) *User {\n\tu := User{\n\t\tIdentifier: identity,\n\t\tconfig: DefaultUserConfig,\n\t\tjoined: time.Now(),\n\t\tmsg: make(chan Message, messageBuffer),\n\t\tdone: make(chan struct{}),\n\t\tIgnored: set.New(),\n\t}\n\tu.setColorIdx(rand.Int())\n\n\treturn &u\n}\n\nfunc NewUserScreen(identity Identifier, screen io.WriteCloser) *User {\n\tu := NewUser(identity)\n\tu.screen = screen\n\n\treturn u\n}\n\nfunc (u *User) Joined() time.Time {\n\treturn u.joined\n}\n\nfunc (u *User) Config() UserConfig {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\treturn u.config\n}\n\nfunc (u *User) SetConfig(cfg UserConfig) {\n\tu.mu.Lock()\n\tu.config = cfg\n\tu.mu.Unlock()\n}\n\n\/\/ Rename the user with a new Identifier.\nfunc (u *User) SetID(id string) {\n\tu.Identifier.SetID(id)\n\tu.setColorIdx(rand.Int())\n}\n\n\/\/ ReplyTo returns the last user that messaged this user.\nfunc (u *User) ReplyTo() *User {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\treturn u.replyTo\n}\n\n\/\/ SetReplyTo sets the last user to message this user.\nfunc (u *User) SetReplyTo(user *User) {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\tu.replyTo = user\n}\n\n\/\/ setColorIdx will set the colorIdx to a specific value, primarily used for\n\/\/ testing.\nfunc (u *User) setColorIdx(idx int) {\n\tu.colorIdx = idx\n}\n\n\/\/ Disconnect user, stop accepting messages\nfunc (u *User) Close() {\n\tu.closeOnce.Do(func() {\n\t\tif u.screen != nil {\n\t\t\tu.screen.Close()\n\t\t}\n\t\t\/\/ close(u.msg) TODO: Close?\n\t\tclose(u.done)\n\t})\n}\n\n\/\/ Consume message buffer into the handler. Will block, should be called in a\n\/\/ goroutine.\nfunc (u *User) Consume() {\n\tfor {\n\t\tselect {\n\t\tcase <-u.done:\n\t\t\treturn\n\t\tcase m, ok := <-u.msg:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tu.HandleMsg(m)\n\t\t}\n\t}\n}\n\n\/\/ Consume one message and stop, mostly for testing\nfunc (u *User) ConsumeOne() Message {\n\treturn <-u.msg\n}\n\n\/\/ Check if there are pending messages, used for testing\nfunc (u *User) HasMessages() bool {\n\tselect {\n\tcase msg := <-u.msg:\n\t\tu.msg <- msg\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ SetHighlight sets the highlighting regular expression to match string.\nfunc (u *User) SetHighlight(s string) error {\n\tre, err := regexp.Compile(fmt.Sprintf(reHighlight, s))\n\tif err != nil {\n\t\treturn err\n\t}\n\tu.mu.Lock()\n\tu.config.Highlight = re\n\tu.mu.Unlock()\n\treturn nil\n}\n\nfunc (u *User) render(m Message) string {\n\tcfg := u.Config()\n\tvar out string\n\tswitch m := m.(type) {\n\tcase PublicMsg:\n\t\tif u == m.From() {\n\t\t\tif !cfg.Echo {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\tout += m.RenderSelf(cfg)\n\t\t} else {\n\t\t\tout += m.RenderFor(cfg)\n\t\t}\n\tcase *PrivateMsg:\n\t\tout += m.Render(cfg.Theme)\n\t\tif cfg.Bell {\n\t\t\tout += Bel\n\t\t}\n\tcase *CommandMsg:\n\t\tout += m.RenderSelf(cfg)\n\tdefault:\n\t\tout += m.Render(cfg.Theme)\n\t}\n\tif cfg.Timeformat != nil {\n\t\tts := m.Timestamp()\n\t\tif cfg.Timezone != nil {\n\t\t\tts = ts.In(cfg.Timezone)\n\t\t} else {\n\t\t\tts = ts.UTC()\n\t\t}\n\t\treturn cfg.Theme.Timestamp(ts.Format(*cfg.Timeformat)) + \" \" + out + Newline\n\t}\n\treturn out + Newline\n}\n\n\/\/ writeMsg renders the message and attempts to write it, will Close the user\n\/\/ if it fails.\nfunc (u *User) writeMsg(m Message) error {\n\tr := u.render(m)\n\t_, err := u.screen.Write([]byte(r))\n\tif err != nil {\n\t\tlogger.Printf(\"Write failed to %s, closing: %s\", u.Name(), err)\n\t\tu.Close()\n\t}\n\treturn err\n}\n\n\/\/ HandleMsg will render the message to the screen, blocking.\nfunc (u *User) HandleMsg(m Message) error {\n\tu.mu.Lock()\n\tu.lastMsg = m.Timestamp()\n\tu.mu.Unlock()\n\treturn u.writeMsg(m)\n}\n\n\/\/ Add message to consume by user\nfunc (u *User) Send(m Message) error {\n\tselect {\n\tcase <-u.done:\n\t\treturn ErrUserClosed\n\tcase u.msg <- m:\n\tcase <-time.After(messageTimeout):\n\t\tlogger.Printf(\"Message buffer full, closing: %s\", u.Name())\n\t\tu.Close()\n\t\treturn ErrUserClosed\n\t}\n\treturn nil\n}\n\n\/\/ Container for per-user configurations.\ntype UserConfig struct {\n\tHighlight *regexp.Regexp\n\tBell bool\n\tQuiet bool\n\tEcho bool \/\/ Echo shows your own messages after sending, disabled for bots\n\tTimeformat *string\n\tTimezone *time.Location\n\tTheme *Theme\n}\n\n\/\/ Default user configuration to use\nvar DefaultUserConfig UserConfig\n\nfunc init() {\n\tDefaultUserConfig = UserConfig{\n\t\tBell: true,\n\t\tEcho: true,\n\t\tQuiet: false,\n\t}\n\n\t\/\/ TODO: Seed random?\n}\n\n\/\/ RecentActiveUsers is a slice of *Users that knows how to be sorted by the time of the last message.\ntype RecentActiveUsers []*User\n\nfunc (a RecentActiveUsers) Len() int { return len(a) }\nfunc (a RecentActiveUsers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a RecentActiveUsers) Less(i, j int) bool {\n\ta[i].mu.Lock()\n\tdefer a[i].mu.Unlock()\n\ta[j].mu.Lock()\n\tdefer a[j].mu.Unlock()\n\n\tif a[i].lastMsg.IsZero() {\n\t\treturn a[i].joined.Before(a[j].joined)\n\t} else {\n\t\treturn a[i].lastMsg.After(a[j].lastMsg)\n\t}\n\n}\n<commit_msg>chat\/message: Fix RecentActiveUsers sort order<commit_after>package message\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/shazow\/ssh-chat\/set\"\n)\n\nconst messageBuffer = 5\nconst messageTimeout = 5 * time.Second\nconst reHighlight = `\\b(%s)\\b`\nconst timestampTimeout = 30 * time.Minute\n\nvar ErrUserClosed = errors.New(\"user closed\")\n\n\/\/ User definition, implemented set Item interface and io.Writer\ntype User struct {\n\tIdentifier\n\tIgnored *set.Set\n\tcolorIdx int\n\tjoined time.Time\n\tmsg chan Message\n\tdone chan struct{}\n\n\tscreen io.WriteCloser\n\tcloseOnce sync.Once\n\n\tmu sync.Mutex\n\tconfig UserConfig\n\treplyTo *User \/\/ Set when user gets a \/msg, for replying.\n\tlastMsg time.Time \/\/ When the last message was rendered\n}\n\nfunc NewUser(identity Identifier) *User {\n\tu := User{\n\t\tIdentifier: identity,\n\t\tconfig: DefaultUserConfig,\n\t\tjoined: time.Now(),\n\t\tmsg: make(chan Message, messageBuffer),\n\t\tdone: make(chan struct{}),\n\t\tIgnored: set.New(),\n\t}\n\tu.setColorIdx(rand.Int())\n\n\treturn &u\n}\n\nfunc NewUserScreen(identity Identifier, screen io.WriteCloser) *User {\n\tu := NewUser(identity)\n\tu.screen = screen\n\n\treturn u\n}\n\nfunc (u *User) Joined() time.Time {\n\treturn u.joined\n}\n\nfunc (u *User) Config() UserConfig {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\treturn u.config\n}\n\nfunc (u *User) SetConfig(cfg UserConfig) {\n\tu.mu.Lock()\n\tu.config = cfg\n\tu.mu.Unlock()\n}\n\n\/\/ Rename the user with a new Identifier.\nfunc (u *User) SetID(id string) {\n\tu.Identifier.SetID(id)\n\tu.setColorIdx(rand.Int())\n}\n\n\/\/ ReplyTo returns the last user that messaged this user.\nfunc (u *User) ReplyTo() *User {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\treturn u.replyTo\n}\n\n\/\/ SetReplyTo sets the last user to message this user.\nfunc (u *User) SetReplyTo(user *User) {\n\tu.mu.Lock()\n\tdefer u.mu.Unlock()\n\tu.replyTo = user\n}\n\n\/\/ setColorIdx will set the colorIdx to a specific value, primarily used for\n\/\/ testing.\nfunc (u *User) setColorIdx(idx int) {\n\tu.colorIdx = idx\n}\n\n\/\/ Disconnect user, stop accepting messages\nfunc (u *User) Close() {\n\tu.closeOnce.Do(func() {\n\t\tif u.screen != nil {\n\t\t\tu.screen.Close()\n\t\t}\n\t\t\/\/ close(u.msg) TODO: Close?\n\t\tclose(u.done)\n\t})\n}\n\n\/\/ Consume message buffer into the handler. Will block, should be called in a\n\/\/ goroutine.\nfunc (u *User) Consume() {\n\tfor {\n\t\tselect {\n\t\tcase <-u.done:\n\t\t\treturn\n\t\tcase m, ok := <-u.msg:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tu.HandleMsg(m)\n\t\t}\n\t}\n}\n\n\/\/ Consume one message and stop, mostly for testing\nfunc (u *User) ConsumeOne() Message {\n\treturn <-u.msg\n}\n\n\/\/ Check if there are pending messages, used for testing\nfunc (u *User) HasMessages() bool {\n\tselect {\n\tcase msg := <-u.msg:\n\t\tu.msg <- msg\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ SetHighlight sets the highlighting regular expression to match string.\nfunc (u *User) SetHighlight(s string) error {\n\tre, err := regexp.Compile(fmt.Sprintf(reHighlight, s))\n\tif err != nil {\n\t\treturn err\n\t}\n\tu.mu.Lock()\n\tu.config.Highlight = re\n\tu.mu.Unlock()\n\treturn nil\n}\n\nfunc (u *User) render(m Message) string {\n\tcfg := u.Config()\n\tvar out string\n\tswitch m := m.(type) {\n\tcase PublicMsg:\n\t\tif u == m.From() {\n\t\t\tif !cfg.Echo {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\tout += m.RenderSelf(cfg)\n\t\t} else {\n\t\t\tout += m.RenderFor(cfg)\n\t\t}\n\tcase *PrivateMsg:\n\t\tout += m.Render(cfg.Theme)\n\t\tif cfg.Bell {\n\t\t\tout += Bel\n\t\t}\n\tcase *CommandMsg:\n\t\tout += m.RenderSelf(cfg)\n\tdefault:\n\t\tout += m.Render(cfg.Theme)\n\t}\n\tif cfg.Timeformat != nil {\n\t\tts := m.Timestamp()\n\t\tif cfg.Timezone != nil {\n\t\t\tts = ts.In(cfg.Timezone)\n\t\t} else {\n\t\t\tts = ts.UTC()\n\t\t}\n\t\treturn cfg.Theme.Timestamp(ts.Format(*cfg.Timeformat)) + \" \" + out + Newline\n\t}\n\treturn out + Newline\n}\n\n\/\/ writeMsg renders the message and attempts to write it, will Close the user\n\/\/ if it fails.\nfunc (u *User) writeMsg(m Message) error {\n\tr := u.render(m)\n\t_, err := u.screen.Write([]byte(r))\n\tif err != nil {\n\t\tlogger.Printf(\"Write failed to %s, closing: %s\", u.Name(), err)\n\t\tu.Close()\n\t}\n\treturn err\n}\n\n\/\/ HandleMsg will render the message to the screen, blocking.\nfunc (u *User) HandleMsg(m Message) error {\n\tu.mu.Lock()\n\tu.lastMsg = m.Timestamp()\n\tu.mu.Unlock()\n\treturn u.writeMsg(m)\n}\n\n\/\/ Add message to consume by user\nfunc (u *User) Send(m Message) error {\n\tselect {\n\tcase <-u.done:\n\t\treturn ErrUserClosed\n\tcase u.msg <- m:\n\tcase <-time.After(messageTimeout):\n\t\tlogger.Printf(\"Message buffer full, closing: %s\", u.Name())\n\t\tu.Close()\n\t\treturn ErrUserClosed\n\t}\n\treturn nil\n}\n\n\/\/ Container for per-user configurations.\ntype UserConfig struct {\n\tHighlight *regexp.Regexp\n\tBell bool\n\tQuiet bool\n\tEcho bool \/\/ Echo shows your own messages after sending, disabled for bots\n\tTimeformat *string\n\tTimezone *time.Location\n\tTheme *Theme\n}\n\n\/\/ Default user configuration to use\nvar DefaultUserConfig UserConfig\n\nfunc init() {\n\tDefaultUserConfig = UserConfig{\n\t\tBell: true,\n\t\tEcho: true,\n\t\tQuiet: false,\n\t}\n\n\t\/\/ TODO: Seed random?\n}\n\n\/\/ RecentActiveUsers is a slice of *Users that knows how to be sorted by the time of the last message.\ntype RecentActiveUsers []*User\n\nfunc (a RecentActiveUsers) Len() int { return len(a) }\nfunc (a RecentActiveUsers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a RecentActiveUsers) Less(i, j int) bool {\n\ta[i].mu.Lock()\n\tdefer a[i].mu.Unlock()\n\ta[j].mu.Lock()\n\tdefer a[j].mu.Unlock()\n\n\tif a[i].lastMsg.IsZero() {\n\t\treturn a[i].joined.Before(a[j].joined)\n\t} else {\n\t\treturn a[i].lastMsg.Before(a[j].lastMsg)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package chlog provides clog-based logging for testing\npackage chlog\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/thatguystone\/cog\/check\"\n\t\"github.com\/thatguystone\/cog\/clog\"\n)\n\n\/\/ New creates a new TestLog that outputs all log messages to the given TB. This\n\/\/ logs at level Debug by default.\nfunc New(tb testing.TB) (*check.C, *clog.Log) {\n\tc := check.New(tb)\n\n\tlcfg := clog.Config{\n\t\tOutputs: map[string]*clog.ConfigOutput{\n\t\t\t\"testlog\": {\n\t\t\t\tWhich: \"testlog\",\n\t\t\t\tLevel: clog.Debug,\n\t\t\t\tArgs: clog.ConfigOutputArgs{\n\t\t\t\t\t\"log\": c,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tModules: map[string]*clog.ConfigModule{\n\t\t\t\"\": {\n\t\t\t\tOutputs: []string{\"testlog\"},\n\t\t\t\tLevel: clog.Debug,\n\t\t\t},\n\t\t},\n\t}\n\n\tlog, err := clog.New(lcfg)\n\tc.MustNotError(err)\n\n\treturn c, log\n}\n<commit_msg>check: Update chlog with new clog stuff<commit_after>\/\/ Package chlog provides clog-based logging for testing\npackage chlog\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/thatguystone\/cog\/check\"\n\t\"github.com\/thatguystone\/cog\/clog\"\n)\n\n\/\/ New creates a new TestLog that outputs all log messages to the given TB. This\n\/\/ logs at level Debug by default.\nfunc New(tb testing.TB) (*check.C, *clog.Log) {\n\tc := check.New(tb)\n\n\tlcfg := clog.Config{\n\t\tOutputs: map[string]*clog.ConfigOutput{\n\t\t\t\"testlog\": {\n\t\t\t\tWhich: \"testlog\",\n\t\t\t\tLevel: clog.Debug,\n\t\t\t\tArgs: clog.ConfigArgs{\n\t\t\t\t\t\"log\": c,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tModules: map[string]*clog.ConfigModule{\n\t\t\t\"\": {\n\t\t\t\tOutputs: []string{\"testlog\"},\n\t\t\t\tLevel: clog.Debug,\n\t\t\t},\n\t\t},\n\t}\n\n\tlog, err := clog.New(lcfg)\n\tc.MustNotError(err)\n\n\treturn c, log\n}\n<|endoftext|>"} {"text":"<commit_before>package microsoft\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestLanguageCodes(t *testing.T) {\n\texpectedCodes := []string{\"ab\", \"cd\", \"ef\", \"gh\", \"ij\"}\n\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text\/xml\")\n\n\t\tresponse, err := xml.Marshal(newArrayOfStrings(expectedCodes))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error marshalling xml repsonse: %s\", err)\n\t\t}\n\n\t\tfmt.Fprint(w, string(response))\n\t\treturn\n\t}))\n\tdefer server.Close()\n\n\tauthenticator := NewMockAuthenticator()\n\tauthenticator.accessToken = NewMockAccessToken(100)\n\n\trouter := NewMockRouter()\n\trouter.languageCodesUrl = server.URL\n\n\tapi := NewMockApi()\n\tapi.router = router\n\tapi.authenticator = authenticator\n\n\tactualCodes, err := api.languageCodes()\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err)\n\t}\n\n\tif len(actualCodes) != len(expectedCodes) {\n\t\tt.Fatalf(\"Unexpected number of languages codes: %q\", actualCodes)\n\t}\n\n\tfor i := range expectedCodes {\n\t\tif actualCodes[i] != expectedCodes[i] {\n\t\t\tt.Fatalf(\"Unexpected language code '%s'. Expected '%s'\", actualCodes[i], expectedCodes[i])\n\t\t}\n\t}\n}\n<commit_msg>Verify headers for languageCodes request<commit_after>package microsoft\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc TestLanguageCodes(t *testing.T) {\n\texpectedCodes := []string{\"ab\", \"cd\", \"ef\", \"gh\", \"ij\"}\n\n\tauthenticator := NewMockAuthenticator()\n\tauthenticator.accessToken = NewMockAccessToken(100)\n\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text\/xml\")\n\n\t\tif r.Header.Get(\"Content-Type\") != \"text\/plain\" {\n\t\t\tt.Fatalf(\"Unexpected content type in request header: %s\", r.Header.Get(\"Content-Type\"))\n\t\t}\n\n\t\texpectedAuthToken, err := authenticator.authToken()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error getting expected authToken: %s\", err)\n\t\t}\n\n\t\tif r.Header.Get(\"Authorization\") != expectedAuthToken {\n\t\t\tt.Fatalf(\"Unexpected authorization header for request: %s\", r.Header.Get(\"Authorization\"))\n\t\t}\n\n\t\tresponse, err := xml.Marshal(newArrayOfStrings(expectedCodes))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error marshalling xml repsonse: %s\", err)\n\t\t}\n\n\t\tfmt.Fprint(w, string(response))\n\t\treturn\n\t}))\n\tdefer server.Close()\n\n\trouter := NewMockRouter()\n\trouter.languageCodesUrl = server.URL\n\n\tapi := NewMockApi()\n\tapi.router = router\n\tapi.authenticator = authenticator\n\n\tactualCodes, err := api.languageCodes()\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err)\n\t}\n\n\tif len(actualCodes) != len(expectedCodes) {\n\t\tt.Fatalf(\"Unexpected number of languages codes: %q\", actualCodes)\n\t}\n\n\tfor i := range expectedCodes {\n\t\tif actualCodes[i] != expectedCodes[i] {\n\t\t\tt.Fatalf(\"Unexpected language code '%s'. Expected '%s'\", actualCodes[i], expectedCodes[i])\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package yo\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ YoAll sends a Yo to all of your subscribers. The API token is issued by Yo\n\/\/ for a specific account and is required.\nfunc (c *Client) YoAll(apiToken string) (*http.Response, error) {\n\tu := \"yoall\"\n\tbody := url.Values{\"api_token\": {apiToken}}\n\treq, err := c.NewRequestForm(u, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}\n<commit_msg>Trailing slash in yoall call.<commit_after>package yo\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\n\/\/ YoAll sends a Yo to all of your subscribers. The API token is issued by Yo\n\/\/ for a specific account and is required.\nfunc (c *Client) YoAll(apiToken string) (*http.Response, error) {\n\tu := \"yoall\/\"\n\tbody := url.Values{\"api_token\": {apiToken}}\n\treq, err := c.NewRequestForm(u, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"fmt\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/sent-hil\/go-logging\"\n)\n\nvar modules []string\n\nfunc init() {\n\tmodules = []string{}\n}\n\n\/\/ Default Log implementation.\ntype GoLogger struct {\n\tlog *logging.Logger\n}\n\nfunc NewGoLog(name string) *GoLogger {\n\tlogging.SetFormatter(logging.MustStringFormatter(\"%{module} [%{level:.8s}] - %{message}\"))\n\n\t\/\/ Send log to stdout\n\tvar logBackend = logging.NewLogBackend(os.Stderr, \"\", stdlog.LstdFlags|stdlog.Lshortfile)\n\tlogBackend.Color = true\n\n\t\/\/ Send log to syslog\n\tvar syslogBackend, err = logging.NewSyslogBackend(\"\", syslog.LOG_DEBUG|syslog.LOG_LOCAL0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogging.SetBackend(logBackend, syslogBackend)\n\n\tloggingLevel = getLoggingLevelFromConfig(name)\n\n\t\/\/ go-logging calls Reset() each time it is imported. So if this\n\t\/\/ pkg is imported in a library and then in a worker, the library\n\t\/\/ defaults back to DEBUG logging level. This fixes that by\n\t\/\/ re-setting the log level for already set modules.\n\tmodules = append(modules, name)\n\tfor _, mod := range modules {\n\t\tlogging.SetLevel(loggingLevel, mod)\n\t}\n\n\tvar goLog = &GoLogger{logging.MustGetLogger(name)}\n\n\treturn goLog\n}\n\nfunc (g *GoLogger) Fatal(args ...interface{}) {\n\tg.log.Fatal(args...)\n}\n\nfunc (g *GoLogger) Panic(format string, args ...interface{}) {\n\tg.log.Panicf(format, args...)\n}\n\nfunc (g *GoLogger) Critical(format string, args ...interface{}) {\n\tg.log.Critical(format, args...)\n}\n\nfunc (g *GoLogger) Error(format string, args ...interface{}) {\n\tg.log.Error(format, args...)\n}\n\nfunc (g *GoLogger) Warning(format string, args ...interface{}) {\n\tg.log.Warning(format, args...)\n}\n\nfunc (g *GoLogger) Notice(format string, args ...interface{}) {\n\tg.log.Notice(format, args...)\n}\n\nfunc (g *GoLogger) Info(format string, args ...interface{}) {\n\tg.log.Info(format, args...)\n}\n\nfunc (g *GoLogger) Debug(format string, args ...interface{}) {\n\tg.log.Debug(format, args...)\n}\n\nfunc (g *GoLogger) Name() string {\n\treturn g.log.Module\n}\n\n\/\/----------------------------------------------------------\n\/\/ Originally from koding\/tools\/log\n\/\/----------------------------------------------------------\n\nfunc (g *GoLogger) RecoverAndLog() {\n\tif err := recover(); err != nil {\n\t\tg.Critical(\"Panicked %v\", err)\n\t}\n}\n\nfunc (g *GoLogger) LogError(err interface{}, stackOffset int, additionalData ...interface{}) {\n\tdata := make([]interface{}, 0)\n\tdata = append(data, fmt.Sprintln(err))\n\tfor i := 1 + stackOffset; ; i++ {\n\t\tpc, file, line, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tname := \"<unknown>\"\n\t\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\t\tname = fn.Name()\n\t\t}\n\t\tdata = append(data, fmt.Sprintf(\"at %s (%s:%d)\\n\", name, file, line))\n\t}\n\tdata = append(data, additionalData...)\n\tg.Error(\"LogError %v\", data)\n}\n<commit_msg>logger: remove 'LogError' prefix<commit_after>package logger\n\nimport (\n\t\"fmt\"\n\tstdlog \"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/sent-hil\/go-logging\"\n)\n\nvar modules []string\n\nfunc init() {\n\tmodules = []string{}\n}\n\n\/\/ Default Log implementation.\ntype GoLogger struct {\n\tlog *logging.Logger\n}\n\nfunc NewGoLog(name string) *GoLogger {\n\tlogging.SetFormatter(logging.MustStringFormatter(\"%{module} [%{level:.8s}] - %{message}\"))\n\n\t\/\/ Send log to stdout\n\tvar logBackend = logging.NewLogBackend(os.Stderr, \"\", stdlog.LstdFlags|stdlog.Lshortfile)\n\tlogBackend.Color = true\n\n\t\/\/ Send log to syslog\n\tvar syslogBackend, err = logging.NewSyslogBackend(\"\", syslog.LOG_DEBUG|syslog.LOG_LOCAL0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogging.SetBackend(logBackend, syslogBackend)\n\n\tloggingLevel = getLoggingLevelFromConfig(name)\n\n\t\/\/ go-logging calls Reset() each time it is imported. So if this\n\t\/\/ pkg is imported in a library and then in a worker, the library\n\t\/\/ defaults back to DEBUG logging level. This fixes that by\n\t\/\/ re-setting the log level for already set modules.\n\tmodules = append(modules, name)\n\tfor _, mod := range modules {\n\t\tlogging.SetLevel(loggingLevel, mod)\n\t}\n\n\tvar goLog = &GoLogger{logging.MustGetLogger(name)}\n\n\treturn goLog\n}\n\nfunc (g *GoLogger) Fatal(args ...interface{}) {\n\tg.log.Fatal(args...)\n}\n\nfunc (g *GoLogger) Panic(format string, args ...interface{}) {\n\tg.log.Panicf(format, args...)\n}\n\nfunc (g *GoLogger) Critical(format string, args ...interface{}) {\n\tg.log.Critical(format, args...)\n}\n\nfunc (g *GoLogger) Error(format string, args ...interface{}) {\n\tg.log.Error(format, args...)\n}\n\nfunc (g *GoLogger) Warning(format string, args ...interface{}) {\n\tg.log.Warning(format, args...)\n}\n\nfunc (g *GoLogger) Notice(format string, args ...interface{}) {\n\tg.log.Notice(format, args...)\n}\n\nfunc (g *GoLogger) Info(format string, args ...interface{}) {\n\tg.log.Info(format, args...)\n}\n\nfunc (g *GoLogger) Debug(format string, args ...interface{}) {\n\tg.log.Debug(format, args...)\n}\n\nfunc (g *GoLogger) Name() string {\n\treturn g.log.Module\n}\n\n\/\/----------------------------------------------------------\n\/\/ Originally from koding\/tools\/log\n\/\/----------------------------------------------------------\n\nfunc (g *GoLogger) RecoverAndLog() {\n\tif err := recover(); err != nil {\n\t\tg.Critical(\"Panicked %v\", err)\n\t}\n}\n\nfunc (g *GoLogger) LogError(err interface{}, stackOffset int, additionalData ...interface{}) {\n\tdata := make([]interface{}, 0)\n\tdata = append(data, fmt.Sprintln(err))\n\tfor i := 1 + stackOffset; ; i++ {\n\t\tpc, file, line, ok := runtime.Caller(i)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tname := \"<unknown>\"\n\t\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\t\tname = fn.Name()\n\t\t}\n\t\tdata = append(data, fmt.Sprintf(\"at %s (%s:%d)\\n\", name, file, line))\n\t}\n\tdata = append(data, additionalData...)\n\tg.Error(\"%v\", data)\n}\n<|endoftext|>"} {"text":"<commit_before>package mock\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/bunsenapp\/migrator\"\n)\n\n\/\/ ValidConfiguration yields a configuration object that will pass validation.\nfunc ValidConfiguration() migrator.Configuration {\n\treturn migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-database-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n}\n\n\/\/ ValidConfigurationAndDirectories yields a configuration object and a clean\n\/\/ up function to ensure no directories are left over.\nfunc ValidConfigurationAndDirectories() (migrator.Configuration, func()) {\n\tconfig := migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n\n\tos.Mkdir(config.MigrationsDir, 0700)\n\tos.Mkdir(config.RollbacksDir, 0700)\n\n\tcleanup := func() {\n\t\tos.RemoveAll(config.MigrationsDir)\n\t\tos.RemoveAll(config.RollbacksDir)\n\t}\n\n\treturn config, cleanup\n}\n\n\/\/ ValidConfigurationAndDirectoriesAndFiles yields a configuration object, a cleanup\n\/\/ function, creates the required directories and also creates an up\/down file\n\/\/ for a fake SQL migration.\nfunc ValidConfigurationDirectoriesAndFiles() (migrator.Configuration, func()) {\n\tconfig := migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n\n\tos.Mkdir(config.MigrationsDir, 0700)\n\tos.Mkdir(config.RollbacksDir, 0700)\n\n\tcleanup := func() {\n\t\tos.RemoveAll(config.MigrationsDir)\n\t\tos.RemoveAll(config.RollbacksDir)\n\t}\n\n\tos.Create(fmt.Sprintf(\"%s\/1_first-migration_up.sql\", config.MigrationsDir))\n\tos.Create(fmt.Sprintf(\"%s\/1_first-migration_down.sql\", config.RollbacksDir))\n\n\treturn config, cleanup\n}\n<commit_msg>Report card fix - last one<commit_after>package mock\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/bunsenapp\/migrator\"\n)\n\n\/\/ ValidConfiguration yields a configuration object that will pass validation.\nfunc ValidConfiguration() migrator.Configuration {\n\treturn migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-database-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n}\n\n\/\/ ValidConfigurationAndDirectories yields a configuration object and a clean\n\/\/ up function to ensure no directories are left over.\nfunc ValidConfigurationAndDirectories() (migrator.Configuration, func()) {\n\tconfig := migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n\n\tos.Mkdir(config.MigrationsDir, 0700)\n\tos.Mkdir(config.RollbacksDir, 0700)\n\n\tcleanup := func() {\n\t\tos.RemoveAll(config.MigrationsDir)\n\t\tos.RemoveAll(config.RollbacksDir)\n\t}\n\n\treturn config, cleanup\n}\n\n\/\/ ValidConfigurationDirectoriesAndFiles yields a configuration object, a cleanup\n\/\/ function, creates the required directories and also creates an up\/down file\n\/\/ for a fake SQL migration.\nfunc ValidConfigurationDirectoriesAndFiles() (migrator.Configuration, func()) {\n\tconfig := migrator.Configuration{\n\t\tDatabaseConnectionString: \"my-connection-string\",\n\t\tMigrationsDir: \"my-migrations-directory\",\n\t\tRollbacksDir: \"my-rollbacks-directory\",\n\t}\n\n\tos.Mkdir(config.MigrationsDir, 0700)\n\tos.Mkdir(config.RollbacksDir, 0700)\n\n\tcleanup := func() {\n\t\tos.RemoveAll(config.MigrationsDir)\n\t\tos.RemoveAll(config.RollbacksDir)\n\t}\n\n\tos.Create(fmt.Sprintf(\"%s\/1_first-migration_up.sql\", config.MigrationsDir))\n\tos.Create(fmt.Sprintf(\"%s\/1_first-migration_down.sql\", config.RollbacksDir))\n\n\treturn config, cleanup\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\n\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mount\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\tutilio \"k8s.io\/kubernetes\/pkg\/util\/io\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/nsenter\"\n)\n\nconst (\n\t\/\/ hostProcMountsPath is the default mount path for rootfs\n\thostProcMountsPath = \"\/rootfs\/proc\/1\/mounts\"\n\t\/\/ hostProcMountinfoPath is the default mount info path for rootfs\n\thostProcMountinfoPath = \"\/rootfs\/proc\/1\/mountinfo\"\n\t\/\/ hostProcSelfStatusPath is the default path to \/proc\/self\/status on the host\n\thostProcSelfStatusPath = \"\/rootfs\/proc\/self\/status\"\n)\n\nvar (\n\t\/\/ pidRegExp matches \"Pid:\t<pid>\" in \/proc\/self\/status\n\tpidRegExp = regexp.MustCompile(`\\nPid:\\t([0-9]*)\\n`)\n)\n\n\/\/ Currently, all docker containers receive their own mount namespaces.\n\/\/ NsenterMounter works by executing nsenter to run commands in\n\/\/ the host's mount namespace.\ntype NsenterMounter struct {\n\tne *nsenter.Nsenter\n}\n\nfunc NewNsenterMounter() (*NsenterMounter, error) {\n\tne, err := nsenter.NewNsenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &NsenterMounter{ne: ne}, nil\n}\n\n\/\/ NsenterMounter implements mount.Interface\nvar _ = Interface(&NsenterMounter{})\n\n\/\/ Mount runs mount(8) in the host's root mount namespace. Aside from this\n\/\/ aspect, Mount has the same semantics as the mounter returned by mount.New()\nfunc (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error {\n\tbind, bindRemountOpts := isBind(options)\n\n\tif bind {\n\t\terr := n.doNsenterMount(source, target, fstype, []string{\"bind\"})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn n.doNsenterMount(source, target, fstype, bindRemountOpts)\n\t}\n\n\treturn n.doNsenterMount(source, target, fstype, options)\n}\n\n\/\/ doNsenterMount nsenters the host's mount namespace and performs the\n\/\/ requested mount.\nfunc (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error {\n\tglog.V(5).Infof(\"nsenter mount %s %s %s %v\", source, target, fstype, options)\n\tcmd, args := n.makeNsenterArgs(source, target, fstype, options)\n\toutputBytes, err := n.ne.Exec(cmd, args).CombinedOutput()\n\tif len(outputBytes) != 0 {\n\t\tglog.V(5).Infof(\"Output of mounting %s to %s: %v\", source, target, string(outputBytes))\n\t}\n\treturn err\n}\n\n\/\/ makeNsenterArgs makes a list of argument to nsenter in order to do the\n\/\/ requested mount.\nfunc (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options []string) (string, []string) {\n\tmountCmd := n.ne.AbsHostPath(\"mount\")\n\tmountArgs := makeMountArgs(source, target, fstype, options)\n\n\tif systemdRunPath, hasSystemd := n.ne.SupportsSystemd(); hasSystemd {\n\t\t\/\/ Complete command line:\n\t\t\/\/ nsenter --mount=\/rootfs\/proc\/1\/ns\/mnt -- \/bin\/systemd-run --description=... --scope -- \/bin\/mount -t <type> <what> <where>\n\t\t\/\/ Expected flow is:\n\t\t\/\/ * nsenter breaks out of container's mount namespace and executes\n\t\t\/\/ host's systemd-run.\n\t\t\/\/ * systemd-run creates a transient scope (=~ cgroup) and executes its\n\t\t\/\/ argument (\/bin\/mount) there.\n\t\t\/\/ * mount does its job, forks a fuse daemon if necessary and finishes.\n\t\t\/\/ (systemd-run --scope finishes at this point, returning mount's exit\n\t\t\/\/ code and stdout\/stderr - thats one of --scope benefits).\n\t\t\/\/ * systemd keeps the fuse daemon running in the scope (i.e. in its own\n\t\t\/\/ cgroup) until the fuse daemon dies (another --scope benefit).\n\t\t\/\/ Kubelet container can be restarted and the fuse daemon survives.\n\t\t\/\/ * When the daemon dies (e.g. during unmount) systemd removes the\n\t\t\/\/ scope automatically.\n\t\tmountCmd, mountArgs = addSystemdScope(systemdRunPath, target, mountCmd, mountArgs)\n\t} else {\n\t\t\/\/ Fall back to simple mount when the host has no systemd.\n\t\t\/\/ Complete command line:\n\t\t\/\/ nsenter --mount=\/rootfs\/proc\/1\/ns\/mnt -- \/bin\/mount -t <type> <what> <where>\n\t\t\/\/ Expected flow is:\n\t\t\/\/ * nsenter breaks out of container's mount namespace and executes host's \/bin\/mount.\n\t\t\/\/ * mount does its job, forks a fuse daemon if necessary and finishes.\n\t\t\/\/ * Any fuse daemon runs in cgroup of kubelet docker container,\n\t\t\/\/ restart of kubelet container will kill it!\n\n\t\t\/\/ No code here, mountCmd and mountArgs use \/bin\/mount\n\t}\n\n\treturn mountCmd, mountArgs\n}\n\n\/\/ Unmount runs umount(8) in the host's mount namespace.\nfunc (n *NsenterMounter) Unmount(target string) error {\n\targs := []string{target}\n\t\/\/ No need to execute systemd-run here, it's enough that unmount is executed\n\t\/\/ in the host's mount namespace. It will finish appropriate fuse daemon(s)\n\t\/\/ running in any scope.\n\tglog.V(5).Infof(\"nsenter unmount args: %v\", args)\n\toutputBytes, err := n.ne.Exec(\"umount\", args).CombinedOutput()\n\tif len(outputBytes) != 0 {\n\t\tglog.V(5).Infof(\"Output of unmounting %s: %v\", target, string(outputBytes))\n\t}\n\treturn err\n}\n\n\/\/ List returns a list of all mounted filesystems in the host's mount namespace.\nfunc (*NsenterMounter) List() ([]MountPoint, error) {\n\treturn listProcMounts(hostProcMountsPath)\n}\n\nfunc (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) {\n\treturn IsNotMountPoint(m, dir)\n}\n\nfunc (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool {\n\tdeletedDir := fmt.Sprintf(\"%s\\\\040(deleted)\", dir)\n\treturn (mp.Path == dir) || (mp.Path == deletedDir)\n}\n\n\/\/ IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt\n\/\/ in the host's root mount namespace.\nfunc (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {\n\tfile, err := filepath.Abs(file)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\t\/\/ Check the directory exists\n\tif _, err = os.Stat(file); os.IsNotExist(err) {\n\t\tglog.V(5).Infof(\"findmnt: directory %s does not exist\", file)\n\t\treturn true, err\n\t}\n\t\/\/ Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only\n\t\/\/ the first of multiple possible mountpoints using --first-only.\n\t\/\/ Also add fstype output to make sure that the output of target file will give the full path\n\t\/\/ TODO: Need more refactoring for this function. Track the solution with issue #26996\n\targs := []string{\"-o\", \"target,fstype\", \"--noheadings\", \"--first-only\", \"--target\", file}\n\tglog.V(5).Infof(\"nsenter findmnt args: %v\", args)\n\tout, err := n.ne.Exec(\"findmnt\", args).CombinedOutput()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed findmnt command for path %s: %s %v\", file, out, err)\n\t\t\/\/ Different operating systems behave differently for paths which are not mount points.\n\t\t\/\/ On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get \"\/\".\n\t\t\/\/ It's safer to assume that it's not a mount point.\n\t\treturn true, nil\n\t}\n\tmountTarget, err := parseFindMnt(string(out))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tglog.V(5).Infof(\"IsLikelyNotMountPoint findmnt output for path %s: %v:\", file, mountTarget)\n\n\tif mountTarget == file {\n\t\tglog.V(5).Infof(\"IsLikelyNotMountPoint: %s is a mount point\", file)\n\t\treturn false, nil\n\t}\n\tglog.V(5).Infof(\"IsLikelyNotMountPoint: %s is not a mount point\", file)\n\treturn true, nil\n}\n\n\/\/ parse output of \"findmnt -o target,fstype\" and return just the target\nfunc parseFindMnt(out string) (string, error) {\n\t\/\/ cut trailing newline\n\tout = strings.TrimSuffix(out, \"\\n\")\n\t\/\/ cut everything after the last space - it's the filesystem type\n\ti := strings.LastIndex(out, \" \")\n\tif i == -1 {\n\t\treturn \"\", fmt.Errorf(\"error parsing findmnt output, expected at least one space: %q\", out)\n\t}\n\treturn out[:i], nil\n}\n\n\/\/ DeviceOpened checks if block device in use by calling Open with O_EXCL flag.\n\/\/ Returns true if open returns errno EBUSY, and false if errno is nil.\n\/\/ Returns an error if errno is any error other than EBUSY.\n\/\/ Returns with error if pathname is not a device.\nfunc (n *NsenterMounter) DeviceOpened(pathname string) (bool, error) {\n\treturn exclusiveOpenFailsOnDevice(pathname)\n}\n\n\/\/ PathIsDevice uses FileInfo returned from os.Stat to check if path refers\n\/\/ to a device.\nfunc (n *NsenterMounter) PathIsDevice(pathname string) (bool, error) {\n\tpathType, err := n.GetFileType(pathname)\n\tisDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev\n\treturn isDevice, err\n}\n\n\/\/GetDeviceNameFromMount given a mount point, find the volume id from checking \/proc\/mounts\nfunc (n *NsenterMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {\n\treturn getDeviceNameFromMount(n, mountPath, pluginDir)\n}\n\nfunc (n *NsenterMounter) MakeRShared(path string) error {\n\treturn doMakeRShared(path, hostProcMountinfoPath)\n}\n\nfunc (mounter *NsenterMounter) GetFileType(pathname string) (FileType, error) {\n\tvar pathType FileType\n\toutputBytes, err := mounter.ne.Exec(\"stat\", []string{\"-L\", \"--printf=%F\", pathname}).CombinedOutput()\n\tif err != nil {\n\t\tif strings.Contains(string(outputBytes), \"No such file\") {\n\t\t\terr = fmt.Errorf(\"%s does not exist\", pathname)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"stat %s error: %v\", pathname, string(outputBytes))\n\t\t}\n\t\treturn pathType, err\n\t}\n\n\tswitch string(outputBytes) {\n\tcase \"socket\":\n\t\treturn FileTypeSocket, nil\n\tcase \"character special file\":\n\t\treturn FileTypeCharDev, nil\n\tcase \"block special file\":\n\t\treturn FileTypeBlockDev, nil\n\tcase \"directory\":\n\t\treturn FileTypeDirectory, nil\n\tcase \"regular file\":\n\t\treturn FileTypeFile, nil\n\t}\n\n\treturn pathType, fmt.Errorf(\"only recognise file, directory, socket, block device and character device\")\n}\n\nfunc (mounter *NsenterMounter) MakeDir(pathname string) error {\n\targs := []string{\"-p\", pathname}\n\tif _, err := mounter.ne.Exec(\"mkdir\", args).CombinedOutput(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (mounter *NsenterMounter) MakeFile(pathname string) error {\n\targs := []string{pathname}\n\tif _, err := mounter.ne.Exec(\"touch\", args).CombinedOutput(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (mounter *NsenterMounter) ExistsPath(pathname string) bool {\n\targs := []string{pathname}\n\t_, err := mounter.ne.Exec(\"ls\", args).CombinedOutput()\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (mounter *NsenterMounter) CleanSubPaths(podDir string, volumeName string) error {\n\treturn doCleanSubPaths(mounter, podDir, volumeName)\n}\n\n\/\/ getPidOnHost returns kubelet's pid in the host pid namespace\nfunc (mounter *NsenterMounter) getPidOnHost(procStatusPath string) (int, error) {\n\t\/\/ Get the PID from \/rootfs\/proc\/self\/status\n\tstatusBytes, err := utilio.ConsistentRead(procStatusPath, maxListTries)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"error reading %s: %s\", procStatusPath, err)\n\t}\n\tmatches := pidRegExp.FindSubmatch(statusBytes)\n\tif len(matches) < 2 {\n\t\treturn 0, fmt.Errorf(\"cannot parse %s: no Pid:\", procStatusPath)\n\t}\n\treturn strconv.Atoi(string(matches[1]))\n}\n\nfunc (mounter *NsenterMounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) {\n\thostPid, err := mounter.getPidOnHost(hostProcSelfStatusPath)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tglog.V(4).Infof(\"Kubelet's PID on the host is %d\", hostPid)\n\n\t\/\/ Bind-mount the subpath to avoid using symlinks in subpaths.\n\tnewHostPath, err = doBindSubPath(mounter, subPath, hostPid)\n\n\t\/\/ There is no action when the container starts. Bind-mount will be cleaned\n\t\/\/ when container stops by CleanSubPaths.\n\tcleanupAction = nil\n\treturn newHostPath, cleanupAction, err\n}\n\nfunc (mounter *NsenterMounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error {\n\treturn doSafeMakeDir(pathname, base, perm)\n}\n\nfunc (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) {\n\thostpath, err := mounter.ne.EvalSymlinks(pathname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn searchMountPoints(hostpath, procMountInfoPath)\n}\n\nfunc (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) {\n\tkubeletpath, err := mounter.ne.KubeletPath(pathname)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getFSGroup(kubeletpath)\n}\n\nfunc (mounter *NsenterMounter) GetSELinuxSupport(pathname string) (bool, error) {\n\treturn getSELinuxSupport(pathname, procMountInfoPath)\n}\n<commit_msg>Should use `hostProcMountinfoPath` constant in nsenter_mount.go.<commit_after>\/\/ +build linux\n\n\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mount\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\tutilio \"k8s.io\/kubernetes\/pkg\/util\/io\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/nsenter\"\n)\n\nconst (\n\t\/\/ hostProcMountsPath is the default mount path for rootfs\n\thostProcMountsPath = \"\/rootfs\/proc\/1\/mounts\"\n\t\/\/ hostProcMountinfoPath is the default mount info path for rootfs\n\thostProcMountinfoPath = \"\/rootfs\/proc\/1\/mountinfo\"\n\t\/\/ hostProcSelfStatusPath is the default path to \/proc\/self\/status on the host\n\thostProcSelfStatusPath = \"\/rootfs\/proc\/self\/status\"\n)\n\nvar (\n\t\/\/ pidRegExp matches \"Pid:\t<pid>\" in \/proc\/self\/status\n\tpidRegExp = regexp.MustCompile(`\\nPid:\\t([0-9]*)\\n`)\n)\n\n\/\/ Currently, all docker containers receive their own mount namespaces.\n\/\/ NsenterMounter works by executing nsenter to run commands in\n\/\/ the host's mount namespace.\ntype NsenterMounter struct {\n\tne *nsenter.Nsenter\n}\n\nfunc NewNsenterMounter() (*NsenterMounter, error) {\n\tne, err := nsenter.NewNsenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &NsenterMounter{ne: ne}, nil\n}\n\n\/\/ NsenterMounter implements mount.Interface\nvar _ = Interface(&NsenterMounter{})\n\n\/\/ Mount runs mount(8) in the host's root mount namespace. Aside from this\n\/\/ aspect, Mount has the same semantics as the mounter returned by mount.New()\nfunc (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error {\n\tbind, bindRemountOpts := isBind(options)\n\n\tif bind {\n\t\terr := n.doNsenterMount(source, target, fstype, []string{\"bind\"})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn n.doNsenterMount(source, target, fstype, bindRemountOpts)\n\t}\n\n\treturn n.doNsenterMount(source, target, fstype, options)\n}\n\n\/\/ doNsenterMount nsenters the host's mount namespace and performs the\n\/\/ requested mount.\nfunc (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error {\n\tglog.V(5).Infof(\"nsenter mount %s %s %s %v\", source, target, fstype, options)\n\tcmd, args := n.makeNsenterArgs(source, target, fstype, options)\n\toutputBytes, err := n.ne.Exec(cmd, args).CombinedOutput()\n\tif len(outputBytes) != 0 {\n\t\tglog.V(5).Infof(\"Output of mounting %s to %s: %v\", source, target, string(outputBytes))\n\t}\n\treturn err\n}\n\n\/\/ makeNsenterArgs makes a list of argument to nsenter in order to do the\n\/\/ requested mount.\nfunc (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options []string) (string, []string) {\n\tmountCmd := n.ne.AbsHostPath(\"mount\")\n\tmountArgs := makeMountArgs(source, target, fstype, options)\n\n\tif systemdRunPath, hasSystemd := n.ne.SupportsSystemd(); hasSystemd {\n\t\t\/\/ Complete command line:\n\t\t\/\/ nsenter --mount=\/rootfs\/proc\/1\/ns\/mnt -- \/bin\/systemd-run --description=... --scope -- \/bin\/mount -t <type> <what> <where>\n\t\t\/\/ Expected flow is:\n\t\t\/\/ * nsenter breaks out of container's mount namespace and executes\n\t\t\/\/ host's systemd-run.\n\t\t\/\/ * systemd-run creates a transient scope (=~ cgroup) and executes its\n\t\t\/\/ argument (\/bin\/mount) there.\n\t\t\/\/ * mount does its job, forks a fuse daemon if necessary and finishes.\n\t\t\/\/ (systemd-run --scope finishes at this point, returning mount's exit\n\t\t\/\/ code and stdout\/stderr - thats one of --scope benefits).\n\t\t\/\/ * systemd keeps the fuse daemon running in the scope (i.e. in its own\n\t\t\/\/ cgroup) until the fuse daemon dies (another --scope benefit).\n\t\t\/\/ Kubelet container can be restarted and the fuse daemon survives.\n\t\t\/\/ * When the daemon dies (e.g. during unmount) systemd removes the\n\t\t\/\/ scope automatically.\n\t\tmountCmd, mountArgs = addSystemdScope(systemdRunPath, target, mountCmd, mountArgs)\n\t} else {\n\t\t\/\/ Fall back to simple mount when the host has no systemd.\n\t\t\/\/ Complete command line:\n\t\t\/\/ nsenter --mount=\/rootfs\/proc\/1\/ns\/mnt -- \/bin\/mount -t <type> <what> <where>\n\t\t\/\/ Expected flow is:\n\t\t\/\/ * nsenter breaks out of container's mount namespace and executes host's \/bin\/mount.\n\t\t\/\/ * mount does its job, forks a fuse daemon if necessary and finishes.\n\t\t\/\/ * Any fuse daemon runs in cgroup of kubelet docker container,\n\t\t\/\/ restart of kubelet container will kill it!\n\n\t\t\/\/ No code here, mountCmd and mountArgs use \/bin\/mount\n\t}\n\n\treturn mountCmd, mountArgs\n}\n\n\/\/ Unmount runs umount(8) in the host's mount namespace.\nfunc (n *NsenterMounter) Unmount(target string) error {\n\targs := []string{target}\n\t\/\/ No need to execute systemd-run here, it's enough that unmount is executed\n\t\/\/ in the host's mount namespace. It will finish appropriate fuse daemon(s)\n\t\/\/ running in any scope.\n\tglog.V(5).Infof(\"nsenter unmount args: %v\", args)\n\toutputBytes, err := n.ne.Exec(\"umount\", args).CombinedOutput()\n\tif len(outputBytes) != 0 {\n\t\tglog.V(5).Infof(\"Output of unmounting %s: %v\", target, string(outputBytes))\n\t}\n\treturn err\n}\n\n\/\/ List returns a list of all mounted filesystems in the host's mount namespace.\nfunc (*NsenterMounter) List() ([]MountPoint, error) {\n\treturn listProcMounts(hostProcMountsPath)\n}\n\nfunc (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) {\n\treturn IsNotMountPoint(m, dir)\n}\n\nfunc (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool {\n\tdeletedDir := fmt.Sprintf(\"%s\\\\040(deleted)\", dir)\n\treturn (mp.Path == dir) || (mp.Path == deletedDir)\n}\n\n\/\/ IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt\n\/\/ in the host's root mount namespace.\nfunc (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {\n\tfile, err := filepath.Abs(file)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\t\/\/ Check the directory exists\n\tif _, err = os.Stat(file); os.IsNotExist(err) {\n\t\tglog.V(5).Infof(\"findmnt: directory %s does not exist\", file)\n\t\treturn true, err\n\t}\n\t\/\/ Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only\n\t\/\/ the first of multiple possible mountpoints using --first-only.\n\t\/\/ Also add fstype output to make sure that the output of target file will give the full path\n\t\/\/ TODO: Need more refactoring for this function. Track the solution with issue #26996\n\targs := []string{\"-o\", \"target,fstype\", \"--noheadings\", \"--first-only\", \"--target\", file}\n\tglog.V(5).Infof(\"nsenter findmnt args: %v\", args)\n\tout, err := n.ne.Exec(\"findmnt\", args).CombinedOutput()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed findmnt command for path %s: %s %v\", file, out, err)\n\t\t\/\/ Different operating systems behave differently for paths which are not mount points.\n\t\t\/\/ On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get \"\/\".\n\t\t\/\/ It's safer to assume that it's not a mount point.\n\t\treturn true, nil\n\t}\n\tmountTarget, err := parseFindMnt(string(out))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tglog.V(5).Infof(\"IsLikelyNotMountPoint findmnt output for path %s: %v:\", file, mountTarget)\n\n\tif mountTarget == file {\n\t\tglog.V(5).Infof(\"IsLikelyNotMountPoint: %s is a mount point\", file)\n\t\treturn false, nil\n\t}\n\tglog.V(5).Infof(\"IsLikelyNotMountPoint: %s is not a mount point\", file)\n\treturn true, nil\n}\n\n\/\/ parse output of \"findmnt -o target,fstype\" and return just the target\nfunc parseFindMnt(out string) (string, error) {\n\t\/\/ cut trailing newline\n\tout = strings.TrimSuffix(out, \"\\n\")\n\t\/\/ cut everything after the last space - it's the filesystem type\n\ti := strings.LastIndex(out, \" \")\n\tif i == -1 {\n\t\treturn \"\", fmt.Errorf(\"error parsing findmnt output, expected at least one space: %q\", out)\n\t}\n\treturn out[:i], nil\n}\n\n\/\/ DeviceOpened checks if block device in use by calling Open with O_EXCL flag.\n\/\/ Returns true if open returns errno EBUSY, and false if errno is nil.\n\/\/ Returns an error if errno is any error other than EBUSY.\n\/\/ Returns with error if pathname is not a device.\nfunc (n *NsenterMounter) DeviceOpened(pathname string) (bool, error) {\n\treturn exclusiveOpenFailsOnDevice(pathname)\n}\n\n\/\/ PathIsDevice uses FileInfo returned from os.Stat to check if path refers\n\/\/ to a device.\nfunc (n *NsenterMounter) PathIsDevice(pathname string) (bool, error) {\n\tpathType, err := n.GetFileType(pathname)\n\tisDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev\n\treturn isDevice, err\n}\n\n\/\/GetDeviceNameFromMount given a mount point, find the volume id from checking \/proc\/mounts\nfunc (n *NsenterMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {\n\treturn getDeviceNameFromMount(n, mountPath, pluginDir)\n}\n\nfunc (n *NsenterMounter) MakeRShared(path string) error {\n\treturn doMakeRShared(path, hostProcMountinfoPath)\n}\n\nfunc (mounter *NsenterMounter) GetFileType(pathname string) (FileType, error) {\n\tvar pathType FileType\n\toutputBytes, err := mounter.ne.Exec(\"stat\", []string{\"-L\", \"--printf=%F\", pathname}).CombinedOutput()\n\tif err != nil {\n\t\tif strings.Contains(string(outputBytes), \"No such file\") {\n\t\t\terr = fmt.Errorf(\"%s does not exist\", pathname)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"stat %s error: %v\", pathname, string(outputBytes))\n\t\t}\n\t\treturn pathType, err\n\t}\n\n\tswitch string(outputBytes) {\n\tcase \"socket\":\n\t\treturn FileTypeSocket, nil\n\tcase \"character special file\":\n\t\treturn FileTypeCharDev, nil\n\tcase \"block special file\":\n\t\treturn FileTypeBlockDev, nil\n\tcase \"directory\":\n\t\treturn FileTypeDirectory, nil\n\tcase \"regular file\":\n\t\treturn FileTypeFile, nil\n\t}\n\n\treturn pathType, fmt.Errorf(\"only recognise file, directory, socket, block device and character device\")\n}\n\nfunc (mounter *NsenterMounter) MakeDir(pathname string) error {\n\targs := []string{\"-p\", pathname}\n\tif _, err := mounter.ne.Exec(\"mkdir\", args).CombinedOutput(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (mounter *NsenterMounter) MakeFile(pathname string) error {\n\targs := []string{pathname}\n\tif _, err := mounter.ne.Exec(\"touch\", args).CombinedOutput(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (mounter *NsenterMounter) ExistsPath(pathname string) bool {\n\targs := []string{pathname}\n\t_, err := mounter.ne.Exec(\"ls\", args).CombinedOutput()\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (mounter *NsenterMounter) CleanSubPaths(podDir string, volumeName string) error {\n\treturn doCleanSubPaths(mounter, podDir, volumeName)\n}\n\n\/\/ getPidOnHost returns kubelet's pid in the host pid namespace\nfunc (mounter *NsenterMounter) getPidOnHost(procStatusPath string) (int, error) {\n\t\/\/ Get the PID from \/rootfs\/proc\/self\/status\n\tstatusBytes, err := utilio.ConsistentRead(procStatusPath, maxListTries)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"error reading %s: %s\", procStatusPath, err)\n\t}\n\tmatches := pidRegExp.FindSubmatch(statusBytes)\n\tif len(matches) < 2 {\n\t\treturn 0, fmt.Errorf(\"cannot parse %s: no Pid:\", procStatusPath)\n\t}\n\treturn strconv.Atoi(string(matches[1]))\n}\n\nfunc (mounter *NsenterMounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) {\n\thostPid, err := mounter.getPidOnHost(hostProcSelfStatusPath)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tglog.V(4).Infof(\"Kubelet's PID on the host is %d\", hostPid)\n\n\t\/\/ Bind-mount the subpath to avoid using symlinks in subpaths.\n\tnewHostPath, err = doBindSubPath(mounter, subPath, hostPid)\n\n\t\/\/ There is no action when the container starts. Bind-mount will be cleaned\n\t\/\/ when container stops by CleanSubPaths.\n\tcleanupAction = nil\n\treturn newHostPath, cleanupAction, err\n}\n\nfunc (mounter *NsenterMounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error {\n\treturn doSafeMakeDir(pathname, base, perm)\n}\n\nfunc (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) {\n\thostpath, err := mounter.ne.EvalSymlinks(pathname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn searchMountPoints(hostpath, hostProcMountinfoPath)\n}\n\nfunc (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) {\n\tkubeletpath, err := mounter.ne.KubeletPath(pathname)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getFSGroup(kubeletpath)\n}\n\nfunc (mounter *NsenterMounter) GetSELinuxSupport(pathname string) (bool, error) {\n\treturn getSELinuxSupport(pathname, hostProcMountsPath)\n}\n<|endoftext|>"} {"text":"<commit_before>package chipmunk\n\nimport (\n\t\/\/\"container\/list\"\n\t\/\/\"fmt\"\n\t. \"github.com\/vova616\/chipmunk\/vect\"\n\t\"time\"\n)\n\nvar VoidQueryFunc = func(a, b Indexable, data Data) {}\n\ntype HashSet map[HashValue]*Node\n\nfunc (set HashSet) Each(fnc HashSetIterator, data Data) {\n\tfor _, node := range set {\n\t\tfnc(node, data)\n\t}\n}\n\ntype BBTree struct {\n\tSpatialIndexClass\n\tSpatialIndex *SpatialIndex\n\n\tleaves HashSet\n\troot *Node\n\n\tpooledNodes *Node\n\tpooledPairs *Pair\n\tallocatedBuffers []Contact\n\n\tstamp time.Duration\n}\n\ntype Children struct {\n\tA, B *Node\n}\n\ntype Leaf struct {\n\tstamp time.Duration\n\tpairs *Pair\n}\n\ntype node struct {\n\tChildren\n\tLeaf\n}\n\ntype Node struct {\n\tobj Indexable\n\tbb AABB\n\tparent *Node\n\n\tnode\n}\n\nfunc (node *Node) IsLeaf() bool {\n\treturn node.obj != nil\n}\n\nfunc (node *Node) NodeSetA(value *Node) {\n\tnode.A = value\n\tvalue.parent = node\n}\n\nfunc (node *Node) NodeSetB(value *Node) {\n\tnode.B = value\n\tvalue.parent = node\n}\n\nfunc (node *Node) NodeOther(child *Node) *Node {\n\tif node.A == child {\n\t\treturn node.B\n\t}\n\treturn node.A\n}\n\ntype Thread struct {\n\tprev *Pair\n\tleaf *Node\n\tnext *Pair\n}\n\ntype Pair struct {\n\ta, b Thread\n}\n\nfunc NewBBTree(staticIndex *SpatialIndex) *SpatialIndex {\n\ttree := &BBTree{}\n\ttree.leaves = make(map[HashValue]*Node)\n\ttree.allocatedBuffers = make([]Contact, 0)\n\ttree.SpatialIndex = NewSpartialIndex(tree, staticIndex)\n\n\treturn tree.SpatialIndex\n}\n\nfunc (tree *BBTree) Count() int {\n\treturn len(tree.leaves)\n}\n\nfunc (tree *BBTree) NewLeaf(obj Indexable) *Node {\n\tnode := tree.NodeFromPool()\n\tnode.obj = obj\n\tnode.bb = tree.GetBB(obj)\n\n\treturn node\n}\n\nfunc (tree *BBTree) NodeNew(a, b *Node) *Node {\n\tnode := tree.NodeFromPool()\n\tnode.bb = Combine(a.bb, b.bb)\n\n\tnode.NodeSetA(a)\n\tnode.NodeSetB(b)\n\n\treturn node\n}\n\nfunc (tree *BBTree) GetBB(obj Indexable) AABB {\n\tbb := obj.AABB()\n\n\tv, ok := obj.Velocity()\n\tif ok {\n\n\t\tcoef := Float(0.1)\n\n\t\tl := bb.Lower.X\n\t\tb := bb.Lower.Y\n\t\tr := bb.Upper.X\n\t\tt := bb.Upper.Y\n\n\t\tx := (r - l) * coef\n\t\ty := (t - b) * coef\n\n\t\tv = Mult(v, 0.1)\n\n\t\treturn NewAABB(l+FMin(-x, v.X), b+FMin(-y, v.Y), r+FMax(x, v.X), t+FMax(y, v.Y))\n\t}\n\n\treturn bb\n}\n\nfunc (tree *BBTree) SubtreeInsert(subtree, leaf *Node) *Node {\n\tif subtree == nil {\n\t\treturn leaf\n\t} else if subtree.IsLeaf() {\n\t\treturn tree.NodeNew(leaf, subtree)\n\t}\n\n\tcost_a := subtree.B.bb.Area() + MergedArea(subtree.A.bb, leaf.bb)\n\tcost_b := subtree.A.bb.Area() + MergedArea(subtree.B.bb, leaf.bb)\n\n\tif cost_a == cost_b {\n\n\t\tcost_a = Proximity(subtree.A.bb, leaf.bb)\n\t\tcost_b = Proximity(subtree.B.bb, leaf.bb)\n\t}\n\n\tif cost_b < cost_a {\n\t\tsubtree.NodeSetB(tree.SubtreeInsert(subtree.B, leaf))\n\t} else {\n\t\tsubtree.NodeSetA(tree.SubtreeInsert(subtree.A, leaf))\n\t}\n\n\tsubtree.bb = Combine(subtree.bb, leaf.bb)\n\n\treturn subtree\n}\n\nfunc GetTree(index SpatialIndexClass) *BBTree {\n\tif index != nil {\n\t\ttree, _ := index.(*BBTree)\n\t\treturn tree\n\t}\n\treturn nil\n}\n\nfunc (tree *BBTree) GetMasterTree() SpatialIndexClass {\n\tdynamicTree := tree.SpatialIndex.dynamicIndex\n\tif dynamicTree != nil {\n\t\treturn dynamicTree\n\t}\n\treturn tree\n}\n\nfunc (tree *BBTree) Stamp() time.Duration {\n\treturn tree.stamp\n}\n\nfunc GetRootIfTree(index SpatialIndexClass) *Node {\n\tif index != nil {\n\t\ttree, ok := index.(*BBTree)\n\t\tif ok {\n\t\t\treturn tree.root\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tree *BBTree) LeafAddPairs(leaf *Node) {\n\tdynamicIndex := tree.SpatialIndex.dynamicIndex\n\tif dynamicIndex != nil {\n\t\tdynamicRoot := GetRootIfTree(dynamicIndex)\n\t\tif dynamicRoot != nil {\n\t\t\tdynamicTree := GetTree(dynamicIndex)\n\t\t\tcontext := MarkContext{dynamicTree, nil, nil, nil}\n\t\t\tcontext.MarkLeafQuery(dynamicRoot, leaf, true)\n\t\t}\n\t} else {\n\t\tstaticRoot := GetRootIfTree(tree.SpatialIndex.staticIndex)\n\t\tcontext := MarkContext{tree, staticRoot, VoidQueryFunc, nil}\n\t\tcontext.MarkLeaf(leaf)\n\t}\n}\n\nfunc (tree *BBTree) IncrementStamp() {\n\tdynamicTree := GetTree(tree.SpatialIndex.dynamicIndex)\n\tif dynamicTree != nil {\n\t\tdynamicTree.stamp++\n\t} else {\n\t\ttree.stamp++\n\t}\n}\n\nfunc (tree *BBTree) Insert(obj Indexable) {\n\n\tleaf := tree.NewLeaf(obj)\n\n\ttree.leaves[obj.Hash()] = leaf\n\n\troot := tree.root\n\ttree.root = tree.SubtreeInsert(root, leaf)\n\n\tleaf.stamp = tree.GetMasterTree().Stamp()\n\n\ttree.LeafAddPairs(leaf)\n\ttree.IncrementStamp()\n}\n\nfunc (tree *BBTree) PairInsert(a, b *Node) {\n\tnextA := a.pairs\n\tnextB := b.pairs\n\tpair := tree.PairFromPool()\n\ttemp := Pair{Thread{nil, a, nextA}, Thread{nil, b, nextB}}\n\n\tb.pairs = pair\n\ta.pairs = pair\n\n\t*pair = temp\n\n\tif nextA != nil {\n\t\tif nextA.a.leaf == a {\n\t\t\tnextA.a.prev = pair\n\t\t} else {\n\t\t\tnextA.b.prev = pair\n\t\t}\n\t}\n\n\tif nextB != nil {\n\t\tif nextB.a.leaf == b {\n\t\t\tnextB.a.prev = pair\n\t\t} else {\n\t\t\tnextB.b.prev = pair\n\t\t}\n\t}\n}\n\nfunc (tree *BBTree) NodeRecycle(node *Node) {\n\n}\n\nfunc (tree *BBTree) NodeFromPool() *Node {\n\treturn &Node{}\n}\n\nfunc (tree *BBTree) PairRecycle(pair *Pair) {\n\n}\n\nfunc (tree *BBTree) PairFromPool() *Pair {\n\treturn &Pair{}\n}\n\nfunc (tree *BBTree) NodeReplaceChild(parent, child, value *Node) {\n\tif parent.IsLeaf() {\n\t\tpanic(\"Internal Error: Cannot replace child of a leaf.\")\n\t}\n\n\tif (child == parent.A || child == parent.B) == false {\n\t\tpanic(\"Internal Error: Node is not a child of parent.\")\n\t}\n\n\tif parent.A == child {\n\t\ttree.NodeRecycle(parent.A)\n\t\tparent.NodeSetA(value)\n\t} else {\n\t\ttree.NodeRecycle(parent.B)\n\t\tparent.NodeSetB(value)\n\t}\n\n\tfor node := parent; node != nil; node = node.parent {\n\t\tnode.bb = Combine(node.A.bb, node.B.bb)\n\t}\n}\n\nfunc (tree *BBTree) Remove(obj Indexable) {\n\tleaf := tree.leaves[obj.Hash()]\n\tdelete(tree.leaves, obj.Hash())\n\n\tif leaf == nil {\n\t\treturn\n\t}\n\n\ttree.root = tree.SubtreeRemove(tree.root, leaf)\n\ttree.PairsClear(leaf)\n\ttree.NodeRecycle(leaf)\n}\n\nfunc (tree *BBTree) SubtreeRemove(subtree, leaf *Node) *Node {\n\tif leaf == subtree {\n\t\treturn nil\n\t}\n\n\tparent := leaf.parent\n\tif parent == subtree {\n\t\tother := subtree.NodeOther(leaf)\n\t\tother.parent = subtree.parent\n\t\ttree.NodeRecycle(subtree)\n\t\treturn other\n\t}\n\n\ttree.NodeReplaceChild(parent.parent, parent, parent.NodeOther(leaf))\n\treturn subtree\n}\n\nfunc ThreadUnlink(thread Thread) {\n\tnext := thread.next\n\tprev := thread.prev\n\n\tif next != nil {\n\t\tif next.a.leaf == thread.leaf {\n\t\t\tnext.a.prev = prev\n\t\t} else {\n\t\t\tnext.b.prev = prev\n\t\t}\n\t}\n\n\tif prev != nil {\n\t\tif prev.a.leaf == thread.leaf {\n\t\t\tprev.a.next = next\n\t\t} else {\n\t\t\tprev.b.next = next\n\t\t}\n\t} else {\n\t\tthread.leaf.pairs = next\n\t}\n}\n\nfunc (tree *BBTree) PairsClear(leaf *Node) {\n\tpair := leaf.pairs\n\tleaf.pairs = nil\n\n\tfor pair != nil {\n\t\tif pair.a.leaf == leaf {\n\t\t\tnext := pair.a.next\n\t\t\tThreadUnlink(pair.b)\n\t\t\ttree.PairRecycle(pair)\n\t\t\tpair = next\n\t\t} else {\n\t\t\tnext := pair.b.next\n\t\t\tThreadUnlink(pair.a)\n\t\t\ttree.PairRecycle(pair)\n\t\t\tpair = next\n\t\t}\n\t}\n}\n\nfunc LeafUpdate(leaf *Node, tree *BBTree) bool {\n\troot := tree.root\n\tbb := leaf.obj.AABB()\n\n\tif !leaf.bb.Contains(bb) {\n\t\tleaf.bb = tree.GetBB(leaf.obj)\n\n\t\troot = tree.SubtreeRemove(root, leaf)\n\t\ttree.root = tree.SubtreeInsert(root, leaf)\n\n\t\ttree.PairsClear(leaf)\n\t\tleaf.stamp = tree.GetMasterTree().Stamp()\n\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (tree *BBTree) Each(fnc HashSetIterator, data Data) {\n\ttree.leaves.Each(fnc, data)\n}\n\nfunc (tree *BBTree) Each2(fnc HashSetIterator, data Data) {\n\ttree.leaves.Each(fnc, data)\n}\n\nfunc (tree *BBTree) ReindexQuery(fnc SpatialIndexQueryFunc, data Data) {\n\tif tree.root == nil {\n\t\treturn\n\t}\n\n\t\/\/ LeafUpdate() may modify tree->root. Don't cache it.\n\tfor _, node := range tree.leaves {\n\t\tLeafUpdate(node, tree)\n\t}\n\n\tstaticIndex := GetTree(tree.SpatialIndex.staticIndex)\n\tvar staticRoot *Node = nil\n\tif staticIndex != nil {\n\t\tstaticRoot = staticIndex.root\n\t}\n\n\tcontext := MarkContext{tree, staticRoot, fnc, data}\n\tcontext.MarkSubtree(tree.root)\n\tif staticIndex != nil && staticRoot == nil {\n\t\tSpatialIndexCollideStatic(tree.SpatialIndex, staticIndex.SpatialIndex, fnc, data)\n\t}\n\n\ttree.IncrementStamp()\n}\n\nfunc (tree *BBTree) Query(obj Indexable, aabb AABB, fnc SpatialIndexQueryFunc, data Data) {\n\tif tree.root != nil {\n\t\tSubtreeQuery(tree.root, obj, aabb, fnc, data)\n\t}\n}\n\nfunc SubtreeQuery(subtree *Node, obj Indexable, bb AABB, fnc SpatialIndexQueryFunc, data Data) {\n\tif TestOverlap(subtree.bb, bb) {\n\t\tif subtree.IsLeaf() {\n\t\t\tfnc(obj, subtree.obj, data)\n\t\t} else {\n\t\t\tSubtreeQuery(subtree.A, obj, bb, fnc, data)\n\t\t\tSubtreeQuery(subtree.B, obj, bb, fnc, data)\n\t\t}\n\t}\n}\n\ntype MarkContext struct {\n\ttree *BBTree\n\tstaticRoot *Node\n\tfnc SpatialIndexQueryFunc\n\tdata Data\n}\n\nfunc (context *MarkContext) MarkSubtree(subtree *Node) {\n\tif subtree.IsLeaf() {\n\t\tcontext.MarkLeaf(subtree)\n\t} else {\n\t\tcontext.MarkSubtree(subtree.A)\n\t\tcontext.MarkSubtree(subtree.B)\n\t}\n}\n\nfunc (context *MarkContext) MarkLeafQuery(subtree, leaf *Node, left bool) {\n\n\tif TestOverlap(leaf.bb, subtree.bb) {\n\t\tif subtree.IsLeaf() {\n\t\t\tif left {\n\t\t\t\tcontext.tree.PairInsert(leaf, subtree)\n\t\t\t} else {\n\t\t\t\tif subtree.stamp < leaf.stamp {\n\t\t\t\t\tcontext.tree.PairInsert(subtree, leaf)\n\t\t\t\t}\n\t\t\t\tcontext.fnc(leaf.obj, subtree.obj, context.data)\n\t\t\t}\n\t\t} else {\n\t\t\tcontext.MarkLeafQuery(subtree.A, leaf, left)\n\t\t\tcontext.MarkLeafQuery(subtree.B, leaf, left)\n\t\t}\n\t}\n}\n\nfunc (context *MarkContext) MarkLeaf(leaf *Node) {\n\ttree := context.tree\n\tif leaf.stamp == tree.GetMasterTree().Stamp() {\n\t\tstaticRoot := context.staticRoot\n\t\tif staticRoot != nil {\n\t\t\tcontext.MarkLeafQuery(staticRoot, leaf, false)\n\t\t}\n\n\t\tfor node := leaf; node.parent != nil; node = node.parent {\n\t\t\tif node == node.parent.A {\n\t\t\t\tcontext.MarkLeafQuery(node.parent.B, leaf, true)\n\t\t\t} else {\n\t\t\t\tcontext.MarkLeafQuery(node.parent.A, leaf, false)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpair := leaf.pairs\n\t\ti := 0\n\t\tfor pair != nil {\n\t\t\tif leaf == pair.b.leaf {\n\t\t\t\tcontext.fnc(pair.a.leaf.obj, leaf.obj, context.data)\n\t\t\t\tpair = pair.b.next\n\t\t\t} else {\n\t\t\t\tpair = pair.a.next\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\t\/\/fmt.Println(i)\n\t}\n}\n<commit_msg>Cleanup and added pools :)<commit_after>package chipmunk\n\nimport (\n\t\/\/\"container\/list\"\n\t. \"github.com\/vova616\/chipmunk\/vect\"\n\t\/\/\"log\"\n\t\"time\"\n)\n\nvar VoidQueryFunc = func(a, b Indexable, data Data) {}\n\ntype HashSet map[HashValue]*Node\n\nfunc (set HashSet) Each(fnc HashSetIterator, data Data) {\n\tfor _, node := range set {\n\t\tfnc(node, data)\n\t}\n}\n\ntype BBTree struct {\n\tSpatialIndexClass\n\tSpatialIndex *SpatialIndex\n\n\tleaves HashSet\n\troot *Node\n\n\tpairBuffer []*Pair\n\tnodeBuffer []*Node\n\n\tstamp time.Duration\n}\n\ntype Children struct {\n\tA, B *Node\n}\n\ntype Leaf struct {\n\tstamp time.Duration\n\tpairs *Pair\n}\n\ntype node struct {\n\tChildren\n\tLeaf\n}\n\ntype Node struct {\n\tobj Indexable\n\tbb AABB\n\tparent *Node\n\n\tnode\n}\n\nfunc (node *Node) IsLeaf() bool {\n\treturn node.obj != nil\n}\n\nfunc (node *Node) NodeSetA(value *Node) {\n\tnode.A = value\n\tvalue.parent = node\n}\n\nfunc (node *Node) NodeSetB(value *Node) {\n\tnode.B = value\n\tvalue.parent = node\n}\n\nfunc (node *Node) NodeOther(child *Node) *Node {\n\tif node.A == child {\n\t\treturn node.B\n\t}\n\treturn node.A\n}\n\ntype Thread struct {\n\tprev *Pair\n\tleaf *Node\n\tnext *Pair\n}\n\ntype Pair struct {\n\ta, b Thread\n}\n\nfunc NewBBTree(staticIndex *SpatialIndex) *SpatialIndex {\n\ttree := &BBTree{}\n\ttree.leaves = make(map[HashValue]*Node)\n\n\ttree.SpatialIndex = NewSpartialIndex(tree, staticIndex)\n\ttree.pairBuffer = make([]*Pair, 0)\n\ttree.nodeBuffer = make([]*Node, 0)\n\treturn tree.SpatialIndex\n}\n\nfunc (tree *BBTree) Count() int {\n\treturn len(tree.leaves)\n}\n\nfunc (tree *BBTree) NewLeaf(obj Indexable) *Node {\n\tnode := tree.NodeFromPool()\n\tnode.obj = obj\n\tnode.bb = tree.GetBB(obj)\n\n\treturn node\n}\n\nfunc (tree *BBTree) NodeNew(a, b *Node) *Node {\n\tnode := tree.NodeFromPool()\n\tnode.bb = Combine(a.bb, b.bb)\n\n\tnode.NodeSetA(a)\n\tnode.NodeSetB(b)\n\n\treturn node\n}\n\nfunc (tree *BBTree) GetBB(obj Indexable) AABB {\n\tbb := obj.AABB()\n\n\tv, ok := obj.Velocity()\n\tif ok {\n\n\t\tcoef := Float(0.1)\n\n\t\tl := bb.Lower.X\n\t\tb := bb.Lower.Y\n\t\tr := bb.Upper.X\n\t\tt := bb.Upper.Y\n\n\t\tx := (r - l) * coef\n\t\ty := (t - b) * coef\n\n\t\tv = Mult(v, 0.1)\n\n\t\treturn NewAABB(l+FMin(-x, v.X), b+FMin(-y, v.Y), r+FMax(x, v.X), t+FMax(y, v.Y))\n\t}\n\n\treturn bb\n}\n\nfunc (tree *BBTree) SubtreeInsert(subtree, leaf *Node) *Node {\n\tif subtree == nil {\n\t\treturn leaf\n\t} else if subtree.IsLeaf() {\n\t\treturn tree.NodeNew(leaf, subtree)\n\t}\n\n\tcost_a := subtree.B.bb.Area() + MergedArea(subtree.A.bb, leaf.bb)\n\tcost_b := subtree.A.bb.Area() + MergedArea(subtree.B.bb, leaf.bb)\n\n\tif cost_a == cost_b {\n\n\t\tcost_a = Proximity(subtree.A.bb, leaf.bb)\n\t\tcost_b = Proximity(subtree.B.bb, leaf.bb)\n\t}\n\n\tif cost_b < cost_a {\n\t\tsubtree.NodeSetB(tree.SubtreeInsert(subtree.B, leaf))\n\t} else {\n\t\tsubtree.NodeSetA(tree.SubtreeInsert(subtree.A, leaf))\n\t}\n\n\tsubtree.bb = Combine(subtree.bb, leaf.bb)\n\n\treturn subtree\n}\n\nfunc GetTree(index SpatialIndexClass) *BBTree {\n\tif index != nil {\n\t\ttree, _ := index.(*BBTree)\n\t\treturn tree\n\t}\n\treturn nil\n}\n\nfunc (tree *BBTree) GetMasterTree() SpatialIndexClass {\n\tdynamicTree := tree.SpatialIndex.dynamicIndex\n\tif dynamicTree != nil {\n\t\treturn dynamicTree\n\t}\n\treturn tree\n}\n\nfunc (tree *BBTree) Stamp() time.Duration {\n\treturn tree.stamp\n}\n\nfunc GetRootIfTree(index SpatialIndexClass) *Node {\n\tif index != nil {\n\t\ttree, ok := index.(*BBTree)\n\t\tif ok {\n\t\t\treturn tree.root\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tree *BBTree) LeafAddPairs(leaf *Node) {\n\tdynamicIndex := tree.SpatialIndex.dynamicIndex\n\tif dynamicIndex != nil {\n\t\tdynamicRoot := GetRootIfTree(dynamicIndex)\n\t\tif dynamicRoot != nil {\n\t\t\tdynamicTree := GetTree(dynamicIndex)\n\t\t\tcontext := MarkContext{dynamicTree, nil, nil, nil}\n\t\t\tcontext.MarkLeafQuery(dynamicRoot, leaf, true)\n\t\t}\n\t} else {\n\t\tstaticRoot := GetRootIfTree(tree.SpatialIndex.staticIndex)\n\t\tcontext := MarkContext{tree, staticRoot, VoidQueryFunc, nil}\n\t\tcontext.MarkLeaf(leaf)\n\t}\n}\n\nfunc (tree *BBTree) IncrementStamp() {\n\tdynamicTree := GetTree(tree.SpatialIndex.dynamicIndex)\n\tif dynamicTree != nil {\n\t\tdynamicTree.stamp++\n\t} else {\n\t\ttree.stamp++\n\t}\n}\n\nfunc (tree *BBTree) Insert(obj Indexable) {\n\n\tleaf := tree.NewLeaf(obj)\n\n\ttree.leaves[obj.Hash()] = leaf\n\n\troot := tree.root\n\ttree.root = tree.SubtreeInsert(root, leaf)\n\n\tleaf.stamp = tree.GetMasterTree().Stamp()\n\n\ttree.LeafAddPairs(leaf)\n\ttree.IncrementStamp()\n}\n\nfunc (tree *BBTree) PairInsert(a, b *Node) {\n\tnextA := a.pairs\n\tnextB := b.pairs\n\tpair := tree.PairFromPool()\n\ttemp := Pair{Thread{nil, a, nextA}, Thread{nil, b, nextB}}\n\n\tb.pairs = pair\n\ta.pairs = pair\n\n\t*pair = temp\n\n\tif nextA != nil {\n\t\tif nextA.a.leaf == a {\n\t\t\tnextA.a.prev = pair\n\t\t} else {\n\t\t\tnextA.b.prev = pair\n\t\t}\n\t}\n\n\tif nextB != nil {\n\t\tif nextB.a.leaf == b {\n\t\t\tnextB.a.prev = pair\n\t\t} else {\n\t\t\tnextB.b.prev = pair\n\t\t}\n\t}\n}\n\nfunc (tree *BBTree) NodeRecycle(node *Node) {\n\t*node = Node{}\n\ttree.nodeBuffer = append(tree.nodeBuffer, node)\n}\n\nfunc (tree *BBTree) NodeFromPool() *Node {\n\tvar node *Node\n\tif len(tree.nodeBuffer) > 0 {\n\t\tnode, tree.nodeBuffer = tree.nodeBuffer[len(tree.nodeBuffer)-1], tree.nodeBuffer[:len(tree.nodeBuffer)-1]\n\t} else {\n\t\tbuff := make([]*Node, 100)\n\t\t\/\/log.Println(\"New node buff\")\n\t\tfor i, _ := range buff {\n\t\t\tbuff[i] = &Node{}\n\t\t}\n\t\ttree.nodeBuffer = append(tree.nodeBuffer, buff...)\n\t\tnode, tree.nodeBuffer = tree.nodeBuffer[len(tree.nodeBuffer)-1], tree.nodeBuffer[:len(tree.nodeBuffer)-1]\n\t}\n\treturn node\n}\n\nfunc (tree *BBTree) PairRecycle(pair *Pair) {\n\t*pair = Pair{}\n\ttree.pairBuffer = append(tree.pairBuffer, pair)\n}\n\nfunc (tree *BBTree) PairFromPool() *Pair {\n\tvar pair *Pair\n\tif len(tree.pairBuffer) > 0 {\n\t\tpair, tree.pairBuffer = tree.pairBuffer[len(tree.pairBuffer)-1], tree.pairBuffer[:len(tree.pairBuffer)-1]\n\t} else {\n\t\tbuff := make([]*Pair, 100)\n\t\t\/\/log.Println(\"New pair buff\")\n\t\tfor i, _ := range buff {\n\t\t\tbuff[i] = &Pair{}\n\t\t}\n\t\ttree.pairBuffer = append(tree.pairBuffer, buff...)\n\t\tpair, tree.pairBuffer = tree.pairBuffer[len(tree.pairBuffer)-1], tree.pairBuffer[:len(tree.pairBuffer)-1]\n\t}\n\treturn pair\n}\n\nfunc (tree *BBTree) NodeReplaceChild(parent, child, value *Node) {\n\tif parent.IsLeaf() {\n\t\tpanic(\"Internal Error: Cannot replace child of a leaf.\")\n\t}\n\n\tif (child == parent.A || child == parent.B) == false {\n\t\tpanic(\"Internal Error: Node is not a child of parent.\")\n\t}\n\n\tif parent.A == child {\n\t\ttree.NodeRecycle(parent.A)\n\t\tparent.NodeSetA(value)\n\t} else {\n\t\ttree.NodeRecycle(parent.B)\n\t\tparent.NodeSetB(value)\n\t}\n\n\tfor node := parent; node != nil; node = node.parent {\n\t\tnode.bb = Combine(node.A.bb, node.B.bb)\n\t}\n}\n\nfunc (tree *BBTree) Remove(obj Indexable) {\n\tleaf := tree.leaves[obj.Hash()]\n\tdelete(tree.leaves, obj.Hash())\n\n\tif leaf == nil {\n\t\treturn\n\t}\n\n\ttree.root = tree.SubtreeRemove(tree.root, leaf)\n\ttree.PairsClear(leaf)\n\ttree.NodeRecycle(leaf)\n}\n\nfunc (tree *BBTree) SubtreeRemove(subtree, leaf *Node) *Node {\n\tif leaf == subtree {\n\t\treturn nil\n\t}\n\n\tparent := leaf.parent\n\tif parent == subtree {\n\t\tother := subtree.NodeOther(leaf)\n\t\tother.parent = subtree.parent\n\t\ttree.NodeRecycle(subtree)\n\t\treturn other\n\t}\n\n\ttree.NodeReplaceChild(parent.parent, parent, parent.NodeOther(leaf))\n\treturn subtree\n}\n\nfunc ThreadUnlink(thread Thread) {\n\tnext := thread.next\n\tprev := thread.prev\n\n\tif next != nil {\n\t\tif next.a.leaf == thread.leaf {\n\t\t\tnext.a.prev = prev\n\t\t} else {\n\t\t\tnext.b.prev = prev\n\t\t}\n\t}\n\n\tif prev != nil {\n\t\tif prev.a.leaf == thread.leaf {\n\t\t\tprev.a.next = next\n\t\t} else {\n\t\t\tprev.b.next = next\n\t\t}\n\t} else {\n\t\tthread.leaf.pairs = next\n\t}\n}\n\nfunc (tree *BBTree) PairsClear(leaf *Node) {\n\tpair := leaf.pairs\n\tleaf.pairs = nil\n\n\tfor pair != nil {\n\t\tif pair.a.leaf == leaf {\n\t\t\tnext := pair.a.next\n\t\t\tThreadUnlink(pair.b)\n\t\t\ttree.PairRecycle(pair)\n\t\t\tpair = next\n\t\t} else {\n\t\t\tnext := pair.b.next\n\t\t\tThreadUnlink(pair.a)\n\t\t\ttree.PairRecycle(pair)\n\t\t\tpair = next\n\t\t}\n\t}\n}\n\nfunc LeafUpdate(leaf *Node, tree *BBTree) bool {\n\troot := tree.root\n\tbb := leaf.obj.AABB()\n\n\tif !leaf.bb.Contains(bb) {\n\t\tleaf.bb = tree.GetBB(leaf.obj)\n\n\t\troot = tree.SubtreeRemove(root, leaf)\n\t\ttree.root = tree.SubtreeInsert(root, leaf)\n\n\t\ttree.PairsClear(leaf)\n\t\tleaf.stamp = tree.GetMasterTree().Stamp()\n\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (tree *BBTree) Each(fnc HashSetIterator, data Data) {\n\ttree.leaves.Each(fnc, data)\n}\n\nfunc (tree *BBTree) Each2(fnc HashSetIterator, data Data) {\n\ttree.leaves.Each(fnc, data)\n}\n\nfunc (tree *BBTree) ReindexQuery(fnc SpatialIndexQueryFunc, data Data) {\n\tif tree.root == nil {\n\t\treturn\n\t}\n\n\t\/\/ LeafUpdate() may modify tree->root. Don't cache it.\n\tfor _, node := range tree.leaves {\n\t\tLeafUpdate(node, tree)\n\t}\n\n\tstaticIndex := GetTree(tree.SpatialIndex.staticIndex)\n\tvar staticRoot *Node = nil\n\tif staticIndex != nil {\n\t\tstaticRoot = staticIndex.root\n\t}\n\n\tcontext := MarkContext{tree, staticRoot, fnc, data}\n\tcontext.MarkSubtree(tree.root)\n\tif staticIndex != nil && staticRoot == nil {\n\t\tSpatialIndexCollideStatic(tree.SpatialIndex, staticIndex.SpatialIndex, fnc, data)\n\t}\n\n\ttree.IncrementStamp()\n}\n\nfunc (tree *BBTree) Query(obj Indexable, aabb AABB, fnc SpatialIndexQueryFunc, data Data) {\n\tif tree.root != nil {\n\t\tSubtreeQuery(tree.root, obj, aabb, fnc, data)\n\t}\n}\n\nfunc SubtreeQuery(subtree *Node, obj Indexable, bb AABB, fnc SpatialIndexQueryFunc, data Data) {\n\tif TestOverlap(subtree.bb, bb) {\n\t\tif subtree.IsLeaf() {\n\t\t\tfnc(obj, subtree.obj, data)\n\t\t} else {\n\t\t\tSubtreeQuery(subtree.A, obj, bb, fnc, data)\n\t\t\tSubtreeQuery(subtree.B, obj, bb, fnc, data)\n\t\t}\n\t}\n}\n\ntype MarkContext struct {\n\ttree *BBTree\n\tstaticRoot *Node\n\tfnc SpatialIndexQueryFunc\n\tdata Data\n}\n\nfunc (context *MarkContext) MarkSubtree(subtree *Node) {\n\tif subtree.IsLeaf() {\n\t\tcontext.MarkLeaf(subtree)\n\t} else {\n\t\tcontext.MarkSubtree(subtree.A)\n\t\tcontext.MarkSubtree(subtree.B)\n\t}\n}\n\nfunc (context *MarkContext) MarkLeafQuery(subtree, leaf *Node, left bool) {\n\n\tif TestOverlap(leaf.bb, subtree.bb) {\n\t\tif subtree.IsLeaf() {\n\t\t\tif left {\n\t\t\t\tcontext.tree.PairInsert(leaf, subtree)\n\t\t\t} else {\n\t\t\t\tif subtree.stamp < leaf.stamp {\n\t\t\t\t\tcontext.tree.PairInsert(subtree, leaf)\n\t\t\t\t}\n\t\t\t\tcontext.fnc(leaf.obj, subtree.obj, context.data)\n\t\t\t}\n\t\t} else {\n\t\t\tcontext.MarkLeafQuery(subtree.A, leaf, left)\n\t\t\tcontext.MarkLeafQuery(subtree.B, leaf, left)\n\t\t}\n\t}\n}\n\nfunc (context *MarkContext) MarkLeaf(leaf *Node) {\n\ttree := context.tree\n\tif leaf.stamp == tree.GetMasterTree().Stamp() {\n\t\tstaticRoot := context.staticRoot\n\t\tif staticRoot != nil {\n\t\t\tcontext.MarkLeafQuery(staticRoot, leaf, false)\n\t\t}\n\n\t\tfor node := leaf; node.parent != nil; node = node.parent {\n\t\t\tif node == node.parent.A {\n\t\t\t\tcontext.MarkLeafQuery(node.parent.B, leaf, true)\n\t\t\t} else {\n\t\t\t\tcontext.MarkLeafQuery(node.parent.A, leaf, false)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpair := leaf.pairs\n\t\ti := 0\n\t\tfor pair != nil {\n\t\t\tif leaf == pair.b.leaf {\n\t\t\t\tcontext.fnc(pair.a.leaf.obj, leaf.obj, context.data)\n\t\t\t\tpair = pair.b.next\n\t\t\t} else {\n\t\t\t\tpair = pair.a.next\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\t\/\/fmt.Println(i)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Copyright (C) 2019 Monomax Software Pty Ltd\n *\n * This file is part of Dnote CLI.\n *\n * Dnote CLI is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * Dnote CLI is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with Dnote CLI. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage edit\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/dnote\/dnote\/cli\/core\"\n\t\"github.com\/dnote\/dnote\/cli\/infra\"\n\t\"github.com\/dnote\/dnote\/cli\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar newContent string\n\nvar example = `\n * Edit the note by its id\n dnote edit 3\n\n\t* Skip the prompt by providing new content directly\n\tdnote edit 3 -c \"new content\"`\n\n\/\/ NewCmd returns a new edit command\nfunc NewCmd(ctx infra.DnoteCtx) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"edit\",\n\t\tShort: \"Edit a note or a book\",\n\t\tAliases: []string{\"e\"},\n\t\tExample: example,\n\t\tPreRunE: preRun,\n\t\tRunE: newRun(ctx),\n\t}\n\n\tf := cmd.Flags()\n\tf.StringVarP(&newContent, \"content\", \"c\", \"\", \"The new content for the note\")\n\n\treturn cmd\n}\n\nfunc preRun(cmd *cobra.Command, args []string) error {\n\tif len(args) != 1 && len(args) != 2 {\n\t\treturn errors.New(\"Incorrect number of argument\")\n\t}\n\n\treturn nil\n}\n\nfunc newRun(ctx infra.DnoteCtx) core.RunEFunc {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tdb := ctx.DB\n\n\t\tvar noteRowID string\n\n\t\tif len(args) == 2 {\n\t\t\tlog.Plain(log.ColorYellow.Sprintf(\"DEPRECATED: you no longer need to pass book name to the view command. e.g. `dnote view 123`.\\n\\n\"))\n\n\t\t\tnoteRowID = args[1]\n\t\t} else {\n\t\t\tnoteRowID = args[0]\n\t\t}\n\n\t\tvar noteUUID, oldContent string\n\t\terr := db.QueryRow(\"SELECT uuid, body FROM notes WHERE rowid = ? AND deleted = false\", noteRowID).Scan(¬eUUID, &oldContent)\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn errors.Errorf(\"note %s not found\", noteRowID)\n\t\t} else if err != nil {\n\t\t\treturn errors.Wrap(err, \"querying the book\")\n\t\t}\n\n\t\tif newContent == \"\" {\n\t\t\tfpath := core.GetDnoteTmpContentPath(ctx)\n\n\t\t\te := ioutil.WriteFile(fpath, []byte(oldContent), 0644)\n\t\t\tif e != nil {\n\t\t\t\treturn errors.Wrap(e, \"preparing tmp content file\")\n\t\t\t}\n\n\t\t\te = core.GetEditorInput(ctx, fpath, &newContent)\n\t\t\tif e != nil {\n\t\t\t\treturn errors.Wrap(err, \"getting editor input\")\n\t\t\t}\n\t\t}\n\n\t\tif oldContent == newContent {\n\t\t\treturn errors.New(\"Nothing changed\")\n\t\t}\n\n\t\tts := time.Now().UnixNano()\n\t\tnewContent = core.SanitizeContent(newContent)\n\n\t\ttx, err := db.Begin()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"beginning a transaction\")\n\t\t}\n\n\t\t_, err = tx.Exec(`UPDATE notes\n\t\t\tSET body = ?, edited_on = ?, dirty = ?\n\t\t\tWHERE rowid = ?`, newContent, ts, true, noteRowID)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn errors.Wrap(err, \"updating the note\")\n\t\t}\n\n\t\ttx.Commit()\n\n\t\tlog.Success(\"edited the note\\n\")\n\t\tfmt.Printf(\"\\n------------------------content------------------------\\n\")\n\t\tfmt.Printf(\"%s\", newContent)\n\t\tfmt.Printf(\"\\n-------------------------------------------------------\\n\")\n\n\t\treturn nil\n\t}\n}\n<commit_msg>Remove mention of unsupported feature (#205)<commit_after>\/* Copyright (C) 2019 Monomax Software Pty Ltd\n *\n * This file is part of Dnote CLI.\n *\n * Dnote CLI is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * Dnote CLI is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with Dnote CLI. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n *\/\n\npackage edit\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/dnote\/dnote\/cli\/core\"\n\t\"github.com\/dnote\/dnote\/cli\/infra\"\n\t\"github.com\/dnote\/dnote\/cli\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar newContent string\n\nvar example = `\n * Edit the note by its id\n dnote edit 3\n\n\t* Skip the prompt by providing new content directly\n\tdnote edit 3 -c \"new content\"`\n\n\/\/ NewCmd returns a new edit command\nfunc NewCmd(ctx infra.DnoteCtx) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"edit\",\n\t\tShort: \"Edit a note\",\n\t\tAliases: []string{\"e\"},\n\t\tExample: example,\n\t\tPreRunE: preRun,\n\t\tRunE: newRun(ctx),\n\t}\n\n\tf := cmd.Flags()\n\tf.StringVarP(&newContent, \"content\", \"c\", \"\", \"The new content for the note\")\n\n\treturn cmd\n}\n\nfunc preRun(cmd *cobra.Command, args []string) error {\n\tif len(args) != 1 && len(args) != 2 {\n\t\treturn errors.New(\"Incorrect number of argument\")\n\t}\n\n\treturn nil\n}\n\nfunc newRun(ctx infra.DnoteCtx) core.RunEFunc {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tdb := ctx.DB\n\n\t\tvar noteRowID string\n\n\t\tif len(args) == 2 {\n\t\t\tlog.Plain(log.ColorYellow.Sprintf(\"DEPRECATED: you no longer need to pass book name to the view command. e.g. `dnote view 123`.\\n\\n\"))\n\n\t\t\tnoteRowID = args[1]\n\t\t} else {\n\t\t\tnoteRowID = args[0]\n\t\t}\n\n\t\tvar noteUUID, oldContent string\n\t\terr := db.QueryRow(\"SELECT uuid, body FROM notes WHERE rowid = ? AND deleted = false\", noteRowID).Scan(¬eUUID, &oldContent)\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn errors.Errorf(\"note %s not found\", noteRowID)\n\t\t} else if err != nil {\n\t\t\treturn errors.Wrap(err, \"querying the book\")\n\t\t}\n\n\t\tif newContent == \"\" {\n\t\t\tfpath := core.GetDnoteTmpContentPath(ctx)\n\n\t\t\te := ioutil.WriteFile(fpath, []byte(oldContent), 0644)\n\t\t\tif e != nil {\n\t\t\t\treturn errors.Wrap(e, \"preparing tmp content file\")\n\t\t\t}\n\n\t\t\te = core.GetEditorInput(ctx, fpath, &newContent)\n\t\t\tif e != nil {\n\t\t\t\treturn errors.Wrap(err, \"getting editor input\")\n\t\t\t}\n\t\t}\n\n\t\tif oldContent == newContent {\n\t\t\treturn errors.New(\"Nothing changed\")\n\t\t}\n\n\t\tts := time.Now().UnixNano()\n\t\tnewContent = core.SanitizeContent(newContent)\n\n\t\ttx, err := db.Begin()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"beginning a transaction\")\n\t\t}\n\n\t\t_, err = tx.Exec(`UPDATE notes\n\t\t\tSET body = ?, edited_on = ?, dirty = ?\n\t\t\tWHERE rowid = ?`, newContent, ts, true, noteRowID)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn errors.Wrap(err, \"updating the note\")\n\t\t}\n\n\t\ttx.Commit()\n\n\t\tlog.Success(\"edited the note\\n\")\n\t\tfmt.Printf(\"\\n------------------------content------------------------\\n\")\n\t\tfmt.Printf(\"%s\", newContent)\n\t\tfmt.Printf(\"\\n-------------------------------------------------------\\n\")\n\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package driver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-version\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/config\"\n\tcstructs \"github.com\/hashicorp\/nomad\/client\/driver\/structs\"\n\t\"github.com\/hashicorp\/nomad\/client\/fingerprint\"\n\t\"github.com\/hashicorp\/nomad\/helper\/args\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\nvar (\n\treRktVersion = regexp.MustCompile(`rkt version (\\d[.\\d]+)`)\n\treAppcVersion = regexp.MustCompile(`appc version (\\d[.\\d]+)`)\n)\n\nconst (\n\t\/\/ minRktVersion is the earliest supported version of rkt. rkt added support\n\t\/\/ for CPU and memory isolators in 0.14.0. We cannot support an earlier\n\t\/\/ version to maintain an uniform interface across all drivers\n\tminRktVersion = \"0.14.0\"\n\n\t\/\/ bytesToMB is the conversion from bytes to megabytes.\n\tbytesToMB = 1024 * 1024\n)\n\n\/\/ RktDriver is a driver for running images via Rkt\n\/\/ We attempt to chose sane defaults for now, with more configuration available\n\/\/ planned in the future\ntype RktDriver struct {\n\tDriverContext\n\tfingerprint.StaticFingerprinter\n}\n\ntype RktDriverConfig struct {\n\tImageName string `mapstructure:\"image\"`\n\tArgs []string `mapstructure:\"args\"`\n}\n\n\/\/ rktHandle is returned from Start\/Open as a handle to the PID\ntype rktHandle struct {\n\tproc *os.Process\n\timage string\n\tlogger *log.Logger\n\twaitCh chan *cstructs.WaitResult\n\tdoneCh chan struct{}\n}\n\n\/\/ rktPID is a struct to map the pid running the process to the vm image on\n\/\/ disk\ntype rktPID struct {\n\tPid int\n\tImage string\n}\n\n\/\/ NewRktDriver is used to create a new exec driver\nfunc NewRktDriver(ctx *DriverContext) Driver {\n\treturn &RktDriver{DriverContext: *ctx}\n}\n\nfunc (d *RktDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {\n\t\/\/ Only enable if we are root when running on non-windows systems.\n\tif runtime.GOOS != \"windows\" && syscall.Geteuid() != 0 {\n\t\td.logger.Printf(\"[DEBUG] driver.rkt: must run as root user, disabling\")\n\t\treturn false, nil\n\t}\n\n\toutBytes, err := exec.Command(\"rkt\", \"version\").Output()\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\tout := strings.TrimSpace(string(outBytes))\n\n\trktMatches := reRktVersion.FindStringSubmatch(out)\n\tappcMatches := reAppcVersion.FindStringSubmatch(out)\n\tif len(rktMatches) != 2 || len(appcMatches) != 2 {\n\t\treturn false, fmt.Errorf(\"Unable to parse Rkt version string: %#v\", rktMatches)\n\t}\n\n\tnode.Attributes[\"driver.rkt\"] = \"1\"\n\tnode.Attributes[\"driver.rkt.version\"] = rktMatches[1]\n\tnode.Attributes[\"driver.rkt.appc.version\"] = appcMatches[1]\n\n\tminVersion, _ := version.NewVersion(minRktVersion)\n\tcurrentVersion, _ := version.NewVersion(node.Attributes[\"driver.rkt.version\"])\n\tif currentVersion.LessThan(minVersion) {\n\t\t\/\/ Do not allow rkt < 0.14.0\n\t\td.logger.Printf(\"[WARN] driver.rkt: please upgrade rkt to a version >= %s\", minVersion)\n\t\tnode.Attributes[\"driver.rkt\"] = \"0\"\n\t}\n\treturn true, nil\n}\n\n\/\/ Run an existing Rkt image.\nfunc (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {\n\tvar driverConfig RktDriverConfig\n\tif err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Validate that the config is valid.\n\timg := driverConfig.ImageName\n\tif img == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing ACI image for rkt\")\n\t}\n\n\t\/\/ Get the tasks local directory.\n\ttaskName := d.DriverContext.taskName\n\ttaskDir, ok := ctx.AllocDir.TaskDirs[taskName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Could not find task directory for task: %v\", d.DriverContext.taskName)\n\t}\n\ttaskLocal := filepath.Join(taskDir, allocdir.TaskLocal)\n\n\t\/\/ Add the given trust prefix\n\ttrustPrefix, trustCmd := task.Config[\"trust_prefix\"]\n\tif trustCmd {\n\t\tvar outBuf, errBuf bytes.Buffer\n\t\tcmd := exec.Command(\"rkt\", \"trust\", fmt.Sprintf(\"--prefix=%s\", trustPrefix))\n\t\tcmd.Stdout = &outBuf\n\t\tcmd.Stderr = &errBuf\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error running rkt trust: %s\\n\\nOutput: %s\\n\\nError: %s\",\n\t\t\t\terr, outBuf.String(), errBuf.String())\n\t\t}\n\t\td.logger.Printf(\"[DEBUG] driver.rkt: added trust prefix: %q\", trustPrefix)\n\t}\n\n\t\/\/ Build the command.\n\tvar cmdArgs []string\n\n\t\/\/ Inject the environment variables.\n\tenvVars := TaskEnvironmentVariables(ctx, task)\n\n\t\/\/ Clear the task directories as they are not currently supported.\n\tenvVars.ClearTaskLocalDir()\n\tenvVars.ClearAllocDir()\n\n\tfor k, v := range envVars.Map() {\n\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--set-env=%v=%v\", k, v))\n\t}\n\n\t\/\/ Disble signature verification if the trust command was not run.\n\tif !trustCmd {\n\t\tcmdArgs = append(cmdArgs, \"--insecure-skip-verify\")\n\t}\n\n\t\/\/ Append the run command.\n\tcmdArgs = append(cmdArgs, \"run\", \"--mds-register=false\", img)\n\n\t\/\/ Check if the user has overriden the exec command.\n\tif execCmd, ok := task.Config[\"command\"]; ok {\n\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--exec=%v\", execCmd))\n\t}\n\n\tif task.Resources.MemoryMB == 0 {\n\t\treturn nil, fmt.Errorf(\"Memory limit cannot be zero\")\n\t}\n\tif task.Resources.CPU == 0 {\n\t\treturn nil, fmt.Errorf(\"CPU limit cannot be zero\")\n\t}\n\n\t\/\/ Add memory isolator\n\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--memory=%vM\", int64(task.Resources.MemoryMB)*bytesToMB))\n\n\t\/\/ Add CPU isolator\n\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--cpu=%vm\", int64(task.Resources.CPU)))\n\n\t\/\/ Add user passed arguments.\n\tif len(driverConfig.Args) != 0 {\n\t\tparsed := args.ParseAndReplace(driverConfig.Args, envVars.Map())\n\n\t\t\/\/ Need to start arguments with \"--\"\n\t\tif len(parsed) > 0 {\n\t\t\tcmdArgs = append(cmdArgs, \"--\")\n\t\t}\n\n\t\tfor _, arg := range parsed {\n\t\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"%v\", arg))\n\t\t}\n\t}\n\n\t\/\/ Create files to capture stdin and out.\n\tstdoutFilename := filepath.Join(taskLocal, fmt.Sprintf(\"%s.stdout\", taskName))\n\tstderrFilename := filepath.Join(taskLocal, fmt.Sprintf(\"%s.stderr\", taskName))\n\n\tstdo, err := os.OpenFile(stdoutFilename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error opening file to redirect stdout: %v\", err)\n\t}\n\n\tstde, err := os.OpenFile(stderrFilename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error opening file to redirect stderr: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"rkt\", cmdArgs...)\n\tcmd.Stdout = stdo\n\tcmd.Stderr = stde\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error running rkt: %v\", err)\n\t}\n\n\td.logger.Printf(\"[DEBUG] driver.rkt: started ACI %q with: %v\", img, cmd.Args)\n\th := &rktHandle{\n\t\tproc: cmd.Process,\n\t\timage: img,\n\t\tlogger: d.logger,\n\t\tdoneCh: make(chan struct{}),\n\t\twaitCh: make(chan *cstructs.WaitResult, 1),\n\t}\n\tgo h.run()\n\treturn h, nil\n}\n\nfunc (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {\n\t\/\/ Parse the handle\n\tpidBytes := []byte(strings.TrimPrefix(handleID, \"Rkt:\"))\n\tqpid := &rktPID{}\n\tif err := json.Unmarshal(pidBytes, qpid); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse Rkt handle '%s': %v\", handleID, err)\n\t}\n\n\t\/\/ Find the process\n\tproc, err := os.FindProcess(qpid.Pid)\n\tif proc == nil || err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to find Rkt PID %d: %v\", qpid.Pid, err)\n\t}\n\n\t\/\/ Return a driver handle\n\th := &rktHandle{\n\t\tproc: proc,\n\t\timage: qpid.Image,\n\t\tlogger: d.logger,\n\t\tdoneCh: make(chan struct{}),\n\t\twaitCh: make(chan *cstructs.WaitResult, 1),\n\t}\n\n\tgo h.run()\n\treturn h, nil\n}\n\nfunc (h *rktHandle) ID() string {\n\t\/\/ Return a handle to the PID\n\tpid := &rktPID{\n\t\tPid: h.proc.Pid,\n\t\tImage: h.image,\n\t}\n\tdata, err := json.Marshal(pid)\n\tif err != nil {\n\t\th.logger.Printf(\"[ERR] driver.rkt: failed to marshal rkt PID to JSON: %s\", err)\n\t}\n\treturn fmt.Sprintf(\"Rkt:%s\", string(data))\n}\n\nfunc (h *rktHandle) WaitCh() chan *cstructs.WaitResult {\n\treturn h.waitCh\n}\n\nfunc (h *rktHandle) Update(task *structs.Task) error {\n\t\/\/ Update is not possible\n\treturn nil\n}\n\n\/\/ Kill is used to terminate the task. We send an Interrupt\n\/\/ and then provide a 5 second grace period before doing a Kill.\nfunc (h *rktHandle) Kill() error {\n\th.proc.Signal(os.Interrupt)\n\tselect {\n\tcase <-h.doneCh:\n\t\treturn nil\n\tcase <-time.After(5 * time.Second):\n\t\treturn h.proc.Kill()\n\t}\n}\n\nfunc (h *rktHandle) run() {\n\tps, err := h.proc.Wait()\n\tclose(h.doneCh)\n\tcode := 0\n\tif !ps.Success() {\n\t\t\/\/ TODO: Better exit code parsing.\n\t\tcode = 1\n\t}\n\th.waitCh <- cstructs.NewWaitResult(code, 0, err)\n\tclose(h.waitCh)\n}\n<commit_msg>Consolidate if else conditions<commit_after>package driver\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/go-version\"\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/config\"\n\tcstructs \"github.com\/hashicorp\/nomad\/client\/driver\/structs\"\n\t\"github.com\/hashicorp\/nomad\/client\/fingerprint\"\n\t\"github.com\/hashicorp\/nomad\/helper\/args\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\nvar (\n\treRktVersion = regexp.MustCompile(`rkt version (\\d[.\\d]+)`)\n\treAppcVersion = regexp.MustCompile(`appc version (\\d[.\\d]+)`)\n)\n\nconst (\n\t\/\/ minRktVersion is the earliest supported version of rkt. rkt added support\n\t\/\/ for CPU and memory isolators in 0.14.0. We cannot support an earlier\n\t\/\/ version to maintain an uniform interface across all drivers\n\tminRktVersion = \"0.14.0\"\n\n\t\/\/ bytesToMB is the conversion from bytes to megabytes.\n\tbytesToMB = 1024 * 1024\n)\n\n\/\/ RktDriver is a driver for running images via Rkt\n\/\/ We attempt to chose sane defaults for now, with more configuration available\n\/\/ planned in the future\ntype RktDriver struct {\n\tDriverContext\n\tfingerprint.StaticFingerprinter\n}\n\ntype RktDriverConfig struct {\n\tImageName string `mapstructure:\"image\"`\n\tArgs []string `mapstructure:\"args\"`\n}\n\n\/\/ rktHandle is returned from Start\/Open as a handle to the PID\ntype rktHandle struct {\n\tproc *os.Process\n\timage string\n\tlogger *log.Logger\n\twaitCh chan *cstructs.WaitResult\n\tdoneCh chan struct{}\n}\n\n\/\/ rktPID is a struct to map the pid running the process to the vm image on\n\/\/ disk\ntype rktPID struct {\n\tPid int\n\tImage string\n}\n\n\/\/ NewRktDriver is used to create a new exec driver\nfunc NewRktDriver(ctx *DriverContext) Driver {\n\treturn &RktDriver{DriverContext: *ctx}\n}\n\nfunc (d *RktDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {\n\t\/\/ Only enable if we are root when running on non-windows systems.\n\tif runtime.GOOS != \"windows\" && syscall.Geteuid() != 0 {\n\t\td.logger.Printf(\"[DEBUG] driver.rkt: must run as root user, disabling\")\n\t\treturn false, nil\n\t}\n\n\toutBytes, err := exec.Command(\"rkt\", \"version\").Output()\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\tout := strings.TrimSpace(string(outBytes))\n\n\trktMatches := reRktVersion.FindStringSubmatch(out)\n\tappcMatches := reAppcVersion.FindStringSubmatch(out)\n\tif len(rktMatches) != 2 || len(appcMatches) != 2 {\n\t\treturn false, fmt.Errorf(\"Unable to parse Rkt version string: %#v\", rktMatches)\n\t}\n\n\tnode.Attributes[\"driver.rkt\"] = \"1\"\n\tnode.Attributes[\"driver.rkt.version\"] = rktMatches[1]\n\tnode.Attributes[\"driver.rkt.appc.version\"] = appcMatches[1]\n\n\tminVersion, _ := version.NewVersion(minRktVersion)\n\tcurrentVersion, _ := version.NewVersion(node.Attributes[\"driver.rkt.version\"])\n\tif currentVersion.LessThan(minVersion) {\n\t\t\/\/ Do not allow rkt < 0.14.0\n\t\td.logger.Printf(\"[WARN] driver.rkt: please upgrade rkt to a version >= %s\", minVersion)\n\t\tnode.Attributes[\"driver.rkt\"] = \"0\"\n\t}\n\treturn true, nil\n}\n\n\/\/ Run an existing Rkt image.\nfunc (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {\n\tvar driverConfig RktDriverConfig\n\tif err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Validate that the config is valid.\n\timg := driverConfig.ImageName\n\tif img == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing ACI image for rkt\")\n\t}\n\n\t\/\/ Get the tasks local directory.\n\ttaskName := d.DriverContext.taskName\n\ttaskDir, ok := ctx.AllocDir.TaskDirs[taskName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Could not find task directory for task: %v\", d.DriverContext.taskName)\n\t}\n\ttaskLocal := filepath.Join(taskDir, allocdir.TaskLocal)\n\n\t\/\/ Build the command.\n\tvar cmdArgs []string\n\n\t\/\/ Add the given trust prefix\n\ttrustPrefix, trustCmd := task.Config[\"trust_prefix\"]\n\tif trustCmd {\n\t\tvar outBuf, errBuf bytes.Buffer\n\t\tcmd := exec.Command(\"rkt\", \"trust\", fmt.Sprintf(\"--prefix=%s\", trustPrefix))\n\t\tcmd.Stdout = &outBuf\n\t\tcmd.Stderr = &errBuf\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error running rkt trust: %s\\n\\nOutput: %s\\n\\nError: %s\",\n\t\t\t\terr, outBuf.String(), errBuf.String())\n\t\t}\n\t\td.logger.Printf(\"[DEBUG] driver.rkt: added trust prefix: %q\", trustPrefix)\n\t} else {\n\t\t\/\/ Disble signature verification if the trust command was not run.\n\t\tcmdArgs = append(cmdArgs, \"--insecure-skip-verify\")\n\t}\n\n\t\/\/ Inject the environment variables.\n\tenvVars := TaskEnvironmentVariables(ctx, task)\n\n\t\/\/ Clear the task directories as they are not currently supported.\n\tenvVars.ClearTaskLocalDir()\n\tenvVars.ClearAllocDir()\n\n\tfor k, v := range envVars.Map() {\n\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--set-env=%v=%v\", k, v))\n\t}\n\n\t\/\/ Append the run command.\n\tcmdArgs = append(cmdArgs, \"run\", \"--mds-register=false\", img)\n\n\t\/\/ Check if the user has overriden the exec command.\n\tif execCmd, ok := task.Config[\"command\"]; ok {\n\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--exec=%v\", execCmd))\n\t}\n\n\tif task.Resources.MemoryMB == 0 {\n\t\treturn nil, fmt.Errorf(\"Memory limit cannot be zero\")\n\t}\n\tif task.Resources.CPU == 0 {\n\t\treturn nil, fmt.Errorf(\"CPU limit cannot be zero\")\n\t}\n\n\t\/\/ Add memory isolator\n\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--memory=%vM\", int64(task.Resources.MemoryMB)*bytesToMB))\n\n\t\/\/ Add CPU isolator\n\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"--cpu=%vm\", int64(task.Resources.CPU)))\n\n\t\/\/ Add user passed arguments.\n\tif len(driverConfig.Args) != 0 {\n\t\tparsed := args.ParseAndReplace(driverConfig.Args, envVars.Map())\n\n\t\t\/\/ Need to start arguments with \"--\"\n\t\tif len(parsed) > 0 {\n\t\t\tcmdArgs = append(cmdArgs, \"--\")\n\t\t}\n\n\t\tfor _, arg := range parsed {\n\t\t\tcmdArgs = append(cmdArgs, fmt.Sprintf(\"%v\", arg))\n\t\t}\n\t}\n\n\t\/\/ Create files to capture stdin and out.\n\tstdoutFilename := filepath.Join(taskLocal, fmt.Sprintf(\"%s.stdout\", taskName))\n\tstderrFilename := filepath.Join(taskLocal, fmt.Sprintf(\"%s.stderr\", taskName))\n\n\tstdo, err := os.OpenFile(stdoutFilename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error opening file to redirect stdout: %v\", err)\n\t}\n\n\tstde, err := os.OpenFile(stderrFilename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error opening file to redirect stderr: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"rkt\", cmdArgs...)\n\tcmd.Stdout = stdo\n\tcmd.Stderr = stde\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error running rkt: %v\", err)\n\t}\n\n\td.logger.Printf(\"[DEBUG] driver.rkt: started ACI %q with: %v\", img, cmd.Args)\n\th := &rktHandle{\n\t\tproc: cmd.Process,\n\t\timage: img,\n\t\tlogger: d.logger,\n\t\tdoneCh: make(chan struct{}),\n\t\twaitCh: make(chan *cstructs.WaitResult, 1),\n\t}\n\tgo h.run()\n\treturn h, nil\n}\n\nfunc (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {\n\t\/\/ Parse the handle\n\tpidBytes := []byte(strings.TrimPrefix(handleID, \"Rkt:\"))\n\tqpid := &rktPID{}\n\tif err := json.Unmarshal(pidBytes, qpid); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse Rkt handle '%s': %v\", handleID, err)\n\t}\n\n\t\/\/ Find the process\n\tproc, err := os.FindProcess(qpid.Pid)\n\tif proc == nil || err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to find Rkt PID %d: %v\", qpid.Pid, err)\n\t}\n\n\t\/\/ Return a driver handle\n\th := &rktHandle{\n\t\tproc: proc,\n\t\timage: qpid.Image,\n\t\tlogger: d.logger,\n\t\tdoneCh: make(chan struct{}),\n\t\twaitCh: make(chan *cstructs.WaitResult, 1),\n\t}\n\n\tgo h.run()\n\treturn h, nil\n}\n\nfunc (h *rktHandle) ID() string {\n\t\/\/ Return a handle to the PID\n\tpid := &rktPID{\n\t\tPid: h.proc.Pid,\n\t\tImage: h.image,\n\t}\n\tdata, err := json.Marshal(pid)\n\tif err != nil {\n\t\th.logger.Printf(\"[ERR] driver.rkt: failed to marshal rkt PID to JSON: %s\", err)\n\t}\n\treturn fmt.Sprintf(\"Rkt:%s\", string(data))\n}\n\nfunc (h *rktHandle) WaitCh() chan *cstructs.WaitResult {\n\treturn h.waitCh\n}\n\nfunc (h *rktHandle) Update(task *structs.Task) error {\n\t\/\/ Update is not possible\n\treturn nil\n}\n\n\/\/ Kill is used to terminate the task. We send an Interrupt\n\/\/ and then provide a 5 second grace period before doing a Kill.\nfunc (h *rktHandle) Kill() error {\n\th.proc.Signal(os.Interrupt)\n\tselect {\n\tcase <-h.doneCh:\n\t\treturn nil\n\tcase <-time.After(5 * time.Second):\n\t\treturn h.proc.Kill()\n\t}\n}\n\nfunc (h *rktHandle) run() {\n\tps, err := h.proc.Wait()\n\tclose(h.doneCh)\n\tcode := 0\n\tif !ps.Success() {\n\t\t\/\/ TODO: Better exit code parsing.\n\t\tcode = 1\n\t}\n\th.waitCh <- cstructs.NewWaitResult(code, 0, err)\n\tclose(h.waitCh)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage client\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3db\/clock\"\n\t\"github.com\/m3db\/m3db\/generated\/thrift\/rpc\"\n\t\"github.com\/m3db\/m3db\/topology\"\n\n\t\"github.com\/uber\/tchannel-go\/thrift\"\n)\n\nvar (\n\terrQueueNotOpen = errors.New(\"host operation queue not open\")\n\terrQueueUnknownOperation = errors.New(\"host operation queue received unknown operation\")\n\terrQueueFetchNoResponse = errors.New(\"host operation queue did not receive response for given fetch\")\n)\n\ntype queue struct {\n\tsync.RWMutex\n\n\topts Options\n\tnowFn clock.NowFn\n\thost topology.Host\n\tconnPool connectionPool\n\twriteBatchRequestPool writeBatchRequestPool\n\tidDatapointArrayPool idDatapointArrayPool\n\tsize int\n\tops []op\n\topsSumSize int\n\topsLastRotatedAt time.Time\n\topsArrayPool opArrayPool\n\tdrainIn chan []op\n\tstate state\n}\n\nfunc newHostQueue(\n\thost topology.Host,\n\twriteBatchRequestPool writeBatchRequestPool,\n\tidDatapointArrayPool idDatapointArrayPool,\n\topts Options,\n) hostQueue {\n\tsize := opts.HostQueueOpsFlushSize()\n\n\topArrayPoolCapacity := int(math.Max(float64(size), float64(opts.WriteBatchSize())))\n\topArrayPool := newOpArrayPool(opts.HostQueueOpsArrayPoolSize(), opArrayPoolCapacity)\n\topArrayPool.Init()\n\n\treturn &queue{\n\t\topts: opts,\n\t\tnowFn: opts.ClockOptions().NowFn(),\n\t\thost: host,\n\t\tconnPool: newConnectionPool(host, opts),\n\t\twriteBatchRequestPool: writeBatchRequestPool,\n\t\tidDatapointArrayPool: idDatapointArrayPool,\n\t\tsize: size,\n\t\tops: opArrayPool.Get(),\n\t\topsArrayPool: opArrayPool,\n\t\t\/\/ NB(r): specifically use non-buffered queue for single flush at a time\n\t\tdrainIn: make(chan []op),\n\t}\n}\n\nfunc (q *queue) Open() {\n\tq.Lock()\n\tdefer q.Unlock()\n\n\tif q.state != stateNotOpen {\n\t\treturn\n\t}\n\n\tq.state = stateOpen\n\n\t\/\/ Open the connection pool\n\tq.connPool.Open()\n\n\t\/\/ Continually drain the queue until closed\n\tgo q.drain()\n\n\tflushInterval := q.opts.HostQueueOpsFlushInterval()\n\tif flushInterval > 0 {\n\t\t\/\/ Continually flush the queue at given interval if set\n\t\tgo q.flushEvery(flushInterval)\n\t}\n}\n\nfunc (q *queue) flushEvery(interval time.Duration) {\n\t\/\/ sleepForOverride used change the next sleep based on last ops rotation\n\tvar sleepForOverride time.Duration\n\tfor {\n\t\tsleepFor := interval\n\t\tif sleepForOverride > 0 {\n\t\t\tsleepFor = sleepForOverride\n\t\t\tsleepForOverride = 0\n\t\t}\n\n\t\ttime.Sleep(sleepFor)\n\n\t\tq.RLock()\n\t\tif q.state != stateOpen {\n\t\t\tq.RUnlock()\n\t\t\treturn\n\t\t}\n\t\tlastRotateAt := q.opsLastRotatedAt\n\t\tq.RUnlock()\n\n\t\tsinceLastRotate := q.nowFn().Sub(lastRotateAt)\n\t\tif sinceLastRotate < interval {\n\t\t\t\/\/ Rotated already recently, sleep until we would next consider flushing\n\t\t\tsleepForOverride = interval - sinceLastRotate\n\t\t\tcontinue\n\t\t}\n\n\t\tq.Lock()\n\t\tneedsDrain := q.rotateOpsWithLock()\n\t\tq.Unlock()\n\n\t\tif len(needsDrain) != 0 {\n\t\t\tq.RLock()\n\t\t\tif q.state != stateOpen {\n\t\t\t\tq.RUnlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Need to hold lock while writing to the drainIn\n\t\t\t\/\/ channel to ensure it has not been closed\n\t\t\tq.drainIn <- needsDrain\n\t\t\tq.RUnlock()\n\t\t}\n\t}\n}\n\nfunc (q *queue) rotateOpsWithLock() []op {\n\tif q.opsSumSize == 0 {\n\t\t\/\/ No need to rotate as queue is empty\n\t\treturn nil\n\t}\n\n\tneedsDrain := q.ops\n\n\t\/\/ Reset ops\n\tq.ops = q.opsArrayPool.Get()\n\tq.opsSumSize = 0\n\tq.opsLastRotatedAt = q.nowFn()\n\n\treturn needsDrain\n}\n\nfunc (q *queue) drain() {\n\tvar (\n\t\twgAll = &sync.WaitGroup{}\n\t\tcurrWriteOpsByNamespace = make(map[string][]op)\n\t\tcurrIDDatapointsByNamespace = make(map[string][]*rpc.IDDatapoint)\n\t\twriteBatchSize = q.opts.WriteBatchSize()\n\t)\n\n\tfor {\n\t\tops, ok := <-q.drainIn\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tvar (\n\t\t\tcurrWriteOps []op\n\t\t\tcurrIDDatapoints []*rpc.IDDatapoint\n\t\t\topsLen = len(ops)\n\t\t)\n\t\tfor i := 0; i < opsLen; i++ {\n\t\t\tswitch v := ops[i].(type) {\n\t\t\tcase *writeOp:\n\t\t\t\tnamespace := v.request.NameSpace\n\t\t\t\tcurrWriteOps = currWriteOpsByNamespace[namespace]\n\t\t\t\tcurrIDDatapoints = currIDDatapointsByNamespace[namespace]\n\t\t\t\tif currWriteOps == nil {\n\t\t\t\t\tcurrWriteOps = q.opsArrayPool.Get()\n\t\t\t\t\tcurrIDDatapoints = q.idDatapointArrayPool.Get()\n\t\t\t\t}\n\n\t\t\t\tcurrWriteOps = append(currWriteOps, ops[i])\n\t\t\t\tcurrIDDatapoints = append(currIDDatapoints, v.request.IdDatapoint)\n\t\t\t\tcurrWriteOpsByNamespace[namespace] = currWriteOps\n\t\t\t\tcurrIDDatapointsByNamespace[namespace] = currIDDatapoints\n\n\t\t\t\tif len(currWriteOps) == writeBatchSize {\n\t\t\t\t\t\/\/ Reached write batch limit, write async and reset\n\t\t\t\t\tq.asyncWrite(wgAll, namespace, currWriteOps, currIDDatapoints)\n\t\t\t\t\tcurrWriteOpsByNamespace[namespace] = nil\n\t\t\t\t\tcurrIDDatapointsByNamespace[namespace] = nil\n\t\t\t\t}\n\t\t\tcase *fetchBatchOp:\n\t\t\t\tq.asyncFetch(wgAll, v)\n\t\t\tcase *truncateOp:\n\t\t\t\tq.asyncTruncate(wgAll, v)\n\t\t\tdefault:\n\t\t\t\tcompletionFn := ops[i].CompletionFn()\n\t\t\t\tcompletionFn(nil, errQueueUnknownOperation)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If any outstanding write ops, async write\n\t\tfor namespace, writeOps := range currWriteOpsByNamespace {\n\t\t\tif len(writeOps) > 0 {\n\t\t\t\tq.asyncWrite(wgAll, namespace, writeOps, currIDDatapointsByNamespace[namespace])\n\t\t\t\tcurrWriteOpsByNamespace[namespace] = nil\n\t\t\t\tcurrIDDatapointsByNamespace[namespace] = nil\n\t\t\t}\n\t\t}\n\n\t\tif ops != nil {\n\t\t\tq.opsArrayPool.Put(ops)\n\t\t}\n\t}\n\n\t\/\/ Close the connection pool after all requests done\n\twgAll.Wait()\n\tq.connPool.Close()\n}\n\nfunc (q *queue) asyncWrite(wg *sync.WaitGroup, namespace string, ops []op, elems []*rpc.IDDatapoint) {\n\twg.Add(1)\n\t\/\/ TODO(r): Use a worker pool to avoid creating new go routines for async writes\n\tgo func() {\n\t\treq := q.writeBatchRequestPool.Get()\n\t\treq.NameSpace = namespace\n\t\treq.Elements = elems\n\n\t\t\/\/ NB(r): Defer is slow in the hot path unfortunately\n\t\tcleanup := func() {\n\t\t\tq.writeBatchRequestPool.Put(req)\n\t\t\tq.idDatapointArrayPool.Put(elems)\n\t\t\tq.opsArrayPool.Put(ops)\n\t\t\twg.Done()\n\t\t}\n\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\tcallAllCompletionFns(ops, nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.WriteRequestTimeout())\n\t\terr = client.WriteBatch(ctx, req)\n\t\tif err == nil {\n\t\t\t\/\/ All succeeded\n\t\t\tcallAllCompletionFns(ops, nil, nil)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tif batchErrs, ok := err.(*rpc.WriteBatchErrors); ok {\n\t\t\t\/\/ Callback all writes with errors\n\t\t\thasErr := make(map[int]struct{})\n\t\t\tfor _, batchErr := range batchErrs.Errors {\n\t\t\t\top := ops[batchErr.Index]\n\t\t\t\top.CompletionFn()(nil, batchErr.Err)\n\t\t\t\thasErr[int(batchErr.Index)] = struct{}{}\n\t\t\t}\n\t\t\t\/\/ Callback all writes with no errors\n\t\t\tfor i := range ops {\n\t\t\t\tif _, ok := hasErr[i]; !ok {\n\t\t\t\t\t\/\/ No error\n\t\t\t\t\tops[i].CompletionFn()(nil, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Entire batch failed\n\t\tcallAllCompletionFns(ops, nil, err)\n\t\tcleanup()\n\t}()\n}\n\nfunc (q *queue) asyncFetch(wg *sync.WaitGroup, op *fetchBatchOp) {\n\twg.Add(1)\n\t\/\/ TODO(r): Use a worker pool to avoid creating new go routines for async fetches\n\tgo func() {\n\t\t\/\/ NB(r): Defer is slow in the hot path unfortunately\n\t\tcleanup := wg.Done\n\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\top.completeAll(nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.FetchRequestTimeout())\n\t\tresult, err := client.FetchRawBatch(ctx, &op.request)\n\t\tif err != nil {\n\t\t\top.completeAll(nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tresultLen := len(result.Elements)\n\t\tfor i := 0; i < op.Size(); i++ {\n\t\t\tif !(i < resultLen) {\n\t\t\t\t\/\/ No results for this entry, in practice should never occur\n\t\t\t\top.complete(i, nil, errQueueFetchNoResponse)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif result.Elements[i].Err != nil {\n\t\t\t\top.complete(i, nil, result.Elements[i].Err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\top.complete(i, result.Elements[i].Segments, nil)\n\t\t}\n\t\tcleanup()\n\t}()\n}\n\nfunc (q *queue) asyncTruncate(wg *sync.WaitGroup, op *truncateOp) {\n\twg.Add(1)\n\n\tgo func() {\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\top.completionFn(nil, err)\n\t\t\twg.Done()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.TruncateRequestTimeout())\n\t\tif res, err := client.Truncate(ctx, &op.request); err != nil {\n\t\t\top.completionFn(nil, err)\n\t\t} else {\n\t\t\top.completionFn(res, nil)\n\t\t}\n\n\t\twg.Done()\n\t}()\n}\n\nfunc (q *queue) Len() int {\n\tq.RLock()\n\tv := q.opsSumSize\n\tq.RUnlock()\n\treturn v\n}\n\nfunc (q *queue) Enqueue(o op) error {\n\tvar needsDrain []op\n\tq.Lock()\n\tif q.state != stateOpen {\n\t\tq.Unlock()\n\t\treturn errQueueNotOpen\n\t}\n\tq.ops = append(q.ops, o)\n\tq.opsSumSize += o.Size()\n\t\/\/ If queue is full flush\n\tif q.opsSumSize >= q.size {\n\t\tneedsDrain = q.rotateOpsWithLock()\n\t}\n\t\/\/ Need to hold lock while writing to the drainIn\n\t\/\/ channel to ensure it has not been closed\n\tif len(needsDrain) != 0 {\n\t\tq.drainIn <- needsDrain\n\t}\n\tq.Unlock()\n\treturn nil\n}\n\nfunc (q *queue) Host() topology.Host {\n\treturn q.host\n}\n\nfunc (q *queue) ConnectionCount() int {\n\treturn q.connPool.ConnectionCount()\n}\n\nfunc (q *queue) ConnectionPool() connectionPool {\n\treturn q.connPool\n}\n\nfunc (q *queue) Close() {\n\tq.Lock()\n\tif q.state != stateOpen {\n\t\tq.Unlock()\n\t\treturn\n\t}\n\n\tq.state = stateClosed\n\t\/\/ Closed drainIn channel in lock to ensure writers know\n\t\/\/ consistently if channel is open or not by checking state\n\tclose(q.drainIn)\n\tq.Unlock()\n}\n<commit_msg>Make host queue drain channel buffered to match size of op arrays pool (#89)<commit_after>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage client\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3db\/clock\"\n\t\"github.com\/m3db\/m3db\/generated\/thrift\/rpc\"\n\t\"github.com\/m3db\/m3db\/topology\"\n\n\t\"github.com\/uber\/tchannel-go\/thrift\"\n)\n\nvar (\n\terrQueueNotOpen = errors.New(\"host operation queue not open\")\n\terrQueueUnknownOperation = errors.New(\"host operation queue received unknown operation\")\n\terrQueueFetchNoResponse = errors.New(\"host operation queue did not receive response for given fetch\")\n)\n\ntype queue struct {\n\tsync.RWMutex\n\n\topts Options\n\tnowFn clock.NowFn\n\thost topology.Host\n\tconnPool connectionPool\n\twriteBatchRequestPool writeBatchRequestPool\n\tidDatapointArrayPool idDatapointArrayPool\n\tsize int\n\tops []op\n\topsSumSize int\n\topsLastRotatedAt time.Time\n\topsArrayPool opArrayPool\n\tdrainIn chan []op\n\tstate state\n}\n\nfunc newHostQueue(\n\thost topology.Host,\n\twriteBatchRequestPool writeBatchRequestPool,\n\tidDatapointArrayPool idDatapointArrayPool,\n\topts Options,\n) hostQueue {\n\tsize := opts.HostQueueOpsFlushSize()\n\n\topsArraysLen := opts.HostQueueOpsArrayPoolSize()\n\topArrayPoolCapacity := int(math.Max(float64(size), float64(opts.WriteBatchSize())))\n\topArrayPool := newOpArrayPool(opsArraysLen, opArrayPoolCapacity)\n\topArrayPool.Init()\n\n\treturn &queue{\n\t\topts: opts,\n\t\tnowFn: opts.ClockOptions().NowFn(),\n\t\thost: host,\n\t\tconnPool: newConnectionPool(host, opts),\n\t\twriteBatchRequestPool: writeBatchRequestPool,\n\t\tidDatapointArrayPool: idDatapointArrayPool,\n\t\tsize: size,\n\t\tops: opArrayPool.Get(),\n\t\topsArrayPool: opArrayPool,\n\t\tdrainIn: make(chan []op, opsArraysLen),\n\t}\n}\n\nfunc (q *queue) Open() {\n\tq.Lock()\n\tdefer q.Unlock()\n\n\tif q.state != stateNotOpen {\n\t\treturn\n\t}\n\n\tq.state = stateOpen\n\n\t\/\/ Open the connection pool\n\tq.connPool.Open()\n\n\t\/\/ Continually drain the queue until closed\n\tgo q.drain()\n\n\tflushInterval := q.opts.HostQueueOpsFlushInterval()\n\tif flushInterval > 0 {\n\t\t\/\/ Continually flush the queue at given interval if set\n\t\tgo q.flushEvery(flushInterval)\n\t}\n}\n\nfunc (q *queue) flushEvery(interval time.Duration) {\n\t\/\/ sleepForOverride used change the next sleep based on last ops rotation\n\tvar sleepForOverride time.Duration\n\tfor {\n\t\tsleepFor := interval\n\t\tif sleepForOverride > 0 {\n\t\t\tsleepFor = sleepForOverride\n\t\t\tsleepForOverride = 0\n\t\t}\n\n\t\ttime.Sleep(sleepFor)\n\n\t\tq.RLock()\n\t\tif q.state != stateOpen {\n\t\t\tq.RUnlock()\n\t\t\treturn\n\t\t}\n\t\tlastRotateAt := q.opsLastRotatedAt\n\t\tq.RUnlock()\n\n\t\tsinceLastRotate := q.nowFn().Sub(lastRotateAt)\n\t\tif sinceLastRotate < interval {\n\t\t\t\/\/ Rotated already recently, sleep until we would next consider flushing\n\t\t\tsleepForOverride = interval - sinceLastRotate\n\t\t\tcontinue\n\t\t}\n\n\t\tq.Lock()\n\t\tneedsDrain := q.rotateOpsWithLock()\n\t\tq.Unlock()\n\n\t\tif len(needsDrain) != 0 {\n\t\t\tq.RLock()\n\t\t\tif q.state != stateOpen {\n\t\t\t\tq.RUnlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Need to hold lock while writing to the drainIn\n\t\t\t\/\/ channel to ensure it has not been closed\n\t\t\tq.drainIn <- needsDrain\n\t\t\tq.RUnlock()\n\t\t}\n\t}\n}\n\nfunc (q *queue) rotateOpsWithLock() []op {\n\tif q.opsSumSize == 0 {\n\t\t\/\/ No need to rotate as queue is empty\n\t\treturn nil\n\t}\n\n\tneedsDrain := q.ops\n\n\t\/\/ Reset ops\n\tq.ops = q.opsArrayPool.Get()\n\tq.opsSumSize = 0\n\tq.opsLastRotatedAt = q.nowFn()\n\n\treturn needsDrain\n}\n\nfunc (q *queue) drain() {\n\tvar (\n\t\twgAll = &sync.WaitGroup{}\n\t\tcurrWriteOpsByNamespace = make(map[string][]op)\n\t\tcurrIDDatapointsByNamespace = make(map[string][]*rpc.IDDatapoint)\n\t\twriteBatchSize = q.opts.WriteBatchSize()\n\t)\n\n\tfor {\n\t\tops, ok := <-q.drainIn\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tvar (\n\t\t\tcurrWriteOps []op\n\t\t\tcurrIDDatapoints []*rpc.IDDatapoint\n\t\t\topsLen = len(ops)\n\t\t)\n\t\tfor i := 0; i < opsLen; i++ {\n\t\t\tswitch v := ops[i].(type) {\n\t\t\tcase *writeOp:\n\t\t\t\tnamespace := v.request.NameSpace\n\t\t\t\tcurrWriteOps = currWriteOpsByNamespace[namespace]\n\t\t\t\tcurrIDDatapoints = currIDDatapointsByNamespace[namespace]\n\t\t\t\tif currWriteOps == nil {\n\t\t\t\t\tcurrWriteOps = q.opsArrayPool.Get()\n\t\t\t\t\tcurrIDDatapoints = q.idDatapointArrayPool.Get()\n\t\t\t\t}\n\n\t\t\t\tcurrWriteOps = append(currWriteOps, ops[i])\n\t\t\t\tcurrIDDatapoints = append(currIDDatapoints, v.request.IdDatapoint)\n\t\t\t\tcurrWriteOpsByNamespace[namespace] = currWriteOps\n\t\t\t\tcurrIDDatapointsByNamespace[namespace] = currIDDatapoints\n\n\t\t\t\tif len(currWriteOps) == writeBatchSize {\n\t\t\t\t\t\/\/ Reached write batch limit, write async and reset\n\t\t\t\t\tq.asyncWrite(wgAll, namespace, currWriteOps, currIDDatapoints)\n\t\t\t\t\tcurrWriteOpsByNamespace[namespace] = nil\n\t\t\t\t\tcurrIDDatapointsByNamespace[namespace] = nil\n\t\t\t\t}\n\t\t\tcase *fetchBatchOp:\n\t\t\t\tq.asyncFetch(wgAll, v)\n\t\t\tcase *truncateOp:\n\t\t\t\tq.asyncTruncate(wgAll, v)\n\t\t\tdefault:\n\t\t\t\tcompletionFn := ops[i].CompletionFn()\n\t\t\t\tcompletionFn(nil, errQueueUnknownOperation)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If any outstanding write ops, async write\n\t\tfor namespace, writeOps := range currWriteOpsByNamespace {\n\t\t\tif len(writeOps) > 0 {\n\t\t\t\tq.asyncWrite(wgAll, namespace, writeOps, currIDDatapointsByNamespace[namespace])\n\t\t\t\tcurrWriteOpsByNamespace[namespace] = nil\n\t\t\t\tcurrIDDatapointsByNamespace[namespace] = nil\n\t\t\t}\n\t\t}\n\n\t\tif ops != nil {\n\t\t\tq.opsArrayPool.Put(ops)\n\t\t}\n\t}\n\n\t\/\/ Close the connection pool after all requests done\n\twgAll.Wait()\n\tq.connPool.Close()\n}\n\nfunc (q *queue) asyncWrite(wg *sync.WaitGroup, namespace string, ops []op, elems []*rpc.IDDatapoint) {\n\twg.Add(1)\n\t\/\/ TODO(r): Use a worker pool to avoid creating new go routines for async writes\n\tgo func() {\n\t\treq := q.writeBatchRequestPool.Get()\n\t\treq.NameSpace = namespace\n\t\treq.Elements = elems\n\n\t\t\/\/ NB(r): Defer is slow in the hot path unfortunately\n\t\tcleanup := func() {\n\t\t\tq.writeBatchRequestPool.Put(req)\n\t\t\tq.idDatapointArrayPool.Put(elems)\n\t\t\tq.opsArrayPool.Put(ops)\n\t\t\twg.Done()\n\t\t}\n\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\tcallAllCompletionFns(ops, nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.WriteRequestTimeout())\n\t\terr = client.WriteBatch(ctx, req)\n\t\tif err == nil {\n\t\t\t\/\/ All succeeded\n\t\t\tcallAllCompletionFns(ops, nil, nil)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tif batchErrs, ok := err.(*rpc.WriteBatchErrors); ok {\n\t\t\t\/\/ Callback all writes with errors\n\t\t\thasErr := make(map[int]struct{})\n\t\t\tfor _, batchErr := range batchErrs.Errors {\n\t\t\t\top := ops[batchErr.Index]\n\t\t\t\top.CompletionFn()(nil, batchErr.Err)\n\t\t\t\thasErr[int(batchErr.Index)] = struct{}{}\n\t\t\t}\n\t\t\t\/\/ Callback all writes with no errors\n\t\t\tfor i := range ops {\n\t\t\t\tif _, ok := hasErr[i]; !ok {\n\t\t\t\t\t\/\/ No error\n\t\t\t\t\tops[i].CompletionFn()(nil, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Entire batch failed\n\t\tcallAllCompletionFns(ops, nil, err)\n\t\tcleanup()\n\t}()\n}\n\nfunc (q *queue) asyncFetch(wg *sync.WaitGroup, op *fetchBatchOp) {\n\twg.Add(1)\n\t\/\/ TODO(r): Use a worker pool to avoid creating new go routines for async fetches\n\tgo func() {\n\t\t\/\/ NB(r): Defer is slow in the hot path unfortunately\n\t\tcleanup := wg.Done\n\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\top.completeAll(nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.FetchRequestTimeout())\n\t\tresult, err := client.FetchRawBatch(ctx, &op.request)\n\t\tif err != nil {\n\t\t\top.completeAll(nil, err)\n\t\t\tcleanup()\n\t\t\treturn\n\t\t}\n\n\t\tresultLen := len(result.Elements)\n\t\tfor i := 0; i < op.Size(); i++ {\n\t\t\tif !(i < resultLen) {\n\t\t\t\t\/\/ No results for this entry, in practice should never occur\n\t\t\t\top.complete(i, nil, errQueueFetchNoResponse)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif result.Elements[i].Err != nil {\n\t\t\t\top.complete(i, nil, result.Elements[i].Err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\top.complete(i, result.Elements[i].Segments, nil)\n\t\t}\n\t\tcleanup()\n\t}()\n}\n\nfunc (q *queue) asyncTruncate(wg *sync.WaitGroup, op *truncateOp) {\n\twg.Add(1)\n\n\tgo func() {\n\t\tclient, err := q.connPool.NextClient()\n\t\tif err != nil {\n\t\t\t\/\/ No client available\n\t\t\top.completionFn(nil, err)\n\t\t\twg.Done()\n\t\t\treturn\n\t\t}\n\n\t\tctx, _ := thrift.NewContext(q.opts.TruncateRequestTimeout())\n\t\tif res, err := client.Truncate(ctx, &op.request); err != nil {\n\t\t\top.completionFn(nil, err)\n\t\t} else {\n\t\t\top.completionFn(res, nil)\n\t\t}\n\n\t\twg.Done()\n\t}()\n}\n\nfunc (q *queue) Len() int {\n\tq.RLock()\n\tv := q.opsSumSize\n\tq.RUnlock()\n\treturn v\n}\n\nfunc (q *queue) Enqueue(o op) error {\n\tvar needsDrain []op\n\tq.Lock()\n\tif q.state != stateOpen {\n\t\tq.Unlock()\n\t\treturn errQueueNotOpen\n\t}\n\tq.ops = append(q.ops, o)\n\tq.opsSumSize += o.Size()\n\t\/\/ If queue is full flush\n\tif q.opsSumSize >= q.size {\n\t\tneedsDrain = q.rotateOpsWithLock()\n\t}\n\t\/\/ Need to hold lock while writing to the drainIn\n\t\/\/ channel to ensure it has not been closed\n\tif len(needsDrain) != 0 {\n\t\tq.drainIn <- needsDrain\n\t}\n\tq.Unlock()\n\treturn nil\n}\n\nfunc (q *queue) Host() topology.Host {\n\treturn q.host\n}\n\nfunc (q *queue) ConnectionCount() int {\n\treturn q.connPool.ConnectionCount()\n}\n\nfunc (q *queue) ConnectionPool() connectionPool {\n\treturn q.connPool\n}\n\nfunc (q *queue) Close() {\n\tq.Lock()\n\tif q.state != stateOpen {\n\t\tq.Unlock()\n\t\treturn\n\t}\n\n\tq.state = stateClosed\n\t\/\/ Closed drainIn channel in lock to ensure writers know\n\t\/\/ consistently if channel is open or not by checking state\n\tclose(q.drainIn)\n\tq.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/kataras\/iris\/mvc\"\n)\n\nfunc main() {\n\tapp := iris.New()\n\tmvc.New(app.Party(\"\/\")).Register(&globalVisitorsController{visits: 0})\n\n\t\/\/ http:\/\/localhost:8080\n\tapp.Run(iris.Addr(\":8080\"))\n}\n\ntype globalVisitorsController struct {\n\t\/\/ When a singleton controller is used then concurent safe access is up to the developers, because\n\t\/\/ all clients share the same controller instance instead.\n\t\/\/ Note that any controller's methods\n\t\/\/ are per-client, but the struct's field can be shared across multiple clients if the structure\n\t\/\/ does not have any dynamic struct field depenendies that depend on the iris.Context\n\t\/\/ and ALL field's values are NOT zero, at this case we use uint64 which it's no zero (even if we didn't set it\n\t\/\/ manually ease-of-understand reasons) because it's a value of &{0}.\n\t\/\/ All the above declares a Singleton, note that you don't have to write a single line of code to do this, Iris is smart enough.\n\t\/\/\n\t\/\/ see `Get`.\n\tvisits uint64\n}\n\nfunc (c *globalVisitorsController) Get() string {\n\tcount := atomic.AddUint64(&c.visits, 1)\n\treturn fmt.Sprintf(\"Total visitors: %d\", count)\n}\n<commit_msg>fix typo<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/kataras\/iris\/mvc\"\n)\n\nfunc main() {\n\tapp := iris.New()\n\tmvc.New(app.Party(\"\/\")).Register(&globalVisitorsController{visits: 0})\n\n\t\/\/ http:\/\/localhost:8080\n\tapp.Run(iris.Addr(\":8080\"))\n}\n\ntype globalVisitorsController struct {\n\t\/\/ When a singleton controller is used then concurent safe access is up to the developers, because\n\t\/\/ all clients share the same controller instance instead.\n\t\/\/ Note that any controller's methods\n\t\/\/ are per-client, but the struct's field can be shared across multiple clients if the structure\n\t\/\/ does not have any dynamic struct field dependencies that depend on the iris.Context\n\t\/\/ and ALL field's values are NOT zero, at this case we use uint64 which it's no zero (even if we didn't set it\n\t\/\/ manually ease-of-understand reasons) because it's a value of &{0}.\n\t\/\/ All the above declares a Singleton, note that you don't have to write a single line of code to do this, Iris is smart enough.\n\t\/\/\n\t\/\/ see `Get`.\n\tvisits uint64\n}\n\nfunc (c *globalVisitorsController) Get() string {\n\tcount := atomic.AddUint64(&c.visits, 1)\n\treturn fmt.Sprintf(\"Total visitors: %d\", count)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/larryli\/vagrantcloud.v1\"\n\t\"log\"\n\t\"strings\"\n)\n\ntype Arch struct {\n\tname, info, box string\n}\n\ntype Release string\n\ntype Box vagrantcloud.Box\n\ntype Version vagrantcloud.Version\n\nconst (\n\turl = \"https:\/\/cloud-images.ubuntu.com\/vagrant\/\"\n\tnotFound = \"404 Not Found\"\n\tsee = \"\\n\\nSee https:\/\/github.com\/larryli\/vagrantcloud.v1\/tree\/master\/update-ubuntu-vagrant-box\"\n)\n\nvar (\n\tapi *vagrantcloud.Api\n\tusername = flag.String(\"username\", \"larryli\", \"username\")\n\ttoken = flag.String(\"token\", \"\", \"access_token\")\n\ttest = flag.Bool(\"test\", false, \"test, no effect\")\n\tarches = []Arch{\n\t\t{\n\t\t\tname: \"64\",\n\t\t\tinfo: \"amd64\",\n\t\t\tbox: \"-server-cloudimg-amd64-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"64juju\",\n\t\t\tinfo: \"amd64 with juju\",\n\t\t\tbox: \"-server-cloudimg-amd64-juju-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"32\",\n\t\t\tinfo: \"i386\",\n\t\t\tbox: \"-server-cloudimg-i386-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"32juju\",\n\t\t\tinfo: \"i386 with juju\",\n\t\t\tbox: \"-server-cloudimg-i386-juju-vagrant-disk1.box\",\n\t\t},\n\t}\n)\n\nfunc Fatal(err error, a ...interface{}) {\n\tif err != nil {\n\t\ta = append(a, err)\n\t\tlog.Fatalln(a...)\n\t}\n}\n\nfunc isChildren(s string) bool {\n\treturn !strings.HasPrefix(s, \"\/\") && !strings.HasPrefix(s, \"?\")\n}\n\nfunc fetchReleases() (releases []Release) {\n\tdoc, err := goquery.NewDocument(url)\n\tFatal(err, \"fetch \"+url)\n\tdoc.Find(\"a\").Each(func(i int, s *goquery.Selection) {\n\t\tif link, ok := s.Attr(\"href\"); ok {\n\t\t\tif isChildren(link) {\n\t\t\t\treleases = append(releases, Release(strings.Trim(link, \"\/\")))\n\t\t\t}\n\t\t}\n\t})\n\treturn\n}\n\nfunc (r Release) name() string {\n\treturn string(r)\n}\n\nfunc (r Release) url() string {\n\treturn url + r.name() + \"\/\"\n}\n\nfunc (r Release) fetch() (versions []string) {\n\tdoc, err := goquery.NewDocument(r.url())\n\tFatal(err, \"fetch \"+r.url())\n\tdoc.Find(\"a\").Each(func(i int, s *goquery.Selection) {\n\t\tif link, ok := s.Attr(\"href\"); ok {\n\t\t\tif isChildren(link) && link != \"current\/\" {\n\t\t\t\tversions = append(versions, strings.Trim(link, \"\/\"))\n\t\t\t}\n\t\t}\n\t})\n\treturn\n}\n\nfunc (r Release) scan() {\n\tversions := r.fetch()\n\tfor _, t := range arches {\n\t\tbox := api.Box(*username, r.name()+t.name)\n\t\ttodo := fmt.Sprintf(\"fetch \\\"%s\\\"\", box.Uri())\n\t\terr := box.Get()\n\t\tif err != nil && strings.EqualFold(err.Error(), notFound) {\n\t\t\tbox.ShortDescription = \"Ubuntu \" + strings.Title(r.name()) + \" \" + t.info\n\t\t\tbox.DescriptionMarkdown = r.url() + see\n\t\t\ttodo = fmt.Sprintf(\"add \\\"%s\\\"\", box.Uri())\n\t\t\tif !(*test) {\n\t\t\t\tFatal(box.New(), todo)\n\t\t\t}\n\t\t\tlog.Println(todo)\n\t\t} else {\n\t\t\tFatal(err, todo)\n\t\t}\n\t\tfor _, version := range box.Versions {\n\t\t\tv := Version(version)\n\t\t\tif !v.exists(versions) {\n\t\t\t\tv.delete()\n\t\t\t}\n\t\t}\n\t\tfor _, version := range versions {\n\t\t\tb := (*Box)(box)\n\t\t\tif !b.find(version) {\n\t\t\t\timage := r.url() + version + \"\/\" + r.name() + t.box\n\t\t\t\tb.add(version, image)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (v *Version) exists(versions []string) (found bool) {\n\tfor _, test := range versions {\n\t\tif test == v.Version {\n\t\t\tfound = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (v *Version) delete() {\n\tversion := (*vagrantcloud.Version)(v)\n\ttodo := fmt.Sprintf(\"delete \\\"%s\\\" Version: \\\"%s\\\"\", version.Uri(), version.Version)\n\tif !(*test) {\n\t\tFatal(version.Delete(), todo)\n\t}\n\tlog.Println(todo)\n}\n\nfunc (b *Box) find(version string) (found bool) {\n\tfor _, test := range b.Versions {\n\t\tif test.Version == version {\n\t\t\tfound = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (b *Box) add(version, image string) {\n\tbox := (*vagrantcloud.Box)(b)\n\tv := box.Version(0)\n\tv.Version = version\n\tv.DescriptionMarkdown = image + see\n\ttodo := fmt.Sprintf(\"add \\\"%s\\\" Version: \\\"%s\\\"\", v.Uri(), v.Version)\n\tif !(*test) {\n\t\tFatal(v.New(), todo)\n\t}\n\tp := v.Provider(vagrantcloud.ProviderVirtualbox)\n\tp.OriginalUrl = image\n\ttodo = fmt.Sprintf(\"add \\\"%s\\\" Version: \\\"%s\\\"\", p.Uri())\n\tif !(*test) {\n\t\tFatal(p.New(), todo)\n\t}\n\ttodo = fmt.Sprintf(\"public \\\"%s\\\" Version: \\\"%s\\\" Url: \\\"%s\\\"\", v.Uri(), v.Version, p.OriginalUrl)\n\tif !(*test) {\n\t\tFatal(v.Release(), todo)\n\t}\n\tlog.Println(todo)\n}\n\nfunc main() {\n\tflag.Parse()\n\tapi = vagrantcloud.New(*token)\n\tlog.Println(\"start\")\n\tfor _, release := range fetchReleases() {\n\t\trelease.scan()\n\t}\n\tlog.Println(\"end\")\n}\n<commit_msg>fix juju box name<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/larryli\/vagrantcloud.v1\"\n\t\"log\"\n\t\"strings\"\n)\n\ntype Arch struct {\n\tname, info, box string\n}\n\ntype Release string\n\ntype Box vagrantcloud.Box\n\ntype Version vagrantcloud.Version\n\nconst (\n\turl = \"https:\/\/cloud-images.ubuntu.com\/vagrant\/\"\n\tnotFound = \"404 Not Found\"\n\tsee = \"\\n\\nSee https:\/\/github.com\/larryli\/vagrantcloud.v1\/tree\/master\/update-ubuntu-vagrant-box\"\n)\n\nvar (\n\tapi *vagrantcloud.Api\n\tusername = flag.String(\"username\", \"larryli\", \"username\")\n\ttoken = flag.String(\"token\", \"\", \"access_token\")\n\ttest = flag.Bool(\"test\", false, \"test, no effect\")\n\tarches = []Arch{\n\t\t{\n\t\t\tname: \"64\",\n\t\t\tinfo: \"amd64\",\n\t\t\tbox: \"-server-cloudimg-amd64-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"64-juju\",\n\t\t\tinfo: \"amd64 with juju\",\n\t\t\tbox: \"-server-cloudimg-amd64-juju-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"32\",\n\t\t\tinfo: \"i386\",\n\t\t\tbox: \"-server-cloudimg-i386-vagrant-disk1.box\",\n\t\t},\n\t\t{\n\t\t\tname: \"32-juju\",\n\t\t\tinfo: \"i386 with juju\",\n\t\t\tbox: \"-server-cloudimg-i386-juju-vagrant-disk1.box\",\n\t\t},\n\t}\n)\n\nfunc Fatal(err error, a ...interface{}) {\n\tif err != nil {\n\t\ta = append(a, err)\n\t\tlog.Fatalln(a...)\n\t}\n}\n\nfunc isChildren(s string) bool {\n\treturn !strings.HasPrefix(s, \"\/\") && !strings.HasPrefix(s, \"?\")\n}\n\nfunc fetchReleases() (releases []Release) {\n\tdoc, err := goquery.NewDocument(url)\n\tFatal(err, \"fetch \"+url)\n\tdoc.Find(\"a\").Each(func(i int, s *goquery.Selection) {\n\t\tif link, ok := s.Attr(\"href\"); ok {\n\t\t\tif isChildren(link) {\n\t\t\t\treleases = append(releases, Release(strings.Trim(link, \"\/\")))\n\t\t\t}\n\t\t}\n\t})\n\treturn\n}\n\nfunc (r Release) name() string {\n\treturn string(r)\n}\n\nfunc (r Release) url() string {\n\treturn url + r.name() + \"\/\"\n}\n\nfunc (r Release) fetch() (versions []string) {\n\tdoc, err := goquery.NewDocument(r.url())\n\tFatal(err, \"fetch \"+r.url())\n\tdoc.Find(\"a\").Each(func(i int, s *goquery.Selection) {\n\t\tif link, ok := s.Attr(\"href\"); ok {\n\t\t\tif isChildren(link) && link != \"current\/\" {\n\t\t\t\tversions = append(versions, strings.Trim(link, \"\/\"))\n\t\t\t}\n\t\t}\n\t})\n\treturn\n}\n\nfunc (r Release) scan() {\n\tversions := r.fetch()\n\tfor _, t := range arches {\n\t\tbox := api.Box(*username, r.name()+t.name)\n\t\ttodo := fmt.Sprintf(\"fetch \\\"%s\\\"\", box.Uri())\n\t\terr := box.Get()\n\t\tif err != nil && strings.EqualFold(err.Error(), notFound) {\n\t\t\tbox.ShortDescription = \"Ubuntu \" + strings.Title(r.name()) + \" \" + t.info\n\t\t\tbox.DescriptionMarkdown = r.url() + see\n\t\t\ttodo = fmt.Sprintf(\"add \\\"%s\\\"\", box.Uri())\n\t\t\tif !(*test) {\n\t\t\t\tFatal(box.New(), todo)\n\t\t\t}\n\t\t\tlog.Println(todo)\n\t\t} else {\n\t\t\tFatal(err, todo)\n\t\t}\n\t\tfor _, version := range box.Versions {\n\t\t\tv := Version(version)\n\t\t\tif !v.exists(versions) {\n\t\t\t\tv.delete()\n\t\t\t}\n\t\t}\n\t\tfor _, version := range versions {\n\t\t\tb := (*Box)(box)\n\t\t\tif !b.find(version) {\n\t\t\t\timage := r.url() + version + \"\/\" + r.name() + t.box\n\t\t\t\tb.add(version, image)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (v *Version) exists(versions []string) (found bool) {\n\tfor _, test := range versions {\n\t\tif test == v.Version {\n\t\t\tfound = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (v *Version) delete() {\n\tversion := (*vagrantcloud.Version)(v)\n\ttodo := fmt.Sprintf(\"delete \\\"%s\\\" Version: \\\"%s\\\"\", version.Uri(), version.Version)\n\tif !(*test) {\n\t\tFatal(version.Delete(), todo)\n\t}\n\tlog.Println(todo)\n}\n\nfunc (b *Box) find(version string) (found bool) {\n\tfor _, test := range b.Versions {\n\t\tif test.Version == version {\n\t\t\tfound = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (b *Box) add(version, image string) {\n\tbox := (*vagrantcloud.Box)(b)\n\tv := box.Version(0)\n\tv.Version = version\n\tv.DescriptionMarkdown = image + see\n\ttodo := fmt.Sprintf(\"add \\\"%s\\\" Version: \\\"%s\\\"\", v.Uri(), v.Version)\n\tif !(*test) {\n\t\tFatal(v.New(), todo)\n\t}\n\tp := v.Provider(vagrantcloud.ProviderVirtualbox)\n\tp.OriginalUrl = image\n\ttodo = fmt.Sprintf(\"add \\\"%s\\\" Version: \\\"%s\\\"\", p.Uri())\n\tif !(*test) {\n\t\tFatal(p.New(), todo)\n\t}\n\ttodo = fmt.Sprintf(\"public \\\"%s\\\" Version: \\\"%s\\\" Url: \\\"%s\\\"\", v.Uri(), v.Version, p.OriginalUrl)\n\tif !(*test) {\n\t\tFatal(v.Release(), todo)\n\t}\n\tlog.Println(todo)\n}\n\nfunc main() {\n\tflag.Parse()\n\tapi = vagrantcloud.New(*token)\n\tlog.Println(\"start\")\n\tfor _, release := range fetchReleases() {\n\t\trelease.scan()\n\t}\n\tlog.Println(\"end\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n\n\tdevicesFound int\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], pairingRequest.Duration)\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\tnetworkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X Key:% X\", *networkInfo.NwkChannel, *networkInfo.PanId, networkKey.NewKey)\n\n\treturn d.FetchDevices()\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tgo func() {\n\t \tsave := d.devicesFound;\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.Log.Infof(\"Join window closes after %d seconds.\", duration);\n\t\td.SendEvent(\"pairing-ended\", &events.PairingEnded{\n\t\t\tDevicesFound: int(d.devicesFound - save),\n\t\t})\n\t}()\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\td.Log.Infof(\"Join window opens for %d seconds\", duration);\n\n\treturn nil\n}\n\nfunc (d *Driver) FetchDevices() error {\n\treturn d.nwkmgrConn.FetchDeviceList()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Debugf(\"---- Found Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been repaired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info: %s\", err)\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t} else {\n\t\tdevice.ManufacturerName = \"Unknown\"\n\t}\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t} else {\n\t\tdevice.ModelIdentifier = fmt.Sprintf(\"MAC:%X\", *deviceInfo.IeeeAddress)\n\t}\n\n\tname := fmt.Sprintf(\"%s by %s\", device.ModelIdentifier, device.ManufacturerName)\n\n\tdevice.info.Name = &name\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\td.devicesFound++;\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<commit_msg>Put an upper bound on the pairing interval.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"github.com\/ninjasphere\/go-ninja\/events\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/go-ninja\/support\"\n\t\"github.com\/ninjasphere\/go-zigbee\"\n\t\"github.com\/ninjasphere\/go-zigbee\/nwkmgr\"\n)\n\nconst (\n\tClusterIDBasic uint32 = 0x00\n\tClusterIDOnOff uint32 = 0x06\n\tClusterIDLevel uint32 = 0x08 \/\/ We're always exporting as brightness for now\n\tClusterIDColor uint32 = 0x300\n\tClusterIDTemp uint32 = 0x402\n\tClusterIDHumidity uint32 = 0x405\n\tClusterIDPower uint32 = 0x702\n)\n\ntype ZStackConfig struct {\n\tHostname string\n\tOtasrvrPort int\n\tGatewayPort int\n\tNwkmgrPort int\n\tStableFlagFile string\n}\n\ntype Driver struct {\n\tsupport.DriverSupport\n\n\tdevices map[uint64]*Device\n\n\tconfig *ZStackConfig\n\n\tnwkmgrConn *zigbee.ZStackNwkMgr\n\tgatewayConn *zigbee.ZStackGateway\n\totaConn *zigbee.ZStackOta\n\n\tdevicesFound int\n}\n\nfunc NewDriver(config *ZStackConfig) (*Driver, error) {\n\tdriver := &Driver{\n\t\tconfig: config,\n\t\tdevices: make(map[uint64]*Device),\n\t}\n\n\terr := driver.Init(info)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to initialize fake driver: %s\", err)\n\t}\n\n\terr = driver.Export(driver)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export fake driver: %s\", err)\n\t}\n\n\tuserAgent := driver.Conn.GetServiceClient(\"$device\/:deviceId\/channel\/user-agent\")\n\tuserAgent.OnEvent(\"pairing-requested\", func(pairingRequest *events.PairingRequest, values map[string]string) bool {\n\n\t\tduration := uint32(pairingRequest.Duration)\n\n\t\tif duration > 254 {\n\t\t duration = 254\n\t\t}\n\n\t\tlog.Infof(\"Pairing request received from %s for %d seconds\", values[\"deviceId\"], duration)\n\n\t\tif err := driver.EnableJoin(duration); err != nil {\n\t\t\tlog.Warningf(\"Failed to enable joining: %s\", err)\n\t\t}\n\n\t\treturn true\n\t})\n\n\treturn driver, nil\n\n}\n\nfunc (d *Driver) Reset(hard bool) error {\n\treturn d.nwkmgrConn.Reset(hard)\n}\n\nfunc (d *Driver) Start() error {\n\n\twaitUntilZStackReady(d.config.StableFlagFile)\n\n\tvar err error\n\n\td.nwkmgrConn, err = zigbee.ConnectToNwkMgrServer(d.config.Hostname, d.config.NwkmgrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to nwkmgr %s\", err)\n\t}\n\td.nwkmgrConn.OnDeviceFound = func(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\t\td.onDeviceFound(deviceInfo)\n\t}\n\n\t\/*done := false\n\td.nwkmgrConn.OnNetworkReady = func() {\n\t\tif !done {\n\t\t\tdone = true\n\t\t}\n\t}*\/\n\n\td.otaConn, err = zigbee.ConnectToOtaServer(d.config.Hostname, d.config.OtasrvrPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to ota server %s\", err)\n\t}\n\n\td.gatewayConn, err = zigbee.ConnectToGatewayServer(d.config.Hostname, d.config.GatewayPort)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error connecting to gateway %s\", err)\n\t}\n\n\t\/*keyResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\tkeyRequest := &nwkmgr.NwkChangeNwkKeyReq{\n\t\tNewKey: []byte{0x5a, 0x69, 0x67, 0x42, 0x65, 0x65, 0x41, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x30, 0x39},\n\t}\n\n\terr = d.nwkmgrConn.SendCommand(keyRequest, keyResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed setting network key: %s\", err)\n\t}\n\n\tspew.Dump(keyResponse)\n\tlog.Println(\"Sleeping for 10 seconds after setting network key\")\n\n\ttime.Sleep(10 * time.Second)*\/\n\n\tnetworkInfo := &nwkmgr.NwkZigbeeNwkInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkZigbeeNwkInfoReq{}, networkInfo)\n\tif err != nil {\n\t\tif log.IsDebugEnabled() {\n\t\t\tspew.Dump(networkInfo)\n\t\t}\n\t\treturn fmt.Errorf(\"Failed getting network info: %s\", err)\n\t}\n\n\tif *networkInfo.Status == nwkmgr.NwkNetworkStatusT_NWK_DOWN {\n\t\treturn fmt.Errorf(\"The ZigBee network is down\")\n\t}\n\n\tlocalDevice := &nwkmgr.NwkGetLocalDeviceInfoCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetLocalDeviceInfoReq{}, localDevice)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed getting local device info: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(\"device info\", localDevice.String())\n\t}\n\n\tnetworkKey := &nwkmgr.NwkGetNwkKeyCnf{}\n\n\terr = d.nwkmgrConn.SendCommand(&nwkmgr.NwkGetNwkKeyReq{}, networkKey)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed getting network key: %s\", err)\n\t}\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(networkKey)\n\t}\n\n\tlog.Debugf(\"Started coordinator. Channel:%d Pan ID:0x%X Key:% X\", *networkInfo.NwkChannel, *networkInfo.PanId, networkKey.NewKey)\n\n\treturn d.FetchDevices()\n}\n\nfunc (d *Driver) EnableJoin(duration uint32) error {\n\n\tgo func() {\n\t \tsave := d.devicesFound;\n\t\ttime.Sleep(time.Second * time.Duration(duration))\n\t\td.Log.Infof(\"Join window closes after %d seconds.\", duration);\n\t\td.SendEvent(\"pairing-ended\", &events.PairingEnded{\n\t\t\tDevicesFound: int(d.devicesFound - save),\n\t\t})\n\t}()\n\n\tpermitJoinRequest := &nwkmgr.NwkSetPermitJoinReq{\n\t\tPermitJoinTime: &duration,\n\t\tPermitJoin: nwkmgr.NwkPermitJoinTypeT_PERMIT_ALL.Enum(),\n\t}\n\n\tpermitJoinResponse := &nwkmgr.NwkZigbeeGenericCnf{}\n\n\terr := d.nwkmgrConn.SendCommand(permitJoinRequest, permitJoinResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", err)\n\t}\n\tif permitJoinResponse.Status.String() != \"STATUS_SUCCESS\" {\n\t\treturn fmt.Errorf(\"Failed to enable joining: %s\", permitJoinResponse.Status)\n\t}\n\n\td.SendEvent(\"pairing-started\", &events.PairingStarted{\n\t\tDuration: int(duration),\n\t})\n\n\td.Log.Infof(\"Join window opens for %d seconds\", duration);\n\n\treturn nil\n}\n\nfunc (d *Driver) FetchDevices() error {\n\treturn d.nwkmgrConn.FetchDeviceList()\n}\n\nfunc (d *Driver) onDeviceFound(deviceInfo *nwkmgr.NwkDeviceInfoT) {\n\n\tlog.Debugf(\"\\n\\n\")\n\tlog.Debugf(\"---- Found Device IEEE:%X ----\\f\", *deviceInfo.IeeeAddress)\n\tlog.Debugf(\"Device Info: %v\", *deviceInfo)\n\n\tif d.devices[*deviceInfo.IeeeAddress] != nil {\n\t\t\/\/ We've seen this already, but it may have been repaired. We *should* just be able to replace\n\t\t\/\/ the deviceInfo object, which is used for all communication.\n\t\t\/\/ TODO: Actually verify this. May need to re-run channel init.\n\t\td.devices[*deviceInfo.IeeeAddress].deviceInfo = deviceInfo\n\t}\n\n\tdevice := &Device{\n\t\tdriver: d,\n\t\tdeviceInfo: deviceInfo,\n\t\tinfo: &model.Device{\n\t\t\tNaturalID: fmt.Sprintf(\"%X\", *deviceInfo.IeeeAddress),\n\t\t\tNaturalIDType: \"zigbee\",\n\t\t\tSignatures: &map[string]string{},\n\t\t},\n\t}\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tif *endpoint.ProfileId == 0xC05E \/*ZLL*\/ {\n\n\t\t\tswitch *endpoint.DeviceId {\n\n\t\t\tcase 0x0000: \/\/ On\/Off Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0100: \/\/ Dimmable Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x0200: \/\/ Color Light\n\t\t\t\tfallthrough\n\t\t\tcase 0x210: \/\/ Ext Color Light\n\t\t\t\t(*device.info.Signatures)[\"ninja:thingType\"] = \"light\"\n\t\t\t}\n\t\t}\n\t}\n\n\terr := device.getBasicInfo()\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to get basic info: %s\", err)\n\t}\n\n\tif device.ManufacturerName != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ManufacturerName\"] = device.ManufacturerName\n\t} else {\n\t\tdevice.ManufacturerName = \"Unknown\"\n\t}\n\tif device.ModelIdentifier != \"\" {\n\t\t(*device.info.Signatures)[\"zigbee:ModelIdentifier\"] = device.ModelIdentifier\n\t} else {\n\t\tdevice.ModelIdentifier = fmt.Sprintf(\"MAC:%X\", *deviceInfo.IeeeAddress)\n\t}\n\n\tname := fmt.Sprintf(\"%s by %s\", device.ModelIdentifier, device.ManufacturerName)\n\n\tdevice.info.Name = &name\n\n\tif log.IsDebugEnabled() {\n\t\tspew.Dump(deviceInfo)\n\t}\n\n\terr = d.Conn.ExportDevice(device)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to export zigbee device %s: %s\", name, err)\n\t}\n\td.devicesFound++;\n\n\tlog.Debugf(\"Got device : %d\", *deviceInfo.IeeeAddress)\n\n\tfor _, endpoint := range deviceInfo.SimpleDescList {\n\t\tlog.Debugf(\"Got endpoint : %d\", *endpoint.EndpointId)\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDOnOff) {\n\t\t\tlog.Debugf(\"This endpoint has on\/off cluster\")\n\n\t\t\tonOff := &OnOffChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDOnOff),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := onOff.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising on\/off channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDPower) {\n\t\t\tlog.Debugf(\"This endpoint has power cluster\")\n\n\t\t\tpower := &PowerChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDPower),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := power.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising power channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDTemp) {\n\t\t\tlog.Debugf(\"This endpoint has temperature cluster\")\n\n\t\t\ttemp := &TempChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDTemp),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := temp.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising temp channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDHumidity) {\n\t\t\tlog.Debugf(\"This endpoint has humidity cluster\")\n\n\t\t\thumidity := &HumidityChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDHumidity),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := humidity.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising humidity channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t\tif containsUInt32(endpoint.InputClusters, ClusterIDLevel) {\n\t\t\tlog.Debugf(\"This endpoint has level cluster. Exporting as brightness channel\")\n\n\t\t\tif log.IsDebugEnabled() {\n\t\t\t\tspew.Dump(\"brightness cluster\", endpoint, ClusterIDLevel)\n\t\t\t}\n\n\t\t\tbrightness := &BrightnessChannel{\n\t\t\t\tChannel: Channel{\n\t\t\t\t\tID: fmt.Sprintf(\"%d-%d\", *endpoint.EndpointId, ClusterIDLevel),\n\t\t\t\t\tdevice: device,\n\t\t\t\t\tendpoint: endpoint,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := brightness.init()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Failed initialising brightness channel: %s\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\td.devices[*deviceInfo.IeeeAddress] = device\n\n\tfmt.Printf(\"---- Finished Device IEEE:%X ----\\n\", *deviceInfo.IeeeAddress)\n\n}\n\nfunc getCurDir() string {\n\tpwd, _ := os.Getwd()\n\treturn pwd + \"\/\"\n}\n\nfunc containsUInt32(hackstack []uint32, needle uint32) bool {\n\tfor _, cluster := range hackstack {\n\t\tif cluster == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc waitUntilZStackReady(checkFile string) {\n\tif checkFile == \"\" {\n\t\treturn\n\t}\n\n\t\/\/ cooperate with zigbeeHAgw so that we don't start the zigbee driver\n\t\/\/ until we look somewhat stable.\n\tlog.Debugf(\"waiting until zigbeeHAgw writes %s\", checkFile)\n\tfor {\n\t\tif _, err := os.Stat(checkFile); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"%s detected. start up continues...\", checkFile)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Define a type called Client that handles authentication and communication with the sound\n\/\/ API\npackage soundclient\n\nimport ( \"fmt\"\n \"net\/http\"\n \"io\/ioutil\"\n )\n\nconst apiBaseUrl string = \"https:\/\/api.soundcloud.com\"\n\ntype SoundCloud struct {\n ClientId string\n ClientSecret string\n}\n\n\/\/ Handle get requests to an endpoint\n\/\/ \nfunc Get(s SoundCloud, apiCall string, kargs string) {\n url := apiBaseUrl+\"\/\"+apiCall+\"\/\"+kargs+\"?client_id=\"+s.ClientId\n resp, err := http.Get(url)\n fmt.Println(\"[DEBUG] URL-> \", url)\n if (err != nil) {\n fmt.Println(\"[ERROR] \", err)\n } else {\n body, err := ioutil.ReadAll(resp.Body)\n if (err == nil) {\n fmt.Printf(\"%s\" ,body)\n }\n }\n defer resp.Body.Close()\n \n}\n\n\n\/\/ Return a soundcloud track \nfunc (s SoundCloud) Tracks(trackId string) {\n Get(s, \"tracks\", trackId)\n}\n\nfunc (s SoundCloud) Playlists(playlistsId string) {\n Get(s, \"playlists\", playlistsId)\n}\n\n\n\/\/ Return a soundcloud user\nfunc (s SoundCloud) Users(userId string) {\n Get(s, \"users\", userId)\n}\n\n\n\/\/ Handle POST requests\nfunc Post() {\n \n}\n\n\n\n\n \n\n<commit_msg>return all responses as jason object<commit_after>\/\/ Define a type called Client that handles authentication and communication with the sound\n\/\/ API\npackage soundclient\n\nimport ( \"fmt\"\n \"net\/http\"\n \"io\/ioutil\"\n \"github.com\/antonholmquist\/jason\"\n )\n\nconst apiBaseUrl string = \"https:\/\/api.soundcloud.com\"\n\ntype SoundCloud struct {\n ClientId string\n ClientSecret string\n}\n\n\/\/ Handle get requests to an endpoint\n\/\/ and return a *jason.Object containing data\nfunc Get(s SoundCloud, apiCall string, kargs string) *jason.Object {\n url := apiBaseUrl+\"\/\"+apiCall+\"\/\"+kargs+\"?client_id=\"+s.ClientId\n resp, err := http.Get(url)\n if (err != nil) {\n fmt.Println(\"[ERROR] \", err)\n } else {\n result, err := ioutil.ReadAll(resp.Body)\n if (err == nil) {\n v, _ := jason.NewObjectFromBytes(result)\n return v\n }\n }\n defer resp.Body.Close()\n return nil\n \n}\n\n\/\/ Return a soundcloud track \nfunc (s SoundCloud) Tracks(trackId string) *jason.Object {\n return Get(s, \"tracks\", trackId)\n}\n\nfunc (s SoundCloud) Playlists(playlistsId string) *jason.Object {\n return Get(s, \"playlists\", playlistsId)\n}\n\n\n\/\/ Return a soundcloud user\nfunc (s SoundCloud) Users(userId string) *jason.Object {\n return Get(s, \"users\", userId)\n}\n\n\n\/\/ Get Group members and contributed tracks\nfunc (s SoundCloud) Groups(groupId string) *jason.Object {\n return Get(s, \"groups\", groupId)\n}\n \n\/\/ Handle POST requests\nfunc Post() {\n \n}\n\n\n\n\n \n\n<|endoftext|>"} {"text":"<commit_before>package bot\n\nconst changeLogPrefix = \"\\n`->` \"\n\n\/\/ probably want to move this to the DB, but not bad to have it here\nvar changeLog = map[string]string{\n\t\"0.2\": changeLogPrefix + \"Included this command!\" +\n\t\tchangeLogPrefix + \"Updated `Rank` command to prevent removal of lowest role.\" +\n\t\tchangeLogPrefix + \"Added random drops for tickets\" +\n\t\tchangeLogPrefix + \"Fixed the cooldown so users wouldn't be spammed due to high luck stat\" +\n\t\tchangeLogPrefix + \"Added `Raffle` related commands... For rafflin'\" +\n\t\tchangeLogPrefix + \"For future reference, previous versions included help, team, rank, and NSFW commands as well as a welcome message to the server.\",\n\n\t\"0.2.1\": changeLogPrefix + \"Updated raffle art\/relic submissions to post all submissions on command instead of over time.\",\n}\n\nfunc commChange(pack *commPackage) {\n\tpack.session.ChannelMessageSend(pack.channel.ID, \"`Moebot update log` (ver \"+version+\"): \\n\"+changeLog[\"0.3\"])\n}\n<commit_msg>fixes typo with changelog<commit_after>package bot\n\nconst changeLogPrefix = \"\\n`->` \"\n\n\/\/ probably want to move this to the DB, but not bad to have it here\nvar changeLog = map[string]string{\n\t\"0.2\": changeLogPrefix + \"Included this command!\" +\n\t\tchangeLogPrefix + \"Updated `Rank` command to prevent removal of lowest role.\" +\n\t\tchangeLogPrefix + \"Added random drops for tickets\" +\n\t\tchangeLogPrefix + \"Fixed the cooldown so users wouldn't be spammed due to high luck stat\" +\n\t\tchangeLogPrefix + \"Added `Raffle` related commands... For rafflin'\" +\n\t\tchangeLogPrefix + \"For future reference, previous versions included help, team, rank, and NSFW commands as well as a welcome message to the server.\",\n\n\t\"0.2.1\": changeLogPrefix + \"Updated raffle art\/relic submissions to post all submissions on command instead of over time.\",\n}\n\nfunc commChange(pack *commPackage) {\n\tpack.session.ChannelMessageSend(pack.channel.ID, \"`Moebot update log` (ver \"+version+\"): \\n\"+changeLog[\"0.2.1\"])\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"owl\/common\/types\"\n\n\t\"github.com\/Knetic\/govaluate\"\n)\n\nconst (\n\tMAX_METHOD = \"max\"\n\tMIN_METHOD = \"min\"\n\tRATIO_METHOD = \"ratio\"\n\tTOP_METHOD = \"top\"\n\tBOTTOM_METHOD = \"bottom\"\n\tLAST_METHOD = \"last\"\n\tNODATA_METHOD = \"nodata\"\n)\n\nfunc maxMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\n\t\tsort.Float64s(values)\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tcurrent_threshold := values[len(values)-1]\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc minMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\n\t\tsort.Float64s(values)\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tcurrent_threshold := values[0]\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc ratioMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tstart_time, end_time := unit(cycle, trigger.Number)\n\tparams = NewQueryParams(host_id, start_time, end_time, trigger.Tags, \"sum\", trigger.Metric)\n\tresults_ago, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor index, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdata := avg(result.Dps)\n\t\tif index > len(results_ago)-1 {\n\t\t\tbreak\n\t\t}\n\t\tdata_ago := avg(results_ago[index].Dps)\n\n\t\tcurrent_threshold := ((data - data_ago) \/ data_ago) * 100\n\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc topMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tsort.Sort(sort.Reverse(sort.Float64Slice(values)))\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, sum\/float64(trigger.Number), trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc bottomMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tsort.Float64s(values)\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, sum\/float64(trigger.Number), trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc lastMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue_times := make([]string, 0)\n\t\tfor value_time, _ := range result.Dps {\n\t\t\tvalue_times = append(value_times, value_time)\n\t\t}\n\n\t\tsort.Strings(value_times)\n\n\t\tvalues := make([]float64, len(value_times))\n\t\tfor index, value_time := range value_times {\n\t\t\tvalues[len(value_times)-(index+1)] = result.Dps[value_time]\n\t\t}\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, sum\/float64(trigger.Number), trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc nodataMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tvar current_threshold float64 = 1\n\n\tif len(results) != 0 && len(results[0].Dps) != 0 {\n\t\tcurrent_threshold = 0\n\t}\n\n\tparameters := make(map[string]interface{}, 8)\n\tparameters[\"current_threshold\"] = current_threshold\n\tparameters[\"threshold\"] = trigger.Threshold\n\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\ttrigger_result, err := compute(parameters, expression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set.Triggered = trigger_result\n\n\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, make(map[string]string), make([]string, 0), current_threshold, trigger_result))\n\n\treturn trigger_result_set, nil\n}\n\nfunc compute(params map[string]interface{}, express string) (bool, error) {\n\texpression, err := govaluate.NewEvaluableExpression(express)\n\tif err != nil {\n\t\tlg.Error(\"Parse the expression error %s\", err.Error())\n\t\treturn false, err\n\t}\n\n\tresult, err := expression.Evaluate(params)\n\tif err != nil {\n\t\tlg.Error(\"Evaluate the expression error %s\", err.Error())\n\t\treturn false, err\n\t}\n\tvalue, ok := result.(bool)\n\tif !ok {\n\t\tlg.Error(\"result value is not bool type\")\n\t}\n\n\treturn value, nil\n}\n\nfunc unit(cycle, number int) (start_time, end_time string) {\n\tstart := time.Now().Add(-time.Duration(time.Duration(number) * 24 * time.Hour)).Add(-time.Duration(time.Duration(cycle) * time.Minute))\n\tend := time.Now().Add(-time.Duration(time.Duration(number) * 24 * time.Hour))\n\n\tstart_time = start.Format(\"2006\/01\/02 15:04:05\")\n\tend_time = end.Format(\"2006\/01\/02 15:04:05\")\n\n\treturn start_time, end_time\n}\n\nfunc avg(values map[string]float64) float64 {\n\tvar sum float64\n\tcount := 0\n\tfor _, value := range values {\n\t\tsum += value\n\t\tcount += 1\n\t}\n\n\treturn sum \/ float64(count)\n}\n<commit_msg>fix bug for restore alarm wrong current threhold<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"owl\/common\/types\"\n\n\t\"github.com\/Knetic\/govaluate\"\n)\n\nconst (\n\tMAX_METHOD = \"max\"\n\tMIN_METHOD = \"min\"\n\tRATIO_METHOD = \"ratio\"\n\tTOP_METHOD = \"top\"\n\tBOTTOM_METHOD = \"bottom\"\n\tLAST_METHOD = \"last\"\n\tNODATA_METHOD = \"nodata\"\n)\n\nfunc maxMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\n\t\tsort.Float64s(values)\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tcurrent_threshold := values[len(values)-1]\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc minMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\n\t\tsort.Float64s(values)\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tcurrent_threshold := values[0]\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc ratioMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tstart_time, end_time := unit(cycle, trigger.Number)\n\tparams = NewQueryParams(host_id, start_time, end_time, trigger.Tags, \"sum\", trigger.Metric)\n\tresults_ago, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor index, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdata := avg(result.Dps)\n\t\tif index > len(results_ago)-1 {\n\t\t\tbreak\n\t\t}\n\t\tdata_ago := avg(results_ago[index].Dps)\n\n\t\tcurrent_threshold := ((data - data_ago) \/ data_ago) * 100\n\n\t\tparameters := make(map[string]interface{}, 8)\n\t\tparameters[\"current_threshold\"] = current_threshold\n\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\ttrigger_result, err := compute(parameters, expression)\n\t\tif err != nil {\n\t\t\tlg.Error(err.Error())\n\t\t\treturn trigger_result_set, err\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc topMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tsort.Sort(sort.Reverse(sort.Float64Slice(values)))\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tvar current_threshold float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t\tcurrent_threshold = value\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t\tcurrent_threshold = sum \/ float64(trigger.Number)\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc bottomMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalues := make([]float64, 0)\n\t\tfor _, value := range result.Dps {\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tsort.Float64s(values)\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tvar current_threshold float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t\tcurrent_threshold = value\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t\tcurrent_threshold = sum \/ float64(trigger.Number)\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc lastMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\tif trigger.Number == 0 {\n\t\terr := errors.New(\"number can not be 0\")\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tfor _, result := range results {\n\t\tif len(result.Dps) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue_times := make([]string, 0)\n\t\tfor value_time, _ := range result.Dps {\n\t\t\tvalue_times = append(value_times, value_time)\n\t\t}\n\n\t\tsort.Strings(value_times)\n\n\t\tvalues := make([]float64, len(value_times))\n\t\tfor index, value_time := range value_times {\n\t\t\tvalues[len(value_times)-(index+1)] = result.Dps[value_time]\n\t\t}\n\n\t\ttrigger_result := true\n\t\tvar count int\n\t\tvar sum float64\n\t\tvar current_threshold float64\n\t\tfor _, value := range values {\n\t\t\tcount += 1\n\t\t\tif count > trigger.Number {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += value\n\t\t\tparameters := make(map[string]interface{}, 8)\n\t\t\tparameters[\"current_threshold\"] = value\n\t\t\tparameters[\"threshold\"] = trigger.Threshold\n\t\t\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\t\t\tone_result, err := compute(parameters, expression)\n\t\t\tif err != nil {\n\t\t\t\tlg.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !one_result {\n\t\t\t\ttrigger_result = false\n\t\t\t\tcurrent_threshold = value\n\t\t\t}\n\t\t}\n\n\t\tif !trigger_result_set.Triggered && trigger_result {\n\t\t\ttrigger_result_set.Triggered = trigger_result\n\t\t\tcurrent_threshold = sum \/ float64(trigger.Number)\n\t\t}\n\n\t\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, result.Tags, result.AggregateTags, current_threshold, trigger_result))\n\t}\n\n\treturn trigger_result_set, nil\n}\n\nfunc nodataMethod(host_id string, cycle int, trigger *types.Trigger) (*types.TriggerResultSet, error) {\n\ttrigger_result_set := &types.TriggerResultSet{make([]*types.TriggerResult, 0), false}\n\n\tparams := NewQueryParams(host_id, fmt.Sprintf(\"%dm-ago\", cycle), \"\", trigger.Tags, \"sum\", trigger.Metric)\n\tresults, err := tsdbClient.Query(params)\n\tif err != nil {\n\t\tlg.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\tvar current_threshold float64 = 1\n\n\tif len(results) != 0 && len(results[0].Dps) != 0 {\n\t\tcurrent_threshold = 0\n\t}\n\n\tparameters := make(map[string]interface{}, 8)\n\tparameters[\"current_threshold\"] = current_threshold\n\tparameters[\"threshold\"] = trigger.Threshold\n\texpression := fmt.Sprintf(\"current_threshold %s threshold\", trigger.Symbol)\n\ttrigger_result, err := compute(parameters, expression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttrigger_result_set.Triggered = trigger_result\n\n\ttrigger_result_set.TriggerResults = append(trigger_result_set.TriggerResults, types.NewTriggerResult(trigger.Index, make(map[string]string), make([]string, 0), current_threshold, trigger_result))\n\n\treturn trigger_result_set, nil\n}\n\nfunc compute(params map[string]interface{}, express string) (bool, error) {\n\texpression, err := govaluate.NewEvaluableExpression(express)\n\tif err != nil {\n\t\tlg.Error(\"Parse the expression error %s\", err.Error())\n\t\treturn false, err\n\t}\n\n\tresult, err := expression.Evaluate(params)\n\tif err != nil {\n\t\tlg.Error(\"Evaluate the expression error %s\", err.Error())\n\t\treturn false, err\n\t}\n\tvalue, ok := result.(bool)\n\tif !ok {\n\t\tlg.Error(\"result value is not bool type\")\n\t}\n\n\treturn value, nil\n}\n\nfunc unit(cycle, number int) (start_time, end_time string) {\n\tstart := time.Now().Add(-time.Duration(time.Duration(number) * 24 * time.Hour)).Add(-time.Duration(time.Duration(cycle) * time.Minute))\n\tend := time.Now().Add(-time.Duration(time.Duration(number) * 24 * time.Hour))\n\n\tstart_time = start.Format(\"2006\/01\/02 15:04:05\")\n\tend_time = end.Format(\"2006\/01\/02 15:04:05\")\n\n\treturn start_time, end_time\n}\n\nfunc avg(values map[string]float64) float64 {\n\tvar sum float64\n\tcount := 0\n\tfor _, value := range values {\n\t\tsum += value\n\t\tcount += 1\n\t}\n\n\treturn sum \/ float64(count)\n}\n<|endoftext|>"} {"text":"<commit_before>package osutil\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\tfp \"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/docker\/docker\/pkg\/mount\"\n\n\t\"github.com\/yuuki1\/droot\/log\"\n)\n\nfunc ExistsFile(file string) bool {\n\tf, err := os.Stat(file)\n\treturn err == nil && !f.IsDir()\n}\n\nfunc ExistsDir(dir string) bool {\n\tif f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc IsDirEmpty(dir string) (bool, error) {\n\tf, err := os.Open(dir)\n\tif err != nil {\n\t\treturn false, errwrap.Wrapf(fmt.Sprintf(\"Failed to open %s: {{err}}\", dir), err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Readdirnames(1)\n\tif err == io.EOF {\n\t\treturn true, nil\n\t}\n\treturn false, err \/\/ Either not empty or error, suits both cases\n}\n\nfunc RunCmd(name string, arg ...string) error {\n\tlog.Debug(\"runcmd: \", name, arg)\n\tout, err := exec.Command(name, arg...).CombinedOutput()\n\tif len(out) > 0 {\n\t\tlog.Debug(string(out))\n\t}\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to exec %s %s: {{err}}\", name, arg), err)\n\t}\n\treturn nil\n}\n\nfunc Cp(from, to string) error {\n\tif err := RunCmd(\"cp\", \"-p\", from, to); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetMountsByRoot(rootDir string) ([]*mount.Info, error) {\n\tmounts, err := mount.GetMounts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttargets := make([]*mount.Info, 0)\n\tfor _, m := range mounts {\n\t\tif strings.HasPrefix(m.Mountpoint, fp.Clean(rootDir)) {\n\t\t\ttargets = append(targets, m)\n\t\t}\n\t}\n\n\treturn targets, nil\n}\n\nfunc UmountRoot(rootDir string) error {\n\tmounts, err := GetMountsByRoot(rootDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, m := range mounts {\n\t\tif err := mount.Unmount(m.Mountpoint); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debug(\"umount:\", m.Mountpoint)\n\t}\n\n\treturn nil\n}\n\nfunc BindMount(src, dest string) error {\n\treturn mount.Mount(src, dest, \"bind\", \"\")\n}\n\nfunc RObindMount(src, dest string) error {\n\treturn mount.Mount(src, dest, \"bind\", \"remount,ro,bind\")\n}\n\n\/\/ Mknod unless path does not exists.\nfunc Mknod(path string, mode uint32, dev int) error {\n\tif ExistsFile(path) {\n\t\treturn nil\n\t}\n\tif err := syscall.Mknod(path, mode, dev); err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to mknod %s: {{err}}\", path), err)\n\t}\n\treturn nil\n}\n\n\/\/ Symlink, but ignore already exists file.\nfunc Symlink(oldname, newname string) error {\n\tif err := os.Symlink(oldname, newname); err != nil {\n\t\t\/\/ Ignore already created symlink\n\t\tif _, ok := err.(*os.LinkError); !ok {\n\t\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to symlink %s %s: {{err}}\", oldname, newname), err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc Execv(cmd string, args []string, env []string) error {\n\tname, err := exec.LookPath(cmd)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Not found %s: {{err}}\", cmd), err)\n\t}\n\n\tlog.Debug(\"exec: \", name, args)\n\n\treturn syscall.Exec(name, args, env)\n}\n<commit_msg>Ignore Readdirnames err<commit_after>package osutil\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\tfp \"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/docker\/docker\/pkg\/mount\"\n\n\t\"github.com\/yuuki1\/droot\/log\"\n)\n\nfunc ExistsFile(file string) bool {\n\tf, err := os.Stat(file)\n\treturn err == nil && !f.IsDir()\n}\n\nfunc ExistsDir(dir string) bool {\n\tif f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc IsDirEmpty(dir string) (bool, error) {\n\tf, err := os.Open(dir)\n\tif err != nil {\n\t\treturn false, errwrap.Wrapf(fmt.Sprintf(\"Failed to open %s: {{err}}\", dir), err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Readdirnames(1)\n\tif err == io.EOF {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc RunCmd(name string, arg ...string) error {\n\tlog.Debug(\"runcmd: \", name, arg)\n\tout, err := exec.Command(name, arg...).CombinedOutput()\n\tif len(out) > 0 {\n\t\tlog.Debug(string(out))\n\t}\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to exec %s %s: {{err}}\", name, arg), err)\n\t}\n\treturn nil\n}\n\nfunc Cp(from, to string) error {\n\tif err := RunCmd(\"cp\", \"-p\", from, to); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetMountsByRoot(rootDir string) ([]*mount.Info, error) {\n\tmounts, err := mount.GetMounts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttargets := make([]*mount.Info, 0)\n\tfor _, m := range mounts {\n\t\tif strings.HasPrefix(m.Mountpoint, fp.Clean(rootDir)) {\n\t\t\ttargets = append(targets, m)\n\t\t}\n\t}\n\n\treturn targets, nil\n}\n\nfunc UmountRoot(rootDir string) error {\n\tmounts, err := GetMountsByRoot(rootDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, m := range mounts {\n\t\tif err := mount.Unmount(m.Mountpoint); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debug(\"umount:\", m.Mountpoint)\n\t}\n\n\treturn nil\n}\n\nfunc BindMount(src, dest string) error {\n\treturn mount.Mount(src, dest, \"bind\", \"\")\n}\n\nfunc RObindMount(src, dest string) error {\n\treturn mount.Mount(src, dest, \"bind\", \"remount,ro,bind\")\n}\n\n\/\/ Mknod unless path does not exists.\nfunc Mknod(path string, mode uint32, dev int) error {\n\tif ExistsFile(path) {\n\t\treturn nil\n\t}\n\tif err := syscall.Mknod(path, mode, dev); err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to mknod %s: {{err}}\", path), err)\n\t}\n\treturn nil\n}\n\n\/\/ Symlink, but ignore already exists file.\nfunc Symlink(oldname, newname string) error {\n\tif err := os.Symlink(oldname, newname); err != nil {\n\t\t\/\/ Ignore already created symlink\n\t\tif _, ok := err.(*os.LinkError); !ok {\n\t\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to symlink %s %s: {{err}}\", oldname, newname), err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc Execv(cmd string, args []string, env []string) error {\n\tname, err := exec.LookPath(cmd)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Not found %s: {{err}}\", cmd), err)\n\t}\n\n\tlog.Debug(\"exec: \", name, args)\n\n\treturn syscall.Exec(name, args, env)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"srcd.works\/go-git.v4\"\n\t\"srcd.works\/go-git.v4\/plumbing\"\n\t\"srcd.works\/go-git.v4\/plumbing\/object\"\n\t\"srcd.works\/go-git.v4\/storage\/memory\"\n\n\t\"bytes\"\n\n\t\"regexp\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype gitConfig struct {\n\turi string\n}\ntype salesforceConfig struct {\n\tusername string\n\tpassword string\n\tendpoint string\n\tpackagePath string\n\tapiVersion string\n}\n\ntype Downloader interface {\n\tDownload() ([]*File, error)\n}\n\ntype MetaPackageFile struct {\n\tVersion float64 `toml:\"version\"`\n\tTypes []*Type `toml:\"types\"`\n}\n\ntype Type struct {\n\tName string `toml:\"name\"`\n\tMembers []string `toml:\"members\"`\n}\n\ntype SalesforceDownloader struct {\n\tconfig *salesforceConfig\n\tclient *ForceClient\n\tlogger Logger\n}\n\nfunc NewSalesforceDownloader(logger Logger, config *salesforceConfig) (*SalesforceDownloader, error) {\n\td := &SalesforceDownloader{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t}\n\terr := d.init()\n\treturn d, err\n}\n\nfunc (i *SalesforceDownloader) init() (err error) {\n\tif i.config.username == \"\" {\n\t\treturn errors.New(\"Username is required\")\n\t}\n\tif i.config.password == \"\" {\n\t\treturn errors.New(\"Password is required\")\n\t}\n\n\terr = i.setClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (i *SalesforceDownloader) setClient() error {\n\ti.client = NewForceClient(i.config.endpoint, i.config.apiVersion)\n\terr := i.client.Login(i.config.username, i.config.password)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (i *SalesforceDownloader) Download() ([]*File, error) {\n\tbuf, err := ioutil.ReadFile(i.config.packagePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpackages := &MetaPackageFile{}\n\terr = toml.Unmarshal(buf, packages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti.logger.Info(\"Start Retrieve Request...\")\n\tr, err := i.client.Retrieve(createRetrieveRequest(packages))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\ti.logger.Info(\"Check Retrieve Status...\")\n\t\tret_res, err := i.client.CheckRetrieveStatus(r.Result.Id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ret_res.Result.Done {\n\t\t\tzb := make([]byte, len(ret_res.Result.ZipFile))\n\t\t\t_, err = base64.StdEncoding.Decode(zb, ret_res.Result.ZipFile)\n\t\t\treturn []*File{&File{Body: zb}}, err\n\t\t}\n\t}\n\treturn nil, nil \/\/ Todo: error handling\n}\n\ntype GitDownloader struct {\n\tlogger Logger\n\tconfig *gitConfig\n}\n\nfunc NewGitDownloader(logger Logger, config *gitConfig) (*GitDownloader, error) {\n\treturn &GitDownloader{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t}, nil\n}\n\nfunc (d *GitDownloader) Download() ([]*File, error) {\n\turi, _, _, branch := extractInstallParameter(d.config.uri)\n\td.logger.Infof(\"Clone repository from %s (branch: %s)\", uri, branch)\n\n\tr, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{\n\t\tReferenceName: plumbing.ReferenceName(fmt.Sprintf(\"refs\/heads\/%s\", branch)),\n\t\tSingleBranch: true,\n\t\tURL: uri,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tref, _ := r.Head()\n\tcommit, _ := r.Commit(ref.Hash())\n\n\tgfiles, err := commit.Files()\n\tfiles := make([]*File, 0)\n\terr = gfiles.ForEach(func(f *object.File) error {\n\t\treader, err := f.Reader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb := new(bytes.Buffer)\n\t\tif _, err := b.ReadFrom(reader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfiles = append(files, &File{Name: f.Name, Body: b.Bytes()})\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}\n\nfunc dispatchDownloader(logger Logger, uri string) (Downloader, error) {\n\tr := regexp.MustCompile(`^https:\/\/([^\/]+?)\/([^\/]+?)\/([^\/@]+?)(\/([^@]+))?(\\?([^\/]+))?$`)\n\tif r.MatchString(uri) {\n\t\treturn NewGitDownloader(logger, &gitConfig{uri: uri})\n\t}\n\tr = regexp.MustCompile(`^sf:\/\/([^\/]+?):([^\/]+)@([^\/]+?)\\?path=(.+)$`)\n\tfmt.Println(uri)\n\tif r.MatchString(uri) {\n\t\tgroup := r.FindAllStringSubmatch(uri, -1)\n\t\treturn NewSalesforceDownloader(logger, &salesforceConfig{\n\t\t\tusername: group[0][1],\n\t\t\tpassword: group[0][2],\n\t\t\tendpoint: group[0][3],\n\t\t\tpackagePath: group[0][4],\n\t\t\tapiVersion: \"38.0\",\n\t\t})\n\t}\n\treturn nil, errors.New(\"Invalid downloader\")\n}\n<commit_msg>Mod regexp<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"srcd.works\/go-git.v4\"\n\t\"srcd.works\/go-git.v4\/plumbing\"\n\t\"srcd.works\/go-git.v4\/plumbing\/object\"\n\t\"srcd.works\/go-git.v4\/storage\/memory\"\n\n\t\"bytes\"\n\n\t\"regexp\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype gitConfig struct {\n\turi string\n}\ntype salesforceConfig struct {\n\tusername string\n\tpassword string\n\tendpoint string\n\tpackagePath string\n\tapiVersion string\n}\n\ntype Downloader interface {\n\tDownload() ([]*File, error)\n}\n\ntype MetaPackageFile struct {\n\tVersion float64 `toml:\"version\"`\n\tTypes []*Type `toml:\"types\"`\n}\n\ntype Type struct {\n\tName string `toml:\"name\"`\n\tMembers []string `toml:\"members\"`\n}\n\ntype SalesforceDownloader struct {\n\tconfig *salesforceConfig\n\tclient *ForceClient\n\tlogger Logger\n}\n\nfunc NewSalesforceDownloader(logger Logger, config *salesforceConfig) (*SalesforceDownloader, error) {\n\td := &SalesforceDownloader{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t}\n\terr := d.init()\n\treturn d, err\n}\n\nfunc (i *SalesforceDownloader) init() (err error) {\n\tif i.config.username == \"\" {\n\t\treturn errors.New(\"Username is required\")\n\t}\n\tif i.config.password == \"\" {\n\t\treturn errors.New(\"Password is required\")\n\t}\n\n\terr = i.setClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (i *SalesforceDownloader) setClient() error {\n\ti.client = NewForceClient(i.config.endpoint, i.config.apiVersion)\n\terr := i.client.Login(i.config.username, i.config.password)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (i *SalesforceDownloader) Download() ([]*File, error) {\n\tbuf, err := ioutil.ReadFile(i.config.packagePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpackages := &MetaPackageFile{}\n\terr = toml.Unmarshal(buf, packages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti.logger.Info(\"Start Retrieve Request...\")\n\tr, err := i.client.Retrieve(createRetrieveRequest(packages))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\ti.logger.Info(\"Check Retrieve Status...\")\n\t\tret_res, err := i.client.CheckRetrieveStatus(r.Result.Id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif ret_res.Result.Done {\n\t\t\tzb := make([]byte, len(ret_res.Result.ZipFile))\n\t\t\t_, err = base64.StdEncoding.Decode(zb, ret_res.Result.ZipFile)\n\t\t\treturn []*File{&File{Body: zb}}, err\n\t\t}\n\t}\n\treturn nil, nil \/\/ Todo: error handling\n}\n\ntype GitDownloader struct {\n\tlogger Logger\n\tconfig *gitConfig\n}\n\nfunc NewGitDownloader(logger Logger, config *gitConfig) (*GitDownloader, error) {\n\treturn &GitDownloader{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t}, nil\n}\n\nfunc (d *GitDownloader) Download() ([]*File, error) {\n\turi, _, _, branch := extractInstallParameter(d.config.uri)\n\td.logger.Infof(\"Clone repository from %s (branch: %s)\", uri, branch)\n\n\tr, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{\n\t\tReferenceName: plumbing.ReferenceName(fmt.Sprintf(\"refs\/heads\/%s\", branch)),\n\t\tSingleBranch: true,\n\t\tURL: uri,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tref, _ := r.Head()\n\tcommit, _ := r.Commit(ref.Hash())\n\n\tgfiles, err := commit.Files()\n\tfiles := make([]*File, 0)\n\terr = gfiles.ForEach(func(f *object.File) error {\n\t\treader, err := f.Reader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb := new(bytes.Buffer)\n\t\tif _, err := b.ReadFrom(reader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfiles = append(files, &File{Name: f.Name, Body: b.Bytes()})\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}\n\nfunc dispatchDownloader(logger Logger, uri string) (Downloader, error) {\n\tr := regexp.MustCompile(`^https:\/\/([^\/]+?)\/([^\/]+?)\/([^\/@]+?)(\/([^@]+))?(\\?([^\/]+))?$`)\n\tif r.MatchString(uri) {\n\t\treturn NewGitDownloader(logger, &gitConfig{uri: uri})\n\t}\n\tr = regexp.MustCompile(`^sf:\/\/([^\/]*?):([^\/]*)@([^\/]+?)\\?path=(.+)$`)\n\tfmt.Println(uri)\n\tif r.MatchString(uri) {\n\t\tgroup := r.FindAllStringSubmatch(uri, -1)\n\t\treturn NewSalesforceDownloader(logger, &salesforceConfig{\n\t\t\tusername: group[0][1],\n\t\t\tpassword: group[0][2],\n\t\t\tendpoint: group[0][3],\n\t\t\tpackagePath: group[0][4],\n\t\t\tapiVersion: \"38.0\",\n\t\t})\n\t}\n\treturn nil, errors.New(\"Invalid downloader\")\n}\n<|endoftext|>"} {"text":"<commit_before>package packet\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/yosssi\/gmq\/mqtt\"\n)\n\nfunc TestPUBLISH_setFixedHeader(t *testing.T) {\n\tp := &PUBLISH{\n\t\tDUP: true,\n\t\tretain: true,\n\t}\n\n\tp.variableHeader = []byte{0x00}\n\n\tp.payload = []byte{0x00, 0x00}\n\n\tp.setFixedHeader()\n\n\twant := []byte{0x39, 0x03}\n\n\tif len(p.fixedHeader) != len(want) || p.fixedHeader[0] != want[0] || p.fixedHeader[1] != want[1] {\n\t\tt.Errorf(\"p.fixedHeader => %v, want => %v\", p.fixedHeader, want)\n\t}\n}\n\nfunc TestPUBLISH_setVariableHeader(t *testing.T) {\n\tp := &PUBLISH{\n\t\tQoS: mqtt.QoS1,\n\t\tTopicName: []byte(\"topicName\"),\n\t\tPacketID: 1,\n\t}\n\n\tp.setVariableHeader()\n\n\twant := []byte{0x00, 0x09, 0x74, 0x6F, 0x70, 0x69, 0x63, 0x4E, 0x061, 0x6D, 0x65, 0x00, 0x01}\n\n\tif len(p.variableHeader) != len(want) {\n\t\tt.Errorf(\"p.variableHeader => %v, want => %v\", p.variableHeader, want)\n\t\treturn\n\t}\n\n\tfor i, b := range p.variableHeader {\n\t\tif b != want[i] {\n\t\t\tt.Errorf(\"p.variableHeader => %v, want => %v\", p.variableHeader, want)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestPUBLISH_setPayload(t *testing.T) {\n\tp := &PUBLISH{\n\t\tMessage: []byte{0x00, 0x01},\n\t}\n\n\tp.setPayload()\n\n\tif len(p.payload) != len(p.Message) || p.payload[0] != p.Message[0] || p.payload[1] != p.Message[1] {\n\t\tt.Errorf(\"p.payload => %v, want => %v\", p.payload, p.Message)\n\t}\n}\n\nfunc TestNewPUBLISH_optsNil(t *testing.T) {\n\tif _, err := NewPUBLISH(nil); err != nil {\n\t\tnilErrorExpected(t, err)\n\t}\n}\n\nfunc TestNewPUBLISH_validateErr(t *testing.T) {\n\t_, err := NewPUBLISH(&PUBLISHOptions{\n\t\tQoS: 0x03,\n\t})\n\n\tif err != ErrInvalidQoS {\n\t\tinvalidError(t, err, ErrInvalidQoS)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_fixedHeaderErrInvalidFixedHeaderLen(t *testing.T) {\n\tif err := validatePUBLISHBytes(nil, nil); err != ErrInvalidFixedHeaderLen {\n\t\tinvalidError(t, err, ErrInvalidFixedHeaderLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidFixedHeaderLen(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH << 4}, nil); err != ErrInvalidFixedHeaderLen {\n\t\tinvalidError(t, err, ErrInvalidFixedHeaderLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidPacketType(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypeCONNECT << 4, 0x00}, nil); err != ErrInvalidPacketType {\n\t\tinvalidError(t, err, ErrInvalidPacketType)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidQoS(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x06, 0x00}, nil); err != ErrInvalidQoS {\n\t\tinvalidError(t, err, ErrInvalidQoS)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidRemainingLen(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH << 4, 0x00}, nil); err != ErrInvalidRemainingLen {\n\t\tinvalidError(t, err, ErrInvalidRemainingLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidRemainingLength(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x02, 0x02}, []byte{0x00, 0x00}); err != ErrInvalidRemainingLength {\n\t\tinvalidError(t, err, ErrInvalidRemainingLength)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidPacketID(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x02, 0x07}, []byte{0x00, 0x03, 0x61, 0x2F, 0x62, 0x00, 0x00}); err != ErrInvalidPacketID {\n\t\tinvalidError(t, err, ErrInvalidPacketID)\n\t}\n}\n<commit_msg>Update mqtt\/packet\/publish_test.go<commit_after>package packet\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/yosssi\/gmq\/mqtt\"\n)\n\nfunc TestPUBLISH_setFixedHeader(t *testing.T) {\n\tp := &PUBLISH{\n\t\tDUP: true,\n\t\tretain: true,\n\t}\n\n\tp.variableHeader = []byte{0x00}\n\n\tp.payload = []byte{0x00, 0x00}\n\n\tp.setFixedHeader()\n\n\twant := []byte{0x39, 0x03}\n\n\tif len(p.fixedHeader) != len(want) || p.fixedHeader[0] != want[0] || p.fixedHeader[1] != want[1] {\n\t\tt.Errorf(\"p.fixedHeader => %v, want => %v\", p.fixedHeader, want)\n\t}\n}\n\nfunc TestPUBLISH_setVariableHeader(t *testing.T) {\n\tp := &PUBLISH{\n\t\tQoS: mqtt.QoS1,\n\t\tTopicName: []byte(\"topicName\"),\n\t\tPacketID: 1,\n\t}\n\n\tp.setVariableHeader()\n\n\twant := []byte{0x00, 0x09, 0x74, 0x6F, 0x70, 0x69, 0x63, 0x4E, 0x061, 0x6D, 0x65, 0x00, 0x01}\n\n\tif len(p.variableHeader) != len(want) {\n\t\tt.Errorf(\"p.variableHeader => %v, want => %v\", p.variableHeader, want)\n\t\treturn\n\t}\n\n\tfor i, b := range p.variableHeader {\n\t\tif b != want[i] {\n\t\t\tt.Errorf(\"p.variableHeader => %v, want => %v\", p.variableHeader, want)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestPUBLISH_setPayload(t *testing.T) {\n\tp := &PUBLISH{\n\t\tMessage: []byte{0x00, 0x01},\n\t}\n\n\tp.setPayload()\n\n\tif len(p.payload) != len(p.Message) || p.payload[0] != p.Message[0] || p.payload[1] != p.Message[1] {\n\t\tt.Errorf(\"p.payload => %v, want => %v\", p.payload, p.Message)\n\t}\n}\n\nfunc TestNewPUBLISH_optsNil(t *testing.T) {\n\tif _, err := NewPUBLISH(nil); err != nil {\n\t\tnilErrorExpected(t, err)\n\t}\n}\n\nfunc TestNewPUBLISH_validateErr(t *testing.T) {\n\t_, err := NewPUBLISH(&PUBLISHOptions{\n\t\tQoS: 0x03,\n\t})\n\n\tif err != ErrInvalidQoS {\n\t\tinvalidError(t, err, ErrInvalidQoS)\n\t}\n}\n\nfunc TestNewPUBLISHFromBytes_ErrInvalidFixedHeaderLen(t *testing.T) {\n\tif _, err := NewPUBLISHFromBytes(nil, nil); err != ErrInvalidFixedHeaderLen {\n\t\tinvalidError(t, err, ErrInvalidFixedHeaderLen)\n\t}\n}\n\nfunc TestNewPUBLISHFromBytes(t *testing.T) {\n\tif _, err := NewPUBLISHFromBytes([]byte{TypePUBLISH<<4 | 0x02, 0x07}, []byte{0x00, 0x03, 0x61, 0x2F, 0x62, 0x00, 0x01}); err != nil {\n\t\tnilErrorExpected(t, err)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_fixedHeaderErrInvalidFixedHeaderLen(t *testing.T) {\n\tif err := validatePUBLISHBytes(nil, nil); err != ErrInvalidFixedHeaderLen {\n\t\tinvalidError(t, err, ErrInvalidFixedHeaderLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidFixedHeaderLen(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH << 4}, nil); err != ErrInvalidFixedHeaderLen {\n\t\tinvalidError(t, err, ErrInvalidFixedHeaderLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidPacketType(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypeCONNECT << 4, 0x00}, nil); err != ErrInvalidPacketType {\n\t\tinvalidError(t, err, ErrInvalidPacketType)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidQoS(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x06, 0x00}, nil); err != ErrInvalidQoS {\n\t\tinvalidError(t, err, ErrInvalidQoS)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidRemainingLen(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH << 4, 0x00}, nil); err != ErrInvalidRemainingLen {\n\t\tinvalidError(t, err, ErrInvalidRemainingLen)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidRemainingLength(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x02, 0x02}, []byte{0x00, 0x00}); err != ErrInvalidRemainingLength {\n\t\tinvalidError(t, err, ErrInvalidRemainingLength)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes_ErrInvalidPacketID(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x02, 0x07}, []byte{0x00, 0x03, 0x61, 0x2F, 0x62, 0x00, 0x00}); err != ErrInvalidPacketID {\n\t\tinvalidError(t, err, ErrInvalidPacketID)\n\t}\n}\n\nfunc Test_validatePUBLISHBytes(t *testing.T) {\n\tif err := validatePUBLISHBytes([]byte{TypePUBLISH<<4 | 0x02, 0x07}, []byte{0x00, 0x03, 0x61, 0x2F, 0x62, 0x00, 0x01}); err != nil {\n\t\tnilErrorExpected(t, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage container\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/info\"\n\t\"github.com\/google\/cadvisor\/sampling\"\n)\n\ntype percentilesContainerHandlerWrapper struct {\n\thandler ContainerHandler\n\tcontainerPercentiles *info.ContainerStatsPercentiles\n\tprevStats *info.ContainerStats\n\tnumStats uint64\n\tsampler sampling.Sampler\n\tlock sync.Mutex\n}\n\nfunc (self *percentilesContainerHandlerWrapper) GetSpec() (*info.ContainerSpec, error) {\n\treturn self.handler.GetSpec()\n}\n\nfunc (self *percentilesContainerHandlerWrapper) updatePrevStats(stats *info.ContainerStats) {\n\tif stats == nil || stats.Cpu == nil || stats.Memory == nil {\n\t\t\/\/ discard incomplete stats\n\t\tself.prevStats = nil\n\t\treturn\n\t}\n\tif self.prevStats == nil {\n\t\tself.prevStats = &info.ContainerStats{\n\t\t\tCpu: &info.CpuStats{},\n\t\t\tMemory: &info.MemoryStats{},\n\t\t}\n\t}\n\t\/\/ make a deep copy.\n\tself.prevStats.Timestamp = stats.Timestamp\n\t*self.prevStats.Cpu = *stats.Cpu\n\tself.prevStats.Cpu.Usage.PerCpu = make([]uint64, len(stats.Cpu.Usage.PerCpu))\n\tfor i, perCpu := range stats.Cpu.Usage.PerCpu {\n\t\tself.prevStats.Cpu.Usage.PerCpu[i] = perCpu\n\t}\n\t*self.prevStats.Memory = *stats.Memory\n}\n\nfunc (self *percentilesContainerHandlerWrapper) GetStats() (*info.ContainerStats, error) {\n\tstats, err := self.handler.GetStats()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ nil stats and nil error is possible for \/docker container.\n\tif stats == nil {\n\t\t\/\/ return nil, fmt.Errorf(\"container handler returns a nil error and a nil stats\")\n\t\treturn nil, nil\n\t}\n\tif stats.Timestamp.IsZero() {\n\t\treturn nil, fmt.Errorf(\"container handler did not set timestamp\")\n\t}\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\n\tif self.prevStats != nil {\n\t\tsample, err := info.NewSample(self.prevStats, stats)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"wrong stats: %v\", err)\n\t\t}\n\t\tif sample != nil {\n\t\t\tself.sampler.Update(sample)\n\t\t}\n\t}\n\tself.updatePrevStats(stats)\n\tif self.containerPercentiles == nil {\n\t\tself.containerPercentiles = new(info.ContainerStatsPercentiles)\n\t}\n\tself.numStats++\n\tif stats.Memory != nil {\n\t\tif stats.Memory.Usage > self.containerPercentiles.MaxMemoryUsage {\n\t\t\tself.containerPercentiles.MaxMemoryUsage = stats.Memory.Usage\n\t\t}\n\t}\n\treturn stats, nil\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListContainers(listType ListType) ([]info.ContainerReference, error) {\n\treturn self.handler.ListContainers(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListThreads(listType ListType) ([]int, error) {\n\treturn self.handler.ListThreads(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListProcesses(listType ListType) ([]int, error) {\n\treturn self.handler.ListProcesses(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) StatsSummary() (*info.ContainerStatsPercentiles, error) {\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\tsamples := make([]*info.ContainerStatsSample, 0, self.sampler.Len())\n\tself.sampler.Map(func(d interface{}) {\n\t\tstats := d.(*info.ContainerStatsSample)\n\t\tsamples = append(samples, stats)\n\t})\n\tself.containerPercentiles.Samples = samples\n\t\/\/ XXX(dengnan): probably add to StatsParameter?\n\tself.containerPercentiles.FillPercentiles(\n\t\t[]int{50, 80, 90, 95, 99},\n\t\t[]int{50, 80, 90, 95, 99},\n\t)\n\treturn self.containerPercentiles, nil\n}\n\ntype StatsParameter struct {\n\tSampler string\n\tNumSamples int\n\tWindowSize int\n\tResetPeriod time.Duration\n}\n\nfunc AddStatsSummary(handler ContainerHandler, parameter *StatsParameter) (ContainerHandler, error) {\n\tsampler, err := NewSampler(parameter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &percentilesContainerHandlerWrapper{\n\t\thandler: handler,\n\t\tcontainerPercentiles: &info.ContainerStatsPercentiles{},\n\t\tsampler: sampler,\n\t}, nil\n}\n<commit_msg>error when GetStats() return nil, nil<commit_after>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage container\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/info\"\n\t\"github.com\/google\/cadvisor\/sampling\"\n)\n\ntype percentilesContainerHandlerWrapper struct {\n\thandler ContainerHandler\n\tcontainerPercentiles *info.ContainerStatsPercentiles\n\tprevStats *info.ContainerStats\n\tnumStats uint64\n\tsampler sampling.Sampler\n\tlock sync.Mutex\n}\n\nfunc (self *percentilesContainerHandlerWrapper) GetSpec() (*info.ContainerSpec, error) {\n\treturn self.handler.GetSpec()\n}\n\nfunc (self *percentilesContainerHandlerWrapper) updatePrevStats(stats *info.ContainerStats) {\n\tif stats == nil || stats.Cpu == nil || stats.Memory == nil {\n\t\t\/\/ discard incomplete stats\n\t\tself.prevStats = nil\n\t\treturn\n\t}\n\tif self.prevStats == nil {\n\t\tself.prevStats = &info.ContainerStats{\n\t\t\tCpu: &info.CpuStats{},\n\t\t\tMemory: &info.MemoryStats{},\n\t\t}\n\t}\n\t\/\/ make a deep copy.\n\tself.prevStats.Timestamp = stats.Timestamp\n\t*self.prevStats.Cpu = *stats.Cpu\n\tself.prevStats.Cpu.Usage.PerCpu = make([]uint64, len(stats.Cpu.Usage.PerCpu))\n\tfor i, perCpu := range stats.Cpu.Usage.PerCpu {\n\t\tself.prevStats.Cpu.Usage.PerCpu[i] = perCpu\n\t}\n\t*self.prevStats.Memory = *stats.Memory\n}\n\nfunc (self *percentilesContainerHandlerWrapper) GetStats() (*info.ContainerStats, error) {\n\tstats, err := self.handler.GetStats()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif stats == nil {\n\t\treturn nil, fmt.Errorf(\"container handler returns a nil error and a nil stats\")\n\t}\n\tif stats.Timestamp.IsZero() {\n\t\treturn nil, fmt.Errorf(\"container handler did not set timestamp\")\n\t}\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\n\tif self.prevStats != nil {\n\t\tsample, err := info.NewSample(self.prevStats, stats)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"wrong stats: %v\", err)\n\t\t}\n\t\tif sample != nil {\n\t\t\tself.sampler.Update(sample)\n\t\t}\n\t}\n\tself.updatePrevStats(stats)\n\tif self.containerPercentiles == nil {\n\t\tself.containerPercentiles = new(info.ContainerStatsPercentiles)\n\t}\n\tself.numStats++\n\tif stats.Memory != nil {\n\t\tif stats.Memory.Usage > self.containerPercentiles.MaxMemoryUsage {\n\t\t\tself.containerPercentiles.MaxMemoryUsage = stats.Memory.Usage\n\t\t}\n\t}\n\treturn stats, nil\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListContainers(listType ListType) ([]info.ContainerReference, error) {\n\treturn self.handler.ListContainers(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListThreads(listType ListType) ([]int, error) {\n\treturn self.handler.ListThreads(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) ListProcesses(listType ListType) ([]int, error) {\n\treturn self.handler.ListProcesses(listType)\n}\n\nfunc (self *percentilesContainerHandlerWrapper) StatsSummary() (*info.ContainerStatsPercentiles, error) {\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\tsamples := make([]*info.ContainerStatsSample, 0, self.sampler.Len())\n\tself.sampler.Map(func(d interface{}) {\n\t\tstats := d.(*info.ContainerStatsSample)\n\t\tsamples = append(samples, stats)\n\t})\n\tself.containerPercentiles.Samples = samples\n\t\/\/ XXX(dengnan): probably add to StatsParameter?\n\tself.containerPercentiles.FillPercentiles(\n\t\t[]int{50, 80, 90, 95, 99},\n\t\t[]int{50, 80, 90, 95, 99},\n\t)\n\treturn self.containerPercentiles, nil\n}\n\ntype StatsParameter struct {\n\tSampler string\n\tNumSamples int\n\tWindowSize int\n\tResetPeriod time.Duration\n}\n\nfunc AddStatsSummary(handler ContainerHandler, parameter *StatsParameter) (ContainerHandler, error) {\n\tsampler, err := NewSampler(parameter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &percentilesContainerHandlerWrapper{\n\t\thandler: handler,\n\t\tcontainerPercentiles: &info.ContainerStatsPercentiles{},\n\t\tsampler: sampler,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage unversioned\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\n\/\/ LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements\n\/\/ labels.Selector\nfunc LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {\n\tif ps == nil {\n\t\treturn labels.Nothing(), nil\n\t}\n\tif len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {\n\t\treturn labels.Everything(), nil\n\t}\n\tselector := labels.NewSelector()\n\tfor k, v := range ps.MatchLabels {\n\t\tr, err := labels.NewRequirement(k, labels.InOperator, sets.NewString(v))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\tfor _, expr := range ps.MatchExpressions {\n\t\tvar op labels.Operator\n\t\tswitch expr.Operator {\n\t\tcase LabelSelectorOpIn:\n\t\t\top = labels.InOperator\n\t\tcase LabelSelectorOpNotIn:\n\t\t\top = labels.NotInOperator\n\t\tcase LabelSelectorOpExists:\n\t\t\top = labels.ExistsOperator\n\t\tcase LabelSelectorOpDoesNotExist:\n\t\t\top = labels.DoesNotExistOperator\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%q is not a valid pod selector operator\", expr.Operator)\n\t\t}\n\t\tr, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\treturn selector, nil\n}\n\n\/\/ SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.\nfunc SetAsLabelSelector(ls labels.Set) *LabelSelector {\n\tif ls == nil {\n\t\treturn nil\n\t}\n\n\tselector := &LabelSelector{\n\t\tMatchLabels: make(map[string]string),\n\t}\n\tfor label, value := range ls {\n\t\tselector.MatchLabels[label] = value\n\t}\n\n\treturn selector\n}\n<commit_msg>implement kubectl get\/describe for ReplicaSet<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage unversioned\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n)\n\n\/\/ LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements\n\/\/ labels.Selector\nfunc LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {\n\tif ps == nil {\n\t\treturn labels.Nothing(), nil\n\t}\n\tif len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {\n\t\treturn labels.Everything(), nil\n\t}\n\tselector := labels.NewSelector()\n\tfor k, v := range ps.MatchLabels {\n\t\tr, err := labels.NewRequirement(k, labels.InOperator, sets.NewString(v))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\tfor _, expr := range ps.MatchExpressions {\n\t\tvar op labels.Operator\n\t\tswitch expr.Operator {\n\t\tcase LabelSelectorOpIn:\n\t\t\top = labels.InOperator\n\t\tcase LabelSelectorOpNotIn:\n\t\t\top = labels.NotInOperator\n\t\tcase LabelSelectorOpExists:\n\t\t\top = labels.ExistsOperator\n\t\tcase LabelSelectorOpDoesNotExist:\n\t\t\top = labels.DoesNotExistOperator\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%q is not a valid pod selector operator\", expr.Operator)\n\t\t}\n\t\tr, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\treturn selector, nil\n}\n\n\/\/ SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.\nfunc SetAsLabelSelector(ls labels.Set) *LabelSelector {\n\tif ls == nil {\n\t\treturn nil\n\t}\n\n\tselector := &LabelSelector{\n\t\tMatchLabels: make(map[string]string),\n\t}\n\tfor label, value := range ls {\n\t\tselector.MatchLabels[label] = value\n\t}\n\n\treturn selector\n}\n\n\/\/ FormatLabelSelector convert labelSelector into plain string\nfunc FormatLabelSelector(labelSelector *LabelSelector) string {\n\tvar l string\n\tif selector, err := LabelSelectorAsSelector(labelSelector); err != nil {\n\t\tl = selector.String()\n\t} else {\n\t\tl = \"<error>\"\n\t}\n\tif l == \"\" {\n\t\tl = \"<none>\"\n\t}\n\treturn l\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/templates\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/i18n\"\n\n\t\"github.com\/daviddengcn\/go-colortext\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tlongDescr = templates.LongDesc(`\n Display addresses of the master and services with label kubernetes.io\/cluster-service=true\n To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.`)\n\n\tclusterinfo_example = templates.Examples(`\n\t\t# Print the address of the master and cluster services\n\t\tkubectl cluster-info`)\n)\n\nfunc NewCmdClusterInfo(f cmdutil.Factory, out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster-info\",\n\t\t\/\/ clusterinfo is deprecated.\n\t\tAliases: []string{\"clusterinfo\"},\n\t\tShort: i18n.T(\"Display cluster info\"),\n\t\tLong: longDescr,\n\t\tExample: clusterinfo_example,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunClusterInfo(f, out, cmd)\n\t\t\tcmdutil.CheckErr(err)\n\t\t},\n\t}\n\tcmdutil.AddInclude3rdPartyFlags(cmd)\n\tcmd.AddCommand(NewCmdClusterInfoDump(f, out))\n\treturn cmd\n}\n\nfunc RunClusterInfo(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error {\n\tif len(os.Args) > 1 && os.Args[1] == \"clusterinfo\" {\n\t\tprintDeprecationWarning(\"cluster-info\", \"clusterinfo\")\n\t}\n\n\tclient, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintService(out, \"Kubernetes master\", client.Host)\n\n\tmapper, typer := f.Object()\n\tcmdNamespace := cmdutil.GetFlagString(cmd, \"namespace\")\n\tif cmdNamespace == \"\" {\n\t\tcmdNamespace = metav1.NamespaceSystem\n\t}\n\n\t\/\/ TODO use generalized labels once they are implemented (#341)\n\tb := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).\n\t\tNamespaceParam(cmdNamespace).DefaultNamespace().\n\t\tSelectorParam(\"kubernetes.io\/cluster-service=true\").\n\t\tResourceTypeOrNameArgs(false, []string{\"services\"}...).\n\t\tLatest()\n\tb.Do().Visit(func(r *resource.Info, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservices := r.Object.(*api.ServiceList).Items\n\t\tfor _, service := range services {\n\t\t\tvar link string\n\t\t\tif len(service.Status.LoadBalancer.Ingress) > 0 {\n\t\t\t\tingress := service.Status.LoadBalancer.Ingress[0]\n\t\t\t\tip := ingress.IP\n\t\t\t\tif ip == \"\" {\n\t\t\t\t\tip = ingress.Hostname\n\t\t\t\t}\n\t\t\t\tfor _, port := range service.Spec.Ports {\n\t\t\t\t\tlink += \"http:\/\/\" + ip + \":\" + strconv.Itoa(int(port.Port)) + \" \"\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif len(client.GroupVersion.Group) == 0 {\n\t\t\t\t\tlink = client.Host + \"\/api\/\" + client.GroupVersion.Version + \"\/proxy\/namespaces\/\" + service.ObjectMeta.Namespace + \"\/services\/\" + service.ObjectMeta.Name\n\t\t\t\t} else {\n\t\t\t\t\tlink = client.Host + \"\/api\/\" + client.GroupVersion.Group + \"\/\" + client.GroupVersion.Version + \"\/proxy\/namespaces\/\" + service.ObjectMeta.Namespace + \"\/services\/\" + service.ObjectMeta.Name\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tname := service.ObjectMeta.Labels[\"kubernetes.io\/name\"]\n\t\t\tif len(name) == 0 {\n\t\t\t\tname = service.ObjectMeta.Name\n\t\t\t}\n\t\t\tprintService(out, name, link)\n\t\t}\n\t\treturn nil\n\t})\n\tout.Write([]byte(\"\\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\\n\"))\n\treturn nil\n\n\t\/\/ TODO consider printing more information about cluster\n}\n\nfunc printService(out io.Writer, name, link string) {\n\tct.ChangeColor(ct.Green, false, ct.None, false)\n\tfmt.Fprint(out, name)\n\tct.ResetColor()\n\tfmt.Fprintf(out, \" is running at \")\n\tct.ChangeColor(ct.Yellow, false, ct.None, false)\n\tfmt.Fprint(out, link)\n\tct.ResetColor()\n\tfmt.Fprintln(out, \"\")\n}\n<commit_msg>Switch clusterinfo to print recommended proxy endpoints<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/templates\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/i18n\"\n\n\t\"github.com\/daviddengcn\/go-colortext\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tlongDescr = templates.LongDesc(`\n Display addresses of the master and services with label kubernetes.io\/cluster-service=true\n To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.`)\n\n\tclusterinfo_example = templates.Examples(`\n\t\t# Print the address of the master and cluster services\n\t\tkubectl cluster-info`)\n)\n\nfunc NewCmdClusterInfo(f cmdutil.Factory, out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster-info\",\n\t\t\/\/ clusterinfo is deprecated.\n\t\tAliases: []string{\"clusterinfo\"},\n\t\tShort: i18n.T(\"Display cluster info\"),\n\t\tLong: longDescr,\n\t\tExample: clusterinfo_example,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunClusterInfo(f, out, cmd)\n\t\t\tcmdutil.CheckErr(err)\n\t\t},\n\t}\n\tcmdutil.AddInclude3rdPartyFlags(cmd)\n\tcmd.AddCommand(NewCmdClusterInfoDump(f, out))\n\treturn cmd\n}\n\nfunc RunClusterInfo(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error {\n\tif len(os.Args) > 1 && os.Args[1] == \"clusterinfo\" {\n\t\tprintDeprecationWarning(\"cluster-info\", \"clusterinfo\")\n\t}\n\n\tclient, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintService(out, \"Kubernetes master\", client.Host)\n\n\tmapper, typer := f.Object()\n\tcmdNamespace := cmdutil.GetFlagString(cmd, \"namespace\")\n\tif cmdNamespace == \"\" {\n\t\tcmdNamespace = metav1.NamespaceSystem\n\t}\n\n\t\/\/ TODO use generalized labels once they are implemented (#341)\n\tb := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).\n\t\tNamespaceParam(cmdNamespace).DefaultNamespace().\n\t\tSelectorParam(\"kubernetes.io\/cluster-service=true\").\n\t\tResourceTypeOrNameArgs(false, []string{\"services\"}...).\n\t\tLatest()\n\tb.Do().Visit(func(r *resource.Info, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservices := r.Object.(*api.ServiceList).Items\n\t\tfor _, service := range services {\n\t\t\tvar link string\n\t\t\tif len(service.Status.LoadBalancer.Ingress) > 0 {\n\t\t\t\tingress := service.Status.LoadBalancer.Ingress[0]\n\t\t\t\tip := ingress.IP\n\t\t\t\tif ip == \"\" {\n\t\t\t\t\tip = ingress.Hostname\n\t\t\t\t}\n\t\t\t\tfor _, port := range service.Spec.Ports {\n\t\t\t\t\tlink += \"http:\/\/\" + ip + \":\" + strconv.Itoa(int(port.Port)) + \" \"\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif len(client.GroupVersion.Group) == 0 {\n\t\t\t\t\tlink = client.Host + \"\/api\/\" + client.GroupVersion.Version + \"\/namespaces\/\" + service.ObjectMeta.Namespace + \"\/services\/\" + service.ObjectMeta.Name + \"\/proxy\"\n\t\t\t\t} else {\n\t\t\t\t\tlink = client.Host + \"\/api\/\" + client.GroupVersion.Group + \"\/\" + client.GroupVersion.Version + \"\/namespaces\/\" + service.ObjectMeta.Namespace + \"\/services\/\" + service.ObjectMeta.Name + \"\/proxy\"\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tname := service.ObjectMeta.Labels[\"kubernetes.io\/name\"]\n\t\t\tif len(name) == 0 {\n\t\t\t\tname = service.ObjectMeta.Name\n\t\t\t}\n\t\t\tprintService(out, name, link)\n\t\t}\n\t\treturn nil\n\t})\n\tout.Write([]byte(\"\\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\\n\"))\n\treturn nil\n\n\t\/\/ TODO consider printing more information about cluster\n}\n\nfunc printService(out io.Writer, name, link string) {\n\tct.ChangeColor(ct.Green, false, ct.None, false)\n\tfmt.Fprint(out, name)\n\tct.ResetColor()\n\tfmt.Fprintf(out, \" is running at \")\n\tct.ChangeColor(ct.Yellow, false, ct.None, false)\n\tfmt.Fprint(out, link)\n\tct.ResetColor()\n\tfmt.Fprintln(out, \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype User struct {\n\tnick string\n\tuser string\n\tident string\n\tdead bool\n\tnickset bool\n\tconnection net.Conn\n\tid int\n\trealname string\n\tuserset bool\n\tregistered bool\n\tip string\n\thost string\n\tepoch time.Time\n\tchanlist map[string]*Channel\n}\n\nfunc (user *User) QuitCommandHandler(args []string) {\n\tvar reason string\n\tif len(args) > 1 {\n\t\targs[1] = StripLeading(args[1], \":\")\n\t\tvar buffer bytes.Buffer\n\t\tfor i := 1; i < len(args); i++ {\n\t\t\tbuffer.WriteString(args[i])\n\t\t\tbuffer.WriteString(\" \")\n\t\t}\n\t\treason = strings.TrimSpace(buffer.String())\n\t} else {\n\t\treason = \"Leaving\"\n\t}\n\tuser.Quit(reason)\n}\n\nfunc (user *User) Quit(reason string) {\n\ttargets := []*User{user}\n\tfor _, k := range user.chanlist {\n\t\ttargets = append(targets, k.GetUserList()...)\n\t\tdelete(k.userlist, user.id)\n\t\tdelete(user.chanlist, k.name)\n\t\tk.ShouldIDie()\n\t}\n\tSendToMany(fmt.Sprintf(\":%s QUIT :%s\", user.GetHostMask(), reason), targets)\n\tuser.SendLine(fmt.Sprintf(\"ERROR :Closing Link: %s (%s)\", user.host, reason))\n\tuser.dead = true\n\tif user.connection != nil {\n\t\tuser.connection.Close()\n\t}\n\tdelete(userlist, user.id)\n}\n\nfunc (user *User) FireNumeric(numeric int, args ...interface{}) {\n\tmsg := strcat(fmt.Sprintf(\":%s %.3d %s \", sname, numeric, user.nick), fmt.Sprintf(NUM[numeric], args...))\n\tuser.SendLine(msg)\n}\n\nfunc NewUser(conn net.Conn) *User {\n\tuserip := GetIpFromConn(conn)\n\tlog.Println(\"New connection from\", userip)\n\tcounter = counter + 1\n\tuser := &User{id: counter, connection: conn, ip: userip, nick: \"*\"}\n\tuser.chanlist = make(map[string]*Channel)\n\tuser.host = user.ip\n\tuser.epoch = time.Now()\n\tuserlist[user.id] = user\n\tgo user.UserHostLookup()\n\treturn user\n}\n\nfunc (user *User) SendLine(msg string) {\n\tmsg = fmt.Sprintf(\"%s\\n\", msg)\n\tif user.dead {\n\t\treturn\n\t}\n\t_, err := user.connection.Write([]byte(msg))\n\tif err != nil {\n\t\tuser.dead = true\n\t\tuser.Quit(\"Error\")\n\t\tlog.Printf(\"Error sending message to %s, disconnecting\\n\", user.nick)\n\t}\n\tlog.Printf(\"Send to %s: %s\", user.nick, msg)\n}\n\nfunc (user *User) HandleRequests() {\n\tb := bufio.NewReader(user.connection)\n\tfor {\n\t\tif user.dead {\n\t\t\tbreak\n\t\t}\n\t\tline, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error reading:\", err.Error())\n\t\t\tuser.dead = true\n\t\t\tuser.Quit(\"Error\")\n\t\t}\n\t\tif line == \"\" {\n\t\t\tuser.dead = true\n\t\t\tuser.Quit(\"Error\")\n\t\t\tbreak\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tlog.Println(\"Receive from\", fmt.Sprintf(\"%s:\", user.nick), line)\n\t\tgo ProcessLine(user, line)\n\t}\n}\nfunc (user *User) NickHandler(args []string) {\n\tif len(args) < 2 {\n\t\tuser.FireNumeric(ERR_NONICKNAMEGIVEN)\n\t\treturn\n\t}\n\tif GetUserByNick(args[1]) != nil {\n\t\tuser.FireNumeric(ERR_NICKNAMEINUSE, args[1])\n\t\treturn\n\t}\n\tif !user.nickset {\n\t\tuser.nickset = true\n\t} else if user.registered {\n\t\ttargets := []*User{}\n\t\ttargets = append(targets, user)\n\t\tfor _, k := range user.chanlist {\n\t\t\ttargets = append(targets, k.GetUserList()...)\n\t\t}\n\t\tSendToMany(fmt.Sprintf(\":%s NICK %s\", user.GetHostMask(), args[1]), targets)\n\t}\n\tuser.nick = args[1]\n\tif !user.registered && user.userset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserHandler(args []string) {\n\tif len(args) < 5 {\n\t\t\/\/ERR_NEEDMOREPARAMS\n\t\treturn\n\t}\n\tuser.ident = args[1]\n\targs[4] = StripLeading(args[4], \":\")\n\tvar buffer bytes.Buffer\n\tfor i := 4; i < len(args); i++ {\n\t\tbuffer.WriteString(args[i])\n\t\tbuffer.WriteString(\" \")\n\t}\n\tuser.realname = strings.TrimSpace(buffer.String())\n\tuser.userset = true\n\tif !user.registered && user.nickset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserRegistrationFinished() {\n\tuser.registered = true\n\tlog.Printf(\"User %d finished registration\\n\", user.id)\n\tuser.FireNumeric(RPL_WELCOME, user.nick, user.ident, user.host)\n\tuser.FireNumeric(RPL_YOURHOST, sname, software, softwarev)\n\tuser.FireNumeric(RPL_CREATED, epoch)\n\t\/\/TODO fire RPL_MYINFO when we actually have enough stuff to do it\n}\n\nfunc (user *User) UserHostLookup() {\n\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Looking up your hostname...\", sname, user.nick))\n\tadds, err := net.LookupAddr(user.ip)\n\tif err != nil {\n\t\tuser.SendLine(fmt.Sprintf(\"%s NOTICE %s :*** Unable to resolve your hostname\", sname, user.nick))\n\t\treturn\n\t}\n\taddstring := adds[0]\n\tadds, err = net.LookupHost(addstring)\n\tif err != nil {\n\t\tuser.SendLine(fmt.Sprintf(\"%s NOTICE %s :*** Unable to resolve your hostname\", sname, user.nick))\n\t\treturn\n\t}\n\tfor _, k := range adds {\n\t\tif user.ip == k {\n\t\t\tuser.host = addstring\n\t\t\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Found your hostname\", sname, user.nick))\n\t\t\treturn\n\t\t}\n\t}\n\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Your forward and reverse DNS do not match, ignoring hostname\", sname, user.nick))\n}\n\nfunc (user *User) CommandNotFound(args []string) {\n\tuser.FireNumeric(ERR_UNKNOWNCOMMAND, args[0])\n}\n\nfunc (user *User) GetHostMask() string {\n\treturn fmt.Sprintf(\"%s!%s@%s\", user.nick, user.ident, user.host)\n}\n\nfunc (user *User) JoinHandler(args []string) {\n\tif len(args) < 2 {\n\t\tuser.FireNumeric(ERR_NEEDMOREPARAMS, \"JOIN\")\n\t\treturn\n\t}\n\tif !ValidChanName(args[1]) {\n\t\tuser.FireNumeric(ERR_NOSUCHCHANNEL, args[1])\n\t\treturn\n\t}\n\t_, channel := GetChannelByName(args[1])\n\tchannel.JoinUser(user)\n\tuser.chanlist[channel.name] = channel\n}\n\nfunc (user *User) PrivmsgHandler(args []string) {\n\tif len(args) < 3 {\n\t\tuser.FireNumeric(ERR_NEEDMOREPARAMS, \"PRIVMSG\")\n\t\treturn\n\t}\n\tif ValidChanName(args[1]) { \/\/TODO part of this should be sent to the channel \"object\"\n\t\t\/\/presumably a channel\n\t\tk, j := GetChannelByName(args[1])\n\t\tif k {\n\t\t\t\/\/channel exists, send the message\n\t\t\tmsg := FormatMessageArgs(args)\n\t\t\tlist := j.GetUserList()\n\t\t\tfor _, l := range list {\n\t\t\t\tif l != user {\n\t\t\t\t\tl.SendLine(fmt.Sprintf(\":%s PRIVMSG %s :%s\", user.GetHostMask(), j.name, msg))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\t\/\/channel didnt exist but get channel by name makes one anyways, lets kill it...\n\t\t\tuser.FireNumeric(ERR_NOSUCHCHANNEL, args[1])\n\t\t\tj.ShouldIDie()\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/maybe its a user\n\t\ttarget := GetUserByNick(args[1])\n\t\tif target != nil {\n\t\t\tmsg := FormatMessageArgs(args)\n\t\t\ttarget.SendLine(fmt.Sprint(\":%s PRIVMSG %s :%s\", user.GetHostMask(), target.nick, msg))\n\t\t}\n\t}\n}\n<commit_msg>fire ERR_NEEDMOREPARAMS when USER doesn't have enough commands<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype User struct {\n\tnick string\n\tuser string\n\tident string\n\tdead bool\n\tnickset bool\n\tconnection net.Conn\n\tid int\n\trealname string\n\tuserset bool\n\tregistered bool\n\tip string\n\thost string\n\tepoch time.Time\n\tchanlist map[string]*Channel\n}\n\nfunc (user *User) QuitCommandHandler(args []string) {\n\tvar reason string\n\tif len(args) > 1 {\n\t\targs[1] = StripLeading(args[1], \":\")\n\t\tvar buffer bytes.Buffer\n\t\tfor i := 1; i < len(args); i++ {\n\t\t\tbuffer.WriteString(args[i])\n\t\t\tbuffer.WriteString(\" \")\n\t\t}\n\t\treason = strings.TrimSpace(buffer.String())\n\t} else {\n\t\treason = \"Leaving\"\n\t}\n\tuser.Quit(reason)\n}\n\nfunc (user *User) Quit(reason string) {\n\ttargets := []*User{user}\n\tfor _, k := range user.chanlist {\n\t\ttargets = append(targets, k.GetUserList()...)\n\t\tdelete(k.userlist, user.id)\n\t\tdelete(user.chanlist, k.name)\n\t\tk.ShouldIDie()\n\t}\n\tSendToMany(fmt.Sprintf(\":%s QUIT :%s\", user.GetHostMask(), reason), targets)\n\tuser.SendLine(fmt.Sprintf(\"ERROR :Closing Link: %s (%s)\", user.host, reason))\n\tuser.dead = true\n\tif user.connection != nil {\n\t\tuser.connection.Close()\n\t}\n\tdelete(userlist, user.id)\n}\n\nfunc (user *User) FireNumeric(numeric int, args ...interface{}) {\n\tmsg := strcat(fmt.Sprintf(\":%s %.3d %s \", sname, numeric, user.nick), fmt.Sprintf(NUM[numeric], args...))\n\tuser.SendLine(msg)\n}\n\nfunc NewUser(conn net.Conn) *User {\n\tuserip := GetIpFromConn(conn)\n\tlog.Println(\"New connection from\", userip)\n\tcounter = counter + 1\n\tuser := &User{id: counter, connection: conn, ip: userip, nick: \"*\"}\n\tuser.chanlist = make(map[string]*Channel)\n\tuser.host = user.ip\n\tuser.epoch = time.Now()\n\tuserlist[user.id] = user\n\tgo user.UserHostLookup()\n\treturn user\n}\n\nfunc (user *User) SendLine(msg string) {\n\tmsg = fmt.Sprintf(\"%s\\n\", msg)\n\tif user.dead {\n\t\treturn\n\t}\n\t_, err := user.connection.Write([]byte(msg))\n\tif err != nil {\n\t\tuser.dead = true\n\t\tuser.Quit(\"Error\")\n\t\tlog.Printf(\"Error sending message to %s, disconnecting\\n\", user.nick)\n\t}\n\tlog.Printf(\"Send to %s: %s\", user.nick, msg)\n}\n\nfunc (user *User) HandleRequests() {\n\tb := bufio.NewReader(user.connection)\n\tfor {\n\t\tif user.dead {\n\t\t\tbreak\n\t\t}\n\t\tline, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error reading:\", err.Error())\n\t\t\tuser.dead = true\n\t\t\tuser.Quit(\"Error\")\n\t\t}\n\t\tif line == \"\" {\n\t\t\tuser.dead = true\n\t\t\tuser.Quit(\"Error\")\n\t\t\tbreak\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tlog.Println(\"Receive from\", fmt.Sprintf(\"%s:\", user.nick), line)\n\t\tgo ProcessLine(user, line)\n\t}\n}\nfunc (user *User) NickHandler(args []string) {\n\tif len(args) < 2 {\n\t\tuser.FireNumeric(ERR_NONICKNAMEGIVEN)\n\t\treturn\n\t}\n\tif GetUserByNick(args[1]) != nil {\n\t\tuser.FireNumeric(ERR_NICKNAMEINUSE, args[1])\n\t\treturn\n\t}\n\tif !user.nickset {\n\t\tuser.nickset = true\n\t} else if user.registered {\n\t\ttargets := []*User{}\n\t\ttargets = append(targets, user)\n\t\tfor _, k := range user.chanlist {\n\t\t\ttargets = append(targets, k.GetUserList()...)\n\t\t}\n\t\tSendToMany(fmt.Sprintf(\":%s NICK %s\", user.GetHostMask(), args[1]), targets)\n\t}\n\tuser.nick = args[1]\n\tif !user.registered && user.userset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserHandler(args []string) {\n\tif len(args) < 5 {\n\t\tuser.FireNumeric(ERR_NEEDMOREPARAMS, \"USER\")\n\t\treturn\n\t}\n\tuser.ident = args[1]\n\targs[4] = StripLeading(args[4], \":\")\n\tvar buffer bytes.Buffer\n\tfor i := 4; i < len(args); i++ {\n\t\tbuffer.WriteString(args[i])\n\t\tbuffer.WriteString(\" \")\n\t}\n\tuser.realname = strings.TrimSpace(buffer.String())\n\tuser.userset = true\n\tif !user.registered && user.nickset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserRegistrationFinished() {\n\tuser.registered = true\n\tlog.Printf(\"User %d finished registration\\n\", user.id)\n\tuser.FireNumeric(RPL_WELCOME, user.nick, user.ident, user.host)\n\tuser.FireNumeric(RPL_YOURHOST, sname, software, softwarev)\n\tuser.FireNumeric(RPL_CREATED, epoch)\n\t\/\/TODO fire RPL_MYINFO when we actually have enough stuff to do it\n}\n\nfunc (user *User) UserHostLookup() {\n\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Looking up your hostname...\", sname, user.nick))\n\tadds, err := net.LookupAddr(user.ip)\n\tif err != nil {\n\t\tuser.SendLine(fmt.Sprintf(\"%s NOTICE %s :*** Unable to resolve your hostname\", sname, user.nick))\n\t\treturn\n\t}\n\taddstring := adds[0]\n\tadds, err = net.LookupHost(addstring)\n\tif err != nil {\n\t\tuser.SendLine(fmt.Sprintf(\"%s NOTICE %s :*** Unable to resolve your hostname\", sname, user.nick))\n\t\treturn\n\t}\n\tfor _, k := range adds {\n\t\tif user.ip == k {\n\t\t\tuser.host = addstring\n\t\t\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Found your hostname\", sname, user.nick))\n\t\t\treturn\n\t\t}\n\t}\n\tuser.SendLine(fmt.Sprintf(\":%s NOTICE %s :*** Your forward and reverse DNS do not match, ignoring hostname\", sname, user.nick))\n}\n\nfunc (user *User) CommandNotFound(args []string) {\n\tuser.FireNumeric(ERR_UNKNOWNCOMMAND, args[0])\n}\n\nfunc (user *User) GetHostMask() string {\n\treturn fmt.Sprintf(\"%s!%s@%s\", user.nick, user.ident, user.host)\n}\n\nfunc (user *User) JoinHandler(args []string) {\n\tif len(args) < 2 {\n\t\tuser.FireNumeric(ERR_NEEDMOREPARAMS, \"JOIN\")\n\t\treturn\n\t}\n\tif !ValidChanName(args[1]) {\n\t\tuser.FireNumeric(ERR_NOSUCHCHANNEL, args[1])\n\t\treturn\n\t}\n\t_, channel := GetChannelByName(args[1])\n\tchannel.JoinUser(user)\n\tuser.chanlist[channel.name] = channel\n}\n\nfunc (user *User) PrivmsgHandler(args []string) {\n\tif len(args) < 3 {\n\t\tuser.FireNumeric(ERR_NEEDMOREPARAMS, \"PRIVMSG\")\n\t\treturn\n\t}\n\tif ValidChanName(args[1]) { \/\/TODO part of this should be sent to the channel \"object\"\n\t\t\/\/presumably a channel\n\t\tk, j := GetChannelByName(args[1])\n\t\tif k {\n\t\t\t\/\/channel exists, send the message\n\t\t\tmsg := FormatMessageArgs(args)\n\t\t\tlist := j.GetUserList()\n\t\t\tfor _, l := range list {\n\t\t\t\tif l != user {\n\t\t\t\t\tl.SendLine(fmt.Sprintf(\":%s PRIVMSG %s :%s\", user.GetHostMask(), j.name, msg))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\t\/\/channel didnt exist but get channel by name makes one anyways, lets kill it...\n\t\t\tuser.FireNumeric(ERR_NOSUCHCHANNEL, args[1])\n\t\t\tj.ShouldIDie()\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/maybe its a user\n\t\ttarget := GetUserByNick(args[1])\n\t\tif target != nil {\n\t\t\tmsg := FormatMessageArgs(args)\n\t\t\ttarget.SendLine(fmt.Sprint(\":%s PRIVMSG %s :%s\", user.GetHostMask(), target.nick, msg))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n)\n\ntype User struct {\n\tnick string\n\tuser string\n\tident string\n\tip string\n\tdead bool\n\tnickset bool\n\tconnection net.Conn\n\tid int\n\trealname string\n\tuserset bool\n\tregistered bool\n\thost string\n}\n\nfunc (user *User) Quit() {\n\tuser.dead = true\n\tif user.connection != nil {\n\t\tuser.connection.Close()\n\t}\n\tdelete(userlist, user.id)\n}\n\nfunc (user *User) FireNumeric(numeric int, args ...interface{}) {\n\tmsg := strcat(fmt.Sprintf(\":%s %.3d \", \"test.net.local\", numeric), fmt.Sprintf(NUM[numeric], args...))\n\tuser.SendLine(msg)\n}\n\nfunc NewUser(conn net.Conn) User {\n\tcounter = counter + 1\n\tuser := User{id: counter, connection: conn}\n\tuser.host = \"lol\"\n\tAddUserToList(user)\n\treturn user\n}\n\nfunc (user *User) SendLine(msg string) {\n\tmsg = fmt.Sprintf(\"%s\\n\", msg)\n\tuser.connection.Write([]byte(msg))\n}\n\nfunc (user *User) HandleRequests() {\n\tb := bufio.NewReader(user.connection)\n\tfor {\n\t\tif user.dead {\n\t\t\tbreak\n\t\t}\n\t\tline, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error reading:\", err.Error())\n\t\t\tuser.Quit()\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tfmt.Println(\"Received Line: \", line)\n\t\t\/\/ Send a response back to person contacting us.\n\t\tgo ProcessLine(user, line)\n\t}\n}\nfunc (user *User) NickHandler(args []string) {\n\tif CheckNickCollision(args[1]) != false {\n\t\treturn \/\/TODO handle properly\n\t}\n\tif !user.nickset {\n\t\tuser.nickset = true\n\t}\n\tuser.nick = args[1]\n\tfmt.Println(\"User changed name to\", args[1])\n\tif !user.registered && user.userset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserHandler(args []string) {\n\tif len(args) < 5 {\n\t\t\/\/ERR_NEEDMOREPARAMS\n\t\treturn\n\t}\n\tuser.ident = args[1]\n\tif strings.HasPrefix(args[4], \":\") {\n\t\targs[4] = strings.Replace(args[4], \":\", \"\", 1)\n\t}\n\tvar buffer bytes.Buffer\n\tfor i := 4; i < len(args); i++ {\n\t\tbuffer.WriteString(args[i])\n\t\tbuffer.WriteString(\" \")\n\t}\n\tuser.realname = strings.TrimSpace(buffer.String())\n\tuser.userset = true\n\tif !user.registered && user.nickset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserRegistrationFinished() {\n\tuser.registered = true\n\tfmt.Printf(\"User %d finished registration\\n\", user.id)\n\tuser.FireNumeric(RPL_WELCOME, user.nick, user.ident, user.host)\n}\n<commit_msg>remove unecessary threading<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n)\n\ntype User struct {\n\tnick string\n\tuser string\n\tident string\n\tip string\n\tdead bool\n\tnickset bool\n\tconnection net.Conn\n\tid int\n\trealname string\n\tuserset bool\n\tregistered bool\n\thost string\n}\n\nfunc (user *User) Quit() {\n\tuser.dead = true\n\tif user.connection != nil {\n\t\tuser.connection.Close()\n\t}\n\tdelete(userlist, user.id)\n}\n\nfunc (user *User) FireNumeric(numeric int, args ...interface{}) {\n\tmsg := strcat(fmt.Sprintf(\":%s %.3d \", \"test.net.local\", numeric), fmt.Sprintf(NUM[numeric], args...))\n\tuser.SendLine(msg)\n}\n\nfunc NewUser(conn net.Conn) User {\n\tcounter = counter + 1\n\tuser := User{id: counter, connection: conn}\n\tuser.host = \"lol\"\n\tAddUserToList(user)\n\treturn user\n}\n\nfunc (user *User) SendLine(msg string) {\n\tmsg = fmt.Sprintf(\"%s\\n\", msg)\n\tuser.connection.Write([]byte(msg))\n}\n\nfunc (user *User) HandleRequests() {\n\tb := bufio.NewReader(user.connection)\n\tfor {\n\t\tif user.dead {\n\t\t\tbreak\n\t\t}\n\t\tline, err := b.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error reading:\", err.Error())\n\t\t\tuser.Quit()\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tfmt.Println(\"Received Line: \", line)\n\t\tProcessLine(user, line)\n\t}\n}\nfunc (user *User) NickHandler(args []string) {\n\tif CheckNickCollision(args[1]) != false {\n\t\treturn \/\/TODO handle properly\n\t}\n\tif !user.nickset {\n\t\tuser.nickset = true\n\t}\n\tuser.nick = args[1]\n\tfmt.Println(\"User changed name to\", args[1])\n\tif !user.registered && user.userset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserHandler(args []string) {\n\tif len(args) < 5 {\n\t\t\/\/ERR_NEEDMOREPARAMS\n\t\treturn\n\t}\n\tuser.ident = args[1]\n\tif strings.HasPrefix(args[4], \":\") {\n\t\targs[4] = strings.Replace(args[4], \":\", \"\", 1)\n\t}\n\tvar buffer bytes.Buffer\n\tfor i := 4; i < len(args); i++ {\n\t\tbuffer.WriteString(args[i])\n\t\tbuffer.WriteString(\" \")\n\t}\n\tuser.realname = strings.TrimSpace(buffer.String())\n\tuser.userset = true\n\tif !user.registered && user.nickset {\n\t\tuser.UserRegistrationFinished()\n\t}\n}\n\nfunc (user *User) UserRegistrationFinished() {\n\tuser.registered = true\n\tfmt.Printf(\"User %d finished registration\\n\", user.id)\n\tuser.FireNumeric(RPL_WELCOME, user.nick, user.ident, user.host)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage event\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/validation\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n\tutilvalidation \"k8s.io\/kubernetes\/pkg\/util\/validation\"\n)\n\ntype eventStrategy struct {\n\truntime.ObjectTyper\n\tapi.NameGenerator\n}\n\n\/\/ Strategy is the default logic that pplies when creating and updating\n\/\/ Event objects via the REST API.\nvar Strategy = eventStrategy{api.Scheme, api.SimpleNameGenerator}\n\nfunc (eventStrategy) NamespaceScoped() bool {\n\treturn true\n}\n\nfunc (eventStrategy) PrepareForCreate(obj runtime.Object) {\n}\n\nfunc (eventStrategy) PrepareForUpdate(obj, old runtime.Object) {\n}\n\nfunc (eventStrategy) Validate(ctx api.Context, obj runtime.Object) utilvalidation.ErrorList {\n\tevent := obj.(*api.Event)\n\treturn validation.ValidateEvent(event)\n}\n\n\/\/ Canonicalize normalizes the object after validation.\nfunc (eventStrategy) Canonicalize(obj runtime.Object) {\n}\n\nfunc (eventStrategy) AllowCreateOnUpdate() bool {\n\treturn true\n}\n\nfunc (eventStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) utilvalidation.ErrorList {\n\tevent := obj.(*api.Event)\n\treturn validation.ValidateEvent(event)\n}\n\nfunc (eventStrategy) AllowUnconditionalUpdate() bool {\n\treturn true\n}\n\nfunc MatchEvent(label labels.Selector, field fields.Selector) generic.Matcher {\n\treturn &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: getAttrs}\n}\n\nfunc getAttrs(obj runtime.Object) (objLabels labels.Set, objFields fields.Set, err error) {\n\tevent, ok := obj.(*api.Event)\n\tif !ok {\n\t\treturn nil, nil, errors.NewInternalError(fmt.Errorf(\"object is not of type event: %#v\", obj))\n\t}\n\tl := event.Labels\n\tif l == nil {\n\t\tl = labels.Set{}\n\t}\n\n\tobjectMetaFieldsSet := generic.ObjectMetaFieldsSet(event.ObjectMeta, true)\n\tspecificFieldsSet := fields.Set{\n\t\t\"involvedObject.kind\": event.InvolvedObject.Kind,\n\t\t\"involvedObject.namespace\": event.InvolvedObject.Namespace,\n\t\t\"involvedObject.name\": event.InvolvedObject.Name,\n\t\t\"involvedObject.uid\": string(event.InvolvedObject.UID),\n\t\t\"involvedObject.apiVersion\": event.InvolvedObject.APIVersion,\n\t\t\"involvedObject.resourceVersion\": fmt.Sprintf(\"%s\", event.InvolvedObject.ResourceVersion),\n\t\t\"involvedObject.fieldPath\": event.InvolvedObject.FieldPath,\n\t\t\"reason\": event.Reason,\n\t\t\"source\": event.Source.Component,\n\t}\n\treturn l, generic.MergeFieldsSets(objectMetaFieldsSet, specificFieldsSet), nil\n}\n<commit_msg>remove unnecessary fmt.Printf()<commit_after>\/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage event\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/errors\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/validation\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/generic\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n\tutilvalidation \"k8s.io\/kubernetes\/pkg\/util\/validation\"\n)\n\ntype eventStrategy struct {\n\truntime.ObjectTyper\n\tapi.NameGenerator\n}\n\n\/\/ Strategy is the default logic that pplies when creating and updating\n\/\/ Event objects via the REST API.\nvar Strategy = eventStrategy{api.Scheme, api.SimpleNameGenerator}\n\nfunc (eventStrategy) NamespaceScoped() bool {\n\treturn true\n}\n\nfunc (eventStrategy) PrepareForCreate(obj runtime.Object) {\n}\n\nfunc (eventStrategy) PrepareForUpdate(obj, old runtime.Object) {\n}\n\nfunc (eventStrategy) Validate(ctx api.Context, obj runtime.Object) utilvalidation.ErrorList {\n\tevent := obj.(*api.Event)\n\treturn validation.ValidateEvent(event)\n}\n\n\/\/ Canonicalize normalizes the object after validation.\nfunc (eventStrategy) Canonicalize(obj runtime.Object) {\n}\n\nfunc (eventStrategy) AllowCreateOnUpdate() bool {\n\treturn true\n}\n\nfunc (eventStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) utilvalidation.ErrorList {\n\tevent := obj.(*api.Event)\n\treturn validation.ValidateEvent(event)\n}\n\nfunc (eventStrategy) AllowUnconditionalUpdate() bool {\n\treturn true\n}\n\nfunc MatchEvent(label labels.Selector, field fields.Selector) generic.Matcher {\n\treturn &generic.SelectionPredicate{Label: label, Field: field, GetAttrs: getAttrs}\n}\n\nfunc getAttrs(obj runtime.Object) (objLabels labels.Set, objFields fields.Set, err error) {\n\tevent, ok := obj.(*api.Event)\n\tif !ok {\n\t\treturn nil, nil, errors.NewInternalError(fmt.Errorf(\"object is not of type event: %#v\", obj))\n\t}\n\tl := event.Labels\n\tif l == nil {\n\t\tl = labels.Set{}\n\t}\n\n\tobjectMetaFieldsSet := generic.ObjectMetaFieldsSet(event.ObjectMeta, true)\n\tspecificFieldsSet := fields.Set{\n\t\t\"involvedObject.kind\": event.InvolvedObject.Kind,\n\t\t\"involvedObject.namespace\": event.InvolvedObject.Namespace,\n\t\t\"involvedObject.name\": event.InvolvedObject.Name,\n\t\t\"involvedObject.uid\": string(event.InvolvedObject.UID),\n\t\t\"involvedObject.apiVersion\": event.InvolvedObject.APIVersion,\n\t\t\"involvedObject.resourceVersion\": event.InvolvedObject.ResourceVersion,\n\t\t\"involvedObject.fieldPath\": event.InvolvedObject.FieldPath,\n\t\t\"reason\": event.Reason,\n\t\t\"source\": event.Source.Component,\n\t}\n\treturn l, generic.MergeFieldsSets(objectMetaFieldsSet, specificFieldsSet), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nconst (\n\ttestPath = \"\/dev\/shm\/info.json\"\n)\n\nfunc newInfo() *Info {\n\treturn NewInfo(testPath)\n}\n\nfunc rmInfo() {\n\tos.Remove(testPath)\n}\n\nfunc TestInfoSave(t *testing.T) {\n\tinfo := newInfo()\n\tdefer rmInfo()\n\n\terr := info.Save()\n\tif err != nil {\n\t\tt.Fatalf(\"Save() failed\")\n\t}\n\n\tst, err := os.Stat(testPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not stat saved file\")\n\t}\n\n\tif st == nil {\n\t\tt.Fatalf(\"Save() failed to write a file\")\n\t}\n\n\tpermissions := st.Mode().Perm()\n\tif permissions&077 != 0 {\n\t\tt.Fatalf(\"File mode is too permissive, got %#o\", st.Mode())\n\t}\n}\n\nfunc TestInfoSaveFail(t *testing.T) {\n\ti := NewInfo(\"\/nonexisting\/path\/something.json\")\n\terr := i.Save()\n\tif err == nil {\n\t\tt.Fatalf(\"Save() failed to detect error\")\n\t}\n}\n\nfunc TestInfoLoadFail(t *testing.T) {\n\tdefer rmInfo()\n\n\ti := &Info{\n\t\tpath: testPath,\n\t}\n\n\terr := i.Load()\n\tif err == nil {\n\t\tt.Fatalf(\"Load() failed to detect file read error\")\n\t}\n\n\t\/\/ Write some broken JSON.\n\tioutil.WriteFile(testPath, []byte(\"NOT JSON\"), 0600)\n\terr = i.Load()\n\tif err == nil {\n\t\tt.Fatalf(\"Load() failed to detect broken JSON\")\n\t}\n}\n\nfunc TestInfoPeers(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\tcases := [][]string{\n\t\tnil,\n\t\t{},\n\t\t{\"a\"},\n\t\t{\"a\", \"b\"},\n\t}\n\n\tfor _, c := range cases {\n\t\terr := i.SetPeers(c)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"SetPeers() failed: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ Let's have a roundtrip to disk.\n\t\ti.Save()\n\t\ti.PeerList = nil\n\t\ti.Load()\n\n\t\tpeers, err := i.Peers()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Peers() failed: %s\", err.Error())\n\t\t}\n\n\t\tif !reflect.DeepEqual(c, peers) {\n\t\t\tt.Fatalf(\"Peers() did not return what we gave SetPeers()\")\n\t\t}\n\t}\n}\n\nfunc TestInfoSelf(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\tcases := []string{\n\t\t\"a\",\n\t\t\"\",\n\t\t\"\\n\",\n\t}\n\n\tfor _, c := range cases {\n\t\terr := i.SetSelf(c)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"SetSelf() failed: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ Let's have a roundtrip to disk.\n\t\ti.Save()\n\t\ti.SelfName = \"NOT THIS\"\n\t\ti.Load()\n\n\t\tself := i.Self()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Self() failed: %s\", err.Error())\n\t\t}\n\n\t\tif c != self {\n\t\t\tt.Fatalf(\"Self() failed to return what we gave SetSelf()\")\n\t\t}\n\t}\n}\n\nfunc TestInfoIP(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\ti.SetSelf(\"go-test-target-v4.gansoi-dev.com\")\n\tip := i.IP()\n\tif ip == nil {\n\t\tt.Fatalf(\"Failed to resolve IPv4 , got %s\", ip)\n\t}\n\n\tif ip.String() != \"198.51.100.1\" {\n\t\tt.Fatalf(\"Failed to resolve self to IP, got %s\", ip.String())\n\t}\n\n\ti.SetSelf(\"go-test-target-v6.gansoi-dev.com\")\n\tip = i.IP()\n\tif ip == nil {\n\t\tt.Fatalf(\"Failed to resolve IPv6 self, got %s\", ip)\n\t}\n\n\tif ip.String() != \"2001:db8::2\" {\n\t\tt.Fatalf(\"Failed to resolve self to IP, got %s\", ip.String())\n\t}\n\n\ti.SetSelf(\"go-test-nonexisting.gansoi-dev.com:4934\")\n\tip = i.IP()\n\tif ip != nil {\n\t\tt.Fatalf(\"Returned something for failing host in Self(), got %s\", ip)\n\t}\n}\n<commit_msg>Use os.TempDir() instead of hardcoded \/dev\/shm.<commit_after>package cluster\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nvar (\n\ttestPath = path.Join(os.TempDir(), fmt.Sprintf(\".info-%d.json\", rand.Int63()))\n)\n\nfunc newInfo() *Info {\n\treturn NewInfo(testPath)\n}\n\nfunc rmInfo() {\n\tos.Remove(testPath)\n}\n\nfunc TestInfoSave(t *testing.T) {\n\tinfo := newInfo()\n\tdefer rmInfo()\n\n\terr := info.Save()\n\tif err != nil {\n\t\tt.Fatalf(\"Save() failed\")\n\t}\n\n\tst, err := os.Stat(testPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not stat saved file\")\n\t}\n\n\tif st == nil {\n\t\tt.Fatalf(\"Save() failed to write a file\")\n\t}\n\n\tpermissions := st.Mode().Perm()\n\tif permissions&077 != 0 {\n\t\tt.Fatalf(\"File mode is too permissive, got %#o\", st.Mode())\n\t}\n}\n\nfunc TestInfoSaveFail(t *testing.T) {\n\ti := NewInfo(\"\/nonexisting\/path\/something.json\")\n\terr := i.Save()\n\tif err == nil {\n\t\tt.Fatalf(\"Save() failed to detect error\")\n\t}\n}\n\nfunc TestInfoLoadFail(t *testing.T) {\n\tdefer rmInfo()\n\n\ti := &Info{\n\t\tpath: testPath,\n\t}\n\n\terr := i.Load()\n\tif err == nil {\n\t\tt.Fatalf(\"Load() failed to detect file read error\")\n\t}\n\n\t\/\/ Write some broken JSON.\n\tioutil.WriteFile(testPath, []byte(\"NOT JSON\"), 0600)\n\terr = i.Load()\n\tif err == nil {\n\t\tt.Fatalf(\"Load() failed to detect broken JSON\")\n\t}\n}\n\nfunc TestInfoPeers(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\tcases := [][]string{\n\t\tnil,\n\t\t{},\n\t\t{\"a\"},\n\t\t{\"a\", \"b\"},\n\t}\n\n\tfor _, c := range cases {\n\t\terr := i.SetPeers(c)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"SetPeers() failed: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ Let's have a roundtrip to disk.\n\t\ti.Save()\n\t\ti.PeerList = nil\n\t\ti.Load()\n\n\t\tpeers, err := i.Peers()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Peers() failed: %s\", err.Error())\n\t\t}\n\n\t\tif !reflect.DeepEqual(c, peers) {\n\t\t\tt.Fatalf(\"Peers() did not return what we gave SetPeers()\")\n\t\t}\n\t}\n}\n\nfunc TestInfoSelf(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\tcases := []string{\n\t\t\"a\",\n\t\t\"\",\n\t\t\"\\n\",\n\t}\n\n\tfor _, c := range cases {\n\t\terr := i.SetSelf(c)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"SetSelf() failed: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ Let's have a roundtrip to disk.\n\t\ti.Save()\n\t\ti.SelfName = \"NOT THIS\"\n\t\ti.Load()\n\n\t\tself := i.Self()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Self() failed: %s\", err.Error())\n\t\t}\n\n\t\tif c != self {\n\t\t\tt.Fatalf(\"Self() failed to return what we gave SetSelf()\")\n\t\t}\n\t}\n}\n\nfunc TestInfoIP(t *testing.T) {\n\ti := newInfo()\n\tdefer rmInfo()\n\n\ti.SetSelf(\"go-test-target-v4.gansoi-dev.com\")\n\tip := i.IP()\n\tif ip == nil {\n\t\tt.Fatalf(\"Failed to resolve IPv4 , got %s\", ip)\n\t}\n\n\tif ip.String() != \"198.51.100.1\" {\n\t\tt.Fatalf(\"Failed to resolve self to IP, got %s\", ip.String())\n\t}\n\n\ti.SetSelf(\"go-test-target-v6.gansoi-dev.com\")\n\tip = i.IP()\n\tif ip == nil {\n\t\tt.Fatalf(\"Failed to resolve IPv6 self, got %s\", ip)\n\t}\n\n\tif ip.String() != \"2001:db8::2\" {\n\t\tt.Fatalf(\"Failed to resolve self to IP, got %s\", ip.String())\n\t}\n\n\ti.SetSelf(\"go-test-nonexisting.gansoi-dev.com:4934\")\n\tip = i.IP()\n\tif ip != nil {\n\t\tt.Fatalf(\"Returned something for failing host in Self(), got %s\", ip)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package facts\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"golang.org\/x\/tools\/go\/analysis\"\n)\n\ntype Generator int\n\n\/\/ A list of known generators we can detect\nconst (\n\tUnknown Generator = iota\n\tGoyacc\n\tCgo\n\tStringer\n)\n\nvar (\n\t\/\/ used by cgo before Go 1.11\n\toldCgo = []byte(\"\/\/ Created by cgo - DO NOT EDIT\")\n\tprefix = []byte(\"\/\/ Code generated \")\n\tsuffix = []byte(\" DO NOT EDIT.\")\n\tnl = []byte(\"\\n\")\n\tcrnl = []byte(\"\\r\\n\")\n)\n\nfunc isGenerated(path string) (Generator, bool) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\tdefer f.Close()\n\tbr := bufio.NewReader(f)\n\tfor {\n\t\ts, err := br.ReadBytes('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn 0, false\n\t\t}\n\t\ts = bytes.TrimSuffix(s, crnl)\n\t\ts = bytes.TrimSuffix(s, nl)\n\t\tif bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {\n\t\t\ttext := string(s[len(prefix) : len(s)-len(suffix)])\n\t\t\tswitch text {\n\t\t\tcase \"by goyacc.\":\n\t\t\t\treturn Goyacc, true\n\t\t\tcase \"by cmd\/cgo;\":\n\t\t\t\treturn Cgo, true\n\t\t\t}\n\t\t\tif strings.HasPrefix(text, `by \"stringer `) {\n\t\t\t\treturn Stringer, true\n\t\t\t}\n\t\t\treturn Unknown, true\n\t\t}\n\t\tif bytes.Equal(s, oldCgo) {\n\t\t\treturn Cgo, true\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn 0, false\n}\n\nvar Generated = &analysis.Analyzer{\n\tName: \"isgenerated\",\n\tDoc: \"annotate file names that have been code generated\",\n\tRun: func(pass *analysis.Pass) (interface{}, error) {\n\t\tm := map[string]Generator{}\n\t\tfor _, f := range pass.Files {\n\t\t\tpath := pass.Fset.PositionFor(f.Pos(), false).Filename\n\t\t\tg, ok := isGenerated(path)\n\t\t\tif ok {\n\t\t\t\tm[path] = g\n\t\t\t}\n\t\t}\n\t\treturn m, nil\n\t},\n\tRunDespiteErrors: true,\n\tResultType: reflect.TypeOf(map[string]Generator{}),\n}\n<commit_msg>facts: fix detection of goyacc generated files<commit_after>package facts\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"golang.org\/x\/tools\/go\/analysis\"\n)\n\ntype Generator int\n\n\/\/ A list of known generators we can detect\nconst (\n\tUnknown Generator = iota\n\tGoyacc\n\tCgo\n\tStringer\n)\n\nvar (\n\t\/\/ used by cgo before Go 1.11\n\toldCgo = []byte(\"\/\/ Created by cgo - DO NOT EDIT\")\n\tprefix = []byte(\"\/\/ Code generated \")\n\tsuffix = []byte(\" DO NOT EDIT.\")\n\tnl = []byte(\"\\n\")\n\tcrnl = []byte(\"\\r\\n\")\n)\n\nfunc isGenerated(path string) (Generator, bool) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\tdefer f.Close()\n\tbr := bufio.NewReader(f)\n\tfor {\n\t\ts, err := br.ReadBytes('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn 0, false\n\t\t}\n\t\ts = bytes.TrimSuffix(s, crnl)\n\t\ts = bytes.TrimSuffix(s, nl)\n\t\tif bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {\n\t\t\ttext := string(s[len(prefix) : len(s)-len(suffix)])\n\t\t\tswitch text {\n\t\t\tcase \"by goyacc.\":\n\t\t\t\treturn Goyacc, true\n\t\t\tcase \"by cmd\/cgo;\":\n\t\t\t\treturn Cgo, true\n\t\t\t}\n\t\t\tif strings.HasPrefix(text, `by \"stringer `) {\n\t\t\t\treturn Stringer, true\n\t\t\t}\n\t\t\tif strings.HasPrefix(text, `by goyacc `) {\n\t\t\t\treturn Goyacc, true\n\t\t\t}\n\n\t\t\treturn Unknown, true\n\t\t}\n\t\tif bytes.Equal(s, oldCgo) {\n\t\t\treturn Cgo, true\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn 0, false\n}\n\nvar Generated = &analysis.Analyzer{\n\tName: \"isgenerated\",\n\tDoc: \"annotate file names that have been code generated\",\n\tRun: func(pass *analysis.Pass) (interface{}, error) {\n\t\tm := map[string]Generator{}\n\t\tfor _, f := range pass.Files {\n\t\t\tpath := pass.Fset.PositionFor(f.Pos(), false).Filename\n\t\t\tg, ok := isGenerated(path)\n\t\t\tif ok {\n\t\t\t\tm[path] = g\n\t\t\t}\n\t\t}\n\t\treturn m, nil\n\t},\n\tRunDespiteErrors: true,\n\tResultType: reflect.TypeOf(map[string]Generator{}),\n}\n<|endoftext|>"} {"text":"<commit_before>package cssminify\n\nimport (\n\t\"fmt\"\n)\n\nfunc Minify(blocks []Block) {\n\tfor _, block := range blocks {\n\t\tfmt.Printf(\"%s\\n\", block.selector)\n\t}\n}\n<commit_msg>Preparing the field for minifying<commit_after>package cssminify\n\nimport (\n\t\"fmt\"\n)\n\nfunc Minify(blocks []Block) {\n\tfor _, block := range blocks {\n\t\tshowSelectors(string(block.selector))\n\t\tfmt.Print(\"{\")\n\t\tshowPropVals(block.pairs)\n\t\tfmt.Print(\"}\")\n\t}\n}\n\nfunc showSelectors(selector string) {\n}\n\nfunc showPropVals(pairs []Pair) {\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package main contains a Go mobile app that tweaks the\n\/\/ cmd\/blinker server's blink rate.\npackage main\n\nimport (\n\t\"encoding\/binary\"\n\t\"log\"\n\n\t\"golang.org\/x\/mobile\/app\"\n\t\"golang.org\/x\/mobile\/event\/config\"\n\t\"golang.org\/x\/mobile\/event\/lifecycle\"\n\t\"golang.org\/x\/mobile\/event\/paint\"\n\t\"golang.org\/x\/mobile\/event\/touch\"\n\t\"golang.org\/x\/mobile\/exp\/f32\"\n\t\"golang.org\/x\/mobile\/exp\/gl\/glutil\"\n\t\"golang.org\/x\/mobile\/geom\"\n\t\"golang.org\/x\/mobile\/gl\"\n)\n\nvar (\n\tprogram gl.Program\n\tposition gl.Attrib\n\toffset gl.Uniform\n\tcolor gl.Uniform\n\tbuf gl.Buffer\n\n\ttouchLoc geom.Point\n)\n\nfunc main() {\n\tapp.Main(func(a app.App) {\n\t\tvar c config.Event\n\t\tfor e := range a.Events() {\n\t\t\tswitch e := app.Filter(e).(type) {\n\t\t\tcase lifecycle.Event:\n\t\t\t\tswitch e.Crosses(lifecycle.StageVisible) {\n\t\t\t\tcase lifecycle.CrossOn:\n\t\t\t\t\tonStart()\n\t\t\t\tcase lifecycle.CrossOff:\n\t\t\t\t\tonStop()\n\t\t\t\t}\n\t\t\tcase config.Event:\n\t\t\t\tc = e\n\t\t\t\ttouchLoc = geom.Point{c.Width \/ 2, c.Height \/ 2}\n\t\t\tcase paint.Event:\n\t\t\t\tonPaint(c)\n\t\t\t\ta.EndPaint()\n\t\t\tcase touch.Event:\n\t\t\t\ttouchLoc = e.Loc\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc onStart() {\n\tvar err error\n\tprogram, err = glutil.CreateProgram(vertexShader, fragmentShader)\n\tif err != nil {\n\t\tlog.Printf(\"error creating GL program: %v\", err)\n\t\treturn\n\t}\n\n\tbuf = gl.CreateBuffer()\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buf)\n\tgl.BufferData(gl.ARRAY_BUFFER, rectData, gl.STATIC_DRAW)\n\n\tposition = gl.GetAttribLocation(program, \"position\")\n\tcolor = gl.GetUniformLocation(program, \"color\")\n\toffset = gl.GetUniformLocation(program, \"offset\")\n}\n\nfunc onStop() {\n\tgl.DeleteProgram(program)\n\tgl.DeleteBuffer(buf)\n}\n\nfunc onPaint(c config.Event) {\n\tgl.ClearColor(1, 1, 1, 1)\n\tgl.Clear(gl.COLOR_BUFFER_BIT)\n\n\tgl.UseProgram(program)\n\n\tgl.Uniform4f(color, 0.3, 0.3, 0.3, 1) \/\/ color\n\t\/\/ position\n\tx := float32(touchLoc.X \/ c.Width)\n\ty := float32(touchLoc.Y \/ c.Height)\n\tgl.Uniform2f(offset, x, y)\n\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buf)\n\tgl.EnableVertexAttribArray(position)\n\tgl.VertexAttribPointer(position, 2, gl.FLOAT, false, 0, 0)\n\tgl.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)\n\tgl.DisableVertexAttribArray(position)\n}\n\nvar rectData = f32.Bytes(binary.LittleEndian,\n\t0, 0,\n\t0, 0.2,\n\t0.2, 0,\n\t0.2, 0.2,\n)\n\nconst vertexShader = `#version 100\nuniform vec2 offset;\n\nattribute vec4 position;\nvoid main() {\n \/\/ offset comes in with x\/y values between 0 and 1.\n \/\/ position bounds are -1 to 1.\n vec4 offset4 = vec4(2.0*offset.x-1.0, 1.0-2.0*offset.y, 0, 0);\n gl_Position = position + offset4;\n}`\n\nconst fragmentShader = `#version 100\nprecision mediump float;\nuniform vec4 color;\nvoid main() {\n gl_FragColor = color;\n}`\n<commit_msg>on touch update blink rate<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package main contains a Go mobile app that tweaks the\n\/\/ cmd\/blinker server's blink rate.\npackage main\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/mobile\/app\"\n\t\"golang.org\/x\/mobile\/event\/config\"\n\t\"golang.org\/x\/mobile\/event\/lifecycle\"\n\t\"golang.org\/x\/mobile\/event\/paint\"\n\t\"golang.org\/x\/mobile\/event\/touch\"\n\t\"golang.org\/x\/mobile\/exp\/f32\"\n\t\"golang.org\/x\/mobile\/exp\/gl\/glutil\"\n\t\"golang.org\/x\/mobile\/geom\"\n\t\"golang.org\/x\/mobile\/gl\"\n)\n\nvar (\n\tprogram gl.Program\n\tposition gl.Attrib\n\toffset gl.Uniform\n\tcolor gl.Uniform\n\tbuf gl.Buffer\n\n\tc config.Event\n\ttouchLoc geom.Point\n)\n\nfunc main() {\n\tapp.Main(func(a app.App) {\n\t\tfor e := range a.Events() {\n\t\t\tswitch e := app.Filter(e).(type) {\n\t\t\tcase lifecycle.Event:\n\t\t\t\tswitch e.Crosses(lifecycle.StageVisible) {\n\t\t\t\tcase lifecycle.CrossOn:\n\t\t\t\t\tonStart()\n\t\t\t\tcase lifecycle.CrossOff:\n\t\t\t\t\tonStop()\n\t\t\t\t}\n\t\t\tcase config.Event:\n\t\t\t\tc = e\n\t\t\t\ttouchLoc = geom.Point{c.Width \/ 2, c.Height \/ 2}\n\t\t\tcase paint.Event:\n\t\t\t\tonPaint(c)\n\t\t\t\ta.EndPaint()\n\t\t\tcase touch.Event:\n\t\t\t\ttouchLoc = e.Loc\n\t\t\t\tgo updateBlinker()\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc onStart() {\n\tvar err error\n\tprogram, err = glutil.CreateProgram(vertexShader, fragmentShader)\n\tif err != nil {\n\t\tlog.Printf(\"error creating GL program: %v\", err)\n\t\treturn\n\t}\n\n\tbuf = gl.CreateBuffer()\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buf)\n\tgl.BufferData(gl.ARRAY_BUFFER, rectData, gl.STATIC_DRAW)\n\n\tposition = gl.GetAttribLocation(program, \"position\")\n\tcolor = gl.GetUniformLocation(program, \"color\")\n\toffset = gl.GetUniformLocation(program, \"offset\")\n}\n\nfunc onStop() {\n\tgl.DeleteProgram(program)\n\tgl.DeleteBuffer(buf)\n}\n\nfunc onPaint(c config.Event) {\n\tgl.ClearColor(1, 1, 1, 1)\n\tgl.Clear(gl.COLOR_BUFFER_BIT)\n\n\tgl.UseProgram(program)\n\n\tgl.Uniform4f(color, 0.3, 0.3, 0.3, 1) \/\/ color\n\t\/\/ position\n\tx := float32(touchLoc.X \/ c.Width)\n\ty := float32(touchLoc.Y \/ c.Height)\n\tgl.Uniform2f(offset, x, y)\n\n\tgl.BindBuffer(gl.ARRAY_BUFFER, buf)\n\tgl.EnableVertexAttribArray(position)\n\tgl.VertexAttribPointer(position, 2, gl.FLOAT, false, 0, 0)\n\tgl.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)\n\tgl.DisableVertexAttribArray(position)\n}\n\nfunc updateBlinker() {\n\tr := float32(touchLoc.Y\/c.Height) * 200\n\t\/\/ TODO(jbd): Don't hardcode the server.\n\t\/\/ TODO(jbd): Switch to mdns or another p2p protocol\n\t_, err := http.Get(fmt.Sprintf(\"http:\/\/10.0.1.9:8080?t=%d\", int(r)))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nvar rectData = f32.Bytes(binary.LittleEndian,\n\t0, 0,\n\t0, 0.2,\n\t0.2, 0,\n\t0.2, 0.2,\n)\n\nconst vertexShader = `#version 100\nuniform vec2 offset;\n\nattribute vec4 position;\nvoid main() {\n \/\/ offset comes in with x\/y values between 0 and 1.\n \/\/ position bounds are -1 to 1.\n vec4 offset4 = vec4(2.0*offset.x-1.0, 1.0-2.0*offset.y, 0, 0);\n gl_Position = position + offset4;\n}`\n\nconst fragmentShader = `#version 100\nprecision mediump float;\nuniform vec4 color;\nvoid main() {\n gl_FragColor = color;\n}`\n<|endoftext|>"} {"text":"<commit_before>package main \/\/ import \"go.zeta.pm\/disguard\/cmd\/disguard\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/pressly\/chi\"\n\t\"go.zeta.pm\/disguard\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nfunc main() {\n\tin, err := ioutil.ReadFile(\"config.yaml\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar conf disguard.Config\n\terr = yaml.Unmarshal(in, &conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif conf.AuthRoot == \"\" {\n\t\tconf.AuthRoot = \"\/oauth\"\n\t}\n\n\tsess := disguard.NewSessionRouter(&conf)\n\n\troot := chi.NewRouter()\n\troot.Route(conf.AuthRoot, sess.Route)\n\troot.Mount(\"\/\", sess.ReverseHandler())\n\n\thttp.ListenAndServe(conf.ListenAddress, root)\n}\n<commit_msg>Adds a config file flag<commit_after>package main \/\/ import \"go.zeta.pm\/disguard\/cmd\/disguard\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"flag\"\n\n\t\"github.com\/pressly\/chi\"\n\t\"go.zeta.pm\/disguard\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar (\n\tconfigPath string\n)\n\nfunc init() {\n\tflag.StringVar(&configPath, \"c\", \"config.yaml\", \"The path to the configuration file.\")\n\tflag.Parse()\n}\n\nfunc main() {\n\tin, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar conf disguard.Config\n\terr = yaml.Unmarshal(in, &conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif conf.AuthRoot == \"\" {\n\t\tconf.AuthRoot = \"\/oauth\"\n\t}\n\n\tsess := disguard.NewSessionRouter(&conf)\n\n\troot := chi.NewRouter()\n\troot.Route(conf.AuthRoot, sess.Route)\n\troot.Mount(\"\/\", sess.ReverseHandler())\n\n\thttp.ListenAndServe(conf.ListenAddress, root)\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/exercism\/cli\/config\"\n\t\"github.com\/exercism\/cli\/workspace\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDownloadWithoutToken(t *testing.T) {\n\tcfg := config.Config{\n\t\tUserViperConfig: viper.New(),\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"Welcome to Exercism\", err.Error())\n\t\t\/\/ It uses the default base API url to infer the host\n\t\tassert.Regexp(t, \"exercism.io\/my\/settings\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutWorkspace(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutBaseURL(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/whatever\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutFlags(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/username\")\n\tv.Set(\"apibaseurl\", \"http:\/\/example.com\")\n\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\tsetupDownloadFlags(flags)\n\n\terr := runDownload(cfg, flags, []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"need an --exercise name or a solution --uuid\", err.Error())\n\t}\n}\n\nfunc TestDownload(t *testing.T) {\n\toldOut := Out\n\toldErr := Err\n\tOut = ioutil.Discard\n\tErr = ioutil.Discard\n\tdefer func() {\n\t\tOut = oldOut\n\t\tErr = oldErr\n\t}()\n\n\ttestCases := []struct {\n\t\trequester bool\n\t\texpectedDir string\n\t\tflags map[string]string\n\t}{\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: false,\n\t\t\texpectedDir: filepath.Join(\"users\", \"alice\"),\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: filepath.Join(\"teams\", \"bogus-team\"),\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\", \"track\": \"bogus-track\", \"team\": \"bogus-team\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"download-cmd\")\n\t\tdefer os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\n\t\tts := fakeDownloadServer(strconv.FormatBool(tc.requester), tc.flags[\"team\"])\n\t\tdefer ts.Close()\n\n\t\tv := viper.New()\n\t\tv.Set(\"workspace\", tmpDir)\n\t\tv.Set(\"apibaseurl\", ts.URL)\n\t\tv.Set(\"token\", \"abc123\")\n\n\t\tcfg := config.Config{\n\t\t\tUserViperConfig: v,\n\t\t}\n\t\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\t\tsetupDownloadFlags(flags)\n\t\tfor name, value := range tc.flags {\n\t\t\tflags.Set(name, value)\n\t\t}\n\n\t\terr = runDownload(cfg, flags, []string{})\n\t\tassert.NoError(t, err)\n\n\t\ttargetDir := filepath.Join(tmpDir, tc.expectedDir)\n\t\tassertDownloadedCorrectFiles(t, targetDir)\n\n\t\tpath := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\")\n\t\tb, err := ioutil.ReadFile(workspace.NewExerciseFromDir(path).MetadataFilepath())\n\t\tvar s workspace.Solution\n\t\terr = json.Unmarshal(b, &s)\n\t\tassert.NoError(t, err)\n\n\t\tassert.Equal(t, \"bogus-track\", s.Track)\n\t\tassert.Equal(t, \"bogus-exercise\", s.Exercise)\n\t\tassert.Equal(t, tc.requester, s.IsRequester)\n\t}\n}\n\nfunc fakeDownloadServer(requestor, teamSlug string) *httptest.Server {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\n\tmux.HandleFunc(\"\/file-1.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 1\")\n\t})\n\n\tmux.HandleFunc(\"\/subdir\/file-2.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 2\")\n\t})\n\n\tmux.HandleFunc(\"\/special-char-filename#.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is a special file\")\n\t})\n\n\tmux.HandleFunc(\"\/with-leading-slash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this has a slash\")\n\t})\n\n\tmux.HandleFunc(\"\/file-3.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"\")\n\t})\n\n\tmux.HandleFunc(\"\/solutions\/latest\", func(w http.ResponseWriter, r *http.Request) {\n\t\tteam := \"null\"\n\t\tif teamSlug := r.FormValue(\"team_id\"); teamSlug != \"\" {\n\t\t\tteam = fmt.Sprintf(`{\"name\": \"Bogus Team\", \"slug\": \"%s\"}`, teamSlug)\n\t\t}\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, team, server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\tmux.HandleFunc(\"\/solutions\/bogus-id\", func(w http.ResponseWriter, r *http.Request) {\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, \"null\", server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\n\treturn server\n}\n\nfunc assertDownloadedCorrectFiles(t *testing.T, targetDir string) {\n\texpectedFiles := []struct {\n\t\tdesc string\n\t\tpath string\n\t\tcontents string\n\t}{\n\t\t{\n\t\t\tdesc: \"a file in the exercise root directory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-1.txt\"),\n\t\t\tcontents: \"this is file 1\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file in a subdirectory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"file-2.txt\"),\n\t\t\tcontents: \"this is file 2\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that requires URL encoding\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"special-char-filename#.txt\"),\n\t\t\tcontents: \"this is a special file\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that has a leading slash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-slash.txt\"),\n\t\t\tcontents: \"this has a slash\",\n\t\t},\n\t}\n\n\tfor _, file := range expectedFiles {\n\t\tt.Run(file.desc, func(t *testing.T) {\n\t\t\tb, err := ioutil.ReadFile(file.path)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, file.contents, string(b))\n\t\t})\n\t}\n\n\tpath := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-3.txt\")\n\t_, err := os.Lstat(path)\n\tassert.True(t, os.IsNotExist(err), \"It should not write the file if empty.\")\n}\n\nconst payloadTemplate = `\n{\n\t\"solution\": {\n\t\t\"id\": \"bogus-id\",\n\t\t\"user\": {\n\t\t\t\"handle\": \"alice\",\n\t\t\t\"is_requester\": %s\n\t\t},\n\t\t\"team\": %s,\n\t\t\"exercise\": {\n\t\t\t\"id\": \"bogus-exercise\",\n\t\t\t\"instructions_url\": \"http:\/\/example.com\/bogus-exercise\",\n\t\t\t\"auto_approve\": false,\n\t\t\t\"track\": {\n\t\t\t\t\"id\": \"bogus-track\",\n\t\t\t\t\"language\": \"Bogus Language\"\n\t\t\t}\n\t\t},\n\t\t\"file_download_base_url\": \"%s\",\n\t\t\"files\": [\n\t\t\t\"file-1.txt\",\n\t\t\t\"subdir\/file-2.txt\",\n\t\t\t\"special-char-filename#.txt\",\n\t\t\t\"\/with-leading-slash.txt\",\n\t\t\t\"file-3.txt\"\n\t\t],\n\t\t\"iteration\": {\n\t\t\t\"submitted_at\": \"2017-08-21t10:11:12.130z\"\n\t\t}\n\t}\n}\n`\n<commit_msg>Refactor improve var name<commit_after>package cmd\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/exercism\/cli\/config\"\n\t\"github.com\/exercism\/cli\/workspace\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestDownloadWithoutToken(t *testing.T) {\n\tcfg := config.Config{\n\t\tUserViperConfig: viper.New(),\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"Welcome to Exercism\", err.Error())\n\t\t\/\/ It uses the default base API url to infer the host\n\t\tassert.Regexp(t, \"exercism.io\/my\/settings\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutWorkspace(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutBaseURL(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/whatever\")\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\terr := runDownload(cfg, pflag.NewFlagSet(\"fake\", pflag.PanicOnError), []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"re-run the configure\", err.Error())\n\t}\n}\n\nfunc TestDownloadWithoutFlags(t *testing.T) {\n\tv := viper.New()\n\tv.Set(\"token\", \"abc123\")\n\tv.Set(\"workspace\", \"\/home\/username\")\n\tv.Set(\"apibaseurl\", \"http:\/\/example.com\")\n\n\tcfg := config.Config{\n\t\tUserViperConfig: v,\n\t}\n\n\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\tsetupDownloadFlags(flags)\n\n\terr := runDownload(cfg, flags, []string{})\n\tif assert.Error(t, err) {\n\t\tassert.Regexp(t, \"need an --exercise name or a solution --uuid\", err.Error())\n\t}\n}\n\nfunc TestDownload(t *testing.T) {\n\toldOut := Out\n\toldErr := Err\n\tOut = ioutil.Discard\n\tErr = ioutil.Discard\n\tdefer func() {\n\t\tOut = oldOut\n\t\tErr = oldErr\n\t}()\n\n\ttestCases := []struct {\n\t\trequester bool\n\t\texpectedDir string\n\t\tflags map[string]string\n\t}{\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: \"\",\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: false,\n\t\t\texpectedDir: filepath.Join(\"users\", \"alice\"),\n\t\t\tflags: map[string]string{\"uuid\": \"bogus-id\"},\n\t\t},\n\t\t{\n\t\t\trequester: true,\n\t\t\texpectedDir: filepath.Join(\"teams\", \"bogus-team\"),\n\t\t\tflags: map[string]string{\"exercise\": \"bogus-exercise\", \"track\": \"bogus-track\", \"team\": \"bogus-team\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"download-cmd\")\n\t\tdefer os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\n\t\tts := fakeDownloadServer(strconv.FormatBool(tc.requester), tc.flags[\"team\"])\n\t\tdefer ts.Close()\n\n\t\tv := viper.New()\n\t\tv.Set(\"workspace\", tmpDir)\n\t\tv.Set(\"apibaseurl\", ts.URL)\n\t\tv.Set(\"token\", \"abc123\")\n\n\t\tcfg := config.Config{\n\t\t\tUserViperConfig: v,\n\t\t}\n\t\tflags := pflag.NewFlagSet(\"fake\", pflag.PanicOnError)\n\t\tsetupDownloadFlags(flags)\n\t\tfor name, value := range tc.flags {\n\t\t\tflags.Set(name, value)\n\t\t}\n\n\t\terr = runDownload(cfg, flags, []string{})\n\t\tassert.NoError(t, err)\n\n\t\ttargetDir := filepath.Join(tmpDir, tc.expectedDir)\n\t\tassertDownloadedCorrectFiles(t, targetDir)\n\n\t\tdir := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\")\n\t\tb, err := ioutil.ReadFile(workspace.NewExerciseFromDir(dir).MetadataFilepath())\n\t\tvar s workspace.Solution\n\t\terr = json.Unmarshal(b, &s)\n\t\tassert.NoError(t, err)\n\n\t\tassert.Equal(t, \"bogus-track\", s.Track)\n\t\tassert.Equal(t, \"bogus-exercise\", s.Exercise)\n\t\tassert.Equal(t, tc.requester, s.IsRequester)\n\t}\n}\n\nfunc fakeDownloadServer(requestor, teamSlug string) *httptest.Server {\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\n\tmux.HandleFunc(\"\/file-1.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 1\")\n\t})\n\n\tmux.HandleFunc(\"\/subdir\/file-2.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is file 2\")\n\t})\n\n\tmux.HandleFunc(\"\/special-char-filename#.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this is a special file\")\n\t})\n\n\tmux.HandleFunc(\"\/with-leading-slash.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"this has a slash\")\n\t})\n\n\tmux.HandleFunc(\"\/file-3.txt\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w, \"\")\n\t})\n\n\tmux.HandleFunc(\"\/solutions\/latest\", func(w http.ResponseWriter, r *http.Request) {\n\t\tteam := \"null\"\n\t\tif teamSlug := r.FormValue(\"team_id\"); teamSlug != \"\" {\n\t\t\tteam = fmt.Sprintf(`{\"name\": \"Bogus Team\", \"slug\": \"%s\"}`, teamSlug)\n\t\t}\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, team, server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\tmux.HandleFunc(\"\/solutions\/bogus-id\", func(w http.ResponseWriter, r *http.Request) {\n\t\tpayloadBody := fmt.Sprintf(payloadTemplate, requestor, \"null\", server.URL+\"\/\")\n\t\tfmt.Fprint(w, payloadBody)\n\t})\n\n\treturn server\n}\n\nfunc assertDownloadedCorrectFiles(t *testing.T, targetDir string) {\n\texpectedFiles := []struct {\n\t\tdesc string\n\t\tpath string\n\t\tcontents string\n\t}{\n\t\t{\n\t\t\tdesc: \"a file in the exercise root directory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-1.txt\"),\n\t\t\tcontents: \"this is file 1\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file in a subdirectory\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"subdir\", \"file-2.txt\"),\n\t\t\tcontents: \"this is file 2\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that requires URL encoding\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"special-char-filename#.txt\"),\n\t\t\tcontents: \"this is a special file\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"a file that has a leading slash\",\n\t\t\tpath: filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"with-leading-slash.txt\"),\n\t\t\tcontents: \"this has a slash\",\n\t\t},\n\t}\n\n\tfor _, file := range expectedFiles {\n\t\tt.Run(file.desc, func(t *testing.T) {\n\t\t\tb, err := ioutil.ReadFile(file.path)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, file.contents, string(b))\n\t\t})\n\t}\n\n\tpath := filepath.Join(targetDir, \"bogus-track\", \"bogus-exercise\", \"file-3.txt\")\n\t_, err := os.Lstat(path)\n\tassert.True(t, os.IsNotExist(err), \"It should not write the file if empty.\")\n}\n\nconst payloadTemplate = `\n{\n\t\"solution\": {\n\t\t\"id\": \"bogus-id\",\n\t\t\"user\": {\n\t\t\t\"handle\": \"alice\",\n\t\t\t\"is_requester\": %s\n\t\t},\n\t\t\"team\": %s,\n\t\t\"exercise\": {\n\t\t\t\"id\": \"bogus-exercise\",\n\t\t\t\"instructions_url\": \"http:\/\/example.com\/bogus-exercise\",\n\t\t\t\"auto_approve\": false,\n\t\t\t\"track\": {\n\t\t\t\t\"id\": \"bogus-track\",\n\t\t\t\t\"language\": \"Bogus Language\"\n\t\t\t}\n\t\t},\n\t\t\"file_download_base_url\": \"%s\",\n\t\t\"files\": [\n\t\t\t\"file-1.txt\",\n\t\t\t\"subdir\/file-2.txt\",\n\t\t\t\"special-char-filename#.txt\",\n\t\t\t\"\/with-leading-slash.txt\",\n\t\t\t\"file-3.txt\"\n\t\t],\n\t\t\"iteration\": {\n\t\t\t\"submitted_at\": \"2017-08-21t10:11:12.130z\"\n\t\t}\n\t}\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package pagerank\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc round(f float64) float64 {\n\treturn math.Floor(f*10+0.5) \/ 10\n}\n\nfunc toPercentage(f float64) float64 {\n\ttenPow3 := math.Pow(10, 3)\n\treturn round(100 * (f * tenPow3) \/ tenPow3)\n}\n\nfunc assertRank(t *testing.T, pageRank Interface, expected map[int]float64) {\n\tconst tolerance = 0.0001\n\tpageRank.Rank(0.85, tolerance, func(label int, rank float64) {\n\t\trankAsPercentage := toPercentage(rank)\n\t\tif math.Abs(rankAsPercentage - expected[label]) > tolerance {\n\t\t\tt.Error(\"Rank for\", label, \"should be\", expected[label], \"but was\", rankAsPercentage)\n\t\t}\n\t})\n}\n\nfunc assertEqual(t *testing.T, actual, expected interface{}) {\n\tif actual != expected {\n\t\tt.Error(\"Should be\", expected, \"but was\", actual)\n\t}\n}\n\nfunc assert(t *testing.T, actual bool) {\n\tif !actual {\n\t\tt.Error(\"Should be true\")\n\t}\n}\n\nfunc TestRound(t *testing.T) {\n\tassertEqual(t, round(0.6666666), 0.7)\n}\n\nfunc TestRankToPercentage(t *testing.T) {\n\tassertEqual(t, toPercentage(0.6666666), 66.7)\n}\n\nfunc TestShouldEnterTheBlock(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\n\tentered := false\n\tpageRank.Rank(0.85, 0.0001, func(_ int, _ float64) {\n\t\tentered = true\n\t})\n\n\tassert(t, entered)\n}\n\nfunc TestShouldBePossibleToRecalculateTheRanksAfterANewLinkIsAdded(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tassertRank(t, pageRank, map[int]float64{0: 35.1, 1: 64.9})\n\tpageRank.Link(1, 2)\n\tassertRank(t, pageRank, map[int]float64{0: 18.4, 1: 34.1, 2: 47.4})\n}\n\nfunc TestShouldBePossibleToClearTheGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(1, 2)\n\tpageRank.Clear()\n\tpageRank.Link(0, 1)\n\tassertRank(t, pageRank, map[int]float64{0: 35.1, 1: 64.9})\n}\n\nfunc TestShouldNotFailWhenCalculatingTheRankOfAnEmptyGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Rank(0.85, 0.0001, func(label int, rank float64) {\n\t\tt.Error(\"This should not be seen\")\n\t})\n}\n\nfunc TestShouldReturnCorrectResultsWhenHavingADanglingNode(t *testing.T) {\n\tpageRank := New()\n\t\/\/node 2 is a dangling node because it has no outbound links\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 21.3,\n\t\t1: 21.3,\n\t\t2: 57.4,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldNotChangeTheGraphWhenAddingTheSameLinkManyTimes(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 2)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(1, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 21.3,\n\t\t1: 21.3,\n\t\t2: 57.4,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldReturnCorrectResultsForAStarGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 5,\n\t\t1: 5,\n\t\t2: 90,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldBeUniformForACircularGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 3)\n\tpageRank.Link(3, 4)\n\tpageRank.Link(4, 0)\n\n\texpectedRank := map[int]float64{\n\t\t0: 20,\n\t\t1: 20,\n\t\t2: 20,\n\t\t3: 20,\n\t\t4: 20,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldReturnCorrectResultsForAConvergingGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 5,\n\t\t1: 7.1,\n\t\t2: 87.9,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldCorrectlyReproduceTheWikipediaExample(t *testing.T) {\n\t\/\/http:\/\/en.wikipedia.org\/wiki\/File:PageRanks-Example.svg\n\tpageRank := New()\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 1)\n\tpageRank.Link(3, 0)\n\tpageRank.Link(3, 1)\n\tpageRank.Link(4, 3)\n\tpageRank.Link(4, 1)\n\tpageRank.Link(4, 5)\n\tpageRank.Link(5, 4)\n\tpageRank.Link(5, 1)\n\tpageRank.Link(6, 1)\n\tpageRank.Link(6, 4)\n\tpageRank.Link(7, 1)\n\tpageRank.Link(7, 4)\n\tpageRank.Link(8, 1)\n\tpageRank.Link(8, 4)\n\tpageRank.Link(9, 4)\n\tpageRank.Link(10, 4)\n\n\texpectedRank := map[int]float64{\n\t\t0: 3.3, \/\/a\n\t\t1: 38.4, \/\/b\n\t\t2: 34.3, \/\/c\n\t\t3: 3.9, \/\/d\n\t\t4: 8.1, \/\/e\n\t\t5: 3.9, \/\/f\n\t\t6: 1.6, \/\/g\n\t\t7: 1.6, \/\/h\n\t\t8: 1.6, \/\/i\n\t\t9: 1.6, \/\/j\n\t\t10: 1.6, \/\/k\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc BenchmarkOneMillion(b *testing.B) {\n\tn := 1000000\n\n\tpageRank := New()\n\n\trand.Seed(5)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfor from := 0; from < n; from++ {\n\t\t\tfor j := 0; j < rand.Intn(60); j++ {\n\t\t\t\ttoo := rand.Intn(n)\n\n\t\t\t\tto := too\n\t\t\t\tif too > 800000 {\n\t\t\t\t\tto = rand.Intn(3)\n\t\t\t\t}\n\n\t\t\t\tpageRank.Link(from, to)\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := make([]float64, n)\n\tpageRank.Rank(0.85, 0.001, func(key int, val float64) {\n\t\tresult[key] = val\n\t})\n\n\tfmt.Println(\"5 first values are\", result[0], \",\", result[1], \",\", result[2], \",\", result[3], \",\", result[4])\n\tpageRank.Clear()\n}\n<commit_msg>More links for the benchmark graph<commit_after>package pagerank\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc round(f float64) float64 {\n\treturn math.Floor(f*10+0.5) \/ 10\n}\n\nfunc toPercentage(f float64) float64 {\n\ttenPow3 := math.Pow(10, 3)\n\treturn round(100 * (f * tenPow3) \/ tenPow3)\n}\n\nfunc assertRank(t *testing.T, pageRank Interface, expected map[int]float64) {\n\tconst tolerance = 0.0001\n\tpageRank.Rank(0.85, tolerance, func(label int, rank float64) {\n\t\trankAsPercentage := toPercentage(rank)\n\t\tif math.Abs(rankAsPercentage - expected[label]) > tolerance {\n\t\t\tt.Error(\"Rank for\", label, \"should be\", expected[label], \"but was\", rankAsPercentage)\n\t\t}\n\t})\n}\n\nfunc assertEqual(t *testing.T, actual, expected interface{}) {\n\tif actual != expected {\n\t\tt.Error(\"Should be\", expected, \"but was\", actual)\n\t}\n}\n\nfunc assert(t *testing.T, actual bool) {\n\tif !actual {\n\t\tt.Error(\"Should be true\")\n\t}\n}\n\nfunc TestRound(t *testing.T) {\n\tassertEqual(t, round(0.6666666), 0.7)\n}\n\nfunc TestRankToPercentage(t *testing.T) {\n\tassertEqual(t, toPercentage(0.6666666), 66.7)\n}\n\nfunc TestShouldEnterTheBlock(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\n\tentered := false\n\tpageRank.Rank(0.85, 0.0001, func(_ int, _ float64) {\n\t\tentered = true\n\t})\n\n\tassert(t, entered)\n}\n\nfunc TestShouldBePossibleToRecalculateTheRanksAfterANewLinkIsAdded(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tassertRank(t, pageRank, map[int]float64{0: 35.1, 1: 64.9})\n\tpageRank.Link(1, 2)\n\tassertRank(t, pageRank, map[int]float64{0: 18.4, 1: 34.1, 2: 47.4})\n}\n\nfunc TestShouldBePossibleToClearTheGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(1, 2)\n\tpageRank.Clear()\n\tpageRank.Link(0, 1)\n\tassertRank(t, pageRank, map[int]float64{0: 35.1, 1: 64.9})\n}\n\nfunc TestShouldNotFailWhenCalculatingTheRankOfAnEmptyGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Rank(0.85, 0.0001, func(label int, rank float64) {\n\t\tt.Error(\"This should not be seen\")\n\t})\n}\n\nfunc TestShouldReturnCorrectResultsWhenHavingADanglingNode(t *testing.T) {\n\tpageRank := New()\n\t\/\/node 2 is a dangling node because it has no outbound links\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 21.3,\n\t\t1: 21.3,\n\t\t2: 57.4,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldNotChangeTheGraphWhenAddingTheSameLinkManyTimes(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 2)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(1, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 21.3,\n\t\t1: 21.3,\n\t\t2: 57.4,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldReturnCorrectResultsForAStarGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 5,\n\t\t1: 5,\n\t\t2: 90,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldBeUniformForACircularGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 3)\n\tpageRank.Link(3, 4)\n\tpageRank.Link(4, 0)\n\n\texpectedRank := map[int]float64{\n\t\t0: 20,\n\t\t1: 20,\n\t\t2: 20,\n\t\t3: 20,\n\t\t4: 20,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldReturnCorrectResultsForAConvergingGraph(t *testing.T) {\n\tpageRank := New()\n\tpageRank.Link(0, 1)\n\tpageRank.Link(0, 2)\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 2)\n\n\texpectedRank := map[int]float64{\n\t\t0: 5,\n\t\t1: 7.1,\n\t\t2: 87.9,\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc TestShouldCorrectlyReproduceTheWikipediaExample(t *testing.T) {\n\t\/\/http:\/\/en.wikipedia.org\/wiki\/File:PageRanks-Example.svg\n\tpageRank := New()\n\tpageRank.Link(1, 2)\n\tpageRank.Link(2, 1)\n\tpageRank.Link(3, 0)\n\tpageRank.Link(3, 1)\n\tpageRank.Link(4, 3)\n\tpageRank.Link(4, 1)\n\tpageRank.Link(4, 5)\n\tpageRank.Link(5, 4)\n\tpageRank.Link(5, 1)\n\tpageRank.Link(6, 1)\n\tpageRank.Link(6, 4)\n\tpageRank.Link(7, 1)\n\tpageRank.Link(7, 4)\n\tpageRank.Link(8, 1)\n\tpageRank.Link(8, 4)\n\tpageRank.Link(9, 4)\n\tpageRank.Link(10, 4)\n\n\texpectedRank := map[int]float64{\n\t\t0: 3.3, \/\/a\n\t\t1: 38.4, \/\/b\n\t\t2: 34.3, \/\/c\n\t\t3: 3.9, \/\/d\n\t\t4: 8.1, \/\/e\n\t\t5: 3.9, \/\/f\n\t\t6: 1.6, \/\/g\n\t\t7: 1.6, \/\/h\n\t\t8: 1.6, \/\/i\n\t\t9: 1.6, \/\/j\n\t\t10: 1.6, \/\/k\n\t}\n\n\tassertRank(t, pageRank, expectedRank)\n}\n\nfunc BenchmarkOneMillion(b *testing.B) {\n\tn := 1000000\n\n\tpageRank := New()\n\n\trand.Seed(5)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfor from := 0; from < n; from++ {\n\t\t\tfor j := 0; j < rand.Intn(400); j++ {\n\t\t\t\ttoo := rand.Intn(n)\n\n\t\t\t\tto := too\n\t\t\t\tif too > 800000 {\n\t\t\t\t\tto = rand.Intn(3)\n\t\t\t\t}\n\n\t\t\t\tpageRank.Link(from, to)\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := make([]float64, n)\n\tpageRank.Rank(0.85, 0.001, func(key int, val float64) {\n\t\tresult[key] = val\n\t})\n\n\tfmt.Println(\"5 first values are\", result[0], \",\", result[1], \",\", result[2], \",\", result[3], \",\", result[4])\n\tpageRank.Clear()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\taddr = flag.String(\"http\", \":8080\", \"HTTP address to serve on\")\n\ttlsAddr = flag.String(\"https\", \"\", \"HTTPS address to serve on\")\n\tpemFile = flag.String(\"pem-file\", \"\", \"HTTPS \/ SSL certificate .pem file\")\n\tkeyFile = flag.String(\"key-file\", \"\", \"HTTPS \/ SSL certificate .key file\")\n\tupdate = flag.Bool(\"update\", true, \"update via Git and shutdown server after pull\")\n\tupdateRate = flag.String(\"update-rate\", \"60s\", \"rate at which to check for updates via Git\")\n\tdev = flag.Bool(\"dev\", false, \"reload all templates on each request\")\n\n\tstaticDirPrefix = \"\/static\/\" \/\/ Path prefix to strip to get static dir root.\n\tstaticDir = \"static\/\" \/\/ Directory to serve for static files.\n\n\terrorTemplate = \"error\" \/\/ Template to use for errors.\n\ttemplateGlob = \"**\/*.tmpl\" \/\/ Glob to match all template files.\n\ttemplateDir = \"templates\/\" \/\/ Relative directory that templates reside in.\n\n\tsrc = &GitUpdater{\n\t\tDir: \".\",\n\t}\n\tlastUpdate = time.Now()\n\ttmpls *template.Template\n\tupdateRateT time.Duration\n)\n\nfunc reloadTemplates() error {\n\tmatches, err := filepath.Glob(templateGlob)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpls = template.New(\"\")\n\tfor _, m := range matches {\n\t\tname := strings.TrimPrefix(m, templateDir)\n\t\tname = strings.TrimSuffix(name, filepath.Ext(name))\n\t\tif _, err := tmpls.New(name).Parse(m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype TemplateData struct {\n\tRequest *http.Request\n\tError string\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) error {\n\tif *dev {\n\t\tif err := reloadTemplates(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar data = &TemplateData{\n\t\tRequest: r,\n\t}\n\n\t\/\/ Determine template name.\n\ttmplName := r.URL.Path[1:]\n\tif stat, err := os.Stat(filepath.Join(templateDir, tmplName)); err != nil {\n\t\treturn err\n\t} else {\n\t\tif stat.IsDir() {\n\t\t\ttmplName = \"index\"\n\t\t}\n\t}\n\n\t\/\/ Find the requested page.\n\troot := tmpls.Lookup(tmplName)\n\tif root == nil {\n\t\t\/\/ Couldn't find the page, render the error page.\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn fmt.Errorf(\"%v - %v\", http.StatusNotFound, http.StatusText(http.StatusNotFound))\n\t}\n\n\t\/\/ Execute the requested template page.\n\treturn root.Execute(w, data)\n}\n\nfunc logHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%v %v\\n\", r.Method, r.URL)\n\t\th.ServeHTTP(w, r)\n\t})\n}\n\nfunc errorHandler(h func(w http.ResponseWriter, r *http.Request) error) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\terr := h(w, r)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"%v %v\\n\", r.Method, r.URL)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"[error] %v %v - %v\\n\", r.Method, r.URL, err)\n\n\t\t\/\/ Render the error template.\n\t\tdata := &TemplateData{\n\t\t\tRequest: r,\n\t\t\tError: err.Error(),\n\t\t}\n\t\tif err := tmpls.ExecuteTemplate(w, errorTemplate, data); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n}\n\nfunc checkForUpdates() {\n\t\/\/ If not enough time has elapsed, skip the update.\n\tif time.Since(lastUpdate) < updateRateT {\n\t\treturn\n\t}\n\tlastUpdate = time.Now()\n\n\t\/\/ Check for updates.\n\tupdated, err := src.Update()\n\tif err != nil {\n\t\tlog.Println(\"Update error:\", err)\n\t\treturn\n\t}\n\tif updated {\n\t\tlog.Println(\"Updated source code. Exiting server..\")\n\t\tos.Exit(0)\n\t}\n\tlog.Println(\"checked for updates (none available)\")\n}\n\nfunc init() {\n\tif err := reloadTemplates(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif *update {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(updateRateT)\n\t\t\t\tcheckForUpdates()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Parse update rate flag.\n\tvar err error\n\tupdateRateT, err = time.ParseDuration(*updateRate)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Static file hosting\n\thttp.Handle(\"\/static\/\", logHandler(http.StripPrefix(staticDirPrefix, http.FileServer(http.Dir(staticDir)))))\n\n\t\/\/ App handler.\n\thttp.Handle(\"\/\", errorHandler(handler))\n\n\t\/\/ Start HTTPS server:\n\tif *tlsAddr != \"\" {\n\t\tgo func() {\n\t\t\tif *pemFile == \"\" {\n\t\t\t\tlog.Fatal(\"expected -pem-file flag\")\n\t\t\t}\n\t\t\tif *keyFile == \"\" {\n\t\t\t\tlog.Fatal(\"expected -key-file flag\")\n\t\t\t}\n\t\t\tlog.Println(\"Serving on\", *tlsAddr)\n\t\t\tlog.Fatal(http.ListenAndServeTLS(*tlsAddr, *pemFile, *keyFile, nil))\n\t\t}()\n\t}\n\n\t\/\/ Start HTTP server:\n\tlog.Println(\"Serving on\", *addr)\n\tlog.Fatal(http.ListenAndServe(*addr, nil))\n}\n<commit_msg>cmd\/fiz3d-org: fix template name resolution for index files in subdirectories<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\taddr = flag.String(\"http\", \":8080\", \"HTTP address to serve on\")\n\ttlsAddr = flag.String(\"https\", \"\", \"HTTPS address to serve on\")\n\tpemFile = flag.String(\"pem-file\", \"\", \"HTTPS \/ SSL certificate .pem file\")\n\tkeyFile = flag.String(\"key-file\", \"\", \"HTTPS \/ SSL certificate .key file\")\n\tupdate = flag.Bool(\"update\", true, \"update via Git and shutdown server after pull\")\n\tupdateRate = flag.String(\"update-rate\", \"60s\", \"rate at which to check for updates via Git\")\n\tdev = flag.Bool(\"dev\", false, \"reload all templates on each request\")\n\n\tstaticDirPrefix = \"\/static\/\" \/\/ Path prefix to strip to get static dir root.\n\tstaticDir = \"static\/\" \/\/ Directory to serve for static files.\n\n\terrorTemplate = \"error\" \/\/ Template to use for errors.\n\ttemplateGlob = \"**\/*.tmpl\" \/\/ Glob to match all template files.\n\ttemplateDir = \"templates\/\" \/\/ Relative directory that templates reside in.\n\n\tsrc = &GitUpdater{\n\t\tDir: \".\",\n\t}\n\tlastUpdate = time.Now()\n\ttmpls *template.Template\n\tupdateRateT time.Duration\n)\n\nfunc reloadTemplates() error {\n\tmatches, err := filepath.Glob(templateGlob)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpls = template.New(\"\")\n\tfor _, m := range matches {\n\t\tname := strings.TrimPrefix(m, templateDir)\n\t\tname = strings.TrimSuffix(name, filepath.Ext(name))\n\t\tif _, err := tmpls.New(name).Parse(m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype TemplateData struct {\n\tRequest *http.Request\n\tError string\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) error {\n\tif *dev {\n\t\tif err := reloadTemplates(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar data = &TemplateData{\n\t\tRequest: r,\n\t}\n\n\t\/\/ Determine template name.\n\ttmplName := r.URL.Path[1:]\n\tif stat, err := os.Stat(filepath.Join(templateDir, tmplName)); err != nil {\n\t\treturn err\n\t} else {\n\t\tif stat.IsDir() {\n\t\t\ttmplName = path.Join(tmplName, \"index\")\n\t\t}\n\t}\n\n\t\/\/ Find the requested page.\n\troot := tmpls.Lookup(tmplName)\n\tif root == nil {\n\t\t\/\/ Couldn't find the page, render the error page.\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn fmt.Errorf(\"%v - %v\", http.StatusNotFound, http.StatusText(http.StatusNotFound))\n\t}\n\n\t\/\/ Execute the requested template page.\n\treturn root.Execute(w, data)\n}\n\nfunc logHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%v %v\\n\", r.Method, r.URL)\n\t\th.ServeHTTP(w, r)\n\t})\n}\n\nfunc errorHandler(h func(w http.ResponseWriter, r *http.Request) error) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\terr := h(w, r)\n\t\tif err == nil {\n\t\t\tlog.Printf(\"%v %v\\n\", r.Method, r.URL)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"[error] %v %v - %v\\n\", r.Method, r.URL, err)\n\n\t\t\/\/ Render the error template.\n\t\tdata := &TemplateData{\n\t\t\tRequest: r,\n\t\t\tError: err.Error(),\n\t\t}\n\t\tif err := tmpls.ExecuteTemplate(w, errorTemplate, data); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n}\n\nfunc checkForUpdates() {\n\t\/\/ If not enough time has elapsed, skip the update.\n\tif time.Since(lastUpdate) < updateRateT {\n\t\treturn\n\t}\n\tlastUpdate = time.Now()\n\n\t\/\/ Check for updates.\n\tupdated, err := src.Update()\n\tif err != nil {\n\t\tlog.Println(\"Update error:\", err)\n\t\treturn\n\t}\n\tif updated {\n\t\tlog.Println(\"Updated source code. Exiting server..\")\n\t\tos.Exit(0)\n\t}\n\tlog.Println(\"checked for updates (none available)\")\n}\n\nfunc init() {\n\tif err := reloadTemplates(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif *update {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ttime.Sleep(updateRateT)\n\t\t\t\tcheckForUpdates()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Parse update rate flag.\n\tvar err error\n\tupdateRateT, err = time.ParseDuration(*updateRate)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Static file hosting\n\thttp.Handle(\"\/static\/\", logHandler(http.StripPrefix(staticDirPrefix, http.FileServer(http.Dir(staticDir)))))\n\n\t\/\/ App handler.\n\thttp.Handle(\"\/\", errorHandler(handler))\n\n\t\/\/ Start HTTPS server:\n\tif *tlsAddr != \"\" {\n\t\tgo func() {\n\t\t\tif *pemFile == \"\" {\n\t\t\t\tlog.Fatal(\"expected -pem-file flag\")\n\t\t\t}\n\t\t\tif *keyFile == \"\" {\n\t\t\t\tlog.Fatal(\"expected -key-file flag\")\n\t\t\t}\n\t\t\tlog.Println(\"Serving on\", *tlsAddr)\n\t\t\tlog.Fatal(http.ListenAndServeTLS(*tlsAddr, *pemFile, *keyFile, nil))\n\t\t}()\n\t}\n\n\t\/\/ Start HTTP server:\n\tlog.Println(\"Serving on\", *addr)\n\tlog.Fatal(http.ListenAndServe(*addr, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/juju\/cmd\"\n\t\"launchpad.net\/juju-core\/juju\/log\"\n\t\"launchpad.net\/tomb\"\n\t\"launchpad.net\/juju-core\/juju\/container\"\n\t\"launchpad.net\/juju-core\/juju\/state\"\n)\n\n\/\/ MachineAgent is a cmd.Command responsible for running a machine agent.\ntype MachineAgent struct {\n\tConf AgentConf\n\tMachineId int\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *MachineAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\"machine\", \"\", \"run a juju machine agent\", \"\"}\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *MachineAgent) Init(f *gnuflag.FlagSet, args []string) error {\n\ta.Conf.addFlags(f)\n\tf.IntVar(&a.MachineId, \"machine-id\", -1, \"id of the machine to run\")\n\tif err := f.Parse(true, args); err != nil {\n\t\treturn err\n\t}\n\tif a.MachineId < 0 {\n\t\treturn fmt.Errorf(\"--machine-id option must be set, and expects a non-negative integer\")\n\t}\n\treturn a.Conf.checkArgs(f.Args())\n}\n\n\n\/\/ Run runs a machine agent.\nfunc (a *MachineAgent) Run(_ *cmd.Context) error {\n\t\/\/ TODO reconnect when the agent fails.\n\tm, err := NewMachiner(&a.Conf.StateInfo, a.MachineId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo m.loop()\n\treturn m.Wait()\n}\n\nfunc NewMachiner(info *state.Info, machineId int) (*Machiner, error) {\n\tm := new(Machiner)\n\tvar err error\n\tm.st, err = state.Open(info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.machine, err = m.st.Machine(machineId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo m.loop()\n\treturn m, nil\n}\n\ntype Machiner struct {\n\ttomb tomb.Tomb\n\tst *state.State\n\tmachine *state.Machine\n}\n\nfunc (m *Machiner) loop() {\n\tdefer m.tomb.Done()\n\tdefer m.st.Close()\n\n\twatcher := m.machine.WatchUnits()\n\t\/\/ TODO read initial units, check if they're running\n\t\/\/ and restart them if not.\n\tfor {\n\t\tselect {\n\t\tcase <-m.tomb.Dying():\n\t\t\treturn\n\t\tcase change := <-watcher.Changes():\n\t\t\tfor _, u := range change.Deleted {\n\t\t\t\tif u.IsPrincipal() {\n\t\t\t\t\tif err := container.Simple(u).Destroy(); err != nil {\n\t\t\t\t\t\tlog.Printf(\"cannot destroy unit %s: %v\", u.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, u := range change.Added {\n\t\t\t\tif u.IsPrincipal() {\n\t\t\t\t\tif err := container.Simple(u).Deploy(); err != nil {\n\t\t\t\t\t\tlog.Printf(\"cannot deploy unit %s: %v\", u.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Machiner) Wait() error {\n\treturn m.tomb.Wait()\n}\n\nfunc (m *Machiner) Stop() {\n\tm.tomb.Kill(nil)\n\treturn m.tomb.Wait()\n}\n<commit_msg>cmd\/jujud: machine agent compiles<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"launchpad.net\/gnuflag\"\n\t\"launchpad.net\/juju-core\/juju\/cmd\"\n\t\"launchpad.net\/juju-core\/juju\/log\"\n\t\"launchpad.net\/tomb\"\n\t\"launchpad.net\/juju-core\/juju\/container\"\n\t\"launchpad.net\/juju-core\/juju\/state\"\n)\n\n\/\/ MachineAgent is a cmd.Command responsible for running a machine agent.\ntype MachineAgent struct {\n\tConf AgentConf\n\tMachineId int\n}\n\n\/\/ Info returns usage information for the command.\nfunc (a *MachineAgent) Info() *cmd.Info {\n\treturn &cmd.Info{\"machine\", \"\", \"run a juju machine agent\", \"\"}\n}\n\n\/\/ Init initializes the command for running.\nfunc (a *MachineAgent) Init(f *gnuflag.FlagSet, args []string) error {\n\ta.Conf.addFlags(f)\n\tf.IntVar(&a.MachineId, \"machine-id\", -1, \"id of the machine to run\")\n\tif err := f.Parse(true, args); err != nil {\n\t\treturn err\n\t}\n\tif a.MachineId < 0 {\n\t\treturn fmt.Errorf(\"--machine-id option must be set, and expects a non-negative integer\")\n\t}\n\treturn a.Conf.checkArgs(f.Args())\n}\n\n\n\/\/ Run runs a machine agent.\nfunc (a *MachineAgent) Run(_ *cmd.Context) error {\n\t\/\/ TODO reconnect when the agent fails.\n\tm, err := NewMachiner(&a.Conf.StateInfo, a.MachineId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo m.loop()\n\treturn m.Wait()\n}\n\nfunc NewMachiner(info *state.Info, machineId int) (*Machiner, error) {\n\tm := new(Machiner)\n\tvar err error\n\tm.st, err = state.Open(info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.machine, err = m.st.Machine(machineId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo m.loop()\n\treturn m, nil\n}\n\ntype Machiner struct {\n\ttomb tomb.Tomb\n\tst *state.State\n\tmachine *state.Machine\n}\n\nfunc (m *Machiner) loop() {\n\tdefer m.tomb.Done()\n\tdefer m.st.Close()\n\n\twatcher := m.machine.WatchUnits()\n\t\/\/ TODO read initial units, check if they're running\n\t\/\/ and restart them if not.\n\tfor {\n\t\tselect {\n\t\tcase <-m.tomb.Dying():\n\t\t\treturn\n\t\tcase change := <-watcher.Changes():\n\t\t\tfor _, u := range change.Deleted {\n\t\t\t\tif u.IsPrincipal() {\n\t\t\t\t\tif err := container.Simple(u).Destroy(); err != nil {\n\t\t\t\t\t\tlog.Printf(\"cannot destroy unit %s: %v\", u.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, u := range change.Added {\n\t\t\t\tif u.IsPrincipal() {\n\t\t\t\t\tif err := container.Simple(u).Deploy(); err != nil {\n\t\t\t\t\t\tlog.Printf(\"cannot deploy unit %s: %v\", u.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Machiner) Wait() error {\n\treturn m.tomb.Wait()\n}\n\nfunc (m *Machiner) Stop() error {\n\tm.tomb.Kill(nil)\n\treturn m.tomb.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ RunShellCommand executes a shell command and returns the output\/error\nfunc RunShellCommand(input string) (string, error) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\targs := strings.Split(input, \" \")[1:]\n\n\tcmd := exec.Command(inputCmd, args...)\n\toutputBytes := &bytes.Buffer{}\n\tcmd.Stdout = outputBytes\n\tcmd.Stderr = outputBytes\n\tcmd.Start()\n\terr := cmd.Wait() \/\/ wait for command to finish\n\toutstring := outputBytes.String()\n\treturn outstring, err\n}\n\n\/\/ HandleShellCommand runs the shell command\n\/\/ The openTerm argument specifies whether a terminal should be opened (for viewing output\n\/\/ or interacting with stdin)\nfunc HandleShellCommand(input string, openTerm bool) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\tif !openTerm {\n\t\t\/\/ Simply run the command in the background and notify the user when it's done\n\t\tmessenger.Message(\"Running...\")\n\t\tgo func() {\n\t\t\toutput, err := RunShellCommand(input)\n\t\t\ttotalLines := strings.Split(output, \"\\n\")\n\n\t\t\tif len(totalLines) < 3 {\n\t\t\t\tif err == nil {\n\t\t\t\t\tmessenger.Message(inputCmd, \" exited without error\")\n\t\t\t\t} else {\n\t\t\t\t\tmessenger.Message(inputCmd, \" exited with error: \", err, \": \", output)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmessenger.Message(output)\n\t\t\t}\n\t\t\t\/\/ We have to make sure to redraw\n\t\t\tRedrawAll()\n\t\t}()\n\t} else {\n\t\t\/\/ Shut down the screen because we're going to interact directly with the shell\n\t\tscreen.Fini()\n\t\tscreen = nil\n\n\t\targs := strings.Split(input, \" \")[1:]\n\n\t\t\/\/ Set up everything for the command\n\t\tcmd := exec.Command(inputCmd, args...)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\t\/\/ This is a trap for Ctrl-C so that it doesn't kill micro\n\t\t\/\/ Instead we trap Ctrl-C to kill the program we're running\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, os.Interrupt)\n\t\tgo func() {\n\t\t\tfor range c {\n\t\t\t\tcmd.Process.Kill()\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Start the command\n\t\tcmd.Start()\n\t\tcmd.Wait()\n\n\t\t\/\/ This is just so we don't return right away and let the user press enter to return\n\t\tTermMessage(\"\")\n\n\t\t\/\/ Start the screen back up\n\t\tInitScreen()\n\t}\n}\n\n\/\/ HandleCommand handles input from the user\nfunc HandleCommand(input string) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\targs := strings.Split(input, \" \")[1:]\n\n\tswitch inputCmd {\n\tcase \"set\":\n\t\t\/\/ Set an option and we have to set it for every view\n\t\tfor _, view := range views {\n\t\t\tSetOption(view, args)\n\t\t}\n\tcase \"run\":\n\t\t\/\/ Run a shell command in the background (openTerm is false)\n\t\tHandleShellCommand(strings.Join(args, \" \"), false)\n\tcase \"quit\":\n\t\t\/\/ This is a bit weird because micro only has one view for now so there is no way to close\n\t\t\/\/ a single view\n\t\t\/\/ Currently if multiple views were open, it would close all of them, and not check the non-mainviews\n\t\t\/\/ for unsaved changes. This, and the behavior of Ctrl-Q need to be changed when splits are implemented\n\t\tif views[mainView].CanClose(\"Quit anyway? (yes, no, save) \") {\n\t\t\tscreen.Fini()\n\t\t\tos.Exit(0)\n\t\t}\n\tcase \"save\":\n\t\t\/\/ Save the main view\n\t\tviews[mainView].Save()\n\tcase \"replace\":\n\t\t\/\/ This is a regex to parse the replace expression\n\t\t\/\/ We allow no quotes if there are no spaces, but if you want to search\n\t\t\/\/ for or replace an expression with spaces, you can add double quotes\n\t\tr := regexp.MustCompile(`\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\"|[^\\s]*`)\n\t\treplaceCmd := r.FindAllString(strings.Join(args, \" \"), -1)\n\t\tif len(replaceCmd) < 2 {\n\t\t\t\/\/ We need to find both a search and replace expression\n\t\t\tmessenger.Error(\"Invalid replace statement: \" + strings.Join(args, \" \"))\n\t\t\treturn\n\t\t}\n\n\t\tvar flags string\n\t\tif len(replaceCmd) == 3 {\n\t\t\t\/\/ The user included some flags\n\t\t\tflags = replaceCmd[2]\n\t\t}\n\n\t\tsearch := string(replaceCmd[0])\n\t\treplace := string(replaceCmd[1])\n\n\t\t\/\/ If the search and replace expressions have quotes, we need to remove those\n\t\tif strings.HasPrefix(search, `\"`) && strings.HasSuffix(search, `\"`) {\n\t\t\tsearch = search[1 : len(search)-1]\n\t\t}\n\t\tif strings.HasPrefix(replace, `\"`) && strings.HasSuffix(replace, `\"`) {\n\t\t\treplace = replace[1 : len(replace)-1]\n\t\t}\n\n\t\t\/\/ We replace all escaped double quotes to real double quotes\n\t\tsearch = strings.Replace(search, `\\\"`, `\"`, -1)\n\t\treplace = strings.Replace(replace, `\\\"`, `\"`, -1)\n\n\t\tregex, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\t\/\/ There was an error with the user's regex\n\t\t\tmessenger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tview := views[mainView]\n\n\t\tfound := false\n\t\tfor {\n\t\t\tmatch := regex.FindStringIndex(view.Buf.String())\n\t\t\tif match == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfound = true\n\t\t\tif strings.Contains(flags, \"c\") {\n\t\t\t\t\/\/ The 'check' flag was used\n\t\t\t\tSearch(search, view, true)\n\t\t\t\tview.Relocate()\n\t\t\t\tif settings[\"syntax\"].(bool) {\n\t\t\t\t\tview.matches = Match(view)\n\t\t\t\t}\n\t\t\t\tRedrawAll()\n\t\t\t\tchoice, canceled := messenger.YesNoPrompt(\"Perform replacement? (y,n)\")\n\t\t\t\tif canceled {\n\t\t\t\t\tif view.Cursor.HasSelection() {\n\t\t\t\t\t\tview.Cursor.SetLoc(view.Cursor.curSelection[0])\n\t\t\t\t\t\tview.Cursor.ResetSelection()\n\t\t\t\t\t}\n\t\t\t\t\tmessenger.Reset()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif choice {\n\t\t\t\t\tview.Cursor.DeleteSelection()\n\t\t\t\t\tview.Buf.Insert(match[0], replace)\n\t\t\t\t\tview.Cursor.ResetSelection()\n\t\t\t\t\tmessenger.Reset()\n\t\t\t\t} else {\n\t\t\t\t\tif view.Cursor.HasSelection() {\n\t\t\t\t\t\tsearchStart = view.Cursor.curSelection[1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsearchStart = ToCharPos(view.Cursor.x, view.Cursor.y, view.Buf)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tview.Buf.Replace(match[0], match[1], replace)\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmessenger.Message(\"Nothing matched \" + search)\n\t\t}\n\tdefault:\n\t\tmessenger.Error(\"Unknown command: \" + inputCmd)\n\t}\n}\n<commit_msg>Allow users to insert \\t and \\n in replace commands<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ RunShellCommand executes a shell command and returns the output\/error\nfunc RunShellCommand(input string) (string, error) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\targs := strings.Split(input, \" \")[1:]\n\n\tcmd := exec.Command(inputCmd, args...)\n\toutputBytes := &bytes.Buffer{}\n\tcmd.Stdout = outputBytes\n\tcmd.Stderr = outputBytes\n\tcmd.Start()\n\terr := cmd.Wait() \/\/ wait for command to finish\n\toutstring := outputBytes.String()\n\treturn outstring, err\n}\n\n\/\/ HandleShellCommand runs the shell command\n\/\/ The openTerm argument specifies whether a terminal should be opened (for viewing output\n\/\/ or interacting with stdin)\nfunc HandleShellCommand(input string, openTerm bool) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\tif !openTerm {\n\t\t\/\/ Simply run the command in the background and notify the user when it's done\n\t\tmessenger.Message(\"Running...\")\n\t\tgo func() {\n\t\t\toutput, err := RunShellCommand(input)\n\t\t\ttotalLines := strings.Split(output, \"\\n\")\n\n\t\t\tif len(totalLines) < 3 {\n\t\t\t\tif err == nil {\n\t\t\t\t\tmessenger.Message(inputCmd, \" exited without error\")\n\t\t\t\t} else {\n\t\t\t\t\tmessenger.Message(inputCmd, \" exited with error: \", err, \": \", output)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmessenger.Message(output)\n\t\t\t}\n\t\t\t\/\/ We have to make sure to redraw\n\t\t\tRedrawAll()\n\t\t}()\n\t} else {\n\t\t\/\/ Shut down the screen because we're going to interact directly with the shell\n\t\tscreen.Fini()\n\t\tscreen = nil\n\n\t\targs := strings.Split(input, \" \")[1:]\n\n\t\t\/\/ Set up everything for the command\n\t\tcmd := exec.Command(inputCmd, args...)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\t\/\/ This is a trap for Ctrl-C so that it doesn't kill micro\n\t\t\/\/ Instead we trap Ctrl-C to kill the program we're running\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, os.Interrupt)\n\t\tgo func() {\n\t\t\tfor range c {\n\t\t\t\tcmd.Process.Kill()\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Start the command\n\t\tcmd.Start()\n\t\tcmd.Wait()\n\n\t\t\/\/ This is just so we don't return right away and let the user press enter to return\n\t\tTermMessage(\"\")\n\n\t\t\/\/ Start the screen back up\n\t\tInitScreen()\n\t}\n}\n\n\/\/ HandleCommand handles input from the user\nfunc HandleCommand(input string) {\n\tinputCmd := strings.Split(input, \" \")[0]\n\targs := strings.Split(input, \" \")[1:]\n\n\tswitch inputCmd {\n\tcase \"set\":\n\t\t\/\/ Set an option and we have to set it for every view\n\t\tfor _, view := range views {\n\t\t\tSetOption(view, args)\n\t\t}\n\tcase \"run\":\n\t\t\/\/ Run a shell command in the background (openTerm is false)\n\t\tHandleShellCommand(strings.Join(args, \" \"), false)\n\tcase \"quit\":\n\t\t\/\/ This is a bit weird because micro only has one view for now so there is no way to close\n\t\t\/\/ a single view\n\t\t\/\/ Currently if multiple views were open, it would close all of them, and not check the non-mainviews\n\t\t\/\/ for unsaved changes. This, and the behavior of Ctrl-Q need to be changed when splits are implemented\n\t\tif views[mainView].CanClose(\"Quit anyway? (yes, no, save) \") {\n\t\t\tscreen.Fini()\n\t\t\tos.Exit(0)\n\t\t}\n\tcase \"save\":\n\t\t\/\/ Save the main view\n\t\tviews[mainView].Save()\n\tcase \"replace\":\n\t\t\/\/ This is a regex to parse the replace expression\n\t\t\/\/ We allow no quotes if there are no spaces, but if you want to search\n\t\t\/\/ for or replace an expression with spaces, you can add double quotes\n\t\tr := regexp.MustCompile(`\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\"|[^\\s]*`)\n\t\treplaceCmd := r.FindAllString(strings.Join(args, \" \"), -1)\n\t\tif len(replaceCmd) < 2 {\n\t\t\t\/\/ We need to find both a search and replace expression\n\t\t\tmessenger.Error(\"Invalid replace statement: \" + strings.Join(args, \" \"))\n\t\t\treturn\n\t\t}\n\n\t\tvar flags string\n\t\tif len(replaceCmd) == 3 {\n\t\t\t\/\/ The user included some flags\n\t\t\tflags = replaceCmd[2]\n\t\t}\n\n\t\tsearch := string(replaceCmd[0])\n\t\treplace := string(replaceCmd[1])\n\n\t\t\/\/ If the search and replace expressions have quotes, we need to remove those\n\t\tif strings.HasPrefix(search, `\"`) && strings.HasSuffix(search, `\"`) {\n\t\t\tsearch = search[1 : len(search)-1]\n\t\t}\n\t\tif strings.HasPrefix(replace, `\"`) && strings.HasSuffix(replace, `\"`) {\n\t\t\treplace = replace[1 : len(replace)-1]\n\t\t}\n\n\t\t\/\/ We replace all escaped double quotes to real double quotes\n\t\tsearch = strings.Replace(search, `\\\"`, `\"`, -1)\n\t\treplace = strings.Replace(replace, `\\\"`, `\"`, -1)\n\t\t\/\/ Replace some things so users can actually insert newlines and tabs in replacements\n\t\treplace = strings.Replace(replace, \"\\\\n\", \"\\n\", -1)\n\t\treplace = strings.Replace(replace, \"\\\\t\", \"\\t\", -1)\n\n\t\tregex, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\t\/\/ There was an error with the user's regex\n\t\t\tmessenger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tview := views[mainView]\n\n\t\tfound := false\n\t\tfor {\n\t\t\tmatch := regex.FindStringIndex(view.Buf.String())\n\t\t\tif match == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfound = true\n\t\t\tif strings.Contains(flags, \"c\") {\n\t\t\t\t\/\/ The 'check' flag was used\n\t\t\t\tSearch(search, view, true)\n\t\t\t\tview.Relocate()\n\t\t\t\tif settings[\"syntax\"].(bool) {\n\t\t\t\t\tview.matches = Match(view)\n\t\t\t\t}\n\t\t\t\tRedrawAll()\n\t\t\t\tchoice, canceled := messenger.YesNoPrompt(\"Perform replacement? (y,n)\")\n\t\t\t\tif canceled {\n\t\t\t\t\tif view.Cursor.HasSelection() {\n\t\t\t\t\t\tview.Cursor.SetLoc(view.Cursor.curSelection[0])\n\t\t\t\t\t\tview.Cursor.ResetSelection()\n\t\t\t\t\t}\n\t\t\t\t\tmessenger.Reset()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif choice {\n\t\t\t\t\tview.Cursor.DeleteSelection()\n\t\t\t\t\tview.Buf.Insert(match[0], replace)\n\t\t\t\t\tview.Cursor.ResetSelection()\n\t\t\t\t\tmessenger.Reset()\n\t\t\t\t} else {\n\t\t\t\t\tif view.Cursor.HasSelection() {\n\t\t\t\t\t\tsearchStart = view.Cursor.curSelection[1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsearchStart = ToCharPos(view.Cursor.x, view.Cursor.y, view.Buf)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tview.Buf.Replace(match[0], match[1], replace)\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmessenger.Message(\"Nothing matched \" + search)\n\t\t}\n\tdefault:\n\t\tmessenger.Error(\"Unknown command: \" + inputCmd)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"os\"\n\n\t\"github.com\/osrg\/gobgp\/packet\/bgp\"\n\t\"github.com\/osrg\/gobgp\/packet\/mrt\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/zmap\/zannotate\/zmrt\"\n)\n\ntype MRT2JsonGlobalConf struct {\n\tInputFilePath string\n\tOutputFilePath string\n\tLogFilePath string\n\tVerbosity int\n}\n\ntype RawPeer struct {\n\tType string `json:\"type\"`\n\tBgpId string `json:\"bgp_id\"`\n\tIpAddress string `json:\"ip_address\"`\n\tAS uint32 `json:\"as\"`\n}\n\ntype RawPeerIndexTableJSON struct {\n\tCollectorBgpId string `json:\"collector_bgp_id\"`\n\tViewName string `json:\"view_name\"`\n\tPeers []*RawPeer `json:\"peers\"`\n\tType string `json:\"type\"`\n}\n\ntype RawRibEntry struct {\n\tPeerIndex uint16 `json:\"peer_index\"`\n\tOriginatedTime uint32 `json:\"orginated_time\"`\n\tPathIdentifier uint32 `json:\"path_identifier\"`\n\tPathAttributes []bgp.PathAttributeInterface `json:\"path_attributes\"`\n\tType string `json:\"type\"`\n}\n\ntype RawRib struct {\n\tSubType string `json:\"sub_type\"`\n\tSequenceNumber uint32 `json:\"sequence_number\"`\n\tPrefix bgp.AddrPrefixInterface `json:\"prefix\"`\n\tEntries []*RawRibEntry `json:\"entries\"`\n\tRouteFamily bgp.RouteFamily `json:\"route_family\"`\n}\n\nfunc raw(conf *MRT2JsonGlobalConf, f *os.File) {\n\tf, err := os.Open(conf.InputFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tzmrt.MrtRawIterate(f, func(msg *mrt.MRTMessage) error {\n\t\tif msg.Header.Type != mrt.TABLE_DUMPv2 {\n\t\t\tlog.Fatal(\"not an MRT TABLE_DUMPv2\")\n\t\t}\n\t\tswitch mrt.MRTSubTypeTableDumpv2(msg.Header.SubType) {\n\t\tcase mrt.PEER_INDEX_TABLE:\n\t\t\tpeerIndexTable := msg.Body.(*mrt.PeerIndexTable)\n\t\t\tvar out RawPeerIndexTableJSON\n\t\t\tout.CollectorBgpId = peerIndexTable.CollectorBgpId.String()\n\t\t\tout.ViewName = peerIndexTable.ViewName\n\t\t\tout.Type = \"peer_index_table\"\n\t\t\tfor _, peer := range peerIndexTable.Peers {\n\t\t\t\tvar outPeer RawPeer\n\t\t\t\t\/\/outPeer.Type = peer.Type\n\t\t\t\toutPeer.BgpId = peer.BgpId.String()\n\t\t\t\toutPeer.IpAddress = peer.IpAddress.String()\n\t\t\t\toutPeer.AS = peer.AS\n\t\t\t\tout.Peers = append(out.Peers, &outPeer)\n\t\t\t}\n\t\t\tjson, err := json.Marshal(out)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"unable to json marshal peer table\")\n\t\t\t}\n\t\t\tf.WriteString(string(json))\n\t\t\tf.WriteString(\"\\n\")\n\t\tcase mrt.RIB_IPV4_UNICAST, mrt.RIB_IPV6_UNICAST,\n\t\t\tmrt.RIB_IPV4_MULTICAST, mrt.RIB_IPV6_MULTICAST, mrt.RIB_GENERIC:\n\t\t\t\/\/\n\t\t\trib := msg.Body.(*mrt.Rib)\n\t\t\tvar out RawRib\n\t\t\tout.SequenceNumber = rib.SequenceNumber\n\t\t\tout.Prefix = rib.Prefix\n\t\t\tout.RouteFamily = rib.RouteFamily\n\t\t\tfor _, entry := range rib.Entries {\n\t\t\t\tvar ribOut RawRibEntry\n\t\t\t\tribOut.PeerIndex = entry.PeerIndex\n\t\t\t\tribOut.OriginatedTime = entry.OriginatedTime\n\t\t\t\tribOut.PathIdentifier = entry.PathIdentifier\n\t\t\t\tribOut.PathAttributes = entry.PathAttributes\n\t\t\t\tout.Entries = append(out.Entries, &ribOut)\n\t\t\t}\n\t\t\tout.SubType = zmrt.MrtSubTypeToName(msg.Header.SubType)\n\t\t\tjson, err := json.Marshal(out)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"unable to json marshal peer table\")\n\t\t\t}\n\t\t\tf.WriteString(string(json))\n\t\t\tf.WriteString(\"\\n\")\n\t\tcase mrt.GEO_PEER_TABLE:\n\t\t\t\/\/geopeers := msg.Body.(*mrt.GeoPeerTable)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"unsupported subType: %v\", msg.Header.SubType)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc paths(conf *MRT2JsonGlobalConf, f *os.File) {\n\tf, err := os.Open(conf.InputFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tzmrt.MrtPathIterate(f, func(msg *zmrt.RIBEntry) {\n\t\tjson, _ := json.Marshal(msg)\n\t\tf.WriteString(string(json))\n\t\tf.WriteString(\"\\n\")\n\t})\n}\n\nfunc main() {\n\n\tvar conf MRT2JsonGlobalConf\n\tflags := flag.NewFlagSet(\"flags\", flag.ExitOnError)\n\tflags.StringVar(&conf.InputFilePath, \"input-file\", \"\", \"ip addresses to read\")\n\tflags.StringVar(&conf.OutputFilePath, \"output-file\", \"-\", \"where should JSON output be saved\")\n\tflags.StringVar(&conf.LogFilePath, \"log-file\", \"\", \"where should JSON output be saved\")\n\tflags.IntVar(&conf.Verbosity, \"verbosity\", 3, \"where should JSON output be saved\")\n\tflags.Parse(os.Args[2:])\n\n\tif conf.LogFilePath != \"\" {\n\t\tf, err := os.OpenFile(conf.LogFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to open log file (%s): %s\", conf.LogFilePath, err.Error())\n\t\t}\n\t\tlog.SetOutput(f)\n\t}\n\tif conf.InputFilePath == \"\" {\n\t\tlog.Fatal(\"no input path provided\")\n\t}\n\t\/\/ Translate the assigned verbosity level to a logrus log level.\n\tswitch conf.Verbosity {\n\tcase 1: \/\/ Fatal\n\t\tlog.SetLevel(log.FatalLevel)\n\tcase 2: \/\/ Error\n\t\tlog.SetLevel(log.ErrorLevel)\n\tcase 3: \/\/ Warnings (default)\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase 4: \/\/ Information\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase 5: \/\/ Debugging\n\t\tlog.SetLevel(log.DebugLevel)\n\tdefault:\n\t\tlog.Fatal(\"Unknown verbosity level specified. Must be between 1 (lowest)--5 (highest)\")\n\t}\n\tvar f *os.File\n\tif conf.OutputFilePath == \"-\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tvar err error\n\t\tf, err = os.OpenFile(conf.OutputFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open metadata file:\", err.Error())\n\t\t}\n\t\tdefer f.Close()\n\t}\n\tif os.Args[1] == \"raw\" {\n\t\traw(&conf, f)\n\t} else if os.Args[1] == \"entries\" {\n\t\tpaths(&conf, f)\n\t} else {\n\t\tlog.Fatal(\"invalid command\")\n\t}\n}\n<commit_msg>mrt2json works now<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"os\"\n\n\t\"github.com\/osrg\/gobgp\/packet\/bgp\"\n\t\"github.com\/osrg\/gobgp\/packet\/mrt\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/zmap\/zannotate\/zmrt\"\n)\n\ntype MRT2JsonGlobalConf struct {\n\tInputFilePath string\n\tOutputFilePath string\n\tLogFilePath string\n\tVerbosity int\n}\n\ntype RawPeer struct {\n\tType string `json:\"type\"`\n\tBgpId string `json:\"bgp_id\"`\n\tIpAddress string `json:\"ip_address\"`\n\tAS uint32 `json:\"as\"`\n}\n\ntype RawPeerIndexTableJSON struct {\n\tCollectorBgpId string `json:\"collector_bgp_id\"`\n\tViewName string `json:\"view_name\"`\n\tPeers []*RawPeer `json:\"peers\"`\n\tType string `json:\"type\"`\n}\n\ntype RawRibEntry struct {\n\tPeerIndex uint16 `json:\"peer_index\"`\n\tOriginatedTime uint32 `json:\"orginated_time\"`\n\tPathIdentifier uint32 `json:\"path_identifier\"`\n\tPathAttributes []bgp.PathAttributeInterface `json:\"path_attributes\"`\n\tType string `json:\"type\"`\n}\n\ntype RawRib struct {\n\tSubType string `json:\"sub_type\"`\n\tSequenceNumber uint32 `json:\"sequence_number\"`\n\tPrefix bgp.AddrPrefixInterface `json:\"prefix\"`\n\tEntries []*RawRibEntry `json:\"entries\"`\n\tRouteFamily bgp.RouteFamily `json:\"route_family\"`\n}\n\nfunc raw(conf *MRT2JsonGlobalConf, f *os.File) {\n\tinputFile, err := os.Open(conf.InputFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tzmrt.MrtRawIterate(inputFile, func(msg *mrt.MRTMessage) error {\n\t\tif msg.Header.Type != mrt.TABLE_DUMPv2 {\n\t\t\tlog.Fatal(\"not an MRT TABLE_DUMPv2\")\n\t\t}\n\t\tswitch mrt.MRTSubTypeTableDumpv2(msg.Header.SubType) {\n\t\tcase mrt.PEER_INDEX_TABLE:\n\t\t\tpeerIndexTable := msg.Body.(*mrt.PeerIndexTable)\n\t\t\tvar out RawPeerIndexTableJSON\n\t\t\tout.CollectorBgpId = peerIndexTable.CollectorBgpId.String()\n\t\t\tout.ViewName = peerIndexTable.ViewName\n\t\t\tout.Type = \"peer_index_table\"\n\t\t\tfor _, peer := range peerIndexTable.Peers {\n\t\t\t\tvar outPeer RawPeer\n\t\t\t\t\/\/outPeer.Type = peer.Type\n\t\t\t\toutPeer.BgpId = peer.BgpId.String()\n\t\t\t\toutPeer.IpAddress = peer.IpAddress.String()\n\t\t\t\toutPeer.AS = peer.AS\n\t\t\t\tout.Peers = append(out.Peers, &outPeer)\n\t\t\t}\n\t\t\tjson, err := json.Marshal(out)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"unable to json marshal peer table\")\n\t\t\t}\n\t\t\tf.WriteString(string(json))\n\t\t\tf.WriteString(\"\\n\")\n\t\tcase mrt.RIB_IPV4_UNICAST, mrt.RIB_IPV6_UNICAST,\n\t\t\tmrt.RIB_IPV4_MULTICAST, mrt.RIB_IPV6_MULTICAST, mrt.RIB_GENERIC:\n\t\t\t\/\/\n\t\t\trib := msg.Body.(*mrt.Rib)\n\t\t\tvar out RawRib\n\t\t\tout.SequenceNumber = rib.SequenceNumber\n\t\t\tout.Prefix = rib.Prefix\n\t\t\tout.RouteFamily = rib.RouteFamily\n\t\t\tfor _, entry := range rib.Entries {\n\t\t\t\tvar ribOut RawRibEntry\n\t\t\t\tribOut.PeerIndex = entry.PeerIndex\n\t\t\t\tribOut.OriginatedTime = entry.OriginatedTime\n\t\t\t\tribOut.PathIdentifier = entry.PathIdentifier\n\t\t\t\tribOut.PathAttributes = entry.PathAttributes\n\t\t\t\tout.Entries = append(out.Entries, &ribOut)\n\t\t\t}\n\t\t\tout.SubType = zmrt.MrtSubTypeToName(msg.Header.SubType)\n\t\t\tjson, err := json.Marshal(out)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"unable to json marshal peer table\")\n\t\t\t}\n\t\t\tf.WriteString(string(json))\n\t\t\tf.WriteString(\"\\n\")\n\t\tcase mrt.GEO_PEER_TABLE:\n\t\t\/\/geopeers := msg.Body.(*mrt.GeoPeerTable)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"unsupported subType: %v\", msg.Header.SubType)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc paths(conf *MRT2JsonGlobalConf, f *os.File) {\n\tinputFile, err := os.Open(conf.InputFilePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tzmrt.MrtPathIterate(inputFile, func(msg *zmrt.RIBEntry) {\n\t\tjson, _ := json.Marshal(msg)\n\t\tf.WriteString(string(json))\n\t\tf.WriteString(\"\\n\")\n\t})\n}\n\nfunc main() {\n\n\tvar conf MRT2JsonGlobalConf\n\tflags := flag.NewFlagSet(\"flags\", flag.ExitOnError)\n\tflags.StringVar(&conf.InputFilePath, \"input-file\", \"\", \"path to MRT file\")\n\tflags.StringVar(&conf.OutputFilePath, \"output-file\", \"-\", \"where should JSON output be saved\")\n\tflags.StringVar(&conf.LogFilePath, \"log-file\", \"\", \"where should JSON output be saved\")\n\tflags.IntVar(&conf.Verbosity, \"verbosity\", 3, \"where should JSON output be saved\")\n\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalf(\"No command provided. Must choose raw or entries\")\n\t}\n\tflags.Parse(os.Args[2:])\n\n\tif conf.LogFilePath != \"\" {\n\t\tf, err := os.OpenFile(conf.LogFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to open log file (%s): %s\", conf.LogFilePath, err.Error())\n\t\t}\n\t\tlog.SetOutput(f)\n\t}\n\tif conf.InputFilePath == \"\" {\n\t\tlog.Fatal(\"no input path provided\")\n\t}\n\t\/\/ Translate the assigned verbosity level to a logrus log level.\n\tswitch conf.Verbosity {\n\tcase 1: \/\/ Fatal\n\t\tlog.SetLevel(log.FatalLevel)\n\tcase 2: \/\/ Error\n\t\tlog.SetLevel(log.ErrorLevel)\n\tcase 3: \/\/ Warnings (default)\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase 4: \/\/ Information\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase 5: \/\/ Debugging\n\t\tlog.SetLevel(log.DebugLevel)\n\tdefault:\n\t\tlog.Fatal(\"Unknown verbosity level specified. Must be between 1 (lowest)--5 (highest)\")\n\t}\n\tvar f *os.File\n\tif conf.OutputFilePath == \"-\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tvar err error\n\t\tf, err = os.OpenFile(conf.OutputFilePath, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open output file: \", err.Error())\n\t\t}\n\t\tdefer f.Close()\n\t}\n\tif os.Args[1] == \"raw\" {\n\t\traw(&conf, f)\n\t} else if os.Args[1] == \"entries\" {\n\t\tpaths(&conf, f)\n\t} else {\n\t\tlog.Fatal(\"invalid command\")\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Use of this source code is governed by a BSD-style license that can be found\n\/\/ in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/google\/puffs\/lang\/parse\"\n\t\"github.com\/google\/puffs\/lang\/render\"\n\t\"github.com\/google\/puffs\/lang\/token\"\n)\n\nvar (\n\tlFlag = flag.Bool(\"l\", false, \"list files whose formatting differs from puffsfmt's\")\n\twFlag = flag.Bool(\"w\", false, \"write result to (source) file instead of stdout\")\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: puffsfmt [flags] [path ...]\\n\")\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tif err := main1(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc main1() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tif *lFlag {\n\t\t\treturn errors.New(\"cannot use -l with standard input\")\n\t\t}\n\t\tif *wFlag {\n\t\t\treturn errors.New(\"cannot use -w with standard input\")\n\t\t}\n\t\treturn do(os.Stdin, \"<standard input>\")\n\t}\n\n\tif !*lFlag && !*wFlag {\n\t\treturn errors.New(\"must use -l or -w if paths are given\")\n\t}\n\n\tfor i := 0; i < flag.NArg(); i++ {\n\t\targ := flag.Arg(i)\n\t\tswitch dir, err := os.Stat(arg); {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase dir.IsDir():\n\t\t\treturn filepath.Walk(arg, walk)\n\t\tdefault:\n\t\t\tif err := do(nil, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc isPuffsFile(info os.FileInfo) bool {\n\tname := info.Name()\n\treturn !info.IsDir() && !strings.HasPrefix(name, \".\") && strings.HasSuffix(name, \".puffs\")\n}\n\nfunc walk(filename string, info os.FileInfo, err error) error {\n\tif err == nil && isPuffsFile(info) {\n\t\terr = do(nil, filename)\n\t}\n\t\/\/ Don't complain if a file was deleted in the meantime (i.e. the directory\n\t\/\/ changed concurrently while running puffsfmt).\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc do(r io.Reader, filename string) error {\n\tsrc, err := []byte(nil), error(nil)\n\tif r != nil {\n\t\tsrc, err = ioutil.ReadAll(r)\n\t} else {\n\t\tsrc, err = ioutil.ReadFile(filename)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidMap := token.IDMap{}\n\ttokens, comments, err := token.Tokenize(src, &idMap, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ We don't need the AST node to pretty-print, but it's worth puffsfmt\n\t\/\/ rejecting syntax errors. This is just a parse, not a full type check.\n\tif _, err := parse.ParseFile(tokens, &idMap, filename); err != nil {\n\t\treturn err\n\t}\n\tbuf := &bytes.Buffer{}\n\tif err := render.RenderFile(buf, tokens, comments, &idMap); err != nil {\n\t\treturn err\n\t}\n\tdst := buf.Bytes()\n\n\tif r != nil {\n\t\tif _, err := os.Stdout.Write(dst); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if !bytes.Equal(dst, src) {\n\t\tif *lFlag {\n\t\t\tfmt.Println(filename)\n\t\t}\n\t\tif *wFlag {\n\t\t\tif err := writeFile(filename, dst); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nconst chmodSupported = runtime.GOOS != \"windows\"\n\nfunc writeFile(filename string, b []byte) error {\n\tinfo, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif chmodSupported {\n\t\tf.Chmod(info.Mode().Perm())\n\t}\n\t_, werr := f.Write(b)\n\tcerr := f.Close()\n\tif werr != nil {\n\t\tos.Remove(f.Name())\n\t\treturn werr\n\t}\n\tif cerr != nil {\n\t\tos.Remove(f.Name())\n\t\treturn cerr\n\t}\n\treturn os.Rename(f.Name(), filename)\n}\n<commit_msg>Add a puffsfmt doc comment.<commit_after>\/\/ Use of this source code is governed by a BSD-style license that can be found\n\/\/ in the LICENSE file.\n\n\/\/ puffsfmt formats Puffs programs.\n\/\/\n\/\/ Without explicit paths, it rewrites the standard input to standard output.\n\/\/ Otherwise, the -l or -w or both flags must be given. Given a file path, it\n\/\/ operates on that file; given a directory path, it operates on all .puffs\n\/\/ files in that directory, recursively. Files starting with a period are\n\/\/ ignored.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/google\/puffs\/lang\/parse\"\n\t\"github.com\/google\/puffs\/lang\/render\"\n\t\"github.com\/google\/puffs\/lang\/token\"\n)\n\nvar (\n\tlFlag = flag.Bool(\"l\", false, \"list files whose formatting differs from puffsfmt's\")\n\twFlag = flag.Bool(\"w\", false, \"write result to (source) file instead of stdout\")\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"usage: puffsfmt [flags] [path ...]\\n\")\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tif err := main1(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc main1() error {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\tif flag.NArg() == 0 {\n\t\tif *lFlag {\n\t\t\treturn errors.New(\"cannot use -l with standard input\")\n\t\t}\n\t\tif *wFlag {\n\t\t\treturn errors.New(\"cannot use -w with standard input\")\n\t\t}\n\t\treturn do(os.Stdin, \"<standard input>\")\n\t}\n\n\tif !*lFlag && !*wFlag {\n\t\treturn errors.New(\"must use -l or -w if paths are given\")\n\t}\n\n\tfor i := 0; i < flag.NArg(); i++ {\n\t\targ := flag.Arg(i)\n\t\tswitch dir, err := os.Stat(arg); {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase dir.IsDir():\n\t\t\treturn filepath.Walk(arg, walk)\n\t\tdefault:\n\t\t\tif err := do(nil, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc isPuffsFile(info os.FileInfo) bool {\n\tname := info.Name()\n\treturn !info.IsDir() && !strings.HasPrefix(name, \".\") && strings.HasSuffix(name, \".puffs\")\n}\n\nfunc walk(filename string, info os.FileInfo, err error) error {\n\tif err == nil && isPuffsFile(info) {\n\t\terr = do(nil, filename)\n\t}\n\t\/\/ Don't complain if a file was deleted in the meantime (i.e. the directory\n\t\/\/ changed concurrently while running puffsfmt).\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc do(r io.Reader, filename string) error {\n\tsrc, err := []byte(nil), error(nil)\n\tif r != nil {\n\t\tsrc, err = ioutil.ReadAll(r)\n\t} else {\n\t\tsrc, err = ioutil.ReadFile(filename)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tidMap := token.IDMap{}\n\ttokens, comments, err := token.Tokenize(src, &idMap, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ We don't need the AST node to pretty-print, but it's worth puffsfmt\n\t\/\/ rejecting syntax errors. This is just a parse, not a full type check.\n\tif _, err := parse.ParseFile(tokens, &idMap, filename); err != nil {\n\t\treturn err\n\t}\n\tbuf := &bytes.Buffer{}\n\tif err := render.RenderFile(buf, tokens, comments, &idMap); err != nil {\n\t\treturn err\n\t}\n\tdst := buf.Bytes()\n\n\tif r != nil {\n\t\tif _, err := os.Stdout.Write(dst); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if !bytes.Equal(dst, src) {\n\t\tif *lFlag {\n\t\t\tfmt.Println(filename)\n\t\t}\n\t\tif *wFlag {\n\t\t\tif err := writeFile(filename, dst); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nconst chmodSupported = runtime.GOOS != \"windows\"\n\nfunc writeFile(filename string, b []byte) error {\n\tinfo, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif chmodSupported {\n\t\tf.Chmod(info.Mode().Perm())\n\t}\n\t_, werr := f.Write(b)\n\tcerr := f.Close()\n\tif werr != nil {\n\t\tos.Remove(f.Name())\n\t\treturn werr\n\t}\n\tif cerr != nil {\n\t\tos.Remove(f.Name())\n\t\treturn cerr\n\t}\n\treturn os.Rename(f.Name(), filename)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/proidiot\/gone\/log\"\n\t\"github.com\/stuphlabs\/pullcord\/authentication\"\n\t\"github.com\/stuphlabs\/pullcord\/config\"\n\t\"github.com\/stuphlabs\/pullcord\/monitor\"\n\tpcnet \"github.com\/stuphlabs\/pullcord\/net\"\n\t\"github.com\/stuphlabs\/pullcord\/proxy\"\n\t\"github.com\/stuphlabs\/pullcord\/trigger\"\n\t\"github.com\/stuphlabs\/pullcord\/util\"\n)\n\nconst defaultConfigFilePath = \"\/etc\/pullcord.json\"\nconst defaultConfig = `{\n\t\"resources\": {\n\t\t\"hresource\": {\n\t\t\t\"type\": \"exactpathrouter\",\n\t\t\t\"data\": {\n\t\t\t\t\"routes\": {\n\t\t\t\t\t\"\/favicon.ico\": {\n\t\t\t\t\t\t\"type\": \"standardresponse\",\n\t\t\t\t\t\t\"data\": 404\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"default\": {\n\t\t\t\t\t\"type\": \"landingfaulter\",\n\t\t\t\t\t\"data\": {}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\t\"lresource\": {\n\t\t\t\"type\": \"basiclistener\",\n\t\t\t\"data\": {\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"laddr\": \":8080\"\n\t\t\t}\n\t\t}\n\t},\n\t\"listener\": \"lresource\",\n\t\"handler\": \"hresource\"\n}`\n\nfunc main() {\n\tvar inlineCfg string\n\tvar cfgPath string\n\tvar cfgFallback bool\n\n\tflag.StringVar(\n\t\t&inlineCfg,\n\t\t\"inline-config\",\n\t\t\"\",\n\t\t\"Inline pullcord config instead of using a config file\",\n\t)\n\n\tflag.StringVar(\n\t\t&cfgPath,\n\t\t\"config\",\n\t\tdefaultConfigFilePath,\n\t\t\"Path to pullcord config file\",\n\t)\n\n\tflag.BoolVar(\n\t\t&cfgFallback,\n\t\t\"config-fallback\",\n\t\ttrue,\n\t\t\"Fallback to basic config if unable to find the config file\",\n\t)\n\n\tflag.Parse()\n\n\tvar err error\n\tvar cfgReader io.Reader\n\tif inlineCfg != \"\" {\n\t\tcfgReader = strings.NewReader(inlineCfg)\n\t}\n\n\tif cfgReader == nil {\n\t\tcfgReader, err = os.Open(cfgPath)\n\t\tif err != nil {\n\t\t\tlog.Error(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Unable to open specified config file\"+\n\t\t\t\t\t\t\" %s: %s\",\n\t\t\t\t\tcfgPath,\n\t\t\t\t\terr.Error(),\n\t\t\t\t),\n\t\t\t)\n\t\t} else {\n\t\t\tlog.Info(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Reading config from file: %s\",\n\t\t\t\t\tcfgPath,\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t}\n\n\tif cfgReader == nil {\n\t\tif !cfgFallback {\n\t\t\tlog.Crit(\n\t\t\t\t\"No config defined and not falling back to\"+\n\t\t\t\t\t\" default, aborting.\",\n\t\t\t)\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tcfgReader = strings.NewReader(defaultConfig)\n\t\t}\n\t}\n\n\tauthentication.LoadPlugin()\n\tmonitor.LoadPlugin()\n\tpcnet.LoadPlugin()\n\tproxy.LoadPlugin()\n\ttrigger.LoadPlugin()\n\tutil.LoadPlugin()\n\n\tlog.Debug(\"Plugins loaded\")\n\n\tserver, err := config.ServerFromReader(cfgReader)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\tcritErr := fmt.Errorf(\n\t\t\t\"Error while parsing server config: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t\tlog.Crit(critErr)\n\t\tpanic(critErr)\n\t}\n\n\tlog.Notice(\n\t\tfmt.Sprintf(\n\t\t\t\"Starting server at %s...\",\n\t\t\tserver.Listener.Addr(),\n\t\t),\n\t)\n\n\terr = server.Serve()\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\tcritErr := fmt.Errorf(\n\t\t\t\"Error while running server: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t\tlog.Crit(critErr)\n\t\tpanic(critErr)\n\t}\n}\n<commit_msg>Making gofmt happy.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/proidiot\/gone\/log\"\n\t\"github.com\/stuphlabs\/pullcord\/authentication\"\n\t\"github.com\/stuphlabs\/pullcord\/config\"\n\t\"github.com\/stuphlabs\/pullcord\/monitor\"\n\tpcnet \"github.com\/stuphlabs\/pullcord\/net\"\n\t\"github.com\/stuphlabs\/pullcord\/proxy\"\n\t\"github.com\/stuphlabs\/pullcord\/trigger\"\n\t\"github.com\/stuphlabs\/pullcord\/util\"\n)\n\nconst defaultConfigFilePath = \"\/etc\/pullcord.json\"\nconst defaultConfig = `{\n\t\"resources\": {\n\t\t\"hresource\": {\n\t\t\t\"type\": \"exactpathrouter\",\n\t\t\t\"data\": {\n\t\t\t\t\"routes\": {\n\t\t\t\t\t\"\/favicon.ico\": {\n\t\t\t\t\t\t\"type\": \"standardresponse\",\n\t\t\t\t\t\t\"data\": 404\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"default\": {\n\t\t\t\t\t\"type\": \"landingfaulter\",\n\t\t\t\t\t\"data\": {}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\t\"lresource\": {\n\t\t\t\"type\": \"basiclistener\",\n\t\t\t\"data\": {\n\t\t\t\t\"proto\": \"tcp\",\n\t\t\t\t\"laddr\": \":8080\"\n\t\t\t}\n\t\t}\n\t},\n\t\"listener\": \"lresource\",\n\t\"handler\": \"hresource\"\n}`\n\nfunc main() {\n\tvar inlineCfg string\n\tvar cfgPath string\n\tvar cfgFallback bool\n\n\tflag.StringVar(\n\t\t&inlineCfg,\n\t\t\"inline-config\",\n\t\t\"\",\n\t\t\"Inline pullcord config instead of using a config file\",\n\t)\n\n\tflag.StringVar(\n\t\t&cfgPath,\n\t\t\"config\",\n\t\tdefaultConfigFilePath,\n\t\t\"Path to pullcord config file\",\n\t)\n\n\tflag.BoolVar(\n\t\t&cfgFallback,\n\t\t\"config-fallback\",\n\t\ttrue,\n\t\t\"Fallback to basic config if unable to find the config file\",\n\t)\n\n\tflag.Parse()\n\n\tvar err error\n\tvar cfgReader io.Reader\n\tif inlineCfg != \"\" {\n\t\tcfgReader = strings.NewReader(inlineCfg)\n\t}\n\n\tif cfgReader == nil {\n\t\tcfgReader, err = os.Open(cfgPath)\n\t\tif err != nil {\n\t\t\tlog.Error(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Unable to open specified config file\"+\n\t\t\t\t\t\t\" %s: %s\",\n\t\t\t\t\tcfgPath,\n\t\t\t\t\terr.Error(),\n\t\t\t\t),\n\t\t\t)\n\t\t} else {\n\t\t\tlog.Info(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Reading config from file: %s\",\n\t\t\t\t\tcfgPath,\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t}\n\n\tif cfgReader == nil {\n\t\tif !cfgFallback {\n\t\t\tlog.Crit(\n\t\t\t\t\"No config defined and not falling back to\" +\n\t\t\t\t\t\" default, aborting.\",\n\t\t\t)\n\t\t\tos.Exit(1)\n\t\t} else {\n\t\t\tcfgReader = strings.NewReader(defaultConfig)\n\t\t}\n\t}\n\n\tauthentication.LoadPlugin()\n\tmonitor.LoadPlugin()\n\tpcnet.LoadPlugin()\n\tproxy.LoadPlugin()\n\ttrigger.LoadPlugin()\n\tutil.LoadPlugin()\n\n\tlog.Debug(\"Plugins loaded\")\n\n\tserver, err := config.ServerFromReader(cfgReader)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\tcritErr := fmt.Errorf(\n\t\t\t\"Error while parsing server config: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t\tlog.Crit(critErr)\n\t\tpanic(critErr)\n\t}\n\n\tlog.Notice(\n\t\tfmt.Sprintf(\n\t\t\t\"Starting server at %s...\",\n\t\t\tserver.Listener.Addr(),\n\t\t),\n\t)\n\n\terr = server.Serve()\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\tcritErr := fmt.Errorf(\n\t\t\t\"Error while running server: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t\tlog.Crit(critErr)\n\t\tpanic(critErr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package main defines a command line tool for submitting date\n\/\/ ranges for reprocessing\npackage main\n\n\/\/ TODO - note about setting up batch table and giving permission.\n\n\/*\nStrategies...\n 1. Work from a prefix, or range of prefixes.\n 2. Work from a date range\n 3. Work from a month prefix, but explicitly iterate over days.\n maybe use a separate goroutine for each date?\n\nUsage:\n\n\n\n\n*\/\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org\/api\/iterator\"\n\n\t\"cloud.google.com\/go\/storage\"\n)\n\ntype httpClientIntf interface {\n\tGet(url string) (resp *http.Response, err error)\n}\n\n\/\/ This is used to intercept Get requests to the queue_pusher when invoked\n\/\/ with -dry_run.\ntype dryRunHTTP struct{}\n\nfunc (dr *dryRunHTTP) Get(url string) (resp *http.Response, err error) {\n\tresp = &http.Response{}\n\tresp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))\n\tresp.Status = \"200 OK\"\n\tresp.StatusCode = 200\n\treturn\n}\n\nvar (\n\tfProject = flag.String(\"project\", \"\", \"Project containing queues.\")\n\tfQueue = flag.String(\"queue\", \"etl-ndt-batch-\", \"Base of queue name.\")\n\t\/\/ TODO implement listing queues to determine number of queue, and change this to 0\n\tfNumQueues = flag.Int(\"num_queues\", 8, \"Number of queues. Normally determined by listing queues.\")\n\tfBucket = flag.String(\"bucket\", \"archive-mlab-oti\", \"Source bucket.\")\n\tfExper = flag.String(\"experiment\", \"ndt\", \"Experiment prefix, trailing slash optional\")\n\tfMonth = flag.String(\"month\", \"\", \"Single month spec, as YYYY\/MM\")\n\tfDay = flag.String(\"day\", \"\", \"Single day spec, as YYYY\/MM\/DD\")\n\tfDryRun = flag.Bool(\"dry_run\", false, \"Prevents all output to queue_pusher.\")\n\n\tbucket *storage.BucketHandle\n\t\/\/ Use an interface to allow test fake.\n\thttpClient httpClientIntf = http.DefaultClient\n)\n\nfunc init() {\n\t\/\/ Always prepend the filename and line number.\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n}\n\n\/\/ postOne sends a single https request to the queue pusher to add a task.\n\/\/ Iff dryRun is true, this does nothing.\nfunc postOne(queue string, bucket string, fn string) error {\n\treqStr := fmt.Sprintf(\"https:\/\/queue-pusher-dot-%s.appspot.com\/receiver?queue=%s&filename=gs:\/\/%s\/%s\", *fProject, queue, bucket, fn)\n\n\tresp, err := httpClient.Get(reqStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.New(\"http error: \" + resp.Status)\n\t}\n\n\treturn nil\n}\n\n\/\/ Post all items in an ObjectIterator into specific\n\/\/ queue.\nfunc postDay(wg *sync.WaitGroup, queue string, it *storage.ObjectIterator) {\n\tqpErrCount := 0\n\tgcsErrCount := 0\n\tfileCount := 0\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tfor o, err := it.Next(); err != iterator.Done; o, err = it.Next() {\n\t\tif err != nil {\n\t\t\t\/\/ TODO - should this retry?\n\t\t\tlog.Println(err)\n\t\t\tif gcsErrCount > 3 {\n\t\t\t\tlog.Printf(\"Failed after %d files to %s.\\n\", fileCount, queue)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\terr = postOne(queue, *fBucket, o.Name)\n\t\tif err != nil {\n\t\t\t\/\/ TODO - should this retry?\n\t\t\tlog.Println(err)\n\t\t\tif qpErrCount > 3 {\n\t\t\t\tlog.Printf(\"Failed after %d files to %s.\\n\", fileCount, queue)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tfileCount++\n\t\t}\n\t}\n\tlog.Println(\"Added \", fileCount, \" tasks to \", queue)\n}\n\n\/\/ Initially this used a hash, but using day ordinal is better\n\/\/ as it distributes across the queues more evenly.\nfunc queueFor(date time.Time) string {\n\tday := date.Unix() \/ (24 * 60 * 60)\n\treturn fmt.Sprintf(\"%s%d\", *fQueue, int(day)%*fNumQueues)\n}\n\n\/\/ day fetches an iterator over the objects with ndt\/YYYY\/MM\/DD prefix,\n\/\/ and passes the iterator to postDay with appropriate queue.\n\/\/ Iff wq is not nil, postDay will call done on wg when finished\n\/\/ posting.\nfunc day(wg *sync.WaitGroup, prefix string) {\n\tdate, err := time.Parse(\"ndt\/2006\/01\/02\/\", prefix)\n\tif err != nil {\n\t\tlog.Println(\"Failed parsing date from \", prefix)\n\t\tlog.Println(err)\n\t\tif wg != nil {\n\t\t\twg.Done()\n\t\t}\n\t\treturn\n\t}\n\tqueue := queueFor(date)\n\tlog.Println(\"Adding \", prefix, \" to \", queue)\n\tq := storage.Query{\n\t\tDelimiter: \"\/\",\n\t\tPrefix: prefix,\n\t}\n\t\/\/ TODO - can this error?\n\tit := bucket.Objects(context.Background(), &q)\n\tif wg != nil {\n\t\tgo postDay(wg, queue, it)\n\t} else {\n\t\tpostDay(nil, queue, it)\n\t}\n}\n\n\/\/ month adds all of the files from dates within a specified month.\n\/\/ It is difficult to test without hitting GCS. 8-(\nfunc month(prefix string) {\n\tq := storage.Query{\n\t\tDelimiter: \"\/\",\n\t\t\/\/ TODO - validate.\n\t\tPrefix: prefix,\n\t}\n\tit := bucket.Objects(context.Background(), &q)\n\n\tvar wg sync.WaitGroup\n\tfor o, err := it.Next(); err != iterator.Done; o, err = it.Next() {\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t} else if o.Prefix != \"\" {\n\t\t\twg.Add(1)\n\t\t\tday(&wg, o.Prefix)\n\t\t} else {\n\t\t\tlog.Println(\"Skipping: \", o.Name)\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc setup(fakeHTTP httpClientIntf) {\n\tif *fDryRun {\n\t\thttpClient = &dryRunHTTP{}\n\t}\n\tif fakeHTTP != nil {\n\t\thttpClient = fakeHTTP\n\t}\n\n\tstorageClient, err := storage.NewClient(context.Background())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(err)\n\t}\n\n\tbucket = storageClient.Bucket(*fBucket)\n\t_, err = bucket.Attrs(context.Background())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(err)\n\t}\n}\n\nfunc run() {\n\tif *fMonth != \"\" {\n\t\tmonth(*fExper + \"\/\" + *fMonth + \"\/\")\n\t} else if *fDay != \"\" {\n\t\tday(nil, *fExper+\"\/\"+*fDay+\"\/\")\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *fProject == \"\" && !*fDryRun {\n\t\tlog.Println(\"Must specify project (or --dry_run)\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tsetup(nil)\n\trun()\n}\n<commit_msg>Update header comment<commit_after>\/\/ Package main defines a command line tool for submitting date\n\/\/ ranges for reprocessing\npackage main\n\n\/*\nStrategies...\n 1. Work from a month prefix, but explicitly iterate over days.\n maybe use a separate goroutine for each date? (DONE)\n 2. Work from a prefix, or range of prefixes.\n 3. Work from a date range\n\n*\/\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org\/api\/iterator\"\n\n\t\"cloud.google.com\/go\/storage\"\n)\n\ntype httpClientIntf interface {\n\tGet(url string) (resp *http.Response, err error)\n}\n\n\/\/ This is used to intercept Get requests to the queue_pusher when invoked\n\/\/ with -dry_run.\ntype dryRunHTTP struct{}\n\nfunc (dr *dryRunHTTP) Get(url string) (resp *http.Response, err error) {\n\tresp = &http.Response{}\n\tresp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))\n\tresp.Status = \"200 OK\"\n\tresp.StatusCode = 200\n\treturn\n}\n\nvar (\n\tfProject = flag.String(\"project\", \"\", \"Project containing queues.\")\n\tfQueue = flag.String(\"queue\", \"etl-ndt-batch-\", \"Base of queue name.\")\n\t\/\/ TODO implement listing queues to determine number of queue, and change this to 0\n\tfNumQueues = flag.Int(\"num_queues\", 8, \"Number of queues. Normally determined by listing queues.\")\n\tfBucket = flag.String(\"bucket\", \"archive-mlab-oti\", \"Source bucket.\")\n\tfExper = flag.String(\"experiment\", \"ndt\", \"Experiment prefix, trailing slash optional\")\n\tfMonth = flag.String(\"month\", \"\", \"Single month spec, as YYYY\/MM\")\n\tfDay = flag.String(\"day\", \"\", \"Single day spec, as YYYY\/MM\/DD\")\n\tfDryRun = flag.Bool(\"dry_run\", false, \"Prevents all output to queue_pusher.\")\n\n\tbucket *storage.BucketHandle\n\t\/\/ Use an interface to allow test fake.\n\thttpClient httpClientIntf = http.DefaultClient\n)\n\nfunc init() {\n\t\/\/ Always prepend the filename and line number.\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n}\n\n\/\/ postOne sends a single https request to the queue pusher to add a task.\n\/\/ Iff dryRun is true, this does nothing.\nfunc postOne(queue string, bucket string, fn string) error {\n\treqStr := fmt.Sprintf(\"https:\/\/queue-pusher-dot-%s.appspot.com\/receiver?queue=%s&filename=gs:\/\/%s\/%s\", *fProject, queue, bucket, fn)\n\n\tresp, err := httpClient.Get(reqStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn errors.New(\"http error: \" + resp.Status)\n\t}\n\n\treturn nil\n}\n\n\/\/ Post all items in an ObjectIterator into specific\n\/\/ queue.\nfunc postDay(wg *sync.WaitGroup, queue string, it *storage.ObjectIterator) {\n\tqpErrCount := 0\n\tgcsErrCount := 0\n\tfileCount := 0\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tfor o, err := it.Next(); err != iterator.Done; o, err = it.Next() {\n\t\tif err != nil {\n\t\t\t\/\/ TODO - should this retry?\n\t\t\tlog.Println(err)\n\t\t\tif gcsErrCount > 3 {\n\t\t\t\tlog.Printf(\"Failed after %d files to %s.\\n\", fileCount, queue)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\terr = postOne(queue, *fBucket, o.Name)\n\t\tif err != nil {\n\t\t\t\/\/ TODO - should this retry?\n\t\t\tlog.Println(err)\n\t\t\tif qpErrCount > 3 {\n\t\t\t\tlog.Printf(\"Failed after %d files to %s.\\n\", fileCount, queue)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tfileCount++\n\t\t}\n\t}\n\tlog.Println(\"Added \", fileCount, \" tasks to \", queue)\n}\n\n\/\/ Initially this used a hash, but using day ordinal is better\n\/\/ as it distributes across the queues more evenly.\nfunc queueFor(date time.Time) string {\n\tday := date.Unix() \/ (24 * 60 * 60)\n\treturn fmt.Sprintf(\"%s%d\", *fQueue, int(day)%*fNumQueues)\n}\n\n\/\/ day fetches an iterator over the objects with ndt\/YYYY\/MM\/DD prefix,\n\/\/ and passes the iterator to postDay with appropriate queue.\n\/\/ Iff wq is not nil, postDay will call done on wg when finished\n\/\/ posting.\nfunc day(wg *sync.WaitGroup, prefix string) {\n\tdate, err := time.Parse(\"ndt\/2006\/01\/02\/\", prefix)\n\tif err != nil {\n\t\tlog.Println(\"Failed parsing date from \", prefix)\n\t\tlog.Println(err)\n\t\tif wg != nil {\n\t\t\twg.Done()\n\t\t}\n\t\treturn\n\t}\n\tqueue := queueFor(date)\n\tlog.Println(\"Adding \", prefix, \" to \", queue)\n\tq := storage.Query{\n\t\tDelimiter: \"\/\",\n\t\tPrefix: prefix,\n\t}\n\t\/\/ TODO - can this error? Or do errors only occur on iterator ops?\n\tit := bucket.Objects(context.Background(), &q)\n\tif wg != nil {\n\t\tgo postDay(wg, queue, it)\n\t} else {\n\t\tpostDay(nil, queue, it)\n\t}\n}\n\n\/\/ month adds all of the files from dates within a specified month.\n\/\/ It is difficult to test without hitting GCS. 8-(\nfunc month(prefix string) {\n\tq := storage.Query{\n\t\tDelimiter: \"\/\",\n\t\t\/\/ TODO - validate.\n\t\tPrefix: prefix,\n\t}\n\tit := bucket.Objects(context.Background(), &q)\n\n\tvar wg sync.WaitGroup\n\tfor o, err := it.Next(); err != iterator.Done; o, err = it.Next() {\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t} else if o.Prefix != \"\" {\n\t\t\twg.Add(1)\n\t\t\tday(&wg, o.Prefix)\n\t\t} else {\n\t\t\tlog.Println(\"Skipping: \", o.Name)\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc setup(fakeHTTP httpClientIntf) {\n\tif *fDryRun {\n\t\thttpClient = &dryRunHTTP{}\n\t}\n\tif fakeHTTP != nil {\n\t\thttpClient = fakeHTTP\n\t}\n\n\tstorageClient, err := storage.NewClient(context.Background())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(err)\n\t}\n\n\tbucket = storageClient.Bucket(*fBucket)\n\t_, err = bucket.Attrs(context.Background())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(err)\n\t}\n}\n\nfunc run() {\n\tif *fMonth != \"\" {\n\t\tmonth(*fExper + \"\/\" + *fMonth + \"\/\")\n\t} else if *fDay != \"\" {\n\t\tday(nil, *fExper+\"\/\"+*fDay+\"\/\")\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *fProject == \"\" && !*fDryRun {\n\t\tlog.Println(\"Must specify project (or --dry_run)\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tsetup(nil)\n\trun()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ tinystat is used to compare two or more sets of measurements (e.g., runs of a\n\/\/ multiple runs of benchmarks of two possible implementations) and determine if\n\/\/ they are statistically different.\n\/\/\n\/\/ Imagine we have the results of different animals' SAT scores. Each animal\n\/\/ took the SATs multiple times, and we're assuming that differences between\n\/\/ each animal's attempts are measurement error (i.e., normally distributed). We\n\/\/ can test for differences as follows:\n\/\/\n\/\/ $ tinystat examples\/iguana examples\/chameleon examples\/leopard\n\/\/\n\/\/ Filename N Min Max Median Mean StdDev\n\/\/ examples\/iguana 7 50.000000 750.000000 200.000000 300.000000 238.047614\n\/\/ examples\/chameleon 5 150.000000 930.000000 500.000000 540.000000 299.081929\n\/\/ No difference proven at 95% confidence.\n\/\/ examples\/leopard 6 700.000000 700.000000 700.000000 700.000000 0.000000\n\/\/ Difference at 95% confidence!\n\/\/ 400.000000 +\/- 215.283224\n\/\/ 57.142857% +\/- 30.754746%\n\/\/ (Student's t, pooled s = 175.809815)\n\/\/\n\/\/ As you can see, despite the superficial differences between the iguana's\n\/\/ scores and the chameleon's scores, there is no statistically significant\n\/\/ difference between the two at a 95% confidence level. The leopard, on the\n\/\/ other hand, has statistically significantly different scores.\npackage main\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/codahale\/tinystat\"\n\t\"github.com\/docopt\/docopt-go\"\n)\n\nfunc main() {\n\tusage := `tinystat helps you conduct experiments.\n\nUsage:\n tinystat [options] <control> [<experiment>...]\n\nArguments:\n <control> Input file containing control measurements.\n <experiment> Input file containing experimental measurements.\n\nOptions:\n -h --help Display this usage information.\n -c --confidence=<pct> Specify confidence level for analysis. [default: 95]\n -C --column=<num> The column to analyze. [default: 0]\n -d --delimiter=(t|s|c) Tab, space, or comma delimited data. [default: t]\n`\n\n\targs, err := docopt.Parse(usage, nil, true, \"\", true)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tconfidence, err := strconv.ParseFloat(args[\"--confidence\"].(string), 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcolumn, err := strconv.Atoi(args[\"--column\"].(string))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar delimiter rune\n\tswitch args[\"--delimiter\"] {\n\tcase \"t\":\n\t\tdelimiter = '\\t'\n\tcase \"c\":\n\t\tdelimiter = ','\n\tcase \"s\":\n\t\tdelimiter = ' '\n\tdefault:\n\t\tpanic(\"bad delimiter\")\n\t}\n\n\tcontrolFilename := args[\"<control>\"].(string)\n\texperimentFilenames := args[\"<experiment>\"].([]string)\n\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintln(w, \"Filename\\tN\\tMin\\tMax\\tMedian\\tMean\\tStdDev\\t\")\n\n\tcontrol, err := readFile(controlFilename, column, delimiter)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(w,\n\t\t\"%s\\t%.0f\\t%f\\t%f\\t%f\\t%f\\t%f\\t\\n\",\n\t\tcontrolFilename,\n\t\tcontrol.N,\n\t\tcontrol.Min,\n\t\tcontrol.Max,\n\t\tcontrol.Median,\n\t\tcontrol.Mean,\n\t\tcontrol.StdDev,\n\t)\n\n\tfor _, filename := range experimentFilenames {\n\t\texperimental, err := readFile(filename, column, delimiter)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfmt.Fprintf(w,\n\t\t\t\"%s\\t%.0f\\t%f\\t%f\\t%f\\t%f\\t%f\\t\\n\",\n\t\t\tfilename,\n\t\t\texperimental.N,\n\t\t\texperimental.Min,\n\t\t\texperimental.Max,\n\t\t\texperimental.Median,\n\t\t\texperimental.Mean,\n\t\t\texperimental.StdDev,\n\t\t)\n\n\t\td := tinystat.Compare(control, experimental, confidence)\n\t\tif d.Significant() {\n\t\t\tfmt.Fprintf(w, \"\\tDifference at %v%% confidence!\\n\", confidence)\n\t\t\tfmt.Fprintf(w, \"\\t\\t%10f +\/- %10f\\n\", d.Delta, d.Error)\n\t\t\tfmt.Fprintf(w, \"\\t\\t%9f%% +\/- %9f%%\\n\", d.PctDelta, d.PctError)\n\t\t\tfmt.Fprintf(w, \"\\t\\t(Student's t, pooled s = %f)\\n\", d.PooledStdDev)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\\tNo difference proven at %v%% confidence.\\n\", confidence)\n\t\t}\n\t}\n\t_ = w.Flush()\n}\n\nfunc readFile(filename string, col int, del rune) (tinystat.Summary, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn tinystat.Summary{}, err\n\t}\n\tdefer f.Close()\n\n\tr := csv.NewReader(f)\n\tr.Comma = del\n\trecords, err := r.ReadAll()\n\tif err != nil {\n\t\treturn tinystat.Summary{}, err\n\t}\n\n\tdata := make([]float64, len(records))\n\tfor i, s := range records {\n\t\tn, err := strconv.ParseFloat(s[col], 64)\n\t\tif err != nil {\n\t\t\treturn tinystat.Summary{}, err\n\t\t}\n\t\tdata[i] = n\n\t}\n\n\treturn tinystat.Summarize(data), nil\n}\n<commit_msg>Nicer error message.<commit_after>\/\/ tinystat is used to compare two or more sets of measurements (e.g., runs of a\n\/\/ multiple runs of benchmarks of two possible implementations) and determine if\n\/\/ they are statistically different.\n\/\/\n\/\/ Imagine we have the results of different animals' SAT scores. Each animal\n\/\/ took the SATs multiple times, and we're assuming that differences between\n\/\/ each animal's attempts are measurement error (i.e., normally distributed). We\n\/\/ can test for differences as follows:\n\/\/\n\/\/ $ tinystat examples\/iguana examples\/chameleon examples\/leopard\n\/\/\n\/\/ Filename N Min Max Median Mean StdDev\n\/\/ examples\/iguana 7 50.000000 750.000000 200.000000 300.000000 238.047614\n\/\/ examples\/chameleon 5 150.000000 930.000000 500.000000 540.000000 299.081929\n\/\/ No difference proven at 95% confidence.\n\/\/ examples\/leopard 6 700.000000 700.000000 700.000000 700.000000 0.000000\n\/\/ Difference at 95% confidence!\n\/\/ 400.000000 +\/- 215.283224\n\/\/ 57.142857% +\/- 30.754746%\n\/\/ (Student's t, pooled s = 175.809815)\n\/\/\n\/\/ As you can see, despite the superficial differences between the iguana's\n\/\/ scores and the chameleon's scores, there is no statistically significant\n\/\/ difference between the two at a 95% confidence level. The leopard, on the\n\/\/ other hand, has statistically significantly different scores.\npackage main\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/codahale\/tinystat\"\n\t\"github.com\/docopt\/docopt-go\"\n)\n\nfunc main() {\n\tusage := `tinystat helps you conduct experiments.\n\nUsage:\n tinystat [options] <control> [<experiment>...]\n\nArguments:\n <control> Input file containing control measurements.\n <experiment> Input file containing experimental measurements.\n\nOptions:\n -h --help Display this usage information.\n -c --confidence=<pct> Specify confidence level for analysis. [default: 95]\n -C --column=<num> The column to analyze. [default: 0]\n -d --delimiter=(t|s|c) Tab, space, or comma delimited data. [default: t]\n`\n\n\targs, err := docopt.Parse(usage, nil, true, \"\", true)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tconfidence, err := strconv.ParseFloat(args[\"--confidence\"].(string), 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcolumn, err := strconv.Atoi(args[\"--column\"].(string))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar delimiter rune\n\tswitch d := args[\"--delimiter\"]; d {\n\tcase \"t\":\n\t\tdelimiter = '\\t'\n\tcase \"c\":\n\t\tdelimiter = ','\n\tcase \"s\":\n\t\tdelimiter = ' '\n\tdefault:\n\t\tpanic(fmt.Errorf(\"bad delimiter: %#v\", d))\n\t}\n\n\tcontrolFilename := args[\"<control>\"].(string)\n\texperimentFilenames := args[\"<experiment>\"].([]string)\n\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintln(w, \"Filename\\tN\\tMin\\tMax\\tMedian\\tMean\\tStdDev\\t\")\n\n\tcontrol, err := readFile(controlFilename, column, delimiter)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(w,\n\t\t\"%s\\t%.0f\\t%f\\t%f\\t%f\\t%f\\t%f\\t\\n\",\n\t\tcontrolFilename,\n\t\tcontrol.N,\n\t\tcontrol.Min,\n\t\tcontrol.Max,\n\t\tcontrol.Median,\n\t\tcontrol.Mean,\n\t\tcontrol.StdDev,\n\t)\n\n\tfor _, filename := range experimentFilenames {\n\t\texperimental, err := readFile(filename, column, delimiter)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfmt.Fprintf(w,\n\t\t\t\"%s\\t%.0f\\t%f\\t%f\\t%f\\t%f\\t%f\\t\\n\",\n\t\t\tfilename,\n\t\t\texperimental.N,\n\t\t\texperimental.Min,\n\t\t\texperimental.Max,\n\t\t\texperimental.Median,\n\t\t\texperimental.Mean,\n\t\t\texperimental.StdDev,\n\t\t)\n\n\t\td := tinystat.Compare(control, experimental, confidence)\n\t\tif d.Significant() {\n\t\t\tfmt.Fprintf(w, \"\\tDifference at %v%% confidence!\\n\", confidence)\n\t\t\tfmt.Fprintf(w, \"\\t\\t%10f +\/- %10f\\n\", d.Delta, d.Error)\n\t\t\tfmt.Fprintf(w, \"\\t\\t%9f%% +\/- %9f%%\\n\", d.PctDelta, d.PctError)\n\t\t\tfmt.Fprintf(w, \"\\t\\t(Student's t, pooled s = %f)\\n\", d.PooledStdDev)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\\tNo difference proven at %v%% confidence.\\n\", confidence)\n\t\t}\n\t}\n\t_ = w.Flush()\n}\n\nfunc readFile(filename string, col int, del rune) (tinystat.Summary, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn tinystat.Summary{}, err\n\t}\n\tdefer f.Close()\n\n\tr := csv.NewReader(f)\n\tr.Comma = del\n\trecords, err := r.ReadAll()\n\tif err != nil {\n\t\treturn tinystat.Summary{}, err\n\t}\n\n\tdata := make([]float64, len(records))\n\tfor i, s := range records {\n\t\tn, err := strconv.ParseFloat(s[col], 64)\n\t\tif err != nil {\n\t\t\treturn tinystat.Summary{}, err\n\t\t}\n\t\tdata[i] = n\n\t}\n\n\treturn tinystat.Summarize(data), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tdifflib \"github.com\/kylelemons\/godebug\/diff\"\n\t\"github.com\/nochso\/tocenize\"\n)\n\nvar VERSION = \"?\"\n\nfunc main() {\n\tlog.SetFlags(0)\n\tflag.Usage = func() {\n\t\tfmt.Println(\"tocenize [options] FILE...\")\n\t\tfmt.Println()\n\t\tflag.PrintDefaults()\n\t}\n\tjob := tocenize.Job{}\n\tflag.IntVar(&job.MinDepth, \"min\", 1, \"minimum depth\")\n\tflag.IntVar(&job.MaxDepth, \"max\", 99, \"maximum depth\")\n\tflag.StringVar(&tocenize.Indent, \"indent\", \"\\t\", \"string used for nesting\")\n\tdoDiff := flag.Bool(\"d\", false, \"print full diff to stdout\")\n\tdoPrint := flag.Bool(\"p\", false, \"print full result to stdout\")\n\tflag.BoolVar(&job.ExistingOnly, \"e\", false, \"update only existing TOC (no insert)\")\n\tshowVersion := flag.Bool(\"v\", false, \"print version\")\n\tflag.Parse()\n\n\tif *showVersion {\n\t\tfmt.Println(VERSION)\n\t\tos.Exit(0)\n\t}\n\tif flag.NArg() == 0 {\n\t\tlog.Println(\"too few arguments\")\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\taction := update\n\tif *doDiff {\n\t\taction = diff\n\t}\n\tif *doPrint {\n\t\taction = print\n\t}\n\n\tfor _, arg := range flag.Args() {\n\t\tpaths, err := filepath.Glob(arg)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\tlog.SetPrefix(path + \": \")\n\n\t\t\tdoc, err := tocenize.NewDocument(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttoc := tocenize.NewTOC(doc, job)\n\t\t\tnewDoc, err := doc.Update(toc, job.ExistingOnly)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taction(job, doc, newDoc)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\tlog.SetPrefix(\"\")\n\t}\n}\n\nfunc diff(job tocenize.Job, a, b tocenize.Document) error {\n\tlog.Println()\n\td := difflib.Diff(a.String(), b.String())\n\tif d != \"\" {\n\t\tfmt.Println(d)\n\t}\n\treturn nil\n}\n\nfunc print(job tocenize.Job, a, b tocenize.Document) error {\n\tfmt.Println(b.String())\n\treturn nil\n}\n\nfunc update(job tocenize.Job, a, b tocenize.Document) error {\n\treturn ioutil.WriteFile(b.Path, []byte(b.String()), 0644)\n}\n<commit_msg>Extract actionFunc and runAction<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tdifflib \"github.com\/kylelemons\/godebug\/diff\"\n\t\"github.com\/nochso\/tocenize\"\n)\n\nvar VERSION = \"?\"\n\nfunc main() {\n\tlog.SetFlags(0)\n\tflag.Usage = func() {\n\t\tfmt.Println(\"tocenize [options] FILE...\")\n\t\tfmt.Println()\n\t\tflag.PrintDefaults()\n\t}\n\tjob := tocenize.Job{}\n\tflag.IntVar(&job.MinDepth, \"min\", 1, \"minimum depth\")\n\tflag.IntVar(&job.MaxDepth, \"max\", 99, \"maximum depth\")\n\tflag.StringVar(&tocenize.Indent, \"indent\", \"\\t\", \"string used for nesting\")\n\tdoDiff := flag.Bool(\"d\", false, \"print full diff to stdout\")\n\tdoPrint := flag.Bool(\"p\", false, \"print full result to stdout\")\n\tflag.BoolVar(&job.ExistingOnly, \"e\", false, \"update only existing TOC (no insert)\")\n\tshowVersion := flag.Bool(\"v\", false, \"print version\")\n\tflag.Parse()\n\n\tif *showVersion {\n\t\tfmt.Println(VERSION)\n\t\tos.Exit(0)\n\t}\n\tif flag.NArg() == 0 {\n\t\tlog.Println(\"too few arguments\")\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\taction := update\n\tif *doDiff {\n\t\taction = diff\n\t}\n\tif *doPrint {\n\t\taction = print\n\t}\n\n\tfor _, arg := range flag.Args() {\n\t\tpaths, err := filepath.Glob(arg)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\tlog.SetPrefix(path + \": \")\n\t\t\terr = runAction(path, job, action)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\tlog.SetPrefix(\"\")\n\t}\n}\n\ntype actionFunc func(job tocenize.Job, a, b tocenize.Document) error\n\nfunc runAction(path string, job tocenize.Job, action actionFunc) error {\n\tdoc, err := tocenize.NewDocument(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttoc := tocenize.NewTOC(doc, job)\n\tnewDoc, err := doc.Update(toc, job.ExistingOnly)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn action(job, doc, newDoc)\n}\n\nfunc diff(job tocenize.Job, a, b tocenize.Document) error {\n\tlog.Println()\n\td := difflib.Diff(a.String(), b.String())\n\tif d != \"\" {\n\t\tfmt.Println(d)\n\t}\n\treturn nil\n}\n\nfunc print(job tocenize.Job, a, b tocenize.Document) error {\n\tfmt.Println(b.String())\n\treturn nil\n}\n\nfunc update(job tocenize.Job, a, b tocenize.Document) error {\n\treturn ioutil.WriteFile(b.Path, []byte(b.String()), 0644)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/antihax\/evedata\/internal\/redigohelper\"\n\t\"github.com\/antihax\/evedata\/internal\/sqlhelper\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\/models\"\n\t_ \"github.com\/antihax\/evedata\/services\/vanguard\/views\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gorilla\/context\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tlog.SetPrefix(\"evedata vanguard: \")\n\n\tr := redigohelper.ConnectRedisProdPool()\n\tdb := sqlhelper.NewDatabase()\n\n\t\/\/ Make a new service and send it into the background.\n\tvanguard := vanguard.NewVanguard(r, db)\n\tlog.Printf(\"Setup Router\\n\")\n\trtr := vanguard.NewRouter()\n\tdefer vanguard.Close()\n\n\t\/\/ Handle command line arguments\n\tif len(os.Args) > 1 {\n\t\tif os.Args[1] == \"dumpdb\" {\n\t\t\t\/\/ Dump the database to sql file.\n\t\t\tlog.Printf(\"Dumping Database to evedata.sql\\n\")\n\t\t\terr := models.DumpDatabase(\".\/sql\/evedata.sql\", \"evedata\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushcache\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*rediscache*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushstructures\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing structures\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*structure_failure*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else if os.Args[1] == \"flushredis\" {\n\t\t\t\/\/ Erase everything in redis for modified deployments\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tconn.Do(\"FLUSHALL\")\n\t\t}\n\t}\n\n\tlog.Printf(\"Start Listening\\n\")\n\tgo log.Fatalln(http.ListenAndServe(\":3000\", context.ClearHandler(rtr)))\n\n\tlog.Printf(\"In production\\n\")\n\t\/\/ Handle SIGINT and SIGTERM.\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)\n\tlog.Println(<-ch)\n}\n<commit_msg>add flush kills<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/antihax\/evedata\/internal\/redigohelper\"\n\t\"github.com\/antihax\/evedata\/internal\/sqlhelper\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\"\n\t\"github.com\/antihax\/evedata\/services\/vanguard\/models\"\n\t_ \"github.com\/antihax\/evedata\/services\/vanguard\/views\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gorilla\/context\"\n)\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tlog.SetPrefix(\"evedata vanguard: \")\n\n\tr := redigohelper.ConnectRedisProdPool()\n\tdb := sqlhelper.NewDatabase()\n\n\t\/\/ Make a new service and send it into the background.\n\tvanguard := vanguard.NewVanguard(r, db)\n\tlog.Printf(\"Setup Router\\n\")\n\trtr := vanguard.NewRouter()\n\tdefer vanguard.Close()\n\n\t\/\/ Handle command line arguments\n\tif len(os.Args) > 1 {\n\t\tif os.Args[1] == \"dumpdb\" {\n\t\t\t\/\/ Dump the database to sql file.\n\t\t\tlog.Printf(\"Dumping Database to evedata.sql\\n\")\n\t\t\terr := models.DumpDatabase(\".\/sql\/evedata.sql\", \"evedata\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(err)\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushcache\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*rediscache*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushstructures\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing structures\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tkeys, err := redis.Strings(conn.Do(\"KEYS\", \"*structure_failure*\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tfor _, key := range keys {\n\t\t\t\t\tconn.Do(\"DEL\", key)\n\t\t\t\t\tlog.Printf(\"Deleting %s\\n\", key)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if os.Args[1] == \"flushkills\" {\n\t\t\t\/\/ Erase http cache in redis\n\t\t\tlog.Printf(\"Flushing killmails\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\ti, err := redis.Int64(conn.Do(\"DEL\", \"evedata_known_kills\"))\n\t\t\tlog.Printf(\"%d %s\\n\", i, err)\n\t\t} else if os.Args[1] == \"flushredis\" {\n\t\t\t\/\/ Erase everything in redis for modified deployments\n\t\t\tlog.Printf(\"Flushing Redis\\n\")\n\t\t\tconn := r.Get()\n\t\t\tdefer conn.Close()\n\t\t\tconn.Do(\"FLUSHALL\")\n\t\t}\n\t}\n\n\tlog.Printf(\"Start Listening\\n\")\n\tgo log.Fatalln(http.ListenAndServe(\":3000\", context.ClearHandler(rtr)))\n\n\tlog.Printf(\"In production\\n\")\n\t\/\/ Handle SIGINT and SIGTERM.\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)\n\tlog.Println(<-ch)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\n\/\/ Client for the deployment service\ntype Client struct {\n\ttoken string\n\tbaseURL string\n}\n\n\/\/ NewClient makes a new Client\nfunc NewClient(token, baseURL string) Client {\n\treturn Client{\n\t\ttoken: token,\n\t\tbaseURL: baseURL,\n\t}\n}\n\nfunc (c Client) newRequest(method, path string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, c.baseURL+path, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Scope-Probe token=%s\", c.token))\n\treturn req, nil\n}\n\n\/\/ Deploy notifies the deployment service about a new deployment\nfunc (c Client) Deploy(deployment Deployment) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(deployment); err != nil {\n\t\treturn err\n\t}\n\treq, err := c.newRequest(\"POST\", \"\/api\/deploy\/deploy\", &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 204 {\n\t\treturn fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn nil\n}\n\n\/\/ GetDeployments returns a list of deployments\nfunc (c Client) GetDeployments(from, through int64) ([]Deployment, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/deploy?from=%d&through=%d\", from, through), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\tvar response struct {\n\t\tDeployments []Deployment `json:\"deployments\"`\n\t}\n\tif err := json.NewDecoder(res.Body).Decode(&response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.Deployments, nil\n}\n\n\/\/ GetEvents returns the raw events.\nfunc (c Client) GetEvents(from, through int64) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/event?from=%d&through=%d\", from, through), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn ioutil.ReadAll(res.Body)\n}\n\n\/\/ GetConfig returns the current Config\nfunc (c Client) GetConfig() (*Config, error) {\n\treq, err := c.newRequest(\"GET\", \"\/api\/config\/deploy\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == 404 {\n\t\treturn nil, fmt.Errorf(\"No configuration uploaded yet.\")\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\tvar config Config\n\tif err := json.NewDecoder(res.Body).Decode(&config); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &config, nil\n}\n\n\/\/ SetConfig sets the current Config\nfunc (c Client) SetConfig(config *Config) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(config); err != nil {\n\t\treturn err\n\t}\n\treq, err := c.newRequest(\"POST\", \"\/api\/config\/deploy\", &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 204 {\n\t\treturn fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn nil\n}\n\n\/\/ GetLogs returns the logs for a given deployment.\nfunc (c Client) GetLogs(deployID string) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/%s\/log\", deployID), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn ioutil.ReadAll(res.Body)\n}\n<commit_msg>Fix logs path<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\n\/\/ Client for the deployment service\ntype Client struct {\n\ttoken string\n\tbaseURL string\n}\n\n\/\/ NewClient makes a new Client\nfunc NewClient(token, baseURL string) Client {\n\treturn Client{\n\t\ttoken: token,\n\t\tbaseURL: baseURL,\n\t}\n}\n\nfunc (c Client) newRequest(method, path string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, c.baseURL+path, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Scope-Probe token=%s\", c.token))\n\treturn req, nil\n}\n\n\/\/ Deploy notifies the deployment service about a new deployment\nfunc (c Client) Deploy(deployment Deployment) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(deployment); err != nil {\n\t\treturn err\n\t}\n\treq, err := c.newRequest(\"POST\", \"\/api\/deploy\/deploy\", &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 204 {\n\t\treturn fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn nil\n}\n\n\/\/ GetDeployments returns a list of deployments\nfunc (c Client) GetDeployments(from, through int64) ([]Deployment, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/deploy?from=%d&through=%d\", from, through), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\tvar response struct {\n\t\tDeployments []Deployment `json:\"deployments\"`\n\t}\n\tif err := json.NewDecoder(res.Body).Decode(&response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.Deployments, nil\n}\n\n\/\/ GetEvents returns the raw events.\nfunc (c Client) GetEvents(from, through int64) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/event?from=%d&through=%d\", from, through), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn ioutil.ReadAll(res.Body)\n}\n\n\/\/ GetConfig returns the current Config\nfunc (c Client) GetConfig() (*Config, error) {\n\treq, err := c.newRequest(\"GET\", \"\/api\/config\/deploy\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode == 404 {\n\t\treturn nil, fmt.Errorf(\"No configuration uploaded yet.\")\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\tvar config Config\n\tif err := json.NewDecoder(res.Body).Decode(&config); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &config, nil\n}\n\n\/\/ SetConfig sets the current Config\nfunc (c Client) SetConfig(config *Config) error {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(config); err != nil {\n\t\treturn err\n\t}\n\treq, err := c.newRequest(\"POST\", \"\/api\/config\/deploy\", &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 204 {\n\t\treturn fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn nil\n}\n\n\/\/ GetLogs returns the logs for a given deployment.\nfunc (c Client) GetLogs(deployID string) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", fmt.Sprintf(\"\/api\/deploy\/deploy\/%s\/log\", deployID), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Error making request: %s\", res.Status)\n\t}\n\treturn ioutil.ReadAll(res.Body)\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"github.com\/qiniu\/qshell\/v2\/cmd_test\/test\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPFop(t *testing.T) {\n\tresult, errs := test.RunCmdWithError(\"pfop\", \"qshell-na0\", \"test_mv.mp4\", \"avthumb\/mp4\")\n\tif len(errs) > 0{\n\t\tt.Fail()\n\t}\n\tresult = strings.ReplaceAll(result, \"\\n\", \"\")\n\tresult, errs = test.RunCmdWithError(\"prefop\", result)\n\tif len(errs) > 0{\n\t\tt.Fail()\n\t}\n}\n<commit_msg>add fop test case<commit_after>package cmd\n\nimport (\n\t\"github.com\/qiniu\/qshell\/v2\/cmd_test\/test\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestFop(t *testing.T) {\n\tresult, errs := test.RunCmdWithError(\"pfop\", \"qshell-na0\", \"test_mv.mp4\", \"avthumb\/mp4\")\n\tif len(errs) > 0{\n\t\tt.Fail()\n\t}\n\n\tresult = strings.ReplaceAll(result, \"\\n\", \"\")\n\tresult, errs = test.RunCmdWithError(\"prefop\", result)\n\tif len(errs) > 0{\n\t\tt.Fail()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage telemetry\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\n\tgovppapi \"git.fd.io\/govpp.git\/api\"\n\t\"github.com\/ligato\/cn-infra\/flavors\/local\"\n\tprom \"github.com\/ligato\/cn-infra\/rpc\/prometheus\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\/vppcalls\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n)\n\nconst (\n\t\/\/ Period between metric updates\n\tupdatePeriod = time.Second * 5\n\n\t\/\/ Registry path for telemetry metrics\n\tregistryPath = \"\/vpp\"\n\n\t\/\/ Metrics label used for agent label\n\tagentLabel = \"agent\"\n)\n\nconst (\n\t\/\/ Runtime\n\truntimeThreadLabel = \"thread\"\n\truntimeThreadIDLabel = \"threadID\"\n\truntimeItemLabel = \"item\"\n\n\truntimeCallsMetric = \"calls\"\n\truntimeVectorsMetric = \"vectors\"\n\truntimeSuspendsMetric = \"suspends\"\n\truntimeClocksMetric = \"clocks\"\n\truntimeVectorsPerCallMetric = \"vectors_per_call\"\n\n\t\/\/ Memory\n\tmemoryThreadLabel = \"thread\"\n\tmemoryThreadIDLabel = \"threadID\"\n\n\tmemoryObjectsMetric = \"objects\"\n\tmemoryUsedMetric = \"used\"\n\tmemoryTotalMetric = \"total\"\n\tmemoryFreeMetric = \"free\"\n\tmemoryReclaimedMetric = \"reclaimed\"\n\tmemoryOverheadMetric = \"overhead\"\n\tmemoryCapacityMetric = \"capacity\"\n\n\t\/\/ Buffers\n\tbuffersThreadIDLabel = \"threadID\"\n\tbuffersItemLabel = \"item\"\n\tbuffersIndexLabel = \"index\"\n\n\tbuffersSizeMetric = \"size\"\n\tbuffersAllocMetric = \"alloc\"\n\tbuffersFreeMetric = \"free\"\n\tbuffersNumAllocMetric = \"num_alloc\"\n\tbuffersNumFreeMetric = \"num_free\"\n\n\t\/\/ Node counters\n\tnodeCounterItemLabel = \"item\"\n\tnodeCounterReasonLabel = \"reason\"\n\n\tnodeCounterCountMetric = \"count\"\n)\n\n\/\/ Plugin registers Telemetry Plugin\ntype Plugin struct {\n\tDeps\n\n\tvppCh govppapi.Channel\n\n\truntimeGaugeVecs map[string]*prometheus.GaugeVec\n\truntimeStats map[string]*runtimeStats\n\n\tmemoryGaugeVecs map[string]*prometheus.GaugeVec\n\tmemoryStats map[string]*memoryStats\n\n\tbuffersGaugeVecs map[string]*prometheus.GaugeVec\n\tbuffersStats map[string]*buffersStats\n\n\tnodeCounterGaugeVecs map[string]*prometheus.GaugeVec\n\tnodeCounterStats map[string]*nodeCounterStats\n}\n\n\/\/ Deps represents dependencies of Telemetry Plugin\ntype Deps struct {\n\tlocal.PluginInfraDeps\n\n\tGoVppmux govppmux.API\n\tPrometheus prom.API\n}\n\ntype runtimeStats struct {\n\tthreadName string\n\tthreadID uint\n\titemName string\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype memoryStats struct {\n\tthreadName string\n\tthreadID uint\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype buffersStats struct {\n\tthreadID uint\n\titemName string\n\titemIndex uint\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype nodeCounterStats struct {\n\titemName string\n\tmetrics map[string]prometheus.Gauge\n}\n\n\/\/ Init initializes Telemetry Plugin\nfunc (p *Plugin) Init() error {\n\t\/\/ Register '\/vpp' registry path\n\terr := p.Prometheus.NewRegistry(registryPath, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Runtime metrics\n\tp.runtimeGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.runtimeStats = make(map[string]*runtimeStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{runtimeCallsMetric, \"Number of calls\"},\n\t\t{runtimeVectorsMetric, \"Number of vectors\"},\n\t\t{runtimeSuspendsMetric, \"Number of suspends\"},\n\t\t{runtimeClocksMetric, \"Number of clocks\"},\n\t\t{runtimeVectorsPerCallMetric, \"Number of vectors per call\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.runtimeGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"runtime\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{runtimeItemLabel, runtimeThreadLabel, runtimeThreadIDLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.runtimeGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Memory metrics\n\tp.memoryGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.memoryStats = make(map[string]*memoryStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{memoryObjectsMetric, \"Number of objects\"},\n\t\t{memoryUsedMetric, \"Used memory\"},\n\t\t{memoryTotalMetric, \"Total memory\"},\n\t\t{memoryFreeMetric, \"Free memory\"},\n\t\t{memoryReclaimedMetric, \"Reclaimed memory\"},\n\t\t{memoryOverheadMetric, \"Overhead\"},\n\t\t{memoryCapacityMetric, \"Capacity\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.memoryGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"memory\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{memoryThreadLabel, memoryThreadIDLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.memoryGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Buffers metrics\n\tp.buffersGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.buffersStats = make(map[string]*buffersStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{buffersSizeMetric, \"Size of buffer\"},\n\t\t{buffersAllocMetric, \"Allocated\"},\n\t\t{buffersFreeMetric, \"Free\"},\n\t\t{buffersNumAllocMetric, \"Number of allocated\"},\n\t\t{buffersNumFreeMetric, \"Number of free\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.buffersGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"buffers\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{buffersThreadIDLabel, buffersItemLabel, buffersIndexLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.buffersGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Node counters metrics\n\tp.nodeCounterGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.nodeCounterStats = make(map[string]*nodeCounterStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{nodeCounterCountMetric, \"Count\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.nodeCounterGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"node_counter\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{nodeCounterItemLabel, nodeCounterReasonLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.nodeCounterGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Create GoVPP channel\n\tp.vppCh, err = p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\tp.Log.Errorf(\"Error creating channel: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AfterInit executes after initializion of Telemetry Plugin\nfunc (p *Plugin) AfterInit() error {\n\t\/\/ Periodically update data\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ Update runtime\n\t\t\truntimeInfo, err := vppcalls.GetRuntimeInfo(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, thread := range runtimeInfo.Threads {\n\t\t\t\t\tfor _, item := range thread.Items {\n\t\t\t\t\t\tstats, ok := p.runtimeStats[item.Name]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\tstats = &runtimeStats{\n\t\t\t\t\t\t\t\tthreadID: thread.ID,\n\t\t\t\t\t\t\t\tthreadName: thread.Name,\n\t\t\t\t\t\t\t\titemName: item.Name,\n\t\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\t\tfor k, vec := range p.runtimeGaugeVecs {\n\t\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\t\truntimeItemLabel: item.Name,\n\t\t\t\t\t\t\t\t\truntimeThreadLabel: thread.Name,\n\t\t\t\t\t\t\t\t\truntimeThreadIDLabel: strconv.Itoa(int(thread.ID)),\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tstats.metrics[runtimeCallsMetric].Set(float64(item.Calls))\n\t\t\t\t\t\tstats.metrics[runtimeVectorsMetric].Set(float64(item.Vectors))\n\t\t\t\t\t\tstats.metrics[runtimeSuspendsMetric].Set(float64(item.Suspends))\n\t\t\t\t\t\tstats.metrics[runtimeClocksMetric].Set(item.Clocks)\n\t\t\t\t\t\tstats.metrics[runtimeVectorsPerCallMetric].Set(item.VectorsPerCall)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update memory\n\t\t\tmemoryInfo, err := vppcalls.GetMemory(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, thread := range memoryInfo.Threads {\n\t\t\t\t\tstats, ok := p.memoryStats[thread.Name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &memoryStats{\n\t\t\t\t\t\t\tthreadName: thread.Name,\n\t\t\t\t\t\t\tthreadID: thread.ID,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.memoryGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tmemoryThreadLabel: thread.Name,\n\t\t\t\t\t\t\t\tmemoryThreadIDLabel: strconv.Itoa(int(thread.ID)),\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[memoryObjectsMetric].Set(float64(thread.Objects))\n\t\t\t\t\tstats.metrics[memoryUsedMetric].Set(float64(thread.Used))\n\t\t\t\t\tstats.metrics[memoryTotalMetric].Set(float64(thread.Total))\n\t\t\t\t\tstats.metrics[memoryFreeMetric].Set(float64(thread.Free))\n\t\t\t\t\tstats.metrics[memoryReclaimedMetric].Set(float64(thread.Reclaimed))\n\t\t\t\t\tstats.metrics[memoryOverheadMetric].Set(float64(thread.Overhead))\n\t\t\t\t\tstats.metrics[memoryCapacityMetric].Set(float64(thread.Capacity))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update buffers\n\t\t\tbuffersInfo, err := vppcalls.GetBuffersInfo(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, item := range buffersInfo.Items {\n\t\t\t\t\tstats, ok := p.buffersStats[item.Name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &buffersStats{\n\t\t\t\t\t\t\tthreadID: item.ThreadID,\n\t\t\t\t\t\t\titemName: item.Name,\n\t\t\t\t\t\t\titemIndex: item.Index,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.buffersGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tbuffersThreadIDLabel: strconv.Itoa(int(item.ThreadID)),\n\t\t\t\t\t\t\t\tbuffersItemLabel: item.Name,\n\t\t\t\t\t\t\t\tbuffersIndexLabel: strconv.Itoa(int(item.Index)),\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[buffersSizeMetric].Set(float64(item.Size))\n\t\t\t\t\tstats.metrics[buffersAllocMetric].Set(float64(item.Alloc))\n\t\t\t\t\tstats.metrics[buffersFreeMetric].Set(float64(item.Free))\n\t\t\t\t\tstats.metrics[buffersNumAllocMetric].Set(float64(item.NumAlloc))\n\t\t\t\t\tstats.metrics[buffersNumFreeMetric].Set(float64(item.NumFree))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update node counters\n\t\t\tnodeCountersInfo, err := vppcalls.GetNodeCounters(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, item := range nodeCountersInfo.Counters {\n\t\t\t\t\tstats, ok := p.nodeCounterStats[item.Node]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &nodeCounterStats{\n\t\t\t\t\t\t\titemName: item.Node,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.nodeCounterGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tnodeCounterItemLabel: item.Node,\n\t\t\t\t\t\t\t\tnodeCounterReasonLabel: item.Reason,\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[nodeCounterCountMetric].Set(float64(item.Count))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Delay period between updates\n\t\t\ttime.Sleep(updatePeriod)\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ Close is used to clean up resources used by Telemetry Plugin\nfunc (p *Plugin) Close() error {\n\tp.vppCh.Close()\n\treturn nil\n}\n<commit_msg>Use safeclose for closing VPP channel in Telemetry plugin<commit_after>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage telemetry\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\n\tgovppapi \"git.fd.io\/govpp.git\/api\"\n\t\"github.com\/ligato\/cn-infra\/flavors\/local\"\n\tprom \"github.com\/ligato\/cn-infra\/rpc\/prometheus\"\n\t\"github.com\/ligato\/cn-infra\/utils\/safeclose\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\"\n\t\"github.com\/ligato\/vpp-agent\/plugins\/govppmux\/vppcalls\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\/promhttp\"\n)\n\nconst (\n\t\/\/ Period between metric updates\n\tupdatePeriod = time.Second * 5\n\n\t\/\/ Registry path for telemetry metrics\n\tregistryPath = \"\/vpp\"\n\n\t\/\/ Metrics label used for agent label\n\tagentLabel = \"agent\"\n)\n\nconst (\n\t\/\/ Runtime\n\truntimeThreadLabel = \"thread\"\n\truntimeThreadIDLabel = \"threadID\"\n\truntimeItemLabel = \"item\"\n\n\truntimeCallsMetric = \"calls\"\n\truntimeVectorsMetric = \"vectors\"\n\truntimeSuspendsMetric = \"suspends\"\n\truntimeClocksMetric = \"clocks\"\n\truntimeVectorsPerCallMetric = \"vectors_per_call\"\n\n\t\/\/ Memory\n\tmemoryThreadLabel = \"thread\"\n\tmemoryThreadIDLabel = \"threadID\"\n\n\tmemoryObjectsMetric = \"objects\"\n\tmemoryUsedMetric = \"used\"\n\tmemoryTotalMetric = \"total\"\n\tmemoryFreeMetric = \"free\"\n\tmemoryReclaimedMetric = \"reclaimed\"\n\tmemoryOverheadMetric = \"overhead\"\n\tmemoryCapacityMetric = \"capacity\"\n\n\t\/\/ Buffers\n\tbuffersThreadIDLabel = \"threadID\"\n\tbuffersItemLabel = \"item\"\n\tbuffersIndexLabel = \"index\"\n\n\tbuffersSizeMetric = \"size\"\n\tbuffersAllocMetric = \"alloc\"\n\tbuffersFreeMetric = \"free\"\n\tbuffersNumAllocMetric = \"num_alloc\"\n\tbuffersNumFreeMetric = \"num_free\"\n\n\t\/\/ Node counters\n\tnodeCounterItemLabel = \"item\"\n\tnodeCounterReasonLabel = \"reason\"\n\n\tnodeCounterCountMetric = \"count\"\n)\n\n\/\/ Plugin registers Telemetry Plugin\ntype Plugin struct {\n\tDeps\n\n\tvppCh govppapi.Channel\n\n\truntimeGaugeVecs map[string]*prometheus.GaugeVec\n\truntimeStats map[string]*runtimeStats\n\n\tmemoryGaugeVecs map[string]*prometheus.GaugeVec\n\tmemoryStats map[string]*memoryStats\n\n\tbuffersGaugeVecs map[string]*prometheus.GaugeVec\n\tbuffersStats map[string]*buffersStats\n\n\tnodeCounterGaugeVecs map[string]*prometheus.GaugeVec\n\tnodeCounterStats map[string]*nodeCounterStats\n}\n\n\/\/ Deps represents dependencies of Telemetry Plugin\ntype Deps struct {\n\tlocal.PluginInfraDeps\n\n\tGoVppmux govppmux.API\n\tPrometheus prom.API\n}\n\ntype runtimeStats struct {\n\tthreadName string\n\tthreadID uint\n\titemName string\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype memoryStats struct {\n\tthreadName string\n\tthreadID uint\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype buffersStats struct {\n\tthreadID uint\n\titemName string\n\titemIndex uint\n\tmetrics map[string]prometheus.Gauge\n}\n\ntype nodeCounterStats struct {\n\titemName string\n\tmetrics map[string]prometheus.Gauge\n}\n\n\/\/ Init initializes Telemetry Plugin\nfunc (p *Plugin) Init() error {\n\t\/\/ Register '\/vpp' registry path\n\terr := p.Prometheus.NewRegistry(registryPath, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Runtime metrics\n\tp.runtimeGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.runtimeStats = make(map[string]*runtimeStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{runtimeCallsMetric, \"Number of calls\"},\n\t\t{runtimeVectorsMetric, \"Number of vectors\"},\n\t\t{runtimeSuspendsMetric, \"Number of suspends\"},\n\t\t{runtimeClocksMetric, \"Number of clocks\"},\n\t\t{runtimeVectorsPerCallMetric, \"Number of vectors per call\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.runtimeGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"runtime\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{runtimeItemLabel, runtimeThreadLabel, runtimeThreadIDLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.runtimeGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Memory metrics\n\tp.memoryGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.memoryStats = make(map[string]*memoryStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{memoryObjectsMetric, \"Number of objects\"},\n\t\t{memoryUsedMetric, \"Used memory\"},\n\t\t{memoryTotalMetric, \"Total memory\"},\n\t\t{memoryFreeMetric, \"Free memory\"},\n\t\t{memoryReclaimedMetric, \"Reclaimed memory\"},\n\t\t{memoryOverheadMetric, \"Overhead\"},\n\t\t{memoryCapacityMetric, \"Capacity\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.memoryGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"memory\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{memoryThreadLabel, memoryThreadIDLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.memoryGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Buffers metrics\n\tp.buffersGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.buffersStats = make(map[string]*buffersStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{buffersSizeMetric, \"Size of buffer\"},\n\t\t{buffersAllocMetric, \"Allocated\"},\n\t\t{buffersFreeMetric, \"Free\"},\n\t\t{buffersNumAllocMetric, \"Number of allocated\"},\n\t\t{buffersNumFreeMetric, \"Number of free\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.buffersGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"buffers\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{buffersThreadIDLabel, buffersItemLabel, buffersIndexLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.buffersGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Node counters metrics\n\tp.nodeCounterGaugeVecs = make(map[string]*prometheus.GaugeVec)\n\tp.nodeCounterStats = make(map[string]*nodeCounterStats)\n\n\tfor _, metric := range [][2]string{\n\t\t{nodeCounterCountMetric, \"Count\"},\n\t} {\n\t\tname := metric[0]\n\t\tp.nodeCounterGaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"vpp\",\n\t\t\tSubsystem: \"node_counter\",\n\t\t\tName: name,\n\t\t\tHelp: metric[1],\n\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\tagentLabel: p.ServiceLabel.GetAgentLabel(),\n\t\t\t},\n\t\t}, []string{nodeCounterItemLabel, nodeCounterReasonLabel})\n\n\t}\n\n\t\/\/ register created vectors to prometheus\n\tfor name, metric := range p.nodeCounterGaugeVecs {\n\t\tif err := p.Prometheus.Register(registryPath, metric); err != nil {\n\t\t\tp.Log.Errorf(\"failed to register %v metric: %v\", name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Create GoVPP channel\n\tp.vppCh, err = p.GoVppmux.NewAPIChannel()\n\tif err != nil {\n\t\tp.Log.Errorf(\"Error creating channel: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ AfterInit executes after initializion of Telemetry Plugin\nfunc (p *Plugin) AfterInit() error {\n\t\/\/ Periodically update data\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/ Update runtime\n\t\t\truntimeInfo, err := vppcalls.GetRuntimeInfo(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, thread := range runtimeInfo.Threads {\n\t\t\t\t\tfor _, item := range thread.Items {\n\t\t\t\t\t\tstats, ok := p.runtimeStats[item.Name]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\tstats = &runtimeStats{\n\t\t\t\t\t\t\t\tthreadID: thread.ID,\n\t\t\t\t\t\t\t\tthreadName: thread.Name,\n\t\t\t\t\t\t\t\titemName: item.Name,\n\t\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\t\tfor k, vec := range p.runtimeGaugeVecs {\n\t\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\t\truntimeItemLabel: item.Name,\n\t\t\t\t\t\t\t\t\truntimeThreadLabel: thread.Name,\n\t\t\t\t\t\t\t\t\truntimeThreadIDLabel: strconv.Itoa(int(thread.ID)),\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tstats.metrics[runtimeCallsMetric].Set(float64(item.Calls))\n\t\t\t\t\t\tstats.metrics[runtimeVectorsMetric].Set(float64(item.Vectors))\n\t\t\t\t\t\tstats.metrics[runtimeSuspendsMetric].Set(float64(item.Suspends))\n\t\t\t\t\t\tstats.metrics[runtimeClocksMetric].Set(item.Clocks)\n\t\t\t\t\t\tstats.metrics[runtimeVectorsPerCallMetric].Set(item.VectorsPerCall)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update memory\n\t\t\tmemoryInfo, err := vppcalls.GetMemory(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, thread := range memoryInfo.Threads {\n\t\t\t\t\tstats, ok := p.memoryStats[thread.Name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &memoryStats{\n\t\t\t\t\t\t\tthreadName: thread.Name,\n\t\t\t\t\t\t\tthreadID: thread.ID,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.memoryGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tmemoryThreadLabel: thread.Name,\n\t\t\t\t\t\t\t\tmemoryThreadIDLabel: strconv.Itoa(int(thread.ID)),\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[memoryObjectsMetric].Set(float64(thread.Objects))\n\t\t\t\t\tstats.metrics[memoryUsedMetric].Set(float64(thread.Used))\n\t\t\t\t\tstats.metrics[memoryTotalMetric].Set(float64(thread.Total))\n\t\t\t\t\tstats.metrics[memoryFreeMetric].Set(float64(thread.Free))\n\t\t\t\t\tstats.metrics[memoryReclaimedMetric].Set(float64(thread.Reclaimed))\n\t\t\t\t\tstats.metrics[memoryOverheadMetric].Set(float64(thread.Overhead))\n\t\t\t\t\tstats.metrics[memoryCapacityMetric].Set(float64(thread.Capacity))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update buffers\n\t\t\tbuffersInfo, err := vppcalls.GetBuffersInfo(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, item := range buffersInfo.Items {\n\t\t\t\t\tstats, ok := p.buffersStats[item.Name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &buffersStats{\n\t\t\t\t\t\t\tthreadID: item.ThreadID,\n\t\t\t\t\t\t\titemName: item.Name,\n\t\t\t\t\t\t\titemIndex: item.Index,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.buffersGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tbuffersThreadIDLabel: strconv.Itoa(int(item.ThreadID)),\n\t\t\t\t\t\t\t\tbuffersItemLabel: item.Name,\n\t\t\t\t\t\t\t\tbuffersIndexLabel: strconv.Itoa(int(item.Index)),\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[buffersSizeMetric].Set(float64(item.Size))\n\t\t\t\t\tstats.metrics[buffersAllocMetric].Set(float64(item.Alloc))\n\t\t\t\t\tstats.metrics[buffersFreeMetric].Set(float64(item.Free))\n\t\t\t\t\tstats.metrics[buffersNumAllocMetric].Set(float64(item.NumAlloc))\n\t\t\t\t\tstats.metrics[buffersNumFreeMetric].Set(float64(item.NumFree))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Update node counters\n\t\t\tnodeCountersInfo, err := vppcalls.GetNodeCounters(p.vppCh)\n\t\t\tif err != nil {\n\t\t\t\tp.Log.Errorf(\"Command failed: %v\", err)\n\t\t\t} else {\n\t\t\t\tfor _, item := range nodeCountersInfo.Counters {\n\t\t\t\t\tstats, ok := p.nodeCounterStats[item.Node]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tstats = &nodeCounterStats{\n\t\t\t\t\t\t\titemName: item.Node,\n\t\t\t\t\t\t\tmetrics: map[string]prometheus.Gauge{},\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\/\/ add gauges with corresponding labels into vectors\n\t\t\t\t\t\tfor k, vec := range p.nodeCounterGaugeVecs {\n\t\t\t\t\t\t\tstats.metrics[k], err = vec.GetMetricWith(prometheus.Labels{\n\t\t\t\t\t\t\t\tnodeCounterItemLabel: item.Node,\n\t\t\t\t\t\t\t\tnodeCounterReasonLabel: item.Reason,\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tp.Log.Error(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tstats.metrics[nodeCounterCountMetric].Set(float64(item.Count))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Delay period between updates\n\t\t\ttime.Sleep(updatePeriod)\n\t\t}\n\t}()\n\treturn nil\n}\n\n\/\/ Close is used to clean up resources used by Telemetry Plugin\nfunc (p *Plugin) Close() error {\n\treturn safeclose.Close(p.vppCh)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"hash\"\n\t\"hash\/fnv\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ This file contains utilities for generating code.\n\n\/\/ TODO: other write methods like:\n\/\/ - slices, maps, types, etc.\n\n\/\/ CodeWriter is a utility for writing structured code. It computes the content\n\/\/ hash and size of written content. It ensures there are newlines between\n\/\/ written code blocks.\ntype CodeWriter struct {\n\tbuf bytes.Buffer\n\tSize int\n\tHash hash.Hash32 \/\/ content hash\n\tgob *gob.Encoder\n\t\/\/ For comments we skip the usual one-line separator if they are followed by\n\t\/\/ a code block.\n\tskipSep bool\n}\n\nfunc (w *CodeWriter) Write(p []byte) (n int, err error) {\n\treturn w.buf.Write(p)\n}\n\n\/\/ NewCodeWriter returns a new CodeWriter.\nfunc NewCodeWriter() *CodeWriter {\n\th := fnv.New32()\n\treturn &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}\n}\n\n\/\/ WriteGoFile appends the buffer with the total size of all created structures\n\/\/ and writes it as a Go file to the the given file with the given package name.\nfunc (w *CodeWriter) WriteGoFile(filename, pkg string) {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not create file %s: %v\", filename, err)\n\t}\n\tdefer f.Close()\n\tif _, err = w.WriteGo(f, pkg); err != nil {\n\t\tlog.Fatalf(\"Error writing file %s: %v\", filename, err)\n\t}\n}\n\n\/\/ WriteGo appends the buffer with the total size of all created structures and\n\/\/ writes it as a Go file to the the given writer with the given package name.\nfunc (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {\n\tsz := w.Size\n\tw.WriteComment(\"Total table size %d bytes (%dKiB); checksum: %X\\n\", sz, sz\/1024, w.Hash.Sum32())\n\tdefer w.buf.Reset()\n\treturn WriteGo(out, pkg, w.buf.Bytes())\n}\n\nfunc (w *CodeWriter) printf(f string, x ...interface{}) {\n\tfmt.Fprintf(w, f, x...)\n}\n\nfunc (w *CodeWriter) insertSep() {\n\tif w.skipSep {\n\t\tw.skipSep = false\n\t\treturn\n\t}\n\t\/\/ Use at least two newlines to ensure a blank space between the previous\n\t\/\/ block. WriteGoFile will remove extraneous newlines.\n\tw.printf(\"\\n\\n\")\n}\n\n\/\/ WriteComment writes a comment block. All line starts are prefixed with \"\/\/\".\n\/\/ Initial empty lines are gobbled. The indentation for the first line is\n\/\/ stripped from consecutive lines.\nfunc (w *CodeWriter) WriteComment(comment string, args ...interface{}) {\n\ts := fmt.Sprintf(comment, args...)\n\ts = strings.Trim(s, \"\\n\")\n\n\t\/\/ Use at least two newlines to ensure a blank space between the previous\n\t\/\/ block. WriteGoFile will remove extraneous newlines.\n\tw.printf(\"\\n\\n\/\/ \")\n\tw.skipSep = true\n\n\t\/\/ strip first indent level.\n\tsep := \"\\n\"\n\tfor ; len(s) > 0 && (s[0] == '\\t' || s[0] == ' '); s = s[1:] {\n\t\tsep += s[:1]\n\t}\n\n\tstrings.NewReplacer(sep, \"\\n\/\/ \", \"\\n\", \"\\n\/\/ \").WriteString(w, s)\n\n\tw.printf(\"\\n\")\n}\n\nfunc (w *CodeWriter) writeSizeInfo(size int) {\n\tw.printf(\"\/\/ Size: %d bytes\\n\", size)\n}\n\n\/\/ WriteConst writes a constant of the given name and value.\nfunc (w *CodeWriter) WriteConst(name string, x interface{}) {\n\tw.insertSep()\n\tv := reflect.ValueOf(x)\n\n\tswitch v.Type().Kind() {\n\tcase reflect.String:\n\t\t\/\/ See golang.org\/issue\/13145.\n\t\tconst arbitraryCutoff = 16\n\t\tif v.Len() > arbitraryCutoff {\n\t\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\t} else {\n\t\t\tw.printf(\"const %s %s = \", name, typeName(x))\n\t\t}\n\t\tw.WriteString(v.String())\n\t\tw.printf(\"\\n\")\n\tdefault:\n\t\tw.printf(\"const %s = %#v\\n\", name, x)\n\t}\n}\n\n\/\/ WriteVar writes a variable of the given name and value.\nfunc (w *CodeWriter) WriteVar(name string, x interface{}) {\n\tw.insertSep()\n\tv := reflect.ValueOf(x)\n\toldSize := w.Size\n\tsz := int(v.Type().Size())\n\tw.Size += sz\n\n\tswitch v.Type().Kind() {\n\tcase reflect.String:\n\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\tw.WriteString(v.String())\n\tcase reflect.Struct:\n\t\tw.gob.Encode(x)\n\t\tfallthrough\n\tcase reflect.Slice, reflect.Array:\n\t\tw.printf(\"var %s = \", name)\n\t\tw.writeValue(v)\n\t\tw.writeSizeInfo(w.Size - oldSize)\n\tdefault:\n\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\tw.gob.Encode(x)\n\t\tw.writeValue(v)\n\t\tw.writeSizeInfo(w.Size - oldSize)\n\t}\n\tw.printf(\"\\n\")\n}\n\nfunc (w *CodeWriter) writeValue(v reflect.Value) {\n\tx := v.Interface()\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\tw.WriteString(v.String())\n\tcase reflect.Array:\n\t\t\/\/ Don't double count: callers of WriteArray count on the size being\n\t\t\/\/ added, so we need to discount it here.\n\t\tw.Size -= int(v.Type().Size())\n\t\tw.writeSlice(x, true)\n\tcase reflect.Slice:\n\t\tw.writeSlice(x, false)\n\tcase reflect.Struct:\n\t\tw.printf(\"%s{\\n\", typeName(v.Interface()))\n\t\tt := v.Type()\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tw.printf(\"%s: \", t.Field(i).Name)\n\t\t\tw.writeValue(v.Field(i))\n\t\t\tw.printf(\",\\n\")\n\t\t}\n\t\tw.printf(\"}\")\n\tdefault:\n\t\tw.printf(\"%#v\", x)\n\t}\n}\n\n\/\/ WriteString writes a string literal.\nfunc (w *CodeWriter) WriteString(s string) {\n\tio.WriteString(w.Hash, s) \/\/ content hash\n\tw.Size += len(s)\n\n\tconst maxInline = 40\n\tif len(s) <= maxInline {\n\t\tw.printf(\"%q\", s)\n\t\treturn\n\t}\n\n\t\/\/ We will render the string as a multi-line string.\n\tconst maxWidth = 80 - 4 - len(`\"`) - len(`\" +`)\n\n\t\/\/ When starting on its own line, go fmt indents line 2+ an extra level.\n\tn, max := maxWidth, maxWidth-4\n\n\t\/\/ Print \"\" +\\n, if a string does not start on its own line.\n\tb := w.buf.Bytes()\n\tif p := len(bytes.TrimRight(b, \" \\t\")); p > 0 && b[p-1] != '\\n' {\n\t\tw.printf(\"\\\"\\\" + \/\/ Size: %d bytes\\n\", len(s))\n\t\tn, max = maxWidth, maxWidth\n\t}\n\n\tw.printf(`\"`)\n\n\tfor sz, p := 0, 0; p < len(s); {\n\t\tvar r rune\n\t\tr, sz = utf8.DecodeRuneInString(s[p:])\n\t\tout := s[p : p+sz]\n\t\tchars := 1\n\t\tif !unicode.IsPrint(r) || r == utf8.RuneError || r == '\"' {\n\t\t\tswitch sz {\n\t\t\tcase 1:\n\t\t\t\tout = fmt.Sprintf(\"\\\\x%02x\", s[p])\n\t\t\tcase 2, 3:\n\t\t\t\tout = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tcase 4:\n\t\t\t\tout = fmt.Sprintf(\"\\\\U%08x\", r)\n\t\t\t}\n\t\t\tchars = len(out)\n\t\t}\n\t\tif n -= chars; n < 0 {\n\t\t\tw.printf(\"\\\" +\\n\\\"\")\n\t\t\tn = max - len(out)\n\t\t}\n\t\tw.printf(\"%s\", out)\n\t\tp += sz\n\t}\n\tw.printf(`\"`)\n}\n\n\/\/ WriteSlice writes a slice value.\nfunc (w *CodeWriter) WriteSlice(x interface{}) {\n\tw.writeSlice(x, false)\n}\n\n\/\/ WriteArray writes an array value.\nfunc (w *CodeWriter) WriteArray(x interface{}) {\n\tw.writeSlice(x, true)\n}\n\nfunc (w *CodeWriter) writeSlice(x interface{}, isArray bool) {\n\tv := reflect.ValueOf(x)\n\tw.gob.Encode(v.Len())\n\tw.Size += v.Len() * int(v.Type().Elem().Size())\n\tname := typeName(x)\n\tif isArray {\n\t\tname = fmt.Sprintf(\"[%d]%s\", v.Len(), name[strings.Index(name, \"]\")+1:])\n\t}\n\tif isArray {\n\t\tw.printf(\"%s{\\n\", name)\n\t} else {\n\t\tw.printf(\"%s{ \/\/ %d elements\\n\", name, v.Len())\n\t}\n\n\tswitch kind := v.Type().Elem().Kind(); kind {\n\tcase reflect.String:\n\t\tfor _, s := range x.([]string) {\n\t\t\tw.WriteString(s)\n\t\t\tw.printf(\",\\n\")\n\t\t}\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,\n\t\treflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\/\/ nLine and nBlock are the number of elements per line and block.\n\t\tnLine, nBlock, format := 8, 64, \"%d,\"\n\t\tswitch kind {\n\t\tcase reflect.Uint8:\n\t\t\tformat = \"%#02x,\"\n\t\tcase reflect.Uint16:\n\t\t\tformat = \"%#04x,\"\n\t\tcase reflect.Uint32:\n\t\t\tnLine, nBlock, format = 4, 32, \"%#08x,\"\n\t\tcase reflect.Uint, reflect.Uint64:\n\t\t\tnLine, nBlock, format = 4, 32, \"%#016x,\"\n\t\tcase reflect.Int8:\n\t\t\tnLine = 16\n\t\t}\n\t\tn := nLine\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif i%nBlock == 0 && v.Len() > nBlock {\n\t\t\t\tw.printf(\"\/\/ Entry %X - %X\\n\", i, i+nBlock-1)\n\t\t\t}\n\t\t\tx := v.Index(i).Interface()\n\t\t\tw.gob.Encode(x)\n\t\t\tw.printf(format, x)\n\t\t\tif n--; n == 0 {\n\t\t\t\tn = nLine\n\t\t\t\tw.printf(\"\\n\")\n\t\t\t}\n\t\t}\n\t\tw.printf(\"\\n\")\n\tcase reflect.Struct:\n\t\tzero := reflect.Zero(v.Type().Elem()).Interface()\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tx := v.Index(i).Interface()\n\t\t\tw.gob.EncodeValue(v)\n\t\t\tif !reflect.DeepEqual(zero, x) {\n\t\t\t\tline := fmt.Sprintf(\"%#v,\\n\", x)\n\t\t\t\tline = line[strings.IndexByte(line, '{'):]\n\t\t\t\tw.printf(\"%d: \", i)\n\t\t\t\tw.printf(line)\n\t\t\t}\n\t\t}\n\tcase reflect.Array:\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tw.printf(\"%d: %#v,\\n\", i, v.Index(i).Interface())\n\t\t}\n\tdefault:\n\t\tpanic(\"gen: slice elem type not supported\")\n\t}\n\tw.printf(\"}\")\n}\n\n\/\/ WriteType writes a definition of the type of the given value and returns the\n\/\/ type name.\nfunc (w *CodeWriter) WriteType(x interface{}) string {\n\tt := reflect.TypeOf(x)\n\tw.printf(\"type %s struct {\\n\", t.Name())\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tw.printf(\"\\t%s %s\\n\", t.Field(i).Name, t.Field(i).Type)\n\t}\n\tw.printf(\"}\\n\")\n\treturn t.Name()\n}\n\n\/\/ typeName returns the name of the go type of x.\nfunc typeName(x interface{}) string {\n\tt := reflect.ValueOf(x).Type()\n\treturn strings.Replace(fmt.Sprint(t), \"main.\", \"\", 1)\n}\n<commit_msg>internal\/gen: bugfix: escape the escape character<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"hash\"\n\t\"hash\/fnv\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ This file contains utilities for generating code.\n\n\/\/ TODO: other write methods like:\n\/\/ - slices, maps, types, etc.\n\n\/\/ CodeWriter is a utility for writing structured code. It computes the content\n\/\/ hash and size of written content. It ensures there are newlines between\n\/\/ written code blocks.\ntype CodeWriter struct {\n\tbuf bytes.Buffer\n\tSize int\n\tHash hash.Hash32 \/\/ content hash\n\tgob *gob.Encoder\n\t\/\/ For comments we skip the usual one-line separator if they are followed by\n\t\/\/ a code block.\n\tskipSep bool\n}\n\nfunc (w *CodeWriter) Write(p []byte) (n int, err error) {\n\treturn w.buf.Write(p)\n}\n\n\/\/ NewCodeWriter returns a new CodeWriter.\nfunc NewCodeWriter() *CodeWriter {\n\th := fnv.New32()\n\treturn &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}\n}\n\n\/\/ WriteGoFile appends the buffer with the total size of all created structures\n\/\/ and writes it as a Go file to the the given file with the given package name.\nfunc (w *CodeWriter) WriteGoFile(filename, pkg string) {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not create file %s: %v\", filename, err)\n\t}\n\tdefer f.Close()\n\tif _, err = w.WriteGo(f, pkg); err != nil {\n\t\tlog.Fatalf(\"Error writing file %s: %v\", filename, err)\n\t}\n}\n\n\/\/ WriteGo appends the buffer with the total size of all created structures and\n\/\/ writes it as a Go file to the the given writer with the given package name.\nfunc (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {\n\tsz := w.Size\n\tw.WriteComment(\"Total table size %d bytes (%dKiB); checksum: %X\\n\", sz, sz\/1024, w.Hash.Sum32())\n\tdefer w.buf.Reset()\n\treturn WriteGo(out, pkg, w.buf.Bytes())\n}\n\nfunc (w *CodeWriter) printf(f string, x ...interface{}) {\n\tfmt.Fprintf(w, f, x...)\n}\n\nfunc (w *CodeWriter) insertSep() {\n\tif w.skipSep {\n\t\tw.skipSep = false\n\t\treturn\n\t}\n\t\/\/ Use at least two newlines to ensure a blank space between the previous\n\t\/\/ block. WriteGoFile will remove extraneous newlines.\n\tw.printf(\"\\n\\n\")\n}\n\n\/\/ WriteComment writes a comment block. All line starts are prefixed with \"\/\/\".\n\/\/ Initial empty lines are gobbled. The indentation for the first line is\n\/\/ stripped from consecutive lines.\nfunc (w *CodeWriter) WriteComment(comment string, args ...interface{}) {\n\ts := fmt.Sprintf(comment, args...)\n\ts = strings.Trim(s, \"\\n\")\n\n\t\/\/ Use at least two newlines to ensure a blank space between the previous\n\t\/\/ block. WriteGoFile will remove extraneous newlines.\n\tw.printf(\"\\n\\n\/\/ \")\n\tw.skipSep = true\n\n\t\/\/ strip first indent level.\n\tsep := \"\\n\"\n\tfor ; len(s) > 0 && (s[0] == '\\t' || s[0] == ' '); s = s[1:] {\n\t\tsep += s[:1]\n\t}\n\n\tstrings.NewReplacer(sep, \"\\n\/\/ \", \"\\n\", \"\\n\/\/ \").WriteString(w, s)\n\n\tw.printf(\"\\n\")\n}\n\nfunc (w *CodeWriter) writeSizeInfo(size int) {\n\tw.printf(\"\/\/ Size: %d bytes\\n\", size)\n}\n\n\/\/ WriteConst writes a constant of the given name and value.\nfunc (w *CodeWriter) WriteConst(name string, x interface{}) {\n\tw.insertSep()\n\tv := reflect.ValueOf(x)\n\n\tswitch v.Type().Kind() {\n\tcase reflect.String:\n\t\t\/\/ See golang.org\/issue\/13145.\n\t\tconst arbitraryCutoff = 16\n\t\tif v.Len() > arbitraryCutoff {\n\t\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\t} else {\n\t\t\tw.printf(\"const %s %s = \", name, typeName(x))\n\t\t}\n\t\tw.WriteString(v.String())\n\t\tw.printf(\"\\n\")\n\tdefault:\n\t\tw.printf(\"const %s = %#v\\n\", name, x)\n\t}\n}\n\n\/\/ WriteVar writes a variable of the given name and value.\nfunc (w *CodeWriter) WriteVar(name string, x interface{}) {\n\tw.insertSep()\n\tv := reflect.ValueOf(x)\n\toldSize := w.Size\n\tsz := int(v.Type().Size())\n\tw.Size += sz\n\n\tswitch v.Type().Kind() {\n\tcase reflect.String:\n\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\tw.WriteString(v.String())\n\tcase reflect.Struct:\n\t\tw.gob.Encode(x)\n\t\tfallthrough\n\tcase reflect.Slice, reflect.Array:\n\t\tw.printf(\"var %s = \", name)\n\t\tw.writeValue(v)\n\t\tw.writeSizeInfo(w.Size - oldSize)\n\tdefault:\n\t\tw.printf(\"var %s %s = \", name, typeName(x))\n\t\tw.gob.Encode(x)\n\t\tw.writeValue(v)\n\t\tw.writeSizeInfo(w.Size - oldSize)\n\t}\n\tw.printf(\"\\n\")\n}\n\nfunc (w *CodeWriter) writeValue(v reflect.Value) {\n\tx := v.Interface()\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\tw.WriteString(v.String())\n\tcase reflect.Array:\n\t\t\/\/ Don't double count: callers of WriteArray count on the size being\n\t\t\/\/ added, so we need to discount it here.\n\t\tw.Size -= int(v.Type().Size())\n\t\tw.writeSlice(x, true)\n\tcase reflect.Slice:\n\t\tw.writeSlice(x, false)\n\tcase reflect.Struct:\n\t\tw.printf(\"%s{\\n\", typeName(v.Interface()))\n\t\tt := v.Type()\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tw.printf(\"%s: \", t.Field(i).Name)\n\t\t\tw.writeValue(v.Field(i))\n\t\t\tw.printf(\",\\n\")\n\t\t}\n\t\tw.printf(\"}\")\n\tdefault:\n\t\tw.printf(\"%#v\", x)\n\t}\n}\n\n\/\/ WriteString writes a string literal.\nfunc (w *CodeWriter) WriteString(s string) {\n\ts = strings.Replace(s, `\\`, `\\\\`, -1)\n\tio.WriteString(w.Hash, s) \/\/ content hash\n\tw.Size += len(s)\n\n\tconst maxInline = 40\n\tif len(s) <= maxInline {\n\t\tw.printf(\"%q\", s)\n\t\treturn\n\t}\n\n\t\/\/ We will render the string as a multi-line string.\n\tconst maxWidth = 80 - 4 - len(`\"`) - len(`\" +`)\n\n\t\/\/ When starting on its own line, go fmt indents line 2+ an extra level.\n\tn, max := maxWidth, maxWidth-4\n\n\t\/\/ Print \"\" +\\n, if a string does not start on its own line.\n\tb := w.buf.Bytes()\n\tif p := len(bytes.TrimRight(b, \" \\t\")); p > 0 && b[p-1] != '\\n' {\n\t\tw.printf(\"\\\"\\\" + \/\/ Size: %d bytes\\n\", len(s))\n\t\tn, max = maxWidth, maxWidth\n\t}\n\n\tw.printf(`\"`)\n\n\tfor sz, p := 0, 0; p < len(s); {\n\t\tvar r rune\n\t\tr, sz = utf8.DecodeRuneInString(s[p:])\n\t\tout := s[p : p+sz]\n\t\tchars := 1\n\t\tif !unicode.IsPrint(r) || r == utf8.RuneError || r == '\"' {\n\t\t\tswitch sz {\n\t\t\tcase 1:\n\t\t\t\tout = fmt.Sprintf(\"\\\\x%02x\", s[p])\n\t\t\tcase 2, 3:\n\t\t\t\tout = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tcase 4:\n\t\t\t\tout = fmt.Sprintf(\"\\\\U%08x\", r)\n\t\t\t}\n\t\t\tchars = len(out)\n\t\t}\n\t\tif n -= chars; n < 0 {\n\t\t\tw.printf(\"\\\" +\\n\\\"\")\n\t\t\tn = max - len(out)\n\t\t}\n\t\tw.printf(\"%s\", out)\n\t\tp += sz\n\t}\n\tw.printf(`\"`)\n}\n\n\/\/ WriteSlice writes a slice value.\nfunc (w *CodeWriter) WriteSlice(x interface{}) {\n\tw.writeSlice(x, false)\n}\n\n\/\/ WriteArray writes an array value.\nfunc (w *CodeWriter) WriteArray(x interface{}) {\n\tw.writeSlice(x, true)\n}\n\nfunc (w *CodeWriter) writeSlice(x interface{}, isArray bool) {\n\tv := reflect.ValueOf(x)\n\tw.gob.Encode(v.Len())\n\tw.Size += v.Len() * int(v.Type().Elem().Size())\n\tname := typeName(x)\n\tif isArray {\n\t\tname = fmt.Sprintf(\"[%d]%s\", v.Len(), name[strings.Index(name, \"]\")+1:])\n\t}\n\tif isArray {\n\t\tw.printf(\"%s{\\n\", name)\n\t} else {\n\t\tw.printf(\"%s{ \/\/ %d elements\\n\", name, v.Len())\n\t}\n\n\tswitch kind := v.Type().Elem().Kind(); kind {\n\tcase reflect.String:\n\t\tfor _, s := range x.([]string) {\n\t\t\tw.WriteString(s)\n\t\t\tw.printf(\",\\n\")\n\t\t}\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,\n\t\treflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\/\/ nLine and nBlock are the number of elements per line and block.\n\t\tnLine, nBlock, format := 8, 64, \"%d,\"\n\t\tswitch kind {\n\t\tcase reflect.Uint8:\n\t\t\tformat = \"%#02x,\"\n\t\tcase reflect.Uint16:\n\t\t\tformat = \"%#04x,\"\n\t\tcase reflect.Uint32:\n\t\t\tnLine, nBlock, format = 4, 32, \"%#08x,\"\n\t\tcase reflect.Uint, reflect.Uint64:\n\t\t\tnLine, nBlock, format = 4, 32, \"%#016x,\"\n\t\tcase reflect.Int8:\n\t\t\tnLine = 16\n\t\t}\n\t\tn := nLine\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif i%nBlock == 0 && v.Len() > nBlock {\n\t\t\t\tw.printf(\"\/\/ Entry %X - %X\\n\", i, i+nBlock-1)\n\t\t\t}\n\t\t\tx := v.Index(i).Interface()\n\t\t\tw.gob.Encode(x)\n\t\t\tw.printf(format, x)\n\t\t\tif n--; n == 0 {\n\t\t\t\tn = nLine\n\t\t\t\tw.printf(\"\\n\")\n\t\t\t}\n\t\t}\n\t\tw.printf(\"\\n\")\n\tcase reflect.Struct:\n\t\tzero := reflect.Zero(v.Type().Elem()).Interface()\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tx := v.Index(i).Interface()\n\t\t\tw.gob.EncodeValue(v)\n\t\t\tif !reflect.DeepEqual(zero, x) {\n\t\t\t\tline := fmt.Sprintf(\"%#v,\\n\", x)\n\t\t\t\tline = line[strings.IndexByte(line, '{'):]\n\t\t\t\tw.printf(\"%d: \", i)\n\t\t\t\tw.printf(line)\n\t\t\t}\n\t\t}\n\tcase reflect.Array:\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tw.printf(\"%d: %#v,\\n\", i, v.Index(i).Interface())\n\t\t}\n\tdefault:\n\t\tpanic(\"gen: slice elem type not supported\")\n\t}\n\tw.printf(\"}\")\n}\n\n\/\/ WriteType writes a definition of the type of the given value and returns the\n\/\/ type name.\nfunc (w *CodeWriter) WriteType(x interface{}) string {\n\tt := reflect.TypeOf(x)\n\tw.printf(\"type %s struct {\\n\", t.Name())\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tw.printf(\"\\t%s %s\\n\", t.Field(i).Name, t.Field(i).Type)\n\t}\n\tw.printf(\"}\\n\")\n\treturn t.Name()\n}\n\n\/\/ typeName returns the name of the go type of x.\nfunc typeName(x interface{}) string {\n\tt := reflect.ValueOf(x).Type()\n\treturn strings.Replace(fmt.Sprint(t), \"main.\", \"\", 1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/go:generate go run gen.go\n\n\/\/ This program generates internet protocol constants and tables by\n\/\/ reading IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar registries = []struct {\n\turl string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/dscp-registry\/dscp-registry.xml\",\n\t\tparseDSCPRegistry,\n\t},\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/ipv4-tos-byte\/ipv4-tos-byte.xml\",\n\t\tparseTOSTCByte,\n\t},\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/protocol-numbers\/protocol-numbers.xml\",\n\t\tparseProtocolNumbers,\n\t},\n}\n\nfunc main() {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"\/\/ go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\\n\")\n\tfmt.Fprintf(&bb, `package iana \/\/ import \"golang.org\/x\/net\/internal\/iana\"\\n\\n`)\n\tfor _, r := range registries {\n\t\tresp, err := http.Get(r.url)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tfmt.Fprintf(os.Stderr, \"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := ioutil.WriteFile(\"const.go\", b, 0644); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseDSCPRegistry(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar dr dscpRegistry\n\tif err := dec.Decode(&dr); err != nil {\n\t\treturn err\n\t}\n\tdrs := dr.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", dr.Title, dr.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, dr := range drs {\n\t\tfmt.Fprintf(w, \"DiffServ%s = %#x\", dr.Name, dr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", dr.OrigName)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype dscpRegistry struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tNote string `xml:\"note\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tPoolRecords []struct {\n\t\tName string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>record\"`\n\tRecords []struct {\n\t\tName string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>registry>record\"`\n}\n\ntype canonDSCPRecord struct {\n\tOrigName string\n\tName string\n\tValue int\n}\n\nfunc (drr *dscpRegistry) escape() []canonDSCPRecord {\n\tdrs := make([]canonDSCPRecord, len(drr.Records))\n\tsr := strings.NewReplacer(\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, dr := range drr.Records {\n\t\ts := strings.TrimSpace(dr.Name)\n\t\tdrs[i].OrigName = s\n\t\tdrs[i].Name = sr.Replace(s)\n\t\tn, err := strconv.ParseUint(dr.Space, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdrs[i].Value = int(n) << 2\n\t}\n\treturn drs\n}\n\nfunc parseTOSTCByte(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar ttb tosTCByte\n\tif err := dec.Decode(&ttb); err != nil {\n\t\treturn err\n\t}\n\ttrs := ttb.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", ttb.Title, ttb.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, tr := range trs {\n\t\tfmt.Fprintf(w, \"%s = %#x\", tr.Keyword, tr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", tr.OrigKeyword)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype tosTCByte struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tNote string `xml:\"note\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tRecords []struct {\n\t\tBinary string `xml:\"binary\"`\n\t\tKeyword string `xml:\"keyword\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonTOSTCByteRecord struct {\n\tOrigKeyword string\n\tKeyword string\n\tValue int\n}\n\nfunc (ttb *tosTCByte) escape() []canonTOSTCByteRecord {\n\ttrs := make([]canonTOSTCByteRecord, len(ttb.Records))\n\tsr := strings.NewReplacer(\n\t\t\"Capable\", \"\",\n\t\t\"(\", \"\",\n\t\t\")\", \"\",\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, tr := range ttb.Records {\n\t\ts := strings.TrimSpace(tr.Keyword)\n\t\ttrs[i].OrigKeyword = s\n\t\tss := strings.Split(s, \" \")\n\t\tif len(ss) > 1 {\n\t\t\ttrs[i].Keyword = strings.Join(ss[1:], \" \")\n\t\t} else {\n\t\t\ttrs[i].Keyword = ss[0]\n\t\t}\n\t\ttrs[i].Keyword = sr.Replace(trs[i].Keyword)\n\t\tn, err := strconv.ParseUint(tr.Binary, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttrs[i].Value = int(n)\n\t}\n\treturn trs\n}\n\nfunc parseProtocolNumbers(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar pn protocolNumbers\n\tif err := dec.Decode(&pn); err != nil {\n\t\treturn err\n\t}\n\tprs := pn.escape()\n\tprs = append([]canonProtocolRecord{{\n\t\tName: \"IP\",\n\t\tDescr: \"IPv4 encapsulation, pseudo protocol number\",\n\t\tValue: 0,\n\t}}, prs...)\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", pn.Title, pn.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"Protocol%s = %d\", pr.Name, pr.Value)\n\t\ts := pr.Descr\n\t\tif s == \"\" {\n\t\t\ts = pr.OrigName\n\t\t}\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", s)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype protocolNumbers struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tNote string `xml:\"registry>note\"`\n\tRecords []struct {\n\t\tValue string `xml:\"value\"`\n\t\tName string `xml:\"name\"`\n\t\tDescr string `xml:\"description\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonProtocolRecord struct {\n\tOrigName string\n\tName string\n\tDescr string\n\tValue int\n}\n\nfunc (pn *protocolNumbers) escape() []canonProtocolRecord {\n\tprs := make([]canonProtocolRecord, len(pn.Records))\n\tsr := strings.NewReplacer(\n\t\t\"-in-\", \"in\",\n\t\t\"-within-\", \"within\",\n\t\t\"-over-\", \"over\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range pn.Records {\n\t\tprs[i].OrigName = pr.Name\n\t\ts := strings.TrimSpace(pr.Name)\n\t\tswitch pr.Name {\n\t\tcase \"ISIS over IPv4\":\n\t\t\tprs[i].Name = \"ISIS\"\n\t\tcase \"manet\":\n\t\t\tprs[i].Name = \"MANET\"\n\t\tdefault:\n\t\t\tprs[i].Name = sr.Replace(s)\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tfor i := range ss {\n\t\t\tss[i] = strings.TrimSpace(ss[i])\n\t\t}\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n<commit_msg>internal\/iana: fix a import comment in code generator.<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/go:generate go run gen.go\n\n\/\/ This program generates internet protocol constants and tables by\n\/\/ reading IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar registries = []struct {\n\turl string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/dscp-registry\/dscp-registry.xml\",\n\t\tparseDSCPRegistry,\n\t},\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/ipv4-tos-byte\/ipv4-tos-byte.xml\",\n\t\tparseTOSTCByte,\n\t},\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/protocol-numbers\/protocol-numbers.xml\",\n\t\tparseProtocolNumbers,\n\t},\n}\n\nfunc main() {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"\/\/ go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\\n\")\n\tfmt.Fprintf(&bb, `package iana \/\/ import \"golang.org\/x\/net\/internal\/iana\"`+\"\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := http.Get(r.url)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tfmt.Fprintf(os.Stderr, \"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := ioutil.WriteFile(\"const.go\", b, 0644); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseDSCPRegistry(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar dr dscpRegistry\n\tif err := dec.Decode(&dr); err != nil {\n\t\treturn err\n\t}\n\tdrs := dr.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", dr.Title, dr.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, dr := range drs {\n\t\tfmt.Fprintf(w, \"DiffServ%s = %#x\", dr.Name, dr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", dr.OrigName)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype dscpRegistry struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tNote string `xml:\"note\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tPoolRecords []struct {\n\t\tName string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>record\"`\n\tRecords []struct {\n\t\tName string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>registry>record\"`\n}\n\ntype canonDSCPRecord struct {\n\tOrigName string\n\tName string\n\tValue int\n}\n\nfunc (drr *dscpRegistry) escape() []canonDSCPRecord {\n\tdrs := make([]canonDSCPRecord, len(drr.Records))\n\tsr := strings.NewReplacer(\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, dr := range drr.Records {\n\t\ts := strings.TrimSpace(dr.Name)\n\t\tdrs[i].OrigName = s\n\t\tdrs[i].Name = sr.Replace(s)\n\t\tn, err := strconv.ParseUint(dr.Space, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdrs[i].Value = int(n) << 2\n\t}\n\treturn drs\n}\n\nfunc parseTOSTCByte(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar ttb tosTCByte\n\tif err := dec.Decode(&ttb); err != nil {\n\t\treturn err\n\t}\n\ttrs := ttb.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", ttb.Title, ttb.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, tr := range trs {\n\t\tfmt.Fprintf(w, \"%s = %#x\", tr.Keyword, tr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", tr.OrigKeyword)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype tosTCByte struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tNote string `xml:\"note\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tRecords []struct {\n\t\tBinary string `xml:\"binary\"`\n\t\tKeyword string `xml:\"keyword\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonTOSTCByteRecord struct {\n\tOrigKeyword string\n\tKeyword string\n\tValue int\n}\n\nfunc (ttb *tosTCByte) escape() []canonTOSTCByteRecord {\n\ttrs := make([]canonTOSTCByteRecord, len(ttb.Records))\n\tsr := strings.NewReplacer(\n\t\t\"Capable\", \"\",\n\t\t\"(\", \"\",\n\t\t\")\", \"\",\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, tr := range ttb.Records {\n\t\ts := strings.TrimSpace(tr.Keyword)\n\t\ttrs[i].OrigKeyword = s\n\t\tss := strings.Split(s, \" \")\n\t\tif len(ss) > 1 {\n\t\t\ttrs[i].Keyword = strings.Join(ss[1:], \" \")\n\t\t} else {\n\t\t\ttrs[i].Keyword = ss[0]\n\t\t}\n\t\ttrs[i].Keyword = sr.Replace(trs[i].Keyword)\n\t\tn, err := strconv.ParseUint(tr.Binary, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttrs[i].Value = int(n)\n\t}\n\treturn trs\n}\n\nfunc parseProtocolNumbers(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar pn protocolNumbers\n\tif err := dec.Decode(&pn); err != nil {\n\t\treturn err\n\t}\n\tprs := pn.escape()\n\tprs = append([]canonProtocolRecord{{\n\t\tName: \"IP\",\n\t\tDescr: \"IPv4 encapsulation, pseudo protocol number\",\n\t\tValue: 0,\n\t}}, prs...)\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", pn.Title, pn.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"Protocol%s = %d\", pr.Name, pr.Value)\n\t\ts := pr.Descr\n\t\tif s == \"\" {\n\t\t\ts = pr.OrigName\n\t\t}\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", s)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype protocolNumbers struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tRegTitle string `xml:\"registry>title\"`\n\tNote string `xml:\"registry>note\"`\n\tRecords []struct {\n\t\tValue string `xml:\"value\"`\n\t\tName string `xml:\"name\"`\n\t\tDescr string `xml:\"description\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonProtocolRecord struct {\n\tOrigName string\n\tName string\n\tDescr string\n\tValue int\n}\n\nfunc (pn *protocolNumbers) escape() []canonProtocolRecord {\n\tprs := make([]canonProtocolRecord, len(pn.Records))\n\tsr := strings.NewReplacer(\n\t\t\"-in-\", \"in\",\n\t\t\"-within-\", \"within\",\n\t\t\"-over-\", \"over\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range pn.Records {\n\t\tprs[i].OrigName = pr.Name\n\t\ts := strings.TrimSpace(pr.Name)\n\t\tswitch pr.Name {\n\t\tcase \"ISIS over IPv4\":\n\t\t\tprs[i].Name = \"ISIS\"\n\t\tcase \"manet\":\n\t\t\tprs[i].Name = \"MANET\"\n\t\tdefault:\n\t\t\tprs[i].Name = sr.Replace(s)\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tfor i := range ss {\n\t\t\tss[i] = strings.TrimSpace(ss[i])\n\t\t}\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n<|endoftext|>"} {"text":"<commit_before>package inventory\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aarbt\/bitcoin-network\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/database\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/messages\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/utils\"\n)\n\ntype HeaderGetter struct {\n\tinv *Inventory\n\tinput chan network.Message\n\toutput chan *messages.ExtendedHeader\n\tdb *database.DB\n\n\tclosing bool\n\tcloseCh chan struct{}\n\tdoneCh chan struct{}\n}\n\nfunc GetHeaders(inv *Inventory, db *database.DB) *HeaderGetter {\n\tgetter := HeaderGetter{\n\t\tinv: inv,\n\t\tdb: db,\n\n\t\tcloseCh: make(chan struct{}),\n\t\tdoneCh: make(chan struct{}),\n\t}\n\tgetter.Run()\n\treturn &getter\n}\n\n\/\/ Close shuts down the header getter. It is guaranteed to not read or write to\n\/\/ any channels after Close returns.\nfunc (getter *HeaderGetter) Close() {\n\tgetter.closing = true\n\tclose(getter.closeCh)\n\t<-getter.doneCh\n}\n\nfunc (getter *HeaderGetter) Run() {\n\tgo func() {\n\t\tfor !getter.closing {\n\t\t\tincomplete := getter.requestMoreHeaders()\n\t\t\tif getter.closing {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !incomplete {\n\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t}\n\t\t}\n\t\tclose(getter.doneCh)\n\t}()\n}\n\nfunc (getter *HeaderGetter) Input() chan<- network.Message {\n\t\/\/ TODO Find a better way to avoid dropped messages.\n\tgetter.input = make(chan network.Message, 10)\n\treturn getter.input\n}\n\nfunc (getter *HeaderGetter) loadHeader(hash []byte) *messages.ExtendedHeader {\n\theader := &messages.ExtendedHeader{}\n\terr := getter.db.Get(hash, header)\n\tif err != nil {\n\t\tlog.Printf(\"Getting header failed: %v\", err)\n\t\treturn nil\n\t}\n\treturn header\n}\n\nfunc (getter *HeaderGetter) setHighestKnownBlock(hash []byte, height uint) {\n\tgetter.db.Set(kHighestKnownBlock, databaseBlockHeight{\n\t\tHash: hash,\n\t\tHeight: height,\n\t})\n}\n\ntype databaseBlockHeight struct {\n\tHash []byte\n\tHeight uint\n}\n\nfunc (getter *HeaderGetter) highestKnownBlock() ([]byte, uint) {\n\t\/\/ TODO read from memory if no database is set.\n\tlast := databaseBlockHeight{}\n\terr := getter.db.Get(kHighestKnownBlock, &last)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't retrieve highest known block: %v\", err)\n\t\treturn make([]byte, 32), 0\n\t}\n\treturn last.Hash, last.Height\n}\n\nfunc (getter *HeaderGetter) formGetHeaderMessage() network.Message {\n\thash, height := getter.highestKnownBlock()\n\tmsg := messages.GetBlocks{\n\t\tVersion: 70001,\n\t\tHashStop: make([]byte, 32),\n\t\tLocators: [][]byte{hash},\n\t}\n\tloc := hash\n\t\/\/ Add 20 recent known headers with larger and larger gaps between them\n\t\/\/ as they get less recent (up to a step of 40 between the two least\n\t\/\/ recent ones.)\n\tfor i := 0; i < 20 && loc != nil; i++ {\n\t\tfor j := 0; j < i*2; j++ {\n\t\t\theader := getter.loadHeader(loc)\n\t\t\tif header == nil {\n\t\t\t\tloc = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tloc = header.PrevBlock\n\t\t}\n\t\tif loc != nil {\n\t\t\tmsg.Locators = append(msg.Locators, loc)\n\t\t}\n\t}\n\tvar msgBytes bytes.Buffer\n\tmsg.Serialize(&msgBytes)\n\tlog.Printf(\"Getting more headers, starting from %x at height %d.\\n\",\n\t\tutils.ReverseBytes(hash), height)\n\treturn network.Message{\n\t\tType: \"getheaders\",\n\t\tData: msgBytes.Bytes(),\n\t}\n}\n\nfunc (getter *HeaderGetter) requestMoreHeaders() bool {\n\t\/\/ Wait for connection.\n\tselect {\n\tcase <-getter.inv.Connected():\n\t\t\/\/ do nothing.\n\tcase <-getter.closeCh:\n\t\treturn false\n\t}\n\n\tmsg := getter.formGetHeaderMessage()\n\tendpoint := getter.inv.Send(msg)\n\tif endpoint == \"\" {\n\t\tlog.Printf(\"No endpoint connected; wait for timeout and retry.\")\n\t}\n\n\tvar incomplete, received bool\n\t\/\/ TODO Increase timeout on large requests.\n\ttimer := time.NewTimer(45 * time.Second)\n\tfor !received {\n\t\tselect {\n\t\tcase m := <-getter.input:\n\t\t\tif m.Endpoint == endpoint {\n\t\t\t\tincomplete = getter.handleHeaders(m)\n\t\t\t\treceived = true\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Received unexpected message from %q \"+\n\t\t\t\t\t\"while waiting for message from %q.\",\n\t\t\t\t\tm.Endpoint, endpoint)\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\tif endpoint != \"\" {\n\t\t\t\tgetter.inv.reportMisbehaviour(endpoint, 5,\n\t\t\t\t\t\"getheaders request timed out.\")\n\t\t\t}\n\t\t\treturn true\n\t\tcase <-getter.closeCh:\n\t\t\ttimer.Stop()\n\t\t\treturn false\n\t\t}\n\t}\n\treturn incomplete\n}\n\nfunc (getter *HeaderGetter) setMessageHeight(m *messages.ExtendedHeader) (uint, error) {\n\tprev := getter.loadHeader(m.PrevBlock)\n\tif prev != nil {\n\t\tm.Height = prev.Height + 1\n\t\treturn m.Height, nil\n\t} else {\n\t\tblock0, _ := hex.DecodeString(\n\t\t\t\"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f\")\n\t\tif bytes.Equal(m.PrevBlock, utils.ReverseBytes(block0)) {\n\t\t\tm.Height = 1\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, fmt.Errorf(\"Couldn't find prev block.\")\n\t}\n}\n\nfunc (getter *HeaderGetter) handleHeaders(m network.Message) bool {\n\t_, oldHeight := getter.highestKnownBlock()\n\theaders, err := messages.ParseHeaders(m.Data)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to parse headers: %v\", err)\n\t\treturn true\n\t}\n\tlog.Printf(\"Received %d headers from %s.\\n\",\n\t\tlen(headers), m.Endpoint)\n\tvar hash []byte\n\tvar height uint\n\tvar incomplete bool\n\tvar vector messages.InventoryVector\n\tfor _, h := range headers {\n\t\theight, err = getter.setMessageHeight(h)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Received bad header %x from %s.\",\n\t\t\t\tutils.ReverseBytes(h.Hash()), m.Endpoint)\n\t\t\t\/\/ TODO handle this\n\t\t\tgetter.inv.reportMisbehaviour(m.Endpoint, 10,\n\t\t\t\t\"Sent bad block header.\")\n\t\t\tincomplete = true\n\t\t\tbreak\n\t\t}\n\t\thash = h.Hash()\n\t\tgetter.db.Set(hash, h)\n\n\t\t\/\/ Request merkleblocks for headers.\n\t\tvector = append(vector, &messages.Inventory{\n\t\t\tType: messages.TypeMsgFilteredBlock,\n\t\t\tHash: hash,\n\t\t})\n\t}\n\tif len(hash) != 0 && height > oldHeight {\n\t\tgetter.setHighestKnownBlock(hash, height)\n\t\tlog.Printf(\"Last header updated to %x (internal byte order), height is %d.\",\n\t\t\tutils.ReverseBytes(hash), height)\n\t\tif height < oldHeight+uint(len(vector)) {\n\t\t\t\/\/ TODO not all blocks are new, consider changing endpoint.\n\t\t}\n\t} else {\n\t\tgetter.inv.reportMisbehaviour(m.Endpoint, 10,\n\t\t\t\"No new headers in received message.\")\n\t\tincomplete = true\n\t}\n\tif len(headers) >= 2000 {\n\t\tincomplete = true\n\t} else {\n\t\t\/\/ TODO question if endpoint is good.\n\t}\n\tif len(vector) > 0 {\n\t\tdata, err := messages.EncodeInventoryVector(vector)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to encode getdata vector: %v\", err)\n\t\t\treturn true\n\t\t}\n\t\tgetter.inv.Send(network.Message{\n\t\t\tType: \"getdata\",\n\t\t\tData: data,\n\t\t})\n\t}\n\treturn incomplete\n}\n<commit_msg>Clean up shut-down process of headers module.<commit_after>package inventory\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aarbt\/bitcoin-network\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/database\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/messages\"\n\t\"github.com\/aarbt\/bitcoin-wallet\/utils\"\n)\n\ntype HeaderGetter struct {\n\tinv *Inventory\n\tinput chan network.Message\n\toutput chan *messages.ExtendedHeader\n\tdb *database.DB\n\n\tcloseCh chan struct{}\n\tdoneCh chan struct{}\n}\n\nfunc GetHeaders(inv *Inventory, db *database.DB) *HeaderGetter {\n\tgetter := HeaderGetter{\n\t\tinv: inv,\n\t\tdb: db,\n\n\t\tcloseCh: make(chan struct{}),\n\t\tdoneCh: make(chan struct{}),\n\t}\n\tgetter.Run()\n\treturn &getter\n}\n\n\/\/ Close shuts down the header getter. It is guaranteed to not read or write to\n\/\/ any channels after Close returns.\nfunc (getter *HeaderGetter) Close() {\n\tclose(getter.closeCh)\n\t<-getter.doneCh\n}\n\nfunc (getter *HeaderGetter) Run() {\n\tgo func() {\n\t\tvar closing bool\n\t\tfor !closing {\n\t\t\ttimer := time.NewTimer(2 * time.Minute)\n\t\t\tincomplete := getter.requestMoreHeaders()\n\t\t\tif !incomplete {\n\t\t\t\t\/\/ We shouldn't need to poll for headers very\n\t\t\t\t\/\/ often as blocks are broadcast by miners.\n\t\t\t\tselect {\n\t\t\t\tcase <-timer.C:\n\t\t\t\t\t\/\/nothing\n\t\t\t\tcase <-getter.closeCh:\n\t\t\t\t\tclosing = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(getter.doneCh)\n\t}()\n}\n\nfunc (getter *HeaderGetter) Input() chan<- network.Message {\n\t\/\/ TODO Find a better way to avoid dropped messages.\n\tgetter.input = make(chan network.Message, 10)\n\treturn getter.input\n}\n\nfunc (getter *HeaderGetter) loadHeader(hash []byte) (*messages.ExtendedHeader, error) {\n\theader := &messages.ExtendedHeader{}\n\terr := getter.db.Get(hash, header)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Loading header failed: %v\", err)\n\t}\n\treturn header, nil\n}\n\nfunc (getter *HeaderGetter) setHighestKnownBlock(hash []byte, height int) {\n\tgetter.db.Set(kHighestKnownBlock, databaseBlockHeight{\n\t\tHash: hash,\n\t\tHeight: height,\n\t})\n}\n\ntype databaseBlockHeight struct {\n\tHash []byte\n\tHeight int\n}\n\nfunc (getter *HeaderGetter) highestKnownBlock() ([]byte, int) {\n\t\/\/ TODO read from memory if no database is set.\n\tlast := databaseBlockHeight{}\n\terr := getter.db.Get(kHighestKnownBlock, &last)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't retrieve highest known block: %v\", err)\n\t\treturn make([]byte, 32), 0\n\t}\n\treturn last.Hash, last.Height\n}\n\nfunc (getter *HeaderGetter) formGetHeaderMessage() network.Message {\n\thash, height := getter.highestKnownBlock()\n\tmsg := messages.GetBlocks{\n\t\tVersion: 70001,\n\t\tHashStop: make([]byte, 32),\n\t\tLocators: [][]byte{hash},\n\t}\n\tloc := hash\n\t\/\/ Add 20 recent known headers with larger and larger gaps between them\n\t\/\/ as they get less recent (up to a step of 40 between the two least\n\t\/\/ recent ones.)\n\tfor i := 0; i < 20 && loc != nil; i++ {\n\t\tfor j := 0; j < i*2; j++ {\n\t\t\theader, err := getter.loadHeader(loc)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to get recent header, %d steps back: %v\",\n\t\t\t\t\ti, err)\n\t\t\t\tloc = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tloc = header.PrevBlock\n\t\t}\n\t\tif loc != nil {\n\t\t\tmsg.Locators = append(msg.Locators, loc)\n\t\t}\n\t}\n\tvar msgBytes bytes.Buffer\n\tmsg.Serialize(&msgBytes)\n\tlog.Printf(\"Getting more headers, starting from %x at height %d.\\n\",\n\t\tutils.ReverseBytes(hash), height)\n\treturn network.Message{\n\t\tType: \"getheaders\",\n\t\tData: msgBytes.Bytes(),\n\t}\n}\n\nfunc (getter *HeaderGetter) requestMoreHeaders() bool {\n\t\/\/ Wait for connection.\n\tselect {\n\tcase <-getter.inv.Connected():\n\t\t\/\/ do nothing.\n\tcase <-getter.closeCh:\n\t\treturn false\n\t}\n\n\tmsg := getter.formGetHeaderMessage()\n\tendpoint := getter.inv.Send(msg)\n\tif endpoint == \"\" {\n\t\tlog.Printf(\"No endpoint connected; wait for timeout and retry.\")\n\t}\n\n\tvar incomplete, received bool\n\t\/\/ TODO Increase timeout on large requests.\n\ttimer := time.NewTimer(45 * time.Second)\n\tfor !received {\n\t\tselect {\n\t\tcase m := <-getter.input:\n\t\t\tif m.Endpoint == endpoint {\n\t\t\t\tincomplete = getter.handleHeaders(m)\n\t\t\t\treceived = true\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Received unexpected message from %q \"+\n\t\t\t\t\t\"while waiting for message from %q.\",\n\t\t\t\t\tm.Endpoint, endpoint)\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\tif endpoint != \"\" {\n\t\t\t\tgetter.inv.reportMisbehaviour(endpoint, 5,\n\t\t\t\t\t\"getheaders request timed out.\")\n\t\t\t}\n\t\t\treturn true\n\t\tcase <-getter.closeCh:\n\t\t\ttimer.Stop()\n\t\t\treturn false\n\t\t}\n\t}\n\treturn incomplete\n}\n\nfunc (getter *HeaderGetter) getBlockHeight(hash []byte) (int, error) {\n\tprev, err := getter.loadHeader(hash)\n\tif err != nil {\n\t\tblock0, _ := hex.DecodeString(\n\t\t\t\"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f\")\n\t\tif bytes.Equal(hash, utils.ReverseBytes(block0)) {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn -1, fmt.Errorf(\"Couldn't find prev block.\")\n\t}\n\treturn int(prev.Height), nil\n}\n\nfunc (getter *HeaderGetter) handleHeaders(m network.Message) bool {\n\t_, oldHeight := getter.highestKnownBlock()\n\theaders, err := messages.ParseHeaders(m.Data)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to parse headers: %v\", err)\n\t\treturn true\n\t}\n\tlog.Printf(\"Received %d headers from %s.\\n\",\n\t\tlen(headers), m.Endpoint)\n\tvar hash []byte\n\tvar height int\n\tvar incomplete bool\n\tvar vector messages.InventoryVector\n\tfor _, h := range headers {\n\t\tprevHeight, err := getter.getBlockHeight(h.PrevBlock)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Received bad header %x from %s.\",\n\t\t\t\tutils.ReverseBytes(h.Hash()), m.Endpoint)\n\t\t\t\/\/ TODO handle this\n\t\t\tgetter.inv.reportMisbehaviour(m.Endpoint, 20,\n\t\t\t\t\"Sent bad block header.\")\n\t\t\tincomplete = true\n\t\t\tbreak\n\t\t}\n\t\theight = prevHeight + 1\n\t\th.Height = height\n\t\thash = h.Hash()\n\t\tgetter.db.Set(hash, h)\n\n\t\t\/\/ Request merkleblocks for headers.\n\t\tvector = append(vector, &messages.Inventory{\n\t\t\tType: messages.TypeMsgFilteredBlock,\n\t\t\tHash: hash,\n\t\t})\n\t}\n\tif len(hash) != 0 && height > oldHeight {\n\t\tgetter.setHighestKnownBlock(hash, height)\n\t\tlog.Printf(\"Last header updated to %x (internal byte order), height is %d.\",\n\t\t\tutils.ReverseBytes(hash), height)\n\t\tif height < oldHeight+len(vector) {\n\t\t\t\/\/ TODO not all blocks are new, consider changing endpoint.\n\t\t}\n\t} else {\n\t\t\/\/ TODO If we haven't caught up yet, this means the endpoint is\n\t\t\/\/ misbehaving.\n\t}\n\tif len(headers) >= 2000 {\n\t\tincomplete = true\n\t} else {\n\t\t\/\/ TODO question if endpoint is good.\n\t}\n\terr = getter.inv.SendGetData(vector, m.Endpoint)\n\tif err != nil {\n\t\treturn true\n\t}\n\treturn incomplete\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Daniel Oaks <daniel@danieloaks.net>\n\/\/ released under the MIT license\n\npackage logger\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ Level represents the level to log messages at.\ntype Level int\n\nconst (\n\t\/\/ LogDebug represents debug messages.\n\tLogDebug Level = iota\n\t\/\/ LogInfo represents informational messages.\n\tLogInfo\n\t\/\/ LogWarning represents warnings.\n\tLogWarning\n\t\/\/ LogError represents errors.\n\tLogError\n)\n\nvar (\n\t\/\/ LogLevelNames takes a config name and gives the real log level.\n\tLogLevelNames = map[string]Level{\n\t\t\"debug\": LogDebug,\n\t\t\"info\": LogInfo,\n\t\t\"warn\": LogWarning,\n\t\t\"warning\": LogWarning,\n\t\t\"warnings\": LogWarning,\n\t\t\"error\": LogError,\n\t\t\"errors\": LogError,\n\t}\n\t\/\/ LogLevelDisplayNames gives the display name to use for our log levels.\n\tLogLevelDisplayNames = map[Level]string{\n\t\tLogDebug: \"debug\",\n\t\tLogInfo: \"info\",\n\t\tLogWarning: \"warning\",\n\t\tLogError: \"error\",\n\t}\n)\n\n\/\/ Manager is the main interface used to log debug\/info\/error messages.\ntype Manager struct {\n\tconfigMutex sync.RWMutex\n\tloggers []singleLogger\n\tstdoutWriteLock sync.Mutex \/\/ use one lock for both stdout and stderr\n\tfileWriteLock sync.Mutex\n\tloggingRawIO uint32\n}\n\n\/\/ LoggingConfig represents the configuration of a single logger.\ntype LoggingConfig struct {\n\tMethod string\n\tMethodStdout bool\n\tMethodStderr bool\n\tMethodFile bool\n\tFilename string\n\tTypeString string `yaml:\"type\"`\n\tTypes []string `yaml:\"real-types\"`\n\tExcludedTypes []string `yaml:\"real-excluded-types\"`\n\tLevelString string `yaml:\"level\"`\n\tLevel Level `yaml:\"level-real\"`\n}\n\n\/\/ NewManager returns a new log manager.\nfunc NewManager(config []LoggingConfig) (*Manager, error) {\n\tvar logger Manager\n\n\tif err := logger.ApplyConfig(config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &logger, nil\n}\n\n\/\/ ApplyConfig applies the given config to this logger (rehashes the config, in other words).\nfunc (logger *Manager) ApplyConfig(config []LoggingConfig) error {\n\tlogger.configMutex.Lock()\n\tdefer logger.configMutex.Unlock()\n\n\tfor _, logger := range logger.loggers {\n\t\tlogger.Close()\n\t}\n\n\tlogger.loggers = nil\n\tatomic.StoreUint32(&logger.loggingRawIO, 0)\n\n\t\/\/ for safety, this deep-copies all mutable data in `config`\n\t\/\/ XXX let's keep it that way\n\tvar lastErr error\n\tfor _, logConfig := range config {\n\t\ttypeMap := make(map[string]bool)\n\t\tfor _, name := range logConfig.Types {\n\t\t\ttypeMap[name] = true\n\t\t}\n\t\texcludedTypeMap := make(map[string]bool)\n\t\tfor _, name := range logConfig.ExcludedTypes {\n\t\t\texcludedTypeMap[name] = true\n\t\t}\n\n\t\tsLogger := singleLogger{\n\t\t\tMethodSTDOUT: logConfig.MethodStdout,\n\t\t\tMethodSTDERR: logConfig.MethodStderr,\n\t\t\tMethodFile: fileMethod{\n\t\t\t\tEnabled: logConfig.MethodFile,\n\t\t\t\tFilename: logConfig.Filename,\n\t\t\t},\n\t\t\tLevel: logConfig.Level,\n\t\t\tTypes: typeMap,\n\t\t\tExcludedTypes: excludedTypeMap,\n\t\t\tstdoutWriteLock: &logger.stdoutWriteLock,\n\t\t\tfileWriteLock: &logger.fileWriteLock,\n\t\t}\n\t\tioEnabled := typeMap[\"userinput\"] || typeMap[\"useroutput\"] || (typeMap[\"*\"] && !(excludedTypeMap[\"userinput\"] && excludedTypeMap[\"useroutput\"]))\n\t\t\/\/ raw I\/O is only logged at level debug;\n\t\tif ioEnabled && logConfig.Level == LogDebug {\n\t\t\tatomic.StoreUint32(&logger.loggingRawIO, 1)\n\t\t}\n\t\tif sLogger.MethodFile.Enabled {\n\t\t\tfile, err := os.OpenFile(sLogger.MethodFile.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = fmt.Errorf(\"Could not open log file %s [%s]\", sLogger.MethodFile.Filename, err.Error())\n\t\t\t}\n\t\t\twriter := bufio.NewWriter(file)\n\t\t\tsLogger.MethodFile.File = file\n\t\t\tsLogger.MethodFile.Writer = writer\n\t\t}\n\t\tlogger.loggers = append(logger.loggers, sLogger)\n\t}\n\n\treturn lastErr\n}\n\n\/\/ IsLoggingRawIO returns true if raw user input and output is being logged.\nfunc (logger *Manager) IsLoggingRawIO() bool {\n\treturn atomic.LoadUint32(&logger.loggingRawIO) == 1\n}\n\n\/\/ Log logs the given message with the given details.\nfunc (logger *Manager) Log(level Level, logType string, messageParts ...string) {\n\tlogger.configMutex.RLock()\n\tdefer logger.configMutex.RUnlock()\n\n\tfor _, singleLogger := range logger.loggers {\n\t\tsingleLogger.Log(level, logType, messageParts...)\n\t}\n}\n\n\/\/ Debug logs the given message as a debug message.\nfunc (logger *Manager) Debug(logType string, messageParts ...string) {\n\tlogger.Log(LogDebug, logType, messageParts...)\n}\n\n\/\/ Info logs the given message as an info message.\nfunc (logger *Manager) Info(logType string, messageParts ...string) {\n\tlogger.Log(LogInfo, logType, messageParts...)\n}\n\n\/\/ Warning logs the given message as a warning message.\nfunc (logger *Manager) Warning(logType string, messageParts ...string) {\n\tlogger.Log(LogWarning, logType, messageParts...)\n}\n\n\/\/ Error logs the given message as an error message.\nfunc (logger *Manager) Error(logType string, messageParts ...string) {\n\tlogger.Log(LogError, logType, messageParts...)\n}\n\ntype fileMethod struct {\n\tEnabled bool\n\tFilename string\n\tFile *os.File\n\tWriter *bufio.Writer\n}\n\n\/\/ singleLogger represents a single logger instance.\ntype singleLogger struct {\n\tstdoutWriteLock *sync.Mutex\n\tfileWriteLock *sync.Mutex\n\tMethodSTDOUT bool\n\tMethodSTDERR bool\n\tMethodFile fileMethod\n\tLevel Level\n\tTypes map[string]bool\n\tExcludedTypes map[string]bool\n}\n\nfunc (logger *singleLogger) Close() error {\n\tif logger.MethodFile.Enabled {\n\t\tflushErr := logger.MethodFile.Writer.Flush()\n\t\tcloseErr := logger.MethodFile.File.Close()\n\t\tif flushErr != nil {\n\t\t\treturn flushErr\n\t\t}\n\t\treturn closeErr\n\t}\n\treturn nil\n}\n\n\/\/ Log logs the given message with the given details.\nfunc (logger *singleLogger) Log(level Level, logType string, messageParts ...string) {\n\t\/\/ no logging enabled\n\tif !(logger.MethodSTDOUT || logger.MethodSTDERR || logger.MethodFile.Enabled) {\n\t\treturn\n\t}\n\n\t\/\/ ensure we're logging to the given level\n\tif level < logger.Level {\n\t\treturn\n\t}\n\n\t\/\/ ensure we're capturing this logType\n\tlogTypeCleaned := strings.ToLower(strings.TrimSpace(logType))\n\tcapturing := (logger.Types[\"*\"] || logger.Types[logTypeCleaned]) && !logger.ExcludedTypes[\"*\"] && !logger.ExcludedTypes[logTypeCleaned]\n\tif !capturing {\n\t\treturn\n\t}\n\n\t\/\/ assemble full line\n\n\tvar rawBuf bytes.Buffer\n\tfmt.Fprintf(&rawBuf, \"%s : %s : %s : \", time.Now().UTC().Format(\"2006-01-02T15:04:05Z\"), LogLevelDisplayNames[level], logType)\n\tfor i, p := range messageParts {\n\t\trawBuf.WriteString(p)\n\n\t\tif i != len(messageParts)-1 {\n\t\t\trawBuf.WriteString(\" : \")\n\t\t}\n\t}\n\trawBuf.WriteRune('\\n')\n\n\t\/\/ output\n\tif logger.MethodSTDOUT {\n\t\tlogger.stdoutWriteLock.Lock()\n\t\tos.Stdout.Write(rawBuf.Bytes())\n\t\tlogger.stdoutWriteLock.Unlock()\n\t}\n\tif logger.MethodSTDERR {\n\t\tlogger.stdoutWriteLock.Lock()\n\t\tos.Stderr.Write(rawBuf.Bytes())\n\t\tlogger.stdoutWriteLock.Unlock()\n\t}\n\tif logger.MethodFile.Enabled {\n\t\tlogger.fileWriteLock.Lock()\n\t\tlogger.MethodFile.Writer.Write(rawBuf.Bytes())\n\t\tlogger.MethodFile.Writer.Flush()\n\t\tlogger.fileWriteLock.Unlock()\n\t}\n}\n<commit_msg>fix timestamp formatting<commit_after>\/\/ Copyright (c) 2017 Daniel Oaks <daniel@danieloaks.net>\n\/\/ released under the MIT license\n\npackage logger\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ Level represents the level to log messages at.\ntype Level int\n\nconst (\n\t\/\/ LogDebug represents debug messages.\n\tLogDebug Level = iota\n\t\/\/ LogInfo represents informational messages.\n\tLogInfo\n\t\/\/ LogWarning represents warnings.\n\tLogWarning\n\t\/\/ LogError represents errors.\n\tLogError\n)\n\nvar (\n\t\/\/ LogLevelNames takes a config name and gives the real log level.\n\tLogLevelNames = map[string]Level{\n\t\t\"debug\": LogDebug,\n\t\t\"info\": LogInfo,\n\t\t\"warn\": LogWarning,\n\t\t\"warning\": LogWarning,\n\t\t\"warnings\": LogWarning,\n\t\t\"error\": LogError,\n\t\t\"errors\": LogError,\n\t}\n\t\/\/ LogLevelDisplayNames gives the display name to use for our log levels.\n\tLogLevelDisplayNames = map[Level]string{\n\t\tLogDebug: \"debug\",\n\t\tLogInfo: \"info\",\n\t\tLogWarning: \"warning\",\n\t\tLogError: \"error\",\n\t}\n)\n\n\/\/ Manager is the main interface used to log debug\/info\/error messages.\ntype Manager struct {\n\tconfigMutex sync.RWMutex\n\tloggers []singleLogger\n\tstdoutWriteLock sync.Mutex \/\/ use one lock for both stdout and stderr\n\tfileWriteLock sync.Mutex\n\tloggingRawIO uint32\n}\n\n\/\/ LoggingConfig represents the configuration of a single logger.\ntype LoggingConfig struct {\n\tMethod string\n\tMethodStdout bool\n\tMethodStderr bool\n\tMethodFile bool\n\tFilename string\n\tTypeString string `yaml:\"type\"`\n\tTypes []string `yaml:\"real-types\"`\n\tExcludedTypes []string `yaml:\"real-excluded-types\"`\n\tLevelString string `yaml:\"level\"`\n\tLevel Level `yaml:\"level-real\"`\n}\n\n\/\/ NewManager returns a new log manager.\nfunc NewManager(config []LoggingConfig) (*Manager, error) {\n\tvar logger Manager\n\n\tif err := logger.ApplyConfig(config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &logger, nil\n}\n\n\/\/ ApplyConfig applies the given config to this logger (rehashes the config, in other words).\nfunc (logger *Manager) ApplyConfig(config []LoggingConfig) error {\n\tlogger.configMutex.Lock()\n\tdefer logger.configMutex.Unlock()\n\n\tfor _, logger := range logger.loggers {\n\t\tlogger.Close()\n\t}\n\n\tlogger.loggers = nil\n\tatomic.StoreUint32(&logger.loggingRawIO, 0)\n\n\t\/\/ for safety, this deep-copies all mutable data in `config`\n\t\/\/ XXX let's keep it that way\n\tvar lastErr error\n\tfor _, logConfig := range config {\n\t\ttypeMap := make(map[string]bool)\n\t\tfor _, name := range logConfig.Types {\n\t\t\ttypeMap[name] = true\n\t\t}\n\t\texcludedTypeMap := make(map[string]bool)\n\t\tfor _, name := range logConfig.ExcludedTypes {\n\t\t\texcludedTypeMap[name] = true\n\t\t}\n\n\t\tsLogger := singleLogger{\n\t\t\tMethodSTDOUT: logConfig.MethodStdout,\n\t\t\tMethodSTDERR: logConfig.MethodStderr,\n\t\t\tMethodFile: fileMethod{\n\t\t\t\tEnabled: logConfig.MethodFile,\n\t\t\t\tFilename: logConfig.Filename,\n\t\t\t},\n\t\t\tLevel: logConfig.Level,\n\t\t\tTypes: typeMap,\n\t\t\tExcludedTypes: excludedTypeMap,\n\t\t\tstdoutWriteLock: &logger.stdoutWriteLock,\n\t\t\tfileWriteLock: &logger.fileWriteLock,\n\t\t}\n\t\tioEnabled := typeMap[\"userinput\"] || typeMap[\"useroutput\"] || (typeMap[\"*\"] && !(excludedTypeMap[\"userinput\"] && excludedTypeMap[\"useroutput\"]))\n\t\t\/\/ raw I\/O is only logged at level debug;\n\t\tif ioEnabled && logConfig.Level == LogDebug {\n\t\t\tatomic.StoreUint32(&logger.loggingRawIO, 1)\n\t\t}\n\t\tif sLogger.MethodFile.Enabled {\n\t\t\tfile, err := os.OpenFile(sLogger.MethodFile.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = fmt.Errorf(\"Could not open log file %s [%s]\", sLogger.MethodFile.Filename, err.Error())\n\t\t\t}\n\t\t\twriter := bufio.NewWriter(file)\n\t\t\tsLogger.MethodFile.File = file\n\t\t\tsLogger.MethodFile.Writer = writer\n\t\t}\n\t\tlogger.loggers = append(logger.loggers, sLogger)\n\t}\n\n\treturn lastErr\n}\n\n\/\/ IsLoggingRawIO returns true if raw user input and output is being logged.\nfunc (logger *Manager) IsLoggingRawIO() bool {\n\treturn atomic.LoadUint32(&logger.loggingRawIO) == 1\n}\n\n\/\/ Log logs the given message with the given details.\nfunc (logger *Manager) Log(level Level, logType string, messageParts ...string) {\n\tlogger.configMutex.RLock()\n\tdefer logger.configMutex.RUnlock()\n\n\tfor _, singleLogger := range logger.loggers {\n\t\tsingleLogger.Log(level, logType, messageParts...)\n\t}\n}\n\n\/\/ Debug logs the given message as a debug message.\nfunc (logger *Manager) Debug(logType string, messageParts ...string) {\n\tlogger.Log(LogDebug, logType, messageParts...)\n}\n\n\/\/ Info logs the given message as an info message.\nfunc (logger *Manager) Info(logType string, messageParts ...string) {\n\tlogger.Log(LogInfo, logType, messageParts...)\n}\n\n\/\/ Warning logs the given message as a warning message.\nfunc (logger *Manager) Warning(logType string, messageParts ...string) {\n\tlogger.Log(LogWarning, logType, messageParts...)\n}\n\n\/\/ Error logs the given message as an error message.\nfunc (logger *Manager) Error(logType string, messageParts ...string) {\n\tlogger.Log(LogError, logType, messageParts...)\n}\n\ntype fileMethod struct {\n\tEnabled bool\n\tFilename string\n\tFile *os.File\n\tWriter *bufio.Writer\n}\n\n\/\/ singleLogger represents a single logger instance.\ntype singleLogger struct {\n\tstdoutWriteLock *sync.Mutex\n\tfileWriteLock *sync.Mutex\n\tMethodSTDOUT bool\n\tMethodSTDERR bool\n\tMethodFile fileMethod\n\tLevel Level\n\tTypes map[string]bool\n\tExcludedTypes map[string]bool\n}\n\nfunc (logger *singleLogger) Close() error {\n\tif logger.MethodFile.Enabled {\n\t\tflushErr := logger.MethodFile.Writer.Flush()\n\t\tcloseErr := logger.MethodFile.File.Close()\n\t\tif flushErr != nil {\n\t\t\treturn flushErr\n\t\t}\n\t\treturn closeErr\n\t}\n\treturn nil\n}\n\n\/\/ Log logs the given message with the given details.\nfunc (logger *singleLogger) Log(level Level, logType string, messageParts ...string) {\n\t\/\/ no logging enabled\n\tif !(logger.MethodSTDOUT || logger.MethodSTDERR || logger.MethodFile.Enabled) {\n\t\treturn\n\t}\n\n\t\/\/ ensure we're logging to the given level\n\tif level < logger.Level {\n\t\treturn\n\t}\n\n\t\/\/ ensure we're capturing this logType\n\tlogTypeCleaned := strings.ToLower(strings.TrimSpace(logType))\n\tcapturing := (logger.Types[\"*\"] || logger.Types[logTypeCleaned]) && !logger.ExcludedTypes[\"*\"] && !logger.ExcludedTypes[logTypeCleaned]\n\tif !capturing {\n\t\treturn\n\t}\n\n\t\/\/ assemble full line\n\n\tvar rawBuf bytes.Buffer\n\tfmt.Fprintf(&rawBuf, \"%s : %s : %s : \", time.Now().UTC().Format(\"2006-01-02T15:04:05.000Z\"), LogLevelDisplayNames[level], logType)\n\tfor i, p := range messageParts {\n\t\trawBuf.WriteString(p)\n\n\t\tif i != len(messageParts)-1 {\n\t\t\trawBuf.WriteString(\" : \")\n\t\t}\n\t}\n\trawBuf.WriteRune('\\n')\n\n\t\/\/ output\n\tif logger.MethodSTDOUT {\n\t\tlogger.stdoutWriteLock.Lock()\n\t\tos.Stdout.Write(rawBuf.Bytes())\n\t\tlogger.stdoutWriteLock.Unlock()\n\t}\n\tif logger.MethodSTDERR {\n\t\tlogger.stdoutWriteLock.Lock()\n\t\tos.Stderr.Write(rawBuf.Bytes())\n\t\tlogger.stdoutWriteLock.Unlock()\n\t}\n\tif logger.MethodFile.Enabled {\n\t\tlogger.fileWriteLock.Lock()\n\t\tlogger.MethodFile.Writer.Write(rawBuf.Bytes())\n\t\tlogger.MethodFile.Writer.Flush()\n\t\tlogger.fileWriteLock.Unlock()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mapgen\n\nimport (\n\t\"fmt\"\n\n\t\"buildblast\/lib\/coords\"\n)\n\ntype Chunk [][][]Block\n\nfunc generateChunk(bg blockGenerator, cc coords.Chunk) (Chunk, []coords.World) {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\n\tspawns := make([]coords.World, 0)\n\tblocks := make([][][]Block, cw)\n\tfor ox := 0; ox < cw; ox++ {\n\t\tblocks[ox] = make([][]Block, ch)\n\t\tfor oy := 0; oy < ch; oy++ {\n\t\t\tblocks[ox][oy] = make([]Block, cd)\n\t\t\tfor oz := 0; oz < cd; oz++ {\n\t\t\t\tbc := coords.Block{\n\t\t\t\t\tX: ox + cc.X*cw,\n\t\t\t\t\tY: oy + cc.Y*ch,\n\t\t\t\t\tZ: oz + cc.Z*cd,\n\t\t\t\t}\n\t\t\t\tblock, isSpawn := bg.Block(bc)\n\t\t\t\tblocks[ox][oy][oz] = block\n\t\t\t\tif isSpawn {\n\t\t\t\t\tspawns = append(spawns, bc.Center())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn blocks, spawns\n}\n\nfunc (c Chunk) Block(oc coords.Offset) Block {\n\treturn c[oc.X][oc.Y][oc.Z]\n}\n\nfunc (c Chunk) SetBlock(oc coords.Offset, newBlock Block) {\n\tc[oc.X][oc.Y][oc.Z] = newBlock\n}\n\n\/\/ Flatten returns the chunk data as a string. It\n\/\/ can be used for various forms of serialization\n\/\/ where a valid UTF-8 string is required, and\n\/\/ efficiency (in terms of size) is not hugely\n\/\/ important. We use this format for chunk data\n\/\/ in JSON documents because:\n\/\/ a) It's smaller, at least half the size, of a\n\/\/ normal JSON array, potentially more.\n\/\/ (depending on the contents)\n\/\/ b) It's much faster - the go JSON implementation\n\/\/ isn't particulilly fast at serializing large\n\/\/ arrays of numbers.\nfunc (c Chunk) Flatten() string {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\tdata := make([]byte, cw*ch*cd)\n\tfor ox := 0; ox < cw; ox++ {\n\t\tfor oy := 0; oy < ch; oy++ {\n\t\t\tfor oz := 0; oz < cd; oz++ {\n\t\t\t\t\/\/ 32: Space charater. Control charaters\n\t\t\t\t\/\/ are not allowed in JSON strings.\n\t\t\t\tvalue := byte(c[ox][oy][oz] + 32)\n\t\t\t\tif value >= 127 || value < 32 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"Attempted to encode out of range value of '%d' to chunk data. (It might work but we need to test it)\", value))\n\t\t\t\t}\n\t\t\t\tdata[ox*cw*ch+oy*cw+oz] = value\n\t\t\t}\n\t\t}\n\t}\n\treturn string(data)\n}\n\nfunc (c Chunk) Clone() Chunk {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\n\tblocks := make([][][]Block, cw)\n\tfor ox := 0; ox < cw; ox++ {\n\t\tblocks[ox] = make([][]Block, ch)\n\t\tfor oy := 0; oy < ch; oy++ {\n\t\t\tblocks[ox][oy] = make([]Block, cd)\n\t\t\tfor oz := 0; oz < cd; oz++ {\n\t\t\t\tblocks[ox][oy][oz] = c[ox][oy][oz]\n\t\t\t}\n\t\t}\n\t}\n\treturn blocks\n}\n<commit_msg>Sublime is stupid and not making unsaved files evident.<commit_after>package mapgen\n\nimport (\n\t\"fmt\"\n\n\t\"buildblast\/lib\/coords\"\n)\n\ntype Chunk [][][]Block\n\nfunc generateChunk(bg blockGenerator, cc coords.Chunk) (Chunk, []coords.World) {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\n\tspawns := make([]coords.World, 0)\n\tblocks := make([][][]Block, cw)\n\tfor ocX := 0; ocX < cw; ocX++ {\n\t\tblocks[ocX] = make([][]Block, ch)\n\t\tfor ocY := 0; ocY < ch; ocY++ {\n\t\t\tblocks[ocX][ocY] = make([]Block, cd)\n\t\t\tfor ocZ := 0; ocZ < cd; ocZ++ {\n\t\t\t\tbc := coords.Block{\n\t\t\t\t\tX: ocX + cc.X*cw,\n\t\t\t\t\tY: ocY + cc.Y*ch,\n\t\t\t\t\tZ: ocZ + cc.Z*cd,\n\t\t\t\t}\n\t\t\t\tblock, isSpawn := bg.Block(bc)\n\t\t\t\tblocks[ocX][ocY][ocZ] = block\n\t\t\t\tif isSpawn {\n\t\t\t\t\tspawns = append(spawns, bc.Center())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn blocks, spawns\n}\n\nfunc (c Chunk) Block(oc coords.Offset) Block {\n\treturn c[oc.X][oc.Y][oc.Z]\n}\n\nfunc (c Chunk) SetBlock(oc coords.Offset, newBlock Block) {\n\tc[oc.X][oc.Y][oc.Z] = newBlock\n}\n\n\/\/ Flatten returns the chunk data as a string. It\n\/\/ can be used for various forms of serialization\n\/\/ where a valid UTF-8 string is required, and\n\/\/ efficiency (in terms of size) is not hugely\n\/\/ important. We use this format for chunk data\n\/\/ in JSON documents because:\n\/\/ a) It's smaller, at least half the size, of a\n\/\/ normal JSON array, potentially more.\n\/\/ (depending on the contents)\n\/\/ b) It's much faster - the go JSON implementation\n\/\/ isn't particulilly fast at serializing large\n\/\/ arrays of numbers.\nfunc (c Chunk) Flatten() string {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\tdata := make([]byte, cw*ch*cd)\n\tfor ocX := 0; ocX < cw; ocX++ {\n\t\tfor ocY := 0; ocY < ch; ocY++ {\n\t\t\tfor ocZ := 0; ocZ < cd; ocZ++ {\n\t\t\t\t\/\/ 32: Space charater. Control charaters\n\t\t\t\t\/\/ are not allowed in JSON strings.\n\t\t\t\tvalue := byte(c[ocX][ocY][ocZ] + 32)\n\t\t\t\tif value >= 127 || value < 32 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"Attempted to encode out of range value of '%d' to chunk data. (It might work but we need to test it)\", value))\n\t\t\t\t}\n\t\t\t\tdata[ocX*cw*ch+ocY*cw+ocZ] = value\n\t\t\t}\n\t\t}\n\t}\n\treturn string(data)\n}\n\nfunc (c Chunk) Clone() Chunk {\n\tcw := coords.ChunkWidth\n\tch := coords.ChunkHeight\n\tcd := coords.ChunkDepth\n\n\tblocks := make([][][]Block, cw)\n\tfor ocX := 0; ocX < cw; ocX++ {\n\t\tblocks[ocX] = make([][]Block, ch)\n\t\tfor ocY := 0; ocY < ch; ocY++ {\n\t\t\tblocks[ocX][ocY] = make([]Block, cd)\n\t\t\tfor ocZ := 0; ocZ < cd; ocZ++ {\n\t\t\t\tblocks[ocX][ocY][ocZ] = c[ocX][ocY][ocZ]\n\t\t\t}\n\t\t}\n\t}\n\treturn blocks\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"fmt\"\n \"math\/rand\"\n \"time\"\n \"strconv\"\n \"runtime\"\n \"sync\"\n \"log\"\n\n \"github.com\/fatih\/color\"\n)\nvar wg sync.WaitGroup\n\nvar re *color.Color = color.New(color.FgRed)\nvar red *color.Color = re.Add(color.BgBlack)\nvar boldRed *color.Color = red.Add(color.Bold)\n\nfunc main() {\n\n runtime.GOMAXPROCS(2)\n wg.Add(1)\n\n var mode, numTables, total, tries int = 0, 0, 0, 0\n var avg float32 = 0\n red.Println(\"\\n\\tGame modes:\\n1: Pick a random table\\n2: Find the avg tries it takes to solve a puzzle\\n\\nPlease choose 1 or 2: \")\n _, err := fmt.Scanf(\"%d\", &mode)\n if err != nil {\n log.Fatal(err)\n }\n\n if mode == 1 {\n picker()\n } else if mode == 2 {\n red.Print(\"How many tables would you like to calculate for: \")\n _, err2 := fmt.Scanf(\"%d\", &numTables)\n if err2 != nil {\n log.Fatal(err2)\n }\n red.Print(\"How many times would you like to run the experiment: \")\n _, err3 := fmt.Scanf(\"%d\", &tries)\n if err3 != nil {\n log.Fatal(err3)\n }\n fmt.Println(\"\\n\")\n go calc()\n go printAvg(numTables, total, avg, tries)\n wg.Wait()\n } else {\n red.Println(\"Goodbye!\")\n }\n}\n\nfunc printAvg(numTables int, total int, avg float32, tries int) {\n defer wg.Done()\n for i := 1; i <= tries; i++ {\n total += avgFinder(numTables)\n }\n \/\/a := color.New(color.FgRed, color.Bold)\n avg = float32(total)\/float32(tries)\n boldRed.Printf(\"\\rThe avg tries it took to solve %v tables was %v \\n\\n\", numTables, avg)\n}\n\nfunc calc() {\n for {\n for i := \"Calculating\";; i += \".\" {\n red.Printf(\"\\r%s\", i)\n time.Sleep(500 * time.Millisecond)\n }\n }\n}\n\nfunc printWelcome() {\n welcome := []string{\"\\nWelcome to Colin's Random Table Picker\", \"Tables are eliminated when number is drawn,\", \"And put back in the game when their number is drawn again.\", \"Last number left wins!\", \"Good Luck.\\n\"}\n for _, item := range welcome {\n red.Println(item)\n time.Sleep(400 * time.Millisecond)\n }\n}\n\nfunc picker() {\n var tables, comp, wait int\n var compString string\n printWelcome()\n for {\n red.Print(\"How many tables are playing: \")\n _, err := fmt.Scanf(\"%d\", &tables)\n if err != nil {\n log.Fatal(err)\n }\n red.Print(\"How many ms would you like to wait between each iteration: \")\n _, err2 := fmt.Scanf(\"%d\", &wait)\n if err2 != nil {\n log.Fatal(err2)\n }\n if tables > 0 {\n break\n }\n }\n tableArray := make([]string, tables)\n for i, _ := range tableArray {\n tableArray[i] = strconv.Itoa(i + 1)\n }\n var total int = 0\n for {\n r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n comp = r.Intn(tables) + 1;\n compString = strconv.Itoa(comp)\n var numX int = 0\n total++\n red.Println(\"Random number is:\", compString)\n if tableArray[comp - 1] == compString {\n tableArray[comp - 1] = \"x\"\n } else {\n tableArray[comp - 1] = compString\n }\n red.Println(tableArray)\n for _, element := range tableArray {\n if element == \"x\" {\n numX++\n }\n }\n if numX >= tables - 1 {\n for _, element := range tableArray {\n if element != \"x\" {\n boldRed.Println(\"The lucky winner is\", element, \"chosen after\", total, \"rounds.\\n\")\n break\n }\n }\n break\n }\n time.Sleep(time.Duration(wait) * time.Millisecond)\n }\n}\n\nfunc avgFinder(tables int) int{\n var comp, total int = 0, 0\n var compString string\n tableArray := make([]string, tables)\n for i, _ := range tableArray {\n tableArray[i] = strconv.Itoa(i + 1)\n }\n for {\n r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n comp = r.Intn(tables) + 1;\n compString = strconv.Itoa(comp)\n var numX int = 0\n total++\n if tableArray[comp - 1] == compString {\n tableArray[comp - 1] = \"x\"\n } else {\n tableArray[comp - 1] = compString\n }\n for _, element := range tableArray {\n if element == \"x\" {\n numX++\n }\n }\n if numX >= tables - 1 {\n break\n }\n }\n return total\n}\n\n\/*\nAVERAGES:\n1: 0\n2: 1\n3: 2\n4: 6\n5: 11\n6: 19\n7: 34\n8: 57\n9: 95\n10: 164\n11: 283\n12: 494\n*\/\n<commit_msg>better error checking, using scanln instead of scanf<commit_after>package main\n\nimport (\n \"fmt\"\n \"math\/rand\"\n \"time\"\n \"strconv\"\n \"runtime\"\n \"sync\"\n \"log\"\n\n \"github.com\/fatih\/color\"\n)\nvar wg sync.WaitGroup\n\nvar re *color.Color = color.New(color.FgRed)\nvar red *color.Color = re.Add(color.BgBlack)\nvar boldRed *color.Color = red.Add(color.Bold)\n\nfunc main() {\n\n runtime.GOMAXPROCS(2)\n wg.Add(1)\n\n var mode, numTables, total, tries int = 0, 0, 0, 0\n var avg float32 = 0\n red.Println(\"\\n\\tGame modes:\\n1: Pick a random table\\n2: Find the avg tries it takes to solve a puzzle\\n\\nPlease choose 1 or 2: \")\n if _, err := fmt.Scanln(&mode); err != nil {\n log.Fatal(err)\n }\n\n if mode == 1 {\n picker()\n } else if mode == 2 {\n red.Print(\"How many tables would you like to calculate for: \")\n if _, err := fmt.Scanln(&numTables); err != nil {\n log.Fatal(err)\n }\n red.Print(\"How many times would you like to run the experiment: \")\n if _, err := fmt.Scanln(&tries); err != nil {\n log.Fatal(err)\n }\n fmt.Println(\"\\n\")\n go calc()\n go printAvg(numTables, total, avg, tries)\n wg.Wait()\n } else {\n red.Println(\"Goodbye!\")\n }\n}\n\nfunc printAvg(numTables int, total int, avg float32, tries int) {\n defer wg.Done()\n for i := 1; i <= tries; i++ {\n total += avgFinder(numTables)\n }\n \/\/a := color.New(color.FgRed, color.Bold)\n avg = float32(total)\/float32(tries)\n boldRed.Printf(\"\\rThe avg tries it took to solve %v tables was %v \\n\\n\", numTables, avg)\n}\n\nfunc calc() {\n for {\n for i := \"Calculating\";; i += \".\" {\n red.Printf(\"\\r%s\", i)\n time.Sleep(500 * time.Millisecond)\n }\n }\n}\n\nfunc printWelcome() {\n welcome := []string{\"\\nWelcome to Colin's Random Table Picker\", \"Tables are eliminated when number is drawn,\", \"And put back in the game when their number is drawn again.\", \"Last number left wins!\", \"Good Luck.\\n\"}\n for _, item := range welcome {\n red.Println(item)\n time.Sleep(400 * time.Millisecond)\n }\n}\n\nfunc picker() {\n var tables, comp, wait int\n var compString string\n printWelcome()\n for {\n red.Print(\"How many tables are playing: \")\n if _, err := fmt.Scanln(&tables); err != nil {\n log.Fatal(err)\n }\n red.Print(\"How many ms would you like to wait between each iteration: \")\n if _, err := fmt.Scanln(&wait); err != nil {\n log.Fatal(err)\n }\n if tables > 0 {\n break\n }\n }\n tableArray := make([]string, tables)\n for i, _ := range tableArray {\n tableArray[i] = strconv.Itoa(i + 1)\n }\n var total int = 0\n for {\n r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n comp = r.Intn(tables) + 1;\n compString = strconv.Itoa(comp)\n var numX int = 0\n total++\n red.Println(\"Random number is:\", compString)\n if tableArray[comp - 1] == compString {\n tableArray[comp - 1] = \"x\"\n } else {\n tableArray[comp - 1] = compString\n }\n red.Println(tableArray)\n for _, element := range tableArray {\n if element == \"x\" {\n numX++\n }\n }\n if numX >= tables - 1 {\n for _, element := range tableArray {\n if element != \"x\" {\n boldRed.Println(\"The lucky winner is\", element, \"chosen after\", total, \"rounds.\\n\")\n break\n }\n }\n break\n }\n time.Sleep(time.Duration(wait) * time.Millisecond)\n }\n}\n\nfunc avgFinder(tables int) int{\n var comp, total int = 0, 0\n var compString string\n tableArray := make([]string, tables)\n for i, _ := range tableArray {\n tableArray[i] = strconv.Itoa(i + 1)\n }\n for {\n r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n comp = r.Intn(tables) + 1;\n compString = strconv.Itoa(comp)\n var numX int = 0\n total++\n if tableArray[comp - 1] == compString {\n tableArray[comp - 1] = \"x\"\n } else {\n tableArray[comp - 1] = compString\n }\n for _, element := range tableArray {\n if element == \"x\" {\n numX++\n }\n }\n if numX >= tables - 1 {\n break\n }\n }\n return total\n}\n\n\/*\nAVERAGES:\n1: 0\n2: 1\n3: 2\n4: 6\n5: 11\n6: 19\n7: 34\n8: 57\n9: 95\n10: 164\n11: 283\n12: 494\n*\/\n<|endoftext|>"} {"text":"<commit_before>package msgpack\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/vmihailenco\/msgpack\/v5\/msgpcode\"\n)\n\nfunc encodeMapValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\n\tif err := e.EncodeMapLen(v.Len()); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, key := range v.MapKeys() {\n\t\tif err := e.EncodeValue(key); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.EncodeValue(v.MapIndex(key)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeMapStringStringValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\n\tif err := e.EncodeMapLen(v.Len()); err != nil {\n\t\treturn err\n\t}\n\n\tm := v.Convert(mapStringStringType).Interface().(map[string]string)\n\tif e.flags&sortMapKeysFlag != 0 {\n\t\treturn e.encodeSortedMapStringString(m)\n\t}\n\n\tfor mk, mv := range m {\n\t\tif err := e.EncodeString(mk); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.EncodeString(mv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\tm := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{})\n\tif e.flags&sortMapKeysFlag != 0 {\n\t\treturn e.EncodeMapSorted(m)\n\t}\n\treturn e.EncodeMap(m)\n}\n\nfunc (e *Encoder) EncodeMap(m map[string]interface{}) error {\n\tif m == nil {\n\t\treturn e.EncodeNil()\n\t}\n\tif err := e.EncodeMapLen(len(m)); err != nil {\n\t\treturn err\n\t}\n\tfor mk, mv := range m {\n\t\tif err := e.EncodeString(mk); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.Encode(mv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *Encoder) EncodeMapSorted(m map[string]interface{}) error {\n\tif m == nil {\n\t\treturn e.EncodeNil()\n\t}\n\tif err := e.EncodeMapLen(len(m)); err != nil {\n\t\treturn err\n\t}\n\n\tkeys := make([]string, 0, len(m))\n\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tif err := e.EncodeString(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.Encode(m[k]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *Encoder) encodeSortedMapStringString(m map[string]string) error {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\terr := e.EncodeString(k)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = e.EncodeString(m[k]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *Encoder) EncodeMapLen(l int) error {\n\tif l < 16 {\n\t\treturn e.writeCode(msgpcode.FixedMapLow | byte(l))\n\t}\n\tif l <= math.MaxUint16 {\n\t\treturn e.write2(msgpcode.Map16, uint16(l))\n\t}\n\treturn e.write4(msgpcode.Map32, uint32(l))\n}\n\nfunc encodeStructValue(e *Encoder, strct reflect.Value) error {\n\tstructFields := structs.Fields(strct.Type(), e.structTag)\n\tif e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray {\n\t\treturn encodeStructValueAsArray(e, strct, structFields.List)\n\t}\n\tfields := structFields.OmitEmpty(strct, e.flags&omitEmptyFlag != 0)\n\n\tif err := e.EncodeMapLen(len(fields)); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range fields {\n\t\tif err := e.EncodeString(f.name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := f.EncodeValue(e, strct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error {\n\tif err := e.EncodeArrayLen(len(fields)); err != nil {\n\t\treturn err\n\t}\n\tfor _, f := range fields {\n\t\tif err := f.EncodeValue(e, strct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Use MapRange for map iteration<commit_after>package msgpack\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/vmihailenco\/msgpack\/v5\/msgpcode\"\n)\n\nfunc encodeMapValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\n\tif err := e.EncodeMapLen(v.Len()); err != nil {\n\t\treturn err\n\t}\n\n\titer := v.MapRange()\n\tfor iter.Next() {\n\t\tif err := e.EncodeValue(iter.Key()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.EncodeValue(iter.Value()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeMapStringStringValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\n\tif err := e.EncodeMapLen(v.Len()); err != nil {\n\t\treturn err\n\t}\n\n\tm := v.Convert(mapStringStringType).Interface().(map[string]string)\n\tif e.flags&sortMapKeysFlag != 0 {\n\t\treturn e.encodeSortedMapStringString(m)\n\t}\n\n\tfor mk, mv := range m {\n\t\tif err := e.EncodeString(mk); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.EncodeString(mv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn e.EncodeNil()\n\t}\n\tm := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{})\n\tif e.flags&sortMapKeysFlag != 0 {\n\t\treturn e.EncodeMapSorted(m)\n\t}\n\treturn e.EncodeMap(m)\n}\n\nfunc (e *Encoder) EncodeMap(m map[string]interface{}) error {\n\tif m == nil {\n\t\treturn e.EncodeNil()\n\t}\n\tif err := e.EncodeMapLen(len(m)); err != nil {\n\t\treturn err\n\t}\n\tfor mk, mv := range m {\n\t\tif err := e.EncodeString(mk); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.Encode(mv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *Encoder) EncodeMapSorted(m map[string]interface{}) error {\n\tif m == nil {\n\t\treturn e.EncodeNil()\n\t}\n\tif err := e.EncodeMapLen(len(m)); err != nil {\n\t\treturn err\n\t}\n\n\tkeys := make([]string, 0, len(m))\n\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tif err := e.EncodeString(k); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.Encode(m[k]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *Encoder) encodeSortedMapStringString(m map[string]string) error {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\terr := e.EncodeString(k)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = e.EncodeString(m[k]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *Encoder) EncodeMapLen(l int) error {\n\tif l < 16 {\n\t\treturn e.writeCode(msgpcode.FixedMapLow | byte(l))\n\t}\n\tif l <= math.MaxUint16 {\n\t\treturn e.write2(msgpcode.Map16, uint16(l))\n\t}\n\treturn e.write4(msgpcode.Map32, uint32(l))\n}\n\nfunc encodeStructValue(e *Encoder, strct reflect.Value) error {\n\tstructFields := structs.Fields(strct.Type(), e.structTag)\n\tif e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray {\n\t\treturn encodeStructValueAsArray(e, strct, structFields.List)\n\t}\n\tfields := structFields.OmitEmpty(strct, e.flags&omitEmptyFlag != 0)\n\n\tif err := e.EncodeMapLen(len(fields)); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range fields {\n\t\tif err := e.EncodeString(f.name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := f.EncodeValue(e, strct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error {\n\tif err := e.EncodeArrayLen(len(fields)); err != nil {\n\t\treturn err\n\t}\n\tfor _, f := range fields {\n\t\tif err := f.EncodeValue(e, strct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package emil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\nvar errNowNewAnalysis = errors.New(\"errNowNewAnalysis\")\n\ntype Analysis struct {\n\tdtm int \/\/ Depth to mate\n\n\tboard *Board\n\tmove *Move\n}\n\nfunc (a *Analysis) Move() *Move {\n\treturn a.move\n}\nfunc (a *Analysis) Board() *Board {\n\treturn a.board\n}\n\n\/\/ EndGameDb to query for mate in 1,2, etc.\ntype EndGameDb struct {\n\tpositionDb map[string]*Analysis\n\n\tdtmDb []map[string]bool\n\n\tsearchedPositions int\n}\n\nfunc (db *EndGameDb) Find(board *Board) (bestMove *Move) {\n\tif DEBUG {\n\t\tfmt.Printf(\"Find:\\n%s\\n\", board.String())\n\t}\n\ta := db.positionDb[board.String()]\n\tif DEBUG {\n\t\tfmt.Printf(\"Found: positionDb with dtm %d\\n\", a.dtm)\n\t}\n\treturn a.move\n}\nfunc (db *EndGameDb) FindMatesIn(dtm int) (as []*Analysis) {\n\tif dtm == -1 {\n\t\tfor _, a := range db.positionDb {\n\t\t\tif a.dtm == -1 {\n\t\t\t\tas = append(as, a)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor str := range db.dtmDb[dtm] {\n\t\t\tas = append(as, db.positionDb[str])\n\t\t}\n\t}\n\treturn as\n}\n\nfunc (db *EndGameDb) FindMates() (as []*Analysis) {\n\treturn db.FindMatesIn(0)\n}\n\nfunc (db *EndGameDb) FindMate(piece, square int) (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tif a.board.squares[square] == piece {\n\t\t\tboards = append(boards, a.board)\n\t\t}\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) addPosition(board *Board) {\n\tdb.addAnalysis(board, -1, nil)\n}\n\nfunc (db *EndGameDb) addAnalysis(board *Board, dtm int, move *Move) {\n\ta := &Analysis{\n\t\tdtm: dtm,\n\t\tboard: board}\n\tif move != nil {\n\t\ta.move = move.reverse()\n\t}\n\tif dtm >= 0 {\n\t\tif move != nil {\n\t\t\tplayerForStep := playerForStepN(dtm)\n\t\t\tif playerForStep != move.player {\n\t\t\t\tpanic(\"playerForStep != move.player\")\n\t\t\t}\n\t\t}\n\n\t\tdb.dtmDb[dtm][a.board.String()] = true\n\t}\n\n\tdb.positionDb[a.board.String()] = a\n}\n\nfunc (db *EndGameDb) positions() int {\n\treturn len(db.positionDb)\n}\n\n\/\/ find positions where black is checkmate\nfunc (db *EndGameDb) retrogradeAnalysisStep1() {\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tstart := time.Now()\n\n\tplayer := BLACK\n\tfor boardStr, a := range db.positionDb {\n\t\t\/\/ mate only on border square\n\t\tblackKingSquare := BoardSquares[a.board.blackKing]\n\t\tif !blackKingSquare.isBorder {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ mate only with help from king\n\t\tif squaresDistances[a.board.blackKing][a.board.whiteKing] > 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tp := NewPosition(a.board, player)\n\n\t\tmove := Search(p)\n\t\tdb.searchedPositions++\n\t\tif move == nil {\n\t\t\tif isKingInCheck(p) {\n\t\t\t\ta.dtm = 0\n\t\t\t\tdb.addAnalysis(a.board, 0, nil)\n\t\t\t\tif DEBUG {\n\t\t\t\t\tfmt.Printf(\"mate:\\n%s\\n\", boardStr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"searchedPositions %d\\n\", db.searchedPositions)\n\t\tfmt.Printf(\"db.dtmDb[0] %d\\n\", len(db.dtmDb[0]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n}\nfunc playerForStepN(dtm int) (player int) {\n\tif dtm%2 == 0 {\n\t\treturn BLACK\n\t}\n\treturn WHITE\n}\n\nfunc (db *EndGameDb) retrogradeAnalysisStepN(dtm int) (noError error) {\n\tstart := time.Now()\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tplayer := playerForStepN(dtm)\n\n\tpositions := 0\n\tif player == WHITE {\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn1357(a.board, dtm) >= 0 {\n\t\t\t\tpositions++\n\t\t\t}\n\t\t}\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"WHITE Start positions %d\\n\", len(db.positionDb)-positions)\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn1357(a.board, dtm) >= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif IsTheKingInCheck(NewPosition(a.board, WHITE)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tmoves := generateMoves(p)\n\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewDtm := db.isMateIn0246(newBoard, dtm)\n\t\t\t\tif newDtm == dtm-1 {\n\t\t\t\t\tdb.addAnalysis(a.board, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Suche alle Stellungen, bei denen Schwarz am Zug ist und\n\t\t\/\/ **jeder** Zug von ihm zu einer Stellung unter 2. führt.\n\t\t\/\/ Schwarz kann hier Matt in einem Zug nicht verhindern.\n\t\t\/\/ Markiere diese Stellungen in der Datei.\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn0246(a.board, dtm) >= 0 {\n\t\t\t\tpositions++\n\t\t\t}\n\t\t}\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"BLACK Start positions %d\\n\", len(db.positionDb)-positions)\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn0246(a.board, dtm) >= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tmoves := GenerateMoves(p)\n\n\t\t\tfound := 0\n\t\t\tmaxDTM := -1\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewDtm := db.isMateIn1357(newBoard, dtm)\n\t\t\t\tif newDtm > maxDTM {\n\t\t\t\t\tmaxDTM = newDtm\n\t\t\t\t}\n\t\t\t\tif db.isMateIn1357(newBoard, dtm) >= 0 {\n\t\t\t\t\tfound++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif found == len(moves) {\n\t\t\t\tfor _, m := range moves {\n\t\t\t\t\tdb.addAnalysis(a.board, maxDTM+1, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\n\tif DEBUG {\n\t\tfmt.Printf(\"db.dtmDb[%d] %d\\n\", dtm, len(db.dtmDb[dtm]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n\n\tif len(db.dtmDb[dtm]) == 0 {\n\t\treturn errNowNewAnalysis\n\t}\n\treturn noError\n}\nfunc (db *EndGameDb) isMateIn0246(board *Board, maxDtm int) int {\n\tfor dtm := 0; dtm < maxDtm; dtm += 2 {\n\t\t_, ok := db.dtmDb[dtm][board.String()]\n\t\tif ok {\n\t\t\treturn dtm\n\t\t}\n\t}\n\treturn -1\n}\nfunc (db *EndGameDb) isMateIn1357(board *Board, maxDtm int) int {\n\tfor dtm := 1; dtm < maxDtm; dtm += 2 {\n\t\t_, ok := db.dtmDb[dtm][board.String()]\n\t\tif ok {\n\t\t\treturn dtm\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (db *EndGameDb) MaxDtm() int {\n\treturn len(db.dtmDb)\n}\n\nfunc (db *EndGameDb) retrogradeAnalysis() {\n\t\/\/ find positions where black is checkmate\n\tdb.retrogradeAnalysisStep1()\n\tdtm := 1\n\tfor {\n\t\terr := db.retrogradeAnalysisStepN(dtm)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdtm++\n\t}\n}\nfunc GenerateMoves(p *position) (list []*Move) {\n\tfor _, m := range generateMoves(p) {\n\t\tb := p.board.DoMove(m)\n\t\tif !IsTheKingInCheck(NewPosition(b, WHITE)) {\n\t\t\tlist = append(list, m)\n\t\t}\n\t}\n\treturn list\n}\nfunc generateMoves(p *position) (list []*Move) {\n\tfor src, piece := range p.board.squares {\n\t\tif isOwnPiece(p.player, piece) {\n\t\t\tswitch abs(piece) {\n\t\t\tcase kingValue:\n\t\t\t\tfor _, dst := range kingDestinationsFrom(src) {\n\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase rockValue:\n\t\t\t\tfor _, dsts := range rockDestinationsFrom(src) {\n\t\t\t\t\tfor _, dst := range dsts {\n\t\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak \/\/ onOwnPiece\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\n\/\/ NewEndGameDb generates an end game DB for KRK\nfunc NewEndGameDb() *EndGameDb {\n\tvar err error\n\tstart := time.Now()\n\n\tendGames := &EndGameDb{\n\t\tpositionDb: make(map[string]*Analysis),\n\t\tdtmDb: make([]map[string]bool, 0)}\n\n\tfor wk := A1; wk <= H8; wk++ {\n\t\t\/\/for wk := E3; wk <= E3; wk++ {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"White king on %s\\n\", BoardSquares[wk])\n\t\t}\n\t\tfor wr := A1; wr <= H8; wr++ {\n\t\t\tfor bk := A1; bk <= H8; bk++ {\n\n\t\t\t\tboard := NewBoard()\n\n\t\t\t\terr = board.Setup(WhiteKing, wk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(WhiteRock, wr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(BlackKing, bk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.kingsToClose()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tendGames.addPosition(board)\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"all positions %d\\n\", 64*63*62)\n\t\tfmt.Printf(\"endGames.positions() %d\\n\", endGames.positions())\n\t\tfmt.Printf(\"difference %d\\n\", 64*63*62-endGames.positions())\n\t\tfmt.Printf(\"duration %v\\n\", end.Sub(start))\n\t}\n\tendGames.retrogradeAnalysis()\n\n\treturn endGames\n}\n<commit_msg>refactor retrogradeAnalysisStepN<commit_after>package emil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\nvar errNowNewAnalysis = errors.New(\"errNowNewAnalysis\")\n\ntype Analysis struct {\n\tdtm int \/\/ Depth to mate\n\n\tboard *Board\n\tmove *Move\n}\n\nfunc (a *Analysis) Move() *Move {\n\treturn a.move\n}\nfunc (a *Analysis) Board() *Board {\n\treturn a.board\n}\n\n\/\/ EndGameDb to query for mate in 1,2, etc.\ntype EndGameDb struct {\n\tpositionDb map[string]*Analysis\n\n\tdtmDb []map[string]bool\n\n\tsearchedPositions int\n}\n\nfunc (db *EndGameDb) Find(board *Board) (bestMove *Move) {\n\tif DEBUG {\n\t\tfmt.Printf(\"Find:\\n%s\\n\", board.String())\n\t}\n\ta := db.positionDb[board.String()]\n\tif DEBUG {\n\t\tfmt.Printf(\"Found: positionDb with dtm %d\\n\", a.dtm)\n\t}\n\treturn a.move\n}\nfunc (db *EndGameDb) FindMatesIn(dtm int) (as []*Analysis) {\n\tif dtm == -1 {\n\t\tfor _, a := range db.positionDb {\n\t\t\tif a.dtm == -1 {\n\t\t\t\tas = append(as, a)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor str := range db.dtmDb[dtm] {\n\t\t\tas = append(as, db.positionDb[str])\n\t\t}\n\t}\n\treturn as\n}\n\nfunc (db *EndGameDb) FindMates() (as []*Analysis) {\n\treturn db.FindMatesIn(0)\n}\n\nfunc (db *EndGameDb) FindMate(piece, square int) (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tif a.board.squares[square] == piece {\n\t\t\tboards = append(boards, a.board)\n\t\t}\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) addPosition(board *Board) {\n\tdb.addAnalysis(board, -1, nil)\n}\n\nfunc (db *EndGameDb) addAnalysis(board *Board, dtm int, move *Move) {\n\ta := &Analysis{\n\t\tdtm: dtm,\n\t\tboard: board}\n\tif move != nil {\n\t\ta.move = move.reverse()\n\t}\n\tif dtm >= 0 {\n\t\tif move != nil {\n\t\t\tplayerForStep := playerForStepN(dtm)\n\t\t\tif playerForStep != move.player {\n\t\t\t\tpanic(\"playerForStep != move.player\")\n\t\t\t}\n\t\t}\n\n\t\tdb.dtmDb[dtm][a.board.String()] = true\n\t}\n\n\tdb.positionDb[a.board.String()] = a\n}\n\nfunc (db *EndGameDb) positions() int {\n\treturn len(db.positionDb)\n}\n\n\/\/ find positions where black is checkmate\nfunc (db *EndGameDb) retrogradeAnalysisStep1() {\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tstart := time.Now()\n\n\tplayer := BLACK\n\tfor boardStr, a := range db.positionDb {\n\t\t\/\/ mate only on border square\n\t\tblackKingSquare := BoardSquares[a.board.blackKing]\n\t\tif !blackKingSquare.isBorder {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ mate only with help from king\n\t\tif squaresDistances[a.board.blackKing][a.board.whiteKing] > 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tp := NewPosition(a.board, player)\n\n\t\tmove := Search(p)\n\t\tdb.searchedPositions++\n\t\tif move == nil {\n\t\t\tif isKingInCheck(p) {\n\t\t\t\ta.dtm = 0\n\t\t\t\tdb.addAnalysis(a.board, 0, nil)\n\t\t\t\tif DEBUG {\n\t\t\t\t\tfmt.Printf(\"mate:\\n%s\\n\", boardStr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"searchedPositions %d\\n\", db.searchedPositions)\n\t\tfmt.Printf(\"db.dtmDb[0] %d\\n\", len(db.dtmDb[0]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n}\nfunc playerForStepN(dtm int) (player int) {\n\tif dtm%2 == 0 {\n\t\treturn BLACK\n\t}\n\treturn WHITE\n}\n\nfunc (db *EndGameDb) retrogradeAnalysisStepN(dtm int) (noError error) {\n\tstart := time.Now()\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tplayer := playerForStepN(dtm)\n\n\tpositions := 0\n\tif player == WHITE {\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn1357(a.board, dtm) >= 0 {\n\t\t\t\tpositions++\n\t\t\t}\n\t\t}\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"WHITE Start positions %d\\n\", len(db.positionDb)-positions)\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn1357(a.board, dtm) >= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tif IsTheKingInCheck(p) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmoves := generateMoves(p)\n\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewDtm := db.isMateIn0246(newBoard, dtm)\n\t\t\t\tif newDtm == dtm-1 {\n\t\t\t\t\tdb.addAnalysis(a.board, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Suche alle Stellungen, bei denen Schwarz am Zug ist und\n\t\t\/\/ **jeder** Zug von ihm zu einer Stellung unter 2. führt.\n\t\t\/\/ Schwarz kann hier Matt in einem Zug nicht verhindern.\n\t\t\/\/ Markiere diese Stellungen in der Datei.\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn0246(a.board, dtm) >= 0 {\n\t\t\t\tpositions++\n\t\t\t}\n\t\t}\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"BLACK Start positions %d\\n\", len(db.positionDb)-positions)\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tif db.isMateIn0246(a.board, dtm) >= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tmoves := GenerateMoves(p)\n\n\t\t\tfound := 0\n\t\t\tmaxDTM := -1\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewDtm := db.isMateIn1357(newBoard, dtm)\n\t\t\t\tif newDtm > maxDTM {\n\t\t\t\t\tmaxDTM = newDtm\n\t\t\t\t}\n\t\t\t\tif db.isMateIn1357(newBoard, dtm) >= 0 {\n\t\t\t\t\tfound++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif found == len(moves) {\n\t\t\t\tfor _, m := range moves {\n\t\t\t\t\tdb.addAnalysis(a.board, maxDTM+1, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\n\tif DEBUG {\n\t\tfmt.Printf(\"db.dtmDb[%d] %d\\n\", dtm, len(db.dtmDb[dtm]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n\n\tif len(db.dtmDb[dtm]) == 0 {\n\t\treturn errNowNewAnalysis\n\t}\n\treturn noError\n}\nfunc (db *EndGameDb) isMateIn0246(board *Board, maxDtm int) int {\n\tfor dtm := 0; dtm < maxDtm; dtm += 2 {\n\t\t_, ok := db.dtmDb[dtm][board.String()]\n\t\tif ok {\n\t\t\treturn dtm\n\t\t}\n\t}\n\treturn -1\n}\nfunc (db *EndGameDb) isMateIn1357(board *Board, maxDtm int) int {\n\tfor dtm := 1; dtm < maxDtm; dtm += 2 {\n\t\t_, ok := db.dtmDb[dtm][board.String()]\n\t\tif ok {\n\t\t\treturn dtm\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc (db *EndGameDb) MaxDtm() int {\n\treturn len(db.dtmDb)\n}\n\nfunc (db *EndGameDb) retrogradeAnalysis() {\n\t\/\/ find positions where black is checkmate\n\tdb.retrogradeAnalysisStep1()\n\tdtm := 1\n\tfor {\n\t\terr := db.retrogradeAnalysisStepN(dtm)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdtm++\n\t}\n}\nfunc GenerateMoves(p *position) (list []*Move) {\n\tfor _, m := range generateMoves(p) {\n\t\tb := p.board.DoMove(m)\n\t\tif !IsTheKingInCheck(NewPosition(b, WHITE)) {\n\t\t\tlist = append(list, m)\n\t\t}\n\t}\n\treturn list\n}\nfunc generateMoves(p *position) (list []*Move) {\n\tfor src, piece := range p.board.squares {\n\t\tif isOwnPiece(p.player, piece) {\n\t\t\tswitch abs(piece) {\n\t\t\tcase kingValue:\n\t\t\t\tfor _, dst := range kingDestinationsFrom(src) {\n\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase rockValue:\n\t\t\t\tfor _, dsts := range rockDestinationsFrom(src) {\n\t\t\t\t\tfor _, dst := range dsts {\n\t\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak \/\/ onOwnPiece\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\n\/\/ NewEndGameDb generates an end game DB for KRK\nfunc NewEndGameDb() *EndGameDb {\n\tvar err error\n\tstart := time.Now()\n\n\tendGames := &EndGameDb{\n\t\tpositionDb: make(map[string]*Analysis),\n\t\tdtmDb: make([]map[string]bool, 0)}\n\n\tfor wk := A1; wk <= H8; wk++ {\n\t\t\/\/for wk := E3; wk <= E3; wk++ {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"White king on %s\\n\", BoardSquares[wk])\n\t\t}\n\t\tfor wr := A1; wr <= H8; wr++ {\n\t\t\tfor bk := A1; bk <= H8; bk++ {\n\n\t\t\t\tboard := NewBoard()\n\n\t\t\t\terr = board.Setup(WhiteKing, wk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(WhiteRock, wr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(BlackKing, bk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.kingsToClose()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tendGames.addPosition(board)\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"all positions %d\\n\", 64*63*62)\n\t\tfmt.Printf(\"endGames.positions() %d\\n\", endGames.positions())\n\t\tfmt.Printf(\"difference %d\\n\", 64*63*62-endGames.positions())\n\t\tfmt.Printf(\"duration %v\\n\", end.Sub(start))\n\t}\n\tendGames.retrogradeAnalysis()\n\n\treturn endGames\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\n\t\/\/ \"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/looyun\/feedall\/models\"\n\t\"github.com\/mmcdole\/gofeed\"\n\n\t\"gopkg.in\/macaron.v1\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar TelegramBotToken string = \"123456\"\n\nfunc InsertFeedAndUpdateItems(feedurl string) error {\n\t\/\/ parse feed url\n\tfp := gofeed.NewParser()\n\tparsedFeed, err := fp.ParseURL(feedurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbsonFeed, err := bson.Marshal(parsedFeed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Insert feed\n\tfeed := &models.Feed{}\n\terr = bson.Unmarshal(bsonFeed, &feed)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfeed.ID = bson.NewObjectId()\n\terr = models.Insert(models.Feeds, feed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update items\n\tvar data struct {\n\t\tItems []models.Item `bson:\"items\"`\n\t}\n\terr = bson.Unmarshal(bsonFeed, &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range data.Items {\n\t\titem.FeedID = feed.ID\n\t\t_, err := models.Upsert(models.Items,\n\t\t\tbson.M{\n\t\t\t\t\"$and\": []bson.M{\n\t\t\t\t\tbson.M{\"feedID\": feed.ID}, bson.M{\"link\": item.Link}}},\n\t\t\titem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc UpdateItems(feed models.Feed) error {\n\n\tfeedurl := feed.FeedURL\n\n\t\/\/ parse feed url\n\tfp := gofeed.NewParser()\n\tparsedFeed, err := fp.ParseURL(feedurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbsonFeed, err := bson.Marshal(parsedFeed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar data struct {\n\t\tItems []models.Item `bson:\"items\"`\n\t}\n\terr = bson.Unmarshal(bsonFeed, &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, item := range data.Items {\n\t\titem.FeedID = feed.ID\n\t\t_, err := models.Upsert(models.Items,\n\t\t\tbson.M{\n\t\t\t\t\"$and\": []bson.M{\n\t\t\t\t\tbson.M{\"feedID\": feed.ID}, bson.M{\"link\": item.Link}}},\n\t\t\titem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetFeeds get feeds sort by subscribeCount.\nfunc GetFeeds(c *macaron.Context) interface{} {\n\n\tpage := c.QueryInt(\"page\")\n\tif page > 0 {\n\t\tpage--\n\t}\n\tperPage := c.QueryInt(\"per_page\")\n\tif perPage == 0 {\n\t\tperPage = 30\n\t}\n\tif perPage > 100 {\n\t\tperPage = 100\n\t}\n\tfeeds := []bson.M{}\n\terr := models.Feeds.Find(bson.M{}).Sort(\"-subscribeCount\").Skip(page * perPage).Limit(perPage).All(&feeds)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn feeds\n}\n\n\/\/ GetFeeds get feeds sort by subscribeCount.\nfunc GetFeedItems(c *macaron.Context) interface{} {\n\n\tpage := c.QueryInt(\"page\")\n\tif page > 0 {\n\t\tpage--\n\t}\n\tperPage := c.QueryInt(\"per_page\")\n\tif perPage == 0 {\n\t\tperPage = 30\n\t}\n\tif perPage > 100 {\n\t\tperPage = 100\n\t}\n\n\tfeedID := c.Params(\":id\")\n\n\titems := []bson.M{}\n\terr := models.Items.Find(bson.M{\"feedID\": feedID}).Sort(\"-publishedParsed\").Skip(perPage * page).Limit(perPage).All(&items)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n}\nfunc GetFeed(c *macaron.Context) interface{} {\n\tid := c.Params(\":id\")\n\tfeed := bson.M{}\n\terr := models.FindOne(models.Feeds,\n\t\tbson.M{\"_id\": id},\n\t\t&feed)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn feed\n}\n\nfunc GetItems(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.FindSortLimit(models.Items,\n\t\tbson.M{}, \"-starCount\", n,\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n}\nfunc GetItem(c *macaron.Context) interface{} {\n\tid := c.Params(\":id\")\n\titem := bson.M{}\n\terr := models.FindOne(models.Items,\n\t\tbson.M{\"_id\": id},\n\t\t&item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn item\n\n}\n\nfunc GetRandomItem(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.PipeAll(models.Items, []bson.M{{\"$sample\": bson.M{\"size\": n}}},\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n\n}\nfunc GetLatestItem(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.FindSortLimit(models.Items,\n\t\tbson.M{}, \"-publishedParsed\", n,\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n\n}\n\nfunc StandarURL(s string) string {\n\tif !strings.HasSuffix(s, \"\/\") {\n\t\ts = s + \"\/\"\n\t}\n\treturn s\n}\n\nfunc DecodeImg(str string, link string) string {\n\tstr = strings.Replace(str, \""\", \"\\\"\", -1)\n\tstr = strings.Replace(str, \"src=\\\"\/\", \"src=\\\"\"+link+\"\/\", -1)\n\treturn str\n}\n\nfunc ParseDate(t string) (then time.Time) {\n\n\tif len(t) >= 25 {\n\t\tif strings.HasSuffix(t, \"0000\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 +0000\", t)\n\t\t} else if strings.HasSuffix(t, \"GMT\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 GMT\", t)\n\t\t} else if strings.HasSuffix(t, \"UTC\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 UTC\", t)\n\t\t} else if strings.HasSuffix(t, \"CST\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 CST\", t)\n\t\t} else if strings.HasSuffix(t, \"0400\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 -0400\", t)\n\t\t} else if strings.HasSuffix(t, \"Z\") {\n\t\t\tthen, _ = time.Parse(time.RFC3339, t)\n\t\t} else if strings.HasSuffix(t, \"0800\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 +0800\", t)\n\t\t}\n\t} else {\n\t\tif strings.HasSuffix(t, \"0000\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 +0000\", t)\n\t\t} else if strings.HasSuffix(t, \"GMT\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 GMT\", t)\n\t\t} else if strings.HasSuffix(t, \"UTC\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 UTC\", t)\n\t\t} else if strings.HasSuffix(t, \"CST\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 CST\", t)\n\t\t} else if strings.HasSuffix(t, \"0400\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 -0400\", t)\n\t\t} else if strings.HasSuffix(t, \"Z\") {\n\t\t\tthen, _ = time.Parse(time.RFC3339, t)\n\t\t} else if strings.HasSuffix(t, \"0800\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 +0800\", t)\n\t\t}\n\t}\n\treturn then\n}\n<commit_msg>Upsert feed with feedurl<commit_after>package controllers\n\nimport (\n\n\t\/\/ \"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/looyun\/feedall\/models\"\n\t\"github.com\/mmcdole\/gofeed\"\n\n\t\"gopkg.in\/macaron.v1\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar TelegramBotToken string = \"123456\"\n\nfunc InsertFeedAndUpdateItems(feedurl string) error {\n\t\/\/ parse feed url\n\tfp := gofeed.NewParser()\n\tparsedFeed, err := fp.ParseURL(feedurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbsonFeed, err := bson.Marshal(parsedFeed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Insert feed\n\tfeed := &models.Feed{}\n\terr = bson.Unmarshal(bsonFeed, &feed)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfeed.FeedURL = feedurl\n\tfeed.ID = bson.NewObjectId()\n\t_, err = models.Upsert(models.Feeds,\n\t\tbson.M{\"feedURL\": feedurl},\n\t\tfeed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update items\n\tvar data struct {\n\t\tItems []models.Item `bson:\"items\"`\n\t}\n\terr = bson.Unmarshal(bsonFeed, &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range data.Items {\n\t\titem.FeedID = feed.ID\n\t\t_, err := models.Upsert(models.Items,\n\t\t\tbson.M{\n\t\t\t\t\"$and\": []bson.M{\n\t\t\t\t\tbson.M{\"feedID\": feed.ID}, bson.M{\"link\": item.Link}}},\n\t\t\titem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc UpdateItems(feed models.Feed) error {\n\n\tfeedurl := feed.FeedURL\n\n\t\/\/ parse feed url\n\tfp := gofeed.NewParser()\n\tparsedFeed, err := fp.ParseURL(feedurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbsonFeed, err := bson.Marshal(parsedFeed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar data struct {\n\t\tItems []models.Item `bson:\"items\"`\n\t}\n\terr = bson.Unmarshal(bsonFeed, &data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, item := range data.Items {\n\t\titem.FeedID = feed.ID\n\t\t_, err := models.Upsert(models.Items,\n\t\t\tbson.M{\n\t\t\t\t\"$and\": []bson.M{\n\t\t\t\t\tbson.M{\"feedID\": feed.ID}, bson.M{\"link\": item.Link}}},\n\t\t\titem)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetFeeds get feeds sort by subscribeCount.\nfunc GetFeeds(c *macaron.Context) interface{} {\n\n\tpage := c.QueryInt(\"page\")\n\tif page > 0 {\n\t\tpage--\n\t}\n\tperPage := c.QueryInt(\"per_page\")\n\tif perPage == 0 {\n\t\tperPage = 30\n\t}\n\tif perPage > 100 {\n\t\tperPage = 100\n\t}\n\tfeeds := []bson.M{}\n\terr := models.Feeds.Find(bson.M{}).Sort(\"-subscribeCount\").Skip(page * perPage).Limit(perPage).All(&feeds)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn feeds\n}\n\n\/\/ GetFeeds get feeds sort by subscribeCount.\nfunc GetFeedItems(c *macaron.Context) interface{} {\n\n\tpage := c.QueryInt(\"page\")\n\tif page > 0 {\n\t\tpage--\n\t}\n\tperPage := c.QueryInt(\"per_page\")\n\tif perPage == 0 {\n\t\tperPage = 30\n\t}\n\tif perPage > 100 {\n\t\tperPage = 100\n\t}\n\n\tfeedID := c.Params(\":id\")\n\n\titems := []bson.M{}\n\terr := models.Items.Find(bson.M{\"feedID\": feedID}).Sort(\"-publishedParsed\").Skip(perPage * page).Limit(perPage).All(&items)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n}\nfunc GetFeed(c *macaron.Context) interface{} {\n\tid := c.Params(\":id\")\n\tfeed := bson.M{}\n\terr := models.FindOne(models.Feeds,\n\t\tbson.M{\"_id\": id},\n\t\t&feed)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn feed\n}\n\nfunc GetItems(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.FindSortLimit(models.Items,\n\t\tbson.M{}, \"-starCount\", n,\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n}\nfunc GetItem(c *macaron.Context) interface{} {\n\tid := c.Params(\":id\")\n\titem := bson.M{}\n\terr := models.FindOne(models.Items,\n\t\tbson.M{\"_id\": id},\n\t\t&item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn item\n\n}\n\nfunc GetRandomItem(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.PipeAll(models.Items, []bson.M{{\"$sample\": bson.M{\"size\": n}}},\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n\n}\nfunc GetLatestItem(c *macaron.Context, n int) interface{} {\n\tif n > 100 {\n\t\treturn nil\n\t}\n\titems := []bson.M{}\n\terr := models.FindSortLimit(models.Items,\n\t\tbson.M{}, \"-publishedParsed\", n,\n\t\t&items)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn items\n\n}\n\nfunc StandarURL(s string) string {\n\tif !strings.HasSuffix(s, \"\/\") {\n\t\ts = s + \"\/\"\n\t}\n\treturn s\n}\n\nfunc DecodeImg(str string, link string) string {\n\tstr = strings.Replace(str, \""\", \"\\\"\", -1)\n\tstr = strings.Replace(str, \"src=\\\"\/\", \"src=\\\"\"+link+\"\/\", -1)\n\treturn str\n}\n\nfunc ParseDate(t string) (then time.Time) {\n\n\tif len(t) >= 25 {\n\t\tif strings.HasSuffix(t, \"0000\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 +0000\", t)\n\t\t} else if strings.HasSuffix(t, \"GMT\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 GMT\", t)\n\t\t} else if strings.HasSuffix(t, \"UTC\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 UTC\", t)\n\t\t} else if strings.HasSuffix(t, \"CST\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 CST\", t)\n\t\t} else if strings.HasSuffix(t, \"0400\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 -0400\", t)\n\t\t} else if strings.HasSuffix(t, \"Z\") {\n\t\t\tthen, _ = time.Parse(time.RFC3339, t)\n\t\t} else if strings.HasSuffix(t, \"0800\") {\n\t\t\tthen, _ = time.Parse(\"Mon, 02 Jan 2006 15:04:05 +0800\", t)\n\t\t}\n\t} else {\n\t\tif strings.HasSuffix(t, \"0000\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 +0000\", t)\n\t\t} else if strings.HasSuffix(t, \"GMT\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 GMT\", t)\n\t\t} else if strings.HasSuffix(t, \"UTC\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 UTC\", t)\n\t\t} else if strings.HasSuffix(t, \"CST\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 CST\", t)\n\t\t} else if strings.HasSuffix(t, \"0400\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 -0400\", t)\n\t\t} else if strings.HasSuffix(t, \"Z\") {\n\t\t\tthen, _ = time.Parse(time.RFC3339, t)\n\t\t} else if strings.HasSuffix(t, \"0800\") {\n\t\t\tthen, _ = time.Parse(\"02 Jan 06 15:04 +0800\", t)\n\t\t}\n\t}\n\treturn then\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\n\t\"github.com\/UniversityRadioYork\/2016-site\/models\"\n\t\"github.com\/UniversityRadioYork\/2016-site\/structs\"\n\t\"github.com\/UniversityRadioYork\/2016-site\/utils\"\n\t\"github.com\/UniversityRadioYork\/myradio-go\"\n)\n\n\/\/ SignUpController is the controller for processing signup requests.\ntype SignUpController struct {\n\tController\n}\n\n\/\/ NewSignUpController returns a new SignUpController with the MyRadio\n\/\/ session s and configuration context c.\nfunc NewSignUpController(s *myradio.Session, c *structs.Config) *SignUpController {\n\treturn &SignUpController{Controller{session: s, config: c}}\n}\n\n\/\/ Post handles the HTTP POST request r for the get involved, writing to w.\nfunc (gic *SignUpController) Post(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tformParams := r.Form\n\tvar feedback []string\n\n\t\/\/Validate that necessary params are present and correct(enough)\n\t_, ok := formParams[\"fname\"]\n\tif !ok || formParams[\"fname\"][0] == \"\" {\n\t\tfeedback = append(feedback, \"You need to provide your First Name\")\n\t}\n\t_, ok = formParams[\"sname\"]\n\tif !ok || formParams[\"sname\"][0] == \"\" {\n\t\tfeedback = append(feedback, \"You need to provide your Last Name\")\n\t}\n\t\/\/ Check an eduroam value is submitted\n\t\/\/ If not then the user is signing up using a personal email\n\tif _, ok := formParams[\"eduroam\"]; ok {\n\t\tif formParams[\"eduroam\"][0] == \"\" {\n\t\t\tfeedback = append(feedback, \"You need to provide your York Email\")\n\t\t} else {\n\t\t\tmatch, _ := regexp.MatchString(\"^[a-z]{1,6}[0-9]{1,6}$\", formParams[\"eduroam\"][0])\n\t\t\tif !match {\n\t\t\t\tfeedback = append(feedback, \"The @york.ac.uk email you provided seems invalid\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif _, ok = formParams[\"email\"]; !ok {\n\t\t\tfeedback = append(feedback, \"You need to provide your email address\")\n\t\t}\n\t}\n\t_, ok = formParams[\"phone\"]\n\tif !ok || formParams[\"phone\"][0] == \"\" {\n\t\tdelete(formParams, \"phone\")\n\t}\n\n\t\/\/If they are then post them off to the API\n\tif len(feedback) == 0 {\n\t\tsm := models.NewSignUpModel(gic.session)\n\t\terr := sm.Post(formParams)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tfeedback = append(feedback, \"Oops. Something went wrong on our end.\")\n\t\t\tfeedback = append(feedback, \"Please try again later\")\n\t\t}\n\t}\n\n\tdata := struct {\n\t\tFeedback []string\n\t}{\n\t\tFeedback: feedback,\n\t}\n\n\terr := utils.RenderTemplate(w, gic.config.PageContext, data, \"signedup.tmpl\")\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n}\n<commit_msg>Allow @york.ac.uk to be entered even though we assume it<commit_after>package controllers\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/UniversityRadioYork\/2016-site\/models\"\n\t\"github.com\/UniversityRadioYork\/2016-site\/structs\"\n\t\"github.com\/UniversityRadioYork\/2016-site\/utils\"\n\t\"github.com\/UniversityRadioYork\/myradio-go\"\n)\n\n\/\/ SignUpController is the controller for processing signup requests.\ntype SignUpController struct {\n\tController\n}\n\n\/\/ NewSignUpController returns a new SignUpController with the MyRadio\n\/\/ session s and configuration context c.\nfunc NewSignUpController(s *myradio.Session, c *structs.Config) *SignUpController {\n\treturn &SignUpController{Controller{session: s, config: c}}\n}\n\n\/\/ Post handles the HTTP POST request r for the get involved, writing to w.\nfunc (gic *SignUpController) Post(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tformParams := r.Form\n\tvar feedback []string\n\n\t\/\/Validate that necessary params are present and correct(enough)\n\t_, ok := formParams[\"fname\"]\n\tif !ok || formParams[\"fname\"][0] == \"\" {\n\t\tfeedback = append(feedback, \"You need to provide your First Name\")\n\t}\n\t_, ok = formParams[\"sname\"]\n\tif !ok || formParams[\"sname\"][0] == \"\" {\n\t\tfeedback = append(feedback, \"You need to provide your Last Name\")\n\t}\n\t\/\/ Check an eduroam value is submitted\n\t\/\/ If not then the user is signing up using a personal email\n\tif _, ok := formParams[\"eduroam\"]; ok {\n\t\teduroam := formParams[\"eduroam\"][0]\n\t\tif eduroam == \"\" {\n\t\t\tfeedback = append(feedback, \"You need to provide your York Email\")\n\t\t} else {\n\t\t\t\/\/ Ignore an added @york.ac.uk (since we assume it)\n\t\t\tif strings.HasSuffix(eduroam, \"@york.ac.uk\"){\n\t\t\t\teduroam = eduroam[:len(eduroam)-11]\n\t\t\t}\n\t\t\tlog.Println(eduroam)\n\t\t\tmatch, _ := regexp.MatchString(\"^[a-z]{1,6}[0-9]{1,6}$\", eduroam)\n\t\t\tif !match {\n\t\t\t\tfeedback = append(feedback, \"The @york.ac.uk email you provided seems invalid\")\n\t\t\t}\n\t\t\tformParams[\"eduroam\"][0] = eduroam\n\t\t}\n\t} else {\n\t\tif _, ok = formParams[\"email\"]; !ok {\n\t\t\tfeedback = append(feedback, \"You need to provide your email address\")\n\t\t}\n\t}\n\t_, ok = formParams[\"phone\"]\n\tif !ok || formParams[\"phone\"][0] == \"\" {\n\t\tdelete(formParams, \"phone\")\n\t}\n\n\t\/\/If they are then post them off to the API\n\tif len(feedback) == 0 {\n\t\tsm := models.NewSignUpModel(gic.session)\n\t\terr := sm.Post(formParams)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tfeedback = append(feedback, \"Oops. Something went wrong on our end.\")\n\t\t\tfeedback = append(feedback, \"Please try again later\")\n\t\t}\n\t}\n\n\tdata := struct {\n\t\tFeedback []string\n\t}{\n\t\tFeedback: feedback,\n\t}\n\n\terr := utils.RenderTemplate(w, gic.config.PageContext, data, \"signedup.tmpl\")\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Only files including IPv4, IPv6, and Location (in english)\n\/\/ will be read and parsed into lists.\npackage parser\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"net\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tipNumColumnsGlite2 = 10\n\tlocationNumColumnsGlite2 = 13\n\tgLite2Prefix = \"GeoLite2-City\"\n\n\tipNumColumnsGlite1 = 3\n\tlocationNumColumnsGlite1 = 9\n\tgLite1Prefix = \"GeoLiteCity\"\n\n\tmapMax = 200000\n)\n\n\/\/ IPNode defines IPv4 and IPv6 databases\ntype IPNode struct {\n\tIPAddressLow net.IP\n\tIPAddressHigh net.IP\n\tLocationIndex int \/\/ Index to slice of locations\n\tPostalCode string\n\tLatitude float64\n\tLongitude float64\n}\n\n\/\/ LocationNode defines Location databases\ntype LocationNode struct {\n\tGeonameID int\n\tContinentCode string\n\tCountryCode string\n\tCountryName string\n\tMetroCode int64\n\tCityName string\n}\n\n\/\/ Creates a List of IPNodes\nfunc CreateIPList(reader io.Reader, idMap map[int]int, file string) ([]IPNode, error) {\n\tg1IP := []string{\"startIpNum\", \"endIpNum\", \"locId\"}\n\tlist := []IPNode{}\n\tr := csv.NewReader(reader)\n\t\/\/ Skip first line\n\ttitle, err := r.Read()\n\tif err == io.EOF {\n\t\tlog.Println(\"Empty input data\")\n\t\treturn nil, errors.New(\"Empty input data\")\n\t}\n\tswitch {\n\tcase strings.HasPrefix(file, gLite1Prefix):\n\t\t\/\/ Skip 2nd line\n\t\ttitle, err = r.Read()\n\t\tif err == io.EOF {\n\t\t\tlog.Println(\"Empty input data\")\n\t\t\treturn nil, errors.New(\"Empty input data\")\n\t\t}\n\t\tif !reflect.DeepEqual(g1IP, title) {\n\t\t\tlog.Println(\"Improper data format got: \", title, \" wanted: \", g1IP)\n\t\t\treturn nil, errors.New(\"Improper data format\")\n\t\t}\n\t\tfor {\n\t\t\t\/\/ Example:\n\t\t\t\/\/ GLite1 : record = [16777216,16777471,17]\n\t\t\trecord, err := r.Read()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = checkColumnLength(record, ipNumColumnsGlite1)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvar newNode IPNode\n\t\t\tnewNode.IPAddressLow, err = Int2ip(record[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.IPAddressHigh, err = Int2ip(record[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ Look for GeoId within idMap and return index\n\t\t\tindex, err := lookupGeoId(record[2], idMap)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.LocationIndex = index\n\t\t\tlist = append(list, newNode)\n\t\t}\n\tcase strings.HasPrefix(file, gLite2Prefix):\n\t\tfor {\n\t\t\t\/\/ Example:\n\t\t\t\/\/ GLite2 : record = [2a04:97c0::\/29,2658434,2658434,0,0,47,8,100]\n\t\t\trecord, err := r.Read()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar newNode IPNode\n\t\t\terr = checkColumnLength(record, ipNumColumnsGlite2)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlowIp, highIp, err := RangeCIDR(record[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.IPAddressLow = lowIp\n\t\t\tnewNode.IPAddressHigh = highIp\n\t\t\t\/\/ Look for GeoId within idMap and return index\n\t\t\tindex, err := lookupGeoId(record[1], idMap)\n\t\t\tif err != nil {\n\t\t\t\tindex, err = lookupGeoId(record[2], idMap)\n\t\t\t}\n\t\t\tnewNode.LocationIndex = index\n\t\t\tnewNode.PostalCode = record[6]\n\t\t\tnewNode.Latitude, err = stringToFloat(record[7], \"Latitude\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.Longitude, err = stringToFloat(record[8], \"Longitude\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist = append(list, newNode)\n\t\t}\n\tdefault:\n\t\tlog.Println(\"Unaccepted csv file provided: \", file)\n\t\treturn list, errors.New(\"Unaccepted csv file provided\")\n\t}\n\treturn list, nil\n}\n\n\/\/ Verify column length\nfunc checkColumnLength(record []string, size int) error {\n\tif len(record) != size {\n\t\tlog.Println(\"Incorrect number of columns in IP list\", size, \" got: \", len(record), record)\n\t\treturn errors.New(\"Corrupted Data: wrong number of columns\")\n\t}\n\treturn nil\n}\n\n\/\/ Converts integer to net.IPv4\nfunc Int2ip(str string) (net.IP, error) {\n\tnum, err := strconv.Atoi(str)\n\tif err != nil {\n\t\tlog.Println(\"Provided IP should be a number\")\n\t\treturn nil, errors.New(\"Inputed string cannot be converted to a number\")\n\t}\n\tft := float64(num)\n\tif ft > math.Pow(2, 32) || num < 1 {\n\t\tlog.Println(\"Provided IP should be in the range of 0.0.0.1 and 255.255.255.255 \", str)\n\t}\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, uint32(num))\n\tip = net.IPv4(ip[0], ip[1], ip[2], ip[3])\n\treturn ip, nil\n}\n\n\/\/ Finds the smallest and largest net.IP from a CIDR range\n\/\/ Example: \"1.0.0.0\/24\" -> 1.0.0.0 , 1.0.0.255\nfunc RangeCIDR(cidr string) (net.IP, net.IP, error) {\n\tip, ipnet, err := net.ParseCIDR(cidr)\n\tif err != nil {\n\t\treturn nil, nil, errors.New(\"Invalid CIDR IP range\")\n\t}\n\tlowIp := make(net.IP, len(ip))\n\tcopy(lowIp, ip)\n\tmask := ipnet.Mask\n\tfor x, _ := range ip {\n\t\tif len(mask) == 4 {\n\t\t\tif x < 12 {\n\t\t\t\tip[x] |= 0\n\t\t\t} else {\n\t\t\t\tip[x] |= ^mask[x-12]\n\t\t\t}\n\t\t} else {\n\t\t\tip[x] |= ^mask[x]\n\t\t}\n\t}\n\treturn lowIp, ip, nil\n}\n\n\/\/ Finds provided geonameID within idMap and returns the index in idMap\n\/\/ locationIdMap := map[int]int{\n\/\/\t609013: 0,\n\/\/\t104084: 4,\n\/\/\t17: 4,\n\/\/ }\n\/\/ lookupGeoId(\"17\",locationIdMap) would return (2,nil).\n\/\/ TODO: Add error metrics\nfunc lookupGeoId(gnid string, idMap map[int]int) (int, error) {\n\tgeonameId, err := strconv.Atoi(gnid)\n\tif err != nil {\n\t\tlog.Println(\"geonameID should be a number\")\n\t\treturn 0, errors.New(\"Corrupted Data: geonameID should be a number\")\n\t}\n\tloadIndex, ok := idMap[geonameId]\n\tif !ok {\n\t\tlog.Println(\"geonameID not found \", geonameId)\n\t\treturn 0, errors.New(\"Corrupted Data: geonameId not found\")\n\t}\n\treturn loadIndex, nil\n}\n\nfunc stringToFloat(str, field string) (float64, error) {\n\tflt, err := strconv.ParseFloat(str, 64)\n\tif err != nil {\n\t\tif len(str) > 0 {\n\t\t\tlog.Println(field, \" was not a number\")\n\t\t\toutput := strings.Join([]string{\"Corrupted Data: \", field, \" should be an int\"}, \"\")\n\t\t\treturn 0, errors.New(output)\n\t\t}\n\t}\n\treturn flt, nil\n}\n\nfunc checkAllCaps(str, field string) (string, error) {\n\tmatch, _ := regexp.MatchString(\"^[A-Z]*$\", str)\n\tif match {\n\t\treturn str, nil\n\t} else {\n\t\tlog.Println(field, \"should be all capitals and no numbers\")\n\t\toutput := strings.Join([]string{\"Corrupted Data: \", field, \" should be all caps\"}, \"\")\n\t\treturn \"\", errors.New(output)\n\n\t}\n}\n\n\/\/ Creates list for location databases\n\/\/ returns list with location data and a hashmap with index to geonameId\nfunc CreateLocationList(reader io.Reader) ([]LocationNode, map[int]int, error) {\n\tidMap := make(map[int]int, mapMax)\n\tlist := []LocationNode{}\n\tr := csv.NewReader(reader)\n\tr.TrimLeadingSpace = true\n\t\/\/ Skip the first line\n\t_, err := r.Read()\n\tif err == io.EOF {\n\t\tlog.Println(\"Empty input data\")\n\t\treturn nil, nil, errors.New(\"Empty input data\")\n\t}\n\tif err != nil {\n\t\tlog.Println(\"Error reading file\")\n\t\treturn nil, nil, errors.New(\"Error reading file\")\n\t}\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif len(record) != locationNumColumnsGlite2 {\n\t\t\tlog.Println(\"Incorrect number of columns in Location list\\n\\twanted: \", locationNumColumnsGlite2, \" got: \", len(record), record)\n\t\t\treturn nil, nil, errors.New(\"Corrupted Data: wrong number of columns\")\n\t\t}\n\t\tvar newNode LocationNode\n\t\tnewNode.GeonameID, err = strconv.Atoi(record[0])\n\t\tif err != nil {\n\t\t\tif len(record[0]) > 0 {\n\t\t\t\tlog.Println(\"GeonameID should be a number \", record[0])\n\t\t\t\treturn nil, nil, errors.New(\"Corrupted Data: GeonameID should be a number\")\n\t\t\t}\n\t\t}\n\t\tnewNode.ContinentCode, err = checkAllCaps(record[2], \"Continent code\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tnewNode.CountryCode, err = checkAllCaps(record[4], \"Country code\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmatch, _ := regexp.MatchString(`^[^0-9]*$`, record[5])\n\t\tif match {\n\t\t\tnewNode.CountryName = record[5]\n\t\t} else {\n\t\t\tlog.Println(\"Country name should be letters only : \", record[5])\n\t\t\treturn nil, nil, errors.New(\"Corrupted Data: country name should be letters\")\n\t\t}\n\t\tnewNode.MetroCode, err = strconv.ParseInt(record[11], 10, 64)\n\t\tif err != nil {\n\t\t\tif len(record[11]) > 0 {\n\t\t\t\tlog.Println(\"MetroCode should be a number\")\n\t\t\t\treturn nil, nil, errors.New(\"Corrupted Data: metrocode should be a number\")\n\t\t\t}\n\t\t}\n\t\tnewNode.CityName = record[10]\n\t\tlist = append(list, newNode)\n\t\tidMap[newNode.GeonameID] = len(list) - 1\n\t}\n\treturn list, idMap, nil\n}\n\n\/\/ Returns nil if two nodes are equal\n\/\/ Used by the search package\nfunc IsEqualIPNodes(expected, node IPNode) error {\n\tif !((node.IPAddressLow).Equal(expected.IPAddressLow)) {\n\t\toutput := strings.Join([]string{\"IPAddress Low inconsistent\\ngot:\", node.IPAddressLow.String(), \" \\nwanted:\", expected.IPAddressLow.String()}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif !((node.IPAddressHigh).Equal(expected.IPAddressHigh)) {\n\t\toutput := strings.Join([]string{\"IPAddressHigh inconsistent\\ngot:\", node.IPAddressHigh.String(), \" \\nwanted:\", expected.IPAddressHigh.String()}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.LocationIndex != expected.LocationIndex {\n\t\toutput := strings.Join([]string{\"LocationIndex inconsistent\\ngot:\", strconv.Itoa(node.LocationIndex), \" \\nwanted:\", strconv.Itoa(expected.LocationIndex)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.PostalCode != expected.PostalCode {\n\t\toutput := strings.Join([]string{\"PostalCode inconsistent\\ngot:\", node.PostalCode, \" \\nwanted:\", expected.PostalCode}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.Latitude != expected.Latitude {\n\t\toutput := strings.Join([]string{\"Latitude inconsistent\\ngot:\", floatToString(node.Latitude), \" \\nwanted:\", floatToString(expected.Latitude)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.Longitude != expected.Longitude {\n\t\toutput := strings.Join([]string{\"Longitude inconsistent\\ngot:\", floatToString(node.Longitude), \" \\nwanted:\", floatToString(expected.Longitude)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\treturn nil\n}\nfunc floatToString(num float64) string {\n\treturn strconv.FormatFloat(num, 'f', 6, 64)\n}\n<commit_msg>clarify comment<commit_after>\/\/ Only files including IPv4, IPv6, and Location (in english)\n\/\/ will be read and parsed into lists.\npackage parser\n\nimport (\n\t\"encoding\/binary\"\n\t\"encoding\/csv\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"net\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tipNumColumnsGlite2 = 10\n\tlocationNumColumnsGlite2 = 13\n\tgLite2Prefix = \"GeoLite2-City\"\n\n\tipNumColumnsGlite1 = 3\n\tlocationNumColumnsGlite1 = 9\n\tgLite1Prefix = \"GeoLiteCity\"\n\n\tmapMax = 200000\n)\n\n\/\/ IPNode defines IPv4 and IPv6 databases\ntype IPNode struct {\n\tIPAddressLow net.IP\n\tIPAddressHigh net.IP\n\tLocationIndex int \/\/ Index to slice of locations\n\tPostalCode string\n\tLatitude float64\n\tLongitude float64\n}\n\n\/\/ LocationNode defines Location databases\ntype LocationNode struct {\n\tGeonameID int\n\tContinentCode string\n\tCountryCode string\n\tCountryName string\n\tMetroCode int64\n\tCityName string\n}\n\n\/\/ Creates a List of IPNodes\nfunc CreateIPList(reader io.Reader, idMap map[int]int, file string) ([]IPNode, error) {\n\tg1IP := []string{\"startIpNum\", \"endIpNum\", \"locId\"}\n\tlist := []IPNode{}\n\tr := csv.NewReader(reader)\n\t\/\/ Skip first line\n\ttitle, err := r.Read()\n\tif err == io.EOF {\n\t\tlog.Println(\"Empty input data\")\n\t\treturn nil, errors.New(\"Empty input data\")\n\t}\n\tswitch {\n\tcase strings.HasPrefix(file, gLite1Prefix):\n\t\t\/\/ Skip 2nd line, which contains column labels\n\t\ttitle, err = r.Read()\n\t\tif err == io.EOF {\n\t\t\tlog.Println(\"Empty input data\")\n\t\t\treturn nil, errors.New(\"Empty input data\")\n\t\t}\n\t\tif !reflect.DeepEqual(g1IP, title) {\n\t\t\tlog.Println(\"Improper data format got: \", title, \" wanted: \", g1IP)\n\t\t\treturn nil, errors.New(\"Improper data format\")\n\t\t}\n\t\tfor {\n\t\t\t\/\/ Example:\n\t\t\t\/\/ GLite1 : record = [16777216,16777471,17]\n\t\t\trecord, err := r.Read()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = checkColumnLength(record, ipNumColumnsGlite1)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvar newNode IPNode\n\t\t\tnewNode.IPAddressLow, err = Int2ip(record[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.IPAddressHigh, err = Int2ip(record[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ Look for GeoId within idMap and return index\n\t\t\tindex, err := lookupGeoId(record[2], idMap)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.LocationIndex = index\n\t\t\tlist = append(list, newNode)\n\t\t}\n\tcase strings.HasPrefix(file, gLite2Prefix):\n\t\tfor {\n\t\t\t\/\/ Example:\n\t\t\t\/\/ GLite2 : record = [2a04:97c0::\/29,2658434,2658434,0,0,47,8,100]\n\t\t\trecord, err := r.Read()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar newNode IPNode\n\t\t\terr = checkColumnLength(record, ipNumColumnsGlite2)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlowIp, highIp, err := RangeCIDR(record[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.IPAddressLow = lowIp\n\t\t\tnewNode.IPAddressHigh = highIp\n\t\t\t\/\/ Look for GeoId within idMap and return index\n\t\t\tindex, err := lookupGeoId(record[1], idMap)\n\t\t\tif err != nil {\n\t\t\t\tindex, err = lookupGeoId(record[2], idMap)\n\t\t\t}\n\t\t\tnewNode.LocationIndex = index\n\t\t\tnewNode.PostalCode = record[6]\n\t\t\tnewNode.Latitude, err = stringToFloat(record[7], \"Latitude\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewNode.Longitude, err = stringToFloat(record[8], \"Longitude\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist = append(list, newNode)\n\t\t}\n\tdefault:\n\t\tlog.Println(\"Unaccepted csv file provided: \", file)\n\t\treturn list, errors.New(\"Unaccepted csv file provided\")\n\t}\n\treturn list, nil\n}\n\n\/\/ Verify column length\nfunc checkColumnLength(record []string, size int) error {\n\tif len(record) != size {\n\t\tlog.Println(\"Incorrect number of columns in IP list\", size, \" got: \", len(record), record)\n\t\treturn errors.New(\"Corrupted Data: wrong number of columns\")\n\t}\n\treturn nil\n}\n\n\/\/ Converts integer to net.IPv4\nfunc Int2ip(str string) (net.IP, error) {\n\tnum, err := strconv.Atoi(str)\n\tif err != nil {\n\t\tlog.Println(\"Provided IP should be a number\")\n\t\treturn nil, errors.New(\"Inputed string cannot be converted to a number\")\n\t}\n\tft := float64(num)\n\tif ft > math.Pow(2, 32) || num < 1 {\n\t\tlog.Println(\"Provided IP should be in the range of 0.0.0.1 and 255.255.255.255 \", str)\n\t}\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, uint32(num))\n\tip = net.IPv4(ip[0], ip[1], ip[2], ip[3])\n\treturn ip, nil\n}\n\n\/\/ Finds the smallest and largest net.IP from a CIDR range\n\/\/ Example: \"1.0.0.0\/24\" -> 1.0.0.0 , 1.0.0.255\nfunc RangeCIDR(cidr string) (net.IP, net.IP, error) {\n\tip, ipnet, err := net.ParseCIDR(cidr)\n\tif err != nil {\n\t\treturn nil, nil, errors.New(\"Invalid CIDR IP range\")\n\t}\n\tlowIp := make(net.IP, len(ip))\n\tcopy(lowIp, ip)\n\tmask := ipnet.Mask\n\tfor x, _ := range ip {\n\t\tif len(mask) == 4 {\n\t\t\tif x < 12 {\n\t\t\t\tip[x] |= 0\n\t\t\t} else {\n\t\t\t\tip[x] |= ^mask[x-12]\n\t\t\t}\n\t\t} else {\n\t\t\tip[x] |= ^mask[x]\n\t\t}\n\t}\n\treturn lowIp, ip, nil\n}\n\n\/\/ Finds provided geonameID within idMap and returns the index in idMap\n\/\/ locationIdMap := map[int]int{\n\/\/\t609013: 0,\n\/\/\t104084: 4,\n\/\/\t17: 4,\n\/\/ }\n\/\/ lookupGeoId(\"17\",locationIdMap) would return (2,nil).\n\/\/ TODO: Add error metrics\nfunc lookupGeoId(gnid string, idMap map[int]int) (int, error) {\n\tgeonameId, err := strconv.Atoi(gnid)\n\tif err != nil {\n\t\tlog.Println(\"geonameID should be a number\")\n\t\treturn 0, errors.New(\"Corrupted Data: geonameID should be a number\")\n\t}\n\tloadIndex, ok := idMap[geonameId]\n\tif !ok {\n\t\tlog.Println(\"geonameID not found \", geonameId)\n\t\treturn 0, errors.New(\"Corrupted Data: geonameId not found\")\n\t}\n\treturn loadIndex, nil\n}\n\nfunc stringToFloat(str, field string) (float64, error) {\n\tflt, err := strconv.ParseFloat(str, 64)\n\tif err != nil {\n\t\tif len(str) > 0 {\n\t\t\tlog.Println(field, \" was not a number\")\n\t\t\toutput := strings.Join([]string{\"Corrupted Data: \", field, \" should be an int\"}, \"\")\n\t\t\treturn 0, errors.New(output)\n\t\t}\n\t}\n\treturn flt, nil\n}\n\nfunc checkAllCaps(str, field string) (string, error) {\n\tmatch, _ := regexp.MatchString(\"^[A-Z]*$\", str)\n\tif match {\n\t\treturn str, nil\n\t} else {\n\t\tlog.Println(field, \"should be all capitals and no numbers\")\n\t\toutput := strings.Join([]string{\"Corrupted Data: \", field, \" should be all caps\"}, \"\")\n\t\treturn \"\", errors.New(output)\n\n\t}\n}\n\n\/\/ Creates list for location databases\n\/\/ returns list with location data and a hashmap with index to geonameId\nfunc CreateLocationList(reader io.Reader) ([]LocationNode, map[int]int, error) {\n\tidMap := make(map[int]int, mapMax)\n\tlist := []LocationNode{}\n\tr := csv.NewReader(reader)\n\tr.TrimLeadingSpace = true\n\t\/\/ Skip the first line\n\t_, err := r.Read()\n\tif err == io.EOF {\n\t\tlog.Println(\"Empty input data\")\n\t\treturn nil, nil, errors.New(\"Empty input data\")\n\t}\n\tif err != nil {\n\t\tlog.Println(\"Error reading file\")\n\t\treturn nil, nil, errors.New(\"Error reading file\")\n\t}\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif len(record) != locationNumColumnsGlite2 {\n\t\t\tlog.Println(\"Incorrect number of columns in Location list\\n\\twanted: \", locationNumColumnsGlite2, \" got: \", len(record), record)\n\t\t\treturn nil, nil, errors.New(\"Corrupted Data: wrong number of columns\")\n\t\t}\n\t\tvar newNode LocationNode\n\t\tnewNode.GeonameID, err = strconv.Atoi(record[0])\n\t\tif err != nil {\n\t\t\tif len(record[0]) > 0 {\n\t\t\t\tlog.Println(\"GeonameID should be a number \", record[0])\n\t\t\t\treturn nil, nil, errors.New(\"Corrupted Data: GeonameID should be a number\")\n\t\t\t}\n\t\t}\n\t\tnewNode.ContinentCode, err = checkAllCaps(record[2], \"Continent code\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tnewNode.CountryCode, err = checkAllCaps(record[4], \"Country code\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmatch, _ := regexp.MatchString(`^[^0-9]*$`, record[5])\n\t\tif match {\n\t\t\tnewNode.CountryName = record[5]\n\t\t} else {\n\t\t\tlog.Println(\"Country name should be letters only : \", record[5])\n\t\t\treturn nil, nil, errors.New(\"Corrupted Data: country name should be letters\")\n\t\t}\n\t\tnewNode.MetroCode, err = strconv.ParseInt(record[11], 10, 64)\n\t\tif err != nil {\n\t\t\tif len(record[11]) > 0 {\n\t\t\t\tlog.Println(\"MetroCode should be a number\")\n\t\t\t\treturn nil, nil, errors.New(\"Corrupted Data: metrocode should be a number\")\n\t\t\t}\n\t\t}\n\t\tnewNode.CityName = record[10]\n\t\tlist = append(list, newNode)\n\t\tidMap[newNode.GeonameID] = len(list) - 1\n\t}\n\treturn list, idMap, nil\n}\n\n\/\/ Returns nil if two nodes are equal\n\/\/ Used by the search package\nfunc IsEqualIPNodes(expected, node IPNode) error {\n\tif !((node.IPAddressLow).Equal(expected.IPAddressLow)) {\n\t\toutput := strings.Join([]string{\"IPAddress Low inconsistent\\ngot:\", node.IPAddressLow.String(), \" \\nwanted:\", expected.IPAddressLow.String()}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif !((node.IPAddressHigh).Equal(expected.IPAddressHigh)) {\n\t\toutput := strings.Join([]string{\"IPAddressHigh inconsistent\\ngot:\", node.IPAddressHigh.String(), \" \\nwanted:\", expected.IPAddressHigh.String()}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.LocationIndex != expected.LocationIndex {\n\t\toutput := strings.Join([]string{\"LocationIndex inconsistent\\ngot:\", strconv.Itoa(node.LocationIndex), \" \\nwanted:\", strconv.Itoa(expected.LocationIndex)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.PostalCode != expected.PostalCode {\n\t\toutput := strings.Join([]string{\"PostalCode inconsistent\\ngot:\", node.PostalCode, \" \\nwanted:\", expected.PostalCode}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.Latitude != expected.Latitude {\n\t\toutput := strings.Join([]string{\"Latitude inconsistent\\ngot:\", floatToString(node.Latitude), \" \\nwanted:\", floatToString(expected.Latitude)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\tif node.Longitude != expected.Longitude {\n\t\toutput := strings.Join([]string{\"Longitude inconsistent\\ngot:\", floatToString(node.Longitude), \" \\nwanted:\", floatToString(expected.Longitude)}, \"\")\n\t\tlog.Println(output)\n\t\treturn errors.New(output)\n\t}\n\treturn nil\n}\nfunc floatToString(num float64) string {\n\treturn strconv.FormatFloat(num, 'f', 6, 64)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The clang-server Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage parser\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/go-clang\/v3.9\/clang\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkgutil\/osutil\"\n\t\"github.com\/pkgutil\/stringsutil\"\n\t\"github.com\/zchee\/clang-server\/compilationdatabase\"\n\t\"github.com\/zchee\/clang-server\/indexdb\"\n\t\"github.com\/zchee\/clang-server\/internal\/log\"\n\t\"github.com\/zchee\/clang-server\/internal\/pathutil\"\n\t\"github.com\/zchee\/clang-server\/parser\/builtinheader\"\n\t\"github.com\/zchee\/clang-server\/symbol\"\n)\n\n\/\/ defaultClangOption defalut global clang options.\n\/\/ clang.TranslationUnit_DetailedPreprocessingRecord = 0x01\n\/\/ clang.TranslationUnit_Incomplete = 0x02\n\/\/ clang.TranslationUnit_PrecompiledPreamble = 0x04\n\/\/ clang.TranslationUnit_CacheCompletionResults = 0x08\n\/\/ clang.TranslationUnit_ForSerialization = 0x10\n\/\/ clang.TranslationUnit_CXXChainedPCH = 0x20\n\/\/ clang.TranslationUnit_SkipFunctionBodies = 0x40\n\/\/ clang.TranslationUnit_IncludeBriefCommentsInCodeCompletion = 0x80\n\/\/ clang.TranslationUnit_CreatePreambleOnFirstParse = 0x100\n\/\/ clang.TranslationUnit_KeepGoing = 0x200\n\/\/ const defaultClangOption uint32 = 0x445 \/\/ Use all flags for now\nvar defaultClangOption = clang.DefaultEditingTranslationUnitOptions() | uint32(clang.TranslationUnit_KeepGoing)\n\n\/\/ Parser represents a C\/C++ AST parser.\ntype Parser struct {\n\troot string\n\tclangOption uint32\n\n\tidx clang.Index\n\tcd *compilationdatabase.CompilationDatabase\n\tdb *indexdb.IndexDB\n\n\tdispatcher *dispatcher\n\n\tdebugUncatched bool \/\/ for debug\n\tuncachedKind map[clang.CursorKind]int \/\/ for debug\n}\n\n\/\/ Config represents a parser config.\ntype Config struct {\n\tJSONName string\n\tPathRange []string\n\tClangOption uint32\n\n\tDebug bool\n}\n\n\/\/ NewParser return the new Parser.\nfunc NewParser(path string, config Config) *Parser {\n\troot, err := pathutil.FindProjectRoot(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcd := compilationdatabase.NewCompilationDatabase(root)\n\tif err := cd.Parse(config.JSONName, config.PathRange); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdb, err := indexdb.NewIndexDB(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclangOption := config.ClangOption\n\tif clangOption == 0 {\n\t\tclangOption = defaultClangOption\n\t}\n\n\tp := &Parser{\n\t\troot: root,\n\t\tclangOption: clangOption,\n\t\tidx: clang.NewIndex(0, 1), \/\/ disable excludeDeclarationsFromPCH, enable displayDiagnostics\n\t\tcd: cd,\n\t\tdb: db,\n\t}\n\n\tif config.Debug {\n\t\tp.debugUncatched = true\n\t\tp.uncachedKind = make(map[clang.CursorKind]int)\n\t}\n\n\tif err := CreateBulitinHeaders(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn p\n}\n\n\/\/ CreateBulitinHeaders creates(dumps) a clang builtin header to cache directory.\nfunc CreateBulitinHeaders() error {\n\tbuiltinHdrDir := filepath.Join(pathutil.CacheDir(), \"clang\", \"include\")\n\tif !osutil.IsExist(builtinHdrDir) {\n\t\tif err := os.MkdirAll(builtinHdrDir, 0700); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\tfor _, fname := range builtinheader.AssetNames() {\n\t\tdata, err := builtinheader.AssetInfo(fname)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tif strings.Contains(data.Name(), string(filepath.Separator)) {\n\t\t\tdir, _ := filepath.Split(data.Name())\n\t\t\tif err := os.MkdirAll(filepath.Join(builtinHdrDir, dir), 0700); err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\n\t\tbuf, err := builtinheader.Asset(data.Name())\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tif err := ioutil.WriteFile(filepath.Join(builtinHdrDir, data.Name()), buf, 0600); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Parse parses the project directories.\nfunc (p *Parser) Parse() {\n\tdefer func() {\n\t\tp.db.Close()\n\t\tsymbol.Serve()\n\t}()\n\n\tccs := p.cd.CompileCommands()\n\tif len(ccs) == 0 {\n\t\tlog.Fatal(\"not walk\")\n\t}\n\n\tcompilerConfig := p.cd.CompilerConfig\n\tflags := append(compilerConfig.SystemCIncludeDir, compilerConfig.SystemFrameworkDir...)\n\n\t\/\/ TODO(zchee): needs include stdint.h?\n\tif i := stringsutil.IndexContainsSlice(ccs[0].Arguments, \"-std=\"); i > 0 {\n\t\tstd := ccs[0].Arguments[i][5:]\n\t\tswitch {\n\t\tcase strings.HasPrefix(std, \"c\"), strings.HasPrefix(std, \"gnu\"):\n\t\t\tif std[len(std)-2] == '8' || std[len(std)-2] == '9' {\n\t\t\t\tflags = append(flags, \"-include\", \"\/usr\/include\/stdint.h\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tflags = append(flags, \"-include\", \"\/usr\/include\/stdint.h\")\n\t}\n\tif !(filepath.Ext(ccs[0].File) == \".c\") {\n\t\tflags = append(flags, compilerConfig.SystemCXXIncludeDir...)\n\t}\n\n\tbuiltinHdrDir := filepath.Join(pathutil.CacheDir(), \"clang\", \"include\")\n\tflags = append(flags, \"-I\"+builtinHdrDir)\n\n\tp.dispatcher = newDispatcher(p.ParseFile)\n\tp.dispatcher.Start()\n\tfor i := 0; i < len(ccs); i++ {\n\t\targs := ccs[i].Arguments\n\t\targs = append(flags, args...)\n\t\tp.dispatcher.Add(parseArg{ccs[i].File, args})\n\t}\n\tp.dispatcher.Wait()\n}\n\ntype parseArg struct {\n\tfilename string\n\tflag []string\n}\n\n\/\/ ParseFile parses the C\/C++ file.\nfunc (p *Parser) ParseFile(arg parseArg) error {\n\tvar tu clang.TranslationUnit\n\n\tif p.db.Has(arg.filename) {\n\t\tbuf, err := p.db.Get(arg.filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttu, err = deserializeTranslationUnit(p.idx, buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer tu.Dispose()\n\n\t\tlog.Debugf(\"tu.Spelling(): %T => %+v\\n\", tu.Spelling(), tu.Spelling()) \/\/ for debug\n\n\t\treturn nil\n\t}\n\n\tif cErr := p.idx.ParseTranslationUnit2(arg.filename, arg.flag, nil, p.clangOption, &tu); clang.ErrorCode(cErr) != clang.Error_Success {\n\t\treturn errors.New(clang.ErrorCode(cErr).Spelling())\n\t}\n\tdefer tu.Dispose()\n\n\ttuch := make(chan []byte)\n\tgo func() {\n\t\ttuch <- serializeTranslationUnit(arg.filename, tu)\n\t}()\n\n\tprintDiagnostics(tu.Diagnostics())\n\n\trootCursor := tu.TranslationUnitCursor()\n\tfile := symbol.NewFile(arg.filename)\n\tvisitNode := func(cursor, parent clang.Cursor) clang.ChildVisitResult {\n\t\tif cursor.IsNull() {\n\t\t\tlog.Debug(\"cursor: <none>\")\n\t\t\treturn clang.ChildVisit_Continue\n\t\t}\n\n\t\tcursorLoc := symbol.FromCursor(cursor)\n\t\tif cursorLoc.FileName() == \"\" || cursorLoc.FileName() == \".\" {\n\t\t\t\/\/ TODO(zchee): Ignore system header(?)\n\t\t\treturn clang.ChildVisit_Continue\n\t\t}\n\n\t\tkind := cursor.Kind()\n\t\tswitch kind {\n\t\tcase clang.Cursor_FunctionDecl, clang.Cursor_StructDecl, clang.Cursor_FieldDecl, clang.Cursor_TypedefDecl, clang.Cursor_EnumDecl, clang.Cursor_EnumConstantDecl:\n\t\t\tdefCursor := cursor.Definition()\n\t\t\tif defCursor.IsNull() {\n\t\t\t\tfile.AddDecl(cursorLoc)\n\t\t\t} else {\n\t\t\t\tdefLoc := symbol.FromCursor(defCursor)\n\t\t\t\tfile.AddDefinition(cursorLoc, defLoc)\n\t\t\t}\n\t\tcase clang.Cursor_MacroDefinition:\n\t\t\tfile.AddDefinition(cursorLoc, cursorLoc)\n\t\tcase clang.Cursor_VarDecl:\n\t\t\tfile.AddDecl(cursorLoc)\n\t\tcase clang.Cursor_ParmDecl:\n\t\t\tif cursor.Spelling() != \"\" {\n\t\t\t\tfile.AddDecl(cursorLoc)\n\t\t\t}\n\t\tcase clang.Cursor_CallExpr:\n\t\t\trefCursor := cursor.Referenced()\n\t\t\trefLoc := symbol.FromCursor(refCursor)\n\t\t\tfile.AddCaller(cursorLoc, refLoc, true)\n\t\tcase clang.Cursor_DeclRefExpr, clang.Cursor_TypeRef, clang.Cursor_MemberRefExpr, clang.Cursor_MacroExpansion:\n\t\t\trefCursor := cursor.Referenced()\n\t\t\trefLoc := symbol.FromCursor(refCursor)\n\t\t\tfile.AddCaller(cursorLoc, refLoc, false)\n\t\tcase clang.Cursor_InclusionDirective:\n\t\t\tincFile := cursor.IncludedFile()\n\t\t\tfile.AddHeader(cursor.Spelling(), incFile)\n\t\tdefault:\n\t\t\tif p.debugUncatched {\n\t\t\t\tp.uncachedKind[kind]++\n\t\t\t}\n\t\t}\n\n\t\treturn clang.ChildVisit_Recurse\n\t}\n\n\trootCursor.Visit(visitNode)\n\tfile.AddTranslationUnit(<-tuch)\n\tbuf := file.Serialize()\n\n\tout := symbol.GetRootAsFile(buf.FinishedBytes(), 0)\n\tprintFile(out) \/\/ for debug\n\n\tlog.Debugf(\"Goroutine:%d\", runtime.NumGoroutine())\n\tlog.Debugf(\"\\n================== DONE: filename: %+v ==================\\n\\n\\n\", arg.filename)\n\n\treturn p.db.Put(arg.filename, buf.FinishedBytes())\n}\n\n\/\/ serializeTranslationUnit selialize the TranslationUnit to Clang serialized representation.\n\/\/ TODO(zchee): Avoid ioutil.TempFile if possible.\nfunc serializeTranslationUnit(filename string, tu clang.TranslationUnit) []byte {\n\ttmpFile, err := ioutil.TempFile(os.TempDir(), filepath.Base(filename))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsaveOptions := uint32(clang.TranslationUnit_KeepGoing)\n\tif cErr := tu.SaveTranslationUnit(tmpFile.Name(), saveOptions); clang.SaveError(cErr) != clang.SaveError_None {\n\t\tlog.Fatal(clang.SaveError(cErr))\n\t}\n\n\tbuf, err := ioutil.ReadFile(tmpFile.Name())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tos.Remove(tmpFile.Name())\n\n\treturn buf\n}\n\nfunc deserializeTranslationUnit(idx clang.Index, buf []byte) (clang.TranslationUnit, error) {\n\tvar tu clang.TranslationUnit\n\n\ttmpfile, err := ioutil.TempFile(os.TempDir(), \"clang-server\")\n\tif err != nil {\n\t\treturn tu, err\n\t}\n\tbinary.Write(tmpfile, binary.LittleEndian, buf)\n\n\tif err := idx.TranslationUnit2(tmpfile.Name(), &tu); clang.ErrorCode(err) != clang.Error_Success {\n\t\treturn tu, errors.New(err.Spelling())\n\t}\n\t\/\/ finished create a translation unit from an AST file, remove tmpfile\n\tos.Remove(tmpfile.Name())\n\n\treturn tu, nil\n}\n\n\/\/ ClangVersion return the current clang version.\nfunc ClangVersion() string {\n\treturn clang.GetClangVersion()\n}\n<commit_msg>parser: fix TranslationUnit chan(tuch) size to 1<commit_after>\/\/ Copyright 2016 The clang-server Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage parser\n\nimport (\n\t\"encoding\/binary\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com\/go-clang\/v3.9\/clang\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkgutil\/osutil\"\n\t\"github.com\/pkgutil\/stringsutil\"\n\t\"github.com\/zchee\/clang-server\/compilationdatabase\"\n\t\"github.com\/zchee\/clang-server\/indexdb\"\n\t\"github.com\/zchee\/clang-server\/internal\/log\"\n\t\"github.com\/zchee\/clang-server\/internal\/pathutil\"\n\t\"github.com\/zchee\/clang-server\/parser\/builtinheader\"\n\t\"github.com\/zchee\/clang-server\/symbol\"\n)\n\n\/\/ defaultClangOption defalut global clang options.\n\/\/ clang.TranslationUnit_DetailedPreprocessingRecord = 0x01\n\/\/ clang.TranslationUnit_Incomplete = 0x02\n\/\/ clang.TranslationUnit_PrecompiledPreamble = 0x04\n\/\/ clang.TranslationUnit_CacheCompletionResults = 0x08\n\/\/ clang.TranslationUnit_ForSerialization = 0x10\n\/\/ clang.TranslationUnit_CXXChainedPCH = 0x20\n\/\/ clang.TranslationUnit_SkipFunctionBodies = 0x40\n\/\/ clang.TranslationUnit_IncludeBriefCommentsInCodeCompletion = 0x80\n\/\/ clang.TranslationUnit_CreatePreambleOnFirstParse = 0x100\n\/\/ clang.TranslationUnit_KeepGoing = 0x200\n\/\/ const defaultClangOption uint32 = 0x445 \/\/ Use all flags for now\nvar defaultClangOption = clang.DefaultEditingTranslationUnitOptions() | uint32(clang.TranslationUnit_KeepGoing)\n\n\/\/ Parser represents a C\/C++ AST parser.\ntype Parser struct {\n\troot string\n\tclangOption uint32\n\n\tidx clang.Index\n\tcd *compilationdatabase.CompilationDatabase\n\tdb *indexdb.IndexDB\n\n\tdispatcher *dispatcher\n\n\tdebugUncatched bool \/\/ for debug\n\tuncachedKind map[clang.CursorKind]int \/\/ for debug\n}\n\n\/\/ Config represents a parser config.\ntype Config struct {\n\tJSONName string\n\tPathRange []string\n\tClangOption uint32\n\n\tDebug bool\n}\n\n\/\/ NewParser return the new Parser.\nfunc NewParser(path string, config Config) *Parser {\n\troot, err := pathutil.FindProjectRoot(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcd := compilationdatabase.NewCompilationDatabase(root)\n\tif err := cd.Parse(config.JSONName, config.PathRange); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdb, err := indexdb.NewIndexDB(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclangOption := config.ClangOption\n\tif clangOption == 0 {\n\t\tclangOption = defaultClangOption\n\t}\n\n\tp := &Parser{\n\t\troot: root,\n\t\tclangOption: clangOption,\n\t\tidx: clang.NewIndex(0, 1), \/\/ disable excludeDeclarationsFromPCH, enable displayDiagnostics\n\t\tcd: cd,\n\t\tdb: db,\n\t}\n\n\tif config.Debug {\n\t\tp.debugUncatched = true\n\t\tp.uncachedKind = make(map[clang.CursorKind]int)\n\t}\n\n\tif err := CreateBulitinHeaders(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn p\n}\n\n\/\/ CreateBulitinHeaders creates(dumps) a clang builtin header to cache directory.\nfunc CreateBulitinHeaders() error {\n\tbuiltinHdrDir := filepath.Join(pathutil.CacheDir(), \"clang\", \"include\")\n\tif !osutil.IsExist(builtinHdrDir) {\n\t\tif err := os.MkdirAll(builtinHdrDir, 0700); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\tfor _, fname := range builtinheader.AssetNames() {\n\t\tdata, err := builtinheader.AssetInfo(fname)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tif strings.Contains(data.Name(), string(filepath.Separator)) {\n\t\t\tdir, _ := filepath.Split(data.Name())\n\t\t\tif err := os.MkdirAll(filepath.Join(builtinHdrDir, dir), 0700); err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\n\t\tbuf, err := builtinheader.Asset(data.Name())\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tif err := ioutil.WriteFile(filepath.Join(builtinHdrDir, data.Name()), buf, 0600); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Parse parses the project directories.\nfunc (p *Parser) Parse() {\n\tdefer func() {\n\t\tp.db.Close()\n\t\tsymbol.Serve()\n\t}()\n\n\tccs := p.cd.CompileCommands()\n\tif len(ccs) == 0 {\n\t\tlog.Fatal(\"not walk\")\n\t}\n\n\tcompilerConfig := p.cd.CompilerConfig\n\tflags := append(compilerConfig.SystemCIncludeDir, compilerConfig.SystemFrameworkDir...)\n\n\t\/\/ TODO(zchee): needs include stdint.h?\n\tif i := stringsutil.IndexContainsSlice(ccs[0].Arguments, \"-std=\"); i > 0 {\n\t\tstd := ccs[0].Arguments[i][5:]\n\t\tswitch {\n\t\tcase strings.HasPrefix(std, \"c\"), strings.HasPrefix(std, \"gnu\"):\n\t\t\tif std[len(std)-2] == '8' || std[len(std)-2] == '9' {\n\t\t\t\tflags = append(flags, \"-include\", \"\/usr\/include\/stdint.h\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tflags = append(flags, \"-include\", \"\/usr\/include\/stdint.h\")\n\t}\n\tif !(filepath.Ext(ccs[0].File) == \".c\") {\n\t\tflags = append(flags, compilerConfig.SystemCXXIncludeDir...)\n\t}\n\n\tbuiltinHdrDir := filepath.Join(pathutil.CacheDir(), \"clang\", \"include\")\n\tflags = append(flags, \"-I\"+builtinHdrDir)\n\n\tp.dispatcher = newDispatcher(p.ParseFile)\n\tp.dispatcher.Start()\n\tfor i := 0; i < len(ccs); i++ {\n\t\targs := ccs[i].Arguments\n\t\targs = append(flags, args...)\n\t\tp.dispatcher.Add(parseArg{ccs[i].File, args})\n\t}\n\tp.dispatcher.Wait()\n}\n\ntype parseArg struct {\n\tfilename string\n\tflag []string\n}\n\n\/\/ ParseFile parses the C\/C++ file.\nfunc (p *Parser) ParseFile(arg parseArg) error {\n\tvar tu clang.TranslationUnit\n\n\tif p.db.Has(arg.filename) {\n\t\tbuf, err := p.db.Get(arg.filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttu, err = deserializeTranslationUnit(p.idx, buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer tu.Dispose()\n\n\t\tlog.Debugf(\"tu.Spelling(): %T => %+v\\n\", tu.Spelling(), tu.Spelling()) \/\/ for debug\n\n\t\treturn nil\n\t}\n\n\tif cErr := p.idx.ParseTranslationUnit2(arg.filename, arg.flag, nil, p.clangOption, &tu); clang.ErrorCode(cErr) != clang.Error_Success {\n\t\treturn errors.New(clang.ErrorCode(cErr).Spelling())\n\t}\n\tdefer tu.Dispose()\n\n\ttuch := make(chan []byte, 1)\n\tgo func() {\n\t\ttuch <- serializeTranslationUnit(arg.filename, tu)\n\t}()\n\n\tprintDiagnostics(tu.Diagnostics())\n\n\trootCursor := tu.TranslationUnitCursor()\n\tfile := symbol.NewFile(arg.filename)\n\tvisitNode := func(cursor, parent clang.Cursor) clang.ChildVisitResult {\n\t\tif cursor.IsNull() {\n\t\t\tlog.Debug(\"cursor: <none>\")\n\t\t\treturn clang.ChildVisit_Continue\n\t\t}\n\n\t\tcursorLoc := symbol.FromCursor(cursor)\n\t\tif cursorLoc.FileName() == \"\" || cursorLoc.FileName() == \".\" {\n\t\t\t\/\/ TODO(zchee): Ignore system header(?)\n\t\t\treturn clang.ChildVisit_Continue\n\t\t}\n\n\t\tkind := cursor.Kind()\n\t\tswitch kind {\n\t\tcase clang.Cursor_FunctionDecl, clang.Cursor_StructDecl, clang.Cursor_FieldDecl, clang.Cursor_TypedefDecl, clang.Cursor_EnumDecl, clang.Cursor_EnumConstantDecl:\n\t\t\tdefCursor := cursor.Definition()\n\t\t\tif defCursor.IsNull() {\n\t\t\t\tfile.AddDecl(cursorLoc)\n\t\t\t} else {\n\t\t\t\tdefLoc := symbol.FromCursor(defCursor)\n\t\t\t\tfile.AddDefinition(cursorLoc, defLoc)\n\t\t\t}\n\t\tcase clang.Cursor_MacroDefinition:\n\t\t\tfile.AddDefinition(cursorLoc, cursorLoc)\n\t\tcase clang.Cursor_VarDecl:\n\t\t\tfile.AddDecl(cursorLoc)\n\t\tcase clang.Cursor_ParmDecl:\n\t\t\tif cursor.Spelling() != \"\" {\n\t\t\t\tfile.AddDecl(cursorLoc)\n\t\t\t}\n\t\tcase clang.Cursor_CallExpr:\n\t\t\trefCursor := cursor.Referenced()\n\t\t\trefLoc := symbol.FromCursor(refCursor)\n\t\t\tfile.AddCaller(cursorLoc, refLoc, true)\n\t\tcase clang.Cursor_DeclRefExpr, clang.Cursor_TypeRef, clang.Cursor_MemberRefExpr, clang.Cursor_MacroExpansion:\n\t\t\trefCursor := cursor.Referenced()\n\t\t\trefLoc := symbol.FromCursor(refCursor)\n\t\t\tfile.AddCaller(cursorLoc, refLoc, false)\n\t\tcase clang.Cursor_InclusionDirective:\n\t\t\tincFile := cursor.IncludedFile()\n\t\t\tfile.AddHeader(cursor.Spelling(), incFile)\n\t\tdefault:\n\t\t\tif p.debugUncatched {\n\t\t\t\tp.uncachedKind[kind]++\n\t\t\t}\n\t\t}\n\n\t\treturn clang.ChildVisit_Recurse\n\t}\n\n\trootCursor.Visit(visitNode)\n\tfile.AddTranslationUnit(<-tuch)\n\tbuf := file.Serialize()\n\n\tout := symbol.GetRootAsFile(buf.FinishedBytes(), 0)\n\tprintFile(out) \/\/ for debug\n\n\tlog.Debugf(\"Goroutine:%d\", runtime.NumGoroutine())\n\tlog.Debugf(\"\\n================== DONE: filename: %+v ==================\\n\\n\\n\", arg.filename)\n\n\treturn p.db.Put(arg.filename, buf.FinishedBytes())\n}\n\n\/\/ serializeTranslationUnit selialize the TranslationUnit to Clang serialized representation.\n\/\/ TODO(zchee): Avoid ioutil.TempFile if possible.\nfunc serializeTranslationUnit(filename string, tu clang.TranslationUnit) []byte {\n\ttmpFile, err := ioutil.TempFile(os.TempDir(), filepath.Base(filename))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsaveOptions := uint32(clang.TranslationUnit_KeepGoing)\n\tif cErr := tu.SaveTranslationUnit(tmpFile.Name(), saveOptions); clang.SaveError(cErr) != clang.SaveError_None {\n\t\tlog.Fatal(clang.SaveError(cErr))\n\t}\n\n\tbuf, err := ioutil.ReadFile(tmpFile.Name())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tos.Remove(tmpFile.Name())\n\n\treturn buf\n}\n\nfunc deserializeTranslationUnit(idx clang.Index, buf []byte) (clang.TranslationUnit, error) {\n\tvar tu clang.TranslationUnit\n\n\ttmpfile, err := ioutil.TempFile(os.TempDir(), \"clang-server\")\n\tif err != nil {\n\t\treturn tu, err\n\t}\n\tbinary.Write(tmpfile, binary.LittleEndian, buf)\n\n\tif err := idx.TranslationUnit2(tmpfile.Name(), &tu); clang.ErrorCode(err) != clang.Error_Success {\n\t\treturn tu, errors.New(err.Spelling())\n\t}\n\t\/\/ finished create a translation unit from an AST file, remove tmpfile\n\tos.Remove(tmpfile.Name())\n\n\treturn tu, nil\n}\n\n\/\/ ClangVersion return the current clang version.\nfunc ClangVersion() string {\n\treturn clang.GetClangVersion()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015-2016 Sevki <s@sevki.org>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage parser \/\/ import \"sevki.org\/build\/parser\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"sevki.org\/build\/token\"\n\n\t\"sevki.org\/build\/ast\"\n\t\"sevki.org\/build\/lexer\"\n)\n\nvar (\n\tErrConsumption = errors.New(\"consumption error\")\n)\n\ntype Parser struct {\n\tname string\n\tPath string\n\tlexer *lexer.Lexer\n\tDecls chan ast.Decl\n\tstate stateFn\n\tpeekTok token.Token\n\tcurTok token.Token\n\tError error\n}\n\nfunc (p *Parser) peek() token.Token {\n\treturn p.peekTok\n}\n\nfunc (p *Parser) next() token.Token {\nIGNORETOKEN:\n\tt := <-p.lexer.Tokens\n\n\tswitch t.Type {\n\tcase token.Error:\n\t\tp.errorf(\"%q\", t)\n\tcase token.Newline:\n\t\tgoto IGNORETOKEN\n\t}\n\n\ttok := p.peekTok\n\tp.peekTok = t\n\tp.curTok = tok\n\n\treturn tok\n}\n\nfunc (p *Parser) errorf(format string, args ...interface{}) error {\n\tp.curTok = token.Token{Type: token.Error}\n\tp.peekTok = token.Token{Type: token.EOF}\n\tp.Error = fmt.Errorf(format, args...)\n\treturn p.Error\n}\n\nfunc New(name, path string, r io.Reader) *Parser {\n\tp := &Parser{\n\t\tname: name,\n\t\tPath: path,\n\t\tlexer: lexer.New(name, r),\n\t\tDecls: make(chan ast.Decl),\n\t}\n\treturn p\n}\n\nfunc (p *Parser) Run() {\n\tp.next()\n\tfor p.state = parseDecl; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tp.Decls <- nil\n\tclose(p.Decls)\n\n}\n\ntype stateFn func(*Parser) stateFn\n\nfunc parseDecl(p *Parser) stateFn {\n\tswitch p.peek().Type {\n\tcase token.Func:\n\t\treturn parseFunc\n\tcase token.String:\n\t\treturn parseVar\n\t}\n\treturn nil\n}\n\nfunc parseFunc(p *Parser) stateFn {\n\tif f, err := p.consumeFunc(); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t} else {\n\t\tp.Decls <- f\n\t}\n\treturn parseDecl\n}\n\nfunc parseVar(p *Parser) stateFn {\n\tt := p.next()\n\n\tif err := p.expects(t, token.String); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t}\n\tif err := p.expects(p.next(), token.Equal); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t}\n\n\tswitch p.peek().Type {\n\tcase token.LeftBrac, token.LeftCurly, token.String, token.Quote, token.True, token.False, token.Func:\n\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\ta := ast.Assignment{\n\t\t\t\tKey: t.String(),\n\t\t\t\tValue: n,\n\t\t\t}\n\t\t\ta.SetEnd(t)\n\t\t\ta.SetStart(p.curTok)\n\t\t\tp.Decls <- &a\n\t\t}\n\t}\n\n\treturn parseDecl\n}\n\nfunc (p *Parser) consumeNode() (interface{}, error) {\n\tvar r interface{}\n\tvar err error\n\n\tswitch p.peek().Type {\n\tcase token.Quote:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.True:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.False:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.String:\n\t\tr, err = ast.Variable{Key: p.next().String()}, nil\n\tcase token.LeftBrac:\n\t\tr, err = p.consumeSlice()\n\tcase token.LeftCurly:\n\t\tr, err = p.consumeMap()\n\tcase token.Func:\n\t\tr, err = p.consumeFunc()\n\tdefault:\n\t\treturn nil, ErrConsumption\n\t}\nREPROCESS:\n\tswitch p.peek().Type {\n\tcase token.Plus:\n\t\tr, err = p.consumeAddFunc(r)\n\t\tgoto REPROCESS\n\tcase token.LeftBrac:\n\t\tr, err = p.consumeSliceFunc(r)\n\t\tgoto REPROCESS\n\t}\n\treturn r, err\n}\n\nfunc (p *Parser) consumeAddFunc(v interface{}) (*ast.Func, error) {\n\tf := &ast.Func{\n\t\tName: \"addition\",\n\t}\n\n\tf.File = p.name\n\tf.SetStart(p.curTok)\n\n\tf.AnonParams = []interface{}{v}\n\n\tfor p.peek().Type == token.Plus {\n\n\t\tp.next()\n\t\tswitch p.peek().Type {\n\t\tcase token.String:\n\t\t\tf.AnonParams = append(\n\t\t\t\tf.AnonParams,\n\t\t\t\tast.Variable{Key: p.next().String()},\n\t\t\t)\n\t\tcase token.Quote:\n\t\t\tf.AnonParams = append(\n\t\t\t\tf.AnonParams,\n\t\t\t\tp.next().String(),\n\t\t\t)\n\t\t}\n\t}\n\n\tf.SetEnd(p.curTok)\n\treturn f, nil\n}\n\nfunc (p *Parser) consumeSliceFunc(v interface{}) (*ast.Func, error) {\n\n\tf := &ast.Func{\n\t\tParams: make(map[string]interface{}),\n\t}\n\tf.File = p.name\n\tf.SetStart(p.curTok)\n\n\t\/\/ advance [\n\tp.next()\n\n\tf.Params[\"var\"] = v\n\tif p.peek().Type == token.Colon {\n\t\t\/\/ advance :\n\t\tp.next()\n\t\tf.Name = \"slice\"\n\t\tf.Params[\"start\"] = 0\n\t\tnode, err := p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tf.Params[\"end\"] = node\n\t\tgoto END\n\t} else if p.peek().Type == token.Int {\n\t\tnode, err := p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif p.peek().Type == token.RightBrac {\n\t\t\tf.Name = \"index\"\n\t\t\tf.Params[\"index\"] = node\n\t\t\tgoto END\n\t\t} else if p.peek().Type == token.Colon {\n\t\t\t\/\/ advance :\n\t\t\tp.next()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"this is a malformed slice\")\n\t\t}\n\t\tf.Name = \"slice\"\n\n\t\tnode, err = p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tf.Params[\"start\"] = node\n\t\tif p.peek().Type == token.Int {\n\t\t\tnode, err = p.consumeNode()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tf.Params[\"end\"] = node\n\t\t} else if p.peek().Type == token.RightBrac {\n\t\t\tgoto END\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"this is a malformed slice\")\n\t\t}\n\n\t}\nEND:\n\t\/\/ advance ]\n\tf.SetEnd(p.next())\n\treturn f, nil\n}\nfunc (p *Parser) consumeParams(f *ast.Func) error {\n\tfor {\n\t\tswitch p.peek().Type {\n\t\tcase token.Quote, token.LeftBrac, token.Func:\n\t\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tf.AnonParams = append(f.AnonParams, n)\n\t\t\t}\n\t\tcase token.String:\n\t\t\tt := p.next()\n\t\t\tif f.Params == nil {\n\t\t\t\tf.Params = make(map[string]interface{})\n\t\t\t}\n\n\t\t\tif err := p.expects(p.peek(), token.Colon, token.Equal); err == nil {\n\t\t\t\tswitch p.next().Type {\n\t\t\t\tcase token.Colon:\n\t\t\t\tcase token.Equal:\n\t\t\t\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.Params[t.String()] = n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn ErrConsumption\n\t\t}\n\n\t\tif err := p.expects(p.peek(), token.RightParen, token.Comma); err == nil {\n\t\tDANGLING_COMMA:\n\t\t\tswitch p.peek().Type {\n\t\t\tcase token.RightParen:\n\t\t\t\tp.next()\n\t\t\t\treturn nil\n\t\t\tcase token.Comma:\n\t\t\t\tp.next()\n\t\t\t\tif p.peek().Type == token.RightParen {\n\t\t\t\t\tgoto DANGLING_COMMA\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\n\t}\n}\nfunc (p *Parser) consumeMap() (*ast.Map, error) {\n\tt := p.next()\n\t_map := ast.Map{\n\t\tValue: make(map[string]interface{}),\n\t}\n\t_map.SetStart(t)\n\n\tfor p.peek().Type != token.RightCurly {\n\t\tt := p.next()\n\t\tif err := p.expects(t, token.Quote); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := p.expects(p.next(), token.Colon); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t_map.Value[t.String()] = n\n\t\t}\n\t\tif p.peek().Type == token.Comma {\n\t\t\tp.next()\n\t\t} else if err := p.expects(p.peek(), token.RightCurly); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ advance }\n\n\t_map.SetEnd(p.next())\n\treturn &_map, nil\n}\nfunc (p *Parser) consumeFunc() (*ast.Func, error) {\n\tt := p.next()\n\tif err := p.expects(t, token.Func); err != nil {\n\t\treturn nil, err\n\t}\n\tf := ast.Func{\n\t\tName: t.String(),\n\t}\n\n\tf.File = p.name\n\tf.Start = ast.Position{\n\t\tLine: t.Line,\n\t\tIndex: t.Start,\n\t}\n\n\tt = p.next()\n\tif err := p.expects(t, token.LeftParen); err != nil {\n\t\treturn nil, err\n\t}\n\tp.consumeParams(&f)\n\treturn &f, nil\n}\n\nfunc (p *Parser) consumeSlice() (ast.Slice, error) {\n\tvar _slice ast.Slice\n\n\tif err := p.expects(p.peek(), token.LeftBrac); err != nil {\n\t\treturn _slice, err\n\t} else {\n\t\t_slice.SetStart(p.next())\n\t}\n\n\n\tfor p.peek().Type != token.RightBrac {\n\t\t_slice.Slice = append(_slice.Slice, p.next().String())\n\t\tif p.peek().Type == token.Comma {\n\t\t\tp.next()\n\t\t} else if err := p.expects(p.peek(), token.RightBrac); err != nil {\n\t\t\treturn _slice, err\n\t\t}\n\t}\n\n\t\/\/ advance ]\n\t_slice.SetEnd(p.next())\n\n\treturn _slice, nil\n}\n<commit_msg>parser: slice consumes nodes<commit_after>\/\/ Copyright 2015-2016 Sevki <s@sevki.org>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage parser \/\/ import \"sevki.org\/build\/parser\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"sevki.org\/build\/token\"\n\n\t\"sevki.org\/build\/ast\"\n\t\"sevki.org\/build\/lexer\"\n)\n\nvar (\n\tErrConsumption = errors.New(\"consumption error\")\n)\n\ntype Parser struct {\n\tname string\n\tPath string\n\tlexer *lexer.Lexer\n\tDecls chan ast.Decl\n\tstate stateFn\n\tpeekTok token.Token\n\tcurTok token.Token\n\tError error\n}\n\nfunc (p *Parser) peek() token.Token {\n\treturn p.peekTok\n}\n\nfunc (p *Parser) next() token.Token {\nIGNORETOKEN:\n\tt := <-p.lexer.Tokens\n\n\tswitch t.Type {\n\tcase token.Error:\n\t\tp.errorf(\"%q\", t)\n\tcase token.Newline:\n\t\tgoto IGNORETOKEN\n\t}\n\n\ttok := p.peekTok\n\tp.peekTok = t\n\tp.curTok = tok\n\n\treturn tok\n}\n\nfunc (p *Parser) errorf(format string, args ...interface{}) error {\n\tp.curTok = token.Token{Type: token.Error}\n\tp.peekTok = token.Token{Type: token.EOF}\n\tp.Error = fmt.Errorf(format, args...)\n\treturn p.Error\n}\n\nfunc New(name, path string, r io.Reader) *Parser {\n\tp := &Parser{\n\t\tname: name,\n\t\tPath: path,\n\t\tlexer: lexer.New(name, r),\n\t\tDecls: make(chan ast.Decl),\n\t}\n\treturn p\n}\n\nfunc (p *Parser) Run() {\n\tp.next()\n\tfor p.state = parseDecl; p.state != nil; {\n\t\tp.state = p.state(p)\n\t}\n\tp.Decls <- nil\n\tclose(p.Decls)\n\n}\n\ntype stateFn func(*Parser) stateFn\n\nfunc parseDecl(p *Parser) stateFn {\n\tswitch p.peek().Type {\n\tcase token.Func:\n\t\treturn parseFunc\n\tcase token.String:\n\t\treturn parseVar\n\t}\n\treturn nil\n}\n\nfunc parseFunc(p *Parser) stateFn {\n\tif f, err := p.consumeFunc(); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t} else {\n\t\tp.Decls <- f\n\t}\n\treturn parseDecl\n}\n\nfunc parseVar(p *Parser) stateFn {\n\tt := p.next()\n\n\tif err := p.expects(t, token.String); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t}\n\tif err := p.expects(p.next(), token.Equal); err != nil {\n\t\tp.Error = err\n\t\treturn nil\n\t}\n\n\tswitch p.peek().Type {\n\tcase token.LeftBrac, token.LeftCurly, token.String, token.Quote, token.True, token.False, token.Func:\n\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\ta := ast.Assignment{\n\t\t\t\tKey: t.String(),\n\t\t\t\tValue: n,\n\t\t\t}\n\t\t\ta.SetEnd(t)\n\t\t\ta.SetStart(p.curTok)\n\t\t\tp.Decls <- &a\n\t\t}\n\t}\n\n\treturn parseDecl\n}\n\nfunc (p *Parser) consumeNode() (interface{}, error) {\n\tvar r interface{}\n\tvar err error\n\n\tswitch p.peek().Type {\n\tcase token.Quote:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.True:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.False:\n\t\tr, err = ast.NewBasicLit(p.next()), nil\n\tcase token.String:\n\t\tr, err = ast.Variable{Key: p.next().String()}, nil\n\tcase token.LeftBrac:\n\t\tr, err = p.consumeSlice()\n\tcase token.LeftCurly:\n\t\tr, err = p.consumeMap()\n\tcase token.Func:\n\t\tr, err = p.consumeFunc()\n\tdefault:\n\t\treturn nil, ErrConsumption\n\t}\nREPROCESS:\n\tswitch p.peek().Type {\n\tcase token.Plus:\n\t\tr, err = p.consumeAddFunc(r)\n\t\tgoto REPROCESS\n\tcase token.LeftBrac:\n\t\tr, err = p.consumeSliceFunc(r)\n\t\tgoto REPROCESS\n\t}\n\treturn r, err\n}\n\nfunc (p *Parser) consumeAddFunc(v interface{}) (*ast.Func, error) {\n\tf := &ast.Func{\n\t\tName: \"addition\",\n\t}\n\n\tf.File = p.name\n\tf.SetStart(p.curTok)\n\n\tf.AnonParams = []interface{}{v}\n\n\tfor p.peek().Type == token.Plus {\n\n\t\tp.next()\n\t\tswitch p.peek().Type {\n\t\tcase token.String:\n\t\t\tf.AnonParams = append(\n\t\t\t\tf.AnonParams,\n\t\t\t\tast.Variable{Key: p.next().String()},\n\t\t\t)\n\t\tcase token.Quote:\n\t\t\tf.AnonParams = append(\n\t\t\t\tf.AnonParams,\n\t\t\t\tp.next().String(),\n\t\t\t)\n\t\t}\n\t}\n\n\tf.SetEnd(p.curTok)\n\treturn f, nil\n}\n\nfunc (p *Parser) consumeSliceFunc(v interface{}) (*ast.Func, error) {\n\n\tf := &ast.Func{\n\t\tParams: make(map[string]interface{}),\n\t}\n\tf.File = p.name\n\tf.SetStart(p.curTok)\n\n\t\/\/ advance [\n\tp.next()\n\n\tf.Params[\"var\"] = v\n\tif p.peek().Type == token.Colon {\n\t\t\/\/ advance :\n\t\tp.next()\n\t\tf.Name = \"slice\"\n\t\tf.Params[\"start\"] = 0\n\t\tnode, err := p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tf.Params[\"end\"] = node\n\t\tgoto END\n\t} else if p.peek().Type == token.Int {\n\t\tnode, err := p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif p.peek().Type == token.RightBrac {\n\t\t\tf.Name = \"index\"\n\t\t\tf.Params[\"index\"] = node\n\t\t\tgoto END\n\t\t} else if p.peek().Type == token.Colon {\n\t\t\t\/\/ advance :\n\t\t\tp.next()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"this is a malformed slice\")\n\t\t}\n\t\tf.Name = \"slice\"\n\n\t\tnode, err = p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tf.Params[\"start\"] = node\n\t\tif p.peek().Type == token.Int {\n\t\t\tnode, err = p.consumeNode()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tf.Params[\"end\"] = node\n\t\t} else if p.peek().Type == token.RightBrac {\n\t\t\tgoto END\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"this is a malformed slice\")\n\t\t}\n\n\t}\nEND:\n\t\/\/ advance ]\n\tf.SetEnd(p.next())\n\treturn f, nil\n}\nfunc (p *Parser) consumeParams(f *ast.Func) error {\n\tfor {\n\t\tswitch p.peek().Type {\n\t\tcase token.Quote, token.LeftBrac, token.Func:\n\t\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tf.AnonParams = append(f.AnonParams, n)\n\t\t\t}\n\t\tcase token.String:\n\t\t\tt := p.next()\n\t\t\tif f.Params == nil {\n\t\t\t\tf.Params = make(map[string]interface{})\n\t\t\t}\n\n\t\t\tif err := p.expects(p.peek(), token.Colon, token.Equal); err == nil {\n\t\t\t\tswitch p.next().Type {\n\t\t\t\tcase token.Colon:\n\t\t\t\tcase token.Equal:\n\t\t\t\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.Params[t.String()] = n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn ErrConsumption\n\t\t}\n\n\t\tif err := p.expects(p.peek(), token.RightParen, token.Comma); err == nil {\n\t\tDANGLING_COMMA:\n\t\t\tswitch p.peek().Type {\n\t\t\tcase token.RightParen:\n\t\t\t\tp.next()\n\t\t\t\treturn nil\n\t\t\tcase token.Comma:\n\t\t\t\tp.next()\n\t\t\t\tif p.peek().Type == token.RightParen {\n\t\t\t\t\tgoto DANGLING_COMMA\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\n\t}\n}\nfunc (p *Parser) consumeMap() (*ast.Map, error) {\n\tt := p.next()\n\t_map := ast.Map{\n\t\tValue: make(map[string]interface{}),\n\t}\n\t_map.SetStart(t)\n\n\tfor p.peek().Type != token.RightCurly {\n\t\tt := p.next()\n\t\tif err := p.expects(t, token.Quote); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := p.expects(p.next(), token.Colon); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif n, err := p.consumeNode(); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\t_map.Value[t.String()] = n\n\t\t}\n\t\tif p.peek().Type == token.Comma {\n\t\t\tp.next()\n\t\t} else if err := p.expects(p.peek(), token.RightCurly); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ advance }\n\n\t_map.SetEnd(p.next())\n\treturn &_map, nil\n}\nfunc (p *Parser) consumeFunc() (*ast.Func, error) {\n\tt := p.next()\n\tif err := p.expects(t, token.Func); err != nil {\n\t\treturn nil, err\n\t}\n\tf := ast.Func{\n\t\tName: t.String(),\n\t}\n\n\tf.File = p.name\n\tf.Start = ast.Position{\n\t\tLine: t.Line,\n\t\tIndex: t.Start,\n\t}\n\n\tt = p.next()\n\tif err := p.expects(t, token.LeftParen); err != nil {\n\t\treturn nil, err\n\t}\n\tp.consumeParams(&f)\n\treturn &f, nil\n}\n\nfunc (p *Parser) consumeSlice() (ast.Slice, error) {\n\tvar _slice ast.Slice\n\n\tif err := p.expects(p.peek(), token.LeftBrac); err != nil {\n\t\treturn _slice, err\n\t} else {\n\t\t_slice.SetStart(p.next())\n\t}\n\n\n\tfor p.peek().Type != token.RightBrac {\n\t\tnode, err := p.consumeNode()\n\t\tif err != nil {\n\t\t\treturn _slice, err\n\t\t}\n\t\t_slice.Slice = append(_slice.Slice, node)\n\t\tif p.peek().Type == token.Comma {\n\t\t\tp.next()\n\t\t} else if err := p.expects(p.peek(), token.RightBrac); err != nil {\n\t\t\treturn _slice, err\n\t\t}\n\t}\n\n\t\/\/ advance ]\n\t_slice.SetEnd(p.next())\n\n\treturn _slice, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package netlink\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/vishvananda\/netlink\/nl\"\n\t\"github.com\/vishvananda\/netns\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\n\/\/ IFA_FLAGS is a u32 attribute.\nconst IFA_FLAGS = 0x8\n\n\/\/ AddrAdd will add an IP address to a link device.\n\/\/ Equivalent to: `ip addr add $addr dev $link`\nfunc AddrAdd(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrAdd(link, addr)\n}\n\n\/\/ AddrAdd will add an IP address to a link device.\n\/\/ Equivalent to: `ip addr add $addr dev $link`\nfunc (h *Handle) AddrAdd(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\n\/\/ AddrReplace will replace (or, if not present, add) an IP address on a link device.\n\/\/ Equivalent to: `ip addr replace $addr dev $link`\nfunc AddrReplace(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrReplace(link, addr)\n}\n\n\/\/ AddrReplace will replace (or, if not present, add) an IP address on a link device.\n\/\/ Equivalent to: `ip addr replace $addr dev $link`\nfunc (h *Handle) AddrReplace(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\n\/\/ AddrDel will delete an IP address from a link device.\n\/\/ Equivalent to: `ip addr del $addr dev $link`\nfunc AddrDel(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrDel(link, addr)\n}\n\n\/\/ AddrDel will delete an IP address from a link device.\n\/\/ Equivalent to: `ip addr del $addr dev $link`\nfunc (h *Handle) AddrDel(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\nfunc (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {\n\tbase := link.Attrs()\n\tif addr.Label != \"\" && !strings.HasPrefix(addr.Label, base.Name) {\n\t\treturn fmt.Errorf(\"label must begin with interface name\")\n\t}\n\th.ensureIndex(base)\n\n\tfamily := nl.GetIPFamily(addr.IP)\n\n\tmsg := nl.NewIfAddrmsg(family)\n\tmsg.Index = uint32(base.Index)\n\tmsg.Scope = uint8(addr.Scope)\n\tprefixlen, masklen := addr.Mask.Size()\n\tmsg.Prefixlen = uint8(prefixlen)\n\treq.AddData(msg)\n\n\tvar localAddrData []byte\n\tif family == FAMILY_V4 {\n\t\tlocalAddrData = addr.IP.To4()\n\t} else {\n\t\tlocalAddrData = addr.IP.To16()\n\t}\n\n\tlocalData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData)\n\treq.AddData(localData)\n\tvar peerAddrData []byte\n\tif addr.Peer != nil {\n\t\tif family == FAMILY_V4 {\n\t\t\tpeerAddrData = addr.Peer.IP.To4()\n\t\t} else {\n\t\t\tpeerAddrData = addr.Peer.IP.To16()\n\t\t}\n\t} else {\n\t\tpeerAddrData = localAddrData\n\t}\n\n\taddressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData)\n\treq.AddData(addressData)\n\n\tif addr.Flags != 0 {\n\t\tif addr.Flags <= 0xff {\n\t\t\tmsg.IfAddrmsg.Flags = uint8(addr.Flags)\n\t\t} else {\n\t\t\tb := make([]byte, 4)\n\t\t\tnative.PutUint32(b, uint32(addr.Flags))\n\t\t\tflagsData := nl.NewRtAttr(IFA_FLAGS, b)\n\t\t\treq.AddData(flagsData)\n\t\t}\n\t}\n\n\tif addr.Broadcast == nil {\n\t\tcalcBroadcast := make(net.IP, masklen\/8)\n\t\tfor i := range localAddrData {\n\t\t\tcalcBroadcast[i] = localAddrData[i] | ^addr.Mask[i]\n\t\t}\n\t\taddr.Broadcast = calcBroadcast\n\t}\n\treq.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast))\n\n\tif addr.Label != \"\" {\n\t\tlabelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label))\n\t\treq.AddData(labelData)\n\t}\n\n\t_, err := req.Execute(unix.NETLINK_ROUTE, 0)\n\treturn err\n}\n\n\/\/ AddrList gets a list of IP addresses in the system.\n\/\/ Equivalent to: `ip addr show`.\n\/\/ The list can be filtered by link and ip family.\nfunc AddrList(link Link, family int) ([]Addr, error) {\n\treturn pkgHandle.AddrList(link, family)\n}\n\n\/\/ AddrList gets a list of IP addresses in the system.\n\/\/ Equivalent to: `ip addr show`.\n\/\/ The list can be filtered by link and ip family.\nfunc (h *Handle) AddrList(link Link, family int) ([]Addr, error) {\n\treq := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP)\n\tmsg := nl.NewIfInfomsg(family)\n\treq.AddData(msg)\n\n\tmsgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tindexFilter := 0\n\tif link != nil {\n\t\tbase := link.Attrs()\n\t\th.ensureIndex(base)\n\t\tindexFilter = base.Index\n\t}\n\n\tvar res []Addr\n\tfor _, m := range msgs {\n\t\taddr, msgFamily, ifindex, err := parseAddr(m)\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\n\t\tif link != nil && ifindex != indexFilter {\n\t\t\t\/\/ Ignore messages from other interfaces\n\t\t\tcontinue\n\t\t}\n\n\t\tif family != FAMILY_ALL && msgFamily != family {\n\t\t\tcontinue\n\t\t}\n\n\t\tres = append(res, addr)\n\t}\n\n\treturn res, nil\n}\n\nfunc parseAddr(m []byte) (addr Addr, family, index int, err error) {\n\tmsg := nl.DeserializeIfAddrmsg(m)\n\n\tfamily = -1\n\tindex = -1\n\n\tattrs, err1 := nl.ParseRouteAttr(m[msg.Len():])\n\tif err1 != nil {\n\t\terr = err1\n\t\treturn\n\t}\n\n\tfamily = int(msg.Family)\n\tindex = int(msg.Index)\n\n\tvar local, dst *net.IPNet\n\tfor _, attr := range attrs {\n\t\tswitch attr.Attr.Type {\n\t\tcase unix.IFA_ADDRESS:\n\t\t\tdst = &net.IPNet{\n\t\t\t\tIP: attr.Value,\n\t\t\t\tMask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),\n\t\t\t}\n\t\t\taddr.Peer = dst\n\t\tcase unix.IFA_LOCAL:\n\t\t\tlocal = &net.IPNet{\n\t\t\t\tIP: attr.Value,\n\t\t\t\tMask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),\n\t\t\t}\n\t\t\taddr.IPNet = local\n\t\tcase unix.IFA_BROADCAST:\n\t\t\taddr.Broadcast = attr.Value\n\t\tcase unix.IFA_LABEL:\n\t\t\taddr.Label = string(attr.Value[:len(attr.Value)-1])\n\t\tcase IFA_FLAGS:\n\t\t\taddr.Flags = int(native.Uint32(attr.Value[0:4]))\n\t\tcase nl.IFA_CACHEINFO:\n\t\t\tci := nl.DeserializeIfaCacheInfo(attr.Value)\n\t\t\taddr.PreferedLft = int(ci.IfaPrefered)\n\t\t\taddr.ValidLft = int(ci.IfaValid)\n\t\t}\n\t}\n\n\t\/\/ IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS\n\tif local != nil {\n\t\taddr.IPNet = local\n\t} else {\n\t\taddr.IPNet = dst\n\t}\n\taddr.Scope = int(msg.Scope)\n\n\treturn\n}\n\ntype AddrUpdate struct {\n\tLinkAddress net.IPNet\n\tLinkIndex int\n\tFlags int\n\tScope int\n\tPreferedLft int\n\tValidLft int\n\tNewAddr bool \/\/ true=added false=deleted\n}\n\n\/\/ AddrSubscribe takes a chan down which notifications will be sent\n\/\/ when addresses change. Close the 'done' chan to stop subscription.\nfunc AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {\n\treturn addrSubscribeAt(netns.None(), netns.None(), ch, done, nil)\n}\n\n\/\/ AddrSubscribeAt works like AddrSubscribe plus it allows the caller\n\/\/ to choose the network namespace in which to subscribe (ns).\nfunc AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {\n\treturn addrSubscribeAt(ns, netns.None(), ch, done, nil)\n}\n\n\/\/ AddrSubscribeOptions contains a set of options to use with\n\/\/ AddrSubscribeWithOptions.\ntype AddrSubscribeOptions struct {\n\tNamespace *netns.NsHandle\n\tErrorCallback func(error)\n}\n\n\/\/ AddrSubscribeWithOptions work like AddrSubscribe but enable to\n\/\/ provide additional options to modify the behavior. Currently, the\n\/\/ namespace can be provided as well as an error callback.\nfunc AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error {\n\tif options.Namespace == nil {\n\t\tnone := netns.None()\n\t\toptions.Namespace = &none\n\t}\n\treturn addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)\n}\n\nfunc addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error {\n\ts, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif done != nil {\n\t\tgo func() {\n\t\t\t<-done\n\t\t\ts.Close()\n\t\t}()\n\t}\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor {\n\t\t\tmsgs, err := s.Receive()\n\t\t\tif err != nil {\n\t\t\t\tif cberr != nil {\n\t\t\t\t\tcberr(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, m := range msgs {\n\t\t\t\tmsgType := m.Header.Type\n\t\t\t\tif msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR {\n\t\t\t\t\tif cberr != nil {\n\t\t\t\t\t\tcberr(fmt.Errorf(\"bad message type: %d\", msgType))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\taddr, _, ifindex, err := parseAddr(m.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cberr != nil {\n\t\t\t\t\t\tcberr(fmt.Errorf(\"could not parse address: %v\", err))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tch <- AddrUpdate{LinkAddress: *addr.IPNet,\n\t\t\t\t\tLinkIndex: ifindex,\n\t\t\t\t\tNewAddr: msgType == unix.RTM_NEWADDR,\n\t\t\t\t\tFlags: addr.Flags,\n\t\t\t\t\tScope: addr.Scope,\n\t\t\t\t\tPreferedLft: addr.PreferedLft,\n\t\t\t\t\tValidLft: addr.ValidLft}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n<commit_msg>addr_linux: Skip BROADCAST and LABEL for non-ipv4<commit_after>package netlink\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/vishvananda\/netlink\/nl\"\n\t\"github.com\/vishvananda\/netns\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\n\/\/ IFA_FLAGS is a u32 attribute.\nconst IFA_FLAGS = 0x8\n\n\/\/ AddrAdd will add an IP address to a link device.\n\/\/ Equivalent to: `ip addr add $addr dev $link`\nfunc AddrAdd(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrAdd(link, addr)\n}\n\n\/\/ AddrAdd will add an IP address to a link device.\n\/\/ Equivalent to: `ip addr add $addr dev $link`\nfunc (h *Handle) AddrAdd(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\n\/\/ AddrReplace will replace (or, if not present, add) an IP address on a link device.\n\/\/ Equivalent to: `ip addr replace $addr dev $link`\nfunc AddrReplace(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrReplace(link, addr)\n}\n\n\/\/ AddrReplace will replace (or, if not present, add) an IP address on a link device.\n\/\/ Equivalent to: `ip addr replace $addr dev $link`\nfunc (h *Handle) AddrReplace(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\n\/\/ AddrDel will delete an IP address from a link device.\n\/\/ Equivalent to: `ip addr del $addr dev $link`\nfunc AddrDel(link Link, addr *Addr) error {\n\treturn pkgHandle.AddrDel(link, addr)\n}\n\n\/\/ AddrDel will delete an IP address from a link device.\n\/\/ Equivalent to: `ip addr del $addr dev $link`\nfunc (h *Handle) AddrDel(link Link, addr *Addr) error {\n\treq := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK)\n\treturn h.addrHandle(link, addr, req)\n}\n\nfunc (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {\n\tbase := link.Attrs()\n\tif addr.Label != \"\" && !strings.HasPrefix(addr.Label, base.Name) {\n\t\treturn fmt.Errorf(\"label must begin with interface name\")\n\t}\n\th.ensureIndex(base)\n\n\tfamily := nl.GetIPFamily(addr.IP)\n\n\tmsg := nl.NewIfAddrmsg(family)\n\tmsg.Index = uint32(base.Index)\n\tmsg.Scope = uint8(addr.Scope)\n\tprefixlen, masklen := addr.Mask.Size()\n\tmsg.Prefixlen = uint8(prefixlen)\n\treq.AddData(msg)\n\n\tvar localAddrData []byte\n\tif family == FAMILY_V4 {\n\t\tlocalAddrData = addr.IP.To4()\n\t} else {\n\t\tlocalAddrData = addr.IP.To16()\n\t}\n\n\tlocalData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData)\n\treq.AddData(localData)\n\tvar peerAddrData []byte\n\tif addr.Peer != nil {\n\t\tif family == FAMILY_V4 {\n\t\t\tpeerAddrData = addr.Peer.IP.To4()\n\t\t} else {\n\t\t\tpeerAddrData = addr.Peer.IP.To16()\n\t\t}\n\t} else {\n\t\tpeerAddrData = localAddrData\n\t}\n\n\taddressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData)\n\treq.AddData(addressData)\n\n\tif addr.Flags != 0 {\n\t\tif addr.Flags <= 0xff {\n\t\t\tmsg.IfAddrmsg.Flags = uint8(addr.Flags)\n\t\t} else {\n\t\t\tb := make([]byte, 4)\n\t\t\tnative.PutUint32(b, uint32(addr.Flags))\n\t\t\tflagsData := nl.NewRtAttr(IFA_FLAGS, b)\n\t\t\treq.AddData(flagsData)\n\t\t}\n\t}\n\n\tif family == FAMILY_V4 {\n\t\tif addr.Broadcast == nil {\n\t\t\tcalcBroadcast := make(net.IP, masklen\/8)\n\t\t\tfor i := range localAddrData {\n\t\t\t\tcalcBroadcast[i] = localAddrData[i] | ^addr.Mask[i]\n\t\t\t}\n\t\t\taddr.Broadcast = calcBroadcast\n\t\t}\n\t\treq.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast))\n\n\t\tif addr.Label != \"\" {\n\t\t\tlabelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label))\n\t\t\treq.AddData(labelData)\n\t\t}\n\t}\n\n\t_, err := req.Execute(unix.NETLINK_ROUTE, 0)\n\treturn err\n}\n\n\/\/ AddrList gets a list of IP addresses in the system.\n\/\/ Equivalent to: `ip addr show`.\n\/\/ The list can be filtered by link and ip family.\nfunc AddrList(link Link, family int) ([]Addr, error) {\n\treturn pkgHandle.AddrList(link, family)\n}\n\n\/\/ AddrList gets a list of IP addresses in the system.\n\/\/ Equivalent to: `ip addr show`.\n\/\/ The list can be filtered by link and ip family.\nfunc (h *Handle) AddrList(link Link, family int) ([]Addr, error) {\n\treq := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP)\n\tmsg := nl.NewIfInfomsg(family)\n\treq.AddData(msg)\n\n\tmsgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tindexFilter := 0\n\tif link != nil {\n\t\tbase := link.Attrs()\n\t\th.ensureIndex(base)\n\t\tindexFilter = base.Index\n\t}\n\n\tvar res []Addr\n\tfor _, m := range msgs {\n\t\taddr, msgFamily, ifindex, err := parseAddr(m)\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\n\t\tif link != nil && ifindex != indexFilter {\n\t\t\t\/\/ Ignore messages from other interfaces\n\t\t\tcontinue\n\t\t}\n\n\t\tif family != FAMILY_ALL && msgFamily != family {\n\t\t\tcontinue\n\t\t}\n\n\t\tres = append(res, addr)\n\t}\n\n\treturn res, nil\n}\n\nfunc parseAddr(m []byte) (addr Addr, family, index int, err error) {\n\tmsg := nl.DeserializeIfAddrmsg(m)\n\n\tfamily = -1\n\tindex = -1\n\n\tattrs, err1 := nl.ParseRouteAttr(m[msg.Len():])\n\tif err1 != nil {\n\t\terr = err1\n\t\treturn\n\t}\n\n\tfamily = int(msg.Family)\n\tindex = int(msg.Index)\n\n\tvar local, dst *net.IPNet\n\tfor _, attr := range attrs {\n\t\tswitch attr.Attr.Type {\n\t\tcase unix.IFA_ADDRESS:\n\t\t\tdst = &net.IPNet{\n\t\t\t\tIP: attr.Value,\n\t\t\t\tMask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),\n\t\t\t}\n\t\t\taddr.Peer = dst\n\t\tcase unix.IFA_LOCAL:\n\t\t\tlocal = &net.IPNet{\n\t\t\t\tIP: attr.Value,\n\t\t\t\tMask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),\n\t\t\t}\n\t\t\taddr.IPNet = local\n\t\tcase unix.IFA_BROADCAST:\n\t\t\taddr.Broadcast = attr.Value\n\t\tcase unix.IFA_LABEL:\n\t\t\taddr.Label = string(attr.Value[:len(attr.Value)-1])\n\t\tcase IFA_FLAGS:\n\t\t\taddr.Flags = int(native.Uint32(attr.Value[0:4]))\n\t\tcase nl.IFA_CACHEINFO:\n\t\t\tci := nl.DeserializeIfaCacheInfo(attr.Value)\n\t\t\taddr.PreferedLft = int(ci.IfaPrefered)\n\t\t\taddr.ValidLft = int(ci.IfaValid)\n\t\t}\n\t}\n\n\t\/\/ IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS\n\tif local != nil {\n\t\taddr.IPNet = local\n\t} else {\n\t\taddr.IPNet = dst\n\t}\n\taddr.Scope = int(msg.Scope)\n\n\treturn\n}\n\ntype AddrUpdate struct {\n\tLinkAddress net.IPNet\n\tLinkIndex int\n\tFlags int\n\tScope int\n\tPreferedLft int\n\tValidLft int\n\tNewAddr bool \/\/ true=added false=deleted\n}\n\n\/\/ AddrSubscribe takes a chan down which notifications will be sent\n\/\/ when addresses change. Close the 'done' chan to stop subscription.\nfunc AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {\n\treturn addrSubscribeAt(netns.None(), netns.None(), ch, done, nil)\n}\n\n\/\/ AddrSubscribeAt works like AddrSubscribe plus it allows the caller\n\/\/ to choose the network namespace in which to subscribe (ns).\nfunc AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {\n\treturn addrSubscribeAt(ns, netns.None(), ch, done, nil)\n}\n\n\/\/ AddrSubscribeOptions contains a set of options to use with\n\/\/ AddrSubscribeWithOptions.\ntype AddrSubscribeOptions struct {\n\tNamespace *netns.NsHandle\n\tErrorCallback func(error)\n}\n\n\/\/ AddrSubscribeWithOptions work like AddrSubscribe but enable to\n\/\/ provide additional options to modify the behavior. Currently, the\n\/\/ namespace can be provided as well as an error callback.\nfunc AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error {\n\tif options.Namespace == nil {\n\t\tnone := netns.None()\n\t\toptions.Namespace = &none\n\t}\n\treturn addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)\n}\n\nfunc addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error {\n\ts, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif done != nil {\n\t\tgo func() {\n\t\t\t<-done\n\t\t\ts.Close()\n\t\t}()\n\t}\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor {\n\t\t\tmsgs, err := s.Receive()\n\t\t\tif err != nil {\n\t\t\t\tif cberr != nil {\n\t\t\t\t\tcberr(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, m := range msgs {\n\t\t\t\tmsgType := m.Header.Type\n\t\t\t\tif msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR {\n\t\t\t\t\tif cberr != nil {\n\t\t\t\t\t\tcberr(fmt.Errorf(\"bad message type: %d\", msgType))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\taddr, _, ifindex, err := parseAddr(m.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cberr != nil {\n\t\t\t\t\t\tcberr(fmt.Errorf(\"could not parse address: %v\", err))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tch <- AddrUpdate{LinkAddress: *addr.IPNet,\n\t\t\t\t\tLinkIndex: ifindex,\n\t\t\t\t\tNewAddr: msgType == unix.RTM_NEWADDR,\n\t\t\t\t\tFlags: addr.Flags,\n\t\t\t\t\tScope: addr.Scope,\n\t\t\t\t\tPreferedLft: addr.PreferedLft,\n\t\t\t\t\tValidLft: addr.ValidLft}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package order\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/config\"\n\t\"github.com\/silenceper\/wechat\/v2\/util\"\n)\n\n\/\/ https:\/\/pay.weixin.qq.com\/wiki\/doc\/api\/jsapi.php?chapter=9_1\nvar payGateway = \"https:\/\/api.mch.weixin.qq.com\/pay\/unifiedorder\"\n\n\/\/ SUCCESS 表示支付成功\nconst SUCCESS = \"SUCCESS\"\n\n\/\/ Order struct extends context\ntype Order struct {\n\t*config.Config\n}\n\n\/\/ NewOrder return an instance of order package\nfunc NewOrder(cfg *config.Config) *Order {\n\torder := Order{cfg}\n\treturn &order\n}\n\n\/\/ Params was NEEDED when request Unified order\n\/\/ 传入的参数,用于生成 prepay_id 的必需参数\ntype Params struct {\n\tTotalFee string\n\tCreateIP string\n\tBody string\n\tOutTradeNo string\n\tTimeExpire string \/\/ 订单失效时间,格式为yyyyMMddHHmmss,如2009年12月27日9点10分10秒表示为20091227091010。\n\tOpenID string\n\tTradeType string\n\tSignType string\n\tDetail string\n\tAttach string\n\tGoodsTag string\n\tNotifyURL string\n}\n\n\/\/ Config 是传出用于 js sdk 用的参数\ntype Config struct {\n\tTimestamp string `json:\"timestamp\"`\n\tNonceStr string `json:\"nonceStr\"`\n\tPrePayID string `json:\"prePayId\"`\n\tSignType string `json:\"signType\"`\n\tPackage string `json:\"package\"`\n\tPaySign string `json:\"paySign\"`\n}\n\n\/\/ ConfigForApp 是传出用于 app sdk 用的参数\ntype ConfigForApp struct {\n\tAppID string `json:\"appid\"`\n\tMchID string `json:\"partnerid\"` \/\/ 微信支付分配的商户号\n\tPrePayID string `json:\"prepayid\"`\n\tPackage string `json:\"package\"`\n\tNonceStr string `json:\"nonceStr\"`\n\tTimestamp string `json:\"timestamp\"`\n\tSign string `json:\"sign\"`\n}\n\n\/\/ PreOrder 是 Unified order 接口的返回\ntype PreOrder struct {\n\tReturnCode string `xml:\"return_code\"`\n\tReturnMsg string `xml:\"return_msg\"`\n\tAppID string `xml:\"appid,omitempty\"`\n\tMchID string `xml:\"mch_id,omitempty\"`\n\tNonceStr string `xml:\"nonce_str,omitempty\"`\n\tSign string `xml:\"sign,omitempty\"`\n\tResultCode string `xml:\"result_code,omitempty\"`\n\tTradeType string `xml:\"trade_type,omitempty\"`\n\tPrePayID string `xml:\"prepay_id,omitempty\"`\n\tCodeURL string `xml:\"code_url,omitempty\"`\n\tMWebURL string `xml:\"mweb_url,omitempty\"`\n\tErrCode string `xml:\"err_code,omitempty\"`\n\tErrCodeDes string `xml:\"err_code_des,omitempty\"`\n}\n\n\/\/ payRequest 接口请求参数\ntype payRequest struct {\n\tAppID string `xml:\"appid\"` \/\/ 公众账号ID\n\tMchID string `xml:\"mch_id\"` \/\/ 商户号\n\tDeviceInfo string `xml:\"device_info,omitempty\"` \/\/ 设备号\n\tNonceStr string `xml:\"nonce_str\"` \/\/ 随机字符串\n\tSign string `xml:\"sign\"` \/\/ 签名\n\tSignType string `xml:\"sign_type,omitempty\"` \/\/ 签名类型\n\tBody string `xml:\"body\"` \/\/ 商品描述\n\tDetail string `xml:\"detail,omitempty\"` \/\/ 商品详情\n\tAttach string `xml:\"attach,omitempty\"` \/\/ 附加数据\n\tOutTradeNo string `xml:\"out_trade_no\"` \/\/ 商户订单号\n\tFeeType string `xml:\"fee_type,omitempty\"` \/\/ 标价币种\n\tTotalFee string `xml:\"total_fee\"` \/\/ 标价金额\n\tSpbillCreateIP string `xml:\"spbill_create_ip\"` \/\/ 终端IP\n\tTimeStart string `xml:\"time_start,omitempty\"` \/\/ 交易起始时间\n\tTimeExpire string `xml:\"time_expire,omitempty\"` \/\/ 交易结束时间\n\tGoodsTag string `xml:\"goods_tag,omitempty\"` \/\/ 订单优惠标记\n\tNotifyURL string `xml:\"notify_url\"` \/\/ 通知地址\n\tTradeType string `xml:\"trade_type\"` \/\/ 交易类型\n\tProductID string `xml:\"product_id,omitempty\"` \/\/ 商品ID\n\tLimitPay string `xml:\"limit_pay,omitempty\"` \/\/ 指定支付方式\n\tOpenID string `xml:\"openid,omitempty\"` \/\/ 用户标识\n\tSceneInfo string `xml:\"scene_info,omitempty\"` \/\/ 场景信息\n\n\tXMLName struct{} `xml:\"xml\"`\n}\n\nfunc (req *payRequest) BridgePayRequest(p *Params, AppID, MchID, nonceStr, sign string) *payRequest {\n\trequest := payRequest{\n\t\tAppID: AppID,\n\t\tMchID: MchID,\n\t\tNonceStr: nonceStr,\n\t\tSign: sign,\n\t\tBody: p.Body,\n\t\tOutTradeNo: p.OutTradeNo,\n\t\tTotalFee: p.TotalFee,\n\t\tSpbillCreateIP: p.CreateIP,\n\t\tNotifyURL: p.NotifyURL,\n\t\tTradeType: p.TradeType,\n\t\tOpenID: p.OpenID,\n\t\tSignType: p.SignType,\n\t\tDetail: p.Detail,\n\t\tAttach: p.Attach,\n\t\tGoodsTag: p.GoodsTag,\n\t}\n\treturn &request\n}\n\n\/\/ BridgeConfig get js bridge config\nfunc (o *Order) BridgeConfig(p *Params) (cfg Config, err error) {\n\tvar (\n\t\tbuffer strings.Builder\n\t\ttimestamp = strconv.FormatInt(time.Now().Unix(), 10)\n\t)\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tbuffer.WriteString(\"appId=\")\n\tbuffer.WriteString(order.AppID)\n\tbuffer.WriteString(\"&nonceStr=\")\n\tbuffer.WriteString(order.NonceStr)\n\tbuffer.WriteString(\"&package=\")\n\tbuffer.WriteString(\"prepay_id=\" + order.PrePayID)\n\tbuffer.WriteString(\"&signType=\")\n\tbuffer.WriteString(p.SignType)\n\tbuffer.WriteString(\"&timeStamp=\")\n\tbuffer.WriteString(timestamp)\n\tbuffer.WriteString(\"&key=\")\n\tbuffer.WriteString(o.Key)\n\n\tsign, err := util.CalculateSign(buffer.String(), p.SignType, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ 签名\n\tcfg.PaySign = sign\n\tcfg.NonceStr = order.NonceStr\n\tcfg.Timestamp = timestamp\n\tcfg.PrePayID = order.PrePayID\n\tcfg.SignType = p.SignType\n\tcfg.Package = \"prepay_id=\" + order.PrePayID\n\treturn\n}\n\n\/\/ BridgeAppConfig get app bridge config\nfunc (o *Order) BridgeAppConfig(p *Params) (cfg ConfigForApp, err error) {\n\tvar (\n\t\ttimestamp = strconv.FormatInt(time.Now().Unix(), 10)\n\t\tnoncestr = util.RandomStr(32)\n\t\t_package = \"Sign=WXPay\"\n\t)\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult := map[string]string{\n\t\t\"appid\": order.AppID,\n\t\t\"partnerid\": order.MchID,\n\t\t\"prepayid\": order.PrePayID,\n\t\t\"package\": _package,\n\t\t\"noncestr\": noncestr,\n\t\t\"timestamp\": timestamp,\n\t}\n\t\/\/ 签名\n\tsign, err := util.ParamSign(result, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult[\"sign\"] = sign\n\tcfg = ConfigForApp{\n\t\tAppID: result[\"appid\"],\n\t\tMchID: result[\"partnerid\"],\n\t\tPrePayID: result[\"prepayid\"],\n\t\tPackage: result[\"package\"],\n\t\tNonceStr: result[\"noncestr\"],\n\t\tTimestamp: result[\"timestamp\"],\n\t\tSign: result[\"sign\"],\n\t}\n\treturn\n}\n\n\/\/ PrePayOrder return data for invoke wechat payment\nfunc (o *Order) PrePayOrder(p *Params) (payOrder PreOrder, err error) {\n\tnonceStr := util.RandomStr(32)\n\n\t\/\/ 通知地址\n\tif len(p.NotifyURL) == 0 {\n\t\tp.NotifyURL = o.NotifyURL \/\/ 默认使用order.NotifyURL\n\t}\n\n\tparam := map[string]string{\n\t\t\"appid\": o.AppID,\n\t\t\"body\": p.Body,\n\t\t\"mch_id\": o.MchID,\n\t\t\"nonce_str\": nonceStr,\n\t\t\"out_trade_no\": p.OutTradeNo,\n\t\t\"spbill_create_ip\": p.CreateIP,\n\t\t\"total_fee\": p.TotalFee,\n\t\t\"trade_type\": p.TradeType,\n\t\t\"openid\": p.OpenID,\n\t\t\"sign_type\": p.SignType,\n\t\t\"detail\": p.Detail,\n\t\t\"attach\": p.Attach,\n\t\t\"goods_tag\": p.GoodsTag,\n\t\t\"notify_url\": p.NotifyURL,\n\t}\n\t\/\/ 签名类型\n\tif param[\"sign_type\"] == \"\" {\n\t\tparam[\"sign_type\"] = util.SignTypeMD5\n\t}\n\n\tif p.TimeExpire != \"\" {\n\t\t\/\/ 如果有传入交易结束时间\n\t\tparam[\"time_expire\"] = p.TimeExpire\n\t}\n\n\tsign, err := util.ParamSign(param, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\trequest := new(payRequest).BridgePayRequest(p, o.AppID, o.MchID, nonceStr, sign)\n\tif len(p.TimeExpire) > 0 {\n\t\t\/\/ 如果有传入交易结束时间\n\t\trequest.TimeExpire = p.TimeExpire\n\t}\n\trawRet, err := util.PostXML(payGateway, request)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = xml.Unmarshal(rawRet, &payOrder)\n\tif err != nil {\n\t\treturn\n\t}\n\tif payOrder.ReturnCode == SUCCESS {\n\t\t\/\/ pay success\n\t\tif payOrder.ResultCode == SUCCESS {\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\terr = errors.New(payOrder.ErrCode + payOrder.ErrCodeDes)\n\t\treturn\n\t}\n\terr = errors.New(\"[msg : xmlUnmarshalError] [rawReturn : \" + string(rawRet) + \"] [sign : \" + sign + \"]\")\n\treturn\n}\n\n\/\/ PrePayID will request wechat merchant api and request for a pre payment order id\nfunc (o *Order) PrePayID(p *Params) (prePayID string, err error) {\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tif order.PrePayID == \"\" {\n\t\terr = errors.New(\"empty prepayid\")\n\t}\n\tprePayID = order.PrePayID\n\treturn\n}\n<commit_msg>修复不传sign_type导致request sign_type无默认值的bug (#480)<commit_after>package order\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/config\"\n\t\"github.com\/silenceper\/wechat\/v2\/util\"\n)\n\n\/\/ https:\/\/pay.weixin.qq.com\/wiki\/doc\/api\/jsapi.php?chapter=9_1\nvar payGateway = \"https:\/\/api.mch.weixin.qq.com\/pay\/unifiedorder\"\n\n\/\/ SUCCESS 表示支付成功\nconst SUCCESS = \"SUCCESS\"\n\n\/\/ Order struct extends context\ntype Order struct {\n\t*config.Config\n}\n\n\/\/ NewOrder return an instance of order package\nfunc NewOrder(cfg *config.Config) *Order {\n\torder := Order{cfg}\n\treturn &order\n}\n\n\/\/ Params was NEEDED when request Unified order\n\/\/ 传入的参数,用于生成 prepay_id 的必需参数\ntype Params struct {\n\tTotalFee string\n\tCreateIP string\n\tBody string\n\tOutTradeNo string\n\tTimeExpire string \/\/ 订单失效时间,格式为yyyyMMddHHmmss,如2009年12月27日9点10分10秒表示为20091227091010。\n\tOpenID string\n\tTradeType string\n\tSignType string\n\tDetail string\n\tAttach string\n\tGoodsTag string\n\tNotifyURL string\n}\n\n\/\/ Config 是传出用于 js sdk 用的参数\ntype Config struct {\n\tTimestamp string `json:\"timestamp\"`\n\tNonceStr string `json:\"nonceStr\"`\n\tPrePayID string `json:\"prePayId\"`\n\tSignType string `json:\"signType\"`\n\tPackage string `json:\"package\"`\n\tPaySign string `json:\"paySign\"`\n}\n\n\/\/ ConfigForApp 是传出用于 app sdk 用的参数\ntype ConfigForApp struct {\n\tAppID string `json:\"appid\"`\n\tMchID string `json:\"partnerid\"` \/\/ 微信支付分配的商户号\n\tPrePayID string `json:\"prepayid\"`\n\tPackage string `json:\"package\"`\n\tNonceStr string `json:\"nonceStr\"`\n\tTimestamp string `json:\"timestamp\"`\n\tSign string `json:\"sign\"`\n}\n\n\/\/ PreOrder 是 Unified order 接口的返回\ntype PreOrder struct {\n\tReturnCode string `xml:\"return_code\"`\n\tReturnMsg string `xml:\"return_msg\"`\n\tAppID string `xml:\"appid,omitempty\"`\n\tMchID string `xml:\"mch_id,omitempty\"`\n\tNonceStr string `xml:\"nonce_str,omitempty\"`\n\tSign string `xml:\"sign,omitempty\"`\n\tResultCode string `xml:\"result_code,omitempty\"`\n\tTradeType string `xml:\"trade_type,omitempty\"`\n\tPrePayID string `xml:\"prepay_id,omitempty\"`\n\tCodeURL string `xml:\"code_url,omitempty\"`\n\tMWebURL string `xml:\"mweb_url,omitempty\"`\n\tErrCode string `xml:\"err_code,omitempty\"`\n\tErrCodeDes string `xml:\"err_code_des,omitempty\"`\n}\n\n\/\/ payRequest 接口请求参数\ntype payRequest struct {\n\tAppID string `xml:\"appid\"` \/\/ 公众账号ID\n\tMchID string `xml:\"mch_id\"` \/\/ 商户号\n\tDeviceInfo string `xml:\"device_info,omitempty\"` \/\/ 设备号\n\tNonceStr string `xml:\"nonce_str\"` \/\/ 随机字符串\n\tSign string `xml:\"sign\"` \/\/ 签名\n\tSignType string `xml:\"sign_type,omitempty\"` \/\/ 签名类型\n\tBody string `xml:\"body\"` \/\/ 商品描述\n\tDetail string `xml:\"detail,omitempty\"` \/\/ 商品详情\n\tAttach string `xml:\"attach,omitempty\"` \/\/ 附加数据\n\tOutTradeNo string `xml:\"out_trade_no\"` \/\/ 商户订单号\n\tFeeType string `xml:\"fee_type,omitempty\"` \/\/ 标价币种\n\tTotalFee string `xml:\"total_fee\"` \/\/ 标价金额\n\tSpbillCreateIP string `xml:\"spbill_create_ip\"` \/\/ 终端IP\n\tTimeStart string `xml:\"time_start,omitempty\"` \/\/ 交易起始时间\n\tTimeExpire string `xml:\"time_expire,omitempty\"` \/\/ 交易结束时间\n\tGoodsTag string `xml:\"goods_tag,omitempty\"` \/\/ 订单优惠标记\n\tNotifyURL string `xml:\"notify_url\"` \/\/ 通知地址\n\tTradeType string `xml:\"trade_type\"` \/\/ 交易类型\n\tProductID string `xml:\"product_id,omitempty\"` \/\/ 商品ID\n\tLimitPay string `xml:\"limit_pay,omitempty\"` \/\/ 指定支付方式\n\tOpenID string `xml:\"openid,omitempty\"` \/\/ 用户标识\n\tSceneInfo string `xml:\"scene_info,omitempty\"` \/\/ 场景信息\n\n\tXMLName struct{} `xml:\"xml\"`\n}\n\nfunc (req *payRequest) BridgePayRequest(p *Params, AppID, MchID, nonceStr, sign string) *payRequest {\n\trequest := payRequest{\n\t\tAppID: AppID,\n\t\tMchID: MchID,\n\t\tNonceStr: nonceStr,\n\t\tSign: sign,\n\t\tBody: p.Body,\n\t\tOutTradeNo: p.OutTradeNo,\n\t\tTotalFee: p.TotalFee,\n\t\tSpbillCreateIP: p.CreateIP,\n\t\tNotifyURL: p.NotifyURL,\n\t\tTradeType: p.TradeType,\n\t\tOpenID: p.OpenID,\n\t\tSignType: p.SignType,\n\t\tDetail: p.Detail,\n\t\tAttach: p.Attach,\n\t\tGoodsTag: p.GoodsTag,\n\t}\n\treturn &request\n}\n\n\/\/ BridgeConfig get js bridge config\nfunc (o *Order) BridgeConfig(p *Params) (cfg Config, err error) {\n\tvar (\n\t\tbuffer strings.Builder\n\t\ttimestamp = strconv.FormatInt(time.Now().Unix(), 10)\n\t)\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tbuffer.WriteString(\"appId=\")\n\tbuffer.WriteString(order.AppID)\n\tbuffer.WriteString(\"&nonceStr=\")\n\tbuffer.WriteString(order.NonceStr)\n\tbuffer.WriteString(\"&package=\")\n\tbuffer.WriteString(\"prepay_id=\" + order.PrePayID)\n\tbuffer.WriteString(\"&signType=\")\n\tbuffer.WriteString(p.SignType)\n\tbuffer.WriteString(\"&timeStamp=\")\n\tbuffer.WriteString(timestamp)\n\tbuffer.WriteString(\"&key=\")\n\tbuffer.WriteString(o.Key)\n\n\tsign, err := util.CalculateSign(buffer.String(), p.SignType, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ 签名\n\tcfg.PaySign = sign\n\tcfg.NonceStr = order.NonceStr\n\tcfg.Timestamp = timestamp\n\tcfg.PrePayID = order.PrePayID\n\tcfg.SignType = p.SignType\n\tcfg.Package = \"prepay_id=\" + order.PrePayID\n\treturn\n}\n\n\/\/ BridgeAppConfig get app bridge config\nfunc (o *Order) BridgeAppConfig(p *Params) (cfg ConfigForApp, err error) {\n\tvar (\n\t\ttimestamp = strconv.FormatInt(time.Now().Unix(), 10)\n\t\tnoncestr = util.RandomStr(32)\n\t\t_package = \"Sign=WXPay\"\n\t)\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult := map[string]string{\n\t\t\"appid\": order.AppID,\n\t\t\"partnerid\": order.MchID,\n\t\t\"prepayid\": order.PrePayID,\n\t\t\"package\": _package,\n\t\t\"noncestr\": noncestr,\n\t\t\"timestamp\": timestamp,\n\t}\n\t\/\/ 签名\n\tsign, err := util.ParamSign(result, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult[\"sign\"] = sign\n\tcfg = ConfigForApp{\n\t\tAppID: result[\"appid\"],\n\t\tMchID: result[\"partnerid\"],\n\t\tPrePayID: result[\"prepayid\"],\n\t\tPackage: result[\"package\"],\n\t\tNonceStr: result[\"noncestr\"],\n\t\tTimestamp: result[\"timestamp\"],\n\t\tSign: result[\"sign\"],\n\t}\n\treturn\n}\n\n\/\/ PrePayOrder return data for invoke wechat payment\nfunc (o *Order) PrePayOrder(p *Params) (payOrder PreOrder, err error) {\n\tnonceStr := util.RandomStr(32)\n\n\t\/\/ 通知地址\n\tif len(p.NotifyURL) == 0 {\n\t\tp.NotifyURL = o.NotifyURL \/\/ 默认使用order.NotifyURL\n\t}\n\n\t\/\/ 签名类型\n\tif p.SignType == \"\" {\n\t\tp.SignType = util.SignTypeMD5\n\t}\n\n\tparam := map[string]string{\n\t\t\"appid\": o.AppID,\n\t\t\"body\": p.Body,\n\t\t\"mch_id\": o.MchID,\n\t\t\"nonce_str\": nonceStr,\n\t\t\"out_trade_no\": p.OutTradeNo,\n\t\t\"spbill_create_ip\": p.CreateIP,\n\t\t\"total_fee\": p.TotalFee,\n\t\t\"trade_type\": p.TradeType,\n\t\t\"openid\": p.OpenID,\n\t\t\"sign_type\": p.SignType,\n\t\t\"detail\": p.Detail,\n\t\t\"attach\": p.Attach,\n\t\t\"goods_tag\": p.GoodsTag,\n\t\t\"notify_url\": p.NotifyURL,\n\t}\n\n\tif p.TimeExpire != \"\" {\n\t\t\/\/ 如果有传入交易结束时间\n\t\tparam[\"time_expire\"] = p.TimeExpire\n\t}\n\n\tsign, err := util.ParamSign(param, o.Key)\n\tif err != nil {\n\t\treturn\n\t}\n\trequest := new(payRequest).BridgePayRequest(p, o.AppID, o.MchID, nonceStr, sign)\n\tif len(p.TimeExpire) > 0 {\n\t\t\/\/ 如果有传入交易结束时间\n\t\trequest.TimeExpire = p.TimeExpire\n\t}\n\trawRet, err := util.PostXML(payGateway, request)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = xml.Unmarshal(rawRet, &payOrder)\n\tif err != nil {\n\t\treturn\n\t}\n\tif payOrder.ReturnCode == SUCCESS {\n\t\t\/\/ pay success\n\t\tif payOrder.ResultCode == SUCCESS {\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\terr = errors.New(payOrder.ErrCode + payOrder.ErrCodeDes)\n\t\treturn\n\t}\n\terr = errors.New(\"[msg : xmlUnmarshalError] [rawReturn : \" + string(rawRet) + \"] [sign : \" + sign + \"]\")\n\treturn\n}\n\n\/\/ PrePayID will request wechat merchant api and request for a pre payment order id\nfunc (o *Order) PrePayID(p *Params) (prePayID string, err error) {\n\torder, err := o.PrePayOrder(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tif order.PrePayID == \"\" {\n\t\terr = errors.New(\"empty prepayid\")\n\t}\n\tprePayID = order.PrePayID\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\tSimple logger package for go services supervised by DJB deamontools\n*\/\n\npackage logger\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tCRITICAL = 4\n\tERROR = 3\n\tWARNING = 2\n\tNOTICE = 1\n\tDEBUG = 0\n)\n\ntype Logger struct {\n\tlevel int\n\taddTimestamp bool\n}\n\nvar levels = map[string]int{\n\t\"critical\": CRITICAL,\n\t\"error\": ERROR,\n\t\"warning\": WARNING,\n\t\"notice\": NOTICE,\n\t\"debug\": DEBUG,\n}\n\nvar rlevels = [5]string{\n\t\"debug\",\n\t\"notice\",\n\t\"warning\",\n\t\"error\",\n\t\"critical\",\n}\n\nfunc init() {\n\n}\n\nfunc (l *Logger) SetLevel(level string) {\n\tlevel = strings.ToLower(level)\n\tl.level = levels[level]\n}\n\nfunc (l *Logger) SetTimeStamp(ts bool) {\n\tl.addTimestamp = ts\n}\n\n\/\/ Base logger\nfunc (l *Logger) log(level int, v ...interface{}) {\n\tif level >= l.level {\n\t\tif l.addTimestamp {\n\t\t\tfmt.Print(fmt.Sprintf(\"%d - \", time.Now().UnixNano()))\n\t\t}\n\t\tfmt.Print(fmt.Sprintf(\"%s - \", strings.ToTitle(rlevels[level])))\n\t\tfmt.Println(v...)\n\t}\n}\n\nfunc (l *Logger) Debug(v ...interface{}) {\n\tl.log(DEBUG, v...)\n}\n\nfunc (l *Logger) Notice(v ...interface{}) {\n\tl.log(NOTICE, v...)\n}\n\nfunc (l *Logger) Warning(v ...interface{}) {\n\tl.log(WARNING, v...)\n}\n\nfunc (l *Logger) Error(v ...interface{}) {\n\tl.log(ERROR, v...)\n}\n\nfunc (l *Logger) Critical(v ...interface{}) {\n\tl.log(CRITICAL, v...)\n}\n<commit_msg>Add mutex to avoid concurrent writes to stdout<commit_after>\/*\n\tSimple logger package for go services supervised by DJB deamontools\n*\/\n\npackage logger\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tCRITICAL = 4\n\tERROR = 3\n\tWARNING = 2\n\tNOTICE = 1\n\tDEBUG = 0\n)\n\ntype Logger struct {\n\tlevel int\n\taddTimestamp bool\n}\n\ntype logWritter struct {\n\tsync.Mutex\n}\n\nfunc (lw *logWritter) WriteLog(msg string) {\n\tlw.Lock()\n\tfmt.Println(msg)\n\tlw.Unlock()\n}\n\nvar levels = map[string]int{\n\t\"critical\": CRITICAL,\n\t\"error\": ERROR,\n\t\"warning\": WARNING,\n\t\"notice\": NOTICE,\n\t\"debug\": DEBUG,\n}\n\nvar rlevels = [5]string{\n\t\"debug\",\n\t\"notice\",\n\t\"warning\",\n\t\"error\",\n\t\"critical\",\n}\n\nvar lw *logWritter\n\n\/\/var fout *bufio.Writer\n\nfunc init() {\n\tlw = new(logWritter)\n}\n\nfunc (l *Logger) SetLevel(level string) {\n\tlevel = strings.ToLower(level)\n\tl.level = levels[level]\n}\n\nfunc (l *Logger) SetTimeStamp(ts bool) {\n\tl.addTimestamp = ts\n}\n\n\/\/ Base logger\nfunc (l *Logger) log(level int, v ...interface{}) {\n\tmsg := \"\"\n\tif level >= l.level {\n\t\tif l.addTimestamp {\n\t\t\tmsg = fmt.Sprintf(\"%d - \", time.Now().UnixNano())\n\t\t}\n\t\tmsg = fmt.Sprintf(\"%s%s - \", msg, strings.ToTitle(rlevels[level]))\n\t\tfor i := range v {\n\t\t\tmsg = fmt.Sprintf(\"%s%v\", msg, v[i])\n\t\t}\n\t\tlw.WriteLog(msg)\n\t}\n}\n\nfunc (l *Logger) Debug(v ...interface{}) {\n\tl.log(DEBUG, v...)\n}\n\nfunc (l *Logger) Notice(v ...interface{}) {\n\tl.log(NOTICE, v...)\n}\n\nfunc (l *Logger) Warning(v ...interface{}) {\n\tl.log(WARNING, v...)\n}\n\nfunc (l *Logger) Error(v ...interface{}) {\n\tl.log(ERROR, v...)\n}\n\nfunc (l *Logger) Critical(v ...interface{}) {\n\tl.log(CRITICAL, v...)\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\tpool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn make([]byte, 0, maxLogSize)\n\t\t},\n\t}\n\n\tutsname string\n)\n\nfunc init() {\n\thname, err := os.Hostname()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tutsname = hname\n}\n\n\/\/ Logger is a collection of properties how to output logs.\n\/\/ Properties are initially set by NewLogger. They can be customized\n\/\/ later by Logger methods.\ntype Logger struct {\n\ttopic atomic.Value\n\tthreshold int32\n\tdefaults atomic.Value\n\tformat atomic.Value\n\terrorHandler atomic.Value\n\n\tmu sync.Mutex\n\toutput io.Writer\n}\n\n\/\/ NewLogger constructs a new Logger struct.\n\/\/\n\/\/ Attributes are initialized as follows:\n\/\/ Topic: path.Base(os.Args[0])\n\/\/ Threshold: LvInfo\n\/\/ Formatter: PlainFormat\n\/\/ Output: os.Stderr\n\/\/ Defaults: nil\n\/\/ ErrorHandler: os.Exit(5) on EPIPE.\nfunc NewLogger() *Logger {\n\tl := &Logger{\n\t\toutput: os.Stderr,\n\t}\n\tfilename := filepath.Base(os.Args[0])\n\tif runtime.GOOS == \"windows\" {\n\t\tif ext := filepath.Ext(filename); ext != \"\" {\n\t\t\tfilename = filename[:len(filename)-len(ext)]\n\t\t}\n\t}\n\tl.SetTopic(normalizeTopic(filename))\n\tl.SetThreshold(LvInfo)\n\tl.SetDefaults(nil)\n\tl.SetFormatter(PlainFormat{})\n\tl.SetErrorHandler(errorHandler)\n\treturn l\n}\n\nfunc normalizeTopic(n string) string {\n\t\/\/ Topic must match [.a-z0-9-]+\n\ttopic := strings.Map(func(r rune) rune {\n\t\tswitch {\n\t\tcase r == '.' || r == '-':\n\t\t\treturn r\n\t\tcase r >= '0' && r < '9':\n\t\t\treturn r\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn r\n\t\tcase r >= 'A' && r < 'Z':\n\t\t\treturn r + ('a' - 'A')\n\t\tdefault:\n\t\t\treturn '-'\n\t\t}\n\t}, n)\n\tif len(topic) > maxTopicLength {\n\t\treturn topic[:maxTopicLength]\n\t}\n\treturn topic\n}\n\n\/\/ Topic returns the topic for the logger.\nfunc (l *Logger) Topic() string {\n\treturn l.topic.Load().(string)\n}\n\n\/\/ SetTopic sets a new topic for the logger.\n\/\/ topic must not be empty. Too long topic may be shortened automatically.\nfunc (l *Logger) SetTopic(topic string) {\n\tif len(topic) == 0 {\n\t\tpanic(\"Empty tag\")\n\t}\n\n\tl.topic.Store(topic)\n}\n\n\/\/ Threshold returns the current threshold of the logger.\nfunc (l *Logger) Threshold() int {\n\treturn int(atomic.LoadInt32(&l.threshold))\n}\n\n\/\/ Enabled returns true if the log for the given level will be logged.\n\/\/ This can be used to avoid futile computation for logs being ignored.\n\/\/\n\/\/ if log.Enabled(log.LvDebug) {\n\/\/ log.Debug(\"message\", map[string]interface{}{\n\/\/ \"debug info\": \"...\",\n\/\/ })\n\/\/ }\nfunc (l *Logger) Enabled(level int) bool {\n\treturn level <= l.Threshold()\n}\n\n\/\/ SetThreshold sets the threshold for the logger.\n\/\/ level must be a pre-defined constant such as LvInfo.\nfunc (l *Logger) SetThreshold(level int) {\n\tatomic.StoreInt32(&l.threshold, int32(level))\n}\n\n\/\/ SetThresholdByName sets the threshold for the logger by the level name.\nfunc (l *Logger) SetThresholdByName(n string) error {\n\tvar level int\n\tswitch n {\n\tcase \"critical\", \"crit\":\n\t\tlevel = LvCritical\n\tcase \"error\":\n\t\tlevel = LvError\n\tcase \"warning\", \"warn\":\n\t\tlevel = LvWarn\n\tcase \"information\", \"info\":\n\t\tlevel = LvInfo\n\tcase \"debug\":\n\t\tlevel = LvDebug\n\tdefault:\n\t\treturn fmt.Errorf(\"No such level: %s\", n)\n\t}\n\tl.SetThreshold(level)\n\treturn nil\n}\n\n\/\/ SetDefaults sets default field values for the logger.\n\/\/ Setting nil effectively clear the defaults.\nfunc (l *Logger) SetDefaults(d map[string]interface{}) error {\n\tfor key := range d {\n\t\tif !IsValidKey(key) {\n\t\t\treturn ErrInvalidKey\n\t\t}\n\t}\n\n\tl.defaults.Store(d)\n\treturn nil\n}\n\n\/\/ Defaults returns default field values.\nfunc (l *Logger) Defaults() map[string]interface{} {\n\treturn l.defaults.Load().(map[string]interface{})\n}\n\n\/\/ SetFormatter sets log formatter.\nfunc (l *Logger) SetFormatter(f Formatter) {\n\tl.format.Store(&f)\n}\n\n\/\/ Formatter returns the current log formatter.\nfunc (l *Logger) Formatter() Formatter {\n\treturn *l.format.Load().(*Formatter)\n}\n\n\/\/ SetErrorHandler sets error handler.\n\/\/\n\/\/ The handler will be called if the underlying Writer's Write\n\/\/ returns non-nil error. If h is nil, no handler will be called.\nfunc (l *Logger) SetErrorHandler(h func(error) error) {\n\tl.errorHandler.Store(h)\n}\n\n\/\/ Formatter returns the current log formatter.\nfunc (l *Logger) handleError(err error) error {\n\th := l.errorHandler.Load().(func(error) error)\n\tif h == nil {\n\t\treturn err\n\t}\n\treturn h(err)\n}\n\n\/\/ SetOutput sets io.Writer for log output.\n\/\/ Setting nil disables log output.\nfunc (l *Logger) SetOutput(w io.Writer) {\n\tl.mu.Lock()\n\tl.output = w\n\tl.mu.Unlock()\n}\n\ntype logWriter struct {\n\tbuf []byte\n\tlogfunc func(p []byte) (n int, err error)\n}\n\nfunc (w *logWriter) Write(p []byte) (int, error) {\n\ttbuf := p\n\tif len(w.buf) > 0 {\n\t\ttbuf = append(w.buf, p...)\n\t}\n\twritten, err := w.logfunc(tbuf)\n\tn := written - len(w.buf)\n\tif err != nil {\n\t\tif n < 0 {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn n, err\n\t}\n\n\tw.buf = w.buf[:0]\n\tremain := len(tbuf) - written\n\tif remain == 0 {\n\t\treturn n, nil\n\t}\n\tif cap(w.buf) < remain {\n\t\treturn n, errors.New(\"too long\")\n\t}\n\tw.buf = append(w.buf, tbuf[n:]...)\n\treturn len(p), nil\n}\n\n\/\/ Writer returns an io.Writer.\n\/\/ Each line written in the writer will be logged to the logger\n\/\/ with the given severity.\nfunc (l *Logger) Writer(severity int) io.Writer {\n\tlogfunc := func(p []byte) (n int, err error) {\n\t\tfor len(p) > 0 {\n\t\t\teol := bytes.IndexByte(p, '\\n')\n\t\t\tif eol == -1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tln := eol + 1\n\t\t\terr = l.Log(severity, string(p[:eol]), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tn += ln\n\t\t\tp = p[ln:]\n\t\t}\n\t\treturn\n\t}\n\n\treturn &logWriter{\n\t\tbuf: make([]byte, 0, maxLogSize\/2),\n\t\tlogfunc: logfunc,\n\t}\n}\n\n\/\/ Log outputs a log message with additional fields.\n\/\/ fields can be nil.\nfunc (l *Logger) Log(severity int, msg string, fields map[string]interface{}) error {\n\tif severity > l.Threshold() {\n\t\treturn nil\n\t}\n\n\t\/\/ format the message before acquiring mutex for better concurrency.\n\tt := time.Now()\n\tbuf := pool.Get().([]byte)\n\tdefer pool.Put(buf)\n\n\tb, err := l.Formatter().Format(buf, l, t, severity, msg, fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tif l.output == nil {\n\t\treturn nil\n\t}\n\n\t_, err = l.output.Write(b)\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = l.handleError(err)\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn errors.Wrap(err, \"Logger.Log\")\n}\n\n\/\/ Critical outputs a critical log.\n\/\/ fields can be nil.\nfunc (l *Logger) Critical(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvCritical, msg, fields)\n}\n\n\/\/ Error outputs an error log.\n\/\/ fields can be nil.\nfunc (l *Logger) Error(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvError, msg, fields)\n}\n\n\/\/ Warn outputs a warning log.\n\/\/ fields can be nil.\nfunc (l *Logger) Warn(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvWarn, msg, fields)\n}\n\n\/\/ Info outputs an informational log.\n\/\/ fields can be nil.\nfunc (l *Logger) Info(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvInfo, msg, fields)\n}\n\n\/\/ Debug outputs a debug log.\n\/\/ fields can be nil.\nfunc (l *Logger) Debug(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvDebug, msg, fields)\n}\n\n\/\/ WriteThrough writes data through to the underlying writer.\nfunc (l *Logger) WriteThrough(data []byte) error {\n\tl.mu.Lock()\n\t_, err := l.output.Write(data)\n\tl.mu.Unlock()\n\treturn err\n}\n<commit_msg>Handle errors in Logger.WriteThrough<commit_after>package log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nvar (\n\tpool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn make([]byte, 0, maxLogSize)\n\t\t},\n\t}\n\n\tutsname string\n)\n\nfunc init() {\n\thname, err := os.Hostname()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tutsname = hname\n}\n\n\/\/ Logger is a collection of properties how to output logs.\n\/\/ Properties are initially set by NewLogger. They can be customized\n\/\/ later by Logger methods.\ntype Logger struct {\n\ttopic atomic.Value\n\tthreshold int32\n\tdefaults atomic.Value\n\tformat atomic.Value\n\terrorHandler atomic.Value\n\n\tmu sync.Mutex\n\toutput io.Writer\n}\n\n\/\/ NewLogger constructs a new Logger struct.\n\/\/\n\/\/ Attributes are initialized as follows:\n\/\/ Topic: path.Base(os.Args[0])\n\/\/ Threshold: LvInfo\n\/\/ Formatter: PlainFormat\n\/\/ Output: os.Stderr\n\/\/ Defaults: nil\n\/\/ ErrorHandler: os.Exit(5) on EPIPE.\nfunc NewLogger() *Logger {\n\tl := &Logger{\n\t\toutput: os.Stderr,\n\t}\n\tfilename := filepath.Base(os.Args[0])\n\tif runtime.GOOS == \"windows\" {\n\t\tif ext := filepath.Ext(filename); ext != \"\" {\n\t\t\tfilename = filename[:len(filename)-len(ext)]\n\t\t}\n\t}\n\tl.SetTopic(normalizeTopic(filename))\n\tl.SetThreshold(LvInfo)\n\tl.SetDefaults(nil)\n\tl.SetFormatter(PlainFormat{})\n\tl.SetErrorHandler(errorHandler)\n\treturn l\n}\n\nfunc normalizeTopic(n string) string {\n\t\/\/ Topic must match [.a-z0-9-]+\n\ttopic := strings.Map(func(r rune) rune {\n\t\tswitch {\n\t\tcase r == '.' || r == '-':\n\t\t\treturn r\n\t\tcase r >= '0' && r < '9':\n\t\t\treturn r\n\t\tcase r >= 'a' && r <= 'z':\n\t\t\treturn r\n\t\tcase r >= 'A' && r < 'Z':\n\t\t\treturn r + ('a' - 'A')\n\t\tdefault:\n\t\t\treturn '-'\n\t\t}\n\t}, n)\n\tif len(topic) > maxTopicLength {\n\t\treturn topic[:maxTopicLength]\n\t}\n\treturn topic\n}\n\n\/\/ Topic returns the topic for the logger.\nfunc (l *Logger) Topic() string {\n\treturn l.topic.Load().(string)\n}\n\n\/\/ SetTopic sets a new topic for the logger.\n\/\/ topic must not be empty. Too long topic may be shortened automatically.\nfunc (l *Logger) SetTopic(topic string) {\n\tif len(topic) == 0 {\n\t\tpanic(\"Empty tag\")\n\t}\n\n\tl.topic.Store(topic)\n}\n\n\/\/ Threshold returns the current threshold of the logger.\nfunc (l *Logger) Threshold() int {\n\treturn int(atomic.LoadInt32(&l.threshold))\n}\n\n\/\/ Enabled returns true if the log for the given level will be logged.\n\/\/ This can be used to avoid futile computation for logs being ignored.\n\/\/\n\/\/ if log.Enabled(log.LvDebug) {\n\/\/ log.Debug(\"message\", map[string]interface{}{\n\/\/ \"debug info\": \"...\",\n\/\/ })\n\/\/ }\nfunc (l *Logger) Enabled(level int) bool {\n\treturn level <= l.Threshold()\n}\n\n\/\/ SetThreshold sets the threshold for the logger.\n\/\/ level must be a pre-defined constant such as LvInfo.\nfunc (l *Logger) SetThreshold(level int) {\n\tatomic.StoreInt32(&l.threshold, int32(level))\n}\n\n\/\/ SetThresholdByName sets the threshold for the logger by the level name.\nfunc (l *Logger) SetThresholdByName(n string) error {\n\tvar level int\n\tswitch n {\n\tcase \"critical\", \"crit\":\n\t\tlevel = LvCritical\n\tcase \"error\":\n\t\tlevel = LvError\n\tcase \"warning\", \"warn\":\n\t\tlevel = LvWarn\n\tcase \"information\", \"info\":\n\t\tlevel = LvInfo\n\tcase \"debug\":\n\t\tlevel = LvDebug\n\tdefault:\n\t\treturn fmt.Errorf(\"No such level: %s\", n)\n\t}\n\tl.SetThreshold(level)\n\treturn nil\n}\n\n\/\/ SetDefaults sets default field values for the logger.\n\/\/ Setting nil effectively clear the defaults.\nfunc (l *Logger) SetDefaults(d map[string]interface{}) error {\n\tfor key := range d {\n\t\tif !IsValidKey(key) {\n\t\t\treturn ErrInvalidKey\n\t\t}\n\t}\n\n\tl.defaults.Store(d)\n\treturn nil\n}\n\n\/\/ Defaults returns default field values.\nfunc (l *Logger) Defaults() map[string]interface{} {\n\treturn l.defaults.Load().(map[string]interface{})\n}\n\n\/\/ SetFormatter sets log formatter.\nfunc (l *Logger) SetFormatter(f Formatter) {\n\tl.format.Store(&f)\n}\n\n\/\/ Formatter returns the current log formatter.\nfunc (l *Logger) Formatter() Formatter {\n\treturn *l.format.Load().(*Formatter)\n}\n\n\/\/ SetErrorHandler sets error handler.\n\/\/\n\/\/ The handler will be called if the underlying Writer's Write\n\/\/ returns non-nil error. If h is nil, no handler will be called.\nfunc (l *Logger) SetErrorHandler(h func(error) error) {\n\tl.errorHandler.Store(h)\n}\n\n\/\/ Formatter returns the current log formatter.\nfunc (l *Logger) handleError(err error) error {\n\th := l.errorHandler.Load().(func(error) error)\n\tif h == nil {\n\t\treturn err\n\t}\n\treturn h(err)\n}\n\n\/\/ SetOutput sets io.Writer for log output.\n\/\/ Setting nil disables log output.\nfunc (l *Logger) SetOutput(w io.Writer) {\n\tl.mu.Lock()\n\tl.output = w\n\tl.mu.Unlock()\n}\n\ntype logWriter struct {\n\tbuf []byte\n\tlogfunc func(p []byte) (n int, err error)\n}\n\nfunc (w *logWriter) Write(p []byte) (int, error) {\n\ttbuf := p\n\tif len(w.buf) > 0 {\n\t\ttbuf = append(w.buf, p...)\n\t}\n\twritten, err := w.logfunc(tbuf)\n\tn := written - len(w.buf)\n\tif err != nil {\n\t\tif n < 0 {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn n, err\n\t}\n\n\tw.buf = w.buf[:0]\n\tremain := len(tbuf) - written\n\tif remain == 0 {\n\t\treturn n, nil\n\t}\n\tif cap(w.buf) < remain {\n\t\treturn n, errors.New(\"too long\")\n\t}\n\tw.buf = append(w.buf, tbuf[n:]...)\n\treturn len(p), nil\n}\n\n\/\/ Writer returns an io.Writer.\n\/\/ Each line written in the writer will be logged to the logger\n\/\/ with the given severity.\nfunc (l *Logger) Writer(severity int) io.Writer {\n\tlogfunc := func(p []byte) (n int, err error) {\n\t\tfor len(p) > 0 {\n\t\t\teol := bytes.IndexByte(p, '\\n')\n\t\t\tif eol == -1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tln := eol + 1\n\t\t\terr = l.Log(severity, string(p[:eol]), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tn += ln\n\t\t\tp = p[ln:]\n\t\t}\n\t\treturn\n\t}\n\n\treturn &logWriter{\n\t\tbuf: make([]byte, 0, maxLogSize\/2),\n\t\tlogfunc: logfunc,\n\t}\n}\n\n\/\/ Log outputs a log message with additional fields.\n\/\/ fields can be nil.\nfunc (l *Logger) Log(severity int, msg string, fields map[string]interface{}) error {\n\tif severity > l.Threshold() {\n\t\treturn nil\n\t}\n\n\t\/\/ format the message before acquiring mutex for better concurrency.\n\tt := time.Now()\n\tbuf := pool.Get().([]byte)\n\tdefer pool.Put(buf)\n\n\tb, err := l.Formatter().Format(buf, l, t, severity, msg, fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tif l.output == nil {\n\t\treturn nil\n\t}\n\n\t_, err = l.output.Write(b)\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = l.handleError(err)\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn errors.Wrap(err, \"Logger.Log\")\n}\n\n\/\/ Critical outputs a critical log.\n\/\/ fields can be nil.\nfunc (l *Logger) Critical(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvCritical, msg, fields)\n}\n\n\/\/ Error outputs an error log.\n\/\/ fields can be nil.\nfunc (l *Logger) Error(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvError, msg, fields)\n}\n\n\/\/ Warn outputs a warning log.\n\/\/ fields can be nil.\nfunc (l *Logger) Warn(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvWarn, msg, fields)\n}\n\n\/\/ Info outputs an informational log.\n\/\/ fields can be nil.\nfunc (l *Logger) Info(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvInfo, msg, fields)\n}\n\n\/\/ Debug outputs a debug log.\n\/\/ fields can be nil.\nfunc (l *Logger) Debug(msg string, fields map[string]interface{}) error {\n\treturn l.Log(LvDebug, msg, fields)\n}\n\n\/\/ WriteThrough writes data through to the underlying writer.\nfunc (l *Logger) WriteThrough(data []byte) error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\t_, err := l.output.Write(data)\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = l.handleError(err)\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn errors.Wrap(err, \"Logger.WriteThrough\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ logger\r\npackage minilog\r\n\r\nimport (\r\n\t\"bufio\"\r\n\t\"bytes\"\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\t\"os\/user\"\r\n\t\"path\/filepath\"\r\n\t\"runtime\"\r\n\t\"strconv\"\r\n\t\"strings\"\r\n\t\"sync\"\r\n\t\"time\"\r\n)\r\n\r\nvar logger *Logger\r\n\r\ntype severity int\r\n\r\nconst (\r\n\ttraceLevel severity = iota\r\n\tdebugLevel\r\n\tinfoLevel\r\n\twarnLevel\r\n\terrorLevel\r\n\tfatalLevel\r\n\tnumSeverity = 6\r\n)\r\n\r\nvar severityName = []string{\r\n\ttraceLevel: \"TRACE\",\r\n\tdebugLevel: \"DEBUG\",\r\n\tinfoLevel: \"INFO \",\r\n\twarnLevel: \"WARN \",\r\n\terrorLevel: \"ERROR\",\r\n\tfatalLevel: \"FATAL\",\r\n}\r\n\r\ntype Mode int\r\n\r\nconst (\r\n\tToFile = (1 << 0)\r\n\tToStderr = (1 << 1)\r\n\tAlsoToStderr = (ToFile | ToStderr)\r\n)\r\n\r\nfunc (m Mode) String() string {\r\n\tswitch m {\r\n\tcase ToFile:\r\n\t\treturn \"log to file\"\r\n\tcase ToStderr:\r\n\t\treturn \"print to stderr\"\r\n\tcase AlsoToStderr:\r\n\t\treturn \"log to file and print to stderr\"\r\n\tdefault:\r\n\t\treturn \"unknow log mode\"\r\n\t}\r\n}\r\n\r\ntype Logger struct {\r\n\tmaxSize int\r\n\tmaxFileNum int\r\n\tlevel severity\r\n\tlogDir string\r\n\tlogMode Mode\r\n\tlogName string\r\n\tprevLog *lastLog\r\n\tkeepName []string\r\n\tcreateTime int64\r\n\tcallHeader func(string) string\r\n\tbuffer *logBuffer\r\n\tmu sync.Mutex\r\n\twriter flushWriter\r\n\tnBytes int\r\n}\r\n\r\ntype lastLog struct {\r\n\trepeatLog string\r\n\trepeatNum int\r\n\tlastHeader string\r\n}\r\n\r\nvar (\r\n\tpid = os.Getpid()\r\n\tprogram = filepath.Base(os.Args[0])\r\n\thost = \"unknownhost\"\r\n\tuserName = \"unknownuser\"\r\n)\r\n\r\nfunc init() {\r\n\th, err := os.Hostname()\r\n\tif err == nil {\r\n\t\thost = shortHostname(h)\r\n\t}\r\n\r\n\tcurrent, err := user.Current()\r\n\tif err == nil {\r\n\t\tuserName = current.Username\r\n\t}\r\n\r\n\t\/\/ Sanitize userName since it may contain filepath separators on Windows.\r\n\tuserName = strings.Replace(userName, `\\`, \"_\", -1)\r\n}\r\n\r\nfunc shortHostname(hostname string) string {\r\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\r\n\t\treturn hostname[:i]\r\n\t}\r\n\treturn hostname\r\n}\r\n\r\nfunc severityByName(s string) severity {\r\n\ts = strings.ToUpper(s)\r\n\tfor i, name := range severityName {\r\n\t\tif name == s {\r\n\t\t\treturn severity(i)\r\n\t\t}\r\n\t}\r\n\treturn warnLevel\r\n}\r\n\r\n\/\/ init logger for log\r\nfunc InitLogger() *Logger {\r\n\tl := new(Logger)\r\n\tl.maxSize = 0\r\n\tl.maxFileNum = 1\r\n\tl.level = warnLevel\r\n\tl.logDir = os.TempDir()\r\n\tl.logMode = 0\r\n\tl.prevLog = new(lastLog)\r\n\tl.logName = program + \".log.\" + host\r\n\tl.SetLogHeader(l.formatHeader)\r\n\tl.nBytes = 0\r\n\tlogger = l\r\n\r\n\treturn l\r\n}\r\nfunc CloseLogger() {\r\n\tlogger.close()\r\n}\r\n\r\n\/\/ print logger config\r\nfunc (l *Logger) PrintLogger() {\r\n\tfmt.Printf(\"logDir<%s>, logName<%s>, level<%s>, maxSize<%d kb>, maxFileNum<%d>, logMode=%s\\n\",\r\n\t\tl.logDir, l.logName, severityName[l.level],\r\n\t\tl.maxSize\/1024, l.maxFileNum,\r\n\t\tl.logMode)\r\n}\r\n\r\n\/\/ file max size\r\nfunc (l *Logger) SetFileMaxSize(maxSize int) {\r\n\tif maxSize < 0 {\r\n\t\treturn\r\n\t}\r\n\tl.maxSize = maxSize * 1024\r\n}\r\nfunc (l *Logger) getFileMaxSize() int {\r\n\treturn l.maxSize\r\n}\r\n\r\n\/\/ max file num\r\nfunc (l *Logger) SetMaxFileNum(maxFileNum int) {\r\n\tif maxFileNum < 0 {\r\n\t\treturn\r\n\t}\r\n\r\n\tl.maxFileNum = maxFileNum\r\n}\r\nfunc (l *Logger) getMaxFileNum() int {\r\n\treturn l.maxFileNum\r\n}\r\n\r\n\/\/ log level\r\nfunc (l *Logger) SetLogLevel(level string) {\r\n\tl.level = severityByName(level)\r\n}\r\nfunc (l *Logger) getLogLevel() severity {\r\n\treturn l.level\r\n}\r\nfunc (l *Logger) IsLogFatal() bool {\r\n\tif l.level == fatalLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogError() bool {\r\n\tif l.level == errorLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogWarn() bool {\r\n\tif l.level == warnLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogInfo() bool {\r\n\tif l.level == infoLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogDebug() bool {\r\n\tif l.level == debugLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogTrace() bool {\r\n\tif l.level == traceLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\n\r\n\/\/ log dir\r\nfunc (l *Logger) SetLogDir(logDir string) {\r\n\terr := os.Mkdir(logDir, 0666)\r\n\tif err != nil && os.IsNotExist(err) {\r\n\t\treturn\r\n\t}\r\n\tl.logDir = logDir\r\n}\r\nfunc (l *Logger) getLogDir() string {\r\n\treturn l.logDir\r\n}\r\n\r\n\/\/ log mode\r\nfunc (l *Logger) SetLogMode(logMode Mode) {\r\n\tl.logMode = logMode\r\n}\r\nfunc (l *Logger) getLogMode() Mode {\r\n\treturn l.logMode\r\n}\r\nfunc (l *Logger) isLogFileMode() bool {\r\n\tif (l.logMode & ToFile) != 0 {\r\n\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) isLogStderrMode() bool {\r\n\tif (l.logMode & ToStderr) != 0 {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\n\r\n\/\/ userdef log header\r\nfunc (l *Logger) SetLogHeader(header func(string) string) {\r\n\tl.callHeader = header\r\n}\r\nfunc (l *Logger) getLogHeader() func(string) string {\r\n\treturn l.callHeader\r\n}\r\nfunc (l *Logger) SetLogConfig(logDir, level string, maxSize, maxFileNum int, logMode Mode) {\r\n\tl.SetLogDir(logDir)\r\n\tl.SetLogLevel(level)\r\n\tl.SetFileMaxSize(maxSize)\r\n\tl.SetMaxFileNum(maxFileNum)\r\n\tl.logMode = logMode\r\n}\r\n\r\n\/\/ get buffer\r\nfunc (l *Logger) getBuffer() *logBuffer {\r\n\tb := l.buffer\r\n\tif b != nil {\r\n\t\tl.buffer = b.next\r\n\t}\r\n\r\n\tif b == nil {\r\n\t\tb = new(logBuffer)\r\n\t} else {\r\n\t\tb.next = nil\r\n\t\tb.Reset()\r\n\t}\r\n\r\n\treturn b\r\n}\r\n\r\ntype flushWriter interface {\r\n\tFlush() error\r\n\tSync() error\r\n\tFclose() error\r\n\tio.Writer\r\n}\r\n\r\ntype syncBuffer struct {\r\n\t*bufio.Writer\r\n\tfile *os.File\r\n}\r\n\r\nfunc (sbuf *syncBuffer) Sync() error {\r\n\treturn sbuf.file.Sync()\r\n}\r\nfunc (sbuf *syncBuffer) Fclose() error {\r\n\treturn sbuf.file.Close()\r\n}\r\n\r\ntype logBuffer struct {\r\n\tbytes.Buffer\r\n\tnext *logBuffer\r\n}\r\n\r\n\/\/ get log filename, funcname and line number\r\nfunc GetLogFileLine(depth int) (string, string, int) {\r\n\tvar funcName string\r\n\tpc, file, line, ok := runtime.Caller(3 + depth)\r\n\tif !ok {\r\n\t\tfuncName = \"unknow\"\r\n\t\tfile = \"???\"\r\n\t\tline = 1\r\n\t} else {\r\n\t\tidx := strings.LastIndex(file, \"\/\")\r\n\t\tif idx >= 0 {\r\n\t\t\tfile = file[idx+1:]\r\n\t\t}\r\n\t\tfullName := runtime.FuncForPC(pc).Name()\r\n\t\tshortName := strings.Split(fullName, \".\")\r\n\t\tfuncName = shortName[len(shortName)-1]\r\n\t}\r\n\r\n\treturn funcName, file, line\r\n}\r\n\r\n\/\/ log header by default\r\nfunc (l *Logger) formatHeader(level string) string {\r\n\tnow := time.Now()\r\n\tyear, month, day := now.Date()\r\n\thour, minute, second := now.Clock()\r\n\tusec := now.Nanosecond() \/ 1000000\r\n\r\n\t_, file, line := GetLogFileLine(2)\r\n\t\/\/ yy-mm-dd hh:mm:ss.uuuu level pid file[line]:\r\n\theader := fmt.Sprintf(\"%04d-%02d-%02d %02d:%02d:%02d.%04d [%s] %d %s[%d]\",\r\n\t\tyear, month, day, hour, minute, second, usec,\r\n\t\tlevel, os.Getpid(), file, line)\r\n\r\n\treturn header\r\n}\r\n\r\n\/\/ Trace\r\nfunc Ltrace(format string, args ...interface{}) {\r\n\tlogger.println(traceLevel, format, args...)\r\n}\r\n\r\n\/\/ Debug\r\nfunc Ldebug(format string, args ...interface{}) {\r\n\tlogger.println(debugLevel, format, args...)\r\n}\r\n\r\n\/\/ Info\r\nfunc Linfo(format string, args ...interface{}) {\r\n\tlogger.println(infoLevel, format, args...)\r\n}\r\n\r\n\/\/ Warning\r\nfunc Lwarn(format string, args ...interface{}) {\r\n\tlogger.println(warnLevel, format, args...)\r\n}\r\n\r\n\/\/ Error\r\nfunc Lerror(format string, args ...interface{}) {\r\n\tlogger.println(errorLevel, format, args...)\r\n}\r\n\r\n\/\/ Fatal\r\nfunc Lfatal(format string, args ...interface{}) {\r\n\tlogger.println(fatalLevel, format, args...)\r\n}\r\n\r\nfunc (l *Logger) println(s severity, format string, args ...interface{}) {\r\n\tif l.level > s && s < numSeverity {\r\n\t\treturn\r\n\t}\r\n\r\n\theader := l.callHeader(severityName[s])\r\n\tmessage := fmt.Sprintf(format, args...)\r\n\r\n\t\/\/ equal prev log message\r\n\tif l.prevLog.repeatLog == message {\r\n\t\tl.prevLog.repeatNum++\r\n\t\tl.prevLog.lastHeader = header\r\n\t\treturn\r\n\t} else {\r\n\t\tl.printLastLog()\r\n\t\tl.prevLog.repeatLog = message\r\n\t\tl.prevLog.lastHeader = header\r\n\t\tl.prevLog.repeatNum = 0\r\n\t}\r\n\r\n\tl.output(header, message)\r\n}\r\n\r\n\/\/ rename file when rolling policy\r\nfunc rename(fname, kname string) (string, int) {\r\n\tvar n int = 0\r\n\tlenk := len(kname)\r\n\tlenf := len(fname)\r\n\tif lenk > lenf {\r\n\t\tidx := kname[(lenf + 1):]\r\n\t\tn, _ = strconv.Atoi(idx)\r\n\t}\r\n\r\n\tname := fmt.Sprintf(\"%s.%d\", fname, n+1)\r\n\terr := os.Rename(kname, name)\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t}\r\n\treturn name, n + 1\r\n}\r\n\r\n\/\/ create log file by default\r\nfunc (l *Logger) createLogFile() *syncBuffer {\r\n\tsBuf := new(syncBuffer)\r\n\r\n\tfname := filepath.Join(l.logDir, l.logName)\r\n\r\n\t\/\/\r\n\tif l.keepName == nil {\r\n\t\tl.keepName = make([]string, l.maxFileNum)\r\n\t}\r\n\tlength := len(l.keepName)\r\n\tfor i := length - 1; i >= 0; i-- {\r\n\t\tif len(l.keepName[i]) == 0 {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tif i == l.maxFileNum-1 {\r\n\t\t\tos.Remove(l.keepName[i])\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tif i == 0 {\r\n\t\t\tl.writer.Fclose()\r\n\t\t}\r\n\t\tname, n := rename(fname, l.keepName[i])\r\n\t\tl.keepName[n] = name\r\n\t}\r\n\r\n\tf, err := os.Create(fname)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"can't create log file<%s>\\n\", fname)\r\n\t\tos.Exit(-1)\r\n\t}\r\n\tsBuf.file = f\r\n\tsBuf.Writer = bufio.NewWriterSize(f, 1024*1024)\r\n\tl.createTime = getCreateTime()\r\n\tl.keepName[0] = fname\r\n\r\n\treturn sBuf\r\n}\r\n\r\n\/\/ get next create time in daily policy of rolling policy by default\r\nfunc getCreateTime() int64 {\r\n\ttimeStr := time.Now().Format(\"2006-01-02 00:00:00\")\r\n\tt, _ := time.Parse(\"2006-01-02 00:00:00\", timeStr)\r\n\td, _ := time.ParseDuration(\"+24h\")\r\n\treturn t.Add(d).Unix()\r\n}\r\n\r\nfunc isInToday(createTime int64) bool {\r\n\tnow := time.Now().Unix()\r\n\r\n\tif now > createTime {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}\r\nfunc (l *Logger) output(header string, msg string) {\r\n\tl.mu.Lock()\r\n\tdefer l.mu.Unlock()\r\n\tbuffer := l.getBuffer()\r\n\tmessage := msg\r\n\r\n\tbuffer.WriteString(header)\r\n\tbuffer.WriteString(\": \")\r\n\tbuffer.WriteString(message)\r\n\tbuffer.WriteString(\"\\n\")\r\n\tdata := buffer.Bytes()\r\n\r\n\t\/\/ log mode\r\n\tif l.isLogStderrMode() {\r\n\t\tos.Stdout.Write(data)\r\n\t}\r\n\tif l.isLogFileMode() {\r\n\t\t\/\/ rolling policy\r\n\t\t\/\/ 1, file max size\r\n\t\t\/\/ 2, daily\r\n\t\tif l.writer == nil ||\r\n\t\t\t(l.maxFileNum > 1 && ((l.maxSize == 0 && isInToday(l.createTime)) ||\r\n\t\t\t\t(l.maxSize > 0 && l.nBytes > l.maxSize))) {\r\n\t\t\tl.writer = l.createLogFile()\r\n\t\t\tl.nBytes = 0\r\n\t\t}\r\n\t\tn, _ := l.writer.Write(data)\r\n\t\tl.writer.Flush()\r\n\t\tl.nBytes += n\r\n\t}\r\n\r\n\tl.putBuffer(buffer)\r\n}\r\n\r\nfunc (l *Logger) printLastLog() {\r\n\tif l.prevLog.repeatNum > 0 {\r\n\t\tmsg := fmt.Sprintf(\"Last message repeated %d times\", l.prevLog.repeatNum)\r\n\t\tl.output(l.prevLog.lastHeader, msg)\r\n\t}\r\n}\r\n\r\nfunc (l *Logger) putBuffer(b *logBuffer) {\r\n\tb.next = l.buffer\r\n\tl.buffer = b\r\n}\r\n\r\nfunc (l *Logger) close() {\r\n\tl.printLastLog()\r\n\r\n\tif l.writer == nil {\r\n\t\treturn\r\n\t}\r\n\tl.writer.Sync()\r\n\tl.writer.Fclose()\r\n}\r\n<commit_msg>update file<commit_after>\/\/ logger\r\n\r\n\/\/Go support for leveled logs\r\n\/\/\r\n\/\/ Copyright 2016 @wren. All Rights Reserved.\r\n\/\/\r\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\r\n\/\/ you may not use this file except in compliance with the License.\r\n\/\/ You may obtain a copy of the License at\r\n\/\/\r\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n\/\/\r\n\/\/ Unless required by applicable law or agreed to in writing, software\r\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\r\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n\/\/ See the License for the specific language governing permissions and\r\n\/\/ limitations under the License.\r\n\r\n\/\/It provides functions Linfo, Lwarn, Lerror, Lfatal\r\n\/\/Basic examples:\r\n\r\n\/\/ l := InitLogger()\r\n\/\/ defer CloseLogger()\r\n\/\/ l.SetLogLevel(\"trace\")\r\n\/\/ l.SetLogMode(ToStderr)\r\n\/\/ l.SetLogDir(\"\/tmp\")\r\n\r\n\/\/ Ltrace(\"hello world\")\r\n\/\/ Lerror(\"this is a test\")\r\n\r\n\/\/ It has two rolling policy: file max size and daily on zero\r\n\/\/ examples:\r\n\/\/ l := InitLogger()\r\n\/\/ defer CloseLogger()\r\n\/\/ l.SetMaxFileNum(2) \/\/ default daily on zero\r\n\/\/ l.SetFileMaxSize(4) \/\/ use file max size policy to replace file max size policy\r\n\/\/\r\n\r\npackage minilog\r\n\r\nimport (\r\n\t\"bufio\"\r\n\t\"bytes\"\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\t\"os\/user\"\r\n\t\"path\/filepath\"\r\n\t\"runtime\"\r\n\t\"strconv\"\r\n\t\"strings\"\r\n\t\"sync\"\r\n\t\"time\"\r\n)\r\n\r\nvar logger *Logger\r\n\r\ntype severity int\r\n\r\nconst (\r\n\ttraceLevel severity = iota\r\n\tdebugLevel\r\n\tinfoLevel\r\n\twarnLevel\r\n\terrorLevel\r\n\tfatalLevel\r\n\tnumSeverity = 6\r\n)\r\n\r\nvar severityName = []string{\r\n\ttraceLevel: \"TRACE\",\r\n\tdebugLevel: \"DEBUG\",\r\n\tinfoLevel: \"INFO \",\r\n\twarnLevel: \"WARN \",\r\n\terrorLevel: \"ERROR\",\r\n\tfatalLevel: \"FATAL\",\r\n}\r\n\r\ntype Mode int\r\n\r\nconst (\r\n\tToFile = (1 << 0)\r\n\tToStderr = (1 << 1)\r\n\tAlsoToStderr = (ToFile | ToStderr)\r\n)\r\n\r\nfunc (m Mode) String() string {\r\n\tswitch m {\r\n\tcase ToFile:\r\n\t\treturn \"log to file\"\r\n\tcase ToStderr:\r\n\t\treturn \"print to stderr\"\r\n\tcase AlsoToStderr:\r\n\t\treturn \"log to file and print to stderr\"\r\n\tdefault:\r\n\t\treturn \"unknow log mode\"\r\n\t}\r\n}\r\n\r\ntype Logger struct {\r\n\tmaxSize int\r\n\tmaxFileNum int\r\n\tlevel severity\r\n\tlogDir string\r\n\tlogMode Mode\r\n\tlogName string\r\n\tprevLog *lastLog\r\n\tkeepName []string\r\n\tcreateTime int64\r\n\tcallHeader func(string) string\r\n\tbuffer *logBuffer\r\n\tmu sync.Mutex\r\n\twriter flushWriter\r\n\tnBytes int\r\n}\r\n\r\ntype lastLog struct {\r\n\trepeatLog string\r\n\trepeatNum int\r\n\tlastHeader string\r\n}\r\n\r\nvar (\r\n\tpid = os.Getpid()\r\n\tprogram = filepath.Base(os.Args[0])\r\n\thost = \"unknownhost\"\r\n\tuserName = \"unknownuser\"\r\n)\r\n\r\nfunc init() {\r\n\th, err := os.Hostname()\r\n\tif err == nil {\r\n\t\thost = shortHostname(h)\r\n\t}\r\n\r\n\tcurrent, err := user.Current()\r\n\tif err == nil {\r\n\t\tuserName = current.Username\r\n\t}\r\n\r\n\t\/\/ Sanitize userName since it may contain filepath separators on Windows.\r\n\tuserName = strings.Replace(userName, `\\`, \"_\", -1)\r\n}\r\n\r\nfunc shortHostname(hostname string) string {\r\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\r\n\t\treturn hostname[:i]\r\n\t}\r\n\treturn hostname\r\n}\r\n\r\nfunc severityByName(s string) severity {\r\n\ts = strings.ToUpper(s)\r\n\tfor i, name := range severityName {\r\n\t\tif name == s {\r\n\t\t\treturn severity(i)\r\n\t\t}\r\n\t}\r\n\treturn warnLevel\r\n}\r\n\r\n\/\/ init logger for log\r\nfunc InitLogger() *Logger {\r\n\tl := new(Logger)\r\n\tl.maxSize = 0\r\n\tl.maxFileNum = 1\r\n\tl.level = warnLevel\r\n\tl.logDir = os.TempDir()\r\n\tl.logMode = 0\r\n\tl.prevLog = new(lastLog)\r\n\tl.logName = program + \".log.\" + host\r\n\tl.SetLogHeader(l.formatHeader)\r\n\tl.nBytes = 0\r\n\tlogger = l\r\n\r\n\treturn l\r\n}\r\nfunc CloseLogger() {\r\n\tlogger.close()\r\n}\r\n\r\n\/\/ print logger config\r\nfunc (l *Logger) PrintLogger() {\r\n\tfmt.Printf(\"logDir<%s>, logName<%s>, level<%s>, maxSize<%d kb>, maxFileNum<%d>, logMode=%s\\n\",\r\n\t\tl.logDir, l.logName, severityName[l.level],\r\n\t\tl.maxSize\/1024, l.maxFileNum,\r\n\t\tl.logMode)\r\n}\r\n\r\n\/\/ file max size\r\nfunc (l *Logger) SetFileMaxSize(maxSize int) {\r\n\tif maxSize < 0 {\r\n\t\treturn\r\n\t}\r\n\tl.maxSize = maxSize * 1024\r\n}\r\nfunc (l *Logger) getFileMaxSize() int {\r\n\treturn l.maxSize\r\n}\r\n\r\n\/\/ max file num\r\nfunc (l *Logger) SetMaxFileNum(maxFileNum int) {\r\n\tif maxFileNum < 0 {\r\n\t\treturn\r\n\t}\r\n\r\n\tl.maxFileNum = maxFileNum\r\n}\r\nfunc (l *Logger) getMaxFileNum() int {\r\n\treturn l.maxFileNum\r\n}\r\n\r\n\/\/ log level\r\nfunc (l *Logger) SetLogLevel(level string) {\r\n\tl.level = severityByName(level)\r\n}\r\nfunc (l *Logger) getLogLevel() severity {\r\n\treturn l.level\r\n}\r\nfunc (l *Logger) IsLogFatal() bool {\r\n\tif l.level == fatalLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogError() bool {\r\n\tif l.level == errorLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogWarn() bool {\r\n\tif l.level == warnLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogInfo() bool {\r\n\tif l.level == infoLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogDebug() bool {\r\n\tif l.level == debugLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) IsLogTrace() bool {\r\n\tif l.level == traceLevel {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\n\r\n\/\/ log dir\r\nfunc (l *Logger) SetLogDir(logDir string) {\r\n\terr := os.Mkdir(logDir, 0666)\r\n\tif err != nil && os.IsNotExist(err) {\r\n\t\treturn\r\n\t}\r\n\tl.logDir = logDir\r\n}\r\nfunc (l *Logger) getLogDir() string {\r\n\treturn l.logDir\r\n}\r\n\r\n\/\/ log mode\r\nfunc (l *Logger) SetLogMode(logMode Mode) {\r\n\tl.logMode = logMode\r\n}\r\nfunc (l *Logger) getLogMode() Mode {\r\n\treturn l.logMode\r\n}\r\nfunc (l *Logger) isLogFileMode() bool {\r\n\tif (l.logMode & ToFile) != 0 {\r\n\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\nfunc (l *Logger) isLogStderrMode() bool {\r\n\tif (l.logMode & ToStderr) != 0 {\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}\r\n\r\n\/\/ userdef log header\r\nfunc (l *Logger) SetLogHeader(header func(string) string) {\r\n\tl.callHeader = header\r\n}\r\nfunc (l *Logger) getLogHeader() func(string) string {\r\n\treturn l.callHeader\r\n}\r\nfunc (l *Logger) SetLogConfig(logDir, level string, maxSize, maxFileNum int, logMode Mode) {\r\n\tl.SetLogDir(logDir)\r\n\tl.SetLogLevel(level)\r\n\tl.SetFileMaxSize(maxSize)\r\n\tl.SetMaxFileNum(maxFileNum)\r\n\tl.logMode = logMode\r\n}\r\n\r\n\/\/ get buffer\r\nfunc (l *Logger) getBuffer() *logBuffer {\r\n\tb := l.buffer\r\n\tif b != nil {\r\n\t\tl.buffer = b.next\r\n\t}\r\n\r\n\tif b == nil {\r\n\t\tb = new(logBuffer)\r\n\t} else {\r\n\t\tb.next = nil\r\n\t\tb.Reset()\r\n\t}\r\n\r\n\treturn b\r\n}\r\n\r\ntype flushWriter interface {\r\n\tFlush() error\r\n\tSync() error\r\n\tFclose() error\r\n\tio.Writer\r\n}\r\n\r\ntype syncBuffer struct {\r\n\t*bufio.Writer\r\n\tfile *os.File\r\n}\r\n\r\nfunc (sbuf *syncBuffer) Sync() error {\r\n\treturn sbuf.file.Sync()\r\n}\r\nfunc (sbuf *syncBuffer) Fclose() error {\r\n\treturn sbuf.file.Close()\r\n}\r\n\r\ntype logBuffer struct {\r\n\tbytes.Buffer\r\n\tnext *logBuffer\r\n}\r\n\r\n\/\/ get log filename, funcname and line number\r\nfunc GetLogFileLine(depth int) (string, string, int) {\r\n\tvar funcName string\r\n\tpc, file, line, ok := runtime.Caller(3 + depth)\r\n\tif !ok {\r\n\t\tfuncName = \"unknow\"\r\n\t\tfile = \"???\"\r\n\t\tline = 1\r\n\t} else {\r\n\t\tidx := strings.LastIndex(file, \"\/\")\r\n\t\tif idx >= 0 {\r\n\t\t\tfile = file[idx+1:]\r\n\t\t}\r\n\t\tfullName := runtime.FuncForPC(pc).Name()\r\n\t\tshortName := strings.Split(fullName, \".\")\r\n\t\tfuncName = shortName[len(shortName)-1]\r\n\t}\r\n\r\n\treturn funcName, file, line\r\n}\r\n\r\n\/\/ log header by default\r\nfunc (l *Logger) formatHeader(level string) string {\r\n\tnow := time.Now()\r\n\tyear, month, day := now.Date()\r\n\thour, minute, second := now.Clock()\r\n\tusec := now.Nanosecond() \/ 1000000\r\n\r\n\t_, file, line := GetLogFileLine(2)\r\n\t\/\/ yy-mm-dd hh:mm:ss.uuuu level pid file[line]:\r\n\theader := fmt.Sprintf(\"%04d-%02d-%02d %02d:%02d:%02d.%04d [%s] %d %s[%d]\",\r\n\t\tyear, month, day, hour, minute, second, usec,\r\n\t\tlevel, os.Getpid(), file, line)\r\n\r\n\treturn header\r\n}\r\n\r\n\/\/ Trace\r\nfunc Ltrace(format string, args ...interface{}) {\r\n\tlogger.println(traceLevel, format, args...)\r\n}\r\n\r\n\/\/ Debug\r\nfunc Ldebug(format string, args ...interface{}) {\r\n\tlogger.println(debugLevel, format, args...)\r\n}\r\n\r\n\/\/ Info\r\nfunc Linfo(format string, args ...interface{}) {\r\n\tlogger.println(infoLevel, format, args...)\r\n}\r\n\r\n\/\/ Warning\r\nfunc Lwarn(format string, args ...interface{}) {\r\n\tlogger.println(warnLevel, format, args...)\r\n}\r\n\r\n\/\/ Error\r\nfunc Lerror(format string, args ...interface{}) {\r\n\tlogger.println(errorLevel, format, args...)\r\n}\r\n\r\n\/\/ Fatal\r\nfunc Lfatal(format string, args ...interface{}) {\r\n\tlogger.println(fatalLevel, format, args...)\r\n}\r\n\r\nfunc (l *Logger) println(s severity, format string, args ...interface{}) {\r\n\tif l.level > s && s < numSeverity {\r\n\t\treturn\r\n\t}\r\n\r\n\theader := l.callHeader(severityName[s])\r\n\tmessage := fmt.Sprintf(format, args...)\r\n\r\n\t\/\/ equal prev log message\r\n\tif l.prevLog.repeatLog == message {\r\n\t\tl.prevLog.repeatNum++\r\n\t\tl.prevLog.lastHeader = header\r\n\t\treturn\r\n\t} else {\r\n\t\tl.printLastLog()\r\n\t\tl.prevLog.repeatLog = message\r\n\t\tl.prevLog.lastHeader = header\r\n\t\tl.prevLog.repeatNum = 0\r\n\t}\r\n\r\n\tl.output(header, message)\r\n}\r\n\r\n\/\/ rename file when rolling policy\r\nfunc rename(fname, kname string) (string, int) {\r\n\tvar n int = 0\r\n\tlenk := len(kname)\r\n\tlenf := len(fname)\r\n\tif lenk > lenf {\r\n\t\tidx := kname[(lenf + 1):]\r\n\t\tn, _ = strconv.Atoi(idx)\r\n\t}\r\n\r\n\tname := fmt.Sprintf(\"%s.%d\", fname, n+1)\r\n\terr := os.Rename(kname, name)\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t}\r\n\treturn name, n + 1\r\n}\r\n\r\n\/\/ create log file by default\r\nfunc (l *Logger) createLogFile() *syncBuffer {\r\n\tsBuf := new(syncBuffer)\r\n\r\n\tfname := filepath.Join(l.logDir, l.logName)\r\n\r\n\t\/\/\r\n\tif l.keepName == nil {\r\n\t\tl.keepName = make([]string, l.maxFileNum)\r\n\t}\r\n\tlength := len(l.keepName)\r\n\tfor i := length - 1; i >= 0; i-- {\r\n\t\tif len(l.keepName[i]) == 0 {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tif i == l.maxFileNum-1 {\r\n\t\t\tos.Remove(l.keepName[i])\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tif i == 0 {\r\n\t\t\tl.writer.Fclose()\r\n\t\t}\r\n\t\tname, n := rename(fname, l.keepName[i])\r\n\t\tl.keepName[n] = name\r\n\t}\r\n\r\n\tf, err := os.Create(fname)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"can't create log file<%s>\\n\", fname)\r\n\t\tos.Exit(-1)\r\n\t}\r\n\tsBuf.file = f\r\n\tsBuf.Writer = bufio.NewWriterSize(f, 1024*1024)\r\n\tl.createTime = getCreateTime()\r\n\tl.keepName[0] = fname\r\n\r\n\treturn sBuf\r\n}\r\n\r\n\/\/ get next create time in daily policy of rolling policy by default\r\nfunc getCreateTime() int64 {\r\n\ttimeStr := time.Now().Format(\"2006-01-02 00:00:00\")\r\n\tt, _ := time.Parse(\"2006-01-02 00:00:00\", timeStr)\r\n\td, _ := time.ParseDuration(\"+24h\")\r\n\treturn t.Add(d).Unix()\r\n}\r\n\r\nfunc isInToday(createTime int64) bool {\r\n\tnow := time.Now().Unix()\r\n\r\n\tif now > createTime {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}\r\nfunc (l *Logger) output(header string, msg string) {\r\n\tl.mu.Lock()\r\n\tdefer l.mu.Unlock()\r\n\tbuffer := l.getBuffer()\r\n\tmessage := msg\r\n\r\n\tbuffer.WriteString(header)\r\n\tbuffer.WriteString(\": \")\r\n\tbuffer.WriteString(message)\r\n\tbuffer.WriteString(\"\\n\")\r\n\tdata := buffer.Bytes()\r\n\r\n\t\/\/ log mode\r\n\tif l.isLogStderrMode() {\r\n\t\tos.Stdout.Write(data)\r\n\t}\r\n\tif l.isLogFileMode() {\r\n\t\t\/\/ rolling policy\r\n\t\t\/\/ 1, file max size\r\n\t\t\/\/ 2, daily\r\n\t\tif l.writer == nil ||\r\n\t\t\t(l.maxFileNum > 1 && ((l.maxSize == 0 && isInToday(l.createTime)) ||\r\n\t\t\t\t(l.maxSize > 0 && l.nBytes > l.maxSize))) {\r\n\t\t\tl.writer = l.createLogFile()\r\n\t\t\tl.nBytes = 0\r\n\t\t}\r\n\t\tn, _ := l.writer.Write(data)\r\n\t\tl.writer.Flush()\r\n\t\tl.nBytes += n\r\n\t}\r\n\r\n\tl.putBuffer(buffer)\r\n}\r\n\r\nfunc (l *Logger) printLastLog() {\r\n\tif l.prevLog.repeatNum > 0 {\r\n\t\tmsg := fmt.Sprintf(\"Last message repeated %d times\", l.prevLog.repeatNum)\r\n\t\tl.output(l.prevLog.lastHeader, msg)\r\n\t}\r\n}\r\n\r\nfunc (l *Logger) putBuffer(b *logBuffer) {\r\n\tb.next = l.buffer\r\n\tl.buffer = b\r\n}\r\n\r\nfunc (l *Logger) close() {\r\n\tl.printLastLog()\r\n\r\n\tif l.writer == nil {\r\n\t\treturn\r\n\t}\r\n\tl.writer.Sync()\r\n\tl.writer.Fclose()\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"text\/template\"\n)\n\nvar errInvalidLogLevel = errors.New(\"logger: invalid log level\")\n\nconst (\n\tlevelCritical = iota\n\tlevelFatal\n\tlevelSuccess\n\tlevelHint\n\tlevelDebug\n\tlevelInfo\n\tlevelWarn\n\tlevelError\n)\n\nvar (\n\tsequenceNo uint64\n\tlogger *BeeLogger\n)\n\n\/\/ BeeLogger logs logging records to the specified io.Writer\ntype BeeLogger struct {\n\tmu sync.Mutex\n\toutput io.Writer\n}\n\n\/\/ LogRecord represents a log record and contains the timestamp when the record\n\/\/ was created, an increasing id, level and the actual formatted log line.\ntype LogRecord struct {\n\tID uint64\n\tLevel string\n\tMessage string\n\tFilename string\n\tLineNo int\n}\n\nvar (\n\tlogRecordTemplate *template.Template\n\tdebugLogRecordTemplate *template.Template\n\tdebugLogFormat string\n)\n\nfunc init() {\n\tvar (\n\t\terr error\n\t\tsimpleLogFormat = `{{Now \"2006\/01\/02 15:04:05\"}} {{.Level}} ▶ {{.ID}} {{.Message}}{{EndLine}}`\n\t\tdebugLogFormat = `{{Now \"2006\/01\/02 15:04:05\"}} {{.Level}} ▶ {{.ID}} {{.Filename}}:{{.LineNo}} {{.Message}}{{EndLine}}`\n\t)\n\n\t\/\/ Initialize and parse logging templates\n\tfuncs := template.FuncMap{\n\t\t\"Now\": Now,\n\t\t\"EndLine\": EndLine,\n\t}\n\tlogRecordTemplate, err = template.New(\"logRecordTemplate\").Funcs(funcs).Parse(simpleLogFormat)\n\tMustCheck(err)\n\tdebugLogRecordTemplate, err = template.New(\"dbgLogRecordTemplate\").Funcs(funcs).Parse(debugLogFormat)\n\tMustCheck(err)\n\n\t\/\/ Initialize the logger instance with a NewColorWriter output\n\tlogger = &BeeLogger{output: NewColorWriter(os.Stdout)}\n}\n\n\/\/ SetOutput sets the logger output destination\nfunc (l *BeeLogger) SetOutput(w io.Writer) {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tl.output = NewColorWriter(w)\n}\n\nfunc (l *BeeLogger) getLevelTag(level int) string {\n\tswitch level {\n\tcase levelFatal:\n\t\treturn \"FATAL \"\n\tcase levelSuccess:\n\t\treturn \"SUCCESS \"\n\tcase levelHint:\n\t\treturn \"HINT \"\n\tcase levelDebug:\n\t\treturn \"DEBUG \"\n\tcase levelInfo:\n\t\treturn \"INFO \"\n\tcase levelWarn:\n\t\treturn \"WARN \"\n\tcase levelError:\n\t\treturn \"ERROR \"\n\tcase levelCritical:\n\t\treturn \"CRITICAL\"\n\tdefault:\n\t\tpanic(errInvalidLogLevel)\n\t}\n}\n\nfunc (l *BeeLogger) getColorLevel(level int) string {\n\tswitch level {\n\tcase levelCritical:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelFatal:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelInfo:\n\t\treturn BlueBold(l.getLevelTag(level))\n\tcase levelHint:\n\t\treturn CyanBold(l.getLevelTag(level))\n\tcase levelDebug:\n\t\treturn YellowBold(l.getLevelTag(level))\n\tcase levelError:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelWarn:\n\t\treturn YellowBold(l.getLevelTag(level))\n\tcase levelSuccess:\n\t\treturn GreenBold(l.getLevelTag(level))\n\tdefault:\n\t\tpanic(errInvalidLogLevel)\n\t}\n}\n\n\/\/ mustLog logs the message according to the specified level and arguments.\n\/\/ It panics in case of an error.\nfunc (l *BeeLogger) mustLog(level int, message string, args ...interface{}) {\n\t\/\/ Create the logging record and pass into the output\n\trecord := LogRecord{\n\t\tID: atomic.AddUint64(&sequenceNo, 1),\n\t\tLevel: l.getColorLevel(level),\n\t\tMessage: fmt.Sprintf(message, args...),\n\t}\n\n\terr := logRecordTemplate.Execute(l.output, record)\n\tMustCheck(err)\n}\n\n\/\/ mustLogDebug logs a debug message only if debug mode\n\/\/ is enabled. i.e. DEBUG_ENABLED=\"1\"\nfunc (l *BeeLogger) mustLogDebug(message string, args ...interface{}) {\n\tif !IsDebugEnabled() {\n\t\treturn\n\t}\n\n\t\/\/ Change the output to Stderr\n\tl.SetOutput(os.Stderr)\n\n\t\/\/ Create the log record and Get the filename\n\t\/\/ and the line number of the caller\n\t_, file, line, _ := runtime.Caller(1)\n\trecord := LogRecord{\n\t\tID: atomic.AddUint64(&sequenceNo, 1),\n\t\tLevel: l.getColorLevel(levelDebug),\n\t\tMessage: fmt.Sprintf(message, args...),\n\t\tLineNo: line,\n\t\tFilename: filepath.Base(file),\n\t}\n\terr := debugLogRecordTemplate.Execute(l.output, record)\n\tMustCheck(err)\n}\n\n\/\/ Debug outputs a debug log message\nfunc (l *BeeLogger) Debug(message string) {\n\tl.mustLogDebug(message)\n}\n\n\/\/ Debugf outputs a formatted debug log message\nfunc (l *BeeLogger) Debugf(message string, vars ...interface{}) {\n\tl.mustLogDebug(message, vars...)\n}\n\n\/\/ Info outputs an information log message\nfunc (l *BeeLogger) Info(message string) {\n\tl.mustLog(levelInfo, message)\n}\n\n\/\/ Infof outputs a formatted information log message\nfunc (l *BeeLogger) Infof(message string, vars ...interface{}) {\n\tl.mustLog(levelInfo, message, vars...)\n}\n\n\/\/ Warn outputs a warning log message\nfunc (l *BeeLogger) Warn(message string) {\n\tl.mustLog(levelWarn, message)\n}\n\n\/\/ Warnf outputs a formatted warning log message\nfunc (l *BeeLogger) Warnf(message string, vars ...interface{}) {\n\tl.mustLog(levelWarn, message, vars...)\n}\n\n\/\/ Error outputs an error log message\nfunc (l *BeeLogger) Error(message string) {\n\tl.mustLog(levelError, message)\n}\n\n\/\/ Errorf outputs a formatted error log message\nfunc (l *BeeLogger) Errorf(message string, vars ...interface{}) {\n\tl.mustLog(levelError, message, vars...)\n}\n\n\/\/ Fatal outputs a fatal log message and exists\nfunc (l *BeeLogger) Fatal(message string) {\n\tl.mustLog(levelFatal, message)\n\tos.Exit(255)\n}\n\n\/\/ Fatalf outputs a formatted log message and exists\nfunc (l *BeeLogger) Fatalf(message string, vars ...interface{}) {\n\tl.mustLog(levelFatal, message, vars...)\n\tos.Exit(255)\n}\n\n\/\/ Success outputs a success log message\nfunc (l *BeeLogger) Success(message string) {\n\tl.mustLog(levelSuccess, message)\n}\n\n\/\/ Successf outputs a formatted success log message\nfunc (l *BeeLogger) Successf(message string, vars ...interface{}) {\n\tl.mustLog(levelSuccess, message, vars...)\n}\n\n\/\/ Hint outputs a hint log message\nfunc (l *BeeLogger) Hint(message string) {\n\tl.mustLog(levelHint, message)\n}\n\n\/\/ Hintf outputs a formatted hint log message\nfunc (l *BeeLogger) Hintf(message string, vars ...interface{}) {\n\tl.mustLog(levelHint, message, vars...)\n}\n\n\/\/ Critical outputs a critical log message\nfunc (l *BeeLogger) Critical(message string) {\n\tl.mustLog(levelCritical, message)\n}\n\n\/\/ Criticalf outputs a formatted critical log message\nfunc (l *BeeLogger) Criticalf(message string, vars ...interface{}) {\n\tl.mustLog(levelCritical, message, vars...)\n}\n<commit_msg>Included leading zeros for the log record ID<commit_after>\/\/ Copyright 2013 bee authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"text\/template\"\n)\n\nvar errInvalidLogLevel = errors.New(\"logger: invalid log level\")\n\nconst (\n\tlevelCritical = iota\n\tlevelFatal\n\tlevelSuccess\n\tlevelHint\n\tlevelDebug\n\tlevelInfo\n\tlevelWarn\n\tlevelError\n)\n\nvar (\n\tsequenceNo uint64\n\tlogger *BeeLogger\n)\n\n\/\/ BeeLogger logs logging records to the specified io.Writer\ntype BeeLogger struct {\n\tmu sync.Mutex\n\toutput io.Writer\n}\n\n\/\/ LogRecord represents a log record and contains the timestamp when the record\n\/\/ was created, an increasing id, level and the actual formatted log line.\ntype LogRecord struct {\n\tID string\n\tLevel string\n\tMessage string\n\tFilename string\n\tLineNo int\n}\n\nvar (\n\tlogRecordTemplate *template.Template\n\tdebugLogRecordTemplate *template.Template\n\tdebugLogFormat string\n)\n\nfunc init() {\n\tvar (\n\t\terr error\n\t\tsimpleLogFormat = `{{Now \"2006\/01\/02 15:04:05\"}} {{.Level}} ▶ {{.ID}} {{.Message}}{{EndLine}}`\n\t\tdebugLogFormat = `{{Now \"2006\/01\/02 15:04:05\"}} {{.Level}} ▶ {{.ID}} {{.Filename}}:{{.LineNo}} {{.Message}}{{EndLine}}`\n\t)\n\n\t\/\/ Initialize and parse logging templates\n\tfuncs := template.FuncMap{\n\t\t\"Now\": Now,\n\t\t\"EndLine\": EndLine,\n\t}\n\tlogRecordTemplate, err = template.New(\"logRecordTemplate\").Funcs(funcs).Parse(simpleLogFormat)\n\tMustCheck(err)\n\tdebugLogRecordTemplate, err = template.New(\"dbgLogRecordTemplate\").Funcs(funcs).Parse(debugLogFormat)\n\tMustCheck(err)\n\n\t\/\/ Initialize the logger instance with a NewColorWriter output\n\tlogger = &BeeLogger{output: NewColorWriter(os.Stdout)}\n}\n\n\/\/ SetOutput sets the logger output destination\nfunc (l *BeeLogger) SetOutput(w io.Writer) {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tl.output = NewColorWriter(w)\n}\n\nfunc (l *BeeLogger) getLevelTag(level int) string {\n\tswitch level {\n\tcase levelFatal:\n\t\treturn \"FATAL \"\n\tcase levelSuccess:\n\t\treturn \"SUCCESS \"\n\tcase levelHint:\n\t\treturn \"HINT \"\n\tcase levelDebug:\n\t\treturn \"DEBUG \"\n\tcase levelInfo:\n\t\treturn \"INFO \"\n\tcase levelWarn:\n\t\treturn \"WARN \"\n\tcase levelError:\n\t\treturn \"ERROR \"\n\tcase levelCritical:\n\t\treturn \"CRITICAL\"\n\tdefault:\n\t\tpanic(errInvalidLogLevel)\n\t}\n}\n\nfunc (l *BeeLogger) getColorLevel(level int) string {\n\tswitch level {\n\tcase levelCritical:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelFatal:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelInfo:\n\t\treturn BlueBold(l.getLevelTag(level))\n\tcase levelHint:\n\t\treturn CyanBold(l.getLevelTag(level))\n\tcase levelDebug:\n\t\treturn YellowBold(l.getLevelTag(level))\n\tcase levelError:\n\t\treturn RedBold(l.getLevelTag(level))\n\tcase levelWarn:\n\t\treturn YellowBold(l.getLevelTag(level))\n\tcase levelSuccess:\n\t\treturn GreenBold(l.getLevelTag(level))\n\tdefault:\n\t\tpanic(errInvalidLogLevel)\n\t}\n}\n\n\/\/ mustLog logs the message according to the specified level and arguments.\n\/\/ It panics in case of an error.\nfunc (l *BeeLogger) mustLog(level int, message string, args ...interface{}) {\n\t\/\/ Create the logging record and pass into the output\n\trecord := LogRecord{\n\t\tID: fmt.Sprintf(\"%04d\", atomic.AddUint64(&sequenceNo, 1)),\n\t\tLevel: l.getColorLevel(level),\n\t\tMessage: fmt.Sprintf(message, args...),\n\t}\n\n\terr := logRecordTemplate.Execute(l.output, record)\n\tMustCheck(err)\n}\n\n\/\/ mustLogDebug logs a debug message only if debug mode\n\/\/ is enabled. i.e. DEBUG_ENABLED=\"1\"\nfunc (l *BeeLogger) mustLogDebug(message string, args ...interface{}) {\n\tif !IsDebugEnabled() {\n\t\treturn\n\t}\n\n\t\/\/ Change the output to Stderr\n\tl.SetOutput(os.Stderr)\n\n\t\/\/ Create the log record and Get the filename\n\t\/\/ and the line number of the caller\n\t_, file, line, _ := runtime.Caller(1)\n\trecord := LogRecord{\n\t\tID: fmt.Sprintf(\"%04d\", atomic.AddUint64(&sequenceNo, 1)),\n\t\tLevel: l.getColorLevel(levelDebug),\n\t\tMessage: fmt.Sprintf(message, args...),\n\t\tLineNo: line,\n\t\tFilename: filepath.Base(file),\n\t}\n\terr := debugLogRecordTemplate.Execute(l.output, record)\n\tMustCheck(err)\n}\n\n\/\/ Debug outputs a debug log message\nfunc (l *BeeLogger) Debug(message string) {\n\tl.mustLogDebug(message)\n}\n\n\/\/ Debugf outputs a formatted debug log message\nfunc (l *BeeLogger) Debugf(message string, vars ...interface{}) {\n\tl.mustLogDebug(message, vars...)\n}\n\n\/\/ Info outputs an information log message\nfunc (l *BeeLogger) Info(message string) {\n\tl.mustLog(levelInfo, message)\n}\n\n\/\/ Infof outputs a formatted information log message\nfunc (l *BeeLogger) Infof(message string, vars ...interface{}) {\n\tl.mustLog(levelInfo, message, vars...)\n}\n\n\/\/ Warn outputs a warning log message\nfunc (l *BeeLogger) Warn(message string) {\n\tl.mustLog(levelWarn, message)\n}\n\n\/\/ Warnf outputs a formatted warning log message\nfunc (l *BeeLogger) Warnf(message string, vars ...interface{}) {\n\tl.mustLog(levelWarn, message, vars...)\n}\n\n\/\/ Error outputs an error log message\nfunc (l *BeeLogger) Error(message string) {\n\tl.mustLog(levelError, message)\n}\n\n\/\/ Errorf outputs a formatted error log message\nfunc (l *BeeLogger) Errorf(message string, vars ...interface{}) {\n\tl.mustLog(levelError, message, vars...)\n}\n\n\/\/ Fatal outputs a fatal log message and exists\nfunc (l *BeeLogger) Fatal(message string) {\n\tl.mustLog(levelFatal, message)\n\tos.Exit(255)\n}\n\n\/\/ Fatalf outputs a formatted log message and exists\nfunc (l *BeeLogger) Fatalf(message string, vars ...interface{}) {\n\tl.mustLog(levelFatal, message, vars...)\n\tos.Exit(255)\n}\n\n\/\/ Success outputs a success log message\nfunc (l *BeeLogger) Success(message string) {\n\tl.mustLog(levelSuccess, message)\n}\n\n\/\/ Successf outputs a formatted success log message\nfunc (l *BeeLogger) Successf(message string, vars ...interface{}) {\n\tl.mustLog(levelSuccess, message, vars...)\n}\n\n\/\/ Hint outputs a hint log message\nfunc (l *BeeLogger) Hint(message string) {\n\tl.mustLog(levelHint, message)\n}\n\n\/\/ Hintf outputs a formatted hint log message\nfunc (l *BeeLogger) Hintf(message string, vars ...interface{}) {\n\tl.mustLog(levelHint, message, vars...)\n}\n\n\/\/ Critical outputs a critical log message\nfunc (l *BeeLogger) Critical(message string) {\n\tl.mustLog(levelCritical, message)\n}\n\n\/\/ Criticalf outputs a formatted critical log message\nfunc (l *BeeLogger) Criticalf(message string, vars ...interface{}) {\n\tl.mustLog(levelCritical, message, vars...)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n\n\tconfig \"github.com\/qiniu\/logkit\/conf\"\n\t_ \"github.com\/qiniu\/logkit\/metric\/all\"\n\t\"github.com\/qiniu\/logkit\/mgr\"\n\t\"github.com\/qiniu\/logkit\/times\"\n\t\"github.com\/qiniu\/logkit\/utils\"\n\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/qiniu\/log\"\n)\n\n\/\/Config of logkit\ntype Config struct {\n\tMaxProcs int `json:\"max_procs\"`\n\tDebugLevel int `json:\"debug_level\"`\n\tProfileHost string `json:\"profile_host\"`\n\tConfsPath []string `json:\"confs_path\"`\n\tCleanSelfLog bool `json:\"clean_self_log\"`\n\tCleanSelfDir string `json:\"clean_self_dir\"`\n\tCleanSelfPattern string `json:\"clean_self_pattern\"`\n\tTimeLayouts []string `json:\"timeformat_layouts\"`\n\tCleanSelfLogCnt int `json:\"clean_self_cnt\"`\n\tStaticRootPath string `json:\"static_root_path\"`\n\tmgr.ManagerConfig\n}\n\nvar conf Config\n\nconst (\n\tVersion = \"v1.2.3\"\n\tdefaultReserveCnt = 5\n\tdefaultLogDir = \".\/run\"\n\tdefaultLogPattern = \"*.log-*\"\n)\n\nfunc getValidPath(confPaths []string) (paths []string) {\n\tpaths = make([]string, 0)\n\texits := make(map[string]bool)\n\tfor _, v := range confPaths {\n\t\trp, err := filepath.Abs(v)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Get real path of ConfsPath %v error %v, ignore it\", v, rp)\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := exits[rp]; ok {\n\t\t\tlog.Errorf(\"ConfsPath %v duplicated, ignore\", rp)\n\t\t\tcontinue\n\t\t}\n\t\texits[rp] = true\n\t\tpaths = append(paths, rp)\n\t}\n\treturn\n}\n\nfunc cleanLogkitLog(dir, pattern string, reserveCnt int) {\n\tvar err error\n\tpath := filepath.Join(dir, pattern)\n\tmatches, err := filepath.Glob(path)\n\tif err != nil {\n\t\tlog.Errorf(\"filepath.Glob path %v error %v\", path, err)\n\t\treturn\n\t}\n\tif len(matches) <= reserveCnt {\n\t\treturn\n\t}\n\tsort.Strings(matches)\n\tfor _, f := range matches[0 : len(matches)-reserveCnt] {\n\t\terr := os.Remove(f)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Remove %s failed , error: %v\", f, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn\n}\n\nfunc loopCleanLogkitLog(dir, pattern string, reserveCnt int, exitchan chan struct{}) {\n\tif len(dir) <= 0 {\n\t\tdir = defaultLogDir\n\t}\n\tif len(pattern) <= 0 {\n\t\tpattern = defaultLogPattern\n\t}\n\tif reserveCnt <= 0 {\n\t\treserveCnt = defaultReserveCnt\n\t}\n\tticker := time.NewTicker(time.Minute * 10)\n\tfor {\n\t\tselect {\n\t\tcase <-exitchan:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcleanLogkitLog(dir, pattern, reserveCnt)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tconfig.Init(\"f\", \"qbox\", \"qboxlogexporter.conf\")\n\tif err := config.Load(&conf); err != nil {\n\t\tlog.Fatal(\"config.Load failed:\", err)\n\t}\n\tlog.Printf(\"Welcome to use Logkit, Version: %v \\n\\nConfig: %#v\", Version, conf)\n\tif conf.TimeLayouts != nil {\n\t\ttimes.AddLayout(conf.TimeLayouts)\n\t}\n\tif conf.MaxProcs == 0 {\n\t\tconf.MaxProcs = runtime.NumCPU()\n\t}\n\truntime.GOMAXPROCS(conf.MaxProcs)\n\tlog.SetOutputLevel(conf.DebugLevel)\n\n\tm, err := mgr.NewManager(conf.ManagerConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"NewManager: %v\", err)\n\t}\n\tpaths := getValidPath(conf.ConfsPath)\n\tif len(paths) <= 0 {\n\t\tlog.Fatalf(\"Cannot read or create any ConfsPath %v\", conf.ConfsPath)\n\t}\n\tif err = m.Watch(paths); err != nil {\n\t\tlog.Fatalf(\"watch path error %v\", err)\n\t}\n\tm.RestoreWebDir()\n\n\tstopClean := make(chan struct{}, 0)\n\tdefer close(stopClean)\n\tif conf.CleanSelfLog {\n\t\tgo loopCleanLogkitLog(conf.CleanSelfDir, conf.CleanSelfPattern, conf.CleanSelfLogCnt, stopClean)\n\t}\n\tif len(conf.BindHost) > 0 {\n\t\tm.BindHost = conf.BindHost\n\t}\n\te := echo.New()\n\te.Static(\"\/\", conf.StaticRootPath)\n\n\t\/\/ start rest service\n\trs := mgr.NewRestService(m, e)\n\tif conf.ProfileHost != \"\" {\n\t\tlog.Printf(\"profile_host was open at %v\", conf.ProfileHost)\n\t\tgo func() {\n\t\t\tlog.Println(http.ListenAndServe(conf.ProfileHost, nil))\n\t\t}()\n\t}\n\tutils.WaitForInterrupt(func() {\n\t\trs.Stop()\n\t\tif conf.CleanSelfLog {\n\t\t\tstopClean <- struct{}{}\n\t\t}\n\t\tm.Stop()\n\t})\n}\n<commit_msg>fix crash without confs dir<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n\n\tconfig \"github.com\/qiniu\/logkit\/conf\"\n\t_ \"github.com\/qiniu\/logkit\/metric\/all\"\n\t\"github.com\/qiniu\/logkit\/mgr\"\n\t\"github.com\/qiniu\/logkit\/times\"\n\t\"github.com\/qiniu\/logkit\/utils\"\n\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/qiniu\/log\"\n)\n\n\/\/Config of logkit\ntype Config struct {\n\tMaxProcs int `json:\"max_procs\"`\n\tDebugLevel int `json:\"debug_level\"`\n\tProfileHost string `json:\"profile_host\"`\n\tConfsPath []string `json:\"confs_path\"`\n\tCleanSelfLog bool `json:\"clean_self_log\"`\n\tCleanSelfDir string `json:\"clean_self_dir\"`\n\tCleanSelfPattern string `json:\"clean_self_pattern\"`\n\tTimeLayouts []string `json:\"timeformat_layouts\"`\n\tCleanSelfLogCnt int `json:\"clean_self_cnt\"`\n\tStaticRootPath string `json:\"static_root_path\"`\n\tmgr.ManagerConfig\n}\n\nvar conf Config\n\nconst (\n\tVersion = \"v1.2.3\"\n\tdefaultReserveCnt = 5\n\tdefaultLogDir = \".\/run\"\n\tdefaultLogPattern = \"*.log-*\"\n)\n\nfunc getValidPath(confPaths []string) (paths []string) {\n\tpaths = make([]string, 0)\n\texits := make(map[string]bool)\n\tfor _, v := range confPaths {\n\t\trp, err := filepath.Abs(v)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Get real path of ConfsPath %v error %v, ignore it\", v, rp)\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := exits[rp]; ok {\n\t\t\tlog.Errorf(\"ConfsPath %v duplicated, ignore\", rp)\n\t\t\tcontinue\n\t\t}\n\t\texits[rp] = true\n\t\tpaths = append(paths, rp)\n\t}\n\treturn\n}\n\nfunc cleanLogkitLog(dir, pattern string, reserveCnt int) {\n\tvar err error\n\tpath := filepath.Join(dir, pattern)\n\tmatches, err := filepath.Glob(path)\n\tif err != nil {\n\t\tlog.Errorf(\"filepath.Glob path %v error %v\", path, err)\n\t\treturn\n\t}\n\tif len(matches) <= reserveCnt {\n\t\treturn\n\t}\n\tsort.Strings(matches)\n\tfor _, f := range matches[0 : len(matches)-reserveCnt] {\n\t\terr := os.Remove(f)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Remove %s failed , error: %v\", f, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn\n}\n\nfunc loopCleanLogkitLog(dir, pattern string, reserveCnt int, exitchan chan struct{}) {\n\tif len(dir) <= 0 {\n\t\tdir = defaultLogDir\n\t}\n\tif len(pattern) <= 0 {\n\t\tpattern = defaultLogPattern\n\t}\n\tif reserveCnt <= 0 {\n\t\treserveCnt = defaultReserveCnt\n\t}\n\tticker := time.NewTicker(time.Minute * 10)\n\tfor {\n\t\tselect {\n\t\tcase <-exitchan:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcleanLogkitLog(dir, pattern, reserveCnt)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tconfig.Init(\"f\", \"qbox\", \"qboxlogexporter.conf\")\n\tif err := config.Load(&conf); err != nil {\n\t\tlog.Fatal(\"config.Load failed:\", err)\n\t}\n\tlog.Printf(\"Welcome to use Logkit, Version: %v \\n\\nConfig: %#v\", Version, conf)\n\tif conf.TimeLayouts != nil {\n\t\ttimes.AddLayout(conf.TimeLayouts)\n\t}\n\tif conf.MaxProcs == 0 {\n\t\tconf.MaxProcs = runtime.NumCPU()\n\t}\n\truntime.GOMAXPROCS(conf.MaxProcs)\n\tlog.SetOutputLevel(conf.DebugLevel)\n\n\tm, err := mgr.NewManager(conf.ManagerConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"NewManager: %v\", err)\n\t}\n\tpaths := getValidPath(conf.ConfsPath)\n\tif len(paths) <= 0 {\n\t\tlog.Warnf(\"Cannot read or create any ConfsPath %v\", conf.ConfsPath)\n\t}\n\tif err = m.Watch(paths); err != nil {\n\t\tlog.Fatalf(\"watch path error %v\", err)\n\t}\n\tm.RestoreWebDir()\n\n\tstopClean := make(chan struct{}, 0)\n\tdefer close(stopClean)\n\tif conf.CleanSelfLog {\n\t\tgo loopCleanLogkitLog(conf.CleanSelfDir, conf.CleanSelfPattern, conf.CleanSelfLogCnt, stopClean)\n\t}\n\tif len(conf.BindHost) > 0 {\n\t\tm.BindHost = conf.BindHost\n\t}\n\te := echo.New()\n\te.Static(\"\/\", conf.StaticRootPath)\n\n\t\/\/ start rest service\n\trs := mgr.NewRestService(m, e)\n\tif conf.ProfileHost != \"\" {\n\t\tlog.Printf(\"profile_host was open at %v\", conf.ProfileHost)\n\t\tgo func() {\n\t\t\tlog.Println(http.ListenAndServe(conf.ProfileHost, nil))\n\t\t}()\n\t}\n\tutils.WaitForInterrupt(func() {\n\t\trs.Stop()\n\t\tif conf.CleanSelfLog {\n\t\t\tstopClean <- struct{}{}\n\t\t}\n\t\tm.Stop()\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package jman\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t\/\/ ErrSpecificNode is for when retrieving a node does not return a specific key\/value, but perhaps a map\n\tErrSpecificNode = errors.New(\"Could not find a specific node that matched the given path\")\n)\n\n\/\/ JSONFile represents a JSON file and contains the filename and root node\ntype JSONFile struct {\n\tfilename string\n\trootnode *Node\n\trw *sync.RWMutex\n}\n\n\/\/ NewFile will read the given filename and return a JSONFile struct\nfunc NewFile(filename string) (*JSONFile, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjs, err := New(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trw := &sync.RWMutex{}\n\treturn &JSONFile{filename, js, rw}, nil\n}\n\n\/\/ Recursively look up a given JSON path\nfunc jsonpath(js *Node, JSONpath string) (*Node, string, error) {\n\tif JSONpath == \"\" {\n\t\t\/\/ Could not find end node\n\t\treturn js, JSONpath, ErrSpecificNode\n\t}\n\tfirstpart := JSONpath\n\tsecondpart := \"\"\n\tif strings.Contains(JSONpath, \".\") {\n\t\tfields := strings.SplitN(JSONpath, \".\", 2)\n\t\tfirstpart = fields[0]\n\t\tsecondpart = fields[1]\n\t}\n\tif firstpart == \"x\" {\n\t\treturn jsonpath(js, secondpart)\n\t} else if strings.Contains(firstpart, \"[\") && strings.Contains(firstpart, \"]\") {\n\t\tfields := strings.SplitN(firstpart, \"[\", 2)\n\t\tname := fields[0]\n\t\tif name != \"x\" {\n\t\t\tjs = js.Get(name)\n\t\t}\n\t\tfields = strings.SplitN(fields[1], \"]\", 2)\n\t\tindex, err := strconv.Atoi(fields[0])\n\t\tif err != nil {\n\t\t\treturn js, JSONpath, errors.New(\"Invalid index: \" + fields[0] + \" (\" + err.Error() + \")\")\n\t\t}\n\t\tnode, ok := js.GetIndex(index)\n\t\tif !ok {\n\t\t\treturn js, JSONpath, errors.New(\"Could not find index: \" + fields[0])\n\t\t}\n\t\treturn jsonpath(node, secondpart)\n\t}\n\tname := firstpart\n\tif secondpart != \"\" {\n\t\treturn js, JSONpath, errors.New(\"JSON path left unparsed: \" + secondpart)\n\t}\n\treturn js.Get(name), \"\", nil\n}\n\n\/\/ GetNode will find the JSON node that corresponds to the given JSON path\nfunc (jf *JSONFile) GetNode(JSONpath string) (*Node, error) {\n\tfoundnode, leftoverpath, err := jsonpath(jf.rootnode, JSONpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif leftoverpath != \"\" {\n\t\treturn nil, errors.New(\"JSON path left unparsed: \" + leftoverpath)\n\t}\n\tif foundnode == nil {\n\t\treturn nil, errors.New(\"Could not lookup: \" + JSONpath)\n\t}\n\treturn foundnode, nil\n}\n\n\/\/ GetString will find the string that corresponds to the given JSON Path\nfunc (jf *JSONFile) GetString(JSONpath string) (string, error) {\n\tnode, err := jf.GetNode(JSONpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn node.String(), nil\n}\n\nfunc (jf *JSONFile) SetString(JSONpath, value string) error {\n\tfirstpart := \"\"\n\tlastpart := JSONpath\n\tif strings.Contains(JSONpath, \".\") {\n\t\tpos := strings.LastIndex(JSONpath, \".\")\n\t\tfirstpart = JSONpath[:pos]\n\t\tlastpart = JSONpath[pos+1:]\n\t}\n\n\tnode, _, err := jsonpath(jf.rootnode, firstpart)\n\tif (err != nil) && (err != ErrSpecificNode) {\n\t\treturn err\n\t}\n\n\t_, hasNode := node.CheckGet(lastpart)\n\tif !hasNode {\n\t\treturn errors.New(\"Index out of range? Could not set value.\")\n\t}\n\n\t\/\/ It's weird that simplejson Set does not return an error value\n\tnode.Set(lastpart, value)\n\n\tnewdata, err := jf.rootnode.EncodePretty()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn jf.Write(newdata)\n}\n\n\/\/ Write writes the current JSON data to the file\nfunc (jf *JSONFile) Write(data []byte) error {\n\tjf.rw.Lock()\n\tdefer jf.rw.Unlock()\n\t\/\/ TODO: Add newline as well?\n\treturn ioutil.WriteFile(jf.filename, data, 0666)\n}\n\n\/\/ AddJSON adds JSON data at the given JSON path\nfunc (jf *JSONFile) AddJSON(JSONpath, JSONdata string) error {\n\tfirstpart := \"\"\n\tlastpart := JSONpath\n\tif strings.Contains(JSONpath, \".\") {\n\t\tpos := strings.LastIndex(JSONpath, \".\")\n\t\tfirstpart = JSONpath[:pos]\n\t\tlastpart = JSONpath[pos+1:]\n\t}\n\n\tnode, _, err := jsonpath(jf.rootnode, firstpart)\n\tif (err != nil) && (err != ErrSpecificNode) {\n\t\treturn err\n\t}\n\n\t_, hasNode := node.CheckGet(lastpart)\n\tif hasNode {\n\t\treturn errors.New(\"The JSON path should not point to a single key when adding JSON data.\")\n\t}\n\n\tlistJSON, err := node.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullJSON, err := jf.rootnode.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: Fork simplejson for a better way of adding data!\n\tnewFullJSON := bytes.Replace(fullJSON, listJSON, badd(listJSON[:len(listJSON)-1], []byte(\",\"+JSONdata+\"]\")), 1)\n\n\tjs, err := New(newFullJSON)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewFullJSON, err = js.EncodePretty()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn jf.Write(newFullJSON)\n}\n\n\/\/ SetString sets a value to the given JSON file at the given JSON path\nfunc SetString(filename, JSONpath, value string) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.SetString(JSONpath, value)\n}\n\n\/\/ AddJSON adds JSON data to the given JSON file at the given JSON path\nfunc AddJSON(filename, JSONpath, JSONdata string) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.AddJSON(JSONpath, JSONdata)\n}\n\n\/\/ JSONString will find the string that corresponds to the given JSON Path,\n\/\/ given a filename and a simple JSON path expression.\nfunc GetString(filename, JSONpath string) (string, error) {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn jf.GetString(JSONpath)\n}\n<commit_msg>golint has no complaints<commit_after>package jman\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\t\/\/ ErrSpecificNode is for when retrieving a node does not return a specific key\/value, but perhaps a map\n\tErrSpecificNode = errors.New(\"Could not find a specific node that matched the given path\")\n)\n\n\/\/ JSONFile represents a JSON file and contains the filename and root node\ntype JSONFile struct {\n\tfilename string\n\trootnode *Node\n\trw *sync.RWMutex\n}\n\n\/\/ NewFile will read the given filename and return a JSONFile struct\nfunc NewFile(filename string) (*JSONFile, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjs, err := New(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trw := &sync.RWMutex{}\n\treturn &JSONFile{filename, js, rw}, nil\n}\n\n\/\/ Recursively look up a given JSON path\nfunc jsonpath(js *Node, JSONpath string) (*Node, string, error) {\n\tif JSONpath == \"\" {\n\t\t\/\/ Could not find end node\n\t\treturn js, JSONpath, ErrSpecificNode\n\t}\n\tfirstpart := JSONpath\n\tsecondpart := \"\"\n\tif strings.Contains(JSONpath, \".\") {\n\t\tfields := strings.SplitN(JSONpath, \".\", 2)\n\t\tfirstpart = fields[0]\n\t\tsecondpart = fields[1]\n\t}\n\tif firstpart == \"x\" {\n\t\treturn jsonpath(js, secondpart)\n\t} else if strings.Contains(firstpart, \"[\") && strings.Contains(firstpart, \"]\") {\n\t\tfields := strings.SplitN(firstpart, \"[\", 2)\n\t\tname := fields[0]\n\t\tif name != \"x\" {\n\t\t\tjs = js.Get(name)\n\t\t}\n\t\tfields = strings.SplitN(fields[1], \"]\", 2)\n\t\tindex, err := strconv.Atoi(fields[0])\n\t\tif err != nil {\n\t\t\treturn js, JSONpath, errors.New(\"Invalid index: \" + fields[0] + \" (\" + err.Error() + \")\")\n\t\t}\n\t\tnode, ok := js.GetIndex(index)\n\t\tif !ok {\n\t\t\treturn js, JSONpath, errors.New(\"Could not find index: \" + fields[0])\n\t\t}\n\t\treturn jsonpath(node, secondpart)\n\t}\n\tname := firstpart\n\tif secondpart != \"\" {\n\t\treturn js, JSONpath, errors.New(\"JSON path left unparsed: \" + secondpart)\n\t}\n\treturn js.Get(name), \"\", nil\n}\n\n\/\/ GetNode will find the JSON node that corresponds to the given JSON path\nfunc (jf *JSONFile) GetNode(JSONpath string) (*Node, error) {\n\tfoundnode, leftoverpath, err := jsonpath(jf.rootnode, JSONpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif leftoverpath != \"\" {\n\t\treturn nil, errors.New(\"JSON path left unparsed: \" + leftoverpath)\n\t}\n\tif foundnode == nil {\n\t\treturn nil, errors.New(\"Could not lookup: \" + JSONpath)\n\t}\n\treturn foundnode, nil\n}\n\n\/\/ GetString will find the string that corresponds to the given JSON path\nfunc (jf *JSONFile) GetString(JSONpath string) (string, error) {\n\tnode, err := jf.GetNode(JSONpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn node.String(), nil\n}\n\n\/\/ SetString will change the value of the key that the given JSON path points to\nfunc (jf *JSONFile) SetString(JSONpath, value string) error {\n\tfirstpart := \"\"\n\tlastpart := JSONpath\n\tif strings.Contains(JSONpath, \".\") {\n\t\tpos := strings.LastIndex(JSONpath, \".\")\n\t\tfirstpart = JSONpath[:pos]\n\t\tlastpart = JSONpath[pos+1:]\n\t}\n\n\tnode, _, err := jsonpath(jf.rootnode, firstpart)\n\tif (err != nil) && (err != ErrSpecificNode) {\n\t\treturn err\n\t}\n\n\t_, hasNode := node.CheckGet(lastpart)\n\tif !hasNode {\n\t\treturn errors.New(\"Index out of range? Could not set value.\")\n\t}\n\n\t\/\/ It's weird that simplejson Set does not return an error value\n\tnode.Set(lastpart, value)\n\n\tnewdata, err := jf.rootnode.EncodePretty()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn jf.Write(newdata)\n}\n\n\/\/ Write writes the current JSON data to the file\nfunc (jf *JSONFile) Write(data []byte) error {\n\tjf.rw.Lock()\n\tdefer jf.rw.Unlock()\n\t\/\/ TODO: Add newline as well?\n\treturn ioutil.WriteFile(jf.filename, data, 0666)\n}\n\n\/\/ AddJSON adds JSON data at the given JSON path\nfunc (jf *JSONFile) AddJSON(JSONpath, JSONdata string) error {\n\tfirstpart := \"\"\n\tlastpart := JSONpath\n\tif strings.Contains(JSONpath, \".\") {\n\t\tpos := strings.LastIndex(JSONpath, \".\")\n\t\tfirstpart = JSONpath[:pos]\n\t\tlastpart = JSONpath[pos+1:]\n\t}\n\n\tnode, _, err := jsonpath(jf.rootnode, firstpart)\n\tif (err != nil) && (err != ErrSpecificNode) {\n\t\treturn err\n\t}\n\n\t_, hasNode := node.CheckGet(lastpart)\n\tif hasNode {\n\t\treturn errors.New(\"The JSON path should not point to a single key when adding JSON data.\")\n\t}\n\n\tlistJSON, err := node.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullJSON, err := jf.rootnode.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: Fork simplejson for a better way of adding data!\n\tnewFullJSON := bytes.Replace(fullJSON, listJSON, badd(listJSON[:len(listJSON)-1], []byte(\",\"+JSONdata+\"]\")), 1)\n\n\tjs, err := New(newFullJSON)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewFullJSON, err = js.EncodePretty()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn jf.Write(newFullJSON)\n}\n\n\/\/ SetString sets a value to the given JSON file at the given JSON path\nfunc SetString(filename, JSONpath, value string) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.SetString(JSONpath, value)\n}\n\n\/\/ AddJSON adds JSON data to the given JSON file at the given JSON path\nfunc AddJSON(filename, JSONpath, JSONdata string) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.AddJSON(JSONpath, JSONdata)\n}\n\n\/\/ GetString will find the string that corresponds to the given JSON Path,\n\/\/ given a filename and a simple JSON path expression.\nfunc GetString(filename, JSONpath string) (string, error) {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn jf.GetString(JSONpath)\n}\n<|endoftext|>"} {"text":"<commit_before>package zdns\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/miekg\/dns\"\n\t_ \"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc parseAlexa(line string) (string, int) {\n\ts := strings.SplitN(line, \",\", 1)\n\trank, err := strconv.Atoi(s[0])\n\tif err != nil {\n\t\tlog.Fatal(\"Malformed Alexa Top Million file\")\n\t}\n\treturn s[1], rank\n}\n\nfunc makeName(name string, prefix string) (string, bool) {\n\tif prefix == \"\" {\n\t\treturn name, false\n\t} else {\n\t\treturn strings.Join([]string{prefix, name}, \"\"), true\n\t}\n}\n\nfunc doLookup(g *GlobalLookupFactory, gc *GlobalConf, input <-chan string, output chan<- string, wg *sync.WaitGroup) error {\n\tf, err := (*g).MakeRoutineFactory()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create new routine factory\", err.Error())\n\t}\n\tfor line := range input {\n\t\tvar res Result\n\t\tvar rawName string\n\t\tvar rank int\n\t\tif gc.AlexaFormat == true {\n\t\t\trawName, rank = parseAlexa(line)\n\t\t\tres.AlexaRank = rank\n\t\t} else {\n\t\t\trawName = line\n\t\t}\n\t\tlookupName, changed := makeName(rawName, gc.NamePrefix)\n\t\tres.Domain = lookupName\n\t\tif changed {\n\t\t\tres.OriginalDomain = rawName\n\t\t}\n\t\tl, err := f.MakeLookup()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Unable to build lookup instance\", err)\n\t\t}\n\t\tinnerRes, status, err := l.DoLookup(lookupName)\n\t\tres.Status = string(status)\n\t\tres.Data = innerRes\n\t\tif err != nil {\n\t\t\tres.Error = err.Error()\n\t\t}\n\t\tjsonRes, err := json.Marshal(res)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Unable to marshal JSON result\", err)\n\t\t}\n\t\toutput <- string(jsonRes)\n\t\tfmt.Print(string(jsonRes))\n\t}\n\t(*wg).Done()\n\treturn nil\n}\n\n\/\/ write results from lookup to output file\nfunc doOutput(out <-chan string, path string) error {\n\tvar f *os.File\n\tif path == \"\" || path == \"-\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open output file:\", err.Error())\n\t\t}\n\t\tdefer f.Close()\n\t}\n\tfor n := range out {\n\t\tf.WriteString(n)\n\t\tf.WriteString(\"\\n\")\n\t}\n\treturn nil\n}\n\n\/\/ read input file and put results into channel\nfunc doInput(in chan<- string, path string) error {\n\tvar f *os.File\n\tif path == \"\" || path == \"-\" {\n\t\tf = os.Stdin\n\t} else {\n\t\tvar err error\n\t\tf, err = os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open output file:\", err.Error())\n\t\t}\n\t}\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tin <- s.Text()\n\t}\n\tif err := s.Err(); err != nil {\n\t\tlog.Fatal(\"input unable to read file\", err)\n\t}\n\tclose(in)\n\treturn nil\n}\n\nfunc DoLookups(g *GlobalLookupFactory, c *GlobalConf) error {\n\t\/\/ doInput: take lines from input -> inChan\n\t\/\/\t- closes channel when done processing\n\t\/\/ doOutput: take serialized JSON from outChan and write\n\t\/\/ DoLookup:\n\t\/\/\t- n threads that do processing from in and place results in out\n\t\/\/\t- process until inChan closes, then wg.done()\n\t\/\/ Once we processing threads have all finished, wait until the\n\t\/\/ output and metadata threads have completed\n\tinChan := make(chan string)\n\toutChan := make(chan string)\n\tgo doOutput(outChan, c.OutputFilePath)\n\tgo doInput(inChan, c.InputFilePath)\n\tvar wg sync.WaitGroup\n\twg.Add(c.Threads)\n\tfor i := 0; i < c.Threads; i++ {\n\t\tgo doLookup(g, c, inChan, outChan, &wg)\n\t}\n\twg.Wait()\n\tclose(outChan)\n\treturn nil\n}\n\nfunc GetDNSServers(path string) ([]string, error) {\n\tc, err := dns.ClientConfigFromFile(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvar servers []string\n\tfor _, s := range c.Servers {\n\t\tfull := strings.Join([]string{s, c.Port}, \":\")\n\t\tservers = append(servers, full)\n\t}\n\treturn servers, nil\n}\n<commit_msg>This scaffolding works<commit_after>package zdns\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"github.com\/miekg\/dns\"\n\t_ \"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nfunc parseAlexa(line string) (string, int) {\n\ts := strings.SplitN(line, \",\", 1)\n\trank, err := strconv.Atoi(s[0])\n\tif err != nil {\n\t\tlog.Fatal(\"Malformed Alexa Top Million file\")\n\t}\n\treturn s[1], rank\n}\n\nfunc makeName(name string, prefix string) (string, bool) {\n\tif prefix == \"\" {\n\t\treturn name, false\n\t} else {\n\t\treturn strings.Join([]string{prefix, name}, \"\"), true\n\t}\n}\n\nfunc doLookup(g *GlobalLookupFactory, gc *GlobalConf, input <-chan string, output chan<- string, wg *sync.WaitGroup) error {\n\tf, err := (*g).MakeRoutineFactory()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to create new routine factory\", err.Error())\n\t}\n\tfor line := range input {\n\t\tvar res Result\n\t\tvar rawName string\n\t\tvar rank int\n\t\tif gc.AlexaFormat == true {\n\t\t\trawName, rank = parseAlexa(line)\n\t\t\tres.AlexaRank = rank\n\t\t} else {\n\t\t\trawName = line\n\t\t}\n\t\tlookupName, changed := makeName(rawName, gc.NamePrefix)\n\t\tres.Domain = lookupName\n\t\tif changed {\n\t\t\tres.OriginalDomain = rawName\n\t\t}\n\t\tl, err := f.MakeLookup()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Unable to build lookup instance\", err)\n\t\t}\n\t\tinnerRes, status, err := l.DoLookup(lookupName)\n\t\tres.Status = string(status)\n\t\tres.Data = innerRes\n\t\tif err != nil {\n\t\t\tres.Error = err.Error()\n\t\t}\n\t\tjsonRes, err := json.Marshal(res)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Unable to marshal JSON result\", err)\n\t\t}\n\t\toutput <- string(jsonRes)\n\t}\n\t(*wg).Done()\n\treturn nil\n}\n\n\/\/ write results from lookup to output file\nfunc doOutput(out <-chan string, path string, wg *sync.WaitGroup) error {\n\tvar f *os.File\n\tif path == \"\" || path == \"-\" {\n\t\tf = os.Stdout\n\t} else {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open output file:\", err.Error())\n\t\t}\n\t\tdefer f.Close()\n\t}\n\tfor n := range out {\n\t\tf.WriteString(n)\n\t\tf.WriteString(\"\\n\")\n\t}\n\t(*wg).Done()\n\treturn nil\n}\n\n\/\/ read input file and put results into channel\nfunc doInput(in chan<- string, path string, wg *sync.WaitGroup) error {\n\tvar f *os.File\n\tif path == \"\" || path == \"-\" {\n\t\tf = os.Stdin\n\t} else {\n\t\tvar err error\n\t\tf, err = os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unable to open output file:\", err.Error())\n\t\t}\n\t}\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tin <- s.Text()\n\t}\n\tif err := s.Err(); err != nil {\n\t\tlog.Fatal(\"input unable to read file\", err)\n\t}\n\tclose(in)\n\t(*wg).Done()\n\treturn nil\n}\n\nfunc DoLookups(g *GlobalLookupFactory, c *GlobalConf) error {\n\t\/\/ doInput: take lines from input -> inChan\n\t\/\/\t- closes channel when done processing\n\t\/\/ doOutput: take serialized JSON from outChan and write\n\t\/\/ DoLookup:\n\t\/\/\t- n threads that do processing from in and place results in out\n\t\/\/\t- process until inChan closes, then wg.done()\n\t\/\/ Once we processing threads have all finished, wait until the\n\t\/\/ output and metadata threads have completed\n\tinChan := make(chan string)\n\toutChan := make(chan string)\n\tvar routineWG sync.WaitGroup\n\tgo doOutput(outChan, c.OutputFilePath, &routineWG)\n\tgo doInput(inChan, c.InputFilePath, &routineWG)\n\troutineWG.Add(2)\n\t\/\/ create pool of worker goroutines\n\tvar lookupWG sync.WaitGroup\n\tlookupWG.Add(c.Threads)\n\tfor i := 0; i < c.Threads; i++ {\n\t\tgo doLookup(g, c, inChan, outChan, &lookupWG)\n\t}\n\tlookupWG.Wait()\n\tclose(outChan)\n\troutineWG.Wait()\n\treturn nil\n}\n\nfunc GetDNSServers(path string) ([]string, error) {\n\tc, err := dns.ClientConfigFromFile(path)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvar servers []string\n\tfor _, s := range c.Servers {\n\t\tfull := strings.Join([]string{s, c.Port}, \":\")\n\t\tservers = append(servers, full)\n\t}\n\treturn servers, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package crane\n\nimport (\n\t\"fmt\"\n)\n\ntype UnitOfWork struct {\n\ttargeted []string\n\tcontainers []string\n\torder []string\n\trequireStarted []string\n}\n\nfunc NewUnitOfWork(graph DependencyGraph, targeted []string) (uow *UnitOfWork, err error) {\n\n\tuow = &UnitOfWork{\n\t\ttargeted: targeted,\n\t\tcontainers: targeted,\n\t\torder: []string{},\n\t\trequireStarted: []string{},\n\t}\n\n\t\/\/ select all containers which we care about\n\tfor {\n\t\tc := uow.containers\n\t\tinitialLenContainers := len(c)\n\t\tfor _, name := range c {\n\t\t\tdependencies := graph[name]\n\t\t\tif dependencies == nil {\n\t\t\t\terr = fmt.Errorf(\"Container %s referenced, but not defined.\", name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, dep := range dependencies.All {\n\t\t\t\tuow.ensureInContainers(dep)\n\t\t\t\tif dependencies.requireStarted(dep) {\n\t\t\t\t\tuow.ensureInRequireStarted(dep)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(uow.containers) == initialLenContainers {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ bring containers into order\n\tfor {\n\t\tinitialLenOrdered := len(uow.order)\n\t\tfor _, name := range uow.containers {\n\t\t\tif dependencies, ok := graph[name]; ok {\n\t\t\t\tif dependencies.satisfied() {\n\t\t\t\t\tuow.order = append(uow.order, name)\n\t\t\t\t\tgraph.resolve(name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(uow.order) == initialLenOrdered {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(uow.order) < len(uow.containers) {\n\t\terr = fmt.Errorf(\"Dependencies for container(s) %s could not be resolved.\", uow.targeted)\n\t} else if len(uow.containers) == 0 {\n\t\terr = fmt.Errorf(\"ERROR: Command cannot be applied to any container.\")\n\t}\n\n\treturn\n}\n\nfunc (uow *UnitOfWork) Run(cmds []string, excluded []string) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Run(cmds, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\nfunc (uow *UnitOfWork) Lift(cmds []string, excluded []string, noCache bool) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Lift(cmds, noCache, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\nfunc (uow *UnitOfWork) Stats() {\n\targs := []string{\"stats\"}\n\tfor _, container := range uow.Targeted() {\n\t\tif container.Running() {\n\t\t\targs = append(args, container.Name())\n\t\t}\n\t}\n\tif len(args) > 1 {\n\t\texecuteCommand(\"docker\", args)\n\t} else {\n\t\tprintErrorf(\"None of the targeted container is running.\\n\")\n\t}\n}\n\nfunc (uow *UnitOfWork) Status(noTrunc bool) {\n\tuow.Targeted().Status(noTrunc)\n}\n\n\/\/ Push containers.\nfunc (uow *UnitOfWork) Push() {\n\tfor _, container := range uow.Targeted() {\n\t\tcontainer.Push()\n\t}\n}\n\n\/\/ Unpause containers.\nfunc (uow *UnitOfWork) Unpause() {\n\tfor _, container := range uow.Targeted() {\n\t\tcontainer.Unpause()\n\t}\n}\n\n\/\/ Pause containers.\nfunc (uow *UnitOfWork) Pause() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Pause()\n\t}\n}\n\n\/\/ Start containers.\nfunc (uow *UnitOfWork) Start() {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\n\/\/ Stop containers.\nfunc (uow *UnitOfWork) Stop() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Stop()\n\t}\n}\n\n\/\/ Kill containers.\nfunc (uow *UnitOfWork) Kill() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Kill()\n\t}\n}\n\n\/\/ Rm containers.\nfunc (uow *UnitOfWork) Rm(force bool) {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Rm(force)\n\t}\n}\n\n\/\/ Create containers.\nfunc (uow *UnitOfWork) Create(cmds []string, excluded []string) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Create(cmds, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\n\/\/ Provision containers.\nfunc (uow *UnitOfWork) Provision(noCache bool) {\n\tuow.Targeted().Provision(noCache)\n}\n\n\/\/ Pull containers.\nfunc (uow *UnitOfWork) PullImage() {\n\tfor _, container := range uow.Targeted() {\n\t\tif len(container.Dockerfile()) == 0 {\n\t\t\tcontainer.PullImage()\n\t\t}\n\t}\n}\n\n\/\/ Log containers.\nfunc (uow *UnitOfWork) Logs(follow bool, timestamps bool, tail string, colorize bool, since string) {\n\tuow.Targeted().Logs(follow, timestamps, tail, colorize, since)\n}\n\nfunc (uow *UnitOfWork) Containers() Containers {\n\tc := []Container{}\n\tfor _, name := range uow.order {\n\t\tc = append(c, cfg.Container(name))\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) Targeted() Containers {\n\tc := []Container{}\n\tfor _, name := range uow.order {\n\t\tif includes(uow.targeted, name) {\n\t\t\tc = append(c, cfg.Container(name))\n\t\t}\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) Affected() []string {\n\tc := []string{}\n\tfor _, name := range uow.order {\n\t\tif !includes(uow.targeted, name) {\n\t\t\tc = append(c, name)\n\t\t}\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) ensureInContainers(name string) {\n\tif !includes(uow.containers, name) {\n\t\tuow.containers = append(uow.containers, name)\n\t}\n}\n\nfunc (uow *UnitOfWork) ensureInRequireStarted(name string) {\n\tif !includes(uow.requireStarted, name) {\n\t\tuow.requireStarted = append(uow.requireStarted, name)\n\t}\n}\n<commit_msg>Start non-existant containers too when required<commit_after>package crane\n\nimport (\n\t\"fmt\"\n)\n\ntype UnitOfWork struct {\n\ttargeted []string\n\tcontainers []string\n\torder []string\n\trequireStarted []string\n}\n\nfunc NewUnitOfWork(graph DependencyGraph, targeted []string) (uow *UnitOfWork, err error) {\n\n\tuow = &UnitOfWork{\n\t\ttargeted: targeted,\n\t\tcontainers: targeted,\n\t\torder: []string{},\n\t\trequireStarted: []string{},\n\t}\n\n\t\/\/ select all containers which we care about\n\tfor {\n\t\tc := uow.containers\n\t\tinitialLenContainers := len(c)\n\t\tfor _, name := range c {\n\t\t\tdependencies := graph[name]\n\t\t\tif dependencies == nil {\n\t\t\t\terr = fmt.Errorf(\"Container %s referenced, but not defined.\", name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, dep := range dependencies.All {\n\t\t\t\tuow.ensureInContainers(dep)\n\t\t\t\tif dependencies.requireStarted(dep) {\n\t\t\t\t\tuow.ensureInRequireStarted(dep)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(uow.containers) == initialLenContainers {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ bring containers into order\n\tfor {\n\t\tinitialLenOrdered := len(uow.order)\n\t\tfor _, name := range uow.containers {\n\t\t\tif dependencies, ok := graph[name]; ok {\n\t\t\t\tif dependencies.satisfied() {\n\t\t\t\t\tuow.order = append(uow.order, name)\n\t\t\t\t\tgraph.resolve(name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(uow.order) == initialLenOrdered {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(uow.order) < len(uow.containers) {\n\t\terr = fmt.Errorf(\"Dependencies for container(s) %s could not be resolved.\", uow.targeted)\n\t} else if len(uow.containers) == 0 {\n\t\terr = fmt.Errorf(\"ERROR: Command cannot be applied to any container.\")\n\t}\n\n\treturn\n}\n\nfunc (uow *UnitOfWork) Run(cmds []string, excluded []string) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Run(cmds, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) || !container.Exists() {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\nfunc (uow *UnitOfWork) Lift(cmds []string, excluded []string, noCache bool) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Lift(cmds, noCache, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) || !container.Exists() {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\nfunc (uow *UnitOfWork) Stats() {\n\targs := []string{\"stats\"}\n\tfor _, container := range uow.Targeted() {\n\t\tif container.Running() {\n\t\t\targs = append(args, container.Name())\n\t\t}\n\t}\n\tif len(args) > 1 {\n\t\texecuteCommand(\"docker\", args)\n\t} else {\n\t\tprintErrorf(\"None of the targeted container is running.\\n\")\n\t}\n}\n\nfunc (uow *UnitOfWork) Status(noTrunc bool) {\n\tuow.Targeted().Status(noTrunc)\n}\n\n\/\/ Push containers.\nfunc (uow *UnitOfWork) Push() {\n\tfor _, container := range uow.Targeted() {\n\t\tcontainer.Push()\n\t}\n}\n\n\/\/ Unpause containers.\nfunc (uow *UnitOfWork) Unpause() {\n\tfor _, container := range uow.Targeted() {\n\t\tcontainer.Unpause()\n\t}\n}\n\n\/\/ Pause containers.\nfunc (uow *UnitOfWork) Pause() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Pause()\n\t}\n}\n\n\/\/ Start containers.\nfunc (uow *UnitOfWork) Start() {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) || !container.Exists() {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\n\/\/ Stop containers.\nfunc (uow *UnitOfWork) Stop() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Stop()\n\t}\n}\n\n\/\/ Kill containers.\nfunc (uow *UnitOfWork) Kill() {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Kill()\n\t}\n}\n\n\/\/ Rm containers.\nfunc (uow *UnitOfWork) Rm(force bool) {\n\tfor _, container := range uow.Targeted().Reversed() {\n\t\tcontainer.Rm(force)\n\t}\n}\n\n\/\/ Create containers.\nfunc (uow *UnitOfWork) Create(cmds []string, excluded []string) {\n\tfor _, container := range uow.Containers() {\n\t\tif includes(uow.targeted, container.Name()) {\n\t\t\tcontainer.Create(cmds, excluded, cfg.Path())\n\t\t} else if includes(uow.requireStarted, container.Name()) || !container.Exists() {\n\t\t\tcontainer.Start(excluded, cfg.Path())\n\t\t}\n\t}\n}\n\n\/\/ Provision containers.\nfunc (uow *UnitOfWork) Provision(noCache bool) {\n\tuow.Targeted().Provision(noCache)\n}\n\n\/\/ Pull containers.\nfunc (uow *UnitOfWork) PullImage() {\n\tfor _, container := range uow.Targeted() {\n\t\tif len(container.Dockerfile()) == 0 {\n\t\t\tcontainer.PullImage()\n\t\t}\n\t}\n}\n\n\/\/ Log containers.\nfunc (uow *UnitOfWork) Logs(follow bool, timestamps bool, tail string, colorize bool, since string) {\n\tuow.Targeted().Logs(follow, timestamps, tail, colorize, since)\n}\n\nfunc (uow *UnitOfWork) Containers() Containers {\n\tc := []Container{}\n\tfor _, name := range uow.order {\n\t\tc = append(c, cfg.Container(name))\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) Targeted() Containers {\n\tc := []Container{}\n\tfor _, name := range uow.order {\n\t\tif includes(uow.targeted, name) {\n\t\t\tc = append(c, cfg.Container(name))\n\t\t}\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) Affected() []string {\n\tc := []string{}\n\tfor _, name := range uow.order {\n\t\tif !includes(uow.targeted, name) {\n\t\t\tc = append(c, name)\n\t\t}\n\t}\n\treturn c\n}\n\nfunc (uow *UnitOfWork) ensureInContainers(name string) {\n\tif !includes(uow.containers, name) {\n\t\tuow.containers = append(uow.containers, name)\n\t}\n}\n\nfunc (uow *UnitOfWork) ensureInRequireStarted(name string) {\n\tif !includes(uow.requireStarted, name) {\n\t\tuow.requireStarted = append(uow.requireStarted, name)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package criteria\n\nimport (\n\t\"github.com\/viant\/assertly\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/data\"\n\t\"fmt\"\n)\n\n\/\/Criterion represent evaluation criterion\ntype Criterion struct {\n\t*Predicate\n\tLeftOperand interface{}\n\tOperator string\n\tRightOperand interface{}\n}\n\nfunc (c *Criterion) expandOperand(opperand interface{}, state data.Map) interface{} {\n\tif opperand == nil {\n\t\treturn nil\n\t}\n\treturn state.Expand(opperand)\n}\n\n\/\/Apply evaluates criterion with supplied context and state map . Dolar prefixed $expression will be expanded before evaluation.\nfunc (c *Criterion) Apply(state data.Map) (bool, error) {\n\n\tif c.Predicate != nil && len(c.Predicate.Criteria) > 0 {\n\t\treturn c.Predicate.Apply(state)\n\t}\n\n\tleftOperand := c.expandOperand(c.LeftOperand, state)\n\trightOperand := c.expandOperand(c.RightOperand, state)\n\tvar err error\n\tvar leftNumber, rightNumber float64\n\tvar rootPath = assertly.NewDataPath(\"\/\")\n\tvar context = assertly.NewDefaultContext()\n\n\tswitch c.Operator {\n\tcase \"=\", \":\":\n\t\tvalidation, err := assertly.AssertWithContext(rightOperand, leftOperand, rootPath, context)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfmt.Printf(\"validation: %v\\n\", validation.Report())\n\t\treturn validation.FailedCount == 0, nil\n\tcase \"!=\", \"\":\n\t\tif _, ok := leftOperand.(string); ok && rightOperand == nil {\n\t\t\trightOperand = \"\"\n\t\t}\n\t\tvalidation, err := assertly.AssertWithContext(leftOperand, rightOperand, rootPath, context)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn validation.FailedCount > 0, nil\n\tcase \">=\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber >= rightNumber, nil\n\t\t\t}\n\t\t}\n\tcase \"<=\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber <= rightNumber, nil\n\t\t\t}\n\t\t}\n\n\tcase \">\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber > rightNumber, nil\n\t\t\t}\n\t\t}\n\tcase \"<\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber < rightNumber, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn false, err\n}\n\n\n\/\/NewCriterion creates a new criterion\nfunc NewCriterion(leftOperand interface{}, operator string, rightOperand interface{}) *Criterion {\n\treturn &Criterion{\n\t\tLeftOperand: leftOperand,\n\t\tOperator: operator,\n\t\tRightOperand: rightOperand,\n\t}\n}\n<commit_msg>patched evaluator with predicate<commit_after>package criteria\n\nimport (\n\t\"github.com\/viant\/assertly\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/data\"\n\t\"fmt\"\n)\n\n\/\/Criterion represent evaluation criterion\ntype Criterion struct {\n\t*Predicate\n\tLeftOperand interface{}\n\tOperator string\n\tRightOperand interface{}\n}\n\nfunc (c *Criterion) expandOperand(opperand interface{}, state data.Map) interface{} {\n\tif opperand == nil {\n\t\treturn nil\n\t}\n\treturn state.Expand(opperand)\n}\n\n\/\/Apply evaluates criterion with supplied context and state map . Dolar prefixed $expression will be expanded before evaluation.\nfunc (c *Criterion) Apply(state data.Map) (bool, error) {\n\n\tif c.Predicate != nil && len(c.Predicate.Criteria) > 0 {\n\t\treturn c.Predicate.Apply(state)\n\t}\n\n\tleftOperand := c.expandOperand(c.LeftOperand, state)\n\trightOperand := c.expandOperand(c.RightOperand, state)\n\tvar err error\n\tvar leftNumber, rightNumber float64\n\tvar rootPath = assertly.NewDataPath(\"\/\")\n\tvar context = assertly.NewDefaultContext()\n\n\tswitch c.Operator {\n\tcase \"=\", \":\":\n\t\tvalidation, err := assertly.AssertWithContext(rightOperand, leftOperand, rootPath, context)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn validation.FailedCount == 0, nil\n\tcase \"!=\", \"\":\n\t\tif _, ok := leftOperand.(string); ok && rightOperand == nil {\n\t\t\trightOperand = \"\"\n\t\t}\n\t\tvalidation, err := assertly.AssertWithContext(leftOperand, rightOperand, rootPath, context)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn validation.FailedCount > 0, nil\n\tcase \">=\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber >= rightNumber, nil\n\t\t\t}\n\t\t}\n\tcase \"<=\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber <= rightNumber, nil\n\t\t\t}\n\t\t}\n\n\tcase \">\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber > rightNumber, nil\n\t\t\t}\n\t\t}\n\tcase \"<\":\n\t\tif leftNumber, err = toolbox.ToFloat(leftOperand); err == nil {\n\t\t\tif rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {\n\t\t\t\treturn leftNumber < rightNumber, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn false, err\n}\n\n\n\/\/NewCriterion creates a new criterion\nfunc NewCriterion(leftOperand interface{}, operator string, rightOperand interface{}) *Criterion {\n\treturn &Criterion{\n\t\tLeftOperand: leftOperand,\n\t\tOperator: operator,\n\t\tRightOperand: rightOperand,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/godbus\/dbus\"\n\t\"github.com\/muka\/go-bluetooth\/bluez\"\n\t\"github.com\/muka\/go-bluetooth\/bluez\/profile\"\n\t\"github.com\/muka\/go-bluetooth\/emitter\"\n\t\"github.com\/muka\/go-bluetooth\/util\"\n\t\"github.com\/tj\/go-debug\"\n)\n\nvar dbgDevice = debug.Debug(\"bluez:api:Device\")\n\nvar deviceRegistry = make(map[string]*Device)\n\n\/\/ NewDevice creates a new Device\nfunc NewDevice(path string) *Device {\n\n\tif _, ok := deviceRegistry[path]; ok {\n\t\t\/\/ dbgDevice(\"Reusing cache instance %s\", path)\n\t\treturn deviceRegistry[path]\n\t}\n\n\td := new(Device)\n\td.Path = path\n\td.client = profile.NewDevice1(path)\n\n\td.client.GetProperties()\n\td.Properties = d.client.Properties\n\td.chars = make(map[dbus.ObjectPath]*profile.GattCharacteristic1, 0)\n\n\tdeviceRegistry[path] = d\n\n\t\/\/ d.watchProperties()\n\n\treturn d\n}\n\n\/\/ClearDevice free a device struct\nfunc ClearDevice(d *Device) {\n\n\td.Disconnect()\n\td.unwatchProperties()\n\tc, err := d.GetClient()\n\tif err == nil {\n\t\tc.Close()\n\t}\n\n\tif _, ok := deviceRegistry[d.Path]; ok {\n\t\tdelete(deviceRegistry, d.Path)\n\t}\n\n}\n\n\/\/ ParseDevice parse a Device from a ObjectManager map\nfunc ParseDevice(path dbus.ObjectPath, propsMap map[string]dbus.Variant) (*Device, error) {\n\n\td := new(Device)\n\td.Path = string(path)\n\td.client = profile.NewDevice1(d.Path)\n\n\tprops := new(profile.Device1Properties)\n\tutil.MapToStruct(props, propsMap)\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Properties = props\n\n\treturn d, nil\n}\n\nfunc (d *Device) watchProperties() error {\n\n\tdbgDevice(\"watch-prop: watching properties\")\n\n\tchannel, err := d.client.Register()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo (func() {\n\t\tfor {\n\n\t\t\tif channel == nil {\n\t\t\t\tdbgDevice(\"watch-prop: nil channel, exit\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tdbgDevice(\"watch-prop: waiting updates\")\n\t\t\tsig := <-channel\n\n\t\t\tif sig == nil {\n\t\t\t\tdbgDevice(\"watch-prop: nil sig, exit\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdbgDevice(\"watch-prop: signal name %s\", sig.Name)\n\t\t\tif sig.Name != bluez.PropertiesChanged {\n\t\t\t\tdbgDevice(\"Skipped %s vs %s\\n\", sig.Name, bluez.PropertiesInterface)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdbgDevice(\"Device property changed\")\n\t\t\tfor i := 0; i < len(sig.Body); i++ {\n\t\t\t\tdbgDevice(\"%s -> %s\", reflect.TypeOf(sig.Body[i]), sig.Body[i])\n\t\t\t}\n\n\t\t\tiface := sig.Body[0].(string)\n\t\t\tchanges := sig.Body[1].(map[string]dbus.Variant)\n\t\t\tfor field, val := range changes {\n\n\t\t\t\t\/\/ updates [*]Properties struct\n\t\t\t\tprops := d.Properties\n\n\t\t\t\ts := reflect.ValueOf(props).Elem()\n\t\t\t\t\/\/ exported field\n\t\t\t\tf := s.FieldByName(field)\n\t\t\t\tif f.IsValid() {\n\t\t\t\t\t\/\/ A Value can be changed only if it is\n\t\t\t\t\t\/\/ addressable and was not obtained by\n\t\t\t\t\t\/\/ the use of unexported struct fields.\n\t\t\t\t\tif f.CanSet() {\n\t\t\t\t\t\tx := reflect.ValueOf(val.Value())\n\t\t\t\t\t\tf.Set(x)\n\t\t\t\t\t\tdbgDevice(\"Set props value: %s = %s\\n\", field, x.Interface())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdbgDevice(\"Emit change for %s = %v\\n\", field, val.Value())\n\t\t\t\tpropChanged := PropertyChangedEvent{string(iface), field, val.Value(), props, d}\n\t\t\t\td.Emit(\"changed\", propChanged)\n\t\t\t}\n\t\t}\n\t})()\n\n\treturn nil\n}\n\n\/\/Device return an API to interact with a DBus device\ntype Device struct {\n\tPath string\n\tProperties *profile.Device1Properties\n\tclient *profile.Device1\n\tchars map[dbus.ObjectPath]*profile.GattCharacteristic1\n}\n\nfunc (d *Device) unwatchProperties() error {\n\treturn d.client.Unregister()\n}\n\n\/\/GetClient return a DBus Device1 interface client\nfunc (d *Device) GetClient() (*profile.Device1, error) {\n\tif d.client == nil {\n\t\treturn nil, errors.New(\"Client not available\")\n\t}\n\treturn d.client, nil\n}\n\n\/\/GetProperties return the properties for the device\nfunc (d *Device) GetProperties() (*profile.Device1Properties, error) {\n\n\tif d == nil {\n\t\treturn nil, errors.New(\"Empty device pointer\")\n\t}\n\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprops, err := c.GetProperties()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Properties = props\n\treturn d.Properties, err\n}\n\n\/\/GetProperty return a property value\nfunc (d *Device) GetProperty(name string) (data interface{}, err error) {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, err := c.GetProperty(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Value(), nil\n}\n\n\/\/On register callback for event\nfunc (d *Device) On(name string, fn *emitter.Callback) {\n\tswitch name {\n\tcase \"changed\":\n\t\td.watchProperties()\n\t\tbreak\n\t}\n\temitter.On(d.Path+\".\"+name, fn)\n}\n\n\/\/Off unregister callback for event\nfunc (d *Device) Off(name string, cb *emitter.Callback) {\n\tswitch name {\n\tcase \"changed\":\n\t\td.unwatchProperties()\n\t\tbreak\n\t}\n\n\tpattern := d.Path + \".\" + name\n\tif name != \"*\" {\n\t\temitter.Off(pattern, cb)\n\t} else {\n\t\temitter.RemoveListeners(pattern, nil)\n\t}\n}\n\n\/\/Emit an event\nfunc (d *Device) Emit(name string, data interface{}) {\n\temitter.Emit(d.Path+\".\"+name, data)\n}\n\n\/\/GetService return a GattService\nfunc (d *Device) GetService(path string) *profile.GattService1 {\n\treturn profile.NewGattService1(path, \"org.bluez\")\n}\n\n\/\/GetChar return a GattService\nfunc (d *Device) GetChar(path string) *profile.GattCharacteristic1 {\n\treturn profile.NewGattCharacteristic1(path)\n}\n\n\/\/GetCharByUUID return a GattService by its uuid, return nil if not found\nfunc (d *Device) GetCharByUUID(uuid string) (*profile.GattCharacteristic1, error) {\n\n\tuuid = strings.ToUpper(uuid)\n\n\tlist := d.GetCharsList()\n\n\tdbgDevice(\"Find by uuid, char list %d, cached list %d\", len(list), len(d.chars))\n\n\tvar deviceFound *profile.GattCharacteristic1\n\n\tfor _, path := range list {\n\n\t\t\/\/ use cache\n\t\t_, ok := d.chars[path]\n\t\tif !ok {\n\t\t\td.chars[path] = profile.NewGattCharacteristic1(string(path))\n\t\t}\n\n\t\tprops := d.chars[path].Properties\n\t\tcuuid := strings.ToUpper(props.UUID)\n\n\t\tif cuuid == uuid {\n\t\t\tdbgDevice(\"Found char %s\", uuid)\n\t\t\tdeviceFound = d.chars[path]\n\t\t}\n\t}\n\n\tif deviceFound == nil {\n\t\tdbgDevice(\"Characteristic not Found: %s \", uuid)\n\t}\n\n\treturn deviceFound, nil\n}\n\n\/\/GetCharsList return a device characteristics\nfunc (d *Device) GetCharsList() []dbus.ObjectPath {\n\n\tvar chars []dbus.ObjectPath\n\n\tif len(d.chars) != 0 {\n\n\t\tfor objpath := range d.chars {\n\t\t\tchars = append(chars, objpath)\n\t\t}\n\n\t\tdbgDevice(\"Cached %d chars\", len(chars))\n\t\treturn chars\n\t}\n\n\tdbgDevice(\"Scanning chars by ObjectPath\")\n\tlist := GetManager().GetObjects()\n\tfor objpath := range *list {\n\t\tpath := string(objpath)\n\t\tif !strings.HasPrefix(path, d.Path) {\n\t\t\tcontinue\n\t\t}\n\t\tcharPos := strings.Index(path, \"char\")\n\t\tif charPos == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Index(path[charPos:], \"desc\") != -1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tchars = append(chars, objpath)\n\t}\n\n\tdbgDevice(\"Found %d chars\", len(chars))\n\treturn chars\n}\n\n\/\/IsConnected check if connected to the device\nfunc (d *Device) IsConnected() bool {\n\n\tprops, _ := d.GetProperties()\n\n\tif props == nil {\n\t\treturn false\n\t}\n\n\treturn props.Connected\n}\n\n\/\/Connect to device\nfunc (d *Device) Connect() error {\n\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/Disconnect from a device\nfunc (d *Device) Disconnect() error {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Disconnect()\n\treturn nil\n}\n\n\/\/Pair a device\nfunc (d *Device) Pair() error {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Pair()\n\treturn nil\n}\n<commit_msg>Fix nil map panic<commit_after>package api\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/godbus\/dbus\"\n\t\"github.com\/muka\/go-bluetooth\/bluez\"\n\t\"github.com\/muka\/go-bluetooth\/bluez\/profile\"\n\t\"github.com\/muka\/go-bluetooth\/emitter\"\n\t\"github.com\/muka\/go-bluetooth\/util\"\n\t\"github.com\/tj\/go-debug\"\n)\n\nvar dbgDevice = debug.Debug(\"bluez:api:Device\")\n\nvar deviceRegistry = make(map[string]*Device)\n\n\/\/ NewDevice creates a new Device\nfunc NewDevice(path string) *Device {\n\n\tif _, ok := deviceRegistry[path]; ok {\n\t\t\/\/ dbgDevice(\"Reusing cache instance %s\", path)\n\t\treturn deviceRegistry[path]\n\t}\n\n\td := new(Device)\n\td.Path = path\n\td.client = profile.NewDevice1(path)\n\n\td.client.GetProperties()\n\td.Properties = d.client.Properties\n\td.chars = make(map[dbus.ObjectPath]*profile.GattCharacteristic1, 0)\n\n\tdeviceRegistry[path] = d\n\n\t\/\/ d.watchProperties()\n\n\treturn d\n}\n\n\/\/ClearDevice free a device struct\nfunc ClearDevice(d *Device) {\n\n\td.Disconnect()\n\td.unwatchProperties()\n\tc, err := d.GetClient()\n\tif err == nil {\n\t\tc.Close()\n\t}\n\n\tif _, ok := deviceRegistry[d.Path]; ok {\n\t\tdelete(deviceRegistry, d.Path)\n\t}\n\n}\n\n\/\/ ParseDevice parse a Device from a ObjectManager map\nfunc ParseDevice(path dbus.ObjectPath, propsMap map[string]dbus.Variant) (*Device, error) {\n\n\td := new(Device)\n\td.Path = string(path)\n\td.client = profile.NewDevice1(d.Path)\n\n\tprops := new(profile.Device1Properties)\n\tutil.MapToStruct(props, propsMap)\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Properties = props\n\td.chars = make(map[dbus.ObjectPath]*profile.GattCharacteristic1, 0)\n\n\treturn d, nil\n}\n\nfunc (d *Device) watchProperties() error {\n\n\tdbgDevice(\"watch-prop: watching properties\")\n\n\tchannel, err := d.client.Register()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo (func() {\n\t\tfor {\n\n\t\t\tif channel == nil {\n\t\t\t\tdbgDevice(\"watch-prop: nil channel, exit\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tdbgDevice(\"watch-prop: waiting updates\")\n\t\t\tsig := <-channel\n\n\t\t\tif sig == nil {\n\t\t\t\tdbgDevice(\"watch-prop: nil sig, exit\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdbgDevice(\"watch-prop: signal name %s\", sig.Name)\n\t\t\tif sig.Name != bluez.PropertiesChanged {\n\t\t\t\tdbgDevice(\"Skipped %s vs %s\\n\", sig.Name, bluez.PropertiesInterface)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdbgDevice(\"Device property changed\")\n\t\t\tfor i := 0; i < len(sig.Body); i++ {\n\t\t\t\tdbgDevice(\"%s -> %s\", reflect.TypeOf(sig.Body[i]), sig.Body[i])\n\t\t\t}\n\n\t\t\tiface := sig.Body[0].(string)\n\t\t\tchanges := sig.Body[1].(map[string]dbus.Variant)\n\t\t\tfor field, val := range changes {\n\n\t\t\t\t\/\/ updates [*]Properties struct\n\t\t\t\tprops := d.Properties\n\n\t\t\t\ts := reflect.ValueOf(props).Elem()\n\t\t\t\t\/\/ exported field\n\t\t\t\tf := s.FieldByName(field)\n\t\t\t\tif f.IsValid() {\n\t\t\t\t\t\/\/ A Value can be changed only if it is\n\t\t\t\t\t\/\/ addressable and was not obtained by\n\t\t\t\t\t\/\/ the use of unexported struct fields.\n\t\t\t\t\tif f.CanSet() {\n\t\t\t\t\t\tx := reflect.ValueOf(val.Value())\n\t\t\t\t\t\tf.Set(x)\n\t\t\t\t\t\tdbgDevice(\"Set props value: %s = %s\\n\", field, x.Interface())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdbgDevice(\"Emit change for %s = %v\\n\", field, val.Value())\n\t\t\t\tpropChanged := PropertyChangedEvent{string(iface), field, val.Value(), props, d}\n\t\t\t\td.Emit(\"changed\", propChanged)\n\t\t\t}\n\t\t}\n\t})()\n\n\treturn nil\n}\n\n\/\/Device return an API to interact with a DBus device\ntype Device struct {\n\tPath string\n\tProperties *profile.Device1Properties\n\tclient *profile.Device1\n\tchars map[dbus.ObjectPath]*profile.GattCharacteristic1\n}\n\nfunc (d *Device) unwatchProperties() error {\n\treturn d.client.Unregister()\n}\n\n\/\/GetClient return a DBus Device1 interface client\nfunc (d *Device) GetClient() (*profile.Device1, error) {\n\tif d.client == nil {\n\t\treturn nil, errors.New(\"Client not available\")\n\t}\n\treturn d.client, nil\n}\n\n\/\/GetProperties return the properties for the device\nfunc (d *Device) GetProperties() (*profile.Device1Properties, error) {\n\n\tif d == nil {\n\t\treturn nil, errors.New(\"Empty device pointer\")\n\t}\n\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprops, err := c.GetProperties()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Properties = props\n\treturn d.Properties, err\n}\n\n\/\/GetProperty return a property value\nfunc (d *Device) GetProperty(name string) (data interface{}, err error) {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, err := c.GetProperty(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn val.Value(), nil\n}\n\n\/\/On register callback for event\nfunc (d *Device) On(name string, fn *emitter.Callback) {\n\tswitch name {\n\tcase \"changed\":\n\t\td.watchProperties()\n\t\tbreak\n\t}\n\temitter.On(d.Path+\".\"+name, fn)\n}\n\n\/\/Off unregister callback for event\nfunc (d *Device) Off(name string, cb *emitter.Callback) {\n\tswitch name {\n\tcase \"changed\":\n\t\td.unwatchProperties()\n\t\tbreak\n\t}\n\n\tpattern := d.Path + \".\" + name\n\tif name != \"*\" {\n\t\temitter.Off(pattern, cb)\n\t} else {\n\t\temitter.RemoveListeners(pattern, nil)\n\t}\n}\n\n\/\/Emit an event\nfunc (d *Device) Emit(name string, data interface{}) {\n\temitter.Emit(d.Path+\".\"+name, data)\n}\n\n\/\/GetService return a GattService\nfunc (d *Device) GetService(path string) *profile.GattService1 {\n\treturn profile.NewGattService1(path, \"org.bluez\")\n}\n\n\/\/GetChar return a GattService\nfunc (d *Device) GetChar(path string) *profile.GattCharacteristic1 {\n\treturn profile.NewGattCharacteristic1(path)\n}\n\n\/\/GetCharByUUID return a GattService by its uuid, return nil if not found\nfunc (d *Device) GetCharByUUID(uuid string) (*profile.GattCharacteristic1, error) {\n\n\tuuid = strings.ToUpper(uuid)\n\n\tlist := d.GetCharsList()\n\n\tdbgDevice(\"Find by uuid, char list %d, cached list %d\", len(list), len(d.chars))\n\n\tvar deviceFound *profile.GattCharacteristic1\n\n\tfor _, path := range list {\n\n\t\t\/\/ use cache\n\t\t_, ok := d.chars[path]\n\t\tif !ok {\n\t\t\td.chars[path] = profile.NewGattCharacteristic1(string(path))\n\t\t}\n\n\t\tprops := d.chars[path].Properties\n\t\tcuuid := strings.ToUpper(props.UUID)\n\n\t\tif cuuid == uuid {\n\t\t\tdbgDevice(\"Found char %s\", uuid)\n\t\t\tdeviceFound = d.chars[path]\n\t\t}\n\t}\n\n\tif deviceFound == nil {\n\t\tdbgDevice(\"Characteristic not Found: %s \", uuid)\n\t}\n\n\treturn deviceFound, nil\n}\n\n\/\/GetCharsList return a device characteristics\nfunc (d *Device) GetCharsList() []dbus.ObjectPath {\n\n\tvar chars []dbus.ObjectPath\n\n\tif len(d.chars) != 0 {\n\n\t\tfor objpath := range d.chars {\n\t\t\tchars = append(chars, objpath)\n\t\t}\n\n\t\tdbgDevice(\"Cached %d chars\", len(chars))\n\t\treturn chars\n\t}\n\n\tdbgDevice(\"Scanning chars by ObjectPath\")\n\tlist := GetManager().GetObjects()\n\tfor objpath := range *list {\n\t\tpath := string(objpath)\n\t\tif !strings.HasPrefix(path, d.Path) {\n\t\t\tcontinue\n\t\t}\n\t\tcharPos := strings.Index(path, \"char\")\n\t\tif charPos == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Index(path[charPos:], \"desc\") != -1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tchars = append(chars, objpath)\n\t}\n\n\tdbgDevice(\"Found %d chars\", len(chars))\n\treturn chars\n}\n\n\/\/IsConnected check if connected to the device\nfunc (d *Device) IsConnected() bool {\n\n\tprops, _ := d.GetProperties()\n\n\tif props == nil {\n\t\treturn false\n\t}\n\n\treturn props.Connected\n}\n\n\/\/Connect to device\nfunc (d *Device) Connect() error {\n\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/Disconnect from a device\nfunc (d *Device) Disconnect() error {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Disconnect()\n\treturn nil\n}\n\n\/\/Pair a device\nfunc (d *Device) Pair() error {\n\tc, err := d.GetClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Pair()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\tlru \"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/keybase\/kbfs\/kbfsblock\"\n\t\"github.com\/keybase\/kbfs\/kbfshash\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n)\n\ntype idCacheKey struct {\n\ttlf tlf.ID\n\tplaintextHash kbfshash.RawDefaultHash\n}\n\n\/\/ BlockCacheStandard implements the BlockCache interface by storing\n\/\/ blocks in an in-memory LRU cache. Clean blocks are identified\n\/\/ internally by just their block ID (since blocks are immutable and\n\/\/ content-addressable).\ntype BlockCacheStandard struct {\n\tcleanBytesCapacity uint64\n\n\tids *lru.Cache\n\n\tcleanTransient *lru.Cache\n\n\tcleanLock sync.RWMutex\n\tcleanPermanent map[kbfsblock.ID]Block\n\n\tbytesLock sync.Mutex\n\tcleanTotalBytes uint64\n}\n\n\/\/ NewBlockCacheStandard constructs a new BlockCacheStandard instance\n\/\/ with the given transient capacity (in number of entries) and the\n\/\/ clean bytes capacity, which is the total of number of bytes allowed\n\/\/ between the transient and permanent clean caches. If putting a\n\/\/ block will exceed this bytes capacity, transient entries are\n\/\/ evicted until the block will fit in capacity.\nfunc NewBlockCacheStandard(transientCapacity int,\n\tcleanBytesCapacity uint64) *BlockCacheStandard {\n\tb := &BlockCacheStandard{\n\t\tcleanBytesCapacity: cleanBytesCapacity,\n\t\tcleanPermanent: make(map[kbfsblock.ID]Block),\n\t}\n\n\tif transientCapacity > 0 {\n\t\tvar err error\n\t\t\/\/ TODO: Plumb error up.\n\t\tb.ids, err = lru.New(transientCapacity)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tb.cleanTransient, err = lru.NewWithEvict(transientCapacity, b.onEvict)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn b\n}\n\n\/\/ GetWithPrefetch implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) GetWithPrefetch(ptr BlockPointer) (Block, bool, error) {\n\tif b.cleanTransient != nil {\n\t\tif tmp, ok := b.cleanTransient.Get(ptr.ID); ok {\n\t\t\tblock, ok := tmp.(Block)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false, BadDataError{ptr.ID}\n\t\t\t}\n\t\t\treturn block, true, nil\n\t\t}\n\t}\n\n\tblock := func() Block {\n\t\tb.cleanLock.RLock()\n\t\tdefer b.cleanLock.RUnlock()\n\t\treturn b.cleanPermanent[ptr.ID]\n\t}()\n\tif block != nil {\n\t\treturn block, true, nil\n\t}\n\n\treturn nil, false, NoSuchBlockError{ptr.ID}\n}\n\n\/\/ Get implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) Get(ptr BlockPointer) (Block, error) {\n\tblock, _, err := b.GetWithPrefetch(ptr)\n\treturn block, err\n}\n\nfunc getCachedBlockSize(block Block) uint32 {\n\t\/\/ Get the size of the block. For direct file blocks, use the\n\t\/\/ length of the plaintext contents. For everything else, just\n\t\/\/ approximate the plaintext size using the encoding size.\n\tswitch b := block.(type) {\n\tcase *FileBlock:\n\t\tif b.IsInd {\n\t\t\treturn b.GetEncodedSize()\n\t\t}\n\t\treturn uint32(len(b.Contents))\n\tdefault:\n\t\treturn block.GetEncodedSize()\n\t}\n}\n\nfunc (b *BlockCacheStandard) onEvict(key interface{}, value interface{}) {\n\tblock, ok := value.(Block)\n\tif !ok {\n\t\treturn\n\t}\n\n\tb.bytesLock.Lock()\n\tdefer b.bytesLock.Unlock()\n\tb.cleanTotalBytes -= uint64(getCachedBlockSize(block))\n}\n\n\/\/ CheckForKnownPtr implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (\n\tBlockPointer, error) {\n\tif block.IsInd {\n\t\treturn BlockPointer{}, NotDirectFileBlockError{}\n\t}\n\n\tif b.ids == nil {\n\t\treturn BlockPointer{}, nil\n\t}\n\n\tkey := idCacheKey{tlf, block.UpdateHash()}\n\ttmp, ok := b.ids.Get(key)\n\tif !ok {\n\t\treturn BlockPointer{}, nil\n\t}\n\n\tptr, ok := tmp.(BlockPointer)\n\tif !ok {\n\t\treturn BlockPointer{}, fmt.Errorf(\"Unexpected cached id: %v\", tmp)\n\t}\n\treturn ptr, nil\n}\n\n\/\/ SetCleanBytesCapacity implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) SetCleanBytesCapacity(capacity uint64) {\n\tatomic.StoreUint64(&b.cleanBytesCapacity, capacity)\n}\n\n\/\/ GetCleanBytesCapacity implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) GetCleanBytesCapacity() (capacity uint64) {\n\treturn atomic.LoadUint64(&b.cleanBytesCapacity)\n}\n\nfunc (b *BlockCacheStandard) makeRoomForSize(size uint64) bool {\n\tif b.cleanTransient == nil {\n\t\treturn false\n\t}\n\n\toldLen := b.cleanTransient.Len() + 1\n\tdoUnlock := true\n\tb.bytesLock.Lock()\n\tdefer func() {\n\t\tif doUnlock {\n\t\t\tb.bytesLock.Unlock()\n\t\t}\n\t}()\n\n\tcleanBytesCapacity := b.GetCleanBytesCapacity()\n\n\t\/\/ Evict items from the cache until the bytes capacity is lower\n\t\/\/ than the total capacity (or until no items are removed).\n\tfor b.cleanTotalBytes+size > cleanBytesCapacity {\n\t\t\/\/ Unlock while removing, since onEvict needs the lock and\n\t\t\/\/ cleanTransient.Len() takes the LRU mutex (which could lead\n\t\t\/\/ to a deadlock with onEvict). TODO: either change\n\t\t\/\/ `cleanTransient` into an `lru.SimpleLRU` and protect it\n\t\t\/\/ with our own lock, or build our own LRU that can evict\n\t\t\/\/ based on total bytes. See #250 and KBFS-1404 for a longer\n\t\t\/\/ discussion.\n\t\tb.bytesLock.Unlock()\n\t\tdoUnlock = false\n\t\tif oldLen == b.cleanTransient.Len() {\n\t\t\tbreak\n\t\t}\n\t\toldLen = b.cleanTransient.Len()\n\t\tb.cleanTransient.RemoveOldest()\n\t\tdoUnlock = true\n\t\tb.bytesLock.Lock()\n\t}\n\tif b.cleanTotalBytes+size > cleanBytesCapacity {\n\t\t\/\/ There must be too many permanent clean blocks, so we\n\t\t\/\/ couldn't make room.\n\t\treturn false\n\t}\n\t\/\/ Only count clean bytes if we actually have a transient cache.\n\tb.cleanTotalBytes += size\n\treturn true\n}\n\n\/\/ PutWithPrefetch implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) PutWithPrefetch(\n\tptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime,\n\thasPrefetched bool) error {\n\n\tmadeRoom := false\n\n\tswitch lifetime {\n\tcase NoCacheEntry:\n\t\treturn nil\n\n\tcase TransientEntry:\n\t\t\/\/ If it's the right type of block, store the hash -> ID mapping.\n\t\tif fBlock, isFileBlock := block.(*FileBlock); b.ids != nil && isFileBlock && !fBlock.IsInd {\n\n\t\t\tkey := idCacheKey{tlf, fBlock.GetHash()}\n\t\t\t\/\/ zero out the refnonce, it doesn't matter\n\t\t\tptr.RefNonce = kbfsblock.ZeroRefNonce\n\t\t\tb.ids.Add(key, ptr)\n\t\t}\n\t\tif b.cleanTransient == nil {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Cache it later, once we know there's room\n\t\tdefer func() {\n\t\t\tif madeRoom {\n\t\t\t\tb.cleanTransient.Add(ptr.ID, block)\n\t\t\t}\n\t\t}()\n\n\tcase PermanentEntry:\n\t\tfunc() {\n\t\t\tb.cleanLock.Lock()\n\t\t\tdefer b.cleanLock.Unlock()\n\t\t\tb.cleanPermanent[ptr.ID] = block\n\t\t}()\n\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown lifetime %v\", lifetime)\n\t}\n\n\t\/\/ We must make room whether the cache is transient or permanent\n\tsize := uint64(getCachedBlockSize(block))\n\tmadeRoom = b.makeRoomForSize(size)\n\n\treturn nil\n}\n\n\/\/ Put implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) Put(\n\tptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) error {\n\t\/\/ Default should be to assume that a prefetch has happened, and thus it\n\t\/\/ won't trigger prefetches in the future.\n\treturn b.PutWithPrefetch(ptr, tlf, block, lifetime, true)\n}\n\n\/\/ DeletePermanent implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeletePermanent(id kbfsblock.ID) error {\n\tb.cleanLock.Lock()\n\tdefer b.cleanLock.Unlock()\n\tblock, ok := b.cleanPermanent[id]\n\tif ok {\n\t\tdelete(b.cleanPermanent, id)\n\t\tb.bytesLock.Lock()\n\t\tdefer b.bytesLock.Unlock()\n\t\tb.cleanTotalBytes -= uint64(getCachedBlockSize(block))\n\t}\n\treturn nil\n}\n\n\/\/ DeleteTransient implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeleteTransient(\n\tptr BlockPointer, tlf tlf.ID) error {\n\tif b.cleanTransient == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ If the block is cached and a file block, delete the known\n\t\/\/ pointer as well.\n\tif tmp, ok := b.cleanTransient.Get(ptr.ID); ok {\n\t\tblock, ok := tmp.(Block)\n\t\tif !ok {\n\t\t\treturn BadDataError{ptr.ID}\n\t\t}\n\n\t\t\/\/ Remove the key if it exists\n\t\tif fBlock, ok := block.(*FileBlock); b.ids != nil && ok &&\n\t\t\t!fBlock.IsInd {\n\t\t\t_, hash := kbfshash.DoRawDefaultHash(fBlock.Contents)\n\t\t\tkey := idCacheKey{tlf, hash}\n\t\t\tb.ids.Remove(key)\n\t\t}\n\n\t\tb.cleanTransient.Remove(ptr.ID)\n\t}\n\treturn nil\n}\n\n\/\/ DeleteKnownPtr implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error {\n\tif block.IsInd {\n\t\treturn NotDirectFileBlockError{}\n\t}\n\n\tif b.ids == nil {\n\t\treturn nil\n\t}\n\n\t_, hash := kbfshash.DoRawDefaultHash(block.Contents)\n\tkey := idCacheKey{tlf, hash}\n\tb.ids.Remove(key)\n\treturn nil\n}\n<commit_msg>bcache: add hasPrefetched boolean to cache<commit_after>\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\tlru \"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/keybase\/kbfs\/kbfsblock\"\n\t\"github.com\/keybase\/kbfs\/kbfshash\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n)\n\ntype blockContainer struct {\n\tblock Block\n\thasPrefetched bool\n}\n\ntype idCacheKey struct {\n\ttlf tlf.ID\n\tplaintextHash kbfshash.RawDefaultHash\n}\n\n\/\/ BlockCacheStandard implements the BlockCache interface by storing\n\/\/ blocks in an in-memory LRU cache. Clean blocks are identified\n\/\/ internally by just their block ID (since blocks are immutable and\n\/\/ content-addressable).\ntype BlockCacheStandard struct {\n\tcleanBytesCapacity uint64\n\n\tids *lru.Cache\n\n\tcleanTransient *lru.Cache\n\n\tcleanLock sync.RWMutex\n\tcleanPermanent map[kbfsblock.ID]Block\n\n\tbytesLock sync.Mutex\n\tcleanTotalBytes uint64\n}\n\n\/\/ NewBlockCacheStandard constructs a new BlockCacheStandard instance\n\/\/ with the given transient capacity (in number of entries) and the\n\/\/ clean bytes capacity, which is the total of number of bytes allowed\n\/\/ between the transient and permanent clean caches. If putting a\n\/\/ block will exceed this bytes capacity, transient entries are\n\/\/ evicted until the block will fit in capacity.\nfunc NewBlockCacheStandard(transientCapacity int,\n\tcleanBytesCapacity uint64) *BlockCacheStandard {\n\tb := &BlockCacheStandard{\n\t\tcleanBytesCapacity: cleanBytesCapacity,\n\t\tcleanPermanent: make(map[kbfsblock.ID]Block),\n\t}\n\n\tif transientCapacity > 0 {\n\t\tvar err error\n\t\t\/\/ TODO: Plumb error up.\n\t\tb.ids, err = lru.New(transientCapacity)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tb.cleanTransient, err = lru.NewWithEvict(transientCapacity, b.onEvict)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn b\n}\n\n\/\/ GetWithPrefetch implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) GetWithPrefetch(ptr BlockPointer) (Block, bool, error) {\n\tif b.cleanTransient != nil {\n\t\tif tmp, ok := b.cleanTransient.Get(ptr.ID); ok {\n\t\t\tbc, ok := tmp.(blockContainer)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false, BadDataError{ptr.ID}\n\t\t\t}\n\t\t\treturn bc.block, bc.hasPrefetched, nil\n\t\t}\n\t}\n\n\tblock := func() Block {\n\t\tb.cleanLock.RLock()\n\t\tdefer b.cleanLock.RUnlock()\n\t\treturn b.cleanPermanent[ptr.ID]\n\t}()\n\tif block != nil {\n\t\treturn block, true, nil\n\t}\n\n\treturn nil, false, NoSuchBlockError{ptr.ID}\n}\n\n\/\/ Get implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) Get(ptr BlockPointer) (Block, error) {\n\tblock, _, err := b.GetWithPrefetch(ptr)\n\treturn block, err\n}\n\nfunc getCachedBlockSize(block Block) uint32 {\n\t\/\/ Get the size of the block. For direct file blocks, use the\n\t\/\/ length of the plaintext contents. For everything else, just\n\t\/\/ approximate the plaintext size using the encoding size.\n\tswitch b := block.(type) {\n\tcase *FileBlock:\n\t\tif b.IsInd {\n\t\t\treturn b.GetEncodedSize()\n\t\t}\n\t\treturn uint32(len(b.Contents))\n\tdefault:\n\t\treturn block.GetEncodedSize()\n\t}\n}\n\nfunc (b *BlockCacheStandard) onEvict(key interface{}, value interface{}) {\n\tbc, ok := value.(blockContainer)\n\tif !ok {\n\t\treturn\n\t}\n\tblock := bc.block\n\n\tb.bytesLock.Lock()\n\tdefer b.bytesLock.Unlock()\n\tb.cleanTotalBytes -= uint64(getCachedBlockSize(block))\n}\n\n\/\/ CheckForKnownPtr implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (\n\tBlockPointer, error) {\n\tif block.IsInd {\n\t\treturn BlockPointer{}, NotDirectFileBlockError{}\n\t}\n\n\tif b.ids == nil {\n\t\treturn BlockPointer{}, nil\n\t}\n\n\tkey := idCacheKey{tlf, block.UpdateHash()}\n\ttmp, ok := b.ids.Get(key)\n\tif !ok {\n\t\treturn BlockPointer{}, nil\n\t}\n\n\tptr, ok := tmp.(BlockPointer)\n\tif !ok {\n\t\treturn BlockPointer{}, fmt.Errorf(\"Unexpected cached id: %v\", tmp)\n\t}\n\treturn ptr, nil\n}\n\n\/\/ SetCleanBytesCapacity implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) SetCleanBytesCapacity(capacity uint64) {\n\tatomic.StoreUint64(&b.cleanBytesCapacity, capacity)\n}\n\n\/\/ GetCleanBytesCapacity implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) GetCleanBytesCapacity() (capacity uint64) {\n\treturn atomic.LoadUint64(&b.cleanBytesCapacity)\n}\n\nfunc (b *BlockCacheStandard) makeRoomForSize(size uint64) bool {\n\tif b.cleanTransient == nil {\n\t\treturn false\n\t}\n\n\toldLen := b.cleanTransient.Len() + 1\n\tdoUnlock := true\n\tb.bytesLock.Lock()\n\tdefer func() {\n\t\tif doUnlock {\n\t\t\tb.bytesLock.Unlock()\n\t\t}\n\t}()\n\n\tcleanBytesCapacity := b.GetCleanBytesCapacity()\n\n\t\/\/ Evict items from the cache until the bytes capacity is lower\n\t\/\/ than the total capacity (or until no items are removed).\n\tfor b.cleanTotalBytes+size > cleanBytesCapacity {\n\t\t\/\/ Unlock while removing, since onEvict needs the lock and\n\t\t\/\/ cleanTransient.Len() takes the LRU mutex (which could lead\n\t\t\/\/ to a deadlock with onEvict). TODO: either change\n\t\t\/\/ `cleanTransient` into an `lru.SimpleLRU` and protect it\n\t\t\/\/ with our own lock, or build our own LRU that can evict\n\t\t\/\/ based on total bytes. See #250 and KBFS-1404 for a longer\n\t\t\/\/ discussion.\n\t\tb.bytesLock.Unlock()\n\t\tdoUnlock = false\n\t\tif oldLen == b.cleanTransient.Len() {\n\t\t\tbreak\n\t\t}\n\t\toldLen = b.cleanTransient.Len()\n\t\tb.cleanTransient.RemoveOldest()\n\t\tdoUnlock = true\n\t\tb.bytesLock.Lock()\n\t}\n\tif b.cleanTotalBytes+size > cleanBytesCapacity {\n\t\t\/\/ There must be too many permanent clean blocks, so we\n\t\t\/\/ couldn't make room.\n\t\treturn false\n\t}\n\t\/\/ Only count clean bytes if we actually have a transient cache.\n\tb.cleanTotalBytes += size\n\treturn true\n}\n\n\/\/ PutWithPrefetch implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) PutWithPrefetch(\n\tptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime,\n\thasPrefetched bool) error {\n\n\tmadeRoom := false\n\n\tswitch lifetime {\n\tcase NoCacheEntry:\n\t\treturn nil\n\n\tcase TransientEntry:\n\t\t\/\/ If it's the right type of block, store the hash -> ID mapping.\n\t\tif fBlock, isFileBlock := block.(*FileBlock); b.ids != nil && isFileBlock && !fBlock.IsInd {\n\n\t\t\tkey := idCacheKey{tlf, fBlock.GetHash()}\n\t\t\t\/\/ zero out the refnonce, it doesn't matter\n\t\t\tptr.RefNonce = kbfsblock.ZeroRefNonce\n\t\t\tb.ids.Add(key, ptr)\n\t\t}\n\t\tif b.cleanTransient == nil {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Cache it later, once we know there's room\n\t\tdefer func() {\n\t\t\tif madeRoom {\n\t\t\t\tb.cleanTransient.Add(ptr.ID, blockContainer{block, hasPrefetched})\n\t\t\t}\n\t\t}()\n\n\tcase PermanentEntry:\n\t\tfunc() {\n\t\t\tb.cleanLock.Lock()\n\t\t\tdefer b.cleanLock.Unlock()\n\t\t\tb.cleanPermanent[ptr.ID] = block\n\t\t}()\n\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown lifetime %v\", lifetime)\n\t}\n\n\t\/\/ We must make room whether the cache is transient or permanent\n\tsize := uint64(getCachedBlockSize(block))\n\tmadeRoom = b.makeRoomForSize(size)\n\n\treturn nil\n}\n\n\/\/ Put implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) Put(\n\tptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime) error {\n\t\/\/ Default should be to assume that a prefetch has happened, and thus it\n\t\/\/ won't trigger prefetches in the future.\n\treturn b.PutWithPrefetch(ptr, tlf, block, lifetime, true)\n}\n\n\/\/ DeletePermanent implements the BlockCache interface for\n\/\/ BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeletePermanent(id kbfsblock.ID) error {\n\tb.cleanLock.Lock()\n\tdefer b.cleanLock.Unlock()\n\tblock, ok := b.cleanPermanent[id]\n\tif ok {\n\t\tdelete(b.cleanPermanent, id)\n\t\tb.bytesLock.Lock()\n\t\tdefer b.bytesLock.Unlock()\n\t\tb.cleanTotalBytes -= uint64(getCachedBlockSize(block))\n\t}\n\treturn nil\n}\n\n\/\/ DeleteTransient implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeleteTransient(\n\tptr BlockPointer, tlf tlf.ID) error {\n\tif b.cleanTransient == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ If the block is cached and a file block, delete the known\n\t\/\/ pointer as well.\n\tif tmp, ok := b.cleanTransient.Get(ptr.ID); ok {\n\t\tbc, ok := tmp.(blockContainer)\n\t\tif !ok {\n\t\t\treturn BadDataError{ptr.ID}\n\t\t}\n\t\tblock := bc.block\n\n\t\t\/\/ Remove the key if it exists\n\t\tif fBlock, ok := block.(*FileBlock); b.ids != nil && ok &&\n\t\t\t!fBlock.IsInd {\n\t\t\t_, hash := kbfshash.DoRawDefaultHash(fBlock.Contents)\n\t\t\tkey := idCacheKey{tlf, hash}\n\t\t\tb.ids.Remove(key)\n\t\t}\n\n\t\tb.cleanTransient.Remove(ptr.ID)\n\t}\n\treturn nil\n}\n\n\/\/ DeleteKnownPtr implements the BlockCache interface for BlockCacheStandard.\nfunc (b *BlockCacheStandard) DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error {\n\tif block.IsInd {\n\t\treturn NotDirectFileBlockError{}\n\t}\n\n\tif b.ids == nil {\n\t\treturn nil\n\t}\n\n\t_, hash := kbfshash.DoRawDefaultHash(block.Contents)\n\tkey := idCacheKey{tlf, hash}\n\tb.ids.Remove(key)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/swarm\/cluster\"\n)\n\n\/\/ EventsHandler broadcasts events to multiple client listeners.\ntype eventsHandler struct {\n\tsync.RWMutex\n\tws map[string]io.Writer\n\tcs map[string]chan struct{}\n}\n\n\/\/ NewEventsHandler creates a new EventsHandler for a cluster.\n\/\/ The new eventsHandler is initialized with no writers or channels.\nfunc newEventsHandler() *eventsHandler {\n\treturn &eventsHandler{\n\t\tws: make(map[string]io.Writer),\n\t\tcs: make(map[string]chan struct{}),\n\t}\n}\n\n\/\/ Add adds the writer and a new channel for the remote address.\nfunc (eh *eventsHandler) Add(remoteAddr string, w io.Writer) {\n\teh.Lock()\n\teh.ws[remoteAddr] = w\n\teh.cs[remoteAddr] = make(chan struct{})\n\teh.Unlock()\n}\n\n\/\/ Wait waits on a signal from the remote address.\nfunc (eh *eventsHandler) Wait(remoteAddr string, until int64) {\n\n\ttimer := time.NewTimer(0)\n\ttimer.Stop()\n\tif until > 0 {\n\t\tdur := time.Unix(until, 0).Sub(time.Now())\n\t\ttimer = time.NewTimer(dur)\n\t}\n\n\t\/\/ subscribe to http client close event\n\tw := eh.ws[remoteAddr]\n\tvar closeNotify <-chan bool\n\tif closeNotifier, ok := w.(http.CloseNotifier); ok {\n\t\tcloseNotify = closeNotifier.CloseNotify()\n\t}\n\n\tselect {\n\tcase <-eh.cs[remoteAddr]:\n\tcase <-closeNotify:\n\tcase <-timer.C: \/\/ `--until` timeout\n\t\tclose(eh.cs[remoteAddr])\n\t}\n\teh.cleanupHandler(remoteAddr)\n}\n\nfunc (eh *eventsHandler) cleanupHandler(remoteAddr string) {\n\teh.Lock()\n\t\/\/ the maps are expected to have the same keys\n\tdelete(eh.cs, remoteAddr)\n\tdelete(eh.ws, remoteAddr)\n\teh.Unlock()\n\n}\n\n\/\/ Handle writes information about a cluster event to each remote address in the cluster that has been added to the events handler.\n\/\/ After an unsuccessful write to a remote address, the associated channel is closed and the address is removed from the events handler.\nfunc (eh *eventsHandler) Handle(e *cluster.Event) error {\n\teh.RLock()\n\n\t\/\/ remove this hack once 1.10 is broadly adopted\n\tfrom := e.From\n\te.From = e.From + \" node:\" + e.Engine.Name\n\n\t\/\/ Attributes will be nil if the event was sent by engine < 1.10\n\tif e.Actor.Attributes == nil {\n\t\te.Actor.Attributes = make(map[string]string)\n\t}\n\te.Actor.Attributes[\"node.name\"] = e.Engine.Name\n\te.Actor.Attributes[\"node.id\"] = e.Engine.ID\n\te.Actor.Attributes[\"node.addr\"] = e.Engine.Addr\n\te.Actor.Attributes[\"node.ip\"] = e.Engine.IP\n\n\tdata, err := json.Marshal(e)\n\te.From = from\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove the node field once 1.10 is broadly adopted & interlock stop relying on it\n\tnode := fmt.Sprintf(\",%q:{%q:%q,%q:%q,%q:%q,%q:%q}}\",\n\t\t\"node\",\n\t\t\"Name\", e.Engine.Name,\n\t\t\"Id\", e.Engine.ID,\n\t\t\"Addr\", e.Engine.Addr,\n\t\t\"Ip\", e.Engine.IP,\n\t)\n\n\t\/\/ insert Node field\n\tdata = data[:len(data)-1]\n\tdata = append(data, []byte(node)...)\n\n\tvar failed []string\n\n\tfor key, w := range eh.ws {\n\t\tif _, err := fmt.Fprintf(w, string(data)); err != nil {\n\t\t\t\/\/ collect them to handle later under Lock\n\t\t\tfailed = append(failed, key)\n\t\t\tcontinue\n\t\t}\n\n\t\tif f, ok := w.(http.Flusher); ok {\n\t\t\tf.Flush()\n\t\t}\n\t}\n\teh.RUnlock()\n\teh.Lock()\n\tif len(failed) > 0 {\n\t\tfor _, key := range failed {\n\t\t\tif ch, ok := eh.cs[key]; ok {\n\t\t\t\tclose(ch)\n\t\t\t}\n\t\t\tdelete(eh.cs, key)\n\t\t}\n\t}\n\n\teh.Unlock()\n\n\treturn nil\n}\n\n\/\/ Size returns the number of remote addresses that the events handler currently contains.\nfunc (eh *eventsHandler) Size() int {\n\teh.RLock()\n\tdefer eh.RUnlock()\n\treturn len(eh.ws)\n}\n<commit_msg>add read lock for eventsHandler when only it is necessary.<commit_after>package api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/swarm\/cluster\"\n)\n\n\/\/ EventsHandler broadcasts events to multiple client listeners.\ntype eventsHandler struct {\n\tsync.RWMutex\n\tws map[string]io.Writer\n\tcs map[string]chan struct{}\n}\n\n\/\/ NewEventsHandler creates a new EventsHandler for a cluster.\n\/\/ The new eventsHandler is initialized with no writers or channels.\nfunc newEventsHandler() *eventsHandler {\n\treturn &eventsHandler{\n\t\tws: make(map[string]io.Writer),\n\t\tcs: make(map[string]chan struct{}),\n\t}\n}\n\n\/\/ Add adds the writer and a new channel for the remote address.\nfunc (eh *eventsHandler) Add(remoteAddr string, w io.Writer) {\n\teh.Lock()\n\teh.ws[remoteAddr] = w\n\teh.cs[remoteAddr] = make(chan struct{})\n\teh.Unlock()\n}\n\n\/\/ Wait waits on a signal from the remote address.\nfunc (eh *eventsHandler) Wait(remoteAddr string, until int64) {\n\n\ttimer := time.NewTimer(0)\n\ttimer.Stop()\n\tif until > 0 {\n\t\tdur := time.Unix(until, 0).Sub(time.Now())\n\t\ttimer = time.NewTimer(dur)\n\t}\n\n\t\/\/ subscribe to http client close event\n\tw := eh.ws[remoteAddr]\n\tvar closeNotify <-chan bool\n\tif closeNotifier, ok := w.(http.CloseNotifier); ok {\n\t\tcloseNotify = closeNotifier.CloseNotify()\n\t}\n\n\tselect {\n\tcase <-eh.cs[remoteAddr]:\n\tcase <-closeNotify:\n\tcase <-timer.C: \/\/ `--until` timeout\n\t\tclose(eh.cs[remoteAddr])\n\t}\n\teh.cleanupHandler(remoteAddr)\n}\n\nfunc (eh *eventsHandler) cleanupHandler(remoteAddr string) {\n\teh.Lock()\n\t\/\/ the maps are expected to have the same keys\n\tdelete(eh.cs, remoteAddr)\n\tdelete(eh.ws, remoteAddr)\n\teh.Unlock()\n\n}\n\n\/\/ Handle writes information about a cluster event to each remote address in the cluster that has been added to the events handler.\n\/\/ After an unsuccessful write to a remote address, the associated channel is closed and the address is removed from the events handler.\nfunc (eh *eventsHandler) Handle(e *cluster.Event) error {\n\t\/\/ remove this hack once 1.10 is broadly adopted\n\tfrom := e.From\n\te.From = e.From + \" node:\" + e.Engine.Name\n\n\t\/\/ Attributes will be nil if the event was sent by engine < 1.10\n\tif e.Actor.Attributes == nil {\n\t\te.Actor.Attributes = make(map[string]string)\n\t}\n\te.Actor.Attributes[\"node.name\"] = e.Engine.Name\n\te.Actor.Attributes[\"node.id\"] = e.Engine.ID\n\te.Actor.Attributes[\"node.addr\"] = e.Engine.Addr\n\te.Actor.Attributes[\"node.ip\"] = e.Engine.IP\n\n\tdata, err := json.Marshal(e)\n\te.From = from\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove the node field once 1.10 is broadly adopted & interlock stop relying on it\n\tnode := fmt.Sprintf(\",%q:{%q:%q,%q:%q,%q:%q,%q:%q}}\",\n\t\t\"node\",\n\t\t\"Name\", e.Engine.Name,\n\t\t\"Id\", e.Engine.ID,\n\t\t\"Addr\", e.Engine.Addr,\n\t\t\"Ip\", e.Engine.IP,\n\t)\n\n\t\/\/ insert Node field\n\tdata = data[:len(data)-1]\n\tdata = append(data, []byte(node)...)\n\n\tvar failed []string\n\n\teh.RLock()\n\n\tfor key, w := range eh.ws {\n\t\tif _, err := fmt.Fprintf(w, string(data)); err != nil {\n\t\t\t\/\/ collect them to handle later under Lock\n\t\t\tfailed = append(failed, key)\n\t\t\tcontinue\n\t\t}\n\n\t\tif f, ok := w.(http.Flusher); ok {\n\t\t\tf.Flush()\n\t\t}\n\t}\n\teh.RUnlock()\n\teh.Lock()\n\tif len(failed) > 0 {\n\t\tfor _, key := range failed {\n\t\t\tif ch, ok := eh.cs[key]; ok {\n\t\t\t\tclose(ch)\n\t\t\t}\n\t\t\tdelete(eh.cs, key)\n\t\t}\n\t}\n\n\teh.Unlock()\n\n\treturn nil\n}\n\n\/\/ Size returns the number of remote addresses that the events handler currently contains.\nfunc (eh *eventsHandler) Size() int {\n\teh.RLock()\n\tdefer eh.RUnlock()\n\treturn len(eh.ws)\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ HealthAny is special, and is used as a wild card,\n\t\/\/ not as a specific state.\n\tHealthAny = \"any\"\n\tHealthPassing = \"passing\"\n\tHealthWarning = \"warning\"\n\tHealthCritical = \"critical\"\n\tHealthMaint = \"maintenance\"\n)\n\nconst (\n\t\/\/ NodeMaint is the special key set by a node in maintenance mode.\n\tNodeMaint = \"_node_maintenance\"\n\n\t\/\/ ServiceMaintPrefix is the prefix for a service in maintenance mode.\n\tServiceMaintPrefix = \"_service_maintenance:\"\n)\n\n\/\/ HealthCheck is used to represent a single check\ntype HealthCheck struct {\n\tNode string\n\tCheckID string\n\tName string\n\tStatus string\n\tNotes string\n\tOutput string\n\tServiceID string\n\tServiceName string\n}\n\n\/\/ HealthChecks is a collection of HealthCheck structs.\ntype HealthChecks []*HealthCheck\n\n\/\/ AggregatedStatus returns the \"best\" status for the list of health checks.\n\/\/ Because a given entry may have many service and node-level health checks\n\/\/ attached, this function determines the best representative of the status as\n\/\/ as single string using the following heuristic:\n\/\/\n\/\/ maintenance > critical > warning > passing\n\/\/\nfunc (c HealthChecks) AggregatedStatus() string {\n\tvar passing, warning, critical, maintenance bool\n\tfor _, check := range c {\n\t\tid := string(check.CheckID)\n\t\tif id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {\n\t\t\tmaintenance = true\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch check.Status {\n\t\tcase HealthPassing:\n\t\t\tpassing = true\n\t\tcase HealthWarning:\n\t\t\twarning = true\n\t\tcase HealthCritical:\n\t\t\tcritical = true\n\t\tdefault:\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\tswitch {\n\tcase maintenance:\n\t\treturn HealthMaint\n\tcase critical:\n\t\treturn HealthCritical\n\tcase warning:\n\t\treturn HealthWarning\n\tcase passing:\n\t\treturn HealthPassing\n\tdefault:\n\t\treturn HealthPassing\n\t}\n}\n\n\/\/ ServiceEntry is used for the health service endpoint\ntype ServiceEntry struct {\n\tNode *Node\n\tService *AgentService\n\tChecks []*HealthCheck\n}\n\n\/\/ Health can be used to query the Health endpoints\ntype Health struct {\n\tc *Client\n}\n\n\/\/ Health returns a handle to the health endpoints\nfunc (c *Client) Health() *Health {\n\treturn &Health{c}\n}\n\n\/\/ Node is used to query for checks belonging to a given node\nfunc (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/node\/\"+node)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out []*HealthCheck\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ Checks is used to return the checks associated with a service\nfunc (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/checks\/\"+service)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out []*HealthCheck\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ Service is used to query health information along with service info\n\/\/ for a given service. It can optionally do server-side filtering on a tag\n\/\/ or nodes with passing health checks only.\nfunc (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/service\/\"+service)\n\tr.setQueryOptions(q)\n\tif tag != \"\" {\n\t\tr.params.Set(\"tag\", tag)\n\t}\n\tif passingOnly {\n\t\tr.params.Set(HealthPassing, \"1\")\n\t}\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out []*ServiceEntry\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ State is used to retrieve all the checks in a given state.\n\/\/ The wildcard \"any\" state can also be used for all checks.\nfunc (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {\n\tswitch state {\n\tcase HealthAny:\n\tcase HealthWarning:\n\tcase HealthCritical:\n\tcase HealthPassing:\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"Unsupported state: %v\", state)\n\t}\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/state\/\"+state)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out []*HealthCheck\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n<commit_msg>Return the correct type<commit_after>package api\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ HealthAny is special, and is used as a wild card,\n\t\/\/ not as a specific state.\n\tHealthAny = \"any\"\n\tHealthPassing = \"passing\"\n\tHealthWarning = \"warning\"\n\tHealthCritical = \"critical\"\n\tHealthMaint = \"maintenance\"\n)\n\nconst (\n\t\/\/ NodeMaint is the special key set by a node in maintenance mode.\n\tNodeMaint = \"_node_maintenance\"\n\n\t\/\/ ServiceMaintPrefix is the prefix for a service in maintenance mode.\n\tServiceMaintPrefix = \"_service_maintenance:\"\n)\n\n\/\/ HealthCheck is used to represent a single check\ntype HealthCheck struct {\n\tNode string\n\tCheckID string\n\tName string\n\tStatus string\n\tNotes string\n\tOutput string\n\tServiceID string\n\tServiceName string\n}\n\n\/\/ HealthChecks is a collection of HealthCheck structs.\ntype HealthChecks []*HealthCheck\n\n\/\/ AggregatedStatus returns the \"best\" status for the list of health checks.\n\/\/ Because a given entry may have many service and node-level health checks\n\/\/ attached, this function determines the best representative of the status as\n\/\/ as single string using the following heuristic:\n\/\/\n\/\/ maintenance > critical > warning > passing\n\/\/\nfunc (c HealthChecks) AggregatedStatus() string {\n\tvar passing, warning, critical, maintenance bool\n\tfor _, check := range c {\n\t\tid := string(check.CheckID)\n\t\tif id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {\n\t\t\tmaintenance = true\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch check.Status {\n\t\tcase HealthPassing:\n\t\t\tpassing = true\n\t\tcase HealthWarning:\n\t\t\twarning = true\n\t\tcase HealthCritical:\n\t\t\tcritical = true\n\t\tdefault:\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\tswitch {\n\tcase maintenance:\n\t\treturn HealthMaint\n\tcase critical:\n\t\treturn HealthCritical\n\tcase warning:\n\t\treturn HealthWarning\n\tcase passing:\n\t\treturn HealthPassing\n\tdefault:\n\t\treturn HealthPassing\n\t}\n}\n\n\/\/ ServiceEntry is used for the health service endpoint\ntype ServiceEntry struct {\n\tNode *Node\n\tService *AgentService\n\tChecks HealthChecks\n}\n\n\/\/ Health can be used to query the Health endpoints\ntype Health struct {\n\tc *Client\n}\n\n\/\/ Health returns a handle to the health endpoints\nfunc (c *Client) Health() *Health {\n\treturn &Health{c}\n}\n\n\/\/ Node is used to query for checks belonging to a given node\nfunc (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/node\/\"+node)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out HealthChecks\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ Checks is used to return the checks associated with a service\nfunc (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/checks\/\"+service)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out HealthChecks\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ Service is used to query health information along with service info\n\/\/ for a given service. It can optionally do server-side filtering on a tag\n\/\/ or nodes with passing health checks only.\nfunc (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/service\/\"+service)\n\tr.setQueryOptions(q)\n\tif tag != \"\" {\n\t\tr.params.Set(\"tag\", tag)\n\t}\n\tif passingOnly {\n\t\tr.params.Set(HealthPassing, \"1\")\n\t}\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out []*ServiceEntry\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n\n\/\/ State is used to retrieve all the checks in a given state.\n\/\/ The wildcard \"any\" state can also be used for all checks.\nfunc (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {\n\tswitch state {\n\tcase HealthAny:\n\tcase HealthWarning:\n\tcase HealthCritical:\n\tcase HealthPassing:\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"Unsupported state: %v\", state)\n\t}\n\tr := h.c.newRequest(\"GET\", \"\/v1\/health\/state\/\"+state)\n\tr.setQueryOptions(q)\n\trtt, resp, err := requireOK(h.c.doRequest(r))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out HealthChecks\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package jet\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype mapper struct {\n\tconv ColumnConverter\n}\n\nfunc (m *mapper) unpack(keys []string, values []interface{}, out interface{}) error {\n\tval := reflect.ValueOf(out)\n\tif val.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"cannot unpack result to non-pointer (%s)\", val.Type().String())\n\t}\n\treturn m.unpackValue(keys, values, val)\n}\n\nfunc (m *mapper) unpackValue(keys []string, values []interface{}, out reflect.Value) error {\n\tswitch out.Interface().(type) {\n\tcase ComplexValue:\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(out.Type().Elem()))\n\t\t}\n\t\tplain := reflect.Indirect(reflect.ValueOf(values[0]))\n\t\treturn out.Interface().(ComplexValue).Decode(plain.Interface())\n\t}\n\tif out.CanAddr() {\n\t\tswitch out.Addr().Interface().(type) {\n\t\tcase ComplexValue:\n\t\t\treturn m.unpackValue(keys, values, out.Addr())\n\t\t}\n\t}\n\tswitch out.Kind() {\n\tcase reflect.Ptr:\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(out.Type().Elem()))\n\t\t}\n\t\treturn m.unpackValue(keys, values, reflect.Indirect(out))\n\tcase reflect.Slice:\n\t\tif keys == nil {\n\t\t\treturn m.unpackSimple(nil, values, out)\n\t\t} else {\n\t\t\treturn m.unpackSlice(keys, values, out)\n\t\t}\n\tcase reflect.Struct:\n\t\treturn m.unpackStruct(keys, values, out)\n\tcase reflect.Map:\n\t\tif keys == nil {\n\t\t\treturn m.unpackSimple(nil, values, out)\n\t\t} else {\n\t\t\treturn m.unpackMap(keys, values, out)\n\t\t}\n\tdefault:\n\t\treturn m.unpackSimple(nil, values, out)\n\t}\n\treturn fmt.Errorf(\"cannot unpack result to %T (%s)\", out, out.Kind())\n}\n\nfunc (m *mapper) unpackSlice(keys []string, values []interface{}, out reflect.Value) error {\n\telemTyp := reflect.Indirect(reflect.New(out.Type().Elem()))\n\tm.unpackValue(keys, values, elemTyp)\n\tout.Set(reflect.Append(out, elemTyp))\n\treturn nil\n}\n\nfunc (m *mapper) unpackStruct(keys []string, values []interface{}, out reflect.Value) error {\n\t\/\/ If no keys are passed in it's probably a struct field requiring\n\t\/\/ a simple unpack like a time.Time struct field.\n\tif len(keys) == 0 {\n\t\treturn m.unpackSimple(nil, values, out)\n\t}\n\tfor i, k := range keys {\n\t\tvar convKey string\n\t\tif m.conv == nil {\n\t\t\tconvKey = strings.ToUpper(k[:1]) + k[1:]\n\t\t} else if m.conv != nil {\n\t\t\tconvKey = m.conv.ColumnToFieldName(k)\n\t\t}\n\t\tfield := out.FieldByName(convKey)\n\n\t\t\/\/ If the field is not found it can mean that we don't want it or that\n\t\t\/\/ we have special case like UserID, UUID, userUUID\n\t\t\/\/ So fix the name and try again\n\t\tif !field.IsValid() {\n\t\t\tconvKey = strings.Replace(convKey, \"Uuid\", \"UUID\", -1)\n\t\t\tconvKey = strings.Replace(convKey, \"Id\", \"ID\", -1)\n\t\t\tfield = out.FieldByName(convKey)\n\t\t}\n\t\t\n\t\tif field.IsValid() {\n\t\t\tm.unpackValue(nil, values[i:i+1], field)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *mapper) unpackMap(keys []string, values []interface{}, out reflect.Value) error {\n\tif out.IsNil() {\n\t\tout.Set(reflect.MakeMap(out.Type()))\n\t}\n\tfor i, k := range keys {\n\t\telemTyp := reflect.Indirect(reflect.New(out.Type().Elem()))\n\t\tm.unpackValue(nil, values[i:i+1], elemTyp)\n\t\tout.SetMapIndex(reflect.ValueOf(k), elemTyp)\n\t}\n\n\treturn nil\n}\n\nfunc (m *mapper) unpackSimple(keys []string, values []interface{}, out reflect.Value) error {\n\tif !out.IsValid() {\n\t\tpanic(\"cannot unpack to zero value\")\n\t}\n\tif len(values) != 1 {\n\t\tpanic(\"cannot unpack to simple value, invalid values input\")\n\t}\n\tsetValue(reflect.Indirect(reflect.ValueOf(values[0])), out)\n\treturn nil\n}\n\nfunc convertAndSet(f interface{}, to reflect.Value) {\n\tfrom := reflect.ValueOf(f)\n\tif from.IsValid() {\n\t\tto.Set(from.Convert(to.Type()))\n\t} else {\n\t\tto.Set(reflect.Zero(to.Type()))\n\t}\n}\n\nfunc setValue(from, to reflect.Value) {\n\tswitch t := from.Interface().(type) {\n\tcase []uint8:\n\t\tsetValueFromBytes(t, to)\n\tcase int, int8, int16, int32, int64:\n\t\tsetValueFromInt(reflect.ValueOf(t).Int(), to)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tsetValueFromUint(reflect.ValueOf(t).Uint(), to)\n\tcase float32, float64:\n\t\tsetValueFromFloat(reflect.ValueOf(t).Float(), to)\n\tcase time.Time:\n\t\tsetValueFromTime(t, to)\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n\nfunc setValueFromBytes(t []uint8, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tn, _ := strconv.ParseInt(string(t), 10, 32)\n\t\tconvertAndSet(bool(n == 1), to)\n\tcase int, int8, int16, int32, int64:\n\t\tn, _ := strconv.ParseInt(string(t), 10, 64)\n\t\tconvertAndSet(n, to)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tn, _ := strconv.ParseUint(string(t), 10, 64)\n\t\tconvertAndSet(n, to)\n\tcase float32:\n\t\tn, _ := strconv.ParseFloat(string(t), 32)\n\t\tconvertAndSet(n, to)\n\tcase float64:\n\t\tn, _ := strconv.ParseFloat(string(t), 64)\n\t\tconvertAndSet(n, to)\n\tcase string:\n\t\tto.SetString(string(t))\n\tcase map[string]interface{}:\n\t\tto.Set(reflect.ValueOf(parseHstoreColumn(string(t))))\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n\nfunc setValueFromFloat(f float64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(f == 1), to)\n\tdefault:\n\t\tconvertAndSet(f, to)\n\t}\n}\n\nfunc setValueFromInt(i int64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(i == 1), to)\n\tdefault:\n\t\tconvertAndSet(i, to)\n\t}\n}\n\nfunc setValueFromUint(i uint64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(i == 1), to)\n\tdefault:\n\t\tconvertAndSet(i, to)\n\t}\n}\n\nfunc setValueFromTime(t time.Time, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase int64:\n\t\tconvertAndSet(int64(t.Unix()), to)\n\tcase uint64:\n\t\tconvertAndSet(uint64(t.Unix()), to)\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n<commit_msg>Handle IP\/Ip<commit_after>package jet\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype mapper struct {\n\tconv ColumnConverter\n}\n\nfunc (m *mapper) unpack(keys []string, values []interface{}, out interface{}) error {\n\tval := reflect.ValueOf(out)\n\tif val.Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"cannot unpack result to non-pointer (%s)\", val.Type().String())\n\t}\n\treturn m.unpackValue(keys, values, val)\n}\n\nfunc (m *mapper) unpackValue(keys []string, values []interface{}, out reflect.Value) error {\n\tswitch out.Interface().(type) {\n\tcase ComplexValue:\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(out.Type().Elem()))\n\t\t}\n\t\tplain := reflect.Indirect(reflect.ValueOf(values[0]))\n\t\treturn out.Interface().(ComplexValue).Decode(plain.Interface())\n\t}\n\tif out.CanAddr() {\n\t\tswitch out.Addr().Interface().(type) {\n\t\tcase ComplexValue:\n\t\t\treturn m.unpackValue(keys, values, out.Addr())\n\t\t}\n\t}\n\tswitch out.Kind() {\n\tcase reflect.Ptr:\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(out.Type().Elem()))\n\t\t}\n\t\treturn m.unpackValue(keys, values, reflect.Indirect(out))\n\tcase reflect.Slice:\n\t\tif keys == nil {\n\t\t\treturn m.unpackSimple(nil, values, out)\n\t\t} else {\n\t\t\treturn m.unpackSlice(keys, values, out)\n\t\t}\n\tcase reflect.Struct:\n\t\treturn m.unpackStruct(keys, values, out)\n\tcase reflect.Map:\n\t\tif keys == nil {\n\t\t\treturn m.unpackSimple(nil, values, out)\n\t\t} else {\n\t\t\treturn m.unpackMap(keys, values, out)\n\t\t}\n\tdefault:\n\t\treturn m.unpackSimple(nil, values, out)\n\t}\n\treturn fmt.Errorf(\"cannot unpack result to %T (%s)\", out, out.Kind())\n}\n\nfunc (m *mapper) unpackSlice(keys []string, values []interface{}, out reflect.Value) error {\n\telemTyp := reflect.Indirect(reflect.New(out.Type().Elem()))\n\tm.unpackValue(keys, values, elemTyp)\n\tout.Set(reflect.Append(out, elemTyp))\n\treturn nil\n}\n\nfunc (m *mapper) unpackStruct(keys []string, values []interface{}, out reflect.Value) error {\n\t\/\/ If no keys are passed in it's probably a struct field requiring\n\t\/\/ a simple unpack like a time.Time struct field.\n\tif len(keys) == 0 {\n\t\treturn m.unpackSimple(nil, values, out)\n\t}\n\tfor i, k := range keys {\n\t\tvar convKey string\n\t\tif m.conv == nil {\n\t\t\tconvKey = strings.ToUpper(k[:1]) + k[1:]\n\t\t} else if m.conv != nil {\n\t\t\tconvKey = m.conv.ColumnToFieldName(k)\n\t\t}\n\t\tfield := out.FieldByName(convKey)\n\n\t\t\/\/ If the field is not found it can mean that we don't want it or that\n\t\t\/\/ we have special case like UserID, UUID, userUUID\n\t\t\/\/ So fix the name and try again\n\t\tif !field.IsValid() {\n\t\t\tconvKey = strings.Replace(convKey, \"Uuid\", \"UUID\", -1)\n\t\t\tconvKey = strings.Replace(convKey, \"Id\", \"ID\", -1)\n\t\t\tconvKey = strings.Replace(convKey, \"Ip\", \"IP\", -1)\n\t\t\tfield = out.FieldByName(convKey)\n\t\t}\n\t\t\n\t\tif field.IsValid() {\n\t\t\tm.unpackValue(nil, values[i:i+1], field)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *mapper) unpackMap(keys []string, values []interface{}, out reflect.Value) error {\n\tif out.IsNil() {\n\t\tout.Set(reflect.MakeMap(out.Type()))\n\t}\n\tfor i, k := range keys {\n\t\telemTyp := reflect.Indirect(reflect.New(out.Type().Elem()))\n\t\tm.unpackValue(nil, values[i:i+1], elemTyp)\n\t\tout.SetMapIndex(reflect.ValueOf(k), elemTyp)\n\t}\n\n\treturn nil\n}\n\nfunc (m *mapper) unpackSimple(keys []string, values []interface{}, out reflect.Value) error {\n\tif !out.IsValid() {\n\t\tpanic(\"cannot unpack to zero value\")\n\t}\n\tif len(values) != 1 {\n\t\tpanic(\"cannot unpack to simple value, invalid values input\")\n\t}\n\tsetValue(reflect.Indirect(reflect.ValueOf(values[0])), out)\n\treturn nil\n}\n\nfunc convertAndSet(f interface{}, to reflect.Value) {\n\tfrom := reflect.ValueOf(f)\n\tif from.IsValid() {\n\t\tto.Set(from.Convert(to.Type()))\n\t} else {\n\t\tto.Set(reflect.Zero(to.Type()))\n\t}\n}\n\nfunc setValue(from, to reflect.Value) {\n\tswitch t := from.Interface().(type) {\n\tcase []uint8:\n\t\tsetValueFromBytes(t, to)\n\tcase int, int8, int16, int32, int64:\n\t\tsetValueFromInt(reflect.ValueOf(t).Int(), to)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tsetValueFromUint(reflect.ValueOf(t).Uint(), to)\n\tcase float32, float64:\n\t\tsetValueFromFloat(reflect.ValueOf(t).Float(), to)\n\tcase time.Time:\n\t\tsetValueFromTime(t, to)\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n\nfunc setValueFromBytes(t []uint8, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tn, _ := strconv.ParseInt(string(t), 10, 32)\n\t\tconvertAndSet(bool(n == 1), to)\n\tcase int, int8, int16, int32, int64:\n\t\tn, _ := strconv.ParseInt(string(t), 10, 64)\n\t\tconvertAndSet(n, to)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tn, _ := strconv.ParseUint(string(t), 10, 64)\n\t\tconvertAndSet(n, to)\n\tcase float32:\n\t\tn, _ := strconv.ParseFloat(string(t), 32)\n\t\tconvertAndSet(n, to)\n\tcase float64:\n\t\tn, _ := strconv.ParseFloat(string(t), 64)\n\t\tconvertAndSet(n, to)\n\tcase string:\n\t\tto.SetString(string(t))\n\tcase map[string]interface{}:\n\t\tto.Set(reflect.ValueOf(parseHstoreColumn(string(t))))\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n\nfunc setValueFromFloat(f float64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(f == 1), to)\n\tdefault:\n\t\tconvertAndSet(f, to)\n\t}\n}\n\nfunc setValueFromInt(i int64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(i == 1), to)\n\tdefault:\n\t\tconvertAndSet(i, to)\n\t}\n}\n\nfunc setValueFromUint(i uint64, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase bool:\n\t\tconvertAndSet(bool(i == 1), to)\n\tdefault:\n\t\tconvertAndSet(i, to)\n\t}\n}\n\nfunc setValueFromTime(t time.Time, to reflect.Value) {\n\tswitch to.Interface().(type) {\n\tcase int64:\n\t\tconvertAndSet(int64(t.Unix()), to)\n\tcase uint64:\n\t\tconvertAndSet(uint64(t.Unix()), to)\n\tdefault:\n\t\tconvertAndSet(t, to)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This is the primary file of LibreJS-Gopher\n\npackage librejsgopher\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar LicensesCapitalizedStrings []string \/\/ An array of strings where each string is a license name or sub-string that needs to be capitalized\n\nvar LicenseMap map[string]string \/\/ LicenseMap is a map of license names to magnet URLs\n\nfunc init() {\n\tLicensesCapitalizedStrings = []string{\"BSD\", \"CC\", \"GPL\", \"ISC\", \"MPL\"}\n\n\tLicenseMap = map[string]string{\n\t\t\"AGPL-3.0\": \"magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt\",\n\t\t\"Apache-2.0\": \"magnet:?xt=urn:btih:8e4f440f4c65981c5bf93c76d35135ba5064d8b7&dn=apache-2.0.txt\",\n\t\t\"Artistic-2.0\": \"magnet:?xt=urn:btih:54fd2283f9dbdf29466d2df1a98bf8f65cafe314&dn=artistic-2.0.txt\",\n\t\t\"BSD-3.0\": \"magnet:?xt=urn:btih:c80d50af7d3db9be66a4d0a86db0286e4fd33292&dn=bsd-3-clause.txt\",\n\t\t\"CC0\": \"magnet:?xt=urn:btih:90dc5c0be029de84e523b9b3922520e79e0e6f08&dn=cc0.txt\",\n\t\t\"Expat\": \"magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt\",\n\t\t\"FreeBSD\": \"magnet:?xt=urn:btih:87f119ba0b429ba17a44b4bffcab33165ebdacc0&dn=freebsd.txt\",\n\t\t\"GPL-2.0\": \"magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt\",\n\t\t\"GPL-3.0\": \"magnet:?xt=urn:btih:1f739d935676111cfff4b4693e3816e664797050&dn=gpl-3.0.txt\",\n\t\t\"ISC\": \"magnet:?xt=urn:btih:b8999bbaf509c08d127678643c515b9ab0836bae&dn=ISC.txt\",\n\t\t\"LGPL-2.1\": \"magnet:?xt=urn:btih:5de60da917303dbfad4f93fb1b985ced5a89eac2&dn=lgpl-2.1.txt\",\n\t\t\"LGPL-3.0\": \"magnet:?xt=urn:btih:0ef1b8170b3b615170ff270def6427c317705f85&dn=lgpl-3.0.txt\",\n\t\t\"MPL-2.0\": \"magnet:?xt=urn:btih:3877d6d54b3accd4bc32f8a48bf32ebc0901502a&dn=mpl-2.0.txt\",\n\t\t\"Public-Domain\": \"magnet:?xt=urn:btih:e95b018ef3580986a04669f1b5879592219e2a7a&dn=public-domain.txt\",\n\t\t\"X11\": \"magnet:?xt=urn:btih:5305d91886084f776adcf57509a648432709a7c7&dn=x11.txt\",\n\t\t\"XFree86\": \"magnet:?xt=urn:btih:12f2ec9e8de2a3b0002a33d518d6010cc8ab2ae9&dn=xfree86.txt\",\n\t}\n}\n\n\/\/ AddLicense\n\/\/ This function will add a valid LibreJS short-form header and footer to the file. You can set to write the file automatically (we will always return new file content or an error)\nfunc AddLicense(license string, file string, writeContentAutomatically bool) (string, error) {\n\tvar newFileContent string\n\tvar addError error\n\n\tif strings.HasSuffix(file, \".js\") { \/\/ If this is a JavaScript file\n\t\tfileContentBytes, fileReadError := ioutil.ReadFile(file) \/\/ Get the fileContent or if the file does not exist (or we do not have the permission) assign to fileReadError\n\n\t\tif fileReadError == nil { \/\/ If there was no read error\n\t\t\tparsedLicense := ParseLicenseName(license) \/\/ Format license to be consistent when appending to newFileContent\n\t\t\tmagnetURL, magnetError := GetMagnetLink(parsedLicense) \/\/ Attempt to get the magnet URL and if license does not exist return error\n\n\t\t\tif magnetError == nil { \/\/ If the license requested is valid and return a magnet URL\n\t\t\t\tfileContentString := string(fileContentBytes[:]) \/\/ Convert to string\n\t\t\t\tnewFileContent = \"@license \" + magnetURL + \" \" + parsedLicense + \"\\n\" + fileContentString + \"\\n@license-end\" \/\/ Add @license INFO + content + @license-end\n\n\t\t\t\tif writeContentAutomatically { \/\/ If we should write the file content automatically\n\t\t\t\t\tfileStruct, _ := os.Open(file) \/\/ Open the file and get an os.File struct\n\t\t\t\t\tfileStat, _ := fileStruct.Stat() \/\/ Get the stats about the file\n\t\t\t\t\tfileMode := fileStat.Mode()\n\t\t\t\t\tfileStruct.Close() \/\/ Close the open file struct\n\n\t\t\t\t\tioutil.WriteFile(file, []byte(newFileContent), fileMode) \/\/ Write the file with the new content and same mode\n\t\t\t\t}\n\t\t\t} else { \/\/ If the magnetURL does not exist\n\t\t\t\taddError = magnetError \/\/ Assign addError as the magnetError\n\t\t\t}\n\t\t} else { \/\/ If there was a read error\n\t\t\taddError = errors.New(file + \" does not exist.\")\n\t\t}\n\t} else { \/\/ File provided is not a JavaScript file\n\t\taddError = errors.New(file + \" is not a JavaScript file (detected if ending with .js).\")\n\t}\n\n\treturn newFileContent, addError\n}\n\n\/\/ AddLicenseInfo\n\/\/ This function is a backwards-compatible call that actually calls AddLicenseInfo\nfunc AddLicenseInfo(license string, file string, writeContentAutomatically bool) (string, error) {\n\treturn AddLicense(license, file, writeContentAutomatically)\n}\n\n\/\/ GetFileLicense\n\/\/ This function will get the license of the file, assuming it uses a valid LibreJS short-form header.\nfunc GetFileLicense(file string) (LibreJSMetaInfo, error) {\n\tvar getError error\n\tvar metaInfo LibreJSMetaInfo\n\n\tfileContentBytes, fileReadError := ioutil.ReadFile(file) \/\/ Get the fileContent or if the file does not exist (or we do not have the permission) assign to fileReadError\n\n\tif fileReadError == nil { \/\/ If there was no read error\n\t\tfileContent := string(fileContentBytes[:]) \/\/ Convert to string\n\t\tfileContentLines := strings.Split(fileContent, \"\\n\") \/\/ Split each new line into an []string\n\t\tfileContentLinesCount := len(fileContentLines)\n\t\tlinesParsed := 0 \/\/ Define linesParsed as the number of lines parsed by FileLicenseLineParser\n\n\t\tif fileContentLinesCount > 1 { \/\/ If this file is not a single line or empty\n\t\t\tfileLineParserChannel := make(chan LibreJSMetaInfo, fileContentLinesCount) \/\/ Make a channel that takes LibreJSMetaInfo\n\n\t\t\tfor _, lineContent := range fileContentLines { \/\/ For each license\n\t\t\t\tgo FileLicenseLineParser(fileLineParserChannel, lineContent) \/\/ Asynchronously call FileLicenseLineParser\n\t\t\t}\n\n\t\tLineParserLoop:\n\t\t\tfor libreJsMetaInfo := range fileLineParserChannel { \/\/ Constantly listen for channel input\n\t\t\t\tif libreJsMetaInfo.License != \"\" { \/\/ If the provided LibreJSMetaInfo has a valid License\n\t\t\t\t\tmetaInfo = libreJsMetaInfo \/\/ Assign metaInfo as provided libreJsMetaInfo\n\t\t\t\t}\n\n\t\t\t\tlinesParsed++ \/\/ Increment linesParsed\n\n\t\t\t\tif fileContentLinesCount == linesParsed { \/\/ If we're parsed all the liens\n\t\t\t\t\tclose(fileLineParserChannel) \/\/ Close the channel\n\t\t\t\t\tbreak LineParserLoop \/\/ Break LineParserLoop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif metaInfo.License == \"\" { \/\/ If there is no License defined by the end of the file\n\t\t\t\tgetError = errors.New(\"LibreJS short-form header does not exist in this file.\")\n\t\t\t}\n\t\t} else { \/\/ If the length of the file is 1 line or none\n\t\t\tgetError = errors.New(\"File is either empty or does not contain the necessary individual lines required by LibreJS short-form blocks.\")\n\t\t}\n\t} else { \/\/ If the file does not exist\n\t\tgetError = errors.New(file + \" does not exist.\")\n\t}\n\n\treturn metaInfo, getError\n}\n\n\/\/ FileLicenseLineParser\n\/\/ This function handles individual line parsing\nfunc FileLicenseLineParser(returnContentChannel chan LibreJSMetaInfo, lineContent string) {\n\tmetaInfo := LibreJSMetaInfo{}\n\n\tlineContent = strings.Replace(lineContent, \"\/\/\", \"\", -1) \/\/ Replace any \/\/ with nothing\n\tlineContent = strings.Replace(lineContent, \"*\", \"\", -1) \/\/ Replace any * (block quotes) with nothing\n\tlineContent = strings.TrimPrefix(lineContent, \" \") \/\/ Trim any prefixed whitespace\n\n\tif strings.HasPrefix(lineContent, \"@license\") { \/\/ If the line starts with @license\n\t\tlicenseHeaderFragments := strings.SplitN(lineContent, \" \", 3) \/\/ Split the license header info into three segments, separated by whitespace\n\n\t\tif len(licenseHeaderFragments) == 3 { \/\/ If there are three items in the slice, meaning this is a @license line and not @license-end\n\t\t\tmetaInfo.License = ParseLicenseName(licenseHeaderFragments[2]) \/\/ Define License as the parsed license name of the last item in fragments index\n\t\t\tmetaInfo.Magnet = licenseHeaderFragments[1] \/\/ Define Magnet as the second item in the fragments index\n\t\t}\n\t}\n\n\treturnContentChannel <- metaInfo\n}\n\n\/\/ GetMagnetLink\n\/\/ This function will get a magnet link of the associated license exists\n\/\/ Returns string for magnet link, error if item does not exist\nfunc GetMagnetLink(license string) (string, error) {\n\tvar magnetLinkFetchError error\n\n\tlicense = ParseLicenseName(license) \/\/ Parse the license name first\n\tmagnetURL, licenseExists := LicenseMap[license]\n\n\tif !licenseExists { \/\/ If the license does not exist\n\t\tmagnetLinkFetchError = errors.New(license + \" does not exist.\")\n\t}\n\n\treturn magnetURL, magnetLinkFetchError\n}\n\n\/\/ ParseLicenseName\n\/\/ This function will attempt to parse the provided license into a more logic naming scheme used in LicenseMap\nfunc ParseLicenseName(license string) string {\n\tlicense = strings.ToLower(license) \/\/ Lowercase the entire string to make selective capitalization easier\n\n\tfor _, licenseCapitalizedString := range LicensesCapitalizedStrings { \/\/ For each capitalized string of a license in LicensesCapitalizedStrings\n\t\tlicense = strings.Replace(license, strings.ToLower(licenseCapitalizedString), licenseCapitalizedString, -1) \/\/ Replace any lowercase instance with capitalized instance\n\t}\n\n\tlicense = strings.Title(license) \/\/ Title the license (example: apache -> Apache)\n\tlicense = strings.TrimSpace(license) \/\/ Trim all the spacing before and after the license\n\tlicense = strings.Replace(license, \" \", \"-\", -1) \/\/ Replace whitespacing with hyphens\n\n\treturn license\n}\n<commit_msg>Added IsLicense func.<commit_after>\/\/ This is the primary file of LibreJS-Gopher\n\npackage librejsgopher\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar LicensesCapitalizedStrings []string \/\/ An array of strings where each string is a license name or sub-string that needs to be capitalized\n\nvar LicenseMap map[string]string \/\/ LicenseMap is a map of license names to magnet URLs\n\nfunc init() {\n\tLicensesCapitalizedStrings = []string{\"BSD\", \"CC\", \"GPL\", \"ISC\", \"MPL\"}\n\n\tLicenseMap = map[string]string{\n\t\t\"AGPL-3.0\": \"magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt\",\n\t\t\"Apache-2.0\": \"magnet:?xt=urn:btih:8e4f440f4c65981c5bf93c76d35135ba5064d8b7&dn=apache-2.0.txt\",\n\t\t\"Artistic-2.0\": \"magnet:?xt=urn:btih:54fd2283f9dbdf29466d2df1a98bf8f65cafe314&dn=artistic-2.0.txt\",\n\t\t\"BSD-3.0\": \"magnet:?xt=urn:btih:c80d50af7d3db9be66a4d0a86db0286e4fd33292&dn=bsd-3-clause.txt\",\n\t\t\"CC0\": \"magnet:?xt=urn:btih:90dc5c0be029de84e523b9b3922520e79e0e6f08&dn=cc0.txt\",\n\t\t\"Expat\": \"magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt\",\n\t\t\"FreeBSD\": \"magnet:?xt=urn:btih:87f119ba0b429ba17a44b4bffcab33165ebdacc0&dn=freebsd.txt\",\n\t\t\"GPL-2.0\": \"magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&dn=gpl-2.0.txt\",\n\t\t\"GPL-3.0\": \"magnet:?xt=urn:btih:1f739d935676111cfff4b4693e3816e664797050&dn=gpl-3.0.txt\",\n\t\t\"ISC\": \"magnet:?xt=urn:btih:b8999bbaf509c08d127678643c515b9ab0836bae&dn=ISC.txt\",\n\t\t\"LGPL-2.1\": \"magnet:?xt=urn:btih:5de60da917303dbfad4f93fb1b985ced5a89eac2&dn=lgpl-2.1.txt\",\n\t\t\"LGPL-3.0\": \"magnet:?xt=urn:btih:0ef1b8170b3b615170ff270def6427c317705f85&dn=lgpl-3.0.txt\",\n\t\t\"MPL-2.0\": \"magnet:?xt=urn:btih:3877d6d54b3accd4bc32f8a48bf32ebc0901502a&dn=mpl-2.0.txt\",\n\t\t\"Public-Domain\": \"magnet:?xt=urn:btih:e95b018ef3580986a04669f1b5879592219e2a7a&dn=public-domain.txt\",\n\t\t\"X11\": \"magnet:?xt=urn:btih:5305d91886084f776adcf57509a648432709a7c7&dn=x11.txt\",\n\t\t\"XFree86\": \"magnet:?xt=urn:btih:12f2ec9e8de2a3b0002a33d518d6010cc8ab2ae9&dn=xfree86.txt\",\n\t}\n}\n\n\/\/ AddLicense\n\/\/ This function will add a valid LibreJS short-form header and footer to the file. You can set to write the file automatically (we will always return new file content or an error)\nfunc AddLicense(license string, file string, writeContentAutomatically bool) (string, error) {\n\tvar newFileContent string\n\tvar addError error\n\n\tif strings.HasSuffix(file, \".js\") { \/\/ If this is a JavaScript file\n\t\tfileContentBytes, fileReadError := ioutil.ReadFile(file) \/\/ Get the fileContent or if the file does not exist (or we do not have the permission) assign to fileReadError\n\n\t\tif fileReadError == nil { \/\/ If there was no read error\n\t\t\tparsedLicense := ParseLicenseName(license) \/\/ Format license to be consistent when appending to newFileContent\n\t\t\tmagnetURL, magnetError := GetMagnetLink(parsedLicense) \/\/ Attempt to get the magnet URL and if license does not exist return error\n\n\t\t\tif magnetError == nil { \/\/ If the license requested is valid and return a magnet URL\n\t\t\t\tfileContentString := string(fileContentBytes[:]) \/\/ Convert to string\n\t\t\t\tnewFileContent = \"@license \" + magnetURL + \" \" + parsedLicense + \"\\n\" + fileContentString + \"\\n@license-end\" \/\/ Add @license INFO + content + @license-end\n\n\t\t\t\tif writeContentAutomatically { \/\/ If we should write the file content automatically\n\t\t\t\t\tfileStruct, _ := os.Open(file) \/\/ Open the file and get an os.File struct\n\t\t\t\t\tfileStat, _ := fileStruct.Stat() \/\/ Get the stats about the file\n\t\t\t\t\tfileMode := fileStat.Mode()\n\t\t\t\t\tfileStruct.Close() \/\/ Close the open file struct\n\n\t\t\t\t\tioutil.WriteFile(file, []byte(newFileContent), fileMode) \/\/ Write the file with the new content and same mode\n\t\t\t\t}\n\t\t\t} else { \/\/ If the magnetURL does not exist\n\t\t\t\taddError = magnetError \/\/ Assign addError as the magnetError\n\t\t\t}\n\t\t} else { \/\/ If there was a read error\n\t\t\taddError = errors.New(file + \" does not exist.\")\n\t\t}\n\t} else { \/\/ File provided is not a JavaScript file\n\t\taddError = errors.New(file + \" is not a JavaScript file (detected if ending with .js).\")\n\t}\n\n\treturn newFileContent, addError\n}\n\n\/\/ AddLicenseInfo\n\/\/ This function is a backwards-compatible call that actually calls AddLicenseInfo\nfunc AddLicenseInfo(license string, file string, writeContentAutomatically bool) (string, error) {\n\treturn AddLicense(license, file, writeContentAutomatically)\n}\n\n\/\/ GetFileLicense\n\/\/ This function will get the license of the file, assuming it uses a valid LibreJS short-form header.\nfunc GetFileLicense(file string) (LibreJSMetaInfo, error) {\n\tvar getError error\n\tvar metaInfo LibreJSMetaInfo\n\n\tfileContentBytes, fileReadError := ioutil.ReadFile(file) \/\/ Get the fileContent or if the file does not exist (or we do not have the permission) assign to fileReadError\n\n\tif fileReadError == nil { \/\/ If there was no read error\n\t\tfileContent := string(fileContentBytes[:]) \/\/ Convert to string\n\t\tfileContentLines := strings.Split(fileContent, \"\\n\") \/\/ Split each new line into an []string\n\t\tfileContentLinesCount := len(fileContentLines)\n\t\tlinesParsed := 0 \/\/ Define linesParsed as the number of lines parsed by FileLicenseLineParser\n\n\t\tif fileContentLinesCount > 1 { \/\/ If this file is not a single line or empty\n\t\t\tfileLineParserChannel := make(chan LibreJSMetaInfo, fileContentLinesCount) \/\/ Make a channel that takes LibreJSMetaInfo\n\n\t\t\tfor _, lineContent := range fileContentLines { \/\/ For each license\n\t\t\t\tgo FileLicenseLineParser(fileLineParserChannel, lineContent) \/\/ Asynchronously call FileLicenseLineParser\n\t\t\t}\n\n\t\tLineParserLoop:\n\t\t\tfor libreJsMetaInfo := range fileLineParserChannel { \/\/ Constantly listen for channel input\n\t\t\t\tif libreJsMetaInfo.License != \"\" { \/\/ If the provided LibreJSMetaInfo has a valid License\n\t\t\t\t\tmetaInfo = libreJsMetaInfo \/\/ Assign metaInfo as provided libreJsMetaInfo\n\t\t\t\t}\n\n\t\t\t\tlinesParsed++ \/\/ Increment linesParsed\n\n\t\t\t\tif fileContentLinesCount == linesParsed { \/\/ If we're parsed all the liens\n\t\t\t\t\tclose(fileLineParserChannel) \/\/ Close the channel\n\t\t\t\t\tbreak LineParserLoop \/\/ Break LineParserLoop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif metaInfo.License == \"\" { \/\/ If there is no License defined by the end of the file\n\t\t\t\tgetError = errors.New(\"LibreJS short-form header does not exist in this file.\")\n\t\t\t}\n\t\t} else { \/\/ If the length of the file is 1 line or none\n\t\t\tgetError = errors.New(\"File is either empty or does not contain the necessary individual lines required by LibreJS short-form blocks.\")\n\t\t}\n\t} else { \/\/ If the file does not exist\n\t\tgetError = errors.New(file + \" does not exist.\")\n\t}\n\n\treturn metaInfo, getError\n}\n\n\/\/ FileLicenseLineParser\n\/\/ This function handles individual line parsing\nfunc FileLicenseLineParser(returnContentChannel chan LibreJSMetaInfo, lineContent string) {\n\tmetaInfo := LibreJSMetaInfo{}\n\n\tlineContent = strings.Replace(lineContent, \"\/\/\", \"\", -1) \/\/ Replace any \/\/ with nothing\n\tlineContent = strings.Replace(lineContent, \"*\", \"\", -1) \/\/ Replace any * (block quotes) with nothing\n\tlineContent = strings.TrimPrefix(lineContent, \" \") \/\/ Trim any prefixed whitespace\n\n\tif strings.HasPrefix(lineContent, \"@license\") { \/\/ If the line starts with @license\n\t\tlicenseHeaderFragments := strings.SplitN(lineContent, \" \", 3) \/\/ Split the license header info into three segments, separated by whitespace\n\n\t\tif len(licenseHeaderFragments) == 3 { \/\/ If there are three items in the slice, meaning this is a @license line and not @license-end\n\t\t\tmetaInfo.License = ParseLicenseName(licenseHeaderFragments[2]) \/\/ Define License as the parsed license name of the last item in fragments index\n\t\t\tmetaInfo.Magnet = licenseHeaderFragments[1] \/\/ Define Magnet as the second item in the fragments index\n\t\t}\n\t}\n\n\treturnContentChannel <- metaInfo\n}\n\n\/\/ GetMagnetLink\n\/\/ This function will get a magnet link of the associated license exists\n\/\/ Returns string for magnet link, error if item does not exist\nfunc GetMagnetLink(license string) (string, error) {\n\tvar magnetLinkFetchError error\n\n\tlicense = ParseLicenseName(license) \/\/ Parse the license name first\n\tmagnetURL, licenseExists := LicenseMap[license]\n\n\tif !licenseExists { \/\/ If the license does not exist\n\t\tmagnetLinkFetchError = errors.New(license + \" does not exist.\")\n\t}\n\n\treturn magnetURL, magnetLinkFetchError\n}\n\n\/\/ IsLicense\n\/\/ This function will return whether this is a valid license in the LicenseMap or not\nfunc IsLicense(license string) bool {\n\tvar isLicense bool \/\/ Define isLicense by default as false\n\tlicense = ParseLicenseName(license) \/\/ Parse the license\n\n\t_, isLicense = LicenseMap[license] \/\/ Assign isLicense to bool of LicenseMap[License] exist check\n\n\treturn isLicense\n}\n\n\/\/ ParseLicenseName\n\/\/ This function will attempt to parse the provided license into a more logic naming scheme used in LicenseMap\nfunc ParseLicenseName(license string) string {\n\tlicense = strings.ToLower(license) \/\/ Lowercase the entire string to make selective capitalization easier\n\n\tfor _, licenseCapitalizedString := range LicensesCapitalizedStrings { \/\/ For each capitalized string of a license in LicensesCapitalizedStrings\n\t\tlicense = strings.Replace(license, strings.ToLower(licenseCapitalizedString), licenseCapitalizedString, -1) \/\/ Replace any lowercase instance with capitalized instance\n\t}\n\n\tlicense = strings.Title(license) \/\/ Title the license (example: apache -> Apache)\n\tlicense = strings.TrimSpace(license) \/\/ Trim all the spacing before and after the license\n\tlicense = strings.Replace(license, \" \", \"-\", -1) \/\/ Replace whitespacing with hyphens\n\n\treturn license\n}\n<|endoftext|>"} {"text":"<commit_before>package application\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/didip\/stopwatch\"\n\n\t\"github.com\/resourced\/resourced-master\/libtime\"\n\t\"github.com\/resourced\/resourced-master\/models\/pg\"\n)\n\n\/\/ PruneAll runs background job to prune all old timeseries data.\nfunc (app *Application) PruneAll() {\n\tif !app.GeneralConfig.EnablePeriodicPruneJobs {\n\t\treturn\n\t}\n\n\tfor {\n\t\tvar clusters []*pg.ClusterRow\n\t\tvar err error\n\n\t\tdaemons := make([]string, 0)\n\t\tallPeers := app.Peers.Items()\n\n\t\tif len(allPeers) > 0 {\n\t\t\tfor hostAndPort, _ := range allPeers {\n\t\t\t\tdaemons = append(daemons, hostAndPort)\n\t\t\t}\n\n\t\t\tgroupedClustersByDaemon, err := pg.NewCluster(app.GetContext()).AllSplitToDaemons(nil, daemons)\n\t\t\tif err != nil {\n\t\t\t\tapp.ErrLogger.WithFields(logrus.Fields{\n\t\t\t\t\t\"Method\": \"Cluster.AllSplitToDaemons\",\n\t\t\t\t}).Error(err)\n\n\t\t\t\tlibtime.SleepString(\"24h\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tclusters = groupedClustersByDaemon[app.FullAddr()]\n\n\t\t} else {\n\t\t\tclusters, err = pg.NewCluster(app.GetContext()).All(nil)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tapp.ErrLogger.WithFields(logrus.Fields{\n\t\t\t\t\"Method\": \"Application.PruneAll\",\n\t\t\t}).Error(err)\n\n\t\t\tlibtime.SleepString(\"24h\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, cluster := range clusters {\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSCheckOnce(cluster.ID)\n\t\t\t}(cluster)\n\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSMetricOnce(cluster.ID)\n\t\t\t}(cluster)\n\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSEventOnce(cluster.ID)\n\t\t\t}(cluster)\n\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSLogOnce(cluster.ID)\n\t\t\t}(cluster)\n\t\t}\n\n\t\tlibtime.SleepString(\"24h\")\n\t}\n}\n\n\/\/ PruneTSCheckOnce deletes old ts_checks data.\nfunc (app *Application) PruneTSCheckOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSCheck(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSCheckOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSMetricOnce deletes old ts_metrics data.\nfunc (app *Application) PruneTSMetricOnce(clusterID int64) (err error) {\n\tif app.PGDBConfig.TSMetric == nil {\n\t\treturn nil\n\t}\n\n\tf := func() {\n\t\terr = pg.NewTSMetric(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSMetricOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSEventOnce deletes old ts_events data.\nfunc (app *Application) PruneTSEventOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSEvent(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSEventOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSLogOnce deletes old ts_logs data.\nfunc (app *Application) PruneTSLogOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSLog(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSLogOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n<commit_msg>Make sure metrics prunner does not run if TSMetric uses Cassandra backend.<commit_after>package application\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/didip\/stopwatch\"\n\n\t\"github.com\/resourced\/resourced-master\/libtime\"\n\t\"github.com\/resourced\/resourced-master\/models\/pg\"\n)\n\nfunc (app *Application) myClusters() (clusters []*pg.ClusterRow, err error) {\n\tdaemons := make([]string, 0)\n\tallPeers := app.Peers.Items()\n\n\tif len(allPeers) > 0 {\n\t\tfor hostAndPort, _ := range allPeers {\n\t\t\tdaemons = append(daemons, hostAndPort)\n\t\t}\n\n\t\tgroupedClustersByDaemon, err := pg.NewCluster(app.GetContext()).AllSplitToDaemons(nil, daemons)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclusters = groupedClustersByDaemon[app.FullAddr()]\n\n\t} else {\n\t\tclusters, err = pg.NewCluster(app.GetContext()).All(nil)\n\t}\n\n\treturn clusters, err\n}\n\n\/\/ PruneAll runs background job to prune all old timeseries data.\nfunc (app *Application) PruneAll() {\n\tif !app.GeneralConfig.EnablePeriodicPruneJobs {\n\t\treturn\n\t}\n\n\tfor {\n\t\tclusters, err := app.myClusters()\n\t\tif err != nil {\n\t\t\tapp.ErrLogger.WithFields(logrus.Fields{\n\t\t\t\t\"Method\": \"Application.myClusters\",\n\t\t\t}).Error(err)\n\n\t\t\tlibtime.SleepString(\"24h\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, cluster := range clusters {\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSCheckOnce(cluster.ID)\n\t\t\t}(cluster)\n\n\t\t\tif app.GeneralConfig.GetMetricsDB() == \"pg\" {\n\t\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\t\tapp.PruneTSMetricOnce(cluster.ID)\n\t\t\t\t}(cluster)\n\t\t\t}\n\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSEventOnce(cluster.ID)\n\t\t\t}(cluster)\n\n\t\t\tgo func(cluster *pg.ClusterRow) {\n\t\t\t\tapp.PruneTSLogOnce(cluster.ID)\n\t\t\t}(cluster)\n\t\t}\n\n\t\tlibtime.SleepString(\"24h\")\n\t}\n}\n\n\/\/ PruneTSCheckOnce deletes old ts_checks data.\nfunc (app *Application) PruneTSCheckOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSCheck(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSCheckOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSMetricOnce deletes old ts_metrics data.\nfunc (app *Application) PruneTSMetricOnce(clusterID int64) (err error) {\n\tif app.GeneralConfig.GetMetricsDB() != \"pg\" {\n\t\treturn nil\n\t}\n\n\tf := func() {\n\t\terr = pg.NewTSMetric(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSMetricOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSEventOnce deletes old ts_events data.\nfunc (app *Application) PruneTSEventOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSEvent(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSEventOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n\n\/\/ PruneTSLogOnce deletes old ts_logs data.\nfunc (app *Application) PruneTSLogOnce(clusterID int64) (err error) {\n\tf := func() {\n\t\terr = pg.NewTSLog(app.GetContext(), clusterID).DeleteDeleted(nil, clusterID)\n\t}\n\n\tlatency := stopwatch.Measure(f)\n\n\tlogFields := logrus.Fields{\n\t\t\"Method\": \"Application.PruneTSLogOnce\",\n\t\t\"NanoSeconds\": latency,\n\t\t\"MicroSeconds\": latency \/ 1000,\n\t\t\"MilliSeconds\": latency \/ 1000 \/ 1000,\n\t}\n\tif err != nil {\n\t\tapp.ErrLogger.WithFields(logFields).Error(err)\n\t} else {\n\t\tapp.OutLogger.WithFields(logFields).Info(\"Latency measurement\")\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/native\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/oauth\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/hc\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/repository\"\n\t\"github.com\/tsuru\/tsuru\/router\"\n)\n\nconst Version = \"0.10.2\"\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype TsuruHandler struct {\n\tmethod string\n\tpath string\n\th http.Handler\n}\n\nfunc fatal(err error) {\n\tfmt.Println(err.Error())\n\tlog.Fatal(err.Error())\n}\n\nvar tsuruHandlerList []TsuruHandler\n\n\/\/RegisterHandler inserts a handler on a list of handlers\nfunc RegisterHandler(path string, method string, h http.Handler) {\n\tvar th TsuruHandler\n\tth.path = path\n\tth.method = method\n\tth.h = h\n\ttsuruHandlerList = append(tsuruHandlerList, th)\n}\n\nfunc getAuthScheme() (string, error) {\n\tname, err := config.GetString(\"auth:scheme\")\n\tif name == \"\" {\n\t\tname = \"native\"\n\t}\n\treturn name, err\n}\n\n\/\/ RunServer starts tsuru API server. The dry parameter indicates whether the\n\/\/ server should run in dry mode, not starting the HTTP listener (for testing\n\/\/ purposes).\nfunc RunServer(dry bool) http.Handler {\n\tlog.Init()\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tconnString = db.DefaultDatabaseURL\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tdbName = db.DefaultDatabaseName\n\t}\n\tfmt.Printf(\"Using mongodb database %q from the server %q.\\n\", dbName, connString)\n\n\tm := &delayedRouter{}\n\n\tfor _, handler := range tsuruHandlerList {\n\t\tm.Add(handler.method, handler.path, handler.h)\n\t}\n\n\tif disableIndex, _ := config.GetBool(\"disable-index-page\"); !disableIndex {\n\t\tm.Add(\"Get\", \"\/\", Handler(index))\n\t}\n\tm.Add(\"Get\", \"\/info\", Handler(info))\n\n\tm.Add(\"Get\", \"\/services\/instances\", authorizationRequiredHandler(serviceInstances))\n\tm.Add(\"Get\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(serviceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(removeServiceInstance))\n\tm.Add(\"Post\", \"\/services\/instances\", authorizationRequiredHandler(createServiceInstance))\n\tm.Add(\"Put\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(bindServiceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(unbindServiceInstance))\n\tm.Add(\"Get\", \"\/services\/instances\/{instance}\/status\", authorizationRequiredHandler(serviceInstanceStatus))\n\n\tm.AddAll(\"\/services\/proxy\/{instance}\", authorizationRequiredHandler(serviceProxy))\n\n\tm.Add(\"Get\", \"\/services\", authorizationRequiredHandler(serviceList))\n\tm.Add(\"Post\", \"\/services\", authorizationRequiredHandler(serviceCreate))\n\tm.Add(\"Put\", \"\/services\", authorizationRequiredHandler(serviceUpdate))\n\tm.Add(\"Delete\", \"\/services\/{name}\", authorizationRequiredHandler(serviceDelete))\n\tm.Add(\"Get\", \"\/services\/{name}\", authorizationRequiredHandler(serviceInfo))\n\tm.Add(\"Get\", \"\/services\/{name}\/plans\", authorizationRequiredHandler(servicePlans))\n\tm.Add(\"Get\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceDoc))\n\tm.Add(\"Put\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceAddDoc))\n\tm.Add(\"Put\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(grantServiceAccess))\n\tm.Add(\"Delete\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(revokeServiceAccess))\n\n\tm.Add(\"Delete\", \"\/apps\/{app}\", authorizationRequiredHandler(appDelete))\n\tm.Add(\"Get\", \"\/apps\/{app}\", authorizationRequiredHandler(appInfo))\n\tm.Add(\"Post\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(setCName))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(unsetCName))\n\trunHandler := authorizationRequiredHandler(runCommand)\n\tm.Add(\"Post\", \"\/apps\/{app}\/run\", runHandler)\n\tm.Add(\"Post\", \"\/apps\/{app}\/restart\", authorizationRequiredHandler(restart))\n\tm.Add(\"Post\", \"\/apps\/{app}\/start\", authorizationRequiredHandler(start))\n\tm.Add(\"Post\", \"\/apps\/{app}\/stop\", authorizationRequiredHandler(stop))\n\tm.Add(\"Get\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(getAppQuota))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(changeAppQuota))\n\tm.Add(\"Get\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(getEnv))\n\tm.Add(\"Post\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(setEnv))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(unsetEnv))\n\tm.Add(\"Get\", \"\/apps\", authorizationRequiredHandler(appList))\n\tm.Add(\"Post\", \"\/apps\", authorizationRequiredHandler(createApp))\n\tm.Add(\"Post\", \"\/apps\/{app}\/team-owner\", authorizationRequiredHandler(setTeamOwner))\n\tforceDeleteLockHandler := AdminRequiredHandler(forceDeleteLock)\n\tm.Add(\"Delete\", \"\/apps\/{app}\/lock\", forceDeleteLockHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(addUnits))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(removeUnits))\n\tregisterUnitHandler := authorizationRequiredHandler(registerUnit)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/register\", registerUnitHandler)\n\tsetUnitStatusHandler := authorizationRequiredHandler(setUnitStatus)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/{unit}\", setUnitStatusHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(grantAppAccess))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(revokeAppAccess))\n\tm.Add(\"Get\", \"\/apps\/{app}\/log\", authorizationRequiredHandler(appLog))\n\tlogPostHandler := authorizationRequiredHandler(addLog)\n\tm.Add(\"Post\", \"\/apps\/{app}\/log\", logPostHandler)\n\tsaveCustomDataHandler := authorizationRequiredHandler(saveAppCustomData)\n\tm.Add(\"Post\", \"\/apps\/{app}\/customdata\", saveCustomDataHandler)\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\/rollback\", authorizationRequiredHandler(deployRollback))\n\tm.Add(\"Get\", \"\/apps\/{app}\/shell\", authorizationRequiredHandler(remoteShellHandler))\n\n\tm.Add(\"Get\", \"\/autoscale\", authorizationRequiredHandler(autoScaleHistoryHandler))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\", authorizationRequiredHandler(autoScaleConfig))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/enable\", authorizationRequiredHandler(autoScaleEnable))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/disable\", authorizationRequiredHandler(autoScaleDisable))\n\n\tm.Add(\"Get\", \"\/deploys\", authorizationRequiredHandler(deploysList))\n\tm.Add(\"Get\", \"\/deploys\/{deploy}\", authorizationRequiredHandler(deployInfo))\n\n\tm.Add(\"Get\", \"\/platforms\", authorizationRequiredHandler(platformList))\n\tm.Add(\"Post\", \"\/platforms\", AdminRequiredHandler(platformAdd))\n\tm.Add(\"Put\", \"\/platforms\/{name}\", AdminRequiredHandler(platformUpdate))\n\tm.Add(\"Delete\", \"\/platforms\/{name}\", AdminRequiredHandler(platformRemove))\n\n\t\/\/ These handlers don't use :app on purpose. Using :app means that only\n\t\/\/ the token generate for the given app is valid, but these handlers\n\t\/\/ use a token generated for Gandalf.\n\tm.Add(\"Get\", \"\/apps\/{appname}\/available\", authorizationRequiredHandler(appIsAvailable))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/repository\/clone\", authorizationRequiredHandler(deploy))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\", authorizationRequiredHandler(deploy))\n\n\tm.Add(\"Get\", \"\/users\", AdminRequiredHandler(listUsers))\n\tm.Add(\"Post\", \"\/users\", Handler(createUser))\n\tm.Add(\"Get\", \"\/auth\/scheme\", Handler(authScheme))\n\tm.Add(\"Post\", \"\/auth\/login\", Handler(login))\n\tm.Add(\"Post\", \"\/users\/{email}\/password\", Handler(resetPassword))\n\tm.Add(\"Post\", \"\/users\/{email}\/tokens\", Handler(login))\n\tm.Add(\"Get\", \"\/users\/{email}\/quota\", AdminRequiredHandler(getUserQuota))\n\tm.Add(\"Post\", \"\/users\/{email}\/quota\", AdminRequiredHandler(changeUserQuota))\n\tm.Add(\"Delete\", \"\/users\/tokens\", authorizationRequiredHandler(logout))\n\tm.Add(\"Put\", \"\/users\/password\", authorizationRequiredHandler(changePassword))\n\tm.Add(\"Delete\", \"\/users\", authorizationRequiredHandler(removeUser))\n\tm.Add(\"Get\", \"\/users\/keys\", authorizationRequiredHandler(listKeys))\n\tm.Add(\"Post\", \"\/users\/keys\", authorizationRequiredHandler(addKeyToUser))\n\tm.Add(\"Delete\", \"\/users\/keys\", authorizationRequiredHandler(removeKeyFromUser))\n\tm.Add(\"Get\", \"\/users\/api-key\", authorizationRequiredHandler(showAPIToken))\n\tm.Add(\"Post\", \"\/users\/api-key\", authorizationRequiredHandler(regenerateAPIToken))\n\n\tm.Add(\"Delete\", \"\/logs\", AdminRequiredHandler(logRemove))\n\n\tm.Add(\"Get\", \"\/teams\", authorizationRequiredHandler(teamList))\n\tm.Add(\"Post\", \"\/teams\", authorizationRequiredHandler(createTeam))\n\tm.Add(\"Get\", \"\/teams\/{name}\", authorizationRequiredHandler(getTeam))\n\tm.Add(\"Delete\", \"\/teams\/{name}\", authorizationRequiredHandler(removeTeam))\n\tm.Add(\"Put\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(addUserToTeam))\n\tm.Add(\"Delete\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(removeUserFromTeam))\n\n\tm.Add(\"Put\", \"\/swap\", authorizationRequiredHandler(swap))\n\n\tm.Add(\"Get\", \"\/healthcheck\/\", http.HandlerFunc(healthcheck))\n\n\tm.Add(\"Get\", \"\/iaas\/machines\", AdminRequiredHandler(machinesList))\n\tm.Add(\"Delete\", \"\/iaas\/machines\/{machine_id}\", AdminRequiredHandler(machineDestroy))\n\tm.Add(\"Get\", \"\/iaas\/templates\", AdminRequiredHandler(templatesList))\n\tm.Add(\"Post\", \"\/iaas\/templates\", AdminRequiredHandler(templateCreate))\n\tm.Add(\"Delete\", \"\/iaas\/templates\/{template_name}\", AdminRequiredHandler(templateDestroy))\n\n\tm.Add(\"Get\", \"\/plans\", authorizationRequiredHandler(listPlans))\n\tm.Add(\"Post\", \"\/plans\", AdminRequiredHandler(addPlan))\n\tm.Add(\"Delete\", \"\/plans\/{planname}\", AdminRequiredHandler(removePlan))\n\n\tm.Add(\"Get\", \"\/debug\/goroutines\", AdminRequiredHandler(dumpGoroutines))\n\n\tn := negroni.New()\n\tn.Use(negroni.NewRecovery())\n\tn.Use(newLoggerMiddleware())\n\tn.UseHandler(m)\n\tn.Use(negroni.HandlerFunc(contextClearerMiddleware))\n\tn.Use(negroni.HandlerFunc(flushingWriterMiddleware))\n\tn.Use(negroni.HandlerFunc(errorHandlingMiddleware))\n\tn.Use(negroni.HandlerFunc(setVersionHeadersMiddleware))\n\tn.Use(negroni.HandlerFunc(authTokenMiddleware))\n\tn.Use(&appLockMiddleware{excludedHandlers: []http.Handler{\n\t\tlogPostHandler,\n\t\trunHandler,\n\t\tforceDeleteLockHandler,\n\t\tregisterUnitHandler,\n\t\tsaveCustomDataHandler,\n\t\tsetUnitStatusHandler,\n\t}})\n\tn.UseHandler(http.HandlerFunc(runDelayedHandler))\n\n\tif !dry {\n\t\trouterType, err := config.GetString(\"docker:router\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\trouterObj, err := router.Get(routerType)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tif messageRouter, ok := routerObj.(router.MessageRouter); ok {\n\t\t\tstartupMessage, err := messageRouter.StartupMessage()\n\t\t\tif err == nil && startupMessage != \"\" {\n\t\t\t\tfmt.Print(startupMessage)\n\t\t\t}\n\t\t}\n\t\trepoManager, err := config.GetString(\"repo-manager\")\n\t\tif err != nil {\n\t\t\trepoManager = \"gandalf\"\n\t\t\tfmt.Println(\"Warning: configuration didn't declare a repository manager, using default manager.\")\n\t\t}\n\t\tfmt.Printf(\"Using %q repository manager.\\n\", repoManager)\n\t\tif initializer, ok := repository.Manager().(repository.Initializer); ok {\n\t\t\tfmt.Printf(\"Initializing %q repository manager.\\n\", repoManager)\n\t\t\terr = initializer.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tprovisioner, err := getProvisioner()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Warning: configuration didn't declare a provisioner, using default provisioner.\")\n\t\t}\n\t\tapp.Provisioner, err = provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q provisioner.\\n\", provisioner)\n\t\tif initializableProvisioner, ok := app.Provisioner.(provision.InitializableProvisioner); ok {\n\t\t\terr = initializableProvisioner.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tif messageProvisioner, ok := app.Provisioner.(provision.MessageProvisioner); ok {\n\t\t\tstartupMessage, err := messageProvisioner.StartupMessage()\n\t\t\tif err == nil && startupMessage != \"\" {\n\t\t\t\tfmt.Print(startupMessage)\n\t\t\t}\n\t\t}\n\t\tscheme, err := getAuthScheme()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare auth:scheme, using default scheme.\\n\")\n\t\t}\n\t\tapp.AuthScheme, err = auth.GetScheme(scheme)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q auth scheme.\\n\", scheme)\n\t\tfmt.Println(\"Checking components status:\")\n\t\tresults := hc.Check()\n\t\tvar hcFail bool\n\t\tfor _, result := range results {\n\t\t\tif result.Status != hc.HealthCheckOK {\n\t\t\t\thcFail = true\n\t\t\t\tfmt.Printf(\"ERROR: %q is not working: %s\\n\", result.Name, result.Status)\n\t\t\t}\n\t\t}\n\t\tif hcFail {\n\t\t\tos.Exit(2)\n\t\t}\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tapp.StartAutoScale()\n\t\ttls, _ := config.GetBool(\"use-tls\")\n\t\tif tls {\n\t\t\tcertFile, err := config.GetString(\"tls:cert-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tkeyFile, err := config.GetString(\"tls:key-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP\/TLS server listening at %s...\\n\", listen)\n\t\t\tfatal(http.ListenAndServeTLS(listen, certFile, keyFile, n))\n\t\t} else {\n\t\t\tlistener, err := net.Listen(\"tcp\", listen)\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP server listening at %s...\\n\", listen)\n\t\t\thttp.Handle(\"\/\", n)\n\t\t\tfatal(http.Serve(listener, nil))\n\t\t}\n\t}\n\treturn n\n}\n<commit_msg>api\/server: threat healthcheck failures as warnings instead of errors<commit_after>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/tsuru\/app\"\n\t\"github.com\/tsuru\/tsuru\/auth\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/native\"\n\t_ \"github.com\/tsuru\/tsuru\/auth\/oauth\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/hc\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/repository\"\n\t\"github.com\/tsuru\/tsuru\/router\"\n)\n\nconst Version = \"0.10.2\"\n\nfunc getProvisioner() (string, error) {\n\tprovisioner, err := config.GetString(\"provisioner\")\n\tif provisioner == \"\" {\n\t\tprovisioner = \"docker\"\n\t}\n\treturn provisioner, err\n}\n\ntype TsuruHandler struct {\n\tmethod string\n\tpath string\n\th http.Handler\n}\n\nfunc fatal(err error) {\n\tfmt.Println(err.Error())\n\tlog.Fatal(err.Error())\n}\n\nvar tsuruHandlerList []TsuruHandler\n\n\/\/RegisterHandler inserts a handler on a list of handlers\nfunc RegisterHandler(path string, method string, h http.Handler) {\n\tvar th TsuruHandler\n\tth.path = path\n\tth.method = method\n\tth.h = h\n\ttsuruHandlerList = append(tsuruHandlerList, th)\n}\n\nfunc getAuthScheme() (string, error) {\n\tname, err := config.GetString(\"auth:scheme\")\n\tif name == \"\" {\n\t\tname = \"native\"\n\t}\n\treturn name, err\n}\n\n\/\/ RunServer starts tsuru API server. The dry parameter indicates whether the\n\/\/ server should run in dry mode, not starting the HTTP listener (for testing\n\/\/ purposes).\nfunc RunServer(dry bool) http.Handler {\n\tlog.Init()\n\tconnString, err := config.GetString(\"database:url\")\n\tif err != nil {\n\t\tconnString = db.DefaultDatabaseURL\n\t}\n\tdbName, err := config.GetString(\"database:name\")\n\tif err != nil {\n\t\tdbName = db.DefaultDatabaseName\n\t}\n\tfmt.Printf(\"Using mongodb database %q from the server %q.\\n\", dbName, connString)\n\n\tm := &delayedRouter{}\n\n\tfor _, handler := range tsuruHandlerList {\n\t\tm.Add(handler.method, handler.path, handler.h)\n\t}\n\n\tif disableIndex, _ := config.GetBool(\"disable-index-page\"); !disableIndex {\n\t\tm.Add(\"Get\", \"\/\", Handler(index))\n\t}\n\tm.Add(\"Get\", \"\/info\", Handler(info))\n\n\tm.Add(\"Get\", \"\/services\/instances\", authorizationRequiredHandler(serviceInstances))\n\tm.Add(\"Get\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(serviceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{name}\", authorizationRequiredHandler(removeServiceInstance))\n\tm.Add(\"Post\", \"\/services\/instances\", authorizationRequiredHandler(createServiceInstance))\n\tm.Add(\"Put\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(bindServiceInstance))\n\tm.Add(\"Delete\", \"\/services\/instances\/{instance}\/{app}\", authorizationRequiredHandler(unbindServiceInstance))\n\tm.Add(\"Get\", \"\/services\/instances\/{instance}\/status\", authorizationRequiredHandler(serviceInstanceStatus))\n\n\tm.AddAll(\"\/services\/proxy\/{instance}\", authorizationRequiredHandler(serviceProxy))\n\n\tm.Add(\"Get\", \"\/services\", authorizationRequiredHandler(serviceList))\n\tm.Add(\"Post\", \"\/services\", authorizationRequiredHandler(serviceCreate))\n\tm.Add(\"Put\", \"\/services\", authorizationRequiredHandler(serviceUpdate))\n\tm.Add(\"Delete\", \"\/services\/{name}\", authorizationRequiredHandler(serviceDelete))\n\tm.Add(\"Get\", \"\/services\/{name}\", authorizationRequiredHandler(serviceInfo))\n\tm.Add(\"Get\", \"\/services\/{name}\/plans\", authorizationRequiredHandler(servicePlans))\n\tm.Add(\"Get\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceDoc))\n\tm.Add(\"Put\", \"\/services\/{name}\/doc\", authorizationRequiredHandler(serviceAddDoc))\n\tm.Add(\"Put\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(grantServiceAccess))\n\tm.Add(\"Delete\", \"\/services\/{service}\/{team}\", authorizationRequiredHandler(revokeServiceAccess))\n\n\tm.Add(\"Delete\", \"\/apps\/{app}\", authorizationRequiredHandler(appDelete))\n\tm.Add(\"Get\", \"\/apps\/{app}\", authorizationRequiredHandler(appInfo))\n\tm.Add(\"Post\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(setCName))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/cname\", authorizationRequiredHandler(unsetCName))\n\trunHandler := authorizationRequiredHandler(runCommand)\n\tm.Add(\"Post\", \"\/apps\/{app}\/run\", runHandler)\n\tm.Add(\"Post\", \"\/apps\/{app}\/restart\", authorizationRequiredHandler(restart))\n\tm.Add(\"Post\", \"\/apps\/{app}\/start\", authorizationRequiredHandler(start))\n\tm.Add(\"Post\", \"\/apps\/{app}\/stop\", authorizationRequiredHandler(stop))\n\tm.Add(\"Get\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(getAppQuota))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/quota\", AdminRequiredHandler(changeAppQuota))\n\tm.Add(\"Get\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(getEnv))\n\tm.Add(\"Post\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(setEnv))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/env\", authorizationRequiredHandler(unsetEnv))\n\tm.Add(\"Get\", \"\/apps\", authorizationRequiredHandler(appList))\n\tm.Add(\"Post\", \"\/apps\", authorizationRequiredHandler(createApp))\n\tm.Add(\"Post\", \"\/apps\/{app}\/team-owner\", authorizationRequiredHandler(setTeamOwner))\n\tforceDeleteLockHandler := AdminRequiredHandler(forceDeleteLock)\n\tm.Add(\"Delete\", \"\/apps\/{app}\/lock\", forceDeleteLockHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(addUnits))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/units\", authorizationRequiredHandler(removeUnits))\n\tregisterUnitHandler := authorizationRequiredHandler(registerUnit)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/register\", registerUnitHandler)\n\tsetUnitStatusHandler := authorizationRequiredHandler(setUnitStatus)\n\tm.Add(\"Post\", \"\/apps\/{app}\/units\/{unit}\", setUnitStatusHandler)\n\tm.Add(\"Put\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(grantAppAccess))\n\tm.Add(\"Delete\", \"\/apps\/{app}\/teams\/{team}\", authorizationRequiredHandler(revokeAppAccess))\n\tm.Add(\"Get\", \"\/apps\/{app}\/log\", authorizationRequiredHandler(appLog))\n\tlogPostHandler := authorizationRequiredHandler(addLog)\n\tm.Add(\"Post\", \"\/apps\/{app}\/log\", logPostHandler)\n\tsaveCustomDataHandler := authorizationRequiredHandler(saveAppCustomData)\n\tm.Add(\"Post\", \"\/apps\/{app}\/customdata\", saveCustomDataHandler)\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\/rollback\", authorizationRequiredHandler(deployRollback))\n\tm.Add(\"Get\", \"\/apps\/{app}\/shell\", authorizationRequiredHandler(remoteShellHandler))\n\n\tm.Add(\"Get\", \"\/autoscale\", authorizationRequiredHandler(autoScaleHistoryHandler))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\", authorizationRequiredHandler(autoScaleConfig))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/enable\", authorizationRequiredHandler(autoScaleEnable))\n\tm.Add(\"Put\", \"\/autoscale\/{app}\/disable\", authorizationRequiredHandler(autoScaleDisable))\n\n\tm.Add(\"Get\", \"\/deploys\", authorizationRequiredHandler(deploysList))\n\tm.Add(\"Get\", \"\/deploys\/{deploy}\", authorizationRequiredHandler(deployInfo))\n\n\tm.Add(\"Get\", \"\/platforms\", authorizationRequiredHandler(platformList))\n\tm.Add(\"Post\", \"\/platforms\", AdminRequiredHandler(platformAdd))\n\tm.Add(\"Put\", \"\/platforms\/{name}\", AdminRequiredHandler(platformUpdate))\n\tm.Add(\"Delete\", \"\/platforms\/{name}\", AdminRequiredHandler(platformRemove))\n\n\t\/\/ These handlers don't use :app on purpose. Using :app means that only\n\t\/\/ the token generate for the given app is valid, but these handlers\n\t\/\/ use a token generated for Gandalf.\n\tm.Add(\"Get\", \"\/apps\/{appname}\/available\", authorizationRequiredHandler(appIsAvailable))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/repository\/clone\", authorizationRequiredHandler(deploy))\n\tm.Add(\"Post\", \"\/apps\/{appname}\/deploy\", authorizationRequiredHandler(deploy))\n\n\tm.Add(\"Get\", \"\/users\", AdminRequiredHandler(listUsers))\n\tm.Add(\"Post\", \"\/users\", Handler(createUser))\n\tm.Add(\"Get\", \"\/auth\/scheme\", Handler(authScheme))\n\tm.Add(\"Post\", \"\/auth\/login\", Handler(login))\n\tm.Add(\"Post\", \"\/users\/{email}\/password\", Handler(resetPassword))\n\tm.Add(\"Post\", \"\/users\/{email}\/tokens\", Handler(login))\n\tm.Add(\"Get\", \"\/users\/{email}\/quota\", AdminRequiredHandler(getUserQuota))\n\tm.Add(\"Post\", \"\/users\/{email}\/quota\", AdminRequiredHandler(changeUserQuota))\n\tm.Add(\"Delete\", \"\/users\/tokens\", authorizationRequiredHandler(logout))\n\tm.Add(\"Put\", \"\/users\/password\", authorizationRequiredHandler(changePassword))\n\tm.Add(\"Delete\", \"\/users\", authorizationRequiredHandler(removeUser))\n\tm.Add(\"Get\", \"\/users\/keys\", authorizationRequiredHandler(listKeys))\n\tm.Add(\"Post\", \"\/users\/keys\", authorizationRequiredHandler(addKeyToUser))\n\tm.Add(\"Delete\", \"\/users\/keys\", authorizationRequiredHandler(removeKeyFromUser))\n\tm.Add(\"Get\", \"\/users\/api-key\", authorizationRequiredHandler(showAPIToken))\n\tm.Add(\"Post\", \"\/users\/api-key\", authorizationRequiredHandler(regenerateAPIToken))\n\n\tm.Add(\"Delete\", \"\/logs\", AdminRequiredHandler(logRemove))\n\n\tm.Add(\"Get\", \"\/teams\", authorizationRequiredHandler(teamList))\n\tm.Add(\"Post\", \"\/teams\", authorizationRequiredHandler(createTeam))\n\tm.Add(\"Get\", \"\/teams\/{name}\", authorizationRequiredHandler(getTeam))\n\tm.Add(\"Delete\", \"\/teams\/{name}\", authorizationRequiredHandler(removeTeam))\n\tm.Add(\"Put\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(addUserToTeam))\n\tm.Add(\"Delete\", \"\/teams\/{team}\/{user}\", authorizationRequiredHandler(removeUserFromTeam))\n\n\tm.Add(\"Put\", \"\/swap\", authorizationRequiredHandler(swap))\n\n\tm.Add(\"Get\", \"\/healthcheck\/\", http.HandlerFunc(healthcheck))\n\n\tm.Add(\"Get\", \"\/iaas\/machines\", AdminRequiredHandler(machinesList))\n\tm.Add(\"Delete\", \"\/iaas\/machines\/{machine_id}\", AdminRequiredHandler(machineDestroy))\n\tm.Add(\"Get\", \"\/iaas\/templates\", AdminRequiredHandler(templatesList))\n\tm.Add(\"Post\", \"\/iaas\/templates\", AdminRequiredHandler(templateCreate))\n\tm.Add(\"Delete\", \"\/iaas\/templates\/{template_name}\", AdminRequiredHandler(templateDestroy))\n\n\tm.Add(\"Get\", \"\/plans\", authorizationRequiredHandler(listPlans))\n\tm.Add(\"Post\", \"\/plans\", AdminRequiredHandler(addPlan))\n\tm.Add(\"Delete\", \"\/plans\/{planname}\", AdminRequiredHandler(removePlan))\n\n\tm.Add(\"Get\", \"\/debug\/goroutines\", AdminRequiredHandler(dumpGoroutines))\n\n\tn := negroni.New()\n\tn.Use(negroni.NewRecovery())\n\tn.Use(newLoggerMiddleware())\n\tn.UseHandler(m)\n\tn.Use(negroni.HandlerFunc(contextClearerMiddleware))\n\tn.Use(negroni.HandlerFunc(flushingWriterMiddleware))\n\tn.Use(negroni.HandlerFunc(errorHandlingMiddleware))\n\tn.Use(negroni.HandlerFunc(setVersionHeadersMiddleware))\n\tn.Use(negroni.HandlerFunc(authTokenMiddleware))\n\tn.Use(&appLockMiddleware{excludedHandlers: []http.Handler{\n\t\tlogPostHandler,\n\t\trunHandler,\n\t\tforceDeleteLockHandler,\n\t\tregisterUnitHandler,\n\t\tsaveCustomDataHandler,\n\t\tsetUnitStatusHandler,\n\t}})\n\tn.UseHandler(http.HandlerFunc(runDelayedHandler))\n\n\tif !dry {\n\t\trouterType, err := config.GetString(\"docker:router\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\trouterObj, err := router.Get(routerType)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tif messageRouter, ok := routerObj.(router.MessageRouter); ok {\n\t\t\tstartupMessage, err := messageRouter.StartupMessage()\n\t\t\tif err == nil && startupMessage != \"\" {\n\t\t\t\tfmt.Print(startupMessage)\n\t\t\t}\n\t\t}\n\t\trepoManager, err := config.GetString(\"repo-manager\")\n\t\tif err != nil {\n\t\t\trepoManager = \"gandalf\"\n\t\t\tfmt.Println(\"Warning: configuration didn't declare a repository manager, using default manager.\")\n\t\t}\n\t\tfmt.Printf(\"Using %q repository manager.\\n\", repoManager)\n\t\tif initializer, ok := repository.Manager().(repository.Initializer); ok {\n\t\t\tfmt.Printf(\"Initializing %q repository manager.\\n\", repoManager)\n\t\t\terr = initializer.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tprovisioner, err := getProvisioner()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Warning: configuration didn't declare a provisioner, using default provisioner.\")\n\t\t}\n\t\tapp.Provisioner, err = provision.Get(provisioner)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q provisioner.\\n\", provisioner)\n\t\tif initializableProvisioner, ok := app.Provisioner.(provision.InitializableProvisioner); ok {\n\t\t\terr = initializableProvisioner.Initialize()\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t}\n\t\tif messageProvisioner, ok := app.Provisioner.(provision.MessageProvisioner); ok {\n\t\t\tstartupMessage, err := messageProvisioner.StartupMessage()\n\t\t\tif err == nil && startupMessage != \"\" {\n\t\t\t\tfmt.Print(startupMessage)\n\t\t\t}\n\t\t}\n\t\tscheme, err := getAuthScheme()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Warning: configuration didn't declare auth:scheme, using default scheme.\\n\")\n\t\t}\n\t\tapp.AuthScheme, err = auth.GetScheme(scheme)\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tfmt.Printf(\"Using %q auth scheme.\\n\", scheme)\n\t\tfmt.Println(\"Checking components status:\")\n\t\tresults := hc.Check()\n\t\tfor _, result := range results {\n\t\t\tif result.Status != hc.HealthCheckOK {\n\t\t\t\tfmt.Printf(\"WARNING: %q is not working: %s\\n\", result.Name, result.Status)\n\t\t\t}\n\t\t}\n\t\tlisten, err := config.GetString(\"listen\")\n\t\tif err != nil {\n\t\t\tfatal(err)\n\t\t}\n\t\tapp.StartAutoScale()\n\t\ttls, _ := config.GetBool(\"use-tls\")\n\t\tif tls {\n\t\t\tcertFile, err := config.GetString(\"tls:cert-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tkeyFile, err := config.GetString(\"tls:key-file\")\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP\/TLS server listening at %s...\\n\", listen)\n\t\t\tfatal(http.ListenAndServeTLS(listen, certFile, keyFile, n))\n\t\t} else {\n\t\t\tlistener, err := net.Listen(\"tcp\", listen)\n\t\t\tif err != nil {\n\t\t\t\tfatal(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"tsuru HTTP server listening at %s...\\n\", listen)\n\t\t\thttp.Handle(\"\/\", n)\n\t\t\tfatal(http.Serve(listener, nil))\n\t\t}\n\t}\n\treturn n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tstartingCards = flag.Int(\"starting_cards\", 5, \"Number of cards each player should start with.\")\n\tnumGames = flag.Int(\"num_games\", 100, \"Number of games that will be played.\")\n\tnumTests = flag.Int(\"num_tests\", 100, \"Number of tests to be performed.\")\n\tais = flag.String(\"ais\", \"avoidJChooseSuit,avoidJChooseSuit\", \"AI algorithms to be used by each player separated by comma. The first player is the main one.\")\n\trandomStart = flag.Bool(\"random_start\", true, \"Defines who starts randomly. If false, the first player always starts.\")\n\tdecks = flag.Int(\"decks\", 1, \"Number of card decks to be used.\")\n\tdebug = flag.Bool(\"debug\", false, \"Print debug information\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tvar results []int\n\ttotal := 0\n\tfor i := 0; i < *numTests; i++ {\n\t\tres := runGame()\n\t\tresults = append(results, res)\n\t\ttotal += res\n\t}\n\tavg := float64(total) \/ float64(*numTests)\n\tvariance := 0.0\n\tfor _, res := range results {\n\t\tterm := float64(res) - avg\n\t\tvariance += term * term\n\t}\n\tvariance \/= float64(*numTests)\n\tfmt.Printf(\"%v+-%v\\n\", avg, math.Sqrt(variance))\n}\n\nfunc runGame() int {\n\tw := 0\n\tfor i := 0; i < *numGames; i++ {\n\t\tg := newGame()\n\t\tif g.play() == 0 {\n\t\t\tw++\n\t\t}\n\t}\n\treturn w\n}\n\ntype game struct {\n\tplayers []*player\n\tplaying int\n\tdeck *deck\n\ttop *card\n\tasked suit\n\torder int\n\tgarbage []*card\n}\n\nfunc newGame() *game {\n\tg := &game{}\n\tif *randomStart {\n\t\tif *debug {\n\t\t\tfmt.Println(\"random_start\")\n\t\t}\n\t\tg.playing = randInt(2)\n\t}\n\tg.deck = newDeck()\n\tfor _, aiName := range strings.Split(*ais, \",\") {\n\t\tg.players = append(g.players, newPlayer(aiName))\n\t}\n\tfor nc := 0; nc < *startingCards; nc++ {\n\t\tfor _, player := range g.players {\n\t\t\tplayer.addCard(g.getCard())\n\t\t}\n\t}\n\tg.top = g.getCard()\n\tg.order = 1\n\treturn g\n}\n\nfunc (g *game) play() int {\n\tfor {\n\t\ttop, asked := g.player().play(g.top, g.asked, g.deck)\n\t\tif *debug {\n\t\t\tfmt.Println(g, top, asked)\n\t\t}\n\t\tif top == nil {\n\t\t\tg.addCard()\n\t\t\tg.next()\n\t\t\tcontinue\n\t\t}\n\t\tisPlayValid(top, asked, g.top, g.asked)\n\t\tif g.end() {\n\t\t\treturn g.playing\n\t\t}\n\t\tg.garbage = append(g.garbage, g.top)\n\t\tg.top, g.asked = top, asked\n\t\tswitch g.top.n {\n\t\tcase 1:\n\t\tcase 12:\n\t\t\tif len(g.players) == 2 {\n\t\t\t\tg.next()\n\t\t\t} else {\n\t\t\t\tg.order *= -1\n\t\t\t}\n\t\tcase 7:\n\t\t\tg.next()\n\t\t\tg.addCard()\n\t\t\tg.addCard()\n\t\t}\n\t\tg.next()\n\t}\n}\n\nfunc (g *game) player() *player {\n\treturn g.players[g.playing]\n}\n\nfunc (g *game) next() {\n\tg.playing = (g.playing + g.order + len(g.players)) % len(g.players)\n}\n\nfunc (g *game) getCard() *card {\n\tc := g.deck.getCard()\n\tif c != nil {\n\t\treturn c\n\t}\n\tif g.garbage == nil {\n\t\tlog.Fatal(\"empty garbage\")\n\t}\n\tg.deck.cs = g.garbage\n\tg.deck.shuffle()\n\tg.garbage = nil\n\treturn g.deck.getCard()\n}\n\nfunc (g *game) addCard() {\n\tg.player().addCard(g.getCard())\n}\n\nfunc (g *game) end() bool {\n\treturn g.player().end()\n}\n\nfunc (g *game) String() string {\n\tvar b bytes.Buffer\n\tb.WriteString(fmt.Sprintf(\"-> %v %v %v %v %v %v \", len(g.deck.cs), len(g.garbage), g.playing, g.top, g.asked, g.order))\n\tfor _, p := range g.players {\n\t\tb.WriteString(p.String())\n\t\tb.WriteString(\" \")\n\t}\n\treturn b.String()\n}\n\ntype deck struct {\n\tcs []*card\n}\n\nfunc newDeck() *deck {\n\td := &deck{}\n\tfor n := 1; n <= 13; n++ {\n\t\tfor s := spades; s <= clubs; s++ {\n\t\t\tfor i := 0; i < *decks; i++ {\n\t\t\t\td.cs = append(d.cs, &card{n, s})\n\t\t\t}\n\t\t}\n\t}\n\td.shuffle()\n\treturn d\n}\n\nfunc (d *deck) shuffle() {\n\tl := len(d.cs)\n\ts := make([]*card, l)\n\tfor i, c := range d.cs {\n\t\tr := randInt(i + 1)\n\t\ts[i] = s[r]\n\t\ts[r] = c\n\t}\n\td.cs = s\n}\n\nfunc (d *deck) getCard() *card {\n\tif len(d.cs) == 0 {\n\t\treturn nil\n\t}\n\tc := d.cs[0]\n\td.cs = d.cs[1:]\n\treturn c\n}\n\ntype card struct {\n\tn int\n\ts suit\n}\n\nfunc (c *card) String() string {\n\treturn strconv.Itoa(c.n) + \"\/\" + strconv.Itoa(int(c.s))\n}\n\ntype suit int\n\nconst (\n\tnoSuit suit = iota\n\tspades\n\thearts\n\tdiamonds\n\tclubs\n)\n\ntype player struct {\n\tcs []*card\n\tai ai\n}\n\nfunc newPlayer(aiName string) *player {\n\timp, ok := aiImplementation[aiName]\n\tif !ok {\n\t\tlog.Fatal(\"Unexisting ai: \", aiName)\n\t}\n\treturn &player{nil, imp}\n}\n\nfunc (p *player) addCard(c *card) {\n\tp.cs = append(p.cs, c)\n}\n\nfunc (p *player) play(top *card, asked suit, d *deck) (*card, suit) {\n\ti, s := p.ai(p.cs, top, asked, d)\n\tif i == -1 {\n\t\treturn nil, noSuit\n\t}\n\tc := p.cs[i]\n\tp.cs = append(p.cs[:i], p.cs[i+1:]...)\n\tif c.n != 11 {\n\t\treturn c, noSuit\n\t}\n\treturn c, s\n}\n\nfunc (p *player) end() bool {\n\treturn len(p.cs) == 0\n}\n\nfunc (p *player) String() string {\n\treturn fmt.Sprintf(\"%v\", p.cs)\n}\n\n\/\/ ai returns the index of the card in cs that should be played, -1 if there's\n\/\/ no card to play, and suit if the card is a 11.\ntype ai func(cs []*card, top *card, asked suit, d *deck) (int, suit)\n\nvar aiImplementation = map[string]ai{\n\t\"avoidJChooseSuit\": avoidJChooseSuitAI,\n\t\"aFirst\": aFirstAI,\n\t\"first\": firstAI,\n\t\"avoidJ\": avoidJAI,\n\t\"chooseSuit\": chooseSuitAI,\n\t\"onlyFirst\": onlyFirstAI,\n\t\"onlyBuy\": onlyBuyAI,\n}\n\nfunc avoidJChooseSuitAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], mostPopularSuit(cs)\n}\n\nfunc aFirstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 1, false); n != -1 {\n\t\treturn n, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], mostPopularSuit(cs)\n}\n\nfunc firstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\treturn is[0], suit(randInt(4) + 1)\n}\n\nfunc avoidJAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], suit(randInt(4) + 1)\n}\n\nfunc chooseSuitAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif cs[is[0]].n == 11 {\n\t\treturn is[0], mostPopularSuit(cs)\n\t}\n\treturn is[0], noSuit\n}\n\nfunc onlyFirstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tif cs[0].n == 11 {\n\t\treturn 0, spades\n\t}\n\tif asked != noSuit {\n\t\tif cs[0].s == asked {\n\t\t\treturn 0, noSuit\n\t\t}\n\t\treturn -1, noSuit\n\t}\n\tif cs[0].n == top.n || cs[0].s == top.s {\n\t\treturn 0, noSuit\n\t}\n\treturn -1, noSuit\n}\n\nfunc onlyBuyAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\treturn -1, noSuit\n}\n\nfunc validIndexes(cs []*card, top *card, asked suit) []int {\n\tvar is []int\n\tfor i, c := range cs {\n\t\tif c.n == 11 {\n\t\t\tis = append(is, i)\n\t\t\tcontinue\n\t\t}\n\t\tif asked != noSuit {\n\t\t\tif c.s == asked {\n\t\t\t\tis = append(is, i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif c.s == top.s || c.n == top.n {\n\t\t\tis = append(is, i)\n\t\t}\n\t}\n\treturn is\n}\n\nfunc mostPopularSuit(cs []*card) suit {\n\tsuits := make(map[suit]int)\n\tmps := noSuit\n\tfor _, c := range cs {\n\t\tif c.n == 11 {\n\t\t\tcontinue\n\t\t}\n\t\tsuits[c.s]++\n\t\tif suits[c.s] > suits[mps] {\n\t\t\tmps = c.s\n\t\t}\n\t}\n\tif mps == noSuit {\n\t\treturn suit(randInt(4) + 1)\n\t}\n\treturn mps\n}\n\nfunc findNumber(cs []*card, is []int, number int, inverse bool) int {\n\tfor _, i := range is {\n\t\tif (cs[i].n != number) == inverse {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc isPlayValid(newc *card, newa suit, oldc *card, olda suit) {\n\tif newc.n == 11 {\n\t\tif newa == noSuit {\n\t\t\tlog.Fatal(\"newc.n = 11, newa = noSuit\")\n\t\t}\n\t\treturn\n\t}\n\tif newa != noSuit {\n\t\tlog.Fatalf(\"newc.n = %v, newa = %v\", newc.n, newa)\n\t}\n\tif olda != noSuit {\n\t\tif newc.s != olda {\n\t\t\tlog.Fatalf(\"newc.s = %v, olda = %v\", newc.s, olda)\n\t\t}\n\t\treturn\n\t}\n\tif newc.n != oldc.n && newc.s != newc.s {\n\t\tlog.Fatalf(\"newc = %v, oldc = %v\", newc, oldc)\n\t}\n}\n\n\/\/ randInt returns a random number from 0 to max - 1.\nfunc randInt(max int) int {\n\tb, err := rand.Int(rand.Reader, big.NewInt(int64(max)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn int(b.Int64())\n}\n<commit_msg>correctA<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"math\/big\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tstartingCards = flag.Int(\"starting_cards\", 5, \"Number of cards each player should start with.\")\n\tnumGames = flag.Int(\"num_games\", 100, \"Number of games that will be played.\")\n\tnumTests = flag.Int(\"num_tests\", 100, \"Number of tests to be performed.\")\n\tais = flag.String(\"ais\", \"avoidJChooseSuit,avoidJChooseSuit\", \"AI algorithms to be used by each player separated by comma. The first player is the main one.\")\n\trandomStart = flag.Bool(\"random_start\", true, \"Defines who starts randomly. If false, the first player always starts.\")\n\tdecks = flag.Int(\"decks\", 1, \"Number of card decks to be used.\")\n\tdebug = flag.Bool(\"debug\", false, \"Print debug information\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tvar results []int\n\ttotal := 0\n\tfor i := 0; i < *numTests; i++ {\n\t\tres := runGame()\n\t\tresults = append(results, res)\n\t\ttotal += res\n\t}\n\tavg := float64(total) \/ float64(*numTests)\n\tvariance := 0.0\n\tfor _, res := range results {\n\t\tterm := float64(res) - avg\n\t\tvariance += term * term\n\t}\n\tvariance \/= float64(*numTests)\n\tfmt.Printf(\"%v+-%v\\n\", avg, math.Sqrt(variance))\n}\n\nfunc runGame() int {\n\tw := 0\n\tfor i := 0; i < *numGames; i++ {\n\t\tg := newGame()\n\t\tif g.play() == 0 {\n\t\t\tw++\n\t\t}\n\t}\n\treturn w\n}\n\ntype game struct {\n\tplayers []*player\n\tplaying int\n\tdeck *deck\n\ttop *card\n\tasked suit\n\torder int\n\tgarbage []*card\n}\n\nfunc newGame() *game {\n\tg := &game{}\n\tif *randomStart {\n\t\tif *debug {\n\t\t\tfmt.Println(\"random_start\")\n\t\t}\n\t\tg.playing = randInt(2)\n\t}\n\tg.deck = newDeck()\n\tfor _, aiName := range strings.Split(*ais, \",\") {\n\t\tg.players = append(g.players, newPlayer(aiName))\n\t}\n\tfor nc := 0; nc < *startingCards; nc++ {\n\t\tfor _, player := range g.players {\n\t\t\tplayer.addCard(g.getCard())\n\t\t}\n\t}\n\tg.top = g.getCard()\n\tg.order = 1\n\treturn g\n}\n\nfunc (g *game) play() int {\n\tfor {\n\t\ttop, asked := g.player().play(g.top, g.asked, g.deck)\n\t\tif *debug {\n\t\t\tfmt.Println(g, top, asked)\n\t\t}\n\t\tif top == nil {\n\t\t\tg.addCard()\n\t\t\tg.next()\n\t\t\tcontinue\n\t\t}\n\t\tisPlayValid(top, asked, g.top, g.asked)\n\t\tif g.end() {\n\t\t\treturn g.playing\n\t\t}\n\t\tg.garbage = append(g.garbage, g.top)\n\t\tg.top, g.asked = top, asked\n\t\tswitch g.top.n {\n\t\tcase 1:\n\t\t\tg.next()\n\t\tcase 12:\n\t\t\tif len(g.players) == 2 {\n\t\t\t\tg.next()\n\t\t\t} else {\n\t\t\t\tg.order *= -1\n\t\t\t}\n\t\tcase 7:\n\t\t\tg.next()\n\t\t\tg.addCard()\n\t\t\tg.addCard()\n\t\t}\n\t\tg.next()\n\t}\n}\n\nfunc (g *game) player() *player {\n\treturn g.players[g.playing]\n}\n\nfunc (g *game) next() {\n\tg.playing = (g.playing + g.order + len(g.players)) % len(g.players)\n}\n\nfunc (g *game) getCard() *card {\n\tc := g.deck.getCard()\n\tif c != nil {\n\t\treturn c\n\t}\n\tif g.garbage == nil {\n\t\tlog.Fatal(\"empty garbage\")\n\t}\n\tg.deck.cs = g.garbage\n\tg.deck.shuffle()\n\tg.garbage = nil\n\treturn g.deck.getCard()\n}\n\nfunc (g *game) addCard() {\n\tg.player().addCard(g.getCard())\n}\n\nfunc (g *game) end() bool {\n\treturn g.player().end()\n}\n\nfunc (g *game) String() string {\n\tvar b bytes.Buffer\n\tb.WriteString(fmt.Sprintf(\"-> %v %v %v %v %v %v \", len(g.deck.cs), len(g.garbage), g.playing, g.top, g.asked, g.order))\n\tfor _, p := range g.players {\n\t\tb.WriteString(p.String())\n\t\tb.WriteString(\" \")\n\t}\n\treturn b.String()\n}\n\ntype deck struct {\n\tcs []*card\n}\n\nfunc newDeck() *deck {\n\td := &deck{}\n\tfor n := 1; n <= 13; n++ {\n\t\tfor s := spades; s <= clubs; s++ {\n\t\t\tfor i := 0; i < *decks; i++ {\n\t\t\t\td.cs = append(d.cs, &card{n, s})\n\t\t\t}\n\t\t}\n\t}\n\td.shuffle()\n\treturn d\n}\n\nfunc (d *deck) shuffle() {\n\tl := len(d.cs)\n\ts := make([]*card, l)\n\tfor i, c := range d.cs {\n\t\tr := randInt(i + 1)\n\t\ts[i] = s[r]\n\t\ts[r] = c\n\t}\n\td.cs = s\n}\n\nfunc (d *deck) getCard() *card {\n\tif len(d.cs) == 0 {\n\t\treturn nil\n\t}\n\tc := d.cs[0]\n\td.cs = d.cs[1:]\n\treturn c\n}\n\ntype card struct {\n\tn int\n\ts suit\n}\n\nfunc (c *card) String() string {\n\treturn strconv.Itoa(c.n) + \"\/\" + strconv.Itoa(int(c.s))\n}\n\ntype suit int\n\nconst (\n\tnoSuit suit = iota\n\tspades\n\thearts\n\tdiamonds\n\tclubs\n)\n\ntype player struct {\n\tcs []*card\n\tai ai\n}\n\nfunc newPlayer(aiName string) *player {\n\timp, ok := aiImplementation[aiName]\n\tif !ok {\n\t\tlog.Fatal(\"Unexisting ai: \", aiName)\n\t}\n\treturn &player{nil, imp}\n}\n\nfunc (p *player) addCard(c *card) {\n\tp.cs = append(p.cs, c)\n}\n\nfunc (p *player) play(top *card, asked suit, d *deck) (*card, suit) {\n\ti, s := p.ai(p.cs, top, asked, d)\n\tif i == -1 {\n\t\treturn nil, noSuit\n\t}\n\tc := p.cs[i]\n\tp.cs = append(p.cs[:i], p.cs[i+1:]...)\n\tif c.n != 11 {\n\t\treturn c, noSuit\n\t}\n\treturn c, s\n}\n\nfunc (p *player) end() bool {\n\treturn len(p.cs) == 0\n}\n\nfunc (p *player) String() string {\n\treturn fmt.Sprintf(\"%v\", p.cs)\n}\n\n\/\/ ai returns the index of the card in cs that should be played, -1 if there's\n\/\/ no card to play, and suit if the card is a 11.\ntype ai func(cs []*card, top *card, asked suit, d *deck) (int, suit)\n\nvar aiImplementation = map[string]ai{\n\t\"avoidJChooseSuit\": avoidJChooseSuitAI,\n\t\"aFirst\": aFirstAI,\n\t\"first\": firstAI,\n\t\"avoidJ\": avoidJAI,\n\t\"chooseSuit\": chooseSuitAI,\n\t\"onlyFirst\": onlyFirstAI,\n\t\"onlyBuy\": onlyBuyAI,\n}\n\nfunc avoidJChooseSuitAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], mostPopularSuit(cs)\n}\n\nfunc aFirstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 1, false); n != -1 {\n\t\treturn n, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], mostPopularSuit(cs)\n}\n\nfunc firstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\treturn is[0], suit(randInt(4) + 1)\n}\n\nfunc avoidJAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif n := findNumber(cs, is, 11, true); n != -1 {\n\t\treturn n, noSuit\n\t}\n\treturn is[0], suit(randInt(4) + 1)\n}\n\nfunc chooseSuitAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tis := validIndexes(cs, top, asked)\n\tif len(is) == 0 {\n\t\treturn -1, noSuit\n\t}\n\tif cs[is[0]].n == 11 {\n\t\treturn is[0], mostPopularSuit(cs)\n\t}\n\treturn is[0], noSuit\n}\n\nfunc onlyFirstAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\tif cs[0].n == 11 {\n\t\treturn 0, spades\n\t}\n\tif asked != noSuit {\n\t\tif cs[0].s == asked {\n\t\t\treturn 0, noSuit\n\t\t}\n\t\treturn -1, noSuit\n\t}\n\tif cs[0].n == top.n || cs[0].s == top.s {\n\t\treturn 0, noSuit\n\t}\n\treturn -1, noSuit\n}\n\nfunc onlyBuyAI(cs []*card, top *card, asked suit, d *deck) (int, suit) {\n\treturn -1, noSuit\n}\n\nfunc validIndexes(cs []*card, top *card, asked suit) []int {\n\tvar is []int\n\tfor i, c := range cs {\n\t\tif c.n == 11 {\n\t\t\tis = append(is, i)\n\t\t\tcontinue\n\t\t}\n\t\tif asked != noSuit {\n\t\t\tif c.s == asked {\n\t\t\t\tis = append(is, i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif c.s == top.s || c.n == top.n {\n\t\t\tis = append(is, i)\n\t\t}\n\t}\n\treturn is\n}\n\nfunc mostPopularSuit(cs []*card) suit {\n\tsuits := make(map[suit]int)\n\tmps := noSuit\n\tfor _, c := range cs {\n\t\tif c.n == 11 {\n\t\t\tcontinue\n\t\t}\n\t\tsuits[c.s]++\n\t\tif suits[c.s] > suits[mps] {\n\t\t\tmps = c.s\n\t\t}\n\t}\n\tif mps == noSuit {\n\t\treturn suit(randInt(4) + 1)\n\t}\n\treturn mps\n}\n\nfunc findNumber(cs []*card, is []int, number int, inverse bool) int {\n\tfor _, i := range is {\n\t\tif (cs[i].n != number) == inverse {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc isPlayValid(newc *card, newa suit, oldc *card, olda suit) {\n\tif newc.n == 11 {\n\t\tif newa == noSuit {\n\t\t\tlog.Fatal(\"newc.n = 11, newa = noSuit\")\n\t\t}\n\t\treturn\n\t}\n\tif newa != noSuit {\n\t\tlog.Fatalf(\"newc.n = %v, newa = %v\", newc.n, newa)\n\t}\n\tif olda != noSuit {\n\t\tif newc.s != olda {\n\t\t\tlog.Fatalf(\"newc.s = %v, olda = %v\", newc.s, olda)\n\t\t}\n\t\treturn\n\t}\n\tif newc.n != oldc.n && newc.s != newc.s {\n\t\tlog.Fatalf(\"newc = %v, oldc = %v\", newc, oldc)\n\t}\n}\n\n\/\/ randInt returns a random number from 0 to max - 1.\nfunc randInt(max int) int {\n\tb, err := rand.Int(rand.Reader, big.NewInt(int64(max)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn int(b.Int64())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Mender Software AS\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mendersoftware\/log\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Controller interface {\n\tAuthorize() menderError\n\tBootstrap() menderError\n\tGetCurrentImageID() string\n\tGetUpdatePollInterval() time.Duration\n\tHasUpgrade() (bool, menderError)\n\tCheckUpdate() (*UpdateResponse, menderError)\n\tFetchUpdate(url string) (io.ReadCloser, int64, error)\n\tReportUpdateStatus(update UpdateResponse, status string) menderError\n\tUploadLog(update UpdateResponse, logs []byte) menderError\n\n\tUInstallCommitRebooter\n\tStateRunner\n}\n\nconst (\n\tdefaultManifestFile = \"\/etc\/mender\/build_mender\"\n\tdefaultKeyFile = \"mender-agent.pem\"\n\tdefaultDataStore = \"\/var\/lib\/mender\"\n)\n\ntype MenderState int\n\nconst (\n\t\/\/ initial state\n\tMenderStateInit MenderState = iota\n\t\/\/ client is bootstrapped, i.e. ready to go\n\tMenderStateBootstrapped\n\t\/\/ client has all authorization data available\n\tMenderStateAuthorized\n\t\/\/ wait before authorization attempt\n\tMenderStateAuthorizeWait\n\t\/\/ wait for new update\n\tMenderStateUpdateCheckWait\n\t\/\/ check update\n\tMenderStateUpdateCheck\n\t\/\/ update fetch\n\tMenderStateUpdateFetch\n\t\/\/ update install\n\tMenderStateUpdateInstall\n\t\/\/ varify update\n\tMenderStateUpdateVerify\n\t\/\/ commit needed\n\tMenderStateUpdateCommit\n\t\/\/ status report\n\tMenderStateUpdateStatusReport\n\t\/\/ errro reporting status\n\tMenderStateReportStatusError\n\t\/\/ reboot\n\tMenderStateReboot\n\t\/\/rollback\n\tMenderStateRollback\n\t\/\/ error\n\tMenderStateError\n\t\/\/ update error\n\tMenderStateUpdateError\n\t\/\/ exit state\n\tMenderStateDone\n)\n\ntype mender struct {\n\tUInstallCommitRebooter\n\tupdater Updater\n\tenv BootEnvReadWriter\n\tstate State\n\tconfig menderConfig\n\tmanifestFile string\n\tforceBootstrap bool\n\tauthReq AuthRequester\n\tauthMgr AuthManager\n\tapi *ApiClient\n\tauthToken AuthToken\n}\n\ntype MenderPieces struct {\n\tdevice UInstallCommitRebooter\n\tenv BootEnvReadWriter\n\tstore Store\n\tauthMgr AuthManager\n}\n\nfunc NewMender(config menderConfig, pieces MenderPieces) (*mender, error) {\n\tapi, err := NewApiClient(config.GetHttpConfig())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error creating HTTP client\")\n\t}\n\n\tm := &mender{\n\t\tUInstallCommitRebooter: pieces.device,\n\t\tupdater: NewUpdateClient(),\n\t\tenv: pieces.env,\n\t\tmanifestFile: defaultManifestFile,\n\t\tstate: initState,\n\t\tconfig: config,\n\t\tauthMgr: pieces.authMgr,\n\t\tauthReq: NewAuthClient(),\n\t\tapi: api,\n\t\tauthToken: noAuthToken,\n\t}\n\treturn m, nil\n}\n\nfunc (m mender) GetCurrentImageID() string {\n\t\/\/ This is where Yocto stores buid information\n\tmanifest, err := os.Open(m.manifestFile)\n\tif err != nil {\n\t\tlog.Error(\"Can not read current image id.\")\n\t\treturn \"\"\n\t}\n\n\timageID := \"\"\n\n\tscanner := bufio.NewScanner(manifest)\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\tlog.Debug(\"Read data from device manifest file: \", line)\n\t\tif strings.HasPrefix(line, \"IMAGE_ID\") {\n\t\t\tlog.Debug(\"Found device id line: \", line)\n\t\t\tlineID := strings.Split(line, \"=\")\n\t\t\tif len(lineID) != 2 {\n\t\t\t\tlog.Errorf(\"Broken device manifest file: (%v)\", lineID)\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\tlog.Debug(\"Current image id: \", strings.TrimSpace(lineID[1]))\n\t\t\treturn strings.TrimSpace(lineID[1])\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Error(err)\n\t}\n\treturn imageID\n}\n\nfunc (m *mender) HasUpgrade() (bool, menderError) {\n\tenv, err := m.env.ReadEnv(\"upgrade_available\")\n\tif err != nil {\n\t\treturn false, NewFatalError(err)\n\t}\n\tupgradeAvailable := env[\"upgrade_available\"]\n\n\t\/\/ we are after update\n\tif upgradeAvailable == \"1\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (m *mender) ForceBootstrap() {\n\tm.forceBootstrap = true\n}\n\nfunc (m *mender) needsBootstrap() bool {\n\tif m.forceBootstrap {\n\t\treturn true\n\t}\n\n\tif !m.authMgr.HasKey() {\n\t\tlog.Debugf(\"needs keys\")\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (m *mender) Bootstrap() menderError {\n\tif !m.needsBootstrap() {\n\t\treturn nil\n\t}\n\n\treturn m.doBootstrap()\n}\n\n\/\/ cache authorization code\nfunc (m *mender) loadAuth() menderError {\n\tif m.authToken != noAuthToken {\n\t\treturn nil\n\t}\n\n\tcode, err := m.authMgr.AuthToken()\n\tif err != nil {\n\t\treturn NewFatalError(errors.Wrap(err, \"failed to cache authorization code\"))\n\t}\n\n\tm.authToken = code\n\treturn nil\n}\n\nfunc (m *mender) Authorize() menderError {\n\tif m.authMgr.IsAuthorized() {\n\t\tlog.Info(\"authorization data present and valid, skipping authorization attempt\")\n\t\treturn m.loadAuth()\n\t}\n\n\tm.authToken = noAuthToken\n\n\trsp, err := m.authReq.Request(m.api, m.config.ServerURL, m.authMgr)\n\tif err != nil {\n\t\tif err == AuthErrorUnauthorized {\n\t\t\t\/\/ make sure to remove auth token once device is rejected\n\t\t\tif remErr := m.authMgr.RemoveAuthToken(); remErr != nil {\n\t\t\t\tlog.Warn(\"can not remove rejected authentication token\")\n\t\t\t}\n\t\t}\n\t\treturn NewTransientError(errors.Wrap(err, \"authorization request failed\"))\n\t}\n\n\terr = m.authMgr.RecvAuthResponse(rsp)\n\tif err != nil {\n\t\treturn NewTransientError(errors.Wrap(err, \"failed to parse authorization response\"))\n\t}\n\n\tlog.Info(\"successfuly received new authorization data\")\n\n\treturn m.loadAuth()\n}\n\nfunc (m *mender) doBootstrap() menderError {\n\tif !m.authMgr.HasKey() || m.forceBootstrap {\n\t\tlog.Infof(\"device keys not present or bootstrap forced, generating\")\n\t\tif err := m.authMgr.GenerateKey(); err != nil {\n\t\t\treturn NewFatalError(err)\n\t\t}\n\n\t}\n\n\tm.forceBootstrap = false\n\n\treturn nil\n}\n\nfunc (m *mender) FetchUpdate(url string) (io.ReadCloser, int64, error) {\n\treturn m.updater.FetchUpdate(m.api.Request(m.authToken), url)\n}\n\n\/\/ Check if new update is available. In case of errors, returns nil and error\n\/\/ that occurred. If no update is available *UpdateResponse is nil, otherwise it\n\/\/ contains update information.\nfunc (m *mender) CheckUpdate() (*UpdateResponse, menderError) {\n\tcurrentImageID := m.GetCurrentImageID()\n\t\/\/TODO: if currentImageID == \"\" {\n\t\/\/ \treturn errors.New(\"\")\n\t\/\/ }\n\n\thaveUpdate, err := m.updater.GetScheduledUpdate(m.api.Request(m.authToken),\n\t\tm.config.ServerURL)\n\tif err != nil {\n\t\tlog.Error(\"Error receiving scheduled update data: \", err)\n\t\treturn nil, NewTransientError(err)\n\t}\n\n\tif haveUpdate == nil {\n\t\tlog.Debug(\"no updates available\")\n\t\treturn nil, nil\n\t}\n\tupdate, ok := haveUpdate.(UpdateResponse)\n\tif !ok {\n\t\treturn nil, NewTransientError(errors.Errorf(\"not an update response?\"))\n\t}\n\n\tlog.Debugf(\"received update response: %v\", update)\n\n\tif update.Image.YoctoID == currentImageID {\n\t\treturn nil, nil\n\t}\n\treturn &update, nil\n}\n\nfunc (m *mender) ReportUpdateStatus(update UpdateResponse, status string) menderError {\n\ts := NewStatusClient()\n\terr := s.Report(m.api.Request(m.authToken), m.config.ServerURL,\n\t\tStatusReport{\n\t\t\tdeploymentID: update.ID,\n\t\t\tStatus: status,\n\t\t})\n\tif err != nil {\n\t\tlog.Error(\"error reporting update status: \", err)\n\t\treturn NewTransientError(err)\n\t}\n\treturn nil\n}\n\nfunc (m *mender) UploadLog(update UpdateResponse, logs []byte) menderError {\n\ts := NewLogUploadClient()\n\terr := s.Upload(m.api.Request(m.authToken), m.config.ServerURL,\n\t\tLogData{\n\t\t\tdeploymentID: update.ID,\n\t\t\tMessages: logs,\n\t\t})\n\tif err != nil {\n\t\tlog.Error(\"error uploading logs: \", err)\n\t\treturn NewTransientError(err)\n\t}\n\treturn nil\n}\n\nfunc (m mender) GetUpdatePollInterval() time.Duration {\n\treturn time.Duration(m.config.PollIntervalSeconds) * time.Second\n}\n\nfunc (m *mender) SetState(s State) {\n\tlog.Infof(\"Mender state: %v -> %v\", m.state.Id(), s.Id())\n\tm.state = s\n}\n\nfunc (m *mender) GetState() State {\n\treturn m.state\n}\n\nfunc (m *mender) RunState(ctx *StateContext) (State, bool) {\n\treturn m.state.Handle(ctx, m)\n}\n<commit_msg>Warn that update will not be performed if update image id same as current<commit_after>\/\/ Copyright 2016 Mender Software AS\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mendersoftware\/log\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Controller interface {\n\tAuthorize() menderError\n\tBootstrap() menderError\n\tGetCurrentImageID() string\n\tGetUpdatePollInterval() time.Duration\n\tHasUpgrade() (bool, menderError)\n\tCheckUpdate() (*UpdateResponse, menderError)\n\tFetchUpdate(url string) (io.ReadCloser, int64, error)\n\tReportUpdateStatus(update UpdateResponse, status string) menderError\n\tUploadLog(update UpdateResponse, logs []byte) menderError\n\n\tUInstallCommitRebooter\n\tStateRunner\n}\n\nconst (\n\tdefaultManifestFile = \"\/etc\/mender\/build_mender\"\n\tdefaultKeyFile = \"mender-agent.pem\"\n\tdefaultDataStore = \"\/var\/lib\/mender\"\n)\n\ntype MenderState int\n\nconst (\n\t\/\/ initial state\n\tMenderStateInit MenderState = iota\n\t\/\/ client is bootstrapped, i.e. ready to go\n\tMenderStateBootstrapped\n\t\/\/ client has all authorization data available\n\tMenderStateAuthorized\n\t\/\/ wait before authorization attempt\n\tMenderStateAuthorizeWait\n\t\/\/ wait for new update\n\tMenderStateUpdateCheckWait\n\t\/\/ check update\n\tMenderStateUpdateCheck\n\t\/\/ update fetch\n\tMenderStateUpdateFetch\n\t\/\/ update install\n\tMenderStateUpdateInstall\n\t\/\/ varify update\n\tMenderStateUpdateVerify\n\t\/\/ commit needed\n\tMenderStateUpdateCommit\n\t\/\/ status report\n\tMenderStateUpdateStatusReport\n\t\/\/ errro reporting status\n\tMenderStateReportStatusError\n\t\/\/ reboot\n\tMenderStateReboot\n\t\/\/rollback\n\tMenderStateRollback\n\t\/\/ error\n\tMenderStateError\n\t\/\/ update error\n\tMenderStateUpdateError\n\t\/\/ exit state\n\tMenderStateDone\n)\n\ntype mender struct {\n\tUInstallCommitRebooter\n\tupdater Updater\n\tenv BootEnvReadWriter\n\tstate State\n\tconfig menderConfig\n\tmanifestFile string\n\tforceBootstrap bool\n\tauthReq AuthRequester\n\tauthMgr AuthManager\n\tapi *ApiClient\n\tauthToken AuthToken\n}\n\ntype MenderPieces struct {\n\tdevice UInstallCommitRebooter\n\tenv BootEnvReadWriter\n\tstore Store\n\tauthMgr AuthManager\n}\n\nfunc NewMender(config menderConfig, pieces MenderPieces) (*mender, error) {\n\tapi, err := NewApiClient(config.GetHttpConfig())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error creating HTTP client\")\n\t}\n\n\tm := &mender{\n\t\tUInstallCommitRebooter: pieces.device,\n\t\tupdater: NewUpdateClient(),\n\t\tenv: pieces.env,\n\t\tmanifestFile: defaultManifestFile,\n\t\tstate: initState,\n\t\tconfig: config,\n\t\tauthMgr: pieces.authMgr,\n\t\tauthReq: NewAuthClient(),\n\t\tapi: api,\n\t\tauthToken: noAuthToken,\n\t}\n\treturn m, nil\n}\n\nfunc (m mender) GetCurrentImageID() string {\n\t\/\/ This is where Yocto stores buid information\n\tmanifest, err := os.Open(m.manifestFile)\n\tif err != nil {\n\t\tlog.Error(\"Can not read current image id.\")\n\t\treturn \"\"\n\t}\n\n\timageID := \"\"\n\n\tscanner := bufio.NewScanner(manifest)\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\tlog.Debug(\"Read data from device manifest file: \", line)\n\t\tif strings.HasPrefix(line, \"IMAGE_ID\") {\n\t\t\tlog.Debug(\"Found device id line: \", line)\n\t\t\tlineID := strings.Split(line, \"=\")\n\t\t\tif len(lineID) != 2 {\n\t\t\t\tlog.Errorf(\"Broken device manifest file: (%v)\", lineID)\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\tlog.Debug(\"Current image id: \", strings.TrimSpace(lineID[1]))\n\t\t\treturn strings.TrimSpace(lineID[1])\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Error(err)\n\t}\n\treturn imageID\n}\n\nfunc (m *mender) HasUpgrade() (bool, menderError) {\n\tenv, err := m.env.ReadEnv(\"upgrade_available\")\n\tif err != nil {\n\t\treturn false, NewFatalError(err)\n\t}\n\tupgradeAvailable := env[\"upgrade_available\"]\n\n\t\/\/ we are after update\n\tif upgradeAvailable == \"1\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (m *mender) ForceBootstrap() {\n\tm.forceBootstrap = true\n}\n\nfunc (m *mender) needsBootstrap() bool {\n\tif m.forceBootstrap {\n\t\treturn true\n\t}\n\n\tif !m.authMgr.HasKey() {\n\t\tlog.Debugf(\"needs keys\")\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (m *mender) Bootstrap() menderError {\n\tif !m.needsBootstrap() {\n\t\treturn nil\n\t}\n\n\treturn m.doBootstrap()\n}\n\n\/\/ cache authorization code\nfunc (m *mender) loadAuth() menderError {\n\tif m.authToken != noAuthToken {\n\t\treturn nil\n\t}\n\n\tcode, err := m.authMgr.AuthToken()\n\tif err != nil {\n\t\treturn NewFatalError(errors.Wrap(err, \"failed to cache authorization code\"))\n\t}\n\n\tm.authToken = code\n\treturn nil\n}\n\nfunc (m *mender) Authorize() menderError {\n\tif m.authMgr.IsAuthorized() {\n\t\tlog.Info(\"authorization data present and valid, skipping authorization attempt\")\n\t\treturn m.loadAuth()\n\t}\n\n\tm.authToken = noAuthToken\n\n\trsp, err := m.authReq.Request(m.api, m.config.ServerURL, m.authMgr)\n\tif err != nil {\n\t\tif err == AuthErrorUnauthorized {\n\t\t\t\/\/ make sure to remove auth token once device is rejected\n\t\t\tif remErr := m.authMgr.RemoveAuthToken(); remErr != nil {\n\t\t\t\tlog.Warn(\"can not remove rejected authentication token\")\n\t\t\t}\n\t\t}\n\t\treturn NewTransientError(errors.Wrap(err, \"authorization request failed\"))\n\t}\n\n\terr = m.authMgr.RecvAuthResponse(rsp)\n\tif err != nil {\n\t\treturn NewTransientError(errors.Wrap(err, \"failed to parse authorization response\"))\n\t}\n\n\tlog.Info(\"successfuly received new authorization data\")\n\n\treturn m.loadAuth()\n}\n\nfunc (m *mender) doBootstrap() menderError {\n\tif !m.authMgr.HasKey() || m.forceBootstrap {\n\t\tlog.Infof(\"device keys not present or bootstrap forced, generating\")\n\t\tif err := m.authMgr.GenerateKey(); err != nil {\n\t\t\treturn NewFatalError(err)\n\t\t}\n\n\t}\n\n\tm.forceBootstrap = false\n\n\treturn nil\n}\n\nfunc (m *mender) FetchUpdate(url string) (io.ReadCloser, int64, error) {\n\treturn m.updater.FetchUpdate(m.api.Request(m.authToken), url)\n}\n\n\/\/ Check if new update is available. In case of errors, returns nil and error\n\/\/ that occurred. If no update is available *UpdateResponse is nil, otherwise it\n\/\/ contains update information.\nfunc (m *mender) CheckUpdate() (*UpdateResponse, menderError) {\n\tcurrentImageID := m.GetCurrentImageID()\n\t\/\/TODO: if currentImageID == \"\" {\n\t\/\/ \treturn errors.New(\"\")\n\t\/\/ }\n\n\thaveUpdate, err := m.updater.GetScheduledUpdate(m.api.Request(m.authToken),\n\t\tm.config.ServerURL)\n\tif err != nil {\n\t\tlog.Error(\"Error receiving scheduled update data: \", err)\n\t\treturn nil, NewTransientError(err)\n\t}\n\n\tif haveUpdate == nil {\n\t\tlog.Debug(\"no updates available\")\n\t\treturn nil, nil\n\t}\n\tupdate, ok := haveUpdate.(UpdateResponse)\n\tif !ok {\n\t\treturn nil, NewTransientError(errors.Errorf(\"not an update response?\"))\n\t}\n\n\tlog.Debugf(\"received update response: %v\", update)\n\n\tif update.Image.YoctoID == currentImageID {\n\t\tlog.Info(\"Attempting to upgrade to currently installed image ID, not performing upgrade.\")\n\t\treturn nil, nil\n\t}\n\treturn &update, nil\n}\n\nfunc (m *mender) ReportUpdateStatus(update UpdateResponse, status string) menderError {\n\ts := NewStatusClient()\n\terr := s.Report(m.api.Request(m.authToken), m.config.ServerURL,\n\t\tStatusReport{\n\t\t\tdeploymentID: update.ID,\n\t\t\tStatus: status,\n\t\t})\n\tif err != nil {\n\t\tlog.Error(\"error reporting update status: \", err)\n\t\treturn NewTransientError(err)\n\t}\n\treturn nil\n}\n\nfunc (m *mender) UploadLog(update UpdateResponse, logs []byte) menderError {\n\ts := NewLogUploadClient()\n\terr := s.Upload(m.api.Request(m.authToken), m.config.ServerURL,\n\t\tLogData{\n\t\t\tdeploymentID: update.ID,\n\t\t\tMessages: logs,\n\t\t})\n\tif err != nil {\n\t\tlog.Error(\"error uploading logs: \", err)\n\t\treturn NewTransientError(err)\n\t}\n\treturn nil\n}\n\nfunc (m mender) GetUpdatePollInterval() time.Duration {\n\treturn time.Duration(m.config.PollIntervalSeconds) * time.Second\n}\n\nfunc (m *mender) SetState(s State) {\n\tlog.Infof(\"Mender state: %v -> %v\", m.state.Id(), s.Id())\n\tm.state = s\n}\n\nfunc (m *mender) GetState() State {\n\treturn m.state\n}\n\nfunc (m *mender) RunState(ctx *StateContext) (State, bool) {\n\treturn m.state.Handle(ctx, m)\n}\n<|endoftext|>"} {"text":"<commit_before>package loops\n\nimport (\n\tgl \"github.com\/go-gl\/gl\/v4.1-core\/gl\"\n)\n\nvar programSolid2d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec2 v_position;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 0, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tfragColor = color;\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n\nvar programColored2d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec2 v_position;\n\t\t\t\tin vec4 v_color;\n\t\t\t\tout vec4 f_color;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tf_color = v_color;\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 0, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\t\t\t\tin vec4 f_color;\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tfragColor = color * f_color;\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n\nvar programSolid3d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec3 v_position;\n\t\t\t\tin vec3 v_normal;\n\n\t\t\t\tout vec3 f_position;\n\t\t\t\tout vec3 f_normal;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tf_normal = v_normal;\n\t\t\t\t\tf_position = vec3(object_mat * vec4(v_position, 1));\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\n\t\t\t\tin vec3 f_normal;\n\t\t\t\tin vec3 f_position;\n\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tvec3 cam = vec3(0,0,0);\n\t\t\t\t\tvec3 at_cam = cam - f_position;\n\t\t\t\t\tfragColor = vec4(vec3(1,1,1) * dot(normalize(at_cam), f_normal), 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n<commit_msg>fix lighting<commit_after>package loops\n\nimport (\n\tgl \"github.com\/go-gl\/gl\/v4.1-core\/gl\"\n)\n\nvar programSolid2d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec2 v_position;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 0, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tfragColor = color;\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n\nvar programColored2d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec2 v_position;\n\t\t\t\tin vec4 v_color;\n\t\t\t\tout vec4 f_color;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tf_color = v_color;\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 0, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\t\t\t\tin vec4 f_color;\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tfragColor = color * f_color;\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n\nvar programSolid3d = ProgramSource{\n\t[]ShaderSource{\n\t\tShaderSource{\n\t\t\tgl.VERTEX_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\n\t\t\t\tuniform mat4 object_mat;\n\t\t\t\tuniform mat4 view_mat;\n\n\t\t\t\tin vec3 v_position;\n\t\t\t\tin vec3 v_normal;\n\n\t\t\t\tout vec3 f_position;\n\t\t\t\tout vec3 f_normal;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tf_normal = vec3(object_mat * vec4(v_normal, 0));\n\t\t\t\t\tf_position = vec3(object_mat * vec4(v_position, 1));\n\t\t\t\t\tgl_Position = view_mat * object_mat * vec4(v_position, 1);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t\tShaderSource{\n\t\t\tgl.FRAGMENT_SHADER,\n\t\t\t`\n\t\t\t\t#version 330\n\t\t\t\tuniform vec4 color;\n\n\t\t\t\tin vec3 f_normal;\n\t\t\t\tin vec3 f_position;\n\n\t\t\t\tout vec4 fragColor;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tvec3 cam = vec3(0,0,0);\n\t\t\t\t\tvec3 at_cam = cam - f_position;\n\t\t\t\t\tfragColor = vec4(color.rgb * max(0.3, dot(normalize(at_cam), f_normal)), color);\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build linux && cgo && !agent\n\/\/ +build linux,cgo,!agent\n\npackage db\n\nimport (\n\t\"fmt\"\n)\n\nfunc deviceTypeToString(t int) (string, error) {\n\tswitch t {\n\tcase 0:\n\t\treturn \"none\", nil\n\tcase 1:\n\t\treturn \"nic\", nil\n\tcase 2:\n\t\treturn \"disk\", nil\n\tcase 3:\n\t\treturn \"unix-char\", nil\n\tcase 4:\n\t\treturn \"unix-block\", nil\n\tcase 5:\n\t\treturn \"usb\", nil\n\tcase 6:\n\t\treturn \"gpu\", nil\n\tcase 7:\n\t\treturn \"infiniband\", nil\n\tcase 8:\n\t\treturn \"proxy\", nil\n\tcase 9:\n\t\treturn \"unix-hotplug\", nil\n\tcase 10:\n\t\treturn \"tpm\", nil\n\tcase 11:\n\t\treturn \"pci\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Invalid device type %d\", t)\n\t}\n}\n\nfunc deviceTypeToInt(t string) (int, error) {\n\tswitch t {\n\tcase \"none\":\n\t\treturn 0, nil\n\tcase \"nic\":\n\t\treturn 1, nil\n\tcase \"disk\":\n\t\treturn 2, nil\n\tcase \"unix-char\":\n\t\treturn 3, nil\n\tcase \"unix-block\":\n\t\treturn 4, nil\n\tcase \"usb\":\n\t\treturn 5, nil\n\tcase \"gpu\":\n\t\treturn 6, nil\n\tcase \"infiniband\":\n\t\treturn 7, nil\n\tcase \"proxy\":\n\t\treturn 8, nil\n\tcase \"unix-hotplug\":\n\t\treturn 9, nil\n\tcase \"tpm\":\n\t\treturn 10, nil\n\tcase \"pci\":\n\t\treturn 11, nil\n\tdefault:\n\t\treturn -1, fmt.Errorf(\"Invalid device type %s\", t)\n\t}\n}\n<commit_msg>lxd\/db\/devices: add Devices struct and generator comments<commit_after>\/\/go:build linux && cgo && !agent\n\/\/ +build linux,cgo,!agent\n\npackage db\n\nimport \"fmt\"\n\n\/\/ Code generation directives.\n\/\/\n\/\/go:generate -command mapper lxd-generate db mapper -t devices.mapper.go\n\/\/go:generate mapper reset\n\/\/\n\/\/go:generate mapper stmt -p db -e device objects\n\/\/go:generate mapper stmt -p db -e device create struct=Device\n\/\/go:generate mapper stmt -p db -e device delete\n\/\/\n\/\/go:generate mapper method -p db -e device GetMany\n\/\/go:generate mapper method -p db -e device Create struct=Device\n\/\/go:generate mapper method -p db -e device Update struct=Device\n\/\/go:generate mapper method -p db -e device DeleteMany\n\n\/\/ DeviceType represents the types of supported devices.\ntype DeviceType int\n\n\/\/ Device is a reference struct representing another entity's device.\ntype Device struct {\n\tID int\n\tReferenceID int\n\tName string\n\tType DeviceType\n\tConfig map[string]string\n}\n\n\/\/ Supported device types.\nconst (\n\tTypeNone = DeviceType(0)\n\tTypeNIC = DeviceType(1)\n\tTypeDisk = DeviceType(2)\n\tTypeUnixChar = DeviceType(3)\n\tTypeUnixBlock = DeviceType(4)\n\tTypeUSB = DeviceType(5)\n\tTypeGPU = DeviceType(6)\n\tTypeInfiniband = DeviceType(7)\n\tTypeProxy = DeviceType(8)\n\tTypeUnixHotplug = DeviceType(9)\n\tTypeTPM = DeviceType(10)\n\tTypePCI = DeviceType(11)\n)\n\nfunc (t DeviceType) String() string {\n\tswitch t {\n\tcase TypeNone:\n\t\treturn \"none\"\n\tcase TypeNIC:\n\t\treturn \"nic\"\n\tcase TypeDisk:\n\t\treturn \"disk\"\n\tcase TypeUnixChar:\n\t\treturn \"unix-char\"\n\tcase TypeUnixBlock:\n\t\treturn \"unix-block\"\n\tcase TypeUSB:\n\t\treturn \"usb\"\n\tcase TypeGPU:\n\t\treturn \"gpu\"\n\tcase TypeInfiniband:\n\t\treturn \"infiniband\"\n\tcase TypeProxy:\n\t\treturn \"proxy\"\n\tcase TypeUnixHotplug:\n\t\treturn \"unix-hotplug\"\n\tcase TypeTPM:\n\t\treturn \"tpm\"\n\tcase TypePCI:\n\t\treturn \"pci\"\n\t}\n\n\treturn \"\"\n}\n\n\/\/ NewDeviceType determines the device type from the given string, if supported.\nfunc NewDeviceType(t string) (DeviceType, error) {\n\tswitch t {\n\tcase \"none\":\n\t\treturn TypeNone, nil\n\tcase \"nic\":\n\t\treturn TypeNIC, nil\n\tcase \"disk\":\n\t\treturn TypeDisk, nil\n\tcase \"unix-char\":\n\t\treturn TypeUnixChar, nil\n\tcase \"unix-block\":\n\t\treturn TypeUnixBlock, nil\n\tcase \"usb\":\n\t\treturn TypeUSB, nil\n\tcase \"gpu\":\n\t\treturn TypeGPU, nil\n\tcase \"infiniband\":\n\t\treturn TypeInfiniband, nil\n\tcase \"proxy\":\n\t\treturn TypeProxy, nil\n\tcase \"unix-hotplug\":\n\t\treturn TypeUnixHotplug, nil\n\tcase \"tpm\":\n\t\treturn TypeTPM, nil\n\tcase \"pci\":\n\t\treturn TypePCI, nil\n\tdefault:\n\t\treturn -1, fmt.Errorf(\"Invalid device type %s\", t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package libgobuster\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tuserAgent = \"gobuster\"\n)\n\ntype httpClient struct {\n\tclient *http.Client\n\tcontext context.Context\n\tuserAgent string\n\tusername string\n\tpassword string\n\tincludeLength bool\n}\n\n\/\/ NewHTTPClient returns a new HTTPClient\nfunc newHTTPClient(c context.Context, opt *Options) (*httpClient, error) {\n\tvar proxyURLFunc func(*http.Request) (*url.URL, error)\n\tvar client httpClient\n\tproxyURLFunc = http.ProxyFromEnvironment\n\n\tif opt.Proxy != \"\" {\n\t\tproxyURL, err := url.Parse(opt.Proxy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[!] Proxy URL is invalid\")\n\t\t}\n\t\tproxyURLFunc = http.ProxyURL(proxyURL)\n\t}\n\n\tvar redirectFunc func(req *http.Request, via []*http.Request) error\n\tif !opt.FollowRedirect {\n\t\tredirectFunc = func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t}\n\t} else {\n\t\tredirectFunc = nil\n\t}\n\n\tclient.client = &http.Client{\n\t\tTimeout: opt.Timeout,\n\t\tCheckRedirect: redirectFunc,\n\t\tTransport: &http.Transport{\n\t\t\tProxy: proxyURLFunc,\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: opt.InsecureSSL,\n\t\t\t},\n\t\t}}\n\tclient.context = c\n\tclient.username = opt.Username\n\tclient.password = opt.Password\n\tclient.includeLength = opt.IncludeLength\n\treturn &client, nil\n}\n\n\/\/ MakeRequest makes a request to the specified url\nfunc (client *httpClient) makeRequest(fullURL, cookie string) (*int, *int64, error) {\n\treq, err := http.NewRequest(http.MethodGet, fullURL, nil)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ add the context so we can easily cancel out\n\treq = req.WithContext(client.context)\n\n\tif cookie != \"\" {\n\t\treq.Header.Set(\"Cookie\", cookie)\n\t}\n\n\tua := userAgent\n\tif client.userAgent != \"\" {\n\t\tua = client.userAgent\n\t}\n\treq.Header.Set(\"User-Agent\", ua)\n\n\tif client.username != \"\" {\n\t\treq.SetBasicAuth(client.username, client.password)\n\t}\n\n\tresp, err := client.client.Do(req)\n\tif err != nil {\n\t\tif ue, ok := err.(*url.Error); ok {\n\n\t\t\tif strings.HasPrefix(ue.Err.Error(), \"x509\") {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"invalid certificate\")\n\t\t\t}\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar length *int64\n\n\tif client.includeLength {\n\t\tlength = new(int64)\n\t\tif resp.ContentLength <= 0 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err == nil {\n\t\t\t\t*length = int64(utf8.RuneCountInString(string(body)))\n\t\t\t}\n\t\t} else {\n\t\t\t*length = resp.ContentLength\n\t\t}\n\t}\n\n\treturn &resp.StatusCode, length, nil\n}\n<commit_msg>fix user agent<commit_after>package libgobuster\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\ntype httpClient struct {\n\tclient *http.Client\n\tcontext context.Context\n\tuserAgent string\n\tusername string\n\tpassword string\n\tincludeLength bool\n}\n\n\/\/ NewHTTPClient returns a new HTTPClient\nfunc newHTTPClient(c context.Context, opt *Options) (*httpClient, error) {\n\tvar proxyURLFunc func(*http.Request) (*url.URL, error)\n\tvar client httpClient\n\tproxyURLFunc = http.ProxyFromEnvironment\n\n\tif opt.Proxy != \"\" {\n\t\tproxyURL, err := url.Parse(opt.Proxy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[!] Proxy URL is invalid\")\n\t\t}\n\t\tproxyURLFunc = http.ProxyURL(proxyURL)\n\t}\n\n\tvar redirectFunc func(req *http.Request, via []*http.Request) error\n\tif !opt.FollowRedirect {\n\t\tredirectFunc = func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t}\n\t} else {\n\t\tredirectFunc = nil\n\t}\n\n\tclient.client = &http.Client{\n\t\tTimeout: opt.Timeout,\n\t\tCheckRedirect: redirectFunc,\n\t\tTransport: &http.Transport{\n\t\t\tProxy: proxyURLFunc,\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: opt.InsecureSSL,\n\t\t\t},\n\t\t}}\n\tclient.context = c\n\tclient.username = opt.Username\n\tclient.password = opt.Password\n\tclient.includeLength = opt.IncludeLength\n\tclient.userAgent = opt.UserAgent\n\treturn &client, nil\n}\n\n\/\/ MakeRequest makes a request to the specified url\nfunc (client *httpClient) makeRequest(fullURL, cookie string) (*int, *int64, error) {\n\treq, err := http.NewRequest(http.MethodGet, fullURL, nil)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ add the context so we can easily cancel out\n\treq = req.WithContext(client.context)\n\n\tif cookie != \"\" {\n\t\treq.Header.Set(\"Cookie\", cookie)\n\t}\n\n\tua := fmt.Sprintf(\"gobuster %s\", VERSION)\n\tif client.userAgent != \"\" {\n\t\tua = client.userAgent\n\t}\n\treq.Header.Set(\"User-Agent\", ua)\n\n\tif client.username != \"\" {\n\t\treq.SetBasicAuth(client.username, client.password)\n\t}\n\n\tresp, err := client.client.Do(req)\n\tif err != nil {\n\t\tif ue, ok := err.(*url.Error); ok {\n\n\t\t\tif strings.HasPrefix(ue.Err.Error(), \"x509\") {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"invalid certificate\")\n\t\t\t}\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar length *int64\n\n\tif client.includeLength {\n\t\tlength = new(int64)\n\t\tif resp.ContentLength <= 0 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err == nil {\n\t\t\t\t*length = int64(utf8.RuneCountInString(string(body)))\n\t\t\t}\n\t\t} else {\n\t\t\t*length = resp.ContentLength\n\t\t}\n\t}\n\n\treturn &resp.StatusCode, length, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package combiner contains methods to consume Job objects, orchestrating\n\/\/ the download, combination, and upload of a group of related PDF files.\n\/\/ TODO now that s.Stat{} can return errors, maybe the err chan is unnecessary\npackage combiner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/brasic\/pdfreader\/pdfread\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/cpdf\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/job\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/notifier\"\n\ts \"github.com\/peopleadmin\/pdfcombiner\/stat\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar (\n\tdownloadTimeout = 3 * time.Minute\n\tbasedir = \"\/tmp\/\"\n\tmaxGoroutines = 30\n)\n\n\/\/ Get an individual file from S3. If successful, writes the file out to disk\n\/\/ and sends a stat object back to the main channel. If there are errors they\n\/\/ are sent back through the error channel.\nfunc getFile(j *job.Job, docname string, dir string, c chan<- s.Stat, e chan<- s.Stat) {\n\tstart := time.Now()\n\tdata, err := j.Get(docname)\n\tif err != nil {\n\t\te <- s.Stat{Filename: docname, Err: err}\n\t\treturn\n\t}\n\tpath := dir + docname\n\terr = ioutil.WriteFile(path, data, 0644)\n\tif err != nil {\n\t\te <- s.Stat{Filename: docname, Err: err}\n\t\treturn\n\t}\n\tpagecount := pdfread.Load(path).PageCount()\n\tc <- s.Stat{Filename: docname,\n\t\tSize: len(data),\n\t\tPageCount: pagecount,\n\t\tDlTime: time.Since(start)}\n}\n\n\/\/ Fan out workers to download each document in parallel, then block\n\/\/ until all downloads are complete.\nfunc getAllFiles(j *job.Job, dir string) {\n\tstart := time.Now()\n\tc := make(chan s.Stat, j.DocCount())\n\te := make(chan s.Stat, j.DocCount())\n\tfor _, doc := range j.DocList {\n\t\tthrottle()\n\t\tgo getFile(j, doc.Name, dir, c, e)\n\t}\n\n\ttotalBytes := waitForDownloads(j, c, e)\n\tprintSummary(start, totalBytes, j.CompleteCount())\n}\n\n\/\/ Prevents the system from being overwhelmed with work.\n\/\/ Blocks until the number of Goroutines is less than a preset threshold.\nfunc throttle() {\n\tfor {\n\t\tif runtime.NumGoroutine() < maxGoroutines {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n}\n\n\/\/ Listen on several channels for information from background download\n\/\/ tasks -- each task will either send a s.Stat through c, an error through\n\/\/ e, or timeout. Once all docs are accounted for, return the total number\n\/\/ of bytes recieved.\nfunc waitForDownloads(j *job.Job, c <-chan s.Stat, e <-chan s.Stat) (totalBytes int) {\n\tfor _ = range j.DocList {\n\t\tselect {\n\t\tcase packet := <-c:\n\t\t\tlog.Printf(\"%s was %d bytes\\n\", packet.Filename, packet.Size)\n\t\t\ttotalBytes += packet.Size\n\t\t\tj.MarkComplete(packet.Filename, packet)\n\t\tcase bad := <-e:\n\t\t\tj.AddError(bad.Filename, bad.Err)\n\t\tcase <-time.After(downloadTimeout):\n\t\t\tj.AddError(\"general\", errors.New(\"timed out while downloading\"))\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Print a summary of the download activity.\nfunc printSummary(start time.Time, bytes int, count int) {\n\tseconds := time.Since(start).Seconds()\n\tmbps := float64(bytes) \/ 1024 \/ 1024 \/ seconds\n\tlog.Printf(\"got %d bytes over %d files in %f secs (%f MB\/s)\\n\",\n\t\tbytes, count, seconds, mbps)\n}\n\n\/\/ Send an update on the success or failure of the operation to the\n\/\/ callback URL provided by the job originator.\nfunc postToCallback(j *job.Job) {\n\tlog.Println(\"work complete, posting status to callback:\", j.Callback)\n\tnotifier.SendNotification(j)\n}\n\n\/\/ Make and return a randomized temporary directory.\nfunc mkTmpDir() (dirname string) {\n\trand.Seed(time.Now().UnixNano())\n\tdirname = fmt.Sprintf(\"\/tmp\/pdfcombiner\/%d\/\", rand.Int())\n\tos.MkdirAll(dirname, 0777)\n\treturn\n}\n\n\/\/ Combine is the entry point to this package. Given a Job, downloads\n\/\/ all the files, combines them into a single one, uploads it to AWS\n\/\/ and posts the status to a callback endpoint.\nfunc Combine(j *job.Job) bool {\n\tdefer postToCallback(j)\n\tsaveDir := mkTmpDir()\n\tgetAllFiles(j, saveDir)\n\tif j.HasDownloadedDocs() {\n\t\tcpdf.Merge(j.Downloaded, saveDir)\n\t}\n\treturn true\n}\n<commit_msg>go fmt<commit_after>\/\/ Package combiner contains methods to consume Job objects, orchestrating\n\/\/ the download, combination, and upload of a group of related PDF files.\n\/\/ TODO now that s.Stat{} can return errors, maybe the err chan is unnecessary\npackage combiner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/brasic\/pdfreader\/pdfread\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/cpdf\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/job\"\n\t\"github.com\/peopleadmin\/pdfcombiner\/notifier\"\n\ts \"github.com\/peopleadmin\/pdfcombiner\/stat\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar (\n\tdownloadTimeout = 3 * time.Minute\n\tbasedir = \"\/tmp\/\"\n\tmaxGoroutines = 30\n)\n\n\/\/ Get an individual file from S3. If successful, writes the file out to disk\n\/\/ and sends a stat object back to the main channel. If there are errors they\n\/\/ are sent back through the error channel.\nfunc getFile(j *job.Job, docname string, dir string, c chan<- s.Stat, e chan<- s.Stat) {\n\tstart := time.Now()\n\tdata, err := j.Get(docname)\n\tif err != nil {\n\t\te <- s.Stat{Filename: docname, Err: err}\n\t\treturn\n\t}\n\tpath := dir + docname\n\terr = ioutil.WriteFile(path, data, 0644)\n\tif err != nil {\n\t\te <- s.Stat{Filename: docname, Err: err}\n\t\treturn\n\t}\n\tpagecount := pdfread.Load(path).PageCount()\n\tc <- s.Stat{Filename: docname,\n\t\tSize: len(data),\n\t\tPageCount: pagecount,\n\t\tDlTime: time.Since(start)}\n}\n\n\/\/ Fan out workers to download each document in parallel, then block\n\/\/ until all downloads are complete.\nfunc getAllFiles(j *job.Job, dir string) {\n\tstart := time.Now()\n\tc := make(chan s.Stat, j.DocCount())\n\te := make(chan s.Stat, j.DocCount())\n\tfor _, doc := range j.DocList {\n\t\tthrottle()\n\t\tgo getFile(j, doc.Name, dir, c, e)\n\t}\n\n\ttotalBytes := waitForDownloads(j, c, e)\n\tprintSummary(start, totalBytes, j.CompleteCount())\n}\n\n\/\/ Prevents the system from being overwhelmed with work.\n\/\/ Blocks until the number of Goroutines is less than a preset threshold.\nfunc throttle() {\n\tfor {\n\t\tif runtime.NumGoroutine() < maxGoroutines {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n}\n\n\/\/ Listen on several channels for information from background download\n\/\/ tasks -- each task will either send a s.Stat through c, an error through\n\/\/ e, or timeout. Once all docs are accounted for, return the total number\n\/\/ of bytes recieved.\nfunc waitForDownloads(j *job.Job, c <-chan s.Stat, e <-chan s.Stat) (totalBytes int) {\n\tfor _ = range j.DocList {\n\t\tselect {\n\t\tcase packet := <-c:\n\t\t\tlog.Printf(\"%s was %d bytes\\n\", packet.Filename, packet.Size)\n\t\t\ttotalBytes += packet.Size\n\t\t\tj.MarkComplete(packet.Filename, packet)\n\t\tcase bad := <-e:\n\t\t\tj.AddError(bad.Filename, bad.Err)\n\t\tcase <-time.After(downloadTimeout):\n\t\t\tj.AddError(\"general\", errors.New(\"timed out while downloading\"))\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Print a summary of the download activity.\nfunc printSummary(start time.Time, bytes int, count int) {\n\tseconds := time.Since(start).Seconds()\n\tmbps := float64(bytes) \/ 1024 \/ 1024 \/ seconds\n\tlog.Printf(\"got %d bytes over %d files in %f secs (%f MB\/s)\\n\",\n\t\tbytes, count, seconds, mbps)\n}\n\n\/\/ Send an update on the success or failure of the operation to the\n\/\/ callback URL provided by the job originator.\nfunc postToCallback(j *job.Job) {\n\tlog.Println(\"work complete, posting status to callback:\", j.Callback)\n\tnotifier.SendNotification(j)\n}\n\n\/\/ Make and return a randomized temporary directory.\nfunc mkTmpDir() (dirname string) {\n\trand.Seed(time.Now().UnixNano())\n\tdirname = fmt.Sprintf(\"\/tmp\/pdfcombiner\/%d\/\", rand.Int())\n\tos.MkdirAll(dirname, 0777)\n\treturn\n}\n\n\/\/ Combine is the entry point to this package. Given a Job, downloads\n\/\/ all the files, combines them into a single one, uploads it to AWS\n\/\/ and posts the status to a callback endpoint.\nfunc Combine(j *job.Job) bool {\n\tdefer postToCallback(j)\n\tsaveDir := mkTmpDir()\n\tgetAllFiles(j, saveDir)\n\tif j.HasDownloadedDocs() {\n\t\tcpdf.Merge(j.Downloaded, saveDir)\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package agent\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\tmqtt \"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n)\n\nconst (\n\tupdateTopic = \"$hardware\/status\/+\"\n\tstatusTopic = \"$sphere\/leds\/status\"\n)\n\n\/*\n Just manages all the data going into out of this service.\n*\/\ntype Bus struct {\n\tconf *Config\n\tagent *Agent\n\tclient *mqtt.MqttClient\n\tticker *time.Ticker\n}\n\ntype updateRequest struct {\n\tTopic string\n\tBrightness int `json:\"brightness\"`\n\tOn bool `json:\"on\"`\n\tColor string `json:\"color\"`\n\tFlash bool `json:\"flash\"`\n}\n\ntype statusEvent struct {\n\tStatus string `json:\"status\"`\n}\n\ntype statsEvent struct {\n\n\t\/\/ memory related information\n\tAlloc uint64 `json:\"alloc\"`\n\tHeapAlloc uint64 `json:\"heapAlloc\"`\n\tTotalAlloc uint64 `json:\"totalAlloc\"`\n}\n\nfunc createBus(conf *Config, agent *Agent) *Bus {\n\n\treturn &Bus{conf: conf, agent: agent}\n}\n\nfunc (b *Bus) listen() {\n\tlogger.Infof(\"connecting to the bus\")\n\n\topts := mqtt.NewClientOptions().AddBroker(b.conf.LocalUrl).SetClientId(\"mqtt-bridgeify\")\n\n\t\/\/ shut up\n\tb.client = mqtt.NewClient(opts)\n\n\t_, err := b.client.Start()\n\tif err != nil {\n\t\tlog.Fatalf(\"error starting connection: %s\", err)\n\t} else {\n\t\tlogger.Infof(\"Connected as %s\\n\", b.conf.LocalUrl)\n\t}\n\n\ttopicFilter, _ := mqtt.NewTopicFilter(updateTopic, 0)\n\tif _, err := b.client.StartSubscription(b.handleUpdate, topicFilter); err != nil {\n\t\tlog.Fatalf(\"error starting subscription: %s\", err)\n\t}\n\n\tb.setupBackgroundJob()\n\n}\n\nfunc (b *Bus) handleUpdate(client *mqtt.MqttClient, msg mqtt.Message) {\n\tlogger.Debugf(\"handleUpdate\")\n\treq := &updateRequest{}\n\terr := b.decodeRequest(&msg, req)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to decode connect request %s\", err)\n\t}\n\treq.Topic = msg.Topic()\n\tb.agent.updateLeds(req)\n\n}\n\nfunc (b *Bus) setupBackgroundJob() {\n\tb.ticker = time.NewTicker(10 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.ticker.C:\n\t\t\t\/\/ emit the status\n\t\t\tstatus := b.agent.getStatus()\n\t\t\t\/\/ log.Printf(\"[DEBUG] status %+v\", status)\n\t\t\tb.client.PublishMessage(statusTopic, b.encodeRequest(status))\n\t\t}\n\t}\n\n}\n\nfunc (b *Bus) encodeRequest(data interface{}) *mqtt.Message {\n\tbuf := bytes.NewBuffer(nil)\n\tjson.NewEncoder(buf).Encode(data)\n\treturn mqtt.NewMessage(buf.Bytes())\n}\n\nfunc (b *Bus) decodeRequest(msg *mqtt.Message, data interface{}) error {\n\tpayload := string(msg.Payload())\n\tif strings.HasPrefix(payload, \"[\") && strings.HasSuffix(payload, \"]\") {\n\t\t\/\/ go rpc can't handle enclosing parameters\n\t\tpayload = payload[1 : len(payload)-1]\n\t}\n\treturn json.NewDecoder(bytes.NewBuffer([]byte(payload))).Decode(data)\n}\n<commit_msg>Update to use go-ninja bus abstraction.<commit_after>package agent\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/ninjasphere\/go-ninja\/bus\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tupdateTopic = \"$hardware\/status\/+\"\n\tstatusTopic = \"$sphere\/leds\/status\"\n)\n\n\/*\n Just manages all the data going into out of this service.\n*\/\ntype Bus struct {\n\tconf *Config\n\tagent *Agent\n\tbus bus.Bus\n\tticker *time.Ticker\n}\n\ntype updateRequest struct {\n\tTopic string\n\tBrightness int `json:\"brightness\"`\n\tOn bool `json:\"on\"`\n\tColor string `json:\"color\"`\n\tFlash bool `json:\"flash\"`\n}\n\ntype statusEvent struct {\n\tStatus string `json:\"status\"`\n}\n\ntype statsEvent struct {\n\n\t\/\/ memory related information\n\tAlloc uint64 `json:\"alloc\"`\n\tHeapAlloc uint64 `json:\"heapAlloc\"`\n\tTotalAlloc uint64 `json:\"totalAlloc\"`\n}\n\nfunc createBus(conf *Config, agent *Agent) *Bus {\n\tvar addr string\n\tif strings.HasPrefix(conf.LocalUrl, \"tcp:\/\/\") {\n\t\taddr = conf.LocalUrl[len(\"tcp:\/\/\"):]\n\t} else {\n\t\taddr = conf.LocalUrl\n\t}\n\tlogger.Infof(\"connecting to the bus %s...\", addr)\n\ttheBus := bus.MustConnect(addr, \"sphere-leds\")\n\tlogger.Infof(\"connected to the bus.\")\n\treturn &Bus{conf: conf, bus: theBus, agent: agent}\n}\n\nfunc (b *Bus) listen() {\n\tlogger.Infof(\"subscribing to topic %s\", updateTopic)\n\t_, err := b.bus.Subscribe(updateTopic, b.handleUpdate)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tb.setupBackgroundJob()\n\n}\n\nfunc (b *Bus) handleUpdate(topic string, payload []byte) {\n\tlogger.Debugf(\"handleUpdate %s\", string(payload))\n\treq := &updateRequest{}\n\terr := b.decodeRequest(payload, req)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to decode connect request %s\", err)\n\t}\n\treq.Topic = topic\n\tb.agent.updateLeds(req)\n\n}\n\nfunc (b *Bus) setupBackgroundJob() {\n\tb.ticker = time.NewTicker(10 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.ticker.C:\n\t\t\t\/\/ emit the status\n\t\t\tstatus := b.agent.getStatus()\n\t\t\tlogger.Debugf(\"[DEBUG] status %+v\", status)\n\t\t\tb.bus.Publish(statusTopic, b.encodeRequest(status))\n\t\t}\n\t}\n\n}\n\nfunc (b *Bus) encodeRequest(data interface{}) []byte {\n\tbuf := bytes.NewBuffer(nil)\n\tjson.NewEncoder(buf).Encode(data)\n\treturn buf.Bytes()\n}\n\nfunc (b *Bus) decodeRequest(payloadBytes []byte, data interface{}) error {\n\tpayload := string(payloadBytes)\n\tif strings.HasPrefix(payload, \"[\") && strings.HasSuffix(payload, \"]\") {\n\t\t\/\/ go rpc can't handle enclosing parameters\n\t\tpayload = payload[1 : len(payload)-1]\n\t}\n\treturn json.NewDecoder(bytes.NewBuffer([]byte(payload))).Decode(data)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com\/koding\/klient\/cmd\/klientctl\/util\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestAdminRequired(t *testing.T) {\n\n\tConvey(\"If the user has permissions, it\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) { return true, nil },\n\t\t}\n\n\t\tConvey(\"Should not return an error with no bin args\", func() {\n\t\t\targs := []string{\"kd\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a required command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a not required command\", func() {\n\t\t\targs := []string{\"kd\", \"list\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n\n\tConvey(\"If the user does not have permissions, it\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) { return false, nil },\n\t\t}\n\n\t\tConvey(\"Should not return an error with no bin args\", func() {\n\t\t\targs := []string{\"kd\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should return an error for a required command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldNotBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a not required command\", func() {\n\t\t\targs := []string{\"kd\", \"list\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n\n\tConvey(\"If the admin checker errors\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) {\n\t\t\t\treturn false, errors.New(\"Fake error\")\n\t\t\t},\n\t\t}\n\n\t\tConvey(\"Allow the user to run the command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n}\n<commit_msg>styleguide: Remove newline<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com\/koding\/klient\/cmd\/klientctl\/util\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestAdminRequired(t *testing.T) {\n\tConvey(\"If the user has permissions, it\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) { return true, nil },\n\t\t}\n\n\t\tConvey(\"Should not return an error with no bin args\", func() {\n\t\t\targs := []string{\"kd\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a required command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a not required command\", func() {\n\t\t\targs := []string{\"kd\", \"list\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n\n\tConvey(\"If the user does not have permissions, it\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) { return false, nil },\n\t\t}\n\n\t\tConvey(\"Should not return an error with no bin args\", func() {\n\t\t\targs := []string{\"kd\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"Should return an error for a required command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldNotBeNil)\n\t\t})\n\n\t\tConvey(\"Should not return an error for a not required command\", func() {\n\t\t\targs := []string{\"kd\", \"list\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n\n\tConvey(\"If the admin checker errors\", t, func() {\n\t\tp := &util.Permissions{\n\t\t\tAdminChecker: func() (bool, error) {\n\t\t\t\treturn false, errors.New(\"Fake error\")\n\t\t\t},\n\t\t}\n\n\t\tConvey(\"Allow the user to run the command\", func() {\n\t\t\targs := []string{\"kd\", \"stop\"}\n\t\t\treqs := []string{\"start\", \"stop\"}\n\t\t\tSo(AdminRequired(args, reqs, p), ShouldBeNil)\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package minion\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/pressly\/chi\"\n\t\"github.com\/unrolled\/render\"\n)\n\nvar l = log.New(os.Stdout, \"[minion] \", 0)\n\n\/\/ HandlerFunc TODO\ntype HandlerFunc func(*Context)\n\n\/\/ Engine TODO\ntype Engine struct {\n\t*RouterGroup\n\trouter *chi.Mux\n\tallNoRoute []HandlerFunc\n\tpool sync.Pool\n\toptions Options\n}\n\n\/\/ Version api version\nfunc Version() string {\n\treturn \"0.0.1\"\n}\n\n\/\/ Options defines the options to start the API\ntype Options struct {\n\tCors []string\n\tJWTToken string\n\tDisableJSONApi bool\n\tUnauthenticatedRoutes []string\n}\n\n\/\/ New returns a new blank Engine instance without any middleware attached.\nfunc New(opts Options) *Engine {\n\tengine := &Engine{}\n\tengine.RouterGroup = &RouterGroup{\n\t\tabsolutePath: \"\/\",\n\t\tengine: engine,\n\t}\n\tengine.options = opts\n\tengine.router = chi.NewRouter()\n\t\/\/\tengine.router.NotFound = http.HandlerFunc(engine.handle404)\n\tengine.pool.New = func() interface{} {\n\t\tctx := &Context{\n\t\t\tEngine: engine,\n\t\t\trender: render.New(render.Options{\n\t\t\t\tLayout: \"layout\",\n\t\t\t}),\n\t\t}\n\t\treturn ctx\n\t}\n\tengine.Use(AuthenticatedRoutes(opts.JWTToken, opts.UnauthenticatedRoutes))\n\tengine.Use(Recovery())\n\treturn engine\n}\n\n\/\/ Use add middlewares to be used on applicaton\nfunc (c *Engine) Use(middlewares ...HandlerFunc) {\n\tc.RouterGroup.Use(middlewares...)\n\tc.allNoRoute = c.combineHandlers(nil)\n}\n\n\/\/ ServeHTTP makes the router implement the http.Handler interface.\nfunc (c *Engine) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tc.router.ServeHTTP(res, req)\n}\n\n\/\/ Run run the http server.\nfunc (c *Engine) Run(port int) error {\n\taddr := fmt.Sprintf(\":%d\", port)\n\tl.Printf(\"Starting server on port [%d]\\n\", port)\n\tif err := http.ListenAndServe(addr, WriteLog(c)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ RunTLS run the https server.\nfunc (c *Engine) RunTLS(port int, cert string, key string) error {\n\taddr := fmt.Sprintf(\":%d\", port)\n\tl.Printf(\"Starting server on port [%d]\\n\", port)\n\tif err := http.ListenAndServeTLS(addr, cert, key, c); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nconst (\n\t\/\/ DEV runs the server in development mode\n\tDEV string = \"development\"\n\t\/\/ PROD runs the server in production mode\n\tPROD string = \"production\"\n\t\/\/ TEST runs the server in test mode\n\tTEST string = \"test\"\n)\n\n\/\/ MinionEnv is the environment that Minion is executing in.\n\/\/ The MINION_ENV is read on initialization to set this variable.\nvar MinionEnv = DEV\n\nfunc init() {\n\tenv := os.Getenv(\"MINION_ENV\")\n\tif len(env) > 0 {\n\t\tMinionEnv = env\n\t}\n}\n<commit_msg>Add cors<commit_after>package minion\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/pressly\/chi\"\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/unrolled\/render\"\n)\n\nvar l = log.New(os.Stdout, \"[minion] \", 0)\n\n\/\/ HandlerFunc TODO\ntype HandlerFunc func(*Context)\n\n\/\/ Engine TODO\ntype Engine struct {\n\t*RouterGroup\n\trouter *chi.Mux\n\tallNoRoute []HandlerFunc\n\tpool sync.Pool\n\toptions Options\n}\n\n\/\/ Version api version\nfunc Version() string {\n\treturn \"0.0.1\"\n}\n\n\/\/ Options defines the options to start the API\ntype Options struct {\n\tCors []string\n\tJWTToken string\n\tDisableJSONApi bool\n\tUnauthenticatedRoutes []string\n}\n\n\/\/ New returns a new blank Engine instance without any middleware attached.\nfunc New(opts Options) *Engine {\n\tengine := &Engine{}\n\tengine.RouterGroup = &RouterGroup{\n\t\tabsolutePath: \"\/\",\n\t\tengine: engine,\n\t}\n\tengine.options = opts\n\tengine.router = chi.NewRouter()\n\t\/\/\tengine.router.NotFound = http.HandlerFunc(engine.handle404)\n\tengine.pool.New = func() interface{} {\n\t\tctx := &Context{\n\t\t\tEngine: engine,\n\t\t\trender: render.New(render.Options{\n\t\t\t\tLayout: \"layout\",\n\t\t\t}),\n\t\t}\n\t\treturn ctx\n\t}\n\tengine.Use(AuthenticatedRoutes(opts.JWTToken, opts.UnauthenticatedRoutes))\n\tengine.Use(Recovery())\n\treturn engine\n}\n\n\/\/ Use add middlewares to be used on applicaton\nfunc (c *Engine) Use(middlewares ...HandlerFunc) {\n\tc.RouterGroup.Use(middlewares...)\n\tc.allNoRoute = c.combineHandlers(nil)\n}\n\n\/\/ ServeHTTP makes the router implement the http.Handler interface.\nfunc (c *Engine) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tc.router.ServeHTTP(res, req)\n}\n\n\/\/ Run run the http server.\nfunc (c *Engine) Run(port int) error {\n\tcrs := cors.New(cors.Options{\n\t\tAllowedOrigins: c.options.Cors,\n\t})\n\n\taddr := fmt.Sprintf(\":%d\", port)\n\tl.Printf(\"Starting server on port [%d]\\n\", port)\n\tif err := http.ListenAndServe(addr, crs.Handler(WriteLog(c))); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ RunTLS run the https server.\nfunc (c *Engine) RunTLS(port int, cert string, key string) error {\n\taddr := fmt.Sprintf(\":%d\", port)\n\tl.Printf(\"Starting server on port [%d]\\n\", port)\n\tif err := http.ListenAndServeTLS(addr, cert, key, c); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nconst (\n\t\/\/ DEV runs the server in development mode\n\tDEV string = \"development\"\n\t\/\/ PROD runs the server in production mode\n\tPROD string = \"production\"\n\t\/\/ TEST runs the server in test mode\n\tTEST string = \"test\"\n)\n\n\/\/ MinionEnv is the environment that Minion is executing in.\n\/\/ The MINION_ENV is read on initialization to set this variable.\nvar MinionEnv = DEV\n\nfunc init() {\n\tenv := os.Getenv(\"MINION_ENV\")\n\tif len(env) > 0 {\n\t\tMinionEnv = env\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/GeertJohan\/go.rice\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"github.com\/ssddanbrown\/haste\/engine\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype managerServer struct {\n\tWatchedFolders []string\n\tfileWatcher *fsnotify.Watcher\n\tchangedFiles chan string\n\tsockets []*websocket.Conn\n\tlastFileChange int64\n\tPort int\n\twatchedFile string\n\tLiveReload bool\n\tWatchDepth int\n}\n\nfunc (m *managerServer) addWatchedFolder(htmlFilePath string) {\n\trootPath := filepath.Dir(htmlFilePath)\n\terr := m.watchFoldersToDepth(rootPath, m.WatchDepth)\n\tcheck(err)\n\tgo m.handleFileChange(htmlFilePath)\n}\n\nfunc (m *managerServer) listen() error {\n\tm.startFileWatcher()\n\thandler := m.getManagerRouting()\n\thttp.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", m.Port), handler)\n\treturn nil\n}\n\nfunc (m *managerServer) liveReloadAlertChange(file string) {\n\tresponse := livereloadChange{\n\t\tCommand: \"reload\",\n\t\tPath: file,\n\t\tLiveCSS: true,\n\t}\n\tfor i := 0; i < len(m.sockets); i++ {\n\t\tif !m.sockets[i].IsServerConn() {\n\t\t\tm.sockets[i].Close()\n\t\t}\n\n\t\tif m.sockets[i].IsServerConn() {\n\t\t\twebsocket.JSON.Send(m.sockets[i], response)\n\t\t}\n\t}\n\n\tdevlog(\"File changed: \" + file)\n}\n\nfunc (m *managerServer) handleFileChange(changedFile string) {\n\n\t\/\/ Prevent duplicate changes\n\tcurrentTime := time.Now().UnixNano()\n\tif (currentTime-m.lastFileChange)\/1000000 < 100 {\n\t\treturn\n\t}\n\n\t\/\/ Ignore git directories\n\tif strings.Contains(changedFile, \".git\") {\n\t\treturn\n\t}\n\n\tif strings.Contains(changedFile, \".gen.html\") {\n\t\treturn\n\t}\n\n\t\/\/ Check if a relevant extension\n\twatchedExtensions := []string{\".html\", \".css\", \".js\"}\n\treload := false\n\tfor i := range watchedExtensions {\n\t\tif filepath.Ext(changedFile) == watchedExtensions[i] {\n\t\t\treload = true\n\t\t}\n\t}\n\n\tif reload {\n\t\ttime.AfterFunc(100*time.Millisecond, func() {\n\t\t\tin, err := ioutil.ReadFile(m.watchedFile)\n\t\t\tcheck(err)\n\n\t\t\tr := strings.NewReader(string(in))\n\n\t\t\tnewContent, err := engine.Parse(r, m.watchedFile)\n\t\t\tif err != nil {\n\t\t\t\tdevlog(err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnewFileName := getGenFileName(m.watchedFile)\n\t\t\tnewFileLocation := filepath.Join(filepath.Dir(m.watchedFile), \".\/\"+newFileName)\n\t\t\tnewFile, err := os.Create(newFileLocation)\n\t\t\tdefer newFile.Close()\n\t\t\tcheck(err)\n\n\t\t\tnewFile.WriteString(newContent)\n\t\t\tnewFile.Sync()\n\t\t\tm.changedFiles <- newFileLocation\n\t\t})\n\n\t} else {\n\t\tm.changedFiles <- changedFile\n\t}\n\n\tm.lastFileChange = currentTime\n}\n\nfunc (m *managerServer) startFileWatcher() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tcheck(err)\n\n\tm.fileWatcher = watcher\n\tm.changedFiles = make(chan string)\n\n\t\/\/ Process events\n\tgo func() {\n\n\t\tdone := make(chan bool)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\tif ev.IsModify() {\n\t\t\t\t\tm.handleFileChange(ev.Name)\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tdevlog(\"File Watcher Error: \" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Hang so program doesn't exit\n\t\t<-done\n\n\t\twatcher.Close()\n\t}()\n\n\tfor i := 0; i < len(m.WatchedFolders); i++ {\n\t\terr = watcher.Watch(m.WatchedFolders[i])\n\t\tdevlog(\"Adding file watcher to \" + m.WatchedFolders[i])\n\t\tcheck(err)\n\t}\n\n\tgo func() {\n\t\tfor f := range m.changedFiles {\n\t\t\tif len(m.sockets) > 0 {\n\t\t\t\tm.liveReloadAlertChange(f)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (m *managerServer) watchFoldersToDepth(folderPath string, depth int) error {\n\n\tignoreFolders := []string{\"node_modules\", \".git\"}\n\n\tm.watchFolder(folderPath)\n\tif depth == 0 {\n\t\treturn nil\n\t}\n\n\tfolderItems, err := ioutil.ReadDir(folderPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range folderItems {\n\t\tif f.IsDir() && !stringInSlice(f.Name(), ignoreFolders) {\n\t\t\tnewFPath := filepath.Join(folderPath, f.Name())\n\t\t\tm.watchFoldersToDepth(newFPath, depth-1)\n\t\t}\n\t}\n\n\treturn nil\n}\nfunc (m *managerServer) watchFolder(folderPath string) error {\n\tif !stringInSlice(folderPath, m.WatchedFolders) {\n\t\tm.WatchedFolders = append(m.WatchedFolders, folderPath)\n\t\tif m.fileWatcher == nil {\n\t\t\treturn nil\n\t\t}\n\t\terr := m.fileWatcher.Watch(folderPath)\n\t\tdevlog(\"Adding file watcher to \" + folderPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (manager *managerServer) getManagerRouting() *http.ServeMux {\n\n\thandler := http.NewServeMux()\n\n\t\/\/ Get our generated HTML file\n\thandler.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfile, err := os.Open(getGenFileName(manager.watchedFile))\n\t\tcheck(err)\n\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\tw.Header().Add(\"Content-Type\", \"text\/html\")\n\t\tio.Copy(w, file)\n\t\tif manager.LiveReload {\n\t\t\tfmt.Fprintln(w, \"\\n<script src=\\\"\/livereload.js\\\"><\/script>\")\n\t\t}\n\t})\n\n\tif !manager.LiveReload {\n\t\treturn handler\n\t}\n\n\t\/\/ Load compiled in static content\n\tfileBox := rice.MustFindBox(\"res\")\n\n\t\/\/ Get LiveReload Script\n\thandler.HandleFunc(\"\/livereload.js\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.FileServer(fileBox.HTTPBox())\n\t\tscriptString := fileBox.MustString(\"livereload.js\")\n\t\ttemplS, err := template.New(\"livereload\").Parse(scriptString)\n\t\tif err != nil {\n\t\t\tcheck(err)\n\t\t}\n\t\ttemplS.Execute(w, manager.Port)\n\t})\n\n\t\/\/ Websocket handling\n\twsHandler := manager.getLivereloadWsHandler()\n\thandler.Handle(\"\/livereload\", websocket.Handler(wsHandler))\n\n\treturn handler\n}\n\ntype livereloadResponse struct {\n\tCommand string `json:\"command\"`\n}\n\ntype livereloadHello struct {\n\tCommand string `json:\"command\"`\n\tProtocols []string `json:\"protocols\"`\n\tServerName string `json:\"serverName\"`\n}\n\ntype livereloadChange struct {\n\tCommand string `json:\"command\"`\n\tPath string `json:\"path\"`\n\tLiveCSS bool `json:\"liveCSS\"`\n}\n\nfunc (manager *managerServer) getLivereloadWsHandler() func(ws *websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\n\t\tmanager.sockets = append(manager.sockets, ws)\n\n\t\tfor {\n\t\t\t\/\/ websocket.Message.Send(ws, \"Hello, Client!\")\n\t\t\twsData := new(livereloadResponse)\n\t\t\terr := websocket.JSON.Receive(ws, &wsData)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tcheck(err)\n\t\t\t\treturn\n\t\t\t} else if err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wsData.Command == \"hello\" {\n\t\t\t\tresponse := livereloadHello{\n\t\t\t\t\tCommand: \"hello\",\n\t\t\t\t\tProtocols: []string{\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/connection-check-1\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-7\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-8\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-9\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/2.x-origin-version-negotiation\",\n\t\t\t\t\t},\n\t\t\t\t\tServerName: \"Webby\",\n\t\t\t\t}\n\t\t\t\tdevlog(\"Sending livereload hello\")\n\t\t\t\twebsocket.JSON.Send(ws, response)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}\n<commit_msg>Added file server back in on default listener<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/GeertJohan\/go.rice\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"github.com\/ssddanbrown\/haste\/engine\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype managerServer struct {\n\tWatchedFolders []string\n\tfileWatcher *fsnotify.Watcher\n\tchangedFiles chan string\n\tsockets []*websocket.Conn\n\tlastFileChange int64\n\tPort int\n\twatchedFile string\n\tLiveReload bool\n\tWatchDepth int\n}\n\nfunc (m *managerServer) addWatchedFolder(htmlFilePath string) {\n\trootPath := filepath.Dir(htmlFilePath)\n\terr := m.watchFoldersToDepth(rootPath, m.WatchDepth)\n\tcheck(err)\n\tgo m.handleFileChange(htmlFilePath)\n}\n\nfunc (m *managerServer) listen() error {\n\tm.startFileWatcher()\n\thandler := m.getManagerRouting()\n\thttp.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", m.Port), handler)\n\treturn nil\n}\n\nfunc (m *managerServer) liveReloadAlertChange(file string) {\n\tresponse := livereloadChange{\n\t\tCommand: \"reload\",\n\t\tPath: file,\n\t\tLiveCSS: true,\n\t}\n\tfor i := 0; i < len(m.sockets); i++ {\n\t\tif !m.sockets[i].IsServerConn() {\n\t\t\tm.sockets[i].Close()\n\t\t}\n\n\t\tif m.sockets[i].IsServerConn() {\n\t\t\twebsocket.JSON.Send(m.sockets[i], response)\n\t\t}\n\t}\n\n\tdevlog(\"File changed: \" + file)\n}\n\nfunc (m *managerServer) handleFileChange(changedFile string) {\n\n\t\/\/ Prevent duplicate changes\n\tcurrentTime := time.Now().UnixNano()\n\tif (currentTime-m.lastFileChange)\/1000000 < 100 {\n\t\treturn\n\t}\n\n\t\/\/ Ignore git directories\n\tif strings.Contains(changedFile, \".git\") {\n\t\treturn\n\t}\n\n\tif strings.Contains(changedFile, \".gen.html\") {\n\t\treturn\n\t}\n\n\t\/\/ Check if a relevant extension\n\twatchedExtensions := []string{\".html\", \".css\", \".js\"}\n\treload := false\n\tfor i := range watchedExtensions {\n\t\tif filepath.Ext(changedFile) == watchedExtensions[i] {\n\t\t\treload = true\n\t\t}\n\t}\n\n\tif reload {\n\t\ttime.AfterFunc(100*time.Millisecond, func() {\n\t\t\tin, err := ioutil.ReadFile(m.watchedFile)\n\t\t\tcheck(err)\n\n\t\t\tr := strings.NewReader(string(in))\n\n\t\t\tnewContent, err := engine.Parse(r, m.watchedFile)\n\t\t\tif err != nil {\n\t\t\t\tdevlog(err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnewFileName := getGenFileName(m.watchedFile)\n\t\t\tnewFileLocation := filepath.Join(filepath.Dir(m.watchedFile), \".\/\"+newFileName)\n\t\t\tnewFile, err := os.Create(newFileLocation)\n\t\t\tdefer newFile.Close()\n\t\t\tcheck(err)\n\n\t\t\tnewFile.WriteString(newContent)\n\t\t\tnewFile.Sync()\n\t\t\tm.changedFiles <- newFileLocation\n\t\t})\n\n\t} else {\n\t\tm.changedFiles <- changedFile\n\t}\n\n\tm.lastFileChange = currentTime\n}\n\nfunc (m *managerServer) startFileWatcher() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tcheck(err)\n\n\tm.fileWatcher = watcher\n\tm.changedFiles = make(chan string)\n\n\t\/\/ Process events\n\tgo func() {\n\n\t\tdone := make(chan bool)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\tif ev.IsModify() {\n\t\t\t\t\tm.handleFileChange(ev.Name)\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tdevlog(\"File Watcher Error: \" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Hang so program doesn't exit\n\t\t<-done\n\n\t\twatcher.Close()\n\t}()\n\n\tfor i := 0; i < len(m.WatchedFolders); i++ {\n\t\terr = watcher.Watch(m.WatchedFolders[i])\n\t\tdevlog(\"Adding file watcher to \" + m.WatchedFolders[i])\n\t\tcheck(err)\n\t}\n\n\tgo func() {\n\t\tfor f := range m.changedFiles {\n\t\t\tif len(m.sockets) > 0 {\n\t\t\t\tm.liveReloadAlertChange(f)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (m *managerServer) watchFoldersToDepth(folderPath string, depth int) error {\n\n\tignoreFolders := []string{\"node_modules\", \".git\"}\n\n\tm.watchFolder(folderPath)\n\tif depth == 0 {\n\t\treturn nil\n\t}\n\n\tfolderItems, err := ioutil.ReadDir(folderPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range folderItems {\n\t\tif f.IsDir() && !stringInSlice(f.Name(), ignoreFolders) {\n\t\t\tnewFPath := filepath.Join(folderPath, f.Name())\n\t\t\tm.watchFoldersToDepth(newFPath, depth-1)\n\t\t}\n\t}\n\n\treturn nil\n}\nfunc (m *managerServer) watchFolder(folderPath string) error {\n\tif !stringInSlice(folderPath, m.WatchedFolders) {\n\t\tm.WatchedFolders = append(m.WatchedFolders, folderPath)\n\t\tif m.fileWatcher == nil {\n\t\t\treturn nil\n\t\t}\n\t\terr := m.fileWatcher.Watch(folderPath)\n\t\tdevlog(\"Adding file watcher to \" + folderPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (manager *managerServer) getManagerRouting() *http.ServeMux {\n\n\thandler := http.NewServeMux()\n\tcustomServeMux := http.NewServeMux()\n\n\tcustomServeMux.Handle(\"\/\", http.FileServer(http.Dir(filepath.Dir(manager.watchedFile))))\n\n\t\/\/ Get our generated HTML file\n\thandler.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\n\t\tif r.URL.Path != \"\/\" {\n\t\t\tfmt.Println(r.URL.Path)\n\t\t\tcustomServeMux.ServeHTTP(w, r)\n\t\t} else {\n\n\t\t\tfile, err := os.Open(getGenFileName(manager.watchedFile))\n\t\t\tcheck(err)\n\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\t\t\tw.Header().Add(\"Content-Type\", \"text\/html\")\n\t\t\tio.Copy(w, file)\n\t\t\tif manager.LiveReload {\n\t\t\t\tfmt.Fprintln(w, \"\\n<script src=\\\"\/livereload.js\\\"><\/script>\")\n\t\t\t}\n\t\t}\n\n\t})\n\n\tif !manager.LiveReload {\n\t\treturn handler\n\t}\n\n\t\/\/ Load compiled in static content\n\tfileBox := rice.MustFindBox(\"res\")\n\n\t\/\/ Get LiveReload Script\n\thandler.HandleFunc(\"\/livereload.js\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.FileServer(fileBox.HTTPBox())\n\t\tscriptString := fileBox.MustString(\"livereload.js\")\n\t\ttemplS, err := template.New(\"livereload\").Parse(scriptString)\n\t\tif err != nil {\n\t\t\tcheck(err)\n\t\t}\n\t\ttemplS.Execute(w, manager.Port)\n\t})\n\n\t\/\/ Websocket handling\n\twsHandler := manager.getLivereloadWsHandler()\n\thandler.Handle(\"\/livereload\", websocket.Handler(wsHandler))\n\n\treturn handler\n}\n\ntype livereloadResponse struct {\n\tCommand string `json:\"command\"`\n}\n\ntype livereloadHello struct {\n\tCommand string `json:\"command\"`\n\tProtocols []string `json:\"protocols\"`\n\tServerName string `json:\"serverName\"`\n}\n\ntype livereloadChange struct {\n\tCommand string `json:\"command\"`\n\tPath string `json:\"path\"`\n\tLiveCSS bool `json:\"liveCSS\"`\n}\n\nfunc (manager *managerServer) getLivereloadWsHandler() func(ws *websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\n\t\tmanager.sockets = append(manager.sockets, ws)\n\n\t\tfor {\n\t\t\t\/\/ websocket.Message.Send(ws, \"Hello, Client!\")\n\t\t\twsData := new(livereloadResponse)\n\t\t\terr := websocket.JSON.Receive(ws, &wsData)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tcheck(err)\n\t\t\t\treturn\n\t\t\t} else if err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wsData.Command == \"hello\" {\n\t\t\t\tresponse := livereloadHello{\n\t\t\t\t\tCommand: \"hello\",\n\t\t\t\t\tProtocols: []string{\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/connection-check-1\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-7\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-8\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/official-9\",\n\t\t\t\t\t\t\"http:\/\/livereload.com\/protocols\/2.x-origin-version-negotiation\",\n\t\t\t\t\t},\n\t\t\t\t\tServerName: \"Webby\",\n\t\t\t\t}\n\t\t\t\tdevlog(\"Sending livereload hello\")\n\t\t\t\twebsocket.JSON.Send(ws, response)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package registry provides domain abstractions over container registries.\npackage registry\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\tdockerregistry \"github.com\/heroku\/docker-registry-client\/registry\"\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/weaveworks\/flux\"\n)\n\nconst (\n\tdockerHubHost = \"index.docker.io\"\n\tdockerHubLibrary = \"library\"\n)\n\ntype creds struct {\n\tusername, password string\n}\n\n\/\/ Credentials to a (Docker) registry.\ntype Credentials struct {\n\tm map[string]creds\n}\n\n\/\/ Client is a handle to a registry.\ntype Client struct {\n\tCredentials Credentials\n\tLogger log.Logger\n}\n\ntype roundtripperFunc func(*http.Request) (*http.Response, error)\n\nfunc (f roundtripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {\n\treturn f(r)\n}\n\n\/\/ GetRepository yields a repository matching the given name, if any exists.\n\/\/ Repository may be of various forms, in which case omitted elements take\n\/\/ assumed defaults.\n\/\/\n\/\/ helloworld -> index.docker.io\/library\/helloworld\n\/\/ foo\/helloworld -> index.docker.io\/foo\/helloworld\n\/\/ quay.io\/foo\/helloworld -> quay.io\/foo\/helloworld\n\/\/\nfunc (c *Client) GetRepository(repository string) ([]flux.ImageDescription, error) {\n\tvar host, org, image string\n\tparts := strings.Split(repository, \"\/\")\n\tswitch len(parts) {\n\tcase 1:\n\t\thost = dockerHubHost\n\t\torg = dockerHubLibrary\n\t\timage = parts[0]\n\tcase 2:\n\t\thost = dockerHubHost\n\t\torg = parts[0]\n\t\timage = parts[1]\n\tcase 3:\n\t\thost = parts[0]\n\t\torg = parts[1]\n\t\timage = parts[2]\n\tdefault:\n\t\treturn nil, fmt.Errorf(`expected image name as either \"<host>\/<org>\/<image>\", \"<org>\/<image>\", or \"<image>\"`)\n\t}\n\n\thostlessImageName := fmt.Sprintf(\"%s\/%s\", org, image)\n\thttphost := \"https:\/\/\" + host\n\n\t\/\/ quay.io wants us to use cookies for authorisation, so we have\n\t\/\/ to construct one (the default client has none). This means a\n\t\/\/ bit more constructing things to be able to make a registry\n\t\/\/ client literal, rather than calling .New()\n\tjar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tauth := c.Credentials.credsFor(host)\n\n\t\/\/ A context we'll use to cancel requests on error\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Use the wrapper to fix headers for quay.io, and remember bearer tokens\n\tvar transport http.RoundTripper = &wwwAuthenticateFixer{transport: http.DefaultTransport}\n\t\/\/ Now the auth-handling wrappers that come with the library\n\ttransport = dockerregistry.WrapTransport(transport, httphost, auth.username, auth.password)\n\n\tclient := &dockerregistry.Registry{\n\t\tURL: httphost,\n\t\tClient: &http.Client{\n\t\t\tTransport: roundtripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\t\t\treturn transport.RoundTrip(r.WithContext(ctx))\n\t\t\t}),\n\t\t\tJar: jar,\n\t\t},\n\t\tLogf: dockerregistry.Quiet,\n\t}\n\n\ttags, err := client.Tags(hostlessImageName)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, err\n\t}\n\n\treturn c.tagsToRepository(cancel, client, repository, tags)\n}\n\nfunc (c *Client) lookupImage(client *dockerregistry.Registry, repoName, tag string) (flux.ImageDescription, error) {\n\t\/\/ Minor cheat: this will work whether the registry (e.g.,\n\t\/\/ quay.io) is part of the repo name given or not.\n\tid := flux.MakeImageID(\"\", repoName, tag)\n\timg := flux.ImageDescription{ID: id}\n\n\t_, name, _ := id.Components()\n\tmeta, err := client.Manifest(name, tag)\n\tif err != nil {\n\t\treturn img, err\n\t}\n\t\/\/ the manifest includes some v1-backwards-compatibility data,\n\t\/\/ oddly called \"History\", which are layer metadata as JSON\n\t\/\/ strings; these appear most-recent (i.e., topmost layer) first,\n\t\/\/ so happily we can just decode the first entry to get a created\n\t\/\/ time.\n\ttype v1image struct {\n\t\tCreated time.Time `json:\"created\"`\n\t}\n\tvar topmost v1image\n\tif err = json.Unmarshal([]byte(meta.History[0].V1Compatibility), &topmost); err == nil {\n\t\timg.CreatedAt = topmost.Created\n\t}\n\n\treturn img, err\n}\n\nfunc (c *Client) tagsToRepository(cancel func(), client *dockerregistry.Registry, repoName string, tags []string) ([]flux.ImageDescription, error) {\n\t\/\/ one way or another, we'll be finishing all requests\n\tdefer cancel()\n\n\ttype result struct {\n\t\timage flux.ImageDescription\n\t\terr error\n\t}\n\n\tfetched := make(chan result, len(tags))\n\n\tfor _, tag := range tags {\n\t\tgo func(t string) {\n\t\t\timg, err := c.lookupImage(client, repoName, t)\n\t\t\tif err != nil {\n\t\t\t\tc.Logger.Log(\"registry-metadata-err\", err)\n\t\t\t}\n\t\t\tfetched <- result{img, err}\n\t\t}(tag)\n\t}\n\n\timages := make([]flux.ImageDescription, cap(fetched))\n\tfor i := 0; i < cap(fetched); i++ {\n\t\tres := <-fetched\n\t\tif res.err != nil {\n\t\t\treturn nil, res.err\n\t\t}\n\t\timages[i] = res.image\n\t}\n\n\tsort.Sort(byCreatedDesc(images))\n\treturn images, nil\n}\n\n\/\/ --- Credentials\n\n\/\/ NoCredentials returns a usable but empty credentials object.\nfunc NoCredentials() Credentials {\n\treturn Credentials{\n\t\tm: map[string]creds{},\n\t}\n}\n\n\/\/ CredentialsFromFile returns a credentials object parsed from the given\n\/\/ filepath.\nfunc CredentialsFromFile(path string) (Credentials, error) {\n\tbytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn Credentials{}, err\n\t}\n\n\ttype dockerConfig struct {\n\t\tAuths map[string]struct {\n\t\t\tAuth string `json:\"auth\"`\n\t\t\tEmail string `json:\"email\"`\n\t\t} `json:\"auths\"`\n\t}\n\n\tvar config dockerConfig\n\tif err = json.Unmarshal(bytes, &config); err != nil {\n\t\treturn Credentials{}, err\n\t}\n\n\tm := map[string]creds{}\n\tfor host, entry := range config.Auths {\n\t\tdecodedAuth, err := base64.StdEncoding.DecodeString(entry.Auth)\n\t\tif err != nil {\n\t\t\treturn Credentials{}, err\n\t\t}\n\t\tauthParts := strings.SplitN(string(decodedAuth), \":\", 2)\n\t\tm[host] = creds{\n\t\t\tusername: authParts[0],\n\t\t\tpassword: authParts[1],\n\t\t}\n\t}\n\treturn Credentials{m: m}, nil\n}\n\nfunc CredentialsFromConfig(config flux.UnsafeInstanceConfig) (Credentials, error) {\n\tm := map[string]creds{}\n\tfor host, entry := range config.Registry.Auths {\n\t\tdecodedAuth, err := base64.StdEncoding.DecodeString(entry.Auth)\n\t\tif err != nil {\n\t\t\treturn Credentials{}, err\n\t\t}\n\t\tauthParts := strings.SplitN(string(decodedAuth), \":\", 2)\n\t\tm[host] = creds{\n\t\t\tusername: authParts[0],\n\t\t\tpassword: authParts[1],\n\t\t}\n\t}\n\treturn Credentials{m: m}, nil\n}\n\n\/\/ For yields an authenticator for a specific host.\nfunc (cs Credentials) credsFor(host string) creds {\n\tif cred, found := cs.m[host]; found {\n\t\treturn cred\n\t}\n\tif cred, found := cs.m[fmt.Sprintf(\"https:\/\/%s\/v1\/\", host)]; found {\n\t\treturn cred\n\t}\n\treturn creds{}\n}\n\n\/\/ Hosts returns all of the hosts available in these credentials.\nfunc (cs Credentials) Hosts() []string {\n\thosts := []string{}\n\tfor host := range cs.m {\n\t\thosts = append(hosts, host)\n\t}\n\treturn hosts\n}\n\n\/\/ -----\n\ntype byCreatedDesc []flux.ImageDescription\n\nfunc (is byCreatedDesc) Len() int { return len(is) }\nfunc (is byCreatedDesc) Swap(i, j int) { is[i], is[j] = is[j], is[i] }\nfunc (is byCreatedDesc) Less(i, j int) bool {\n\tif is[i].CreatedAt.Equal(is[j].CreatedAt) {\n\t\treturn is[i].ID < is[j].ID\n\t}\n\treturn is[i].CreatedAt.After(is[j].CreatedAt)\n}\n\n\/\/ Log requests as they go through, and responses as they come back.\n\/\/ transport = logTransport{\n\/\/ \ttransport: transport,\n\/\/ \tlog: func(format string, args ...interface{}) {\n\/\/ \t\tc.Logger.Log(\"registry-client-log\", fmt.Sprintf(format, args...))\n\/\/ \t},\n\/\/ }\ntype logTransport struct {\n\tlog func(string, ...interface{})\n\ttransport http.RoundTripper\n}\n\nfunc (t logTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tt.log(\"Request %s %#v\", req.URL, req)\n\tres, err := t.transport.RoundTrip(req)\n\tt.log(\"Response %#v\", res)\n\tif err != nil {\n\t\tt.log(\"Error %s\", err)\n\t}\n\treturn res, err\n}\n<commit_msg>Correct lookup of \"library\" images<commit_after>\/\/ Package registry provides domain abstractions over container registries.\npackage registry\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/cookiejar\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\tdockerregistry \"github.com\/heroku\/docker-registry-client\/registry\"\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/weaveworks\/flux\"\n)\n\nconst (\n\tdockerHubHost = \"index.docker.io\"\n\tdockerHubLibrary = \"library\"\n)\n\ntype creds struct {\n\tusername, password string\n}\n\n\/\/ Credentials to a (Docker) registry.\ntype Credentials struct {\n\tm map[string]creds\n}\n\n\/\/ Client is a handle to a registry.\ntype Client struct {\n\tCredentials Credentials\n\tLogger log.Logger\n}\n\ntype roundtripperFunc func(*http.Request) (*http.Response, error)\n\nfunc (f roundtripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {\n\treturn f(r)\n}\n\n\/\/ GetRepository yields a repository matching the given name, if any exists.\n\/\/ Repository may be of various forms, in which case omitted elements take\n\/\/ assumed defaults.\n\/\/\n\/\/ helloworld -> index.docker.io\/library\/helloworld\n\/\/ foo\/helloworld -> index.docker.io\/foo\/helloworld\n\/\/ quay.io\/foo\/helloworld -> quay.io\/foo\/helloworld\n\/\/\nfunc (c *Client) GetRepository(repository string) ([]flux.ImageDescription, error) {\n\tvar host, org, image string\n\tparts := strings.Split(repository, \"\/\")\n\tswitch len(parts) {\n\tcase 1:\n\t\thost = dockerHubHost\n\t\torg = dockerHubLibrary\n\t\timage = parts[0]\n\tcase 2:\n\t\thost = dockerHubHost\n\t\torg = parts[0]\n\t\timage = parts[1]\n\tcase 3:\n\t\thost = parts[0]\n\t\torg = parts[1]\n\t\timage = parts[2]\n\tdefault:\n\t\treturn nil, fmt.Errorf(`expected image name as either \"<host>\/<org>\/<image>\", \"<org>\/<image>\", or \"<image>\"`)\n\t}\n\n\thostlessImageName := fmt.Sprintf(\"%s\/%s\", org, image)\n\thttphost := \"https:\/\/\" + host\n\n\t\/\/ quay.io wants us to use cookies for authorisation, so we have\n\t\/\/ to construct one (the default client has none). This means a\n\t\/\/ bit more constructing things to be able to make a registry\n\t\/\/ client literal, rather than calling .New()\n\tjar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tauth := c.Credentials.credsFor(host)\n\n\t\/\/ A context we'll use to cancel requests on error\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t\/\/ Use the wrapper to fix headers for quay.io, and remember bearer tokens\n\tvar transport http.RoundTripper = &wwwAuthenticateFixer{transport: http.DefaultTransport}\n\t\/\/ Now the auth-handling wrappers that come with the library\n\ttransport = dockerregistry.WrapTransport(transport, httphost, auth.username, auth.password)\n\n\tclient := &dockerregistry.Registry{\n\t\tURL: httphost,\n\t\tClient: &http.Client{\n\t\t\tTransport: roundtripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\t\t\treturn transport.RoundTrip(r.WithContext(ctx))\n\t\t\t}),\n\t\t\tJar: jar,\n\t\t},\n\t\tLogf: dockerregistry.Quiet,\n\t}\n\n\ttags, err := client.Tags(hostlessImageName)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, err\n\t}\n\n\t\/\/ the hostlessImageName is canonicalised, in the sense that it\n\t\/\/ includes \"library\" as the org, if unqualified -- e.g.,\n\t\/\/ `library\/nats`. We need that to fetch the tags etc. However, we\n\t\/\/ want the results to use the *actual* name of the images to be\n\t\/\/ as supplied, e.g., `nats`.\n\treturn c.tagsToRepository(cancel, client, hostlessImageName, repository, tags)\n}\n\nfunc (c *Client) lookupImage(client *dockerregistry.Registry, lookupName, imageName, tag string) (flux.ImageDescription, error) {\n\t\/\/ Minor cheat: this will give the correct result even if the\n\t\/\/ imageName includes a host\n\tid := flux.MakeImageID(\"\", imageName, tag)\n\timg := flux.ImageDescription{ID: id}\n\n\tmeta, err := client.Manifest(lookupName, tag)\n\tif err != nil {\n\t\treturn img, err\n\t}\n\t\/\/ the manifest includes some v1-backwards-compatibility data,\n\t\/\/ oddly called \"History\", which are layer metadata as JSON\n\t\/\/ strings; these appear most-recent (i.e., topmost layer) first,\n\t\/\/ so happily we can just decode the first entry to get a created\n\t\/\/ time.\n\ttype v1image struct {\n\t\tCreated time.Time `json:\"created\"`\n\t}\n\tvar topmost v1image\n\tif err = json.Unmarshal([]byte(meta.History[0].V1Compatibility), &topmost); err == nil {\n\t\timg.CreatedAt = topmost.Created\n\t}\n\n\treturn img, err\n}\n\nfunc (c *Client) tagsToRepository(cancel func(), client *dockerregistry.Registry, lookupName, imageName string, tags []string) ([]flux.ImageDescription, error) {\n\t\/\/ one way or another, we'll be finishing all requests\n\tdefer cancel()\n\n\ttype result struct {\n\t\timage flux.ImageDescription\n\t\terr error\n\t}\n\n\tfetched := make(chan result, len(tags))\n\n\tfor _, tag := range tags {\n\t\tgo func(t string) {\n\t\t\timg, err := c.lookupImage(client, lookupName, imageName, t)\n\t\t\tif err != nil {\n\t\t\t\tc.Logger.Log(\"registry-metadata-err\", err)\n\t\t\t}\n\t\t\tfetched <- result{img, err}\n\t\t}(tag)\n\t}\n\n\timages := make([]flux.ImageDescription, cap(fetched))\n\tfor i := 0; i < cap(fetched); i++ {\n\t\tres := <-fetched\n\t\tif res.err != nil {\n\t\t\treturn nil, res.err\n\t\t}\n\t\timages[i] = res.image\n\t}\n\n\tsort.Sort(byCreatedDesc(images))\n\treturn images, nil\n}\n\n\/\/ --- Credentials\n\n\/\/ NoCredentials returns a usable but empty credentials object.\nfunc NoCredentials() Credentials {\n\treturn Credentials{\n\t\tm: map[string]creds{},\n\t}\n}\n\n\/\/ CredentialsFromFile returns a credentials object parsed from the given\n\/\/ filepath.\nfunc CredentialsFromFile(path string) (Credentials, error) {\n\tbytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn Credentials{}, err\n\t}\n\n\ttype dockerConfig struct {\n\t\tAuths map[string]struct {\n\t\t\tAuth string `json:\"auth\"`\n\t\t\tEmail string `json:\"email\"`\n\t\t} `json:\"auths\"`\n\t}\n\n\tvar config dockerConfig\n\tif err = json.Unmarshal(bytes, &config); err != nil {\n\t\treturn Credentials{}, err\n\t}\n\n\tm := map[string]creds{}\n\tfor host, entry := range config.Auths {\n\t\tdecodedAuth, err := base64.StdEncoding.DecodeString(entry.Auth)\n\t\tif err != nil {\n\t\t\treturn Credentials{}, err\n\t\t}\n\t\tauthParts := strings.SplitN(string(decodedAuth), \":\", 2)\n\t\tm[host] = creds{\n\t\t\tusername: authParts[0],\n\t\t\tpassword: authParts[1],\n\t\t}\n\t}\n\treturn Credentials{m: m}, nil\n}\n\nfunc CredentialsFromConfig(config flux.UnsafeInstanceConfig) (Credentials, error) {\n\tm := map[string]creds{}\n\tfor host, entry := range config.Registry.Auths {\n\t\tdecodedAuth, err := base64.StdEncoding.DecodeString(entry.Auth)\n\t\tif err != nil {\n\t\t\treturn Credentials{}, err\n\t\t}\n\t\tauthParts := strings.SplitN(string(decodedAuth), \":\", 2)\n\t\tm[host] = creds{\n\t\t\tusername: authParts[0],\n\t\t\tpassword: authParts[1],\n\t\t}\n\t}\n\treturn Credentials{m: m}, nil\n}\n\n\/\/ For yields an authenticator for a specific host.\nfunc (cs Credentials) credsFor(host string) creds {\n\tif cred, found := cs.m[host]; found {\n\t\treturn cred\n\t}\n\tif cred, found := cs.m[fmt.Sprintf(\"https:\/\/%s\/v1\/\", host)]; found {\n\t\treturn cred\n\t}\n\treturn creds{}\n}\n\n\/\/ Hosts returns all of the hosts available in these credentials.\nfunc (cs Credentials) Hosts() []string {\n\thosts := []string{}\n\tfor host := range cs.m {\n\t\thosts = append(hosts, host)\n\t}\n\treturn hosts\n}\n\n\/\/ -----\n\ntype byCreatedDesc []flux.ImageDescription\n\nfunc (is byCreatedDesc) Len() int { return len(is) }\nfunc (is byCreatedDesc) Swap(i, j int) { is[i], is[j] = is[j], is[i] }\nfunc (is byCreatedDesc) Less(i, j int) bool {\n\tif is[i].CreatedAt.Equal(is[j].CreatedAt) {\n\t\treturn is[i].ID < is[j].ID\n\t}\n\treturn is[i].CreatedAt.After(is[j].CreatedAt)\n}\n\n\/\/ Log requests as they go through, and responses as they come back.\n\/\/ transport = logTransport{\n\/\/ \ttransport: transport,\n\/\/ \tlog: func(format string, args ...interface{}) {\n\/\/ \t\tc.Logger.Log(\"registry-client-log\", fmt.Sprintf(format, args...))\n\/\/ \t},\n\/\/ }\ntype logTransport struct {\n\tlog func(string, ...interface{})\n\ttransport http.RoundTripper\n}\n\nfunc (t logTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tt.log(\"Request %s %#v\", req.URL, req)\n\tres, err := t.transport.RoundTrip(req)\n\tt.log(\"Response %#v\", res)\n\tif err != nil {\n\t\tt.log(\"Error %s\", err)\n\t}\n\treturn res, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/pat\"\n)\n\nfunc main() {\n\tr := pat.New()\n\tr.NewRoute().PathPrefix(\"\/\").Handler(http.HandlerFunc(MirrorHandler)).Methods(\"POST\", \"PUT\", \"GET\", \"DELETE\")\n\n\thttp.Handle(\"\/\", r)\n\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"Listen And Serve: \", err)\n\t}\n}\n\nfunc MirrorHandler(w http.ResponseWriter, req *http.Request) {\n\tbody, _ := ioutil.ReadAll(req.Body)\n\tfmt.Printf(\"%s > %q\\n\", req.URL, body)\n}\n<commit_msg>simplified and made more useful output<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n)\n\nfunc main() {\n\thttp.HandleFunc(\"\/\", mirrorHandler)\n\tlog.Fatal(http.ListenAndServe(\":12345\", nil))\n}\n\nfunc mirrorHandler(w http.ResponseWriter, req *http.Request) {\n\tb, err := httputil.DumpRequest(req, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\", b)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/docker\/machine\/commands\/mcndirs\"\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/crashreport\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnerror\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\t\"github.com\/docker\/machine\/libmachine\/persist\"\n\t\"github.com\/docker\/machine\/libmachine\/ssh\"\n)\n\nconst (\n\tdefaultMachineName = \"default\"\n)\n\nvar (\n\tErrHostLoad = errors.New(\"All specified hosts had errors loading their configuration.\")\n\tErrNoDefault = fmt.Errorf(\"Error: No machine name(s) specified and no %q machine exists.\", defaultMachineName)\n\tErrNoMachineSpecified = errors.New(\"Error: Expected to get one or more machine names as arguments\")\n\tErrExpectedOneMachine = errors.New(\"Error: Expected one machine name as an argument\")\n\tErrTooManyArguments = errors.New(\"Error: Too many arguments given\")\n\n\tosExit = func(code int) { os.Exit(code) }\n)\n\n\/\/ CommandLine contains all the information passed to the commands on the command line.\ntype CommandLine interface {\n\tShowHelp()\n\n\tShowVersion()\n\n\tApplication() *cli.App\n\n\tArgs() cli.Args\n\n\tBool(name string) bool\n\n\tInt(name string) int\n\n\tString(name string) string\n\n\tStringSlice(name string) []string\n\n\tGlobalString(name string) string\n\n\tFlagNames() (names []string)\n\n\tGeneric(name string) interface{}\n}\n\ntype contextCommandLine struct {\n\t*cli.Context\n}\n\nfunc (c *contextCommandLine) ShowHelp() {\n\tcli.ShowCommandHelp(c.Context, c.Command.Name)\n}\n\nfunc (c *contextCommandLine) ShowVersion() {\n\tcli.ShowVersion(c.Context)\n}\n\nfunc (c *contextCommandLine) Application() *cli.App {\n\treturn c.App\n}\n\n\/\/ targetHost returns a specific host name if one is indicated by the first CLI\n\/\/ arg, or the default host name if no host is specified.\nfunc targetHost(c CommandLine, api libmachine.API) (string, error) {\n\tif len(c.Args()) == 0 {\n\t\tdefaultExists, err := api.Exists(defaultMachineName)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Error checking if host %q exists: %s\", defaultMachineName, err)\n\t\t}\n\n\t\tif defaultExists {\n\t\t\treturn defaultMachineName, nil\n\t\t}\n\n\t\treturn \"\", ErrNoDefault\n\t}\n\n\treturn c.Args()[0], nil\n}\n\nfunc runAction(actionName string, c CommandLine, api libmachine.API) error {\n\tvar (\n\t\thostsToLoad []string\n\t)\n\n\t\/\/ If user did not specify a machine name explicitly, use the 'default'\n\t\/\/ machine if it exists. This allows short form commands such as\n\t\/\/ 'docker-machine stop' for convenience.\n\tif len(c.Args()) == 0 {\n\t\ttarget, err := targetHost(c, api)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thostsToLoad = []string{target}\n\t} else {\n\t\thostsToLoad = c.Args()\n\t}\n\n\thosts, hostsInError := persist.LoadHosts(api, hostsToLoad)\n\n\tif len(hostsInError) > 0 {\n\t\terrs := []error{}\n\t\tfor _, err := range hostsInError {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t\treturn consolidateErrs(errs)\n\t}\n\n\tif len(hosts) == 0 {\n\t\treturn ErrHostLoad\n\t}\n\n\tif errs := runActionForeachMachine(actionName, hosts); len(errs) > 0 {\n\t\treturn consolidateErrs(errs)\n\t}\n\n\tfor _, h := range hosts {\n\t\tif err := api.Save(h); err != nil {\n\t\t\treturn fmt.Errorf(\"Error saving host to store: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc runCommand(command func(commandLine CommandLine, api libmachine.API) error) func(context *cli.Context) {\n\treturn func(context *cli.Context) {\n\t\tapi := libmachine.NewClient(mcndirs.GetBaseDir(), mcndirs.GetMachineCertDir())\n\t\tdefer api.Close()\n\n\t\tif context.GlobalBool(\"native-ssh\") {\n\t\t\tapi.SSHClientType = ssh.Native\n\t\t}\n\t\tapi.GithubAPIToken = context.GlobalString(\"github-api-token\")\n\t\tapi.Filestore.Path = context.GlobalString(\"storage-path\")\n\n\t\t\/\/ TODO (nathanleclaire): These should ultimately be accessed\n\t\t\/\/ through the libmachine client by the rest of the code and\n\t\t\/\/ not through their respective modules. For now, however,\n\t\t\/\/ they are also being set the way that they originally were\n\t\t\/\/ set to preserve backwards compatibility.\n\t\tmcndirs.BaseDir = api.Filestore.Path\n\t\tmcnutils.GithubAPIToken = api.GithubAPIToken\n\t\tssh.SetDefaultClient(api.SSHClientType)\n\n\t\tif err := command(&contextCommandLine{context}, api); err != nil {\n\t\t\tlog.Error(err)\n\n\t\t\tif crashErr, ok := err.(crashreport.CrashError); ok {\n\t\t\t\tcrashReporter := crashreport.NewCrashReporter(mcndirs.GetBaseDir(), context.GlobalString(\"bugsnag-api-token\"))\n\t\t\t\tcrashReporter.Send(crashErr)\n\n\t\t\t\tif _, ok := crashErr.Cause.(mcnerror.ErrDuringPreCreate); ok {\n\t\t\t\t\tosExit(3)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tosExit(1)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc confirmInput(msg string) (bool, error) {\n\tfmt.Printf(\"%s (y\/n): \", msg)\n\n\tvar resp string\n\t_, err := fmt.Scanln(&resp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tconfirmed := strings.Index(strings.ToLower(resp), \"y\") == 0\n\treturn confirmed, nil\n}\n\nvar Commands = []cli.Command{\n\t{\n\t\tName: \"active\",\n\t\tUsage: \"Print which machine is active\",\n\t\tAction: runCommand(cmdActive),\n\t},\n\t{\n\t\tName: \"config\",\n\t\tUsage: \"Print the connection config for machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdConfig),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"swarm\",\n\t\t\t\tUsage: \"Display the Swarm config instead of the Docker daemon\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tFlags: sharedCreateFlags,\n\t\tName: \"create\",\n\t\tUsage: \"Create a machine\",\n\t\tDescription: fmt.Sprintf(\"Run '%s create --driver name' to include the create flags for that driver in the help text.\", os.Args[0]),\n\t\tAction: runCommand(cmdCreateOuter),\n\t\tSkipFlagParsing: true,\n\t},\n\t{\n\t\tName: \"env\",\n\t\tUsage: \"Display the commands to set up the environment for the Docker client\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdEnv),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"swarm\",\n\t\t\t\tUsage: \"Display the Swarm config instead of the Docker daemon\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"shell\",\n\t\t\t\tUsage: \"Force environment to be configured for a specified shell: [fish, cmd, powershell], default is bash\",\n\t\t\t\tValue: \"bash\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"unset, u\",\n\t\t\t\tUsage: \"Unset variables instead of setting them\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"no-proxy\",\n\t\t\t\tUsage: \"Add machine IP to NO_PROXY environment variable\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"inspect\",\n\t\tUsage: \"Inspect information about a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdInspect),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"format, f\",\n\t\t\t\tUsage: \"Format the output using the given go template.\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"ip\",\n\t\tUsage: \"Get the IP address of a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdIP),\n\t},\n\t{\n\t\tName: \"kill\",\n\t\tUsage: \"Kill a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdKill),\n\t},\n\t{\n\t\tName: \"ls\",\n\t\tUsage: \"List machines\",\n\t\tAction: runCommand(cmdLs),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"quiet, q\",\n\t\t\t\tUsage: \"Enable quiet mode\",\n\t\t\t},\n\t\t\tcli.StringSliceFlag{\n\t\t\t\tName: \"filter\",\n\t\t\t\tUsage: \"Filter output based on conditions provided\",\n\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t},\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"timeout, t\",\n\t\t\t\tUsage: fmt.Sprintf(\"Timeout in seconds, default to %s\", stateTimeoutDuration),\n\t\t\t\tValue: lsDefaultTimeout,\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"format, f\",\n\t\t\t\tUsage: \"Pretty-print machines using a Go template\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"provision\",\n\t\tUsage: \"Re-provision existing machines\",\n\t\tAction: runCommand(cmdProvision),\n\t},\n\t{\n\t\tName: \"regenerate-certs\",\n\t\tUsage: \"Regenerate TLS Certificates for a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRegenerateCerts),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"force, f\",\n\t\t\t\tUsage: \"Force rebuild and do not prompt\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"restart\",\n\t\tUsage: \"Restart a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRestart),\n\t},\n\t{\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"force, f\",\n\t\t\t\tUsage: \"Remove local configuration even if machine cannot be removed, also implies an automatic yes (`-y`)\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"y\",\n\t\t\t\tUsage: \"Assumes automatic yes to proceed with remove, without prompting further user confirmation\",\n\t\t\t},\n\t\t},\n\t\tName: \"rm\",\n\t\tUsage: \"Remove a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRm),\n\t},\n\t{\n\t\tName: \"ssh\",\n\t\tUsage: \"Log into or run a command on a machine with SSH.\",\n\t\tDescription: \"Arguments are [machine-name] [command]\",\n\t\tAction: runCommand(cmdSSH),\n\t\tSkipFlagParsing: true,\n\t},\n\t{\n\t\tName: \"scp\",\n\t\tUsage: \"Copy files between machines\",\n\t\tDescription: \"Arguments are [machine:][path] [machine:][path].\",\n\t\tAction: runCommand(cmdScp),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"recursive, r\",\n\t\t\t\tUsage: \"Copy files recursively (required to copy directories)\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"start\",\n\t\tUsage: \"Start a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdStart),\n\t},\n\t{\n\t\tName: \"status\",\n\t\tUsage: \"Get the status of a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdStatus),\n\t},\n\t{\n\t\tName: \"stop\",\n\t\tUsage: \"Stop a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdStop),\n\t},\n\t{\n\t\tName: \"upgrade\",\n\t\tUsage: \"Upgrade a machine to the latest version of Docker\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdUpgrade),\n\t},\n\t{\n\t\tName: \"url\",\n\t\tUsage: \"Get the URL of a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdURL),\n\t},\n\t{\n\t\tName: \"version\",\n\t\tUsage: \"Show the Docker Machine version or a machine docker version\",\n\t\tAction: runCommand(cmdVersion),\n\t},\n}\n\nfunc printIP(h *host.Host) func() error {\n\treturn func() error {\n\t\tip, err := h.Driver.GetIP()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting IP address: %s\", err)\n\t\t}\n\n\t\tfmt.Println(ip)\n\n\t\treturn nil\n\t}\n}\n\n\/\/ machineCommand maps the command name to the corresponding machine command.\n\/\/ We run commands concurrently and communicate back an error if there was one.\nfunc machineCommand(actionName string, host *host.Host, errorChan chan<- error) {\n\t\/\/ TODO: These actions should have their own type.\n\tcommands := map[string](func() error){\n\t\t\"configureAuth\": host.ConfigureAuth,\n\t\t\"start\": host.Start,\n\t\t\"stop\": host.Stop,\n\t\t\"restart\": host.Restart,\n\t\t\"kill\": host.Kill,\n\t\t\"upgrade\": host.Upgrade,\n\t\t\"ip\": printIP(host),\n\t\t\"provision\": host.Provision,\n\t}\n\n\tlog.Debugf(\"command=%s machine=%s\", actionName, host.Name)\n\n\terrorChan <- commands[actionName]()\n}\n\n\/\/ runActionForeachMachine will run the command across multiple machines\nfunc runActionForeachMachine(actionName string, machines []*host.Host) []error {\n\tvar (\n\t\tnumConcurrentActions = 0\n\t\terrorChan = make(chan error)\n\t\terrs = []error{}\n\t)\n\n\tfor _, machine := range machines {\n\t\tnumConcurrentActions++\n\t\tgo machineCommand(actionName, machine, errorChan)\n\t}\n\n\t\/\/ TODO: We should probably only do 5-10 of these\n\t\/\/ at a time, since otherwise cloud providers might\n\t\/\/ rate limit us.\n\tfor i := 0; i < numConcurrentActions; i++ {\n\t\tif err := <-errorChan; err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tclose(errorChan)\n\n\treturn errs\n}\n\nfunc consolidateErrs(errs []error) error {\n\tfinalErr := \"\"\n\tfor _, err := range errs {\n\t\tfinalErr = fmt.Sprintf(\"%s\\n%s\", finalErr, err)\n\t}\n\n\treturn errors.New(strings.TrimSpace(finalErr))\n}\n<commit_msg>FIX #2982 Fix auto detection<commit_after>package commands\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/docker\/machine\/commands\/mcndirs\"\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/crashreport\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/log\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnerror\"\n\t\"github.com\/docker\/machine\/libmachine\/mcnutils\"\n\t\"github.com\/docker\/machine\/libmachine\/persist\"\n\t\"github.com\/docker\/machine\/libmachine\/ssh\"\n)\n\nconst (\n\tdefaultMachineName = \"default\"\n)\n\nvar (\n\tErrHostLoad = errors.New(\"All specified hosts had errors loading their configuration.\")\n\tErrNoDefault = fmt.Errorf(\"Error: No machine name(s) specified and no %q machine exists.\", defaultMachineName)\n\tErrNoMachineSpecified = errors.New(\"Error: Expected to get one or more machine names as arguments\")\n\tErrExpectedOneMachine = errors.New(\"Error: Expected one machine name as an argument\")\n\tErrTooManyArguments = errors.New(\"Error: Too many arguments given\")\n\n\tosExit = func(code int) { os.Exit(code) }\n)\n\n\/\/ CommandLine contains all the information passed to the commands on the command line.\ntype CommandLine interface {\n\tShowHelp()\n\n\tShowVersion()\n\n\tApplication() *cli.App\n\n\tArgs() cli.Args\n\n\tBool(name string) bool\n\n\tInt(name string) int\n\n\tString(name string) string\n\n\tStringSlice(name string) []string\n\n\tGlobalString(name string) string\n\n\tFlagNames() (names []string)\n\n\tGeneric(name string) interface{}\n}\n\ntype contextCommandLine struct {\n\t*cli.Context\n}\n\nfunc (c *contextCommandLine) ShowHelp() {\n\tcli.ShowCommandHelp(c.Context, c.Command.Name)\n}\n\nfunc (c *contextCommandLine) ShowVersion() {\n\tcli.ShowVersion(c.Context)\n}\n\nfunc (c *contextCommandLine) Application() *cli.App {\n\treturn c.App\n}\n\n\/\/ targetHost returns a specific host name if one is indicated by the first CLI\n\/\/ arg, or the default host name if no host is specified.\nfunc targetHost(c CommandLine, api libmachine.API) (string, error) {\n\tif len(c.Args()) == 0 {\n\t\tdefaultExists, err := api.Exists(defaultMachineName)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Error checking if host %q exists: %s\", defaultMachineName, err)\n\t\t}\n\n\t\tif defaultExists {\n\t\t\treturn defaultMachineName, nil\n\t\t}\n\n\t\treturn \"\", ErrNoDefault\n\t}\n\n\treturn c.Args()[0], nil\n}\n\nfunc runAction(actionName string, c CommandLine, api libmachine.API) error {\n\tvar (\n\t\thostsToLoad []string\n\t)\n\n\t\/\/ If user did not specify a machine name explicitly, use the 'default'\n\t\/\/ machine if it exists. This allows short form commands such as\n\t\/\/ 'docker-machine stop' for convenience.\n\tif len(c.Args()) == 0 {\n\t\ttarget, err := targetHost(c, api)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thostsToLoad = []string{target}\n\t} else {\n\t\thostsToLoad = c.Args()\n\t}\n\n\thosts, hostsInError := persist.LoadHosts(api, hostsToLoad)\n\n\tif len(hostsInError) > 0 {\n\t\terrs := []error{}\n\t\tfor _, err := range hostsInError {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t\treturn consolidateErrs(errs)\n\t}\n\n\tif len(hosts) == 0 {\n\t\treturn ErrHostLoad\n\t}\n\n\tif errs := runActionForeachMachine(actionName, hosts); len(errs) > 0 {\n\t\treturn consolidateErrs(errs)\n\t}\n\n\tfor _, h := range hosts {\n\t\tif err := api.Save(h); err != nil {\n\t\t\treturn fmt.Errorf(\"Error saving host to store: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc runCommand(command func(commandLine CommandLine, api libmachine.API) error) func(context *cli.Context) {\n\treturn func(context *cli.Context) {\n\t\tapi := libmachine.NewClient(mcndirs.GetBaseDir(), mcndirs.GetMachineCertDir())\n\t\tdefer api.Close()\n\n\t\tif context.GlobalBool(\"native-ssh\") {\n\t\t\tapi.SSHClientType = ssh.Native\n\t\t}\n\t\tapi.GithubAPIToken = context.GlobalString(\"github-api-token\")\n\t\tapi.Filestore.Path = context.GlobalString(\"storage-path\")\n\n\t\t\/\/ TODO (nathanleclaire): These should ultimately be accessed\n\t\t\/\/ through the libmachine client by the rest of the code and\n\t\t\/\/ not through their respective modules. For now, however,\n\t\t\/\/ they are also being set the way that they originally were\n\t\t\/\/ set to preserve backwards compatibility.\n\t\tmcndirs.BaseDir = api.Filestore.Path\n\t\tmcnutils.GithubAPIToken = api.GithubAPIToken\n\t\tssh.SetDefaultClient(api.SSHClientType)\n\n\t\tif err := command(&contextCommandLine{context}, api); err != nil {\n\t\t\tlog.Error(err)\n\n\t\t\tif crashErr, ok := err.(crashreport.CrashError); ok {\n\t\t\t\tcrashReporter := crashreport.NewCrashReporter(mcndirs.GetBaseDir(), context.GlobalString(\"bugsnag-api-token\"))\n\t\t\t\tcrashReporter.Send(crashErr)\n\n\t\t\t\tif _, ok := crashErr.Cause.(mcnerror.ErrDuringPreCreate); ok {\n\t\t\t\t\tosExit(3)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tosExit(1)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc confirmInput(msg string) (bool, error) {\n\tfmt.Printf(\"%s (y\/n): \", msg)\n\n\tvar resp string\n\t_, err := fmt.Scanln(&resp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tconfirmed := strings.Index(strings.ToLower(resp), \"y\") == 0\n\treturn confirmed, nil\n}\n\nvar Commands = []cli.Command{\n\t{\n\t\tName: \"active\",\n\t\tUsage: \"Print which machine is active\",\n\t\tAction: runCommand(cmdActive),\n\t},\n\t{\n\t\tName: \"config\",\n\t\tUsage: \"Print the connection config for machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdConfig),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"swarm\",\n\t\t\t\tUsage: \"Display the Swarm config instead of the Docker daemon\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tFlags: sharedCreateFlags,\n\t\tName: \"create\",\n\t\tUsage: \"Create a machine\",\n\t\tDescription: fmt.Sprintf(\"Run '%s create --driver name' to include the create flags for that driver in the help text.\", os.Args[0]),\n\t\tAction: runCommand(cmdCreateOuter),\n\t\tSkipFlagParsing: true,\n\t},\n\t{\n\t\tName: \"env\",\n\t\tUsage: \"Display the commands to set up the environment for the Docker client\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdEnv),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"swarm\",\n\t\t\t\tUsage: \"Display the Swarm config instead of the Docker daemon\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"shell\",\n\t\t\t\tUsage: \"Force environment to be configured for a specified shell: [fish, cmd, powershell], default is auto-detect\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"unset, u\",\n\t\t\t\tUsage: \"Unset variables instead of setting them\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"no-proxy\",\n\t\t\t\tUsage: \"Add machine IP to NO_PROXY environment variable\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"inspect\",\n\t\tUsage: \"Inspect information about a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdInspect),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"format, f\",\n\t\t\t\tUsage: \"Format the output using the given go template.\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"ip\",\n\t\tUsage: \"Get the IP address of a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdIP),\n\t},\n\t{\n\t\tName: \"kill\",\n\t\tUsage: \"Kill a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdKill),\n\t},\n\t{\n\t\tName: \"ls\",\n\t\tUsage: \"List machines\",\n\t\tAction: runCommand(cmdLs),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"quiet, q\",\n\t\t\t\tUsage: \"Enable quiet mode\",\n\t\t\t},\n\t\t\tcli.StringSliceFlag{\n\t\t\t\tName: \"filter\",\n\t\t\t\tUsage: \"Filter output based on conditions provided\",\n\t\t\t\tValue: &cli.StringSlice{},\n\t\t\t},\n\t\t\tcli.IntFlag{\n\t\t\t\tName: \"timeout, t\",\n\t\t\t\tUsage: fmt.Sprintf(\"Timeout in seconds, default to %s\", stateTimeoutDuration),\n\t\t\t\tValue: lsDefaultTimeout,\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"format, f\",\n\t\t\t\tUsage: \"Pretty-print machines using a Go template\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"provision\",\n\t\tUsage: \"Re-provision existing machines\",\n\t\tAction: runCommand(cmdProvision),\n\t},\n\t{\n\t\tName: \"regenerate-certs\",\n\t\tUsage: \"Regenerate TLS Certificates for a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRegenerateCerts),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"force, f\",\n\t\t\t\tUsage: \"Force rebuild and do not prompt\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"restart\",\n\t\tUsage: \"Restart a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRestart),\n\t},\n\t{\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"force, f\",\n\t\t\t\tUsage: \"Remove local configuration even if machine cannot be removed, also implies an automatic yes (`-y`)\",\n\t\t\t},\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"y\",\n\t\t\t\tUsage: \"Assumes automatic yes to proceed with remove, without prompting further user confirmation\",\n\t\t\t},\n\t\t},\n\t\tName: \"rm\",\n\t\tUsage: \"Remove a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdRm),\n\t},\n\t{\n\t\tName: \"ssh\",\n\t\tUsage: \"Log into or run a command on a machine with SSH.\",\n\t\tDescription: \"Arguments are [machine-name] [command]\",\n\t\tAction: runCommand(cmdSSH),\n\t\tSkipFlagParsing: true,\n\t},\n\t{\n\t\tName: \"scp\",\n\t\tUsage: \"Copy files between machines\",\n\t\tDescription: \"Arguments are [machine:][path] [machine:][path].\",\n\t\tAction: runCommand(cmdScp),\n\t\tFlags: []cli.Flag{\n\t\t\tcli.BoolFlag{\n\t\t\t\tName: \"recursive, r\",\n\t\t\t\tUsage: \"Copy files recursively (required to copy directories)\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tName: \"start\",\n\t\tUsage: \"Start a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdStart),\n\t},\n\t{\n\t\tName: \"status\",\n\t\tUsage: \"Get the status of a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdStatus),\n\t},\n\t{\n\t\tName: \"stop\",\n\t\tUsage: \"Stop a machine\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdStop),\n\t},\n\t{\n\t\tName: \"upgrade\",\n\t\tUsage: \"Upgrade a machine to the latest version of Docker\",\n\t\tDescription: \"Argument(s) are one or more machine names.\",\n\t\tAction: runCommand(cmdUpgrade),\n\t},\n\t{\n\t\tName: \"url\",\n\t\tUsage: \"Get the URL of a machine\",\n\t\tDescription: \"Argument is a machine name.\",\n\t\tAction: runCommand(cmdURL),\n\t},\n\t{\n\t\tName: \"version\",\n\t\tUsage: \"Show the Docker Machine version or a machine docker version\",\n\t\tAction: runCommand(cmdVersion),\n\t},\n}\n\nfunc printIP(h *host.Host) func() error {\n\treturn func() error {\n\t\tip, err := h.Driver.GetIP()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting IP address: %s\", err)\n\t\t}\n\n\t\tfmt.Println(ip)\n\n\t\treturn nil\n\t}\n}\n\n\/\/ machineCommand maps the command name to the corresponding machine command.\n\/\/ We run commands concurrently and communicate back an error if there was one.\nfunc machineCommand(actionName string, host *host.Host, errorChan chan<- error) {\n\t\/\/ TODO: These actions should have their own type.\n\tcommands := map[string](func() error){\n\t\t\"configureAuth\": host.ConfigureAuth,\n\t\t\"start\": host.Start,\n\t\t\"stop\": host.Stop,\n\t\t\"restart\": host.Restart,\n\t\t\"kill\": host.Kill,\n\t\t\"upgrade\": host.Upgrade,\n\t\t\"ip\": printIP(host),\n\t\t\"provision\": host.Provision,\n\t}\n\n\tlog.Debugf(\"command=%s machine=%s\", actionName, host.Name)\n\n\terrorChan <- commands[actionName]()\n}\n\n\/\/ runActionForeachMachine will run the command across multiple machines\nfunc runActionForeachMachine(actionName string, machines []*host.Host) []error {\n\tvar (\n\t\tnumConcurrentActions = 0\n\t\terrorChan = make(chan error)\n\t\terrs = []error{}\n\t)\n\n\tfor _, machine := range machines {\n\t\tnumConcurrentActions++\n\t\tgo machineCommand(actionName, machine, errorChan)\n\t}\n\n\t\/\/ TODO: We should probably only do 5-10 of these\n\t\/\/ at a time, since otherwise cloud providers might\n\t\/\/ rate limit us.\n\tfor i := 0; i < numConcurrentActions; i++ {\n\t\tif err := <-errorChan; err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\tclose(errorChan)\n\n\treturn errs\n}\n\nfunc consolidateErrs(errs []error) error {\n\tfinalErr := \"\"\n\tfor _, err := range errs {\n\t\tfinalErr = fmt.Sprintf(\"%s\\n%s\", finalErr, err)\n\t}\n\n\treturn errors.New(strings.TrimSpace(finalErr))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage main\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/mattn\/go-colorable\"\n\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ Windows doesn't process ANSI sequences natively, so we wrap\n\/\/ os.Stdout for improved user experience for Windows client\ntype WrappedWriteCloser struct {\n\tio.Closer\n\twrapper io.Writer\n}\n\nfunc (wwc *WrappedWriteCloser) Write(p []byte) (int, error) {\n\treturn wwc.wrapper.Write(p)\n}\n\nfunc (c *execCmd) getStdout() io.WriteCloser {\n\treturn &WrappedWriteCloser{os.Stdout, colorable.NewColorableStdout()}\n}\n\nfunc (c *execCmd) getTERM() (string, bool) {\n\treturn \"dumb\", true\n}\n\nfunc (c *execCmd) controlSocketHandler(control *websocket.Conn) {\n\t\/\/ TODO: figure out what the equivalent of signal.SIGWINCH is on\n\t\/\/ windows and use that; for now if you resize your terminal it just\n\t\/\/ won't work quite correctly.\n\terr := c.sendTermSize(control)\n\tif err != nil {\n\t\tlogger.Debugf(\"error setting term size %s\", err)\n\t}\n}\n<commit_msg>lxc\/exec: Fix signal handler for Windows<commit_after>\/\/ +build windows\n\npackage main\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/mattn\/go-colorable\"\n\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ Windows doesn't process ANSI sequences natively, so we wrap\n\/\/ os.Stdout for improved user experience for Windows client\ntype WrappedWriteCloser struct {\n\tio.Closer\n\twrapper io.Writer\n}\n\nfunc (wwc *WrappedWriteCloser) Write(p []byte) (int, error) {\n\treturn wwc.wrapper.Write(p)\n}\n\nfunc (c *execCmd) getStdout() io.WriteCloser {\n\treturn &WrappedWriteCloser{os.Stdout, colorable.NewColorableStdout()}\n}\n\nfunc (c *execCmd) getTERM() (string, bool) {\n\treturn \"dumb\", true\n}\n\nfunc (c *execCmd) controlSocketHandler(control *websocket.Conn) {\n\tch := make(chan os.Signal, 10)\n\tsignal.Notify(ch, os.Interrupt)\n\n\tcloseMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, \"\")\n\tdefer control.WriteMessage(websocket.CloseMessage, closeMsg)\n\n\tfor {\n\t\tsig := <-ch\n\t\tswitch sig {\n\t\tcase os.Interrupt:\n\t\t\tlogger.Debugf(\"Received '%s signal', forwarding to executing program.\", sig)\n\t\t\terr := c.forwardSignal(control, syscall.SIGINT)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugf(\"Failed to forward signal '%s'.\", syscall.SIGINT)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pavel-d\/artificial_idiot\/telegram\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"log\"\n\t\"strings\"\n)\n\nfunc CurrencyHandler(message telegram.Message, bot *telegram.Bot, params []string) {\n\tkeyboard := telegram.KeyboardForOptions(\"Книжка\", \"НБУ\", \"RUB\")\n\tmessage.ReplyWithKeyboardMarkup(\"Какой курс?\", keyboard)\n\n\tbot.Once(message.From.Id, func(message telegram.Message, bot *telegram.Bot) {\n\t\tif message.Text == \"RUB\" {\n\t\t\tmessage.Reply(fetchRub())\n\t\t}\n\t})\n}\n\nfunc fetchRub() string {\n\torigin := \"http:\/\/zenrus.ru\/\"\n\turl := \"ws:\/\/zenrus.ru:8888\/\"\n\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer ws.Close()\n\n\tvar msg = make([]byte, 512)\n\n\tif n, err = ws.Read(msg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trates := strings.Split(string(msg[:n]), \";\")\n\n\treturn fmt.Sprintf(\"USD\/RUB - %s; EUR\/RUB - %s\", rates[0], rates[1])\n}\n<commit_msg>Fix syntax error<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pavel-d\/artificial_idiot\/telegram\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"log\"\n\t\"strings\"\n)\n\nfunc CurrencyHandler(message telegram.Message, bot *telegram.Bot, params []string) {\n\tkeyboard := telegram.KeyboardForOptions(\"Книжка\", \"НБУ\", \"RUB\")\n\tmessage.ReplyWithKeyboardMarkup(\"Какой курс?\", keyboard)\n\n\tbot.Once(message.From.Id, func(message telegram.Message, bot *telegram.Bot) {\n\t\tif message.Text == \"RUB\" {\n\t\t\tmessage.Reply(fetchRub())\n\t\t}\n\t})\n}\n\nfunc fetchRub() string {\n\torigin := \"http:\/\/zenrus.ru\/\"\n\turl := \"ws:\/\/zenrus.ru:8888\/\"\n\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer ws.Close()\n\n\tvar msg = make([]byte, 512)\n\tvar n int\n\n\tif n, err = ws.Read(msg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trates := strings.Split(string(msg[:n]), \";\")\n\n\treturn fmt.Sprintf(\"USD\/RUB - %s; EUR\/RUB - %s\", rates[0], rates[1])\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\ntype Search struct {\n\tID uint\n\tQuery string\n\tCompletedPages int\n\tFinished bool\n}\n\ntype Prospect struct {\n\tID uint\n\tUser string\n\tRepo string\n\tName string\n\tEmail string\n\tLocation string\n\tSource string\n}\n\ntype Store struct {\n\tPath string\n\tDB gorm.DB\n}\n\nfunc NewStore(path string) (*Store, error) {\n\tdb, err := gorm.Open(\"sqlite3\", path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Store{\n\t\tPath: path,\n\t\tDB: db,\n\t}, nil\n}\n\nfunc (s *Store) Init() error {\n\ts.DB.CreateTable(&Search{})\n\ts.DB.CreateTable(&Prospect{})\n\treturn nil\n}\n\nfunc (s *Store) NewSearch(query string) (*Search, error) {\n\tsearch := &Search{Query: query}\n\tq := s.DB.Create(search)\n\tif q.Error != nil {\n\t\treturn nil, q.Error\n\t}\n\treturn search, nil\n}\n<commit_msg>some doc on the Prospect struct<commit_after>package main\n\nimport (\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\ntype Search struct {\n\tID uint\n\tQuery string\n\tCompletedPages int\n\tFinished bool\n}\n\ntype Prospect struct {\n\tID uint\n\n\t\/\/ User login handle\n\tUser string\n\n\t\/\/ Repo is the full name of the repository\n\tRepo string\n\n\t\/\/ Name of the user.\n\tName string\n\n\t\/\/ Email of the user.\n\tEmail string\n\n\t\/\/ Location of the user.\n\tLocation string\n\n\t\/\/ Source of the prospect. This is whether it came from their profile page,\n\t\/\/ or from scraping commit logs.\n\tSource string\n}\n\ntype Store struct {\n\tPath string\n\tDB gorm.DB\n}\n\nfunc NewStore(path string) (*Store, error) {\n\tdb, err := gorm.Open(\"sqlite3\", path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Store{\n\t\tPath: path,\n\t\tDB: db,\n\t}, nil\n}\n\nfunc (s *Store) Init() error {\n\ts.DB.CreateTable(&Search{})\n\ts.DB.CreateTable(&Prospect{})\n\treturn nil\n}\n\nfunc (s *Store) NewSearch(query string) (*Search, error) {\n\tsearch := &Search{Query: query}\n\tq := s.DB.Create(search)\n\tif q.Error != nil {\n\t\treturn nil, q.Error\n\t}\n\treturn search, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\/\/\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype Login struct {\n\tUser string\n\tPassword string\n\tsync sync.Mutex\n}\ntype User struct {\n\tId string\n\tDisplayName string\n\tUrl string\n\tImageUrl string\n}\ntype Users struct {\n\tCount int\n\tValue []User\n}\ntype Room struct {\n\tId int\n\tName string\n\tDescription string\n\tLastActivity string\n\tCreatedBy User\n\tCreatedDate string\n\tHasAdminPermissions bool\n\tHasReadWritePermissions bool\n}\ntype Rooms struct {\n\tCount int\n\tValue []Room\n}\n\nvar apiClient http.RoundTripper = &http.Transport {\n\tProxy: http.ProxyFromEnvironment,\n}\nvar apiBase string = os.Args[1]\nvar apiEndpoint []string = []string {\n\tapiBase + \"\/_apis\/chat\/rooms?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/users?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/users\/%s?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/messages?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/messages?$filter=PostedTime ge %s&api-version=1.0\",\n}\n\nfunc main() {\n\tlogin := Login{\n\t\tUser: os.Args[2],\n\t\tPassword: os.Args[3],\n\t}\n\tlistener := make(chan string)\n\n\tgo pollRooms(5*time.Second, &login, &listener)\n\n\tfor l := range listener {\n\t\tfmt.Println(l)\n\t}\t\n}\n\n\/\/ requests are mutually exclusive on login\n\/\/ so login can be updated on errors\n\/\/ without others triggering same\nfunc makeRequest(login *Login, verb string, url string, body string, code int, listener *chan string) ([]byte, error) {\n\tlogin.sync.Lock()\n\tdefer login.sync.Unlock()\n\n request, err := http.NewRequest(verb, url, strings.NewReader(body));\n\tif err != nil {\n\t\treturn nil, err\n\t}\n request.SetBasicAuth(login.User, login.Password)\n\n\tresponse, err := apiClient.RoundTrip(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != code {\n return nil, errors.New(response.Status)\n\t}\n\n\treturn ioutil.ReadAll(response.Body)\n}\n\nfunc pollRooms(d time.Duration, login *Login, listener *chan string) {\n\t\/\/roomMap := make(map[string]string)\n\troomTicker := time.NewTicker(d)\n\tdefer roomTicker.Stop()\n\n\tfor _ = range roomTicker.C {\n\t\tbody, err := makeRequest(login, \"GET\", apiEndpoint[0], \"\", 200, listener)\n\t\tif (err != nil) {\n\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar rooms Rooms \/\/ or interface{} for generic\n\t\terr = json.Unmarshal(body, &rooms)\n\t\tif (err != nil) {\n *listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n continue\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ determine room added, removed\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollUsers(d time.Duration) {\n}\n\nfunc pollMessages(d time.Duration) {\n}\n\n<commit_msg>go fmt<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\/\/\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Login struct {\n\tUser string\n\tPassword string\n\tsync sync.Mutex\n}\ntype User struct {\n\tId string\n\tDisplayName string\n\tUrl string\n\tImageUrl string\n}\ntype Users struct {\n\tCount int\n\tValue []User\n}\ntype Room struct {\n\tId int\n\tName string\n\tDescription string\n\tLastActivity string\n\tCreatedBy User\n\tCreatedDate string\n\tHasAdminPermissions bool\n\tHasReadWritePermissions bool\n}\ntype Rooms struct {\n\tCount int\n\tValue []Room\n}\n\nvar apiClient http.RoundTripper = &http.Transport{\n\tProxy: http.ProxyFromEnvironment,\n}\nvar apiBase string = os.Args[1]\nvar apiEndpoint []string = []string{\n\tapiBase + \"\/_apis\/chat\/rooms?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/users?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/users\/%s?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/messages?api-version=1.0\",\n\tapiBase + \"\/_apis\/chat\/rooms\/%s\/messages?$filter=PostedTime ge %s&api-version=1.0\",\n}\n\nfunc main() {\n\tlogin := Login{\n\t\tUser: os.Args[2],\n\t\tPassword: os.Args[3],\n\t}\n\tlistener := make(chan string)\n\n\tgo pollRooms(5*time.Second, &login, &listener)\n\n\tfor l := range listener {\n\t\tfmt.Println(l)\n\t}\n}\n\n\/\/ requests are mutually exclusive on login\n\/\/ so login can be updated on errors\n\/\/ without others triggering same\nfunc makeRequest(login *Login, verb string, url string, body string, code int, listener *chan string) ([]byte, error) {\n\tlogin.sync.Lock()\n\tdefer login.sync.Unlock()\n\n\trequest, err := http.NewRequest(verb, url, strings.NewReader(body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.SetBasicAuth(login.User, login.Password)\n\n\tresponse, err := apiClient.RoundTrip(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != code {\n\t\treturn nil, errors.New(response.Status)\n\t}\n\n\treturn ioutil.ReadAll(response.Body)\n}\n\nfunc pollRooms(d time.Duration, login *Login, listener *chan string) {\n\t\/\/roomMap := make(map[string]string)\n\troomTicker := time.NewTicker(d)\n\tdefer roomTicker.Stop()\n\n\tfor _ = range roomTicker.C {\n\t\tbody, err := makeRequest(login, \"GET\", apiEndpoint[0], \"\", 200, listener)\n\t\tif err != nil {\n\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar rooms Rooms \/\/ or interface{} for generic\n\t\terr = json.Unmarshal(body, &rooms)\n\t\tif err != nil {\n\t\t\t*listener <- fmt.Sprintf(\"error pollRooms %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if 401 or \"auth\" header, need U\/P from speaker, block everywhere?\n\t\t\/\/ determine room added, removed\n\t\t\/\/ for added rooms start ticker, for removed rooms stop ticker\n\t\t\/\/ need to signal \"done\"?\n\t}\n}\n\nfunc pollUsers(d time.Duration) {\n}\n\nfunc pollMessages(d time.Duration) {\n}\n<|endoftext|>"} {"text":"<commit_before>package matchers\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ EqualMatcher performs a DeepEqual between the actual and expected.\ntype EqualMatcher struct {\n\texpected interface{}\n}\n\n\/\/ Equal returns an EqualMatcher with the expected value\nfunc Equal(expected interface{}) EqualMatcher {\n\treturn EqualMatcher{\n\t\texpected: expected,\n\t}\n}\n\nfunc (m EqualMatcher) Match(actual interface{}) (interface{}, error) {\n\tif !reflect.DeepEqual(actual, m.expected) {\n\t\treturn nil, fmt.Errorf(\"%v (%T) to equal %v (%T)\", actual, actual, m.expected, m.expected)\n\t}\n\n\treturn actual, nil\n}\n<commit_msg>EqualMatcher reports more info<commit_after>package matchers\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ EqualMatcher performs a DeepEqual between the actual and expected.\ntype EqualMatcher struct {\n\texpected interface{}\n}\n\n\/\/ Equal returns an EqualMatcher with the expected value\nfunc Equal(expected interface{}) EqualMatcher {\n\treturn EqualMatcher{\n\t\texpected: expected,\n\t}\n}\n\nfunc (m EqualMatcher) Match(actual interface{}) (interface{}, error) {\n\tif !reflect.DeepEqual(actual, m.expected) {\n\t\treturn nil, fmt.Errorf(\"%+v (%T) to equal %+v (%T)\", actual, actual, m.expected, m.expected)\n\t}\n\n\treturn actual, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The matrix package contains various utilities for dealing with raw matrices.\n\/\/ The interface is loosely based on the NumPy package in Python.\n\/\/\n\/\/ The key interfaces here are:\n\/\/ * NDArray – A multidimensional array, with dense and (2D-only) sparse\n\/\/ implementations.\n\/\/ * Matrix – A two-dimensional array, with various methods only available when\n\/\/ working in two dimensions. A two-dimensional NDArray can be\n\/\/ trivially converted to the Matrix type by calling arr.M().\n\/\/\n\/\/ When possible, function implementations take advantage of matrix sparsity.\n\/\/ For instance, MProd(), the matrix multiplication function, performs the\n\/\/ minimum amount of work required based on the types of its arguments.\n\/\/\n\/\/ Certain linear algebra methods, particularly in the Matrix interface, rely\n\/\/ on BLAS. In order to use it, you will need to register an appropriate engine.\n\/\/ See the documentation at https:\/\/github.com\/gonum\/blas for details. You can\n\/\/ register a default (native Go) engine by calling InitDefaultBlas().\n\/\/ If you see a panic message like\n\/\/ \"mat64: no blas engine registered: call Register()\"\n\/\/ then you need to register a BLAS engine. If you don't see this error, then\n\/\/ you probably don't need to worry about it.\npackage matrix\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\n\/\/ ArraySparsity indicates the representation type of the matrix\ntype ArraySparsity int\n\nconst (\n\tDenseArray ArraySparsity = iota\n\tSparseCooMatrix\n\tSparseDiagMatrix\n)\n\n\/\/ A NDArray is an n-dimensional array of numbers which can be manipulated in\n\/\/ various ways. Concrete implementations can differ; for instance, sparse\n\/\/ and dense representations are possible.\ntype NDArray interface {\n\n\t\/\/ Return the element-wise sum of this array and one or more others\n\tAdd(others ...NDArray) NDArray\n\n\t\/\/ Returns true if and only if all items are nonzero\n\tAll() bool\n\n\t\/\/ Returns true if f is true for all array elements\n\tAllF(f func(v float64) bool) bool\n\n\t\/\/ Returns true if f is true for all pairs of array elements in the same position\n\tAllF2(f func(v1, v2 float64) bool, other NDArray) bool\n\n\t\/\/ Returns true if and only if any item is nonzero\n\tAny() bool\n\n\t\/\/ Returns true if f is true for any array element\n\tAnyF(f func(v float64) bool) bool\n\n\t\/\/ Returns true if f is true for any pair of array elements in the same position\n\tAnyF2(f func(v1, v2 float64) bool, other NDArray) bool\n\n\t\/\/ Return the result of applying a function to all elements\n\tApply(f func(float64) float64) NDArray\n\n\t\/\/ Get the matrix data as a flattened 1D array; sparse matrices will make\n\t\/\/ a copy first.\n\tArray() []float64\n\n\t\/\/ Create a new array by concatenating this with another array along the\n\t\/\/ specified axis. The array shapes must be equal along all other axes.\n\t\/\/ It is legal to add a new axis.\n\tConcat(axis int, others ...NDArray) NDArray\n\n\t\/\/ Returns a duplicate of this array\n\tCopy() NDArray\n\n\t\/\/ Counts the number of nonzero elements in the array\n\tCountNonzero() int\n\n\t\/\/ Returns a dense copy of the array\n\tDense() NDArray\n\n\t\/\/ Return the element-wise quotient of this array and one or more others.\n\t\/\/ This function defines 0 \/ 0 = 0, so it's useful for sparse arrays.\n\tDiv(others ...NDArray) NDArray\n\n\t\/\/ Returns true if and only if all elements in the two arrays are equal\n\tEqual(other NDArray) bool\n\n\t\/\/ Set all array elements to the given value\n\tFill(value float64)\n\n\t\/\/ Get an array element in a flattened verison of this array\n\tFlatItem(index int) float64\n\n\t\/\/ Set an array element in a flattened version of this array\n\tFlatItemSet(value float64, index int)\n\n\t\/\/ Return an iterator over populated matrix entries\n\tFlatIter() FlatNDArrayIterator\n\n\t\/\/ Get an array element\n\tItem(index ...int) float64\n\n\t\/\/ Return the result of adding a scalar value to each array element\n\tItemAdd(value float64) NDArray\n\n\t\/\/ Return the result of dividing each array element by a scalar value\n\tItemDiv(value float64) NDArray\n\n\t\/\/ Return the reuslt of multiplying each array element by a scalar value\n\tItemProd(value float64) NDArray\n\n\t\/\/ Return the result of subtracting a scalar value from each array element\n\tItemSub(value float64) NDArray\n\n\t\/\/ Set an array element\n\tItemSet(value float64, index ...int)\n\n\t\/\/ Return an iterator over populated matrix entries\n\tIter() CoordNDArrayIterator\n\n\t\/\/ Returns the array as a matrix. This is only possible for 1D and 2D arrays;\n\t\/\/ 1D arrays of length n are converted into n x 1 vectors.\n\tM() Matrix\n\n\t\/\/ Get the value of the largest array element\n\tMax() float64\n\n\t\/\/ Get the value of the smallest array element\n\tMin() float64\n\n\t\/\/ Return the element-wise product of this array and one or more others\n\tProd(others ...NDArray) NDArray\n\n\t\/\/ The number of dimensions in the matrix\n\tNDim() int\n\n\t\/\/ Return a copy of the array, normalized to sum to 1\n\tNormalize() NDArray\n\n\t\/\/ Get a 1D copy of the array, in 'C' order: rightmost axes change fastest\n\tRavel() NDArray\n\n\t\/\/ A slice giving the size of all array dimensions\n\tShape() []int\n\n\t\/\/ The total number of elements in the matrix\n\tSize() int\n\n\t\/\/ Get an array containing a rectangular slice of this array.\n\t\/\/ `from` and `to` should both have one index per axis. The indices\n\t\/\/ in `from` and `to` define the first and just-past-last indices you wish\n\t\/\/ to select along each axis. Negative indexing is supported: when slicing,\n\t\/\/ index -1 refers to the item just past the last and -arr.Size() refers to\n\t\/\/ the first element.\n\tSlice(from []int, to []int) NDArray\n\n\t\/\/ Ask whether the matrix has a sparse representation (useful for optimization)\n\tSparsity() ArraySparsity\n\n\t\/\/ Return the element-wise difference of this array and one or more others\n\tSub(others ...NDArray) NDArray\n\n\t\/\/ Return the sum of all array elements\n\tSum() float64\n\n\t\/\/ Return the same matrix, but with axes transposed. The same data is used,\n\t\/\/ for speed and memory efficiency. Use Copy() to create a new array.\n\t\/\/ A 1D array is unchanged; create a 2D analog to rotate a vector.\n\tT() NDArray\n}\n\n\/\/ Create an array from literal data\nfunc A(shape []int, values ...float64) NDArray {\n\tsize := 1\n\tfor _, sz := range shape {\n\t\tsize *= sz\n\t}\n\tif len(values) != size {\n\t\tpanic(fmt.Sprintf(\"Expected %d array elements but got %d\", size, len(values)))\n\t}\n\tarray := &DenseF64Array{\n\t\tshape: shape,\n\t\tarray: make([]float64, len(values)),\n\t}\n\tcopy(array.array[:], values[:])\n\treturn array\n}\n\n\/\/ Create a 1D array\nfunc A1(values ...float64) NDArray {\n\treturn A([]int{len(values)}, values...)\n}\n\n\/\/ Create a 2D array\nfunc A2(values ...[]float64) NDArray {\n\tarray := &DenseF64Array{\n\t\tshape: []int{len(values), len(values[0])},\n\t\tarray: make([]float64, len(values)*len(values[0])),\n\t}\n\tfor i0 := 0; i0 < array.shape[0]; i0++ {\n\t\tif len(values[i0]) != array.shape[1] {\n\t\t\tpanic(fmt.Sprintf(\"A2 got inconsistent array lengths %d and %d\", array.shape[1], len(values[i0])))\n\t\t}\n\t\tfor i1 := 0; i1 < array.shape[1]; i1++ {\n\t\t\tarray.ItemSet(values[i0][i1], i0, i1)\n\t\t}\n\t}\n\treturn array\n}\n\n\/\/ Create an NDArray of float64 values, initialized to zero\nfunc Dense(size ...int) NDArray {\n\ttotalSize := 1\n\tfor _, sz := range size {\n\t\ttotalSize *= sz\n\t}\n\treturn &DenseF64Array{\n\t\tshape: size,\n\t\tarray: make([]float64, totalSize),\n\t}\n}\n\n\/\/ Create an NDArray of float64 values, initialized to value\nfunc WithValue(value float64, size ...int) NDArray {\n\tarray := Dense(size...)\n\tarray.Fill(value)\n\treturn array\n}\n\n\/\/ Create an NDArray of float64 values, initialized to zero\nfunc Zeros(size ...int) NDArray {\n\treturn Dense(size...)\n}\n\n\/\/ Create an NDArray of float64 values, initialized to one\nfunc Ones(size ...int) NDArray {\n\treturn WithValue(1.0, size...)\n}\n\n\/\/ Create a dense NDArray of float64 values, initialized to uniformly random\n\/\/ values in [0, 1).\nfunc Rand(size ...int) NDArray {\n\tarray := Dense(size...)\n\n\tmax := array.Size()\n\tfor i := 0; i < max; i++ {\n\t\tarray.FlatItemSet(rand.Float64(), i)\n\t}\n\n\treturn array\n}\n\n\/\/ Create a dense NDArray of float64 values, initialized to random values in\n\/\/ [-math.MaxFloat64, +math.MaxFloat64] distributed on the standard Normal\n\/\/ distribution.\nfunc RandN(size ...int) NDArray {\n\tarray := Dense(size...)\n\n\tmax := array.Size()\n\tfor i := 0; i < max; i++ {\n\t\tarray.FlatItemSet(rand.NormFloat64(), i)\n\t}\n\n\treturn array\n}\n<commit_msg>updated package docs<commit_after>\/\/ The matrix package contains various utilities for dealing with raw matrices.\n\/\/ The interface is loosely based on the NumPy package in Python.\n\/\/\n\/\/ The key interfaces here are:\n\/\/ * NDArray – A multidimensional array, with dense and (2D-only) sparse\n\/\/ implementations.\n\/\/ * Matrix – A two-dimensional array, with various methods only available when\n\/\/ working in two dimensions. A two-dimensional NDArray can be\n\/\/ trivially converted to the Matrix type by calling arr.M().\n\/\/\n\/\/ When possible, function implementations take advantage of matrix sparsity.\n\/\/ For instance, MProd(), the matrix multiplication function, performs the\n\/\/ minimum amount of work required based on the types of its arguments.\n\/\/\n\/\/ Certain linear algebra methods, particularly in the Matrix interface, rely\n\/\/ on BLAS. In order to use it, you will need to register an appropriate engine.\n\/\/ See the documentation at https:\/\/github.com\/gonum\/blas for details. You can\n\/\/ register a default (native Go) engine by calling InitDefaultBlas().\n\/\/ If you see a panic message like\n\/\/ \"mat64: no blas engine registered: call Register()\"\n\/\/ then you need to register a BLAS engine. If you don't see this error, then\n\/\/ you probably don't need to worry about it.\npackage matrix\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\n\/\/ ArraySparsity indicates the representation type of the matrix\ntype ArraySparsity int\n\nconst (\n\tDenseArray ArraySparsity = iota\n\tSparseCooMatrix\n\tSparseDiagMatrix\n)\n\n\/\/ A NDArray is an n-dimensional array of numbers which can be manipulated in\n\/\/ various ways. Concrete implementations can differ; for instance, sparse\n\/\/ and dense representations are possible.\ntype NDArray interface {\n\n\t\/\/ Return the element-wise sum of this array and one or more others\n\tAdd(others ...NDArray) NDArray\n\n\t\/\/ Returns true if and only if all items are nonzero\n\tAll() bool\n\n\t\/\/ Returns true if f is true for all array elements\n\tAllF(f func(v float64) bool) bool\n\n\t\/\/ Returns true if f is true for all pairs of array elements in the same position\n\tAllF2(f func(v1, v2 float64) bool, other NDArray) bool\n\n\t\/\/ Returns true if and only if any item is nonzero\n\tAny() bool\n\n\t\/\/ Returns true if f is true for any array element\n\tAnyF(f func(v float64) bool) bool\n\n\t\/\/ Returns true if f is true for any pair of array elements in the same position\n\tAnyF2(f func(v1, v2 float64) bool, other NDArray) bool\n\n\t\/\/ Return the result of applying a function to all elements\n\tApply(f func(float64) float64) NDArray\n\n\t\/\/ Get the matrix data as a flattened 1D array; sparse matrices will make\n\t\/\/ a copy first.\n\tArray() []float64\n\n\t\/\/ Create a new array by concatenating this with another array along the\n\t\/\/ specified axis. The array shapes must be equal along all other axes.\n\t\/\/ It is legal to add a new axis.\n\tConcat(axis int, others ...NDArray) NDArray\n\n\t\/\/ Returns a duplicate of this array\n\tCopy() NDArray\n\n\t\/\/ Counts the number of nonzero elements in the array\n\tCountNonzero() int\n\n\t\/\/ Returns a dense copy of the array\n\tDense() NDArray\n\n\t\/\/ Return the element-wise quotient of this array and one or more others.\n\t\/\/ This function defines 0 \/ 0 = 0, so it's useful for sparse arrays.\n\tDiv(others ...NDArray) NDArray\n\n\t\/\/ Returns true if and only if all elements in the two arrays are equal\n\tEqual(other NDArray) bool\n\n\t\/\/ Set all array elements to the given value\n\tFill(value float64)\n\n\t\/\/ Get an array element in a flattened verison of this array\n\tFlatItem(index int) float64\n\n\t\/\/ Set an array element in a flattened version of this array\n\tFlatItemSet(value float64, index int)\n\n\t\/\/ Return an iterator over populated matrix entries\n\tFlatIter() FlatNDArrayIterator\n\n\t\/\/ Get an array element\n\tItem(index ...int) float64\n\n\t\/\/ Return the result of adding a scalar value to each array element\n\tItemAdd(value float64) NDArray\n\n\t\/\/ Return the result of dividing each array element by a scalar value\n\tItemDiv(value float64) NDArray\n\n\t\/\/ Return the reuslt of multiplying each array element by a scalar value\n\tItemProd(value float64) NDArray\n\n\t\/\/ Return the result of subtracting a scalar value from each array element\n\tItemSub(value float64) NDArray\n\n\t\/\/ Set an array element\n\tItemSet(value float64, index ...int)\n\n\t\/\/ Return an iterator over populated matrix entries\n\tIter() CoordNDArrayIterator\n\n\t\/\/ Returns the array as a matrix. This is only possible for 1D and 2D arrays;\n\t\/\/ 1D arrays of length n are converted into n x 1 vectors.\n\tM() Matrix\n\n\t\/\/ Get the value of the largest array element\n\tMax() float64\n\n\t\/\/ Get the value of the smallest array element\n\tMin() float64\n\n\t\/\/ Return the element-wise product of this array and one or more others\n\tProd(others ...NDArray) NDArray\n\n\t\/\/ The number of dimensions in the matrix\n\tNDim() int\n\n\t\/\/ Return a copy of the array, normalized to sum to 1\n\tNormalize() NDArray\n\n\t\/\/ Get a 1D copy of the array, in 'C' order: rightmost axes change fastest\n\tRavel() NDArray\n\n\t\/\/ A slice giving the size of all array dimensions\n\tShape() []int\n\n\t\/\/ The total number of elements in the matrix\n\tSize() int\n\n\t\/\/ Get an array containing a rectangular slice of this array.\n\t\/\/ `from` and `to` should both have one index per axis. The indices\n\t\/\/ in `from` and `to` define the first and just-past-last indices you wish\n\t\/\/ to select along each axis. Negative indexing is supported: when slicing,\n\t\/\/ index -1 refers to the item just past the last and -arr.Size() refers to\n\t\/\/ the first element.\n\tSlice(from []int, to []int) NDArray\n\n\t\/\/ Ask whether the matrix has a sparse representation (useful for optimization)\n\tSparsity() ArraySparsity\n\n\t\/\/ Return the element-wise difference of this array and one or more others\n\tSub(others ...NDArray) NDArray\n\n\t\/\/ Return the sum of all array elements\n\tSum() float64\n\n\t\/\/ Return the same matrix, but with axes transposed. The same data is used,\n\t\/\/ for speed and memory efficiency. Use Copy() to create a new array.\n\t\/\/ A 1D array is unchanged; create a 2D analog to rotate a vector.\n\tT() NDArray\n}\n\n\/\/ Create an array from literal data\nfunc A(shape []int, values ...float64) NDArray {\n\tsize := 1\n\tfor _, sz := range shape {\n\t\tsize *= sz\n\t}\n\tif len(values) != size {\n\t\tpanic(fmt.Sprintf(\"Expected %d array elements but got %d\", size, len(values)))\n\t}\n\tarray := &DenseF64Array{\n\t\tshape: shape,\n\t\tarray: make([]float64, len(values)),\n\t}\n\tcopy(array.array[:], values[:])\n\treturn array\n}\n\n\/\/ Create a 1D array\nfunc A1(values ...float64) NDArray {\n\treturn A([]int{len(values)}, values...)\n}\n\n\/\/ Create a 2D array\nfunc A2(values ...[]float64) NDArray {\n\tarray := &DenseF64Array{\n\t\tshape: []int{len(values), len(values[0])},\n\t\tarray: make([]float64, len(values)*len(values[0])),\n\t}\n\tfor i0 := 0; i0 < array.shape[0]; i0++ {\n\t\tif len(values[i0]) != array.shape[1] {\n\t\t\tpanic(fmt.Sprintf(\"A2 got inconsistent array lengths %d and %d\", array.shape[1], len(values[i0])))\n\t\t}\n\t\tfor i1 := 0; i1 < array.shape[1]; i1++ {\n\t\t\tarray.ItemSet(values[i0][i1], i0, i1)\n\t\t}\n\t}\n\treturn array\n}\n\n\/\/ Create an NDArray of float64 values, initialized to zero\nfunc Dense(size ...int) NDArray {\n\ttotalSize := 1\n\tfor _, sz := range size {\n\t\ttotalSize *= sz\n\t}\n\treturn &DenseF64Array{\n\t\tshape: size,\n\t\tarray: make([]float64, totalSize),\n\t}\n}\n\n\/\/ Create an NDArray of float64 values, initialized to value\nfunc WithValue(value float64, size ...int) NDArray {\n\tarray := Dense(size...)\n\tarray.Fill(value)\n\treturn array\n}\n\n\/\/ Create an NDArray of float64 values, initialized to zero\nfunc Zeros(size ...int) NDArray {\n\treturn Dense(size...)\n}\n\n\/\/ Create an NDArray of float64 values, initialized to one\nfunc Ones(size ...int) NDArray {\n\treturn WithValue(1.0, size...)\n}\n\n\/\/ Create a dense NDArray of float64 values, initialized to uniformly random\n\/\/ values in [0, 1).\nfunc Rand(size ...int) NDArray {\n\tarray := Dense(size...)\n\n\tmax := array.Size()\n\tfor i := 0; i < max; i++ {\n\t\tarray.FlatItemSet(rand.Float64(), i)\n\t}\n\n\treturn array\n}\n\n\/\/ Create a dense NDArray of float64 values, initialized to random values in\n\/\/ [-math.MaxFloat64, +math.MaxFloat64] distributed on the standard Normal\n\/\/ distribution.\nfunc RandN(size ...int) NDArray {\n\tarray := Dense(size...)\n\n\tmax := array.Size()\n\tfor i := 0; i < max; i++ {\n\t\tarray.FlatItemSet(rand.NormFloat64(), i)\n\t}\n\n\treturn array\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 Christian Grabowski All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage pipeline\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\tgit \"gopkg.in\/libgit2\/git2go.v22\"\n)\n\n\/\/ DepTree is a dependency tree to determine whether or not to build services\ntype DepTree struct {\n\tCurrNode *DepService\n}\n\n\/\/ TraverseTree traverses a dependency tree\nfunc TraverseTree(depSrv *DepService, repo *git.Repository, lastBuildCommit *string) error {\n\tif depSrv == nil {\n\t\treturn errors.New(\"Service is nil in tree\")\n\t}\n\tshouldBuild, buildErr := depSrv.build.ShouldBuild(repo, lastBuildCommit)\n\tif buildErr != nil {\n\t\treturn buildErr\n\t}\n\tif shouldBuild {\n\t\tfor i := range depSrv.Children {\n\t\t\tdepSrv.Children[i].build.shouldBuild = true\n\t\t\ttravErr := TraverseTree(depSrv.Children[i], repo, lastBuildCommit)\n\t\t\tif travErr != nil {\n\t\t\t\treturn travErr\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DepService represents a service in the tree\ntype DepService struct {\n\tbuild *Service\n\tParent *DepService\n\tChildren map[string]*DepService\n}\n\nfunc getDependencies(depSrv *DepService, created map[string]*DepService, proj *Project) {\n\tfor j := range depSrv.build.conf.DependsOn {\n\t\tif created[depSrv.build.conf.DependsOn[j]] != nil {\n\t\t\tdepSrv.Parent = created[depSrv.build.conf.DependsOn[j]]\n\t\t\tif created[depSrv.build.conf.DependsOn[j]].Children[depSrv.build.conf.Name] == nil {\n\t\t\t\tcreated[depSrv.build.conf.DependsOn[j]].Children[depSrv.build.conf.Name] = depSrv\n\t\t\t}\n\t\t} else {\n\t\t\tparent := &DepService{build: proj.Services[depSrv.build.conf.DependsOn[j]]}\n\t\t\tdepSrv.Parent = parent\n\t\t\tif parent.Children[depSrv.build.conf.Name] == nil {\n\t\t\t\tparent.Children[depSrv.build.conf.Name] = depSrv\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ NewTreeList returns a list of dependency trees\nfunc NewTreeList(proj *Project) []*DepTree {\n\tvar newTrees []*DepTree\n\tcreated := make(map[string]*DepService)\n\tlog.Println(\"Creating a dependency tree\")\n\tfor i := range proj.Services {\n\t\tlog.Println(\"Finding a spot in the tree for \", proj.Services[i].conf.Name)\n\t\tif created[proj.Services[i].conf.Name] != nil {\n\t\t\tdepSrv := created[proj.Services[i].conf.Name]\n\t\t\tgetDependencies(depSrv, created, proj)\n\t\t}\n\t\tdepSrv := &DepService{build: proj.Services[i], Children: make(map[string]*DepService)}\n\t\tif len(depSrv.build.conf.DependsOn) == 0 {\n\t\t\tnewTrees = append(newTrees, &DepTree{CurrNode: depSrv})\n\t\t\tcreated[depSrv.build.conf.Name] = depSrv\n\t\t} else {\n\t\t\tgetDependencies(depSrv, created, proj)\n\t\t}\n\t}\n\treturn newTrees\n}\n<commit_msg>make lowest parent direct dependency<commit_after>\/*\nCopyright 2016 Christian Grabowski All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage pipeline\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\tgit \"gopkg.in\/libgit2\/git2go.v22\"\n)\n\n\/\/ DepTree is a dependency tree to determine whether or not to build services\ntype DepTree struct {\n\tCurrNode *DepService\n}\n\n\/\/ TraverseTree traverses a dependency tree\nfunc TraverseTree(depSrv *DepService, repo *git.Repository, lastBuildCommit *string) error {\n\tif depSrv == nil {\n\t\treturn errors.New(\"Service is nil in tree\")\n\t}\n\tshouldBuild, buildErr := depSrv.build.ShouldBuild(repo, lastBuildCommit)\n\tif buildErr != nil {\n\t\treturn buildErr\n\t}\n\tif shouldBuild {\n\t\tfor i := range depSrv.Children {\n\t\t\tdepSrv.Children[i].build.shouldBuild = true\n\t\t\ttravErr := TraverseTree(depSrv.Children[i], repo, lastBuildCommit)\n\t\t\tif travErr != nil {\n\t\t\t\treturn travErr\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DepService represents a service in the tree\ntype DepService struct {\n\tbuild *Service\n\tParent *DepService\n\tChildren map[string]*DepService\n}\n\nfunc dependsOnChild(child, parent *DepService) int {\n\tfor j := range child.build.conf.DependsOn {\n\t\tif parent.Children[child.build.conf.DependsOn[j]] != nil {\n\t\t\treturn j\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc getDependencies(depSrv *DepService, created map[string]*DepService, proj *Project) {\n\tfor j := range depSrv.build.conf.DependsOn {\n\t\tif created[depSrv.build.conf.DependsOn[j]] != nil {\n\t\t\tyoungerParentIndex := dependsOnChild(depSrv, created[depSrv.build.conf.DependsOn[j]])\n\t\t\tif youngerParentIndex > -1 {\n\t\t\t\tdepSrv.Parent = created[depSrv.build.conf.DependsOn[j]].Children[depSrv.build.conf.DependsOn[youngerParentIndex]]\n\t\t\t\tdepSrv.Children[depSrv.build.conf.Name] = depSrv\n\t\t\t} else {\n\t\t\t\tdepSrv.Parent = created[depSrv.build.conf.DependsOn[j]]\n\t\t\t\tif depSrv.Parent.Children[depSrv.build.conf.Name] == nil {\n\t\t\t\t\tdepSrv.Parent.Children[depSrv.build.conf.Name] = depSrv\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tparent := &DepService{build: proj.Services[depSrv.build.conf.DependsOn[j]]}\n\t\t\tdepSrv.Parent = parent\n\t\t\tif parent.Children[depSrv.build.conf.Name] == nil {\n\t\t\t\tparent.Children[depSrv.build.conf.Name] = depSrv\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ NewTreeList returns a list of dependency trees\nfunc NewTreeList(proj *Project) []*DepTree {\n\tvar newTrees []*DepTree\n\tcreated := make(map[string]*DepService)\n\tlog.Println(\"Creating a dependency tree\")\n\tfor i := range proj.Services {\n\t\tlog.Println(\"Finding a spot in the tree for \", proj.Services[i].conf.Name)\n\t\tif created[proj.Services[i].conf.Name] != nil {\n\t\t\tdepSrv := created[proj.Services[i].conf.Name]\n\t\t\tgetDependencies(depSrv, created, proj)\n\t\t}\n\t\tdepSrv := &DepService{build: proj.Services[i], Children: make(map[string]*DepService)}\n\t\tif len(depSrv.build.conf.DependsOn) == 0 {\n\t\t\tnewTrees = append(newTrees, &DepTree{CurrNode: depSrv})\n\t\t\tcreated[depSrv.build.conf.Name] = depSrv\n\t\t} else {\n\t\t\tgetDependencies(depSrv, created, proj)\n\t\t}\n\t}\n\treturn newTrees\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helm\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\tjsonpatch \"github.com\/evanphx\/json-patch\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/engine\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\t\"k8s.io\/helm\/pkg\/timeconv\"\n\n\t\"istio.io\/operator\/pkg\/util\"\n\t\"istio.io\/pkg\/log\"\n)\n\nconst (\n\t\/\/ YAMLSeparator is a separator for multi-document YAML files.\n\tYAMLSeparator = \"\\n---\\n\"\n\n\t\/\/ DefaultProfileString is the name of the default profile.\n\tDefaultProfileString = \"default\"\n)\n\n\/\/ TemplateRenderer defines a helm template renderer interface.\ntype TemplateRenderer interface {\n\t\/\/ Run starts the renderer and should be called before using it.\n\tRun() error\n\t\/\/ RenderManifest renders the associated helm charts with the given values YAML string and returns the resulting\n\t\/\/ string.\n\tRenderManifest(values string) (string, error)\n}\n\n\/\/ NewHelmRenderer creates a new helm renderer with the given parameters and returns an interface to it.\n\/\/ The format of helmBaseDir and profile strings determines the type of helm renderer returned (compiled-in, file,\n\/\/ HTTP etc.)\nfunc NewHelmRenderer(chartsRootDir, helmBaseDir, componentName, namespace string) (TemplateRenderer, error) {\n\t\/\/ filepath would remove leading slash here if chartsRootDir is empty.\n\tdir := chartsRootDir + \"\/\" + helmBaseDir\n\tswitch {\n\tcase chartsRootDir == \"\":\n\t\treturn NewVFSRenderer(helmBaseDir, componentName, namespace), nil\n\tcase util.IsFilePath(dir):\n\t\treturn NewFileTemplateRenderer(dir, componentName, namespace), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown helm renderer with chartsRoot=%s\", chartsRootDir)\n\t}\n}\n\n\/\/ ReadProfileYAML reads the YAML values associated with the given profile. It uses an appropriate reader for the\n\/\/ profile format (compiled-in, file, HTTP, etc.).\nfunc ReadProfileYAML(profile string) (string, error) {\n\tvar err error\n\tvar globalValues string\n\tif profile == \"\" {\n\t\tlog.Infof(\"ReadProfileYAML for profile name: [Empty]\")\n\t} else {\n\t\tlog.Infof(\"ReadProfileYAML for profile name: %s\", profile)\n\t}\n\n\t\/\/ Get global values from profile.\n\tswitch {\n\tcase isBuiltinProfileName(profile):\n\t\tif globalValues, err = LoadValuesVFS(profile); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tcase util.IsFilePath(profile):\n\t\tlog.Infof(\"Loading values from local filesystem at path %s\", profile)\n\t\tif globalValues, err = readFile(profile); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unsupported Profile type: %s\", profile)\n\t}\n\n\treturn globalValues, nil\n}\n\n\/\/ renderChart renders the given chart with the given values and returns the resulting YAML manifest string.\nfunc renderChart(namespace, values string, chrt *chart.Chart) (string, error) {\n\tconfig := &chart.Config{Raw: values, Values: map[string]*chart.Value{}}\n\toptions := chartutil.ReleaseOptions{\n\t\tName: \"istio\",\n\t\tTime: timeconv.Now(),\n\t\tNamespace: namespace,\n\t}\n\n\tvals, err := chartutil.ToRenderValuesCaps(chrt, config, options, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfiles, err := engine.New().Render(chrt, vals)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Create sorted array of keys to iterate over, to stabilize the order of the rendered templates\n\tkeys := make([]string, 0, len(files))\n\tfor k := range files {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tvar sb strings.Builder\n\tfor i := 0; i < len(keys); i++ {\n\t\tf := files[keys[i]]\n\t\t\/\/ add yaml separator if the rendered file doesn't have one at the end\n\t\tif !strings.HasSuffix(strings.TrimSpace(f)+\"\\n\", YAMLSeparator) {\n\t\t\tf += YAMLSeparator\n\t\t}\n\t\t_, err := sb.WriteString(f)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn sb.String(), nil\n}\n\n\/\/ OverlayYAML patches the overlay tree over the base tree and returns the result. All trees are expressed as YAML\n\/\/ strings.\nfunc OverlayYAML(base, overlay string) (string, error) {\n\tif overlay == \"\" {\n\t\treturn base, nil\n\t}\n\tbj, err := yaml.YAMLToJSON([]byte(base))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"yamlToJSON error in base: %s\\n%s\", err, bj)\n\t}\n\toj, err := yaml.YAMLToJSON([]byte(overlay))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"yamlToJSON error in overlay: %s\\n%s\", err, oj)\n\t}\n\tif overlay == \"\" {\n\t\toj = []byte(\"{}\")\n\t}\n\n\tmerged, err := jsonpatch.MergePatch(bj, oj)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"json merge error (%s) for base object: \\n%s\\n override object: \\n%s\", err, bj, oj)\n\t}\n\tmy, err := yaml.JSONToYAML(merged)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"jsonToYAML error (%s) for merged object: \\n%s\", err, merged)\n\t}\n\n\treturn string(my), nil\n}\n\n\/\/ DefaultFilenameForProfile returns the profile name of the default profile for the given profile.\nfunc DefaultFilenameForProfile(profile string) (string, error) {\n\tswitch {\n\tcase util.IsFilePath(profile):\n\t\treturn filepath.Join(filepath.Dir(profile), DefaultProfileFilename), nil\n\tdefault:\n\t\tif _, ok := ProfileNames[profile]; ok || profile == \"\" {\n\t\t\treturn DefaultProfileString, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"bad profile string %s\", profile)\n\t}\n}\n\n\/\/ IsDefaultProfile reports whether the given profile is the default profile.\nfunc IsDefaultProfile(profile string) bool {\n\treturn profile == \"\" || profile == DefaultProfileString || filepath.Base(profile) == DefaultProfileFilename\n}\n\nfunc readFile(path string) (string, error) {\n\tb, err := ioutil.ReadFile(path)\n\treturn string(b), err\n}\n<commit_msg>remove notes file from render output. (#280)<commit_after>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage helm\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\tjsonpatch \"github.com\/evanphx\/json-patch\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"k8s.io\/helm\/pkg\/chartutil\"\n\t\"k8s.io\/helm\/pkg\/engine\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\t\"k8s.io\/helm\/pkg\/timeconv\"\n\n\t\"istio.io\/operator\/pkg\/util\"\n\t\"istio.io\/pkg\/log\"\n)\n\nconst (\n\t\/\/ YAMLSeparator is a separator for multi-document YAML files.\n\tYAMLSeparator = \"\\n---\\n\"\n\n\t\/\/ DefaultProfileString is the name of the default profile.\n\tDefaultProfileString = \"default\"\n\n\t\/\/ notes file name suffix for the helm chart.\n\tNotesFileNameSuffix = \".txt\"\n)\n\n\/\/ TemplateRenderer defines a helm template renderer interface.\ntype TemplateRenderer interface {\n\t\/\/ Run starts the renderer and should be called before using it.\n\tRun() error\n\t\/\/ RenderManifest renders the associated helm charts with the given values YAML string and returns the resulting\n\t\/\/ string.\n\tRenderManifest(values string) (string, error)\n}\n\n\/\/ NewHelmRenderer creates a new helm renderer with the given parameters and returns an interface to it.\n\/\/ The format of helmBaseDir and profile strings determines the type of helm renderer returned (compiled-in, file,\n\/\/ HTTP etc.)\nfunc NewHelmRenderer(chartsRootDir, helmBaseDir, componentName, namespace string) (TemplateRenderer, error) {\n\t\/\/ filepath would remove leading slash here if chartsRootDir is empty.\n\tdir := chartsRootDir + \"\/\" + helmBaseDir\n\tswitch {\n\tcase chartsRootDir == \"\":\n\t\treturn NewVFSRenderer(helmBaseDir, componentName, namespace), nil\n\tcase util.IsFilePath(dir):\n\t\treturn NewFileTemplateRenderer(dir, componentName, namespace), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown helm renderer with chartsRoot=%s\", chartsRootDir)\n\t}\n}\n\n\/\/ ReadProfileYAML reads the YAML values associated with the given profile. It uses an appropriate reader for the\n\/\/ profile format (compiled-in, file, HTTP, etc.).\nfunc ReadProfileYAML(profile string) (string, error) {\n\tvar err error\n\tvar globalValues string\n\tif profile == \"\" {\n\t\tlog.Infof(\"ReadProfileYAML for profile name: [Empty]\")\n\t} else {\n\t\tlog.Infof(\"ReadProfileYAML for profile name: %s\", profile)\n\t}\n\n\t\/\/ Get global values from profile.\n\tswitch {\n\tcase isBuiltinProfileName(profile):\n\t\tif globalValues, err = LoadValuesVFS(profile); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tcase util.IsFilePath(profile):\n\t\tlog.Infof(\"Loading values from local filesystem at path %s\", profile)\n\t\tif globalValues, err = readFile(profile); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unsupported Profile type: %s\", profile)\n\t}\n\n\treturn globalValues, nil\n}\n\n\/\/ renderChart renders the given chart with the given values and returns the resulting YAML manifest string.\nfunc renderChart(namespace, values string, chrt *chart.Chart) (string, error) {\n\tconfig := &chart.Config{Raw: values, Values: map[string]*chart.Value{}}\n\toptions := chartutil.ReleaseOptions{\n\t\tName: \"istio\",\n\t\tTime: timeconv.Now(),\n\t\tNamespace: namespace,\n\t}\n\n\tvals, err := chartutil.ToRenderValuesCaps(chrt, config, options, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfiles, err := engine.New().Render(chrt, vals)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Create sorted array of keys to iterate over, to stabilize the order of the rendered templates\n\tkeys := make([]string, 0, len(files))\n\tfor k := range files {\n\t\tif strings.HasSuffix(k, NotesFileNameSuffix) {\n\t\t\tcontinue\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tvar sb strings.Builder\n\tfor i := 0; i < len(keys); i++ {\n\t\tf := files[keys[i]]\n\t\t\/\/ add yaml separator if the rendered file doesn't have one at the end\n\t\tif !strings.HasSuffix(strings.TrimSpace(f)+\"\\n\", YAMLSeparator) {\n\t\t\tf += YAMLSeparator\n\t\t}\n\t\t_, err := sb.WriteString(f)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn sb.String(), nil\n}\n\n\/\/ OverlayYAML patches the overlay tree over the base tree and returns the result. All trees are expressed as YAML\n\/\/ strings.\nfunc OverlayYAML(base, overlay string) (string, error) {\n\tif overlay == \"\" {\n\t\treturn base, nil\n\t}\n\tbj, err := yaml.YAMLToJSON([]byte(base))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"yamlToJSON error in base: %s\\n%s\", err, bj)\n\t}\n\toj, err := yaml.YAMLToJSON([]byte(overlay))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"yamlToJSON error in overlay: %s\\n%s\", err, oj)\n\t}\n\tif overlay == \"\" {\n\t\toj = []byte(\"{}\")\n\t}\n\n\tmerged, err := jsonpatch.MergePatch(bj, oj)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"json merge error (%s) for base object: \\n%s\\n override object: \\n%s\", err, bj, oj)\n\t}\n\tmy, err := yaml.JSONToYAML(merged)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"jsonToYAML error (%s) for merged object: \\n%s\", err, merged)\n\t}\n\n\treturn string(my), nil\n}\n\n\/\/ DefaultFilenameForProfile returns the profile name of the default profile for the given profile.\nfunc DefaultFilenameForProfile(profile string) (string, error) {\n\tswitch {\n\tcase util.IsFilePath(profile):\n\t\treturn filepath.Join(filepath.Dir(profile), DefaultProfileFilename), nil\n\tdefault:\n\t\tif _, ok := ProfileNames[profile]; ok || profile == \"\" {\n\t\t\treturn DefaultProfileString, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"bad profile string %s\", profile)\n\t}\n}\n\n\/\/ IsDefaultProfile reports whether the given profile is the default profile.\nfunc IsDefaultProfile(profile string) bool {\n\treturn profile == \"\" || profile == DefaultProfileString || filepath.Base(profile) == DefaultProfileFilename\n}\n\nfunc readFile(path string) (string, error) {\n\tb, err := ioutil.ReadFile(path)\n\treturn string(b), err\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"os\/exec\"\n\n\tplugin \"github.com\/hashicorp\/go-plugin\"\n\t\"github.com\/hashicorp\/terraform\/plugin\/discovery\"\n)\n\n\/\/ ClientConfig returns a configuration object that can be used to instantiate\n\/\/ a client for the plugin described by the given metadata.\nfunc ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {\n\treturn &plugin.ClientConfig{\n\t\tCmd: exec.Command(m.Path),\n\t\tHandshakeConfig: Handshake,\n\t\tManaged: true,\n\t\tPlugins: PluginMap,\n\t}\n}\n\n\/\/ Client returns a plugin client for the plugin described by the given metadata.\nfunc Client(m discovery.PluginMeta) *plugin.Client {\n\treturn plugin.NewClient(ClientConfig(m))\n}\n<commit_msg>specify a logger for go-plugin<commit_after>package plugin\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\n\thclog \"github.com\/hashicorp\/go-hclog\"\n\tplugin \"github.com\/hashicorp\/go-plugin\"\n\t\"github.com\/hashicorp\/terraform\/plugin\/discovery\"\n)\n\n\/\/ ClientConfig returns a configuration object that can be used to instantiate\n\/\/ a client for the plugin described by the given metadata.\nfunc ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tName: \"plugin\",\n\t\tLevel: hclog.Trace,\n\t\tOutput: os.Stderr,\n\t})\n\n\treturn &plugin.ClientConfig{\n\t\tCmd: exec.Command(m.Path),\n\t\tHandshakeConfig: Handshake,\n\t\tManaged: true,\n\t\tPlugins: PluginMap,\n\t\tLogger: logger,\n\t}\n}\n\n\/\/ Client returns a plugin client for the plugin described by the given metadata.\nfunc Client(m discovery.PluginMeta) *plugin.Client {\n\treturn plugin.NewClient(ClientConfig(m))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build go1.8\n\npackage sqlite3\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNamedParams(t *testing.T) {\n\ttempFilename := TempFilename(t)\n\tdefer os.Remove(tempFilename)\n\tdb, err := sql.Open(\"sqlite3\", tempFilename)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to open database:\", err)\n\t}\n\tdefer db.Close()\n\n\t_, err = db.Exec(`\n\tcreate table foo (id integer, name text, extra text);\n\t`)\n\tif err != nil {\n\t\tt.Error(\"Failed to call db.Query:\", err)\n\t}\n\n\t_, err = db.Exec(`insert into foo(id, name, extra) values(:id, :name, :name)`, sql.Named(\"name\", \"foo\"), sql.Named(\"id\", 1))\n\tif err != nil {\n\t\tt.Error(\"Failed to call db.Exec:\", err)\n\t}\n\n\trow := db.QueryRow(`select id, extra from foo where id = :id and extra = :extra`, sql.Named(\"id\", 1), sql.Named(\"extra\", \"foo\"))\n\tif row == nil {\n\t\tt.Error(\"Failed to call db.QueryRow\")\n\t}\n\tvar id int\n\tvar extra string\n\terr = row.Scan(&id, &extra)\n\tif err != nil {\n\t\tt.Error(\"Failed to db.Scan:\", err)\n\t}\n\tif id != 1 || extra != \"foo\" {\n\t\tt.Error(\"Failed to db.QueryRow: not matched results\")\n\t}\n}\n\nvar (\n\ttestTableStatements = []string{\n\t\t`DROP TABLE IF EXISTS test_table`,\n\t\t`\nCREATE TABLE IF NOT EXISTS test_table (\n\tkey1 VARCHAR(64) PRIMARY KEY,\n\tkey_id VARCHAR(64) NOT NULL,\n\tkey2 VARCHAR(64) NOT NULL,\n\tkey3 VARCHAR(64) NOT NULL,\n\tkey4 VARCHAR(64) NOT NULL,\n\tkey5 VARCHAR(64) NOT NULL,\n\tkey6 VARCHAR(64) NOT NULL,\n\tdata BLOB NOT NULL\n);`,\n\t}\n\tletterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n)\n\nfunc randStringBytes(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}\n\nfunc initDatabase(t *testing.T, db *sql.DB, rowCount int64) {\n\tt.Logf(\"Executing db initializing statements\")\n\tfor _, query := range testTableStatements {\n\t\t_, err := db.Exec(query)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tfor i := int64(0); i < rowCount; i++ {\n\t\tquery := `INSERT INTO test_table\n\t\t\t(key1, key_id, key2, key3, key4, key5, key6, data)\n\t\t\tVALUES\n\t\t\t(?, ?, ?, ?, ?, ?, ?, ?);`\n\t\targs := []interface{}{\n\t\t\trandStringBytes(50),\n\t\t\tfmt.Sprint(i),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(2048),\n\t\t}\n\t\t_, err := db.Exec(query, args...)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestShortTimeout(t *testing.T) {\n\tsrcTempFilename := TempFilename(t)\n\tdefer os.Remove(srcTempFilename)\n\n\tdb, err := sql.Open(\"sqlite3\", srcTempFilename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\tinitDatabase(t, db, 100)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Microsecond)\n\tdefer cancel()\n\tquery := `SELECT key1, key_id, key2, key3, key4, key5, key6, data\n\t\tFROM test_table\n\t\tORDER BY key2 ASC`\n\trows, err := db.QueryContext(ctx, query)\n\tif err != nil && context.DeadlineExceeded {\n\t\tt.Fatal(err)\n\t}\n\tif ctx.Err() != nil && context.DeadlineExceeded != ctx.Err() {\n\t\tt.Fatalf(ctx.Err())\n\t}\n\trows.Close()\n}\n<commit_msg>fix broken test<commit_after>\/\/ Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build go1.8\n\npackage sqlite3\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNamedParams(t *testing.T) {\n\ttempFilename := TempFilename(t)\n\tdefer os.Remove(tempFilename)\n\tdb, err := sql.Open(\"sqlite3\", tempFilename)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to open database:\", err)\n\t}\n\tdefer db.Close()\n\n\t_, err = db.Exec(`\n\tcreate table foo (id integer, name text, extra text);\n\t`)\n\tif err != nil {\n\t\tt.Error(\"Failed to call db.Query:\", err)\n\t}\n\n\t_, err = db.Exec(`insert into foo(id, name, extra) values(:id, :name, :name)`, sql.Named(\"name\", \"foo\"), sql.Named(\"id\", 1))\n\tif err != nil {\n\t\tt.Error(\"Failed to call db.Exec:\", err)\n\t}\n\n\trow := db.QueryRow(`select id, extra from foo where id = :id and extra = :extra`, sql.Named(\"id\", 1), sql.Named(\"extra\", \"foo\"))\n\tif row == nil {\n\t\tt.Error(\"Failed to call db.QueryRow\")\n\t}\n\tvar id int\n\tvar extra string\n\terr = row.Scan(&id, &extra)\n\tif err != nil {\n\t\tt.Error(\"Failed to db.Scan:\", err)\n\t}\n\tif id != 1 || extra != \"foo\" {\n\t\tt.Error(\"Failed to db.QueryRow: not matched results\")\n\t}\n}\n\nvar (\n\ttestTableStatements = []string{\n\t\t`DROP TABLE IF EXISTS test_table`,\n\t\t`\nCREATE TABLE IF NOT EXISTS test_table (\n\tkey1 VARCHAR(64) PRIMARY KEY,\n\tkey_id VARCHAR(64) NOT NULL,\n\tkey2 VARCHAR(64) NOT NULL,\n\tkey3 VARCHAR(64) NOT NULL,\n\tkey4 VARCHAR(64) NOT NULL,\n\tkey5 VARCHAR(64) NOT NULL,\n\tkey6 VARCHAR(64) NOT NULL,\n\tdata BLOB NOT NULL\n);`,\n\t}\n\tletterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n)\n\nfunc randStringBytes(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}\n\nfunc initDatabase(t *testing.T, db *sql.DB, rowCount int64) {\n\tt.Logf(\"Executing db initializing statements\")\n\tfor _, query := range testTableStatements {\n\t\t_, err := db.Exec(query)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tfor i := int64(0); i < rowCount; i++ {\n\t\tquery := `INSERT INTO test_table\n\t\t\t(key1, key_id, key2, key3, key4, key5, key6, data)\n\t\t\tVALUES\n\t\t\t(?, ?, ?, ?, ?, ?, ?, ?);`\n\t\targs := []interface{}{\n\t\t\trandStringBytes(50),\n\t\t\tfmt.Sprint(i),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(50),\n\t\t\trandStringBytes(2048),\n\t\t}\n\t\t_, err := db.Exec(query, args...)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestShortTimeout(t *testing.T) {\n\tsrcTempFilename := TempFilename(t)\n\tdefer os.Remove(srcTempFilename)\n\n\tdb, err := sql.Open(\"sqlite3\", srcTempFilename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\tinitDatabase(t, db, 100)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Microsecond)\n\tdefer cancel()\n\tquery := `SELECT key1, key_id, key2, key3, key4, key5, key6, data\n\t\tFROM test_table\n\t\tORDER BY key2 ASC`\n\trows, err := db.QueryContext(ctx, query)\n\tif err != nil && err != context.DeadlineExceeded {\n\t\tt.Fatal(err)\n\t}\n\tif ctx.Err() != nil && ctx.Err() != context.DeadlineExceeded {\n\t\tt.Fatal(ctx.Err())\n\t}\n\trows.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package sqsbroker\n\nimport (\n\t\"fmt\"\n)\n\nconst minAllocatedStorage = 5\nconst maxAllocatedStorage = 6144\n\ntype Catalog struct {\n\tServices []Service `json:\"services,omitempty\"`\n}\n\ntype Service struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tBindable bool `json:\"bindable,omitempty\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tMetadata *ServiceMetadata `json:\"metadata,omitempty\"`\n\tRequires []string `json:\"requires,omitempty\"`\n\tPlanUpdateable bool `json:\"plan_updateable\"`\n\tPlans []ServicePlan `json:\"plans,omitempty\"`\n\tDashboardClient *DashboardClient `json:\"dashboard_client,omitempty\"`\n}\n\ntype ServiceMetadata struct {\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tImageURL string `json:\"imageUrl,omitempty\"`\n\tLongDescription string `json:\"longDescription,omitempty\"`\n\tProviderDisplayName string `json:\"providerDisplayName,omitempty\"`\n\tDocumentationURL string `json:\"documentationUrl,omitempty\"`\n\tSupportURL string `json:\"supportUrl,omitempty\"`\n}\n\ntype ServicePlan struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tMetadata *ServicePlanMetadata `json:\"metadata,omitempty\"`\n\tFree bool `json:\"free\"`\n\tSQSProperties SQSProperties `json:\"sqs_properties,omitempty\"`\n}\n\ntype ServicePlanMetadata struct {\n\tBullets []string `json:\"bullets,omitempty\"`\n\tCosts []Cost `json:\"costs,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n}\n\ntype DashboardClient struct {\n\tID string `json:\"id,omitempty\"`\n\tSecret string `json:\"secret,omitempty\"`\n\tRedirectURI string `json:\"redirect_uri,omitempty\"`\n}\n\ntype Cost struct {\n\tAmount map[string]interface{} `json:\"amount,omitempty\"`\n\tUnit string `json:\"unit,omitempty\"`\n}\n\ntype SQSProperties struct {\n\tDelaySeconds string `json:\"delay_seconds\"`\n\tMaximumMessageSize string `json:\"maximum_message_size\"`\n\tMessageRetentionPeriod string `json:\"message_retention_period\"`\n\tReceiveMessageWaitTimeSeconds string `json:\"receive_message_wait_time_seconds\"`\n\tVisibilityTimeout string `json:\"visibility_timeout\"`\n}\n\nfunc (c Catalog) Validate() error {\n\tfor _, service := range c.Services {\n\t\tif err := service.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"Validating Services configuration: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c Catalog) FindService(serviceID string) (service Service, found bool) {\n\tfor _, service := range c.Services {\n\t\tif service.ID == serviceID {\n\t\t\treturn service, true\n\t\t}\n\t}\n\n\treturn service, false\n}\n\nfunc (c Catalog) FindServicePlan(planID string) (plan ServicePlan, found bool) {\n\tfor _, service := range c.Services {\n\t\tfor _, plan := range service.Plans {\n\t\t\tif plan.ID == planID {\n\t\t\t\treturn plan, true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn plan, false\n}\n\nfunc (s Service) Validate() error {\n\tif s.ID == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty ID (%+v)\", s)\n\t}\n\n\tif s.Name == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Name (%+v)\", s)\n\t}\n\n\tif s.Description == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Description (%+v)\", s)\n\t}\n\n\tfor _, servicePlan := range s.Plans {\n\t\tif err := servicePlan.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"Validating Plans configuration: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (sp ServicePlan) Validate() error {\n\tif sp.ID == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty ID (%+v)\", sp)\n\t}\n\n\tif sp.Name == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Name (%+v)\", sp)\n\t}\n\n\tif sp.Description == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Description (%+v)\", sp)\n\t}\n\n\tif err := sp.SQSProperties.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"Validating SQS Properties configuration: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (sq SQSProperties) Validate() error {\n\n\treturn nil\n}\n<commit_msg>Add omitempty to optional fields<commit_after>package sqsbroker\n\nimport (\n\t\"fmt\"\n)\n\nconst minAllocatedStorage = 5\nconst maxAllocatedStorage = 6144\n\ntype Catalog struct {\n\tServices []Service `json:\"services,omitempty\"`\n}\n\ntype Service struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tBindable bool `json:\"bindable,omitempty\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tMetadata *ServiceMetadata `json:\"metadata,omitempty\"`\n\tRequires []string `json:\"requires,omitempty\"`\n\tPlanUpdateable bool `json:\"plan_updateable\"`\n\tPlans []ServicePlan `json:\"plans,omitempty\"`\n\tDashboardClient *DashboardClient `json:\"dashboard_client,omitempty\"`\n}\n\ntype ServiceMetadata struct {\n\tDisplayName string `json:\"displayName,omitempty\"`\n\tImageURL string `json:\"imageUrl,omitempty\"`\n\tLongDescription string `json:\"longDescription,omitempty\"`\n\tProviderDisplayName string `json:\"providerDisplayName,omitempty\"`\n\tDocumentationURL string `json:\"documentationUrl,omitempty\"`\n\tSupportURL string `json:\"supportUrl,omitempty\"`\n}\n\ntype ServicePlan struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tMetadata *ServicePlanMetadata `json:\"metadata,omitempty\"`\n\tFree bool `json:\"free\"`\n\tSQSProperties SQSProperties `json:\"sqs_properties,omitempty\"`\n}\n\ntype ServicePlanMetadata struct {\n\tBullets []string `json:\"bullets,omitempty\"`\n\tCosts []Cost `json:\"costs,omitempty\"`\n\tDisplayName string `json:\"displayName,omitempty\"`\n}\n\ntype DashboardClient struct {\n\tID string `json:\"id,omitempty\"`\n\tSecret string `json:\"secret,omitempty\"`\n\tRedirectURI string `json:\"redirect_uri,omitempty\"`\n}\n\ntype Cost struct {\n\tAmount map[string]interface{} `json:\"amount,omitempty\"`\n\tUnit string `json:\"unit,omitempty\"`\n}\n\ntype SQSProperties struct {\n\tDelaySeconds string `json:\"delay_seconds,omitempty\"`\n\tMaximumMessageSize string `json:\"maximum_message_size,omitempty\"`\n\tMessageRetentionPeriod string `json:\"message_retention_period,omitempty\"`\n\tReceiveMessageWaitTimeSeconds string `json:\"receive_message_wait_time_seconds,omitempty\"`\n\tVisibilityTimeout string `json:\"visibility_timeout,omitempty\"`\n}\n\nfunc (c Catalog) Validate() error {\n\tfor _, service := range c.Services {\n\t\tif err := service.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"Validating Services configuration: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c Catalog) FindService(serviceID string) (service Service, found bool) {\n\tfor _, service := range c.Services {\n\t\tif service.ID == serviceID {\n\t\t\treturn service, true\n\t\t}\n\t}\n\n\treturn service, false\n}\n\nfunc (c Catalog) FindServicePlan(planID string) (plan ServicePlan, found bool) {\n\tfor _, service := range c.Services {\n\t\tfor _, plan := range service.Plans {\n\t\t\tif plan.ID == planID {\n\t\t\t\treturn plan, true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn plan, false\n}\n\nfunc (s Service) Validate() error {\n\tif s.ID == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty ID (%+v)\", s)\n\t}\n\n\tif s.Name == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Name (%+v)\", s)\n\t}\n\n\tif s.Description == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Description (%+v)\", s)\n\t}\n\n\tfor _, servicePlan := range s.Plans {\n\t\tif err := servicePlan.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"Validating Plans configuration: %s\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (sp ServicePlan) Validate() error {\n\tif sp.ID == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty ID (%+v)\", sp)\n\t}\n\n\tif sp.Name == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Name (%+v)\", sp)\n\t}\n\n\tif sp.Description == \"\" {\n\t\treturn fmt.Errorf(\"Must provide a non-empty Description (%+v)\", sp)\n\t}\n\n\tif err := sp.SQSProperties.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"Validating SQS Properties configuration: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (sq SQSProperties) Validate() error {\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/log\"\n)\n\n\/\/ Dispatcher dispatches alerts. It is absed on the alert's data\n\/\/ rather than the time they arrive. Thus it can recover it's state\n\/\/ without persistence.\ntype Dispatcher struct {\n\tstate State\n\n\taggrGroups map[model.Fingerprint]*aggrGroup\n}\n\nfunc NewDispatcher(state State) *Dispatcher {\n\treturn &Dispatcher{\n\t\tstate: state,\n\t\taggrGroups: map[model.Fingerprint]*aggrGroup{},\n\t}\n}\n\nfunc (d *Dispatcher) notify(name string, alerts ...*Alert) {\n\tn := &LogNotifier{}\n\ti := []interface{}{name, \"::\"}\n\tfor _, a := range alerts {\n\t\ti = append(i, a)\n\t}\n\tn.Send(i...)\n}\n\nfunc (d *Dispatcher) Run() {\n\tfor {\n\t\tlog.Infoln(\"waiting\")\n\t\talert := d.state.Alert().Next()\n\n\t\tlog.Infoln(\"received:\", alert)\n\n\t\tconf, err := d.state.Config().Get()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Infoln(\"retrieved config\")\n\n\t\tfor _, m := range conf.Routes.Match(alert.Labels) {\n\t\t\td.processAlert(alert, m)\n\t\t}\n\t\tlog.Infoln(\"processing done\")\n\t}\n}\n\nfunc (d *Dispatcher) processAlert(alert *Alert, opts *RouteOpts) {\n\tgroup := model.LabelSet{}\n\n\tfor ln, lv := range alert.Labels {\n\t\tif _, ok := opts.GroupBy[ln]; ok {\n\t\t\tgroup[ln] = lv\n\t\t}\n\t}\n\n\tfp := group.Fingerprint()\n\n\tag, ok := d.aggrGroups[fp]\n\tif !ok {\n\t\tag = &aggrGroup{\n\t\t\tdispatcher: d,\n\n\t\t\tlabels: group,\n\t\t\twait: opts.GroupWait(),\n\t\t\twaitTimer: time.NewTimer(time.Hour),\n\t\t\tnotify: opts.SendTo,\n\t\t}\n\t\td.aggrGroups[fp] = ag\n\n\t\tgo ag.run()\n\t}\n\n\tag.insert(alert)\n}\n\n\/\/ aggrGroup aggregates alerts into groups based on\n\/\/ common values for a set of labels.\ntype aggrGroup struct {\n\tdispatcher *Dispatcher\n\n\tlabels model.LabelSet\n\talertsOld alertTimeline\n\talertsNew alertTimeline\n\tnotify string\n\n\twait time.Duration\n\twaitTimer *time.Timer\n\n\tdone chan bool\n\tmtx sync.RWMutex\n}\n\nfunc (ag *aggrGroup) run() {\n\n\tag.waitTimer.Stop()\n\n\tif ag.wait > 0 {\n\t\tag.waitTimer.Reset(ag.wait)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ag.waitTimer.C:\n\t\t\tlog.Infoln(\"timer flush\")\n\t\t\tag.flush()\n\t\tcase <-ag.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ag *aggrGroup) stop() {\n\tag.waitTimer.Stop()\n\tclose(ag.done)\n}\n\nfunc (ag *aggrGroup) fingerprint() model.Fingerprint {\n\treturn ag.labels.Fingerprint()\n}\n\nfunc (ag *aggrGroup) insert(alert *Alert) {\n\tlog.Infoln(\"insert:\", alert)\n\n\tag.alertsNew = append(ag.alertsNew, alert)\n\tsort.Sort(ag.alertsNew)\n\n\tif alert.Timestamp.Add(ag.wait).Before(time.Now()) {\n\t\tag.flush()\n\t}\n\n\tif ag.wait > 0 {\n\t\tag.waitTimer.Reset(ag.wait)\n\t}\n}\n\nfunc (ag *aggrGroup) flush() {\n\tlog.Infoln(\"flush\")\n\n\tag.mtx.Lock()\n\tdefer ag.mtx.Unlock()\n\n\tag.dispatcher.notify(ag.notify, ag.alertsNew...)\n\n\tag.alertsOld = append(ag.alertsOld, ag.alertsNew...)\n\tag.alertsNew = ag.alertsNew[:0]\n}\n\ntype alertTimeline []*Alert\n\nfunc (at alertTimeline) Len() int {\n\treturn len(at)\n}\n\nfunc (at alertTimeline) Less(i, j int) bool {\n\treturn at[i].Timestamp.Before(at[j].Timestamp)\n}\n\nfunc (at alertTimeline) Swap(i, j int) {\n\tat[i], at[j] = at[j], at[i]\n}\n<commit_msg>improve dispatcher code and fix concurrency<commit_after>package manager\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/log\"\n)\n\n\/\/ Dispatcher dispatches alerts. It is absed on the alert's data\n\/\/ rather than the time they arrive. Thus it can recover it's state\n\/\/ without persistence.\ntype Dispatcher struct {\n\tstate State\n\n\taggrGroups map[model.Fingerprint]*aggrGroup\n}\n\nfunc NewDispatcher(state State) *Dispatcher {\n\treturn &Dispatcher{\n\t\tstate: state,\n\t\taggrGroups: map[model.Fingerprint]*aggrGroup{},\n\t}\n}\n\nfunc (d *Dispatcher) notify(name string, alerts ...*Alert) {\n\tn := &LogNotifier{}\n\ti := []interface{}{name, \"::\"}\n\tfor _, a := range alerts {\n\t\ti = append(i, a)\n\t}\n\tn.Send(i...)\n}\n\nfunc (d *Dispatcher) Run() {\n\tfor {\n\t\talert := d.state.Alert().Next()\n\n\t\tconf, err := d.state.Config().Get()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, m := range conf.Routes.Match(alert.Labels) {\n\t\t\td.processAlert(alert, m)\n\t\t}\n\t}\n}\n\nfunc (d *Dispatcher) processAlert(alert *Alert, opts *RouteOpts) {\n\tgroup := model.LabelSet{}\n\n\tfor ln, lv := range alert.Labels {\n\t\tif _, ok := opts.GroupBy[ln]; ok {\n\t\t\tgroup[ln] = lv\n\t\t}\n\t}\n\n\tfp := group.Fingerprint()\n\n\tag, ok := d.aggrGroups[fp]\n\tif !ok {\n\t\tag = newAggrGroup(group, opts)\n\t\td.aggrGroups[fp] = ag\n\t}\n\n\tag.insert(alert)\n\n\tif ag.empty() {\n\t\tag.stop()\n\t\tdelete(d.aggrGroups, fp)\n\t}\n}\n\n\/\/ aggrGroup aggregates alerts into groups based on\n\/\/ common values for a set of labels.\ntype aggrGroup struct {\n\tdispatcher *Dispatcher\n\n\tlabels model.LabelSet\n\talertsOld alertTimeline\n\talertsNew alertTimeline\n\tnotify string\n\n\twait time.Duration\n\twaitTimer *time.Timer\n\n\tdone chan bool\n\tmtx sync.RWMutex\n}\n\n\/\/ newAggrGroup returns a new aggregation group and starts background processing\n\/\/ that sends notifications about the contained alerts.\nfunc newAggrGroup(labels model.LabelSet, opts *RouteOpts) *aggrGroup {\n\tag := &aggrGroup{\n\t\tdispatcher: d,\n\n\t\tlabels: group,\n\t\twait: opts.GroupWait(),\n\t\twaitTimer: time.NewTimer(opts.GroupWait()),\n\t\tnotify: opts.SendTo,\n\t\tdone: make(chan bool),\n\t}\n\tif ag.wait == 0 {\n\t\tag.waitTimer.Stop()\n\t}\n\n\tgo ag.run()\n\n\treturn ag\n}\n\nfunc (ag *aggrGroup) run() {\n\tfor {\n\t\tselect {\n\t\tcase <-ag.waitTimer.C:\n\t\t\tag.flush()\n\t\tcase <-ag.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ag *aggrGroup) stop() {\n\tag.waitTimer.Stop()\n\tclose(ag.done)\n}\n\nfunc (ag *aggrGroup) fingerprint() model.Fingerprint {\n\treturn ag.labels.Fingerprint()\n}\n\n\/\/ insert the alert into the aggregation group. If the aggregation group\n\/\/ is empty afterwards, true is returned.\nfunc (ag *aggrGroup) insert(alert *Alert) {\n\tag.mtx.Lock()\n\n\tag.alertsNew = append(ag.alertsNew, alert)\n\tsort.Sort(ag.alertsNew)\n\n\tag.mtx.Unlock()\n\n\t\/\/ Immediately trigger a flush if the wait duration for this\n\t\/\/ alert is already over.\n\tif alert.Timestamp.Add(ag.wait).Before(time.Now()) {\n\t\tag.flush()\n\t}\n\n\tif ag.wait > 0 {\n\t\tag.waitTimer.Reset(ag.wait)\n\t}\n}\n\nfunc (ag *aggrGroup) empty() bool {\n\tag.mtx.RLock()\n\tdefer ag.mtx.RUnlock()\n\n\treturn len(ag.alertsNew)+len(ag.alertsOld) == 0\n}\n\n\/\/ flush sends notifications for all new alerts.\nfunc (ag *aggrGroup) flush() {\n\tag.mtx.Lock()\n\tdefer ag.mtx.Unlock()\n\n\tag.dispatcher.notify(ag.notify, ag.alertsNew...)\n\n\tag.alertsOld = append(ag.alertsOld, ag.alertsNew...)\n\tag.alertsNew = ag.alertsNew[:0]\n}\n\n\/\/ alertTimeline is a list of alerts sorted by their timestamp.\ntype alertTimeline []*Alert\n\nfunc (at alertTimeline) Len() int { return len(at) }\nfunc (at alertTimeline) Less(i, j int) bool { return at[i].Timestamp.Before(at[j].Timestamp) }\nfunc (at alertTimeline) Swap(i, j int) { at[i], at[j] = at[j], at[i] }\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst ResolveTimeout = 15 * time.Second\n\n\/\/ Dispatcher dispatches alerts. It is absed on the alert's data\n\/\/ rather than the time they arrive. Thus it can recover it's state\n\/\/ without persistence.\ntype Dispatcher struct {\n\tstate State\n\n\taggrGroups map[model.Fingerprint]*aggrGroup\n\tnotifiers map[string]Notifier\n\n\tmtx sync.RWMutex\n}\n\nfunc NewDispatcher(state State, notifiers []Notifier) *Dispatcher {\n\tdisp := &Dispatcher{\n\t\tstate: state,\n\t\taggrGroups: map[model.Fingerprint]*aggrGroup{},\n\t\tnotifiers: map[string]Notifier{},\n\t}\n\n\tfor _, n := range notifiers {\n\t\tdisp.notifiers[n.Name()] = n\n\t}\n\n\treturn disp\n}\n\nfunc (d *Dispatcher) notify(name string, alerts ...*Alert) error {\n\tif len(alerts) == 0 {\n\t\treturn nil\n\t}\n\n\td.mtx.RLock()\n\tnotifier, ok := d.notifiers[name]\n\td.mtx.RUnlock()\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"notifier %q does not exist\", name)\n\t}\n\n\treturn notifier.Send(alerts...)\n}\n\nfunc (d *Dispatcher) Run() {\n\n\tupdates := d.state.Alert().Iter()\n\tcleanup := time.Tick(30 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-cleanup:\n\t\t\t\/\/ Cleanup routine.\n\t\t\tfor _, ag := range d.aggrGroups {\n\t\t\t\tif ag.empty() {\n\t\t\t\t\tag.stop()\n\t\t\t\t\tdelete(d.aggrGroups, ag.fingerprint())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ now := time.Now()\n\n\t\t\t\/\/ list, err := d.state.Alert().GetAll()\n\t\t\t\/\/ if err != nil {\n\t\t\t\/\/ \tlog.Error(err)\n\t\t\t\/\/ }\n\n\t\t\t\/\/ for _, a := range list {\n\t\t\t\/\/ \tif a.Resolved() && a.ResolvedAt.Before(now.Sub(ResolveTimeout)) {\n\t\t\t\/\/ \t\tif err := d.state.Alert().Del(a.Fingerprint()); err != nil {\n\t\t\t\/\/ \t\t\tlog.Errorf(\"error cleaning resolved alerts: %s\", err)\n\t\t\t\/\/ \t\t}\n\t\t\t\/\/ \t}\n\t\t\t\/\/ }\n\n\t\tcase alert := <-updates:\n\n\t\t\tconf, err := d.state.Config().Get()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, m := range conf.Routes.Match(alert.Labels) {\n\t\t\t\td.processAlert(alert, m)\n\t\t\t}\n\n\t\t\tif !alert.Resolved() {\n\t\t\t\ta := *alert\n\t\t\t\ta.ResolvedAt = alert.CreatedAt.Add(ResolveTimeout)\n\n\t\t\t\t\/\/ After the constant timeout update the alert to be resolved.\n\t\t\t\tgo func(a Alert) {\n\t\t\t\t\tnow := time.Now()\n\n\t\t\t\t\tif a.ResolvedAt.After(now) {\n\t\t\t\t\t\ttime.Sleep(a.ResolvedAt.Sub(now))\n\t\t\t\t\t}\n\t\t\t\t\tif err := d.state.Alert().Add(&a); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"alert auto-resolve failed: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t}(a)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *Dispatcher) processAlert(alert *Alert, opts *RouteOpts) {\n\tgroup := model.LabelSet{}\n\n\tfor ln, lv := range alert.Labels {\n\t\tif _, ok := opts.GroupBy[ln]; ok {\n\t\t\tgroup[ln] = lv\n\t\t}\n\t}\n\n\tfp := group.Fingerprint()\n\n\tag, ok := d.aggrGroups[fp]\n\tif !ok {\n\t\tag = newAggrGroup(d, group, opts)\n\t\td.aggrGroups[fp] = ag\n\t}\n\n\tag.insert(alert)\n}\n\n\/\/ Alert models an action triggered by Prometheus.\ntype Alert struct {\n\t\/\/ Label value pairs for purpose of aggregation, matching, and disposition\n\t\/\/ dispatching. This must minimally include an \"alertname\" label.\n\tLabels model.LabelSet `json:\"labels\"`\n\n\t\/\/ Extra key\/value information which is not used for aggregation.\n\tPayload map[string]string `json:\"payload,omitempty\"`\n\tSummary string `json:\"summary,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRunbook string `json:\"runbook,omitempty\"`\n\n\tCreatedAt time.Time `json:\"created_at,omitempty\"`\n\tResolvedAt time.Time `json:\"resolved_at,omitempty\"`\n\n\t\/\/ The authoritative timestamp.\n\tTimestamp time.Time `json:\"timestamp\"`\n}\n\n\/\/ Name returns the name of the alert. It is equivalent to the \"alertname\" label.\nfunc (a *Alert) Name() string {\n\treturn string(a.Labels[model.AlertNameLabel])\n}\n\n\/\/ Fingerprint returns a unique hash for the alert. It is equivalent to\n\/\/ the fingerprint of the alert's label set.\nfunc (a *Alert) Fingerprint() model.Fingerprint {\n\treturn a.Labels.Fingerprint()\n}\n\nfunc (a *Alert) String() string {\n\ts := fmt.Sprintf(\"%s[%x]\", a.Name(), a.Fingerprint())\n\tif a.Resolved() {\n\t\treturn s + \"[resolved]\"\n\t}\n\treturn s + \"[active]\"\n}\n\nfunc (a *Alert) Resolved() bool {\n\treturn a.ResolvedAt.After(a.CreatedAt)\n}\n\n\/\/ aggrGroup aggregates alerts into groups based on\n\/\/ common values for a set of labels.\ntype aggrGroup struct {\n\tdispatcher *Dispatcher\n\n\tlabels model.LabelSet\n\topts *RouteOpts\n\n\tnext *time.Timer\n\tdone chan struct{}\n\n\tmtx sync.RWMutex\n\talerts map[model.Fingerprint]struct{}\n\thasSent bool\n}\n\n\/\/ newAggrGroup returns a new aggregation group and starts background processing\n\/\/ that sends notifications about the contained alerts.\nfunc newAggrGroup(d *Dispatcher, labels model.LabelSet, opts *RouteOpts) *aggrGroup {\n\tag := &aggrGroup{\n\t\tdispatcher: d,\n\n\t\tlabels: labels,\n\t\topts: opts,\n\n\t\talerts: map[model.Fingerprint]struct{}{},\n\t\tdone: make(chan struct{}),\n\n\t\t\/\/ Set an initial one-time wait before flushing\n\t\t\/\/ the first batch of notifications.\n\t\tnext: time.NewTimer(opts.GroupWait),\n\t}\n\n\tgo ag.run()\n\n\treturn ag\n}\n\nfunc (ag *aggrGroup) run() {\n\n\tdefer ag.next.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ag.next.C:\n\t\t\tag.flush()\n\t\t\t\/\/ Wait the configured interval before calling flush again.\n\t\t\tag.next.Reset(ag.opts.GroupInterval)\n\n\t\tcase <-ag.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ag *aggrGroup) stop() {\n\tclose(ag.done)\n}\n\nfunc (ag *aggrGroup) fingerprint() model.Fingerprint {\n\treturn ag.labels.Fingerprint()\n}\n\n\/\/ insert the alert into the aggregation group. If the aggregation group\n\/\/ is empty afterwards, true is returned.\nfunc (ag *aggrGroup) insert(alert *Alert) {\n\tfp := alert.Fingerprint()\n\n\tag.mtx.Lock()\n\tag.alerts[fp] = struct{}{}\n\tag.mtx.Unlock()\n\n\t\/\/ Immediately trigger a flush if the wait duration for this\n\t\/\/ alert is already over.\n\tif !ag.hasSent && alert.Timestamp.Add(ag.opts.GroupWait).Before(time.Now()) {\n\t\tag.next.Reset(0)\n\t}\n}\n\nfunc (ag *aggrGroup) empty() bool {\n\tag.mtx.RLock()\n\tdefer ag.mtx.RUnlock()\n\n\treturn len(ag.alerts) == 0\n}\n\n\/\/ flush sends notifications for all new alerts.\nfunc (ag *aggrGroup) flush() {\n\tag.mtx.Lock()\n\tdefer ag.mtx.Unlock()\n\n\tvar alerts []*Alert\n\tfor fp := range ag.alerts {\n\t\ta, err := ag.dispatcher.state.Alert().Get(fp)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ TODO(fabxc): only delete if notify successful.\n\t\tif a.Resolved() {\n\t\t\tdelete(ag.alerts, fp)\n\t\t}\n\t\talerts = append(alerts, a)\n\t}\n\n\tag.dispatcher.notify(ag.opts.SendTo, alerts...)\n\tag.hasSent = true\n}\n\n\/\/ alertTimeline is a list of alerts sorted by their timestamp.\ntype alertTimeline []*Alert\n\nfunc (at alertTimeline) Len() int { return len(at) }\nfunc (at alertTimeline) Less(i, j int) bool { return at[i].Timestamp.Before(at[j].Timestamp) }\nfunc (at alertTimeline) Swap(i, j int) { at[i], at[j] = at[j], at[i] }\n<commit_msg>calculate stale resolve time on initial insert<commit_after>package manager\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/log\"\n)\n\nconst ResolveTimeout = 15 * time.Second\n\n\/\/ Dispatcher dispatches alerts. It is absed on the alert's data\n\/\/ rather than the time they arrive. Thus it can recover it's state\n\/\/ without persistence.\ntype Dispatcher struct {\n\tstate State\n\n\taggrGroups map[model.Fingerprint]*aggrGroup\n\tnotifiers map[string]Notifier\n\n\tmtx sync.RWMutex\n}\n\nfunc NewDispatcher(state State, notifiers []Notifier) *Dispatcher {\n\tdisp := &Dispatcher{\n\t\tstate: state,\n\t\taggrGroups: map[model.Fingerprint]*aggrGroup{},\n\t\tnotifiers: map[string]Notifier{},\n\t}\n\n\tfor _, n := range notifiers {\n\t\tdisp.notifiers[n.Name()] = n\n\t}\n\n\treturn disp\n}\n\nfunc (d *Dispatcher) notify(name string, alerts ...*Alert) error {\n\tif len(alerts) == 0 {\n\t\treturn nil\n\t}\n\n\td.mtx.RLock()\n\tnotifier, ok := d.notifiers[name]\n\td.mtx.RUnlock()\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"notifier %q does not exist\", name)\n\t}\n\n\treturn notifier.Send(alerts...)\n}\n\nfunc (d *Dispatcher) Run() {\n\n\tupdates := d.state.Alert().Iter()\n\tcleanup := time.Tick(30 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-cleanup:\n\t\t\t\/\/ Cleanup routine.\n\t\t\tfor _, ag := range d.aggrGroups {\n\t\t\t\tif ag.empty() {\n\t\t\t\t\tag.stop()\n\t\t\t\t\tdelete(d.aggrGroups, ag.fingerprint())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ now := time.Now()\n\n\t\t\t\/\/ list, err := d.state.Alert().GetAll()\n\t\t\t\/\/ if err != nil {\n\t\t\t\/\/ \tlog.Error(err)\n\t\t\t\/\/ }\n\n\t\t\t\/\/ for _, a := range list {\n\t\t\t\/\/ \tif a.Resolved() && a.ResolvedAt.Before(now.Sub(ResolveTimeout)) {\n\t\t\t\/\/ \t\tif err := d.state.Alert().Del(a.Fingerprint()); err != nil {\n\t\t\t\/\/ \t\t\tlog.Errorf(\"error cleaning resolved alerts: %s\", err)\n\t\t\t\/\/ \t\t}\n\t\t\t\/\/ \t}\n\t\t\t\/\/ }\n\n\t\tcase alert := <-updates:\n\n\t\t\tconf, err := d.state.Config().Get()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, m := range conf.Routes.Match(alert.Labels) {\n\t\t\t\td.processAlert(alert, m)\n\t\t\t}\n\n\t\t\tif alert.ResolvedAt.IsZero() {\n\t\t\t\talert.ResolvedAt = alert.CreatedAt.Add(ResolveTimeout)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *Dispatcher) processAlert(alert *Alert, opts *RouteOpts) {\n\tgroup := model.LabelSet{}\n\n\tfor ln, lv := range alert.Labels {\n\t\tif _, ok := opts.GroupBy[ln]; ok {\n\t\t\tgroup[ln] = lv\n\t\t}\n\t}\n\n\tfp := group.Fingerprint()\n\n\tag, ok := d.aggrGroups[fp]\n\tif !ok {\n\t\tag = newAggrGroup(d, group, opts)\n\t\td.aggrGroups[fp] = ag\n\t}\n\n\tag.insert(alert)\n}\n\n\/\/ Alert models an action triggered by Prometheus.\ntype Alert struct {\n\t\/\/ Label value pairs for purpose of aggregation, matching, and disposition\n\t\/\/ dispatching. This must minimally include an \"alertname\" label.\n\tLabels model.LabelSet `json:\"labels\"`\n\n\t\/\/ Extra key\/value information which is not used for aggregation.\n\tPayload map[string]string `json:\"payload,omitempty\"`\n\tSummary string `json:\"summary,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tRunbook string `json:\"runbook,omitempty\"`\n\n\tCreatedAt time.Time `json:\"created_at,omitempty\"`\n\tResolvedAt time.Time `json:\"resolved_at,omitempty\"`\n\n\t\/\/ The authoritative timestamp.\n\tTimestamp time.Time `json:\"timestamp\"`\n}\n\n\/\/ Name returns the name of the alert. It is equivalent to the \"alertname\" label.\nfunc (a *Alert) Name() string {\n\treturn string(a.Labels[model.AlertNameLabel])\n}\n\n\/\/ Fingerprint returns a unique hash for the alert. It is equivalent to\n\/\/ the fingerprint of the alert's label set.\nfunc (a *Alert) Fingerprint() model.Fingerprint {\n\treturn a.Labels.Fingerprint()\n}\n\nfunc (a *Alert) String() string {\n\ts := fmt.Sprintf(\"%s[%s]\", a.Name(), a.Fingerprint())\n\tif a.Resolved() {\n\t\treturn s + \"[resolved]\"\n\t}\n\treturn s + \"[active]\"\n}\n\nfunc (a *Alert) Resolved() bool {\n\tif a.ResolvedAt.IsZero() {\n\t\treturn false\n\t}\n\treturn !a.ResolvedAt.After(time.Now())\n}\n\n\/\/ aggrGroup aggregates alerts into groups based on\n\/\/ common values for a set of labels.\ntype aggrGroup struct {\n\tdispatcher *Dispatcher\n\n\tlabels model.LabelSet\n\topts *RouteOpts\n\n\tnext *time.Timer\n\tdone chan struct{}\n\n\tmtx sync.RWMutex\n\talerts map[model.Fingerprint]struct{}\n\thasSent bool\n}\n\n\/\/ newAggrGroup returns a new aggregation group and starts background processing\n\/\/ that sends notifications about the contained alerts.\nfunc newAggrGroup(d *Dispatcher, labels model.LabelSet, opts *RouteOpts) *aggrGroup {\n\tag := &aggrGroup{\n\t\tdispatcher: d,\n\n\t\tlabels: labels,\n\t\topts: opts,\n\n\t\talerts: map[model.Fingerprint]struct{}{},\n\t\tdone: make(chan struct{}),\n\n\t\t\/\/ Set an initial one-time wait before flushing\n\t\t\/\/ the first batch of notifications.\n\t\tnext: time.NewTimer(opts.GroupWait),\n\t}\n\n\tgo ag.run()\n\n\treturn ag\n}\n\nfunc (ag *aggrGroup) run() {\n\n\tdefer ag.next.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ag.next.C:\n\t\t\tag.flush()\n\t\t\t\/\/ Wait the configured interval before calling flush again.\n\t\t\tag.next.Reset(ag.opts.GroupInterval)\n\n\t\tcase <-ag.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ag *aggrGroup) stop() {\n\tclose(ag.done)\n}\n\nfunc (ag *aggrGroup) fingerprint() model.Fingerprint {\n\treturn ag.labels.Fingerprint()\n}\n\n\/\/ insert the alert into the aggregation group. If the aggregation group\n\/\/ is empty afterwards, true is returned.\nfunc (ag *aggrGroup) insert(alert *Alert) {\n\tfp := alert.Fingerprint()\n\n\tag.mtx.Lock()\n\tag.alerts[fp] = struct{}{}\n\tag.mtx.Unlock()\n\n\t\/\/ Immediately trigger a flush if the wait duration for this\n\t\/\/ alert is already over.\n\tif !ag.hasSent && alert.Timestamp.Add(ag.opts.GroupWait).Before(time.Now()) {\n\t\tag.next.Reset(0)\n\t}\n}\n\nfunc (ag *aggrGroup) empty() bool {\n\tag.mtx.RLock()\n\tdefer ag.mtx.RUnlock()\n\n\treturn len(ag.alerts) == 0\n}\n\n\/\/ flush sends notifications for all new alerts.\nfunc (ag *aggrGroup) flush() {\n\tag.mtx.Lock()\n\tdefer ag.mtx.Unlock()\n\n\tvar alerts []*Alert\n\tfor fp := range ag.alerts {\n\t\ta, err := ag.dispatcher.state.Alert().Get(fp)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ TODO(fabxc): only delete if notify successful.\n\t\tif a.Resolved() {\n\t\t\tdelete(ag.alerts, fp)\n\t\t}\n\t\talerts = append(alerts, a)\n\t}\n\n\tag.dispatcher.notify(ag.opts.SendTo, alerts...)\n\tag.hasSent = true\n}\n\n\/\/ alertTimeline is a list of alerts sorted by their timestamp.\ntype alertTimeline []*Alert\n\nfunc (at alertTimeline) Len() int { return len(at) }\nfunc (at alertTimeline) Less(i, j int) bool { return at[i].Timestamp.Before(at[j].Timestamp) }\nfunc (at alertTimeline) Swap(i, j int) { at[i], at[j] = at[j], at[i] }\n<|endoftext|>"} {"text":"<commit_before>\/\/ A package for mocking the JSON HTTP POST requests.\npackage poster\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n)\n\n\/\/ A Poster is an interface for creating JSON HTTP POST requests.\ntype Poster interface {\n\t\/\/ Create a POST request.\n\tPost(url string, r io.Reader) (*http.Response, error)\n}\n\ntype httpPoster struct {\n\tclient *http.Client\n}\n\nfunc (self httpPoster) Post(url string, r io.Reader) (*http.Response, error) {\n\treturn self.client.Post(url, \"text\/json\", r)\n}\n\n\/\/ Create a Poster implementation with standart net\/http library.\nfunc Http() Poster {\n\treturn httpPoster{&http.Client{}}\n}\n\ntype PosterCloser interface {\n\tio.Closer\n\tPoster\n}\n\ntype handlerPoster struct {\n\tsrv *httptest.Server\n}\n\nfunc (self handlerPoster) Post(url string, r io.Reader) (*http.Response, error) {\n\treturn http.Post(url, \"text\/json\", r)\n}\n\nfunc (self handlerPoster) Close() error {\n\tself.srv.Close()\n\treturn nil\n}\n\n\/\/ Create a Poster implementation with standart net\/http\/httptest library\n\/\/ that creates POST requests to a specific http.Handler.\n\/\/ You need to Close it after you're done.\n\/\/ Close always returns nil for this implementation.\nfunc Handle(h http.Handler) (PosterCloser, string) {\n\tres := handlerPoster{httptest.NewServer(h)}\n\treturn res, res.srv.URL\n}\n<commit_msg>better docs for poster<commit_after>\/\/ A package for mocking the JSON HTTP POST requests.\npackage poster\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n)\n\n\/\/ A Poster is an interface for creating JSON HTTP POST requests.\ntype Poster interface {\n\t\/\/ Create a POST request.\n\tPost(url string, r io.Reader) (*http.Response, error)\n}\n\ntype httpPoster struct {\n\tclient *http.Client\n}\n\nfunc (self httpPoster) Post(url string, r io.Reader) (*http.Response, error) {\n\treturn self.client.Post(url, \"text\/json\", r)\n}\n\n\/\/ Create a Poster implementation with standart net\/http library.\nfunc Http() Poster {\n\treturn httpPoster{&http.Client{}}\n}\n\ntype PosterCloser interface {\n\tio.Closer\n\tPoster\n}\n\ntype handlerPoster struct {\n\tsrv *httptest.Server\n}\n\nfunc (self handlerPoster) Post(url string, r io.Reader) (*http.Response, error) {\n\treturn http.Post(url, \"text\/json\", r)\n}\n\nfunc (self handlerPoster) Close() error {\n\tself.srv.Close()\n\treturn nil\n}\n\n\/*\nCreate a Poster implementation with standart net\/http\/httptest library\nthat creates POST requests to a specific http.Handler.\n\nYou need to Close it after you're done.\nClose always returns nil for this implementation.\n*\/\nfunc Handle(h http.Handler) (PosterCloser, string) {\n\tres := handlerPoster{httptest.NewServer(h)}\n\treturn res, res.srv.URL\n}\n<|endoftext|>"} {"text":"<commit_before>package dagr\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestCompiledPoint(t *testing.T) {\n\tconst required = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(t)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tvar rb bytes.Buffer\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tif _, err := m.WriteTo(&rb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnormal := rb.String()\n\tif normal != required {\n\t\tt.Errorf(\"[N] Expected %q\\nGot %q\", required, normal)\n\t}\n\n\tc := m.compile()\n\tvar cb bytes.Buffer\n\tif _, err := c.WriteTo(&cb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcompiled := cb.String()\n\tif compiled != required {\n\t\tt.Errorf(\"[C] Expected %q\\nGot %q\", required, compiled)\n\t}\n\n\tif compiled != normal {\n\t\tt.Errorf(\"[CN] Expected %q\\nGot %q\", normal, compiled)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tfor i := b.N; i > 0; i-- {\n\t\tWriteMeasurement(ioutil.Discard, m)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement_Compiled(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t).Compiled()\n\n\tfor i := b.N; i > 0; i-- {\n\t\tWriteMeasurement(ioutil.Discard, m)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement_Parallel(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tWriteMeasurement(ioutil.Discard, m)\n\t\t}\n\t})\n}\n\nfunc BenchmarkWriteMeasurement_ParallelCompiled(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t).Compiled()\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tWriteMeasurement(ioutil.Discard, m)\n\t\t}\n\t})\n}\n<commit_msg>Add TestWriteMeasurement<commit_after>package dagr\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nfunc TestWriteMeasurement(t *testing.T) {\n\tconst required = `balderdash.things_baldered,host=example.local,pid=1234 on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(t)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\n\tboolean.Set(true)\n\tinteger.Set(123)\n\n\tvar rb bytes.Buffer\n\tm := NewPoint(\n\t\t\"balderdash.things_baldered\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"on\": boolean},\n\t)\n\n\t_, err := WriteMeasurement(&rb, m)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif bs := rb.String(); bs != required {\n\t\tt.Errorf(\"Expected ---\\n%s\\n---\\n\\nGot ---\\n%s\\n---\", required, bs)\n\t}\n}\n\nfunc TestCompiledPoint(t *testing.T) {\n\tconst required = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(t)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tvar rb bytes.Buffer\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tif _, err := m.WriteTo(&rb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnormal := rb.String()\n\tif normal != required {\n\t\tt.Errorf(\"[N] Expected %q\\nGot %q\", required, normal)\n\t}\n\n\tc := m.compile()\n\tvar cb bytes.Buffer\n\tif _, err := c.WriteTo(&cb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcompiled := cb.String()\n\tif compiled != required {\n\t\tt.Errorf(\"[C] Expected %q\\nGot %q\", required, compiled)\n\t}\n\n\tif compiled != normal {\n\t\tt.Errorf(\"[CN] Expected %q\\nGot %q\", normal, compiled)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tfor i := b.N; i > 0; i-- {\n\t\tWriteMeasurement(ioutil.Discard, m)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement_Compiled(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t).Compiled()\n\n\tfor i := b.N; i > 0; i-- {\n\t\tWriteMeasurement(ioutil.Discard, m)\n\t}\n}\n\nfunc BenchmarkWriteMeasurement_Parallel(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t)\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tWriteMeasurement(ioutil.Discard, m)\n\t\t}\n\t})\n}\n\nfunc BenchmarkWriteMeasurement_ParallelCompiled(b *testing.B) {\n\tconst result = `service.some_event,host=example.local,pid=1234 depth=123.456,msg=\"a \\\"string\\\" of sorts\",on=T,value=123i 1136214245000000000` + \"\\n\"\n\n\tdefer prepareLogger(b)()\n\n\tinteger := new(Int)\n\tboolean := new(Bool)\n\tfloat := new(Float)\n\tstr := new(String)\n\n\tinteger.Set(123)\n\tboolean.Set(true)\n\tfloat.Set(123.456)\n\tstr.Set(`a \"string\" of sorts`)\n\n\tm := NewPoint(\n\t\t\"service.some_event\",\n\t\tTags{\"pid\": fmt.Sprint(1234), \"host\": \"example.local\"},\n\t\tFields{\"value\": integer, \"depth\": float, \"on\": boolean, \"msg\": str},\n\t).Compiled()\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tWriteMeasurement(ioutil.Discard, m)\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package trash\n\nimport (\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/model\/job\"\n\t\"github.com\/cozy\/cozy-stack\/model\/vfs\"\n)\n\nfunc init() {\n\tjob.AddWorker(&job.WorkerConfig{\n\t\tWorkerType: \"trash-files\",\n\t\tConcurrency: runtime.NumCPU() * 4,\n\t\tMaxExecCount: 2,\n\t\tReserved: true,\n\t\tTimeout: 2 * time.Hour,\n\t\tWorkerFunc: WorkerTrashFiles,\n\t})\n}\n\n\/\/ WorkerTrashFiles is a worker to remove files in Swift after they have been\n\/\/ removed from CouchDB. It is used when cleaning the trash, as removing a lot\n\/\/ of files from Swift can take some time.\nfunc WorkerTrashFiles(ctx *job.WorkerContext) error {\n\topts := vfs.TrashJournal{}\n\terr := ctx.UnmarshalMessage(&opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfs := ctx.Instance.VFS()\n\treturn fs.EnsureErased(opts)\n}\n<commit_msg>Add a critical log if trash-files fails (#2112)<commit_after>package trash\n\nimport (\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/model\/job\"\n\t\"github.com\/cozy\/cozy-stack\/model\/vfs\"\n)\n\nfunc init() {\n\tjob.AddWorker(&job.WorkerConfig{\n\t\tWorkerType: \"trash-files\",\n\t\tConcurrency: runtime.NumCPU() * 4,\n\t\tMaxExecCount: 2,\n\t\tReserved: true,\n\t\tTimeout: 2 * time.Hour,\n\t\tWorkerFunc: WorkerTrashFiles,\n\t})\n}\n\n\/\/ WorkerTrashFiles is a worker to remove files in Swift after they have been\n\/\/ removed from CouchDB. It is used when cleaning the trash, as removing a lot\n\/\/ of files from Swift can take some time.\nfunc WorkerTrashFiles(ctx *job.WorkerContext) error {\n\topts := vfs.TrashJournal{}\n\tif err := ctx.UnmarshalMessage(&opts); err != nil {\n\t\treturn err\n\t}\n\tfs := ctx.Instance.VFS()\n\tif err := fs.EnsureErased(opts); err != nil {\n\t\tctx.Logger().WithField(\"critical\", \"true\").\n\t\t\tErrorf(\"Error: %s\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/coreos\/rocket\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n)\n\ntype metadata struct {\n\tmanifest schema.ContainerRuntimeManifest\n\tapps map[string]*schema.ImageManifest\n}\n\nvar (\n\tmetadataByIP = make(map[string]*metadata)\n\tmetadataByUID = make(map[types.UUID]*metadata)\n\thmacKey [sha256.Size]byte\n)\n\nconst (\n\tmyPort = \"4444\"\n\tmetaIP = \"169.254.169.255\"\n\tmetaPort = \"80\"\n)\n\nfunc setupIPTables() error {\n\treturn exec.Command(\n\t\t\"iptables\",\n\t\t\"-t\", \"nat\",\n\t\t\"-A\", \"PREROUTING\",\n\t\t\"-p\", \"tcp\",\n\t\t\"-d\", metaIP,\n\t\t\"--dport\", metaPort,\n\t\t\"-j\", \"REDIRECT\",\n\t\t\"--to-port\", myPort,\n\t).Run()\n}\n\nfunc antiSpoof(brPort, ipAddr string) error {\n\treturn exec.Command(\n\t\t\"ebtables\",\n\t\t\"-t\", \"filter\",\n\t\t\"-I\", \"INPUT\",\n\t\t\"-i\", brPort,\n\t\t\"-p\", \"IPV4\",\n\t\t\"!\", \"--ip-source\", ipAddr,\n\t\t\"-j\", \"DROP\",\n\t).Run()\n}\n\nfunc queryValue(u *url.URL, key string) string {\n\tvals, ok := u.Query()[key]\n\tif !ok || len(vals) != 1 {\n\t\treturn \"\"\n\t}\n\treturn vals[0]\n}\n\nfunc handleRegisterContainer(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tif _, ok := metadataByIP[remoteIP]; ok {\n\t\t\/\/ not allowed from container IP\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tcontainerIP := queryValue(r.URL, \"container_ip\")\n\tif containerIP == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Print(w, \"container_ip missing\")\n\t\treturn\n\t}\n\tcontainerBrPort := queryValue(r.URL, \"container_brport\")\n\tif containerBrPort == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Print(w, \"container_brport missing\")\n\t\treturn\n\t}\n\n\tm := &metadata{\n\t\tapps: make(map[string]*schema.ImageManifest),\n\t}\n\n\tif err := json.NewDecoder(r.Body).Decode(&m.manifest); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"JSON-decoding failed: %v\", err)\n\t\treturn\n\t}\n\n\tif err := antiSpoof(containerBrPort, containerIP); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"failed to set anti-spoofing: %v\", err)\n\t\treturn\n\t}\n\n\tmetadataByIP[containerIP] = m\n\tmetadataByUID[m.manifest.UUID] = m\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc handleRegisterApp(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tif _, ok := metadataByIP[remoteIP]; ok {\n\t\t\/\/ not allowed from container IP\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tuid, err := types.NewUUID(mux.Vars(r)[\"uid\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"UUID is missing or mulformed: %v\", err)\n\t\treturn\n\t}\n\n\tm, ok := metadataByUID[*uid]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprint(w, \"Container with given UUID not found\")\n\t\treturn\n\t}\n\n\tan := mux.Vars(r)[\"app\"]\n\n\tapp := &schema.ImageManifest{}\n\tif err := json.NewDecoder(r.Body).Decode(&app); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"JSON-decoding failed: %v\", err)\n\t\treturn\n\t}\n\n\tm.apps[an] = app\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc containerGet(h func(w http.ResponseWriter, r *http.Request, m *metadata)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\t\tm, ok := metadataByIP[remoteIP]\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tfmt.Fprintf(w, \"metadata by remoteIP (%v) not found\", remoteIP)\n\t\t\treturn\n\t\t}\n\n\t\th(w, r, m)\n\t}\n}\n\nfunc appGet(h func(w http.ResponseWriter, r *http.Request, m *metadata, _ *schema.ImageManifest)) http.HandlerFunc {\n\treturn containerGet(func(w http.ResponseWriter, r *http.Request, m *metadata) {\n\t\tappname := mux.Vars(r)[\"app\"]\n\n\t\tif im, ok := m.apps[appname]; ok {\n\t\t\th(w, r, m, im)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tfmt.Fprintf(w, \"App (%v) not found\", appname)\n\t\t}\n\t})\n}\n\nfunc handleContainerAnnotations(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\n\tfor k := range m.manifest.Annotations {\n\t\tfmt.Fprintln(w, k)\n\t}\n}\n\nfunc handleContainerAnnotation(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tk, err := types.NewACName(mux.Vars(r)[\"name\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Container annotation is not a valid AC Name\")\n\t\treturn\n\t}\n\n\tv, ok := m.manifest.Annotations.Get(k.String())\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Container annotation (%v) not found\", k)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(v))\n}\n\nfunc handleContainerManifest(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tif err := json.NewEncoder(w).Encode(m.manifest); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc handleContainerUID(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tuid := m.manifest.UUID.String()\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(uid))\n}\n\nfunc mergeAppAnnotations(im *schema.ImageManifest, cm *schema.ContainerRuntimeManifest) types.Annotations {\n\tmerged := types.Annotations{}\n\n\tfor _, annot := range im.Annotations {\n\t\tmerged.Set(annot.Name, annot.Value)\n\t}\n\n\tif app := cm.Apps.Get(im.Name); app != nil {\n\t\tfor _, annot := range app.Annotations {\n\t\t\tmerged.Set(annot.Name, annot.Value)\n\t\t}\n\t}\n\n\treturn merged\n}\n\nfunc handleAppAnnotations(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\n\tfor _, annot := range mergeAppAnnotations(im, &m.manifest) {\n\t\tfmt.Fprintln(w, string(annot.Name))\n\t}\n}\n\nfunc handleAppAnnotation(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tk, err := types.NewACName(mux.Vars(r)[\"name\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"App annotation is not a valid AC Name\")\n\t\treturn\n\t}\n\n\tmerged := mergeAppAnnotations(im, &m.manifest)\n\n\tv, ok := merged.Get(k.String())\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"App annotation (%v) not found\", k)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(v))\n}\n\nfunc handleImageManifest(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tif err := json.NewEncoder(w).Encode(*im); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc handleAppID(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\ta := m.manifest.Apps.Get(im.Name)\n\tif a == nil {\n\t\tpanic(\"could not find app in manifest!\")\n\t}\n\tw.Write([]byte(a.ImageID.String()))\n}\n\nfunc initCrypto() error {\n\tif n, err := rand.Reader.Read(hmacKey[:]); err != nil || n != len(hmacKey) {\n\t\treturn fmt.Errorf(\"failed to generate HMAC Key\")\n\t}\n\treturn nil\n}\n\nfunc digest(r io.Reader) ([]byte, error) {\n\tdigest := sha256.New()\n\tif _, err := io.Copy(digest, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn digest.Sum(nil), nil\n}\n\nfunc handleContainerSign(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tm, ok := metadataByIP[remoteIP]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Metadata by remoteIP (%v) not found\", remoteIP)\n\t\treturn\n\t}\n\n\t\/\/ compute message digest\n\td, err := digest(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"Digest computation failed: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ HMAC(UID:digest)\n\th := hmac.New(sha256.New, hmacKey[:])\n\th.Write(m.manifest.UUID[:])\n\th.Write(d)\n\n\t\/\/ Send back digest:HMAC as the signature\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tenc := base64.NewEncoder(base64.StdEncoding, w)\n\tenc.Write(d)\n\tenc.Write(h.Sum(nil))\n\tenc.Close()\n}\n\nfunc handleContainerVerify(w http.ResponseWriter, r *http.Request) {\n\tuid, err := types.NewUUID(r.FormValue(\"uid\"))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"uid field missing or malformed: %v\", err)\n\t\treturn\n\t}\n\n\tsig, err := base64.StdEncoding.DecodeString(r.FormValue(\"signature\"))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"signature field missing or corrupt: %v\", err)\n\t\treturn\n\t}\n\n\tdigest := sig[:sha256.Size]\n\tsum := sig[sha256.Size:]\n\n\th := hmac.New(sha256.New, hmacKey[:])\n\th.Write(uid[:])\n\th.Write(digest)\n\n\tif hmac.Equal(sum, h.Sum(nil)) {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\ntype httpResp struct {\n\twriter http.ResponseWriter\n\tstatus int\n}\n\nfunc (r *httpResp) Header() http.Header {\n\treturn r.writer.Header()\n}\n\nfunc (r *httpResp) Write(d []byte) (int, error) {\n\treturn r.writer.Write(d)\n}\n\nfunc (r *httpResp) WriteHeader(status int) {\n\tr.status = status\n\tr.writer.WriteHeader(status)\n}\n\nfunc logReq(h func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresp := &httpResp{w, 0}\n\t\th(resp, r)\n\t\tfmt.Printf(\"%v %v - %v\\n\", r.Method, r.RequestURI, resp.status)\n\t}\n}\n\nfunc main() {\n\tif err := setupIPTables(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := initCrypto(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"\/containers\/\", logReq(handleRegisterContainer)).Methods(\"POST\")\n\tr.HandleFunc(\"\/containers\/{uid}\/{app:.*}\", logReq(handleRegisterApp)).Methods(\"PUT\")\n\n\tacRtr := r.Headers(\"Metadata-Flavor\", \"AppContainer header\").\n\t\tPathPrefix(\"\/acMetadata\/v1\").Subrouter()\n\n\tmr := acRtr.Methods(\"GET\").Subrouter()\n\n\tmr.HandleFunc(\"\/container\/annotations\/\", logReq(containerGet(handleContainerAnnotations)))\n\tmr.HandleFunc(\"\/container\/annotations\/{name}\", logReq(containerGet(handleContainerAnnotation)))\n\tmr.HandleFunc(\"\/container\/manifest\", logReq(containerGet(handleContainerManifest)))\n\tmr.HandleFunc(\"\/container\/uid\", logReq(containerGet(handleContainerUID)))\n\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/annotations\/\", logReq(appGet(handleAppAnnotations)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/annotations\/{name}\", logReq(appGet(handleAppAnnotation)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/image\/manifest\", logReq(appGet(handleImageManifest)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/image\/id\", logReq(appGet(handleAppID)))\n\n\tacRtr.HandleFunc(\"\/container\/hmac\/sign\", logReq(handleContainerSign)).Methods(\"POST\")\n\tacRtr.HandleFunc(\"\/container\/hmac\/verify\", logReq(handleContainerVerify)).Methods(\"POST\")\n\n\tlog.Fatal(http.ListenAndServe(\":4444\", r))\n}\n<commit_msg>metadatasvc: remove string \"header\" from http header match<commit_after>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/appc\/spec\/schema\"\n\t\"github.com\/appc\/spec\/schema\/types\"\n\t\"github.com\/coreos\/rocket\/Godeps\/_workspace\/src\/github.com\/gorilla\/mux\"\n)\n\ntype metadata struct {\n\tmanifest schema.ContainerRuntimeManifest\n\tapps map[string]*schema.ImageManifest\n}\n\nvar (\n\tmetadataByIP = make(map[string]*metadata)\n\tmetadataByUID = make(map[types.UUID]*metadata)\n\thmacKey [sha256.Size]byte\n)\n\nconst (\n\tmyPort = \"4444\"\n\tmetaIP = \"169.254.169.255\"\n\tmetaPort = \"80\"\n)\n\nfunc setupIPTables() error {\n\treturn exec.Command(\n\t\t\"iptables\",\n\t\t\"-t\", \"nat\",\n\t\t\"-A\", \"PREROUTING\",\n\t\t\"-p\", \"tcp\",\n\t\t\"-d\", metaIP,\n\t\t\"--dport\", metaPort,\n\t\t\"-j\", \"REDIRECT\",\n\t\t\"--to-port\", myPort,\n\t).Run()\n}\n\nfunc antiSpoof(brPort, ipAddr string) error {\n\treturn exec.Command(\n\t\t\"ebtables\",\n\t\t\"-t\", \"filter\",\n\t\t\"-I\", \"INPUT\",\n\t\t\"-i\", brPort,\n\t\t\"-p\", \"IPV4\",\n\t\t\"!\", \"--ip-source\", ipAddr,\n\t\t\"-j\", \"DROP\",\n\t).Run()\n}\n\nfunc queryValue(u *url.URL, key string) string {\n\tvals, ok := u.Query()[key]\n\tif !ok || len(vals) != 1 {\n\t\treturn \"\"\n\t}\n\treturn vals[0]\n}\n\nfunc handleRegisterContainer(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tif _, ok := metadataByIP[remoteIP]; ok {\n\t\t\/\/ not allowed from container IP\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tcontainerIP := queryValue(r.URL, \"container_ip\")\n\tif containerIP == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Print(w, \"container_ip missing\")\n\t\treturn\n\t}\n\tcontainerBrPort := queryValue(r.URL, \"container_brport\")\n\tif containerBrPort == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Print(w, \"container_brport missing\")\n\t\treturn\n\t}\n\n\tm := &metadata{\n\t\tapps: make(map[string]*schema.ImageManifest),\n\t}\n\n\tif err := json.NewDecoder(r.Body).Decode(&m.manifest); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"JSON-decoding failed: %v\", err)\n\t\treturn\n\t}\n\n\tif err := antiSpoof(containerBrPort, containerIP); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"failed to set anti-spoofing: %v\", err)\n\t\treturn\n\t}\n\n\tmetadataByIP[containerIP] = m\n\tmetadataByUID[m.manifest.UUID] = m\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc handleRegisterApp(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tif _, ok := metadataByIP[remoteIP]; ok {\n\t\t\/\/ not allowed from container IP\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tuid, err := types.NewUUID(mux.Vars(r)[\"uid\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"UUID is missing or mulformed: %v\", err)\n\t\treturn\n\t}\n\n\tm, ok := metadataByUID[*uid]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprint(w, \"Container with given UUID not found\")\n\t\treturn\n\t}\n\n\tan := mux.Vars(r)[\"app\"]\n\n\tapp := &schema.ImageManifest{}\n\tif err := json.NewDecoder(r.Body).Decode(&app); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"JSON-decoding failed: %v\", err)\n\t\treturn\n\t}\n\n\tm.apps[an] = app\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc containerGet(h func(w http.ResponseWriter, r *http.Request, m *metadata)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\t\tm, ok := metadataByIP[remoteIP]\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tfmt.Fprintf(w, \"metadata by remoteIP (%v) not found\", remoteIP)\n\t\t\treturn\n\t\t}\n\n\t\th(w, r, m)\n\t}\n}\n\nfunc appGet(h func(w http.ResponseWriter, r *http.Request, m *metadata, _ *schema.ImageManifest)) http.HandlerFunc {\n\treturn containerGet(func(w http.ResponseWriter, r *http.Request, m *metadata) {\n\t\tappname := mux.Vars(r)[\"app\"]\n\n\t\tif im, ok := m.apps[appname]; ok {\n\t\t\th(w, r, m, im)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tfmt.Fprintf(w, \"App (%v) not found\", appname)\n\t\t}\n\t})\n}\n\nfunc handleContainerAnnotations(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\n\tfor k := range m.manifest.Annotations {\n\t\tfmt.Fprintln(w, k)\n\t}\n}\n\nfunc handleContainerAnnotation(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tk, err := types.NewACName(mux.Vars(r)[\"name\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Container annotation is not a valid AC Name\")\n\t\treturn\n\t}\n\n\tv, ok := m.manifest.Annotations.Get(k.String())\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Container annotation (%v) not found\", k)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(v))\n}\n\nfunc handleContainerManifest(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tif err := json.NewEncoder(w).Encode(m.manifest); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc handleContainerUID(w http.ResponseWriter, r *http.Request, m *metadata) {\n\tuid := m.manifest.UUID.String()\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(uid))\n}\n\nfunc mergeAppAnnotations(im *schema.ImageManifest, cm *schema.ContainerRuntimeManifest) types.Annotations {\n\tmerged := types.Annotations{}\n\n\tfor _, annot := range im.Annotations {\n\t\tmerged.Set(annot.Name, annot.Value)\n\t}\n\n\tif app := cm.Apps.Get(im.Name); app != nil {\n\t\tfor _, annot := range app.Annotations {\n\t\t\tmerged.Set(annot.Name, annot.Value)\n\t\t}\n\t}\n\n\treturn merged\n}\n\nfunc handleAppAnnotations(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\n\tfor _, annot := range mergeAppAnnotations(im, &m.manifest) {\n\t\tfmt.Fprintln(w, string(annot.Name))\n\t}\n}\n\nfunc handleAppAnnotation(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tk, err := types.NewACName(mux.Vars(r)[\"name\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"App annotation is not a valid AC Name\")\n\t\treturn\n\t}\n\n\tmerged := mergeAppAnnotations(im, &m.manifest)\n\n\tv, ok := merged.Get(k.String())\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"App annotation (%v) not found\", k)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(v))\n}\n\nfunc handleImageManifest(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tif err := json.NewEncoder(w).Encode(*im); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\nfunc handleAppID(w http.ResponseWriter, r *http.Request, m *metadata, im *schema.ImageManifest) {\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\ta := m.manifest.Apps.Get(im.Name)\n\tif a == nil {\n\t\tpanic(\"could not find app in manifest!\")\n\t}\n\tw.Write([]byte(a.ImageID.String()))\n}\n\nfunc initCrypto() error {\n\tif n, err := rand.Reader.Read(hmacKey[:]); err != nil || n != len(hmacKey) {\n\t\treturn fmt.Errorf(\"failed to generate HMAC Key\")\n\t}\n\treturn nil\n}\n\nfunc digest(r io.Reader) ([]byte, error) {\n\tdigest := sha256.New()\n\tif _, err := io.Copy(digest, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn digest.Sum(nil), nil\n}\n\nfunc handleContainerSign(w http.ResponseWriter, r *http.Request) {\n\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\tm, ok := metadataByIP[remoteIP]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Metadata by remoteIP (%v) not found\", remoteIP)\n\t\treturn\n\t}\n\n\t\/\/ compute message digest\n\td, err := digest(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"Digest computation failed: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ HMAC(UID:digest)\n\th := hmac.New(sha256.New, hmacKey[:])\n\th.Write(m.manifest.UUID[:])\n\th.Write(d)\n\n\t\/\/ Send back digest:HMAC as the signature\n\tw.Header().Add(\"Content-Type\", \"text\/plain\")\n\tw.WriteHeader(http.StatusOK)\n\tenc := base64.NewEncoder(base64.StdEncoding, w)\n\tenc.Write(d)\n\tenc.Write(h.Sum(nil))\n\tenc.Close()\n}\n\nfunc handleContainerVerify(w http.ResponseWriter, r *http.Request) {\n\tuid, err := types.NewUUID(r.FormValue(\"uid\"))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"uid field missing or malformed: %v\", err)\n\t\treturn\n\t}\n\n\tsig, err := base64.StdEncoding.DecodeString(r.FormValue(\"signature\"))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"signature field missing or corrupt: %v\", err)\n\t\treturn\n\t}\n\n\tdigest := sig[:sha256.Size]\n\tsum := sig[sha256.Size:]\n\n\th := hmac.New(sha256.New, hmacKey[:])\n\th.Write(uid[:])\n\th.Write(digest)\n\n\tif hmac.Equal(sum, h.Sum(nil)) {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t}\n}\n\ntype httpResp struct {\n\twriter http.ResponseWriter\n\tstatus int\n}\n\nfunc (r *httpResp) Header() http.Header {\n\treturn r.writer.Header()\n}\n\nfunc (r *httpResp) Write(d []byte) (int, error) {\n\treturn r.writer.Write(d)\n}\n\nfunc (r *httpResp) WriteHeader(status int) {\n\tr.status = status\n\tr.writer.WriteHeader(status)\n}\n\nfunc logReq(h func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresp := &httpResp{w, 0}\n\t\th(resp, r)\n\t\tfmt.Printf(\"%v %v - %v\\n\", r.Method, r.RequestURI, resp.status)\n\t}\n}\n\nfunc main() {\n\tif err := setupIPTables(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := initCrypto(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"\/containers\/\", logReq(handleRegisterContainer)).Methods(\"POST\")\n\tr.HandleFunc(\"\/containers\/{uid}\/{app:.*}\", logReq(handleRegisterApp)).Methods(\"PUT\")\n\n\tacRtr := r.Headers(\"Metadata-Flavor\", \"AppContainer\").\n\t\tPathPrefix(\"\/acMetadata\/v1\").Subrouter()\n\n\tmr := acRtr.Methods(\"GET\").Subrouter()\n\n\tmr.HandleFunc(\"\/container\/annotations\/\", logReq(containerGet(handleContainerAnnotations)))\n\tmr.HandleFunc(\"\/container\/annotations\/{name}\", logReq(containerGet(handleContainerAnnotation)))\n\tmr.HandleFunc(\"\/container\/manifest\", logReq(containerGet(handleContainerManifest)))\n\tmr.HandleFunc(\"\/container\/uid\", logReq(containerGet(handleContainerUID)))\n\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/annotations\/\", logReq(appGet(handleAppAnnotations)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/annotations\/{name}\", logReq(appGet(handleAppAnnotation)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/image\/manifest\", logReq(appGet(handleImageManifest)))\n\tmr.HandleFunc(\"\/apps\/{app:.*}\/image\/id\", logReq(appGet(handleAppID)))\n\n\tacRtr.HandleFunc(\"\/container\/hmac\/sign\", logReq(handleContainerSign)).Methods(\"POST\")\n\tacRtr.HandleFunc(\"\/container\/hmac\/verify\", logReq(handleContainerVerify)).Methods(\"POST\")\n\n\tlog.Fatal(http.ListenAndServe(\":4444\", r))\n}\n<|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"container\/heap\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nconst (\n\tedRescaleThreshold = time.Hour\n)\n\n\/\/ Reservoir\n\ntype priorityValue struct {\n\tpriority float64\n\tvalue int64\n}\n\ntype reservoir struct {\n\tsamples []priorityValue\n}\n\nfunc (self *reservoir) String() string {\n\treturn fmt.Sprintf(\"%s\", self.Values())\n}\n\nfunc (self *reservoir) Clear() {\n\tself.samples = self.samples[0:0]\n}\n\nfunc (self *reservoir) Get(i int) priorityValue {\n\treturn self.samples[i]\n}\n\nfunc (self *reservoir) Values() (values []int64) {\n\tvalues = make([]int64, len(self.samples))\n\tfor i, sample := range self.samples {\n\t\tvalues[i] = sample.value\n\t}\n\treturn\n}\n\nfunc (self *reservoir) ScalePriority(scale float64) {\n\tfor i, sample := range self.samples {\n\t\tself.samples[i] = priorityValue{sample.priority * scale, sample.value}\n\t}\n}\n\nfunc (self *reservoir) Len() int {\n\treturn len(self.samples)\n}\n\nfunc (self *reservoir) Less(i, j int) bool {\n\treturn self.samples[i].priority < self.samples[j].priority\n}\n\nfunc (self *reservoir) Swap(i, j int) {\n\tself.samples[i], self.samples[j] = self.samples[j], self.samples[i]\n}\n\nfunc (self *reservoir) Push(x interface{}) {\n\tself.samples = append(self.samples, x.(priorityValue))\n}\n\nfunc (self *reservoir) Pop() interface{} {\n\tv := self.samples[len(self.samples)-1]\n\tself.samples = self.samples[:len(self.samples)-1]\n\treturn v\n}\n\ntype exponentiallyDecayingSample struct {\n\t\/\/ the number of samples to keep in the sampling reservoir\n\treservoirSize int\n\t\/\/ the exponential decay factor; the higher this is, the more\n\t\/\/ biased the sample will be towards newer values\n\talpha float64\n\tvalues *reservoir\n\tcount int\n\tstartTime time.Time\n\tnextScaleTime time.Time\n}\n\n\/\/ An exponentially-decaying random sample of values. Uses Cormode et\n\/\/ al's forward-decaying priority reservoir sampling method to produce a\n\/\/ statistically representative sample, exponentially biased towards newer\n\/\/ entries.\n\/\/\n\/\/ http:\/\/www.research.att.com\/people\/Cormode_Graham\/library\/publications\/CormodeShkapenyukSrivastavaXu09.pdf\n\/\/ Cormode et al. Forward Decay: A Practical Time Decay Model for Streaming\n\/\/ Systems. ICDE '09: Proceedings of the 2009 IEEE International Conference on\n\/\/ Data Engineering (2009)\nfunc NewExponentiallyDecayingSample(reservoirSize int, alpha float64) Sample {\n\teds := exponentiallyDecayingSample{\n\t\treservoirSize: reservoirSize,\n\t\talpha: alpha,\n\t\tvalues: &reservoir{}}\n\teds.Clear()\n\treturn &eds\n}\n\nfunc (self *exponentiallyDecayingSample) Clear() {\n\tself.values.Clear()\n\theap.Init(self.values)\n\tself.count = 0\n\tself.startTime = time.Now()\n\tself.nextScaleTime = self.startTime.Add(edRescaleThreshold)\n}\n\nfunc (self *exponentiallyDecayingSample) Len() int {\n\tif self.count < self.reservoirSize {\n\t\treturn self.count\n\t}\n\treturn self.reservoirSize\n}\n\nfunc (self *exponentiallyDecayingSample) Values() []int64 {\n\treturn self.values.Values()\n}\n\nfunc (self *exponentiallyDecayingSample) Update(value int64) {\n\ttimestamp := time.Now()\n\tpriority := self.weight(timestamp.Sub(self.startTime)) \/ rand.Float64()\n\tself.count++\n\tif self.count <= self.reservoirSize {\n\t\theap.Push(self.values, priorityValue{priority, value})\n\t} else {\n\t\tif first := self.values.Get(0); first.priority > priority {\n\t\t\t\/\/ heap.Replace(self.values, priorityValue{priority, value})\n\t\t\theap.Pop(self.values)\n\t\t\theap.Push(self.values, priorityValue{priority, value})\n\t\t}\n\t}\n\n\tif timestamp.After(self.nextScaleTime) {\n\t\tself.rescale(timestamp)\n\t}\n}\n\nfunc (self *exponentiallyDecayingSample) weight(delta time.Duration) float64 {\n\treturn math.Exp(self.alpha * float64(delta))\n}\n\n\/*\nA common feature of the above techniques—indeed, the key technique that\nallows us to track the decayed weights efficiently—is that they maintain\ncounts and other quantities based on g(ti − L), and only scale by g(t − L)\nat query time. But while g(ti −L)\/g(t−L) is guaranteed to lie between zero\nand one, the intermediate values of g(ti − L) could become very large. For\npolynomial functions, these values should not grow too large, and should be\neffectively represented in practice by floating point values without loss of\nprecision. For exponential functions, these values could grow quite large as\nnew values of (ti − L) become large, and potentially exceed the capacity of\ncommon floating point types. However, since the values stored by the\nalgorithms are linear combinations of g values (scaled sums), they can be\nrescaled relative to a new landmark. That is, by the analysis of exponential\ndecay in Section III-A, the choice of L does not affect the final result. We\ncan therefore multiply each value based on L by a factor of exp(−α(L′ − L)),\nand obtain the correct value as if we had instead computed relative to a new\nlandmark L′ (and then use this new L′ at query time). This can be done with\na linear pass over whatever data structure is being used.\n*\/\nfunc (self *exponentiallyDecayingSample) rescale(now time.Time) {\n\tself.nextScaleTime = now.Add(edRescaleThreshold)\n\toldStartTime := self.startTime\n\tself.startTime = now\n\tscale := math.Exp(-self.alpha * float64(self.startTime.Sub(oldStartTime)))\n\tself.values.ScalePriority(scale)\n}\n<commit_msg>Use seconds insted of nanoseconds for exponentially decaying sample priority<commit_after>package metrics\n\nimport (\n\t\"container\/heap\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nconst (\n\tedRescaleThreshold = time.Hour\n)\n\n\/\/ Reservoir\n\ntype priorityValue struct {\n\tpriority float64\n\tvalue int64\n}\n\ntype reservoir struct {\n\tsamples []priorityValue\n}\n\nfunc (self *reservoir) String() string {\n\treturn fmt.Sprintf(\"%s\", self.Values())\n}\n\nfunc (self *reservoir) Clear() {\n\tself.samples = self.samples[0:0]\n}\n\nfunc (self *reservoir) Get(i int) priorityValue {\n\treturn self.samples[i]\n}\n\nfunc (self *reservoir) Values() (values []int64) {\n\tvalues = make([]int64, len(self.samples))\n\tfor i, sample := range self.samples {\n\t\tvalues[i] = sample.value\n\t}\n\treturn\n}\n\nfunc (self *reservoir) ScalePriority(scale float64) {\n\tfor i, sample := range self.samples {\n\t\tself.samples[i] = priorityValue{sample.priority * scale, sample.value}\n\t}\n}\n\nfunc (self *reservoir) Len() int {\n\treturn len(self.samples)\n}\n\nfunc (self *reservoir) Less(i, j int) bool {\n\treturn self.samples[i].priority < self.samples[j].priority\n}\n\nfunc (self *reservoir) Swap(i, j int) {\n\tself.samples[i], self.samples[j] = self.samples[j], self.samples[i]\n}\n\nfunc (self *reservoir) Push(x interface{}) {\n\tself.samples = append(self.samples, x.(priorityValue))\n}\n\nfunc (self *reservoir) Pop() interface{} {\n\tv := self.samples[len(self.samples)-1]\n\tself.samples = self.samples[:len(self.samples)-1]\n\treturn v\n}\n\ntype exponentiallyDecayingSample struct {\n\t\/\/ the number of samples to keep in the sampling reservoir\n\treservoirSize int\n\t\/\/ the exponential decay factor; the higher this is, the more\n\t\/\/ biased the sample will be towards newer values\n\talpha float64\n\tvalues *reservoir\n\tcount int\n\tstartTime time.Time\n\tnextScaleTime time.Time\n}\n\n\/\/ An exponentially-decaying random sample of values. Uses Cormode et\n\/\/ al's forward-decaying priority reservoir sampling method to produce a\n\/\/ statistically representative sample, exponentially biased towards newer\n\/\/ entries.\n\/\/\n\/\/ http:\/\/www.research.att.com\/people\/Cormode_Graham\/library\/publications\/CormodeShkapenyukSrivastavaXu09.pdf\n\/\/ Cormode et al. Forward Decay: A Practical Time Decay Model for Streaming\n\/\/ Systems. ICDE '09: Proceedings of the 2009 IEEE International Conference on\n\/\/ Data Engineering (2009)\nfunc NewExponentiallyDecayingSample(reservoirSize int, alpha float64) Sample {\n\teds := exponentiallyDecayingSample{\n\t\treservoirSize: reservoirSize,\n\t\talpha: alpha,\n\t\tvalues: &reservoir{}}\n\teds.Clear()\n\treturn &eds\n}\n\nfunc (self *exponentiallyDecayingSample) Clear() {\n\tself.values.Clear()\n\theap.Init(self.values)\n\tself.count = 0\n\tself.startTime = time.Now()\n\tself.nextScaleTime = self.startTime.Add(edRescaleThreshold)\n}\n\nfunc (self *exponentiallyDecayingSample) Len() int {\n\tif self.count < self.reservoirSize {\n\t\treturn self.count\n\t}\n\treturn self.reservoirSize\n}\n\nfunc (self *exponentiallyDecayingSample) Values() []int64 {\n\treturn self.values.Values()\n}\n\nfunc (self *exponentiallyDecayingSample) Update(value int64) {\n\ttimestamp := time.Now()\n\tpriority := self.weight(timestamp.Sub(self.startTime)) \/ rand.Float64()\n\tself.count++\n\tif self.count <= self.reservoirSize {\n\t\theap.Push(self.values, priorityValue{priority, value})\n\t} else {\n\t\tif first := self.values.Get(0); first.priority > priority {\n\t\t\t\/\/ heap.Replace(self.values, priorityValue{priority, value})\n\t\t\theap.Pop(self.values)\n\t\t\theap.Push(self.values, priorityValue{priority, value})\n\t\t}\n\t}\n\n\tif timestamp.After(self.nextScaleTime) {\n\t\tself.rescale(timestamp)\n\t}\n}\n\nfunc (self *exponentiallyDecayingSample) weight(delta time.Duration) float64 {\n\treturn math.Exp(self.alpha * delta.Seconds())\n}\n\n\/*\nA common feature of the above techniques—indeed, the key technique that\nallows us to track the decayed weights efficiently—is that they maintain\ncounts and other quantities based on g(ti − L), and only scale by g(t − L)\nat query time. But while g(ti −L)\/g(t−L) is guaranteed to lie between zero\nand one, the intermediate values of g(ti − L) could become very large. For\npolynomial functions, these values should not grow too large, and should be\neffectively represented in practice by floating point values without loss of\nprecision. For exponential functions, these values could grow quite large as\nnew values of (ti − L) become large, and potentially exceed the capacity of\ncommon floating point types. However, since the values stored by the\nalgorithms are linear combinations of g values (scaled sums), they can be\nrescaled relative to a new landmark. That is, by the analysis of exponential\ndecay in Section III-A, the choice of L does not affect the final result. We\ncan therefore multiply each value based on L by a factor of exp(−α(L′ − L)),\nand obtain the correct value as if we had instead computed relative to a new\nlandmark L′ (and then use this new L′ at query time). This can be done with\na linear pass over whatever data structure is being used.\n*\/\nfunc (self *exponentiallyDecayingSample) rescale(now time.Time) {\n\tself.nextScaleTime = now.Add(edRescaleThreshold)\n\toldStartTime := self.startTime\n\tself.startTime = now\n\tscale := math.Exp(-self.alpha * self.startTime.Sub(oldStartTime).Seconds())\n\tself.values.ScalePriority(scale)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n** Copyright [2013-2015] [Megam Systems]\n**\n** Licensed under the Apache License, Version 2.0 (the \"License\");\n** you may not use this file except in compliance with the License.\n** You may obtain a copy of the License at\n**\n** http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n**\n** Unless required by applicable law or agreed to in writing, software\n** distributed under the License is distributed on an \"AS IS\" BASIS,\n** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n** See the License for the specific language governing permissions and\n** limitations under the License.\n *\/\npackage provision\n\nimport (\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/megamsys\/megamd\/carton\/bind\"\n\t\"github.com\/megamsys\/megamd\/repository\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\tCPU = \"cpu\"\n\tRAM = \"ram\"\n\tHDD = \"hdd\"\n\n\t\/\/ BoxSome indicates that there is atleast one box to deploy or delete.\n\tBoxSome BoxLevel = iota\n\n\t\/\/ BoxNone indicates that there are no boxes to deploy or delete but its parent can be.\n\tBoxNone\n)\n\nvar cnameRegexp = regexp.MustCompile(`^(\\*\\.)?[a-zA-Z0-9][\\w-.]+$`)\n\n\/\/ Boxlevel represents the deployment level.\ntype BoxLevel int\n\n\/\/ Boxlog represents a log entry.\ntype Boxlog struct {\n\tTimestamp time.Time\n\tMessage string\n\tSource string\n\tName string\n\tUnit string\n}\n\ntype BoxCompute struct {\n\tCpushare string\n\tMemory string\n\tSwap string\n\tHDD string\n}\n\nfunc (bc *BoxCompute) numCpushare() int64 {\n\tif cs, err := strconv.ParseInt(bc.Cpushare, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cs\n\t}\n}\n\nfunc (bc *BoxCompute) numMemory() int64 {\n\tif cp, err := strconv.ParseInt(bc.Memory, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cp\n\t}\n}\n\nfunc (bc *BoxCompute) numSwap() int64 {\n\tif cs, err := strconv.ParseInt(bc.Swap, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cs\n\t}\n}\n\nfunc (bc *BoxCompute) numHDD() int64 {\n\tif cp, err := strconv.ParseInt(bc.HDD, 10, 64); err != nil {\n\t\treturn 10\n\t} else {\n\t\treturn cp\n\t}\n}\n\nfunc (bc *BoxCompute) String() string {\n\treturn \"(\" + strings.Join([]string{\n\t\tCPU + \":\" + bc.Cpushare,\n\t\tRAM + \":\" + bc.Memory,\n\t\tHDD + \":\" + bc.HDD},\n\t\t\",\") + \" )\"\n}\n\n\/\/ BoxDeploy represents a log entry.\ntype BoxDeploy struct {\n\tDate time.Time\n\tHookId string\n\tImageId string\n\tName string\n\tUnit string\n}\n\n\/\/ Box represents a provision unit. Can be a machine, container or anything\n\/\/ IP-addressable.\ntype Box struct {\n\tId string\n\tCartonsId string\n\tCartonId string\n\tCartonName string\n\tName string\n\tLevel BoxLevel\n\tDomainName string\n\tTosca string\n\tImageVersion string\n\tCompute BoxCompute\n\tRepo *repository.Repo\n\tStatus Status\n\tProvider string\n\tPublicIp string\n\tCommit string\n\tEnvs []bind.EnvVar\n\tAddress *url.URL\n}\n\nfunc (b *Box) String() string {\n\tif d, err := yaml.Marshal(b); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\nfunc (b *Box) GetMemory() int64 {\n\treturn b.Compute.numMemory()\n}\n\nfunc (b *Box) GetSwap() int64 {\n\treturn b.Compute.numSwap()\n}\n\nfunc (b *Box) GetCpushare() int64 {\n\treturn b.Compute.numCpushare()\n}\n\n\/\/ GetName returns the assemblyname.domain(assembly001YeahBoy.megambox.com) of the box.\nfunc (b *Box) GetFullName() string {\n\treturn b.CartonName + \".\" + b.DomainName\n}\n\n\/\/ GetTosca returns the tosca type of the box.\nfunc (b *Box) GetTosca() string {\n\treturn b.Tosca\n}\n\n\/\/ GetIp returns the Unit.IP.\nfunc (b *Box) GetPublicIp() string {\n\treturn b.PublicIp\n}\n\n\/\/ Available returns true if the unit is available. It will return true\n\/\/ whenever the unit itself is available, even when the application process is\n\/\/ not.\nfunc (b *Box) Available() bool {\n\treturn b.Status == StatusDeploying ||\n\t\tb.Status == StatusCreating ||\n\t\tb.Status == StatusError\n}\n\nfunc (box *Box) GetRouter() (string, error) {\n\treturn \"route53\", nil \/\/dns.LoadConfig()\n}\n\n\/\/ Log adds a log message to the app. Specifying a good source is good so the\n\/\/ user can filter where the message come from.\nfunc (box *Box) Log(message, source, unit string) error {\n\tmessages := strings.Split(message, \"\\n\")\n\tlogs := make([]interface{}, 0, len(messages))\n\tfor _, msg := range messages {\n\t\tif len(strings.TrimSpace(msg)) > 0 {\n\t\t\tbl := Boxlog{\n\t\t\t\tTimestamp: time.Now().In(time.UTC),\n\t\t\t\tMessage: msg,\n\t\t\t\tSource: source,\n\t\t\t\tName: box.Name,\n\t\t\t\tUnit: box.Id,\n\t\t\t}\n\t\t\tlogs = append(logs, bl)\n\t\t}\n\t}\n\tif len(logs) > 0 {\n\t\t_ = notify(box.GetFullName(), logs)\n\t}\n\treturn nil\n}\n<commit_msg>Changed the date to a usable format for logs<commit_after>\/*\n** Copyright [2013-2015] [Megam Systems]\n**\n** Licensed under the Apache License, Version 2.0 (the \"License\");\n** you may not use this file except in compliance with the License.\n** You may obtain a copy of the License at\n**\n** http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n**\n** Unless required by applicable law or agreed to in writing, software\n** distributed under the License is distributed on an \"AS IS\" BASIS,\n** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n** See the License for the specific language governing permissions and\n** limitations under the License.\n *\/\npackage provision\n\nimport (\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/megamsys\/megamd\/carton\/bind\"\n\t\"github.com\/megamsys\/megamd\/repository\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\tCPU = \"cpu\"\n\tRAM = \"ram\"\n\tHDD = \"hdd\"\n\n\t\/\/ BoxSome indicates that there is atleast one box to deploy or delete.\n\tBoxSome BoxLevel = iota\n\n\t\/\/ BoxNone indicates that there are no boxes to deploy or delete but its parent can be.\n\tBoxNone\n)\n\nvar cnameRegexp = regexp.MustCompile(`^(\\*\\.)?[a-zA-Z0-9][\\w-.]+$`)\n\n\/\/ Boxlevel represents the deployment level.\ntype BoxLevel int\n\n\/\/ Boxlog represents a log entry.\ntype Boxlog struct {\n\tTimestamp string\n\tMessage string\n\tSource string\n\tName string\n\tUnit string\n}\n\ntype BoxCompute struct {\n\tCpushare string\n\tMemory string\n\tSwap string\n\tHDD string\n}\n\nfunc (bc *BoxCompute) numCpushare() int64 {\n\tif cs, err := strconv.ParseInt(bc.Cpushare, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cs\n\t}\n}\n\nfunc (bc *BoxCompute) numMemory() int64 {\n\tif cp, err := strconv.ParseInt(bc.Memory, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cp\n\t}\n}\n\nfunc (bc *BoxCompute) numSwap() int64 {\n\tif cs, err := strconv.ParseInt(bc.Swap, 10, 64); err != nil {\n\t\treturn 0\n\t} else {\n\t\treturn cs\n\t}\n}\n\nfunc (bc *BoxCompute) numHDD() int64 {\n\tif cp, err := strconv.ParseInt(bc.HDD, 10, 64); err != nil {\n\t\treturn 10\n\t} else {\n\t\treturn cp\n\t}\n}\n\nfunc (bc *BoxCompute) String() string {\n\treturn \"(\" + strings.Join([]string{\n\t\tCPU + \":\" + bc.Cpushare,\n\t\tRAM + \":\" + bc.Memory,\n\t\tHDD + \":\" + bc.HDD},\n\t\t\",\") + \" )\"\n}\n\n\/\/ BoxDeploy represents a log entry.\ntype BoxDeploy struct {\n\tDate time.Time\n\tHookId string\n\tImageId string\n\tName string\n\tUnit string\n}\n\n\/\/ Box represents a provision unit. Can be a machine, container or anything\n\/\/ IP-addressable.\ntype Box struct {\n\tId string\n\tCartonsId string\n\tCartonId string\n\tCartonName string\n\tName string\n\tLevel BoxLevel\n\tDomainName string\n\tTosca string\n\tImageVersion string\n\tCompute BoxCompute\n\tRepo *repository.Repo\n\tStatus Status\n\tProvider string\n\tPublicIp string\n\tCommit string\n\tEnvs []bind.EnvVar\n\tAddress *url.URL\n}\n\nfunc (b *Box) String() string {\n\tif d, err := yaml.Marshal(b); err != nil {\n\t\treturn err.Error()\n\t} else {\n\t\treturn string(d)\n\t}\n}\n\nfunc (b *Box) GetMemory() int64 {\n\treturn b.Compute.numMemory()\n}\n\nfunc (b *Box) GetSwap() int64 {\n\treturn b.Compute.numSwap()\n}\n\nfunc (b *Box) GetCpushare() int64 {\n\treturn b.Compute.numCpushare()\n}\n\n\/\/ GetName returns the assemblyname.domain(assembly001YeahBoy.megambox.com) of the box.\nfunc (b *Box) GetFullName() string {\n\treturn b.CartonName + \".\" + b.DomainName\n}\n\n\/\/ GetTosca returns the tosca type of the box.\nfunc (b *Box) GetTosca() string {\n\treturn b.Tosca\n}\n\n\/\/ GetIp returns the Unit.IP.\nfunc (b *Box) GetPublicIp() string {\n\treturn b.PublicIp\n}\n\n\/\/ Available returns true if the unit is available. It will return true\n\/\/ whenever the unit itself is available, even when the application process is\n\/\/ not.\nfunc (b *Box) Available() bool {\n\treturn b.Status == StatusDeploying ||\n\t\tb.Status == StatusCreating ||\n\t\tb.Status == StatusError\n}\n\nfunc (box *Box) GetRouter() (string, error) {\n\treturn \"route53\", nil \/\/dns.LoadConfig()\n}\n\n\/\/ Log adds a log message to the app. Specifying a good source is good so the\n\/\/ user can filter where the message come from.\nfunc (box *Box) Log(message, source, unit string) error {\n\tmessages := strings.Split(message, \"\\n\")\n\tlogs := make([]interface{}, 0, len(messages))\n\tfor _, msg := range messages {\n\t\tif len(strings.TrimSpace(msg)) > 0 {\n\t\t\tbl := Boxlog{\n\t\t\t\tTimestamp: time.Now().Local().Format(time.RFC822),\n\t\t\t\tMessage: msg,\n\t\t\t\tSource: source,\n\t\t\t\tName: box.Name,\n\t\t\t\tUnit: box.Id,\n\t\t\t}\n\t\t\tlogs = append(logs, bl)\n\t\t}\n\t}\n\tif len(logs) > 0 {\n\t\t_ = notify(box.GetFullName(), logs)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !confonly\n\npackage dns\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/dns\/dnsmessage\"\n\n\t\"v2ray.com\/core\"\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/net\"\n\tdns_proto \"v2ray.com\/core\/common\/protocol\/dns\"\n\t\"v2ray.com\/core\/common\/session\"\n\t\"v2ray.com\/core\/common\/task\"\n\t\"v2ray.com\/core\/features\/dns\"\n\t\"v2ray.com\/core\/transport\"\n\t\"v2ray.com\/core\/transport\/internet\"\n)\n\nfunc init() {\n\tcommon.Must(common.RegisterConfig((*Config)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {\n\t\th := new(Handler)\n\t\tif err := core.RequireFeatures(ctx, func(dnsClient dns.Client) error {\n\t\t\treturn h.Init(config.(*Config), dnsClient)\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn h, nil\n\t}))\n}\n\ntype ownLinkVerifier interface {\n\tIsOwnLink(ctx context.Context) bool\n}\n\ntype Handler struct {\n\tipv4Lookup dns.IPv4Lookup\n\tipv6Lookup dns.IPv6Lookup\n\townLinkVerifier ownLinkVerifier\n}\n\nfunc (h *Handler) Init(config *Config, dnsClient dns.Client) error {\n\tipv4lookup, ok := dnsClient.(dns.IPv4Lookup)\n\tif !ok {\n\t\treturn newError(\"dns.Client doesn't implement IPv4Lookup\")\n\t}\n\th.ipv4Lookup = ipv4lookup\n\n\tipv6lookup, ok := dnsClient.(dns.IPv6Lookup)\n\tif !ok {\n\t\treturn newError(\"dns.Client doesn't implement IPv6Lookup\")\n\t}\n\th.ipv6Lookup = ipv6lookup\n\n\tif v, ok := dnsClient.(ownLinkVerifier); ok {\n\t\th.ownLinkVerifier = v\n\t}\n\treturn nil\n}\n\nfunc (h *Handler) isOwnLink(ctx context.Context) bool {\n\treturn h.ownLinkVerifier != nil && h.ownLinkVerifier.IsOwnLink(ctx)\n}\n\nfunc parseIPQuery(b []byte) (r bool, domain string, id uint16, qType dnsmessage.Type) {\n\tvar parser dnsmessage.Parser\n\theader, err := parser.Start(b)\n\tif err != nil {\n\t\tnewError(\"parser start\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\n\tid = header.ID\n\tq, err := parser.Question()\n\tif err != nil {\n\t\tnewError(\"question\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\tqType = q.Type\n\tif qType != dnsmessage.TypeA && qType != dnsmessage.TypeAAAA {\n\t\treturn\n\t}\n\n\tdomain = q.Name.String()\n\tr = true\n\treturn\n}\n\n\/\/ Process implements proxy.Outbound.\nfunc (h *Handler) Process(ctx context.Context, link *transport.Link, d internet.Dialer) error {\n\toutbound := session.OutboundFromContext(ctx)\n\tif outbound == nil || !outbound.Target.IsValid() {\n\t\treturn newError(\"invalid outbound\")\n\t}\n\n\tdest := outbound.Target\n\n\tconn := &outboundConn{\n\t\tdialer: func() (internet.Connection, error) {\n\t\t\treturn d.Dial(ctx, dest)\n\t\t},\n\t\tconnReady: make(chan struct{}, 1),\n\t}\n\n\tvar reader dns_proto.MessageReader\n\tvar writer dns_proto.MessageWriter\n\tif dest.Network == net.Network_TCP {\n\t\treader = dns_proto.NewTCPReader(link.Reader)\n\t\twriter = &dns_proto.TCPWriter{\n\t\t\tWriter: link.Writer,\n\t\t}\n\t} else {\n\t\treader = &dns_proto.UDPReader{\n\t\t\tReader: link.Reader,\n\t\t}\n\t\twriter = &dns_proto.UDPWriter{\n\t\t\tWriter: link.Writer,\n\t\t}\n\t}\n\n\tvar connReader dns_proto.MessageReader\n\tvar connWriter dns_proto.MessageWriter\n\tif dest.Network == net.Network_TCP {\n\t\tconnReader = dns_proto.NewTCPReader(buf.NewReader(conn))\n\t\tconnWriter = &dns_proto.TCPWriter{\n\t\t\tWriter: buf.NewWriter(conn),\n\t\t}\n\t} else {\n\t\tconnReader = &dns_proto.UDPReader{\n\t\t\tReader: buf.NewPacketReader(conn),\n\t\t}\n\t\tconnWriter = &dns_proto.UDPWriter{\n\t\t\tWriter: buf.NewWriter(conn),\n\t\t}\n\t}\n\n\trequest := func() error {\n\t\tdefer conn.Close()\n\n\t\tfor {\n\t\t\tb, err := reader.ReadMessage()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !h.isOwnLink(ctx) {\n\t\t\t\tisIPQuery, domain, id, qType := parseIPQuery(b.Bytes())\n\t\t\t\tif isIPQuery {\n\t\t\t\t\tgo h.handleIPQuery(id, qType, domain, writer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err := connWriter.WriteMessage(b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tresponse := func() error {\n\t\tfor {\n\t\t\tb, err := connReader.ReadMessage()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := writer.WriteMessage(b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := task.Run(ctx, request, response); err != nil {\n\t\treturn newError(\"connection ends\").Base(err)\n\t}\n\n\treturn nil\n}\n\nfunc (h *Handler) handleIPQuery(id uint16, qType dnsmessage.Type, domain string, writer dns_proto.MessageWriter) {\n\tvar ips []net.IP\n\tvar err error\n\n\tswitch qType {\n\tcase dnsmessage.TypeA:\n\t\tips, err = h.ipv4Lookup.LookupIPv4(domain)\n\tcase dnsmessage.TypeAAAA:\n\t\tips, err = h.ipv6Lookup.LookupIPv6(domain)\n\t}\n\n\tif err != nil {\n\t\tnewError(\"ip query\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\n\tif len(ips) == 0 {\n\t\treturn\n\t}\n\n\tb := buf.New()\n\trawBytes := b.Extend(buf.Size)\n\tbuilder := dnsmessage.NewBuilder(rawBytes[:0], dnsmessage.Header{\n\t\tID: id,\n\t\tRCode: dnsmessage.RCodeSuccess,\n\t\tResponse: true,\n\t})\n\tcommon.Must(builder.StartAnswers())\n\n\trHeader := dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName(domain), Class: dnsmessage.ClassINET, TTL: 600}\n\tfor _, ip := range ips {\n\t\tif len(ip) == net.IPv4len {\n\t\t\tvar r dnsmessage.AResource\n\t\t\tcopy(r.A[:], ip)\n\t\t\tcommon.Must(builder.AResource(rHeader, r))\n\t\t} else {\n\t\t\tvar r dnsmessage.AAAAResource\n\t\t\tcopy(r.AAAA[:], ip)\n\t\t\tcommon.Must(builder.AAAAResource(rHeader, r))\n\t\t}\n\t}\n\tmsgBytes, err := builder.Finish()\n\tif err != nil {\n\t\tnewError(\"pack message\").Base(err).WriteToLog()\n\t\tb.Release()\n\t\treturn\n\t}\n\tb.Resize(0, int32(len(msgBytes)))\n\n\tif err := writer.WriteMessage(b); err != nil {\n\t\tnewError(\"write IP answer\").Base(err).WriteToLog()\n\t}\n}\n\ntype outboundConn struct {\n\taccess sync.Mutex\n\tdialer func() (internet.Connection, error)\n\n\tconn net.Conn\n\tconnReady chan struct{}\n}\n\nfunc (c *outboundConn) dial() error {\n\tconn, err := c.dialer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.conn = conn\n\tc.connReady <- struct{}{}\n\treturn nil\n}\n\nfunc (c *outboundConn) Write(b []byte) (int, error) {\n\tc.access.Lock()\n\n\tif c.conn == nil {\n\t\tif err := c.dial(); err != nil {\n\t\t\tc.access.Unlock()\n\t\t\tnewError(\"failed to dial outbound connection\").Base(err).AtWarning().WriteToLog()\n\t\t\treturn len(b), nil\n\t\t}\n\t}\n\n\tc.access.Unlock()\n\n\treturn c.conn.Write(b)\n}\n\nfunc (c *outboundConn) Read(b []byte) (int, error) {\n\tvar conn net.Conn\n\tc.access.Lock()\n\tconn = c.conn\n\tc.access.Unlock()\n\n\tif conn == nil {\n\t\t_, open := <-c.connReady\n\t\tif !open {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tconn = c.conn\n\t}\n\n\treturn conn.Read(b)\n}\n\nfunc (c *outboundConn) Close() error {\n\tc.access.Lock()\n\tclose(c.connReady)\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t}\n\tc.access.Unlock()\n\treturn nil\n}\n<commit_msg>include question section in dns response<commit_after>\/\/ +build !confonly\n\npackage dns\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/dns\/dnsmessage\"\n\n\t\"v2ray.com\/core\"\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/net\"\n\tdns_proto \"v2ray.com\/core\/common\/protocol\/dns\"\n\t\"v2ray.com\/core\/common\/session\"\n\t\"v2ray.com\/core\/common\/task\"\n\t\"v2ray.com\/core\/features\/dns\"\n\t\"v2ray.com\/core\/transport\"\n\t\"v2ray.com\/core\/transport\/internet\"\n)\n\nfunc init() {\n\tcommon.Must(common.RegisterConfig((*Config)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {\n\t\th := new(Handler)\n\t\tif err := core.RequireFeatures(ctx, func(dnsClient dns.Client) error {\n\t\t\treturn h.Init(config.(*Config), dnsClient)\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn h, nil\n\t}))\n}\n\ntype ownLinkVerifier interface {\n\tIsOwnLink(ctx context.Context) bool\n}\n\ntype Handler struct {\n\tipv4Lookup dns.IPv4Lookup\n\tipv6Lookup dns.IPv6Lookup\n\townLinkVerifier ownLinkVerifier\n}\n\nfunc (h *Handler) Init(config *Config, dnsClient dns.Client) error {\n\tipv4lookup, ok := dnsClient.(dns.IPv4Lookup)\n\tif !ok {\n\t\treturn newError(\"dns.Client doesn't implement IPv4Lookup\")\n\t}\n\th.ipv4Lookup = ipv4lookup\n\n\tipv6lookup, ok := dnsClient.(dns.IPv6Lookup)\n\tif !ok {\n\t\treturn newError(\"dns.Client doesn't implement IPv6Lookup\")\n\t}\n\th.ipv6Lookup = ipv6lookup\n\n\tif v, ok := dnsClient.(ownLinkVerifier); ok {\n\t\th.ownLinkVerifier = v\n\t}\n\treturn nil\n}\n\nfunc (h *Handler) isOwnLink(ctx context.Context) bool {\n\treturn h.ownLinkVerifier != nil && h.ownLinkVerifier.IsOwnLink(ctx)\n}\n\nfunc parseIPQuery(b []byte) (r bool, domain string, id uint16, qType dnsmessage.Type) {\n\tvar parser dnsmessage.Parser\n\theader, err := parser.Start(b)\n\tif err != nil {\n\t\tnewError(\"parser start\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\n\tid = header.ID\n\tq, err := parser.Question()\n\tif err != nil {\n\t\tnewError(\"question\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\tqType = q.Type\n\tif qType != dnsmessage.TypeA && qType != dnsmessage.TypeAAAA {\n\t\treturn\n\t}\n\n\tdomain = q.Name.String()\n\tr = true\n\treturn\n}\n\n\/\/ Process implements proxy.Outbound.\nfunc (h *Handler) Process(ctx context.Context, link *transport.Link, d internet.Dialer) error {\n\toutbound := session.OutboundFromContext(ctx)\n\tif outbound == nil || !outbound.Target.IsValid() {\n\t\treturn newError(\"invalid outbound\")\n\t}\n\n\tdest := outbound.Target\n\n\tconn := &outboundConn{\n\t\tdialer: func() (internet.Connection, error) {\n\t\t\treturn d.Dial(ctx, dest)\n\t\t},\n\t\tconnReady: make(chan struct{}, 1),\n\t}\n\n\tvar reader dns_proto.MessageReader\n\tvar writer dns_proto.MessageWriter\n\tif dest.Network == net.Network_TCP {\n\t\treader = dns_proto.NewTCPReader(link.Reader)\n\t\twriter = &dns_proto.TCPWriter{\n\t\t\tWriter: link.Writer,\n\t\t}\n\t} else {\n\t\treader = &dns_proto.UDPReader{\n\t\t\tReader: link.Reader,\n\t\t}\n\t\twriter = &dns_proto.UDPWriter{\n\t\t\tWriter: link.Writer,\n\t\t}\n\t}\n\n\tvar connReader dns_proto.MessageReader\n\tvar connWriter dns_proto.MessageWriter\n\tif dest.Network == net.Network_TCP {\n\t\tconnReader = dns_proto.NewTCPReader(buf.NewReader(conn))\n\t\tconnWriter = &dns_proto.TCPWriter{\n\t\t\tWriter: buf.NewWriter(conn),\n\t\t}\n\t} else {\n\t\tconnReader = &dns_proto.UDPReader{\n\t\t\tReader: buf.NewPacketReader(conn),\n\t\t}\n\t\tconnWriter = &dns_proto.UDPWriter{\n\t\t\tWriter: buf.NewWriter(conn),\n\t\t}\n\t}\n\n\trequest := func() error {\n\t\tdefer conn.Close()\n\n\t\tfor {\n\t\t\tb, err := reader.ReadMessage()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !h.isOwnLink(ctx) {\n\t\t\t\tisIPQuery, domain, id, qType := parseIPQuery(b.Bytes())\n\t\t\t\tif isIPQuery {\n\t\t\t\t\tgo h.handleIPQuery(id, qType, domain, writer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err := connWriter.WriteMessage(b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tresponse := func() error {\n\t\tfor {\n\t\t\tb, err := connReader.ReadMessage()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := writer.WriteMessage(b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := task.Run(ctx, request, response); err != nil {\n\t\treturn newError(\"connection ends\").Base(err)\n\t}\n\n\treturn nil\n}\n\nfunc (h *Handler) handleIPQuery(id uint16, qType dnsmessage.Type, domain string, writer dns_proto.MessageWriter) {\n\tvar ips []net.IP\n\tvar err error\n\n\tswitch qType {\n\tcase dnsmessage.TypeA:\n\t\tips, err = h.ipv4Lookup.LookupIPv4(domain)\n\tcase dnsmessage.TypeAAAA:\n\t\tips, err = h.ipv6Lookup.LookupIPv6(domain)\n\t}\n\n\tif err != nil {\n\t\tnewError(\"ip query\").Base(err).WriteToLog()\n\t\treturn\n\t}\n\n\tif len(ips) == 0 {\n\t\treturn\n\t}\n\n\tb := buf.New()\n\trawBytes := b.Extend(buf.Size)\n\tbuilder := dnsmessage.NewBuilder(rawBytes[:0], dnsmessage.Header{\n\t\tID: id,\n\t\tRCode: dnsmessage.RCodeSuccess,\n\t\tResponse: true,\n\t})\n\tbuilder.EnableCompression()\n\tcommon.Must(builder.StartQuestions())\n\tcommon.Must(builder.Question(dnsmessage.Question{\n\t\tName: dnsmessage.MustNewName(domain),\n\t\tClass: dnsmessage.ClassINET,\n\t\tType: qType,\n\t}))\n\tcommon.Must(builder.StartAnswers())\n\n\trHeader := dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName(domain), Class: dnsmessage.ClassINET, TTL: 600}\n\tfor _, ip := range ips {\n\t\tif len(ip) == net.IPv4len {\n\t\t\tvar r dnsmessage.AResource\n\t\t\tcopy(r.A[:], ip)\n\t\t\tcommon.Must(builder.AResource(rHeader, r))\n\t\t} else {\n\t\t\tvar r dnsmessage.AAAAResource\n\t\t\tcopy(r.AAAA[:], ip)\n\t\t\tcommon.Must(builder.AAAAResource(rHeader, r))\n\t\t}\n\t}\n\tmsgBytes, err := builder.Finish()\n\tif err != nil {\n\t\tnewError(\"pack message\").Base(err).WriteToLog()\n\t\tb.Release()\n\t\treturn\n\t}\n\tb.Resize(0, int32(len(msgBytes)))\n\n\tif err := writer.WriteMessage(b); err != nil {\n\t\tnewError(\"write IP answer\").Base(err).WriteToLog()\n\t}\n}\n\ntype outboundConn struct {\n\taccess sync.Mutex\n\tdialer func() (internet.Connection, error)\n\n\tconn net.Conn\n\tconnReady chan struct{}\n}\n\nfunc (c *outboundConn) dial() error {\n\tconn, err := c.dialer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.conn = conn\n\tc.connReady <- struct{}{}\n\treturn nil\n}\n\nfunc (c *outboundConn) Write(b []byte) (int, error) {\n\tc.access.Lock()\n\n\tif c.conn == nil {\n\t\tif err := c.dial(); err != nil {\n\t\t\tc.access.Unlock()\n\t\t\tnewError(\"failed to dial outbound connection\").Base(err).AtWarning().WriteToLog()\n\t\t\treturn len(b), nil\n\t\t}\n\t}\n\n\tc.access.Unlock()\n\n\treturn c.conn.Write(b)\n}\n\nfunc (c *outboundConn) Read(b []byte) (int, error) {\n\tvar conn net.Conn\n\tc.access.Lock()\n\tconn = c.conn\n\tc.access.Unlock()\n\n\tif conn == nil {\n\t\t_, open := <-c.connReady\n\t\tif !open {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tconn = c.conn\n\t}\n\n\treturn conn.Read(b)\n}\n\nfunc (c *outboundConn) Close() error {\n\tc.access.Lock()\n\tclose(c.connReady)\n\tif c.conn != nil {\n\t\tc.conn.Close()\n\t}\n\tc.access.Unlock()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/Cristofori\/kmud\/database\"\n\t\"github.com\/Cristofori\/kmud\/datastore\"\n\ttu \"github.com\/Cristofori\/kmud\/testutils\"\n\t\"github.com\/Cristofori\/kmud\/types\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\nvar _db *mgo.Database\n\nfunc _cleanup(t *testing.T) {\n\t_db.DropDatabase()\n\tdatastore.ClearAll()\n}\n\nfunc Test_Init(t *testing.T) {\n\tsession, err := mgo.Dial(\"localhost\")\n\ttu.Assert(err == nil, t, \"Failed to connect to database\")\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdbName := \"unit_model_test\"\n\n\t_db = session.DB(dbName)\n\n\tsession.DB(dbName).DropDatabase()\n\tInit(database.NewMongoSession(session), dbName)\n}\n\nfunc Test_UserFunctions(t *testing.T) {\n\tname1 := \"Test_name1\"\n\tpassword1 := \"test_password2\"\n\n\tuser1 := CreateUser(name1, password1)\n\n\ttu.Assert(user1.GetName() == name1, t, \"User creation failed, bad name:\", user1.GetName(), name1)\n\ttu.Assert(user1.VerifyPassword(password1), t, \"User creation failed, bad password\")\n\n\tuser2 := GetOrCreateUser(name1, password1)\n\ttu.Assert(user1 == user2, t, \"GetOrCreateUser() should have returned the user we already created\")\n\n\tname2 := \"test_name2\"\n\tpassword2 := \"test_password2\"\n\tuser3 := GetOrCreateUser(name2, password2)\n\ttu.Assert(user3 != user2 && user3 != user1, t, \"GetOrCreateUser() shouldn't have returned an already existing user\")\n\n\tuserByName := GetUserByName(name1)\n\ttu.Assert(userByName == user1, t, \"GetUserByName() failed to find user1\", name1)\n\n\tuserByName = GetUserByName(\"foobar\")\n\ttu.Assert(userByName == nil, t, \"GetUserByName() should have returned nill\")\n\n\tzone, _ := CreateZone(\"testZone\")\n\troom, _ := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\tCreatePlayerCharacter(\"testPlayer\", user1, room)\n\n\tDeleteUser(user1)\n\tuserByName = GetUserByName(name1)\n\ttu.Assert(userByName == nil, t, \"DeleteUser() failed to delete user1\")\n\ttu.Assert(len(GetUserCharacters(user1)) == 0, t, \"Deleting a user should have deleted its characters\")\n\n\t_cleanup(t)\n}\n\nfunc Test_ZoneFunctions(t *testing.T) {\n\tname := \"zone1\"\n\tzone1, err1 := CreateZone(name)\n\n\ttu.Assert(zone1 != nil && err1 == nil, t, \"Zone creation failed\")\n\n\tzoneByName := GetZoneByName(name)\n\ttu.Assert(zoneByName == zone1, t, \"GetZoneByName() failed\", zoneByName, zone1)\n\n\tzone2, err2 := CreateZone(\"zone2\")\n\ttu.Assert(zone2 != nil && err2 == nil, t, \"Failed to create zone2\")\n\n\tzone3, err3 := CreateZone(\"zone3\")\n\ttu.Assert(zone3 != nil && err3 == nil, t, \"Failed to create zone3\")\n\n\tzoneById := GetZone(zone1.GetId())\n\ttu.Assert(zoneById == zone1, t, \"GetZoneById() failed\")\n\n\t_, err := CreateZone(\"zone3\")\n\ttu.Assert(err != nil, t, \"Creating zone with duplicate name should have failed\")\n\n\t_cleanup(t)\n}\n\nfunc Test_RoomFunctions(t *testing.T) {\n\tzone, err := CreateZone(\"zone\")\n\ttu.Assert(zone != nil && err == nil, t, \"Zone creation failed\")\n\n\troom1, err1 := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\ttu.Assert(room1 != nil && err1 == nil, t, \"Room creation failed\")\n\n\tbadRoom, shouldError := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\ttu.Assert(badRoom == nil && shouldError != nil, t, \"Creating two rooms at the same location should have failed\")\n\n\troom2, err2 := CreateRoom(zone, types.Coordinate{X: 0, Y: 1, Z: 0})\n\ttu.Assert(room2 != nil && err2 == nil, t, \"Second room creation failed\")\n\n\troom1.SetExitEnabled(types.DirectionSouth, true)\n\troom2.SetExitEnabled(types.DirectionNorth, true)\n\n\ttu.Assert(room2.HasExit(types.DirectionNorth), t, \"Call to room.SetExitEnabled failed\")\n\tDeleteRoom(room1)\n\ttu.Assert(!room2.HasExit(types.DirectionNorth), t, \"Deleting room1 should have removed corresponding exit from room2\")\n\n\t_cleanup(t)\n}\n\nfunc Test_RoomAndZoneFunctions(t *testing.T) {\n\t\/\/ ZoneCorners\n\t\/\/ GetRoomsInZone\n}\n\nfunc Test_CharFunctions(t *testing.T) {\n\t\/\/user := CreateUser(\"user1\", \"\")\n\t\/\/playerName1 := \"player1\"\n\t\/\/player1 := CreatePlayer(name1, user\n}\n\n\/\/ vim: nocindent\n<commit_msg>Updated the model test to use go check and fixed an issue where it was hanging indefinitely<commit_after>package model\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/Cristofori\/kmud\/database\"\n\t\"github.com\/Cristofori\/kmud\/datastore\"\n\t\"github.com\/Cristofori\/kmud\/events\"\n\t\"github.com\/Cristofori\/kmud\/types\"\n\t. \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype ModelSuite struct{}\n\nvar _ = Suite(&ModelSuite{})\n\nvar _db *mgo.Database\n\nfunc (s *ModelSuite) SetUpSuite(c *C) {\n\tsession, err := mgo.Dial(\"localhost\")\n\tc.Assert(err, Equals, nil)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdbName := \"unit_model_test\"\n\n\t_db = session.DB(dbName)\n\n\tsession.DB(dbName).DropDatabase()\n\tInit(database.NewMongoSession(session), dbName)\n\n\tevents.StartEvents()\n}\n\nfunc (s *ModelSuite) TearDownSuite(c *C) {\n\t_db.DropDatabase()\n\tdatastore.ClearAll()\n}\n\nfunc (s *ModelSuite) TestUserFunctions(c *C) {\n\tname1 := \"Test_name1\"\n\tpassword1 := \"test_password2\"\n\n\tuser1 := CreateUser(name1, password1)\n\n\tc.Assert(user1.GetName(), Equals, name1)\n\tc.Assert(user1.VerifyPassword(password1), Equals, true)\n\n\tuser2 := GetOrCreateUser(name1, password1)\n\tc.Assert(user1, Equals, user2)\n\n\tname2 := \"test_name2\"\n\tpassword2 := \"test_password2\"\n\tuser3 := GetOrCreateUser(name2, password2)\n\tc.Assert(user3, Not(Equals), user2)\n\tc.Assert(user3, Not(Equals), user1)\n\n\tuserByName := GetUserByName(name1)\n\tc.Assert(userByName, Equals, user1)\n\n\tuserByName = GetUserByName(\"foobar\")\n\tc.Assert(userByName, Equals, nil)\n\n\tzone, _ := CreateZone(\"testZone\")\n\troom, _ := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\tCreatePlayerCharacter(\"testPlayer\", user1, room)\n\n\tDeleteUser(user1)\n\tuserByName = GetUserByName(name1)\n\tc.Assert(userByName, Equals, nil)\n\tc.Assert(GetUserCharacters(user1), HasLen, 0)\n}\n\nfunc (s *ModelSuite) TestZoneFunctions(c *C) {\n\tname := \"zone1\"\n\tzone1, err1 := CreateZone(name)\n\n\tc.Assert(zone1, Not(Equals), nil)\n\tc.Assert(err1, Equals, nil)\n\n\tzoneByName := GetZoneByName(name)\n\tc.Assert(zoneByName, Equals, zone1)\n\n\tzone2, err2 := CreateZone(\"zone2\")\n\tc.Assert(zone2, Not(Equals), nil)\n\tc.Assert(err2, Equals, nil)\n\n\tzone3, err3 := CreateZone(\"zone3\")\n\tc.Assert(zone3, Not(Equals), nil)\n\tc.Assert(err3, Equals, nil)\n\n\tzoneById := GetZone(zone1.GetId())\n\tc.Assert(zoneById, Equals, zone1)\n\n\t_, err := CreateZone(\"zone3\")\n\tc.Assert(err, Not(Equals), nil)\n}\n\nfunc (s *ModelSuite) TestRoomFunctions(c *C) {\n\tzone, err := CreateZone(\"zone\")\n\tc.Assert(zone, Not(Equals), nil)\n\tc.Assert(err, Equals, nil)\n\n\troom1, err1 := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\tc.Assert(room1, Not(Equals), nil)\n\tc.Assert(err1, Equals, nil)\n\n\tbadRoom, shouldError := CreateRoom(zone, types.Coordinate{X: 0, Y: 0, Z: 0})\n\tc.Assert(badRoom, Equals, nil)\n\tc.Assert(shouldError, Not(Equals), nil)\n\n\troom2, err2 := CreateRoom(zone, types.Coordinate{X: 0, Y: 1, Z: 0})\n\tc.Assert(room2, Not(Equals), nil)\n\tc.Assert(err2, Equals, nil)\n\n\troom1.SetExitEnabled(types.DirectionSouth, true)\n\troom2.SetExitEnabled(types.DirectionNorth, true)\n\n\tc.Assert(room2.HasExit(types.DirectionNorth), Equals, true)\n\tDeleteRoom(room1)\n\tc.Assert(room2.HasExit(types.DirectionNorth), Equals, false)\n}\n\nfunc (s *ModelSuite) TestRoomAndZoneFunctions(c *C) {\n\t\/\/ ZoneCorners\n\t\/\/ GetRoomsInZone\n}\n\nfunc (s *ModelSuite) TestCharFunctions(c *C) {\n\t\/\/user := CreateUser(\"user1\", \"\")\n\t\/\/playerName1 := \"player1\"\n\t\/\/player1 := CreatePlayer(name1, user\n}\n<|endoftext|>"} {"text":"<commit_before>package user\n\nimport (\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/jinzhu\/gorm\"\n\t\"github.com\/13pinj\/todoapp\/models\"\n\t\"github.com\/13pinj\/todoapp\/models\/session\"\n\t\"github.com\/13pinj\/todoapp\/models\/todolist\"\n)\n\nconst (\n\tDefaultRole = \"\"\n\tAdminRole = \"admin\"\n)\n\n\/\/ User - структура модели пользователя\ntype User struct {\n\tgorm.Model\n\tName string\n\tPwdHash string\n\tRole string\n\tLists []*todolist.TodoList\n}\n\n\/\/ LoadLists загружает из базы списки дел пользователя в поле Lists\nfunc (u *User) LoadLists() {\n\tif u.Lists != nil {\n\t\treturn\n\t}\n\tu.Lists = todolist.FindByUser(u.ID)\n}\n\nfunc validateName(name string) bool {\n\terr := models.DB.Where(\"name = ?\", name).First(&User{}).Error\n\treturn err != nil\n}\n\nfunc hashPwd(pwd string) string {\n\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(pwd)))\n}\n\n\/\/ Register добавляет нового пользователя в базу, и возвращает его структуру,\n\/\/ если введенные поля корректны. В противном случае Register возвращает ошибку.\nfunc Register(name string, password string) (u *User, errs []error) {\n\tok := strings.Contains(name, \" \")\n\tif ok {\n\t\terrs = append(errs, errors.New(\"Пробел в имени запрещен\"))\n\t}\n\tif len([]rune(name)) < 4 {\n\t\terrs = append(errs, errors.New(\"Имя слишком короткое (минимум 4 символа)\"))\n\t}\n\tif len([]rune(password)) < 6 {\n\t\terrs = append(errs, errors.New(\"Пароль слишком короткий (минимум 6 символов)\"))\n\t}\n\t\/\/ TODO: Заменить вызов validateName на Find\n\tif !validateName(name) {\n\t\terrs = append(errs, errors.New(\"Имя кем-то занято\"))\n\t}\n\tif errs != nil {\n\t\treturn\n\t}\n\tu = &User{\n\t\tName: name,\n\t\tPwdHash: hashPwd(password),\n\t}\n\terr := models.DB.Save(u).Error\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\treturn\n}\n\n\/\/ Login выполняет авторизацию пользователей.\n\/\/ Если введенные имя и пароль действительны, Login запишет факт авторизации\n\/\/ в сессию пользователя и вернет первым значением структуру пользователя,\n\/\/ а вторым true. В противном случае - nil и false.\n\/\/ Login перезапишет старые данные об авторизации, если таковые имеются.\nfunc Login(c *gin.Context, name string, password string) (*User, bool) {\n\tuser := &User{}\n\terr := models.DB.Where(\"name = ?\", name).First(user).Error\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\thash := sha1.Sum([]byte(password))\n\tstr := fmt.Sprintf(\"%x\", hash)\n\tif str != user.PwdHash {\n\t\treturn nil, false\n\t}\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", int(user.ID))\n\treturn user, true\n}\n\n\/\/ Logout стирает данные об авторизации из сессии пользователя.\nfunc Logout(c *gin.Context) {\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", 0)\n}\n\n\/\/ FromContext получает данные об авторизации из сессии пользователя.\n\/\/ Если пользователь авторизован, FromContext вернет структуру и true.\n\/\/ Иначе nil и false.\nfunc FromContext(c *gin.Context) (*User, bool) {\n\tst := session.FromContext(c)\n\tif st.GetInt(\"user_id\") == 0 {\n\t\treturn nil, false\n\t}\n\tuserid := uint(st.GetInt(\"user_id\"))\n\tuser := &User{}\n\terr := models.DB.Find(user, userid).Error\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\treturn user, true\n}\n\nfunc Find(name string) (*User, bool) {\n\treturn nil, false\n}\n\n\/\/ AutoLogin запишет факт авторизации в сессию пользователя.\n\/\/ Он перезапишет старые данные об авторизации, если таковые имеются.\nfunc (u *User) AutoLogin(c *gin.Context) {\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", int(u.ID))\n}\n\nfunc (u *User) SetRole(r string) {\n\n}\n\nfunc (u *User) Admin() bool {\n\treturn false\n}\n\n\/\/ Destroy стирает данные о пользователе из базы данных.\nfunc (u *User) Destroy() {\n\tmodels.DB.Delete(u)\n}\n\nvar initUser = &User{\n\tName: \"root\",\n\tPwdHash: hashPwd(\"12345678\"),\n\tRole: AdminRole,\n}\n\nfunc init() {\n\tinitializeUsers()\n}\n\nfunc initializeUsers() {\n\tif !models.DB.HasTable(initUser) {\n\t\tmodels.DB.CreateTable(initUser).Create(initUser)\n\t}\n\tmodels.DB.AutoMigrate(initUser)\n}\n<commit_msg>Move method to appropriate place<commit_after>package user\n\nimport (\n\t\"crypto\/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/jinzhu\/gorm\"\n\t\"github.com\/13pinj\/todoapp\/models\"\n\t\"github.com\/13pinj\/todoapp\/models\/session\"\n\t\"github.com\/13pinj\/todoapp\/models\/todolist\"\n)\n\nconst (\n\tDefaultRole = \"\"\n\tAdminRole = \"admin\"\n)\n\n\/\/ User - структура модели пользователя\ntype User struct {\n\tgorm.Model\n\tName string\n\tPwdHash string\n\tRole string\n\tLists []*todolist.TodoList\n}\n\nfunc validateName(name string) bool {\n\terr := models.DB.Where(\"name = ?\", name).First(&User{}).Error\n\treturn err != nil\n}\n\nfunc hashPwd(pwd string) string {\n\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(pwd)))\n}\n\n\/\/ Register добавляет нового пользователя в базу, и возвращает его структуру,\n\/\/ если введенные поля корректны. В противном случае Register возвращает ошибку.\nfunc Register(name string, password string) (u *User, errs []error) {\n\tok := strings.Contains(name, \" \")\n\tif ok {\n\t\terrs = append(errs, errors.New(\"Пробел в имени запрещен\"))\n\t}\n\tif len([]rune(name)) < 4 {\n\t\terrs = append(errs, errors.New(\"Имя слишком короткое (минимум 4 символа)\"))\n\t}\n\tif len([]rune(password)) < 6 {\n\t\terrs = append(errs, errors.New(\"Пароль слишком короткий (минимум 6 символов)\"))\n\t}\n\t\/\/ TODO: Заменить вызов validateName на Find\n\tif !validateName(name) {\n\t\terrs = append(errs, errors.New(\"Имя кем-то занято\"))\n\t}\n\tif errs != nil {\n\t\treturn\n\t}\n\tu = &User{\n\t\tName: name,\n\t\tPwdHash: hashPwd(password),\n\t}\n\terr := models.DB.Save(u).Error\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\treturn\n}\n\n\/\/ Login выполняет авторизацию пользователей.\n\/\/ Если введенные имя и пароль действительны, Login запишет факт авторизации\n\/\/ в сессию пользователя и вернет первым значением структуру пользователя,\n\/\/ а вторым true. В противном случае - nil и false.\n\/\/ Login перезапишет старые данные об авторизации, если таковые имеются.\nfunc Login(c *gin.Context, name string, password string) (*User, bool) {\n\tuser := &User{}\n\terr := models.DB.Where(\"name = ?\", name).First(user).Error\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\thash := sha1.Sum([]byte(password))\n\tstr := fmt.Sprintf(\"%x\", hash)\n\tif str != user.PwdHash {\n\t\treturn nil, false\n\t}\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", int(user.ID))\n\treturn user, true\n}\n\n\/\/ Logout стирает данные об авторизации из сессии пользователя.\nfunc Logout(c *gin.Context) {\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", 0)\n}\n\n\/\/ FromContext получает данные об авторизации из сессии пользователя.\n\/\/ Если пользователь авторизован, FromContext вернет структуру и true.\n\/\/ Иначе nil и false.\nfunc FromContext(c *gin.Context) (*User, bool) {\n\tst := session.FromContext(c)\n\tif st.GetInt(\"user_id\") == 0 {\n\t\treturn nil, false\n\t}\n\tuserid := uint(st.GetInt(\"user_id\"))\n\tuser := &User{}\n\terr := models.DB.Find(user, userid).Error\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\treturn user, true\n}\n\nfunc Find(name string) (*User, bool) {\n\treturn nil, false\n}\n\n\/\/ AutoLogin запишет факт авторизации в сессию пользователя.\n\/\/ Он перезапишет старые данные об авторизации, если таковые имеются.\nfunc (u *User) AutoLogin(c *gin.Context) {\n\tst := session.FromContext(c)\n\tst.SetInt(\"user_id\", int(u.ID))\n}\n\n\/\/ LoadLists загружает из базы списки дел пользователя в поле Lists\nfunc (u *User) LoadLists() {\n\tif u.Lists != nil {\n\t\treturn\n\t}\n\tu.Lists = todolist.FindByUser(u.ID)\n}\n\nfunc (u *User) SetRole(r string) {\n\n}\n\nfunc (u *User) Admin() bool {\n\treturn false\n}\n\n\/\/ Destroy стирает данные о пользователе из базы данных.\nfunc (u *User) Destroy() {\n\tmodels.DB.Delete(u)\n}\n\nvar initUser = &User{\n\tName: \"root\",\n\tPwdHash: hashPwd(\"12345678\"),\n\tRole: AdminRole,\n}\n\nfunc init() {\n\tinitializeUsers()\n}\n\nfunc initializeUsers() {\n\tif !models.DB.HasTable(initUser) {\n\t\tmodels.DB.CreateTable(initUser).Create(initUser)\n\t}\n\tmodels.DB.AutoMigrate(initUser)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage newton\n\nimport (\n\t\"github.com\/szabba\/md\/src\/vect\"\n)\n\n\/\/ A molecular dynamics system\ntype System struct {\n\talgo Integrator\n\tbodies []*Body\n\tforce Force\n}\n\n\/\/ Construct an empty system that will use the given integrator and has space\n\/\/ for the given number of bodies\nfunc NewSystem(algo Integrator, bodyCount int) *System {\n\n\tsys := new(System)\n\n\tsys.algo = algo\n\tsys.bodies = make([]*Body, 0, bodyCount)\n\n\treturn sys\n}\n\n\/\/ Set the system force\nfunc (sys *System) SetForce(f Force) {\n\n\tsys.force = f\n}\n\n\/\/ Add a force to the system\nfunc (sys *System) AddForce(f Force) {\n\n\tif sys.force != nil {\n\n\t\tsys.force = Combine(sys.force, f)\n\n\t} else {\n\n\t\tsys.SetForce(f)\n\t}\n}\n\n\/\/ Perform an integration step with the given dt\nfunc (sys *System) Step(dt float64) {\n\n\tas := make([]vect.Vector, len(sys.bodies))\n\n\tfor i, _ := range sys.bodies {\n\n\t\tas[i] = sys.force.Accel(sys.bodies, i)\n\t}\n\n\tfor i, body := range sys.bodies {\n\n\t\tsys.algo.Integrate(body, as[i], dt)\n\t}\n}\n<commit_msg>Add newton.*System.{Bodies,Body}<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage newton\n\nimport (\n\t\"github.com\/szabba\/md\/src\/vect\"\n)\n\n\/\/ A molecular dynamics system\ntype System struct {\n\talgo Integrator\n\tbodies []*Body\n\tforce Force\n}\n\n\/\/ Construct an empty system that will use the given integrator and has space\n\/\/ for the given number of bodies\nfunc NewSystem(algo Integrator, bodyCount int) *System {\n\n\tsys := new(System)\n\n\tsys.algo = algo\n\n\tsys.bodies = make([]*Body, bodyCount)\n\tfor i, _ := range sys.bodies {\n\n\t\tsys.bodies[i] = NewBody(sys.algo)\n\t}\n\n\treturn sys\n}\n\n\/\/ Set the system force\nfunc (sys *System) SetForce(f Force) {\n\n\tsys.force = f\n}\n\n\/\/ Add a force to the system\nfunc (sys *System) AddForce(f Force) {\n\n\tif sys.force != nil {\n\n\t\tsys.force = Combine(sys.force, f)\n\n\t} else {\n\n\t\tsys.SetForce(f)\n\t}\n}\n\n\/\/ Perform an integration step with the given dt\nfunc (sys *System) Step(dt float64) {\n\n\tas := make([]vect.Vector, len(sys.bodies))\n\n\tfor i, _ := range sys.bodies {\n\n\t\tas[i] = sys.force.Accel(sys.bodies, i)\n\t}\n\n\tfor i, body := range sys.bodies {\n\n\t\tsys.algo.Integrate(body, as[i], dt)\n\t}\n}\n\n\/\/ The number of bodies in the system\nfunc (sys *System) Bodies() int {\n\n\treturn len(sys.bodies)\n}\n\n\/\/ The i-th body within the system\nfunc (sys *System) Body(i int) *Body {\n\n\treturn sys.bodies[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package mongo_test\n\nimport (\n\t\/\/ Standard Library Imports\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\/\/ External Imports\n\t\"github.com\/globalsign\/mgo\"\n\n\t\/\/ Public Imports\n\t\"github.com\/matthewhartstonge\/storage\/mongo\"\n)\n\nfunc TestMain(m *testing.M) {\n\t\/\/ If needed, enable logging when debugging for tests\n\t\/\/mongo.SetLogger(logrus.New())\n\t\/\/mongo.SetDebug(true)\n\n\texitCode := m.Run()\n\tos.Exit(exitCode)\n}\n\nfunc AssertError(t *testing.T, got interface{}, want interface{}, msg string) {\n\tt.Errorf(fmt.Sprintf(\"Error: %s\\n\t got: %#+v\\n\twant: %#+v\", msg, got, want))\n}\n\nfunc AssertFatal(t *testing.T, got interface{}, want interface{}, msg string) {\n\tt.Fatalf(fmt.Sprintf(\"Fatal: %s\\n\t got: %#+v\\n\twant: %#+v\", msg, got, want))\n}\n\nfunc setup(t *testing.T) (*mongo.Store, context.Context, func()) {\n\t\/\/ Build our default mongo storage layer\n\tstore, err := mongo.NewDefaultStore()\n\tif err != nil {\n\t\tAssertFatal(t, err, nil, \"mongo connection error\")\n\t}\n\tmgo.SetStats(true)\n\n\t\/\/ Build a context with a mongo session ready to use for testing\n\tctx := context.Background()\n\tmgoSession := store.NewSession()\n\tctx = mongo.MgoSessionToContext(ctx, mgoSession)\n\n\treturn store, ctx, func() {\n\t\t\/\/ Test for leaky sockets\n\t\tstats := mgo.GetStats()\n\t\tif stats.SocketsAlive > 0 {\n\t\t\tAssertError(t, stats.SocketsAlive, 0, \"sockets are being leaked\")\n\t\t}\n\n\t\t\/\/ Close the inner (test) session.\n\t\tmgoSession.Close()\n\n\t\t\/\/ Close the database session.\n\t\tstore.DB.DropDatabase()\n\t\tstore.Close()\n\t}\n}\n<commit_msg>:zap: mongo: fixes missed error check.<commit_after>package mongo_test\n\nimport (\n\t\/\/ Standard Library Imports\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\/\/ External Imports\n\t\"github.com\/globalsign\/mgo\"\n\n\t\/\/ Public Imports\n\t\"github.com\/matthewhartstonge\/storage\/mongo\"\n)\n\nfunc TestMain(m *testing.M) {\n\t\/\/ If needed, enable logging when debugging for tests\n\t\/\/mongo.SetLogger(logrus.New())\n\t\/\/mongo.SetDebug(true)\n\n\texitCode := m.Run()\n\tos.Exit(exitCode)\n}\n\nfunc AssertError(t *testing.T, got interface{}, want interface{}, msg string) {\n\tt.Errorf(fmt.Sprintf(\"Error: %s\\n\t got: %#+v\\n\twant: %#+v\", msg, got, want))\n}\n\nfunc AssertFatal(t *testing.T, got interface{}, want interface{}, msg string) {\n\tt.Fatalf(fmt.Sprintf(\"Fatal: %s\\n\t got: %#+v\\n\twant: %#+v\", msg, got, want))\n}\n\nfunc setup(t *testing.T) (*mongo.Store, context.Context, func()) {\n\t\/\/ Build our default mongo storage layer\n\tstore, err := mongo.NewDefaultStore()\n\tif err != nil {\n\t\tAssertFatal(t, err, nil, \"mongo connection error\")\n\t}\n\tmgo.SetStats(true)\n\n\t\/\/ Build a context with a mongo session ready to use for testing\n\tctx := context.Background()\n\tmgoSession := store.NewSession()\n\tctx = mongo.MgoSessionToContext(ctx, mgoSession)\n\n\treturn store, ctx, func() {\n\t\t\/\/ Test for leaky sockets\n\t\tstats := mgo.GetStats()\n\t\tif stats.SocketsAlive > 0 {\n\t\t\tAssertError(t, stats.SocketsAlive, 0, \"sockets are being leaked\")\n\t\t}\n\n\t\t\/\/ Close the inner (test) session.\n\t\tmgoSession.Close()\n\n\t\t\/\/ Close the database session.\n\t\terr := store.DB.DropDatabase()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"error dropping database on cleanup: %s\", err)\n\t\t}\n\t\tstore.Close()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage signal implements access to incoming signals.\n\nSignals are primarily used on Unix-like systems. For the use of this\npackage on Windows and Plan 9, see below.\n\nTypes of signals\n\nThe signals SIGKILL and SIGSTOP may not be caught by a program, and\ntherefore can not be affected by this package.\n\nSynchronous signals are signals triggered by errors in program\nexecution: SIGBUS, SIGFPE, and SIGSEGV. These are only considered\nsynchronous when caused by program execution, not when sent using\nos.Process.Kill or the kill program or some similar mechanism. In\ngeneral, except as discussed below, Go programs will convert a\nsynchronous signal into a run-time panic.\n\nThe remaining signals are asynchronous signals. They are not\ntriggered by program errors, but are instead sent from the kernel or\nfrom some other program.\n\nOf the asynchronous signals, the SIGHUP signal is sent when a program\nloses its controlling terminal. The SIGINT signal is sent when the\nuser at the controlling terminal presses the interrupt character,\nwhich by default is ^C (Control-C). The SIGQUIT signal is sent when\nthe user at the controlling terminal presses the quit character, which\nby default is ^\\ (Control-Backslash). In general you can cause a\nprogram to simply exit by pressing ^C, and you can cause it to exit\nwith a stack dump by pressing ^\\.\n\nDefault behavior of signals in Go programs\n\nBy default, a synchronous signal is converted into a run-time panic. A\nSIGHUP, SIGINT, or SIGTERM signal causes the program to exit. A\nSIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGSTKFLT, SIGEMT, or SIGSYS signal\ncauses the program to exit with a stack dump. A SIGTSTP, SIGTTIN, or\nSIGTTOU signal gets the system default behavior (these signals are\nused by the shell for job control). The SIGPROF signal is handled\ndirectly by the Go runtime to implement runtime.CPUProfile. Other\nsignals are ignored.\n\nIf the Go program is started with either SIGHUP or SIGINT ignored,\nthey will remain ignored. Go always registers a handler for the other\nsignals.\n\nIf the Go program is started with a non-empty signal mask, that will\ngenerally be honored. However, some signals are explicitly unblocked:\nthe synchronous signals, SIGILL, SIGTRAP, SIGSTKFLT, SIGCHLD, SIGPROF,\nand, on GNU\/Linux, signals 32 (SIGCANCEL) and 33 (SIGSETXID)\n(SIGCANCEL and SIGSETXID are used internally by glibc).\n\nChanging the behavior of signals in Go programs\n\nThe functions in this package allow a program to change the way Go\nprograms handle signals.\n\nNotify disables the default behavior for a given set of asynchronous\nsignals and instead delivers them over one or more registered\nchannels. Specifically, it applies to the signals SIGHUP, SIGINT,\nSIGQUIT, SIGABRT, and SIGTERM. It also applies to the job control\nsignals SIGTSTP, SIGTTIN, and SIGTTOU, in which case the system\ndefault behavior does not occur. It also applies to some signals that\nare otherwise ignored: SIGUSR1, SIGUSR2, SIGPIPE, SIGALRM, SIGCHLD,\nSIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGWINCH, SIGIO, SIGPWR, SIGSYS,\nSIGINFO, SIGTHR, SIGWAITING, SIGLWP, SIGFREEZE, SIGTHAW, SIGLOST,\nSIGXRES, SIGJVM1, SIGJVM2, and any real time signals used on the\nsystem. Note that not all of these signals are available on all\nsystems.\n\nIf the program was started with SIGHUP or SIGINT ignored, and Notify\nis called for either signal, a signal handler will be installed for\nthat signal and it will no longer be ignored. If, later, Reset or\nIgnore is called for that signal, or Stop is called on all channels\npassed to Notify for that signal, the signal will once again be\nignored. Reset will restore the system default behavior for the\nsignal, while Ignore will cause the system to ignore the signal\nentirely.\n\nIf the program is started with a non-empty signal mask, some signals\nwill be explicitly unblocked as described above. If Notify is called\nfor a blocked signal, it will be unblocked. If, later, Reset is\ncalled for that signal, or Stop is called on all channels passed to\nNotify for that signal, the signal will once again be blocked.\n\nSIGPIPE\n\nWhen a Go program receives an EPIPE error from the kernel while\nwriting to file descriptors 1 or 2 (standard output or standard\nerror), it will raise a SIGPIPE signal. If the program is not\ncurrently receiving SIGPIPE via a call to Notify, this will cause the\nprogram to exit with SIGPIPE. On descriptors other than 1 or 2, the\nwrite will return the EPIPE error. This means that, by default,\ncommand line programs will behave like typical Unix command line\nprograms, while other programs will not crash with SIGPIPE when\nwriting to a closed network connection.\n\nGo programs that use cgo or SWIG\n\nIn a Go program that includes non-Go code, typically C\/C++ code\naccessed using cgo or SWIG, Go's startup code normally runs first. It\nconfigures the signal handlers as expected by the Go runtime, before\nthe non-Go startup code runs. If the non-Go startup code wishes to\ninstall its own signal handlers, it must take certain steps to keep Go\nworking well. This section documents those steps and the overall\neffect changes to signal handler settings by the non-Go code can have\non Go programs. In rare cases, the non-Go code may run before the Go\ncode, in which case the next section also applies.\n\nIf the non-Go code called by the Go program does not change any signal\nhandlers or masks, then the behavior is the same as for a pure Go\nprogram.\n\nIf the non-Go code installs any signal handlers, it must use the\nSA_ONSTACK flag with sigaction. Failing to do so is likely to cause\nthe program to crash if the signal is received. Go programs routinely\nrun with a limited stack, and therefore set up an alternate signal\nstack. Also, the Go standard library expects that any signal handlers\nwill use the SA_RESTART flag. Failing to do so may cause some library\ncalls to return \"interrupted system call\" errors.\n\nIf the non-Go code installs a signal handler for any of the\nsynchronous signals (SIGBUS, SIGFPE, SIGSEGV), then it should record\nthe existing Go signal handler. If those signals occur while\nexecuting Go code, it should invoke the Go signal handler (whether the\nsignal occurs while executing Go code can be determined by looking at\nthe PC passed to the signal handler). Otherwise some Go run-time\npanics will not occur as expected.\n\nIf the non-Go code installs a signal handler for any of the\nasynchronous signals, it may invoke the Go signal handler or not as it\nchooses. Naturally, if it does not invoke the Go signal handler, the\nGo behavior described above will not occur. This can be an issue with\nthe SIGPROF signal in particular.\n\nThe non-Go code should not change the signal mask on any threads\ncreated by the Go runtime. If the non-Go code starts new threads of\nits own, it may set the signal mask as it pleases.\n\nIf the non-Go code starts a new thread, changes the signal mask, and\nthen invokes a Go function in that thread, the Go runtime will\nautomatically unblock certain signals: the synchronous signals,\nSIGILL, SIGTRAP, SIGSTKFLT, SIGCHLD, SIGPROF, SIGCANCEL, and\nSIGSETXID. When the Go function returns, the non-Go signal mask will\nbe restored.\n\nIf the Go signal handler is invoked on a non-Go thread not running Go\ncode, the handler generally forwards the signal to the non-Go code, as\nfollows. If the signal is SIGPROF, the Go handler does\nnothing. Otherwise, the Go handler removes itself, unblocks the\nsignal, and raises it again, to invoke any non-Go handler or default\nsystem handler. If the program does not exit, the Go handler then\nreinstalls itself and continues execution of the program.\n\nNon-Go programs that call Go code\n\nWhen Go code is built with options like -buildmode=c-shared, it will\nbe run as part of an existing non-Go program. The non-Go code may\nhave already installed signal handlers when the Go code starts (that\nmay also happen in unusual cases when using cgo or SWIG; in that case,\nthe discussion here applies).\n\nIf the Go runtime sees an existing signal handler for the SIGCANCEL or\nSIGSETXID signals (which are used only on GNU\/Linux), it will turn on\nthe SA_ONSTACK flag and otherwise keep the signal handler.\n\nFor the synchronous signals, the Go runtime will install a signal\nhandler. It will save any existing signal handler. If a synchronous\nsignal arrives while executing non-Go code, the Go runtime will invoke\nthe existing signal handler instead of the Go signal handler.\n\nGo code built with -buildmode=c-archive or -buildmode=c-shared will\nnot install any other signal handlers by default. If there is an\nexisting signal handler, the Go runtime will turn on the SA_ONSTACK\nflag and otherwise keep the signal handler. If Notify is called for an\nasynchronous signal, a Go signal handler will be installed for that\nsignal. If, later, Reset is called for that signal, the original\nhandling for that signal will be reinstalled, restoring the non-Go\nsignal handler if any.\n\nGo code built without -buildmode=c-archive or -buildmode=c-shared will\ninstall a signal handler for the asynchronous signals listed above,\nand save any existing signal handler. If a signal is delivered to a\nnon-Go thread, it will act as described above, except that if there is\nan existing non-Go signal handler, that handler will be installed\nbefore raising the signal.\n\nWindows\n\nOn Windows a ^C (Control-C) or ^BREAK (Control-Break) normally cause\nthe program to exit. If Notify is called for os.SIGINT, ^C or ^BREAK\nwill cause os.SIGINT to be sent on the channel, and the program will\nnot exit. If Reset is called, or Stop is called on all channels passed\nto Notify, then the default behavior will be restored.\n\nPlan 9\n\nOn Plan 9, signals have type syscall.Note, which is a string. Calling\nNotify with a syscall.Note will cause that value to be sent on the\nchannel when that string is posted as a note.\n\n*\/\npackage signal\n<commit_msg>os\/signal: clarify signal doc<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage signal implements access to incoming signals.\n\nSignals are primarily used on Unix-like systems. For the use of this\npackage on Windows and Plan 9, see below.\n\nTypes of signals\n\nThe signals SIGKILL and SIGSTOP may not be caught by a program, and\ntherefore can not be affected by this package.\n\nSynchronous signals are signals triggered by errors in program\nexecution: SIGBUS, SIGFPE, and SIGSEGV. These are only considered\nsynchronous when caused by program execution, not when sent using\nos.Process.Kill or the kill program or some similar mechanism. In\ngeneral, except as discussed below, Go programs will convert a\nsynchronous signal into a run-time panic.\n\nThe remaining signals are asynchronous signals. They are not\ntriggered by program errors, but are instead sent from the kernel or\nfrom some other program.\n\nOf the asynchronous signals, the SIGHUP signal is sent when a program\nloses its controlling terminal. The SIGINT signal is sent when the\nuser at the controlling terminal presses the interrupt character,\nwhich by default is ^C (Control-C). The SIGQUIT signal is sent when\nthe user at the controlling terminal presses the quit character, which\nby default is ^\\ (Control-Backslash). In general you can cause a\nprogram to simply exit by pressing ^C, and you can cause it to exit\nwith a stack dump by pressing ^\\.\n\nDefault behavior of signals in Go programs\n\nBy default, a synchronous signal is converted into a run-time panic. A\nSIGHUP, SIGINT, or SIGTERM signal causes the program to exit. A\nSIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGSTKFLT, SIGEMT, or SIGSYS signal\ncauses the program to exit with a stack dump. A SIGTSTP, SIGTTIN, or\nSIGTTOU signal gets the system default behavior (these signals are\nused by the shell for job control). The SIGPROF signal is handled\ndirectly by the Go runtime to implement runtime.CPUProfile. Other\nsignals will be caught but no action will be taken.\n\nIf the Go program is started with either SIGHUP or SIGINT ignored\n(signal handler set to SIG_IGN), they will remain ignored.\n\nIf the Go program is started with a non-empty signal mask, that will\ngenerally be honored. However, some signals are explicitly unblocked:\nthe synchronous signals, SIGILL, SIGTRAP, SIGSTKFLT, SIGCHLD, SIGPROF,\nand, on GNU\/Linux, signals 32 (SIGCANCEL) and 33 (SIGSETXID)\n(SIGCANCEL and SIGSETXID are used internally by glibc). Subprocesses\nstarted by os.Exec, or by the os\/exec package, will inherit the\nmodified signal mask.\n\nChanging the behavior of signals in Go programs\n\nThe functions in this package allow a program to change the way Go\nprograms handle signals.\n\nNotify disables the default behavior for a given set of asynchronous\nsignals and instead delivers them over one or more registered\nchannels. Specifically, it applies to the signals SIGHUP, SIGINT,\nSIGQUIT, SIGABRT, and SIGTERM. It also applies to the job control\nsignals SIGTSTP, SIGTTIN, and SIGTTOU, in which case the system\ndefault behavior does not occur. It also applies to some signals that\notherwise cause no action: SIGUSR1, SIGUSR2, SIGPIPE, SIGALRM,\nSIGCHLD, SIGCONT, SIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGWINCH,\nSIGIO, SIGPWR, SIGSYS, SIGINFO, SIGTHR, SIGWAITING, SIGLWP, SIGFREEZE,\nSIGTHAW, SIGLOST, SIGXRES, SIGJVM1, SIGJVM2, and any real time signals\nused on the system. Note that not all of these signals are available\non all systems.\n\nIf the program was started with SIGHUP or SIGINT ignored, and Notify\nis called for either signal, a signal handler will be installed for\nthat signal and it will no longer be ignored. If, later, Reset or\nIgnore is called for that signal, or Stop is called on all channels\npassed to Notify for that signal, the signal will once again be\nignored. Reset will restore the system default behavior for the\nsignal, while Ignore will cause the system to ignore the signal\nentirely.\n\nIf the program is started with a non-empty signal mask, some signals\nwill be explicitly unblocked as described above. If Notify is called\nfor a blocked signal, it will be unblocked. If, later, Reset is\ncalled for that signal, or Stop is called on all channels passed to\nNotify for that signal, the signal will once again be blocked.\n\nSIGPIPE\n\nWhen a Go program writes to a broken pipe, the kernel will raise a\nSIGPIPE signal.\n\nIf the program has not called Notify to receive SIGPIPE signals, then\nthe behavior depends on the file descriptor number. A write to a\nbroken pipe on file descriptors 1 or 2 (standard output or standard\nerror) will cause the program to exit with a SIGPIPE signal. A write\nto a broken pipe on some other file descriptor will take no action on\nthe SIGPIPE signal, and the write will fail with an EPIPE error.\n\nIf the program has called Notify to receive SIGPIPE signals, the file\ndescriptor number does not matter. The SIGPIPE signal will be\ndelivered to the Notify channel, and the write will fail with an EPIPE\nerror.\n\nThis means that, by default, command line programs will behave like\ntypical Unix command line programs, while other programs will not\ncrash with SIGPIPE when writing to a closed network connection.\n\nGo programs that use cgo or SWIG\n\nIn a Go program that includes non-Go code, typically C\/C++ code\naccessed using cgo or SWIG, Go's startup code normally runs first. It\nconfigures the signal handlers as expected by the Go runtime, before\nthe non-Go startup code runs. If the non-Go startup code wishes to\ninstall its own signal handlers, it must take certain steps to keep Go\nworking well. This section documents those steps and the overall\neffect changes to signal handler settings by the non-Go code can have\non Go programs. In rare cases, the non-Go code may run before the Go\ncode, in which case the next section also applies.\n\nIf the non-Go code called by the Go program does not change any signal\nhandlers or masks, then the behavior is the same as for a pure Go\nprogram.\n\nIf the non-Go code installs any signal handlers, it must use the\nSA_ONSTACK flag with sigaction. Failing to do so is likely to cause\nthe program to crash if the signal is received. Go programs routinely\nrun with a limited stack, and therefore set up an alternate signal\nstack. Also, the Go standard library expects that any signal handlers\nwill use the SA_RESTART flag. Failing to do so may cause some library\ncalls to return \"interrupted system call\" errors.\n\nIf the non-Go code installs a signal handler for any of the\nsynchronous signals (SIGBUS, SIGFPE, SIGSEGV), then it should record\nthe existing Go signal handler. If those signals occur while\nexecuting Go code, it should invoke the Go signal handler (whether the\nsignal occurs while executing Go code can be determined by looking at\nthe PC passed to the signal handler). Otherwise some Go run-time\npanics will not occur as expected.\n\nIf the non-Go code installs a signal handler for any of the\nasynchronous signals, it may invoke the Go signal handler or not as it\nchooses. Naturally, if it does not invoke the Go signal handler, the\nGo behavior described above will not occur. This can be an issue with\nthe SIGPROF signal in particular.\n\nThe non-Go code should not change the signal mask on any threads\ncreated by the Go runtime. If the non-Go code starts new threads of\nits own, it may set the signal mask as it pleases.\n\nIf the non-Go code starts a new thread, changes the signal mask, and\nthen invokes a Go function in that thread, the Go runtime will\nautomatically unblock certain signals: the synchronous signals,\nSIGILL, SIGTRAP, SIGSTKFLT, SIGCHLD, SIGPROF, SIGCANCEL, and\nSIGSETXID. When the Go function returns, the non-Go signal mask will\nbe restored.\n\nIf the Go signal handler is invoked on a non-Go thread not running Go\ncode, the handler generally forwards the signal to the non-Go code, as\nfollows. If the signal is SIGPROF, the Go handler does\nnothing. Otherwise, the Go handler removes itself, unblocks the\nsignal, and raises it again, to invoke any non-Go handler or default\nsystem handler. If the program does not exit, the Go handler then\nreinstalls itself and continues execution of the program.\n\nNon-Go programs that call Go code\n\nWhen Go code is built with options like -buildmode=c-shared, it will\nbe run as part of an existing non-Go program. The non-Go code may\nhave already installed signal handlers when the Go code starts (that\nmay also happen in unusual cases when using cgo or SWIG; in that case,\nthe discussion here applies).\n\nIf the Go runtime sees an existing signal handler for the SIGCANCEL or\nSIGSETXID signals (which are used only on GNU\/Linux), it will turn on\nthe SA_ONSTACK flag and otherwise keep the signal handler.\n\nFor the synchronous signals, the Go runtime will install a signal\nhandler. It will save any existing signal handler. If a synchronous\nsignal arrives while executing non-Go code, the Go runtime will invoke\nthe existing signal handler instead of the Go signal handler.\n\nGo code built with -buildmode=c-archive or -buildmode=c-shared will\nnot install any other signal handlers by default. If there is an\nexisting signal handler, the Go runtime will turn on the SA_ONSTACK\nflag and otherwise keep the signal handler. If Notify is called for an\nasynchronous signal, a Go signal handler will be installed for that\nsignal. If, later, Reset is called for that signal, the original\nhandling for that signal will be reinstalled, restoring the non-Go\nsignal handler if any.\n\nGo code built without -buildmode=c-archive or -buildmode=c-shared will\ninstall a signal handler for the asynchronous signals listed above,\nand save any existing signal handler. If a signal is delivered to a\nnon-Go thread, it will act as described above, except that if there is\nan existing non-Go signal handler, that handler will be installed\nbefore raising the signal.\n\nWindows\n\nOn Windows a ^C (Control-C) or ^BREAK (Control-Break) normally cause\nthe program to exit. If Notify is called for os.SIGINT, ^C or ^BREAK\nwill cause os.SIGINT to be sent on the channel, and the program will\nnot exit. If Reset is called, or Stop is called on all channels passed\nto Notify, then the default behavior will be restored.\n\nPlan 9\n\nOn Plan 9, signals have type syscall.Note, which is a string. Calling\nNotify with a syscall.Note will cause that value to be sent on the\nchannel when that string is posted as a note.\n\n*\/\npackage signal\n<|endoftext|>"} {"text":"<commit_before>package moves\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\n\/\/CurrentPlayerSetter should be implemented by gameStates that use FinishTurn.\ntype CurrentPlayerSetter interface {\n\tSetCurrentPlayer(currentPlayer boardgame.PlayerIndex)\n}\n\n\/\/PlayerTurnFinisher is the interface your playerState is expected to adhere\n\/\/to when you use FinishTurn.\ntype PlayerTurnFinisher interface {\n\t\/\/TurnDone should return nil when the turn is done, or a descriptive error\n\t\/\/if the turn is not done.\n\tTurnDone() error\n\t\/\/ResetForTurnStart will be called when this player begins their turn.\n\tResetForTurnStart()\n\t\/\/ResetForTurnEnd will be called right before the CurrentPlayer is\n\t\/\/advanced to the next player.\n\tResetForTurnEnd()\n}\n\n\/*\n\nFinishTurn is designed to be used as a FixUp move that advances the\nCurrentPlayer to the next player when the current player's turn is done. Your\ngame's playerStates should implement the PlayerTurnFinisher interface, and\nyour gameState should implement CurrentPlayerSetter.\n\n*\/\ntype FinishTurn struct {\n\tBase\n}\n\n\/\/Legal checks if the game's CurrentPlayer's TurnDone() returns true.\nfunc (f *FinishTurn) Legal(state boardgame.State, proposer boardgame.PlayerIndex) error {\n\tcurrentPlayerIndex := state.Game().CurrentPlayerIndex()\n\n\tif !currentPlayerIndex.Valid(state) {\n\t\treturn errors.New(\"Current player is not valid\")\n\t}\n\n\tif currentPlayerIndex < 0 {\n\t\treturn errors.New(\"Current player is not valid\")\n\t}\n\n\tcurrentPlayer := state.PlayerStates()[currentPlayerIndex]\n\n\tcurrentPlayerTurnFinisher, ok := currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tif err := currentPlayerTurnFinisher.TurnDone(); err != nil {\n\t\treturn errors.New(\"The current player is not done with their turn: \" + err.Error())\n\t}\n\n\treturn nil\n\n}\n\n\/\/Aoply resets the current player via ResetForTurnEnd, then advances to the\n\/\/next player (using game.SetCurrentPlayer), then calls ResetForTurnStart on\n\/\/the new player.\nfunc (f *FinishTurn) Apply(state boardgame.State) error {\n\tcurrentPlayer := state.PlayerStates()[state.Game().CurrentPlayerIndex()]\n\n\tcurrentPlayerTurnFinisher, ok := currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tcurrentPlayerTurnFinisher.ResetForTurnEnd()\n\n\tnewPlayerIndex := state.Game().CurrentPlayerIndex().Next(state)\n\n\tplayerSetter, ok := state.GameState().(CurrentPlayerSetter)\n\n\tif !ok {\n\t\treturn errors.New(\"Gamestate did not implement CurrentPlayerSetter\")\n\t}\n\n\tplayerSetter.SetCurrentPlayer(newPlayerIndex)\n\n\tif state.Game().CurrentPlayerIndex() != newPlayerIndex {\n\t\treturn errors.New(\"Calling SetCurrentPlayer did not set the CurrentPlayer to the new index\")\n\t}\n\n\tcurrentPlayer = state.PlayerStates()[state.Game().CurrentPlayerIndex()]\n\n\tcurrentPlayerTurnFinisher, ok = currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tcurrentPlayerTurnFinisher.ResetForTurnStart()\n\n\treturn nil\n\n}\n<commit_msg>Oopen, FinishTurn now has the right signature for Apply. Part of #56.<commit_after>package moves\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n)\n\n\/\/CurrentPlayerSetter should be implemented by gameStates that use FinishTurn.\ntype CurrentPlayerSetter interface {\n\tSetCurrentPlayer(currentPlayer boardgame.PlayerIndex)\n}\n\n\/\/PlayerTurnFinisher is the interface your playerState is expected to adhere\n\/\/to when you use FinishTurn.\ntype PlayerTurnFinisher interface {\n\t\/\/TurnDone should return nil when the turn is done, or a descriptive error\n\t\/\/if the turn is not done.\n\tTurnDone() error\n\t\/\/ResetForTurnStart will be called when this player begins their turn.\n\tResetForTurnStart()\n\t\/\/ResetForTurnEnd will be called right before the CurrentPlayer is\n\t\/\/advanced to the next player.\n\tResetForTurnEnd()\n}\n\n\/*\n\nFinishTurn is designed to be used as a FixUp move that advances the\nCurrentPlayer to the next player when the current player's turn is done. Your\ngame's playerStates should implement the PlayerTurnFinisher interface, and\nyour gameState should implement CurrentPlayerSetter.\n\n*\/\ntype FinishTurn struct {\n\tBase\n}\n\n\/\/Legal checks if the game's CurrentPlayer's TurnDone() returns true.\nfunc (f *FinishTurn) Legal(state boardgame.State, proposer boardgame.PlayerIndex) error {\n\tcurrentPlayerIndex := state.Game().CurrentPlayerIndex()\n\n\tif !currentPlayerIndex.Valid(state) {\n\t\treturn errors.New(\"Current player is not valid\")\n\t}\n\n\tif currentPlayerIndex < 0 {\n\t\treturn errors.New(\"Current player is not valid\")\n\t}\n\n\tcurrentPlayer := state.PlayerStates()[currentPlayerIndex]\n\n\tcurrentPlayerTurnFinisher, ok := currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tif err := currentPlayerTurnFinisher.TurnDone(); err != nil {\n\t\treturn errors.New(\"The current player is not done with their turn: \" + err.Error())\n\t}\n\n\treturn nil\n\n}\n\n\/\/Aoply resets the current player via ResetForTurnEnd, then advances to the\n\/\/next player (using game.SetCurrentPlayer), then calls ResetForTurnStart on\n\/\/the new player.\nfunc (f *FinishTurn) Apply(state boardgame.MutableState) error {\n\tcurrentPlayer := state.PlayerStates()[state.Game().CurrentPlayerIndex()]\n\n\tcurrentPlayerTurnFinisher, ok := currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tcurrentPlayerTurnFinisher.ResetForTurnEnd()\n\n\tnewPlayerIndex := state.Game().CurrentPlayerIndex().Next(state)\n\n\tplayerSetter, ok := state.GameState().(CurrentPlayerSetter)\n\n\tif !ok {\n\t\treturn errors.New(\"Gamestate did not implement CurrentPlayerSetter\")\n\t}\n\n\tplayerSetter.SetCurrentPlayer(newPlayerIndex)\n\n\tif state.Game().CurrentPlayerIndex() != newPlayerIndex {\n\t\treturn errors.New(\"Calling SetCurrentPlayer did not set the CurrentPlayer to the new index\")\n\t}\n\n\tcurrentPlayer = state.PlayerStates()[state.Game().CurrentPlayerIndex()]\n\n\tcurrentPlayerTurnFinisher, ok = currentPlayer.(PlayerTurnFinisher)\n\n\tif !ok {\n\t\treturn errors.New(\"The current player interface did not implement PlayerTurnFinisher\")\n\t}\n\n\tcurrentPlayerTurnFinisher.ResetForTurnStart()\n\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package system\n\nimport (\n\t\"os\/exec\"\n)\n\ntype Client struct {\n}\n\nfunc GetLoad() (string, error) {\n\tuptimeCommand := `cat \/proc\/loadavg | awk '{ print $1\" \"$2\" \"$3 }'`\n\toutput, err := exec.Command(uptimeCommand).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(output), nil\n}\n\nfunc GetUptime() (string, error) {\n\tuptimeCommand := `cat \/proc\/uptime | awk '{ print $1 }'`\n\toutput, err := exec.Command(uptimeCommand).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(output), nil\n}\n<commit_msg>bugs fixed<commit_after>package system\n\nimport (\n\t\"os\/exec\"\n\t\"strings\"\n)\n\ntype Client struct {\n}\n\nfunc GetLoad() (string, error) {\n\toutput, err := exec.Command(\"cat\", \"\/proc\/loadavg\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(output), nil\n}\n\nfunc GetUptime() (string, error) {\n\toutput, err := exec.Command(\"cat\", \"\/proc\/uptime\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tloadAry := strings.Split(string(output), \" \")\n\treturn loadAry[0], nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Packet to interpret the bits\n\/\/ https:\/\/tools.ietf.org\/html\/rfc2865\n\/\/ https:\/\/tools.ietf.org\/html\/rfc2866\n\/\/\n\/\/ https:\/\/github.com\/bronze1man\/radius\n\/\/ https:\/\/github.com\/hoffoo\/go-radius\n\/\/ https:\/\/github.com\/alouca\/goradius\npackage radius\n\n\/\/ TODO: Convert magicnumbers to const?\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"encoding\/binary\"\n\t\"radiusd\/config\"\n\t\"fmt\"\n)\n\ntype Attr struct {\n\tType uint8\n\tLength uint8\n\tValue []byte\n}\n\n\/\/ Wrapper around Attr\ntype PubAttr struct {\n\tType AttributeType\n\tValue []byte\n}\n\ntype Packet struct {\n\tsecret string \/\/ shared secret\n\tCode uint8\n\tIdentifier uint8\n\tLen uint16\n\tAuth []byte \/\/ Request Authenticator\n\tAllAttrs []Attr\n}\n\nfunc (p *Packet) Secret() string {\n\treturn p.secret\n}\n\/\/ Get first packet by key\nfunc (p *Packet) Attr(key AttributeType) []byte {\n\tk := uint8(key)\n\tfor _, a := range p.AllAttrs {\n\t\tif a.Type == k {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"No such key %+v\", k))\n}\n\/\/ If requested attribute exists\nfunc (p *Packet) HasAttr(key AttributeType) bool {\n\tk := uint8(key)\n\tfor _, a := range p.AllAttrs {\n\t\tif a.Type == k {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Decode bytes into packet\nfunc decode(buf []byte, n int, secret string) (*Packet, error) {\n\tp := &Packet{}\n\tp.secret = secret\n\tp.Code = buf[0]\n\tp.Identifier = buf[1]\n\tp.Len = binary.BigEndian.Uint16(buf[2:4])\n\n\tp.Auth = buf[4:20] \/\/ 16 octets\n\t\/\/p.Attrs = make(map[AttributeType]Attr)\n\n\t\/\/ attrs\n\ti := 20\n\tfor {\n\t\tif i >= n {\n\t\t\tbreak\n\t\t}\n\n\t\tattr := Attr{\n\t\t\tType: buf[i],\n\t\t\tLength: buf[i+1],\n\t\t}\n\t\tb := i + 2\n\t\te := b + int(attr.Length) - 2 \/\/ Length is including type+Length fields\n\t\tattr.Value = buf[b:e]\n\t\tp.AllAttrs = append(p.AllAttrs, attr)\n\t\t\/\/p.Attrs[AttributeType(attr.Type)] = attr\n\n\t\ti = e\n\t}\n\tif config.Debug {\n\t\tconfig.Log.Printf(\"packet.decode: %+v\", p)\n\t}\n\treturn p, nil\n}\n\n\/\/ Encode packet into bytes\nfunc encode(p *Packet) []byte {\n\tb := make([]byte, 1024)\n\tb[0] = p.Code\n\tb[1] = p.Identifier\n\t\/\/ Skip Len for now 2+3\n\tcopy(b[4:20], p.Auth)\n\twritten := 20\n\n\tbb := b[20:]\n\tfor _, attr := range p.AllAttrs {\n\t\taLen := len(attr.Value) + 2 \/\/ add type+len fields\n\t\tif aLen > 255 || aLen < 2 {\n\t\t\tpanic(\"Value too big for attr\")\n\t\t}\n\t\tbb[0] = attr.Type\n\t\tbb[1] = uint8(aLen)\n\t\tcopy(bb[2:], attr.Value)\n\n\t\twritten += aLen\n\t\tbb = bb[aLen:]\n\t}\n\n\t\/\/ Now set Len\n\tbinary.BigEndian.PutUint16(b[2:4], uint16(written))\n\tif config.Debug {\n\t\tconfig.Log.Printf(\"packet.encode: %+v\", b[:written])\n\t}\n\treturn b[:written]\n}\n\n\/\/ MessageAuthenticate if any\nfunc validate(p *Packet) bool {\n\tif p.HasAttr(MessageAuthenticator) {\n\t\tcheck := p.Attr(MessageAuthenticator)\n\t\th := md5.New()\n\t\ttemp := encode(p)\n\t\t\/\/h.Write(temp[0:4])\n\t\t\/\/h.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})\n\t\t\/\/h.Write(temp[20:])\n\t\th.Write(temp)\n\t\th.Write([]byte(p.secret))\n\n\t\tif !hmac.Equal(check, h.Sum(nil)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Create response packet\nfunc (p *Packet) Response(code PacketCode, attrs []PubAttr) []byte {\n\tn := &Packet{\n\t\tCode: uint8(code),\n\t\tIdentifier: p.Identifier,\n\t\tAuth: p.Auth, \/\/ Set req auth\n\t\tLen: 0, \/\/ Set by Encode\n\t\t\/\/Attrs: make(map[AttributeType]Attr),\n\t}\n\n\tfor _, attr := range attrs {\n\t\tmsg := Attr{\n\t\t\tType: uint8(attr.Type),\n\t\t\tValue: attr.Value,\n\t\t}\n\t\tmsg.Length = uint8(2 + len(msg.Value))\n\t\tn.AllAttrs = append(n.AllAttrs, msg)\n\t}\n\n\t\/\/ Encode\n\tr := encode(n)\n\n\t\/\/ Set right Response Authenticator\n\t\/\/ MD5(Code+ID+Length+RequestAuth+Attributes+Secret)\n\th := md5.New()\n\th.Write(r)\n\th.Write([]byte(p.secret))\n\tres := h.Sum(nil)[:16]\n\tcopy(r[4:20], res)\n\n\treturn r\n}\n<commit_msg>Bugfix. Change Type to AttributeType for better debugging<commit_after>\/\/ Packet to interpret the bits\n\/\/ https:\/\/tools.ietf.org\/html\/rfc2865\n\/\/ https:\/\/tools.ietf.org\/html\/rfc2866\n\/\/\n\/\/ https:\/\/github.com\/bronze1man\/radius\n\/\/ https:\/\/github.com\/hoffoo\/go-radius\n\/\/ https:\/\/github.com\/alouca\/goradius\npackage radius\n\n\/\/ TODO: Convert magicnumbers to const?\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"encoding\/binary\"\n\t\"radiusd\/config\"\n\t\"fmt\"\n)\n\ntype Attr struct {\n\tType AttributeType\n\tLength uint8\n\tValue []byte\n}\n\n\/\/ Wrapper around Attr\ntype PubAttr struct {\n\tType AttributeType\n\tValue []byte\n}\n\ntype Packet struct {\n\tsecret string \/\/ shared secret\n\tCode uint8\n\tIdentifier uint8\n\tLen uint16\n\tAuth []byte \/\/ Request Authenticator\n\tAllAttrs []Attr\n}\n\nfunc (p *Packet) Secret() string {\n\treturn p.secret\n}\n\/\/ Get first packet by key\nfunc (p *Packet) Attr(key AttributeType) []byte {\n\tfor _, a := range p.AllAttrs {\n\t\tif a.Type == key {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"No such key %s\", key.String()))\n}\n\/\/ If requested attribute exists\nfunc (p *Packet) HasAttr(key AttributeType) bool {\n\tfor _, a := range p.AllAttrs {\n\t\tif a.Type == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Decode bytes into packet\nfunc decode(buf []byte, n int, secret string) (*Packet, error) {\n\tp := &Packet{}\n\tp.secret = secret\n\tp.Code = buf[0]\n\tp.Identifier = buf[1]\n\tp.Len = binary.BigEndian.Uint16(buf[2:4])\n\n\tp.Auth = buf[4:20] \/\/ 16 octets\n\t\/\/p.Attrs = make(map[AttributeType]Attr)\n\n\t\/\/ attrs\n\ti := 20\n\tfor {\n\t\tif i >= n {\n\t\t\tbreak\n\t\t}\n\n\t\tattr := Attr{\n\t\t\tType: AttributeType(buf[i]),\n\t\t\tLength: buf[i+1],\n\t\t}\n\t\tb := i + 2\n\t\te := b + int(attr.Length) - 2 \/\/ Length is including type+Length fields\n\t\tattr.Value = buf[b:e]\n\t\tp.AllAttrs = append(p.AllAttrs, attr)\n\t\t\/\/p.Attrs[AttributeType(attr.Type)] = attr\n\n\t\ti = e\n\t}\n\tif config.Debug {\n\t\tconfig.Log.Printf(\"packet.decode: %+v\", p)\n\t\t\/*logAttrs := \"packet.decode:\\n\"\n\t\tfor _, attr := range p.AllAttrs {\n\t\t\tlogAttrs += fmt.Sprintf(\"\\t%s\\t\\t%s\", attr.String() + \"\\n\"\n\t\t}\n\t\tconfig.Log.Printf(logAttrs)*\/\n\t}\n\treturn p, nil\n}\n\n\/\/ Encode packet into bytes\nfunc encode(p *Packet) []byte {\n\tb := make([]byte, 1024)\n\tb[0] = p.Code\n\tb[1] = p.Identifier\n\t\/\/ Skip Len for now 2+3\n\tcopy(b[4:20], p.Auth)\n\twritten := 20\n\n\tbb := b[20:]\n\tfor _, attr := range p.AllAttrs {\n\t\taLen := len(attr.Value) + 2 \/\/ add type+len fields\n\t\tif aLen > 255 || aLen < 2 {\n\t\t\tpanic(\"Value too big for attr\")\n\t\t}\n\t\tbb[0] = uint8(attr.Type)\n\t\tbb[1] = uint8(aLen)\n\t\tcopy(bb[2:], attr.Value)\n\n\t\twritten += aLen\n\t\tbb = bb[aLen:]\n\t}\n\n\t\/\/ Now set Len\n\tbinary.BigEndian.PutUint16(b[2:4], uint16(written))\n\tif config.Debug {\n\t\tconfig.Log.Printf(\"packet.encode: %+v\", p)\n\t}\n\treturn b[:written]\n}\n\n\/\/ MessageAuthenticate if any\nfunc validate(p *Packet) bool {\n\tif p.HasAttr(MessageAuthenticator) {\n\t\tcheck := p.Attr(MessageAuthenticator)\n\t\th := md5.New()\n\t\ttemp := encode(p)\n\t\t\/\/h.Write(temp[0:4])\n\t\t\/\/h.Write([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})\n\t\t\/\/h.Write(temp[20:])\n\t\th.Write(temp)\n\t\th.Write([]byte(p.secret))\n\n\t\tif !hmac.Equal(check, h.Sum(nil)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Create response packet\nfunc (p *Packet) Response(code PacketCode, attrs []PubAttr) []byte {\n\tn := &Packet{\n\t\tCode: uint8(code),\n\t\tIdentifier: p.Identifier,\n\t\tAuth: p.Auth, \/\/ Set req auth\n\t\tLen: 0, \/\/ Set by Encode\n\t\t\/\/Attrs: make(map[AttributeType]Attr),\n\t}\n\n\tfor _, attr := range attrs {\n\t\tmsg := Attr{\n\t\t\tType: attr.Type,\n\t\t\tValue: attr.Value,\n\t\t}\n\t\tmsg.Length = uint8(2 + len(msg.Value))\n\t\tn.AllAttrs = append(n.AllAttrs, msg)\n\t}\n\n\t\/\/ Encode\n\tr := encode(n)\n\n\t\/\/ Set right Response Authenticator\n\t\/\/ MD5(Code+ID+Length+RequestAuth+Attributes+Secret)\n\th := md5.New()\n\th.Write(r)\n\th.Write([]byte(p.secret))\n\tres := h.Sum(nil)[:16]\n\tcopy(r[4:20], res)\n\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>package rapi\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n)\n\ntype TransformCb func() (err error)\n\ntype Endpoint struct {\n\tconfig *Config\n\tpath string\n\treqExtStruct interface{}\n\treqIntStruct interface{}\n\tresExtStruct interface{}\n\tresIntStruct interface{}\n\ttransformer Transformable\n}\n\nfunc NewEndpoint(a *Api, method, path string) *Endpoint {\n\tep := &Endpoint{\n\t\tconfig: a.config,\n\t\tpath: path,\n\t\ttransformer: a.transformer,\n\t}\n\n\ta.Route(method, ep.config.Listener.Prefix+path, ep)\n\n\treturn ep\n}\n\nfunc (ep *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttr := http.DefaultTransport\n\n\tr.URL.Host = ep.config.Backend.Address\n\tr.URL.Scheme = \"http\"\n\tr.URL.Path = ep.config.Backend.Prefix + ep.path\n\n\tif ep.reqIntStruct != nil && ep.reqExtStruct != nil {\n\t\tep.transformer.TransformRequest(r, ep.reqExtStruct, ep.reqIntStruct)\n\t}\n\n\tres, resErr := tr.RoundTrip(r)\n\tif resErr != nil {\n\t\tpanic(fmt.Sprintf(\"Response error: %s\", resErr))\n\t} else {\n\t\tdefer res.Body.Close()\n\t}\n\n\tif ep.resIntStruct != nil && ep.resExtStruct != nil {\n\t\tep.transformer.TransformResponse(res, ep.resIntStruct, ep.resExtStruct)\n\t}\n\n\tw.WriteHeader(res.StatusCode)\n\t_, ioErr := io.Copy(w, res.Body)\n\n\tif ioErr != nil {\n\t\tlog.Printf(\"Error writting response: %s\", ioErr)\n\t}\n}\n\nfunc (ep *Endpoint) InternalPath(path string) *Endpoint {\n\tep.path = path\n\treturn ep\n}\n\nfunc (ep *Endpoint) TransformRequest(ex, in interface{}) *Endpoint {\n\tep.reqExtStruct = ex\n\tep.reqIntStruct = in\n\treturn ep\n}\n\nfunc (ep *Endpoint) TransformResponse(in, ex interface{}) *Endpoint {\n\tep.resIntStruct = in\n\tep.resExtStruct = ex\n\treturn ep\n}\n\n\/\/ To be done\nfunc (ep *Endpoint) TransformRequestCb(cb TransformCb) *Endpoint {\n\terr := cb()\n\tif err != nil {\n\t\tlog.Print(\"Something went wrong\")\n\t}\n\treturn ep\n}\n\n\/\/ To be done\nfunc (ep *Endpoint) TransformResponseCb(cb TransformCb) *Endpoint {\n\terr := cb()\n\tif err != nil {\n\t\tlog.Print(\"Something went wrong\")\n\t}\n\treturn ep\n}\n<commit_msg>Url vars handler<commit_after>package rapi\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"text\/template\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\ntype TransformCb func() (err error)\n\ntype Endpoint struct {\n\tconfig *Config\n\tpath string\n\treqExtStruct interface{}\n\treqIntStruct interface{}\n\tresExtStruct interface{}\n\tresIntStruct interface{}\n\ttransformer Transformable\n}\n\nfunc NewEndpoint(a *Api, method, path string) *Endpoint {\n\tep := &Endpoint{\n\t\tconfig: a.config,\n\t\tpath: path,\n\t\ttransformer: a.transformer,\n\t}\n\n\ta.Route(method, ep.config.Listener.Prefix+path, ep)\n\n\treturn ep\n}\n\nfunc (ep *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttr := http.DefaultTransport\n\n\tep.CopyUrlVars(r)\n\n\tr.URL.Host = ep.config.Backend.Address\n\tr.URL.Scheme = \"http\"\n\tr.URL.Path = ep.config.Backend.Prefix + ep.path\n\n\tif ep.reqIntStruct != nil && ep.reqExtStruct != nil {\n\t\tep.transformer.TransformRequest(r, ep.reqExtStruct, ep.reqIntStruct)\n\t}\n\n\tres, resErr := tr.RoundTrip(r)\n\tif resErr != nil {\n\t\tpanic(fmt.Sprintf(\"Response error: %s\", resErr))\n\t} else {\n\t\tdefer res.Body.Close()\n\t}\n\n\tif ep.resIntStruct != nil && ep.resExtStruct != nil {\n\t\tep.transformer.TransformResponse(res, ep.resIntStruct, ep.resExtStruct)\n\t}\n\n\tw.WriteHeader(res.StatusCode)\n\t_, ioErr := io.Copy(w, res.Body)\n\n\tif ioErr != nil {\n\t\tlog.Printf(\"Error writting response: %s\", ioErr)\n\t}\n}\n\nfunc (ep *Endpoint) CopyUrlVars(r *http.Request) {\n\tvar path bytes.Buffer\n\n\tvars := mux.Vars(r)\n\tif len(vars) == 0 {\n\t\treturn\n\t}\n\n\tt, err := template.New(\"path\").Parse(ep.path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tt.Execute(&path, vars)\n\tep.path = path.String()\n}\n\nfunc (ep *Endpoint) InternalPath(path string) *Endpoint {\n\tep.path = path\n\treturn ep\n}\n\nfunc (ep *Endpoint) TransformRequest(ex, in interface{}) *Endpoint {\n\tep.reqExtStruct = ex\n\tep.reqIntStruct = in\n\treturn ep\n}\n\nfunc (ep *Endpoint) TransformResponse(in, ex interface{}) *Endpoint {\n\tep.resIntStruct = in\n\tep.resExtStruct = ex\n\treturn ep\n}\n\n\/\/ To be done\nfunc (ep *Endpoint) TransformRequestCb(cb TransformCb) *Endpoint {\n\terr := cb()\n\tif err != nil {\n\t\tlog.Print(\"Something went wrong\")\n\t}\n\treturn ep\n}\n\n\/\/ To be done\nfunc (ep *Endpoint) TransformResponseCb(cb TransformCb) *Endpoint {\n\terr := cb()\n\tif err != nil {\n\t\tlog.Print(\"Something went wrong\")\n\t}\n\treturn ep\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rbac\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"strings\"\n)\n\ntype Resource struct {\n\tName string\n\tComponent string\n\tPath string \/\/ Path is hierarchy\n}\n\nfunc CreateResource(component string, path string) (*Resource, error) {\n\tname, err := GetResourceName(component, path)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\treturn &Resource{\n\t\tname,\n\t\tcomponent,\n\t\tpath,\n\t}, nil\n}\n\nfunc GetResourceName(component string, path string) (string, error) {\n\tif component == \"\" {\n\t\tlog.Error(\"Component couldn't be empty\")\n\t\treturn \"\", errors.New(\"Component couldn't be empty\")\n\t}\n\tif path == \"\" {\n\t\tlog.Error(\"Path couldn't be empty\")\n\t\treturn \"\", errors.New(\"Path couldn't be empty\")\n\t}\n\n\treturn hex.EncodeToString([]byte(component + \" \" + path)), nil\n}\n\nfunc (resource *Resource) HasResource(component string, path string) bool {\n\t\/\/ * means all\n\tif component == \"*\" {\n\t\treturn true\n\t} else if resource.Component == component {\n\t\t\/\/ * means all\n\t\tif path == \"*\" {\n\t\t\treturn true\n\t\t\t\/\/ Prefix for hierarchy authorization\n\t\t} else if strings.HasPrefix(resource.Path, path) {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}\n<commit_msg>Fix the wrong target<commit_after>\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage rbac\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"strings\"\n)\n\ntype Resource struct {\n\tName string\n\tComponent string\n\tPath string \/\/ Path is hierarchy\n}\n\nfunc CreateResource(component string, path string) (*Resource, error) {\n\tname, err := GetResourceName(component, path)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\treturn &Resource{\n\t\tname,\n\t\tcomponent,\n\t\tpath,\n\t}, nil\n}\n\nfunc GetResourceName(component string, path string) (string, error) {\n\tif component == \"\" {\n\t\tlog.Error(\"Component couldn't be empty\")\n\t\treturn \"\", errors.New(\"Component couldn't be empty\")\n\t}\n\tif path == \"\" {\n\t\tlog.Error(\"Path couldn't be empty\")\n\t\treturn \"\", errors.New(\"Path couldn't be empty\")\n\t}\n\n\treturn hex.EncodeToString([]byte(component + \" \" + path)), nil\n}\n\nfunc (resource *Resource) HasResource(component string, path string) bool {\n\t\/\/ * means all\n\tif resource.Component == \"*\" {\n\t\treturn true\n\t} else if resource.Component == component {\n\t\t\/\/ * means all\n\t\tif path == \"*\" {\n\t\t\treturn true\n\t\t\t\/\/ Prefix for hierarchy authorization\n\t\t} else if strings.HasPrefix(resource.Path, path) {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/rightscale\/rsc\/recording\"\n)\n\nconst output = \"recording_new.json\"\n\nvar exitRegexp = regexp.MustCompile(`exit status (\\d+)`)\n\nfunc main() {\n\targs := []string{\"--dump=json\"}\n\targs = append(args, os.Args[1:]...)\n\tcmd := exec.Command(\"rsc\", args...)\n\t_, args = extractArg(\"--dump\", args)\n\t_, args = extractArg(\"--host\", args)\n\t_, args = extractArg(\"--key\", args)\n\tvar out bytes.Buffer\n\tvar outErr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &outErr\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tfail(\"can't create pipe: %s\", err)\n\t}\n\terr = syscall.Dup2(int(w.Fd()), 10)\n\tif err != nil {\n\t\tfail(\"can't create fd 10: %s\", err)\n\t}\n\tfd10 := os.NewFile(10, \"fd10\")\n\tcmd.ExtraFiles = append(cmd.ExtraFiles, fd10)\n\terr = cmd.Run()\n\tfd10.Close()\n\tw.Close()\n\texitCode := 0\n\tif err != nil {\n\t\texCode := exitRegexp.FindStringSubmatch(err.Error())\n\t\tif len(exCode) == 2 {\n\t\t\texitCode, err = strconv.Atoi(exCode[1])\n\t\t\tif err != nil {\n\t\t\t\tfail(\"Invalid exit code '%s': %s\", exCode[1], err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tfail(\"%s %s failed: %s\\n%s\\n%s\\n\", \"rsc\", strings.Join(args, \" \"), err, out.String(), outErr.String())\n\t\t}\n\t}\n\traw, err := ioutil.ReadAll(r)\n\tos.Remove(\"fd10\")\n\tif err != nil {\n\t\tfail(\"read rsc dump: %s\\n\", err, outErr.String())\n\t}\n\tr.Close()\n\tvar rr recording.RequestResponse\n\terr = json.Unmarshal(raw, &rr)\n\tif err != nil {\n\t\tfail(\"load rsc dump: %s - dump was:\\n%s\\noutput was:\\n%s\\n\", err, string(raw), out)\n\t}\n\trr.ReqHeader.Del(\"Authorization\")\n\trr.ReqHeader.Del(\"User-Agent\")\n\trr.RespHeader.Del(\"Cache-Control\")\n\trr.RespHeader.Del(\"Connection\")\n\trr.RespHeader.Del(\"Set-Cookie\")\n\trr.RespHeader.Del(\"Strict-Transport-Security\")\n\trr.RespHeader.Del(\"X-Request-Uuid\")\n\trecord := recording.Recording{\n\t\tCmdArgs: args,\n\t\tExitCode: exitCode,\n\t\tStdout: out.String(),\n\t\tRR: rr,\n\t}\n\tjs, err := json.MarshalIndent(record, \"\", \" \")\n\tif err != nil {\n\t\tfail(\"failed to serialize record: %s\", err)\n\t}\n\twrite(js)\n\t\/\/ Echo output of rsc so calling script can use it\n\tfmt.Print(out.String())\n}\n\n\/\/ Extract command line argument with given name and return remaining arguments\nfunc extractArg(name string, args []string) (string, []string) {\n\tvar val string\n\tvar newArgs []string\n\tvar skip bool\n\tfor i, a := range args {\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(a, \"=\") {\n\t\t\telems := strings.SplitN(a, \"=\", 2)\n\t\t\tif elems[0] == name {\n\t\t\t\tval = elems[1]\n\t\t\t} else {\n\t\t\t\tnewArgs = append(newArgs, a)\n\t\t\t}\n\t\t} else if a == name && len(args) > (i+1) {\n\t\t\tval = args[i+1]\n\t\t\tskip = true\n\t\t} else {\n\t\t\tnewArgs = append(newArgs, a)\n\t\t}\n\t}\n\treturn val, newArgs\n}\n\n\/\/ Helper function that appends a string to output file\nfunc write(b []byte) {\n\tf, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tfail(\"failed to open output file\")\n\t}\n\tf.Write(b)\n\tf.WriteString(\"\\n\")\n\tf.Close()\n}\n\n\/\/ Print error message and exit with status code 1\nfunc fail(msg string, format ...interface{}) {\n\tfmt.Fprintf(os.Stderr, msg+\"\\n\", format...)\n\tos.Exit(1)\n}\n<commit_msg>Fix recorder output in recorder.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/rightscale\/rsc\/recording\"\n)\n\nconst output = \"recording_new.json\"\n\nvar exitRegexp = regexp.MustCompile(`exit status (\\d+)`)\n\nfunc main() {\n\targs := []string{\"--dump=json\"}\n\targs = append(args, os.Args[1:]...)\n\tcmd := exec.Command(\"rsc\", args...)\n\t_, args = extractArg(\"--dump\", args)\n\t_, args = extractArg(\"--host\", args)\n\t_, args = extractArg(\"--key\", args)\n\tvar out bytes.Buffer\n\tvar outErr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &outErr\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tfail(\"can't create pipe: %s\", err)\n\t}\n\terr = syscall.Dup2(int(w.Fd()), 10)\n\tif err != nil {\n\t\tfail(\"can't create fd 10: %s\", err)\n\t}\n\tfd10 := os.NewFile(10, \"fd10\")\n\tcmd.ExtraFiles = append(cmd.ExtraFiles, fd10)\n\terr = cmd.Run()\n\tfd10.Close()\n\tw.Close()\n\texitCode := 0\n\tif err != nil {\n\t\texCode := exitRegexp.FindStringSubmatch(err.Error())\n\t\tif len(exCode) == 2 {\n\t\t\texitCode, err = strconv.Atoi(exCode[1])\n\t\t\tif err != nil {\n\t\t\t\tfail(\"Invalid exit code '%s': %s\", exCode[1], err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tfail(\"%s %s failed: %s\\n%s\\n%s\\n\", \"rsc\", strings.Join(args, \" \"), err, out.String(), outErr.String())\n\t\t}\n\t}\n\traw, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tfail(\"read rsc dump: %s\\n\", err, outErr.String())\n\t}\n\tr.Close()\n\tvar rr recording.RequestResponse\n\terr = json.Unmarshal(raw, &rr)\n\tif err != nil {\n\t\tfail(\"load rsc dump: %s - dump was:\\n%s\\noutput was:\\n%s\\n\", err, string(raw), out.String())\n\t}\n\trr.ReqHeader.Del(\"Authorization\")\n\trr.ReqHeader.Del(\"User-Agent\")\n\trr.RespHeader.Del(\"Cache-Control\")\n\trr.RespHeader.Del(\"Connection\")\n\trr.RespHeader.Del(\"Set-Cookie\")\n\trr.RespHeader.Del(\"Strict-Transport-Security\")\n\trr.RespHeader.Del(\"X-Request-Uuid\")\n\trecord := recording.Recording{\n\t\tCmdArgs: args,\n\t\tExitCode: exitCode,\n\t\tStdout: out.String(),\n\t\tRR: rr,\n\t}\n\tjs, err := json.MarshalIndent(record, \"\", \" \")\n\tif err != nil {\n\t\tfail(\"failed to serialize record: %s\", err)\n\t}\n\twrite(js)\n\t\/\/ Echo output of rsc so calling script can use it\n\tfmt.Print(out.String())\n}\n\n\/\/ Extract command line argument with given name and return remaining arguments\nfunc extractArg(name string, args []string) (string, []string) {\n\tvar val string\n\tvar newArgs []string\n\tvar skip bool\n\tfor i, a := range args {\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(a, \"=\") {\n\t\t\telems := strings.SplitN(a, \"=\", 2)\n\t\t\tif elems[0] == name {\n\t\t\t\tval = elems[1]\n\t\t\t} else {\n\t\t\t\tnewArgs = append(newArgs, a)\n\t\t\t}\n\t\t} else if a == name && len(args) > (i+1) {\n\t\t\tval = args[i+1]\n\t\t\tskip = true\n\t\t} else {\n\t\t\tnewArgs = append(newArgs, a)\n\t\t}\n\t}\n\treturn val, newArgs\n}\n\n\/\/ Helper function that appends a string to output file\nfunc write(b []byte) {\n\tf, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tfail(\"failed to open output file\")\n\t}\n\tf.Write(b)\n\tf.WriteString(\"\\n\")\n\tf.Close()\n}\n\n\/\/ Print error message and exit with status code 1\nfunc fail(msg string, format ...interface{}) {\n\tfmt.Fprintf(os.Stderr, msg+\"\\n\", format...)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package redsync provides a Redis-based distributed mutual exclusion lock implementation as described in the blog post http:\/\/antirez.com\/news\/77.\n\/\/\n\/\/ Values containing the types defined in this package should not be copied.\npackage redsync\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nconst (\n\t\/\/ DefaultExpiry is used when Mutex Duration is 0\n\tDefaultExpiry = 8 * time.Second\n\t\/\/ DefaultTries is used when Mutex Duration is 0\n\tDefaultTries = 16\n\t\/\/ DefaultDelay is used when Mutex Delay is 0\n\tDefaultDelay = 512 * time.Millisecond\n\t\/\/ DefaultFactor is used when Mutex Factor is 0\n\tDefaultFactor = 0.01\n)\n\nvar (\n\t\/\/ ErrFailed is returned when lock cannot be acquired\n\tErrFailed = errors.New(\"failed to acquire lock\")\n)\n\n\/\/ Locker interface with Lock returning an error when lock cannot be aquired\ntype Locker interface {\n\tLock() error\n\tUnlock()\n}\n\n\/\/ A Mutex is a mutual exclusion lock.\n\/\/\n\/\/ Fields of a Mutex must not be changed after first use.\ntype Mutex struct {\n\tName string \/\/ Resouce name\n\tExpiry time.Duration \/\/ Duration for which the lock is valid, DefaultExpiry if 0\n\n\tTries int \/\/ Number of attempts to acquire lock before admitting failure, DefaultTries if 0\n\tDelay time.Duration \/\/ Delay between two attempts to acquire lock, DefaultDelay if 0\n\n\tFactor float64 \/\/ Drift factor, DefaultFactor if 0\n\n\tQuorum int \/\/ Quorum for the lock, set to len(addrs)\/2+1 by NewMutex()\n\n\tvalue string\n\tuntil time.Time\n\n\tnodes []*redis.Pool\n\tnodem sync.Mutex\n}\n\nvar _ = Locker(&Mutex{})\n\n\/\/ NewMutex returns a new Mutex on a named resource connected to the Redis instances at given addresses.\nfunc NewMutex(name string, addrs []net.Addr) (*Mutex, error) {\n\tif len(addrs) == 0 {\n\t\tpanic(\"redsync: addrs is empty\")\n\t}\n\n\tnodes := make([]*redis.Pool, len(addrs))\n\tfor i, addr := range addrs {\n\t\tdialTo := addr\n\t\tnode := &redis.Pool{\n\t\t\tMaxActive: 1,\n\t\t\tWait: true,\n\t\t\tDial: func() (redis.Conn, error) {\n\t\t\t\treturn redis.Dial(\"tcp\", \"127.0.0.1\"+dialTo.String())\n\t\t\t},\n\t\t}\n\t\tnodes[i] = node\n\t}\n\n\treturn NewMutexWithPool(name, nodes)\n}\n\n\/\/ NewMutexWithPool returns a new Mutex on a named resource connected to the Redis instances at given redis Pools.\nfunc NewMutexWithPool(name string, nodes []*redis.Pool) (*Mutex, error) {\n\tif len(nodes) == 0 {\n\t\tpanic(\"redsync: nodes is empty\")\n\t}\n\n\treturn &Mutex{\n\t\tName: name,\n\t\tQuorum: len(nodes)\/2 + 1,\n\t\tnodes: nodes,\n\t}, nil\n}\n\n\/\/ Lock locks m.\n\/\/ In case it returns an error on failure, you may retry to acquire the lock by calling this method again.\nfunc (m *Mutex) Lock() error {\n\tm.nodem.Lock()\n\tdefer m.nodem.Unlock()\n\n\tb := make([]byte, 16)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalue := base64.StdEncoding.EncodeToString(b)\n\n\texpiry := m.Expiry\n\tif expiry == 0 {\n\t\texpiry = DefaultExpiry\n\t}\n\n\tretries := m.Tries\n\tif retries == 0 {\n\t\tretries = DefaultTries\n\t}\n\n\tfor i := 0; i < retries; i++ {\n\t\tn := 0\n\t\tstart := time.Now()\n\t\tfor _, node := range m.nodes {\n\t\t\tif node == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconn := node.Get()\n\t\t\treply, err := redis.String(conn.Do(\"set\", m.Name, value, \"nx\", \"px\", int(expiry\/time.Millisecond)))\n\t\t\tconn.Close()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif reply != \"OK\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn++\n\t\t}\n\n\t\tfactor := m.Factor\n\t\tif factor == 0 {\n\t\t\tfactor = DefaultFactor\n\t\t}\n\n\t\tuntil := time.Now().Add(expiry - time.Now().Sub(start) - time.Duration(int64(float64(expiry)*factor)) + 2*time.Millisecond)\n\t\tif n >= m.Quorum && time.Now().Before(until) {\n\t\t\tm.value = value\n\t\t\tm.until = until\n\t\t\treturn nil\n\t\t}\n\t\tfor _, node := range m.nodes {\n\t\t\tif node == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconn := node.Get()\n\t\t\t_, err := delScript.Do(conn, m.Name, value)\n\t\t\tconn.Close()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tdelay := m.Delay\n\t\tif delay == 0 {\n\t\t\tdelay = DefaultDelay\n\t\t}\n\t\ttime.Sleep(delay)\n\t}\n\n\treturn ErrFailed\n}\n\n\/\/ Unlock unlocks m.\n\/\/ It is a run-time error if m is not locked on entry to Unlock.\nfunc (m *Mutex) Unlock() {\n\tm.nodem.Lock()\n\tdefer m.nodem.Unlock()\n\n\tvalue := m.value\n\tif value == \"\" {\n\t\tpanic(\"redsync: unlock of unlocked mutex\")\n\t}\n\n\tm.value = \"\"\n\tm.until = time.Unix(0, 0)\n\n\tfor _, node := range m.nodes {\n\t\tif node == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tconn := node.Get()\n\t\tdelScript.Do(conn, m.Name, value)\n\t\tconn.Close()\n\t}\n}\n\nvar delScript = redis.NewScript(1, `\nif redis.call(\"get\", KEYS[1]) == ARGV[1] then\n\treturn redis.call(\"del\", KEYS[1])\nelse\n\treturn 0\nend`)\n<commit_msg>Remove hardcoded IP<commit_after>\/\/ Package redsync provides a Redis-based distributed mutual exclusion lock implementation as described in the blog post http:\/\/antirez.com\/news\/77.\n\/\/\n\/\/ Values containing the types defined in this package should not be copied.\npackage redsync\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n)\n\nconst (\n\t\/\/ DefaultExpiry is used when Mutex Duration is 0\n\tDefaultExpiry = 8 * time.Second\n\t\/\/ DefaultTries is used when Mutex Duration is 0\n\tDefaultTries = 16\n\t\/\/ DefaultDelay is used when Mutex Delay is 0\n\tDefaultDelay = 512 * time.Millisecond\n\t\/\/ DefaultFactor is used when Mutex Factor is 0\n\tDefaultFactor = 0.01\n)\n\nvar (\n\t\/\/ ErrFailed is returned when lock cannot be acquired\n\tErrFailed = errors.New(\"failed to acquire lock\")\n)\n\n\/\/ Locker interface with Lock returning an error when lock cannot be aquired\ntype Locker interface {\n\tLock() error\n\tUnlock()\n}\n\n\/\/ A Mutex is a mutual exclusion lock.\n\/\/\n\/\/ Fields of a Mutex must not be changed after first use.\ntype Mutex struct {\n\tName string \/\/ Resouce name\n\tExpiry time.Duration \/\/ Duration for which the lock is valid, DefaultExpiry if 0\n\n\tTries int \/\/ Number of attempts to acquire lock before admitting failure, DefaultTries if 0\n\tDelay time.Duration \/\/ Delay between two attempts to acquire lock, DefaultDelay if 0\n\n\tFactor float64 \/\/ Drift factor, DefaultFactor if 0\n\n\tQuorum int \/\/ Quorum for the lock, set to len(addrs)\/2+1 by NewMutex()\n\n\tvalue string\n\tuntil time.Time\n\n\tnodes []*redis.Pool\n\tnodem sync.Mutex\n}\n\nvar _ = Locker(&Mutex{})\n\n\/\/ NewMutex returns a new Mutex on a named resource connected to the Redis instances at given addresses.\nfunc NewMutex(name string, addrs []net.Addr) (*Mutex, error) {\n\tif len(addrs) == 0 {\n\t\tpanic(\"redsync: addrs is empty\")\n\t}\n\n\tnodes := make([]*redis.Pool, len(addrs))\n\tfor i, addr := range addrs {\n\t\tdialTo := addr\n\t\tnode := &redis.Pool{\n\t\t\tMaxActive: 1,\n\t\t\tWait: true,\n\t\t\tDial: func() (redis.Conn, error) {\n\t\t\t\treturn redis.Dial(\"tcp\", dialTo.String())\n\t\t\t},\n\t\t}\n\t\tnodes[i] = node\n\t}\n\n\treturn NewMutexWithPool(name, nodes)\n}\n\n\/\/ NewMutexWithPool returns a new Mutex on a named resource connected to the Redis instances at given redis Pools.\nfunc NewMutexWithPool(name string, nodes []*redis.Pool) (*Mutex, error) {\n\tif len(nodes) == 0 {\n\t\tpanic(\"redsync: nodes is empty\")\n\t}\n\n\treturn &Mutex{\n\t\tName: name,\n\t\tQuorum: len(nodes)\/2 + 1,\n\t\tnodes: nodes,\n\t}, nil\n}\n\n\/\/ Lock locks m.\n\/\/ In case it returns an error on failure, you may retry to acquire the lock by calling this method again.\nfunc (m *Mutex) Lock() error {\n\tm.nodem.Lock()\n\tdefer m.nodem.Unlock()\n\n\tb := make([]byte, 16)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalue := base64.StdEncoding.EncodeToString(b)\n\n\texpiry := m.Expiry\n\tif expiry == 0 {\n\t\texpiry = DefaultExpiry\n\t}\n\n\tretries := m.Tries\n\tif retries == 0 {\n\t\tretries = DefaultTries\n\t}\n\n\tfor i := 0; i < retries; i++ {\n\t\tn := 0\n\t\tstart := time.Now()\n\t\tfor _, node := range m.nodes {\n\t\t\tif node == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconn := node.Get()\n\t\t\treply, err := redis.String(conn.Do(\"set\", m.Name, value, \"nx\", \"px\", int(expiry\/time.Millisecond)))\n\t\t\tconn.Close()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif reply != \"OK\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn++\n\t\t}\n\n\t\tfactor := m.Factor\n\t\tif factor == 0 {\n\t\t\tfactor = DefaultFactor\n\t\t}\n\n\t\tuntil := time.Now().Add(expiry - time.Now().Sub(start) - time.Duration(int64(float64(expiry)*factor)) + 2*time.Millisecond)\n\t\tif n >= m.Quorum && time.Now().Before(until) {\n\t\t\tm.value = value\n\t\t\tm.until = until\n\t\t\treturn nil\n\t\t}\n\t\tfor _, node := range m.nodes {\n\t\t\tif node == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconn := node.Get()\n\t\t\t_, err := delScript.Do(conn, m.Name, value)\n\t\t\tconn.Close()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tdelay := m.Delay\n\t\tif delay == 0 {\n\t\t\tdelay = DefaultDelay\n\t\t}\n\t\ttime.Sleep(delay)\n\t}\n\n\treturn ErrFailed\n}\n\n\/\/ Unlock unlocks m.\n\/\/ It is a run-time error if m is not locked on entry to Unlock.\nfunc (m *Mutex) Unlock() {\n\tm.nodem.Lock()\n\tdefer m.nodem.Unlock()\n\n\tvalue := m.value\n\tif value == \"\" {\n\t\tpanic(\"redsync: unlock of unlocked mutex\")\n\t}\n\n\tm.value = \"\"\n\tm.until = time.Unix(0, 0)\n\n\tfor _, node := range m.nodes {\n\t\tif node == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tconn := node.Get()\n\t\tdelScript.Do(conn, m.Name, value)\n\t\tconn.Close()\n\t}\n}\n\nvar delScript = redis.NewScript(1, `\nif redis.call(\"get\", KEYS[1]) == ARGV[1] then\n\treturn redis.call(\"del\", KEYS[1])\nelse\n\treturn 0\nend`)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage gce\n\nimport (\n\t\"code.google.com\/p\/google-api-go-client\/compute\/v1\"\n\t\"encoding\/base64\"\n\t\"github.com\/juju\/errors\"\n\n\t\"github.com\/juju\/juju\/constraints\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/environs\/imagemetadata\"\n\t\"github.com\/juju\/juju\/environs\/instances\"\n\t\"github.com\/juju\/juju\/environs\/simplestreams\"\n\t\"github.com\/juju\/juju\/instance\"\n\t\"github.com\/juju\/juju\/provider\/common\"\n\t\"github.com\/juju\/juju\/state\/multiwatcher\"\n\t\"github.com\/juju\/juju\/tools\"\n)\n\nfunc (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {\n\t\/\/ Please note that in order to fulfil the demands made of Instances and\n\t\/\/ AllInstances, it is imperative that some environment feature be used to\n\t\/\/ keep track of which instances were actually started by juju.\n\tenv = env.getSnapshot()\n\n\t\/\/ Start a new raw instance.\n\n\tif args.MachineConfig.HasNetworks() {\n\t\treturn nil, errors.New(\"starting instances with networks is not supported yet\")\n\t}\n\n\tspec, err := env.finishMachineConfig(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\traw, err := env.newRawInstance(args, spec)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tlogger.Infof(\"started instance %q in zone %q\", raw.Name, zoneName(raw))\n\n\t\/\/ Build the result.\n\n\tinst := newInstance(raw, env)\n\tinst.updateDisk(raw)\n\t\/\/ TODO(ericsnow) Pass diskMB here (instead of using inst.rootDiskMB)?\n\thwc := env.getHardwareCharacteristics(spec, inst)\n\n\tresult := environs.StartInstanceResult{\n\t\tInstance: inst,\n\t\tHardware: hwc,\n\t}\n\treturn &result, nil\n}\n\nfunc (env *environ) finishMachineConfig(args environs.StartInstanceParams) (*instances.InstanceSpec, error) {\n\tarches := args.Tools.Arches()\n\tseries := args.Tools.OneSeries()\n\tspec, err := env.findInstanceSpec(env.Config().ImageStream(), &instances.InstanceConstraint{\n\t\tRegion: env.ecfg.region(),\n\t\tSeries: series,\n\t\tArches: arches,\n\t\tConstraints: args.Constraints,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tenvTools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"chosen architecture %v not present in %v\", spec.Image.Arch, arches)\n\t}\n\n\targs.MachineConfig.Tools = envTools[0]\n\terr = environs.FinishMachineConfig(args.MachineConfig, env.Config())\n\treturn spec, errors.Trace(err)\n}\n\nfunc (env *environ) findInstanceSpec(stream string, ic *instances.InstanceConstraint) (*instances.InstanceSpec, error) {\n\tsources, err := environs.ImageMetadataSources(env)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tcloudSpec, err := env.cloudSpec(ic.Region)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\timageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{\n\t\tCloudSpec: cloudSpec,\n\t\tSeries: []string{ic.Series},\n\t\tArches: ic.Arches,\n\t\tStream: stream,\n\t})\n\n\tmatchingImages, _, err := imagemetadata.Fetch(sources, imageConstraint, signedImageDataOnly)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\timages := instances.ImageMetadataToImages(matchingImages)\n\tspec, err := instances.FindInstanceSpec(images, ic, allInstanceTypes)\n\treturn spec, errors.Trace(err)\n}\n\nfunc (env *environ) newRawInstance(args environs.StartInstanceParams, spec *instances.InstanceSpec) (*compute.Instance, error) {\n\t\/\/ Compose the instance.\n\tmachineID := common.MachineFullName(env, args.MachineConfig.MachineId)\n\tdisks := getDisks(spec, args.Constraints)\n\tnetworkInterfaces := getNetworkInterfaces()\n\tmetadata, err := getMetadata(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tinstance := &compute.Instance{\n\t\t\/\/ MachineType is set in the env.gce.newInstance call.\n\t\tName: machineID,\n\t\tDisks: disks,\n\t\tNetworkInterfaces: networkInterfaces,\n\t\tMetadata: metadata,\n\t}\n\n\tavailabilityZones, err := env.parseAvailabilityZones(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t\/\/ TODO(ericsnow) Drop carrying InitializeParams over once\n\t\/\/ gceConnection.disk is working?\n\tdiskInit := rootDisk(instance).InitializeParams\n\tif err := env.gce.addInstance(instance, spec.InstanceType.Name, availabilityZones); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif rootDisk(instance).InitializeParams == nil {\n\t\trootDisk(instance).InitializeParams = diskInit\n\t}\n\treturn instance, nil\n}\n\nfunc getMetadata(args environs.StartInstanceParams) (*compute.Metadata, error) {\n\tuserData, err := environs.ComposeUserData(args.MachineConfig, nil)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"cannot make user data\")\n\t}\n\tlogger.Debugf(\"GCE user data; %d bytes\", len(userData))\n\n\tauthKeys, err := gceSSHKeys()\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\t\/\/ Handle state machines.\n\tvar role string\n\tif multiwatcher.AnyJobNeedsState(args.MachineConfig.Jobs...) {\n\t\trole = roleState\n\t}\n\n\tb64UserData := base64.StdEncoding.EncodeToString([]byte(userData))\n\treturn packMetadata(map[string]string{\n\t\tmetadataKeyRole: role,\n\t\t\/\/ We store a gz snapshop of information that is used by\n\t\t\/\/ cloud-init and unpacked in to the \/var\/lib\/cloud\/instances folder\n\t\t\/\/ for the instance. Due to a limitation with GCE and binary blobs\n\t\t\/\/ we base64 encode the data before storing it.\n\t\tmetadataKeyCloudInit: b64UserData,\n\t\t\/\/ Valid encoding values are determined by the cloudinit GCE data source.\n\t\t\/\/ See: http:\/\/cloudinit.readthedocs.org\n\t\tmetadataKeyEncoding: \"base64\",\n\t\tmetadataKeySSHKeys: authKeys,\n\t}), nil\n}\n\nfunc getDisks(spec *instances.InstanceSpec, cons constraints.Value) []*compute.AttachedDisk {\n\tsize := common.MinRootDiskSizeGiB\n\tif cons.RootDisk != nil && *cons.RootDisk > uint64(size) {\n\t\tsize = int64(common.MiBToGiB(*cons.RootDisk))\n\t}\n\tdSpec := diskSpec{\n\t\tsizeHint: size,\n\t\timageURL: imageBasePath + spec.Image.Id,\n\t\tboot: true,\n\t\tautoDelete: true,\n\t}\n\trootDisk := dSpec.newAttached()\n\tif cons.RootDisk != nil && dSpec.size() == int64(minDiskSize) {\n\t\tmsg := \"Ignoring root-disk constraint of %dM because it is smaller than the GCE image size of %dM\"\n\t\tlogger.Infof(msg, *cons.RootDisk, minDiskSize)\n\t}\n\treturn []*compute.AttachedDisk{rootDisk}\n}\n\nfunc getNetworkInterfaces() []*compute.NetworkInterface {\n\t\/\/ TODO(ericsnow) Use the env ID for the network name\n\t\/\/ (instead of default)?\n\t\/\/ TODO(ericsnow) Make the network name configurable?\n\t\/\/ TODO(ericsnow) Support multiple networks?\n\tspec := networkSpec{}\n\t\/\/ TODO(ericsnow) Use a different name? Configurable?\n\trootIF := spec.newInterface(\"External NAT\")\n\treturn []*compute.NetworkInterface{rootIF}\n}\n\nfunc (env *environ) getHardwareCharacteristics(spec *instances.InstanceSpec, inst *environInstance) *instance.HardwareCharacteristics {\n\thwc := instance.HardwareCharacteristics{\n\t\tArch: &spec.Image.Arch,\n\t\tMem: &spec.InstanceType.Mem,\n\t\tCpuCores: &spec.InstanceType.CpuCores,\n\t\tCpuPower: spec.InstanceType.CpuPower,\n\t\tRootDisk: &inst.rootDiskMB,\n\t\tAvailabilityZone: &inst.zone,\n\t\t\/\/ Tags: not supported in GCE.\n\t}\n\treturn &hwc\n}\n\nfunc (env *environ) AllInstances() ([]instance.Instance, error) {\n\tinstances, err := env.instances()\n\treturn instances, errors.Trace(err)\n}\n\nfunc (env *environ) StopInstances(instances ...instance.Id) error {\n\tenv = env.getSnapshot()\n\n\tvar ids []string\n\tfor _, id := range instances {\n\t\tids = append(ids, string(id))\n\t}\n\terr := env.gce.removeInstances(env, ids...)\n\treturn errors.Trace(err)\n}\n<commit_msg>provider\/gce: Add an isStateServer helper.<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage gce\n\nimport (\n\t\"code.google.com\/p\/google-api-go-client\/compute\/v1\"\n\t\"encoding\/base64\"\n\t\"github.com\/juju\/errors\"\n\n\t\"github.com\/juju\/juju\/constraints\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/environs\/imagemetadata\"\n\t\"github.com\/juju\/juju\/environs\/instances\"\n\t\"github.com\/juju\/juju\/environs\/simplestreams\"\n\t\"github.com\/juju\/juju\/instance\"\n\t\"github.com\/juju\/juju\/provider\/common\"\n\t\"github.com\/juju\/juju\/state\/multiwatcher\"\n\t\"github.com\/juju\/juju\/tools\"\n)\n\nfunc (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {\n\t\/\/ Please note that in order to fulfil the demands made of Instances and\n\t\/\/ AllInstances, it is imperative that some environment feature be used to\n\t\/\/ keep track of which instances were actually started by juju.\n\tenv = env.getSnapshot()\n\n\t\/\/ Start a new raw instance.\n\n\tif args.MachineConfig.HasNetworks() {\n\t\treturn nil, errors.New(\"starting instances with networks is not supported yet\")\n\t}\n\n\tspec, err := env.finishMachineConfig(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\traw, err := env.newRawInstance(args, spec)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tlogger.Infof(\"started instance %q in zone %q\", raw.Name, zoneName(raw))\n\n\t\/\/ Build the result.\n\n\tinst := newInstance(raw, env)\n\tinst.updateDisk(raw)\n\t\/\/ TODO(ericsnow) Pass diskMB here (instead of using inst.rootDiskMB)?\n\thwc := env.getHardwareCharacteristics(spec, inst)\n\n\tresult := environs.StartInstanceResult{\n\t\tInstance: inst,\n\t\tHardware: hwc,\n\t}\n\treturn &result, nil\n}\n\nfunc (env *environ) finishMachineConfig(args environs.StartInstanceParams) (*instances.InstanceSpec, error) {\n\tarches := args.Tools.Arches()\n\tseries := args.Tools.OneSeries()\n\tspec, err := env.findInstanceSpec(env.Config().ImageStream(), &instances.InstanceConstraint{\n\t\tRegion: env.ecfg.region(),\n\t\tSeries: series,\n\t\tArches: arches,\n\t\tConstraints: args.Constraints,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tenvTools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"chosen architecture %v not present in %v\", spec.Image.Arch, arches)\n\t}\n\n\targs.MachineConfig.Tools = envTools[0]\n\terr = environs.FinishMachineConfig(args.MachineConfig, env.Config())\n\treturn spec, errors.Trace(err)\n}\n\nfunc (env *environ) findInstanceSpec(stream string, ic *instances.InstanceConstraint) (*instances.InstanceSpec, error) {\n\tsources, err := environs.ImageMetadataSources(env)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tcloudSpec, err := env.cloudSpec(ic.Region)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\timageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{\n\t\tCloudSpec: cloudSpec,\n\t\tSeries: []string{ic.Series},\n\t\tArches: ic.Arches,\n\t\tStream: stream,\n\t})\n\n\tmatchingImages, _, err := imagemetadata.Fetch(sources, imageConstraint, signedImageDataOnly)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\timages := instances.ImageMetadataToImages(matchingImages)\n\tspec, err := instances.FindInstanceSpec(images, ic, allInstanceTypes)\n\treturn spec, errors.Trace(err)\n}\n\nfunc isStateServer(mcfg cloudinit.MachineConfig) bool {\n\treturn multiwatcher.AnyJobNeedsState(mcfg.Jobs...)\n}\n\nfunc (env *environ) newRawInstance(args environs.StartInstanceParams, spec *instances.InstanceSpec) (*compute.Instance, error) {\n\t\/\/ Compose the instance.\n\tmachineID := common.MachineFullName(env, args.MachineConfig.MachineId)\n\tdisks := getDisks(spec, args.Constraints)\n\tnetworkInterfaces := getNetworkInterfaces()\n\tmetadata, err := getMetadata(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tinstance := &compute.Instance{\n\t\t\/\/ MachineType is set in the env.gce.newInstance call.\n\t\tName: machineID,\n\t\tDisks: disks,\n\t\tNetworkInterfaces: networkInterfaces,\n\t\tMetadata: metadata,\n\t}\n\n\tavailabilityZones, err := env.parseAvailabilityZones(args)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t\/\/ TODO(ericsnow) Drop carrying InitializeParams over once\n\t\/\/ gceConnection.disk is working?\n\tdiskInit := rootDisk(instance).InitializeParams\n\tif err := env.gce.addInstance(instance, spec.InstanceType.Name, availabilityZones); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tif rootDisk(instance).InitializeParams == nil {\n\t\trootDisk(instance).InitializeParams = diskInit\n\t}\n\treturn instance, nil\n}\n\nfunc getMetadata(args environs.StartInstanceParams) (*compute.Metadata, error) {\n\tuserData, err := environs.ComposeUserData(args.MachineConfig, nil)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"cannot make user data\")\n\t}\n\tlogger.Debugf(\"GCE user data; %d bytes\", len(userData))\n\n\tauthKeys, err := gceSSHKeys()\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\t\/\/ Handle state machines.\n\tvar role string\n\tif isStateServer(args.MachineConfig) {\n\t\trole = roleState\n\t}\n\n\tb64UserData := base64.StdEncoding.EncodeToString([]byte(userData))\n\treturn packMetadata(map[string]string{\n\t\tmetadataKeyRole: role,\n\t\t\/\/ We store a gz snapshop of information that is used by\n\t\t\/\/ cloud-init and unpacked in to the \/var\/lib\/cloud\/instances folder\n\t\t\/\/ for the instance. Due to a limitation with GCE and binary blobs\n\t\t\/\/ we base64 encode the data before storing it.\n\t\tmetadataKeyCloudInit: b64UserData,\n\t\t\/\/ Valid encoding values are determined by the cloudinit GCE data source.\n\t\t\/\/ See: http:\/\/cloudinit.readthedocs.org\n\t\tmetadataKeyEncoding: \"base64\",\n\t\tmetadataKeySSHKeys: authKeys,\n\t}), nil\n}\n\nfunc getDisks(spec *instances.InstanceSpec, cons constraints.Value) []*compute.AttachedDisk {\n\tsize := common.MinRootDiskSizeGiB\n\tif cons.RootDisk != nil && *cons.RootDisk > uint64(size) {\n\t\tsize = int64(common.MiBToGiB(*cons.RootDisk))\n\t}\n\tdSpec := diskSpec{\n\t\tsizeHint: size,\n\t\timageURL: imageBasePath + spec.Image.Id,\n\t\tboot: true,\n\t\tautoDelete: true,\n\t}\n\trootDisk := dSpec.newAttached()\n\tif cons.RootDisk != nil && dSpec.size() == int64(minDiskSize) {\n\t\tmsg := \"Ignoring root-disk constraint of %dM because it is smaller than the GCE image size of %dM\"\n\t\tlogger.Infof(msg, *cons.RootDisk, minDiskSize)\n\t}\n\treturn []*compute.AttachedDisk{rootDisk}\n}\n\nfunc getNetworkInterfaces() []*compute.NetworkInterface {\n\t\/\/ TODO(ericsnow) Use the env ID for the network name\n\t\/\/ (instead of default)?\n\t\/\/ TODO(ericsnow) Make the network name configurable?\n\t\/\/ TODO(ericsnow) Support multiple networks?\n\tspec := networkSpec{}\n\t\/\/ TODO(ericsnow) Use a different name? Configurable?\n\trootIF := spec.newInterface(\"External NAT\")\n\treturn []*compute.NetworkInterface{rootIF}\n}\n\nfunc (env *environ) getHardwareCharacteristics(spec *instances.InstanceSpec, inst *environInstance) *instance.HardwareCharacteristics {\n\thwc := instance.HardwareCharacteristics{\n\t\tArch: &spec.Image.Arch,\n\t\tMem: &spec.InstanceType.Mem,\n\t\tCpuCores: &spec.InstanceType.CpuCores,\n\t\tCpuPower: spec.InstanceType.CpuPower,\n\t\tRootDisk: &inst.rootDiskMB,\n\t\tAvailabilityZone: &inst.zone,\n\t\t\/\/ Tags: not supported in GCE.\n\t}\n\treturn &hwc\n}\n\nfunc (env *environ) AllInstances() ([]instance.Instance, error) {\n\tinstances, err := env.instances()\n\treturn instances, errors.Trace(err)\n}\n\nfunc (env *environ) StopInstances(instances ...instance.Id) error {\n\tenv = env.getSnapshot()\n\n\tvar ids []string\n\tfor _, id := range instances {\n\t\tids = append(ids, string(id))\n\t}\n\terr := env.gce.removeInstances(env, ids...)\n\treturn errors.Trace(err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/docker-cluster\/cluster\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/storage\"\n\t\"github.com\/tsuru\/tsuru\/iaas\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/safe\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype autoScaleEvent struct {\n\tID interface{} `bson:\"_id\"`\n\tMetadataValue string\n\tAction string \/\/ \"rebalance\" or \"add\"\n\tStartTime time.Time\n\tEndTime time.Time `bson:\",omitempty\"`\n\tSuccessful bool\n\tError string `bson:\",omitempty\"`\n}\n\nfunc autoScaleCollection() (*storage.Collection, error) {\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tname, err := config.GetString(\"docker:collection\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn.Collection(fmt.Sprintf(\"%s_auto_scale\", name)), nil\n}\n\nvar errAutoScaleRunning = errors.New(\"autoscale already running\")\n\nfunc newAutoScaleEvent(metadataValue, action string) (*autoScaleEvent, error) {\n\t\/\/ Use metadataValue as ID to ensure only one auto scale process runs for\n\t\/\/ each metadataValue. (*autoScaleEvent).update() will generate a new\n\t\/\/ unique ID and remove this initial record.\n\tevt := autoScaleEvent{\n\t\tID: metadataValue,\n\t\tStartTime: time.Now().UTC(),\n\t\tMetadataValue: metadataValue,\n\t\tAction: action,\n\t}\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer coll.Close()\n\terr = coll.Insert(evt)\n\tif mgo.IsDup(err) {\n\t\treturn nil, errAutoScaleRunning\n\t}\n\treturn &evt, err\n}\n\nfunc (evt *autoScaleEvent) update(err error) error {\n\tif err != nil {\n\t\tevt.Error = err.Error()\n\t}\n\tevt.Successful = err == nil\n\tevt.EndTime = time.Now().UTC()\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer coll.Close()\n\tdefer coll.RemoveId(evt.ID)\n\tevt.ID = bson.NewObjectId()\n\treturn coll.Insert(evt)\n}\n\nfunc listAutoScaleEvents(skip, limit int) ([]autoScaleEvent, error) {\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tquery := coll.Find(nil).Sort(\"-_id\")\n\tif skip != 0 {\n\t\tquery = query.Skip(skip)\n\t}\n\tif limit != 0 {\n\t\tquery = query.Limit(limit)\n\t}\n\tvar list []autoScaleEvent\n\terr = query.All(&list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}\n\ntype autoScaleConfig struct {\n\tprovisioner *dockerProvisioner\n\tmatadataFilter string\n\tgroupByMetadata string\n\ttotalMemoryMetadata string\n\tmaxMemoryRatio float32\n\tmaxContainerCount int\n\twaitTimeNewMachine time.Duration\n\trunInterval time.Duration\n\tdone chan bool\n}\n\ntype metaWithFrequency struct {\n\tmetadata map[string]string\n\tfreq int\n}\n\ntype metaWithFrequencyList []metaWithFrequency\n\nfunc (l metaWithFrequencyList) Len() int { return len(l) }\nfunc (l metaWithFrequencyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }\nfunc (l metaWithFrequencyList) Less(i, j int) bool { return l[i].freq < l[j].freq }\n\nfunc (a *autoScaleConfig) run() error {\n\tisMemoryBased := a.totalMemoryMetadata != \"\" && a.maxMemoryRatio != 0\n\tif !isMemoryBased && a.maxContainerCount == 0 {\n\t\terr := fmt.Errorf(\"[node autoscale] aborting node auto scale, either memory information or max container count must be informed in config\")\n\t\tlog.Error(err.Error())\n\t\treturn err\n\t}\n\toneMinute := 1 * time.Minute\n\tif a.runInterval < oneMinute {\n\t\ta.runInterval = oneMinute\n\t}\n\tif a.waitTimeNewMachine < oneMinute {\n\t\ta.waitTimeNewMachine = oneMinute\n\t}\n\tfor {\n\t\terr := a.runOnce(isMemoryBased)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"[node autoscale] %s\", err.Error())\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t\tselect {\n\t\tcase <-a.done:\n\t\t\treturn err\n\t\tcase <-time.After(a.runInterval):\n\t\t}\n\t}\n}\n\nfunc (a *autoScaleConfig) stop() {\n\ta.done <- true\n}\n\nfunc (a *autoScaleConfig) runOnce(isMemoryBased bool) (retErr error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tretErr = fmt.Errorf(\"recovered panic, we can never stop! panic: %v\", r)\n\t\t}\n\t}()\n\tnodes, err := a.provisioner.getCluster().Nodes()\n\tif err != nil {\n\t\tretErr = fmt.Errorf(\"error getting nodes: %s\", err.Error())\n\t\treturn\n\t}\n\tclusterMap := map[string][]*cluster.Node{}\n\tfor i := range nodes {\n\t\tnode := &nodes[i]\n\t\tif a.groupByMetadata == \"\" {\n\t\t\tclusterMap[\"\"] = append(clusterMap[\"\"], node)\n\t\t\tcontinue\n\t\t}\n\t\tgroupMetadata := node.Metadata[a.groupByMetadata]\n\t\tif groupMetadata == \"\" {\n\t\t\tlog.Debugf(\"[node autoscale] skipped node %s, no metadata value for %s.\", node.Address, a.groupByMetadata)\n\t\t\tcontinue\n\t\t}\n\t\tif a.matadataFilter != \"\" && a.matadataFilter != groupMetadata {\n\t\t\tcontinue\n\t\t}\n\t\tclusterMap[groupMetadata] = append(clusterMap[groupMetadata], node)\n\t}\n\tfor groupMetadata, nodes := range clusterMap {\n\t\tif !isMemoryBased {\n\t\t\terr = a.scaleGroupByCount(groupMetadata, nodes)\n\t\t\tif err != nil {\n\t\t\t\tretErr = fmt.Errorf(\"error scaling group %s: %s\", groupMetadata, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (a *autoScaleConfig) scaleGroupByCount(groupMetadata string, nodes []*cluster.Node) error {\n\ttotalCount, gap, err := a.provisioner.containerGapInNodes(nodes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't find containers from nodes: %s\", err)\n\t}\n\tfreeSlots := (len(nodes) * a.maxContainerCount) - totalCount\n\tshouldRebalance := false\n\tvar rebalanceFilter map[string]string\n\tif a.groupByMetadata != \"\" {\n\t\trebalanceFilter = map[string]string{a.groupByMetadata: groupMetadata}\n\t}\n\tvar event *autoScaleEvent\n\tvar gapAfter int\n\tif freeSlots < 0 {\n\t\tshouldRebalance = true\n\t\tevent, err = newAutoScaleEvent(groupMetadata, \"add\")\n\t\tif err != nil {\n\t\t\tif err == errAutoScaleRunning {\n\t\t\t\tlog.Debugf(\"[node autoscale] skipping already running for: %s\", groupMetadata)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unable to create auto scale event: %s\", err)\n\t\t}\n\t\tlog.Debugf(\"[node autoscale] adding a new machine, metadata value: %s, free slots: %d\", groupMetadata, freeSlots)\n\t\terr := a.addNode(nodes)\n\t\tif err != nil {\n\t\t\tevent.update(err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tbuf := safe.NewBuffer(nil)\n\t\trebalanceProvisioner, err := a.provisioner.rebalanceContainersByFilter(buf, nil, rebalanceFilter, true)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"unable to run dry rebalance to check if rebalance is needed: %s - log: %s\", err, buf.String())\n\t\t\treturn nil\n\t\t}\n\t\t_, gapAfter, err = rebalanceProvisioner.containerGapInNodes(nodes)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't find containers from rebalanced nodes: %s\", err)\n\t\t}\n\t\tshouldRebalance = math.Abs((float64)(gap-gapAfter)) > 2.0\n\t}\n\tif shouldRebalance {\n\t\tlog.Debugf(\"[node autoscale] running rebalance, metadata value: %s, gap before: %d, gap after: %d\", groupMetadata, gap, gapAfter)\n\t\tif event == nil {\n\t\t\tevent, err = newAutoScaleEvent(groupMetadata, \"rebalance\")\n\t\t\tif err != nil {\n\t\t\t\tif err == errAutoScaleRunning {\n\t\t\t\t\tlog.Debugf(\"[node autoscale] skipping already running for: %s\", groupMetadata)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"unable to create auto scale event: %s\", err)\n\t\t\t}\n\t\t}\n\t\tbuf := safe.NewBuffer(nil)\n\t\t_, err := a.provisioner.rebalanceContainersByFilter(buf, nil, rebalanceFilter, false)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to rebalance containers: %s - log: %s\", err.Error(), buf.String())\n\t\t}\n\t\tevent.update(err)\n\t}\n\treturn nil\n}\n\nfunc (a *autoScaleConfig) addNode(modelNodes []*cluster.Node) error {\n\tmetadata, err := chooseMetadataFromNodes(modelNodes)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, hasIaas := metadata[\"iaas\"]\n\tif !hasIaas {\n\t\treturn fmt.Errorf(\"no IaaS information in nodes metadata: %#v\", metadata)\n\t}\n\tmachine, err := iaas.CreateMachineForIaaS(metadata[\"iaas\"], metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create machine: %s\", err.Error())\n\t}\n\tnewAddr := machine.FormatNodeAddress()\n\tlog.Debugf(\"[node autoscale] new machine created: %s - Waiting for docker to start...\", newAddr)\n\t_, err = a.provisioner.getCluster().WaitAndRegister(newAddr, metadata, a.waitTimeNewMachine)\n\tif err != nil {\n\t\tmachine.Destroy()\n\t\treturn fmt.Errorf(\"error registering new node %s: %s\", newAddr, err.Error())\n\t}\n\tlog.Debugf(\"[node autoscale] new machine created: %s - started!\", newAddr)\n\treturn nil\n}\n\nfunc splitMetadata(nodesMetadata []map[string]string) (metaWithFrequencyList, map[string]string, error) {\n\tcommon := make(map[string]string)\n\texclusive := make([]map[string]string, len(nodesMetadata))\n\tfor i := range nodesMetadata {\n\t\tmetadata := nodesMetadata[i]\n\t\tfor k, v := range metadata {\n\t\t\tisExclusive := false\n\t\t\tfor j := range nodesMetadata {\n\t\t\t\tif i == j {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\totherMetadata := nodesMetadata[j]\n\t\t\t\tif v != otherMetadata[k] {\n\t\t\t\t\tisExclusive = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif isExclusive {\n\t\t\t\tif exclusive[i] == nil {\n\t\t\t\t\texclusive[i] = make(map[string]string)\n\t\t\t\t}\n\t\t\t\texclusive[i][k] = v\n\t\t\t} else {\n\t\t\t\tcommon[k] = v\n\t\t\t}\n\t\t}\n\t}\n\tvar group metaWithFrequencyList\n\tsameMap := make(map[int]bool)\n\tfor i := range exclusive {\n\t\tfreq := 1\n\t\tfor j := range exclusive {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdiffCount := 0\n\t\t\tfor k, v := range exclusive[i] {\n\t\t\t\tif exclusive[j][k] != v {\n\t\t\t\t\tdiffCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif diffCount > 0 && diffCount < len(exclusive[i]) {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"unbalanced metadata for node group: %v vs %v\", exclusive[i], exclusive[j])\n\t\t\t}\n\t\t\tif diffCount == 0 {\n\t\t\t\tsameMap[j] = true\n\t\t\t\tfreq++\n\t\t\t}\n\t\t}\n\t\tif !sameMap[i] && exclusive[i] != nil {\n\t\t\tgroup = append(group, metaWithFrequency{metadata: exclusive[i], freq: freq})\n\t\t}\n\t}\n\treturn group, common, nil\n}\n\nfunc chooseMetadataFromNodes(modelNodes []*cluster.Node) (map[string]string, error) {\n\tmetadataList := make([]map[string]string, len(modelNodes))\n\tfor i, n := range modelNodes {\n\t\tmetadataList[i] = n.CleanMetadata()\n\t}\n\texclusiveList, baseMetadata, err := splitMetadata(metadataList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar chosenExclusive map[string]string\n\tif exclusiveList != nil {\n\t\tsort.Sort(exclusiveList)\n\t\tchosenExclusive = exclusiveList[0].metadata\n\t}\n\tfor k, v := range chosenExclusive {\n\t\tbaseMetadata[k] = v\n\t}\n\treturn baseMetadata, nil\n}\n\nfunc (p *dockerProvisioner) containerGapInNodes(nodes []*cluster.Node) (int, int, error) {\n\tmaxCount := 0\n\tminCount := 0\n\ttotalCount := 0\n\tfor _, n := range nodes {\n\t\tcontCount, err := p.countRunningContainersByHost(urlToHost(n.Address))\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t\tif contCount > maxCount {\n\t\t\tmaxCount = contCount\n\t\t}\n\t\tif minCount == 0 || contCount < minCount {\n\t\t\tminCount = contCount\n\t\t}\n\t\ttotalCount += contCount\n\t}\n\treturn totalCount, maxCount - minCount, nil\n}\n<commit_msg>provision\/docker: preparing for memory based auto scaling<commit_after>\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/config\"\n\t\"github.com\/tsuru\/docker-cluster\/cluster\"\n\t\"github.com\/tsuru\/tsuru\/db\"\n\t\"github.com\/tsuru\/tsuru\/db\/storage\"\n\t\"github.com\/tsuru\/tsuru\/iaas\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/safe\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\ntype autoScaleEvent struct {\n\tID interface{} `bson:\"_id\"`\n\tMetadataValue string\n\tAction string \/\/ \"rebalance\" or \"add\"\n\tStartTime time.Time\n\tEndTime time.Time `bson:\",omitempty\"`\n\tSuccessful bool\n\tError string `bson:\",omitempty\"`\n}\n\nfunc autoScaleCollection() (*storage.Collection, error) {\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tname, err := config.GetString(\"docker:collection\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn conn.Collection(fmt.Sprintf(\"%s_auto_scale\", name)), nil\n}\n\nvar errAutoScaleRunning = errors.New(\"autoscale already running\")\n\nfunc newAutoScaleEvent(metadataValue, action string) (*autoScaleEvent, error) {\n\t\/\/ Use metadataValue as ID to ensure only one auto scale process runs for\n\t\/\/ each metadataValue. (*autoScaleEvent).update() will generate a new\n\t\/\/ unique ID and remove this initial record.\n\tevt := autoScaleEvent{\n\t\tID: metadataValue,\n\t\tStartTime: time.Now().UTC(),\n\t\tMetadataValue: metadataValue,\n\t\tAction: action,\n\t}\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer coll.Close()\n\terr = coll.Insert(evt)\n\tif mgo.IsDup(err) {\n\t\treturn nil, errAutoScaleRunning\n\t}\n\treturn &evt, err\n}\n\nfunc (evt *autoScaleEvent) update(err error) error {\n\tif err != nil {\n\t\tevt.Error = err.Error()\n\t}\n\tevt.Successful = err == nil\n\tevt.EndTime = time.Now().UTC()\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer coll.Close()\n\tdefer coll.RemoveId(evt.ID)\n\tevt.ID = bson.NewObjectId()\n\treturn coll.Insert(evt)\n}\n\nfunc listAutoScaleEvents(skip, limit int) ([]autoScaleEvent, error) {\n\tcoll, err := autoScaleCollection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tquery := coll.Find(nil).Sort(\"-_id\")\n\tif skip != 0 {\n\t\tquery = query.Skip(skip)\n\t}\n\tif limit != 0 {\n\t\tquery = query.Limit(limit)\n\t}\n\tvar list []autoScaleEvent\n\terr = query.All(&list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}\n\ntype autoScaleConfig struct {\n\tprovisioner *dockerProvisioner\n\tmatadataFilter string\n\tgroupByMetadata string\n\ttotalMemoryMetadata string\n\tmaxMemoryRatio float32\n\tmaxContainerCount int\n\twaitTimeNewMachine time.Duration\n\trunInterval time.Duration\n\tdone chan bool\n}\n\ntype autoScaler interface {\n\tscale(groupMetadata string, nodes []*cluster.Node) error\n}\n\ntype memoryScaler struct {\n\t*autoScaleConfig\n}\n\ntype countScaler struct {\n\t*autoScaleConfig\n}\n\ntype metaWithFrequency struct {\n\tmetadata map[string]string\n\tfreq int\n}\n\ntype metaWithFrequencyList []metaWithFrequency\n\nfunc (l metaWithFrequencyList) Len() int { return len(l) }\nfunc (l metaWithFrequencyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }\nfunc (l metaWithFrequencyList) Less(i, j int) bool { return l[i].freq < l[j].freq }\n\nfunc (a *autoScaleConfig) run() error {\n\tvar scaler autoScaler\n\tisMemoryBased := a.totalMemoryMetadata != \"\" && a.maxMemoryRatio != 0\n\tif !isMemoryBased && a.maxContainerCount == 0 {\n\t\tscaler = &memoryScaler{a}\n\t\terr := fmt.Errorf(\"[node autoscale] aborting node auto scale, either memory information or max container count must be informed in config\")\n\t\tlog.Error(err.Error())\n\t\treturn err\n\t} else {\n\t\tscaler = &countScaler{a}\n\t}\n\toneMinute := 1 * time.Minute\n\tif a.runInterval < oneMinute {\n\t\ta.runInterval = oneMinute\n\t}\n\tif a.waitTimeNewMachine < oneMinute {\n\t\ta.waitTimeNewMachine = oneMinute\n\t}\n\tfor {\n\t\terr := a.runOnce(scaler)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"[node autoscale] %s\", err.Error())\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t\tselect {\n\t\tcase <-a.done:\n\t\t\treturn err\n\t\tcase <-time.After(a.runInterval):\n\t\t}\n\t}\n}\n\nfunc (a *autoScaleConfig) stop() {\n\ta.done <- true\n}\n\nfunc (a *autoScaleConfig) runOnce(scaler autoScaler) (retErr error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tretErr = fmt.Errorf(\"recovered panic, we can never stop! panic: %v\", r)\n\t\t}\n\t}()\n\tnodes, err := a.provisioner.getCluster().Nodes()\n\tif err != nil {\n\t\tretErr = fmt.Errorf(\"error getting nodes: %s\", err.Error())\n\t\treturn\n\t}\n\tclusterMap := map[string][]*cluster.Node{}\n\tfor i := range nodes {\n\t\tnode := &nodes[i]\n\t\tif a.groupByMetadata == \"\" {\n\t\t\tclusterMap[\"\"] = append(clusterMap[\"\"], node)\n\t\t\tcontinue\n\t\t}\n\t\tgroupMetadata := node.Metadata[a.groupByMetadata]\n\t\tif groupMetadata == \"\" {\n\t\t\tlog.Debugf(\"[node autoscale] skipped node %s, no metadata value for %s.\", node.Address, a.groupByMetadata)\n\t\t\tcontinue\n\t\t}\n\t\tif a.matadataFilter != \"\" && a.matadataFilter != groupMetadata {\n\t\t\tcontinue\n\t\t}\n\t\tclusterMap[groupMetadata] = append(clusterMap[groupMetadata], node)\n\t}\n\tfor groupMetadata, nodes := range clusterMap {\n\t\terr = scaler.scale(groupMetadata, nodes)\n\t\tif err != nil {\n\t\t\tretErr = fmt.Errorf(\"error scaling group %s: %s\", groupMetadata, err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (a *memoryScaler) scale(groupMetadata string, nodes []*cluster.Node) error {\n\treturn nil\n}\n\nfunc (a *countScaler) scale(groupMetadata string, nodes []*cluster.Node) error {\n\ttotalCount, gap, err := a.provisioner.containerGapInNodes(nodes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't find containers from nodes: %s\", err)\n\t}\n\tfreeSlots := (len(nodes) * a.maxContainerCount) - totalCount\n\tshouldRebalance := false\n\tvar rebalanceFilter map[string]string\n\tif a.groupByMetadata != \"\" {\n\t\trebalanceFilter = map[string]string{a.groupByMetadata: groupMetadata}\n\t}\n\tvar event *autoScaleEvent\n\tvar gapAfter int\n\tif freeSlots < 0 {\n\t\tshouldRebalance = true\n\t\tevent, err = newAutoScaleEvent(groupMetadata, \"add\")\n\t\tif err != nil {\n\t\t\tif err == errAutoScaleRunning {\n\t\t\t\tlog.Debugf(\"[node autoscale] skipping already running for: %s\", groupMetadata)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unable to create auto scale event: %s\", err)\n\t\t}\n\t\tlog.Debugf(\"[node autoscale] adding a new machine, metadata value: %s, free slots: %d\", groupMetadata, freeSlots)\n\t\terr := a.addNode(nodes)\n\t\tif err != nil {\n\t\t\tevent.update(err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tbuf := safe.NewBuffer(nil)\n\t\trebalanceProvisioner, err := a.provisioner.rebalanceContainersByFilter(buf, nil, rebalanceFilter, true)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"unable to run dry rebalance to check if rebalance is needed: %s - log: %s\", err, buf.String())\n\t\t\treturn nil\n\t\t}\n\t\t_, gapAfter, err = rebalanceProvisioner.containerGapInNodes(nodes)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't find containers from rebalanced nodes: %s\", err)\n\t\t}\n\t\tshouldRebalance = math.Abs((float64)(gap-gapAfter)) > 2.0\n\t}\n\tif shouldRebalance {\n\t\tlog.Debugf(\"[node autoscale] running rebalance, metadata value: %s, gap before: %d, gap after: %d\", groupMetadata, gap, gapAfter)\n\t\tif event == nil {\n\t\t\tevent, err = newAutoScaleEvent(groupMetadata, \"rebalance\")\n\t\t\tif err != nil {\n\t\t\t\tif err == errAutoScaleRunning {\n\t\t\t\t\tlog.Debugf(\"[node autoscale] skipping already running for: %s\", groupMetadata)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"unable to create auto scale event: %s\", err)\n\t\t\t}\n\t\t}\n\t\tbuf := safe.NewBuffer(nil)\n\t\t_, err := a.provisioner.rebalanceContainersByFilter(buf, nil, rebalanceFilter, false)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to rebalance containers: %s - log: %s\", err.Error(), buf.String())\n\t\t}\n\t\tevent.update(err)\n\t}\n\treturn nil\n}\n\nfunc (a *autoScaleConfig) addNode(modelNodes []*cluster.Node) error {\n\tmetadata, err := chooseMetadataFromNodes(modelNodes)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, hasIaas := metadata[\"iaas\"]\n\tif !hasIaas {\n\t\treturn fmt.Errorf(\"no IaaS information in nodes metadata: %#v\", metadata)\n\t}\n\tmachine, err := iaas.CreateMachineForIaaS(metadata[\"iaas\"], metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create machine: %s\", err.Error())\n\t}\n\tnewAddr := machine.FormatNodeAddress()\n\tlog.Debugf(\"[node autoscale] new machine created: %s - Waiting for docker to start...\", newAddr)\n\t_, err = a.provisioner.getCluster().WaitAndRegister(newAddr, metadata, a.waitTimeNewMachine)\n\tif err != nil {\n\t\tmachine.Destroy()\n\t\treturn fmt.Errorf(\"error registering new node %s: %s\", newAddr, err.Error())\n\t}\n\tlog.Debugf(\"[node autoscale] new machine created: %s - started!\", newAddr)\n\treturn nil\n}\n\nfunc splitMetadata(nodesMetadata []map[string]string) (metaWithFrequencyList, map[string]string, error) {\n\tcommon := make(map[string]string)\n\texclusive := make([]map[string]string, len(nodesMetadata))\n\tfor i := range nodesMetadata {\n\t\tmetadata := nodesMetadata[i]\n\t\tfor k, v := range metadata {\n\t\t\tisExclusive := false\n\t\t\tfor j := range nodesMetadata {\n\t\t\t\tif i == j {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\totherMetadata := nodesMetadata[j]\n\t\t\t\tif v != otherMetadata[k] {\n\t\t\t\t\tisExclusive = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif isExclusive {\n\t\t\t\tif exclusive[i] == nil {\n\t\t\t\t\texclusive[i] = make(map[string]string)\n\t\t\t\t}\n\t\t\t\texclusive[i][k] = v\n\t\t\t} else {\n\t\t\t\tcommon[k] = v\n\t\t\t}\n\t\t}\n\t}\n\tvar group metaWithFrequencyList\n\tsameMap := make(map[int]bool)\n\tfor i := range exclusive {\n\t\tfreq := 1\n\t\tfor j := range exclusive {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdiffCount := 0\n\t\t\tfor k, v := range exclusive[i] {\n\t\t\t\tif exclusive[j][k] != v {\n\t\t\t\t\tdiffCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif diffCount > 0 && diffCount < len(exclusive[i]) {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"unbalanced metadata for node group: %v vs %v\", exclusive[i], exclusive[j])\n\t\t\t}\n\t\t\tif diffCount == 0 {\n\t\t\t\tsameMap[j] = true\n\t\t\t\tfreq++\n\t\t\t}\n\t\t}\n\t\tif !sameMap[i] && exclusive[i] != nil {\n\t\t\tgroup = append(group, metaWithFrequency{metadata: exclusive[i], freq: freq})\n\t\t}\n\t}\n\treturn group, common, nil\n}\n\nfunc chooseMetadataFromNodes(modelNodes []*cluster.Node) (map[string]string, error) {\n\tmetadataList := make([]map[string]string, len(modelNodes))\n\tfor i, n := range modelNodes {\n\t\tmetadataList[i] = n.CleanMetadata()\n\t}\n\texclusiveList, baseMetadata, err := splitMetadata(metadataList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar chosenExclusive map[string]string\n\tif exclusiveList != nil {\n\t\tsort.Sort(exclusiveList)\n\t\tchosenExclusive = exclusiveList[0].metadata\n\t}\n\tfor k, v := range chosenExclusive {\n\t\tbaseMetadata[k] = v\n\t}\n\treturn baseMetadata, nil\n}\n\nfunc (p *dockerProvisioner) containerGapInNodes(nodes []*cluster.Node) (int, int, error) {\n\tmaxCount := 0\n\tminCount := 0\n\ttotalCount := 0\n\tfor _, n := range nodes {\n\t\tcontCount, err := p.countRunningContainersByHost(urlToHost(n.Address))\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t\tif contCount > maxCount {\n\t\t\tmaxCount = contCount\n\t\t}\n\t\tif minCount == 0 || contCount < minCount {\n\t\t\tminCount = contCount\n\t\t}\n\t\ttotalCount += contCount\n\t}\n\treturn totalCount, maxCount - minCount, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package container\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\tContainerName = \"testContainer\"\n\tContainerType = \"busybox\"\n\tPrepareHost = \"vagrant\"\n\tPrepareName = \"vm-test\"\n)\n\nfunc TestNewContainer(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\n\tif c.Name != ContainerName {\n\t\tt.Errorf(\"NewContainer: exptecting: %s got: %s\", ContainerName, c.Name)\n\t}\n\n\tcontainerDir := lxcDir + ContainerName + \"\/\"\n\n\tif c.Path(\"\") != containerDir {\n\t\tt.Errorf(\"NewContainer: exptecting: %s got: %s\", containerDir, c.Dir)\n\t}\n\n}\n\nfunc TestContainer_GenerateFiles(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\tc.HostnameAlias = \"vagrant\"\n\n\tif err := c.AsHost().PrepareDir(c.Path(\"\")); err != nil {\n\t\tt.Errorf(\"Generatefile: %s \", err)\n\t}\n\n\tvar files = []struct {\n\t\tfileName string\n\t\ttemplate string\n\t}{\n\t\t{c.Path(\"config\"), \"config\"},\n\t\t{c.Path(\"fstab\"), \"fstab\"},\n\t\t{c.Path(\"ip-address\"), \"ip-address\"},\n\t}\n\n\tfor _, file := range files {\n\t\terr := c.AsHost().GenerateFile(file.fileName, file.template)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Generatefile: %s \", err)\n\t\t}\n\n\t\tif _, err := os.Stat(file.fileName); os.IsNotExist(err) {\n\t\t\tt.Errorf(\"Generatefile: %s does not exist\", file.fileName)\n\t\t}\n\t}\n\n}\n\nfunc TestContainer_GenerateOverlayFiles(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.HostnameAlias = \"vagrant\"\n\tc.LdapPassword = \"123456789\"\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay: %s \", err)\n\t}\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\/lost+found\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay\/lost+found: %s \", err)\n\t}\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\/etc\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay\/etc: %s \", err)\n\t}\n\n\tvar containerFiles = []struct {\n\t\tfileName string\n\t\ttemplate string\n\t}{\n\t\t{c.OverlayPath(\"\/etc\/hostname\"), \"hostname\"},\n\t\t{c.OverlayPath(\"\/etc\/hosts\"), \"hosts\"},\n\t\t{c.OverlayPath(\"\/etc\/ldap.conf\"), \"ldap.conf\"},\n\t}\n\n\tfor _, file := range containerFiles {\n\t\terr := c.AsContainer().GenerateFile(file.fileName, file.template)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Generatefile: %s \", err)\n\t\t}\n\n\t\tif _, err := os.Stat(file.fileName); os.IsNotExist(err) {\n\t\t\tt.Errorf(\"Generatefile: %s does not exist\", file.fileName)\n\t\t}\n\t}\n}\n\nfunc TestContainer_CreateUserHome(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.HostnameAlias = \"vagrant\"\n\tc.LdapPassword = \"123456789\"\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\tc.Username = \"testing\"\n\tc.WebHome = \"testing\"\n\n\tif err := c.createUserHome(); err != nil {\n\t\tt.Errorf(\"CreateUserHome %s \", err)\n\t}\n\n}\n\n\/\/ func TestContainer_Create(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\n\/\/ \tif err := c.Create(ContainerType); err != nil {\n\/\/ \t\tt.Error(err)\n\/\/ \t}\n\n\/\/ \tif _, err := os.Stat(c.Dir); os.IsNotExist(err) {\n\/\/ \t\tt.Errorf(\"Create: %s does not exist\", c.Dir)\n\/\/ \t}\n\n\/\/ }\n\n\/\/ func TestContainer_Start(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\n\/\/ \tif err := c.Start(); err != nil {\n\/\/ \t\tt.Error(err)\n\/\/ \t}\n\n\/\/ \tif !c.IsRunning() {\n\/\/ \t\tt.Errorf(\"Starting the container failed...\")\n\/\/ \t}\n\/\/ }\n\n\/\/ func TestShutdown(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\n\/\/ \tif err := c.Shutdown(3); err != nil {\n\/\/ \t\tt.Errorf(err.Error())\n\/\/ \t}\n\n\/\/ \tif c.IsRunning() {\n\/\/ \t\tt.Errorf(\"Shutting down the container failed...\")\n\/\/ \t}\n\/\/ }\n\n\/\/ func TestStop(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\n\/\/ \tif err := c.Start(); err != nil {\n\/\/ \t\tt.Error(err)\n\/\/ \t}\n\n\/\/ \tif err := c.Stop(); err != nil {\n\/\/ \t\tt.Errorf(err.Error())\n\/\/ \t}\n\n\/\/ \tif c.IsRunning() {\n\/\/ \t\tt.Errorf(\"Stopping the container failed...\")\n\/\/ \t}\n\/\/ }\n\n\/\/ func TestDestroy(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\n\/\/ \tif err := c.Destroy(); err != nil {\n\/\/ \t\tt.Error(err)\n\/\/ \t}\n\/\/ }\n<commit_msg>container: add a new test for webdir<commit_after>package container\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\tContainerName = \"testContainer\"\n\tContainerType = \"busybox\"\n\tPrepareHost = \"vagrant\"\n\tPrepareName = \"vm-test\"\n)\n\nfunc exist(filename string) bool {\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc TestNewContainer(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\n\tif c.Name != ContainerName {\n\t\tt.Errorf(\"NewContainer: exptecting: %s got: %s\", ContainerName, c.Name)\n\t}\n\n\tcontainerDir := lxcDir + ContainerName + \"\/\"\n\n\tif c.Path(\"\") != containerDir {\n\t\tt.Errorf(\"NewContainer: exptecting: %s got: %s\", containerDir, c.Dir)\n\t}\n\n}\n\nfunc TestContainer_GenerateFiles(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\tc.HostnameAlias = \"vagrant\"\n\n\tif err := c.AsHost().PrepareDir(c.Path(\"\")); err != nil {\n\t\tt.Errorf(\"Generatefile: %s \", err)\n\t}\n\n\tvar files = []struct {\n\t\tfileName string\n\t\ttemplate string\n\t}{\n\t\t{c.Path(\"config\"), \"config\"},\n\t\t{c.Path(\"fstab\"), \"fstab\"},\n\t\t{c.Path(\"ip-address\"), \"ip-address\"},\n\t}\n\n\tfor _, file := range files {\n\t\terr := c.AsHost().GenerateFile(file.fileName, file.template)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Generatefile: %s \", err)\n\t\t}\n\n\t\tif !exist(file.fileName) {\n\t\t\tt.Errorf(\"Generatefile: %s does not exist\", file.fileName)\n\t\t}\n\t}\n\n}\n\nfunc TestContainer_GenerateOverlayFiles(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.HostnameAlias = \"vagrant\"\n\tc.LdapPassword = \"123456789\"\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay: %s \", err)\n\t}\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\/lost+found\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay\/lost+found: %s \", err)\n\t}\n\n\tif err := c.AsContainer().PrepareDir(c.OverlayPath(\"\/etc\")); err != nil {\n\t\tt.Errorf(\"PrepareDir Overlay\/etc: %s \", err)\n\t}\n\n\tvar containerFiles = []struct {\n\t\tfileName string\n\t\ttemplate string\n\t}{\n\t\t{c.OverlayPath(\"etc\/hostname\"), \"hostname\"},\n\t\t{c.OverlayPath(\"etc\/hosts\"), \"hosts\"},\n\t\t{c.OverlayPath(\"etc\/ldap.conf\"), \"ldap.conf\"},\n\t}\n\n\tfor _, file := range containerFiles {\n\t\terr := c.AsContainer().GenerateFile(file.fileName, file.template)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Generatefile: %s \", err)\n\t\t}\n\n\t\tif !exist(file.fileName) {\n\t\t\tt.Errorf(\"Generatefile: %s does not exist\", file.fileName)\n\t\t}\n\t}\n}\n\nfunc TestContainer_CreateUserHome(t *testing.T) {\n\tc := NewContainer(ContainerName)\n\tc.HostnameAlias = \"vagrant\"\n\tc.LdapPassword = \"123456789\"\n\tc.IP = net.ParseIP(\"127.0.0.1\")\n\tc.Username = \"testing\"\n\tc.WebHome = \"testing\"\n\n\tif err := c.createUserHome(); err != nil {\n\t\tt.Errorf(\"Could not create home directory %s \", err)\n\t}\n\n\tif !exist(c.Path(\"rootfs\/home\/testing\")) {\n\t\tt.Error(\"User home directory does not exist\")\n\t}\n}\n\n\/\/ func TestContainer_CreateWebDir(t *testing.T) {\n\/\/ \tc := NewContainer(ContainerName)\n\/\/ \tc.HostnameAlias = \"vagrant\"\n\/\/ \tc.LdapPassword = \"123456789\"\n\/\/ \tc.IP = net.ParseIP(\"127.0.0.1\")\n\/\/ \tc.Username = \"testing\"\n\/\/ \tc.WebHome = \"testing\"\n\n\/\/ \tvmWebDir := c.Path(fmt.Sprintf(\"rootfs\/home\/%s\/Web\", c.WebHome))\n\n\/\/ \tif err := c.createWebDir(vmWebDir); err != nil {\n\/\/ \t\tt.Errorf(\"Could not create web directory %s \", err)\n\/\/ \t}\n\n\/\/ \tif !exist(c.Path(\"rootfs\/var\/www\")) {\n\/\/ \t\tt.Error(\"Create web directory, \/var\/www does not exist\")\n\/\/ \t}\n\n\/\/ \tif !exist(c.Path(\"rootfs\/home\/testing\/Web\")) {\n\/\/ \t\tt.Error(\"Create web directory. \/home\/testing\/Web does not exist\")\n\/\/ \t}\n\/\/ }\n<|endoftext|>"} {"text":"<commit_before>package discard\n\nimport (\n\t\"sync\"\n\n\t\"koding\/klient\/machine\/index\"\n\tmsync \"koding\/klient\/machine\/mount\/sync\"\n)\n\n\/\/ BuildOpts represents a context that can be used by external syncers to build\n\/\/ their own type. Built syncer should update indexes after syncing and manage\n\/\/ received events.\ntype BuildOpts struct {\n\tRemoteIdx *index.Index \/\/ known state of remote index.\n\tLocalIdx *index.Index \/\/ known state of local index.\n}\n\n\/\/ DiscardBuilder is a factory for Discard synchronization objects.\ntype DiscardBuilder struct{}\n\n\/\/ Build satisfies msync.Builder interface. It produces Discard objects and\n\/\/ always returns nil error.\nfunc (DiscardBuilder) Build(_ *msync.BuildOpts) (msync.Syncer, error) {\n\treturn NewDiscard(), nil\n}\n\n\/\/ Event is a no-op for synchronization object.\ntype Event struct {\n\tev *msync.Event\n}\n\n\/\/ Exec satisfies msync.Execer interface. The only thing this method does is\n\/\/ closing internal sync event.\nfunc (e *Event) Exec() error {\n\te.ev.Done()\n\n\treturn nil\n}\n\n\/\/ Discard is a no-op synchronization object. It can be used as a stub for other\n\/\/ non-event-dependent logic. This means that this type should be used only in\n\/\/ tests which doesn't care about mount file synchronization.\ntype Discard struct {\n\tonce sync.Once\n\tstopC chan struct{} \/\/ channel used to close any opened exec streams.\n}\n\n\/\/ NewDiscard creates a new Discard synchronization object.\nfunc NewDiscard() *Discard {\n\treturn &Discard{\n\t\tstopC: make(chan struct{}),\n\t}\n}\n\n\/\/ ExecStream wraps incoming msync events with discard event logic which is\n\/\/ basically no-op.\nfunc (d *Discard) ExecStream(evC <-chan *msync.Event) <-chan msync.Execer {\n\texC := make(chan msync.Execer)\n\n\tgo func() {\n\t\tdefer close(exC)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-evC:\n\t\t\t\tex := &Event{ev: ev}\n\t\t\t\tselect {\n\t\t\t\tcase exC <- ex:\n\t\t\t\tcase <-d.stopC:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-d.stopC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn exC\n}\n\n\/\/ Close stops all created synchronization streams.\nfunc (d *Discard) Close() {\n\td.once.Do(func() {\n\t\tclose(d.stopC)\n\t})\n}\n<commit_msg>klient\/machine: handle closed event channel inside discard syncer<commit_after>package discard\n\nimport (\n\t\"sync\"\n\n\t\"koding\/klient\/machine\/index\"\n\tmsync \"koding\/klient\/machine\/mount\/sync\"\n)\n\n\/\/ BuildOpts represents a context that can be used by external syncers to build\n\/\/ their own type. Built syncer should update indexes after syncing and manage\n\/\/ received events.\ntype BuildOpts struct {\n\tRemoteIdx *index.Index \/\/ known state of remote index.\n\tLocalIdx *index.Index \/\/ known state of local index.\n}\n\n\/\/ DiscardBuilder is a factory for Discard synchronization objects.\ntype DiscardBuilder struct{}\n\n\/\/ Build satisfies msync.Builder interface. It produces Discard objects and\n\/\/ always returns nil error.\nfunc (DiscardBuilder) Build(_ *msync.BuildOpts) (msync.Syncer, error) {\n\treturn NewDiscard(), nil\n}\n\n\/\/ Event is a no-op for synchronization object.\ntype Event struct {\n\tev *msync.Event\n}\n\n\/\/ Exec satisfies msync.Execer interface. The only thing this method does is\n\/\/ closing internal sync event.\nfunc (e *Event) Exec() error {\n\te.ev.Done()\n\n\treturn nil\n}\n\n\/\/ Discard is a no-op synchronization object. It can be used as a stub for other\n\/\/ non-event-dependent logic. This means that this type should be used only in\n\/\/ tests which doesn't care about mount file synchronization.\ntype Discard struct {\n\tonce sync.Once\n\tstopC chan struct{} \/\/ channel used to close any opened exec streams.\n}\n\n\/\/ NewDiscard creates a new Discard synchronization object.\nfunc NewDiscard() *Discard {\n\treturn &Discard{\n\t\tstopC: make(chan struct{}),\n\t}\n}\n\n\/\/ ExecStream wraps incoming msync events with discard event logic which is\n\/\/ basically no-op.\nfunc (d *Discard) ExecStream(evC <-chan *msync.Event) <-chan msync.Execer {\n\texC := make(chan msync.Execer)\n\n\tgo func() {\n\t\tdefer close(exC)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev, ok := <-evC:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tex := &Event{ev: ev}\n\t\t\t\tselect {\n\t\t\t\tcase exC <- ex:\n\t\t\t\tcase <-d.stopC:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-d.stopC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn exC\n}\n\n\/\/ Close stops all created synchronization streams.\nfunc (d *Discard) Close() {\n\td.once.Do(func() {\n\t\tclose(d.stopC)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package message\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"socialapi\/workers\/api\/modules\/helpers\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\nfunc Create(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tchannelId, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t\/\/ override message type\n\t\/\/ all of the messages coming from client-side\n\t\/\/ should be marked as POST\n\treq.TypeConstant = models.ChannelMessage_TYPE_POST\n\n\t\/\/ set initial channel id\n\treq.InitialChannelId = channelId\n\n\tif err := req.Create(); err != nil {\n\t\t\/\/ todo this should be internal server error\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcml := models.NewChannelMessageList()\n\n\t\/\/ override channel id\n\tcml.ChannelId = channelId\n\tcml.MessageId = req.Id\n\tif err := cml.Create(); err != nil {\n\t\t\/\/ todo this should be internal server error\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(req)\n}\n\nfunc Delete(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treq.Id = id\n\n\tif err := req.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tif err := req.Delete(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\t\/\/ yes it is deleted but not removed completely from our system\n\treturn helpers.NewDeletedResponse()\n}\n\nfunc Update(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\treq.Id = id\n\n\tbody := req.Body\n\tif err := req.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tif req.Id == 0 {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treq.Body = body\n\tif err := req.Update(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(req)\n}\n\nfunc Get(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treq.Id = id\n\tif err := req.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(req)\n}\n<commit_msg>Social: implement handler for endpoint for fetching messages with related data<commit_after>package message\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"socialapi\/workers\/api\/modules\/helpers\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\nfunc Create(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tchannelId, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t\/\/ override message type\n\t\/\/ all of the messages coming from client-side\n\t\/\/ should be marked as POST\n\treq.TypeConstant = models.ChannelMessage_TYPE_POST\n\n\t\/\/ set initial channel id\n\treq.InitialChannelId = channelId\n\n\tif err := req.Create(); err != nil {\n\t\t\/\/ todo this should be internal server error\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcml := models.NewChannelMessageList()\n\n\t\/\/ override channel id\n\tcml.ChannelId = channelId\n\tcml.MessageId = req.Id\n\tif err := cml.Create(); err != nil {\n\t\t\/\/ todo this should be internal server error\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(req)\n}\n\nfunc Delete(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treq.Id = id\n\n\tif err := req.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tif err := req.Delete(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\t\/\/ yes it is deleted but not removed completely from our system\n\treturn helpers.NewDeletedResponse()\n}\n\nfunc Update(u *url.URL, h http.Header, req *models.ChannelMessage) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\treq.Id = id\n\n\tbody := req.Body\n\tif err := req.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tif req.Id == 0 {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treq.Body = body\n\tif err := req.Update(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(req)\n}\n\nfunc Get(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcm := models.NewChannelMessage()\n\tcm.Id = id\n\tif err := cm.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(cm)\n}\n\nfunc GetWithRelated(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {\n\tid, err := helpers.GetURIInt64(u, \"id\")\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcm := models.NewChannelMessage()\n\tcm.Id = id\n\tif err := cm.Fetch(); err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn helpers.NewNotFoundResponse()\n\t\t}\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcmc, err := cm.BuildMessage()\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.NewOKResponse(cmc)\n}\n<|endoftext|>"} {"text":"<commit_before>package applyspec\n\nimport (\n\tmodels \"bosh\/agent\/applier\/models\"\n)\n\ntype V1ApplySpec struct {\n\tPropertiesSpec PropertiesSpec `json:\"properties\"`\n\tJobSpec JobSpec `json:\"job\"`\n\tPackageSpecs map[string]PackageSpec `json:\"packages\"`\n\tConfigurationHash string `json:\"configuration_hash\"`\n\tNetworkSpecs map[string]interface{} `json:\"networks\"`\n\n\tRenderedTemplatesArchiveSpec RenderedTemplatesArchiveSpec `json:\"rendered_templates_archive\"`\n}\n\ntype PropertiesSpec struct {\n\tLoggingSpec LoggingSpec `json:\"logging\"`\n}\n\ntype LoggingSpec struct {\n\tMaxLogFileSize string `json:\"max_log_file_size\"`\n}\n\n\/\/ BOSH Director provides a single tarball with all job templates pre-rendered\nfunc (s V1ApplySpec) Jobs() []models.Job {\n\tjobsWithSource := []models.Job{}\n\tfor _, j := range s.JobSpec.JobTemplateSpecsAsJobs() {\n\t\tj.Source = s.RenderedTemplatesArchiveSpec.AsSource(j)\n\t\tjobsWithSource = append(jobsWithSource, j)\n\t}\n\treturn jobsWithSource\n}\n\nfunc (s V1ApplySpec) Packages() []models.Package {\n\tpackages := make([]models.Package, 0)\n\tfor _, value := range s.PackageSpecs {\n\t\tpackages = append(packages, value.AsPackage())\n\t}\n\treturn packages\n}\n\nfunc (s V1ApplySpec) MaxLogFileSize() string {\n\tfileSize := s.PropertiesSpec.LoggingSpec.MaxLogFileSize\n\tif len(fileSize) > 0 {\n\t\treturn fileSize\n\t}\n\treturn \"50M\"\n}\n<commit_msg>persist resource_pool, deployment name, and job index in apply spec<commit_after>package applyspec\n\nimport (\n\tmodels \"bosh\/agent\/applier\/models\"\n)\n\ntype V1ApplySpec struct {\n\tPropertiesSpec PropertiesSpec `json:\"properties\"`\n\tJobSpec JobSpec `json:\"job\"`\n\tPackageSpecs map[string]PackageSpec `json:\"packages\"`\n\tConfigurationHash string `json:\"configuration_hash\"`\n\tNetworkSpecs map[string]interface{} `json:\"networks\"`\n\tResourcePoolSpecs map[string]interface{} `json:\"resource_pool\"`\n\tDeployment string `json:\"deployment\"`\n\tIndex int `json:\"index\"`\n\n\tRenderedTemplatesArchiveSpec RenderedTemplatesArchiveSpec `json:\"rendered_templates_archive\"`\n}\n\ntype PropertiesSpec struct {\n\tLoggingSpec LoggingSpec `json:\"logging\"`\n}\n\ntype LoggingSpec struct {\n\tMaxLogFileSize string `json:\"max_log_file_size\"`\n}\n\n\/\/ BOSH Director provides a single tarball with all job templates pre-rendered\nfunc (s V1ApplySpec) Jobs() []models.Job {\n\tjobsWithSource := []models.Job{}\n\tfor _, j := range s.JobSpec.JobTemplateSpecsAsJobs() {\n\t\tj.Source = s.RenderedTemplatesArchiveSpec.AsSource(j)\n\t\tjobsWithSource = append(jobsWithSource, j)\n\t}\n\treturn jobsWithSource\n}\n\nfunc (s V1ApplySpec) Packages() []models.Package {\n\tpackages := make([]models.Package, 0)\n\tfor _, value := range s.PackageSpecs {\n\t\tpackages = append(packages, value.AsPackage())\n\t}\n\treturn packages\n}\n\nfunc (s V1ApplySpec) MaxLogFileSize() string {\n\tfileSize := s.PropertiesSpec.LoggingSpec.MaxLogFileSize\n\tif len(fileSize) > 0 {\n\t\treturn fileSize\n\t}\n\treturn \"50M\"\n}\n<|endoftext|>"} {"text":"<commit_before>package loggers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n)\n\ntype contextKey int\n\n\/\/ key is used to identify the value carried by context.\nconst key contextKey = 0\n\n\/\/ ErrNoLogger is the error returned by FromContext when context does not carry\n\/\/ a logger.\nvar ErrNoLogger = errors.New(\"logger not carried by context\")\n\n\/\/ NewContext returns a new context that carries logger.\nfunc NewContext(ctx context.Context, logger *log.Logger) context.Context {\n\treturn context.WithValue(ctx, key, logger)\n}\n\n\/\/ FromContext returns the logger from carried by ctx. If no logger is\n\/\/ carried, error is ErrNoLogger.\nfunc FromContext(ctx context.Context) (*log.Logger, error) {\n\tif ctx == nil {\n\t\treturn nil, ErrNoLogger\n\t}\n\n\tlogger, ok := ctx.Value(key).(*log.Logger)\n\tif !ok {\n\t\treturn nil, ErrNoLogger\n\t}\n\treturn logger, nil\n}\n<commit_msg>Fixed typo in comment.<commit_after>package loggers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n)\n\ntype contextKey int\n\n\/\/ key is used to identify the value carried by context.\nconst key contextKey = 0\n\n\/\/ ErrNoLogger is the error returned by FromContext when context does not carry\n\/\/ a logger.\nvar ErrNoLogger = errors.New(\"logger not carried by context\")\n\n\/\/ NewContext returns a new context that carries logger.\nfunc NewContext(ctx context.Context, logger *log.Logger) context.Context {\n\treturn context.WithValue(ctx, key, logger)\n}\n\n\/\/ FromContext returns the logger carried by ctx. If no logger is carried, error\n\/\/ is ErrNoLogger.\nfunc FromContext(ctx context.Context) (*log.Logger, error) {\n\tif ctx == nil {\n\t\treturn nil, ErrNoLogger\n\t}\n\n\tlogger, ok := ctx.Value(key).(*log.Logger)\n\tif !ok {\n\t\treturn nil, ErrNoLogger\n\t}\n\treturn logger, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package renderer\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/docs\/indexer\"\n\t\"github.com\/andreaskoch\/docs\/mapper\"\n\t\"github.com\/andreaskoch\/docs\/parser\"\n\t\"github.com\/andreaskoch\/docs\/templates\"\n\t\"os\"\n\t\"text\/template\"\n)\n\nfunc RenderRepositories(repositoryPaths []string) []*indexer.Index {\n\tnumberOfRepositories := len(repositoryPaths)\n\tindizes := make([]*indexer.Index, numberOfRepositories, numberOfRepositories)\n\n\tfor indexNumber, repositoryPath := range repositoryPaths {\n\t\tindex, err := indexer.NewIndex(repositoryPath)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cannot create an index for folder %q. Error: %v\", repositoryPath, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tindizes[indexNumber] = renderIndex(index)\n\t}\n\n\treturn indizes\n}\n\nfunc renderIndex(index *indexer.Index) *indexer.Index {\n\n\tindex.Walk(func(item *indexer.Item) {\n\n\t\t\/\/ render the item\n\t\titem.Render(renderItem)\n\n\t\t\/\/ render the item again if it changes\n\t\titem.RegisterOnChangeCallback(\"RenderOnChange\", func(i *indexer.Item) {\n\n\t\t\tfmt.Printf(\"Item %q changed\", item)\n\t\t\ti.Render(renderItem)\n\t\t})\n\t})\n\n\treturn index\n}\n\nfunc renderItem(item *indexer.Item) *indexer.Item {\n\n\tfmt.Printf(\"Rendering item %q\\n\", item.Path)\n\n\t_, err := parser.Parse(item)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not parse item \\\"%v\\\": %v\\n\", item.Path, err)\n\t\treturn item\n\t}\n\n\tfmt.Println(\"Reindexing files\")\n\titem.IndexFiles()\n\n\t\/\/ get a template\n\ttemplateText, err := templates.GetTemplate(item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn item\n\t}\n\n\t\/\/ get a viewmodel mapper\n\tmapperFunc, err := mapper.GetMapper(item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn item\n\t}\n\n\t\/\/ create the viewmodel\n\tviewModel := mapperFunc(item, nil)\n\n\t\/\/ render the template\n\trender(item, templateText, viewModel)\n\n\treturn item\n}\n\nfunc render(item *indexer.Item, templateText string, viewModel interface{}) (*indexer.Item, error) {\n\tfile, err := os.Create(item.RenderedPath)\n\tif err != nil {\n\t\treturn item, err\n\t}\n\n\twriter := bufio.NewWriter(file)\n\n\tdefer func() {\n\t\twriter.Flush()\n\t\tfile.Close()\n\t}()\n\n\ttemplate := template.New(item.Type)\n\ttemplate.Parse(templateText)\n\ttemplate.Execute(writer, viewModel)\n\n\treturn item, nil\n}\n<commit_msg>Bug fix: the mapper function requires a callback which makes sure all child items are parsed<commit_after>package renderer\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/docs\/indexer\"\n\t\"github.com\/andreaskoch\/docs\/mapper\"\n\t\"github.com\/andreaskoch\/docs\/parser\"\n\t\"github.com\/andreaskoch\/docs\/templates\"\n\t\"os\"\n\t\"text\/template\"\n)\n\nfunc RenderRepositories(repositoryPaths []string) []*indexer.Index {\n\tnumberOfRepositories := len(repositoryPaths)\n\tindizes := make([]*indexer.Index, numberOfRepositories, numberOfRepositories)\n\n\tfor indexNumber, repositoryPath := range repositoryPaths {\n\t\tindex, err := indexer.NewIndex(repositoryPath)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cannot create an index for folder %q. Error: %v\", repositoryPath, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tindizes[indexNumber] = renderIndex(index)\n\t}\n\n\treturn indizes\n}\n\nfunc renderIndex(index *indexer.Index) *indexer.Index {\n\n\tindex.Walk(func(item *indexer.Item) {\n\n\t\t\/\/ render the item\n\t\titem.Render(renderItem)\n\n\t\t\/\/ render the item again if it changes\n\t\titem.RegisterOnChangeCallback(\"RenderOnChange\", func(i *indexer.Item) {\n\n\t\t\tfmt.Printf(\"Item %q changed\", item)\n\t\t\ti.Render(renderItem)\n\t\t})\n\t})\n\n\treturn index\n}\n\nfunc renderItem(item *indexer.Item) *indexer.Item {\n\n\tfmt.Printf(\"Rendering item %q\\n\", item.Path)\n\n\t_, err := parser.Parse(item)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not parse item \\\"%v\\\": %v\\n\", item.Path, err)\n\t\treturn item\n\t}\n\n\tfmt.Println(\"Reindexing files\")\n\titem.IndexFiles()\n\n\t\/\/ get a template\n\ttemplateText, err := templates.GetTemplate(item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn item\n\t}\n\n\t\/\/ get a viewmodel mapper\n\tmapperFunc, err := mapper.GetMapper(item)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn item\n\t}\n\n\t\/\/ create the viewmodel\n\tviewModel := mapperFunc(item, func(i *indexer.Item) {\n\t\tparser.Parse(i)\n\t})\n\n\t\/\/ render the template\n\trender(item, templateText, viewModel)\n\n\treturn item\n}\n\nfunc render(item *indexer.Item, templateText string, viewModel interface{}) (*indexer.Item, error) {\n\tfile, err := os.Create(item.RenderedPath)\n\tif err != nil {\n\t\treturn item, err\n\t}\n\n\twriter := bufio.NewWriter(file)\n\n\tdefer func() {\n\t\twriter.Flush()\n\t\tfile.Close()\n\t}()\n\n\ttemplate := template.New(item.Type)\n\ttemplate.Parse(templateText)\n\ttemplate.Execute(writer, viewModel)\n\n\treturn item, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package isolated\n\nimport (\n\t\"fmt\"\n\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"reset-space-isolation-segment command\", func() {\n\tvar organizationName string\n\tvar spaceName string\n\n\tBeforeEach(func() {\n\t\torganizationName = helpers.NewOrgName()\n\t\tspaceName = helpers.NewSpaceName()\n\t})\n\n\tDescribe(\"help\", func() {\n\t\tContext(\"when --help flag is set\", func() {\n\t\t\tIt(\"Displays command usage to output\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", \"--help\")\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\t\tEventually(session).Should(Say(\"reset-space-isolation-segment - Reset the space's isolation segment to the org default\"))\n\t\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\t\tEventually(session).Should(Say(\"cf reset-space-isolation-segment SPACE_NAME\"))\n\t\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\t\tEventually(session).Should(Say(\"org, restart, space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is not setup correctly\", func() {\n\t\tContext(\"when no API endpoint is set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.UnsetAPI()\n\t\t\t})\n\n\t\t\tIt(\"fails with no API endpoint set message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set. Use 'cf login' or 'cf api' to target an endpoint.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when not logged in\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t})\n\n\t\t\tIt(\"fails with not logged in message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Not logged in. Use 'cf login' to log in.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when there is no org set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t})\n\n\t\t\tIt(\"fails with no targeted org error message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session.Out).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"No org targeted, use 'cf target -o ORG' to target an org.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is set up correctly\", func() {\n\t\tvar userName string\n\n\t\tBeforeEach(func() {\n\t\t\thelpers.LoginCF()\n\t\t\tuserName, _ = helpers.GetCredentials()\n\t\t\thelpers.CreateOrg(organizationName)\n\t\t\thelpers.TargetOrg(organizationName)\n\t\t})\n\n\t\tContext(\"when the space does not exist\", func() {\n\t\t\tIt(\"fails with space not found message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Space '%s' not found.\", spaceName))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the space exists\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.CreateSpace(spaceName)\n\t\t\t\tisolationSegmentName := helpers.NewIsolationSegmentName()\n\t\t\t\tEventually(helpers.CF(\"create-isolation-segment\", isolationSegmentName)).Should(Exit(0))\n\t\t\t\tEventually(helpers.CF(\"enable-org-isolation\", organizationName, isolationSegmentName)).Should(Exit(0))\n\t\t\t\tEventually(helpers.CF(\"set-space-isolation-segment\", spaceName, isolationSegmentName)).Should(Exit(0))\n\t\t\t})\n\n\t\t\tContext(\"when there is no default org isolation segment\", func() {\n\t\t\t\tIt(\"resets the space isolation segment to the shared isolation segment\", func() {\n\t\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\n\t\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Applications in this space will be placed in the platform default isolation segment.\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Running applications need a restart to be moved there.\"))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\n\t\t\t\t\tEventually(helpers.CF(\"space\", spaceName)).Should(Say(\"(?m)isolation segment:\\\\s*$\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when there is a default org isolation segment\", func() {\n\t\t\t\tvar orgGUID string\n\t\t\t\tvar orgIsolationSegmentName string\n\t\t\t\tvar orgIsolationSegmentGUID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\torgIsolationSegmentName = helpers.NewIsolationSegmentName()\n\t\t\t\t\tEventually(helpers.CF(\"create-isolation-segment\", orgIsolationSegmentName)).Should(Exit(0))\n\t\t\t\t\tEventually(helpers.CF(\"enable-org-isolation\", organizationName, orgIsolationSegmentName)).Should(Exit(0))\n\t\t\t\t\torgIsolationSegmentGUID = helpers.GetIsolationSegmentGUID(orgIsolationSegmentName)\n\t\t\t\t\torgGUID = helpers.GetOrgGUID(organizationName)\n\n\t\t\t\t\tEventually(helpers.CF(\"curl\", \"-X\", \"PATCH\",\n\t\t\t\t\t\tfmt.Sprintf(\"\/v3\/organizations\/%s\/relationships\/default_isolation_segment\", orgGUID),\n\t\t\t\t\t\t\"-d\", fmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}`, orgIsolationSegmentGUID))).Should(Exit(0))\n\t\t\t\t})\n\n\t\t\t\tIt(\"resets the space isolation segment to the default org isolation segment\", func() {\n\t\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\n\t\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Applications in this space will be placed in isolation segment %s.\", orgIsolationSegmentName))\n\t\t\t\t\tEventually(session).Should(Say(\"Running applications need a restart to be moved there.\"))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\n\t\t\t\t\tEventually(helpers.CF(\"space\", spaceName)).Should(Say(\"isolation segment:\\\\s+%s\", orgIsolationSegmentName))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>wait for session to finish before exiting tests<commit_after>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"reset-space-isolation-segment command\", func() {\n\tvar organizationName string\n\tvar spaceName string\n\n\tBeforeEach(func() {\n\t\torganizationName = helpers.NewOrgName()\n\t\tspaceName = helpers.NewSpaceName()\n\t})\n\n\tDescribe(\"help\", func() {\n\t\tContext(\"when --help flag is set\", func() {\n\t\t\tIt(\"Displays command usage to output\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", \"--help\")\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\t\tEventually(session).Should(Say(\"reset-space-isolation-segment - Reset the space's isolation segment to the org default\"))\n\t\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\t\tEventually(session).Should(Say(\"cf reset-space-isolation-segment SPACE_NAME\"))\n\t\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\t\tEventually(session).Should(Say(\"org, restart, space\"))\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is not setup correctly\", func() {\n\t\tContext(\"when no API endpoint is set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.UnsetAPI()\n\t\t\t})\n\n\t\t\tIt(\"fails with no API endpoint set message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set. Use 'cf login' or 'cf api' to target an endpoint.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when not logged in\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t})\n\n\t\t\tIt(\"fails with not logged in message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Not logged in. Use 'cf login' to log in.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when there is no org set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t})\n\n\t\t\tIt(\"fails with no targeted org error message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session.Out).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"No org targeted, use 'cf target -o ORG' to target an org.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is set up correctly\", func() {\n\t\tvar userName string\n\n\t\tBeforeEach(func() {\n\t\t\thelpers.LoginCF()\n\t\t\tuserName, _ = helpers.GetCredentials()\n\t\t\thelpers.CreateOrg(organizationName)\n\t\t\thelpers.TargetOrg(organizationName)\n\t\t})\n\n\t\tContext(\"when the space does not exist\", func() {\n\t\t\tIt(\"fails with space not found message\", func() {\n\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Space '%s' not found.\", spaceName))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the space exists\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.CreateSpace(spaceName)\n\t\t\t\tisolationSegmentName := helpers.NewIsolationSegmentName()\n\t\t\t\tEventually(helpers.CF(\"create-isolation-segment\", isolationSegmentName)).Should(Exit(0))\n\t\t\t\tEventually(helpers.CF(\"enable-org-isolation\", organizationName, isolationSegmentName)).Should(Exit(0))\n\t\t\t\tEventually(helpers.CF(\"set-space-isolation-segment\", spaceName, isolationSegmentName)).Should(Exit(0))\n\t\t\t})\n\n\t\t\tContext(\"when there is no default org isolation segment\", func() {\n\t\t\t\tIt(\"resets the space isolation segment to the shared isolation segment\", func() {\n\t\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\n\t\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Applications in this space will be placed in the platform default isolation segment.\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Running applications need a restart to be moved there.\"))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\n\t\t\t\t\tsession = helpers.CF(\"space\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"(?m)isolation segment:\\\\s*$\"))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when there is a default org isolation segment\", func() {\n\t\t\t\tvar orgIsolationSegmentName string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\torgIsolationSegmentName = helpers.NewIsolationSegmentName()\n\t\t\t\t\tEventually(helpers.CF(\"create-isolation-segment\", orgIsolationSegmentName)).Should(Exit(0))\n\t\t\t\t\tEventually(helpers.CF(\"enable-org-isolation\", organizationName, orgIsolationSegmentName)).Should(Exit(0))\n\t\t\t\t\tEventually(helpers.CF(\"set-org-default-isolation-segment\", organizationName, orgIsolationSegmentName)).Should(Exit(0))\n\t\t\t\t})\n\n\t\t\t\tIt(\"resets the space isolation segment to the default org isolation segment\", func() {\n\t\t\t\t\tsession := helpers.CF(\"reset-space-isolation-segment\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"Resetting isolation segment assignment of space %s in org %s as %s...\", spaceName, organizationName, userName))\n\n\t\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\t\tEventually(session).Should(Say(\"Applications in this space will be placed in isolation segment %s.\", orgIsolationSegmentName))\n\t\t\t\t\tEventually(session).Should(Say(\"Running applications need a restart to be moved there.\"))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\n\t\t\t\t\tsession = helpers.CF(\"space\", spaceName)\n\t\t\t\t\tEventually(session).Should(Say(\"isolation segment:\\\\s+%s\", orgIsolationSegmentName))\n\t\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"gopkg.in\/resty.v0\"\n)\n\nfunc registerPermissions(app cli.App) *cli.App {\n\tapp.Commands = append(app.Commands,\n\t\t[]cli.Command{\n\t\t\t\/\/ permissions\n\t\t\t{\n\t\t\t\tName: \"permissions\",\n\t\t\t\tUsage: \"SUBCOMMANDS for permissions\",\n\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"type\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission types\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"add\",\n\t\t\t\t\t\t\t\tUsage: \"Register a new permission type\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionTypeAdd),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"remove\",\n\t\t\t\t\t\t\t\tUsage: \"Remove an existing permission type\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionTypeDel),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\/*\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"rename\",\n\t\t\t\t\t\t\t\t\tUsage: \"Rename an existing permission type\",\n\t\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionTypeRename),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t*\/\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"list\",\n\t\t\t\t\t\t\t\tUsage: \"List all permission types\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionTypeList),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"show\",\n\t\t\t\t\t\t\t\tUsage: \"Show details for a permission type\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionTypeShow),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, \/\/ end permissions type\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"add\",\n\t\t\t\t\t\tUsage: \"Register a new permission\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionAdd),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"remove\",\n\t\t\t\t\t\tUsage: \"Remove a permission\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionDel),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"list\",\n\t\t\t\t\t\tUsage: \"List all permissions\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionList),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"show\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission show\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"user\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a user\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowUser),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"team\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a team\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowTeam),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"tool\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a tool account\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowTool),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"permission\",\n\t\t\t\t\t\t\t\tUsage: \"Show details about a permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowPermission),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, \/\/ end permissions show\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"audit\",\n\t\t\t\t\t\tUsage: \"Show all limited permissions associated with a repository\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionAudit),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"grant\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission grant\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"enable\",\n\t\t\t\t\t\t\t\tUsage: \"Enable a useraccount to receive GRANT permissions\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantEnable),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"global\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a global permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantGlobal),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"limited\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a limited permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantLimited),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a system permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantSystem),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, \/\/ end permissions grant\n\t\t\t\t},\n\t\t\t}, \/\/ end permissions\n\t\t}...,\n\t)\n\treturn &app\n}\n\nfunc cmdPermissionTypeAdd(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\tpermissionType := c.Args().First()\n\n\tvar req proto.Request\n\treq.Permission = &proto.Permission{}\n\treq.Permission.Category = permissionType\n\n\tresp := utl.PostRequestWithBody(Client, req, \"\/permission\/types\/\")\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionTypeDel(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\tpermissionType := c.Args().First()\n\turl.Path = fmt.Sprintf(\"\/permissions\/types\/%s\", permissionType)\n\tlog.Printf(\"Command: delete permission type [%s]\", permissionType)\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tDelete(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc cmdPermissionTypeRename(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 3)\n\tutl.ValidateCliArgument(c, 2, \"to\") \/\/ starts args counting at 1\n\tpermissionType := c.Args().Get(0)\n\tnewPermissionType := c.Args().Get(2)\n\turl.Path = fmt.Sprintf(\"\/permissions\/types\/%s\", permissionType)\n\n\tvar req proto.Request\n\treq.Permission = &proto.Permission{}\n\treq.Permission.Category = newPermissionType\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tSetBody(req).\n\t\tPatch(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc cmdPermissionTypeList(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\turl.Path = \"\/permissions\/types\"\n\n\tutl.ValidateCliArgumentCount(c, 0)\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO display list result\n\treturn nil\n}\n\nfunc cmdPermissionTypeShow(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\tpermissionType := c.Args().Get(0)\n\turl.Path = fmt.Sprintf(\"\/permissions\/types\/%s\", permissionType)\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO display show result\n\treturn nil\n}\n\nfunc cmdPermissionAdd(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\turl.Path = \"\/permissions\"\n\n\tutl.ValidateCliArgumentCount(c, 3)\n\tutl.ValidateCliArgument(c, 2, \"type\")\n\tpermission := c.Args().Get(0)\n\tpermissionType := c.Args().Get(2)\n\n\tvar req proto.Request\n\treq.Permission = &proto.Permission{}\n\treq.Permission.Name = permission\n\treq.Permission.Category = permissionType\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tSetBody(req).\n\t\tPost(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc cmdPermissionDel(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\tpermission := c.Args().Get(0)\n\turl.Path = fmt.Sprintf(\"\/permissions\/%s\", permission)\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tDelete(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}\n\nfunc cmdPermissionList(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\turl.Path = \"\/permissions\"\n\n\tutl.ValidateCliArgumentCount(c, 0)\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list permissions\n\treturn nil\n}\n\nfunc cmdPermissionShowGeneric(c *cli.Context, objType string) {\n\turl := Cfg.Run.SomaAPI\n\tvar (\n\t\tobjName string\n\t\trepo string\n\t\thasRepo bool\n\t)\n\n\tswitch utl.GetCliArgumentCount(c) {\n\tcase 1:\n\t\thasRepo = false\n\tcase 3:\n\t\tutl.ValidateCliArgument(c, 2, \"repository\")\n\t\thasRepo = true\n\tdefault:\n\t\tlog.Fatal(\"Syntax error, unexpected argument count\")\n\t}\n\n\tobjName = c.Args().Get(0)\n\tif hasRepo {\n\t\trepo = c.Args().Get(2)\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\/repository\/%s\",\n\t\t\tobjType, objName, repo)\n\t} else {\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, objName)\n\t}\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list permissions\n}\n\nfunc cmdPermissionShowUser(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"user\")\n\treturn nil\n}\n\nfunc cmdPermissionShowTeam(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"team\")\n\treturn nil\n}\n\nfunc cmdPermissionShowTool(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"tool\")\n\treturn nil\n}\n\nfunc cmdPermissionShowPermission(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\turl.Path = fmt.Sprintf(\"\/permissions\/permission\/%s\", c.Args().Get(0))\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list users\n\treturn nil\n}\n\nfunc cmdPermissionAudit(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\turl.Path = fmt.Sprintf(\"\/permissions\/repository\/%s\", c.Args().Get(0))\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list permissions\n\treturn nil\n}\n\nfunc cmdPermissionGrantEnable(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\tuserId := utl.TryGetUserByUUIDOrName(Client, c.Args().First())\n\tpath := fmt.Sprintf(\"\/permissions\/user\/%s\", userId)\n\n\tresp := utl.PatchRequestWithBody(Client, proto.Request{\n\t\tGrant: &proto.Grant{\n\t\t\tRecipientType: \"user\",\n\t\t\tRecipientId: userId,\n\t\t\tPermission: \"global_grant_limited\",\n\t\t},\n\t}, path,\n\t)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionGrantGlobal(c *cli.Context) error {\n\t\/*\n\t\turl := getApiUrl()\n\t\tvar objType string\n\n\t\tutl.ValidateCliArgumentCount(c, 3)\n\t\tswitch c.Args().Get(1) {\n\t\tcase \"user\":\n\t\t\tobjType = \"user\"\n\t\tcase \"team\":\n\t\t\tobjType = \"team\"\n\t\tcase \"tool\":\n\t\t\tobjType = \"tool\"\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error\")\n\t\t}\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, c.Args().Get(2))\n\n\t\tvar req somaproto.ProtoRequestPermission\n\t\treq.Permission = c.Args().Get(0)\n\n\t\t_, err := resty.New().\n\t\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\t\tR().\n\t\t\tSetBody(req).\n\t\t\tPatch(url.String())\n\t\tif err != nil {\n\t\t\tSlog.Fatal(err)\n\t\t}\n\t*\/\n\treturn nil\n}\n\nfunc cmdPermissionGrantLimited(c *cli.Context) error {\n\t\/*\n\t\turl := getApiUrl()\n\t\tkeys := [...]string{\"repository\", \"bucket\", \"group\", \"cluster\"}\n\t\tkeySlice := make([]string, 0)\n\t\tvar objType string\n\t\tvar req somaproto.ProtoRequestPermission\n\t\treq.Grant.GrantType = \"limited\"\n\n\t\t\/\/ the keys array is ordered towars increasing detail, ie. there can\n\t\t\/\/ not be a limited grant on a cluster without specifying the\n\t\t\/\/ repository\n\t\t\/\/ Also, slicing uses halfopen interval [a:b), ie `a` is part of the\n\t\t\/\/ resulting slice, but `b` is not\n\t\tswitch utl.GetCliArgumentCount(c) {\n\t\tcase 5:\n\t\t\tkeySlice = keys[0:1]\n\t\tcase 7:\n\t\t\tkeySlice = keys[0:2]\n\t\tcase 9:\n\t\t\tkeySlice = keys[0:3]\n\t\tcase 11:\n\t\t\tkeySlice = keys[:]\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error, unexpected argument count\")\n\t\t}\n\t\t\/\/ Tail() skips the first argument 0, which is returned by First(),\n\t\t\/\/ thus contains arguments 1-n. The first 3 arguments 0-2 are fixed in\n\t\t\/\/ order, the variable parts are arguments 4+ (argv 3-10) -- element\n\t\t\/\/ 2-9 in the tail slice.\n\t\targSlice := c.Args().Tail()[2:]\n\t\toptions := *parseLimitedGrantArguments(keySlice, argSlice)\n\n\t\tswitch c.Args().Get(1) {\n\t\tcase \"user\":\n\t\t\tobjType = \"user\"\n\t\tcase \"team\":\n\t\t\tobjType = \"team\"\n\t\tcase \"tool\":\n\t\t\tobjType = \"tool\"\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error\")\n\t\t}\n\t\treq.Permission = c.Args().Get(0)\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, c.Args().Get(2))\n\n\t\tfor k, v := range options {\n\t\t\tswitch k {\n\t\t\tcase \"repository\":\n\t\t\t\treq.Grant.Repository = v\n\t\t\tcase \"bucket\":\n\t\t\t\treq.Grant.Bucket = v\n\t\t\tcase \"group\":\n\t\t\t\treq.Grant.Group = v\n\t\t\tcase \"cluster\":\n\t\t\t\treq.Grant.Cluster = v\n\t\t\t}\n\t\t}\n\n\t\t_, err := resty.New().\n\t\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\t\tR().\n\t\t\tSetBody(req).\n\t\t\tPatch(url.String())\n\t\tif err != nil {\n\t\t\tSlog.Fatal(err)\n\t\t}\n\t*\/\n\treturn nil\n}\n\nfunc cmdPermissionGrantSystem(c *cli.Context) error {\n\treturn nil\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<commit_msg>Update permissions command<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"gopkg.in\/resty.v0\"\n)\n\nfunc registerPermissions(app cli.App) *cli.App {\n\tapp.Commands = append(app.Commands,\n\t\t[]cli.Command{\n\t\t\t\/\/ permissions\n\t\t\t{\n\t\t\t\tName: \"permissions\",\n\t\t\t\tUsage: \"SUBCOMMANDS for permissions\",\n\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"category\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission categories\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"add\",\n\t\t\t\t\t\t\t\tUsage: \"Register a new permission category\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionCategoryAdd),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"remove\",\n\t\t\t\t\t\t\t\tUsage: \"Remove an existing permission category\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionCategoryDel),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"list\",\n\t\t\t\t\t\t\t\tUsage: \"List all permission categories\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionCategoryList),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"show\",\n\t\t\t\t\t\t\t\tUsage: \"Show details for a permission category\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionCategoryShow),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, \/\/ end permissions type\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"add\",\n\t\t\t\t\t\tUsage: \"Register a new permission\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionAdd),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"remove\",\n\t\t\t\t\t\tUsage: \"Remove a permission\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionDel),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"list\",\n\t\t\t\t\t\tUsage: \"List all permissions\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionList),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"show\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission show\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"user\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a user\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowUser),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"team\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a team\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowTeam),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"tool\",\n\t\t\t\t\t\t\t\tUsage: \"Show permissions of a tool account\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowTool),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"permission\",\n\t\t\t\t\t\t\t\tUsage: \"Show details about a permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionShowPermission),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, \/\/ end permissions show\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"audit\",\n\t\t\t\t\t\tUsage: \"Show all limited permissions associated with a repository\",\n\t\t\t\t\t\tAction: runtime(cmdPermissionAudit),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"grant\",\n\t\t\t\t\t\tUsage: \"SUBCOMMANDS for permission grant\",\n\t\t\t\t\t\tSubcommands: []cli.Command{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"enable\",\n\t\t\t\t\t\t\t\tUsage: \"Enable a useraccount to receive GRANT permissions\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantEnable),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"global\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a global permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantGlobal),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"limited\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a limited permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantLimited),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t\t\tUsage: \"Grant a system permission\",\n\t\t\t\t\t\t\t\tAction: runtime(cmdPermissionGrantSystem),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, \/\/ end permissions grant\n\t\t\t\t},\n\t\t\t}, \/\/ end permissions\n\t\t}...,\n\t)\n\treturn &app\n}\n\nfunc cmdPermissionCategoryAdd(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\n\treq := proto.NewCategoryRequest()\n\treq.Category.Name = c.Args().First()\n\n\tresp := utl.PostRequestWithBody(Client, req, `\/category\/`)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionCategoryDel(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\n\tpath := fmt.Sprintf(\"\/category\/%s\", c.Args().First())\n\n\tresp := utl.DeleteRequest(Client, path)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionCategoryList(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 0)\n\n\tresp := utl.GetRequest(Client, `\/category\/`)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionCategoryShow(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\n\tpath := fmt.Sprintf(\"\/category\/%s\", c.Args().First())\n\n\tresp := utl.GetRequest(Client, path)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionAdd(c *cli.Context) error {\n\tutl.ValidateCliMinArgumentCount(c, 3)\n\tmultiple := []string{}\n\tunique := []string{`category`, `grants`}\n\trequired := []string{`category`}\n\n\topts := utl.ParseVariadicArguments(\n\t\tmultiple,\n\t\tunique,\n\t\trequired,\n\t\tc.Args().Tail())\n\n\treq := proto.NewPermissionRequest()\n\treq.Permission.Name = c.Args().First()\n\treq.Permission.Category = opts[`category`][0]\n\tif sl, ok := opts[`grants`]; ok && len(sl) > 0 {\n\t\treq.Permission.Grants = opts[`grants`][0]\n\t}\n\n\tresp := utl.PostRequestWithBody(Client, req, `\/permission\/`)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionDel(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\n\tpath := fmt.Sprintf(\"\/permission\/%s\", c.Args().First())\n\n\tresp := utl.DeleteRequest(Client, path)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionList(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 0)\n\n\tresp := utl.GetRequest(Client, `\/permission\/`)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionShowGeneric(c *cli.Context, objType string) {\n\turl := Cfg.Run.SomaAPI\n\tvar (\n\t\tobjName string\n\t\trepo string\n\t\thasRepo bool\n\t)\n\n\tswitch utl.GetCliArgumentCount(c) {\n\tcase 1:\n\t\thasRepo = false\n\tcase 3:\n\t\tutl.ValidateCliArgument(c, 2, \"repository\")\n\t\thasRepo = true\n\tdefault:\n\t\tlog.Fatal(\"Syntax error, unexpected argument count\")\n\t}\n\n\tobjName = c.Args().Get(0)\n\tif hasRepo {\n\t\trepo = c.Args().Get(2)\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\/repository\/%s\",\n\t\t\tobjType, objName, repo)\n\t} else {\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, objName)\n\t}\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list permissions\n}\n\nfunc cmdPermissionShowUser(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"user\")\n\treturn nil\n}\n\nfunc cmdPermissionShowTeam(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"team\")\n\treturn nil\n}\n\nfunc cmdPermissionShowTool(c *cli.Context) error {\n\tcmdPermissionShowGeneric(c, \"tool\")\n\treturn nil\n}\n\nfunc cmdPermissionShowPermission(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\turl.Path = fmt.Sprintf(\"\/permissions\/permission\/%s\", c.Args().Get(0))\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list users\n\treturn nil\n}\n\nfunc cmdPermissionAudit(c *cli.Context) error {\n\turl := Cfg.Run.SomaAPI\n\n\tutl.ValidateCliArgumentCount(c, 1)\n\turl.Path = fmt.Sprintf(\"\/permissions\/repository\/%s\", c.Args().Get(0))\n\n\t_, err := resty.New().\n\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\tR().\n\t\tGet(url.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO list permissions\n\treturn nil\n}\n\nfunc cmdPermissionGrantEnable(c *cli.Context) error {\n\tutl.ValidateCliArgumentCount(c, 1)\n\tuserId := utl.TryGetUserByUUIDOrName(Client, c.Args().First())\n\tpath := fmt.Sprintf(\"\/permissions\/user\/%s\", userId)\n\n\tresp := utl.PatchRequestWithBody(Client, proto.Request{\n\t\tGrant: &proto.Grant{\n\t\t\tRecipientType: \"user\",\n\t\t\tRecipientId: userId,\n\t\t\tPermission: \"global_grant_limited\",\n\t\t},\n\t}, path,\n\t)\n\tfmt.Println(resp)\n\treturn nil\n}\n\nfunc cmdPermissionGrantGlobal(c *cli.Context) error {\n\t\/*\n\t\turl := getApiUrl()\n\t\tvar objType string\n\n\t\tutl.ValidateCliArgumentCount(c, 3)\n\t\tswitch c.Args().Get(1) {\n\t\tcase \"user\":\n\t\t\tobjType = \"user\"\n\t\tcase \"team\":\n\t\t\tobjType = \"team\"\n\t\tcase \"tool\":\n\t\t\tobjType = \"tool\"\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error\")\n\t\t}\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, c.Args().Get(2))\n\n\t\tvar req somaproto.ProtoRequestPermission\n\t\treq.Permission = c.Args().Get(0)\n\n\t\t_, err := resty.New().\n\t\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\t\tR().\n\t\t\tSetBody(req).\n\t\t\tPatch(url.String())\n\t\tif err != nil {\n\t\t\tSlog.Fatal(err)\n\t\t}\n\t*\/\n\treturn nil\n}\n\nfunc cmdPermissionGrantLimited(c *cli.Context) error {\n\t\/*\n\t\turl := getApiUrl()\n\t\tkeys := [...]string{\"repository\", \"bucket\", \"group\", \"cluster\"}\n\t\tkeySlice := make([]string, 0)\n\t\tvar objType string\n\t\tvar req somaproto.ProtoRequestPermission\n\t\treq.Grant.GrantType = \"limited\"\n\n\t\t\/\/ the keys array is ordered towars increasing detail, ie. there can\n\t\t\/\/ not be a limited grant on a cluster without specifying the\n\t\t\/\/ repository\n\t\t\/\/ Also, slicing uses halfopen interval [a:b), ie `a` is part of the\n\t\t\/\/ resulting slice, but `b` is not\n\t\tswitch utl.GetCliArgumentCount(c) {\n\t\tcase 5:\n\t\t\tkeySlice = keys[0:1]\n\t\tcase 7:\n\t\t\tkeySlice = keys[0:2]\n\t\tcase 9:\n\t\t\tkeySlice = keys[0:3]\n\t\tcase 11:\n\t\t\tkeySlice = keys[:]\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error, unexpected argument count\")\n\t\t}\n\t\t\/\/ Tail() skips the first argument 0, which is returned by First(),\n\t\t\/\/ thus contains arguments 1-n. The first 3 arguments 0-2 are fixed in\n\t\t\/\/ order, the variable parts are arguments 4+ (argv 3-10) -- element\n\t\t\/\/ 2-9 in the tail slice.\n\t\targSlice := c.Args().Tail()[2:]\n\t\toptions := *parseLimitedGrantArguments(keySlice, argSlice)\n\n\t\tswitch c.Args().Get(1) {\n\t\tcase \"user\":\n\t\t\tobjType = \"user\"\n\t\tcase \"team\":\n\t\t\tobjType = \"team\"\n\t\tcase \"tool\":\n\t\t\tobjType = \"tool\"\n\t\tdefault:\n\t\t\tSlog.Fatal(\"Syntax error\")\n\t\t}\n\t\treq.Permission = c.Args().Get(0)\n\t\turl.Path = fmt.Sprintf(\"\/permissions\/%s\/%s\", objType, c.Args().Get(2))\n\n\t\tfor k, v := range options {\n\t\t\tswitch k {\n\t\t\tcase \"repository\":\n\t\t\t\treq.Grant.Repository = v\n\t\t\tcase \"bucket\":\n\t\t\t\treq.Grant.Bucket = v\n\t\t\tcase \"group\":\n\t\t\t\treq.Grant.Group = v\n\t\t\tcase \"cluster\":\n\t\t\t\treq.Grant.Cluster = v\n\t\t\t}\n\t\t}\n\n\t\t_, err := resty.New().\n\t\t\tSetRedirectPolicy(resty.FlexibleRedirectPolicy(3)).\n\t\t\tR().\n\t\t\tSetBody(req).\n\t\t\tPatch(url.String())\n\t\tif err != nil {\n\t\t\tSlog.Fatal(err)\n\t\t}\n\t*\/\n\treturn nil\n}\n\nfunc cmdPermissionGrantSystem(c *cli.Context) error {\n\treturn nil\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>package consensus\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/bytom\/protocol\/bc\"\n)\n\n\/\/consensus variables\nconst (\n\t\/\/ Max gas that one block contains\n\tMaxBlockGas = uint64(100000000)\n\n\t\/\/config parameter for coinbase reward\n\tCoinbasePendingBlockNumber = uint64(6)\n\tsubsidyReductionInterval = uint64(560640)\n\tbaseSubsidy = uint64(41250000000)\n\tInitialBlockSubsidy = uint64(1470000000000000000)\n\n\t\/\/ config for pow mining\n\tPowMinBits = uint64(2305843009213861724)\n\tBlocksPerRetarget = uint64(128)\n\tTargetSecondsPerBlock = uint64(60)\n\tSeedPerRetarget = uint64(128)\n\n\t\/\/ MaxTimeOffsetSeconds is the maximum number of seconds a block time is allowed to be ahead of the current time\n\tMaxTimeOffsetSeconds = uint64(60 * 60)\n\tMedianTimeBlocks = 11\n\n\tPayToWitnessPubKeyHashDataSize = 20\n\tPayToWitnessScriptHashDataSize = 32\n\n\tCoinbaseArbitrarySizeLimit = 128\n\n\tVMGasRate = int64(1000)\n\tStorageGasRate = int64(5)\n\tMaxGasAmount = int64(100000)\n\tDefaultGasCredit = int64(80000)\n\n\tBTMAlias = \"BTM\"\n\tBTMSymbol = \"BTM\"\n\tBTMDecimals = 8\n\tBTMDescription = `Bytom Official Issue`\n)\n\n\/\/ BTMAssetID is BTM's asset id, the soul asset of Bytom\nvar BTMAssetID = &bc.AssetID{\n\tV0: uint64(18446744073709551615),\n\tV1: uint64(18446744073709551615),\n\tV2: uint64(18446744073709551615),\n\tV3: uint64(18446744073709551615),\n}\n\n\/\/ InitialSeed is SHA3-256 of Byte[0^32]\nvar InitialSeed = &bc.Hash{\n\tV0: uint64(11412844483649490393),\n\tV1: uint64(4614157290180302959),\n\tV2: uint64(1780246333311066183),\n\tV3: uint64(9357197556716379726),\n}\n\n\/\/ BTMDefinitionMap is the ....\nvar BTMDefinitionMap = map[string]interface{}{\n\t\"name\": BTMAlias,\n\t\"symbol\": BTMSymbol,\n\t\"decimals\": BTMDecimals,\n\t\"description\": BTMDescription,\n}\n\n\/\/ BlockSubsidy calculate the coinbase rewards on given block height\nfunc BlockSubsidy(height uint64) uint64 {\n\tif height == 0 {\n\t\treturn InitialBlockSubsidy\n\t}\n\treturn baseSubsidy >> uint(height\/subsidyReductionInterval)\n}\n\n\/\/ IsBech32SegwitPrefix returns whether the prefix is a known prefix for segwit\n\/\/ addresses on any default or registered network. This is used when decoding\n\/\/ an address string into a specific address type.\nfunc IsBech32SegwitPrefix(prefix string, params *Params) bool {\n\tprefix = strings.ToLower(prefix)\n\treturn prefix == params.Bech32HRPSegwit+\"1\"\n}\n\n\/\/ Params store the config for different network\ntype Params struct {\n\t\/\/ Name defines a human-readable identifier for the network.\n\tName string\n\tBech32HRPSegwit string\n}\n\nvar ActiveNetParams = MainNetParams\n\n\/\/ NetParams is the correspondence between chain_id and Params\nvar NetParams = map[string]Params{\n\t\"mainnet\": MainNetParams,\n\t\"testnet\": TestNetParams,\n}\n\n\/\/ MainNetParams is the config for production\nvar MainNetParams = Params{\n\tName: \"main\",\n\tBech32HRPSegwit: \"bm\",\n}\n\n\/\/ TestNetParams is the config for test-net\nvar TestNetParams = Params{\n\tName: \"test\",\n\tBech32HRPSegwit: \"tm\",\n}\n<commit_msg>low the gas rate<commit_after>package consensus\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/bytom\/protocol\/bc\"\n)\n\n\/\/consensus variables\nconst (\n\t\/\/ Max gas that one block contains\n\tMaxBlockGas = uint64(100000000)\n\n\t\/\/config parameter for coinbase reward\n\tCoinbasePendingBlockNumber = uint64(6)\n\tsubsidyReductionInterval = uint64(560640)\n\tbaseSubsidy = uint64(41250000000)\n\tInitialBlockSubsidy = uint64(1470000000000000000)\n\n\t\/\/ config for pow mining\n\tPowMinBits = uint64(2305843009213861724)\n\tBlocksPerRetarget = uint64(128)\n\tTargetSecondsPerBlock = uint64(60)\n\tSeedPerRetarget = uint64(128)\n\n\t\/\/ MaxTimeOffsetSeconds is the maximum number of seconds a block time is allowed to be ahead of the current time\n\tMaxTimeOffsetSeconds = uint64(60 * 60)\n\tMedianTimeBlocks = 11\n\n\tPayToWitnessPubKeyHashDataSize = 20\n\tPayToWitnessScriptHashDataSize = 32\n\n\tCoinbaseArbitrarySizeLimit = 128\n\n\tVMGasRate = int64(100)\n\tStorageGasRate = int64(5)\n\tMaxGasAmount = int64(100000)\n\tDefaultGasCredit = int64(80000)\n\n\tBTMAlias = \"BTM\"\n)\n\n\/\/ BTMAssetID is BTM's asset id, the soul asset of Bytom\nvar BTMAssetID = &bc.AssetID{\n\tV0: uint64(18446744073709551615),\n\tV1: uint64(18446744073709551615),\n\tV2: uint64(18446744073709551615),\n\tV3: uint64(18446744073709551615),\n}\n\n\/\/ InitialSeed is SHA3-256 of Byte[0^32]\nvar InitialSeed = &bc.Hash{\n\tV0: uint64(11412844483649490393),\n\tV1: uint64(4614157290180302959),\n\tV2: uint64(1780246333311066183),\n\tV3: uint64(9357197556716379726),\n}\n\n\/\/ BTMDefinitionMap is the ....\nvar BTMDefinitionMap = map[string]interface{}{\n\t\"name\": BTMAlias,\n\t\"symbol\": BTMAlias,\n\t\"decimals\": 8,\n\t\"description\": `Bytom Official Issue`,\n}\n\n\/\/ BlockSubsidy calculate the coinbase rewards on given block height\nfunc BlockSubsidy(height uint64) uint64 {\n\tif height == 0 {\n\t\treturn InitialBlockSubsidy\n\t}\n\treturn baseSubsidy >> uint(height\/subsidyReductionInterval)\n}\n\n\/\/ IsBech32SegwitPrefix returns whether the prefix is a known prefix for segwit\n\/\/ addresses on any default or registered network. This is used when decoding\n\/\/ an address string into a specific address type.\nfunc IsBech32SegwitPrefix(prefix string, params *Params) bool {\n\tprefix = strings.ToLower(prefix)\n\treturn prefix == params.Bech32HRPSegwit+\"1\"\n}\n\n\/\/ Params store the config for different network\ntype Params struct {\n\t\/\/ Name defines a human-readable identifier for the network.\n\tName string\n\tBech32HRPSegwit string\n}\n\nvar ActiveNetParams = MainNetParams\n\n\/\/ NetParams is the correspondence between chain_id and Params\nvar NetParams = map[string]Params{\n\t\"mainnet\": MainNetParams,\n\t\"testnet\": TestNetParams,\n}\n\n\/\/ MainNetParams is the config for production\nvar MainNetParams = Params{\n\tName: \"main\",\n\tBech32HRPSegwit: \"bm\",\n}\n\n\/\/ TestNetParams is the config for test-net\nvar TestNetParams = Params{\n\tName: \"test\",\n\tBech32HRPSegwit: \"tm\",\n}\n<|endoftext|>"} {"text":"<commit_before>package anticaptcha\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/jarcoal\/httpmock\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\t\/\/ mux is the HTTP request multiplexer used with the test server.\n\tmux *http.ServeMux\n\n\t\/\/ client is the GitHub client being tested.\n\tclient *Client\n\n\t\/\/ server is a test HTTP server used to provide mock API responses.\n\tserver *httptest.Server\n)\n\nfunc setup() {\n\t\/\/ test server\n\tmux = http.NewServeMux()\n\tserver = httptest.NewServer(mux)\n\n\t\/\/ github client configured to use test server\n\tclient = NewClient(\"123123\")\n}\n\n\/\/ teardown closes the test HTTP server.\nfunc teardown() {\n\tserver.Close()\n}\n\nfunc TestNewClient(t *testing.T) {\n\tc := NewClient(\"123123\")\n\tapiKey := \"123123\"\n\n\tassert.Equal(t, c.BaseURL.String(), defaultBaseURL)\n\tassert.Equal(t, c.APIKey, apiKey)\n}\n\nfunc TestNewRequest(t *testing.T) {\n\tc := NewClient(\"123123\")\n\n\tinURL, outURL := \"\/foo\", defaultBaseURL+\"\/foo\"\n\tinBody, outBody := strings.NewReader(`{\"Balance\":1}`+\"\\n\"), `{\"Balance\":1}`+\"\\n\"\n\treq, _ := c.NewRequest(\"GET\", inURL, inBody)\n\n\t\/\/ test that relative URL was expanded\n\tassert.Equal(t, req.URL.String(), outURL)\n\n\tbody, _ := ioutil.ReadAll(req.Body)\n\tassert.Equal(t, string(body), outBody)\n\n}\n\nfunc TestDo(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\tfmt.Fprint(w, 1.1)\n\t})\n\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\tbody := 1.0\n\tclient.Do(req)\n\n\twant := 1.0\n\tassert.Equal(t, body, want)\n\n\thttpmock.RegisterResponder(\"GET\", \"http:\/\/anti-captcha.com\/res.php?&action=getbalance\",\n\t\thttpmock.NewStringResponder(200, `ERROR_KEY_DOES_NOT_EXIST`))\n\treqTestKeyNotExst, _ := client.NewRequest(\"GET\", \"http:\/\/anti-captcha.com\/res.php?key=1&action=getbalance\", nil)\n\t_, err := client.Do(reqTestKeyNotExst)\n\tassert.Equal(t, err.Error(), \"Api key does not exist, plaese set correct api key from http:\/\/anti-captcha.com\")\n\n\thttpmock.RegisterResponder(\"GET\", \"http:\/\/anti-captcha.com\/res.php?&action=getbalance\",\n\t\thttpmock.NewStringResponder(200, `ERROR_WRONG_USER_KEY`))\n\treqTestWrongKey, _ := client.NewRequest(\"GET\", \"http:\/\/anti-captcha.com\/res.php?key=1&action=getbalance\", nil)\n\t_, err = client.Do(reqTestWrongKey)\n\tassert.Equal(t, err.Error(), \"Api key does not exist, plaese set correct api key from http:\/\/anti-captcha.com\")\n\n}\n\nfunc testMethod(t *testing.T, r *http.Request, want string) {\n\tassert.Equal(t, r.Method, want)\n}\n<commit_msg>add test for barUrl<commit_after>package anticaptcha\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/jarcoal\/httpmock\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\t\/\/ mux is the HTTP request multiplexer used with the test server.\n\tmux *http.ServeMux\n\n\t\/\/ client is the GitHub client being tested.\n\tclient *Client\n\n\t\/\/ server is a test HTTP server used to provide mock API responses.\n\tserver *httptest.Server\n)\n\nfunc setup() {\n\t\/\/ test server\n\tmux = http.NewServeMux()\n\tserver = httptest.NewServer(mux)\n\n\t\/\/ github client configured to use test server\n\tclient = NewClient(\"123123\")\n}\n\n\/\/ teardown closes the test HTTP server.\nfunc teardown() {\n\tserver.Close()\n}\n\nfunc TestNewClient(t *testing.T) {\n\tc := NewClient(\"123123\")\n\tapiKey := \"123123\"\n\n\tassert.Equal(t, c.BaseURL.String(), defaultBaseURL)\n\tassert.Equal(t, c.APIKey, apiKey)\n}\n\nfunc TestNewRequest(t *testing.T) {\n\tc := NewClient(\"123123\")\n\n\tinURL, outURL := \"\/foo\", defaultBaseURL+\"\/foo\"\n\tinBody, outBody := strings.NewReader(`{\"Balance\":1}`+\"\\n\"), `{\"Balance\":1}`+\"\\n\"\n\treq, _ := c.NewRequest(\"GET\", inURL, inBody)\n\n\t\/\/ test that relative URL was expanded\n\tassert.Equal(t, req.URL.String(), outURL)\n\n\tbody, _ := ioutil.ReadAll(req.Body)\n\tassert.Equal(t, string(body), outBody)\n\n}\n\nfunc TestNewRequest_badURL(t *testing.T) {\n\tc := NewClient(\"\")\n\t_, err := c.NewRequest(\"GET\", \":\", nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestDo(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\tfmt.Fprint(w, 1.1)\n\t})\n\n\treq, _ := client.NewRequest(\"GET\", \"\/\", nil)\n\tbody := 1.0\n\tclient.Do(req)\n\n\twant := 1.0\n\tassert.Equal(t, body, want)\n\n\thttpmock.RegisterResponder(\"GET\", \"http:\/\/anti-captcha.com\/res.php?&action=getbalance\",\n\t\thttpmock.NewStringResponder(200, `ERROR_KEY_DOES_NOT_EXIST`))\n\treqTestKeyNotExst, _ := client.NewRequest(\"GET\", \"http:\/\/anti-captcha.com\/res.php?key=1&action=getbalance\", nil)\n\t_, err := client.Do(reqTestKeyNotExst)\n\tassert.Equal(t, err.Error(), \"Api key does not exist, plaese set correct api key from http:\/\/anti-captcha.com\")\n\n\thttpmock.RegisterResponder(\"GET\", \"http:\/\/anti-captcha.com\/res.php?&action=getbalance\",\n\t\thttpmock.NewStringResponder(200, `ERROR_WRONG_USER_KEY`))\n\treqTestWrongKey, _ := client.NewRequest(\"GET\", \"http:\/\/anti-captcha.com\/res.php?key=1&action=getbalance\", nil)\n\t_, err = client.Do(reqTestWrongKey)\n\tassert.Equal(t, err.Error(), \"Api key does not exist, plaese set correct api key from http:\/\/anti-captcha.com\")\n\n}\n\nfunc testMethod(t *testing.T, r *http.Request, want string) {\n\tassert.Equal(t, r.Method, want)\n}\n\nfunc testURLParseError(t *testing.T, err error) {\n\tif err == nil {\n\t\tt.Errorf(\"Expected error to be returned\")\n\t}\n\tif err, ok := err.(*url.Error); !ok || err.Op != \"parse\" {\n\t\tt.Errorf(\"Expected URL parse error, got %+v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/jcelliott\/lumber\"\n\t\"github.com\/nanobox-io\/golang-docker-client\"\n\n\t\"github.com\/nanobox-io\/nanobox\/models\"\n\t\"github.com\/nanobox-io\/nanobox\/processors\/component\"\n\tprocess_provider \"github.com\/nanobox-io\/nanobox\/processors\/provider\"\n\t\"github.com\/nanobox-io\/nanobox\/util\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/dhcp\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/display\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/locker\"\n)\n\n\/\/ Stop will stop all services associated with an app\nfunc Stop(appModel *models.App) error {\n\tlocker.LocalLock()\n\tdefer locker.LocalUnlock()\n\n\t\/\/ short-circuit if the app is already down\n\t\/\/ TODO: also check if any containers are running\n\tif appModel.Status != \"up\" {\n\t\treturn nil\n\t}\n\n\t\/\/ load the env for the display context\n\tenvModel, err := appModel.Env()\n\tif err != nil {\n\t\tlumber.Error(\"app:Stop:models.App.Env()\")\n\t\treturn util.ErrorAppend(err, \"failed to load app env\")\n\t}\n\n\tdisplay.OpenContext(\"%s (%s)\", envModel.Name, appModel.DisplayName())\n\tdefer display.CloseContext()\n\n\t\/\/ initialize docker for the provider\n\tif err := process_provider.Init(); err != nil {\n\t\treturn util.ErrorAppend(err, \"failed to initialize docker environment\")\n\t}\n\n\t\/\/ stop all app components\n\tif err := component.StopAll(appModel); err != nil {\n\t\treturn util.ErrorAppend(err, \"failed to stop all app components\")\n\t}\n\n\tdisplay.StartTask(\"Pausing App\")\n\tdisplay.StopTask()\n\n\t\/\/ stop any dev containers\n\tstopDevContainer(appModel)\n\n\t\/\/ set the status to down\n\tappModel.Status = \"down\"\n\tif err := appModel.Save(); err != nil {\n\t\tlumber.Error(\"app:Stop:models.App.Save()\")\n\t\treturn util.ErrorAppend(err, \"failed to persist app status\")\n\t}\n\n\treturn nil\n}\n\nfunc stopDevContainer(appModel *models.App) error {\n\t\/\/ grab the container info\n\tcontainer, err := docker.GetContainer(fmt.Sprintf(\"nanobox_%s\", appModel.ID))\n\tif err != nil {\n\t\t\/\/ if we cant get the container it may have been removed by someone else\n\t\t\/\/ just return here\n\t\treturn nil\n\t}\n\n\t\/\/ remove the container\n\tif err := docker.ContainerRemove(container.ID); err != nil {\n\t\tlumber.Error(\"dev:console:teardown:docker.ContainerRemove(%s): %s\", container.ID, err)\n\t\treturn util.ErrorAppend(err, \"failed to remove dev container\")\n\t}\n\n\t\/\/ extract the container IP\n\tip := docker.GetIP(container)\n\n\t\/\/ return the container IP back to the IP pool\n\tif err := dhcp.ReturnIP(net.ParseIP(ip)); err != nil {\n\t\tlumber.Error(\"dev:console:teardown:dhcp.ReturnIP(%s): %s\", ip, err)\n\n\t\tlumber.Error(\"An error occurred durring dev console teadown:%s\", err.Error())\n\t\treturn util.ErrorAppend(err, \"failed to return unused IP back to pool\")\n\t}\n\treturn nil\n}\n<commit_msg>do not return a dev containers ip when nanobox stop is stopping dev containers<commit_after>package app\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/jcelliott\/lumber\"\n\t\"github.com\/nanobox-io\/golang-docker-client\"\n\n\t\"github.com\/nanobox-io\/nanobox\/models\"\n\t\"github.com\/nanobox-io\/nanobox\/processors\/component\"\n\tprocess_provider \"github.com\/nanobox-io\/nanobox\/processors\/provider\"\n\t\"github.com\/nanobox-io\/nanobox\/util\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/dhcp\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/display\"\n\t\"github.com\/nanobox-io\/nanobox\/util\/locker\"\n)\n\n\/\/ Stop will stop all services associated with an app\nfunc Stop(appModel *models.App) error {\n\tlocker.LocalLock()\n\tdefer locker.LocalUnlock()\n\n\t\/\/ short-circuit if the app is already down\n\t\/\/ TODO: also check if any containers are running\n\tif appModel.Status != \"up\" {\n\t\treturn nil\n\t}\n\n\t\/\/ load the env for the display context\n\tenvModel, err := appModel.Env()\n\tif err != nil {\n\t\tlumber.Error(\"app:Stop:models.App.Env()\")\n\t\treturn util.ErrorAppend(err, \"failed to load app env\")\n\t}\n\n\tdisplay.OpenContext(\"%s (%s)\", envModel.Name, appModel.DisplayName())\n\tdefer display.CloseContext()\n\n\t\/\/ initialize docker for the provider\n\tif err := process_provider.Init(); err != nil {\n\t\treturn util.ErrorAppend(err, \"failed to initialize docker environment\")\n\t}\n\n\t\/\/ stop all app components\n\tif err := component.StopAll(appModel); err != nil {\n\t\treturn util.ErrorAppend(err, \"failed to stop all app components\")\n\t}\n\n\tdisplay.StartTask(\"Pausing App\")\n\tdisplay.StopTask()\n\n\t\/\/ stop any dev containers\n\tstopDevContainer(appModel)\n\n\t\/\/ set the status to down\n\tappModel.Status = \"down\"\n\tif err := appModel.Save(); err != nil {\n\t\tlumber.Error(\"app:Stop:models.App.Save()\")\n\t\treturn util.ErrorAppend(err, \"failed to persist app status\")\n\t}\n\n\treturn nil\n}\n\nfunc stopDevContainer(appModel *models.App) error {\n\t\/\/ grab the container info\n\tcontainer, err := docker.GetContainer(fmt.Sprintf(\"nanobox_%s\", appModel.ID))\n\tif err != nil {\n\t\t\/\/ if we cant get the container it may have been removed by someone else\n\t\t\/\/ just return here\n\t\treturn nil\n\t}\n\n\t\/\/ remove the container\n\tif err := docker.ContainerRemove(container.ID); err != nil {\n\t\tlumber.Error(\"dev:console:teardown:docker.ContainerRemove(%s): %s\", container.ID, err)\n\t\treturn util.ErrorAppend(err, \"failed to remove dev container\")\n\t}\n\n\t\/\/ \/\/ extract the container IP\n\t\/\/ ip := docker.GetIP(container)\n\n\t\/\/ \/\/ return the container IP back to the IP pool\n\t\/\/ if err := dhcp.ReturnIP(net.ParseIP(ip)); err != nil {\n\t\/\/ \tlumber.Error(\"dev:console:teardown:dhcp.ReturnIP(%s): %s\", ip, err)\n\n\t\/\/ \tlumber.Error(\"An error occurred durring dev console teadown:%s\", err.Error())\n\t\/\/ \treturn util.ErrorAppend(err, \"failed to return unused IP back to pool\")\n\t\/\/ }\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package contexts\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/yunify\/qingstor-sdk-go\/v3\/config\"\n\t\"github.com\/yunify\/qingstor-sdk-go\/v3\/service\"\n\n\t\"github.com\/yunify\/qsctl\/constants\"\n)\n\nvar (\n\t\/\/ Service is the global service.\n\tService *service.Service\n\t\/\/ Bucket is the bucket for bucket operation.\n\tBucket *service.Bucket\n)\n\n\/\/ Available flags.\nvar (\n\t\/\/ Global flags.\n\tBench bool\n\t\/\/ Copy commands flags.\n\tExpectSize int64\n\tMaximumMemoryContent int64\n)\n\n\/\/ SetupServices will setup services.\nfunc SetupServices() (err error) {\n\tif Service != nil {\n\t\treturn\n\t}\n\n\tcfg, err := config.New(\n\t\tviper.GetString(constants.ConfigAccessKeyID),\n\t\tviper.GetString(constants.ConfigSecretAccessKey),\n\t)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Init config failed [%v]\", err)\n\t\treturn\n\t}\n\tcfg.Host = viper.GetString(constants.ConfigHost)\n\tcfg.Port = viper.GetInt(constants.ConfigPort)\n\tcfg.Protocol = viper.GetString(constants.ConfigProtocol)\n\tcfg.ConnectionRetries = viper.GetInt(constants.ConfigConnectionRetries)\n\tcfg.LogLevel = viper.GetString(constants.ConfigLogLevel)\n\n\tService, err = service.Init(cfg)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Init service failed [%v]\", err)\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ SetupBuckets will create a new bucket instance.\nfunc SetupBuckets(name, zone string) (bucket *service.Bucket, err error) {\n\terr = SetupServices()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif Bucket != nil {\n\t\treturn Bucket, nil\n\t}\n\n\tif zone != \"\" {\n\t\tBucket, _ = Service.Bucket(name, zone)\n\t\treturn Bucket, nil\n\t}\n\n\tclient := &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\turl := fmt.Sprintf(\"%s:\/\/%s.%s:%d\",\n\t\tviper.GetString(constants.ConfigProtocol),\n\t\tname,\n\t\tviper.GetString(constants.ConfigHost),\n\t\tviper.GetInt(constants.ConfigPort))\n\n\tr, err := client.Head(url)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Head location failed [%v]\", err)\n\t\treturn\n\t}\n\tif r.StatusCode != http.StatusOK {\n\t\tlog.Infof(\"Detect bucket location failed, please check your input\")\n\t\treturn nil, constants.ErrorQsPathNotFound\n\t}\n\n\t\/\/ Example URL: https:\/\/bucket.zone.qingstor.com\n\tzone = strings.Split(r.Header.Get(\"Location\"), \".\")[1]\n\tBucket, _ = Service.Bucket(name, zone)\n\treturn Bucket, nil\n}\n<commit_msg>contexts: Head bucket will return 307 instead of 200<commit_after>package contexts\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/yunify\/qingstor-sdk-go\/v3\/config\"\n\t\"github.com\/yunify\/qingstor-sdk-go\/v3\/service\"\n\n\t\"github.com\/yunify\/qsctl\/constants\"\n)\n\nvar (\n\t\/\/ Service is the global service.\n\tService *service.Service\n\t\/\/ Bucket is the bucket for bucket operation.\n\tBucket *service.Bucket\n)\n\n\/\/ Available flags.\nvar (\n\t\/\/ Global flags.\n\tBench bool\n\t\/\/ Copy commands flags.\n\tExpectSize int64\n\tMaximumMemoryContent int64\n)\n\n\/\/ SetupServices will setup services.\nfunc SetupServices() (err error) {\n\tif Service != nil {\n\t\treturn\n\t}\n\n\tcfg, err := config.New(\n\t\tviper.GetString(constants.ConfigAccessKeyID),\n\t\tviper.GetString(constants.ConfigSecretAccessKey),\n\t)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Init config failed [%v]\", err)\n\t\treturn\n\t}\n\tcfg.Host = viper.GetString(constants.ConfigHost)\n\tcfg.Port = viper.GetInt(constants.ConfigPort)\n\tcfg.Protocol = viper.GetString(constants.ConfigProtocol)\n\tcfg.ConnectionRetries = viper.GetInt(constants.ConfigConnectionRetries)\n\tcfg.LogLevel = viper.GetString(constants.ConfigLogLevel)\n\n\tService, err = service.Init(cfg)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Init service failed [%v]\", err)\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/ SetupBuckets will create a new bucket instance.\nfunc SetupBuckets(name, zone string) (bucket *service.Bucket, err error) {\n\terr = SetupServices()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif Bucket != nil {\n\t\treturn Bucket, nil\n\t}\n\n\tif zone != \"\" {\n\t\tBucket, _ = Service.Bucket(name, zone)\n\t\treturn Bucket, nil\n\t}\n\n\tclient := &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\turl := fmt.Sprintf(\"%s:\/\/%s.%s:%d\",\n\t\tviper.GetString(constants.ConfigProtocol),\n\t\tname,\n\t\tviper.GetString(constants.ConfigHost),\n\t\tviper.GetInt(constants.ConfigPort))\n\n\tr, err := client.Head(url)\n\tif err != nil {\n\t\tlog.Errorf(\"contexts: Head location failed [%v]\", err)\n\t\treturn\n\t}\n\tif r.StatusCode != http.StatusTemporaryRedirect {\n\t\tlog.Infof(\"Detect bucket location failed, please check your input\")\n\t\treturn nil, constants.ErrorQsPathNotFound\n\t}\n\n\t\/\/ Example URL: https:\/\/bucket.zone.qingstor.com\n\tzone = strings.Split(r.Header.Get(\"Location\"), \".\")[1]\n\tBucket, _ = Service.Bucket(name, zone)\n\treturn Bucket, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"time\"\n\n\t\"github.com\/lfq7413\/tomato\/config\"\n\t\"github.com\/lfq7413\/tomato\/errs\"\n\t\"github.com\/lfq7413\/tomato\/files\"\n\t\"github.com\/lfq7413\/tomato\/orm\"\n\t\"github.com\/lfq7413\/tomato\/rest\"\n\t\"github.com\/lfq7413\/tomato\/types\"\n\t\"github.com\/lfq7413\/tomato\/utils\"\n)\n\n\/\/ LoginController 处理 \/login 接口的请求\ntype LoginController struct {\n\tClassesController\n}\n\n\/\/ HandleLogIn 处理登录请求\n\/\/ @router \/ [get]\nfunc (l *LoginController) HandleLogIn() {\n\tvar username, password string\n\tif l.JSONBody != nil && l.JSONBody[\"username\"] != nil {\n\t\tusername = utils.S(l.JSONBody[\"username\"])\n\t} else {\n\t\tusername = l.Query[\"username\"]\n\t}\n\tif l.JSONBody != nil && l.JSONBody[\"password\"] != nil {\n\t\tpassword = utils.S(l.JSONBody[\"password\"])\n\t} else {\n\t\tpassword = l.Query[\"password\"]\n\t}\n\n\tif username == \"\" {\n\t\tl.HandleError(errs.E(errs.UsernameMissing, \"username is required.\"), 0)\n\t\treturn\n\t}\n\tif password == \"\" {\n\t\tl.HandleError(errs.E(errs.PasswordMissing, \"password is required.\"), 0)\n\t\treturn\n\t}\n\n\twhere := types.M{\n\t\t\"username\": username,\n\t}\n\tresults, err := orm.TomatoDBController.Find(\"_User\", where, types.M{})\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\tif results == nil || len(results) == 0 {\n\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Invalid username\/password.\"), 0)\n\t\treturn\n\t}\n\tuser := utils.M(results[0])\n\n\tvar emailVerified bool\n\tif _, ok := user[\"emailVerified\"]; ok {\n\t\tif v, ok := user[\"emailVerified\"].(bool); ok {\n\t\t\temailVerified = v\n\t\t}\n\t}\n\tif config.TConfig.VerifyUserEmails && config.TConfig.PreventLoginWithUnverifiedEmail && emailVerified == false {\n\t\t\/\/ 拒绝未验证邮箱的用户登录\n\t\tl.HandleError(errs.E(errs.EmailNotFound, \"User email is not verified.\"), 0)\n\t\treturn\n\t}\n\n\t\/\/ TODO 换用高强度的加密方式\n\tcorrect := utils.Compare(password, utils.S(user[\"password\"]))\n\taccountLockoutPolicy := rest.NewAccountLockout(utils.S(user[\"username\"]))\n\terr = accountLockoutPolicy.HandleLoginAttempt(correct)\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\tif correct == false {\n\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Invalid username\/password.\"), 0)\n\t\treturn\n\t}\n\n\t\/\/ 检测密码是否过期\n\tif config.TConfig.PasswordPolicy && config.TConfig.MaxPasswordAge > 0 {\n\t\tif changedAt, ok := user[\"_password_changed_at\"].(time.Time); ok {\n\t\t\t\/\/ 密码过期时间戳存在,判断是否过期\n\t\t\texpiresAt := changedAt.Add(time.Duration(config.TConfig.MaxPasswordAge) * 24 * time.Hour)\n\t\t\tif expiresAt.UnixNano() < time.Now().UnixNano() {\n\t\t\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Your password has expired. Please reset your password.\"), 0)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ 在启用密码过期之前的数据,需要增加该字段\n\t\t\tquery := types.M{\"username\": user[\"username\"]}\n\t\t\tupdate := types.M{\"_password_changed_at\": utils.TimetoString(time.Now().UTC())}\n\t\t\torm.TomatoDBController.Update(\"_User\", query, update, types.M{}, false)\n\t\t}\n\t}\n\n\ttoken := \"r:\" + utils.CreateToken()\n\tuser[\"sessionToken\"] = token\n\tdelete(user, \"password\")\n\n\tif user[\"authData\"] != nil {\n\t\tauthData := utils.M(user[\"authData\"])\n\t\tfor k, v := range authData {\n\t\t\tif v == nil {\n\t\t\t\tdelete(authData, k)\n\t\t\t}\n\t\t}\n\t\tif len(authData) == 0 {\n\t\t\tdelete(user, \"authData\")\n\t\t}\n\t}\n\n\t\/\/ 展开文件信息\n\tfiles.ExpandFilesInObject(user)\n\n\texpiresAt := config.GenerateSessionExpiresAt()\n\tusr := types.M{\n\t\t\"__type\": \"Pointer\",\n\t\t\"className\": \"_User\",\n\t\t\"objectId\": user[\"objectId\"],\n\t}\n\tcreatedWith := types.M{\n\t\t\"action\": \"login\",\n\t\t\"authProvider\": \"password\",\n\t}\n\tsessionData := types.M{\n\t\t\"sessionToken\": token,\n\t\t\"user\": usr,\n\t\t\"createdWith\": createdWith,\n\t\t\"restricted\": false,\n\t\t\"expiresAt\": utils.TimetoString(expiresAt),\n\t}\n\tif l.Info.InstallationID != \"\" {\n\t\tsessionData[\"installationId\"] = l.Info.InstallationID\n\t}\n\t\/\/ 为新登录用户创建 sessionToken\n\twrite, err := rest.NewWrite(rest.Master(), \"_Session\", nil, sessionData, nil, l.Info.ClientSDK)\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\t_, err = write.Execute()\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\n\tl.Data[\"json\"] = user\n\tl.ServeJSON()\n\n}\n\n\/\/ Post ...\n\/\/ @router \/ [post]\nfunc (l *LoginController) Post() {\n\tl.ClassesController.Post()\n}\n\n\/\/ Delete ...\n\/\/ @router \/ [delete]\nfunc (l *LoginController) Delete() {\n\tl.ClassesController.Delete()\n}\n\n\/\/ Put ...\n\/\/ @router \/ [put]\nfunc (l *LoginController) Put() {\n\tl.ClassesController.Put()\n}\n<commit_msg>修改 expiresAt 字段的数据格式<commit_after>package controllers\n\nimport (\n\t\"time\"\n\n\t\"github.com\/lfq7413\/tomato\/config\"\n\t\"github.com\/lfq7413\/tomato\/errs\"\n\t\"github.com\/lfq7413\/tomato\/files\"\n\t\"github.com\/lfq7413\/tomato\/orm\"\n\t\"github.com\/lfq7413\/tomato\/rest\"\n\t\"github.com\/lfq7413\/tomato\/types\"\n\t\"github.com\/lfq7413\/tomato\/utils\"\n)\n\n\/\/ LoginController 处理 \/login 接口的请求\ntype LoginController struct {\n\tClassesController\n}\n\n\/\/ HandleLogIn 处理登录请求\n\/\/ @router \/ [get]\nfunc (l *LoginController) HandleLogIn() {\n\tvar username, password string\n\tif l.JSONBody != nil && l.JSONBody[\"username\"] != nil {\n\t\tusername = utils.S(l.JSONBody[\"username\"])\n\t} else {\n\t\tusername = l.Query[\"username\"]\n\t}\n\tif l.JSONBody != nil && l.JSONBody[\"password\"] != nil {\n\t\tpassword = utils.S(l.JSONBody[\"password\"])\n\t} else {\n\t\tpassword = l.Query[\"password\"]\n\t}\n\n\tif username == \"\" {\n\t\tl.HandleError(errs.E(errs.UsernameMissing, \"username is required.\"), 0)\n\t\treturn\n\t}\n\tif password == \"\" {\n\t\tl.HandleError(errs.E(errs.PasswordMissing, \"password is required.\"), 0)\n\t\treturn\n\t}\n\n\twhere := types.M{\n\t\t\"username\": username,\n\t}\n\tresults, err := orm.TomatoDBController.Find(\"_User\", where, types.M{})\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\tif results == nil || len(results) == 0 {\n\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Invalid username\/password.\"), 0)\n\t\treturn\n\t}\n\tuser := utils.M(results[0])\n\n\tvar emailVerified bool\n\tif _, ok := user[\"emailVerified\"]; ok {\n\t\tif v, ok := user[\"emailVerified\"].(bool); ok {\n\t\t\temailVerified = v\n\t\t}\n\t}\n\tif config.TConfig.VerifyUserEmails && config.TConfig.PreventLoginWithUnverifiedEmail && emailVerified == false {\n\t\t\/\/ 拒绝未验证邮箱的用户登录\n\t\tl.HandleError(errs.E(errs.EmailNotFound, \"User email is not verified.\"), 0)\n\t\treturn\n\t}\n\n\t\/\/ TODO 换用高强度的加密方式\n\tcorrect := utils.Compare(password, utils.S(user[\"password\"]))\n\taccountLockoutPolicy := rest.NewAccountLockout(utils.S(user[\"username\"]))\n\terr = accountLockoutPolicy.HandleLoginAttempt(correct)\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\tif correct == false {\n\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Invalid username\/password.\"), 0)\n\t\treturn\n\t}\n\n\t\/\/ 检测密码是否过期\n\tif config.TConfig.PasswordPolicy && config.TConfig.MaxPasswordAge > 0 {\n\t\tif changedAt, ok := user[\"_password_changed_at\"].(time.Time); ok {\n\t\t\t\/\/ 密码过期时间戳存在,判断是否过期\n\t\t\texpiresAt := changedAt.Add(time.Duration(config.TConfig.MaxPasswordAge) * 24 * time.Hour)\n\t\t\tif expiresAt.UnixNano() < time.Now().UnixNano() {\n\t\t\t\tl.HandleError(errs.E(errs.ObjectNotFound, \"Your password has expired. Please reset your password.\"), 0)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ 在启用密码过期之前的数据,需要增加该字段\n\t\t\tquery := types.M{\"username\": user[\"username\"]}\n\t\t\tupdate := types.M{\"_password_changed_at\": utils.TimetoString(time.Now().UTC())}\n\t\t\torm.TomatoDBController.Update(\"_User\", query, update, types.M{}, false)\n\t\t}\n\t}\n\n\ttoken := \"r:\" + utils.CreateToken()\n\tuser[\"sessionToken\"] = token\n\tdelete(user, \"password\")\n\n\tif user[\"authData\"] != nil {\n\t\tauthData := utils.M(user[\"authData\"])\n\t\tfor k, v := range authData {\n\t\t\tif v == nil {\n\t\t\t\tdelete(authData, k)\n\t\t\t}\n\t\t}\n\t\tif len(authData) == 0 {\n\t\t\tdelete(user, \"authData\")\n\t\t}\n\t}\n\n\t\/\/ 展开文件信息\n\tfiles.ExpandFilesInObject(user)\n\n\texpiresAt := config.GenerateSessionExpiresAt()\n\tusr := types.M{\n\t\t\"__type\": \"Pointer\",\n\t\t\"className\": \"_User\",\n\t\t\"objectId\": user[\"objectId\"],\n\t}\n\tcreatedWith := types.M{\n\t\t\"action\": \"login\",\n\t\t\"authProvider\": \"password\",\n\t}\n\tsessionData := types.M{\n\t\t\"sessionToken\": token,\n\t\t\"user\": usr,\n\t\t\"createdWith\": createdWith,\n\t\t\"restricted\": false,\n\t\t\"expiresAt\": types.M{\n\t\t\t\"__type\": \"Date\",\n\t\t\t\"iso\": utils.TimetoString(expiresAt),\n\t\t},\n\t}\n\tif l.Info.InstallationID != \"\" {\n\t\tsessionData[\"installationId\"] = l.Info.InstallationID\n\t}\n\t\/\/ 为新登录用户创建 sessionToken\n\twrite, err := rest.NewWrite(rest.Master(), \"_Session\", nil, sessionData, nil, l.Info.ClientSDK)\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\t_, err = write.Execute()\n\tif err != nil {\n\t\tl.HandleError(err, 0)\n\t\treturn\n\t}\n\n\tl.Data[\"json\"] = user\n\tl.ServeJSON()\n\n}\n\n\/\/ Post ...\n\/\/ @router \/ [post]\nfunc (l *LoginController) Post() {\n\tl.ClassesController.Post()\n}\n\n\/\/ Delete ...\n\/\/ @router \/ [delete]\nfunc (l *LoginController) Delete() {\n\tl.ClassesController.Delete()\n}\n\n\/\/ Put ...\n\/\/ @router \/ [put]\nfunc (l *LoginController) Put() {\n\tl.ClassesController.Put()\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage models\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n\t\"strings\"\n\n\th \"github.com\/ernestio\/api-gateway\/helpers\"\n\taes \"github.com\/ernestio\/crypto\/aes\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Project holds the project response from datacenter-store\ntype Project struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tCredentials map[string]interface{} `json:\"credentials,omitempty\"`\n\tEnvironments []string `json:\"environments,omitempty\"`\n\tMembers []Role `json:\"members,omitempty\"`\n}\n\n\/\/ Validate the project\nfunc (d *Project) Validate() error {\n\tif d.Name == \"\" {\n\t\treturn errors.New(\"Project name is empty\")\n\t}\n\n\tif !IsAlphaNumeric(d.Name) {\n\t\treturn errors.New(\"Project Name contains invalid characters\")\n\t}\n\n\tif strings.Contains(d.Name, EnvNameSeparator) {\n\t\treturn errors.New(\"Project name does not support char '\" + EnvNameSeparator + \"' as part of its name\")\n\t}\n\n\tif d.Type == \"\" {\n\t\treturn errors.New(\"Project type is empty\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Map : maps a project from a request's body and validates the input\nfunc (d *Project) Map(data []byte) error {\n\tif err := json.Unmarshal(data, &d); err != nil {\n\t\th.L.WithFields(logrus.Fields{\n\t\t\t\"input\": string(data),\n\t\t}).Error(\"Couldn't unmarshal given input\")\n\t\treturn NewError(InvalidInputCode, \"Invalid input\")\n\t}\n\n\treturn nil\n}\n\n\/\/ FindByName : Searches for all projects with a name equal to the specified\nfunc (d *Project) FindByName(name string) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"name\"] = name\n\treturn NewBaseModel(d.getStore()).GetBy(query, d)\n}\n\n\/\/ FindByID : Gets a model by its id\nfunc (d *Project) FindByID(id int) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"id\"] = id\n\treturn NewBaseModel(d.getStore()).GetBy(query, d)\n}\n\n\/\/ FindByIDs : Gets a model by its id\nfunc (d *Project) FindByIDs(ids []string, ds *[]Project) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"names\"] = ids\n\treturn NewBaseModel(d.getStore()).FindBy(query, ds)\n}\n\n\/\/ FindAll : Searches for all entities on the store current user\n\/\/ has access to\nfunc (d *Project) FindAll(au User, projects *[]Project) (err error) {\n\tquery := make(map[string]interface{})\n\treturn NewBaseModel(d.getStore()).FindBy(query, projects)\n}\n\n\/\/ Save : calls datacenter.set with the marshalled current entity\nfunc (d *Project) Save() (err error) {\n\treturn NewBaseModel(d.getStore()).Save(d)\n}\n\n\/\/ Delete : will delete a project by its id\nfunc (d *Project) Delete() (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"id\"] = d.ID\n\treturn NewBaseModel(d.getStore()).Delete(query)\n}\n\n\/\/ Redact : removes all sensitive fields from the return\n\/\/ data before outputting to the user\nfunc (d *Project) Redact() {\n\tfor k := range d.Credentials {\n\t\tif k != \"region\" && k != \"vdc\" && k != \"username\" && k != \"vcloud_url\" {\n\t\t\tdelete(d.Credentials, k)\n\t\t}\n\t}\n}\n\n\/\/ Improve : adds extra data to this entity\nfunc (d *Project) Improve() {\n}\n\n\/\/ Envs : Get the envs related with current project\nfunc (d *Project) Envs() (envs []Env, err error) {\n\tvar s Env\n\terr = s.FindByProjectID(d.ID, &envs)\n\treturn\n}\n\n\/\/ GetID : ID getter\nfunc (d *Project) GetID() string {\n\treturn d.Name\n}\n\n\/\/ GetType : Gets the resource type\nfunc (d *Project) GetType() string {\n\treturn \"project\"\n}\n\n\/\/ Override : override not empty parameters with the given project ones\nfunc (d *Project) Override(dt Project) {\n\tfor k, v := range dt.Credentials {\n\t\td.Credentials[k] = v\n\t}\n}\n\n\/\/ Encrypt : encrypts sensible data\nfunc (d *Project) Encrypt() {\n\tfor k, v := range d.Credentials {\n\t\txc, ok := v.(string)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\td.Credentials[k], _ = crypt(xc)\n\t}\n}\n\nfunc decrypt(s string) (string, error) {\n\tcrypto := aes.New()\n\tkey := os.Getenv(\"ERNEST_CRYPTO_KEY\")\n\tif s != \"\" {\n\t\tencrypted, err := crypto.Decrypt(s, key)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\ts = encrypted\n\t}\n\n\treturn s, nil\n}\n\nfunc crypt(s string) (string, error) {\n\tcrypto := aes.New()\n\tkey := os.Getenv(\"ERNEST_CRYPTO_KEY\")\n\tif s != \"\" {\n\t\tencrypted, err := crypto.Encrypt(s, key)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\ts = encrypted\n\t}\n\n\treturn s, nil\n}\n\nfunc (d *Project) getStore() string {\n\treturn \"datacenter\"\n}\n\n\/\/ IsAzure : check if the current project is of type azure\nfunc (d *Project) IsAzure() bool {\n\treturn (d.Type == \"azure\" || d.Type == \"azure-fake\")\n}\n<commit_msg>added validation on project type<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage models\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n\t\"strings\"\n\n\th \"github.com\/ernestio\/api-gateway\/helpers\"\n\taes \"github.com\/ernestio\/crypto\/aes\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Project holds the project response from datacenter-store\ntype Project struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tType string `json:\"type\"`\n\tCredentials map[string]interface{} `json:\"credentials,omitempty\"`\n\tEnvironments []string `json:\"environments,omitempty\"`\n\tMembers []Role `json:\"members,omitempty\"`\n}\n\n\/\/ Validate the project\nfunc (d *Project) Validate() error {\n\tif d.Name == \"\" {\n\t\treturn errors.New(\"Project name is empty\")\n\t}\n\n\tif !IsAlphaNumeric(d.Name) {\n\t\treturn errors.New(\"Project Name contains invalid characters\")\n\t}\n\n\tif strings.Contains(d.Name, EnvNameSeparator) {\n\t\treturn errors.New(\"Project name does not support char '\" + EnvNameSeparator + \"' as part of its name\")\n\t}\n\n\tif d.Type == \"\" {\n\t\treturn errors.New(\"Project type is empty\")\n\t}\n\n\tswitch d.Type {\n\tcase \"aws\", \"azure\", \"vcloud\":\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"Project type is not one of the following: 'aws', 'azure' or 'vcloud'\")\n\t}\n}\n\n\/\/ Map : maps a project from a request's body and validates the input\nfunc (d *Project) Map(data []byte) error {\n\tif err := json.Unmarshal(data, &d); err != nil {\n\t\th.L.WithFields(logrus.Fields{\n\t\t\t\"input\": string(data),\n\t\t}).Error(\"Couldn't unmarshal given input\")\n\t\treturn NewError(InvalidInputCode, \"Invalid input\")\n\t}\n\n\treturn nil\n}\n\n\/\/ FindByName : Searches for all projects with a name equal to the specified\nfunc (d *Project) FindByName(name string) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"name\"] = name\n\treturn NewBaseModel(d.getStore()).GetBy(query, d)\n}\n\n\/\/ FindByID : Gets a model by its id\nfunc (d *Project) FindByID(id int) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"id\"] = id\n\treturn NewBaseModel(d.getStore()).GetBy(query, d)\n}\n\n\/\/ FindByIDs : Gets a model by its id\nfunc (d *Project) FindByIDs(ids []string, ds *[]Project) (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"names\"] = ids\n\treturn NewBaseModel(d.getStore()).FindBy(query, ds)\n}\n\n\/\/ FindAll : Searches for all entities on the store current user\n\/\/ has access to\nfunc (d *Project) FindAll(au User, projects *[]Project) (err error) {\n\tquery := make(map[string]interface{})\n\treturn NewBaseModel(d.getStore()).FindBy(query, projects)\n}\n\n\/\/ Save : calls datacenter.set with the marshalled current entity\nfunc (d *Project) Save() (err error) {\n\treturn NewBaseModel(d.getStore()).Save(d)\n}\n\n\/\/ Delete : will delete a project by its id\nfunc (d *Project) Delete() (err error) {\n\tquery := make(map[string]interface{})\n\tquery[\"id\"] = d.ID\n\treturn NewBaseModel(d.getStore()).Delete(query)\n}\n\n\/\/ Redact : removes all sensitive fields from the return\n\/\/ data before outputting to the user\nfunc (d *Project) Redact() {\n\tfor k := range d.Credentials {\n\t\tif k != \"region\" && k != \"vdc\" && k != \"username\" && k != \"vcloud_url\" {\n\t\t\tdelete(d.Credentials, k)\n\t\t}\n\t}\n}\n\n\/\/ Improve : adds extra data to this entity\nfunc (d *Project) Improve() {\n}\n\n\/\/ Envs : Get the envs related with current project\nfunc (d *Project) Envs() (envs []Env, err error) {\n\tvar s Env\n\terr = s.FindByProjectID(d.ID, &envs)\n\treturn\n}\n\n\/\/ GetID : ID getter\nfunc (d *Project) GetID() string {\n\treturn d.Name\n}\n\n\/\/ GetType : Gets the resource type\nfunc (d *Project) GetType() string {\n\treturn \"project\"\n}\n\n\/\/ Override : override not empty parameters with the given project ones\nfunc (d *Project) Override(dt Project) {\n\tfor k, v := range dt.Credentials {\n\t\td.Credentials[k] = v\n\t}\n}\n\n\/\/ Encrypt : encrypts sensible data\nfunc (d *Project) Encrypt() {\n\tfor k, v := range d.Credentials {\n\t\txc, ok := v.(string)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\td.Credentials[k], _ = crypt(xc)\n\t}\n}\n\nfunc decrypt(s string) (string, error) {\n\tcrypto := aes.New()\n\tkey := os.Getenv(\"ERNEST_CRYPTO_KEY\")\n\tif s != \"\" {\n\t\tencrypted, err := crypto.Decrypt(s, key)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\ts = encrypted\n\t}\n\n\treturn s, nil\n}\n\nfunc crypt(s string) (string, error) {\n\tcrypto := aes.New()\n\tkey := os.Getenv(\"ERNEST_CRYPTO_KEY\")\n\tif s != \"\" {\n\t\tencrypted, err := crypto.Encrypt(s, key)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\ts = encrypted\n\t}\n\n\treturn s, nil\n}\n\nfunc (d *Project) getStore() string {\n\treturn \"datacenter\"\n}\n\n\/\/ IsAzure : check if the current project is of type azure\nfunc (d *Project) IsAzure() bool {\n\treturn (d.Type == \"azure\" || d.Type == \"azure-fake\")\n}\n<|endoftext|>"} {"text":"<commit_before>package modules\n\nconst (\n\tAPIErrorCodeUnknown = iota\n\tAPIErrorCodeUnauthorized\n\tAPIErrorCodeDigestInvalid\n\tAPIErrorCodeSizeInvalid\n\tAPIErrorCodeNameInvalid\n\tAPIErrorCodeTagInvalid\n\tAPIErrorCodeNameUnknown\n\tAPIErrorCodeManifestUnknown\n\tAPIErrorCodeManifestInvalid\n\tAPIErrorCodeManifestUnverified\n\tAPIErrorCodeBlobUnknown\n\tAPIErrorCodeBlobUploadUnknown\n)\n\ntype ErrorDescriptor struct {\n\tCode int\n\tValue string\n\tMessage string\n\tDescription string\n}\n\nvar ErrorDescriptors = []ErrorDescriptor{\n\t{\n\t\tCode: APIErrorCodeUnknown,\n\t\tValue: \"UNKNOWN\",\n\t\tMessage: \"unknown error\",\n\t\tDescription: `Generic error returned when the error does not have an\n API classification.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeUnauthorized,\n\t\tValue: \"UNAUTHORIZED\",\n\t\tMessage: \"access to the requested resource is not authorized\",\n\t\tDescription: `The access controller denied access for the operation on\n a resource. Often this will be accompanied by a 401 Unauthorized\n response status.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeDigestInvalid,\n\t\tValue: \"DIGEST_INVALID\",\n\t\tMessage: \"provided digest did not match uploaded content\",\n\t\tDescription: `When a blob is uploaded, the registry will check that\n the content matches the digest provided by the client. The error may\n include a detail structure with the key \"digest\", including the\n invalid digest string. This error may also be returned when a manifest\n includes an invalid layer digest.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeSizeInvalid,\n\t\tValue: \"SIZE_INVALID\",\n\t\tMessage: \"provided length did not match content length\",\n\t\tDescription: `When a layer is uploaded, the provided size will be\n checked against the uploaded content. If they do not match, this error\n will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeNameInvalid,\n\t\tValue: \"NAME_INVALID\",\n\t\tMessage: \"manifest name did not match URI\",\n\t\tDescription: `During a manifest upload, if the name in the manifest\n does not match the uri name, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeTagInvalid,\n\t\tValue: \"TAG_INVALID\",\n\t\tMessage: \"manifest tag did not match URI\",\n\t\tDescription: `During a manifest upload, if the tag in the manifest\n does not match the uri tag, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeNameUnknown,\n\t\tValue: \"NAME_UNKNOWN\",\n\t\tMessage: \"repository name not known to registry\",\n\t\tDescription: `This is returned if the name used during an operation is\n unknown to the registry.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestUnknown,\n\t\tValue: \"MANIFEST_UNKNOWN\",\n\t\tMessage: \"manifest unknown\",\n\t\tDescription: `This error is returned when the manifest, identified by\n name and tag is unknown to the repository.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestInvalid,\n\t\tValue: \"MANIFEST_INVALID\",\n\t\tMessage: \"manifest invalid\",\n\t\tDescription: `During upload, manifests undergo several checks ensuring\n validity. If those checks fail, this error may be returned, unless a\n more specific error is included. The detail will contain information\n the failed validation.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestUnverified,\n\t\tValue: \"MANIFEST_UNVERIFIED\",\n\t\tMessage: \"manifest failed signature verification\",\n\t\tDescription: `During manifest upload, if the manifest fails signature\n verification, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeBlobUnknown,\n\t\tValue: \"BLOB_UNKNOWN\",\n\t\tMessage: \"blob unknown to registry\",\n\t\tDescription: `This error may be returned when a blob is unknown to the\n registry in a specified repository. This can be returned with a\n standard get or if a manifest references an unknown layer during\n upload.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeBlobUploadUnknown,\n\t\tValue: \"BLOB_UPLOAD_UNKNOWN\",\n\t\tMessage: \"blob upload unknown to registry\",\n\t\tDescription: `If a blob upload has been cancelled or was never \n started, this error code may be returned.`,\n\t},\n}\n<commit_msg>Reformat description in ErrorDescriptor struct.<commit_after>package modules\n\nconst (\n\tAPIErrorCodeUnknown = iota\n\tAPIErrorCodeUnauthorized\n\tAPIErrorCodeDigestInvalid\n\tAPIErrorCodeSizeInvalid\n\tAPIErrorCodeNameInvalid\n\tAPIErrorCodeTagInvalid\n\tAPIErrorCodeNameUnknown\n\tAPIErrorCodeManifestUnknown\n\tAPIErrorCodeManifestInvalid\n\tAPIErrorCodeManifestUnverified\n\tAPIErrorCodeBlobUnknown\n\tAPIErrorCodeBlobUploadUnknown\n)\n\ntype ErrorDescriptor struct {\n\tCode int\n\tValue string\n\tMessage string\n\tDescription string\n}\n\nvar ErrorDescriptors = []ErrorDescriptor{\n\t{\n\t\tCode: APIErrorCodeUnknown,\n\t\tValue: \"UNKNOWN\",\n\t\tMessage: \"unknown error\",\n\t\tDescription: `Generic error returned when the error does not have an API classification.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeUnauthorized,\n\t\tValue: \"UNAUTHORIZED\",\n\t\tMessage: \"access to the requested resource is not authorized\",\n\t\tDescription: `The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeDigestInvalid,\n\t\tValue: \"DIGEST_INVALID\",\n\t\tMessage: \"provided digest did not match uploaded content\",\n\t\tDescription: `When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key \"digest\", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeSizeInvalid,\n\t\tValue: \"SIZE_INVALID\",\n\t\tMessage: \"provided length did not match content length\",\n\t\tDescription: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeNameInvalid,\n\t\tValue: \"NAME_INVALID\",\n\t\tMessage: \"manifest name did not match URI\",\n\t\tDescription: `During a manifest upload, if the name in the manifest does not match the uri name, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeTagInvalid,\n\t\tValue: \"TAG_INVALID\",\n\t\tMessage: \"manifest tag did not match URI\",\n\t\tDescription: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeNameUnknown,\n\t\tValue: \"NAME_UNKNOWN\",\n\t\tMessage: \"repository name not known to registry\",\n\t\tDescription: `This is returned if the name used during an operation is unknown to the registry.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestUnknown,\n\t\tValue: \"MANIFEST_UNKNOWN\",\n\t\tMessage: \"manifest unknown\",\n\t\tDescription: `This error is returned when the manifest, identified by name and tag is unknown to the repository.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestInvalid,\n\t\tValue: \"MANIFEST_INVALID\",\n\t\tMessage: \"manifest invalid\",\n\t\tDescription: `During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeManifestUnverified,\n\t\tValue: \"MANIFEST_UNVERIFIED\",\n\t\tMessage: \"manifest failed signature verification\",\n\t\tDescription: `During manifest upload, if the manifest fails signature verification, this error will be returned.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeBlobUnknown,\n\t\tValue: \"BLOB_UNKNOWN\",\n\t\tMessage: \"blob unknown to registry\",\n\t\tDescription: `This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`,\n\t},\n\t{\n\t\tCode: APIErrorCodeBlobUploadUnknown,\n\t\tValue: \"BLOB_UPLOAD_UNKNOWN\",\n\t\tMessage: \"blob upload unknown to registry\",\n\t\tDescription: `If a blob upload has been cancelled or was never started, this error code may be returned.`,\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The go-ethereum Authors\n\/\/ This file is part of the go-ethereum library.\n\/\/\n\/\/ The go-ethereum library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ The go-ethereum library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the go-ethereum library. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage core\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/state\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n\t\"github.com\/ethereum\/go-ethereum\/ethdb\"\n\t\"github.com\/ethereum\/go-ethereum\/event\"\n\t\"github.com\/ethereum\/go-ethereum\/pow\"\n)\n\n\/\/ FakePow is a non-validating proof of work implementation.\n\/\/ It returns true from Verify for any block.\ntype FakePow struct{}\n\nfunc (f FakePow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {\n\treturn 0, nil\n}\nfunc (f FakePow) Verify(block pow.Block) bool { return true }\nfunc (f FakePow) GetHashrate() int64 { return 0 }\nfunc (f FakePow) Turbo(bool) {}\n\n\/\/ So we can deterministically seed different blockchains\nvar (\n\tcanonicalSeed = 1\n\tforkSeed = 2\n)\n\n\/\/ BlockGen creates blocks for testing.\n\/\/ See GenerateChain for a detailed explanation.\ntype BlockGen struct {\n\ti int\n\tparent *types.Block\n\tchain []*types.Block\n\theader *types.Header\n\tstatedb *state.StateDB\n\n\tgasPool *GasPool\n\ttxs []*types.Transaction\n\treceipts []*types.Receipt\n\tuncles []*types.Header\n}\n\n\/\/ SetCoinbase sets the coinbase of the generated block.\n\/\/ It can be called at most once.\nfunc (b *BlockGen) SetCoinbase(addr common.Address) {\n\tif b.gasPool != nil {\n\t\tif len(b.txs) > 0 {\n\t\t\tpanic(\"coinbase must be set before adding transactions\")\n\t\t}\n\t\tpanic(\"coinbase can only be set once\")\n\t}\n\tb.header.Coinbase = addr\n\tb.gasPool = new(GasPool).AddGas(b.header.GasLimit)\n}\n\n\/\/ SetExtra sets the extra data field of the generated block.\nfunc (b *BlockGen) SetExtra(data []byte) {\n\tb.header.Extra = data\n}\n\n\/\/ AddTx adds a transaction to the generated block. If no coinbase has\n\/\/ been set, the block's coinbase is set to the zero address.\n\/\/\n\/\/ AddTx panics if the transaction cannot be executed. In addition to\n\/\/ the protocol-imposed limitations (gas limit, etc.), there are some\n\/\/ further limitations on the content of transactions that can be\n\/\/ added. Notably, contract code relying on the BLOCKHASH instruction\n\/\/ will panic during execution.\nfunc (b *BlockGen) AddTx(tx *types.Transaction) {\n\tif b.gasPool == nil {\n\t\tb.SetCoinbase(common.Address{})\n\t}\n\tb.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs))\n\t_, gas, err := ApplyMessage(NewEnv(b.statedb, nil, tx, b.header), tx, b.gasPool)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\troot := b.statedb.IntermediateRoot()\n\tb.header.GasUsed.Add(b.header.GasUsed, gas)\n\treceipt := types.NewReceipt(root.Bytes(), b.header.GasUsed)\n\treceipt.Logs = b.statedb.GetLogs(tx.Hash())\n\treceipt.Bloom = types.CreateBloom(types.Receipts{receipt})\n\tb.txs = append(b.txs, tx)\n\tb.receipts = append(b.receipts, receipt)\n}\n\n\/\/ Number returns the block number of the block being generated.\nfunc (b *BlockGen) Number() *big.Int {\n\treturn new(big.Int).Set(b.header.Number)\n}\n\n\/\/ AddUncheckedReceipts forcefully adds a receipts to the block without a\n\/\/ backing transaction.\n\/\/\n\/\/ AddUncheckedReceipts will cause consensus failures when used during real\n\/\/ chain processing. This is best used in conjuction with raw block insertion.\nfunc (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {\n\tb.receipts = append(b.receipts, receipt)\n}\n\n\/\/ TxNonce returns the next valid transaction nonce for the\n\/\/ account at addr. It panics if the account does not exist.\nfunc (b *BlockGen) TxNonce(addr common.Address) uint64 {\n\tif !b.statedb.HasAccount(addr) {\n\t\tpanic(\"account does not exist\")\n\t}\n\treturn b.statedb.GetNonce(addr)\n}\n\n\/\/ AddUncle adds an uncle header to the generated block.\nfunc (b *BlockGen) AddUncle(h *types.Header) {\n\tb.uncles = append(b.uncles, h)\n}\n\n\/\/ PrevBlock returns a previously generated block by number. It panics if\n\/\/ num is greater or equal to the number of the block being generated.\n\/\/ For index -1, PrevBlock returns the parent block given to GenerateChain.\nfunc (b *BlockGen) PrevBlock(index int) *types.Block {\n\tif index >= b.i {\n\t\tpanic(\"block index out of range\")\n\t}\n\tif index == -1 {\n\t\treturn b.parent\n\t}\n\treturn b.chain[index]\n}\n\n\/\/ OffsetTime modifies the time instance of a block, implicitly changing its\n\/\/ associated difficulty. It's useful to test scenarios where forking is not\n\/\/ tied to chain length directly.\nfunc (b *BlockGen) OffsetTime(seconds int64) {\n\tb.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))\n\tif b.header.Time.Cmp(b.parent.Header().Time) <= 0 {\n\t\tpanic(\"block time out of range\")\n\t}\n\tb.header.Difficulty = CalcDifficulty(b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())\n}\n\n\/\/ GenerateChain creates a chain of n blocks. The first block's\n\/\/ parent will be the provided parent. db is used to store\n\/\/ intermediate states and should contain the parent's state trie.\n\/\/\n\/\/ The generator function is called with a new block generator for\n\/\/ every block. Any transactions and uncles added to the generator\n\/\/ become part of the block. If gen is nil, the blocks will be empty\n\/\/ and their coinbase will be the zero address.\n\/\/\n\/\/ Blocks created by GenerateChain do not contain valid proof of work\n\/\/ values. Inserting them into BlockChain requires use of FakePow or\n\/\/ a similar non-validating proof of work implementation.\nfunc GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {\n\tstatedb, err := state.New(parent.Root(), db)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tblocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)\n\tgenblock := func(i int, h *types.Header) (*types.Block, types.Receipts) {\n\t\tb := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}\n\t\tif gen != nil {\n\t\t\tgen(i, b)\n\t\t}\n\t\tAccumulateRewards(statedb, h, b.uncles)\n\t\troot, err := statedb.Commit()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"state write error: %v\", err))\n\t\t}\n\t\th.Root = root\n\t\treturn types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts\n\t}\n\tfor i := 0; i < n; i++ {\n\t\theader := makeHeader(parent, statedb)\n\t\tblock, receipt := genblock(i, header)\n\t\tblocks[i] = block\n\t\treceipts[i] = receipt\n\t\tparent = block\n\t}\n\treturn blocks, receipts\n}\n\nfunc makeHeader(parent *types.Block, state *state.StateDB) *types.Header {\n\tvar time *big.Int\n\tif parent.Time() == nil {\n\t\ttime = big.NewInt(10)\n\t} else {\n\t\ttime = new(big.Int).Add(parent.Time(), big.NewInt(10)) \/\/ block time is fixed at 10 seconds\n\t}\n\treturn &types.Header{\n\t\tRoot: state.IntermediateRoot(),\n\t\tParentHash: parent.Hash(),\n\t\tCoinbase: parent.Coinbase(),\n\t\tDifficulty: CalcDifficulty(time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),\n\t\tGasLimit: CalcGasLimit(parent),\n\t\tGasUsed: new(big.Int),\n\t\tNumber: new(big.Int).Add(parent.Number(), common.Big1),\n\t\tTime: time,\n\t}\n}\n\n\/\/ newCanonical creates a chain database, and injects a deterministic canonical\n\/\/ chain. Depending on the full flag, if creates either a full block chain or a\n\/\/ header only chain.\nfunc newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {\n\t\/\/ Create te new chain database\n\tdb, _ := ethdb.NewMemDatabase()\n\tevmux := &event.TypeMux{}\n\n\t\/\/ Initialize a fresh chain with only a genesis block\n\tgenesis, _ := WriteTestNetGenesisBlock(db)\n\n\tblockchain, _ := NewBlockChain(db, FakePow{}, evmux)\n\t\/\/ Create and inject the requested chain\n\tif n == 0 {\n\t\treturn db, blockchain, nil\n\t}\n\tif full {\n\t\t\/\/ Full block-chain requested\n\t\tblocks := makeBlockChain(genesis, n, db, canonicalSeed)\n\t\t_, err := blockchain.InsertChain(blocks)\n\t\treturn db, blockchain, err\n\t}\n\t\/\/ Header-only chain requested\n\theaders := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)\n\t_, err := blockchain.InsertHeaderChain(headers, 1)\n\treturn db, blockchain, err\n}\n\n\/\/ makeHeaderChain creates a deterministic chain of headers rooted at parent.\nfunc makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {\n\tblocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)\n\theaders := make([]*types.Header, len(blocks))\n\tfor i, block := range blocks {\n\t\theaders[i] = block.Header()\n\t}\n\treturn headers\n}\n\n\/\/ makeBlockChain creates a deterministic chain of blocks rooted at parent.\nfunc makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {\n\tblocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {\n\t\tb.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})\n\t})\n\treturn blocks\n}\n<commit_msg>core: fix invalid state reuse in chain maker based tests<commit_after>\/\/ Copyright 2015 The go-ethereum Authors\n\/\/ This file is part of the go-ethereum library.\n\/\/\n\/\/ The go-ethereum library is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Lesser General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ The go-ethereum library is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Lesser General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Lesser General Public License\n\/\/ along with the go-ethereum library. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage core\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/state\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n\t\"github.com\/ethereum\/go-ethereum\/ethdb\"\n\t\"github.com\/ethereum\/go-ethereum\/event\"\n\t\"github.com\/ethereum\/go-ethereum\/pow\"\n)\n\n\/\/ FakePow is a non-validating proof of work implementation.\n\/\/ It returns true from Verify for any block.\ntype FakePow struct{}\n\nfunc (f FakePow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {\n\treturn 0, nil\n}\nfunc (f FakePow) Verify(block pow.Block) bool { return true }\nfunc (f FakePow) GetHashrate() int64 { return 0 }\nfunc (f FakePow) Turbo(bool) {}\n\n\/\/ So we can deterministically seed different blockchains\nvar (\n\tcanonicalSeed = 1\n\tforkSeed = 2\n)\n\n\/\/ BlockGen creates blocks for testing.\n\/\/ See GenerateChain for a detailed explanation.\ntype BlockGen struct {\n\ti int\n\tparent *types.Block\n\tchain []*types.Block\n\theader *types.Header\n\tstatedb *state.StateDB\n\n\tgasPool *GasPool\n\ttxs []*types.Transaction\n\treceipts []*types.Receipt\n\tuncles []*types.Header\n}\n\n\/\/ SetCoinbase sets the coinbase of the generated block.\n\/\/ It can be called at most once.\nfunc (b *BlockGen) SetCoinbase(addr common.Address) {\n\tif b.gasPool != nil {\n\t\tif len(b.txs) > 0 {\n\t\t\tpanic(\"coinbase must be set before adding transactions\")\n\t\t}\n\t\tpanic(\"coinbase can only be set once\")\n\t}\n\tb.header.Coinbase = addr\n\tb.gasPool = new(GasPool).AddGas(b.header.GasLimit)\n}\n\n\/\/ SetExtra sets the extra data field of the generated block.\nfunc (b *BlockGen) SetExtra(data []byte) {\n\tb.header.Extra = data\n}\n\n\/\/ AddTx adds a transaction to the generated block. If no coinbase has\n\/\/ been set, the block's coinbase is set to the zero address.\n\/\/\n\/\/ AddTx panics if the transaction cannot be executed. In addition to\n\/\/ the protocol-imposed limitations (gas limit, etc.), there are some\n\/\/ further limitations on the content of transactions that can be\n\/\/ added. Notably, contract code relying on the BLOCKHASH instruction\n\/\/ will panic during execution.\nfunc (b *BlockGen) AddTx(tx *types.Transaction) {\n\tif b.gasPool == nil {\n\t\tb.SetCoinbase(common.Address{})\n\t}\n\tb.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs))\n\treceipt, _, _, err := ApplyTransaction(nil, b.gasPool, b.statedb, b.header, tx, b.header.GasUsed)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb.txs = append(b.txs, tx)\n\tb.receipts = append(b.receipts, receipt)\n}\n\n\/\/ Number returns the block number of the block being generated.\nfunc (b *BlockGen) Number() *big.Int {\n\treturn new(big.Int).Set(b.header.Number)\n}\n\n\/\/ AddUncheckedReceipts forcefully adds a receipts to the block without a\n\/\/ backing transaction.\n\/\/\n\/\/ AddUncheckedReceipts will cause consensus failures when used during real\n\/\/ chain processing. This is best used in conjuction with raw block insertion.\nfunc (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {\n\tb.receipts = append(b.receipts, receipt)\n}\n\n\/\/ TxNonce returns the next valid transaction nonce for the\n\/\/ account at addr. It panics if the account does not exist.\nfunc (b *BlockGen) TxNonce(addr common.Address) uint64 {\n\tif !b.statedb.HasAccount(addr) {\n\t\tpanic(\"account does not exist\")\n\t}\n\treturn b.statedb.GetNonce(addr)\n}\n\n\/\/ AddUncle adds an uncle header to the generated block.\nfunc (b *BlockGen) AddUncle(h *types.Header) {\n\tb.uncles = append(b.uncles, h)\n}\n\n\/\/ PrevBlock returns a previously generated block by number. It panics if\n\/\/ num is greater or equal to the number of the block being generated.\n\/\/ For index -1, PrevBlock returns the parent block given to GenerateChain.\nfunc (b *BlockGen) PrevBlock(index int) *types.Block {\n\tif index >= b.i {\n\t\tpanic(\"block index out of range\")\n\t}\n\tif index == -1 {\n\t\treturn b.parent\n\t}\n\treturn b.chain[index]\n}\n\n\/\/ OffsetTime modifies the time instance of a block, implicitly changing its\n\/\/ associated difficulty. It's useful to test scenarios where forking is not\n\/\/ tied to chain length directly.\nfunc (b *BlockGen) OffsetTime(seconds int64) {\n\tb.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))\n\tif b.header.Time.Cmp(b.parent.Header().Time) <= 0 {\n\t\tpanic(\"block time out of range\")\n\t}\n\tb.header.Difficulty = CalcDifficulty(b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())\n}\n\n\/\/ GenerateChain creates a chain of n blocks. The first block's\n\/\/ parent will be the provided parent. db is used to store\n\/\/ intermediate states and should contain the parent's state trie.\n\/\/\n\/\/ The generator function is called with a new block generator for\n\/\/ every block. Any transactions and uncles added to the generator\n\/\/ become part of the block. If gen is nil, the blocks will be empty\n\/\/ and their coinbase will be the zero address.\n\/\/\n\/\/ Blocks created by GenerateChain do not contain valid proof of work\n\/\/ values. Inserting them into BlockChain requires use of FakePow or\n\/\/ a similar non-validating proof of work implementation.\nfunc GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {\n\tblocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)\n\tgenblock := func(i int, h *types.Header, statedb *state.StateDB) (*types.Block, types.Receipts) {\n\t\tb := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}\n\t\tif gen != nil {\n\t\t\tgen(i, b)\n\t\t}\n\t\tAccumulateRewards(statedb, h, b.uncles)\n\t\troot, err := statedb.Commit()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"state write error: %v\", err))\n\t\t}\n\t\th.Root = root\n\t\treturn types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tstatedb, err := state.New(parent.Root(), db)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\theader := makeHeader(parent, statedb)\n\t\tblock, receipt := genblock(i, header, statedb)\n\t\tblocks[i] = block\n\t\treceipts[i] = receipt\n\t\tparent = block\n\t}\n\treturn blocks, receipts\n}\n\nfunc makeHeader(parent *types.Block, state *state.StateDB) *types.Header {\n\tvar time *big.Int\n\tif parent.Time() == nil {\n\t\ttime = big.NewInt(10)\n\t} else {\n\t\ttime = new(big.Int).Add(parent.Time(), big.NewInt(10)) \/\/ block time is fixed at 10 seconds\n\t}\n\treturn &types.Header{\n\t\tRoot: state.IntermediateRoot(),\n\t\tParentHash: parent.Hash(),\n\t\tCoinbase: parent.Coinbase(),\n\t\tDifficulty: CalcDifficulty(time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),\n\t\tGasLimit: CalcGasLimit(parent),\n\t\tGasUsed: new(big.Int),\n\t\tNumber: new(big.Int).Add(parent.Number(), common.Big1),\n\t\tTime: time,\n\t}\n}\n\n\/\/ newCanonical creates a chain database, and injects a deterministic canonical\n\/\/ chain. Depending on the full flag, if creates either a full block chain or a\n\/\/ header only chain.\nfunc newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {\n\t\/\/ Create te new chain database\n\tdb, _ := ethdb.NewMemDatabase()\n\tevmux := &event.TypeMux{}\n\n\t\/\/ Initialize a fresh chain with only a genesis block\n\tgenesis, _ := WriteTestNetGenesisBlock(db)\n\n\tblockchain, _ := NewBlockChain(db, FakePow{}, evmux)\n\t\/\/ Create and inject the requested chain\n\tif n == 0 {\n\t\treturn db, blockchain, nil\n\t}\n\tif full {\n\t\t\/\/ Full block-chain requested\n\t\tblocks := makeBlockChain(genesis, n, db, canonicalSeed)\n\t\t_, err := blockchain.InsertChain(blocks)\n\t\treturn db, blockchain, err\n\t}\n\t\/\/ Header-only chain requested\n\theaders := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)\n\t_, err := blockchain.InsertHeaderChain(headers, 1)\n\treturn db, blockchain, err\n}\n\n\/\/ makeHeaderChain creates a deterministic chain of headers rooted at parent.\nfunc makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {\n\tblocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)\n\theaders := make([]*types.Header, len(blocks))\n\tfor i, block := range blocks {\n\t\theaders[i] = block.Header()\n\t}\n\treturn headers\n}\n\n\/\/ makeBlockChain creates a deterministic chain of blocks rooted at parent.\nfunc makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {\n\tblocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {\n\t\tb.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})\n\t})\n\treturn blocks\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage main\n\nimport (\n \"net\"\n \"os\"\n\n \"sippy\/log\"\n \"sippy\/utils\"\n)\n\ntype Cli_server_local struct {\n command_cb func(string) string\n listener net.Listener\n logger sippy_log.ErrorLogger\n}\n\/*\nfrom twisted.internet.protocol import Factory\nfrom twisted.internet import reactor\nfrom Cli_session import Cli_session\nfrom os import chown, unlink\nfrom os.path import exists\n\nclass Cli_server_local(Factory):\n command_cb = nil\n*\/\nfunc NewCli_server_local(command_cb func(string) string, address string, logger sippy_log.ErrorLogger\/*, sock_owner = nil*\/) (*Cli_server_local, error) {\n if _, err := os.Stat(address); err == nil {\n err = os.Remove(address)\n if err != nil { return nil, err }\n }\n addr, err := net.ResolveUnixAddr(\"unix\", address)\n if err != nil { return nil, err }\n\n listener, err := net.ListenUnix(\"unix\", addr)\n if err != nil { return nil, err }\n\n self := &Cli_server_local{\n command_cb : command_cb,\n\/\/ protocol : NewCli_session,\n listener : listener,\n logger : logger,\n }\n \/\/if address == nil:\n \/\/ address = '\/var\/run\/ccm.sock'\n \/\/if sock_owner != nil:\n \/\/ chown(address, sock_owner[0], sock_owner[1])\n return self, nil\n}\n\nfunc (self *Cli_server_local) Start() {\n go self.run()\n}\n\nfunc (self *Cli_server_local) run() {\n for {\n conn, err := self.listener.Accept()\n if err != nil {\n break\n }\n go sippy_utils.SafeCall(func() { self.handle_request(conn) }, nil, self.logger)\n }\n}\n\nfunc (self *Cli_server_local) handle_request(conn net.Conn) {\n buf := make([]byte, 2048)\n n, err := conn.Read(buf)\n if err != nil || n == 0 {\n return\n }\n defer conn.Close()\n res := self.command_cb(string(buf[:n]))\n conn.Write([]byte(res))\n}\n\/*\n def buildProtocol(self, addr):\n p = Factory.buildProtocol(self, addr)\n p.command_cb = self.command_cb\n return p\n\nif __name__ == '__main__':\n def callback(clm, cmd):\n print cmd\n return False\n f = Cli_server_local(callback)\n reactor.run()\n*\/\n<commit_msg>The Cli iface should use persistent connection.<commit_after>\/\/ Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.\n\/\/ Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.\n\/\/\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification,\n\/\/ are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation and\/or\n\/\/ other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n\/\/ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n\/\/ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n\/\/ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n\/\/ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n\/\/ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n\/\/ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\npackage main\n\nimport (\n \"bufio\"\n \"net\"\n \"os\"\n\n \"sippy\/log\"\n \"sippy\/utils\"\n)\n\ntype Cli_server_local struct {\n command_cb func(string) string\n listener net.Listener\n logger sippy_log.ErrorLogger\n}\n\/*\nfrom twisted.internet.protocol import Factory\nfrom twisted.internet import reactor\nfrom Cli_session import Cli_session\nfrom os import chown, unlink\nfrom os.path import exists\n\nclass Cli_server_local(Factory):\n command_cb = nil\n*\/\nfunc NewCli_server_local(command_cb func(string) string, address string, logger sippy_log.ErrorLogger\/*, sock_owner = nil*\/) (*Cli_server_local, error) {\n if _, err := os.Stat(address); err == nil {\n err = os.Remove(address)\n if err != nil { return nil, err }\n }\n addr, err := net.ResolveUnixAddr(\"unix\", address)\n if err != nil { return nil, err }\n\n listener, err := net.ListenUnix(\"unix\", addr)\n if err != nil { return nil, err }\n\n self := &Cli_server_local{\n command_cb : command_cb,\n\/\/ protocol : NewCli_session,\n listener : listener,\n logger : logger,\n }\n \/\/if address == nil:\n \/\/ address = '\/var\/run\/ccm.sock'\n \/\/if sock_owner != nil:\n \/\/ chown(address, sock_owner[0], sock_owner[1])\n return self, nil\n}\n\nfunc (self *Cli_server_local) Start() {\n go self.run()\n}\n\nfunc (self *Cli_server_local) run() {\n for {\n conn, err := self.listener.Accept()\n if err != nil {\n break\n }\n go sippy_utils.SafeCall(func() { self.handle_request(conn) }, nil, self.logger)\n }\n}\n\nfunc (self *Cli_server_local) handle_request(conn net.Conn) {\n defer conn.Close()\n reader := bufio.NewReader(conn)\n for {\n line, err := reader.ReadString('\\n')\n if err != nil {\n break\n }\n res := self.command_cb(line)\n _, err = conn.Write([]byte(res))\n if err != nil {\n break\n }\n }\n}\n\/*\n def buildProtocol(self, addr):\n p = Factory.buildProtocol(self, addr)\n p.command_cb = self.command_cb\n return p\n\nif __name__ == '__main__':\n def callback(clm, cmd):\n print cmd\n return False\n f = Cli_server_local(callback)\n reactor.run()\n*\/\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"compress\/gzip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\tgopath \"path\"\n\t\"strings\"\n\n\tcore \"github.com\/ipfs\/go-ipfs\/core\"\n\te \"github.com\/ipfs\/go-ipfs\/core\/commands\/e\"\n\tdag \"github.com\/ipfs\/go-ipfs\/merkledag\"\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n\ttar \"github.com\/ipfs\/go-ipfs\/thirdparty\/tar\"\n\tuarchive \"github.com\/ipfs\/go-ipfs\/unixfs\/archive\"\n\n\t\"gx\/ipfs\/QmUEB5nT4LG3TkUd5mkHrfRESUSgaUD4r7jSAYvvPeuWT9\/go-ipfs-cmds\"\n\t\"gx\/ipfs\/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM\/go-ipfs-cmdkit\"\n\t\"gx\/ipfs\/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs\/pb\"\n)\n\nvar ErrInvalidCompressionLevel = errors.New(\"Compression level must be between 1 and 9\")\n\nvar GetCmd = &cmds.Command{\n\tHelptext: cmdkit.HelpText{\n\t\tTagline: \"Download IPFS objects.\",\n\t\tShortDescription: `\nStores to disk the data contained an IPFS or IPNS object(s) at the given path.\n\nBy default, the output will be stored at '.\/<ipfs-path>', but an alternate\npath can be specified with '--output=<path>' or '-o=<path>'.\n\nTo output a TAR archive instead of unpacked files, use '--archive' or '-a'.\n\nTo compress the output with GZIP compression, use '--compress' or '-C'. You\nmay also specify the level of compression by specifying '-l=<1-9>'.\n`,\n\t},\n\n\tArguments: []cmdkit.Argument{\n\t\tcmdkit.StringArg(\"ipfs-path\", true, false, \"The path to the IPFS object(s) to be outputted.\").EnableStdin(),\n\t},\n\tOptions: []cmdkit.Option{\n\t\tcmdkit.StringOption(\"output\", \"o\", \"The path where the output should be stored.\"),\n\t\tcmdkit.BoolOption(\"archive\", \"a\", \"Output a TAR archive.\"),\n\t\tcmdkit.BoolOption(\"compress\", \"C\", \"Compress the output with GZIP compression.\"),\n\t\tcmdkit.IntOption(\"compression-level\", \"l\", \"The level of compression (1-9).\").WithDefault(-1),\n\t},\n\tPreRun: func(req *cmds.Request, env cmds.Environment) error {\n\t\t_, err := getCompressOptions(req)\n\t\treturn err\n\t},\n\tRun: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) {\n\t\tif len(req.Arguments) == 0 {\n\t\t\tres.SetError(errors.New(\"not enough arugments provided\"), cmdkit.ErrClient)\n\t\t\treturn\n\t\t}\n\t\tcmplvl, err := getCompressOptions(req)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tnode, err := GetNode(env)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\t\tp := path.Path(req.Arguments[0])\n\t\tctx := req.Context\n\t\tdn, err := core.Resolve(ctx, node.Namesys, node.Resolver, p)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tswitch dn := dn.(type) {\n\t\tcase *dag.ProtoNode:\n\t\t\tsize, err := dn.Size()\n\t\t\tif err != nil {\n\t\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tres.SetLength(size)\n\t\tcase *dag.RawNode:\n\t\t\tres.SetLength(uint64(len(dn.RawData())))\n\t\tdefault:\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tarchive, _ := req.Options[\"archive\"].(bool)\n\t\treader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tres.Emit(reader)\n\t},\n\tPostRun: cmds.PostRunMap{\n\t\tcmds.CLI: func(req *cmds.Request, re cmds.ResponseEmitter) cmds.ResponseEmitter {\n\t\t\treNext, res := cmds.NewChanResponsePair(req)\n\n\t\t\tgo func() {\n\t\t\t\tdefer re.Close()\n\n\t\t\t\tv, err := res.Next()\n\t\t\t\tif !cmds.HandleError(err, res, re) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toutReader, ok := v.(io.Reader)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Error(e.New(e.TypeErr(outReader, v)))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toutPath, _ := req.Options[\"output\"].(string)\n\t\t\t\tif len(outPath) == 0 {\n\t\t\t\t\t_, outPath = gopath.Split(req.Arguments[0])\n\t\t\t\t\toutPath = gopath.Clean(outPath)\n\t\t\t\t}\n\n\t\t\t\tcmplvl, err := getCompressOptions(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tre.SetError(err, cmdkit.ErrNormal)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tarchive, _ := req.Options[\"archive\"].(bool)\n\n\t\t\t\tgw := getWriter{\n\t\t\t\t\tOut: os.Stdout,\n\t\t\t\t\tErr: os.Stderr,\n\t\t\t\t\tArchive: archive,\n\t\t\t\t\tCompression: cmplvl,\n\t\t\t\t\tSize: int64(res.Length()),\n\t\t\t\t}\n\n\t\t\t\tif err := gw.Write(outReader, outPath); err != nil {\n\t\t\t\t\tre.SetError(err, cmdkit.ErrNormal)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\treturn reNext\n\t\t},\n\t},\n}\n\ntype clearlineReader struct {\n\tio.Reader\n\tout io.Writer\n}\n\nfunc (r *clearlineReader) Read(p []byte) (n int, err error) {\n\tn, err = r.Reader.Read(p)\n\tif err == io.EOF {\n\t\t\/\/ callback\n\t\tfmt.Fprintf(r.out, \"\\033[2K\\r\") \/\/ clear progress bar line on EOF\n\t}\n\treturn\n}\n\nfunc progressBarForReader(out io.Writer, r io.Reader, l int64) (*pb.ProgressBar, io.Reader) {\n\tbar := makeProgressBar(out, l)\n\tbarR := bar.NewProxyReader(r)\n\treturn bar, &clearlineReader{barR, out}\n}\n\nfunc makeProgressBar(out io.Writer, l int64) *pb.ProgressBar {\n\t\/\/ setup bar reader\n\t\/\/ TODO: get total length of files\n\tbar := pb.New64(l).SetUnits(pb.U_BYTES)\n\tbar.Output = out\n\n\t\/\/ the progress bar lib doesn't give us a way to get the width of the output,\n\t\/\/ so as a hack we just use a callback to measure the output, then git rid of it\n\tbar.Callback = func(line string) {\n\t\tterminalWidth := len(line)\n\t\tbar.Callback = nil\n\t\tlog.Infof(\"terminal width: %v\\n\", terminalWidth)\n\t}\n\treturn bar\n}\n\nfunc getOutPath(req *cmds.Request) string {\n\toutPath, _ := req.Options[\"output\"].(string)\n\tif outPath == \"\" {\n\t\ttrimmed := strings.TrimRight(req.Arguments[0], \"\/\")\n\t\t_, outPath = gopath.Split(trimmed)\n\t\toutPath = gopath.Clean(outPath)\n\t}\n\treturn outPath\n}\n\ntype getWriter struct {\n\tOut io.Writer \/\/ for output to user\n\tErr io.Writer \/\/ for progress bar output\n\n\tArchive bool\n\tCompression int\n\tSize int64\n}\n\nfunc (gw *getWriter) Write(r io.Reader, fpath string) error {\n\tif gw.Archive || gw.Compression != gzip.NoCompression {\n\t\treturn gw.writeArchive(r, fpath)\n\t}\n\treturn gw.writeExtracted(r, fpath)\n}\n\nfunc (gw *getWriter) writeArchive(r io.Reader, fpath string) error {\n\t\/\/ adjust file name if tar\n\tif gw.Archive {\n\t\tif !strings.HasSuffix(fpath, \".tar\") && !strings.HasSuffix(fpath, \".tar.gz\") {\n\t\t\tfpath += \".tar\"\n\t\t}\n\t}\n\n\t\/\/ adjust file name if gz\n\tif gw.Compression != gzip.NoCompression {\n\t\tif !strings.HasSuffix(fpath, \".gz\") {\n\t\t\tfpath += \".gz\"\n\t\t}\n\t}\n\n\t\/\/ create file\n\tfile, err := os.Create(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfmt.Fprintf(gw.Out, \"Saving archive to %s\\n\", fpath)\n\tbar, barR := progressBarForReader(gw.Err, r, gw.Size)\n\tbar.Start()\n\tdefer bar.Finish()\n\n\t_, err = io.Copy(file, barR)\n\treturn err\n}\n\nfunc (gw *getWriter) writeExtracted(r io.Reader, fpath string) error {\n\tfmt.Fprintf(gw.Out, \"Saving file(s) to %s\\n\", fpath)\n\tbar := makeProgressBar(gw.Err, gw.Size)\n\tbar.Start()\n\tdefer bar.Finish()\n\tdefer bar.Set64(gw.Size)\n\n\textractor := &tar.Extractor{fpath, bar.Add64}\n\treturn extractor.Extract(r)\n}\n\nfunc getCompressOptions(req *cmds.Request) (int, error) {\n\tcmprs, _ := req.Options[\"compress\"].(bool)\n\tcmplvl, _ := req.Options[\"compression-level\"].(int)\n\tswitch {\n\tcase !cmprs:\n\t\treturn gzip.NoCompression, nil\n\tcase cmprs && cmplvl == -1:\n\t\treturn gzip.DefaultCompression, nil\n\tcase cmprs && (cmplvl < 1 || cmplvl > 9):\n\t\treturn gzip.NoCompression, ErrInvalidCompressionLevel\n\t}\n\treturn cmplvl, nil\n}\n<commit_msg>forbid bad compression levels<commit_after>package commands\n\nimport (\n\t\"compress\/gzip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\tgopath \"path\"\n\t\"strings\"\n\n\tcore \"github.com\/ipfs\/go-ipfs\/core\"\n\te \"github.com\/ipfs\/go-ipfs\/core\/commands\/e\"\n\tdag \"github.com\/ipfs\/go-ipfs\/merkledag\"\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n\ttar \"github.com\/ipfs\/go-ipfs\/thirdparty\/tar\"\n\tuarchive \"github.com\/ipfs\/go-ipfs\/unixfs\/archive\"\n\n\t\"gx\/ipfs\/QmUEB5nT4LG3TkUd5mkHrfRESUSgaUD4r7jSAYvvPeuWT9\/go-ipfs-cmds\"\n\t\"gx\/ipfs\/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM\/go-ipfs-cmdkit\"\n\t\"gx\/ipfs\/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs\/pb\"\n)\n\nvar ErrInvalidCompressionLevel = errors.New(\"Compression level must be between 1 and 9\")\n\nvar GetCmd = &cmds.Command{\n\tHelptext: cmdkit.HelpText{\n\t\tTagline: \"Download IPFS objects.\",\n\t\tShortDescription: `\nStores to disk the data contained an IPFS or IPNS object(s) at the given path.\n\nBy default, the output will be stored at '.\/<ipfs-path>', but an alternate\npath can be specified with '--output=<path>' or '-o=<path>'.\n\nTo output a TAR archive instead of unpacked files, use '--archive' or '-a'.\n\nTo compress the output with GZIP compression, use '--compress' or '-C'. You\nmay also specify the level of compression by specifying '-l=<1-9>'.\n`,\n\t},\n\n\tArguments: []cmdkit.Argument{\n\t\tcmdkit.StringArg(\"ipfs-path\", true, false, \"The path to the IPFS object(s) to be outputted.\").EnableStdin(),\n\t},\n\tOptions: []cmdkit.Option{\n\t\tcmdkit.StringOption(\"output\", \"o\", \"The path where the output should be stored.\"),\n\t\tcmdkit.BoolOption(\"archive\", \"a\", \"Output a TAR archive.\"),\n\t\tcmdkit.BoolOption(\"compress\", \"C\", \"Compress the output with GZIP compression.\"),\n\t\tcmdkit.IntOption(\"compression-level\", \"l\", \"The level of compression (1-9).\"),\n\t},\n\tPreRun: func(req *cmds.Request, env cmds.Environment) error {\n\t\t_, err := getCompressOptions(req)\n\t\treturn err\n\t},\n\tRun: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) {\n\t\tif len(req.Arguments) == 0 {\n\t\t\tres.SetError(errors.New(\"not enough arugments provided\"), cmdkit.ErrClient)\n\t\t\treturn\n\t\t}\n\t\tcmplvl, err := getCompressOptions(req)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tnode, err := GetNode(env)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\t\tp := path.Path(req.Arguments[0])\n\t\tctx := req.Context\n\t\tdn, err := core.Resolve(ctx, node.Namesys, node.Resolver, p)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tswitch dn := dn.(type) {\n\t\tcase *dag.ProtoNode:\n\t\t\tsize, err := dn.Size()\n\t\t\tif err != nil {\n\t\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tres.SetLength(size)\n\t\tcase *dag.RawNode:\n\t\t\tres.SetLength(uint64(len(dn.RawData())))\n\t\tdefault:\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tarchive, _ := req.Options[\"archive\"].(bool)\n\t\treader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmdkit.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tres.Emit(reader)\n\t},\n\tPostRun: cmds.PostRunMap{\n\t\tcmds.CLI: func(req *cmds.Request, re cmds.ResponseEmitter) cmds.ResponseEmitter {\n\t\t\treNext, res := cmds.NewChanResponsePair(req)\n\n\t\t\tgo func() {\n\t\t\t\tdefer re.Close()\n\n\t\t\t\tv, err := res.Next()\n\t\t\t\tif !cmds.HandleError(err, res, re) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toutReader, ok := v.(io.Reader)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Error(e.New(e.TypeErr(outReader, v)))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toutPath, _ := req.Options[\"output\"].(string)\n\t\t\t\tif len(outPath) == 0 {\n\t\t\t\t\t_, outPath = gopath.Split(req.Arguments[0])\n\t\t\t\t\toutPath = gopath.Clean(outPath)\n\t\t\t\t}\n\n\t\t\t\tcmplvl, err := getCompressOptions(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tre.SetError(err, cmdkit.ErrNormal)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tarchive, _ := req.Options[\"archive\"].(bool)\n\n\t\t\t\tgw := getWriter{\n\t\t\t\t\tOut: os.Stdout,\n\t\t\t\t\tErr: os.Stderr,\n\t\t\t\t\tArchive: archive,\n\t\t\t\t\tCompression: cmplvl,\n\t\t\t\t\tSize: int64(res.Length()),\n\t\t\t\t}\n\n\t\t\t\tif err := gw.Write(outReader, outPath); err != nil {\n\t\t\t\t\tre.SetError(err, cmdkit.ErrNormal)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\treturn reNext\n\t\t},\n\t},\n}\n\ntype clearlineReader struct {\n\tio.Reader\n\tout io.Writer\n}\n\nfunc (r *clearlineReader) Read(p []byte) (n int, err error) {\n\tn, err = r.Reader.Read(p)\n\tif err == io.EOF {\n\t\t\/\/ callback\n\t\tfmt.Fprintf(r.out, \"\\033[2K\\r\") \/\/ clear progress bar line on EOF\n\t}\n\treturn\n}\n\nfunc progressBarForReader(out io.Writer, r io.Reader, l int64) (*pb.ProgressBar, io.Reader) {\n\tbar := makeProgressBar(out, l)\n\tbarR := bar.NewProxyReader(r)\n\treturn bar, &clearlineReader{barR, out}\n}\n\nfunc makeProgressBar(out io.Writer, l int64) *pb.ProgressBar {\n\t\/\/ setup bar reader\n\t\/\/ TODO: get total length of files\n\tbar := pb.New64(l).SetUnits(pb.U_BYTES)\n\tbar.Output = out\n\n\t\/\/ the progress bar lib doesn't give us a way to get the width of the output,\n\t\/\/ so as a hack we just use a callback to measure the output, then git rid of it\n\tbar.Callback = func(line string) {\n\t\tterminalWidth := len(line)\n\t\tbar.Callback = nil\n\t\tlog.Infof(\"terminal width: %v\\n\", terminalWidth)\n\t}\n\treturn bar\n}\n\nfunc getOutPath(req *cmds.Request) string {\n\toutPath, _ := req.Options[\"output\"].(string)\n\tif outPath == \"\" {\n\t\ttrimmed := strings.TrimRight(req.Arguments[0], \"\/\")\n\t\t_, outPath = gopath.Split(trimmed)\n\t\toutPath = gopath.Clean(outPath)\n\t}\n\treturn outPath\n}\n\ntype getWriter struct {\n\tOut io.Writer \/\/ for output to user\n\tErr io.Writer \/\/ for progress bar output\n\n\tArchive bool\n\tCompression int\n\tSize int64\n}\n\nfunc (gw *getWriter) Write(r io.Reader, fpath string) error {\n\tif gw.Archive || gw.Compression != gzip.NoCompression {\n\t\treturn gw.writeArchive(r, fpath)\n\t}\n\treturn gw.writeExtracted(r, fpath)\n}\n\nfunc (gw *getWriter) writeArchive(r io.Reader, fpath string) error {\n\t\/\/ adjust file name if tar\n\tif gw.Archive {\n\t\tif !strings.HasSuffix(fpath, \".tar\") && !strings.HasSuffix(fpath, \".tar.gz\") {\n\t\t\tfpath += \".tar\"\n\t\t}\n\t}\n\n\t\/\/ adjust file name if gz\n\tif gw.Compression != gzip.NoCompression {\n\t\tif !strings.HasSuffix(fpath, \".gz\") {\n\t\t\tfpath += \".gz\"\n\t\t}\n\t}\n\n\t\/\/ create file\n\tfile, err := os.Create(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfmt.Fprintf(gw.Out, \"Saving archive to %s\\n\", fpath)\n\tbar, barR := progressBarForReader(gw.Err, r, gw.Size)\n\tbar.Start()\n\tdefer bar.Finish()\n\n\t_, err = io.Copy(file, barR)\n\treturn err\n}\n\nfunc (gw *getWriter) writeExtracted(r io.Reader, fpath string) error {\n\tfmt.Fprintf(gw.Out, \"Saving file(s) to %s\\n\", fpath)\n\tbar := makeProgressBar(gw.Err, gw.Size)\n\tbar.Start()\n\tdefer bar.Finish()\n\tdefer bar.Set64(gw.Size)\n\n\textractor := &tar.Extractor{fpath, bar.Add64}\n\treturn extractor.Extract(r)\n}\n\nfunc getCompressOptions(req *cmds.Request) (int, error) {\n\tcmprs, _ := req.Options[\"compress\"].(bool)\n\tcmplvl, cmplvlFound := req.Options[\"compression-level\"].(int)\n\tswitch {\n\tcase !cmprs:\n\t\treturn gzip.NoCompression, nil\n\tcase cmprs && !cmplvlFound:\n\t\treturn gzip.DefaultCompression, nil\n\tcase cmprs && (cmplvl < 1 || cmplvl > 9):\n\t\treturn gzip.NoCompression, ErrInvalidCompressionLevel\n\t}\n\treturn cmplvl, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2014 The Cinode Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage utils\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"unicode\/utf8\"\n)\n\nvar (\n\tErrDeserializeIntegerOverflow = errors.New(\"Couldn not deserialize integer value - overflow of a 64-bit value detected\")\n\tErrDeserializeIntegerPaddingZeros = errors.New(\"Couldn not deserialize integer value - padding zeroes detected\")\n\tErrDeserializeStringToLarge = errors.New(\"Could not deserialize string value due to invalid length\")\n\tErrDeserializeStringNotUTF8 = errors.New(\"Could not deserialize string value - not a UTF-8 sequence\")\n)\n\nfunc SerializeInt(v uint64, w io.Writer) error {\n\n\tbuff := make([]byte, 0, 10)\n\n\tfor {\n\t\tb := byte(v & 0x7F)\n\t\tv = v >> 7\n\t\tif v != 0 {\n\t\t\tb |= 0x80\n\t\t}\n\n\t\tbuff = append(buff, b)\n\n\t\tif v == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t_, err := w.Write(buff)\n\treturn err\n}\n\nfunc SerializeBuffer(data []byte, w io.Writer) error {\n\n\tif err := SerializeInt(uint64(len(data)), w); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := w.Write(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc SerializeString(s string, w io.Writer) error {\n\treturn SerializeBuffer([]byte(s), w)\n}\n\nfunc DeserializeInt(r io.Reader) (v uint64, err error) {\n\n\tv, s := 0, uint(0)\n\tbuff := []byte{0}\n\n\tfor ; ; s += 7 {\n\n\t\t\/\/ Early overflow detection\n\t\tif s >= 64 {\n\t\t\treturn 0, ErrDeserializeIntegerOverflow\n\t\t}\n\n\t\t\/\/ Get next byte\n\t\tif _, err = r.Read(buff); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ Fill in the data in returned value\n\t\tv |= uint64(buff[0]&0x7F) << s\n\n\t\t\/\/ Overflow will cut some bits we won't be able to restore\n\t\tif (v >> s) != uint64(buff[0]&0x7F) {\n\t\t\treturn 0, ErrDeserializeIntegerOverflow\n\t\t}\n\n\t\t\/\/ Highest bit in a byte means we shall continue\n\t\tif (buff[0] & 0x80) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ No padding zeros allowed\n\tif ((buff[0] & 0x7F) == 0) && (s > 0) {\n\t\treturn 0, ErrDeserializeIntegerPaddingZeros\n\t}\n\n\treturn\n}\n\nfunc DeserializeBuffer(r io.Reader, maxLength uint64) (data []byte, err error) {\n\tlength, err := DeserializeInt(r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif (length < 0) || (length > maxLength) {\n\t\treturn nil, ErrDeserializeStringToLarge\n\t}\n\n\tbuffer := make([]byte, length)\n\tif _, err = io.ReadFull(r, buffer); err != nil {\n\t\treturn\n\t}\n\n\treturn buffer, nil\n}\n\nfunc DeserializeString(r io.Reader, maxLength uint64) (s string, err error) {\n\n\tbuffer, err := DeserializeBuffer(r, maxLength)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !utf8.Valid(buffer) {\n\t\treturn \"\", ErrDeserializeStringNotUTF8\n\t}\n\n\treturn string(buffer), nil\n}\n<commit_msg>Update serialization routines<commit_after>\/\/ Copyright 2013-2014 The Cinode Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage utils\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"unicode\/utf8\"\n)\n\nvar (\n\tErrIntegerOverflow = errors.New(\"integer overflow\")\n\tErrIntegerPaddingZeros = errors.New(\"padding zeroes are not allowed\")\n\tErrBufferToLarge = errors.New(\"buffer size is to large\")\n\tErrStringToLarge = errors.New(\"string length is to large\")\n\tErrStringNotUTF8 = errors.New(\"buffer is not a valid utf-8 string\")\n)\n\nconst (\n\t\/\/ 64-bit integer can be represented in 10 bytes, each representing 7 bits of the number\n\tmaxNumberBytes = 10\n)\n\n\/\/ SerializeInt serializes integer value into writer\nfunc SerializeInt(v uint64, w io.Writer) error {\n\n\tbuff := make([]byte, 0, maxNumberBytes)\n\n\tfor {\n\t\tb := byte(v & 0x7F)\n\t\tv = v >> 7\n\t\tif v != 0 {\n\t\t\tb |= 0x80\n\t\t}\n\n\t\tbuff = append(buff, b)\n\n\t\tif v == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t_, err := w.Write(buff)\n\treturn err\n}\n\n\/\/ SerializeBuffer serializes array of bytes into writer\nfunc SerializeBuffer(data []byte, w io.Writer, maxLength uint64) error {\n\n\tl := uint64(len(data))\n\tif l > maxLength {\n\t\treturn ErrBufferToLarge\n\t}\n\n\tif err := SerializeInt(l, w); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := w.Write(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ SerializeString serializes string into writer\nfunc SerializeString(s string, w io.Writer, maxLength uint64) error {\n\treturn SerializeBuffer([]byte(s), w, maxLength)\n}\n\n\/\/ DeserializeInt tries to deserialize integer value from reader\nfunc DeserializeInt(r io.Reader) (uint64, error) {\n\n\tv, s, buff := uint64(0), uint(0), []byte{0}\n\n\tfor ; ; s += 7 {\n\n\t\t\/\/ Early overflow detection\n\t\tif s >= 64 {\n\t\t\treturn 0, ErrIntegerOverflow\n\t\t}\n\n\t\t\/\/ Get next byte\n\t\tif _, err := r.Read(buff); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\t\/\/ Fill in the data in returned value\n\t\tv |= uint64(buff[0]&0x7F) << s\n\n\t\t\/\/ Overflow will cut some bits we won't be able to restore\n\t\tif (v >> s) != uint64(buff[0]&0x7F) {\n\t\t\treturn 0, ErrIntegerOverflow\n\t\t}\n\n\t\t\/\/ Highest bit in a byte means we shall continue\n\t\tif (buff[0] & 0x80) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ No padding zeros allowed\n\tif ((buff[0] & 0x7F) == 0) && (s > 0) {\n\t\treturn 0, ErrIntegerPaddingZeros\n\t}\n\n\treturn v, nil\n}\n\n\/\/ DeserializeBuffer tries to deserialize buffer form reader\nfunc DeserializeBuffer(r io.Reader, maxLength uint64) ([]byte, error) {\n\tlength, err := DeserializeInt(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif (length < 0) || (length > maxLength) {\n\t\treturn nil, ErrBufferToLarge\n\t}\n\n\tbuffer := make([]byte, length)\n\tif _, err = io.ReadFull(r, buffer); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer, nil\n}\n\n\/\/ DeserializeString tries to deserialize string from reader\nfunc DeserializeString(r io.Reader, maxLength uint64) (string, error) {\n\n\tbuffer, err := DeserializeBuffer(r, maxLength)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif !utf8.Valid(buffer) {\n\t\treturn \"\", ErrStringNotUTF8\n\t}\n\n\treturn string(buffer), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package store\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\ntype SeekReader interface {\n\tseekInternal(pos int64) error\n\treadInternal(buf []byte) error\n\tLength() int64\n}\n\ntype BufferedIndexInput struct {\n\t*IndexInputImpl\n\tSeekReader\n\tbufferSize int\n\tbuffer []byte\n\tbufferStart int64\n\tbufferLength int\n\tbufferPosition int\n}\n\nfunc newBufferedIndexInput(spi SeekReader, desc string, context IOContext) *BufferedIndexInput {\n\treturn newBufferedIndexInputBySize(spi, desc, bufferSize(context))\n}\n\nfunc newBufferedIndexInputBySize(spi SeekReader, desc string, bufferSize int) *BufferedIndexInput {\n\tcheckBufferSize(bufferSize)\n\tans := &BufferedIndexInput{bufferSize: bufferSize}\n\tans.SeekReader = spi\n\tans.IndexInputImpl = newIndexInputImpl(desc, ans)\n\treturn ans\n}\n\nfunc (in *BufferedIndexInput) newBuffer(newBuffer []byte) {\n\t\/\/ Subclasses can do something here\n\tin.buffer = newBuffer\n}\n\nfunc (in *BufferedIndexInput) ReadByte() (b byte, err error) {\n\tif in.bufferPosition >= in.bufferLength {\n\t\tin.refill()\n\t}\n\tin.bufferPosition++\n\treturn in.buffer[in.bufferPosition-1], nil\n}\n\nfunc checkBufferSize(bufferSize int) {\n\tif bufferSize <= 0 {\n\t\tpanic(fmt.Sprintf(\"bufferSize must be greater than 0 (got %v)\", bufferSize))\n\t}\n}\n\nfunc (in *BufferedIndexInput) ReadBytes(buf []byte) error {\n\treturn in.ReadBytesBuffered(buf, true)\n}\n\nfunc (in *BufferedIndexInput) ReadBytesBuffered(buf []byte, useBuffer bool) error {\n\tavailable := in.bufferLength - in.bufferPosition\n\tif length := len(buf); length <= available {\n\t\t\/\/ the buffer contains enough data to satisfy this request\n\t\tif length > 0 { \/\/ to allow b to be null if len is 0...\n\t\t\tcopy(buf, in.buffer[in.bufferPosition:in.bufferPosition+length])\n\t\t}\n\t\tin.bufferPosition += length\n\t} else {\n\t\t\/\/ the buffer does not have enough data. First serve all we've got.\n\t\tif available > 0 {\n\t\t\tcopy(buf, in.buffer[in.bufferPosition:in.bufferPosition+available])\n\t\t\tbuf = buf[available:]\n\t\t\tin.bufferPosition += available\n\t\t}\n\t\t\/\/ and now, read the remaining 'len' bytes:\n\t\tif length := len(buf); useBuffer && length < in.bufferSize {\n\t\t\t\/\/ If the amount left to read is small enough, and\n\t\t\t\/\/ we are allowed to use our buffer, do it in the usual\n\t\t\t\/\/ buffered way: fill the buffer and copy from it:\n\t\t\tif err := in.refill(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif in.bufferLength < length {\n\t\t\t\t\/\/ Throw an exception when refill() could not read len bytes:\n\t\t\t\tcopy(buf, in.buffer[0:in.bufferLength])\n\t\t\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t\t\t} else {\n\t\t\t\tcopy(buf, in.buffer[0:length])\n\t\t\t\tin.bufferPosition += length\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ The amount left to read is larger than the buffer\n\t\t\t\/\/ or we've been asked to not use our buffer -\n\t\t\t\/\/ there's no performance reason not to read it all\n\t\t\t\/\/ at once. Note that unlike the previous code of\n\t\t\t\/\/ this function, there is no need to do a seek\n\t\t\t\/\/ here, because there's no need to reread what we\n\t\t\t\/\/ had in the buffer.\n\t\t\tlength := len(buf)\n\t\t\tafter := in.bufferStart + int64(in.bufferPosition) + int64(length)\n\t\t\tif after > in.Length() {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t\t\t}\n\t\t\tif err := in.readInternal(buf); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tin.bufferStart = after\n\t\t\tin.bufferPosition = 0\n\t\t\tin.bufferLength = 0 \/\/ trigger refill() on read\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (in *BufferedIndexInput) ReadShort() (n int16, err error) {\n\tif 2 <= in.bufferLength-in.bufferPosition {\n\t\tin.bufferPosition += 2\n\t\treturn (int16(in.buffer[in.bufferPosition-2]) << 8) | int16(in.buffer[in.bufferPosition-1]), nil\n\t}\n\treturn in.DataInputImpl.ReadShort()\n}\n\nfunc (in *BufferedIndexInput) ReadInt() (n int32, err error) {\n\tif 4 <= in.bufferLength-in.bufferPosition {\n\t\t\/\/ log.Print(\"Reading int from buffer...\")\n\t\tin.bufferPosition += 4\n\t\treturn (int32(in.buffer[in.bufferPosition-4]) << 24) | (int32(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int32(in.buffer[in.bufferPosition-2]) << 8) | int32(in.buffer[in.bufferPosition-1]), nil\n\t}\n\treturn in.DataInputImpl.ReadInt()\n}\n\nfunc (in *BufferedIndexInput) ReadLong() (n int64, err error) {\n\tif 8 <= in.bufferLength-in.bufferPosition {\n\t\tin.bufferPosition += 4\n\t\ti1 := (int64(in.buffer[in.bufferPosition-4]) << 24) | (int64(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int64(in.buffer[in.bufferPosition-2]) << 8) | int64(in.buffer[in.bufferPosition-1])\n\t\tin.bufferPosition += 4\n\t\ti2 := (int64(in.buffer[in.bufferPosition-4]) << 24) | (int64(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int64(in.buffer[in.bufferPosition-2]) << 8) | int64(in.buffer[in.bufferPosition-1])\n\t\treturn (i1 << 32) | i2, nil\n\t}\n\treturn in.DataInputImpl.ReadLong()\n}\n\nfunc (in *BufferedIndexInput) ReadVInt() (n int32, err error) {\n\tif 5 <= in.bufferLength-in.bufferPosition {\n\t\tb := in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tif b < 128 {\n\t\t\treturn int32(b), nil\n\t\t}\n\t\tn := int32(b) & 0x7F\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 7\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 14\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 21\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\t\/\/ Warning: the next ands use 0x0F \/ 0xF0 - beware copy\/paste errors:\n\t\tn |= (int32(b) & 0x0F) << 28\n\t\tif (b & 0xF0) == 0 {\n\t\t\treturn n, nil\n\t\t}\n\t\treturn 0, errors.New(\"Invalid vInt detected (too many bits)\")\n\t}\n\treturn in.DataInputImpl.ReadVInt()\n}\n\nfunc (in *BufferedIndexInput) ReadVLong() (n int64, err error) {\n\tif 9 <= in.bufferLength-in.bufferPosition {\n\t\tb := in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tif b < 128 {\n\t\t\treturn int64(b), nil\n\t\t}\n\t\tn := int64(b & 0x7F)\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 7)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 14)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 21)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 28)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 35)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 42)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 49)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 56)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\treturn 0, errors.New(\"Invalid vLong detected (negative values disallowed)\")\n\t}\n\treturn in.DataInputImpl.ReadVLong()\n}\n\n\/\/ use panic\/recover to handle error\nfunc (in *BufferedIndexInput) refill() error {\n\tstart := in.bufferStart + int64(in.bufferPosition)\n\tend := start + int64(in.bufferSize)\n\tif n := in.Length(); end > n { \/\/ don't read past EOF\n\t\tend = n\n\t}\n\tnewLength := int(end - start)\n\tif newLength <= 0 {\n\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t}\n\n\tif in.buffer == nil {\n\t\tin.newBuffer(make([]byte, in.bufferSize)) \/\/ allocate buffer lazily\n\t\tin.seekInternal(int64(in.bufferStart))\n\t}\n\tin.readInternal(in.buffer[0:newLength])\n\tin.bufferLength = newLength\n\tin.bufferStart = start\n\tin.bufferPosition = 0\n\treturn nil\n}\n\nfunc (in *BufferedIndexInput) FilePointer() int64 {\n\treturn in.bufferStart + int64(in.bufferPosition)\n}\n\nfunc (in *BufferedIndexInput) Seek(pos int64) error {\n\tif pos >= in.bufferStart && pos < in.bufferStart+int64(in.bufferLength) {\n\t\tin.bufferPosition = int(pos - in.bufferStart) \/\/ seek within buffer\n\t\treturn nil\n\t} else {\n\t\tin.bufferStart = pos\n\t\tin.bufferPosition = 0\n\t\tin.bufferLength = 0 \/\/ trigger refill() on read()\n\t\treturn in.seekInternal(pos)\n\t}\n}\n\nfunc (in *BufferedIndexInput) Clone() *BufferedIndexInput {\n\tans := &BufferedIndexInput{\n\t\tbufferSize: in.bufferSize,\n\t\tbuffer: nil,\n\t\tbufferStart: in.FilePointer(),\n\t\tbufferLength: 0,\n\t\tbufferPosition: 0,\n\t}\n\tans.IndexInputImpl = newIndexInputImpl(in.desc, ans)\n\treturn ans\n}\n\n\/* The default buffer size in bytes. *\/\nconst DEFAULT_BUFFER_SIZE = 16384\n\ntype flushBufferAndLength interface {\n\tFlushBuffer(b []byte) error\n\tLength() (int64, error)\n}\n\ntype BufferedIndexOutput struct {\n\t*IndexOutputImpl\n\tspi flushBufferAndLength\n\tbuffer []byte\n\tstart int64 \/\/ position in file of buffer\n\tposition int \/\/ position in buffer\n}\n\n\/*\nCreates a new BufferedIndexOutput with the given buffer size.\n*\/\nfunc NewBufferedIndexOutput(size int, part flushBufferAndLength) *BufferedIndexOutput {\n\tassert2(size > 0, fmt.Sprintf(\"bufferSize must be greater than 0 (got %v)\", size))\n\tout := &BufferedIndexOutput{spi: part, buffer: make([]byte, size)}\n\tout.IndexOutputImpl = NewIndexOutput(out)\n\treturn out\n}\n\nfunc (out *BufferedIndexOutput) WriteByte(b byte) error {\n\tpanic(\"not implemented yet\")\n}\n\nfunc (out *BufferedIndexOutput) WriteBytes(buf []byte) error {\n\tbytesLeft := len(out.buffer) - out.position\n\t\/\/ is there enough space in the buffer?\n\tif bytesLeft >= len(buf) {\n\t\t\/\/ we add the data to the end of the buffer\n\t\tcopy(out.buffer[out.position:], buf)\n\t\tout.position += len(buf)\n\t\t\/\/ if the buffer is full, flush it\n\t\tif len(out.buffer) == out.position {\n\t\t\treturn out.Flush()\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ is data larger than buffer?\n\tif len(buf) > len(out.buffer) {\n\t\t\/\/ we flush the buffer\n\t\tif out.position > 0 {\n\t\t\terr := out.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ and write data at once\n\t\terr := out.spi.FlushBuffer(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tout.start += int64(len(buf))\n\t\treturn nil\n\t}\n\n\t\/\/ we fill\/flush the buffer (until the input is written)\n\tvar pos = 0 \/\/ position in the input data\n\tvar pieceLength int\n\tfor pos < len(buf) {\n\t\tpieceLength = len(buf) - pos\n\t\tif bytesLeft < pieceLength {\n\t\t\tpieceLength = bytesLeft\n\t\t}\n\t\tcopy(out.buffer[out.position:], buf[pos:pos+pieceLength])\n\t\tpos += pieceLength\n\t\tout.position += pieceLength\n\t\t\/\/ if the buffer is full, flush it\n\t\tbytesLeft = len(out.buffer) - out.position\n\t\tif bytesLeft == 0 {\n\t\t\terr := out.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbytesLeft = len(out.buffer)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (out *BufferedIndexOutput) Flush() error {\n\terr := out.spi.FlushBuffer(out.buffer[:out.position])\n\tif err == nil {\n\t\tout.start += int64(out.position)\n\t\tout.position = 0\n\t}\n\treturn err\n}\n\nfunc (out *BufferedIndexOutput) Close() error {\n\treturn out.Flush()\n}\n\nfunc (out *BufferedIndexOutput) FilePointer() int64 {\n\treturn out.start + int64(out.position)\n}\n<commit_msg>implement BufferedIndexOutput.WriteByte()<commit_after>package store\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\ntype SeekReader interface {\n\tseekInternal(pos int64) error\n\treadInternal(buf []byte) error\n\tLength() int64\n}\n\ntype BufferedIndexInput struct {\n\t*IndexInputImpl\n\tSeekReader\n\tbufferSize int\n\tbuffer []byte\n\tbufferStart int64\n\tbufferLength int\n\tbufferPosition int\n}\n\nfunc newBufferedIndexInput(spi SeekReader, desc string, context IOContext) *BufferedIndexInput {\n\treturn newBufferedIndexInputBySize(spi, desc, bufferSize(context))\n}\n\nfunc newBufferedIndexInputBySize(spi SeekReader, desc string, bufferSize int) *BufferedIndexInput {\n\tcheckBufferSize(bufferSize)\n\tans := &BufferedIndexInput{bufferSize: bufferSize}\n\tans.SeekReader = spi\n\tans.IndexInputImpl = newIndexInputImpl(desc, ans)\n\treturn ans\n}\n\nfunc (in *BufferedIndexInput) newBuffer(newBuffer []byte) {\n\t\/\/ Subclasses can do something here\n\tin.buffer = newBuffer\n}\n\nfunc (in *BufferedIndexInput) ReadByte() (b byte, err error) {\n\tif in.bufferPosition >= in.bufferLength {\n\t\tin.refill()\n\t}\n\tin.bufferPosition++\n\treturn in.buffer[in.bufferPosition-1], nil\n}\n\nfunc checkBufferSize(bufferSize int) {\n\tif bufferSize <= 0 {\n\t\tpanic(fmt.Sprintf(\"bufferSize must be greater than 0 (got %v)\", bufferSize))\n\t}\n}\n\nfunc (in *BufferedIndexInput) ReadBytes(buf []byte) error {\n\treturn in.ReadBytesBuffered(buf, true)\n}\n\nfunc (in *BufferedIndexInput) ReadBytesBuffered(buf []byte, useBuffer bool) error {\n\tavailable := in.bufferLength - in.bufferPosition\n\tif length := len(buf); length <= available {\n\t\t\/\/ the buffer contains enough data to satisfy this request\n\t\tif length > 0 { \/\/ to allow b to be null if len is 0...\n\t\t\tcopy(buf, in.buffer[in.bufferPosition:in.bufferPosition+length])\n\t\t}\n\t\tin.bufferPosition += length\n\t} else {\n\t\t\/\/ the buffer does not have enough data. First serve all we've got.\n\t\tif available > 0 {\n\t\t\tcopy(buf, in.buffer[in.bufferPosition:in.bufferPosition+available])\n\t\t\tbuf = buf[available:]\n\t\t\tin.bufferPosition += available\n\t\t}\n\t\t\/\/ and now, read the remaining 'len' bytes:\n\t\tif length := len(buf); useBuffer && length < in.bufferSize {\n\t\t\t\/\/ If the amount left to read is small enough, and\n\t\t\t\/\/ we are allowed to use our buffer, do it in the usual\n\t\t\t\/\/ buffered way: fill the buffer and copy from it:\n\t\t\tif err := in.refill(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif in.bufferLength < length {\n\t\t\t\t\/\/ Throw an exception when refill() could not read len bytes:\n\t\t\t\tcopy(buf, in.buffer[0:in.bufferLength])\n\t\t\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t\t\t} else {\n\t\t\t\tcopy(buf, in.buffer[0:length])\n\t\t\t\tin.bufferPosition += length\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ The amount left to read is larger than the buffer\n\t\t\t\/\/ or we've been asked to not use our buffer -\n\t\t\t\/\/ there's no performance reason not to read it all\n\t\t\t\/\/ at once. Note that unlike the previous code of\n\t\t\t\/\/ this function, there is no need to do a seek\n\t\t\t\/\/ here, because there's no need to reread what we\n\t\t\t\/\/ had in the buffer.\n\t\t\tlength := len(buf)\n\t\t\tafter := in.bufferStart + int64(in.bufferPosition) + int64(length)\n\t\t\tif after > in.Length() {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t\t\t}\n\t\t\tif err := in.readInternal(buf); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tin.bufferStart = after\n\t\t\tin.bufferPosition = 0\n\t\t\tin.bufferLength = 0 \/\/ trigger refill() on read\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (in *BufferedIndexInput) ReadShort() (n int16, err error) {\n\tif 2 <= in.bufferLength-in.bufferPosition {\n\t\tin.bufferPosition += 2\n\t\treturn (int16(in.buffer[in.bufferPosition-2]) << 8) | int16(in.buffer[in.bufferPosition-1]), nil\n\t}\n\treturn in.DataInputImpl.ReadShort()\n}\n\nfunc (in *BufferedIndexInput) ReadInt() (n int32, err error) {\n\tif 4 <= in.bufferLength-in.bufferPosition {\n\t\t\/\/ log.Print(\"Reading int from buffer...\")\n\t\tin.bufferPosition += 4\n\t\treturn (int32(in.buffer[in.bufferPosition-4]) << 24) | (int32(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int32(in.buffer[in.bufferPosition-2]) << 8) | int32(in.buffer[in.bufferPosition-1]), nil\n\t}\n\treturn in.DataInputImpl.ReadInt()\n}\n\nfunc (in *BufferedIndexInput) ReadLong() (n int64, err error) {\n\tif 8 <= in.bufferLength-in.bufferPosition {\n\t\tin.bufferPosition += 4\n\t\ti1 := (int64(in.buffer[in.bufferPosition-4]) << 24) | (int64(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int64(in.buffer[in.bufferPosition-2]) << 8) | int64(in.buffer[in.bufferPosition-1])\n\t\tin.bufferPosition += 4\n\t\ti2 := (int64(in.buffer[in.bufferPosition-4]) << 24) | (int64(in.buffer[in.bufferPosition-3]) << 16) |\n\t\t\t(int64(in.buffer[in.bufferPosition-2]) << 8) | int64(in.buffer[in.bufferPosition-1])\n\t\treturn (i1 << 32) | i2, nil\n\t}\n\treturn in.DataInputImpl.ReadLong()\n}\n\nfunc (in *BufferedIndexInput) ReadVInt() (n int32, err error) {\n\tif 5 <= in.bufferLength-in.bufferPosition {\n\t\tb := in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tif b < 128 {\n\t\t\treturn int32(b), nil\n\t\t}\n\t\tn := int32(b) & 0x7F\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 7\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 14\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int32(b) & 0x7F) << 21\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\t\/\/ Warning: the next ands use 0x0F \/ 0xF0 - beware copy\/paste errors:\n\t\tn |= (int32(b) & 0x0F) << 28\n\t\tif (b & 0xF0) == 0 {\n\t\t\treturn n, nil\n\t\t}\n\t\treturn 0, errors.New(\"Invalid vInt detected (too many bits)\")\n\t}\n\treturn in.DataInputImpl.ReadVInt()\n}\n\nfunc (in *BufferedIndexInput) ReadVLong() (n int64, err error) {\n\tif 9 <= in.bufferLength-in.bufferPosition {\n\t\tb := in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tif b < 128 {\n\t\t\treturn int64(b), nil\n\t\t}\n\t\tn := int64(b & 0x7F)\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 7)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 14)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 21)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 28)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 35)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 42)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 49)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\tb = in.buffer[in.bufferPosition]\n\t\tin.bufferPosition++\n\t\tn |= (int64(b&0x7F) << 56)\n\t\tif b < 128 {\n\t\t\treturn n, nil\n\t\t}\n\t\treturn 0, errors.New(\"Invalid vLong detected (negative values disallowed)\")\n\t}\n\treturn in.DataInputImpl.ReadVLong()\n}\n\n\/\/ use panic\/recover to handle error\nfunc (in *BufferedIndexInput) refill() error {\n\tstart := in.bufferStart + int64(in.bufferPosition)\n\tend := start + int64(in.bufferSize)\n\tif n := in.Length(); end > n { \/\/ don't read past EOF\n\t\tend = n\n\t}\n\tnewLength := int(end - start)\n\tif newLength <= 0 {\n\t\treturn errors.New(fmt.Sprintf(\"read past EOF: %v\", in))\n\t}\n\n\tif in.buffer == nil {\n\t\tin.newBuffer(make([]byte, in.bufferSize)) \/\/ allocate buffer lazily\n\t\tin.seekInternal(int64(in.bufferStart))\n\t}\n\tin.readInternal(in.buffer[0:newLength])\n\tin.bufferLength = newLength\n\tin.bufferStart = start\n\tin.bufferPosition = 0\n\treturn nil\n}\n\nfunc (in *BufferedIndexInput) FilePointer() int64 {\n\treturn in.bufferStart + int64(in.bufferPosition)\n}\n\nfunc (in *BufferedIndexInput) Seek(pos int64) error {\n\tif pos >= in.bufferStart && pos < in.bufferStart+int64(in.bufferLength) {\n\t\tin.bufferPosition = int(pos - in.bufferStart) \/\/ seek within buffer\n\t\treturn nil\n\t} else {\n\t\tin.bufferStart = pos\n\t\tin.bufferPosition = 0\n\t\tin.bufferLength = 0 \/\/ trigger refill() on read()\n\t\treturn in.seekInternal(pos)\n\t}\n}\n\nfunc (in *BufferedIndexInput) Clone() *BufferedIndexInput {\n\tans := &BufferedIndexInput{\n\t\tbufferSize: in.bufferSize,\n\t\tbuffer: nil,\n\t\tbufferStart: in.FilePointer(),\n\t\tbufferLength: 0,\n\t\tbufferPosition: 0,\n\t}\n\tans.IndexInputImpl = newIndexInputImpl(in.desc, ans)\n\treturn ans\n}\n\n\/* The default buffer size in bytes. *\/\nconst DEFAULT_BUFFER_SIZE = 16384\n\ntype flushBufferAndLength interface {\n\tFlushBuffer(b []byte) error\n\tLength() (int64, error)\n}\n\ntype BufferedIndexOutput struct {\n\t*IndexOutputImpl\n\tspi flushBufferAndLength\n\tbuffer []byte\n\tstart int64 \/\/ position in file of buffer\n\tposition int \/\/ position in buffer\n}\n\n\/*\nCreates a new BufferedIndexOutput with the given buffer size.\n*\/\nfunc NewBufferedIndexOutput(size int, part flushBufferAndLength) *BufferedIndexOutput {\n\tassert2(size > 0, fmt.Sprintf(\"bufferSize must be greater than 0 (got %v)\", size))\n\tout := &BufferedIndexOutput{spi: part, buffer: make([]byte, size)}\n\tout.IndexOutputImpl = NewIndexOutput(out)\n\treturn out\n}\n\nfunc (out *BufferedIndexOutput) WriteByte(b byte) error {\n\tif out.position >= len(out.buffer) {\n\t\terr := out.Flush()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tout.buffer[out.position] = b\n\tout.position++\n\treturn nil\n}\n\nfunc (out *BufferedIndexOutput) WriteBytes(buf []byte) error {\n\tbytesLeft := len(out.buffer) - out.position\n\t\/\/ is there enough space in the buffer?\n\tif bytesLeft >= len(buf) {\n\t\t\/\/ we add the data to the end of the buffer\n\t\tcopy(out.buffer[out.position:], buf)\n\t\tout.position += len(buf)\n\t\t\/\/ if the buffer is full, flush it\n\t\tif len(out.buffer) == out.position {\n\t\t\treturn out.Flush()\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ is data larger than buffer?\n\tif len(buf) > len(out.buffer) {\n\t\t\/\/ we flush the buffer\n\t\tif out.position > 0 {\n\t\t\terr := out.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ and write data at once\n\t\terr := out.spi.FlushBuffer(buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tout.start += int64(len(buf))\n\t\treturn nil\n\t}\n\n\t\/\/ we fill\/flush the buffer (until the input is written)\n\tvar pos = 0 \/\/ position in the input data\n\tvar pieceLength int\n\tfor pos < len(buf) {\n\t\tpieceLength = len(buf) - pos\n\t\tif bytesLeft < pieceLength {\n\t\t\tpieceLength = bytesLeft\n\t\t}\n\t\tcopy(out.buffer[out.position:], buf[pos:pos+pieceLength])\n\t\tpos += pieceLength\n\t\tout.position += pieceLength\n\t\t\/\/ if the buffer is full, flush it\n\t\tbytesLeft = len(out.buffer) - out.position\n\t\tif bytesLeft == 0 {\n\t\t\terr := out.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbytesLeft = len(out.buffer)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (out *BufferedIndexOutput) Flush() error {\n\terr := out.spi.FlushBuffer(out.buffer[:out.position])\n\tif err == nil {\n\t\tout.start += int64(out.position)\n\t\tout.position = 0\n\t}\n\treturn err\n}\n\nfunc (out *BufferedIndexOutput) Close() error {\n\treturn out.Flush()\n}\n\nfunc (out *BufferedIndexOutput) FilePointer() int64 {\n\treturn out.start + int64(out.position)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package crypto collects common cryptographic constants.\npackage crypto\n\nimport (\n\t\"hash\"\n\t\"io\"\n\t\"strconv\"\n)\n\n\/\/ Hash identifies a cryptographic hash function that is implemented in another\n\/\/ package.\ntype Hash uint\n\n\/\/ HashFunc simply returns the value of h so that Hash implements SignerOpts.\nfunc (h Hash) HashFunc() Hash {\n\treturn h\n}\n\nfunc (h Hash) String() string {\n\tswitch h {\n\tcase MD4:\n\t\treturn \"MD4\"\n\tcase MD5:\n\t\treturn \"MD5\"\n\tcase SHA1:\n\t\treturn \"SHA-1\"\n\tcase SHA224:\n\t\treturn \"SHA-224\"\n\tcase SHA256:\n\t\treturn \"SHA-256\"\n\tcase SHA384:\n\t\treturn \"SHA-384\"\n\tcase SHA512:\n\t\treturn \"SHA-512\"\n\tcase MD5SHA1:\n\t\treturn \"MD5+SHA1\"\n\tcase RIPEMD160:\n\t\treturn \"RIPEMD-160\"\n\tcase SHA3_224:\n\t\treturn \"SHA3-224\"\n\tcase SHA3_256:\n\t\treturn \"SHA3-256\"\n\tcase SHA3_384:\n\t\treturn \"SHA3-384\"\n\tcase SHA3_512:\n\t\treturn \"SHA3-512\"\n\tcase SHA512_224:\n\t\treturn \"SHA-512\/224\"\n\tcase SHA512_256:\n\t\treturn \"SHA-512\/256\"\n\tcase BLAKE2s_256:\n\t\treturn \"BLAKE2s-256\"\n\tcase BLAKE2b_256:\n\t\treturn \"BLAKE2b-256\"\n\tcase BLAKE2b_384:\n\t\treturn \"BLAKE2b-384\"\n\tcase BLAKE2b_512:\n\t\treturn \"BLAKE2b-512\"\n\tdefault:\n\t\treturn \"unknown hash value \" + strconv.Itoa(int(h))\n\t}\n}\n\nconst (\n\tMD4 Hash = 1 + iota \/\/ import golang.org\/x\/crypto\/md4\n\tMD5 \/\/ import crypto\/md5\n\tSHA1 \/\/ import crypto\/sha1\n\tSHA224 \/\/ import crypto\/sha256\n\tSHA256 \/\/ import crypto\/sha256\n\tSHA384 \/\/ import crypto\/sha512\n\tSHA512 \/\/ import crypto\/sha512\n\tMD5SHA1 \/\/ no implementation; MD5+SHA1 used for TLS RSA\n\tRIPEMD160 \/\/ import golang.org\/x\/crypto\/ripemd160\n\tSHA3_224 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_256 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_384 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_512 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA512_224 \/\/ import crypto\/sha512\n\tSHA512_256 \/\/ import crypto\/sha512\n\tBLAKE2s_256 \/\/ import golang.org\/x\/crypto\/blake2s\n\tBLAKE2b_256 \/\/ import golang.org\/x\/crypto\/blake2b\n\tBLAKE2b_384 \/\/ import golang.org\/x\/crypto\/blake2b\n\tBLAKE2b_512 \/\/ import golang.org\/x\/crypto\/blake2b\n\tmaxHash\n)\n\nvar digestSizes = []uint8{\n\tMD4: 16,\n\tMD5: 16,\n\tSHA1: 20,\n\tSHA224: 28,\n\tSHA256: 32,\n\tSHA384: 48,\n\tSHA512: 64,\n\tSHA512_224: 28,\n\tSHA512_256: 32,\n\tSHA3_224: 28,\n\tSHA3_256: 32,\n\tSHA3_384: 48,\n\tSHA3_512: 64,\n\tMD5SHA1: 36,\n\tRIPEMD160: 20,\n\tBLAKE2s_256: 32,\n\tBLAKE2b_256: 32,\n\tBLAKE2b_384: 48,\n\tBLAKE2b_512: 64,\n}\n\n\/\/ Size returns the length, in bytes, of a digest resulting from the given hash\n\/\/ function. It doesn't require that the hash function in question be linked\n\/\/ into the program.\nfunc (h Hash) Size() int {\n\tif h > 0 && h < maxHash {\n\t\treturn int(digestSizes[h])\n\t}\n\tpanic(\"crypto: Size of unknown hash function\")\n}\n\nvar hashes = make([]func() hash.Hash, maxHash)\n\n\/\/ New returns a new hash.Hash calculating the given hash function. New panics\n\/\/ if the hash function is not linked into the binary.\nfunc (h Hash) New() hash.Hash {\n\tif h > 0 && h < maxHash {\n\t\tf := hashes[h]\n\t\tif f != nil {\n\t\t\treturn f()\n\t\t}\n\t}\n\tpanic(\"crypto: requested hash function #\" + strconv.Itoa(int(h)) + \" is unavailable\")\n}\n\n\/\/ Available reports whether the given hash function is linked into the binary.\nfunc (h Hash) Available() bool {\n\treturn h < maxHash && hashes[h] != nil\n}\n\n\/\/ RegisterHash registers a function that returns a new instance of the given\n\/\/ hash function. This is intended to be called from the init function in\n\/\/ packages that implement hash functions.\nfunc RegisterHash(h Hash, f func() hash.Hash) {\n\tif h >= maxHash {\n\t\tpanic(\"crypto: RegisterHash of unknown hash function\")\n\t}\n\thashes[h] = f\n}\n\n\/\/ PublicKey represents a public key using an unspecified algorithm.\ntype PublicKey interface{}\n\n\/\/ PrivateKey represents a private key using an unspecified algorithm.\ntype PrivateKey interface{}\n\n\/\/ Signer is an interface for an opaque private key that can be used for\n\/\/ signing operations. For example, an RSA key kept in a hardware module.\ntype Signer interface {\n\t\/\/ Public returns the public key corresponding to the opaque,\n\t\/\/ private key.\n\tPublic() PublicKey\n\n\t\/\/ Sign signs digest with the private key, possibly using entropy from\n\t\/\/ rand. For an RSA key, the resulting signature should be either a\n\t\/\/ PKCS #1 v1.5 or PSS signature (as indicated by opts). For an (EC)DSA\n\t\/\/ key, it should be a DER-serialised, ASN.1 signature structure.\n\t\/\/\n\t\/\/ Hash implements the SignerOpts interface and, in most cases, one can\n\t\/\/ simply pass in the hash function used as opts. Sign may also attempt\n\t\/\/ to type assert opts to other types in order to obtain algorithm\n\t\/\/ specific values. See the documentation in each package for details.\n\t\/\/\n\t\/\/ Note that when a signature of a hash of a larger message is needed,\n\t\/\/ the caller is responsible for hashing the larger message and passing\n\t\/\/ the hash (as digest) and the hash function (as opts) to Sign.\n\tSign(rand io.Reader, digest []byte, opts SignerOpts) (signature []byte, err error)\n}\n\n\/\/ SignerOpts contains options for signing with a Signer.\ntype SignerOpts interface {\n\t\/\/ HashFunc returns an identifier for the hash function used to produce\n\t\/\/ the message passed to Signer.Sign, or else zero to indicate that no\n\t\/\/ hashing was done.\n\tHashFunc() Hash\n}\n\n\/\/ Decrypter is an interface for an opaque private key that can be used for\n\/\/ asymmetric decryption operations. An example would be an RSA key\n\/\/ kept in a hardware module.\ntype Decrypter interface {\n\t\/\/ Public returns the public key corresponding to the opaque,\n\t\/\/ private key.\n\tPublic() PublicKey\n\n\t\/\/ Decrypt decrypts msg. The opts argument should be appropriate for\n\t\/\/ the primitive used. See the documentation in each implementation for\n\t\/\/ details.\n\tDecrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error)\n}\n\ntype DecrypterOpts interface{}\n<commit_msg>crypto: document the extended key interfaces<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package crypto collects common cryptographic constants.\npackage crypto\n\nimport (\n\t\"hash\"\n\t\"io\"\n\t\"strconv\"\n)\n\n\/\/ Hash identifies a cryptographic hash function that is implemented in another\n\/\/ package.\ntype Hash uint\n\n\/\/ HashFunc simply returns the value of h so that Hash implements SignerOpts.\nfunc (h Hash) HashFunc() Hash {\n\treturn h\n}\n\nfunc (h Hash) String() string {\n\tswitch h {\n\tcase MD4:\n\t\treturn \"MD4\"\n\tcase MD5:\n\t\treturn \"MD5\"\n\tcase SHA1:\n\t\treturn \"SHA-1\"\n\tcase SHA224:\n\t\treturn \"SHA-224\"\n\tcase SHA256:\n\t\treturn \"SHA-256\"\n\tcase SHA384:\n\t\treturn \"SHA-384\"\n\tcase SHA512:\n\t\treturn \"SHA-512\"\n\tcase MD5SHA1:\n\t\treturn \"MD5+SHA1\"\n\tcase RIPEMD160:\n\t\treturn \"RIPEMD-160\"\n\tcase SHA3_224:\n\t\treturn \"SHA3-224\"\n\tcase SHA3_256:\n\t\treturn \"SHA3-256\"\n\tcase SHA3_384:\n\t\treturn \"SHA3-384\"\n\tcase SHA3_512:\n\t\treturn \"SHA3-512\"\n\tcase SHA512_224:\n\t\treturn \"SHA-512\/224\"\n\tcase SHA512_256:\n\t\treturn \"SHA-512\/256\"\n\tcase BLAKE2s_256:\n\t\treturn \"BLAKE2s-256\"\n\tcase BLAKE2b_256:\n\t\treturn \"BLAKE2b-256\"\n\tcase BLAKE2b_384:\n\t\treturn \"BLAKE2b-384\"\n\tcase BLAKE2b_512:\n\t\treturn \"BLAKE2b-512\"\n\tdefault:\n\t\treturn \"unknown hash value \" + strconv.Itoa(int(h))\n\t}\n}\n\nconst (\n\tMD4 Hash = 1 + iota \/\/ import golang.org\/x\/crypto\/md4\n\tMD5 \/\/ import crypto\/md5\n\tSHA1 \/\/ import crypto\/sha1\n\tSHA224 \/\/ import crypto\/sha256\n\tSHA256 \/\/ import crypto\/sha256\n\tSHA384 \/\/ import crypto\/sha512\n\tSHA512 \/\/ import crypto\/sha512\n\tMD5SHA1 \/\/ no implementation; MD5+SHA1 used for TLS RSA\n\tRIPEMD160 \/\/ import golang.org\/x\/crypto\/ripemd160\n\tSHA3_224 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_256 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_384 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA3_512 \/\/ import golang.org\/x\/crypto\/sha3\n\tSHA512_224 \/\/ import crypto\/sha512\n\tSHA512_256 \/\/ import crypto\/sha512\n\tBLAKE2s_256 \/\/ import golang.org\/x\/crypto\/blake2s\n\tBLAKE2b_256 \/\/ import golang.org\/x\/crypto\/blake2b\n\tBLAKE2b_384 \/\/ import golang.org\/x\/crypto\/blake2b\n\tBLAKE2b_512 \/\/ import golang.org\/x\/crypto\/blake2b\n\tmaxHash\n)\n\nvar digestSizes = []uint8{\n\tMD4: 16,\n\tMD5: 16,\n\tSHA1: 20,\n\tSHA224: 28,\n\tSHA256: 32,\n\tSHA384: 48,\n\tSHA512: 64,\n\tSHA512_224: 28,\n\tSHA512_256: 32,\n\tSHA3_224: 28,\n\tSHA3_256: 32,\n\tSHA3_384: 48,\n\tSHA3_512: 64,\n\tMD5SHA1: 36,\n\tRIPEMD160: 20,\n\tBLAKE2s_256: 32,\n\tBLAKE2b_256: 32,\n\tBLAKE2b_384: 48,\n\tBLAKE2b_512: 64,\n}\n\n\/\/ Size returns the length, in bytes, of a digest resulting from the given hash\n\/\/ function. It doesn't require that the hash function in question be linked\n\/\/ into the program.\nfunc (h Hash) Size() int {\n\tif h > 0 && h < maxHash {\n\t\treturn int(digestSizes[h])\n\t}\n\tpanic(\"crypto: Size of unknown hash function\")\n}\n\nvar hashes = make([]func() hash.Hash, maxHash)\n\n\/\/ New returns a new hash.Hash calculating the given hash function. New panics\n\/\/ if the hash function is not linked into the binary.\nfunc (h Hash) New() hash.Hash {\n\tif h > 0 && h < maxHash {\n\t\tf := hashes[h]\n\t\tif f != nil {\n\t\t\treturn f()\n\t\t}\n\t}\n\tpanic(\"crypto: requested hash function #\" + strconv.Itoa(int(h)) + \" is unavailable\")\n}\n\n\/\/ Available reports whether the given hash function is linked into the binary.\nfunc (h Hash) Available() bool {\n\treturn h < maxHash && hashes[h] != nil\n}\n\n\/\/ RegisterHash registers a function that returns a new instance of the given\n\/\/ hash function. This is intended to be called from the init function in\n\/\/ packages that implement hash functions.\nfunc RegisterHash(h Hash, f func() hash.Hash) {\n\tif h >= maxHash {\n\t\tpanic(\"crypto: RegisterHash of unknown hash function\")\n\t}\n\thashes[h] = f\n}\n\n\/\/ PublicKey represents a public key using an unspecified algorithm.\n\/\/\n\/\/ Although this type is an empty interface for backwards compatibility reasons,\n\/\/ all public key types in the standard library implement the following interface\n\/\/\n\/\/ interface{\n\/\/ Equal(x crypto.PublicKey) bool\n\/\/ }\n\/\/\n\/\/ which can be used for increased type safety within applications.\ntype PublicKey interface{}\n\n\/\/ PrivateKey represents a private key using an unspecified algorithm.\n\/\/\n\/\/ Although this type is an empty interface for backwards compatibility reasons,\n\/\/ all private key types in the standard library implement the following interface\n\/\/\n\/\/ interface{\n\/\/ Public() crypto.PublicKey\n\/\/ Equal(x crypto.PrivateKey) bool\n\/\/ }\n\/\/\n\/\/ as well as purpose-specific interfaces such as Signer and Decrypter, which\n\/\/ can be used for increased type safety within applications.\ntype PrivateKey interface{}\n\n\/\/ Signer is an interface for an opaque private key that can be used for\n\/\/ signing operations. For example, an RSA key kept in a hardware module.\ntype Signer interface {\n\t\/\/ Public returns the public key corresponding to the opaque,\n\t\/\/ private key.\n\tPublic() PublicKey\n\n\t\/\/ Sign signs digest with the private key, possibly using entropy from\n\t\/\/ rand. For an RSA key, the resulting signature should be either a\n\t\/\/ PKCS #1 v1.5 or PSS signature (as indicated by opts). For an (EC)DSA\n\t\/\/ key, it should be a DER-serialised, ASN.1 signature structure.\n\t\/\/\n\t\/\/ Hash implements the SignerOpts interface and, in most cases, one can\n\t\/\/ simply pass in the hash function used as opts. Sign may also attempt\n\t\/\/ to type assert opts to other types in order to obtain algorithm\n\t\/\/ specific values. See the documentation in each package for details.\n\t\/\/\n\t\/\/ Note that when a signature of a hash of a larger message is needed,\n\t\/\/ the caller is responsible for hashing the larger message and passing\n\t\/\/ the hash (as digest) and the hash function (as opts) to Sign.\n\tSign(rand io.Reader, digest []byte, opts SignerOpts) (signature []byte, err error)\n}\n\n\/\/ SignerOpts contains options for signing with a Signer.\ntype SignerOpts interface {\n\t\/\/ HashFunc returns an identifier for the hash function used to produce\n\t\/\/ the message passed to Signer.Sign, or else zero to indicate that no\n\t\/\/ hashing was done.\n\tHashFunc() Hash\n}\n\n\/\/ Decrypter is an interface for an opaque private key that can be used for\n\/\/ asymmetric decryption operations. An example would be an RSA key\n\/\/ kept in a hardware module.\ntype Decrypter interface {\n\t\/\/ Public returns the public key corresponding to the opaque,\n\t\/\/ private key.\n\tPublic() PublicKey\n\n\t\/\/ Decrypt decrypts msg. The opts argument should be appropriate for\n\t\/\/ the primitive used. See the documentation in each implementation for\n\t\/\/ details.\n\tDecrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error)\n}\n\ntype DecrypterOpts interface{}\n<|endoftext|>"} {"text":"<commit_before>package nacl\n\n\/\/ #cgo CFLAGS: -I\/usr\/local\/include\/sodium\n\/\/ #cgo LDFLAGS: \/usr\/local\/lib\/libsodium.a\n\/\/ #include <stdio.h>\n\/\/ #include <sodium.h>\nimport \"C\"\n\nfunc CryptoScalarmultBytes() int {\n\treturn int(C.crypto_scalarmult_bytes())\n}\n\nfunc CryptoScalarmultScalarBytes() int {\n\treturn int(C.crypto_scalarmult_scalarbytes())\n}\n\nfunc CryptoScalarmultPrimitive() string {\n\treturn C.GoString(C.crypto_scalarmult_primitive())\n}\n\nfunc CryptoScalarmultBase(n []byte) ([]byte, int) {\n\tcheckSize(n, CryptoScalarmultScalarBytes(), \"secret key\")\n\tq := make([]byte, CryptoScalarmultBytes())\n\texit := C.crypto_scalarmult_base(\n\t\t(*C.uchar)(&q[0]),\n\t\t(*C.uchar)(&n[0]))\n\n\treturn q, exit\n}\n\nfunc CryptoScalarMult(n []byte, p []byte) ([]byte, int) {\n\tcheckSize(n, CryptoScalarmultScalarBytes(), \"secret key\")\n\tcheckSize(p, CryptoScalarmultScalarBytes(), \"public key\")\n\tq := make([]byte, CryptoScalarmultBytes())\n\texit := C.crypto_scalarmult(\n\t\t(*C.uchar)(&q[0]),\n\t\t(*C.uchar)(&n[0]),\n\t\t(*C.uchar)(&p[0]))\n\n\treturn q, exit\n\n}\n\nfunc checkSize(buf []byte, expected int, descrip string) {\n\tif len(buf) != expected {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (%d), got (%d).\", descrip, expected, len(buf)))\n\t}\n}\n<commit_msg>Delete crypto_scalarmult.go<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mysqldef\n\n\/\/ Version informations.\nconst (\n\tMinProtocolVersion byte = 10\n\tMaxPayloadLen int = 1<<24 - 1\n\tServerVersion string = \"5.5.31-TiDB-1.0\"\n)\n\n\/\/ Header informations.\nconst (\n\tOKHeader byte = 0x00\n\tErrHeader byte = 0xff\n\tEOFHeader byte = 0xfe\n\tLocalInFileHeader byte = 0xfb\n)\n\n\/\/ Server informations.\nconst (\n\tServerStatusInTrans uint16 = 0x0001\n\tServerStatusAutocommit uint16 = 0x0002\n\tServerMoreResultsExists uint16 = 0x0008\n\tServerStatusNoGoodIndexUsed uint16 = 0x0010\n\tServerStatusNoIndexUsed uint16 = 0x0020\n\tServerStatusCursorExists uint16 = 0x0040\n\tServerStatusLastRowSend uint16 = 0x0080\n\tServerStatusDBDropped uint16 = 0x0100\n\tServerStatusNoBackslashEscaped uint16 = 0x0200\n\tServerStatusMetadataChanged uint16 = 0x0400\n\tServerStatusWasSlow uint16 = 0x0800\n\tServerPSOutParams uint16 = 0x1000\n)\n\n\/\/ Command informations.\nconst (\n\tComSleep byte = iota\n\tComQuit\n\tComInitDB\n\tComQuery\n\tComFieldList\n\tComCreateDB\n\tComDropDB\n\tComRefresh\n\tComShutdown\n\tComStatistics\n\tComProcessInfo\n\tComConnect\n\tComProcessKill\n\tComDebug\n\tComPing\n\tComTime\n\tComDelayedInsert\n\tComChangeUser\n\tComBinlogDump\n\tComTableDump\n\tComConnectOut\n\tComRegisterSlave\n\tComStmtPrepare\n\tComStmtExecute\n\tComStmtSendLongData\n\tComStmtClose\n\tComStmtReset\n\tComSetOption\n\tComStmtFetch\n\tComDaemon\n\tComBinlogDumpGtid\n\tComResetConnection\n)\n\n\/\/ Client informations.\nconst (\n\tClientLongPassword uint32 = 1 << iota\n\tClientFoundRows\n\tClientLongFlag\n\tClientConnectWithDB\n\tClientNoSchema\n\tClientCompress\n\tClientODBC\n\tClientLocalFiles\n\tClientIgnoreSpace\n\tClientProtocol41\n\tClientInteractive\n\tClientSSL\n\tClientIgnoreSigpipe\n\tClientTransactions\n\tClientReserved\n\tClientSecureConnection\n\tClientMultiStatements\n\tClientMultiResults\n\tClientPSMultiResults\n\tClientPluginAuth\n\tClientConnectAtts\n\tClientPluginAuthLenencClientData\n)\n\n\/\/ Cache type informations.\nconst (\n\tTypeNoCache byte = 0xff\n)\n\n\/\/ Auth name informations.\nconst (\n\tAuthName = \"mysql_native_password\"\n)\n\n\/\/ MySQL database and tables.\nconst (\n\t\/\/ SystemDB is the name of system database.\n\tSystemDB = \"mysql\"\n\t\/\/ UserTable is the table in system db contains user info.\n\tUserTable = \"User\"\n)\n<commit_msg>mysqldef: Add some consts fro privilege<commit_after>\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mysqldef\n\n\/\/ Version informations.\nconst (\n\tMinProtocolVersion byte = 10\n\tMaxPayloadLen int = 1<<24 - 1\n\tServerVersion string = \"5.5.31-TiDB-1.0\"\n)\n\n\/\/ Header informations.\nconst (\n\tOKHeader byte = 0x00\n\tErrHeader byte = 0xff\n\tEOFHeader byte = 0xfe\n\tLocalInFileHeader byte = 0xfb\n)\n\n\/\/ Server informations.\nconst (\n\tServerStatusInTrans uint16 = 0x0001\n\tServerStatusAutocommit uint16 = 0x0002\n\tServerMoreResultsExists uint16 = 0x0008\n\tServerStatusNoGoodIndexUsed uint16 = 0x0010\n\tServerStatusNoIndexUsed uint16 = 0x0020\n\tServerStatusCursorExists uint16 = 0x0040\n\tServerStatusLastRowSend uint16 = 0x0080\n\tServerStatusDBDropped uint16 = 0x0100\n\tServerStatusNoBackslashEscaped uint16 = 0x0200\n\tServerStatusMetadataChanged uint16 = 0x0400\n\tServerStatusWasSlow uint16 = 0x0800\n\tServerPSOutParams uint16 = 0x1000\n)\n\n\/\/ Command informations.\nconst (\n\tComSleep byte = iota\n\tComQuit\n\tComInitDB\n\tComQuery\n\tComFieldList\n\tComCreateDB\n\tComDropDB\n\tComRefresh\n\tComShutdown\n\tComStatistics\n\tComProcessInfo\n\tComConnect\n\tComProcessKill\n\tComDebug\n\tComPing\n\tComTime\n\tComDelayedInsert\n\tComChangeUser\n\tComBinlogDump\n\tComTableDump\n\tComConnectOut\n\tComRegisterSlave\n\tComStmtPrepare\n\tComStmtExecute\n\tComStmtSendLongData\n\tComStmtClose\n\tComStmtReset\n\tComSetOption\n\tComStmtFetch\n\tComDaemon\n\tComBinlogDumpGtid\n\tComResetConnection\n)\n\n\/\/ Client informations.\nconst (\n\tClientLongPassword uint32 = 1 << iota\n\tClientFoundRows\n\tClientLongFlag\n\tClientConnectWithDB\n\tClientNoSchema\n\tClientCompress\n\tClientODBC\n\tClientLocalFiles\n\tClientIgnoreSpace\n\tClientProtocol41\n\tClientInteractive\n\tClientSSL\n\tClientIgnoreSigpipe\n\tClientTransactions\n\tClientReserved\n\tClientSecureConnection\n\tClientMultiStatements\n\tClientMultiResults\n\tClientPSMultiResults\n\tClientPluginAuth\n\tClientConnectAtts\n\tClientPluginAuthLenencClientData\n)\n\n\/\/ Cache type informations.\nconst (\n\tTypeNoCache byte = 0xff\n)\n\n\/\/ Auth name informations.\nconst (\n\tAuthName = \"mysql_native_password\"\n)\n\n\/\/ MySQL database and tables.\nconst (\n\t\/\/ SystemDB is the name of system database.\n\tSystemDB = \"mysql\"\n\t\/\/ UserTable is the table in system db contains user info.\n\tUserTable = \"User\"\n)\n\n\/\/ PrivilegeType privilege\ntype PrivilegeType uint32\n\nconst (\n\t_ PrivilegeType = 1 << iota\n\t\/\/ CreatePriv is the privilege to create schema\/table.\n\tCreatePriv\n\t\/\/ SelectPriv is the privilege to read from table.\n\tSelectPriv\n\t\/\/ InsertPriv is the privilege to insert data into table.\n\tInsertPriv\n\t\/\/ UpdatePriv is the privilege to update data in table.\n\tUpdatePriv\n\t\/\/ DeletePriv is the privilege to delete data from table.\n\tDeletePriv\n\t\/\/ ShowPriv is the privilege to run show statement.\n\tShowPriv\n\t\/\/ CreateUserPriv is the privilege to create user.\n\tCreateUserPriv\n\t\/\/ DropPriv is the privilege to drop schema\/table.\n\tDropPriv\n)\n<|endoftext|>"} {"text":"<commit_before>package nameserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\t\"github.com\/weaveworks\/weave\/net\/address\"\n)\n\nconst (\n\ttopDomain = \".\"\n\treverseDNSdomain = \"in-addr.arpa.\"\n\tetcResolvConf = \"\/etc\/resolv.conf\"\n\tudpBuffSize = uint16(4096)\n\tminUDPSize = 512\n\n\tDefaultListenAddress = \"0.0.0.0:53\"\n\tDefaultTTL = 1\n\tDefaultClientTimeout = 5 * time.Second\n)\n\ntype DNSServer struct {\n\tns *Nameserver\n\tdomain string\n\tttl uint32\n\taddress string\n\n\tservers []*dns.Server\n\tupstream *dns.ClientConfig\n\ttcpClient *dns.Client\n\tudpClient *dns.Client\n}\n\nfunc NewDNSServer(ns *Nameserver, domain string, address string, ttl uint32, clientTimeout time.Duration) (*DNSServer, error) {\n\n\ts := &DNSServer{\n\t\tns: ns,\n\t\tdomain: dns.Fqdn(domain),\n\t\tttl: ttl,\n\t\taddress: address,\n\t\ttcpClient: &dns.Client{Net: \"tcp\", ReadTimeout: clientTimeout},\n\t\tudpClient: &dns.Client{Net: \"udp\", ReadTimeout: clientTimeout, UDPSize: udpBuffSize},\n\t}\n\tvar err error\n\tif s.upstream, err = dns.ClientConfigFromFile(etcResolvConf); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = s.listen(address)\n\treturn s, err\n}\n\nfunc (d *DNSServer) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"WeaveDNS (%s)\\n\", d.ns.ourName)\n\tfmt.Fprintf(&buf, \" listening on %s, for domain %s\\n\", d.address, d.domain)\n\tfmt.Fprintf(&buf, \" response ttl %d\\n\", d.ttl)\n\treturn buf.String()\n}\n\nfunc (d *DNSServer) listen(address string) error {\n\tudpListener, err := net.ListenPacket(\"udp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tudpServer := &dns.Server{PacketConn: udpListener, Handler: d.createMux(d.udpClient, minUDPSize)}\n\n\ttcpListener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\tudpServer.Shutdown()\n\t\treturn err\n\t}\n\ttcpServer := &dns.Server{Listener: tcpListener, Handler: d.createMux(d.tcpClient, -1)}\n\n\td.servers = []*dns.Server{udpServer, tcpServer}\n\treturn nil\n}\n\nfunc (d *DNSServer) ActivateAndServe() {\n\tfor _, server := range d.servers {\n\t\tgo func(server *dns.Server) {\n\t\t\tserver.ActivateAndServe()\n\t\t}(server)\n\t}\n}\n\nfunc (d *DNSServer) Stop() error {\n\tfor _, server := range d.servers {\n\t\tif err := server.Shutdown(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *DNSServer) errorResponse(r *dns.Msg, code int, w dns.ResponseWriter) {\n\tm := dns.Msg{}\n\tm.SetReply(r)\n\tm.RecursionAvailable = true\n\tm.Rcode = code\n\n\td.ns.debugf(\"error response: %+v\", m)\n\tif err := w.WriteMsg(&m); err != nil {\n\t\td.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\ntype handler struct {\n\t*DNSServer\n\tmaxResponseSize int\n\tclient *dns.Client\n}\n\nfunc (d *DNSServer) createMux(client *dns.Client, defaultMaxResponseSize int) *dns.ServeMux {\n\tm := dns.NewServeMux()\n\th := &handler{\n\t\tDNSServer: d,\n\t\tmaxResponseSize: defaultMaxResponseSize,\n\t\tclient: client,\n\t}\n\tm.HandleFunc(d.domain, h.handleLocal)\n\tm.HandleFunc(reverseDNSdomain, h.handleReverse)\n\tm.HandleFunc(topDomain, h.handleRecursive)\n\treturn m\n}\n\nfunc (h *handler) handleLocal(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"local request: %+v\", *req)\n\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypeA {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\thostname := dns.Fqdn(req.Question[0].Name)\n\tif strings.Count(hostname, \".\") == 1 {\n\t\thostname = hostname + h.domain\n\t}\n\n\taddrs := h.ns.Lookup(hostname)\n\tif len(addrs) == 0 {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\theader := dns.RR_Header{\n\t\tName: req.Question[0].Name,\n\t\tRrtype: dns.TypeA,\n\t\tClass: dns.ClassINET,\n\t\tTtl: h.ttl,\n\t}\n\tanswers := make([]dns.RR, len(addrs))\n\tfor i, addr := range addrs {\n\t\tip := addr.IP4()\n\t\tanswers[i] = &dns.A{Hdr: header, A: ip}\n\t}\n\tshuffleAnswers(&answers)\n\n\tresponse := h.makeResponse(req, answers)\n\th.ns.debugf(\"response: %+v\", response)\n\tif err := w.WriteMsg(response); err != nil {\n\t\th.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (h *handler) handleReverse(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"reverse request: %+v\", *req)\n\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\tipStr := strings.TrimSuffix(req.Question[0].Name, \".\"+reverseDNSdomain)\n\tip, err := address.ParseIP(ipStr)\n\tif err != nil {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\thostname, err := h.ns.ReverseLookup(ip.Reverse())\n\tif err != nil {\n\t\th.handleRecursive(w, req)\n\t\treturn\n\t}\n\n\theader := dns.RR_Header{\n\t\tName: req.Question[0].Name,\n\t\tRrtype: dns.TypePTR,\n\t\tClass: dns.ClassINET,\n\t\tTtl: h.ttl,\n\t}\n\tanswers := []dns.RR{&dns.PTR{\n\t\tHdr: header,\n\t\tPtr: hostname,\n\t}}\n\n\tresponse := h.makeResponse(req, answers)\n\th.ns.debugf(\"response: %+v\", response)\n\tif err := w.WriteMsg(response); err != nil {\n\t\th.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (h *handler) handleRecursive(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"recursive request: %+v\", *req)\n\n\t\/\/ Resolve unqualified names locally\n\tif len(req.Question) == 1 && req.Question[0].Qtype == dns.TypeA {\n\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\th.handleLocal(w, req)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, server := range h.upstream.Servers {\n\t\treqCopy := req.Copy()\n\t\treqCopy.Id = dns.Id()\n\t\tresponse, _, err := h.client.Exchange(reqCopy, fmt.Sprintf(\"%s:%s\", server, h.upstream.Port))\n\t\tif err != nil || response == nil {\n\t\t\th.ns.debugf(\"error trying %s: %v\", server, err)\n\t\t\tcontinue\n\t\t}\n\t\th.ns.debugf(\"response: %+v\", response)\n\t\tresponse.Id = req.Id\n\t\tif response.Len() > h.getMaxResponseSize(req) {\n\t\t\tresponse.Compress = true\n\t\t}\n\t\tif err := w.WriteMsg(response); err != nil {\n\t\t\th.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\th.errorResponse(req, dns.RcodeServerFailure, w)\n}\n\nfunc (h *handler) makeResponse(req *dns.Msg, answers []dns.RR) *dns.Msg {\n\tresponse := &dns.Msg{}\n\tresponse.SetReply(req)\n\tresponse.RecursionAvailable = true\n\tresponse.Authoritative = true\n\tresponse.Answer = answers\n\n\tmaxSize := h.getMaxResponseSize(req)\n\tif len(response.Answer) <= 1 || maxSize <= 0 {\n\t\treturn response\n\t}\n\n\t\/\/ search for smallest i that is too big\n\ti := sort.Search(len(response.Answer), func(i int) bool {\n\t\t\/\/ return true if too big\n\t\tresponse.Answer = answers[:i+1]\n\t\treturn response.Len() > maxSize\n\t})\n\tif i == len(answers) {\n\t\tresponse.Answer = answers\n\t\treturn response\n\t}\n\n\tresponse.Answer = answers[:i]\n\tresponse.Truncated = true\n\treturn response\n}\n\nfunc (h *handler) getMaxResponseSize(req *dns.Msg) int {\n\tif opt := req.IsEdns0(); opt != nil {\n\t\treturn int(opt.UDPSize())\n\t}\n\treturn h.maxResponseSize\n}\n\nfunc shuffleAnswers(answers *[]dns.RR) {\n\tif len(*answers) <= 1 {\n\t\treturn\n\t}\n\n\tfor i := range *answers {\n\t\tj := rand.Intn(i + 1)\n\t\t(*answers)[i], (*answers)[j] = (*answers)[j], (*answers)[i]\n\t}\n}\n<commit_msg>simplify response truncation<commit_after>package nameserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\t\"github.com\/weaveworks\/weave\/net\/address\"\n)\n\nconst (\n\ttopDomain = \".\"\n\treverseDNSdomain = \"in-addr.arpa.\"\n\tetcResolvConf = \"\/etc\/resolv.conf\"\n\tudpBuffSize = uint16(4096)\n\tminUDPSize = 512\n\n\tDefaultListenAddress = \"0.0.0.0:53\"\n\tDefaultTTL = 1\n\tDefaultClientTimeout = 5 * time.Second\n)\n\ntype DNSServer struct {\n\tns *Nameserver\n\tdomain string\n\tttl uint32\n\taddress string\n\n\tservers []*dns.Server\n\tupstream *dns.ClientConfig\n\ttcpClient *dns.Client\n\tudpClient *dns.Client\n}\n\nfunc NewDNSServer(ns *Nameserver, domain string, address string, ttl uint32, clientTimeout time.Duration) (*DNSServer, error) {\n\n\ts := &DNSServer{\n\t\tns: ns,\n\t\tdomain: dns.Fqdn(domain),\n\t\tttl: ttl,\n\t\taddress: address,\n\t\ttcpClient: &dns.Client{Net: \"tcp\", ReadTimeout: clientTimeout},\n\t\tudpClient: &dns.Client{Net: \"udp\", ReadTimeout: clientTimeout, UDPSize: udpBuffSize},\n\t}\n\tvar err error\n\tif s.upstream, err = dns.ClientConfigFromFile(etcResolvConf); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = s.listen(address)\n\treturn s, err\n}\n\nfunc (d *DNSServer) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"WeaveDNS (%s)\\n\", d.ns.ourName)\n\tfmt.Fprintf(&buf, \" listening on %s, for domain %s\\n\", d.address, d.domain)\n\tfmt.Fprintf(&buf, \" response ttl %d\\n\", d.ttl)\n\treturn buf.String()\n}\n\nfunc (d *DNSServer) listen(address string) error {\n\tudpListener, err := net.ListenPacket(\"udp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tudpServer := &dns.Server{PacketConn: udpListener, Handler: d.createMux(d.udpClient, minUDPSize)}\n\n\ttcpListener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\tudpServer.Shutdown()\n\t\treturn err\n\t}\n\ttcpServer := &dns.Server{Listener: tcpListener, Handler: d.createMux(d.tcpClient, -1)}\n\n\td.servers = []*dns.Server{udpServer, tcpServer}\n\treturn nil\n}\n\nfunc (d *DNSServer) ActivateAndServe() {\n\tfor _, server := range d.servers {\n\t\tgo func(server *dns.Server) {\n\t\t\tserver.ActivateAndServe()\n\t\t}(server)\n\t}\n}\n\nfunc (d *DNSServer) Stop() error {\n\tfor _, server := range d.servers {\n\t\tif err := server.Shutdown(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *DNSServer) errorResponse(r *dns.Msg, code int, w dns.ResponseWriter) {\n\tm := dns.Msg{}\n\tm.SetReply(r)\n\tm.RecursionAvailable = true\n\tm.Rcode = code\n\n\td.ns.debugf(\"error response: %+v\", m)\n\tif err := w.WriteMsg(&m); err != nil {\n\t\td.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\ntype handler struct {\n\t*DNSServer\n\tmaxResponseSize int\n\tclient *dns.Client\n}\n\nfunc (d *DNSServer) createMux(client *dns.Client, defaultMaxResponseSize int) *dns.ServeMux {\n\tm := dns.NewServeMux()\n\th := &handler{\n\t\tDNSServer: d,\n\t\tmaxResponseSize: defaultMaxResponseSize,\n\t\tclient: client,\n\t}\n\tm.HandleFunc(d.domain, h.handleLocal)\n\tm.HandleFunc(reverseDNSdomain, h.handleReverse)\n\tm.HandleFunc(topDomain, h.handleRecursive)\n\treturn m\n}\n\nfunc (h *handler) handleLocal(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"local request: %+v\", *req)\n\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypeA {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\thostname := dns.Fqdn(req.Question[0].Name)\n\tif strings.Count(hostname, \".\") == 1 {\n\t\thostname = hostname + h.domain\n\t}\n\n\taddrs := h.ns.Lookup(hostname)\n\tif len(addrs) == 0 {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\theader := dns.RR_Header{\n\t\tName: req.Question[0].Name,\n\t\tRrtype: dns.TypeA,\n\t\tClass: dns.ClassINET,\n\t\tTtl: h.ttl,\n\t}\n\tanswers := make([]dns.RR, len(addrs))\n\tfor i, addr := range addrs {\n\t\tip := addr.IP4()\n\t\tanswers[i] = &dns.A{Hdr: header, A: ip}\n\t}\n\tshuffleAnswers(&answers)\n\n\tresponse := h.makeResponse(req, answers)\n\th.ns.debugf(\"response: %+v\", response)\n\tif err := w.WriteMsg(response); err != nil {\n\t\th.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (h *handler) handleReverse(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"reverse request: %+v\", *req)\n\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\tipStr := strings.TrimSuffix(req.Question[0].Name, \".\"+reverseDNSdomain)\n\tip, err := address.ParseIP(ipStr)\n\tif err != nil {\n\t\th.errorResponse(req, dns.RcodeNameError, w)\n\t\treturn\n\t}\n\n\thostname, err := h.ns.ReverseLookup(ip.Reverse())\n\tif err != nil {\n\t\th.handleRecursive(w, req)\n\t\treturn\n\t}\n\n\theader := dns.RR_Header{\n\t\tName: req.Question[0].Name,\n\t\tRrtype: dns.TypePTR,\n\t\tClass: dns.ClassINET,\n\t\tTtl: h.ttl,\n\t}\n\tanswers := []dns.RR{&dns.PTR{\n\t\tHdr: header,\n\t\tPtr: hostname,\n\t}}\n\n\tresponse := h.makeResponse(req, answers)\n\th.ns.debugf(\"response: %+v\", response)\n\tif err := w.WriteMsg(response); err != nil {\n\t\th.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (h *handler) handleRecursive(w dns.ResponseWriter, req *dns.Msg) {\n\th.ns.debugf(\"recursive request: %+v\", *req)\n\n\t\/\/ Resolve unqualified names locally\n\tif len(req.Question) == 1 && req.Question[0].Qtype == dns.TypeA {\n\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\th.handleLocal(w, req)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, server := range h.upstream.Servers {\n\t\treqCopy := req.Copy()\n\t\treqCopy.Id = dns.Id()\n\t\tresponse, _, err := h.client.Exchange(reqCopy, fmt.Sprintf(\"%s:%s\", server, h.upstream.Port))\n\t\tif err != nil || response == nil {\n\t\t\th.ns.debugf(\"error trying %s: %v\", server, err)\n\t\t\tcontinue\n\t\t}\n\t\th.ns.debugf(\"response: %+v\", response)\n\t\tresponse.Id = req.Id\n\t\tif response.Len() > h.getMaxResponseSize(req) {\n\t\t\tresponse.Compress = true\n\t\t}\n\t\tif err := w.WriteMsg(response); err != nil {\n\t\t\th.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\th.errorResponse(req, dns.RcodeServerFailure, w)\n}\n\nfunc (h *handler) makeResponse(req *dns.Msg, answers []dns.RR) *dns.Msg {\n\tresponse := &dns.Msg{}\n\tresponse.SetReply(req)\n\tresponse.RecursionAvailable = true\n\tresponse.Authoritative = true\n\n\tmaxSize := h.getMaxResponseSize(req)\n\tif len(answers) <= 1 || maxSize <= 0 {\n\t\tresponse.Answer = answers\n\t\treturn response\n\t}\n\n\t\/\/ search for smallest i that is too big\n\ti := sort.Search(len(answers), func(i int) bool {\n\t\t\/\/ return true if too big\n\t\tresponse.Answer = answers[:i+1]\n\t\treturn response.Len() > maxSize\n\t})\n\n\tresponse.Answer = answers[:i]\n\tif i < len(answers) {\n\t\tresponse.Truncated = true\n\t}\n\treturn response\n}\n\nfunc (h *handler) getMaxResponseSize(req *dns.Msg) int {\n\tif opt := req.IsEdns0(); opt != nil {\n\t\treturn int(opt.UDPSize())\n\t}\n\treturn h.maxResponseSize\n}\n\nfunc shuffleAnswers(answers *[]dns.RR) {\n\tif len(*answers) <= 1 {\n\t\treturn\n\t}\n\n\tfor i := range *answers {\n\t\tj := rand.Intn(i + 1)\n\t\t(*answers)[i], (*answers)[j] = (*answers)[j], (*answers)[i]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package nameserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\t\"github.com\/weaveworks\/weave\/net\/address\"\n)\n\nconst (\n\ttopDomain = \".\"\n\treverseDNSdomain = \"in-addr.arpa.\"\n\tetcResolvConf = \"\/etc\/resolv.conf\"\n\tudpBuffSize = uint16(4096)\n\tminUDPSize = 512\n\n\tDefaultListenAddress = \"0.0.0.0:53\"\n\tDefaultTTL = 1\n\tDefaultClientTimeout = 5 * time.Second\n)\n\ntype DNSServer struct {\n\tns *Nameserver\n\tdomain string\n\tttl uint32\n\taddress string\n\n\tservers []*dns.Server\n\tupstream *dns.ClientConfig\n\ttcpClient *dns.Client\n\tudpClient *dns.Client\n}\n\nfunc NewDNSServer(ns *Nameserver, domain string, address string, ttl uint32,\n\tclientTimeout time.Duration) (*DNSServer, error) {\n\n\ts := &DNSServer{\n\t\tns: ns,\n\t\tdomain: dns.Fqdn(domain),\n\t\tttl: ttl,\n\t\taddress: address,\n\t\ttcpClient: &dns.Client{Net: \"tcp\", ReadTimeout: clientTimeout},\n\t\tudpClient: &dns.Client{Net: \"udp\", ReadTimeout: clientTimeout, UDPSize: udpBuffSize},\n\t}\n\tvar err error\n\tif s.upstream, err = dns.ClientConfigFromFile(etcResolvConf); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = s.listen(address)\n\treturn s, err\n}\n\nfunc (d *DNSServer) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"WeaveDNS (%s)\\n\", d.ns.ourName)\n\tfmt.Fprintf(&buf, \" listening on %s, for domain %s\\n\", d.address, d.domain)\n\tfmt.Fprintf(&buf, \" response ttl %d\\n\", d.ttl)\n\treturn buf.String()\n}\n\nfunc (d *DNSServer) listen(address string) error {\n\tudpListener, err := net.ListenPacket(\"udp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tudpServer := &dns.Server{PacketConn: udpListener, Handler: d.createMux(d.udpClient, minUDPSize)}\n\n\ttcpListener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\tudpServer.Shutdown()\n\t\treturn err\n\t}\n\ttcpServer := &dns.Server{Listener: tcpListener, Handler: d.createMux(d.tcpClient, -1)}\n\n\td.servers = []*dns.Server{udpServer, tcpServer}\n\treturn nil\n}\n\nfunc (d *DNSServer) ActivateAndServe() {\n\tfor _, server := range d.servers {\n\t\tgo func(server *dns.Server) {\n\t\t\tserver.ActivateAndServe()\n\t\t}(server)\n\t}\n}\n\nfunc (d *DNSServer) Stop() error {\n\tfor _, server := range d.servers {\n\t\tif err := server.Shutdown(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *DNSServer) errorResponse(r *dns.Msg, code int, w dns.ResponseWriter) {\n\tm := dns.Msg{}\n\tm.SetReply(r)\n\tm.RecursionAvailable = true\n\tm.Rcode = code\n\n\td.ns.debugf(\"error response: %+v\", m)\n\tif err := w.WriteMsg(&m); err != nil {\n\t\td.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (d *DNSServer) createMux(client *dns.Client, defaultMaxResponseSize int) *dns.ServeMux {\n\tm := dns.NewServeMux()\n\tm.HandleFunc(d.domain, d.handleLocal(defaultMaxResponseSize))\n\tm.HandleFunc(reverseDNSdomain, d.handleReverse(client, defaultMaxResponseSize))\n\tm.HandleFunc(topDomain, d.handleRecursive(client, defaultMaxResponseSize))\n\treturn m\n}\n\nfunc (d *DNSServer) handleLocal(defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"local request: %+v\", *req)\n\t\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypeA {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\thostname = hostname + d.domain\n\t\t}\n\n\t\taddrs := d.ns.Lookup(hostname)\n\n\t\tresponse := dns.Msg{}\n\t\tresponse.RecursionAvailable = true\n\t\tresponse.Authoritative = true\n\t\tresponse.SetReply(req)\n\t\tresponse.Answer = make([]dns.RR, len(addrs))\n\n\t\theader := dns.RR_Header{\n\t\t\tName: req.Question[0].Name,\n\t\t\tRrtype: dns.TypeA,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: d.ttl,\n\t\t}\n\n\t\tfor i, addr := range addrs {\n\t\t\tip := addr.IP4()\n\t\t\tresponse.Answer[i] = &dns.A{Hdr: header, A: ip}\n\t\t}\n\n\t\tshuffleAnswers(&response.Answer)\n\t\tmaxResponseSize := getMaxResponseSize(req, defaultMaxResponseSize)\n\t\ttruncateResponse(&response, maxResponseSize)\n\n\t\td.ns.debugf(\"response: %+v\", response)\n\t\tif err := w.WriteMsg(&response); err != nil {\n\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (d *DNSServer) handleReverse(client *dns.Client, defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"reverse request: %+v\", *req)\n\t\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\tipStr := strings.TrimSuffix(req.Question[0].Name, \".\"+reverseDNSdomain)\n\t\tip, err := address.ParseIP(ipStr)\n\t\tif err != nil {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\thostname, err := d.ns.ReverseLookup(ip.Reverse())\n\t\tif err != nil {\n\t\t\td.handleRecursive(client, defaultMaxResponseSize)(w, req)\n\t\t\treturn\n\t\t}\n\n\t\tresponse := dns.Msg{}\n\t\tresponse.RecursionAvailable = true\n\t\tresponse.Authoritative = true\n\t\tresponse.SetReply(req)\n\n\t\theader := dns.RR_Header{\n\t\t\tName: req.Question[0].Name,\n\t\t\tRrtype: dns.TypePTR,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: d.ttl,\n\t\t}\n\n\t\tresponse.Answer = []dns.RR{&dns.PTR{\n\t\t\tHdr: header,\n\t\t\tPtr: hostname,\n\t\t}}\n\n\t\tmaxResponseSize := getMaxResponseSize(req, defaultMaxResponseSize)\n\t\ttruncateResponse(&response, maxResponseSize)\n\n\t\td.ns.debugf(\"response: %+v\", response)\n\t\tif err := w.WriteMsg(&response); err != nil {\n\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (d *DNSServer) handleRecursive(client *dns.Client, defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"recursive request: %+v\", *req)\n\n\t\t\/\/ Resolve unqualified names locally\n\t\tif len(req.Question) == 1 && req.Question[0].Qtype == dns.TypeA {\n\t\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\t\td.handleLocal(defaultMaxResponseSize)(w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tfor _, server := range d.upstream.Servers {\n\t\t\treqCopy := req.Copy()\n\t\t\treqCopy.Id = dns.Id()\n\t\t\tresponse, _, err := client.Exchange(reqCopy, fmt.Sprintf(\"%s:%s\", server, d.upstream.Port))\n\t\t\tif err != nil || response == nil {\n\t\t\t\td.ns.debugf(\"error trying %s: %v\", server, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif response.Rcode != dns.RcodeSuccess && !response.Authoritative {\n\t\t\t\td.ns.debugf(\"non-authoritative error trying %s: %v\", server, response.Rcode)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.ns.debugf(\"response: %+v\", response)\n\t\t\tresponse.SetReply(req)\n\t\t\tif err := w.WriteMsg(response); err != nil {\n\t\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\td.errorResponse(req, dns.RcodeServerFailure, w)\n\t}\n}\n\nfunc shuffleAnswers(answers *[]dns.RR) {\n\tif len(*answers) <= 1 {\n\t\treturn\n\t}\n\n\tfor i := range *answers {\n\t\tj := rand.Intn(i + 1)\n\t\t(*answers)[i], (*answers)[j] = (*answers)[j], (*answers)[i]\n\t}\n}\n\nfunc truncateResponse(response *dns.Msg, maxSize int) {\n\tif len(response.Answer) <= 1 || maxSize <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ take a copy of answers, as we're going to mutate response\n\tanswers := response.Answer\n\n\t\/\/ search for smallest i that is too big\n\ti := sort.Search(len(response.Answer), func(i int) bool {\n\t\t\/\/ return true if too big\n\t\tresponse.Answer = answers[:i+1]\n\t\treturn response.Len() > maxSize\n\t})\n\tif i == len(answers) {\n\t\tresponse.Answer = answers\n\t\treturn\n\t}\n\n\tresponse.Answer = answers[:i]\n\tresponse.Truncated = true\n}\n\nfunc getMaxResponseSize(req *dns.Msg, defaultMaxResponseSize int) int {\n\tif opt := req.IsEdns0(); opt != nil {\n\t\treturn int(opt.UDPSize())\n\t}\n\treturn defaultMaxResponseSize\n}\n<commit_msg>cosmetic<commit_after>package nameserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n\n\t\"github.com\/weaveworks\/weave\/net\/address\"\n)\n\nconst (\n\ttopDomain = \".\"\n\treverseDNSdomain = \"in-addr.arpa.\"\n\tetcResolvConf = \"\/etc\/resolv.conf\"\n\tudpBuffSize = uint16(4096)\n\tminUDPSize = 512\n\n\tDefaultListenAddress = \"0.0.0.0:53\"\n\tDefaultTTL = 1\n\tDefaultClientTimeout = 5 * time.Second\n)\n\ntype DNSServer struct {\n\tns *Nameserver\n\tdomain string\n\tttl uint32\n\taddress string\n\n\tservers []*dns.Server\n\tupstream *dns.ClientConfig\n\ttcpClient *dns.Client\n\tudpClient *dns.Client\n}\n\nfunc NewDNSServer(ns *Nameserver, domain string, address string, ttl uint32, clientTimeout time.Duration) (*DNSServer, error) {\n\n\ts := &DNSServer{\n\t\tns: ns,\n\t\tdomain: dns.Fqdn(domain),\n\t\tttl: ttl,\n\t\taddress: address,\n\t\ttcpClient: &dns.Client{Net: \"tcp\", ReadTimeout: clientTimeout},\n\t\tudpClient: &dns.Client{Net: \"udp\", ReadTimeout: clientTimeout, UDPSize: udpBuffSize},\n\t}\n\tvar err error\n\tif s.upstream, err = dns.ClientConfigFromFile(etcResolvConf); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = s.listen(address)\n\treturn s, err\n}\n\nfunc (d *DNSServer) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"WeaveDNS (%s)\\n\", d.ns.ourName)\n\tfmt.Fprintf(&buf, \" listening on %s, for domain %s\\n\", d.address, d.domain)\n\tfmt.Fprintf(&buf, \" response ttl %d\\n\", d.ttl)\n\treturn buf.String()\n}\n\nfunc (d *DNSServer) listen(address string) error {\n\tudpListener, err := net.ListenPacket(\"udp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tudpServer := &dns.Server{PacketConn: udpListener, Handler: d.createMux(d.udpClient, minUDPSize)}\n\n\ttcpListener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\tudpServer.Shutdown()\n\t\treturn err\n\t}\n\ttcpServer := &dns.Server{Listener: tcpListener, Handler: d.createMux(d.tcpClient, -1)}\n\n\td.servers = []*dns.Server{udpServer, tcpServer}\n\treturn nil\n}\n\nfunc (d *DNSServer) ActivateAndServe() {\n\tfor _, server := range d.servers {\n\t\tgo func(server *dns.Server) {\n\t\t\tserver.ActivateAndServe()\n\t\t}(server)\n\t}\n}\n\nfunc (d *DNSServer) Stop() error {\n\tfor _, server := range d.servers {\n\t\tif err := server.Shutdown(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *DNSServer) errorResponse(r *dns.Msg, code int, w dns.ResponseWriter) {\n\tm := dns.Msg{}\n\tm.SetReply(r)\n\tm.RecursionAvailable = true\n\tm.Rcode = code\n\n\td.ns.debugf(\"error response: %+v\", m)\n\tif err := w.WriteMsg(&m); err != nil {\n\t\td.ns.infof(\"error responding: %v\", err)\n\t}\n}\n\nfunc (d *DNSServer) createMux(client *dns.Client, defaultMaxResponseSize int) *dns.ServeMux {\n\tm := dns.NewServeMux()\n\tm.HandleFunc(d.domain, d.handleLocal(defaultMaxResponseSize))\n\tm.HandleFunc(reverseDNSdomain, d.handleReverse(client, defaultMaxResponseSize))\n\tm.HandleFunc(topDomain, d.handleRecursive(client, defaultMaxResponseSize))\n\treturn m\n}\n\nfunc (d *DNSServer) handleLocal(defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"local request: %+v\", *req)\n\t\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypeA {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\thostname = hostname + d.domain\n\t\t}\n\n\t\taddrs := d.ns.Lookup(hostname)\n\n\t\tresponse := dns.Msg{}\n\t\tresponse.RecursionAvailable = true\n\t\tresponse.Authoritative = true\n\t\tresponse.SetReply(req)\n\t\tresponse.Answer = make([]dns.RR, len(addrs))\n\n\t\theader := dns.RR_Header{\n\t\t\tName: req.Question[0].Name,\n\t\t\tRrtype: dns.TypeA,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: d.ttl,\n\t\t}\n\n\t\tfor i, addr := range addrs {\n\t\t\tip := addr.IP4()\n\t\t\tresponse.Answer[i] = &dns.A{Hdr: header, A: ip}\n\t\t}\n\n\t\tshuffleAnswers(&response.Answer)\n\t\tmaxResponseSize := getMaxResponseSize(req, defaultMaxResponseSize)\n\t\ttruncateResponse(&response, maxResponseSize)\n\n\t\td.ns.debugf(\"response: %+v\", response)\n\t\tif err := w.WriteMsg(&response); err != nil {\n\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (d *DNSServer) handleReverse(client *dns.Client, defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"reverse request: %+v\", *req)\n\t\tif len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\tipStr := strings.TrimSuffix(req.Question[0].Name, \".\"+reverseDNSdomain)\n\t\tip, err := address.ParseIP(ipStr)\n\t\tif err != nil {\n\t\t\td.errorResponse(req, dns.RcodeNameError, w)\n\t\t\treturn\n\t\t}\n\n\t\thostname, err := d.ns.ReverseLookup(ip.Reverse())\n\t\tif err != nil {\n\t\t\td.handleRecursive(client, defaultMaxResponseSize)(w, req)\n\t\t\treturn\n\t\t}\n\n\t\tresponse := dns.Msg{}\n\t\tresponse.RecursionAvailable = true\n\t\tresponse.Authoritative = true\n\t\tresponse.SetReply(req)\n\n\t\theader := dns.RR_Header{\n\t\t\tName: req.Question[0].Name,\n\t\t\tRrtype: dns.TypePTR,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: d.ttl,\n\t\t}\n\n\t\tresponse.Answer = []dns.RR{&dns.PTR{\n\t\t\tHdr: header,\n\t\t\tPtr: hostname,\n\t\t}}\n\n\t\tmaxResponseSize := getMaxResponseSize(req, defaultMaxResponseSize)\n\t\ttruncateResponse(&response, maxResponseSize)\n\n\t\td.ns.debugf(\"response: %+v\", response)\n\t\tif err := w.WriteMsg(&response); err != nil {\n\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (d *DNSServer) handleRecursive(client *dns.Client, defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\td.ns.debugf(\"recursive request: %+v\", *req)\n\n\t\t\/\/ Resolve unqualified names locally\n\t\tif len(req.Question) == 1 && req.Question[0].Qtype == dns.TypeA {\n\t\t\thostname := dns.Fqdn(req.Question[0].Name)\n\t\t\tif strings.Count(hostname, \".\") == 1 {\n\t\t\t\td.handleLocal(defaultMaxResponseSize)(w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tfor _, server := range d.upstream.Servers {\n\t\t\treqCopy := req.Copy()\n\t\t\treqCopy.Id = dns.Id()\n\t\t\tresponse, _, err := client.Exchange(reqCopy, fmt.Sprintf(\"%s:%s\", server, d.upstream.Port))\n\t\t\tif err != nil || response == nil {\n\t\t\t\td.ns.debugf(\"error trying %s: %v\", server, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif response.Rcode != dns.RcodeSuccess && !response.Authoritative {\n\t\t\t\td.ns.debugf(\"non-authoritative error trying %s: %v\", server, response.Rcode)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.ns.debugf(\"response: %+v\", response)\n\t\t\tresponse.SetReply(req)\n\t\t\tif err := w.WriteMsg(response); err != nil {\n\t\t\t\td.ns.infof(\"error responding: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\td.errorResponse(req, dns.RcodeServerFailure, w)\n\t}\n}\n\nfunc shuffleAnswers(answers *[]dns.RR) {\n\tif len(*answers) <= 1 {\n\t\treturn\n\t}\n\n\tfor i := range *answers {\n\t\tj := rand.Intn(i + 1)\n\t\t(*answers)[i], (*answers)[j] = (*answers)[j], (*answers)[i]\n\t}\n}\n\nfunc truncateResponse(response *dns.Msg, maxSize int) {\n\tif len(response.Answer) <= 1 || maxSize <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ take a copy of answers, as we're going to mutate response\n\tanswers := response.Answer\n\n\t\/\/ search for smallest i that is too big\n\ti := sort.Search(len(response.Answer), func(i int) bool {\n\t\t\/\/ return true if too big\n\t\tresponse.Answer = answers[:i+1]\n\t\treturn response.Len() > maxSize\n\t})\n\tif i == len(answers) {\n\t\tresponse.Answer = answers\n\t\treturn\n\t}\n\n\tresponse.Answer = answers[:i]\n\tresponse.Truncated = true\n}\n\nfunc getMaxResponseSize(req *dns.Msg, defaultMaxResponseSize int) int {\n\tif opt := req.IsEdns0(); opt != nil {\n\t\treturn int(opt.UDPSize())\n\t}\n\treturn defaultMaxResponseSize\n}\n<|endoftext|>"} {"text":"<commit_before>package net\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"golang.org\/x\/crypto\/hkdf\"\n\tlibp2p \"gx\/ipfs\/QmUWER4r4qMvaCnX5zREcfyiWN7cXN9g3a7fkRqNz8qWPP\/go-libp2p-crypto\"\n\t\"io\"\n)\n\nconst (\n\t\/\/ The version of the encryption algorithm used. Currently only 1 is supported\n\tCiphertextVersion = 1\n\n\t\/\/ Length of the serialized version in bytes\n\tCiphertextVersionBytes = 4\n\n\t\/\/ Length of the secret key used to generate the AES and MAC keys in bytes\n\tSecretKeyBytes = 32\n\n\t\/\/ Length of the AES key in bytes\n\tAESKeyBytes = 32\n\n\t\/\/ Length of the MAC key in bytes\n\tMacKeyBytes = 32\n\n\t\/\/ Length of the RSA encrypted secret key ciphertext in bytes\n\tEncryptedSecretKeyBytes = 512\n\n\t\/\/ Length of the MAC in bytes\n\tMacBytes = 32\n)\n\nvar (\n\t\/\/ The ciphertext cannot be shorter than CiphertextVersionBytes + EncryptedSecretKeyBytes + aes.BlockSize + MacKeyBytes\n\tErrShortCiphertext = errors.New(\"Ciphertext is too short\")\n\n\t\/\/ The HMAC included in the ciphertext is invalid\n\tErrInvalidHmac = errors.New(\"Invalid Hmac\")\n\n\t\/\/ Satic salt used in the hdkf\n\tSalt = []byte(\"OpenBazaar Encryption Algorithm\")\n)\n\nfunc Encrypt(pubKey libp2p.PubKey, plaintext []byte) ([]byte, error) {\n\n\t\/\/ Encrypt random secret key with RSA pubkey\n\tsecretKey := make([]byte, SecretKeyBytes)\n\trand.Read(secretKey)\n\n\tencKey, err := pubKey.Encrypt(secretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Derive MAC and AES keys from the secret key using hkdf\n\thash := sha256.New\n\n\thkdf := hkdf.New(hash, secretKey, Salt, nil)\n\n\taesKey := make([]byte, AESKeyBytes)\n\t_, err = io.ReadFull(hkdf, aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmacKey := make([]byte, AESKeyBytes)\n\t_, err = io.ReadFull(hkdf, macKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Encrypt message with the AES key\n\tblock, err := aes.NewCipher(aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/* The IV needs to be unique, but not secure. Therefore it is common to\n\t include it at the beginning of the ciphertext. *\/\n\tciphertext := make([]byte, aes.BlockSize+len(plaintext))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstream := cipher.NewCFBEncrypter(block, iv)\n\tstream.XORKeyStream(ciphertext[aes.BlockSize:], plaintext)\n\n\t\/\/ Create the HMAC\n\tmac := hmac.New(sha256.New, macKey)\n\tmac.Write(ciphertext)\n\tmessageMac := mac.Sum(nil)\n\n\t\/\/ Prepend the ciphertext with the encrypted secret key\n\tciphertext = append(encKey, ciphertext...)\n\n\t\/\/ Prepend version\n\tversion := make([]byte, CiphertextVersionBytes)\n\tbinary.BigEndian.PutUint32(version, uint32(CiphertextVersion))\n\tciphertext = append(version, ciphertext...)\n\n\t\/\/ Append the MAC\n\tciphertext = append(ciphertext, messageMac...)\n\treturn ciphertext, nil\n}\n\nfunc Decrypt(privKey libp2p.PrivKey, ciphertext []byte) ([]byte, error) {\n\tversion := getCipherTextVersion(ciphertext)\n\tif version == CiphertextVersion {\n\t\treturn decryptV1(privKey, ciphertext)\n\t} else {\n\t\treturn nil, errors.New(\"Unknown ciphertext version\")\n\t}\n}\n\nfunc decryptV1(privKey libp2p.PrivKey, ciphertext []byte) ([]byte, error) {\n\tif len(ciphertext) < CiphertextVersionBytes+EncryptedSecretKeyBytes+aes.BlockSize+MacKeyBytes {\n\t\treturn nil, ErrShortCiphertext\n\t}\n\n\t\/\/ Decrypt the secret key using the RSA private key\n\tsecretKey, err := privKey.Decrypt(ciphertext[CiphertextVersionBytes : CiphertextVersionBytes+EncryptedSecretKeyBytes])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Derive the AES and MAC keys from the secret key using hdkf\n\thash := sha256.New\n\n\thkdf := hkdf.New(hash, secretKey, Salt, nil)\n\n\taesKey := make([]byte, AESKeyBytes)\n\t_, err = io.ReadFull(hkdf, aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmacKey := make([]byte, MacKeyBytes)\n\t_, err = io.ReadFull(hkdf, macKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Calculate the HMAC and verify it is correct\n\tmac := hmac.New(sha256.New, macKey)\n\tmac.Write(ciphertext[CiphertextVersionBytes+EncryptedSecretKeyBytes : len(ciphertext)-MacBytes])\n\tmessageMac := mac.Sum(nil)\n\tif !hmac.Equal(messageMac, ciphertext[len(ciphertext)-MacBytes:]) {\n\t\treturn nil, ErrInvalidHmac\n\t}\n\n\t\/\/ Decrypt the AES ciphertext\n\tblock, err := aes.NewCipher(aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tciphertext = ciphertext[CiphertextVersionBytes+EncryptedSecretKeyBytes : len(ciphertext)-MacBytes]\n\tif len(ciphertext) < aes.BlockSize {\n\t\treturn nil, err\n\t}\n\tiv := ciphertext[:aes.BlockSize]\n\tciphertext = ciphertext[aes.BlockSize:]\n\n\tstream := cipher.NewCFBDecrypter(block, iv)\n\n\t\/\/ XORKeyStream can work in-place if the two arguments are the same\n\tstream.XORKeyStream(ciphertext, ciphertext)\n\tplaintext := ciphertext\n\treturn plaintext, nil\n}\n\nfunc getCipherTextVersion(ciphertext []byte) uint32 {\n\treturn binary.BigEndian.Uint32(ciphertext[:CiphertextVersionBytes])\n}\n<commit_msg>fix: mac key now uses MacKeyBytes<commit_after>package net\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"golang.org\/x\/crypto\/hkdf\"\n\tlibp2p \"gx\/ipfs\/QmUWER4r4qMvaCnX5zREcfyiWN7cXN9g3a7fkRqNz8qWPP\/go-libp2p-crypto\"\n\t\"io\"\n)\n\nconst (\n\t\/\/ The version of the encryption algorithm used. Currently only 1 is supported\n\tCiphertextVersion = 1\n\n\t\/\/ Length of the serialized version in bytes\n\tCiphertextVersionBytes = 4\n\n\t\/\/ Length of the secret key used to generate the AES and MAC keys in bytes\n\tSecretKeyBytes = 32\n\n\t\/\/ Length of the AES key in bytes\n\tAESKeyBytes = 32\n\n\t\/\/ Length of the MAC key in bytes\n\tMacKeyBytes = 32\n\n\t\/\/ Length of the RSA encrypted secret key ciphertext in bytes\n\tEncryptedSecretKeyBytes = 512\n\n\t\/\/ Length of the MAC in bytes\n\tMacBytes = 32\n)\n\nvar (\n\t\/\/ The ciphertext cannot be shorter than CiphertextVersionBytes + EncryptedSecretKeyBytes + aes.BlockSize + MacKeyBytes\n\tErrShortCiphertext = errors.New(\"Ciphertext is too short\")\n\n\t\/\/ The HMAC included in the ciphertext is invalid\n\tErrInvalidHmac = errors.New(\"Invalid Hmac\")\n\n\t\/\/ Satic salt used in the hdkf\n\tSalt = []byte(\"OpenBazaar Encryption Algorithm\")\n)\n\nfunc Encrypt(pubKey libp2p.PubKey, plaintext []byte) ([]byte, error) {\n\n\t\/\/ Encrypt random secret key with RSA pubkey\n\tsecretKey := make([]byte, SecretKeyBytes)\n\trand.Read(secretKey)\n\n\tencKey, err := pubKey.Encrypt(secretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Derive MAC and AES keys from the secret key using hkdf\n\thash := sha256.New\n\n\thkdf := hkdf.New(hash, secretKey, Salt, nil)\n\n\taesKey := make([]byte, AESKeyBytes)\n\t_, err = io.ReadFull(hkdf, aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmacKey := make([]byte, MacKeyBytes)\n\t_, err = io.ReadFull(hkdf, macKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Encrypt message with the AES key\n\tblock, err := aes.NewCipher(aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/* The IV needs to be unique, but not secure. Therefore it is common to\n\t include it at the beginning of the ciphertext. *\/\n\tciphertext := make([]byte, aes.BlockSize+len(plaintext))\n\tiv := ciphertext[:aes.BlockSize]\n\tif _, err := io.ReadFull(rand.Reader, iv); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstream := cipher.NewCFBEncrypter(block, iv)\n\tstream.XORKeyStream(ciphertext[aes.BlockSize:], plaintext)\n\n\t\/\/ Create the HMAC\n\tmac := hmac.New(sha256.New, macKey)\n\tmac.Write(ciphertext)\n\tmessageMac := mac.Sum(nil)\n\n\t\/\/ Prepend the ciphertext with the encrypted secret key\n\tciphertext = append(encKey, ciphertext...)\n\n\t\/\/ Prepend version\n\tversion := make([]byte, CiphertextVersionBytes)\n\tbinary.BigEndian.PutUint32(version, uint32(CiphertextVersion))\n\tciphertext = append(version, ciphertext...)\n\n\t\/\/ Append the MAC\n\tciphertext = append(ciphertext, messageMac...)\n\treturn ciphertext, nil\n}\n\nfunc Decrypt(privKey libp2p.PrivKey, ciphertext []byte) ([]byte, error) {\n\tversion := getCipherTextVersion(ciphertext)\n\tif version == CiphertextVersion {\n\t\treturn decryptV1(privKey, ciphertext)\n\t} else {\n\t\treturn nil, errors.New(\"Unknown ciphertext version\")\n\t}\n}\n\nfunc decryptV1(privKey libp2p.PrivKey, ciphertext []byte) ([]byte, error) {\n\tif len(ciphertext) < CiphertextVersionBytes+EncryptedSecretKeyBytes+aes.BlockSize+MacKeyBytes {\n\t\treturn nil, ErrShortCiphertext\n\t}\n\n\t\/\/ Decrypt the secret key using the RSA private key\n\tsecretKey, err := privKey.Decrypt(ciphertext[CiphertextVersionBytes : CiphertextVersionBytes+EncryptedSecretKeyBytes])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Derive the AES and MAC keys from the secret key using hdkf\n\thash := sha256.New\n\n\thkdf := hkdf.New(hash, secretKey, Salt, nil)\n\n\taesKey := make([]byte, AESKeyBytes)\n\t_, err = io.ReadFull(hkdf, aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmacKey := make([]byte, MacKeyBytes)\n\t_, err = io.ReadFull(hkdf, macKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Calculate the HMAC and verify it is correct\n\tmac := hmac.New(sha256.New, macKey)\n\tmac.Write(ciphertext[CiphertextVersionBytes+EncryptedSecretKeyBytes : len(ciphertext)-MacBytes])\n\tmessageMac := mac.Sum(nil)\n\tif !hmac.Equal(messageMac, ciphertext[len(ciphertext)-MacBytes:]) {\n\t\treturn nil, ErrInvalidHmac\n\t}\n\n\t\/\/ Decrypt the AES ciphertext\n\tblock, err := aes.NewCipher(aesKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tciphertext = ciphertext[CiphertextVersionBytes+EncryptedSecretKeyBytes : len(ciphertext)-MacBytes]\n\tif len(ciphertext) < aes.BlockSize {\n\t\treturn nil, err\n\t}\n\tiv := ciphertext[:aes.BlockSize]\n\tciphertext = ciphertext[aes.BlockSize:]\n\n\tstream := cipher.NewCFBDecrypter(block, iv)\n\n\t\/\/ XORKeyStream can work in-place if the two arguments are the same\n\tstream.XORKeyStream(ciphertext, ciphertext)\n\tplaintext := ciphertext\n\treturn plaintext, nil\n}\n\nfunc getCipherTextVersion(ciphertext []byte) uint32 {\n\treturn binary.BigEndian.Uint32(ciphertext[:CiphertextVersionBytes])\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\tagentAPI \"github.com\/lxc\/lxd\/lxd-agent\/api\"\n\t\"github.com\/lxc\/lxd\/lxd\/daemon\"\n\t\"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ DevLxdServer creates an http.Server capable of handling requests against the\n\/\/ \/dev\/lxd Unix socket endpoint created inside VMs.\nfunc devLxdServer(d *Daemon) *http.Server {\n\treturn &http.Server{\n\t\tHandler: devLxdAPI(d),\n\t}\n}\n\ntype devLxdResponse struct {\n\tcontent any\n\tcode int\n\tctype string\n}\n\nfunc okResponse(ct any, ctype string) *devLxdResponse {\n\treturn &devLxdResponse{ct, http.StatusOK, ctype}\n}\n\ntype devLxdHandler struct {\n\tpath string\n\n\t\/*\n\t * This API will have to be changed slightly when we decide to support\n\t * websocket events upgrading, but since we don't have events on the\n\t * server side right now either, I went the simple route to avoid\n\t * needless noise.\n\t *\/\n\tf func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse\n}\n\nfunc getVsockClient(d *Daemon) (lxd.InstanceServer, error) {\n\t\/\/ Try connecting to LXD server.\n\tclient, err := getClient(int(d.serverCID), int(d.serverPort), d.serverCertificate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver, err := lxd.ConnectLXDHTTP(nil, client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn server, nil\n}\n\nvar devlxdConfigGet = devLxdHandler{\"\/1.0\/config\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/config\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar config []string\n\n\terr = resp.MetadataAsStruct(&config)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tfiltered := []string{}\n\tfor _, k := range config {\n\t\tif strings.HasPrefix(k, \"\/1.0\/config\/user.\") || strings.HasPrefix(k, \"\/1.0\/config\/cloud-init.\") {\n\t\t\tfiltered = append(filtered, k)\n\t\t}\n\t}\n\treturn okResponse(filtered, \"json\")\n}}\n\nvar devlxdConfigKeyGet = devLxdHandler{\"\/1.0\/config\/{key}\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tkey, err := url.PathUnescape(mux.Vars(r)[\"key\"])\n\tif err != nil {\n\t\treturn &devLxdResponse{\"bad request\", http.StatusBadRequest, \"raw\"}\n\t}\n\n\tif !strings.HasPrefix(key, \"user.\") && !strings.HasPrefix(key, \"cloud-init.\") {\n\t\treturn &devLxdResponse{\"not authorized\", http.StatusForbidden, \"raw\"}\n\t}\n\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", fmt.Sprintf(\"\/1.0\/config\/%s\", key), nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar value string\n\n\terr = resp.MetadataAsStruct(&value)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(value, \"raw\")\n}}\n\nvar devlxdMetadataGet = devLxdHandler{\"\/1.0\/meta-data\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/meta-data\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar metaData string\n\n\terr = resp.MetadataAsStruct(&metaData)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(metaData, \"raw\")\n}}\n\nvar devLxdEventsGet = devLxdHandler{\"\/1.0\/events\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\terr := eventsGet(d, r).Render(w)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(\"\", \"raw\")\n}}\n\nvar devlxdAPIGet = devLxdHandler{\"\/1.0\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar instanceData agentAPI.DevLXDGet\n\n\terr = resp.MetadataAsStruct(&instanceData)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(instanceData, \"json\")\n}}\n\nvar devlxdDevicesGet = devLxdHandler{\"\/1.0\/devices\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/devices\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar devices config.Devices\n\n\terr = resp.MetadataAsStruct(&devices)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(devices, \"json\")\n}}\n\nvar handlers = []devLxdHandler{\n\t{\"\/\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\t\treturn okResponse([]string{\"\/1.0\"}, \"json\")\n\t}},\n\tdevlxdAPIGet,\n\tdevlxdConfigGet,\n\tdevlxdConfigKeyGet,\n\tdevlxdMetadataGet,\n\tdevLxdEventsGet,\n\tdevlxdDevicesGet,\n}\n\nfunc hoistReq(f func(*Daemon, http.ResponseWriter, *http.Request) *devLxdResponse, d *Daemon) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresp := f(d, w, r)\n\t\tif resp.code != http.StatusOK {\n\t\t\thttp.Error(w, fmt.Sprintf(\"%s\", resp.content), resp.code)\n\t\t} else if resp.ctype == \"json\" {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\t\t\tvar debugLogger logger.Logger\n\t\t\tif daemon.Debug {\n\t\t\t\tdebugLogger = logger.Logger(logger.Log)\n\t\t\t}\n\n\t\t\t_ = util.WriteJSON(w, resp.content, debugLogger)\n\t\t} else if resp.ctype != \"websocket\" {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t_, _ = fmt.Fprint(w, resp.content.(string))\n\t\t}\n\t}\n}\n\nfunc devLxdAPI(d *Daemon) http.Handler {\n\tm := mux.NewRouter()\n\tm.UseEncodedPath() \/\/ Allow encoded values in path segments.\n\n\tfor _, handler := range handlers {\n\t\tm.HandleFunc(handler.path, hoistReq(handler.f, d))\n\t}\n\n\treturn m\n}\n\n\/\/ Create a new net.Listener bound to the unix socket of the devlxd endpoint.\nfunc createDevLxdlListener(dir string) (net.Listener, error) {\n\tpath := filepath.Join(dir, \"lxd\", \"sock\")\n\n\terr := os.MkdirAll(filepath.Dir(path), 0755)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If this socket exists, that means a previous LXD instance died and\n\t\/\/ didn't clean up. We assume that such LXD instance is actually dead\n\t\/\/ if we get this far, since localCreateListener() tries to connect to\n\t\/\/ the actual lxd socket to make sure that it is actually dead. So, it\n\t\/\/ is safe to remove it here without any checks.\n\t\/\/\n\t\/\/ Also, it would be nice to SO_REUSEADDR here so we don't have to\n\t\/\/ delete the socket, but we can't:\n\t\/\/ http:\/\/stackoverflow.com\/questions\/15716302\/so-reuseaddr-and-af-unix\n\t\/\/\n\t\/\/ Note that this will force clients to reconnect when LXD is restarted.\n\terr = socketUnixRemoveStale(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlistener, err := socketUnixListen(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = socketUnixSetPermissions(path, 0600)\n\tif err != nil {\n\t\t_ = listener.Close()\n\t\treturn nil, err\n\t}\n\n\treturn listener, nil\n}\n\n\/\/ Remove any stale socket file at the given path.\nfunc socketUnixRemoveStale(path string) error {\n\t\/\/ If there's no socket file at all, there's nothing to do.\n\tif !shared.PathExists(path) {\n\t\treturn nil\n\t}\n\n\tlogger.Debugf(\"Detected stale unix socket, deleting\")\n\terr := os.Remove(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not delete stale local socket: %w\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Change the file mode of the given unix socket file.\nfunc socketUnixSetPermissions(path string, mode os.FileMode) error {\n\terr := os.Chmod(path, mode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot set permissions on local socket: %w\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Bind to the given unix socket path.\nfunc socketUnixListen(path string) (net.Listener, error) {\n\taddr, err := net.ResolveUnixAddr(\"unix\", path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot resolve socket address: %w\", err)\n\t}\n\n\tlistener, err := net.ListenUnix(\"unix\", addr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot bind socket: %w\", err)\n\t}\n\n\treturn listener, err\n}\n<commit_msg>lxd-agent: Handle PATCH \/1.0 in devlxd<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\tagentAPI \"github.com\/lxc\/lxd\/lxd-agent\/api\"\n\t\"github.com\/lxc\/lxd\/lxd\/daemon\"\n\t\"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ DevLxdServer creates an http.Server capable of handling requests against the\n\/\/ \/dev\/lxd Unix socket endpoint created inside VMs.\nfunc devLxdServer(d *Daemon) *http.Server {\n\treturn &http.Server{\n\t\tHandler: devLxdAPI(d),\n\t}\n}\n\ntype devLxdResponse struct {\n\tcontent any\n\tcode int\n\tctype string\n}\n\nfunc okResponse(ct any, ctype string) *devLxdResponse {\n\treturn &devLxdResponse{ct, http.StatusOK, ctype}\n}\n\ntype devLxdHandler struct {\n\tpath string\n\n\t\/*\n\t * This API will have to be changed slightly when we decide to support\n\t * websocket events upgrading, but since we don't have events on the\n\t * server side right now either, I went the simple route to avoid\n\t * needless noise.\n\t *\/\n\tf func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse\n}\n\nfunc getVsockClient(d *Daemon) (lxd.InstanceServer, error) {\n\t\/\/ Try connecting to LXD server.\n\tclient, err := getClient(int(d.serverCID), int(d.serverPort), d.serverCertificate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver, err := lxd.ConnectLXDHTTP(nil, client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn server, nil\n}\n\nvar devlxdConfigGet = devLxdHandler{\"\/1.0\/config\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/config\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar config []string\n\n\terr = resp.MetadataAsStruct(&config)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tfiltered := []string{}\n\tfor _, k := range config {\n\t\tif strings.HasPrefix(k, \"\/1.0\/config\/user.\") || strings.HasPrefix(k, \"\/1.0\/config\/cloud-init.\") {\n\t\t\tfiltered = append(filtered, k)\n\t\t}\n\t}\n\treturn okResponse(filtered, \"json\")\n}}\n\nvar devlxdConfigKeyGet = devLxdHandler{\"\/1.0\/config\/{key}\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tkey, err := url.PathUnescape(mux.Vars(r)[\"key\"])\n\tif err != nil {\n\t\treturn &devLxdResponse{\"bad request\", http.StatusBadRequest, \"raw\"}\n\t}\n\n\tif !strings.HasPrefix(key, \"user.\") && !strings.HasPrefix(key, \"cloud-init.\") {\n\t\treturn &devLxdResponse{\"not authorized\", http.StatusForbidden, \"raw\"}\n\t}\n\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", fmt.Sprintf(\"\/1.0\/config\/%s\", key), nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar value string\n\n\terr = resp.MetadataAsStruct(&value)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(value, \"raw\")\n}}\n\nvar devlxdMetadataGet = devLxdHandler{\"\/1.0\/meta-data\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/meta-data\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar metaData string\n\n\terr = resp.MetadataAsStruct(&metaData)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(metaData, \"raw\")\n}}\n\nvar devLxdEventsGet = devLxdHandler{\"\/1.0\/events\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\terr := eventsGet(d, r).Render(w)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(\"\", \"raw\")\n}}\n\nvar devlxdAPIGet = devLxdHandler{\"\/1.0\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tif r.Method == \"GET\" {\n\t\tresp, _, err := client.RawQuery(r.Method, \"\/1.0\", nil, \"\")\n\t\tif err != nil {\n\t\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t\t}\n\n\t\tvar instanceData agentAPI.DevLXDGet\n\n\t\terr = resp.MetadataAsStruct(&instanceData)\n\t\tif err != nil {\n\t\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t\t}\n\n\t\treturn okResponse(instanceData, \"json\")\n\t} else if r.Method == \"PATCH\" {\n\t\t_, _, err := client.RawQuery(r.Method, \"\/1.0\", r.Body, \"\")\n\t\tif err != nil {\n\t\t\treturn &devLxdResponse{err.Error(), http.StatusInternalServerError, \"raw\"}\n\t\t}\n\n\t\treturn okResponse(\"\", \"raw\")\n\t}\n\n\treturn &devLxdResponse{fmt.Sprintf(\"method %q not allowed\", r.Method), http.StatusBadRequest, \"raw\"}\n}}\n\nvar devlxdDevicesGet = devLxdHandler{\"\/1.0\/devices\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\tclient, err := getVsockClient(d)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tdefer client.Disconnect()\n\n\tresp, _, err := client.RawQuery(\"GET\", \"\/1.0\/devices\", nil, \"\")\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\tvar devices config.Devices\n\n\terr = resp.MetadataAsStruct(&devices)\n\tif err != nil {\n\t\treturn &devLxdResponse{\"internal server error\", http.StatusInternalServerError, \"raw\"}\n\t}\n\n\treturn okResponse(devices, \"json\")\n}}\n\nvar handlers = []devLxdHandler{\n\t{\"\/\", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devLxdResponse {\n\t\treturn okResponse([]string{\"\/1.0\"}, \"json\")\n\t}},\n\tdevlxdAPIGet,\n\tdevlxdConfigGet,\n\tdevlxdConfigKeyGet,\n\tdevlxdMetadataGet,\n\tdevLxdEventsGet,\n\tdevlxdDevicesGet,\n}\n\nfunc hoistReq(f func(*Daemon, http.ResponseWriter, *http.Request) *devLxdResponse, d *Daemon) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresp := f(d, w, r)\n\t\tif resp.code != http.StatusOK {\n\t\t\thttp.Error(w, fmt.Sprintf(\"%s\", resp.content), resp.code)\n\t\t} else if resp.ctype == \"json\" {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\t\t\tvar debugLogger logger.Logger\n\t\t\tif daemon.Debug {\n\t\t\t\tdebugLogger = logger.Logger(logger.Log)\n\t\t\t}\n\n\t\t\t_ = util.WriteJSON(w, resp.content, debugLogger)\n\t\t} else if resp.ctype != \"websocket\" {\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\t\t\t_, _ = fmt.Fprint(w, resp.content.(string))\n\t\t}\n\t}\n}\n\nfunc devLxdAPI(d *Daemon) http.Handler {\n\tm := mux.NewRouter()\n\tm.UseEncodedPath() \/\/ Allow encoded values in path segments.\n\n\tfor _, handler := range handlers {\n\t\tm.HandleFunc(handler.path, hoistReq(handler.f, d))\n\t}\n\n\treturn m\n}\n\n\/\/ Create a new net.Listener bound to the unix socket of the devlxd endpoint.\nfunc createDevLxdlListener(dir string) (net.Listener, error) {\n\tpath := filepath.Join(dir, \"lxd\", \"sock\")\n\n\terr := os.MkdirAll(filepath.Dir(path), 0755)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If this socket exists, that means a previous LXD instance died and\n\t\/\/ didn't clean up. We assume that such LXD instance is actually dead\n\t\/\/ if we get this far, since localCreateListener() tries to connect to\n\t\/\/ the actual lxd socket to make sure that it is actually dead. So, it\n\t\/\/ is safe to remove it here without any checks.\n\t\/\/\n\t\/\/ Also, it would be nice to SO_REUSEADDR here so we don't have to\n\t\/\/ delete the socket, but we can't:\n\t\/\/ http:\/\/stackoverflow.com\/questions\/15716302\/so-reuseaddr-and-af-unix\n\t\/\/\n\t\/\/ Note that this will force clients to reconnect when LXD is restarted.\n\terr = socketUnixRemoveStale(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlistener, err := socketUnixListen(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = socketUnixSetPermissions(path, 0600)\n\tif err != nil {\n\t\t_ = listener.Close()\n\t\treturn nil, err\n\t}\n\n\treturn listener, nil\n}\n\n\/\/ Remove any stale socket file at the given path.\nfunc socketUnixRemoveStale(path string) error {\n\t\/\/ If there's no socket file at all, there's nothing to do.\n\tif !shared.PathExists(path) {\n\t\treturn nil\n\t}\n\n\tlogger.Debugf(\"Detected stale unix socket, deleting\")\n\terr := os.Remove(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not delete stale local socket: %w\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Change the file mode of the given unix socket file.\nfunc socketUnixSetPermissions(path string, mode os.FileMode) error {\n\terr := os.Chmod(path, mode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot set permissions on local socket: %w\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Bind to the given unix socket path.\nfunc socketUnixListen(path string) (net.Listener, error) {\n\taddr, err := net.ResolveUnixAddr(\"unix\", path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot resolve socket address: %w\", err)\n\t}\n\n\tlistener, err := net.ListenUnix(\"unix\", addr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot bind socket: %w\", err)\n\t}\n\n\treturn listener, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage model\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kops\/nodeup\/pkg\/distros\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/nodeup\/nodetasks\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ KubectlBuilder install kubectl\ntype KubectlBuilder struct {\n\t*NodeupModelContext\n}\n\nvar _ fi.ModelBuilder = &KubectlBuilder{}\n\n\/\/ Build is responsible for mananging the kubectl on the nodes\nfunc (b *KubectlBuilder) Build(c *fi.ModelBuilderContext) error {\n\tif !b.IsMaster {\n\t\treturn nil\n\t}\n\n\t{\n\t\t\/\/ TODO: Extract to common function?\n\t\tassetName := \"kubectl\"\n\t\tassetPath := \"\"\n\t\tasset, err := b.Assets.Find(assetName, assetPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error trying to locate asset %q: %v\", assetName, err)\n\t\t}\n\t\tif asset == nil {\n\t\t\treturn fmt.Errorf(\"unable to locate asset %q\", assetName)\n\t\t}\n\n\t\tt := &nodetasks.File{\n\t\t\tPath: b.KubectlPath() + \"\/\" + assetName,\n\t\t\tContents: asset,\n\t\t\tType: nodetasks.FileType_File,\n\t\t\tMode: s(\"0755\"),\n\t\t}\n\t\tc.AddTask(t)\n\t}\n\n\t{\n\t\tkubeconfig, err := b.buildPKIKubeconfig(\"kubecfg\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := &nodetasks.File{\n\t\t\tPath: \"\/var\/lib\/kubectl\/kubeconfig\",\n\t\t\tContents: fi.NewStringResource(kubeconfig),\n\t\t\tType: nodetasks.FileType_File,\n\t\t\tMode: s(\"0400\"),\n\t\t}\n\t\tc.AddTask(t)\n\n\t\tswitch b.Distribution {\n\t\tcase distros.DistributionJessie, distros.DistributionDebian9:\n\t\t\tc.AddTask(&nodetasks.File{\n\t\t\t\tPath: \"\/home\/admin\/.kube\/\",\n\t\t\t\tType: nodetasks.FileType_Directory,\n\t\t\t\tMode: s(\"0700\"),\n\t\t\t\tOwner: s(\"admin\"),\n\t\t\t\tGroup: s(\"admin\"),\n\t\t\t})\n\n\t\t\tc.AddTask(&nodetasks.File{\n\t\t\t\tPath: \"\/home\/admin\/.kube\/config\",\n\t\t\t\tContents: fi.NewStringResource(kubeconfig),\n\t\t\t\tType: nodetasks.FileType_File,\n\t\t\t\tMode: s(\"0400\"),\n\t\t\t\tOwner: s(\"admin\"),\n\t\t\t\tGroup: s(\"admin\"),\n\t\t\t})\n\n\t\tdefault:\n\t\t\tglog.Warningf(\"Unknown distro; won't write kubeconfig to homedir %s\", b.Distribution)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>fix typo in comment: mananging<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage model\n\nimport (\n\t\"fmt\"\n\n\t\"k8s.io\/kops\/nodeup\/pkg\/distros\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/nodeup\/nodetasks\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ KubectlBuilder install kubectl\ntype KubectlBuilder struct {\n\t*NodeupModelContext\n}\n\nvar _ fi.ModelBuilder = &KubectlBuilder{}\n\n\/\/ Build is responsible for managing the kubectl on the nodes\nfunc (b *KubectlBuilder) Build(c *fi.ModelBuilderContext) error {\n\tif !b.IsMaster {\n\t\treturn nil\n\t}\n\n\t{\n\t\t\/\/ TODO: Extract to common function?\n\t\tassetName := \"kubectl\"\n\t\tassetPath := \"\"\n\t\tasset, err := b.Assets.Find(assetName, assetPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error trying to locate asset %q: %v\", assetName, err)\n\t\t}\n\t\tif asset == nil {\n\t\t\treturn fmt.Errorf(\"unable to locate asset %q\", assetName)\n\t\t}\n\n\t\tt := &nodetasks.File{\n\t\t\tPath: b.KubectlPath() + \"\/\" + assetName,\n\t\t\tContents: asset,\n\t\t\tType: nodetasks.FileType_File,\n\t\t\tMode: s(\"0755\"),\n\t\t}\n\t\tc.AddTask(t)\n\t}\n\n\t{\n\t\tkubeconfig, err := b.buildPKIKubeconfig(\"kubecfg\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := &nodetasks.File{\n\t\t\tPath: \"\/var\/lib\/kubectl\/kubeconfig\",\n\t\t\tContents: fi.NewStringResource(kubeconfig),\n\t\t\tType: nodetasks.FileType_File,\n\t\t\tMode: s(\"0400\"),\n\t\t}\n\t\tc.AddTask(t)\n\n\t\tswitch b.Distribution {\n\t\tcase distros.DistributionJessie, distros.DistributionDebian9:\n\t\t\tc.AddTask(&nodetasks.File{\n\t\t\t\tPath: \"\/home\/admin\/.kube\/\",\n\t\t\t\tType: nodetasks.FileType_Directory,\n\t\t\t\tMode: s(\"0700\"),\n\t\t\t\tOwner: s(\"admin\"),\n\t\t\t\tGroup: s(\"admin\"),\n\t\t\t})\n\n\t\t\tc.AddTask(&nodetasks.File{\n\t\t\t\tPath: \"\/home\/admin\/.kube\/config\",\n\t\t\t\tContents: fi.NewStringResource(kubeconfig),\n\t\t\t\tType: nodetasks.FileType_File,\n\t\t\t\tMode: s(\"0400\"),\n\t\t\t\tOwner: s(\"admin\"),\n\t\t\t\tGroup: s(\"admin\"),\n\t\t\t})\n\n\t\tdefault:\n\t\t\tglog.Warningf(\"Unknown distro; won't write kubeconfig to homedir %s\", b.Distribution)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package certinel\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nvar (\n\tErrNoPeerCertificate = errors.New(\"peer did not present certificate for domain\")\n\tErrExpired = errors.New(\"certificate expired\")\n\tErrNotYetValid = errors.New(\"certificate not yet valid\")\n\tErrInvalidHostname = errors.New(\"invalid hostname\")\n\tErrNoCertificate = errors.New(\"certificate serial not found\")\n)\n\ntype Domain struct {\n\tDomain string `json:\"domain\"`\n\tPort string `json:\"port\"`\n\tcert *x509.Certificate\n}\n\ntype Status struct {\n\tDuration int64 `json:\"check_duration\"`\n\tValid bool `json:\"valid\"`\n\tErr string `json:\"error\"`\n\tTime time.Time `json:\"last_check\"`\n}\n\ntype Subject struct {\n\tCommonName string `json:\"cn\"`\n\tCountry []string `json:\"c\"`\n\tOrganization []string `json:\"o\"`\n\tOrganizationalUnit []string `json:\"ou\"`\n}\n\ntype Certificate struct {\n\tNotBefore time.Time `json:\"not_before\"`\n\tNotAfter time.Time `json:\"not_after\"`\n\tIssuer Subject `json:\"issuer\"`\n\tSubject Subject `json:\"subject\"`\n\tSerialNumber string `json:\"serial\"`\n\tAlternateNames []string `json:\"alternate_names\"`\n}\n\nfunc convertCert(cert *x509.Certificate) *Certificate {\n\tresult := &Certificate{\n\t\tNotBefore: cert.NotBefore.UTC(),\n\t\tNotAfter: cert.NotAfter.UTC(),\n\t\tSerialNumber: cert.SerialNumber.String(),\n\t\tAlternateNames: cert.DNSNames,\n\t}\n\n\tresult.Subject = Subject{\n\t\tCommonName: cert.Subject.CommonName,\n\t\tCountry: cert.Subject.Country,\n\t\tOrganization: cert.Subject.Organization,\n\t\tOrganizationalUnit: cert.Subject.OrganizationalUnit,\n\t}\n\n\tresult.Issuer = Subject{\n\t\tCommonName: cert.Issuer.CommonName,\n\t\tCountry: cert.Issuer.Country,\n\t\tOrganization: cert.Issuer.Organization,\n\t\tOrganizationalUnit: cert.Issuer.OrganizationalUnit,\n\t}\n\n\treturn result\n}\n\n\/\/ reverse a hostname (example.com => com.example.). This will provide a better\n\/\/ form for sorting (www.example.com and api.example.com will be close together\n\/\/ when reversed)\nfunc ReverseHost(hostname string) (string, error) {\n\tif _, ok := dns.IsDomainName(hostname); ok {\n\t\tlabels := dns.SplitDomainName(hostname)\n\n\t\tfor i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 {\n\t\t\tlabels[i], labels[j] = labels[j], labels[i]\n\t\t}\n\n\t\treturn strings.Join(labels, \".\"), nil\n\t} else {\n\t\treturn \"\", ErrInvalidHostname\n\t}\n\n\treturn \"\", ErrInvalidHostname\n}\n\nfunc (d *Domain) GetCertificate() (*x509.Certificate, error) {\n\tconn, err := tls.Dial(\"tcp\", d.Domain+\":\"+d.Port, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := conn.Handshake(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstate := conn.ConnectionState()\n\tfor _, cert := range state.PeerCertificates {\n\t\tif ok := cert.VerifyHostname(d.Domain); ok == nil {\n\t\t\treturn cert, nil\n\t\t}\n\t}\n\n\treturn nil, ErrNoPeerCertificate\n}\n\nfunc (d *Domain) Check() error {\n\tcert, err := d.GetCertificate()\n\tif err != nil {\n\t\treturn nil\n\t}\n\td.cert = cert\n\n\tnow := time.Now().UTC()\n\tif !now.Before(cert.NotAfter) {\n\t\treturn ErrExpired\n\t}\n\n\tif !now.After(cert.NotBefore) {\n\t\treturn ErrNotYetValid\n\t}\n\n\treturn nil\n}\n\nfunc (d *Domain) Store(status *Status) error {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tif err := store.Create(bucket); err != nil {\n\t\treturn err\n\t}\n\n\tif d.cert != nil {\n\t\tid := d.cert.NotBefore.Format(time.RFC3339)\n\t\tdata := base64.StdEncoding.EncodeToString(d.cert.Raw)\n\t\tif err := store.Set(bucket, \"cert~\"+d.cert.SerialNumber.String(), data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"history~\"+id, d.cert.SerialNumber.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"history~~\", \"-- LIST STOP --\"); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"current\", d.cert.SerialNumber.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif status != nil {\n\t\t\tdata, err := json.Marshal(status)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := store.Set(bucket, \"status\", string(data)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Domain) Status() (*Status, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tvalue, err := store.Get(bucket, \"status\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := &Status{}\n\tif err := json.Unmarshal([]byte(value), data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\nfunc (d *Domain) Delete() error {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstore := GetStore()\n\treturn store.Remove([]string{\"domains\"}, rhost+\":\"+d.Port)\n}\n\nfunc (d *Domain) CertList() (string, []string, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tcurrent, err := store.Get(bucket, \"current\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\this := store.Scan(bucket, \"history~\", true, 51)\n\thistory := make([]string, 0)\n\tfor kv := range his {\n\t\tif len(kv.Value) > 0 {\n\t\t\thistory = append(history, kv.Value)\n\t\t}\n\t}\n\n\treturn current, history[1:], nil\n}\n\nfunc (d *Domain) LoadCertificate(serial string) (*Certificate, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\traw, err := store.Get(bucket, \"cert~\"+serial)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(raw) == 0 {\n\t\treturn nil, ErrNoCertificate\n\t}\n\n\tdata, err := base64.StdEncoding.DecodeString(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn convertCert(cert), nil\n}\n\nfunc CheckDomain(domain, port string) {\n\tticker := time.NewTicker(time.Minute * 5)\n\tlog.Printf(\"starting domain checker for \\\"%s:%s\\\"\\n\", domain, port)\n\n\tfor {\n\t\td := &Domain{\n\t\t\tDomain: domain,\n\t\t\tPort: port,\n\t\t}\n\n\t\tstart := time.Now()\n\t\tstatus := &Status{Time: start}\n\t\terr := d.Check()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"checking domain \\\"%s:%s\\\": %s\\n\", domain, port, err.Error())\n\t\t\tstatus.Valid = false\n\t\t\tstatus.Err = err.Error()\n\t\t} else {\n\t\t\tnow := time.Now().UTC().Unix()\n\t\t\tvalidity := int((d.cert.NotAfter.Unix() - now) \/ 86400)\n\t\t\tlog.Printf(\"checking domain \\\"%s:%s\\\": certificate is valid for %d days\", domain, port, validity)\n\t\t\tstatus.Valid = true\n\t\t}\n\t\tstatus.Duration = int64(time.Since(start) \/ time.Millisecond)\n\n\t\t\/\/ store latest check and certificate\n\t\td.Store(status)\n\n\t\t\/\/ wait for 5 minutes\n\t\t<-ticker.C\n\t}\n}\n\nfunc GetDomains() []*Domain {\n\tresult := make([]*Domain, 0)\n\tstore := GetStore()\n\tfor kv := range store.Scan([]string{\"domains\"}, \"\", false, 0) {\n\t\tsplitter := strings.Split(kv.Key, \":\")\n\t\tif len(splitter) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tdomain, err := ReverseHost(splitter[0])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, &Domain{Domain: domain, Port: splitter[1]})\n\t}\n\n\treturn result\n}\n\nfunc StartDomainChecker() {\n\tfor _, d := range GetDomains() {\n\t\tgo CheckDomain(d.Domain, d.Port)\n\t}\n}\n<commit_msg>more information in the certificate<commit_after>package certinel\n\nimport (\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nvar (\n\tErrNoPeerCertificate = errors.New(\"peer did not present certificate for domain\")\n\tErrExpired = errors.New(\"certificate expired\")\n\tErrNotYetValid = errors.New(\"certificate not yet valid\")\n\tErrInvalidHostname = errors.New(\"invalid hostname\")\n\tErrNoCertificate = errors.New(\"certificate serial not found\")\n)\n\ntype Domain struct {\n\tDomain string `json:\"domain\"`\n\tPort string `json:\"port\"`\n\tcert *x509.Certificate\n}\n\ntype Status struct {\n\tDuration int64 `json:\"check_duration\"`\n\tValid bool `json:\"valid\"`\n\tErr string `json:\"error\"`\n\tTime time.Time `json:\"last_check\"`\n}\n\ntype Subject struct {\n\tCommonName string `json:\"cn\"`\n\tCountry []string `json:\"c,omitempty\"`\n\tOrganization []string `json:\"o,omitempty\"`\n\tOrganizationalUnit []string `json:\"ou,omitempty\"`\n}\n\ntype Signature struct {\n\tAlgorithm int `json:\"algorithm\"`\n\tValue string `json:\"value\"`\n}\n\ntype Certificate struct {\n\tNotBefore time.Time `json:\"not_before\"`\n\tNotAfter time.Time `json:\"not_after\"`\n\tIssuer Subject `json:\"issuer\"`\n\tSubject Subject `json:\"subject\"`\n\tSerialNumber string `json:\"serial\"`\n\tAlternateNames []string `json:\"alternate_names,omitempty\"`\n\tSignature Signature `json:\"signature\"`\n\tFingerprints map[string]string `json:\"fingerprints\"`\n}\n\nfunc toHexString(data []byte) string {\n\tresult := make([]string, len(data))\n\tfor i := 0; i < len(data); i += 1 {\n\t\tresult[i] = hex.EncodeToString(data[i : i+1])\n\t}\n\treturn strings.Join(result, \":\")\n}\n\nfunc convertCert(cert *x509.Certificate) *Certificate {\n\tresult := &Certificate{\n\t\tNotBefore: cert.NotBefore.UTC(),\n\t\tNotAfter: cert.NotAfter.UTC(),\n\t\tSerialNumber: toHexString(cert.SerialNumber.Bytes()),\n\t\tAlternateNames: cert.DNSNames,\n\t\tFingerprints: make(map[string]string),\n\t}\n\n\tresult.Signature = Signature{\n\t\tValue: toHexString(cert.Signature),\n\t\tAlgorithm: int(cert.SignatureAlgorithm),\n\t}\n\n\tresult.Subject = Subject{\n\t\tCommonName: cert.Subject.CommonName,\n\t\tCountry: cert.Subject.Country,\n\t\tOrganization: cert.Subject.Organization,\n\t\tOrganizationalUnit: cert.Subject.OrganizationalUnit,\n\t}\n\n\tresult.Issuer = Subject{\n\t\tCommonName: cert.Issuer.CommonName,\n\t\tCountry: cert.Issuer.Country,\n\t\tOrganization: cert.Issuer.Organization,\n\t\tOrganizationalUnit: cert.Issuer.OrganizationalUnit,\n\t}\n\n\ts256 := sha256.New()\n\ts256.Write(cert.Raw)\n\tresult.Fingerprints[\"sha256\"] = toHexString(s256.Sum(nil))\n\n\ts1 := sha1.New()\n\ts1.Write(cert.Raw)\n\tresult.Fingerprints[\"sha1\"] = toHexString(s1.Sum(nil))\n\n\treturn result\n}\n\n\/\/ reverse a hostname (example.com => com.example.). This will provide a better\n\/\/ form for sorting (www.example.com and api.example.com will be close together\n\/\/ when reversed)\nfunc ReverseHost(hostname string) (string, error) {\n\tif _, ok := dns.IsDomainName(hostname); ok {\n\t\tlabels := dns.SplitDomainName(hostname)\n\n\t\tfor i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 {\n\t\t\tlabels[i], labels[j] = labels[j], labels[i]\n\t\t}\n\n\t\treturn strings.Join(labels, \".\"), nil\n\t} else {\n\t\treturn \"\", ErrInvalidHostname\n\t}\n\n\treturn \"\", ErrInvalidHostname\n}\n\nfunc (d *Domain) GetCertificate() (*x509.Certificate, error) {\n\tconn, err := tls.Dial(\"tcp\", d.Domain+\":\"+d.Port, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := conn.Handshake(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstate := conn.ConnectionState()\n\tfor _, cert := range state.PeerCertificates {\n\t\tif ok := cert.VerifyHostname(d.Domain); ok == nil {\n\t\t\treturn cert, nil\n\t\t}\n\t}\n\n\treturn nil, ErrNoPeerCertificate\n}\n\nfunc (d *Domain) Check() error {\n\tcert, err := d.GetCertificate()\n\tif err != nil {\n\t\treturn nil\n\t}\n\td.cert = cert\n\n\tnow := time.Now().UTC()\n\tif !now.Before(cert.NotAfter) {\n\t\treturn ErrExpired\n\t}\n\n\tif !now.After(cert.NotBefore) {\n\t\treturn ErrNotYetValid\n\t}\n\n\treturn nil\n}\n\nfunc (d *Domain) Store(status *Status) error {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tif err := store.Create(bucket); err != nil {\n\t\treturn err\n\t}\n\n\tif d.cert != nil {\n\t\tid := d.cert.NotBefore.Format(time.RFC3339)\n\t\tdata := base64.StdEncoding.EncodeToString(d.cert.Raw)\n\t\tif err := store.Set(bucket, \"cert~\"+d.cert.SerialNumber.String(), data); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"history~\"+id, d.cert.SerialNumber.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"history~~\", \"-- LIST STOP --\"); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.Set(bucket, \"current\", d.cert.SerialNumber.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif status != nil {\n\t\t\tdata, err := json.Marshal(status)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := store.Set(bucket, \"status\", string(data)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *Domain) Status() (*Status, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tvalue, err := store.Get(bucket, \"status\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := &Status{}\n\tif err := json.Unmarshal([]byte(value), data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\nfunc (d *Domain) Delete() error {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstore := GetStore()\n\treturn store.Remove([]string{\"domains\"}, rhost+\":\"+d.Port)\n}\n\nfunc (d *Domain) CertList() (string, []string, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\tcurrent, err := store.Get(bucket, \"current\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\this := store.Scan(bucket, \"history~\", true, 51)\n\thistory := make([]string, 0)\n\tfor kv := range his {\n\t\tif len(kv.Value) > 0 {\n\t\t\thistory = append(history, kv.Value)\n\t\t}\n\t}\n\n\treturn current, history[1:], nil\n}\n\nfunc (d *Domain) LoadCertificate(serial string) (*Certificate, error) {\n\trhost, err := ReverseHost(d.Domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore := GetStore()\n\tbucket := []string{\"domains\", rhost + \":\" + d.Port}\n\traw, err := store.Get(bucket, \"cert~\"+serial)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(raw) == 0 {\n\t\treturn nil, ErrNoCertificate\n\t}\n\n\tdata, err := base64.StdEncoding.DecodeString(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn convertCert(cert), nil\n}\n\nfunc CheckDomain(domain, port string) {\n\tticker := time.NewTicker(time.Minute * 5)\n\tlog.Printf(\"starting domain checker for \\\"%s:%s\\\"\\n\", domain, port)\n\n\tfor {\n\t\td := &Domain{\n\t\t\tDomain: domain,\n\t\t\tPort: port,\n\t\t}\n\n\t\tstart := time.Now()\n\t\tstatus := &Status{Time: start}\n\t\terr := d.Check()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"checking domain \\\"%s:%s\\\": %s\\n\", domain, port, err.Error())\n\t\t\tstatus.Valid = false\n\t\t\tstatus.Err = err.Error()\n\t\t} else {\n\t\t\tnow := time.Now().UTC().Unix()\n\t\t\tvalidity := int((d.cert.NotAfter.Unix() - now) \/ 86400)\n\t\t\tlog.Printf(\"checking domain \\\"%s:%s\\\": certificate is valid for %d days\", domain, port, validity)\n\t\t\tstatus.Valid = true\n\t\t}\n\t\tstatus.Duration = int64(time.Since(start) \/ time.Millisecond)\n\n\t\t\/\/ store latest check and certificate\n\t\td.Store(status)\n\n\t\t\/\/ wait for 5 minutes\n\t\t<-ticker.C\n\t}\n}\n\nfunc GetDomains() []*Domain {\n\tresult := make([]*Domain, 0)\n\tstore := GetStore()\n\tfor kv := range store.Scan([]string{\"domains\"}, \"\", false, 0) {\n\t\tsplitter := strings.Split(kv.Key, \":\")\n\t\tif len(splitter) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tdomain, err := ReverseHost(splitter[0])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, &Domain{Domain: domain, Port: splitter[1]})\n\t}\n\n\treturn result\n}\n\nfunc StartDomainChecker() {\n\tfor _, d := range GetDomains() {\n\t\tgo CheckDomain(d.Domain, d.Port)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package pluginaction_test\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. \"code.cloudfoundry.org\/cli\/actor\/pluginaction\"\n\t\"code.cloudfoundry.org\/cli\/actor\/pluginaction\/pluginactionfakes\"\n\t\"code.cloudfoundry.org\/cli\/util\/configv3\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"UninstallPlugin\", func() {\n\tvar (\n\t\tactor Actor\n\t\tbinaryPath string\n\t\tfakeConfig *pluginactionfakes.FakeConfig\n\t\tfakePluginUninstaller *pluginactionfakes.FakePluginUninstaller\n\t\tpluginHome string\n\t\terr error\n\t)\n\n\tBeforeEach(func() {\n\t\tpluginHome, err = ioutil.TempDir(\"\", \"\")\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tbinaryPath = filepath.Join(pluginHome, \"banana-faceman\")\n\t\terr = ioutil.WriteFile(binaryPath, nil, 0600)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tfakePluginUninstaller = new(pluginactionfakes.FakePluginUninstaller)\n\t\tfakeConfig = new(pluginactionfakes.FakeConfig)\n\t\tactor = NewActor(fakeConfig, nil)\n\n\t})\n\n\tAfterEach(func() {\n\t\tos.RemoveAll(pluginHome)\n\t})\n\n\tContext(\"when the plugin does not exist\", func() {\n\t\tBeforeEach(func() {\n\t\t\tfakeConfig.GetPluginReturns(configv3.Plugin{}, false)\n\t\t})\n\n\t\tIt(\"returns a PluginNotFoundError\", func() {\n\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-non-existent-plugin\")\n\t\t\tExpect(err).To(MatchError(PluginNotFoundError{Name: \"some-non-existent-plugin\"}))\n\t\t})\n\t})\n\n\tContext(\"when the plugin exists\", func() {\n\t\tBeforeEach(func() {\n\t\t\tfakeConfig.GetPluginReturns(configv3.Plugin{\n\t\t\t\tName: \"some-plugin\",\n\t\t\t\tLocation: binaryPath,\n\t\t\t}, true)\n\t\t})\n\n\t\tContext(\"when no errors are encountered\", func() {\n\t\t\tIt(\"runs the plugin cleanup, deletes the binary and removes the plugin config\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tExpect(fakeConfig.GetPluginCallCount()).To(Equal(1))\n\t\t\t\tExpect(fakeConfig.GetPluginArgsForCall(0)).To(Equal(\"some-plugin\"))\n\n\t\t\t\tExpect(fakePluginUninstaller.UninstallCallCount()).To(Equal(1))\n\t\t\t\tExpect(fakePluginUninstaller.UninstallArgsForCall(0)).To(Equal(binaryPath))\n\n\t\t\t\t_, err = os.Stat(binaryPath)\n\t\t\t\tExpect(os.IsNotExist(err)).To(BeTrue())\n\n\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(1))\n\t\t\t\tExpect(fakeConfig.RemovePluginArgsForCall(0)).To(Equal(\"some-plugin\"))\n\n\t\t\t\tExpect(fakeConfig.WritePluginConfigCallCount()).To(Equal(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the plugin uninstaller returns an error\", func() {\n\t\t\tvar expectedErr error\n\n\t\t\tBeforeEach(func() {\n\t\t\t\texpectedErr = errors.New(\"some error\")\n\t\t\t\tfakePluginUninstaller.UninstallReturns(expectedErr)\n\t\t\t})\n\n\t\t\tIt(\"returns the error and does not delete the binary or remove the plugin config\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\tExpect(err).To(MatchError(expectedErr))\n\n\t\t\t\t_, err = os.Stat(binaryPath)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when deleting the plugin binary returns a 'file does not exist' error\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\terr = os.Remove(binaryPath)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\n\t\t\tIt(\"does not return the error and removes the plugin config\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when deleting the plugin binary returns any other error\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\terr = os.Remove(binaryPath)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\terr = os.Mkdir(binaryPath, 0700)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\terr = ioutil.WriteFile(filepath.Join(binaryPath, \"foooooo\"), nil, 0500)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\n\t\t\tIt(\"does not return the error and removes the plugin config\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t_, ok := err.(*os.PathError)\n\t\t\t\tExpect(ok).To(BeTrue())\n\n\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(0))\n\t\t\t\tExpect(fakeConfig.WritePluginConfigCallCount()).To(Equal(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when writing the config returns an error\", func() {\n\t\t\tvar expectedErr error\n\n\t\t\tBeforeEach(func() {\n\t\t\t\texpectedErr = errors.New(\"some plugin config write error\")\n\n\t\t\t\tfakeConfig.WritePluginConfigReturns(expectedErr)\n\t\t\t})\n\n\t\t\tIt(\"returns the error\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\tExpect(err).To(MatchError(expectedErr))\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>add another level of nesting for plugin actor test<commit_after>package pluginaction_test\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t. \"code.cloudfoundry.org\/cli\/actor\/pluginaction\"\n\t\"code.cloudfoundry.org\/cli\/actor\/pluginaction\/pluginactionfakes\"\n\t\"code.cloudfoundry.org\/cli\/util\/configv3\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Plugin actor\", func() {\n\tvar (\n\t\tactor Actor\n\t\tfakeConfig *pluginactionfakes.FakeConfig\n\t)\n\n\tBeforeEach(func() {\n\t\tfakeConfig = new(pluginactionfakes.FakeConfig)\n\t\tactor = NewActor(fakeConfig, nil)\n\t})\n\n\tDescribe(\"UninstallPlugin\", func() {\n\t\tvar (\n\t\t\tbinaryPath string\n\t\t\tfakePluginUninstaller *pluginactionfakes.FakePluginUninstaller\n\t\t\tpluginHome string\n\t\t\terr error\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tpluginHome, err = ioutil.TempDir(\"\", \"\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tbinaryPath = filepath.Join(pluginHome, \"banana-faceman\")\n\t\t\terr = ioutil.WriteFile(binaryPath, nil, 0600)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\tfakePluginUninstaller = new(pluginactionfakes.FakePluginUninstaller)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tos.RemoveAll(pluginHome)\n\t\t})\n\n\t\tContext(\"when the plugin does not exist\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tfakeConfig.GetPluginReturns(configv3.Plugin{}, false)\n\t\t\t})\n\n\t\t\tIt(\"returns a PluginNotFoundError\", func() {\n\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-non-existent-plugin\")\n\t\t\t\tExpect(err).To(MatchError(PluginNotFoundError{Name: \"some-non-existent-plugin\"}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the plugin exists\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tfakeConfig.GetPluginReturns(configv3.Plugin{\n\t\t\t\t\tName: \"some-plugin\",\n\t\t\t\t\tLocation: binaryPath,\n\t\t\t\t}, true)\n\t\t\t})\n\n\t\t\tContext(\"when no errors are encountered\", func() {\n\t\t\t\tIt(\"runs the plugin cleanup, deletes the binary and removes the plugin config\", func() {\n\t\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(fakeConfig.GetPluginCallCount()).To(Equal(1))\n\t\t\t\t\tExpect(fakeConfig.GetPluginArgsForCall(0)).To(Equal(\"some-plugin\"))\n\n\t\t\t\t\tExpect(fakePluginUninstaller.UninstallCallCount()).To(Equal(1))\n\t\t\t\t\tExpect(fakePluginUninstaller.UninstallArgsForCall(0)).To(Equal(binaryPath))\n\n\t\t\t\t\t_, err = os.Stat(binaryPath)\n\t\t\t\t\tExpect(os.IsNotExist(err)).To(BeTrue())\n\n\t\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(1))\n\t\t\t\t\tExpect(fakeConfig.RemovePluginArgsForCall(0)).To(Equal(\"some-plugin\"))\n\n\t\t\t\t\tExpect(fakeConfig.WritePluginConfigCallCount()).To(Equal(1))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the plugin uninstaller returns an error\", func() {\n\t\t\t\tvar expectedErr error\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\texpectedErr = errors.New(\"some error\")\n\t\t\t\t\tfakePluginUninstaller.UninstallReturns(expectedErr)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns the error and does not delete the binary or remove the plugin config\", func() {\n\t\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t\tExpect(err).To(MatchError(expectedErr))\n\n\t\t\t\t\t_, err = os.Stat(binaryPath)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(0))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when deleting the plugin binary returns a 'file does not exist' error\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\terr = os.Remove(binaryPath)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not return the error and removes the plugin config\", func() {\n\t\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(1))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when deleting the plugin binary returns any other error\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\terr = os.Remove(binaryPath)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\terr = os.Mkdir(binaryPath, 0700)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\terr = ioutil.WriteFile(filepath.Join(binaryPath, \"foooooo\"), nil, 0500)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not return the error and removes the plugin config\", func() {\n\t\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t\t_, ok := err.(*os.PathError)\n\t\t\t\t\tExpect(ok).To(BeTrue())\n\n\t\t\t\t\tExpect(fakeConfig.RemovePluginCallCount()).To(Equal(0))\n\t\t\t\t\tExpect(fakeConfig.WritePluginConfigCallCount()).To(Equal(0))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when writing the config returns an error\", func() {\n\t\t\t\tvar expectedErr error\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\texpectedErr = errors.New(\"some plugin config write error\")\n\n\t\t\t\t\tfakeConfig.WritePluginConfigReturns(expectedErr)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns the error\", func() {\n\t\t\t\t\terr := actor.UninstallPlugin(fakePluginUninstaller, \"some-plugin\")\n\t\t\t\t\tExpect(err).To(MatchError(expectedErr))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ The nodefs package offers a high level API that resembles the\n\/\/ kernel's idea of what an FS looks like. File systems can have\n\/\/ multiple hard-links to one file, for example. It is also suited if\n\/\/ the data to represent fits in memory: you can construct the\n\/\/ complete file system tree at mount time\npackage nodefs\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n)\n\n\/\/ The Node interface implements the user-defined file system\n\/\/ functionality\ntype Node interface {\n\t\/\/ Inode and SetInode are basic getter\/setters. They are\n\t\/\/ called by the FileSystemConnector. You get them for free by\n\t\/\/ embedding the result of NewDefaultNode() in your node\n\t\/\/ struct.\n\tInode() *Inode\n\tSetInode(node *Inode)\n\n\t\/\/ OnMount is called on the root node just after a mount is\n\t\/\/ executed, either when the actual root is mounted, or when a\n\t\/\/ filesystem is mounted in-process. The passed-in\n\t\/\/ FileSystemConnector gives access to Notify methods and\n\t\/\/ Debug settings.\n\tOnMount(conn *FileSystemConnector)\n\n\t\/\/ OnUnmount is executed just before a submount is removed,\n\t\/\/ and when the process receives a forget for the FUSE root\n\t\/\/ node.\n\tOnUnmount()\n\n\t\/\/ Lookup finds a child node to this node; it is only called\n\t\/\/ for directory Nodes.\n\tLookup(out *fuse.Attr, name string, context *fuse.Context) (*Inode, fuse.Status)\n\n\t\/\/ Deletable() should return true if this inode may be\n\t\/\/ discarded from the children list. This will be called from\n\t\/\/ within the treeLock critical section, so you cannot look at\n\t\/\/ other inodes.\n\tDeletable() bool\n\n\t\/\/ OnForget is called when the reference to this inode is\n\t\/\/ dropped from the tree.\n\tOnForget()\n\n\t\/\/ Misc.\n\tAccess(mode uint32, context *fuse.Context) (code fuse.Status)\n\tReadlink(c *fuse.Context) ([]byte, fuse.Status)\n\n\t\/\/ Namespace operations; these are only called on directory Nodes.\n\n\t\/\/ Mknod should create the node, add it to the receiver's\n\t\/\/ inode, and return it\n\tMknod(name string, mode uint32, dev uint32, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\n\t\/\/ Mkdir should create the directory Inode, add it to the\n\t\/\/ receiver's Inode, and return it\n\tMkdir(name string, mode uint32, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\tUnlink(name string, context *fuse.Context) (code fuse.Status)\n\tRmdir(name string, context *fuse.Context) (code fuse.Status)\n\n\t\/\/ Symlink should create a child inode to the receiver, and\n\t\/\/ return it.\n\tSymlink(name string, content string, context *fuse.Context) (*Inode, fuse.Status)\n\tRename(oldName string, newParent Node, newName string, context *fuse.Context) (code fuse.Status)\n\n\t\/\/ Link should return the Inode of the resulting link. In\n\t\/\/ a POSIX conformant file system, this should add 'existing'\n\t\/\/ to the receiver, and return the Inode corresponding to\n\t\/\/ 'existing'.\n\tLink(name string, existing Node, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\n\t\/\/ Create should return an open file, and the Inode for that file.\n\tCreate(name string, flags uint32, mode uint32, context *fuse.Context) (file File, child *Inode, code fuse.Status)\n\n\t\/\/ Open opens a file, and returns a File which is associated\n\t\/\/ with a file handle. It is OK to return (nil, OK) here. In\n\t\/\/ that case, the Node should implement Read or Write\n\t\/\/ directly.\n\tOpen(flags uint32, context *fuse.Context) (file File, code fuse.Status)\n\tOpenDir(context *fuse.Context) ([]fuse.DirEntry, fuse.Status)\n\n\t\/\/ XAttrs\n\tGetXAttr(attribute string, context *fuse.Context) (data []byte, code fuse.Status)\n\tRemoveXAttr(attr string, context *fuse.Context) fuse.Status\n\tSetXAttr(attr string, data []byte, flags int, context *fuse.Context) fuse.Status\n\tListXAttr(context *fuse.Context) (attrs []string, code fuse.Status)\n\n\t\/\/ Attributes\n\tRead(file File, dest []byte, off int64, context *fuse.Context) (fuse.ReadResult, fuse.Status)\n\tWrite(file File, data []byte, off int64, context *fuse.Context) (written uint32, code fuse.Status)\n\tGetAttr(out *fuse.Attr, file File, context *fuse.Context) (code fuse.Status)\n\tChmod(file File, perms uint32, context *fuse.Context) (code fuse.Status)\n\tChown(file File, uid uint32, gid uint32, context *fuse.Context) (code fuse.Status)\n\tTruncate(file File, size uint64, context *fuse.Context) (code fuse.Status)\n\tUtimens(file File, atime *time.Time, mtime *time.Time, context *fuse.Context) (code fuse.Status)\n\tFallocate(file File, off uint64, size uint64, mode uint32, context *fuse.Context) (code fuse.Status)\n\n\tStatFs() *fuse.StatfsOut\n}\n\n\/\/ A File object is returned from FileSystem.Open and\n\/\/ FileSystem.Create. Include the NewDefaultFile return value into\n\/\/ the struct to inherit a null implementation.\ntype File interface {\n\t\/\/ Called upon registering the filehandle in the inode.\n\tSetInode(*Inode)\n\n\t\/\/ The String method is for debug printing.\n\tString() string\n\n\t\/\/ Wrappers around other File implementations, should return\n\t\/\/ the inner file here.\n\tInnerFile() File\n\n\tRead(dest []byte, off int64) (fuse.ReadResult, fuse.Status)\n\tWrite(data []byte, off int64) (written uint32, code fuse.Status)\n\n\t\/\/ Flush is called for close() call on a file descriptor. In\n\t\/\/ case of duplicated descriptor, it may be called more than\n\t\/\/ once for a file.\n\tFlush() fuse.Status\n\n\t\/\/ This is called to before the file handle is forgotten. This\n\t\/\/ method has no return value, so nothing can synchronizes on\n\t\/\/ the call. Any cleanup that requires specific synchronization or\n\t\/\/ could fail with I\/O errors should happen in Flush instead.\n\tRelease()\n\tFsync(flags int) (code fuse.Status)\n\n\t\/\/ The methods below may be called on closed files, due to\n\t\/\/ concurrency. In that case, you should return EBADF.\n\tTruncate(size uint64) fuse.Status\n\tGetAttr(out *fuse.Attr) fuse.Status\n\tChown(uid uint32, gid uint32) fuse.Status\n\tChmod(perms uint32) fuse.Status\n\tUtimens(atime *time.Time, mtime *time.Time) fuse.Status\n\tAllocate(off uint64, size uint64, mode uint32) (code fuse.Status)\n}\n\n\/\/ Wrap a File return in this to set FUSE flags. Also used internally\n\/\/ to store open file data.\ntype WithFlags struct {\n\tFile\n\n\t\/\/ For debugging.\n\tDescription string\n\n\t\/\/ Put FOPEN_* flags here.\n\tFuseFlags uint32\n\n\t\/\/ O_RDWR, O_TRUNCATE, etc.\n\tOpenFlags uint32\n}\n\n\/\/ Options contains time out options for a node FileSystem. The\n\/\/ default copied from libfuse and set in NewMountOptions() is\n\/\/ (1s,1s,0s).\ntype Options struct {\n\tEntryTimeout time.Duration\n\tAttrTimeout time.Duration\n\tNegativeTimeout time.Duration\n\n\t\/\/ If set, replace all uids with given UID.\n\t\/\/ NewFileSystemOptions() will set this to the daemon's\n\t\/\/ uid\/gid.\n\t*fuse.Owner\n\n\t\/\/ If set, use a more portable, but slower inode number\n\t\/\/ generation scheme. This will make inode numbers (exported\n\t\/\/ back to callers) stay within int32, which is necessary for\n\t\/\/ making stat() succeed in 32-bit programs.\n\tPortableInodes bool\n}\n<commit_msg>fuse\/nodefs: reorganize read\/write in interface ordering.<commit_after>\/\/ The nodefs package offers a high level API that resembles the\n\/\/ kernel's idea of what an FS looks like. File systems can have\n\/\/ multiple hard-links to one file, for example. It is also suited if\n\/\/ the data to represent fits in memory: you can construct the\n\/\/ complete file system tree at mount time\npackage nodefs\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n)\n\n\/\/ The Node interface implements the user-defined file system\n\/\/ functionality\ntype Node interface {\n\t\/\/ Inode and SetInode are basic getter\/setters. They are\n\t\/\/ called by the FileSystemConnector. You get them for free by\n\t\/\/ embedding the result of NewDefaultNode() in your node\n\t\/\/ struct.\n\tInode() *Inode\n\tSetInode(node *Inode)\n\n\t\/\/ OnMount is called on the root node just after a mount is\n\t\/\/ executed, either when the actual root is mounted, or when a\n\t\/\/ filesystem is mounted in-process. The passed-in\n\t\/\/ FileSystemConnector gives access to Notify methods and\n\t\/\/ Debug settings.\n\tOnMount(conn *FileSystemConnector)\n\n\t\/\/ OnUnmount is executed just before a submount is removed,\n\t\/\/ and when the process receives a forget for the FUSE root\n\t\/\/ node.\n\tOnUnmount()\n\n\t\/\/ Lookup finds a child node to this node; it is only called\n\t\/\/ for directory Nodes.\n\tLookup(out *fuse.Attr, name string, context *fuse.Context) (*Inode, fuse.Status)\n\n\t\/\/ Deletable() should return true if this inode may be\n\t\/\/ discarded from the children list. This will be called from\n\t\/\/ within the treeLock critical section, so you cannot look at\n\t\/\/ other inodes.\n\tDeletable() bool\n\n\t\/\/ OnForget is called when the reference to this inode is\n\t\/\/ dropped from the tree.\n\tOnForget()\n\n\t\/\/ Misc.\n\tAccess(mode uint32, context *fuse.Context) (code fuse.Status)\n\tReadlink(c *fuse.Context) ([]byte, fuse.Status)\n\n\t\/\/ Namespace operations; these are only called on directory Nodes.\n\n\t\/\/ Mknod should create the node, add it to the receiver's\n\t\/\/ inode, and return it\n\tMknod(name string, mode uint32, dev uint32, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\n\t\/\/ Mkdir should create the directory Inode, add it to the\n\t\/\/ receiver's Inode, and return it\n\tMkdir(name string, mode uint32, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\tUnlink(name string, context *fuse.Context) (code fuse.Status)\n\tRmdir(name string, context *fuse.Context) (code fuse.Status)\n\n\t\/\/ Symlink should create a child inode to the receiver, and\n\t\/\/ return it.\n\tSymlink(name string, content string, context *fuse.Context) (*Inode, fuse.Status)\n\tRename(oldName string, newParent Node, newName string, context *fuse.Context) (code fuse.Status)\n\n\t\/\/ Link should return the Inode of the resulting link. In\n\t\/\/ a POSIX conformant file system, this should add 'existing'\n\t\/\/ to the receiver, and return the Inode corresponding to\n\t\/\/ 'existing'.\n\tLink(name string, existing Node, context *fuse.Context) (newNode *Inode, code fuse.Status)\n\n\t\/\/ Create should return an open file, and the Inode for that file.\n\tCreate(name string, flags uint32, mode uint32, context *fuse.Context) (file File, child *Inode, code fuse.Status)\n\n\t\/\/ Open opens a file, and returns a File which is associated\n\t\/\/ with a file handle. It is OK to return (nil, OK) here. In\n\t\/\/ that case, the Node should implement Read or Write\n\t\/\/ directly.\n\tOpen(flags uint32, context *fuse.Context) (file File, code fuse.Status)\n\tOpenDir(context *fuse.Context) ([]fuse.DirEntry, fuse.Status)\n\tRead(file File, dest []byte, off int64, context *fuse.Context) (fuse.ReadResult, fuse.Status)\n\tWrite(file File, data []byte, off int64, context *fuse.Context) (written uint32, code fuse.Status)\n\n\t\/\/ XAttrs\n\tGetXAttr(attribute string, context *fuse.Context) (data []byte, code fuse.Status)\n\tRemoveXAttr(attr string, context *fuse.Context) fuse.Status\n\tSetXAttr(attr string, data []byte, flags int, context *fuse.Context) fuse.Status\n\tListXAttr(context *fuse.Context) (attrs []string, code fuse.Status)\n\n\t\/\/ Attributes\n\tGetAttr(out *fuse.Attr, file File, context *fuse.Context) (code fuse.Status)\n\tChmod(file File, perms uint32, context *fuse.Context) (code fuse.Status)\n\tChown(file File, uid uint32, gid uint32, context *fuse.Context) (code fuse.Status)\n\tTruncate(file File, size uint64, context *fuse.Context) (code fuse.Status)\n\tUtimens(file File, atime *time.Time, mtime *time.Time, context *fuse.Context) (code fuse.Status)\n\tFallocate(file File, off uint64, size uint64, mode uint32, context *fuse.Context) (code fuse.Status)\n\n\tStatFs() *fuse.StatfsOut\n}\n\n\/\/ A File object is returned from FileSystem.Open and\n\/\/ FileSystem.Create. Include the NewDefaultFile return value into\n\/\/ the struct to inherit a null implementation.\ntype File interface {\n\t\/\/ Called upon registering the filehandle in the inode.\n\tSetInode(*Inode)\n\n\t\/\/ The String method is for debug printing.\n\tString() string\n\n\t\/\/ Wrappers around other File implementations, should return\n\t\/\/ the inner file here.\n\tInnerFile() File\n\n\tRead(dest []byte, off int64) (fuse.ReadResult, fuse.Status)\n\tWrite(data []byte, off int64) (written uint32, code fuse.Status)\n\n\t\/\/ Flush is called for close() call on a file descriptor. In\n\t\/\/ case of duplicated descriptor, it may be called more than\n\t\/\/ once for a file.\n\tFlush() fuse.Status\n\n\t\/\/ This is called to before the file handle is forgotten. This\n\t\/\/ method has no return value, so nothing can synchronizes on\n\t\/\/ the call. Any cleanup that requires specific synchronization or\n\t\/\/ could fail with I\/O errors should happen in Flush instead.\n\tRelease()\n\tFsync(flags int) (code fuse.Status)\n\n\t\/\/ The methods below may be called on closed files, due to\n\t\/\/ concurrency. In that case, you should return EBADF.\n\tTruncate(size uint64) fuse.Status\n\tGetAttr(out *fuse.Attr) fuse.Status\n\tChown(uid uint32, gid uint32) fuse.Status\n\tChmod(perms uint32) fuse.Status\n\tUtimens(atime *time.Time, mtime *time.Time) fuse.Status\n\tAllocate(off uint64, size uint64, mode uint32) (code fuse.Status)\n}\n\n\/\/ Wrap a File return in this to set FUSE flags. Also used internally\n\/\/ to store open file data.\ntype WithFlags struct {\n\tFile\n\n\t\/\/ For debugging.\n\tDescription string\n\n\t\/\/ Put FOPEN_* flags here.\n\tFuseFlags uint32\n\n\t\/\/ O_RDWR, O_TRUNCATE, etc.\n\tOpenFlags uint32\n}\n\n\/\/ Options contains time out options for a node FileSystem. The\n\/\/ default copied from libfuse and set in NewMountOptions() is\n\/\/ (1s,1s,0s).\ntype Options struct {\n\tEntryTimeout time.Duration\n\tAttrTimeout time.Duration\n\tNegativeTimeout time.Duration\n\n\t\/\/ If set, replace all uids with given UID.\n\t\/\/ NewFileSystemOptions() will set this to the daemon's\n\t\/\/ uid\/gid.\n\t*fuse.Owner\n\n\t\/\/ If set, use a more portable, but slower inode number\n\t\/\/ generation scheme. This will make inode numbers (exported\n\t\/\/ back to callers) stay within int32, which is necessary for\n\t\/\/ making stat() succeed in 32-bit programs.\n\tPortableInodes bool\n}\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/diegobernardes\/flare\"\n)\n\ntype pagination struct {\n\tbase *flare.Pagination\n}\n\nfunc (p *pagination) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(&struct {\n\t\tLimit int `json:\"limit\"`\n\t\tOffset int `json:\"offset\"`\n\t\tTotal int `json:\"total\"`\n\t}{\n\t\tLimit: p.base.Limit,\n\t\tTotal: p.base.Total,\n\t\tOffset: p.base.Offset,\n\t})\n}\n\ntype resource struct {\n\tbase *flare.Resource\n}\n\nfunc (r *resource) MarshalJSON() ([]byte, error) {\n\tchange := map[string]string{\n\t\t\"kind\": r.base.Change.Kind,\n\t\t\"field\": r.base.Change.Field,\n\t}\n\n\tif r.base.Change.DateFormat != \"\" {\n\t\tchange[\"dateFormat\"] = r.base.Change.DateFormat\n\t}\n\n\treturn json.Marshal(&struct {\n\t\tId string `json:\"id\"`\n\t\tDomains []string `json:\"domains\"`\n\t\tPath string `json:\"path\"`\n\t\tChange map[string]string `json:\"change\"`\n\t\tCreatedAt string `json:\"createdAt\"`\n\t\tUpdatedAt string `json:\"updatedAt\"`\n\t}{\n\t\tId: r.base.Id,\n\t\tDomains: r.base.Domains,\n\t\tPath: r.base.Path,\n\t\tChange: change,\n\t\tCreatedAt: r.base.CreatedAt.Format(time.RFC3339),\n\t\tUpdatedAt: r.base.UpdatedAt.Format(time.RFC3339),\n\t})\n}\n\ntype resourceCreate struct {\n\tPath string `json:\"path\"`\n\tDomains []string `json:\"domains\"`\n\tChange struct {\n\t\tKind string `json:\"kind\"`\n\t\tField string `json:\"field\"`\n\t\tDateFormat string `json:\"dateFormat\"`\n\t} `json:\"change\"`\n}\n\nfunc (r *resourceCreate) cleanup() {\n\ttrim := func(value string) string { return strings.TrimSpace(value) }\n\tr.Path = trim(r.Path)\n\tr.Change.Kind = trim(r.Change.Kind)\n\tr.Change.Field = trim(r.Change.Field)\n\tr.Change.DateFormat = trim(r.Change.DateFormat)\n\n\tfor i, value := range r.Domains {\n\t\tr.Domains[i] = trim(value)\n\t}\n}\n\nfunc (r *resourceCreate) valid() error {\n\tif len(r.Domains) == 0 {\n\t\treturn errors.New(\"missing domains\")\n\t}\n\n\tif r.Path == \"\" {\n\t\treturn errors.New(\"missing path\")\n\t}\n\n\tif r.Change.Field == \"\" {\n\t\treturn errors.New(\"missing change\")\n\t}\n\n\tswitch r.Change.Kind {\n\tcase flare.ResourceChangeInteger, flare.ResourceChangeString:\n\tcase flare.ResourceChangeDate:\n\t\tif r.Change.DateFormat == \"\" {\n\t\t\treturn errors.New(\"missing change.dateFormat\")\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"invalid change.kind\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *resourceCreate) toFlareResource() *flare.Resource {\n\treturn &flare.Resource{\n\t\tId: uuid.NewV4().String(),\n\t\tDomains: r.Domains,\n\t\tPath: r.Path,\n\t\tChange: flare.ResourceChange{\n\t\t\tKind: r.Change.Kind,\n\t\t\tField: r.Change.Field,\n\t\t\tDateFormat: r.Change.DateFormat,\n\t\t},\n\t}\n}\n\nfunc transformResource(r *flare.Resource) *resource { return &resource{r} }\n\nfunc transformPagination(p *flare.Pagination) *pagination { return &pagination{base: p} }\n\nfunc transformResources(r []flare.Resource) []resource {\n\tresult := make([]resource, len(r))\n\tfor i := 0; i < len(r); i++ {\n\t\tresult[i] = resource{&r[i]}\n\t}\n\treturn result\n}\n\ntype response struct {\n\tPagination *pagination\n\tError *responseError\n\tResources []resource\n\tResource *resource\n}\n\nfunc (r *response) MarshalJSON() ([]byte, error) {\n\tvar result interface{}\n\n\tif r.Error != nil {\n\t\tresult = map[string]*responseError{\"error\": r.Error}\n\t} else if r.Resource != nil {\n\t\tresult = r.Resource\n\t} else {\n\t\tresult = map[string]interface{}{\n\t\t\t\"pagination\": r.Pagination,\n\t\t\t\"resources\": r.Resources,\n\t\t}\n\t}\n\n\treturn json.Marshal(result)\n}\n\ntype responseError struct {\n\tStatus int `json:\"status\"`\n\tTitle string `json:\"title\"`\n\tDetail string `json:\"detail,omitempty\"`\n}\n<commit_msg>resource: added track mark<commit_after>package resource\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/satori\/go.uuid\"\n\n\t\"github.com\/diegobernardes\/flare\"\n)\n\ntype pagination struct {\n\tbase *flare.Pagination\n}\n\nfunc (p *pagination) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(&struct {\n\t\tLimit int `json:\"limit\"`\n\t\tOffset int `json:\"offset\"`\n\t\tTotal int `json:\"total\"`\n\t}{\n\t\tLimit: p.base.Limit,\n\t\tTotal: p.base.Total,\n\t\tOffset: p.base.Offset,\n\t})\n}\n\ntype resource struct {\n\tbase *flare.Resource\n}\n\nfunc (r *resource) MarshalJSON() ([]byte, error) {\n\tchange := map[string]string{\n\t\t\"kind\": r.base.Change.Kind,\n\t\t\"field\": r.base.Change.Field,\n\t}\n\n\tif r.base.Change.DateFormat != \"\" {\n\t\tchange[\"dateFormat\"] = r.base.Change.DateFormat\n\t}\n\n\treturn json.Marshal(&struct {\n\t\tId string `json:\"id\"`\n\t\tDomains []string `json:\"domains\"`\n\t\tPath string `json:\"path\"`\n\t\tChange map[string]string `json:\"change\"`\n\t\tCreatedAt string `json:\"createdAt\"`\n\t\tUpdatedAt string `json:\"updatedAt\"`\n\t}{\n\t\tId: r.base.Id,\n\t\tDomains: r.base.Domains,\n\t\tPath: r.base.Path,\n\t\tChange: change,\n\t\tCreatedAt: r.base.CreatedAt.Format(time.RFC3339),\n\t\tUpdatedAt: r.base.UpdatedAt.Format(time.RFC3339),\n\t})\n}\n\ntype resourceCreate struct {\n\tPath string `json:\"path\"`\n\tDomains []string `json:\"domains\"`\n\tChange struct {\n\t\tKind string `json:\"kind\"`\n\t\tField string `json:\"field\"`\n\t\tDateFormat string `json:\"dateFormat\"`\n\t} `json:\"change\"`\n}\n\nfunc (r *resourceCreate) cleanup() {\n\ttrim := func(value string) string { return strings.TrimSpace(value) }\n\tr.Path = trim(r.Path)\n\tr.Change.Kind = trim(r.Change.Kind)\n\tr.Change.Field = trim(r.Change.Field)\n\tr.Change.DateFormat = trim(r.Change.DateFormat)\n\n\tfor i, value := range r.Domains {\n\t\tr.Domains[i] = trim(value)\n\t}\n}\n\nfunc (r *resourceCreate) valid() error {\n\tif len(r.Domains) == 0 {\n\t\treturn errors.New(\"missing domains\")\n\t}\n\n\tif r.Path == \"\" {\n\t\treturn errors.New(\"missing path\")\n\t}\n\n\ttrackCount := strings.Count(r.Path, \"{track}\")\n\tif trackCount == 0 {\n\t\treturn errors.New(\"missing track mark on path\")\n\t} else if trackCount > 1 {\n\t\treturn errors.Errorf(\"path can have only one track, current has %d\", trackCount)\n\t}\n\n\tif r.Change.Field == \"\" {\n\t\treturn errors.New(\"missing change\")\n\t}\n\n\tswitch r.Change.Kind {\n\tcase flare.ResourceChangeInteger, flare.ResourceChangeString:\n\tcase flare.ResourceChangeDate:\n\t\tif r.Change.DateFormat == \"\" {\n\t\t\treturn errors.New(\"missing change.dateFormat\")\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"invalid change.kind\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *resourceCreate) toFlareResource() *flare.Resource {\n\treturn &flare.Resource{\n\t\tId: uuid.NewV4().String(),\n\t\tDomains: r.Domains,\n\t\tPath: r.Path,\n\t\tChange: flare.ResourceChange{\n\t\t\tKind: r.Change.Kind,\n\t\t\tField: r.Change.Field,\n\t\t\tDateFormat: r.Change.DateFormat,\n\t\t},\n\t}\n}\n\nfunc transformResource(r *flare.Resource) *resource { return &resource{r} }\n\nfunc transformPagination(p *flare.Pagination) *pagination { return &pagination{base: p} }\n\nfunc transformResources(r []flare.Resource) []resource {\n\tresult := make([]resource, len(r))\n\tfor i := 0; i < len(r); i++ {\n\t\tresult[i] = resource{&r[i]}\n\t}\n\treturn result\n}\n\ntype response struct {\n\tPagination *pagination\n\tError *responseError\n\tResources []resource\n\tResource *resource\n}\n\nfunc (r *response) MarshalJSON() ([]byte, error) {\n\tvar result interface{}\n\n\tif r.Error != nil {\n\t\tresult = map[string]*responseError{\"error\": r.Error}\n\t} else if r.Resource != nil {\n\t\tresult = r.Resource\n\t} else {\n\t\tresult = map[string]interface{}{\n\t\t\t\"pagination\": r.Pagination,\n\t\t\t\"resources\": r.Resources,\n\t\t}\n\t}\n\n\treturn json.Marshal(result)\n}\n\ntype responseError struct {\n\tStatus int `json:\"status\"`\n\tTitle string `json:\"title\"`\n\tDetail string `json:\"detail,omitempty\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package response\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"reflect\"\n\n\t\"github.com\/codegangsta\/inject\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/martini-contrib\/encoder\"\n)\n\ntype wrappedResponseWriter struct {\n\thttp.ResponseWriter\n\n\tstatusCode int\n}\n\nfunc newWrappedResponseWriter(w http.ResponseWriter) *wrappedResponseWriter {\n\twr := &wrappedResponseWriter{ResponseWriter: w}\n\treturn wr\n}\n\nfunc (wr *wrappedResponseWriter) WriteHeader(code int) {\n\twr.ResponseWriter.WriteHeader(code)\n\twr.statusCode = code\n}\n\ntype errorResponse struct {\n\tXMLName xml.Name `json:\"-\" xml:\"error\"`\n\tError int `json:\"error\" xml:\"code\"`\n\tMessage string `json:\"message\" xml:\"message\"`\n}\n\nfunc NewEncoder() martini.Handler {\n\treturn func(c martini.Context, w http.ResponseWriter) {\n\t\twrappedWriter := newWrappedResponseWriter(w)\n\t\tc.MapTo(wrappedWriter, (*http.ResponseWriter)(nil))\n\t\tc.MapTo(encoder.JsonEncoder{}, (*encoder.Encoder)(nil))\n\n\t\tvar rtnHandler martini.ReturnHandler\n\t\trtnHandler = func(ctx martini.Context, vals []reflect.Value) {\n\t\t\trv := ctx.Get(inject.InterfaceOf((*http.ResponseWriter)(nil)))\n\t\t\tres := rv.Interface().(http.ResponseWriter)\n\t\t\tvar responseVal reflect.Value\n\t\t\tif len(vals) > 1 && vals[0].Kind() == reflect.Int {\n\t\t\t\tres.WriteHeader(int(vals[0].Int()))\n\t\t\t\tresponseVal = vals[1]\n\t\t\t} else if len(vals) > 0 {\n\t\t\t\tresponseVal = vals[0]\n\t\t\t}\n\t\t\tif isNil(responseVal) {\n\t\t\t\twrappedRes := res.(*wrappedResponseWriter)\n\t\t\t\tcode := wrappedRes.statusCode\n\t\t\t\tif code == 0 {\n\t\t\t\t\tpanic(errors.New(\"No return code set for error\"))\n\t\t\t\t}\n\t\t\t\tresponseVal = reflect.ValueOf(errorResponse{Error: code, Message: http.StatusText(code)})\n\t\t\t}\n\t\t\tif canDeref(responseVal) {\n\t\t\t\tresponseVal = responseVal.Elem()\n\t\t\t}\n\t\t\tif isByteSlice(responseVal) {\n\t\t\t\tres.Write(responseVal.Bytes())\n\t\t\t} else if isStruct(responseVal) || isStructSlice(responseVal) {\n\t\t\t\tencv := ctx.Get(inject.InterfaceOf((*encoder.Encoder)(nil)))\n\t\t\t\tenc := encv.Interface().(encoder.Encoder)\n\t\t\t\tres.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\t\tres.Write(encoder.Must(enc.Encode(responseVal.Interface())))\n\t\t\t} else {\n\t\t\t\tres.Write([]byte(responseVal.String()))\n\t\t\t}\n\t\t}\n\t\tc.Map(rtnHandler)\n\t}\n}\n\nfunc isByteSlice(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8\n}\n\nfunc isStruct(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Struct\n}\n\nfunc isStructSlice(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Struct\n}\n\nfunc isNil(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Invalid\n}\n\nfunc canDeref(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr\n}\n<commit_msg>Added options for HTML Json escape, and Json indenting<commit_after>package response\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"reflect\"\n\n\t\"github.com\/codegangsta\/inject\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/martini-contrib\/encoder\"\n)\n\ntype wrappedResponseWriter struct {\n\thttp.ResponseWriter\n\n\tstatusCode int\n}\n\nfunc newWrappedResponseWriter(w http.ResponseWriter) *wrappedResponseWriter {\n\twr := &wrappedResponseWriter{ResponseWriter: w}\n\treturn wr\n}\n\nfunc (wr *wrappedResponseWriter) WriteHeader(code int) {\n\twr.ResponseWriter.WriteHeader(code)\n\twr.statusCode = code\n}\n\ntype errorResponse struct {\n\tXMLName xml.Name `json:\"-\" xml:\"error\"`\n\tError int `json:\"error\" xml:\"code\"`\n\tMessage string `json:\"message\" xml:\"message\"`\n}\n\ntype Options struct {\n\tHtml bool\n\tIndent bool\n}\n\nfunc NewEncoder(opts ...Options) martini.Handler {\n\treturn func(c martini.Context, w http.ResponseWriter) {\n\t\twrappedWriter := newWrappedResponseWriter(w)\n\t\tc.MapTo(wrappedWriter, (*http.ResponseWriter)(nil))\n\t\tc.MapTo(encoder.JsonEncoder{}, (*encoder.Encoder)(nil))\n\n\t\tvar rtnHandler martini.ReturnHandler\n\t\trtnHandler = func(ctx martini.Context, vals []reflect.Value) {\n\t\t\trv := ctx.Get(inject.InterfaceOf((*http.ResponseWriter)(nil)))\n\t\t\tres := rv.Interface().(http.ResponseWriter)\n\t\t\tvar responseVal reflect.Value\n\t\t\tif len(vals) > 1 && vals[0].Kind() == reflect.Int {\n\t\t\t\tres.WriteHeader(int(vals[0].Int()))\n\t\t\t\tresponseVal = vals[1]\n\t\t\t} else if len(vals) > 0 {\n\t\t\t\tresponseVal = vals[0]\n\t\t\t}\n\t\t\tif isNil(responseVal) {\n\t\t\t\twrappedRes := res.(*wrappedResponseWriter)\n\t\t\t\tcode := wrappedRes.statusCode\n\t\t\t\tif code == 0 {\n\t\t\t\t\tpanic(errors.New(\"No return code set for error\"))\n\t\t\t\t}\n\t\t\t\tresponseVal = reflect.ValueOf(errorResponse{Error: code, Message: http.StatusText(code)})\n\t\t\t}\n\t\t\tif canDeref(responseVal) {\n\t\t\t\tresponseVal = responseVal.Elem()\n\t\t\t}\n\t\t\tif isByteSlice(responseVal) {\n\t\t\t\tres.Write(responseVal.Bytes())\n\t\t\t} else if isStruct(responseVal) || isStructSlice(responseVal) {\n\t\t\t\tencv := ctx.Get(inject.InterfaceOf((*encoder.Encoder)(nil)))\n\t\t\t\tenc := encv.Interface().(encoder.Encoder)\n\t\t\t\tres.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\t\tbuf := bytes.NewBuffer(encoder.Must(enc.Encode(responseVal.Interface())))\n\t\t\t\tif len(opts) > 0 {\n\t\t\t\t\tif opts[0].Html {\n\t\t\t\t\t\tval := buf.Bytes()\n\t\t\t\t\t\tbuf.Reset()\n\t\t\t\t\t\tjson.HTMLEscape(buf, val)\n\t\t\t\t\t}\n\t\t\t\t\tif opts[0].Indent {\n\t\t\t\t\t\tval := buf.Bytes()\n\t\t\t\t\t\tbuf.Reset()\n\t\t\t\t\t\tjson.Indent(buf, val, \"\", \"\\t\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tres.Write(buf.Bytes())\n\t\t\t} else {\n\t\t\t\tres.Write([]byte(responseVal.String()))\n\t\t\t}\n\t\t}\n\t\tc.Map(rtnHandler)\n\t}\n}\n\nfunc isByteSlice(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8\n}\n\nfunc isStruct(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Struct\n}\n\nfunc isStructSlice(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Struct\n}\n\nfunc isNil(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Invalid\n}\n\nfunc canDeref(val reflect.Value) bool {\n\treturn val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr\n}\n<|endoftext|>"} {"text":"<commit_before>package ast\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ AnnotationDeclaration defines a annotation type which holds detail about a giving annotation.\ntype AnnotationDeclaration struct {\n\tName string `json:\"name\"`\n\tTemplate string `json:\"template\"`\n\tArguments []string `json:\"arguments\"`\n\tParams map[string]string `json:\"params\"`\n\tAttrs map[string]interface{} `json:\"attrs\"`\n\tDefer bool `json:\"defer\"`\n}\n\n\/\/ HasArg returns true\/false if the giving AnnotationDeclaration has a giving key in its Arguments.\nfunc (ad AnnotationDeclaration) HasArg(name string) bool {\n\tfor _, item := range ad.Arguments {\n\t\tif item == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Param returns the associated param value with giving key (\"name\").\nfunc (ad AnnotationDeclaration) Param(name string) string {\n\treturn ad.Params[name]\n}\n\n\/\/ Attrs returns the associated param value with giving key (\"name\").\nfunc (ad AnnotationDeclaration) Attr(name string) interface{} {\n\treturn ad.Attrs[name]\n}\n\n\/\/ ReadAnnotationsFromCommentry returns a slice of all annotation passed from the provided list.\nfunc ReadAnnotationsFromCommentry(r io.Reader) []AnnotationDeclaration {\n\tvar annotations []AnnotationDeclaration\n\n\treader := bufio.NewReader(r)\n\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\ttrimmedline := string(cleanWord([]byte(line)))\n\t\tif trimmedline == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Do we have a annotation here?\n\t\tif !strings.HasPrefix(trimmedline, \"@\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tparams := make(map[string]string, 0)\n\n\t\tif !strings.Contains(trimmedline, \"(\") {\n\t\t\tannotations = append(annotations, AnnotationDeclaration{Name: trimmedline, Params: params, Attrs: make(map[string]interface{})})\n\t\t\tcontinue\n\t\t}\n\n\t\targIndex := strings.IndexRune(trimmedline, '(')\n\t\targName := trimmedline[:argIndex]\n\t\targContents := trimmedline[argIndex:]\n\n\t\t\/\/ Do we have a template associated with this annotation, if not, split the\n\t\t\/\/ commas and let those be our arguments.\n\t\tif !strings.HasSuffix(argContents, \"{\") {\n\n\t\t\targContents = strings.TrimPrefix(strings.TrimSuffix(argContents, \")\"), \"(\")\n\n\t\t\tvar parts []string\n\n\t\t\tfor _, part := range strings.Split(argContents, \",\") {\n\t\t\t\ttrimmed := strings.TrimSpace(part)\n\t\t\t\tif trimmed == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tparts = append(parts, trimmed)\n\n\t\t\t\t\/\/ If we are dealing with key value pairs then split, trimspace and set\n\t\t\t\t\/\/ in params. We only expect 2 values, any more and we wont consider the rest.\n\t\t\t\tif kvPieces := strings.Split(trimmed, \"=>\"); len(kvPieces) > 1 {\n\t\t\t\t\tval := strings.TrimSpace(kvPieces[1])\n\t\t\t\t\tparams[strings.TrimSpace(kvPieces[0])] = val\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar deferred bool\n\n\t\t\t\/\/ Find out if we are to be deferred\n\t\t\tdefered, ok := params[\"defer\"]\n\t\t\tif !ok {\n\t\t\t\tdefered, ok = params[\"Defer\"]\n\t\t\t}\n\n\t\t\tdeferred, _ = strconv.ParseBool(defered)\n\n\t\t\tannotations = append(annotations, AnnotationDeclaration{\n\t\t\t\tArguments: parts,\n\t\t\t\tName: argName,\n\t\t\t\tParams: params,\n\t\t\t\tDefer: deferred,\n\t\t\t\tAttrs: make(map[string]interface{}),\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\ttemplateIndex := strings.IndexRune(argContents, '{')\n\t\ttemplateArgs := argContents[:templateIndex]\n\t\ttemplateArgs = strings.TrimPrefix(strings.TrimSuffix(templateArgs, \")\"), \"(\")\n\n\t\tvar parts []string\n\n\t\tfor _, part := range strings.Split(templateArgs, \",\") {\n\t\t\ttrimmed := strings.TrimSpace(part)\n\t\t\tif trimmed == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tparts = append(parts, trimmed)\n\n\t\t\t\/\/ If we are dealing with key value pairs then split, trimspace and set\n\t\t\t\/\/ in params. We only expect 2 values, any more and we wont consider the rest.\n\t\t\tif kvPieces := strings.Split(trimmed, \"=>\"); len(kvPieces) > 1 {\n\t\t\t\tval := strings.TrimSpace(kvPieces[1])\n\n\t\t\t\tparams[strings.TrimSpace(kvPieces[0])] = val\n\t\t\t}\n\t\t}\n\n\t\ttemplate := strings.TrimSpace(readTemplate(reader))\n\n\t\tvar asJSON bool\n\t\tfor _, item := range parts {\n\t\t\tif item == \"asJSON\" {\n\t\t\t\tasJSON = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := params[\"asJSON\"]; ok {\n\t\t\tasJSON = true\n\t\t}\n\n\t\tvar attrs map[string]interface{}\n\n\t\tif asJSON {\n\t\t\tif err := json.Unmarshal([]byte(template), &attrs); err == nil {\n\t\t\t\ttemplate = \"\"\n\t\t\t}\n\t\t} else {\n\t\t\tattrs = make(map[string]interface{})\n\t\t}\n\n\t\tannotations = append(annotations, AnnotationDeclaration{\n\t\t\tArguments: parts,\n\t\t\tName: argName,\n\t\t\tTemplate: template,\n\t\t\tParams: params,\n\t\t\tAttrs: attrs,\n\t\t})\n\n\t}\n\n\treturn annotations\n}\n\nvar ending = []byte(\"})\")\nvar newline = []byte(\"\\n\")\nvar empty = []byte(\"\")\nvar singleComment = []byte(\"\/\/\")\nvar multiComment = []byte(\"\/*\")\nvar multiCommentItem = []byte(\"*\")\nvar commentry = regexp.MustCompile(`\\s*?([\\\/\\\/*|\\*|\\\/]+)`)\n\nfunc readTemplate(reader *bufio.Reader) string {\n\tvar bu bytes.Buffer\n\n\tvar seenEnd bool\n\n\tfor {\n\t\t\/\/ Do we have another pending prefix, if so, we are at the ending, so return.\n\t\tif seenEnd {\n\t\t\tdata, _ := reader.Peek(100)\n\t\t\tdataVal := commentry.ReplaceAllString(string(data), \"\")\n\t\t\tdataVal = string(cleanWord([]byte(dataVal)))\n\n\t\t\t\/\/ fmt.Printf(\"Peek2: %+q -> %+q\\n\", dataVal, data)\n\n\t\t\tif strings.HasPrefix(string(dataVal), \"@\") {\n\t\t\t\treturn bu.String()\n\t\t\t}\n\n\t\t\t\/\/ If it's all space, then return.\n\t\t\t\/\/ if strings.TrimSpace(string(dataVal)) == \"\" {\n\t\t\treturn bu.String()\n\t\t\t\/\/ }\n\t\t}\n\n\t\ttwoWord, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbu.WriteString(twoWord)\n\t\t\treturn bu.String()\n\t\t}\n\n\t\ttwoWorded := cleanWord([]byte(twoWord))\n\t\t\/\/ fmt.Printf(\"Ending: %+q -> %t\\n\", twoWorded, bytes.HasPrefix(twoWorded, ending))\n\n\t\tif bytes.HasPrefix(twoWorded, ending) {\n\t\t\tseenEnd = true\n\t\t\tcontinue\n\t\t}\n\n\t\tbu.WriteString(twoWord)\n\t}\n\n}\n\nfunc cleanWord(word []byte) []byte {\n\tword = bytes.TrimSpace(word)\n\tword = bytes.TrimPrefix(word, singleComment)\n\tword = bytes.TrimPrefix(word, multiComment)\n\tword = bytes.TrimPrefix(word, multiCommentItem)\n\treturn bytes.TrimSpace(word)\n}\n<commit_msg>Fix nil Attrs map<commit_after>package ast\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ AnnotationDeclaration defines a annotation type which holds detail about a giving annotation.\ntype AnnotationDeclaration struct {\n\tName string `json:\"name\"`\n\tTemplate string `json:\"template\"`\n\tArguments []string `json:\"arguments\"`\n\tParams map[string]string `json:\"params\"`\n\tAttrs map[string]interface{} `json:\"attrs\"`\n\tDefer bool `json:\"defer\"`\n}\n\n\/\/ HasArg returns true\/false if the giving AnnotationDeclaration has a giving key in its Arguments.\nfunc (ad AnnotationDeclaration) HasArg(name string) bool {\n\tfor _, item := range ad.Arguments {\n\t\tif item == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ Param returns the associated param value with giving key (\"name\").\nfunc (ad AnnotationDeclaration) Param(name string) string {\n\treturn ad.Params[name]\n}\n\n\/\/ Attrs returns the associated param value with giving key (\"name\").\nfunc (ad AnnotationDeclaration) Attr(name string) interface{} {\n\treturn ad.Attrs[name]\n}\n\n\/\/ ReadAnnotationsFromCommentry returns a slice of all annotation passed from the provided list.\nfunc ReadAnnotationsFromCommentry(r io.Reader) []AnnotationDeclaration {\n\tvar annotations []AnnotationDeclaration\n\n\treader := bufio.NewReader(r)\n\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\ttrimmedline := string(cleanWord([]byte(line)))\n\t\tif trimmedline == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Do we have a annotation here?\n\t\tif !strings.HasPrefix(trimmedline, \"@\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tparams := make(map[string]string, 0)\n\n\t\tif !strings.Contains(trimmedline, \"(\") {\n\t\t\tannotations = append(annotations, AnnotationDeclaration{Name: trimmedline, Params: params, Attrs: make(map[string]interface{})})\n\t\t\tcontinue\n\t\t}\n\n\t\targIndex := strings.IndexRune(trimmedline, '(')\n\t\targName := trimmedline[:argIndex]\n\t\targContents := trimmedline[argIndex:]\n\n\t\t\/\/ Do we have a template associated with this annotation, if not, split the\n\t\t\/\/ commas and let those be our arguments.\n\t\tif !strings.HasSuffix(argContents, \"{\") {\n\n\t\t\targContents = strings.TrimPrefix(strings.TrimSuffix(argContents, \")\"), \"(\")\n\n\t\t\tvar parts []string\n\n\t\t\tfor _, part := range strings.Split(argContents, \",\") {\n\t\t\t\ttrimmed := strings.TrimSpace(part)\n\t\t\t\tif trimmed == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tparts = append(parts, trimmed)\n\n\t\t\t\t\/\/ If we are dealing with key value pairs then split, trimspace and set\n\t\t\t\t\/\/ in params. We only expect 2 values, any more and we wont consider the rest.\n\t\t\t\tif kvPieces := strings.Split(trimmed, \"=>\"); len(kvPieces) > 1 {\n\t\t\t\t\tval := strings.TrimSpace(kvPieces[1])\n\t\t\t\t\tparams[strings.TrimSpace(kvPieces[0])] = val\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar deferred bool\n\n\t\t\t\/\/ Find out if we are to be deferred\n\t\t\tdefered, ok := params[\"defer\"]\n\t\t\tif !ok {\n\t\t\t\tdefered, ok = params[\"Defer\"]\n\t\t\t}\n\n\t\t\tdeferred, _ = strconv.ParseBool(defered)\n\n\t\t\tannotations = append(annotations, AnnotationDeclaration{\n\t\t\t\tArguments: parts,\n\t\t\t\tName: argName,\n\t\t\t\tParams: params,\n\t\t\t\tDefer: deferred,\n\t\t\t\tAttrs: make(map[string]interface{}),\n\t\t\t})\n\n\t\t\tcontinue\n\t\t}\n\n\t\ttemplateIndex := strings.IndexRune(argContents, '{')\n\t\ttemplateArgs := argContents[:templateIndex]\n\t\ttemplateArgs = strings.TrimPrefix(strings.TrimSuffix(templateArgs, \")\"), \"(\")\n\n\t\tvar parts []string\n\n\t\tfor _, part := range strings.Split(templateArgs, \",\") {\n\t\t\ttrimmed := strings.TrimSpace(part)\n\t\t\tif trimmed == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tparts = append(parts, trimmed)\n\n\t\t\t\/\/ If we are dealing with key value pairs then split, trimspace and set\n\t\t\t\/\/ in params. We only expect 2 values, any more and we wont consider the rest.\n\t\t\tif kvPieces := strings.Split(trimmed, \"=>\"); len(kvPieces) > 1 {\n\t\t\t\tval := strings.TrimSpace(kvPieces[1])\n\t\t\t\tparams[strings.TrimSpace(kvPieces[0])] = val\n\t\t\t}\n\t\t}\n\n\t\ttemplate := strings.TrimSpace(readTemplate(reader))\n\n\t\tvar asJSON bool\n\t\tfor _, item := range parts {\n\t\t\tif item == \"asJSON\" {\n\t\t\t\tasJSON = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := params[\"asJSON\"]; ok {\n\t\t\tasJSON = true\n\t\t}\n\n\t\tvar attrs map[string]interface{}\n\n\t\tif asJSON {\n\t\t\tif err := json.Unmarshal([]byte(template), &attrs); err == nil {\n\t\t\t\ttemplate = \"\"\n\t\t\t}\n\t\t} else {\n\t\t\tattrs = make(map[string]interface{})\n\t\t}\n\n\t\tannotations = append(annotations, AnnotationDeclaration{\n\t\t\tArguments: parts,\n\t\t\tName: argName,\n\t\t\tTemplate: template,\n\t\t\tParams: params,\n\t\t\tAttrs: attrs,\n\t\t})\n\n\t}\n\n\treturn annotations\n}\n\nvar ending = []byte(\"})\")\nvar newline = []byte(\"\\n\")\nvar empty = []byte(\"\")\nvar singleComment = []byte(\"\/\/\")\nvar multiComment = []byte(\"\/*\")\nvar multiCommentItem = []byte(\"*\")\nvar commentry = regexp.MustCompile(`\\s*?([\\\/\\\/*|\\*|\\\/]+)`)\n\nfunc readTemplate(reader *bufio.Reader) string {\n\tvar bu bytes.Buffer\n\n\tvar seenEnd bool\n\n\tfor {\n\t\t\/\/ Do we have another pending prefix, if so, we are at the ending, so return.\n\t\tif seenEnd {\n\t\t\tdata, _ := reader.Peek(100)\n\t\t\tdataVal := commentry.ReplaceAllString(string(data), \"\")\n\t\t\tdataVal = string(cleanWord([]byte(dataVal)))\n\n\t\t\t\/\/ fmt.Printf(\"Peek2: %+q -> %+q\\n\", dataVal, data)\n\n\t\t\tif strings.HasPrefix(string(dataVal), \"@\") {\n\t\t\t\treturn bu.String()\n\t\t\t}\n\n\t\t\t\/\/ If it's all space, then return.\n\t\t\t\/\/ if strings.TrimSpace(string(dataVal)) == \"\" {\n\t\t\treturn bu.String()\n\t\t\t\/\/ }\n\t\t}\n\n\t\ttwoWord, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbu.WriteString(twoWord)\n\t\t\treturn bu.String()\n\t\t}\n\n\t\ttwoWorded := cleanWord([]byte(twoWord))\n\t\t\/\/ fmt.Printf(\"Ending: %+q -> %t\\n\", twoWorded, bytes.HasPrefix(twoWorded, ending))\n\n\t\tif bytes.HasPrefix(twoWorded, ending) {\n\t\t\tseenEnd = true\n\t\t\tcontinue\n\t\t}\n\n\t\tbu.WriteString(twoWord)\n\t}\n\n}\n\nfunc cleanWord(word []byte) []byte {\n\tword = bytes.TrimSpace(word)\n\tword = bytes.TrimPrefix(word, singleComment)\n\tword = bytes.TrimPrefix(word, multiComment)\n\tword = bytes.TrimPrefix(word, multiCommentItem)\n\treturn bytes.TrimSpace(word)\n}\n<|endoftext|>"} {"text":"<commit_before>package remote\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nconst (\n\t\/\/ LocalDirectory is the directory created in the working\n\t\/\/ dir to hold the remote state file.\n\tLocalDirectory = \".terraform\"\n\n\t\/\/ HiddenStateFile is the name of the state file in the\n\t\/\/ LocalDirectory\n\tHiddenStateFile = \"terraform.tfstate\"\n\n\t\/\/ BackupHiddenStateFile is the path we backup the state\n\t\/\/ file to before modifications are made\n\tBackupHiddenStateFile = \"terraform.tfstate.backup\"\n)\n\n\/\/ StateChangeResult is used to communicate to a caller\n\/\/ what actions have been taken when updating a state file\ntype StateChangeResult int\n\nconst (\n\t\/\/ StateChangeNoop indicates nothing has happened,\n\t\/\/ but that does not indicate an error. Everything is\n\t\/\/ just up to date. (Push\/Pull)\n\tStateChangeNoop StateChangeResult = iota\n\n\t\/\/ StateChangeInit indicates that there is no local or\n\t\/\/ remote state, and that the state was initialized\n\tStateChangeInit\n\n\t\/\/ StateChangeUpdateLocal indicates the local state\n\t\/\/ was updated. (Pull)\n\tStateChangeUpdateLocal\n\n\t\/\/ StateChangeUpdateRemote indicates the remote state\n\t\/\/ was updated. (Push)\n\tStateChangeUpdateRemote\n\n\t\/\/ StateChangeLocalNewer means the pull was a no-op\n\t\/\/ because the local state is newer than that of the\n\t\/\/ server. This means a Push should take place. (Pull)\n\tStateChangeLocalNewer\n\n\t\/\/ StateChangeRemoteNewer means the push was a no-op\n\t\/\/ because the remote state is newer than that of the\n\t\/\/ local state. This means a Pull should take place.\n\t\/\/ (Push)\n\tStateChangeRemoteNewer\n\n\t\/\/ StateChangeConflict means that the push or pull\n\t\/\/ was a no-op because there is a conflict. This means\n\t\/\/ there are multiple state definitions at the same\n\t\/\/ serial number with different contents. This requires\n\t\/\/ an operator to intervene and resolve the conflict.\n\t\/\/ Shame on the user for doing concurrent apply.\n\t\/\/ (Push\/Pull)\n\tStateChangeConflict\n)\n\nfunc (sc StateChangeResult) String() string {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn \"Local and remote state in sync\"\n\tcase StateChangeInit:\n\t\treturn \"Local state initialized\"\n\tcase StateChangeUpdateLocal:\n\t\treturn \"Local state updated\"\n\tcase StateChangeUpdateRemote:\n\t\treturn \"Remote state updated\"\n\tcase StateChangeLocalNewer:\n\t\treturn \"Local state is newer than remote state, push required\"\n\tcase StateChangeRemoteNewer:\n\t\treturn \"Remote state is newer than local state, pull required\"\n\tcase StateChangeConflict:\n\t\treturn \"Local and remote state conflict, manual resolution required\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"Unknown state change type: %d\", sc)\n\t}\n}\n\n\/\/ SuccessfulPull is used to clasify the StateChangeResult for\n\/\/ a pull operation. This is different by operation, but can be used\n\/\/ to determine a proper exit code.\nfunc (sc StateChangeResult) SuccessfulPull() bool {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn true\n\tcase StateChangeInit:\n\t\treturn true\n\tcase StateChangeUpdateLocal:\n\t\treturn true\n\tcase StateChangeLocalNewer:\n\t\treturn false\n\tcase StateChangeConflict:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ SuccessfulPush is used to clasify the StateChangeResult for\n\/\/ a push operation. This is different by operation, but can be used\n\/\/ to determine a proper exit code\nfunc (sc StateChangeResult) SuccessfulPush() bool {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn true\n\tcase StateChangeUpdateRemote:\n\t\treturn true\n\tcase StateChangeRemoteNewer:\n\t\treturn false\n\tcase StateChangeConflict:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ EnsureDirectory is used to make sure the local storage\n\/\/ directory exists\nfunc EnsureDirectory() error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tpath := filepath.Join(cwd, LocalDirectory)\n\tif err := os.Mkdir(path, 0770); err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to make directory '%s': %v\", path, err)\n\t}\n\treturn nil\n}\n\n\/\/ HiddenStatePath is used to return the path to the hidden state file,\n\/\/ should there be one.\n\/\/ TODO: Rename to LocalStatePath\nfunc HiddenStatePath() (string, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tpath := filepath.Join(cwd, LocalDirectory, HiddenStateFile)\n\treturn path, nil\n}\n\n\/\/ HaveLocalState is used to check if we have a local state file\nfunc HaveLocalState() (bool, error) {\n\tpath, err := HiddenStatePath()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn ExistsFile(path)\n}\n\n\/\/ ExistsFile is used to check if a given file exists\nfunc ExistsFile(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}\n\n\/\/ ValidConfig does a purely logical validation of the remote config\nfunc ValidConfig(conf *terraform.RemoteState) error {\n\t\/\/ Default the type to Atlas\n\tif conf.Type == \"\" {\n\t\tconf.Type = \"atlas\"\n\t}\n\t_, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ReadLocalState is used to read and parse the local state file\nfunc ReadLocalState() (*terraform.State, []byte, error) {\n\tpath, err := HiddenStatePath()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Open the existing file\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil, nil\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"Failed to open state file '%s': %s\", path, err)\n\t}\n\n\t\/\/ Decode the state\n\tstate, err := terraform.ReadState(bytes.NewReader(raw))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Failed to read state file '%s': %v\", path, err)\n\t}\n\treturn state, raw, nil\n}\n\n\/\/ RefreshState is used to read the remote state given\n\/\/ the configuration for the remote endpoint, and update\n\/\/ the local state if necessary.\nfunc RefreshState(conf *terraform.RemoteState) (StateChangeResult, error) {\n\tif conf == nil {\n\t\treturn StateChangeNoop, fmt.Errorf(\"Missing remote server configuration\")\n\t}\n\n\t\/\/ Read the state from the server\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\tpayload, err := client.GetState()\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to read remote state: %v\", err)\n\t}\n\n\t\/\/ Parse the remote state\n\tvar remoteState *terraform.State\n\tif payload != nil {\n\t\tremoteState, err = terraform.ReadState(bytes.NewReader(payload.State))\n\t\tif err != nil {\n\t\t\treturn StateChangeNoop,\n\t\t\t\tfmt.Errorf(\"Failed to parse remote state: %v\", err)\n\t\t}\n\n\t\t\/\/ Ensure we understand the remote version!\n\t\tif remoteState.Version > terraform.StateVersion {\n\t\t\treturn StateChangeNoop, fmt.Errorf(\n\t\t\t\t`Remote state is version %d, this version of Terraform only understands up to %d`, remoteState.Version, terraform.StateVersion)\n\t\t}\n\t}\n\n\t\/\/ Decode the state\n\tlocalState, raw, err := ReadLocalState()\n\tif err != nil {\n\t\treturn StateChangeNoop, err\n\t}\n\n\t\/\/ We need to handle the matrix of cases in reconciling\n\t\/\/ the local and remote state. Primarily the concern is\n\t\/\/ around the Serial number which should grow monotonically.\n\t\/\/ Additionally, we use the MD5 to detect a conflict for\n\t\/\/ a given Serial.\n\tswitch {\n\tcase remoteState == nil && localState == nil:\n\t\t\/\/ Initialize a blank state\n\t\tout, _ := blankState(conf)\n\t\tif err := Persist(bytes.NewReader(out)); err != nil {\n\t\t\treturn StateChangeNoop,\n\t\t\t\tfmt.Errorf(\"Failed to persist state: %v\", err)\n\t\t}\n\t\treturn StateChangeInit, nil\n\n\tcase remoteState == nil && localState != nil:\n\t\t\/\/ User should probably do a push, nothing to do\n\t\treturn StateChangeLocalNewer, nil\n\n\tcase remoteState != nil && localState == nil:\n\t\tgoto PERSIST\n\n\tcase remoteState.Serial < localState.Serial:\n\t\t\/\/ User should probably do a push, nothing to do\n\t\treturn StateChangeLocalNewer, nil\n\n\tcase remoteState.Serial > localState.Serial:\n\t\tgoto PERSIST\n\n\tcase remoteState.Serial == localState.Serial:\n\t\t\/\/ Check for a hash collision on the local\/remote state\n\t\tlocalMD5 := md5.Sum(raw)\n\t\tif bytes.Equal(localMD5[:md5.Size], payload.MD5) {\n\t\t\t\/\/ Hash collision, everything is up-to-date\n\t\t\treturn StateChangeNoop, nil\n\t\t} else {\n\t\t\t\/\/ This is very bad. This means we have 2 state files\n\t\t\t\/\/ with the same Serial but a different hash. Most probably\n\t\t\t\/\/ explaination is two parallel apply operations. This\n\t\t\t\/\/ requires a manual reconciliation.\n\t\t\treturn StateChangeConflict, nil\n\t\t}\n\tdefault:\n\t\t\/\/ We should not reach this point\n\t\tpanic(\"Unhandled remote update case\")\n\t}\n\nPERSIST:\n\t\/\/ Update the local state from the remote state\n\tif err := Persist(bytes.NewReader(payload.State)); err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to persist state: %v\", err)\n\t}\n\treturn StateChangeUpdateLocal, nil\n}\n\n\/\/ PushState is used to read the local state and\n\/\/ update the remote state if necessary. The state push\n\/\/ can be 'forced' to override any conflict detection\n\/\/ on the server-side.\nfunc PushState(conf *terraform.RemoteState, force bool) (StateChangeResult, error) {\n\t\/\/ Read the local state\n\t_, raw, err := ReadLocalState()\n\tif err != nil {\n\t\treturn StateChangeNoop, err\n\t}\n\n\t\/\/ Check if there is no local state\n\tif raw == nil {\n\t\treturn StateChangeNoop, fmt.Errorf(\"No local state to push\")\n\t}\n\n\t\/\/ Push the state to the server\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\terr = client.PutState(raw, force)\n\n\t\/\/ Handle the various edge cases\n\tswitch err {\n\tcase nil:\n\t\treturn StateChangeUpdateRemote, nil\n\tcase ErrServerNewer:\n\t\treturn StateChangeRemoteNewer, nil\n\tcase ErrConflict:\n\t\treturn StateChangeConflict, nil\n\tdefault:\n\t\treturn StateChangeNoop, err\n\t}\n}\n\n\/\/ DeleteState is used to delete the remote state given\n\/\/ the configuration for the remote endpoint.\nfunc DeleteState(conf *terraform.RemoteState) error {\n\tif conf == nil {\n\t\treturn fmt.Errorf(\"Missing remote server configuration\")\n\t}\n\n\t\/\/ Setup the client\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\n\t\/\/ Destroy the state\n\terr = client.DeleteState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to delete remote state: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ blankState is used to return a serialized form of a blank state\n\/\/ with only the remote info.\nfunc blankState(conf *terraform.RemoteState) ([]byte, error) {\n\tblank := terraform.NewState()\n\tblank.Remote = conf\n\tbuf := bytes.NewBuffer(nil)\n\terr := terraform.WriteState(blank, buf)\n\treturn buf.Bytes(), err\n}\n\n\/\/ PersistState is used to persist out the given terraform state\n\/\/ in our local state cache location.\nfunc PersistState(s *terraform.State) error {\n\tbuf := bytes.NewBuffer(nil)\n\tif err := terraform.WriteState(s, buf); err != nil {\n\t\treturn fmt.Errorf(\"Failed to encode state: %v\", err)\n\t}\n\tif err := Persist(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Persist is used to write out the state given by a reader (likely\n\/\/ being streamed from a remote server) to the local storage.\nfunc Persist(r io.Reader) error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tstatePath := filepath.Join(cwd, LocalDirectory, HiddenStateFile)\n\tbackupPath := filepath.Join(cwd, LocalDirectory, BackupHiddenStateFile)\n\n\t\/\/ Backup the old file if it exists\n\tif err := CopyFile(statePath, backupPath); err != nil {\n\t\treturn fmt.Errorf(\"Failed to backup state file '%s' to '%s': %v\", statePath, backupPath, err)\n\t}\n\n\t\/\/ Open the state path\n\tfh, err := os.Create(statePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open state file '%s': %v\", statePath, err)\n\t}\n\n\t\/\/ Copy the new state\n\t_, err = io.Copy(fh, r)\n\tfh.Close()\n\tif err != nil {\n\t\tos.Remove(statePath)\n\t\treturn fmt.Errorf(\"Failed to persist state file: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ CopyFile is used to copy from a source file if it exists to a destination.\n\/\/ This is used to create a backup of the state file.\nfunc CopyFile(src, dst string) error {\n\tsrcFH, err := os.Open(src)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdefer srcFH.Close()\n\n\tdstFH, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dstFH.Close()\n\n\t_, err = io.Copy(dstFH, srcFH)\n\treturn err\n}\n<commit_msg>remote: just limiting the public API<commit_after>package remote\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nconst (\n\t\/\/ LocalDirectory is the directory created in the working\n\t\/\/ dir to hold the remote state file.\n\tLocalDirectory = \".terraform\"\n\n\t\/\/ HiddenStateFile is the name of the state file in the\n\t\/\/ LocalDirectory\n\tHiddenStateFile = \"terraform.tfstate\"\n\n\t\/\/ BackupHiddenStateFile is the path we backup the state\n\t\/\/ file to before modifications are made\n\tBackupHiddenStateFile = \"terraform.tfstate.backup\"\n)\n\n\/\/ StateChangeResult is used to communicate to a caller\n\/\/ what actions have been taken when updating a state file\ntype StateChangeResult int\n\nconst (\n\t\/\/ StateChangeNoop indicates nothing has happened,\n\t\/\/ but that does not indicate an error. Everything is\n\t\/\/ just up to date. (Push\/Pull)\n\tStateChangeNoop StateChangeResult = iota\n\n\t\/\/ StateChangeInit indicates that there is no local or\n\t\/\/ remote state, and that the state was initialized\n\tStateChangeInit\n\n\t\/\/ StateChangeUpdateLocal indicates the local state\n\t\/\/ was updated. (Pull)\n\tStateChangeUpdateLocal\n\n\t\/\/ StateChangeUpdateRemote indicates the remote state\n\t\/\/ was updated. (Push)\n\tStateChangeUpdateRemote\n\n\t\/\/ StateChangeLocalNewer means the pull was a no-op\n\t\/\/ because the local state is newer than that of the\n\t\/\/ server. This means a Push should take place. (Pull)\n\tStateChangeLocalNewer\n\n\t\/\/ StateChangeRemoteNewer means the push was a no-op\n\t\/\/ because the remote state is newer than that of the\n\t\/\/ local state. This means a Pull should take place.\n\t\/\/ (Push)\n\tStateChangeRemoteNewer\n\n\t\/\/ StateChangeConflict means that the push or pull\n\t\/\/ was a no-op because there is a conflict. This means\n\t\/\/ there are multiple state definitions at the same\n\t\/\/ serial number with different contents. This requires\n\t\/\/ an operator to intervene and resolve the conflict.\n\t\/\/ Shame on the user for doing concurrent apply.\n\t\/\/ (Push\/Pull)\n\tStateChangeConflict\n)\n\nfunc (sc StateChangeResult) String() string {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn \"Local and remote state in sync\"\n\tcase StateChangeInit:\n\t\treturn \"Local state initialized\"\n\tcase StateChangeUpdateLocal:\n\t\treturn \"Local state updated\"\n\tcase StateChangeUpdateRemote:\n\t\treturn \"Remote state updated\"\n\tcase StateChangeLocalNewer:\n\t\treturn \"Local state is newer than remote state, push required\"\n\tcase StateChangeRemoteNewer:\n\t\treturn \"Remote state is newer than local state, pull required\"\n\tcase StateChangeConflict:\n\t\treturn \"Local and remote state conflict, manual resolution required\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"Unknown state change type: %d\", sc)\n\t}\n}\n\n\/\/ SuccessfulPull is used to clasify the StateChangeResult for\n\/\/ a pull operation. This is different by operation, but can be used\n\/\/ to determine a proper exit code.\nfunc (sc StateChangeResult) SuccessfulPull() bool {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn true\n\tcase StateChangeInit:\n\t\treturn true\n\tcase StateChangeUpdateLocal:\n\t\treturn true\n\tcase StateChangeLocalNewer:\n\t\treturn false\n\tcase StateChangeConflict:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ SuccessfulPush is used to clasify the StateChangeResult for\n\/\/ a push operation. This is different by operation, but can be used\n\/\/ to determine a proper exit code\nfunc (sc StateChangeResult) SuccessfulPush() bool {\n\tswitch sc {\n\tcase StateChangeNoop:\n\t\treturn true\n\tcase StateChangeUpdateRemote:\n\t\treturn true\n\tcase StateChangeRemoteNewer:\n\t\treturn false\n\tcase StateChangeConflict:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ EnsureDirectory is used to make sure the local storage\n\/\/ directory exists\nfunc EnsureDirectory() error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tpath := filepath.Join(cwd, LocalDirectory)\n\tif err := os.Mkdir(path, 0770); err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to make directory '%s': %v\", path, err)\n\t}\n\treturn nil\n}\n\n\/\/ HiddenStatePath is used to return the path to the hidden state file,\n\/\/ should there be one.\n\/\/ TODO: Rename to LocalStatePath\nfunc HiddenStatePath() (string, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tpath := filepath.Join(cwd, LocalDirectory, HiddenStateFile)\n\treturn path, nil\n}\n\n\/\/ HaveLocalState is used to check if we have a local state file\nfunc HaveLocalState() (bool, error) {\n\tpath, err := HiddenStatePath()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn existsFile(path)\n}\n\n\/\/ ValidConfig does a purely logical validation of the remote config\nfunc ValidConfig(conf *terraform.RemoteState) error {\n\t\/\/ Default the type to Atlas\n\tif conf.Type == \"\" {\n\t\tconf.Type = \"atlas\"\n\t}\n\t_, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ReadLocalState is used to read and parse the local state file\nfunc ReadLocalState() (*terraform.State, []byte, error) {\n\tpath, err := HiddenStatePath()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Open the existing file\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil, nil\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"Failed to open state file '%s': %s\", path, err)\n\t}\n\n\t\/\/ Decode the state\n\tstate, err := terraform.ReadState(bytes.NewReader(raw))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Failed to read state file '%s': %v\", path, err)\n\t}\n\treturn state, raw, nil\n}\n\n\/\/ RefreshState is used to read the remote state given\n\/\/ the configuration for the remote endpoint, and update\n\/\/ the local state if necessary.\nfunc RefreshState(conf *terraform.RemoteState) (StateChangeResult, error) {\n\tif conf == nil {\n\t\treturn StateChangeNoop, fmt.Errorf(\"Missing remote server configuration\")\n\t}\n\n\t\/\/ Read the state from the server\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\tpayload, err := client.GetState()\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to read remote state: %v\", err)\n\t}\n\n\t\/\/ Parse the remote state\n\tvar remoteState *terraform.State\n\tif payload != nil {\n\t\tremoteState, err = terraform.ReadState(bytes.NewReader(payload.State))\n\t\tif err != nil {\n\t\t\treturn StateChangeNoop,\n\t\t\t\tfmt.Errorf(\"Failed to parse remote state: %v\", err)\n\t\t}\n\n\t\t\/\/ Ensure we understand the remote version!\n\t\tif remoteState.Version > terraform.StateVersion {\n\t\t\treturn StateChangeNoop, fmt.Errorf(\n\t\t\t\t`Remote state is version %d, this version of Terraform only understands up to %d`, remoteState.Version, terraform.StateVersion)\n\t\t}\n\t}\n\n\t\/\/ Decode the state\n\tlocalState, raw, err := ReadLocalState()\n\tif err != nil {\n\t\treturn StateChangeNoop, err\n\t}\n\n\t\/\/ We need to handle the matrix of cases in reconciling\n\t\/\/ the local and remote state. Primarily the concern is\n\t\/\/ around the Serial number which should grow monotonically.\n\t\/\/ Additionally, we use the MD5 to detect a conflict for\n\t\/\/ a given Serial.\n\tswitch {\n\tcase remoteState == nil && localState == nil:\n\t\t\/\/ Initialize a blank state\n\t\tout, _ := blankState(conf)\n\t\tif err := Persist(bytes.NewReader(out)); err != nil {\n\t\t\treturn StateChangeNoop,\n\t\t\t\tfmt.Errorf(\"Failed to persist state: %v\", err)\n\t\t}\n\t\treturn StateChangeInit, nil\n\n\tcase remoteState == nil && localState != nil:\n\t\t\/\/ User should probably do a push, nothing to do\n\t\treturn StateChangeLocalNewer, nil\n\n\tcase remoteState != nil && localState == nil:\n\t\tgoto PERSIST\n\n\tcase remoteState.Serial < localState.Serial:\n\t\t\/\/ User should probably do a push, nothing to do\n\t\treturn StateChangeLocalNewer, nil\n\n\tcase remoteState.Serial > localState.Serial:\n\t\tgoto PERSIST\n\n\tcase remoteState.Serial == localState.Serial:\n\t\t\/\/ Check for a hash collision on the local\/remote state\n\t\tlocalMD5 := md5.Sum(raw)\n\t\tif bytes.Equal(localMD5[:md5.Size], payload.MD5) {\n\t\t\t\/\/ Hash collision, everything is up-to-date\n\t\t\treturn StateChangeNoop, nil\n\t\t} else {\n\t\t\t\/\/ This is very bad. This means we have 2 state files\n\t\t\t\/\/ with the same Serial but a different hash. Most probably\n\t\t\t\/\/ explaination is two parallel apply operations. This\n\t\t\t\/\/ requires a manual reconciliation.\n\t\t\treturn StateChangeConflict, nil\n\t\t}\n\tdefault:\n\t\t\/\/ We should not reach this point\n\t\tpanic(\"Unhandled remote update case\")\n\t}\n\nPERSIST:\n\t\/\/ Update the local state from the remote state\n\tif err := Persist(bytes.NewReader(payload.State)); err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to persist state: %v\", err)\n\t}\n\treturn StateChangeUpdateLocal, nil\n}\n\n\/\/ PushState is used to read the local state and\n\/\/ update the remote state if necessary. The state push\n\/\/ can be 'forced' to override any conflict detection\n\/\/ on the server-side.\nfunc PushState(conf *terraform.RemoteState, force bool) (StateChangeResult, error) {\n\t\/\/ Read the local state\n\t_, raw, err := ReadLocalState()\n\tif err != nil {\n\t\treturn StateChangeNoop, err\n\t}\n\n\t\/\/ Check if there is no local state\n\tif raw == nil {\n\t\treturn StateChangeNoop, fmt.Errorf(\"No local state to push\")\n\t}\n\n\t\/\/ Push the state to the server\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn StateChangeNoop,\n\t\t\tfmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\terr = client.PutState(raw, force)\n\n\t\/\/ Handle the various edge cases\n\tswitch err {\n\tcase nil:\n\t\treturn StateChangeUpdateRemote, nil\n\tcase ErrServerNewer:\n\t\treturn StateChangeRemoteNewer, nil\n\tcase ErrConflict:\n\t\treturn StateChangeConflict, nil\n\tdefault:\n\t\treturn StateChangeNoop, err\n\t}\n}\n\n\/\/ DeleteState is used to delete the remote state given\n\/\/ the configuration for the remote endpoint.\nfunc DeleteState(conf *terraform.RemoteState) error {\n\tif conf == nil {\n\t\treturn fmt.Errorf(\"Missing remote server configuration\")\n\t}\n\n\t\/\/ Setup the client\n\tclient, err := NewClientByState(conf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create remote client: %v\", err)\n\t}\n\n\t\/\/ Destroy the state\n\terr = client.DeleteState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to delete remote state: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ blankState is used to return a serialized form of a blank state\n\/\/ with only the remote info.\nfunc blankState(conf *terraform.RemoteState) ([]byte, error) {\n\tblank := terraform.NewState()\n\tblank.Remote = conf\n\tbuf := bytes.NewBuffer(nil)\n\terr := terraform.WriteState(blank, buf)\n\treturn buf.Bytes(), err\n}\n\n\/\/ PersistState is used to persist out the given terraform state\n\/\/ in our local state cache location.\nfunc PersistState(s *terraform.State) error {\n\tbuf := bytes.NewBuffer(nil)\n\tif err := terraform.WriteState(s, buf); err != nil {\n\t\treturn fmt.Errorf(\"Failed to encode state: %v\", err)\n\t}\n\tif err := Persist(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Persist is used to write out the state given by a reader (likely\n\/\/ being streamed from a remote server) to the local storage.\nfunc Persist(r io.Reader) error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get current directory: %v\", err)\n\t}\n\tstatePath := filepath.Join(cwd, LocalDirectory, HiddenStateFile)\n\tbackupPath := filepath.Join(cwd, LocalDirectory, BackupHiddenStateFile)\n\n\t\/\/ Backup the old file if it exists\n\tif err := copyFile(statePath, backupPath); err != nil {\n\t\treturn fmt.Errorf(\"Failed to backup state file '%s' to '%s': %v\", statePath, backupPath, err)\n\t}\n\n\t\/\/ Open the state path\n\tfh, err := os.Create(statePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to open state file '%s': %v\", statePath, err)\n\t}\n\n\t\/\/ Copy the new state\n\t_, err = io.Copy(fh, r)\n\tfh.Close()\n\tif err != nil {\n\t\tos.Remove(statePath)\n\t\treturn fmt.Errorf(\"Failed to persist state file: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ copyFile is used to copy from a source file if it exists to a destination.\n\/\/ This is used to create a backup of the state file.\nfunc copyFile(src, dst string) error {\n\tsrcFH, err := os.Open(src)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdefer srcFH.Close()\n\n\tdstFH, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dstFH.Close()\n\n\t_, err = io.Copy(dstFH, srcFH)\n\treturn err\n}\n\n\/\/ existsFile is used to check if a given file exists\nfunc existsFile(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\/\/\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"crypto\/sha1\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/peterhellberg\/emojilib\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Gateway struct {\n\t*config.Config\n\tRouter *Router\n\tMyConfig *config.Gateway\n\tBridges map[string]*bridge.Bridge\n\tChannels map[string]*config.ChannelInfo\n\tChannelOptions map[string]config.ChannelOptions\n\tMessage chan config.Message\n\tName string\n\tMessages *lru.Cache\n}\n\ntype BrMsgID struct {\n\tbr *bridge.Bridge\n\tID string\n\tChannelID string\n}\n\nfunc New(cfg config.Gateway, r *Router) *Gateway {\n\tgw := &Gateway{Channels: make(map[string]*config.ChannelInfo), Message: r.Message,\n\t\tRouter: r, Bridges: make(map[string]*bridge.Bridge), Config: r.Config}\n\tcache, _ := lru.New(5000)\n\tgw.Messages = cache\n\tgw.AddConfig(&cfg)\n\treturn gw\n}\n\nfunc (gw *Gateway) AddBridge(cfg *config.Bridge) error {\n\tbr := gw.Router.getBridge(cfg.Account)\n\tif br == nil {\n\t\tbr = bridge.New(gw.Config, cfg, gw.Message)\n\t}\n\tgw.mapChannelsToBridge(br)\n\tgw.Bridges[cfg.Account] = br\n\treturn nil\n}\n\nfunc (gw *Gateway) AddConfig(cfg *config.Gateway) error {\n\tgw.Name = cfg.Name\n\tgw.MyConfig = cfg\n\tgw.mapChannels()\n\tfor _, br := range append(gw.MyConfig.In, append(gw.MyConfig.InOut, gw.MyConfig.Out...)...) {\n\t\terr := gw.AddBridge(&br)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (gw *Gateway) mapChannelsToBridge(br *bridge.Bridge) {\n\tfor ID, channel := range gw.Channels {\n\t\tif br.Account == channel.Account {\n\t\t\tbr.Channels[ID] = *channel\n\t\t}\n\t}\n}\n\nfunc (gw *Gateway) reconnectBridge(br *bridge.Bridge) {\n\tbr.Disconnect()\n\ttime.Sleep(time.Second * 5)\nRECONNECT:\n\tlog.Infof(\"Reconnecting %s\", br.Account)\n\terr := br.Connect()\n\tif err != nil {\n\t\tlog.Errorf(\"Reconnection failed: %s. Trying again in 60 seconds\", err)\n\t\ttime.Sleep(time.Second * 60)\n\t\tgoto RECONNECT\n\t}\n\tbr.Joined = make(map[string]bool)\n\tbr.JoinChannels()\n}\n\nfunc (gw *Gateway) mapChannelConfig(cfg []config.Bridge, direction string) {\n\tfor _, br := range cfg {\n\t\tif isApi(br.Account) {\n\t\t\tbr.Channel = \"api\"\n\t\t}\n\t\tID := br.Channel + br.Account\n\t\tif _, ok := gw.Channels[ID]; !ok {\n\t\t\tchannel := &config.ChannelInfo{Name: br.Channel, Direction: direction, ID: ID, Options: br.Options, Account: br.Account,\n\t\t\t\tSameChannel: make(map[string]bool)}\n\t\t\tchannel.SameChannel[gw.Name] = br.SameChannel\n\t\t\tgw.Channels[channel.ID] = channel\n\t\t} else {\n\t\t\t\/\/ if we already have a key and it's not our current direction it means we have a bidirectional inout\n\t\t\tif gw.Channels[ID].Direction != direction {\n\t\t\t\tgw.Channels[ID].Direction = \"inout\"\n\t\t\t}\n\t\t}\n\t\tgw.Channels[ID].SameChannel[gw.Name] = br.SameChannel\n\t}\n}\n\nfunc (gw *Gateway) mapChannels() error {\n\tgw.mapChannelConfig(gw.MyConfig.In, \"in\")\n\tgw.mapChannelConfig(gw.MyConfig.Out, \"out\")\n\tgw.mapChannelConfig(gw.MyConfig.InOut, \"inout\")\n\treturn nil\n}\n\nfunc (gw *Gateway) getDestChannel(msg *config.Message, dest bridge.Bridge) []config.ChannelInfo {\n\tvar channels []config.ChannelInfo\n\t\/\/ if source channel is in only, do nothing\n\tfor _, channel := range gw.Channels {\n\t\t\/\/ lookup the channel from the message\n\t\tif channel.ID == getChannelID(*msg) {\n\t\t\t\/\/ we only have destinations if the original message is from an \"in\" (sending) channel\n\t\t\tif !strings.Contains(channel.Direction, \"in\") {\n\t\t\t\treturn channels\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\tfor _, channel := range gw.Channels {\n\t\tif _, ok := gw.Channels[getChannelID(*msg)]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ do samechannelgateway logic\n\t\tif channel.SameChannel[msg.Gateway] {\n\t\t\tif msg.Channel == channel.Name && msg.Account != dest.Account {\n\t\t\t\tchannels = append(channels, *channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(channel.Direction, \"out\") && channel.Account == dest.Account && gw.validGatewayDest(msg, channel) {\n\t\t\tchannels = append(channels, *channel)\n\t\t}\n\t}\n\treturn channels\n}\n\nfunc (gw *Gateway) handleMessage(msg config.Message, dest *bridge.Bridge) []*BrMsgID {\n\tvar brMsgIDs []*BrMsgID\n\n\t\/\/ TODO refactor\n\t\/\/ only slack now, check will have to be done in the different bridges.\n\t\/\/ we need to check if we can't use fallback or text in other bridges\n\tif msg.Extra != nil {\n\t\tif dest.Protocol != \"discord\" &&\n\t\t\tdest.Protocol != \"slack\" &&\n\t\t\tdest.Protocol != \"mattermost\" &&\n\t\t\tdest.Protocol != \"telegram\" &&\n\t\t\tdest.Protocol != \"matrix\" {\n\t\t\tif msg.Text == \"\" {\n\t\t\t\treturn brMsgIDs\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ only relay join\/part when configged\n\tif msg.Event == config.EVENT_JOIN_LEAVE && !gw.Bridges[dest.Account].Config.ShowJoinPart {\n\t\treturn brMsgIDs\n\t}\n\t\/\/ broadcast to every out channel (irc QUIT)\n\tif msg.Channel == \"\" && msg.Event != config.EVENT_JOIN_LEAVE {\n\t\tlog.Debug(\"empty channel\")\n\t\treturn brMsgIDs\n\t}\n\toriginchannel := msg.Channel\n\torigmsg := msg\n\tchannels := gw.getDestChannel(&msg, *dest)\n\tfor _, channel := range channels {\n\t\t\/\/ do not send to ourself\n\t\tif channel.ID == getChannelID(origmsg) {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Debugf(\"Sending %#v from %s (%s) to %s (%s)\", msg, msg.Account, originchannel, dest.Account, channel.Name)\n\t\tmsg.Channel = channel.Name\n\t\tmsg.Avatar = gw.modifyAvatar(origmsg, dest)\n\t\tmsg.Username = gw.modifyUsername(origmsg, dest)\n\t\tmsg.ID = \"\"\n\t\tif res, ok := gw.Messages.Get(origmsg.ID); ok {\n\t\t\tIDs := res.([]*BrMsgID)\n\t\t\tfor _, id := range IDs {\n\t\t\t\t\/\/ check protocol, bridge name and channelname\n\t\t\t\t\/\/ for people that reuse the same bridge multiple times. see #342\n\t\t\t\tif dest.Protocol == id.br.Protocol && dest.Name == id.br.Name && channel.ID == id.ChannelID {\n\t\t\t\t\tmsg.ID = id.ID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ for api we need originchannel as channel\n\t\tif dest.Protocol == \"api\" {\n\t\t\tmsg.Channel = originchannel\n\t\t}\n\t\tmID, err := dest.Send(msg)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\t\/\/ append the message ID (mID) from this bridge (dest) to our brMsgIDs slice\n\t\tif mID != \"\" {\n\t\t\tbrMsgIDs = append(brMsgIDs, &BrMsgID{dest, mID, channel.ID})\n\t\t}\n\t}\n\treturn brMsgIDs\n}\n\nfunc (gw *Gateway) ignoreMessage(msg *config.Message) bool {\n\t\/\/ if we don't have the bridge, ignore it\n\tif _, ok := gw.Bridges[msg.Account]; !ok {\n\t\treturn true\n\t}\n\tif msg.Text == \"\" {\n\t\t\/\/ we have an attachment or actual bytes\n\t\tif msg.Extra != nil && (msg.Extra[\"attachments\"] != nil || len(msg.Extra[\"file\"]) > 0) {\n\t\t\treturn false\n\t\t}\n\t\tlog.Debugf(\"ignoring empty message %#v from %s\", msg, msg.Account)\n\t\treturn true\n\t}\n\tfor _, entry := range strings.Fields(gw.Bridges[msg.Account].Config.IgnoreNicks) {\n\t\tif msg.Username == entry {\n\t\t\tlog.Debugf(\"ignoring %s from %s\", msg.Username, msg.Account)\n\t\t\treturn true\n\t\t}\n\t}\n\t\/\/ TODO do not compile regexps everytime\n\tfor _, entry := range strings.Fields(gw.Bridges[msg.Account].Config.IgnoreMessages) {\n\t\tif entry != \"\" {\n\t\t\tre, err := regexp.Compile(entry)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"incorrect regexp %s for %s\", entry, msg.Account)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif re.MatchString(msg.Text) {\n\t\t\t\tlog.Debugf(\"matching %s. ignoring %s from %s\", entry, msg.Text, msg.Account)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (gw *Gateway) modifyUsername(msg config.Message, dest *bridge.Bridge) string {\n\tbr := gw.Bridges[msg.Account]\n\tmsg.Protocol = br.Protocol\n\tif gw.Config.General.StripNick || dest.Config.StripNick {\n\t\tre := regexp.MustCompile(\"[^a-zA-Z0-9]+\")\n\t\tmsg.Username = re.ReplaceAllString(msg.Username, \"\")\n\t}\n\tnick := dest.Config.RemoteNickFormat\n\tif nick == \"\" {\n\t\tnick = gw.Config.General.RemoteNickFormat\n\t}\n\n\t\/\/ loop to replace nicks\n\tfor _, outer := range br.Config.ReplaceNicks {\n\t\tsearch := outer[0]\n\t\treplace := outer[1]\n\t\t\/\/ TODO move compile to bridge init somewhere\n\t\tre, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"regexp in %s failed: %s\", msg.Account, err)\n\t\t\tbreak\n\t\t}\n\t\tmsg.Username = re.ReplaceAllString(msg.Username, replace)\n\t}\n\n\tif len(msg.Username) > 0 {\n\t\t\/\/ fix utf-8 issue #193\n\t\ti := 0\n\t\tfor index := range msg.Username {\n\t\t\tif i == 1 {\n\t\t\t\ti = index\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tnick = strings.Replace(nick, \"{NOPINGNICK}\", msg.Username[:i]+\"​\"+msg.Username[i:], -1)\n\t}\n\tnick = strings.Replace(nick, \"{BRIDGE}\", br.Name, -1)\n\tnick = strings.Replace(nick, \"{PROTOCOL}\", br.Protocol, -1)\n\tnick = strings.Replace(nick, \"{NICK}\", msg.Username, -1)\n\treturn nick\n}\n\nfunc (gw *Gateway) modifyAvatar(msg config.Message, dest *bridge.Bridge) string {\n\ticonurl := gw.Config.General.IconURL\n\tif iconurl == \"\" {\n\t\ticonurl = dest.Config.IconURL\n\t}\n\ticonurl = strings.Replace(iconurl, \"{NICK}\", msg.Username, -1)\n\tif msg.Avatar == \"\" {\n\t\tmsg.Avatar = iconurl\n\t}\n\treturn msg.Avatar\n}\n\nfunc (gw *Gateway) modifyMessage(msg *config.Message) {\n\t\/\/ replace :emoji: to unicode\n\tmsg.Text = emojilib.Replace(msg.Text)\n\tbr := gw.Bridges[msg.Account]\n\t\/\/ loop to replace messages\n\tfor _, outer := range br.Config.ReplaceMessages {\n\t\tsearch := outer[0]\n\t\treplace := outer[1]\n\t\t\/\/ TODO move compile to bridge init somewhere\n\t\tre, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"regexp in %s failed: %s\", msg.Account, err)\n\t\t\tbreak\n\t\t}\n\t\tmsg.Text = re.ReplaceAllString(msg.Text, replace)\n\t}\n\tmsg.Gateway = gw.Name\n}\n\nfunc (gw *Gateway) handleFiles(msg *config.Message) {\n\tif msg.Extra == nil || gw.Config.General.MediaServerUpload == \"\" {\n\t\treturn\n\t}\n\tif len(msg.Extra[\"file\"]) > 0 {\n\t\tclient := &http.Client{\n\t\t\tTimeout: time.Second * 5,\n\t\t}\n\t\tfor i, f := range msg.Extra[\"file\"] {\n\t\t\tfi := f.(config.FileInfo)\n\t\t\tsha1sum := fmt.Sprintf(\"%x\", sha1.Sum(*fi.Data))\n\t\t\treader := bytes.NewReader(*fi.Data)\n\t\t\turl := gw.Config.General.MediaServerUpload + \"\/\" + sha1sum + \"\/\" + fi.Name\n\t\t\tdurl := gw.Config.General.MediaServerDownload + \"\/\" + sha1sum + \"\/\" + fi.Name\n\t\t\textra := msg.Extra[\"file\"][i].(config.FileInfo)\n\t\t\textra.URL = durl\n\t\t\tmsg.Extra[\"file\"][i] = extra\n\t\t\treq, _ := http.NewRequest(\"PUT\", url, reader)\n\t\t\treq.Header.Set(\"Content-Type\", \"binary\/octet-stream\")\n\t\t\t_, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"mediaserver upload failed: %#v\", err)\n\t\t\t}\n\t\t\tlog.Debugf(\"mediaserver download URL = %s\", durl)\n\t\t}\n\t}\n}\n\nfunc getChannelID(msg config.Message) string {\n\treturn msg.Channel + msg.Account\n}\n\nfunc (gw *Gateway) validGatewayDest(msg *config.Message, channel *config.ChannelInfo) bool {\n\treturn msg.Gateway == gw.Name\n}\n\nfunc isApi(account string) bool {\n\treturn strings.HasPrefix(account, \"api.\")\n}\n<commit_msg>Obey the Gateway value from the json (api). Closes #344<commit_after>package gateway\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\/\/\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"crypto\/sha1\"\n\t\"github.com\/hashicorp\/golang-lru\"\n\t\"github.com\/peterhellberg\/emojilib\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Gateway struct {\n\t*config.Config\n\tRouter *Router\n\tMyConfig *config.Gateway\n\tBridges map[string]*bridge.Bridge\n\tChannels map[string]*config.ChannelInfo\n\tChannelOptions map[string]config.ChannelOptions\n\tMessage chan config.Message\n\tName string\n\tMessages *lru.Cache\n}\n\ntype BrMsgID struct {\n\tbr *bridge.Bridge\n\tID string\n\tChannelID string\n}\n\nfunc New(cfg config.Gateway, r *Router) *Gateway {\n\tgw := &Gateway{Channels: make(map[string]*config.ChannelInfo), Message: r.Message,\n\t\tRouter: r, Bridges: make(map[string]*bridge.Bridge), Config: r.Config}\n\tcache, _ := lru.New(5000)\n\tgw.Messages = cache\n\tgw.AddConfig(&cfg)\n\treturn gw\n}\n\nfunc (gw *Gateway) AddBridge(cfg *config.Bridge) error {\n\tbr := gw.Router.getBridge(cfg.Account)\n\tif br == nil {\n\t\tbr = bridge.New(gw.Config, cfg, gw.Message)\n\t}\n\tgw.mapChannelsToBridge(br)\n\tgw.Bridges[cfg.Account] = br\n\treturn nil\n}\n\nfunc (gw *Gateway) AddConfig(cfg *config.Gateway) error {\n\tgw.Name = cfg.Name\n\tgw.MyConfig = cfg\n\tgw.mapChannels()\n\tfor _, br := range append(gw.MyConfig.In, append(gw.MyConfig.InOut, gw.MyConfig.Out...)...) {\n\t\terr := gw.AddBridge(&br)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (gw *Gateway) mapChannelsToBridge(br *bridge.Bridge) {\n\tfor ID, channel := range gw.Channels {\n\t\tif br.Account == channel.Account {\n\t\t\tbr.Channels[ID] = *channel\n\t\t}\n\t}\n}\n\nfunc (gw *Gateway) reconnectBridge(br *bridge.Bridge) {\n\tbr.Disconnect()\n\ttime.Sleep(time.Second * 5)\nRECONNECT:\n\tlog.Infof(\"Reconnecting %s\", br.Account)\n\terr := br.Connect()\n\tif err != nil {\n\t\tlog.Errorf(\"Reconnection failed: %s. Trying again in 60 seconds\", err)\n\t\ttime.Sleep(time.Second * 60)\n\t\tgoto RECONNECT\n\t}\n\tbr.Joined = make(map[string]bool)\n\tbr.JoinChannels()\n}\n\nfunc (gw *Gateway) mapChannelConfig(cfg []config.Bridge, direction string) {\n\tfor _, br := range cfg {\n\t\tif isApi(br.Account) {\n\t\t\tbr.Channel = \"api\"\n\t\t}\n\t\tID := br.Channel + br.Account\n\t\tif _, ok := gw.Channels[ID]; !ok {\n\t\t\tchannel := &config.ChannelInfo{Name: br.Channel, Direction: direction, ID: ID, Options: br.Options, Account: br.Account,\n\t\t\t\tSameChannel: make(map[string]bool)}\n\t\t\tchannel.SameChannel[gw.Name] = br.SameChannel\n\t\t\tgw.Channels[channel.ID] = channel\n\t\t} else {\n\t\t\t\/\/ if we already have a key and it's not our current direction it means we have a bidirectional inout\n\t\t\tif gw.Channels[ID].Direction != direction {\n\t\t\t\tgw.Channels[ID].Direction = \"inout\"\n\t\t\t}\n\t\t}\n\t\tgw.Channels[ID].SameChannel[gw.Name] = br.SameChannel\n\t}\n}\n\nfunc (gw *Gateway) mapChannels() error {\n\tgw.mapChannelConfig(gw.MyConfig.In, \"in\")\n\tgw.mapChannelConfig(gw.MyConfig.Out, \"out\")\n\tgw.mapChannelConfig(gw.MyConfig.InOut, \"inout\")\n\treturn nil\n}\n\nfunc (gw *Gateway) getDestChannel(msg *config.Message, dest bridge.Bridge) []config.ChannelInfo {\n\tvar channels []config.ChannelInfo\n\n\t\/\/ for messages received from the api check that the gateway is the specified one\n\tif msg.Protocol == \"api\" && gw.Name != msg.Gateway {\n\t\treturn channels\n\t}\n\n\t\/\/ if source channel is in only, do nothing\n\tfor _, channel := range gw.Channels {\n\t\t\/\/ lookup the channel from the message\n\t\tif channel.ID == getChannelID(*msg) {\n\t\t\t\/\/ we only have destinations if the original message is from an \"in\" (sending) channel\n\t\t\tif !strings.Contains(channel.Direction, \"in\") {\n\t\t\t\treturn channels\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\tfor _, channel := range gw.Channels {\n\t\tif _, ok := gw.Channels[getChannelID(*msg)]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ do samechannelgateway logic\n\t\tif channel.SameChannel[msg.Gateway] {\n\t\t\tif msg.Channel == channel.Name && msg.Account != dest.Account {\n\t\t\t\tchannels = append(channels, *channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(channel.Direction, \"out\") && channel.Account == dest.Account && gw.validGatewayDest(msg, channel) {\n\t\t\tchannels = append(channels, *channel)\n\t\t}\n\t}\n\treturn channels\n}\n\nfunc (gw *Gateway) handleMessage(msg config.Message, dest *bridge.Bridge) []*BrMsgID {\n\tvar brMsgIDs []*BrMsgID\n\n\t\/\/ TODO refactor\n\t\/\/ only slack now, check will have to be done in the different bridges.\n\t\/\/ we need to check if we can't use fallback or text in other bridges\n\tif msg.Extra != nil {\n\t\tif dest.Protocol != \"discord\" &&\n\t\t\tdest.Protocol != \"slack\" &&\n\t\t\tdest.Protocol != \"mattermost\" &&\n\t\t\tdest.Protocol != \"telegram\" &&\n\t\t\tdest.Protocol != \"matrix\" {\n\t\t\tif msg.Text == \"\" {\n\t\t\t\treturn brMsgIDs\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ only relay join\/part when configged\n\tif msg.Event == config.EVENT_JOIN_LEAVE && !gw.Bridges[dest.Account].Config.ShowJoinPart {\n\t\treturn brMsgIDs\n\t}\n\t\/\/ broadcast to every out channel (irc QUIT)\n\tif msg.Channel == \"\" && msg.Event != config.EVENT_JOIN_LEAVE {\n\t\tlog.Debug(\"empty channel\")\n\t\treturn brMsgIDs\n\t}\n\toriginchannel := msg.Channel\n\torigmsg := msg\n\tchannels := gw.getDestChannel(&msg, *dest)\n\tfor _, channel := range channels {\n\t\t\/\/ do not send to ourself\n\t\tif channel.ID == getChannelID(origmsg) {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Debugf(\"Sending %#v from %s (%s) to %s (%s)\", msg, msg.Account, originchannel, dest.Account, channel.Name)\n\t\tmsg.Channel = channel.Name\n\t\tmsg.Avatar = gw.modifyAvatar(origmsg, dest)\n\t\tmsg.Username = gw.modifyUsername(origmsg, dest)\n\t\tmsg.ID = \"\"\n\t\tif res, ok := gw.Messages.Get(origmsg.ID); ok {\n\t\t\tIDs := res.([]*BrMsgID)\n\t\t\tfor _, id := range IDs {\n\t\t\t\t\/\/ check protocol, bridge name and channelname\n\t\t\t\t\/\/ for people that reuse the same bridge multiple times. see #342\n\t\t\t\tif dest.Protocol == id.br.Protocol && dest.Name == id.br.Name && channel.ID == id.ChannelID {\n\t\t\t\t\tmsg.ID = id.ID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ for api we need originchannel as channel\n\t\tif dest.Protocol == \"api\" {\n\t\t\tmsg.Channel = originchannel\n\t\t}\n\t\tmID, err := dest.Send(msg)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\t\/\/ append the message ID (mID) from this bridge (dest) to our brMsgIDs slice\n\t\tif mID != \"\" {\n\t\t\tbrMsgIDs = append(brMsgIDs, &BrMsgID{dest, mID, channel.ID})\n\t\t}\n\t}\n\treturn brMsgIDs\n}\n\nfunc (gw *Gateway) ignoreMessage(msg *config.Message) bool {\n\t\/\/ if we don't have the bridge, ignore it\n\tif _, ok := gw.Bridges[msg.Account]; !ok {\n\t\treturn true\n\t}\n\tif msg.Text == \"\" {\n\t\t\/\/ we have an attachment or actual bytes\n\t\tif msg.Extra != nil && (msg.Extra[\"attachments\"] != nil || len(msg.Extra[\"file\"]) > 0) {\n\t\t\treturn false\n\t\t}\n\t\tlog.Debugf(\"ignoring empty message %#v from %s\", msg, msg.Account)\n\t\treturn true\n\t}\n\tfor _, entry := range strings.Fields(gw.Bridges[msg.Account].Config.IgnoreNicks) {\n\t\tif msg.Username == entry {\n\t\t\tlog.Debugf(\"ignoring %s from %s\", msg.Username, msg.Account)\n\t\t\treturn true\n\t\t}\n\t}\n\t\/\/ TODO do not compile regexps everytime\n\tfor _, entry := range strings.Fields(gw.Bridges[msg.Account].Config.IgnoreMessages) {\n\t\tif entry != \"\" {\n\t\t\tre, err := regexp.Compile(entry)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"incorrect regexp %s for %s\", entry, msg.Account)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif re.MatchString(msg.Text) {\n\t\t\t\tlog.Debugf(\"matching %s. ignoring %s from %s\", entry, msg.Text, msg.Account)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (gw *Gateway) modifyUsername(msg config.Message, dest *bridge.Bridge) string {\n\tbr := gw.Bridges[msg.Account]\n\tmsg.Protocol = br.Protocol\n\tif gw.Config.General.StripNick || dest.Config.StripNick {\n\t\tre := regexp.MustCompile(\"[^a-zA-Z0-9]+\")\n\t\tmsg.Username = re.ReplaceAllString(msg.Username, \"\")\n\t}\n\tnick := dest.Config.RemoteNickFormat\n\tif nick == \"\" {\n\t\tnick = gw.Config.General.RemoteNickFormat\n\t}\n\n\t\/\/ loop to replace nicks\n\tfor _, outer := range br.Config.ReplaceNicks {\n\t\tsearch := outer[0]\n\t\treplace := outer[1]\n\t\t\/\/ TODO move compile to bridge init somewhere\n\t\tre, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"regexp in %s failed: %s\", msg.Account, err)\n\t\t\tbreak\n\t\t}\n\t\tmsg.Username = re.ReplaceAllString(msg.Username, replace)\n\t}\n\n\tif len(msg.Username) > 0 {\n\t\t\/\/ fix utf-8 issue #193\n\t\ti := 0\n\t\tfor index := range msg.Username {\n\t\t\tif i == 1 {\n\t\t\t\ti = index\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tnick = strings.Replace(nick, \"{NOPINGNICK}\", msg.Username[:i]+\"​\"+msg.Username[i:], -1)\n\t}\n\tnick = strings.Replace(nick, \"{BRIDGE}\", br.Name, -1)\n\tnick = strings.Replace(nick, \"{PROTOCOL}\", br.Protocol, -1)\n\tnick = strings.Replace(nick, \"{NICK}\", msg.Username, -1)\n\treturn nick\n}\n\nfunc (gw *Gateway) modifyAvatar(msg config.Message, dest *bridge.Bridge) string {\n\ticonurl := gw.Config.General.IconURL\n\tif iconurl == \"\" {\n\t\ticonurl = dest.Config.IconURL\n\t}\n\ticonurl = strings.Replace(iconurl, \"{NICK}\", msg.Username, -1)\n\tif msg.Avatar == \"\" {\n\t\tmsg.Avatar = iconurl\n\t}\n\treturn msg.Avatar\n}\n\nfunc (gw *Gateway) modifyMessage(msg *config.Message) {\n\t\/\/ replace :emoji: to unicode\n\tmsg.Text = emojilib.Replace(msg.Text)\n\tbr := gw.Bridges[msg.Account]\n\t\/\/ loop to replace messages\n\tfor _, outer := range br.Config.ReplaceMessages {\n\t\tsearch := outer[0]\n\t\treplace := outer[1]\n\t\t\/\/ TODO move compile to bridge init somewhere\n\t\tre, err := regexp.Compile(search)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"regexp in %s failed: %s\", msg.Account, err)\n\t\t\tbreak\n\t\t}\n\t\tmsg.Text = re.ReplaceAllString(msg.Text, replace)\n\t}\n\n\t\/\/ messages from api have Gateway specified, don't overwrite\n\tif msg.Protocol != \"api\" {\n\t\tmsg.Gateway = gw.Name\n\t}\n}\n\nfunc (gw *Gateway) handleFiles(msg *config.Message) {\n\tif msg.Extra == nil || gw.Config.General.MediaServerUpload == \"\" {\n\t\treturn\n\t}\n\tif len(msg.Extra[\"file\"]) > 0 {\n\t\tclient := &http.Client{\n\t\t\tTimeout: time.Second * 5,\n\t\t}\n\t\tfor i, f := range msg.Extra[\"file\"] {\n\t\t\tfi := f.(config.FileInfo)\n\t\t\tsha1sum := fmt.Sprintf(\"%x\", sha1.Sum(*fi.Data))\n\t\t\treader := bytes.NewReader(*fi.Data)\n\t\t\turl := gw.Config.General.MediaServerUpload + \"\/\" + sha1sum + \"\/\" + fi.Name\n\t\t\tdurl := gw.Config.General.MediaServerDownload + \"\/\" + sha1sum + \"\/\" + fi.Name\n\t\t\textra := msg.Extra[\"file\"][i].(config.FileInfo)\n\t\t\textra.URL = durl\n\t\t\tmsg.Extra[\"file\"][i] = extra\n\t\t\treq, _ := http.NewRequest(\"PUT\", url, reader)\n\t\t\treq.Header.Set(\"Content-Type\", \"binary\/octet-stream\")\n\t\t\t_, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"mediaserver upload failed: %#v\", err)\n\t\t\t}\n\t\t\tlog.Debugf(\"mediaserver download URL = %s\", durl)\n\t\t}\n\t}\n}\n\nfunc getChannelID(msg config.Message) string {\n\treturn msg.Channel + msg.Account\n}\n\nfunc (gw *Gateway) validGatewayDest(msg *config.Message, channel *config.ChannelInfo) bool {\n\treturn msg.Gateway == gw.Name\n}\n\nfunc isApi(account string) bool {\n\treturn strings.HasPrefix(account, \"api.\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time\n\nimport (\n\t\"syscall\"\n\t\"sync\"\n\t\"os\"\n)\n\n\/\/ BUG(brainman): The Windows implementation assumes that\n\/\/ this year's rules for daylight savings time apply to all previous\n\/\/ and future years as well.\n\n\/\/ TODO(brainman): use GetDynamicTimeZoneInformation, whenever possible (Vista and up),\n\/\/ to improve on situation described in the bug above.\n\ntype zone struct {\n\tname string\n\toffset int\n\tyear int64\n\tmonth, day, dayofweek int\n\thour, minute, second int\n\tabssec int64\n\tprev *zone\n}\n\n\/\/ Populate zone struct with Windows supplied information. Returns true, if data is valid.\nfunc (z *zone) populate(bias, biasdelta int32, d *syscall.Systemtime, name []uint16) (dateisgood bool) {\n\tz.name = syscall.UTF16ToString(name)\n\tz.offset = int(bias)\n\tz.year = int64(d.Year)\n\tz.month = int(d.Month)\n\tz.day = int(d.Day)\n\tz.dayofweek = int(d.DayOfWeek)\n\tz.hour = int(d.Hour)\n\tz.minute = int(d.Minute)\n\tz.second = int(d.Second)\n\tdateisgood = d.Month != 0\n\tif dateisgood {\n\t\tz.offset += int(biasdelta)\n\t}\n\tz.offset = -z.offset * 60\n\treturn\n}\n\n\/\/ Pre-calculate cutoff time in seconds since the Unix epoch, if data is supplied in \"absolute\" format.\nfunc (z *zone) preCalculateAbsSec() {\n\tif z.year != 0 {\n\t\tz.abssec = (&Time{z.year, int(z.month), int(z.day), int(z.hour), int(z.minute), int(z.second), 0, 0, \"\"}).Seconds()\n\t\t\/\/ Time given is in \"local\" time. Adjust it for \"utc\".\n\t\tz.abssec -= int64(z.prev.offset)\n\t}\n}\n\n\/\/ Convert zone cutoff time to sec in number of seconds since the Unix epoch, given particular year.\nfunc (z *zone) cutoffSeconds(year int64) int64 {\n\t\/\/ Windows specifies daylight savings information in \"day in month\" format:\n\t\/\/ z.month is month number (1-12)\n\t\/\/ z.dayofweek is appropriate weekday (Sunday=0 to Saturday=6)\n\t\/\/ z.day is week within the month (1 to 5, where 5 is last week of the month)\n\t\/\/ z.hour, z.minute and z.second are absolute time\n\tt := &Time{year, int(z.month), 1, int(z.hour), int(z.minute), int(z.second), 0, 0, \"\"}\n\tt = SecondsToUTC(t.Seconds())\n\ti := int(z.dayofweek) - t.Weekday\n\tif i < 0 {\n\t\ti += 7\n\t}\n\tt.Day += i\n\tif week := int(z.day) - 1; week < 4 {\n\t\tt.Day += week * 7\n\t} else {\n\t\t\/\/ \"Last\" instance of the day.\n\t\tt.Day += 4 * 7\n\t\tif t.Day > months(year)[t.Month] {\n\t\t\tt.Day -= 7\n\t\t}\n\t}\n\t\/\/ Result is in \"local\" time. Adjust it for \"utc\".\n\treturn t.Seconds() - int64(z.prev.offset)\n}\n\n\/\/ Is t before the cutoff for switching to z?\nfunc (z *zone) isBeforeCutoff(t *Time) bool {\n\tvar coff int64\n\tif z.year == 0 {\n\t\t\/\/ \"day in month\" format used\n\t\tcoff = z.cutoffSeconds(t.Year)\n\t} else {\n\t\t\/\/ \"absolute\" format used\n\t\tcoff = z.abssec\n\t}\n\treturn t.Seconds() < coff\n}\n\ntype zoneinfo struct {\n\tdisabled bool \/\/ daylight saving time is not used locally\n\toffsetIfDisabled int\n\tjanuaryIsStd bool \/\/ is january 1 standard time?\n\tstd, dst zone\n}\n\n\/\/ Pick zone (std or dst) t time belongs to.\nfunc (zi *zoneinfo) pickZone(t *Time) *zone {\n\tz := &zi.std\n\tif tz.januaryIsStd {\n\t\tif !zi.dst.isBeforeCutoff(t) && zi.std.isBeforeCutoff(t) {\n\t\t\t\/\/ after switch to daylight time and before the switch back to standard\n\t\t\tz = &zi.dst\n\t\t}\n\t} else {\n\t\tif zi.std.isBeforeCutoff(t) || !zi.dst.isBeforeCutoff(t) {\n\t\t\t\/\/ before switch to standard time or after the switch back to daylight\n\t\t\tz = &zi.dst\n\t\t}\n\t}\n\treturn z\n}\n\nvar tz zoneinfo\nvar initError os.Error\nvar onceSetupZone sync.Once\n\nfunc setupZone() {\n\tvar i syscall.Timezoneinformation\n\tif _, e := syscall.GetTimeZoneInformation(&i); e != 0 {\n\t\tinitError = os.NewSyscallError(\"GetTimeZoneInformation\", e)\n\t\treturn\n\t}\n\tif !tz.std.populate(i.Bias, i.StandardBias, &i.StandardDate, i.StandardName[0:]) {\n\t\ttz.disabled = true\n\t\ttz.offsetIfDisabled = tz.std.offset\n\t\treturn\n\t}\n\ttz.std.prev = &tz.dst\n\ttz.dst.populate(i.Bias, i.DaylightBias, &i.DaylightDate, i.DaylightName[0:])\n\ttz.dst.prev = &tz.std\n\ttz.std.preCalculateAbsSec()\n\ttz.dst.preCalculateAbsSec()\n\t\/\/ Is january 1 standard time this year?\n\tt := UTC()\n\ttz.januaryIsStd = tz.dst.cutoffSeconds(t.Year) < tz.std.cutoffSeconds(t.Year)\n}\n\n\/\/ Look up the correct time zone (daylight savings or not) for the given unix time, in the current location.\nfunc lookupTimezone(sec int64) (zone string, offset int) {\n\tonceSetupZone.Do(setupZone)\n\tif initError != nil {\n\t\treturn \"\", 0\n\t}\n\tif tz.disabled {\n\t\treturn \"\", tz.offsetIfDisabled\n\t}\n\tt := SecondsToUTC(sec)\n\tz := &tz.std\n\tif tz.std.year == 0 {\n\t\t\/\/ \"day in month\" format used\n\t\tz = tz.pickZone(t)\n\t} else {\n\t\t\/\/ \"absolute\" format used\n\t\tif tz.std.year == t.Year {\n\t\t\t\/\/ we have rule for the year in question\n\t\t\tz = tz.pickZone(t)\n\t\t} else {\n\t\t\t\/\/ we do not have any information for that year,\n\t\t\t\/\/ will assume standard offset all year around\n\t\t}\n\t}\n\treturn z.name, z.offset\n}\n\n\/\/ lookupByName returns the time offset for the\n\/\/ time zone with the given abbreviation. It only considers\n\/\/ time zones that apply to the current system.\nfunc lookupByName(name string) (off int, found bool) {\n\tonceSetupZone.Do(setupZone)\n\tif initError != nil {\n\t\treturn 0, false\n\t}\n\tif tz.disabled {\n\t\treturn tz.offsetIfDisabled, false\n\t}\n\tswitch name {\n\tcase tz.std.name:\n\t\treturn tz.std.offset, true\n\tcase tz.dst.name:\n\t\treturn tz.dst.offset, true\n\t}\n\treturn 0, false\n}\n<commit_msg>time: fix windows build<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage time\n\nimport (\n\t\"syscall\"\n\t\"sync\"\n\t\"os\"\n)\n\n\/\/ BUG(brainman): The Windows implementation assumes that\n\/\/ this year's rules for daylight savings time apply to all previous\n\/\/ and future years as well.\n\n\/\/ TODO(brainman): use GetDynamicTimeZoneInformation, whenever possible (Vista and up),\n\/\/ to improve on situation described in the bug above.\n\ntype zone struct {\n\tname string\n\toffset int\n\tyear int64\n\tmonth, day, dayofweek int\n\thour, minute, second int\n\tabssec int64\n\tprev *zone\n}\n\n\/\/ Populate zone struct with Windows supplied information. Returns true, if data is valid.\nfunc (z *zone) populate(bias, biasdelta int32, d *syscall.Systemtime, name []uint16) (dateisgood bool) {\n\tz.name = syscall.UTF16ToString(name)\n\tz.offset = int(bias)\n\tz.year = int64(d.Year)\n\tz.month = int(d.Month)\n\tz.day = int(d.Day)\n\tz.dayofweek = int(d.DayOfWeek)\n\tz.hour = int(d.Hour)\n\tz.minute = int(d.Minute)\n\tz.second = int(d.Second)\n\tdateisgood = d.Month != 0\n\tif dateisgood {\n\t\tz.offset += int(biasdelta)\n\t}\n\tz.offset = -z.offset * 60\n\treturn\n}\n\n\/\/ Pre-calculate cutoff time in seconds since the Unix epoch, if data is supplied in \"absolute\" format.\nfunc (z *zone) preCalculateAbsSec() {\n\tif z.year != 0 {\n\t\tz.abssec = (&Time{z.year, int(z.month), int(z.day), int(z.hour), int(z.minute), int(z.second), 0, 0, 0, \"\"}).Seconds()\n\t\t\/\/ Time given is in \"local\" time. Adjust it for \"utc\".\n\t\tz.abssec -= int64(z.prev.offset)\n\t}\n}\n\n\/\/ Convert zone cutoff time to sec in number of seconds since the Unix epoch, given particular year.\nfunc (z *zone) cutoffSeconds(year int64) int64 {\n\t\/\/ Windows specifies daylight savings information in \"day in month\" format:\n\t\/\/ z.month is month number (1-12)\n\t\/\/ z.dayofweek is appropriate weekday (Sunday=0 to Saturday=6)\n\t\/\/ z.day is week within the month (1 to 5, where 5 is last week of the month)\n\t\/\/ z.hour, z.minute and z.second are absolute time\n\tt := &Time{year, int(z.month), 1, int(z.hour), int(z.minute), int(z.second), 0, 0, 0, \"\"}\n\tt = SecondsToUTC(t.Seconds())\n\ti := int(z.dayofweek) - t.Weekday\n\tif i < 0 {\n\t\ti += 7\n\t}\n\tt.Day += i\n\tif week := int(z.day) - 1; week < 4 {\n\t\tt.Day += week * 7\n\t} else {\n\t\t\/\/ \"Last\" instance of the day.\n\t\tt.Day += 4 * 7\n\t\tif t.Day > months(year)[t.Month] {\n\t\t\tt.Day -= 7\n\t\t}\n\t}\n\t\/\/ Result is in \"local\" time. Adjust it for \"utc\".\n\treturn t.Seconds() - int64(z.prev.offset)\n}\n\n\/\/ Is t before the cutoff for switching to z?\nfunc (z *zone) isBeforeCutoff(t *Time) bool {\n\tvar coff int64\n\tif z.year == 0 {\n\t\t\/\/ \"day in month\" format used\n\t\tcoff = z.cutoffSeconds(t.Year)\n\t} else {\n\t\t\/\/ \"absolute\" format used\n\t\tcoff = z.abssec\n\t}\n\treturn t.Seconds() < coff\n}\n\ntype zoneinfo struct {\n\tdisabled bool \/\/ daylight saving time is not used locally\n\toffsetIfDisabled int\n\tjanuaryIsStd bool \/\/ is january 1 standard time?\n\tstd, dst zone\n}\n\n\/\/ Pick zone (std or dst) t time belongs to.\nfunc (zi *zoneinfo) pickZone(t *Time) *zone {\n\tz := &zi.std\n\tif tz.januaryIsStd {\n\t\tif !zi.dst.isBeforeCutoff(t) && zi.std.isBeforeCutoff(t) {\n\t\t\t\/\/ after switch to daylight time and before the switch back to standard\n\t\t\tz = &zi.dst\n\t\t}\n\t} else {\n\t\tif zi.std.isBeforeCutoff(t) || !zi.dst.isBeforeCutoff(t) {\n\t\t\t\/\/ before switch to standard time or after the switch back to daylight\n\t\t\tz = &zi.dst\n\t\t}\n\t}\n\treturn z\n}\n\nvar tz zoneinfo\nvar initError os.Error\nvar onceSetupZone sync.Once\n\nfunc setupZone() {\n\tvar i syscall.Timezoneinformation\n\tif _, e := syscall.GetTimeZoneInformation(&i); e != 0 {\n\t\tinitError = os.NewSyscallError(\"GetTimeZoneInformation\", e)\n\t\treturn\n\t}\n\tif !tz.std.populate(i.Bias, i.StandardBias, &i.StandardDate, i.StandardName[0:]) {\n\t\ttz.disabled = true\n\t\ttz.offsetIfDisabled = tz.std.offset\n\t\treturn\n\t}\n\ttz.std.prev = &tz.dst\n\ttz.dst.populate(i.Bias, i.DaylightBias, &i.DaylightDate, i.DaylightName[0:])\n\ttz.dst.prev = &tz.std\n\ttz.std.preCalculateAbsSec()\n\ttz.dst.preCalculateAbsSec()\n\t\/\/ Is january 1 standard time this year?\n\tt := UTC()\n\ttz.januaryIsStd = tz.dst.cutoffSeconds(t.Year) < tz.std.cutoffSeconds(t.Year)\n}\n\n\/\/ Look up the correct time zone (daylight savings or not) for the given unix time, in the current location.\nfunc lookupTimezone(sec int64) (zone string, offset int) {\n\tonceSetupZone.Do(setupZone)\n\tif initError != nil {\n\t\treturn \"\", 0\n\t}\n\tif tz.disabled {\n\t\treturn \"\", tz.offsetIfDisabled\n\t}\n\tt := SecondsToUTC(sec)\n\tz := &tz.std\n\tif tz.std.year == 0 {\n\t\t\/\/ \"day in month\" format used\n\t\tz = tz.pickZone(t)\n\t} else {\n\t\t\/\/ \"absolute\" format used\n\t\tif tz.std.year == t.Year {\n\t\t\t\/\/ we have rule for the year in question\n\t\t\tz = tz.pickZone(t)\n\t\t} else {\n\t\t\t\/\/ we do not have any information for that year,\n\t\t\t\/\/ will assume standard offset all year around\n\t\t}\n\t}\n\treturn z.name, z.offset\n}\n\n\/\/ lookupByName returns the time offset for the\n\/\/ time zone with the given abbreviation. It only considers\n\/\/ time zones that apply to the current system.\nfunc lookupByName(name string) (off int, found bool) {\n\tonceSetupZone.Do(setupZone)\n\tif initError != nil {\n\t\treturn 0, false\n\t}\n\tif tz.disabled {\n\t\treturn tz.offsetIfDisabled, false\n\t}\n\tswitch name {\n\tcase tz.std.name:\n\t\treturn tz.std.offset, true\n\tcase tz.dst.name:\n\t\treturn tz.dst.offset, true\n\t}\n\treturn 0, false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Pilosa Corp.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage roaring\n\nimport ( \n\t\"testing\"\n)\n\nfunc TestUnmarshalBinary(t *testing.T) {\n\tb := NewBitmap()\n\tconfirmedCrashers := []struct {\n\t\tcr []byte\n\t\texpected string\n\t} {\n\t\t{\t\/\/ Checks for int overflow\n\t\t\tcr : []byte(\"<0\\x000\\x00\\x00\\x00\\x00000000000000\" +\n\t\t\t\"0\"),\t\t\t\t\t\t\t\t\t\t\t\t\t\t\/\/\"<000000000000000\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: Maximum operation size exceeded\",\n\t\t},\t\t\n\t\t{\t\/\/ Checks for the zero containers situation\n\t\t\tcr : []byte(\":0\\x000\\x01\\x00\\x00\\x000000\"),\t\t\t\t\t\/\/\":000000\"\n\t\t\texpected : \"reading roaring header: malformed bitmap, key-cardinality slice overruns buffer at 12\",\n\t\t},\t\n\t\t{\t\/\/ The next 5 check for malformed bitmaps\n\t\t\tcr : []byte(\"<0\\x0000000000000000000\" +\n\t\t\t\"\\x00\\x00\\xec\\x00\\x03\\x00\\x00\\x00\\xec000\"),\t\t\t\t\t\/\/\"<000000000000000000ÏÏ000\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 67372036 containers\",\n\t\t},\t\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x00\\x00\\x00\\\\f\\x01\\xb5\\x8d\\x009\\v\\x01\\x00\\x00\\x00\\x00\" +\n\t\t\t\"\\x00\\x00e\\x04\\x00\\x00\\x00\\x04\\xfd\\x00\\x01\\x00\"),\t\t\t\/\/\"<0\\f\u0001µç9\u0001e\u0004˝\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 128625322 containers\",\n\t\t},\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x00\\x00\\x00&x.field safe\"),\t\t\t\/\/\"<0&x.field safe\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 53127850 containers\",\n\t\t},\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x00\\x14\\x00\\x00\\x00\\x80\\xffp\\x05_ 4\\x114089\" +\n\t\t\t\"\\x00\\x00\\xff\\x000\\x00\\x02\\x00\\x00\\x00\\x00\\xff\\u007f\\x00\\x00\\x01\\x10\\x00\\x00j\" +\n\t\t\t\"\\x02\\x00\\x00$\\x04_\\x00\\xff\\u007f\\xff062616163\\x00\" +\t\t\/\/\"<0ġp\u0005_ 4\u00114089ˇ0ˇj\u0002$\u0004_ˇˇ0626161630ø¸ad$j\u0010√\"\n\t\t\t\"0\\x00\\x02\\x00\\x01\\xbf\\x00\\x04\\x00\\xfcad$\\x00\\x00j\\x10\\x00\\x00\\xc3\"),\t\t\t\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 1 containers\",\n\t\t},\n\t\t{\t\/\/ 0 containers because the container is partially formed, but not fully (ie. 3\/12 = 0)\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x03\\x00\\x00\\x00쳫\\v\\x00d9\\v\\x00\\x009\\v\"),\t\t\/\/<0쳫\u000bd9\u000b9\u000b\t\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 0 containers\",\n\t\t},\n\t}\n\n\tfor _, crash := range confirmedCrashers {\n\t\terr := b.UnmarshalBinary(crash.cr)\n\t\tif err.Error() != crash.expected {\n\t\t\tt.Errorf(\"Expected: %s, Got: %s\", crash.expected, err)\n\t\t} \n\t}\n\t\n}<commit_msg>Fixed fuzz_test.go order to mergability<commit_after>\/\/ Copyright 2017 Pilosa Corp.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage roaring\n\nimport ( \n\t\"testing\"\n)\n\nfunc TestUnmarshalBinary(t *testing.T) {\n\tb := NewBitmap()\n\tconfirmedCrashers := []struct {\n\t\tcr []byte\n\t\texpected string\n\t} {\n\t\t{\t\/\/ Checks for the zero containers situation\n\t\t\tcr : []byte(\":0\\x000\\x01\\x00\\x00\\x000000\"),\t\t\t\t\t\/\/\":000000\"\n\t\t\texpected : \"reading roaring header: malformed bitmap, key-cardinality slice overruns buffer at 12\",\n\t\t},\t\n\t\t{\t\/\/ Checks for int overflow\n\t\t\tcr : []byte(\"<0\\x000\\x00\\x00\\x00\\x00000000000000\" +\n\t\t\t\"0\"),\t\t\t\t\t\t\t\t\t\t\t\t\t\t\/\/\"<000000000000000\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: Maximum operation size exceeded\",\n\t\t},\t\n\t\t{\t\/\/ The next 5 check for malformed bitmaps\n\t\t\tcr : []byte(\"<0\\x0000000000000000000\" +\n\t\t\t\"\\x00\\x00\\xec\\x00\\x03\\x00\\x00\\x00\\xec000\"),\t\t\t\t\t\/\/\"<000000000000000000ÏÏ000\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 67372036 containers\",\n\t\t},\t\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x00\\x00\\x00\\\\f\\x01\\xb5\\x8d\\x009\\v\\x01\\x00\\x00\\x00\\x00\" +\n\t\t\t\"\\x00\\x00e\\x04\\x00\\x00\\x00\\x04\\xfd\\x00\\x01\\x00\"),\t\t\t\/\/\"<0\\f\u0001µç9\u0001e\u0004˝\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 128625322 containers\",\n\t\t},\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x00\\x00\\x00&x.field safe\"),\t\t\t\/\/\"<0&x.field safe\"\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 53127850 containers\",\n\t\t},\n\t\t{\n\t\t\tcr : []byte(\"<0\\x00\\x00\\x14\\x00\\x00\\x00\\x80\\xffp\\x05_ 4\\x114089\" +\n\t\t\t\"\\x00\\x00\\xff\\x000\\x00\\x02\\x00\\x00\\x00\\x00\\xff\\u007f\\x00\\x00\\x01\\x10\\x00\\x00j\" +\n\t\t\t\"\\x02\\x00\\x00$\\x04_\\x00\\xff\\u007f\\xff062616163\\x00\" +\t\t\/\/\"<0ġp\u0005_ 4\u00114089ˇ0ˇj\u0002$\u0004_ˇˇ0626161630ø¸ad$j\u0010√\"\n\t\t\t\"0\\x00\\x02\\x00\\x01\\xbf\\x00\\x04\\x00\\xfcad$\\x00\\x00j\\x10\\x00\\x00\\xc3\"),\t\t\t\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 1 containers\",\n\t\t},\n\t\t{\t\/\/ 0 containers because the container is partially formed, but not fully (ie. 3\/12 = 0)\n\t\t\tcr : []byte(\"<0\\x00\\x02\\x03\\x00\\x00\\x00쳫\\v\\x00d9\\v\\x00\\x009\\v\"),\t\t\/\/<0쳫\u000bd9\u000b9\u000b\t\n\t\t\texpected : \"unmarshaling as pilosa roaring: malformed bitmap, key-cardinality not provided for 0 containers\",\n\t\t},\n\t}\n\n\tfor _, crash := range confirmedCrashers {\n\t\terr := b.UnmarshalBinary(crash.cr)\n\t\tif err.Error() != crash.expected {\n\t\t\tt.Errorf(\"Expected: %s, Got: %s\", crash.expected, err)\n\t\t} \n\t}\n\t\n}<|endoftext|>"} {"text":"<commit_before>package caps \/\/ import \"github.com\/docker\/docker\/oci\/caps\"\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/errdefs\"\n\t\"github.com\/syndtr\/gocapability\/capability\"\n)\n\nvar capabilityList Capabilities\n\nfunc init() {\n\tlast := capability.CAP_LAST_CAP\n\t\/\/ hack for RHEL6 which has no \/proc\/sys\/kernel\/cap_last_cap\n\tif last == capability.Cap(63) {\n\t\tlast = capability.CAP_BLOCK_SUSPEND\n\t}\n\tfor _, cap := range capability.List() {\n\t\tif cap > last {\n\t\t\tcontinue\n\t\t}\n\t\tcapabilityList = append(capabilityList,\n\t\t\t&CapabilityMapping{\n\t\t\t\tKey: \"CAP_\" + strings.ToUpper(cap.String()),\n\t\t\t\tValue: cap,\n\t\t\t},\n\t\t)\n\t}\n}\n\ntype (\n\t\/\/ CapabilityMapping maps linux capability name to its value of capability.Cap type\n\t\/\/ Capabilities is one of the security systems in Linux Security Module (LSM)\n\t\/\/ framework provided by the kernel.\n\t\/\/ For more details on capabilities, see http:\/\/man7.org\/linux\/man-pages\/man7\/capabilities.7.html\n\tCapabilityMapping struct {\n\t\tKey string `json:\"key,omitempty\"`\n\t\tValue capability.Cap `json:\"value,omitempty\"`\n\t}\n\t\/\/ Capabilities contains all CapabilityMapping\n\tCapabilities []*CapabilityMapping\n)\n\n\/\/ String returns <key> of CapabilityMapping\nfunc (c *CapabilityMapping) String() string {\n\treturn c.Key\n}\n\n\/\/ GetCapability returns CapabilityMapping which contains specific key\nfunc GetCapability(key string) *CapabilityMapping {\n\tfor _, capp := range capabilityList {\n\t\tif capp.Key == key {\n\t\t\tcpy := *capp\n\t\t\treturn &cpy\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetAllCapabilities returns all of the capabilities\nfunc GetAllCapabilities() []string {\n\toutput := make([]string, len(capabilityList))\n\tfor i, capability := range capabilityList {\n\t\toutput[i] = capability.String()\n\t}\n\treturn output\n}\n\n\/\/ inSlice tests whether a string is contained in a slice of strings or not.\nfunc inSlice(slice []string, s string) bool {\n\tfor _, ss := range slice {\n\t\tif s == ss {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nconst allCapabilities = \"ALL\"\n\n\/\/ NormalizeLegacyCapabilities normalizes, and validates CapAdd\/CapDrop capabilities\n\/\/ by upper-casing them, and adding a CAP_ prefix (if not yet present).\n\/\/\n\/\/ This function also accepts the \"ALL\" magic-value, that's used by CapAdd\/CapDrop.\nfunc NormalizeLegacyCapabilities(caps []string) ([]string, error) {\n\tvar normalized []string\n\n\tvalids := GetAllCapabilities()\n\tfor _, c := range caps {\n\t\tc = strings.ToUpper(c)\n\t\tif c == allCapabilities {\n\t\t\tnormalized = append(normalized, c)\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(c, \"CAP_\") {\n\t\t\tc = \"CAP_\" + c\n\t\t}\n\t\tif !inSlice(valids, c) {\n\t\t\treturn nil, errdefs.InvalidParameter(fmt.Errorf(\"unknown capability: %q\", c))\n\t\t}\n\t\tnormalized = append(normalized, c)\n\t}\n\treturn normalized, nil\n}\n\n\/\/ ValidateCapabilities validates if caps only contains valid capabilities\nfunc ValidateCapabilities(caps []string) error {\n\tvalids := GetAllCapabilities()\n\tfor _, c := range caps {\n\t\tif !inSlice(valids, c) {\n\t\t\treturn errdefs.InvalidParameter(fmt.Errorf(\"unknown capability: %q\", c))\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TweakCapabilities tweaks capabilities by adding, dropping, or overriding\n\/\/ capabilities in the basics capabilities list.\nfunc TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) {\n\tswitch {\n\tcase privileged:\n\t\t\/\/ Privileged containers get all capabilities\n\t\treturn GetAllCapabilities(), nil\n\tcase len(adds) == 0 && len(drops) == 0:\n\t\t\/\/ Nothing to tweak; we're done\n\t\treturn basics, nil\n\t}\n\n\tcapDrop, err := NormalizeLegacyCapabilities(drops)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcapAdd, err := NormalizeLegacyCapabilities(adds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar caps []string\n\n\tswitch {\n\tcase inSlice(capAdd, allCapabilities):\n\t\t\/\/ Add all capabilities except ones on capDrop\n\t\tfor _, c := range GetAllCapabilities() {\n\t\t\tif !inSlice(capDrop, c) {\n\t\t\t\tcaps = append(caps, c)\n\t\t\t}\n\t\t}\n\tcase inSlice(capDrop, allCapabilities):\n\t\t\/\/ \"Drop\" all capabilities; use what's in capAdd instead\n\t\tcaps = capAdd\n\tdefault:\n\t\t\/\/ First drop some capabilities\n\t\tfor _, c := range basics {\n\t\t\tif !inSlice(capDrop, c) {\n\t\t\t\tcaps = append(caps, c)\n\t\t\t}\n\t\t}\n\t\t\/\/ Then add the list of capabilities from capAdd\n\t\tcaps = append(caps, capAdd...)\n\t}\n\treturn caps, nil\n}\n<commit_msg>Temporarily disable CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE<commit_after>package caps \/\/ import \"github.com\/docker\/docker\/oci\/caps\"\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/errdefs\"\n\t\"github.com\/syndtr\/gocapability\/capability\"\n)\n\nvar capabilityList Capabilities\n\nfunc init() {\n\tlast := capability.CAP_LAST_CAP\n\t\/\/ hack for RHEL6 which has no \/proc\/sys\/kernel\/cap_last_cap\n\tif last == capability.Cap(63) {\n\t\tlast = capability.CAP_BLOCK_SUSPEND\n\t}\n\tif last > capability.CAP_AUDIT_READ {\n\t\t\/\/ Prevents docker from setting CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE\n\t\t\/\/ capabilities on privileged (or CAP_ALL) containers on Kernel 5.8 and up.\n\t\t\/\/ While these kernels support these capabilities, the current release of\n\t\t\/\/ runc ships with an older version of \/gocapability\/capability, and does\n\t\t\/\/ not know about them, causing an error to be produced.\n\t\t\/\/\n\t\t\/\/ FIXME remove once https:\/\/github.com\/opencontainers\/runc\/commit\/6dfbe9b80707b1ca188255e8def15263348e0f9a\n\t\t\/\/ is included in a runc release and once we stop supporting containerd 1.3.x\n\t\t\/\/ (which ships with runc v1.0.0-rc92)\n\t\tlast = capability.CAP_AUDIT_READ\n\t}\n\tfor _, cap := range capability.List() {\n\t\tif cap > last {\n\t\t\tcontinue\n\t\t}\n\t\tcapabilityList = append(capabilityList,\n\t\t\t&CapabilityMapping{\n\t\t\t\tKey: \"CAP_\" + strings.ToUpper(cap.String()),\n\t\t\t\tValue: cap,\n\t\t\t},\n\t\t)\n\t}\n}\n\ntype (\n\t\/\/ CapabilityMapping maps linux capability name to its value of capability.Cap type\n\t\/\/ Capabilities is one of the security systems in Linux Security Module (LSM)\n\t\/\/ framework provided by the kernel.\n\t\/\/ For more details on capabilities, see http:\/\/man7.org\/linux\/man-pages\/man7\/capabilities.7.html\n\tCapabilityMapping struct {\n\t\tKey string `json:\"key,omitempty\"`\n\t\tValue capability.Cap `json:\"value,omitempty\"`\n\t}\n\t\/\/ Capabilities contains all CapabilityMapping\n\tCapabilities []*CapabilityMapping\n)\n\n\/\/ String returns <key> of CapabilityMapping\nfunc (c *CapabilityMapping) String() string {\n\treturn c.Key\n}\n\n\/\/ GetCapability returns CapabilityMapping which contains specific key\nfunc GetCapability(key string) *CapabilityMapping {\n\tfor _, capp := range capabilityList {\n\t\tif capp.Key == key {\n\t\t\tcpy := *capp\n\t\t\treturn &cpy\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetAllCapabilities returns all of the capabilities\nfunc GetAllCapabilities() []string {\n\toutput := make([]string, len(capabilityList))\n\tfor i, capability := range capabilityList {\n\t\toutput[i] = capability.String()\n\t}\n\treturn output\n}\n\n\/\/ inSlice tests whether a string is contained in a slice of strings or not.\nfunc inSlice(slice []string, s string) bool {\n\tfor _, ss := range slice {\n\t\tif s == ss {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nconst allCapabilities = \"ALL\"\n\n\/\/ NormalizeLegacyCapabilities normalizes, and validates CapAdd\/CapDrop capabilities\n\/\/ by upper-casing them, and adding a CAP_ prefix (if not yet present).\n\/\/\n\/\/ This function also accepts the \"ALL\" magic-value, that's used by CapAdd\/CapDrop.\nfunc NormalizeLegacyCapabilities(caps []string) ([]string, error) {\n\tvar normalized []string\n\n\tvalids := GetAllCapabilities()\n\tfor _, c := range caps {\n\t\tc = strings.ToUpper(c)\n\t\tif c == allCapabilities {\n\t\t\tnormalized = append(normalized, c)\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(c, \"CAP_\") {\n\t\t\tc = \"CAP_\" + c\n\t\t}\n\t\tif !inSlice(valids, c) {\n\t\t\treturn nil, errdefs.InvalidParameter(fmt.Errorf(\"unknown capability: %q\", c))\n\t\t}\n\t\tnormalized = append(normalized, c)\n\t}\n\treturn normalized, nil\n}\n\n\/\/ ValidateCapabilities validates if caps only contains valid capabilities\nfunc ValidateCapabilities(caps []string) error {\n\tvalids := GetAllCapabilities()\n\tfor _, c := range caps {\n\t\tif !inSlice(valids, c) {\n\t\t\treturn errdefs.InvalidParameter(fmt.Errorf(\"unknown capability: %q\", c))\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TweakCapabilities tweaks capabilities by adding, dropping, or overriding\n\/\/ capabilities in the basics capabilities list.\nfunc TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) {\n\tswitch {\n\tcase privileged:\n\t\t\/\/ Privileged containers get all capabilities\n\t\treturn GetAllCapabilities(), nil\n\tcase len(adds) == 0 && len(drops) == 0:\n\t\t\/\/ Nothing to tweak; we're done\n\t\treturn basics, nil\n\t}\n\n\tcapDrop, err := NormalizeLegacyCapabilities(drops)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcapAdd, err := NormalizeLegacyCapabilities(adds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar caps []string\n\n\tswitch {\n\tcase inSlice(capAdd, allCapabilities):\n\t\t\/\/ Add all capabilities except ones on capDrop\n\t\tfor _, c := range GetAllCapabilities() {\n\t\t\tif !inSlice(capDrop, c) {\n\t\t\t\tcaps = append(caps, c)\n\t\t\t}\n\t\t}\n\tcase inSlice(capDrop, allCapabilities):\n\t\t\/\/ \"Drop\" all capabilities; use what's in capAdd instead\n\t\tcaps = capAdd\n\tdefault:\n\t\t\/\/ First drop some capabilities\n\t\tfor _, c := range basics {\n\t\t\tif !inSlice(capDrop, c) {\n\t\t\t\tcaps = append(caps, c)\n\t\t\t}\n\t\t}\n\t\t\/\/ Then add the list of capabilities from capAdd\n\t\tcaps = append(caps, capAdd...)\n\t}\n\treturn caps, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ocsp implements an OCSP responder based on a generic storage backend.\n\/\/ It provides a couple of sample implementations.\n\/\/ Because OCSP responders handle high query volumes, we have to be careful\n\/\/ about how much logging we do. Error-level logs are reserved for problems\n\/\/ internal to the server, that can be fixed by an administrator. Any type of\n\/\/ incorrect input from a user should be logged and Info or below. For things\n\/\/ that are logged on every request, Debug is the appropriate level.\npackage ocsp\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\/dbconf\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\/sql\"\n\t\"github.com\/cloudflare\/cfssl\/log\"\n\t\"github.com\/jmhodges\/clock\"\n\t\"golang.org\/x\/crypto\/ocsp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"time\"\n)\n\nvar (\n\tmalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}\n\tinternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}\n\ttryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}\n\tsigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}\n\tunauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}\n\n\t\/\/ ErrNotFound indicates the request OCSP response was not found. It is used to\n\t\/\/ indicate that the responder should reply with unauthorizedErrorResponse.\n\tErrNotFound = errors.New(\"Request OCSP Response not found\")\n)\n\n\/\/ Source represents the logical source of OCSP responses, i.e.,\n\/\/ the logic that actually chooses a response based on a request. In\n\/\/ order to create an actual responder, wrap one of these in a Responder\n\/\/ object and pass it to http.Handle. By default the Responder will set\n\/\/ the headers Cache-Control to \"max-age=(response.NextUpdate-now), public, no-transform, must-revalidate\",\n\/\/ Last-Modified to response.ThisUpdate, Expires to response.NextUpdate,\n\/\/ ETag to the SHA256 hash of the response, and Content-Type to\n\/\/ application\/ocsp-response. If you want to override these headers,\n\/\/ or set extra headers, your source should return a http.Header\n\/\/ with the headers you wish to set. If you don't want to set any\n\/\/ extra headers you may return nil instead.\ntype Source interface {\n\tResponse(*ocsp.Request) ([]byte, http.Header, error)\n}\n\n\/\/ An InMemorySource is a map from serialNumber -> der(response)\ntype InMemorySource map[string][]byte\n\n\/\/ Response looks up an OCSP response to provide for a given request.\n\/\/ InMemorySource looks up a response purely based on serial number,\n\/\/ without regard to what issuer the request is asking for.\nfunc (src InMemorySource) Response(request *ocsp.Request) ([]byte, http.Header, error) {\n\tresponse, present := src[request.SerialNumber.String()]\n\tif !present {\n\t\treturn nil, nil, ErrNotFound\n\t}\n\treturn response, nil, nil\n}\n\n\/\/ DBSource represnts a source of OCSP responses backed by the certdb package.\ntype DBSource struct {\n\tAccessor certdb.Accessor\n}\n\n\/\/ NewDBSource creates a new DBSource type with an associated dbAccessor.\nfunc NewDBSource(dbAccessor certdb.Accessor) Source {\n\treturn DBSource{\n\t\tAccessor: dbAccessor,\n\t}\n}\n\n\/\/ Response implements cfssl.ocsp.responder.Source, which returns the\n\/\/ OCSP response in the Database for the given request with the expiration\n\/\/ date furthest in the future.\nfunc (src DBSource) Response(req *ocsp.Request) ([]byte, http.Header, error) {\n\tif req == nil {\n\t\treturn nil, nil, errors.New(\"called with nil request\")\n\t}\n\n\taki := hex.EncodeToString(req.IssuerKeyHash)\n\tsn := req.SerialNumber\n\n\tif sn == nil {\n\t\treturn nil, nil, errors.New(\"request contains no serial\")\n\t}\n\tstrSN := sn.String()\n\n\tif src.Accessor == nil {\n\t\tlog.Errorf(\"No DB Accessor\")\n\t\treturn nil, nil, errors.New(\"called with nil DB accessor\")\n\t}\n\trecords, err := src.Accessor.GetOCSP(strSN, aki)\n\n\t\/\/ Response() logs when there are errors obtaining the OCSP response\n\t\/\/ and returns nil, false.\n\tif err != nil {\n\t\tlog.Errorf(\"Error obtaining OCSP response: %s\", err)\n\t\treturn nil, nil, fmt.Errorf(\"failed to obtain OCSP response: %s\", err)\n\t}\n\n\tif len(records) == 0 {\n\t\treturn nil, nil, ErrNotFound\n\t}\n\n\t\/\/ Response() finds the OCSPRecord with the expiration date furthest in the future.\n\tcur := records[0]\n\tfor _, rec := range records {\n\t\tif rec.Expiry.After(cur.Expiry) {\n\t\t\tcur = rec\n\t\t}\n\t}\n\treturn []byte(cur.Body), nil, nil\n}\n\n\/\/ NewSourceFromFile reads the named file into an InMemorySource.\n\/\/ The file read by this function must contain whitespace-separated OCSP\n\/\/ responses. Each OCSP response must be in base64-encoded DER form (i.e.,\n\/\/ PEM without headers or whitespace). Invalid responses are ignored.\n\/\/ This function pulls the entire file into an InMemorySource.\nfunc NewSourceFromFile(responseFile string) (Source, error) {\n\tfileContents, err := ioutil.ReadFile(responseFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponsesB64 := regexp.MustCompile(\"\\\\s\").Split(string(fileContents), -1)\n\tsrc := InMemorySource{}\n\tfor _, b64 := range responsesB64 {\n\t\t\/\/ if the line\/space is empty just skip\n\t\tif b64 == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tder, tmpErr := base64.StdEncoding.DecodeString(b64)\n\t\tif tmpErr != nil {\n\t\t\tlog.Errorf(\"Base64 decode error %s on: %s\", tmpErr, b64)\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse, tmpErr := ocsp.ParseResponse(der, nil)\n\t\tif tmpErr != nil {\n\t\t\tlog.Errorf(\"OCSP decode error %s on: %s\", tmpErr, b64)\n\t\t\tcontinue\n\t\t}\n\n\t\tsrc[response.SerialNumber.String()] = der\n\t}\n\n\tlog.Infof(\"Read %d OCSP responses\", len(src))\n\treturn src, nil\n}\n\n\/\/ NewSourceFromDB reads the given database configuration file\n\/\/ and creates a database data source for use with the OCSP responder\nfunc NewSourceFromDB(DBConfigFile string) (Source, error) {\n\t\/\/ Load DB from cofiguration file\n\tdb, err := dbconf.DBFromConfig(DBConfigFile)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Create accesor\n\taccessor := sql.NewAccessor(db)\n\tsrc := NewDBSource(accessor)\n\n\treturn src, nil\n}\n\n\/\/ A Responder object provides the HTTP logic to expose a\n\/\/ Source of OCSP responses.\ntype Responder struct {\n\tSource Source\n\tclk clock.Clock\n}\n\n\/\/ NewResponder instantiates a Responder with the give Source.\nfunc NewResponder(source Source) *Responder {\n\treturn &Responder{\n\t\tSource: source,\n\t\tclk: clock.Default(),\n\t}\n}\n\nfunc overrideHeaders(response http.ResponseWriter, headers http.Header) {\n\tfor k, v := range headers {\n\t\tif len(v) == 1 {\n\t\t\tresponse.Header().Set(k, v[0])\n\t\t} else if len(v) > 1 {\n\t\t\tresponse.Header().Del(k)\n\t\t\tfor _, e := range v {\n\t\t\t\tresponse.Header().Add(k, e)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ A Responder can process both GET and POST requests. The mapping\n\/\/ from an OCSP request to an OCSP response is done by the Source;\n\/\/ the Responder simply decodes the request, and passes back whatever\n\/\/ response is provided by the source.\n\/\/ Note: The caller must use http.StripPrefix to strip any path components\n\/\/ (including '\/') on GET requests.\n\/\/ Do not use this responder in conjunction with http.NewServeMux, because the\n\/\/ default handler will try to canonicalize path components by changing any\n\/\/ strings of repeated '\/' into a single '\/', which will break the base64\n\/\/ encoding.\nfunc (rs Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) {\n\t\/\/ By default we set a 'max-age=0, no-cache' Cache-Control header, this\n\t\/\/ is only returned to the client if a valid authorized OCSP response\n\t\/\/ is not found or an error is returned. If a response if found the header\n\t\/\/ will be altered to contain the proper max-age and modifiers.\n\tresponse.Header().Add(\"Cache-Control\", \"max-age=0, no-cache\")\n\t\/\/ Read response from request\n\tvar requestBody []byte\n\tvar err error\n\tswitch request.Method {\n\tcase \"GET\":\n\t\tbase64Request, err := url.QueryUnescape(request.URL.Path)\n\t\tif err != nil {\n\t\t\tlog.Infof(\"Error decoding URL: %s\", request.URL.Path)\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t\/\/ url.QueryUnescape not only unescapes %2B escaping, but it additionally\n\t\t\/\/ turns the resulting '+' into a space, which makes base64 decoding fail.\n\t\t\/\/ So we go back afterwards and turn ' ' back into '+'. This means we\n\t\t\/\/ accept some malformed input that includes ' ' or %20, but that's fine.\n\t\tbase64RequestBytes := []byte(base64Request)\n\t\tfor i := range base64RequestBytes {\n\t\t\tif base64RequestBytes[i] == ' ' {\n\t\t\t\tbase64RequestBytes[i] = '+'\n\t\t\t}\n\t\t}\n\t\t\/\/ In certain situations a UA may construct a request that has a double\n\t\t\/\/ slash between the host name and the base64 request body due to naively\n\t\t\/\/ constructing the request URL. In that case strip the leading slash\n\t\t\/\/ so that we can still decode the request.\n\t\tif len(base64RequestBytes) > 0 && base64RequestBytes[0] == '\/' {\n\t\t\tbase64RequestBytes = base64RequestBytes[1:]\n\t\t}\n\t\trequestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes))\n\t\tif err != nil {\n\t\t\tlog.Infof(\"Error decoding base64 from URL: %s\", string(base64RequestBytes))\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase \"POST\":\n\t\trequestBody, err = ioutil.ReadAll(request.Body)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Problem reading body of POST: %s\", err)\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tresponse.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tb64Body := base64.StdEncoding.EncodeToString(requestBody)\n\tlog.Debugf(\"Received OCSP request: %s\", b64Body)\n\n\t\/\/ All responses after this point will be OCSP.\n\t\/\/ We could check for the content type of the request, but that\n\t\/\/ seems unnecessariliy restrictive.\n\tresponse.Header().Add(\"Content-Type\", \"application\/ocsp-response\")\n\n\t\/\/ Parse response as an OCSP request\n\t\/\/ XXX: This fails if the request contains the nonce extension.\n\t\/\/ We don't intend to support nonces anyway, but maybe we\n\t\/\/ should return unauthorizedRequest instead of malformed.\n\tocspRequest, err := ocsp.ParseRequest(requestBody)\n\tif err != nil {\n\t\tlog.Infof(\"Error decoding request body: %s\", b64Body)\n\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\tresponse.Write(malformedRequestErrorResponse)\n\t\treturn\n\t}\n\n\t\/\/ Look up OCSP response from source\n\tocspResponse, headers, err := rs.Source.Response(ocspRequest)\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\tlog.Infof(\"No response found for request: serial %x, request body %s\",\n\t\t\t\tocspRequest.SerialNumber, b64Body)\n\t\t\tresponse.Write(unauthorizedErrorResponse)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Error retrieving response for request: serial %x, request body %s, error: %s\",\n\t\t\tocspRequest.SerialNumber, b64Body, err)\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write(internalErrorErrorResponse)\n\t\treturn\n\t}\n\n\tparsedResponse, err := ocsp.ParseResponse(ocspResponse, nil)\n\tif err != nil {\n\t\tlog.Errorf(\"Error parsing response for serial %x: %s\",\n\t\t\tocspRequest.SerialNumber, err)\n\t\tresponse.Write(unauthorizedErrorResponse)\n\t\treturn\n\t}\n\n\t\/\/ Write OCSP response to response\n\tresponse.Header().Add(\"Last-Modified\", parsedResponse.ThisUpdate.Format(time.RFC1123))\n\tresponse.Header().Add(\"Expires\", parsedResponse.NextUpdate.Format(time.RFC1123))\n\tnow := rs.clk.Now()\n\tmaxAge := 0\n\tif now.Before(parsedResponse.NextUpdate) {\n\t\tmaxAge = int(parsedResponse.NextUpdate.Sub(now) \/ time.Second)\n\t} else {\n\t\t\/\/ TODO(#530): we want max-age=0 but this is technically an authorized OCSP response\n\t\t\/\/ (despite being stale) and 5019 forbids attaching no-cache\n\t\tmaxAge = 0\n\t}\n\tresponse.Header().Set(\n\t\t\"Cache-Control\",\n\t\tfmt.Sprintf(\n\t\t\t\"max-age=%d, public, no-transform, must-revalidate\",\n\t\t\tmaxAge,\n\t\t),\n\t)\n\tresponseHash := sha256.Sum256(ocspResponse)\n\tresponse.Header().Add(\"ETag\", fmt.Sprintf(\"\\\"%X\\\"\", responseHash))\n\n\tif headers != nil {\n\t\toverrideHeaders(response, headers)\n\t}\n\n\t\/\/ RFC 7232 says that a 304 response must contain the above\n\t\/\/ headers if they would also be sent for a 200 for the same\n\t\/\/ request, so we have to wait until here to do this\n\tif etag := request.Header.Get(\"If-None-Match\"); etag != \"\" {\n\t\tif etag == fmt.Sprintf(\"\\\"%X\\\"\", responseHash) {\n\t\t\tresponse.WriteHeader(http.StatusNotModified)\n\t\t\treturn\n\t\t}\n\t}\n\tresponse.WriteHeader(http.StatusOK)\n\tresponse.Write(ocspResponse)\n}\n<commit_msg>ocsp: Reduce user-triggerable logs to debug.<commit_after>\/\/ Package ocsp implements an OCSP responder based on a generic storage backend.\n\/\/ It provides a couple of sample implementations.\n\/\/ Because OCSP responders handle high query volumes, we have to be careful\n\/\/ about how much logging we do. Error-level logs are reserved for problems\n\/\/ internal to the server, that can be fixed by an administrator. Any type of\n\/\/ incorrect input from a user should be logged and Info or below. For things\n\/\/ that are logged on every request, Debug is the appropriate level.\npackage ocsp\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\/dbconf\"\n\t\"github.com\/cloudflare\/cfssl\/certdb\/sql\"\n\t\"github.com\/cloudflare\/cfssl\/log\"\n\t\"github.com\/jmhodges\/clock\"\n\t\"golang.org\/x\/crypto\/ocsp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"time\"\n)\n\nvar (\n\tmalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}\n\tinternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}\n\ttryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}\n\tsigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}\n\tunauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}\n\n\t\/\/ ErrNotFound indicates the request OCSP response was not found. It is used to\n\t\/\/ indicate that the responder should reply with unauthorizedErrorResponse.\n\tErrNotFound = errors.New(\"Request OCSP Response not found\")\n)\n\n\/\/ Source represents the logical source of OCSP responses, i.e.,\n\/\/ the logic that actually chooses a response based on a request. In\n\/\/ order to create an actual responder, wrap one of these in a Responder\n\/\/ object and pass it to http.Handle. By default the Responder will set\n\/\/ the headers Cache-Control to \"max-age=(response.NextUpdate-now), public, no-transform, must-revalidate\",\n\/\/ Last-Modified to response.ThisUpdate, Expires to response.NextUpdate,\n\/\/ ETag to the SHA256 hash of the response, and Content-Type to\n\/\/ application\/ocsp-response. If you want to override these headers,\n\/\/ or set extra headers, your source should return a http.Header\n\/\/ with the headers you wish to set. If you don't want to set any\n\/\/ extra headers you may return nil instead.\ntype Source interface {\n\tResponse(*ocsp.Request) ([]byte, http.Header, error)\n}\n\n\/\/ An InMemorySource is a map from serialNumber -> der(response)\ntype InMemorySource map[string][]byte\n\n\/\/ Response looks up an OCSP response to provide for a given request.\n\/\/ InMemorySource looks up a response purely based on serial number,\n\/\/ without regard to what issuer the request is asking for.\nfunc (src InMemorySource) Response(request *ocsp.Request) ([]byte, http.Header, error) {\n\tresponse, present := src[request.SerialNumber.String()]\n\tif !present {\n\t\treturn nil, nil, ErrNotFound\n\t}\n\treturn response, nil, nil\n}\n\n\/\/ DBSource represnts a source of OCSP responses backed by the certdb package.\ntype DBSource struct {\n\tAccessor certdb.Accessor\n}\n\n\/\/ NewDBSource creates a new DBSource type with an associated dbAccessor.\nfunc NewDBSource(dbAccessor certdb.Accessor) Source {\n\treturn DBSource{\n\t\tAccessor: dbAccessor,\n\t}\n}\n\n\/\/ Response implements cfssl.ocsp.responder.Source, which returns the\n\/\/ OCSP response in the Database for the given request with the expiration\n\/\/ date furthest in the future.\nfunc (src DBSource) Response(req *ocsp.Request) ([]byte, http.Header, error) {\n\tif req == nil {\n\t\treturn nil, nil, errors.New(\"called with nil request\")\n\t}\n\n\taki := hex.EncodeToString(req.IssuerKeyHash)\n\tsn := req.SerialNumber\n\n\tif sn == nil {\n\t\treturn nil, nil, errors.New(\"request contains no serial\")\n\t}\n\tstrSN := sn.String()\n\n\tif src.Accessor == nil {\n\t\tlog.Errorf(\"No DB Accessor\")\n\t\treturn nil, nil, errors.New(\"called with nil DB accessor\")\n\t}\n\trecords, err := src.Accessor.GetOCSP(strSN, aki)\n\n\t\/\/ Response() logs when there are errors obtaining the OCSP response\n\t\/\/ and returns nil, false.\n\tif err != nil {\n\t\tlog.Errorf(\"Error obtaining OCSP response: %s\", err)\n\t\treturn nil, nil, fmt.Errorf(\"failed to obtain OCSP response: %s\", err)\n\t}\n\n\tif len(records) == 0 {\n\t\treturn nil, nil, ErrNotFound\n\t}\n\n\t\/\/ Response() finds the OCSPRecord with the expiration date furthest in the future.\n\tcur := records[0]\n\tfor _, rec := range records {\n\t\tif rec.Expiry.After(cur.Expiry) {\n\t\t\tcur = rec\n\t\t}\n\t}\n\treturn []byte(cur.Body), nil, nil\n}\n\n\/\/ NewSourceFromFile reads the named file into an InMemorySource.\n\/\/ The file read by this function must contain whitespace-separated OCSP\n\/\/ responses. Each OCSP response must be in base64-encoded DER form (i.e.,\n\/\/ PEM without headers or whitespace). Invalid responses are ignored.\n\/\/ This function pulls the entire file into an InMemorySource.\nfunc NewSourceFromFile(responseFile string) (Source, error) {\n\tfileContents, err := ioutil.ReadFile(responseFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponsesB64 := regexp.MustCompile(\"\\\\s\").Split(string(fileContents), -1)\n\tsrc := InMemorySource{}\n\tfor _, b64 := range responsesB64 {\n\t\t\/\/ if the line\/space is empty just skip\n\t\tif b64 == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tder, tmpErr := base64.StdEncoding.DecodeString(b64)\n\t\tif tmpErr != nil {\n\t\t\tlog.Errorf(\"Base64 decode error %s on: %s\", tmpErr, b64)\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse, tmpErr := ocsp.ParseResponse(der, nil)\n\t\tif tmpErr != nil {\n\t\t\tlog.Errorf(\"OCSP decode error %s on: %s\", tmpErr, b64)\n\t\t\tcontinue\n\t\t}\n\n\t\tsrc[response.SerialNumber.String()] = der\n\t}\n\n\tlog.Infof(\"Read %d OCSP responses\", len(src))\n\treturn src, nil\n}\n\n\/\/ NewSourceFromDB reads the given database configuration file\n\/\/ and creates a database data source for use with the OCSP responder\nfunc NewSourceFromDB(DBConfigFile string) (Source, error) {\n\t\/\/ Load DB from cofiguration file\n\tdb, err := dbconf.DBFromConfig(DBConfigFile)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Create accesor\n\taccessor := sql.NewAccessor(db)\n\tsrc := NewDBSource(accessor)\n\n\treturn src, nil\n}\n\n\/\/ A Responder object provides the HTTP logic to expose a\n\/\/ Source of OCSP responses.\ntype Responder struct {\n\tSource Source\n\tclk clock.Clock\n}\n\n\/\/ NewResponder instantiates a Responder with the give Source.\nfunc NewResponder(source Source) *Responder {\n\treturn &Responder{\n\t\tSource: source,\n\t\tclk: clock.Default(),\n\t}\n}\n\nfunc overrideHeaders(response http.ResponseWriter, headers http.Header) {\n\tfor k, v := range headers {\n\t\tif len(v) == 1 {\n\t\t\tresponse.Header().Set(k, v[0])\n\t\t} else if len(v) > 1 {\n\t\t\tresponse.Header().Del(k)\n\t\t\tfor _, e := range v {\n\t\t\t\tresponse.Header().Add(k, e)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ A Responder can process both GET and POST requests. The mapping\n\/\/ from an OCSP request to an OCSP response is done by the Source;\n\/\/ the Responder simply decodes the request, and passes back whatever\n\/\/ response is provided by the source.\n\/\/ Note: The caller must use http.StripPrefix to strip any path components\n\/\/ (including '\/') on GET requests.\n\/\/ Do not use this responder in conjunction with http.NewServeMux, because the\n\/\/ default handler will try to canonicalize path components by changing any\n\/\/ strings of repeated '\/' into a single '\/', which will break the base64\n\/\/ encoding.\nfunc (rs Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) {\n\t\/\/ By default we set a 'max-age=0, no-cache' Cache-Control header, this\n\t\/\/ is only returned to the client if a valid authorized OCSP response\n\t\/\/ is not found or an error is returned. If a response if found the header\n\t\/\/ will be altered to contain the proper max-age and modifiers.\n\tresponse.Header().Add(\"Cache-Control\", \"max-age=0, no-cache\")\n\t\/\/ Read response from request\n\tvar requestBody []byte\n\tvar err error\n\tswitch request.Method {\n\tcase \"GET\":\n\t\tbase64Request, err := url.QueryUnescape(request.URL.Path)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Error decoding URL: %s\", request.URL.Path)\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t\/\/ url.QueryUnescape not only unescapes %2B escaping, but it additionally\n\t\t\/\/ turns the resulting '+' into a space, which makes base64 decoding fail.\n\t\t\/\/ So we go back afterwards and turn ' ' back into '+'. This means we\n\t\t\/\/ accept some malformed input that includes ' ' or %20, but that's fine.\n\t\tbase64RequestBytes := []byte(base64Request)\n\t\tfor i := range base64RequestBytes {\n\t\t\tif base64RequestBytes[i] == ' ' {\n\t\t\t\tbase64RequestBytes[i] = '+'\n\t\t\t}\n\t\t}\n\t\t\/\/ In certain situations a UA may construct a request that has a double\n\t\t\/\/ slash between the host name and the base64 request body due to naively\n\t\t\/\/ constructing the request URL. In that case strip the leading slash\n\t\t\/\/ so that we can still decode the request.\n\t\tif len(base64RequestBytes) > 0 && base64RequestBytes[0] == '\/' {\n\t\t\tbase64RequestBytes = base64RequestBytes[1:]\n\t\t}\n\t\trequestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes))\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Error decoding base64 from URL: %s\", string(base64RequestBytes))\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase \"POST\":\n\t\trequestBody, err = ioutil.ReadAll(request.Body)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Problem reading body of POST: %s\", err)\n\t\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tresponse.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tb64Body := base64.StdEncoding.EncodeToString(requestBody)\n\tlog.Debugf(\"Received OCSP request: %s\", b64Body)\n\n\t\/\/ All responses after this point will be OCSP.\n\t\/\/ We could check for the content type of the request, but that\n\t\/\/ seems unnecessariliy restrictive.\n\tresponse.Header().Add(\"Content-Type\", \"application\/ocsp-response\")\n\n\t\/\/ Parse response as an OCSP request\n\t\/\/ XXX: This fails if the request contains the nonce extension.\n\t\/\/ We don't intend to support nonces anyway, but maybe we\n\t\/\/ should return unauthorizedRequest instead of malformed.\n\tocspRequest, err := ocsp.ParseRequest(requestBody)\n\tif err != nil {\n\t\tlog.Debugf(\"Error decoding request body: %s\", b64Body)\n\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\tresponse.Write(malformedRequestErrorResponse)\n\t\treturn\n\t}\n\n\t\/\/ Look up OCSP response from source\n\tocspResponse, headers, err := rs.Source.Response(ocspRequest)\n\tif err != nil {\n\t\tif err == ErrNotFound {\n\t\t\tlog.Infof(\"No response found for request: serial %x, request body %s\",\n\t\t\t\tocspRequest.SerialNumber, b64Body)\n\t\t\tresponse.Write(unauthorizedErrorResponse)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Error retrieving response for request: serial %x, request body %s, error: %s\",\n\t\t\tocspRequest.SerialNumber, b64Body, err)\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write(internalErrorErrorResponse)\n\t\treturn\n\t}\n\n\tparsedResponse, err := ocsp.ParseResponse(ocspResponse, nil)\n\tif err != nil {\n\t\tlog.Errorf(\"Error parsing response for serial %x: %s\",\n\t\t\tocspRequest.SerialNumber, err)\n\t\tresponse.Write(unauthorizedErrorResponse)\n\t\treturn\n\t}\n\n\t\/\/ Write OCSP response to response\n\tresponse.Header().Add(\"Last-Modified\", parsedResponse.ThisUpdate.Format(time.RFC1123))\n\tresponse.Header().Add(\"Expires\", parsedResponse.NextUpdate.Format(time.RFC1123))\n\tnow := rs.clk.Now()\n\tmaxAge := 0\n\tif now.Before(parsedResponse.NextUpdate) {\n\t\tmaxAge = int(parsedResponse.NextUpdate.Sub(now) \/ time.Second)\n\t} else {\n\t\t\/\/ TODO(#530): we want max-age=0 but this is technically an authorized OCSP response\n\t\t\/\/ (despite being stale) and 5019 forbids attaching no-cache\n\t\tmaxAge = 0\n\t}\n\tresponse.Header().Set(\n\t\t\"Cache-Control\",\n\t\tfmt.Sprintf(\n\t\t\t\"max-age=%d, public, no-transform, must-revalidate\",\n\t\t\tmaxAge,\n\t\t),\n\t)\n\tresponseHash := sha256.Sum256(ocspResponse)\n\tresponse.Header().Add(\"ETag\", fmt.Sprintf(\"\\\"%X\\\"\", responseHash))\n\n\tif headers != nil {\n\t\toverrideHeaders(response, headers)\n\t}\n\n\t\/\/ RFC 7232 says that a 304 response must contain the above\n\t\/\/ headers if they would also be sent for a 200 for the same\n\t\/\/ request, so we have to wait until here to do this\n\tif etag := request.Header.Get(\"If-None-Match\"); etag != \"\" {\n\t\tif etag == fmt.Sprintf(\"\\\"%X\\\"\", responseHash) {\n\t\t\tresponse.WriteHeader(http.StatusNotModified)\n\t\t\treturn\n\t\t}\n\t}\n\tresponse.WriteHeader(http.StatusOK)\n\tresponse.Write(ocspResponse)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 jrweizhang AT gmail.com. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage opencv\n\n\/\/#include \"opencv.h\"\n\/\/#cgo linux pkg-config: opencv\n\/\/#cgo darwin pkg-config: opencv\n\/\/#cgo freebsd pkg-config: opencv\n\/\/#cgo windows LDFLAGS: -lopencv_core242.dll -lopencv_imgproc242.dll -lopencv_photo242.dll -lopencv_highgui242.dll -lstdc++\nimport \"C\"\nimport (\n\t\/\/\"errors\"\n\t\/\/\"log\"\n\t\"unsafe\"\n)\n\nfunc Resize(src *IplImage, width, height, interpolation int) *IplImage {\n\tif width == 0 && height == 0 {\n\t\tpanic(\"Width and Height cannot be 0 at the same time\")\n\t}\n\tif width == 0 {\n\t\tratio := float64(height) \/ float64(src.Height())\n\t\twidth = int(float64(src.Width()) * ratio)\n\t} else if height == 0 {\n\t\tratio := float64(width) \/ float64(src.Width())\n\t\theight = int(float64(src.Height()) * ratio)\n\t}\n\n\tdst := CreateImage(width, height, src.Depth(), src.Channels())\n\tC.cvResize(unsafe.Pointer(src), unsafe.Pointer(dst), C.int(interpolation))\n\treturn dst\n}\n<commit_msg>Add Crop().<commit_after>\/\/ Copyright 2013 jrweizhang AT gmail.com. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage opencv\n\n\/\/#include \"opencv.h\"\n\/\/#cgo linux pkg-config: opencv\n\/\/#cgo darwin pkg-config: opencv\n\/\/#cgo freebsd pkg-config: opencv\n\/\/#cgo windows LDFLAGS: -lopencv_core242.dll -lopencv_imgproc242.dll -lopencv_photo242.dll -lopencv_highgui242.dll -lstdc++\nimport \"C\"\nimport (\n\t\/\/\"errors\"\n\t\/\/\"log\"\n\t\"unsafe\"\n)\n\nfunc Resize(src *IplImage, width, height, interpolation int) *IplImage {\n\tif width == 0 && height == 0 {\n\t\tpanic(\"Width and Height cannot be 0 at the same time\")\n\t}\n\tif width == 0 {\n\t\tratio := float64(height) \/ float64(src.Height())\n\t\twidth = int(float64(src.Width()) * ratio)\n\t} else if height == 0 {\n\t\tratio := float64(width) \/ float64(src.Width())\n\t\theight = int(float64(src.Height()) * ratio)\n\t}\n\n\tdst := CreateImage(width, height, src.Depth(), src.Channels())\n\tC.cvResize(unsafe.Pointer(src), unsafe.Pointer(dst), C.int(interpolation))\n\treturn dst\n}\n\nfunc NewRect(x, y, width, height int) Rect {\n\tr := C.cvRect(\n\t\tC.int(x),\n\t\tC.int(y),\n\t\tC.int(width),\n\t\tC.int(height),\n\t)\n\treturn Rect(r)\n}\n\nfunc Crop(src *IplImage, x, y, width, height int) *IplImage {\n\trect := NewRect(x, y, width, height)\n\n\tsrc.SetROI(rect)\n\tdest := CreateImage(width, height, src.Depth(), src.Channels())\n\tCopy(src, dest, nil)\n\tsrc.ResetROI()\n\n\treturn dest\n}\n<|endoftext|>"} {"text":"<commit_before>package operation\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/stormcat24\/ecs-formation\/logger\"\n\t\"github.com\/stormcat24\/ecs-formation\/task\"\n\t\"github.com\/stormcat24\/ecs-formation\/util\"\n\t\"github.com\/str1ngs\/ansi\/color\"\n\t\"os\"\n)\n\nvar commandTask = cli.Command{\n\tName: \"task\",\n\tUsage: \"Manage ECS Task Definitions\",\n\tDescription: \"Manage ECS Task Definitions.\",\n\tFlags: []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"params, p\",\n\t\t\tUsage: \"parameters\",\n\t\t},\n\t},\n\tAction: doTask,\n}\n\nfunc doTask(c *cli.Context) {\n\n\tawsManager, err := buildAwsManager()\n\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\toperation, errSubCommand := createOperation(c)\n\n\tif errSubCommand != nil {\n\t\tlogger.Main.Error(color.Red(errSubCommand.Error()))\n\t\tos.Exit(1)\n\t}\n\n\tprojectDir, err := os.Getwd()\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\ttaskController, err := task.NewTaskDefinitionController(awsManager, projectDir, operation.TargetResource, operation.Params)\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\tplans := createTaskPlans(taskController, projectDir)\n\n\tif operation.SubCommand == \"apply\" {\n\t\tresults, errapp := taskController.ApplyTaskDefinitionPlans(plans)\n\n\t\tif errapp != nil {\n\t\t\tlogger.Main.Error(color.Red(errapp.Error()))\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, output := range results {\n\t\t\tlogger.Main.Infof(\"Registered Task Definition '%s'\", *output.TaskDefinition.Family)\n\t\t\tlogger.Main.Info(color.Cyan(util.StringValueWithIndent(output.TaskDefinition, 1)))\n\t\t}\n\t}\n}\n\nfunc createTaskPlans(controller *task.TaskDefinitionController, projectDir string) []*task.TaskUpdatePlan {\n\n\ttaskDefs := controller.GetTaskDefinitionMap()\n\tplans := controller.CreateTaskUpdatePlans(taskDefs)\n\n\tfor _, plan := range plans {\n\t\tlogger.Main.Infof(\"Task Definition '%s'\", plan.Name)\n\n\t\tfor _, add := range plan.NewContainers {\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" (+) %s\", add.Name))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" image: %s\", add.Image))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" ports: %s\", add.Ports))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" environment:\\n%s\", util.StringValueWithIndent(add.Environment, 4)))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" links: %s\", add.Links))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" volumes: %s\", add.Volumes))\n\t\t}\n\n\t\tutil.Println()\n\t}\n\n\treturn plans\n}\n<commit_msg>print task definitions<commit_after>package operation\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/stormcat24\/ecs-formation\/logger\"\n\t\"github.com\/stormcat24\/ecs-formation\/task\"\n\t\"github.com\/stormcat24\/ecs-formation\/util\"\n\t\"github.com\/str1ngs\/ansi\/color\"\n\t\"os\"\n)\n\nvar commandTask = cli.Command{\n\tName: \"task\",\n\tUsage: \"Manage ECS Task Definitions\",\n\tDescription: \"Manage ECS Task Definitions.\",\n\tFlags: []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"params, p\",\n\t\t\tUsage: \"parameters\",\n\t\t},\n\t},\n\tAction: doTask,\n}\n\nfunc doTask(c *cli.Context) {\n\n\tawsManager, err := buildAwsManager()\n\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\toperation, errSubCommand := createOperation(c)\n\n\tif errSubCommand != nil {\n\t\tlogger.Main.Error(color.Red(errSubCommand.Error()))\n\t\tos.Exit(1)\n\t}\n\n\tprojectDir, err := os.Getwd()\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\ttaskController, err := task.NewTaskDefinitionController(awsManager, projectDir, operation.TargetResource, operation.Params)\n\tif err != nil {\n\t\tlogger.Main.Error(color.Red(err.Error()))\n\t\tos.Exit(1)\n\t}\n\n\tplans := createTaskPlans(taskController, projectDir)\n\n\tif operation.SubCommand == \"apply\" {\n\t\tresults, errapp := taskController.ApplyTaskDefinitionPlans(plans)\n\n\t\tif errapp != nil {\n\t\t\tlogger.Main.Error(color.Red(errapp.Error()))\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, output := range results {\n\t\t\tlogger.Main.Infof(\"Registered Task Definition '%s'\", *output.TaskDefinition.Family)\n\t\t\tlogger.Main.Info(color.Cyan(util.StringValueWithIndent(output.TaskDefinition, 1)))\n\t\t}\n\t}\n}\n\nfunc createTaskPlans(controller *task.TaskDefinitionController, projectDir string) []*task.TaskUpdatePlan {\n\n\ttaskDefs := controller.GetTaskDefinitionMap()\n\tplans := controller.CreateTaskUpdatePlans(taskDefs)\n\n\tfor _, plan := range plans {\n\t\tlogger.Main.Infof(\"Task Definition '%v'\", plan.Name)\n\n\t\tfor _, add := range plan.NewContainers {\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" (+) %v\", add.Name))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" image: %v\", add.Image))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" ports: %v\", add.Ports))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" environment:\\n%v\", util.StringValueWithIndent(add.Environment, 4)))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" links: %v\", add.Links))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" volumes: %v\", add.Volumes))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" volumes_from: %v\", add.VolumesFrom))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" memory: %v\", add.Memory))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" cpu_units: %v\", add.CpuUnits))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" essential: %v\", add.Essential))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" entry_point: %v\", add.EntryPoint))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" command: %v\", add.Command))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" disable_networking: %v\", add.DisableNetworking))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" dns_search: %v\", add.DnsSearchDomains))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" dns: %v\", add.DnsServers))\n\t\t\tif len(add.DockerLabels) > 0 {\n\t\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" labels: %v\", util.StringValueWithIndent(add.DockerLabels, 4)))\n\t\t\t}\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" security_opt: %v\", add.DockerSecurityOptions))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" extra_hosts: %v\", add.ExtraHosts))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" hostname: %v\", add.Hostname))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" log_driver: %v\", add.LogDriver))\n\t\t\tif len(add.LogOpt) > 0 {\n\t\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" log_opt: %v\", util.StringValueWithIndent(add.LogOpt, 4)))\n\t\t\t}\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" privileged: %v\", add.Privileged))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" read_only: %v\", add.ReadonlyRootFilesystem))\n\t\t\tif len(add.Ulimits) > 0 {\n\t\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" ulimits: %v\", util.StringValueWithIndent(add.Ulimits, 4)))\n\t\t\t}\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" user: %v\", add.User))\n\t\t\tutil.PrintlnCyan(fmt.Sprintf(\" working_dir: %v\", add.WorkingDirectory))\n\t\t}\n\n\t\tutil.Println()\n\t}\n\n\treturn plans\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/gtr\"\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/parser\/gotest\"\n)\n\nvar (\n\tVersion = \"v2.0.0-dev\"\n\tRevision = \"HEAD\"\n\tBuildTime string\n)\n\nvar (\n\tnoXMLHeader = flag.Bool(\"no-xml-header\", false, \"do not print xml header\")\n\tpackageName = flag.String(\"package-name\", \"\", \"specify a package name (compiled test have no package name in output)\")\n\tgoVersionFlag = flag.String(\"go-version\", \"\", \"specify the value to use for the go.version property in the generated XML\")\n\tsetExitCode = flag.Bool(\"set-exit-code\", false, \"set exit code to 1 if tests failed\")\n\tversion = flag.Bool(\"version\", false, \"print version\")\n\tinput = flag.String(\"in\", \"\", \"read go test log from file\")\n\toutput = flag.String(\"out\", \"\", \"write XML report to file\")\n\n\t\/\/ debug flags\n\tprintEvents = flag.Bool(\"debug.print-events\", false, \"print events generated by the go test parser\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Printf(\"go-junit-report %s %s (%s)\\n\", Version, BuildTime, Revision)\n\t\treturn\n\t}\n\n\tif flag.NArg() != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"%s does not accept positional arguments\\n\", os.Args[0])\n\t\tflag.Usage()\n\t\texitf(\"\")\n\t}\n\n\t\/\/ Read input\n\tvar in io.Reader = os.Stdin\n\tif *input != \"\" {\n\t\tf, err := os.Open(*input)\n\t\tif err != nil {\n\t\t\texitf(\"error opening input file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tin = f\n\t}\n\tevents, err := gotest.Parse(in)\n\tif err != nil {\n\t\texitf(\"error reading input: %s\\n\", err)\n\t}\n\n\tif *printEvents {\n\t\tfor i, ev := range events {\n\t\t\tfmt.Printf(\"%02d: %#v\\n\", i, ev)\n\t\t}\n\t}\n\treport := gtr.FromEvents(events, *packageName)\n\n\thostname, _ := os.Hostname() \/\/ ignore error\n\ttestsuites := gtr.JUnit(report, hostname, time.Now())\n\n\tvar out io.Writer = os.Stdout\n\tif *output != \"\" {\n\t\tf, err := os.Create(*output)\n\t\tif err != nil {\n\t\t\texitf(\"error creating output file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tout = f\n\t}\n\tif !*noXMLHeader {\n\t\tfmt.Fprintf(out, xml.Header)\n\t}\n\n\tenc := xml.NewEncoder(out)\n\tenc.Indent(\"\", \"\\t\")\n\tif err := enc.Encode(testsuites); err != nil {\n\t\texitf(\"error writing XML: %v\", err)\n\t}\n\tif err := enc.Flush(); err != nil {\n\t\texitf(\"error flusing XML: %v\", err)\n\t}\n\tfmt.Fprintf(out, \"\\n\")\n\n\tif *setExitCode && report.HasFailures() {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc exitf(msg string, args ...interface{}) {\n\tif msg != \"\" {\n\t\tfmt.Fprintf(os.Stderr, msg+\"\\n\", args...)\n\t}\n\tos.Exit(2)\n}\n<commit_msg>Add -iocopy flag to copy input to stdout<commit_after>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/gtr\"\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/pkg\/parser\/gotest\"\n)\n\nvar (\n\tVersion = \"v2.0.0-dev\"\n\tRevision = \"HEAD\"\n\tBuildTime string\n)\n\nvar (\n\tnoXMLHeader = flag.Bool(\"no-xml-header\", false, \"do not print xml header\")\n\tpackageName = flag.String(\"package-name\", \"\", \"specify a package name (compiled test have no package name in output)\")\n\tgoVersionFlag = flag.String(\"go-version\", \"\", \"specify the value to use for the go.version property in the generated XML\")\n\tsetExitCode = flag.Bool(\"set-exit-code\", false, \"set exit code to 1 if tests failed\")\n\tversion = flag.Bool(\"version\", false, \"print version\")\n\tinput = flag.String(\"in\", \"\", \"read go test log from file\")\n\toutput = flag.String(\"out\", \"\", \"write XML report to file\")\n\tiocopy = flag.Bool(\"iocopy\", false, \"copy input to stdout; can only be used in conjunction with -out\")\n\n\t\/\/ debug flags\n\tprintEvents = flag.Bool(\"debug.print-events\", false, \"print events generated by the go test parser\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *iocopy && *output == \"\" {\n\t\texitf(\"you must specify an output file with -out when using -iocopy\")\n\t}\n\n\tif *version {\n\t\tfmt.Printf(\"go-junit-report %s %s (%s)\\n\", Version, BuildTime, Revision)\n\t\treturn\n\t}\n\n\tif flag.NArg() != 0 {\n\t\tfmt.Fprintf(os.Stderr, \"%s does not accept positional arguments\\n\", os.Args[0])\n\t\tflag.Usage()\n\t\texitf(\"\")\n\t}\n\n\t\/\/ Read input\n\tvar in io.Reader = os.Stdin\n\tif *input != \"\" {\n\t\tf, err := os.Open(*input)\n\t\tif err != nil {\n\t\t\texitf(\"error opening input file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tin = f\n\t}\n\n\tif *iocopy {\n\t\tin = io.TeeReader(in, os.Stdout)\n\t}\n\n\tevents, err := gotest.Parse(in)\n\tif err != nil {\n\t\texitf(\"error reading input: %s\\n\", err)\n\t}\n\n\tif *printEvents {\n\t\tfor i, ev := range events {\n\t\t\tfmt.Printf(\"%02d: %#v\\n\", i, ev)\n\t\t}\n\t}\n\treport := gtr.FromEvents(events, *packageName)\n\n\thostname, _ := os.Hostname() \/\/ ignore error\n\ttestsuites := gtr.JUnit(report, hostname, time.Now())\n\n\tvar out io.Writer = os.Stdout\n\tif *output != \"\" {\n\t\tf, err := os.Create(*output)\n\t\tif err != nil {\n\t\t\texitf(\"error creating output file: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tout = f\n\t}\n\tif !*noXMLHeader {\n\t\tfmt.Fprintf(out, xml.Header)\n\t}\n\n\tenc := xml.NewEncoder(out)\n\tenc.Indent(\"\", \"\\t\")\n\tif err := enc.Encode(testsuites); err != nil {\n\t\texitf(\"error writing XML: %v\", err)\n\t}\n\tif err := enc.Flush(); err != nil {\n\t\texitf(\"error flusing XML: %v\", err)\n\t}\n\tfmt.Fprintf(out, \"\\n\")\n\n\tif *setExitCode && report.HasFailures() {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc exitf(msg string, args ...interface{}) {\n\tif msg != \"\" {\n\t\tfmt.Fprintf(os.Stderr, msg+\"\\n\", args...)\n\t}\n\tos.Exit(2)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\t\"github.com\/keybase\/go-framed-msgpack-rpc\/rpc\"\n)\n\n\/\/ NewSocket() (Socket, err) is defined in the various platform-specific socket_*.go files.\ntype Socket interface {\n\tBindToSocket() (net.Listener, error)\n\tDialSocket() (net.Conn, error)\n}\n\ntype SocketInfo struct {\n\tlog logger.Logger\n\tbindFile string\n\tdialFiles []string\n}\n\nfunc (s SocketInfo) GetBindFile() string {\n\treturn s.bindFile\n}\n\nfunc (s SocketInfo) GetDialFiles() []string {\n\treturn s.dialFiles\n}\n\ntype SocketWrapper struct {\n\tconn net.Conn\n\txp rpc.Transporter\n\terr error\n}\n\nfunc (g *GlobalContext) MakeLoopbackServer() (l net.Listener, err error) {\n\tg.socketWrapperMu.Lock()\n\tg.LoopbackListener = NewLoopbackListener()\n\tl = g.LoopbackListener\n\tg.socketWrapperMu.Unlock()\n\treturn\n}\n\nfunc (g *GlobalContext) BindToSocket() (net.Listener, error) {\n\treturn g.SocketInfo.BindToSocket()\n}\n\nfunc NewTransportFromSocket(g *GlobalContext, s net.Conn) rpc.Transporter {\n\treturn rpc.NewTransport(s, NewRPCLogFactory(g), WrapError)\n}\n\n\/\/ ResetSocket clears and returns a new socket\nfunc (g *GlobalContext) ResetSocket(clearError bool) (net.Conn, rpc.Transporter, bool, error) {\n\tg.SocketWrapper = nil\n\treturn g.GetSocket(clearError)\n}\n\nfunc (g *GlobalContext) GetSocket(clearError bool) (conn net.Conn, xp rpc.Transporter, isNew bool, err error) {\n\n\tg.Trace(\"GetSocket\", func() error { return err })()\n\n\t\/\/ Protect all global socket wrapper manipulation with a\n\t\/\/ lock to prevent race conditions.\n\tg.socketWrapperMu.Lock()\n\tdefer g.socketWrapperMu.Unlock()\n\n\tneedWrapper := false\n\tif g.SocketWrapper == nil {\n\t\tneedWrapper = true\n\t\tg.Log.Debug(\"| empty socket wrapper; need a new one\")\n\t} else if g.SocketWrapper.xp != nil && !g.SocketWrapper.xp.IsConnected() {\n\t\t\/\/ need reconnect\n\t\tg.Log.Debug(\"| rpc transport isn't connected, reconnecting...\")\n\t\tneedWrapper = true\n\t}\n\n\tif needWrapper {\n\t\tsw := SocketWrapper{}\n\t\tif g.LoopbackListener != nil {\n\t\t\tsw.conn, sw.err = g.LoopbackListener.Dial()\n\t\t} else if g.SocketInfo == nil {\n\t\t\tsw.err = fmt.Errorf(\"Cannot get socket in standalone mode\")\n\t\t} else {\n\t\t\tsw.conn, sw.err = g.SocketInfo.DialSocket()\n\t\t\tg.Log.Debug(\"| DialSocket -> %s\", ErrToOk(sw.err))\n\t\t\tisNew = true\n\t\t}\n\t\tif sw.err == nil {\n\t\t\tsw.xp = NewTransportFromSocket(g, sw.conn)\n\t\t}\n\t\tg.SocketWrapper = &sw\n\t}\n\n\tsw := g.SocketWrapper\n\tif sw.err != nil && clearError {\n\t\tg.SocketWrapper = nil\n\t}\n\terr = sw.err\n\n\treturn sw.conn, sw.xp, isNew, err\n}\n<commit_msg>socket: Export SocketWrapper members<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\t\"github.com\/keybase\/go-framed-msgpack-rpc\/rpc\"\n)\n\n\/\/ NewSocket() (Socket, err) is defined in the various platform-specific socket_*.go files.\ntype Socket interface {\n\tBindToSocket() (net.Listener, error)\n\tDialSocket() (net.Conn, error)\n}\n\ntype SocketInfo struct {\n\tlog logger.Logger\n\tbindFile string\n\tdialFiles []string\n}\n\nfunc (s SocketInfo) GetBindFile() string {\n\treturn s.bindFile\n}\n\nfunc (s SocketInfo) GetDialFiles() []string {\n\treturn s.dialFiles\n}\n\ntype SocketWrapper struct {\n\tConn net.Conn\n\tTransporter rpc.Transporter\n\tErr error\n}\n\nfunc (g *GlobalContext) MakeLoopbackServer() (l net.Listener, err error) {\n\tg.socketWrapperMu.Lock()\n\tg.LoopbackListener = NewLoopbackListener()\n\tl = g.LoopbackListener\n\tg.socketWrapperMu.Unlock()\n\treturn\n}\n\nfunc (g *GlobalContext) BindToSocket() (net.Listener, error) {\n\treturn g.SocketInfo.BindToSocket()\n}\n\nfunc NewTransportFromSocket(g *GlobalContext, s net.Conn) rpc.Transporter {\n\treturn rpc.NewTransport(s, NewRPCLogFactory(g), WrapError)\n}\n\n\/\/ ResetSocket clears and returns a new socket\nfunc (g *GlobalContext) ResetSocket(clearError bool) (net.Conn, rpc.Transporter, bool, error) {\n\tg.SocketWrapper = nil\n\treturn g.GetSocket(clearError)\n}\n\nfunc (g *GlobalContext) GetSocket(clearError bool) (conn net.Conn, xp rpc.Transporter, isNew bool, err error) {\n\n\tg.Trace(\"GetSocket\", func() error { return err })()\n\n\t\/\/ Protect all global socket wrapper manipulation with a\n\t\/\/ lock to prevent race conditions.\n\tg.socketWrapperMu.Lock()\n\tdefer g.socketWrapperMu.Unlock()\n\n\tneedWrapper := false\n\tif g.SocketWrapper == nil {\n\t\tneedWrapper = true\n\t\tg.Log.Debug(\"| empty socket wrapper; need a new one\")\n\t} else if g.SocketWrapper.Transporter != nil && !g.SocketWrapper.Transporter.IsConnected() {\n\t\t\/\/ need reconnect\n\t\tg.Log.Debug(\"| rpc transport isn't connected, reconnecting...\")\n\t\tneedWrapper = true\n\t}\n\n\tif needWrapper {\n\t\tsw := SocketWrapper{}\n\t\tif g.LoopbackListener != nil {\n\t\t\tsw.Conn, sw.Err = g.LoopbackListener.Dial()\n\t\t} else if g.SocketInfo == nil {\n\t\t\tsw.Err = fmt.Errorf(\"Cannot get socket in standalone mode\")\n\t\t} else {\n\t\t\tsw.Conn, sw.Err = g.SocketInfo.DialSocket()\n\t\t\tg.Log.Debug(\"| DialSocket -> %s\", ErrToOk(sw.Err))\n\t\t\tisNew = true\n\t\t}\n\t\tif sw.Err == nil {\n\t\t\tsw.Transporter = NewTransportFromSocket(g, sw.Conn)\n\t\t}\n\t\tg.SocketWrapper = &sw\n\t}\n\n\tsw := g.SocketWrapper\n\tif sw.Err != nil && clearError {\n\t\tg.SocketWrapper = nil\n\t}\n\terr = sw.Err\n\n\treturn sw.Conn, sw.Transporter, isNew, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage packages loads Go packages for inspection and analysis.\n\nThe Load function takes as input a list of patterns and return a list of Package\nstructs describing individual packages matched by those patterns.\nThe LoadMode controls the amount of detail in the loaded packages.\n\nLoad passes most patterns directly to the underlying build tool,\nbut all patterns with the prefix \"query=\", where query is a\nnon-empty string of letters from [a-z], are reserved and may be\ninterpreted as query operators.\n\nTwo query operators are currently supported: \"file\" and \"pattern\".\n\nThe query \"file=path\/to\/file.go\" matches the package or packages enclosing\nthe Go source file path\/to\/file.go. For example \"file=~\/go\/src\/fmt\/print.go\"\nmight return the packages \"fmt\" and \"fmt [fmt.test]\".\n\nThe query \"pattern=string\" causes \"string\" to be passed directly to\nthe underlying build tool. In most cases this is unnecessary,\nbut an application can use Load(\"pattern=\" + x) as an escaping mechanism\nto ensure that x is not interpreted as a query operator if it contains '='.\n\nAll other query operators are reserved for future use and currently\ncause Load to report an error.\n\nThe Package struct provides basic information about the package, including\n\n - ID, a unique identifier for the package in the returned set;\n - GoFiles, the names of the package's Go source files;\n - Imports, a map from source import strings to the Packages they name;\n - Types, the type information for the package's exported symbols;\n - Syntax, the parsed syntax trees for the package's source code; and\n - TypeInfo, the result of a complete type-check of the package syntax trees.\n\n(See the documentation for type Package for the complete list of fields\nand more detailed descriptions.)\n\nFor example,\n\n\tLoad(nil, \"bytes\", \"unicode...\")\n\nreturns four Package structs describing the standard library packages\nbytes, unicode, unicode\/utf16, and unicode\/utf8. Note that one pattern\ncan match multiple packages and that a package might be matched by\nmultiple patterns: in general it is not possible to determine which\npackages correspond to which patterns.\n\nNote that the list returned by Load contains only the packages matched\nby the patterns. Their dependencies can be found by walking the import\ngraph using the Imports fields.\n\nThe Load function can be configured by passing a pointer to a Config as\nthe first argument. A nil Config is equivalent to the zero Config, which\ncauses Load to run in LoadFiles mode, collecting minimal information.\nSee the documentation for type Config for details.\n\nAs noted earlier, the Config.Mode controls the amount of detail\nreported about the loaded packages, with each mode returning all the data of the\nprevious mode with some extra added. See the documentation for type LoadMode\nfor details.\n\nMost tools should pass their command-line arguments (after any flags)\nuninterpreted to the loader, so that the loader can interpret them\naccording to the conventions of the underlying build system.\nSee the Example function for typical usage.\n\n*\/\npackage packages \/\/ import \"golang.org\/x\/tools\/go\/packages\"\n\n\/*\n\nMotivation and design considerations\n\nThe new package's design solves problems addressed by two existing\npackages: go\/build, which locates and describes packages, and\ngolang.org\/x\/tools\/go\/loader, which loads, parses and type-checks them.\nThe go\/build.Package structure encodes too much of the 'go build' way\nof organizing projects, leaving us in need of a data type that describes a\npackage of Go source code independent of the underlying build system.\nWe wanted something that works equally well with go build and vgo, and\nalso other build systems such as Bazel and Blaze, making it possible to\nconstruct analysis tools that work in all these environments.\nTools such as errcheck and staticcheck were essentially unavailable to\nthe Go community at Google, and some of Google's internal tools for Go\nare unavailable externally.\nThis new package provides a uniform way to obtain package metadata by\nquerying each of these build systems, optionally supporting their\npreferred command-line notations for packages, so that tools integrate\nneatly with users' build environments. The Metadata query function\nexecutes an external query tool appropriate to the current workspace.\n\nLoading packages always returns the complete import graph \"all the way down\",\neven if all you want is information about a single package, because the query\nmechanisms of all the build systems we currently support ({go,vgo} list, and\nblaze\/bazel aspect-based query) cannot provide detailed information\nabout one package without visiting all its dependencies too, so there is\nno additional asymptotic cost to providing transitive information.\n(This property might not be true of a hypothetical 5th build system.)\n\nIn calls to TypeCheck, all initial packages, and any package that\ntransitively depends on one of them, must be loaded from source.\nConsider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from\nsource; D may be loaded from export data, and E may not be loaded at all\n(though it's possible that D's export data mentions it, so a\ntypes.Package may be created for it and exposed.)\n\nThe old loader had a feature to suppress type-checking of function\nbodies on a per-package basis, primarily intended to reduce the work of\nobtaining type information for imported packages. Now that imports are\nsatisfied by export data, the optimization no longer seems necessary.\n\nDespite some early attempts, the old loader did not exploit export data,\ninstead always using the equivalent of WholeProgram mode. This was due\nto the complexity of mixing source and export data packages (now\nresolved by the upward traversal mentioned above), and because export data\nfiles were nearly always missing or stale. Now that 'go build' supports\ncaching, all the underlying build systems can guarantee to produce\nexport data in a reasonable (amortized) time.\n\nTest \"main\" packages synthesized by the build system are now reported as\nfirst-class packages, avoiding the need for clients (such as go\/ssa) to\nreinvent this generation logic.\n\nOne way in which go\/packages is simpler than the old loader is in its\ntreatment of in-package tests. In-package tests are packages that\nconsist of all the files of the library under test, plus the test files.\nThe old loader constructed in-package tests by a two-phase process of\nmutation called \"augmentation\": first it would construct and type check\nall the ordinary library packages and type-check the packages that\ndepend on them; then it would add more (test) files to the package and\ntype-check again. This two-phase approach had four major problems:\n1) in processing the tests, the loader modified the library package,\n leaving no way for a client application to see both the test\n package and the library package; one would mutate into the other.\n2) because test files can declare additional methods on types defined in\n the library portion of the package, the dispatch of method calls in\n the library portion was affected by the presence of the test files.\n This should have been a clue that the packages were logically\n different.\n3) this model of \"augmentation\" assumed at most one in-package test\n per library package, which is true of projects using 'go build',\n but not other build systems.\n4) because of the two-phase nature of test processing, all packages that\n import the library package had to be processed before augmentation,\n forcing a \"one-shot\" API and preventing the client from calling Load\n in several times in sequence as is now possible in WholeProgram mode.\n (TypeCheck mode has a similar one-shot restriction for a different reason.)\n\nEarly drafts of this package supported \"multi-shot\" operation.\nAlthough it allowed clients to make a sequence of calls (or concurrent\ncalls) to Load, building up the graph of Packages incrementally,\nit was of marginal value: it complicated the API\n(since it allowed some options to vary across calls but not others),\nit complicated the implementation,\nit cannot be made to work in Types mode, as explained above,\nand it was less efficient than making one combined call (when this is possible).\nAmong the clients we have inspected, none made multiple calls to load\nbut could not be easily and satisfactorily modified to make only a single call.\nHowever, applications changes may be required.\nFor example, the ssadump command loads the user-specified packages\nand in addition the runtime package. It is tempting to simply append\n\"runtime\" to the user-provided list, but that does not work if the user\nspecified an ad-hoc package such as [a.go b.go].\nInstead, ssadump no longer requests the runtime package,\nbut seeks it among the dependencies of the user-specified packages,\nand emits an error if it is not found.\n\nOverlays: The Overlay field in the Config allows providing alternate contents\nfor Go source files, by providing a mapping from file path to contents.\ngo\/packages will pull in new imports added in overlay files when go\/packages\nis run in LoadImports mode or greater.\nOverlay support for the go list driver isn't complete yet: if the file doesn't\nexist on disk, it will only be recognized in an overlay if it is a non-test file\nand the package would be reported even without the overlay.\n\nQuestions & Tasks\n\n- Add GOARCH\/GOOS?\n They are not portable concepts, but could be made portable.\n Our goal has been to allow users to express themselves using the conventions\n of the underlying build system: if the build system honors GOARCH\n during a build and during a metadata query, then so should\n applications built atop that query mechanism.\n Conversely, if the target architecture of the build is determined by\n command-line flags, the application can pass the relevant\n flags through to the build system using a command such as:\n myapp -query_flag=\"--cpu=amd64\" -query_flag=\"--os=darwin\"\n However, this approach is low-level, unwieldy, and non-portable.\n GOOS and GOARCH seem important enough to warrant a dedicated option.\n\n- How should we handle partial failures such as a mixture of good and\n malformed patterns, existing and non-existent packages, successful and\n failed builds, import failures, import cycles, and so on, in a call to\n Load?\n\n- Support bazel, blaze, and go1.10 list, not just go1.11 list.\n\n- Handle (and test) various partial success cases, e.g.\n a mixture of good packages and:\n invalid patterns\n nonexistent packages\n empty packages\n packages with malformed package or import declarations\n unreadable files\n import cycles\n other parse errors\n type errors\n Make sure we record errors at the correct place in the graph.\n\n- Missing packages among initial arguments are not reported.\n Return bogus packages for them, like golist does.\n\n- \"undeclared name\" errors (for example) are reported out of source file\n order. I suspect this is due to the breadth-first resolution now used\n by go\/types. Is that a bug? Discuss with gri.\n\n*\/\n<commit_msg>go\/packages: remove obsolete comment about LoadMode in doc.go<commit_after>\/\/ Copyright 2018 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage packages loads Go packages for inspection and analysis.\n\nThe Load function takes as input a list of patterns and return a list of Package\nstructs describing individual packages matched by those patterns.\nThe LoadMode controls the amount of detail in the loaded packages.\n\nLoad passes most patterns directly to the underlying build tool,\nbut all patterns with the prefix \"query=\", where query is a\nnon-empty string of letters from [a-z], are reserved and may be\ninterpreted as query operators.\n\nTwo query operators are currently supported: \"file\" and \"pattern\".\n\nThe query \"file=path\/to\/file.go\" matches the package or packages enclosing\nthe Go source file path\/to\/file.go. For example \"file=~\/go\/src\/fmt\/print.go\"\nmight return the packages \"fmt\" and \"fmt [fmt.test]\".\n\nThe query \"pattern=string\" causes \"string\" to be passed directly to\nthe underlying build tool. In most cases this is unnecessary,\nbut an application can use Load(\"pattern=\" + x) as an escaping mechanism\nto ensure that x is not interpreted as a query operator if it contains '='.\n\nAll other query operators are reserved for future use and currently\ncause Load to report an error.\n\nThe Package struct provides basic information about the package, including\n\n - ID, a unique identifier for the package in the returned set;\n - GoFiles, the names of the package's Go source files;\n - Imports, a map from source import strings to the Packages they name;\n - Types, the type information for the package's exported symbols;\n - Syntax, the parsed syntax trees for the package's source code; and\n - TypeInfo, the result of a complete type-check of the package syntax trees.\n\n(See the documentation for type Package for the complete list of fields\nand more detailed descriptions.)\n\nFor example,\n\n\tLoad(nil, \"bytes\", \"unicode...\")\n\nreturns four Package structs describing the standard library packages\nbytes, unicode, unicode\/utf16, and unicode\/utf8. Note that one pattern\ncan match multiple packages and that a package might be matched by\nmultiple patterns: in general it is not possible to determine which\npackages correspond to which patterns.\n\nNote that the list returned by Load contains only the packages matched\nby the patterns. Their dependencies can be found by walking the import\ngraph using the Imports fields.\n\nThe Load function can be configured by passing a pointer to a Config as\nthe first argument. A nil Config is equivalent to the zero Config, which\ncauses Load to run in LoadFiles mode, collecting minimal information.\nSee the documentation for type Config for details.\n\nAs noted earlier, the Config.Mode controls the amount of detail\nreported about the loaded packages. See the documentation for type LoadMode\nfor details.\n\nMost tools should pass their command-line arguments (after any flags)\nuninterpreted to the loader, so that the loader can interpret them\naccording to the conventions of the underlying build system.\nSee the Example function for typical usage.\n\n*\/\npackage packages \/\/ import \"golang.org\/x\/tools\/go\/packages\"\n\n\/*\n\nMotivation and design considerations\n\nThe new package's design solves problems addressed by two existing\npackages: go\/build, which locates and describes packages, and\ngolang.org\/x\/tools\/go\/loader, which loads, parses and type-checks them.\nThe go\/build.Package structure encodes too much of the 'go build' way\nof organizing projects, leaving us in need of a data type that describes a\npackage of Go source code independent of the underlying build system.\nWe wanted something that works equally well with go build and vgo, and\nalso other build systems such as Bazel and Blaze, making it possible to\nconstruct analysis tools that work in all these environments.\nTools such as errcheck and staticcheck were essentially unavailable to\nthe Go community at Google, and some of Google's internal tools for Go\nare unavailable externally.\nThis new package provides a uniform way to obtain package metadata by\nquerying each of these build systems, optionally supporting their\npreferred command-line notations for packages, so that tools integrate\nneatly with users' build environments. The Metadata query function\nexecutes an external query tool appropriate to the current workspace.\n\nLoading packages always returns the complete import graph \"all the way down\",\neven if all you want is information about a single package, because the query\nmechanisms of all the build systems we currently support ({go,vgo} list, and\nblaze\/bazel aspect-based query) cannot provide detailed information\nabout one package without visiting all its dependencies too, so there is\nno additional asymptotic cost to providing transitive information.\n(This property might not be true of a hypothetical 5th build system.)\n\nIn calls to TypeCheck, all initial packages, and any package that\ntransitively depends on one of them, must be loaded from source.\nConsider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from\nsource; D may be loaded from export data, and E may not be loaded at all\n(though it's possible that D's export data mentions it, so a\ntypes.Package may be created for it and exposed.)\n\nThe old loader had a feature to suppress type-checking of function\nbodies on a per-package basis, primarily intended to reduce the work of\nobtaining type information for imported packages. Now that imports are\nsatisfied by export data, the optimization no longer seems necessary.\n\nDespite some early attempts, the old loader did not exploit export data,\ninstead always using the equivalent of WholeProgram mode. This was due\nto the complexity of mixing source and export data packages (now\nresolved by the upward traversal mentioned above), and because export data\nfiles were nearly always missing or stale. Now that 'go build' supports\ncaching, all the underlying build systems can guarantee to produce\nexport data in a reasonable (amortized) time.\n\nTest \"main\" packages synthesized by the build system are now reported as\nfirst-class packages, avoiding the need for clients (such as go\/ssa) to\nreinvent this generation logic.\n\nOne way in which go\/packages is simpler than the old loader is in its\ntreatment of in-package tests. In-package tests are packages that\nconsist of all the files of the library under test, plus the test files.\nThe old loader constructed in-package tests by a two-phase process of\nmutation called \"augmentation\": first it would construct and type check\nall the ordinary library packages and type-check the packages that\ndepend on them; then it would add more (test) files to the package and\ntype-check again. This two-phase approach had four major problems:\n1) in processing the tests, the loader modified the library package,\n leaving no way for a client application to see both the test\n package and the library package; one would mutate into the other.\n2) because test files can declare additional methods on types defined in\n the library portion of the package, the dispatch of method calls in\n the library portion was affected by the presence of the test files.\n This should have been a clue that the packages were logically\n different.\n3) this model of \"augmentation\" assumed at most one in-package test\n per library package, which is true of projects using 'go build',\n but not other build systems.\n4) because of the two-phase nature of test processing, all packages that\n import the library package had to be processed before augmentation,\n forcing a \"one-shot\" API and preventing the client from calling Load\n in several times in sequence as is now possible in WholeProgram mode.\n (TypeCheck mode has a similar one-shot restriction for a different reason.)\n\nEarly drafts of this package supported \"multi-shot\" operation.\nAlthough it allowed clients to make a sequence of calls (or concurrent\ncalls) to Load, building up the graph of Packages incrementally,\nit was of marginal value: it complicated the API\n(since it allowed some options to vary across calls but not others),\nit complicated the implementation,\nit cannot be made to work in Types mode, as explained above,\nand it was less efficient than making one combined call (when this is possible).\nAmong the clients we have inspected, none made multiple calls to load\nbut could not be easily and satisfactorily modified to make only a single call.\nHowever, applications changes may be required.\nFor example, the ssadump command loads the user-specified packages\nand in addition the runtime package. It is tempting to simply append\n\"runtime\" to the user-provided list, but that does not work if the user\nspecified an ad-hoc package such as [a.go b.go].\nInstead, ssadump no longer requests the runtime package,\nbut seeks it among the dependencies of the user-specified packages,\nand emits an error if it is not found.\n\nOverlays: The Overlay field in the Config allows providing alternate contents\nfor Go source files, by providing a mapping from file path to contents.\ngo\/packages will pull in new imports added in overlay files when go\/packages\nis run in LoadImports mode or greater.\nOverlay support for the go list driver isn't complete yet: if the file doesn't\nexist on disk, it will only be recognized in an overlay if it is a non-test file\nand the package would be reported even without the overlay.\n\nQuestions & Tasks\n\n- Add GOARCH\/GOOS?\n They are not portable concepts, but could be made portable.\n Our goal has been to allow users to express themselves using the conventions\n of the underlying build system: if the build system honors GOARCH\n during a build and during a metadata query, then so should\n applications built atop that query mechanism.\n Conversely, if the target architecture of the build is determined by\n command-line flags, the application can pass the relevant\n flags through to the build system using a command such as:\n myapp -query_flag=\"--cpu=amd64\" -query_flag=\"--os=darwin\"\n However, this approach is low-level, unwieldy, and non-portable.\n GOOS and GOARCH seem important enough to warrant a dedicated option.\n\n- How should we handle partial failures such as a mixture of good and\n malformed patterns, existing and non-existent packages, successful and\n failed builds, import failures, import cycles, and so on, in a call to\n Load?\n\n- Support bazel, blaze, and go1.10 list, not just go1.11 list.\n\n- Handle (and test) various partial success cases, e.g.\n a mixture of good packages and:\n invalid patterns\n nonexistent packages\n empty packages\n packages with malformed package or import declarations\n unreadable files\n import cycles\n other parse errors\n type errors\n Make sure we record errors at the correct place in the graph.\n\n- Missing packages among initial arguments are not reported.\n Return bogus packages for them, like golist does.\n\n- \"undeclared name\" errors (for example) are reported out of source file\n order. I suspect this is due to the breadth-first resolution now used\n by go\/types. Is that a bug? Discuss with gri.\n\n*\/\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage saltpack\n\nimport (\n\t\"crypto\/hmac\"\n)\n\n\/\/ RawBoxKey is the raw byte-representation of what a box key should\n\/\/ look like, a static 32-byte buffer. Used for NaCl Box.\ntype RawBoxKey [32]byte\n\nfunc rawBoxKeyFromSlice(slice []byte) (*RawBoxKey, error) {\n\tvar result RawBoxKey\n\tif len(slice) != len(result) {\n\t\treturn nil, ErrBadBoxKey\n\t}\n\tcopy(result[:], slice)\n\treturn &result, nil\n}\n\n\/\/ SymmetricKey is a template for a symmetric key, a 32-byte static\n\/\/ buffer. Used for NaCl SecretBox.\ntype SymmetricKey [32]byte\n\nfunc symmetricKeyFromSlice(slice []byte) (*SymmetricKey, error) {\n\tvar result SymmetricKey\n\tif len(slice) != len(result) {\n\t\treturn nil, ErrBadSymmetricKey\n\t}\n\tcopy(result[:], slice)\n\treturn &result, nil\n}\n\n\/\/ KIDExtractor key types can output a key ID corresponding to the\n\/\/ key.\ntype KIDExtractor interface {\n\t\/\/ ToKID outputs the \"key ID\" that corresponds to this key.\n\t\/\/ You can do whatever you'd like here, but probably it makes sense just\n\t\/\/ to output the public key as is.\n\tToKID() []byte\n}\n\n\/\/ BoxPublicKey is an generic interface to NaCl's public key Box function.\ntype BoxPublicKey interface {\n\tKIDExtractor\n\n\t\/\/ ToRawBoxKeyPointer returns this public key as a *[32]byte,\n\t\/\/ for use with nacl.box.Seal\n\tToRawBoxKeyPointer() *RawBoxKey\n\n\t\/\/ CreateEmphemeralKey creates an ephemeral key of the same type,\n\t\/\/ but totally random.\n\tCreateEphemeralKey() (BoxSecretKey, error)\n\n\t\/\/ HideIdentity returns true if we should hide the identity of this\n\t\/\/ key in our output message format.\n\tHideIdentity() bool\n}\n\n\/\/ BoxPrecomputedSharedKey results from a Precomputation below.\ntype BoxPrecomputedSharedKey interface {\n\tUnbox(nonce *Nonce, msg []byte) ([]byte, error)\n\tBox(nonce *Nonce, msg []byte) []byte\n}\n\n\/\/ BoxSecretKey is the secret key corresponding to a BoxPublicKey\ntype BoxSecretKey interface {\n\n\t\/\/ Box boxes up data, sent from this secret key, and to the receiver\n\t\/\/ specified.\n\tBox(receiver BoxPublicKey, nonce *Nonce, msg []byte) []byte\n\n\t\/\/ Unobx opens up the box, using this secret key as the receiver key\n\t\/\/ abd the give public key as the sender key.\n\tUnbox(sender BoxPublicKey, nonce *Nonce, msg []byte) ([]byte, error)\n\n\t\/\/ GetPublicKey gets the public key associated with this secret key.\n\tGetPublicKey() BoxPublicKey\n\n\t\/\/ Precompute computes a DH with the given key\n\tPrecompute(sender BoxPublicKey) BoxPrecomputedSharedKey\n}\n\n\/\/ SigningSecretKey is a secret NaCl key that can sign messages.\ntype SigningSecretKey interface {\n\t\/\/ Sign signs message with this secret key.\n\tSign(message []byte) ([]byte, error)\n\n\t\/\/ PublicKey gets the public key associated with this secret key.\n\tPublicKey() SigningPublicKey\n}\n\n\/\/ SigningPublicKey is a public NaCl key that can verify\n\/\/ signatures.\ntype SigningPublicKey interface {\n\tKIDExtractor\n\n\t\/\/ Verify verifies that signature is a valid signature of message for\n\t\/\/ this public key.\n\tVerify(message []byte, signature []byte) error\n}\n\n\/\/ Keyring is an interface used with decryption; it is called to\n\/\/ recover public or private keys during the decryption process.\n\/\/ Calls can block on network action.\ntype Keyring interface {\n\t\/\/ LookupBoxSecretKey looks in the Keyring for the secret key corresponding\n\t\/\/ to one of the given Key IDs. Returns the index and the key on success,\n\t\/\/ or -1 and nil on failure.\n\tLookupBoxSecretKey(kids [][]byte) (int, BoxSecretKey)\n\n\t\/\/ LookupBoxPublicKey returns a public key given the specified key ID.\n\t\/\/ For most cases, the key ID will be the key itself.\n\tLookupBoxPublicKey(kid []byte) BoxPublicKey\n\n\t\/\/ GetAllSecretKeys returns all keys, needed if we want to support\n\t\/\/ \"hidden\" receivers via trial and error\n\tGetAllSecretKeys() []BoxSecretKey\n\n\t\/\/ ImportEphemeralKey imports the ephemeral key into\n\t\/\/ BoxPublicKey format. This key has never been seen before, so\n\t\/\/ will be ephemeral.\n\tImportEphemeralKey(kid []byte) BoxPublicKey\n}\n\n\/\/ SigKeyring is an interface used during verification to find\n\/\/ the public key for the signer of a message.\ntype SigKeyring interface {\n\t\/\/ LookupSigningPublicKey returns a public signing key for the specified key ID.\n\tLookupSigningPublicKey(kid []byte) SigningPublicKey\n}\n\n\/\/ SecretKeyEqual returns true if the two secret keys are equal.\nfunc SecretKeyEqual(sk1, sk2 BoxSecretKey) bool {\n\treturn PublicKeyEqual(sk1.GetPublicKey(), sk2.GetPublicKey())\n}\n\n\/\/ PublicKeyEqual returns true if the two public keys are equal.\nfunc PublicKeyEqual(pk1, pk2 BoxPublicKey) bool {\n\treturn KIDEqual(pk1, pk2)\n}\n\n\/\/ KIDEqual return true if the KIDs for two keys are equal.\nfunc KIDEqual(k1, k2 KIDExtractor) bool {\n\treturn hmac.Equal(k1.ToKID(), k2.ToKID())\n}\n<commit_msg>delete SecretKeyEqual<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage saltpack\n\nimport (\n\t\"crypto\/hmac\"\n)\n\n\/\/ RawBoxKey is the raw byte-representation of what a box key should\n\/\/ look like, a static 32-byte buffer. Used for NaCl Box.\ntype RawBoxKey [32]byte\n\nfunc rawBoxKeyFromSlice(slice []byte) (*RawBoxKey, error) {\n\tvar result RawBoxKey\n\tif len(slice) != len(result) {\n\t\treturn nil, ErrBadBoxKey\n\t}\n\tcopy(result[:], slice)\n\treturn &result, nil\n}\n\n\/\/ SymmetricKey is a template for a symmetric key, a 32-byte static\n\/\/ buffer. Used for NaCl SecretBox.\ntype SymmetricKey [32]byte\n\nfunc symmetricKeyFromSlice(slice []byte) (*SymmetricKey, error) {\n\tvar result SymmetricKey\n\tif len(slice) != len(result) {\n\t\treturn nil, ErrBadSymmetricKey\n\t}\n\tcopy(result[:], slice)\n\treturn &result, nil\n}\n\n\/\/ KIDExtractor key types can output a key ID corresponding to the\n\/\/ key.\ntype KIDExtractor interface {\n\t\/\/ ToKID outputs the \"key ID\" that corresponds to this key.\n\t\/\/ You can do whatever you'd like here, but probably it makes sense just\n\t\/\/ to output the public key as is.\n\tToKID() []byte\n}\n\n\/\/ BoxPublicKey is an generic interface to NaCl's public key Box function.\ntype BoxPublicKey interface {\n\tKIDExtractor\n\n\t\/\/ ToRawBoxKeyPointer returns this public key as a *[32]byte,\n\t\/\/ for use with nacl.box.Seal\n\tToRawBoxKeyPointer() *RawBoxKey\n\n\t\/\/ CreateEmphemeralKey creates an ephemeral key of the same type,\n\t\/\/ but totally random.\n\tCreateEphemeralKey() (BoxSecretKey, error)\n\n\t\/\/ HideIdentity returns true if we should hide the identity of this\n\t\/\/ key in our output message format.\n\tHideIdentity() bool\n}\n\n\/\/ BoxPrecomputedSharedKey results from a Precomputation below.\ntype BoxPrecomputedSharedKey interface {\n\tUnbox(nonce *Nonce, msg []byte) ([]byte, error)\n\tBox(nonce *Nonce, msg []byte) []byte\n}\n\n\/\/ BoxSecretKey is the secret key corresponding to a BoxPublicKey\ntype BoxSecretKey interface {\n\n\t\/\/ Box boxes up data, sent from this secret key, and to the receiver\n\t\/\/ specified.\n\tBox(receiver BoxPublicKey, nonce *Nonce, msg []byte) []byte\n\n\t\/\/ Unobx opens up the box, using this secret key as the receiver key\n\t\/\/ abd the give public key as the sender key.\n\tUnbox(sender BoxPublicKey, nonce *Nonce, msg []byte) ([]byte, error)\n\n\t\/\/ GetPublicKey gets the public key associated with this secret key.\n\tGetPublicKey() BoxPublicKey\n\n\t\/\/ Precompute computes a DH with the given key\n\tPrecompute(sender BoxPublicKey) BoxPrecomputedSharedKey\n}\n\n\/\/ SigningSecretKey is a secret NaCl key that can sign messages.\ntype SigningSecretKey interface {\n\t\/\/ Sign signs message with this secret key.\n\tSign(message []byte) ([]byte, error)\n\n\t\/\/ PublicKey gets the public key associated with this secret key.\n\tPublicKey() SigningPublicKey\n}\n\n\/\/ SigningPublicKey is a public NaCl key that can verify\n\/\/ signatures.\ntype SigningPublicKey interface {\n\tKIDExtractor\n\n\t\/\/ Verify verifies that signature is a valid signature of message for\n\t\/\/ this public key.\n\tVerify(message []byte, signature []byte) error\n}\n\n\/\/ Keyring is an interface used with decryption; it is called to\n\/\/ recover public or private keys during the decryption process.\n\/\/ Calls can block on network action.\ntype Keyring interface {\n\t\/\/ LookupBoxSecretKey looks in the Keyring for the secret key corresponding\n\t\/\/ to one of the given Key IDs. Returns the index and the key on success,\n\t\/\/ or -1 and nil on failure.\n\tLookupBoxSecretKey(kids [][]byte) (int, BoxSecretKey)\n\n\t\/\/ LookupBoxPublicKey returns a public key given the specified key ID.\n\t\/\/ For most cases, the key ID will be the key itself.\n\tLookupBoxPublicKey(kid []byte) BoxPublicKey\n\n\t\/\/ GetAllSecretKeys returns all keys, needed if we want to support\n\t\/\/ \"hidden\" receivers via trial and error\n\tGetAllSecretKeys() []BoxSecretKey\n\n\t\/\/ ImportEphemeralKey imports the ephemeral key into\n\t\/\/ BoxPublicKey format. This key has never been seen before, so\n\t\/\/ will be ephemeral.\n\tImportEphemeralKey(kid []byte) BoxPublicKey\n}\n\n\/\/ SigKeyring is an interface used during verification to find\n\/\/ the public key for the signer of a message.\ntype SigKeyring interface {\n\t\/\/ LookupSigningPublicKey returns a public signing key for the specified key ID.\n\tLookupSigningPublicKey(kid []byte) SigningPublicKey\n}\n\n\/\/ PublicKeyEqual returns true if the two public keys are equal.\nfunc PublicKeyEqual(pk1, pk2 BoxPublicKey) bool {\n\treturn KIDEqual(pk1, pk2)\n}\n\n\/\/ KIDEqual return true if the KIDs for two keys are equal.\nfunc KIDEqual(k1, k2 KIDExtractor) bool {\n\treturn hmac.Equal(k1.ToKID(), k2.ToKID())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\".\/api\"\n\t\".\/miniserver\"\n\t\".\/utils\"\n)\n\nvar supportedContentTypes [][]string = [][]string{\n\t{miniserver.ContentJson, \".json\"},\n\t{miniserver.ContentJavascript, \".js\"},\n\t{miniserver.ContentHtml, \".htm\", \".html\", \".shtml\"},\n\t{miniserver.ContentCss, \".css\"},\n\t{miniserver.ContentXIcon, \".ico\"},\n\t{miniserver.ContentSVG, \".svg\"},\n}\n\nvar deviceData *api.DeviceData\n\nfunc onConnect(client *miniserver.Client) *miniserver.Response {\n\tvar response *miniserver.Response\n\tvar url string = client.Url[1:]\n\tvar urls []string = strings.Split(url, \"\/\")\n\n\tvar realPath string\n\tfor i := range urls {\n\t\tif !utils.StringEmpty(urls[i]) &&\n\t\t\t(utils.DirExists(fmt.Sprintf(\"dist\/%s\", urls[i])) ||\n\t\t\t\tutils.FileExists(fmt.Sprintf(\"dist\/%s\", urls[i]))) {\n\t\t\tvar realPathBuf []string = []string{\"dist\"}\n\t\t\tfor x := i; x < len(urls); x++ {\n\t\t\t\trealPathBuf = append(realPathBuf, urls[x])\n\t\t\t}\n\t\t\trealPath = strings.Join(realPathBuf, \"\/\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif urls[0] != \"\/\" && urls[0] != \"..\" && urls[0] != \"serverdata\" {\n\t\tif !utils.StringEmpty(realPath) {\n\t\t\turl = realPath\n\t\t}\n\t\tif utils.FileExists(url) {\n\t\t\tresponse = client.ResponseFile(url)\n\t\t\tresponse.SetContentType(\"text\/plain\")\n\n\t\ttypesLoop:\n\t\t\tfor _, contentType := range supportedContentTypes {\n\t\t\t\tfor i := 1; i < len(contentType); i++ {\n\t\t\t\t\tvar extension string = contentType[i]\n\t\t\t\t\tif len(url) > len(extension) &&\n\t\t\t\t\t\turl[len(url)-len(extension):] == extension {\n\t\t\t\t\t\tresponse.SetContentType(contentType[0])\n\t\t\t\t\t\tbreak typesLoop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(urls[len(urls)-1]) == 0 {\n\t\turls = urls[:len(urls)-1]\n\t}\n\tif response == nil {\n\t\tif len(urls) >= 4 && urls[1] == \"api\" && len(realPath) == 0 {\n\t\t\tvar resApi api.Api\n\n\t\t\tif urls[0] == \"kerneladiutor\" {\n\t\t\t\tresApi = api.NewKernelAdiutorApi(client,\n\t\t\t\t\tstrings.Join(urls[3:], \"\/\"), urls[2], deviceData)\n\t\t\t}\n\n\t\t\tif resApi != nil {\n\t\t\t\tresponse = resApi.GetResponse()\n\t\t\t}\n\t\t}\n\n\t\tif response == nil {\n\t\t\tresponse = client.ResponseFile(\"dist\/index.html\")\n\t\t}\n\t}\n\treturn response\n}\n\nfunc main() {\n\tif _, err := os.Stat(\"serverdata\"); err != nil {\n\t\terr = os.Mkdir(\".\/serverdata\", 0755)\n\t\tutils.Panic(err)\n\t}\n\n\tdeviceData = api.NewDeviceData()\n\tif deviceData == nil {\n\t\tpanic(\"Can't open devicedate db\")\n\t}\n\n\tvar server *miniserver.MiniServer = miniserver.NewServer(3000)\n\tserver.StartListening(onConnect)\n}\n<commit_msg>server: Add hammerjs workaround<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\".\/api\"\n\t\".\/miniserver\"\n\t\".\/utils\"\n)\n\nvar supportedContentTypes [][]string = [][]string{\n\t{miniserver.ContentJson, \".json\"},\n\t{miniserver.ContentJavascript, \".js\"},\n\t{miniserver.ContentHtml, \".htm\", \".html\", \".shtml\"},\n\t{miniserver.ContentCss, \".css\"},\n\t{miniserver.ContentXIcon, \".ico\"},\n\t{miniserver.ContentSVG, \".svg\"},\n}\n\nvar deviceData *api.DeviceData\n\nfunc onConnect(client *miniserver.Client) *miniserver.Response {\n\tvar response *miniserver.Response\n\tvar url string = client.Url[1:]\n\n\t\/\/ Hammerjs workaround\n\tif url == \"hammer.min.js.map\" {\n\t\turl = \"node_modules\/hammerjs\/hammer.min.js.map\"\n\t}\n\n\tvar urls []string = strings.Split(url, \"\/\")\n\n\tvar realPath string\n\tfor i := range urls {\n\t\tif !utils.StringEmpty(urls[i]) &&\n\t\t\t(utils.DirExists(fmt.Sprintf(\"dist\/%s\", urls[i])) ||\n\t\t\t\tutils.FileExists(fmt.Sprintf(\"dist\/%s\", urls[i]))) {\n\t\t\tvar realPathBuf []string = []string{\"dist\"}\n\t\t\tfor x := i; x < len(urls); x++ {\n\t\t\t\trealPathBuf = append(realPathBuf, urls[x])\n\t\t\t}\n\t\t\trealPath = strings.Join(realPathBuf, \"\/\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif urls[0] != \"\/\" && urls[0] != \"..\" && urls[0] != \"serverdata\" {\n\t\tif !utils.StringEmpty(realPath) {\n\t\t\turl = realPath\n\t\t}\n\t\tif utils.FileExists(url) {\n\t\t\tresponse = client.ResponseFile(url)\n\t\t\tresponse.SetContentType(\"text\/plain\")\n\n\t\ttypesLoop:\n\t\t\tfor _, contentType := range supportedContentTypes {\n\t\t\t\tfor i := 1; i < len(contentType); i++ {\n\t\t\t\t\tvar extension string = contentType[i]\n\t\t\t\t\tif len(url) > len(extension) &&\n\t\t\t\t\t\turl[len(url)-len(extension):] == extension {\n\t\t\t\t\t\tresponse.SetContentType(contentType[0])\n\t\t\t\t\t\tbreak typesLoop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(urls[len(urls)-1]) == 0 {\n\t\turls = urls[:len(urls)-1]\n\t}\n\tif response == nil {\n\t\tif len(urls) >= 4 && urls[1] == \"api\" && len(realPath) == 0 {\n\t\t\tvar resApi api.Api\n\n\t\t\tif urls[0] == \"kerneladiutor\" {\n\t\t\t\tresApi = api.NewKernelAdiutorApi(client,\n\t\t\t\t\tstrings.Join(urls[3:], \"\/\"), urls[2], deviceData)\n\t\t\t}\n\n\t\t\tif resApi != nil {\n\t\t\t\tresponse = resApi.GetResponse()\n\t\t\t}\n\t\t}\n\n\t\tif response == nil {\n\t\t\tresponse = client.ResponseFile(\"dist\/index.html\")\n\t\t}\n\t}\n\treturn response\n}\n\nfunc main() {\n\tif _, err := os.Stat(\"serverdata\"); err != nil {\n\t\terr = os.Mkdir(\".\/serverdata\", 0755)\n\t\tutils.Panic(err)\n\t}\n\n\tdeviceData = api.NewDeviceData()\n\tif deviceData == nil {\n\t\tpanic(\"Can't open devicedate db\")\n\t}\n\n\tvar server *miniserver.MiniServer = miniserver.NewServer(3000)\n\tserver.StartListening(onConnect)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Build transaction context in BlockRes<commit_after><|endoftext|>"} {"text":"<commit_before>package runner\n\nimport (\n\t\"archive\/zip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/lhchavez\/quark\/common\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n)\n\ntype CaseResult struct {\n\tVerdict string `json:\"verdict\"`\n\tName string `json:\"name\"`\n\tMaxScore float64 `json:\"max_score\"`\n\tScore float64 `json:\"score\"`\n\tMeta map[string]RunMetadata `json:\"meta\"`\n}\n\ntype GroupResult struct {\n\tGroup string `json:\"group\"`\n\tMaxScore float64 `json:\"max_score\"`\n\tScore float64 `json:\"score\"`\n\tCases []CaseResult `json:\"cases\"`\n}\n\ntype RunResult struct {\n\tVerdict string `json:\"verdict\"`\n\tCompileError *string `json:\"compile_error,omitempty\"`\n\tCompileMeta map[string]RunMetadata `json:\"compile_meta\"`\n\tScore float64 `json:\"score\"`\n\tTime float64 `json:\"time\"`\n\tWallTime float64 `json:\"wall_time\"`\n\tMemory int `json:\"memory\"`\n\tGroups []GroupResult `json:\"groups\"`\n}\n\ntype binary struct {\n\tname string\n\tlanguage string\n}\n\nfunc Grade(\n\tctx *common.Context,\n\tclient *http.Client,\n\tbaseURL *url.URL,\n\trun *common.Run,\n\tinput common.Input,\n\tsandbox Sandbox,\n) (*RunResult, error) {\n\trunResult := &RunResult{\n\t\tVerdict: \"JE\",\n\t}\n\tif !sandbox.Supported() {\n\t\treturn runResult, errors.New(\"Sandbox not supported\")\n\t}\n\trunRoot := path.Join(\n\t\tctx.Config.Runner.RuntimePath,\n\t\t\"grade\",\n\t\tstrconv.FormatUint(run.AttemptID, 10),\n\t)\n\tif !ctx.Config.Runner.PreserveFiles {\n\t\tdefer os.RemoveAll(runRoot)\n\t}\n\n\tctx.Log.Info(\"Running\", \"run\", run)\n\n\tbinaries := []binary{\n\t\t{\"Main\", run.Language},\n\t}\n\n\tgeneratedFiles := make([]string, 0)\n\n\t\/\/ Setup all source files.\n\tmainBinPath := path.Join(runRoot, \"Main\", \"bin\")\n\tif err := os.MkdirAll(mainBinPath, 0755); err != nil {\n\t\treturn runResult, err\n\t}\n\tmainSourceFile := path.Join(mainBinPath, fmt.Sprintf(\"Main.%s\", run.Language))\n\terr := ioutil.WriteFile(mainSourceFile, []byte(run.Source), 0644)\n\tif err != nil {\n\t\treturn runResult, err\n\t}\n\n\tvalidatorBinPath := path.Join(runRoot, \"validator\", \"bin\")\n\tif input.Settings().Validator.Name == \"custom\" {\n\t\tif err := os.MkdirAll(validatorBinPath, 0755); err != nil {\n\t\t\treturn runResult, err\n\t\t}\n\t\tvalidatorLang := *input.Settings().Validator.Lang\n\t\tvalidatorFileName := fmt.Sprintf(\"validator.%s\", validatorLang)\n\t\tvalidatorSourceFile := path.Join(validatorBinPath, validatorFileName)\n\t\terr := os.Link(path.Join(input.Path(), validatorFileName), validatorSourceFile)\n\t\tif err != nil {\n\t\t\treturn runResult, err\n\t\t}\n\t\tbinaries = append(binaries, binary{\"validator\", validatorLang})\n\t}\n\n\trunResult.CompileMeta = make(map[string]RunMetadata)\n\n\tfor _, b := range binaries {\n\t\tbinRoot := path.Join(runRoot, b.name)\n\t\tbinPath := path.Join(binRoot, \"bin\")\n\t\tsourceFile := path.Join(binPath, fmt.Sprintf(\"%s.%s\", b.name, b.language))\n\n\t\tcompileMeta, err := sandbox.Compile(\n\t\t\tctx,\n\t\t\tb.language,\n\t\t\t[]string{sourceFile},\n\t\t\tbinPath,\n\t\t\tpath.Join(binRoot, \"compile.out\"),\n\t\t\tpath.Join(binRoot, \"compile.err\"),\n\t\t\tpath.Join(binRoot, \"compile.meta\"),\n\t\t\tb.name,\n\t\t\t[]string{},\n\t\t)\n\t\tgeneratedFiles = append(\n\t\t\tgeneratedFiles,\n\t\t\tpath.Join(b.name, \"compile.out\"),\n\t\t\tpath.Join(b.name, \"compile.err\"),\n\t\t\tpath.Join(b.name, \"compile.meta\"),\n\t\t)\n\n\t\tif compileMeta != nil {\n\t\t\trunResult.CompileMeta[b.name] = *compileMeta\n\t\t}\n\n\t\tif err != nil || compileMeta.Verdict != \"OK\" {\n\t\t\tctx.Log.Error(\"Compile error\", \"err\", err, \"compileMeta\", compileMeta)\n\t\t\trunResult.Verdict = \"CE\"\n\t\t\tcompileErrorFile := \"compile.err\"\n\t\t\tif b.language == \"pas\" {\n\t\t\t\t\/\/ Lazarus writes the output of the compile error in compile.out.\n\t\t\t\tcompileErrorFile = \"compile.out\"\n\t\t\t} else {\n\t\t\t\tcompileErrorFile = \"compile.err\"\n\t\t\t}\n\t\t\tcompileError := getCompileError(path.Join(binRoot, compileErrorFile))\n\t\t\trunResult.CompileError = &compileError\n\t\t\treturn runResult, err\n\t\t}\n\t}\n\n\tgroupResults := make([]GroupResult, len(input.Settings().Cases))\n\tmaxScore := run.MaxScore\n\trunResult.Verdict = \"OK\"\n\twallTimeLimit := (float64)(input.Settings().Limits.OverallWallTimeLimit \/ 1000.0)\n\tfor i, group := range input.Settings().Cases {\n\t\tcaseResults := make([]CaseResult, len(group.Cases))\n\t\tfor j, caseData := range group.Cases {\n\t\t\tvar runMeta *RunMetadata\n\t\t\tif runResult.WallTime > wallTimeLimit {\n\t\t\t\trunMeta = &RunMetadata{\n\t\t\t\t\tVerdict: \"TLE\",\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trunMeta, err = sandbox.Run(\n\t\t\t\t\tctx,\n\t\t\t\t\tinput,\n\t\t\t\t\trun.Language,\n\t\t\t\t\tmainBinPath,\n\t\t\t\t\tpath.Join(input.Path(), \"in\", fmt.Sprintf(\"%s.in\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.out\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.err\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.meta\", caseData.Name)),\n\t\t\t\t\t\"Main\",\n\t\t\t\t\tnil,\n\t\t\t\t\tnil,\n\t\t\t\t\tnil,\n\t\t\t\t\t[]string{},\n\t\t\t\t\tmap[string]string{},\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Error(\"failed to run \"+caseData.Name, \"err\", err)\n\t\t\t\t}\n\t\t\t\tgeneratedFiles = append(\n\t\t\t\t\tgeneratedFiles,\n\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\tfmt.Sprintf(\"%s.err\", caseData.Name),\n\t\t\t\t\tfmt.Sprintf(\"%s.meta\", caseData.Name),\n\t\t\t\t)\n\t\t\t}\n\t\t\trunResult.Verdict = worseVerdict(runResult.Verdict, runMeta.Verdict)\n\t\t\trunResult.Time += runMeta.Time\n\t\t\trunResult.WallTime += runMeta.WallTime\n\t\t\trunResult.Memory = max(runResult.Memory, runMeta.Memory)\n\n\t\t\tcaseResults[j] = CaseResult{\n\t\t\t\tName: caseData.Name,\n\t\t\t\tMaxScore: maxScore * caseData.Weight,\n\t\t\t\tVerdict: runMeta.Verdict,\n\t\t\t\tMeta: map[string]RunMetadata{\n\t\t\t\t\t\"Main\": *runMeta,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\tgroupResults[i] = GroupResult{\n\t\t\tGroup: group.Name,\n\t\t\tMaxScore: maxScore * group.Weight,\n\t\t\tScore: 0,\n\t\t\tCases: caseResults,\n\t\t}\n\t}\n\n\t\/\/ Validate outputs.\n\tfor i, group := range input.Settings().Cases {\n\t\tcorrect := true\n\t\tscore := 0.0\n\t\tfor j, caseData := range group.Cases {\n\t\t\tcaseResults := groupResults[i].Cases[j]\n\t\t\tif caseResults.Verdict == \"OK\" {\n\t\t\t\tcontestantPath := path.Join(\n\t\t\t\t\trunRoot, fmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t)\n\t\t\t\tif input.Settings().Validator.Name == \"custom\" {\n\t\t\t\t\toriginalInputFile := path.Join(\n\t\t\t\t\t\tinput.Path(),\n\t\t\t\t\t\t\"in\",\n\t\t\t\t\t\tfmt.Sprintf(\"%s.in\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\toriginalOutputFile := path.Join(\n\t\t\t\t\t\tinput.Path(),\n\t\t\t\t\t\t\"out\",\n\t\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\trunMetaFile := path.Join(runRoot, fmt.Sprintf(\"%s.meta\", caseData.Name))\n\t\t\t\t\tvalidateMeta, err := sandbox.Run(\n\t\t\t\t\t\tctx,\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\t*input.Settings().Validator.Lang,\n\t\t\t\t\t\tvalidatorBinPath,\n\t\t\t\t\t\tcontestantPath,\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.out\", caseData.Name)),\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.err\", caseData.Name)),\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.meta\", caseData.Name)),\n\t\t\t\t\t\t\"validator\",\n\t\t\t\t\t\t&originalInputFile,\n\t\t\t\t\t\t&originalOutputFile,\n\t\t\t\t\t\t&runMetaFile,\n\t\t\t\t\t\t[]string{},\n\t\t\t\t\t\tmap[string]string{},\n\t\t\t\t\t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tctx.Log.Error(\"failed to validate \"+caseData.Name, \"err\", err)\n\t\t\t\t\t}\n\t\t\t\t\tgeneratedFiles = append(\n\t\t\t\t\t\tgeneratedFiles,\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.out\", caseData.Name),\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.err\", caseData.Name),\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.meta\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\tif validateMeta.Verdict != \"OK\" {\n\t\t\t\t\t\t\/\/ If the validator did not exit cleanly, assume an empty output.\n\t\t\t\t\t\tcontestantPath = \"\/dev\/null\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontestantPath = path.Join(\n\t\t\t\t\t\t\trunRoot,\n\t\t\t\t\t\t\t\"validator\",\n\t\t\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontestantFd, err := os.Open(contestantPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Warn(\"Error opening file\", \"path\", contestantPath, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer contestantFd.Close()\n\t\t\t\texpectedPath := path.Join(\n\t\t\t\t\tinput.Path(), \"out\", fmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t)\n\t\t\t\texpectedFd, err := os.Open(expectedPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Warn(\"Error opening file\", \"path\", expectedPath, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer expectedFd.Close()\n\t\t\t\trunScore, err := CalculateScore(\n\t\t\t\t\t&input.Settings().Validator,\n\t\t\t\t\tcontestantFd,\n\t\t\t\t\texpectedFd,\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Debug(\"error comparing values\", \"err\", err)\n\t\t\t\t}\n\t\t\t\tcaseResults.Score = maxScore * runScore * caseData.Weight\n\t\t\t\tscore += runScore * caseData.Weight\n\t\t\t\tif runScore == 0 {\n\t\t\t\t\tcorrect = false\n\t\t\t\t}\n\t\t\t\tif runScore != 1 {\n\t\t\t\t\trunResult.Verdict = worseVerdict(runResult.Verdict, \"PA\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif correct {\n\t\t\tgroupResults[i].Score = maxScore * score\n\t\t\trunResult.Score += groupResults[i].Score\n\t\t}\n\t}\n\n\trunResult.Groups = groupResults\n\n\tif runResult.Verdict == \"PA\" && runResult.Score == 0 {\n\t\trunResult.Verdict = \"WA\"\n\t} else if runResult.Verdict == \"OK\" {\n\t\trunResult.Verdict = \"AC\"\n\t}\n\n\tctx.Log.Debug(\"Finished running\", \"results\", runResult)\n\tfilesURL, err := baseURL.Parse(fmt.Sprintf(\"run\/%d\/files\/\", run.AttemptID))\n\tif err != nil {\n\t\treturn runResult, err\n\t}\n\tif err := uploadFiles(\n\t\tctx,\n\t\tclient,\n\t\tfilesURL.String(),\n\t\trunRoot,\n\t\tinput,\n\t\tgeneratedFiles,\n\t); err != nil {\n\t\tctx.Log.Debug(\"uploadFiles failed\", \"err\", err)\n\t\treturn runResult, err\n\t}\n\n\treturn runResult, nil\n}\n\nfunc uploadFiles(\n\tctx *common.Context,\n\tclient *http.Client,\n\tuploadURL string,\n\trunRoot string,\n\tinput common.Input,\n\tfiles []string,\n) error {\n\tpath, err := createZipFile(runRoot, files)\n\tif path != \"\" {\n\t\tdefer os.Remove(path)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tfd, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := client.Post(uploadURL, \"application\/zip\", fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\treturn nil\n}\n\nfunc createZipFile(runRoot string, files []string) (string, error) {\n\tzipFd, err := ioutil.TempFile(runRoot, \".results_zip\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tzipPath := zipFd.Name()\n\tdefer zipFd.Close()\n\tzip := zip.NewWriter(zipFd)\n\tfor _, file := range files {\n\t\tf, err := os.Open(path.Join(runRoot, file))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer f.Close()\n\t\tzf, err := zip.Create(file)\n\t\tif err != nil {\n\t\t\treturn zipPath, err\n\t\t}\n\t\tif _, err := io.Copy(zf, f); err != nil {\n\t\t\treturn zipPath, err\n\t\t}\n\t}\n\treturn zipPath, zip.Close()\n}\n\nfunc getCompileError(errorFile string) string {\n\tfd, err := os.Open(errorFile)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tdefer fd.Close()\n\tbytes, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(bytes)\n}\n\nfunc worseVerdict(a, b string) string {\n\tverdictList := []string{\n\t\t\"JE\",\n\t\t\"CE\",\n\t\t\"MLE\",\n\t\t\"RFE\",\n\t\t\"RTE\",\n\t\t\"TLE\",\n\t\t\"OLE\",\n\t\t\"WA\",\n\t\t\"PA\",\n\t\t\"AC\",\n\t\t\"OK\",\n\t}\n\tidxA := sliceIndex(len(verdictList),\n\t\tfunc(i int) bool { return verdictList[i] == a })\n\tidxB := sliceIndex(len(verdictList),\n\t\tfunc(i int) bool { return verdictList[i] == b })\n\treturn verdictList[min(idxA, idxB)]\n}\n\nfunc sliceIndex(limit int, predicate func(i int) bool) int {\n\tfor i := 0; i < limit; i++ {\n\t\tif predicate(i) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n<commit_msg>Including MaxScore in RunResult<commit_after>package runner\n\nimport (\n\t\"archive\/zip\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/lhchavez\/quark\/common\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n)\n\ntype CaseResult struct {\n\tVerdict string `json:\"verdict\"`\n\tName string `json:\"name\"`\n\tMaxScore float64 `json:\"max_score\"`\n\tScore float64 `json:\"score\"`\n\tMeta map[string]RunMetadata `json:\"meta\"`\n}\n\ntype GroupResult struct {\n\tGroup string `json:\"group\"`\n\tMaxScore float64 `json:\"max_score\"`\n\tScore float64 `json:\"score\"`\n\tCases []CaseResult `json:\"cases\"`\n}\n\ntype RunResult struct {\n\tVerdict string `json:\"verdict\"`\n\tCompileError *string `json:\"compile_error,omitempty\"`\n\tCompileMeta map[string]RunMetadata `json:\"compile_meta\"`\n\tScore float64 `json:\"score\"`\n\tMaxScore float64 `json:\"max_score\"`\n\tTime float64 `json:\"time\"`\n\tWallTime float64 `json:\"wall_time\"`\n\tMemory int `json:\"memory\"`\n\tGroups []GroupResult `json:\"groups\"`\n}\n\ntype binary struct {\n\tname string\n\tlanguage string\n}\n\nfunc Grade(\n\tctx *common.Context,\n\tclient *http.Client,\n\tbaseURL *url.URL,\n\trun *common.Run,\n\tinput common.Input,\n\tsandbox Sandbox,\n) (*RunResult, error) {\n\trunResult := &RunResult{\n\t\tVerdict: \"JE\",\n\t}\n\tif !sandbox.Supported() {\n\t\treturn runResult, errors.New(\"Sandbox not supported\")\n\t}\n\trunRoot := path.Join(\n\t\tctx.Config.Runner.RuntimePath,\n\t\t\"grade\",\n\t\tstrconv.FormatUint(run.AttemptID, 10),\n\t)\n\tif !ctx.Config.Runner.PreserveFiles {\n\t\tdefer os.RemoveAll(runRoot)\n\t}\n\n\tctx.Log.Info(\"Running\", \"run\", run)\n\n\tbinaries := []binary{\n\t\t{\"Main\", run.Language},\n\t}\n\n\tgeneratedFiles := make([]string, 0)\n\n\t\/\/ Setup all source files.\n\tmainBinPath := path.Join(runRoot, \"Main\", \"bin\")\n\tif err := os.MkdirAll(mainBinPath, 0755); err != nil {\n\t\treturn runResult, err\n\t}\n\tmainSourceFile := path.Join(mainBinPath, fmt.Sprintf(\"Main.%s\", run.Language))\n\terr := ioutil.WriteFile(mainSourceFile, []byte(run.Source), 0644)\n\tif err != nil {\n\t\treturn runResult, err\n\t}\n\n\tvalidatorBinPath := path.Join(runRoot, \"validator\", \"bin\")\n\tif input.Settings().Validator.Name == \"custom\" {\n\t\tif err := os.MkdirAll(validatorBinPath, 0755); err != nil {\n\t\t\treturn runResult, err\n\t\t}\n\t\tvalidatorLang := *input.Settings().Validator.Lang\n\t\tvalidatorFileName := fmt.Sprintf(\"validator.%s\", validatorLang)\n\t\tvalidatorSourceFile := path.Join(validatorBinPath, validatorFileName)\n\t\terr := os.Link(path.Join(input.Path(), validatorFileName), validatorSourceFile)\n\t\tif err != nil {\n\t\t\treturn runResult, err\n\t\t}\n\t\tbinaries = append(binaries, binary{\"validator\", validatorLang})\n\t}\n\n\trunResult.CompileMeta = make(map[string]RunMetadata)\n\n\tfor _, b := range binaries {\n\t\tbinRoot := path.Join(runRoot, b.name)\n\t\tbinPath := path.Join(binRoot, \"bin\")\n\t\tsourceFile := path.Join(binPath, fmt.Sprintf(\"%s.%s\", b.name, b.language))\n\n\t\tcompileMeta, err := sandbox.Compile(\n\t\t\tctx,\n\t\t\tb.language,\n\t\t\t[]string{sourceFile},\n\t\t\tbinPath,\n\t\t\tpath.Join(binRoot, \"compile.out\"),\n\t\t\tpath.Join(binRoot, \"compile.err\"),\n\t\t\tpath.Join(binRoot, \"compile.meta\"),\n\t\t\tb.name,\n\t\t\t[]string{},\n\t\t)\n\t\tgeneratedFiles = append(\n\t\t\tgeneratedFiles,\n\t\t\tpath.Join(b.name, \"compile.out\"),\n\t\t\tpath.Join(b.name, \"compile.err\"),\n\t\t\tpath.Join(b.name, \"compile.meta\"),\n\t\t)\n\n\t\tif compileMeta != nil {\n\t\t\trunResult.CompileMeta[b.name] = *compileMeta\n\t\t}\n\n\t\tif err != nil || compileMeta.Verdict != \"OK\" {\n\t\t\tctx.Log.Error(\"Compile error\", \"err\", err, \"compileMeta\", compileMeta)\n\t\t\trunResult.Verdict = \"CE\"\n\t\t\tcompileErrorFile := \"compile.err\"\n\t\t\tif b.language == \"pas\" {\n\t\t\t\t\/\/ Lazarus writes the output of the compile error in compile.out.\n\t\t\t\tcompileErrorFile = \"compile.out\"\n\t\t\t} else {\n\t\t\t\tcompileErrorFile = \"compile.err\"\n\t\t\t}\n\t\t\tcompileError := getCompileError(path.Join(binRoot, compileErrorFile))\n\t\t\trunResult.CompileError = &compileError\n\t\t\treturn runResult, err\n\t\t}\n\t}\n\n\tgroupResults := make([]GroupResult, len(input.Settings().Cases))\n\trunResult.MaxScore = run.MaxScore\n\trunResult.Verdict = \"OK\"\n\twallTimeLimit := (float64)(input.Settings().Limits.OverallWallTimeLimit \/ 1000.0)\n\tfor i, group := range input.Settings().Cases {\n\t\tcaseResults := make([]CaseResult, len(group.Cases))\n\t\tfor j, caseData := range group.Cases {\n\t\t\tvar runMeta *RunMetadata\n\t\t\tif runResult.WallTime > wallTimeLimit {\n\t\t\t\trunMeta = &RunMetadata{\n\t\t\t\t\tVerdict: \"TLE\",\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trunMeta, err = sandbox.Run(\n\t\t\t\t\tctx,\n\t\t\t\t\tinput,\n\t\t\t\t\trun.Language,\n\t\t\t\t\tmainBinPath,\n\t\t\t\t\tpath.Join(input.Path(), \"in\", fmt.Sprintf(\"%s.in\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.out\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.err\", caseData.Name)),\n\t\t\t\t\tpath.Join(runRoot, fmt.Sprintf(\"%s.meta\", caseData.Name)),\n\t\t\t\t\t\"Main\",\n\t\t\t\t\tnil,\n\t\t\t\t\tnil,\n\t\t\t\t\tnil,\n\t\t\t\t\t[]string{},\n\t\t\t\t\tmap[string]string{},\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Error(\"failed to run \"+caseData.Name, \"err\", err)\n\t\t\t\t}\n\t\t\t\tgeneratedFiles = append(\n\t\t\t\t\tgeneratedFiles,\n\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\tfmt.Sprintf(\"%s.err\", caseData.Name),\n\t\t\t\t\tfmt.Sprintf(\"%s.meta\", caseData.Name),\n\t\t\t\t)\n\t\t\t}\n\t\t\trunResult.Verdict = worseVerdict(runResult.Verdict, runMeta.Verdict)\n\t\t\trunResult.Time += runMeta.Time\n\t\t\trunResult.WallTime += runMeta.WallTime\n\t\t\trunResult.Memory = max(runResult.Memory, runMeta.Memory)\n\n\t\t\tcaseResults[j] = CaseResult{\n\t\t\t\tName: caseData.Name,\n\t\t\t\tMaxScore: runResult.MaxScore * caseData.Weight,\n\t\t\t\tVerdict: runMeta.Verdict,\n\t\t\t\tMeta: map[string]RunMetadata{\n\t\t\t\t\t\"Main\": *runMeta,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\tgroupResults[i] = GroupResult{\n\t\t\tGroup: group.Name,\n\t\t\tMaxScore: runResult.MaxScore * group.Weight,\n\t\t\tScore: 0,\n\t\t\tCases: caseResults,\n\t\t}\n\t}\n\n\t\/\/ Validate outputs.\n\tfor i, group := range input.Settings().Cases {\n\t\tcorrect := true\n\t\tscore := 0.0\n\t\tfor j, caseData := range group.Cases {\n\t\t\tcaseResults := groupResults[i].Cases[j]\n\t\t\tif caseResults.Verdict == \"OK\" {\n\t\t\t\tcontestantPath := path.Join(\n\t\t\t\t\trunRoot, fmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t)\n\t\t\t\tif input.Settings().Validator.Name == \"custom\" {\n\t\t\t\t\toriginalInputFile := path.Join(\n\t\t\t\t\t\tinput.Path(),\n\t\t\t\t\t\t\"in\",\n\t\t\t\t\t\tfmt.Sprintf(\"%s.in\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\toriginalOutputFile := path.Join(\n\t\t\t\t\t\tinput.Path(),\n\t\t\t\t\t\t\"out\",\n\t\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\trunMetaFile := path.Join(runRoot, fmt.Sprintf(\"%s.meta\", caseData.Name))\n\t\t\t\t\tvalidateMeta, err := sandbox.Run(\n\t\t\t\t\t\tctx,\n\t\t\t\t\t\tinput,\n\t\t\t\t\t\t*input.Settings().Validator.Lang,\n\t\t\t\t\t\tvalidatorBinPath,\n\t\t\t\t\t\tcontestantPath,\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.out\", caseData.Name)),\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.err\", caseData.Name)),\n\t\t\t\t\t\tpath.Join(runRoot, \"validator\", fmt.Sprintf(\"%s.meta\", caseData.Name)),\n\t\t\t\t\t\t\"validator\",\n\t\t\t\t\t\t&originalInputFile,\n\t\t\t\t\t\t&originalOutputFile,\n\t\t\t\t\t\t&runMetaFile,\n\t\t\t\t\t\t[]string{},\n\t\t\t\t\t\tmap[string]string{},\n\t\t\t\t\t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tctx.Log.Error(\"failed to validate \"+caseData.Name, \"err\", err)\n\t\t\t\t\t}\n\t\t\t\t\tgeneratedFiles = append(\n\t\t\t\t\t\tgeneratedFiles,\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.out\", caseData.Name),\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.err\", caseData.Name),\n\t\t\t\t\t\tfmt.Sprintf(\"validator\/%s.meta\", caseData.Name),\n\t\t\t\t\t)\n\t\t\t\t\tif validateMeta.Verdict != \"OK\" {\n\t\t\t\t\t\t\/\/ If the validator did not exit cleanly, assume an empty output.\n\t\t\t\t\t\tcontestantPath = \"\/dev\/null\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontestantPath = path.Join(\n\t\t\t\t\t\t\trunRoot,\n\t\t\t\t\t\t\t\"validator\",\n\t\t\t\t\t\t\tfmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontestantFd, err := os.Open(contestantPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Warn(\"Error opening file\", \"path\", contestantPath, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer contestantFd.Close()\n\t\t\t\texpectedPath := path.Join(\n\t\t\t\t\tinput.Path(), \"out\", fmt.Sprintf(\"%s.out\", caseData.Name),\n\t\t\t\t)\n\t\t\t\texpectedFd, err := os.Open(expectedPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Warn(\"Error opening file\", \"path\", expectedPath, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer expectedFd.Close()\n\t\t\t\trunScore, err := CalculateScore(\n\t\t\t\t\t&input.Settings().Validator,\n\t\t\t\t\tcontestantFd,\n\t\t\t\t\texpectedFd,\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Log.Debug(\"error comparing values\", \"err\", err)\n\t\t\t\t}\n\t\t\t\tcaseResults.Score = runResult.MaxScore * runScore * caseData.Weight\n\t\t\t\tscore += runScore * caseData.Weight\n\t\t\t\tif runScore == 0 {\n\t\t\t\t\tcorrect = false\n\t\t\t\t}\n\t\t\t\tif runScore != 1 {\n\t\t\t\t\trunResult.Verdict = worseVerdict(runResult.Verdict, \"PA\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif correct {\n\t\t\tgroupResults[i].Score = runResult.MaxScore * score\n\t\t\trunResult.Score += groupResults[i].Score\n\t\t}\n\t}\n\n\trunResult.Groups = groupResults\n\n\tif runResult.Verdict == \"PA\" && runResult.Score == 0 {\n\t\trunResult.Verdict = \"WA\"\n\t} else if runResult.Verdict == \"OK\" {\n\t\trunResult.Verdict = \"AC\"\n\t\trunResult.Score = 1\n\t}\n\n\tctx.Log.Debug(\"Finished running\", \"results\", runResult)\n\tfilesURL, err := baseURL.Parse(fmt.Sprintf(\"run\/%d\/files\/\", run.AttemptID))\n\tif err != nil {\n\t\treturn runResult, err\n\t}\n\tif err := uploadFiles(\n\t\tctx,\n\t\tclient,\n\t\tfilesURL.String(),\n\t\trunRoot,\n\t\tinput,\n\t\tgeneratedFiles,\n\t); err != nil {\n\t\tctx.Log.Debug(\"uploadFiles failed\", \"err\", err)\n\t\treturn runResult, err\n\t}\n\n\treturn runResult, nil\n}\n\nfunc uploadFiles(\n\tctx *common.Context,\n\tclient *http.Client,\n\tuploadURL string,\n\trunRoot string,\n\tinput common.Input,\n\tfiles []string,\n) error {\n\tpath, err := createZipFile(runRoot, files)\n\tif path != \"\" {\n\t\tdefer os.Remove(path)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tfd, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := client.Post(uploadURL, \"application\/zip\", fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\treturn nil\n}\n\nfunc createZipFile(runRoot string, files []string) (string, error) {\n\tzipFd, err := ioutil.TempFile(runRoot, \".results_zip\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tzipPath := zipFd.Name()\n\tdefer zipFd.Close()\n\tzip := zip.NewWriter(zipFd)\n\tfor _, file := range files {\n\t\tf, err := os.Open(path.Join(runRoot, file))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer f.Close()\n\t\tzf, err := zip.Create(file)\n\t\tif err != nil {\n\t\t\treturn zipPath, err\n\t\t}\n\t\tif _, err := io.Copy(zf, f); err != nil {\n\t\t\treturn zipPath, err\n\t\t}\n\t}\n\treturn zipPath, zip.Close()\n}\n\nfunc getCompileError(errorFile string) string {\n\tfd, err := os.Open(errorFile)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tdefer fd.Close()\n\tbytes, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(bytes)\n}\n\nfunc worseVerdict(a, b string) string {\n\tverdictList := []string{\n\t\t\"JE\",\n\t\t\"CE\",\n\t\t\"MLE\",\n\t\t\"RFE\",\n\t\t\"RTE\",\n\t\t\"TLE\",\n\t\t\"OLE\",\n\t\t\"WA\",\n\t\t\"PA\",\n\t\t\"AC\",\n\t\t\"OK\",\n\t}\n\tidxA := sliceIndex(len(verdictList),\n\t\tfunc(i int) bool { return verdictList[i] == a })\n\tidxB := sliceIndex(len(verdictList),\n\t\tfunc(i int) bool { return verdictList[i] == b })\n\treturn verdictList[min(idxA, idxB)]\n}\n\nfunc sliceIndex(limit int, predicate func(i int) bool) int {\n\tfor i := 0; i < limit; i++ {\n\t\tif predicate(i) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n<|endoftext|>"} {"text":"<commit_before>package runner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/bivas\/bitbucket-pipelines\/parser\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tpipelineRunnerName = \"pipeline__runner__\"\n\tpipelineRunnerDockerName = pipelineRunnerName + \"docker\"\n\tdockerImage = \"docker:1.8-dind\"\n\tbootTimeout = 30\n)\n\n\/\/PipelineRunner : create the pipelines inner\ntype PipelineRunner interface {\n\tSetup()\n\tRun(commands []string) (string, error)\n\tClose()\n}\n\ntype inner struct {\n\timage string\n\thostPath string\n\tenvironment string\n\tuseDockerService bool\n\tcommand *exec.Cmd\n\tsignal chan error\n}\n\nfunc (*inner) commandRun(name string, args []string) *exec.Cmd {\n\tlog.Println(\"Running (commandRun)\", name, args)\n\treturn exec.Command(name, args...)\n}\n\nfunc (*inner) commandOutput(name string, args []string) (string, error) {\n\tlog.Println(\"Running (commandOutput)\", name, args)\n\tcmd := exec.Command(name, args...)\n\tout, err := cmd.CombinedOutput()\n\treturn strings.TrimSpace(string(out)), err\n}\n\nfunc (env *inner) docker(args ...string) (string, error) {\n\treturn env.commandOutput(\"docker\", args)\n}\n\nfunc (env *inner) pullImage() {\n\tui.Info(\"Pulling image %s\", env.image)\n\tout, e := env.docker(\"pull\", env.image)\n\tif e != nil {\n\t\tui.Error(\"Error pulling image %s %s\", env.image, e)\n\t\tlog.Fatalf(\"Error message %s\\n\", out)\n\t}\n}\n\nfunc (env *inner) runDockerDocker() {\n\tif env.useDockerService {\n\t\tlog.Println(\"pulling\", dockerImage)\n\t\tout, e := env.docker(\"pull\", dockerImage)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"Error pulling image %s %s\\n\", dockerImage, e)\n\t\t\tlog.Fatalf(\"Error message %s\\n\", out)\n\t\t}\n\t\targs := []string{\"run\",\n\t\t\t\"-d\",\n\t\t\t\"--name=\" + pipelineRunnerDockerName,\n\t\t\t\"--privileged\",\n\t\t\tdockerImage}\n\t\tdockerCommand := env.commandRun(\"docker\", args)\n\t\tlog.Println(\"running\", dockerImage)\n\t\tgo func() {\n\t\t\tout, err := dockerCommand.CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error combining output %s, %s\", out, err)\n\t\t\t}\n\t\t}()\n\t\tenv.waitForImage(pipelineRunnerDockerName)\n\t}\n}\n\nfunc (*inner) sendInitCommands(cmd *exec.Cmd) {\n\tstdin, e := cmd.StdinPipe()\n\tif e != nil {\n\t\tlog.Fatal(\"error setting up stdin\", e)\n\t}\n\tdefer stdin.Close()\n\tio.WriteString(stdin, \"touch \/.running\\n\")\n\tio.WriteString(stdin, \"while [ -e \/.running ]; do sleep 1; done; exit;\\n\")\n}\n\nfunc (env *inner) runImage() {\n\tenv.cleanup()\n\tui.Info(\"Running image %s\", env.image)\n\targs := []string{\"run\",\n\t\t\"-i\",\n\t\t\"--rm\",\n\t\t\"--name=\" + pipelineRunnerName,\n\t\t\"--volume=\" + env.hostPath + \":\/wd\",\n\t\t\"--workdir=\/wd\",\n\t\t\"--entrypoint=\/bin\/sh\"}\n\tif env.useDockerService {\n\t\tenv.runDockerDocker()\n\t\targs = append(args,\n\t\t\t[]string{\n\t\t\t\t\"--env=DOCKER_HOST=tcp:\/\/docker:2375\",\n\t\t\t\t\"--link=\" + pipelineRunnerDockerName + \":docker\",\n\t\t\t}...)\n\t}\n\tif env.environment != \"\" {\n\t\tif isFile(env.environment) {\n\t\t\targs = append(args,\n\t\t\t\t[]string{\n\t\t\t\t\t\"--env-file=\" + env.environment,\n\t\t\t\t}...)\n\t\t} else {\n\t\t\tui.Warning(\n\t\t\t\t\"Environment file %s doesn't exist - running without environment overrides\",\n\t\t\t\tenv.environment)\n\t\t}\n\t}\n\targs = append(args, env.image)\n\tenv.command = env.commandRun(\"docker\", args)\n\tenv.sendInitCommands(env.command)\n\tenv.signal = make(chan error)\n\tgo func() {\n\t\tout, err := env.command.CombinedOutput()\n\t\tif err != nil {\n\t\t\tenv.signal <- fmt.Errorf(\"error combining output %s, %s\", out, err)\n\t\t} else {\n\t\t\tenv.signal <- err\n\t\t}\n\t}()\n}\n\nfunc (env *inner) stopImage() {\n\tenv.docker(\"exec\", \"-i\", pipelineRunnerName, \"rm\", \"\/.running\")\n\tif env.useDockerService {\n\t\tenv.docker(\"stop\", pipelineRunnerDockerName)\n\t}\n}\n\nfunc (env *inner) cleanup() {\n\tenv.docker(\"rm\", \"-f\", pipelineRunnerName)\n\tif env.useDockerService {\n\t\tenv.docker(\"rm\", \"-f\", pipelineRunnerDockerName)\n\t\tos.Remove(\"\")\n\t}\n}\n\nfunc (env *inner) Setup() {\n\tlog.Println(\"Setup inner runner\")\n\tenv.pullImage()\n\tenv.runImage()\n}\n\nfunc (env *inner) Close() {\n\tlog.Println(\"Closing inner runner\")\n\tenv.command.Process.Kill()\n\tenv.command.Wait()\n\tenv.cleanup()\n}\n\nfunc (env *inner) waitForImage(image string) error {\n\tfilterPs := []string{\"ps\", \"-aq\", \"--filter\", \"name=\" + image}\n\tid, _ := env.docker(filterPs...)\n\tfor i := 0; ; i++ {\n\t\tif i > bootTimeout {\n\t\t\treturn fmt.Errorf(\"Unable to start container after %d seconds\", bootTimeout)\n\t\t}\n\t\tlog.Println(\"Waiting for container to be available\", image, id)\n\t\tif id != \"\" {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t\tid, _ = env.docker(filterPs...)\n\t}\n\treturn nil\n}\n\nfunc (env *inner) Run(commands []string) error {\n\tgo func() {\n\t\ttime.Sleep(5 * time.Minute)\n\t\tenv.signal <- errors.New(\"Timeout trying to run commands\")\n\t}()\n\tgo func() {\n\t\te := env.waitForImage(pipelineRunnerName)\n\t\tif e != nil {\n\t\t\tenv.signal <- e\n\t\t\treturn\n\t\t}\n\t\tif env.useDockerService {\n\t\t\tenv.copyDockerBin()\n\t\t}\n\t\tfor _, command := range commands {\n\t\t\toutput, err := env.docker(\"exec\", \"-i\", pipelineRunnerName, \"\/bin\/sh\", \"-c\", command)\n\t\t\tif err != nil {\n\t\t\t\tenv.signal <- errors.New(fmt.Sprintln(\"error running\", command, output, err))\n\t\t\t}\n\t\t\tui.Info(\" == Running '%s' ==>\", command)\n\t\t\tui.Output(output)\n\t\t}\n\t\tenv.signal <- nil\n\t}()\n\terr := <-env.signal\n\tenv.stopImage()\n\treturn err\n}\n\nfunc (env *inner) copyDockerBin() {\n\tif out1, e1 := env.docker([]string{\n\t\t\"cp\",\n\t\tpipelineRunnerDockerName + \":\/usr\/local\/bin\/docker\",\n\t\t\"\/tmp\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy from\", out1, e1)\n\t}\n\tif out1, e1 := env.docker([]string{\n\t\t\"exec\",\n\t\tpipelineRunnerName,\n\t\t\"mkdir\",\n\t\t\"-p\",\n\t\t\"\/usr\/local\/bin\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy to\", out1, e1)\n\t}\n\tif out1, e1 := env.docker([]string{\n\t\t\"cp\",\n\t\t\"\/tmp\/docker\",\n\t\tpipelineRunnerName + \":\/usr\/local\/bin\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy to\", out1, e1)\n\t}\n}\n\n\/\/Run : run it!\nfunc Run(yml string, env string) int {\n\treader, e := os.Open(yml)\n\tif e != nil {\n\t\tlog.Printf(\"Unable to read %s\\n\", yml)\n\t\treturn 1\n\t}\n\tpipeline := parser.ReadPipelineDef(reader)\n\tpath, _ := os.Getwd()\n\tr := &inner{\n\t\timage: pipeline.Image,\n\t\thostPath: path,\n\t\tenvironment: env,\n\t\tuseDockerService: pipeline.Options.Docker,\n\t}\n\tdefer r.Close()\n\tr.Setup()\n\teR := r.Run(pipeline.Pipelines.Default[0].Step.Scripts)\n\tif eR != nil {\n\t\tlog.Println(\"error when running\", eR)\n\t\treturn 1\n\t}\n\treturn 0\n}\n<commit_msg>update to docker 17<commit_after>package runner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/bivas\/bitbucket-pipelines\/parser\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tpipelineRunnerName = \"pipeline__runner__\"\n\tpipelineRunnerDockerName = pipelineRunnerName + \"docker\"\n\tdockerImage = \"docker:17-dind\"\n\tbootTimeout = 30\n)\n\n\/\/PipelineRunner : create the pipelines inner\ntype PipelineRunner interface {\n\tSetup()\n\tRun(commands []string) (string, error)\n\tClose()\n}\n\ntype inner struct {\n\timage string\n\thostPath string\n\tenvironment string\n\tuseDockerService bool\n\tcommand *exec.Cmd\n\tsignal chan error\n}\n\nfunc (*inner) commandRun(name string, args []string) *exec.Cmd {\n\tlog.Println(\"Running (commandRun)\", name, args)\n\treturn exec.Command(name, args...)\n}\n\nfunc (*inner) commandOutput(name string, args []string) (string, error) {\n\tlog.Println(\"Running (commandOutput)\", name, args)\n\tcmd := exec.Command(name, args...)\n\tout, err := cmd.CombinedOutput()\n\treturn strings.TrimSpace(string(out)), err\n}\n\nfunc (env *inner) docker(args ...string) (string, error) {\n\treturn env.commandOutput(\"docker\", args)\n}\n\nfunc (env *inner) pullImage() {\n\tui.Info(\"Pulling image %s\", env.image)\n\tout, e := env.docker(\"pull\", env.image)\n\tif e != nil {\n\t\tui.Error(\"Error pulling image %s %s\", env.image, e)\n\t\tlog.Fatalf(\"Error message %s\\n\", out)\n\t}\n}\n\nfunc (env *inner) runDockerDocker() {\n\tif env.useDockerService {\n\t\tlog.Println(\"pulling\", dockerImage)\n\t\tout, e := env.docker(\"pull\", dockerImage)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"Error pulling image %s %s\\n\", dockerImage, e)\n\t\t\tlog.Fatalf(\"Error message %s\\n\", out)\n\t\t}\n\t\targs := []string{\"run\",\n\t\t\t\"-d\",\n\t\t\t\"--name=\" + pipelineRunnerDockerName,\n\t\t\t\"--privileged\",\n\t\t\tdockerImage}\n\t\tdockerCommand := env.commandRun(\"docker\", args)\n\t\tlog.Println(\"running\", dockerImage)\n\t\tgo func() {\n\t\t\tout, err := dockerCommand.CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"error combining output %s, %s\", out, err)\n\t\t\t}\n\t\t}()\n\t\tenv.waitForImage(pipelineRunnerDockerName)\n\t}\n}\n\nfunc (*inner) sendInitCommands(cmd *exec.Cmd) {\n\tstdin, e := cmd.StdinPipe()\n\tif e != nil {\n\t\tlog.Fatal(\"error setting up stdin\", e)\n\t}\n\tdefer stdin.Close()\n\tio.WriteString(stdin, \"touch \/.running\\n\")\n\tio.WriteString(stdin, \"while [ -e \/.running ]; do sleep 1; done; exit;\\n\")\n}\n\nfunc (env *inner) runImage() {\n\tenv.cleanup()\n\tui.Info(\"Running image %s\", env.image)\n\targs := []string{\"run\",\n\t\t\"-i\",\n\t\t\"--rm\",\n\t\t\"--name=\" + pipelineRunnerName,\n\t\t\"--volume=\" + env.hostPath + \":\/wd\",\n\t\t\"--workdir=\/wd\",\n\t\t\"--entrypoint=\/bin\/sh\"}\n\tif env.useDockerService {\n\t\tenv.runDockerDocker()\n\t\targs = append(args,\n\t\t\t[]string{\n\t\t\t\t\"--env=DOCKER_HOST=tcp:\/\/docker:2375\",\n\t\t\t\t\"--link=\" + pipelineRunnerDockerName + \":docker\",\n\t\t\t}...)\n\t}\n\tif env.environment != \"\" {\n\t\tif isFile(env.environment) {\n\t\t\targs = append(args,\n\t\t\t\t[]string{\n\t\t\t\t\t\"--env-file=\" + env.environment,\n\t\t\t\t}...)\n\t\t} else {\n\t\t\tui.Warning(\n\t\t\t\t\"Environment file %s doesn't exist - running without environment overrides\",\n\t\t\t\tenv.environment)\n\t\t}\n\t}\n\targs = append(args, env.image)\n\tenv.command = env.commandRun(\"docker\", args)\n\tenv.sendInitCommands(env.command)\n\tenv.signal = make(chan error)\n\tgo func() {\n\t\tout, err := env.command.CombinedOutput()\n\t\tif err != nil {\n\t\t\tenv.signal <- fmt.Errorf(\"error combining output %s, %s\", out, err)\n\t\t} else {\n\t\t\tenv.signal <- err\n\t\t}\n\t}()\n}\n\nfunc (env *inner) stopImage() {\n\tenv.docker(\"exec\", \"-i\", pipelineRunnerName, \"rm\", \"\/.running\")\n\tif env.useDockerService {\n\t\tenv.docker(\"stop\", pipelineRunnerDockerName)\n\t}\n}\n\nfunc (env *inner) cleanup() {\n\tenv.docker(\"rm\", \"-f\", pipelineRunnerName)\n\tif env.useDockerService {\n\t\tenv.docker(\"rm\", \"-f\", pipelineRunnerDockerName)\n\t\tos.Remove(\"\")\n\t}\n}\n\nfunc (env *inner) Setup() {\n\tlog.Println(\"Setup inner runner\")\n\tenv.pullImage()\n\tenv.runImage()\n}\n\nfunc (env *inner) Close() {\n\tlog.Println(\"Closing inner runner\")\n\tenv.command.Process.Kill()\n\tenv.command.Wait()\n\tenv.cleanup()\n}\n\nfunc (env *inner) waitForImage(image string) error {\n\tfilterPs := []string{\"ps\", \"-aq\", \"--filter\", \"name=\" + image}\n\tid, _ := env.docker(filterPs...)\n\tfor i := 0; ; i++ {\n\t\tif i > bootTimeout {\n\t\t\treturn fmt.Errorf(\"Unable to start container after %d seconds\", bootTimeout)\n\t\t}\n\t\tlog.Println(\"Waiting for container to be available\", image, id)\n\t\ttime.Sleep(1 * time.Second)\n\t\tif id != \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tid, _ = env.docker(filterPs...)\n\t}\n\treturn nil\n}\n\nfunc (env *inner) Run(commands []string) error {\n\tgo func() {\n\t\ttime.Sleep(5 * time.Minute)\n\t\tenv.signal <- errors.New(\"Timeout trying to run commands\")\n\t}()\n\tgo func() {\n\t\te := env.waitForImage(pipelineRunnerName)\n\t\tif e != nil {\n\t\t\tenv.signal <- e\n\t\t\treturn\n\t\t}\n\t\tif env.useDockerService {\n\t\t\tenv.copyDockerBin()\n\t\t}\n\t\tfor _, command := range commands {\n\t\t\toutput, err := env.docker(\"exec\", \"-i\", pipelineRunnerName, \"\/bin\/sh\", \"-c\", command)\n\t\t\tif err != nil {\n\t\t\t\tenv.signal <- errors.New(fmt.Sprintln(\"error running\", command, output, err))\n\t\t\t}\n\t\t\tui.Info(\" == Running '%s' ==>\", command)\n\t\t\tui.Output(output)\n\t\t}\n\t\tenv.signal <- nil\n\t}()\n\terr := <-env.signal\n\tenv.stopImage()\n\treturn err\n}\n\nfunc (env *inner) copyDockerBin() {\n\tif out1, e1 := env.docker([]string{\n\t\t\"cp\",\n\t\tpipelineRunnerDockerName + \":\/usr\/local\/bin\/docker\",\n\t\t\"\/tmp\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy from\", out1, e1)\n\t}\n\tif out1, e1 := env.docker([]string{\n\t\t\"exec\",\n\t\tpipelineRunnerName,\n\t\t\"mkdir\",\n\t\t\"-p\",\n\t\t\"\/usr\/local\/bin\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy to\", out1, e1)\n\t}\n\tif out1, e1 := env.docker([]string{\n\t\t\"cp\",\n\t\t\"\/tmp\/docker\",\n\t\tpipelineRunnerName + \":\/usr\/local\/bin\/\",\n\t}...); e1 != nil {\n\t\tlog.Fatal(\"copy to\", out1, e1)\n\t}\n}\n\n\/\/Run : run it!\nfunc Run(yml string, env string) int {\n\treader, e := os.Open(yml)\n\tif e != nil {\n\t\tlog.Printf(\"Unable to read %s\\n\", yml)\n\t\treturn 1\n\t}\n\tpipeline := parser.ReadPipelineDef(reader)\n\tpath, _ := os.Getwd()\n\tr := &inner{\n\t\timage: pipeline.Image,\n\t\thostPath: path,\n\t\tenvironment: env,\n\t\tuseDockerService: pipeline.Options.Docker,\n\t}\n\tdefer r.Close()\n\tr.Setup()\n\teR := r.Run(pipeline.Pipelines.Default[0].Step.Scripts)\n\tif eR != nil {\n\t\tlog.Println(\"error when running\", eR)\n\t\treturn 1\n\t}\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package scanner\n\nconst (\n\tTSTRING = iota\n\tTNUMBER\n\tTTRUE\n\tTFALSE\n\tTNULL\n\tTLBRACE\n\tTRBRACE\n\tTLBRACKET\n\tTRBRACKET\n\tTCOLON\n\tTCOMMA\n)\n\nvar names = map[int]string{\n\tTSTRING: \"string\",\n\tTNUMBER: \"number\",\n\tTTRUE: \"true\",\n\tTFALSE: \"false\",\n\tTNULL: \"null\",\n\tTLBRACE: \"left brace\",\n\tTRBRACE: \"right brace\",\n\tTLBRACKET: \"left bracket\",\n\tTRBRACKET: \"right bracket\",\n\tTCOLON: \"colon\",\n\tTCOMMA: \"comma\",\n}\n\n\/\/ TokenName returns a human readable version of the token.\nfunc TokenName(tok int) string {\n\treturn names[tok]\n}\n<commit_msg>Changes the integer value of TSTRING so that it can be Unscan'ed.<commit_after>package scanner\n\nconst (\n\tTSTRING = iota + 1\n\tTNUMBER\n\tTTRUE\n\tTFALSE\n\tTNULL\n\tTLBRACE\n\tTRBRACE\n\tTLBRACKET\n\tTRBRACKET\n\tTCOLON\n\tTCOMMA\n)\n\nvar names = map[int]string{\n\tTSTRING: \"string\",\n\tTNUMBER: \"number\",\n\tTTRUE: \"true\",\n\tTFALSE: \"false\",\n\tTNULL: \"null\",\n\tTLBRACE: \"left brace\",\n\tTRBRACE: \"right brace\",\n\tTLBRACKET: \"left bracket\",\n\tTRBRACKET: \"right bracket\",\n\tTCOLON: \"colon\",\n\tTCOMMA: \"comma\",\n}\n\n\/\/ TokenName returns a human readable version of the token.\nfunc TokenName(tok int) string {\n\treturn names[tok]\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\nvar (\n\tupdateAssetRe = regexp.MustCompile(`^update_(darwin|windows|linux|android)_(arm|386|amd64)\\.?.*$`)\n\n\temptyVersion semver.Version\n)\n\n\/\/ Arch holds architecture names.\nvar Arch = struct {\n\tX64 string\n\tX86 string\n\tARM string\n}{\n\t\"amd64\",\n\t\"386\",\n\t\"arm\",\n}\n\n\/\/ OS holds operating system names.\nvar OS = struct {\n\tWindows string\n\tLinux string\n\tDarwin string\n\tAndroid string\n}{\n\t\"windows\",\n\t\"linux\",\n\t\"darwin\",\n\t\"android\"\n}\n\n\/\/ Release struct represents a single github release.\ntype Release struct {\n\tid int\n\tURL string\n\tVersion semver.Version\n\tAssets []Asset\n}\n\ntype releasesByID []Release\n\n\/\/ Asset struct represents a file included as part of a Release.\ntype Asset struct {\n\tid int\n\tv semver.Version\n\tName string\n\tURL string\n\tLocalFile string\n\tChecksum string\n\tSignature string\n\tAssetInfo\n}\n\n\/\/ AssetInfo struct holds OS and Arch information of an asset.\ntype AssetInfo struct {\n\tOS string\n\tArch string\n}\n\n\/\/ ReleaseManager struct defines a repository to pull releases from.\ntype ReleaseManager struct {\n\tclient *github.Client\n\towner string\n\trepo string\n\tupdateAssetsMap map[string]map[string]map[string]*Asset\n\tlatestAssetsMap map[string]map[string]*Asset\n\tmu *sync.RWMutex\n}\n\nfunc (a releasesByID) Len() int {\n\treturn len(a)\n}\n\nfunc (a releasesByID) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n\nfunc (a releasesByID) Less(i, j int) bool {\n\treturn a[i].id < a[j].id\n}\n\n\/\/ NewReleaseManager creates a wrapper of github.Client.\nfunc NewReleaseManager(owner string, repo string) *ReleaseManager {\n\n\tghc := &ReleaseManager{\n\t\tclient: github.NewClient(nil),\n\t\towner: owner,\n\t\trepo: repo,\n\t\tmu: new(sync.RWMutex),\n\t\tupdateAssetsMap: make(map[string]map[string]map[string]*Asset),\n\t\tlatestAssetsMap: make(map[string]map[string]*Asset),\n\t}\n\n\treturn ghc\n}\n\n\/\/ getReleases queries github for all product releases.\nfunc (g *ReleaseManager) getReleases() ([]Release, error) {\n\tvar releases []Release\n\n\tfor page := 1; true; page++ {\n\t\topt := &github.ListOptions{Page: page}\n\n\t\trels, _, err := g.client.Repositories.ListReleases(g.owner, g.repo, opt)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(rels) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\treleases = make([]Release, 0, len(rels))\n\n\t\tfor i := range rels {\n\t\t\tversion := *rels[i].TagName\n\t\t\tv, err := semver.Parse(version)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Release %q is not semantically versioned (%q). Skipping.\", version, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trel := Release{\n\t\t\t\tid: *rels[i].ID,\n\t\t\t\tURL: *rels[i].ZipballURL,\n\t\t\t\tVersion: v,\n\t\t\t}\n\t\t\trel.Assets = make([]Asset, 0, len(rels[i].Assets))\n\t\t\tfor _, asset := range rels[i].Assets {\n\t\t\t\trel.Assets = append(rel.Assets, Asset{\n\t\t\t\t\tid: *asset.ID,\n\t\t\t\t\tName: *asset.Name,\n\t\t\t\t\tURL: *asset.BrowserDownloadURL,\n\t\t\t\t})\n\t\t\t}\n\t\t\tlog.Debugf(\"Release %q has %d assets...\", version, len(rel.Assets))\n\t\t\treleases = append(releases, rel)\n\t\t}\n\t}\n\n\tsort.Sort(sort.Reverse(releasesByID(releases)))\n\n\treturn releases, nil\n}\n\n\/\/ UpdateAssetsMap will pull published releases, scan for compatible\n\/\/ update-only binaries and will add them to the updateAssetsMap.\nfunc (g *ReleaseManager) UpdateAssetsMap() (err error) {\n\n\tvar rs []Release\n\n\tlog.Debugf(\"Getting releases...\")\n\tif rs, err = g.getReleases(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Resetting file hashes.\n\tfileHashMapMu.Lock()\n\tfileHashMap = map[string]string{}\n\tfileHashMapMu.Unlock()\n\n\tlog.Debugf(\"Getting assets...\")\n\tfor i := range rs {\n\t\tlog.Debugf(\"Getting assets for release %q...\", rs[i].Version)\n\t\tfor j := range rs[i].Assets {\n\t\t\tlog.Debugf(\"Found %q.\", rs[i].Assets[j].Name)\n\t\t\t\/\/ Does this asset represent a binary update?\n\t\t\tif isUpdateAsset(rs[i].Assets[j].Name) {\n\t\t\t\tlog.Debugf(\"%q is an auto-update asset.\", rs[i].Assets[j].Name)\n\t\t\t\tasset := rs[i].Assets[j]\n\t\t\t\tasset.v = rs[i].Version\n\t\t\t\tinfo, err := getAssetInfo(asset.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not get asset info: %q\", err)\n\t\t\t\t}\n\t\t\t\tif err = g.pushAsset(info.OS, info.Arch, &asset); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not push asset: %q\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Debugf(\"%q is not an auto-update asset. Skipping.\", rs[i].Assets[j].Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (g *ReleaseManager) getProductUpdate(os string, arch string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.latestAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.latestAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.latestAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\treturn g.latestAssetsMap[os][arch], nil\n}\n\nfunc (g *ReleaseManager) lookupAssetWithChecksum(os string, arch string, checksum string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.updateAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.updateAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\tfor _, a := range g.updateAssetsMap[os][arch] {\n\t\tif a.Checksum == checksum {\n\t\t\treturn a, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Could not find a matching checksum in assets list.\")\n}\n\nfunc (g *ReleaseManager) lookupAssetWithVersion(os string, arch string, version string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.updateAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.updateAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\tfor _, a := range g.updateAssetsMap[os][arch] {\n\t\tif a.v.String() == version {\n\t\t\treturn a, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Could not find a matching version in assets list.\")\n}\n\nfunc (g *ReleaseManager) pushAsset(os string, arch string, asset *Asset) (err error) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\n\tversion := asset.v\n\n\tasset.OS = os\n\tasset.Arch = arch\n\n\tif version.EQ(emptyVersion) {\n\t\treturn fmt.Errorf(\"Missing asset version.\")\n\t}\n\n\tvar localfile string\n\tif localfile, err = downloadAsset(asset.URL); err != nil {\n\t\treturn err\n\t}\n\n\tif asset.Checksum, err = checksumForFile(localfile); err != nil {\n\t\treturn err\n\t}\n\n\tif asset.Signature, err = signatureForFile(localfile); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Pushing version.\n\tif g.updateAssetsMap[os] == nil {\n\t\tg.updateAssetsMap[os] = make(map[string]map[string]*Asset)\n\t}\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\tg.updateAssetsMap[os][arch] = make(map[string]*Asset)\n\t}\n\tg.updateAssetsMap[os][arch][version.String()] = asset\n\n\t\/\/ Setting latest version.\n\tif g.latestAssetsMap[os] == nil {\n\t\tg.latestAssetsMap[os] = make(map[string]*Asset)\n\t}\n\n\t\/\/ Only considering non-manoto versions for the latestAssetsMap\n\tif !buildStringContainsManoto(asset.v) {\n\t\tif g.latestAssetsMap[os][arch] == nil {\n\t\t\tg.latestAssetsMap[os][arch] = asset\n\t\t} else {\n\t\t\t\/\/ Compare against already set version.\n\t\t\tif asset.v.GT(g.latestAssetsMap[os][arch].v) {\n\t\t\t\tg.latestAssetsMap[os][arch] = asset\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getAssetInfo(s string) (*AssetInfo, error) {\n\tmatches := updateAssetRe.FindStringSubmatch(s)\n\tif len(matches) >= 3 {\n\t\tif matches[1] != OS.Windows && matches[1] != OS.Linux && matches[1] != OS.Darwin && matches[1] != OS.Android {\n\t\t\treturn nil, fmt.Errorf(\"Unknown OS: \\\"%s\\\".\", matches[1])\n\t\t}\n\t\tif matches[2] != Arch.X64 && matches[2] != Arch.X86 && matches[2] != Arch.ARM {\n\t\t\treturn nil, fmt.Errorf(\"Unknown architecture \\\"%s\\\".\", matches[2])\n\t\t}\n\t\tinfo := &AssetInfo{\n\t\t\tOS: matches[1],\n\t\t\tArch: matches[2],\n\t\t}\n\t\treturn info, nil\n\t}\n\treturn nil, fmt.Errorf(\"Could not find asset info.\")\n}\n\nfunc isUpdateAsset(s string) bool {\n\treturn updateAssetRe.MatchString(s)\n}\n<commit_msg>add comment<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\nvar (\n\tupdateAssetRe = regexp.MustCompile(`^update_(darwin|windows|linux|android)_(arm|386|amd64)\\.?.*$`)\n\n\temptyVersion semver.Version\n)\n\n\/\/ Arch holds architecture names.\nvar Arch = struct {\n\tX64 string\n\tX86 string\n\tARM string\n}{\n\t\"amd64\",\n\t\"386\",\n\t\"arm\",\n}\n\n\/\/ OS holds operating system names.\nvar OS = struct {\n\tWindows string\n\tLinux string\n\tDarwin string\n\tAndroid string\n}{\n\t\"windows\",\n\t\"linux\",\n\t\"darwin\",\n\t\"android\",\n}\n\n\/\/ Release struct represents a single github release.\ntype Release struct {\n\tid int\n\tURL string\n\tVersion semver.Version\n\tAssets []Asset\n}\n\ntype releasesByID []Release\n\n\/\/ Asset struct represents a file included as part of a Release.\ntype Asset struct {\n\tid int\n\tv semver.Version\n\tName string\n\tURL string\n\tLocalFile string\n\tChecksum string\n\tSignature string\n\tAssetInfo\n}\n\n\/\/ AssetInfo struct holds OS and Arch information of an asset.\ntype AssetInfo struct {\n\tOS string\n\tArch string\n}\n\n\/\/ ReleaseManager struct defines a repository to pull releases from.\ntype ReleaseManager struct {\n\tclient *github.Client\n\towner string\n\trepo string\n\tupdateAssetsMap map[string]map[string]map[string]*Asset\n\tlatestAssetsMap map[string]map[string]*Asset\n\tmu *sync.RWMutex\n}\n\nfunc (a releasesByID) Len() int {\n\treturn len(a)\n}\n\nfunc (a releasesByID) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n\nfunc (a releasesByID) Less(i, j int) bool {\n\treturn a[i].id < a[j].id\n}\n\n\/\/ NewReleaseManager creates a wrapper of github.Client.\nfunc NewReleaseManager(owner string, repo string) *ReleaseManager {\n\n\tghc := &ReleaseManager{\n\t\tclient: github.NewClient(nil),\n\t\towner: owner,\n\t\trepo: repo,\n\t\tmu: new(sync.RWMutex),\n\t\tupdateAssetsMap: make(map[string]map[string]map[string]*Asset),\n\t\tlatestAssetsMap: make(map[string]map[string]*Asset),\n\t}\n\n\treturn ghc\n}\n\n\/\/ getReleases queries github for all product releases.\nfunc (g *ReleaseManager) getReleases() ([]Release, error) {\n\tvar releases []Release\n\n\tfor page := 1; true; page++ {\n\t\topt := &github.ListOptions{Page: page}\n\n\t\trels, _, err := g.client.Repositories.ListReleases(g.owner, g.repo, opt)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(rels) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\treleases = make([]Release, 0, len(rels))\n\n\t\tfor i := range rels {\n\t\t\tversion := *rels[i].TagName\n\t\t\tv, err := semver.Parse(version)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"Release %q is not semantically versioned (%q). Skipping.\", version, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trel := Release{\n\t\t\t\tid: *rels[i].ID,\n\t\t\t\tURL: *rels[i].ZipballURL,\n\t\t\t\tVersion: v,\n\t\t\t}\n\t\t\trel.Assets = make([]Asset, 0, len(rels[i].Assets))\n\t\t\tfor _, asset := range rels[i].Assets {\n\t\t\t\trel.Assets = append(rel.Assets, Asset{\n\t\t\t\t\tid: *asset.ID,\n\t\t\t\t\tName: *asset.Name,\n\t\t\t\t\tURL: *asset.BrowserDownloadURL,\n\t\t\t\t})\n\t\t\t}\n\t\t\tlog.Debugf(\"Release %q has %d assets...\", version, len(rel.Assets))\n\t\t\treleases = append(releases, rel)\n\t\t}\n\t}\n\n\tsort.Sort(sort.Reverse(releasesByID(releases)))\n\n\treturn releases, nil\n}\n\n\/\/ UpdateAssetsMap will pull published releases, scan for compatible\n\/\/ update-only binaries and will add them to the updateAssetsMap.\nfunc (g *ReleaseManager) UpdateAssetsMap() (err error) {\n\n\tvar rs []Release\n\n\tlog.Debugf(\"Getting releases...\")\n\tif rs, err = g.getReleases(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Resetting file hashes.\n\tfileHashMapMu.Lock()\n\tfileHashMap = map[string]string{}\n\tfileHashMapMu.Unlock()\n\n\tlog.Debugf(\"Getting assets...\")\n\tfor i := range rs {\n\t\tlog.Debugf(\"Getting assets for release %q...\", rs[i].Version)\n\t\tfor j := range rs[i].Assets {\n\t\t\tlog.Debugf(\"Found %q.\", rs[i].Assets[j].Name)\n\t\t\t\/\/ Does this asset represent a binary update?\n\t\t\tif isUpdateAsset(rs[i].Assets[j].Name) {\n\t\t\t\tlog.Debugf(\"%q is an auto-update asset.\", rs[i].Assets[j].Name)\n\t\t\t\tasset := rs[i].Assets[j]\n\t\t\t\tasset.v = rs[i].Version\n\t\t\t\tinfo, err := getAssetInfo(asset.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not get asset info: %q\", err)\n\t\t\t\t}\n\t\t\t\tif err = g.pushAsset(info.OS, info.Arch, &asset); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not push asset: %q\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Debugf(\"%q is not an auto-update asset. Skipping.\", rs[i].Assets[j].Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (g *ReleaseManager) getProductUpdate(os string, arch string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.latestAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.latestAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.latestAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\treturn g.latestAssetsMap[os][arch], nil\n}\n\nfunc (g *ReleaseManager) lookupAssetWithChecksum(os string, arch string, checksum string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.updateAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.updateAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\tfor _, a := range g.updateAssetsMap[os][arch] {\n\t\tif a.Checksum == checksum {\n\t\t\treturn a, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Could not find a matching checksum in assets list.\")\n}\n\nfunc (g *ReleaseManager) lookupAssetWithVersion(os string, arch string, version string) (asset *Asset, err error) {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\tif g.updateAssetsMap == nil {\n\t\treturn nil, fmt.Errorf(\"No updates available.\")\n\t}\n\n\tif g.updateAssetsMap[os] == nil {\n\t\treturn nil, fmt.Errorf(\"No such OS.\")\n\t}\n\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\treturn nil, fmt.Errorf(\"No such Arch.\")\n\t}\n\n\tfor _, a := range g.updateAssetsMap[os][arch] {\n\t\tif a.v.String() == version {\n\t\t\treturn a, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Could not find a matching version in assets list.\")\n}\n\nfunc (g *ReleaseManager) pushAsset(os string, arch string, asset *Asset) (err error) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\n\tversion := asset.v\n\n\tasset.OS = os\n\tasset.Arch = arch\n\n\tif version.EQ(emptyVersion) {\n\t\treturn fmt.Errorf(\"Missing asset version.\")\n\t}\n\n\tvar localfile string\n\tif localfile, err = downloadAsset(asset.URL); err != nil {\n\t\treturn err\n\t}\n\n\tif asset.Checksum, err = checksumForFile(localfile); err != nil {\n\t\treturn err\n\t}\n\n\tif asset.Signature, err = signatureForFile(localfile); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Pushing version.\n\tif g.updateAssetsMap[os] == nil {\n\t\tg.updateAssetsMap[os] = make(map[string]map[string]*Asset)\n\t}\n\tif g.updateAssetsMap[os][arch] == nil {\n\t\tg.updateAssetsMap[os][arch] = make(map[string]*Asset)\n\t}\n\tg.updateAssetsMap[os][arch][version.String()] = asset\n\n\t\/\/ Setting latest version.\n\tif g.latestAssetsMap[os] == nil {\n\t\tg.latestAssetsMap[os] = make(map[string]*Asset)\n\t}\n\n\t\/\/ Only considering non-manoto versions for the latestAssetsMap\n\tif !buildStringContainsManoto(asset.v) {\n\t\tif g.latestAssetsMap[os][arch] == nil {\n\t\t\tg.latestAssetsMap[os][arch] = asset\n\t\t} else {\n\t\t\t\/\/ Compare against already set version.\n\t\t\tif asset.v.GT(g.latestAssetsMap[os][arch].v) {\n\t\t\t\tg.latestAssetsMap[os][arch] = asset\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getAssetInfo(s string) (*AssetInfo, error) {\n\tmatches := updateAssetRe.FindStringSubmatch(s)\n\tif len(matches) >= 3 {\n\t\tif matches[1] != OS.Windows && matches[1] != OS.Linux && matches[1] != OS.Darwin && matches[1] != OS.Android {\n\t\t\treturn nil, fmt.Errorf(\"Unknown OS: \\\"%s\\\".\", matches[1])\n\t\t}\n\t\tif matches[2] != Arch.X64 && matches[2] != Arch.X86 && matches[2] != Arch.ARM {\n\t\t\treturn nil, fmt.Errorf(\"Unknown architecture \\\"%s\\\".\", matches[2])\n\t\t}\n\t\tinfo := &AssetInfo{\n\t\t\tOS: matches[1],\n\t\t\tArch: matches[2],\n\t\t}\n\t\treturn info, nil\n\t}\n\treturn nil, fmt.Errorf(\"Could not find asset info.\")\n}\n\nfunc isUpdateAsset(s string) bool {\n\treturn updateAssetRe.MatchString(s)\n}\n<|endoftext|>"} {"text":"<commit_before>package randomword_test\n\nimport (\n\t\"time\"\n\n\t. \"code.cloudfoundry.org\/cli\/util\/randomword\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Generator\", func() {\n\tvar gen Generator\n\n\tBeforeEach(func() {\n\t\tgen = Generator{}\n\t})\n\n\tDescribe(\"RandomAdjective\", func() {\n\t\tIt(\"generates a random adjective each time it is called\", func() {\n\t\t\tadj := gen.RandomAdjective()\n\t\t\t\/\/ We wait for 3 millisecond because the seed we use to generate the\n\t\t\t\/\/ randomness has a unit of 1 nanosecond plus random test flakiness\n\t\t\ttime.Sleep(3)\n\t\t\tExpect(adj).ToNot(Equal(gen.RandomAdjective()))\n\t\t})\n\t})\n\n\tDescribe(\"RandomNoun\", func() {\n\t\tIt(\"generates a random noun each time it is called\", func() {\n\t\t\tnoun := gen.RandomNoun()\n\t\t\t\/\/ We wait for 3 millisecond because the seed we use to generate the\n\t\t\t\/\/ randomness has a unit of 1 nanosecond plus random test flakiness\n\t\t\ttime.Sleep(10)\n\t\t\tExpect(noun).ToNot(Equal(gen.RandomNoun()))\n\t\t})\n\t})\n\n\tDescribe(\"Babble\", func() {\n\t\tIt(\"generates a random adjective noun pair each time it is called\", func() {\n\t\t\twordPair := gen.Babble()\n\t\t\tExpect(wordPair).To(MatchRegexp(\"^\\\\w+-\\\\w+$\"))\n\t\t})\n\t})\n})\n<commit_msg>make this less flaky<commit_after>package randomword_test\n\nimport (\n\t\"time\"\n\n\t. \"code.cloudfoundry.org\/cli\/util\/randomword\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Generator\", func() {\n\tvar gen Generator\n\n\tBeforeEach(func() {\n\t\tgen = Generator{}\n\t})\n\n\tDescribe(\"RandomAdjective\", func() {\n\t\tIt(\"generates a random adjective each time it is called\", func() {\n\t\t\tsetOne := []string{}\n\t\t\tsetTwo := []string{}\n\n\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\tsetOne = append(setOne, gen.RandomAdjective())\n\t\t\t\t\/\/ We wait for 3 millisecond because the seed we use to generate the\n\t\t\t\t\/\/ randomness has a unit of 1 nanosecond plus random test flakiness\n\t\t\t\ttime.Sleep(3)\n\t\t\t\tsetTwo = append(setTwo, gen.RandomAdjective())\n\t\t\t}\n\t\t\tExpect(setOne).ToNot(ConsistOf(setTwo))\n\t\t})\n\t})\n\n\tDescribe(\"RandomNoun\", func() {\n\t\tIt(\"generates a random noun each time it is called\", func() {\n\t\t\tsetOne := []string{}\n\t\t\tsetTwo := []string{}\n\n\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\tsetOne = append(setOne, gen.RandomNoun())\n\t\t\t\t\/\/ We wait for 3 millisecond because the seed we use to generate the\n\t\t\t\t\/\/ randomness has a unit of 1 nanosecond plus random test flakiness\n\t\t\t\ttime.Sleep(3)\n\t\t\t\tsetTwo = append(setTwo, gen.RandomNoun())\n\t\t\t}\n\t\t\tExpect(setOne).ToNot(ConsistOf(setTwo))\n\t\t})\n\t})\n\n\tDescribe(\"Babble\", func() {\n\t\tIt(\"generates a random adjective noun pair each time it is called\", func() {\n\t\t\twordPair := gen.Babble()\n\t\t\tExpect(wordPair).To(MatchRegexp(\"^\\\\w+-\\\\w+$\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cipher_test\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"testing\"\n)\n\nfunc benchmarkAESGCMSign(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tout = aesgcm.Seal(out[:0], nonce[:], nil, buf)\n\t}\n}\n\nfunc benchmarkAESGCMSeal(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\tvar ad [13]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tout = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])\n\t}\n}\n\nfunc benchmarkAESGCMOpen(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\tvar ad [13]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\tout = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := aesgcm.Open(buf[:0], nonce[:], out, ad[:])\n\t\tif err != nil {\n\t\t\tb.Errorf(\"Open: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkAESGCMSeal1K(b *testing.B) {\n\tbenchmarkAESGCMSeal(b, make([]byte, 1024))\n}\n\nfunc BenchmarkAESGCMOpen1K(b *testing.B) {\n\tbenchmarkAESGCMOpen(b, make([]byte, 1024))\n}\n\nfunc BenchmarkAESGCMSign8K(b *testing.B) {\n\tbenchmarkAESGCMSign(b, make([]byte, 8*1024))\n}\n\nfunc BenchmarkAESGCMSeal8K(b *testing.B) {\n\tbenchmarkAESGCMSeal(b, make([]byte, 8*1024))\n}\n\nfunc BenchmarkAESGCMOpen8K(b *testing.B) {\n\tbenchmarkAESGCMOpen(b, make([]byte, 8*1024))\n}\n\n\/\/ If we test exactly 1K blocks, we would generate exact multiples of\n\/\/ the cipher's block size, and the cipher stream fragments would\n\/\/ always be wordsize aligned, whereas non-aligned is a more typical\n\/\/ use-case.\nconst almost1K = 1024 - 5\n\nfunc BenchmarkAESCFBEncrypt1K(b *testing.B) {\n\tbuf := make([]byte, almost1K)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tctr := cipher.NewCFBEncrypter(aes, iv[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tctr.XORKeyStream(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESCFBDecrypt1K(b *testing.B) {\n\tbuf := make([]byte, almost1K)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tctr := cipher.NewCFBDecrypter(aes, iv[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tctr.XORKeyStream(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESOFB1K(b *testing.B) {\n\tbuf := make([]byte, almost1K)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tctr := cipher.NewOFB(aes, iv[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tctr.XORKeyStream(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESCTR1K(b *testing.B) {\n\tbuf := make([]byte, almost1K)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tctr := cipher.NewCTR(aes, iv[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tctr.XORKeyStream(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESCBCEncrypt1K(b *testing.B) {\n\tbuf := make([]byte, 1024)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tcbc := cipher.NewCBCEncrypter(aes, iv[:])\n\tfor i := 0; i < b.N; i++ {\n\t\tcbc.CryptBlocks(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESCBCDecrypt1K(b *testing.B) {\n\tbuf := make([]byte, 1024)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tcbc := cipher.NewCBCDecrypter(aes, iv[:])\n\tfor i := 0; i < b.N; i++ {\n\t\tcbc.CryptBlocks(buf, buf)\n\t}\n}\n<commit_msg>crypto\/cipher: 8K benchmarks for AES stream modes<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cipher_test\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"testing\"\n)\n\nfunc benchmarkAESGCMSign(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tout = aesgcm.Seal(out[:0], nonce[:], nil, buf)\n\t}\n}\n\nfunc benchmarkAESGCMSeal(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\tvar ad [13]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tout = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])\n\t}\n}\n\nfunc benchmarkAESGCMOpen(b *testing.B, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar nonce [12]byte\n\tvar ad [13]byte\n\taes, _ := aes.NewCipher(key[:])\n\taesgcm, _ := cipher.NewGCM(aes)\n\tvar out []byte\n\tout = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := aesgcm.Open(buf[:0], nonce[:], out, ad[:])\n\t\tif err != nil {\n\t\t\tb.Errorf(\"Open: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkAESGCMSeal1K(b *testing.B) {\n\tbenchmarkAESGCMSeal(b, make([]byte, 1024))\n}\n\nfunc BenchmarkAESGCMOpen1K(b *testing.B) {\n\tbenchmarkAESGCMOpen(b, make([]byte, 1024))\n}\n\nfunc BenchmarkAESGCMSign8K(b *testing.B) {\n\tbenchmarkAESGCMSign(b, make([]byte, 8*1024))\n}\n\nfunc BenchmarkAESGCMSeal8K(b *testing.B) {\n\tbenchmarkAESGCMSeal(b, make([]byte, 8*1024))\n}\n\nfunc BenchmarkAESGCMOpen8K(b *testing.B) {\n\tbenchmarkAESGCMOpen(b, make([]byte, 8*1024))\n}\n\nfunc benchmarkAESStream(b *testing.B, mode func(cipher.Block, []byte) cipher.Stream, buf []byte) {\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tstream := mode(aes, iv[:])\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tstream.XORKeyStream(buf, buf)\n\t}\n}\n\n\/\/ If we test exactly 1K blocks, we would generate exact multiples of\n\/\/ the cipher's block size, and the cipher stream fragments would\n\/\/ always be wordsize aligned, whereas non-aligned is a more typical\n\/\/ use-case.\nconst almost1K = 1024 - 5\nconst almost8K = 8*1024 - 5\n\nfunc BenchmarkAESCFBEncrypt1K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewCFBEncrypter, make([]byte, almost1K))\n}\n\nfunc BenchmarkAESCFBDecrypt1K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewCFBDecrypter, make([]byte, almost1K))\n}\n\nfunc BenchmarkAESCFBDecrypt8K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewCFBDecrypter, make([]byte, almost8K))\n}\n\nfunc BenchmarkAESOFB1K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewOFB, make([]byte, almost1K))\n}\n\nfunc BenchmarkAESCTR1K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewCTR, make([]byte, almost1K))\n}\n\nfunc BenchmarkAESCTR8K(b *testing.B) {\n\tbenchmarkAESStream(b, cipher.NewCTR, make([]byte, almost8K))\n}\n\nfunc BenchmarkAESCBCEncrypt1K(b *testing.B) {\n\tbuf := make([]byte, 1024)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tcbc := cipher.NewCBCEncrypter(aes, iv[:])\n\tfor i := 0; i < b.N; i++ {\n\t\tcbc.CryptBlocks(buf, buf)\n\t}\n}\n\nfunc BenchmarkAESCBCDecrypt1K(b *testing.B) {\n\tbuf := make([]byte, 1024)\n\tb.SetBytes(int64(len(buf)))\n\n\tvar key [16]byte\n\tvar iv [16]byte\n\taes, _ := aes.NewCipher(key[:])\n\tcbc := cipher.NewCBCDecrypter(aes, iv[:])\n\tfor i := 0; i < b.N; i++ {\n\t\tcbc.CryptBlocks(buf, buf)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * EliasDB\n *\n * Copyright 2016 Matthias Ladkau. All rights reserved.\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n *\/\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"devt.de\/common\/fileutil\"\n\t\"devt.de\/common\/httputil\"\n\t\"devt.de\/eliasdb\/api\"\n\t\"devt.de\/eliasdb\/graph\/graphstorage\"\n)\n\nconst testdb = \"testdb\"\n\nconst invalidFileName = \"**\" + string(0x0)\n\nvar printLog = []string{}\nvar errorLog = []string{}\n\nvar printLogging = false\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tbasepath = testdb + \"\/\"\n\n\t\/\/ Log all print and error messagesí\n\n\tprint = func(v ...interface{}) {\n\t\tif printLogging {\n\t\t\tfmt.Println(v...)\n\t\t}\n\t\tprintLog = append(printLog, fmt.Sprint(v...))\n\t}\n\tfatal = func(v ...interface{}) {\n\t\tif printLogging {\n\t\t\tfmt.Println(v...)\n\t\t}\n\t\terrorLog = append(errorLog, fmt.Sprint(v...))\n\t}\n\n\tdefer func() {\n\t\tfatal = log.Fatal\n\t\tbasepath = \"\"\n\t}()\n\n\tif res, _ := fileutil.PathExists(testdb); res {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t}\n\n\tensurePath(testdb)\n\n\t\/\/ Run the tests\n\n\tres := m.Run()\n\n\tif res, _ := fileutil.PathExists(testdb); res {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t}\n\n\tos.Exit(res)\n}\n\nfunc TestMainNormalCase(t *testing.T) {\n\n\t\/\/ Make sure to reset the DefaultServeMux\n\n\tdefer func() { http.DefaultServeMux = http.NewServeMux() }()\n\n\t\/\/ Make sure to remove any files\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t\ttime.Sleep(time.Duration(100) * time.Millisecond)\n\t\tensurePath(testdb)\n\t}()\n\n\t\/\/ Reset logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\terrorChan := make(chan error)\n\n\t\/\/ Test handle function\n\n\tapi.HandleFunc = func(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\t\tdefer func() {\n\t\t\tif r := recover(); r == nil {\n\t\t\t\tt.Error(\"Request with nil request and nil response should cause a panic.\")\n\t\t\t}\n\t\t}()\n\t\thandler(nil, nil)\n\t}\n\n\t\/\/ Kick off main function\n\n\tgo func() {\n\t\tmain()\n\t\terrorChan <- nil\n\t}()\n\n\t\/\/ To exit the main function the lock watcher thread\n\t\/\/ has to recognise that the lockfile was modified\n\n\ttime.Sleep(time.Duration(2) * time.Second)\n\n\t\/\/ Do a normal shutdown with a log file\n\n\tif err := shutdownWithLogFile(); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ Wait for the main function to end\n\n\tif err := <-errorChan; err != nil || len(errorLog) != 0 {\n\t\tt.Error(\"Unexpected ending of main thread:\", err, errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Check the print log\n\n\tif logString := strings.Join(printLog, \"\\n\"); logString != `\nEliasDB 0.8\nStarting datastore in testdb\/db\nCreating GraphManager instance\nCreating key (key.pem) and certificate (cert.pem) in: ssl\nEnsuring web folder: testdb\/web\nEnsuring web termminal: testdb\/web\/db\/term.html\nStarting server on: localhost:9090\nWriting fingerprint file: testdb\/web\/fingerprint.json\nWaiting for shutdown\nLockfile was modified\nShutting down\nClosing datastore`[1:] {\n\t\tt.Error(\"Unexpected log:\", logString)\n\t\treturn\n\t}\n\n}\n\nfunc TestMainErrorCases(t *testing.T) {\n\n\t\/\/ Make sure to reset the DefaultServeMux\n\n\tdefer func() { http.DefaultServeMux = http.NewServeMux() }()\n\n\t\/\/ Make sure to remove any files\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t\ttime.Sleep(time.Duration(100) * time.Millisecond)\n\t\tensurePath(testdb)\n\t}()\n\n\t\/\/ Setup config and logs\n\n\tConfig = nil\n\torigConfFile := ConfigFile\n\torigBasePath := basepath\n\n\tbasepath = \"\"\n\tConfigFile = invalidFileName\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 1 || (errorLog[0] != \"stat **\"+string(0)+\": invalid argument\" &&\n\t\terrorLog[0] != \"Lstat **\"+string(0)+\": invalid argument\") {\n\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Set back variables\n\n\tbasepath = origBasePath\n\tConfigFile = origConfFile\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\tdata := make(map[string]interface{})\n\tfor k, v := range DefaultConfig {\n\t\tdata[k] = v\n\t}\n\n\tConfig = data\n\n\t\/\/ Test db access error\n\n\tConfig[LocationDatastore] = invalidFileName\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 2 ||\n\t\t!strings.Contains(errorLog[0], \"Could not create directory\") ||\n\t\t!strings.Contains(errorLog[1], \"Failed to open graph storage\") {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Set back logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\t\/\/ Use memory only storage\n\n\tConfig[MemoryOnlyStorage] = true\n\n\t\/\/ Test failed ssl key generation\n\n\tConfig[HTTPSKey] = invalidFileName\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 1 ||\n\t\t!strings.Contains(errorLog[0], \"Failed to generate ssl key and certificate\") {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\tConfig[HTTPSKey] = DefaultConfig[HTTPSKey]\n\n\t\/\/ Set back logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\t\/\/ Special error when closing the store\n\n\tgraphstorage.MgsRetClose = errors.New(\"Testerror\")\n\n\t\/\/ Use 9090\n\n\tConfig[HTTPSPort] = \"9090\"\n\n\tths := httputil.HTTPServer{}\n\tgo ths.RunHTTPServer(\":9090\", nil)\n\n\ttime.Sleep(time.Duration(1) * time.Second)\n\n\tmain()\n\n\tths.Shutdown()\n\n\ttime.Sleep(time.Duration(1) * time.Second)\n\n\tif ths.Running {\n\t\tt.Error(\"Server should not be running\")\n\t\treturn\n\t}\n\n\tif len(errorLog) != 2 || (errorLog[0] != \"listen tcp :9090\"+\n\t\t\": bind: address already in use\" && errorLog[0] != \"listen tcp :9090\"+\n\t\t\": bind: Only one usage of each socket address (protocol\/network address\/port) is normally permitted.\") ||\n\t\terrorLog[1] != \"Testerror\" {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\tConfig = nil\n}\n\nfunc shutdownWithLogFile() error {\n\tfile, err := os.OpenFile(basepath+config(LockFile), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)\n\tdefer file.Close()\n\tif err != nil {\n\t\tfmt.Println(errorLog)\n\t\treturn err\n\t}\n\n\t_, err = file.Write([]byte(\"a\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Adding rev output in test<commit_after>\/*\n * EliasDB\n *\n * Copyright 2016 Matthias Ladkau. All rights reserved.\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n *\/\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"devt.de\/common\/fileutil\"\n\t\"devt.de\/common\/httputil\"\n\t\"devt.de\/eliasdb\/api\"\n\t\"devt.de\/eliasdb\/graph\/graphstorage\"\n)\n\nconst testdb = \"testdb\"\n\nconst invalidFileName = \"**\" + string(0x0)\n\nvar printLog = []string{}\nvar errorLog = []string{}\n\nvar printLogging = false\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tbasepath = testdb + \"\/\"\n\n\t\/\/ Log all print and error messagesí\n\n\tprint = func(v ...interface{}) {\n\t\tif printLogging {\n\t\t\tfmt.Println(v...)\n\t\t}\n\t\tprintLog = append(printLog, fmt.Sprint(v...))\n\t}\n\tfatal = func(v ...interface{}) {\n\t\tif printLogging {\n\t\t\tfmt.Println(v...)\n\t\t}\n\t\terrorLog = append(errorLog, fmt.Sprint(v...))\n\t}\n\n\tdefer func() {\n\t\tfatal = log.Fatal\n\t\tbasepath = \"\"\n\t}()\n\n\tif res, _ := fileutil.PathExists(testdb); res {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t}\n\n\tensurePath(testdb)\n\n\t\/\/ Run the tests\n\n\tres := m.Run()\n\n\tif res, _ := fileutil.PathExists(testdb); res {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t}\n\n\tos.Exit(res)\n}\n\nfunc TestMainNormalCase(t *testing.T) {\n\n\t\/\/ Make sure to reset the DefaultServeMux\n\n\tdefer func() { http.DefaultServeMux = http.NewServeMux() }()\n\n\t\/\/ Make sure to remove any files\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t\ttime.Sleep(time.Duration(100) * time.Millisecond)\n\t\tensurePath(testdb)\n\t}()\n\n\t\/\/ Reset logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\terrorChan := make(chan error)\n\n\t\/\/ Test handle function\n\n\tapi.HandleFunc = func(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\t\tdefer func() {\n\t\t\tif r := recover(); r == nil {\n\t\t\t\tt.Error(\"Request with nil request and nil response should cause a panic.\")\n\t\t\t}\n\t\t}()\n\t\thandler(nil, nil)\n\t}\n\n\t\/\/ Kick off main function\n\n\tgo func() {\n\t\tmain()\n\t\terrorChan <- nil\n\t}()\n\n\t\/\/ To exit the main function the lock watcher thread\n\t\/\/ has to recognise that the lockfile was modified\n\n\ttime.Sleep(time.Duration(2) * time.Second)\n\n\t\/\/ Do a normal shutdown with a log file\n\n\tif err := shutdownWithLogFile(); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ Wait for the main function to end\n\n\tif err := <-errorChan; err != nil || len(errorLog) != 0 {\n\t\tt.Error(\"Unexpected ending of main thread:\", err, errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Check the print log\n\n\tif logString := strings.Join(printLog, \"\\n\"); logString != `\nEliasDB 0.8.0\nStarting datastore in testdb\/db\nCreating GraphManager instance\nCreating key (key.pem) and certificate (cert.pem) in: ssl\nEnsuring web folder: testdb\/web\nEnsuring web termminal: testdb\/web\/db\/term.html\nStarting server on: localhost:9090\nWriting fingerprint file: testdb\/web\/fingerprint.json\nWaiting for shutdown\nLockfile was modified\nShutting down\nClosing datastore`[1:] {\n\t\tt.Error(\"Unexpected log:\", logString)\n\t\treturn\n\t}\n\n}\n\nfunc TestMainErrorCases(t *testing.T) {\n\n\t\/\/ Make sure to reset the DefaultServeMux\n\n\tdefer func() { http.DefaultServeMux = http.NewServeMux() }()\n\n\t\/\/ Make sure to remove any files\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(testdb); err != nil {\n\t\t\tfmt.Print(\"Could not remove test directory:\", err.Error())\n\t\t}\n\t\ttime.Sleep(time.Duration(100) * time.Millisecond)\n\t\tensurePath(testdb)\n\t}()\n\n\t\/\/ Setup config and logs\n\n\tConfig = nil\n\torigConfFile := ConfigFile\n\torigBasePath := basepath\n\n\tbasepath = \"\"\n\tConfigFile = invalidFileName\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 1 || (errorLog[0] != \"stat **\"+string(0)+\": invalid argument\" &&\n\t\terrorLog[0] != \"Lstat **\"+string(0)+\": invalid argument\") {\n\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Set back variables\n\n\tbasepath = origBasePath\n\tConfigFile = origConfFile\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\tdata := make(map[string]interface{})\n\tfor k, v := range DefaultConfig {\n\t\tdata[k] = v\n\t}\n\n\tConfig = data\n\n\t\/\/ Test db access error\n\n\tConfig[LocationDatastore] = invalidFileName\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 2 ||\n\t\t!strings.Contains(errorLog[0], \"Could not create directory\") ||\n\t\t!strings.Contains(errorLog[1], \"Failed to open graph storage\") {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\t\/\/ Set back logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\t\/\/ Use memory only storage\n\n\tConfig[MemoryOnlyStorage] = true\n\n\t\/\/ Test failed ssl key generation\n\n\tConfig[HTTPSKey] = invalidFileName\n\n\tmain()\n\n\t\/\/ Check that an error happened\n\n\tif len(errorLog) != 1 ||\n\t\t!strings.Contains(errorLog[0], \"Failed to generate ssl key and certificate\") {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\tConfig[HTTPSKey] = DefaultConfig[HTTPSKey]\n\n\t\/\/ Set back logs\n\n\tprintLog = []string{}\n\terrorLog = []string{}\n\n\t\/\/ Special error when closing the store\n\n\tgraphstorage.MgsRetClose = errors.New(\"Testerror\")\n\n\t\/\/ Use 9090\n\n\tConfig[HTTPSPort] = \"9090\"\n\n\tths := httputil.HTTPServer{}\n\tgo ths.RunHTTPServer(\":9090\", nil)\n\n\ttime.Sleep(time.Duration(1) * time.Second)\n\n\tmain()\n\n\tths.Shutdown()\n\n\ttime.Sleep(time.Duration(1) * time.Second)\n\n\tif ths.Running {\n\t\tt.Error(\"Server should not be running\")\n\t\treturn\n\t}\n\n\tif len(errorLog) != 2 || (errorLog[0] != \"listen tcp :9090\"+\n\t\t\": bind: address already in use\" && errorLog[0] != \"listen tcp :9090\"+\n\t\t\": bind: Only one usage of each socket address (protocol\/network address\/port) is normally permitted.\") ||\n\t\terrorLog[1] != \"Testerror\" {\n\t\tt.Error(\"Unexpected error:\", errorLog)\n\t\treturn\n\t}\n\n\tConfig = nil\n}\n\nfunc shutdownWithLogFile() error {\n\tfile, err := os.OpenFile(basepath+config(LockFile), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)\n\tdefer file.Close()\n\tif err != nil {\n\t\tfmt.Println(errorLog)\n\t\treturn err\n\t}\n\n\t_, err = file.Write([]byte(\"a\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc initTestDb(t *testing.T) *sql.DB {\n\tuser := os.Getenv(\"DB_USER\")\n\tpass := os.Getenv(\"DB_PASS\")\n\tdb_name := os.Getenv(\"DB_TEST_DB\")\n\tif len(db_name) == 0 {\n\t\tt.Fatal(\"Must specify a db so we don't accidentally overwrite anything.\")\n\t}\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\", user, pass, db_name))\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't connect to DB.\", err)\n\t}\n\treturn db\n}\n\nfunc make_user(id int, username, email string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO auth_user VALUES (\n $1, $2,\n 'first', 'last', \n $3, 'pass', \n TRUE, TRUE, TRUE, now(), now()\n )`, id, username, email)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create user.\", err)\n\t}\n}\n\nfunc make_repo(id int, user, project string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_repo VALUES (\n $1, $2, $3, NULL, NULL\n )`, id, user, project)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create repo.\", err)\n\t}\n}\n\nfunc make_userprofile(id, uid int, time_interval string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_userprofile\n (id, user_id, max_time_interval_between_emails) VALUES (\n $1, $2, $3\n )`, id, uid, time_interval)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create userprofile.\", err)\n\t}\n}\n\nfunc make_userprofile_repo(id, userprofile_id, repo_id int, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_userprofile_repos\n (id, userprofile_id, repo_id) VALUES (\n $1, $2, $3\n )`, id, userprofile_id, repo_id)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create userprofile_repo.\", err)\n\t}\n}\n\nfunc TestGetUser_Exists(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\n\tdefer db.Exec(\"DELETE from auth_user\")\n\tmake_user(1, \"username\", \"em@a.il\", db, t)\n\n\tu, err := dbc.GetUser(1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif u.Id != 1 {\n\t\tt.Fatal(\"Incorrect user id.\")\n\t\treturn\n\t}\n\tif u.username != \"username\" {\n\t\tt.Fatal(\"Incorrect username.\")\n\t\treturn\n\t}\n\tif u.Email != \"em@a.il\" {\n\t\tt.Fatal(\"Incorrect email.\")\n\t\treturn\n\t}\n}\n\nfunc TestGetUser_NoExists(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\n\t_, err := dbc.GetUser(1)\n\tif err == nil {\n\t\tt.Fatal(\"No error when accessing invalid user id.\")\n\t\treturn\n\t}\n}\n\nfunc TestGetUserRepos_None(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\tuid := 1\n\n\tdefer db.Exec(\"DELETE FROM auth_user;\")\n\tmake_user(uid, \"username\", \"em@a.il\", db, t)\n\tdefer db.Exec(\"DELETE FROM streamer_userprofile;\")\n\tmake_userprofile(2, uid, \"D\", db, t) \/\/ Daily, should really be enum or something.\n\tdefer db.Exec(\"DELETE FROM streamer_repo;\")\n\tmake_repo(3, \"user\", \"repo\", db, t)\n\tdefer db.Exec(\"DELETE FROM streamer_userprofile_repos;\")\n\tmake_userprofile_repo(4, 2, 3, db, t)\n\n\tur, err := dbc.GetUserRepos(uid)\n\tif err != nil {\n\t\tt.Fatal(\"Can't get user's repos.\", err)\n\t\treturn\n\t}\n\tif len(ur) != 1 {\n\t\tfmt.Println(ur)\n\t\tt.Fatal(\"Got user's repos, but expected len 1, but got \", len(ur))\n\t\treturn\n\t}\n}\n\nfunc testGetUserRepos_One(t *testing.T) {\n}\n\nfunc testGetUserRepos_Many(t *testing.T) {\n}\n\nfunc testGetRepoActivity_OtherRepo(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_OtherUser(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_Old(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_AlreadySent(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_NeverSent(t *testing.T) {\n\n}\n\nfunc testMarkUserRepoSent(t *testing.T) {\n\n}\n<commit_msg>add zero userrepo test to the thing with the correct name & add single case.<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc initTestDb(t *testing.T) *sql.DB {\n\tuser := os.Getenv(\"DB_USER\")\n\tpass := os.Getenv(\"DB_PASS\")\n\tdb_name := os.Getenv(\"DB_TEST_DB\")\n\tif len(db_name) == 0 {\n\t\tt.Fatal(\"Must specify a db so we don't accidentally overwrite anything.\")\n\t}\n\tdb, err := sql.Open(\"postgres\", fmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\", user, pass, db_name))\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't connect to DB.\", err)\n\t}\n\treturn db\n}\n\nfunc make_user(id int, username, email string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO auth_user VALUES (\n $1, $2,\n 'first', 'last', \n $3, 'pass', \n TRUE, TRUE, TRUE, now(), now()\n )`, id, username, email)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create user.\", err)\n\t}\n}\n\nfunc make_repo(id int, user, project string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_repo VALUES (\n $1, $2, $3, NULL, NULL\n )`, id, user, project)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create repo.\", err)\n\t}\n}\n\nfunc make_userprofile(id, uid int, time_interval string, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_userprofile\n (id, user_id, max_time_interval_between_emails) VALUES (\n $1, $2, $3\n )`, id, uid, time_interval)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create userprofile.\", err)\n\t}\n}\n\nfunc make_userprofile_repo(id, userprofile_id, repo_id int, db *sql.DB, t *testing.T) {\n\t_, err := db.Exec(`INSERT INTO streamer_userprofile_repos\n (id, userprofile_id, repo_id) VALUES (\n $1, $2, $3\n )`, id, userprofile_id, repo_id)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to create userprofile_repo.\", err)\n\t}\n}\n\nfunc TestGetUser_Exists(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\n\tdefer db.Exec(\"DELETE from auth_user\")\n\tmake_user(1, \"username\", \"em@a.il\", db, t)\n\n\tu, err := dbc.GetUser(1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif u.Id != 1 {\n\t\tt.Fatal(\"Incorrect user id.\")\n\t\treturn\n\t}\n\tif u.username != \"username\" {\n\t\tt.Fatal(\"Incorrect username.\")\n\t\treturn\n\t}\n\tif u.Email != \"em@a.il\" {\n\t\tt.Fatal(\"Incorrect email.\")\n\t\treturn\n\t}\n}\n\nfunc TestGetUser_NoExists(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\n\t_, err := dbc.GetUser(1)\n\tif err == nil {\n\t\tt.Fatal(\"No error when accessing invalid user id.\")\n\t\treturn\n\t}\n}\n\nfunc TestGetUserRepos_None(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\tuid := 1\n\n\tdefer db.Exec(\"DELETE FROM auth_user;\")\n\tmake_user(uid, \"username\", \"em@a.il\", db, t)\n\tdefer db.Exec(\"DELETE FROM streamer_userprofile;\")\n\tmake_userprofile(2, uid, \"D\", db, t) \/\/ Daily, should really be enum or something.\n\tdefer db.Exec(\"DELETE FROM streamer_repo;\")\n\tmake_repo(3, \"user\", \"repo\", db, t)\n\n\tur, err := dbc.GetUserRepos(uid)\n\tif err != nil {\n\t\tt.Fatal(\"Can't get user's repos.\", err)\n\t}\n\tif len(ur) != 0 {\n\t\tfmt.Println(ur)\n\t\tt.Fatal(\"Got user's repos, but expected len 0, but got \", len(ur))\n\t}\n}\n\nfunc testGetUserRepos_One(t *testing.T) {\n\tdb := initTestDb(t)\n\tdbc := DbController{db}\n\tdefer dbc.Close()\n\tuid := 1\n\n\tdefer db.Exec(\"DELETE FROM auth_user;\")\n\tmake_user(uid, \"username\", \"em@a.il\", db, t)\n\tdefer db.Exec(\"DELETE FROM streamer_userprofile;\")\n\tmake_userprofile(2, uid, \"D\", db, t) \/\/ Daily, should really be enum or something.\n\tdefer db.Exec(\"DELETE FROM streamer_repo;\")\n\tmake_repo(3, \"user\", \"repo\", db, t)\n\tdefer db.Exec(\"DELETE FROM streamer_userprofile_repos;\")\n\tmake_userprofile_repo(4, 2, 3, db, t)\n\n\tur, err := dbc.GetUserRepos(uid)\n\tif err != nil {\n\t\tt.Fatal(\"Can't get user's repos.\", err)\n\t\treturn\n\t}\n\tif len(ur) != 1 {\n\t\tfmt.Println(ur)\n\t\tt.Fatal(\"Got user's repos, but expected len 1, but got \", len(ur))\n\t\treturn\n\t}\n}\n\nfunc testGetUserRepos_Many(t *testing.T) {\n}\n\nfunc testGetRepoActivity_OtherRepo(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_OtherUser(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_Old(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_AlreadySent(t *testing.T) {\n\n}\n\nfunc testGetRepoActivity_NeverSent(t *testing.T) {\n\n}\n\nfunc testMarkUserRepoSent(t *testing.T) {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package integrationtest\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/jacksontj\/dataman\/src\/query\"\n\t\"github.com\/jacksontj\/dataman\/src\/router_node\"\n\t\"github.com\/jacksontj\/dataman\/src\/router_node\/metadata\"\n\t\"github.com\/jacksontj\/dataman\/src\/storage_node\"\n\t\"github.com\/jacksontj\/dataman\/src\/task_node\"\n)\n\ntype Data map[string]map[string][]map[string]interface{}\n\ntype Queries []*query.Query\n\n\/\/ For this we assume these are empty and we can do whatever we want to them!\nfunc RunIntegrationTests(t *testing.T, task *tasknode.TaskNode, router *routernode.RouterNode, datasource *storagenode.DatasourceInstance) {\n\n\t\/\/ Find all tests\n\tfiles, err := ioutil.ReadDir(\"tests\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ TODO: subtest stuff\n\t\tt.Run(file.Name(), func(t *testing.T) {\n\t\t\trunIntegrationTest(file.Name(), t, task, router, datasource)\n\t\t})\n\t}\n}\n\nfunc runIntegrationTest(testDir string, t *testing.T, task *tasknode.TaskNode, router *routernode.RouterNode, datasource *storagenode.DatasourceInstance) {\n\t\/\/ Load the various files\n\tschema := make(map[string]*metadata.Database)\n\tschemaBytes, err := ioutil.ReadFile(path.Join(\"tests\", testDir, \"\/schema.json\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to read schema for test %s: %v\", testDir, err)\n\t}\n\tif err := json.Unmarshal(schemaBytes, &schema); err != nil {\n\t\tt.Fatalf(\"Unable to load schema for test %s: %v\", testDir, err)\n\t}\n\n\t\/\/ Load the schema\n\tfor _, database := range schema {\n\t\tif err := task.EnsureExistsDatabase(context.Background(), database); err != nil {\n\t\t\tt.Fatalf(\"Unable to ensureSchema in test %s for database %s: %v\", testDir, database.Name, err)\n\t\t}\n\t}\n\n\t\/\/ Block the router waiting on an update from the tasknode\n\trouter.FetchMeta()\n\n\t\/\/ Load data\n\tdata := make(Data)\n\tdataString, err := ioutil.ReadFile(path.Join(\"tests\", testDir, \"\/data.json\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to read data for test %s: %v\", testDir, err)\n\t}\n\tif err := json.Unmarshal([]byte(dataString), &data); err != nil {\n\t\tt.Fatalf(\"Unable to load data for test %s: %v\", testDir, err)\n\t}\n\n\tfor databaseName, collectionMap := range data {\n\t\tfor collectionName, recordList := range collectionMap {\n\t\t\tfor _, record := range recordList {\n\t\t\t\tresult := router.HandleQuery(context.Background(), &query.Query{\n\t\t\t\t\tType: query.Insert,\n\t\t\t\t\tArgs: map[string]interface{}{\n\t\t\t\t\t\t\"db\": databaseName,\n\t\t\t\t\t\t\"collection\": collectionName,\n\t\t\t\t\t\t\"record\": record,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif result.ValidationError != nil {\n\t\t\t\t\tt.Fatalf(\"Valdiation error loading data into %s.%s: %v\", databaseName, collectionName, result.ValidationError)\n\t\t\t\t}\n\t\t\t\tif result.Error != \"\" {\n\t\t\t\t\tt.Fatalf(\"Error loading data into %s.%s: %v\", databaseName, collectionName, result.Error)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttestsDir := path.Join(\"tests\", testDir, \"\/\")\n\n\twalkFunc := func(fpath string, info os.FileInfo, err error) error {\n\t\t\/\/ If its a directory, skip it-- we'll let something else grab it\n\t\tif !info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ if this is a test directory it must have a query.json file\n\t\t\/\/ Load the query\n\t\tq := &query.Query{}\n\t\tqueryBytes, err := ioutil.ReadFile(path.Join(fpath, \"query.json\"))\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tt.Fatalf(\"Unable to read queryBytes for test %s.%s: %v\", testDir, info.Name(), err)\n\t\t}\n\t\tif err := json.Unmarshal([]byte(queryBytes), &q); err != nil {\n\t\t\tt.Fatalf(\"Unable to load queries for test %s.%s: %v\", testDir, info.Name(), err)\n\t\t}\n\n\t\trelFilePath, err := filepath.Rel(testsDir, fpath)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting relative path? Shouldn't be possible: %v\", err)\n\t\t}\n\t\tt.Run(relFilePath, func(t *testing.T) {\n\n\t\t\t\/\/ Run the query\n\t\t\tresult := router.HandleQuery(context.Background(), q)\n\n\t\t\t\/\/ Check result\n\n\t\t\t\/\/ write out results\n\t\t\tresultPath := path.Join(fpath, \"result.json\")\n\t\t\tresultBytes, _ := json.MarshalIndent(result, \"\", \" \")\n\t\t\tioutil.WriteFile(resultPath, resultBytes, 0644)\n\n\t\t\t\/\/ compare against baseline if it exists\n\t\t\tbaselinePath := path.Join(fpath, \"baseline.json\")\n\t\t\tbaselineResultBytes, err := ioutil.ReadFile(baselinePath)\n\t\t\tif err != nil {\n\t\t\t\tt.Skip(\"No baseline.json found, skipping comparison\")\n\t\t\t} else {\n\t\t\t\tbaselineResultBytes = bytes.TrimSpace(baselineResultBytes)\n\t\t\t\tresultBytes = bytes.TrimSpace(resultBytes)\n\t\t\t\tif !bytes.Equal(baselineResultBytes, resultBytes) {\n\t\t\t\t\tt.Fatalf(\"Mismatch of results and baseline!\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\treturn nil\n\t}\n\n\tif err := filepath.Walk(testsDir, walkFunc); err != nil {\n\t\tt.Errorf(\"Error walking: %v\", err)\n\t}\n\n}\n<commit_msg>Update tests to cleanup after themselves<commit_after>package integrationtest\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/jacksontj\/dataman\/src\/query\"\n\t\"github.com\/jacksontj\/dataman\/src\/router_node\"\n\t\"github.com\/jacksontj\/dataman\/src\/router_node\/metadata\"\n\t\"github.com\/jacksontj\/dataman\/src\/storage_node\"\n\t\"github.com\/jacksontj\/dataman\/src\/task_node\"\n)\n\ntype Data map[string]map[string][]map[string]interface{}\n\ntype Queries []*query.Query\n\n\/\/ For this we assume these are empty and we can do whatever we want to them!\nfunc RunIntegrationTests(t *testing.T, task *tasknode.TaskNode, router *routernode.RouterNode, datasource *storagenode.DatasourceInstance) {\n\n\t\/\/ Find all tests\n\tfiles, err := ioutil.ReadDir(\"tests\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ TODO: subtest stuff\n\t\tt.Run(file.Name(), func(t *testing.T) {\n\t\t\trunIntegrationTest(file.Name(), t, task, router, datasource)\n\t\t})\n\t}\n}\n\nfunc runIntegrationTest(testDir string, t *testing.T, task *tasknode.TaskNode, router *routernode.RouterNode, datasource *storagenode.DatasourceInstance) {\n\t\/\/ Load the various files\n\tschema := make(map[string]*metadata.Database)\n\tschemaBytes, err := ioutil.ReadFile(path.Join(\"tests\", testDir, \"\/schema.json\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to read schema for test %s: %v\", testDir, err)\n\t}\n\tif err := json.Unmarshal(schemaBytes, &schema); err != nil {\n\t\tt.Fatalf(\"Unable to load schema for test %s: %v\", testDir, err)\n\t}\n\n\t\/\/ Load the schema\n\tfor _, database := range schema {\n\t\tif err := task.EnsureExistsDatabase(context.Background(), database); err != nil {\n\t\t\tt.Fatalf(\"Unable to ensureSchema in test %s for database %s: %v\", testDir, database.Name, err)\n\t\t}\n\t}\n\n\t\/\/ Block the router waiting on an update from the tasknode\n\trouter.FetchMeta()\n\n\t\/\/ Load data\n\tdata := make(Data)\n\tdataString, err := ioutil.ReadFile(path.Join(\"tests\", testDir, \"\/data.json\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to read data for test %s: %v\", testDir, err)\n\t}\n\tif err := json.Unmarshal([]byte(dataString), &data); err != nil {\n\t\tt.Fatalf(\"Unable to load data for test %s: %v\", testDir, err)\n\t}\n\n\tfor databaseName, collectionMap := range data {\n\t\tfor collectionName, recordList := range collectionMap {\n\t\t\tfor _, record := range recordList {\n\t\t\t\tresult := router.HandleQuery(context.Background(), &query.Query{\n\t\t\t\t\tType: query.Insert,\n\t\t\t\t\tArgs: map[string]interface{}{\n\t\t\t\t\t\t\"db\": databaseName,\n\t\t\t\t\t\t\"collection\": collectionName,\n\t\t\t\t\t\t\"record\": record,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif result.ValidationError != nil {\n\t\t\t\t\tt.Fatalf(\"Valdiation error loading data into %s.%s: %v\", databaseName, collectionName, result.ValidationError)\n\t\t\t\t}\n\t\t\t\tif result.Error != \"\" {\n\t\t\t\t\tt.Fatalf(\"Error loading data into %s.%s: %v\", databaseName, collectionName, result.Error)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttestsDir := path.Join(\"tests\", testDir, \"\/\")\n\n\twalkFunc := func(fpath string, info os.FileInfo, err error) error {\n\t\t\/\/ If its a directory, skip it-- we'll let something else grab it\n\t\tif !info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ if this is a test directory it must have a query.json file\n\t\t\/\/ Load the query\n\t\tq := &query.Query{}\n\t\tqueryBytes, err := ioutil.ReadFile(path.Join(fpath, \"query.json\"))\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tt.Fatalf(\"Unable to read queryBytes for test %s.%s: %v\", testDir, info.Name(), err)\n\t\t}\n\t\tif err := json.Unmarshal([]byte(queryBytes), &q); err != nil {\n\t\t\tt.Fatalf(\"Unable to load queries for test %s.%s: %v\", testDir, info.Name(), err)\n\t\t}\n\n\t\trelFilePath, err := filepath.Rel(testsDir, fpath)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting relative path? Shouldn't be possible: %v\", err)\n\t\t}\n\t\tt.Run(relFilePath, func(t *testing.T) {\n\n\t\t\t\/\/ Run the query\n\t\t\tresult := router.HandleQuery(context.Background(), q)\n\n\t\t\t\/\/ Check result\n\n\t\t\t\/\/ write out results\n\t\t\tresultPath := path.Join(fpath, \"result.json\")\n\t\t\tresultBytes, _ := json.MarshalIndent(result, \"\", \" \")\n\t\t\tioutil.WriteFile(resultPath, resultBytes, 0644)\n\n\t\t\t\/\/ compare against baseline if it exists\n\t\t\tbaselinePath := path.Join(fpath, \"baseline.json\")\n\t\t\tbaselineResultBytes, err := ioutil.ReadFile(baselinePath)\n\t\t\tif err != nil {\n\t\t\t\tt.Skip(\"No baseline.json found, skipping comparison\")\n\t\t\t} else {\n\t\t\t\tbaselineResultBytes = bytes.TrimSpace(baselineResultBytes)\n\t\t\t\tresultBytes = bytes.TrimSpace(resultBytes)\n\t\t\t\tif !bytes.Equal(baselineResultBytes, resultBytes) {\n\t\t\t\t\tt.Fatalf(\"Mismatch of results and baseline!\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\treturn nil\n\t}\n\n\tif err := filepath.Walk(testsDir, walkFunc); err != nil {\n\t\tt.Errorf(\"Error walking: %v\", err)\n\t}\n\n\t\/\/ Assuming we finished properly, lets remove the things we added\n\tfor _, database := range schema {\n\t\tif err := task.EnsureDoesntExistDatabase(context.Background(), database.Name); err != nil {\n\t\t\tt.Fatalf(\"Unable to remove in test %s for database %s: %v\", testDir, database.Name, err)\n\t\t}\n\t}\n\n\t\/\/ Block the router waiting on an update from the tasknode\n\trouter.FetchMeta()\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nconst _pem = `-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA19lGVsTqIT5iiNYRgnoY1CwkbETW5cq+Rzk5v\/kTlf31XpSU\n70HVWkbTERECjaYdXM2gGcbb+sxpq6GtXf1M3kVomycqhxwhPv4Cr6Xp4WT\/jkFx\n9z+FFzpeodGJWjOH6L2H5uX1Cvr9EDdQp9t9\/J32\/qBFntY8GwoUI\/y\/1MSTmMiF\ntupdMODN064vd3gyMKTwrlQ8tZM6aYuyOPsutLlUY7M5x5FwMDYvnPDSeyT\/Iw0z\ns3B+NCyqeeMd2T7YzQFnRATj0M7rM5LoSs7DVqVriOEABssFyLj31PboaoLhOKgc\nqoM9khkNzr7FHVvi+DhYM2jD0DwvqZLN6NmnLwIDAQABAoIBAQCGVj+kuSFOV1lT\n+IclQYA6bM6uY5mroqcSBNegVxCNhWU03BxlW\/\/BE9tA\/+kq53vWylMeN9mpGZea\nriEMIh25KFGWXqXlOOioH8bkMsqA8S7sBmc7jljyv+0toQ9vCCtJ+sueNPhxQQxH\nD2YvUjfzBQ04I9+wn30BByDJ1QA\/FoPsunxIOUCcRBE\/7jxuLYcpR+JvEF68yYIh\natXRld4W4in7T65YDR8jK1Uj9XAcNeDYNpT\/M6oFLx1aPIlkG86aCWRO19S1jLPT\nb1ZAKHHxPMCVkSYW0RqvIgLXQOR62D0Zne6\/2wtzJkk5UCjkSQ2z7ZzJpMkWgDgN\nifCULFPBAoGBAPoMZ5q1w+zB+knXUD33n1J+niN6TZHJulpf2w5zsW+m2K6Zn62M\nMXndXlVAHtk6p02q9kxHdgov34Uo8VpuNjbS1+abGFTI8NZgFo+bsDxJdItemwC4\nKJ7L1iz39hRN\/ZylMRLz5uTYRGddCkeIHhiG2h7zohH\/MaYzUacXEEy3AoGBANz8\ne\/msleB+iXC0cXKwds26N4hyMdAFE5qAqJXvV3S2W8JZnmU+sS7vPAWMYPlERPk1\nD8Q2eXqdPIkAWBhrx4RxD7rNc5qFNcQWEhCIxC9fccluH1y5g2M+4jpMX2CT8Uv+\n3z+NoJ5uDTXZTnLCfoZzgZ4nCZVZ+6iU5U1+YXFJAoGBANLPpIV920n\/nJmmquMj\norI1R\/QXR9Cy56cMC65agezlGOfTYxk5Cfl5Ve+\/2IJCfgzwJyjWUsFx7RviEeGw\n64o7JoUom1HX+5xxdHPsyZ96OoTJ5RqtKKoApnhRMamau0fWydH1yeOEJd+TRHhc\nXStGfhz8QNa1dVFvENczja1vAoGABGWhsd4VPVpHMc7lUvrf4kgKQtTC2PjA4xoc\nQJ96hf\/642sVE76jl+N6tkGMzGjnVm4P2j+bOy1VvwQavKGoXqJBRd5Apppv727g\n\/SM7hBXKFc\/zH80xKBBgP\/i1DR7kdjakCoeu4ngeGywvu2jTS6mQsqzkK+yWbUxJ\nI7mYBsECgYB\/KNXlTEpXtz\/kwWCHFSYA8U74l7zZbVD8ul0e56JDK+lLcJ0tJffk\ngqnBycHj6AhEycjda75cs+0zybZvN4x65KZHOGW\/O\/7OAWEcZP5TPb3zf9ned3Hl\nNsZoFj52ponUM6+99A2CmezFCN16c4mbA\/\/luWF+k3VVqR6BpkrhKw==\n-----END RSA PRIVATE KEY-----`\n\n\/\/ reused internally by tests\nvar serverConfig = new(ServerConfig)\n\nfunc init() {\n\tif err := serverConfig.SetRSAPrivateKey([]byte(_pem)); err != nil {\n\t\tpanic(\"unable to set private key: \" + err.Error())\n\t}\n}\n\n\/\/ keychain implements the ClientPublickey interface\ntype keychain struct {\n\tkeys []*rsa.PrivateKey\n}\n\nfunc (k *keychain) Key(i int) (interface{}, error) {\n\tif i < 0 || i >= len(k.keys) {\n\t\treturn nil, nil\n\t}\n\treturn k.keys[i].PublicKey, nil\n}\n\nfunc (k *keychain) Sign(i int, rand io.Reader, data []byte) (sig []byte, err error) {\n\thashFunc := crypto.SHA1\n\th := hashFunc.New()\n\th.Write(data)\n\tdigest := h.Sum()\n\treturn rsa.SignPKCS1v15(rand, k.keys[i], hashFunc, digest)\n}\n\nfunc (k *keychain) loadPEM(file string) error {\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tblock, _ := pem.Decode(buf)\n\tif block == nil {\n\t\treturn errors.New(\"ssh: no key found\")\n\t}\n\tr, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.keys = append(k.keys, r)\n\treturn nil\n}\n\nvar pkey *rsa.PrivateKey\n\nfunc init() {\n\tvar err error\n\tpkey, err = rsa.GenerateKey(rand.Reader, 512)\n\tif err != nil {\n\t\tpanic(\"unable to generate public key\")\n\t}\n}\n\nfunc TestClientAuthPublickey(t *testing.T) {\n\tk := new(keychain)\n\tk.keys = append(k.keys, pkey)\n\n\tserverConfig.PubKeyCallback = func(user, algo string, pubkey []byte) bool {\n\t\texpected := []byte(serializePublickey(k.keys[0].PublicKey))\n\t\talgoname := algoName(k.keys[0].PublicKey)\n\t\treturn user == \"testuser\" && algo == algoname && bytes.Equal(pubkey, expected)\n\t}\n\tserverConfig.PasswordCallback = nil\n\n\tl, err := Listen(\"tcp\", \"127.0.0.1:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdone <- true\n\t}()\n\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPublickey(k),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n\n\/\/ password implements the ClientPassword interface\ntype password string\n\nfunc (p password) Password(user string) (string, error) {\n\treturn string(p), nil\n}\n\nfunc TestClientAuthPassword(t *testing.T) {\n\tpw := password(\"tiger\")\n\n\tserverConfig.PasswordCallback = func(user, pass string) bool {\n\t\treturn user == \"testuser\" && pass == string(pw)\n\t}\n\tserverConfig.PubKeyCallback = nil\n\n\tl, err := Listen(\"tcp\", \"0.0.0.0:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tdone <- true\n\t}()\n\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPassword(pw),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Errorf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n\nfunc TestClientAuthPasswordAndPublickey(t *testing.T) {\n\tpw := password(\"tiger\")\n\n\tserverConfig.PasswordCallback = func(user, pass string) bool {\n\t\treturn user == \"testuser\" && pass == string(pw)\n\t}\n\n\tk := new(keychain)\n\tk.keys = append(k.keys, pkey)\n\n\tserverConfig.PubKeyCallback = func(user, algo string, pubkey []byte) bool {\n\t\texpected := []byte(serializePublickey(k.keys[0].PublicKey))\n\t\talgoname := algoName(k.keys[0].PublicKey)\n\t\treturn user == \"testuser\" && algo == algoname && bytes.Equal(pubkey, expected)\n\t}\n\n\tl, err := Listen(\"tcp\", \"0.0.0.0:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tdone <- true\n\t}()\n\n\twrongPw := password(\"wrong\")\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPassword(wrongPw),\n\t\t\tClientAuthPublickey(k),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Errorf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n<commit_msg>exp\/ssh: change test listen address, also exit test if fails<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nconst _pem = `-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA19lGVsTqIT5iiNYRgnoY1CwkbETW5cq+Rzk5v\/kTlf31XpSU\n70HVWkbTERECjaYdXM2gGcbb+sxpq6GtXf1M3kVomycqhxwhPv4Cr6Xp4WT\/jkFx\n9z+FFzpeodGJWjOH6L2H5uX1Cvr9EDdQp9t9\/J32\/qBFntY8GwoUI\/y\/1MSTmMiF\ntupdMODN064vd3gyMKTwrlQ8tZM6aYuyOPsutLlUY7M5x5FwMDYvnPDSeyT\/Iw0z\ns3B+NCyqeeMd2T7YzQFnRATj0M7rM5LoSs7DVqVriOEABssFyLj31PboaoLhOKgc\nqoM9khkNzr7FHVvi+DhYM2jD0DwvqZLN6NmnLwIDAQABAoIBAQCGVj+kuSFOV1lT\n+IclQYA6bM6uY5mroqcSBNegVxCNhWU03BxlW\/\/BE9tA\/+kq53vWylMeN9mpGZea\nriEMIh25KFGWXqXlOOioH8bkMsqA8S7sBmc7jljyv+0toQ9vCCtJ+sueNPhxQQxH\nD2YvUjfzBQ04I9+wn30BByDJ1QA\/FoPsunxIOUCcRBE\/7jxuLYcpR+JvEF68yYIh\natXRld4W4in7T65YDR8jK1Uj9XAcNeDYNpT\/M6oFLx1aPIlkG86aCWRO19S1jLPT\nb1ZAKHHxPMCVkSYW0RqvIgLXQOR62D0Zne6\/2wtzJkk5UCjkSQ2z7ZzJpMkWgDgN\nifCULFPBAoGBAPoMZ5q1w+zB+knXUD33n1J+niN6TZHJulpf2w5zsW+m2K6Zn62M\nMXndXlVAHtk6p02q9kxHdgov34Uo8VpuNjbS1+abGFTI8NZgFo+bsDxJdItemwC4\nKJ7L1iz39hRN\/ZylMRLz5uTYRGddCkeIHhiG2h7zohH\/MaYzUacXEEy3AoGBANz8\ne\/msleB+iXC0cXKwds26N4hyMdAFE5qAqJXvV3S2W8JZnmU+sS7vPAWMYPlERPk1\nD8Q2eXqdPIkAWBhrx4RxD7rNc5qFNcQWEhCIxC9fccluH1y5g2M+4jpMX2CT8Uv+\n3z+NoJ5uDTXZTnLCfoZzgZ4nCZVZ+6iU5U1+YXFJAoGBANLPpIV920n\/nJmmquMj\norI1R\/QXR9Cy56cMC65agezlGOfTYxk5Cfl5Ve+\/2IJCfgzwJyjWUsFx7RviEeGw\n64o7JoUom1HX+5xxdHPsyZ96OoTJ5RqtKKoApnhRMamau0fWydH1yeOEJd+TRHhc\nXStGfhz8QNa1dVFvENczja1vAoGABGWhsd4VPVpHMc7lUvrf4kgKQtTC2PjA4xoc\nQJ96hf\/642sVE76jl+N6tkGMzGjnVm4P2j+bOy1VvwQavKGoXqJBRd5Apppv727g\n\/SM7hBXKFc\/zH80xKBBgP\/i1DR7kdjakCoeu4ngeGywvu2jTS6mQsqzkK+yWbUxJ\nI7mYBsECgYB\/KNXlTEpXtz\/kwWCHFSYA8U74l7zZbVD8ul0e56JDK+lLcJ0tJffk\ngqnBycHj6AhEycjda75cs+0zybZvN4x65KZHOGW\/O\/7OAWEcZP5TPb3zf9ned3Hl\nNsZoFj52ponUM6+99A2CmezFCN16c4mbA\/\/luWF+k3VVqR6BpkrhKw==\n-----END RSA PRIVATE KEY-----`\n\n\/\/ reused internally by tests\nvar serverConfig = new(ServerConfig)\n\nfunc init() {\n\tif err := serverConfig.SetRSAPrivateKey([]byte(_pem)); err != nil {\n\t\tpanic(\"unable to set private key: \" + err.Error())\n\t}\n}\n\n\/\/ keychain implements the ClientPublickey interface\ntype keychain struct {\n\tkeys []*rsa.PrivateKey\n}\n\nfunc (k *keychain) Key(i int) (interface{}, error) {\n\tif i < 0 || i >= len(k.keys) {\n\t\treturn nil, nil\n\t}\n\treturn k.keys[i].PublicKey, nil\n}\n\nfunc (k *keychain) Sign(i int, rand io.Reader, data []byte) (sig []byte, err error) {\n\thashFunc := crypto.SHA1\n\th := hashFunc.New()\n\th.Write(data)\n\tdigest := h.Sum()\n\treturn rsa.SignPKCS1v15(rand, k.keys[i], hashFunc, digest)\n}\n\nfunc (k *keychain) loadPEM(file string) error {\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tblock, _ := pem.Decode(buf)\n\tif block == nil {\n\t\treturn errors.New(\"ssh: no key found\")\n\t}\n\tr, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.keys = append(k.keys, r)\n\treturn nil\n}\n\nvar pkey *rsa.PrivateKey\n\nfunc init() {\n\tvar err error\n\tpkey, err = rsa.GenerateKey(rand.Reader, 512)\n\tif err != nil {\n\t\tpanic(\"unable to generate public key\")\n\t}\n}\n\nfunc TestClientAuthPublickey(t *testing.T) {\n\tk := new(keychain)\n\tk.keys = append(k.keys, pkey)\n\n\tserverConfig.PubKeyCallback = func(user, algo string, pubkey []byte) bool {\n\t\texpected := []byte(serializePublickey(k.keys[0].PublicKey))\n\t\talgoname := algoName(k.keys[0].PublicKey)\n\t\treturn user == \"testuser\" && algo == algoname && bytes.Equal(pubkey, expected)\n\t}\n\tserverConfig.PasswordCallback = nil\n\n\tl, err := Listen(\"tcp\", \"127.0.0.1:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdone <- true\n\t}()\n\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPublickey(k),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n\n\/\/ password implements the ClientPassword interface\ntype password string\n\nfunc (p password) Password(user string) (string, error) {\n\treturn string(p), nil\n}\n\nfunc TestClientAuthPassword(t *testing.T) {\n\tpw := password(\"tiger\")\n\n\tserverConfig.PasswordCallback = func(user, pass string) bool {\n\t\treturn user == \"testuser\" && pass == string(pw)\n\t}\n\tserverConfig.PubKeyCallback = nil\n\n\tl, err := Listen(\"tcp\", \"127.0.0.1:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tdone <- true\n\t}()\n\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPassword(pw),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n\nfunc TestClientAuthPasswordAndPublickey(t *testing.T) {\n\tpw := password(\"tiger\")\n\n\tserverConfig.PasswordCallback = func(user, pass string) bool {\n\t\treturn user == \"testuser\" && pass == string(pw)\n\t}\n\n\tk := new(keychain)\n\tk.keys = append(k.keys, pkey)\n\n\tserverConfig.PubKeyCallback = func(user, algo string, pubkey []byte) bool {\n\t\texpected := []byte(serializePublickey(k.keys[0].PublicKey))\n\t\talgoname := algoName(k.keys[0].PublicKey)\n\t\treturn user == \"testuser\" && algo == algoname && bytes.Equal(pubkey, expected)\n\t}\n\n\tl, err := Listen(\"tcp\", \"127.0.0.1:0\", serverConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to listen: %s\", err)\n\t}\n\tdefer l.Close()\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := c.Handshake(); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tdone <- true\n\t}()\n\n\twrongPw := password(\"wrong\")\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthPassword(wrongPw),\n\t\t\tClientAuthPublickey(k),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", l.Addr().String(), config)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to dial remote side: %s\", err)\n\t}\n\tdefer c.Close()\n\t<-done\n}\n<|endoftext|>"} {"text":"<commit_before>package cmds\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/config\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/errors\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/transaction\"\n)\n\n\/\/ getActiveTransaction will read the active transaction from the config file\n\/\/ (if it exists) and return it. If the config file is uninitialized or the\n\/\/ active transaction is unset, `nil` will be returned.\nfunc getActiveTransaction() (*transaction.Transaction, error) {\n\tcfg, err := config.Read(false)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error reading Pachyderm config:\")\n\t}\n\t_, context, err := cfg.ActiveContext()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error getting the active context\")\n\t}\n\tif context.ActiveTransaction == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn &transaction.Transaction{ID: context.ActiveTransaction}, nil\n}\n\nfunc requireActiveTransaction() (*transaction.Transaction, error) {\n\ttxn, err := getActiveTransaction()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if txn == nil {\n\t\treturn nil, errors.Errorf(\"no active transaction\")\n\t}\n\treturn txn, nil\n}\n\nfunc setActiveTransaction(txn *transaction.Transaction) error {\n\tcfg, err := config.Read(false)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error reading Pachyderm config\")\n\t}\n\t_, context, err := cfg.ActiveContext()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error getting the active context:\")\n\t}\n\tif txn == nil {\n\t\tcontext.ActiveTransaction = \"\"\n\t} else {\n\t\tcontext.ActiveTransaction = txn.ID\n\t}\n\tif err := cfg.Write(); err != nil {\n\t\treturn errors.Wrapf(err, \"error writing Pachyderm config:\")\n\t}\n\treturn nil\n}\n\n\/\/ ClearActiveTransaction will remove the active transaction from the pachctl\n\/\/ config file - used by the 'delete all' command.\nfunc ClearActiveTransaction() error {\n\treturn setActiveTransaction(nil)\n}\n\n\/\/ WithActiveTransaction is a helper function that will attach the given\n\/\/ transaction to the given client (resulting in a new client), then run the\n\/\/ given callback with the new client. This is for executing RPCs that can be\n\/\/ run inside a transaction - if this isn't supported, it will have no effect.\nfunc WithActiveTransaction(c *client.APIClient, callback func(*client.APIClient) error) error {\n\ttxn, err := getActiveTransaction()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif txn != nil {\n\t\tc = c.WithTransaction(txn)\n\t}\n\terr = callback(c)\n\tif err == nil && txn != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Added to transaction: %s\\n\", txn.ID)\n\t}\n\treturn err\n}\n<commit_msg>Update src\/server\/transaction\/cmds\/util.go<commit_after>package cmds\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/config\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/errors\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/transaction\"\n)\n\n\/\/ getActiveTransaction will read the active transaction from the config file\n\/\/ (if it exists) and return it. If the config file is uninitialized or the\n\/\/ active transaction is unset, `nil` will be returned.\nfunc getActiveTransaction() (*transaction.Transaction, error) {\n\tcfg, err := config.Read(false)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error reading Pachyderm config:\")\n\t}\n\t_, context, err := cfg.ActiveContext()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error getting the active context\")\n\t}\n\tif context.ActiveTransaction == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn &transaction.Transaction{ID: context.ActiveTransaction}, nil\n}\n\nfunc requireActiveTransaction() (*transaction.Transaction, error) {\n\ttxn, err := getActiveTransaction()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if txn == nil {\n\t\treturn nil, errors.Errorf(\"no active transaction\")\n\t}\n\treturn txn, nil\n}\n\nfunc setActiveTransaction(txn *transaction.Transaction) error {\n\tcfg, err := config.Read(false)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error reading Pachyderm config\")\n\t}\n\t_, context, err := cfg.ActiveContext()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error getting the active context\")\n\t}\n\tif txn == nil {\n\t\tcontext.ActiveTransaction = \"\"\n\t} else {\n\t\tcontext.ActiveTransaction = txn.ID\n\t}\n\tif err := cfg.Write(); err != nil {\n\t\treturn errors.Wrapf(err, \"error writing Pachyderm config:\")\n\t}\n\treturn nil\n}\n\n\/\/ ClearActiveTransaction will remove the active transaction from the pachctl\n\/\/ config file - used by the 'delete all' command.\nfunc ClearActiveTransaction() error {\n\treturn setActiveTransaction(nil)\n}\n\n\/\/ WithActiveTransaction is a helper function that will attach the given\n\/\/ transaction to the given client (resulting in a new client), then run the\n\/\/ given callback with the new client. This is for executing RPCs that can be\n\/\/ run inside a transaction - if this isn't supported, it will have no effect.\nfunc WithActiveTransaction(c *client.APIClient, callback func(*client.APIClient) error) error {\n\ttxn, err := getActiveTransaction()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif txn != nil {\n\t\tc = c.WithTransaction(txn)\n\t}\n\terr = callback(c)\n\tif err == nil && txn != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Added to transaction: %s\\n\", txn.ID)\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Tobias Schottdorf (tobias.schottdorf@gmail.com)\n\npackage storage\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/cockroachdb\/cockroach\/config\"\n\t\"github.com\/cockroachdb\/cockroach\/internal\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/randutil\"\n)\n\nconst rangeID = 1\nconst keySize = 1 << 7 \/\/ 128 B\nconst valSize = 1 << 10 \/\/ 1 KiB\n\nfunc fillTestRange(t testing.TB, rep *Replica, size int64) {\n\tsrc := rand.New(rand.NewSource(0))\n\tfor i := int64(0); i < size\/int64(keySize+valSize); i++ {\n\t\tkey := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))\n\t\tval := randutil.RandBytes(src, valSize)\n\t\tpArgs := putArgs(key, val)\n\t\tif _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{\n\t\t\tRangeID: rangeID,\n\t\t}, &pArgs); pErr != nil {\n\t\t\tt.Fatal(pErr)\n\t\t}\n\t}\n\trep.mu.Lock()\n\tafter := rep.mu.state.Stats.Total()\n\trep.mu.Unlock()\n\tif after < size {\n\t\tt.Fatalf(\"range not full after filling: wrote %d, but range at %d\", size, after)\n\t}\n}\n\nfunc TestSkipLargeReplicaSnapshot(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tstore, _, stopper := createTestStore(t)\n\tstore.ctx.TestingKnobs.DisableSplitQueue = true\n\t\/\/ We want to manually control the size of the raft log.\n\tdefer stopper.Stop()\n\n\tconst snapSize = 1 << 20 \/\/ 1 MiB\n\tcfg := config.DefaultZoneConfig()\n\tcfg.RangeMaxBytes = snapSize\n\tdefer config.TestingSetDefaultZoneConfig(cfg)()\n\n\trep, err := store.GetReplica(rangeID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trep.SetMaxBytes(snapSize)\n\n\tif pErr := rep.redirectOnOrAcquireLease(context.Background()); pErr != nil {\n\t\tt.Fatal(pErr)\n\t}\n\n\tfillTestRange(t, rep, snapSize)\n\n\tif _, err := rep.GetSnapshot(context.Background()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfillTestRange(t, rep, snapSize*2)\n\n\tif _, err := rep.Snapshot(); err != raft.ErrSnapshotTemporarilyUnavailable {\n\t\trep.mu.Lock()\n\t\tafter := rep.mu.state.Stats.Total()\n\t\trep.mu.Unlock()\n\t\tt.Fatalf(\n\t\t\t\"snapshot of a very large range (%d \/ %d, needsSplit: %v, exceeds snap limit: %v) should fail but got %v\",\n\t\t\tafter, rep.GetMaxBytes(),\n\t\t\trep.needsSplitBySize(), rep.exceedsDoubleSplitSizeLocked(), err,\n\t\t)\n\t}\n}\n<commit_msg>storage: actually disable splits in TestSkipLargeReplicaSnapshot<commit_after>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Tobias Schottdorf (tobias.schottdorf@gmail.com)\n\npackage storage\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/cockroachdb\/cockroach\/config\"\n\t\"github.com\/cockroachdb\/cockroach\/internal\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/randutil\"\n)\n\nconst rangeID = 1\nconst keySize = 1 << 7 \/\/ 128 B\nconst valSize = 1 << 10 \/\/ 1 KiB\n\nfunc fillTestRange(t testing.TB, rep *Replica, size int64) {\n\tsrc := rand.New(rand.NewSource(0))\n\tfor i := int64(0); i < size\/int64(keySize+valSize); i++ {\n\t\tkey := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))\n\t\tval := randutil.RandBytes(src, valSize)\n\t\tpArgs := putArgs(key, val)\n\t\tif _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{\n\t\t\tRangeID: rangeID,\n\t\t}, &pArgs); pErr != nil {\n\t\t\tt.Fatal(pErr)\n\t\t}\n\t}\n\trep.mu.Lock()\n\tafter := rep.mu.state.Stats.Total()\n\trep.mu.Unlock()\n\tif after < size {\n\t\tt.Fatalf(\"range not full after filling: wrote %d, but range at %d\", size, after)\n\t}\n}\n\nfunc TestSkipLargeReplicaSnapshot(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tsCtx := TestStoreContext()\n\tsCtx.TestingKnobs.DisableSplitQueue = true\n\tstore, _, stopper := createTestStoreWithContext(t, &sCtx)\n\tdefer stopper.Stop()\n\n\tconst snapSize = 1 << 20 \/\/ 1 MiB\n\tcfg := config.DefaultZoneConfig()\n\tcfg.RangeMaxBytes = snapSize\n\tdefer config.TestingSetDefaultZoneConfig(cfg)()\n\n\trep, err := store.GetReplica(rangeID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trep.SetMaxBytes(snapSize)\n\n\tif pErr := rep.redirectOnOrAcquireLease(context.Background()); pErr != nil {\n\t\tt.Fatal(pErr)\n\t}\n\n\tfillTestRange(t, rep, snapSize)\n\n\tif _, err := rep.GetSnapshot(context.Background()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfillTestRange(t, rep, snapSize*2)\n\n\tif _, err := rep.Snapshot(); err != raft.ErrSnapshotTemporarilyUnavailable {\n\t\trep.mu.Lock()\n\t\tafter := rep.mu.state.Stats.Total()\n\t\trep.mu.Unlock()\n\t\tt.Fatalf(\n\t\t\t\"snapshot of a very large range (%d \/ %d, needsSplit: %v, exceeds snap limit: %v) should fail but got %v\",\n\t\t\tafter, rep.GetMaxBytes(),\n\t\t\trep.needsSplitBySize(), rep.exceedsDoubleSplitSizeLocked(), err,\n\t\t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (C) 2013 Lucy\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand\/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.package main\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"math\/cmplx\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/jackvalmadre\/go-fftw\"\n\tflag \"github.com\/neeee\/pflag\"\n\t\"github.com\/neeee\/termbox-go\"\n)\n\nvar (\n\tcolor = flag.StringP(\"color\", \"c\", \"default\", \"Color to use\")\n\tdim = flag.BoolP(\"dim\", \"d\", false,\n\t\t\"Turn off bright colors where possible\")\n\n\ttick = flag.DurationP(\"tick\", \"t\", 0,\n\t\t\"Min time to spend on frames. Not supported for lines.\")\n\n\tstep = flag.Int(\"step\", 2, \"Samples for each step (wave\/lines)\")\n\tscale = flag.Float64(\"scale\", 2, \"Scale divisor (spectrum)\")\n\n\ticolor = flag.BoolP(\"icolor\", \"i\", false,\n\t\t\"Color bars according to intensity (spectrum\/lines)\")\n\timode = flag.String(\"imode\", \"dumb\",\n\t\t\"Mode for colorisation (dumb, 256 or grayscale)\")\n\n\tfilename = flag.StringP(\"file\", \"f\", \"\/tmp\/mpd.fifo\",\n\t\t\"Where to read pcm data from\")\n\tvis = flag.StringP(\"viz\", \"v\", \"wave\",\n\t\t\"Visualisation (spectrum, wave or lines)\")\n)\n\nvar colors = map[string]termbox.Attribute{\n\t\"default\": termbox.ColorDefault,\n\t\"black\": termbox.ColorBlack,\n\t\"red\": termbox.ColorRed,\n\t\"green\": termbox.ColorGreen,\n\t\"yellow\": termbox.ColorYellow,\n\t\"blue\": termbox.ColorBlue,\n\t\"magenta\": termbox.ColorMagenta,\n\t\"cyan\": termbox.ColorCyan,\n\t\"white\": termbox.ColorWhite,\n}\n\nvar iColors []termbox.Attribute\n\nvar (\n\ton = termbox.ColorDefault\n\toff = termbox.ColorDefault\n)\n\n\/\/ inaccurate ticker\nfunc ticker(d time.Duration, ch chan struct{}) {\n\tfor {\n\t\ttime.Sleep(d)\n\t\tch <- struct{}{}\n\t}\n}\n\nvar tickc = make(chan struct{})\n\nfunc warn(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif cl, ok := colors[*color]; !ok {\n\t\twarn(\"Unknown color \\\"%s\\\"\\n\", *color)\n\t\treturn\n\t} else {\n\t\ton = cl\n\t}\n\n\tif !*dim {\n\t\ton = on | termbox.AttrBold\n\t}\n\n\tswitch *imode {\n\tcase \"dumb\":\n\t\tiColors = []termbox.Attribute{\n\t\t\ttermbox.ColorBlue,\n\t\t\ttermbox.ColorCyan,\n\t\t\ttermbox.ColorGreen,\n\t\t\ttermbox.ColorYellow,\n\t\t\ttermbox.ColorRed,\n\t\t}\n\t\tif !*dim {\n\t\t\tfor i := range iColors {\n\t\t\t\tiColors[i] = iColors[i] + 8\n\t\t\t}\n\t\t}\n\tcase \"256\":\n\t\tiColors = []termbox.Attribute{\n\t\t\t21, 27, 39, 45, 51, 86, 85, 84, 82,\n\t\t\t154, 192, 220, 214, 208, 202, 196,\n\t\t}\n\tcase \"grayscale\":\n\t\tconst num = 19\n\t\tiColors = make([]termbox.Attribute, num)\n\t\tfor i := termbox.Attribute(0); i < num; i++ {\n\t\t\tiColors[i] = i + 255 - num\n\t\t}\n\tdefault:\n\t\twarn(\"Unsupported mode: \\\"%s\\\"\\n\", *imode)\n\t\treturn\n\t}\n\n\tvar draw func(*os.File, chan bool)\n\tswitch *vis {\n\tcase \"spectrum\":\n\t\tdraw = drawSpectrum\n\tcase \"wave\":\n\t\tdraw = drawWave\n\tcase \"lines\":\n\t\tdraw = drawLines\n\tdefault:\n\t\twarn(\"Unknown visualisation \\\"%s\\\"\\n\"+\n\t\t\t\"Supported: spectrum, wave\\n\", *vis)\n\t\treturn\n\t}\n\n\tfile, err := os.Open(*filename)\n\tif err != nil {\n\t\twarn(\"%s\\n\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = termbox.Init()\n\tif err != nil {\n\t\twarn(\"%s\\b\", err)\n\t\treturn\n\t}\n\tdefer termbox.Close()\n\n\tif *tick == 0 {\n\t\tclose(tickc)\n\t} else {\n\t\tgo ticker(*tick, tickc)\n\t}\n\tend := make(chan bool)\n\n\t\/\/ drawer\n\tgo draw(file, end)\n\n\t\/\/ input handler\n\tgo func() {\n\t\tfor {\n\t\t\tev := termbox.PollEvent()\n\t\t\tif ev.Ch == 0 && ev.Key == termbox.KeyCtrlC {\n\t\t\t\tclose(end)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-end\n}\n\nfunc size() (int, int) {\n\tw, h := termbox.Size()\n\treturn w, h * 2\n}\n\nfunc drawWave(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tinRaw []int16\n\t\tilen = len(iColors) - 1\n\t\tflen = float64(ilen)\n\t\tfstep = float64(*step)\n\t)\n\tfor {\n\t\tw, h := size()\n\t\tif s := w * *step; len(inRaw) != s {\n\t\t\tinRaw = make([]int16, s)\n\t\t}\n\n\t\tif readInt16s(file, inRaw) == io.EOF {\n\t\t\treturn\n\t\t}\n\n\t\thalf_h := float64(h \/ 2)\n\t\toffset := 0\n\t\tfor pos := 0; pos < w; pos++ {\n\t\t\tvar v float64\n\t\t\tfor i := 0; i < *step; i++ {\n\t\t\t\tv += float64(inRaw[offset+i])\n\t\t\t}\n\t\t\toffset += *step\n\t\t\tv \/= fstep\n\n\t\t\tif *icolor {\n\t\t\t\ton = iColors[min(ilen, abs(int(v\/(math.MaxInt16\/flen))))]\n\t\t\t}\n\n\t\t\tvi := int(v\/(math.MaxInt16\/half_h) + half_h)\n\t\t\tif vi%2 == 0 {\n\t\t\t\ttermbox.SetCell(pos, vi\/2, '▀', on, off)\n\t\t\t} else {\n\t\t\t\ttermbox.SetCell(pos, vi\/2, '▄', on, off)\n\t\t\t}\n\t\t}\n\n\t\ttermbox.Flush()\n\t\ttermbox.Clear(off, off)\n\t\t<-tickc\n\t}\n\n}\n\ntype buffer []byte\n\nfunc (b *buffer) Read(p []byte) (n int, err error) {\n\treturn copy(p, *b), nil\n}\n\nfunc drawSpectrum(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tw, h = size()\n\t\tilen = len(iColors) - 1\n\t\tflen = float64(ilen)\n\t\tsamples = (w - 1) * 2\n\t\tresn = w\n\t\tin = make([]float64, samples)\n\t\tinRaw = make([]int16, samples)\n\t\tout = fftw.Alloc1d(resn)\n\t\tplan = fftw.PlanDftR2C1d(in, out, fftw.Measure)\n\t)\n\n\tfor {\n\t\tif resn != w && w != 1 {\n\t\t\tfftw.Free1d(out)\n\t\t\tresn = w\n\t\t\tsamples = (w - 1) * 2\n\t\t\tin = make([]float64, samples)\n\t\t\tinRaw = make([]int16, samples)\n\t\t\tout = fftw.Alloc1d(resn)\n\t\t\tplan = fftw.PlanDftR2C1d(in, out, fftw.Measure)\n\t\t}\n\n\t\tif readInt16s(file, inRaw) != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor i := range inRaw {\n\t\t\tin[i] = float64(inRaw[i])\n\t\t}\n\n\t\tplan.Execute()\n\t\thFloat := float64(h)\n\t\tfor i := 0; i < w; i++ {\n\t\t\tv := cmplx.Abs(out[i]) \/ 1e5 \/ *scale\n\t\t\tif *icolor {\n\t\t\t\ton = iColors[min(ilen, int(v*(flen)))]\n\t\t\t}\n\t\t\thd := int(v * hFloat)\n\t\t\tfor j := h - 1; j > h-hd; j-- {\n\t\t\t\ttermbox.SetCell(i, j\/2, '┃', on, off)\n\t\t\t}\n\t\t\tif hd%2 != 0 {\n\t\t\t\ttermbox.SetCell(i, (h-hd)\/2, '╻', on, off)\n\t\t\t}\n\t\t}\n\n\t\ttermbox.Flush()\n\t\ttermbox.Clear(off, off)\n\t\tw, h = size()\n\n\t\t<-tickc\n\t}\n}\n\ntype pair struct{ x, y int }\n\nvar dirs = [9]pair{\n\t{1, 1}, {-1, -1},\n\t{1, -1}, {-1, 1},\n\t{1, 0}, {-1, 0},\n\t{0, 1}, {0, -1},\n\t{0, 0},\n}\n\ntype coord struct{ x, y, dir int }\n\nfunc (c *coord) step(x, y int) {\n\tc.x += dirs[c.dir].x\n\tc.y += dirs[c.dir].y\n\tc.x, c.y = mod(c.x, x), mod(c.y, y)\n}\n\nfunc drawLines(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tc, bc coord\n\t\tinraw = make([]int16, *step)\n\t\thist = make([]pair, 1000)\n\t\tilen = len(iColors) - 1\n\t\tfilen = float64(ilen)\n\t)\n\n\tfor {\n\t\tif readInt16s(file, inraw) == io.EOF {\n\t\t\treturn\n\t\t}\n\n\t\tvar raw float64\n\t\tfor i := range inraw {\n\t\t\traw += float64(inraw[i])\n\t\t}\n\t\traw \/= float64(*step)\n\t\tc.dir = min(8, abs(int(raw\/(math.MaxInt16\/8))))\n\t\tbc.dir = 8 - c.dir\n\t\tif *icolor {\n\t\t\ton = iColors[min(ilen, abs(int(raw\/(math.MaxInt16\/filen))))]\n\t\t}\n\n\t\tw, h := termbox.Size()\n\t\tbc.step(w, h)\n\t\tc.step(w, h)\n\n\t\thist = append(hist[1:], pair{c.x, c.y})\n\t\ttermbox.SetCell(hist[0].x, hist[0].y, ' ',\n\t\t\ttermbox.ColorDefault,\n\t\t\ttermbox.ColorDefault)\n\t\ttermbox.SetCell(c.x, c.y, '#',\n\t\t\ton, termbox.ColorDefault)\n\t\ttermbox.Flush()\n\t\ttime.Sleep(1)\n\t}\n}\n<commit_msg>get rid of ticker<commit_after>\/*\nCopyright (C) 2013 Lucy\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand\/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.package main\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"math\/cmplx\"\n\t\"os\"\n\n\t\"github.com\/jackvalmadre\/go-fftw\"\n\tflag \"github.com\/neeee\/pflag\"\n\t\"github.com\/neeee\/termbox-go\"\n)\n\nvar (\n\tcolor = flag.StringP(\"color\", \"c\", \"default\", \"Color to use\")\n\tdim = flag.BoolP(\"dim\", \"d\", false,\n\t\t\"Turn off bright colors where possible\")\n\n\tstep = flag.Int(\"step\", 2, \"Samples for each step (wave\/lines)\")\n\tscale = flag.Float64(\"scale\", 2, \"Scale divisor (spectrum)\")\n\n\ticolor = flag.BoolP(\"icolor\", \"i\", false,\n\t\t\"Color bars according to intensity (spectrum\/lines)\")\n\timode = flag.String(\"imode\", \"dumb\",\n\t\t\"Mode for colorisation (dumb, 256 or grayscale)\")\n\n\tfilename = flag.StringP(\"file\", \"f\", \"\/tmp\/mpd.fifo\",\n\t\t\"Where to read pcm data from\")\n\tvis = flag.StringP(\"viz\", \"v\", \"wave\",\n\t\t\"Visualisation (spectrum, wave or lines)\")\n)\n\nvar colors = map[string]termbox.Attribute{\n\t\"default\": termbox.ColorDefault,\n\t\"black\": termbox.ColorBlack,\n\t\"red\": termbox.ColorRed,\n\t\"green\": termbox.ColorGreen,\n\t\"yellow\": termbox.ColorYellow,\n\t\"blue\": termbox.ColorBlue,\n\t\"magenta\": termbox.ColorMagenta,\n\t\"cyan\": termbox.ColorCyan,\n\t\"white\": termbox.ColorWhite,\n}\n\nvar iColors []termbox.Attribute\n\nvar (\n\ton = termbox.ColorDefault\n\toff = termbox.ColorDefault\n)\n\nfunc warn(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif cl, ok := colors[*color]; !ok {\n\t\twarn(\"Unknown color \\\"%s\\\"\\n\", *color)\n\t\treturn\n\t} else {\n\t\ton = cl\n\t}\n\n\tif !*dim {\n\t\ton = on | termbox.AttrBold\n\t}\n\n\tswitch *imode {\n\tcase \"dumb\":\n\t\tiColors = []termbox.Attribute{\n\t\t\ttermbox.ColorBlue,\n\t\t\ttermbox.ColorCyan,\n\t\t\ttermbox.ColorGreen,\n\t\t\ttermbox.ColorYellow,\n\t\t\ttermbox.ColorRed,\n\t\t}\n\t\tif !*dim {\n\t\t\tfor i := range iColors {\n\t\t\t\tiColors[i] = iColors[i] + 8\n\t\t\t}\n\t\t}\n\tcase \"256\":\n\t\tiColors = []termbox.Attribute{\n\t\t\t21, 27, 39, 45, 51, 86, 85, 84, 82,\n\t\t\t154, 192, 220, 214, 208, 202, 196,\n\t\t}\n\tcase \"grayscale\":\n\t\tconst num = 19\n\t\tiColors = make([]termbox.Attribute, num)\n\t\tfor i := termbox.Attribute(0); i < num; i++ {\n\t\t\tiColors[i] = i + 255 - num\n\t\t}\n\tdefault:\n\t\twarn(\"Unsupported mode: \\\"%s\\\"\\n\", *imode)\n\t\treturn\n\t}\n\n\tvar draw func(*os.File, chan bool)\n\tswitch *vis {\n\tcase \"spectrum\":\n\t\tdraw = drawSpectrum\n\tcase \"wave\":\n\t\tdraw = drawWave\n\tcase \"lines\":\n\t\tdraw = drawLines\n\tdefault:\n\t\twarn(\"Unknown visualisation \\\"%s\\\"\\n\"+\n\t\t\t\"Supported: spectrum, wave\\n\", *vis)\n\t\treturn\n\t}\n\n\tfile, err := os.Open(*filename)\n\tif err != nil {\n\t\twarn(\"%s\\n\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = termbox.Init()\n\tif err != nil {\n\t\twarn(\"%s\\b\", err)\n\t\treturn\n\t}\n\tdefer termbox.Close()\n\n\tend := make(chan bool)\n\tgo draw(file, end)\n\n\t\/\/ input handler\n\tgo func() {\n\t\tfor {\n\t\t\tev := termbox.PollEvent()\n\t\t\tif ev.Ch == 0 && ev.Key == termbox.KeyCtrlC {\n\t\t\t\tclose(end)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-end\n}\n\nfunc size() (int, int) {\n\tw, h := termbox.Size()\n\treturn w, h * 2\n}\n\nfunc drawWave(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tinRaw []int16\n\t\tilen = len(iColors) - 1\n\t\tflen = float64(ilen)\n\t\tfstep = float64(*step)\n\t)\n\tfor {\n\t\tw, h := size()\n\t\tif s := w * *step; len(inRaw) != s {\n\t\t\tinRaw = make([]int16, s)\n\t\t}\n\n\t\tif readInt16s(file, inRaw) == io.EOF {\n\t\t\treturn\n\t\t}\n\n\t\thalf_h := float64(h \/ 2)\n\t\toffset := 0\n\t\tfor pos := 0; pos < w; pos++ {\n\t\t\tvar v float64\n\t\t\tfor i := 0; i < *step; i++ {\n\t\t\t\tv += float64(inRaw[offset+i])\n\t\t\t}\n\t\t\toffset += *step\n\t\t\tv \/= fstep\n\n\t\t\tif *icolor {\n\t\t\t\ton = iColors[min(ilen, abs(int(v\/(math.MaxInt16\/flen))))]\n\t\t\t}\n\n\t\t\tvi := int(v\/(math.MaxInt16\/half_h) + half_h)\n\t\t\tif vi%2 == 0 {\n\t\t\t\ttermbox.SetCell(pos, vi\/2, '▀', on, off)\n\t\t\t} else {\n\t\t\t\ttermbox.SetCell(pos, vi\/2, '▄', on, off)\n\t\t\t}\n\t\t}\n\n\t\ttermbox.Flush()\n\t\ttermbox.Clear(off, off)\n\t}\n\n}\n\ntype buffer []byte\n\nfunc (b *buffer) Read(p []byte) (n int, err error) {\n\treturn copy(p, *b), nil\n}\n\nfunc drawSpectrum(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tw, h = size()\n\t\tilen = len(iColors) - 1\n\t\tflen = float64(ilen)\n\t\tsamples = (w - 1) * 2\n\t\tresn = w\n\t\tin = make([]float64, samples)\n\t\tinRaw = make([]int16, samples)\n\t\tout = fftw.Alloc1d(resn)\n\t\tplan = fftw.PlanDftR2C1d(in, out, fftw.Measure)\n\t)\n\n\tfor {\n\t\tif resn != w && w != 1 {\n\t\t\tfftw.Free1d(out)\n\t\t\tresn = w\n\t\t\tsamples = (w - 1) * 2\n\t\t\tin = make([]float64, samples)\n\t\t\tinRaw = make([]int16, samples)\n\t\t\tout = fftw.Alloc1d(resn)\n\t\t\tplan = fftw.PlanDftR2C1d(in, out, fftw.Measure)\n\t\t}\n\n\t\tif readInt16s(file, inRaw) != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor i := range inRaw {\n\t\t\tin[i] = float64(inRaw[i])\n\t\t}\n\n\t\tplan.Execute()\n\t\thFloat := float64(h)\n\t\tfor i := 0; i < w; i++ {\n\t\t\tv := cmplx.Abs(out[i]) \/ 1e5 \/ *scale\n\t\t\tif *icolor {\n\t\t\t\ton = iColors[min(ilen, int(v*(flen)))]\n\t\t\t}\n\t\t\thd := int(v * hFloat)\n\t\t\tfor j := h - 1; j > h-hd; j-- {\n\t\t\t\ttermbox.SetCell(i, j\/2, '┃', on, off)\n\t\t\t}\n\t\t\tif hd%2 != 0 {\n\t\t\t\ttermbox.SetCell(i, (h-hd)\/2, '╻', on, off)\n\t\t\t}\n\t\t}\n\n\t\ttermbox.Flush()\n\t\ttermbox.Clear(off, off)\n\t\tw, h = size()\n\t}\n}\n\ntype pair struct{ x, y int }\n\nvar dirs = [9]pair{\n\t{1, 1}, {-1, -1},\n\t{1, -1}, {-1, 1},\n\t{1, 0}, {-1, 0},\n\t{0, 1}, {0, -1},\n\t{0, 0},\n}\n\ntype coord struct{ x, y, dir int }\n\nfunc (c *coord) step(x, y int) {\n\tc.x += dirs[c.dir].x\n\tc.y += dirs[c.dir].y\n\tc.x, c.y = mod(c.x, x), mod(c.y, y)\n}\n\nfunc drawLines(file *os.File, end chan bool) {\n\tdefer close(end)\n\tvar (\n\t\tc, bc coord\n\t\tinraw = make([]int16, *step)\n\t\thist = make([]pair, 1000)\n\t\tilen = len(iColors) - 1\n\t\tfilen = float64(ilen)\n\t)\n\n\tfor {\n\t\tif readInt16s(file, inraw) == io.EOF {\n\t\t\treturn\n\t\t}\n\n\t\tvar raw float64\n\t\tfor i := range inraw {\n\t\t\traw += float64(inraw[i])\n\t\t}\n\t\traw \/= float64(*step)\n\t\tc.dir = min(8, abs(int(raw\/(math.MaxInt16\/8))))\n\t\tbc.dir = 8 - c.dir\n\t\tif *icolor {\n\t\t\ton = iColors[min(ilen, abs(int(raw\/(math.MaxInt16\/filen))))]\n\t\t}\n\n\t\tw, h := termbox.Size()\n\t\tbc.step(w, h)\n\t\tc.step(w, h)\n\n\t\thist = append(hist[1:], pair{c.x, c.y})\n\t\ttermbox.SetCell(hist[0].x, hist[0].y, ' ',\n\t\t\ttermbox.ColorDefault,\n\t\t\ttermbox.ColorDefault)\n\t\ttermbox.SetCell(c.x, c.y, '#',\n\t\t\ton, termbox.ColorDefault)\n\t\ttermbox.Flush()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package eu\n\nimport \"testing\"\n\nfunc TestPercent(t *testing.T) {\n\n\ttests := []struct {\n\t\tnum float64\n\t\tv uint64\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tnum: 23,\n\t\t\tv: 0,\n\t\t\texpected: \"%\\u00a023\",\n\t\t},\n\t\t{\n\t\t\tnum: 23.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a023,45\",\n\t\t},\n\t\t{\n\t\t\tnum: 1023.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a01.023,45\",\n\t\t},\n\t\t{\n\t\t\tnum: -1023.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a0-1.023,45\",\n\t\t},\n\t}\n\n\ttrans := New()\n\n\tfor _, tt := range tests {\n\t\ts := string(trans.FmtPercent(tt.num, tt.v))\n\t\tif s != tt.expected {\n\t\t\tt.Errorf(\"Expected '%s' Got '%s'\", tt.expected, s)\n\t\t}\n\t}\n}\n<commit_msg>Updating test to use the correct hyphen<commit_after>package eu\n\nimport \"testing\"\n\nfunc TestPercent(t *testing.T) {\n\n\ttests := []struct {\n\t\tnum float64\n\t\tv uint64\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tnum: 23,\n\t\t\tv: 0,\n\t\t\texpected: \"%\\u00a023\",\n\t\t},\n\t\t{\n\t\t\tnum: 23.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a023,45\",\n\t\t},\n\t\t{\n\t\t\tnum: 1023.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a01.023,45\",\n\t\t},\n\t\t{\n\t\t\tnum: -1023.45,\n\t\t\tv: 2,\n\t\t\texpected: \"%\\u00a0−1.023,45\",\n\t\t},\n\t}\n\n\ttrans := New()\n\n\tfor _, tt := range tests {\n\t\ts := string(trans.FmtPercent(tt.num, tt.v))\n\t\tif s != tt.expected {\n\t\t\tt.Errorf(\"Expected '%s' Got '%s'\", tt.expected, s)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/murlokswarm\/app\"\n\t\"github.com\/murlokswarm\/app\/tests\"\n)\n\nfunc TestEventRegistry(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\treturn app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t})\n}\n\nfunc TestEventRegistryWithLogs(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\tr := app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t\treturn app.EventRegistryWithLogs(r)\n\t})\n}\n\nfunc TestConcurrentEventRegistry(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\tr := app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t\treturn app.ConcurrentEventRegistry(r)\n\t})\n}\n<commit_msg>test event subscriber<commit_after>package app_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/murlokswarm\/app\"\n\t\"github.com\/murlokswarm\/app\/tests\"\n)\n\nfunc TestEventRegistry(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\treturn app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t})\n}\n\nfunc TestEventRegistryWithLogs(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\tr := app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t\treturn app.EventRegistryWithLogs(r)\n\t})\n}\n\nfunc TestConcurrentEventRegistry(t *testing.T) {\n\ttests.TestEventRegistry(t, func() app.EventRegistry {\n\t\tr := app.NewEventRegistry(func(f func()) {\n\t\t\tf()\n\t\t})\n\t\treturn app.ConcurrentEventRegistry(r)\n\t})\n}\n\nfunc TestEventSubscriber(t *testing.T) {\n\ts := app.NewEventSubscriber()\n\tdefer s.Close()\n\n\ts.Subscribe(\"test-event-subscriber\", func() {})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ SPDX-License-Identifier: MIT\n\npackage server\n\nimport (\n\t\"io\/fs\"\n\n\t\"github.com\/issue9\/sliceutil\"\n\n\t\"github.com\/issue9\/web\/internal\/filesystem\"\n)\n\ntype Module struct {\n\tsrv *Server\n\tid string\n\tfs *filesystem.MultipleFS\n}\n\n\/\/ NewModule 声明新的模块\n\/\/\n\/\/ id 模块的 ID,需要全局唯一。会根据此 ID 从 Server 派生出文件系统。\nfunc (srv *Server) NewModule(id string) *Module {\n\tcontains := sliceutil.Index(srv.modules, func(i int) bool {\n\t\treturn srv.modules[i] == id\n\t}) >= 0\n\tif contains {\n\t\tpanic(\"存在同名模块\")\n\t}\n\n\tvar f *filesystem.MultipleFS\n\tfsys, err := fs.Sub(srv, id)\n\tif err != nil {\n\t\tsrv.Logs().Error(err) \/\/ 不退出,创建一个空的 filesystem.MultipleFS\n\t\tf = filesystem.NewMultipleFS()\n\t} else {\n\t\tf = filesystem.NewMultipleFS(fsys)\n\t}\n\n\tsrv.modules = append(srv.modules, id)\n\treturn &Module{\n\t\tsrv: srv,\n\t\tfs: f,\n\t\tid: id,\n\t}\n}\n\n\/\/ ID 模块的唯一 ID\nfunc (m *Module) ID() string { return m.id }\n\nfunc (m *Module) Server() *Server { return m.srv }\n\n\/\/ AddFS 添加文件系统\n\/\/\n\/\/ Module 默认以 id 为名称相对于 Server 创建了一个文件系统,\n\/\/ 此操作会将 fsys 作为 Module 另一个文件系统与 Module 相关联,\n\/\/ 当执行 Open 等操作时,会依然以关联顺序查找相应的文件系统, 直到找到。\nfunc (m *Module) AddFS(fsys ...fs.FS) { m.fs.Add(fsys...) }\n\nfunc (m *Module) Open(name string) (fs.File, error) { return m.fs.Open(name) }\n<commit_msg>feat(server): 添加 Module.Cache<commit_after>\/\/ SPDX-License-Identifier: MIT\n\npackage server\n\nimport (\n\t\"io\/fs\"\n\n\t\"github.com\/issue9\/cache\"\n\t\"github.com\/issue9\/sliceutil\"\n\n\t\"github.com\/issue9\/web\/internal\/filesystem\"\n)\n\ntype Module struct {\n\tsrv *Server\n\tid string\n\tfs *filesystem.MultipleFS\n}\n\n\/\/ NewModule 声明新的模块\n\/\/\n\/\/ id 模块的 ID,需要全局唯一。会根据此 ID 从 Server 派生出文件系统。\nfunc (srv *Server) NewModule(id string) *Module {\n\tcontains := sliceutil.Index(srv.modules, func(i int) bool {\n\t\treturn srv.modules[i] == id\n\t}) >= 0\n\tif contains {\n\t\tpanic(\"存在同名模块\")\n\t}\n\n\tvar f *filesystem.MultipleFS\n\tfsys, err := fs.Sub(srv, id)\n\tif err != nil {\n\t\tsrv.Logs().Error(err) \/\/ 不退出,创建一个空的 filesystem.MultipleFS\n\t\tf = filesystem.NewMultipleFS()\n\t} else {\n\t\tf = filesystem.NewMultipleFS(fsys)\n\t}\n\n\tsrv.modules = append(srv.modules, id)\n\treturn &Module{\n\t\tsrv: srv,\n\t\tfs: f,\n\t\tid: id,\n\t}\n}\n\n\/\/ ID 模块的唯一 ID\nfunc (m *Module) ID() string { return m.id }\n\nfunc (m *Module) Server() *Server { return m.srv }\n\n\/\/ AddFS 添加文件系统\n\/\/\n\/\/ Module 默认以 id 为名称相对于 Server 创建了一个文件系统,\n\/\/ 此操作会将 fsys 作为 Module 另一个文件系统与 Module 相关联,\n\/\/ 当执行 Open 等操作时,会依然以关联顺序查找相应的文件系统, 直到找到。\nfunc (m *Module) AddFS(fsys ...fs.FS) { m.fs.Add(fsys...) }\n\nfunc (m *Module) Open(name string) (fs.File, error) { return m.fs.Open(name) }\n\n\/\/ Cache 获取缓存对象\n\/\/\n\/\/ 该缓存对象的 key 会自动添加 Module.ID 作为其前缀。\nfunc (m *Module) Cache() cache.Access {\n\treturn cache.Prefix(m.ID(), m.Server().Cache())\n}\n<|endoftext|>"} {"text":"<commit_before>package mmatcher\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\ntype Atter interface {\n\tEqual(Atter, Atter) bool\n}\n\ntype TextAtt struct {\n\tVal string\n}\n\ntype NumericAtt struct {\n\tVal float64\n}\n\nfunc (a TextAtt) Equal(b Atter, e Atter) bool {\n\tv, ok := b.(TextAtt)\n\treturn ok && a.Val == v.Val\n}\n\nfunc (a NumericAtt) Equal(b Atter, e Atter) bool {\n\tv, ok := b.(NumericAtt)\n\tif !ok {\n\t\treturn false\n\t}\n\tvar epsilon NumericAtt\n\tepsilon, ok = e.(NumericAtt)\n\tif ok && epsilon.Val > 0 {\n\t\treturn math.Abs(math.Abs(a.Val)-math.Abs(v.Val)) <= epsilon.Val\n\t}\n\treturn a.Val == v.Val\n}\n\nfunc (a NumericAtt) String() string {\n\treturn fmt.Sprintf(\"%v\", a.Val)\n}\n<commit_msg>Redeclare is legal now<commit_after>package mmatcher\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\ntype Atter interface {\n\tEqual(Atter, Atter) bool\n}\n\ntype TextAtt struct {\n\tVal string\n}\n\ntype NumericAtt struct {\n\tVal float64\n}\n\nfunc (a TextAtt) Equal(b Atter, e Atter) bool {\n\tv, ok := b.(TextAtt)\n\treturn ok && a.Val == v.Val\n}\n\nfunc (a NumericAtt) Equal(b Atter, e Atter) bool {\n\tv, ok := b.(NumericAtt)\n\tif !ok {\n\t\treturn false\n\t}\n\tepsilon, ok := e.(NumericAtt)\n\tif ok && epsilon.Val > 0 {\n\t\treturn math.Abs(math.Abs(a.Val)-math.Abs(v.Val)) <= epsilon.Val\n\t}\n\treturn a.Val == v.Val\n}\n\nfunc (a NumericAtt) String() string {\n\treturn fmt.Sprintf(\"%v\", a.Val)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/viktorasm\/redbutton\/server\/api\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc makeRoutes(s *server) http.Handler {\n\tm := mux.NewRouter()\n\n\tr := api.NewRouteWrapper(m)\n\n\t\/\/ so far only a stub of login service; returns new voterId each time it's called\n\tr.Post(\"\/api\/login\", func(c *api.HTTPHandlerContext) {\n\t\tc.Result(&api.LoginResponse{\n\t\t\tVoterID: uniqueID(),\n\t\t})\n\t})\n\n\tr.Post(\"\/api\/room\", s.createRoom)\n\tr.Get(\"\/api\/room\/{roomId}\", s.getRoomInfo)\n\tr.Post(\"\/api\/room\/{roomId}\", s.updateRoomInfo)\n\tr.Get(\"\/api\/room\/{roomId}\/voter\/{voterId}\", s.getVoterStatus)\n\tr.Post(\"\/api\/room\/{roomId}\/voter\/{voterId}\", s.handleChangeVoterStatus)\n\tr.Router.Methods(\"GET\").Path(\"\/api\/room\/{roomId}\/voter\/{voterId}\/events\").HandlerFunc(s.roomEventListenerHandler)\n\tm.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(s.UIDir)))\n\n\treturn m\n}\n<commit_msg>added route for swagger spec file<commit_after>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/viktorasm\/redbutton\/server\/api\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc makeRoutes(s *server) http.Handler {\n\tm := mux.NewRouter()\n\n\tr := api.NewRouteWrapper(m)\n\n\t\/\/ so far only a stub of login service; returns new voterId each time it's called\n\tr.Post(\"\/api\/login\", func(c *api.HTTPHandlerContext) {\n\t\tc.Result(&api.LoginResponse{\n\t\t\tVoterID: uniqueID(),\n\t\t})\n\t})\n\n\tr.Post(\"\/api\/room\", s.createRoom)\n\tr.Get(\"\/api\/room\/{roomId}\", s.getRoomInfo)\n\tr.Post(\"\/api\/room\/{roomId}\", s.updateRoomInfo)\n\tr.Get(\"\/api\/room\/{roomId}\/voter\/{voterId}\", s.getVoterStatus)\n\tr.Post(\"\/api\/room\/{roomId}\/voter\/{voterId}\", s.handleChangeVoterStatus)\n\tr.Router.Methods(\"GET\").Path(\"\/api\/room\/{roomId}\/voter\/{voterId}\/events\").HandlerFunc(s.roomEventListenerHandler)\n\tm.PathPrefix(\"\/swagger.spec.yml\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ serve our spec locally so it can be viewable from swagger-ui\n\t\thttp.ServeFile(w, r, \"api\/swagger.spec.yml\")\n\t})\n\tm.PathPrefix(\"\/\").Handler(http.FileServer(http.Dir(s.UIDir)))\n\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Jacob Taylor jacob@ablox.io\n\/\/ License: Apache2 - http:\/\/www.apache.org\/licenses\/LICENSE-2.0\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \/\/\"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"log\"\n \"github.com\/urfave\/cli\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\n\/\/ settings for the server\ntype Settings struct {\n ReadOnly bool\n AutoFlush bool\n Host string\n Port int32\n Listen string\n File string\n Directory string\n}\n\nvar globalSettings Settings = &Settings{\n ReadOnly: false,\n AutoFlush: true,\n Host: \"localhost\",\n Port: 8000,\n Listen: \"\",\n File: \"\",\n Directory: \"sample_disks\",\n}\n\nfunc send_export_list_item(output *bufio.Writer, options uint32, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, options, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer, options uint32) {\n send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n \/\/todo add support for folder specifications\n \/\/todo add support for file specificiation\n\n var filename bytes.Buffer\n current_directory, err := os.Getwd()\n utils.ErrorCheck(err)\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n file, err := os.OpenFile(filename.String(), os.O_RDWR, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n \/\/ if requested, padd with 124 zeros\n if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {\n offset += 124\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer = make([]byte, 2048*1024) \/\/ set the buffer to 2mb\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n fmt.Printf(\"W\")\n\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n file.Sync()\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\n\/*\nFirst check for a specific file. If one is specified, use it. If not, check for a directory. If that is not\navilable, use the CWD.\n *\/\nfunc send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {\n \/\/todo add support for file and directory here\n current_directory, err := os.Getwd()\n utils.ErrorCheck(err)\n\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n utils.ErrorCheck(err)\n\n if err != nil {\n log.Fatal(err)\n }\n for _, file := range files {\n send_export_list_item(output, options, file.Name())\n }\n\n send_ack(output, options)\n}\n\nfunc send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], options) \/\/ put out the server options\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nvar defaultOptions = []byte{0, 0}\n\nfunc main() {\n\n app := cli.NewApp()\n app.Name = \"AnyBlox\"\n app.Usage = \"block storage for the masses\"\n app.Action = func(c *cli.Context) error {\n fmt.Println(\"Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\\n\")\n return nil\n }\n\n app.Flags = []cli.Flag {\n cli.StringFlag{\n Name: \"host\",\n Value: globalSettings.Host,\n Usage: \"Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'\",\n Destination: &globalSettings.Host,\n },\n cli.StringFlag{\n Name: \"port\",\n Value: globalSettings.Port,\n Usage: \"Port you want to serve traffic on. e.x. '8000'\",\n Destination: &globalSettings.Port,\n },\n cli.StringFlag{\n Name: \"listen, l\",\n Destination: &globalSettings.Listen,\n Usage: \"Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'\",\n },\n cli.StringFlag{\n Name: \"file, f\",\n Destination: &globalSettings.File,\n Value: \"\",\n Usage: \"The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename\",\n },\n cli.StringFlag{\n Name: \"directory, d\",\n Destination: &globalSettings.Directory,\n Value: globalSettings.Directory,\n Usage: \"Specify a directory where the files to share are located. Default is 'sample_disks\",\n },\n }\n\n app.Run(os.Args)\n\n \/\/ Determine where the host should be listening to, depending on the arguments\n hostingAddress := globalSettings.Listen\n if len(globalSettings.Listen) == 0 {\n hostingAddress = globalSettings.Host + \":\" + globalSettings.Port\n }\n\n fmt.Printf(\"About to listen on %s\\n\", hostingAddress)\n listener, err := net.Listen(\"tcp\", hostingAddress)\n utils.ErrorCheck(err)\n\n fmt.Printf(\"aBlox server online\\n\")\n\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n defer fmt.Printf(\"End of line\\n\")\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n\n output.Write(defaultOptions)\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n _, err = io.ReadFull(conn, data[:waiting_for])\n utils.ErrorCheck(err)\n\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:16])\n\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for += 4\n _, err = io.ReadFull(conn, data[16:20])\n utils.ErrorCheck(err)\n }\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n\n fmt.Printf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n offset = waiting_for\n waiting_for += int(payload_size)\n _, err = io.ReadFull(conn, data[offset:waiting_for])\n utils.ErrorCheck(err)\n\n payload := make([]byte, payload_size)\n if payload_size > 0 {\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output, options, globalSettings)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, options, globalSettings)\n break\n }\n }\n\n}\n<commit_msg>supporting filename in send_export_list #19 #18<commit_after>\/\/ Copyright 2016 Jacob Taylor jacob@ablox.io\n\/\/ License: Apache2 - http:\/\/www.apache.org\/licenses\/LICENSE-2.0\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \/\/\"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"github.com\/urfave\/cli\"\n \"path\/filepath\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\n\/\/ settings for the server\ntype Settings struct {\n ReadOnly bool\n AutoFlush bool\n Host string\n Port int32\n Listen string\n File string\n Directory string\n}\n\nvar globalSettings Settings = &Settings{\n ReadOnly: false,\n AutoFlush: true,\n Host: \"localhost\",\n Port: 8000,\n Listen: \"\",\n File: \"\",\n Directory: \"sample_disks\",\n}\n\nfunc send_export_list_item(output *bufio.Writer, options uint32, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, options, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer, options uint32) {\n send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n \/\/todo add support for folder specifications\n \/\/todo add support for file specificiation\n\n var filename bytes.Buffer\n current_directory, err := os.Getwd()\n utils.ErrorCheck(err)\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n file, err := os.OpenFile(filename.String(), os.O_RDWR, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n \/\/ if requested, padd with 124 zeros\n if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {\n offset += 124\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer = make([]byte, 2048*1024) \/\/ set the buffer to 2mb\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n fmt.Printf(\"W\")\n\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n file.Sync()\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\n\/*\nFirst check for a specific file. If one is specified, use it. If not, check for a directory. If that is not\navilable, use the CWD.\n *\/\nfunc send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {\n if globalSettings.File {\n _, file := filepath.Split(globalSettings.File)\n\n send_export_list_item(output, options, file)\n send_ack(output, options)\n return\n }\n\n var current_directory string\n var err error\n if globalSettings.Directory == \"\" {\n current_directory, err = os.Getwd()\n utils.ErrorCheck(err)\n }\n\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n utils.ErrorCheck(err)\n\n for _, file := range files {\n send_export_list_item(output, options, file.Name())\n }\n\n send_ack(output, options)\n}\n\nfunc send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], options) \/\/ put out the server options\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nvar defaultOptions = []byte{0, 0}\n\nfunc main() {\n\n app := cli.NewApp()\n app.Name = \"AnyBlox\"\n app.Usage = \"block storage for the masses\"\n app.Action = func(c *cli.Context) error {\n fmt.Println(\"Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\\n\")\n return nil\n }\n\n app.Flags = []cli.Flag {\n cli.StringFlag{\n Name: \"host\",\n Value: globalSettings.Host,\n Usage: \"Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'\",\n Destination: &globalSettings.Host,\n },\n cli.StringFlag{\n Name: \"port\",\n Value: globalSettings.Port,\n Usage: \"Port you want to serve traffic on. e.x. '8000'\",\n Destination: &globalSettings.Port,\n },\n cli.StringFlag{\n Name: \"listen, l\",\n Destination: &globalSettings.Listen,\n Usage: \"Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'\",\n },\n cli.StringFlag{\n Name: \"file, f\",\n Destination: &globalSettings.File,\n Value: \"\",\n Usage: \"The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename\",\n },\n cli.StringFlag{\n Name: \"directory, d\",\n Destination: &globalSettings.Directory,\n Value: globalSettings.Directory,\n Usage: \"Specify a directory where the files to share are located. Default is 'sample_disks\",\n },\n }\n\n app.Run(os.Args)\n\n \/\/ Determine where the host should be listening to, depending on the arguments\n hostingAddress := globalSettings.Listen\n if len(globalSettings.Listen) == 0 {\n hostingAddress = globalSettings.Host + \":\" + globalSettings.Port\n }\n\n fmt.Printf(\"About to listen on %s\\n\", hostingAddress)\n listener, err := net.Listen(\"tcp\", hostingAddress)\n utils.ErrorCheck(err)\n\n fmt.Printf(\"aBlox server online\\n\")\n\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n defer fmt.Printf(\"End of line\\n\")\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n\n output.Write(defaultOptions)\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n _, err = io.ReadFull(conn, data[:waiting_for])\n utils.ErrorCheck(err)\n\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:16])\n\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for += 4\n _, err = io.ReadFull(conn, data[16:20])\n utils.ErrorCheck(err)\n }\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n\n fmt.Printf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n offset = waiting_for\n waiting_for += int(payload_size)\n _, err = io.ReadFull(conn, data[offset:waiting_for])\n utils.ErrorCheck(err)\n\n payload := make([]byte, payload_size)\n if payload_size > 0 {\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output, options, globalSettings)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, options, globalSettings)\n break\n }\n }\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/Package server contains the ts3chatter server.\npackage server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/crackdog\/ts3sqlib\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/Server contains the ts3 sq connection and other ts3 server data.\ntype Server struct {\n\tts3conn *ts3sqlib.SqConn\n\tdata *serverData\n\taddress string\n\tloginname string\n\tpassword string\n\tnickname string\n\tvirtualserver int\n\tlogger *log.Logger\n\tsleepseconds int\n\thandlermutex *sync.Mutex\n\tdatamutex *sync.Mutex\n\tquit chan bool\n\tclosed bool\n}\n\ntype serverData struct {\n\tclientlist []ts3sqlib.Client\n\tn int \/\/Number of online clients.\n\tchannellist []channel\n}\n\ntype channel struct {\n\tName string `json:\"channel_name\"`\n\tData map[string]string `json:\"-\"`\n\tClients []ts3sqlib.Client `json:\"clients\"`\n}\n\n\/\/New creates a new Server structure.\nfunc New(address, login, password string, virtualserver int,\n\tlogger *log.Logger, sleepseconds int, nick string) (s *Server, err error) {\n\n\ts = new(Server)\n\ts.address = address\n\ts.loginname = login\n\ts.password = password\n\ts.nickname = nick\n\ts.virtualserver = virtualserver\n\ts.logger = logger\n\t\/\/s.clientlist = new(clients)\n\t\/\/s.clientlist.cl = make([]ts3sqlib.Client, 0)\n\t\/\/s.clientlist.n = 0\n\ts.data = new(serverData)\n\ts.data.clientlist = make([]ts3sqlib.Client, 0)\n\ts.data.channellist = make([]channel, 0)\n\ts.data.n = 0\n\ts.sleepseconds = sleepseconds\n\ts.handlermutex = new(sync.Mutex)\n\ts.datamutex = new(sync.Mutex)\n\n\ts.quit = make(chan bool)\n\ts.closed = false\n\n\ts.ts3conn, err = ts3sqlib.Dial(address, logger)\n\tif err != nil {\n\t\ts = nil\n\t\treturn\n\t}\n\n\tgo s.dataReceiver(time.Duration(s.sleepseconds) * time.Second)\n\n\treturn\n}\n\nfunc (s *Server) login() (err error) {\n\tif s.ts3conn == nil {\n\t\terr = fmt.Errorf(\"login: nil pointer\")\n\t\treturn\n\t}\n\n\terr = s.ts3conn.Use(s.virtualserver)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = s.ts3conn.Login(s.loginname, s.password)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/changing nickname...\n\tpairs, err := s.ts3conn.SendToMap(\"whoami\\n\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tclid, ok := pairs[\"client_id\"]\n\tif !ok {\n\t\terr = fmt.Errorf(\"error at collecting client_id\")\n\t\treturn\n\t}\n\n\t_, err = s.ts3conn.Send(\"clientupdate clid=\" + clid + \" client_nickname=\" +\n\t\ts.nickname + \"\\n\")\n\n\treturn\n}\n\n\/\/Quit disconnects the ts3 sq connection of a Server.\nfunc (s *Server) Quit() (err error) {\n\ts.closed = true\n\ts.quit <- true\n\tif s.ts3conn != nil {\n\t\terr = s.ts3conn.Quit()\n\t} else {\n\t\terr = fmt.Errorf(\"quit: s.ts3conn nil error\")\n\t}\n\treturn\n}\n\nfunc (s *Server) log(v ...interface{}) {\n\tif s.logger != nil {\n\t\ts.logger.Print(v...)\n\t}\n}\n\nfunc (s *Server) reconnect() (err error) {\n\tif !s.ts3conn.IsClosed() {\n\t\t_ = s.ts3conn.Quit()\n\t}\n\n\tfor {\n\n\t\ts.ts3conn, err = ts3sqlib.Dial(s.address, s.logger)\n\t\tif err == nil {\n\t\t\terr = s.login()\n\t\t}\n\n\t\tif err != nil {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\ts.closed = false\n\t}\n\n\treturn\n}\n\nfunc (s *Server) handleError(err error) {\n\tswitch {\n\tcase ts3sqlib.ClosedError.Equals(err):\n\t\terr = s.reconnect()\n\tcase ts3sqlib.PermissionError.Equals(err):\n\t\terr = s.login()\n\tdefault:\n\t\t\/\/nop\n\t}\n\n\tif err != nil {\n\t\ts.log(\"handleError: \", err)\n\t}\n}\n\n\/\/clientlistReceiver receives a Clientlist every\nfunc (s *Server) dataReceiver(sleeptime time.Duration) {\n\tvar (\n\t\tdata *serverData\n\t\terr error\n\t)\n\n\terr = s.login()\n\tif err != nil {\n\t\ts.log(err)\n\t\ts.Quit()\n\t\treturn\n\t}\n\n\tfor !s.closed {\n\t\terr = nil\n\t\tdata = new(serverData)\n\n\t\tdata.clientlist, err = s.ts3conn.ClientlistToClients(\"\")\n\t\tif err != nil {\n\t\t\ts.handleError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tchannelmaps, err := s.ts3conn.SendToMaps(\"channellist\\n\")\n\t\tif err != nil {\n\t\t\ts.handleError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdata.channellist = make([]channel, len(channelmaps))\n\t\tfor i := range data.channellist {\n\t\t\tdata.channellist[i].Data = channelmaps[i]\n\t\t\tdata.channellist[i].Name = data.channellist[i].Data[\"channel_name\"]\n\t\t\tdata.channellist[i].Clients = make([]ts3sqlib.Client, 0, 2)\n\t\t}\n\n\t\tfor i := range data.channellist {\n\t\t\tchannelIndex, tmperr := strconv.Atoi(data.channellist[i].Data[\"cid\"])\n\t\t\tif tmperr != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, c := range data.clientlist {\n\t\t\t\tif c.Cid == channelIndex {\n\t\t\t\t\tdata.channellist[i].Clients = append(data.channellist[i].Clients, c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ts.datamutex.Lock()\n\t\ts.data = data\n\t\ts.datamutex.Unlock()\n\n\t\ttime.Sleep(sleeptime)\n\t}\n}\n\nfunc (s *Server) notificationHandler() {\n\tvar (\n\t\tanswer string\n\t\terr error\n\t)\n\n\tfor {\n\t\tanswer, err = s.ts3conn.RecvNotify()\n\t\tif err != nil {\n\t\t\ts.log(err)\n\t\t} else {\n\t\t\t\/\/handle notification\n\t\t\ts.log(answer)\n\t\t}\n\t}\n}\n<commit_msg>deleting some useless comments<commit_after>\/\/Package server contains the ts3chatter server.\npackage server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/crackdog\/ts3sqlib\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/Server contains the ts3 sq connection and other ts3 server data.\ntype Server struct {\n\tts3conn *ts3sqlib.SqConn\n\tdata *serverData\n\taddress string\n\tloginname string\n\tpassword string\n\tnickname string\n\tvirtualserver int\n\tlogger *log.Logger\n\tsleepseconds int\n\thandlermutex *sync.Mutex\n\tdatamutex *sync.Mutex\n\tquit chan bool\n\tclosed bool\n}\n\ntype serverData struct {\n\tclientlist []ts3sqlib.Client\n\tn int \/\/Number of online clients.\n\tchannellist []channel\n}\n\ntype channel struct {\n\tName string `json:\"channel_name\"`\n\tData map[string]string `json:\"-\"`\n\tClients []ts3sqlib.Client `json:\"clients\"`\n}\n\n\/\/New creates a new Server structure.\nfunc New(address, login, password string, virtualserver int,\n\tlogger *log.Logger, sleepseconds int, nick string) (s *Server, err error) {\n\n\ts = new(Server)\n\ts.address = address\n\ts.loginname = login\n\ts.password = password\n\ts.nickname = nick\n\ts.virtualserver = virtualserver\n\ts.logger = logger\n\ts.data = new(serverData)\n\ts.data.clientlist = make([]ts3sqlib.Client, 0)\n\ts.data.channellist = make([]channel, 0)\n\ts.data.n = 0\n\ts.sleepseconds = sleepseconds\n\ts.handlermutex = new(sync.Mutex)\n\ts.datamutex = new(sync.Mutex)\n\n\ts.quit = make(chan bool)\n\ts.closed = false\n\n\ts.ts3conn, err = ts3sqlib.Dial(address, logger)\n\tif err != nil {\n\t\ts = nil\n\t\treturn\n\t}\n\n\tgo s.dataReceiver(time.Duration(s.sleepseconds) * time.Second)\n\n\treturn\n}\n\nfunc (s *Server) login() (err error) {\n\tif s.ts3conn == nil {\n\t\terr = fmt.Errorf(\"login: nil pointer\")\n\t\treturn\n\t}\n\n\terr = s.ts3conn.Use(s.virtualserver)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = s.ts3conn.Login(s.loginname, s.password)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/changing nickname...\n\tpairs, err := s.ts3conn.SendToMap(\"whoami\\n\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tclid, ok := pairs[\"client_id\"]\n\tif !ok {\n\t\terr = fmt.Errorf(\"error at collecting client_id\")\n\t\treturn\n\t}\n\n\t_, err = s.ts3conn.Send(\"clientupdate clid=\" + clid + \" client_nickname=\" +\n\t\ts.nickname + \"\\n\")\n\n\treturn\n}\n\n\/\/Quit disconnects the ts3 sq connection of a Server.\nfunc (s *Server) Quit() (err error) {\n\ts.closed = true\n\ts.quit <- true\n\tif s.ts3conn != nil {\n\t\terr = s.ts3conn.Quit()\n\t} else {\n\t\terr = fmt.Errorf(\"quit: s.ts3conn nil error\")\n\t}\n\treturn\n}\n\nfunc (s *Server) log(v ...interface{}) {\n\tif s.logger != nil {\n\t\ts.logger.Print(v...)\n\t}\n}\n\nfunc (s *Server) reconnect() (err error) {\n\tif !s.ts3conn.IsClosed() {\n\t\t_ = s.ts3conn.Quit()\n\t}\n\n\tfor {\n\n\t\ts.ts3conn, err = ts3sqlib.Dial(s.address, s.logger)\n\t\tif err == nil {\n\t\t\terr = s.login()\n\t\t}\n\n\t\tif err != nil {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\ts.closed = false\n\t}\n\n\treturn\n}\n\nfunc (s *Server) handleError(err error) {\n\tswitch {\n\tcase ts3sqlib.ClosedError.Equals(err):\n\t\terr = s.reconnect()\n\tcase ts3sqlib.PermissionError.Equals(err):\n\t\terr = s.login()\n\tdefault:\n\t\t\/\/nop\n\t}\n\n\tif err != nil {\n\t\ts.log(\"handleError: \", err)\n\t}\n}\n\nfunc (s *Server) dataReceiver(sleeptime time.Duration) {\n\tvar (\n\t\tdata *serverData\n\t\terr error\n\t)\n\n\terr = s.login()\n\tif err != nil {\n\t\ts.handleError(err)\n\t}\n\n\tfor !s.closed {\n\t\terr = nil\n\t\tdata = new(serverData)\n\n\t\tdata.clientlist, err = s.ts3conn.ClientlistToClients(\"\")\n\t\tif err != nil {\n\t\t\ts.handleError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tchannelmaps, err := s.ts3conn.SendToMaps(\"channellist\\n\")\n\t\tif err != nil {\n\t\t\ts.handleError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdata.channellist = make([]channel, len(channelmaps))\n\t\tfor i := range data.channellist {\n\t\t\tdata.channellist[i].Data = channelmaps[i]\n\t\t\tdata.channellist[i].Name = data.channellist[i].Data[\"channel_name\"]\n\t\t\tdata.channellist[i].Clients = make([]ts3sqlib.Client, 0, 2)\n\t\t}\n\n\t\tfor i := range data.channellist {\n\t\t\tchannelIndex, tmperr := strconv.Atoi(data.channellist[i].Data[\"cid\"])\n\t\t\tif tmperr != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, c := range data.clientlist {\n\t\t\t\tif c.Cid == channelIndex {\n\t\t\t\t\tdata.channellist[i].Clients = append(data.channellist[i].Clients, c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ts.datamutex.Lock()\n\t\ts.data = data\n\t\ts.datamutex.Unlock()\n\n\t\ttime.Sleep(sleeptime)\n\t}\n}\n\nfunc (s *Server) notificationHandler() {\n\tvar (\n\t\tanswer string\n\t\terr error\n\t)\n\n\tfor {\n\t\tanswer, err = s.ts3conn.RecvNotify()\n\t\tif err != nil {\n\t\t\ts.log(err)\n\t\t} else {\n\t\t\t\/\/handle notification\n\t\t\ts.log(answer)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/janelia-flyem\/dvid\/datastore\"\n\t\"github.com\/janelia-flyem\/dvid\/dvid\"\n\t\"github.com\/janelia-flyem\/dvid\/storage\"\n\n\t\"github.com\/janelia-flyem\/go\/nrsc\"\n)\n\nconst (\n\t\/\/ The default URL of the DVID web server\n\tDefaultWebAddress = \"localhost:8000\"\n\n\t\/\/ The default RPC address of the DVID RPC server\n\tDefaultRPCAddress = \"localhost:8001\"\n\n\t\/\/ The relative URL path to our Level 2 REST API\n\tWebAPIPath = \"\/api\/\"\n\n\t\/\/ The relative URL path to a DVID web console.\n\t\/\/ The root URL will be redirected to \/{ConsolePath}\/index.html\n\tConsolePath = \"\/console\/\"\n\n\t\/\/ The name of the server error log, stored in the datastore directory.\n\tErrorLogFilename = \"dvid-errors.log\"\n)\n\nvar (\n\t\/\/ runningService is a global variable that holds the currently running\n\t\/\/ datastore service.\n\trunningService = Service{\n\t\tWebAddress: DefaultWebAddress,\n\t\tRPCAddress: DefaultRPCAddress,\n\t}\n\n\t\/\/ ActiveHandlers is maximum number of active handlers over last second.\n\tActiveHandlers int\n\n\t\/\/ Running tally of active handlers up to the last second\n\tcurActiveHandlers int\n\n\t\/\/ MaxChunkHandlers sets the maximum number of chunk handlers (goroutines) that\n\t\/\/ can be multiplexed onto available cores. (See -numcpu setting in dvid.go)\n\tMaxChunkHandlers = runtime.NumCPU()\n\n\t\/\/ HandlerToken is buffered channel to limit spawning of goroutines.\n\t\/\/ See ProcessChunk() in datatype\/voxels for example.\n\tHandlerToken = make(chan int, MaxChunkHandlers)\n\n\t\/\/ Timeout in seconds for waiting to open a datastore for exclusive access.\n\tTimeoutSecs int\n\n\t\/\/ GzipAPI turns on gzip compression on REST API responses.\n\t\/\/ For high bandwidth networks or local use, it is better to leave gzip\n\t\/\/ off because delay due to compression is frequently higher than gains\n\t\/\/ from decreased response size.\n\tGzipAPI = false\n)\n\n\/\/ Service holds information on the servers attached to a DVID datastore. If more than\n\/\/ one storage engine is used by a DVID server, e.g., polyglot persistence where graphs\n\/\/ are managed by a graph database and key-value by a key-value database, this would\n\/\/ be the level at which the storage engines are integrated.\ntype Service struct {\n\t\/\/ The currently opened DVID datastore\n\t*datastore.Service\n\n\t\/\/ Error log directory\n\tErrorLogDir string\n\n\t\/\/ The address of the web server\n\tWebAddress string\n\n\t\/\/ The path to the DVID web client\n\tWebClientPath string\n\n\t\/\/ The address of the rpc server\n\tRPCAddress string\n}\n\nfunc init() {\n\t\/\/ Initialize the number of handler tokens available.\n\tfor i := 0; i < MaxChunkHandlers; i++ {\n\t\tHandlerToken <- 1\n\t}\n\n\t\/\/ Monitor the handler token load, resetting every second.\n\tloadCheckTimer := time.Tick(10 * time.Millisecond)\n\tticks := 0\n\tgo func() {\n\t\tfor {\n\t\t\t<-loadCheckTimer\n\t\t\tticks = (ticks + 1) % 100\n\t\t\tif ticks == 0 {\n\t\t\t\tActiveHandlers = curActiveHandlers\n\t\t\t\tcurActiveHandlers = 0\n\t\t\t}\n\t\t\tnumHandlers := MaxChunkHandlers - len(HandlerToken)\n\t\t\tif numHandlers > curActiveHandlers {\n\t\t\t\tcurActiveHandlers = numHandlers\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ DatastoreService returns the current datastore service. One DVID process\n\/\/ is assigned to one datastore service, although it may be possible to have\n\/\/ multiple (polyglot) persistence backends attached to that one service.\nfunc DatastoreService() *datastore.Service {\n\treturn runningService.Service\n}\n\n\/\/ MatchingUUID returns a UUID on this server that uniquely matches a uuid string.\nfunc MatchingUUID(uuidStr string) (uuid datastore.UUID, err error) {\n\tif runningService.Service == nil {\n\t\terr = fmt.Errorf(\"Datastore service has not been started on this server.\")\n\t\treturn\n\t}\n\tuuid, _, _, err = runningService.Service.NodeIDFromString(uuidStr)\n\treturn\n}\n\n\/\/ VersionLocalID returns a server-specific local ID for the node with the given UUID.\nfunc VersionLocalID(uuid datastore.UUID) (datastore.VersionLocalID, error) {\n\tif runningService.Service == nil {\n\t\treturn 0, fmt.Errorf(\"Datastore service has not been started on this server.\")\n\t}\n\t_, versionID, err := runningService.Service.LocalIDFromUUID(uuid)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn versionID, nil\n}\n\n\/\/ StorageEngine returns the default storage engine or nil if it's not available.\nfunc StorageEngine() storage.Engine {\n\tif runningService.Service == nil {\n\t\treturn nil\n\t}\n\treturn runningService.StorageEngine()\n}\n\n\/\/ Shutdown handles graceful cleanup of server functions before exiting DVID.\n\/\/ This may not be so graceful if the chunk handler uses cgo since the interrupt\n\/\/ may be caught during cgo execution.\nfunc Shutdown() {\n\tif runningService.Service != nil {\n\t\trunningService.Service.Shutdown()\n\t}\n\tfor {\n\t\tactive := MaxChunkHandlers - len(HandlerToken)\n\t\tif active > 0 {\n\t\t\tlog.Printf(\"Waiting for %d chunk handlers to finish...\\n\", active)\n\t\t} else {\n\t\t\tlog.Println(\"No chunk handlers active...\")\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tdvid.BlockOnActiveCgo()\n}\n\n\/\/ ServerlessDo runs a command locally, opening and closing a datastore\n\/\/ as necessary.\nfunc ServerlessDo(datastoreDir string, request datastore.Request, reply *datastore.Response) error {\n\t\/\/ Make sure we don't already have an open datastore.\n\tif runningService.Service != nil {\n\t\treturn fmt.Errorf(\"Cannot do concurrent requests on different datastores.\")\n\t}\n\n\t\/\/ Get exclusive ownership of a DVID datastore. Wait if allowed and necessary.\n\tdvid.Fmt(dvid.Debug, \"Getting exclusive ownership of datastore at: %s\\n\", datastoreDir)\n\tstartTime := time.Now()\n\tfor {\n\t\tvar err *datastore.OpenError\n\t\trunningService.Service, err = datastore.Open(datastoreDir)\n\t\tif err != nil {\n\t\t\tif TimeoutSecs == 0 || err.ErrorType != datastore.ErrorOpening {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdvid.Fmt(dvid.Debug, \"Waiting a second for exclusive datastore access...\\n\")\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\telapsed := time.Since(startTime).Seconds()\n\t\t\tif elapsed > float64(TimeoutSecs) {\n\t\t\t\treturn fmt.Errorf(\"Unable to obtain exclusive access of datastore (%s) in %d seconds\",\n\t\t\t\t\tdatastoreDir, TimeoutSecs)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tdefer Shutdown()\n\n\t\/\/ Register an error logger that appends to a file in this datastore directory.\n\terrorLog := filepath.Join(datastoreDir, ErrorLogFilename)\n\tfile, err := os.OpenFile(errorLog, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to open error logging file (%s): %s\\n\", errorLog, err.Error())\n\t}\n\tdvid.SetErrorLoggingFile(file)\n\n\t\/\/ Issue local command\n\tvar localConnect RPCConnection\n\terr = localConnect.Do(request, reply)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ OpenDatastore returns a Server service. Only one datastore can be opened\n\/\/ for any server.\nfunc OpenDatastore(datastoreDir string) (service *Service, err error) {\n\t\/\/ Make sure we don't already have an open datastore.\n\tif runningService.Service != nil {\n\t\terr = fmt.Errorf(\"Cannot create new server. A DVID process can serve only one datastore.\")\n\t\treturn\n\t}\n\n\t\/\/ Get exclusive ownership of a DVID datastore\n\tlog.Println(\"Getting exclusive ownership of datastore at:\", datastoreDir)\n\n\tvar openErr *datastore.OpenError\n\trunningService.Service, openErr = datastore.Open(datastoreDir)\n\tif openErr != nil {\n\t\terr = openErr\n\t\treturn\n\t}\n\trunningService.ErrorLogDir = datastoreDir\n\n\tservice = &runningService\n\treturn\n}\n\n\/\/ Serve opens a datastore then creates both web and rpc servers for the datastore.\n\/\/ This function must be called for DatastoreService() to be non-nil.\nfunc (service *Service) Serve(webAddress, webClientDir, rpcAddress string) error {\n\tlog.Printf(\"Using %d of %d logical CPUs for DVID.\\n\", dvid.NumCPU, runtime.NumCPU())\n\n\t\/\/ Register an error logger that appends to a file in this datastore directory.\n\terrorLog := filepath.Join(service.ErrorLogDir, ErrorLogFilename)\n\tfile, err := os.OpenFile(errorLog, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to open error logging file (%s): %s\\n\", errorLog, err.Error())\n\t}\n\tdvid.SetErrorLoggingFile(file)\n\n\t\/\/ Launch the web server\n\tgo runningService.ServeHttp(webAddress, webClientDir)\n\n\t\/\/ Launch the rpc server\n\terr = runningService.ServeRpc(rpcAddress)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn nil\n}\n\n\/\/ Listen and serve HTTP requests using address and don't let stay-alive\n\/\/ connections hog goroutines for more than an hour.\n\/\/ See for discussion:\n\/\/ http:\/\/stackoverflow.com\/questions\/10971800\/golang-http-server-leaving-open-goroutines\nfunc (service *Service) ServeHttp(address, clientDir string) {\n\tif address == \"\" {\n\t\taddress = DefaultWebAddress\n\t}\n\tservice.WebAddress = address\n\tservice.WebClientPath = clientDir\n\tfmt.Printf(\"Web server listening at %s ...\\n\", address)\n\n\tsrc := &http.Server{\n\t\tAddr: address,\n\t\tReadTimeout: 1 * time.Hour,\n\t}\n\n\t\/\/ Handle Level 2 REST API.\n\tif GzipAPI {\n\t\tfmt.Println(\"HTTP server will return gzip values if permitted by browser.\")\n\t\thttp.HandleFunc(WebAPIPath, makeGzipHandler(apiHandler))\n\t} else {\n\t\thttp.HandleFunc(WebAPIPath, apiHandler)\n\t}\n\n\t\/\/ Handle static files through serving embedded files\n\t\/\/ via nrsc or loading files from a specified web client directory.\n\tif clientDir == \"\" {\n\t\terr := nrsc.Handle(ConsolePath)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"ERROR with nrsc trying to serve web pages:\", err.Error())\n\t\t\tfmt.Println(webClientUnavailableMessage)\n\t\t\tfmt.Println(\"HTTP server will be started without webclient...\\n\")\n\t\t\thttp.HandleFunc(ConsolePath, mainHandler)\n\t\t} else {\n\t\t\tfmt.Println(\"Serving web client from embedded files...\")\n\t\t}\n\t} else {\n\t\thttp.HandleFunc(ConsolePath, mainHandler)\n\t\tdvid.Log(dvid.Debug, \"Serving web pages from %s\\n\", clientDir)\n\t}\n\n\t\/\/ Manage redirection from \/ to the ConsolePath.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvar urlStr string\n\t\tif r.URL.Path == \"\/\" {\n\t\t\turlStr = ConsolePath + \"index.html\"\n\t\t} else {\n\t\t\turlStr = ConsolePath + strings.TrimLeft(r.URL.Path, \"\/\")\n\t\t}\n\t\tdvid.Fmt(dvid.Debug, \"Redirect %s -> %s\\n\", r.URL.Path, urlStr)\n\t\thttp.Redirect(w, r, urlStr, http.StatusMovedPermanently)\n\t})\n\n\t\/\/ Serve it up!\n\tsrc.ListenAndServe()\n}\n\n\/\/ Listen and serve RPC requests using address.\nfunc (service *Service) ServeRpc(address string) error {\n\tif address == \"\" {\n\t\taddress = DefaultRPCAddress\n\t}\n\tservice.RPCAddress = address\n\tdvid.Log(dvid.Debug, \"Rpc server listening at %s ...\\n\", address)\n\n\tc := new(RPCConnection)\n\trpc.Register(c)\n\trpc.HandleHTTP()\n\tlistener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\thttp.Serve(listener, nil)\n\treturn nil\n}\n\n\/\/ Nod to Andrew Gerrand for simple gzip solution:\n\/\/ See https:\/\/groups.google.com\/forum\/m\/?fromgroups#!topic\/golang-nuts\/eVnTcMwNVjM\ntype gzipResponseWriter struct {\n\tio.Writer\n\thttp.ResponseWriter\n}\n\nfunc (w gzipResponseWriter) Write(b []byte) (int, error) {\n\treturn w.Writer.Write(b)\n}\n\nfunc makeGzipHandler(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\tfn(w, r)\n\t\t\treturn\n\t\t}\n\t\tdvid.Log(dvid.Debug, \"Responding to request with gzip\\n\")\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgz := gzip.NewWriter(w)\n\t\tdefer gz.Close()\n\t\tfn(gzipResponseWriter{Writer: gz, ResponseWriter: w}, r)\n\t}\n}\n<commit_msg>wraps http handler functions so that panics are caught to prevent killing the program<commit_after>package server\n\nimport (\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/janelia-flyem\/dvid\/datastore\"\n\t\"github.com\/janelia-flyem\/dvid\/dvid\"\n\t\"github.com\/janelia-flyem\/dvid\/storage\"\n\n\t\"github.com\/janelia-flyem\/go\/nrsc\"\n)\n\nconst (\n\t\/\/ The default URL of the DVID web server\n\tDefaultWebAddress = \"localhost:8000\"\n\n\t\/\/ The default RPC address of the DVID RPC server\n\tDefaultRPCAddress = \"localhost:8001\"\n\n\t\/\/ The relative URL path to our Level 2 REST API\n\tWebAPIPath = \"\/api\/\"\n\n\t\/\/ The relative URL path to a DVID web console.\n\t\/\/ The root URL will be redirected to \/{ConsolePath}\/index.html\n\tConsolePath = \"\/console\/\"\n\n\t\/\/ The name of the server error log, stored in the datastore directory.\n\tErrorLogFilename = \"dvid-errors.log\"\n)\n\nvar (\n\t\/\/ runningService is a global variable that holds the currently running\n\t\/\/ datastore service.\n\trunningService = Service{\n\t\tWebAddress: DefaultWebAddress,\n\t\tRPCAddress: DefaultRPCAddress,\n\t}\n\n\t\/\/ ActiveHandlers is maximum number of active handlers over last second.\n\tActiveHandlers int\n\n\t\/\/ Running tally of active handlers up to the last second\n\tcurActiveHandlers int\n\n\t\/\/ MaxChunkHandlers sets the maximum number of chunk handlers (goroutines) that\n\t\/\/ can be multiplexed onto available cores. (See -numcpu setting in dvid.go)\n\tMaxChunkHandlers = runtime.NumCPU()\n\n\t\/\/ HandlerToken is buffered channel to limit spawning of goroutines.\n\t\/\/ See ProcessChunk() in datatype\/voxels for example.\n\tHandlerToken = make(chan int, MaxChunkHandlers)\n\n\t\/\/ Timeout in seconds for waiting to open a datastore for exclusive access.\n\tTimeoutSecs int\n\n\t\/\/ GzipAPI turns on gzip compression on REST API responses.\n\t\/\/ For high bandwidth networks or local use, it is better to leave gzip\n\t\/\/ off because delay due to compression is frequently higher than gains\n\t\/\/ from decreased response size.\n\tGzipAPI = false\n)\n\n\/\/ Service holds information on the servers attached to a DVID datastore. If more than\n\/\/ one storage engine is used by a DVID server, e.g., polyglot persistence where graphs\n\/\/ are managed by a graph database and key-value by a key-value database, this would\n\/\/ be the level at which the storage engines are integrated.\ntype Service struct {\n\t\/\/ The currently opened DVID datastore\n\t*datastore.Service\n\n\t\/\/ Error log directory\n\tErrorLogDir string\n\n\t\/\/ The address of the web server\n\tWebAddress string\n\n\t\/\/ The path to the DVID web client\n\tWebClientPath string\n\n\t\/\/ The address of the rpc server\n\tRPCAddress string\n}\n\nfunc init() {\n\t\/\/ Initialize the number of handler tokens available.\n\tfor i := 0; i < MaxChunkHandlers; i++ {\n\t\tHandlerToken <- 1\n\t}\n\n\t\/\/ Monitor the handler token load, resetting every second.\n\tloadCheckTimer := time.Tick(10 * time.Millisecond)\n\tticks := 0\n\tgo func() {\n\t\tfor {\n\t\t\t<-loadCheckTimer\n\t\t\tticks = (ticks + 1) % 100\n\t\t\tif ticks == 0 {\n\t\t\t\tActiveHandlers = curActiveHandlers\n\t\t\t\tcurActiveHandlers = 0\n\t\t\t}\n\t\t\tnumHandlers := MaxChunkHandlers - len(HandlerToken)\n\t\t\tif numHandlers > curActiveHandlers {\n\t\t\t\tcurActiveHandlers = numHandlers\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ DatastoreService returns the current datastore service. One DVID process\n\/\/ is assigned to one datastore service, although it may be possible to have\n\/\/ multiple (polyglot) persistence backends attached to that one service.\nfunc DatastoreService() *datastore.Service {\n\treturn runningService.Service\n}\n\n\/\/ MatchingUUID returns a UUID on this server that uniquely matches a uuid string.\nfunc MatchingUUID(uuidStr string) (uuid datastore.UUID, err error) {\n\tif runningService.Service == nil {\n\t\terr = fmt.Errorf(\"Datastore service has not been started on this server.\")\n\t\treturn\n\t}\n\tuuid, _, _, err = runningService.Service.NodeIDFromString(uuidStr)\n\treturn\n}\n\n\/\/ VersionLocalID returns a server-specific local ID for the node with the given UUID.\nfunc VersionLocalID(uuid datastore.UUID) (datastore.VersionLocalID, error) {\n\tif runningService.Service == nil {\n\t\treturn 0, fmt.Errorf(\"Datastore service has not been started on this server.\")\n\t}\n\t_, versionID, err := runningService.Service.LocalIDFromUUID(uuid)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn versionID, nil\n}\n\n\/\/ StorageEngine returns the default storage engine or nil if it's not available.\nfunc StorageEngine() storage.Engine {\n\tif runningService.Service == nil {\n\t\treturn nil\n\t}\n\treturn runningService.StorageEngine()\n}\n\n\/\/ Shutdown handles graceful cleanup of server functions before exiting DVID.\n\/\/ This may not be so graceful if the chunk handler uses cgo since the interrupt\n\/\/ may be caught during cgo execution.\nfunc Shutdown() {\n\tif runningService.Service != nil {\n\t\trunningService.Service.Shutdown()\n\t}\n\tfor {\n\t\tactive := MaxChunkHandlers - len(HandlerToken)\n\t\tif active > 0 {\n\t\t\tlog.Printf(\"Waiting for %d chunk handlers to finish...\\n\", active)\n\t\t} else {\n\t\t\tlog.Println(\"No chunk handlers active...\")\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tdvid.BlockOnActiveCgo()\n}\n\n\/\/ ServerlessDo runs a command locally, opening and closing a datastore\n\/\/ as necessary.\nfunc ServerlessDo(datastoreDir string, request datastore.Request, reply *datastore.Response) error {\n\t\/\/ Make sure we don't already have an open datastore.\n\tif runningService.Service != nil {\n\t\treturn fmt.Errorf(\"Cannot do concurrent requests on different datastores.\")\n\t}\n\n\t\/\/ Get exclusive ownership of a DVID datastore. Wait if allowed and necessary.\n\tdvid.Fmt(dvid.Debug, \"Getting exclusive ownership of datastore at: %s\\n\", datastoreDir)\n\tstartTime := time.Now()\n\tfor {\n\t\tvar err *datastore.OpenError\n\t\trunningService.Service, err = datastore.Open(datastoreDir)\n\t\tif err != nil {\n\t\t\tif TimeoutSecs == 0 || err.ErrorType != datastore.ErrorOpening {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdvid.Fmt(dvid.Debug, \"Waiting a second for exclusive datastore access...\\n\")\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\telapsed := time.Since(startTime).Seconds()\n\t\t\tif elapsed > float64(TimeoutSecs) {\n\t\t\t\treturn fmt.Errorf(\"Unable to obtain exclusive access of datastore (%s) in %d seconds\",\n\t\t\t\t\tdatastoreDir, TimeoutSecs)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tdefer Shutdown()\n\n\t\/\/ Register an error logger that appends to a file in this datastore directory.\n\terrorLog := filepath.Join(datastoreDir, ErrorLogFilename)\n\tfile, err := os.OpenFile(errorLog, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to open error logging file (%s): %s\\n\", errorLog, err.Error())\n\t}\n\tdvid.SetErrorLoggingFile(file)\n\n\t\/\/ Issue local command\n\tvar localConnect RPCConnection\n\terr = localConnect.Do(request, reply)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ OpenDatastore returns a Server service. Only one datastore can be opened\n\/\/ for any server.\nfunc OpenDatastore(datastoreDir string) (service *Service, err error) {\n\t\/\/ Make sure we don't already have an open datastore.\n\tif runningService.Service != nil {\n\t\terr = fmt.Errorf(\"Cannot create new server. A DVID process can serve only one datastore.\")\n\t\treturn\n\t}\n\n\t\/\/ Get exclusive ownership of a DVID datastore\n\tlog.Println(\"Getting exclusive ownership of datastore at:\", datastoreDir)\n\n\tvar openErr *datastore.OpenError\n\trunningService.Service, openErr = datastore.Open(datastoreDir)\n\tif openErr != nil {\n\t\terr = openErr\n\t\treturn\n\t}\n\trunningService.ErrorLogDir = datastoreDir\n\n\tservice = &runningService\n\treturn\n}\n\n\/\/ Serve opens a datastore then creates both web and rpc servers for the datastore.\n\/\/ This function must be called for DatastoreService() to be non-nil.\nfunc (service *Service) Serve(webAddress, webClientDir, rpcAddress string) error {\n\tlog.Printf(\"Using %d of %d logical CPUs for DVID.\\n\", dvid.NumCPU, runtime.NumCPU())\n\n\t\/\/ Register an error logger that appends to a file in this datastore directory.\n\terrorLog := filepath.Join(service.ErrorLogDir, ErrorLogFilename)\n\tfile, err := os.OpenFile(errorLog, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to open error logging file (%s): %s\\n\", errorLog, err.Error())\n\t}\n\tdvid.SetErrorLoggingFile(file)\n\n\t\/\/ Launch the web server\n\tgo runningService.ServeHttp(webAddress, webClientDir)\n\n\t\/\/ Launch the rpc server\n\terr = runningService.ServeRpc(rpcAddress)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn nil\n}\n\n\/\/ Wrapper function so that http handlers recover from panics gracefully\n\/\/ without crashing the entire program. The error message is written to\n\/\/ the log. Paradigm follows recommendation in:\n\/\/ \"Programming in Go: Creating Applications for the 21st Century\".\nfunc logHttpPanics(httpHandler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.Printf(\"[%v] caught panic: %v\", request.RemoteAddr, err)\n\t\t\t}\n\t\t}()\n\t\thttpHandler(writer, request)\n\t}\n}\n\n\/\/ Listen and serve HTTP requests using address and don't let stay-alive\n\/\/ connections hog goroutines for more than an hour.\n\/\/ See for discussion:\n\/\/ http:\/\/stackoverflow.com\/questions\/10971800\/golang-http-server-leaving-open-goroutines\nfunc (service *Service) ServeHttp(address, clientDir string) {\n\tif address == \"\" {\n\t\taddress = DefaultWebAddress\n\t}\n\tservice.WebAddress = address\n\tservice.WebClientPath = clientDir\n\tfmt.Printf(\"Web server listening at %s ...\\n\", address)\n\n\tsrc := &http.Server{\n\t\tAddr: address,\n\t\tReadTimeout: 1 * time.Hour,\n\t}\n\n\t\/\/ Handle Level 2 REST API.\n\tif GzipAPI {\n\t\tfmt.Println(\"HTTP server will return gzip values if permitted by browser.\")\n\t\thttp.HandleFunc(WebAPIPath, logHttpPanics(makeGzipHandler(apiHandler)))\n\t} else {\n\t\thttp.HandleFunc(WebAPIPath, logHttpPanics(apiHandler))\n\t}\n\n\t\/\/ Handle static files through serving embedded files\n\t\/\/ via nrsc or loading files from a specified web client directory.\n\tif clientDir == \"\" {\n\t\terr := nrsc.Handle(ConsolePath)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"ERROR with nrsc trying to serve web pages:\", err.Error())\n\t\t\tfmt.Println(webClientUnavailableMessage)\n\t\t\tfmt.Println(\"HTTP server will be started without webclient...\\n\")\n\t\t\thttp.HandleFunc(ConsolePath, logHttpPanics(mainHandler))\n\t\t} else {\n\t\t\tfmt.Println(\"Serving web client from embedded files...\")\n\t\t}\n\t} else {\n\t\thttp.HandleFunc(ConsolePath, logHttpPanics(mainHandler))\n\t\tdvid.Log(dvid.Debug, \"Serving web pages from %s\\n\", clientDir)\n\t}\n\n\t\/\/ Manage redirection from \/ to the ConsolePath.\n\thttp.HandleFunc(\"\/\", logHttpPanics(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar urlStr string\n\t\tif r.URL.Path == \"\/\" {\n\t\t\turlStr = ConsolePath + \"index.html\"\n\t\t} else {\n\t\t\turlStr = ConsolePath + strings.TrimLeft(r.URL.Path, \"\/\")\n\t\t}\n\t\tdvid.Fmt(dvid.Debug, \"Redirect %s -> %s\\n\", r.URL.Path, urlStr)\n\t\thttp.Redirect(w, r, urlStr, http.StatusMovedPermanently)\n\t}))\n\n\t\/\/ Serve it up!\n\tsrc.ListenAndServe()\n}\n\n\/\/ Listen and serve RPC requests using address.\nfunc (service *Service) ServeRpc(address string) error {\n\tif address == \"\" {\n\t\taddress = DefaultRPCAddress\n\t}\n\tservice.RPCAddress = address\n\tdvid.Log(dvid.Debug, \"Rpc server listening at %s ...\\n\", address)\n\n\tc := new(RPCConnection)\n\trpc.Register(c)\n\trpc.HandleHTTP()\n\tlistener, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\thttp.Serve(listener, nil)\n\treturn nil\n}\n\n\/\/ Nod to Andrew Gerrand for simple gzip solution:\n\/\/ See https:\/\/groups.google.com\/forum\/m\/?fromgroups#!topic\/golang-nuts\/eVnTcMwNVjM\ntype gzipResponseWriter struct {\n\tio.Writer\n\thttp.ResponseWriter\n}\n\nfunc (w gzipResponseWriter) Write(b []byte) (int, error) {\n\treturn w.Writer.Write(b)\n}\n\nfunc makeGzipHandler(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\tfn(w, r)\n\t\t\treturn\n\t\t}\n\t\tdvid.Log(dvid.Debug, \"Responding to request with gzip\\n\")\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgz := gzip.NewWriter(w)\n\t\tdefer gz.Close()\n\t\tfn(gzipResponseWriter{Writer: gz, ResponseWriter: w}, r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/jmmcatee\/cracklord\/queue\"\n\t\"github.com\/unrolled\/secure\"\n\t\"github.com\/vaughan0\/go-ini\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\t\/\/ Define the flags\n\tvar confPath = flag.String(\"conf\", \"\", \"Configuration file to use\")\n\tvar runIP = flag.String(\"host\", \"0.0.0.0\", \"IP to bind to\")\n\tvar runPort = flag.String(\"port\", \"443\", \"Port to bind to\")\n\tvar certPath = flag.String(\"cert\", \"\", \"Custom certificate file to use\")\n\tvar keyPath = flag.String(\"key\", \"\", \"Custom key file to use\")\n\n\t\/\/ Parse the flags\n\tflag.Parse()\n\n\t\/\/ Read the configuration file\n\tvar confFile ini.File\n\tvar confErr error\n\tif *confPath == \"\" {\n\t\tconfFile, confErr = ini.LoadFile(\".\/cracklord.ini\")\n\t} else {\n\t\tconfFile, confErr = ini.LoadFile(*confPath)\n\t}\n\n\t\/\/ Build the App Controller\n\tvar server AppController\n\n\t\/\/ Check for errors\n\tif confErr != nil {\n\t\tprintln(\"Error in configuration read:\")\n\t\tprintln(\"\\t\" + confErr.Error())\n\t\treturn\n\t}\n\n\t\/\/ Get the Authentication configuration\n\tconfAuth := confFile.Section(\"Authenication\")\n\tif confAuth == nil {\n\t\tprintln(\"No authentication configuration!\")\n\t\treturn\n\t}\n\n\t\/\/ Check for type of authentication and set conf\n\tswitch confAuth[\"type\"] {\n\tcase \"INI\":\n\t\tvar i INIAuth\n\n\t\t\/\/ Get the users\n\t\tumap := map[string]string{}\n\n\t\tau, ok := confAuth[\"adminuser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Admin user not present...\")\n\t\t\treturn\n\t\t}\n\t\tap := confAuth[\"adminpass\"]\n\t\tif !ok {\n\t\t\tprintln(\"Admin password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tsu, ok := confAuth[\"standarduser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Standard user not present...\")\n\t\t\treturn\n\t\t}\n\t\tsp := confAuth[\"standarduser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Standard password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tru, ok := confAuth[\"readonlyuser\"]\n\t\tif !ok {\n\t\t\tprintln(\"ReadOnly user not present...\")\n\t\t\treturn\n\t\t}\n\t\trp := confAuth[\"readonlypass\"]\n\t\tif !ok {\n\t\t\tprintln(\"ReadOnly password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tumap[au] = ap\n\t\tumap[su] = sp\n\t\tumap[ru] = rp\n\n\t\t\/\/ Setup group mappings\n\t\tgmap := map[string]string{}\n\n\t\tgmap[au] = Administrator\n\t\tgmap[su] = StandardUser\n\t\tgmap[ru] = ReadOnly\n\n\t\ti.Setup(umap, gmap)\n\n\t\tserver.Auth = &i\n\tcase \"ActiveDirectory\":\n\t\tvar ad ADAuth\n\n\t\trealm, ok := confAuth[\"realm\"]\n\t\tif !ok {\n\t\t\tprintln(\"AD Auth chosen and no Realm given...\")\n\t\t\treturn\n\t\t}\n\t\tad.Realm = realm\n\n\t\tgmap := map[string]string{}\n\t\tro, ok := confAuth[\"ReadOnlyGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No ReadOnly group provided...\")\n\t\t\treturn\n\t\t}\n\t\tst, ok := confAuth[\"StandardGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No Standard group provided...\")\n\t\t\treturn\n\t\t}\n\t\tadmin, ok := confAuth[\"AdminGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No Administrator group provided\")\n\t\t\treturn\n\t\t}\n\n\t\tgmap[ReadOnly] = ro\n\t\tgmap[StandardUser] = st\n\t\tgmap[Administrator] = admin\n\n\t\tad.Setup(gmap)\n\n\t\tserver.Auth = &ad\n\t}\n\n\t\/\/ Configure the TokenStore\n\tserver.T = NewTokenStore()\n\n\t\/\/ Configure the Queue\n\tserver.Q = queue.NewQueue()\n\n\t\/\/ Add some nice security stuff\n\tsecureMiddleware := secure.New(secure.Options{\n\t\tSSLRedirect: true,\n\t\tFrameDeny: true,\n\t\tCustomFrameOptionsValue: \"SAMEORIGIN\",\n\t\tBrowserXssFilter: true,\n\t\tIsDevelopment: true,\n\t})\n\n\t\/\/ Build the Negroni handler\n\tn := negroni.Classic()\n\tn.Use(negroni.HandlerFunc(secureMiddleware.HandlerFuncWithNext))\n\tn.UseHandler(server.Router())\n\n\t\/\/ Check for given certs and generate a new one if none exist\n\tvar cFile, kFile string\n\tif *certPath == \"\" || *keyPath == \"\" {\n\t\t\/\/ We need to create certs\n\t\terr := genNewCert(\"\") \/\/ Gen file in local directory\n\n\t\tif err != nil {\n\t\t\tprintln(\"Error generating TLS certs...\")\n\t\t\treturn\n\t\t}\n\n\t\tcFile = \"cert.pem\"\n\t\tkFile = \"cert.key\"\n\t} else {\n\t\tcFile = *certPath\n\t\tkFile = *keyPath\n\t}\n\n\thttp.ListenAndServeTLS(*runIP+\":\"+*runPort, cFile, kFile, n)\n}\n<commit_msg>fixed misspelling again<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/codegangsta\/negroni\"\n\t\"github.com\/jmmcatee\/cracklord\/queue\"\n\t\"github.com\/unrolled\/secure\"\n\t\"github.com\/vaughan0\/go-ini\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\t\/\/ Define the flags\n\tvar confPath = flag.String(\"conf\", \"\", \"Configuration file to use\")\n\tvar runIP = flag.String(\"host\", \"0.0.0.0\", \"IP to bind to\")\n\tvar runPort = flag.String(\"port\", \"443\", \"Port to bind to\")\n\tvar certPath = flag.String(\"cert\", \"\", \"Custom certificate file to use\")\n\tvar keyPath = flag.String(\"key\", \"\", \"Custom key file to use\")\n\n\t\/\/ Parse the flags\n\tflag.Parse()\n\n\t\/\/ Read the configuration file\n\tvar confFile ini.File\n\tvar confErr error\n\tif *confPath == \"\" {\n\t\tconfFile, confErr = ini.LoadFile(\".\/cracklord.ini\")\n\t} else {\n\t\tconfFile, confErr = ini.LoadFile(*confPath)\n\t}\n\n\t\/\/ Build the App Controller\n\tvar server AppController\n\n\t\/\/ Check for errors\n\tif confErr != nil {\n\t\tprintln(\"Error in configuration read:\")\n\t\tprintln(\"\\t\" + confErr.Error())\n\t\treturn\n\t}\n\n\t\/\/ Get the Authentication configuration\n\tconfAuth := confFile.Section(\"Authentication\")\n\tif confAuth == nil {\n\t\tprintln(\"No authentication configuration!\")\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%+v\\n\", confAuth)\n\n\t\/\/ Check for type of authentication and set conf\n\tswitch confAuth[\"type\"] {\n\tcase \"INI\":\n\t\tvar i INIAuth\n\n\t\t\/\/ Get the users\n\t\tumap := map[string]string{}\n\n\t\tau, ok := confAuth[\"adminuser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Admin user not present...\")\n\t\t\treturn\n\t\t}\n\t\tap := confAuth[\"adminpass\"]\n\t\tif !ok {\n\t\t\tprintln(\"Admin password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tsu, ok := confAuth[\"standarduser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Standard user not present...\")\n\t\t\treturn\n\t\t}\n\t\tsp := confAuth[\"standarduser\"]\n\t\tif !ok {\n\t\t\tprintln(\"Standard password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tru, ok := confAuth[\"readonlyuser\"]\n\t\tif !ok {\n\t\t\tprintln(\"ReadOnly user not present...\")\n\t\t\treturn\n\t\t}\n\t\trp := confAuth[\"readonlypass\"]\n\t\tif !ok {\n\t\t\tprintln(\"ReadOnly password not present...\")\n\t\t\treturn\n\t\t}\n\n\t\tumap[au] = ap\n\t\tumap[su] = sp\n\t\tumap[ru] = rp\n\n\t\t\/\/ Setup group mappings\n\t\tgmap := map[string]string{}\n\n\t\tgmap[au] = Administrator\n\t\tgmap[su] = StandardUser\n\t\tgmap[ru] = ReadOnly\n\n\t\ti.Setup(umap, gmap)\n\n\t\tserver.Auth = &i\n\tcase \"ActiveDirectory\":\n\t\tvar ad ADAuth\n\n\t\trealm, ok := confAuth[\"realm\"]\n\t\tif !ok {\n\t\t\tprintln(\"AD Auth chosen and no Realm given...\")\n\t\t\treturn\n\t\t}\n\t\tad.Realm = realm\n\n\t\tgmap := map[string]string{}\n\t\tro, ok := confAuth[\"ReadOnlyGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No ReadOnly group provided...\")\n\t\t\treturn\n\t\t}\n\t\tst, ok := confAuth[\"StandardGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No Standard group provided...\")\n\t\t\treturn\n\t\t}\n\t\tadmin, ok := confAuth[\"AdminGroup\"]\n\t\tif !ok {\n\t\t\tprintln(\"No Administrator group provided\")\n\t\t\treturn\n\t\t}\n\n\t\tgmap[ReadOnly] = ro\n\t\tgmap[StandardUser] = st\n\t\tgmap[Administrator] = admin\n\n\t\tad.Setup(gmap)\n\n\t\tserver.Auth = &ad\n\t}\n\n\t\/\/ Configure the TokenStore\n\tserver.T = NewTokenStore()\n\n\t\/\/ Configure the Queue\n\tserver.Q = queue.NewQueue()\n\n\t\/\/ Add some nice security stuff\n\tsecureMiddleware := secure.New(secure.Options{\n\t\tSSLRedirect: true,\n\t\tFrameDeny: true,\n\t\tCustomFrameOptionsValue: \"SAMEORIGIN\",\n\t\tBrowserXssFilter: true,\n\t\tIsDevelopment: true,\n\t})\n\n\t\/\/ Build the Negroni handler\n\tn := negroni.Classic()\n\tn.Use(negroni.HandlerFunc(secureMiddleware.HandlerFuncWithNext))\n\tn.UseHandler(server.Router())\n\n\t\/\/ Check for given certs and generate a new one if none exist\n\tvar cFile, kFile string\n\tif *certPath == \"\" || *keyPath == \"\" {\n\t\t\/\/ We need to create certs\n\t\terr := genNewCert(\"\") \/\/ Gen file in local directory\n\n\t\tif err != nil {\n\t\t\tprintln(\"Error generating TLS certs...\")\n\t\t\treturn\n\t\t}\n\n\t\tcFile = \"cert.pem\"\n\t\tkFile = \"cert.key\"\n\t} else {\n\t\tcFile = *certPath\n\t\tkFile = *keyPath\n\t}\n\n\thttp.ListenAndServeTLS(*runIP+\":\"+*runPort, cFile, kFile, n)\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\n\/\/ Contains common methods used for writing appengine apps.\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/log\"\n)\n\ntype handlerError struct {\n\tAppVersion string `json:\"appVersion\"`\n\tURL *url.URL `json:\"url\"`\n\tMethod string `json:\"method\"`\n\tStatusCode int `json:\"statusCode\"`\n\tInstanceID string `json:\"instanceId\"`\n\tVersionID string `json:\"versionId\"`\n\tRequestID string `json:\"requestId\"`\n\tModuleName string `json:\"moduleName\"`\n\tErr string `json:\"message\"`\n}\n\nfunc (e *handlerError) Error() string {\n\tb, err := json.MarshalIndent(e, \"\", \" \")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(b)\n}\n\n\/\/ Base struct designed to be extended by more specific url handlers\ntype Base struct {\n\tCtx context.Context\n\tReq *http.Request\n\tRes http.ResponseWriter\n\n\tconfig Config\n\ttemplates map[string]*template.Template\n}\n\n\/\/ Config contains the custom handler configuration settings\ntype Config struct {\n\tLayoutPath string\n\tViewPath string\n\tParentLayoutName string\n}\n\nvar defaultConfig = Config{\n\tLayoutPath: \"layouts\/application.html\",\n\tViewPath: \"views\",\n\tParentLayoutName: \"layout\",\n}\n\n\/\/ New allows one to override the default configuration settings.\n\/\/ func NewRootHandler() rootHandler {\n\/\/ \treturn rootHandler{Base: handler.New(&handler.Config{\n\/\/ \t\tLayoutPath: \"layouts\/admin.html\",\n\/\/ \t})}\n\/\/ }\nfunc New(c *Config) Base {\n\tif c == nil {\n\t\tc = &defaultConfig\n\t}\n\tb := Base{config: *c} \/\/ copy the passed in pointer\n\tb.templates = make(map[string]*template.Template)\n\treturn b\n}\n\n\/\/ Default uses the default config settings\n\/\/ func NewRootHandler() rootHandler {\n\/\/ \treturn rootHandler{Base: handler.Default()}\n\/\/ }\nfunc Default() Base {\n\treturn New(nil)\n}\n\n\/\/ OriginMiddleware returns a middleware function that validates the origin\n\/\/ header within the request matches the allowed values\nfunc OriginMiddleware(allowed []string) func(context.Context, http.ResponseWriter, *http.Request) context.Context {\n\treturn func(c context.Context, w http.ResponseWriter, r *http.Request) context.Context {\n\t\torigin := r.Header.Get(\"Origin\")\n\t\tif len(origin) == 0 {\n\t\t\treturn c\n\t\t}\n\t\tok := validateOrigin(origin, allowed)\n\t\tif !ok {\n\t\t\tc2, cancel := context.WithCancel(c)\n\t\t\tcancel()\n\t\t\treturn c2\n\t\t}\n\n\t\tw.Header().Add(\"Access-Control-Allow-Headers\", \"Content-Type, Authorization\")\n\t\tw.Header().Add(\"Access-Control-Allow-Methods\", \"GET, POST, PUT, DELETE, PATCH, OPTIONS\")\n\t\tw.Header().Add(\"Access-Control-Allow-Origin\", origin)\n\n\t\treturn c\n\t}\n}\n\n\/\/ ValidateOrigin is a helper method called within the ServeHTTP method on\n\/\/ OPTION requests to validate the allowed origins\nfunc (b *Base) ValidateOrigin(allowed []string) {\n\torigin := b.Req.Header.Get(\"Origin\")\n\tok := validateOrigin(origin, allowed)\n\tif !ok {\n\t\t_, cancel := context.WithCancel(b.Ctx)\n\t\tcancel()\n\t}\n}\n\nfunc validateOrigin(origin string, allowed []string) bool {\n\tif allowed == nil || len(allowed) == 0 {\n\t\treturn true\n\t}\n\tif len(origin) == 0 {\n\t\treturn false\n\t}\n\tfor _, allowedOrigin := range allowed {\n\t\tif origin == allowedOrigin {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ToJSON encodes an interface into the response writer with a default http\n\/\/ status code of 200\nfunc (b *Base) ToJSON(data interface{}) {\n\tb.Res.Header().Add(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(b.Res).Encode(data)\n\tif err != nil {\n\t\tb.Abort(http.StatusInternalServerError, fmt.Errorf(\"Decoding JSON: %v\", err))\n\t}\n}\n\n\/\/ ToJSONWithStatus json encodes an interface into the response writer with a\n\/\/ custom http status code\nfunc (b *Base) ToJSONWithStatus(data interface{}, status int) {\n\tb.Res.Header().Add(\"Content-Type\", \"application\/json\")\n\tb.Res.WriteHeader(status)\n\tb.ToJSON(data)\n}\n\n\/\/ SendStatus writes the passed in status to the response without any data\nfunc (b *Base) SendStatus(status int) {\n\tb.Res.WriteHeader(status)\n}\n\n\/\/ Bind must be called at the beginning of every request to set the required references\nfunc (b *Base) Bind(c context.Context, w http.ResponseWriter, r *http.Request) {\n\tb.Ctx, b.Res, b.Req = c, w, r\n}\n\n\/\/ Header gets the request header value\nfunc (b *Base) Header(name string) string {\n\treturn b.Req.Header.Get(name)\n}\n\n\/\/ SetHeader sets a response header value\nfunc (b *Base) SetHeader(name, value string) {\n\tb.Res.Header().Set(name, value)\n}\n\n\/\/ Abort is called when pre-maturally exiting from a handler function due to an\n\/\/ error. A detailed error is delivered to the client and logged to provide the\n\/\/ details required to identify the issue.\nfunc (b *Base) Abort(statusCode int, err error) {\n\tc, cancel := context.WithCancel(b.Ctx)\n\tdefer cancel()\n\n\t\/\/ testapp is the name given to all apps when being tested\n\tvar isTest = appengine.AppID(c) == \"testapp\"\n\n\thErr := &handlerError{\n\t\tURL: b.Req.URL,\n\t\tMethod: b.Req.Method,\n\t\tStatusCode: statusCode,\n\t\tAppVersion: appengine.AppID(c),\n\t\tRequestID: appengine.RequestID(c),\n\t}\n\tif err != nil {\n\t\thErr.Err = err.Error()\n\t}\n\n\tif !isTest {\n\t\thErr.InstanceID = appengine.InstanceID()\n\t\thErr.VersionID = appengine.VersionID(c)\n\t\thErr.ModuleName = appengine.ModuleName(c)\n\t}\n\n\t\/\/ log method to appengine log\n\tlog.Errorf(c, hErr.Error())\n\n\tb.Res.WriteHeader(statusCode)\n\tif strings.Index(b.Req.Header.Get(\"Accept\"), \"application\/json\") >= 0 {\n\t\tjson.NewEncoder(b.Res).Encode(hErr)\n\t}\n}\n\n\/\/ Redirect is a simple wrapper around the core http method\nfunc (b *Base) Redirect(url string, perm bool) {\n\tstatus := 302\n\tif perm {\n\t\tstatus = http.StatusMovedPermanently\n\t}\n\thttp.Redirect(b.Res, b.Req, url, status)\n}\n\n\/\/ Render pre-caches and renders template.\nfunc (b *Base) Render(template string, data interface{}, fns template.FuncMap) {\n\ttmpl := b.loadTemplate(template, fns)\n\ttmpl.ExecuteTemplate(b.Res, b.config.ParentLayoutName, data)\n}\n\nfunc (b *Base) loadTemplate(name string, fns template.FuncMap) *template.Template {\n\tif b.templates[name] != nil {\n\t\treturn b.templates[name]\n\t}\n\n\tview := fmt.Sprintf(\"%s\/%s.html\", b.config.ViewPath, name)\n\tt := template.New(name)\n\tif fns != nil {\n\t\tt.Funcs(fns)\n\t}\n\ttemplate, err := t.ParseFiles(b.config.LayoutPath, view)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to load template: %s => %v\", view, err))\n\t}\n\n\tb.templates[name] = template\n\treturn template\n}\n<commit_msg>allow easy setting of response cache headers<commit_after>package handler\n\n\/\/ Contains common methods used for writing appengine apps.\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/log\"\n)\n\ntype handlerError struct {\n\tAppVersion string `json:\"appVersion\"`\n\tURL *url.URL `json:\"url\"`\n\tMethod string `json:\"method\"`\n\tStatusCode int `json:\"statusCode\"`\n\tInstanceID string `json:\"instanceId\"`\n\tVersionID string `json:\"versionId\"`\n\tRequestID string `json:\"requestId\"`\n\tModuleName string `json:\"moduleName\"`\n\tErr string `json:\"message\"`\n}\n\nfunc (e *handlerError) Error() string {\n\tb, err := json.MarshalIndent(e, \"\", \" \")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(b)\n}\n\n\/\/ Base struct designed to be extended by more specific url handlers\ntype Base struct {\n\tCtx context.Context\n\tReq *http.Request\n\tRes http.ResponseWriter\n\n\tconfig Config\n\ttemplates map[string]*template.Template\n}\n\n\/\/ Config contains the custom handler configuration settings\ntype Config struct {\n\tLayoutPath string\n\tViewPath string\n\tParentLayoutName string\n}\n\nvar defaultConfig = Config{\n\tLayoutPath: \"layouts\/application.html\",\n\tViewPath: \"views\",\n\tParentLayoutName: \"layout\",\n}\n\n\/\/ New allows one to override the default configuration settings.\n\/\/ func NewRootHandler() rootHandler {\n\/\/ \treturn rootHandler{Base: handler.New(&handler.Config{\n\/\/ \t\tLayoutPath: \"layouts\/admin.html\",\n\/\/ \t})}\n\/\/ }\nfunc New(c *Config) Base {\n\tif c == nil {\n\t\tc = &defaultConfig\n\t}\n\tb := Base{config: *c} \/\/ copy the passed in pointer\n\tb.templates = make(map[string]*template.Template)\n\treturn b\n}\n\n\/\/ Default uses the default config settings\n\/\/ func NewRootHandler() rootHandler {\n\/\/ \treturn rootHandler{Base: handler.Default()}\n\/\/ }\nfunc Default() Base {\n\treturn New(nil)\n}\n\n\/\/ OriginMiddleware returns a middleware function that validates the origin\n\/\/ header within the request matches the allowed values\nfunc OriginMiddleware(allowed []string) func(context.Context, http.ResponseWriter, *http.Request) context.Context {\n\treturn func(c context.Context, w http.ResponseWriter, r *http.Request) context.Context {\n\t\torigin := r.Header.Get(\"Origin\")\n\t\tif len(origin) == 0 {\n\t\t\treturn c\n\t\t}\n\t\tok := validateOrigin(origin, allowed)\n\t\tif !ok {\n\t\t\tc2, cancel := context.WithCancel(c)\n\t\t\tcancel()\n\t\t\treturn c2\n\t\t}\n\n\t\tw.Header().Add(\"Access-Control-Allow-Headers\", \"Content-Type, Authorization\")\n\t\tw.Header().Add(\"Access-Control-Allow-Methods\", \"GET, POST, PUT, DELETE, PATCH, OPTIONS\")\n\t\tw.Header().Add(\"Access-Control-Allow-Origin\", origin)\n\n\t\treturn c\n\t}\n}\n\n\/\/ ValidateOrigin is a helper method called within the ServeHTTP method on\n\/\/ OPTION requests to validate the allowed origins\nfunc (b *Base) ValidateOrigin(allowed []string) {\n\torigin := b.Req.Header.Get(\"Origin\")\n\tok := validateOrigin(origin, allowed)\n\tif !ok {\n\t\t_, cancel := context.WithCancel(b.Ctx)\n\t\tcancel()\n\t}\n}\n\nfunc validateOrigin(origin string, allowed []string) bool {\n\tif allowed == nil || len(allowed) == 0 {\n\t\treturn true\n\t}\n\tif len(origin) == 0 {\n\t\treturn false\n\t}\n\tfor _, allowedOrigin := range allowed {\n\t\tif origin == allowedOrigin {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ToJSON encodes an interface into the response writer with a default http\n\/\/ status code of 200\nfunc (b *Base) ToJSON(data interface{}) {\n\tb.Res.Header().Add(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(b.Res).Encode(data)\n\tif err != nil {\n\t\tb.Abort(http.StatusInternalServerError, fmt.Errorf(\"Decoding JSON: %v\", err))\n\t}\n}\n\n\/\/ ToJSONWithStatus json encodes an interface into the response writer with a\n\/\/ custom http status code\nfunc (b *Base) ToJSONWithStatus(data interface{}, status int) {\n\tb.Res.Header().Add(\"Content-Type\", \"application\/json\")\n\tb.Res.WriteHeader(status)\n\tb.ToJSON(data)\n}\n\n\/\/ SendStatus writes the passed in status to the response without any data\nfunc (b *Base) SendStatus(status int) {\n\tb.Res.WriteHeader(status)\n}\n\n\/\/ Bind must be called at the beginning of every request to set the required references\nfunc (b *Base) Bind(c context.Context, w http.ResponseWriter, r *http.Request) {\n\tb.Ctx, b.Res, b.Req = c, w, r\n}\n\n\/\/ Header gets the request header value\nfunc (b *Base) Header(name string) string {\n\treturn b.Req.Header.Get(name)\n}\n\n\/\/ SetHeader sets a response header value\nfunc (b *Base) SetHeader(name, value string) {\n\tb.Res.Header().Set(name, value)\n}\n\n\/\/ Abort is called when pre-maturally exiting from a handler function due to an\n\/\/ error. A detailed error is delivered to the client and logged to provide the\n\/\/ details required to identify the issue.\nfunc (b *Base) Abort(statusCode int, err error) {\n\tc, cancel := context.WithCancel(b.Ctx)\n\tdefer cancel()\n\n\t\/\/ testapp is the name given to all apps when being tested\n\tvar isTest = appengine.AppID(c) == \"testapp\"\n\n\thErr := &handlerError{\n\t\tURL: b.Req.URL,\n\t\tMethod: b.Req.Method,\n\t\tStatusCode: statusCode,\n\t\tAppVersion: appengine.AppID(c),\n\t\tRequestID: appengine.RequestID(c),\n\t}\n\tif err != nil {\n\t\thErr.Err = err.Error()\n\t}\n\n\tif !isTest {\n\t\thErr.InstanceID = appengine.InstanceID()\n\t\thErr.VersionID = appengine.VersionID(c)\n\t\thErr.ModuleName = appengine.ModuleName(c)\n\t}\n\n\t\/\/ log method to appengine log\n\tlog.Errorf(c, hErr.Error())\n\n\tb.Res.WriteHeader(statusCode)\n\tif strings.Index(b.Req.Header.Get(\"Accept\"), \"application\/json\") >= 0 {\n\t\tjson.NewEncoder(b.Res).Encode(hErr)\n\t}\n}\n\n\/\/ Redirect is a simple wrapper around the core http method\nfunc (b *Base) Redirect(url string, perm bool) {\n\tstatus := 302\n\tif perm {\n\t\tstatus = http.StatusMovedPermanently\n\t}\n\thttp.Redirect(b.Res, b.Req, url, status)\n}\n\n\/\/ Render pre-caches and renders template.\nfunc (b *Base) Render(template string, data interface{}, fns template.FuncMap) {\n\ttmpl := b.loadTemplate(template, fns)\n\ttmpl.ExecuteTemplate(b.Res, b.config.ParentLayoutName, data)\n}\n\n\/\/ SetLastModified sets the Last-Modified header in the RFC1123 time format\nfunc (b *Base) SetLastModified(t time.Time) {\n\tb.Res.Header().Set(\"Last-Modified\", t.Format(time.RFC1123))\n}\n\n\/\/ SetETag sets the etag with the md5 value\nfunc (b *Base) SetETag(val interface{}) {\n\tvar str string\n\tswitch val.(type) {\n\tcase string:\n\t\tstr = val.(string)\n\tcase time.Time:\n\t\tstr = val.(time.Time).Format(time.RFC1123)\n\tcase fmt.Stringer:\n\t\tstr = val.(fmt.Stringer).String()\n\tdefault:\n\t\tstr = fmt.Sprintf(\"%v\", val)\n\t}\n\n\th := md5.New()\n\tio.WriteString(h, str)\n\tetag := base64.StdEncoding.EncodeToString(h.Sum(nil))\n\tb.Res.Header().Set(\"ETag\", etag)\n}\n\nfunc (b *Base) SetExpires(t time.Time) {\n\tb.Res.Header().Set(\"Expires\", t.Format(time.RFC1123))\n}\n\nfunc (b *Base) SetExpiresIn(d time.Duration) {\n\tb.Res.Header().Set(\"Expires\", time.Now().Add(d).Format(time.RFC1123))\n}\n\nfunc (b *Base) loadTemplate(name string, fns template.FuncMap) *template.Template {\n\tif b.templates[name] != nil {\n\t\treturn b.templates[name]\n\t}\n\n\tview := fmt.Sprintf(\"%s\/%s.html\", b.config.ViewPath, name)\n\tt := template.New(name)\n\tif fns != nil {\n\t\tt.Funcs(fns)\n\t}\n\ttemplate, err := t.ParseFiles(b.config.LayoutPath, view)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to load template: %s => %v\", view, err))\n\t}\n\n\tb.templates[name] = template\n\treturn template\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/resourced\/resourced-master\/dal\"\n\t\"github.com\/resourced\/resourced-master\/libhttp\"\n)\n\nfunc GetGraphs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tcurrentUserRow, ok := session.Values[\"user\"].(*dal.UserRow)\n\tif !ok {\n\t\thttp.Redirect(w, r, \"\/logout\", 301)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\tgraphs, err := dal.NewGraph(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tdata := struct {\n\t\tAddr string\n\t\tCurrentUser *dal.UserRow\n\t\tClusters []*dal.ClusterRow\n\t\tCurrentClusterJson string\n\t\tGraphs []*dal.GraphRow\n\t}{\n\t\tcontext.Get(r, \"addr\").(string),\n\t\tcurrentUserRow,\n\t\tcontext.Get(r, \"clusters\").([]*dal.ClusterRow),\n\t\tstring(context.Get(r, \"currentClusterJson\").([]byte)),\n\t\tgraphs,\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates\/dashboard.html.tmpl\", \"templates\/graphs\/list.html.tmpl\")\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\ttmpl.Execute(w, data)\n}\n\nfunc PostGraphs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tname := r.FormValue(\"Name\")\n\tdescription := r.FormValue(\"Description\")\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\t_, err := dal.NewGraph(db).Create(nil, currentCluster.ID, name, description)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, \"\/graphs\", 301)\n}\n\nfunc GetPostPutDeleteGraphsID(w http.ResponseWriter, r *http.Request) {\n\tmethod := r.FormValue(\"_method\")\n\n\tif method == \"\" || method == \"get\" {\n\t\tGetGraphsID(w, r)\n\t} else if method == \"post\" || method == \"put\" {\n\t\tPutGraphsID(w, r)\n\t} else if method == \"delete\" {\n\t\tDeleteGraphsID(w, r)\n\t}\n}\n\nfunc GetGraphsID(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tidString := mux.Vars(r)[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentUserRow, ok := session.Values[\"user\"].(*dal.UserRow)\n\tif !ok {\n\t\thttp.Redirect(w, r, \"\/logout\", 301)\n\t\treturn\n\t}\n\n\taccessTokenRow, err := dal.NewAccessToken(db).GetByUserID(nil, currentUserRow.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tgraphs, err := dal.NewGraph(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentGraph, err := dal.NewGraph(db).GetById(nil, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tmetrics, err := dal.NewMetric(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tdata := struct {\n\t\tAddr string\n\t\tCurrentUser *dal.UserRow\n\t\tAccessToken *dal.AccessTokenRow\n\t\tClusters []*dal.ClusterRow\n\t\tCurrentClusterJson string\n\t\tCurrentGraph *dal.GraphRow\n\t\tGraphs []*dal.GraphRow\n\t\tMetrics []*dal.MetricRow\n\t}{\n\t\tcontext.Get(r, \"addr\").(string),\n\t\tcurrentUserRow,\n\t\taccessTokenRow,\n\t\tcontext.Get(r, \"clusters\").([]*dal.ClusterRow),\n\t\tstring(context.Get(r, \"currentClusterJson\").([]byte)),\n\t\tcurrentGraph,\n\t\tgraphs,\n\t\tmetrics,\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates\/dashboard.html.tmpl\", \"templates\/graphs\/dashboard.html.tmpl\")\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\ttmpl.Execute(w, data)\n}\n\nfunc PutGraphsID(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tidString := vars[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\terr = r.ParseForm()\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tname := r.FormValue(\"Name\")\n\tdescription := r.FormValue(\"Description\")\n\n\tmetrics := r.Form[\"Metrics\"]\n\n\tdata := make(map[string]interface{})\n\tif name != \"\" {\n\t\tdata[\"name\"] = name\n\t}\n\tif description != \"\" {\n\t\tdata[\"description\"] = description\n\t}\n\n\tmetricsJSONBytes, err := dal.NewGraph(db).BuildMetricsJSONForSave(nil, currentCluster.ID, metrics)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\tdata[\"metrics\"] = metricsJSONBytes\n\n\t_, err = dal.NewGraph(db).UpdateByID(nil, data, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, fmt.Sprintf(\"\/graphs\/%v\", idString), 301)\n}\n\nfunc DeleteGraphsID(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\tidString := vars[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\t_, err = dal.NewGraph(db).DeleteByID(nil, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, \"\/graphs\", 301)\n}\n<commit_msg>Fix accidental missing data when updating graphs<commit_after>package handlers\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/resourced\/resourced-master\/dal\"\n\t\"github.com\/resourced\/resourced-master\/libhttp\"\n)\n\nfunc GetGraphs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tcurrentUserRow, ok := session.Values[\"user\"].(*dal.UserRow)\n\tif !ok {\n\t\thttp.Redirect(w, r, \"\/logout\", 301)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\tgraphs, err := dal.NewGraph(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tdata := struct {\n\t\tAddr string\n\t\tCurrentUser *dal.UserRow\n\t\tClusters []*dal.ClusterRow\n\t\tCurrentClusterJson string\n\t\tGraphs []*dal.GraphRow\n\t}{\n\t\tcontext.Get(r, \"addr\").(string),\n\t\tcurrentUserRow,\n\t\tcontext.Get(r, \"clusters\").([]*dal.ClusterRow),\n\t\tstring(context.Get(r, \"currentClusterJson\").([]byte)),\n\t\tgraphs,\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates\/dashboard.html.tmpl\", \"templates\/graphs\/list.html.tmpl\")\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\ttmpl.Execute(w, data)\n}\n\nfunc PostGraphs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tname := r.FormValue(\"Name\")\n\tdescription := r.FormValue(\"Description\")\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\t_, err := dal.NewGraph(db).Create(nil, currentCluster.ID, name, description)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, \"\/graphs\", 301)\n}\n\nfunc GetPostPutDeleteGraphsID(w http.ResponseWriter, r *http.Request) {\n\tmethod := r.FormValue(\"_method\")\n\n\tif method == \"\" || method == \"get\" {\n\t\tGetGraphsID(w, r)\n\t} else if method == \"post\" || method == \"put\" {\n\t\tPutGraphsID(w, r)\n\t} else if method == \"delete\" {\n\t\tDeleteGraphsID(w, r)\n\t}\n}\n\nfunc GetGraphsID(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tidString := mux.Vars(r)[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentUserRow, ok := session.Values[\"user\"].(*dal.UserRow)\n\tif !ok {\n\t\thttp.Redirect(w, r, \"\/logout\", 301)\n\t\treturn\n\t}\n\n\taccessTokenRow, err := dal.NewAccessToken(db).GetByUserID(nil, currentUserRow.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tgraphs, err := dal.NewGraph(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentGraph, err := dal.NewGraph(db).GetById(nil, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tmetrics, err := dal.NewMetric(db).AllByClusterID(nil, currentCluster.ID)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tdata := struct {\n\t\tAddr string\n\t\tCurrentUser *dal.UserRow\n\t\tAccessToken *dal.AccessTokenRow\n\t\tClusters []*dal.ClusterRow\n\t\tCurrentClusterJson string\n\t\tCurrentGraph *dal.GraphRow\n\t\tGraphs []*dal.GraphRow\n\t\tMetrics []*dal.MetricRow\n\t}{\n\t\tcontext.Get(r, \"addr\").(string),\n\t\tcurrentUserRow,\n\t\taccessTokenRow,\n\t\tcontext.Get(r, \"clusters\").([]*dal.ClusterRow),\n\t\tstring(context.Get(r, \"currentClusterJson\").([]byte)),\n\t\tcurrentGraph,\n\t\tgraphs,\n\t\tmetrics,\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates\/dashboard.html.tmpl\", \"templates\/graphs\/dashboard.html.tmpl\")\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\ttmpl.Execute(w, data)\n}\n\nfunc PutGraphsID(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\tcookieStore := context.Get(r, \"cookieStore\").(*sessions.CookieStore)\n\tsession, _ := cookieStore.Get(r, \"resourcedmaster-session\")\n\n\tidString := vars[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\terr = r.ParseForm()\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\tcurrentClusterInterface := session.Values[\"currentCluster\"]\n\tif currentClusterInterface == nil {\n\t\thttp.Redirect(w, r, \"\/\", 301)\n\t\treturn\n\t}\n\n\tcurrentCluster := currentClusterInterface.(*dal.ClusterRow)\n\n\tname := r.FormValue(\"Name\")\n\tdescription := r.FormValue(\"Description\")\n\n\tdata := make(map[string]interface{})\n\tif name != \"\" {\n\t\tdata[\"name\"] = name\n\t}\n\tif description != \"\" {\n\t\tdata[\"description\"] = description\n\t}\n\n\tmetrics := r.Form[\"Metrics\"]\n\n\tif len(metrics) > 0 {\n\t\tmetricsJSONBytes, err := dal.NewGraph(db).BuildMetricsJSONForSave(nil, currentCluster.ID, metrics)\n\t\tif err != nil {\n\t\t\tlibhttp.HandleErrorJson(w, err)\n\t\t\treturn\n\t\t}\n\t\tdata[\"metrics\"] = metricsJSONBytes\n\t}\n\n\t_, err = dal.NewGraph(db).UpdateByID(nil, data, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, fmt.Sprintf(\"\/graphs\/%v\", idString), 301)\n}\n\nfunc DeleteGraphsID(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tdb := context.Get(r, \"db.Core\").(*sqlx.DB)\n\n\tidString := vars[\"id\"]\n\tid, err := strconv.ParseInt(idString, 10, 64)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\t_, err = dal.NewGraph(db).DeleteByID(nil, id)\n\tif err != nil {\n\t\tlibhttp.HandleErrorJson(w, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, \"\/graphs\", 301)\n}\n<|endoftext|>"} {"text":"<commit_before>package helpers\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tginkgoconfig \"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/pivotal-cf-experimental\/cf-test-helpers\/cf\"\n\t\"github.com\/vito\/cmdtest\"\n\t. \"github.com\/vito\/cmdtest\/matchers\"\n)\n\ntype ConfiguredContext struct {\n\tconfig Config\n\n\torganizationName string\n\tspaceName string\n\n\tquotaDefinitionName string\n\n\tregularUserUsername string\n\tregularUserPassword string\n\n\tisPersistent bool\n}\n\ntype quotaDefinition struct {\n\tName string\n\n\tTotalServices string\n\tTotalRoutes string\n\tMemoryLimit string\n\n\tNonBasicServicesAllowed bool\n}\n\nfunc NewContext(config Config) *ConfiguredContext {\n\tnode := ginkgoconfig.GinkgoConfig.ParallelNode\n\ttimeTag := time.Now().Format(\"2006_01_02-15h04m05.999s\")\n\n\treturn &ConfiguredContext{\n\t\tconfig: config,\n\n\t\tquotaDefinitionName: fmt.Sprintf(\"CATS-QUOTA-%d-%s\", node, timeTag),\n\n\t\torganizationName: fmt.Sprintf(\"CATS-ORG-%d-%s\", node, timeTag),\n\t\tspaceName: fmt.Sprintf(\"CATS-SPACE-%d-%s\", node, timeTag),\n\n\t\tregularUserUsername: fmt.Sprintf(\"CATS-USER-%d-%s\", node, timeTag),\n\t\tregularUserPassword: \"meow\",\n\n\t\tisPersistent: false,\n\t}\n}\n\nfunc NewPersistentAppContext(config Config) *ConfiguredContext {\n\tbaseContext := NewContext(config)\n\n\tbaseContext.quotaDefinitionName = config.PersistentAppQuotaName\n\tbaseContext.organizationName = config.PersistentAppOrg\n\tbaseContext.spaceName = config.PersistentAppSpace\n\tbaseContext.isPersistent = true\n\n\treturn baseContext\n}\n\nfunc (context *ConfiguredContext) Setup() {\n\tcf.AsUser(context.AdminUserContext(), func() {\n\t\tdefinition := quotaDefinition{\n\t\t\tName: context.quotaDefinitionName,\n\n\t\t\tTotalServices: \"100\",\n\t\t\tTotalRoutes: \"1000\",\n\t\t\tMemoryLimit: \"10G\",\n\n\t\t\tNonBasicServicesAllowed: true, \/\/TODO:Needs to be added once CLI gets updated\n\t\t}\n\n\t\targs := []string {\n\t\t\t\"create-quota\",\n\t\t\tcontext.quotaDefinitionName,\n\t\t\t\"-m\", definition.MemoryLimit,\n\t\t\t\"-r\", definition.TotalRoutes,\n\t\t\t\"-s\", definition.TotalServices,\n\t\t}\n\t\tif (definition.NonBasicServicesAllowed) {\n\t\t\targs = append(args, \"--allow-paid-service-plans\")\n\t\t}\n\t\tExpect(cf.Cf(args...)).To(Say(\"OK\"))\n\n\t\tExpect(cf.Cf(\"create-user\", context.regularUserUsername, context.regularUserPassword)).To(SayBranches(\n\t\t\tcmdtest.ExpectBranch{\"OK\", func() {}},\n\t\t\tcmdtest.ExpectBranch{\"scim_resource_already_exists\", func() {}},\n\t\t))\n\n\t\tExpect(cf.Cf(\"create-org\", context.organizationName)).To(ExitWith(0))\n\t\tExpect(cf.Cf(\"set-quota\", context.organizationName, definition.Name)).To(ExitWith(0))\n\t})\n}\n\nfunc (context *ConfiguredContext) Teardown() {\n\tcf.AsUser(context.AdminUserContext(), func() {\n\t\tExpect(cf.Cf(\"delete-user\", \"-f\", context.regularUserUsername)).To(Say(\"OK\"))\n\n\t\tif !context.isPersistent {\n\t\t\tExpect(cf.Cf(\"delete-org\", \"-f\", context.organizationName)).To(Say(\"OK\"))\n\n\t\t\tExpect(cf.Cf(\"delete-quota\", \"-f\", context.quotaDefinitionName)).To(Say(\"OK\"))\n\t\t}\n\t})\n}\n\nfunc (context *ConfiguredContext) AdminUserContext() cf.UserContext {\n\treturn cf.NewUserContext(\n\t\tcontext.config.ApiEndpoint,\n\t\tcontext.config.AdminUser,\n\t\tcontext.config.AdminPassword,\n\t\t\"\",\n\t\t\"\",\n\t\tcontext.config.SkipSSLValidation,\n\t)\n}\n\nfunc (context *ConfiguredContext) RegularUserContext() cf.UserContext {\n\treturn cf.NewUserContext(\n\t\tcontext.config.ApiEndpoint,\n\t\tcontext.regularUserUsername,\n\t\tcontext.regularUserPassword,\n\t\tcontext.organizationName,\n\t\tcontext.spaceName,\n\t\tcontext.config.SkipSSLValidation,\n\t)\n}\n<commit_msg>remove outdated comment.<commit_after>package helpers\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tginkgoconfig \"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/pivotal-cf-experimental\/cf-test-helpers\/cf\"\n\t\"github.com\/vito\/cmdtest\"\n\t. \"github.com\/vito\/cmdtest\/matchers\"\n)\n\ntype ConfiguredContext struct {\n\tconfig Config\n\n\torganizationName string\n\tspaceName string\n\n\tquotaDefinitionName string\n\n\tregularUserUsername string\n\tregularUserPassword string\n\n\tisPersistent bool\n}\n\ntype quotaDefinition struct {\n\tName string\n\n\tTotalServices string\n\tTotalRoutes string\n\tMemoryLimit string\n\n\tNonBasicServicesAllowed bool\n}\n\nfunc NewContext(config Config) *ConfiguredContext {\n\tnode := ginkgoconfig.GinkgoConfig.ParallelNode\n\ttimeTag := time.Now().Format(\"2006_01_02-15h04m05.999s\")\n\n\treturn &ConfiguredContext{\n\t\tconfig: config,\n\n\t\tquotaDefinitionName: fmt.Sprintf(\"CATS-QUOTA-%d-%s\", node, timeTag),\n\n\t\torganizationName: fmt.Sprintf(\"CATS-ORG-%d-%s\", node, timeTag),\n\t\tspaceName: fmt.Sprintf(\"CATS-SPACE-%d-%s\", node, timeTag),\n\n\t\tregularUserUsername: fmt.Sprintf(\"CATS-USER-%d-%s\", node, timeTag),\n\t\tregularUserPassword: \"meow\",\n\n\t\tisPersistent: false,\n\t}\n}\n\nfunc NewPersistentAppContext(config Config) *ConfiguredContext {\n\tbaseContext := NewContext(config)\n\n\tbaseContext.quotaDefinitionName = config.PersistentAppQuotaName\n\tbaseContext.organizationName = config.PersistentAppOrg\n\tbaseContext.spaceName = config.PersistentAppSpace\n\tbaseContext.isPersistent = true\n\n\treturn baseContext\n}\n\nfunc (context *ConfiguredContext) Setup() {\n\tcf.AsUser(context.AdminUserContext(), func() {\n\t\tdefinition := quotaDefinition{\n\t\t\tName: context.quotaDefinitionName,\n\n\t\t\tTotalServices: \"100\",\n\t\t\tTotalRoutes: \"1000\",\n\t\t\tMemoryLimit: \"10G\",\n\n\t\t\tNonBasicServicesAllowed: true,\n\t\t}\n\n\t\targs := []string {\n\t\t\t\"create-quota\",\n\t\t\tcontext.quotaDefinitionName,\n\t\t\t\"-m\", definition.MemoryLimit,\n\t\t\t\"-r\", definition.TotalRoutes,\n\t\t\t\"-s\", definition.TotalServices,\n\t\t}\n\t\tif (definition.NonBasicServicesAllowed) {\n\t\t\targs = append(args, \"--allow-paid-service-plans\")\n\t\t}\n\n\t\tExpect(cf.Cf(args...)).To(Say(\"OK\"))\n\n\t\tExpect(cf.Cf(\"create-user\", context.regularUserUsername, context.regularUserPassword)).To(SayBranches(\n\t\t\tcmdtest.ExpectBranch{\"OK\", func() {}},\n\t\t\tcmdtest.ExpectBranch{\"scim_resource_already_exists\", func() {}},\n\t\t))\n\n\t\tExpect(cf.Cf(\"create-org\", context.organizationName)).To(ExitWith(0))\n\t\tExpect(cf.Cf(\"set-quota\", context.organizationName, definition.Name)).To(ExitWith(0))\n\t})\n}\n\nfunc (context *ConfiguredContext) Teardown() {\n\tcf.AsUser(context.AdminUserContext(), func() {\n\t\tExpect(cf.Cf(\"delete-user\", \"-f\", context.regularUserUsername)).To(Say(\"OK\"))\n\n\t\tif !context.isPersistent {\n\t\t\tExpect(cf.Cf(\"delete-org\", \"-f\", context.organizationName)).To(Say(\"OK\"))\n\n\t\t\tExpect(cf.Cf(\"delete-quota\", \"-f\", context.quotaDefinitionName)).To(Say(\"OK\"))\n\t\t}\n\t})\n}\n\nfunc (context *ConfiguredContext) AdminUserContext() cf.UserContext {\n\treturn cf.NewUserContext(\n\t\tcontext.config.ApiEndpoint,\n\t\tcontext.config.AdminUser,\n\t\tcontext.config.AdminPassword,\n\t\t\"\",\n\t\t\"\",\n\t\tcontext.config.SkipSSLValidation,\n\t)\n}\n\nfunc (context *ConfiguredContext) RegularUserContext() cf.UserContext {\n\treturn cf.NewUserContext(\n\t\tcontext.config.ApiEndpoint,\n\t\tcontext.regularUserUsername,\n\t\tcontext.regularUserPassword,\n\t\tcontext.organizationName,\n\t\tcontext.spaceName,\n\t\tcontext.config.SkipSSLValidation,\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package helpers\n\nimport (\n \"errors\"\n \"github.com\/Seklfreak\/Robyul2\/cache\"\n \"github.com\/bwmarrin\/discordgo\"\n \"time\"\n \"regexp\"\n \"math\/big\"\n \"strings\"\n \"strconv\"\n \"fmt\"\n \"github.com\/Seklfreak\/Robyul2\/logger\"\n)\n\nconst (\n DISCORD_EPOCH int64 = 1420070400000\n)\n\nvar botAdmins = []string{\n \"116620585638821891\", \/\/ Sekl\n}\nvar NukeMods = []string{\n \"116620585638821891\", \/\/ Sekl\n \"134298438559858688\", \/\/ Kakkela\n}\nvar adminRoleNames = []string{\"Admin\", \"Admins\", \"ADMIN\"}\nvar modRoleNames = []string{\"Mod\", \"Mods\", \"Mod Trainee\", \"Moderator\", \"Moderators\", \"MOD\"}\n\nfunc IsNukeMod(id string) bool {\n for _, s := range NukeMods {\n if s == id {\n return true\n }\n }\n\n return false\n}\n\n\/\/ IsBotAdmin checks if $id is in $botAdmins\nfunc IsBotAdmin(id string) bool {\n for _, s := range botAdmins {\n if s == id {\n return true\n }\n }\n\n return false\n}\n\nfunc IsAdmin(msg *discordgo.Message) bool {\n channel, e := cache.GetSession().Channel(msg.ChannelID)\n if e != nil {\n return false\n }\n\n guild, e := cache.GetSession().Guild(channel.GuildID)\n if e != nil {\n return false\n }\n\n if msg.Author.ID == guild.OwnerID || IsBotAdmin(msg.Author.ID) {\n return true\n }\n\n guildMember, e := cache.GetSession().GuildMember(guild.ID, msg.Author.ID)\n \/\/ Check if role may manage server or a role is in admin role list\n for _, role := range guild.Roles {\n for _, userRole := range guildMember.Roles {\n if userRole == role.ID {\n if role.Permissions&discordgo.PermissionAdministrator == discordgo.PermissionAdministrator {\n return true\n }\n for _, adminRoleName := range adminRoleNames {\n if role.Name == adminRoleName {\n return true\n }\n }\n }\n }\n }\n return false\n}\n\nfunc IsMod(msg *discordgo.Message) bool {\n if IsAdmin(msg) == true {\n return true\n } else {\n channel, e := cache.GetSession().Channel(msg.ChannelID)\n if e != nil {\n return false\n }\n guild, e := cache.GetSession().Guild(channel.GuildID)\n if e != nil {\n return false\n }\n guildMember, e := cache.GetSession().GuildMember(guild.ID, msg.Author.ID)\n \/\/ check if a role is in mod role list\n for _, role := range guild.Roles {\n for _, userRole := range guildMember.Roles {\n if userRole == role.ID {\n for _, modRoleName := range modRoleNames {\n if role.Name == modRoleName {\n return true\n }\n }\n }\n }\n }\n }\n return false\n}\n\n\/\/ RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission\nfunc RequireAdmin(msg *discordgo.Message, cb Callback) {\n if !IsAdmin(msg) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"admin.no_permission\"))\n return\n }\n\n cb()\n}\n\n\/\/ RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission\nfunc RequireMod(msg *discordgo.Message, cb Callback) {\n if !IsMod(msg) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"mod.no_permission\"))\n return\n }\n\n cb()\n}\n\n\/\/ RequireBotAdmin only calls $cb if the author is a bot admin\nfunc RequireBotAdmin(msg *discordgo.Message, cb Callback) {\n if !IsBotAdmin(msg.Author.ID) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"botadmin.no_permission\"))\n return\n }\n\n cb()\n}\n\nfunc ConfirmEmbed(channelID string, author *discordgo.User, confirmMessageText string, confirmEmojiID string, abortEmojiID string) bool {\n \/\/ send embed asking the user to confirm\n confirmMessage, err := cache.GetSession().ChannelMessageSendEmbedWithMessage(channelID,\n \"<@\"+author.ID+\">\",\n &discordgo.MessageEmbed{\n Title: GetText(\"bot.embeds.please-confirm-title\"),\n Description: confirmMessageText,\n })\n if err != nil {\n cache.GetSession().ChannelMessageSend(channelID, GetTextF(\"bot.errors.general\", err.Error()))\n }\n\n \/\/ delete embed after everything is done\n defer cache.GetSession().ChannelMessageDelete(confirmMessage.ChannelID, confirmMessage.ID)\n\n \/\/ add default reactions to embed\n cache.GetSession().MessageReactionAdd(confirmMessage.ChannelID, confirmMessage.ID, confirmEmojiID)\n cache.GetSession().MessageReactionAdd(confirmMessage.ChannelID, confirmMessage.ID, abortEmojiID)\n\n \/\/ check every second if a reaction has been clicked\n for {\n confirmes, _ := cache.GetSession().MessageReactions(confirmMessage.ChannelID, confirmMessage.ID, confirmEmojiID, 100)\n for _, confirm := range confirmes {\n if confirm.ID == author.ID {\n \/\/ user has confirmed the call\n return true\n }\n }\n aborts, _ := cache.GetSession().MessageReactions(confirmMessage.ChannelID, confirmMessage.ID, abortEmojiID, 100)\n for _, abort := range aborts {\n if abort.ID == author.ID {\n \/\/ User has aborted the call\n return false\n }\n }\n\n time.Sleep(1 * time.Second)\n }\n}\n\nfunc GetMuteRole(guildID string) (*discordgo.Role, error) {\n guild, err := cache.GetSession().Guild(guildID)\n Relax(err)\n var muteRole *discordgo.Role\n settings, err := GuildSettingsGet(guildID)\n for _, role := range guild.Roles {\n Relax(err)\n if role.Name == settings.MutedRoleName {\n muteRole = role\n }\n }\n if muteRole == nil {\n muteRole, err = cache.GetSession().GuildRoleCreate(guildID)\n if err != nil {\n return muteRole, err\n }\n muteRole, err = cache.GetSession().GuildRoleEdit(guildID, muteRole.ID, settings.MutedRoleName, muteRole.Color, muteRole.Hoist, 0, muteRole.Mentionable)\n if err != nil {\n return muteRole, err\n }\n for _, channel := range guild.Channels {\n err = cache.GetSession().ChannelPermissionSet(channel.ID, muteRole.ID, \"role\", 0, discordgo.PermissionSendMessages)\n if err != nil {\n logger.ERROR.L(\"discord\", \"Error disabling send messages on mute Role: \" + err.Error())\n }\n \/\/ TODO: update discordgo\n \/\/err = cache.GetSession().ChannelPermissionSet(channel.ID, muteRole.ID, \"role\", 0, discordgo.PermissionAddReactions)\n \/\/if err != nil {\n \/\/ logger.ERROR.L(\"discord\", \"Error disabling add reactions on mute Role: \" + err.Error())\n \/\/}\n }\n }\n return muteRole, nil\n}\n\nfunc GetChannelFromMention(mention string) (*discordgo.Channel, error) {\n var targetChannel *discordgo.Channel\n re := regexp.MustCompile(\"(<#)?(\\\\d+)(>)?\")\n result := re.FindStringSubmatch(mention)\n if len(result) == 4 {\n targetChannel, err := cache.GetSession().Channel(result[2])\n return targetChannel, err\n } else {\n return targetChannel, errors.New(\"Channel not found.\")\n }\n}\n\nfunc GetUserFromMention(mention string) (*discordgo.User, error) {\n var targetUser *discordgo.User\n re := regexp.MustCompile(\"(<@)?(\\\\d+)(>)?\")\n result := re.FindStringSubmatch(mention)\n if len(result) == 4 {\n targetUser, err := cache.GetSession().User(result[2])\n return targetUser, err\n } else {\n return targetUser, errors.New(\"User not found.\")\n }\n}\n\nfunc GetDiscordColorFromHex(hex string) int {\n colorInt, ok := new(big.Int).SetString(strings.Replace(hex, \"#\", \"\", 1), 16)\n if ok == true {\n return int(colorInt.Int64())\n } else {\n return 0x0FADED\n }\n}\n\nfunc GetTimeFromSnowflake(id string) time.Time {\n iid, err := strconv.ParseInt(id, 10, 64)\n Relax(err)\n\n return time.Unix(((iid>>22)+DISCORD_EPOCH)\/1000, 0).UTC()\n}\n\nfunc GetAllPermissions(guild *discordgo.Guild, member *discordgo.Member) int64 {\n var perms int64 = 0\n for _, x := range guild.Roles {\n if x.Name == \"@everyone\" {\n perms |= int64(x.Permissions)\n }\n }\n for _, r := range member.Roles {\n for _, x := range guild.Roles {\n if x.ID == r {\n perms |= int64(x.Permissions)\n }\n }\n }\n return perms\n}\nfunc Pagify(text string, delimiter string) []string {\n result := make([]string, 0)\n textParts := strings.Split(text, delimiter)\n currentOutputPart := \"\"\n for _, textPart := range textParts {\n if len(currentOutputPart)+len(textPart)+len(delimiter) <= 1992 {\n if len(result) > 0 {\n currentOutputPart += delimiter + textPart\n } else {\n currentOutputPart += textPart\n }\n } else {\n result = append(result, currentOutputPart)\n currentOutputPart = \"\"\n if len(textPart) <= 1992 { \/\/ @TODO: else: split text somehow\n currentOutputPart = textPart\n }\n }\n }\n if currentOutputPart != \"\" {\n result = append(result, currentOutputPart)\n }\n return result\n}\n\nfunc GetAvatarUrl(user *discordgo.User) string {\n return GetAvatarUrlWithSize(user, 1024)\n}\n\nfunc GetAvatarUrlWithSize(user *discordgo.User, size uint16) string {\n if user.Avatar == \"\" {\n return \"\"\n }\n\n avatarUrl := \"https:\/\/cdn.discordapp.com\/avatars\/%s\/%s.%s?size=%d\"\n\n if strings.HasPrefix(user.Avatar, \"a_\") {\n return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, \"gif\", size)\n }\n\n return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, \"jpg\", size)\n}\n\nfunc CommandExists(name string) bool {\n for _, command := range cache.GetPluginList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n for _, command := range cache.GetPluginExtendedList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n for _, command := range cache.GetTriggerPluginList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n return false\n}\n<commit_msg>[bias] more splitting fixing<commit_after>package helpers\n\nimport (\n \"errors\"\n \"github.com\/Seklfreak\/Robyul2\/cache\"\n \"github.com\/bwmarrin\/discordgo\"\n \"time\"\n \"regexp\"\n \"math\/big\"\n \"strings\"\n \"strconv\"\n \"fmt\"\n \"github.com\/Seklfreak\/Robyul2\/logger\"\n)\n\nconst (\n DISCORD_EPOCH int64 = 1420070400000\n)\n\nvar botAdmins = []string{\n \"116620585638821891\", \/\/ Sekl\n}\nvar NukeMods = []string{\n \"116620585638821891\", \/\/ Sekl\n \"134298438559858688\", \/\/ Kakkela\n}\nvar adminRoleNames = []string{\"Admin\", \"Admins\", \"ADMIN\"}\nvar modRoleNames = []string{\"Mod\", \"Mods\", \"Mod Trainee\", \"Moderator\", \"Moderators\", \"MOD\"}\n\nfunc IsNukeMod(id string) bool {\n for _, s := range NukeMods {\n if s == id {\n return true\n }\n }\n\n return false\n}\n\n\/\/ IsBotAdmin checks if $id is in $botAdmins\nfunc IsBotAdmin(id string) bool {\n for _, s := range botAdmins {\n if s == id {\n return true\n }\n }\n\n return false\n}\n\nfunc IsAdmin(msg *discordgo.Message) bool {\n channel, e := cache.GetSession().Channel(msg.ChannelID)\n if e != nil {\n return false\n }\n\n guild, e := cache.GetSession().Guild(channel.GuildID)\n if e != nil {\n return false\n }\n\n if msg.Author.ID == guild.OwnerID || IsBotAdmin(msg.Author.ID) {\n return true\n }\n\n guildMember, e := cache.GetSession().GuildMember(guild.ID, msg.Author.ID)\n \/\/ Check if role may manage server or a role is in admin role list\n for _, role := range guild.Roles {\n for _, userRole := range guildMember.Roles {\n if userRole == role.ID {\n if role.Permissions&discordgo.PermissionAdministrator == discordgo.PermissionAdministrator {\n return true\n }\n for _, adminRoleName := range adminRoleNames {\n if role.Name == adminRoleName {\n return true\n }\n }\n }\n }\n }\n return false\n}\n\nfunc IsMod(msg *discordgo.Message) bool {\n if IsAdmin(msg) == true {\n return true\n } else {\n channel, e := cache.GetSession().Channel(msg.ChannelID)\n if e != nil {\n return false\n }\n guild, e := cache.GetSession().Guild(channel.GuildID)\n if e != nil {\n return false\n }\n guildMember, e := cache.GetSession().GuildMember(guild.ID, msg.Author.ID)\n \/\/ check if a role is in mod role list\n for _, role := range guild.Roles {\n for _, userRole := range guildMember.Roles {\n if userRole == role.ID {\n for _, modRoleName := range modRoleNames {\n if role.Name == modRoleName {\n return true\n }\n }\n }\n }\n }\n }\n return false\n}\n\n\/\/ RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission\nfunc RequireAdmin(msg *discordgo.Message, cb Callback) {\n if !IsAdmin(msg) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"admin.no_permission\"))\n return\n }\n\n cb()\n}\n\n\/\/ RequireAdmin only calls $cb if the author is an admin or has MANAGE_SERVER permission\nfunc RequireMod(msg *discordgo.Message, cb Callback) {\n if !IsMod(msg) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"mod.no_permission\"))\n return\n }\n\n cb()\n}\n\n\/\/ RequireBotAdmin only calls $cb if the author is a bot admin\nfunc RequireBotAdmin(msg *discordgo.Message, cb Callback) {\n if !IsBotAdmin(msg.Author.ID) {\n cache.GetSession().ChannelMessageSend(msg.ChannelID, GetText(\"botadmin.no_permission\"))\n return\n }\n\n cb()\n}\n\nfunc ConfirmEmbed(channelID string, author *discordgo.User, confirmMessageText string, confirmEmojiID string, abortEmojiID string) bool {\n \/\/ send embed asking the user to confirm\n confirmMessage, err := cache.GetSession().ChannelMessageSendEmbedWithMessage(channelID,\n \"<@\"+author.ID+\">\",\n &discordgo.MessageEmbed{\n Title: GetText(\"bot.embeds.please-confirm-title\"),\n Description: confirmMessageText,\n })\n if err != nil {\n cache.GetSession().ChannelMessageSend(channelID, GetTextF(\"bot.errors.general\", err.Error()))\n }\n\n \/\/ delete embed after everything is done\n defer cache.GetSession().ChannelMessageDelete(confirmMessage.ChannelID, confirmMessage.ID)\n\n \/\/ add default reactions to embed\n cache.GetSession().MessageReactionAdd(confirmMessage.ChannelID, confirmMessage.ID, confirmEmojiID)\n cache.GetSession().MessageReactionAdd(confirmMessage.ChannelID, confirmMessage.ID, abortEmojiID)\n\n \/\/ check every second if a reaction has been clicked\n for {\n confirmes, _ := cache.GetSession().MessageReactions(confirmMessage.ChannelID, confirmMessage.ID, confirmEmojiID, 100)\n for _, confirm := range confirmes {\n if confirm.ID == author.ID {\n \/\/ user has confirmed the call\n return true\n }\n }\n aborts, _ := cache.GetSession().MessageReactions(confirmMessage.ChannelID, confirmMessage.ID, abortEmojiID, 100)\n for _, abort := range aborts {\n if abort.ID == author.ID {\n \/\/ User has aborted the call\n return false\n }\n }\n\n time.Sleep(1 * time.Second)\n }\n}\n\nfunc GetMuteRole(guildID string) (*discordgo.Role, error) {\n guild, err := cache.GetSession().Guild(guildID)\n Relax(err)\n var muteRole *discordgo.Role\n settings, err := GuildSettingsGet(guildID)\n for _, role := range guild.Roles {\n Relax(err)\n if role.Name == settings.MutedRoleName {\n muteRole = role\n }\n }\n if muteRole == nil {\n muteRole, err = cache.GetSession().GuildRoleCreate(guildID)\n if err != nil {\n return muteRole, err\n }\n muteRole, err = cache.GetSession().GuildRoleEdit(guildID, muteRole.ID, settings.MutedRoleName, muteRole.Color, muteRole.Hoist, 0, muteRole.Mentionable)\n if err != nil {\n return muteRole, err\n }\n for _, channel := range guild.Channels {\n err = cache.GetSession().ChannelPermissionSet(channel.ID, muteRole.ID, \"role\", 0, discordgo.PermissionSendMessages)\n if err != nil {\n logger.ERROR.L(\"discord\", \"Error disabling send messages on mute Role: \" + err.Error())\n }\n \/\/ TODO: update discordgo\n \/\/err = cache.GetSession().ChannelPermissionSet(channel.ID, muteRole.ID, \"role\", 0, discordgo.PermissionAddReactions)\n \/\/if err != nil {\n \/\/ logger.ERROR.L(\"discord\", \"Error disabling add reactions on mute Role: \" + err.Error())\n \/\/}\n }\n }\n return muteRole, nil\n}\n\nfunc GetChannelFromMention(mention string) (*discordgo.Channel, error) {\n var targetChannel *discordgo.Channel\n re := regexp.MustCompile(\"(<#)?(\\\\d+)(>)?\")\n result := re.FindStringSubmatch(mention)\n if len(result) == 4 {\n targetChannel, err := cache.GetSession().Channel(result[2])\n return targetChannel, err\n } else {\n return targetChannel, errors.New(\"Channel not found.\")\n }\n}\n\nfunc GetUserFromMention(mention string) (*discordgo.User, error) {\n var targetUser *discordgo.User\n re := regexp.MustCompile(\"(<@)?(\\\\d+)(>)?\")\n result := re.FindStringSubmatch(mention)\n if len(result) == 4 {\n targetUser, err := cache.GetSession().User(result[2])\n return targetUser, err\n } else {\n return targetUser, errors.New(\"User not found.\")\n }\n}\n\nfunc GetDiscordColorFromHex(hex string) int {\n colorInt, ok := new(big.Int).SetString(strings.Replace(hex, \"#\", \"\", 1), 16)\n if ok == true {\n return int(colorInt.Int64())\n } else {\n return 0x0FADED\n }\n}\n\nfunc GetTimeFromSnowflake(id string) time.Time {\n iid, err := strconv.ParseInt(id, 10, 64)\n Relax(err)\n\n return time.Unix(((iid>>22)+DISCORD_EPOCH)\/1000, 0).UTC()\n}\n\nfunc GetAllPermissions(guild *discordgo.Guild, member *discordgo.Member) int64 {\n var perms int64 = 0\n for _, x := range guild.Roles {\n if x.Name == \"@everyone\" {\n perms |= int64(x.Permissions)\n }\n }\n for _, r := range member.Roles {\n for _, x := range guild.Roles {\n if x.ID == r {\n perms |= int64(x.Permissions)\n }\n }\n }\n return perms\n}\nfunc Pagify(text string, delimiter string) []string {\n result := make([]string, 0)\n textParts := strings.Split(text, delimiter)\n currentOutputPart := \"\"\n for _, textPart := range textParts {\n if len(currentOutputPart)+len(textPart)+len(delimiter) <= 1992 {\n if len(currentOutputPart) > 0 || len(result) > 0 {\n currentOutputPart += delimiter + textPart\n } else {\n currentOutputPart += textPart\n }\n } else {\n result = append(result, currentOutputPart)\n currentOutputPart = \"\"\n if len(textPart) <= 1992 { \/\/ @TODO: else: split text somehow\n currentOutputPart = textPart\n }\n }\n }\n if currentOutputPart != \"\" {\n result = append(result, currentOutputPart)\n }\n return result\n}\n\nfunc GetAvatarUrl(user *discordgo.User) string {\n return GetAvatarUrlWithSize(user, 1024)\n}\n\nfunc GetAvatarUrlWithSize(user *discordgo.User, size uint16) string {\n if user.Avatar == \"\" {\n return \"\"\n }\n\n avatarUrl := \"https:\/\/cdn.discordapp.com\/avatars\/%s\/%s.%s?size=%d\"\n\n if strings.HasPrefix(user.Avatar, \"a_\") {\n return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, \"gif\", size)\n }\n\n return fmt.Sprintf(avatarUrl, user.ID, user.Avatar, \"jpg\", size)\n}\n\nfunc CommandExists(name string) bool {\n for _, command := range cache.GetPluginList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n for _, command := range cache.GetPluginExtendedList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n for _, command := range cache.GetTriggerPluginList() {\n if command == strings.ToLower(name) {\n return true\n }\n }\n return false\n}\n<|endoftext|>"} {"text":"<commit_before>package php\n\nimport \"stephensearles.com\/php\/ast\"\n\n\/*\n\nValid Expression Patterns\nExpr [Binary Op] Expr\n[Unary Op] Expr\nExpr [Unary Op]\nExpr [Tertiary Op 1] Expr [Tertiary Op 2] Expr\nIdentifier\nLiteral\nFunction Call\n\nParentesis always triggers sub-expression\n\nnon-associative clone new clone and new\nleft [ array()\nright ++ -- ~ (int) (float) (string) (array) (object) (bool) @ types and increment\/decrement\nnon-associative instanceof types\nright ! logical\nleft * \/ % arithmetic\nleft + - . arithmetic and string\nleft << >> bitwise\nnon-associative < <= > >= comparison\nnon-associative == != === !== <> comparison\nleft & bitwise and references\nleft ^ bitwise\nleft | bitwise\nleft && logical\nleft || logical\nleft ? : ternary\nright = += -= *= \/= .= %= &= |= ^= <<= >>= => assignment\nleft and logical\nleft xor logical\nleft or logical\nleft , many uses\n\n*\/\n\nvar operatorPrecedence = map[ItemType]int{\n\titemArrayLookupOperatorLeft: 19,\n\titemArrayLookupOperatorRight: 19,\n\titemUnaryOperator: 18,\n\titemCastOperator: 18,\n\titemInstanceofOperator: 17,\n\titemNegationOperator: 16,\n\titemMultOperator: 15,\n\titemAdditionOperator: 14,\n\titemSubtractionOperator: 14,\n\titemConcatenationOperator: 14,\n\n\titemBitwiseShiftOperator: 13,\n\titemComparisonOperator: 12,\n\titemEqualityOperator: 11,\n\n\titemAmpersandOperator: 10,\n\titemBitwiseXorOperator: 9,\n\titemBitwiseOrOperator: 8,\n\titemAndOperator: 7,\n\titemOrOperator: 6,\n\titemTernaryOperator1: 5,\n\titemTernaryOperator2: 5,\n\titemAssignmentOperator: 4,\n\titemWrittenAndOperator: 3,\n\titemWrittenXorOperator: 2,\n\titemWrittenOrOperator: 1,\n}\n\nfunc (p *parser) parseExpression() (expr ast.Expression) {\n\t\/\/ consume expression\n\toriginalParenLev := p.parenLevel\n\tswitch p.current.typ {\n\tcase itemNewOperator:\n\t\treturn &ast.NewExpression{\n\t\t\tExpression: p.parseNextExpression(),\n\t\t}\n\tcase itemUnaryOperator, itemNegationOperator:\n\t\top := p.current\n\t\texpr = p.parseUnaryExpressionRight(p.parseNextExpression(), op)\n\tcase itemArray:\n\t\treturn p.parseArrayDeclaration()\n\tcase itemIdentifier:\n\t\tif p.peek().typ == itemAssignmentOperator {\n\t\t\tassignee := p.parseIdentifier().(ast.Assignable)\n\t\t\tp.next()\n\t\t\treturn ast.AssignmentExpression{\n\t\t\t\tAssignee: assignee,\n\t\t\t\tOperator: p.current.val,\n\t\t\t\tValue: p.parseNextExpression(),\n\t\t\t}\n\t\t}\n\t\tfallthrough\n\tcase itemNonVariableIdentifier, itemStringLiteral, itemNumberLiteral, itemBooleanLiteral:\n\t\texpr = p.parseOperation(originalParenLev, p.expressionize())\n\tcase itemOpenParen:\n\t\tp.parenLevel += 1\n\t\tp.next()\n\t\texpr = p.parseExpression()\n\t\tp.expect(itemCloseParen)\n\t\tp.parenLevel -= 1\n\tdefault:\n\t\tp.errorf(\"Expected expression. Found %s\", p.current)\n\t\treturn nil\n\t}\n\tif p.parenLevel != originalParenLev {\n\t\tp.errorf(\"unbalanced parens: %d prev: %d\", p.parenLevel, originalParenLev)\n\t\treturn nil\n\t}\n\treturn\n}\n\nfunc (p *parser) parseOperation(originalParenLevel int, lhs ast.Expression) (expr ast.Expression) {\n\tp.next()\n\tswitch p.current.typ {\n\tcase itemUnaryOperator:\n\t\texpr = p.parseUnaryExpressionLeft(lhs, p.current)\n\tcase itemAdditionOperator, itemSubtractionOperator, itemConcatenationOperator, itemComparisonOperator, itemMultOperator, itemAndOperator, itemOrOperator, itemAmpersandOperator, itemBitwiseXorOperator, itemBitwiseOrOperator, itemBitwiseShiftOperator, itemWrittenAndOperator, itemWrittenXorOperator, itemWrittenOrOperator:\n\t\texpr = p.parseBinaryOperation(lhs, p.current, originalParenLevel)\n\tcase itemCloseParen:\n\t\tif p.parenLevel <= originalParenLevel {\n\t\t\tp.backup()\n\t\t\treturn lhs\n\t\t}\n\t\tp.parenLevel -= 1\n\t\treturn p.parseOperation(originalParenLevel, lhs)\n\tdefault:\n\t\tp.backup()\n\t\treturn lhs\n\t}\n\treturn p.parseOperation(originalParenLevel, expr)\n}\n\nfunc (p *parser) parseBinaryOperation(lhs ast.Expression, operator Item, originalParenLevel int) ast.Expression {\n\tp.next()\n\trhs := p.expressionize()\n\tfor {\n\t\tp.next()\n\t\tnextOperator := p.current\n\t\tp.backup()\n\t\tnextOperatorPrecedence, ok := operatorPrecedence[nextOperator.typ]\n\t\tif ok && nextOperatorPrecedence > operatorPrecedence[operator.typ] {\n\t\t\trhs = p.parseOperation(originalParenLevel, rhs)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn newBinaryOperation(operator, lhs, rhs)\n}\n\nfunc (p *parser) parseUnaryExpressionRight(operand ast.Expression, operator Item) ast.Expression {\n\treturn newUnaryOperation(operator, operand)\n}\n\nfunc (p *parser) parseUnaryExpressionLeft(operand ast.Expression, operator Item) ast.Expression {\n\treturn newUnaryOperation(operator, operand)\n}\n\n\/\/ expressionize takes the current token and returns it as the simplest\n\/\/ expression for that token. That means an expression with no operators\n\/\/ except for the object operator.\nfunc (p *parser) expressionize() ast.Expression {\n\tswitch p.current.typ {\n\tcase itemIdentifier:\n\t\treturn p.parseIdentifier()\n\tcase itemStringLiteral, itemBooleanLiteral, itemNumberLiteral:\n\t\treturn p.parseLiteral()\n\tcase itemNonVariableIdentifier:\n\t\tif p.peek().typ == itemOpenParen {\n\t\t\texpr := p.parseFunctionCall()\n\t\t\tif p.peek().typ == itemObjectOperator {\n\t\t\t\treturn p.parseObjectLookup(expr)\n\t\t\t}\n\t\t\treturn expr\n\t\t}\n\t\tif p.peek().typ == itemScopeResolutionOperator {\n\t\t\tp.expect(itemScopeResolutionOperator)\n\t\t\treturn &ast.ClassExpression{\n\t\t\t\tReceiver: p.current.val,\n\t\t\t\tExpression: p.parseNextExpression(),\n\t\t\t}\n\t\t}\n\t\treturn ast.ConstantExpression{\n\t\t\tIdentifier: ast.NewIdentifier(p.current.val),\n\t\t}\n\tcase itemOpenParen:\n\t\treturn p.parseExpression()\n\t}\n\t\/\/ error?\n\treturn nil\n}\n\nfunc (p *parser) parseLiteral() *ast.Literal {\n\tswitch p.current.typ {\n\tcase itemStringLiteral:\n\t\treturn &ast.Literal{Type: ast.String}\n\tcase itemBooleanLiteral:\n\t\treturn &ast.Literal{Type: ast.Boolean}\n\tcase itemNumberLiteral:\n\t\treturn &ast.Literal{Type: ast.Float}\n\t}\n\tp.errorf(\"Unknown literal type\")\n\treturn nil\n}\n\nfunc (p *parser) parseIdentifier() ast.Expression {\n\tident := ast.NewIdentifier(p.current.val)\n\tswitch pk := p.peek(); pk.typ {\n\tcase itemObjectOperator:\n\t\treturn p.parseObjectLookup(ident)\n\tcase itemArrayLookupOperatorLeft:\n\t\treturn p.parseArrayLookup(ident)\n\t}\n\treturn ident\n}\n\nfunc (p *parser) parseObjectLookup(r ast.Expression) ast.Expression {\n\tp.expect(itemObjectOperator)\n\tp.expect(itemNonVariableIdentifier)\n\tif pk := p.peek(); pk.typ == itemOpenParen {\n\t\texpr := &ast.MethodCallExpression{\n\t\t\tReceiver: r,\n\t\t\tFunctionCallExpression: p.parseFunctionCall(),\n\t\t}\n\t\treturn expr\n\t}\n\treturn &ast.PropertyExpression{\n\t\tReceiver: r,\n\t\tName: p.current.val,\n\t}\n}\n\nfunc (p *parser) parseArrayLookup(e ast.Expression) ast.Expression {\n\tp.expect(itemArrayLookupOperatorLeft)\n\tif p.peek().typ == itemArrayLookupOperatorRight {\n\t\tp.expect(itemArrayLookupOperatorRight)\n\t\treturn ast.ArrayAppendExpression{Array: e}\n\t}\n\tp.next()\n\texpr := &ast.ArrayLookupExpression{\n\t\tArray: e,\n\t\tIndex: p.parseExpression(),\n\t}\n\tp.expect(itemArrayLookupOperatorRight)\n\tif p.peek().typ == itemArrayLookupOperatorLeft {\n\t\treturn p.parseArrayLookup(expr)\n\t}\n\treturn expr\n}\n\nfunc (p *parser) parseArrayDeclaration() ast.Expression {\n\tpairs := make([]ast.ArrayPair, 0)\n\tp.expect(itemOpenParen)\n\tvar key, val ast.Expression\nArrayLoop:\n\tfor {\n\t\tp.next()\n\t\tswitch p.current.typ {\n\t\tcase itemArrayKeyOperator:\n\t\t\tif val == nil {\n\t\t\t\tp.errorf(\"expected array key before =>.\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tkey = val\n\t\t\tp.next()\n\t\t\tval = p.parseExpression()\n\t\tcase itemCloseParen:\n\t\t\tif val != nil {\n\t\t\t\tpairs = append(pairs, ast.ArrayPair{key, val})\n\t\t\t}\n\t\t\tbreak ArrayLoop\n\t\tcase itemArgumentSeparator:\n\t\t\tpairs = append(pairs, ast.ArrayPair{key, val})\n\t\t\tkey = nil\n\t\t\tval = nil\n\t\tdefault:\n\t\t\tval = p.parseExpression()\n\t\t}\n\t}\n\treturn &ast.ArrayExpression{Pairs: pairs}\n}\n<commit_msg>Wrote code to support ternary operator<commit_after>package php\n\nimport \"stephensearles.com\/php\/ast\"\n\n\/*\n\nValid Expression Patterns\nExpr [Binary Op] Expr\n[Unary Op] Expr\nExpr [Unary Op]\nExpr [Tertiary Op 1] Expr [Tertiary Op 2] Expr\nIdentifier\nLiteral\nFunction Call\n\nParentesis always triggers sub-expression\n\nnon-associative clone new clone and new\nleft [ array()\nright ++ -- ~ (int) (float) (string) (array) (object) (bool) @ types and increment\/decrement\nnon-associative instanceof types\nright ! logical\nleft * \/ % arithmetic\nleft + - . arithmetic and string\nleft << >> bitwise\nnon-associative < <= > >= comparison\nnon-associative == != === !== <> comparison\nleft & bitwise and references\nleft ^ bitwise\nleft | bitwise\nleft && logical\nleft || logical\nleft ? : ternary\nright = += -= *= \/= .= %= &= |= ^= <<= >>= => assignment\nleft and logical\nleft xor logical\nleft or logical\nleft , many uses\n\n*\/\n\nvar operatorPrecedence = map[ItemType]int{\n\titemArrayLookupOperatorLeft: 19,\n\titemArrayLookupOperatorRight: 19,\n\titemUnaryOperator: 18,\n\titemCastOperator: 18,\n\titemInstanceofOperator: 17,\n\titemNegationOperator: 16,\n\titemMultOperator: 15,\n\titemAdditionOperator: 14,\n\titemSubtractionOperator: 14,\n\titemConcatenationOperator: 14,\n\n\titemBitwiseShiftOperator: 13,\n\titemComparisonOperator: 12,\n\titemEqualityOperator: 11,\n\n\titemAmpersandOperator: 10,\n\titemBitwiseXorOperator: 9,\n\titemBitwiseOrOperator: 8,\n\titemAndOperator: 7,\n\titemOrOperator: 6,\n\titemTernaryOperator1: 5,\n\titemTernaryOperator2: 5,\n\titemAssignmentOperator: 4,\n\titemWrittenAndOperator: 3,\n\titemWrittenXorOperator: 2,\n\titemWrittenOrOperator: 1,\n}\n\nfunc (p *parser) parseExpression() (expr ast.Expression) {\n\t\/\/ consume expression\n\toriginalParenLev := p.parenLevel\n\tswitch p.current.typ {\n\tcase itemNewOperator:\n\t\treturn &ast.NewExpression{\n\t\t\tExpression: p.parseNextExpression(),\n\t\t}\n\tcase itemUnaryOperator, itemNegationOperator:\n\t\top := p.current\n\t\texpr = p.parseUnaryExpressionRight(p.parseNextExpression(), op)\n\tcase itemArray:\n\t\treturn p.parseArrayDeclaration()\n\tcase itemIdentifier:\n\t\tif p.peek().typ == itemAssignmentOperator {\n\t\t\tassignee := p.parseIdentifier().(ast.Assignable)\n\t\t\tp.next()\n\t\t\treturn ast.AssignmentExpression{\n\t\t\t\tAssignee: assignee,\n\t\t\t\tOperator: p.current.val,\n\t\t\t\tValue: p.parseNextExpression(),\n\t\t\t}\n\t\t}\n\t\tfallthrough\n\tcase itemNonVariableIdentifier, itemStringLiteral, itemNumberLiteral, itemBooleanLiteral:\n\t\texpr = p.parseOperation(originalParenLev, p.expressionize())\n\tcase itemOpenParen:\n\t\tp.parenLevel += 1\n\t\tp.next()\n\t\texpr = p.parseExpression()\n\t\tp.expect(itemCloseParen)\n\t\tp.parenLevel -= 1\n\tdefault:\n\t\tp.errorf(\"Expected expression. Found %s\", p.current)\n\t\treturn nil\n\t}\n\tif p.parenLevel != originalParenLev {\n\t\tp.errorf(\"unbalanced parens: %d prev: %d\", p.parenLevel, originalParenLev)\n\t\treturn nil\n\t}\n\treturn\n}\n\nfunc (p *parser) parseOperation(originalParenLevel int, lhs ast.Expression) (expr ast.Expression) {\n\tp.next()\n\tswitch p.current.typ {\n\tcase itemUnaryOperator:\n\t\texpr = p.parseUnaryExpressionLeft(lhs, p.current)\n\tcase itemAdditionOperator, itemSubtractionOperator, itemConcatenationOperator, itemComparisonOperator, itemMultOperator, itemAndOperator, itemOrOperator, itemAmpersandOperator, itemBitwiseXorOperator, itemBitwiseOrOperator, itemBitwiseShiftOperator, itemWrittenAndOperator, itemWrittenXorOperator, itemWrittenOrOperator:\n\t\texpr = p.parseBinaryOperation(lhs, p.current, originalParenLevel)\n\tcase itemTernaryOperator1:\n\t\texpr = p.parseTernaryOperation(lhs)\n\tcase itemCloseParen:\n\t\tif p.parenLevel <= originalParenLevel {\n\t\t\tp.backup()\n\t\t\treturn lhs\n\t\t}\n\t\tp.parenLevel -= 1\n\t\treturn p.parseOperation(originalParenLevel, lhs)\n\tdefault:\n\t\tp.backup()\n\t\treturn lhs\n\t}\n\treturn p.parseOperation(originalParenLevel, expr)\n}\n\nfunc (p *parser) parseBinaryOperation(lhs ast.Expression, operator Item, originalParenLevel int) ast.Expression {\n\tp.next()\n\trhs := p.expressionize()\n\tfor {\n\t\tp.next()\n\t\tnextOperator := p.current\n\t\tp.backup()\n\t\tnextOperatorPrecedence, ok := operatorPrecedence[nextOperator.typ]\n\t\tif ok && nextOperatorPrecedence > operatorPrecedence[operator.typ] {\n\t\t\trhs = p.parseOperation(originalParenLevel, rhs)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn newBinaryOperation(operator, lhs, rhs)\n}\n\nfunc (p *parser) parseTernaryOperation(lhs ast.Expression) ast.Expression {\n\ttruthy := p.parseNextExpression()\n\tp.expect(itemTernaryOperator2)\n\tfalsy := p.parseNextExpression()\n\treturn &ast.OperatorExpression{\n\t\tOperand1: lhs,\n\t\tOperand2: truthy,\n\t\tOperand3: falsy,\n\t\tType: truthy.EvaluatesTo() | falsy.EvaluatesTo(),\n\t\tOperator: \"?:\",\n\t}\n}\n\nfunc (p *parser) parseUnaryExpressionRight(operand ast.Expression, operator Item) ast.Expression {\n\treturn newUnaryOperation(operator, operand)\n}\n\nfunc (p *parser) parseUnaryExpressionLeft(operand ast.Expression, operator Item) ast.Expression {\n\treturn newUnaryOperation(operator, operand)\n}\n\n\/\/ expressionize takes the current token and returns it as the simplest\n\/\/ expression for that token. That means an expression with no operators\n\/\/ except for the object operator.\nfunc (p *parser) expressionize() ast.Expression {\n\tswitch p.current.typ {\n\tcase itemIdentifier:\n\t\treturn p.parseIdentifier()\n\tcase itemStringLiteral, itemBooleanLiteral, itemNumberLiteral:\n\t\treturn p.parseLiteral()\n\tcase itemNonVariableIdentifier:\n\t\tif p.peek().typ == itemOpenParen {\n\t\t\texpr := p.parseFunctionCall()\n\t\t\tif p.peek().typ == itemObjectOperator {\n\t\t\t\treturn p.parseObjectLookup(expr)\n\t\t\t}\n\t\t\treturn expr\n\t\t}\n\t\tif p.peek().typ == itemScopeResolutionOperator {\n\t\t\tp.expect(itemScopeResolutionOperator)\n\t\t\treturn &ast.ClassExpression{\n\t\t\t\tReceiver: p.current.val,\n\t\t\t\tExpression: p.parseNextExpression(),\n\t\t\t}\n\t\t}\n\t\treturn ast.ConstantExpression{\n\t\t\tIdentifier: ast.NewIdentifier(p.current.val),\n\t\t}\n\tcase itemOpenParen:\n\t\treturn p.parseExpression()\n\t}\n\t\/\/ error?\n\treturn nil\n}\n\nfunc (p *parser) parseLiteral() *ast.Literal {\n\tswitch p.current.typ {\n\tcase itemStringLiteral:\n\t\treturn &ast.Literal{Type: ast.String}\n\tcase itemBooleanLiteral:\n\t\treturn &ast.Literal{Type: ast.Boolean}\n\tcase itemNumberLiteral:\n\t\treturn &ast.Literal{Type: ast.Float}\n\t}\n\tp.errorf(\"Unknown literal type\")\n\treturn nil\n}\n\nfunc (p *parser) parseIdentifier() ast.Expression {\n\tident := ast.NewIdentifier(p.current.val)\n\tswitch pk := p.peek(); pk.typ {\n\tcase itemObjectOperator:\n\t\treturn p.parseObjectLookup(ident)\n\tcase itemArrayLookupOperatorLeft:\n\t\treturn p.parseArrayLookup(ident)\n\t}\n\treturn ident\n}\n\nfunc (p *parser) parseObjectLookup(r ast.Expression) ast.Expression {\n\tp.expect(itemObjectOperator)\n\tp.expect(itemNonVariableIdentifier)\n\tif pk := p.peek(); pk.typ == itemOpenParen {\n\t\texpr := &ast.MethodCallExpression{\n\t\t\tReceiver: r,\n\t\t\tFunctionCallExpression: p.parseFunctionCall(),\n\t\t}\n\t\treturn expr\n\t}\n\treturn &ast.PropertyExpression{\n\t\tReceiver: r,\n\t\tName: p.current.val,\n\t}\n}\n\nfunc (p *parser) parseArrayLookup(e ast.Expression) ast.Expression {\n\tp.expect(itemArrayLookupOperatorLeft)\n\tif p.peek().typ == itemArrayLookupOperatorRight {\n\t\tp.expect(itemArrayLookupOperatorRight)\n\t\treturn ast.ArrayAppendExpression{Array: e}\n\t}\n\tp.next()\n\texpr := &ast.ArrayLookupExpression{\n\t\tArray: e,\n\t\tIndex: p.parseExpression(),\n\t}\n\tp.expect(itemArrayLookupOperatorRight)\n\tif p.peek().typ == itemArrayLookupOperatorLeft {\n\t\treturn p.parseArrayLookup(expr)\n\t}\n\treturn expr\n}\n\nfunc (p *parser) parseArrayDeclaration() ast.Expression {\n\tpairs := make([]ast.ArrayPair, 0)\n\tp.expect(itemOpenParen)\n\tvar key, val ast.Expression\nArrayLoop:\n\tfor {\n\t\tp.next()\n\t\tswitch p.current.typ {\n\t\tcase itemArrayKeyOperator:\n\t\t\tif val == nil {\n\t\t\t\tp.errorf(\"expected array key before =>.\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tkey = val\n\t\t\tp.next()\n\t\t\tval = p.parseExpression()\n\t\tcase itemCloseParen:\n\t\t\tif val != nil {\n\t\t\t\tpairs = append(pairs, ast.ArrayPair{key, val})\n\t\t\t}\n\t\t\tbreak ArrayLoop\n\t\tcase itemArgumentSeparator:\n\t\t\tpairs = append(pairs, ast.ArrayPair{key, val})\n\t\t\tkey = nil\n\t\t\tval = nil\n\t\tdefault:\n\t\t\tval = p.parseExpression()\n\t\t}\n\t}\n\treturn &ast.ArrayExpression{Pairs: pairs}\n}\n<|endoftext|>"} {"text":"<commit_before>package hermes\n\nimport (\n\t\"fmt\"\n\t\"crypto\/sha1\"\n)\n\ntype HermesFile struct {\n\tname string\n\tcontents string\n}\n\nfunc (file *HermesFile) generateFileName(file_id int, file_checksum string) {\n\tfile_name := fmt.Sprintf(\"Hermes_%02d%v\", file_ID, file_checksum)\n\tfile.name = file_name\n}\nfunc (file *HermesFile) generateFileContents(file_ID int) {\n\tfile.contents = \"jhfvjhdfjhfjjhjhdfvjvcvfjh\";\n}\nfunc (file HermesFile) generateFileChecksum() string {\n\t\n\tfile_contents := []byte(file.contents)\n\thash := sha1.Sum(file_contents)\n\tchecksum := fmt.Sprintf(\"%s%x\",\"_\", hash)\n\treturn checksum;\n}\nfunc GenerateFile (id int) HermesFile {\n\tfile := HermesFile{}\n\tfile.generateFileContents(id)\n\tchecksum := file.generateFileChecksum()\n\tfile.generateFileName(id, checksum)\n\treturn file\n}\n<commit_msg>Update hermes\/file_gen.go<commit_after>package hermes\n\nimport (\n\t\"fmt\"\n\t\"crypto\/sha1\"\n)\n\ntype HermesFile struct {\n\tname string\n\tcontents string\n}\n\nfunc (file *HermesFile) generateFileName(file_id int, file_checksum string) {\n\tfile_name := fmt.Sprintf(\"Hermes_%02d%v\", file_ID, file_checksum)\n\tfile.name = file_name\n}\n\nfunc (file *HermesFile) generateFileContents(file_ID int) {\n\tfile.contents = \"jhfvjhdfjhfjjhjhdfvjvcvfjh\";\n}\nfunc (file HermesFile) generateFileChecksum() string {\n\t\n\tfile_contents := []byte(file.contents)\n\thash := sha1.Sum(file_contents)\n\tchecksum := fmt.Sprintf(\"%s%x\",\"_\", hash)\n\treturn checksum;\n}\nfunc GenerateFile (id int) HermesFile {\n\tfile := HermesFile{}\n\tfile.generateFileContents(id)\n\tchecksum := file.generateFileChecksum()\n\tfile.generateFileName(id, checksum)\n\treturn file\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\"\n)\n\nfunc main() {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:1381\")\n\t_, err = net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t}\n}\n<commit_msg>Process MQTT packets<commit_after>package main\n\nimport (\n\t\"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\/packets\"\n\t\"github.com\/shafreeck\/hermes\/hermes\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n)\n\nvar (\n\tWAITING = 0\n\tCONNECT = 1\n\tCONNACK = 2\n\tESTABLISHED = 3\n\tDISCONNECT = 4\n)\n\ntype client struct {\n\tstate int\n\tconn net.Conn\n\tid string\n\thms *hermes.Hermes\n}\n\nfunc (c *client) processConnect(cp packets.ControlPacket) {\n\t\/\/ load all the retained sessions\n\t\/\/ setup the cursor goroution\n}\n\nfunc (c *client) process(in, out chan packets.ControlPacket) {\n\tconn := c.conn\n\tfor {\n\t\tcp, err := packets.ReadPacket(conn)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\\n\", err.Error())\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tswitch cp.(type) {\n\t\tcase *packets.PublishPacket:\n\t\t\tlog.Printf(\"%s\\n\", cp.String())\n\t\t\tp := cp.(*packets.PublishPacket)\n\t\t\tlog.Printf(\"%s\\n\", p.TopicName)\n\n\t\t\tc.hms.Publish(p.TopicName, p.Payload)\n\t\t\tif p.Qos == 1 {\n\t\t\t\tpa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)\n\t\t\t\tpa.MessageID = p.MessageID\n\t\t\t\tpa.Write(conn)\n\t\t\t}\n\n\t\tcase *packets.ConnectPacket:\n\t\t\tlog.Printf(\"%s\\n\", cp.String())\n\t\t\tp := cp.(*packets.ConnectPacket)\n\t\t\tlog.Printf(\"%s\\n\", p.ProtocolName)\n\t\t\tc.id = p.ClientIdentifier\n\t\t\tca := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket)\n\t\t\tca.Write(conn)\n\n\t\tcase *packets.SubscribePacket:\n\t\t\tp := cp.(*packets.SubscribePacket)\n\t\t\tc.hms.Subscribe(p.Topics[0], c.id)\n\t\t\tif p.Qos > 0 {\n\t\t\t\tpa := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)\n\t\t\t\tpa.MessageID = p.MessageID\n\t\t\t\tpa.Write(conn)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc main() {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:1883\")\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"%s\\n\", err)\n\t}\n\n\thms := hermes.NewHermes()\n\n\tfor {\n\t\tconn, err := listener.AcceptTCP()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\\n\", err.Error())\n\t\t}\n\t\tc := &client{conn: conn, hms: hms}\n\t\tc.state = WAITING\n\t\tgo c.process()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package hive\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"io\"\n\t\/\/ \"log\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\nconst (\n\tBEE_CLI_STR = \"0: jdbc:hive2:\"\n\tCLOSE_SCRIPT = \"!quit\"\n)\n\ntype DuplexTerm struct {\n\tWriter *bufio.Writer\n\tReader *bufio.Reader\n\tCmd *exec.Cmd\n\tStdin io.WriteCloser\n\tStdout io.ReadCloser\n\tFnReceive FnHiveReceive\n}\n\n\/*func (d *DuplexTerm) Open() (e error) {\n\tif d.Stdin, e = d.Cmd.StdinPipe(); e != nil {\n\t\treturn\n\t}\n\n\tif d.Stdout, e = d.Cmd.StdoutPipe(); e != nil {\n\t\treturn\n\t}\n\n\td.Writer = bufio.NewWriter(d.Stdin)\n\td.Reader = bufio.NewReader(d.Stdout)\n\n\te = d.Cmd.Start()\n\treturn\n}*\/\n\nfunc (d *DuplexTerm) Open() (e error) {\n\tif d.Stdin, e = d.Cmd.StdinPipe(); e != nil {\n\t\treturn\n\t}\n\n\tif d.Stdout, e = d.Cmd.StdoutPipe(); e != nil {\n\t\treturn\n\t}\n\n\td.Writer = bufio.NewWriter(d.Stdin)\n\td.Reader = bufio.NewReader(d.Stdout)\n\n\tif d.FnReceive != nil {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tbread, e := d.Reader.ReadString('\\n')\n\n\t\t\t\tpeek, _ := d.Reader.Peek(14)\n\t\t\t\tpeekStr := string(peek)\n\n\t\t\t\tif !strings.Contains(bread, BEE_CLI_STR) {\n\t\t\t\t\t\/\/result = append(result, bread)\n\t\t\t\t\td.FnReceive(bread)\n\t\t\t\t}\n\n\t\t\t\tif (e != nil && e.Error() == \"EOF\") || (strings.Contains(peekStr, CLOSE_SCRIPT)) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t}\n\t\t}()\n\t}\n\te = d.Cmd.Start()\n\treturn\n}\n\nfunc (d *DuplexTerm) Close() {\n\tresult, e := d.SendInput(CLOSE_SCRIPT)\n\n\t_ = result\n\t_ = e\n\n\td.Cmd.Wait()\n\td.Stdin.Close()\n\td.Stdout.Close()\n}\n\nfunc (d *DuplexTerm) SendInput(input string) (result []string, e error) {\n\tiwrite, e := d.Writer.WriteString(input + \"\\n\")\n\tif iwrite == 0 {\n\t\te = errors.New(\"Writing only 0 byte\")\n\t} else {\n\t\te = d.Writer.Flush()\n\t}\n\n\tif e != nil {\n\t\treturn\n\t}\n\n\tif d.FnReceive == nil {\n\t\tfor {\n\t\t\tbread, e := d.Reader.ReadString('\\n')\n\t\t\tpeek, _ := d.Reader.Peek(14)\n\t\t\tpeekStr := string(peek)\n\n\t\t\tif !strings.Contains(bread, BEE_CLI_STR) {\n\t\t\t\tresult = append(result, bread)\n\t\t\t}\n\n\t\t\tif (e != nil && e.Error() == \"EOF\") || (BEE_CLI_STR == peekStr) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>trim the \\n<commit_after>package hive\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"io\"\n\t\/\/ \"log\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\nconst (\n\tBEE_CLI_STR = \"0: jdbc:hive2:\"\n\tCLOSE_SCRIPT = \"!quit\"\n)\n\ntype DuplexTerm struct {\n\tWriter *bufio.Writer\n\tReader *bufio.Reader\n\tCmd *exec.Cmd\n\tStdin io.WriteCloser\n\tStdout io.ReadCloser\n\tFnReceive FnHiveReceive\n}\n\n\/*func (d *DuplexTerm) Open() (e error) {\n\tif d.Stdin, e = d.Cmd.StdinPipe(); e != nil {\n\t\treturn\n\t}\n\n\tif d.Stdout, e = d.Cmd.StdoutPipe(); e != nil {\n\t\treturn\n\t}\n\n\td.Writer = bufio.NewWriter(d.Stdin)\n\td.Reader = bufio.NewReader(d.Stdout)\n\n\te = d.Cmd.Start()\n\treturn\n}*\/\n\nfunc (d *DuplexTerm) Open() (e error) {\n\tif d.Stdin, e = d.Cmd.StdinPipe(); e != nil {\n\t\treturn\n\t}\n\n\tif d.Stdout, e = d.Cmd.StdoutPipe(); e != nil {\n\t\treturn\n\t}\n\n\td.Writer = bufio.NewWriter(d.Stdin)\n\td.Reader = bufio.NewReader(d.Stdout)\n\n\tif d.FnReceive != nil {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tbread, e := d.Reader.ReadString('\\n')\n\n\t\t\t\tpeek, _ := d.Reader.Peek(14)\n\t\t\t\tpeekStr := string(peek)\n\n\t\t\t\tif !strings.Contains(bread, BEE_CLI_STR) {\n\t\t\t\t\t\/\/result = append(result, bread)\n\t\t\t\t\td.FnReceive(strings.TrimRight(bread, \"\\n\"))\n\t\t\t\t}\n\n\t\t\t\tif (e != nil && e.Error() == \"EOF\") || (strings.Contains(peekStr, CLOSE_SCRIPT)) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t}\n\t\t}()\n\t}\n\te = d.Cmd.Start()\n\treturn\n}\n\nfunc (d *DuplexTerm) Close() {\n\tresult, e := d.SendInput(CLOSE_SCRIPT)\n\n\t_ = result\n\t_ = e\n\n\td.Cmd.Wait()\n\td.Stdin.Close()\n\td.Stdout.Close()\n}\n\nfunc (d *DuplexTerm) SendInput(input string) (result []string, e error) {\n\tiwrite, e := d.Writer.WriteString(input + \"\\n\")\n\tif iwrite == 0 {\n\t\te = errors.New(\"Writing only 0 byte\")\n\t} else {\n\t\te = d.Writer.Flush()\n\t}\n\n\tif e != nil {\n\t\treturn\n\t}\n\n\tif d.FnReceive == nil {\n\t\tfor {\n\t\t\tbread, e := d.Reader.ReadString('\\n')\n\t\t\tpeek, _ := d.Reader.Peek(14)\n\t\t\tpeekStr := string(peek)\n\n\t\t\tif !strings.Contains(bread, BEE_CLI_STR) {\n\t\t\t\tresult = append(result, strings.TrimRight(bread, \"\\n\"))\n\t\t\t}\n\n\t\t\tif (e != nil && e.Error() == \"EOF\") || (BEE_CLI_STR == peekStr) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package gofakeit\n\nimport \"fmt\"\n\nfunc Example() {\n\tfmt.Println(\"Name:\", Name())\n\tfmt.Println(\"Email:\", Email())\n\tfmt.Println(\"Phone:\", Phone())\n\tfmt.Println(\"Address:\", Address())\n\t\/\/ Output: Name: Lura Lockman\n\t\/\/ Email: sylvanmraz@murphy.com\n\t\/\/ Phone: (457)485-3675\n\t\/\/ Address: 6037 Port Groves stad, Noeville, North Carolina 50286-8748\n}\n<commit_msg>updated example.<commit_after>package gofakeit\n\nimport \"fmt\"\n\nfunc Example() {\n\tSeed(11)\n\tfmt.Println(\"Name:\", Name())\n\tfmt.Println(\"Email:\", Email())\n\tfmt.Println(\"Phone:\", Phone())\n\tfmt.Println(\"Address:\", Address())\n\t\/\/ Output:\n\t\/\/ Name: Markus Moen\n\t\/\/ Email: alaynawuckert@kozey.biz\n\t\/\/ Phone: (570)245-7485\n\t\/\/ Address: 75776 Lake View land, Rueckerstad, New Hampshire 82250-2868\n}\n<|endoftext|>"} {"text":"<commit_before>package services\n\nimport (\n\t\"github.com\/vonji\/vonji-api\/models\"\n\t\"github.com\/vonji\/vonji-api\/utils\"\n)\n\ntype UserService struct {\n\tBaseService\n}\n\nfunc (service UserService) GetAll() []models.User {\n\tif Error != nil {\n\t\treturn nil\n\t}\n\n\tusers := []models.User{}\n\n\tif err := service.GetDB().Find(&users); err.Error != nil {\n\t\tError = utils.DatabaseError(err)\n\t\treturn nil\n\t}\n\n\tfor i, user := range users {\n\t\tif err := service.GetDB().Model(&user).Association(\"tags\").Find(&users[i].Tags); err.Error != nil {\n\t\t\tError = utils.AssociationError(err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn users\n}\n\nfunc (service UserService) GetOne(id uint) models.User {\n\tif Error != nil {\n\t\treturn models.User{}\n\t}\n\n\tuser := models.User{}\n\n\tif err := service.GetDB().First(&user, id); err.Error != nil {\n\t\tError = utils.DatabaseError(err)\n\t\treturn models.User{}\n\t}\n\n\tif err := service.GetDB().Model(&user).Association(\"tags\").Find(&user.Tags); err.Error != nil {\n\t\tError = utils.AssociationError(err)\n\t\treturn models.User{}\n\t}\n\n\treturn user\n}\n\nfunc (service UserService) GetOneByEmail(email string) models.User {\n\tif Error != nil {\n\t\treturn models.User{}\n\t}\n\n\tuser := models.User{ Email: email }\n\n\tif err := service.GetDB().Where(&user).First(&user); err.Error != nil {\n\t\tError = utils.DatabaseError(err)\n\t\treturn models.User{}\n\t}\n\n\tif err := service.GetDB().Model(&user).Association(\"tags\").Find(&user.Tags); err.Error != nil {\n\t\tError = utils.AssociationError(err)\n\t\treturn models.User{}\n\t}\n\n\treturn user\n}\n\nfunc (service UserService) Create(user models.User) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tif err := service.GetDB().Create(&user); err.Error != nil {\n\t\tError = utils.DatabaseError(err)\n\t\treturn\n\t}\n}\n\n\nfunc (service UserService) Update(user models.User) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tif err := service.GetDB().Save(&user); err != nil {\n\t\tError = utils.DatabaseError(err)\n\t}\n}\n\nfunc (service UserService) Delete(id uint) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tuser := models.User{}\n\tuser.ID = id\n\n\tif err := service.GetDB().Delete(&user); err != nil {\n\t\tError = utils.DatabaseError(err)\n\t}\n}<commit_msg>Fix error check in user service<commit_after>package services\n\nimport (\n\t\"github.com\/vonji\/vonji-api\/models\"\n\t\"github.com\/vonji\/vonji-api\/utils\"\n)\n\ntype UserService struct {\n\tBaseService\n}\n\nfunc (service UserService) GetAll() []models.User {\n\tif Error != nil {\n\t\treturn nil\n\t}\n\n\tusers := []models.User{}\n\n\tif db := service.GetDB().Find(&users); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t\treturn nil\n\t}\n\n\tfor i, user := range users {\n\t\tif db := service.GetDB().Model(&user).Association(\"tags\").Find(&users[i].Tags); db.Error != nil {\n\t\t\tError = utils.AssociationError(db)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn users\n}\n\nfunc (service UserService) GetOne(id uint) models.User {\n\tif Error != nil {\n\t\treturn models.User{}\n\t}\n\n\tuser := models.User{}\n\n\tif db := service.GetDB().First(&user, id); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t\treturn models.User{}\n\t}\n\n\tif db := service.GetDB().Model(&user).Association(\"tags\").Find(&user.Tags); db.Error != nil {\n\t\tError = utils.AssociationError(db)\n\t\treturn models.User{}\n\t}\n\n\treturn user\n}\n\nfunc (service UserService) GetOneByEmail(email string) models.User {\n\tif Error != nil {\n\t\treturn models.User{}\n\t}\n\n\tuser := models.User{ Email: email }\n\n\tif db := service.GetDB().Where(&user).First(&user); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t\treturn models.User{}\n\t}\n\n\tif db := service.GetDB().Model(&user).Association(\"tags\").Find(&user.Tags); db.Error != nil {\n\t\tError = utils.AssociationError(db)\n\t\treturn models.User{}\n\t}\n\n\treturn user\n}\n\nfunc (service UserService) Create(user models.User) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tif db := service.GetDB().Create(&user); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t\treturn\n\t}\n}\n\n\nfunc (service UserService) Update(user models.User) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tif db := service.GetDB().Save(&user); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t}\n}\n\nfunc (service UserService) Delete(id uint) {\n\tif Error != nil {\n\t\treturn\n\t}\n\n\tuser := models.User{}\n\tuser.ID = id\n\n\tif db := service.GetDB().Delete(&user); db.Error != nil {\n\t\tError = utils.DatabaseError(db)\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package srtp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/pion\/logging\"\n\t\"github.com\/pion\/rtcp\"\n)\n\n\/\/ SessionSRTCP implements io.ReadWriteCloser and provides a bi-directional SRTCP session\n\/\/ SRTCP itself does not have a design like this, but it is common in most applications\n\/\/ for local\/remote to each have their own keying material. This provides those patterns\n\/\/ instead of making everyone re-implement\ntype SessionSRTCP struct {\n\tsession\n\twriteStream *WriteStreamSRTCP\n}\n\n\/\/ NewSessionSRTCP creates a SRTCP session using conn as the underlying transport.\nfunc NewSessionSRTCP(conn net.Conn, config *Config) (*SessionSRTCP, error) {\n\tif config == nil {\n\t\treturn nil, errors.New(\"no config provided\")\n\t}\n\n\tloggerFactory := config.LoggerFactory\n\tif loggerFactory == nil {\n\t\tloggerFactory = logging.NewDefaultLoggerFactory()\n\t}\n\n\ts := &SessionSRTCP{\n\t\tsession: session{\n\t\t\tnextConn: conn,\n\t\t\treadStreams: map[uint32]readStream{},\n\t\t\tnewStream: make(chan readStream),\n\t\t\tstarted: make(chan interface{}),\n\t\t\tclosed: make(chan interface{}),\n\t\t\tlog: loggerFactory.NewLogger(\"srtp\"),\n\t\t},\n\t}\n\ts.writeStream = &WriteStreamSRTCP{s}\n\n\terr := s.session.start(\n\t\tconfig.Keys.LocalMasterKey, config.Keys.LocalMasterSalt,\n\t\tconfig.Keys.RemoteMasterKey, config.Keys.RemoteMasterSalt,\n\t\tconfig.Profile,\n\t\ts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\n\/\/ OpenWriteStream returns the global write stream for the Session\nfunc (s *SessionSRTCP) OpenWriteStream() (*WriteStreamSRTCP, error) {\n\treturn s.writeStream, nil\n}\n\n\/\/ OpenReadStream opens a read stream for the given SSRC, it can be used\n\/\/ if you want a certain SSRC, but don't want to wait for AcceptStream\nfunc (s *SessionSRTCP) OpenReadStream(SSRC uint32) (*ReadStreamSRTCP, error) {\n\tr, _ := s.session.getOrCreateReadStream(SSRC, s, newReadStreamSRTCP)\n\n\tif readStream, ok := r.(*ReadStreamSRTCP); ok {\n\t\treturn readStream, nil\n\t}\n\treturn nil, fmt.Errorf(\"failed to open ReadStreamSRCTP, type assertion failed\")\n}\n\n\/\/ AcceptStream returns a stream to handle RTCP for a single SSRC\nfunc (s *SessionSRTCP) AcceptStream() (*ReadStreamSRTCP, uint32, error) {\n\tstream, ok := <-s.newStream\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"SessionSRTCP has been closed\")\n\t}\n\n\treadStream, ok := stream.(*ReadStreamSRTCP)\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"newStream was found, but failed type assertion\")\n\t}\n\n\treturn readStream, stream.GetSSRC(), nil\n}\n\n\/\/ Close ends the session\nfunc (s *SessionSRTCP) Close() error {\n\treturn s.session.close()\n}\n\n\/\/ Private\n\nfunc (s *SessionSRTCP) write(buf []byte) (int, error) {\n\tif _, ok := <-s.session.started; ok {\n\t\treturn 0, fmt.Errorf(\"started channel used incorrectly, should only be closed\")\n\t}\n\n\ts.session.localContextMutex.Lock()\n\tdefer s.session.localContextMutex.Unlock()\n\n\tencrypted, err := s.localContext.EncryptRTCP(nil, buf, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn s.session.nextConn.Write(encrypted)\n}\n\n\/\/create a list of Destination SSRCs\n\/\/that's a superset of all Destinations in the slice.\nfunc destinationSSRC(pkts []rtcp.Packet) []uint32 {\n\tssrcSet := make(map[uint32]struct{})\n\tfor _, p := range pkts {\n\t\tfor _, ssrc := range p.DestinationSSRC() {\n\t\t\tssrcSet[ssrc] = struct{}{}\n\t\t}\n\t}\n\n\tout := make([]uint32, len(ssrcSet))\n\n\tfor ssrc := range ssrcSet {\n\t\tout = append(out, ssrc)\n\t}\n\n\treturn out\n}\n\nfunc (s *SessionSRTCP) decrypt(buf []byte) error {\n\tdecrypted, err := s.remoteContext.DecryptRTCP(buf, buf, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkt, err := rtcp.Unmarshal(decrypted)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ssrc := range destinationSSRC(pkt) {\n\t\tr, isNew := s.session.getOrCreateReadStream(ssrc, s, newReadStreamSRTCP)\n\t\tif r == nil {\n\t\t\treturn nil \/\/ Session has been closed\n\t\t} else if isNew {\n\t\t\ts.session.newStream <- r \/\/ Notify AcceptStream\n\t\t}\n\n\t\treadStream, ok := r.(*ReadStreamSRTCP)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to get\/create ReadStreamSRTP\")\n\t\t}\n\n\t\t_, err = readStream.write(decrypted)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Create destinationSSRC without initial values<commit_after>package srtp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/pion\/logging\"\n\t\"github.com\/pion\/rtcp\"\n)\n\n\/\/ SessionSRTCP implements io.ReadWriteCloser and provides a bi-directional SRTCP session\n\/\/ SRTCP itself does not have a design like this, but it is common in most applications\n\/\/ for local\/remote to each have their own keying material. This provides those patterns\n\/\/ instead of making everyone re-implement\ntype SessionSRTCP struct {\n\tsession\n\twriteStream *WriteStreamSRTCP\n}\n\n\/\/ NewSessionSRTCP creates a SRTCP session using conn as the underlying transport.\nfunc NewSessionSRTCP(conn net.Conn, config *Config) (*SessionSRTCP, error) {\n\tif config == nil {\n\t\treturn nil, errors.New(\"no config provided\")\n\t}\n\n\tloggerFactory := config.LoggerFactory\n\tif loggerFactory == nil {\n\t\tloggerFactory = logging.NewDefaultLoggerFactory()\n\t}\n\n\ts := &SessionSRTCP{\n\t\tsession: session{\n\t\t\tnextConn: conn,\n\t\t\treadStreams: map[uint32]readStream{},\n\t\t\tnewStream: make(chan readStream),\n\t\t\tstarted: make(chan interface{}),\n\t\t\tclosed: make(chan interface{}),\n\t\t\tlog: loggerFactory.NewLogger(\"srtp\"),\n\t\t},\n\t}\n\ts.writeStream = &WriteStreamSRTCP{s}\n\n\terr := s.session.start(\n\t\tconfig.Keys.LocalMasterKey, config.Keys.LocalMasterSalt,\n\t\tconfig.Keys.RemoteMasterKey, config.Keys.RemoteMasterSalt,\n\t\tconfig.Profile,\n\t\ts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\n\/\/ OpenWriteStream returns the global write stream for the Session\nfunc (s *SessionSRTCP) OpenWriteStream() (*WriteStreamSRTCP, error) {\n\treturn s.writeStream, nil\n}\n\n\/\/ OpenReadStream opens a read stream for the given SSRC, it can be used\n\/\/ if you want a certain SSRC, but don't want to wait for AcceptStream\nfunc (s *SessionSRTCP) OpenReadStream(SSRC uint32) (*ReadStreamSRTCP, error) {\n\tr, _ := s.session.getOrCreateReadStream(SSRC, s, newReadStreamSRTCP)\n\n\tif readStream, ok := r.(*ReadStreamSRTCP); ok {\n\t\treturn readStream, nil\n\t}\n\treturn nil, fmt.Errorf(\"failed to open ReadStreamSRCTP, type assertion failed\")\n}\n\n\/\/ AcceptStream returns a stream to handle RTCP for a single SSRC\nfunc (s *SessionSRTCP) AcceptStream() (*ReadStreamSRTCP, uint32, error) {\n\tstream, ok := <-s.newStream\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"SessionSRTCP has been closed\")\n\t}\n\n\treadStream, ok := stream.(*ReadStreamSRTCP)\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"newStream was found, but failed type assertion\")\n\t}\n\n\treturn readStream, stream.GetSSRC(), nil\n}\n\n\/\/ Close ends the session\nfunc (s *SessionSRTCP) Close() error {\n\treturn s.session.close()\n}\n\n\/\/ Private\n\nfunc (s *SessionSRTCP) write(buf []byte) (int, error) {\n\tif _, ok := <-s.session.started; ok {\n\t\treturn 0, fmt.Errorf(\"started channel used incorrectly, should only be closed\")\n\t}\n\n\ts.session.localContextMutex.Lock()\n\tdefer s.session.localContextMutex.Unlock()\n\n\tencrypted, err := s.localContext.EncryptRTCP(nil, buf, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn s.session.nextConn.Write(encrypted)\n}\n\n\/\/create a list of Destination SSRCs\n\/\/that's a superset of all Destinations in the slice.\nfunc destinationSSRC(pkts []rtcp.Packet) []uint32 {\n\tssrcSet := make(map[uint32]struct{})\n\tfor _, p := range pkts {\n\t\tfor _, ssrc := range p.DestinationSSRC() {\n\t\t\tssrcSet[ssrc] = struct{}{}\n\t\t}\n\t}\n\n\tout := make([]uint32, 0, len(ssrcSet))\n\tfor ssrc := range ssrcSet {\n\t\tout = append(out, ssrc)\n\t}\n\n\treturn out\n}\n\nfunc (s *SessionSRTCP) decrypt(buf []byte) error {\n\tdecrypted, err := s.remoteContext.DecryptRTCP(buf, buf, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkt, err := rtcp.Unmarshal(decrypted)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ssrc := range destinationSSRC(pkt) {\n\t\tr, isNew := s.session.getOrCreateReadStream(ssrc, s, newReadStreamSRTCP)\n\t\tif r == nil {\n\t\t\treturn nil \/\/ Session has been closed\n\t\t} else if isNew {\n\t\t\ts.session.newStream <- r \/\/ Notify AcceptStream\n\t\t}\n\n\t\treadStream, ok := r.(*ReadStreamSRTCP)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to get\/create ReadStreamSRTP\")\n\t\t}\n\n\t\t_, err = readStream.write(decrypted)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mail\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\/ioutil\"\n\t\"net\/textproto\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc parseMailFromString(source string) (*MimeEntity, error) {\n\treturn parseMailFromStringWithGpg(source, nil)\n}\n\nfunc parseMailFromStringWithGpg(source string, gpg GpgUtility) (*MimeEntity, error) {\n\tparser := Parser{gpg}\n\treader := strings.NewReader(source)\n\treturn parser.ParseMail(reader)\n}\n\nfunc loadTestMail(fileName string) ([]byte, error) {\n\tinput, err := os.Open(\"..\/test\/mails\/\" + fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() { input.Close() }()\n\tdata, err := ioutil.ReadAll(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tregex := regexp.MustCompile(\"\\r?\\n\")\n\treturn regex.ReplaceAll(data, []byte(\"\\r\\n\")), nil\n}\n\nfunc parseMailFromFile(fileName string) (*MimeEntity, error) {\n\treturn parseMailFromFileWithGpg(fileName, nil)\n}\n\nfunc parseMailFromFileWithGpg(fileName string, gpg GpgUtility) (*MimeEntity, error) {\n\tdata, err := loadTestMail(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparser := Parser{gpg}\n\treturn parser.ParseMail(bytes.NewReader(data))\n}\n\ntype MultipartBuilder struct {\n\tBoundary string\n\tBuffer bytes.Buffer\n}\n\nfunc createMultipart(boundary string) *MultipartBuilder {\n\treturn &MultipartBuilder{Boundary: boundary}\n}\n\nfunc (builder *MultipartBuilder) withPart(contentType string,\n\ttext string, header textproto.MIMEHeader) *MultipartBuilder {\n\tbuilder.Buffer.WriteString(fmt.Sprintf(\"--%s\\r\\n\", builder.Boundary))\n\tif contentType != \"\" {\n\t\tbuilder.Buffer.WriteString(fmt.Sprintf(\"Content-Type: %s\\r\\n\", contentType))\n\t}\n\tif header != nil {\n\t\tfor key, values := range header {\n\t\t\tfor _, value := range values {\n\t\t\t\tbuilder.Buffer.WriteString(fmt.Sprintf(\"%s: %s\\r\\n\", key, value))\n\t\t\t}\n\t\t}\n\t}\n\tbuilder.Buffer.WriteString(\"\\r\\n\")\n\tbuilder.Buffer.WriteString(text)\n\tbuilder.Buffer.WriteString(\"\\r\\n\")\n\treturn builder\n}\n\nfunc (builder *MultipartBuilder) build() string {\n\tbuilder.Buffer.WriteString(fmt.Sprintf(\"--%s--\\r\\n\", builder.Boundary))\n\treturn builder.Buffer.String()\n}\n\nfunc TestGetMimeMediaTypeFromHeader(t *testing.T) {\n\theader := textproto.MIMEHeader{\"Content-Type\": {\"foo; param=bar\", \"stuff\"}}\n\tmediaType, err := getMimeMediaTypeFromHeader(header, \"Content-Type\", \"\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo\", mediaType.Value)\n\tassert.Equal(t, 1, len(mediaType.Params))\n\tassert.Equal(t, \"bar\", mediaType.Params[\"param\"])\n\tmediaType, err = getMimeMediaTypeFromHeader(header, \"key\", \"default\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"default\", mediaType.Value)\n}\n\nfunc TestParseHeaders(t *testing.T) {\n\tmail, err := parseMailFromFile(\"plaintext.eml\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, 8, len(mail.Header))\n\tassert.Equal(t, \"Basic test\", mail.Header[\"Subject\"][0])\n\tassert.Equal(t, \"text\/plain\", mail.Header[\"Content-Type\"][0])\n}\n\nfunc TestParseEmptyMail(t *testing.T) {\n\tmail, err := parseMailFromString(\"\\r\\n\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, 0, len(mail.Parts))\n\tassert.Equal(t, 0, len(mail.Header))\n\tassert.False(t, mail.IsAttachment)\n\tassert.False(t, mail.IsSigned)\n}\n\nfunc TestParseText(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", nil}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, strings.NewReader(\"Hello world!\"))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"Hello world!\", string(entity.Content))\n}\n\nfunc TestParseTextWithCharset(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", map[string]string{\"charset\": \"ISO-8859-1\"}}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, strings.NewReader(\"\\xe0\\x20\\x63\\xf4\\x74\\xe9\"))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"à côté\", string(entity.Content))\n}\n\nfunc TestParseTextWithInvalidEncoding(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", map[string]string{\"charset\": \"utf-8\"}}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, bytes.NewReader([]byte{0xC0}))\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"\\uFFFD\", string(entity.Content))\n}\n\nfunc TestParseMultipart(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Part0\", nil).\n\t\twithPart(\"text\/plain\", \"Part1\", nil).\n\t\twithPart(\"text\/plain\", \"Part2\", nil).\n\t\tbuild()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, len(entity.Parts))\n\tassert.Equal(t, \"Part0\", string(entity.Parts[0].Content))\n\tassert.Equal(t, \"Part1\", string(entity.Parts[1].Content))\n\tassert.Equal(t, \"Part2\", string(entity.Parts[2].Content))\n}\n\nfunc TestParseMultipartWithoutBoundary(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").withPart(\"text\/plain\", \"Hello world!\", nil).build()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NotNil(t, err)\n\tassert.Nil(t, entity)\n}\n\nfunc TestParseMultipartNested(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\tinner := createMultipart(\"inner\").withPart(\"text\/plain\", \"Nested text\", nil).build()\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"multipart\/mixed; boundary=inner\", inner, nil).\n\t\tbuild()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, 1, len(entity.Parts))\n\tinnerPart := entity.Parts[0]\n\tassert.Equal(t, 1, len(innerPart.Header))\n\tassert.Equal(t, \"multipart\/mixed; boundary=inner\", innerPart.Header.Get(\"Content-Type\"))\n\tassert.Equal(t, 1, len(innerPart.Parts))\n\tassert.Equal(t, \"Nested text\", string(innerPart.Parts[0].Content))\n}\n\nfunc TestParseMultipartInvalid(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Hello\", nil).\n\t\tbuild()\n\ttext = text[:len(text)-4] \/\/ Remove final boundary end '--' (and \\r\\n).\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.Nil(t, entity)\n\tassert.NotNil(t, err)\n}\n\nfunc TestParseMultipartWithQuotedPrintable(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\tinnerHeader := textproto.MIMEHeader{}\n\tinnerHeader.Set(\"Content-Type\", \"text\/plain; charset=ISO-8859-1\")\n\tinnerHeader.Set(\"Content-Transfer-Encoding\", \"quoted-printable\")\n\ttext := createMultipart(\"frontier\").withPart(\"\", \"B=E4renf=FC=DFe\", innerHeader).build()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"Bärenfüße\", string(entity.Parts[0].Content))\n}\n\ntype MockGpg struct {\n\tt *testing.T\n\texpectedMicAlgorithm, expectedData, expectedSignature string\n\tchecked bool\n}\n\nfunc (gpg *MockGpg) CheckSignature(micAlgorithm string, data []byte, signature []byte) bool {\n\tassert.Equal(gpg.t, gpg.expectedMicAlgorithm, micAlgorithm)\n\tassert.Equal(gpg.t, gpg.expectedData, string(data))\n\tassert.Equal(gpg.t, gpg.expectedSignature, string(signature))\n\tgpg.checked = true\n\treturn true\n}\n\nfunc TestParseMultipartSigned(t *testing.T) {\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Hello there!\", nil).\n\t\twithPart(\"application\/pgp-signature\", \"SIGNATURE\", nil).\n\t\tbuild()\n\n\texpectedSignedPart := \"Content-Type: text\/plain\\r\\n\\r\\nHello there!\\r\\n\"\n\tmockGpg := &MockGpg{t, \"pgp-sha1\", expectedSignedPart, \"SIGNATURE\", false}\n\tparser := Parser{mockGpg}\n\tcontentType := MimeMediaType{\"multipart\/signed\", map[string]string{\"boundary\": \"frontier\", \"micalg\": \"pgp-sha1\"}}\n\tmail, err := parser.parseMultipartSigned(contentType, textproto.MIMEHeader{}, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.True(t, mail.IsSigned)\n\tassert.True(t, mockGpg.checked)\n\tassert.Equal(t, 2, len(mail.Parts))\n}\n\nfunc TestPlainText(t *testing.T) {\n\tmail, err := parseMailFromFile(\"plaintext.eml\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"This is some nice plain text!\\r\\n\", string(mail.Content))\n}\n\nfunc TestMultipartSigned(t *testing.T) {\n\tsignedPart := \"Content-Type: text\/plain\\r\\n\\r\\nHello this is me!\\r\\n\\r\\n\"\n\tmockGpg := &MockGpg{t, \"pgp-sha1\", signedPart, \"SIGNATURE\", false}\n\tmail, err := parseMailFromFileWithGpg(\"signed_multipart_simple.eml\", mockGpg)\n\tassert.NoError(t, err)\n\tassert.True(t, mockGpg.checked)\n\tassert.Equal(t, 2, len(mail.Parts))\n\tassert.True(t, mail.IsSigned)\n}\n\nfunc TestFindAttachment(t *testing.T) {\n\tmail, err := parseMailFromFile(\"attachment.eml\")\n\tassert.NoError(t, err)\n\tdata := mail.FindAttachment(\"application\/octet-stream\")\n\tassert.Equal(t, \"This is a PDF file.\", string(data))\n\tdata = mail.FindAttachment(\"image\/png\")\n\tassert.Nil(t, data)\n}\n<commit_msg>Added a comment.<commit_after>package mail\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\/ioutil\"\n\t\"net\/textproto\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc parseMailFromString(source string) (*MimeEntity, error) {\n\treturn parseMailFromStringWithGpg(source, nil)\n}\n\nfunc parseMailFromStringWithGpg(source string, gpg GpgUtility) (*MimeEntity, error) {\n\tparser := Parser{gpg}\n\treader := strings.NewReader(source)\n\treturn parser.ParseMail(reader)\n}\n\nfunc loadTestMail(fileName string) ([]byte, error) {\n\tinput, err := os.Open(\"..\/test\/mails\/\" + fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() { input.Close() }()\n\tdata, err := ioutil.ReadAll(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Do not rely on correct line endings in test files, ensure them here.\n\tregex := regexp.MustCompile(\"\\r?\\n\")\n\treturn regex.ReplaceAll(data, []byte(\"\\r\\n\")), nil\n}\n\nfunc parseMailFromFile(fileName string) (*MimeEntity, error) {\n\treturn parseMailFromFileWithGpg(fileName, nil)\n}\n\nfunc parseMailFromFileWithGpg(fileName string, gpg GpgUtility) (*MimeEntity, error) {\n\tdata, err := loadTestMail(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparser := Parser{gpg}\n\treturn parser.ParseMail(bytes.NewReader(data))\n}\n\ntype MultipartBuilder struct {\n\tBoundary string\n\tBuffer bytes.Buffer\n}\n\nfunc createMultipart(boundary string) *MultipartBuilder {\n\treturn &MultipartBuilder{Boundary: boundary}\n}\n\nfunc (builder *MultipartBuilder) withPart(contentType string,\n\ttext string, header textproto.MIMEHeader) *MultipartBuilder {\n\tbuilder.Buffer.WriteString(fmt.Sprintf(\"--%s\\r\\n\", builder.Boundary))\n\tif contentType != \"\" {\n\t\tbuilder.Buffer.WriteString(fmt.Sprintf(\"Content-Type: %s\\r\\n\", contentType))\n\t}\n\tif header != nil {\n\t\tfor key, values := range header {\n\t\t\tfor _, value := range values {\n\t\t\t\tbuilder.Buffer.WriteString(fmt.Sprintf(\"%s: %s\\r\\n\", key, value))\n\t\t\t}\n\t\t}\n\t}\n\tbuilder.Buffer.WriteString(\"\\r\\n\")\n\tbuilder.Buffer.WriteString(text)\n\tbuilder.Buffer.WriteString(\"\\r\\n\")\n\treturn builder\n}\n\nfunc (builder *MultipartBuilder) build() string {\n\tbuilder.Buffer.WriteString(fmt.Sprintf(\"--%s--\\r\\n\", builder.Boundary))\n\treturn builder.Buffer.String()\n}\n\nfunc TestGetMimeMediaTypeFromHeader(t *testing.T) {\n\theader := textproto.MIMEHeader{\"Content-Type\": {\"foo; param=bar\", \"stuff\"}}\n\tmediaType, err := getMimeMediaTypeFromHeader(header, \"Content-Type\", \"\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"foo\", mediaType.Value)\n\tassert.Equal(t, 1, len(mediaType.Params))\n\tassert.Equal(t, \"bar\", mediaType.Params[\"param\"])\n\tmediaType, err = getMimeMediaTypeFromHeader(header, \"key\", \"default\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"default\", mediaType.Value)\n}\n\nfunc TestParseHeaders(t *testing.T) {\n\tmail, err := parseMailFromFile(\"plaintext.eml\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, 8, len(mail.Header))\n\tassert.Equal(t, \"Basic test\", mail.Header[\"Subject\"][0])\n\tassert.Equal(t, \"text\/plain\", mail.Header[\"Content-Type\"][0])\n}\n\nfunc TestParseEmptyMail(t *testing.T) {\n\tmail, err := parseMailFromString(\"\\r\\n\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, 0, len(mail.Parts))\n\tassert.Equal(t, 0, len(mail.Header))\n\tassert.False(t, mail.IsAttachment)\n\tassert.False(t, mail.IsSigned)\n}\n\nfunc TestParseText(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", nil}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, strings.NewReader(\"Hello world!\"))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"Hello world!\", string(entity.Content))\n}\n\nfunc TestParseTextWithCharset(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", map[string]string{\"charset\": \"ISO-8859-1\"}}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, strings.NewReader(\"\\xe0\\x20\\x63\\xf4\\x74\\xe9\"))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"à côté\", string(entity.Content))\n}\n\nfunc TestParseTextWithInvalidEncoding(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"text\/plain\", map[string]string{\"charset\": \"utf-8\"}}\n\theader := textproto.MIMEHeader{}\n\tentity, err := parser.parseText(contentType, header, bytes.NewReader([]byte{0xC0}))\n\tassert.Nil(t, err)\n\tassert.Equal(t, \"\\uFFFD\", string(entity.Content))\n}\n\nfunc TestParseMultipart(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Part0\", nil).\n\t\twithPart(\"text\/plain\", \"Part1\", nil).\n\t\twithPart(\"text\/plain\", \"Part2\", nil).\n\t\tbuild()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, len(entity.Parts))\n\tassert.Equal(t, \"Part0\", string(entity.Parts[0].Content))\n\tassert.Equal(t, \"Part1\", string(entity.Parts[1].Content))\n\tassert.Equal(t, \"Part2\", string(entity.Parts[2].Content))\n}\n\nfunc TestParseMultipartWithoutBoundary(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").withPart(\"text\/plain\", \"Hello world!\", nil).build()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NotNil(t, err)\n\tassert.Nil(t, entity)\n}\n\nfunc TestParseMultipartNested(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\tinner := createMultipart(\"inner\").withPart(\"text\/plain\", \"Nested text\", nil).build()\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"multipart\/mixed; boundary=inner\", inner, nil).\n\t\tbuild()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, 1, len(entity.Parts))\n\tinnerPart := entity.Parts[0]\n\tassert.Equal(t, 1, len(innerPart.Header))\n\tassert.Equal(t, \"multipart\/mixed; boundary=inner\", innerPart.Header.Get(\"Content-Type\"))\n\tassert.Equal(t, 1, len(innerPart.Parts))\n\tassert.Equal(t, \"Nested text\", string(innerPart.Parts[0].Content))\n}\n\nfunc TestParseMultipartInvalid(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Hello\", nil).\n\t\tbuild()\n\ttext = text[:len(text)-4] \/\/ Remove final boundary end '--' (and \\r\\n).\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.Nil(t, entity)\n\tassert.NotNil(t, err)\n}\n\nfunc TestParseMultipartWithQuotedPrintable(t *testing.T) {\n\tparser := &Parser{Gpg: nil}\n\tcontentType := MimeMediaType{\"multipart\/mixed\", map[string]string{\"boundary\": \"frontier\"}}\n\theader := textproto.MIMEHeader{}\n\tinnerHeader := textproto.MIMEHeader{}\n\tinnerHeader.Set(\"Content-Type\", \"text\/plain; charset=ISO-8859-1\")\n\tinnerHeader.Set(\"Content-Transfer-Encoding\", \"quoted-printable\")\n\ttext := createMultipart(\"frontier\").withPart(\"\", \"B=E4renf=FC=DFe\", innerHeader).build()\n\tentity, err := parser.parseMultipart(contentType, header, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"Bärenfüße\", string(entity.Parts[0].Content))\n}\n\ntype MockGpg struct {\n\tt *testing.T\n\texpectedMicAlgorithm, expectedData, expectedSignature string\n\tchecked bool\n}\n\nfunc (gpg *MockGpg) CheckSignature(micAlgorithm string, data []byte, signature []byte) bool {\n\tassert.Equal(gpg.t, gpg.expectedMicAlgorithm, micAlgorithm)\n\tassert.Equal(gpg.t, gpg.expectedData, string(data))\n\tassert.Equal(gpg.t, gpg.expectedSignature, string(signature))\n\tgpg.checked = true\n\treturn true\n}\n\nfunc TestParseMultipartSigned(t *testing.T) {\n\ttext := createMultipart(\"frontier\").\n\t\twithPart(\"text\/plain\", \"Hello there!\", nil).\n\t\twithPart(\"application\/pgp-signature\", \"SIGNATURE\", nil).\n\t\tbuild()\n\n\texpectedSignedPart := \"Content-Type: text\/plain\\r\\n\\r\\nHello there!\\r\\n\"\n\tmockGpg := &MockGpg{t, \"pgp-sha1\", expectedSignedPart, \"SIGNATURE\", false}\n\tparser := Parser{mockGpg}\n\tcontentType := MimeMediaType{\"multipart\/signed\", map[string]string{\"boundary\": \"frontier\", \"micalg\": \"pgp-sha1\"}}\n\tmail, err := parser.parseMultipartSigned(contentType, textproto.MIMEHeader{}, strings.NewReader(text))\n\tassert.NoError(t, err)\n\tassert.True(t, mail.IsSigned)\n\tassert.True(t, mockGpg.checked)\n\tassert.Equal(t, 2, len(mail.Parts))\n}\n\nfunc TestPlainText(t *testing.T) {\n\tmail, err := parseMailFromFile(\"plaintext.eml\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"This is some nice plain text!\\r\\n\", string(mail.Content))\n}\n\nfunc TestMultipartSigned(t *testing.T) {\n\tsignedPart := \"Content-Type: text\/plain\\r\\n\\r\\nHello this is me!\\r\\n\\r\\n\"\n\tmockGpg := &MockGpg{t, \"pgp-sha1\", signedPart, \"SIGNATURE\", false}\n\tmail, err := parseMailFromFileWithGpg(\"signed_multipart_simple.eml\", mockGpg)\n\tassert.NoError(t, err)\n\tassert.True(t, mockGpg.checked)\n\tassert.Equal(t, 2, len(mail.Parts))\n\tassert.True(t, mail.IsSigned)\n}\n\nfunc TestFindAttachment(t *testing.T) {\n\tmail, err := parseMailFromFile(\"attachment.eml\")\n\tassert.NoError(t, err)\n\tdata := mail.FindAttachment(\"application\/octet-stream\")\n\tassert.Equal(t, \"This is a PDF file.\", string(data))\n\tdata = mail.FindAttachment(\"image\/png\")\n\tassert.Nil(t, data)\n}\n<|endoftext|>"} {"text":"<commit_before>package sgload\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tsgreplicate \"github.com\/couchbaselabs\/sg-replicate\"\n\t\"github.com\/peterbourgon\/g2s\"\n)\n\ntype Reader struct {\n\tAgent\n\tSGChannels []string \/\/ The Sync Gateway channels this reader is assigned to pull from\n\tNumDocsExpected int \/\/ The total number of docs this reader is expected to pull\n\tBatchSize int \/\/ The number of docs to pull in batch (_changes feed and bulk_get)\n\tStatsdClient *g2s.Statsd \/\/ The statsd client instance to use to push stats to statdsd\n}\n\nfunc NewReader(wg *sync.WaitGroup, ID int, u UserCred, d DataStore, batchsize int) *Reader {\n\n\treturn &Reader{\n\t\tAgent: Agent{\n\t\t\tFinishedWg: wg,\n\t\t\tUserCred: u,\n\t\t\tID: ID,\n\t\t\tDataStore: d,\n\t\t\tBatchSize: batchsize,\n\t\t},\n\t}\n}\n\nfunc (r *Reader) SetChannels(sgChannels []string) {\n\tr.SGChannels = sgChannels\n}\n\nfunc (r *Reader) SetNumDocsExpected(n int) {\n\tr.NumDocsExpected = n\n}\n\nfunc (r *Reader) SetBatchSize(batchSize int) {\n\tr.BatchSize = batchSize\n}\n\nfunc (r *Reader) SetStatsdClient(statsdClient *g2s.Statsd) {\n\tr.StatsdClient = statsdClient\n}\n\nfunc (r *Reader) Run() {\n\n\tnumDocsPulled := 0\n\tsince := StringSincer{}\n\tresult := pullMoreDocsResult{}\n\tvar err error\n\tvar timeStartedCreatingDocs time.Time\n\n\tdefer r.FinishedWg.Done()\n\tdefer func() {\n\t\tdelta := time.Since(timeStartedCreatingDocs)\n\t\tr.StatsdClient.Timing(\n\t\t\tstatsdSampleRate,\n\t\t\t\"get_all_documents\",\n\t\t\tdelta,\n\t\t)\n\t}()\n\n\tr.createSGUserIfNeeded()\n\n\ttimeStartedCreatingDocs = time.Now()\n\n\tfor {\n\n\t\tif r.isFinished(numDocsPulled) {\n\t\t\tlogger.Info(\"Reader finished\", \"agent.ID\", r.ID, \"numDocs\", r.NumDocsExpected)\n\t\t\tbreak\n\t\t}\n\t\tresult, err = r.pullMoreDocs(since)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Got error getting changes: %v\", err))\n\t\t}\n\t\tsince = result.since\n\t\tnumDocsPulled += result.numDocsPulled\n\n\t}\n\n}\n\nfunc (r *Reader) createSGUserIfNeeded() {\n\tif r.CreateDataStoreUser == true {\n\n\t\tlogger.Info(\"Creating reader SG user\", \"username\", r.UserCred.Username, \"channels\", r.SGChannels)\n\n\t\tif err := r.DataStore.CreateUser(r.UserCred, r.SGChannels); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error creating user in datastore. User: %v, Err: %v\", r.UserCred, err))\n\t\t}\n\t}\n\n}\n\nfunc (r *Reader) isFinished(numDocsPulled int) bool {\n\n\tlogger.Info(\"Reader checking if finished\", \"agent.ID\", r.ID, \"numDocsPulled\", numDocsPulled, \"numDocsExpected\", r.NumDocsExpected)\n\n\tswitch {\n\tcase numDocsPulled > r.NumDocsExpected:\n\t\tpanic(fmt.Sprintf(\"Reader was only expected to pull %d docs, but pulled %d.\", r.NumDocsExpected, numDocsPulled))\n\tcase numDocsPulled == r.NumDocsExpected:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n\n}\n\ntype pullMoreDocsResult struct {\n\tsince StringSincer\n\tnumDocsPulled int\n}\n\nfunc (r *Reader) pullMoreDocs(since Sincer) (pullMoreDocsResult, error) {\n\n\t\/\/ Create a retry sleeper which controls how many times to retry\n\t\/\/ and how long to wait in between retries\n\tnumRetries := 7\n\tsleepMsBetweenRetry := 500\n\tretrySleeper := CreateDoublingSleeperFunc(numRetries, sleepMsBetweenRetry)\n\n\t\/\/ Create retry worker that knows how to do actual work\n\tretryWorker := func() (shouldRetry bool, err error, value interface{}) {\n\n\t\tresult := pullMoreDocsResult{}\n\n\t\tchanges, newSince, err := r.DataStore.Changes(since, r.BatchSize)\n\t\tif err != nil {\n\t\t\treturn false, err, result\n\t\t}\n\n\t\tif len(changes.Results) == 0 {\n\t\t\tlogger.Warn(\"No changes pulled\", \"agent.ID\", r.ID, \"since\", since)\n\t\t\treturn true, nil, result\n\t\t}\n\t\tif newSince.Equals(since) {\n\t\t\tlogger.Warn(\"Since value should have changed\", \"agent.ID\", r.ID, \"since\", since, \"newsince\", newSince)\n\t\t\treturn true, nil, result\n\t\t}\n\n\t\t\/\/ Strip out any changes with id \"id\":\"_user\/*\"\n\t\t\/\/ since they are user docs and we don't care about them\n\t\tchanges = stripUserDocChanges(changes)\n\n\t\tbulkGetRequest := getBulkGetRequest(changes)\n\n\t\terr = r.DataStore.BulkGetDocuments(bulkGetRequest)\n\t\tif err != nil {\n\t\t\treturn false, err, result\n\t\t}\n\n\t\tresult.since = newSince.(StringSincer)\n\t\tresult.numDocsPulled = len(bulkGetRequest.Docs)\n\t\treturn false, nil, result\n\n\t}\n\n\t\/\/ Invoke the retry worker \/ sleeper combo in a loop\n\terr, workerReturnVal := RetryLoop(\"pullMoreDocs\", retryWorker, retrySleeper)\n\tif err != nil {\n\t\treturn pullMoreDocsResult{}, err\n\t}\n\n\treturn workerReturnVal.(pullMoreDocsResult), nil\n\n}\n\nfunc getBulkGetRequest(changes sgreplicate.Changes) sgreplicate.BulkGetRequest {\n\n\tbulkDocsRequest := sgreplicate.BulkGetRequest{}\n\tdocs := []sgreplicate.DocumentRevisionPair{}\n\tfor _, change := range changes.Results {\n\t\tdocRevPair := sgreplicate.DocumentRevisionPair{}\n\t\tdocRevPair.Id = change.Id\n\t\tdocRevPair.Revision = change.ChangedRevs[0].Revision\n\t\tdocs = append(docs, docRevPair)\n\t}\n\tbulkDocsRequest.Docs = docs\n\treturn bulkDocsRequest\n\n}\n\nfunc stripUserDocChanges(changes sgreplicate.Changes) (changesStripped sgreplicate.Changes) {\n\tchangesStripped.LastSequence = changes.LastSequence\n\n\tfor _, change := range changes.Results {\n\t\tif strings.Contains(change.Id, \"_user\") {\n\t\t\tcontinue\n\t\t}\n\t\tchangesStripped.Results = append(changesStripped.Results, change)\n\n\t}\n\n\treturn changesStripped\n}\n\n\/\/ A retry sleeper is called back by the retry loop and passed\n\/\/ the current retryCount, and should return the amount of milliseconds\n\/\/ that the retry should sleep.\ntype RetrySleeper func(retryCount int) (shouldContinue bool, timeTosleepMs int)\n\n\/\/ A RetryWorker encapsulates the work being done in a Retry Loop. The shouldRetry\n\/\/ return value determines whether the worker will retry, regardless of the err value.\n\/\/ If the worker has exceeded it's retry attempts, then it will not be called again\n\/\/ even if it returns shouldRetry = true.\ntype RetryWorker func() (shouldRetry bool, err error, value interface{})\n\nfunc RetryLoop(description string, worker RetryWorker, sleeper RetrySleeper) (error, interface{}) {\n\n\tnumAttempts := 1\n\n\tfor {\n\t\tshouldRetry, err, value := worker()\n\t\tif !shouldRetry {\n\t\t\tif err != nil {\n\t\t\t\treturn err, nil\n\t\t\t}\n\t\t\treturn nil, value\n\t\t}\n\t\tshouldContinue, sleepMs := sleeper(numAttempts)\n\t\tif !shouldContinue {\n\t\t\tif err == nil {\n\t\t\t\terr = fmt.Errorf(\"RetryLoop for %v giving up after %v attempts\", description, numAttempts)\n\t\t\t}\n\t\t\tlogger.Warn(\"RetryLoop giving up\", \"description\", description, \"numAttempts\", numAttempts)\n\t\t\treturn err, value\n\t\t}\n\t\tlogger.Warn(\"RetryLoop will retry soon\", \"description\", description, \"sleepMs\", sleepMs)\n\n\t\t<-time.After(time.Millisecond * time.Duration(sleepMs))\n\n\t\tnumAttempts += 1\n\n\t}\n}\n\n\/\/ Create a RetrySleeper that will double the retry time on every iteration and\n\/\/ use the given parameters\nfunc CreateDoublingSleeperFunc(maxNumAttempts, initialTimeToSleepMs int) RetrySleeper {\n\n\ttimeToSleepMs := initialTimeToSleepMs\n\n\tsleeper := func(numAttempts int) (bool, int) {\n\t\tif numAttempts > maxNumAttempts {\n\t\t\treturn false, -1\n\t\t}\n\t\tif numAttempts > 1 {\n\t\t\ttimeToSleepMs *= 2\n\t\t}\n\t\treturn true, timeToSleepMs\n\t}\n\treturn sleeper\n\n}\n<commit_msg>Add get_change_and_document<commit_after>package sgload\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tsgreplicate \"github.com\/couchbaselabs\/sg-replicate\"\n\t\"github.com\/peterbourgon\/g2s\"\n)\n\ntype Reader struct {\n\tAgent\n\tSGChannels []string \/\/ The Sync Gateway channels this reader is assigned to pull from\n\tNumDocsExpected int \/\/ The total number of docs this reader is expected to pull\n\tBatchSize int \/\/ The number of docs to pull in batch (_changes feed and bulk_get)\n\tStatsdClient *g2s.Statsd \/\/ The statsd client instance to use to push stats to statdsd\n}\n\nfunc NewReader(wg *sync.WaitGroup, ID int, u UserCred, d DataStore, batchsize int) *Reader {\n\n\treturn &Reader{\n\t\tAgent: Agent{\n\t\t\tFinishedWg: wg,\n\t\t\tUserCred: u,\n\t\t\tID: ID,\n\t\t\tDataStore: d,\n\t\t\tBatchSize: batchsize,\n\t\t},\n\t}\n}\n\nfunc (r *Reader) SetChannels(sgChannels []string) {\n\tr.SGChannels = sgChannels\n}\n\nfunc (r *Reader) SetNumDocsExpected(n int) {\n\tr.NumDocsExpected = n\n}\n\nfunc (r *Reader) SetBatchSize(batchSize int) {\n\tr.BatchSize = batchSize\n}\n\nfunc (r *Reader) SetStatsdClient(statsdClient *g2s.Statsd) {\n\tr.StatsdClient = statsdClient\n}\n\nfunc (r *Reader) Run() {\n\n\tnumDocsPulled := 0\n\tsince := StringSincer{}\n\tresult := pullMoreDocsResult{}\n\tvar err error\n\tvar timeStartedCreatingDocs time.Time\n\n\tdefer r.FinishedWg.Done()\n\tdefer func() {\n\t\t\/\/ Calculate how long it took for this reader to read\n\t\t\/\/ all of its docs\n\t\tdelta := time.Since(timeStartedCreatingDocs)\n\t\tr.StatsdClient.Timing(\n\t\t\tstatsdSampleRate,\n\t\t\t\"get_all_documents\",\n\t\t\tdelta,\n\t\t)\n\t}()\n\tdefer func() {\n\t\t\/\/ Calculate the average time it took to read each doc from\n\t\t\/\/ the changes feed and the doc itself\n\t\tdelta := time.Since(timeStartedCreatingDocs)\n\t\tdelta = time.Duration(int64(delta) \/ int64(numDocsPulled))\n\t\tr.StatsdClient.Timing(\n\t\t\tstatsdSampleRate,\n\t\t\t\"get_change_and_document\",\n\t\t\tdelta,\n\t\t)\n\t}()\n\n\tr.createSGUserIfNeeded()\n\n\ttimeStartedCreatingDocs = time.Now()\n\n\tfor {\n\n\t\tif r.isFinished(numDocsPulled) {\n\t\t\tlogger.Info(\"Reader finished\", \"agent.ID\", r.ID, \"numDocs\", r.NumDocsExpected)\n\t\t\tbreak\n\t\t}\n\t\tresult, err = r.pullMoreDocs(since)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Got error getting changes: %v\", err))\n\t\t}\n\t\tsince = result.since\n\t\tnumDocsPulled += result.numDocsPulled\n\n\t}\n\n}\n\nfunc (r *Reader) createSGUserIfNeeded() {\n\tif r.CreateDataStoreUser == true {\n\n\t\tlogger.Info(\"Creating reader SG user\", \"username\", r.UserCred.Username, \"channels\", r.SGChannels)\n\n\t\tif err := r.DataStore.CreateUser(r.UserCred, r.SGChannels); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Error creating user in datastore. User: %v, Err: %v\", r.UserCred, err))\n\t\t}\n\t}\n\n}\n\nfunc (r *Reader) isFinished(numDocsPulled int) bool {\n\n\tlogger.Info(\"Reader checking if finished\", \"agent.ID\", r.ID, \"numDocsPulled\", numDocsPulled, \"numDocsExpected\", r.NumDocsExpected)\n\n\tswitch {\n\tcase numDocsPulled > r.NumDocsExpected:\n\t\tpanic(fmt.Sprintf(\"Reader was only expected to pull %d docs, but pulled %d.\", r.NumDocsExpected, numDocsPulled))\n\tcase numDocsPulled == r.NumDocsExpected:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n\n}\n\ntype pullMoreDocsResult struct {\n\tsince StringSincer\n\tnumDocsPulled int\n}\n\nfunc (r *Reader) pullMoreDocs(since Sincer) (pullMoreDocsResult, error) {\n\n\t\/\/ Create a retry sleeper which controls how many times to retry\n\t\/\/ and how long to wait in between retries\n\tnumRetries := 7\n\tsleepMsBetweenRetry := 500\n\tretrySleeper := CreateDoublingSleeperFunc(numRetries, sleepMsBetweenRetry)\n\n\t\/\/ Create retry worker that knows how to do actual work\n\tretryWorker := func() (shouldRetry bool, err error, value interface{}) {\n\n\t\tresult := pullMoreDocsResult{}\n\n\t\tchanges, newSince, err := r.DataStore.Changes(since, r.BatchSize)\n\t\tif err != nil {\n\t\t\treturn false, err, result\n\t\t}\n\n\t\tif len(changes.Results) == 0 {\n\t\t\tlogger.Warn(\"No changes pulled\", \"agent.ID\", r.ID, \"since\", since)\n\t\t\treturn true, nil, result\n\t\t}\n\t\tif newSince.Equals(since) {\n\t\t\tlogger.Warn(\"Since value should have changed\", \"agent.ID\", r.ID, \"since\", since, \"newsince\", newSince)\n\t\t\treturn true, nil, result\n\t\t}\n\n\t\t\/\/ Strip out any changes with id \"id\":\"_user\/*\"\n\t\t\/\/ since they are user docs and we don't care about them\n\t\tchanges = stripUserDocChanges(changes)\n\n\t\tbulkGetRequest := getBulkGetRequest(changes)\n\n\t\terr = r.DataStore.BulkGetDocuments(bulkGetRequest)\n\t\tif err != nil {\n\t\t\treturn false, err, result\n\t\t}\n\n\t\tresult.since = newSince.(StringSincer)\n\t\tresult.numDocsPulled = len(bulkGetRequest.Docs)\n\t\treturn false, nil, result\n\n\t}\n\n\t\/\/ Invoke the retry worker \/ sleeper combo in a loop\n\terr, workerReturnVal := RetryLoop(\"pullMoreDocs\", retryWorker, retrySleeper)\n\tif err != nil {\n\t\treturn pullMoreDocsResult{}, err\n\t}\n\n\treturn workerReturnVal.(pullMoreDocsResult), nil\n\n}\n\nfunc getBulkGetRequest(changes sgreplicate.Changes) sgreplicate.BulkGetRequest {\n\n\tbulkDocsRequest := sgreplicate.BulkGetRequest{}\n\tdocs := []sgreplicate.DocumentRevisionPair{}\n\tfor _, change := range changes.Results {\n\t\tdocRevPair := sgreplicate.DocumentRevisionPair{}\n\t\tdocRevPair.Id = change.Id\n\t\tdocRevPair.Revision = change.ChangedRevs[0].Revision\n\t\tdocs = append(docs, docRevPair)\n\t}\n\tbulkDocsRequest.Docs = docs\n\treturn bulkDocsRequest\n\n}\n\nfunc stripUserDocChanges(changes sgreplicate.Changes) (changesStripped sgreplicate.Changes) {\n\tchangesStripped.LastSequence = changes.LastSequence\n\n\tfor _, change := range changes.Results {\n\t\tif strings.Contains(change.Id, \"_user\") {\n\t\t\tcontinue\n\t\t}\n\t\tchangesStripped.Results = append(changesStripped.Results, change)\n\n\t}\n\n\treturn changesStripped\n}\n\n\/\/ A retry sleeper is called back by the retry loop and passed\n\/\/ the current retryCount, and should return the amount of milliseconds\n\/\/ that the retry should sleep.\ntype RetrySleeper func(retryCount int) (shouldContinue bool, timeTosleepMs int)\n\n\/\/ A RetryWorker encapsulates the work being done in a Retry Loop. The shouldRetry\n\/\/ return value determines whether the worker will retry, regardless of the err value.\n\/\/ If the worker has exceeded it's retry attempts, then it will not be called again\n\/\/ even if it returns shouldRetry = true.\ntype RetryWorker func() (shouldRetry bool, err error, value interface{})\n\nfunc RetryLoop(description string, worker RetryWorker, sleeper RetrySleeper) (error, interface{}) {\n\n\tnumAttempts := 1\n\n\tfor {\n\t\tshouldRetry, err, value := worker()\n\t\tif !shouldRetry {\n\t\t\tif err != nil {\n\t\t\t\treturn err, nil\n\t\t\t}\n\t\t\treturn nil, value\n\t\t}\n\t\tshouldContinue, sleepMs := sleeper(numAttempts)\n\t\tif !shouldContinue {\n\t\t\tif err == nil {\n\t\t\t\terr = fmt.Errorf(\"RetryLoop for %v giving up after %v attempts\", description, numAttempts)\n\t\t\t}\n\t\t\tlogger.Warn(\"RetryLoop giving up\", \"description\", description, \"numAttempts\", numAttempts)\n\t\t\treturn err, value\n\t\t}\n\t\tlogger.Warn(\"RetryLoop will retry soon\", \"description\", description, \"sleepMs\", sleepMs)\n\n\t\t<-time.After(time.Millisecond * time.Duration(sleepMs))\n\n\t\tnumAttempts += 1\n\n\t}\n}\n\n\/\/ Create a RetrySleeper that will double the retry time on every iteration and\n\/\/ use the given parameters\nfunc CreateDoublingSleeperFunc(maxNumAttempts, initialTimeToSleepMs int) RetrySleeper {\n\n\ttimeToSleepMs := initialTimeToSleepMs\n\n\tsleeper := func(numAttempts int) (bool, int) {\n\t\tif numAttempts > maxNumAttempts {\n\t\t\treturn false, -1\n\t\t}\n\t\tif numAttempts > 1 {\n\t\t\ttimeToSleepMs *= 2\n\t\t}\n\t\treturn true, timeToSleepMs\n\t}\n\treturn sleeper\n\n}\n<|endoftext|>"} {"text":"<commit_before>package manifest\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bpicode\/fritzctl\/console\"\n\t\"github.com\/bpicode\/fritzctl\/fritz\"\n)\n\n\/\/ Applier defines the interface to apply a plan to the AHA system.\ntype Applier interface {\n\t\/\/ Apply performs the changes necessary to transition from src to target configuration. If the target plan\n\t\/\/ could be realized, it returns an error. If the plan was applied successfully, it returns null.\n\tApply(src, target *Plan) error\n}\n\n\/\/ DryRunner is an Applier that only plans changes to the AHA system.\nfunc DryRunner() Applier {\n\treturn &dryRunner{}\n}\n\ntype dryRunner struct {\n}\n\n\/\/ Apply does only log the proposed changes.\nfunc (d *dryRunner) Apply(src, target *Plan) error {\n\tplanner := TargetBasedPlanner(justLogSwitchState, justLogThermostat)\n\tactions, err := planner.Plan(src, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(\"\\n\\nThe following actions would be applied by the manifest:\\n\")\n\tfor _, action := range actions {\n\t\taction.Perform(nil)\n\t}\n\treturn nil\n}\n\nfunc justLogThermostat(before, after Thermostat) Action {\n\treturn &justLogThermostatAction{before: before, after: after}\n}\n\ntype justLogThermostatAction struct {\n\tbefore Thermostat\n\tafter Thermostat\n}\n\n\/\/ Perform only logs changes.\nfunc (a *justLogThermostatAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.Temperature != a.after.Temperature {\n\t\tfmt.Printf(\"\\t'%s'\\t%.1f°C\\t⟶\\t%.1f°C\\n\", a.before.Name, a.before.Temperature, a.after.Temperature)\n\t}\n\treturn nil\n}\n\ntype justLogSwitchAction struct {\n\tbefore Switch\n\tafter Switch\n}\n\nfunc justLogSwitchState(before, after Switch) Action {\n\treturn &justLogSwitchAction{before: before, after: after}\n}\n\n\/\/ Perform only logs changes.\nfunc (a *justLogSwitchAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.State != a.after.State {\n\t\tfmt.Printf(\"\\t'%s'\\t%s\\t⟶\\t%s\\n\", a.before.Name, console.Btoc(a.before.State), console.Btoc(a.after.State))\n\t}\n\treturn nil\n}\n\n\/\/ AhaApiApplier is an Applier that performs changes to the AHA system via the HTTP API.\nfunc AhaApiApplier(f fritz.HomeAutomationApi) Applier {\n\treturn &ahaApiApplier{fritz: f}\n}\n\ntype ahaApiApplier struct {\n\tfritz fritz.HomeAutomationApi\n}\n\n\/\/ Apply does only log the proposed changes.\nfunc (a *ahaApiApplier) Apply(src, target *Plan) error {\n\tplanner := TargetBasedPlanner(reconfigureSwitch, reconfigureThermostat)\n\tactions, err := planner.Plan(src, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, action := range actions {\n\t\taction.Perform(a.fritz)\n\t}\n\treturn nil\n}\n\ntype reconfigureSwitchAction struct {\n\tbefore Switch\n\tafter Switch\n}\n\nfunc reconfigureSwitch(before, after Switch) Action {\n\treturn &reconfigureSwitchAction{before: before, after: after}\n}\n\n\/\/ Perform applies the target state to a switch by turning it on\/off.\nfunc (a *reconfigureSwitchAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.State != a.after.State {\n\t\tif a.after.State {\n\t\t\tf.SwitchOn(a.before.ain)\n\t\t} else {\n\t\t\tf.SwitchOff(a.before.ain)\n\t\t}\n\t}\n\treturn nil\n}\n\ntype reconfigureThermostatAction struct {\n\tbefore Thermostat\n\tafter Thermostat\n}\n\nfunc reconfigureThermostat(before, after Thermostat) Action {\n\treturn &reconfigureThermostatAction{before: before, after: after}\n}\n\n\/\/ Perform applies the target state to a switch by turning it on\/off.\nfunc (a *reconfigureThermostatAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.Temperature != a.after.Temperature {\n\t\tf.ApplyTemperature(a.after.Temperature, a.before.ain)\n\t}\n\treturn nil\n}\n<commit_msg>add logs and return errors<commit_after>package manifest\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bpicode\/fritzctl\/console\"\n\t\"github.com\/bpicode\/fritzctl\/fritz\"\n)\n\n\/\/ Applier defines the interface to apply a plan to the AHA system.\ntype Applier interface {\n\t\/\/ Apply performs the changes necessary to transition from src to target configuration. If the target plan\n\t\/\/ could be realized, it returns an error. If the plan was applied successfully, it returns null.\n\tApply(src, target *Plan) error\n}\n\n\/\/ DryRunner is an Applier that only plans changes to the AHA system.\nfunc DryRunner() Applier {\n\treturn &dryRunner{}\n}\n\ntype dryRunner struct {\n}\n\n\/\/ Apply does only log the proposed changes.\nfunc (d *dryRunner) Apply(src, target *Plan) error {\n\tplanner := TargetBasedPlanner(justLogSwitchState, justLogThermostat)\n\tactions, err := planner.Plan(src, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(\"\\n\\nThe following actions would be applied by the manifest:\\n\")\n\tfor _, action := range actions {\n\t\taction.Perform(nil)\n\t}\n\treturn nil\n}\n\nfunc justLogThermostat(before, after Thermostat) Action {\n\treturn &justLogThermostatAction{before: before, after: after}\n}\n\ntype justLogThermostatAction struct {\n\tbefore Thermostat\n\tafter Thermostat\n}\n\n\/\/ Perform only logs changes.\nfunc (a *justLogThermostatAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.Temperature != a.after.Temperature {\n\t\tfmt.Printf(\"\\t'%s'\\t%.1f°C\\t⟶\\t%.1f°C\\n\", a.before.Name, a.before.Temperature, a.after.Temperature)\n\t}\n\treturn nil\n}\n\ntype justLogSwitchAction struct {\n\tbefore Switch\n\tafter Switch\n}\n\nfunc justLogSwitchState(before, after Switch) Action {\n\treturn &justLogSwitchAction{before: before, after: after}\n}\n\n\/\/ Perform only logs changes.\nfunc (a *justLogSwitchAction) Perform(f fritz.HomeAutomationApi) error {\n\tif a.before.State != a.after.State {\n\t\tfmt.Printf(\"\\t'%s'\\t%s\\t⟶\\t%s\\n\", a.before.Name, console.Btoc(a.before.State), console.Btoc(a.after.State))\n\t}\n\treturn nil\n}\n\n\/\/ AhaApiApplier is an Applier that performs changes to the AHA system via the HTTP API.\nfunc AhaApiApplier(f fritz.HomeAutomationApi) Applier {\n\treturn &ahaApiApplier{fritz: f}\n}\n\ntype ahaApiApplier struct {\n\tfritz fritz.HomeAutomationApi\n}\n\n\/\/ Apply does only log the proposed changes.\nfunc (a *ahaApiApplier) Apply(src, target *Plan) error {\n\tplanner := TargetBasedPlanner(reconfigureSwitch, reconfigureThermostat)\n\tactions, err := planner.Plan(src, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, action := range actions {\n\t\taction.Perform(a.fritz)\n\t}\n\treturn nil\n}\n\ntype reconfigureSwitchAction struct {\n\tbefore Switch\n\tafter Switch\n}\n\nfunc reconfigureSwitch(before, after Switch) Action {\n\treturn &reconfigureSwitchAction{before: before, after: after}\n}\n\n\/\/ Perform applies the target state to a switch by turning it on\/off.\nfunc (a *reconfigureSwitchAction) Perform(f fritz.HomeAutomationApi) (err error) {\n\tif a.before.State != a.after.State {\n\t\tif a.after.State {\n\t\t\t_, err = f.SwitchOn(a.before.ain)\n\t\t} else {\n\t\t\t_, err = f.SwitchOff(a.before.ain)\n\t\t}\n\t\tif err == nil {\n\t\t\tfmt.Printf(\"\\tOK\\t'%s'\\t%s\\t⟶\\t%s\\n\", a.before.Name, console.Btoc(a.before.State), console.Btoc(a.after.State))\n\t\t}\n\t}\n\treturn err\n}\n\ntype reconfigureThermostatAction struct {\n\tbefore Thermostat\n\tafter Thermostat\n}\n\nfunc reconfigureThermostat(before, after Thermostat) Action {\n\treturn &reconfigureThermostatAction{before: before, after: after}\n}\n\n\/\/ Perform applies the target state to a switch by turning it on\/off.\nfunc (a *reconfigureThermostatAction) Perform(f fritz.HomeAutomationApi) (err error) {\n\tif a.before.Temperature != a.after.Temperature {\n\t\t_, err = f.ApplyTemperature(a.after.Temperature, a.before.ain)\n\t\tif err == nil {\n\t\t\tfmt.Printf(\"\\tOK\\t'%s'\\t%.1f°C\\t⟶\\t%.1f°C\\n\", a.before.Name, a.before.Temperature, a.after.Temperature)\n\t\t}\n\t\treturn err\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage index\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc stringToPath(s string) Path {\n\treturn stringSliceToPath(strings.Split(s, PathSeparator))\n}\n\nfunc stringSliceToPath(s []string) Path {\n\tp := make(Path, len(s))\n\tfor i, x := range s {\n\t\tp[i] = Key(x)\n\t}\n\treturn p\n}\n\nfunc TestPathEqual(t *testing.T) {\n\ttests := []struct {\n\t\tp, q Path\n\t\tequal bool\n\t}{\n\t\t{\n\t\t\tPath(nil), Path(nil),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tPath([]Key{}), Path([]Key{}),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a\"), stringToPath(\"a\"),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a:b\"), stringToPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a:b\"), stringToPath(\"a:c\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath(nil), stringToPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tif tt.p.Equal(tt.q) != tt.equal {\n\t\t\tt.Errorf(\"[%d] (%#v).Equal(%#v) = %v, expected %v\", ii, tt.p, tt.q, !tt.equal, tt.equal)\n\t\t}\n\t}\n}\n\nfunc TestPathContains(t *testing.T) {\n\ttests := []struct {\n\t\tp, q Path\n\t\tcontains bool\n\t}{\n\t\t{\n\t\t\tPath(nil), Path(nil),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath([]Key{}), Path([]Key{}),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a\"), NewPath(\"a\"),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a:c\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath(nil), NewPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a:b:c\"),\n\t\t\ttrue,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tif tt.p.Contains(tt.q) != tt.contains {\n\t\t\tt.Errorf(\"[%d] (%#v).Contains(%#v) = %v, expected %v\", ii, tt.p, tt.q, !tt.contains, tt.contains)\n\t\t}\n\t}\n}\n\nfunc TestOrderedIntersection(t *testing.T) {\n\ttests := []struct {\n\t\tin [][]Path\n\t\tout []Path\n\t}{\n\t\t{\n\t\t\tin: nil,\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t\t\t{stringToPath(\"C\"), stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot := OrderedIntersection(tt.in...)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got %#v, expected: %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestUnion(t *testing.T) {\n\ttests := []struct {\n\t\tin [][]Path\n\t\tout []Path\n\t}{\n\t\t{\n\t\t\tin: nil,\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t\t\t{stringToPath(\"C\"), stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot := Union(tt.in...)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got %#v, expected: %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestIndexOfPath(t *testing.T) {\n\ttests := []struct {\n\t\thaystack []Path\n\t\tneedle Path\n\t\tidx int\n\t}{\n\t\t{\n\t\t\t[]Path{},\n\t\t\tPath{},\n\t\t\t-1,\n\t\t},\n\t\t{\n\t\t\t[]Path{\n\t\t\t\tPath{\"Root\"},\n\t\t\t},\n\t\t\tPath{\"Root\"},\n\t\t\t0,\n\t\t}, {\n\t\t\t[]Path{\n\t\t\t\tPath{\"Root\"}, Path{\"Root\", \"One\"},\n\t\t\t},\n\t\t\tPath{\"Root\", \"One\"},\n\t\t\t1,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\ti := IndexOfPath(tt.haystack, tt.needle)\n\t\tif i != tt.idx {\n\t\t\tt.Errorf(\"[%d] IndexOfPath(%v, %v) = %d, expected %d\", ii, tt.haystack, tt.needle, i, tt.idx)\n\t\t}\n\t}\n}\n\nfunc TestPathFromJSONInterface(t *testing.T) {\n\ttests := []struct {\n\t\tin interface{}\n\t\tout Path\n\t}{\n\t\t{\n\t\t\tinterface{}(nil),\n\t\t\tPath(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]interface{}{nil},\n\t\t\tPath(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]interface{}{\"\"},\n\t\t\tPath{\"\"},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot, _ := PathFromJSONInterface(tt.in)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got: %#v, expected %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n<commit_msg>Improve test coverage for PathFromJSONInterface<commit_after>\/\/ Copyright 2015, David Howden\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage index\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc stringToPath(s string) Path {\n\treturn stringSliceToPath(strings.Split(s, PathSeparator))\n}\n\nfunc stringSliceToPath(s []string) Path {\n\tp := make(Path, len(s))\n\tfor i, x := range s {\n\t\tp[i] = Key(x)\n\t}\n\treturn p\n}\n\nfunc TestPathEqual(t *testing.T) {\n\ttests := []struct {\n\t\tp, q Path\n\t\tequal bool\n\t}{\n\t\t{\n\t\t\tPath(nil), Path(nil),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tPath([]Key{}), Path([]Key{}),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a\"), stringToPath(\"a\"),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a:b\"), stringToPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tstringToPath(\"a:b\"), stringToPath(\"a:c\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath(nil), stringToPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tif tt.p.Equal(tt.q) != tt.equal {\n\t\t\tt.Errorf(\"[%d] (%#v).Equal(%#v) = %v, expected %v\", ii, tt.p, tt.q, !tt.equal, tt.equal)\n\t\t}\n\t}\n}\n\nfunc TestPathContains(t *testing.T) {\n\ttests := []struct {\n\t\tp, q Path\n\t\tcontains bool\n\t}{\n\t\t{\n\t\t\tPath(nil), Path(nil),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath([]Key{}), Path([]Key{}),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a\"), NewPath(\"a\"),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a:c\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tPath(nil), NewPath(\"a\"),\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\tNewPath(\"a:b\"), NewPath(\"a:b:c\"),\n\t\t\ttrue,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tif tt.p.Contains(tt.q) != tt.contains {\n\t\t\tt.Errorf(\"[%d] (%#v).Contains(%#v) = %v, expected %v\", ii, tt.p, tt.q, !tt.contains, tt.contains)\n\t\t}\n\t}\n}\n\nfunc TestOrderedIntersection(t *testing.T) {\n\ttests := []struct {\n\t\tin [][]Path\n\t\tout []Path\n\t}{\n\t\t{\n\t\t\tin: nil,\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t\t\t{stringToPath(\"C\"), stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot := OrderedIntersection(tt.in...)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got %#v, expected: %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestUnion(t *testing.T) {\n\ttests := []struct {\n\t\tin [][]Path\n\t\tout []Path\n\t}{\n\t\t{\n\t\t\tin: nil,\n\t\t\tout: []Path{},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t},\n\n\t\t{\n\t\t\tin: [][]Path{\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\")},\n\t\t\t\t{stringToPath(\"B\"), stringToPath(\"A\")},\n\t\t\t\t{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t\t\t{stringToPath(\"C\"), stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"B\")},\n\t\t\t},\n\t\t\tout: []Path{stringToPath(\"A\"), stringToPath(\"B\"), stringToPath(\"C\")},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot := Union(tt.in...)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got %#v, expected: %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestIndexOfPath(t *testing.T) {\n\ttests := []struct {\n\t\thaystack []Path\n\t\tneedle Path\n\t\tidx int\n\t}{\n\t\t{\n\t\t\t[]Path{},\n\t\t\tPath{},\n\t\t\t-1,\n\t\t},\n\t\t{\n\t\t\t[]Path{\n\t\t\t\tPath{\"Root\"},\n\t\t\t},\n\t\t\tPath{\"Root\"},\n\t\t\t0,\n\t\t}, {\n\t\t\t[]Path{\n\t\t\t\tPath{\"Root\"}, Path{\"Root\", \"One\"},\n\t\t\t},\n\t\t\tPath{\"Root\", \"One\"},\n\t\t\t1,\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\ti := IndexOfPath(tt.haystack, tt.needle)\n\t\tif i != tt.idx {\n\t\t\tt.Errorf(\"[%d] IndexOfPath(%v, %v) = %d, expected %d\", ii, tt.haystack, tt.needle, i, tt.idx)\n\t\t}\n\t}\n}\n\nfunc TestPathFromJSONInterface(t *testing.T) {\n\ttests := []struct {\n\t\tin interface{}\n\t\tout Path\n\t}{\n\t\t{\n\t\t\tinterface{}(nil),\n\t\t\tPath(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]interface{}{nil},\n\t\t\tPath(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]interface{}{\"\"},\n\t\t\tPath{\"\"},\n\t\t},\n\n\t\t{\n\t\t\t[]interface{}{\"\", 123.},\n\t\t\tPath{\"\", \"123\"},\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot, _ := PathFromJSONInterface(tt.in)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] got: %#v, expected %#v\", ii, got, tt.out)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package bench\n\nimport (\n\t\"fmt\"\n\n\t\"flag\"\n\t\"io\/ioutil\"\n\n\t\"context\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\n\/\/ ZipfSetBits sets random bits according to the Zipf-Mandelbrot distribution.\n\/\/ This distribution accepts two parameters, Exponent and Ratio, for both bitmaps and profiles.\n\/\/ It also uses PermutationGenerator to permute IDs randomly.\ntype ZipfSetBits struct {\n\tHasClient\n\tName string `json:\"name\"`\n\tBaseBitmapID int64 `json:\"base-bitmap-id\"`\n\tBaseProfileID int64 `json:\"base-profile-id\"`\n\tBitmapIDRange int64 `json:\"bitmap-id-range\"`\n\tProfileIDRange int64 `json:\"profile-id-range\"`\n\tIterations int `json:\"iterations\"`\n\tSeed int64 `json:\"seed\"`\n\tDB string `json:\"db\"`\n\tbitmapRng *rand.Zipf `json:\"-\"`\n\tProfileRng *rand.Zipf `json:\"-\"`\n\tBitmapPerm *PermutationGenerator `json:\"-\"`\n\tProfilePerm *PermutationGenerator `json:\"-\"`\n\n\tBitmapExponent float64 `json:\"bitmap-exponent\"`\n\tBitmapRatio float64 `json:\"bitmap-ratio\"`\n\tProfileExponent float64 `json:\"profile-exponent\"`\n\tProfileRatio float64 `json:\"profile-ratio\"`\n}\n\nfunc (b *ZipfSetBits) Usage() string {\n\treturn `\nzipf-set-bits sets random bits according to the Zipf distribution.\nThis is a power-law distribution controlled by two parameters.\nExponent, in the range (1, inf), with a default value of 1.001, controls\nthe \"sharpness\" of the distribution, with higher exponent being sharper.\nRatio, in the range (0, 1), with a default value of 0.25, controls the\nmaximum variation of the distribution, with higher ratio being more uniform.\n\nUsage: zipf-set-bits [arguments]\n\nThe following arguments are available:\n\n\t-base-bitmap-id int\n\t\tbits being set will all be greater than BaseBitmapID\n\n\t-bitmap-id-range int\n\t\tnumber of possible bitmap ids that can be set\n\n\t-base-profile-id int\n\t\tprofile id num to start from\n\n\t-profile-id-range int\n\t\tnumber of possible profile ids that can be set\n\n\t-iterations int\n\t\tnumber of bits to set\n\n\t-seed int\n\t\tSeed for RNG\n\n\t-db string\n\t\tpilosa db to use\n\n\t-bitmap-exponent float64\n\t\tzipf exponent parameter for bitmap IDs\n\n\t-bitmap-ratio float64\n\t\tzipf probability ratio parameter for bitmap IDs\n\n\t-profile-exponent float64\n\t\tzipf exponent parameter for profile IDs\n\n\t-profile-ratio float64\n\t\tzipf probability ratio parameter for profile IDs\n\n\t-client-type string\n\t\tCan be 'single' (all agents hitting one host) or 'round_robin'\n`[1:]\n}\n\nfunc (b *ZipfSetBits) ConsumeFlags(args []string) ([]string, error) {\n\tfs := flag.NewFlagSet(\"ZipfSetBits\", flag.ContinueOnError)\n\tfs.SetOutput(ioutil.Discard)\n\tfs.Int64Var(&b.BaseBitmapID, \"base-bitmap-id\", 0, \"\")\n\tfs.Int64Var(&b.BitmapIDRange, \"bitmap-id-range\", 100000, \"\")\n\tfs.Int64Var(&b.BaseProfileID, \"base-profile-id\", 0, \"\")\n\tfs.Int64Var(&b.ProfileIDRange, \"profile-id-range\", 100000, \"\")\n\tfs.Int64Var(&b.Seed, \"seed\", 1, \"\")\n\tfs.IntVar(&b.Iterations, \"iterations\", 100, \"\")\n\tfs.StringVar(&b.DB, \"db\", \"benchdb\", \"\")\n\tfs.Float64Var(&b.BitmapExponent, \"bitmap-exponent\", 1.01, \"\")\n\tfs.Float64Var(&b.BitmapRatio, \"bitmap-ratio\", 0.25, \"\")\n\tfs.Float64Var(&b.ProfileExponent, \"profile-exponent\", 1.01, \"\")\n\tfs.Float64Var(&b.ProfileRatio, \"profile-ratio\", 0.25, \"\")\n\tfs.StringVar(&b.ClientType, \"client-type\", \"single\", \"\")\n\n\tif err := fs.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs.Args(), nil\n}\n\n\/\/ Offset is the true parameter used by the Zipf distribution, but the ratio,\n\/\/ as defined here, is a simpler, readable way to define the distribution.\n\/\/ Offset is in [1, inf), and its meaning depends on N (a pain for updating benchmark configs)\n\/\/ ratio is in (0, 1), and its meaning does not depend on N.\n\/\/ it is the ratio of the lowest probability in the distribution to the highest.\n\/\/ ratio=0.01 corresponds to a very small offset - the most skewed distribution for a given pair (N, exp)\n\/\/ ratio=0.99 corresponds to a very large offset - the most nearly uniform distribution for a given (N, exp)\nfunc getZipfOffset(N int64, exp, ratio float64) float64 {\n\tz := math.Pow(ratio, 1\/exp)\n\treturn z * float64(N-1) \/ (1 - z)\n}\n\nfunc (b *ZipfSetBits) Init(hosts []string, agentNum int) error {\n\tb.Name = \"ZipfSetBits\"\n\trnd := rand.New(rand.NewSource(b.Seed + int64(agentNum)))\n\tbitmapOffset := getZipfOffset(b.BitmapIDRange, b.BitmapExponent, b.BitmapRatio)\n\tb.bitmapRng = rand.NewZipf(rnd, b.BitmapExponent, bitmapOffset, uint64(b.BitmapIDRange-1))\n\tprofileOffset := getZipfOffset(b.ProfileIDRange, b.ProfileExponent, b.ProfileRatio)\n\tb.profileRng = rand.NewZipf(rnd, b.ProfileExponent, profileOffset, uint64(b.ProfileIDRange-1))\n\n\tb.bitmapPerm = NewPermutationGenerator(b.BitmapIDRange, b.Seed)\n\tb.profilePerm = NewPermutationGenerator(b.ProfileIDRange, b.Seed+1)\n\n\treturn b.HasClient.Init(hosts, agentNum)\n}\n\n\/\/ Run runs the ZipfSetBits benchmark\nfunc (b *ZipfSetBits) Run(ctx context.Context, agentNum int) map[string]interface{} {\n\tresults := make(map[string]interface{})\n\tif b.cli == nil {\n\t\tresults[\"error\"] = fmt.Errorf(\"No client set for ZipfSetBits agent: %v\", agentNum)\n\t\treturn results\n\t}\n\ts := NewStats()\n\tvar start time.Time\n\tfor n := 0; n < b.Iterations; n++ {\n\t\t\/\/ generate IDs from Zipf distribution\n\t\tbitmapIDOriginal := b.bitmapRng.Uint64()\n\t\tprofIDOriginal := b.profileRng.Uint64()\n\t\t\/\/ permute IDs randomly, but repeatably\n\t\tbitmapID := b.bitmapPerm.Next(int64(bitmapIDOriginal))\n\t\tprofID := b.profilePerm.Next(int64(profIDOriginal))\n\n\t\tquery := fmt.Sprintf(\"SetBit(%d, 'frame.n', %d)\", b.BaseBitmapID+int64(bitmapID), b.BaseProfileID+int64(profID))\n\t\tstart = time.Now()\n\t\tb.cli.ExecuteQuery(ctx, b.DB, query, true)\n\t\ts.Add(time.Now().Sub(start))\n\t}\n\tAddToResults(s, results)\n\treturn results\n}\n<commit_msg>Remove extraneous JSON tags<commit_after>package bench\n\nimport (\n\t\"fmt\"\n\n\t\"flag\"\n\t\"io\/ioutil\"\n\n\t\"context\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\n\/\/ ZipfSetBits sets random bits according to the Zipf-Mandelbrot distribution.\n\/\/ This distribution accepts two parameters, Exponent and Ratio, for both bitmaps and profiles.\n\/\/ It also uses PermutationGenerator to permute IDs randomly.\ntype ZipfSetBits struct {\n\tHasClient\n\tName string `json:\"name\"`\n\tBaseBitmapID int64 `json:\"base-bitmap-id\"`\n\tBaseProfileID int64 `json:\"base-profile-id\"`\n\tBitmapIDRange int64 `json:\"bitmap-id-range\"`\n\tProfileIDRange int64 `json:\"profile-id-range\"`\n\tIterations int `json:\"iterations\"`\n\tSeed int64 `json:\"seed\"`\n\tDB string `json:\"db\"`\n\tBitmapExponent float64 `json:\"bitmap-exponent\"`\n\tBitmapRatio float64 `json:\"bitmap-ratio\"`\n\tProfileExponent float64 `json:\"profile-exponent\"`\n\tProfileRatio float64 `json:\"profile-ratio\"`\n\tbitmapRng *rand.Zipf\n\tProfileRng *rand.Zipf\n\tBitmapPerm *PermutationGenerator\n\tProfilePerm *PermutationGenerator\n}\n\nfunc (b *ZipfSetBits) Usage() string {\n\treturn `\nzipf-set-bits sets random bits according to the Zipf distribution.\nThis is a power-law distribution controlled by two parameters.\nExponent, in the range (1, inf), with a default value of 1.001, controls\nthe \"sharpness\" of the distribution, with higher exponent being sharper.\nRatio, in the range (0, 1), with a default value of 0.25, controls the\nmaximum variation of the distribution, with higher ratio being more uniform.\n\nUsage: zipf-set-bits [arguments]\n\nThe following arguments are available:\n\n\t-base-bitmap-id int\n\t\tbits being set will all be greater than BaseBitmapID\n\n\t-bitmap-id-range int\n\t\tnumber of possible bitmap ids that can be set\n\n\t-base-profile-id int\n\t\tprofile id num to start from\n\n\t-profile-id-range int\n\t\tnumber of possible profile ids that can be set\n\n\t-iterations int\n\t\tnumber of bits to set\n\n\t-seed int\n\t\tSeed for RNG\n\n\t-db string\n\t\tpilosa db to use\n\n\t-bitmap-exponent float64\n\t\tzipf exponent parameter for bitmap IDs\n\n\t-bitmap-ratio float64\n\t\tzipf probability ratio parameter for bitmap IDs\n\n\t-profile-exponent float64\n\t\tzipf exponent parameter for profile IDs\n\n\t-profile-ratio float64\n\t\tzipf probability ratio parameter for profile IDs\n\n\t-client-type string\n\t\tCan be 'single' (all agents hitting one host) or 'round_robin'\n`[1:]\n}\n\nfunc (b *ZipfSetBits) ConsumeFlags(args []string) ([]string, error) {\n\tfs := flag.NewFlagSet(\"ZipfSetBits\", flag.ContinueOnError)\n\tfs.SetOutput(ioutil.Discard)\n\tfs.Int64Var(&b.BaseBitmapID, \"base-bitmap-id\", 0, \"\")\n\tfs.Int64Var(&b.BitmapIDRange, \"bitmap-id-range\", 100000, \"\")\n\tfs.Int64Var(&b.BaseProfileID, \"base-profile-id\", 0, \"\")\n\tfs.Int64Var(&b.ProfileIDRange, \"profile-id-range\", 100000, \"\")\n\tfs.Int64Var(&b.Seed, \"seed\", 1, \"\")\n\tfs.IntVar(&b.Iterations, \"iterations\", 100, \"\")\n\tfs.StringVar(&b.DB, \"db\", \"benchdb\", \"\")\n\tfs.Float64Var(&b.BitmapExponent, \"bitmap-exponent\", 1.01, \"\")\n\tfs.Float64Var(&b.BitmapRatio, \"bitmap-ratio\", 0.25, \"\")\n\tfs.Float64Var(&b.ProfileExponent, \"profile-exponent\", 1.01, \"\")\n\tfs.Float64Var(&b.ProfileRatio, \"profile-ratio\", 0.25, \"\")\n\tfs.StringVar(&b.ClientType, \"client-type\", \"single\", \"\")\n\n\tif err := fs.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs.Args(), nil\n}\n\n\/\/ Offset is the true parameter used by the Zipf distribution, but the ratio,\n\/\/ as defined here, is a simpler, readable way to define the distribution.\n\/\/ Offset is in [1, inf), and its meaning depends on N (a pain for updating benchmark configs)\n\/\/ ratio is in (0, 1), and its meaning does not depend on N.\n\/\/ it is the ratio of the lowest probability in the distribution to the highest.\n\/\/ ratio=0.01 corresponds to a very small offset - the most skewed distribution for a given pair (N, exp)\n\/\/ ratio=0.99 corresponds to a very large offset - the most nearly uniform distribution for a given (N, exp)\nfunc getZipfOffset(N int64, exp, ratio float64) float64 {\n\tz := math.Pow(ratio, 1\/exp)\n\treturn z * float64(N-1) \/ (1 - z)\n}\n\nfunc (b *ZipfSetBits) Init(hosts []string, agentNum int) error {\n\tb.Name = \"ZipfSetBits\"\n\trnd := rand.New(rand.NewSource(b.Seed + int64(agentNum)))\n\tbitmapOffset := getZipfOffset(b.BitmapIDRange, b.BitmapExponent, b.BitmapRatio)\n\tb.bitmapRng = rand.NewZipf(rnd, b.BitmapExponent, bitmapOffset, uint64(b.BitmapIDRange-1))\n\tprofileOffset := getZipfOffset(b.ProfileIDRange, b.ProfileExponent, b.ProfileRatio)\n\tb.profileRng = rand.NewZipf(rnd, b.ProfileExponent, profileOffset, uint64(b.ProfileIDRange-1))\n\n\tb.bitmapPerm = NewPermutationGenerator(b.BitmapIDRange, b.Seed)\n\tb.profilePerm = NewPermutationGenerator(b.ProfileIDRange, b.Seed+1)\n\n\treturn b.HasClient.Init(hosts, agentNum)\n}\n\n\/\/ Run runs the ZipfSetBits benchmark\nfunc (b *ZipfSetBits) Run(ctx context.Context, agentNum int) map[string]interface{} {\n\tresults := make(map[string]interface{})\n\tif b.cli == nil {\n\t\tresults[\"error\"] = fmt.Errorf(\"No client set for ZipfSetBits agent: %v\", agentNum)\n\t\treturn results\n\t}\n\ts := NewStats()\n\tvar start time.Time\n\tfor n := 0; n < b.Iterations; n++ {\n\t\t\/\/ generate IDs from Zipf distribution\n\t\tbitmapIDOriginal := b.bitmapRng.Uint64()\n\t\tprofIDOriginal := b.profileRng.Uint64()\n\t\t\/\/ permute IDs randomly, but repeatably\n\t\tbitmapID := b.bitmapPerm.Next(int64(bitmapIDOriginal))\n\t\tprofID := b.profilePerm.Next(int64(profIDOriginal))\n\n\t\tquery := fmt.Sprintf(\"SetBit(%d, 'frame.n', %d)\", b.BaseBitmapID+int64(bitmapID), b.BaseProfileID+int64(profID))\n\t\tstart = time.Now()\n\t\tb.cli.ExecuteQuery(ctx, b.DB, query, true)\n\t\ts.Add(time.Now().Sub(start))\n\t}\n\tAddToResults(s, results)\n\treturn results\n}\n<|endoftext|>"} {"text":"<commit_before>package metrics\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n)\n\nvar prefix = \"\"\n\nfunc SetPrefix(p string) {\n\tprefix = p\n}\n\nvar memstats = new(runtime.MemStats)\n\nfunc init() {\n\thttp.Handle(\"\/metrics\", http.HandlerFunc(printMetrics))\n}\n\nfunc printMetrics(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\n\t\/\/ Runtime memory stats\n\truntime.ReadMemStats(memstats)\n\t\/\/ General statistics.\n\tfmt.Fprintf(w, \"%smem_alloc %d\\n\", prefix, memstats.Alloc) \/\/ bytes allocated and not yet freed\n\tfmt.Fprintf(w, \"%smem_alloc_total %d\\n\", prefix, memstats.TotalAlloc) \/\/ bytes allocated (even if freed)\n\tfmt.Fprintf(w, \"%smem_sys %d\\n\", prefix, memstats.Sys) \/\/ bytes obtained from system (sum of XxxSys below)\n\tfmt.Fprintf(w, \"%smem_ptr_lookups %d\\n\", prefix, memstats.Lookups) \/\/ number of pointer lookups\n\tfmt.Fprintf(w, \"%smem_mallocs %d\\n\", prefix, memstats.Mallocs) \/\/ number of mallocs\n\tfmt.Fprintf(w, \"%smem_frees %d\\n\", prefix, memstats.Frees) \/\/ number of frees\n\n\t\/\/ Main allocation heap statistics.\n\tfmt.Fprintf(w, \"%smem_heap_alloc %d\\n\", prefix, memstats.HeapAlloc) \/\/ bytes allocated and not yet freed (same as Alloc above)\n\tfmt.Fprintf(w, \"%smem_heap_sys %d\\n\", prefix, memstats.HeapSys) \/\/ bytes obtained from system\n\tfmt.Fprintf(w, \"%smem_heap_idle %d\\n\", prefix, memstats.HeapIdle) \/\/ bytes in idle spans\n\tfmt.Fprintf(w, \"%smem_heap_in_use %d\\n\", prefix, memstats.HeapInuse) \/\/ bytes in non-idle span\n\tfmt.Fprintf(w, \"%smem_heap_released %d\\n\", prefix, memstats.HeapReleased) \/\/ bytes released to the OS\n\tfmt.Fprintf(w, \"%smem_heap_objects %d\\n\", prefix, memstats.HeapObjects) \/\/ total number of allocated objects\n\n\tfmt.Fprintf(w, \"%smem_stack_in_use %d\\n\", prefix, memstats.StackInuse) \/\/ bytes used by stack allocator\n\tfmt.Fprintf(w, \"%smem_stack_sys %d\\n\", prefix, memstats.StackSys)\n\tfmt.Fprintf(w, \"%smem_mspan_in_use %d\\n\", prefix, memstats.MSpanInuse) \/\/ mspan structures\n\tfmt.Fprintf(w, \"%smem_mspan_sys %d\\n\", prefix, memstats.MSpanSys)\n\tfmt.Fprintf(w, \"%smem_mcache_in_use %d\\n\", prefix, memstats.MCacheInuse) \/\/ mcache structures\n\tfmt.Fprintf(w, \"%smem_mcache_sys %d\\n\", prefix, memstats.MCacheSys)\n\tfmt.Fprintf(w, \"%smem_buck_hash_sys %d\\n\", prefix, memstats.BuckHashSys) \/\/ profiling bucket hash table\n\tfmt.Fprintf(w, \"%smem_gc_sys %d\\n\", prefix, memstats.GCSys) \/\/ GC metadata\n\tfmt.Fprintf(w, \"%smem_other_sys %d\\n\", prefix, memstats.OtherSys) \/\/ other system allocations\n\n\tfmt.Fprintf(w, \"%sgc_next_gc_heap_alloc %d\\n\", prefix, memstats.NextGC) \/\/ next collection will happen when HeapAlloc ≥ this amount\n\tfmt.Fprintf(w, \"%sgc_last_gc_time %d\\n\", prefix, memstats.LastGC) \/\/ end time of last collection (nanoseconds since 1970)\n\tfmt.Fprintf(w, \"%sgc_pause_total %d\\n\", prefix, memstats.PauseTotalNs)\n\tfmt.Fprintf(w, \"%sgc_num_gc %d\\n\", prefix, memstats.NumGC)\n\tfmt.Fprintf(w, \"%sgc_gc_cpu_frac %f\\n\", prefix, memstats.GCCPUFraction)\n\n\t\/\/ circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]\n\tfor i, p := range pausePercentiles(memstats.PauseNs[:], memstats.NumGC) {\n\t\tfmt.Fprintf(w, \"%sgc_pause_pctl_%d %d\\n\", prefix, i*5, p)\n\t}\n\n\t\/\/ Per-size allocation statistics.\n\tfor _, b := range memstats.BySize {\n\t\tfmt.Fprintf(w, \"%salloc_size_%d_mallocs %d\\n\", prefix, b.Size, b.Mallocs)\n\t\tfmt.Fprintf(w, \"%salloc_size_%d_frees %d\\n\", prefix, b.Size, b.Frees)\n\t}\n\n\t\/\/ Histograms\n\thists := getAllHistograms()\n\tfor name, dat := range hists {\n\t\tfmt.Fprintf(w, \"%shist_%s_count %d\\n\", prefix, name, *dat.count)\n\t\tfmt.Fprintf(w, \"%shist_%s_kept %d\\n\", prefix, name, *dat.kept)\n\t\tfor i, p := range hdatPercentiles(dat) {\n\t\t\tfmt.Fprintf(w, \"%shist_%s_pctl_%d %d\\n\", prefix, name, i*5, p)\n\t\t}\n\t}\n\n\t\/\/ Application stats\n\tctrs := getAllCounters()\n\tfor name, val := range ctrs {\n\t\tfmt.Fprintf(w, \"%s%s %d\\n\", prefix, name, val)\n\t}\n}\n\n\/\/ Percentiles go by 5% percentile steps from min to max. We report all of them even though it's\n\/\/ likely only min, 25th, 50th, 75th, 95th, and max will be used. It's assumed the metric poller\n\/\/ that is consuming this output will choose to only report to the metrics system what it considers\n\/\/ useful information.\n\/\/\n\/\/ Slice layout:\n\/\/ [0]: min (0th)\n\/\/ [1]: 5th\n\/\/ [n]: 5n\n\/\/ [19]: 95th\n\/\/ [20]: max (100th)\nfunc hdatPercentiles(dat *hdat) []uint64 {\n\tbuf := dat.buf\n\tkept := *dat.kept\n\n\tif kept == 0 {\n\t\treturn nil\n\t}\n\tif kept < uint64(len(buf)) {\n\t\tbuf = buf[:kept]\n\t}\n\n\tsort.Sort(uint64slice(buf))\n\n\tpctls := make([]uint64, 21)\n\tpctls[0] = *dat.min\n\tpctls[20] = *dat.max\n\n\tfor i := 1; i < 20; i++ {\n\t\tidx := len(buf) * i \/ 20\n\t\tpctls[i] = buf[idx]\n\t}\n\n\treturn pctls\n}\n\nfunc pausePercentiles(pauses []uint64, ngc uint32) []uint64 {\n\tif ngc < uint32(len(pauses)) {\n\t\tpauses = pauses[:ngc]\n\t}\n\n\tsort.Sort(uint64slice(pauses))\n\n\tpctls := make([]uint64, 21)\n\tpctls[0] = pauses[0]\n\tpctls[20] = pauses[len(pauses)-1]\n\n\tfor i := 1; i < 20; i++ {\n\t\tidx := len(pauses) * i \/ 20\n\t\tpctls[i] = pauses[idx]\n\t}\n\n\treturn pctls\n}\n\ntype uint64slice []uint64\n\nfunc (u uint64slice) Len() int { return len(u) }\nfunc (u uint64slice) Swap(i, j int) { u[i], u[j] = u[j], u[i] }\nfunc (u uint64slice) Less(i, j int) bool { return u[i] < u[j] }\n<commit_msg>Adding 99th percentiles for all histograms and GC times<commit_after>package metrics\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sort\"\n)\n\nvar prefix = \"\"\n\nfunc SetPrefix(p string) {\n\tprefix = p\n}\n\nvar memstats = new(runtime.MemStats)\n\nfunc init() {\n\thttp.Handle(\"\/metrics\", http.HandlerFunc(printMetrics))\n}\n\nfunc printMetrics(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\n\t\/\/ Runtime memory stats\n\truntime.ReadMemStats(memstats)\n\t\/\/ General statistics.\n\tfmt.Fprintf(w, \"%smem_alloc %d\\n\", prefix, memstats.Alloc) \/\/ bytes allocated and not yet freed\n\tfmt.Fprintf(w, \"%smem_alloc_total %d\\n\", prefix, memstats.TotalAlloc) \/\/ bytes allocated (even if freed)\n\tfmt.Fprintf(w, \"%smem_sys %d\\n\", prefix, memstats.Sys) \/\/ bytes obtained from system (sum of XxxSys below)\n\tfmt.Fprintf(w, \"%smem_ptr_lookups %d\\n\", prefix, memstats.Lookups) \/\/ number of pointer lookups\n\tfmt.Fprintf(w, \"%smem_mallocs %d\\n\", prefix, memstats.Mallocs) \/\/ number of mallocs\n\tfmt.Fprintf(w, \"%smem_frees %d\\n\", prefix, memstats.Frees) \/\/ number of frees\n\n\t\/\/ Main allocation heap statistics.\n\tfmt.Fprintf(w, \"%smem_heap_alloc %d\\n\", prefix, memstats.HeapAlloc) \/\/ bytes allocated and not yet freed (same as Alloc above)\n\tfmt.Fprintf(w, \"%smem_heap_sys %d\\n\", prefix, memstats.HeapSys) \/\/ bytes obtained from system\n\tfmt.Fprintf(w, \"%smem_heap_idle %d\\n\", prefix, memstats.HeapIdle) \/\/ bytes in idle spans\n\tfmt.Fprintf(w, \"%smem_heap_in_use %d\\n\", prefix, memstats.HeapInuse) \/\/ bytes in non-idle span\n\tfmt.Fprintf(w, \"%smem_heap_released %d\\n\", prefix, memstats.HeapReleased) \/\/ bytes released to the OS\n\tfmt.Fprintf(w, \"%smem_heap_objects %d\\n\", prefix, memstats.HeapObjects) \/\/ total number of allocated objects\n\n\tfmt.Fprintf(w, \"%smem_stack_in_use %d\\n\", prefix, memstats.StackInuse) \/\/ bytes used by stack allocator\n\tfmt.Fprintf(w, \"%smem_stack_sys %d\\n\", prefix, memstats.StackSys)\n\tfmt.Fprintf(w, \"%smem_mspan_in_use %d\\n\", prefix, memstats.MSpanInuse) \/\/ mspan structures\n\tfmt.Fprintf(w, \"%smem_mspan_sys %d\\n\", prefix, memstats.MSpanSys)\n\tfmt.Fprintf(w, \"%smem_mcache_in_use %d\\n\", prefix, memstats.MCacheInuse) \/\/ mcache structures\n\tfmt.Fprintf(w, \"%smem_mcache_sys %d\\n\", prefix, memstats.MCacheSys)\n\tfmt.Fprintf(w, \"%smem_buck_hash_sys %d\\n\", prefix, memstats.BuckHashSys) \/\/ profiling bucket hash table\n\tfmt.Fprintf(w, \"%smem_gc_sys %d\\n\", prefix, memstats.GCSys) \/\/ GC metadata\n\tfmt.Fprintf(w, \"%smem_other_sys %d\\n\", prefix, memstats.OtherSys) \/\/ other system allocations\n\n\tfmt.Fprintf(w, \"%sgc_next_gc_heap_alloc %d\\n\", prefix, memstats.NextGC) \/\/ next collection will happen when HeapAlloc ≥ this amount\n\tfmt.Fprintf(w, \"%sgc_last_gc_time %d\\n\", prefix, memstats.LastGC) \/\/ end time of last collection (nanoseconds since 1970)\n\tfmt.Fprintf(w, \"%sgc_pause_total %d\\n\", prefix, memstats.PauseTotalNs)\n\tfmt.Fprintf(w, \"%sgc_num_gc %d\\n\", prefix, memstats.NumGC)\n\tfmt.Fprintf(w, \"%sgc_gc_cpu_frac %f\\n\", prefix, memstats.GCCPUFraction)\n\n\t\/\/ circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]\n\tpctls := pausePercentiles(memstats.PauseNs[:], memstats.NumGC)\n\tfor i := 0; i < 20; i++ {\n\t\tp := pctls[i]\n\t\tfmt.Fprintf(w, \"%sgc_pause_pctl_%d %d\\n\", prefix, i*5, p)\n\t}\n\tfmt.Fprintf(w, \"%sgc_pause_pctl_%d %d\\n\", prefix, 99, pctls[20])\n\tfmt.Fprintf(w, \"%sgc_pause_pctl_%d %d\\n\", prefix, 100, pctls[21])\n\n\t\/\/ Per-size allocation statistics.\n\tfor _, b := range memstats.BySize {\n\t\tfmt.Fprintf(w, \"%salloc_size_%d_mallocs %d\\n\", prefix, b.Size, b.Mallocs)\n\t\tfmt.Fprintf(w, \"%salloc_size_%d_frees %d\\n\", prefix, b.Size, b.Frees)\n\t}\n\n\t\/\/ Histograms\n\thists := getAllHistograms()\n\tfor name, dat := range hists {\n\t\tfmt.Fprintf(w, \"%shist_%s_count %d\\n\", prefix, name, *dat.count)\n\t\tfmt.Fprintf(w, \"%shist_%s_kept %d\\n\", prefix, name, *dat.kept)\n\t\tpctls := hdatPercentiles(dat)\n\t\tfor i := 0; i < 20; i++ {\n\t\t\tp := pctls[i]\n\t\t\tfmt.Fprintf(w, \"%shist_%s_pctl_%d %d\\n\", prefix, name, i*5, p)\n\t\t}\n\t\tfmt.Fprintf(w, \"%shist_%s_pctl_%d %d\\n\", prefix, name, 99, pctls[20])\n\t\tfmt.Fprintf(w, \"%shist_%s_pctl_%d %d\\n\", prefix, name, 100, pctls[21])\n\t}\n\n\t\/\/ Application stats\n\tctrs := getAllCounters()\n\tfor name, val := range ctrs {\n\t\tfmt.Fprintf(w, \"%s%s %d\\n\", prefix, name, val)\n\t}\n}\n\n\/\/ Percentiles go by 5% percentile steps from min to max. We report all of them even though it's\n\/\/ likely only min, 25th, 50th, 75th, 95th, 99th, and max will be used. It's assumed the metric\n\/\/ poller that is consuming this output will choose to only report to the metrics system what it\n\/\/ considers useful information.\n\/\/\n\/\/ Slice layout:\n\/\/ [0]: min (0th)\n\/\/ [1]: 5th\n\/\/ [n]: 5n\n\/\/ [19]: 95th\n\/\/ [20]: 99th\n\/\/ [21]: max (100th)\nfunc hdatPercentiles(dat *hdat) []uint64 {\n\tbuf := dat.buf\n\tkept := *dat.kept\n\n\tif kept == 0 {\n\t\treturn nil\n\t}\n\tif kept < uint64(len(buf)) {\n\t\tbuf = buf[:kept]\n\t}\n\n\tsort.Sort(uint64slice(buf))\n\n\tpctls := make([]uint64, 22)\n\tpctls[0] = *dat.min\n\tpctls[21] = *dat.max\n\n\tfor i := 1; i < 20; i++ {\n\t\tidx := len(buf) * i \/ 20\n\t\tpctls[i] = buf[idx]\n\t}\n\n\tidx := len(buf) * 99 \/ 100\n\tpctls[20] = buf[idx]\n\n\treturn pctls\n}\n\nfunc pausePercentiles(pauses []uint64, ngc uint32) []uint64 {\n\tif ngc < uint32(len(pauses)) {\n\t\tpauses = pauses[:ngc]\n\t}\n\n\tsort.Sort(uint64slice(pauses))\n\n\tpctls := make([]uint64, 22)\n\tpctls[0] = pauses[0]\n\tpctls[21] = pauses[len(pauses)-1]\n\n\tfor i := 1; i < 20; i++ {\n\t\tidx := len(pauses) * i \/ 20\n\t\tpctls[i] = pauses[idx]\n\t}\n\n\tidx := len(pauses) * 99 \/ 100\n\tpctls[20] = pauses[idx]\n\n\treturn pctls\n}\n\ntype uint64slice []uint64\n\nfunc (u uint64slice) Len() int { return len(u) }\nfunc (u uint64slice) Swap(i, j int) { u[i], u[j] = u[j], u[i] }\nfunc (u uint64slice) Less(i, j int) bool { return u[i] < u[j] }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The grok_exporter Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fswatcher\n\nimport (\n\t\"fmt\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype watcher struct {\n\tfd int\n}\n\ntype fileWithReader struct {\n\tfile *os.File\n\treader *lineReader\n}\n\nfunc (w *watcher) unwatchDir(dir *Dir) error {\n\t\/\/ After calling eventProducerLoop.Close(), we need to call inotify_rm_watch()\n\t\/\/ in order to terminate the inotify loop. See eventProducerLoop.Close().\n\tsuccess, err := syscall.InotifyRmWatch(w.fd, uint32(dir.wd))\n\tif success != 0 || err != nil {\n\t\treturn fmt.Errorf(\"inotify_rm_watch(%q) failed: status=%v, err=%v\", dir.path, success, err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (w *watcher) Close() error {\n\terr := syscall.Close(w.fd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to close the inotify file descriptor: %v\", err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc unwatchDirByEvent(t *fileTailer, event inotifyEvent) {\n\twatchedDirsAfter := make([]*Dir, 0, len(t.watchedDirs)-1)\n\tfor _, existing := range t.watchedDirs {\n\t\tif existing.wd != int(event.Wd) {\n\t\t\twatchedDirsAfter = append(watchedDirsAfter, existing)\n\t\t}\n\t}\n\tt.watchedDirs = watchedDirsAfter\n}\n\nfunc (w *watcher) runFseventProducerLoop() fseventProducerLoop {\n\treturn runInotifyLoop(w.fd)\n}\n\nfunc initWatcher() (fswatcher, Error) {\n\tfd, err := syscall.InotifyInit1(syscall.IN_CLOEXEC)\n\tif err != nil {\n\t\treturn nil, NewError(NotSpecified, err, \"inotify_init1() failed\")\n\t}\n\treturn &watcher{fd: fd}, nil\n}\n\nfunc (w *watcher) watchDir(path string) (*Dir, Error) {\n\tvar (\n\t\tdir *Dir\n\t\terr error\n\t\tErr Error\n\t)\n\tdir, Err = newDir(path)\n\tif Err != nil {\n\t\treturn nil, Err\n\t}\n\tdir.wd, err = syscall.InotifyAddWatch(w.fd, path, syscall.IN_MODIFY|syscall.IN_MOVED_FROM|syscall.IN_MOVED_TO|syscall.IN_DELETE|syscall.IN_CREATE)\n\tif err != nil {\n\t\treturn nil, NewErrorf(NotSpecified, err, \"%q: inotify_add_watch() failed\", path)\n\t}\n\treturn dir, nil\n}\n\nfunc newDir(path string) (*Dir, Error) {\n\treturn &Dir{path: path}, nil\n}\n\nfunc (w *watcher) watchFile(_ fileMeta) Error {\n\t\/\/ nothing to do, because on Linux we watch the directory and don't need to watch individual files.\n\treturn nil\n}\n\nfunc findDir(t *fileTailer, event inotifyEvent) (*Dir, Error) {\n\tfor _, dir := range t.watchedDirs {\n\t\tif dir.wd == int(event.Wd) {\n\t\t\treturn dir, nil\n\t\t}\n\t}\n\t\/\/ We should always find the directory in watchedDirs if we receive an inotifyEvent.\n\t\/\/ The following code should never be executed.\n\t\/\/ However, Github issue #124 is a bug report where we receive an inotifyEvent for an unwatched directory.\n\t\/\/ Create a detailed error message to help figuring out why this happens.\n\tvar (\n\t\terrMsg strings.Builder\n\t\tfirst = true\n\t)\n\tfmt.Fprintf(&errMsg, \"watch list inconsistent: cannot find directory for file system event %q in the list of watched directories [\", event.String())\n\tfor _, dir := range t.watchedDirs {\n\t\tif !first {\n\t\t\tfmt.Fprintf(&errMsg, \", \")\n\t\t}\n\t\tfmt.Fprintf(&errMsg, \"%q\", dir.Path())\n\t\tfirst = false\n\t}\n\tfmt.Fprintf(&errMsg, \"]\")\n\treturn nil, NewError(NotSpecified, nil, errMsg.String())\n}\n\nfunc (w *watcher) processEvent(t *fileTailer, fsevent fsevent, log logrus.FieldLogger) Error {\n\tevent, ok := fsevent.(inotifyEvent)\n\tif !ok {\n\t\treturn NewErrorf(NotSpecified, nil, \"received a file system event of unknown type %T\", event)\n\t}\n\tdir, Err := findDir(t, event)\n\tif Err != nil {\n\t\treturn Err\n\t}\n\tdirLogger := log.WithField(\"directory\", dir.path)\n\tdirLogger.Debugf(\"received event: %v\", event)\n\tif event.Mask&syscall.IN_IGNORED == syscall.IN_IGNORED {\n\t\tunwatchDirByEvent(t, event) \/\/ need to remove it from watchedDirs, because otherwise we close the removed dir on shutdown which causes an error\n\t\treturn NewErrorf(NotSpecified, nil, \"%s: directory was removed while being watched\", dir.path)\n\t}\n\tif event.Mask&syscall.IN_MODIFY == syscall.IN_MODIFY {\n\t\tfile, ok := t.watchedFiles[filepath.Join(dir.path, event.Name)]\n\t\tif !ok {\n\t\t\treturn nil \/\/ unrelated file was modified\n\t\t}\n\t\ttruncated, err := isTruncated(file.file)\n\t\tif err != nil {\n\t\t\treturn NewErrorf(NotSpecified, err, \"%v: seek() or stat() failed\", file.file.Name())\n\t\t}\n\t\tif truncated {\n\t\t\t_, err = file.file.Seek(0, io.SeekStart)\n\t\t\tif err != nil {\n\t\t\t\treturn NewErrorf(NotSpecified, err, \"%v: seek() failed\", file.file.Name())\n\t\t\t}\n\t\t\tfile.reader.Clear()\n\t\t}\n\t\treadErr := t.readNewLines(file, dirLogger)\n\t\tif readErr != nil {\n\t\t\treturn readErr\n\t\t}\n\t}\n\tif event.Mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM || event.Mask&syscall.IN_DELETE == syscall.IN_DELETE || event.Mask&syscall.IN_CREATE == syscall.IN_CREATE || event.Mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {\n\t\t\/\/ There are a lot of corner cases here:\n\t\t\/\/ * a file is renamed, but still matches the pattern so we continue watching it (MOVED_FROM followed by MOVED_TO)\n\t\t\/\/ * a file is created overwriting an existing file\n\t\t\/\/ * a file is moved to the watched directory overwriting an existing file\n\t\t\/\/ Trying to figure out what happened from the events would be error prone.\n\t\t\/\/ Therefore, we don't care which of the above events we received, we just update our watched files with the current\n\t\t\/\/ state of the watched directory.\n\t\terr := t.syncFilesInDir(dir, true, dirLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc isTruncated(file *os.File) (bool, error) {\n\tcurrentPos, err := file.Seek(0, io.SeekCurrent)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfileInfo, err := file.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn currentPos > fileInfo.Size(), nil\n}\n\nfunc findSameFile(t *fileTailer, file os.FileInfo, _ string) (*fileWithReader, Error) {\n\tvar (\n\t\tfileInfo os.FileInfo\n\t\terr error\n\t)\n\tfor _, watchedFile := range t.watchedFiles {\n\t\tfileInfo, err = watchedFile.file.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, NewErrorf(NotSpecified, err, \"%v: stat failed\", watchedFile.file.Name())\n\t\t}\n\t\tif os.SameFile(fileInfo, file) {\n\t\t\treturn watchedFile, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n<commit_msg>#124 improved error message for file system watcher<commit_after>\/\/ Copyright 2019 The grok_exporter Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fswatcher\n\nimport (\n\t\"fmt\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype watcher struct {\n\tfd int\n}\n\ntype fileWithReader struct {\n\tfile *os.File\n\treader *lineReader\n}\n\nfunc (w *watcher) unwatchDir(dir *Dir) error {\n\t\/\/ After calling eventProducerLoop.Close(), we need to call inotify_rm_watch()\n\t\/\/ in order to terminate the inotify loop. See eventProducerLoop.Close().\n\tsuccess, err := syscall.InotifyRmWatch(w.fd, uint32(dir.wd))\n\tif success != 0 || err != nil {\n\t\treturn fmt.Errorf(\"inotify_rm_watch(%q) failed: status=%v, err=%v\", dir.path, success, err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (w *watcher) Close() error {\n\terr := syscall.Close(w.fd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to close the inotify file descriptor: %v\", err)\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc unwatchDirByEvent(t *fileTailer, event inotifyEvent) {\n\twatchedDirsAfter := make([]*Dir, 0, len(t.watchedDirs)-1)\n\tfor _, existing := range t.watchedDirs {\n\t\tif existing.wd != int(event.Wd) {\n\t\t\twatchedDirsAfter = append(watchedDirsAfter, existing)\n\t\t}\n\t}\n\tt.watchedDirs = watchedDirsAfter\n}\n\nfunc (w *watcher) runFseventProducerLoop() fseventProducerLoop {\n\treturn runInotifyLoop(w.fd)\n}\n\nfunc initWatcher() (fswatcher, Error) {\n\tfd, err := syscall.InotifyInit1(syscall.IN_CLOEXEC)\n\tif err != nil {\n\t\treturn nil, NewError(NotSpecified, err, \"inotify_init1() failed\")\n\t}\n\treturn &watcher{fd: fd}, nil\n}\n\nfunc (w *watcher) watchDir(path string) (*Dir, Error) {\n\tvar (\n\t\tdir *Dir\n\t\terr error\n\t\tErr Error\n\t)\n\tdir, Err = newDir(path)\n\tif Err != nil {\n\t\treturn nil, Err\n\t}\n\tdir.wd, err = syscall.InotifyAddWatch(w.fd, path, syscall.IN_MODIFY|syscall.IN_MOVED_FROM|syscall.IN_MOVED_TO|syscall.IN_DELETE|syscall.IN_CREATE)\n\tif err != nil {\n\t\treturn nil, NewErrorf(NotSpecified, err, \"%q: inotify_add_watch() failed\", path)\n\t}\n\treturn dir, nil\n}\n\nfunc newDir(path string) (*Dir, Error) {\n\treturn &Dir{path: path}, nil\n}\n\nfunc (w *watcher) watchFile(_ fileMeta) Error {\n\t\/\/ nothing to do, because on Linux we watch the directory and don't need to watch individual files.\n\treturn nil\n}\n\nfunc findDir(t *fileTailer, event inotifyEvent) (*Dir, Error) {\n\tfor _, dir := range t.watchedDirs {\n\t\tif dir.wd == int(event.Wd) {\n\t\t\treturn dir, nil\n\t\t}\n\t}\n\t\/\/ We should always find the directory in watchedDirs if we receive an inotifyEvent.\n\t\/\/ The following code should never be executed.\n\t\/\/ However, Github issue #124 is a bug report where we receive an inotifyEvent for an unwatched directory.\n\t\/\/ Create a detailed error message to help figuring out why this happens.\n\tvar (\n\t\terrMsg strings.Builder\n\t\tfirst = true\n\t)\n\tfmt.Fprintf(&errMsg, \"watch list inconsistent: cannot find directory for file system event %q with watch descriptor %d in the list of watched directories [\", event.String(), event.Wd)\n\tfor _, dir := range t.watchedDirs {\n\t\tif !first {\n\t\t\tfmt.Fprintf(&errMsg, \", \")\n\t\t}\n\t\tfmt.Fprintf(&errMsg, \"Wd=%d (%q)\", dir.wd, dir.Path())\n\t\tfirst = false\n\t}\n\tfmt.Fprintf(&errMsg, \"]\")\n\treturn nil, NewError(NotSpecified, nil, errMsg.String())\n}\n\nfunc (w *watcher) processEvent(t *fileTailer, fsevent fsevent, log logrus.FieldLogger) Error {\n\tevent, ok := fsevent.(inotifyEvent)\n\tif !ok {\n\t\treturn NewErrorf(NotSpecified, nil, \"received a file system event of unknown type %T\", event)\n\t}\n\tdir, Err := findDir(t, event)\n\tif Err != nil {\n\t\treturn Err\n\t}\n\tdirLogger := log.WithField(\"directory\", dir.path)\n\tdirLogger.Debugf(\"received event: %v\", event)\n\tif event.Mask&syscall.IN_IGNORED == syscall.IN_IGNORED {\n\t\tunwatchDirByEvent(t, event) \/\/ need to remove it from watchedDirs, because otherwise we close the removed dir on shutdown which causes an error\n\t\treturn NewErrorf(NotSpecified, nil, \"%s: directory was removed while being watched\", dir.path)\n\t}\n\tif event.Mask&syscall.IN_MODIFY == syscall.IN_MODIFY {\n\t\tfile, ok := t.watchedFiles[filepath.Join(dir.path, event.Name)]\n\t\tif !ok {\n\t\t\treturn nil \/\/ unrelated file was modified\n\t\t}\n\t\ttruncated, err := isTruncated(file.file)\n\t\tif err != nil {\n\t\t\treturn NewErrorf(NotSpecified, err, \"%v: seek() or stat() failed\", file.file.Name())\n\t\t}\n\t\tif truncated {\n\t\t\t_, err = file.file.Seek(0, io.SeekStart)\n\t\t\tif err != nil {\n\t\t\t\treturn NewErrorf(NotSpecified, err, \"%v: seek() failed\", file.file.Name())\n\t\t\t}\n\t\t\tfile.reader.Clear()\n\t\t}\n\t\treadErr := t.readNewLines(file, dirLogger)\n\t\tif readErr != nil {\n\t\t\treturn readErr\n\t\t}\n\t}\n\tif event.Mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM || event.Mask&syscall.IN_DELETE == syscall.IN_DELETE || event.Mask&syscall.IN_CREATE == syscall.IN_CREATE || event.Mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {\n\t\t\/\/ There are a lot of corner cases here:\n\t\t\/\/ * a file is renamed, but still matches the pattern so we continue watching it (MOVED_FROM followed by MOVED_TO)\n\t\t\/\/ * a file is created overwriting an existing file\n\t\t\/\/ * a file is moved to the watched directory overwriting an existing file\n\t\t\/\/ Trying to figure out what happened from the events would be error prone.\n\t\t\/\/ Therefore, we don't care which of the above events we received, we just update our watched files with the current\n\t\t\/\/ state of the watched directory.\n\t\terr := t.syncFilesInDir(dir, true, dirLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc isTruncated(file *os.File) (bool, error) {\n\tcurrentPos, err := file.Seek(0, io.SeekCurrent)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfileInfo, err := file.Stat()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn currentPos > fileInfo.Size(), nil\n}\n\nfunc findSameFile(t *fileTailer, file os.FileInfo, _ string) (*fileWithReader, Error) {\n\tvar (\n\t\tfileInfo os.FileInfo\n\t\terr error\n\t)\n\tfor _, watchedFile := range t.watchedFiles {\n\t\tfileInfo, err = watchedFile.file.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, NewErrorf(NotSpecified, err, \"%v: stat failed\", watchedFile.file.Name())\n\t\t}\n\t\tif os.SameFile(fileInfo, file) {\n\t\t\treturn watchedFile, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/route53\"\n)\n\nfunc resourceAwsRoute53Record() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsRoute53RecordCreate,\n\t\tRead: resourceAwsRoute53RecordRead,\n\t\tDelete: resourceAwsRoute53RecordDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"zone_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"ttl\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"records\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\tzone := d.Get(\"zone_id\").(string)\n\n\tzoneRecord, err := conn.GetHostedZone(zone)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check if the current record name contains the zone suffix.\n\t\/\/ If it does not, add the zone name to form a fully qualified name\n\t\/\/ and keep AWS happy.\n\trecordName := d.Get(\"name\").(string)\n\tzoneName := strings.Trim(zoneRecord.HostedZone.Name, \".\")\n\tif ok := strings.HasSuffix(recordName, zoneName); !ok {\n\t\td.Set(\"name\", strings.Join([]string{recordName, zoneName}, \".\"))\n\t}\n\n\t\/\/ Get the record\n\trec, err := resourceAwsRoute53RecordBuildSet(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the new records. We abuse StateChangeConf for this to\n\t\/\/ retry for us since Route53 sometimes returns errors about another\n\t\/\/ operation happening at the same time.\n\treq := &route53.ChangeResourceRecordSetsRequest{\n\t\tComment: \"Managed by Terraform\",\n\t\tChanges: []route53.Change{\n\t\t\troute53.Change{\n\t\t\t\tAction: \"UPSERT\",\n\t\t\t\tRecord: *rec,\n\t\t\t},\n\t\t},\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating resource records for zone: %s, name: %s\",\n\t\tzone, d.Get(\"name\").(string))\n\n\twait := resource.StateChangeConf{\n\t\tPending: []string{\"rejected\"},\n\t\tTarget: \"accepted\",\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 1 * time.Second,\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\tresp, err := conn.ChangeResourceRecordSets(zone, req)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"PriorRequestNotComplete\") {\n\t\t\t\t\t\/\/ There is some pending operation, so just retry\n\t\t\t\t\t\/\/ in a bit.\n\t\t\t\t\treturn nil, \"rejected\", nil\n\t\t\t\t}\n\n\t\t\t\treturn nil, \"failure\", err\n\t\t\t}\n\n\t\t\treturn resp.ChangeInfo, \"accepted\", nil\n\t\t},\n\t}\n\n\trespRaw, err := wait.WaitForState()\n\tif err != nil {\n\t\treturn err\n\t}\n\tchangeInfo := respRaw.(route53.ChangeInfo)\n\n\t\/\/ Generate an ID\n\td.SetId(fmt.Sprintf(\"%s_%s_%s\", zone, d.Get(\"name\").(string), d.Get(\"type\").(string)))\n\n\t\/\/ Wait until we are done\n\twait = resource.StateChangeConf{\n\t\tDelay: 30 * time.Second,\n\t\tPending: []string{\"PENDING\"},\n\t\tTarget: \"INSYNC\",\n\t\tTimeout: 10 * time.Minute,\n\t\tMinTimeout: 5 * time.Second,\n\t\tRefresh: func() (result interface{}, state string, err error) {\n\t\t\treturn resourceAwsRoute53Wait(conn, changeInfo.ID)\n\t\t},\n\t}\n\t_, err = wait.WaitForState()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\tzone := d.Get(\"zone_id\").(string)\n\tlopts := &route53.ListOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tType: d.Get(\"type\").(string),\n\t}\n\tresp, err := conn.ListResourceRecordSets(zone, lopts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Scan for a matching record\n\tfound := false\n\tfor _, record := range resp.Records {\n\t\tif route53.FQDN(record.Name) != route53.FQDN(lopts.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(record.Type) != strings.ToUpper(lopts.Type) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound = true\n\n\t\td.Set(\"records\", record.Records)\n\t\td.Set(\"ttl\", record.TTL)\n\n\t\tbreak\n\t}\n\n\tif !found {\n\t\td.SetId(\"\")\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\t\/\/ Get the records\n\trec, err := resourceAwsRoute53RecordBuildSet(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the new records\n\treq := &route53.ChangeResourceRecordSetsRequest{\n\t\tComment: \"Deleted by Terraform\",\n\t\tChanges: []route53.Change{\n\t\t\troute53.Change{\n\t\t\t\tAction: \"DELETE\",\n\t\t\t\tRecord: *rec,\n\t\t\t},\n\t\t},\n\t}\n\tzone := d.Get(\"zone_id\").(string)\n\tlog.Printf(\"[DEBUG] Deleting resource records for zone: %s, name: %s\",\n\t\tzone, d.Get(\"name\").(string))\n\n\twait := resource.StateChangeConf{\n\t\tPending: []string{\"rejected\"},\n\t\tTarget: \"accepted\",\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 1 * time.Second,\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\t_, err := conn.ChangeResourceRecordSets(zone, req)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"PriorRequestNotComplete\") {\n\t\t\t\t\t\/\/ There is some pending operation, so just retry\n\t\t\t\t\t\/\/ in a bit.\n\t\t\t\t\treturn 42, \"rejected\", nil\n\t\t\t\t}\n\n\t\t\t\tif strings.Contains(err.Error(), \"InvalidChangeBatch\") {\n\t\t\t\t\t\/\/ This means that the record is already gone.\n\t\t\t\t\treturn 42, \"accepted\", nil\n\t\t\t\t}\n\n\t\t\t\treturn 42, \"failure\", err\n\t\t\t}\n\n\t\t\treturn 42, \"accepted\", nil\n\t\t},\n\t}\n\n\tif _, err := wait.WaitForState(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordBuildSet(d *schema.ResourceData) (*route53.ResourceRecordSet, error) {\n\trecs := d.Get(\"records\").(*schema.Set).List()\n\trecords := make([]string, 0, len(recs))\n\n\tfor _, r := range recs {\n\t\trecords = append(records, r.(string))\n\t}\n\n\trec := &route53.ResourceRecordSet{\n\t\tName: d.Get(\"name\").(string),\n\t\tType: d.Get(\"type\").(string),\n\t\tTTL: d.Get(\"ttl\").(int),\n\t\tRecords: records,\n\t}\n\treturn rec, nil\n}\n<commit_msg>simplify HasSuffix check<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/route53\"\n)\n\nfunc resourceAwsRoute53Record() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsRoute53RecordCreate,\n\t\tRead: resourceAwsRoute53RecordRead,\n\t\tDelete: resourceAwsRoute53RecordDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"zone_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"ttl\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"records\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\tzone := d.Get(\"zone_id\").(string)\n\n\tzoneRecord, err := conn.GetHostedZone(zone)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check if the current record name contains the zone suffix.\n\t\/\/ If it does not, add the zone name to form a fully qualified name\n\t\/\/ and keep AWS happy.\n\trecordName := d.Get(\"name\").(string)\n\tzoneName := strings.Trim(zoneRecord.HostedZone.Name, \".\")\n\tif !strings.HasSuffix(recordName, zoneName) {\n\t\td.Set(\"name\", strings.Join([]string{recordName, zoneName}, \".\"))\n\t}\n\n\t\/\/ Get the record\n\trec, err := resourceAwsRoute53RecordBuildSet(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the new records. We abuse StateChangeConf for this to\n\t\/\/ retry for us since Route53 sometimes returns errors about another\n\t\/\/ operation happening at the same time.\n\treq := &route53.ChangeResourceRecordSetsRequest{\n\t\tComment: \"Managed by Terraform\",\n\t\tChanges: []route53.Change{\n\t\t\troute53.Change{\n\t\t\t\tAction: \"UPSERT\",\n\t\t\t\tRecord: *rec,\n\t\t\t},\n\t\t},\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating resource records for zone: %s, name: %s\",\n\t\tzone, d.Get(\"name\").(string))\n\n\twait := resource.StateChangeConf{\n\t\tPending: []string{\"rejected\"},\n\t\tTarget: \"accepted\",\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 1 * time.Second,\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\tresp, err := conn.ChangeResourceRecordSets(zone, req)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"PriorRequestNotComplete\") {\n\t\t\t\t\t\/\/ There is some pending operation, so just retry\n\t\t\t\t\t\/\/ in a bit.\n\t\t\t\t\treturn nil, \"rejected\", nil\n\t\t\t\t}\n\n\t\t\t\treturn nil, \"failure\", err\n\t\t\t}\n\n\t\t\treturn resp.ChangeInfo, \"accepted\", nil\n\t\t},\n\t}\n\n\trespRaw, err := wait.WaitForState()\n\tif err != nil {\n\t\treturn err\n\t}\n\tchangeInfo := respRaw.(route53.ChangeInfo)\n\n\t\/\/ Generate an ID\n\td.SetId(fmt.Sprintf(\"%s_%s_%s\", zone, d.Get(\"name\").(string), d.Get(\"type\").(string)))\n\n\t\/\/ Wait until we are done\n\twait = resource.StateChangeConf{\n\t\tDelay: 30 * time.Second,\n\t\tPending: []string{\"PENDING\"},\n\t\tTarget: \"INSYNC\",\n\t\tTimeout: 10 * time.Minute,\n\t\tMinTimeout: 5 * time.Second,\n\t\tRefresh: func() (result interface{}, state string, err error) {\n\t\t\treturn resourceAwsRoute53Wait(conn, changeInfo.ID)\n\t\t},\n\t}\n\t_, err = wait.WaitForState()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\tzone := d.Get(\"zone_id\").(string)\n\tlopts := &route53.ListOpts{\n\t\tName: d.Get(\"name\").(string),\n\t\tType: d.Get(\"type\").(string),\n\t}\n\tresp, err := conn.ListResourceRecordSets(zone, lopts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Scan for a matching record\n\tfound := false\n\tfor _, record := range resp.Records {\n\t\tif route53.FQDN(record.Name) != route53.FQDN(lopts.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(record.Type) != strings.ToUpper(lopts.Type) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound = true\n\n\t\td.Set(\"records\", record.Records)\n\t\td.Set(\"ttl\", record.TTL)\n\n\t\tbreak\n\t}\n\n\tif !found {\n\t\td.SetId(\"\")\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).route53\n\n\t\/\/ Get the records\n\trec, err := resourceAwsRoute53RecordBuildSet(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create the new records\n\treq := &route53.ChangeResourceRecordSetsRequest{\n\t\tComment: \"Deleted by Terraform\",\n\t\tChanges: []route53.Change{\n\t\t\troute53.Change{\n\t\t\t\tAction: \"DELETE\",\n\t\t\t\tRecord: *rec,\n\t\t\t},\n\t\t},\n\t}\n\tzone := d.Get(\"zone_id\").(string)\n\tlog.Printf(\"[DEBUG] Deleting resource records for zone: %s, name: %s\",\n\t\tzone, d.Get(\"name\").(string))\n\n\twait := resource.StateChangeConf{\n\t\tPending: []string{\"rejected\"},\n\t\tTarget: \"accepted\",\n\t\tTimeout: 5 * time.Minute,\n\t\tMinTimeout: 1 * time.Second,\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\t_, err := conn.ChangeResourceRecordSets(zone, req)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"PriorRequestNotComplete\") {\n\t\t\t\t\t\/\/ There is some pending operation, so just retry\n\t\t\t\t\t\/\/ in a bit.\n\t\t\t\t\treturn 42, \"rejected\", nil\n\t\t\t\t}\n\n\t\t\t\tif strings.Contains(err.Error(), \"InvalidChangeBatch\") {\n\t\t\t\t\t\/\/ This means that the record is already gone.\n\t\t\t\t\treturn 42, \"accepted\", nil\n\t\t\t\t}\n\n\t\t\t\treturn 42, \"failure\", err\n\t\t\t}\n\n\t\t\treturn 42, \"accepted\", nil\n\t\t},\n\t}\n\n\tif _, err := wait.WaitForState(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsRoute53RecordBuildSet(d *schema.ResourceData) (*route53.ResourceRecordSet, error) {\n\trecs := d.Get(\"records\").(*schema.Set).List()\n\trecords := make([]string, 0, len(recs))\n\n\tfor _, r := range recs {\n\t\trecords = append(records, r.(string))\n\t}\n\n\trec := &route53.ResourceRecordSet{\n\t\tName: d.Get(\"name\").(string),\n\t\tType: d.Get(\"type\").(string),\n\t\tTTL: d.Get(\"ttl\").(int),\n\t\tRecords: records,\n\t}\n\treturn rec, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resourceAwsSecurityGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSecurityGroupCreate,\n\t\tRead: resourceAwsSecurityGroupRead,\n\t\tUpdate: resourceAwsSecurityGroupUpdate,\n\t\tDelete: resourceAwsSecurityGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ingress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupRuleHash,\n\t\t\t},\n\n\t\t\t\"egress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupRuleHash,\n\t\t\t},\n\n\t\t\t\"owner_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsecurityGroupOpts := ec2.SecurityGroup{\n\t\tName: d.Get(\"name\").(string),\n\t}\n\n\tif v := d.Get(\"vpc_id\"); v != nil {\n\t\tsecurityGroupOpts.VpcId = v.(string)\n\t}\n\n\tif v := d.Get(\"description\"); v != nil {\n\t\tsecurityGroupOpts.Description = v.(string)\n\t}\n\n\tlog.Printf(\n\t\t\"[DEBUG] Security Group create configuration: %#v\", securityGroupOpts)\n\tcreateResp, err := ec2conn.CreateSecurityGroup(securityGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Security Group: %s\", err)\n\t}\n\n\td.SetId(createResp.Id)\n\n\tlog.Printf(\"[INFO] Security Group ID: %s\", d.Id())\n\n\t\/\/ Wait for the security group to truly exist\n\tlog.Printf(\n\t\t\"[DEBUG] Waiting for Security Group (%s) to exist\",\n\t\td.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"\"},\n\t\tTarget: \"exists\",\n\t\tRefresh: SGStateRefreshFunc(ec2conn, d.Id()),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for Security Group (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsSecurityGroupUpdate(d, meta)\n}\n\nfunc resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tsg := sgRaw.(*ec2.SecurityGroupInfo)\n\n\tingressRules := resourceAwsSecurityGroupIPPermGather(d, sg.IPPerms)\n\tegressRules := resourceAwsSecurityGroupIPPermGather(d, sg.IPPermsEgress)\n\n\td.Set(\"description\", sg.Description)\n\td.Set(\"name\", sg.Name)\n\td.Set(\"vpc_id\", sg.VpcId)\n\td.Set(\"owner_id\", sg.OwnerId)\n\td.Set(\"ingress\", ingressRules)\n\td.Set(\"egress\", egressRules)\n\td.Set(\"tags\", tagsToMap(sg.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tgroup := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup\n\n\terr = resourceAwsSecurityGroupUpdateRules(d, \"ingress\", meta, group)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.Get(\"vpc_id\") != nil {\n\t\terr = resourceAwsSecurityGroupUpdateRules(d, \"egress\", meta, group)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := setTags(ec2conn, d); err != nil {\n\t\treturn err\n\t}\n\n\td.SetPartial(\"tags\")\n\n\treturn resourceAwsSecurityGroupRead(d, meta)\n}\n\nfunc resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tlog.Printf(\"[DEBUG] Security Group destroy: %v\", d.Id())\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\t_, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch ec2err.Code {\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\treturn nil\n\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\/\/ If it is a dependency violation, we want to retry\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\t\/\/ Any other error, we want to quit the retry loop immediately\n\t\t\t\treturn resource.RetryError{err}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc resourceAwsSecurityGroupRuleHash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"protocol\"].(string)))\n\n\t\/\/ We need to make sure to sort the strings below so that we always\n\t\/\/ generate the same hash code no matter what is in the set.\n\tif v, ok := m[\"cidr_blocks\"]; ok {\n\t\tvs := v.([]interface{})\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\tif v, ok := m[\"security_groups\"]; ok {\n\t\tvs := v.(*schema.Set).List()\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\n\treturn hashcode.String(buf.String())\n}\n\nfunc resourceAwsSecurityGroupIPPermGather(d *schema.ResourceData, permissions []ec2.IPPerm) []map[string]interface{} {\n\truleMap := make(map[string]map[string]interface{})\n\tfor _, perm := range permissions {\n\t\tk := fmt.Sprintf(\"%s-%d-%d\", perm.Protocol, perm.FromPort, perm.ToPort)\n\t\tm, ok := ruleMap[k]\n\t\tif !ok {\n\t\t\tm = make(map[string]interface{})\n\t\t\truleMap[k] = m\n\t\t}\n\n\t\tm[\"from_port\"] = perm.FromPort\n\t\tm[\"to_port\"] = perm.ToPort\n\t\tm[\"protocol\"] = perm.Protocol\n\n\t\tif len(perm.SourceIPs) > 0 {\n\t\t\traw, ok := m[\"cidr_blocks\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(perm.SourceIPs))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, perm.SourceIPs...)\n\t\t\tm[\"cidr_blocks\"] = list\n\t\t}\n\n\t\tvar groups []string\n\t\tif len(perm.SourceGroups) > 0 {\n\t\t\tgroups = flattenSecurityGroups(perm.SourceGroups)\n\t\t}\n\t\tfor i, id := range groups {\n\t\t\tif id == d.Id() {\n\t\t\t\tgroups[i], groups = groups[len(groups)-1], groups[:len(groups)-1]\n\t\t\t\tm[\"self\"] = true\n\t\t\t}\n\t\t}\n\n\t\tif len(groups) > 0 {\n\t\t\traw, ok := m[\"security_groups\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(groups))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, groups...)\n\t\t\tm[\"security_groups\"] = list\n\t\t}\n\t}\n\trules := make([]map[string]interface{}, 0, len(ruleMap))\n\tfor _, m := range ruleMap {\n\t\trules = append(rules, m)\n\t}\n\n\treturn rules\n}\n\nfunc resourceAwsSecurityGroupUpdateRules(\n\td *schema.ResourceData, ruleset string,\n\tmeta interface{}, group ec2.SecurityGroup) error {\n\tif d.HasChange(ruleset) {\n\t\to, n := d.GetChange(ruleset)\n\t\tif o == nil {\n\t\t\to = new(schema.Set)\n\t\t}\n\t\tif n == nil {\n\t\t\tn = new(schema.Set)\n\t\t}\n\n\t\tos := o.(*schema.Set)\n\t\tns := n.(*schema.Set)\n\n\t\tremove := expandIPPerms(d.Id(), os.Difference(ns).List())\n\t\tadd := expandIPPerms(d.Id(), ns.Difference(os).List())\n\n\t\t\/\/ TODO: We need to handle partial state better in the in-between\n\t\t\/\/ in this update.\n\n\t\t\/\/ TODO: It'd be nicer to authorize before removing, but then we have\n\t\t\/\/ to deal with complicated unrolling to get individual CIDR blocks\n\t\t\/\/ to avoid authorizing already authorized sources. Removing before\n\t\t\/\/ adding is easier here, and Terraform should be fast enough to\n\t\t\/\/ not have service issues.\n\n\t\tif len(remove) > 0 || len(add) > 0 {\n\t\t\tec2conn := meta.(*AWSClient).ec2conn\n\n\t\t\tif len(remove) > 0 {\n\t\t\t\t\/\/ Revoke the old rules\n\t\t\t\trevoke := ec2conn.RevokeSecurityGroup\n\t\t\t\tif ruleset == \"egress\" {\n\t\t\t\t\trevoke = ec2conn.RevokeSecurityGroupEgress\n\t\t\t\t}\n\t\t\t\tif _, err := revoke(group, remove); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"Error revoking security group %s rules: %s\",\n\t\t\t\t\t\truleset, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(add) > 0 {\n\t\t\t\t\/\/ Authorize the new rules\n\t\t\t\tauthorize := ec2conn.AuthorizeSecurityGroup\n\t\t\t\tif ruleset == \"egress\" {\n\t\t\t\t\tauthorize = ec2conn.AuthorizeSecurityGroupEgress\n\t\t\t\t}\n\t\t\t\tif _, err := authorize(group, add); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"Error authorizing security group %s rules: %s\",\n\t\t\t\t\t\truleset, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ a security group.\nfunc SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tsgs := []ec2.SecurityGroup{ec2.SecurityGroup{Id: id}}\n\t\tresp, err := conn.SecurityGroups(sgs, nil)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(*ec2.Error); ok {\n\t\t\t\tif ec2err.Code == \"InvalidSecurityGroupID.NotFound\" ||\n\t\t\t\t\tec2err.Code == \"InvalidGroup.NotFound\" {\n\t\t\t\t\tresp = nil\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error on SGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tgroup := &resp.Groups[0]\n\t\treturn group, \"exists\", nil\n\t}\n}\n<commit_msg>providers\/aws: egress should be computed<commit_after>package aws\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n)\n\nfunc resourceAwsSecurityGroup() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSecurityGroupCreate,\n\t\tRead: resourceAwsSecurityGroupRead,\n\t\tUpdate: resourceAwsSecurityGroupUpdate,\n\t\tDelete: resourceAwsSecurityGroupDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"description\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\n\t\t\t\"vpc_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ingress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupRuleHash,\n\t\t\t},\n\n\t\t\t\"egress\": &schema.Schema{\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"from_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"to_port\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"protocol\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"cidr_blocks\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"security_groups\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\t\t\t\tSet: func(v interface{}) int {\n\t\t\t\t\t\t\t\treturn hashcode.String(v.(string))\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"self\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tDefault: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSet: resourceAwsSecurityGroupRuleHash,\n\t\t\t},\n\n\t\t\t\"owner_id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsecurityGroupOpts := ec2.SecurityGroup{\n\t\tName: d.Get(\"name\").(string),\n\t}\n\n\tif v := d.Get(\"vpc_id\"); v != nil {\n\t\tsecurityGroupOpts.VpcId = v.(string)\n\t}\n\n\tif v := d.Get(\"description\"); v != nil {\n\t\tsecurityGroupOpts.Description = v.(string)\n\t}\n\n\tlog.Printf(\n\t\t\"[DEBUG] Security Group create configuration: %#v\", securityGroupOpts)\n\tcreateResp, err := ec2conn.CreateSecurityGroup(securityGroupOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating Security Group: %s\", err)\n\t}\n\n\td.SetId(createResp.Id)\n\n\tlog.Printf(\"[INFO] Security Group ID: %s\", d.Id())\n\n\t\/\/ Wait for the security group to truly exist\n\tlog.Printf(\n\t\t\"[DEBUG] Waiting for Security Group (%s) to exist\",\n\t\td.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"\"},\n\t\tTarget: \"exists\",\n\t\tRefresh: SGStateRefreshFunc(ec2conn, d.Id()),\n\t\tTimeout: 1 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for Security Group (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsSecurityGroupUpdate(d, meta)\n}\n\nfunc resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tsg := sgRaw.(*ec2.SecurityGroupInfo)\n\n\tingressRules := resourceAwsSecurityGroupIPPermGather(d, sg.IPPerms)\n\tegressRules := resourceAwsSecurityGroupIPPermGather(d, sg.IPPermsEgress)\n\n\td.Set(\"description\", sg.Description)\n\td.Set(\"name\", sg.Name)\n\td.Set(\"vpc_id\", sg.VpcId)\n\td.Set(\"owner_id\", sg.OwnerId)\n\td.Set(\"ingress\", ingressRules)\n\td.Set(\"egress\", egressRules)\n\td.Set(\"tags\", tagsToMap(sg.Tags))\n\n\treturn nil\n}\n\nfunc resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tsgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sgRaw == nil {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tgroup := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup\n\n\terr = resourceAwsSecurityGroupUpdateRules(d, \"ingress\", meta, group)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.Get(\"vpc_id\") != nil {\n\t\terr = resourceAwsSecurityGroupUpdateRules(d, \"egress\", meta, group)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := setTags(ec2conn, d); err != nil {\n\t\treturn err\n\t}\n\n\td.SetPartial(\"tags\")\n\n\treturn resourceAwsSecurityGroupRead(d, meta)\n}\n\nfunc resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {\n\tec2conn := meta.(*AWSClient).ec2conn\n\n\tlog.Printf(\"[DEBUG] Security Group destroy: %v\", d.Id())\n\n\treturn resource.Retry(5*time.Minute, func() error {\n\t\t_, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()})\n\t\tif err != nil {\n\t\t\tec2err, ok := err.(*ec2.Error)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch ec2err.Code {\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\treturn nil\n\t\t\tcase \"DependencyViolation\":\n\t\t\t\t\/\/ If it is a dependency violation, we want to retry\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\t\/\/ Any other error, we want to quit the retry loop immediately\n\t\t\t\treturn resource.RetryError{err}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc resourceAwsSecurityGroupRuleHash(v interface{}) int {\n\tvar buf bytes.Buffer\n\tm := v.(map[string]interface{})\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"from_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%d-\", m[\"to_port\"].(int)))\n\tbuf.WriteString(fmt.Sprintf(\"%s-\", m[\"protocol\"].(string)))\n\n\t\/\/ We need to make sure to sort the strings below so that we always\n\t\/\/ generate the same hash code no matter what is in the set.\n\tif v, ok := m[\"cidr_blocks\"]; ok {\n\t\tvs := v.([]interface{})\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\tif v, ok := m[\"security_groups\"]; ok {\n\t\tvs := v.(*schema.Set).List()\n\t\ts := make([]string, len(vs))\n\t\tfor i, raw := range vs {\n\t\t\ts[i] = raw.(string)\n\t\t}\n\t\tsort.Strings(s)\n\n\t\tfor _, v := range s {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s-\", v))\n\t\t}\n\t}\n\n\treturn hashcode.String(buf.String())\n}\n\nfunc resourceAwsSecurityGroupIPPermGather(d *schema.ResourceData, permissions []ec2.IPPerm) []map[string]interface{} {\n\truleMap := make(map[string]map[string]interface{})\n\tfor _, perm := range permissions {\n\t\tk := fmt.Sprintf(\"%s-%d-%d\", perm.Protocol, perm.FromPort, perm.ToPort)\n\t\tm, ok := ruleMap[k]\n\t\tif !ok {\n\t\t\tm = make(map[string]interface{})\n\t\t\truleMap[k] = m\n\t\t}\n\n\t\tm[\"from_port\"] = perm.FromPort\n\t\tm[\"to_port\"] = perm.ToPort\n\t\tm[\"protocol\"] = perm.Protocol\n\n\t\tif len(perm.SourceIPs) > 0 {\n\t\t\traw, ok := m[\"cidr_blocks\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(perm.SourceIPs))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, perm.SourceIPs...)\n\t\t\tm[\"cidr_blocks\"] = list\n\t\t}\n\n\t\tvar groups []string\n\t\tif len(perm.SourceGroups) > 0 {\n\t\t\tgroups = flattenSecurityGroups(perm.SourceGroups)\n\t\t}\n\t\tfor i, id := range groups {\n\t\t\tif id == d.Id() {\n\t\t\t\tgroups[i], groups = groups[len(groups)-1], groups[:len(groups)-1]\n\t\t\t\tm[\"self\"] = true\n\t\t\t}\n\t\t}\n\n\t\tif len(groups) > 0 {\n\t\t\traw, ok := m[\"security_groups\"]\n\t\t\tif !ok {\n\t\t\t\traw = make([]string, 0, len(groups))\n\t\t\t}\n\t\t\tlist := raw.([]string)\n\n\t\t\tlist = append(list, groups...)\n\t\t\tm[\"security_groups\"] = list\n\t\t}\n\t}\n\trules := make([]map[string]interface{}, 0, len(ruleMap))\n\tfor _, m := range ruleMap {\n\t\trules = append(rules, m)\n\t}\n\n\treturn rules\n}\n\nfunc resourceAwsSecurityGroupUpdateRules(\n\td *schema.ResourceData, ruleset string,\n\tmeta interface{}, group ec2.SecurityGroup) error {\n\tif d.HasChange(ruleset) {\n\t\to, n := d.GetChange(ruleset)\n\t\tif o == nil {\n\t\t\to = new(schema.Set)\n\t\t}\n\t\tif n == nil {\n\t\t\tn = new(schema.Set)\n\t\t}\n\n\t\tos := o.(*schema.Set)\n\t\tns := n.(*schema.Set)\n\n\t\tremove := expandIPPerms(d.Id(), os.Difference(ns).List())\n\t\tadd := expandIPPerms(d.Id(), ns.Difference(os).List())\n\n\t\t\/\/ TODO: We need to handle partial state better in the in-between\n\t\t\/\/ in this update.\n\n\t\t\/\/ TODO: It'd be nicer to authorize before removing, but then we have\n\t\t\/\/ to deal with complicated unrolling to get individual CIDR blocks\n\t\t\/\/ to avoid authorizing already authorized sources. Removing before\n\t\t\/\/ adding is easier here, and Terraform should be fast enough to\n\t\t\/\/ not have service issues.\n\n\t\tif len(remove) > 0 || len(add) > 0 {\n\t\t\tec2conn := meta.(*AWSClient).ec2conn\n\n\t\t\tif len(remove) > 0 {\n\t\t\t\t\/\/ Revoke the old rules\n\t\t\t\trevoke := ec2conn.RevokeSecurityGroup\n\t\t\t\tif ruleset == \"egress\" {\n\t\t\t\t\trevoke = ec2conn.RevokeSecurityGroupEgress\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[DEBUG] Revoking security group %s %s rule: %#v\",\n\t\t\t\t\tgroup, ruleset, remove)\n\t\t\t\tif _, err := revoke(group, remove); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"Error revoking security group %s rules: %s\",\n\t\t\t\t\t\truleset, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(add) > 0 {\n\t\t\t\t\/\/ Authorize the new rules\n\t\t\t\tauthorize := ec2conn.AuthorizeSecurityGroup\n\t\t\t\tif ruleset == \"egress\" {\n\t\t\t\t\tauthorize = ec2conn.AuthorizeSecurityGroupEgress\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[DEBUG] Authorizing security group %s %s rule: %#v\",\n\t\t\t\t\tgroup, ruleset, add)\n\t\t\t\tif _, err := authorize(group, add); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"Error authorizing security group %s rules: %s\",\n\t\t\t\t\t\truleset, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch\n\/\/ a security group.\nfunc SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tsgs := []ec2.SecurityGroup{ec2.SecurityGroup{Id: id}}\n\t\tresp, err := conn.SecurityGroups(sgs, nil)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(*ec2.Error); ok {\n\t\t\t\tif ec2err.Code == \"InvalidSecurityGroupID.NotFound\" ||\n\t\t\t\t\tec2err.Code == \"InvalidGroup.NotFound\" {\n\t\t\t\t\tresp = nil\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error on SGStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tgroup := &resp.Groups[0]\n\t\treturn group, \"exists\", nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fblib\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\n\t\"github.com\/novatrixtech\/go-fbmessenger\/fbmodelsend\"\n)\n\nvar logLevelDebug = false\n\n\/\/ErrInvalidCallToFacebook is specific error when Facebook Messenger returns error after being called\nvar ErrInvalidCallToFacebook = errors.New(\"\")\n\n\/\/MessageTypeResponse is in response to a received message.\nconst MessageTypeResponse = 1\n\n\/\/MessageTypeUpdate is being sent proactively and is not in response to a received message.\nconst MessageTypeUpdate = 2\n\n\/\/MessageTypeMessageTag is non-promotional and is being sent outside the 24-hour standard messaging window with a message tag.\nconst MessageTypeMessageTag = 3\n\n\/\/defineMessageType returns the Message Type description defined by Messenger\nfunc defineMessageType(msgType int) (msgTypeDescription string) {\n\tswitch msgType {\n\tcase 2:\n\t\tmsgTypeDescription = \"UPDATE\"\n\tcase 3:\n\t\tmsgTypeDescription = \"MESSAGE_TAG\"\n\tdefault:\n\t\tmsgTypeDescription = \"RESPONSE\"\n\t}\n\treturn\n}\n\n\/*\nSendTextMessage - Send text message to a recipient on Facebook Messenger\n*\/\nfunc SendTextMessage(text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tletter := new(fbmodelsend.Letter)\n\tletter.Message.Text = text\n\tletter.Recipient.ID = recipient\n\tletter.MessageType = defineMessageType(msgType)\n\terr = sendMessage(letter, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/SendPersonalFinanceUpdateMessage sends a Finance Update information to recipient\nfunc SendPersonalFinanceUpdateMessage(text string, recipient string, accessToken string) (err error) {\n\terr = nil\n\tletter := new(fbmodelsend.Letter)\n\tletter.Message.Text = text\n\tletter.Tag = \"PERSONAL_FINANCE_UPDATE\"\n\tletter.Recipient.ID = recipient\n\tletter.MessageType = defineMessageType(3)\n\terr = sendMessage(letter, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendImageMessage - Sends image message to a recipient on Facebook Messenger\n*\/\nfunc SendImageMessage(url string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmessage := new(fbmodelsend.Letter)\n\tmessage.MessageType = defineMessageType(msgType)\n\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"image\"\n\tattch.Payload.URL = url\n\tmessage.Message.Attachment = attch\n\n\tmessage.Recipient.ID = recipient\n\terr = sendMessage(message, recipient, accessToken)\n\tif err != nil {\n\t\tfmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the image message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendAudioMessage - Sends audio message to a recipient on Facebook Messenger\n*\/\nfunc SendAudioMessage(url string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmessage := new(fbmodelsend.Letter)\n\tmessage.MessageType = defineMessageType(msgType)\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"audio\"\n\tattch.Payload.URL = url\n\tmessage.Message.Attachment = attch\n\n\tmessage.Recipient.ID = recipient\n\terr = sendMessage(message, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the audio message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendTypingMessage - Sends typing message to user\n*\/\nfunc SendTypingMessage(onoff bool, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tsenderAction := new(fbmodelsend.SenderAction)\n\tsenderAction.MessageType = defineMessageType(msgType)\n\tsenderAction.Recipient.ID = recipient\n\tif onoff {\n\t\tsenderAction.SenderActionState = \"typing_on\"\n\t} else {\n\t\tsenderAction.SenderActionState = \"typing_off\"\n\t}\n\terr = sendMessage(senderAction, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the typing message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendGenericTemplateMessage - Sends a generic rich message to Facebook user.\nIt can include text, buttons, URLs Butttons, lists to reply\n*\/\nfunc SendGenericTemplateMessage(template []*fbmodelsend.TemplateElement, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.Recipient.ID = recipient\n\tmsg.MessageType = defineMessageType(msgType)\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"template\"\n\tattch.Payload.TemplateType = \"generic\"\n\tattch.Payload.Elements = template\n\n\tmsg.Message.Attachment = attch\n\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendGenericTemplateMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendButtonMessage - Sends a generic rich message to Facebook user.\nIt can include text, buttons, URLs Butttons, lists to reply\n*\/\nfunc SendButtonMessage(template []*fbmodelsend.Button, text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.Recipient.ID = recipient\n\tmsg.MessageType = defineMessageType(msgType)\n\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"template\"\n\tattch.Payload.TemplateType = \"button\"\n\tattch.Payload.Text = text\n\tattch.Payload.Buttons = template\n\n\tmsg.Message.Attachment = attch\n\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendURLButtonMessage - Sends a message with a button that redirects the user to an external web page.\n*\/\nfunc SendURLButtonMessage(text string, buttonTitle string, URL string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsgElement := new(fbmodelsend.TemplateElement)\n\tmsgElement.Title = text\n\n\topt1 := new(fbmodelsend.Button)\n\topt1.ButtonType = \"web_url\"\n\topt1.Title = buttonTitle\n\topt1.URL = URL\n\n\tbuttons := []*fbmodelsend.Button{opt1}\n\n\tmsgElement.Buttons = buttons\n\telements := []*fbmodelsend.TemplateElement{msgElement}\n\n\terr = SendGenericTemplateMessage(elements, recipient, accessToken, msgType)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendURLButtonMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendShareMessage sends the message along with Share Button\n*\/\nfunc SendShareMessage(title string, subtitle string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsgElement := new(fbmodelsend.TemplateElement)\n\tmsgElement.Title = title\n\tmsgElement.Subtitle = subtitle\n\n\topt1 := new(fbmodelsend.Button)\n\topt1.ButtonType = \"element_share\"\n\tbuttons := []*fbmodelsend.Button{opt1}\n\n\tmsgElement.Buttons = buttons\n\telements := []*fbmodelsend.TemplateElement{msgElement}\n\n\terr = SendGenericTemplateMessage(elements, recipient, accessToken, msgType)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendShareMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n\n}\n\n\/*\nSendShareContent share rich content media and url button\n*\/\nfunc SendShareContent(titleToSender string, subtitleToSender string, imageURLToSender string, titleToRecipient string, subtitleToRecipient string, buttonTitleToRecipient string, imageURLToRecipient string, destinationURL string, recipient string, accessToken string, msgType int) (err error) {\n\n\tbtnRecipient := new(fbmodelsend.Button)\n\tbtnRecipient.ButtonType = \"web_url\"\n\tbtnRecipient.Title = buttonTitleToRecipient\n\tbtnRecipient.URL = destinationURL\n\tarrBtnRecipient := []*fbmodelsend.Button{btnRecipient}\n\n\telementRecipient := new(fbmodelsend.TemplateElement)\n\telementRecipient.Title = titleToRecipient\n\telementRecipient.Subtitle = subtitleToRecipient\n\telementRecipient.ImageURL = imageURLToRecipient\n\telementRecipient.Buttons = arrBtnRecipient\n\tarrElementRecipient := []*fbmodelsend.TemplateElement{elementRecipient}\n\n\tbtnSender := new(fbmodelsend.ButtonSharedContent)\n\tbtnSender.ButtonType = \"element_share\"\n\tbtnSender.ShareContents.Attachment.AttachmentType = \"template\"\n\tbtnSender.ShareContents.Attachment.Payload.TemplateType = \"generic\"\n\tbtnSender.ShareContents.Attachment.Payload.Elements = arrElementRecipient\n\tarrBtnSender := []*fbmodelsend.ButtonSharedContent{btnSender}\n\n\telementSender := new(fbmodelsend.TemplateElementShareContent)\n\telementSender.Title = titleToSender\n\telementSender.Subtitle = subtitleToSender\n\telementSender.ImageURL = imageURLToSender\n\telementSender.Buttons = arrBtnSender\n\tarrElementSender := []*fbmodelsend.TemplateElementShareContent{elementSender}\n\n\tattch := new(fbmodelsend.SharedAttachment)\n\tattch.AttachmentType = \"template\"\n\tattch.Payload.TemplateType = \"generic\"\n\tattch.Payload.Elements = arrElementSender\n\n\tsi := new(fbmodelsend.SharedInvite)\n\tsi.MessageType = defineMessageType(msgType)\n\tsi.Recipient.ID = recipient\n\tsi.Message.Attachment = attch\n\n\terr = sendMessage(si, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendGenericTemplateMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendQuickReply sends small messages in order to get small and quick answers from the users\n*\/\nfunc SendQuickReply(text string, options []*fbmodelsend.QuickReply, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.MessageType = defineMessageType(msgType)\n\tmsg.Recipient.ID = recipient\n\tmsg.Message.Text = text\n\tmsg.Message.QuickReplies = options\n\t\/\/log.Printf(\"[SendQuickReply] Enviado: [%s]\\n\", text)\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/log.Print(\"[fblib][SendQuickReply] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendAskUserLocation sends small message asking the users their location\n*\/\nfunc SendAskUserLocation(text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tqr := new(fbmodelsend.QuickReply)\n\tqr.ContentType = \"location\"\n\n\tarrayQr := []*fbmodelsend.QuickReply{qr}\n\n\terr = SendQuickReply(text, arrayQr, recipient, accessToken, msgType)\n\tif err != nil {\n\t\t\/\/log.Print(\"[fblib][SendAskUserLocation] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSend Message - Sends a generic message to Facebook Messenger\n*\/\nfunc sendMessage(message interface{}, recipient string, accessToken string) error {\n\n\tif logLevelDebug {\n\t\tscs := spew.ConfigState{Indent: \"\\t\"}\n\t\tscs.Dump(message)\n\t\treturn nil\n\t}\n\n\tvar url string\n\tif strings.Contains(accessToken, \"http\") {\n\t\turl = accessToken\n\t} else {\n\t\turl = \"https:\/\/graph.facebook.com\/v6.0\/me\/messages?access_token=\" + accessToken\n\t}\n\n\tdata, err := json.Marshal(message)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendMessage] Error to convert message object: \" + err.Error())\n\t\treturn err\n\t}\n\n\treqFb, _ := http.NewRequest(\"POST\", url, bytes.NewBuffer(data))\n\treqFb.Header.Set(\"Content-Type\", \"application\/json\")\n\treqFb.Header.Set(\"Connection\", \"close\")\n\treqFb.Close = true\n\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 30,\n\t}\n\n\t\/\/fmt.Println(\"[sendMessage] Replying at: \" + url + \" the message \" + string(data))\n\n\trespFb, err := client.Do(reqFb)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendMessage] Error during the call to Facebook to send the message: \" + err.Error())\n\t\treturn err\n\t}\n\tdefer respFb.Body.Close()\n\n\tif respFb.StatusCode < 200 || respFb.StatusCode >= 300 {\n\t\tbodyFromFb, _ := ioutil.ReadAll(respFb.Body)\n\t\tstatus := string(bodyFromFb)\n\t\tfmt.Printf(\"[fblib][sendMessage] Response status code: [%d]\\n\", respFb.StatusCode)\n\t\tfmt.Println(\"[fblib][sendMessage] Response status: \", respFb.Status)\n\t\tfmt.Println(\"[fblib][sendMessage] Response Body from Facebook: \", status)\n\t\tfmt.Printf(\"[fblib][sendMessage] Facebook URL Called: [%s]\\n\", url)\n\t\tfmt.Printf(\"[fblib][sendMessage] Object sent to Facebook: [%s]\\n\", string(data))\n\t\tstrErr := fmt.Sprintf(\"[fblib][sendMessage] Response status code: [%d]\\nResponse status: [%s]\\nResponse Body from Facebook: [%s]\\nFacebook URL Called: [%s]\\nObject sent to Facebook: [%s]\\n\",\n\t\t\trespFb.StatusCode,\n\t\t\trespFb.Status,\n\t\t\tstatus,\n\t\t\turl,\n\t\t\tstring(data),\n\t\t)\n\t\tErrInvalidCallToFacebook = errors.New(strErr)\n\t\treturn ErrInvalidCallToFacebook\n\t}\n\n\treturn nil\n}\n<commit_msg>remove outdated api<commit_after>package fblib\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\n\t\"github.com\/novatrixtech\/go-fbmessenger\/fbmodelsend\"\n)\n\nvar logLevelDebug = false\n\n\/\/ErrInvalidCallToFacebook is specific error when Facebook Messenger returns error after being called\nvar ErrInvalidCallToFacebook = errors.New(\"\")\n\n\/\/MessageTypeResponse is in response to a received message.\nconst MessageTypeResponse = 1\n\n\/\/MessageTypeUpdate is being sent proactively and is not in response to a received message.\nconst MessageTypeUpdate = 2\n\n\/\/MessageTypeMessageTag is non-promotional and is being sent outside the 24-hour standard messaging window with a message tag.\nconst MessageTypeMessageTag = 3\n\n\/\/defineMessageType returns the Message Type description defined by Messenger\nfunc defineMessageType(msgType int) (msgTypeDescription string) {\n\tswitch msgType {\n\tcase 2:\n\t\tmsgTypeDescription = \"UPDATE\"\n\tcase 3:\n\t\tmsgTypeDescription = \"MESSAGE_TAG\"\n\tdefault:\n\t\tmsgTypeDescription = \"RESPONSE\"\n\t}\n\treturn\n}\n\n\/*\nSendTextMessage - Send text message to a recipient on Facebook Messenger\n*\/\nfunc SendTextMessage(text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tletter := new(fbmodelsend.Letter)\n\tletter.Message.Text = text\n\tletter.Recipient.ID = recipient\n\tletter.MessageType = defineMessageType(msgType)\n\terr = sendMessage(letter, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/\/SendPersonalFinanceUpdateMessage sends a Finance Update information to recipient\nfunc SendPersonalFinanceUpdateMessage(text string, recipient string, accessToken string) (err error) {\n\terr = nil\n\tletter := new(fbmodelsend.Letter)\n\tletter.Message.Text = text\n\tletter.Tag = \"PERSONAL_FINANCE_UPDATE\"\n\tletter.Recipient.ID = recipient\n\tletter.MessageType = defineMessageType(3)\n\terr = sendMessage(letter, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendImageMessage - Sends image message to a recipient on Facebook Messenger\n*\/\nfunc SendImageMessage(url string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmessage := new(fbmodelsend.Letter)\n\tmessage.MessageType = defineMessageType(msgType)\n\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"image\"\n\tattch.Payload.URL = url\n\tmessage.Message.Attachment = attch\n\n\tmessage.Recipient.ID = recipient\n\terr = sendMessage(message, recipient, accessToken)\n\tif err != nil {\n\t\tfmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the image message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendAudioMessage - Sends audio message to a recipient on Facebook Messenger\n*\/\nfunc SendAudioMessage(url string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmessage := new(fbmodelsend.Letter)\n\tmessage.MessageType = defineMessageType(msgType)\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"audio\"\n\tattch.Payload.URL = url\n\tmessage.Message.Attachment = attch\n\n\tmessage.Recipient.ID = recipient\n\terr = sendMessage(message, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the audio message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendTypingMessage - Sends typing message to user\n*\/\nfunc SendTypingMessage(onoff bool, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tsenderAction := new(fbmodelsend.SenderAction)\n\tsenderAction.MessageType = defineMessageType(msgType)\n\tsenderAction.Recipient.ID = recipient\n\tif onoff {\n\t\tsenderAction.SenderActionState = \"typing_on\"\n\t} else {\n\t\tsenderAction.SenderActionState = \"typing_off\"\n\t}\n\terr = sendMessage(senderAction, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendImageMessage] Error during the call to Facebook to send the typing message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendGenericTemplateMessage - Sends a generic rich message to Facebook user.\nIt can include text, buttons, URLs Butttons, lists to reply\n*\/\nfunc SendGenericTemplateMessage(template []*fbmodelsend.TemplateElement, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.Recipient.ID = recipient\n\tmsg.MessageType = defineMessageType(msgType)\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"template\"\n\tattch.Payload.TemplateType = \"generic\"\n\tattch.Payload.Elements = template\n\n\tmsg.Message.Attachment = attch\n\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendGenericTemplateMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendButtonMessage - Sends a generic rich message to Facebook user.\nIt can include text, buttons, URLs Butttons, lists to reply\n*\/\nfunc SendButtonMessage(template []*fbmodelsend.Button, text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.Recipient.ID = recipient\n\tmsg.MessageType = defineMessageType(msgType)\n\n\tattch := new(fbmodelsend.Attachment)\n\tattch.AttachmentType = \"template\"\n\tattch.Payload.TemplateType = \"button\"\n\tattch.Payload.Text = text\n\tattch.Payload.Buttons = template\n\n\tmsg.Message.Attachment = attch\n\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendTextMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendURLButtonMessage - Sends a message with a button that redirects the user to an external web page.\n*\/\nfunc SendURLButtonMessage(text string, buttonTitle string, URL string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsgElement := new(fbmodelsend.TemplateElement)\n\tmsgElement.Title = text\n\n\topt1 := new(fbmodelsend.Button)\n\topt1.ButtonType = \"web_url\"\n\topt1.Title = buttonTitle\n\topt1.URL = URL\n\n\tbuttons := []*fbmodelsend.Button{opt1}\n\n\tmsgElement.Buttons = buttons\n\telements := []*fbmodelsend.TemplateElement{msgElement}\n\n\terr = SendGenericTemplateMessage(elements, recipient, accessToken, msgType)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][SendURLButtonMessage] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\n\/*\nSendQuickReply sends small messages in order to get small and quick answers from the users\n*\/\nfunc SendQuickReply(text string, options []*fbmodelsend.QuickReply, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tmsg := new(fbmodelsend.Letter)\n\tmsg.MessageType = defineMessageType(msgType)\n\tmsg.Recipient.ID = recipient\n\tmsg.Message.Text = text\n\tmsg.Message.QuickReplies = options\n\t\/\/log.Printf(\"[SendQuickReply] Enviado: [%s]\\n\", text)\n\terr = sendMessage(msg, recipient, accessToken)\n\tif err != nil {\n\t\t\/\/log.Print(\"[fblib][SendQuickReply] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSendAskUserLocation sends small message asking the users their location\n*\/\nfunc SendAskUserLocation(text string, recipient string, accessToken string, msgType int) (err error) {\n\terr = nil\n\tqr := new(fbmodelsend.QuickReply)\n\tqr.ContentType = \"location\"\n\n\tarrayQr := []*fbmodelsend.QuickReply{qr}\n\n\terr = SendQuickReply(text, arrayQr, recipient, accessToken, msgType)\n\tif err != nil {\n\t\t\/\/log.Print(\"[fblib][SendAskUserLocation] Error during the call to Facebook to send the text message: \" + err.Error())\n\t\treturn\n\t}\n\treturn\n}\n\n\/*\nSend Message - Sends a generic message to Facebook Messenger\n*\/\nfunc sendMessage(message interface{}, recipient string, accessToken string) error {\n\n\tif logLevelDebug {\n\t\tscs := spew.ConfigState{Indent: \"\\t\"}\n\t\tscs.Dump(message)\n\t\treturn nil\n\t}\n\n\tvar url string\n\tif strings.Contains(accessToken, \"http\") {\n\t\turl = accessToken\n\t} else {\n\t\turl = \"https:\/\/graph.facebook.com\/v6.0\/me\/messages?access_token=\" + accessToken\n\t}\n\n\tdata, err := json.Marshal(message)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendMessage] Error to convert message object: \" + err.Error())\n\t\treturn err\n\t}\n\n\treqFb, _ := http.NewRequest(\"POST\", url, bytes.NewBuffer(data))\n\treqFb.Header.Set(\"Content-Type\", \"application\/json\")\n\treqFb.Header.Set(\"Connection\", \"close\")\n\treqFb.Close = true\n\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 30,\n\t}\n\n\t\/\/fmt.Println(\"[sendMessage] Replying at: \" + url + \" the message \" + string(data))\n\n\trespFb, err := client.Do(reqFb)\n\tif err != nil {\n\t\t\/\/fmt.Print(\"[fblib][sendMessage] Error during the call to Facebook to send the message: \" + err.Error())\n\t\treturn err\n\t}\n\tdefer respFb.Body.Close()\n\n\tif respFb.StatusCode < 200 || respFb.StatusCode >= 300 {\n\t\tbodyFromFb, _ := ioutil.ReadAll(respFb.Body)\n\t\tstatus := string(bodyFromFb)\n\t\tfmt.Printf(\"[fblib][sendMessage] Response status code: [%d]\\n\", respFb.StatusCode)\n\t\tfmt.Println(\"[fblib][sendMessage] Response status: \", respFb.Status)\n\t\tfmt.Println(\"[fblib][sendMessage] Response Body from Facebook: \", status)\n\t\tfmt.Printf(\"[fblib][sendMessage] Facebook URL Called: [%s]\\n\", url)\n\t\tfmt.Printf(\"[fblib][sendMessage] Object sent to Facebook: [%s]\\n\", string(data))\n\t\tstrErr := fmt.Sprintf(\"[fblib][sendMessage] Response status code: [%d]\\nResponse status: [%s]\\nResponse Body from Facebook: [%s]\\nFacebook URL Called: [%s]\\nObject sent to Facebook: [%s]\\n\",\n\t\t\trespFb.StatusCode,\n\t\t\trespFb.Status,\n\t\t\tstatus,\n\t\t\turl,\n\t\t\tstring(data),\n\t\t)\n\t\tErrInvalidCallToFacebook = errors.New(strErr)\n\t\treturn ErrInvalidCallToFacebook\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aws\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"k8s.io\/klog\"\n\n\t\"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ awsInstanceRegMatch represents Regex Match for AWS instance.\nvar awsInstanceRegMatch = regexp.MustCompile(\"^i-[^\/]*$\")\n\n\/\/ InstanceID represents the ID of the instance in the AWS API, e.g. i-12345678\n\/\/ The \"traditional\" format is \"i-12345678\"\n\/\/ A new longer format is also being introduced: \"i-12345678abcdef01\"\n\/\/ We should not assume anything about the length or format, though it seems\n\/\/ reasonable to assume that instances will continue to start with \"i-\".\ntype InstanceID string\n\nfunc (i InstanceID) awsString() *string {\n\treturn aws.String(string(i))\n}\n\n\/\/ KubernetesInstanceID represents the id for an instance in the kubernetes API;\n\/\/ the following form\n\/\/ * aws:\/\/\/<zone>\/<awsInstanceId>\n\/\/ * aws:\/\/\/\/<awsInstanceId>\n\/\/ * <awsInstanceId>\ntype KubernetesInstanceID string\n\n\/\/ MapToAWSInstanceID extracts the InstanceID from the KubernetesInstanceID\nfunc (name KubernetesInstanceID) MapToAWSInstanceID() (InstanceID, error) {\n\ts := string(name)\n\n\tif !strings.HasPrefix(s, \"aws:\/\/\") {\n\t\t\/\/ Assume a bare aws volume id (vol-1234...)\n\t\t\/\/ Build a URL with an empty host (AZ)\n\t\ts = \"aws:\/\/\" + \"\/\" + \"\/\" + s\n\t}\n\turl, err := url.Parse(s)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid instance name (%s): %v\", name, err)\n\t}\n\tif url.Scheme != \"aws\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid scheme for AWS instance (%s)\", name)\n\t}\n\n\tawsID := \"\"\n\ttokens := strings.Split(strings.Trim(url.Path, \"\/\"), \"\/\")\n\tif len(tokens) == 1 {\n\t\t\/\/ instanceId\n\t\tawsID = tokens[0]\n\t} else if len(tokens) == 2 {\n\t\t\/\/ az\/instanceId\n\t\tawsID = tokens[1]\n\t}\n\n\t\/\/ We sanity check the resulting volume; the two known formats are\n\t\/\/ i-12345678 and i-12345678abcdef01\n\tif awsID == \"\" || !awsInstanceRegMatch.MatchString(awsID) {\n\t\treturn \"\", fmt.Errorf(\"Invalid format for AWS instance (%s)\", name)\n\t}\n\n\treturn InstanceID(awsID), nil\n}\n\n\/\/ mapToAWSInstanceID extracts the InstanceIDs from the Nodes, returning an error if a Node cannot be mapped\nfunc mapToAWSInstanceIDs(nodes []*v1.Node) ([]InstanceID, error) {\n\tvar instanceIDs []InstanceID\n\tfor _, node := range nodes {\n\t\tif node.Spec.ProviderID == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"node %q did not have ProviderID set\", node.Name)\n\t\t}\n\t\tinstanceID, err := KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to parse ProviderID %q for node %q\", node.Spec.ProviderID, node.Name)\n\t\t}\n\t\tinstanceIDs = append(instanceIDs, instanceID)\n\t}\n\n\treturn instanceIDs, nil\n}\n\n\/\/ mapToAWSInstanceIDsTolerant extracts the InstanceIDs from the Nodes, skipping Nodes that cannot be mapped\nfunc mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []InstanceID {\n\tvar instanceIDs []InstanceID\n\tfor _, node := range nodes {\n\t\tif node.Spec.ProviderID == \"\" {\n\t\t\tklog.Warningf(\"node %q did not have ProviderID set\", node.Name)\n\t\t\tcontinue\n\t\t}\n\t\tinstanceID, err := KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID()\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"unable to parse ProviderID %q for node %q\", node.Spec.ProviderID, node.Name)\n\t\t\tcontinue\n\t\t}\n\t\tinstanceIDs = append(instanceIDs, instanceID)\n\t}\n\n\treturn instanceIDs\n}\n\n\/\/ Gets the full information about this instance from the EC2 API\nfunc describeInstance(ec2Client EC2, instanceID InstanceID) (*ec2.Instance, error) {\n\trequest := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: []*string{instanceID.awsString()},\n\t}\n\n\tinstances, err := ec2Client.DescribeInstances(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(instances) == 0 {\n\t\treturn nil, fmt.Errorf(\"no instances found for instance: %s\", instanceID)\n\t}\n\tif len(instances) > 1 {\n\t\treturn nil, fmt.Errorf(\"multiple instances found for instance: %s\", instanceID)\n\t}\n\treturn instances[0], nil\n}\n\n\/\/ instanceCache manages the cache of DescribeInstances\ntype instanceCache struct {\n\t\/\/ TODO: Get rid of this field, send all calls through the instanceCache\n\tcloud *Cloud\n\n\tmutex sync.Mutex\n\tsnapshot *allInstancesSnapshot\n}\n\n\/\/ Gets the full information about these instance from the EC2 API\nfunc (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) {\n\tnow := time.Now()\n\n\tklog.V(4).Infof(\"EC2 DescribeInstances - fetching all instances\")\n\n\tfilters := []*ec2.Filter{}\n\tinstances, err := c.cloud.describeInstances(filters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := make(map[InstanceID]*ec2.Instance)\n\tfor _, i := range instances {\n\t\tid := InstanceID(aws.StringValue(i.InstanceId))\n\t\tm[id] = i\n\t}\n\n\tsnapshot := &allInstancesSnapshot{now, m}\n\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif c.snapshot != nil && snapshot.olderThan(c.snapshot) {\n\t\t\/\/ If this happens a lot, we could run this function in a mutex and only return one result\n\t\tklog.Infof(\"Not caching concurrent AWS DescribeInstances results\")\n\t} else {\n\t\tc.snapshot = snapshot\n\t}\n\n\treturn snapshot, nil\n}\n\n\/\/ cacheCriteria holds criteria that must hold to use a cached snapshot\ntype cacheCriteria struct {\n\t\/\/ MaxAge indicates the maximum age of a cached snapshot we can accept.\n\t\/\/ If set to 0 (i.e. unset), cached values will not time out because of age.\n\tMaxAge time.Duration\n\n\t\/\/ HasInstances is a list of InstanceIDs that must be in a cached snapshot for it to be considered valid.\n\t\/\/ If an instance is not found in the cached snapshot, the snapshot be ignored and we will re-fetch.\n\tHasInstances []InstanceID\n}\n\n\/\/ describeAllInstancesCached returns all instances, using cached results if applicable\nfunc (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*allInstancesSnapshot, error) {\n\tvar err error\n\tsnapshot := c.getSnapshot()\n\tif snapshot != nil && !snapshot.MeetsCriteria(criteria) {\n\t\tsnapshot = nil\n\t}\n\n\tif snapshot == nil {\n\t\tsnapshot, err = c.describeAllInstancesUncached()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tklog.V(6).Infof(\"EC2 DescribeInstances - using cached results\")\n\t}\n\n\treturn snapshot, nil\n}\n\n\/\/ getSnapshot returns a snapshot if one exists\nfunc (c *instanceCache) getSnapshot() *allInstancesSnapshot {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\treturn c.snapshot\n}\n\n\/\/ olderThan is a simple helper to encapsulate timestamp comparison\nfunc (s *allInstancesSnapshot) olderThan(other *allInstancesSnapshot) bool {\n\t\/\/ After() is technically broken by time changes until we have monotonic time\n\treturn other.timestamp.After(s.timestamp)\n}\n\n\/\/ MeetsCriteria returns true if the snapshot meets the criteria in cacheCriteria\nfunc (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool {\n\tif criteria.MaxAge > 0 {\n\t\t\/\/ Sub() is technically broken by time changes until we have monotonic time\n\t\tnow := time.Now()\n\t\tif now.Sub(s.timestamp) > criteria.MaxAge {\n\t\t\tklog.V(6).Infof(\"instanceCache snapshot cannot be used as is older than MaxAge=%s\", criteria.MaxAge)\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif len(criteria.HasInstances) != 0 {\n\t\tfor _, id := range criteria.HasInstances {\n\t\t\tif nil == s.instances[id] {\n\t\t\t\tklog.V(6).Infof(\"instanceCache snapshot cannot be used as does not contain instance %s\", id)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ allInstancesSnapshot holds the results from querying for all instances,\n\/\/ along with the timestamp for cache-invalidation purposes\ntype allInstancesSnapshot struct {\n\ttimestamp time.Time\n\tinstances map[InstanceID]*ec2.Instance\n}\n\n\/\/ FindInstances returns the instances corresponding to the specified ids. If an id is not found, it is ignored.\nfunc (s *allInstancesSnapshot) FindInstances(ids []InstanceID) map[InstanceID]*ec2.Instance {\n\tm := make(map[InstanceID]*ec2.Instance)\n\tfor _, id := range ids {\n\t\tinstance := s.instances[id]\n\t\tif instance != nil {\n\t\t\tm[id] = instance\n\t\t}\n\t}\n\treturn m\n}\n<commit_msg>fix bug that awsSDKGO expect nil instead empty slice for ec2.DescribeInstances<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aws\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"k8s.io\/klog\"\n\n\t\"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ awsInstanceRegMatch represents Regex Match for AWS instance.\nvar awsInstanceRegMatch = regexp.MustCompile(\"^i-[^\/]*$\")\n\n\/\/ InstanceID represents the ID of the instance in the AWS API, e.g. i-12345678\n\/\/ The \"traditional\" format is \"i-12345678\"\n\/\/ A new longer format is also being introduced: \"i-12345678abcdef01\"\n\/\/ We should not assume anything about the length or format, though it seems\n\/\/ reasonable to assume that instances will continue to start with \"i-\".\ntype InstanceID string\n\nfunc (i InstanceID) awsString() *string {\n\treturn aws.String(string(i))\n}\n\n\/\/ KubernetesInstanceID represents the id for an instance in the kubernetes API;\n\/\/ the following form\n\/\/ * aws:\/\/\/<zone>\/<awsInstanceId>\n\/\/ * aws:\/\/\/\/<awsInstanceId>\n\/\/ * <awsInstanceId>\ntype KubernetesInstanceID string\n\n\/\/ MapToAWSInstanceID extracts the InstanceID from the KubernetesInstanceID\nfunc (name KubernetesInstanceID) MapToAWSInstanceID() (InstanceID, error) {\n\ts := string(name)\n\n\tif !strings.HasPrefix(s, \"aws:\/\/\") {\n\t\t\/\/ Assume a bare aws volume id (vol-1234...)\n\t\t\/\/ Build a URL with an empty host (AZ)\n\t\ts = \"aws:\/\/\" + \"\/\" + \"\/\" + s\n\t}\n\turl, err := url.Parse(s)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid instance name (%s): %v\", name, err)\n\t}\n\tif url.Scheme != \"aws\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid scheme for AWS instance (%s)\", name)\n\t}\n\n\tawsID := \"\"\n\ttokens := strings.Split(strings.Trim(url.Path, \"\/\"), \"\/\")\n\tif len(tokens) == 1 {\n\t\t\/\/ instanceId\n\t\tawsID = tokens[0]\n\t} else if len(tokens) == 2 {\n\t\t\/\/ az\/instanceId\n\t\tawsID = tokens[1]\n\t}\n\n\t\/\/ We sanity check the resulting volume; the two known formats are\n\t\/\/ i-12345678 and i-12345678abcdef01\n\tif awsID == \"\" || !awsInstanceRegMatch.MatchString(awsID) {\n\t\treturn \"\", fmt.Errorf(\"Invalid format for AWS instance (%s)\", name)\n\t}\n\n\treturn InstanceID(awsID), nil\n}\n\n\/\/ mapToAWSInstanceID extracts the InstanceIDs from the Nodes, returning an error if a Node cannot be mapped\nfunc mapToAWSInstanceIDs(nodes []*v1.Node) ([]InstanceID, error) {\n\tvar instanceIDs []InstanceID\n\tfor _, node := range nodes {\n\t\tif node.Spec.ProviderID == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"node %q did not have ProviderID set\", node.Name)\n\t\t}\n\t\tinstanceID, err := KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to parse ProviderID %q for node %q\", node.Spec.ProviderID, node.Name)\n\t\t}\n\t\tinstanceIDs = append(instanceIDs, instanceID)\n\t}\n\n\treturn instanceIDs, nil\n}\n\n\/\/ mapToAWSInstanceIDsTolerant extracts the InstanceIDs from the Nodes, skipping Nodes that cannot be mapped\nfunc mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []InstanceID {\n\tvar instanceIDs []InstanceID\n\tfor _, node := range nodes {\n\t\tif node.Spec.ProviderID == \"\" {\n\t\t\tklog.Warningf(\"node %q did not have ProviderID set\", node.Name)\n\t\t\tcontinue\n\t\t}\n\t\tinstanceID, err := KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID()\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"unable to parse ProviderID %q for node %q\", node.Spec.ProviderID, node.Name)\n\t\t\tcontinue\n\t\t}\n\t\tinstanceIDs = append(instanceIDs, instanceID)\n\t}\n\n\treturn instanceIDs\n}\n\n\/\/ Gets the full information about this instance from the EC2 API\nfunc describeInstance(ec2Client EC2, instanceID InstanceID) (*ec2.Instance, error) {\n\trequest := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: []*string{instanceID.awsString()},\n\t}\n\n\tinstances, err := ec2Client.DescribeInstances(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(instances) == 0 {\n\t\treturn nil, fmt.Errorf(\"no instances found for instance: %s\", instanceID)\n\t}\n\tif len(instances) > 1 {\n\t\treturn nil, fmt.Errorf(\"multiple instances found for instance: %s\", instanceID)\n\t}\n\treturn instances[0], nil\n}\n\n\/\/ instanceCache manages the cache of DescribeInstances\ntype instanceCache struct {\n\t\/\/ TODO: Get rid of this field, send all calls through the instanceCache\n\tcloud *Cloud\n\n\tmutex sync.Mutex\n\tsnapshot *allInstancesSnapshot\n}\n\n\/\/ Gets the full information about these instance from the EC2 API\nfunc (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) {\n\tnow := time.Now()\n\n\tklog.V(4).Infof(\"EC2 DescribeInstances - fetching all instances\")\n\n\tvar filters []*ec2.Filter\n\tinstances, err := c.cloud.describeInstances(filters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := make(map[InstanceID]*ec2.Instance)\n\tfor _, i := range instances {\n\t\tid := InstanceID(aws.StringValue(i.InstanceId))\n\t\tm[id] = i\n\t}\n\n\tsnapshot := &allInstancesSnapshot{now, m}\n\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif c.snapshot != nil && snapshot.olderThan(c.snapshot) {\n\t\t\/\/ If this happens a lot, we could run this function in a mutex and only return one result\n\t\tklog.Infof(\"Not caching concurrent AWS DescribeInstances results\")\n\t} else {\n\t\tc.snapshot = snapshot\n\t}\n\n\treturn snapshot, nil\n}\n\n\/\/ cacheCriteria holds criteria that must hold to use a cached snapshot\ntype cacheCriteria struct {\n\t\/\/ MaxAge indicates the maximum age of a cached snapshot we can accept.\n\t\/\/ If set to 0 (i.e. unset), cached values will not time out because of age.\n\tMaxAge time.Duration\n\n\t\/\/ HasInstances is a list of InstanceIDs that must be in a cached snapshot for it to be considered valid.\n\t\/\/ If an instance is not found in the cached snapshot, the snapshot be ignored and we will re-fetch.\n\tHasInstances []InstanceID\n}\n\n\/\/ describeAllInstancesCached returns all instances, using cached results if applicable\nfunc (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*allInstancesSnapshot, error) {\n\tvar err error\n\tsnapshot := c.getSnapshot()\n\tif snapshot != nil && !snapshot.MeetsCriteria(criteria) {\n\t\tsnapshot = nil\n\t}\n\n\tif snapshot == nil {\n\t\tsnapshot, err = c.describeAllInstancesUncached()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tklog.V(6).Infof(\"EC2 DescribeInstances - using cached results\")\n\t}\n\n\treturn snapshot, nil\n}\n\n\/\/ getSnapshot returns a snapshot if one exists\nfunc (c *instanceCache) getSnapshot() *allInstancesSnapshot {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\treturn c.snapshot\n}\n\n\/\/ olderThan is a simple helper to encapsulate timestamp comparison\nfunc (s *allInstancesSnapshot) olderThan(other *allInstancesSnapshot) bool {\n\t\/\/ After() is technically broken by time changes until we have monotonic time\n\treturn other.timestamp.After(s.timestamp)\n}\n\n\/\/ MeetsCriteria returns true if the snapshot meets the criteria in cacheCriteria\nfunc (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool {\n\tif criteria.MaxAge > 0 {\n\t\t\/\/ Sub() is technically broken by time changes until we have monotonic time\n\t\tnow := time.Now()\n\t\tif now.Sub(s.timestamp) > criteria.MaxAge {\n\t\t\tklog.V(6).Infof(\"instanceCache snapshot cannot be used as is older than MaxAge=%s\", criteria.MaxAge)\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif len(criteria.HasInstances) != 0 {\n\t\tfor _, id := range criteria.HasInstances {\n\t\t\tif nil == s.instances[id] {\n\t\t\t\tklog.V(6).Infof(\"instanceCache snapshot cannot be used as does not contain instance %s\", id)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ allInstancesSnapshot holds the results from querying for all instances,\n\/\/ along with the timestamp for cache-invalidation purposes\ntype allInstancesSnapshot struct {\n\ttimestamp time.Time\n\tinstances map[InstanceID]*ec2.Instance\n}\n\n\/\/ FindInstances returns the instances corresponding to the specified ids. If an id is not found, it is ignored.\nfunc (s *allInstancesSnapshot) FindInstances(ids []InstanceID) map[InstanceID]*ec2.Instance {\n\tm := make(map[InstanceID]*ec2.Instance)\n\tfor _, id := range ids {\n\t\tinstance := s.instances[id]\n\t\tif instance != nil {\n\t\t\tm[id] = instance\n\t\t}\n\t}\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package gofeed\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/mmcdole\/gofeed\/atom\"\n\t\"github.com\/mmcdole\/gofeed\/feed\"\n\t\"github.com\/mmcdole\/gofeed\/rss\"\n\t\"github.com\/mmcdole\/goxpp\"\n)\n\ntype FeedType int\n\nconst (\n\tFeedTypeUnknown FeedType = iota\n\tFeedTypeAtom\n\tFeedTypeRSS\n)\n\ntype FeedParser struct {\n\tAtomTrans atom.Translator\n\tRSSTrans rss.Translator\n\trp *rss.Parser\n\tap *atom.Parser\n}\n\nfunc NewFeedParser() *FeedParser {\n\tfp := FeedParser{\n\t\trp: &rss.Parser{},\n\t\tap: &atom.Parser{},\n\t}\n\treturn &fp\n}\n\nfunc (f *FeedParser) ParseFeedURL(feedURL string) (*feed.Feed, error) {\n\tresp, err := http.Get(feedURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\treturn f.ParseFeed(string(body))\n}\n\nfunc (f *FeedParser) ParseFeed(feed string) (*feed.Feed, error) {\n\tfmt.Println(feed)\n\tft := DetectFeedType(feed)\n\tswitch ft {\n\tcase FeedTypeAtom:\n\t\treturn f.parseFeedFromAtom(feed)\n\tcase FeedTypeRSS:\n\t\treturn f.parseFeedFromRSS(feed)\n\t}\n\treturn nil, errors.New(\"Failed to detect feed type\")\n}\n\nfunc DetectFeedType(feed string) FeedType {\n\tp := xpp.NewXMLPullParser(strings.NewReader(feed))\n\n\t_, err := p.NextTag()\n\tif err != nil {\n\t\tfmt.Printf(\"Error %s: \\n\", err)\n\t\treturn FeedTypeUnknown\n\t}\n\n\tname := strings.ToLower(p.Name)\n\tswitch name {\n\tcase \"rdf\":\n\t\treturn FeedTypeRSS\n\tcase \"rss\":\n\t\treturn FeedTypeRSS\n\tcase \"feed\":\n\t\treturn FeedTypeAtom\n\tdefault:\n\t\treturn FeedTypeUnknown\n\t}\n}\n\nfunc (f *FeedParser) parseFeedFromAtom(feed string) (*feed.Feed, error) {\n\taf, err := f.ap.ParseFeed(feed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := f.atomTrans().Translate(af)\n\treturn result, nil\n}\n\nfunc (f *FeedParser) parseFeedFromRSS(feed string) (*feed.Feed, error) {\n\trf, err := f.rp.ParseFeed(feed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := f.rssTrans().Translate(rf)\n\treturn result, nil\n}\n\nfunc (f *FeedParser) atomTrans() atom.Translator {\n\tif f.AtomTrans != nil {\n\t\treturn f.AtomTrans\n\t}\n\tf.AtomTrans = &atom.DefaultTranslator{}\n\treturn f.AtomTrans\n}\n\nfunc (f *FeedParser) rssTrans() rss.Translator {\n\tif f.RSSTrans != nil {\n\t\treturn f.RSSTrans\n\t}\n\tf.RSSTrans = &rss.DefaultTranslator{}\n\treturn f.RSSTrans\n}\n<commit_msg>Move detect method into feedparser<commit_after>package gofeed\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/mmcdole\/gofeed\/atom\"\n\t\"github.com\/mmcdole\/gofeed\/feed\"\n\t\"github.com\/mmcdole\/gofeed\/rss\"\n\t\"github.com\/mmcdole\/goxpp\"\n)\n\ntype FeedType int\n\nconst (\n\tFeedTypeUnknown FeedType = iota\n\tFeedTypeAtom\n\tFeedTypeRSS\n)\n\ntype FeedParser struct {\n\tAtomTrans atom.Translator\n\tRSSTrans rss.Translator\n\trp *rss.Parser\n\tap *atom.Parser\n}\n\nfunc NewFeedParser() *FeedParser {\n\tfp := FeedParser{\n\t\trp: &rss.Parser{},\n\t\tap: &atom.Parser{},\n\t}\n\treturn &fp\n}\n\nfunc (f *FeedParser) ParseFeedURL(feedURL string) (*feed.Feed, error) {\n\tresp, err := http.Get(feedURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\treturn f.ParseFeed(string(body))\n}\n\nfunc (f *FeedParser) ParseFeed(feed string) (*feed.Feed, error) {\n\tfmt.Println(feed)\n\tft := DetectFeedType(feed)\n\tswitch ft {\n\tcase FeedTypeAtom:\n\t\treturn f.parseFeedFromAtom(feed)\n\tcase FeedTypeRSS:\n\t\treturn f.parseFeedFromRSS(feed)\n\t}\n\treturn nil, errors.New(\"Failed to detect feed type\")\n}\n\nfunc (f *FeedParser) DetectFeedType(feed string) FeedType {\n\tp := xpp.NewXMLPullParser(strings.NewReader(feed))\n\n\t_, err := p.NextTag()\n\tif err != nil {\n\t\tfmt.Printf(\"Error %s: \\n\", err)\n\t\treturn FeedTypeUnknown\n\t}\n\n\tname := strings.ToLower(p.Name)\n\tswitch name {\n\tcase \"rdf\":\n\t\treturn FeedTypeRSS\n\tcase \"rss\":\n\t\treturn FeedTypeRSS\n\tcase \"feed\":\n\t\treturn FeedTypeAtom\n\tdefault:\n\t\treturn FeedTypeUnknown\n\t}\n}\n\nfunc (f *FeedParser) parseFeedFromAtom(feed string) (*feed.Feed, error) {\n\taf, err := f.ap.ParseFeed(feed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := f.atomTrans().Translate(af)\n\treturn result, nil\n}\n\nfunc (f *FeedParser) parseFeedFromRSS(feed string) (*feed.Feed, error) {\n\trf, err := f.rp.ParseFeed(feed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := f.rssTrans().Translate(rf)\n\treturn result, nil\n}\n\nfunc (f *FeedParser) atomTrans() atom.Translator {\n\tif f.AtomTrans != nil {\n\t\treturn f.AtomTrans\n\t}\n\tf.AtomTrans = &atom.DefaultTranslator{}\n\treturn f.AtomTrans\n}\n\nfunc (f *FeedParser) rssTrans() rss.Translator {\n\tif f.RSSTrans != nil {\n\t\treturn f.RSSTrans\n\t}\n\tf.RSSTrans = &rss.DefaultTranslator{}\n\treturn f.RSSTrans\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build riak\n\npackage rkive\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"os\"\n\t\"fmt\"\n)\n\nvar testClient *Client\n\nfunc init() {\n var err error\n testClient, err = DialOne(\"localhost:8087\", \"testClient\")\n if err != nil {\n fmt.Printf(\"Couldn't connect to Riak: %s\\n\", err)\n os.Exit(1) \n }\n}\n\ntype TestObject struct {\n\tData []byte\n\tinfo *Info\n}\n\nfunc (t *TestObject) Unmarshal(b []byte) error {\n\tt.Data = b\n\treturn nil\n}\n\nfunc (t *TestObject) Marshal() ([]byte, error) {\n\treturn t.Data, nil\n}\n\nfunc (t *TestObject) Info() *Info { return t.info }\n\nfunc (t *TestObject) NewEmpty() ObjectM { return &TestObject{nil, &Info{}} }\n\n\/\/ naive merge\nfunc (t *TestObject) Merge(o ObjectM) {\n\ttn := o.(*TestObject)\n\tif len(tn.Data) > len(t.Data) {\n\t\tt.Data = tn.Data\n\t}\n}\n\nfunc TestMultipleVclocks(t *testing.T) {\n\toba := &TestObject{\n\t\tData: []byte(\"Body 1\"),\n\t\tinfo: &Info{},\n\t}\n\n\tobb := &TestObject{\n\t\tData: []byte(\"Body 2...\"),\n\t\tinfo: &Info{},\n\t}\n\n\t\/\/ manually create conflict - a user can't ordinarily do this\n\toba.Info().bucket, oba.Info().key = []byte(\"testbucket\"), []byte(\"conflict\")\n\tobb.Info().bucket, obb.Info().key = []byte(\"testbucket\"), []byte(\"conflict\")\n\n\tcl := testClient\n\n\t\/\/ The store operations should not error,\n\t\/\/ because we are doing a fetch and merge\n\t\/\/ when we detect multiple responses on\n\t\/\/ Store()\n\terr := cl.Store(obb, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cl.Store(oba, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Since our Merge() function takes the longer of the\n\t\/\/ two Data fields, the body should always be \"Body 2...\"\n\terr = cl.Fetch(oba, \"testbucket\", \"conflict\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(oba.Data, []byte(\"Body 2...\")) {\n\t\tt.Errorf(\"Data should be %q; got %q\", \"Body 2...\", oba.Data)\n\t}\n}\n\nfunc TestFetchNotFound(t *testing.T) {\n\tcl := testClient\n\tob := &TestObject{}\n\n\terr := cl.Fetch(ob, \"anybucket\", \"dne\", nil)\n\tif err == nil {\n\t\tt.Error(\"'err' should not be nil\")\n\t}\n\tif err != ErrNotFound {\n\t\tt.Errorf(\"err is not ErrNotFound: %q\", err)\n\t}\n}\n\nfunc TestUpdate(t *testing.T) {\n\tcl := testClient\n\n\ttest := cl.Bucket(\"testbucket\")\n\n\tlb := &TestObject{\n\t\tData: []byte(\"flibbertyibbitygibbit\"),\n\t\tinfo: &Info{},\n\t}\n\n\terr := test.New(lb, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnewlb := &TestObject{\n\t\tinfo: &Info{},\n\t}\n\n\terr = test.Fetch(newlb, lb.Info().Key())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(newlb.Data, lb.Data) {\n\t\tt.Logf(\"Object 1 data: %q\", lb.Data)\n\t\tt.Logf(\"Object 2 data: %q\", newlb.Data)\n\t\tt.Errorf(\"Objects don't have the same body\")\n\t}\n\n\t\/\/ make a modification\n\tnewlb.Data = []byte(\"new data.\")\n\terr = test.Push(newlb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ this should return true\n\tupd, err := test.Update(lb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !upd {\n\t\tt.Error(\"Object was not updated.\")\n\t}\n\n\tif !bytes.Equal(lb.Data, newlb.Data) {\n\t\tt.Error(\"Objects are not equal after update.\")\n\t}\n\n\t\/\/ this should return false\n\tupd, err = test.Update(newlb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif upd {\n\t\tt.Error(\"Object was spuriously updated...?\")\n\t}\n}\n<commit_msg>FetchHead test<commit_after>\/\/ +build riak\n\npackage rkive\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"os\"\n\t\"fmt\"\n)\n\nvar testClient *Client\n\nfunc init() {\n var err error\n testClient, err = DialOne(\"localhost:8087\", \"testClient\")\n if err != nil {\n fmt.Printf(\"Couldn't connect to Riak: %s\\n\", err)\n os.Exit(1) \n }\n}\n\ntype TestObject struct {\n\tData []byte\n\tinfo *Info\n}\n\nfunc (t *TestObject) Unmarshal(b []byte) error {\n\tt.Data = b\n\treturn nil\n}\n\nfunc (t *TestObject) Marshal() ([]byte, error) {\n\treturn t.Data, nil\n}\n\nfunc (t *TestObject) Info() *Info { return t.info }\n\nfunc (t *TestObject) NewEmpty() ObjectM { return &TestObject{nil, &Info{}} }\n\n\/\/ naive merge\nfunc (t *TestObject) Merge(o ObjectM) {\n\ttn := o.(*TestObject)\n\tif len(tn.Data) > len(t.Data) {\n\t\tt.Data = tn.Data\n\t}\n}\n\nfunc TestMultipleVclocks(t *testing.T) {\n\toba := &TestObject{\n\t\tData: []byte(\"Body 1\"),\n\t\tinfo: &Info{},\n\t}\n\n\tobb := &TestObject{\n\t\tData: []byte(\"Body 2...\"),\n\t\tinfo: &Info{},\n\t}\n\n\t\/\/ manually create conflict - a user can't ordinarily do this\n\toba.Info().bucket, oba.Info().key = []byte(\"testbucket\"), []byte(\"conflict\")\n\tobb.Info().bucket, obb.Info().key = []byte(\"testbucket\"), []byte(\"conflict\")\n\n\tcl := testClient\n\n\t\/\/ The store operations should not error,\n\t\/\/ because we are doing a fetch and merge\n\t\/\/ when we detect multiple responses on\n\t\/\/ Store()\n\terr := cl.Store(obb, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cl.Store(oba, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Since our Merge() function takes the longer of the\n\t\/\/ two Data fields, the body should always be \"Body 2...\"\n\terr = cl.Fetch(oba, \"testbucket\", \"conflict\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(oba.Data, []byte(\"Body 2...\")) {\n\t\tt.Errorf(\"Data should be %q; got %q\", \"Body 2...\", oba.Data)\n\t}\n}\n\nfunc TestFetchNotFound(t *testing.T) {\n\tcl := testClient\n\tob := &TestObject{}\n\n\terr := cl.Fetch(ob, \"anybucket\", \"dne\", nil)\n\tif err == nil {\n\t\tt.Error(\"'err' should not be nil\")\n\t}\n\tif err != ErrNotFound {\n\t\tt.Errorf(\"err is not ErrNotFound: %q\", err)\n\t}\n}\n\nfunc TestUpdate(t *testing.T) {\n\tcl := testClient\n\n\ttest := cl.Bucket(\"testbucket\")\n\n\tlb := &TestObject{\n\t\tData: []byte(\"flibbertyibbitygibbit\"),\n\t\tinfo: &Info{},\n\t}\n\n\terr := test.New(lb, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnewlb := &TestObject{\n\t\tinfo: &Info{},\n\t}\n\n\terr = test.Fetch(newlb, lb.Info().Key())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(newlb.Data, lb.Data) {\n\t\tt.Logf(\"Object 1 data: %q\", lb.Data)\n\t\tt.Logf(\"Object 2 data: %q\", newlb.Data)\n\t\tt.Errorf(\"Objects don't have the same body\")\n\t}\n\n\t\/\/ make a modification\n\tnewlb.Data = []byte(\"new data.\")\n\terr = test.Push(newlb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ this should return true\n\tupd, err := test.Update(lb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !upd {\n\t\tt.Error(\"Object was not updated.\")\n\t}\n\n\tif !bytes.Equal(lb.Data, newlb.Data) {\n\t\tt.Error(\"Objects are not equal after update.\")\n\t}\n\n\t\/\/ this should return false\n\tupd, err = test.Update(newlb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif upd {\n\t\tt.Error(\"Object was spuriously updated...?\")\n\t}\n}\n\nfunc TestHead(t *testing.T) {\n t.Parallel()\n cl := testClient\n \n tests := cl.Bucket(\"testbucket\")\n \n ob := &TestObject{\n info: &Info{},\n Data: []byte(\"exists.\"),\n }\n \n err := tests.New(ob, nil)\n if err != nil {\n t.Fatal(err)\n }\n \n \/\/ fetch head exists\n var info *Info\n info, err = cl.FetchHead(\"testbucket\", ob.Info().Key())\n if err != nil {\n t.Fatal(err)\n }\n \n if !bytes.Equal(info.vclock, ob.info.vclock) {\n t.Errorf(\"vclocks not equal: %q and %q\", info.vclock, ob.info.vclock) \n }\n \n \/\/ fetch dne\n _, err = cl.FetchHead(\"testbucket\", \"dne\")\n if err != ErrNotFound {\n t.Errorf(\"expected ErrNotFound, got: %q\", err) \n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage platformvm\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/vms\/components\/avax\"\n\n\tsafemath \"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\nvar (\n\t_ UnsignedProposalTx = &UnsignedAdvanceTimeTx{}\n)\n\n\/\/ UnsignedAdvanceTimeTx is a transaction to increase the chain's timestamp.\n\/\/ When the chain's timestamp is updated (a AdvanceTimeTx is accepted and\n\/\/ followed by a commit block) the staker set is also updated accordingly.\n\/\/ It must be that:\n\/\/ * proposed timestamp > [current chain time]\n\/\/ * proposed timestamp <= [time for next staker to be removed]\ntype UnsignedAdvanceTimeTx struct {\n\tavax.Metadata\n\n\t\/\/ Unix time this block proposes increasing the timestamp to\n\tTime uint64 `serialize:\"true\" json:\"time\"`\n}\n\n\/\/ Timestamp returns the time this block is proposing the chain should be set to\nfunc (tx *UnsignedAdvanceTimeTx) Timestamp() time.Time {\n\treturn time.Unix(int64(tx.Time), 0)\n}\n\n\/\/ SemanticVerify this transaction is valid.\nfunc (tx *UnsignedAdvanceTimeTx) SemanticVerify(\n\tvm *VM,\n\tparentState MutableState,\n\tstx *Tx,\n) (\n\tVersionedState,\n\tVersionedState,\n\tfunc() error,\n\tfunc() error,\n\tTxError,\n) {\n\tswitch {\n\tcase tx == nil:\n\t\treturn nil, nil, nil, nil, tempError{errNilTx}\n\tcase len(stx.Creds) != 0:\n\t\treturn nil, nil, nil, nil, permError{errWrongNumberOfCredentials}\n\t}\n\n\ttimestamp := tx.Timestamp()\n\tlocalTimestamp := vm.clock.Time()\n\tif localTimestamp.Add(syncBound).Before(timestamp) {\n\t\treturn nil, nil, nil, nil, tempError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed time (%s) is too far in the future relative to local time (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tlocalTimestamp,\n\t\t\t),\n\t\t}\n\t}\n\n\tif currentTimestamp := parentState.GetTimestamp(); !timestamp.After(currentTimestamp) {\n\t\treturn nil, nil, nil, nil, permError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed timestamp (%s), not after current timestamp (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tcurrentTimestamp,\n\t\t\t),\n\t\t}\n\t}\n\n\t\/\/ Only allow timestamp to move forward as far as the time of next staker\n\t\/\/ set change time\n\tnextStakerChangeTime, err := vm.nextStakerChangeTime(parentState)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, tempError{err}\n\t}\n\n\tif timestamp.After(nextStakerChangeTime) {\n\t\treturn nil, nil, nil, nil, permError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed timestamp (%s) later than next staker change time (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tnextStakerChangeTime,\n\t\t\t),\n\t\t}\n\t}\n\n\tcurrentSupply := parentState.GetCurrentSupply()\n\n\tpendingStakers := parentState.PendingStakerChainState()\n\ttoAddValidatorsWithRewardToCurrent := []*validatorReward(nil)\n\ttoAddDelegatorsWithRewardToCurrent := []*validatorReward(nil)\n\ttoAddWithoutRewardToCurrent := []*Tx(nil)\n\tnumToRemoveFromPending := 0\n\n\t\/\/ Add to the staker set any pending stakers whose start time is at or\n\t\/\/ before the new timestamp. [pendingStakers.Stakers()] is sorted in order\n\t\/\/ of increasing startTime\npendingStakerLoop:\n\tfor _, tx := range pendingStakers.Stakers() {\n\t\tswitch staker := tx.UnsignedTx.(type) {\n\t\tcase *UnsignedAddDelegatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\tr := reward(\n\t\t\t\tstaker.Validator.Duration(),\n\t\t\t\tstaker.Validator.Wght,\n\t\t\t\tcurrentSupply,\n\t\t\t\tvm.StakeMintingPeriod,\n\t\t\t)\n\t\t\tcurrentSupply, err = safemath.Add64(currentSupply, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, permError{err}\n\t\t\t}\n\n\t\t\ttoAddDelegatorsWithRewardToCurrent = append(toAddDelegatorsWithRewardToCurrent, &validatorReward{\n\t\t\t\taddStakerTx: tx,\n\t\t\t\tpotentialReward: r,\n\t\t\t})\n\t\t\tnumToRemoveFromPending++\n\t\tcase *UnsignedAddValidatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\tr := reward(\n\t\t\t\tstaker.Validator.Duration(),\n\t\t\t\tstaker.Validator.Wght,\n\t\t\t\tcurrentSupply,\n\t\t\t\tvm.StakeMintingPeriod,\n\t\t\t)\n\t\t\tcurrentSupply, err = safemath.Add64(currentSupply, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, permError{err}\n\t\t\t}\n\n\t\t\ttoAddValidatorsWithRewardToCurrent = append(toAddValidatorsWithRewardToCurrent, &validatorReward{\n\t\t\t\taddStakerTx: tx,\n\t\t\t\tpotentialReward: r,\n\t\t\t})\n\t\t\tnumToRemoveFromPending++\n\t\tcase *UnsignedAddSubnetValidatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\t\/\/ If this staker should already be removed, then we should just\n\t\t\t\/\/ never add them.\n\t\t\tif staker.EndTime().After(timestamp) {\n\t\t\t\ttoAddWithoutRewardToCurrent = append(toAddWithoutRewardToCurrent, tx)\n\t\t\t}\n\t\t\tnumToRemoveFromPending++\n\t\tdefault:\n\t\t\treturn nil, nil, nil, nil, permError{\n\t\t\t\tfmt.Errorf(\"expected validator but got %T\", tx.UnsignedTx),\n\t\t\t}\n\t\t}\n\t}\n\tnewlyPendingStakers := pendingStakers.DeleteStakers(numToRemoveFromPending)\n\n\tcurrentStakers := parentState.CurrentStakerChainState()\n\tnumToRemoveFromCurrent := 0\n\n\t\/\/ Remove from the staker set any subnet validators whose endTime is at or\n\t\/\/ before the new timestamp\ncurrentStakerLoop:\n\tfor _, tx := range currentStakers.Stakers() {\n\t\tswitch staker := tx.UnsignedTx.(type) {\n\t\tcase *UnsignedAddSubnetValidatorTx:\n\t\t\tif staker.EndTime().After(timestamp) {\n\t\t\t\tbreak currentStakerLoop\n\t\t\t}\n\n\t\t\tnumToRemoveFromCurrent++\n\t\tcase *UnsignedAddValidatorTx, *UnsignedAddDelegatorTx:\n\t\t\t\/\/ We shouldn't be removing any primary network validators here\n\t\t\tbreak currentStakerLoop\n\t\tdefault:\n\t\t\treturn nil, nil, nil, nil, permError{errWrongTxType}\n\t\t}\n\t}\n\tnewlyCurrentStakers, err := currentStakers.UpdateStakers(\n\t\ttoAddValidatorsWithRewardToCurrent,\n\t\ttoAddDelegatorsWithRewardToCurrent,\n\t\ttoAddWithoutRewardToCurrent,\n\t\tnumToRemoveFromCurrent,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, tempError{err}\n\t}\n\n\tonCommitState := newVersionedState(parentState, newlyCurrentStakers, newlyPendingStakers)\n\tonCommitState.SetTimestamp(timestamp)\n\tonCommitState.SetCurrentSupply(currentSupply)\n\n\t\/\/ State doesn't change if this proposal is aborted\n\tonAbortState := newVersionedState(parentState, currentStakers, pendingStakers)\n\n\t\/\/ If this block is committed, update the validator sets.\n\t\/\/ onCommitDB will be committed to vm.DB before this is called.\n\tonCommitFunc := func() error {\n\t\t\/\/ For each Subnet, update the node's validator manager to reflect\n\t\t\/\/ current Subnet membership\n\t\treturn vm.updateValidators(false)\n\t}\n\n\treturn onCommitState, onAbortState, onCommitFunc, nil, nil\n}\n\n\/\/ InitiallyPrefersCommit returns true if the proposed time is at\n\/\/ or before the current time plus the synchrony bound\nfunc (tx *UnsignedAdvanceTimeTx) InitiallyPrefersCommit(vm *VM) bool {\n\treturn !tx.Timestamp().After(vm.clock.Time().Add(syncBound))\n}\n\n\/\/ newAdvanceTimeTx creates a new tx that, if it is accepted and followed by a\n\/\/ Commit block, will set the chain's timestamp to [timestamp].\nfunc (vm *VM) newAdvanceTimeTx(timestamp time.Time) (*Tx, error) {\n\ttx := &Tx{UnsignedTx: &UnsignedAdvanceTimeTx{\n\t\tTime: uint64(timestamp.Unix()),\n\t}}\n\treturn tx, tx.Sign(vm.codec, nil)\n}\n<commit_msg>fix advance timestamp tx comment<commit_after>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage platformvm\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/ava-labs\/avalanchego\/vms\/components\/avax\"\n\n\tsafemath \"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\nvar (\n\t_ UnsignedProposalTx = &UnsignedAdvanceTimeTx{}\n)\n\n\/\/ UnsignedAdvanceTimeTx is a transaction to increase the chain's timestamp.\n\/\/ When the chain's timestamp is updated (a AdvanceTimeTx is accepted and\n\/\/ followed by a commit block) the staker set is also updated accordingly.\n\/\/ It must be that:\n\/\/ * proposed timestamp > [current chain time]\n\/\/ * proposed timestamp <= [time for next staker set change]\ntype UnsignedAdvanceTimeTx struct {\n\tavax.Metadata\n\n\t\/\/ Unix time this block proposes increasing the timestamp to\n\tTime uint64 `serialize:\"true\" json:\"time\"`\n}\n\n\/\/ Timestamp returns the time this block is proposing the chain should be set to\nfunc (tx *UnsignedAdvanceTimeTx) Timestamp() time.Time {\n\treturn time.Unix(int64(tx.Time), 0)\n}\n\n\/\/ SemanticVerify this transaction is valid.\nfunc (tx *UnsignedAdvanceTimeTx) SemanticVerify(\n\tvm *VM,\n\tparentState MutableState,\n\tstx *Tx,\n) (\n\tVersionedState,\n\tVersionedState,\n\tfunc() error,\n\tfunc() error,\n\tTxError,\n) {\n\tswitch {\n\tcase tx == nil:\n\t\treturn nil, nil, nil, nil, tempError{errNilTx}\n\tcase len(stx.Creds) != 0:\n\t\treturn nil, nil, nil, nil, permError{errWrongNumberOfCredentials}\n\t}\n\n\ttimestamp := tx.Timestamp()\n\tlocalTimestamp := vm.clock.Time()\n\tif localTimestamp.Add(syncBound).Before(timestamp) {\n\t\treturn nil, nil, nil, nil, tempError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed time (%s) is too far in the future relative to local time (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tlocalTimestamp,\n\t\t\t),\n\t\t}\n\t}\n\n\tif currentTimestamp := parentState.GetTimestamp(); !timestamp.After(currentTimestamp) {\n\t\treturn nil, nil, nil, nil, permError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed timestamp (%s), not after current timestamp (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tcurrentTimestamp,\n\t\t\t),\n\t\t}\n\t}\n\n\t\/\/ Only allow timestamp to move forward as far as the time of next staker\n\t\/\/ set change time\n\tnextStakerChangeTime, err := vm.nextStakerChangeTime(parentState)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, tempError{err}\n\t}\n\n\tif timestamp.After(nextStakerChangeTime) {\n\t\treturn nil, nil, nil, nil, permError{\n\t\t\tfmt.Errorf(\n\t\t\t\t\"proposed timestamp (%s) later than next staker change time (%s)\",\n\t\t\t\ttimestamp,\n\t\t\t\tnextStakerChangeTime,\n\t\t\t),\n\t\t}\n\t}\n\n\tcurrentSupply := parentState.GetCurrentSupply()\n\n\tpendingStakers := parentState.PendingStakerChainState()\n\ttoAddValidatorsWithRewardToCurrent := []*validatorReward(nil)\n\ttoAddDelegatorsWithRewardToCurrent := []*validatorReward(nil)\n\ttoAddWithoutRewardToCurrent := []*Tx(nil)\n\tnumToRemoveFromPending := 0\n\n\t\/\/ Add to the staker set any pending stakers whose start time is at or\n\t\/\/ before the new timestamp. [pendingStakers.Stakers()] is sorted in order\n\t\/\/ of increasing startTime\npendingStakerLoop:\n\tfor _, tx := range pendingStakers.Stakers() {\n\t\tswitch staker := tx.UnsignedTx.(type) {\n\t\tcase *UnsignedAddDelegatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\tr := reward(\n\t\t\t\tstaker.Validator.Duration(),\n\t\t\t\tstaker.Validator.Wght,\n\t\t\t\tcurrentSupply,\n\t\t\t\tvm.StakeMintingPeriod,\n\t\t\t)\n\t\t\tcurrentSupply, err = safemath.Add64(currentSupply, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, permError{err}\n\t\t\t}\n\n\t\t\ttoAddDelegatorsWithRewardToCurrent = append(toAddDelegatorsWithRewardToCurrent, &validatorReward{\n\t\t\t\taddStakerTx: tx,\n\t\t\t\tpotentialReward: r,\n\t\t\t})\n\t\t\tnumToRemoveFromPending++\n\t\tcase *UnsignedAddValidatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\tr := reward(\n\t\t\t\tstaker.Validator.Duration(),\n\t\t\t\tstaker.Validator.Wght,\n\t\t\t\tcurrentSupply,\n\t\t\t\tvm.StakeMintingPeriod,\n\t\t\t)\n\t\t\tcurrentSupply, err = safemath.Add64(currentSupply, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, permError{err}\n\t\t\t}\n\n\t\t\ttoAddValidatorsWithRewardToCurrent = append(toAddValidatorsWithRewardToCurrent, &validatorReward{\n\t\t\t\taddStakerTx: tx,\n\t\t\t\tpotentialReward: r,\n\t\t\t})\n\t\t\tnumToRemoveFromPending++\n\t\tcase *UnsignedAddSubnetValidatorTx:\n\t\t\tif staker.StartTime().After(timestamp) {\n\t\t\t\tbreak pendingStakerLoop\n\t\t\t}\n\n\t\t\t\/\/ If this staker should already be removed, then we should just\n\t\t\t\/\/ never add them.\n\t\t\tif staker.EndTime().After(timestamp) {\n\t\t\t\ttoAddWithoutRewardToCurrent = append(toAddWithoutRewardToCurrent, tx)\n\t\t\t}\n\t\t\tnumToRemoveFromPending++\n\t\tdefault:\n\t\t\treturn nil, nil, nil, nil, permError{\n\t\t\t\tfmt.Errorf(\"expected validator but got %T\", tx.UnsignedTx),\n\t\t\t}\n\t\t}\n\t}\n\tnewlyPendingStakers := pendingStakers.DeleteStakers(numToRemoveFromPending)\n\n\tcurrentStakers := parentState.CurrentStakerChainState()\n\tnumToRemoveFromCurrent := 0\n\n\t\/\/ Remove from the staker set any subnet validators whose endTime is at or\n\t\/\/ before the new timestamp\ncurrentStakerLoop:\n\tfor _, tx := range currentStakers.Stakers() {\n\t\tswitch staker := tx.UnsignedTx.(type) {\n\t\tcase *UnsignedAddSubnetValidatorTx:\n\t\t\tif staker.EndTime().After(timestamp) {\n\t\t\t\tbreak currentStakerLoop\n\t\t\t}\n\n\t\t\tnumToRemoveFromCurrent++\n\t\tcase *UnsignedAddValidatorTx, *UnsignedAddDelegatorTx:\n\t\t\t\/\/ We shouldn't be removing any primary network validators here\n\t\t\tbreak currentStakerLoop\n\t\tdefault:\n\t\t\treturn nil, nil, nil, nil, permError{errWrongTxType}\n\t\t}\n\t}\n\tnewlyCurrentStakers, err := currentStakers.UpdateStakers(\n\t\ttoAddValidatorsWithRewardToCurrent,\n\t\ttoAddDelegatorsWithRewardToCurrent,\n\t\ttoAddWithoutRewardToCurrent,\n\t\tnumToRemoveFromCurrent,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, tempError{err}\n\t}\n\n\tonCommitState := newVersionedState(parentState, newlyCurrentStakers, newlyPendingStakers)\n\tonCommitState.SetTimestamp(timestamp)\n\tonCommitState.SetCurrentSupply(currentSupply)\n\n\t\/\/ State doesn't change if this proposal is aborted\n\tonAbortState := newVersionedState(parentState, currentStakers, pendingStakers)\n\n\t\/\/ If this block is committed, update the validator sets.\n\t\/\/ onCommitDB will be committed to vm.DB before this is called.\n\tonCommitFunc := func() error {\n\t\t\/\/ For each Subnet, update the node's validator manager to reflect\n\t\t\/\/ current Subnet membership\n\t\treturn vm.updateValidators(false)\n\t}\n\n\treturn onCommitState, onAbortState, onCommitFunc, nil, nil\n}\n\n\/\/ InitiallyPrefersCommit returns true if the proposed time is at\n\/\/ or before the current time plus the synchrony bound\nfunc (tx *UnsignedAdvanceTimeTx) InitiallyPrefersCommit(vm *VM) bool {\n\treturn !tx.Timestamp().After(vm.clock.Time().Add(syncBound))\n}\n\n\/\/ newAdvanceTimeTx creates a new tx that, if it is accepted and followed by a\n\/\/ Commit block, will set the chain's timestamp to [timestamp].\nfunc (vm *VM) newAdvanceTimeTx(timestamp time.Time) (*Tx, error) {\n\ttx := &Tx{UnsignedTx: &UnsignedAdvanceTimeTx{\n\t\tTime: uint64(timestamp.Unix()),\n\t}}\n\treturn tx, tx.Sign(vm.codec, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage platformvm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/ava-labs\/gecko\/database\"\n\t\"github.com\/ava-labs\/gecko\/ids\"\n\t\"github.com\/ava-labs\/gecko\/utils\/crypto\"\n\t\"github.com\/ava-labs\/gecko\/utils\/hashing\"\n\t\"github.com\/ava-labs\/gecko\/vms\/components\/verify\"\n)\n\nvar (\n\terrInvalidVMID = errors.New(\"invalid VM ID\")\n\terrFxIDsNotSortedAndUnique = errors.New(\"feature extensions IDs must be sorted and unique\")\n\terrControlSigsNotSortedAndUnique = errors.New(\"control signatures must be sorted and unique\")\n)\n\n\/\/ UnsignedCreateChainTx is an unsigned CreateChainTx\ntype UnsignedCreateChainTx struct {\n\tvm *VM\n\n\t\/\/ Metadata, inputs and outputs\n\tCommonTx `serialize:\"true\"`\n\n\t\/\/ ID of the Subnet that validates this blockchain\n\tSubnetID ids.ID `serialize:\"true\"`\n\n\t\/\/ A human readable name for the chain; need not be unique\n\tChainName string `serialize:\"true\"`\n\n\t\/\/ ID of the VM running on the new chain\n\tVMID ids.ID `serialize:\"true\"`\n\n\t\/\/ IDs of the feature extensions running on the new chain\n\tFxIDs []ids.ID `serialize:\"true\"`\n\n\t\/\/ Byte representation of genesis state of the new chain\n\tGenesisData []byte `serialize:\"true\"`\n}\n\n\/\/ UnsignedBytes returns the byte representation of this unsigned tx\nfunc (tx *UnsignedCreateChainTx) UnsignedBytes() []byte {\n\treturn tx.unsignedBytes\n}\n\n\/\/ CreateChainTx is a proposal to create a chain\ntype CreateChainTx struct {\n\tUnsignedCreateChainTx `serialize:\"true\"`\n\n\t\/\/ Credentials that authorize the inputs to spend the corresponding outputs\n\tCreds []verify.Verifiable `serialize:\"true\"`\n\n\t\/\/ Signatures from Subnet's control keys\n\t\/\/ Should not empty slice, not nil, if there are no control sigs\n\tControlSigs [][crypto.SECP256K1RSigLen]byte `serialize:\"true\"`\n}\n\nfunc (tx *CreateChainTx) initialize(vm *VM) error {\n\ttx.vm = vm\n\tvar err error\n\ttx.unsignedBytes, err = Codec.Marshal(interface{}(tx.UnsignedCreateChainTx))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't marshal UnsignedCreateChainTx: %w\", err)\n\t}\n\ttx.bytes, err = Codec.Marshal(tx) \/\/ byte representation of the signed transaction\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't marshal CreateChainTx: %w\", err)\n\t}\n\ttx.id = ids.NewID(hashing.ComputeHash256Array(tx.bytes))\n\treturn err\n}\n\n\/\/ ID of this transaction\nfunc (tx *CreateChainTx) ID() ids.ID { return tx.id }\n\n\/\/ Bytes returns the byte representation of a CreateChainTx\nfunc (tx *CreateChainTx) Bytes() []byte { return tx.bytes }\n\n\/\/ SyntacticVerify this transaction is well-formed\n\/\/ Also populates [tx.Key] with the public key that signed this transaction\n\/\/ TODO: Verify only once\nfunc (tx *CreateChainTx) SyntacticVerify() error {\n\tswitch {\n\tcase tx == nil:\n\t\treturn errNilTx\n\tcase tx.NetworkID != tx.vm.Ctx.NetworkID: \/\/ verify the transaction is on this network\n\t\treturn errWrongNetworkID\n\tcase tx.id.IsZero():\n\t\treturn errInvalidID\n\tcase tx.VMID.IsZero():\n\t\treturn errInvalidVMID\n\tcase tx.SubnetID.Equals(DefaultSubnetID):\n\t\treturn errDSCantValidate\n\tcase !ids.IsSortedAndUniqueIDs(tx.FxIDs):\n\t\treturn errFxIDsNotSortedAndUnique\n\tcase !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs):\n\t\treturn errControlSigsNotSortedAndUnique\n\t}\n\n\tif err := syntacticVerifySpend(tx.Ins, tx.Outs, tx.vm.txFee, tx.vm.avaxAssetID); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ SemanticVerify this transaction is valid.\nfunc (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) {\n\tif err := tx.SyntacticVerify(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentChains, err := tx.vm.getChains(db) \/\/ chains that currently exist\n\tif err != nil {\n\t\treturn nil, errDBChains\n\t}\n\tfor _, chain := range currentChains {\n\t\tif chain.ID().Equals(tx.ID()) {\n\t\t\treturn nil, fmt.Errorf(\"chain with ID %s already exists\", chain.ID())\n\t\t}\n\t}\n\tcurrentChains = append(currentChains, tx) \/\/ add this new chain\n\tif err := tx.vm.putChains(db, currentChains); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/* TODO deduct fees\n\t\/\/ Deduct tx fee from payer's account\n\taccount, err := tx.vm.getAccount(db, tx.PayerAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ txFee is removed in account.Remove\n\t\/\/ TODO: Consider changing Remove to be parameterized on total amount (inc. tx fee) to remove\n\taccount, err = account.Remove(0, tx.Nonce)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tx.vm.putAccount(db, account); err != nil {\n\t\treturn nil, err\n\t}\n\t*\/\n\n\t\/\/ Verify that this transaction has sufficient control signatures\n\tsubnets, err := tx.vm.getSubnets(db) \/\/ all subnets that exist\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar subnet *CreateSubnetTx \/\/ the subnet that will validate the new chain\n\tfor _, sn := range subnets {\n\t\tif sn.id.Equals(tx.SubnetID) {\n\t\t\tsubnet = sn\n\t\t\tbreak\n\t\t}\n\t}\n\tif subnet == nil {\n\t\treturn nil, fmt.Errorf(\"there is no subnet with ID %s\", tx.SubnetID)\n\t}\n\tif len(tx.ControlSigs) != int(subnet.Threshold) {\n\t\treturn nil, fmt.Errorf(\"expected tx to have %d control sigs but has %d\", subnet.Threshold, len(tx.ControlSigs))\n\t}\n\n\tunsignedIntf := interface{}(&tx.UnsignedCreateChainTx)\n\tunsignedBytes, err := Codec.Marshal(&unsignedIntf) \/\/ Byte representation of the unsigned transaction\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunsignedBytesHash := hashing.ComputeHash256(unsignedBytes)\n\n\t\/\/ Each element is ID of key that signed this tx\n\tcontrolIDs := make([]ids.ShortID, len(tx.ControlSigs))\n\tfor i, sig := range tx.ControlSigs {\n\t\tkey, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontrolIDs[i] = key.Address()\n\t}\n\n\t\/\/ Verify each control signature on this tx is from a control key\n\tcontrolKeys := ids.ShortSet{}\n\tcontrolKeys.Add(subnet.ControlKeys...)\n\tfor _, controlID := range controlIDs {\n\t\tif !controlKeys.Contains(controlID) {\n\t\t\treturn nil, errors.New(\"tx has control signature from key not in subnet's ControlKeys\")\n\t\t}\n\t}\n\n\t\/\/ If this proposal is committed and this node is a member of the\n\t\/\/ subnet that validates the blockchain, create the blockchain\n\tonAccept := func() {\n\t\ttx.vm.createChain(tx)\n\t}\n\n\treturn onAccept, nil\n}\n\n\/\/ We use this type so we can serialize a list of *CreateChainTx\n\/\/ by defining a Bytes method on it\ntype createChainList []*CreateChainTx\n\n\/\/ Bytes returns the byte representation of a list of *CreateChainTx\nfunc (chains createChainList) Bytes() []byte {\n\tbytes, _ := Codec.Marshal(chains)\n\treturn bytes\n}\n\n\/* TODO implement\nfunc (vm *VM) newCreateChainTx(nonce uint64, subnetID ids.ID, genesisData []byte,\n\tvmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32,\n\tcontrolKeys []*crypto.PrivateKeySECP256K1R,\n\tpayerKey *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) {\n\ttx := &CreateChainTx{\n\t\tUnsignedCreateChainTx: UnsignedCreateChainTx{\n\t\t\tNetworkID: networkID,\n\t\t\tSubnetID: subnetID,\n\t\t\tNonce: nonce,\n\t\t\tGenesisData: genesisData,\n\t\t\tVMID: vmID,\n\t\t\tFxIDs: fxIDs,\n\t\t\tChainName: chainName,\n\t\t},\n\t}\n\n\t\/\/ Generate byte repr. of unsigned transaction\n\tunsignedIntf := interface{}(&tx.UnsignedCreateChainTx)\n\tunsignedBytes, err := Codec.Marshal(&unsignedIntf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunsignedBytesHash := hashing.ComputeHash256(unsignedBytes)\n\n\t\/\/ Sign the tx with control keys\n\ttx.ControlSigs = make([][crypto.SECP256K1RSigLen]byte, len(controlKeys))\n\tfor i, key := range controlKeys {\n\t\tsig, err := key.SignHash(unsignedBytesHash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcopy(tx.ControlSigs[i][:], sig)\n\t}\n\n\t\/\/ Sort the control signatures\n\tcrypto.SortSECP2561RSigs(tx.ControlSigs)\n\n\t\/\/ Sign with the payer key\n\tpayerSig, err := payerKey.Sign(unsignedBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcopy(tx.PayerSig[:], payerSig)\n\n\treturn tx, tx.initialize(vm)\n}\n*\/\n<commit_msg>update CreateChainTx<commit_after>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage platformvm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/ava-labs\/gecko\/database\"\n\t\"github.com\/ava-labs\/gecko\/ids\"\n\t\"github.com\/ava-labs\/gecko\/utils\/crypto\"\n\t\"github.com\/ava-labs\/gecko\/utils\/hashing\"\n\t\"github.com\/ava-labs\/gecko\/vms\/components\/verify\"\n)\n\nvar (\n\terrInvalidVMID = errors.New(\"invalid VM ID\")\n\terrFxIDsNotSortedAndUnique = errors.New(\"feature extensions IDs must be sorted and unique\")\n\terrControlSigsNotSortedAndUnique = errors.New(\"control signatures must be sorted and unique\")\n)\n\n\/\/ UnsignedCreateChainTx is an unsigned CreateChainTx\ntype UnsignedCreateChainTx struct {\n\tvm *VM\n\n\t\/\/ Metadata, inputs and outputs\n\tCommonTx `serialize:\"true\"`\n\n\t\/\/ ID of the Subnet that validates this blockchain\n\tSubnetID ids.ID `serialize:\"true\"`\n\n\t\/\/ A human readable name for the chain; need not be unique\n\tChainName string `serialize:\"true\"`\n\n\t\/\/ ID of the VM running on the new chain\n\tVMID ids.ID `serialize:\"true\"`\n\n\t\/\/ IDs of the feature extensions running on the new chain\n\tFxIDs []ids.ID `serialize:\"true\"`\n\n\t\/\/ Byte representation of genesis state of the new chain\n\tGenesisData []byte `serialize:\"true\"`\n}\n\n\/\/ UnsignedBytes returns the byte representation of this unsigned tx\nfunc (tx *UnsignedCreateChainTx) UnsignedBytes() []byte {\n\treturn tx.unsignedBytes\n}\n\n\/\/ CreateChainTx is a proposal to create a chain\ntype CreateChainTx struct {\n\tUnsignedCreateChainTx `serialize:\"true\"`\n\n\t\/\/ Credentials that authorize the inputs to spend the corresponding outputs\n\tCreds []verify.Verifiable `serialize:\"true\"`\n\n\t\/\/ Signatures from Subnet's control keys\n\t\/\/ Should not empty slice, not nil, if there are no control sigs\n\tControlSigs [][crypto.SECP256K1RSigLen]byte `serialize:\"true\"`\n}\n\n\/\/ initialize [tx]\n\/\/ set tx.unsignedBytes, tx.bytes, tx.id\nfunc (tx *CreateChainTx) initialize(vm *VM) error {\n\ttx.vm = vm\n\tvar err error\n\ttx.unsignedBytes, err = Codec.Marshal(interface{}(tx.UnsignedCreateChainTx))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't marshal UnsignedCreateChainTx: %w\", err)\n\t}\n\ttx.bytes, err = Codec.Marshal(tx) \/\/ byte representation of the signed transaction\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't marshal CreateChainTx: %w\", err)\n\t}\n\ttx.id = ids.NewID(hashing.ComputeHash256Array(tx.bytes))\n\treturn err\n}\n\n\/\/ ID of this transaction\nfunc (tx *CreateChainTx) ID() ids.ID { return tx.id }\n\n\/\/ Bytes returns the byte representation of a CreateChainTx\nfunc (tx *CreateChainTx) Bytes() []byte { return tx.bytes }\n\n\/\/ SyntacticVerify this transaction is well-formed\n\/\/ Also populates [tx.Key] with the public key that signed this transaction\n\/\/ TODO: Verify only once\nfunc (tx *CreateChainTx) SyntacticVerify() error {\n\tswitch {\n\tcase tx == nil:\n\t\treturn errNilTx\n\tcase tx.NetworkID != tx.vm.Ctx.NetworkID: \/\/ verify the transaction is on this network\n\t\treturn errWrongNetworkID\n\tcase tx.id.IsZero():\n\t\treturn errInvalidID\n\tcase tx.VMID.IsZero():\n\t\treturn errInvalidVMID\n\tcase tx.SubnetID.Equals(DefaultSubnetID):\n\t\treturn errDSCantValidate\n\tcase !ids.IsSortedAndUniqueIDs(tx.FxIDs):\n\t\treturn errFxIDsNotSortedAndUnique\n\tcase !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs):\n\t\treturn errControlSigsNotSortedAndUnique\n\t}\n\n\tif err := syntacticVerifySpend(tx.Ins, tx.Outs, tx.vm.txFee, tx.vm.avaxAssetID); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ SemanticVerify this transaction is valid.\nfunc (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) {\n\tif err := tx.SyntacticVerify(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentChains, err := tx.vm.getChains(db) \/\/ chains that currently exist\n\tif err != nil {\n\t\treturn nil, errDBChains\n\t}\n\tfor _, chain := range currentChains {\n\t\tif chain.ID().Equals(tx.ID()) {\n\t\t\treturn nil, fmt.Errorf(\"chain %s already exists\", chain.ID())\n\t\t}\n\t}\n\tcurrentChains = append(currentChains, tx) \/\/ add this new chain\n\tif err := tx.vm.putChains(db, currentChains); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Update the UTXO set\n\tfor _, in := range tx.Ins {\n\t\tutxoID := in.InputID() \/\/ ID of the UTXO that [in] spends\n\t\tif err := tx.vm.removeUTXO(db, utxoID); err != nil {\n\t\t\treturn nil, tempError{fmt.Errorf(\"couldn't remove UTXO %s from UTXO set: %w\", utxoID, err)}\n\t\t}\n\t}\n\tfor _, out := range tx.Outs {\n\t\tif err := tx.vm.putUTXO(db, tx.ID(), out); err != nil {\n\t\t\treturn nil, tempError{fmt.Errorf(\"couldn't add UTXO to UTXO set: %w\", err)}\n\t\t}\n\t}\n\n\t\/\/ Verify that this transaction has sufficient control signatures\n\tsubnets, err := tx.vm.getSubnets(db) \/\/ all subnets that exist\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar subnet *CreateSubnetTx \/\/ the subnet that will validate the new chain\n\tfor _, sn := range subnets {\n\t\tif sn.id.Equals(tx.SubnetID) {\n\t\t\tsubnet = sn\n\t\t\tbreak\n\t\t}\n\t}\n\tif subnet == nil {\n\t\treturn nil, fmt.Errorf(\"subnet %s doesn't exist\", tx.SubnetID)\n\t} else if len(tx.ControlSigs) != int(subnet.Threshold) {\n\t\treturn nil, fmt.Errorf(\"expected tx to have %d control sigs but has %d\", subnet.Threshold, len(tx.ControlSigs))\n\t}\n\tunsignedBytesHash := hashing.ComputeHash256(tx.unsignedBytes)\n\t\/\/ Each element is ID of key that signed this tx\n\tcontrolIDs := make([]ids.ShortID, len(tx.ControlSigs))\n\tfor i, sig := range tx.ControlSigs {\n\t\tkey, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontrolIDs[i] = key.Address()\n\t}\n\n\t\/\/ Verify each control signature on this tx is from a control key\n\tcontrolKeys := ids.ShortSet{}\n\tcontrolKeys.Add(subnet.ControlKeys...)\n\tfor _, controlID := range controlIDs {\n\t\tif !controlKeys.Contains(controlID) {\n\t\t\treturn nil, errors.New(\"tx has control signature from key not in subnet's ControlKeys\")\n\t\t}\n\t}\n\n\t\/\/ If this proposal is committed and this node is a member of the\n\t\/\/ subnet that validates the blockchain, create the blockchain\n\tonAccept := func() { tx.vm.createChain(tx) }\n\treturn onAccept, nil\n}\n\n\/\/ We use this type so we can serialize a list of *CreateChainTx\n\/\/ by defining a Bytes method on it\ntype createChainList []*CreateChainTx\n\n\/\/ Bytes returns the byte representation of a list of *CreateChainTx\nfunc (chains createChainList) Bytes() []byte {\n\tbytes, _ := Codec.Marshal(chains)\n\treturn bytes\n}\n\n\/* TODO implement\nfunc (vm *VM) newCreateChainTx(nonce uint64, subnetID ids.ID, genesisData []byte,\n\tvmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32,\n\tcontrolKeys []*crypto.PrivateKeySECP256K1R,\n\tpayerKey *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) {\n\ttx := &CreateChainTx{\n\t\tUnsignedCreateChainTx: UnsignedCreateChainTx{\n\t\t\tNetworkID: networkID,\n\t\t\tSubnetID: subnetID,\n\t\t\tNonce: nonce,\n\t\t\tGenesisData: genesisData,\n\t\t\tVMID: vmID,\n\t\t\tFxIDs: fxIDs,\n\t\t\tChainName: chainName,\n\t\t},\n\t}\n\n\t\/\/ Generate byte repr. of unsigned transaction\n\tunsignedIntf := interface{}(&tx.UnsignedCreateChainTx)\n\tunsignedBytes, err := Codec.Marshal(&unsignedIntf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunsignedBytesHash := hashing.ComputeHash256(unsignedBytes)\n\n\t\/\/ Sign the tx with control keys\n\ttx.ControlSigs = make([][crypto.SECP256K1RSigLen]byte, len(controlKeys))\n\tfor i, key := range controlKeys {\n\t\tsig, err := key.SignHash(unsignedBytesHash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcopy(tx.ControlSigs[i][:], sig)\n\t}\n\n\t\/\/ Sort the control signatures\n\tcrypto.SortSECP2561RSigs(tx.ControlSigs)\n\n\t\/\/ Sign with the payer key\n\tpayerSig, err := payerKey.Sign(unsignedBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcopy(tx.PayerSig[:], payerSig)\n\n\treturn tx, tx.initialize(vm)\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before>package fields\n\nimport (\n\t\"github.com\/cjtoolkit\/form\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/*\nImplement:\n\tFormFieldInterface in \"github.com\/cjtoolkit\/form\"\n*\/\ntype Int struct {\n\tName string \/\/ Mandatory\n\tLabel string \/\/ Mandatory\n\tNorm *string \/\/ Mandatory\n\tModel *int64 \/\/ Mandatory\n\tErr *error \/\/ Mandatory\n\tRequired bool\n\tMin int64\n\tMinZero bool\n\tMax int64\n\tMaxZero bool\n\tStep int64\n\tInList []int64\n}\n\nconst (\n\tINT_DECIMAL = 10\n\tINT_BIT = 64\n)\n\nfunc (i Int) PreCheck() {\n\tswitch {\n\tcase \"\" == strings.TrimSpace(i.Name):\n\t\tpanic(form.ErrorPreCheck(\"Int Field: Name cannot be empty string\"))\n\tcase \"\" == strings.TrimSpace(i.Label):\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Label cannot be empty string\"))\n\tcase nil == i.Norm:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Norm cannot be nil value\"))\n\tcase nil == i.Model:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Model cannot be nil value\"))\n\tcase nil == i.Err:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Err cannot be nil value\"))\n\t}\n}\n\nfunc (i Int) GetErrorPtr() *error {\n\treturn i.Err\n}\n\nfunc (i Int) PopulateNorm(values form.ValuesInterface) {\n\t*i.Norm = values.GetOne(i.Name)\n}\n\nfunc (i Int) Transform() {\n\t*i.Norm = strconv.FormatInt(*i.Model, INT_DECIMAL)\n}\n\nfunc (i Int) ReverseTransform() {\n\tnum, err := strconv.ParseInt(strings.TrimSpace(*i.Norm), INT_BIT, INT_DECIMAL)\n\tExecFuncIfErrIsNotNil(err, func() {\n\t\tpanic(&form.ErrorReverseTransform{\n\t\t\tKey: form.LANG_NOT_INT,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t},\n\t\t})\n\t})\n\t*i.Model = num\n}\n\nfunc (i Int) ValidateModel() {\n\ti.validateRequired()\n\ti.validateMin()\n\ti.validateMax()\n\ti.validateStep()\n}\n\nfunc (i Int) validateRequired() {\n\tswitch {\n\tcase !i.Required:\n\t\treturn\n\tcase 0 == *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_FIELD_REQUIRED,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateMin() {\n\tswitch {\n\tcase 0 == i.Min && !i.MinZero:\n\t\treturn\n\tcase i.Min > *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_MIN,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Min\": i.Min,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateMax() {\n\tswitch {\n\tcase 0 == i.Max && !i.MaxZero:\n\t\treturn\n\tcase i.Max < *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_MAX,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Max\": i.Max,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateStep() {\n\tswitch {\n\tcase 0 == i.Step:\n\t\treturn\n\tcase 0 != *i.Model%i.Step:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_STEP,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Step\": i.Step,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateInList() {\n\tif nil == i.InList {\n\t\treturn\n\t}\n\n\tmodel := *i.Model\n\n\tfor _, value := range i.InList {\n\t\tif model == value {\n\t\t\treturn\n\t\t}\n\t}\n\n\tpanic(&form.ErrorValidateModel{\n\t\tKey: form.LANG_IN_LIST,\n\t\tValue: map[string]interface{}{\n\t\t\t\"Label\": i.Label,\n\t\t\t\"List\": i.InList,\n\t\t},\n\t})\n}\n<commit_msg>Added json to int as well.<commit_after>package fields\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/cjtoolkit\/form\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/*\nImplement:\n\tFormFieldInterface in \"github.com\/cjtoolkit\/form\"\n*\/\ntype Int struct {\n\tName string \/\/ Mandatory\n\tLabel string \/\/ Mandatory\n\tNorm *string \/\/ Mandatory\n\tModel *int64 \/\/ Mandatory\n\tErr *error \/\/ Mandatory\n\tRequired bool\n\tMin int64\n\tMinZero bool\n\tMax int64\n\tMaxZero bool\n\tStep int64\n\tInList []int64\n\tExtra func()\n}\n\ntype intJson struct {\n\tType string `json:\"type\"`\n\tName string `json:\"name\"`\n\tRequired bool `json:\"required\"`\n\tSuccess bool `json:\"success\"`\n\tError string `json:\"error,omitempty\"`\n\tMin int64 `json:\"min,omitempty\"`\n\tMinZero bool `json:\"minZero,omitempty\"`\n\tMax int64 `json:\"max,omitempty\"`\n\tMaxZero bool `json:\"maxZero,omitempty\"`\n\tStep int64 `json:\"step,omitempty\"`\n\tList []int64 `json:\"list,omitempty\"`\n}\n\nconst (\n\tINT_DECIMAL = 10\n\tINT_BIT = 64\n)\n\nfunc (i Int) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(intJson{\n\t\tType: \"int\",\n\t\tName: i.Name,\n\t\tRequired: i.Required,\n\t\tSuccess: nil == *i.Err,\n\t\tMin: i.Min,\n\t\tMinZero: i.MinZero,\n\t\tMax: i.Max,\n\t\tMaxZero: i.MaxZero,\n\t\tStep: i.Step,\n\t\tList: i.InList,\n\t})\n}\n\nfunc (i Int) PreCheck() {\n\tswitch {\n\tcase \"\" == strings.TrimSpace(i.Name):\n\t\tpanic(form.ErrorPreCheck(\"Int Field: Name cannot be empty string\"))\n\tcase \"\" == strings.TrimSpace(i.Label):\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Label cannot be empty string\"))\n\tcase nil == i.Norm:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Norm cannot be nil value\"))\n\tcase nil == i.Model:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Model cannot be nil value\"))\n\tcase nil == i.Err:\n\t\tpanic(form.ErrorPreCheck(\"Int Field: \" + i.Name + \": Err cannot be nil value\"))\n\t}\n}\n\nfunc (i Int) GetErrorPtr() *error {\n\treturn i.Err\n}\n\nfunc (i Int) PopulateNorm(values form.ValuesInterface) {\n\t*i.Norm = values.GetOne(i.Name)\n}\n\nfunc (i Int) Transform() {\n\t*i.Norm = strconv.FormatInt(*i.Model, INT_DECIMAL)\n}\n\nfunc (i Int) ReverseTransform() {\n\tnum, err := strconv.ParseInt(strings.TrimSpace(*i.Norm), INT_BIT, INT_DECIMAL)\n\tExecFuncIfErrIsNotNil(err, func() {\n\t\tpanic(&form.ErrorReverseTransform{\n\t\t\tKey: form.LANG_NOT_INT,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t},\n\t\t})\n\t})\n\t*i.Model = num\n}\n\nfunc (i Int) ValidateModel() {\n\ti.validateRequired()\n\ti.validateMin()\n\ti.validateMax()\n\ti.validateStep()\n\ti.validateInList()\n\texecFnIfNotNil(i.Extra)\n}\n\nfunc (i Int) validateRequired() {\n\tswitch {\n\tcase !i.Required:\n\t\treturn\n\tcase 0 == *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_FIELD_REQUIRED,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateMin() {\n\tswitch {\n\tcase 0 == i.Min && !i.MinZero:\n\t\treturn\n\tcase i.Min > *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_MIN,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Min\": i.Min,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateMax() {\n\tswitch {\n\tcase 0 == i.Max && !i.MaxZero:\n\t\treturn\n\tcase i.Max < *i.Model:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_MAX,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Max\": i.Max,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateStep() {\n\tswitch {\n\tcase 0 == i.Step:\n\t\treturn\n\tcase 0 != *i.Model%i.Step:\n\t\tpanic(&form.ErrorValidateModel{\n\t\t\tKey: form.LANG_NUMBER_STEP,\n\t\t\tValue: map[string]interface{}{\n\t\t\t\t\"Label\": i.Label,\n\t\t\t\t\"Step\": i.Step,\n\t\t\t},\n\t\t})\n\t}\n}\n\nfunc (i Int) validateInList() {\n\tif nil == i.InList {\n\t\treturn\n\t}\n\n\tmodel := *i.Model\n\n\tfor _, value := range i.InList {\n\t\tif model == value {\n\t\t\treturn\n\t\t}\n\t}\n\n\tpanic(&form.ErrorValidateModel{\n\t\tKey: form.LANG_IN_LIST,\n\t\tValue: map[string]interface{}{\n\t\t\t\"Label\": i.Label,\n\t\t\t\"List\": i.InList,\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Janoš Guljaš <janos@resenje.org>\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage boltdbpool implements a pool container for BoltDB github.com\/coreos\/bbolt databases.\nPool elements called connections keep reference counts for each database to close it\nwhen it when the count is 0. Database is reused or opened based on database file path. Closing\nthe database must not be done directly, instead Connection.Close() method should be used.\nDatabase is removed form the pool and closed by the goroutine in the background in respect to\nreference count and delay in time if it is specified.\n\nExample:\n\n package main\n\n import (\n \"fmt\"\n \"time\"\n\n \"resenje.org\/boltdbpool\"\n )\n\n func main() {\n pool := boltdbpool.New(&boltdbpool.Options{\n ConnectionExpires: 5 * time.Second,\n ErrorHandler: boltdbpool.ErrorHandlerFunc(func(err error) {\n fmt.Printf(\"error: %v\", err)\n }),\n })\n defer p.Close()\n\n ...\n\n c, err := pool.Get(\"\/tmp\/db.bolt\")\n if err != nil {\n panic(err)\n }\n defer c.Close()\n\n ...\n\n c.DB.Update(func(tx *bolt.TX) error {\n ...\n })\n }\n*\/\npackage boltdbpool \/\/ import \"resenje.org\/boltdbpool\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\tbolt \"go.etcd.io\/bbolt\"\n)\n\nvar (\n\t\/\/ DefaultFileMode is used in bolt.Open() as file mode for database file\n\t\/\/ if FileMode is not specified in boltdbpool.Options.\n\tDefaultFileMode = os.FileMode(0640)\n\n\t\/\/ DefaultDirMode is used in os.MkdirAll() as file mode for database directories\n\t\/\/ if DirMode is not specified in boltdbpool.Options.\n\tDefaultDirMode = os.FileMode(0750)\n\n\t\/\/ DefaultErrorHandler accepts errors from\n\t\/\/ goroutine that closes the databases if ErrorHandler is not specified in\n\t\/\/ boltdbpool.Options.\n\tDefaultErrorHandler = ErrorHandlerFunc(func(err error) {\n\t\tlog.Printf(\"error: %v\", err)\n\t})\n\n\t\/\/ DefaultCloseSleep is a time between database closing iterations.\n\tdefaultCloseSleep = 250 * time.Millisecond\n)\n\n\/\/ Connection encapsulates bolt.DB and keeps reference counter and closing time information.\ntype Connection struct {\n\tDB *bolt.DB\n\n\tpool *Pool\n\tpath string\n\tcount int64\n\texpires time.Duration\n\tcloseTime time.Time\n\tmu sync.RWMutex\n}\n\n\/\/ Close function on Connection decrements reference counter and closes the database if needed.\nfunc (c *Connection) Close() {\n\tc.decrement()\n\tif c.count <= 0 {\n\t\tif c.expires == 0 {\n\t\t\tc.pool.mu.Lock()\n\t\t\tc.pool.errorChannel <- c.removeFromPool()\n\t\t\tc.pool.mu.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tc.closeTime = time.Now().Add(c.expires)\n\t\tc.mu.Unlock()\n\t}\n}\n\nfunc (c *Connection) increment() {\n\tc.mu.Lock()\n\n\t\/\/ Reset the closing time\n\tc.closeTime = time.Time{}\n\tc.count++\n\n\tc.mu.Unlock()\n}\n\nfunc (c *Connection) decrement() {\n\tc.mu.Lock()\n\n\tc.count--\n\n\tc.mu.Unlock()\n}\n\nfunc (c *Connection) removeFromPool() error {\n\treturn c.pool.remove(c.path)\n}\n\n\/\/ ErrorHandler interface can be used for objects that log or panic on error\ntype ErrorHandler interface {\n\tHandleError(err error)\n}\n\n\/\/ The ErrorHandlerFunc type is an adapter to allow the use of\n\/\/ ordinary functions as error handlers.\ntype ErrorHandlerFunc func(err error)\n\n\/\/ HandleError calls f(err).\nfunc (f ErrorHandlerFunc) HandleError(err error) {\n\tf(err)\n}\n\n\/\/ Options are used when a new pool is created that.\ntype Options struct {\n\t\/\/ BoltOptions is used on bolt.Open().\n\tBoltOptions *bolt.Options\n\n\t\/\/ FileMode is used in bolt.Open() as file mode for database file. Default: 0640.\n\tFileMode os.FileMode\n\n\t\/\/ DirMode is used in os.MkdirAll() as file mode for database directories. Default: 0750.\n\tDirMode os.FileMode\n\n\t\/\/ ConnectionExpires is a duration between the reference count drops to 0 and\n\t\/\/ the time when the database is closed. It is useful to avoid frequent\n\t\/\/ openings of the same database. If the value is 0 (default), no caching is done.\n\tConnectionExpires time.Duration\n\n\t\/\/ ErrorHandler represents interface that accepts errors from goroutine that closes the databases.\n\tErrorHandler ErrorHandler\n}\n\n\/\/ Pool keeps track of connections.\ntype Pool struct {\n\toptions *Options\n\terrorChannel chan error\n\tconnections map[string]*Connection\n\tmu sync.RWMutex\n}\n\n\/\/ New creates new pool with provided options and also starts database closing goroutone\n\/\/ and goroutine for errors handling to ErrorHandler.\nfunc New(options *Options) *Pool {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\tif options.FileMode == 0 {\n\t\toptions.FileMode = DefaultFileMode\n\t}\n\tif options.DirMode == 0 {\n\t\toptions.DirMode = DefaultDirMode\n\t}\n\tif options.ErrorHandler == nil {\n\t\toptions.ErrorHandler = DefaultErrorHandler\n\t}\n\tp := &Pool{\n\t\toptions: options,\n\t\terrorChannel: make(chan error),\n\t\tconnections: map[string]*Connection{},\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tp.mu.Lock()\n\t\t\tfor _, c := range p.connections {\n\t\t\t\tc.mu.RLock()\n\t\t\t\tif !c.closeTime.IsZero() && c.closeTime.Before(time.Now()) {\n\t\t\t\t\tp.errorChannel <- c.removeFromPool()\n\t\t\t\t}\n\t\t\t\tc.mu.RUnlock()\n\t\t\t}\n\t\t\tp.mu.Unlock()\n\t\t\ttime.Sleep(defaultCloseSleep)\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor err := range p.errorChannel {\n\t\t\tif err != nil {\n\t\t\t\tp.options.ErrorHandler.HandleError(err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn p\n}\n\n\/\/ Get returns a connection that contains a database or creates a new connection\n\/\/ with newly opened database based on options specified on pool creation.\nfunc (p *Pool) Get(path string) (*Connection, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tif c, ok := p.connections[path]; ok {\n\t\tc.increment()\n\t\treturn c, nil\n\t}\n\tif _, err := os.Stat(filepath.Dir(path)); os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(filepath.Dir(path), p.options.DirMode); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tdb, err := bolt.Open(path, p.options.FileMode, p.options.BoltOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Connection{\n\t\tDB: db,\n\t\tpath: path,\n\t\tpool: p,\n\t\texpires: p.options.ConnectionExpires,\n\t}\n\tp.connections[path] = c\n\n\tc.increment()\n\treturn c, nil\n}\n\n\/\/ Has returns true if a database with a file path is in the pool.\nfunc (p *Pool) Has(path string) bool {\n\tp.mu.RLock()\n\tdefer p.mu.RUnlock()\n\n\t_, ok := p.connections[path]\n\treturn ok\n}\n\n\/\/ Close function closes and removes from the pool all databases. After the execution\n\/\/ pool is not usable.\nfunc (p *Pool) Close() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor _, c := range p.connections {\n\t\tp.errorChannel <- c.removeFromPool()\n\t}\n\tclose(p.errorChannel)\n}\n\nfunc (p *Pool) remove(path string) error {\n\tc, ok := p.connections[path]\n\tif !ok {\n\t\treturn fmt.Errorf(\"boltdbpool: Unknown DB %s\", path)\n\t}\n\tdelete(p.connections, path)\n\treturn c.DB.Close()\n}\n<commit_msg>Prevent goroutine leak<commit_after>\/\/ Copyright (c) 2015 Janoš Guljaš <janos@resenje.org>\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage boltdbpool implements a pool container for BoltDB github.com\/coreos\/bbolt databases.\nPool elements called connections keep reference counts for each database to close it\nwhen it when the count is 0. Database is reused or opened based on database file path. Closing\nthe database must not be done directly, instead Connection.Close() method should be used.\nDatabase is removed form the pool and closed by the goroutine in the background in respect to\nreference count and delay in time if it is specified.\n\nExample:\n\n package main\n\n import (\n \"fmt\"\n \"time\"\n\n \"resenje.org\/boltdbpool\"\n )\n\n func main() {\n pool := boltdbpool.New(&boltdbpool.Options{\n ConnectionExpires: 5 * time.Second,\n ErrorHandler: boltdbpool.ErrorHandlerFunc(func(err error) {\n fmt.Printf(\"error: %v\", err)\n }),\n })\n defer p.Close()\n\n ...\n\n c, err := pool.Get(\"\/tmp\/db.bolt\")\n if err != nil {\n panic(err)\n }\n defer c.Close()\n\n ...\n\n c.DB.Update(func(tx *bolt.TX) error {\n ...\n })\n }\n*\/\npackage boltdbpool \/\/ import \"resenje.org\/boltdbpool\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\tbolt \"go.etcd.io\/bbolt\"\n)\n\nvar (\n\t\/\/ DefaultFileMode is used in bolt.Open() as file mode for database file\n\t\/\/ if FileMode is not specified in boltdbpool.Options.\n\tDefaultFileMode = os.FileMode(0640)\n\n\t\/\/ DefaultDirMode is used in os.MkdirAll() as file mode for database directories\n\t\/\/ if DirMode is not specified in boltdbpool.Options.\n\tDefaultDirMode = os.FileMode(0750)\n\n\t\/\/ DefaultErrorHandler accepts errors from\n\t\/\/ goroutine that closes the databases if ErrorHandler is not specified in\n\t\/\/ boltdbpool.Options.\n\tDefaultErrorHandler = ErrorHandlerFunc(func(err error) {\n\t\tlog.Printf(\"error: %v\", err)\n\t})\n\n\t\/\/ DefaultCloseSleep is a time between database closing iterations.\n\tdefaultCloseSleep = 250 * time.Millisecond\n)\n\n\/\/ Connection encapsulates bolt.DB and keeps reference counter and closing time information.\ntype Connection struct {\n\tDB *bolt.DB\n\n\tpool *Pool\n\tpath string\n\tcount int64\n\texpires time.Duration\n\tcloseTime time.Time\n\tmu sync.RWMutex\n}\n\n\/\/ Close function on Connection decrements reference counter and closes the database if needed.\nfunc (c *Connection) Close() {\n\tc.decrement()\n\tif c.count <= 0 {\n\t\tif c.expires == 0 {\n\t\t\tc.pool.mu.Lock()\n\t\t\tc.pool.errorChannel <- c.removeFromPool()\n\t\t\tc.pool.mu.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tc.closeTime = time.Now().Add(c.expires)\n\t\tc.mu.Unlock()\n\t}\n}\n\nfunc (c *Connection) increment() {\n\tc.mu.Lock()\n\n\t\/\/ Reset the closing time\n\tc.closeTime = time.Time{}\n\tc.count++\n\n\tc.mu.Unlock()\n}\n\nfunc (c *Connection) decrement() {\n\tc.mu.Lock()\n\n\tc.count--\n\n\tc.mu.Unlock()\n}\n\nfunc (c *Connection) removeFromPool() error {\n\treturn c.pool.remove(c.path)\n}\n\n\/\/ ErrorHandler interface can be used for objects that log or panic on error\ntype ErrorHandler interface {\n\tHandleError(err error)\n}\n\n\/\/ The ErrorHandlerFunc type is an adapter to allow the use of\n\/\/ ordinary functions as error handlers.\ntype ErrorHandlerFunc func(err error)\n\n\/\/ HandleError calls f(err).\nfunc (f ErrorHandlerFunc) HandleError(err error) {\n\tf(err)\n}\n\n\/\/ Options are used when a new pool is created that.\ntype Options struct {\n\t\/\/ BoltOptions is used on bolt.Open().\n\tBoltOptions *bolt.Options\n\n\t\/\/ FileMode is used in bolt.Open() as file mode for database file. Default: 0640.\n\tFileMode os.FileMode\n\n\t\/\/ DirMode is used in os.MkdirAll() as file mode for database directories. Default: 0750.\n\tDirMode os.FileMode\n\n\t\/\/ ConnectionExpires is a duration between the reference count drops to 0 and\n\t\/\/ the time when the database is closed. It is useful to avoid frequent\n\t\/\/ openings of the same database. If the value is 0 (default), no caching is done.\n\tConnectionExpires time.Duration\n\n\t\/\/ ErrorHandler represents interface that accepts errors from goroutine that closes the databases.\n\tErrorHandler ErrorHandler\n}\n\n\/\/ Pool keeps track of connections.\ntype Pool struct {\n\toptions *Options\n\terrorChannel chan error\n\tconnections map[string]*Connection\n\tmu sync.RWMutex\n\tquit chan struct{}\n}\n\n\/\/ New creates new pool with provided options and also starts database closing goroutone\n\/\/ and goroutine for errors handling to ErrorHandler.\nfunc New(options *Options) *Pool {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\tif options.FileMode == 0 {\n\t\toptions.FileMode = DefaultFileMode\n\t}\n\tif options.DirMode == 0 {\n\t\toptions.DirMode = DefaultDirMode\n\t}\n\tif options.ErrorHandler == nil {\n\t\toptions.ErrorHandler = DefaultErrorHandler\n\t}\n\tp := &Pool{\n\t\toptions: options,\n\t\terrorChannel: make(chan error),\n\t\tconnections: map[string]*Connection{},\n\t\tquit: make(chan struct{}),\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(defaultCloseSleep):\n\t\t\t\tp.mu.Lock()\n\t\t\t\tfor _, c := range p.connections {\n\t\t\t\t\tc.mu.RLock()\n\t\t\t\t\tif !c.closeTime.IsZero() && c.closeTime.Before(time.Now()) {\n\t\t\t\t\t\tp.errorChannel <- c.removeFromPool()\n\t\t\t\t\t}\n\t\t\t\t\tc.mu.RUnlock()\n\t\t\t\t}\n\t\t\t\tp.mu.Unlock()\n\t\t\tcase <-p.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor err := range p.errorChannel {\n\t\t\tif err != nil {\n\t\t\t\tp.options.ErrorHandler.HandleError(err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn p\n}\n\n\/\/ Get returns a connection that contains a database or creates a new connection\n\/\/ with newly opened database based on options specified on pool creation.\nfunc (p *Pool) Get(path string) (*Connection, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tif c, ok := p.connections[path]; ok {\n\t\tc.increment()\n\t\treturn c, nil\n\t}\n\tif _, err := os.Stat(filepath.Dir(path)); os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(filepath.Dir(path), p.options.DirMode); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tdb, err := bolt.Open(path, p.options.FileMode, p.options.BoltOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Connection{\n\t\tDB: db,\n\t\tpath: path,\n\t\tpool: p,\n\t\texpires: p.options.ConnectionExpires,\n\t}\n\tp.connections[path] = c\n\n\tc.increment()\n\treturn c, nil\n}\n\n\/\/ Has returns true if a database with a file path is in the pool.\nfunc (p *Pool) Has(path string) bool {\n\tp.mu.RLock()\n\tdefer p.mu.RUnlock()\n\n\t_, ok := p.connections[path]\n\treturn ok\n}\n\n\/\/ Close function closes and removes from the pool all databases. After the execution\n\/\/ pool is not usable.\nfunc (p *Pool) Close() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor _, c := range p.connections {\n\t\tp.errorChannel <- c.removeFromPool()\n\t}\n\tclose(p.errorChannel)\n\tclose(p.quit)\n}\n\nfunc (p *Pool) remove(path string) error {\n\tc, ok := p.connections[path]\n\tif !ok {\n\t\treturn fmt.Errorf(\"boltdbpool: Unknown DB %s\", path)\n\t}\n\tdelete(p.connections, path)\n\treturn c.DB.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package file\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/b3log\/wide\/conf\"\n\t\"github.com\/golang\/glog\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n)\n\nfunc GetFiles(w http.ResponseWriter, r *http.Request) {\n\tdata := map[string]interface{}{\"succ\": true}\n\n\troot := FileNode{\"projects\", conf.Wide.GOPATH, \"d\", []*FileNode{}}\n\tfileInfo, _ := os.Lstat(conf.Wide.GOPATH)\n\n\twalk(conf.Wide.GOPATH, fileInfo, &root)\n\n\tdata[\"root\"] = root\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc GetFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tpath := args[\"path\"].(string)\n\n\tidx := strings.LastIndex(path, \".\")\n\n\tbuf, _ := ioutil.ReadFile(path)\n\n\tcontent := string(buf)\n\n\tdata := map[string]interface{}{\"succ\": true}\n\tdata[\"content\"] = content\n\n\textension := \"\"\n\tif 0 <= idx {\n\t\textension = path[idx:]\n\t}\n\tdata[\"mode\"] = getEditorMode(extension)\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc SaveFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tfilePath := args[\"file\"].(string)\n\n\tfout, err := os.Create(filePath)\n\n\tif nil != err {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tcode := args[\"code\"].(string)\n\n\tfout.WriteString(code)\n\n\tif err := fout.Close(); nil != err {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tret, _ := json.Marshal(map[string]interface{}{\"succ\": true})\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc NewFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{\"succ\": true}\n\n\tpath := args[\"path\"].(string)\n\tfileType := args[\"fileType\"].(string)\n\n\tif !createFile(path, fileType) {\n\t\tdata[\"succ\"] = false\n\t}\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc RemoveFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{\"succ\": true}\n\n\tpath := args[\"path\"].(string)\n\n\tif !removeFile(path) {\n\t\tdata[\"succ\"] = false\n\t}\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\ntype FileNode struct {\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n\tType string `json:\"type\"`\n\tFileNodes []*FileNode `json:\"children\"`\n}\n\nfunc walk(path string, info os.FileInfo, node *FileNode) {\n\tfiles := listFiles(path)\n\n\tfor _, filename := range files {\n\t\tfpath := filepath.Join(path, filename)\n\n\t\tfio, _ := os.Lstat(fpath)\n\n\t\tchild := FileNode{Name: filename, Path: fpath, FileNodes: []*FileNode{}}\n\t\tnode.FileNodes = append(node.FileNodes, &child)\n\n\t\tif nil == fio {\n\t\t\tglog.Warningf(\"Path [%s] is nil\", fpath)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif fio.IsDir() {\n\t\t\tchild.Type = \"d\"\n\t\t\twalk(fpath, fio, &child)\n\t\t} else {\n\t\t\tchild.Type = \"f\"\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc listFiles(dirname string) []string {\n\tf, _ := os.Open(dirname)\n\n\tnames, _ := f.Readdirnames(-1)\n\tf.Close()\n\n\tsort.Strings(names)\n\n\treturn names\n}\n\nfunc getEditorMode(filenameExtension string) string {\n\tswitch filenameExtension {\n\tcase \".go\":\n\t\treturn \"go\"\n\tcase \".html\":\n\t\treturn \"htmlmixed\"\n\tcase \".md\":\n\t\treturn \"markdown\"\n\tcase \".js\", \".json\":\n\t\treturn \"javascript\"\n\tcase \".css\":\n\t\treturn \"css\"\n\tcase \".xml\":\n\t\treturn \"xml\"\n\tcase \".sh\":\n\t\treturn \"shell\"\n\tcase \".sql\":\n\t\treturn \"sql\"\n\tdefault:\n\t\treturn \"text\"\n\t}\n}\n\nfunc createFile(path, fileType string) bool {\n\tswitch fileType {\n\tcase \"f\":\n\t\tfile, err := os.OpenFile(path, os.O_CREATE, 0664)\n\t\tif nil != err {\n\t\t\tglog.Info(err)\n\n\t\t\treturn false\n\t\t}\n\n\t\tdefer file.Close()\n\n\t\tglog.Infof(\"Created file [%s]\", path)\n\n\t\treturn true\n\tcase \"d\":\n\t\terr := os.Mkdir(path, 0775)\n\n\t\tif nil != err {\n\t\t\tglog.Info(err)\n\t\t}\n\n\t\tglog.Infof(\"Created directory [%s]\", path)\n\n\t\treturn true\n\tdefault:\n\n\t\tglog.Infof(\"Unsupported file type [%s]\", fileType)\n\n\t\treturn false\n\t}\n}\n\nfunc removeFile(path string) bool {\n\tif err := os.RemoveAll(path); nil != err {\n\t\tglog.Errorf(\"Removes [%s] failed: [%s]\", path, err.Error())\n\n\t\treturn false\n\t}\n\n\tglog.Infof(\"Removed [%s]\", path)\n\n\treturn true\n}\n<commit_msg>Fix #6<commit_after>package file\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/b3log\/wide\/conf\"\n\t\"github.com\/golang\/glog\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n)\n\nfunc GetFiles(w http.ResponseWriter, r *http.Request) {\n\tdata := map[string]interface{}{\"succ\": true}\n\n\troot := FileNode{\"projects\", conf.Wide.GOPATH, \"d\", []*FileNode{}}\n\tfileInfo, _ := os.Lstat(conf.Wide.GOPATH)\n\n\twalk(conf.Wide.GOPATH, fileInfo, &root)\n\n\tdata[\"root\"] = root\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc GetFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tpath := args[\"path\"].(string)\n\n\tidx := strings.LastIndex(path, \".\")\n\n\tbuf, _ := ioutil.ReadFile(path)\n\n\tcontent := string(buf)\n\n\tdata := map[string]interface{}{\"succ\": true}\n\tdata[\"content\"] = content\n\n\textension := \"\"\n\tif 0 <= idx {\n\t\textension = path[idx:]\n\t}\n\tdata[\"mode\"] = getEditorMode(extension)\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc SaveFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tfilePath := args[\"file\"].(string)\n\n\tfout, err := os.Create(filePath)\n\n\tif nil != err {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tcode := args[\"code\"].(string)\n\n\tfout.WriteString(code)\n\n\tif err := fout.Close(); nil != err {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tret, _ := json.Marshal(map[string]interface{}{\"succ\": true})\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc NewFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{\"succ\": true}\n\n\tpath := args[\"path\"].(string)\n\tfileType := args[\"fileType\"].(string)\n\n\tif !createFile(path, fileType) {\n\t\tdata[\"succ\"] = false\n\t}\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\nfunc RemoveFile(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar args map[string]interface{}\n\n\tif err := decoder.Decode(&args); err != nil {\n\t\tglog.Error(err)\n\t\thttp.Error(w, err.Error(), 500)\n\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{\"succ\": true}\n\n\tpath := args[\"path\"].(string)\n\n\tif !removeFile(path) {\n\t\tdata[\"succ\"] = false\n\t}\n\n\tret, _ := json.Marshal(data)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(ret)\n}\n\ntype FileNode struct {\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n\tType string `json:\"type\"`\n\tFileNodes []*FileNode `json:\"children\"`\n}\n\nfunc walk(path string, info os.FileInfo, node *FileNode) {\n\tfiles := listFiles(path)\n\n\tfor _, filename := range files {\n\t\tfpath := filepath.Join(path, filename)\n\n\t\tfio, _ := os.Lstat(fpath)\n\n\t\tchild := FileNode{Name: filename, Path: fpath, FileNodes: []*FileNode{}}\n\t\tnode.FileNodes = append(node.FileNodes, &child)\n\n\t\tif nil == fio {\n\t\t\tglog.Warningf(\"Path [%s] is nil\", fpath)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif fio.IsDir() {\n\t\t\tchild.Type = \"d\"\n\n\t\t\twalk(fpath, fio, &child)\n\t\t} else {\n\t\t\tchild.Type = \"f\"\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc listFiles(dirname string) []string {\n\tf, _ := os.Open(dirname)\n\n\tnames, _ := f.Readdirnames(-1)\n\tf.Close()\n\n\tsort.Strings(names)\n\n\tdirs := []string{}\n\tfiles := []string{}\n\n\t\/\/ 目录靠前,文件靠后\n\tfor _, name := range names {\n\t\tfio, _ := os.Lstat(filepath.Join(dirname, name))\n\n\t\tif fio.IsDir() {\n\t\t\tdirs = append(dirs, name)\n\t\t} else {\n\t\t\tfiles = append(files, name)\n\t\t}\n\t}\n\n\treturn append(dirs, files...)\n}\n\nfunc getEditorMode(filenameExtension string) string {\n\tswitch filenameExtension {\n\tcase \".go\":\n\t\treturn \"go\"\n\tcase \".html\":\n\t\treturn \"htmlmixed\"\n\tcase \".md\":\n\t\treturn \"markdown\"\n\tcase \".js\", \".json\":\n\t\treturn \"javascript\"\n\tcase \".css\":\n\t\treturn \"css\"\n\tcase \".xml\":\n\t\treturn \"xml\"\n\tcase \".sh\":\n\t\treturn \"shell\"\n\tcase \".sql\":\n\t\treturn \"sql\"\n\tdefault:\n\t\treturn \"text\"\n\t}\n}\n\nfunc createFile(path, fileType string) bool {\n\tswitch fileType {\n\tcase \"f\":\n\t\tfile, err := os.OpenFile(path, os.O_CREATE, 0664)\n\t\tif nil != err {\n\t\t\tglog.Info(err)\n\n\t\t\treturn false\n\t\t}\n\n\t\tdefer file.Close()\n\n\t\tglog.Infof(\"Created file [%s]\", path)\n\n\t\treturn true\n\tcase \"d\":\n\t\terr := os.Mkdir(path, 0775)\n\n\t\tif nil != err {\n\t\t\tglog.Info(err)\n\t\t}\n\n\t\tglog.Infof(\"Created directory [%s]\", path)\n\n\t\treturn true\n\tdefault:\n\n\t\tglog.Infof(\"Unsupported file type [%s]\", fileType)\n\n\t\treturn false\n\t}\n}\n\nfunc removeFile(path string) bool {\n\tif err := os.RemoveAll(path); nil != err {\n\t\tglog.Errorf(\"Removes [%s] failed: [%s]\", path, err.Error())\n\n\t\treturn false\n\t}\n\n\tglog.Infof(\"Removed [%s]\", path)\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"os\/exec\"\nimport \"testing\"\nimport \"time\"\nimport \"os\"\nimport \"golang.org\/x\/exp\/inotify\"\n\nfunc TestTouchFile(test *testing.T) {\n\ttestFile := \".\/testFile.file\"\n\n\t\/\/ Remove any existing testFile\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr := os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Test new file creation\n\terr := touchFile(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tif _, err := os.Stat(testFile); err != nil {\n\t\ttest.Fatal(\"File was not created\")\n\t}\n\n\t\/\/ Test existing file updation\n\tfile, err := os.Stat(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tmtime := file.ModTime()\n\terr = touchFile(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tfile, err = os.Stat(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tif file.ModTime() == mtime {\n\t\ttest.Fatal(\"Mtime was not updated\")\n\t} else {\n\t\ttest.Logf(\"Mtime was updated from %s to %s\", mtime, file.ModTime())\n\t}\n\n\t\/\/ Cleanup\n\terr = os.Remove(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n}\n\ntype fileWait struct {\n\tSignal uint32\n\tCommand string\n\tCommandOptions []string\n}\n\nfunc TestWaitFileFor(test *testing.T) {\n\ttestFile := \"testWatchFile.file\"\n\n\t\/\/\/\/ test file creation - inotify.IN_CLOSE_WRITE\n\ttestSet1 := new(fileWait)\n\ttestSet1.Signal = inotify.IN_CLOSE_WRITE\n\ttestSet1.Command = \"touch\"\n\ttestSet1.CommandOptions = []string{testFile}\n\t\/\/\/\/ test file deletion - inotify.IN_DELETE\n\ttestSet2 := new(fileWait)\n\ttestSet2.Signal = inotify.IN_DELETE\n\ttestSet2.Command = \"rm\"\n\ttestSet2.CommandOptions = []string{\"-f\", testFile}\n\n\ttestSets := []*fileWait{testSet1, testSet2}\n\t\/\/ testSets := []*fileWait{testSet2, testSet1} \/\/ reversing breaks, since the second run expects the file to exist already\n\n\t\/\/ ensure file deoesn't exist\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr = os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n\tfor _, testSet := range testSets {\n\t\ttest.Logf(\"testSet %s\", testSet)\n\t\t\/\/ setup watch\n\t\ttestChan := make(chan bool)\n\t\tgo waitFileFor(testChan, testFile, testSet.Signal)\n\n\t\t\/\/ setup timeout\n\t\ttimeout := make(chan bool)\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\ttimeout <- true\n\t\t}()\n\n\t\t\/\/ create\/remove file\n\t\tgo func() {\n\t\t\tcmd := exec.Command(testSet.Command, testSet.CommandOptions...)\n\t\t\terr := cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\ttest.Fatal(err)\n\t\t\t}\n\t\t\terr = cmd.Wait()\n\t\t\tif err != nil {\n\t\t\t\ttest.Fatalf(\"Command finished with error: %v\", err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ setup channel receive\n\t\tselect {\n\t\tcase <-testChan:\n\t\t\ttest.Logf(\"Received signal for test file %s\", testFile)\n\t\tcase <-timeout:\n\t\t\ttest.Fatalf(\"Received no signal after 2 seconds for test file %s\", testFile)\n\t\t}\n\t}\n\n\t\/\/ cleanup\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr := os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n}\n<commit_msg>Better naming<commit_after>package main\n\nimport \"os\/exec\"\nimport \"testing\"\nimport \"time\"\nimport \"os\"\nimport \"golang.org\/x\/exp\/inotify\"\n\nfunc TestTouchFile(test *testing.T) {\n\ttestFile := \".\/testFile.file\"\n\n\t\/\/ Remove any existing testFile\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr := os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Test new file creation\n\terr := touchFile(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tif _, err := os.Stat(testFile); err != nil {\n\t\ttest.Fatal(\"File was not created\")\n\t}\n\n\t\/\/ Test existing file updation\n\tfile, err := os.Stat(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tmtime := file.ModTime()\n\terr = touchFile(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tfile, err = os.Stat(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\tif file.ModTime() == mtime {\n\t\ttest.Fatal(\"Mtime was not updated\")\n\t} else {\n\t\ttest.Logf(\"Mtime was updated from %s to %s\", mtime, file.ModTime())\n\t}\n\n\t\/\/ Cleanup\n\terr = os.Remove(testFile)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n}\n\ntype testFileWait struct {\n\tSignal uint32\n\tCommand string\n\tCommandOptions []string\n}\n\nfunc TestWaitFileFor(test *testing.T) {\n\ttestFile := \"testWatchFile.file\"\n\n\t\/\/\/\/ test file creation - inotify.IN_CLOSE_WRITE\n\ttestSet1 := new(testFileWait)\n\ttestSet1.Signal = inotify.IN_CLOSE_WRITE\n\ttestSet1.Command = \"touch\"\n\ttestSet1.CommandOptions = []string{testFile}\n\t\/\/\/\/ test file deletion - inotify.IN_DELETE\n\ttestSet2 := new(testFileWait)\n\ttestSet2.Signal = inotify.IN_DELETE\n\ttestSet2.Command = \"rm\"\n\ttestSet2.CommandOptions = []string{\"-f\", testFile}\n\n\ttestSets := []*testFileWait{testSet1, testSet2}\n\t\/\/ testSets := []*testFileWait{testSet2, testSet1} \/\/ reversing breaks, since the second run expects the file to exist already\n\n\t\/\/ ensure file deoesn't exist\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr = os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n\tfor _, testSet := range testSets {\n\t\ttest.Logf(\"testSet %s\", testSet)\n\t\t\/\/ setup watch\n\t\ttestChan := make(chan bool)\n\t\tgo waitFileFor(testChan, testFile, testSet.Signal)\n\n\t\t\/\/ setup timeout\n\t\ttimeout := make(chan bool)\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\ttimeout <- true\n\t\t}()\n\n\t\t\/\/ create\/remove file\n\t\tgo func() {\n\t\t\tcmd := exec.Command(testSet.Command, testSet.CommandOptions...)\n\t\t\terr := cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\ttest.Fatal(err)\n\t\t\t}\n\t\t\terr = cmd.Wait()\n\t\t\tif err != nil {\n\t\t\t\ttest.Fatalf(\"Command finished with error: %v\", err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ setup channel receive\n\t\tselect {\n\t\tcase <-testChan:\n\t\t\ttest.Logf(\"Received signal for test file %s\", testFile)\n\t\tcase <-timeout:\n\t\t\ttest.Fatalf(\"Received no signal after 2 seconds for test file %s\", testFile)\n\t\t}\n\t}\n\n\t\/\/ cleanup\n\tif _, err := os.Stat(testFile); err == nil {\n\t\terr := os.Remove(testFile)\n\t\tif err != nil {\n\t\t\ttest.Fatal(err)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package httpgzip is a simple wrapper around http.FileServer that looks for\n\/\/ a gzip compressed version of a file and serves that if the client requested\n\/\/ gzip content\npackage httpgzip\n\nimport (\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype fileServer struct {\n\troot http.FileSystem\n\th http.Handler\n}\n\n\/\/ FileServer creates a wrapper around http.FileServer using the given\n\/\/ http.FileSystem\nfunc FileServer(root http.FileSystem) http.Handler {\n\treturn fileServer{\n\t\troot,\n\t\thttp.FileServer(root),\n\t}\n}\n\n\/\/ ServerHTTP implements the http.Handler interface\nfunc (f fileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfor _, e := range strings.Split(r.Header.Get(\"Accept-Encoding\"), \",\") {\n\t\tif strings.TrimSpace(e) == \"gzip\" {\n\t\t\tp := path.Clean(r.URL.Path)\n\t\t\tm := p\n\t\t\tnf, err := f.root.Open(p + \".gz\")\n\t\t\tif strings.HasSuffix(p, \"\/\") {\n\t\t\t\tm += \"index.html\"\n\t\t\t\tif err != nil {\n\t\t\t\t\tnf, err = f.root.Open(p + \".gz\")\n\t\t\t\t\tp += \"index.html\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tif ctype := mime.TypeByExtension(filepath.Ext(m)); ctype != \"\" {\n\t\t\t\t\ts, err := nf.Stat()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tw.Header().Set(\"Content-Type\", ctype)\n\t\t\t\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(s.Size(), 10))\n\t\t\t\t}\n\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t\tr.URL.Path = p + \".gz\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tf.h.ServeHTTP(w, r)\n}\n<commit_msg>if we cannot set the MIME we shouldn't send the gzipped content as it may not be understood on the client side<commit_after>\/\/ Package httpgzip is a simple wrapper around http.FileServer that looks for\n\/\/ a gzip compressed version of a file and serves that if the client requested\n\/\/ gzip content\npackage httpgzip\n\nimport (\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype fileServer struct {\n\troot http.FileSystem\n\th http.Handler\n}\n\n\/\/ FileServer creates a wrapper around http.FileServer using the given\n\/\/ http.FileSystem\nfunc FileServer(root http.FileSystem) http.Handler {\n\treturn fileServer{\n\t\troot,\n\t\thttp.FileServer(root),\n\t}\n}\n\n\/\/ ServerHTTP implements the http.Handler interface\nfunc (f fileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfor _, e := range strings.Split(r.Header.Get(\"Accept-Encoding\"), \",\") {\n\t\tif strings.TrimSpace(e) == \"gzip\" {\n\t\t\tp := path.Clean(r.URL.Path)\n\t\t\tm := p\n\t\t\tnf, err := f.root.Open(p + \".gz\")\n\t\t\tif strings.HasSuffix(p, \"\/\") {\n\t\t\t\tm += \"index.html\"\n\t\t\t\tif err != nil {\n\t\t\t\t\tnf, err = f.root.Open(p + \".gz\")\n\t\t\t\t\tp += \"index.html\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tif ctype := mime.TypeByExtension(filepath.Ext(m)); ctype != \"\" {\n\t\t\t\t\ts, err := nf.Stat()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tw.Header().Set(\"Content-Type\", ctype)\n\t\t\t\t\tw.Header().Set(\"Content-Length\", strconv.FormatInt(s.Size(), 10))\n\t\t\t\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\t\t\t\tr.URL.Path = p + \".gz\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tf.h.ServeHTTP(w, r)\n}\n<|endoftext|>"} {"text":"<commit_before>package bot\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dghubble\/go-twitter\/twitter\"\n\t\"github.com\/dghubble\/oauth1\"\n\t\"github.com\/op\/go-logging\"\n\t\"os\"\n)\n\nvar log = logging.MustGetLogger(\"twibot\")\n\n\/\/ Twibot is the main app struct\ntype Twibot struct {\n\tconf *Config\n\tclient *twitter.Client\n}\n\n\/\/ SetupLog will setup the logger and its verbosity\nfunc (t *Twibot) SetupLog(verbose bool) {\n\n\tvar format = logging.MustStringFormatter(\n\t\t`[%{time:15:04:05.000}] %{level:.5s}: %{message}`,\n\t)\n\tbackend1 := logging.NewLogBackend(os.Stderr, \"\", 0)\n\tformatt := logging.NewBackendFormatter(backend1, format)\n\tleveled := logging.AddModuleLevel(formatt)\n\n\tlogging.SetBackend(leveled)\n\tif verbose {\n\t\tlogging.SetLevel(logging.DEBUG, \"\")\n\t} else {\n\t\tlogging.SetLevel(logging.INFO, \"\")\n\t}\n}\n\n\/\/ Run will run twibot\nfunc (t *Twibot) Run(path string) {\n\n\t\/\/ Parse config file\n\tt.conf = new(Config)\n\tlog.Debugf(\"Loading config from %s...\\n\", path)\n\terr := t.conf.FromJSON(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Debugf(\"Lodaded conf, Got %d actions\\n\", len(t.conf.OnMentions)+len(t.conf.OnDM))\n\n\t\/\/ Twitter client\n\tconfig := oauth1.NewConfig(t.conf.ConsumerKey, t.conf.ConsumerSecret)\n\ttoken := oauth1.NewToken(t.conf.Token, t.conf.TokenSecret)\n\thttpClient := config.Client(oauth1.NoContext, token)\n\tt.client = twitter.NewClient(httpClient)\n\tstream, err := t.client.Streams.User(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Demux handlers\n\tdemux := twitter.NewSwitchDemux()\n\tif len(t.conf.OnMentions) > 0 {\n\t\tdemux.Tweet = t.handleTweet\n\t}\n\tif len(t.conf.OnDM) > 0 {\n\t\tdemux.DM = t.handleDM\n\t}\n\n\tlog.Info(\"start listening...\")\n\tfor message := range stream.Messages {\n\t\tdemux.Handle(message)\n\t}\n}\n\nfunc (t *Twibot) handleMessage(text string, user *twitter.User, actions []Action) {\n\tlog.Debugf(\"received from: %s message: %s\", user.ScreenName, text)\n\n\t\/\/ check authorization\n\tif !t.isAuthorized(user.ScreenName) {\n\t\treturn\n\t}\n\n\tfor _, action := range actions {\n\t\tlog.Debug(\"trying action: %s regex: %s case: %t\", action.Name, action.Match, action.Case)\n\t\tif action.WillMatch(text) {\n\t\t\tlog.Debug(\"OK\")\n\n\t\t\tif action.Bundled {\n\t\t\t\t\/\/ if matched action is BUndled we run the bundled code\n\t\t\t\tlog.Info(\"Executing Bundled Command: %s\", action.Name)\n\t\t\t\taction.BundledCommand(text, user, t)\n\t\t\t} else {\n\t\t\t\t\/\/ We run the script\n\t\t\t\tlog.Infof(\"Executing: %s\", action.Name)\n\t\t\t\tout, err := action.Exec()\n\n\t\t\t\tvar replyMessage string\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"executing %s: %s\", action.Name, err.Error())\n\t\t\t\t\treplyMessage = fmt.Sprintf(\"[%s] FAILED:\\n%s\", action.Name, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(\"executed correctly Task %s\", action.Name)\n\t\t\t\t\treplyMessage = fmt.Sprintf(\"[%s] OK\", action.Name)\n\t\t\t\t\tlog.Debugf(\"output:\\n%s\", out)\n\t\t\t\t\t\/\/ TODO: decide what to do with output\n\t\t\t\t}\n\n\t\t\t\tif action.Reply {\n\t\t\t\t\tlog.Debug(\"sending reply\")\n\n\t\t\t\t\tt.client.DirectMessages.New(&twitter.DirectMessageNewParams{ScreenName: user.ScreenName, Text: replyMessage})\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Debug(\"KO\")\n\t\t}\n\t}\n\tlog.Debugf(\"not matched: %s \", text)\n}\n\nfunc (t *Twibot) isAuthorized(user string) bool {\n\tif len(t.conf.AuthorizedUsers) == 0 {\n\t\treturn true\n\t}\n\tfor _, a := range t.conf.AuthorizedUsers {\n\t\tif a == user {\n\t\t\treturn true\n\t\t}\n\t}\n\tlog.Debugf(\"user: %s is not authorized\", user)\n\treturn false\n}\n\nfunc (t *Twibot) handleDM(dm *twitter.DirectMessage) {\n\tif dm.SenderScreenName == t.conf.BotName {\n\t\t\/\/ ignore DM sent by ourselves\n\t\treturn\n\t}\n\tt.handleMessage(dm.Text, dm.Sender, t.conf.OnDM)\n}\n\nfunc (t *Twibot) handleTweet(tweet *twitter.Tweet) {\n\t\/\/ TODO: to implement Mentions scanning\n\tif tweet.User.ScreenName == t.conf.BotName {\n\t\t\/\/ ignore Tweet sent by ourselves\n\t\treturn\n\t}\n\tt.handleMessage(tweet.Text, tweet.User, t.conf.OnMentions)\n}\n<commit_msg>mentions implemented<commit_after>package bot\n\nimport (\n\t\"fmt\"\n\t\"github.com\/dghubble\/go-twitter\/twitter\"\n\t\"github.com\/dghubble\/oauth1\"\n\t\"github.com\/op\/go-logging\"\n\t\"os\"\n)\n\nvar log = logging.MustGetLogger(\"twibot\")\n\n\/\/ Twibot is the main app struct\ntype Twibot struct {\n\tconf *Config\n\tclient *twitter.Client\n}\n\n\/\/ SetupLog will setup the logger and its verbosity\nfunc (t *Twibot) SetupLog(verbose bool) {\n\n\tvar format = logging.MustStringFormatter(\n\t\t`[%{time:15:04:05.000}] %{level:.5s}: %{message}`,\n\t)\n\tbackend1 := logging.NewLogBackend(os.Stderr, \"\", 0)\n\tformatt := logging.NewBackendFormatter(backend1, format)\n\tleveled := logging.AddModuleLevel(formatt)\n\n\tlogging.SetBackend(leveled)\n\tif verbose {\n\t\tlogging.SetLevel(logging.DEBUG, \"\")\n\t} else {\n\t\tlogging.SetLevel(logging.INFO, \"\")\n\t}\n}\n\n\/\/ Run will run twibot\nfunc (t *Twibot) Run(path string) {\n\n\t\/\/ Parse config file\n\tt.conf = new(Config)\n\tlog.Debugf(\"Loading config from %s...\\n\", path)\n\terr := t.conf.FromJSON(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Debugf(\"Lodaded conf, Got %d actions\\n\", len(t.conf.OnMentions)+len(t.conf.OnDM))\n\n\t\/\/ Twitter client\n\tconfig := oauth1.NewConfig(t.conf.ConsumerKey, t.conf.ConsumerSecret)\n\ttoken := oauth1.NewToken(t.conf.Token, t.conf.TokenSecret)\n\thttpClient := config.Client(oauth1.NoContext, token)\n\tt.client = twitter.NewClient(httpClient)\n\tstream, err := t.client.Streams.User(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Demux handlers\n\tdemux := twitter.NewSwitchDemux()\n\tif len(t.conf.OnMentions) > 0 {\n\t\tdemux.Tweet = t.handleTweet\n\t}\n\tif len(t.conf.OnDM) > 0 {\n\t\tdemux.DM = t.handleDM\n\t}\n\n\tlog.Info(\"start listening...\")\n\tfor message := range stream.Messages {\n\t\tdemux.Handle(message)\n\t}\n}\n\nfunc (t *Twibot) handleMessage(text string, user *twitter.User, actions []Action) {\n\tlog.Debugf(\"received from: %s message: %s\", user.ScreenName, text)\n\n\t\/\/ check authorization\n\tif !t.isAuthorized(user.ScreenName) {\n\t\treturn\n\t}\n\n\tfor _, action := range actions {\n\t\tlog.Debug(\"trying action: %s regex: %s case: %t\", action.Name, action.Match, action.Case)\n\t\tif action.WillMatch(text) {\n\t\t\tlog.Debug(\"OK\")\n\n\t\t\tif action.Bundled {\n\t\t\t\t\/\/ if matched action is BUndled we run the bundled code\n\t\t\t\tlog.Info(\"Executing Bundled Command: %s\", action.Name)\n\t\t\t\taction.BundledCommand(text, user, t)\n\t\t\t} else {\n\t\t\t\t\/\/ We run the script\n\t\t\t\tlog.Infof(\"Executing: %s\", action.Name)\n\t\t\t\tout, err := action.Exec()\n\n\t\t\t\tvar replyMessage string\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"executing %s: %s\", action.Name, err.Error())\n\t\t\t\t\treplyMessage = fmt.Sprintf(\"[%s] FAILED:\\n%s\", action.Name, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(\"executed correctly Task %s\", action.Name)\n\t\t\t\t\treplyMessage = fmt.Sprintf(\"[%s] OK\", action.Name)\n\t\t\t\t\tlog.Debugf(\"output:\\n%s\", out)\n\t\t\t\t\t\/\/ TODO: decide what to do with output\n\t\t\t\t}\n\n\t\t\t\tif action.Reply {\n\t\t\t\t\tlog.Debug(\"sending reply\")\n\n\t\t\t\t\tt.client.DirectMessages.New(&twitter.DirectMessageNewParams{ScreenName: user.ScreenName, Text: replyMessage})\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Debug(\"KO\")\n\t\t}\n\t}\n\tlog.Debugf(\"not matched: %s \", text)\n}\n\nfunc (t *Twibot) isAuthorized(user string) bool {\n\tif len(t.conf.AuthorizedUsers) == 0 {\n\t\treturn true\n\t}\n\tfor _, a := range t.conf.AuthorizedUsers {\n\t\tif a == user {\n\t\t\treturn true\n\t\t}\n\t}\n\tlog.Debugf(\"user: %s is not authorized\", user)\n\treturn false\n}\n\nfunc (t *Twibot) handleDM(dm *twitter.DirectMessage) {\n\tif dm.SenderScreenName == t.conf.BotName {\n\t\t\/\/ ignore DM sent by ourselves\n\t\treturn\n\t}\n\tt.handleMessage(dm.Text, dm.Sender, t.conf.OnDM)\n}\n\nfunc (t *Twibot) handleTweet(tweet *twitter.Tweet) {\n\tif tweet.User.ScreenName == t.conf.BotName {\n\t\t\/\/ ignore Tweet sent by ourselves\n\t\treturn\n\t}\n\t\/\/ scan for mentions\n\tfor _, mention := range tweet.Entities.UserMentions {\n\t\tif mention.ScreenName == t.conf.BotName {\n\t\t\tt.handleMessage(tweet.Text, tweet.User, t.conf.OnMentions)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tiniconf \"code.google.com\/p\/goconf\/conf\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/alanjcfs\/bot3server\/module\/cah\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\/\/ \"github.com\/gamelost\/bot3server\/module\/cah\"\n\t\"github.com\/alanjcfs\/bot3server\/module\/mongo\"\n\t\"github.com\/gamelost\/bot3server\/module\/catfacts\"\n\t\"github.com\/gamelost\/bot3server\/module\/dice\"\n\t\"github.com\/gamelost\/bot3server\/module\/fight\"\n\t\"github.com\/gamelost\/bot3server\/module\/help\"\n\t\"github.com\/gamelost\/bot3server\/module\/inconceivable\"\n\t\/\/ \"github.com\/gamelost\/bot3server\/module\/mongo\"\n\n\t\"github.com\/gamelost\/bot3server\/module\/nextwedding\"\n\t\"github.com\/gamelost\/bot3server\/module\/remindme\"\n\t\"github.com\/gamelost\/bot3server\/module\/slap\"\n\twuconditions \"github.com\/gamelost\/bot3server\/module\/weather\/conditions\"\n\twuforecast \"github.com\/gamelost\/bot3server\/module\/weather\/forecast\"\n\t\"github.com\/gamelost\/bot3server\/module\/zed\"\n\t\"github.com\/gamelost\/bot3server\/server\"\n\t\"github.com\/twinj\/uuid\"\n\t\"labix.org\/v2\/mgo\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst DEFAULT_CONFIG_FILENAME = \"bot3server.config\"\nconst CONFIG_CAT_DEFAULT = \"default\"\nconst CONFIG_CAT_PLUGINS = \"plugins\"\n\n\/\/ nsq specific constants\nconst CONFIG_CAT_NSQ = \"nsq\"\nconst CONFIG_BOT3SERVER_INPUT = \"bot3server-input\"\nconst CONFIG_BOT3SERVER_OUTPUT = \"bot3server-output\"\nconst CONFIG_OUTPUT_WRITER_ADDR = \"output-writer-address\"\nconst CONFIG_LOOKUPD_ADDR = \"lookupd-address\"\nconst TOPIC_MAIN = \"main\"\n\nvar conf *iniconf.ConfigFile\n\nfunc main() {\n\n\t\/\/ the quit channel\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)\n\n\tconfig, err := iniconf.ReadConfigFile(DEFAULT_CONFIG_FILENAME)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to read configuration file. Exiting now.\")\n\t}\n\n\tbot3serverInput, _ := config.GetString(CONFIG_CAT_DEFAULT, CONFIG_BOT3SERVER_INPUT)\n\toutputWriterAddress, _ := config.GetString(CONFIG_CAT_NSQ, CONFIG_OUTPUT_WRITER_ADDR)\n\tlookupdAddress, _ := config.GetString(CONFIG_CAT_NSQ, CONFIG_LOOKUPD_ADDR)\n\n\t\/\/ set up listener instance\n\tincomingFromIRC, err := nsq.NewConsumer(bot3serverInput, TOPIC_MAIN, nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tsigChan <- syscall.SIGINT\n\t}\n\n\t\/\/ set up channels\n\toutgoingToNSQChan := make(chan *server.BotResponse)\n\n\toutputWriter, err := nsq.NewProducer(outputWriterAddress, nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tsigChan <- syscall.SIGINT\n\t}\n\n\t\/\/ set up heartbeat ticker\n\theartbeatTicker := time.NewTicker(1 * time.Second)\n\n\t\/\/ initialize the handlers\n\tbot3server := &Bot3Server{Config: config, OutgoingChan: outgoingToNSQChan}\n\tbot3server.Initialize()\n\n\tincomingFromIRC.SetHandler(bot3server)\n\tincomingFromIRC.ConnectToNSQLookupd(lookupdAddress)\n\n\tgo bot3server.HandleOutgoing(outgoingToNSQChan, heartbeatTicker.C, outputWriter, bot3server.UniqueID)\n\n\tlog.Printf(\"Done starting up. UUID:[%s]. Waiting on quit signal.\", bot3server.UniqueID.String)\n\t<-sigChan\n}\n\ntype Bot3Server struct {\n\tInitialized bool\n\tConfig *iniconf.ConfigFile\n\tHandlers map[string]server.BotHandler\n\tOutgoingChan chan *server.BotResponse\n\tUniqueID uuid.UUID\n\tMongoSession *mgo.Session\n\tMongoDB *mgo.Database\n}\n\nfunc (bs *Bot3Server) Initialize() {\n\t\/\/ run only once\n\tif !bs.Initialized {\n\n\t\tbs.UniqueID = uuid.NewV1()\n\t\tbs.initServices()\n\t\tbs.Initialized = true\n\t}\n}\n\nfunc (bs *Bot3Server) AddHandler(key string, h server.BotHandler) {\n\tplugins, err := bs.Config.GetString(CONFIG_CAT_PLUGINS, \"enabled\")\n\t\/\/ If plugins string does not exist, assume that all plugins\n\t\/\/ are enabled.\n\tif err == nil {\n\t\tif !strings.Contains(\" \"+plugins+\" \", \" \"+key+\" \") {\n\t\t\treturn\n\t\t}\n\t}\n\tbs.Handlers[key] = h\n}\n\nfunc (bs *Bot3Server) GetHandler(key string) server.BotHandler {\n\treturn bs.Handlers[key]\n}\n\nfunc (bs *Bot3Server) initServices() error {\n\n\tbs.Handlers = make(map[string]server.BotHandler)\n\n\t\/\/ implement all services\n\tbs.AddHandler(\"fight\", (new(fight.FightService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"cah\", (new(cah.CahService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"mongo\", (new(mongo.MongoService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"slap\", (new(slap.SlapService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"inconceivable\", (new(inconceivable.InconceivableService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"help\", (new(help.HelpService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"remindme\", (new(remindme.RemindMeService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"nextwedding\", (new(nextwedding.NextWeddingService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"weather\", (new(wuconditions.WeatherConditionsService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"forecast\", (new(wuforecast.WeatherForecastService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"zed\", (new(zed.ZedsDeadService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"dice\", (new(dice.DiceService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"catfacts\", (new(catfacts.CatFactsService)).NewService(bs.Config, bs.OutgoingChan))\n\treturn nil\n}\n\nfunc (bs *Bot3Server) HandleMessage(message *nsq.Message) error {\n\tvar req = &server.BotRequest{}\n\tjson.Unmarshal(message.Body, req)\n\n\terr := bs.doInsert(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/\tbs.IncomingChan <- message\n\tgo bs.HandleIncoming(req)\n\treturn nil\n}\n\nfunc (bs *Bot3Server) doInsert(req *server.BotRequest) error {\n\tif bs.MongoDB == nil {\n\t\tlog.Printf(\"Setting up connection and inserting\")\n\t\tbs.SetupMongoDBConnection()\n\t} else {\n\t\tlog.Printf(\"Already connected to Mongo, inserting\")\n\t}\n\n\tc := bs.MongoDB.C(\"chatlog\")\n\terr := c.Insert(map[string]string{req.Nick: req.Text()})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (bs *Bot3Server) SetupMongoDBConnection() error {\n\t\/\/ Connect to Mongo.\n\tservers, err := bs.Config.GetString(\"mongo\", \"servers\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbs.MongoSession, err = mgo.Dial(servers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb, err := bs.Config.GetString(\"mongo\", \"db\")\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\tfmt.Println(\"Successfully obtained config from mongo\")\n\t}\n\n\tbs.MongoDB = bs.MongoSession.DB(db)\n\treturn nil\n}\n\nfunc (bs *Bot3Server) HandleOutgoing(outgoingToNSQChan chan *server.BotResponse, heartbeatTicker <-chan time.Time, outputWriter *nsq.Producer, serverID uuid.UUID) {\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-outgoingToNSQChan:\n\t\t\tval, _ := json.Marshal(msg)\n\t\t\toutputWriter.Publish(\"bot3server-output\", val)\n\t\t\tbreak\n\t\tcase t := <-heartbeatTicker:\n\t\t\thb := &server.Bot3ServerHeartbeat{ServerID: serverID.String(), Timestamp: t}\n\t\t\tval, _ := json.Marshal(hb)\n\t\t\toutputWriter.Publish(\"bot3server-heartbeat\", val)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (bs *Bot3Server) HandleIncoming(botRequest *server.BotRequest) error {\n\n\t\/\/ since we dont want the entire botapi to crash if a module throws an error or panic\n\t\/\/ we'll trap it here and log\/notify the owner.\n\n\t\/\/ mabye a good idea to automatically-disable the module if it panics too frequently\n\n\t\/\/ throwout malformed requests\n\tif len(botRequest.ChatText) < 1 {\n\t\treturn nil\n\t}\n\n\t\/\/ deferred method to protect against panics\n\tdefer func(rawline string) {\n\t\t\/\/ if module panics, log it and discard\n\t\tif x := recover(); x != nil {\n\t\t\tlog.Printf(\"Trapped panic. Rawline is:[%s]: %v\\n\", rawline, x)\n\t\t}\n\t}(botRequest.Text())\n\n\t\/\/ check if command before processing\n\tif botRequest.RequestIsCommand() {\n\n\t\tcommand := botRequest.Command()\n\n\t\thandler := bs.GetHandler(command)\n\n\t\tif handler != nil {\n\t\t\t\/\/log.Printf(\"Assigning handler for: %s\\n\", command)\n\t\t\t\/\/botResponse := &server.BotResponse{Target: botRequest.Channel, Identifier: botRequest.Identifier}\n\t\t\thandler.DispatchRequest(botRequest)\n\t\t}\n\t}\n\n\t\/\/ return error object (nil for now)\n\treturn nil\n}\n<commit_msg>Better way to store the data so it's easier to query<commit_after>package main\n\nimport (\n\tiniconf \"code.google.com\/p\/goconf\/conf\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/alanjcfs\/bot3server\/module\/cah\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\/\/ \"github.com\/gamelost\/bot3server\/module\/cah\"\n\t\"github.com\/alanjcfs\/bot3server\/module\/mongo\"\n\t\"github.com\/gamelost\/bot3server\/module\/catfacts\"\n\t\"github.com\/gamelost\/bot3server\/module\/dice\"\n\t\"github.com\/gamelost\/bot3server\/module\/fight\"\n\t\"github.com\/gamelost\/bot3server\/module\/help\"\n\t\"github.com\/gamelost\/bot3server\/module\/inconceivable\"\n\t\/\/ \"github.com\/gamelost\/bot3server\/module\/mongo\"\n\n\t\"github.com\/gamelost\/bot3server\/module\/nextwedding\"\n\t\"github.com\/gamelost\/bot3server\/module\/remindme\"\n\t\"github.com\/gamelost\/bot3server\/module\/slap\"\n\twuconditions \"github.com\/gamelost\/bot3server\/module\/weather\/conditions\"\n\twuforecast \"github.com\/gamelost\/bot3server\/module\/weather\/forecast\"\n\t\"github.com\/gamelost\/bot3server\/module\/zed\"\n\t\"github.com\/gamelost\/bot3server\/server\"\n\t\"github.com\/twinj\/uuid\"\n\t\"labix.org\/v2\/mgo\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst DEFAULT_CONFIG_FILENAME = \"bot3server.config\"\nconst CONFIG_CAT_DEFAULT = \"default\"\nconst CONFIG_CAT_PLUGINS = \"plugins\"\n\n\/\/ nsq specific constants\nconst CONFIG_CAT_NSQ = \"nsq\"\nconst CONFIG_BOT3SERVER_INPUT = \"bot3server-input\"\nconst CONFIG_BOT3SERVER_OUTPUT = \"bot3server-output\"\nconst CONFIG_OUTPUT_WRITER_ADDR = \"output-writer-address\"\nconst CONFIG_LOOKUPD_ADDR = \"lookupd-address\"\nconst TOPIC_MAIN = \"main\"\n\nvar conf *iniconf.ConfigFile\n\nfunc main() {\n\n\t\/\/ the quit channel\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)\n\n\tconfig, err := iniconf.ReadConfigFile(DEFAULT_CONFIG_FILENAME)\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to read configuration file. Exiting now.\")\n\t}\n\n\tbot3serverInput, _ := config.GetString(CONFIG_CAT_DEFAULT, CONFIG_BOT3SERVER_INPUT)\n\toutputWriterAddress, _ := config.GetString(CONFIG_CAT_NSQ, CONFIG_OUTPUT_WRITER_ADDR)\n\tlookupdAddress, _ := config.GetString(CONFIG_CAT_NSQ, CONFIG_LOOKUPD_ADDR)\n\n\t\/\/ set up listener instance\n\tincomingFromIRC, err := nsq.NewConsumer(bot3serverInput, TOPIC_MAIN, nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tsigChan <- syscall.SIGINT\n\t}\n\n\t\/\/ set up channels\n\toutgoingToNSQChan := make(chan *server.BotResponse)\n\n\toutputWriter, err := nsq.NewProducer(outputWriterAddress, nsq.NewConfig())\n\tif err != nil {\n\t\tpanic(err)\n\t\tsigChan <- syscall.SIGINT\n\t}\n\n\t\/\/ set up heartbeat ticker\n\theartbeatTicker := time.NewTicker(1 * time.Second)\n\n\t\/\/ initialize the handlers\n\tbot3server := &Bot3Server{Config: config, OutgoingChan: outgoingToNSQChan}\n\tbot3server.Initialize()\n\n\tincomingFromIRC.SetHandler(bot3server)\n\tincomingFromIRC.ConnectToNSQLookupd(lookupdAddress)\n\n\tgo bot3server.HandleOutgoing(outgoingToNSQChan, heartbeatTicker.C, outputWriter, bot3server.UniqueID)\n\n\tlog.Printf(\"Done starting up. UUID:[%s]. Waiting on quit signal.\", bot3server.UniqueID.String)\n\t<-sigChan\n}\n\ntype Bot3Server struct {\n\tInitialized bool\n\tConfig *iniconf.ConfigFile\n\tHandlers map[string]server.BotHandler\n\tOutgoingChan chan *server.BotResponse\n\tUniqueID uuid.UUID\n\tMongoSession *mgo.Session\n\tMongoDB *mgo.Database\n}\n\nfunc (bs *Bot3Server) Initialize() {\n\t\/\/ run only once\n\tif !bs.Initialized {\n\n\t\tbs.UniqueID = uuid.NewV1()\n\t\tbs.initServices()\n\t\tbs.Initialized = true\n\t}\n}\n\nfunc (bs *Bot3Server) AddHandler(key string, h server.BotHandler) {\n\tplugins, err := bs.Config.GetString(CONFIG_CAT_PLUGINS, \"enabled\")\n\t\/\/ If plugins string does not exist, assume that all plugins\n\t\/\/ are enabled.\n\tif err == nil {\n\t\tif !strings.Contains(\" \"+plugins+\" \", \" \"+key+\" \") {\n\t\t\treturn\n\t\t}\n\t}\n\tbs.Handlers[key] = h\n}\n\nfunc (bs *Bot3Server) GetHandler(key string) server.BotHandler {\n\treturn bs.Handlers[key]\n}\n\nfunc (bs *Bot3Server) initServices() error {\n\n\tbs.Handlers = make(map[string]server.BotHandler)\n\n\t\/\/ implement all services\n\tbs.AddHandler(\"fight\", (new(fight.FightService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"cah\", (new(cah.CahService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"mongo\", (new(mongo.MongoService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"slap\", (new(slap.SlapService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"inconceivable\", (new(inconceivable.InconceivableService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"help\", (new(help.HelpService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"remindme\", (new(remindme.RemindMeService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"nextwedding\", (new(nextwedding.NextWeddingService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"weather\", (new(wuconditions.WeatherConditionsService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"forecast\", (new(wuforecast.WeatherForecastService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"zed\", (new(zed.ZedsDeadService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"dice\", (new(dice.DiceService)).NewService(bs.Config, bs.OutgoingChan))\n\tbs.AddHandler(\"catfacts\", (new(catfacts.CatFactsService)).NewService(bs.Config, bs.OutgoingChan))\n\treturn nil\n}\n\nfunc (bs *Bot3Server) HandleMessage(message *nsq.Message) error {\n\tvar req = &server.BotRequest{}\n\tjson.Unmarshal(message.Body, req)\n\n\terr := bs.doInsert(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/\tbs.IncomingChan <- message\n\tgo bs.HandleIncoming(req)\n\treturn nil\n}\n\nfunc (bs *Bot3Server) doInsert(req *server.BotRequest) error {\n\tif bs.MongoDB == nil {\n\t\tlog.Printf(\"Setting up connection and inserting\")\n\t\tbs.SetupMongoDBConnection()\n\t} else {\n\t\tlog.Printf(\"Already connected to Mongo, inserting\")\n\t}\n\n\tc := bs.MongoDB.C(\"chatlog\")\n\terr := c.Insert(map[string]string{\"nick\": req.Nick, \"text\": req.Text()})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (bs *Bot3Server) SetupMongoDBConnection() error {\n\t\/\/ Connect to Mongo.\n\tservers, err := bs.Config.GetString(\"mongo\", \"servers\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbs.MongoSession, err = mgo.Dial(servers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb, err := bs.Config.GetString(\"mongo\", \"db\")\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\tfmt.Println(\"Successfully obtained config from mongo\")\n\t}\n\n\tbs.MongoDB = bs.MongoSession.DB(db)\n\treturn nil\n}\n\nfunc (bs *Bot3Server) HandleOutgoing(outgoingToNSQChan chan *server.BotResponse, heartbeatTicker <-chan time.Time, outputWriter *nsq.Producer, serverID uuid.UUID) {\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-outgoingToNSQChan:\n\t\t\tval, _ := json.Marshal(msg)\n\t\t\toutputWriter.Publish(\"bot3server-output\", val)\n\t\t\tbreak\n\t\tcase t := <-heartbeatTicker:\n\t\t\thb := &server.Bot3ServerHeartbeat{ServerID: serverID.String(), Timestamp: t}\n\t\t\tval, _ := json.Marshal(hb)\n\t\t\toutputWriter.Publish(\"bot3server-heartbeat\", val)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (bs *Bot3Server) HandleIncoming(botRequest *server.BotRequest) error {\n\n\t\/\/ since we dont want the entire botapi to crash if a module throws an error or panic\n\t\/\/ we'll trap it here and log\/notify the owner.\n\n\t\/\/ mabye a good idea to automatically-disable the module if it panics too frequently\n\n\t\/\/ throwout malformed requests\n\tif len(botRequest.ChatText) < 1 {\n\t\treturn nil\n\t}\n\n\t\/\/ deferred method to protect against panics\n\tdefer func(rawline string) {\n\t\t\/\/ if module panics, log it and discard\n\t\tif x := recover(); x != nil {\n\t\t\tlog.Printf(\"Trapped panic. Rawline is:[%s]: %v\\n\", rawline, x)\n\t\t}\n\t}(botRequest.Text())\n\n\t\/\/ check if command before processing\n\tif botRequest.RequestIsCommand() {\n\n\t\tcommand := botRequest.Command()\n\n\t\thandler := bs.GetHandler(command)\n\n\t\tif handler != nil {\n\t\t\t\/\/log.Printf(\"Assigning handler for: %s\\n\", command)\n\t\t\t\/\/botResponse := &server.BotResponse{Target: botRequest.Channel, Identifier: botRequest.Identifier}\n\t\t\thandler.DispatchRequest(botRequest)\n\t\t}\n\t}\n\n\t\/\/ return error object (nil for now)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\tPackage unsafe contains operations that step around the type safety of Go programs.\n\n\tPackages that import unsafe may be non-portable and are not protected by the\n\tGo 1 compatibility guidelines.\n*\/\npackage unsafe\n\n\/\/ ArbitraryType is here for the purposes of documentation only and is not actually\n\/\/ part of the unsafe package. It represents the type of an arbitrary Go expression.\ntype ArbitraryType int\n\n\/\/ Pointer represents a pointer to an arbitrary type. There are four special operations\n\/\/ available for type Pointer that are not available for other types.\n\/\/\t1) A pointer value of any type can be converted to a Pointer.\n\/\/\t2) A Pointer can be converted to a pointer value of any type.\n\/\/\t3) A uintptr can be converted to a Pointer.\n\/\/\t4) A Pointer can be converted to a uintptr.\n\/\/ Pointer therefore allows a program to defeat the type system and read and write\n\/\/ arbitrary memory. It should be used with extreme care.\ntype Pointer *ArbitraryType\n\n\/\/ Sizeof returns the size in bytes occupied by the value v. The size is that of the\n\/\/ \"top level\" of the value only. For instance, if v is a slice, it returns the size of\n\/\/ the slice descriptor, not the size of the memory referenced by the slice.\nfunc Sizeof(v ArbitraryType) uintptr\n\n\/\/ Offsetof returns the offset within the struct of the field represented by v,\n\/\/ which must be of the form structValue.field. In other words, it returns the\n\/\/ number of bytes between the start of the struct and the start of the field.\nfunc Offsetof(v ArbitraryType) uintptr\n\n\/\/ Alignof returns the alignment of the value v. It is the maximum value m such\n\/\/ that the address of a variable with the type of v will always be zero mod m.\n\/\/ If v is of the form structValue.field, it returns the alignment of field f within struct object obj.\nfunc Alignof(v ArbitraryType) uintptr\n<commit_msg>unsafe: fix doc strings<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\tPackage unsafe contains operations that step around the type safety of Go programs.\n\n\tPackages that import unsafe may be non-portable and are not protected by the\n\tGo 1 compatibility guidelines.\n*\/\npackage unsafe\n\n\/\/ ArbitraryType is here for the purposes of documentation only and is not actually\n\/\/ part of the unsafe package. It represents the type of an arbitrary Go expression.\ntype ArbitraryType int\n\n\/\/ Pointer represents a pointer to an arbitrary type. There are four special operations\n\/\/ available for type Pointer that are not available for other types.\n\/\/\t1) A pointer value of any type can be converted to a Pointer.\n\/\/\t2) A Pointer can be converted to a pointer value of any type.\n\/\/\t3) A uintptr can be converted to a Pointer.\n\/\/\t4) A Pointer can be converted to a uintptr.\n\/\/ Pointer therefore allows a program to defeat the type system and read and write\n\/\/ arbitrary memory. It should be used with extreme care.\ntype Pointer *ArbitraryType\n\n\/\/ Sizeof takes an expression x of any type and returns the size of\n\/\/ a hypothetical variable v as if v was declared via var v = x.\n\/\/ Note that the size does not include any memory possibly referenced\n\/\/ by x. For instance, if x is a slice, Sizeof returns the size of the\n\/\/ slice descriptor, not the size of the memory referenced by the slice.\nfunc Sizeof(x ArbitraryType) uintptr\n\n\/\/ Offsetof returns the offset within the struct of the field represented by x,\n\/\/ which must be of the form structValue.field. In other words, it returns the\n\/\/ number of bytes between the start of the struct and the start of the field.\nfunc Offsetof(x ArbitraryType) uintptr\n\n\/\/ Alignof takes an expression x of any type and returns the alignment\n\/\/ of a hypothetical variable v as if v was declared via var v = x.\n\/\/ It is the largest value m such that the address of v is zero mod m.\nfunc Alignof(x ArbitraryType) uintptr\n<|endoftext|>"} {"text":"<commit_before>package operatortests\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats-operator\/pkg\/spec\"\n\tk8sv1 \"k8s.io\/api\/core\/v1\"\n\tk8smetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tk8swaitutil \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n)\n\nfunc TestConfigMapReload_Servers(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\tdefer cancel()\n\trunController(ctx, t)\n\n\tcl, err := newKubeClients()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the CRD to be registered in the background.\n\ttime.Sleep(10 * time.Second)\n\tname := \"test-nats-cluster-reload-1\"\n\tnamespace := \"default\"\n\n\t\/\/ Start with a single node, then wait for the reload event\n\t\/\/ due to increasing size of the cluster.\n\tvar size = 1\n\tcluster := &spec.NatsCluster{\n\t\tTypeMeta: k8smetav1.TypeMeta{\n\t\t\tKind: spec.CRDResourceKind,\n\t\t\tAPIVersion: spec.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: spec.ClusterSpec{\n\t\t\tSize: size,\n\t\t\tVersion: \"1.1.0\",\n\t\t\tPod: &spec.PodPolicy{\n\t\t\t\tEnableConfigReload: true,\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.ncli.Create(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be created\n\tparams := k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\tvar podList *k8sv1.PodList\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\t\/\/ Now scale up and resize the cluster which will trigger a reload,\n\t\/\/ for that need to get first and to be able to make the update.\n\tsize = 3\n\tcluster, err = cl.ncli.Get(ctx, namespace, name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcluster.Spec.Size = size\n\n\t_, err = cl.ncli.Update(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the pods to be created\n\tparams = k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\terr = k8swaitutil.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t\tContainer: \"nats\",\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(fmt.Sprintf(\"%s-1\", name), opts).Stream()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Logs request has failed: %v\", err)\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\n\t\tif !strings.Contains(output, \"Reloaded server configuration\") {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be reloaded: %s\", err)\n\t}\n\ttime.Sleep(1 * time.Minute)\n}\n\nfunc TestConfigMapReload_Auth(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\trunController(ctx, t)\n\n\tcl, err := newKubeClients()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the CRD to be registered in the background.\n\ttime.Sleep(10 * time.Second)\n\tname := \"test-nats-cluster-reload-auth-1\"\n\tnamespace := \"default\"\n\n\t\/\/ Create the secret with the auth credentials\n\tsec := `{\n \"users\": [\n { \"username\": \"user1\", \"password\": \"secret1\",\n \"permissions\": {\n\t\"publish\": [\"hello.*\"],\n\t\"subscribe\": [\"hello.world\"]\n }\n }\n ],\n \"default_permissions\": {\n \"publish\": [\"SANDBOX.*\"],\n \"subscribe\": [\"PUBLIC.>\"]\n }\n}\n`\n\tcm := &k8sv1.Secret{\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"anything\": []byte(sec),\n\t\t},\n\t}\n\t_, err = cl.kc.Secrets(namespace).Create(cm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Start with a single node, then wait for the reload event\n\t\/\/ due to increasing size of the cluster.\n\tvar size = 1\n\tcluster := &spec.NatsCluster{\n\t\tTypeMeta: k8smetav1.TypeMeta{\n\t\t\tKind: spec.CRDResourceKind,\n\t\t\tAPIVersion: spec.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: spec.ClusterSpec{\n\t\t\tSize: size,\n\t\t\tVersion: \"1.1.0\",\n\t\t\tPod: &spec.PodPolicy{\n\t\t\t\tEnableConfigReload: true,\n\t\t\t},\n\t\t\tAuth: &spec.AuthConfig{\n\t\t\t\tClientsAuthSecret: name,\n\t\t\t\tClientsAuthTimeout: 10,\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.ncli.Create(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be created\n\tparams := k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\tvar podList *k8sv1.PodList\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\topsPodName := fmt.Sprintf(\"%s-ops-1\", name)\n\tpod := &k8sv1.Pod{\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: opsPodName,\n\t\t\tLabels: map[string]string{\"name\": \"nats-ops\"},\n\t\t},\n\t\tSpec: k8sv1.PodSpec{\n\t\t\tContainers: []k8sv1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: opsPodName,\n\t\t\t\t\tImage: \"wallyqs\/nats-ops:tools\",\n\t\t\t\t\tImagePullPolicy: k8sv1.PullIfNotPresent,\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"\/nats-sub\",\n\t\t\t\t\t\t\"-s\",\n\t\t\t\t\t\tfmt.Sprintf(\"nats:\/\/user1:secret1@%s:4222\", name),\n\t\t\t\t\t\t\"hello.world\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.kc.Pods(namespace).Create(pod)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Should poll until pod is available\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpod2, err := cl.kc.Pods(namespace).Get(opsPodName, k8smetav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif pod2.Status.Phase != k8sv1.PodRunning {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\t\/\/ Confirm that the pod subscribed successfully, then do a reload\n\t\/\/ removing its user.\n\terr = k8swaitutil.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(opsPodName, opts).Stream()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\t\tfmt.Println(output)\n\t\tif !strings.Contains(output, \"Listening on [hello.world]\") {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pod state: %s\", err)\n\t}\n\n\t\/\/ Remove the user and then current connection will be closed.\n\tsec = `{\n \"users\": [\n { \"username\": \"user2\", \"password\": \"secret2\" }\n ],\n \"default_permissions\": {\n \"publish\": [\"SANDBOX.*\"],\n \"subscribe\": [\"PUBLIC.>\"]\n }\n}\n`\n\tresult, err := cl.kc.Secrets(namespace).Get(name, k8smetav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresult.Data[\"anything\"] = []byte(sec)\n\t_, err = cl.kc.Secrets(namespace).Update(result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be updated with new auth creds.\n\tparams = k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\terr = k8swaitutil.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t\tContainer: \"nats\",\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(fmt.Sprintf(\"%s-1\", name), opts).Stream()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Logs request has failed: %v\", err)\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\t\tfmt.Println(output)\n\t\tif !strings.Contains(output, \"Authorization Error\") {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be reloaded: %s\", err)\n\t}\n}\n<commit_msg>More test fixes<commit_after>package operatortests\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats-operator\/pkg\/spec\"\n\tk8sv1 \"k8s.io\/api\/core\/v1\"\n\tk8smetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tk8swaitutil \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n)\n\nfunc TestConfigMapReload_Servers(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\tdefer cancel()\n\trunController(ctx, t)\n\n\tcl, err := newKubeClients()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the CRD to be registered in the background.\n\ttime.Sleep(10 * time.Second)\n\tname := \"test-nats-cluster-reload-1\"\n\tnamespace := \"default\"\n\n\t\/\/ Start with a single node, then wait for the reload event\n\t\/\/ due to increasing size of the cluster.\n\tvar size = 1\n\tcluster := &spec.NatsCluster{\n\t\tTypeMeta: k8smetav1.TypeMeta{\n\t\t\tKind: spec.CRDResourceKind,\n\t\t\tAPIVersion: spec.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: spec.ClusterSpec{\n\t\t\tSize: size,\n\t\t\tVersion: \"1.1.0\",\n\t\t\tPod: &spec.PodPolicy{\n\t\t\t\tEnableConfigReload: true,\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.ncli.Create(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be created\n\tparams := k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\tvar podList *k8sv1.PodList\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\t\/\/ Now scale up and resize the cluster which will trigger a reload,\n\t\/\/ for that need to get first and to be able to make the update.\n\tsize = 3\n\tcluster, err = cl.ncli.Get(ctx, namespace, name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcluster.Spec.Size = size\n\n\t_, err = cl.ncli.Update(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the pods to be created\n\tparams = k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\terr = k8swaitutil.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t\tContainer: \"nats\",\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(fmt.Sprintf(\"%s-1\", name), opts).Stream()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Logs request has failed: %v\", err)\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\n\t\tif !strings.Contains(output, \"Reloaded server configuration\") {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be reloaded: %s\", err)\n\t}\n\ttime.Sleep(1 * time.Minute)\n}\n\nfunc TestConfigMapReload_Auth(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\trunController(ctx, t)\n\n\tcl, err := newKubeClients()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Wait for the CRD to be registered in the background.\n\ttime.Sleep(10 * time.Second)\n\tname := \"test-nats-cluster-reload-auth-1\"\n\tnamespace := \"default\"\n\n\t\/\/ Create the secret with the auth credentials\n\tsec := `{\n \"users\": [\n { \"username\": \"user1\", \"password\": \"secret1\",\n \"permissions\": {\n\t\"publish\": [\"hello.*\"],\n\t\"subscribe\": [\"hello.world\"]\n }\n }\n ],\n \"default_permissions\": {\n \"publish\": [\"SANDBOX.*\"],\n \"subscribe\": [\"PUBLIC.>\"]\n }\n}\n`\n\tcm := &k8sv1.Secret{\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"anything\": []byte(sec),\n\t\t},\n\t}\n\t_, err = cl.kc.Secrets(namespace).Create(cm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Start with a single node, then wait for the reload event\n\t\/\/ due to increasing size of the cluster.\n\tvar size = 1\n\tcluster := &spec.NatsCluster{\n\t\tTypeMeta: k8smetav1.TypeMeta{\n\t\t\tKind: spec.CRDResourceKind,\n\t\t\tAPIVersion: spec.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: spec.ClusterSpec{\n\t\t\tSize: size,\n\t\t\tVersion: \"1.1.0\",\n\t\t\tPod: &spec.PodPolicy{\n\t\t\t\tEnableConfigReload: true,\n\t\t\t},\n\t\t\tAuth: &spec.AuthConfig{\n\t\t\t\tClientsAuthSecret: name,\n\t\t\t\tClientsAuthTimeout: 10,\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.ncli.Create(ctx, cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be created\n\tparams := k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\tvar podList *k8sv1.PodList\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\t\tpod := podList.Items[0]\n\t\tif pod.Status.Phase != k8sv1.PodRunning {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\topsPodName := fmt.Sprintf(\"%s-ops-1\", name)\n\tpod := &k8sv1.Pod{\n\t\tObjectMeta: k8smetav1.ObjectMeta{\n\t\t\tName: opsPodName,\n\t\t\tLabels: map[string]string{\"name\": \"nats-ops\"},\n\t\t},\n\t\tSpec: k8sv1.PodSpec{\n\t\t\tContainers: []k8sv1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: opsPodName,\n\t\t\t\t\tImage: \"wallyqs\/nats-ops:tools\",\n\t\t\t\t\tImagePullPolicy: k8sv1.PullIfNotPresent,\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"\/nats-sub\",\n\t\t\t\t\t\t\"-s\",\n\t\t\t\t\t\tfmt.Sprintf(\"nats:\/\/user1:secret1@%s.default.svc.cluster.local:4222\", name),\n\t\t\t\t\t\t\"hello.world\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err = cl.kc.Pods(namespace).Create(pod)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Should poll until pod is available\n\terr = k8swaitutil.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tpod2, err := cl.kc.Pods(namespace).Get(opsPodName, k8smetav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif pod2.Status.Phase != k8sv1.PodRunning {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be created: %s\", err)\n\t}\n\n\t\/\/ Confirm that the pod subscribed successfully, then do a reload\n\t\/\/ removing its user.\n\tk8swaitutil.Poll(3*time.Second, 30*time.Second, func() (bool, error) {\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(opsPodName, opts).Stream()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\t\tt.Logf(\"OUTPUT: %s\", output)\n\t\tif !strings.Contains(output, \"Listening on [hello.world]\") {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\t\/\/ Remove the user and then current connection will be closed.\n\tsec = `{\n \"users\": [\n { \"username\": \"user2\", \"password\": \"secret2\" }\n ],\n \"default_permissions\": {\n \"publish\": [\"SANDBOX.*\"],\n \"subscribe\": [\"PUBLIC.>\"]\n }\n}\n`\n\tresult, err := cl.kc.Secrets(namespace).Get(name, k8smetav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresult.Data[\"anything\"] = []byte(sec)\n\t_, err = cl.kc.Secrets(namespace).Update(result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Wait for the pods to be updated with new auth creds.\n\tparams = k8smetav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"app=nats,nats_cluster=%s\", name),\n\t}\n\terr = k8swaitutil.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {\n\t\tpodList, err = cl.kc.Pods(namespace).List(params)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(podList.Items) < size {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tsinceTime := k8smetav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))\n\t\topts := &k8sv1.PodLogOptions{\n\t\t\tSinceTime: &sinceTime,\n\t\t\tContainer: \"nats\",\n\t\t}\n\t\trc, err := cl.kc.Pods(namespace).GetLogs(fmt.Sprintf(\"%s-1\", name), opts).Stream()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Logs request has failed: %v\", err)\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(rc)\n\n\t\toutput := buf.String()\n\n\t\t\/\/ On reload now pod would get:\n\t\t\/\/\n\t\t\/\/ Authorization Error - User \"user1\"\n\t\t\/\/\n\t\tif !strings.Contains(output, \"Reloaded: authorization users\") {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Error waiting for pods to be reloaded: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ rundir -P -l=4 -ldflags -strictdups=2\n\n\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !nacl,!js\n\npackage ignored\n<commit_msg>test: fix fixedbugs\/issue30908.go to work with no-opt builder<commit_after>\/\/ rundir -P -ldflags -strictdups=2 -w=0\n\n\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !nacl,!js\n\npackage ignored\n<|endoftext|>"} {"text":"<commit_before>package worker\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ovh\/cds\/engine\/api\/action\"\n\t\"github.com\/ovh\/cds\/engine\/api\/context\"\n\t\"github.com\/ovh\/cds\/engine\/api\/database\"\n\t\"github.com\/ovh\/cds\/engine\/log\"\n\t\"github.com\/ovh\/cds\/sdk\"\n)\n\nvar (\n\tmodelCapabilities map[int64][]sdk.Requirement\n\tmodelCapaMutex sync.RWMutex\n\tactionRequirements map[int64][]sdk.Requirement\n\tactionRequirementsMutex sync.RWMutex\n)\n\nfunc logTime(name string, then time.Time) {\n\n\td := time.Since(then)\n\tif d > 2000*time.Millisecond {\n\t\tlog.Warning(\"%s took %s to execute\\n\", name, d)\n\t}\n}\n\nfunc loadWorkerModelStatus(db *sql.DB, c *context.Context) ([]sdk.ModelStatus, error) {\n\tdefer logTime(\"loadWorkerModelStatus\", time.Now())\n\n\tswitch c.Agent {\n\tcase sdk.HatcheryAgent:\n\t\treturn loadWorkerModelStatusForGroup(db, c.User.Groups[0].ID)\n\tdefault:\n\t\treturn loadWorkerModelStatusForUser(db, c.User.ID)\n\t}\n}\n\nfunc loadWorkerModelStatusForGroup(db *sql.DB, groupID int64) ([]sdk.ModelStatus, error) {\n\tquery := `\nSELECT worker_model.id, worker_model.name, COALESCE(waiting.count, 0) as waiting, COALESCE(building.count,0) as building FROM worker_model\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tWHERE worker.group_id = $1 AND worker.status = 'Waiting'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS waiting ON waiting.model = worker_model.id\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tWHERE worker.group_id = $1 AND worker.status = 'Building'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS building ON building.model = worker_model.id\nORDER BY worker_model.name ASC;\n`\n\trows, err := db.Query(query, groupID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar status []sdk.ModelStatus\n\tfor rows.Next() {\n\t\tvar ms sdk.ModelStatus\n\t\terr := rows.Scan(&ms.ModelID, &ms.ModelName, &ms.CurrentCount, &ms.BuildingCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstatus = append(status, ms)\n\t}\n\n\treturn status, nil\n\n}\n\n\/\/ loadWorkerModelStatus loads from database the number of worker deployed for each model\n\/\/ start with group permissions calling user has access to.\nfunc loadWorkerModelStatusForUser(db *sql.DB, userID int64) ([]sdk.ModelStatus, error) {\n\n\tquery := `\nSELECT worker_model.id, worker_model.name, COALESCE(waiting.count, 0) as waiting, COALESCE(building.count,0) as building FROM worker_model\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tJOIN \"group\" ON \"group\".id = worker.group_id\n\t\tJOIN group_user ON \"group\".id = group_user.group_id\n\t\tWHERE group_user.user_id = $1 AND worker.status = 'Waiting'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS waiting ON waiting.model = worker_model.id\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tJOIN \"group\" ON \"group\".id = worker.group_id\n\t\tJOIN group_user ON \"group\".id = group_user.group_id\n\t\tWHERE group_user.user_id = $1 AND worker.status = 'Building'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS building ON building.model = worker_model.id\nORDER BY worker_model.name ASC;\n`\n\n\trows, err := db.Query(query, userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar status []sdk.ModelStatus\n\tfor rows.Next() {\n\t\tvar ms sdk.ModelStatus\n\t\terr := rows.Scan(&ms.ModelID, &ms.ModelName, &ms.CurrentCount, &ms.BuildingCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstatus = append(status, ms)\n\t}\n\n\treturn status, nil\n}\n\nfunc modelCanRun(db *sql.DB, name string, req []sdk.Requirement, capa []sdk.Requirement) bool {\n\tdefer logTime(\"compareRequirements\", time.Now())\n\n\tm, err := LoadWorkerModel(db, name)\n\tif err != nil {\n\t\tlog.Warning(\"modelCanRun> Unable to load model %s\", name)\n\t\treturn false\n\t}\n\n\tlog.Info(\"Comparing %d requirements to %d capa\\n\", len(req), len(capa))\n\tfor _, r := range req {\n\t\t\/\/ service requirement are only supported by docker model\n\t\tif r.Type == sdk.ServiceRequirement && m.Type != sdk.Docker {\n\t\t\treturn false\n\t\t}\n\n\t\tfound := false\n\n\t\t\/\/ If requirement is a Model requirement, it's easy. It's either can or can't run\n\t\tif r.Type == sdk.ModelRequirement {\n\t\t\treturn r.Value == name\n\t\t}\n\n\t\t\/\/ If requirement is an hostname requirement, it's for a specific worker\n\t\tif r.Type == sdk.HostnameRequirement {\n\t\t\treturn false \/\/ TODO: update when hatchery in local mode declare an hostname capa\n\t\t}\n\n\t\t\/\/ Skip network access requirement as we can't check it\n\t\tif r.Type == sdk.NetworkAccessRequirement {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Everyone can play plugins\n\t\tif r.Type == sdk.PluginRequirement {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check binary requirement against worker model capabilities\n\t\tfor _, c := range capa {\n\t\t\tlog.Debug(\"Comparing [%s] and [%s]\\n\", r.Name, c.Name)\n\t\t\tif r.Value == c.Value || r.Value == c.Name {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\ntype actioncount struct {\n\tAction sdk.Action\n\tCount int64\n}\n\nfunc scanActionCount(db *sql.DB, s database.Scanner) (actioncount, error) {\n\tac := actioncount{}\n\tvar actionID int64\n\n\terr := s.Scan(&ac.Count, &actionID)\n\tif err != nil {\n\t\treturn ac, fmt.Errorf(\"scanActionCount> cannot scan: %s\", err)\n\t}\n\n\tactionRequirementsMutex.RLock()\n\treq, ok := actionRequirements[actionID]\n\tactionRequirementsMutex.RUnlock()\n\tif !ok {\n\t\treq, err = action.LoadActionRequirements(db, actionID)\n\t\tif err != nil {\n\t\t\treturn ac, fmt.Errorf(\"scanActionCount> cannot LoadActionRequirements for %d: %s\\n\", actionID, err)\n\t\t}\n\t\tactionRequirementsMutex.Lock()\n\t\tactionRequirements[actionID] = req\n\t\tactionRequirementsMutex.Unlock()\n\t}\n\n\tlog.Debug(\"Action %d: %d in queue with %d requirements\\n\", actionID, ac.Count, len(req))\n\tac.Action.Requirements = req\n\treturn ac, nil\n}\n\nfunc loadGroupActionCount(db *sql.DB, groupID int64) ([]actioncount, error) {\n\tacs := []actioncount{}\n\tquery := `\n\tSELECT COUNT(action_build.id), pipeline_action.action_id\n\tFROM action_build\n\tJOIN pipeline_action ON pipeline_action.id = action_build.pipeline_action_id\n JOIN pipeline_build ON pipeline_build.id = action_build.pipeline_build_id\n JOIN pipeline ON pipeline.id = pipeline_build.pipeline_id\n\tJOIN pipeline_group ON pipeline_group.pipeline_id = pipeline.id\n\tWHERE action_build.status = $1 AND pipeline_group.group_id = $2\n\tGROUP BY pipeline_action.action_id\n\tLIMIT 1000\n\t`\n\n\trows, err := db.Query(query, string(sdk.StatusWaiting), groupID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tac, err := scanActionCount(db, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tacs = append(acs, ac)\n\t}\n\n\treturn acs, nil\n}\nfunc loadUserActionCount(db *sql.DB, userID int64) ([]actioncount, error) {\n\tacs := []actioncount{}\n\tquery := `\n\tSELECT COUNT(action_build.id), pipeline_action.action_id\n\tFROM action_build\n\tJOIN pipeline_action ON pipeline_action.id = action_build.pipeline_action_id\n JOIN pipeline_build ON pipeline_build.id = action_build.pipeline_build_id\n JOIN pipeline ON pipeline.id = pipeline_build.pipeline_id\n\tJOIN pipeline_group ON pipeline_group.pipeline_id = pipeline.id\n\tJOIN group_user ON group_user.group_id = pipeline_group.group_id\n\tWHERE action_build.status = $1 AND group_user.user_id = $2\n\tGROUP BY pipeline_action.action_id\n\tLIMIT 1000\n\t`\n\n\trows, err := db.Query(query, string(sdk.StatusWaiting), userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tac, err := scanActionCount(db, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tacs = append(acs, ac)\n\t}\n\n\treturn acs, nil\n}\n\nfunc loadActionCount(db *sql.DB, c *context.Context) ([]actioncount, error) {\n\tdefer logTime(\"EstimateWorkerModelNeeds\", time.Now())\n\n\tswitch c.Agent {\n\tcase sdk.HatcheryAgent:\n\t\treturn loadGroupActionCount(db, c.User.Groups[0].ID)\n\tdefault:\n\t\treturn loadUserActionCount(db, c.User.ID)\n\t}\n\n}\n\n\/\/ UpdateModelCapabilitiesCache updates model capabilities cache\nfunc UpdateModelCapabilitiesCache() {\n\tmodelCapabilities = make(map[int64][]sdk.Requirement)\n\n\tfor {\n\t\ttime.Sleep(5 * time.Second)\n\t\tdb := database.DB()\n\t\tif db != nil {\n\t\t\twms, err := LoadWorkerModels(db)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"updateModelCapabilities> Cannot load worker models: %s\\n\", err)\n\t\t\t}\n\t\t\tmodelCapaMutex.Lock()\n\t\t\tfor _, wm := range wms {\n\t\t\t\tmodelCapabilities[wm.ID] = wm.Capabilities\n\t\t\t}\n\t\t\tmodelCapaMutex.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ UpdateActionRequirementsCache updates internal action cache\nfunc UpdateActionRequirementsCache() {\n\tactionRequirements = make(map[int64][]sdk.Requirement)\n\n\tfor {\n\t\ttime.Sleep(10 * time.Second)\n\t\tdb := database.DB()\n\t\tif db != nil {\n\t\t\tactionRequirementsMutex.Lock()\n\t\t\tfor actionID := range actionRequirements {\n\t\t\t\treq, err := action.LoadActionRequirements(db, actionID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warning(\"UpdateActionRequirementsCache> Cannot LoadActionRequirements for %d: %s\\n\", actionID, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tactionRequirements[actionID] = req\n\t\t\t}\n\t\t\tactionRequirementsMutex.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ EstimateWorkerModelNeeds returns for each worker model the needs of instances\nfunc EstimateWorkerModelNeeds(db *sql.DB, c *context.Context) ([]sdk.ModelStatus, error) {\n\tdefer logTime(\"EstimateWorkerModelNeeds\", time.Now())\n\tu := c.User\n\n\t\/\/ Load models stats\n\tms, err := loadWorkerModelStatus(db, c)\n\tif err != nil {\n\t\tlog.Warning(\"EstimateWorkerModelsNeeds> Cannot LoadWorkerModelStatus for user %d: %s\\n\", u.ID, err)\n\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNeeds> Cannot loadWorkerModelStatus> %s\", err)\n\t}\n\n\t\/\/ Load actions in queue grouped by action (same requirement, same worker model)\n\tacs, err := loadActionCount(db, c)\n\tif err != nil {\n\t\tlog.Warning(\"EstimateWorkerModelsNeeds> Cannot LoadActionCount for user %d: %s\\n\", u.ID, err)\n\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNeeds> cannot loadActionCount> %s\", err)\n\t}\n\n\t\/\/ Now for each unique action in queue, find a worker model able to run it\n\tvar capas []sdk.Requirement\n\tvar ok bool\n\tfor _, ac := range acs {\n\t\t\/\/ Loop through model in case there is multiple models with the capacity to build current ActionBuild\n\t\t\/\/ This allow a dispatch of Count via round robin on all matching models\n\t\t\/\/ Thus dispatching the load potentially on multiple architectures\/hatcheries\n\t\tloopModels := true\n\t\tfor loopModels {\n\t\t\tloopModels = false\n\n\t\t\tfor i := range ms {\n\t\t\t\tmodelCapaMutex.RLock()\n\t\t\t\tcapas, ok = modelCapabilities[ms[i].ModelID]\n\t\t\t\tmodelCapaMutex.RUnlock()\n\t\t\t\tif !ok {\n\t\t\t\t\tcapas, err = LoadWorkerModelCapabilities(db, ms[i].ModelID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNees> cannot loadWorkerModelCapabilities: %s\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif modelCanRun(db, ms[i].ModelName, ac.Action.Requirements, capas) {\n\t\t\t\t\tif ac.Count > 0 {\n\t\t\t\t\t\tms[i].WantedCount++\n\t\t\t\t\t\tac.Count--\n\t\t\t\t\t\tloopModels = true\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/Add model requirement if action has specific kind of requirements\n\t\t\t\t\tms[i].Requirements = []sdk.Requirement{}\n\t\t\t\t\tfor j := range ac.Action.Requirements {\n\t\t\t\t\t\tif ac.Action.Requirements[j].Type == sdk.ServiceRequirement {\n\t\t\t\t\t\t\tms[i].Requirements = append(ms[i].Requirements, ac.Action.Requirements[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/break\n\t\t\t\t}\n\t\t\t} \/\/ !range models\n\t\t} \/\/ !range loopModels\n\t} \/\/ !range acs\n\n\treturn ms, nil\n}\n<commit_msg>feat (api): elasticity logs<commit_after>package worker\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ovh\/cds\/engine\/api\/action\"\n\t\"github.com\/ovh\/cds\/engine\/api\/context\"\n\t\"github.com\/ovh\/cds\/engine\/api\/database\"\n\t\"github.com\/ovh\/cds\/engine\/log\"\n\t\"github.com\/ovh\/cds\/sdk\"\n)\n\nvar (\n\tmodelCapabilities map[int64][]sdk.Requirement\n\tmodelCapaMutex sync.RWMutex\n\tactionRequirements map[int64][]sdk.Requirement\n\tactionRequirementsMutex sync.RWMutex\n)\n\nfunc logTime(name string, then time.Time) {\n\td := time.Since(then)\n\tif d > 10*time.Second {\n\t\tlog.Warning(\"%s took %s to execute\\n\", name, d)\n\t\treturn\n\t}\n\n\tif d > 2*time.Second {\n\t\tlog.Warning(\"%s took %s to execute\\n\", name, d)\n\t\treturn\n\t}\n\n\tlog.Debug(\"%s took %s to execute\\n\", name, d)\n}\n\nfunc loadWorkerModelStatus(db *sql.DB, c *context.Context) ([]sdk.ModelStatus, error) {\n\tdefer logTime(\"loadWorkerModelStatus\", time.Now())\n\n\tswitch c.Agent {\n\tcase sdk.HatcheryAgent:\n\t\treturn loadWorkerModelStatusForGroup(db, c.User.Groups[0].ID)\n\tdefault:\n\t\treturn loadWorkerModelStatusForUser(db, c.User.ID)\n\t}\n}\n\nfunc loadWorkerModelStatusForGroup(db *sql.DB, groupID int64) ([]sdk.ModelStatus, error) {\n\tquery := `\nSELECT worker_model.id, worker_model.name, COALESCE(waiting.count, 0) as waiting, COALESCE(building.count,0) as building FROM worker_model\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tWHERE worker.group_id = $1 AND worker.status = 'Waiting'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS waiting ON waiting.model = worker_model.id\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tWHERE worker.group_id = $1 AND worker.status = 'Building'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS building ON building.model = worker_model.id\nORDER BY worker_model.name ASC;\n`\n\trows, err := db.Query(query, groupID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar status []sdk.ModelStatus\n\tfor rows.Next() {\n\t\tvar ms sdk.ModelStatus\n\t\terr := rows.Scan(&ms.ModelID, &ms.ModelName, &ms.CurrentCount, &ms.BuildingCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstatus = append(status, ms)\n\t}\n\n\treturn status, nil\n\n}\n\n\/\/ loadWorkerModelStatus loads from database the number of worker deployed for each model\n\/\/ start with group permissions calling user has access to.\nfunc loadWorkerModelStatusForUser(db *sql.DB, userID int64) ([]sdk.ModelStatus, error) {\n\n\tquery := `\nSELECT worker_model.id, worker_model.name, COALESCE(waiting.count, 0) as waiting, COALESCE(building.count,0) as building FROM worker_model\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tJOIN \"group\" ON \"group\".id = worker.group_id\n\t\tJOIN group_user ON \"group\".id = group_user.group_id\n\t\tWHERE group_user.user_id = $1 AND worker.status = 'Waiting'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS waiting ON waiting.model = worker_model.id\n\tLEFT JOIN LATERAL (SELECT model, COUNT(worker.id) as count FROM worker\n\t\tJOIN \"group\" ON \"group\".id = worker.group_id\n\t\tJOIN group_user ON \"group\".id = group_user.group_id\n\t\tWHERE group_user.user_id = $1 AND worker.status = 'Building'\n\t\tAND worker.model = worker_model.id\n\t\tGROUP BY model) AS building ON building.model = worker_model.id\nORDER BY worker_model.name ASC;\n`\n\n\trows, err := db.Query(query, userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar status []sdk.ModelStatus\n\tfor rows.Next() {\n\t\tvar ms sdk.ModelStatus\n\t\terr := rows.Scan(&ms.ModelID, &ms.ModelName, &ms.CurrentCount, &ms.BuildingCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstatus = append(status, ms)\n\t}\n\n\treturn status, nil\n}\n\nfunc modelCanRun(db *sql.DB, name string, req []sdk.Requirement, capa []sdk.Requirement) bool {\n\tdefer logTime(\"compareRequirements\", time.Now())\n\n\tm, err := LoadWorkerModel(db, name)\n\tif err != nil {\n\t\tlog.Warning(\"modelCanRun> Unable to load model %s\", name)\n\t\treturn false\n\t}\n\n\tlog.Info(\"Comparing %d requirements to %d capa\\n\", len(req), len(capa))\n\tfor _, r := range req {\n\t\t\/\/ service requirement are only supported by docker model\n\t\tif r.Type == sdk.ServiceRequirement && m.Type != sdk.Docker {\n\t\t\treturn false\n\t\t}\n\n\t\tfound := false\n\n\t\t\/\/ If requirement is a Model requirement, it's easy. It's either can or can't run\n\t\tif r.Type == sdk.ModelRequirement {\n\t\t\treturn r.Value == name\n\t\t}\n\n\t\t\/\/ If requirement is an hostname requirement, it's for a specific worker\n\t\tif r.Type == sdk.HostnameRequirement {\n\t\t\treturn false \/\/ TODO: update when hatchery in local mode declare an hostname capa\n\t\t}\n\n\t\t\/\/ Skip network access requirement as we can't check it\n\t\tif r.Type == sdk.NetworkAccessRequirement {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Everyone can play plugins\n\t\tif r.Type == sdk.PluginRequirement {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check binary requirement against worker model capabilities\n\t\tfor _, c := range capa {\n\t\t\tlog.Debug(\"Comparing [%s] and [%s]\\n\", r.Name, c.Name)\n\t\t\tif r.Value == c.Value || r.Value == c.Name {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\ntype actioncount struct {\n\tAction sdk.Action\n\tCount int64\n}\n\nfunc scanActionCount(db *sql.DB, s database.Scanner) (actioncount, error) {\n\tac := actioncount{}\n\tvar actionID int64\n\n\terr := s.Scan(&ac.Count, &actionID)\n\tif err != nil {\n\t\treturn ac, fmt.Errorf(\"scanActionCount> cannot scan: %s\", err)\n\t}\n\n\tactionRequirementsMutex.RLock()\n\treq, ok := actionRequirements[actionID]\n\tactionRequirementsMutex.RUnlock()\n\tif !ok {\n\t\treq, err = action.LoadActionRequirements(db, actionID)\n\t\tif err != nil {\n\t\t\treturn ac, fmt.Errorf(\"scanActionCount> cannot LoadActionRequirements for %d: %s\\n\", actionID, err)\n\t\t}\n\t\tactionRequirementsMutex.Lock()\n\t\tactionRequirements[actionID] = req\n\t\tactionRequirementsMutex.Unlock()\n\t}\n\n\tlog.Debug(\"Action %d: %d in queue with %d requirements\\n\", actionID, ac.Count, len(req))\n\tac.Action.Requirements = req\n\treturn ac, nil\n}\n\nfunc loadGroupActionCount(db *sql.DB, groupID int64) ([]actioncount, error) {\n\tacs := []actioncount{}\n\tquery := `\n\tSELECT COUNT(action_build.id), pipeline_action.action_id\n\tFROM action_build\n\tJOIN pipeline_action ON pipeline_action.id = action_build.pipeline_action_id\n JOIN pipeline_build ON pipeline_build.id = action_build.pipeline_build_id\n JOIN pipeline ON pipeline.id = pipeline_build.pipeline_id\n\tJOIN pipeline_group ON pipeline_group.pipeline_id = pipeline.id\n\tWHERE action_build.status = $1 AND pipeline_group.group_id = $2\n\tGROUP BY pipeline_action.action_id\n\tLIMIT 1000\n\t`\n\n\trows, err := db.Query(query, string(sdk.StatusWaiting), groupID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tac, err := scanActionCount(db, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tacs = append(acs, ac)\n\t}\n\n\treturn acs, nil\n}\nfunc loadUserActionCount(db *sql.DB, userID int64) ([]actioncount, error) {\n\tacs := []actioncount{}\n\tquery := `\n\tSELECT COUNT(action_build.id), pipeline_action.action_id\n\tFROM action_build\n\tJOIN pipeline_action ON pipeline_action.id = action_build.pipeline_action_id\n JOIN pipeline_build ON pipeline_build.id = action_build.pipeline_build_id\n JOIN pipeline ON pipeline.id = pipeline_build.pipeline_id\n\tJOIN pipeline_group ON pipeline_group.pipeline_id = pipeline.id\n\tJOIN group_user ON group_user.group_id = pipeline_group.group_id\n\tWHERE action_build.status = $1 AND group_user.user_id = $2\n\tGROUP BY pipeline_action.action_id\n\tLIMIT 1000\n\t`\n\n\trows, err := db.Query(query, string(sdk.StatusWaiting), userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tac, err := scanActionCount(db, rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tacs = append(acs, ac)\n\t}\n\n\treturn acs, nil\n}\n\nfunc loadActionCount(db *sql.DB, c *context.Context) ([]actioncount, error) {\n\tdefer logTime(\"EstimateWorkerModelNeeds\", time.Now())\n\n\tswitch c.Agent {\n\tcase sdk.HatcheryAgent:\n\t\treturn loadGroupActionCount(db, c.User.Groups[0].ID)\n\tdefault:\n\t\treturn loadUserActionCount(db, c.User.ID)\n\t}\n\n}\n\n\/\/ UpdateModelCapabilitiesCache updates model capabilities cache\nfunc UpdateModelCapabilitiesCache() {\n\tmodelCapabilities = make(map[int64][]sdk.Requirement)\n\n\tfor {\n\t\ttime.Sleep(5 * time.Second)\n\t\tdb := database.DB()\n\t\tif db != nil {\n\t\t\twms, err := LoadWorkerModels(db)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"updateModelCapabilities> Cannot load worker models: %s\\n\", err)\n\t\t\t}\n\t\t\tmodelCapaMutex.Lock()\n\t\t\tfor _, wm := range wms {\n\t\t\t\tmodelCapabilities[wm.ID] = wm.Capabilities\n\t\t\t}\n\t\t\tmodelCapaMutex.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ UpdateActionRequirementsCache updates internal action cache\nfunc UpdateActionRequirementsCache() {\n\tactionRequirements = make(map[int64][]sdk.Requirement)\n\n\tfor {\n\t\ttime.Sleep(10 * time.Second)\n\t\tdb := database.DB()\n\t\tif db != nil {\n\t\t\tactionRequirementsMutex.Lock()\n\t\t\tfor actionID := range actionRequirements {\n\t\t\t\treq, err := action.LoadActionRequirements(db, actionID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warning(\"UpdateActionRequirementsCache> Cannot LoadActionRequirements for %d: %s\\n\", actionID, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tactionRequirements[actionID] = req\n\t\t\t}\n\t\t\tactionRequirementsMutex.Unlock()\n\t\t}\n\t}\n}\n\n\/\/ EstimateWorkerModelNeeds returns for each worker model the needs of instances\nfunc EstimateWorkerModelNeeds(db *sql.DB, c *context.Context) ([]sdk.ModelStatus, error) {\n\tdefer logTime(\"EstimateWorkerModelNeeds\", time.Now())\n\tu := c.User\n\n\t\/\/ Load models stats\n\tms, err := loadWorkerModelStatus(db, c)\n\tif err != nil {\n\t\tlog.Warning(\"EstimateWorkerModelsNeeds> Cannot LoadWorkerModelStatus for user %d: %s\\n\", u.ID, err)\n\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNeeds> Cannot loadWorkerModelStatus> %s\", err)\n\t}\n\n\t\/\/ Load actions in queue grouped by action (same requirement, same worker model)\n\tacs, err := loadActionCount(db, c)\n\tif err != nil {\n\t\tlog.Warning(\"EstimateWorkerModelsNeeds> Cannot LoadActionCount for user %d: %s\\n\", u.ID, err)\n\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNeeds> cannot loadActionCount> %s\", err)\n\t}\n\n\t\/\/ Now for each unique action in queue, find a worker model able to run it\n\tvar capas []sdk.Requirement\n\tvar ok bool\n\tfor _, ac := range acs {\n\t\t\/\/ Loop through model in case there is multiple models with the capacity to build current ActionBuild\n\t\t\/\/ This allow a dispatch of Count via round robin on all matching models\n\t\t\/\/ Thus dispatching the load potentially on multiple architectures\/hatcheries\n\t\tloopModels := true\n\t\tfor loopModels {\n\t\t\tloopModels = false\n\n\t\t\tfor i := range ms {\n\t\t\t\tmodelCapaMutex.RLock()\n\t\t\t\tcapas, ok = modelCapabilities[ms[i].ModelID]\n\t\t\t\tmodelCapaMutex.RUnlock()\n\t\t\t\tif !ok {\n\t\t\t\t\tcapas, err = LoadWorkerModelCapabilities(db, ms[i].ModelID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"EstimateWorkerModelNees> cannot loadWorkerModelCapabilities: %s\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif modelCanRun(db, ms[i].ModelName, ac.Action.Requirements, capas) {\n\t\t\t\t\tif ac.Count > 0 {\n\t\t\t\t\t\tms[i].WantedCount++\n\t\t\t\t\t\tac.Count--\n\t\t\t\t\t\tloopModels = true\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/Add model requirement if action has specific kind of requirements\n\t\t\t\t\tms[i].Requirements = []sdk.Requirement{}\n\t\t\t\t\tfor j := range ac.Action.Requirements {\n\t\t\t\t\t\tif ac.Action.Requirements[j].Type == sdk.ServiceRequirement {\n\t\t\t\t\t\t\tms[i].Requirements = append(ms[i].Requirements, ac.Action.Requirements[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/break\n\t\t\t\t}\n\t\t\t} \/\/ !range models\n\t\t} \/\/ !range loopModels\n\t} \/\/ !range acs\n\n\treturn ms, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package web\n\nimport (\n\t\"net\/http\"\n\t\"draringi\/codejam2013\/src\/forecasting\"\n\t\"draringi\/codejam2013\/src\/data\"\n \"encoding\/json\"\n \"time\"\n)\n\ntype future struct {\n Records []record\n}\n\ntype record struct {\n Date time.Time\n Power float64\n}\n\ntype dashboardHelper struct {\n Data *data.CSVData\n Forcast *future\n}\n\ntype Dashboard struct {\n\tchannel chan (*data.CSVData)\n\tJSONAid *dashboardHelper\n}\n\nfunc (self *Dashboard) Init () {\n\tself.channel = make(chan (*data.CSVData), 1)\n self.JSONAid = new(dashboardHelper)\n\tforecasting.PredictPulse(self.channel)\n\tgo func () {\n\t\tfor {\n\t\t\ttmp := <-self.channel\n if tmp != nil {\n\t\t\t\tself.JSONAid.Data = tmp\n\t\t\t}\n\t\t}\n\t} ()\n}\n\nfunc (self *Dashboard) ServeHTTP (w http.ResponseWriter, request *http.Request) {\n\thttp.ServeFile(w, request, \"dashboard.html\")\n}\n\nfunc (self *dashboardHelper) Build (Data *data.CSVData) {\n self.Data = Data\n self.Forcast = new(future)\n self.Forcast.Records = make([]record,len(Data.Data))\n for i :=0; i<len(Data.Data); i++ {\n self.Forcast.Records[i].Date = Data.Data[i].Time\n self.Forcast.Records[i].Power = Data.Data[i].Power\n }\n}\n\nfunc (self *dashboardHelper) jsonify (w http.ResponseWriter) {\n encoder := json.NewEncoder(w)\n encoder.Encode(self.Forcast)\n}\n\nfunc (self *dashboardHelper) ServeHTTP (w http.ResponseWriter, request *http.Request) {\n self.jsonify(w)\n}\n<commit_msg>added error checking to jsonify<commit_after>package web\n\nimport (\n\t\"net\/http\"\n\t\"draringi\/codejam2013\/src\/forecasting\"\n\t\"draringi\/codejam2013\/src\/data\"\n \"encoding\/json\"\n \"time\"\n)\n\ntype future struct {\n Records []record\n}\n\ntype record struct {\n Date time.Time\n Power float64\n}\n\ntype dashboardHelper struct {\n Data *data.CSVData\n Forcast *future\n}\n\ntype Dashboard struct {\n\tchannel chan (*data.CSVData)\n\tJSONAid *dashboardHelper\n}\n\nfunc (self *Dashboard) Init () {\n\tself.channel = make(chan (*data.CSVData), 1)\n self.JSONAid = new(dashboardHelper)\n\tforecasting.PredictPulse(self.channel)\n\tgo func () {\n\t\tfor {\n\t\t\ttmp := <-self.channel\n if tmp != nil {\n\t\t\t\tself.JSONAid.Data = tmp\n\t\t\t}\n\t\t}\n\t} ()\n}\n\nfunc (self *Dashboard) ServeHTTP (w http.ResponseWriter, request *http.Request) {\n\thttp.ServeFile(w, request, \"dashboard.html\")\n}\n\nfunc (self *dashboardHelper) Build (Data *data.CSVData) {\n self.Data = Data\n self.Forcast = new(future)\n self.Forcast.Records = make([]record,len(Data.Data))\n for i :=0; i<len(Data.Data); i++ {\n self.Forcast.Records[i].Date = Data.Data[i].Time\n self.Forcast.Records[i].Power = Data.Data[i].Power\n }\n}\n\nfunc (self *dashboardHelper) jsonify (w http.ResponseWriter) {\n encoder := json.NewEncoder(w)\n if self.Data != nil {\n encoder.Encode(self.Forcast)\n } else {\n http.Error(w,\"Data not loaded...\", 404)\n }\n}\n\nfunc (self *dashboardHelper) ServeHTTP (w http.ResponseWriter, request *http.Request) {\n self.jsonify(w)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ go-sonos\n\/\/ ========\n\/\/\n\/\/ Copyright (c) 2012-2015, Ian T. Richards <ianr@panix.com>\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions\n\/\/ are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n\/\/ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n\/\/ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n\/\/ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n\/\/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"github.com\/ianr0bkny\/go-sonos\/ssdp\"\n\t\"log\"\n)\n\n\/\/ This code identifies UPnP devices on the netork that support the\n\/\/ MusicServices API.\nfunc main() {\n\tlog.Print(\"go-sonos example discovery\\n\")\n\tmgr := ssdp.MakeManager()\n\tmgr.Discover(\"eth0\", \"11209\", false)\n\tqry := ssdp.ServiceQueryTerms{\n\t\tssdp.ServiceKey(\"schemas-upnp-org-MusicServices\"): -1,\n\t}\n\tresult := mgr.QueryServices(qry)\n\tif dev_list, has := result[\"schemas-upnp-org-MusicServices\"]; has {\n\t\tfor _, dev := range dev_list {\n\t\t\tlog.Printf(\"%s %s %s %s %s\\n\", dev.Product(), dev.ProductVersion(), dev.Name(), dev.Location(), dev.UUID())\n\t\t}\n\t}\n\tmgr.Close()\n}\n<commit_msg>adding comments to example code<commit_after>\/\/\n\/\/ go-sonos\n\/\/ ========\n\/\/\n\/\/ Copyright (c) 2012-2015, Ian T. Richards <ianr@panix.com>\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions\n\/\/ are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice,\n\/\/ this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n\/\/ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n\/\/ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n\/\/ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n\/\/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\/\/ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"github.com\/ianr0bkny\/go-sonos\/ssdp\"\n\t\"log\"\n)\n\n\/\/ This code identifies UPnP devices on the netork that support the\n\/\/ MusicServices API.\nfunc main() {\n\tlog.Print(\"go-sonos example discovery\\n\")\n\n\tmgr := ssdp.MakeManager()\n\n\t\/\/ Discover()\n\t\/\/ eth0 := Network device to query for UPnP devices\n\t\/\/ 11209 := Free local port for discovery replies\n\t\/\/ false := Do not subscribe for asynchronous updates\n\tmgr.Discover(\"eth0\", \"11209\", false)\n\n\t\/\/ SericeQueryTerms\n\t\/\/ A map of service keys to minimum required version\n\tqry := ssdp.ServiceQueryTerms{\n\t\tssdp.ServiceKey(\"schemas-upnp-org-MusicServices\"): -1,\n\t}\n\n\t\/\/ Look for the service keys in qry in the database of discovered devices\n\tresult := mgr.QueryServices(qry)\n\tif dev_list, has := result[\"schemas-upnp-org-MusicServices\"]; has {\n\t\tfor _, dev := range dev_list {\n\t\t\tlog.Printf(\"%s %s %s %s %s\\n\", dev.Product(), dev.ProductVersion(), dev.Name(), dev.Location(), dev.UUID())\n\t\t}\n\t}\n\tmgr.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Peter Mattis (peter@cockroachlabs.com)\n\npackage sql_test\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\nvar maxTransfer = flag.Int(\"max-transfer\", 999, \"Maximum amount to transfer in one transaction.\")\nvar numAccounts = flag.Int(\"num-accounts\", 999, \"Number of accounts.\")\n\n\/\/ runBenchmarkBank mirrors the SQL performed by examples\/sql_bank, but\n\/\/ structured as a benchmark for easier usage of the Go performance analysis\n\/\/ tools like pprof, memprof and trace.\nfunc runBenchmarkBank(b *testing.B, db *sql.DB) {\n\tif _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bank`); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\t{\n\t\t\/\/ Initialize the \"accounts\" table.\n\t\tschema := `\nCREATE TABLE IF NOT EXISTS bank.accounts (\n id INT PRIMARY KEY,\n balance INT NOT NULL\n)`\n\t\tif _, err := db.Exec(schema); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tif _, err := db.Exec(\"TRUNCATE TABLE bank.accounts\"); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\n\t\tvar placeholders bytes.Buffer\n\t\tvar values []interface{}\n\t\tfor i := 0; i < *numAccounts; i++ {\n\t\t\tif i > 0 {\n\t\t\t\tplaceholders.WriteString(\", \")\n\t\t\t}\n\t\t\tfmt.Fprintf(&placeholders, \"($%d, 0)\", i+1)\n\t\t\tvalues = append(values, i)\n\t\t}\n\t\tstmt := `INSERT INTO bank.accounts (id, balance) VALUES ` + placeholders.String()\n\t\tif _, err := db.Exec(stmt, values...); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tfrom := rand.Intn(*numAccounts)\n\t\t\tto := rand.Intn(*numAccounts - 1)\n\t\t\tif from == to {\n\t\t\t\tto = *numAccounts - 1\n\t\t\t}\n\n\t\t\tamount := rand.Intn(*maxTransfer)\n\n\t\t\tupdate := `\nUPDATE bank.accounts\n SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END\n WHERE id IN ($1, $2) AND (SELECT balance >= $3 FROM bank.accounts WHERE id = $1)\n`\n\t\t\tif _, err := db.Exec(update, from, to, amount); err != nil {\n\t\t\t\tif log.V(1) {\n\t\t\t\t\tlog.Warning(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t})\n\tb.StopTimer()\n}\n\nfunc BenchmarkBank(b *testing.B) {\n\tbenchmarkCockroach(b, runBenchmarkBank)\n}\n<commit_msg>Fix the bank benchmark.<commit_after>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Peter Mattis (peter@cockroachlabs.com)\n\npackage sql_test\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\nvar maxTransfer = flag.Int(\"max-transfer\", 999, \"Maximum amount to transfer in one transaction.\")\nvar numAccounts = flag.Int(\"num-accounts\", 999, \"Number of accounts.\")\n\n\/\/ runBenchmarkBank mirrors the SQL performed by examples\/sql_bank, but\n\/\/ structured as a benchmark for easier usage of the Go performance analysis\n\/\/ tools like pprof, memprof and trace.\nfunc runBenchmarkBank(b *testing.B, db *sql.DB) {\n\t{\n\t\t\/\/ Initialize the \"bank\" table.\n\t\tschema := `\nCREATE TABLE IF NOT EXISTS bench.bank (\n id INT PRIMARY KEY,\n balance INT NOT NULL\n)`\n\t\tif _, err := db.Exec(schema); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tif _, err := db.Exec(\"TRUNCATE TABLE bench.bank\"); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\n\t\tvar placeholders bytes.Buffer\n\t\tvar values []interface{}\n\t\tfor i := 0; i < *numAccounts; i++ {\n\t\t\tif i > 0 {\n\t\t\t\tplaceholders.WriteString(\", \")\n\t\t\t}\n\t\t\tfmt.Fprintf(&placeholders, \"($%d, 0)\", i+1)\n\t\t\tvalues = append(values, i)\n\t\t}\n\t\tstmt := `INSERT INTO bench.bank (id, balance) VALUES ` + placeholders.String()\n\t\tif _, err := db.Exec(stmt, values...); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tfrom := rand.Intn(*numAccounts)\n\t\t\tto := rand.Intn(*numAccounts - 1)\n\t\t\tif from == to {\n\t\t\t\tto = *numAccounts - 1\n\t\t\t}\n\n\t\t\tamount := rand.Intn(*maxTransfer)\n\n\t\t\t\/\/ TODO(mjibson): We can't use query parameters with this query because\n\t\t\t\/\/ it fails type checking on the CASE expression.\n\t\t\tupdate := fmt.Sprintf(`\nUPDATE bench.bank\n SET balance = CASE id WHEN %[1]d THEN balance-%[3]d WHEN %[2]d THEN balance+%[3]d END\n WHERE id IN (%[1]d, %[2]d) AND (SELECT balance >= %[3]d FROM bench.bank WHERE id = %[1]d)\n`, from, to, amount)\n\t\t\tif _, err := db.Exec(update); err != nil {\n\t\t\t\tif log.V(1) {\n\t\t\t\t\tlog.Warning(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t})\n\tb.StopTimer()\n}\n\nfunc BenchmarkBank_Cockroach(b *testing.B) {\n\tbenchmarkCockroach(b, runBenchmarkBank)\n}\n\nfunc BenchmarkBank_Postgres(b *testing.B) {\n\tbenchmarkPostgres(b, runBenchmarkBank)\n}\n<|endoftext|>"} {"text":"<commit_before>package executor_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"code.cloudfoundry.org\/consuladapter\/consulrunner\"\n\t\"code.cloudfoundry.org\/localip\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\/certauthority\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\/portauthority\"\n\t\"code.cloudfoundry.org\/inigo\/world\"\n)\n\nvar (\n\tcomponentMaker world.ComponentMaker\n\n\tgardenProcess ifrit.Process\n\tgardenClient garden.Client\n\tsuiteTempDir string\n)\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\tsuiteTempDir = world.TempDir(\"before-suite\")\n\tpayload, err := json.Marshal(world.BuiltArtifacts{\n\t\tExecutables: CompileTestedExecutables(),\n\t})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn payload\n}, func(encodedBuiltArtifacts []byte) {\n\tvar builtArtifacts world.BuiltArtifacts\n\n\terr := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, dbBaseConnectionString := world.DBInfo()\n\n\tlocalIP, err := localip.LocalIP()\n\tExpect(err).NotTo(HaveOccurred())\n\n\taddresses := world.ComponentAddresses{\n\t\tGarden: fmt.Sprintf(\"127.0.0.1:%d\", 10000+config.GinkgoConfig.ParallelNode),\n\t\tNATS: fmt.Sprintf(\"127.0.0.1:%d\", 11000+config.GinkgoConfig.ParallelNode),\n\t\tConsul: fmt.Sprintf(\"127.0.0.1:%d\", 12750+config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength),\n\t\tRep: fmt.Sprintf(\"127.0.0.1:%d\", 14000+config.GinkgoConfig.ParallelNode),\n\t\tFileServer: fmt.Sprintf(\"%s:%d\", localIP, 17000+config.GinkgoConfig.ParallelNode),\n\t\tRouter: fmt.Sprintf(\"127.0.0.1:%d\", 18000+config.GinkgoConfig.ParallelNode),\n\t\tBBS: fmt.Sprintf(\"127.0.0.1:%d\", 20500+config.GinkgoConfig.ParallelNode*2),\n\t\tHealth: fmt.Sprintf(\"127.0.0.1:%d\", 20500+config.GinkgoConfig.ParallelNode*2+1),\n\t\tAuctioneer: fmt.Sprintf(\"127.0.0.1:%d\", 23000+config.GinkgoConfig.ParallelNode),\n\t\tSSHProxy: fmt.Sprintf(\"127.0.0.1:%d\", 23500+config.GinkgoConfig.ParallelNode),\n\t\tSSHProxyHealthCheck: fmt.Sprintf(\"127.0.0.1:%d\", 24500+config.GinkgoConfig.ParallelNode),\n\t\tFakeVolmanDriver: fmt.Sprintf(\"127.0.0.1:%d\", 25500+config.GinkgoConfig.ParallelNode),\n\t\tLocket: fmt.Sprintf(\"127.0.0.1:%d\", 26500+config.GinkgoConfig.ParallelNode),\n\t\tSQL: fmt.Sprintf(\"%sdiego_%d\", dbBaseConnectionString, config.GinkgoConfig.ParallelNode),\n\t}\n\n\tnode := GinkgoParallelNode()\n\tstartPort := 1000 * node\n\tportRange := 950\n\tendPort := startPort + portRange\n\n\tallocator, err := portauthority.New(startPort, endPort)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcertDepot := world.TempDirWithParent(suiteTempDir, \"cert-depot\")\n\n\tcertAuthority, err := certauthority.NewCertAuthority(certDepot, \"ca\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcomponentMaker = world.MakeComponentMaker(builtArtifacts, addresses, allocator, certAuthority)\n\tcomponentMaker.Setup()\n})\n\nvar _ = AfterSuite(func() {\n\tcomponentMaker.Teardown()\n\n\tdeleteSuiteTempDir := func() error { return os.RemoveAll(suiteTempDir) }\n\tEventually(deleteSuiteTempDir).Should(Succeed())\n})\n\nvar _ = BeforeEach(func() {\n\tgardenProcess = ginkgomon.Invoke(componentMaker.Garden())\n\tgardenClient = componentMaker.GardenClient()\n})\n\nvar _ = AfterEach(func() {\n\tdestroyContainerErrors := helpers.CleanupGarden(gardenClient)\n\n\thelpers.StopProcesses(gardenProcess)\n\n\tExpect(destroyContainerErrors).To(\n\t\tBeEmpty(),\n\t\t\"%d containers failed to be destroyed!\",\n\t\tlen(destroyContainerErrors),\n\t)\n})\n\nfunc TestExecutor(t *testing.T) {\n\thelpers.RegisterDefaultTimeouts()\n\n\tRegisterFailHandler(Fail)\n\n\tRunSpecs(t, \"Executor Integration Suite\")\n}\n\nfunc CompileTestedExecutables() world.BuiltExecutables {\n\tvar err error\n\n\tbuiltExecutables := world.BuiltExecutables{}\n\n\tbuiltExecutables[\"garden\"], err = gexec.BuildIn(os.Getenv(\"GARDEN_GOPATH\"), \"guardian\/cmd\/gdn\", \"-race\", \"-a\", \"-tags\", \"daemon\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn builtExecutables\n}\n<commit_msg>fix executor's suite<commit_after>package executor_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"code.cloudfoundry.org\/consuladapter\/consulrunner\"\n\t\"code.cloudfoundry.org\/localip\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\/certauthority\"\n\t\"code.cloudfoundry.org\/inigo\/helpers\/portauthority\"\n\t\"code.cloudfoundry.org\/inigo\/world\"\n)\n\nvar (\n\tcomponentMaker world.ComponentMaker\n\n\tgardenProcess ifrit.Process\n\tgardenClient garden.Client\n\tsuiteTempDir string\n)\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\tsuiteTempDir = world.TempDir(\"before-suite\")\n\tpayload, err := json.Marshal(world.BuiltArtifacts{\n\t\tExecutables: CompileTestedExecutables(),\n\t})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn payload\n}, func(encodedBuiltArtifacts []byte) {\n\tvar builtArtifacts world.BuiltArtifacts\n\n\terr := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, dbBaseConnectionString := world.DBInfo()\n\n\tlocalIP, err := localip.LocalIP()\n\tExpect(err).NotTo(HaveOccurred())\n\n\taddresses := world.ComponentAddresses{\n\t\tGarden: fmt.Sprintf(\"127.0.0.1:%d\", 10000+config.GinkgoConfig.ParallelNode),\n\t\tNATS: fmt.Sprintf(\"127.0.0.1:%d\", 11000+config.GinkgoConfig.ParallelNode),\n\t\tConsul: fmt.Sprintf(\"127.0.0.1:%d\", 12750+config.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength),\n\t\tRep: fmt.Sprintf(\"127.0.0.1:%d\", 14000+config.GinkgoConfig.ParallelNode),\n\t\tFileServer: fmt.Sprintf(\"%s:%d\", localIP, 17000+config.GinkgoConfig.ParallelNode),\n\t\tRouter: fmt.Sprintf(\"127.0.0.1:%d\", 18000+config.GinkgoConfig.ParallelNode),\n\t\tBBS: fmt.Sprintf(\"127.0.0.1:%d\", 20500+config.GinkgoConfig.ParallelNode*2),\n\t\tHealth: fmt.Sprintf(\"127.0.0.1:%d\", 20500+config.GinkgoConfig.ParallelNode*2+1),\n\t\tAuctioneer: fmt.Sprintf(\"127.0.0.1:%d\", 23000+config.GinkgoConfig.ParallelNode),\n\t\tSSHProxy: fmt.Sprintf(\"127.0.0.1:%d\", 23500+config.GinkgoConfig.ParallelNode),\n\t\tSSHProxyHealthCheck: fmt.Sprintf(\"127.0.0.1:%d\", 24500+config.GinkgoConfig.ParallelNode),\n\t\tFakeVolmanDriver: fmt.Sprintf(\"127.0.0.1:%d\", 25500+config.GinkgoConfig.ParallelNode),\n\t\tLocket: fmt.Sprintf(\"127.0.0.1:%d\", 26500+config.GinkgoConfig.ParallelNode),\n\t\tSQL: fmt.Sprintf(\"%sdiego_%d\", dbBaseConnectionString, config.GinkgoConfig.ParallelNode),\n\t}\n\n\tnode := GinkgoParallelNode()\n\tstartPort := 1000 * node\n\tportRange := 950\n\tendPort := startPort + portRange\n\n\tallocator, err := portauthority.New(startPort, endPort)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcertDepot := world.TempDirWithParent(suiteTempDir, \"cert-depot\")\n\n\tcertAuthority, err := certauthority.NewCertAuthority(certDepot, \"ca\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcomponentMaker = world.MakeComponentMaker(builtArtifacts, addresses, allocator, certAuthority)\n\tcomponentMaker.Setup()\n})\n\nvar _ = AfterSuite(func() {\n\tcomponentMaker.Teardown()\n\n\tdeleteSuiteTempDir := func() error { return os.RemoveAll(suiteTempDir) }\n\tEventually(deleteSuiteTempDir).Should(Succeed())\n})\n\nvar _ = BeforeEach(func() {\n\tgardenProcess = ginkgomon.Invoke(componentMaker.Garden())\n\tgardenClient = componentMaker.GardenClient()\n})\n\nvar _ = AfterEach(func() {\n\tdestroyContainerErrors := helpers.CleanupGarden(gardenClient)\n\n\thelpers.StopProcesses(gardenProcess)\n\n\tExpect(destroyContainerErrors).To(\n\t\tBeEmpty(),\n\t\t\"%d containers failed to be destroyed!\",\n\t\tlen(destroyContainerErrors),\n\t)\n})\n\nfunc TestExecutor(t *testing.T) {\n\thelpers.RegisterDefaultTimeouts()\n\n\tRegisterFailHandler(Fail)\n\n\tRunSpecs(t, \"Executor Integration Suite\")\n}\n\nfunc CompileTestedExecutables() world.BuiltExecutables {\n\tvar err error\n\n\tbuiltExecutables := world.BuiltExecutables{}\n\n\tcwd, err := os.Getwd()\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(os.Chdir(os.Getenv(\"GARDEN_GOPATH\"))).To(Succeed())\n\tbuiltExecutables[\"garden\"], err = gexec.Build(\".\/cmd\/gdn\", \"-race\", \"-a\", \"-tags\", \"daemon\")\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(os.Chdir(cwd)).To(Succeed())\n\n\treturn builtExecutables\n}\n<|endoftext|>"} {"text":"<commit_before>package chart\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/golang\/freetype\/truetype\"\n)\n\n\/\/ StackedBar is a bar within a StackedBarChart.\ntype StackedBar struct {\n\tName string\n\tWidth int\n\tValues []Value\n}\n\n\/\/ GetWidth returns the width of the bar.\nfunc (sb StackedBar) GetWidth() int {\n\tif sb.Width == 0 {\n\t\treturn 50\n\t}\n\treturn sb.Width\n}\n\n\/\/ StackedBarChart is a chart that draws sections of a bar based on percentages.\ntype StackedBarChart struct {\n\tTitle string\n\tTitleStyle Style\n\n\tWidth int\n\tHeight int\n\tDPI float64\n\n\tBackground Style\n\tCanvas Style\n\n\tXAxis Style\n\tYAxis Style\n\n\tBarSpacing int\n\n\tFont *truetype.Font\n\tdefaultFont *truetype.Font\n\n\tBars []StackedBar\n\tElements []Renderable\n}\n\n\/\/ GetDPI returns the dpi for the chart.\nfunc (sbc StackedBarChart) GetDPI(defaults ...float64) float64 {\n\tif sbc.DPI == 0 {\n\t\tif len(defaults) > 0 {\n\t\t\treturn defaults[0]\n\t\t}\n\t\treturn DefaultDPI\n\t}\n\treturn sbc.DPI\n}\n\n\/\/ GetFont returns the text font.\nfunc (sbc StackedBarChart) GetFont() *truetype.Font {\n\tif sbc.Font == nil {\n\t\treturn sbc.defaultFont\n\t}\n\treturn sbc.Font\n}\n\n\/\/ GetWidth returns the chart width or the default value.\nfunc (sbc StackedBarChart) GetWidth() int {\n\tif sbc.Width == 0 {\n\t\treturn DefaultChartWidth\n\t}\n\treturn sbc.Width\n}\n\n\/\/ GetHeight returns the chart height or the default value.\nfunc (sbc StackedBarChart) GetHeight() int {\n\tif sbc.Height == 0 {\n\t\treturn DefaultChartWidth\n\t}\n\treturn sbc.Height\n}\n\n\/\/ GetBarSpacing returns the spacing between bars.\nfunc (sbc StackedBarChart) GetBarSpacing() int {\n\tif sbc.BarSpacing == 0 {\n\t\treturn 100\n\t}\n\treturn sbc.BarSpacing\n}\n\n\/\/ Render renders the chart with the given renderer to the given io.Writer.\nfunc (sbc StackedBarChart) Render(rp RendererProvider, w io.Writer) error {\n\tif len(sbc.Bars) == 0 {\n\t\treturn errors.New(\"Please provide at least one bar.\")\n\t}\n\n\tr, err := rp(sbc.GetWidth(), sbc.GetHeight())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sbc.Font == nil {\n\t\tdefaultFont, err := GetDefaultFont()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsbc.defaultFont = defaultFont\n\t}\n\tr.SetDPI(sbc.GetDPI(DefaultDPI))\n\n\tcanvasBox := sbc.getAdjustedCanvasBox(sbc.getDefaultCanvasBox())\n\tsbc.drawBars(r, canvasBox)\n\tsbc.drawXAxis(r, canvasBox)\n\tsbc.drawYAxis(r, canvasBox)\n\n\tsbc.drawTitle(r)\n\tfor _, a := range sbc.Elements {\n\t\ta(r, canvasBox, sbc.styleDefaultsElements())\n\t}\n\n\treturn r.Save(w)\n}\n\nfunc (sbc StackedBarChart) drawBars(r Renderer, canvasBox Box) {\n\txoffset := canvasBox.Left\n\tfor _, bar := range sbc.Bars {\n\t\tsbc.drawBar(r, canvasBox, xoffset, bar)\n\t\txoffset += sbc.GetBarSpacing()\n\t}\n}\n\nfunc (sbc StackedBarChart) drawBar(r Renderer, canvasBox Box, xoffset int, bar StackedBar) int {\n\tbxl := xoffset + Math.AbsInt(bar.GetWidth()>>1-sbc.GetBarSpacing()>>1)\n\tbxr := bxl + bar.GetWidth()\n\n\tnormalizedBarComponents := Values(bar.Values).Normalize()\n\tyoffset := canvasBox.Top\n\tfor index, bv := range normalizedBarComponents {\n\t\tbarHeight := int(bv.Value * float64(canvasBox.Height()))\n\t\tbarBox := Box{Top: yoffset, Left: bxl, Right: bxr, Bottom: yoffset + barHeight}\n\t\tDraw.Box(r, barBox, bv.Style.InheritFrom(sbc.styleDefaultsStackedBarValue(index)))\n\t\tyoffset += barHeight\n\t}\n\n\treturn bxr\n}\n\nfunc (sbc StackedBarChart) drawXAxis(r Renderer, canvasBox Box) {\n\tif sbc.XAxis.Show {\n\t\taxisStyle := sbc.XAxis.InheritFrom(sbc.styleDefaultsAxes())\n\t\taxisStyle.WriteToRenderer(r)\n\n\t\tr.MoveTo(canvasBox.Left, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tr.MoveTo(canvasBox.Left, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Left, canvasBox.Bottom+DefaultVerticalTickHeight)\n\t\tr.Stroke()\n\n\t\tcursor := canvasBox.Left\n\t\tfor _, bar := range sbc.Bars {\n\n\t\t\tspacing := (sbc.GetBarSpacing() >> 1)\n\n\t\t\tbarLabelBox := Box{\n\t\t\t\tTop: canvasBox.Bottom + DefaultXAxisMargin,\n\t\t\t\tLeft: cursor,\n\t\t\t\tRight: cursor + bar.GetWidth() + spacing,\n\t\t\t\tBottom: sbc.GetHeight(),\n\t\t\t}\n\t\t\tif len(bar.Name) > 0 {\n\t\t\t\tDraw.TextWithin(r, bar.Name, barLabelBox, axisStyle)\n\t\t\t}\n\t\t\taxisStyle.WriteToRenderer(r)\n\t\t\tr.MoveTo(barLabelBox.Right, canvasBox.Bottom)\n\t\t\tr.LineTo(barLabelBox.Right, canvasBox.Bottom+DefaultVerticalTickHeight)\n\t\t\tr.Stroke()\n\t\t\tcursor += bar.GetWidth() + spacing\n\t\t}\n\t}\n}\n\nfunc (sbc StackedBarChart) drawYAxis(r Renderer, canvasBox Box) {\n\tif sbc.YAxis.Show {\n\t\taxisStyle := sbc.YAxis.InheritFrom(sbc.styleDefaultsAxes())\n\t\taxisStyle.WriteToRenderer(r)\n\t\tr.MoveTo(canvasBox.Right, canvasBox.Top)\n\t\tr.LineTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tr.MoveTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Right+DefaultHorizontalTickWidth, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tticks := Sequence.Float64(1.0, 0.0, 0.2)\n\t\tfor _, t := range ticks {\n\t\t\taxisStyle.GetStrokeOptions().WriteToRenderer(r)\n\t\t\tty := canvasBox.Bottom - int(t*float64(canvasBox.Height()))\n\t\t\tr.MoveTo(canvasBox.Right, ty)\n\t\t\tr.LineTo(canvasBox.Right+DefaultHorizontalTickWidth, ty)\n\t\t\tr.Stroke()\n\n\t\t\taxisStyle.GetTextOptions().WriteToRenderer(r)\n\t\t\ttext := fmt.Sprintf(\"%0.0f%%\", t*100)\n\n\t\t\ttb := r.MeasureText(text)\n\t\t\tDraw.Text(r, text, canvasBox.Right+DefaultYAxisMargin+5, ty+(tb.Height()>>1), axisStyle)\n\t\t}\n\n\t}\n}\n\nfunc (sbc StackedBarChart) drawTitle(r Renderer) {\n\tif len(sbc.Title) > 0 && sbc.TitleStyle.Show {\n\t\tDraw.TextWithin(r, sbc.Title, sbc.Box(), sbc.styleDefaultsTitle())\n\t}\n}\n\nfunc (sbc StackedBarChart) getDefaultCanvasBox() Box {\n\treturn sbc.Box()\n}\n\nfunc (sbc StackedBarChart) getAdjustedCanvasBox(canvasBox Box) Box {\n\tvar totalWidth int\n\tfor index, bar := range sbc.Bars {\n\t\ttotalWidth += bar.GetWidth()\n\t\tif index < len(sbc.Bars)-1 {\n\t\t\ttotalWidth += sbc.GetBarSpacing()\n\t\t}\n\t}\n\n\treturn Box{\n\t\tTop: canvasBox.Top,\n\t\tLeft: canvasBox.Left,\n\t\tRight: canvasBox.Left + totalWidth,\n\t\tBottom: canvasBox.Bottom,\n\t}\n}\n\n\/\/ Box returns the chart bounds as a box.\nfunc (sbc StackedBarChart) Box() Box {\n\tdpr := sbc.Background.Padding.GetRight(10)\n\tdpb := sbc.Background.Padding.GetBottom(50)\n\n\treturn Box{\n\t\tTop: 20,\n\t\tLeft: 20,\n\t\tRight: sbc.GetWidth() - dpr,\n\t\tBottom: sbc.GetHeight() - dpb,\n\t}\n}\n\nfunc (sbc StackedBarChart) styleDefaultsStackedBarValue(index int) Style {\n\treturn Style{\n\t\tStrokeColor: GetAlternateColor(index),\n\t\tStrokeWidth: 3.0,\n\t\tFillColor: GetAlternateColor(index),\n\t}\n}\n\nfunc (sbc StackedBarChart) styleDefaultsTitle() Style {\n\treturn sbc.TitleStyle.InheritFrom(Style{\n\t\tFontColor: DefaultTextColor,\n\t\tFont: sbc.GetFont(),\n\t\tFontSize: sbc.getTitleFontSize(),\n\t\tTextHorizontalAlign: TextHorizontalAlignCenter,\n\t\tTextVerticalAlign: TextVerticalAlignTop,\n\t\tTextWrap: TextWrapWord,\n\t})\n}\n\nfunc (sbc StackedBarChart) getTitleFontSize() float64 {\n\teffectiveDimension := Math.MinInt(sbc.GetWidth(), sbc.GetHeight())\n\tif effectiveDimension >= 2048 {\n\t\treturn 48\n\t} else if effectiveDimension >= 1024 {\n\t\treturn 24\n\t} else if effectiveDimension >= 512 {\n\t\treturn 18\n\t} else if effectiveDimension >= 256 {\n\t\treturn 12\n\t}\n\treturn 10\n}\n\nfunc (sbc StackedBarChart) styleDefaultsAxes() Style {\n\treturn Style{\n\t\tStrokeColor: DefaultAxisColor,\n\t\tFont: sbc.GetFont(),\n\t\tFontSize: DefaultAxisFontSize,\n\t\tFontColor: DefaultAxisColor,\n\t\tTextHorizontalAlign: TextHorizontalAlignCenter,\n\t\tTextVerticalAlign: TextVerticalAlignTop,\n\t\tTextWrap: TextWrapWord,\n\t}\n}\nfunc (sbc StackedBarChart) styleDefaultsElements() Style {\n\treturn Style{\n\t\tFont: sbc.GetFont(),\n\t}\n}\n<commit_msg>looking spiffy.<commit_after>package chart\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\n\t\"github.com\/golang\/freetype\/truetype\"\n)\n\n\/\/ StackedBar is a bar within a StackedBarChart.\ntype StackedBar struct {\n\tName string\n\tWidth int\n\tValues []Value\n}\n\n\/\/ GetWidth returns the width of the bar.\nfunc (sb StackedBar) GetWidth() int {\n\tif sb.Width == 0 {\n\t\treturn 50\n\t}\n\treturn sb.Width\n}\n\n\/\/ StackedBarChart is a chart that draws sections of a bar based on percentages.\ntype StackedBarChart struct {\n\tTitle string\n\tTitleStyle Style\n\n\tWidth int\n\tHeight int\n\tDPI float64\n\n\tBackground Style\n\tCanvas Style\n\n\tXAxis Style\n\tYAxis Style\n\n\tBarSpacing int\n\n\tFont *truetype.Font\n\tdefaultFont *truetype.Font\n\n\tBars []StackedBar\n\tElements []Renderable\n}\n\n\/\/ GetDPI returns the dpi for the chart.\nfunc (sbc StackedBarChart) GetDPI(defaults ...float64) float64 {\n\tif sbc.DPI == 0 {\n\t\tif len(defaults) > 0 {\n\t\t\treturn defaults[0]\n\t\t}\n\t\treturn DefaultDPI\n\t}\n\treturn sbc.DPI\n}\n\n\/\/ GetFont returns the text font.\nfunc (sbc StackedBarChart) GetFont() *truetype.Font {\n\tif sbc.Font == nil {\n\t\treturn sbc.defaultFont\n\t}\n\treturn sbc.Font\n}\n\n\/\/ GetWidth returns the chart width or the default value.\nfunc (sbc StackedBarChart) GetWidth() int {\n\tif sbc.Width == 0 {\n\t\treturn DefaultChartWidth\n\t}\n\treturn sbc.Width\n}\n\n\/\/ GetHeight returns the chart height or the default value.\nfunc (sbc StackedBarChart) GetHeight() int {\n\tif sbc.Height == 0 {\n\t\treturn DefaultChartWidth\n\t}\n\treturn sbc.Height\n}\n\n\/\/ GetBarSpacing returns the spacing between bars.\nfunc (sbc StackedBarChart) GetBarSpacing() int {\n\tif sbc.BarSpacing == 0 {\n\t\treturn 100\n\t}\n\treturn sbc.BarSpacing\n}\n\n\/\/ Render renders the chart with the given renderer to the given io.Writer.\nfunc (sbc StackedBarChart) Render(rp RendererProvider, w io.Writer) error {\n\tif len(sbc.Bars) == 0 {\n\t\treturn errors.New(\"Please provide at least one bar.\")\n\t}\n\n\tr, err := rp(sbc.GetWidth(), sbc.GetHeight())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sbc.Font == nil {\n\t\tdefaultFont, err := GetDefaultFont()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsbc.defaultFont = defaultFont\n\t}\n\tr.SetDPI(sbc.GetDPI(DefaultDPI))\n\n\tcanvasBox := sbc.getAdjustedCanvasBox(sbc.getDefaultCanvasBox())\n\tsbc.drawBars(r, canvasBox)\n\tsbc.drawXAxis(r, canvasBox)\n\tsbc.drawYAxis(r, canvasBox)\n\n\tsbc.drawTitle(r)\n\tfor _, a := range sbc.Elements {\n\t\ta(r, canvasBox, sbc.styleDefaultsElements())\n\t}\n\n\treturn r.Save(w)\n}\n\nfunc (sbc StackedBarChart) drawBars(r Renderer, canvasBox Box) {\n\txoffset := canvasBox.Left\n\tfor _, bar := range sbc.Bars {\n\t\tsbc.drawBar(r, canvasBox, xoffset, bar)\n\t\txoffset += sbc.GetBarSpacing()\n\t}\n}\n\nfunc (sbc StackedBarChart) drawBar(r Renderer, canvasBox Box, xoffset int, bar StackedBar) int {\n\tbxl := xoffset + Math.AbsInt(bar.GetWidth()>>1-sbc.GetBarSpacing()>>1)\n\tbxr := bxl + bar.GetWidth()\n\n\tnormalizedBarComponents := Values(bar.Values).Normalize()\n\tyoffset := canvasBox.Top\n\tfor index, bv := range normalizedBarComponents {\n\t\tbarHeight := int(math.Ceil(bv.Value * float64(canvasBox.Height())))\n\t\tbarBox := Box{Top: yoffset, Left: bxl, Right: bxr, Bottom: Math.MinInt(yoffset+barHeight, canvasBox.Bottom-DefaultStrokeWidth)}\n\t\tDraw.Box(r, barBox, bv.Style.InheritFrom(sbc.styleDefaultsStackedBarValue(index)))\n\t\tyoffset += barHeight\n\t}\n\n\treturn bxr\n}\n\nfunc (sbc StackedBarChart) drawXAxis(r Renderer, canvasBox Box) {\n\tif sbc.XAxis.Show {\n\t\taxisStyle := sbc.XAxis.InheritFrom(sbc.styleDefaultsAxes())\n\t\taxisStyle.WriteToRenderer(r)\n\n\t\tr.MoveTo(canvasBox.Left, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tr.MoveTo(canvasBox.Left, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Left, canvasBox.Bottom+DefaultVerticalTickHeight)\n\t\tr.Stroke()\n\n\t\tcursor := canvasBox.Left\n\t\tfor _, bar := range sbc.Bars {\n\n\t\t\tspacing := (sbc.GetBarSpacing() >> 1)\n\n\t\t\tbarLabelBox := Box{\n\t\t\t\tTop: canvasBox.Bottom + DefaultXAxisMargin,\n\t\t\t\tLeft: cursor,\n\t\t\t\tRight: cursor + bar.GetWidth() + spacing,\n\t\t\t\tBottom: sbc.GetHeight(),\n\t\t\t}\n\t\t\tif len(bar.Name) > 0 {\n\t\t\t\tDraw.TextWithin(r, bar.Name, barLabelBox, axisStyle)\n\t\t\t}\n\t\t\taxisStyle.WriteToRenderer(r)\n\t\t\tr.MoveTo(barLabelBox.Right, canvasBox.Bottom)\n\t\t\tr.LineTo(barLabelBox.Right, canvasBox.Bottom+DefaultVerticalTickHeight)\n\t\t\tr.Stroke()\n\t\t\tcursor += bar.GetWidth() + spacing\n\t\t}\n\t}\n}\n\nfunc (sbc StackedBarChart) drawYAxis(r Renderer, canvasBox Box) {\n\tif sbc.YAxis.Show {\n\t\taxisStyle := sbc.YAxis.InheritFrom(sbc.styleDefaultsAxes())\n\t\taxisStyle.WriteToRenderer(r)\n\t\tr.MoveTo(canvasBox.Right, canvasBox.Top)\n\t\tr.LineTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tr.MoveTo(canvasBox.Right, canvasBox.Bottom)\n\t\tr.LineTo(canvasBox.Right+DefaultHorizontalTickWidth, canvasBox.Bottom)\n\t\tr.Stroke()\n\n\t\tticks := Sequence.Float64(1.0, 0.0, 0.2)\n\t\tfor _, t := range ticks {\n\t\t\taxisStyle.GetStrokeOptions().WriteToRenderer(r)\n\t\t\tty := canvasBox.Bottom - int(t*float64(canvasBox.Height()))\n\t\t\tr.MoveTo(canvasBox.Right, ty)\n\t\t\tr.LineTo(canvasBox.Right+DefaultHorizontalTickWidth, ty)\n\t\t\tr.Stroke()\n\n\t\t\taxisStyle.GetTextOptions().WriteToRenderer(r)\n\t\t\ttext := fmt.Sprintf(\"%0.0f%%\", t*100)\n\n\t\t\ttb := r.MeasureText(text)\n\t\t\tDraw.Text(r, text, canvasBox.Right+DefaultYAxisMargin+5, ty+(tb.Height()>>1), axisStyle)\n\t\t}\n\n\t}\n}\n\nfunc (sbc StackedBarChart) drawTitle(r Renderer) {\n\tif len(sbc.Title) > 0 && sbc.TitleStyle.Show {\n\t\tDraw.TextWithin(r, sbc.Title, sbc.Box(), sbc.styleDefaultsTitle())\n\t}\n}\n\nfunc (sbc StackedBarChart) getDefaultCanvasBox() Box {\n\treturn sbc.Box()\n}\n\nfunc (sbc StackedBarChart) getAdjustedCanvasBox(canvasBox Box) Box {\n\tvar totalWidth int\n\tfor index, bar := range sbc.Bars {\n\t\ttotalWidth += bar.GetWidth()\n\t\tif index < len(sbc.Bars)-1 {\n\t\t\ttotalWidth += sbc.GetBarSpacing()\n\t\t}\n\t}\n\n\treturn Box{\n\t\tTop: canvasBox.Top,\n\t\tLeft: canvasBox.Left,\n\t\tRight: canvasBox.Left + totalWidth,\n\t\tBottom: canvasBox.Bottom,\n\t}\n}\n\n\/\/ Box returns the chart bounds as a box.\nfunc (sbc StackedBarChart) Box() Box {\n\tdpr := sbc.Background.Padding.GetRight(10)\n\tdpb := sbc.Background.Padding.GetBottom(50)\n\n\treturn Box{\n\t\tTop: 20,\n\t\tLeft: 20,\n\t\tRight: sbc.GetWidth() - dpr,\n\t\tBottom: sbc.GetHeight() - dpb,\n\t}\n}\n\nfunc (sbc StackedBarChart) styleDefaultsStackedBarValue(index int) Style {\n\treturn Style{\n\t\tStrokeColor: GetAlternateColor(index),\n\t\tStrokeWidth: 3.0,\n\t\tFillColor: GetAlternateColor(index),\n\t}\n}\n\nfunc (sbc StackedBarChart) styleDefaultsTitle() Style {\n\treturn sbc.TitleStyle.InheritFrom(Style{\n\t\tFontColor: DefaultTextColor,\n\t\tFont: sbc.GetFont(),\n\t\tFontSize: sbc.getTitleFontSize(),\n\t\tTextHorizontalAlign: TextHorizontalAlignCenter,\n\t\tTextVerticalAlign: TextVerticalAlignTop,\n\t\tTextWrap: TextWrapWord,\n\t})\n}\n\nfunc (sbc StackedBarChart) getTitleFontSize() float64 {\n\teffectiveDimension := Math.MinInt(sbc.GetWidth(), sbc.GetHeight())\n\tif effectiveDimension >= 2048 {\n\t\treturn 48\n\t} else if effectiveDimension >= 1024 {\n\t\treturn 24\n\t} else if effectiveDimension >= 512 {\n\t\treturn 18\n\t} else if effectiveDimension >= 256 {\n\t\treturn 12\n\t}\n\treturn 10\n}\n\nfunc (sbc StackedBarChart) styleDefaultsAxes() Style {\n\treturn Style{\n\t\tStrokeColor: DefaultAxisColor,\n\t\tFont: sbc.GetFont(),\n\t\tFontSize: DefaultAxisFontSize,\n\t\tFontColor: DefaultAxisColor,\n\t\tTextHorizontalAlign: TextHorizontalAlignCenter,\n\t\tTextVerticalAlign: TextVerticalAlignTop,\n\t\tTextWrap: TextWrapWord,\n\t}\n}\nfunc (sbc StackedBarChart) styleDefaultsElements() Style {\n\treturn Style{\n\t\tFont: sbc.GetFont(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package keeper\n\nimport (\n\t\"context\"\n\n\tsdk \"github.com\/cosmos\/cosmos-sdk\/types\"\n\tsdkerrors \"github.com\/cosmos\/cosmos-sdk\/types\/errors\"\n\t\"github.com\/zigbee-alliance\/distributed-compliance-ledger\/x\/validator\/types\"\n)\n\nfunc (k msgServer) ApproveDisableValidator(goCtx context.Context, msg *types.MsgApproveDisableValidator) (*types.MsgApproveDisableValidatorResponse, error) {\n\tctx := sdk.UnwrapSDKContext(goCtx)\n\n\tcreatorAddr, err := sdk.AccAddressFromBech32(msg.Creator)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, \"Invalid creator address: (%s)\", err)\n\t}\n\n\tvalidatorAddr, err := sdk.ValAddressFromBech32(msg.Address)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, \"Invalid validator address: (%s)\", err)\n\t}\n\n\t\/\/ check if message creator has enough rights to approve disable validator\n\tif !k.dclauthKeeper.HasRole(ctx, creatorAddr, types.VoteForDisableValidatorRole) {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized,\n\t\t\t\"MsgApproveDisableValidator transaction should be signed by an account with the %s role\",\n\t\t\ttypes.VoteForDisableValidatorRole,\n\t\t)\n\t}\n\n\t\/\/ check if proposed disable validator exists\n\tproposedDisableValidator, isFound := k.GetProposedDisableValidator(ctx, validatorAddr.String())\n\tif !isFound {\n\t\treturn nil, types.NewErrProposedDisableValidatorDoesNotExist(msg.Address)\n\t}\n\n\t\/\/ check if disable validator already has approval form message creator\n\tif proposedDisableValidator.HasApprovalFrom(creatorAddr) {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized,\n\t\t\t\"Disabled validator with address=%v already has approval from=%v\",\n\t\t\tmsg.Address, msg.Creator,\n\t\t)\n\t}\n\n\t\/\/ append approval\n\tgrant := types.Grant{\n\t\tAddress: creatorAddr.String(),\n\t\tTime: msg.Time,\n\t\tInfo: msg.Info,\n\t}\n\n\tproposedDisableValidator.Approvals = append(proposedDisableValidator.Approvals, &grant)\n\n\t\/\/ check if proposed disable validator has enough approvals\n\tif len(proposedDisableValidator.Approvals) == k.DisableValidatorApprovalsCount(ctx) {\n\t\t\/\/ remove disable validator\n\t\tk.RemoveProposedDisableValidator(ctx, proposedDisableValidator.Address)\n\n\t\tapprovedDisableValidator := types.DisabledValidator{\n\t\t\tAddress: proposedDisableValidator.Address,\n\t\t\tCreator: proposedDisableValidator.Creator,\n\t\t\tApprovals: proposedDisableValidator.Approvals,\n\t\t\tDisabledByNodeAdmin: false,\n\t\t}\n\n\t\t\/\/ Disable validator\n\t\tvalidator, _ := k.GetValidator(ctx, validatorAddr)\n\t\tk.Jail(ctx, validator, proposedDisableValidator.Approvals[0].Info)\n\n\t\tk.SetDisabledValidator(ctx, approvedDisableValidator)\n\t} else {\n\t\t\/\/ update proposed disable validator\n\t\tk.SetProposedDisableValidator(ctx, proposedDisableValidator)\n\t}\n\n\treturn &types.MsgApproveDisableValidatorResponse{}, nil\n}\n<commit_msg>Add checking for double vote for proposedDisableValidator and ApproveDisableValidator from same Trustee<commit_after>package keeper\n\nimport (\n\t\"context\"\n\n\tsdk \"github.com\/cosmos\/cosmos-sdk\/types\"\n\tsdkerrors \"github.com\/cosmos\/cosmos-sdk\/types\/errors\"\n\t\"github.com\/zigbee-alliance\/distributed-compliance-ledger\/x\/validator\/types\"\n)\n\nfunc (k msgServer) ApproveDisableValidator(goCtx context.Context, msg *types.MsgApproveDisableValidator) (*types.MsgApproveDisableValidatorResponse, error) {\n\tctx := sdk.UnwrapSDKContext(goCtx)\n\n\tcreatorAddr, err := sdk.AccAddressFromBech32(msg.Creator)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, \"Invalid creator address: (%s)\", err)\n\t}\n\n\tvalidatorAddr, err := sdk.ValAddressFromBech32(msg.Address)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, \"Invalid validator address: (%s)\", err)\n\t}\n\n\t\/\/ check if message creator has enough rights to approve disable validator\n\tif !k.dclauthKeeper.HasRole(ctx, creatorAddr, types.VoteForDisableValidatorRole) {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized,\n\t\t\t\"MsgApproveDisableValidator transaction should be signed by an account with the %s role\",\n\t\t\ttypes.VoteForDisableValidatorRole,\n\t\t)\n\t}\n\n\t\/\/ check if proposed disable validator exists\n\tproposedDisableValidator, isFound := k.GetProposedDisableValidator(ctx, validatorAddr.String())\n\tif !isFound {\n\t\treturn nil, types.NewErrProposedDisableValidatorDoesNotExist(msg.Address)\n\t}\n\n\t\/\/ check if disable validator already has reject from message creator\n\tif proposedDisableValidator.HasRejectDisableFrom(creatorAddr) {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized,\n\t\t\t\"Disabled validator with address=%v already has reject from=%v\",\n\t\t\tmsg.Address,\n\t\t\tmsg.Creator,\n\t\t)\n\t}\n\n\t\/\/ check if disable validator already has approval form message creator\n\tif proposedDisableValidator.HasApprovalFrom(creatorAddr) {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized,\n\t\t\t\"Disabled validator with address=%v already has approval from=%v\",\n\t\t\tmsg.Address, msg.Creator,\n\t\t)\n\t}\n\n\t\/\/ append approval\n\tgrant := types.Grant{\n\t\tAddress: creatorAddr.String(),\n\t\tTime: msg.Time,\n\t\tInfo: msg.Info,\n\t}\n\n\tproposedDisableValidator.Approvals = append(proposedDisableValidator.Approvals, &grant)\n\n\t\/\/ check if proposed disable validator has enough approvals\n\tif len(proposedDisableValidator.Approvals) == k.DisableValidatorApprovalsCount(ctx) {\n\t\t\/\/ remove disable validator\n\t\tk.RemoveProposedDisableValidator(ctx, proposedDisableValidator.Address)\n\n\t\tapprovedDisableValidator := types.DisabledValidator{\n\t\t\tAddress: proposedDisableValidator.Address,\n\t\t\tCreator: proposedDisableValidator.Creator,\n\t\t\tApprovals: proposedDisableValidator.Approvals,\n\t\t\tRejectApprovals: proposedDisableValidator.RejectApprovals,\n\t\t\tDisabledByNodeAdmin: false,\n\t\t}\n\n\t\t\/\/ Disable validator\n\t\tvalidator, _ := k.GetValidator(ctx, validatorAddr)\n\t\tk.Jail(ctx, validator, proposedDisableValidator.Approvals[0].Info)\n\n\t\tk.SetDisabledValidator(ctx, approvedDisableValidator)\n\t} else {\n\t\t\/\/ update proposed disable validator\n\t\tk.SetProposedDisableValidator(ctx, proposedDisableValidator)\n\t}\n\n\treturn &types.MsgApproveDisableValidatorResponse{}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>1db0dc84-2e57-11e5-9284-b827eb9e62be<commit_msg>1db60aa6-2e57-11e5-9284-b827eb9e62be<commit_after>1db60aa6-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\n\/\/WARNING - this chaincode's ID is hard-coded in chaincode_example04 to illustrate one way of\n\/\/calling chaincode from a chaincode. If this example is modified, chaincode_example04.go has\n\/\/to be modified as well with the new ID of chaincode_example02.\n\/\/chaincode_example05 show's how chaincode ID can be passed in as a parameter instead of\n\/\/hard-coding.\n\nimport (\t\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"bytes\"\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\/shim\"\n)\n\n\/\/ SimpleChaincode example simple Chaincode implementation\ntype SimpleChaincode struct {\n}\n\nfunc (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tvar A, B string \/\/ Entities\n\tvar Aval, Bval int \/\/ Asset holdings\n\tvar err error\n\n\tif len(args) != 4 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 4\")\n\t}\n\n\t\/\/ Initialize the chaincode\n\tA = args[0]\n\tAval, err = strconv.Atoi(args[1])\n\tif err != nil {\n\t\treturn nil, errors.New(\"Expecting integer value for asset holding\")\n\t}\n\tB = args[2]\n\tBval, err = strconv.Atoi(args[3])\n\tif err != nil {\n\t\treturn nil, errors.New(\"Expecting integer value for asset holding\")\n\t}\n\tfmt.Printf(\"Aval = %d, Bval = %d\\n\", Aval, Bval)\n\n\t\/\/ Write the state to the ledger\n\terr = stub.PutState(A, []byte(strconv.Itoa(Aval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = stub.PutState(B, []byte(strconv.Itoa(Bval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}\n\n\nfunc (t *SimpleChaincode) write(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n var key, value string\t\n var err error\n fmt.Println(\"Storing the parameters in hyperledger fabric...\")\n\n \/*if len(args) != 2 {\n return nil, errors.New(\"Incorrect number of arguments. Expecting 2. name of the key and value to set\")\n }*\/\n\t\n\tif(len(args)%2 != 0) {\n\t\t fmt.Printf(\"Incorrect number of arguments. One of the keys or values is missing.\")\n\t\t fmt.Println(\"\")\n\t\t\t\t \n }else{\n\t for i := 0; i < len(args); i++ {\n\t if(i%2 == 0){\n\t\t if args[i] != \"\" {\n fmt.Printf(\"Key: %s\", args[i])\n\t\t\t\t fmt.Println(\"\")\n\t\t\t\t key = args[i] \n\t\t\t\t i++\n }\n\t\t if(i!=len(args)) {\n\t\t\t fmt.Printf(\"Value: %s\", args[i])\n\t\t\t fmt.Println(\"\")\n\t\t\t\t value = args[i]\n\t\t\t }\n\t\t err = stub.PutState(key, []byte(value)) \/\/write the variable into the chaincode state\n\t\t\t if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t }\n\t\t }\n }\n\t}\n\n\t\/*\n key = args[0] \/\/rename for fun\n value = args[1]\n err = stub.PutState(key, []byte(value)) \/\/write the variable into the chaincode state\n if err != nil {\n return nil, err\n }\n\t*\/\n\t\t\n return nil, nil\n}\n\n\n\/\/ Transaction makes payment of X units from A to B\nfunc (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tif function == \"delete\" {\n\t\t\/\/ Deletes an entity from its state\n\t\treturn t.delete(stub, args)\n\t}\n\tif function == \"write\" {\n\t\tfmt.Println(\"Calling write()\")\n return t.write(stub, args)\n }\n\n\tvar A, B string \/\/ Entities\n\tvar Aval, Bval int \/\/ Asset holdings\n\tvar X int \/\/ Transaction value\n\tvar err error\n\n\tif len(args) != 3 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tA = args[0]\n\tB = args[1]\n\t\n\t\n\t\/\/ Get the state from the ledger\n\t\/\/ TODO: will be nice to have a GetAllState call to ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get state\")\n\t}\n\tif Avalbytes == nil {\n\t\treturn nil, errors.New(\"Entity not found\")\n\t}\n\tAval, _ = strconv.Atoi(string(Avalbytes))\n\n\tBvalbytes, err := stub.GetState(B)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get state\")\n\t}\n\tif Bvalbytes == nil {\n\t\treturn nil, errors.New(\"Entity not found\")\n\t}\n\tBval, _ = strconv.Atoi(string(Bvalbytes))\n\n\t\/\/ Perform the execution\n\tX, err = strconv.Atoi(args[2])\n\tAval = Aval - X\n\tBval = Bval + X\n\tfmt.Printf(\"Aval = %d, Bval = %d\\n\", Aval, Bval)\n\t\n\t\/\/Store state for transactions\n\tvar transacted, historyval string\n\ttransacted = \"T_\"+args[0]+\"|\"+args[1]\n\tTvalbytes, err := stub.GetState(transacted)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get transacted state\")\n\t}\n\tif Tvalbytes == nil {\n\t\thistoryval = args[2]\n\t}else{\n\t\thistoryval = string(Tvalbytes)\n\t\thistoryval = historyval+\",\"+args[2]\t\t\n\t}\t\n\terr = stub.PutState(transacted, []byte(historyval))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\/\/Store state for sponsor transactions\n\tvar s_transactions, s_history string\n\ts_transactions = \"T_\"+args[0]\n\tSvalbytes, err := stub.GetState(s_transactions)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get sponsor transacted state\")\n\t}\n\tif Svalbytes == nil {\n\t\ts_history = args[1]+\"|\"+args[2]\n\t}else{\n\t\ts_history = string(Svalbytes)\n\t\ts_history = s_history+\",\"+args[1]+\"|\"+args[2]\t\n\t}\t\n\terr = stub.PutState(s_transactions, []byte(s_history))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\/\/Store state for Idea transactions\n\tvar i_transactions, i_history string\n\ti_transactions = \"T_\"+args[1]\n\tIvalbytes, err := stub.GetState(i_transactions)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get idea transacted state\")\n\t}\n\tif Ivalbytes == nil {\n\t\ti_history = args[0]+\"|\"+args[2]\n\t}else{\n\t\ti_history = string(Ivalbytes)\n\t\ti_history = i_history+\",\"+args[0]+\"|\"+args[2]\t\n\t}\t\n\terr = stub.PutState(i_transactions, []byte(i_history))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\n\n\t\/\/ Write the state back to the ledger\n\terr = stub.PutState(A, []byte(strconv.Itoa(Aval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = stub.PutState(B, []byte(strconv.Itoa(Bval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Deletes an entity from state\nfunc (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tA := args[0]\n\n\t\/\/ Delete the key from the state in ledger\n\terr := stub.DelState(A)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to delete state\")\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) Query(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\t\/*if function != \"query\" {\n\t\treturn nil, errors.New(\"Invalid query function name. Expecting \\\"query\\\"\")\n\t}*\/\n\t\n\tif function == \"queryAll\" {\n\t\tfmt.Println(\"Calling QueryAll()\")\n return t.queryAll(stub, args)\n }\n\t\n\tif function == \"queryTransact\" {\n\t\tfmt.Println(\"Calling QueryTransact()\")\n return t.queryTransact(stub, args)\n }\n\t\n\tvar A string \/\/ Entities\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}\n\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tAvalbytes = []byte(\"0\")\n\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\t\/\/return nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n}\n\n\n\n\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) queryAll(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\t\t\n\t\/\/var A string \/\/ Entities\n\t\/\/var err error\n\n\t\/*if len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}*\/\n var RetValue []byte\n\tvar buffer bytes.Buffer \n var jsonRespString string \n\t\tfor i := 0; i < len(args); i++ {\t \n\t\t Avalbytes, err := stub.GetState(args[i])\n\t\t\tif err != nil {\n\t\t\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + args[i] + \"\\\"}\"\n\t\t\t\treturn nil, errors.New(jsonResp)\n\t\t\t}\n\n\t\t\tif Avalbytes == nil {\n\t\t\t\tAvalbytes = []byte(\"0\")\n\t\t\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + args[i] + \"\\\"}\"\n\t\t\t\t\/\/return nil, errors.New(jsonResp)\n\t\t\t}\n\t\t\tif(i!=len(args)-1) {\n\t\t\t jsonRespString = string(Avalbytes)+\",\"\n\t\t\t}else{\n\t\t\t jsonRespString = string(Avalbytes)\n\t\t\t}\n\t\t\tbuffer.WriteString(jsonRespString)\t\t\t\n\t\t\tRetValue = []byte(buffer.String())\t\t\t\n\t\t\t\n\t\t}\n\t\tjsonResp := \"{\"+buffer.String()+\"}\"\n\t\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\t\treturn RetValue, nil\n\t\t\n\t\n\t\/*\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n\t*\/\n}\n\n\n\n\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) queryTransact(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\t\t\n\t\/\/var A string \/\/ Entities\n\t\/\/var err error\n\n\t\/*if len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}*\/\n var RetValue []byte\n\tvar buffer bytes.Buffer \n var jsonRespString, queryparam string \n\tqueryparam = \"T_\"+args[0]\n\t\t\t \n\t\tTvalbytes, err := stub.GetState(queryparam)\n\t\tif err != nil {\n\t\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + args[0] + \"\\\"}\"\n\t\t\treturn nil, errors.New(jsonResp)\n\t\t}\n\n\t\tif Tvalbytes == nil {\n\t\t\tTvalbytes = []byte(\"0\")\n\t\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + args[i] + \"\\\"}\"\n\t\t\t\/\/return nil, errors.New(jsonResp)\n\t\t}\n\t\t\n\t\tjsonRespString = string(Tvalbytes)\t\t\n\t\tbuffer.WriteString(jsonRespString)\t\t\t\n\t\tRetValue = []byte(buffer.String())\n\t\tjsonResp := \"{\"+buffer.String()+\"}\"\n\t\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\t\treturn RetValue, nil\n\t\t\n\t\n\t\/*\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n\t*\/\n}\n\n\n\n\nfunc main() {\n\terr := shim.Start(new(SimpleChaincode))\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t}\n}\n<commit_msg>Updated write method<commit_after>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\n\/\/WARNING - this chaincode's ID is hard-coded in chaincode_example04 to illustrate one way of\n\/\/calling chaincode from a chaincode. If this example is modified, chaincode_example04.go has\n\/\/to be modified as well with the new ID of chaincode_example02.\n\/\/chaincode_example05 show's how chaincode ID can be passed in as a parameter instead of\n\/\/hard-coding.\n\nimport (\t\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"bytes\"\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\/shim\"\n)\n\n\/\/ SimpleChaincode example simple Chaincode implementation\ntype SimpleChaincode struct {\n}\n\nfunc (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tvar A, B string \/\/ Entities\n\tvar Aval, Bval int \/\/ Asset holdings\n\tvar err error\n\n\tif len(args) != 4 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 4\")\n\t}\n\n\t\/\/ Initialize the chaincode\n\tA = args[0]\n\tAval, err = strconv.Atoi(args[1])\n\tif err != nil {\n\t\treturn nil, errors.New(\"Expecting integer value for asset holding\")\n\t}\n\tB = args[2]\n\tBval, err = strconv.Atoi(args[3])\n\tif err != nil {\n\t\treturn nil, errors.New(\"Expecting integer value for asset holding\")\n\t}\n\tfmt.Printf(\"Aval = %d, Bval = %d\\n\", Aval, Bval)\n\n\t\/\/ Write the state to the ledger\n\terr = stub.PutState(A, []byte(strconv.Itoa(Aval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = stub.PutState(B, []byte(strconv.Itoa(Bval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}\n\n\nfunc (t *SimpleChaincode) write(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n var key, value string\t\n var err error\n fmt.Println(\"Storing the parameters in hyperledger fabric...\")\n\n \/*if len(args) != 2 {\n return nil, errors.New(\"Incorrect number of arguments. Expecting 2. name of the key and value to set\")\n }*\/\n\t\n\tif(len(args)%2 != 0) {\n\t\t fmt.Printf(\"Incorrect number of arguments. One of the keys or values is missing.\")\n\t\t fmt.Println(\"\")\n\t\t\t\t \n }else{\n\t for i := 0; i < len(args); i++ {\n\t if(i%2 == 0){\n\t\t if args[i] != \"\" {\n fmt.Printf(\"Key: %s\", args[i])\n\t\t\t\t fmt.Println(\"\")\n\t\t\t\t key = args[i] \n\t\t\t\t i++\n }\n\t\t if(i!=len(args)) {\n\t\t\t fmt.Printf(\"Value: %s\", args[i])\n\t\t\t fmt.Println(\"\")\n\t\t\t\t value = args[i]\n\t\t\t }\n\t\t\t \n\t\t\t \/\/check if the state exists. If not initialize the state\n\t\t\t Avalbytes, err := stub.GetState(key)\n\t\t\tif err != nil {\n\t\t\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + key + \"\\\"}\"\n\t\t\t\treturn nil, errors.New(jsonResp)\n\t\t\t}\n\t\t\n\t\t\tif Avalbytes == nil {\n\t\t\t\tAvalbytes = []byte(\"0\")\n\t\t\t\terr = stub.PutState(key, Avalbytes)\n\t\t\t\t\n\t\t\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + key + \"\\\"}\"\n\t\t\t\t\/\/return nil, errors.New(jsonResp)\n\t\t\t}\n\t\t\t \n\t\t err = stub.PutState(key, []byte(value)) \/\/write the variable into the chaincode state\n\t\t\t if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t }\n\t\t }\n }\n\t}\n\n\t\/*\n key = args[0] \/\/rename for fun\n value = args[1]\n err = stub.PutState(key, []byte(value)) \/\/write the variable into the chaincode state\n if err != nil {\n return nil, err\n }\n\t*\/\n\t\t\n return nil, nil\n}\n\n\n\/\/ Transaction makes payment of X units from A to B\nfunc (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tif function == \"delete\" {\n\t\t\/\/ Deletes an entity from its state\n\t\treturn t.delete(stub, args)\n\t}\n\tif function == \"write\" {\n\t\tfmt.Println(\"Calling write()\")\n return t.write(stub, args)\n }\n\n\tvar A, B string \/\/ Entities\n\tvar Aval, Bval int \/\/ Asset holdings\n\tvar X int \/\/ Transaction value\n\tvar err error\n\n\tif len(args) != 3 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tA = args[0]\n\tB = args[1]\n\t\n\t\n\t\/\/ Get the state from the ledger\n\t\/\/ TODO: will be nice to have a GetAllState call to ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get state\")\n\t}\n\tif Avalbytes == nil {\n\t\treturn nil, errors.New(\"Entity not found\")\n\t}\n\tAval, _ = strconv.Atoi(string(Avalbytes))\n\n\tBvalbytes, err := stub.GetState(B)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get state\")\n\t}\n\tif Bvalbytes == nil {\n\t\treturn nil, errors.New(\"Entity not found\")\n\t}\n\tBval, _ = strconv.Atoi(string(Bvalbytes))\n\n\t\/\/ Perform the execution\n\tX, err = strconv.Atoi(args[2])\n\tAval = Aval - X\n\tBval = Bval + X\n\tfmt.Printf(\"Aval = %d, Bval = %d\\n\", Aval, Bval)\n\t\n\t\/\/Store state for transactions\n\tvar transacted, historyval string\n\ttransacted = \"T_\"+args[0]+\"|\"+args[1]\n\tTvalbytes, err := stub.GetState(transacted)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get transacted state\")\n\t}\n\tif Tvalbytes == nil {\n\t\thistoryval = args[2]\n\t}else{\n\t\thistoryval = string(Tvalbytes)\n\t\thistoryval = historyval+\",\"+args[2]\t\t\n\t}\t\n\terr = stub.PutState(transacted, []byte(historyval))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\/\/Store state for sponsor transactions\n\tvar s_transactions, s_history string\n\ts_transactions = \"T_\"+args[0]\n\tSvalbytes, err := stub.GetState(s_transactions)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get sponsor transacted state\")\n\t}\n\tif Svalbytes == nil {\n\t\ts_history = args[1]+\"|\"+args[2]\n\t}else{\n\t\ts_history = string(Svalbytes)\n\t\ts_history = s_history+\",\"+args[1]+\"|\"+args[2]\t\n\t}\t\n\terr = stub.PutState(s_transactions, []byte(s_history))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\/\/Store state for Idea transactions\n\tvar i_transactions, i_history string\n\ti_transactions = \"T_\"+args[1]\n\tIvalbytes, err := stub.GetState(i_transactions)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get idea transacted state\")\n\t}\n\tif Ivalbytes == nil {\n\t\ti_history = args[0]+\"|\"+args[2]\n\t}else{\n\t\ti_history = string(Ivalbytes)\n\t\ti_history = i_history+\",\"+args[0]+\"|\"+args[2]\t\n\t}\t\n\terr = stub.PutState(i_transactions, []byte(i_history))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t\n\t\n\n\t\/\/ Write the state back to the ledger\n\terr = stub.PutState(A, []byte(strconv.Itoa(Aval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = stub.PutState(B, []byte(strconv.Itoa(Bval)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Deletes an entity from state\nfunc (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tA := args[0]\n\n\t\/\/ Delete the key from the state in ledger\n\terr := stub.DelState(A)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to delete state\")\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) Query(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\t\/*if function != \"query\" {\n\t\treturn nil, errors.New(\"Invalid query function name. Expecting \\\"query\\\"\")\n\t}*\/\n\t\n\tif function == \"queryAll\" {\n\t\tfmt.Println(\"Calling QueryAll()\")\n return t.queryAll(stub, args)\n }\n\t\n\tif function == \"queryTransact\" {\n\t\tfmt.Println(\"Calling QueryTransact()\")\n return t.queryTransact(stub, args)\n }\n\t\n\tvar A string \/\/ Entities\n\tvar err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}\n\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tAvalbytes = []byte(\"0\")\n\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\t\/\/return nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n}\n\n\n\n\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) queryAll(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\t\t\n\t\/\/var A string \/\/ Entities\n\t\/\/var err error\n\n\t\/*if len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}*\/\n var RetValue []byte\n\tvar buffer bytes.Buffer \n var jsonRespString string \n\t\tfor i := 0; i < len(args); i++ {\t \n\t\t Avalbytes, err := stub.GetState(args[i])\n\t\t\tif err != nil {\n\t\t\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + args[i] + \"\\\"}\"\n\t\t\t\treturn nil, errors.New(jsonResp)\n\t\t\t}\n\n\t\t\tif Avalbytes == nil {\n\t\t\t\tAvalbytes = []byte(\"0\")\n\t\t\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + args[i] + \"\\\"}\"\n\t\t\t\t\/\/return nil, errors.New(jsonResp)\n\t\t\t}\n\t\t\tif(i!=len(args)-1) {\n\t\t\t jsonRespString = string(Avalbytes)+\",\"\n\t\t\t}else{\n\t\t\t jsonRespString = string(Avalbytes)\n\t\t\t}\n\t\t\tbuffer.WriteString(jsonRespString)\t\t\t\n\t\t\tRetValue = []byte(buffer.String())\t\t\t\n\t\t\t\n\t\t}\n\t\tjsonResp := \"{\"+buffer.String()+\"}\"\n\t\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\t\treturn RetValue, nil\n\t\t\n\t\n\t\/*\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n\t*\/\n}\n\n\n\n\n\n\/\/ Query callback representing the query of a chaincode\nfunc (t *SimpleChaincode) queryTransact(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\t\t\n\t\/\/var A string \/\/ Entities\n\t\/\/var err error\n\n\t\/*if len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting name of the person to query\")\n\t}*\/\n var RetValue []byte\n\tvar buffer bytes.Buffer \n var jsonRespString, queryparam string \n\tqueryparam = \"T_\"+args[0]\n\t\t\t \n\t\tTvalbytes, err := stub.GetState(queryparam)\n\t\tif err != nil {\n\t\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + args[0] + \"\\\"}\"\n\t\t\treturn nil, errors.New(jsonResp)\n\t\t}\n\n\t\tif Tvalbytes == nil {\n\t\t\tTvalbytes = []byte(\"0\")\n\t\t\t\/\/jsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + args[i] + \"\\\"}\"\n\t\t\t\/\/return nil, errors.New(jsonResp)\n\t\t}\n\t\t\n\t\tjsonRespString = string(Tvalbytes)\t\t\n\t\tbuffer.WriteString(jsonRespString)\t\t\t\n\t\tRetValue = []byte(buffer.String())\n\t\tjsonResp := \"{\"+buffer.String()+\"}\"\n\t\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\t\treturn RetValue, nil\n\t\t\n\t\n\t\/*\n\tA = args[0]\n\n\t\/\/ Get the state from the ledger\n\tAvalbytes, err := stub.GetState(A)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tif Avalbytes == nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Nil amount for \" + A + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\tjsonResp := \"{\\\"Name\\\":\\\"\" + A + \"\\\",\\\"Amount\\\":\\\"\" + string(Avalbytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn Avalbytes, nil\n\t*\/\n}\n\n\n\n\nfunc main() {\n\terr := shim.Start(new(SimpleChaincode))\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\n\/\/ Opts ...\ntype Opts struct {\n\tdebugFlag bool\n\thelpFlag bool\n\tloadFlag bool\n\tprettyFlag bool\n\tscriptFlag bool\n\ttableFlag bool\n\ttypeFlag bool\n\tversionFlag bool\n\tplanetsCount int\n\tcommand string\n\tcurrentDBDet string\n\tcurrentDet string\n\tscriptName string\n\ttemplate string\n\tplanets []string\n}\n\n\/**\n*\tReturns the contents of args in following order:\n*\tprettyprint flag\n*\tscript flag\n*\tscript path\n*\tcommand\n*\tplanets\n *\/\nfunc (opts *Opts) procArgs(args []string) {\n\tflag.BoolVar(&opts.helpFlag, \"h\", false, \"help\")\n\tflag.BoolVar(&opts.prettyFlag, \"pp\", false, \"prettyprint\")\n\tflag.BoolVar(&opts.typeFlag, \"t\", false, \"type\")\n\tflag.BoolVar(&opts.debugFlag, \"d\", false, \"verbose\")\n\tflag.BoolVar(&opts.loadFlag, \"l\", false, \"ssh profile loading\")\n\tflag.BoolVar(&opts.versionFlag, \"v\", false, \"version\")\n\tflag.StringVar(&opts.template, \"tp\", \"\", \"filename of template\")\n\tflag.StringVar(&opts.scriptName, \"s\", \"\", \"name of the script(regardless wether db or bash) to be executed\")\n\tflag.StringVar(&opts.command, \"c\", \"\", \"command to be executed in quotes\")\n\tflag.Parse()\n\topts.scriptFlag = !(opts.scriptName == \"\")\n\topts.tableFlag = !(opts.template == \"\")\n\tif opts.scriptFlag && !(opts.command == \"\") {\n\t\tvar err error\n\t\tthrowErrExt(err, \"providing both a script AND a command is not possible\")\n\t}\n\n\tplanets := flag.Args()\n\topts.command = strings.TrimSuffix(strings.TrimPrefix(opts.command, \"\\\"\"), \"\\\"\")\n\topts.template = strings.TrimSuffix(strings.TrimPrefix(opts.template, \"\\\"\"), \"\\\"\")\n\topts.scriptName = strings.TrimSuffix(strings.TrimPrefix(opts.scriptName, \"\\\"\"), \"\\\"\")\n\n\tfor _, argument := range planets {\n\t\tif isSupported(argument) {\n\t\t\topts.planets = append(opts.planets, argument)\n\t\t\topts.planetsCount++\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"This Type of Connection is not supported.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tif len(args) == 1 {\n\t\topts.helpFlag = true\n\t}\n\n}\n\n\/**\n*\tSplits the given connectiondetails and returns the hostname\n*\t@params:\n*\t\tconnDet: Connection details in following form: user@hostname\n*\t@return: hostname\n *\/\nfunc getHost(connDet string) string {\n\ttoReturn := strings.Split(connDet, \"@\")\n\treturn toReturn[1]\n}\n\n\/**\n*\tSplits the given connectiondetails and returns the user\n*\t@params:\n*\t\tconnDet: Connection details in following form: user@hostname\n*\t@return: user\n *\/\nfunc getUser(connDet string) string {\n\ttoReturn := strings.Split(connDet, \"@\")\n\treturn toReturn[0]\n}\n\n\/**\n*\tReturns the type of a given planet\n*\t@params:\n*\t\tid: The planets id\n*\t@return: The planets type\n *\/\nfunc getType(id string) string {\n\tcmd := exec.Command(\"ff\", \"-t\", id)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tthrowErrOut(out, err)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\n\/**\n*\tReturns the connection details to a given planet\n*\t@params:\n*\t\tid: The planets id\n*\t@return: The connection details to the planet\n *\/\nfunc getPlanetDetails(id string) string {\n\tcmd := exec.Command(\"ff\", id, \"-f=pqdb\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tthrowErrOut(out, err)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\n\/**\n*\tcounts the supported planets in a list of planets\n *\/\nfunc countSupported(planets []string) int {\n\ti := 0\n\tfor _, planet := range planets {\n\t\tif getType(planet) == linuxServer {\n\t\t\ti++\n\t\t}\n\t}\n\treturn i\n}\n\n\/**\n*\tchecks, wether a planet is supported by goo or not\n *\/\nfunc isSupported(planet string) bool {\n\tswitch getType(planet) {\n\tcase linuxServer:\n\t\treturn true\n\tcase database:\n\t\treturn true\n\tcase webServer:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\n\t}\n\n}\n\nfunc getMaxLength(toProcess string) int {\n\t_ = toProcess\n\treturn 0\n}\n\n\/**\n*\tsplits db details (dbID:user@host) and returns them as dbID,user@host\n *\/\nfunc procDBDets(dbDet string) (string, string) {\n\tparts := strings.Split(dbDet, \":\")\n\treturn parts[0], parts[1]\n}\n<commit_msg>shortened flags<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\n\/\/ Opts ...\ntype Opts struct {\n\tdebugFlag bool\n\thelpFlag bool\n\tloadFlag bool\n\tprettyFlag bool\n\tscriptFlag bool\n\ttableFlag bool\n\ttypeFlag bool\n\tversionFlag bool\n\tplanetsCount int\n\tcommand string\n\tcurrentDBDet string\n\tcurrentDet string\n\tscriptName string\n\ttemplate string\n\tplanets []string\n}\n\n\/**\n*\tReturns the contents of args in following order:\n*\tprettyprint flag\n*\tscript flag\n*\tscript path\n*\tcommand\n*\tplanets\n *\/\nfunc (opts *Opts) procArgs(args []string) {\n\tflag.BoolVar(&opts.helpFlag, \"h\", false, \"help\")\n\tflag.BoolVar(&opts.prettyFlag, \"p\", false, \"prettyprint\")\n\tflag.BoolVar(&opts.typeFlag, \"t\", false, \"type\")\n\tflag.BoolVar(&opts.debugFlag, \"d\", false, \"verbose\")\n\tflag.BoolVar(&opts.loadFlag, \"l\", false, \"ssh profile loading\")\n\tflag.BoolVar(&opts.versionFlag, \"v\", false, \"version\")\n\tflag.StringVar(&opts.template, \"t\", \"\", \"filename of template\")\n\tflag.StringVar(&opts.scriptName, \"s\", \"\", \"name of the script(regardless wether db or bash) to be executed\")\n\tflag.StringVar(&opts.command, \"c\", \"\", \"command to be executed in quotes\")\n\tflag.Parse()\n\topts.scriptFlag = !(opts.scriptName == \"\")\n\topts.tableFlag = !(opts.template == \"\")\n\tif opts.scriptFlag && !(opts.command == \"\") {\n\t\tvar err error\n\t\tthrowErrExt(err, \"providing both a script AND a command is not possible\")\n\t}\n\n\tplanets := flag.Args()\n\topts.command = strings.TrimSuffix(strings.TrimPrefix(opts.command, \"\\\"\"), \"\\\"\")\n\topts.template = strings.TrimSuffix(strings.TrimPrefix(opts.template, \"\\\"\"), \"\\\"\")\n\topts.scriptName = strings.TrimSuffix(strings.TrimPrefix(opts.scriptName, \"\\\"\"), \"\\\"\")\n\n\tfor _, argument := range planets {\n\t\tif isSupported(argument) {\n\t\t\topts.planets = append(opts.planets, argument)\n\t\t\topts.planetsCount++\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"This Type of Connection is not supported.\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tif len(args) == 1 {\n\t\topts.helpFlag = true\n\t}\n\n}\n\n\/**\n*\tSplits the given connectiondetails and returns the hostname\n*\t@params:\n*\t\tconnDet: Connection details in following form: user@hostname\n*\t@return: hostname\n *\/\nfunc getHost(connDet string) string {\n\ttoReturn := strings.Split(connDet, \"@\")\n\treturn toReturn[1]\n}\n\n\/**\n*\tSplits the given connectiondetails and returns the user\n*\t@params:\n*\t\tconnDet: Connection details in following form: user@hostname\n*\t@return: user\n *\/\nfunc getUser(connDet string) string {\n\ttoReturn := strings.Split(connDet, \"@\")\n\treturn toReturn[0]\n}\n\n\/**\n*\tReturns the type of a given planet\n*\t@params:\n*\t\tid: The planets id\n*\t@return: The planets type\n *\/\nfunc getType(id string) string {\n\tcmd := exec.Command(\"ff\", \"-t\", id)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tthrowErrOut(out, err)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\n\/**\n*\tReturns the connection details to a given planet\n*\t@params:\n*\t\tid: The planets id\n*\t@return: The connection details to the planet\n *\/\nfunc getPlanetDetails(id string) string {\n\tcmd := exec.Command(\"ff\", id, \"-f=pqdb\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tthrowErrOut(out, err)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\n\/**\n*\tcounts the supported planets in a list of planets\n *\/\nfunc countSupported(planets []string) int {\n\ti := 0\n\tfor _, planet := range planets {\n\t\tif getType(planet) == linuxServer {\n\t\t\ti++\n\t\t}\n\t}\n\treturn i\n}\n\n\/**\n*\tchecks, wether a planet is supported by goo or not\n *\/\nfunc isSupported(planet string) bool {\n\tswitch getType(planet) {\n\tcase linuxServer:\n\t\treturn true\n\tcase database:\n\t\treturn true\n\tcase webServer:\n\t\treturn false\n\tdefault:\n\t\treturn false\n\n\t}\n\n}\n\nfunc getMaxLength(toProcess string) int {\n\t_ = toProcess\n\treturn 0\n}\n\n\/**\n*\tsplits db details (dbID:user@host) and returns them as dbID,user@host\n *\/\nfunc procDBDets(dbDet string) (string, string) {\n\tparts := strings.Split(dbDet, \":\")\n\treturn parts[0], parts[1]\n}\n<|endoftext|>"} {"text":"<commit_before>package dbr\n\nimport (\n\t\"github.com\/corestoreio\/csfw\/util\/bufferpool\"\n\t\"github.com\/corestoreio\/errors\"\n)\n\ntype alias struct {\n\t\/\/ Select used in cases where a sub-select is required.\n\tSelect *Select\n\t\/\/ Expression can be any kind of SQL expression or a valid identifier.\n\tExpression string\n\t\/\/ Alias must be a valid identifier allowed for alias usage.\n\tAlias string\n}\n\n\/\/ MakeAlias creates a new alias expression\nfunc MakeAlias(as ...string) alias {\n\ta := alias{\n\t\tExpression: as[0],\n\t}\n\tif len(as) > 1 {\n\t\ta.Alias = as[1]\n\t}\n\treturn a\n}\n\nfunc (t alias) String() string {\n\treturn Quoter.ExprAlias(t.Expression, t.Alias)\n}\n\nfunc (t alias) QuoteAs() string {\n\treturn Quoter.QuoteAs(t.Expression, t.Alias)\n}\n\n\/\/ QuoteAsWriter writes the quote table and its maybe alias into w.\nfunc (t alias) QuoteAsWriter(w queryWriter) (Arguments, error) {\n\tif t.Select != nil {\n\t\tw.WriteRune('(')\n\t\targs, err := t.Select.toSQL(w)\n\t\tw.WriteRune(')')\n\t\tw.WriteString(\" AS \")\n\t\tQuoter.quote(w, t.Alias)\n\t\treturn args, errors.Wrap(err, \"[dbr] QuoteAsWriter.SubSelect\")\n\t}\n\tQuoter.quoteAs(w, t.Expression, t.Alias)\n\treturn nil, nil\n}\n\n\/\/ DefaultScopeNames specifies the name of the scopes used in all EAV* function\n\/\/ to generate scope based hierarchical fall backs.\nvar DefaultScopeNames = [...]string{\"Store\", \"Group\", \"Website\", \"Default\"}\n\n\/\/ EAVIfNull creates a nested IFNULL SQL statement when a scope based fall back\n\/\/ hierarchy is required. Alias argument will be used as a prefix for the alias\n\/\/ table name and as the final alias name.\n\/\/ TODO: Migrate into EAV package\nfunc EAVIfNull(alias, columnName, defaultVal string, scopeNames ...string) string {\n\tbuf := bufferpool.Get()\n\tdefer bufferpool.Put(buf)\n\n\tif len(scopeNames) == 0 {\n\t\tscopeNames = DefaultScopeNames[:]\n\t}\n\n\tbrackets := 0\n\tfor _, n := range scopeNames {\n\t\tbuf.WriteString(\"IFNULL(\")\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteString(alias)\n\t\tbuf.WriteString(n)\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteRune('.')\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteString(columnName)\n\t\tbuf.WriteRune('`')\n\t\tif brackets < len(scopeNames)-1 {\n\t\t\tbuf.WriteRune(',')\n\t\t}\n\t\tbrackets++\n\t}\n\n\tif defaultVal == \"\" {\n\t\tdefaultVal = `''`\n\t}\n\tbuf.WriteRune(',')\n\tbuf.WriteString(defaultVal)\n\tfor i := 0; i < brackets; i++ {\n\t\tbuf.WriteRune(')')\n\t}\n\tbuf.WriteString(\" AS \")\n\tbuf.WriteRune('`')\n\tbuf.WriteString(alias)\n\tbuf.WriteRune('`')\n\treturn buf.String()\n}\n<commit_msg>storage\/dbr: Add TODO for type aliases<commit_after>package dbr\n\nimport (\n\t\"github.com\/corestoreio\/csfw\/util\/bufferpool\"\n\t\"github.com\/corestoreio\/errors\"\n)\n\ntype alias struct {\n\t\/\/ Select used in cases where a sub-select is required.\n\tSelect *Select\n\t\/\/ Expression can be any kind of SQL expression or a valid identifier.\n\tExpression string\n\t\/\/ Alias must be a valid identifier allowed for alias usage.\n\tAlias string\n}\n\n\/\/ MakeAlias creates a new alias expression\nfunc MakeAlias(as ...string) alias {\n\ta := alias{\n\t\tExpression: as[0],\n\t}\n\tif len(as) > 1 {\n\t\ta.Alias = as[1]\n\t}\n\treturn a\n}\n\nfunc (t alias) String() string {\n\treturn Quoter.ExprAlias(t.Expression, t.Alias)\n}\n\nfunc (t alias) QuoteAs() string {\n\treturn Quoter.QuoteAs(t.Expression, t.Alias)\n}\n\n\/\/ QuoteAsWriter writes the quote table and its maybe alias into w.\nfunc (t alias) QuoteAsWriter(w queryWriter) (Arguments, error) {\n\tif t.Select != nil {\n\t\tw.WriteRune('(')\n\t\targs, err := t.Select.toSQL(w)\n\t\tw.WriteRune(')')\n\t\tw.WriteString(\" AS \")\n\t\tQuoter.quote(w, t.Alias)\n\t\treturn args, errors.Wrap(err, \"[dbr] QuoteAsWriter.SubSelect\")\n\t}\n\tQuoter.quoteAs(w, t.Expression, t.Alias)\n\treturn nil, nil\n}\n\n\/\/ TODO(CyS) if we need to distinguish between table name and the column or even need\n\/\/ a sub select in the column list, then we can implement type aliases and replace\n\/\/ all []string with type aliases. This costs some allocs but for modifying queries\n\/\/ in dispatched events, it's getting easier ...\n\/\/type aliases []alias\n\/\/\n\/\/func makeAliasesFromStrings(columns ...string) aliases {\n\/\/\n\/\/}\n\n\/\/ DefaultScopeNames specifies the name of the scopes used in all EAV* function\n\/\/ to generate scope based hierarchical fall backs.\nvar DefaultScopeNames = [...]string{\"Store\", \"Group\", \"Website\", \"Default\"}\n\n\/\/ EAVIfNull creates a nested IFNULL SQL statement when a scope based fall back\n\/\/ hierarchy is required. Alias argument will be used as a prefix for the alias\n\/\/ table name and as the final alias name.\n\/\/ TODO: Migrate into EAV package\nfunc EAVIfNull(alias, columnName, defaultVal string, scopeNames ...string) string {\n\tbuf := bufferpool.Get()\n\tdefer bufferpool.Put(buf)\n\n\tif len(scopeNames) == 0 {\n\t\tscopeNames = DefaultScopeNames[:]\n\t}\n\n\tbrackets := 0\n\tfor _, n := range scopeNames {\n\t\tbuf.WriteString(\"IFNULL(\")\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteString(alias)\n\t\tbuf.WriteString(n)\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteRune('.')\n\t\tbuf.WriteRune('`')\n\t\tbuf.WriteString(columnName)\n\t\tbuf.WriteRune('`')\n\t\tif brackets < len(scopeNames)-1 {\n\t\t\tbuf.WriteRune(',')\n\t\t}\n\t\tbrackets++\n\t}\n\n\tif defaultVal == \"\" {\n\t\tdefaultVal = `''`\n\t}\n\tbuf.WriteRune(',')\n\tbuf.WriteString(defaultVal)\n\tfor i := 0; i < brackets; i++ {\n\t\tbuf.WriteRune(')')\n\t}\n\tbuf.WriteString(\" AS \")\n\tbuf.WriteRune('`')\n\tbuf.WriteString(alias)\n\tbuf.WriteRune('`')\n\treturn buf.String()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage storage\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype WAL struct {\n\tfd int\n\tmode string\n}\n\nfunc openWriteAheadFile(filename string, mode string) WAL {\n\tvar file int\n\tvar err error\n\tswitch mode {\n\tcase \"osync\":\n\t\tfile, err = syscall.Open(filename, syscall.O_SYNC|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"dsync\":\n\t\tfile, err = syscall.Open(filename, syscall.O_DSYNC|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"direct\":\n\t\tfile, err = syscall.Open(filename, syscall.O_DIRECT|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"none\", \"fsync\":\n\t\tfile, err = syscall.Open(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tdefault:\n\t\tglog.Fatal(\"PersistenceMode not reconised\")\n\t}\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\t\/\/ TOD0: remove hardcoded filesize\n\terr = syscall.Fallocate(file, 0, 0, int64(64*1000*1000)) \/\/ 64MB\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\treturn WAL{file, mode}\n}\n\nfunc (w WAL) writeAhead(bytes []byte) {\n\tstartTime := time.Now()\n\t_, err := syscall.Write(w.fd, bytes)\n\t_,_ = syscall.Write(w.fd, byte[]('\\n'))\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\tif w.mode == \"fsync\" || w.mode == \"direct\" {\n\t\terr = syscall.Fdatasync(w.fd)\n\t\tif err != nil {\n\t\t\tglog.Fatal(err)\n\t\t}\n\t}\n\tif time.Since(startTime) > time.Millisecond {\n\t\tglog.Info(\" bytes written & synced to persistent log in \", time.Since(startTime).String())\n\t}\n\tglog.V(1).Info(\" bytes written & synced to persistent log in \", time.Since(startTime).String())\n}\n<commit_msg>fixing travis build in wal_linux<commit_after>\/\/ +build linux\n\npackage storage\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype WAL struct {\n\tfd int\n\tmode string\n}\n\nfunc openWriteAheadFile(filename string, mode string) WAL {\n\tvar file int\n\tvar err error\n\tswitch mode {\n\tcase \"osync\":\n\t\tfile, err = syscall.Open(filename, syscall.O_SYNC|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"dsync\":\n\t\tfile, err = syscall.Open(filename, syscall.O_DSYNC|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"direct\":\n\t\tfile, err = syscall.Open(filename, syscall.O_DIRECT|os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tcase \"none\", \"fsync\":\n\t\tfile, err = syscall.Open(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\tdefault:\n\t\tglog.Fatal(\"PersistenceMode not reconised\")\n\t}\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\t\/\/ TOD0: remove hardcoded filesize\n\terr = syscall.Fallocate(file, 0, 0, int64(64*1000*1000)) \/\/ 64MB\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\treturn WAL{file, mode}\n}\n\nfunc (w WAL) writeAhead(bytes []byte) {\n\tstartTime := time.Now()\n\t_, err := syscall.Write(w.fd, bytes)\n\t_,_ = syscall.Write(w.fd, []byte(\"\\n\"))\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\tif w.mode == \"fsync\" || w.mode == \"direct\" {\n\t\terr = syscall.Fdatasync(w.fd)\n\t\tif err != nil {\n\t\t\tglog.Fatal(err)\n\t\t}\n\t}\n\tif time.Since(startTime) > time.Millisecond {\n\t\tglog.Info(\" bytes written & synced to persistent log in \", time.Since(startTime).String())\n\t}\n\tglog.V(1).Info(\" bytes written & synced to persistent log in \", time.Since(startTime).String())\n}\n<|endoftext|>"} {"text":"<commit_before>package streamtools\n\nimport (\n\t\"container\/heap\"\n\t\"github.com\/bitly\/go-simplejson\"\n\t\"log\"\n\t\"time\"\n)\n\nfunc Count(inChan chan *simplejson.Json, ruleChan chan *simplejson.Json, queryChan chan stateQuery) {\n\t\/\/ block until we recieve a rule\n\tparams = <-ruleChan\n\twindowSeconds, err := params.Get(\"window\").Float64()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\twindow := time.Duration(windowSeconds) * time.Second\n\twaitTimer := time.NewTimer(100 * time.Millisecond)\n\n\tpq := &PriorityQueue{}\n\theap.Init(pq)\n\n\temptyByte := make([]byte, 0)\n\n\tfor {\n\t\tselect {\n\t\tcase params = <-ruleChan:\n\t\tcase query = <-queryChan:\n\t\t\tout, err := simplejson.NewJson([]byte(\"{}\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\tout.Set(\"count\", len(*pq))\n\t\t\tquery.responseChan <- out\n\t\tcase <-inChan:\n\t\t\tqueueMessage := &PQMessage{\n\t\t\t\tval: &emptyByte,\n\t\t\t\tt: time.Now(),\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\theap.Push(pq, queueMessage)\n\t\tcase <-waitTimer.C:\n\t\t}\n\t\tfor {\n\t\t\tpqMsg, diff := pq.PeekAndShift(time.Now(), window)\n\t\t\tif pqMsg == nil {\n\t\t\t\t\/\/ either the queue is empty, or it's not time to emit\n\t\t\t\tif diff == 0 {\n\t\t\t\t\t\/\/ then the queue is empty. Pause for 5 seconds before checking again\n\t\t\t\t\tdiff = 5\n\t\t\t\t}\n\t\t\t\twaitTimer.Reset(diff)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>changed the delay from 5ns to 5s (oops)<commit_after>package streamtools\n\nimport (\n\t\"container\/heap\"\n\t\"github.com\/bitly\/go-simplejson\"\n\t\"log\"\n\t\"time\"\n)\n\nfunc Count(inChan chan *simplejson.Json, ruleChan chan *simplejson.Json, queryChan chan stateQuery) {\n\t\/\/ block until we recieve a rule\n\tparams = <-ruleChan\n\twindowSeconds, err := params.Get(\"window\").Float64()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\twindow := time.Duration(windowSeconds) * time.Second\n\twaitTimer := time.NewTimer(100 * time.Millisecond)\n\n\tpq := &PriorityQueue{}\n\theap.Init(pq)\n\n\temptyByte := make([]byte, 0)\n\n\tfor {\n\t\tselect {\n\t\tcase params = <-ruleChan:\n\t\tcase query = <-queryChan:\n\t\t\tout, err := simplejson.NewJson([]byte(\"{}\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\tout.Set(\"count\", len(*pq))\n\t\t\tquery.responseChan <- out\n\t\tcase <-inChan:\n\t\t\tqueueMessage := &PQMessage{\n\t\t\t\tval: &emptyByte,\n\t\t\t\tt: time.Now(),\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\theap.Push(pq, queueMessage)\n\t\tcase <-waitTimer.C:\n\t\t}\n\t\tfor {\n\t\t\tpqMsg, diff := pq.PeekAndShift(time.Now(), window)\n\t\t\tif pqMsg == nil {\n\t\t\t\t\/\/ either the queue is empty, or it's not time to emit\n\t\t\t\tif diff == 0 {\n\t\t\t\t\t\/\/ then the queue is empty. Pause for 5 seconds before checking again\n\t\t\t\t\tdiff = time.Duration(5)*time.Second\n\t\t\t\t}\n\t\t\t\twaitTimer.Reset(diff)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"gob\"\n\t\"encoding\/binary\"\n\t\"os\"\n\t\"strings\"\n\t\"fmt\"\n\t\"log\"\n\t\"io\"\n\t\"sync\"\n)\n\nfunc main() {\n\ts, err := Encode(os.Args[1])\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"encoded: %d bytes\\n\", len(s))\n\n\tfs, err := Decode(s)\n\tif err != nil {\n\t\tlog.Exitf(\"decode: %v\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"all:\\n\")\n\tshow(fs, \"\/\")\n}\n\nfunc show(fs *FS, path string) {\n\tf, err := fs.Open(path)\n\tif err != nil {\n\t\tlog.Printf(\"cannot open %s: %v\\n\", path, err)\n\t\treturn\n\t}\n\tif f.IsDirectory() {\n\t\tfmt.Printf(\"d %s\\n\", path)\n\t\tnames, err := f.Readdirnames()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"cannot get contents of %s: %v\\n\", path, err)\n\t\t\treturn\n\t\t}\n\t\tfor _, name := range names {\n\t\t\tshow(fs, path+\"\/\"+name)\n\t\t}\n\t}else{\n\t\tfmt.Printf(\"- %s\\n\", path)\n\t\tn, err := io.Copy(nullWriter{}, f)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"cannot read %s: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Printf(\"\t%d bytes\\n\", n)\n\t}\n}\n\ntype nullWriter struct {}\nfunc (nullWriter) Write(data []byte) (int, os.Error) {\n\treturn len(data), nil\n}\n\n\/\/ fsWriter represents file system while it's being encoded.\n\/\/ The gob Encoder writes to the the bytes.Buffer.\ntype fsWriter struct {\n\tbuf bytes.Buffer\n\tenc *gob.Encoder\n}\n\n\/\/ entry is the basic file system structure - it holds\n\/\/ information on one directory entry.\ntype entry struct {\n\tname string\t\t\/\/ name of entry.\n\toffset int\t\t\t\/\/ start of information for this entry.\n\tdir bool\t\t\t\/\/ is it a directory?\n\tlen int\t\t\t\/\/ length of file (only if it's a file)\n}\n\n\/\/ FS represents the file system and all its data.\ntype FS struct {\n\tmu sync.Mutex\n\ts string\n\troot uint32\n\tdec *gob.Decoder\n\trd strings.Reader\n}\n\n\/\/ A File represents an entry in the file system.\ntype File struct {\n\tfs *FS\n\trd strings.Reader\n\tentry *entry\n}\n\n\/\/ Encode recursively reads the directory at path\n\/\/ and encodes it into a read only file system\n\/\/ that can later be read with Decode.\nfunc Encode(path string) (string, os.Error) {\n\tfs := &fsWriter{}\n\tfs.enc = gob.NewEncoder(&fs.buf)\n\t\/\/ make sure entry type is encoded first.\n\tfs.enc.Encode([]entry{})\n\n\te, err := fs.write(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !e.dir {\n\t\treturn \"\", os.ErrorString(\"root must be a directory\")\n\t}\n\tbinary.Write(&fs.buf, binary.LittleEndian, uint32(e.offset))\n\treturn string(fs.buf.Bytes()), nil\n}\n\n\/\/ write writes path and all its contents to the file system.\nfunc (fs *fsWriter) write(path string) (*entry, os.Error) {\n\tf, err := os.Open(path, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tinfo, err := f.Stat()\n\tif info == nil {\n\t\treturn nil, err\n\t}\n\tif info.IsDirectory() {\n\t\tnames, err := f.Readdirnames(-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tentries := make([]entry, len(names))\n\t\tfor i, name := range names {\n\t\t\tent, err := fs.write(path+\"\/\"+name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tent.name = name\n\t\t\tentries[i] = *ent\n\t\t}\n\t\toff := len(fs.buf.Bytes())\n\t\tfs.enc.Encode(entries)\n\t\treturn &entry{offset: off, dir: true}, nil\n\t}\n\toff := len(fs.buf.Bytes())\n\tbuf := make([]byte, 8192)\n\ttot := 0\n\tfor {\n\t\tn, _ := f.Read(buf)\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tfs.buf.Write(buf[0:n])\n\t\ttot += n\n\t}\n\treturn &entry{offset: off, dir: false, len: tot}, nil\n}\n\n\/\/ Decode converts a file system as encoded by Encode\n\/\/ into an FS.\nfunc Decode(s string) (*FS, os.Error) {\n\tfs := new(FS)\n\tr := strings.NewReader(s[len(s)-4:])\n\tif err := binary.Read(r, binary.LittleEndian, &fs.root); err != nil {\n\t\treturn nil, err\n\t}\n\tfs.s = s[0:len(s)-4]\n\tfs.dec = gob.NewDecoder(&fs.rd)\n\n\t\/\/ read dummy entry at start to prime the gob types.\n\tfs.rd = strings.Reader(fs.s)\n\tif err := fs.dec.Decode(new([]entry)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fs, nil\n}\n\nfunc isSlash(c int) bool {\n\treturn c == '\/'\n}\n\n\/\/ Open opens the named path within fs.\n\/\/ Paths are slash-separated, with an optional\n\/\/ slash prefix.\nfunc (fs *FS) Open(path string) (*File, os.Error) {\n\tp := strings.FieldsFunc(path, isSlash)\n\te := &entry{dir: true, offset: int(fs.root)}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tfor _, name := range p {\n\t\tvar err os.Error\n\t\te, err = fs.walk(e, name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif e.dir {\n\t\treturn &File{fs, \"\", e}, nil\n\t}\n\treturn &File{\n\t\tfs,\n\t\tstrings.Reader(fs.s[e.offset: e.offset+e.len]),\n\t\te,\n\t}, nil\n}\n\nfunc (fs *FS) walk(e *entry, name string) (*entry, os.Error) {\n\tif !e.dir {\n\t\treturn nil, os.ErrorString(\"not a directory\")\n\t}\n\tcontents, err := fs.contents(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range contents {\n\t\tif contents[i].name == name {\n\t\t\treturn &contents[i], nil\n\t\t}\n\t}\n\treturn nil, os.ErrorString(\"file not found\")\n}\n\n\/\/ IsDirectory returns true if the file represents a directory.\nfunc (f *File) IsDirectory() bool {\n\treturn f.entry.dir\n}\n\n\/\/ Read reads from a file. It is invalid to call it on a directory.\nfunc (f *File) Read(buf []byte) (int, os.Error) {\n\tif f.entry.dir {\n\t\treturn 0, os.ErrorString(\"cannot read a directory\")\n\t}\n\treturn f.rd.Read(buf)\n}\n\n\/\/ contents returns all the entries inside a directory.\nfunc (fs *FS) contents(e *entry) (entries []entry, err os.Error) {\n\tif !e.dir {\n\t\treturn nil, os.ErrorString(\"not a directory\")\n\t}\n\tfs.rd = strings.Reader(fs.s[e.offset:])\n\terr = fs.dec.Decode(&entries)\n\treturn\n}\n\n\/\/ Readdirnames returns the names of all the files in\n\/\/ the File, which must be a directory.\nfunc (f *File) Readdirnames() ([]string, os.Error) {\n\tf.fs.mu.Unlock()\n\tdefer f.fs.mu.Unlock()\n\tentries, err := f.fs.contents(f.entry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames := make([]string, len(entries))\n\tfor i, e := range entries {\n\t\tnames[i] = e.name\n\t}\n\treturn names, nil\n}\n<commit_msg>added comments<commit_after>\/\/ The stringfs package provides a way to recursively encode the\n\/\/ data in a directory as a string, and to extract the contents later.\npackage stringfs\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"gob\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ The file system encoding uses gob to encode the\n\/\/ metadata.\n\/\/ It takes advantage of the fact that gob encodes all\n\/\/ its types up front, which means we can decode gob\n\/\/ package out of order as long as we know that the\n\/\/ relevant types have been received first.\n\/\/\n\/\/ The first item in the file system is a dummy type\n\/\/ representing an array of directory entries.\n\/\/ The final four bytes hold the offset of the start\n\/\/ of the root directory (note that this uses regular\n\/\/ non-variable-width encoding, so that we know\n\/\/ exactly where it is).\n\/\/ Each file entry is directly encoded as the bytes\n\/\/ of the file, not using gob.\n\/\/ Each directory entry is encoded as an array of\n\/\/ entry types.\n\n\/\/ entry is the basic file system structure - it holds\n\/\/ information on one directory entry.\ntype entry struct {\n\tname string\t\t\/\/ name of entry.\n\toffset int\t\t\t\/\/ start of information for this entry.\n\tdir bool\t\t\t\/\/ is it a directory?\n\tlen int\t\t\t\/\/ length of file (only if it's a file)\n}\n\n\/\/ fsWriter represents file system while it's being encoded.\n\/\/ The gob Encoder writes to the the bytes.Buffer.\ntype fsWriter struct {\n\tbuf bytes.Buffer\n\tenc *gob.Encoder\n}\n\n\/\/ FS represents the file system and all its data.\ntype FS struct {\n\tmu sync.Mutex\n\ts string\n\troot uint32\t\t\t\/\/ offset of root directory.\n\tdec *gob.Decoder\t\t\/\/ primed decoder, reading from &rd.\n\trd strings.Reader\n}\n\n\/\/ A File represents an entry in the file system.\ntype File struct {\n\tfs *FS\n\trd strings.Reader\n\tentry *entry\n}\n\n\/\/ Encode recursively reads the directory at path\n\/\/ and encodes it into a read only file system\n\/\/ that can later be read with Decode.\nfunc Encode(path string) (string, os.Error) {\n\tfs := &fsWriter{}\n\tfs.enc = gob.NewEncoder(&fs.buf)\n\t\/\/ make sure entry type is encoded first.\n\tfs.enc.Encode([]entry{})\n\n\te, err := fs.write(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !e.dir {\n\t\treturn \"\", os.ErrorString(\"root must be a directory\")\n\t}\n\tbinary.Write(&fs.buf, binary.LittleEndian, uint32(e.offset))\n\treturn string(fs.buf.Bytes()), nil\n}\n\n\/\/ write writes path and all its contents to the file system.\nfunc (fs *fsWriter) write(path string) (*entry, os.Error) {\n\tf, err := os.Open(path, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tinfo, err := f.Stat()\n\tif info == nil {\n\t\treturn nil, err\n\t}\n\tif info.IsDirectory() {\n\t\tnames, err := f.Readdirnames(-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tentries := make([]entry, len(names))\n\t\tfor i, name := range names {\n\t\t\tent, err := fs.write(path+\"\/\"+name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tent.name = name\n\t\t\tentries[i] = *ent\n\t\t}\n\t\toff := len(fs.buf.Bytes())\n\t\tfs.enc.Encode(entries)\n\t\treturn &entry{offset: off, dir: true}, nil\n\t}\n\toff := len(fs.buf.Bytes())\n\tbuf := make([]byte, 8192)\n\ttot := 0\n\tfor {\n\t\tn, _ := f.Read(buf)\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tfs.buf.Write(buf[0:n])\n\t\ttot += n\n\t}\n\treturn &entry{offset: off, dir: false, len: tot}, nil\n}\n\n\/\/ Decode converts a file system as encoded by Encode\n\/\/ into an FS.\nfunc Decode(s string) (*FS, os.Error) {\n\tfs := new(FS)\n\tr := strings.NewReader(s[len(s)-4:])\n\tif err := binary.Read(r, binary.LittleEndian, &fs.root); err != nil {\n\t\treturn nil, err\n\t}\n\tfs.s = s[0:len(s)-4]\n\tfs.dec = gob.NewDecoder(&fs.rd)\n\n\t\/\/ read dummy entry at start to prime the gob types.\n\tfs.rd = strings.Reader(fs.s)\n\tif err := fs.dec.Decode(new([]entry)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fs, nil\n}\n\nfunc isSlash(c int) bool {\n\treturn c == '\/'\n}\n\n\/\/ Open opens the named path within fs.\n\/\/ Paths are slash-separated, with an optional\n\/\/ slash prefix.\nfunc (fs *FS) Open(path string) (*File, os.Error) {\n\tp := strings.FieldsFunc(path, isSlash)\n\te := &entry{dir: true, offset: int(fs.root)}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tfor _, name := range p {\n\t\tvar err os.Error\n\t\te, err = fs.walk(e, name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif e.dir {\n\t\treturn &File{fs, \"\", e}, nil\n\t}\n\treturn &File{\n\t\tfs,\n\t\tstrings.Reader(fs.s[e.offset: e.offset+e.len]),\n\t\te,\n\t}, nil\n}\n\nfunc (fs *FS) walk(e *entry, name string) (*entry, os.Error) {\n\tif !e.dir {\n\t\treturn nil, os.ErrorString(\"not a directory\")\n\t}\n\tcontents, err := fs.contents(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range contents {\n\t\tif contents[i].name == name {\n\t\t\treturn &contents[i], nil\n\t\t}\n\t}\n\treturn nil, os.ErrorString(\"file not found\")\n}\n\n\/\/ IsDirectory returns true if the file represents a directory.\nfunc (f *File) IsDirectory() bool {\n\treturn f.entry.dir\n}\n\n\/\/ Read reads from a file. It is invalid to call it on a directory.\nfunc (f *File) Read(buf []byte) (int, os.Error) {\n\tif f.entry.dir {\n\t\treturn 0, os.ErrorString(\"cannot read a directory\")\n\t}\n\treturn f.rd.Read(buf)\n}\n\n\/\/ contents returns all the entries inside a directory.\nfunc (fs *FS) contents(e *entry) (entries []entry, err os.Error) {\n\tif !e.dir {\n\t\treturn nil, os.ErrorString(\"not a directory\")\n\t}\n\tfs.rd = strings.Reader(fs.s[e.offset:])\n\terr = fs.dec.Decode(&entries)\n\treturn\n}\n\n\/\/ Readdirnames returns the names of all the files in\n\/\/ the File, which must be a directory.\nfunc (f *File) Readdirnames() ([]string, os.Error) {\n\tf.fs.mu.Unlock()\n\tdefer f.fs.mu.Unlock()\n\tentries, err := f.fs.contents(f.entry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames := make([]string, len(entries))\n\tfor i, e := range entries {\n\t\tnames[i] = e.name\n\t}\n\treturn names, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package supervisor\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pborman\/uuid\"\n\t\"time\"\n\t\"bytes\"\n\t\"strings\"\n\t\"encoding\/json\"\n)\n\ntype UpdateOp int\n\nconst (\n\tSTOPPED UpdateOp = iota\n\tOUTPUT\n\tRESTORE_KEY\n)\n\ntype WorkerUpdate struct {\n\tTask uuid.UUID\n\tOp UpdateOp\n\tStoppedAt time.Time\n\tOutput string\n}\n\nfunc worker(id uint, work chan Task, updates chan WorkerUpdate) {\n\tfor t := range work {\n\t\tfmt.Printf(\"worker %d received task %v\\n\", id, t.UUID.String())\n\n\t\tvar output []string\n\t\tstderr := make(chan string)\n\t\tstdout := make(chan string)\n\n\t\t\/\/ drain stdout to the output[] array\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ts, ok := <-stdout\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\toutput = append(output, s)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ relay messages on stderr to the updates\n\t\t\/\/ channel, wrapped in a WorkerUpdate struct\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\ts, ok := <-stderr\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tupdates <- WorkerUpdate{\n\t\t\t\t\tTask: t.UUID,\n\t\t\t\t\tOp: OUTPUT,\n\t\t\t\t\tOutput: s,\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ run the task...\n\t\terr := t.Run(stdout, stderr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"oops: %s\\n\", err)\n\t\t}\n\n\t\tif t.Op == BACKUP {\n\t\t\t\/\/ parse JSON from standard output and get the restore key\n\t\t\t\/\/ (this might fail, we might not get a key, etc.)\n\n\t\t\t\/\/ FIXME: stop the drain goroutine for stdout.\n\t\t\t\/\/ FIXME: (geoff noticed some data races here, so that may just happen\n\t\t\t\/\/ when he fixes those)\n\t\t\tv := struct {\n\t\t\t\tKey string\n\t\t\t}{}\n\n\t\t\tbuf := bytes.NewBufferString(strings.Join(output, \"\"))\n\t\t\tdec := json.NewDecoder(buf)\n\t\t\terr := dec.Decode(&v)\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"uh-oh: %s\\n\", err)\n\n\t\t\t} else {\n\t\t\t\tupdates <- WorkerUpdate{\n\t\t\t\t\tTask: t.UUID,\n\t\t\t\t\tOp: RESTORE_KEY,\n\t\t\t\t\tOutput: v.Key,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ signal to the supervisor that we finished\n\t\tupdates <- WorkerUpdate{\n\t\t\tTask: t.UUID,\n\t\t\tOp: STOPPED,\n\t\t\tStoppedAt: time.Now(),\n\t\t}\n\t}\n}\n\nfunc (s *Supervisor) SpawnWorker() {\n\ts.nextWorker += 1\n\tgo worker(s.nextWorker, s.workers, s.updates)\n}\n<commit_msg>Fix Data Race conditions in worker.go<commit_after>package supervisor\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pborman\/uuid\"\n\t\"time\"\n\t\"bytes\"\n\t\"strings\"\n\t\"encoding\/json\"\n)\n\ntype UpdateOp int\n\nconst (\n\tSTOPPED UpdateOp = iota\n\tOUTPUT\n\tRESTORE_KEY\n)\n\ntype WorkerUpdate struct {\n\tTask uuid.UUID\n\tOp UpdateOp\n\tStoppedAt time.Time\n\tOutput string\n}\n\nfunc worker(id uint, work chan Task, updates chan WorkerUpdate) {\n\tfor t := range work {\n\t\tfmt.Printf(\"worker %d received task %v\\n\", id, t.UUID.String())\n\n\t\toutput := make(chan string)\n\t\tstderr := make(chan string)\n\t\tstdout := make(chan string)\n\n\t\t\/\/ drain stdout to the output[] array\n\t\tgo func(out chan string, in chan string) {\n\t\t\tvar b []string\n\t\t\tfor {\n\t\t\t\ts, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb = append(b, s)\n\t\t\t}\n\n\t\t\tout <- strings.Join(b, \"\")\n\t\t\tclose(out)\n\t\t}(output, stdout)\n\n\t\t\/\/ relay messages on stderr to the updates\n\t\t\/\/ channel, wrapped in a WorkerUpdate struct\n\t\tgo func(t Task, in chan string) {\n\t\t\tfor {\n\t\t\t\ts, ok := <-in\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tupdates <- WorkerUpdate{\n\t\t\t\t\tTask: t.UUID,\n\t\t\t\t\tOp: OUTPUT,\n\t\t\t\t\tOutput: s,\n\t\t\t\t}\n\t\t\t}\n\t\t}(t, stderr)\n\n\t\t\/\/ run the task...\n\t\terr := t.Run(stdout, stderr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"oops: %s\\n\", err)\n\t\t}\n\n\t\tif t.Op == BACKUP {\n\t\t\t\/\/ parse JSON from standard output and get the restore key\n\t\t\t\/\/ (this might fail, we might not get a key, etc.)\n\n\t\t\t\/\/ FIXME: stop the drain goroutine for stdout.\n\t\t\t\/\/ FIXME: (geoff noticed some data races here, so that may just happen\n\t\t\t\/\/ when he fixes those)\n\t\t\tv := struct {\n\t\t\t\tKey string\n\t\t\t}{}\n\n\t\t\tbuf := bytes.NewBufferString(<-output)\n\t\t\tdec := json.NewDecoder(buf)\n\t\t\terr := dec.Decode(&v)\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"uh-oh: %s\\n\", err)\n\n\t\t\t} else {\n\t\t\t\tupdates <- WorkerUpdate{\n\t\t\t\t\tTask: t.UUID,\n\t\t\t\t\tOp: RESTORE_KEY,\n\t\t\t\t\tOutput: v.Key,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ signal to the supervisor that we finished\n\t\tupdates <- WorkerUpdate{\n\t\t\tTask: t.UUID,\n\t\t\tOp: STOPPED,\n\t\t\tStoppedAt: time.Now(),\n\t\t}\n\t}\n}\n\nfunc (s *Supervisor) SpawnWorker() {\n\ts.nextWorker += 1\n\tgo worker(s.nextWorker, s.workers, s.updates)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package router is a network\/router selector\npackage router\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/micro\/go-micro\/client\/selector\"\n\t\"github.com\/micro\/go-micro\/network\/router\"\n\t\"github.com\/micro\/go-micro\/registry\"\n)\n\ntype routerSelector struct {\n\topts selector.Options\n\n\t\/\/ the router\n\tr router.Router\n}\n\ntype routerKey struct{}\n\nfunc (r *routerSelector) Init(opts ...selector.Option) error {\n\t\/\/ no op\n\treturn nil\n}\n\nfunc (r *routerSelector) Options() selector.Options {\n\treturn r.opts\n}\n\nfunc (r *routerSelector) Select(service string, opts ...selector.SelectOption) (selector.Next, error) {\n\t\/\/ lookup router for routes for the service\n\troutes, err := r.r.Table().Lookup(router.NewQuery(\n\t\trouter.QueryDestination(service),\n\t))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ no routes return not found error\n\tif len(routes) == 0 {\n\t\treturn nil, selector.ErrNotFound\n\t}\n\n\t\/\/ TODO: apply filters by pseudo constructing service\n\n\t\/\/ sort the routes based on metric\n\tsort.Slice(routes, func(i, j int) bool {\n\t\treturn routes[i].Metric < routes[j].Metric\n\t})\n\n\t\/\/ roundrobin assuming routes are in metric preference order\n\tvar i int\n\tvar mtx sync.Mutex\n\n\treturn func() (*registry.Node, error) {\n\t\t\/\/ get index and increment counter with every call to next\n\t\tmtx.Lock()\n\t\tidx := i\n\t\ti++\n\t\tmtx.Unlock()\n\n\t\t\/\/ get route based on idx\n\t\troute := routes[idx%len(routes)]\n\n\t\t\/\/ defaults to gateway and no port\n\t\taddress := route.Gateway\n\t\tport := 0\n\n\t\t\/\/ check if its host:port\n\t\thost, pr, err := net.SplitHostPort(address)\n\t\tif err == nil {\n\t\t\tpp, _ := strconv.Atoi(pr)\n\t\t\t\/\/ set port\n\t\t\tport = pp\n\t\t\t\/\/ set address\n\t\t\taddress = host\n\t\t}\n\n\t\t\/\/ return as a node\n\t\treturn ®istry.Node{\n\t\t\t\/\/ TODO: add id and metadata if we can\n\t\t\tAddress: address,\n\t\t\tPort: port,\n\t\t}, nil\n\t}, nil\n}\n\nfunc (r *routerSelector) Mark(service string, node *registry.Node, err error) {\n\t\/\/ TODO: pass back metrics or information to the router\n\treturn\n}\n\nfunc (r *routerSelector) Reset(service string) {\n\t\/\/ TODO: reset the metrics or information at the router\n\treturn\n}\n\nfunc (r *routerSelector) Close() error {\n\t\/\/ stop the router advertisements\n\treturn r.r.Stop()\n}\n\nfunc (r *routerSelector) String() string {\n\treturn \"router\"\n}\n\n\/\/ NewSelector returns a new router based selector\nfunc NewSelector(opts ...selector.Option) selector.Selector {\n\toptions := selector.Options{\n\t\tContext: context.Background(),\n\t}\n\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\t\/\/ set default registry if not set\n\tif options.Registry == nil {\n\t\toptions.Registry = registry.DefaultRegistry\n\t}\n\n\t\/\/ try get from the context\n\tr, ok := options.Context.Value(routerKey{}).(router.Router)\n\tif !ok {\n\t\t\/\/ TODO: Use router.DefaultRouter?\n\t\tr = router.NewRouter(\n\t\t\trouter.Registry(options.Registry),\n\t\t)\n\t}\n\n\t\/\/ start the router advertisements\n\tr.Advertise()\n\n\treturn &routerSelector{\n\t\topts: options,\n\t\tr: r,\n\t}\n}\n\n\/\/ WithRouter sets the router as an option\nfunc WithRouter(r router.Router) selector.Option {\n\treturn func(o *selector.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, routerKey{}, r)\n\t}\n}\n<commit_msg>Add remote lookup via router selector<commit_after>\/\/ Package router is a network\/router selector\npackage router\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com\/micro\/go-micro\/client\"\n\t\"github.com\/micro\/go-micro\/client\/selector\"\n\t\"github.com\/micro\/go-micro\/network\/router\"\n\tpb \"github.com\/micro\/go-micro\/network\/router\/proto\"\n\t\"github.com\/micro\/go-micro\/registry\"\n)\n\ntype routerSelector struct {\n\topts selector.Options\n\n\t\/\/ the router\n\tr router.Router\n\n\t\/\/ the client for the remote router\n\tc pb.RouterService\n\n\t\/\/ address of the remote router\n\taddr string\n\n\t\/\/ whether to use the remote router\n\tremote bool\n}\n\ntype clientKey struct{}\ntype routerKey struct{}\n\n\/\/ getRoutes returns the routes whether they are remote or local\nfunc (r *routerSelector) getRoutes(service string) ([]router.Route, error) {\n\tif !r.remote {\n\t\t\/\/ lookup router for routes for the service\n\t\treturn r.r.Table().Lookup(router.NewQuery(\n\t\t\trouter.QueryDestination(service),\n\t\t))\n\t}\n\n\t\/\/ lookup the remote router\n\n\tvar clientOpts []client.CallOption\n\n\t\/\/ set the remote address if specified\n\tif len(r.addr) > 0 {\n\t\tclientOpts = append(clientOpts, client.WithAddress(r.addr))\n\t}\n\n\t\/\/ call the router\n\tpbRoutes, err := r.c.Lookup(context.Background(), &pb.LookupRequest{\n\t\tQuery: &pb.Query{\n\t\t\tDestination: service,\n\t\t},\n\t}, clientOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar routes []router.Route\n\n\t\/\/ convert from pb to []*router.Route\n\tfor _, r := range pbRoutes.Routes {\n\t\troutes = append(routes, router.Route{\n\t\t\tDestination: r.Destination,\n\t\t\tGateway: r.Gateway,\n\t\t\tRouter: r.Router,\n\t\t\tNetwork: r.Network,\n\t\t\tMetric: int(r.Metric),\n\t\t})\n\t}\n\n\treturn routes, nil\n}\n\nfunc (r *routerSelector) Init(opts ...selector.Option) error {\n\t\/\/ no op\n\treturn nil\n}\n\nfunc (r *routerSelector) Options() selector.Options {\n\treturn r.opts\n}\n\nfunc (r *routerSelector) Select(service string, opts ...selector.SelectOption) (selector.Next, error) {\n\t\/\/ TODO: pull routes asynchronously and cache\n\troutes, err := r.getRoutes(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ no routes return not found error\n\tif len(routes) == 0 {\n\t\treturn nil, selector.ErrNotFound\n\t}\n\n\t\/\/ TODO: apply filters by pseudo constructing service\n\n\t\/\/ sort the routes based on metric\n\tsort.Slice(routes, func(i, j int) bool {\n\t\treturn routes[i].Metric < routes[j].Metric\n\t})\n\n\t\/\/ roundrobin assuming routes are in metric preference order\n\tvar i int\n\tvar mtx sync.Mutex\n\n\treturn func() (*registry.Node, error) {\n\t\t\/\/ get index and increment counter with every call to next\n\t\tmtx.Lock()\n\t\tidx := i\n\t\ti++\n\t\tmtx.Unlock()\n\n\t\t\/\/ get route based on idx\n\t\troute := routes[idx%len(routes)]\n\n\t\t\/\/ defaults to gateway and no port\n\t\taddress := route.Gateway\n\t\tport := 0\n\n\t\t\/\/ check if its host:port\n\t\thost, pr, err := net.SplitHostPort(address)\n\t\tif err == nil {\n\t\t\tpp, _ := strconv.Atoi(pr)\n\t\t\t\/\/ set port\n\t\t\tport = pp\n\t\t\t\/\/ set address\n\t\t\taddress = host\n\t\t}\n\n\t\t\/\/ return as a node\n\t\treturn ®istry.Node{\n\t\t\t\/\/ TODO: add id and metadata if we can\n\t\t\tAddress: address,\n\t\t\tPort: port,\n\t\t}, nil\n\t}, nil\n}\n\nfunc (r *routerSelector) Mark(service string, node *registry.Node, err error) {\n\t\/\/ TODO: pass back metrics or information to the router\n\treturn\n}\n\nfunc (r *routerSelector) Reset(service string) {\n\t\/\/ TODO: reset the metrics or information at the router\n\treturn\n}\n\nfunc (r *routerSelector) Close() error {\n\t\/\/ stop the router advertisements\n\treturn r.r.Stop()\n}\n\nfunc (r *routerSelector) String() string {\n\treturn \"router\"\n}\n\n\/\/ NewSelector returns a new router based selector\nfunc NewSelector(opts ...selector.Option) selector.Selector {\n\toptions := selector.Options{\n\t\tContext: context.Background(),\n\t}\n\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\t\/\/ set default registry if not set\n\tif options.Registry == nil {\n\t\toptions.Registry = registry.DefaultRegistry\n\t}\n\n\t\/\/ try get router from the context\n\tr, ok := options.Context.Value(routerKey{}).(router.Router)\n\tif !ok {\n\t\t\/\/ TODO: Use router.DefaultRouter?\n\t\tr = router.NewRouter(\n\t\t\trouter.Registry(options.Registry),\n\t\t)\n\t}\n\n\t\/\/ try get client from the context\n\tc, ok := options.Context.Value(clientKey{}).(client.Client)\n\tif !ok {\n\t\tc = client.DefaultClient\n\t}\n\n\t\/\/ get the router from env vars if its a remote service\n\tremote := true\n\trouterName := os.Getenv(\"MICRO_ROUTER\")\n\trouterAddress := os.Getenv(\"MICRO_ROUTER_ADDRESS\")\n\n\t\/\/ start the router advertisements if we're running it locally\n\tif len(routerName) == 0 && len(routerAddress) == 0 {\n\t\tgo r.Advertise()\n\t\tremote = false\n\t}\n\n\treturn &routerSelector{\n\t\topts: options,\n\t\t\/\/ set the internal router\n\t\tr: r,\n\t\t\/\/ set the client\n\t\tc: pb.NewRouterService(routerName, c),\n\t\t\/\/ address of router\n\t\taddr: routerAddress,\n\t\t\/\/ let ourselves know to use the remote router\n\t\tremote: remote,\n\t}\n}\n\n\/\/ WithClient sets the client for the request\nfunc WithClient(c client.Client) selector.Option {\n\treturn func(o *selector.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, clientKey{}, c)\n\t}\n}\n\n\/\/ WithRouter sets the router as an option\nfunc WithRouter(r router.Router) selector.Option {\n\treturn func(o *selector.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, routerKey{}, r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ CommitConfig encapsulates the user configuration options, but is also\n\/\/ used to pass some state between functions (FoundFixes).\ntype CommitConfig struct {\n\t\/\/ set when a fixes #XXX\" commit is found\n\tFoundFixes bool\n\n\t\/\/ All commits must have a sign-off\n\tNeedSOBS bool\n\n\t\/\/ Atleast one commit must specify a bug that it fixes.\n\tNeedFixes bool\n\n\tMaxSubjectLineLength int\n\tMaxBodyLineLength int\n\n\tSobString string\n\tFixesString string\n\n\tFixesPattern *regexp.Regexp\n\tSobPattern *regexp.Regexp\n}\n\nconst (\n\tdefaultSobString = \"Signed-off-by\"\n\tdefaultFixesString = \"Fixes\"\n\n\tdefaultMaxSubjectLineLength = 75\n\tdefaultMaxBodyLineLength = 72\n)\n\nvar (\n\t\/\/ Full path to git(1) command\n\tgitPath = \"\"\n\tverbose = false\n\n\terrNoCommit = errors.New(\"Need commit\")\n\terrNoBranch = errors.New(\"Need branch\")\n\terrNoConfig = errors.New(\"Need config\")\n)\n\nfunc init() {\n\tvar err error\n\tgitPath, err = exec.LookPath(\"git\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: cannot find git in PATH\\n\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc checkCommitSubject(config *CommitConfig, commit, subject string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn fmt.Errorf(\"Commit not specified\")\n\t}\n\n\tif subject == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: empty subject\", commit)\n\t}\n\n\tif strings.TrimSpace(subject) == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: pure whitespace subject\", commit)\n\t}\n\n\tsubsystemPattern := regexp.MustCompile(`^[^ ][^ ]*.*:`)\n\tmatches := subsystemPattern.FindStringSubmatch(subject)\n\tif matches == nil {\n\t\treturn fmt.Errorf(\"Commit %v: Failed to find subsystem in subject: %q\",\n\t\t\tcommit, subject)\n\t}\n\n\tlength := len(subject)\n\tif length > config.MaxSubjectLineLength {\n\t\treturn fmt.Errorf(\"commit %v: subject too long (max %v, got %v): %q\",\n\t\t\tcommit, config.MaxSubjectLineLength, length, subject)\n\t}\n\n\tif config.FixesString != \"\" && config.FixesPattern != nil {\n\t\tmatches = config.FixesPattern.FindStringSubmatch(subject)\n\n\t\tif matches != nil {\n\t\t\tconfig.FoundFixes = true\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkCommitBodyLine(config *CommitConfig, commit string, line string,\n\tlineNum int, nonWhitespaceOnlyLine *int,\n\tsobPattern *regexp.Regexp, sobLine *int) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif line == \"\" {\n\t\treturn nil\n\t}\n\n\tif *nonWhitespaceOnlyLine == -1 {\n\t\tif strings.TrimSpace(line) != \"\" {\n\t\t\t*nonWhitespaceOnlyLine = lineNum\n\t\t}\n\t}\n\n\t\/\/ Check first character of line. If it's _not_\n\t\/\/ alphabetic, length limits don't apply.\n\trune, _ := utf8.DecodeRune([]byte{line[0]})\n\n\tif !unicode.IsLetter(rune) {\n\t\treturn nil\n\t}\n\n\tfixesMatches := config.FixesPattern.FindStringSubmatch(line)\n\tif fixesMatches != nil {\n\t\tconfig.FoundFixes = true\n\t}\n\n\tsobMatch := sobPattern.FindStringSubmatch(line)\n\tif sobMatch != nil {\n\t\t*sobLine = lineNum\n\t}\n\n\t\/\/ Note: SOB lines are *NOT* checked for max line\n\t\/\/ length: it isn't reasonable to penalise someone\n\t\/\/ people with long names ;)\n\tif *sobLine != -1 {\n\t\treturn nil\n\t}\n\n\tlength := len(line)\n\tif length > config.MaxBodyLineLength {\n\t\treturn fmt.Errorf(\"commit %v: body line %d too long (max %v, got %v): %q\",\n\t\t\tcommit, 1+lineNum, config.MaxBodyLineLength, length, line)\n\t}\n\n\treturn nil\n}\n\nfunc checkCommitBody(config *CommitConfig, commit string, body []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn fmt.Errorf(\"Commit not specified\")\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Commit %v: empty body\", commit)\n\t}\n\n\t\/\/ note that sign-off lines must start in the first column\n\tsobPattern := regexp.MustCompile(fmt.Sprintf(\"^%s:\", config.SobString))\n\n\t\/\/ line number which contains a sign-off line.\n\tsobLine := -1\n\n\t\/\/ line number containing only whitespace\n\tnonWhitespaceOnlyLine := -1\n\n\tfor i, line := range body {\n\t\terr := checkCommitBodyLine(config, commit, line, i,\n\t\t\t&nonWhitespaceOnlyLine, sobPattern, &sobLine)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif nonWhitespaceOnlyLine == -1 {\n\t\treturn fmt.Errorf(\"Commit %v: pure whitespace body\", commit)\n\t}\n\n\tif config.NeedSOBS && sobLine == -1 {\n\t\treturn fmt.Errorf(\"Commit %v: no %v specified\", commit, config.SobString)\n\t}\n\n\tif sobLine == nonWhitespaceOnlyLine {\n\t\treturn fmt.Errorf(\"Commit %v: single-line %q body not permitted\", commit, config.SobString)\n\t}\n\n\treturn nil\n}\n\nfunc getCommitRange(commit, branch string) ([]string, error) {\n\tif commit == \"\" {\n\t\treturn nil, errNoCommit\n\t}\n\n\tif branch == \"\" {\n\t\treturn nil, errNoBranch\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"rev-list\")\n\targs = append(args, \"--no-merges\")\n\targs = append(args, \"--reverse\")\n\targs = append(args, fmt.Sprintf(\"%s..\", branch))\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"),\n\t\t\t\terr)\n\t}\n\n\tlines := strings.Split(string(bytes), \"\\n\")\n\n\t\/\/ Remove last line if empty\n\tlength := len(lines)\n\tlast := lines[length-1]\n\tif last == \"\" {\n\t\tlines = lines[:length-1]\n\t}\n\n\treturn lines, nil\n}\n\nfunc getCommitSubject(commit string) (string, error) {\n\tif commit == \"\" {\n\t\treturn \"\", errNoCommit\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"log\")\n\targs = append(args, \"-1\")\n\targs = append(args, \"--pretty=%s\")\n\targs = append(args, commit)\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn \"\",\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"), err)\n\t}\n\n\treturn string(bytes), nil\n}\n\nfunc getCommitBody(commit string) ([]string, error) {\n\tif commit == \"\" {\n\t\treturn []string{}, errNoCommit\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"log\")\n\targs = append(args, \"-1\")\n\targs = append(args, \"--pretty=%b\")\n\targs = append(args, commit)\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn []string{},\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"), err)\n\t}\n\n\tlines := strings.Split(string(bytes), \"\\n\")\n\n\t\/\/ Remove last line if empty\n\tlength := len(lines)\n\tlast := lines[length-1]\n\tif last == \"\" {\n\t\tlines = lines[:length-1]\n\t}\n\n\treturn lines, nil\n}\n\nfunc checkCommitFull(config *CommitConfig, commit, subject string, body []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tif subject == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: empty subject\", commit)\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Commit %v: empty body\", commit)\n\t}\n\n\terr := checkCommitSubject(config, commit, subject)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = checkCommitBody(config, commit, body)\n\treturn err\n}\n\nfunc checkCommit(config *CommitConfig, commit string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tsubject, err := getCommitSubject(commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := getCommitBody(commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn checkCommitFull(config, commit, subject, body)\n}\n\n\/\/ checkCommits performs checks on specified list of commits\nfunc checkCommits(config *CommitConfig, commits []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commits == nil {\n\t\treturn errNoCommit\n\t}\n\n\tconfig.FixesPattern = regexp.MustCompile(fmt.Sprintf(\"%s:* *#\\\\d+\", config.FixesString))\n\n\tfor _, commit := range commits {\n\t\tif verbose {\n\t\t\tfmt.Printf(\"Checking commit %s\\n\", commit)\n\t\t}\n\t\terr := checkCommit(config, commit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif config.NeedFixes && !config.FoundFixes {\n\t\treturn fmt.Errorf(\"No %q found\", config.FixesString)\n\t}\n\n\treturn nil\n}\n\n\/\/ preChecks performs checks on the range of commits described by commit\n\/\/ and branch.\nfunc preChecks(config *CommitConfig, commit, branch string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tif branch == \"\" {\n\t\tbranch = \"master\"\n\t}\n\n\tcommits, err := getCommitRange(commit, branch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif verbose {\n\t\tfmt.Printf(\"Found %d commits\\n\", len(commits))\n\t}\n\n\treturn checkCommits(config, commits)\n}\n\n\/\/ NewCommitConfig creates a new CommitConfig object.\nfunc NewCommitConfig(needFixes, needSignOffs bool, fixesPrefix, signoffPrefix string, bodyLength, subjectLength int) *CommitConfig {\n\tconfig := &CommitConfig{\n\t\tNeedSOBS: needSignOffs,\n\t\tNeedFixes: needFixes,\n\t\tMaxBodyLineLength: bodyLength,\n\t\tMaxSubjectLineLength: subjectLength,\n\t\tSobString: defaultSobString,\n\t\tFixesString: defaultFixesString,\n\t}\n\n\tif config.MaxBodyLineLength == 0 {\n\t\tconfig.MaxBodyLineLength = defaultMaxBodyLineLength\n\t}\n\n\tif config.MaxSubjectLineLength == 0 {\n\t\tconfig.MaxSubjectLineLength = defaultMaxSubjectLineLength\n\t}\n\n\tif fixesPrefix != \"\" {\n\t\tconfig.FixesString = fixesPrefix\n\t}\n\n\tif signoffPrefix != \"\" {\n\t\tconfig.SobString = signoffPrefix\n\t}\n\n\treturn config\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"commitchecks\"\n\tapp.Description = \"perform checks on git commits\"\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"need-fixes, f\",\n\t\t\tUsage: fmt.Sprintf(\"Ensure atleast one commit has a %q entry\", defaultFixesString),\n\t\t},\n\n\t\tcli.BoolFlag{\n\t\t\tName: \"need-sign-offs, s\",\n\t\t\tUsage: fmt.Sprintf(\"Ensure all commits have a %q entry\", defaultSobString),\n\t\t},\n\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose\",\n\t\t\tUsage: \"Display informational messages\",\n\t\t\tEnvVar: \"CHECKCOMMITS_VERBOSE\",\n\t\t\tDestination: &verbose,\n\t\t},\n\n\t\tcli.StringFlag{\n\t\t\tName: \"fixes-prefix\",\n\t\t\tUsage: fmt.Sprintf(\"Fixes prefix used as an alternative to %q\", defaultFixesString),\n\t\t},\n\n\t\tcli.StringFlag{\n\t\t\tName: \"sign-off-prefix\",\n\t\t\tUsage: fmt.Sprintf(\"Sign-off prefix used as an alternative to %q\", defaultSobString),\n\t\t},\n\n\t\tcli.UintFlag{\n\t\t\tName: \"body-length\",\n\t\t\tUsage: \"Specify maximum body line length\",\n\t\t\tValue: uint(defaultMaxBodyLineLength),\n\t\t},\n\n\t\tcli.UintFlag{\n\t\t\tName: \"subject-length\",\n\t\t\tUsage: \"Specify maximum subject line length\",\n\t\t\tValue: uint(defaultMaxSubjectLineLength),\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tvar commit string\n\t\tvar branch string\n\n\t\tcount := c.NArg()\n\n\t\tif count < 1 || count > 2 {\n\t\t\treturn fmt.Errorf(\"Usage: %s [options] <commit> [<branch>]\", c.App.Name)\n\t\t}\n\n\t\tif count >= 1 {\n\t\t\tcommit = c.Args().Get(0)\n\t\t}\n\n\t\tif count == 2 {\n\t\t\tbranch = c.Args().Get(1)\n\t\t}\n\n\t\tconfig := NewCommitConfig(c.Bool(\"need-fixes\"),\n\t\t\tc.Bool(\"need-sign-offs\"),\n\t\t\tc.String(\"fixes-prefix\"),\n\t\t\tc.String(\"sign-off-prefix\"),\n\t\t\tint(c.Uint(\"body-length\")),\n\t\t\tint(c.Uint(\"subject-length\")))\n\n\t\treturn preChecks(config, commit, branch)\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\n<commit_msg>checkcommits: Improve verbose message.<commit_after>\/\/ Copyright (c) 2017 Intel Corporation\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ CommitConfig encapsulates the user configuration options, but is also\n\/\/ used to pass some state between functions (FoundFixes).\ntype CommitConfig struct {\n\t\/\/ set when a fixes #XXX\" commit is found\n\tFoundFixes bool\n\n\t\/\/ All commits must have a sign-off\n\tNeedSOBS bool\n\n\t\/\/ Atleast one commit must specify a bug that it fixes.\n\tNeedFixes bool\n\n\tMaxSubjectLineLength int\n\tMaxBodyLineLength int\n\n\tSobString string\n\tFixesString string\n\n\tFixesPattern *regexp.Regexp\n\tSobPattern *regexp.Regexp\n}\n\nconst (\n\tdefaultSobString = \"Signed-off-by\"\n\tdefaultFixesString = \"Fixes\"\n\n\tdefaultMaxSubjectLineLength = 75\n\tdefaultMaxBodyLineLength = 72\n)\n\nvar (\n\t\/\/ Full path to git(1) command\n\tgitPath = \"\"\n\tverbose = false\n\n\terrNoCommit = errors.New(\"Need commit\")\n\terrNoBranch = errors.New(\"Need branch\")\n\terrNoConfig = errors.New(\"Need config\")\n)\n\nfunc init() {\n\tvar err error\n\tgitPath, err = exec.LookPath(\"git\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: cannot find git in PATH\\n\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc checkCommitSubject(config *CommitConfig, commit, subject string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn fmt.Errorf(\"Commit not specified\")\n\t}\n\n\tif subject == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: empty subject\", commit)\n\t}\n\n\tif strings.TrimSpace(subject) == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: pure whitespace subject\", commit)\n\t}\n\n\tsubsystemPattern := regexp.MustCompile(`^[^ ][^ ]*.*:`)\n\tmatches := subsystemPattern.FindStringSubmatch(subject)\n\tif matches == nil {\n\t\treturn fmt.Errorf(\"Commit %v: Failed to find subsystem in subject: %q\",\n\t\t\tcommit, subject)\n\t}\n\n\tlength := len(subject)\n\tif length > config.MaxSubjectLineLength {\n\t\treturn fmt.Errorf(\"commit %v: subject too long (max %v, got %v): %q\",\n\t\t\tcommit, config.MaxSubjectLineLength, length, subject)\n\t}\n\n\tif config.FixesString != \"\" && config.FixesPattern != nil {\n\t\tmatches = config.FixesPattern.FindStringSubmatch(subject)\n\n\t\tif matches != nil {\n\t\t\tconfig.FoundFixes = true\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkCommitBodyLine(config *CommitConfig, commit string, line string,\n\tlineNum int, nonWhitespaceOnlyLine *int,\n\tsobPattern *regexp.Regexp, sobLine *int) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif line == \"\" {\n\t\treturn nil\n\t}\n\n\tif *nonWhitespaceOnlyLine == -1 {\n\t\tif strings.TrimSpace(line) != \"\" {\n\t\t\t*nonWhitespaceOnlyLine = lineNum\n\t\t}\n\t}\n\n\t\/\/ Check first character of line. If it's _not_\n\t\/\/ alphabetic, length limits don't apply.\n\trune, _ := utf8.DecodeRune([]byte{line[0]})\n\n\tif !unicode.IsLetter(rune) {\n\t\treturn nil\n\t}\n\n\tfixesMatches := config.FixesPattern.FindStringSubmatch(line)\n\tif fixesMatches != nil {\n\t\tconfig.FoundFixes = true\n\t}\n\n\tsobMatch := sobPattern.FindStringSubmatch(line)\n\tif sobMatch != nil {\n\t\t*sobLine = lineNum\n\t}\n\n\t\/\/ Note: SOB lines are *NOT* checked for max line\n\t\/\/ length: it isn't reasonable to penalise someone\n\t\/\/ people with long names ;)\n\tif *sobLine != -1 {\n\t\treturn nil\n\t}\n\n\tlength := len(line)\n\tif length > config.MaxBodyLineLength {\n\t\treturn fmt.Errorf(\"commit %v: body line %d too long (max %v, got %v): %q\",\n\t\t\tcommit, 1+lineNum, config.MaxBodyLineLength, length, line)\n\t}\n\n\treturn nil\n}\n\nfunc checkCommitBody(config *CommitConfig, commit string, body []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn fmt.Errorf(\"Commit not specified\")\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Commit %v: empty body\", commit)\n\t}\n\n\t\/\/ note that sign-off lines must start in the first column\n\tsobPattern := regexp.MustCompile(fmt.Sprintf(\"^%s:\", config.SobString))\n\n\t\/\/ line number which contains a sign-off line.\n\tsobLine := -1\n\n\t\/\/ line number containing only whitespace\n\tnonWhitespaceOnlyLine := -1\n\n\tfor i, line := range body {\n\t\terr := checkCommitBodyLine(config, commit, line, i,\n\t\t\t&nonWhitespaceOnlyLine, sobPattern, &sobLine)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif nonWhitespaceOnlyLine == -1 {\n\t\treturn fmt.Errorf(\"Commit %v: pure whitespace body\", commit)\n\t}\n\n\tif config.NeedSOBS && sobLine == -1 {\n\t\treturn fmt.Errorf(\"Commit %v: no %v specified\", commit, config.SobString)\n\t}\n\n\tif sobLine == nonWhitespaceOnlyLine {\n\t\treturn fmt.Errorf(\"Commit %v: single-line %q body not permitted\", commit, config.SobString)\n\t}\n\n\treturn nil\n}\n\nfunc getCommitRange(commit, branch string) ([]string, error) {\n\tif commit == \"\" {\n\t\treturn nil, errNoCommit\n\t}\n\n\tif branch == \"\" {\n\t\treturn nil, errNoBranch\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"rev-list\")\n\targs = append(args, \"--no-merges\")\n\targs = append(args, \"--reverse\")\n\targs = append(args, fmt.Sprintf(\"%s..\", branch))\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"),\n\t\t\t\terr)\n\t}\n\n\tlines := strings.Split(string(bytes), \"\\n\")\n\n\t\/\/ Remove last line if empty\n\tlength := len(lines)\n\tlast := lines[length-1]\n\tif last == \"\" {\n\t\tlines = lines[:length-1]\n\t}\n\n\treturn lines, nil\n}\n\nfunc getCommitSubject(commit string) (string, error) {\n\tif commit == \"\" {\n\t\treturn \"\", errNoCommit\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"log\")\n\targs = append(args, \"-1\")\n\targs = append(args, \"--pretty=%s\")\n\targs = append(args, commit)\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn \"\",\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"), err)\n\t}\n\n\treturn string(bytes), nil\n}\n\nfunc getCommitBody(commit string) ([]string, error) {\n\tif commit == \"\" {\n\t\treturn []string{}, errNoCommit\n\t}\n\n\tvar args []string\n\n\targs = append(args, gitPath)\n\targs = append(args, \"log\")\n\targs = append(args, \"-1\")\n\targs = append(args, \"--pretty=%b\")\n\targs = append(args, commit)\n\n\tcmdLine := exec.Command(args[0], args[1:]...)\n\n\tbytes, err := cmdLine.Output()\n\tif err != nil {\n\t\treturn []string{},\n\t\t\tfmt.Errorf(\"Failed to run command %v: %v\",\n\t\t\t\tstrings.Join(args, \" \"), err)\n\t}\n\n\tlines := strings.Split(string(bytes), \"\\n\")\n\n\t\/\/ Remove last line if empty\n\tlength := len(lines)\n\tlast := lines[length-1]\n\tif last == \"\" {\n\t\tlines = lines[:length-1]\n\t}\n\n\treturn lines, nil\n}\n\nfunc checkCommitFull(config *CommitConfig, commit, subject string, body []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tif subject == \"\" {\n\t\treturn fmt.Errorf(\"Commit %v: empty subject\", commit)\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Commit %v: empty body\", commit)\n\t}\n\n\terr := checkCommitSubject(config, commit, subject)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = checkCommitBody(config, commit, body)\n\treturn err\n}\n\nfunc checkCommit(config *CommitConfig, commit string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tsubject, err := getCommitSubject(commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := getCommitBody(commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn checkCommitFull(config, commit, subject, body)\n}\n\n\/\/ checkCommits performs checks on specified list of commits\nfunc checkCommits(config *CommitConfig, commits []string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commits == nil {\n\t\treturn errNoCommit\n\t}\n\n\tconfig.FixesPattern = regexp.MustCompile(fmt.Sprintf(\"%s:* *#\\\\d+\", config.FixesString))\n\n\tfor _, commit := range commits {\n\t\tif verbose {\n\t\t\tfmt.Printf(\"Checking commit %s\\n\", commit)\n\t\t}\n\t\terr := checkCommit(config, commit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif config.NeedFixes && !config.FoundFixes {\n\t\treturn fmt.Errorf(\"No %q found\", config.FixesString)\n\t}\n\n\treturn nil\n}\n\n\/\/ preChecks performs checks on the range of commits described by commit\n\/\/ and branch.\nfunc preChecks(config *CommitConfig, commit, branch string) error {\n\tif config == nil {\n\t\treturn errNoConfig\n\t}\n\n\tif commit == \"\" {\n\t\treturn errNoCommit\n\t}\n\n\tif branch == \"\" {\n\t\tbranch = \"master\"\n\t}\n\n\tcommits, err := getCommitRange(commit, branch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif verbose {\n\t\tfmt.Printf(\"Found %d commits between commit %v and branch %v\\n\",\n\t\t\tlen(commits), commit, branch)\n\t}\n\n\treturn checkCommits(config, commits)\n}\n\n\/\/ NewCommitConfig creates a new CommitConfig object.\nfunc NewCommitConfig(needFixes, needSignOffs bool, fixesPrefix, signoffPrefix string, bodyLength, subjectLength int) *CommitConfig {\n\tconfig := &CommitConfig{\n\t\tNeedSOBS: needSignOffs,\n\t\tNeedFixes: needFixes,\n\t\tMaxBodyLineLength: bodyLength,\n\t\tMaxSubjectLineLength: subjectLength,\n\t\tSobString: defaultSobString,\n\t\tFixesString: defaultFixesString,\n\t}\n\n\tif config.MaxBodyLineLength == 0 {\n\t\tconfig.MaxBodyLineLength = defaultMaxBodyLineLength\n\t}\n\n\tif config.MaxSubjectLineLength == 0 {\n\t\tconfig.MaxSubjectLineLength = defaultMaxSubjectLineLength\n\t}\n\n\tif fixesPrefix != \"\" {\n\t\tconfig.FixesString = fixesPrefix\n\t}\n\n\tif signoffPrefix != \"\" {\n\t\tconfig.SobString = signoffPrefix\n\t}\n\n\treturn config\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"commitchecks\"\n\tapp.Description = \"perform checks on git commits\"\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"need-fixes, f\",\n\t\t\tUsage: fmt.Sprintf(\"Ensure atleast one commit has a %q entry\", defaultFixesString),\n\t\t},\n\n\t\tcli.BoolFlag{\n\t\t\tName: \"need-sign-offs, s\",\n\t\t\tUsage: fmt.Sprintf(\"Ensure all commits have a %q entry\", defaultSobString),\n\t\t},\n\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose\",\n\t\t\tUsage: \"Display informational messages\",\n\t\t\tEnvVar: \"CHECKCOMMITS_VERBOSE\",\n\t\t\tDestination: &verbose,\n\t\t},\n\n\t\tcli.StringFlag{\n\t\t\tName: \"fixes-prefix\",\n\t\t\tUsage: fmt.Sprintf(\"Fixes prefix used as an alternative to %q\", defaultFixesString),\n\t\t},\n\n\t\tcli.StringFlag{\n\t\t\tName: \"sign-off-prefix\",\n\t\t\tUsage: fmt.Sprintf(\"Sign-off prefix used as an alternative to %q\", defaultSobString),\n\t\t},\n\n\t\tcli.UintFlag{\n\t\t\tName: \"body-length\",\n\t\t\tUsage: \"Specify maximum body line length\",\n\t\t\tValue: uint(defaultMaxBodyLineLength),\n\t\t},\n\n\t\tcli.UintFlag{\n\t\t\tName: \"subject-length\",\n\t\t\tUsage: \"Specify maximum subject line length\",\n\t\t\tValue: uint(defaultMaxSubjectLineLength),\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tvar commit string\n\t\tvar branch string\n\n\t\tcount := c.NArg()\n\n\t\tif count < 1 || count > 2 {\n\t\t\treturn fmt.Errorf(\"Usage: %s [options] <commit> [<branch>]\", c.App.Name)\n\t\t}\n\n\t\tif count >= 1 {\n\t\t\tcommit = c.Args().Get(0)\n\t\t}\n\n\t\tif count == 2 {\n\t\t\tbranch = c.Args().Get(1)\n\t\t}\n\n\t\tconfig := NewCommitConfig(c.Bool(\"need-fixes\"),\n\t\t\tc.Bool(\"need-sign-offs\"),\n\t\t\tc.String(\"fixes-prefix\"),\n\t\t\tc.String(\"sign-off-prefix\"),\n\t\t\tint(c.Uint(\"body-length\")),\n\t\t\tint(c.Uint(\"subject-length\")))\n\n\t\treturn preChecks(config, commit, branch)\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ERROR: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\t\"github.com\/hashicorp\/scada-client\/scada\"\n\t\"github.com\/hashicorp\/vault\/version\"\n)\n\ntype SCADAListener struct {\n\tln net.Listener\n\tscadaProvider *scada.Provider\n}\n\nfunc (s *SCADAListener) Accept() (net.Conn, error) {\n\treturn s.ln.Accept()\n}\n\nfunc (s *SCADAListener) Close() error {\n\ts.scadaProvider.Shutdown()\n\treturn s.ln.Close()\n}\n\nfunc (s *SCADAListener) Addr() net.Addr {\n\treturn s.ln.Addr()\n}\n\nfunc atlasListenerFactory(config map[string]string, logger io.Writer) (net.Listener, map[string]string, ReloadFunc, error) {\n\tscadaConfig := &scada.Config{\n\t\tService: \"vault\",\n\t\tVersion: version.GetVersion().VersionNumber(),\n\t\tResourceType: \"vault-cluster\",\n\t\tMeta: map[string]string{\n\t\t\t\"node_id\": config[\"node_id\"],\n\t\t},\n\t\tAtlas: scada.AtlasConfig{\n\t\t\tEndpoint: config[\"endpoint\"],\n\t\t\tInfrastructure: config[\"infrastructure\"],\n\t\t\tToken: config[\"token\"],\n\t\t},\n\t}\n\n\tprovider, list, err := scada.NewHTTPProvider(scadaConfig, logger)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tln := &SCADAListener{\n\t\tln: list,\n\t\tscadaProvider: provider,\n\t}\n\n\tprops := map[string]string{\n\t\t\"addr\": \"Atlas\/SCADA\",\n\t\t\"infrastructure\": scadaConfig.Atlas.Infrastructure,\n\t}\n\n\treturn listenerWrapTLS(ln, props, config)\n}\n<commit_msg>Force tls_disable on scada connection inside outer TLS connection as it's not currently supported anyways<commit_after>package server\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\t\"github.com\/hashicorp\/scada-client\/scada\"\n\t\"github.com\/hashicorp\/vault\/version\"\n)\n\ntype SCADAListener struct {\n\tln net.Listener\n\tscadaProvider *scada.Provider\n}\n\nfunc (s *SCADAListener) Accept() (net.Conn, error) {\n\treturn s.ln.Accept()\n}\n\nfunc (s *SCADAListener) Close() error {\n\ts.scadaProvider.Shutdown()\n\treturn s.ln.Close()\n}\n\nfunc (s *SCADAListener) Addr() net.Addr {\n\treturn s.ln.Addr()\n}\n\nfunc atlasListenerFactory(config map[string]string, logger io.Writer) (net.Listener, map[string]string, ReloadFunc, error) {\n\tscadaConfig := &scada.Config{\n\t\tService: \"vault\",\n\t\tVersion: version.GetVersion().VersionNumber(),\n\t\tResourceType: \"vault-cluster\",\n\t\tMeta: map[string]string{\n\t\t\t\"node_id\": config[\"node_id\"],\n\t\t},\n\t\tAtlas: scada.AtlasConfig{\n\t\t\tEndpoint: config[\"endpoint\"],\n\t\t\tInfrastructure: config[\"infrastructure\"],\n\t\t\tToken: config[\"token\"],\n\t\t},\n\t}\n\n\tprovider, list, err := scada.NewHTTPProvider(scadaConfig, logger)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\tln := &SCADAListener{\n\t\tln: list,\n\t\tscadaProvider: provider,\n\t}\n\n\tprops := map[string]string{\n\t\t\"addr\": \"Atlas\/SCADA\",\n\t\t\"infrastructure\": scadaConfig.Atlas.Infrastructure,\n\t}\n\n\t\/\/ The outer connection is already TLS-enabled; this is just the listener\n\t\/\/ that reaches back inside that connection\n\tconfig[\"tls_disable\"] = \"1\"\n\n\treturn listenerWrapTLS(ln, props, config)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage perm\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/admpub\/nging\/application\/registry\/navigate\"\n)\n\ntype Map struct {\n\tV map[string]*Map `json:\",omitempty\" xml:\",omitempty\"`\n\tNav *navigate.Item `json:\",omitempty\" xml:\",omitempty\"`\n\tBehavior BehaviorPerms `json:\",omitempty\" xml:\",omitempty\"`\n}\n\n\/\/Import 导入菜单(用户缓存结果)\nfunc (m *Map) Import(navList *navigate.List) *Map {\n\tif navList == nil {\n\t\treturn m\n\t}\n\tfor _, nav := range *navList {\n\t\titem := NewMap()\n\t\titem.Nav = nav\n\t\tif _, ok := m.V[nav.Action]; ok {\n\t\t\tpanic(`The navigate name conflicts. Already existed name: ` + nav.Action)\n\t\t}\n\t\tm.V[nav.Action] = item\n\t\titem.Import(nav.Children)\n\t}\n\treturn m\n}\n\nfunc BuildPermActions(values []string) string {\n\tvar permActions string\n\n\tif len(values) > 0 && values[0] == `*` {\n\t\tpermActions = `*`\n\t\treturn permActions\n\t}\n\tvar prefix string\n\tfor _, v := range values {\n\t\tlength := len(v)\n\t\tvar suffix string\n\t\tif length > 2 {\n\t\t\tsuffix = v[length-2:]\n\t\t}\n\t\tif suffix == `\/*` {\n\t\t\tif len(prefix) > 0 {\n\t\t\t\tprefix += `|`\n\t\t\t}\n\t\t\tprefix += regexp.QuoteMeta(v[0 : length-2])\n\t\t\tif len(permActions) > 0 {\n\t\t\t\tpermActions += `,`\n\t\t\t}\n\t\t\tpermActions += v\n\t\t\tcontinue\n\t\t}\n\t\tif len(prefix) > 0 {\n\t\t\tre := regexp.MustCompile(`^(` + prefix + `)`)\n\t\t\tif re.MatchString(v) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(permActions) > 0 {\n\t\t\tpermActions += `,`\n\t\t}\n\t\tpermActions += v\n\t}\n\treturn permActions\n}\n\n\/\/Parse 解析用户获取的权限\nfunc (m *Map) Parse(permActions string, navTree *Map) *Map {\n\tperms := strings.Split(permActions, `,`)\n\tfor _, perm := range perms {\n\t\tarr := strings.Split(perm, `\/`)\n\t\ttree := navTree\n\t\tresult := m.V\n\t\tvar spath string\n\t\tfor _, a := range arr {\n\t\t\tif _, y := result[a]; !y {\n\t\t\t\tresult[a] = NewMap()\n\t\t\t}\n\t\t\tif a == `*` { \/\/\"*\"是最后一个字符\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif mp, y := tree.V[a]; y {\n\t\t\t\tresult[a].Nav = mp.Nav\n\t\t\t\tspath = ``\n\t\t\t\ttree = mp\n\t\t\t} else {\n\t\t\t\tif len(spath) > 0 {\n\t\t\t\t\tspath += `\/`\n\t\t\t\t}\n\t\t\t\tspath += a\n\t\t\t\tif mp, y := tree.V[spath]; y {\n\t\t\t\t\tresult[a].Nav = mp.Nav\n\t\t\t\t\tspath = ``\n\t\t\t\t\ttree = mp\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult = result[a].V\n\t\t}\n\t}\n\treturn m\n}\n\nfunc (m *Map) checkByNav(perm string) bool {\n\tif m.Nav != nil && m.Nav.Unlimited {\n\t\treturn true\n\t}\n\tarr := strings.Split(perm, `\/`)\n\tnavResult := m.V\n\tvar prefix string\n\tfor _, a := range arr {\n\t\tkey := prefix + a\n\t\tnavV, hasNav := navResult[key]\n\t\tif !hasNav {\n\t\t\tprefix += key + `\/`\n\t\t\tcontinue\n\t\t}\n\t\tprefix = ``\n\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\treturn true\n\t\t}\n\t\tnavResult = navV.V\n\t}\n\treturn false\n}\n\n\/\/Check 检测权限\n\/\/perm: \/a\/b\/c\nfunc (m *Map) Check(perm string, nav *Map) bool {\n\tif nav == nil || m == nav {\n\t\treturn m.checkByNav(perm)\n\t}\n\treturn m.checkByChecked(perm, nav)\n}\n\nfunc (m *Map) checkByChecked(perm string, nav *Map) bool {\n\tif m.Nav != nil && m.Nav.Unlimited {\n\t\treturn true\n\t}\n\tarr := strings.Split(perm, `\/`)\n\tresult := m.V\n\tnavResult := nav.V\n\thasPerm := true\n\tvar prefix string\n\tfor _, a := range arr {\n\t\tkey := prefix + a\n\t\tnavV, hasNav := navResult[key]\n\t\tif !hasNav {\n\t\t\tif hasPerm {\n\t\t\t\tvar v *Map\n\t\t\t\tv, hasPerm = result[a]\n\t\t\t\tif hasPerm {\n\t\t\t\t\tif v.Nav != nil && v.Nav.Unlimited {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tif _, y := v.V[`*`]; y {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tresult = v.V\n\t\t\t\t}\n\t\t\t}\n\t\t\tprefix += key + `\/`\n\t\t\tcontinue\n\t\t}\n\t\tprefix = ``\n\t\tif !hasPerm {\n\t\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tnavResult = navV.V\n\t\t\tcontinue\n\t\t}\n\t\tvar v *Map\n\t\tv, hasPerm = result[a]\n\t\tif hasPerm {\n\t\t\tif v.Nav != nil && v.Nav.Unlimited {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif _, y := v.V[`*`]; y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tresult = v.V\n\t\t}\n\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\treturn true\n\t\t}\n\t\tnavResult = navV.V\n\t}\n\treturn hasPerm\n}\n\nfunc NewMap() *Map {\n\treturn &Map{V: map[string]*Map{}}\n}\n<commit_msg>update<commit_after>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage perm\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/admpub\/nging\/application\/registry\/navigate\"\n)\n\ntype Map struct {\n\tV map[string]*Map `json:\",omitempty\" xml:\",omitempty\"`\n\tNav *navigate.Item `json:\",omitempty\" xml:\",omitempty\"`\n}\n\n\/\/Import 导入菜单(用户缓存结果)\nfunc (m *Map) Import(navList *navigate.List) *Map {\n\tif navList == nil {\n\t\treturn m\n\t}\n\tfor _, nav := range *navList {\n\t\titem := NewMap()\n\t\titem.Nav = nav\n\t\tif _, ok := m.V[nav.Action]; ok {\n\t\t\tpanic(`The navigate name conflicts. Already existed name: ` + nav.Action)\n\t\t}\n\t\tm.V[nav.Action] = item\n\t\titem.Import(nav.Children)\n\t}\n\treturn m\n}\n\nfunc BuildPermActions(values []string) string {\n\tvar permActions string\n\n\tif len(values) > 0 && values[0] == `*` {\n\t\tpermActions = `*`\n\t\treturn permActions\n\t}\n\tvar prefix string\n\tfor _, v := range values {\n\t\tlength := len(v)\n\t\tvar suffix string\n\t\tif length > 2 {\n\t\t\tsuffix = v[length-2:]\n\t\t}\n\t\tif suffix == `\/*` {\n\t\t\tif len(prefix) > 0 {\n\t\t\t\tprefix += `|`\n\t\t\t}\n\t\t\tprefix += regexp.QuoteMeta(v[0 : length-2])\n\t\t\tif len(permActions) > 0 {\n\t\t\t\tpermActions += `,`\n\t\t\t}\n\t\t\tpermActions += v\n\t\t\tcontinue\n\t\t}\n\t\tif len(prefix) > 0 {\n\t\t\tre := regexp.MustCompile(`^(` + prefix + `)`)\n\t\t\tif re.MatchString(v) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(permActions) > 0 {\n\t\t\tpermActions += `,`\n\t\t}\n\t\tpermActions += v\n\t}\n\treturn permActions\n}\n\n\/\/Parse 解析用户获取的权限\nfunc (m *Map) Parse(permActions string, navTree *Map) *Map {\n\tperms := strings.Split(permActions, `,`)\n\tfor _, perm := range perms {\n\t\tarr := strings.Split(perm, `\/`)\n\t\ttree := navTree\n\t\tresult := m.V\n\t\tvar spath string\n\t\tfor _, a := range arr {\n\t\t\tif _, y := result[a]; !y {\n\t\t\t\tresult[a] = NewMap()\n\t\t\t}\n\t\t\tif a == `*` { \/\/\"*\"是最后一个字符\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif mp, y := tree.V[a]; y {\n\t\t\t\tresult[a].Nav = mp.Nav\n\t\t\t\tspath = ``\n\t\t\t\ttree = mp\n\t\t\t} else {\n\t\t\t\tif len(spath) > 0 {\n\t\t\t\t\tspath += `\/`\n\t\t\t\t}\n\t\t\t\tspath += a\n\t\t\t\tif mp, y := tree.V[spath]; y {\n\t\t\t\t\tresult[a].Nav = mp.Nav\n\t\t\t\t\tspath = ``\n\t\t\t\t\ttree = mp\n\t\t\t\t}\n\t\t\t}\n\t\t\tresult = result[a].V\n\t\t}\n\t}\n\treturn m\n}\n\nfunc (m *Map) checkByNav(perm string) bool {\n\tif m.Nav != nil && m.Nav.Unlimited {\n\t\treturn true\n\t}\n\tarr := strings.Split(perm, `\/`)\n\tnavResult := m.V\n\tvar prefix string\n\tfor _, a := range arr {\n\t\tkey := prefix + a\n\t\tnavV, hasNav := navResult[key]\n\t\tif !hasNav {\n\t\t\tprefix += key + `\/`\n\t\t\tcontinue\n\t\t}\n\t\tprefix = ``\n\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\treturn true\n\t\t}\n\t\tnavResult = navV.V\n\t}\n\treturn false\n}\n\n\/\/Check 检测权限\n\/\/perm: \/a\/b\/c\nfunc (m *Map) Check(perm string, nav *Map) bool {\n\tif nav == nil || m == nav {\n\t\treturn m.checkByNav(perm)\n\t}\n\treturn m.checkByChecked(perm, nav)\n}\n\nfunc (m *Map) checkByChecked(perm string, nav *Map) bool {\n\tif m.Nav != nil && m.Nav.Unlimited {\n\t\treturn true\n\t}\n\tarr := strings.Split(perm, `\/`)\n\tresult := m.V\n\tnavResult := nav.V\n\thasPerm := true\n\tvar prefix string\n\tfor _, a := range arr {\n\t\tkey := prefix + a\n\t\tnavV, hasNav := navResult[key]\n\t\tif !hasNav {\n\t\t\tif hasPerm {\n\t\t\t\tvar v *Map\n\t\t\t\tv, hasPerm = result[a]\n\t\t\t\tif hasPerm {\n\t\t\t\t\tif v.Nav != nil && v.Nav.Unlimited {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tif _, y := v.V[`*`]; y {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tresult = v.V\n\t\t\t\t}\n\t\t\t}\n\t\t\tprefix += key + `\/`\n\t\t\tcontinue\n\t\t}\n\t\tprefix = ``\n\t\tif !hasPerm {\n\t\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tnavResult = navV.V\n\t\t\tcontinue\n\t\t}\n\t\tvar v *Map\n\t\tv, hasPerm = result[a]\n\t\tif hasPerm {\n\t\t\tif v.Nav != nil && v.Nav.Unlimited {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif _, y := v.V[`*`]; y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tresult = v.V\n\t\t}\n\t\tif navV.Nav != nil && navV.Nav.Unlimited {\n\t\t\treturn true\n\t\t}\n\t\tnavResult = navV.V\n\t}\n\treturn hasPerm\n}\n\nfunc NewMap() *Map {\n\treturn &Map{V: map[string]*Map{}}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage knativeserving\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmf \"github.com\/jcrossley3\/manifestival\"\n\tservingv1alpha1 \"github.com\/knative\/serving-operator\/pkg\/apis\/serving\/v1alpha1\"\n\t\"github.com\/knative\/serving-operator\/pkg\/controller\/knativeserving\/common\"\n\t\"github.com\/knative\/serving-operator\/version\"\n\n\t\"github.com\/operator-framework\/operator-sdk\/pkg\/predicate\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/controller\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/handler\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/manager\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/reconcile\"\n\tlogf \"sigs.k8s.io\/controller-runtime\/pkg\/runtime\/log\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/source\"\n)\n\nconst (\n\toperand = \"knative-serving\"\n)\n\nvar (\n\trecursive = flag.Bool(\"recursive\", false,\n\t\t\"If filename is a directory, process all manifests recursively\")\n\tlog = logf.Log.WithName(\"controller_knativeserving\")\n\t\/\/ Platform-specific behavior to affect the installation\n\tplatforms common.Platforms\n)\n\n\/\/ Add creates a new KnativeServing Controller and adds it to the Manager. The Manager will set fields on the Controller\n\/\/ and Start it when the Manager is Started.\nfunc Add(mgr manager.Manager) error {\n\treturn add(mgr, newReconciler(mgr))\n}\n\n\/\/ newReconciler returns a new reconcile.Reconciler\nfunc newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKnativeServing{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}\n\n\/\/ add adds a new Controller to mgr with r as the reconcile.Reconciler\nfunc add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t\/\/ Create a new controller\n\tc, err := controller.New(\"knativeserving-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Watch for changes to primary resource KnativeServing\n\terr = c.Watch(&source.Kind{Type: &servingv1alpha1.KnativeServing{}}, &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Watch child deployments for availability\n\terr = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &servingv1alpha1.KnativeServing{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar _ reconcile.Reconciler = &ReconcileKnativeServing{}\n\n\/\/ ReconcileKnativeServing reconciles a KnativeServing object\ntype ReconcileKnativeServing struct {\n\t\/\/ This client, initialized using mgr.Client() above, is a split client\n\t\/\/ that reads objects from the cache and writes to the apiserver\n\tclient client.Client\n\tscheme *runtime.Scheme\n\tconfig mf.Manifest\n}\n\n\/\/ Create manifestival resources and KnativeServing, if necessary\nfunc (r *ReconcileKnativeServing) InjectClient(c client.Client) error {\n\tkoDataDir := os.Getenv(\"KO_DATA_PATH\")\n\tm, err := mf.NewManifest(filepath.Join(koDataDir, \"knative-serving-0.6.0.yaml\"), *recursive, c)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to load manifest\")\n\t\treturn err\n\t}\n\tr.config = m\n\treturn r.ensureKnativeServing()\n}\n\n\/\/ Reconcile reads that state of the cluster for a KnativeServing object and makes changes based on the state read\n\/\/ and what is in the KnativeServing.Spec\n\/\/ Note:\n\/\/ The Controller will requeue the Request to be processed again if the returned error is non-nil or\n\/\/ Result.Requeue is true, otherwise upon completion it will remove the work from the queue.\nfunc (r *ReconcileKnativeServing) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling KnativeServing\")\n\n\t\/\/ Fetch the KnativeServing instance\n\tinstance := &servingv1alpha1.KnativeServing{}\n\tif err := r.client.Get(context.TODO(), request.NamespacedName, instance); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tif isInteresting(request) {\n\t\t\t\tr.config.DeleteAll()\n\t\t\t}\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif !isInteresting(request) {\n\t\treturn reconcile.Result{}, r.ignore(instance)\n\t}\n\n\tstages := []func(*servingv1alpha1.KnativeServing) error{\n\t\tr.initStatus,\n\t\tr.install,\n\t\tr.checkDeployments,\n\t\tr.deleteObsoleteResources,\n\t}\n\n\tfor _, stage := range stages {\n\t\tif err := stage(instance); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\treturn reconcile.Result{}, nil\n}\n\n\/\/ Initialize status conditions\nfunc (r *ReconcileKnativeServing) initStatus(instance *servingv1alpha1.KnativeServing) error {\n\tif len(instance.Status.Conditions) == 0 {\n\t\tinstance.Status.InitializeConditions()\n\t\tif err := r.updateStatus(instance); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Update the status subresource\nfunc (r *ReconcileKnativeServing) updateStatus(instance *servingv1alpha1.KnativeServing) error {\n\n\t\/\/ Account for https:\/\/github.com\/kubernetes-sigs\/controller-runtime\/issues\/406\n\tgvk := instance.GroupVersionKind()\n\tdefer instance.SetGroupVersionKind(gvk)\n\n\tif err := r.client.Status().Update(context.TODO(), instance); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Apply the embedded resources\nfunc (r *ReconcileKnativeServing) install(instance *servingv1alpha1.KnativeServing) error {\n\tif instance.Status.IsDeploying() {\n\t\treturn nil\n\t}\n\tdefer r.updateStatus(instance)\n\n\textensions, err := platforms.Extend(r.client, r.scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = r.config.Transform(extensions.Transform(instance)...)\n\tif err == nil {\n\t\terr = extensions.PreInstall(instance)\n\t\tif err == nil {\n\t\t\terr = r.config.ApplyAll()\n\t\t\tif err == nil {\n\t\t\t\terr = extensions.PostInstall(instance)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tinstance.Status.MarkInstallFailed(err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ Update status\n\tinstance.Status.Version = version.Version\n\tlog.Info(\"Install succeeded\", \"version\", version.Version)\n\tinstance.Status.MarkInstallSucceeded()\n\treturn nil\n}\n\n\/\/ Check for all deployments available\nfunc (r *ReconcileKnativeServing) checkDeployments(instance *servingv1alpha1.KnativeServing) error {\n\tdefer r.updateStatus(instance)\n\tavailable := func(d *appsv1.Deployment) bool {\n\t\tfor _, c := range d.Status.Conditions {\n\t\t\tif c.Type == appsv1.DeploymentAvailable && c.Status == v1.ConditionTrue {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tdeployment := &appsv1.Deployment{}\n\tfor _, u := range r.config.Resources {\n\t\tif u.GetKind() == \"Deployment\" {\n\t\t\tkey := client.ObjectKey{Namespace: u.GetNamespace(), Name: u.GetName()}\n\t\t\tif err := r.client.Get(context.TODO(), key, deployment); err != nil {\n\t\t\t\tinstance.Status.MarkDeploymentsNotReady()\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !available(deployment) {\n\t\t\t\tinstance.Status.MarkDeploymentsNotReady()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"All deployments are available\")\n\tinstance.Status.MarkDeploymentsAvailable()\n\treturn nil\n}\n\n\/\/ Delete obsolete resources from previous versions\nfunc (r *ReconcileKnativeServing) deleteObsoleteResources(instance *servingv1alpha1.KnativeServing) error {\n\t\/\/ istio-system resources from 0.3\n\tresource := &unstructured.Unstructured{}\n\tresource.SetNamespace(\"istio-system\")\n\tresource.SetName(\"knative-ingressgateway\")\n\tresource.SetAPIVersion(\"v1\")\n\tresource.SetKind(\"Service\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\tresource.SetAPIVersion(\"apps\/v1\")\n\tresource.SetKind(\"Deployment\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\tresource.SetAPIVersion(\"autoscaling\/v1\")\n\tresource.SetKind(\"HorizontalPodAutoscaler\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\t\/\/ config-controller from 0.5\n\tresource.SetNamespace(instance.GetNamespace())\n\tresource.SetName(\"config-controller\")\n\tresource.SetAPIVersion(\"v1\")\n\tresource.SetKind(\"ConfigMap\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Because it's effectively cluster-scoped, we only care about a\n\/\/ single, named resource: knative-serving\/knative-serving\nfunc isInteresting(request reconcile.Request) bool {\n\treturn request.Namespace == operand && request.Name == operand\n}\n\n\/\/ Reflect our ignorance in the KnativeServing status\nfunc (r *ReconcileKnativeServing) ignore(instance *servingv1alpha1.KnativeServing) (err error) {\n\terr = r.initStatus(instance)\n\tif err == nil {\n\t\tmsg := fmt.Sprintf(\"The only KnativeServing resource that matters is %s\/%s\", operand, operand)\n\t\tinstance.Status.MarkIgnored(msg)\n\t\terr = r.updateStatus(instance)\n\t}\n\treturn\n}\n\n\/\/ If we can't find knative-serving\/knative-serving, create it\nfunc (r *ReconcileKnativeServing) ensureKnativeServing() (err error) {\n\tconst path = \"config\/crds\/serving_v1alpha1_knativeserving_cr.yaml\"\n\tinstance := &servingv1alpha1.KnativeServing{}\n\tkey := client.ObjectKey{Namespace: operand, Name: operand}\n\tif err = r.client.Get(context.TODO(), key, instance); err != nil {\n\t\tvar manifest mf.Manifest\n\t\tmanifest, err = mf.NewManifest(path, false, r.client)\n\t\tif err == nil {\n\t\t\t\/\/ create namespace\n\t\t\terr = manifest.Apply(&r.config.Resources[0])\n\t\t}\n\t\tif err == nil {\n\t\t\terr = manifest.Transform(mf.InjectNamespace(operand))\n\t\t}\n\t\tif err == nil {\n\t\t\terr = manifest.ApplyAll()\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Use KO_DATA_PATH for custom resource (#37)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage knativeserving\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmf \"github.com\/jcrossley3\/manifestival\"\n\tservingv1alpha1 \"github.com\/knative\/serving-operator\/pkg\/apis\/serving\/v1alpha1\"\n\t\"github.com\/knative\/serving-operator\/pkg\/controller\/knativeserving\/common\"\n\t\"github.com\/knative\/serving-operator\/version\"\n\n\t\"github.com\/operator-framework\/operator-sdk\/pkg\/predicate\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/controller\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/handler\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/manager\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/reconcile\"\n\tlogf \"sigs.k8s.io\/controller-runtime\/pkg\/runtime\/log\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/source\"\n)\n\nconst (\n\toperand = \"knative-serving\"\n)\n\nvar (\n\trecursive = flag.Bool(\"recursive\", false,\n\t\t\"If filename is a directory, process all manifests recursively\")\n\tlog = logf.Log.WithName(\"controller_knativeserving\")\n\t\/\/ Platform-specific behavior to affect the installation\n\tplatforms common.Platforms\n)\n\n\/\/ Add creates a new KnativeServing Controller and adds it to the Manager. The Manager will set fields on the Controller\n\/\/ and Start it when the Manager is Started.\nfunc Add(mgr manager.Manager) error {\n\treturn add(mgr, newReconciler(mgr))\n}\n\n\/\/ newReconciler returns a new reconcile.Reconciler\nfunc newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileKnativeServing{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}\n\n\/\/ add adds a new Controller to mgr with r as the reconcile.Reconciler\nfunc add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t\/\/ Create a new controller\n\tc, err := controller.New(\"knativeserving-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Watch for changes to primary resource KnativeServing\n\terr = c.Watch(&source.Kind{Type: &servingv1alpha1.KnativeServing{}}, &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Watch child deployments for availability\n\terr = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &servingv1alpha1.KnativeServing{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar _ reconcile.Reconciler = &ReconcileKnativeServing{}\n\n\/\/ ReconcileKnativeServing reconciles a KnativeServing object\ntype ReconcileKnativeServing struct {\n\t\/\/ This client, initialized using mgr.Client() above, is a split client\n\t\/\/ that reads objects from the cache and writes to the apiserver\n\tclient client.Client\n\tscheme *runtime.Scheme\n\tconfig mf.Manifest\n}\n\n\/\/ Create manifestival resources and KnativeServing, if necessary\nfunc (r *ReconcileKnativeServing) InjectClient(c client.Client) error {\n\tkoDataDir := os.Getenv(\"KO_DATA_PATH\")\n\tm, err := mf.NewManifest(filepath.Join(koDataDir, \"knative-serving-0.6.0.yaml\"), *recursive, c)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to load manifest\")\n\t\treturn err\n\t}\n\tr.config = m\n\treturn r.ensureKnativeServing()\n}\n\n\/\/ Reconcile reads that state of the cluster for a KnativeServing object and makes changes based on the state read\n\/\/ and what is in the KnativeServing.Spec\n\/\/ Note:\n\/\/ The Controller will requeue the Request to be processed again if the returned error is non-nil or\n\/\/ Result.Requeue is true, otherwise upon completion it will remove the work from the queue.\nfunc (r *ReconcileKnativeServing) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling KnativeServing\")\n\n\t\/\/ Fetch the KnativeServing instance\n\tinstance := &servingv1alpha1.KnativeServing{}\n\tif err := r.client.Get(context.TODO(), request.NamespacedName, instance); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tif isInteresting(request) {\n\t\t\t\tr.config.DeleteAll()\n\t\t\t}\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif !isInteresting(request) {\n\t\treturn reconcile.Result{}, r.ignore(instance)\n\t}\n\n\tstages := []func(*servingv1alpha1.KnativeServing) error{\n\t\tr.initStatus,\n\t\tr.install,\n\t\tr.checkDeployments,\n\t\tr.deleteObsoleteResources,\n\t}\n\n\tfor _, stage := range stages {\n\t\tif err := stage(instance); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\treturn reconcile.Result{}, nil\n}\n\n\/\/ Initialize status conditions\nfunc (r *ReconcileKnativeServing) initStatus(instance *servingv1alpha1.KnativeServing) error {\n\tif len(instance.Status.Conditions) == 0 {\n\t\tinstance.Status.InitializeConditions()\n\t\tif err := r.updateStatus(instance); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Update the status subresource\nfunc (r *ReconcileKnativeServing) updateStatus(instance *servingv1alpha1.KnativeServing) error {\n\n\t\/\/ Account for https:\/\/github.com\/kubernetes-sigs\/controller-runtime\/issues\/406\n\tgvk := instance.GroupVersionKind()\n\tdefer instance.SetGroupVersionKind(gvk)\n\n\tif err := r.client.Status().Update(context.TODO(), instance); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Apply the embedded resources\nfunc (r *ReconcileKnativeServing) install(instance *servingv1alpha1.KnativeServing) error {\n\tif instance.Status.IsDeploying() {\n\t\treturn nil\n\t}\n\tdefer r.updateStatus(instance)\n\n\textensions, err := platforms.Extend(r.client, r.scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = r.config.Transform(extensions.Transform(instance)...)\n\tif err == nil {\n\t\terr = extensions.PreInstall(instance)\n\t\tif err == nil {\n\t\t\terr = r.config.ApplyAll()\n\t\t\tif err == nil {\n\t\t\t\terr = extensions.PostInstall(instance)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tinstance.Status.MarkInstallFailed(err.Error())\n\t\treturn err\n\t}\n\n\t\/\/ Update status\n\tinstance.Status.Version = version.Version\n\tlog.Info(\"Install succeeded\", \"version\", version.Version)\n\tinstance.Status.MarkInstallSucceeded()\n\treturn nil\n}\n\n\/\/ Check for all deployments available\nfunc (r *ReconcileKnativeServing) checkDeployments(instance *servingv1alpha1.KnativeServing) error {\n\tdefer r.updateStatus(instance)\n\tavailable := func(d *appsv1.Deployment) bool {\n\t\tfor _, c := range d.Status.Conditions {\n\t\t\tif c.Type == appsv1.DeploymentAvailable && c.Status == v1.ConditionTrue {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tdeployment := &appsv1.Deployment{}\n\tfor _, u := range r.config.Resources {\n\t\tif u.GetKind() == \"Deployment\" {\n\t\t\tkey := client.ObjectKey{Namespace: u.GetNamespace(), Name: u.GetName()}\n\t\t\tif err := r.client.Get(context.TODO(), key, deployment); err != nil {\n\t\t\t\tinstance.Status.MarkDeploymentsNotReady()\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !available(deployment) {\n\t\t\t\tinstance.Status.MarkDeploymentsNotReady()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"All deployments are available\")\n\tinstance.Status.MarkDeploymentsAvailable()\n\treturn nil\n}\n\n\/\/ Delete obsolete resources from previous versions\nfunc (r *ReconcileKnativeServing) deleteObsoleteResources(instance *servingv1alpha1.KnativeServing) error {\n\t\/\/ istio-system resources from 0.3\n\tresource := &unstructured.Unstructured{}\n\tresource.SetNamespace(\"istio-system\")\n\tresource.SetName(\"knative-ingressgateway\")\n\tresource.SetAPIVersion(\"v1\")\n\tresource.SetKind(\"Service\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\tresource.SetAPIVersion(\"apps\/v1\")\n\tresource.SetKind(\"Deployment\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\tresource.SetAPIVersion(\"autoscaling\/v1\")\n\tresource.SetKind(\"HorizontalPodAutoscaler\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\t\/\/ config-controller from 0.5\n\tresource.SetNamespace(instance.GetNamespace())\n\tresource.SetName(\"config-controller\")\n\tresource.SetAPIVersion(\"v1\")\n\tresource.SetKind(\"ConfigMap\")\n\tif err := r.config.Delete(resource); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Because it's effectively cluster-scoped, we only care about a\n\/\/ single, named resource: knative-serving\/knative-serving\nfunc isInteresting(request reconcile.Request) bool {\n\treturn request.Namespace == operand && request.Name == operand\n}\n\n\/\/ Reflect our ignorance in the KnativeServing status\nfunc (r *ReconcileKnativeServing) ignore(instance *servingv1alpha1.KnativeServing) (err error) {\n\terr = r.initStatus(instance)\n\tif err == nil {\n\t\tmsg := fmt.Sprintf(\"The only KnativeServing resource that matters is %s\/%s\", operand, operand)\n\t\tinstance.Status.MarkIgnored(msg)\n\t\terr = r.updateStatus(instance)\n\t}\n\treturn\n}\n\n\/\/ If we can't find knative-serving\/knative-serving, create it\nfunc (r *ReconcileKnativeServing) ensureKnativeServing() (err error) {\n\tkoDataDir := os.Getenv(\"KO_DATA_PATH\")\n\tconst path = \"serving_v1alpha1_knativeserving_cr.yaml\"\n\tinstance := &servingv1alpha1.KnativeServing{}\n\tkey := client.ObjectKey{Namespace: operand, Name: operand}\n\tif err = r.client.Get(context.TODO(), key, instance); err != nil {\n\t\tvar manifest mf.Manifest\n\t\tmanifest, err = mf.NewManifest(filepath.Join(koDataDir, path), false, r.client)\n\t\tif err == nil {\n\t\t\t\/\/ create namespace\n\t\t\terr = manifest.Apply(&r.config.Resources[0])\n\t\t}\n\t\tif err == nil {\n\t\t\terr = manifest.Transform(mf.InjectNamespace(operand))\n\t\t}\n\t\tif err == nil {\n\t\t\terr = manifest.ApplyAll()\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package native1\n\n\/\/ 5.8 Protocols (from AdCOM spec 1.0)\n\/\/\n\/\/ Options for the various bid response protocols that could be supported by an exchange.\n\/\/\n\/\/ Dev note: this enum is copied to native package to allow requiring native\/{request,response} in root openrtb package in future.\ntype Protocol int8\n\nconst (\n\tProtocolVAST10 Protocol = 1 \/\/ VAST 1.0\n\tProtocolVAST20 Protocol = 2 \/\/ VAST 2.0\n\tProtocolVAST30 Protocol = 3 \/\/ VAST 3.0\n\tProtocolVAST10Wrapper Protocol = 4 \/\/ VAST 1.0 Wrapper\n\tProtocolVAST20Wrapper Protocol = 5 \/\/ VAST 2.0 Wrapper\n\tProtocolVAST30Wrapper Protocol = 6 \/\/ VAST 3.0 Wrapper\n\tProtocolVAST40 Protocol = 7 \/\/ VAST 4.0\n\tProtocolVAST40Wrapper Protocol = 8 \/\/ VAST 4.0 Wrapper\n\tProtocolDAAST10 Protocol = 9 \/\/ DAAST 1.0\n\tProtocolDAAST10Wrapper Protocol = 10 \/\/ DAAST 1.0 Wrapper\n\tProtocolVAST41 Protocol = 11 \/\/ VAST 4.1\n\tProtocolVAST41Wrapper Protocol = 11 \/\/ VAST 4.1 Wrapper\n\tProtocolVAST42 Protocol = 11 \/\/ VAST 4.2\n\tProtocolVAST42Wrapper Protocol = 11 \/\/ VAST 4.2 Wrapper\n)\n<commit_msg>Protocol Enum Value Typo<commit_after>package native1\n\n\/\/ 5.8 Protocols (from AdCOM spec 1.0)\n\/\/\n\/\/ Options for the various bid response protocols that could be supported by an exchange.\n\/\/\n\/\/ Dev note: this enum is copied to native package to allow requiring native\/{request,response} in root openrtb package in future.\ntype Protocol int8\n\nconst (\n\tProtocolVAST10 Protocol = 1 \/\/ VAST 1.0\n\tProtocolVAST20 Protocol = 2 \/\/ VAST 2.0\n\tProtocolVAST30 Protocol = 3 \/\/ VAST 3.0\n\tProtocolVAST10Wrapper Protocol = 4 \/\/ VAST 1.0 Wrapper\n\tProtocolVAST20Wrapper Protocol = 5 \/\/ VAST 2.0 Wrapper\n\tProtocolVAST30Wrapper Protocol = 6 \/\/ VAST 3.0 Wrapper\n\tProtocolVAST40 Protocol = 7 \/\/ VAST 4.0\n\tProtocolVAST40Wrapper Protocol = 8 \/\/ VAST 4.0 Wrapper\n\tProtocolDAAST10 Protocol = 9 \/\/ DAAST 1.0\n\tProtocolDAAST10Wrapper Protocol = 10 \/\/ DAAST 1.0 Wrapper\n\tProtocolVAST41 Protocol = 11 \/\/ VAST 4.1\n\tProtocolVAST41Wrapper Protocol = 12 \/\/ VAST 4.1 Wrapper\n\tProtocolVAST42 Protocol = 13 \/\/ VAST 4.2\n\tProtocolVAST42Wrapper Protocol = 14 \/\/ VAST 4.2 Wrapper\n)\n<|endoftext|>"} {"text":"<commit_before>package naturalsort\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestSortValid(t *testing.T) {\n\tcases := []struct {\n\t\tdata, expected []string\n\t}{\n\t\t{\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t[]string{},\n\t\t\t[]string{},\n\t\t},\n\t\t{\n\t\t\t[]string{\"a\"},\n\t\t\t[]string{\"a\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"0\"},\n\t\t\t[]string{\"0\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"data\", \"data20\", \"data3\"},\n\t\t\t[]string{\"data\", \"data3\", \"data20\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \"2\", \"30\", \"22\", \"0\", \"00\", \"3\"},\n\t\t\t[]string{\"0\", \"00\", \"1\", \"2\", \"3\", \"22\", \"30\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"A1\", \"A0\", \"A21\", \"A11\", \"A111\", \"A2\"},\n\t\t\t[]string{\"A0\", \"A1\", \"A2\", \"A11\", \"A21\", \"A111\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"A1BA1\", \"A11AA1\", \"A2AB0\", \"B1AA1\", \"A1AA1\"},\n\t\t\t[]string{\"A1AA1\", \"A1BA1\", \"A2AB0\", \"A11AA1\", \"B1AA1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1ax10\", \"1a10\", \"1ax2\", \"1ax\"},\n\t\t\t[]string{\"1a10\", \"1ax\", \"1ax2\", \"1ax10\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"z1a10\", \"z1ax2\", \"z1ax\"},\n\t\t\t[]string{\"z1a10\", \"z1ax\", \"z1ax2\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #8\n\t\t\t[]string{\"a0000001\", \"a0001\"},\n\t\t\t[]string{\"a0001\", \"a0000001\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10 - Number sort before any symbols even if theyre lower on the ASCII table\n\t\t\t[]string{\"#1\", \"1\", \"_1\", \"a\"},\n\t\t\t[]string{\"1\", \"#1\", \"_1\", \"a\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10 - Number sort before any symbols even if theyre lower on the ASCII table\n\t\t\t[]string{\"#1\", \"1\", \"_1\", \"a\"},\n\t\t\t[]string{\"1\", \"#1\", \"_1\", \"a\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \" \", \"0\"},\n\t\t\t[]string{\" \", \"0\", \"1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \" \", \"0\"},\n\t\t\t[]string{\" \", \"0\", \"1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \"a\", \"0\"},\n\t\t\t[]string{\"0\", \"1\", \"a\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10\n\t\t\t[]string{\"111111111111111111112\", \"111111111111111111113\", \"1111111111111111111120\"},\n\t\t\t[]string{\"111111111111111111112\", \"111111111111111111113\", \"1111111111111111111120\"},\n\t\t},\n\t}\n\n\tfor i, c := range cases {\n\t\tsort.Sort(NaturalSort(c.data))\n\t\tif !reflect.DeepEqual(c.data, c.expected) {\n\t\t\tt.Fatalf(\"Wrong order in test case #%d.\\nExpected=%v\\nGot=%v\", i, c.expected, c.data)\n\t\t}\n\t}\n\n}\n\nfunc BenchmarkSort(b *testing.B) {\n\tvar data = [...]string{\"A1BA1\", \"A11AA1\", \"A2AB0\", \"B1AA1\", \"A1AA1\"}\n\tfor ii := 0; ii < b.N; ii++ {\n\t\td := NaturalSort(data[:])\n\t\tsort.Sort(d)\n\t}\n}\n<commit_msg>Added a new test case to match OSX behavior<commit_after>package naturalsort\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestSortValid(t *testing.T) {\n\tcases := []struct {\n\t\tdata, expected []string\n\t}{\n\t\t{\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t[]string{},\n\t\t\t[]string{},\n\t\t},\n\t\t{\n\t\t\t[]string{\"a\"},\n\t\t\t[]string{\"a\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"0\"},\n\t\t\t[]string{\"0\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"data\", \"data20\", \"data3\"},\n\t\t\t[]string{\"data\", \"data3\", \"data20\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \"2\", \"30\", \"22\", \"0\", \"00\", \"3\"},\n\t\t\t[]string{\"0\", \"00\", \"1\", \"2\", \"3\", \"22\", \"30\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"A1\", \"A0\", \"A21\", \"A11\", \"A111\", \"A2\"},\n\t\t\t[]string{\"A0\", \"A1\", \"A2\", \"A11\", \"A21\", \"A111\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"A1BA1\", \"A11AA1\", \"A2AB0\", \"B1AA1\", \"A1AA1\"},\n\t\t\t[]string{\"A1AA1\", \"A1BA1\", \"A2AB0\", \"A11AA1\", \"B1AA1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1ax10\", \"1a10\", \"1ax2\", \"1ax\"},\n\t\t\t[]string{\"1a10\", \"1ax\", \"1ax2\", \"1ax10\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"z1a10\", \"z1ax2\", \"z1ax\"},\n\t\t\t[]string{\"z1a10\", \"z1ax\", \"z1ax2\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #8\n\t\t\t[]string{\"a0000001\", \"a0001\"},\n\t\t\t[]string{\"a0001\", \"a0000001\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10 - Number sort before any symbols even if theyre lower on the ASCII table\n\t\t\t[]string{\"#1\", \"1\", \"_1\", \"a\"},\n\t\t\t[]string{\"1\", \"#1\", \"_1\", \"a\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10 - Number sort before any symbols even if theyre lower on the ASCII table\n\t\t\t[]string{\"#1\", \"1\", \"_1\", \"a\"},\n\t\t\t[]string{\"1\", \"#1\", \"_1\", \"a\"},\n\t\t},\n\t\t{\t\/\/ test correct handling of space-only strings - This matches the Mac OSX sorting behavior\n\t\t\t[]string{\"1\", \" \", \"0\"},\n\t\t\t[]string{\" \", \"0\", \"1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \" \", \" 1\", \" \"},\n\t\t\t[]string{\" \", \" \", \" 1\", \"1\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"1\", \"a\", \"0\"},\n\t\t\t[]string{\"0\", \"1\", \"a\"},\n\t\t},\n\t\t{\n\t\t\t\/\/ regression test for #10\n\t\t\t[]string{\"111111111111111111112\", \"111111111111111111113\", \"1111111111111111111120\"},\n\t\t\t[]string{\"111111111111111111112\", \"111111111111111111113\", \"1111111111111111111120\"},\n\t\t},\n\t}\n\n\tfor i, c := range cases {\n\t\tsort.Sort(NaturalSort(c.data))\n\t\tif !reflect.DeepEqual(c.data, c.expected) {\n\t\t\tt.Fatalf(\"Wrong order in test case #%d.\\nExpected=%v\\nGot=%v\", i, c.expected, c.data)\n\t\t}\n\t}\n\n}\n\nfunc BenchmarkSort(b *testing.B) {\n\tvar data = [...]string{\"A1BA1\", \"A11AA1\", \"A2AB0\", \"B1AA1\", \"A1AA1\"}\n\tfor ii := 0; ii < b.N; ii++ {\n\t\td := NaturalSort(data[:])\n\t\tsort.Sort(d)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defauting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\n\tgo SortContent(ns)\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\tlog.Println(ns)\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Add(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ since sorting can be expensive, limit sort to non-externally created posts\n\tif !strings.Contains(ns, \"_external\") {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn effectedID, nil\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshall content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\n\n\/\/ DeleteContent removes an item from the database. Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ exception to typical \"run in goroutine\" pattern:\n\t\/\/ we want to have an updated admin view as soon as this is deleted, so\n\t\/\/ in some cases, the delete and redirect is faster than the sort,\n\t\/\/ thus still showing a deleted post in the admin view.\n\tSortContent(ns)\n\n\treturn nil\n}\n\n\/\/ Content retrives one item from the database. Non-existent values will return an empty []byte\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrives all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tlen := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, len)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\tall := ContentAll(namespace)\n\n\tvar posts sortablePosts\n\t\/\/ decode each (json) into Editable\n\tfor i := range all {\n\t\tj := all[i]\n\t\tpost := content.Types[namespace]()\n\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\t\/\/ sort posts\n\tsort.Sort(posts)\n\n\t\/\/ store in <namespace>_sorted bucket, first delete existing\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\terr := tx.DeleteBucket([]byte(namespace + \"_sorted\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := tx.CreateBucket([]byte(namespace + \"_sorted\"))\n\t\tif err != nil {\n\t\t\terr := tx.Rollback()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ encode to json and store as 'i:post.Time()':post\n\t\tfor i := range posts {\n\t\t\tj, err := json.Marshal(posts[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcid := fmt.Sprintf(\"%d:%d\", i, posts[i].Time())\n\t\t\terr = b.Put([]byte(cid), j)\n\t\t\tif err != nil {\n\t\t\t\terr := tx.Rollback()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error while updating db with sorted\", namespace, err)\n\t}\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() > s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<commit_msg>debugging<commit_after>package db\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tlog.Println(ns, id, data)\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defauting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\n\tgo SortContent(ns)\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\tlog.Println(ns)\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Add(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ since sorting can be expensive, limit sort to non-externally created posts\n\tif !strings.Contains(ns, \"_external\") {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn effectedID, nil\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshall content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\n\n\/\/ DeleteContent removes an item from the database. Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ exception to typical \"run in goroutine\" pattern:\n\t\/\/ we want to have an updated admin view as soon as this is deleted, so\n\t\/\/ in some cases, the delete and redirect is faster than the sort,\n\t\/\/ thus still showing a deleted post in the admin view.\n\tSortContent(ns)\n\n\treturn nil\n}\n\n\/\/ Content retrives one item from the database. Non-existent values will return an empty []byte\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrives all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tlen := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, len)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\tall := ContentAll(namespace)\n\n\tvar posts sortablePosts\n\t\/\/ decode each (json) into Editable\n\tfor i := range all {\n\t\tj := all[i]\n\t\tpost := content.Types[namespace]()\n\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\t\/\/ sort posts\n\tsort.Sort(posts)\n\n\t\/\/ store in <namespace>_sorted bucket, first delete existing\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\terr := tx.DeleteBucket([]byte(namespace + \"_sorted\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := tx.CreateBucket([]byte(namespace + \"_sorted\"))\n\t\tif err != nil {\n\t\t\terr := tx.Rollback()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ encode to json and store as 'i:post.Time()':post\n\t\tfor i := range posts {\n\t\t\tj, err := json.Marshal(posts[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcid := fmt.Sprintf(\"%d:%d\", i, posts[i].Time())\n\t\t\terr = b.Put([]byte(cid), j)\n\t\t\tif err != nil {\n\t\t\t\terr := tx.Rollback()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error while updating db with sorted\", namespace, err)\n\t}\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() > s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defauting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Add(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn effectedID, nil\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshall content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\n\n\/\/ DeleteContent removes an item from the database. Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Content retrives one item from the database. Non-existent values will return an empty []byte\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrives all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tlen := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, len)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\tall := ContentAll(namespace)\n\n\tfmt.Println(all)\n\n\tvar posts sortablePosts\n\tpost := content.Types[namespace]()\n\t\/\/ decode each (json) into Editable\n\tfor i := range all {\n\t\tj := all[i]\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\tfmt.Println(\"-------------------------UN-SORTED------------------------\")\n\n\tfor i := range posts {\n\t\tfmt.Printf(\"%v\\n\", posts[i])\n\t}\n\tfmt.Println(\"------------------------NOW SORTED------------------------\")\n\n\t\/\/ sort posts\n\tsort.Sort(&posts)\n\n\tfor i := range posts {\n\t\tfmt.Printf(\"%v\\n\", posts[i])\n\t}\n\n\t\/\/ one by one, encode to json and store as\n\t\/\/ store in __sorted bucket inside namespace bucket, first delete existing\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() < s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<commit_msg>debugging<commit_after>package db\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defauting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Add(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn effectedID, nil\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshall content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\n\n\/\/ DeleteContent removes an item from the database. Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Content retrives one item from the database. Non-existent values will return an empty []byte\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrives all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tlen := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, len)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\tall := ContentAll(namespace)\n\n\tfmt.Println(all)\n\n\tvar posts sortablePosts\n\tpost := content.Types[namespace]()\n\t\/\/ decode each (json) into Editable\n\tfor i := range all {\n\t\tj := all[i]\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\tfmt.Println(\"-------------------------UN-SORTED------------------------\")\n\n\tfor i := range posts {\n\t\tfmt.Printf(\"%v\\n\", posts[i])\n\t}\n\tfmt.Println(\"------------------------NOW SORTED------------------------\")\n\n\t\/\/ sort posts\n\tsort.Sort(posts)\n\n\tfor i := range posts {\n\t\tfmt.Printf(\"%v\\n\", posts[i])\n\t}\n\n\t\/\/ one by one, encode to json and store as\n\t\/\/ store in <namespace>__sorted bucket, first delete existing\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() < s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The lime Authors.\n\/\/ Use of this source code is governed by a 2-clause\n\/\/ BSD-style license that can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t. \"github.com\/limetext\/lime\/backend\"\n\t. \"github.com\/limetext\/text\"\n\t\"testing\"\n)\n\ntype indentTest struct {\n\ttext string\n\ttranslate_tabs_to_spaces interface{}\n\ttab_size interface{}\n\tsel []Region\n\texpect string\n}\n\nfunc runIndentTest(t *testing.T, tests []indentTest, command string) {\n\ted := GetEditor()\n\tw := ed.NewWindow()\n\tdefer w.Close()\n\n\tfor i, test := range tests {\n\t\tv := w.NewFile()\n\t\tdefer func() {\n\t\t\tv.SetScratch(true)\n\t\t\tv.Close()\n\t\t}()\n\n\t\te := v.BeginEdit()\n\t\tv.Insert(e, 0, test.text)\n\t\tv.EndEdit(e)\n\n\t\tv.Sel().Clear()\n\t\tfor _, r := range test.sel {\n\t\t\tv.Sel().Add(r)\n\t\t}\n\t\tv.Settings().Set(\"translate_tabs_to_spaces\", test.translate_tabs_to_spaces)\n\t\tv.Settings().Set(\"tab_size\", test.tab_size)\n\n\t\ted.CommandHandler().RunTextCommand(v, command, nil)\n\t\tif d := v.Buffer().Substr(Region{0, v.Buffer().Size()}); d != test.expect {\n\t\t\tt.Errorf(\"Test %d: Expected \\n%s, but got \\n%s\", i, test.expect, d)\n\t\t}\n\t}\n}\n\nfunc TestIndent(t *testing.T) {\n\ttests := []indentTest{\n\t\t{ \/\/ translate_tabs_to_spaces = false\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = nil\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tnil,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = 2\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\t2,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = nil\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\tnil,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ region include the 1st line and the 4th line\n\t\t\t\/\/ indent should add to the begining of 1st and 4th line\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}, {11, 12}},\n\t\t\t\"\\ta\\n b\\n c\\n\\t d\\n\",\n\t\t},\n\t\t{ \/\/ region selected reversely\n\t\t\t\/\/ should perform indent\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{3, 0}},\n\t\t\t\"\\ta\\n\\t b\\n c\\n d\\n\",\n\t\t},\n\t}\n\n\trunIndentTest(t, tests, \"indent\")\n}\n\nfunc TestUnindent(t *testing.T) {\n\ttests := []indentTest{\n\t\t{ \/\/ translate_tabs_to_spaces = false\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"\\ta\\n b\\n c\\n\\t d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{0, 19}},\n\t\t\t\"a\\nb\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = nil\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t\tnil,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = 2\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\t2,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = nil\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\tnil,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ region include the 1st line and the 4th line\n\t\t\t\/\/ unindent should remove from the begining of 1st and 4th line\n\t\t\t\"\\ta\\n b\\n c\\n \\t d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}, {11, 12}},\n\t\t\t\"a\\n b\\n c\\n\\t d\\n\",\n\t\t},\n\t\t{ \/\/ region selected reversely\n\t\t\t\/\/ should perform unindent\n\t\t\t\"\\ta\\n\\t b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{3, 0}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t}\n\n\trunIndentTest(t, tests, \"unindent\")\n}\n<commit_msg>indent.go coverage to 100%<commit_after>\/\/ Copyright 2013 The lime Authors.\n\/\/ Use of this source code is governed by a 2-clause\n\/\/ BSD-style license that can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t. \"github.com\/limetext\/lime\/backend\"\n\t. \"github.com\/limetext\/text\"\n\t\"testing\"\n)\n\ntype indentTest struct {\n\ttext string\n\ttranslate_tabs_to_spaces interface{}\n\ttab_size interface{}\n\tsel []Region\n\texpect string\n}\n\nfunc runIndentTest(t *testing.T, tests []indentTest, command string) {\n\ted := GetEditor()\n\tw := ed.NewWindow()\n\tdefer w.Close()\n\n\tfor i, test := range tests {\n\t\tv := w.NewFile()\n\t\tdefer func() {\n\t\t\tv.SetScratch(true)\n\t\t\tv.Close()\n\t\t}()\n\n\t\te := v.BeginEdit()\n\t\tv.Insert(e, 0, test.text)\n\t\tv.EndEdit(e)\n\n\t\tv.Sel().Clear()\n\t\tfor _, r := range test.sel {\n\t\t\tv.Sel().Add(r)\n\t\t}\n\t\tv.Settings().Set(\"translate_tabs_to_spaces\", test.translate_tabs_to_spaces)\n\t\tv.Settings().Set(\"tab_size\", test.tab_size)\n\n\t\ted.CommandHandler().RunTextCommand(v, command, nil)\n\t\tif d := v.Buffer().Substr(Region{0, v.Buffer().Size()}); d != test.expect {\n\t\t\tt.Errorf(\"Test %d: Expected \\n%s, but got \\n%s\", i, test.expect, d)\n\t\t}\n\t}\n}\n\nfunc TestIndent(t *testing.T) {\n\ttests := []indentTest{\n\t\t{ \/\/ translate_tabs_to_spaces = false\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = nil\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tnil,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = 2\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\t2,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = nil\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\tnil,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ region include the 1st line and the 4th line\n\t\t\t\/\/ indent should add to the begining of 1st and 4th line\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}, {11, 12}},\n\t\t\t\"\\ta\\n b\\n c\\n\\t d\\n\",\n\t\t},\n\t\t{ \/\/ region selected reversely\n\t\t\t\/\/ should perform indent\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{3, 0}},\n\t\t\t\"\\ta\\n\\t b\\n c\\n d\\n\",\n\t\t},\n\t}\n\n\trunIndentTest(t, tests, \"indent\")\n}\n\nfunc TestUnindent(t *testing.T) {\n\ttests := []indentTest{\n\t\t{ \/\/ translate_tabs_to_spaces = false\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"\\ta\\n b\\n c\\n\\t d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{0, 19}},\n\t\t\t\"a\\nb\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = nil\n\t\t\t\/\/ indent should be \"\\t\"\n\t\t\t\"\\ta\\n b\\n c\\n d\\n\",\n\t\t\tnil,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = 2\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\t2,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ translate_tabs_to_spaces = true and tab_size = nil\n\t\t\t\/\/ indent should be \" \"\n\t\t\t\" a\\n b\\n c\\n d\\n\",\n\t\t\ttrue,\n\t\t\tnil,\n\t\t\t[]Region{{0, 1}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ region include the 1st line and the 4th line\n\t\t\t\/\/ unindent should remove from the begining of 1st and 4th line\n\t\t\t\"\\ta\\n b\\n c\\n \\t d\\n\",\n\t\t\tfalse,\n\t\t\t1,\n\t\t\t[]Region{{0, 1}, {11, 12}},\n\t\t\t\"a\\n b\\n c\\n\\t d\\n\",\n\t\t},\n\t\t{ \/\/ region selected reversely\n\t\t\t\/\/ should perform unindent\n\t\t\t\"\\ta\\n\\t b\\n c\\n d\\n\",\n\t\t\tfalse,\n\t\t\t4,\n\t\t\t[]Region{{3, 0}},\n\t\t\t\"a\\n b\\n c\\n d\\n\",\n\t\t},\n\t\t{ \/\/ empty strings\n\t\t\t\/\/ should perform unindent\n\t\t\t\"\",\n\t\t\tfalse,\n\t\t\tnil,\n\t\t\t[]Region{{0, 0}},\n\t\t\t\"\",\n\t\t},\n\t}\n\n\trunIndentTest(t, tests, \"unindent\")\n}\n<|endoftext|>"} {"text":"<commit_before>package backend_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/bbs\/models\"\n\t\"github.com\/cloudfoundry-incubator\/runtime-schema\/cc_messages\"\n\t\"github.com\/cloudfoundry-incubator\/stager\/backend\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nvar _ = Describe(\"DockerBackend\", func() {\n\tconst (\n\t\tstagingGuid = \"staging-guid\"\n\t\tdockerRegistryPort = uint32(8080)\n\t\tdockerRegistryHost = \"docker-registry.service.cf.internal\"\n\t)\n\n\tvar (\n\t\tdockerRegistryIPs = []string{\"10.244.2.6\", \"10.244.2.7\"}\n\t\tdockerRegistryAddress = fmt.Sprintf(\"%s:%d\", dockerRegistryHost, dockerRegistryPort)\n\n\t\tloginServer string\n\t\tuser string\n\t\tpassword string\n\t\temail string\n\t)\n\n\tsetupDockerBackend := func(insecureDockerRegistry bool, payload string) backend.Backend {\n\t\tserver := ghttp.NewServer()\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/v1\/catalog\/service\/docker-registry\"),\n\t\t\t\thttp.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t\t\tw.Write([]byte(payload))\n\t\t\t\t}),\n\t\t\t),\n\t\t)\n\n\t\tconfig := backend.Config{\n\t\t\tFileServerURL: \"http:\/\/file-server.com\",\n\t\t\tCCUploaderURL: \"http:\/\/cc-uploader.com\",\n\t\t\tConsulCluster: server.URL(),\n\t\t\tDockerRegistryAddress: dockerRegistryAddress,\n\t\t\tInsecureDockerRegistry: insecureDockerRegistry,\n\t\t\tLifecycles: map[string]string{\n\t\t\t\t\"docker\": \"docker_lifecycle\/docker_app_lifecycle.tgz\",\n\t\t\t},\n\t\t}\n\n\t\tlogger := lager.NewLogger(\"fakelogger\")\n\t\tlogger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))\n\n\t\treturn backend.NewDockerBackend(config, logger)\n\t}\n\n\tsetupStagingRequest := func() cc_messages.StagingRequestFromCC {\n\t\trawJsonBytes, err := json.Marshal(cc_messages.DockerStagingData{\n\t\t\tDockerImageUrl: \"busybox\",\n\t\t\tDockerLoginServer: loginServer,\n\t\t\tDockerUser: user,\n\t\t\tDockerPassword: password,\n\t\t\tDockerEmail: email,\n\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tlifecycleData := json.RawMessage(rawJsonBytes)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\treturn cc_messages.StagingRequestFromCC{\n\t\t\tAppId: \"bunny\",\n\t\t\tFileDescriptors: 512,\n\t\t\tMemoryMB: 512,\n\t\t\tDiskMB: 512,\n\t\t\tTimeout: 512,\n\t\t\tLifecycleData: &lifecycleData,\n\t\t\tEgressRules: []*models.SecurityGroupRule{\n\t\t\t\t{\n\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\tDestinations: []string{\"0.0.0.0\/0\"},\n\t\t\t\t\tPortRange: &models.PortRange{Start: 80, End: 443},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tBeforeEach(func() {\n\t\tloginServer = \"\"\n\t\tuser = \"\"\n\t\tpassword = \"\"\n\t\temail = \"\"\n\t})\n\n\tContext(\"when docker registry is running\", func() {\n\t\tvar dockerDownloadAction = models.EmitProgressFor(\n\t\t\t&models.DownloadAction{\n\t\t\t\tFrom: \"http:\/\/file-server.com\/v1\/static\/docker_lifecycle\/docker_app_lifecycle.tgz\",\n\t\t\t\tTo: \"\/tmp\/docker_app_lifecycle\",\n\t\t\t\tCacheKey: \"docker-lifecycle\",\n\t\t\t\tUser: \"vcap\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t\t\"Failed to set up docker environment\",\n\t\t)\n\n\t\tvar (\n\t\t\tdockerBackend backend.Backend\n\t\t\texpectedEgressRules []*models.SecurityGroupRule\n\t\t\tinsecureDockerRegistry bool\n\t\t\tstagingRequest cc_messages.StagingRequestFromCC\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tinsecureDockerRegistry = false\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\texpectedEgressRules = []*models.SecurityGroupRule{}\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tdockerBackend = setupDockerBackend(\n\t\t\t\tinsecureDockerRegistry,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`[\n\t\t\t\t\t\t{\"Address\": \"%s\"},\n\t\t\t\t\t\t{\"Address\": \"%s\"}\n\t\t\t\t ]`,\n\t\t\t\t\tdockerRegistryIPs[0],\n\t\t\t\t\tdockerRegistryIPs[1],\n\t\t\t\t),\n\t\t\t)\n\n\t\t\tstagingRequest = setupStagingRequest()\n\n\t\t\tfor i, _ := range stagingRequest.EgressRules {\n\t\t\t\texpectedEgressRules = append(expectedEgressRules, stagingRequest.EgressRules[i])\n\t\t\t}\n\n\t\t\tfor _, ip := range dockerRegistryIPs {\n\t\t\t\texpectedEgressRules = append(expectedEgressRules, &models.SecurityGroupRule{\n\t\t\t\t\tProtocol: models.TCPProtocol,\n\t\t\t\t\tDestinations: []string{ip},\n\t\t\t\t\tPorts: []uint32{dockerRegistryPort},\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tContext(\"user did not opt-in for docker image caching\", func() {\n\t\t\tIt(\"creates a cf-app-docker-staging Task with no additional egress rules\", func() {\n\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(taskDef.EgressRules).To(Equal(stagingRequest.EgressRules))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"user opted-in for docker image caching\", func() {\n\t\t\tJustBeforeEach(func() {\n\t\t\t\tstagingRequest.Environment = append(stagingRequest.Environment, &models.EnvironmentVariable{\n\t\t\t\t\tName: \"DIEGO_DOCKER_CACHE\",\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and Docker Registry is secure\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinsecureDockerRegistry = false\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not include a -insecureDockerRegistries flag without the dockerRegistryAddress\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and Docker Registry is insecure\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinsecureDockerRegistry = true\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes a -insecureDockerRegistries flag with the dockerRegistryAddress\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t\t\"-insecureDockerRegistries\", dockerRegistryAddress,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and credentials are provided\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tloginServer = \"http:\/\/loginServer.com\"\n\t\t\t\t\tuser = \"user\"\n\t\t\t\t\tpassword = \"password\"\n\t\t\t\t\temail = \"email@example.com\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes credentials flags\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t\t\"-dockerLoginServer\", loginServer,\n\t\t\t\t\t\t\t\"-dockerUser\", user,\n\t\t\t\t\t\t\t\"-dockerPassword\", password,\n\t\t\t\t\t\t\t\"-dockerEmail\", email,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when Docker Registry is not running\", func() {\n\t\tvar (\n\t\t\tdocker backend.Backend\n\t\t\tstagingRequest cc_messages.StagingRequestFromCC\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdocker = setupDockerBackend(true, \"[]\")\n\t\t\tstagingRequest = setupStagingRequest()\n\t\t})\n\n\t\tContext(\"and user opted-in for docker image caching\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tstagingRequest.Environment = append(stagingRequest.Environment, &models.EnvironmentVariable{\n\t\t\t\t\tName: \"DIEGO_DOCKER_CACHE\",\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tIt(\"errors\", func() {\n\t\t\t\t_, _, _, err := docker.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\tExpect(err).To(Equal(backend.ErrMissingDockerRegistry))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"and user did not opt-in for docker image caching\", func() {\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\t_, _, _, err := docker.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Test cleanup: Don't modify the staging request<commit_after>package backend_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry-incubator\/bbs\/models\"\n\t\"github.com\/cloudfoundry-incubator\/runtime-schema\/cc_messages\"\n\t\"github.com\/cloudfoundry-incubator\/stager\/backend\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/ghttp\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\nvar _ = Describe(\"DockerBackend\", func() {\n\tconst (\n\t\tstagingGuid = \"staging-guid\"\n\t\tdockerRegistryPort = uint32(8080)\n\t\tdockerRegistryHost = \"docker-registry.service.cf.internal\"\n\t)\n\n\tvar (\n\t\tdockerRegistryIPs = []string{\"10.244.2.6\", \"10.244.2.7\"}\n\t\tdockerRegistryAddress = fmt.Sprintf(\"%s:%d\", dockerRegistryHost, dockerRegistryPort)\n\n\t\tloginServer string\n\t\tuser string\n\t\tpassword string\n\t\temail string\n\t)\n\n\tsetupDockerBackend := func(insecureDockerRegistry bool, payload string) backend.Backend {\n\t\tserver := ghttp.NewServer()\n\n\t\tserver.AppendHandlers(\n\t\t\tghttp.CombineHandlers(\n\t\t\t\tghttp.VerifyRequest(\"GET\", \"\/v1\/catalog\/service\/docker-registry\"),\n\t\t\t\thttp.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t\t\tw.Write([]byte(payload))\n\t\t\t\t}),\n\t\t\t),\n\t\t)\n\n\t\tconfig := backend.Config{\n\t\t\tFileServerURL: \"http:\/\/file-server.com\",\n\t\t\tCCUploaderURL: \"http:\/\/cc-uploader.com\",\n\t\t\tConsulCluster: server.URL(),\n\t\t\tDockerRegistryAddress: dockerRegistryAddress,\n\t\t\tInsecureDockerRegistry: insecureDockerRegistry,\n\t\t\tLifecycles: map[string]string{\n\t\t\t\t\"docker\": \"docker_lifecycle\/docker_app_lifecycle.tgz\",\n\t\t\t},\n\t\t}\n\n\t\tlogger := lager.NewLogger(\"fakelogger\")\n\t\tlogger.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))\n\n\t\treturn backend.NewDockerBackend(config, logger)\n\t}\n\n\tsetupStagingRequest := func(dockerImageCachingEnabled bool) cc_messages.StagingRequestFromCC {\n\t\trawJsonBytes, err := json.Marshal(cc_messages.DockerStagingData{\n\t\t\tDockerImageUrl: \"busybox\",\n\t\t\tDockerLoginServer: loginServer,\n\t\t\tDockerUser: user,\n\t\t\tDockerPassword: password,\n\t\t\tDockerEmail: email,\n\t\t})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tlifecycleData := json.RawMessage(rawJsonBytes)\n\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tstagingRequest := cc_messages.StagingRequestFromCC{\n\t\t\tAppId: \"bunny\",\n\t\t\tFileDescriptors: 512,\n\t\t\tMemoryMB: 512,\n\t\t\tDiskMB: 512,\n\t\t\tTimeout: 512,\n\t\t\tLifecycleData: &lifecycleData,\n\t\t\tEgressRules: []*models.SecurityGroupRule{\n\t\t\t\t{\n\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\tDestinations: []string{\"0.0.0.0\/0\"},\n\t\t\t\t\tPortRange: &models.PortRange{Start: 80, End: 443},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tif dockerImageCachingEnabled {\n\t\t\tstagingRequest.Environment = append(stagingRequest.Environment, &models.EnvironmentVariable{\n\t\t\t\tName: \"DIEGO_DOCKER_CACHE\",\n\t\t\t\tValue: \"true\",\n\t\t\t})\n\t\t}\n\n\t\treturn stagingRequest\n\t}\n\n\tBeforeEach(func() {\n\t\tloginServer = \"\"\n\t\tuser = \"\"\n\t\tpassword = \"\"\n\t\temail = \"\"\n\t})\n\n\tContext(\"when docker registry is running\", func() {\n\t\tvar dockerDownloadAction = models.EmitProgressFor(\n\t\t\t&models.DownloadAction{\n\t\t\t\tFrom: \"http:\/\/file-server.com\/v1\/static\/docker_lifecycle\/docker_app_lifecycle.tgz\",\n\t\t\t\tTo: \"\/tmp\/docker_app_lifecycle\",\n\t\t\t\tCacheKey: \"docker-lifecycle\",\n\t\t\t\tUser: \"vcap\",\n\t\t\t},\n\t\t\t\"\",\n\t\t\t\"\",\n\t\t\t\"Failed to set up docker environment\",\n\t\t)\n\n\t\tvar (\n\t\t\tdockerBackend backend.Backend\n\t\t\tdockerImageCachingEnabled bool\n\t\t\texpectedEgressRules []*models.SecurityGroupRule\n\t\t\tinsecureDockerRegistry bool\n\t\t\tstagingRequest cc_messages.StagingRequestFromCC\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tinsecureDockerRegistry = false\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\texpectedEgressRules = []*models.SecurityGroupRule{}\n\t\t})\n\n\t\tJustBeforeEach(func() {\n\t\t\tdockerBackend = setupDockerBackend(\n\t\t\t\tinsecureDockerRegistry,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`[\n\t\t\t\t\t\t{\"Address\": \"%s\"},\n\t\t\t\t\t\t{\"Address\": \"%s\"}\n\t\t\t\t ]`,\n\t\t\t\t\tdockerRegistryIPs[0],\n\t\t\t\t\tdockerRegistryIPs[1],\n\t\t\t\t),\n\t\t\t)\n\n\t\t\tstagingRequest = setupStagingRequest(dockerImageCachingEnabled)\n\n\t\t\tfor i, _ := range stagingRequest.EgressRules {\n\t\t\t\texpectedEgressRules = append(expectedEgressRules, stagingRequest.EgressRules[i])\n\t\t\t}\n\n\t\t\tfor _, ip := range dockerRegistryIPs {\n\t\t\t\texpectedEgressRules = append(expectedEgressRules, &models.SecurityGroupRule{\n\t\t\t\t\tProtocol: models.TCPProtocol,\n\t\t\t\t\tDestinations: []string{ip},\n\t\t\t\t\tPorts: []uint32{dockerRegistryPort},\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tContext(\"user did not opt-in for docker image caching\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tdockerImageCachingEnabled = false\n\t\t\t})\n\n\t\t\tIt(\"creates a cf-app-docker-staging Task with no additional egress rules\", func() {\n\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(taskDef.EgressRules).To(Equal(stagingRequest.EgressRules))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"user opted-in for docker image caching\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tdockerImageCachingEnabled = true\n\t\t\t})\n\n\t\t\tContext(\"and Docker Registry is secure\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinsecureDockerRegistry = false\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not include a -insecureDockerRegistries flag without the dockerRegistryAddress\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and Docker Registry is insecure\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinsecureDockerRegistry = true\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes a -insecureDockerRegistries flag with the dockerRegistryAddress\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t\t\"-insecureDockerRegistries\", dockerRegistryAddress,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"and credentials are provided\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tloginServer = \"http:\/\/loginServer.com\"\n\t\t\t\t\tuser = \"user\"\n\t\t\t\t\tpassword = \"password\"\n\t\t\t\t\temail = \"email@example.com\"\n\t\t\t\t})\n\n\t\t\t\tIt(\"includes credentials flags\", func() {\n\t\t\t\t\ttaskDef, _, _, err := dockerBackend.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tExpect(taskDef.Privileged).To(BeTrue())\n\t\t\t\t\tExpect(taskDef.Action).NotTo(BeNil())\n\t\t\t\t\tExpect(taskDef.EgressRules).To(Equal(expectedEgressRules))\n\n\t\t\t\t\tactions := actionsFromTaskDef(taskDef)\n\t\t\t\t\tExpect(actions).To(HaveLen(2))\n\t\t\t\t\tExpect(actions[0].GetEmitProgressAction()).To(Equal(dockerDownloadAction))\n\t\t\t\t\tfileDescriptorLimit := uint64(512)\n\t\t\t\t\tinternalRunAction := models.RunAction{\n\t\t\t\t\t\tPath: \"\/tmp\/docker_app_lifecycle\/builder\",\n\t\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\t\"-outputMetadataJSONFilename\", \"\/tmp\/docker-result\/result.json\",\n\t\t\t\t\t\t\t\"-dockerRef\", \"busybox\",\n\t\t\t\t\t\t\t\"-cacheDockerImage\",\n\t\t\t\t\t\t\t\"-dockerRegistryHost\", dockerRegistryHost,\n\t\t\t\t\t\t\t\"-dockerRegistryPort\", fmt.Sprintf(\"%d\", dockerRegistryPort),\n\t\t\t\t\t\t\t\"-dockerRegistryIPs\", strings.Join(dockerRegistryIPs, \",\"),\n\t\t\t\t\t\t\t\"-dockerLoginServer\", loginServer,\n\t\t\t\t\t\t\t\"-dockerUser\", user,\n\t\t\t\t\t\t\t\"-dockerPassword\", password,\n\t\t\t\t\t\t\t\"-dockerEmail\", email,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv: []*models.EnvironmentVariable{\n\t\t\t\t\t\t\t&models.EnvironmentVariable{Name: \"DIEGO_DOCKER_CACHE\", Value: \"true\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceLimits: &models.ResourceLimits{\n\t\t\t\t\t\t\tNofile: &fileDescriptorLimit,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUser: \"root\",\n\t\t\t\t\t}\n\t\t\t\t\texpectedRunAction := models.EmitProgressFor(\n\t\t\t\t\t\t&internalRunAction,\n\t\t\t\t\t\t\"Staging...\",\n\t\t\t\t\t\t\"Staging Complete\",\n\t\t\t\t\t\t\"Staging Failed\",\n\t\t\t\t\t)\n\t\t\t\t\tExpect(actions[1].GetEmitProgressAction()).To(Equal(expectedRunAction))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when Docker Registry is not running\", func() {\n\t\tvar (\n\t\t\tdocker backend.Backend\n\t\t\tstagingRequest cc_messages.StagingRequestFromCC\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdocker = setupDockerBackend(true, \"[]\")\n\t\t})\n\n\t\tContext(\"and user opted-in for docker image caching\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tstagingRequest = setupStagingRequest(true)\n\t\t\t})\n\n\t\t\tIt(\"errors\", func() {\n\t\t\t\t_, _, _, err := docker.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\tExpect(err).To(Equal(backend.ErrMissingDockerRegistry))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"and user did not opt-in for docker image caching\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\tstagingRequest = setupStagingRequest(false)\n\t\t\t})\n\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\t_, _, _, err := docker.BuildRecipe(stagingGuid, stagingRequest)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tracksprocessor\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"github.com\/bogem\/id3v2\"\n\t\"github.com\/bogem\/nehm\/applescript\"\n\t\"github.com\/bogem\/nehm\/config\"\n\t\"github.com\/bogem\/nehm\/track\"\n\t\"github.com\/bogem\/nehm\/ui\"\n)\n\ntype TracksProcessor struct {\n\tDownloadFolder string \/\/ In this folder tracks will be downloaded\n\tItunesPlaylist string \/\/ In this playlist tracks will be added\n}\n\nfunc NewConfiguredTracksProcessor() *TracksProcessor {\n\treturn &TracksProcessor{\n\t\tDownloadFolder: config.Get(\"dlFolder\"),\n\t\tItunesPlaylist: config.Get(\"itunesPlaylist\"),\n\t}\n}\n\nfunc (tp TracksProcessor) ProcessAll(tracks []track.Track) {\n\tif len(tracks) == 0 {\n\t\tui.Term(\"there are no tracks to download\", nil)\n\t}\n\n\tvar errors []string\n\t\/\/ Start with last track\n\tfor i := len(tracks) - 1; i >= 0; i-- {\n\t\ttrack := tracks[i]\n\t\tif err := tp.Process(track); err != nil {\n\t\t\terrors = append(errors, track.Fullname()+\": \"+err.Error())\n\n\t\t\tui.Error(\"there was an error while downloading \"+track.Fullname(), err)\n\t\t\tui.Newline()\n\t\t\tcontinue\n\t\t}\n\t\tui.Newline()\n\t}\n\n\tif len(errors) > 0 {\n\t\tui.Println(ui.RedString(\"There were errors while downloading tracks:\"))\n\t\tfor _, errText := range errors {\n\t\t\tui.Println(ui.RedString(\" \" + errText))\n\t\t}\n\t\tui.Newline()\n\t}\n\n\tui.Success(\"Done!\")\n\tui.Quit()\n}\n\nfunc (tp TracksProcessor) Process(t track.Track) error {\n\t\/\/ Download track\n\ttrackPath := filepath.Join(tp.DownloadFolder, t.Filename())\n\tif _, err := os.Create(trackPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't create track file: %v\", err)\n\t}\n\tif err := downloadTrack(t, trackPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't download track: %v\", err)\n\t}\n\n\t\/\/ Download artwork\n\tartworkFile, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't create artwork file: %v\", err)\n\t}\n\tartworkPath := artworkFile.Name()\n\tif err := downloadArtwork(t, artworkPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't download artwork file: %v\", err)\n\t}\n\n\t\/\/ Tag track\n\tif err := tag(t, trackPath, artworkFile); err != nil {\n\t\treturn fmt.Errorf(\"coudln't tag file: %v\", err)\n\t}\n\n\t\/\/ Delete artwork\n\tif err := artworkFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"couldn't close artwork file: %v\", err)\n\t}\n\tif err := os.Remove(artworkPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't remove artwork file: %v\", err)\n\t}\n\n\t\/\/ Add to iTunes\n\tif tp.ItunesPlaylist != \"\" {\n\t\tui.Println(\"Adding to iTunes\")\n\t\tif err := applescript.AddTrackToPlaylist(trackPath, tp.ItunesPlaylist); err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't add track to playlist: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc downloadTrack(t track.Track, path string) error {\n\tui.Println(\"Downloading \" + t.Artist() + \" - \" + t.Title())\n\treturn runDownloadCmd(path, t.URL())\n}\n\nfunc downloadArtwork(t track.Track, path string) error {\n\tui.Println(\"Downloading artwork\")\n\treturn runDownloadCmd(path, t.ArtworkURL())\n}\n\nfunc runDownloadCmd(path, url string) error {\n\tcmd := exec.Command(\"curl\", \"-#\", \"-o\", path, \"-L\", url)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc tag(t track.Track, trackPath string, artwork io.Reader) error {\n\ttag, err := id3v2.Open(trackPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tag.Close()\n\n\ttag.SetArtist(t.Artist())\n\ttag.SetTitle(t.Title())\n\ttag.SetYear(t.Year())\n\n\tpic := id3v2.PictureFrame{\n\t\tEncoding: id3v2.ENUTF8,\n\t\tMimeType: \"image\/jpeg\",\n\t\tPictureType: id3v2.PTFrontCover,\n\t\tPicture: artwork,\n\t}\n\ttag.AddAttachedPicture(pic)\n\n\treturn tag.Save()\n}\n<commit_msg>Delete redundant code in tracksprocessor<commit_after>\/\/ Copyright 2016 Albert Nigmatzianov. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tracksprocessor\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\n\t\"github.com\/bogem\/id3v2\"\n\t\"github.com\/bogem\/nehm\/applescript\"\n\t\"github.com\/bogem\/nehm\/config\"\n\t\"github.com\/bogem\/nehm\/track\"\n\t\"github.com\/bogem\/nehm\/ui\"\n)\n\ntype TracksProcessor struct {\n\tDownloadFolder string \/\/ In this folder tracks will be downloaded\n\tItunesPlaylist string \/\/ In this playlist tracks will be added\n}\n\nfunc NewConfiguredTracksProcessor() *TracksProcessor {\n\treturn &TracksProcessor{\n\t\tDownloadFolder: config.Get(\"dlFolder\"),\n\t\tItunesPlaylist: config.Get(\"itunesPlaylist\"),\n\t}\n}\n\nfunc (tp TracksProcessor) ProcessAll(tracks []track.Track) {\n\tif len(tracks) == 0 {\n\t\tui.Term(\"there are no tracks to download\", nil)\n\t}\n\n\tvar errors []string\n\t\/\/ Start with last track\n\tfor i := len(tracks) - 1; i >= 0; i-- {\n\t\ttrack := tracks[i]\n\t\tif err := tp.Process(track); err != nil {\n\t\t\terrors = append(errors, track.Fullname()+\": \"+err.Error())\n\t\t\tui.Error(\"there was an error while downloading \"+track.Fullname(), err)\n\t\t}\n\t\tui.Newline()\n\t}\n\n\tif len(errors) > 0 {\n\t\tui.Println(ui.RedString(\"There were errors while downloading tracks:\"))\n\t\tfor _, errText := range errors {\n\t\t\tui.Println(ui.RedString(\" \" + errText))\n\t\t}\n\t\tui.Newline()\n\t}\n\n\tui.Success(\"Done!\")\n\tui.Quit()\n}\n\nfunc (tp TracksProcessor) Process(t track.Track) error {\n\t\/\/ Download track\n\ttrackPath := filepath.Join(tp.DownloadFolder, t.Filename())\n\tif _, err := os.Create(trackPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't create track file: %v\", err)\n\t}\n\tif err := downloadTrack(t, trackPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't download track: %v\", err)\n\t}\n\n\t\/\/ Download artwork\n\tartworkFile, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't create artwork file: %v\", err)\n\t}\n\tartworkPath := artworkFile.Name()\n\tif err := downloadArtwork(t, artworkPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't download artwork file: %v\", err)\n\t}\n\n\t\/\/ Tag track\n\tif err := tag(t, trackPath, artworkFile); err != nil {\n\t\treturn fmt.Errorf(\"coudln't tag file: %v\", err)\n\t}\n\n\t\/\/ Delete artwork\n\tif err := artworkFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"couldn't close artwork file: %v\", err)\n\t}\n\tif err := os.Remove(artworkPath); err != nil {\n\t\treturn fmt.Errorf(\"couldn't remove artwork file: %v\", err)\n\t}\n\n\t\/\/ Add to iTunes\n\tif tp.ItunesPlaylist != \"\" {\n\t\tui.Println(\"Adding to iTunes\")\n\t\tif err := applescript.AddTrackToPlaylist(trackPath, tp.ItunesPlaylist); err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't add track to playlist: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc downloadTrack(t track.Track, path string) error {\n\tui.Println(\"Downloading \" + t.Artist() + \" - \" + t.Title())\n\treturn runDownloadCmd(path, t.URL())\n}\n\nfunc downloadArtwork(t track.Track, path string) error {\n\tui.Println(\"Downloading artwork\")\n\treturn runDownloadCmd(path, t.ArtworkURL())\n}\n\nfunc runDownloadCmd(path, url string) error {\n\tcmd := exec.Command(\"curl\", \"-#\", \"-o\", path, \"-L\", url)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc tag(t track.Track, trackPath string, artwork io.Reader) error {\n\ttag, err := id3v2.Open(trackPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tag.Close()\n\n\ttag.SetArtist(t.Artist())\n\ttag.SetTitle(t.Title())\n\ttag.SetYear(t.Year())\n\n\tpic := id3v2.PictureFrame{\n\t\tEncoding: id3v2.ENUTF8,\n\t\tMimeType: \"image\/jpeg\",\n\t\tPictureType: id3v2.PTFrontCover,\n\t\tPicture: artwork,\n\t}\n\ttag.AddAttachedPicture(pic)\n\n\treturn tag.Save()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package authproxy implements a connector which relies on external\n\/\/ authentication (e.g. mod_auth in Apache2) and returns an identity with the\n\/\/ HTTP header X-Remote-User as verified email.\npackage authproxy\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/dexidp\/dex\/connector\"\n\t\"github.com\/dexidp\/dex\/pkg\/log\"\n)\n\n\/\/ Config holds the configuration parameters for a connector which returns an\n\/\/ identity with the HTTP header X-Remote-User as verified email.\ntype Config struct {\n\tHeaderName string `json:\"headerName\"`\n}\n\n\/\/ Open returns an authentication strategy which requires no user interaction.\nfunc (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) {\n\tif c.HeaderName == \"\" {\n\t\tc.HeaderName = \"X-Remote-User\"\n\t}\n\treturn &callback{headerName: c.HeaderName, logger: logger, pathSuffix: \"\/\" + id}, nil\n}\n\n\/\/ Callback is a connector which returns an identity with the HTTP header\n\/\/ X-Remote-User as verified email.\ntype callback struct {\n\theaderName string\n\tlogger log.Logger\n\tpathSuffix string\n}\n\n\/\/ LoginURL returns the URL to redirect the user to login with.\nfunc (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) {\n\tu, err := url.Parse(callbackURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse callbackURL %q: %v\", callbackURL, err)\n\t}\n\tu.Path += m.pathSuffix\n\tv := u.Query()\n\tv.Set(\"state\", state)\n\tu.RawQuery = v.Encode()\n\treturn u.String(), nil\n}\n\n\/\/ HandleCallback parses the request and returns the user's identity\nfunc (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) {\n\tremoteUser := r.Header.Get(m.headerName)\n\tif remoteUser == \"\" {\n\t\treturn connector.Identity{}, fmt.Errorf(\"required HTTP header %s is not set\", m.headerName)\n\t}\n\t\/\/ TODO: add support for X-Remote-Group, see\n\t\/\/ https:\/\/kubernetes.io\/docs\/admin\/authentication\/#authenticating-proxy\n\treturn connector.Identity{\n\t\tUserID: remoteUser, \/\/ TODO: figure out if this is a bad ID value.\n\t\tEmail: remoteUser,\n\t\tEmailVerified: true,\n\t}, nil\n}\n<commit_msg>Allow configuration of groups for authproxy<commit_after>\/\/ Package authproxy implements a connector which relies on external\n\/\/ authentication (e.g. mod_auth in Apache2) and returns an identity with the\n\/\/ HTTP header X-Remote-User as verified email.\npackage authproxy\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\n\t\"github.com\/dexidp\/dex\/connector\"\n\t\"github.com\/dexidp\/dex\/pkg\/log\"\n)\n\n\/\/ Config holds the configuration parameters for a connector which returns an\n\/\/ identity with the HTTP header X-Remote-User as verified email.\ntype Config struct {\n\tHeaderName string `json:\"headerName\"`\n\tGroups []string `json:\"groups\"`\n}\n\n\/\/ Open returns an authentication strategy which requires no user interaction.\nfunc (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) {\n\tif c.HeaderName == \"\" {\n\t\tc.HeaderName = \"X-Remote-User\"\n\t}\n\treturn &callback{headerName: c.HeaderName, logger: logger, pathSuffix: \"\/\" + id, groups: c.Groups}, nil\n}\n\n\/\/ Callback is a connector which returns an identity with the HTTP header\n\/\/ X-Remote-User as verified email.\ntype callback struct {\n\theaderName string\n\tgroups []string\n\tlogger log.Logger\n\tpathSuffix string\n}\n\n\/\/ LoginURL returns the URL to redirect the user to login with.\nfunc (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) {\n\tu, err := url.Parse(callbackURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse callbackURL %q: %v\", callbackURL, err)\n\t}\n\tu.Path += m.pathSuffix\n\tv := u.Query()\n\tv.Set(\"state\", state)\n\tu.RawQuery = v.Encode()\n\treturn u.String(), nil\n}\n\n\/\/ HandleCallback parses the request and returns the user's identity\nfunc (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) {\n\tremoteUser := r.Header.Get(m.headerName)\n\tif remoteUser == \"\" {\n\t\treturn connector.Identity{}, fmt.Errorf(\"required HTTP header %s is not set\", m.headerName)\n\t}\n\t\/\/ TODO: add support for X-Remote-Group, see\n\t\/\/ https:\/\/kubernetes.io\/docs\/admin\/authentication\/#authenticating-proxy\n\treturn connector.Identity{\n\t\tUserID: remoteUser, \/\/ TODO: figure out if this is a bad ID value.\n\t\tEmail: remoteUser,\n\t\tEmailVerified: true,\n\t\tGroups: m.groups,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage flex\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"testing\"\n\n\t\"golang.org\/x\/exp\/shiny\/unit\"\n\t\"golang.org\/x\/exp\/shiny\/widget\"\n\t\"golang.org\/x\/exp\/shiny\/widget\/node\"\n)\n\ntype layoutTest struct {\n\tdirection Direction\n\twrap FlexWrap\n\talignContent AlignContent\n\tsize image.Point \/\/ size of container\n\tmeasured [][2]float64 \/\/ MeasuredSize of child elements\n\tlayoutData []LayoutData \/\/ LayoutData of child elements\n\twant []image.Rectangle \/\/ final Rect of child elements\n}\n\nvar colors = []color.RGBA{\n\t{0x00, 0x7f, 0x7f, 0xff}, \/\/ Cyan\n\t{0x7f, 0x00, 0x7f, 0xff}, \/\/ Magenta\n\t{0x7f, 0x7f, 0x00, 0xff}, \/\/ Yellow\n\t{0xff, 0x00, 0x00, 0xff}, \/\/ Red\n\t{0x00, 0xff, 0x00, 0xff}, \/\/ Green\n\t{0x00, 0x00, 0xff, 0xff}, \/\/ Blue\n}\n\nvar layoutTests = []layoutTest{{\n\tsize: image.Point{100, 100},\n\tmeasured: [][2]float64{{100, 100}},\n\twant: []image.Rectangle{\n\t\timage.Rect(0, 0, 100, 100),\n\t},\n}}\n\nfunc TestLayout(t *testing.T) {\n\tfor testNum, test := range layoutTests {\n\t\tw := NewFlex()\n\t\tw.Direction = test.direction\n\t\tw.Wrap = test.wrap\n\t\tw.AlignContent = test.alignContent\n\n\t\tvar children []node.Node\n\t\tfor i, sz := range test.measured {\n\t\t\tn := widget.NewUniform(colors[i], unit.Pixels(sz[0]), unit.Pixels(sz[1]))\n\t\t\tif test.layoutData != nil {\n\t\t\t\tn.LayoutData = test.layoutData[i]\n\t\t\t}\n\t\t\tw.AppendChild(n)\n\t\t\tchildren = append(children, n)\n\t\t}\n\n\t\tw.Measure(nil)\n\t\tw.Rect = image.Rectangle{Max: test.size}\n\t\tw.Layout(nil)\n\n\t\tbad := false\n\t\tfor i, n := range children {\n\t\t\tif n.Wrappee().Rect != test.want[i] {\n\t\t\t\tbad = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif bad {\n\t\t\tt.Logf(\"Bad testNum %d\", testNum)\n\t\t\t\/\/ TODO print html so we can see the correct layout\n\t\t}\n\t\tfor i, n := range children {\n\t\t\tif got, want := n.Wrappee().Rect, test.want[i]; got != want {\n\t\t\t\tt.Errorf(\"[%d].Rect=%v, want %v\", i, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>shiny\/widget\/flex: add HTML printing of tests<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage flex\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"testing\"\n\n\t\"golang.org\/x\/exp\/shiny\/unit\"\n\t\"golang.org\/x\/exp\/shiny\/widget\"\n\t\"golang.org\/x\/exp\/shiny\/widget\/node\"\n)\n\ntype layoutTest struct {\n\tdirection Direction\n\twrap FlexWrap\n\talignContent AlignContent\n\tsize image.Point \/\/ size of container\n\tmeasured [][2]float64 \/\/ MeasuredSize of child elements\n\tlayoutData []LayoutData \/\/ LayoutData of child elements\n\twant []image.Rectangle \/\/ final Rect of child elements\n}\n\nfunc (t *layoutTest) html() string {\n\tbuf := new(bytes.Buffer)\n\tfmt.Fprintf(buf, `<style>\n#container {\n\tdisplay: flex;\n\twidth: %dpx;\n\theight: %dpx;\n`, t.size.X, t.size.Y)\n\n\tswitch t.direction {\n\tcase Row:\n\tcase RowReverse:\n\t\tfmt.Fprintf(buf, \"\\tflex-direction: row-reverse;\\n\")\n\tcase Column:\n\t\tfmt.Fprintf(buf, \"\\tflex-direction: column;\\n\")\n\tcase ColumnReverse:\n\t\tfmt.Fprintf(buf, \"\\tflex-direction: column-reverse;\\n\")\n\t}\n\tswitch t.wrap {\n\tcase NoWrap:\n\tcase Wrap:\n\t\tfmt.Fprintf(buf, \"\\tflex-wrap: wrap;\\n\")\n\tcase WrapReverse:\n\t\tfmt.Fprintf(buf, \"\\tflex-wrap: wrap-reverse;\\n\")\n\t}\n\tswitch t.alignContent {\n\tcase AlignContentStart:\n\tcase AlignContentEnd:\n\t\tfmt.Fprintf(buf, \"\\talign-content: flex-end;\\n\")\n\tcase AlignContentCenter:\n\t\tfmt.Fprintf(buf, \"\\talign-content: center;\\n\")\n\tcase AlignContentSpaceBetween:\n\t\tfmt.Fprintf(buf, \"\\talign-content: space-between;\\n\")\n\tcase AlignContentSpaceAround:\n\t\tfmt.Fprintf(buf, \"\\talign-content: space-around;\\n\")\n\tcase AlignContentStretch:\n\t\tfmt.Fprintf(buf, \"\\talign-content: stretch;\\n\")\n\t}\n\tfmt.Fprintf(buf, \"}\\n\")\n\n\tfor i, m := range t.measured {\n\t\tfmt.Fprintf(buf, `#child%d {\n\twidth: %.2fpx;\n\theight: %.2fpx;\n`, i, m[0], m[1])\n\t\tc := colors[i%len(colors)]\n\t\tfmt.Fprintf(buf, \"\\tbackground-color: rgb(%d, %d, %d);\\n\", c.R, c.G, c.B)\n\t\tif t.layoutData != nil {\n\t\t\td := t.layoutData[i]\n\t\t\tif d.MinSize.X != 0 {\n\t\t\t\tfmt.Fprintf(buf, \"\\tmin-width: %dpx;\\n\", d.MinSize.X)\n\t\t\t}\n\t\t\tif d.MinSize.Y != 0 {\n\t\t\t\tfmt.Fprintf(buf, \"\\tmin-height: %dpx;\\n\", d.MinSize.Y)\n\t\t\t}\n\t\t\tif d.MaxSize != nil {\n\t\t\t\tfmt.Fprintf(buf, \"\\tmax-width: %dpx;\\n\", d.MaxSize.X)\n\t\t\t\tfmt.Fprintf(buf, \"\\tmax-height: %dpx;\\n\", d.MaxSize.Y)\n\t\t\t}\n\t\t\tif d.Grow != 0 {\n\t\t\t\tfmt.Fprintf(buf, \"\\tflex-grow: %f;\\n\", d.Grow)\n\t\t\t}\n\t\t\tif d.Shrink != nil {\n\t\t\t\tfmt.Fprintf(buf, \"\\tflex-shrink: %f;\\n\", *d.Shrink)\n\t\t\t}\n\t\t\t\/\/ TODO: Basis, Align, BreakAfter\n\t\t}\n\t\tfmt.Fprintf(buf, \"}\\n\")\n\t}\n\tfmt.Fprintf(buf, `<\/style>\n<div id=\"container\">\n`)\n\tfor i := range t.measured {\n\t\tfmt.Fprintf(buf, \"\\t<div id=\\\"child%d\\\"><\/div>\\n\", i)\n\t}\n\tfmt.Fprintf(buf, `<\/div>\n<pre id=\"out\"><\/pre>\n<script>\nvar out = document.getElementById(\"out\");\nvar container = document.getElementById(\"container\");\nfor (var i = 0; i < container.children.length; i++) {\n\tvar c = container.children[i];\n\tvar ctop = c.offsetTop - container.offsetTop;\n\tvar cleft = c.offsetLeft - container.offsetLeft;\n\tvar cbottom = ctop + c.offsetHeight;\n\tvar cright = cleft + c.offsetWidth;\n\n\tout.innerHTML += \"\\timage.Rect(\" + cleft + \", \" + ctop + \", \" + cright + \", \" + cbottom + \"),\\n\";\n}\n<\/script>\n`)\n\n\treturn buf.String()\n}\n\nvar colors = []color.RGBA{\n\t{0x00, 0x7f, 0x7f, 0xff}, \/\/ Cyan\n\t{0x7f, 0x00, 0x7f, 0xff}, \/\/ Magenta\n\t{0x7f, 0x7f, 0x00, 0xff}, \/\/ Yellow\n\t{0xff, 0x00, 0x00, 0xff}, \/\/ Red\n\t{0x00, 0xff, 0x00, 0xff}, \/\/ Green\n\t{0x00, 0x00, 0xff, 0xff}, \/\/ Blue\n}\n\nvar layoutTests = []layoutTest{{\n\tsize: image.Point{100, 100},\n\tmeasured: [][2]float64{{100, 100}},\n\twant: []image.Rectangle{\n\t\timage.Rect(0, 0, 100, 100),\n\t},\n}}\n\nfunc TestLayout(t *testing.T) {\n\tfor testNum, test := range layoutTests {\n\t\tw := NewFlex()\n\t\tw.Direction = test.direction\n\t\tw.Wrap = test.wrap\n\t\tw.AlignContent = test.alignContent\n\n\t\tvar children []node.Node\n\t\tfor i, sz := range test.measured {\n\t\t\tn := widget.NewUniform(colors[i], unit.Pixels(sz[0]), unit.Pixels(sz[1]))\n\t\t\tif test.layoutData != nil {\n\t\t\t\tn.LayoutData = test.layoutData[i]\n\t\t\t}\n\t\t\tw.AppendChild(n)\n\t\t\tchildren = append(children, n)\n\t\t}\n\n\t\tw.Measure(nil)\n\t\tw.Rect = image.Rectangle{Max: test.size}\n\t\tw.Layout(nil)\n\n\t\tbad := false\n\t\tfor i, n := range children {\n\t\t\tif n.Wrappee().Rect != test.want[i] {\n\t\t\t\tbad = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif bad {\n\t\t\tt.Logf(\"Bad testNum %d:\\n%s\", testNum, test.html())\n\t\t}\n\t\tfor i, n := range children {\n\t\t\tif got, want := n.Wrappee().Rect, test.want[i]; got != want {\n\t\t\t\tt.Errorf(\"[%d].Rect=%v, want %v\", i, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package googlecompute\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/common\/uuid\"\n\t\"github.com\/mitchellh\/packer\/helper\/communicator\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\n\/\/ Config is the configuration structure for the GCE builder. It stores\n\/\/ both the publicly settable state as well as the privately generated\n\/\/ state of the config object.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tComm communicator.Config `mapstructure:\",squash\"`\n\n\tAccountFile string `mapstructure:\"account_file\"`\n\tProjectId string `mapstructure:\"project_id\"`\n\n\tDiskName string `mapstructure:\"disk_name\"`\n\tDiskSizeGb int64 `mapstructure:\"disk_size\"`\n\tDiskType string `mapstructure:\"disk_type\"`\n\tImageName string `mapstructure:\"image_name\"`\n\tImageDescription string `mapstructure:\"image_description\"`\n\tImageFamily string `mapstructure:\"image_family\"`\n\tInstanceName string `mapstructure:\"instance_name\"`\n\tMachineType string `mapstructure:\"machine_type\"`\n\tMetadata map[string]string `mapstructure:\"metadata\"`\n\tNetwork string `mapstructure:\"network\"`\n\tSubnetwork string `mapstructure:\"subnetwork\"`\n\tAddress string `mapstructure:\"address\"`\n\tPreemptible bool `mapstructure:\"preemptible\"`\n\tSourceImage string `mapstructure:\"source_image\"`\n\tSourceImageProjectId string `mapstructure:\"source_image_project_id\"`\n\tRawStateTimeout string `mapstructure:\"state_timeout\"`\n\tTags []string `mapstructure:\"tags\"`\n\tUseInternalIP bool `mapstructure:\"use_internal_ip\"`\n\tRegion string `mapstructure:\"region\"`\n\tZone string `mapstructure:\"zone\"`\n\n\taccount accountFile\n\tprivateKeyBytes []byte\n\tstateTimeout time.Duration\n\tctx interpolate.Context\n}\n\nfunc NewConfig(raws ...interface{}) (*Config, []string, error) {\n\tc := new(Config)\n\terr := config.Decode(c, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &c.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"run_command\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar errs *packer.MultiError\n\n\t\/\/ Set defaults.\n\tif c.Network == \"\" {\n\t\tc.Network = \"default\"\n\t}\n\n\tif c.DiskSizeGb == 0 {\n\t\tc.DiskSizeGb = 10\n\t}\n\n\tif c.DiskType == \"\" {\n\t\tc.DiskType = \"pd-standard\"\n\t}\n\n\tif c.ImageDescription == \"\" {\n\t\tc.ImageDescription = \"Created by Packer\"\n\t}\n\n\tif c.ImageName == \"\" {\n\t\timg, err := interpolate.Render(\"packer-{{timestamp}}\", nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Unable to parse image name: %s \", err))\n\t\t} else {\n\t\t\tc.ImageName = img\n\t\t}\n\t}\n\n\tif len(c.ImageFamily) > 63 {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Invalid image family: Must not be longer than 63 characters\"))\n\t}\n\n\tif c.ImageFamily != \"\" {\n\t\tif !regexp.MustCompile(`^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$`).MatchString(c.ImageFamily) {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\terrors.New(\"Invalid image family: The first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash\"))\n\t\t}\n\n\t}\n\n\tif c.InstanceName == \"\" {\n\t\tc.InstanceName = fmt.Sprintf(\"packer-%s\", uuid.TimeOrderedUUID())\n\t}\n\n\tif c.DiskName == \"\" {\n\t\tc.DiskName = c.InstanceName\n\t}\n\n\tif c.MachineType == \"\" {\n\t\tc.MachineType = \"n1-standard-1\"\n\t}\n\n\tif c.RawStateTimeout == \"\" {\n\t\tc.RawStateTimeout = \"5m\"\n\t}\n\n\tif c.Comm.SSHUsername == \"\" {\n\t\tc.Comm.SSHUsername = \"root\"\n\t}\n\n\tif es := c.Comm.Prepare(&c.ctx); len(es) > 0 {\n\t\terrs = packer.MultiErrorAppend(errs, es...)\n\t}\n\n\t\/\/ Process required parameters.\n\tif c.ProjectId == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a project_id must be specified\"))\n\t}\n\n\tif c.SourceImage == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a source_image must be specified\"))\n\t}\n\n\tif c.Zone == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a zone must be specified\"))\n\t}\n\tif c.Region == \"\" && len(c.Zone) > 2 {\n\t\t\/\/ get region from Zone\n\t\tregion := c.Zone[:len(c.Zone)-2]\n\t\tc.Region = region\n\t}\n\n\tstateTimeout, err := time.ParseDuration(c.RawStateTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing state_timeout: %s\", err))\n\t}\n\tc.stateTimeout = stateTimeout\n\n\tif c.AccountFile != \"\" {\n\t\tif err := processAccountFile(&c.account, c.AccountFile); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs, err)\n\t\t}\n\t}\n\n\t\/\/ Check for any errors.\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, nil, errs\n\t}\n\n\treturn c, nil, nil\n}\n<commit_msg>Move regexp to package scope so it will be validated by the compiler instead of at runtime<commit_after>package googlecompute\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/common\/uuid\"\n\t\"github.com\/mitchellh\/packer\/helper\/communicator\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\nvar reImageFamily = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$`)\n\n\/\/ Config is the configuration structure for the GCE builder. It stores\n\/\/ both the publicly settable state as well as the privately generated\n\/\/ state of the config object.\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tComm communicator.Config `mapstructure:\",squash\"`\n\n\tAccountFile string `mapstructure:\"account_file\"`\n\tProjectId string `mapstructure:\"project_id\"`\n\n\tDiskName string `mapstructure:\"disk_name\"`\n\tDiskSizeGb int64 `mapstructure:\"disk_size\"`\n\tDiskType string `mapstructure:\"disk_type\"`\n\tImageName string `mapstructure:\"image_name\"`\n\tImageDescription string `mapstructure:\"image_description\"`\n\tImageFamily string `mapstructure:\"image_family\"`\n\tInstanceName string `mapstructure:\"instance_name\"`\n\tMachineType string `mapstructure:\"machine_type\"`\n\tMetadata map[string]string `mapstructure:\"metadata\"`\n\tNetwork string `mapstructure:\"network\"`\n\tSubnetwork string `mapstructure:\"subnetwork\"`\n\tAddress string `mapstructure:\"address\"`\n\tPreemptible bool `mapstructure:\"preemptible\"`\n\tSourceImage string `mapstructure:\"source_image\"`\n\tSourceImageProjectId string `mapstructure:\"source_image_project_id\"`\n\tRawStateTimeout string `mapstructure:\"state_timeout\"`\n\tTags []string `mapstructure:\"tags\"`\n\tUseInternalIP bool `mapstructure:\"use_internal_ip\"`\n\tRegion string `mapstructure:\"region\"`\n\tZone string `mapstructure:\"zone\"`\n\n\taccount accountFile\n\tprivateKeyBytes []byte\n\tstateTimeout time.Duration\n\tctx interpolate.Context\n}\n\nfunc NewConfig(raws ...interface{}) (*Config, []string, error) {\n\tc := new(Config)\n\terr := config.Decode(c, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &c.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"run_command\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar errs *packer.MultiError\n\n\t\/\/ Set defaults.\n\tif c.Network == \"\" {\n\t\tc.Network = \"default\"\n\t}\n\n\tif c.DiskSizeGb == 0 {\n\t\tc.DiskSizeGb = 10\n\t}\n\n\tif c.DiskType == \"\" {\n\t\tc.DiskType = \"pd-standard\"\n\t}\n\n\tif c.ImageDescription == \"\" {\n\t\tc.ImageDescription = \"Created by Packer\"\n\t}\n\n\tif c.ImageName == \"\" {\n\t\timg, err := interpolate.Render(\"packer-{{timestamp}}\", nil)\n\t\tif err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\tfmt.Errorf(\"Unable to parse image name: %s \", err))\n\t\t} else {\n\t\t\tc.ImageName = img\n\t\t}\n\t}\n\n\tif len(c.ImageFamily) > 63 {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\terrors.New(\"Invalid image family: Must not be longer than 63 characters\"))\n\t}\n\n\tif c.ImageFamily != \"\" {\n\t\tif !reImageFamily.MatchString(c.ImageFamily) {\n\t\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\t\terrors.New(\"Invalid image family: The first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash\"))\n\t\t}\n\n\t}\n\n\tif c.InstanceName == \"\" {\n\t\tc.InstanceName = fmt.Sprintf(\"packer-%s\", uuid.TimeOrderedUUID())\n\t}\n\n\tif c.DiskName == \"\" {\n\t\tc.DiskName = c.InstanceName\n\t}\n\n\tif c.MachineType == \"\" {\n\t\tc.MachineType = \"n1-standard-1\"\n\t}\n\n\tif c.RawStateTimeout == \"\" {\n\t\tc.RawStateTimeout = \"5m\"\n\t}\n\n\tif c.Comm.SSHUsername == \"\" {\n\t\tc.Comm.SSHUsername = \"root\"\n\t}\n\n\tif es := c.Comm.Prepare(&c.ctx); len(es) > 0 {\n\t\terrs = packer.MultiErrorAppend(errs, es...)\n\t}\n\n\t\/\/ Process required parameters.\n\tif c.ProjectId == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a project_id must be specified\"))\n\t}\n\n\tif c.SourceImage == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a source_image must be specified\"))\n\t}\n\n\tif c.Zone == \"\" {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, errors.New(\"a zone must be specified\"))\n\t}\n\tif c.Region == \"\" && len(c.Zone) > 2 {\n\t\t\/\/ get region from Zone\n\t\tregion := c.Zone[:len(c.Zone)-2]\n\t\tc.Region = region\n\t}\n\n\tstateTimeout, err := time.ParseDuration(c.RawStateTimeout)\n\tif err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Failed parsing state_timeout: %s\", err))\n\t}\n\tc.stateTimeout = stateTimeout\n\n\tif c.AccountFile != \"\" {\n\t\tif err := processAccountFile(&c.account, c.AccountFile); err != nil {\n\t\t\terrs = packer.MultiErrorAppend(errs, err)\n\t\t}\n\t}\n\n\t\/\/ Check for any errors.\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, nil, errs\n\t}\n\n\treturn c, nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package remotecontext\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/api\/types\/backend\"\n\t\"github.com\/docker\/docker\/builder\"\n\t\"github.com\/docker\/docker\/builder\/dockerfile\/parser\"\n\t\"github.com\/docker\/docker\/builder\/dockerignore\"\n\t\"github.com\/docker\/docker\/pkg\/fileutils\"\n\t\"github.com\/docker\/docker\/pkg\/httputils\"\n\t\"github.com\/docker\/docker\/pkg\/symlink\"\n\t\"github.com\/docker\/docker\/pkg\/urlutil\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Detect returns a context and dockerfile from remote location or local\n\/\/ archive. progressReader is only used if remoteURL is actually a URL\n\/\/ (not empty, and not a Git endpoint).\nfunc Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) {\n\tremoteURL := config.Options.RemoteContext\n\tdockerfilePath := config.Options.Dockerfile\n\n\tswitch {\n\tcase remoteURL == \"\":\n\t\tremote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)\n\tcase urlutil.IsGitURL(remoteURL):\n\t\tremote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)\n\tcase urlutil.IsURL(remoteURL):\n\t\tremote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc)\n\tdefault:\n\t\terr = fmt.Errorf(\"remoteURL (%s) could not be recognized as URL\", remoteURL)\n\t}\n\treturn\n}\n\nfunc newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tc, err := MakeTarSumContext(rc)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tdf, err := openAt(c, dockerfilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif dockerfilePath == builder.DefaultDockerfileName {\n\t\t\t\tlowercase := strings.ToLower(dockerfilePath)\n\t\t\t\tif _, err := StatAt(c, lowercase); err == nil {\n\t\t\t\t\treturn withDockerfileFromContext(c, lowercase)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, nil, errors.Errorf(\"Cannot locate specified Dockerfile: %s\", dockerfilePath) \/\/ backwards compatible error\n\t\t}\n\t\tc.Close()\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := readAndParseDockerfile(dockerfilePath, df)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdf.Close()\n\n\tif err := removeDockerfile(c, dockerfilePath); err != nil {\n\t\tc.Close()\n\t\treturn nil, nil, err\n\t}\n\n\treturn c, res, nil\n}\n\nfunc newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tc, err := MakeGitContext(gitURL) \/\/ TODO: change this to NewLazyContext\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) {\n\tvar dockerfile io.ReadCloser\n\tdockerfileFoundErr := errors.New(\"found-dockerfile\")\n\tc, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){\n\t\thttputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {\n\t\t\tdockerfile = rc\n\t\t\treturn nil, dockerfileFoundErr\n\t\t},\n\t\t\/\/ fallback handler (tar context)\n\t\t\"\": func(rc io.ReadCloser) (io.ReadCloser, error) {\n\t\t\treturn progressReader(rc), nil\n\t\t},\n\t})\n\tif err != nil {\n\t\tif err == dockerfileFoundErr {\n\t\t\tres, err := parser.Parse(dockerfile)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\treturn nil, res, nil\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc removeDockerfile(c modifiableContext, filesToRemove ...string) error {\n\tf, err := openAt(c, \".dockerignore\")\n\t\/\/ Note that a missing .dockerignore file isn't treated as an error\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\t}\n\texcludes, err := dockerignore.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\tfilesToRemove = append([]string{\".dockerignore\"}, filesToRemove...)\n\tfor _, fileToRemove := range filesToRemove {\n\t\tif rm, _ := fileutils.Matches(fileToRemove, excludes); rm {\n\t\t\tif err := c.Remove(fileToRemove); err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to remove %s: %v\", fileToRemove, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {\n\tbr := bufio.NewReader(rc)\n\tif _, err := br.Peek(1); err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn nil, errors.Errorf(\"the Dockerfile (%s) cannot be empty\", name)\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"unexpected error reading Dockerfile\")\n\t}\n\treturn parser.Parse(br)\n}\n\nfunc openAt(remote builder.Source, path string) (*os.File, error) {\n\tfullPath, err := FullPath(remote, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Open(fullPath)\n}\n\n\/\/ StatAt is a helper for calling Stat on a path from a source\nfunc StatAt(remote builder.Source, path string) (os.FileInfo, error) {\n\tfullPath, err := FullPath(remote, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Stat(fullPath)\n}\n\n\/\/ FullPath is a helper for getting a full path for a path from a source\nfunc FullPath(remote builder.Source, path string) (string, error) {\n\tfullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Forbidden path outside the build context: %s (%s)\", path, fullPath) \/\/ backwards compat with old error\n\t}\n\treturn fullPath, nil\n}\n<commit_msg>Fix cancelling builder on chunked requests<commit_after>package remotecontext\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/api\/types\/backend\"\n\t\"github.com\/docker\/docker\/builder\"\n\t\"github.com\/docker\/docker\/builder\/dockerfile\/parser\"\n\t\"github.com\/docker\/docker\/builder\/dockerignore\"\n\t\"github.com\/docker\/docker\/pkg\/fileutils\"\n\t\"github.com\/docker\/docker\/pkg\/httputils\"\n\t\"github.com\/docker\/docker\/pkg\/symlink\"\n\t\"github.com\/docker\/docker\/pkg\/urlutil\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Detect returns a context and dockerfile from remote location or local\n\/\/ archive. progressReader is only used if remoteURL is actually a URL\n\/\/ (not empty, and not a Git endpoint).\nfunc Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) {\n\tremoteURL := config.Options.RemoteContext\n\tdockerfilePath := config.Options.Dockerfile\n\n\tswitch {\n\tcase remoteURL == \"\":\n\t\tremote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)\n\tcase urlutil.IsGitURL(remoteURL):\n\t\tremote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)\n\tcase urlutil.IsURL(remoteURL):\n\t\tremote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc)\n\tdefault:\n\t\terr = fmt.Errorf(\"remoteURL (%s) could not be recognized as URL\", remoteURL)\n\t}\n\treturn\n}\n\nfunc newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tdefer rc.Close()\n\tc, err := MakeTarSumContext(rc)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tdf, err := openAt(c, dockerfilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif dockerfilePath == builder.DefaultDockerfileName {\n\t\t\t\tlowercase := strings.ToLower(dockerfilePath)\n\t\t\t\tif _, err := StatAt(c, lowercase); err == nil {\n\t\t\t\t\treturn withDockerfileFromContext(c, lowercase)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, nil, errors.Errorf(\"Cannot locate specified Dockerfile: %s\", dockerfilePath) \/\/ backwards compatible error\n\t\t}\n\t\tc.Close()\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := readAndParseDockerfile(dockerfilePath, df)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdf.Close()\n\n\tif err := removeDockerfile(c, dockerfilePath); err != nil {\n\t\tc.Close()\n\t\treturn nil, nil, err\n\t}\n\n\treturn c, res, nil\n}\n\nfunc newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) {\n\tc, err := MakeGitContext(gitURL) \/\/ TODO: change this to NewLazyContext\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) {\n\tvar dockerfile io.ReadCloser\n\tdockerfileFoundErr := errors.New(\"found-dockerfile\")\n\tc, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){\n\t\thttputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {\n\t\t\tdockerfile = rc\n\t\t\treturn nil, dockerfileFoundErr\n\t\t},\n\t\t\/\/ fallback handler (tar context)\n\t\t\"\": func(rc io.ReadCloser) (io.ReadCloser, error) {\n\t\t\treturn progressReader(rc), nil\n\t\t},\n\t})\n\tif err != nil {\n\t\tif err == dockerfileFoundErr {\n\t\t\tres, err := parser.Parse(dockerfile)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\treturn nil, res, nil\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\treturn withDockerfileFromContext(c.(modifiableContext), dockerfilePath)\n}\n\nfunc removeDockerfile(c modifiableContext, filesToRemove ...string) error {\n\tf, err := openAt(c, \".dockerignore\")\n\t\/\/ Note that a missing .dockerignore file isn't treated as an error\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\t}\n\texcludes, err := dockerignore.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\tfilesToRemove = append([]string{\".dockerignore\"}, filesToRemove...)\n\tfor _, fileToRemove := range filesToRemove {\n\t\tif rm, _ := fileutils.Matches(fileToRemove, excludes); rm {\n\t\t\tif err := c.Remove(fileToRemove); err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to remove %s: %v\", fileToRemove, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {\n\tbr := bufio.NewReader(rc)\n\tif _, err := br.Peek(1); err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn nil, errors.Errorf(\"the Dockerfile (%s) cannot be empty\", name)\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"unexpected error reading Dockerfile\")\n\t}\n\treturn parser.Parse(br)\n}\n\nfunc openAt(remote builder.Source, path string) (*os.File, error) {\n\tfullPath, err := FullPath(remote, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Open(fullPath)\n}\n\n\/\/ StatAt is a helper for calling Stat on a path from a source\nfunc StatAt(remote builder.Source, path string) (os.FileInfo, error) {\n\tfullPath, err := FullPath(remote, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Stat(fullPath)\n}\n\n\/\/ FullPath is a helper for getting a full path for a path from a source\nfunc FullPath(remote builder.Source, path string) (string, error) {\n\tfullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Forbidden path outside the build context: %s (%s)\", path, fullPath) \/\/ backwards compat with old error\n\t}\n\treturn fullPath, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package godog\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/DATA-DOG\/godog\/gherkin\"\n)\n\nfunc init() {\n\tFormat(\"pretty\", \"Prints every feature with runtime statuses.\", &pretty{\n\t\tbasefmt: basefmt{\n\t\t\tstarted: time.Now(),\n\t\t\tindent: 2,\n\t\t},\n\t})\n}\n\nvar outlinePlaceholderRegexp = regexp.MustCompile(\"<[^>]+>\")\n\n\/\/ a built in default pretty formatter\ntype pretty struct {\n\tbasefmt\n\n\t\/\/ currently processed\n\tfeature *gherkin.Feature\n\tscenario *gherkin.Scenario\n\toutline *gherkin.ScenarioOutline\n\n\t\/\/ state\n\tbgSteps int\n\tsteps int\n\tcommentPos int\n\n\t\/\/ outline\n\toutlineSteps []*stepResult\n\toutlineNumExample int\n\toutlineNumExamples int\n}\n\nfunc (f *pretty) Feature(ft *gherkin.Feature, p string) {\n\tif len(f.features) != 0 {\n\t\t\/\/ not a first feature, add a newline\n\t\tfmt.Println(\"\")\n\t}\n\tf.features = append(f.features, &feature{Path: p, Feature: ft})\n\tfmt.Println(bcl(ft.Keyword+\": \", white) + ft.Name)\n\tif strings.TrimSpace(ft.Description) != \"\" {\n\t\tfor _, line := range strings.Split(ft.Description, \"\\n\") {\n\t\t\tfmt.Println(s(f.indent) + strings.TrimSpace(line))\n\t\t}\n\t}\n\n\tf.feature = ft\n\tf.scenario = nil\n\tf.outline = nil\n\tf.bgSteps = 0\n\tif ft.Background != nil {\n\t\tf.bgSteps = len(ft.Background.Steps)\n\t}\n}\n\n\/\/ Node takes a gherkin node for formatting\nfunc (f *pretty) Node(node interface{}) {\n\tf.basefmt.Node(node)\n\n\tswitch t := node.(type) {\n\tcase *gherkin.Examples:\n\t\tf.outlineNumExamples = len(t.TableBody)\n\t\tf.outlineNumExample++\n\tcase *gherkin.Scenario:\n\t\tf.scenario = t\n\t\tf.outline = nil\n\t\tf.steps = len(t.Steps)\n\tcase *gherkin.ScenarioOutline:\n\t\tf.outline = t\n\t\tf.scenario = nil\n\t\tf.steps = len(t.Steps)\n\t\tf.outlineNumExample = -1\n\t}\n}\n\n\/\/ Summary sumarize the feature formatter output\nfunc (f *pretty) Summary() {\n\t\/\/ failed steps on background are not scenarios\n\tvar failedScenarios []*stepResult\n\tfor _, fail := range f.failed {\n\t\tswitch fail.owner.(type) {\n\t\tcase *gherkin.Scenario:\n\t\t\tfailedScenarios = append(failedScenarios, fail)\n\t\tcase *gherkin.ScenarioOutline:\n\t\t\tfailedScenarios = append(failedScenarios, fail)\n\t\t}\n\t}\n\tif len(failedScenarios) > 0 {\n\t\tfmt.Println(\"\\n--- \" + cl(\"Failed scenarios:\", red) + \"\\n\")\n\t\tvar unique []string\n\t\tfor _, fail := range failedScenarios {\n\t\t\tvar found bool\n\t\t\tfor _, in := range unique {\n\t\t\t\tif in == fail.line() {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tunique = append(unique, fail.line())\n\t\t\t}\n\t\t}\n\n\t\tfor _, fail := range unique {\n\t\t\tfmt.Println(\" \" + cl(fail, red))\n\t\t}\n\t}\n\tf.basefmt.Summary()\n}\n\nfunc (f *pretty) printOutlineExample(outline *gherkin.ScenarioOutline) {\n\tvar msg string\n\tclr := green\n\n\tex := outline.Examples[f.outlineNumExample]\n\texample, hasExamples := examples(ex)\n\tif !hasExamples {\n\t\t\/\/ do not print empty examples\n\t\treturn\n\t}\n\n\tfirstExample := f.outlineNumExamples == len(example.TableBody)\n\tprintSteps := firstExample && f.outlineNumExample == 0\n\n\tfor i, res := range f.outlineSteps {\n\t\t\/\/ determine example row status\n\t\tswitch {\n\t\tcase res.typ == failed:\n\t\t\tmsg = res.err.Error()\n\t\t\tclr = res.typ.clr()\n\t\tcase res.typ == undefined || res.typ == pending:\n\t\t\tclr = res.typ.clr()\n\t\tcase res.typ == skipped && clr == green:\n\t\t\tclr = cyan\n\t\t}\n\t\tif printSteps {\n\t\t\t\/\/ in first example, we need to print steps\n\t\t\tvar text string\n\t\t\tostep := outline.Steps[i]\n\t\t\tif res.def != nil {\n\t\t\t\tif m := outlinePlaceholderRegexp.FindAllStringIndex(ostep.Text, -1); len(m) > 0 {\n\t\t\t\t\tvar pos int\n\t\t\t\t\tfor i := 0; i < len(m); i++ {\n\t\t\t\t\t\tpair := m[i]\n\t\t\t\t\t\ttext += cl(ostep.Text[pos:pair[0]], cyan)\n\t\t\t\t\t\ttext += bcl(ostep.Text[pair[0]:pair[1]], cyan)\n\t\t\t\t\t\tpos = pair[1]\n\t\t\t\t\t}\n\t\t\t\t\ttext += cl(ostep.Text[pos:len(ostep.Text)], cyan)\n\t\t\t\t} else {\n\t\t\t\t\ttext = cl(ostep.Text, cyan)\n\t\t\t\t}\n\t\t\t\ttext += s(f.commentPos-f.length(ostep)+1) + cl(fmt.Sprintf(\"# %s\", res.def.funcName()), black)\n\t\t\t} else {\n\t\t\t\ttext = cl(ostep.Text, cyan)\n\t\t\t}\n\t\t\t\/\/ print the step outline\n\t\t\tfmt.Println(s(f.indent*2) + cl(strings.TrimSpace(ostep.Keyword), cyan) + \" \" + text)\n\t\t}\n\t}\n\n\tcells := make([]string, len(example.TableHeader.Cells))\n\tmax := longest(example)\n\t\/\/ an example table header\n\tif firstExample {\n\t\tfmt.Println(\"\")\n\t\tfmt.Println(s(f.indent*2) + bcl(example.Keyword+\": \", white) + example.Name)\n\n\t\tfor i, cell := range example.TableHeader.Cells {\n\t\t\tcells[i] = cl(cell.Value, cyan) + s(max[i]-len(cell.Value))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + \"| \" + strings.Join(cells, \" | \") + \" |\")\n\t}\n\n\t\/\/ an example table row\n\trow := example.TableBody[len(example.TableBody)-f.outlineNumExamples]\n\tfor i, cell := range row.Cells {\n\t\tcells[i] = cl(cell.Value, clr) + s(max[i]-len(cell.Value))\n\t}\n\tfmt.Println(s(f.indent*3) + \"| \" + strings.Join(cells, \" | \") + \" |\")\n\n\t\/\/ if there is an error\n\tif msg != \"\" {\n\t\tfmt.Println(s(f.indent*4) + bcl(msg, red))\n\t}\n}\n\nfunc (f *pretty) printStep(step *gherkin.Step, def *StepDef, c color) {\n\ttext := s(f.indent*2) + cl(strings.TrimSpace(step.Keyword), c) + \" \"\n\tswitch {\n\tcase def != nil:\n\t\tif m := (def.Expr.FindStringSubmatchIndex(step.Text))[2:]; len(m) > 0 {\n\t\t\tvar pos, i int\n\t\t\tfor pos, i = 0, 0; i < len(m); i++ {\n\t\t\t\tif math.Mod(float64(i), 2) == 0 {\n\t\t\t\t\ttext += cl(step.Text[pos:m[i]], c)\n\t\t\t\t} else {\n\t\t\t\t\ttext += bcl(step.Text[pos:m[i]], c)\n\t\t\t\t}\n\t\t\t\tpos = m[i]\n\t\t\t}\n\t\t\ttext += cl(step.Text[pos:len(step.Text)], c)\n\t\t} else {\n\t\t\ttext += cl(step.Text, c)\n\t\t}\n\t\ttext += s(f.commentPos-f.length(step)+1) + cl(fmt.Sprintf(\"# %s\", def.funcName()), black)\n\tdefault:\n\t\ttext += cl(step.Text, c)\n\t}\n\n\tfmt.Println(text)\n\tswitch t := step.Argument.(type) {\n\tcase *gherkin.DataTable:\n\t\tf.printTable(t, c)\n\tcase *gherkin.DocString:\n\t\tvar ct string\n\t\tif len(t.ContentType) > 0 {\n\t\t\tct = \" \" + cl(t.ContentType, c)\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(t.Delimitter, c) + ct)\n\t\tfor _, ln := range strings.Split(t.Content, \"\\n\") {\n\t\t\tfmt.Println(s(f.indent*3) + cl(ln, c))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(t.Delimitter, c))\n\t}\n}\n\nfunc (f *pretty) printStepKind(res *stepResult) {\n\t_, isBgStep := res.owner.(*gherkin.Background)\n\n\t\/\/ if has not printed background yet\n\tswitch {\n\t\/\/ first background step\n\tcase f.bgSteps > 0 && f.bgSteps == len(f.feature.Background.Steps):\n\t\tf.commentPos = f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background))\n\t\tfmt.Println(\"\\n\" + s(f.indent) + bcl(f.feature.Background.Keyword+\": \"+f.feature.Background.Name, white))\n\t\tf.bgSteps--\n\t\/\/ subsequent background steps\n\tcase f.bgSteps > 0:\n\t\tf.bgSteps--\n\t\/\/ a background step for another scenario, but all bg steps are\n\t\/\/ already printed. so just skip it\n\tcase isBgStep:\n\t\treturn\n\t\/\/ first step of scenario, print header and calculate comment position\n\tcase f.scenario != nil && f.steps == len(f.scenario.Steps):\n\t\tf.commentPos = f.longestStep(f.scenario.Steps, f.length(f.scenario))\n\t\ttext := s(f.indent) + bcl(f.scenario.Keyword+\": \", white) + f.scenario.Name\n\t\ttext += s(f.commentPos-f.length(f.scenario)+1) + f.line(f.scenario.Location)\n\t\tfmt.Println(\"\\n\" + text)\n\t\tf.steps--\n\t\/\/ all subsequent scenario steps\n\tcase f.scenario != nil:\n\t\tf.steps--\n\t\/\/ first step of outline scenario, print header and calculate comment position\n\tcase f.outline != nil && f.steps == len(f.outline.Steps):\n\t\tf.commentPos = f.longestStep(f.outline.Steps, f.length(f.outline))\n\t\ttext := s(f.indent) + bcl(f.outline.Keyword+\": \", white) + f.outline.Name\n\t\ttext += s(f.commentPos-f.length(f.outline)+1) + f.line(f.outline.Location)\n\t\tfmt.Println(\"\\n\" + text)\n\t\tf.outlineSteps = append(f.outlineSteps, res)\n\t\tf.steps--\n\t\tif len(f.outlineSteps) == len(f.outline.Steps) {\n\t\t\t\/\/ an outline example steps has went through\n\t\t\tf.printOutlineExample(f.outline)\n\t\t\tf.outlineSteps = []*stepResult{}\n\t\t\tf.outlineNumExamples--\n\t\t}\n\t\treturn\n\t\/\/ all subsequent outline steps\n\tcase f.outline != nil:\n\t\tf.outlineSteps = append(f.outlineSteps, res)\n\t\tf.steps--\n\t\tif len(f.outlineSteps) == len(f.outline.Steps) {\n\t\t\t\/\/ an outline example steps has went through\n\t\t\tf.printOutlineExample(f.outline)\n\t\t\tf.outlineSteps = []*stepResult{}\n\t\t\tf.outlineNumExamples--\n\t\t}\n\t\treturn\n\t}\n\n\tf.printStep(res.step, res.def, res.typ.clr())\n\tif res.err != nil {\n\t\tfmt.Println(s(f.indent*2) + bcl(res.err, red))\n\t}\n\tif res.typ == pending {\n\t\tfmt.Println(s(f.indent*3) + cl(\"TODO: write pending definition\", yellow))\n\t}\n}\n\n\/\/ print table with aligned table cells\nfunc (f *pretty) printTable(t *gherkin.DataTable, c color) {\n\tvar l = longest(t)\n\tvar cols = make([]string, len(t.Rows[0].Cells))\n\tfor _, row := range t.Rows {\n\t\tfor i, cell := range row.Cells {\n\t\t\tcols[i] = cell.Value + s(l[i]-len(cell.Value))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(\"| \"+strings.Join(cols, \" | \")+\" |\", c))\n\t}\n}\n\nfunc (f *pretty) Passed(step *gherkin.Step, match *StepDef) {\n\tf.basefmt.Passed(step, match)\n\tf.printStepKind(f.passed[len(f.passed)-1])\n}\n\nfunc (f *pretty) Skipped(step *gherkin.Step) {\n\tf.basefmt.Skipped(step)\n\tf.printStepKind(f.skipped[len(f.skipped)-1])\n}\n\nfunc (f *pretty) Undefined(step *gherkin.Step) {\n\tf.basefmt.Undefined(step)\n\tf.printStepKind(f.undefined[len(f.undefined)-1])\n}\n\nfunc (f *pretty) Failed(step *gherkin.Step, match *StepDef, err error) {\n\tf.basefmt.Failed(step, match, err)\n\tf.printStepKind(f.failed[len(f.failed)-1])\n}\n\nfunc (f *pretty) Pending(step *gherkin.Step, match *StepDef) {\n\tf.basefmt.Pending(step, match)\n\tf.printStepKind(f.pending[len(f.pending)-1])\n}\n\n\/\/ longest gives a list of longest columns of all rows in Table\nfunc longest(tbl interface{}) []int {\n\tvar rows []*gherkin.TableRow\n\tswitch t := tbl.(type) {\n\tcase *gherkin.Examples:\n\t\trows = append(rows, t.TableHeader)\n\t\trows = append(rows, t.TableBody...)\n\tcase *gherkin.DataTable:\n\t\trows = append(rows, t.Rows...)\n\t}\n\n\tlongest := make([]int, len(rows[0].Cells))\n\tfor _, row := range rows {\n\t\tfor i, cell := range row.Cells {\n\t\t\tif longest[i] < len(cell.Value) {\n\t\t\t\tlongest[i] = len(cell.Value)\n\t\t\t}\n\t\t}\n\t}\n\treturn longest\n}\n\nfunc (f *pretty) longestStep(steps []*gherkin.Step, base int) int {\n\tret := base\n\tfor _, step := range steps {\n\t\tlength := f.length(step)\n\t\tif length > ret {\n\t\t\tret = length\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ a line number representation in feature file\nfunc (f *pretty) line(loc *gherkin.Location) string {\n\treturn cl(fmt.Sprintf(\"# %s:%d\", f.features[len(f.features)-1].Path, loc.Line), black)\n}\n\nfunc (f *pretty) length(node interface{}) int {\n\tswitch t := node.(type) {\n\tcase *gherkin.Background:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\tcase *gherkin.Step:\n\t\treturn f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\" \"+t.Text)\n\tcase *gherkin.Scenario:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\tcase *gherkin.ScenarioOutline:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\t}\n\tpanic(fmt.Sprintf(\"unexpected node %T to determine length\", node))\n}\n<commit_msg>use maximum step length of background and scenario<commit_after>package godog\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/DATA-DOG\/godog\/gherkin\"\n)\n\nfunc init() {\n\tFormat(\"pretty\", \"Prints every feature with runtime statuses.\", &pretty{\n\t\tbasefmt: basefmt{\n\t\t\tstarted: time.Now(),\n\t\t\tindent: 2,\n\t\t},\n\t})\n}\n\nvar outlinePlaceholderRegexp = regexp.MustCompile(\"<[^>]+>\")\n\n\/\/ a built in default pretty formatter\ntype pretty struct {\n\tbasefmt\n\n\t\/\/ currently processed\n\tfeature *gherkin.Feature\n\tscenario *gherkin.Scenario\n\toutline *gherkin.ScenarioOutline\n\n\t\/\/ state\n\tbgSteps int\n\tsteps int\n\tcommentPos int\n\n\t\/\/ outline\n\toutlineSteps []*stepResult\n\toutlineNumExample int\n\toutlineNumExamples int\n}\n\nfunc (f *pretty) Feature(ft *gherkin.Feature, p string) {\n\tif len(f.features) != 0 {\n\t\t\/\/ not a first feature, add a newline\n\t\tfmt.Println(\"\")\n\t}\n\tf.features = append(f.features, &feature{Path: p, Feature: ft})\n\tfmt.Println(bcl(ft.Keyword+\": \", white) + ft.Name)\n\tif strings.TrimSpace(ft.Description) != \"\" {\n\t\tfor _, line := range strings.Split(ft.Description, \"\\n\") {\n\t\t\tfmt.Println(s(f.indent) + strings.TrimSpace(line))\n\t\t}\n\t}\n\n\tf.feature = ft\n\tf.scenario = nil\n\tf.outline = nil\n\tf.bgSteps = 0\n\tif ft.Background != nil {\n\t\tf.bgSteps = len(ft.Background.Steps)\n\t}\n}\n\n\/\/ Node takes a gherkin node for formatting\nfunc (f *pretty) Node(node interface{}) {\n\tf.basefmt.Node(node)\n\n\tswitch t := node.(type) {\n\tcase *gherkin.Examples:\n\t\tf.outlineNumExamples = len(t.TableBody)\n\t\tf.outlineNumExample++\n\tcase *gherkin.Scenario:\n\t\tf.scenario = t\n\t\tf.outline = nil\n\t\tf.steps = len(t.Steps)\n\tcase *gherkin.ScenarioOutline:\n\t\tf.outline = t\n\t\tf.scenario = nil\n\t\tf.steps = len(t.Steps)\n\t\tf.outlineNumExample = -1\n\t}\n}\n\n\/\/ Summary sumarize the feature formatter output\nfunc (f *pretty) Summary() {\n\t\/\/ failed steps on background are not scenarios\n\tvar failedScenarios []*stepResult\n\tfor _, fail := range f.failed {\n\t\tswitch fail.owner.(type) {\n\t\tcase *gherkin.Scenario:\n\t\t\tfailedScenarios = append(failedScenarios, fail)\n\t\tcase *gherkin.ScenarioOutline:\n\t\t\tfailedScenarios = append(failedScenarios, fail)\n\t\t}\n\t}\n\tif len(failedScenarios) > 0 {\n\t\tfmt.Println(\"\\n--- \" + cl(\"Failed scenarios:\", red) + \"\\n\")\n\t\tvar unique []string\n\t\tfor _, fail := range failedScenarios {\n\t\t\tvar found bool\n\t\t\tfor _, in := range unique {\n\t\t\t\tif in == fail.line() {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tunique = append(unique, fail.line())\n\t\t\t}\n\t\t}\n\n\t\tfor _, fail := range unique {\n\t\t\tfmt.Println(\" \" + cl(fail, red))\n\t\t}\n\t}\n\tf.basefmt.Summary()\n}\n\nfunc (f *pretty) printOutlineExample(outline *gherkin.ScenarioOutline) {\n\tvar msg string\n\tclr := green\n\n\tex := outline.Examples[f.outlineNumExample]\n\texample, hasExamples := examples(ex)\n\tif !hasExamples {\n\t\t\/\/ do not print empty examples\n\t\treturn\n\t}\n\n\tfirstExample := f.outlineNumExamples == len(example.TableBody)\n\tprintSteps := firstExample && f.outlineNumExample == 0\n\n\tfor i, res := range f.outlineSteps {\n\t\t\/\/ determine example row status\n\t\tswitch {\n\t\tcase res.typ == failed:\n\t\t\tmsg = res.err.Error()\n\t\t\tclr = res.typ.clr()\n\t\tcase res.typ == undefined || res.typ == pending:\n\t\t\tclr = res.typ.clr()\n\t\tcase res.typ == skipped && clr == green:\n\t\t\tclr = cyan\n\t\t}\n\t\tif printSteps {\n\t\t\t\/\/ in first example, we need to print steps\n\t\t\tvar text string\n\t\t\tostep := outline.Steps[i]\n\t\t\tif res.def != nil {\n\t\t\t\tif m := outlinePlaceholderRegexp.FindAllStringIndex(ostep.Text, -1); len(m) > 0 {\n\t\t\t\t\tvar pos int\n\t\t\t\t\tfor i := 0; i < len(m); i++ {\n\t\t\t\t\t\tpair := m[i]\n\t\t\t\t\t\ttext += cl(ostep.Text[pos:pair[0]], cyan)\n\t\t\t\t\t\ttext += bcl(ostep.Text[pair[0]:pair[1]], cyan)\n\t\t\t\t\t\tpos = pair[1]\n\t\t\t\t\t}\n\t\t\t\t\ttext += cl(ostep.Text[pos:len(ostep.Text)], cyan)\n\t\t\t\t} else {\n\t\t\t\t\ttext = cl(ostep.Text, cyan)\n\t\t\t\t}\n\t\t\t\ttext += s(f.commentPos-f.length(ostep)+1) + cl(fmt.Sprintf(\"# %s\", res.def.funcName()), black)\n\t\t\t} else {\n\t\t\t\ttext = cl(ostep.Text, cyan)\n\t\t\t}\n\t\t\t\/\/ print the step outline\n\t\t\tfmt.Println(s(f.indent*2) + cl(strings.TrimSpace(ostep.Keyword), cyan) + \" \" + text)\n\t\t}\n\t}\n\n\tcells := make([]string, len(example.TableHeader.Cells))\n\tmax := longest(example)\n\t\/\/ an example table header\n\tif firstExample {\n\t\tfmt.Println(\"\")\n\t\tfmt.Println(s(f.indent*2) + bcl(example.Keyword+\": \", white) + example.Name)\n\n\t\tfor i, cell := range example.TableHeader.Cells {\n\t\t\tcells[i] = cl(cell.Value, cyan) + s(max[i]-len(cell.Value))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + \"| \" + strings.Join(cells, \" | \") + \" |\")\n\t}\n\n\t\/\/ an example table row\n\trow := example.TableBody[len(example.TableBody)-f.outlineNumExamples]\n\tfor i, cell := range row.Cells {\n\t\tcells[i] = cl(cell.Value, clr) + s(max[i]-len(cell.Value))\n\t}\n\tfmt.Println(s(f.indent*3) + \"| \" + strings.Join(cells, \" | \") + \" |\")\n\n\t\/\/ if there is an error\n\tif msg != \"\" {\n\t\tfmt.Println(s(f.indent*4) + bcl(msg, red))\n\t}\n}\n\nfunc (f *pretty) printStep(step *gherkin.Step, def *StepDef, c color) {\n\ttext := s(f.indent*2) + cl(strings.TrimSpace(step.Keyword), c) + \" \"\n\tswitch {\n\tcase def != nil:\n\t\tif m := (def.Expr.FindStringSubmatchIndex(step.Text))[2:]; len(m) > 0 {\n\t\t\tvar pos, i int\n\t\t\tfor pos, i = 0, 0; i < len(m); i++ {\n\t\t\t\tif math.Mod(float64(i), 2) == 0 {\n\t\t\t\t\ttext += cl(step.Text[pos:m[i]], c)\n\t\t\t\t} else {\n\t\t\t\t\ttext += bcl(step.Text[pos:m[i]], c)\n\t\t\t\t}\n\t\t\t\tpos = m[i]\n\t\t\t}\n\t\t\ttext += cl(step.Text[pos:len(step.Text)], c)\n\t\t} else {\n\t\t\ttext += cl(step.Text, c)\n\t\t}\n\t\ttext += s(f.commentPos-f.length(step)+1) + cl(fmt.Sprintf(\"# %s\", def.funcName()), black)\n\tdefault:\n\t\ttext += cl(step.Text, c)\n\t}\n\n\tfmt.Println(text)\n\tswitch t := step.Argument.(type) {\n\tcase *gherkin.DataTable:\n\t\tf.printTable(t, c)\n\tcase *gherkin.DocString:\n\t\tvar ct string\n\t\tif len(t.ContentType) > 0 {\n\t\t\tct = \" \" + cl(t.ContentType, c)\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(t.Delimitter, c) + ct)\n\t\tfor _, ln := range strings.Split(t.Content, \"\\n\") {\n\t\t\tfmt.Println(s(f.indent*3) + cl(ln, c))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(t.Delimitter, c))\n\t}\n}\n\nfunc (f *pretty) printStepKind(res *stepResult) {\n\t_, isBgStep := res.owner.(*gherkin.Background)\n\n\t\/\/ if has not printed background yet\n\tswitch {\n\t\/\/ first background step\n\tcase f.bgSteps > 0 && f.bgSteps == len(f.feature.Background.Steps):\n\t\tf.commentPos = f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background))\n\t\tfmt.Println(\"\\n\" + s(f.indent) + bcl(f.feature.Background.Keyword+\": \"+f.feature.Background.Name, white))\n\t\tf.bgSteps--\n\t\/\/ subsequent background steps\n\tcase f.bgSteps > 0:\n\t\tf.bgSteps--\n\t\/\/ a background step for another scenario, but all bg steps are\n\t\/\/ already printed. so just skip it\n\tcase isBgStep:\n\t\treturn\n\t\/\/ first step of scenario, print header and calculate comment position\n\tcase f.scenario != nil && f.steps == len(f.scenario.Steps):\n\t\tf.commentPos = f.longestStep(f.scenario.Steps, f.length(f.scenario))\n\t\tif bgLen := f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background)); bgLen > f.commentPos {\n\t\t\tf.commentPos = bgLen\n\t\t}\n\t\ttext := s(f.indent) + bcl(f.scenario.Keyword+\": \", white) + f.scenario.Name\n\t\ttext += s(f.commentPos-f.length(f.scenario)+1) + f.line(f.scenario.Location)\n\t\tfmt.Println(\"\\n\" + text)\n\t\tf.steps--\n\t\/\/ all subsequent scenario steps\n\tcase f.scenario != nil:\n\t\tf.steps--\n\t\/\/ first step of outline scenario, print header and calculate comment position\n\tcase f.outline != nil && f.steps == len(f.outline.Steps):\n\t\tf.commentPos = f.longestStep(f.outline.Steps, f.length(f.outline))\n\t\ttext := s(f.indent) + bcl(f.outline.Keyword+\": \", white) + f.outline.Name\n\t\ttext += s(f.commentPos-f.length(f.outline)+1) + f.line(f.outline.Location)\n\t\tfmt.Println(\"\\n\" + text)\n\t\tf.outlineSteps = append(f.outlineSteps, res)\n\t\tf.steps--\n\t\tif len(f.outlineSteps) == len(f.outline.Steps) {\n\t\t\t\/\/ an outline example steps has went through\n\t\t\tf.printOutlineExample(f.outline)\n\t\t\tf.outlineSteps = []*stepResult{}\n\t\t\tf.outlineNumExamples--\n\t\t}\n\t\treturn\n\t\/\/ all subsequent outline steps\n\tcase f.outline != nil:\n\t\tf.outlineSteps = append(f.outlineSteps, res)\n\t\tf.steps--\n\t\tif len(f.outlineSteps) == len(f.outline.Steps) {\n\t\t\t\/\/ an outline example steps has went through\n\t\t\tf.printOutlineExample(f.outline)\n\t\t\tf.outlineSteps = []*stepResult{}\n\t\t\tf.outlineNumExamples--\n\t\t}\n\t\treturn\n\t}\n\n\tf.printStep(res.step, res.def, res.typ.clr())\n\tif res.err != nil {\n\t\tfmt.Println(s(f.indent*2) + bcl(res.err, red))\n\t}\n\tif res.typ == pending {\n\t\tfmt.Println(s(f.indent*3) + cl(\"TODO: write pending definition\", yellow))\n\t}\n}\n\n\/\/ print table with aligned table cells\nfunc (f *pretty) printTable(t *gherkin.DataTable, c color) {\n\tvar l = longest(t)\n\tvar cols = make([]string, len(t.Rows[0].Cells))\n\tfor _, row := range t.Rows {\n\t\tfor i, cell := range row.Cells {\n\t\t\tcols[i] = cell.Value + s(l[i]-len(cell.Value))\n\t\t}\n\t\tfmt.Println(s(f.indent*3) + cl(\"| \"+strings.Join(cols, \" | \")+\" |\", c))\n\t}\n}\n\nfunc (f *pretty) Passed(step *gherkin.Step, match *StepDef) {\n\tf.basefmt.Passed(step, match)\n\tf.printStepKind(f.passed[len(f.passed)-1])\n}\n\nfunc (f *pretty) Skipped(step *gherkin.Step) {\n\tf.basefmt.Skipped(step)\n\tf.printStepKind(f.skipped[len(f.skipped)-1])\n}\n\nfunc (f *pretty) Undefined(step *gherkin.Step) {\n\tf.basefmt.Undefined(step)\n\tf.printStepKind(f.undefined[len(f.undefined)-1])\n}\n\nfunc (f *pretty) Failed(step *gherkin.Step, match *StepDef, err error) {\n\tf.basefmt.Failed(step, match, err)\n\tf.printStepKind(f.failed[len(f.failed)-1])\n}\n\nfunc (f *pretty) Pending(step *gherkin.Step, match *StepDef) {\n\tf.basefmt.Pending(step, match)\n\tf.printStepKind(f.pending[len(f.pending)-1])\n}\n\n\/\/ longest gives a list of longest columns of all rows in Table\nfunc longest(tbl interface{}) []int {\n\tvar rows []*gherkin.TableRow\n\tswitch t := tbl.(type) {\n\tcase *gherkin.Examples:\n\t\trows = append(rows, t.TableHeader)\n\t\trows = append(rows, t.TableBody...)\n\tcase *gherkin.DataTable:\n\t\trows = append(rows, t.Rows...)\n\t}\n\n\tlongest := make([]int, len(rows[0].Cells))\n\tfor _, row := range rows {\n\t\tfor i, cell := range row.Cells {\n\t\t\tif longest[i] < len(cell.Value) {\n\t\t\t\tlongest[i] = len(cell.Value)\n\t\t\t}\n\t\t}\n\t}\n\treturn longest\n}\n\nfunc (f *pretty) longestStep(steps []*gherkin.Step, base int) int {\n\tret := base\n\tfor _, step := range steps {\n\t\tlength := f.length(step)\n\t\tif length > ret {\n\t\t\tret = length\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ a line number representation in feature file\nfunc (f *pretty) line(loc *gherkin.Location) string {\n\treturn cl(fmt.Sprintf(\"# %s:%d\", f.features[len(f.features)-1].Path, loc.Line), black)\n}\n\nfunc (f *pretty) length(node interface{}) int {\n\tswitch t := node.(type) {\n\tcase *gherkin.Background:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\tcase *gherkin.Step:\n\t\treturn f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\" \"+t.Text)\n\tcase *gherkin.Scenario:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\tcase *gherkin.ScenarioOutline:\n\t\treturn f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+\": \"+t.Name)\n\t}\n\tpanic(fmt.Sprintf(\"unexpected node %T to determine length\", node))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Define the options for Open\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/ncw\/rclone\/fs\/hash\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ OpenOption is an interface describing options for Open\ntype OpenOption interface {\n\tfmt.Stringer\n\n\t\/\/ Header returns the option as an HTTP header\n\tHeader() (key string, value string)\n\n\t\/\/ Mandatory returns whether this option can be ignored or not\n\tMandatory() bool\n}\n\n\/\/ RangeOption defines an HTTP Range option with start and end. If\n\/\/ either start or end are < 0 then they will be omitted.\n\/\/\n\/\/ End may be bigger than the Size of the object in which case it will\n\/\/ be capped to the size of the object.\n\/\/\n\/\/ Note that the End is inclusive, so to fetch 100 bytes you would use\n\/\/ RangeOption{Start: 0, End: 99}\n\/\/\n\/\/ A RangeOption implements a single byte-range-spec from\n\/\/ https:\/\/tools.ietf.org\/html\/rfc7233#section-2.1\ntype RangeOption struct {\n\tStart int64\n\tEnd int64\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *RangeOption) Header() (key string, value string) {\n\tkey = \"Range\"\n\tvalue = \"bytes=\"\n\tif o.Start >= 0 {\n\t\tvalue += strconv.FormatInt(o.Start, 10)\n\n\t}\n\tvalue += \"-\"\n\tif o.End >= 0 {\n\t\tvalue += strconv.FormatInt(o.End, 10)\n\t}\n\treturn key, value\n}\n\n\/\/ ParseRangeOption parses a RangeOption from a Range: header.\n\/\/ It only appects single ranges.\nfunc ParseRangeOption(s string) (po *RangeOption, err error) {\n\tconst preamble = \"bytes=\"\n\tif !strings.HasPrefix(s, preamble) {\n\t\treturn nil, errors.New(\"Range: header invalid: doesn't start with \" + preamble)\n\t}\n\ts = s[len(preamble):]\n\tif strings.IndexRune(s, ',') >= 0 {\n\t\treturn nil, errors.New(\"Range: header invalid: contains multiple ranges which isn't supported\")\n\t}\n\tdash := strings.IndexRune(s, '-')\n\tif dash < 0 {\n\t\treturn nil, errors.New(\"Range: header invalid: contains no '-'\")\n\t}\n\tstart, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:])\n\to := RangeOption{Start: -1, End: -1}\n\tif start != \"\" {\n\t\to.Start, err = strconv.ParseInt(start, 10, 64)\n\t\tif err != nil || o.Start < 0 {\n\t\t\treturn nil, errors.New(\"Range: header invalid: bad start\")\n\t\t}\n\t}\n\tif end != \"\" {\n\t\to.End, err = strconv.ParseInt(end, 10, 64)\n\t\tif err != nil || o.End < 0 {\n\t\t\treturn nil, errors.New(\"Range: header invalid: bad end\")\n\t\t}\n\t}\n\treturn &o, nil\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *RangeOption) String() string {\n\treturn fmt.Sprintf(\"RangeOption(%d,%d)\", o.Start, o.End)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *RangeOption) Mandatory() bool {\n\treturn false\n}\n\n\/\/ Decode interprets the RangeOption into an offset and a limit\n\/\/\n\/\/ The offset is the start of the stream and the limit is how many\n\/\/ bytes should be read from it. If the limit is -1 then the stream\n\/\/ should be read to the end.\nfunc (o *RangeOption) Decode(size int64) (offset, limit int64) {\n\tif o.Start >= 0 {\n\t\toffset = o.Start\n\t\tif o.End >= 0 {\n\t\t\tlimit = o.End - o.Start + 1\n\t\t} else {\n\t\t\tlimit = -1\n\t\t}\n\t} else {\n\t\tif o.End >= 0 {\n\t\t\toffset = size - o.End\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\t\tlimit = -1\n\t}\n\treturn offset, limit\n}\n\n\/\/ FixRangeOption looks through the slice of options and adjusts any\n\/\/ RangeOption~s found that request a fetch from the end into an\n\/\/ absolute fetch using the size passed in. Some remotes (eg\n\/\/ Onedrive, Box) don't support range requests which index from the\n\/\/ end.\nfunc FixRangeOption(options []OpenOption, size int64) {\n\tfor i := range options {\n\t\toption := options[i]\n\t\tif x, ok := option.(*RangeOption); ok {\n\t\t\t\/\/ If start is < 0 then fetch from the end\n\t\t\tif x.Start < 0 {\n\t\t\t\tx = &RangeOption{Start: size - x.End, End: -1}\n\t\t\t\toptions[i] = x\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SeekOption defines an HTTP Range option with start only.\ntype SeekOption struct {\n\tOffset int64\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *SeekOption) Header() (key string, value string) {\n\tkey = \"Range\"\n\tvalue = fmt.Sprintf(\"bytes=%d-\", o.Offset)\n\treturn key, value\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *SeekOption) String() string {\n\treturn fmt.Sprintf(\"SeekOption(%d)\", o.Offset)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *SeekOption) Mandatory() bool {\n\treturn true\n}\n\n\/\/ HTTPOption defines a general purpose HTTP option\ntype HTTPOption struct {\n\tKey string\n\tValue string\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *HTTPOption) Header() (key string, value string) {\n\treturn o.Key, o.Value\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *HTTPOption) String() string {\n\treturn fmt.Sprintf(\"HTTPOption(%q,%q)\", o.Key, o.Value)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *HTTPOption) Mandatory() bool {\n\treturn false\n}\n\n\/\/ HashesOption defines an option used to tell the local fs to limit\n\/\/ the number of hashes it calculates.\ntype HashesOption struct {\n\tHashes hash.Set\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *HashesOption) Header() (key string, value string) {\n\treturn \"\", \"\"\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *HashesOption) String() string {\n\treturn fmt.Sprintf(\"HashesOption(%v)\", o.Hashes)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *HashesOption) Mandatory() bool {\n\treturn false\n}\n\n\/\/ OpenOptionAddHeaders adds each header found in options to the\n\/\/ headers map provided the key was non empty.\nfunc OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {\n\tfor _, option := range options {\n\t\tkey, value := option.Header()\n\t\tif key != \"\" && value != \"\" {\n\t\t\theaders[key] = value\n\t\t}\n\t}\n}\n\n\/\/ OpenOptionHeaders adds each header found in options to the\n\/\/ headers map provided the key was non empty.\n\/\/\n\/\/ It returns a nil map if options was empty\nfunc OpenOptionHeaders(options []OpenOption) (headers map[string]string) {\n\tif len(options) == 0 {\n\t\treturn nil\n\t}\n\theaders = make(map[string]string, len(options))\n\tOpenOptionAddHeaders(options, headers)\n\treturn headers\n}\n\n\/\/ OpenOptionAddHTTPHeaders Sets each header found in options to the\n\/\/ http.Header map provided the key was non empty.\nfunc OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {\n\tfor _, option := range options {\n\t\tkey, value := option.Header()\n\t\tif key != \"\" && value != \"\" {\n\t\t\theaders.Set(key, value)\n\t\t}\n\t}\n}\n\n\/\/ check interface\nvar (\n\t_ OpenOption = (*RangeOption)(nil)\n\t_ OpenOption = (*SeekOption)(nil)\n\t_ OpenOption = (*HTTPOption)(nil)\n)\n<commit_msg>fs: Make RangeOption mandatory #1825<commit_after>\/\/ Define the options for Open\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/ncw\/rclone\/fs\/hash\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ OpenOption is an interface describing options for Open\ntype OpenOption interface {\n\tfmt.Stringer\n\n\t\/\/ Header returns the option as an HTTP header\n\tHeader() (key string, value string)\n\n\t\/\/ Mandatory returns whether this option can be ignored or not\n\tMandatory() bool\n}\n\n\/\/ RangeOption defines an HTTP Range option with start and end. If\n\/\/ either start or end are < 0 then they will be omitted.\n\/\/\n\/\/ End may be bigger than the Size of the object in which case it will\n\/\/ be capped to the size of the object.\n\/\/\n\/\/ Note that the End is inclusive, so to fetch 100 bytes you would use\n\/\/ RangeOption{Start: 0, End: 99}\n\/\/\n\/\/ If Start is specified but End is not then it will fetch from Start\n\/\/ to the end of the file.\n\/\/\n\/\/ If End is specified, but Start is not then it will fetch the last\n\/\/ End bytes.\n\/\/\n\/\/ Examples:\n\/\/\n\/\/ RangeOption{Start: 0, End: 99} - fetch the first 100 bytes\n\/\/ RangeOption{Start: 100, End: 199} - fetch the second 100 bytes\n\/\/ RangeOption{Start: 100} - fetch bytes from offset 100 to the end\n\/\/ RangeOption{End: 100} - fetch the last 100 bytes\n\/\/\n\/\/ A RangeOption implements a single byte-range-spec from\n\/\/ https:\/\/tools.ietf.org\/html\/rfc7233#section-2.1\ntype RangeOption struct {\n\tStart int64\n\tEnd int64\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *RangeOption) Header() (key string, value string) {\n\tkey = \"Range\"\n\tvalue = \"bytes=\"\n\tif o.Start >= 0 {\n\t\tvalue += strconv.FormatInt(o.Start, 10)\n\n\t}\n\tvalue += \"-\"\n\tif o.End >= 0 {\n\t\tvalue += strconv.FormatInt(o.End, 10)\n\t}\n\treturn key, value\n}\n\n\/\/ ParseRangeOption parses a RangeOption from a Range: header.\n\/\/ It only appects single ranges.\nfunc ParseRangeOption(s string) (po *RangeOption, err error) {\n\tconst preamble = \"bytes=\"\n\tif !strings.HasPrefix(s, preamble) {\n\t\treturn nil, errors.New(\"Range: header invalid: doesn't start with \" + preamble)\n\t}\n\ts = s[len(preamble):]\n\tif strings.IndexRune(s, ',') >= 0 {\n\t\treturn nil, errors.New(\"Range: header invalid: contains multiple ranges which isn't supported\")\n\t}\n\tdash := strings.IndexRune(s, '-')\n\tif dash < 0 {\n\t\treturn nil, errors.New(\"Range: header invalid: contains no '-'\")\n\t}\n\tstart, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:])\n\to := RangeOption{Start: -1, End: -1}\n\tif start != \"\" {\n\t\to.Start, err = strconv.ParseInt(start, 10, 64)\n\t\tif err != nil || o.Start < 0 {\n\t\t\treturn nil, errors.New(\"Range: header invalid: bad start\")\n\t\t}\n\t}\n\tif end != \"\" {\n\t\to.End, err = strconv.ParseInt(end, 10, 64)\n\t\tif err != nil || o.End < 0 {\n\t\t\treturn nil, errors.New(\"Range: header invalid: bad end\")\n\t\t}\n\t}\n\treturn &o, nil\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *RangeOption) String() string {\n\treturn fmt.Sprintf(\"RangeOption(%d,%d)\", o.Start, o.End)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *RangeOption) Mandatory() bool {\n\treturn true\n}\n\n\/\/ Decode interprets the RangeOption into an offset and a limit\n\/\/\n\/\/ The offset is the start of the stream and the limit is how many\n\/\/ bytes should be read from it. If the limit is -1 then the stream\n\/\/ should be read to the end.\nfunc (o *RangeOption) Decode(size int64) (offset, limit int64) {\n\tif o.Start >= 0 {\n\t\toffset = o.Start\n\t\tif o.End >= 0 {\n\t\t\tlimit = o.End - o.Start + 1\n\t\t} else {\n\t\t\tlimit = -1\n\t\t}\n\t} else {\n\t\tif o.End >= 0 {\n\t\t\toffset = size - o.End\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\t\tlimit = -1\n\t}\n\treturn offset, limit\n}\n\n\/\/ FixRangeOption looks through the slice of options and adjusts any\n\/\/ RangeOption~s found that request a fetch from the end into an\n\/\/ absolute fetch using the size passed in. Some remotes (eg\n\/\/ Onedrive, Box) don't support range requests which index from the\n\/\/ end.\nfunc FixRangeOption(options []OpenOption, size int64) {\n\tfor i := range options {\n\t\toption := options[i]\n\t\tif x, ok := option.(*RangeOption); ok {\n\t\t\t\/\/ If start is < 0 then fetch from the end\n\t\t\tif x.Start < 0 {\n\t\t\t\tx = &RangeOption{Start: size - x.End, End: -1}\n\t\t\t\toptions[i] = x\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SeekOption defines an HTTP Range option with start only.\ntype SeekOption struct {\n\tOffset int64\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *SeekOption) Header() (key string, value string) {\n\tkey = \"Range\"\n\tvalue = fmt.Sprintf(\"bytes=%d-\", o.Offset)\n\treturn key, value\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *SeekOption) String() string {\n\treturn fmt.Sprintf(\"SeekOption(%d)\", o.Offset)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *SeekOption) Mandatory() bool {\n\treturn true\n}\n\n\/\/ HTTPOption defines a general purpose HTTP option\ntype HTTPOption struct {\n\tKey string\n\tValue string\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *HTTPOption) Header() (key string, value string) {\n\treturn o.Key, o.Value\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *HTTPOption) String() string {\n\treturn fmt.Sprintf(\"HTTPOption(%q,%q)\", o.Key, o.Value)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *HTTPOption) Mandatory() bool {\n\treturn false\n}\n\n\/\/ HashesOption defines an option used to tell the local fs to limit\n\/\/ the number of hashes it calculates.\ntype HashesOption struct {\n\tHashes hash.Set\n}\n\n\/\/ Header formats the option as an http header\nfunc (o *HashesOption) Header() (key string, value string) {\n\treturn \"\", \"\"\n}\n\n\/\/ String formats the option into human readable form\nfunc (o *HashesOption) String() string {\n\treturn fmt.Sprintf(\"HashesOption(%v)\", o.Hashes)\n}\n\n\/\/ Mandatory returns whether the option must be parsed or can be ignored\nfunc (o *HashesOption) Mandatory() bool {\n\treturn false\n}\n\n\/\/ OpenOptionAddHeaders adds each header found in options to the\n\/\/ headers map provided the key was non empty.\nfunc OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {\n\tfor _, option := range options {\n\t\tkey, value := option.Header()\n\t\tif key != \"\" && value != \"\" {\n\t\t\theaders[key] = value\n\t\t}\n\t}\n}\n\n\/\/ OpenOptionHeaders adds each header found in options to the\n\/\/ headers map provided the key was non empty.\n\/\/\n\/\/ It returns a nil map if options was empty\nfunc OpenOptionHeaders(options []OpenOption) (headers map[string]string) {\n\tif len(options) == 0 {\n\t\treturn nil\n\t}\n\theaders = make(map[string]string, len(options))\n\tOpenOptionAddHeaders(options, headers)\n\treturn headers\n}\n\n\/\/ OpenOptionAddHTTPHeaders Sets each header found in options to the\n\/\/ http.Header map provided the key was non empty.\nfunc OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {\n\tfor _, option := range options {\n\t\tkey, value := option.Header()\n\t\tif key != \"\" && value != \"\" {\n\t\t\theaders.Set(key, value)\n\t\t}\n\t}\n}\n\n\/\/ check interface\nvar (\n\t_ OpenOption = (*RangeOption)(nil)\n\t_ OpenOption = (*SeekOption)(nil)\n\t_ OpenOption = (*HTTPOption)(nil)\n)\n<|endoftext|>"} {"text":"<commit_before>package oskite\n\nimport (\n\t\"fmt\"\n\t\"koding\/virt\"\n\t\"time\"\n\n\t\"gopkg.in\/fatih\/set.v0\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\ntype VmCollection map[bson.ObjectId]*virt.VM\n\nvar blacklist = set.New()\n\n\/\/ mongodbVMs returns a map of VMs that are bind to the given\n\/\/ serviceUniquename\/hostkite in mongodb\nfunc mongodbVMs(serviceUniquename string) (VmCollection, error) {\n\tvms := make([]*virt.VM, 0)\n\n\tquery := func(c *mgo.Collection) error {\n\t\treturn c.Find(bson.M{\"hostKite\": serviceUniquename}).All(&vms)\n\t}\n\n\tif err := mongodbConn.Run(\"jVMs\", query); err != nil {\n\t\treturn nil, fmt.Errorf(\"allVMs fetching err: %s\", err.Error())\n\t}\n\n\tvmsMap := make(map[bson.ObjectId]*virt.VM, len(vms))\n\n\tfor _, vm := range vms {\n\t\tvmsMap[vm.Id] = vm\n\t}\n\n\treturn vmsMap, nil\n}\n\n\/\/ updateStates updates the state field of the given ids to the given state argument.\nfunc updateStates(ids []bson.ObjectId, state string) error {\n\tif len(ids) == 0 {\n\t\treturn nil \/\/ no need to update\n\t}\n\n\tlog.Info(\"Updating %s vms to the state %s\", len(ids), state)\n\treturn mongodbConn.Run(\"jVMs\", func(c *mgo.Collection) error {\n\t\t_, err := c.UpdateAll(bson.M{\"_id\": bson.M{\"$in\": ids}}, bson.M{\"$set\": bson.M{\"state\": state}})\n\t\treturn err\n\t})\n}\n\n\/\/ vmUpdater updates the states of current available VMs on the host machine.\nfunc (o *Oskite) vmUpdater() {\n\tvms := make([]virt.VM, 0)\n\n\tquery := func(c *mgo.Collection) error {\n\t\tvm := virt.VM{}\n\n\t\titer := c.Find(bson.M{\"hostKite\": o.ServiceUniquename}).Batch(50).Iter()\n\t\tfor iter.Next(&vm) {\n\t\t\tvms = append(vms, vm)\n\n\t\t\tif !blacklist.Has(vm.Id.Hex()) {\n\t\t\t\to.startAlwaysOn(vm)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn iter.Close()\n\t}\n\n\tfor _ = range time.Tick(time.Second * 30) {\n\t\t\/\/ start alwaysOn Vms\n\t\tif err := mongodbConn.Run(\"jVMs\", query); err != nil {\n\t\t\tlog.Error(\"allVMs fetching err: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ batch update the states now\n\t\tcurrentStates := make(map[bson.ObjectId]string, 0)\n\n\t\tfor _, vm := range vms {\n\t\t\tstate := virt.GetVMState(vm.Id)\n\t\t\tif state == \"\" {\n\t\t\t\tstate = \"UNKNOWN\"\n\t\t\t}\n\n\t\t\t\/\/ do not update if it's the same state\n\t\t\tif vm.State == state {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcurrentStates[vm.Id] = state\n\t\t}\n\n\t\tfilter := func(desiredState string) []bson.ObjectId {\n\t\t\tids := make([]bson.ObjectId, 0)\n\n\t\t\tfor id, state := range currentStates {\n\t\t\t\tif state == desiredState {\n\t\t\t\t\tids = append(ids, id)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ids\n\t\t}\n\n\t\tif err := updateStates(filter(\"RUNNING\"), \"RUNNING\"); err != nil {\n\t\t\tlog.Error(\"Updating RUNNING vms %v\", err)\n\t\t}\n\n\t\tif err := updateStates(filter(\"STOPPED\"), \"STOPPED\"); err != nil {\n\t\t\tlog.Error(\"Updating STOPPED vms %v\", err)\n\t\t}\n\n\t\tcurrentStates = nil \/\/ garbage collection\n\n\t\t\/\/ re initialize for next iteration\n\t\tvms = make([]virt.VM, 0)\n\t}\n\n}\n\nfunc unprepareInQueue(vm *virt.VM) {\n\tprepareQueue <- &QueueJob{\n\t\tmsg: fmt.Sprintf(\"Vm is stopped, unpreparing it: %s\", vm.HostnameAlias),\n\t\tf: func() error {\n\t\t\tinfo := getInfo(vm)\n\t\t\tinfo.mutex.Lock()\n\t\t\tdefer info.mutex.Unlock()\n\n\t\t\treturn unprepareProgress(nil, vm, false)\n\t\t},\n\t}\n}\n\n\/\/ startAlwaysOn starts a vm if it's alwaysOn and not pinned to the current\n\/\/ hostname\nfunc (o *Oskite) startAlwaysOn(vm virt.VM) {\n\tif !vm.AlwaysOn {\n\t\treturn\n\t}\n\n\t\/\/ means this vm is intended to be start on another kontainer machine\n\tif vm.PinnedToHost != \"\" && vm.PinnedToHost != o.ServiceUniquename {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tif vm.State != \"RUNNING\" {\n\t\t\tlog.Info(\"alwaysOn is starting [%s - %v]\", vm.HostnameAlias, vm.Id)\n\t\t\terr := o.startSingleVM(vm, nil)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"alwaysOn vm %s couldn't be started. err: %v\", vm.HostnameAlias, err)\n\t\t\t} else {\n\t\t\t\tlog.Info(\"alwaysOn vm started successfull [%s - %v]\", vm.HostnameAlias, vm.Id)\n\t\t\t}\n\t\t}\n\t}()\n}\n<commit_msg>Type error<commit_after>package oskite\n\nimport (\n\t\"fmt\"\n\t\"koding\/virt\"\n\t\"time\"\n\n\t\"gopkg.in\/fatih\/set.v0\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\ntype VmCollection map[bson.ObjectId]*virt.VM\n\nvar blacklist = set.New()\n\n\/\/ mongodbVMs returns a map of VMs that are bind to the given\n\/\/ serviceUniquename\/hostkite in mongodb\nfunc mongodbVMs(serviceUniquename string) (VmCollection, error) {\n\tvms := make([]*virt.VM, 0)\n\n\tquery := func(c *mgo.Collection) error {\n\t\treturn c.Find(bson.M{\"hostKite\": serviceUniquename}).All(&vms)\n\t}\n\n\tif err := mongodbConn.Run(\"jVMs\", query); err != nil {\n\t\treturn nil, fmt.Errorf(\"allVMs fetching err: %s\", err.Error())\n\t}\n\n\tvmsMap := make(map[bson.ObjectId]*virt.VM, len(vms))\n\n\tfor _, vm := range vms {\n\t\tvmsMap[vm.Id] = vm\n\t}\n\n\treturn vmsMap, nil\n}\n\n\/\/ updateStates updates the state field of the given ids to the given state argument.\nfunc updateStates(ids []bson.ObjectId, state string) error {\n\tif len(ids) == 0 {\n\t\treturn nil \/\/ no need to update\n\t}\n\n\tlog.Info(\"Updating %d vms to the state %s\", len(ids), state)\n\treturn mongodbConn.Run(\"jVMs\", func(c *mgo.Collection) error {\n\t\t_, err := c.UpdateAll(bson.M{\"_id\": bson.M{\"$in\": ids}}, bson.M{\"$set\": bson.M{\"state\": state}})\n\t\treturn err\n\t})\n}\n\n\/\/ vmUpdater updates the states of current available VMs on the host machine.\nfunc (o *Oskite) vmUpdater() {\n\tvms := make([]virt.VM, 0)\n\n\tquery := func(c *mgo.Collection) error {\n\t\tvm := virt.VM{}\n\n\t\titer := c.Find(bson.M{\"hostKite\": o.ServiceUniquename}).Batch(50).Iter()\n\t\tfor iter.Next(&vm) {\n\t\t\tvms = append(vms, vm)\n\n\t\t\tif !blacklist.Has(vm.Id.Hex()) {\n\t\t\t\to.startAlwaysOn(vm)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn iter.Close()\n\t}\n\n\tfor _ = range time.Tick(time.Second * 30) {\n\t\t\/\/ start alwaysOn Vms\n\t\tif err := mongodbConn.Run(\"jVMs\", query); err != nil {\n\t\t\tlog.Error(\"allVMs fetching err: %s\", err.Error())\n\t\t}\n\n\t\t\/\/ batch update the states now\n\t\tcurrentStates := make(map[bson.ObjectId]string, 0)\n\n\t\tfor _, vm := range vms {\n\t\t\tstate := vm.GetState()\n\t\t\t\/\/ do not update if it's the same state\n\t\t\tif vm.State == state {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcurrentStates[vm.Id] = state\n\t\t}\n\n\t\tfilter := func(desiredState string) []bson.ObjectId {\n\t\t\tids := make([]bson.ObjectId, 0)\n\n\t\t\tfor id, state := range currentStates {\n\t\t\t\tif state == desiredState {\n\t\t\t\t\tids = append(ids, id)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ids\n\t\t}\n\n\t\tif err := updateStates(filter(\"RUNNING\"), \"RUNNING\"); err != nil {\n\t\t\tlog.Error(\"Updating RUNNING vms %v\", err)\n\t\t}\n\n\t\tif err := updateStates(filter(\"STOPPED\"), \"STOPPED\"); err != nil {\n\t\t\tlog.Error(\"Updating STOPPED vms %v\", err)\n\t\t}\n\n if err := updateStates(filter(\"UNKNOWN\"), \"UNKNOWN\"); err != nil {\n\t\t\tlog.Error(\"Updating UNKNOWN vms %v\", err)\n\t\t}\n\n\t\tcurrentStates = nil \/\/ garbage collection\n\n\t\t\/\/ re initialize for next iteration\n\t\tvms = make([]virt.VM, 0)\n\t}\n\n}\n\nfunc unprepareInQueue(vm *virt.VM) {\n\tprepareQueue <- &QueueJob{\n\t\tmsg: fmt.Sprintf(\"Vm is stopped, unpreparing it: %s\", vm.HostnameAlias),\n\t\tf: func() error {\n\t\t\tinfo := getInfo(vm)\n\t\t\tinfo.mutex.Lock()\n\t\t\tdefer info.mutex.Unlock()\n\n\t\t\treturn unprepareProgress(nil, vm, false)\n\t\t},\n\t}\n}\n\n\/\/ startAlwaysOn starts a vm if it's alwaysOn and not pinned to the current\n\/\/ hostname\nfunc (o *Oskite) startAlwaysOn(vm virt.VM) {\n\tif !vm.AlwaysOn {\n\t\treturn\n\t}\n\n\t\/\/ means this vm is intended to be start on another kontainer machine\n\tif vm.PinnedToHost != \"\" && vm.PinnedToHost != o.ServiceUniquename {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tif vm.State != \"RUNNING\" && vm.State != \"UNKNOWN\" {\n\t\t\tlog.Info(\"alwaysOn is starting [%s - %v]\", vm.HostnameAlias, vm.Id)\n\t\t\terr := o.startSingleVM(vm, nil)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"alwaysOn vm %s couldn't be started. err: %v\", vm.HostnameAlias, err)\n\t\t\t} else {\n\t\t\t\tlog.Info(\"alwaysOn vm started successfull [%s - %v]\", vm.HostnameAlias, vm.Id)\n\t\t\t}\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package fsync\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSync(t *testing.T) {\n\t\/\/ create test directory and chdir to it\n\tdir, err := ioutil.TempDir(os.TempDir(), \"fsync_test\")\n\tcheck(err)\n\tcheck(os.Chdir(dir))\n\n\t\/\/ create test files and directories\n\tcheck(os.MkdirAll(\"src\/a\", 0755))\n\tcheck(ioutil.WriteFile(\"src\/a\/b\", []byte(\"file b\"), 0644))\n\tcheck(ioutil.WriteFile(\"src\/c\", []byte(\"file c\"), 0644))\n\t\/\/ set times in the past to make sure times are synced, not accidentally\n\t\/\/ the same\n\ttt := time.Now().Add(-1 * time.Hour)\n\tcheck(os.Chtimes(\"src\/a\", tt, tt))\n\tcheck(os.Chtimes(\"src\/a\/b\", tt, tt))\n\tcheck(os.Chtimes(\"src\/c\", tt, tt))\n\n\t\/\/ create Syncer\n\ts := NewSyncer()\n\n\t\/\/ sync\n\tcheck(s.SyncTo(\"dst\", \"src\/a\", \"src\/c\"))\n\n\t\/\/ check results\n\ttestDirContents(\"dst\", 2, t)\n\ttestDirContents(\"dst\/a\", 1, t)\n\ttestFile(\"dst\/a\/b\", []byte(\"file b\"), t)\n\ttestFile(\"dst\/c\", []byte(\"file c\"), t)\n\ttestPerms(\"dst\/a\", getPerms(\"src\/a\"), t)\n\ttestPerms(\"dst\/a\/b\", getPerms(\"src\/a\/b\"), t)\n\ttestPerms(\"dst\/c\", getPerms(\"src\/c\"), t)\n\ttestModTime(\"dst\", getModTime(\"src\"), t)\n\ttestModTime(\"dst\/a\", getModTime(\"src\/a\"), t)\n\ttestModTime(\"dst\/a\/b\", getModTime(\"src\/a\/b\"), t)\n\ttestModTime(\"dst\/c\", getModTime(\"src\/c\"), t)\n\n\t\/\/ modify src\n\tcheck(ioutil.WriteFile(\"src\/a\/b\", []byte(\"file b changed\"), 0644))\n\tcheck(os.Chmod(\"src\/a\", 0775))\n\n\t\/\/ sync\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results\n\ttestFile(\"dst\/a\/b\", []byte(\"file b changed\"), t)\n\ttestPerms(\"dst\/a\", getPerms(\"src\/a\"), t)\n\ttestModTime(\"dst\/a\", getModTime(\"src\/a\"), t)\n\ttestModTime(\"dst\/a\/b\", getModTime(\"src\/a\/b\"), t)\n\ttestModTime(\"dst\/c\", getModTime(\"src\/c\"), t)\n\n\t\/\/ remove c\n\tcheck(os.Remove(\"src\/c\"))\n\n\t\/\/ sync\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results; c should still exist\n\ttestDirContents(\"dst\", 2, t)\n\ttestExistence(\"dst\/c\", true, t)\n\n\t\/\/ sync\n\ts.Delete = true\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results; c should no longer exist\n\ttestDirContents(\"dst\", 1, t)\n\ttestExistence(\"dst\/c\", false, t)\n\n\ts.Delete = false\n\tif err = s.Sync(\"dst\", \"src\/a\/b\"); err == nil {\n\t\tt.Errorf(\"expecting ErrFileOverDir, got nothing.\\n\")\n\t} else if err != nil && err != ErrFileOverDir {\n\t\tpanic(err)\n\t}\n}\n\nfunc testFile(name string, b []byte, t *testing.T) {\n\ttestExistence(name, true, t)\n\tc, err := ioutil.ReadFile(name)\n\tcheck(err)\n\tif !bytes.Equal(b, c) {\n\t\tt.Errorf(\"content of file \\\"%s\\\" is:\\n%s\\nexpected:\\n%s\\n\",\n\t\t\tname, c, b)\n\t}\n}\n\nfunc testExistence(name string, e bool, t *testing.T) {\n\t_, err := os.Stat(name)\n\tif os.IsNotExist(err) {\n\t\tif e {\n\t\t\tt.Errorf(\"file \\\"%s\\\" does not exist.\\n\", name)\n\t\t}\n\t} else if err != nil {\n\t\tpanic(err)\n\t} else {\n\t\tif !e {\n\t\t\tt.Errorf(\"file \\\"%s\\\" exists.\\n\", name)\n\t\t}\n\t}\n}\n\nfunc testDirContents(name string, count int, t *testing.T) {\n\tfiles, err := ioutil.ReadDir(name)\n\tcheck(err)\n\tif len(files) != count {\n\t\tt.Errorf(\"directory \\\"%s\\\" has %d children, shoud have %d.\\n\",\n\t\t\tname, len(files), count)\n\t}\n}\n\nfunc testPerms(name string, p os.FileMode, t *testing.T) {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\tif info.Mode().Perm() != p {\n\t\tt.Errorf(\"permissions for \\\"%s\\\" is %v, should be %v.\\n\",\n\t\t\tname, info.Mode().Perm(), p)\n\t}\n}\n\nfunc testModTime(name string, m time.Time, t *testing.T) {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\tif !info.ModTime().Equal(m) {\n\t\tt.Errorf(\"modification time for \\\"%s\\\" is %v, should be %v.\\n\",\n\t\t\tname, info.ModTime(), m)\n\t}\n}\n\nfunc getModTime(name string) time.Time {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\treturn info.ModTime()\n}\n\nfunc getPerms(name string) os.FileMode {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\treturn info.Mode().Perm()\n}\n<commit_msg>Test stats on the \"dst\" after it have been synced<commit_after>package fsync\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSync(t *testing.T) {\n\t\/\/ create test directory and chdir to it\n\tdir, err := ioutil.TempDir(os.TempDir(), \"fsync_test\")\n\tcheck(err)\n\tcheck(os.Chdir(dir))\n\n\t\/\/ create test files and directories\n\tcheck(os.MkdirAll(\"src\/a\", 0755))\n\tcheck(ioutil.WriteFile(\"src\/a\/b\", []byte(\"file b\"), 0644))\n\tcheck(ioutil.WriteFile(\"src\/c\", []byte(\"file c\"), 0644))\n\t\/\/ set times in the past to make sure times are synced, not accidentally\n\t\/\/ the same\n\ttt := time.Now().Add(-1 * time.Hour)\n\tcheck(os.Chtimes(\"src\/a\/b\", tt, tt))\n\tcheck(os.Chtimes(\"src\/a\", tt, tt))\n\tcheck(os.Chtimes(\"src\/c\", tt, tt))\n\tcheck(os.Chtimes(\"src\", tt, tt))\n\n\t\/\/ create Syncer\n\ts := NewSyncer()\n\n\t\/\/ sync\n\tcheck(s.SyncTo(\"dst\", \"src\/a\", \"src\/c\"))\n\n\t\/\/ check results\n\ttestDirContents(\"dst\", 2, t)\n\ttestDirContents(\"dst\/a\", 1, t)\n\ttestFile(\"dst\/a\/b\", []byte(\"file b\"), t)\n\ttestFile(\"dst\/c\", []byte(\"file c\"), t)\n\ttestPerms(\"dst\/a\", getPerms(\"src\/a\"), t)\n\ttestPerms(\"dst\/a\/b\", getPerms(\"src\/a\/b\"), t)\n\ttestPerms(\"dst\/c\", getPerms(\"src\/c\"), t)\n\ttestModTime(\"dst\/a\", getModTime(\"src\/a\"), t)\n\ttestModTime(\"dst\/a\/b\", getModTime(\"src\/a\/b\"), t)\n\ttestModTime(\"dst\/c\", getModTime(\"src\/c\"), t)\n\n\t\/\/ sync the parent directory too\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check the results\n\ttestPerms(\"dst\", getPerms(\"src\"), t)\n\ttestModTime(\"dst\", getModTime(\"src\"), t)\n\n\t\/\/ modify src\n\tcheck(ioutil.WriteFile(\"src\/a\/b\", []byte(\"file b changed\"), 0644))\n\tcheck(os.Chmod(\"src\/a\", 0775))\n\n\t\/\/ sync\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results\n\ttestFile(\"dst\/a\/b\", []byte(\"file b changed\"), t)\n\ttestPerms(\"dst\/a\", getPerms(\"src\/a\"), t)\n\ttestModTime(\"dst\", getModTime(\"src\"), t)\n\ttestModTime(\"dst\/a\", getModTime(\"src\/a\"), t)\n\ttestModTime(\"dst\/a\/b\", getModTime(\"src\/a\/b\"), t)\n\ttestModTime(\"dst\/c\", getModTime(\"src\/c\"), t)\n\n\t\/\/ remove c\n\tcheck(os.Remove(\"src\/c\"))\n\n\t\/\/ sync\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results; c should still exist\n\ttestDirContents(\"dst\", 2, t)\n\ttestExistence(\"dst\/c\", true, t)\n\n\t\/\/ sync\n\ts.Delete = true\n\tcheck(s.Sync(\"dst\", \"src\"))\n\n\t\/\/ check results; c should no longer exist\n\ttestDirContents(\"dst\", 1, t)\n\ttestExistence(\"dst\/c\", false, t)\n\n\ts.Delete = false\n\tif err = s.Sync(\"dst\", \"src\/a\/b\"); err == nil {\n\t\tt.Errorf(\"expecting ErrFileOverDir, got nothing.\\n\")\n\t} else if err != nil && err != ErrFileOverDir {\n\t\tpanic(err)\n\t}\n}\n\nfunc testFile(name string, b []byte, t *testing.T) {\n\ttestExistence(name, true, t)\n\tc, err := ioutil.ReadFile(name)\n\tcheck(err)\n\tif !bytes.Equal(b, c) {\n\t\tt.Errorf(\"content of file \\\"%s\\\" is:\\n%s\\nexpected:\\n%s\\n\",\n\t\t\tname, c, b)\n\t}\n}\n\nfunc testExistence(name string, e bool, t *testing.T) {\n\t_, err := os.Stat(name)\n\tif os.IsNotExist(err) {\n\t\tif e {\n\t\t\tt.Errorf(\"file \\\"%s\\\" does not exist.\\n\", name)\n\t\t}\n\t} else if err != nil {\n\t\tpanic(err)\n\t} else {\n\t\tif !e {\n\t\t\tt.Errorf(\"file \\\"%s\\\" exists.\\n\", name)\n\t\t}\n\t}\n}\n\nfunc testDirContents(name string, count int, t *testing.T) {\n\tfiles, err := ioutil.ReadDir(name)\n\tcheck(err)\n\tif len(files) != count {\n\t\tt.Errorf(\"directory \\\"%s\\\" has %d children, shoud have %d.\\n\",\n\t\t\tname, len(files), count)\n\t}\n}\n\nfunc testPerms(name string, p os.FileMode, t *testing.T) {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\tif info.Mode().Perm() != p {\n\t\tt.Errorf(\"permissions for \\\"%s\\\" is %v, should be %v.\\n\",\n\t\t\tname, info.Mode().Perm(), p)\n\t}\n}\n\nfunc testModTime(name string, m time.Time, t *testing.T) {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\tif !info.ModTime().Equal(m) {\n\t\tt.Errorf(\"modification time for \\\"%s\\\" is %v, should be %v.\\n\",\n\t\t\tname, info.ModTime(), m)\n\t}\n}\n\nfunc getModTime(name string) time.Time {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\treturn info.ModTime()\n}\n\nfunc getPerms(name string) os.FileMode {\n\tinfo, err := os.Stat(name)\n\tcheck(err)\n\treturn info.Mode().Perm()\n}\n<|endoftext|>"} {"text":"<commit_before>package vindexes\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"encoding\/hex\"\n)\n\nfunc init() {\n\tRegister(\"lookup\", NewLookup)\n\tRegister(\"lookup_unique\", NewLookupUnique)\n}\n\n\/* LookupNonUnique defines a vindex that uses a lookup table and create a mapping between id and KeyspaceId.\n * The table is expected to define the id column as unique. It's\n * NonUnique and a Lookup.\n *\/\ntype LookupNonUnique struct {\n\tname string\n\tlkp lookup\n}\n\n\/\/ NewLookup creates a LookupNonUnique vindex.\nfunc NewLookup(name string, m map[string]string) (Vindex, error) {\n\tlookup := &LookupNonUnique{name: name}\n\tlookup.lkp.Init(m)\n\treturn lookup, nil\n}\n\n\/\/ String returns the name of the vindex.\nfunc (vindex *LookupNonUnique) String() string {\n\treturn vindex.name\n}\n\n\/\/ Cost returns the cost of this vindex as 20.\nfunc (vindex *LookupNonUnique) Cost() int {\n\treturn 20\n}\n\n\/\/ Map returns the corresponding KeyspaceId values for the given ids.\nfunc (vindex *LookupNonUnique) Map(vcursor VCursor, ids []interface{}) ([][][]byte, error) {\n\treturn vindex.lkp.MapLookup(vcursor, ids)\n}\n\n\/\/ Verify returns true if id maps to ksid.\nfunc (vindex *LookupNonUnique) Verify(vcursor VCursor, id interface{}, ksid []byte) (bool, error) {\n\treturn vindex.lkp.VerifyLookup(vcursor, id, ksid)\n}\n\n\/\/ Create reserves the id by inserting it into the vindex table.\nfunc (vindex *LookupNonUnique) Create(vcursor VCursor, id interface{}, ksid []byte) error {\n\treturn vindex.lkp.CreateLookup(vcursor, id, ksid)\n}\n\n\/\/ Delete deletes the entry from the vindex table.\nfunc (vindex *LookupNonUnique) Delete(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\treturn vindex.lkp.DeleteLookup(vcursor, ids, ksid)\n}\n\n\/\/ MarshalJSON returns a JSON representation of LookupHash.\nfunc (vindex *LookupNonUnique) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(vindex.lkp)\n}\n\n\/* LookupUnique defines a vindex that uses a lookup table.\n * The table is expected to define the id column as unique. It's\n * Unique and a Lookup.\n *\/\ntype LookupUnique struct {\n\tname string\n\tlkp lookup\n}\n\n\/\/ NewLookupUnique creates a LookupHashUnique vindex.\nfunc NewLookupUnique(name string, m map[string]string) (Vindex, error) {\n\tlu := &LookupUnique{name: name}\n\tlu.lkp.Init(m)\n\treturn lu, nil\n}\n\n\/\/ String returns the name of the vindex.\nfunc (vindex *LookupUnique) String() string {\n\treturn vindex.name\n}\n\n\/\/ Cost returns the cost of this vindex as 10.\nfunc (vindex *LookupUnique) Cost() int {\n\treturn 10\n}\n\n\/\/ Map returns the corresponding KeyspaceId values for the given ids.\nfunc (vindex *LookupUnique) Map(vcursor VCursor, ids []interface{}) ([][]byte, error) {\n\treturn vindex.lkp.MapLookupUnique(vcursor, ids)\n}\n\n\/\/ Verify returns true if id maps to ksid.\nfunc (vindex *LookupUnique) Verify(vcursor VCursor, id interface{}, ksid []byte) (bool, error) {\n\treturn vindex.lkp.VerifyLookup(vcursor, id, ksid)\n}\n\n\/\/ Create reserves the id by inserting it into the vindex table.\nfunc (vindex *LookupUnique) Create(vcursor VCursor, id interface{}, ksid []byte) error {\n\treturn vindex.lkp.CreateLookup(vcursor, id, ksid)\n}\n\n\/\/ Delete deletes the entry from the vindex table.\nfunc (vindex *LookupUnique) Delete(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\treturn vindex.lkp.DeleteLookup(vcursor, ids, ksid)\n}\n\n\/\/ MarshalJSON returns a JSON representation of LookupHashUnique.\nfunc (vindex *LookupUnique) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(vindex.lkp)\n}\n\n\/\/ MapLookupUnique is for a unique vindex.\nfunc (lkp *lookup) MapLookupUnique(vcursor VCursor, ids []interface{}) ([][]byte, error) {\n\tout := make([][]byte, 0, len(ids))\n\tfor _, id := range ids {\n\t\tresult, err := vcursor.Execute(lkp.sel, map[string]interface{}{\n\t\t\tlkp.From: id,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t}\n\t\tif len(result.Rows) == 0 {\n\t\t\tout = append(out, []byte{})\n\t\t\tcontinue\n\t\t}\n\t\tif len(result.Rows) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: unexpected multiple results from vindex %s: %v\", lkp.Table, id)\n\t\t}\n\t\tsource, err := getBytes(result.Rows[0][0].ToNative())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata,err := hex.DecodeString(string(source))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tout = append(out, data)\n\t}\n\treturn out, nil\n}\n\n\/\/ MapLookup is for a non-unique vindex.\nfunc (lkp *lookup) MapLookup(vcursor VCursor, ids []interface{}) ([][][]byte, error) {\n\tout := make([][][]byte, 0, len(ids))\n\tfor _, id := range ids {\n\t\tresult, err := vcursor.Execute(lkp.sel, map[string]interface{}{\n\t\t\tlkp.From: id,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t}\n\t\tvar ksids [][]byte\n\t\tfor _, row := range result.Rows {\n\t\t\tsource, err := getBytes(row[0].ToNative())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdata, err := hex.DecodeString(string(source))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t\t}\n\t\t\tksids = append(ksids, data)\n\t\t}\n\t\tout = append(out, ksids)\n\t}\n\treturn out, nil\n}\n\n\/\/ Create creates an association between id and keyspaceid by inserting a row in the vindex table.\nfunc (lkp *lookup) CreateLookup(vcursor VCursor, id interface{}, keyspaceid []byte) error {\n\tkeyspace_id := hex.EncodeToString(keyspaceid)\n\tif _, err := vcursor.Execute(lkp.ins, map[string]interface{}{\n\t\tlkp.From: id,\n\t\tlkp.To: keyspace_id,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"lookup.Create: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ Delete deletes the association between ids and keyspaceid.\nfunc (lkp *lookup) DeleteLookup(vcursor VCursor, ids []interface{}, keyspaceid []byte) error {\n\tkeyspace_id := hex.EncodeToString(keyspaceid)\n\tbindvars := map[string]interface{}{\n\t\tlkp.To: keyspace_id,\n\t}\n\tfor _, id := range ids {\n\t\tbindvars[lkp.From] = id\n\t\tif _, err := vcursor.Execute(lkp.del, bindvars); err != nil {\n\t\t\treturn fmt.Errorf(\"lookup.Delete: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ VerifyLookup returns true if id maps to ksid.\nfunc (lkp *lookup) VerifyLookup(vcursor VCursor, id interface{}, keyspaceid []byte) (bool, error) {\n\tkeyspace_id := hex.EncodeToString(keyspaceid)\n\tresult, err := vcursor.Execute(lkp.ver, map[string]interface{}{\n\t\tlkp.From: id,\n\t\tlkp.To: keyspace_id,\n\t})\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"lookup.Verify: %v\", err)\n\t}\n\tif len(result.Rows) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n<commit_msg>Changing the Variables<commit_after>package vindexes\n\nimport (\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\nfunc init() {\n\tRegister(\"lookup\", NewLookup)\n\tRegister(\"lookup_unique\", NewLookupUnique)\n}\n\n\/\/ LookupNonUnique defines a vindex that uses a lookup table and create a mapping between id and KeyspaceId.\n\/\/ The table is expected to define the id column as unique. It's\n\/\/ NonUnique and a Lookup.\ntype LookupNonUnique struct {\n\tname string\n\tlkp lookup\n}\n\n\/\/ NewLookup creates a LookupNonUnique vindex.\nfunc NewLookup(name string, m map[string]string) (Vindex, error) {\n\tlookup := &LookupNonUnique{name: name}\n\tlookup.lkp.Init(m)\n\treturn lookup, nil\n}\n\n\/\/ String returns the name of the vindex.\nfunc (vindex *LookupNonUnique) String() string {\n\treturn vindex.name\n}\n\n\/\/ Cost returns the cost of this vindex as 20.\nfunc (vindex *LookupNonUnique) Cost() int {\n\treturn 20\n}\n\n\/\/ Map returns the corresponding KeyspaceId values for the given ids.\nfunc (vindex *LookupNonUnique) Map(vcursor VCursor, ids []interface{}) ([][][]byte, error) {\n\treturn vindex.lkp.MapLookup(vcursor, ids)\n}\n\n\/\/ Verify returns true if id maps to ksid.\nfunc (vindex *LookupNonUnique) Verify(vcursor VCursor, id interface{}, ksid []byte) (bool, error) {\n\treturn vindex.lkp.VerifyLookup(vcursor, id, ksid)\n}\n\n\/\/ Create reserves the id by inserting it into the vindex table.\nfunc (vindex *LookupNonUnique) Create(vcursor VCursor, id interface{}, ksid []byte) error {\n\treturn vindex.lkp.CreateLookup(vcursor, id, ksid)\n}\n\n\/\/ Delete deletes the entry from the vindex table.\nfunc (vindex *LookupNonUnique) Delete(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\treturn vindex.lkp.DeleteLookup(vcursor, ids, ksid)\n}\n\n\/\/ MarshalJSON returns a JSON representation of LookupHash.\nfunc (vindex *LookupNonUnique) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(vindex.lkp)\n}\n\n\/\/ LookupUnique defines a vindex that uses a lookup table.\n\/\/ The table is expected to define the id column as unique. It's\n\/\/ Unique and a Lookup.\ntype LookupUnique struct {\n\tname string\n\tlkp lookup\n}\n\n\/\/ NewLookupUnique creates a LookupHashUnique vindex.\nfunc NewLookupUnique(name string, m map[string]string) (Vindex, error) {\n\tlu := &LookupUnique{name: name}\n\tlu.lkp.Init(m)\n\treturn lu, nil\n}\n\n\/\/ String returns the name of the vindex.\nfunc (vindex *LookupUnique) String() string {\n\treturn vindex.name\n}\n\n\/\/ Cost returns the cost of this vindex as 10.\nfunc (vindex *LookupUnique) Cost() int {\n\treturn 10\n}\n\n\/\/ Map returns the corresponding KeyspaceId values for the given ids.\nfunc (vindex *LookupUnique) Map(vcursor VCursor, ids []interface{}) ([][]byte, error) {\n\treturn vindex.lkp.MapLookupUnique(vcursor, ids)\n}\n\n\/\/ Verify returns true if id maps to ksid.\nfunc (vindex *LookupUnique) Verify(vcursor VCursor, id interface{}, ksid []byte) (bool, error) {\n\treturn vindex.lkp.VerifyLookup(vcursor, id, ksid)\n}\n\n\/\/ Create reserves the id by inserting it into the vindex table.\nfunc (vindex *LookupUnique) Create(vcursor VCursor, id interface{}, ksid []byte) error {\n\treturn vindex.lkp.CreateLookup(vcursor, id, ksid)\n}\n\n\/\/ Delete deletes the entry from the vindex table.\nfunc (vindex *LookupUnique) Delete(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\treturn vindex.lkp.DeleteLookup(vcursor, ids, ksid)\n}\n\n\/\/ MarshalJSON returns a JSON representation of LookupHashUnique.\nfunc (vindex *LookupUnique) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(vindex.lkp)\n}\n\n\/\/ MapLookupUnique is for a unique vindex.\nfunc (lkp *lookup) MapLookupUnique(vcursor VCursor, ids []interface{}) ([][]byte, error) {\n\tout := make([][]byte, 0, len(ids))\n\tfor _, id := range ids {\n\t\tresult, err := vcursor.Execute(lkp.sel, map[string]interface{}{\n\t\t\tlkp.From: id,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t}\n\t\tif len(result.Rows) == 0 {\n\t\t\tout = append(out, []byte{})\n\t\t\tcontinue\n\t\t}\n\t\tif len(result.Rows) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: unexpected multiple results from vindex %s: %v\", lkp.Table, id)\n\t\t}\n\t\tsource, err := getBytes(result.Rows[0][0].ToNative())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata, err := hex.DecodeString(string(source))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tout = append(out, data)\n\t}\n\treturn out, nil\n}\n\n\/\/ MapLookup is for a non-unique vindex.\nfunc (lkp *lookup) MapLookup(vcursor VCursor, ids []interface{}) ([][][]byte, error) {\n\tout := make([][][]byte, 0, len(ids))\n\tfor _, id := range ids {\n\t\tresult, err := vcursor.Execute(lkp.sel, map[string]interface{}{\n\t\t\tlkp.From: id,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t}\n\t\tvar ksids [][]byte\n\t\tfor _, row := range result.Rows {\n\t\t\tsource, err := getBytes(row[0].ToNative())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdata, err := hex.DecodeString(string(source))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"lookup.Map: %v\", err)\n\t\t\t}\n\t\t\tksids = append(ksids, data)\n\t\t}\n\t\tout = append(out, ksids)\n\t}\n\treturn out, nil\n}\n\n\/\/ Create creates an association between id and keyspaceid by inserting a row in the vindex table.\nfunc (lkp *lookup) CreateLookup(vcursor VCursor, id interface{}, ksid []byte) error {\n\tkeyspaceID := hex.EncodeToString(ksid)\n\tif _, err := vcursor.Execute(lkp.ins, map[string]interface{}{\n\t\tlkp.From: id,\n\t\tlkp.To: keyspaceID,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"lookup.Create: %v\", err)\n\t}\n\treturn nil\n}\n\n\/\/ Delete deletes the association between ids and keyspaceid.\nfunc (lkp *lookup) DeleteLookup(vcursor VCursor, ids []interface{}, ksid []byte) error {\n\tkeyspaceID := hex.EncodeToString(ksid)\n\tbindvars := map[string]interface{}{\n\t\tlkp.To: keyspaceID,\n\t}\n\tfor _, id := range ids {\n\t\tbindvars[lkp.From] = id\n\t\tif _, err := vcursor.Execute(lkp.del, bindvars); err != nil {\n\t\t\treturn fmt.Errorf(\"lookup.Delete: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ VerifyLookup returns true if id maps to ksid.\nfunc (lkp *lookup) VerifyLookup(vcursor VCursor, id interface{}, ksid []byte) (bool, error) {\n\tkeyspaceID := hex.EncodeToString(ksid)\n\tresult, err := vcursor.Execute(lkp.ver, map[string]interface{}{\n\t\tlkp.From: id,\n\t\tlkp.To: keyspaceID,\n\t})\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"lookup.Verify: %v\", err)\n\t}\n\tif len(result.Rows) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package dry\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype MyString struct {\n\tstr string\n}\n\nfunc (t MyString) String() string {\n\treturn t.str\n}\n\ntype MyError struct {\n\tstr string\n}\n\nfunc (t MyError) Error() string {\n\treturn t.str\n}\n\nfunc Test_BytesReader(t *testing.T) {\n\texpected := []byte(\"hello\")\n\ttestBytesReaderFn := func(input interface{}) {\n\t\tresult := make([]byte, 5)\n\t\treturnedIoReader := BytesReader(input)\n\t\tn, _ := returnedIoReader.Read(result)\n\t\tif n != 5 {\n\t\t\tt.FailNow()\n\t\t}\n\t\tfor i, _ := range result {\n\t\t\tif result[i] != expected[i] {\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t}\n\t\tn, err := returnedIoReader.Read(result)\n\t\tif n != 0 || err != io.EOF {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\ttestBytesReaderFn(strings.NewReader(\"hello\"))\n\n\tbytesInput := []byte(\"hello\")\n\ttestBytesReaderFn(bytesInput)\n\n\ttestBytesReaderFn(\"hello\")\n\n\tmyStr := MyString{\"hello\"}\n\ttestBytesReaderFn(myStr)\n\n\tmyErr := MyError{\"hello\"}\n\ttestBytesReaderFn(myErr)\n}\n\nfunc testCompressDecompress(t *testing.T,\n\tcompressFunc func([]byte) []byte,\n\tdecompressFunc func([]byte) []byte) {\n\ttestFn := func(testData []byte) {\n\t\tcompressedData := compressFunc(testData)\n\t\tuncompressedData := decompressFunc(compressedData)\n\t\tif !bytes.Equal(testData, uncompressedData) {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\tgo testFn([]byte(\"hello123\"))\n\tgo testFn([]byte(\"gopher456\"))\n\tgo testFn([]byte(\"dry789\"))\n}\n\nfunc Test_BytesDeflateInflate(t *testing.T) {\n\ttestCompressDecompress(t, BytesDeflate, BytesInflate)\n}\n\nfunc Test_BytesGzipUnGzip(t *testing.T) {\n\ttestCompressDecompress(t, BytesGzip, BytesUnGzip)\n}\n\nfunc Test_BytesMap(t *testing.T) {\n\tupper := func(b byte) byte {\n\t\treturn b - ('a' - 'A')\n\t}\n\tresult := BytesMap(upper, []byte(\"hello\"))\n\tcorrect := []byte(\"HELLO\")\n\tif len(result) != len(correct) {\n\t\tt.Fail()\n\t}\n\tfor i, _ := range result {\n\t\tif result[i] != correct[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc Test_BytesFilter(t *testing.T) {\n\tazFunc := func(b byte) bool {\n\t\treturn b >= 'A' && b <= 'Z'\n\t}\n\tresult := BytesFilter(azFunc, []byte{1, 2, 3, 'A', 'f', 'R', 123})\n\tcorrect := []byte{'A', 'R'}\n\tif len(result) != len(correct) {\n\t\tt.Fail()\n\t}\n\tfor i, _ := range result {\n\t\tif result[i] != correct[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n<commit_msg>Add test cases for some methods in bytes<commit_after>package dry\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype MyString struct {\n\tstr string\n}\n\nfunc (t MyString) String() string {\n\treturn t.str\n}\n\ntype MyError struct {\n\tstr string\n}\n\nfunc (t MyError) Error() string {\n\treturn t.str\n}\n\nfunc Test_BytesReader(t *testing.T) {\n\texpected := []byte(\"hello\")\n\ttestBytesReaderFn := func(input interface{}) {\n\t\tresult := make([]byte, 5)\n\t\treturnedIoReader := BytesReader(input)\n\t\tn, _ := returnedIoReader.Read(result)\n\t\tif n != 5 {\n\t\t\tt.FailNow()\n\t\t}\n\t\tfor i, _ := range result {\n\t\t\tif result[i] != expected[i] {\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t}\n\t\tn, err := returnedIoReader.Read(result)\n\t\tif n != 0 || err != io.EOF {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\ttestBytesReaderFn(strings.NewReader(\"hello\"))\n\n\tbytesInput := []byte(\"hello\")\n\ttestBytesReaderFn(bytesInput)\n\n\ttestBytesReaderFn(\"hello\")\n\n\tmyStr := MyString{\"hello\"}\n\ttestBytesReaderFn(myStr)\n\n\tmyErr := MyError{\"hello\"}\n\ttestBytesReaderFn(myErr)\n}\n\nfunc assertStringsEqual(t *testing.T, str1, str2 string) {\n\tif str1 != str2 {\n\t\tt.FailNow()\n\t}\n}\n\nfunc Test_BytesMD5(t *testing.T) {\n\tassertStringsEqual(\n\t\tt, \"5d41402abc4b2a76b9719d911017c592\",\n\t\tBytesMD5(\"hello\"))\n}\n\nfunc Test_BytesEncodeBase64(t *testing.T) {\n\tassertStringsEqual(\n\t\tt, \"aGVsbG8=\",\n\t\tBytesEncodeBase64(\"hello\"))\n}\n\nfunc Test_BytesDecodeBase64(t *testing.T) {\n\tassertStringsEqual(\n\t\tt, \"hello\",\n\t\tBytesDecodeBase64(\"aGVsbG8=\"))\n}\n\nfunc Test_BytesEncodeHex(t *testing.T) {\n\tassertStringsEqual(\n\t\tt, \"68656c6c6f\",\n\t\tBytesEncodeHex(\"hello\"))\n}\n\nfunc Test_BytesDecodeHex(t *testing.T) {\n\tassertStringsEqual(\n\t\tt, \"hello\",\n\t\tBytesDecodeHex(\"68656C6C6F\"))\n}\n\nfunc testCompressDecompress(t *testing.T,\n\tcompressFunc func([]byte) []byte,\n\tdecompressFunc func([]byte) []byte) {\n\ttestFn := func(testData []byte) {\n\t\tcompressedData := compressFunc(testData)\n\t\tuncompressedData := decompressFunc(compressedData)\n\t\tif !bytes.Equal(testData, uncompressedData) {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\tgo testFn([]byte(\"hello123\"))\n\tgo testFn([]byte(\"gopher456\"))\n\tgo testFn([]byte(\"dry789\"))\n}\n\nfunc Test_BytesDeflateInflate(t *testing.T) {\n\ttestCompressDecompress(t, BytesDeflate, BytesInflate)\n}\n\nfunc Test_BytesGzipUnGzip(t *testing.T) {\n\ttestCompressDecompress(t, BytesGzip, BytesUnGzip)\n}\n\nfunc Test_BytesMap(t *testing.T) {\n\tupper := func(b byte) byte {\n\t\treturn b - ('a' - 'A')\n\t}\n\tresult := BytesMap(upper, []byte(\"hello\"))\n\tcorrect := []byte(\"HELLO\")\n\tif len(result) != len(correct) {\n\t\tt.Fail()\n\t}\n\tfor i, _ := range result {\n\t\tif result[i] != correct[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n\nfunc Test_BytesFilter(t *testing.T) {\n\tazFunc := func(b byte) bool {\n\t\treturn b >= 'A' && b <= 'Z'\n\t}\n\tresult := BytesFilter(azFunc, []byte{1, 2, 3, 'A', 'f', 'R', 123})\n\tcorrect := []byte{'A', 'R'}\n\tif len(result) != len(correct) {\n\t\tt.Fail()\n\t}\n\tfor i, _ := range result {\n\t\tif result[i] != correct[i] {\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Netstack Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package buffer provides the implementation of a buffer view.\npackage buffer\n\n\/\/ View is a slice of a buffer, with convenience methods.\ntype View []byte\n\n\/\/ NewView allocates a new buffer and returns an initialized view that covers\n\/\/ the whole buffer.\nfunc NewView(size int) View {\n\treturn make(View, size)\n}\n\n\/\/ TrimFront removes the first \"count\" bytes from the visible section of the\n\/\/ buffer.\nfunc (v *View) TrimFront(count int) {\n\t*v = (*v)[count:]\n}\n\n\/\/ CapLength irreversibly reduces the length of the visible section of the\n\/\/ buffer to the value specified.\nfunc (v *View) CapLength(length int) {\n\t\/\/ We also set the slice cap because if we don't, one would be able to\n\t\/\/ expand the view back to include the region just excluded. We want to\n\t\/\/ prevent that to avoid potential data leak if we have uninitialized\n\t\/\/ data in excluded region.\n\t*v = (*v)[:length:length]\n}\n\n\/\/ ToVectorisedView transforms a View in a VectorisedView from an\n\/\/ already-allocated slice of View.\nfunc (v *View) ToVectorisedView(views [1]View) VectorisedView {\n\tviews[0] = *v\n\treturn NewVectorisedView(len(*v), views[:])\n}\n\n\/\/ VectorisedView is a vectorised version of View using non contigous memory.\n\/\/ It supports all the convenience methods supported by View.\ntype VectorisedView struct {\n\tviews []View\n\tsize int\n}\n\n\/\/ NewVectorisedView creates a new vectorised view from an already-allocated slice\n\/\/ of View and sets its size.\nfunc NewVectorisedView(size int, views []View) VectorisedView {\n\treturn VectorisedView{views: views, size: size}\n}\n\n\/\/ TrimFront removes the first \"count\" bytes of the vectorised view.\nfunc (vv *VectorisedView) TrimFront(count int) {\n\tfor count > 0 && len(vv.views) > 0 {\n\t\tif count < len(vv.views[0]) {\n\t\t\tvv.size -= count\n\t\t\tvv.views[0].TrimFront(count)\n\t\t\treturn\n\t\t}\n\t\tcount -= len(vv.views[0])\n\t\tvv.RemoveFirst()\n\t}\n}\n\n\/\/ CapLength irreversibly reduces the length of the vectorised view.\nfunc (vv *VectorisedView) CapLength(length int) {\n\tif length < 0 {\n\t\tlength = 0\n\t}\n\tif vv.size < length {\n\t\treturn\n\t}\n\tvv.size = length\n\tfor i := range vv.views {\n\t\tv := &vv.views[i]\n\t\tif len(*v) >= length {\n\t\t\tif length == 0 {\n\t\t\t\tvv.views = vv.views[:i]\n\t\t\t} else {\n\t\t\t\tv.CapLength(length)\n\t\t\t\tvv.views = vv.views[:i+1]\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tlength -= len(*v)\n\t}\n}\n\n\/\/ Clone returns a clone of this VectorisedView.\n\/\/ If the buffer argument is large enough to contain all the Views of this VectorisedView,\n\/\/ the method will avoid allocations and use the buffer to store the Views of the clone.\nfunc (vv *VectorisedView) Clone(buffer []View) VectorisedView {\n\tvar views []View\n\tif len(buffer) >= len(vv.views) {\n\t\tviews = buffer[:len(vv.views)]\n\t} else {\n\t\tviews = make([]View, len(vv.views))\n\t}\n\tfor i, v := range vv.views {\n\t\tviews[i] = v\n\t}\n\treturn VectorisedView{views: views, size: vv.size}\n}\n\n\/\/ First returns the first view of the vectorised view.\n\/\/ It panics if the vectorised view is empty.\nfunc (vv *VectorisedView) First() View {\n\tif len(vv.views) == 0 {\n\t\tpanic(\"vview is empty\")\n\t}\n\treturn vv.views[0]\n}\n\n\/\/ RemoveFirst removes the first view of the vectorised view.\nfunc (vv *VectorisedView) RemoveFirst() {\n\tif len(vv.views) == 0 {\n\t\treturn\n\t}\n\tvv.size -= len(vv.views[0])\n\tvv.views = vv.views[1:]\n}\n\n\/\/ SetSize unsafely sets the size of the VectorisedView.\nfunc (vv *VectorisedView) SetSize(size int) {\n\tvv.size = size\n}\n\n\/\/ SetViews unsafely sets the views of the VectorisedView.\nfunc (vv *VectorisedView) SetViews(views []View) {\n\tvv.views = views\n}\n\n\/\/ Size returns the size in bytes of the entire content stored in the vectorised view.\nfunc (vv *VectorisedView) Size() int {\n\treturn vv.size\n}\n\n\/\/ ToView returns the a single view containing the content of the vectorised view.\nfunc (vv *VectorisedView) ToView() View {\n\tv := make([]byte, vv.size)\n\tu := v\n\tfor i := range vv.views {\n\t\tn := copy(u, vv.views[i])\n\t\tu = u[n:]\n\t}\n\treturn v\n}\n\n\/\/ Views returns the slice containing the all views.\nfunc (vv *VectorisedView) Views() []View {\n\treturn vv.views\n}\n\n\/\/ copy returns a deep-copy of the vectorised view.\n\/\/ It is an expensive method that should be used only in tests.\nfunc (vv *VectorisedView) copy() *VectorisedView {\n\tuu := &VectorisedView{\n\t\tviews: make([]View, len(vv.views)),\n\t\tsize: vv.size,\n\t}\n\tfor i, v := range vv.views {\n\t\tuu.views[i] = make(View, len(v))\n\t\tcopy(uu.views[i], v)\n\t}\n\treturn uu\n}\n<commit_msg>buffer: add ByteSlice method<commit_after>\/\/ Copyright 2016 The Netstack Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package buffer provides the implementation of a buffer view.\npackage buffer\n\n\/\/ View is a slice of a buffer, with convenience methods.\ntype View []byte\n\n\/\/ NewView allocates a new buffer and returns an initialized view that covers\n\/\/ the whole buffer.\nfunc NewView(size int) View {\n\treturn make(View, size)\n}\n\n\/\/ TrimFront removes the first \"count\" bytes from the visible section of the\n\/\/ buffer.\nfunc (v *View) TrimFront(count int) {\n\t*v = (*v)[count:]\n}\n\n\/\/ CapLength irreversibly reduces the length of the visible section of the\n\/\/ buffer to the value specified.\nfunc (v *View) CapLength(length int) {\n\t\/\/ We also set the slice cap because if we don't, one would be able to\n\t\/\/ expand the view back to include the region just excluded. We want to\n\t\/\/ prevent that to avoid potential data leak if we have uninitialized\n\t\/\/ data in excluded region.\n\t*v = (*v)[:length:length]\n}\n\n\/\/ ToVectorisedView transforms a View in a VectorisedView from an\n\/\/ already-allocated slice of View.\nfunc (v *View) ToVectorisedView(views [1]View) VectorisedView {\n\tviews[0] = *v\n\treturn NewVectorisedView(len(*v), views[:])\n}\n\n\/\/ VectorisedView is a vectorised version of View using non contigous memory.\n\/\/ It supports all the convenience methods supported by View.\ntype VectorisedView struct {\n\tviews []View\n\tsize int\n}\n\n\/\/ NewVectorisedView creates a new vectorised view from an already-allocated slice\n\/\/ of View and sets its size.\nfunc NewVectorisedView(size int, views []View) VectorisedView {\n\treturn VectorisedView{views: views, size: size}\n}\n\n\/\/ TrimFront removes the first \"count\" bytes of the vectorised view.\nfunc (vv *VectorisedView) TrimFront(count int) {\n\tfor count > 0 && len(vv.views) > 0 {\n\t\tif count < len(vv.views[0]) {\n\t\t\tvv.size -= count\n\t\t\tvv.views[0].TrimFront(count)\n\t\t\treturn\n\t\t}\n\t\tcount -= len(vv.views[0])\n\t\tvv.RemoveFirst()\n\t}\n}\n\n\/\/ CapLength irreversibly reduces the length of the vectorised view.\nfunc (vv *VectorisedView) CapLength(length int) {\n\tif length < 0 {\n\t\tlength = 0\n\t}\n\tif vv.size < length {\n\t\treturn\n\t}\n\tvv.size = length\n\tfor i := range vv.views {\n\t\tv := &vv.views[i]\n\t\tif len(*v) >= length {\n\t\t\tif length == 0 {\n\t\t\t\tvv.views = vv.views[:i]\n\t\t\t} else {\n\t\t\t\tv.CapLength(length)\n\t\t\t\tvv.views = vv.views[:i+1]\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tlength -= len(*v)\n\t}\n}\n\n\/\/ Clone returns a clone of this VectorisedView.\n\/\/ If the buffer argument is large enough to contain all the Views of this VectorisedView,\n\/\/ the method will avoid allocations and use the buffer to store the Views of the clone.\nfunc (vv *VectorisedView) Clone(buffer []View) VectorisedView {\n\tvar views []View\n\tif len(buffer) >= len(vv.views) {\n\t\tviews = buffer[:len(vv.views)]\n\t} else {\n\t\tviews = make([]View, len(vv.views))\n\t}\n\tfor i, v := range vv.views {\n\t\tviews[i] = v\n\t}\n\treturn VectorisedView{views: views, size: vv.size}\n}\n\n\/\/ First returns the first view of the vectorised view.\n\/\/ It panics if the vectorised view is empty.\nfunc (vv *VectorisedView) First() View {\n\tif len(vv.views) == 0 {\n\t\tpanic(\"vview is empty\")\n\t}\n\treturn vv.views[0]\n}\n\n\/\/ RemoveFirst removes the first view of the vectorised view.\nfunc (vv *VectorisedView) RemoveFirst() {\n\tif len(vv.views) == 0 {\n\t\treturn\n\t}\n\tvv.size -= len(vv.views[0])\n\tvv.views = vv.views[1:]\n}\n\n\/\/ SetSize unsafely sets the size of the VectorisedView.\nfunc (vv *VectorisedView) SetSize(size int) {\n\tvv.size = size\n}\n\n\/\/ SetViews unsafely sets the views of the VectorisedView.\nfunc (vv *VectorisedView) SetViews(views []View) {\n\tvv.views = views\n}\n\n\/\/ Size returns the size in bytes of the entire content stored in the vectorised view.\nfunc (vv *VectorisedView) Size() int {\n\treturn vv.size\n}\n\n\/\/ ToView returns the a single view containing the content of the vectorised view.\nfunc (vv *VectorisedView) ToView() View {\n\tv := make([]byte, vv.size)\n\tu := v\n\tfor i := range vv.views {\n\t\tn := copy(u, vv.views[i])\n\t\tu = u[n:]\n\t}\n\treturn v\n}\n\n\/\/ Views returns the slice containing the all views.\nfunc (vv *VectorisedView) Views() []View {\n\treturn vv.views\n}\n\n\/\/ ByteSlice returns a slice containing the all views as a []byte.\nfunc (vv *VectorisedView) ByteSlice() [][]byte {\n\ts := make([][]byte, len(vv.views))\n\tfor i := range vv.views {\n\t\ts[i] = []byte(vv.views[i])\n\t}\n\treturn s\n}\n\n\/\/ copy returns a deep-copy of the vectorised view.\n\/\/ It is an expensive method that should be used only in tests.\nfunc (vv *VectorisedView) copy() *VectorisedView {\n\tuu := &VectorisedView{\n\t\tviews: make([]View, len(vv.views)),\n\t\tsize: vv.size,\n\t}\n\tfor i, v := range vv.views {\n\t\tuu.views[i] = make(View, len(v))\n\t\tcopy(uu.views[i], v)\n\t}\n\treturn uu\n}\n<|endoftext|>"} {"text":"<commit_before>package cache2go\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\ntype myStruct struct {\n\tXEntry\n\tdata string\n}\n\nfunc TestCache(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b != a {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n}\n\nfunc TestCacheExpire(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(1001 * time.Millisecond)\n\tb, err = GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestCacheKeepAlive(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\ta = &myStruct{data: \"mama are mere2\"}\n\ta.XCache(\"mama2\", 2*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(500 * time.Millisecond)\n\tb.KeepAlive()\n\ttime.Sleep(1001 * time.Millisecond)\n\tb, err = GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n\tb, err = GetXCached(\"mama2\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere2\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(2001 * time.Millisecond)\n\tb, err = GetXCached(\"mama2\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestFlush(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 10*time.Second, a, nil)\n\tXFlush()\n\tb, err := GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestFlushNoTimout(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 10*time.Second, a, nil)\n\tXFlush()\n\tb, err := GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n<commit_msg>* Be a bit more lax with timer accuracy testing.<commit_after>package cache2go\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\ntype myStruct struct {\n\tXEntry\n\tdata string\n}\n\nfunc TestCache(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b != a {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n}\n\nfunc TestCacheExpire(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(1001 * time.Millisecond)\n\tb, err = GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestCacheKeepAlive(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 1*time.Second, a, nil)\n\ta = &myStruct{data: \"mama are mere2\"}\n\ta.XCache(\"mama2\", 2*time.Second, a, nil)\n\tb, err := GetXCached(\"mama\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(500 * time.Millisecond)\n\tb.KeepAlive()\n\ttime.Sleep(1001 * time.Millisecond)\n\tb, err = GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n\tb, err = GetXCached(\"mama2\")\n\tif err != nil || b == nil || b.(*myStruct).data != \"mama are mere2\" {\n\t\tt.Error(\"Error retrieving data from cache\", err)\n\t}\n\ttime.Sleep(2100 * time.Millisecond)\n\tb, err = GetXCached(\"mama2\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestFlush(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 10*time.Second, a, nil)\n\tXFlush()\n\tb, err := GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n\nfunc TestFlushNoTimout(t *testing.T) {\n\ta := &myStruct{data: \"mama are mere\"}\n\ta.XCache(\"mama\", 10*time.Second, a, nil)\n\tXFlush()\n\tb, err := GetXCached(\"mama\")\n\tif err == nil || b != nil {\n\t\tt.Error(\"Error expiring data\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package template\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/HeavyHorst\/memkv\"\n\t\"github.com\/HeavyHorst\/remco\/backends\"\n\t\"github.com\/HeavyHorst\/remco\/template\/fileutil\"\n\t\"github.com\/cloudflare\/cfssl\/log\"\n\t\"github.com\/flosch\/pongo2\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\ntype StoreConfig struct {\n\tbackends.StoreClient\n\tName string\n\tOnetime bool\n\tWatch bool\n\tPrefix string\n\tInterval int\n\tKeys []string\n\tstore *memkv.Store\n}\n\n\/\/ Resource is the representation of a parsed template resource.\ntype Resource struct {\n\tstoreClients []StoreConfig\n\tfuncMap map[string]interface{}\n\tstore memkv.Store\n\tsources []*SrcDst\n}\n\nvar ErrEmptySrc = errors.New(\"empty src template\")\n\n\/\/ NewResource creates a Resource.\nfunc NewResource(storeClients []StoreConfig, sources []*SrcDst) (*Resource, error) {\n\tif len(storeClients) == 0 {\n\t\treturn nil, errors.New(\"A valid StoreClient is required.\")\n\t}\n\n\tfor _, v := range sources {\n\t\tif v.Src == \"\" {\n\t\t\treturn nil, ErrEmptySrc\n\t\t}\n\t}\n\n\ttr := &Resource{\n\t\tstoreClients: storeClients,\n\t\tstore: memkv.New(),\n\t\tfuncMap: newFuncMap(),\n\t\tsources: sources,\n\t}\n\n\t\/\/ initialize the inidividual backend memkv Stores\n\tfor i := range tr.storeClients {\n\t\tstore := memkv.New()\n\t\ttr.storeClients[i].store = &store\n\t}\n\n\taddFuncs(tr.funcMap, tr.store.FuncMap)\n\n\treturn tr, nil\n}\n\n\/\/ NewResourceFromFlags creates a Resource from the provided commandline flags.\nfunc NewResourceFromFlags(s backends.Store, flags *flag.FlagSet, watch bool) (*Resource, error) {\n\tsrc, _ := flags.GetString(\"src\")\n\tdst, _ := flags.GetString(\"dst\")\n\tkeys, _ := flags.GetStringSlice(\"keys\")\n\tfileMode, _ := flags.GetString(\"fileMode\")\n\tprefix, _ := flags.GetString(\"prefix\")\n\treloadCmd, _ := flags.GetString(\"reload_cmd\")\n\tcheckCmd, _ := flags.GetString(\"check_cmd\")\n\tonetime, _ := flags.GetBool(\"onetime\")\n\tinterval, _ := flags.GetInt(\"interval\")\n\tUID := os.Geteuid()\n\tGID := os.Getegid()\n\n\tsd := &SrcDst{\n\t\tSrc: src,\n\t\tDst: dst,\n\t\tMode: fileMode,\n\t\tReloadCmd: reloadCmd,\n\t\tCheckCmd: checkCmd,\n\t\tGID: GID,\n\t\tUID: UID,\n\t}\n\n\tb := StoreConfig{\n\t\tStoreClient: s.Client,\n\t\tName: s.Name,\n\t\tOnetime: onetime,\n\t\tPrefix: prefix,\n\t\tWatch: watch,\n\t\tInterval: interval,\n\t\tKeys: keys,\n\t}\n\n\treturn NewResource([]StoreConfig{b}, []*SrcDst{sd})\n}\n\n\/\/ setVars sets the Vars for template resource.\nfunc (t *Resource) setVars(storeClient StoreConfig) error {\n\tvar err error\n\tlog.Debug(\"Retrieving keys from store: \" + storeClient.Name)\n\tlog.Debug(\"Key prefix set to \" + storeClient.Prefix)\n\n\tresult, err := storeClient.GetValues(appendPrefix(storeClient.Prefix, storeClient.Keys))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstoreClient.store.Purge()\n\n\tfor k, res := range result {\n\t\tstoreClient.store.Set(path.Join(\"\/\", strings.TrimPrefix(k, storeClient.Prefix)), res)\n\t}\n\n\t\/\/merge all stores\n\tt.store.Purge()\n\tfor _, v := range t.storeClients {\n\t\tfor _, kv := range v.store.GetAllKVs() {\n\t\t\tt.store.Set(kv.Key, kv.Value)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ createStageFile stages the src configuration file by processing the src\n\/\/ template and setting the desired owner, group, and mode. It also sets the\n\/\/ StageFile for the template resource.\n\/\/ It returns an error if any.\nfunc (t *Resource) createStageFileAndSync() error {\n\tfor _, s := range t.sources {\n\t\tlog.Debug(\"Using source template \" + s.Src)\n\n\t\tif !fileutil.IsFileExist(s.Src) {\n\t\t\treturn errors.New(\"Missing template: \" + s.Src)\n\t\t}\n\n\t\tlog.Debug(\"Compiling source template(pongo2) \" + s.Src)\n\t\ttmpl, err := pongo2.FromFile(s.Src)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to process template %s, %s\", s.Src, err)\n\t\t}\n\n\t\t\/\/ create TempFile in Dest directory to avoid cross-filesystem issues\n\t\ttemp, err := ioutil.TempFile(filepath.Dir(s.Dst), \".\"+filepath.Base(s.Dst))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = tmpl.ExecuteWriter(t.funcMap, temp); err != nil {\n\t\t\ttemp.Close()\n\t\t\tos.Remove(temp.Name())\n\t\t\treturn err\n\t\t}\n\n\t\tdefer temp.Close()\n\n\t\t\/\/ Set the owner, group, and mode on the stage file now to make it easier to\n\t\t\/\/ compare against the destination configuration file later.\n\t\tos.Chmod(temp.Name(), s.fileMode)\n\t\tos.Chown(temp.Name(), s.UID, s.GID)\n\t\ts.stageFile = temp\n\n\t\tif err = t.sync(s); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ sync compares the staged and dest config files and attempts to sync them\n\/\/ if they differ. sync will run a config check command if set before\n\/\/ overwriting the target config file. Finally, sync will run a reload command\n\/\/ if set to have the application or service pick up the changes.\n\/\/ It returns an error if any.\nfunc (t *Resource) sync(s *SrcDst) error {\n\tstaged := s.stageFile.Name()\n\tdefer os.Remove(staged)\n\n\tlog.Debug(\"Comparing candidate config to \" + s.Dst)\n\tok, err := fileutil.SameFile(staged, s.Dst)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tif !ok {\n\t\tlog.Info(\"Target config \" + s.Dst + \" out of sync\")\n\t\tif s.CheckCmd != \"\" {\n\t\t\tif err := s.check(staged); err != nil {\n\t\t\t\treturn errors.New(\"Config check failed: \" + err.Error())\n\t\t\t}\n\t\t}\n\t\tlog.Debug(\"Overwriting target config \" + s.Dst)\n\t\terr := os.Rename(staged, s.Dst)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"device or resource busy\") {\n\t\t\t\tlog.Debug(\"Rename failed - target is likely a mount. Trying to write instead\")\n\t\t\t\t\/\/ try to open the file and write to it\n\t\t\t\tvar contents []byte\n\t\t\t\tvar rerr error\n\t\t\t\tcontents, rerr = ioutil.ReadFile(staged)\n\t\t\t\tif rerr != nil {\n\t\t\t\t\treturn rerr\n\t\t\t\t}\n\t\t\t\terr := ioutil.WriteFile(s.Dst, contents, s.fileMode)\n\t\t\t\t\/\/ make sure owner and group match the temp file, in case the file was created with WriteFile\n\t\t\t\tos.Chown(s.Dst, s.UID, s.GID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif s.ReloadCmd != \"\" {\n\t\t\tif err := s.reload(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlog.Info(\"Target config \" + s.Dst + \" has been updated\")\n\t} else {\n\t\tlog.Debug(\"Target config \" + s.Dst + \" in sync\")\n\t}\n\treturn nil\n}\n\n\/\/ setFileMode sets the FileMode.\nfunc (t *Resource) setFileMode() error {\n\tfor _, s := range t.sources {\n\t\tif err := s.setFileMode(); err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Process is a convenience function that wraps calls to the three main tasks\n\/\/ required to keep local configuration files in sync. First we gather vars\n\/\/ from the store, then we stage a candidate configuration file, and finally sync\n\/\/ things up.\n\/\/ It returns an error if any.\nfunc (t *Resource) process(storeClient StoreConfig) error {\n\tif err := t.setFileMode(); err != nil {\n\t\treturn err\n\t}\n\tif err := t.setVars(storeClient); err != nil {\n\t\treturn err\n\t}\n\tif err := t.createStageFileAndSync(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (t *Resource) processAll() error {\n\t\/\/load all KV-pairs the first time\n\tvar err error\n\tif err = t.setFileMode(); err != nil {\n\t\treturn err\n\t}\n\tfor _, storeClient := range t.storeClients {\n\t\tif err := t.setVars(storeClient); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err = t.createStageFileAndSync(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s StoreConfig) watch(stopChan chan bool, processChan chan StoreConfig) {\n\tvar lastIndex uint64\n\tkeysPrefix := appendPrefix(s.Prefix, s.Keys)\n\n\tfor {\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\treturn\n\t\tdefault:\n\t\t\tindex, err := s.WatchPrefix(s.Prefix, keysPrefix, lastIndex, stopChan)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\t\/\/ Prevent backend errors from consuming all resources.\n\t\t\t\ttime.Sleep(time.Second * 2)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessChan <- s\n\t\t\tlastIndex = index\n\t\t}\n\t}\n}\n\nfunc (s StoreConfig) interval(stopChan chan bool, processChan chan StoreConfig) {\n\tif s.Onetime {\n\t\treturn\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\treturn\n\t\tcase <-time.After(time.Duration(s.Interval) * time.Second):\n\t\t\tprocessChan <- s\n\t\t}\n\t}\n}\n\nfunc (t *Resource) Monitor() {\n\twg := &sync.WaitGroup{}\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\tstopChan := make(chan bool)\n\tprocessChan := make(chan StoreConfig)\n\n\tdefer close(processChan)\n\n\tif err := t.processAll(); err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tfor _, sc := range t.storeClients {\n\t\twg.Add(1)\n\t\tif sc.Watch {\n\t\t\tgo func(s StoreConfig) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t\/\/t.watch(s, stopChan, processChan)\n\t\t\t\ts.watch(stopChan, processChan)\n\t\t\t}(sc)\n\t\t} else {\n\t\t\tgo func(s StoreConfig) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\ts.interval(stopChan, processChan)\n\t\t\t}(sc)\n\t\t}\n\t}\n\n\tgo func() {\n\t\t\/\/ If there is no goroutine left - quit\n\t\twg.Wait()\n\t\tsignalChan <- syscall.SIGINT\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase storeClient := <-processChan:\n\t\t\tif err := t.process(storeClient); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\tcase s := <-signalChan:\n\t\t\tlog.Info(fmt.Sprintf(\"Captured %v. Exiting...\", s))\n\t\t\tclose(stopChan)\n\t\t\t\/\/ drain processChan\n\t\t\tgo func() {\n\t\t\t\tfor range processChan {\n\t\t\t\t}\n\t\t\t}()\n\t\t\twg.Wait()\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>renamed some variables<commit_after>package template\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/HeavyHorst\/memkv\"\n\t\"github.com\/HeavyHorst\/remco\/backends\"\n\t\"github.com\/HeavyHorst\/remco\/template\/fileutil\"\n\t\"github.com\/cloudflare\/cfssl\/log\"\n\t\"github.com\/flosch\/pongo2\"\n\tflag \"github.com\/spf13\/pflag\"\n)\n\ntype StoreConfig struct {\n\tbackends.StoreClient\n\tName string\n\tOnetime bool\n\tWatch bool\n\tPrefix string\n\tInterval int\n\tKeys []string\n\tstore *memkv.Store\n}\n\n\/\/ Resource is the representation of a parsed template resource.\ntype Resource struct {\n\tstoreClients []StoreConfig\n\tfuncMap map[string]interface{}\n\tstore memkv.Store\n\tsources []*SrcDst\n}\n\nvar ErrEmptySrc = errors.New(\"empty src template\")\n\n\/\/ NewResource creates a Resource.\nfunc NewResource(storeClients []StoreConfig, sources []*SrcDst) (*Resource, error) {\n\tif len(storeClients) == 0 {\n\t\treturn nil, errors.New(\"A valid StoreClient is required.\")\n\t}\n\n\tfor _, v := range sources {\n\t\tif v.Src == \"\" {\n\t\t\treturn nil, ErrEmptySrc\n\t\t}\n\t}\n\n\ttr := &Resource{\n\t\tstoreClients: storeClients,\n\t\tstore: memkv.New(),\n\t\tfuncMap: newFuncMap(),\n\t\tsources: sources,\n\t}\n\n\t\/\/ initialize the inidividual backend memkv Stores\n\tfor i := range tr.storeClients {\n\t\tstore := memkv.New()\n\t\ttr.storeClients[i].store = &store\n\t}\n\n\taddFuncs(tr.funcMap, tr.store.FuncMap)\n\n\treturn tr, nil\n}\n\n\/\/ NewResourceFromFlags creates a Resource from the provided commandline flags.\nfunc NewResourceFromFlags(s backends.Store, flags *flag.FlagSet, watch bool) (*Resource, error) {\n\tsrc, _ := flags.GetString(\"src\")\n\tdst, _ := flags.GetString(\"dst\")\n\tkeys, _ := flags.GetStringSlice(\"keys\")\n\tfileMode, _ := flags.GetString(\"fileMode\")\n\tprefix, _ := flags.GetString(\"prefix\")\n\treloadCmd, _ := flags.GetString(\"reload_cmd\")\n\tcheckCmd, _ := flags.GetString(\"check_cmd\")\n\tonetime, _ := flags.GetBool(\"onetime\")\n\tinterval, _ := flags.GetInt(\"interval\")\n\tUID := os.Geteuid()\n\tGID := os.Getegid()\n\n\tsd := &SrcDst{\n\t\tSrc: src,\n\t\tDst: dst,\n\t\tMode: fileMode,\n\t\tReloadCmd: reloadCmd,\n\t\tCheckCmd: checkCmd,\n\t\tGID: GID,\n\t\tUID: UID,\n\t}\n\n\tb := StoreConfig{\n\t\tStoreClient: s.Client,\n\t\tName: s.Name,\n\t\tOnetime: onetime,\n\t\tPrefix: prefix,\n\t\tWatch: watch,\n\t\tInterval: interval,\n\t\tKeys: keys,\n\t}\n\n\treturn NewResource([]StoreConfig{b}, []*SrcDst{sd})\n}\n\n\/\/ setVars sets the Vars for template resource.\nfunc (t *Resource) setVars(storeClient StoreConfig) error {\n\tvar err error\n\tlog.Debug(\"Retrieving keys from store: \" + storeClient.Name)\n\tlog.Debug(\"Key prefix set to \" + storeClient.Prefix)\n\n\tresult, err := storeClient.GetValues(appendPrefix(storeClient.Prefix, storeClient.Keys))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstoreClient.store.Purge()\n\n\tfor key, value := range result {\n\t\tstoreClient.store.Set(path.Join(\"\/\", strings.TrimPrefix(key, storeClient.Prefix)), value)\n\t}\n\n\t\/\/merge all stores\n\tt.store.Purge()\n\tfor _, v := range t.storeClients {\n\t\tfor _, kv := range v.store.GetAllKVs() {\n\t\t\tt.store.Set(kv.Key, kv.Value)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ createStageFile stages the src configuration file by processing the src\n\/\/ template and setting the desired owner, group, and mode. It also sets the\n\/\/ StageFile for the template resource.\n\/\/ It returns an error if any.\nfunc (t *Resource) createStageFileAndSync() error {\n\tfor _, s := range t.sources {\n\t\tlog.Debug(\"Using source template \" + s.Src)\n\n\t\tif !fileutil.IsFileExist(s.Src) {\n\t\t\treturn errors.New(\"Missing template: \" + s.Src)\n\t\t}\n\n\t\tlog.Debug(\"Compiling source template(pongo2) \" + s.Src)\n\t\ttmpl, err := pongo2.FromFile(s.Src)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to process template %s, %s\", s.Src, err)\n\t\t}\n\n\t\t\/\/ create TempFile in Dest directory to avoid cross-filesystem issues\n\t\ttemp, err := ioutil.TempFile(filepath.Dir(s.Dst), \".\"+filepath.Base(s.Dst))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = tmpl.ExecuteWriter(t.funcMap, temp); err != nil {\n\t\t\ttemp.Close()\n\t\t\tos.Remove(temp.Name())\n\t\t\treturn err\n\t\t}\n\n\t\tdefer temp.Close()\n\n\t\t\/\/ Set the owner, group, and mode on the stage file now to make it easier to\n\t\t\/\/ compare against the destination configuration file later.\n\t\tos.Chmod(temp.Name(), s.fileMode)\n\t\tos.Chown(temp.Name(), s.UID, s.GID)\n\t\ts.stageFile = temp\n\n\t\tif err = t.sync(s); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ sync compares the staged and dest config files and attempts to sync them\n\/\/ if they differ. sync will run a config check command if set before\n\/\/ overwriting the target config file. Finally, sync will run a reload command\n\/\/ if set to have the application or service pick up the changes.\n\/\/ It returns an error if any.\nfunc (t *Resource) sync(s *SrcDst) error {\n\tstaged := s.stageFile.Name()\n\tdefer os.Remove(staged)\n\n\tlog.Debug(\"Comparing candidate config to \" + s.Dst)\n\tok, err := fileutil.SameFile(staged, s.Dst)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tif !ok {\n\t\tlog.Info(\"Target config \" + s.Dst + \" out of sync\")\n\t\tif s.CheckCmd != \"\" {\n\t\t\tif err := s.check(staged); err != nil {\n\t\t\t\treturn errors.New(\"Config check failed: \" + err.Error())\n\t\t\t}\n\t\t}\n\t\tlog.Debug(\"Overwriting target config \" + s.Dst)\n\t\terr := os.Rename(staged, s.Dst)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"device or resource busy\") {\n\t\t\t\tlog.Debug(\"Rename failed - target is likely a mount. Trying to write instead\")\n\t\t\t\t\/\/ try to open the file and write to it\n\t\t\t\tvar contents []byte\n\t\t\t\tvar rerr error\n\t\t\t\tcontents, rerr = ioutil.ReadFile(staged)\n\t\t\t\tif rerr != nil {\n\t\t\t\t\treturn rerr\n\t\t\t\t}\n\t\t\t\terr := ioutil.WriteFile(s.Dst, contents, s.fileMode)\n\t\t\t\t\/\/ make sure owner and group match the temp file, in case the file was created with WriteFile\n\t\t\t\tos.Chown(s.Dst, s.UID, s.GID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif s.ReloadCmd != \"\" {\n\t\t\tif err := s.reload(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlog.Info(\"Target config \" + s.Dst + \" has been updated\")\n\t} else {\n\t\tlog.Debug(\"Target config \" + s.Dst + \" in sync\")\n\t}\n\treturn nil\n}\n\n\/\/ setFileMode sets the FileMode.\nfunc (t *Resource) setFileMode() error {\n\tfor _, s := range t.sources {\n\t\tif err := s.setFileMode(); err != nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Process is a convenience function that wraps calls to the three main tasks\n\/\/ required to keep local configuration files in sync. First we gather vars\n\/\/ from the store, then we stage a candidate configuration file, and finally sync\n\/\/ things up.\n\/\/ It returns an error if any.\nfunc (t *Resource) process(storeClient StoreConfig) error {\n\tif err := t.setFileMode(); err != nil {\n\t\treturn err\n\t}\n\tif err := t.setVars(storeClient); err != nil {\n\t\treturn err\n\t}\n\tif err := t.createStageFileAndSync(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (t *Resource) processAll() error {\n\t\/\/load all KV-pairs the first time\n\tvar err error\n\tif err = t.setFileMode(); err != nil {\n\t\treturn err\n\t}\n\tfor _, storeClient := range t.storeClients {\n\t\tif err := t.setVars(storeClient); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err = t.createStageFileAndSync(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s StoreConfig) watch(stopChan chan bool, processChan chan StoreConfig) {\n\tvar lastIndex uint64\n\tkeysPrefix := appendPrefix(s.Prefix, s.Keys)\n\n\tfor {\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\treturn\n\t\tdefault:\n\t\t\tindex, err := s.WatchPrefix(s.Prefix, keysPrefix, lastIndex, stopChan)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\t\/\/ Prevent backend errors from consuming all resources.\n\t\t\t\ttime.Sleep(time.Second * 2)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessChan <- s\n\t\t\tlastIndex = index\n\t\t}\n\t}\n}\n\nfunc (s StoreConfig) interval(stopChan chan bool, processChan chan StoreConfig) {\n\tif s.Onetime {\n\t\treturn\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\treturn\n\t\tcase <-time.After(time.Duration(s.Interval) * time.Second):\n\t\t\tprocessChan <- s\n\t\t}\n\t}\n}\n\nfunc (t *Resource) Monitor() {\n\twg := &sync.WaitGroup{}\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\tstopChan := make(chan bool)\n\tprocessChan := make(chan StoreConfig)\n\n\tdefer close(processChan)\n\n\tif err := t.processAll(); err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tfor _, sc := range t.storeClients {\n\t\twg.Add(1)\n\t\tif sc.Watch {\n\t\t\tgo func(s StoreConfig) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t\/\/t.watch(s, stopChan, processChan)\n\t\t\t\ts.watch(stopChan, processChan)\n\t\t\t}(sc)\n\t\t} else {\n\t\t\tgo func(s StoreConfig) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\ts.interval(stopChan, processChan)\n\t\t\t}(sc)\n\t\t}\n\t}\n\n\tgo func() {\n\t\t\/\/ If there is no goroutine left - quit\n\t\twg.Wait()\n\t\tsignalChan <- syscall.SIGINT\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase storeClient := <-processChan:\n\t\t\tif err := t.process(storeClient); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\tcase s := <-signalChan:\n\t\t\tlog.Info(fmt.Sprintf(\"Captured %v. Exiting...\", s))\n\t\t\tclose(stopChan)\n\t\t\t\/\/ drain processChan\n\t\t\tgo func() {\n\t\t\t\tfor range processChan {\n\t\t\t\t}\n\t\t\t}()\n\t\t\twg.Wait()\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package game\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/playground\"\n)\n\nconst worldEventsBufferSize = 512\n\nconst worldEventsTimeout = time.Second\n\nconst worldEventsNumberLimit = 128\n\ntype World interface {\n\tObjectExists(object interface{}) bool\n\tLocationExists(location engine.Location) bool\n\tEntityExists(object interface{}, location engine.Location) bool\n\tGetObjectByLocation(location engine.Location) interface{}\n\tGetObjectByDot(dot *engine.Dot) interface{}\n\tGetEntityByDot(dot *engine.Dot) (interface{}, engine.Location)\n\tGetObjectsByDots(dots []*engine.Dot) []interface{}\n\tCreateObject(object interface{}, location engine.Location) error\n\tCreateObjectAvailableDots(object interface{}, location engine.Location) (engine.Location, *playground.ErrCreateObjectAvailableDots)\n\tDeleteObject(object interface{}, location engine.Location) *playground.ErrDeleteObject\n\tUpdateObject(object interface{}, old, new engine.Location) *playground.ErrUpdateObject\n\tUpdateObjectAvailableDots(object interface{}, old, new engine.Location) (engine.Location, *playground.ErrUpdateObjectAvailableDots)\n\tCreateObjectRandomDot(object interface{}) (engine.Location, error)\n\tCreateObjectRandomRect(object interface{}, rw, rh uint8) (engine.Location, error)\n\tNavigate(dot *engine.Dot, dir engine.Direction, dis uint8) (*engine.Dot, error)\n\tSize() uint16\n\tWidth() uint8\n\tHeight() uint8\n}\n\ntype world struct {\n\tpg *playground.Playground\n\tch chan Event\n\tchs []chan Event\n\tchsMux *sync.RWMutex\n\tstop chan struct{}\n\ttimeout time.Duration\n}\n\nfunc newWorld(pg *playground.Playground) *world {\n\treturn &world{\n\t\tpg: pg,\n\t\tch: make(chan Event, worldEventsBufferSize),\n\t\tchs: make([]chan Event, 0),\n\t\tchsMux: &sync.RWMutex{},\n\t\tstop: make(chan struct{}, 0),\n\t\ttimeout: worldEventsTimeout,\n\t}\n}\n\nfunc (w *world) run() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-w.ch:\n\t\t\t\tw.broadcast(event)\n\t\t\tcase <-w.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (w *world) broadcast(event Event) {\n\tw.chsMux.RLock()\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(w.chs))\n\tfor _, ch := range w.chs {\n\t\tgo func() {\n\t\t\tvar timer = time.NewTimer(w.timeout)\n\t\t\tselect {\n\t\t\tcase ch <- event:\n\t\t\tcase <-w.stop:\n\t\t\t\treturn\n\t\t\tcase <-timer.C:\n\t\t\t}\n\t\t\ttimer.Stop()\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\n\tw.chsMux.RUnlock()\n}\n\n\/\/ TODO: Create reset by count events in awaiting\nfunc (w *world) event(event Event) {\n\tif len(w.ch) == worldEventsBufferSize {\n\t\t\/\/ TODO: Create warning messages?\n\t\t<-w.ch\n\t}\n\tif worldEventsBufferSize == 0 {\n\t\t\/\/ TODO: Async?\n\t\tvar timer = time.NewTimer(worldEventsTimeout)\n\t\tselect {\n\t\tcase w.ch <- event:\n\t\tcase <-w.stop:\n\t\tcase <-timer.C:\n\t\t}\n\t\ttimer.Stop()\n\t} else {\n\t\tw.ch <- event\n\t}\n}\n\nfunc (w *world) RunObserver(observer interface {\n\tRun(<-chan Event)\n}) {\n\tch := make(chan Event, worldEventsBufferSize)\n\n\tw.chsMux.Lock()\n\tw.chs = append(w.chs, ch)\n\tw.chsMux.Unlock()\n\n\tobserver.Run(ch)\n\n\tw.chsMux.Lock()\n\tfor i := range w.chs {\n\t\tif w.chs[i] == ch {\n\t\t\tw.chs = append(w.chs[:i], w.chs[i+1:]...)\n\t\t\tclose(ch)\n\t\t\tbreak\n\t\t}\n\t}\n\tw.chsMux.Unlock()\n}\n\nfunc (w *world) Stop() {\n\tclose(w.stop)\n\tclose(w.ch)\n\n\tw.chsMux.Lock()\n\tdefer w.chsMux.Unlock()\n\n\tfor _, ch := range w.chs {\n\t\tclose(ch)\n\t}\n\n\tw.chs = w.chs[:0]\n}\n\nfunc (w *world) ObjectExists(object interface{}) bool {\n\treturn w.pg.ObjectExists(object)\n}\n\nfunc (w *world) LocationExists(location engine.Location) bool {\n\treturn w.pg.LocationExists(location)\n}\n\nfunc (w *world) EntityExists(object interface{}, location engine.Location) bool {\n\treturn w.pg.EntityExists(object, location)\n}\n\nfunc (w *world) GetObjectByLocation(location engine.Location) interface{} {\n\tif object := w.pg.GetObjectByLocation(location); object != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object\n\t}\n\treturn nil\n\n}\n\nfunc (w *world) GetObjectByDot(dot *engine.Dot) interface{} {\n\tif object := w.pg.GetObjectByDot(dot); object != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object\n\t}\n\treturn nil\n}\n\nfunc (w *world) GetEntityByDot(dot *engine.Dot) (interface{}, engine.Location) {\n\tif object, location := w.pg.GetEntityByDot(dot); object != nil && !location.Empty() {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object, location\n\t}\n\treturn nil, nil\n}\n\nfunc (w *world) GetObjectsByDots(dots []*engine.Dot) []interface{} {\n\tif objects := w.pg.GetObjectsByDots(dots); len(objects) > 0 {\n\t\tfor _, object := range objects {\n\t\t\tw.event(Event{\n\t\t\t\tType: EventTypeObjectChecked,\n\t\t\t\tPayload: object,\n\t\t\t})\n\t\t}\n\t\treturn objects\n\t}\n\treturn nil\n}\n\nfunc (w *world) CreateObject(object interface{}, location engine.Location) error {\n\tif err := w.pg.CreateObject(object, location); err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn nil\n}\n\nfunc (w *world) CreateObjectAvailableDots(object interface{}, location engine.Location) (engine.Location, *playground.ErrCreateObjectAvailableDots) {\n\tlocation, err := w.pg.CreateObjectAvailableDots(object, location)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) DeleteObject(object interface{}, location engine.Location) *playground.ErrDeleteObject {\n\terr := w.pg.DeleteObject(object, location)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectDelete,\n\t\tPayload: object,\n\t})\n\treturn err\n}\n\nfunc (w *world) UpdateObject(object interface{}, old, new engine.Location) *playground.ErrUpdateObject {\n\tif err := w.pg.UpdateObject(object, old, new); err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectUpdate,\n\t\tPayload: object,\n\t})\n\treturn nil\n}\n\nfunc (w *world) UpdateObjectAvailableDots(object interface{}, old, new engine.Location) (engine.Location, *playground.ErrUpdateObjectAvailableDots) {\n\tlocation, err := w.pg.UpdateObjectAvailableDots(object, old, new)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectUpdate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) CreateObjectRandomDot(object interface{}) (engine.Location, error) {\n\tlocation, err := w.pg.CreateObjectRandomDot(object)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) CreateObjectRandomRect(object interface{}, rw, rh uint8) (engine.Location, error) {\n\tlocation, err := w.pg.CreateObjectRandomRect(object, rw, rh)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) Navigate(dot *engine.Dot, dir engine.Direction, dis uint8) (*engine.Dot, error) {\n\treturn w.pg.Navigate(dot, dir, dis)\n}\n\nfunc (w *world) Size() uint16 {\n\treturn w.pg.Size()\n}\n\nfunc (w *world) Width() uint8 {\n\treturn w.pg.Width()\n}\n\nfunc (w *world) Height() uint8 {\n\treturn w.pg.Height()\n}\n<commit_msg>Refactor world event emitter<commit_after>package game\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/playground\"\n)\n\nconst worldEventsBufferSize = 512\n\nconst worldEventsTimeout = time.Second\n\nconst worldEventsNumberLimit = 128\n\ntype World interface {\n\tObjectExists(object interface{}) bool\n\tLocationExists(location engine.Location) bool\n\tEntityExists(object interface{}, location engine.Location) bool\n\tGetObjectByLocation(location engine.Location) interface{}\n\tGetObjectByDot(dot *engine.Dot) interface{}\n\tGetEntityByDot(dot *engine.Dot) (interface{}, engine.Location)\n\tGetObjectsByDots(dots []*engine.Dot) []interface{}\n\tCreateObject(object interface{}, location engine.Location) error\n\tCreateObjectAvailableDots(object interface{}, location engine.Location) (engine.Location, *playground.ErrCreateObjectAvailableDots)\n\tDeleteObject(object interface{}, location engine.Location) *playground.ErrDeleteObject\n\tUpdateObject(object interface{}, old, new engine.Location) *playground.ErrUpdateObject\n\tUpdateObjectAvailableDots(object interface{}, old, new engine.Location) (engine.Location, *playground.ErrUpdateObjectAvailableDots)\n\tCreateObjectRandomDot(object interface{}) (engine.Location, error)\n\tCreateObjectRandomRect(object interface{}, rw, rh uint8) (engine.Location, error)\n\tNavigate(dot *engine.Dot, dir engine.Direction, dis uint8) (*engine.Dot, error)\n\tSize() uint16\n\tWidth() uint8\n\tHeight() uint8\n}\n\ntype world struct {\n\tpg *playground.Playground\n\tch chan Event\n\tchs []chan Event\n\tchsMux *sync.RWMutex\n\tstop chan struct{}\n\ttimeout time.Duration\n}\n\nfunc newWorld(pg *playground.Playground) *world {\n\treturn &world{\n\t\tpg: pg,\n\t\tch: make(chan Event, worldEventsBufferSize),\n\t\tchs: make([]chan Event, 0),\n\t\tchsMux: &sync.RWMutex{},\n\t\tstop: make(chan struct{}, 0),\n\t\ttimeout: worldEventsTimeout,\n\t}\n}\n\nfunc (w *world) run() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-w.ch:\n\t\t\t\tw.broadcast(event)\n\t\t\tcase <-w.stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (w *world) broadcast(event Event) {\n\tw.chsMux.RLock()\n\n\tfor _, ch := range w.chs {\n\t\tw.sendEvent(ch, event, worldEventsTimeout)\n\t}\n\n\tw.chsMux.RUnlock()\n}\n\nfunc (w *world) sendEvent(ch chan Event, event Event, timeout time.Duration) {\n\tvar timer = time.NewTimer(timeout)\n\tdefer timer.Stop()\n\tif cap(ch) == 0 {\n\t\tselect {\n\t\tcase ch <- event:\n\t\tcase <-w.stop:\n\t\tcase <-timer.C:\n\t\t}\n\t} else {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ch <- event:\n\t\t\t\treturn\n\t\t\tcase <-w.stop:\n\t\t\t\treturn\n\t\t\tcase <-timer.C:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tif len(ch) == cap(ch) {\n\t\t\t\t\t<-ch\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *world) event(event Event) {\n\tw.sendEvent(w.ch, event, worldEventsTimeout)\n}\n\nfunc (w *world) RunObserver(observer interface {\n\tRun(<-chan Event)\n}) {\n\tch := make(chan Event, worldEventsBufferSize)\n\n\tw.chsMux.Lock()\n\tw.chs = append(w.chs, ch)\n\tw.chsMux.Unlock()\n\n\tobserver.Run(ch)\n\n\tw.chsMux.Lock()\n\tfor i := range w.chs {\n\t\tif w.chs[i] == ch {\n\t\t\tw.chs = append(w.chs[:i], w.chs[i+1:]...)\n\t\t\tclose(ch)\n\t\t\tbreak\n\t\t}\n\t}\n\tw.chsMux.Unlock()\n}\n\nfunc (w *world) Stop() {\n\tclose(w.stop)\n\tclose(w.ch)\n\n\tw.chsMux.Lock()\n\tdefer w.chsMux.Unlock()\n\n\tfor _, ch := range w.chs {\n\t\tclose(ch)\n\t}\n\n\tw.chs = w.chs[:0]\n}\n\nfunc (w *world) ObjectExists(object interface{}) bool {\n\treturn w.pg.ObjectExists(object)\n}\n\nfunc (w *world) LocationExists(location engine.Location) bool {\n\treturn w.pg.LocationExists(location)\n}\n\nfunc (w *world) EntityExists(object interface{}, location engine.Location) bool {\n\treturn w.pg.EntityExists(object, location)\n}\n\nfunc (w *world) GetObjectByLocation(location engine.Location) interface{} {\n\tif object := w.pg.GetObjectByLocation(location); object != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object\n\t}\n\treturn nil\n\n}\n\nfunc (w *world) GetObjectByDot(dot *engine.Dot) interface{} {\n\tif object := w.pg.GetObjectByDot(dot); object != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object\n\t}\n\treturn nil\n}\n\nfunc (w *world) GetEntityByDot(dot *engine.Dot) (interface{}, engine.Location) {\n\tif object, location := w.pg.GetEntityByDot(dot); object != nil && !location.Empty() {\n\t\tw.event(Event{\n\t\t\tType: EventTypeObjectChecked,\n\t\t\tPayload: object,\n\t\t})\n\t\treturn object, location\n\t}\n\treturn nil, nil\n}\n\nfunc (w *world) GetObjectsByDots(dots []*engine.Dot) []interface{} {\n\tif objects := w.pg.GetObjectsByDots(dots); len(objects) > 0 {\n\t\tfor _, object := range objects {\n\t\t\tw.event(Event{\n\t\t\t\tType: EventTypeObjectChecked,\n\t\t\t\tPayload: object,\n\t\t\t})\n\t\t}\n\t\treturn objects\n\t}\n\treturn nil\n}\n\nfunc (w *world) CreateObject(object interface{}, location engine.Location) error {\n\tif err := w.pg.CreateObject(object, location); err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn nil\n}\n\nfunc (w *world) CreateObjectAvailableDots(object interface{}, location engine.Location) (engine.Location, *playground.ErrCreateObjectAvailableDots) {\n\tlocation, err := w.pg.CreateObjectAvailableDots(object, location)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) DeleteObject(object interface{}, location engine.Location) *playground.ErrDeleteObject {\n\terr := w.pg.DeleteObject(object, location)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectDelete,\n\t\tPayload: object,\n\t})\n\treturn err\n}\n\nfunc (w *world) UpdateObject(object interface{}, old, new engine.Location) *playground.ErrUpdateObject {\n\tif err := w.pg.UpdateObject(object, old, new); err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectUpdate,\n\t\tPayload: object,\n\t})\n\treturn nil\n}\n\nfunc (w *world) UpdateObjectAvailableDots(object interface{}, old, new engine.Location) (engine.Location, *playground.ErrUpdateObjectAvailableDots) {\n\tlocation, err := w.pg.UpdateObjectAvailableDots(object, old, new)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectUpdate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) CreateObjectRandomDot(object interface{}) (engine.Location, error) {\n\tlocation, err := w.pg.CreateObjectRandomDot(object)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) CreateObjectRandomRect(object interface{}, rw, rh uint8) (engine.Location, error) {\n\tlocation, err := w.pg.CreateObjectRandomRect(object, rw, rh)\n\tif err != nil {\n\t\tw.event(Event{\n\t\t\tType: EventTypeError,\n\t\t\tPayload: err,\n\t\t})\n\t\treturn nil, err\n\t}\n\tw.event(Event{\n\t\tType: EventTypeObjectCreate,\n\t\tPayload: object,\n\t})\n\treturn location, err\n}\n\nfunc (w *world) Navigate(dot *engine.Dot, dir engine.Direction, dis uint8) (*engine.Dot, error) {\n\treturn w.pg.Navigate(dot, dir, dis)\n}\n\nfunc (w *world) Size() uint16 {\n\treturn w.pg.Size()\n}\n\nfunc (w *world) Width() uint8 {\n\treturn w.pg.Width()\n}\n\nfunc (w *world) Height() uint8 {\n\treturn w.pg.Height()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package jsonrpc implements JSON-RPC 2.0 protocol for Neptulon framework.\npackage jsonrpc\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\n\t\"github.com\/nbusy\/neptulon\"\n)\n\n\/\/ App is a Neptulon JSON-RPC app.\ntype App struct {\n\tneptulon *neptulon.App\n\treqMiddleware []func(ctx *ReqContext)\n\tnotMiddleware []func(ctx *NotContext)\n\tresMiddleware []func(ctx *ResContext)\n}\n\n\/\/ NewApp creates a Neptulon JSON-RPC app.\nfunc NewApp(n *neptulon.App) (*App, error) {\n\ta := App{neptulon: n}\n\tn.Middleware(a.neptulonMiddleware)\n\treturn &a, nil\n}\n\n\/\/ ReqMiddleware registers a new request middleware to handle incoming requests.\nfunc (a *App) ReqMiddleware(reqMiddleware func(ctx *ReqContext)) {\n\ta.reqMiddleware = append(a.reqMiddleware, reqMiddleware)\n}\n\n\/\/ NotMiddleware registers a new notification middleware to handle incoming notifications.\nfunc (a *App) NotMiddleware(notMiddleware func(ctx *NotContext)) {\n\ta.notMiddleware = append(a.notMiddleware, notMiddleware)\n}\n\n\/\/ ResMiddleware registers a new response middleware to handle incoming responses.\nfunc (a *App) ResMiddleware(resMiddleware func(ctx *ResContext)) {\n\ta.resMiddleware = append(a.resMiddleware, resMiddleware)\n}\n\n\/\/ Send sends a message throught the connection denoted by the connection ID.\nfunc (a *App) Send(connID string, msg interface{}) {\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t}\n\n\terr = a.neptulon.Send(connID, data)\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored sending JSON-RPC message:\", err)\n\t}\n}\n\nfunc (a *App) neptulonMiddleware(conn *neptulon.Conn, msg []byte) []byte {\n\tvar m message\n\tif err := json.Unmarshal(msg, &m); err != nil {\n\t\tlog.Fatalln(\"Cannot deserialize incoming message:\", err)\n\t}\n\n\t\/\/ if incoming message is a request or response\n\tif m.ID != \"\" {\n\t\t\/\/ if incoming message is a request\n\t\tif m.Method != \"\" {\n\t\t\tctx := ReqContext{Conn: conn, Req: &Request{ID: m.ID, Method: m.Method, Params: m.Params}}\n\t\t\tfor _, mid := range a.reqMiddleware {\n\t\t\t\tmid(&ctx)\n\t\t\t\tif ctx.Done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ctx.Res != nil || ctx.ResErr != nil {\n\t\t\t\tdata, err := json.Marshal(Response{ID: m.ID, Result: ctx.Res, Error: ctx.ResErr})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t\t\t\t}\n\n\t\t\t\treturn data\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ if incoming message is a response\n\t\tctx := ResContext{Conn: conn, Res: &Response{ID: m.ID, Result: m.Result, Error: m.Error}}\n\t\tfor _, mid := range a.resMiddleware {\n\t\t\tmid(&ctx)\n\t\t\tif ctx.Done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ if incoming message is a notification\n\tif m.Method != \"\" {\n\t\tctx := NotContext{Conn: conn, Not: &Notification{Method: m.Method, Params: m.Params}}\n\t\tfor _, mid := range a.notMiddleware {\n\t\t\tmid(&ctx)\n\t\t\tif ctx.Done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ if incoming message is none of the above\n\tdata, err := json.Marshal(Notification{Method: \"invalidMessage\"})\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t}\n\n\treturn data\n\t\/\/ todo: close conn\n}\n<commit_msg>setting response now short circuits the request middleware<commit_after>\/\/ Package jsonrpc implements JSON-RPC 2.0 protocol for Neptulon framework.\npackage jsonrpc\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\n\t\"github.com\/nbusy\/neptulon\"\n)\n\n\/\/ App is a Neptulon JSON-RPC app.\ntype App struct {\n\tneptulon *neptulon.App\n\treqMiddleware []func(ctx *ReqContext)\n\tnotMiddleware []func(ctx *NotContext)\n\tresMiddleware []func(ctx *ResContext)\n}\n\n\/\/ NewApp creates a Neptulon JSON-RPC app.\nfunc NewApp(n *neptulon.App) (*App, error) {\n\ta := App{neptulon: n}\n\tn.Middleware(a.neptulonMiddleware)\n\treturn &a, nil\n}\n\n\/\/ ReqMiddleware registers a new request middleware to handle incoming requests.\nfunc (a *App) ReqMiddleware(reqMiddleware func(ctx *ReqContext)) {\n\ta.reqMiddleware = append(a.reqMiddleware, reqMiddleware)\n}\n\n\/\/ NotMiddleware registers a new notification middleware to handle incoming notifications.\nfunc (a *App) NotMiddleware(notMiddleware func(ctx *NotContext)) {\n\ta.notMiddleware = append(a.notMiddleware, notMiddleware)\n}\n\n\/\/ ResMiddleware registers a new response middleware to handle incoming responses.\nfunc (a *App) ResMiddleware(resMiddleware func(ctx *ResContext)) {\n\ta.resMiddleware = append(a.resMiddleware, resMiddleware)\n}\n\n\/\/ Send sends a message throught the connection denoted by the connection ID.\nfunc (a *App) Send(connID string, msg interface{}) {\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t}\n\n\terr = a.neptulon.Send(connID, data)\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored sending JSON-RPC message:\", err)\n\t}\n}\n\nfunc (a *App) neptulonMiddleware(conn *neptulon.Conn, msg []byte) []byte {\n\tvar m message\n\tif err := json.Unmarshal(msg, &m); err != nil {\n\t\tlog.Fatalln(\"Cannot deserialize incoming message:\", err)\n\t}\n\n\t\/\/ if incoming message is a request or response\n\tif m.ID != \"\" {\n\t\t\/\/ if incoming message is a request\n\t\tif m.Method != \"\" {\n\t\t\tctx := ReqContext{Conn: conn, Req: &Request{ID: m.ID, Method: m.Method, Params: m.Params}}\n\t\t\tfor _, mid := range a.reqMiddleware {\n\t\t\t\tmid(&ctx)\n\t\t\t\tif ctx.Done || ctx.Res != nil || ctx.ResErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ctx.Res != nil || ctx.ResErr != nil {\n\t\t\t\tdata, err := json.Marshal(Response{ID: m.ID, Result: ctx.Res, Error: ctx.ResErr})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t\t\t\t}\n\n\t\t\t\treturn data\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ if incoming message is a response\n\t\tctx := ResContext{Conn: conn, Res: &Response{ID: m.ID, Result: m.Result, Error: m.Error}}\n\t\tfor _, mid := range a.resMiddleware {\n\t\t\tmid(&ctx)\n\t\t\tif ctx.Done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ if incoming message is a notification\n\tif m.Method != \"\" {\n\t\tctx := NotContext{Conn: conn, Not: &Notification{Method: m.Method, Params: m.Params}}\n\t\tfor _, mid := range a.notMiddleware {\n\t\t\tmid(&ctx)\n\t\t\tif ctx.Done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t\/\/ if incoming message is none of the above\n\tdata, err := json.Marshal(Notification{Method: \"invalidMessage\"})\n\tif err != nil {\n\t\tlog.Fatalln(\"Errored while serializing JSON-RPC response:\", err)\n\t}\n\n\treturn data\n\t\/\/ todo: close conn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage run\n\nimport (\n\tgocontext \"context\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\"\n\t\"github.com\/containerd\/containerd\/contrib\/nvidia\"\n\t\"github.com\/containerd\/containerd\/contrib\/seccomp\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/runc\/options\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar platformRunFlags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"runc-binary\",\n\t\tUsage: \"specify runc-compatible binary\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"runc-systemd-cgroup\",\n\t\tUsage: \"start runc with systemd cgroup manager\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"uidmap\",\n\t\tUsage: \"run inside a user namespace with the specified UID mapping range; specified with the format `container-uid:host-uid:length`\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"gidmap\",\n\t\tUsage: \"run inside a user namespace with the specified GID mapping range; specified with the format `container-gid:host-gid:length`\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"remap-labels\",\n\t\tUsage: \"provide the user namespace ID remapping to the snapshotter via label options; requires snapshotter support\",\n\t},\n\tcli.Float64Flag{\n\t\tName: \"cpus\",\n\t\tUsage: \"set the CFS cpu qouta\",\n\t\tValue: 0.0,\n\t},\n}\n\n\/\/ NewContainer creates a new container\nfunc NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {\n\tvar (\n\t\tid string\n\t\tconfig = context.IsSet(\"config\")\n\t)\n\tif config {\n\t\tid = context.Args().First()\n\t} else {\n\t\tid = context.Args().Get(1)\n\t}\n\n\tvar (\n\t\topts []oci.SpecOpts\n\t\tcOpts []containerd.NewContainerOpts\n\t\tspec containerd.NewContainerOpts\n\t)\n\n\tcOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice(\"label\"))))\n\tif config {\n\t\topts = append(opts, oci.WithSpecFromFile(context.String(\"config\")))\n\t} else {\n\t\tvar (\n\t\t\tref = context.Args().First()\n\t\t\t\/\/for container's id is Args[1]\n\t\t\targs = context.Args()[2:]\n\t\t)\n\t\topts = append(opts, oci.WithDefaultSpec(), oci.WithDefaultUnixDevices)\n\t\tif ef := context.String(\"env-file\"); ef != \"\" {\n\t\t\topts = append(opts, oci.WithEnvFile(ef))\n\t\t}\n\t\topts = append(opts, oci.WithEnv(context.StringSlice(\"env\")))\n\t\topts = append(opts, withMounts(context))\n\n\t\tif context.Bool(\"rootfs\") {\n\t\t\trootfs, err := filepath.Abs(ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\topts = append(opts, oci.WithRootFSPath(rootfs))\n\t\t} else {\n\t\t\tsnapshotter := context.String(\"snapshotter\")\n\t\t\tvar image containerd.Image\n\t\t\ti, err := client.ImageService().Get(ctx, ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif ps := context.String(\"platform\"); ps != \"\" {\n\t\t\t\tplatform, err := platforms.Parse(ps)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\timage = containerd.NewImageWithPlatform(client, i, platforms.Only(platform))\n\t\t\t} else {\n\t\t\t\timage = containerd.NewImage(client, i)\n\t\t\t}\n\n\t\t\tunpacked, err := image.IsUnpacked(ctx, snapshotter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !unpacked {\n\t\t\t\tif err := image.Unpack(ctx, snapshotter); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\topts = append(opts, oci.WithImageConfig(image))\n\t\t\tcOpts = append(cOpts,\n\t\t\t\tcontainerd.WithImage(image),\n\t\t\t\tcontainerd.WithSnapshotter(snapshotter))\n\t\t\tif uidmap, gidmap := context.String(\"uidmap\"), context.String(\"gidmap\"); uidmap != \"\" && gidmap != \"\" {\n\t\t\t\tuidMap, err := parseIDMapping(uidmap)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tgidMap, err := parseIDMapping(gidmap)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\topts = append(opts,\n\t\t\t\t\toci.WithUserNamespace([]specs.LinuxIDMapping{uidMap}, []specs.LinuxIDMapping{gidMap}))\n\t\t\t\t\/\/ use snapshotter opts or the remapped snapshot support to shift the filesystem\n\t\t\t\t\/\/ currently the only snapshotter known to support the labels is fuse-overlayfs:\n\t\t\t\t\/\/ https:\/\/github.com\/AkihiroSuda\/containerd-fuse-overlayfs\n\t\t\t\tif context.Bool(\"remap-labels\") {\n\t\t\t\t\tcOpts = append(cOpts, containerd.WithNewSnapshot(id, image,\n\t\t\t\t\t\tcontainerd.WithRemapperLabels(0, uidMap.HostID, 0, gidMap.HostID, uidMap.Size)))\n\t\t\t\t} else {\n\t\t\t\t\tcOpts = append(cOpts, containerd.WithRemappedSnapshot(id, image, uidMap.HostID, gidMap.HostID))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Even when \"read-only\" is set, we don't use KindView snapshot here. (#1495)\n\t\t\t\t\/\/ We pass writable snapshot to the OCI runtime, and the runtime remounts it as read-only,\n\t\t\t\t\/\/ after creating some mount points on demand.\n\t\t\t\tcOpts = append(cOpts, containerd.WithNewSnapshot(id, image))\n\t\t\t}\n\t\t\tcOpts = append(cOpts, containerd.WithImageStopSignal(image, \"SIGTERM\"))\n\t\t}\n\t\tif context.Bool(\"read-only\") {\n\t\t\topts = append(opts, oci.WithRootFSReadonly())\n\t\t}\n\t\tif len(args) > 0 {\n\t\t\topts = append(opts, oci.WithProcessArgs(args...))\n\t\t}\n\t\tif cwd := context.String(\"cwd\"); cwd != \"\" {\n\t\t\topts = append(opts, oci.WithProcessCwd(cwd))\n\t\t}\n\t\tif context.Bool(\"tty\") {\n\t\t\topts = append(opts, oci.WithTTY)\n\t\t}\n\t\tif context.Bool(\"privileged\") {\n\t\t\topts = append(opts, oci.WithPrivileged, oci.WithAllDevicesAllowed, oci.WithHostDevices)\n\t\t}\n\t\tif context.Bool(\"net-host\") {\n\t\t\topts = append(opts, oci.WithHostNamespace(specs.NetworkNamespace), oci.WithHostHostsFile, oci.WithHostResolvconf)\n\t\t}\n\t\tif context.Bool(\"seccomp\") {\n\t\t\topts = append(opts, seccomp.WithDefaultProfile())\n\t\t}\n\t\tif cpus := context.Float64(\"cpus\"); cpus > 0.0 {\n\t\t\tvar (\n\t\t\t\tperiod = uint64(100000)\n\t\t\t\tquota = int64(cpus * 100000.0)\n\t\t\t)\n\t\t\topts = append(opts, oci.WithCPUCFS(quota, period))\n\t\t}\n\n\t\tquota := context.Int64(\"cpu-quota\")\n\t\tperiod := context.Uint64(\"cpu-period\")\n\t\tif quota != -1 || period != 0 {\n\t\t\tif cpus := context.Float64(\"cpus\"); cpus > 0.0 {\n\t\t\t\treturn nil, errors.New(\"cpus and quota\/period should be used separately\")\n\t\t\t}\n\t\t\topts = append(opts, oci.WithCPUCFS(quota, period))\n\t\t}\n\n\t\tjoinNs := context.StringSlice(\"with-ns\")\n\t\tfor _, ns := range joinNs {\n\t\t\tparts := strings.Split(ns, \":\")\n\t\t\tif len(parts) != 2 {\n\t\t\t\treturn nil, errors.New(\"joining a Linux namespace using --with-ns requires the format 'nstype:path'\")\n\t\t\t}\n\t\t\tif !validNamespace(parts[0]) {\n\t\t\t\treturn nil, errors.New(\"the Linux namespace type specified in --with-ns is not valid: \" + parts[0])\n\t\t\t}\n\t\t\topts = append(opts, oci.WithLinuxNamespace(specs.LinuxNamespace{\n\t\t\t\tType: specs.LinuxNamespaceType(parts[0]),\n\t\t\t\tPath: parts[1],\n\t\t\t}))\n\t\t}\n\t\tif context.IsSet(\"gpus\") {\n\t\t\topts = append(opts, nvidia.WithGPUs(nvidia.WithDevices(context.Int(\"gpus\")), nvidia.WithAllCapabilities))\n\t\t}\n\t\tif context.IsSet(\"allow-new-privs\") {\n\t\t\topts = append(opts, oci.WithNewPrivileges)\n\t\t}\n\t\tif context.IsSet(\"cgroup\") {\n\t\t\t\/\/ NOTE: can be set to \"\" explicitly for disabling cgroup.\n\t\t\topts = append(opts, oci.WithCgroup(context.String(\"cgroup\")))\n\t\t}\n\t\tlimit := context.Uint64(\"memory-limit\")\n\t\tif limit != 0 {\n\t\t\topts = append(opts, oci.WithMemoryLimit(limit))\n\t\t}\n\t\tfor _, dev := range context.StringSlice(\"device\") {\n\t\t\topts = append(opts, oci.WithLinuxDevice(dev, \"rwm\"))\n\t\t}\n\t}\n\n\truntimeOpts, err := getRuntimeOptions(context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcOpts = append(cOpts, containerd.WithRuntime(context.String(\"runtime\"), runtimeOpts))\n\n\topts = append(opts, oci.WithAnnotations(commands.LabelArgs(context.StringSlice(\"label\"))))\n\tvar s specs.Spec\n\tspec = containerd.WithSpec(&s, opts...)\n\n\tcOpts = append(cOpts, spec)\n\n\t\/\/ oci.WithImageConfig (WithUsername, WithUserID) depends on access to rootfs for resolving via\n\t\/\/ the \/etc\/{passwd,group} files. So cOpts needs to have precedence over opts.\n\treturn client.NewContainer(ctx, id, cOpts...)\n}\n\nfunc getRuncOptions(context *cli.Context) (*options.Options, error) {\n\truntimeOpts := &options.Options{}\n\tif runcBinary := context.String(\"runc-binary\"); runcBinary != \"\" {\n\t\truntimeOpts.BinaryName = runcBinary\n\t}\n\tif context.Bool(\"runc-systemd-cgroup\") {\n\t\tif context.String(\"cgroup\") == \"\" {\n\t\t\t\/\/ runc maps \"machine.slice:foo:deadbeef\" to \"\/machine.slice\/foo-deadbeef.scope\"\n\t\t\treturn nil, errors.New(\"option --runc-systemd-cgroup requires --cgroup to be set, e.g. \\\"machine.slice:foo:deadbeef\\\"\")\n\t\t}\n\t\truntimeOpts.SystemdCgroup = true\n\t}\n\n\treturn runtimeOpts, nil\n}\n\nfunc getRuntimeOptions(context *cli.Context) (interface{}, error) {\n\t\/\/ validate first\n\tif (context.String(\"runc-binary\") != \"\" || context.Bool(\"runc-systemd-cgroup\")) &&\n\t\tcontext.String(\"runtime\") != \"io.containerd.runc.v2\" {\n\t\treturn nil, errors.New(\"specifying runc-binary and runc-systemd-cgroup is only supported for \\\"io.containerd.runc.v2\\\" runtime\")\n\t}\n\n\tif context.String(\"runtime\") == \"io.containerd.runc.v2\" {\n\t\treturn getRuncOptions(context)\n\t}\n\n\treturn nil, nil\n}\n\nfunc getNewTaskOpts(context *cli.Context) []containerd.NewTaskOpts {\n\tvar (\n\t\ttOpts []containerd.NewTaskOpts\n\t)\n\tif context.Bool(\"no-pivot\") {\n\t\ttOpts = append(tOpts, containerd.WithNoPivotRoot)\n\t}\n\tif uidmap := context.String(\"uidmap\"); uidmap != \"\" {\n\t\tuidMap, err := parseIDMapping(uidmap)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"unable to parse uidmap; defaulting to uid 0 IO ownership\")\n\t\t}\n\t\ttOpts = append(tOpts, containerd.WithUIDOwner(uidMap.HostID))\n\t}\n\tif gidmap := context.String(\"gidmap\"); gidmap != \"\" {\n\t\tgidMap, err := parseIDMapping(gidmap)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"unable to parse gidmap; defaulting to gid 0 IO ownership\")\n\t\t}\n\t\ttOpts = append(tOpts, containerd.WithGIDOwner(gidMap.HostID))\n\t}\n\treturn tOpts\n}\n\nfunc parseIDMapping(mapping string) (specs.LinuxIDMapping, error) {\n\tparts := strings.Split(mapping, \":\")\n\tif len(parts) != 3 {\n\t\treturn specs.LinuxIDMapping{}, errors.New(\"user namespace mappings require the format `container-id:host-id:size`\")\n\t}\n\tcID, err := strconv.ParseUint(parts[0], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid container id for user namespace remapping\")\n\t}\n\thID, err := strconv.ParseUint(parts[1], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid host id for user namespace remapping\")\n\t}\n\tsize, err := strconv.ParseUint(parts[2], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid size for user namespace remapping\")\n\t}\n\treturn specs.LinuxIDMapping{\n\t\tContainerID: uint32(cID),\n\t\tHostID: uint32(hID),\n\t\tSize: uint32(size),\n\t}, nil\n}\n\nfunc validNamespace(ns string) bool {\n\tlinuxNs := specs.LinuxNamespaceType(ns)\n\tswitch linuxNs {\n\tcase specs.PIDNamespace,\n\t\tspecs.NetworkNamespace,\n\t\tspecs.UTSNamespace,\n\t\tspecs.MountNamespace,\n\t\tspecs.UserNamespace,\n\t\tspecs.IPCNamespace,\n\t\tspecs.CgroupNamespace:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>Add --runtime-root to ctr<commit_after>\/\/ +build !windows\n\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage run\n\nimport (\n\tgocontext \"context\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/containerd\/containerd\"\n\t\"github.com\/containerd\/containerd\/cmd\/ctr\/commands\"\n\t\"github.com\/containerd\/containerd\/contrib\/nvidia\"\n\t\"github.com\/containerd\/containerd\/contrib\/seccomp\"\n\t\"github.com\/containerd\/containerd\/oci\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/runc\/options\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar platformRunFlags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"runc-binary\",\n\t\tUsage: \"specify runc-compatible binary\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"runc-root\",\n\t\tUsage: \"specify runc-compatible root\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"runc-systemd-cgroup\",\n\t\tUsage: \"start runc with systemd cgroup manager\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"uidmap\",\n\t\tUsage: \"run inside a user namespace with the specified UID mapping range; specified with the format `container-uid:host-uid:length`\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"gidmap\",\n\t\tUsage: \"run inside a user namespace with the specified GID mapping range; specified with the format `container-gid:host-gid:length`\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"remap-labels\",\n\t\tUsage: \"provide the user namespace ID remapping to the snapshotter via label options; requires snapshotter support\",\n\t},\n\tcli.Float64Flag{\n\t\tName: \"cpus\",\n\t\tUsage: \"set the CFS cpu qouta\",\n\t\tValue: 0.0,\n\t},\n}\n\n\/\/ NewContainer creates a new container\nfunc NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {\n\tvar (\n\t\tid string\n\t\tconfig = context.IsSet(\"config\")\n\t)\n\tif config {\n\t\tid = context.Args().First()\n\t} else {\n\t\tid = context.Args().Get(1)\n\t}\n\n\tvar (\n\t\topts []oci.SpecOpts\n\t\tcOpts []containerd.NewContainerOpts\n\t\tspec containerd.NewContainerOpts\n\t)\n\n\tcOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice(\"label\"))))\n\tif config {\n\t\topts = append(opts, oci.WithSpecFromFile(context.String(\"config\")))\n\t} else {\n\t\tvar (\n\t\t\tref = context.Args().First()\n\t\t\t\/\/for container's id is Args[1]\n\t\t\targs = context.Args()[2:]\n\t\t)\n\t\topts = append(opts, oci.WithDefaultSpec(), oci.WithDefaultUnixDevices)\n\t\tif ef := context.String(\"env-file\"); ef != \"\" {\n\t\t\topts = append(opts, oci.WithEnvFile(ef))\n\t\t}\n\t\topts = append(opts, oci.WithEnv(context.StringSlice(\"env\")))\n\t\topts = append(opts, withMounts(context))\n\n\t\tif context.Bool(\"rootfs\") {\n\t\t\trootfs, err := filepath.Abs(ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\topts = append(opts, oci.WithRootFSPath(rootfs))\n\t\t} else {\n\t\t\tsnapshotter := context.String(\"snapshotter\")\n\t\t\tvar image containerd.Image\n\t\t\ti, err := client.ImageService().Get(ctx, ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif ps := context.String(\"platform\"); ps != \"\" {\n\t\t\t\tplatform, err := platforms.Parse(ps)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\timage = containerd.NewImageWithPlatform(client, i, platforms.Only(platform))\n\t\t\t} else {\n\t\t\t\timage = containerd.NewImage(client, i)\n\t\t\t}\n\n\t\t\tunpacked, err := image.IsUnpacked(ctx, snapshotter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !unpacked {\n\t\t\t\tif err := image.Unpack(ctx, snapshotter); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\topts = append(opts, oci.WithImageConfig(image))\n\t\t\tcOpts = append(cOpts,\n\t\t\t\tcontainerd.WithImage(image),\n\t\t\t\tcontainerd.WithSnapshotter(snapshotter))\n\t\t\tif uidmap, gidmap := context.String(\"uidmap\"), context.String(\"gidmap\"); uidmap != \"\" && gidmap != \"\" {\n\t\t\t\tuidMap, err := parseIDMapping(uidmap)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tgidMap, err := parseIDMapping(gidmap)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\topts = append(opts,\n\t\t\t\t\toci.WithUserNamespace([]specs.LinuxIDMapping{uidMap}, []specs.LinuxIDMapping{gidMap}))\n\t\t\t\t\/\/ use snapshotter opts or the remapped snapshot support to shift the filesystem\n\t\t\t\t\/\/ currently the only snapshotter known to support the labels is fuse-overlayfs:\n\t\t\t\t\/\/ https:\/\/github.com\/AkihiroSuda\/containerd-fuse-overlayfs\n\t\t\t\tif context.Bool(\"remap-labels\") {\n\t\t\t\t\tcOpts = append(cOpts, containerd.WithNewSnapshot(id, image,\n\t\t\t\t\t\tcontainerd.WithRemapperLabels(0, uidMap.HostID, 0, gidMap.HostID, uidMap.Size)))\n\t\t\t\t} else {\n\t\t\t\t\tcOpts = append(cOpts, containerd.WithRemappedSnapshot(id, image, uidMap.HostID, gidMap.HostID))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t\/\/ Even when \"read-only\" is set, we don't use KindView snapshot here. (#1495)\n\t\t\t\t\/\/ We pass writable snapshot to the OCI runtime, and the runtime remounts it as read-only,\n\t\t\t\t\/\/ after creating some mount points on demand.\n\t\t\t\tcOpts = append(cOpts, containerd.WithNewSnapshot(id, image))\n\t\t\t}\n\t\t\tcOpts = append(cOpts, containerd.WithImageStopSignal(image, \"SIGTERM\"))\n\t\t}\n\t\tif context.Bool(\"read-only\") {\n\t\t\topts = append(opts, oci.WithRootFSReadonly())\n\t\t}\n\t\tif len(args) > 0 {\n\t\t\topts = append(opts, oci.WithProcessArgs(args...))\n\t\t}\n\t\tif cwd := context.String(\"cwd\"); cwd != \"\" {\n\t\t\topts = append(opts, oci.WithProcessCwd(cwd))\n\t\t}\n\t\tif context.Bool(\"tty\") {\n\t\t\topts = append(opts, oci.WithTTY)\n\t\t}\n\t\tif context.Bool(\"privileged\") {\n\t\t\topts = append(opts, oci.WithPrivileged, oci.WithAllDevicesAllowed, oci.WithHostDevices)\n\t\t}\n\t\tif context.Bool(\"net-host\") {\n\t\t\topts = append(opts, oci.WithHostNamespace(specs.NetworkNamespace), oci.WithHostHostsFile, oci.WithHostResolvconf)\n\t\t}\n\t\tif context.Bool(\"seccomp\") {\n\t\t\topts = append(opts, seccomp.WithDefaultProfile())\n\t\t}\n\t\tif cpus := context.Float64(\"cpus\"); cpus > 0.0 {\n\t\t\tvar (\n\t\t\t\tperiod = uint64(100000)\n\t\t\t\tquota = int64(cpus * 100000.0)\n\t\t\t)\n\t\t\topts = append(opts, oci.WithCPUCFS(quota, period))\n\t\t}\n\n\t\tquota := context.Int64(\"cpu-quota\")\n\t\tperiod := context.Uint64(\"cpu-period\")\n\t\tif quota != -1 || period != 0 {\n\t\t\tif cpus := context.Float64(\"cpus\"); cpus > 0.0 {\n\t\t\t\treturn nil, errors.New(\"cpus and quota\/period should be used separately\")\n\t\t\t}\n\t\t\topts = append(opts, oci.WithCPUCFS(quota, period))\n\t\t}\n\n\t\tjoinNs := context.StringSlice(\"with-ns\")\n\t\tfor _, ns := range joinNs {\n\t\t\tparts := strings.Split(ns, \":\")\n\t\t\tif len(parts) != 2 {\n\t\t\t\treturn nil, errors.New(\"joining a Linux namespace using --with-ns requires the format 'nstype:path'\")\n\t\t\t}\n\t\t\tif !validNamespace(parts[0]) {\n\t\t\t\treturn nil, errors.New(\"the Linux namespace type specified in --with-ns is not valid: \" + parts[0])\n\t\t\t}\n\t\t\topts = append(opts, oci.WithLinuxNamespace(specs.LinuxNamespace{\n\t\t\t\tType: specs.LinuxNamespaceType(parts[0]),\n\t\t\t\tPath: parts[1],\n\t\t\t}))\n\t\t}\n\t\tif context.IsSet(\"gpus\") {\n\t\t\topts = append(opts, nvidia.WithGPUs(nvidia.WithDevices(context.Int(\"gpus\")), nvidia.WithAllCapabilities))\n\t\t}\n\t\tif context.IsSet(\"allow-new-privs\") {\n\t\t\topts = append(opts, oci.WithNewPrivileges)\n\t\t}\n\t\tif context.IsSet(\"cgroup\") {\n\t\t\t\/\/ NOTE: can be set to \"\" explicitly for disabling cgroup.\n\t\t\topts = append(opts, oci.WithCgroup(context.String(\"cgroup\")))\n\t\t}\n\t\tlimit := context.Uint64(\"memory-limit\")\n\t\tif limit != 0 {\n\t\t\topts = append(opts, oci.WithMemoryLimit(limit))\n\t\t}\n\t\tfor _, dev := range context.StringSlice(\"device\") {\n\t\t\topts = append(opts, oci.WithLinuxDevice(dev, \"rwm\"))\n\t\t}\n\t}\n\n\truntimeOpts, err := getRuntimeOptions(context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcOpts = append(cOpts, containerd.WithRuntime(context.String(\"runtime\"), runtimeOpts))\n\n\topts = append(opts, oci.WithAnnotations(commands.LabelArgs(context.StringSlice(\"label\"))))\n\tvar s specs.Spec\n\tspec = containerd.WithSpec(&s, opts...)\n\n\tcOpts = append(cOpts, spec)\n\n\t\/\/ oci.WithImageConfig (WithUsername, WithUserID) depends on access to rootfs for resolving via\n\t\/\/ the \/etc\/{passwd,group} files. So cOpts needs to have precedence over opts.\n\treturn client.NewContainer(ctx, id, cOpts...)\n}\n\nfunc getRuncOptions(context *cli.Context) (*options.Options, error) {\n\truntimeOpts := &options.Options{}\n\tif runcBinary := context.String(\"runc-binary\"); runcBinary != \"\" {\n\t\truntimeOpts.BinaryName = runcBinary\n\t}\n\tif context.Bool(\"runc-systemd-cgroup\") {\n\t\tif context.String(\"cgroup\") == \"\" {\n\t\t\t\/\/ runc maps \"machine.slice:foo:deadbeef\" to \"\/machine.slice\/foo-deadbeef.scope\"\n\t\t\treturn nil, errors.New(\"option --runc-systemd-cgroup requires --cgroup to be set, e.g. \\\"machine.slice:foo:deadbeef\\\"\")\n\t\t}\n\t\truntimeOpts.SystemdCgroup = true\n\t}\n\tif root := context.String(\"runc-root\"); root != \"\" {\n\t\truntimeOpts.Root = root\n\t}\n\n\treturn runtimeOpts, nil\n}\n\nfunc getRuntimeOptions(context *cli.Context) (interface{}, error) {\n\t\/\/ validate first\n\tif (context.String(\"runc-binary\") != \"\" || context.Bool(\"runc-systemd-cgroup\")) &&\n\t\tcontext.String(\"runtime\") != \"io.containerd.runc.v2\" {\n\t\treturn nil, errors.New(\"specifying runc-binary and runc-systemd-cgroup is only supported for \\\"io.containerd.runc.v2\\\" runtime\")\n\t}\n\n\tif context.String(\"runtime\") == \"io.containerd.runc.v2\" {\n\t\treturn getRuncOptions(context)\n\t}\n\n\treturn nil, nil\n}\n\nfunc getNewTaskOpts(context *cli.Context) []containerd.NewTaskOpts {\n\tvar (\n\t\ttOpts []containerd.NewTaskOpts\n\t)\n\tif context.Bool(\"no-pivot\") {\n\t\ttOpts = append(tOpts, containerd.WithNoPivotRoot)\n\t}\n\tif uidmap := context.String(\"uidmap\"); uidmap != \"\" {\n\t\tuidMap, err := parseIDMapping(uidmap)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"unable to parse uidmap; defaulting to uid 0 IO ownership\")\n\t\t}\n\t\ttOpts = append(tOpts, containerd.WithUIDOwner(uidMap.HostID))\n\t}\n\tif gidmap := context.String(\"gidmap\"); gidmap != \"\" {\n\t\tgidMap, err := parseIDMapping(gidmap)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"unable to parse gidmap; defaulting to gid 0 IO ownership\")\n\t\t}\n\t\ttOpts = append(tOpts, containerd.WithGIDOwner(gidMap.HostID))\n\t}\n\treturn tOpts\n}\n\nfunc parseIDMapping(mapping string) (specs.LinuxIDMapping, error) {\n\tparts := strings.Split(mapping, \":\")\n\tif len(parts) != 3 {\n\t\treturn specs.LinuxIDMapping{}, errors.New(\"user namespace mappings require the format `container-id:host-id:size`\")\n\t}\n\tcID, err := strconv.ParseUint(parts[0], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid container id for user namespace remapping\")\n\t}\n\thID, err := strconv.ParseUint(parts[1], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid host id for user namespace remapping\")\n\t}\n\tsize, err := strconv.ParseUint(parts[2], 0, 32)\n\tif err != nil {\n\t\treturn specs.LinuxIDMapping{}, errors.Wrapf(err, \"invalid size for user namespace remapping\")\n\t}\n\treturn specs.LinuxIDMapping{\n\t\tContainerID: uint32(cID),\n\t\tHostID: uint32(hID),\n\t\tSize: uint32(size),\n\t}, nil\n}\n\nfunc validNamespace(ns string) bool {\n\tlinuxNs := specs.LinuxNamespaceType(ns)\n\tswitch linuxNs {\n\tcase specs.PIDNamespace,\n\t\tspecs.NetworkNamespace,\n\t\tspecs.UTSNamespace,\n\t\tspecs.MountNamespace,\n\t\tspecs.UserNamespace,\n\t\tspecs.IPCNamespace,\n\t\tspecs.CgroupNamespace:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/mesos\/mesos-go\"\n\t\"github.com\/mesos\/mesos-go\/backoff\"\n\t\"github.com\/mesos\/mesos-go\/encoding\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\/calls\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\/events\"\n)\n\nfunc Run(cfg Config) error {\n\tlog.Printf(\"scheduler running with configuration: %+v\", cfg)\n\n\tstate, err := newInternalState(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tframeworkInfo = buildFrameworkInfo(cfg)\n\t\tsubscribe = calls.Subscribe(true, frameworkInfo)\n\t\tregistrationTokens = backoff.Notifier(1*time.Second, 15*time.Second, nil)\n\t\thandler = buildEventHandler(state)\n\t)\n\tfor !state.done {\n\t\tif frameworkInfo.GetFailoverTimeout() > 0 && state.frameworkID != \"\" {\n\t\t\tsubscribe.Subscribe.FrameworkInfo.ID = &mesos.FrameworkID{Value: state.frameworkID}\n\t\t}\n\t\t<-registrationTokens\n\t\tlog.Println(\"connecting..\")\n\t\tstate.metricsAPI.subscriptionAttempts()\n\t\tresp, opt, err := state.cli.Call(subscribe)\n\t\tfunc() {\n\t\t\tif resp != nil {\n\t\t\t\tdefer resp.Close()\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\terr = state.cli.WithTemporary(opt, func() error {\n\t\t\t\t\treturn eventLoop(state, resp.Decoder(), handler)\n\t\t\t\t})\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tstate.metricsAPI.apiErrorCount(\"subscribe\")\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"disconnected\")\n\t\t\t}\n\t\t}()\n\t}\n\treturn state.err\n}\n\n\/\/ eventLoop returns the framework ID received by mesos (if any); callers should check for a\n\/\/ framework ID regardless of whether error != nil.\nfunc eventLoop(state *internalState, eventDecoder encoding.Decoder, handler events.Handler) (err error) {\n\tstate.frameworkID = \"\"\n\tfor err == nil && !state.done {\n\t\tvar e scheduler.Event\n\t\tif err = eventDecoder.Invoke(&e); err == nil {\n\t\t\tif state.config.verbose {\n\t\t\t\tlog.Printf(\"%+v\\n\", e)\n\t\t\t}\n\t\t\terr = handler.HandleEvent(&e)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ buildEventHandler generates and returns a handler to process events received from the subscription.\nfunc buildEventHandler(state *internalState) events.Handler {\n\tcallOptions := scheduler.CallOptions{} \/\/ should be applied to every outgoing call\n\t\/\/ TODO(jdef) refactor per-event metrics as a generic, named counter; build generic functional wrapper that\n\t\/\/ increments the count and apply the wrapper to each handler.\n\treturn events.NewMux(\n\t\tevents.Handle(scheduler.Event_FAILURE, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.failuresReceived()\n\t\t\tlog.Println(\"received a FAILURE event\")\n\t\t\tf := e.GetFailure()\n\t\t\tfailure(f.ExecutorID, f.AgentID, f.Status)\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_OFFERS, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tif state.config.verbose {\n\t\t\t\tlog.Println(\"received an OFFERS event\")\n\t\t\t}\n\t\t\toffers := e.GetOffers().GetOffers()\n\t\t\tstate.metricsAPI.offersReceived.Int(len(offers))\n\t\t\tt := time.Now()\n\t\t\tresourceOffers(state, callOptions[:], offers)\n\t\t\tif state.config.summaryMetrics {\n\t\t\t\tstate.metricsAPI.processOffersLatency.Since(t)\n\t\t\t}\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_UPDATE, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.updatesReceived()\n\t\t\tstatusUpdate(state, callOptions[:], e.GetUpdate().GetStatus())\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_ERROR, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.errorsReceived()\n\t\t\t\/\/ it's recommended that we abort and re-try subscribing; setting\n\t\t\t\/\/ err here will cause the event loop to terminate and the connection\n\t\t\t\/\/ will be reset.\n\t\t\treturn fmt.Errorf(\"ERROR: \" + e.GetError().GetMessage())\n\t\t})),\n\t\tevents.Handle(scheduler.Event_SUBSCRIBED, events.HandlerFunc(func(e *scheduler.Event) (err error) {\n\t\t\tstate.metricsAPI.subscribedReceived()\n\t\t\tlog.Println(\"received a SUBSCRIBED event\")\n\t\t\tif state.frameworkID == \"\" {\n\t\t\t\tstate.frameworkID = e.GetSubscribed().GetFrameworkID().GetValue()\n\t\t\t\tif state.frameworkID == \"\" {\n\t\t\t\t\t\/\/ sanity check\n\t\t\t\t\terr = errors.New(\"mesos gave us an empty frameworkID\")\n\t\t\t\t} else {\n\t\t\t\t\tcallOptions = append(callOptions, calls.Framework(state.frameworkID))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t})),\n\t)\n} \/\/ buildEventHandler\n\nfunc failure(eid *mesos.ExecutorID, aid *mesos.AgentID, stat *int32) {\n\tif eid != nil {\n\t\t\/\/ executor failed..\n\t\tmsg := \"executor '\" + eid.Value + \"' terminated\"\n\t\tif aid != nil {\n\t\t\tmsg += \"on agent '\" + aid.Value + \"'\"\n\t\t}\n\t\tif stat != nil {\n\t\t\tmsg += \", with status=\" + strconv.Itoa(int(*stat))\n\t\t}\n\t\tlog.Println(msg)\n\t} else if aid != nil {\n\t\t\/\/ agent failed..\n\t\tlog.Println(\"agent '\" + aid.Value + \"' terminated\")\n\t}\n}\n\nfunc refuseSecondsWithJitter(d time.Duration) scheduler.CallOpt {\n\treturn calls.Filters(func(f *mesos.Filters) {\n\t\ts := time.Duration(rand.Int63n(int64(d))).Seconds()\n\t\tf.RefuseSeconds = &s\n\t})\n}\n\nfunc resourceOffers(state *internalState, callOptions scheduler.CallOptions, offers []mesos.Offer) {\n\tcallOptions = append(callOptions, refuseSecondsWithJitter(state.config.maxRefuseSeconds))\n\ttasksLaunchedThisCycle := 0\n\toffersDeclined := 0\n\tfor i := range offers {\n\t\tvar (\n\t\t\tremaining = mesos.Resources(offers[i].Resources)\n\t\t\ttasks = []mesos.TaskInfo{}\n\t\t)\n\n\t\tif state.config.verbose {\n\t\t\tlog.Println(\"received offer id '\" + offers[i].ID.Value + \"' with resources \" + remaining.String())\n\t\t}\n\n\t\tvar wantsExecutorResources mesos.Resources\n\t\tif len(offers[i].ExecutorIDs) == 0 {\n\t\t\twantsExecutorResources = mesos.Resources(state.executor.Resources)\n\t\t}\n\n\t\tflattened := remaining.Flatten()\n\n\t\t\/\/ avoid the expense of computing these if we can...\n\t\tif state.config.summaryMetrics && state.config.resourceTypeMetrics {\n\t\t\tfor name, restype := range flattened.Types() {\n\t\t\t\tif restype == mesos.SCALAR {\n\t\t\t\t\tsum := flattened.SumScalars(mesos.NamedResources(name))\n\t\t\t\t\tstate.metricsAPI.offeredResources(sum.GetValue(), name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttaskWantsResources := state.wantsTaskResources.Plus(wantsExecutorResources...)\n\t\tfor state.tasksLaunched < state.totalTasks && flattened.ContainsAll(taskWantsResources) {\n\t\t\tstate.tasksLaunched++\n\t\t\ttaskID := state.tasksLaunched\n\n\t\t\tif state.config.verbose {\n\t\t\t\tlog.Println(\"launching task \" + strconv.Itoa(taskID) + \" using offer \" + offers[i].ID.Value)\n\t\t\t}\n\n\t\t\ttask := mesos.TaskInfo{TaskID: mesos.TaskID{Value: strconv.Itoa(taskID)}}\n\t\t\ttask.Name = \"Task \" + task.TaskID.Value\n\t\t\ttask.AgentID = offers[i].AgentID\n\t\t\ttask.Executor = state.executor\n\t\t\ttask.Resources = remaining.Find(state.wantsTaskResources.Flatten(mesos.Role(state.role).Assign()))\n\n\t\t\tremaining.Subtract(task.Resources...)\n\t\t\ttasks = append(tasks, task)\n\n\t\t\tflattened = remaining.Flatten()\n\t\t}\n\n\t\t\/\/ build Accept call to launch all of the tasks we've assembled\n\t\taccept := calls.Accept(\n\t\t\tcalls.OfferWithOperations(\n\t\t\t\toffers[i].ID,\n\t\t\t\tcalls.OpLaunch(tasks...),\n\t\t\t),\n\t\t).With(callOptions...)\n\n\t\t\/\/ send Accept call to mesos\n\t\terr := state.cli.CallNoData(accept)\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"accept\")\n\t\t\tlog.Printf(\"failed to launch tasks: %+v\", err)\n\t\t} else {\n\t\t\tif n := len(tasks); n > 0 {\n\t\t\t\ttasksLaunchedThisCycle += n\n\t\t\t} else {\n\t\t\t\toffersDeclined++\n\t\t\t}\n\t\t}\n\t}\n\tstate.metricsAPI.offersDeclined.Int(offersDeclined)\n\tstate.metricsAPI.tasksLaunched.Int(tasksLaunchedThisCycle)\n\tif state.config.summaryMetrics {\n\t\tstate.metricsAPI.launchesPerOfferCycle(float64(tasksLaunchedThisCycle))\n\t}\n}\n\nfunc statusUpdate(state *internalState, callOptions scheduler.CallOptions, s mesos.TaskStatus) {\n\tmsg := \"Task \" + s.TaskID.Value + \" is in state \" + s.GetState().String()\n\tif m := s.GetMessage(); m != \"\" {\n\t\tmsg += \" with message '\" + m + \"'\"\n\t}\n\tif state.config.verbose {\n\t\tlog.Println(msg)\n\t}\n\n\tif uuid := s.GetUUID(); len(uuid) > 0 {\n\t\tack := calls.Acknowledge(\n\t\t\ts.GetAgentID().GetValue(),\n\t\t\ts.TaskID.Value,\n\t\t\tuuid,\n\t\t).With(callOptions...)\n\n\t\t\/\/ send Ack call to mesos\n\t\terr := state.cli.CallNoData(ack)\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"ack\")\n\t\t\tlog.Printf(\"failed to ack status update for task: %+v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tswitch st := s.GetState(); st {\n\tcase mesos.TASK_FINISHED:\n\t\tstate.tasksFinished++\n\t\tstate.metricsAPI.tasksFinished()\n\n\tcase mesos.TASK_LOST, mesos.TASK_KILLED, mesos.TASK_FAILED, mesos.TASK_ERROR:\n\t\tstate.err = errors.New(\"Exiting because task \" + s.GetTaskID().Value +\n\t\t\t\" is in an unexpected state \" + st.String() +\n\t\t\t\" with reason \" + s.GetReason().String() +\n\t\t\t\" from source \" + s.GetSource().String() +\n\t\t\t\" with message '\" + s.GetMessage() + \"'\")\n\t\tstate.done = true\n\t\treturn\n\t}\n\n\tif state.tasksFinished == state.totalTasks {\n\t\tlog.Println(\"mission accomplished, terminating\")\n\t\tstate.done = true\n\t} else {\n\t\ttryReviveOffers(state, callOptions)\n\t}\n}\n\nfunc tryReviveOffers(state *internalState, callOptions scheduler.CallOptions) {\n\t\/\/ limit the rate at which we request offer revival\n\tselect {\n\tcase <-state.reviveTokens:\n\t\t\/\/ not done yet, revive offers!\n\t\tstate.metricsAPI.reviveCount()\n\t\terr := state.cli.CallNoData(calls.Revive().With(callOptions...))\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"revive\")\n\t\t\tlog.Printf(\"failed to revive offers: %+v\", err)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\t\/\/ noop\n\t}\n}\n<commit_msg>make event-loop less dependent on state\/config; configure log-per-event during init phase<commit_after>package app\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/mesos\/mesos-go\"\n\t\"github.com\/mesos\/mesos-go\/backoff\"\n\t\"github.com\/mesos\/mesos-go\/encoding\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\/calls\"\n\t\"github.com\/mesos\/mesos-go\/scheduler\/events\"\n)\n\nfunc Run(cfg Config) error {\n\tlog.Printf(\"scheduler running with configuration: %+v\", cfg)\n\n\tstate, err := newInternalState(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar (\n\t\tframeworkInfo = buildFrameworkInfo(cfg)\n\t\tsubscribe = calls.Subscribe(true, frameworkInfo)\n\t\tregistrationTokens = backoff.Notifier(1*time.Second, 15*time.Second, nil)\n\t\thandler = buildEventHandler(state)\n\t)\n\tif state.config.verbose {\n\t\thandler = logAllEvents(handler)\n\t}\n\tfor !state.done {\n\t\tif frameworkInfo.GetFailoverTimeout() > 0 && state.frameworkID != \"\" {\n\t\t\tsubscribe.Subscribe.FrameworkInfo.ID = &mesos.FrameworkID{Value: state.frameworkID}\n\t\t}\n\t\t<-registrationTokens\n\t\tlog.Println(\"connecting..\")\n\t\tstate.metricsAPI.subscriptionAttempts()\n\t\tresp, opt, err := state.cli.Call(subscribe)\n\t\tfunc() {\n\t\t\tif resp != nil {\n\t\t\t\tdefer resp.Close()\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tstate.frameworkID = \"\" \/\/ we're newly (re?)subscribed, forget this\n\t\t\t\terr = state.cli.WithTemporary(opt, func() error {\n\t\t\t\t\treturn eventLoop(state, resp.Decoder(), handler)\n\t\t\t\t})\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tstate.metricsAPI.apiErrorCount(\"subscribe\")\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"disconnected\")\n\t\t\t}\n\t\t}()\n\t}\n\treturn state.err\n}\n\n\/\/ eventLoop returns the framework ID received by mesos (if any); callers should check for a\n\/\/ framework ID regardless of whether error != nil.\nfunc eventLoop(state *internalState, eventDecoder encoding.Decoder, handler events.Handler) (err error) {\n\tfor err == nil && !state.done {\n\t\tvar e scheduler.Event\n\t\tif err = eventDecoder.Invoke(&e); err == nil {\n\t\t\terr = handler.HandleEvent(&e)\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ logAllEvents logs every observed event; this is somewhat expensive to do\nfunc logAllEvents(h events.Handler) events.Handler {\n\treturn events.HandlerFunc(func(e *scheduler.Event) error {\n\t\tlog.Printf(\"%+v\\n\", *e)\n\t\treturn h.HandleEvent(e)\n\t})\n}\n\n\/\/ buildEventHandler generates and returns a handler to process events received from the subscription.\nfunc buildEventHandler(state *internalState) events.Handler {\n\tcallOptions := scheduler.CallOptions{} \/\/ should be applied to every outgoing call\n\t\/\/ TODO(jdef) refactor per-event metrics as a generic, named counter; build generic functional wrapper that\n\t\/\/ increments the count and apply the wrapper to each handler.\n\treturn events.NewMux(\n\t\tevents.Handle(scheduler.Event_FAILURE, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.failuresReceived()\n\t\t\tlog.Println(\"received a FAILURE event\")\n\t\t\tf := e.GetFailure()\n\t\t\tfailure(f.ExecutorID, f.AgentID, f.Status)\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_OFFERS, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tif state.config.verbose {\n\t\t\t\tlog.Println(\"received an OFFERS event\")\n\t\t\t}\n\t\t\toffers := e.GetOffers().GetOffers()\n\t\t\tstate.metricsAPI.offersReceived.Int(len(offers))\n\t\t\tt := time.Now()\n\t\t\tresourceOffers(state, callOptions[:], offers)\n\t\t\tif state.config.summaryMetrics {\n\t\t\t\tstate.metricsAPI.processOffersLatency.Since(t)\n\t\t\t}\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_UPDATE, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.updatesReceived()\n\t\t\tstatusUpdate(state, callOptions[:], e.GetUpdate().GetStatus())\n\t\t\treturn nil\n\t\t})),\n\t\tevents.Handle(scheduler.Event_ERROR, events.HandlerFunc(func(e *scheduler.Event) error {\n\t\t\tstate.metricsAPI.errorsReceived()\n\t\t\t\/\/ it's recommended that we abort and re-try subscribing; setting\n\t\t\t\/\/ err here will cause the event loop to terminate and the connection\n\t\t\t\/\/ will be reset.\n\t\t\treturn fmt.Errorf(\"ERROR: \" + e.GetError().GetMessage())\n\t\t})),\n\t\tevents.Handle(scheduler.Event_SUBSCRIBED, events.HandlerFunc(func(e *scheduler.Event) (err error) {\n\t\t\tstate.metricsAPI.subscribedReceived()\n\t\t\tlog.Println(\"received a SUBSCRIBED event\")\n\t\t\tif state.frameworkID == \"\" {\n\t\t\t\tstate.frameworkID = e.GetSubscribed().GetFrameworkID().GetValue()\n\t\t\t\tif state.frameworkID == \"\" {\n\t\t\t\t\t\/\/ sanity check\n\t\t\t\t\terr = errors.New(\"mesos gave us an empty frameworkID\")\n\t\t\t\t} else {\n\t\t\t\t\tcallOptions = append(callOptions, calls.Framework(state.frameworkID))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t})),\n\t)\n} \/\/ buildEventHandler\n\nfunc failure(eid *mesos.ExecutorID, aid *mesos.AgentID, stat *int32) {\n\tif eid != nil {\n\t\t\/\/ executor failed..\n\t\tmsg := \"executor '\" + eid.Value + \"' terminated\"\n\t\tif aid != nil {\n\t\t\tmsg += \"on agent '\" + aid.Value + \"'\"\n\t\t}\n\t\tif stat != nil {\n\t\t\tmsg += \", with status=\" + strconv.Itoa(int(*stat))\n\t\t}\n\t\tlog.Println(msg)\n\t} else if aid != nil {\n\t\t\/\/ agent failed..\n\t\tlog.Println(\"agent '\" + aid.Value + \"' terminated\")\n\t}\n}\n\nfunc refuseSecondsWithJitter(d time.Duration) scheduler.CallOpt {\n\treturn calls.Filters(func(f *mesos.Filters) {\n\t\ts := time.Duration(rand.Int63n(int64(d))).Seconds()\n\t\tf.RefuseSeconds = &s\n\t})\n}\n\nfunc resourceOffers(state *internalState, callOptions scheduler.CallOptions, offers []mesos.Offer) {\n\tcallOptions = append(callOptions, refuseSecondsWithJitter(state.config.maxRefuseSeconds))\n\ttasksLaunchedThisCycle := 0\n\toffersDeclined := 0\n\tfor i := range offers {\n\t\tvar (\n\t\t\tremaining = mesos.Resources(offers[i].Resources)\n\t\t\ttasks = []mesos.TaskInfo{}\n\t\t)\n\n\t\tif state.config.verbose {\n\t\t\tlog.Println(\"received offer id '\" + offers[i].ID.Value + \"' with resources \" + remaining.String())\n\t\t}\n\n\t\tvar wantsExecutorResources mesos.Resources\n\t\tif len(offers[i].ExecutorIDs) == 0 {\n\t\t\twantsExecutorResources = mesos.Resources(state.executor.Resources)\n\t\t}\n\n\t\tflattened := remaining.Flatten()\n\n\t\t\/\/ avoid the expense of computing these if we can...\n\t\tif state.config.summaryMetrics && state.config.resourceTypeMetrics {\n\t\t\tfor name, restype := range flattened.Types() {\n\t\t\t\tif restype == mesos.SCALAR {\n\t\t\t\t\tsum := flattened.SumScalars(mesos.NamedResources(name))\n\t\t\t\t\tstate.metricsAPI.offeredResources(sum.GetValue(), name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttaskWantsResources := state.wantsTaskResources.Plus(wantsExecutorResources...)\n\t\tfor state.tasksLaunched < state.totalTasks && flattened.ContainsAll(taskWantsResources) {\n\t\t\tstate.tasksLaunched++\n\t\t\ttaskID := state.tasksLaunched\n\n\t\t\tif state.config.verbose {\n\t\t\t\tlog.Println(\"launching task \" + strconv.Itoa(taskID) + \" using offer \" + offers[i].ID.Value)\n\t\t\t}\n\n\t\t\ttask := mesos.TaskInfo{TaskID: mesos.TaskID{Value: strconv.Itoa(taskID)}}\n\t\t\ttask.Name = \"Task \" + task.TaskID.Value\n\t\t\ttask.AgentID = offers[i].AgentID\n\t\t\ttask.Executor = state.executor\n\t\t\ttask.Resources = remaining.Find(state.wantsTaskResources.Flatten(mesos.Role(state.role).Assign()))\n\n\t\t\tremaining.Subtract(task.Resources...)\n\t\t\ttasks = append(tasks, task)\n\n\t\t\tflattened = remaining.Flatten()\n\t\t}\n\n\t\t\/\/ build Accept call to launch all of the tasks we've assembled\n\t\taccept := calls.Accept(\n\t\t\tcalls.OfferWithOperations(\n\t\t\t\toffers[i].ID,\n\t\t\t\tcalls.OpLaunch(tasks...),\n\t\t\t),\n\t\t).With(callOptions...)\n\n\t\t\/\/ send Accept call to mesos\n\t\terr := state.cli.CallNoData(accept)\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"accept\")\n\t\t\tlog.Printf(\"failed to launch tasks: %+v\", err)\n\t\t} else {\n\t\t\tif n := len(tasks); n > 0 {\n\t\t\t\ttasksLaunchedThisCycle += n\n\t\t\t} else {\n\t\t\t\toffersDeclined++\n\t\t\t}\n\t\t}\n\t}\n\tstate.metricsAPI.offersDeclined.Int(offersDeclined)\n\tstate.metricsAPI.tasksLaunched.Int(tasksLaunchedThisCycle)\n\tif state.config.summaryMetrics {\n\t\tstate.metricsAPI.launchesPerOfferCycle(float64(tasksLaunchedThisCycle))\n\t}\n}\n\nfunc statusUpdate(state *internalState, callOptions scheduler.CallOptions, s mesos.TaskStatus) {\n\tmsg := \"Task \" + s.TaskID.Value + \" is in state \" + s.GetState().String()\n\tif m := s.GetMessage(); m != \"\" {\n\t\tmsg += \" with message '\" + m + \"'\"\n\t}\n\tif state.config.verbose {\n\t\tlog.Println(msg)\n\t}\n\n\tif uuid := s.GetUUID(); len(uuid) > 0 {\n\t\tack := calls.Acknowledge(\n\t\t\ts.GetAgentID().GetValue(),\n\t\t\ts.TaskID.Value,\n\t\t\tuuid,\n\t\t).With(callOptions...)\n\n\t\t\/\/ send Ack call to mesos\n\t\terr := state.cli.CallNoData(ack)\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"ack\")\n\t\t\tlog.Printf(\"failed to ack status update for task: %+v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tswitch st := s.GetState(); st {\n\tcase mesos.TASK_FINISHED:\n\t\tstate.tasksFinished++\n\t\tstate.metricsAPI.tasksFinished()\n\n\tcase mesos.TASK_LOST, mesos.TASK_KILLED, mesos.TASK_FAILED, mesos.TASK_ERROR:\n\t\tstate.err = errors.New(\"Exiting because task \" + s.GetTaskID().Value +\n\t\t\t\" is in an unexpected state \" + st.String() +\n\t\t\t\" with reason \" + s.GetReason().String() +\n\t\t\t\" from source \" + s.GetSource().String() +\n\t\t\t\" with message '\" + s.GetMessage() + \"'\")\n\t\tstate.done = true\n\t\treturn\n\t}\n\n\tif state.tasksFinished == state.totalTasks {\n\t\tlog.Println(\"mission accomplished, terminating\")\n\t\tstate.done = true\n\t} else {\n\t\ttryReviveOffers(state, callOptions)\n\t}\n}\n\nfunc tryReviveOffers(state *internalState, callOptions scheduler.CallOptions) {\n\t\/\/ limit the rate at which we request offer revival\n\tselect {\n\tcase <-state.reviveTokens:\n\t\t\/\/ not done yet, revive offers!\n\t\tstate.metricsAPI.reviveCount()\n\t\terr := state.cli.CallNoData(calls.Revive().With(callOptions...))\n\t\tif err != nil {\n\t\t\tstate.metricsAPI.apiErrorCount(\"revive\")\n\t\t\tlog.Printf(\"failed to revive offers: %+v\", err)\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\t\/\/ noop\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"flag\"\n\tss \"github.com\/shadowsocks\/shadowsocks-go\/shadowsocks\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar debug ss.DebugLog\n\nvar errAddrType = errors.New(\"addr type not supported\")\n\nfunc getRequest(conn *ss.Conn) (host string, extra []byte, err error) {\n\tconst (\n\t\tidType = 0 \/\/ address type index\n\t\tidIP0 = 1 \/\/ ip addres start index\n\t\tidDmLen = 1 \/\/ domain address length index\n\t\tidDm0 = 2 \/\/ domain address start index\n\n\t\ttypeIP = 1 \/\/ type is ip address\n\t\ttypeDm = 3 \/\/ type is domain address\n\n\t\tlenIP = 1 + 4 + 2 \/\/ 1addrType + 4ip + 2port\n\t\tlenDmBase = 1 + 1 + 2 \/\/ 1addrType + 1addrLen + 2port, plus addrLen\n\t)\n\n\t\/\/ buf size should at least have the same size with the largest possible\n\t\/\/ request size (when addrType is 3, domain name has at most 256 bytes)\n\t\/\/ 1(addrType) + 1(lenByte) + 256(max length address) + 2(port)\n\tbuf := make([]byte, 260, 260)\n\tvar n int\n\t\/\/ read till we get possible domain length field\n\tss.SetReadTimeout(conn)\n\tif n, err = io.ReadAtLeast(conn, buf, idDmLen+1); err != nil {\n\t\treturn\n\t}\n\n\treqLen := lenIP\n\tif buf[idType] == typeDm {\n\t\treqLen = int(buf[idDmLen]) + lenDmBase\n\t} else if buf[idType] != typeIP {\n\t\terr = errAddrType\n\t\treturn\n\t}\n\n\tif n < reqLen { \/\/ rare case\n\t\tss.SetReadTimeout(conn)\n\t\tif _, err = io.ReadFull(conn, buf[n:reqLen]); err != nil {\n\t\t\treturn\n\t\t}\n\t} else if n > reqLen {\n\t\t\/\/ it's possible to read more than just the request head\n\t\textra = buf[reqLen:n]\n\t}\n\n\t\/\/ TODO add ipv6 support\n\tif buf[idType] == typeDm {\n\t\thost = string(buf[idDm0 : idDm0+buf[idDmLen]])\n\t} else if buf[idType] == typeIP {\n\t\taddrIp := make(net.IP, 4)\n\t\tcopy(addrIp, buf[idIP0:idIP0+4])\n\t\thost = addrIp.String()\n\t}\n\t\/\/ parse port\n\tvar port int16\n\tsb := bytes.NewBuffer(buf[reqLen-2 : reqLen])\n\tbinary.Read(sb, binary.BigEndian, &port)\n\n\thost += \":\" + strconv.Itoa(int(port))\n\treturn\n}\n\nfunc handleConnection(conn *ss.Conn) {\n\tif debug {\n\t\t\/\/ function arguments are always evaluated, so surround debug\n\t\t\/\/ statement with if statement\n\t\tdebug.Printf(\"socks connect from %s\\n\", conn.RemoteAddr().String())\n\t}\n\tdefer conn.Close()\n\n\thost, extra, err := getRequest(conn)\n\tif err != nil {\n\t\tlog.Println(\"error getting request:\", err)\n\t\treturn\n\t}\n\tdebug.Println(\"connecting\", host)\n\tremote, err := net.Dial(\"tcp\", host)\n\tif err != nil {\n\t\tdebug.Println(\"error connecting to:\", host, err)\n\t\treturn\n\t}\n\tdefer remote.Close()\n\t\/\/ write extra bytes read from \n\tif extra != nil {\n\t\tdebug.Println(\"getRequest read extra data, writing to remote, len\", len(extra))\n\t\tif _, err = remote.Write(extra); err != nil {\n\t\t\tdebug.Println(\"write request extra error:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tdebug.Println(\"piping\", host)\n\tc := make(chan byte, 2)\n\tgo ss.Pipe(conn, remote, c)\n\tgo ss.Pipe(remote, conn, c)\n\t<-c \/\/ close the other connection whenever one connection is closed\n\tdebug.Println(\"closing\", host)\n\treturn\n}\n\n\/\/ Add a encrypt table cache to save memory and startup time in case of many\n\/\/ same password.\n\/\/ If startup time becomes an issue, save the encrypt table on disk.\nvar tableCache = map[string]*ss.EncryptTable{}\nvar tableGetCnt int32\n\nfunc getTable(password string) (tbl *ss.EncryptTable) {\n\tif tableCache != nil {\n\t\tvar ok bool\n\t\ttbl, ok = tableCache[password]\n\t\tif ok {\n\t\t\tdebug.Println(\"table cache hit for password:\", password)\n\t\t\treturn\n\t\t}\n\t\ttbl = ss.GetTable(password)\n\t\ttableCache[password] = tbl\n\t} else {\n\t\ttbl = ss.GetTable(password)\n\t}\n\treturn\n}\n\ntype PortListener struct {\n\tpassword string\n\tlistener net.Listener\n}\n\ntype PasswdManager struct {\n\tsync.Mutex\n\tportListener map[string]*PortListener\n}\n\nfunc (pm *PasswdManager) add(port, password string, listener net.Listener) {\n\tpm.Lock()\n\tpm.portListener[port] = &PortListener{password, listener}\n\tpm.Unlock()\n}\n\nfunc (pm *PasswdManager) get(port string) (pl *PortListener, ok bool) {\n\tpm.Lock()\n\tpl, ok = pm.portListener[port]\n\tpm.Unlock()\n\treturn\n}\n\nfunc (pm *PasswdManager) del(port string) {\n\tpl, ok := pm.get(port)\n\tif !ok {\n\t\treturn\n\t}\n\tpl.listener.Close()\n\tpm.Lock()\n\tdelete(pm.portListener, port)\n\tpm.Unlock()\n}\n\nfunc (pm *PasswdManager) updatePortPasswd(port, password string) {\n\tpl, ok := pm.get(port)\n\tif !ok {\n\t\tlog.Printf(\"new port %s added\\n\", port)\n\t} else {\n\t\tif pl.password == password {\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"closing port %s to update password\\n\", port)\n\t\tpl.listener.Close()\n\t}\n\t\/\/ run will add the new port listener to passwdManager.\n\t\/\/ So there maybe concurrent access to passwdManager and we need lock to protect it.\n\tgo run(port, password)\n}\n\nvar passwdManager = PasswdManager{portListener: map[string]*PortListener{}}\n\nfunc updatePasswd() {\n\tlog.Println(\"updating password\")\n\tnewconfig, err := ss.ParseConfig(configFile)\n\tif err != nil {\n\t\tlog.Printf(\"error parsing config file %s to update password: %v\\n\", configFile, err)\n\t\treturn\n\t}\n\toldconfig := config\n\tconfig = newconfig\n\n\tif err = unifyPortPassword(config); err != nil {\n\t\treturn\n\t}\n\tfor port, passwd := range config.PortPassword {\n\t\tpasswdManager.updatePortPasswd(port, passwd)\n\t\tif oldconfig.PortPassword != nil {\n\t\t\tdelete(oldconfig.PortPassword, port)\n\t\t}\n\t}\n\t\/\/ port password still left in the old config should be closed\n\tfor port, _ := range oldconfig.PortPassword {\n\t\tlog.Printf(\"closing port %s as it's deleted\\n\", port)\n\t\tpasswdManager.del(port)\n\t}\n\tlog.Println(\"password updated\")\n}\n\nfunc waitSignal() {\n\tvar sigChan = make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGHUP)\n\tfor sig := range sigChan {\n\t\tif sig == syscall.SIGHUP {\n\t\t\tupdatePasswd()\n\t\t} else {\n\t\t\t\/\/ is this going to happen?\n\t\t\tlog.Printf(\"caught signal %v, exit\", sig)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n}\n\nfunc run(port, password string) {\n\tln, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Printf(\"try listening port %v: %v\\n\", port, err)\n\t\treturn\n\t}\n\tpasswdManager.add(port, password, ln)\n\tencTbl := getTable(password)\n\tatomic.AddInt32(&tableGetCnt, 1)\n\tlog.Printf(\"server listening port %v ...\\n\", port)\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ listener maybe closed to update password\n\t\t\tdebug.Printf(\"accept error: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tgo handleConnection(ss.NewConn(conn, encTbl))\n\t}\n}\n\nfunc enoughOptions(config *ss.Config) bool {\n\treturn config.ServerPort != 0 && config.Password != \"\"\n}\n\nfunc unifyPortPassword(config *ss.Config) (err error) {\n\tif len(config.PortPassword) == 0 { \/\/ this handles both nil PortPassword and empty one\n\t\tif !enoughOptions(config) {\n\t\t\tlog.Println(\"must specify both port and password\")\n\t\t\treturn errors.New(\"not enough options\")\n\t\t}\n\t\tport := strconv.Itoa(config.ServerPort)\n\t\tconfig.PortPassword = map[string]string{port: config.Password}\n\t} else {\n\t\tif config.Password != \"\" || config.ServerPort != 0 {\n\t\t\tlog.Println(\"given port_password, ignore server_port and password option\")\n\t\t}\n\t}\n\treturn\n}\n\nvar configFile string\nvar config *ss.Config\n\nfunc main() {\n\tvar cmdConfig ss.Config\n\n\tflag.StringVar(&configFile, \"c\", \"config.json\", \"specify config file\")\n\tflag.StringVar(&cmdConfig.Password, \"k\", \"\", \"password\")\n\tflag.IntVar(&cmdConfig.ServerPort, \"p\", 0, \"server port\")\n\tflag.IntVar(&cmdConfig.Timeout, \"t\", 60, \"connection timeout (in seconds)\")\n\tflag.BoolVar((*bool)(&debug), \"d\", false, \"print debug message\")\n\n\tflag.Parse()\n\n\tvar err error\n\tconfig, err = ss.ParseConfig(configFile)\n\tif err != nil {\n\t\tenough := enoughOptions(&cmdConfig)\n\t\tif !(enough && os.IsNotExist(err)) {\n\t\t\tlog.Printf(\"error reading %s: %v\\n\", configFile, err)\n\t\t}\n\t\tif !enough {\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"using all options from command line\")\n\t\tconfig = &cmdConfig\n\t} else {\n\t\tss.UpdateConfig(config, &cmdConfig)\n\t}\n\tss.SetDebug(debug)\n\n\tif err = unifyPortPassword(config); err != nil {\n\t\tos.Exit(1)\n\t}\n\tfor port, password := range config.PortPassword {\n\t\tgo run(port, password)\n\t}\n\t\/\/ Wait all ports have get it's encryption table\n\tfor int(tableGetCnt) != len(config.PortPassword) {\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tlog.Println(\"all ports ready\")\n\ttableCache = nil \/\/ release memory\n\twaitSignal()\n}\n<commit_msg>Log too many open file error.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"flag\"\n\tss \"github.com\/shadowsocks\/shadowsocks-go\/shadowsocks\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar debug ss.DebugLog\n\nvar errAddrType = errors.New(\"addr type not supported\")\n\nfunc getRequest(conn *ss.Conn) (host string, extra []byte, err error) {\n\tconst (\n\t\tidType = 0 \/\/ address type index\n\t\tidIP0 = 1 \/\/ ip addres start index\n\t\tidDmLen = 1 \/\/ domain address length index\n\t\tidDm0 = 2 \/\/ domain address start index\n\n\t\ttypeIP = 1 \/\/ type is ip address\n\t\ttypeDm = 3 \/\/ type is domain address\n\n\t\tlenIP = 1 + 4 + 2 \/\/ 1addrType + 4ip + 2port\n\t\tlenDmBase = 1 + 1 + 2 \/\/ 1addrType + 1addrLen + 2port, plus addrLen\n\t)\n\n\t\/\/ buf size should at least have the same size with the largest possible\n\t\/\/ request size (when addrType is 3, domain name has at most 256 bytes)\n\t\/\/ 1(addrType) + 1(lenByte) + 256(max length address) + 2(port)\n\tbuf := make([]byte, 260, 260)\n\tvar n int\n\t\/\/ read till we get possible domain length field\n\tss.SetReadTimeout(conn)\n\tif n, err = io.ReadAtLeast(conn, buf, idDmLen+1); err != nil {\n\t\treturn\n\t}\n\n\treqLen := lenIP\n\tif buf[idType] == typeDm {\n\t\treqLen = int(buf[idDmLen]) + lenDmBase\n\t} else if buf[idType] != typeIP {\n\t\terr = errAddrType\n\t\treturn\n\t}\n\n\tif n < reqLen { \/\/ rare case\n\t\tss.SetReadTimeout(conn)\n\t\tif _, err = io.ReadFull(conn, buf[n:reqLen]); err != nil {\n\t\t\treturn\n\t\t}\n\t} else if n > reqLen {\n\t\t\/\/ it's possible to read more than just the request head\n\t\textra = buf[reqLen:n]\n\t}\n\n\t\/\/ TODO add ipv6 support\n\tif buf[idType] == typeDm {\n\t\thost = string(buf[idDm0 : idDm0+buf[idDmLen]])\n\t} else if buf[idType] == typeIP {\n\t\taddrIp := make(net.IP, 4)\n\t\tcopy(addrIp, buf[idIP0:idIP0+4])\n\t\thost = addrIp.String()\n\t}\n\t\/\/ parse port\n\tvar port int16\n\tsb := bytes.NewBuffer(buf[reqLen-2 : reqLen])\n\tbinary.Read(sb, binary.BigEndian, &port)\n\n\thost += \":\" + strconv.Itoa(int(port))\n\treturn\n}\n\nfunc handleConnection(conn *ss.Conn) {\n\tif debug {\n\t\t\/\/ function arguments are always evaluated, so surround debug\n\t\t\/\/ statement with if statement\n\t\tdebug.Printf(\"socks connect from %s\\n\", conn.RemoteAddr().String())\n\t}\n\tdefer conn.Close()\n\n\thost, extra, err := getRequest(conn)\n\tif err != nil {\n\t\tlog.Println(\"error getting request:\", err)\n\t\treturn\n\t}\n\tdebug.Println(\"connecting\", host)\n\tremote, err := net.Dial(\"tcp\", host)\n\tif err != nil {\n\t\tif ne, ok := err.(*net.OpError); ok && (ne.Err == syscall.EMFILE || ne.Err == syscall.ENFILE) {\n\t\t\t\/\/ log too many open file error\n\t\t\t\/\/ EMFILE is process reaches open file limits, ENFILE is system limit\n\t\t\tlog.Println(\"dial error:\", err)\n\t\t} else {\n\t\t\tdebug.Println(\"error connecting to:\", host, err)\n\t\t}\n\t\treturn\n\t}\n\tdefer remote.Close()\n\t\/\/ write extra bytes read from \n\tif extra != nil {\n\t\tdebug.Println(\"getRequest read extra data, writing to remote, len\", len(extra))\n\t\tif _, err = remote.Write(extra); err != nil {\n\t\t\tdebug.Println(\"write request extra error:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tdebug.Println(\"piping\", host)\n\tc := make(chan byte, 2)\n\tgo ss.Pipe(conn, remote, c)\n\tgo ss.Pipe(remote, conn, c)\n\t<-c \/\/ close the other connection whenever one connection is closed\n\tdebug.Println(\"closing\", host)\n\treturn\n}\n\n\/\/ Add a encrypt table cache to save memory and startup time in case of many\n\/\/ same password.\n\/\/ If startup time becomes an issue, save the encrypt table on disk.\nvar tableCache = map[string]*ss.EncryptTable{}\nvar tableGetCnt int32\n\nfunc getTable(password string) (tbl *ss.EncryptTable) {\n\tif tableCache != nil {\n\t\tvar ok bool\n\t\ttbl, ok = tableCache[password]\n\t\tif ok {\n\t\t\tdebug.Println(\"table cache hit for password:\", password)\n\t\t\treturn\n\t\t}\n\t\ttbl = ss.GetTable(password)\n\t\ttableCache[password] = tbl\n\t} else {\n\t\ttbl = ss.GetTable(password)\n\t}\n\treturn\n}\n\ntype PortListener struct {\n\tpassword string\n\tlistener net.Listener\n}\n\ntype PasswdManager struct {\n\tsync.Mutex\n\tportListener map[string]*PortListener\n}\n\nfunc (pm *PasswdManager) add(port, password string, listener net.Listener) {\n\tpm.Lock()\n\tpm.portListener[port] = &PortListener{password, listener}\n\tpm.Unlock()\n}\n\nfunc (pm *PasswdManager) get(port string) (pl *PortListener, ok bool) {\n\tpm.Lock()\n\tpl, ok = pm.portListener[port]\n\tpm.Unlock()\n\treturn\n}\n\nfunc (pm *PasswdManager) del(port string) {\n\tpl, ok := pm.get(port)\n\tif !ok {\n\t\treturn\n\t}\n\tpl.listener.Close()\n\tpm.Lock()\n\tdelete(pm.portListener, port)\n\tpm.Unlock()\n}\n\nfunc (pm *PasswdManager) updatePortPasswd(port, password string) {\n\tpl, ok := pm.get(port)\n\tif !ok {\n\t\tlog.Printf(\"new port %s added\\n\", port)\n\t} else {\n\t\tif pl.password == password {\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"closing port %s to update password\\n\", port)\n\t\tpl.listener.Close()\n\t}\n\t\/\/ run will add the new port listener to passwdManager.\n\t\/\/ So there maybe concurrent access to passwdManager and we need lock to protect it.\n\tgo run(port, password)\n}\n\nvar passwdManager = PasswdManager{portListener: map[string]*PortListener{}}\n\nfunc updatePasswd() {\n\tlog.Println(\"updating password\")\n\tnewconfig, err := ss.ParseConfig(configFile)\n\tif err != nil {\n\t\tlog.Printf(\"error parsing config file %s to update password: %v\\n\", configFile, err)\n\t\treturn\n\t}\n\toldconfig := config\n\tconfig = newconfig\n\n\tif err = unifyPortPassword(config); err != nil {\n\t\treturn\n\t}\n\tfor port, passwd := range config.PortPassword {\n\t\tpasswdManager.updatePortPasswd(port, passwd)\n\t\tif oldconfig.PortPassword != nil {\n\t\t\tdelete(oldconfig.PortPassword, port)\n\t\t}\n\t}\n\t\/\/ port password still left in the old config should be closed\n\tfor port, _ := range oldconfig.PortPassword {\n\t\tlog.Printf(\"closing port %s as it's deleted\\n\", port)\n\t\tpasswdManager.del(port)\n\t}\n\tlog.Println(\"password updated\")\n}\n\nfunc waitSignal() {\n\tvar sigChan = make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGHUP)\n\tfor sig := range sigChan {\n\t\tif sig == syscall.SIGHUP {\n\t\t\tupdatePasswd()\n\t\t} else {\n\t\t\t\/\/ is this going to happen?\n\t\t\tlog.Printf(\"caught signal %v, exit\", sig)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n}\n\nfunc run(port, password string) {\n\tln, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Printf(\"try listening port %v: %v\\n\", port, err)\n\t\treturn\n\t}\n\tpasswdManager.add(port, password, ln)\n\tencTbl := getTable(password)\n\tatomic.AddInt32(&tableGetCnt, 1)\n\tlog.Printf(\"server listening port %v ...\\n\", port)\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ listener maybe closed to update password\n\t\t\tdebug.Printf(\"accept error: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tgo handleConnection(ss.NewConn(conn, encTbl))\n\t}\n}\n\nfunc enoughOptions(config *ss.Config) bool {\n\treturn config.ServerPort != 0 && config.Password != \"\"\n}\n\nfunc unifyPortPassword(config *ss.Config) (err error) {\n\tif len(config.PortPassword) == 0 { \/\/ this handles both nil PortPassword and empty one\n\t\tif !enoughOptions(config) {\n\t\t\tlog.Println(\"must specify both port and password\")\n\t\t\treturn errors.New(\"not enough options\")\n\t\t}\n\t\tport := strconv.Itoa(config.ServerPort)\n\t\tconfig.PortPassword = map[string]string{port: config.Password}\n\t} else {\n\t\tif config.Password != \"\" || config.ServerPort != 0 {\n\t\t\tlog.Println(\"given port_password, ignore server_port and password option\")\n\t\t}\n\t}\n\treturn\n}\n\nvar configFile string\nvar config *ss.Config\n\nfunc main() {\n\tvar cmdConfig ss.Config\n\n\tflag.StringVar(&configFile, \"c\", \"config.json\", \"specify config file\")\n\tflag.StringVar(&cmdConfig.Password, \"k\", \"\", \"password\")\n\tflag.IntVar(&cmdConfig.ServerPort, \"p\", 0, \"server port\")\n\tflag.IntVar(&cmdConfig.Timeout, \"t\", 60, \"connection timeout (in seconds)\")\n\tflag.BoolVar((*bool)(&debug), \"d\", false, \"print debug message\")\n\n\tflag.Parse()\n\n\tvar err error\n\tconfig, err = ss.ParseConfig(configFile)\n\tif err != nil {\n\t\tenough := enoughOptions(&cmdConfig)\n\t\tif !(enough && os.IsNotExist(err)) {\n\t\t\tlog.Printf(\"error reading %s: %v\\n\", configFile, err)\n\t\t}\n\t\tif !enough {\n\t\t\treturn\n\t\t}\n\t\tlog.Println(\"using all options from command line\")\n\t\tconfig = &cmdConfig\n\t} else {\n\t\tss.UpdateConfig(config, &cmdConfig)\n\t}\n\tss.SetDebug(debug)\n\n\tif err = unifyPortPassword(config); err != nil {\n\t\tos.Exit(1)\n\t}\n\tfor port, password := range config.PortPassword {\n\t\tgo run(port, password)\n\t}\n\t\/\/ Wait all ports have get it's encryption table\n\tfor int(tableGetCnt) != len(config.PortPassword) {\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tlog.Println(\"all ports ready\")\n\ttableCache = nil \/\/ release memory\n\twaitSignal()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gcnotifier provides a way to receive notifications after every time\n\/\/ garbage collection (GC) runs. This can be useful to instruct your code to\n\/\/ free additional memory resources that you may be using.\n\/\/\n\/\/ To minimize the load on the GC the code that runs after receiving the\n\/\/ notification should try to avoid allocations as much as possible, or at the\n\/\/ very least that the amount of new memory allocated is significantly smaller\n\/\/ than the amount of memory that has been \"freed\" by your code.\npackage gcnotifier\n\nimport \"runtime\"\n\ntype sentinel struct {\n\tgcCh chan struct{}\n}\n\n\/\/ AfterGC returns a channel that will receive a notification after every GC\n\/\/ run. If a notification is not consumed before another GC runs only one of the\n\/\/ two notifications is sent. To stop the notifications you can safely close the\n\/\/ channel.\n\/\/\n\/\/ A common use case for this is when you have a custom pool of objects: instead\n\/\/ of setting a maximum size you can leave it unbounded and then drop all or\n\/\/ some of them after every GC run (e.g. sync.Pool drops all objects during GC).\nfunc AfterGC() chan struct{} {\n\ts := &sentinel{gcCh: make(chan struct{})}\n\truntime.SetFinalizer(s, finalizer)\n\treturn s.gcCh\n}\n\nfunc finalizer(obj interface{}) {\n\tdefer recover() \/\/ writing to a closed channel will panic\n\ts := obj.(*sentinel)\n\tselect {\n\tcase s.gcCh <- struct{}{}:\n\tdefault:\n\t}\n\t\/\/ we get here only if the channel was not closed\n\truntime.SetFinalizer(s, finalizer)\n}\n<commit_msg>Comment fail<commit_after>\/\/ Package gcnotifier provides a way to receive notifications after every time\n\/\/ garbage collection (GC) runs. This can be useful to instruct your code to\n\/\/ free additional memory resources that you may be using.\n\/\/\n\/\/ To minimize the load on the GC the code that runs after receiving the\n\/\/ notification should try to avoid allocations as much as possible, or at the\n\/\/ very least make sure that the amount of new memory allocated is significantly\n\/\/ smaller than the amount of memory that has been \"freed\" by your code.\npackage gcnotifier\n\nimport \"runtime\"\n\ntype sentinel struct {\n\tgcCh chan struct{}\n}\n\n\/\/ AfterGC returns a channel that will receive a notification after every GC\n\/\/ run. If a notification is not consumed before another GC runs only one of the\n\/\/ two notifications is sent. To stop the notifications you can safely close the\n\/\/ channel.\n\/\/\n\/\/ A common use case for this is when you have a custom pool of objects: instead\n\/\/ of setting a maximum size you can leave it unbounded and then drop all or\n\/\/ some of them after every GC run (e.g. sync.Pool drops all objects during GC).\nfunc AfterGC() chan struct{} {\n\ts := &sentinel{gcCh: make(chan struct{})}\n\truntime.SetFinalizer(s, finalizer)\n\treturn s.gcCh\n}\n\nfunc finalizer(obj interface{}) {\n\tdefer recover() \/\/ writing to a closed channel will panic\n\ts := obj.(*sentinel)\n\tselect {\n\tcase s.gcCh <- struct{}{}:\n\tdefault:\n\t}\n\t\/\/ we get here only if the channel was not closed\n\truntime.SetFinalizer(s, finalizer)\n}\n<|endoftext|>"} {"text":"<commit_before>package codegen\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/grokify\/gotilla\/io\/ioutilmore\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc ConvertFilesInPlaceNestedstructsToPointers(dir string, rx *regexp.Regexp) ([]string, error) {\n\tfilepaths := []string{}\n\tif rx == nil {\n\t\trx = regexp.MustCompile(`.*\\.go$`)\n\t}\n\tfiles, err := ioutilmore.DirEntriesRxSizeGt0(dir, ioutilmore.File, rx)\n\tif err != nil {\n\t\treturn filepaths, errors.Wrap(err, \"codegen.ConvertFilesInPlace.DirEntriesReNotEmpty\")\n\t}\n\tfor _, file := range files {\n\t\tfilepath := filepath.Join(dir, file.Name())\n\t\terr := ConvertFileNestedstructsToPointers(filepath, filepath, file.Mode().Perm())\n\t\tif err != nil {\n\t\t\treturn filepaths, errors.Wrap(err, \"codegen.ConvertFilesInPlace.ConvertFile\")\n\t\t}\n\t\tfilepaths = append(filepaths, filepath)\n\t}\n\treturn filepaths, nil\n}\n\nfunc ConvertFileNestedstructsToPointers(inFile, outFile string, perm os.FileMode) error {\n\tdata, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(\n\t\toutFile,\n\t\t[]byte(GoCodeNestedstructsToPointers(string(data))),\n\t\tperm)\n}\n\nvar (\n\trxParenOpen = regexp.MustCompile(`^type\\s+\\S+\\s+struct\\s+{\\s*$`)\n\trxParenClose = regexp.MustCompile(`^\\s*}\\s*$`)\n\trxCustomType = regexp.MustCompile(`^(\\s*[0-9A-Za-z]+\\s+(?:[0-9a-z\\]\\[]+\\])?)([A-Z].*)$`)\n)\n\n\/\/ GoCodeNestedstructsToPointers is designed to convert\n\/\/ nested structs to pointers.\nfunc GoCodeNestedstructsToPointers(code string) string {\n\toldLines := strings.Split(code, \"\\n\")\n\tnewLines := []string{}\n\tinParen := false\n\tfor _, line := range oldLines {\n\t\tif rxParenOpen.MatchString(line) {\n\t\t\tinParen = true\n\t\t\tnewLines = append(newLines, line)\n\t\t\tcontinue\n\t\t} else if rxParenClose.MatchString(line) {\n\t\t\tinParen = false\n\t\t\tnewLines = append(newLines, line)\n\t\t\tcontinue\n\t\t} else if inParen {\n\t\t\tmc := rxCustomType.FindStringSubmatch(line)\n\t\t\tif len(mc) > 0 {\n\t\t\t\tnewLines = append(newLines, mc[1]+\"*\"+mc[2])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tnewLines = append(newLines, line)\n\t}\n\treturn strings.Join(newLines, \"\\n\")\n}\n<commit_msg>streamline code<commit_after>package codegen\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/grokify\/gotilla\/io\/ioutilmore\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc ConvertFilesInPlaceNestedstructsToPointers(dir string, rx *regexp.Regexp) ([]string, error) {\n\tfilepaths := []string{}\n\tif rx == nil {\n\t\trx = regexp.MustCompile(`.*\\.go$`)\n\t}\n\tfiles, err := ioutilmore.DirEntriesRxSizeGt0(dir, ioutilmore.File, rx)\n\tif err != nil {\n\t\treturn filepaths, errors.Wrap(err, \"codegen.ConvertFilesInPlace.DirEntriesReNotEmpty\")\n\t}\n\tfor _, file := range files {\n\t\tfilepath := filepath.Join(dir, file.Name())\n\t\terr := ConvertFileNestedstructsToPointers(filepath, filepath, file.Mode().Perm())\n\t\tif err != nil {\n\t\t\treturn filepaths, errors.Wrap(err, \"codegen.ConvertFilesInPlace.ConvertFile\")\n\t\t}\n\t\tfilepaths = append(filepaths, filepath)\n\t}\n\treturn filepaths, nil\n}\n\nfunc ConvertFileNestedstructsToPointers(inFile, outFile string, perm os.FileMode) error {\n\tdata, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(\n\t\toutFile,\n\t\t[]byte(GoCodeNestedstructsToPointers(string(data))),\n\t\tperm)\n}\n\nvar (\n\trxParenOpen = regexp.MustCompile(`^type\\s+\\S+\\s+struct\\s+{\\s*$`)\n\trxParenClose = regexp.MustCompile(`^\\s*}\\s*$`)\n\trxCustomType = regexp.MustCompile(`^(\\s*[0-9A-Za-z]+\\s+(?:[0-9a-z\\]\\[]+\\])?)([A-Z].*)$`)\n)\n\n\/\/ GoCodeNestedstructsToPointers is designed to convert\n\/\/ nested structs to pointers.\nfunc GoCodeNestedstructsToPointers(code string) string {\n\toldLines := strings.Split(code, \"\\n\")\n\tnewLines := []string{}\n\tinParen := false\n\tfor _, line := range oldLines {\n\t\tif rxParenOpen.MatchString(line) {\n\t\t\tinParen = true\n\t\t\tnewLines = append(newLines, line)\n\t\t\tcontinue\n\t\t} else if rxParenClose.MatchString(line) {\n\t\t\tinParen = false\n\t\t\tnewLines = append(newLines, line)\n\t\t\tcontinue\n\t\t} else if inParen {\n\t\t\tm := rxCustomType.FindStringSubmatch(line)\n\t\t\tif len(m) > 0 {\n\t\t\t\tnewLines = append(newLines, m[1]+\"*\"+m[2])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tnewLines = append(newLines, line)\n\t}\n\treturn strings.Join(newLines, \"\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package cham\n\nimport (\n\t\"time\"\n)\n\nfunc mainStart(service *Service, args ...interface{}) Dispatch {\n\treturn func(session int32, source Address, ptype uint8, args ...interface{}) []interface{} {\n\t\treturn NORET\n\t}\n}\n\nvar Main *Service\n\nvar DTimer *Timer\n\nfunc init() {\n\tMain = NewService(\"main\", mainStart)\n\tDTimer = NewWheelTimer(time.Millisecond * 10)\n\tgo func() { DTimer.Start() }()\n}\n<commit_msg>cham Run and Stop<commit_after>package cham\n\nimport (\n\t\"time\"\n)\n\nconst (\n\tCHAM_STOP uint8 = iota\n)\n\nvar Main *Service\n\nvar DTimer *Timer\n\nvar stop chan NULL\n\nfunc mainStart(service *Service, args ...interface{}) Dispatch {\n\treturn func(session int32, source Address, ptype uint8, args ...interface{}) []interface{} {\n\t\tswitch ptype {\n\t\tcase CHAM_STOP:\n\t\t\tstop <- NULLVALUE\n\t\t}\n\t\treturn NORET\n\t}\n}\n\nfunc Run() {\n\t<-stop\n}\n\nfunc init() {\n\tMain = NewService(\"main\", mainStart)\n\tDTimer = NewWheelTimer(time.Millisecond * 10)\n\tgo func() { DTimer.Start() }()\n}\n<|endoftext|>"} {"text":"<commit_before>package hatchery\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"go.opencensus.io\/stats\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ WorkerPool returns all the worker owned by the hatchery h, registered or not on the CDS API\nfunc WorkerPool(h Interface, status ...sdk.Status) ([]sdk.Worker, error) {\n\tctx := WithTags(context.Background(), h)\n\n\t\/\/ First: call API\n\tregisteredWorkers, err := h.CDSClient().WorkerList()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get registered workers: %v\", err)\n\t}\n\n\t\/\/ Then: get all worker in the orchestrator queue\n\tstartedWorkers := h.WorkersStarted()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get started workers: %v\", err)\n\t}\n\n\t\/\/ Make the union of the two slices\n\tallWorkers := make([]sdk.Worker, 0, len(startedWorkers)+len(registeredWorkers))\n\n\t\/\/ Consider the registered worker\n\tfor _, w := range registeredWorkers {\n\t\tvar found bool\n\t\tfor i := range startedWorkers {\n\t\t\tif startedWorkers[i] == w.Name {\n\t\t\t\tstartedWorkers = append(startedWorkers[:i], startedWorkers[i+1:]...)\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found && w.Status != sdk.StatusDisabled {\n\t\t\tlog.Error(\"Hatchery > WorkerPool> Worker %s (status = %s) inconsistency\", w.Name, w.Status.String())\n\t\t\tif err := h.CDSClient().WorkerDisable(w.ID); err != nil {\n\t\t\t\tlog.Error(\"Hatchery > WorkerPool> Unable to disable worker [%s]%s\", w.ID, w.Name)\n\t\t\t}\n\t\t\tw.Status = sdk.StatusDisabled\n\t\t}\n\t\tallWorkers = append(allWorkers, w)\n\t}\n\n\t\/\/ And add the other worker with status pending of registering\n\tfor _, w := range startedWorkers {\n\t\tname := w\n\t\tstatus := sdk.StatusWorkerPending\n\t\tif strings.HasPrefix(w, \"register-\") {\n\t\t\tname = strings.Replace(w, \"register-\", \"\", 1)\n\t\t\tstatus = sdk.StatusWorkerRegistering\n\t\t}\n\t\tallWorkers = append(allWorkers, sdk.Worker{\n\t\t\tName: name,\n\t\t\tStatus: status,\n\t\t})\n\t}\n\n\tnbPerStatus := map[sdk.Status]int{}\n\tfor _, w := range allWorkers {\n\t\tnbPerStatus[w.Status] = nbPerStatus[w.Status] + 1\n\t}\n\n\tmeasures := []stats.Measurement{\n\t\th.Stats().PendingWorkers.M(int64(nbPerStatus[sdk.StatusWorkerPending])),\n\t\th.Stats().RegisteringWorkers.M(int64(nbPerStatus[sdk.StatusWorkerPending])),\n\t\th.Stats().WaitingWorkers.M(int64(nbPerStatus[sdk.StatusWaiting])),\n\t\th.Stats().CheckingWorkers.M(int64(nbPerStatus[sdk.StatusChecking])),\n\t\th.Stats().BuildingWorkers.M(int64(nbPerStatus[sdk.StatusBuilding])),\n\t\th.Stats().DisabledWorkers.M(int64(nbPerStatus[sdk.StatusDisabled])),\n\t}\n\tstats.Record(ctx, measures...)\n\n\t\/\/ Filter by status\n\tres := make([]sdk.Worker, 0, len(allWorkers))\n\tif len(status) == 0 {\n\t\tres = allWorkers\n\t} else {\n\t\tfor _, w := range allWorkers {\n\t\t\tfor _, s := range status {\n\t\t\t\tif s == w.Status {\n\t\t\t\t\tres = append(res, w)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res, nil\n}\n<commit_msg>fix(hatcheries): worker pool count (#3183)<commit_after>package hatchery\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"go.opencensus.io\/stats\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ WorkerPool returns all the worker owned by the hatchery h, registered or not on the CDS API\nfunc WorkerPool(h Interface, status ...sdk.Status) ([]sdk.Worker, error) {\n\tctx := WithTags(context.Background(), h)\n\n\t\/\/ First: call API\n\tregisteredWorkers, err := h.CDSClient().WorkerList()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get registered workers: %v\", err)\n\t}\n\n\t\/\/ Then: get all worker in the orchestrator queue\n\tstartedWorkers := h.WorkersStarted()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get started workers: %v\", err)\n\t}\n\n\t\/\/ Make the union of the two slices\n\tallWorkers := make([]sdk.Worker, 0, len(startedWorkers)+len(registeredWorkers))\n\n\t\/\/ Consider the registered worker\n\tfor k, w := range registeredWorkers {\n\t\tvar found bool\n\t\tfor i := range startedWorkers {\n\t\t\tif startedWorkers[i] == w.Name {\n\t\t\t\tstartedWorkers = append(startedWorkers[:i], startedWorkers[i+1:]...)\n\t\t\t\tfound = true\n\n\t\t\t\tif strings.HasPrefix(w.Name, \"register-\") {\n\t\t\t\t\tregisteredWorkers[k].Status = sdk.StatusWorkerRegistering\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found && w.Status != sdk.StatusDisabled {\n\t\t\tlog.Error(\"Hatchery > WorkerPool> Worker %s (status = %s) inconsistency\", w.Name, w.Status.String())\n\t\t\tif err := h.CDSClient().WorkerDisable(w.ID); err != nil {\n\t\t\t\tlog.Error(\"Hatchery > WorkerPool> Unable to disable worker [%s]%s\", w.ID, w.Name)\n\t\t\t}\n\t\t\tw.Status = sdk.StatusDisabled\n\t\t}\n\t\tallWorkers = append(allWorkers, w)\n\t}\n\n\t\/\/ And add the other worker with status pending of registering\n\tfor _, w := range startedWorkers {\n\t\tname := w\n\t\tvar status sdk.Status\n\n\t\tvar found bool\n\t\tfor _, wr := range registeredWorkers {\n\t\t\tif wr.Name == name {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\tcontinue \/\/ worker is registered\n\t\t}\n\n\t\tif strings.HasPrefix(w, \"register-\") {\n\t\t\tname = strings.Replace(w, \"register-\", \"\", 1)\n\t\t\tstatus = sdk.StatusWorkerRegistering\n\t\t}\n\n\t\tif status == \"\" {\n\t\t\tstatus = sdk.StatusWorkerPending\n\t\t}\n\t\tallWorkers = append(allWorkers, sdk.Worker{\n\t\t\tName: name,\n\t\t\tStatus: status,\n\t\t})\n\t}\n\n\tnbPerStatus := map[sdk.Status]int{}\n\tfor _, w := range allWorkers {\n\t\tnbPerStatus[w.Status] = nbPerStatus[w.Status] + 1\n\t}\n\n\tmeasures := []stats.Measurement{\n\t\th.Stats().PendingWorkers.M(int64(nbPerStatus[sdk.StatusWorkerPending])),\n\t\th.Stats().RegisteringWorkers.M(int64(nbPerStatus[sdk.StatusWorkerPending])),\n\t\th.Stats().WaitingWorkers.M(int64(nbPerStatus[sdk.StatusWaiting])),\n\t\th.Stats().CheckingWorkers.M(int64(nbPerStatus[sdk.StatusChecking])),\n\t\th.Stats().BuildingWorkers.M(int64(nbPerStatus[sdk.StatusBuilding])),\n\t\th.Stats().DisabledWorkers.M(int64(nbPerStatus[sdk.StatusDisabled])),\n\t}\n\tstats.Record(ctx, measures...)\n\n\t\/\/ Filter by status\n\tres := make([]sdk.Worker, 0, len(allWorkers))\n\tif len(status) == 0 {\n\t\tres = allWorkers\n\t} else {\n\t\tfor _, w := range allWorkers {\n\t\t\tfor _, s := range status {\n\t\t\t\tif s == w.Status {\n\t\t\t\t\tres = append(res, w)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/connections\"\n)\n\nconst URLRouteCreateGame = \"\/games\"\n\nconst MethodCreateGame = http.MethodPost\n\nconst (\n\tpostFieldConnectionLimit = \"limit\"\n\tpostFieldMapWidth = \"width\"\n\tpostFieldMapHeight = \"height\"\n)\n\ntype responseCreateGameHandler struct {\n\tID int `json:\"id\"`\n\tLimit int `json:\"limit\"`\n\tWidth uint8 `json:\"width\"`\n\tHeight uint8 `json:\"height\"`\n}\n\ntype createGameHandler struct {\n\tlogger logrus.FieldLogger\n\tgroupManager *connections.ConnectionGroupManager\n}\n\ntype ErrCreateGameHandler string\n\nfunc (e ErrCreateGameHandler) Error() string {\n\treturn \"create game handler error: \" + string(e)\n}\n\nfunc NewCreateGameHandler(logger logrus.FieldLogger, groupManager *connections.ConnectionGroupManager) http.Handler {\n\treturn &createGameHandler{\n\t\tlogger: logger,\n\t\tgroupManager: groupManager,\n\t}\n}\n\nfunc (h *createGameHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tconnectionLimit, err := strconv.Atoi(r.PostFormValue(postFieldConnectionLimit))\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif connectionLimit <= 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid connection limit\"), connectionLimit)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tmapWidth, err := strconv.ParseUint(r.PostFormValue(postFieldMapWidth), 10, 8)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif mapWidth == 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid map width\"), mapWidth)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tmapHeight, err := strconv.ParseUint(r.PostFormValue(postFieldMapHeight), 10, 8)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif mapHeight == 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid map height\"), mapHeight)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\th.logger.WithFields(logrus.Fields{\n\t\t\"width\": mapWidth,\n\t\t\"height\": mapHeight,\n\t\t\"connection_limit\": connectionLimit,\n\t}).Debug(\"create game group\")\n\n\tgroup, err := connections.NewConnectionGroup(h.logger, connectionLimit, uint8(mapWidth), uint8(mapHeight))\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tid, err := h.groupManager.Add(group)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrGroupLimitReached:\n\t\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\tcase connections.ErrConnsLimitReached:\n\t\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\n\th.logger.Info(\"start group\")\n\tgroup.Start()\n\n\th.logger.WithField(\"group_id\", id).Infoln(\"created group\")\n\n\tw.WriteHeader(http.StatusCreated)\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\terr = json.NewEncoder(w).Encode(responseCreateGameHandler{\n\t\tID: id,\n\t\tLimit: group.GetLimit(),\n\t\tWidth: uint8(mapWidth),\n\t\tHeight: uint8(mapHeight),\n\t})\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<commit_msg>Create JSON error response in createGameHandler<commit_after>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/connections\"\n)\n\nconst URLRouteCreateGame = \"\/games\"\n\nconst MethodCreateGame = http.MethodPost\n\nconst (\n\tpostFieldConnectionLimit = \"limit\"\n\tpostFieldMapWidth = \"width\"\n\tpostFieldMapHeight = \"height\"\n)\n\ntype responseCreateGameHandler struct {\n\tID int `json:\"id\"`\n\tLimit int `json:\"limit\"`\n\tWidth uint8 `json:\"width\"`\n\tHeight uint8 `json:\"height\"`\n}\n\ntype responseCreateGameHandlerError struct {\n\tCode int `json:\"code\"`\n\tText string `json:\"text\"`\n}\n\ntype createGameHandler struct {\n\tlogger logrus.FieldLogger\n\tgroupManager *connections.ConnectionGroupManager\n}\n\ntype ErrCreateGameHandler string\n\nfunc (e ErrCreateGameHandler) Error() string {\n\treturn \"create game handler error: \" + string(e)\n}\n\nfunc NewCreateGameHandler(logger logrus.FieldLogger, groupManager *connections.ConnectionGroupManager) http.Handler {\n\treturn &createGameHandler{\n\t\tlogger: logger,\n\t\tgroupManager: groupManager,\n\t}\n}\n\nfunc (h *createGameHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tconnectionLimit, err := strconv.Atoi(r.PostFormValue(postFieldConnectionLimit))\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid limit\",\n\t\t})\n\t\treturn\n\t}\n\tif connectionLimit <= 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid connection limit\"), connectionLimit)\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid limit\",\n\t\t})\n\t\treturn\n\t}\n\n\tmapWidth, err := strconv.ParseUint(r.PostFormValue(postFieldMapWidth), 10, 8)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid width\",\n\t\t})\n\t\treturn\n\t}\n\tif mapWidth == 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid map width\"), mapWidth)\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid width\",\n\t\t})\n\t\treturn\n\t}\n\n\tmapHeight, err := strconv.ParseUint(r.PostFormValue(postFieldMapHeight), 10, 8)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid height\",\n\t\t})\n\t\treturn\n\t}\n\tif mapHeight == 0 {\n\t\th.logger.Warnln(ErrCreateGameHandler(\"invalid map height\"), mapHeight)\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid height\",\n\t\t})\n\t\treturn\n\t}\n\n\th.logger.WithFields(logrus.Fields{\n\t\t\"width\": mapWidth,\n\t\t\"height\": mapHeight,\n\t\t\"connection_limit\": connectionLimit,\n\t}).Debug(\"create game group\")\n\n\tgroup, err := connections.NewConnectionGroup(h.logger, connectionLimit, uint8(mapWidth), uint8(mapHeight))\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t\th.writeResponseJSON(w, http.StatusInternalServerError, &responseCreateGameHandlerError{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tText: \"cannot create game\",\n\t\t})\n\t\treturn\n\t}\n\n\tid, err := h.groupManager.Add(group)\n\tif err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrGroupLimitReached:\n\t\t\th.writeResponseJSON(w, http.StatusServiceUnavailable, &responseCreateGameHandlerError{\n\t\t\t\tCode: http.StatusServiceUnavailable,\n\t\t\t\tText: \"groups limit reached\",\n\t\t\t})\n\t\tcase connections.ErrConnsLimitReached:\n\t\t\th.writeResponseJSON(w, http.StatusServiceUnavailable, &responseCreateGameHandlerError{\n\t\t\t\tCode: http.StatusServiceUnavailable,\n\t\t\t\tText: \"connections limit reached\",\n\t\t\t})\n\t\tdefault:\n\t\t\th.writeResponseJSON(w, http.StatusInternalServerError, &responseCreateGameHandlerError{\n\t\t\t\tCode: http.StatusInternalServerError,\n\t\t\t\tText: \"unknown error\",\n\t\t\t})\n\t\t}\n\t\treturn\n\t}\n\n\th.logger.Info(\"start group\")\n\tgroup.Start()\n\n\th.logger.WithField(\"group_id\", id).Infoln(\"created group\")\n\n\th.writeResponseJSON(w, http.StatusCreated, &responseCreateGameHandler{\n\t\tID: id,\n\t\tLimit: group.GetLimit(),\n\t\tWidth: uint8(mapWidth),\n\t\tHeight: uint8(mapHeight),\n\t})\n}\n\nfunc (h *createGameHandler) writeResponseJSON(w http.ResponseWriter, statusCode int, response interface{}) {\n\tw.WriteHeader(statusCode)\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\th.logger.Error(ErrCreateGameHandler(err.Error()))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\n\/\/ The main version number that is being run at the moment.\nconst Version = \"0.6.13\"\n\n\/\/ A pre-release marker for the version. If this is \"\" (empty string)\n\/\/ then it means that it is a final release. Otherwise, this is a pre-release\n\/\/ such as \"dev\" (in development), \"beta\", \"rc1\", etc.\nconst VersionPrerelease = \"dev\"\n<commit_msg>v0.6.13<commit_after>package terraform\n\n\/\/ The main version number that is being run at the moment.\nconst Version = \"0.6.13\"\n\n\/\/ A pre-release marker for the version. If this is \"\" (empty string)\n\/\/ then it means that it is a final release. Otherwise, this is a pre-release\n\/\/ such as \"dev\" (in development), \"beta\", \"rc1\", etc.\nconst VersionPrerelease = \"\"\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \/\/\"fmt\" \n \"image\"\n \"image\/color\"\n \/\/\"image\/gif\"\n \"os\"\n \"bufio\"\n \"io\"\n \"compress\/lzw\"\n \"bytes\"\n \"math\"\n)\n\nfunc writeHeader(w io.Writer, m image.Image) {\n buffer := bufio.NewWriter(w)\n\n buffer.WriteByte('G')\n buffer.WriteByte('I')\n buffer.WriteByte('F')\n buffer.WriteByte('8')\n buffer.WriteByte('9')\n buffer.WriteByte('a')\n\n b := m.Bounds()\n buffer.WriteByte(byte(b.Max.X \/ 255)) \/\/ Image width, LSB.\n buffer.WriteByte(byte(b.Max.X % 255)) \/\/ Image width, MSB.\n buffer.WriteByte(byte(b.Max.Y \/ 255)) \/\/ Image height, LSB.\n buffer.WriteByte(byte(b.Max.Y % 255)) \/\/ Image height, MSB.\n\n buffer.WriteByte(byte(0xF7)) \/\/ GCT follows for 256 colors with resolution\n \/\/ 3 x 8 bits\/primary\n buffer.WriteByte(byte(0x00)) \/\/ Background color.\n buffer.WriteByte(byte(0x00)) \/\/ Default pixel aspect ratio.\n\n \/\/ Grayscale color table.\n for i := 0; i < 256; i++ {\n buffer.WriteByte(byte(i)) \/\/ Repeat the same color value to get\n buffer.WriteByte(byte(i)) \/\/ grayscale.\n buffer.WriteByte(byte(i))\n }\n\n buffer.WriteByte(byte(0x21)) \/\/ Application Extension block.\n buffer.WriteByte(byte(0xF9)) \/\/ Application Extension block (cont).\n buffer.WriteByte(byte(0x0B)) \/\/ Next 11 bytes are Application Extension.\n buffer.WriteByte('N') \/\/ 8 Character application name.\n buffer.WriteByte('E')\n buffer.WriteByte('T')\n buffer.WriteByte('S')\n buffer.WriteByte('C')\n buffer.WriteByte('A')\n buffer.WriteByte('P')\n buffer.WriteByte('E')\n buffer.WriteByte('2')\n buffer.WriteByte('.')\n buffer.WriteByte('0')\n buffer.WriteByte(byte(0x03)) \/\/ 3 more bytes of Application Extension.\n buffer.WriteByte(byte(0x01)) \/\/ Data sub-block index (always 1).\n buffer.WriteByte(byte(0xFF)) \/\/ Number of repetitions, LSB.\n buffer.WriteByte(byte(0xFF)) \/\/ Number of repetitions, MSB.\n buffer.WriteByte(byte(0x00)) \/\/ End of Application Extension block.\n\n buffer.Flush()\n}\n\nfunc writeFrameHeader(w io.Writer, m image.Image) {\n buffer := bufio.NewWriter(w)\n\n buffer.WriteByte(byte(0x21)) \/\/ Start of Graphic Control Extension.\n buffer.WriteByte(byte(0xF9)) \/\/ Start of Graphic Control Extension (cont).\n buffer.WriteByte(byte(0x04)) \/\/ 4 more bytes of GCE.\n\n buffer.WriteByte(byte(0x00)) \/\/ There is no transparent pixel.\n buffer.WriteByte(byte(0x10)) \/\/ Animation delay, in centiseconds, LSB.\n buffer.WriteByte(byte(0x00)) \/\/ Animation delay, in centiseconds, MSB.\n buffer.WriteByte(byte(0x00)) \/\/ Transparent color #, if we were using.\n buffer.WriteByte(byte(0x00)) \/\/ End of Application Extension data.\n\n buffer.WriteByte(byte(0x2C)) \/\/ Start of Image Descriptor.\n\n b := m.Bounds()\n buffer.WriteByte(byte(b.Min.X \/ 255))\n buffer.WriteByte(byte(b.Min.X % 255))\n buffer.WriteByte(byte(b.Min.Y \/ 255))\n buffer.WriteByte(byte(b.Min.Y % 255))\n\n buffer.WriteByte(byte(b.Max.X \/ 255))\n buffer.WriteByte(byte(b.Max.X % 255))\n buffer.WriteByte(byte(b.Max.Y \/ 255))\n buffer.WriteByte(byte(b.Max.Y % 255))\n\n buffer.WriteByte(byte(0x00)) \/\/ No local color table.\n\n buffer.WriteByte(byte(0x08)) \/\/ Start of LZW with minimum code size 8.\n\n buffer.Flush()\n}\n\nfunc compressImage(m image.Image) *bytes.Buffer {\n b := m.Bounds()\n\n compressedImageBuffer := bytes.NewBuffer(make([]byte, 0, 255))\n lzww := lzw.NewWriter(compressedImageBuffer, lzw.LSB, int(8))\n\n for y := b.Min.Y; y < b.Max.Y; y++ {\n for x := b.Min.X; x < b.Max.X; x++ {\n c := color.GrayModel.Convert(m.At(x, y)).(color.Gray)\n lzww.Write([]byte{c.Y})\n }\n }\n lzww.Close()\n\n return compressedImageBuffer\n}\n\nfunc writeBlocks(w io.Writer, m image.Image) {\n compressedImage := compressImage(m)\n\n const maxBlockSize = 255\n bytesSoFar := 0\n bytesRemaining := compressedImage.Len()\n for bytesRemaining > 0 {\n if bytesSoFar == 0 {\n blockSize := math.Min(maxBlockSize, float64(bytesRemaining))\n w.Write([]byte{byte(blockSize)})\n }\n\n b, _ := compressedImage.ReadByte()\n writeFrameHeader(w, m)\n w.Write([]byte{b})\n\n bytesSoFar = (bytesSoFar + 1) % maxBlockSize\n bytesRemaining--\n }\n}\n\nfunc Encode(w io.Writer, m image.Image) error {\n return EncodeAll(w, []image.Image{m})\n}\n\nfunc EncodeAll(w io.Writer, images []image.Image) error {\n writeHeader(w, images[0])\n for _, m := range images {\n writeBlocks(w, m)\n }\n w.Write([]byte{0, ';'})\n\n return nil\n}\n\nfunc main() {\n m := image.NewRGBA(image.Rect(0, 0, 100, 100))\n for x := 0; x < 100; x++ {\n for y := 0; y < 100; y++ {\n c := byte(x ^ y)\n m.Set(x, y, color.RGBA{c, c, c, 0xFF})\n }\n }\n file, _ := os.Create(\"new_image.gif\")\n Encode(file, m)\n}\n<commit_msg>Use a single buffer<commit_after>package main\n\nimport (\n \/\/\"fmt\" \n \"image\"\n \"image\/color\"\n \/\/\"image\/gif\"\n \"os\"\n \"bufio\"\n \"io\"\n \"compress\/lzw\"\n \"bytes\"\n \"math\"\n)\n\nfunc writeHeader(w *bufio.Writer, m image.Image) {\n w.WriteByte('G')\n w.WriteByte('I')\n w.WriteByte('F')\n w.WriteByte('8')\n w.WriteByte('9')\n w.WriteByte('a')\n\n b := m.Bounds()\n w.WriteByte(byte(b.Max.X \/ 255)) \/\/ Image width, LSB.\n w.WriteByte(byte(b.Max.X % 255)) \/\/ Image width, MSB.\n w.WriteByte(byte(b.Max.Y \/ 255)) \/\/ Image height, LSB.\n w.WriteByte(byte(b.Max.Y % 255)) \/\/ Image height, MSB.\n\n w.WriteByte(byte(0xF7)) \/\/ GCT follows for 256 colors with resolution\n \/\/ 3 x 8 bits\/primary\n w.WriteByte(byte(0x00)) \/\/ Background color.\n w.WriteByte(byte(0x00)) \/\/ Default pixel aspect ratio.\n\n \/\/ Grayscale color table.\n for i := 0; i < 256; i++ {\n w.WriteByte(byte(i)) \/\/ Repeat the same color value to get\n w.WriteByte(byte(i)) \/\/ grayscale.\n w.WriteByte(byte(i))\n }\n\n w.WriteByte(byte(0x21)) \/\/ Application Extension block.\n w.WriteByte(byte(0xF9)) \/\/ Application Extension block (cont).\n w.WriteByte(byte(0x0B)) \/\/ Next 11 bytes are Application Extension.\n w.WriteByte('N') \/\/ 8 Character application name.\n w.WriteByte('E')\n w.WriteByte('T')\n w.WriteByte('S')\n w.WriteByte('C')\n w.WriteByte('A')\n w.WriteByte('P')\n w.WriteByte('E')\n w.WriteByte('2')\n w.WriteByte('.')\n w.WriteByte('0')\n w.WriteByte(byte(0x03)) \/\/ 3 more bytes of Application Extension.\n w.WriteByte(byte(0x01)) \/\/ Data sub-block index (always 1).\n w.WriteByte(byte(0xFF)) \/\/ Number of repetitions, LSB.\n w.WriteByte(byte(0xFF)) \/\/ Number of repetitions, MSB.\n w.WriteByte(byte(0x00)) \/\/ End of Application Extension block.\n}\n\nfunc writeFrameHeader(w *bufio.Writer, m image.Image) {\n w.WriteByte(byte(0x21)) \/\/ Start of Graphic Control Extension.\n w.WriteByte(byte(0xF9)) \/\/ Start of Graphic Control Extension (cont).\n w.WriteByte(byte(0x04)) \/\/ 4 more bytes of GCE.\n\n w.WriteByte(byte(0x00)) \/\/ There is no transparent pixel.\n w.WriteByte(byte(0x10)) \/\/ Animation delay, in centiseconds, LSB.\n w.WriteByte(byte(0x00)) \/\/ Animation delay, in centiseconds, MSB.\n w.WriteByte(byte(0x00)) \/\/ Transparent color #, if we were using.\n w.WriteByte(byte(0x00)) \/\/ End of Application Extension data.\n\n w.WriteByte(byte(0x2C)) \/\/ Start of Image Descriptor.\n\n b := m.Bounds()\n w.WriteByte(byte(b.Min.X \/ 255))\n w.WriteByte(byte(b.Min.X % 255))\n w.WriteByte(byte(b.Min.Y \/ 255))\n w.WriteByte(byte(b.Min.Y % 255))\n\n w.WriteByte(byte(b.Max.X \/ 255))\n w.WriteByte(byte(b.Max.X % 255))\n w.WriteByte(byte(b.Max.Y \/ 255))\n w.WriteByte(byte(b.Max.Y % 255))\n\n w.WriteByte(byte(0x00)) \/\/ No local color table.\n\n w.WriteByte(byte(0x08)) \/\/ Start of LZW with minimum code size 8.\n}\n\nfunc compressImage(m image.Image) *bytes.Buffer {\n b := m.Bounds()\n\n compressedImageBuffer := bytes.NewBuffer(make([]byte, 0, 255))\n lzww := lzw.NewWriter(compressedImageBuffer, lzw.LSB, int(8))\n\n for y := b.Min.Y; y < b.Max.Y; y++ {\n for x := b.Min.X; x < b.Max.X; x++ {\n c := color.GrayModel.Convert(m.At(x, y)).(color.Gray)\n lzww.Write([]byte{c.Y})\n }\n }\n lzww.Close()\n\n return compressedImageBuffer\n}\n\nfunc writeFrame(w *bufio.Writer, m image.Image) {\n writeFrameHeader(w, m)\n\n compressedImage := compressImage(m)\n\n const maxBlockSize = 255\n bytesSoFar := 0\n bytesRemaining := compressedImage.Len()\n for bytesRemaining > 0 {\n if bytesSoFar == 0 {\n blockSize := math.Min(maxBlockSize, float64(bytesRemaining))\n w.WriteByte(byte(blockSize))\n }\n\n b, _ := compressedImage.ReadByte()\n w.WriteByte(b)\n\n bytesSoFar = (bytesSoFar + 1) % maxBlockSize\n bytesRemaining--\n }\n}\n\nfunc Encode(w io.Writer, m image.Image) error {\n return EncodeAll(w, []image.Image{m})\n}\n\nfunc EncodeAll(w io.Writer, images []image.Image) error {\n buffer := bufio.NewWriter(w)\n\n writeHeader(buffer, images[0])\n for _, m := range images {\n writeFrame(buffer, m)\n }\n buffer.WriteByte(';')\n buffer.Flush()\n\n return nil\n}\n\nfunc main() {\n m := image.NewRGBA(image.Rect(0, 0, 100, 100))\n for x := 0; x < 100; x++ {\n for y := 0; y < 100; y++ {\n c := byte(x ^ y)\n m.Set(x, y, color.RGBA{c, c, c, 0xFF})\n }\n }\n file, _ := os.Create(\"new_image.gif\")\n Encode(file, m)\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"log\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/sclevine\/spec\"\n\t\"github.com\/sclevine\/spec\/report\"\n)\n\nfunc TestRunner(t *testing.T) {\n\tspec.Run(t, \"Regexp\", testRegexp, spec.Report(report.Terminal{}))\n}\n\ntype Case struct {\n\tinput string\n\tmatches bool\n\targs []string\n}\n\nfunc testRegexp(t *testing.T, when spec.G, it spec.S) {\n\tvar cases []Case\n\n\tit.Before(func() {\n\t\tRegisterTestingT(t)\n\t\tlog.SetFlags(log.Llongfile)\n\t\tcases = []Case{\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate counterfeiter . Intf\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate go run github.com\/maxbrunsfeld\/counterfeiter\/v6 . Intf\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/counterfeiter:generate . Intf\",\n\t\t\t\tmatches: true,\n\t\t\t\targs: []string{\".\", \"Intf\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate stringer -type=Enum\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t}\n\t})\n\n\tit.Focus(\"splits args correctly\", func() {\n\t\tExpect(stringToArgs(\". Intf\")).To(ConsistOf([]string{\"counterfeiter\", \".\", \"Intf\"}))\n\t\tExpect(stringToArgs(\" . Intf \")).To(ConsistOf([]string{\"counterfeiter\", \".\", \"Intf\"}))\n\t})\n\n\tit(\"matches lines appropriately\", func() {\n\t\tfor _, c := range cases {\n\t\t\tresult, ok := matchForString(c.input)\n\t\t\tif c.matches {\n\t\t\t\tExpect(ok).To(BeTrue())\n\t\t\t\tExpect(result).To(ConsistOf(c.args))\n\t\t\t} else {\n\t\t\t\tExpect(ok).To(BeFalse())\n\t\t\t}\n\t\t}\n\t})\n}\n<commit_msg>unfocus test<commit_after>package command\n\nimport (\n\t\"log\"\n\t\"testing\"\n\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/sclevine\/spec\"\n\t\"github.com\/sclevine\/spec\/report\"\n)\n\nfunc TestRunner(t *testing.T) {\n\tspec.Run(t, \"Regexp\", testRegexp, spec.Report(report.Terminal{}))\n}\n\ntype Case struct {\n\tinput string\n\tmatches bool\n\targs []string\n}\n\nfunc testRegexp(t *testing.T, when spec.G, it spec.S) {\n\tvar cases []Case\n\n\tit.Before(func() {\n\t\tRegisterTestingT(t)\n\t\tlog.SetFlags(log.Llongfile)\n\t\tcases = []Case{\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate counterfeiter . Intf\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate go run github.com\/maxbrunsfeld\/counterfeiter\/v6 . Intf\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/counterfeiter:generate . Intf\",\n\t\t\t\tmatches: true,\n\t\t\t\targs: []string{\".\", \"Intf\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: \"\/\/go:generate stringer -type=Enum\",\n\t\t\t\tmatches: false,\n\t\t\t},\n\t\t}\n\t})\n\n\tit(\"splits args correctly\", func() {\n\t\tExpect(stringToArgs(\". Intf\")).To(ConsistOf([]string{\"counterfeiter\", \".\", \"Intf\"}))\n\t\tExpect(stringToArgs(\" . Intf \")).To(ConsistOf([]string{\"counterfeiter\", \".\", \"Intf\"}))\n\t})\n\n\tit(\"matches lines appropriately\", func() {\n\t\tfor _, c := range cases {\n\t\t\tresult, ok := matchForString(c.input)\n\t\t\tif c.matches {\n\t\t\t\tExpect(ok).To(BeTrue())\n\t\t\t\tExpect(result).To(ConsistOf(c.args))\n\t\t\t} else {\n\t\t\t\tExpect(ok).To(BeFalse())\n\t\t\t}\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\tflag \"github.com\/ogier\/pflag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst (\n\tversion string = \"0.1\"\n)\n\nvar (\n\tdownloaded []int64\n\tboards string = \"a|b|c|d|e|f|g|gif|h|hr|k|m|o|p|r|s|t|u|v|vg|w|wg|i|ic|r9k|cm|hm|y|3|adv|an|cgl|ck|co|diy|fa|fit|hc|int|jp|lit|mlp|mu|n|po|pol|sci|soc|sp|tg|toy|trv|tv|vp|wsg|x|q\"\n\n\t\/\/ Options\n\tminWidth *int64 = flag.Int64P(\"min-width\", \"w\", 0, \"Minimum width of images\")\n\tminHeight *int64 = flag.Int64P(\"min-height\", \"h\", 0, \"Minimum height of images\")\n\trefresh *time.Duration = flag.DurationP(\"refresh\", \"r\", time.Second*30, \"Refresh rate (min 30s)\")\n\torignalNames *bool = flag.BoolP(\"original-names\", \"o\", false, \"Save images under original filenames\")\n\tshowVersion *bool = flag.BoolP(\"version\", \"v\", false, \"Show version\")\n)\n\ntype Post struct {\n\tNo int64\n\tResto int64\n\tSticky bool\n\tClosed bool\n\tNow string\n\tTime int64\n\tName string\n\tTrip string\n\tId string\n\tCapcode string\n\tCountry string\n\tCountry_name string\n\tEmail string\n\tSub string\n\tCom string\n\tTim int64\n\tFilename string\n\tExt string\n\tFsize int64\n\tMd5 string\n\tW int64\n\tH int64\n\tTn_w int64\n\tTn_h int64\n\tFiledeleted bool\n\tSpoiler bool\n\tCustom_spoiler bool\n\tOmitted_posts int64\n\tOmitted_images int64\n}\n\nfunc (p *Post) FullFileName(original bool) (s string) {\n\tif original {\n\t\ts = fmt.Sprintf(\"%s%s\", p.Filename, p.Ext)\n\t} else {\n\t\ts = fmt.Sprintf(\"%d%s\", p.Tim, p.Ext)\n\t}\n\treturn\n}\n\ntype Thread struct {\n\tPosts []Post\n}\n\nfunc checkError(err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseThreadId(s string) (b, id string, err error) {\n\tr, err := regexp.Compile(fmt.Sprintf(\"^\/?(%s)\/(?:res\/)?(\\\\d+)\", boards))\n\tif err != nil {\n\t\treturn b, id, err\n\t}\n\n\tgroups := r.FindSubmatch([]byte(s))\n\tif len(groups) < 1 {\n\t\treturn b, id, errors.New(\"Input does not match regex\")\n\t}\n\n\tb = string(groups[1])\n\tid = string(groups[2])\n\n\treturn b, id, nil\n}\n\nfunc parseThreadFromJson(r io.Reader) (Thread, error) {\n\tdec := json.NewDecoder(r)\n\tvar t Thread\n\tfor {\n\t\tif err := dec.Decode(&t); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\treturn t, nil\n}\n\nfunc downloadImage(board string, p Post) {\n\tif p.W < *minWidth || p.H < *minHeight || p.Filedeleted {\n\t\tdownloaded = append(downloaded, p.Tim)\n\t\treturn\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/images.4chan.org\/%s\/src\/%d%s\", board, p.Tim, p.Ext)\n\timg, _, err := getUrl(url)\n\tcheckError(err)\n\n\terr = ioutil.WriteFile(p.FullFileName(*orignalNames), img, 0644)\n\tcheckError(err)\n\n\tdownloaded = append(downloaded, p.Tim)\n}\n\nfunc getUrl(url string) ([]byte, int, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn []byte{}, 0, nil\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, 0, nil\n\t}\n\treturn body, resp.StatusCode, nil\n\n}\n\nfunc wasDownloaded(p Post) bool {\n\tfor _, e := range downloaded {\n\t\tif e == p.Tim {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc loadThread(board string, id string) {\n\turl := fmt.Sprintf(\"https:\/\/api.4chan.org\/%s\/res\/%s.json\", board, id)\n\tresp, status, err := getUrl(url)\n\tcheckError(err)\n\tif status != 200 {\n\t\tif status == 404 {\n\t\t\tlog.Println(\"Thread 404'd\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"Something went wrong\\nGot return code %d at '%s'.\\n\", status, url)\n\t\treturn\n\t}\n\n\tthread, err := parseThreadFromJson(bytes.NewReader(resp))\n\tif err != nil {\n\t\tlog.Println(\"Could not parse thread\")\n\t}\n\n\tfor _, e := range thread.Posts {\n\t\tif !wasDownloaded(e) {\n\t\t\tgo downloadImage(board, e)\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: chanloader [options] \/b\/res\/123456\\nOptions:\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif *refresh < time.Second*30 {\n\t\t*refresh = time.Second * 30\n\t}\n\n\tif *showVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc main() {\n\tboard, id, err := parseThreadId(flag.Arg(0))\n\tif err != nil {\n\t\tfmt.Printf(\"Invalid input: %s\\n\", flag.Arg(0))\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tticker := time.NewTicker(*refresh)\n\tfor {\n\t\tlog.Println(\"Loading thread\")\n\t\tgo loadThread(board, id)\n\t\t<-ticker.C\n\t}\n}\n<commit_msg>Set version to 0.0.1<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\tflag \"github.com\/ogier\/pflag\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"time\"\n)\n\nconst (\n\tversion string = \"0.0.1\"\n)\n\nvar (\n\tdownloaded []int64\n\tboards string = \"a|b|c|d|e|f|g|gif|h|hr|k|m|o|p|r|s|t|u|v|vg|w|wg|i|ic|r9k|cm|hm|y|3|adv|an|cgl|ck|co|diy|fa|fit|hc|int|jp|lit|mlp|mu|n|po|pol|sci|soc|sp|tg|toy|trv|tv|vp|wsg|x|q\"\n\n\t\/\/ Options\n\tminWidth *int64 = flag.Int64P(\"min-width\", \"w\", 0, \"Minimum width of images\")\n\tminHeight *int64 = flag.Int64P(\"min-height\", \"h\", 0, \"Minimum height of images\")\n\trefresh *time.Duration = flag.DurationP(\"refresh\", \"r\", time.Second*30, \"Refresh rate (min 30s)\")\n\torignalNames *bool = flag.BoolP(\"original-names\", \"o\", false, \"Save images under original filenames\")\n\tshowVersion *bool = flag.BoolP(\"version\", \"v\", false, \"Show version\")\n)\n\ntype Post struct {\n\tNo int64\n\tResto int64\n\tSticky bool\n\tClosed bool\n\tNow string\n\tTime int64\n\tName string\n\tTrip string\n\tId string\n\tCapcode string\n\tCountry string\n\tCountry_name string\n\tEmail string\n\tSub string\n\tCom string\n\tTim int64\n\tFilename string\n\tExt string\n\tFsize int64\n\tMd5 string\n\tW int64\n\tH int64\n\tTn_w int64\n\tTn_h int64\n\tFiledeleted bool\n\tSpoiler bool\n\tCustom_spoiler bool\n\tOmitted_posts int64\n\tOmitted_images int64\n}\n\nfunc (p *Post) FullFileName(original bool) (s string) {\n\tif original {\n\t\ts = fmt.Sprintf(\"%s%s\", p.Filename, p.Ext)\n\t} else {\n\t\ts = fmt.Sprintf(\"%d%s\", p.Tim, p.Ext)\n\t}\n\treturn\n}\n\ntype Thread struct {\n\tPosts []Post\n}\n\nfunc checkError(err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseThreadId(s string) (b, id string, err error) {\n\tr, err := regexp.Compile(fmt.Sprintf(\"^\/?(%s)\/(?:res\/)?(\\\\d+)\", boards))\n\tif err != nil {\n\t\treturn b, id, err\n\t}\n\n\tgroups := r.FindSubmatch([]byte(s))\n\tif len(groups) < 1 {\n\t\treturn b, id, errors.New(\"Input does not match regex\")\n\t}\n\n\tb = string(groups[1])\n\tid = string(groups[2])\n\n\treturn b, id, nil\n}\n\nfunc parseThreadFromJson(r io.Reader) (Thread, error) {\n\tdec := json.NewDecoder(r)\n\tvar t Thread\n\tfor {\n\t\tif err := dec.Decode(&t); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\treturn t, nil\n}\n\nfunc downloadImage(board string, p Post) {\n\tif p.W < *minWidth || p.H < *minHeight || p.Filedeleted {\n\t\tdownloaded = append(downloaded, p.Tim)\n\t\treturn\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/images.4chan.org\/%s\/src\/%d%s\", board, p.Tim, p.Ext)\n\timg, _, err := getUrl(url)\n\tcheckError(err)\n\n\terr = ioutil.WriteFile(p.FullFileName(*orignalNames), img, 0644)\n\tcheckError(err)\n\n\tdownloaded = append(downloaded, p.Tim)\n}\n\nfunc getUrl(url string) ([]byte, int, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn []byte{}, 0, nil\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, 0, nil\n\t}\n\treturn body, resp.StatusCode, nil\n\n}\n\nfunc wasDownloaded(p Post) bool {\n\tfor _, e := range downloaded {\n\t\tif e == p.Tim {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc loadThread(board string, id string) {\n\turl := fmt.Sprintf(\"https:\/\/api.4chan.org\/%s\/res\/%s.json\", board, id)\n\tresp, status, err := getUrl(url)\n\tcheckError(err)\n\tif status != 200 {\n\t\tif status == 404 {\n\t\t\tlog.Println(\"Thread 404'd\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"Something went wrong\\nGot return code %d at '%s'.\\n\", status, url)\n\t\treturn\n\t}\n\n\tthread, err := parseThreadFromJson(bytes.NewReader(resp))\n\tif err != nil {\n\t\tlog.Println(\"Could not parse thread\")\n\t}\n\n\tfor _, e := range thread.Posts {\n\t\tif !wasDownloaded(e) {\n\t\t\tgo downloadImage(board, e)\n\t\t}\n\t}\n}\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: chanloader [options] \/b\/res\/123456\\nOptions:\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif *refresh < time.Second*30 {\n\t\t*refresh = time.Second * 30\n\t}\n\n\tif *showVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}\n\nfunc main() {\n\tboard, id, err := parseThreadId(flag.Arg(0))\n\tif err != nil {\n\t\tfmt.Printf(\"Invalid input: %s\\n\", flag.Arg(0))\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tticker := time.NewTicker(*refresh)\n\tfor {\n\t\tlog.Println(\"Loading thread\")\n\t\tgo loadThread(board, id)\n\t\t<-ticker.C\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package oauth\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/RichardKnop\/go-oauth2-server\/config\"\n\t\"github.com\/RichardKnop\/go-oauth2-server\/migrations\"\n\t\"github.com\/areatech\/go-fixtures\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\/\/ sqlite driver\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nvar testDbPath = \"\/tmp\/oauth_testdb.sqlite\"\n\n\/\/ OauthTestSuite needs to be exported so the tests run\ntype OauthTestSuite struct {\n\tsuite.Suite\n\tcnf *config.Config\n\tdb *gorm.DB\n\tservice *Service\n\tclient *Client\n\tuser *User\n}\n\n\/\/ The SetupSuite method will be run by testify once, at the very\n\/\/ start of the testing suite, before any tests are run.\nfunc (suite *OauthTestSuite) SetupSuite() {\n\t\/\/ Delete the test database\n\tos.Remove(testDbPath)\n\n\t\/\/ Initialise the config\n\tsuite.cnf = config.NewConfig()\n\n\t\/\/ Init in-memory test database\n\tinMemoryDB, err := gorm.Open(\"sqlite3\", testDbPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsuite.db = &inMemoryDB\n\n\t\/\/ Run all migrations\n\tif err := migrations.Bootstrap(suite.db); err != nil {\n\t\tlog.Print(err)\n\t}\n\tif err := MigrateAll(suite.db); err != nil {\n\t\tlog.Print(err)\n\t}\n\n\t\/\/ Load test data from fixtures\n\tfor _, path := range []string{\n\t\t\"..\/fixtures\/test_data.yml\",\n\t} {\n\t\t\/\/ Read fixture data from the file\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t\/\/ Insert the fixture data\n\t\terr = fixtures.Load(data, suite.db.DB(), suite.cnf.Database.Type)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Fetch the test client\n\tsuite.client = new(Client)\n\tif err := suite.db.First(suite.client, 1).Error; err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Fetch the test user\n\tsuite.user = new(User)\n\tif err := suite.db.First(suite.user, 1).Error; err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Initialise the service\n\tsuite.service = NewService(suite.cnf, suite.db)\n}\n\n\/\/ The TearDownSuite method will be run by testify once, at the very\n\/\/ end of the testing suite, after all tests have been run.\nfunc (suite *OauthTestSuite) TearDownSuite() {\n\t\/\/\n}\n\n\/\/ The SetupTest method will be run before every test in the suite.\nfunc (suite *OauthTestSuite) SetupTest() {\n\t\/\/\n}\n\n\/\/ The TearDownTest method will be run after every test in the suite.\nfunc (suite *OauthTestSuite) TearDownTest() {\n\t\/\/ Scopes are static, populated from fixtures,\n\t\/\/ so there is no need to clear them after running a test\n\tsuite.db.Unscoped().Delete(AuthorizationCode{})\n\tsuite.db.Unscoped().Delete(RefreshToken{})\n\tsuite.db.Unscoped().Delete(AccessToken{})\n\tsuite.db.Unscoped().Not(\"id\", suite.user.ID).Delete(User{})\n\tsuite.db.Unscoped().Not(\"id\", suite.client.ID).Delete(Client{})\n}\n\n\/\/ TestOauthTestSuite ...\n\/\/ In order for 'go test' to run this suite, we need to create\n\/\/ a normal test function and pass our suite to suite.Run\nfunc TestOauthTestSuite(t *testing.T) {\n\tsuite.Run(t, new(OauthTestSuite))\n}\n<commit_msg>Update suite_test.go<commit_after>package oauth\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/RichardKnop\/go-oauth2-server\/config\"\n\t\"github.com\/RichardKnop\/go-oauth2-server\/migrations\"\n\t\"github.com\/areatech\/go-fixtures\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\/\/ sqlite driver\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\nvar testDbPath = \"\/tmp\/oauth_testdb.sqlite\"\n\n\/\/ OauthTestSuite needs to be exported so the tests run\ntype OauthTestSuite struct {\n\tsuite.Suite\n\tcnf *config.Config\n\tdb *gorm.DB\n\tservice *Service\n\tclient *Client\n\tuser *User\n}\n\n\/\/ The SetupSuite method will be run by testify once, at the very\n\/\/ start of the testing suite, before any tests are run.\nfunc (suite *OauthTestSuite) SetupSuite() {\n\t\/\/ Delete the test database\n\tos.Remove(testDbPath)\n\n\t\/\/ Initialise the config\n\tsuite.cnf = config.NewConfig()\n\n\t\/\/ Init in-memory test database\n\tinMemoryDB, err := gorm.Open(\"sqlite3\", testDbPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsuite.db = &inMemoryDB\n\n\t\/\/ Run all migrations\n\tif err := migrations.Bootstrap(suite.db); err != nil {\n\t\tlog.Print(err)\n\t}\n\tif err := MigrateAll(suite.db); err != nil {\n\t\tlog.Print(err)\n\t}\n\n\t\/\/ Load test data from fixtures\n\tfor _, path := range []string{\n\t\t\"..\/fixtures\/test_data.yml\",\n\t} {\n\t\t\/\/ Read fixture data from the file\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t\/\/ Insert the fixture data\n\t\terr = fixtures.Load(data, suite.db.DB(), suite.cnf.Database.Type)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t\/\/ Fetch the test client\n\tsuite.client = new(Client)\n\tif err := suite.db.First(suite.client, 1).Error; err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Fetch the test user\n\tsuite.user = new(User)\n\tif err := suite.db.First(suite.user, 1).Error; err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Initialise the service\n\tsuite.service = NewService(suite.cnf, suite.db)\n}\n\n\/\/ The TearDownSuite method will be run by testify once, at the very\n\/\/ end of the testing suite, after all tests have been run.\nfunc (suite *OauthTestSuite) TearDownSuite() {\n\t\/\/\n}\n\n\/\/ The SetupTest method will be run before every test in the suite.\nfunc (suite *OauthTestSuite) SetupTest() {\n\t\/\/\n}\n\n\/\/ The TearDownTest method will be run after every test in the suite.\nfunc (suite *OauthTestSuite) TearDownTest() {\n\t\/\/ Scopes are static, populated from fixtures,\n\t\/\/ so there is no need to clear them after running a test\n\tsuite.db.Unscoped().Delete(new(AuthorizationCode))\n\tsuite.db.Unscoped().Delete(new(RefreshToken))\n\tsuite.db.Unscoped().Delete(new(AccessToken))\n\tsuite.db.Unscoped().Not(\"id\", suite.user.ID).Delete(new(User))\n\tsuite.db.Unscoped().Not(\"id\", suite.client.ID).Delete(new(Client))\n}\n\n\/\/ TestOauthTestSuite ...\n\/\/ In order for 'go test' to run this suite, we need to create\n\/\/ a normal test function and pass our suite to suite.Run\nfunc TestOauthTestSuite(t *testing.T) {\n\tsuite.Run(t, new(OauthTestSuite))\n}\n<|endoftext|>"}